From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Thu, 11 Apr 2024 10:27:49 +0200
Subject: Adding upstream version 6.6.15.

Signed-off-by: Daniel Baumann
---
 drivers/scsi/.gitignore | 3 +
 drivers/scsi/3w-9xxx.c | 2304 +++
 drivers/scsi/3w-9xxx.h | 695 +
 drivers/scsi/3w-sas.c | 1857 ++
 drivers/scsi/3w-sas.h | 405 +
 drivers/scsi/3w-xxxx.c | 2430 +++
 drivers/scsi/3w-xxxx.h | 439 +
 drivers/scsi/53c700.c | 2118 ++
 drivers/scsi/53c700.h | 528 +
 drivers/scsi/53c700.scr | 411 +
 drivers/scsi/53c700_d.h_shipped | 1329 ++
 drivers/scsi/BusLogic.c | 3736 ++++
 drivers/scsi/BusLogic.h | 1284 ++
 drivers/scsi/FlashPoint.c | 7560 +++++++
 drivers/scsi/Kconfig | 1527 ++
 drivers/scsi/Makefile | 207 +
 drivers/scsi/NCR5380.c | 2411 +++
 drivers/scsi/NCR5380.h | 331 +
 drivers/scsi/a100u2w.c | 1226 ++
 drivers/scsi/a100u2w.h | 371 +
 drivers/scsi/a2091.c | 304 +
 drivers/scsi/a2091.h | 70 +
 drivers/scsi/a3000.c | 310 +
 drivers/scsi/a3000.h | 73 +
 drivers/scsi/a4000t.c | 125 +
 drivers/scsi/aacraid/Makefile | 7 +
 drivers/scsi/aacraid/aachba.c | 4164 ++++
 drivers/scsi/aacraid/aacraid.h | 2786 +++
 drivers/scsi/aacraid/commctrl.c | 1121 +
 drivers/scsi/aacraid/comminit.c | 660 +
 drivers/scsi/aacraid/commsup.c | 2582 +++
 drivers/scsi/aacraid/dpcsup.c | 456 +
 drivers/scsi/aacraid/linit.c | 2074 ++
 drivers/scsi/aacraid/nark.c | 72 +
 drivers/scsi/aacraid/rkt.c | 95 +
 drivers/scsi/aacraid/rx.c | 683 +
 drivers/scsi/aacraid/sa.c | 413 +
 drivers/scsi/aacraid/src.c | 1436 ++
 drivers/scsi/advansys.c | 11552 +++++++++++
 drivers/scsi/aha152x.c | 3434 ++++
 drivers/scsi/aha152x.h | 338 +
 drivers/scsi/aha1542.c | 1168 ++
 drivers/scsi/aha1542.h | 108 +
 drivers/scsi/aha1740.c | 684 +
 drivers/scsi/aha1740.h | 154 +
 drivers/scsi/aic7xxx/.gitignore | 7 +
 drivers/scsi/aic7xxx/Kconfig.aic79xx | 86 +
 drivers/scsi/aic7xxx/Kconfig.aic7xxx | 91 +
 drivers/scsi/aic7xxx/Makefile | 87 +
 drivers/scsi/aic7xxx/aic7770.c | 385 +
 drivers/scsi/aic7xxx/aic7770_osm.c | 156 +
 drivers/scsi/aic7xxx/aic79xx.h | 1465 ++
 drivers/scsi/aic7xxx/aic79xx.reg | 4281 ++++
 drivers/scsi/aic7xxx/aic79xx.seq | 2290 +++
 drivers/scsi/aic7xxx/aic79xx_core.c | 10724 ++++++++++
 drivers/scsi/aic7xxx/aic79xx_inline.h | 172 +
 drivers/scsi/aic7xxx/aic79xx_osm.c | 2851 +++
 drivers/scsi/aic7xxx/aic79xx_osm.h | 658 +
 drivers/scsi/aic7xxx/aic79xx_osm_pci.c | 378 +
 drivers/scsi/aic7xxx/aic79xx_pci.c | 1004 +
 drivers/scsi/aic7xxx/aic79xx_pci.h | 72 +
 drivers/scsi/aic7xxx/aic79xx_proc.c | 316 +
 drivers/scsi/aic7xxx/aic79xx_reg.h_shipped | 1810 ++
 drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped | 745 +
 drivers/scsi/aic7xxx/aic79xx_seq.h_shipped | 1189 ++
 drivers/scsi/aic7xxx/aic7xxx.h | 1273 ++
 drivers/scsi/aic7xxx/aic7xxx.reg | 1761 ++
 drivers/scsi/aic7xxx/aic7xxx.seq | 2399 +++
 drivers/scsi/aic7xxx/aic7xxx_93cx6.c | 318 +
 drivers/scsi/aic7xxx/aic7xxx_93cx6.h | 102 +
 drivers/scsi/aic7xxx/aic7xxx_core.c | 7901 +++++++
 drivers/scsi/aic7xxx/aic7xxx_inline.h | 97 +
 drivers/scsi/aic7xxx/aic7xxx_osm.c | 2577 +++
 drivers/scsi/aic7xxx/aic7xxx_osm.h | 674 +
 drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 447 +
 drivers/scsi/aic7xxx/aic7xxx_pci.c | 2460 +++
 drivers/scsi/aic7xxx/aic7xxx_pci.h | 125 +
 drivers/scsi/aic7xxx/aic7xxx_proc.c | 343 +
 drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped | 912 +
 drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped | 413 +
 drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped | 1307 ++
 drivers/scsi/aic7xxx/aicasm/Makefile | 74 +
 drivers/scsi/aic7xxx/aicasm/aicasm.c | 843 +
 drivers/scsi/aic7xxx/aicasm/aicasm.h | 91 +
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y | 1999 ++ drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h | 218 + drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y | 161 + drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l | 153 + drivers/scsi/aic7xxx/aicasm/aicasm_scan.l | 618 + drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c | 690 + drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h | 205 + drivers/scsi/aic7xxx/aiclib.h | 180 + drivers/scsi/aic7xxx/cam.h | 111 + drivers/scsi/aic7xxx/queue.h | 501 + drivers/scsi/aic7xxx/scsi_iu.h | 39 + drivers/scsi/aic7xxx/scsi_message.h | 40 + drivers/scsi/aic94xx/Kconfig | 24 + drivers/scsi/aic94xx/Makefile | 21 + drivers/scsi/aic94xx/aic94xx.h | 75 + drivers/scsi/aic94xx/aic94xx_dev.c | 344 + drivers/scsi/aic94xx/aic94xx_dump.c | 765 + drivers/scsi/aic94xx/aic94xx_dump.h | 25 + drivers/scsi/aic94xx/aic94xx_hwi.c | 1369 ++ drivers/scsi/aic94xx/aic94xx_hwi.h | 379 + drivers/scsi/aic94xx/aic94xx_init.c | 1053 + drivers/scsi/aic94xx/aic94xx_reg.c | 313 + drivers/scsi/aic94xx/aic94xx_reg.h | 284 + drivers/scsi/aic94xx/aic94xx_reg_def.h | 2381 +++ drivers/scsi/aic94xx/aic94xx_sas.h | 732 + drivers/scsi/aic94xx/aic94xx_scb.c | 928 + drivers/scsi/aic94xx/aic94xx_sds.c | 1462 ++ drivers/scsi/aic94xx/aic94xx_sds.h | 103 + drivers/scsi/aic94xx/aic94xx_seq.c | 1401 ++ drivers/scsi/aic94xx/aic94xx_seq.h | 50 + drivers/scsi/aic94xx/aic94xx_task.c | 612 + drivers/scsi/aic94xx/aic94xx_tmf.c | 686 + drivers/scsi/am53c974.c | 542 + drivers/scsi/arcmsr/Makefile | 7 + drivers/scsi/arcmsr/arcmsr.h | 1047 + drivers/scsi/arcmsr/arcmsr_attr.c | 411 + drivers/scsi/arcmsr/arcmsr_hba.c | 4723 +++++ drivers/scsi/arm/Kconfig | 74 + drivers/scsi/arm/Makefile | 14 + drivers/scsi/arm/acornscsi-io.S | 135 + drivers/scsi/arm/acornscsi.c | 2921 +++ drivers/scsi/arm/acornscsi.h | 350 + drivers/scsi/arm/arm_scsi.h | 136 + drivers/scsi/arm/arxescsi.c | 363 + drivers/scsi/arm/cumana_1.c | 341 + drivers/scsi/arm/cumana_2.c | 524 + drivers/scsi/arm/eesox.c | 646 + drivers/scsi/arm/fas216.c | 3038 +++ drivers/scsi/arm/fas216.h | 404 + drivers/scsi/arm/msgqueue.c | 168 + drivers/scsi/arm/msgqueue.h | 79 + drivers/scsi/arm/oak.c | 213 + drivers/scsi/arm/powertec.c | 452 + drivers/scsi/arm/queue.c | 319 + drivers/scsi/arm/queue.h | 104 + drivers/scsi/atari_scsi.c | 892 + drivers/scsi/atp870u.c | 2380 +++ drivers/scsi/atp870u.h | 63 + drivers/scsi/be2iscsi/Kconfig | 11 + drivers/scsi/be2iscsi/Makefile | 9 + drivers/scsi/be2iscsi/be.h | 205 + drivers/scsi/be2iscsi/be_cmds.c | 1864 ++ drivers/scsi/be2iscsi/be_cmds.h | 1461 ++ drivers/scsi/be2iscsi/be_iscsi.c | 1415 ++ drivers/scsi/be2iscsi/be_iscsi.h | 76 + drivers/scsi/be2iscsi/be_main.c | 5863 ++++++ drivers/scsi/be2iscsi/be_main.h | 1029 + drivers/scsi/be2iscsi/be_mgmt.c | 1559 ++ drivers/scsi/be2iscsi/be_mgmt.h | 242 + drivers/scsi/bfa/Makefile | 7 + drivers/scsi/bfa/bfa.h | 440 + drivers/scsi/bfa/bfa_core.c | 2008 ++ drivers/scsi/bfa/bfa_cs.h | 326 + drivers/scsi/bfa/bfa_defs.h | 1280 ++ drivers/scsi/bfa/bfa_defs_fcs.h | 471 + drivers/scsi/bfa/bfa_defs_svc.h | 1456 ++ drivers/scsi/bfa/bfa_fc.h | 1606 ++ drivers/scsi/bfa/bfa_fcbuild.c | 1348 ++ drivers/scsi/bfa/bfa_fcbuild.h | 312 + drivers/scsi/bfa/bfa_fcpim.c | 3897 ++++ drivers/scsi/bfa/bfa_fcpim.h | 422 + drivers/scsi/bfa/bfa_fcs.c | 1616 ++ drivers/scsi/bfa/bfa_fcs.h | 869 + drivers/scsi/bfa/bfa_fcs_fcpim.c | 833 + drivers/scsi/bfa/bfa_fcs_lport.c | 6982 +++++++ drivers/scsi/bfa/bfa_fcs_rport.c | 3449 ++++ drivers/scsi/bfa/bfa_hw_cb.c | 182 + drivers/scsi/bfa/bfa_hw_ct.c | 169 + drivers/scsi/bfa/bfa_ioc.c | 7032 
+++++++ drivers/scsi/bfa/bfa_ioc.h | 1045 + drivers/scsi/bfa/bfa_ioc_cb.c | 401 + drivers/scsi/bfa/bfa_ioc_ct.c | 990 + drivers/scsi/bfa/bfa_modules.h | 123 + drivers/scsi/bfa/bfa_plog.h | 148 + drivers/scsi/bfa/bfa_port.c | 864 + drivers/scsi/bfa/bfa_port.h | 119 + drivers/scsi/bfa/bfa_svc.c | 6897 +++++++ drivers/scsi/bfa/bfa_svc.h | 756 + drivers/scsi/bfa/bfad.c | 1797 ++ drivers/scsi/bfa/bfad_attr.c | 1007 + drivers/scsi/bfa/bfad_bsg.c | 3615 ++++ drivers/scsi/bfa/bfad_bsg.h | 829 + drivers/scsi/bfa/bfad_debugfs.c | 503 + drivers/scsi/bfa/bfad_drv.h | 352 + drivers/scsi/bfa/bfad_im.c | 1330 ++ drivers/scsi/bfa/bfad_im.h | 201 + drivers/scsi/bfa/bfi.h | 1317 ++ drivers/scsi/bfa/bfi_ms.h | 871 + drivers/scsi/bfa/bfi_reg.h | 452 + drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h | 1004 + drivers/scsi/bnx2fc/Kconfig | 14 + drivers/scsi/bnx2fc/Makefile | 5 + drivers/scsi/bnx2fc/bnx2fc.h | 608 + drivers/scsi/bnx2fc/bnx2fc_constants.h | 288 + drivers/scsi/bnx2fc/bnx2fc_debug.c | 84 + drivers/scsi/bnx2fc/bnx2fc_debug.h | 47 + drivers/scsi/bnx2fc/bnx2fc_els.c | 950 + drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 2989 +++ drivers/scsi/bnx2fc/bnx2fc_hwi.c | 2199 ++ drivers/scsi/bnx2fc/bnx2fc_io.c | 2102 ++ drivers/scsi/bnx2fc/bnx2fc_tgt.c | 896 + drivers/scsi/bnx2i/57xx_iscsi_constants.h | 161 + drivers/scsi/bnx2i/57xx_iscsi_hsi.h | 1526 ++ drivers/scsi/bnx2i/Kconfig | 15 + drivers/scsi/bnx2i/Makefile | 4 + drivers/scsi/bnx2i/bnx2i.h | 882 + drivers/scsi/bnx2i/bnx2i_hwi.c | 2745 +++ drivers/scsi/bnx2i/bnx2i_init.c | 550 + drivers/scsi/bnx2i/bnx2i_iscsi.c | 2306 +++ drivers/scsi/bnx2i/bnx2i_sysfs.c | 158 + drivers/scsi/bvme6000_scsi.c | 139 + drivers/scsi/ch.c | 1031 + drivers/scsi/constants.c | 444 + drivers/scsi/csiostor/Kconfig | 20 + drivers/scsi/csiostor/Makefile | 13 + drivers/scsi/csiostor/csio_attr.c | 805 + drivers/scsi/csiostor/csio_defs.h | 121 + drivers/scsi/csiostor/csio_hw.c | 4434 ++++ drivers/scsi/csiostor/csio_hw.h | 666 + drivers/scsi/csiostor/csio_hw_chip.h | 135 + drivers/scsi/csiostor/csio_hw_t5.c | 369 + drivers/scsi/csiostor/csio_init.c | 1256 ++ drivers/scsi/csiostor/csio_init.h | 136 + drivers/scsi/csiostor/csio_isr.c | 610 + drivers/scsi/csiostor/csio_lnode.c | 2152 ++ drivers/scsi/csiostor/csio_lnode.h | 255 + drivers/scsi/csiostor/csio_mb.c | 1690 ++ drivers/scsi/csiostor/csio_mb.h | 263 + drivers/scsi/csiostor/csio_rnode.c | 921 + drivers/scsi/csiostor/csio_rnode.h | 141 + drivers/scsi/csiostor/csio_scsi.c | 2529 +++ drivers/scsi/csiostor/csio_scsi.h | 352 + drivers/scsi/csiostor/csio_wr.c | 1719 ++ drivers/scsi/csiostor/csio_wr.h | 512 + drivers/scsi/csiostor/t4fw_api_stor.h | 539 + drivers/scsi/cxgbi/Kconfig | 3 + drivers/scsi/cxgbi/Makefile | 5 + drivers/scsi/cxgbi/cxgb3i/Kbuild | 5 + drivers/scsi/cxgbi/cxgb3i/Kconfig | 12 + drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 1415 ++ drivers/scsi/cxgbi/cxgb3i/cxgb3i.h | 62 + drivers/scsi/cxgbi/cxgb4i/Kbuild | 5 + drivers/scsi/cxgbi/cxgb4i/Kconfig | 14 + drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2473 +++ drivers/scsi/cxgbi/cxgb4i/cxgb4i.h | 28 + drivers/scsi/cxgbi/libcxgbi.c | 3097 +++ drivers/scsi/cxgbi/libcxgbi.h | 646 + drivers/scsi/cxlflash/Kconfig | 13 + drivers/scsi/cxlflash/Makefile | 5 + drivers/scsi/cxlflash/backend.h | 48 + drivers/scsi/cxlflash/common.h | 340 + drivers/scsi/cxlflash/cxl_hw.c | 177 + drivers/scsi/cxlflash/lunmgt.c | 278 + drivers/scsi/cxlflash/main.c | 3967 ++++ drivers/scsi/cxlflash/main.h | 129 + drivers/scsi/cxlflash/ocxl_hw.c | 1399 ++ drivers/scsi/cxlflash/ocxl_hw.h | 72 + drivers/scsi/cxlflash/sislite.h | 560 + 
drivers/scsi/cxlflash/superpipe.c | 2220 ++ drivers/scsi/cxlflash/superpipe.h | 153 + drivers/scsi/cxlflash/vlun.c | 1337 ++ drivers/scsi/cxlflash/vlun.h | 82 + drivers/scsi/dc395x.c | 4693 +++++ drivers/scsi/dc395x.h | 611 + drivers/scsi/device_handler/Kconfig | 41 + drivers/scsi/device_handler/Makefile | 8 + drivers/scsi/device_handler/scsi_dh_alua.c | 1315 ++ drivers/scsi/device_handler/scsi_dh_emc.c | 548 + drivers/scsi/device_handler/scsi_dh_hp_sw.c | 267 + drivers/scsi/device_handler/scsi_dh_rdac.c | 837 + drivers/scsi/dmx3191d.c | 148 + drivers/scsi/elx/Kconfig | 9 + drivers/scsi/elx/Makefile | 18 + drivers/scsi/elx/efct/efct_driver.c | 782 + drivers/scsi/elx/efct/efct_driver.h | 108 + drivers/scsi/elx/efct/efct_hw.c | 3580 ++++ drivers/scsi/elx/efct/efct_hw.h | 764 + drivers/scsi/elx/efct/efct_hw_queues.c | 677 + drivers/scsi/elx/efct/efct_io.c | 190 + drivers/scsi/elx/efct/efct_io.h | 174 + drivers/scsi/elx/efct/efct_lio.c | 1675 ++ drivers/scsi/elx/efct/efct_lio.h | 189 + drivers/scsi/elx/efct/efct_scsi.c | 1157 ++ drivers/scsi/elx/efct/efct_scsi.h | 203 + drivers/scsi/elx/efct/efct_unsol.c | 492 + drivers/scsi/elx/efct/efct_unsol.h | 17 + drivers/scsi/elx/efct/efct_xport.c | 1111 + drivers/scsi/elx/efct/efct_xport.h | 186 + drivers/scsi/elx/include/efc_common.h | 37 + drivers/scsi/elx/libefc/efc.h | 52 + drivers/scsi/elx/libefc/efc_cmds.c | 782 + drivers/scsi/elx/libefc/efc_cmds.h | 35 + drivers/scsi/elx/libefc/efc_device.c | 1602 ++ drivers/scsi/elx/libefc/efc_device.h | 72 + drivers/scsi/elx/libefc/efc_domain.c | 1088 + drivers/scsi/elx/libefc/efc_domain.h | 54 + drivers/scsi/elx/libefc/efc_els.c | 1094 + drivers/scsi/elx/libefc/efc_els.h | 107 + drivers/scsi/elx/libefc/efc_fabric.c | 1563 ++ drivers/scsi/elx/libefc/efc_fabric.h | 116 + drivers/scsi/elx/libefc/efc_node.c | 1102 + drivers/scsi/elx/libefc/efc_node.h | 191 + drivers/scsi/elx/libefc/efc_nport.c | 777 + drivers/scsi/elx/libefc/efc_nport.h | 50 + drivers/scsi/elx/libefc/efc_sm.c | 54 + drivers/scsi/elx/libefc/efc_sm.h | 197 + drivers/scsi/elx/libefc/efclib.c | 81 + drivers/scsi/elx/libefc/efclib.h | 623 + drivers/scsi/elx/libefc_sli/sli4.c | 5155 +++++ drivers/scsi/elx/libefc_sli/sli4.h | 4132 ++++ drivers/scsi/esas2r/Kconfig | 6 + drivers/scsi/esas2r/Makefile | 6 + drivers/scsi/esas2r/atioctl.h | 1255 ++ drivers/scsi/esas2r/atvda.h | 1319 ++ drivers/scsi/esas2r/esas2r.h | 1426 ++ drivers/scsi/esas2r/esas2r_disc.c | 1185 ++ drivers/scsi/esas2r/esas2r_flash.c | 1522 ++ drivers/scsi/esas2r/esas2r_init.c | 1699 ++ drivers/scsi/esas2r/esas2r_int.c | 944 + drivers/scsi/esas2r/esas2r_io.c | 877 + drivers/scsi/esas2r/esas2r_ioctl.c | 2087 ++ drivers/scsi/esas2r/esas2r_log.c | 252 + drivers/scsi/esas2r/esas2r_log.h | 118 + drivers/scsi/esas2r/esas2r_main.c | 1912 ++ drivers/scsi/esas2r/esas2r_targdb.c | 306 + drivers/scsi/esas2r/esas2r_vda.c | 524 + drivers/scsi/esp_scsi.c | 2909 +++ drivers/scsi/esp_scsi.h | 585 + drivers/scsi/fcoe/Makefile | 5 + drivers/scsi/fcoe/fcoe.c | 2824 +++ drivers/scsi/fcoe/fcoe.h | 93 + drivers/scsi/fcoe/fcoe_ctlr.c | 3258 +++ drivers/scsi/fcoe/fcoe_sysfs.c | 1066 + drivers/scsi/fcoe/fcoe_transport.c | 1057 + drivers/scsi/fcoe/libfcoe.h | 36 + drivers/scsi/fdomain.c | 607 + drivers/scsi/fdomain.h | 114 + drivers/scsi/fdomain_isa.c | 219 + drivers/scsi/fdomain_pci.c | 68 + drivers/scsi/fnic/Makefile | 18 + drivers/scsi/fnic/cq_desc.h | 66 + drivers/scsi/fnic/cq_enet_desc.h | 155 + drivers/scsi/fnic/cq_exch_desc.h | 170 + drivers/scsi/fnic/fcpio.h | 768 + drivers/scsi/fnic/fnic.h | 386 + 
drivers/scsi/fnic/fnic_attrs.c | 53 + drivers/scsi/fnic/fnic_debugfs.c | 718 + drivers/scsi/fnic/fnic_fcs.c | 1397 ++ drivers/scsi/fnic/fnic_fip.h | 48 + drivers/scsi/fnic/fnic_io.h | 69 + drivers/scsi/fnic/fnic_isr.c | 322 + drivers/scsi/fnic/fnic_main.c | 1174 ++ drivers/scsi/fnic/fnic_res.c | 431 + drivers/scsi/fnic/fnic_res.h | 237 + drivers/scsi/fnic/fnic_scsi.c | 2720 +++ drivers/scsi/fnic/fnic_stats.h | 129 + drivers/scsi/fnic/fnic_trace.c | 826 + drivers/scsi/fnic/fnic_trace.h | 115 + drivers/scsi/fnic/rq_enet_desc.h | 46 + drivers/scsi/fnic/vnic_cq.c | 73 + drivers/scsi/fnic/vnic_cq.h | 109 + drivers/scsi/fnic/vnic_cq_copy.h | 50 + drivers/scsi/fnic/vnic_dev.c | 943 + drivers/scsi/fnic/vnic_dev.h | 153 + drivers/scsi/fnic/vnic_devcmd.h | 492 + drivers/scsi/fnic/vnic_intr.c | 48 + drivers/scsi/fnic/vnic_intr.h | 106 + drivers/scsi/fnic/vnic_nic.h | 57 + drivers/scsi/fnic/vnic_resource.h | 56 + drivers/scsi/fnic/vnic_rq.c | 181 + drivers/scsi/fnic/vnic_rq.h | 223 + drivers/scsi/fnic/vnic_scsi.h | 88 + drivers/scsi/fnic/vnic_stats.h | 56 + drivers/scsi/fnic/vnic_wq.c | 234 + drivers/scsi/fnic/vnic_wq.h | 171 + drivers/scsi/fnic/vnic_wq_copy.c | 98 + drivers/scsi/fnic/vnic_wq_copy.h | 116 + drivers/scsi/fnic/wq_enet_desc.h | 84 + drivers/scsi/g_NCR5380.c | 818 + drivers/scsi/gvp11.c | 476 + drivers/scsi/gvp11.h | 53 + drivers/scsi/hisi_sas/Kconfig | 26 + drivers/scsi/hisi_sas/Makefile | 4 + drivers/scsi/hisi_sas/hisi_sas.h | 673 + drivers/scsi/hisi_sas/hisi_sas_main.c | 2626 +++ drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 1816 ++ drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 3657 ++++ drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 5298 +++++ drivers/scsi/hosts.c | 744 + drivers/scsi/hpsa.c | 10022 +++++++++ drivers/scsi/hpsa.h | 673 + drivers/scsi/hpsa_cmd.h | 888 + drivers/scsi/hptiop.c | 1699 ++ drivers/scsi/hptiop.h | 374 + drivers/scsi/ibmvscsi/Makefile | 3 + drivers/scsi/ibmvscsi/ibmvfc.c | 6503 ++++++ drivers/scsi/ibmvscsi/ibmvfc.h | 938 + drivers/scsi/ibmvscsi/ibmvscsi.c | 2434 +++ drivers/scsi/ibmvscsi/ibmvscsi.h | 104 + drivers/scsi/ibmvscsi_tgt/Makefile | 4 + drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 4076 ++++ drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h | 362 + drivers/scsi/ibmvscsi_tgt/libsrp.c | 418 + drivers/scsi/ibmvscsi_tgt/libsrp.h | 127 + drivers/scsi/imm.c | 1288 ++ drivers/scsi/imm.h | 149 + drivers/scsi/initio.c | 2965 +++ drivers/scsi/initio.h | 651 + drivers/scsi/ipr.c | 10121 +++++++++ drivers/scsi/ipr.h | 1929 ++ drivers/scsi/ips.c | 7099 +++++++ drivers/scsi/ips.h | 1213 ++ drivers/scsi/isci/Makefile | 9 + drivers/scsi/isci/host.c | 2804 +++ drivers/scsi/isci/host.h | 516 + drivers/scsi/isci/init.c | 777 + drivers/scsi/isci/isci.h | 537 + drivers/scsi/isci/phy.c | 1482 ++ drivers/scsi/isci/phy.h | 459 + drivers/scsi/isci/port.c | 1773 ++ drivers/scsi/isci/port.h | 283 + drivers/scsi/isci/port_config.c | 760 + drivers/scsi/isci/probe_roms.c | 231 + drivers/scsi/isci/probe_roms.h | 330 + drivers/scsi/isci/registers.h | 1863 ++ drivers/scsi/isci/remote_device.c | 1727 ++ drivers/scsi/isci/remote_device.h | 382 + drivers/scsi/isci/remote_node_context.c | 805 + drivers/scsi/isci/remote_node_context.h | 236 + drivers/scsi/isci/remote_node_table.c | 598 + drivers/scsi/isci/remote_node_table.h | 188 + drivers/scsi/isci/request.c | 3519 ++++ drivers/scsi/isci/request.h | 313 + drivers/scsi/isci/sas.h | 217 + drivers/scsi/isci/scu_completion_codes.h | 285 + drivers/scsi/isci/scu_event_codes.h | 336 + drivers/scsi/isci/scu_remote_node_context.h | 229 + 
drivers/scsi/isci/scu_task_context.h | 965 + drivers/scsi/isci/task.c | 781 + drivers/scsi/isci/task.h | 181 + drivers/scsi/isci/unsolicited_frame_control.c | 211 + drivers/scsi/isci/unsolicited_frame_control.h | 282 + drivers/scsi/iscsi_boot_sysfs.c | 554 + drivers/scsi/iscsi_tcp.c | 1151 ++ drivers/scsi/iscsi_tcp.h | 62 + drivers/scsi/jazz_esp.c | 211 + drivers/scsi/lasi700.c | 171 + drivers/scsi/libfc/Makefile | 15 + drivers/scsi/libfc/fc_disc.c | 746 + drivers/scsi/libfc/fc_elsct.c | 140 + drivers/scsi/libfc/fc_encode.h | 951 + drivers/scsi/libfc/fc_exch.c | 2712 +++ drivers/scsi/libfc/fc_fcp.c | 2313 +++ drivers/scsi/libfc/fc_frame.c | 79 + drivers/scsi/libfc/fc_libfc.c | 319 + drivers/scsi/libfc/fc_libfc.h | 127 + drivers/scsi/libfc/fc_lport.c | 2200 ++ drivers/scsi/libfc/fc_npiv.c | 147 + drivers/scsi/libfc/fc_rport.c | 2292 +++ drivers/scsi/libiscsi.c | 3934 ++++ drivers/scsi/libiscsi_tcp.c | 1250 ++ drivers/scsi/libsas/Kconfig | 33 + drivers/scsi/libsas/Makefile | 21 + drivers/scsi/libsas/sas_ata.c | 966 + drivers/scsi/libsas/sas_discover.c | 607 + drivers/scsi/libsas/sas_event.c | 216 + drivers/scsi/libsas/sas_expander.c | 2139 ++ drivers/scsi/libsas/sas_host_smp.c | 354 + drivers/scsi/libsas/sas_init.c | 711 + drivers/scsi/libsas/sas_internal.h | 214 + drivers/scsi/libsas/sas_phy.c | 162 + drivers/scsi/libsas/sas_port.c | 371 + drivers/scsi/libsas/sas_scsi_host.c | 1242 ++ drivers/scsi/libsas/sas_task.c | 42 + drivers/scsi/lpfc/Makefile | 36 + drivers/scsi/lpfc/lpfc.h | 1873 ++ drivers/scsi/lpfc/lpfc_attr.c | 7415 +++++++ drivers/scsi/lpfc/lpfc_attr.h | 128 + drivers/scsi/lpfc/lpfc_bsg.c | 5690 ++++++ drivers/scsi/lpfc/lpfc_bsg.h | 387 + drivers/scsi/lpfc/lpfc_compat.h | 98 + drivers/scsi/lpfc/lpfc_crtn.h | 694 + drivers/scsi/lpfc/lpfc_ct.c | 3825 ++++ drivers/scsi/lpfc/lpfc_debugfs.c | 6692 ++++++ drivers/scsi/lpfc/lpfc_debugfs.h | 699 + drivers/scsi/lpfc/lpfc_disc.h | 293 + drivers/scsi/lpfc/lpfc_els.c | 12467 +++++++++++ drivers/scsi/lpfc/lpfc_hbadisc.c | 7319 +++++++ drivers/scsi/lpfc/lpfc_hw.h | 4440 ++++ drivers/scsi/lpfc/lpfc_hw4.h | 5068 +++++ drivers/scsi/lpfc/lpfc_ids.h | 156 + drivers/scsi/lpfc/lpfc_init.c | 15880 ++++++++++++++ drivers/scsi/lpfc/lpfc_logmsg.h | 99 + drivers/scsi/lpfc/lpfc_mbox.c | 2671 +++ drivers/scsi/lpfc/lpfc_mem.c | 756 + drivers/scsi/lpfc/lpfc_nl.h | 181 + drivers/scsi/lpfc/lpfc_nportdisc.c | 3153 +++ drivers/scsi/lpfc/lpfc_nvme.c | 2846 +++ drivers/scsi/lpfc/lpfc_nvme.h | 253 + drivers/scsi/lpfc/lpfc_nvmet.c | 3642 ++++ drivers/scsi/lpfc/lpfc_scsi.c | 6801 ++++++ drivers/scsi/lpfc/lpfc_scsi.h | 149 + drivers/scsi/lpfc/lpfc_sli.c | 22736 +++++++++++++++++++++ drivers/scsi/lpfc/lpfc_sli.h | 483 + drivers/scsi/lpfc/lpfc_sli4.h | 1199 ++ drivers/scsi/lpfc/lpfc_version.h | 37 + drivers/scsi/lpfc/lpfc_vmid.c | 325 + drivers/scsi/lpfc/lpfc_vport.c | 801 + drivers/scsi/lpfc/lpfc_vport.h | 118 + drivers/scsi/mac53c94.c | 570 + drivers/scsi/mac53c94.h | 226 + drivers/scsi/mac_esp.c | 448 + drivers/scsi/mac_scsi.c | 548 + drivers/scsi/megaraid.c | 4636 +++++ drivers/scsi/megaraid.h | 1020 + drivers/scsi/megaraid/Kconfig.megaraid | 87 + drivers/scsi/megaraid/Makefile | 6 + drivers/scsi/megaraid/mbox_defs.h | 783 + drivers/scsi/megaraid/mega_common.h | 284 + drivers/scsi/megaraid/megaraid_ioctl.h | 302 + drivers/scsi/megaraid/megaraid_mbox.c | 4060 ++++ drivers/scsi/megaraid/megaraid_mbox.h | 232 + drivers/scsi/megaraid/megaraid_mm.c | 1246 ++ drivers/scsi/megaraid/megaraid_mm.h | 97 + drivers/scsi/megaraid/megaraid_sas.h | 2764 +++ 
drivers/scsi/megaraid/megaraid_sas_base.c | 9129 +++++++++ drivers/scsi/megaraid/megaraid_sas_debugfs.c | 179 + drivers/scsi/megaraid/megaraid_sas_fp.c | 1425 ++ drivers/scsi/megaraid/megaraid_sas_fusion.c | 5375 +++++ drivers/scsi/megaraid/megaraid_sas_fusion.h | 1396 ++ drivers/scsi/mesh.c | 2073 ++ drivers/scsi/mesh.h | 139 + drivers/scsi/mpi3mr/Kconfig | 9 + drivers/scsi/mpi3mr/Makefile | 6 + drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h | 2500 +++ drivers/scsi/mpi3mr/mpi/mpi30_image.h | 275 + drivers/scsi/mpi3mr/mpi/mpi30_init.h | 138 + drivers/scsi/mpi3mr/mpi/mpi30_ioc.h | 1064 + drivers/scsi/mpi3mr/mpi/mpi30_pci.h | 18 + drivers/scsi/mpi3mr/mpi/mpi30_sas.h | 46 + drivers/scsi/mpi3mr/mpi/mpi30_transport.h | 470 + drivers/scsi/mpi3mr/mpi3mr.h | 1420 ++ drivers/scsi/mpi3mr/mpi3mr_app.c | 1871 ++ drivers/scsi/mpi3mr/mpi3mr_debug.h | 197 + drivers/scsi/mpi3mr/mpi3mr_fw.c | 5832 ++++++ drivers/scsi/mpi3mr/mpi3mr_os.c | 5502 +++++ drivers/scsi/mpi3mr/mpi3mr_transport.c | 3291 +++ drivers/scsi/mpt3sas/Kconfig | 83 + drivers/scsi/mpt3sas/Makefile | 11 + drivers/scsi/mpt3sas/mpi/mpi2.h | 1300 ++ drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 4093 ++++ drivers/scsi/mpt3sas/mpi/mpi2_image.h | 516 + drivers/scsi/mpt3sas/mpi/mpi2_init.h | 591 + drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | 1811 ++ drivers/scsi/mpt3sas/mpi/mpi2_pci.h | 113 + drivers/scsi/mpt3sas/mpi/mpi2_raid.h | 356 + drivers/scsi/mpt3sas/mpi/mpi2_sas.h | 304 + drivers/scsi/mpt3sas/mpi/mpi2_tool.h | 565 + drivers/scsi/mpt3sas/mpi/mpi2_type.h | 58 + drivers/scsi/mpt3sas/mpt3sas_base.c | 8959 ++++++++ drivers/scsi/mpt3sas/mpt3sas_base.h | 2072 ++ drivers/scsi/mpt3sas/mpt3sas_config.c | 2794 +++ drivers/scsi/mpt3sas/mpt3sas_ctl.c | 4189 ++++ drivers/scsi/mpt3sas/mpt3sas_ctl.h | 451 + drivers/scsi/mpt3sas/mpt3sas_debug.h | 206 + drivers/scsi/mpt3sas/mpt3sas_debugfs.c | 157 + drivers/scsi/mpt3sas/mpt3sas_scsih.c | 12942 ++++++++++++ drivers/scsi/mpt3sas/mpt3sas_transport.c | 2200 ++ drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c | 473 + drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h | 194 + drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h | 94 + drivers/scsi/mpt3sas/mpt3sas_warpdrive.c | 299 + drivers/scsi/mvme147.c | 162 + drivers/scsi/mvme147.h | 25 + drivers/scsi/mvme16x_scsi.c | 160 + drivers/scsi/mvsas/Kconfig | 32 + drivers/scsi/mvsas/Makefile | 16 + drivers/scsi/mvsas/mv_64xx.c | 814 + drivers/scsi/mvsas/mv_64xx.h | 136 + drivers/scsi/mvsas/mv_94xx.c | 1182 ++ drivers/scsi/mvsas/mv_94xx.h | 333 + drivers/scsi/mvsas/mv_chips.h | 254 + drivers/scsi/mvsas/mv_defs.h | 490 + drivers/scsi/mvsas/mv_init.c | 791 + drivers/scsi/mvsas/mv_sas.c | 1933 ++ drivers/scsi/mvsas/mv_sas.h | 456 + drivers/scsi/mvumi.c | 2632 +++ drivers/scsi/mvumi.h | 566 + drivers/scsi/myrb.c | 3562 ++++ drivers/scsi/myrb.h | 958 + drivers/scsi/myrs.c | 3168 +++ drivers/scsi/myrs.h | 1134 + drivers/scsi/ncr53c8xx.c | 8410 ++++++++ drivers/scsi/ncr53c8xx.h | 1303 ++ drivers/scsi/nsp32.c | 3404 +++ drivers/scsi/nsp32.h | 617 + drivers/scsi/nsp32_debug.c | 263 + drivers/scsi/nsp32_io.h | 259 + drivers/scsi/pcmcia/Kconfig | 89 + drivers/scsi/pcmcia/Makefile | 13 + drivers/scsi/pcmcia/aha152x_core.c | 3 + drivers/scsi/pcmcia/aha152x_stub.c | 226 + drivers/scsi/pcmcia/fdomain_cs.c | 97 + drivers/scsi/pcmcia/nsp_cs.c | 1758 ++ drivers/scsi/pcmcia/nsp_cs.h | 377 + drivers/scsi/pcmcia/nsp_debug.c | 215 + drivers/scsi/pcmcia/nsp_io.h | 274 + drivers/scsi/pcmcia/nsp_message.c | 78 + drivers/scsi/pcmcia/qlogic_stub.c | 313 + drivers/scsi/pcmcia/sym53c500_cs.c | 882 + drivers/scsi/pm8001/Makefile | 17 + 
drivers/scsi/pm8001/pm8001_chips.h | 89 + drivers/scsi/pm8001/pm8001_ctl.c | 1041 + drivers/scsi/pm8001/pm8001_ctl.h | 68 + drivers/scsi/pm8001/pm8001_defs.h | 143 + drivers/scsi/pm8001/pm8001_hwi.c | 4838 +++++ drivers/scsi/pm8001/pm8001_hwi.h | 1030 + drivers/scsi/pm8001/pm8001_init.c | 1569 ++ drivers/scsi/pm8001/pm8001_sas.c | 1195 ++ drivers/scsi/pm8001/pm8001_sas.h | 794 + drivers/scsi/pm8001/pm80xx_hwi.c | 4940 +++++ drivers/scsi/pm8001/pm80xx_hwi.h | 1665 ++ drivers/scsi/pm8001/pm80xx_tracepoints.c | 10 + drivers/scsi/pm8001/pm80xx_tracepoints.h | 113 + drivers/scsi/pmcraid.c | 5394 +++++ drivers/scsi/pmcraid.h | 1047 + drivers/scsi/ppa.c | 1164 ++ drivers/scsi/ppa.h | 147 + drivers/scsi/ps3rom.c | 437 + drivers/scsi/qedf/Kconfig | 12 + drivers/scsi/qedf/Makefile | 6 + drivers/scsi/qedf/drv_fcoe_fw_funcs.c | 196 + drivers/scsi/qedf/drv_fcoe_fw_funcs.h | 90 + drivers/scsi/qedf/drv_scsi_fw_funcs.c | 41 + drivers/scsi/qedf/drv_scsi_fw_funcs.h | 82 + drivers/scsi/qedf/qedf.h | 603 + drivers/scsi/qedf/qedf_attr.c | 186 + drivers/scsi/qedf/qedf_dbg.c | 175 + drivers/scsi/qedf/qedf_dbg.h | 158 + drivers/scsi/qedf/qedf_debugfs.c | 494 + drivers/scsi/qedf/qedf_els.c | 1066 + drivers/scsi/qedf/qedf_fip.c | 301 + drivers/scsi/qedf/qedf_hsi.h | 351 + drivers/scsi/qedf/qedf_io.c | 2630 +++ drivers/scsi/qedf/qedf_main.c | 4195 ++++ drivers/scsi/qedf/qedf_version.h | 12 + drivers/scsi/qedi/Kconfig | 13 + drivers/scsi/qedi/Makefile | 6 + drivers/scsi/qedi/qedi.h | 393 + drivers/scsi/qedi/qedi_dbg.c | 127 + drivers/scsi/qedi/qedi_dbg.h | 138 + drivers/scsi/qedi/qedi_debugfs.c | 228 + drivers/scsi/qedi/qedi_fw.c | 2158 ++ drivers/scsi/qedi/qedi_fw_api.c | 802 + drivers/scsi/qedi/qedi_fw_iscsi.h | 114 + drivers/scsi/qedi/qedi_fw_scsi.h | 52 + drivers/scsi/qedi/qedi_gbl.h | 74 + drivers/scsi/qedi/qedi_hsi.h | 49 + drivers/scsi/qedi/qedi_iscsi.c | 1710 ++ drivers/scsi/qedi/qedi_iscsi.h | 239 + drivers/scsi/qedi/qedi_main.c | 2964 +++ drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h | 207 + drivers/scsi/qedi/qedi_sysfs.c | 58 + drivers/scsi/qedi/qedi_version.h | 11 + drivers/scsi/qla1280.c | 4388 ++++ drivers/scsi/qla1280.h | 1071 + drivers/scsi/qla2xxx/Kconfig | 50 + drivers/scsi/qla2xxx/Makefile | 8 + drivers/scsi/qla2xxx/qla_attr.c | 3384 +++ drivers/scsi/qla2xxx/qla_bsg.c | 3170 +++ drivers/scsi/qla2xxx/qla_bsg.h | 323 + drivers/scsi/qla2xxx/qla_dbg.c | 2800 +++ drivers/scsi/qla2xxx/qla_dbg.h | 431 + drivers/scsi/qla2xxx/qla_def.h | 5563 +++++ drivers/scsi/qla2xxx/qla_devtbl.h | 100 + drivers/scsi/qla2xxx/qla_dfs.c | 780 + drivers/scsi/qla2xxx/qla_dsd.h | 32 + drivers/scsi/qla2xxx/qla_edif.c | 3714 ++++ drivers/scsi/qla2xxx/qla_edif.h | 150 + drivers/scsi/qla2xxx/qla_edif_bsg.h | 271 + drivers/scsi/qla2xxx/qla_fw.h | 2287 +++ drivers/scsi/qla2xxx/qla_gbl.h | 1021 + drivers/scsi/qla2xxx/qla_gs.c | 4022 ++++ drivers/scsi/qla2xxx/qla_init.c | 10029 +++++++++ drivers/scsi/qla2xxx/qla_inline.h | 633 + drivers/scsi/qla2xxx/qla_iocb.c | 4471 ++++ drivers/scsi/qla2xxx/qla_isr.c | 4820 +++++ drivers/scsi/qla2xxx/qla_mbx.c | 7109 +++++++ drivers/scsi/qla2xxx/qla_mid.c | 1292 ++ drivers/scsi/qla2xxx/qla_mr.c | 3407 +++ drivers/scsi/qla2xxx/qla_mr.h | 529 + drivers/scsi/qla2xxx/qla_nvme.c | 1326 ++ drivers/scsi/qla2xxx/qla_nvme.h | 150 + drivers/scsi/qla2xxx/qla_nx.c | 4477 ++++ drivers/scsi/qla2xxx/qla_nx.h | 1192 ++ drivers/scsi/qla2xxx/qla_nx2.c | 4075 ++++ drivers/scsi/qla2xxx/qla_nx2.h | 579 + drivers/scsi/qla2xxx/qla_os.c | 8379 ++++++++ drivers/scsi/qla2xxx/qla_settings.h | 11 + drivers/scsi/qla2xxx/qla_sup.c | 
3640 ++++ drivers/scsi/qla2xxx/qla_target.c | 7323 +++++++ drivers/scsi/qla2xxx/qla_target.h | 1093 + drivers/scsi/qla2xxx/qla_tmpl.c | 1100 + drivers/scsi/qla2xxx/qla_tmpl.h | 245 + drivers/scsi/qla2xxx/qla_version.h | 14 + drivers/scsi/qla2xxx/tcm_qla2xxx.c | 1938 ++ drivers/scsi/qla2xxx/tcm_qla2xxx.h | 80 + drivers/scsi/qla4xxx/Kconfig | 9 + drivers/scsi/qla4xxx/Makefile | 6 + drivers/scsi/qla4xxx/ql4_83xx.c | 1584 ++ drivers/scsi/qla4xxx/ql4_83xx.h | 353 + drivers/scsi/qla4xxx/ql4_attr.c | 359 + drivers/scsi/qla4xxx/ql4_bsg.c | 872 + drivers/scsi/qla4xxx/ql4_bsg.h | 31 + drivers/scsi/qla4xxx/ql4_dbg.c | 161 + drivers/scsi/qla4xxx/ql4_dbg.h | 61 + drivers/scsi/qla4xxx/ql4_def.h | 1084 + drivers/scsi/qla4xxx/ql4_fw.h | 1441 ++ drivers/scsi/qla4xxx/ql4_glbl.h | 291 + drivers/scsi/qla4xxx/ql4_init.c | 1265 ++ drivers/scsi/qla4xxx/ql4_inline.h | 95 + drivers/scsi/qla4xxx/ql4_iocb.c | 541 + drivers/scsi/qla4xxx/ql4_isr.c | 1621 ++ drivers/scsi/qla4xxx/ql4_mbx.c | 2451 +++ drivers/scsi/qla4xxx/ql4_nvram.c | 255 + drivers/scsi/qla4xxx/ql4_nvram.h | 253 + drivers/scsi/qla4xxx/ql4_nx.c | 4209 ++++ drivers/scsi/qla4xxx/ql4_nx.h | 1007 + drivers/scsi/qla4xxx/ql4_os.c | 9956 +++++++++ drivers/scsi/qla4xxx/ql4_version.h | 7 + drivers/scsi/qlogicfas.c | 229 + drivers/scsi/qlogicfas408.c | 644 + drivers/scsi/qlogicfas408.h | 119 + drivers/scsi/qlogicpti.c | 1476 ++ drivers/scsi/qlogicpti.h | 507 + drivers/scsi/raid_class.c | 268 + drivers/scsi/script_asm.pl | 971 + drivers/scsi/scsi.c | 1015 + drivers/scsi/scsi_bsg.c | 103 + drivers/scsi/scsi_common.c | 400 + drivers/scsi/scsi_debug.c | 7856 +++++++ drivers/scsi/scsi_debugfs.c | 69 + drivers/scsi/scsi_debugfs.h | 5 + drivers/scsi/scsi_devinfo.c | 886 + drivers/scsi/scsi_dh.c | 373 + drivers/scsi/scsi_error.c | 2576 +++ drivers/scsi/scsi_ioctl.c | 960 + drivers/scsi/scsi_lib.c | 3337 +++ drivers/scsi/scsi_lib_dma.c | 52 + drivers/scsi/scsi_logging.c | 439 + drivers/scsi/scsi_logging.h | 85 + drivers/scsi/scsi_netlink.c | 144 + drivers/scsi/scsi_pm.c | 273 + drivers/scsi/scsi_priv.h | 207 + drivers/scsi/scsi_proc.c | 576 + drivers/scsi/scsi_sas_internal.h | 43 + drivers/scsi/scsi_scan.c | 2009 ++ drivers/scsi/scsi_sysctl.c | 37 + drivers/scsi/scsi_sysfs.c | 1689 ++ drivers/scsi/scsi_trace.c | 391 + drivers/scsi/scsi_transport_api.h | 7 + drivers/scsi/scsi_transport_fc.c | 4356 ++++ drivers/scsi/scsi_transport_iscsi.c | 5060 +++++ drivers/scsi/scsi_transport_sas.c | 1930 ++ drivers/scsi/scsi_transport_spi.c | 1640 ++ drivers/scsi/scsi_transport_srp.c | 900 + drivers/scsi/scsicam.c | 257 + drivers/scsi/sd.c | 4106 ++++ drivers/scsi/sd.h | 299 + drivers/scsi/sd_dif.c | 83 + drivers/scsi/sd_trace.h | 84 + drivers/scsi/sd_zbc.c | 975 + drivers/scsi/sense_codes.h | 879 + drivers/scsi/ses.c | 923 + drivers/scsi/sg.c | 2630 +++ drivers/scsi/sgiwd93.c | 332 + drivers/scsi/sim710.c | 240 + drivers/scsi/smartpqi/Kconfig | 56 + drivers/scsi/smartpqi/Makefile | 3 + drivers/scsi/smartpqi/smartpqi.h | 1704 ++ drivers/scsi/smartpqi/smartpqi_init.c | 10754 ++++++++++ drivers/scsi/smartpqi/smartpqi_sas_transport.c | 574 + drivers/scsi/smartpqi/smartpqi_sis.c | 502 + drivers/scsi/smartpqi/smartpqi_sis.h | 37 + drivers/scsi/sni_53c710.c | 129 + drivers/scsi/snic/Makefile | 18 + drivers/scsi/snic/cq_desc.h | 63 + drivers/scsi/snic/cq_enet_desc.h | 24 + drivers/scsi/snic/snic.h | 402 + drivers/scsi/snic/snic_attrs.c | 72 + drivers/scsi/snic/snic_ctl.c | 257 + drivers/scsi/snic/snic_debugfs.c | 442 + drivers/scsi/snic/snic_disc.c | 554 + drivers/scsi/snic/snic_disc.h | 110 + 
drivers/scsi/snic/snic_fwint.h | 513 + drivers/scsi/snic/snic_io.c | 555 + drivers/scsi/snic/snic_io.h | 104 + drivers/scsi/snic/snic_isr.c | 180 + drivers/scsi/snic/snic_main.c | 998 + drivers/scsi/snic/snic_res.c | 281 + drivers/scsi/snic/snic_res.h | 83 + drivers/scsi/snic/snic_scsi.c | 2643 +++ drivers/scsi/snic/snic_stats.h | 115 + drivers/scsi/snic/snic_trc.c | 156 + drivers/scsi/snic/snic_trc.h | 104 + drivers/scsi/snic/vnic_cq.c | 66 + drivers/scsi/snic/vnic_cq.h | 96 + drivers/scsi/snic/vnic_cq_fw.h | 48 + drivers/scsi/snic/vnic_dev.c | 749 + drivers/scsi/snic/vnic_dev.h | 96 + drivers/scsi/snic/vnic_devcmd.h | 256 + drivers/scsi/snic/vnic_intr.c | 45 + drivers/scsi/snic/vnic_intr.h | 91 + drivers/scsi/snic/vnic_resource.h | 54 + drivers/scsi/snic/vnic_snic.h | 40 + drivers/scsi/snic/vnic_stats.h | 54 + drivers/scsi/snic/vnic_wq.c | 223 + drivers/scsi/snic/vnic_wq.h | 156 + drivers/scsi/snic/wq_enet_desc.h | 82 + drivers/scsi/sr.c | 1008 + drivers/scsi/sr.h | 78 + drivers/scsi/sr_ioctl.c | 597 + drivers/scsi/sr_vendor.c | 340 + drivers/scsi/st.c | 4931 +++++ drivers/scsi/st.h | 245 + drivers/scsi/st_options.h | 105 + drivers/scsi/stex.c | 2030 ++ drivers/scsi/storvsc_drv.c | 2234 ++ drivers/scsi/sun3_scsi.c | 670 + drivers/scsi/sun3_scsi_vme.c | 3 + drivers/scsi/sun3x_esp.c | 281 + drivers/scsi/sun_esp.c | 615 + drivers/scsi/sym53c8xx_2/Makefile | 5 + drivers/scsi/sym53c8xx_2/sym53c8xx.h | 202 + drivers/scsi/sym53c8xx_2/sym_defs.h | 779 + drivers/scsi/sym53c8xx_2/sym_fw.c | 537 + drivers/scsi/sym53c8xx_2/sym_fw.h | 192 + drivers/scsi/sym53c8xx_2/sym_fw1.h | 1777 ++ drivers/scsi/sym53c8xx_2/sym_fw2.h | 1862 ++ drivers/scsi/sym53c8xx_2/sym_glue.c | 2057 ++ drivers/scsi/sym53c8xx_2/sym_glue.h | 257 + drivers/scsi/sym53c8xx_2/sym_hipd.c | 5839 ++++++ drivers/scsi/sym53c8xx_2/sym_hipd.h | 1213 ++ drivers/scsi/sym53c8xx_2/sym_malloc.c | 365 + drivers/scsi/sym53c8xx_2/sym_misc.h | 177 + drivers/scsi/sym53c8xx_2/sym_nvram.c | 767 + drivers/scsi/sym53c8xx_2/sym_nvram.h | 201 + drivers/scsi/virtio_scsi.c | 1042 + drivers/scsi/vmw_pvscsi.c | 1621 ++ drivers/scsi/vmw_pvscsi.h | 461 + drivers/scsi/wd33c93.c | 2147 ++ drivers/scsi/wd33c93.h | 341 + drivers/scsi/wd719x.c | 995 + drivers/scsi/wd719x.h | 248 + drivers/scsi/xen-scsifront.c | 1237 ++ drivers/scsi/zalon.c | 205 + drivers/scsi/zorro7xx.c | 187 + drivers/scsi/zorro_esp.c | 960 + 862 files changed, 1039631 insertions(+) create mode 100644 drivers/scsi/.gitignore create mode 100644 drivers/scsi/3w-9xxx.c create mode 100644 drivers/scsi/3w-9xxx.h create mode 100644 drivers/scsi/3w-sas.c create mode 100644 drivers/scsi/3w-sas.h create mode 100644 drivers/scsi/3w-xxxx.c create mode 100644 drivers/scsi/3w-xxxx.h create mode 100644 drivers/scsi/53c700.c create mode 100644 drivers/scsi/53c700.h create mode 100644 drivers/scsi/53c700.scr create mode 100644 drivers/scsi/53c700_d.h_shipped create mode 100644 drivers/scsi/BusLogic.c create mode 100644 drivers/scsi/BusLogic.h create mode 100644 drivers/scsi/FlashPoint.c create mode 100644 drivers/scsi/Kconfig create mode 100644 drivers/scsi/Makefile create mode 100644 drivers/scsi/NCR5380.c create mode 100644 drivers/scsi/NCR5380.h create mode 100644 drivers/scsi/a100u2w.c create mode 100644 drivers/scsi/a100u2w.h create mode 100644 drivers/scsi/a2091.c create mode 100644 drivers/scsi/a2091.h create mode 100644 drivers/scsi/a3000.c create mode 100644 drivers/scsi/a3000.h create mode 100644 drivers/scsi/a4000t.c create mode 100644 drivers/scsi/aacraid/Makefile create mode 100644 
drivers/scsi/aacraid/aachba.c create mode 100644 drivers/scsi/aacraid/aacraid.h create mode 100644 drivers/scsi/aacraid/commctrl.c create mode 100644 drivers/scsi/aacraid/comminit.c create mode 100644 drivers/scsi/aacraid/commsup.c create mode 100644 drivers/scsi/aacraid/dpcsup.c create mode 100644 drivers/scsi/aacraid/linit.c create mode 100644 drivers/scsi/aacraid/nark.c create mode 100644 drivers/scsi/aacraid/rkt.c create mode 100644 drivers/scsi/aacraid/rx.c create mode 100644 drivers/scsi/aacraid/sa.c create mode 100644 drivers/scsi/aacraid/src.c create mode 100644 drivers/scsi/advansys.c create mode 100644 drivers/scsi/aha152x.c create mode 100644 drivers/scsi/aha152x.h create mode 100644 drivers/scsi/aha1542.c create mode 100644 drivers/scsi/aha1542.h create mode 100644 drivers/scsi/aha1740.c create mode 100644 drivers/scsi/aha1740.h create mode 100644 drivers/scsi/aic7xxx/.gitignore create mode 100644 drivers/scsi/aic7xxx/Kconfig.aic79xx create mode 100644 drivers/scsi/aic7xxx/Kconfig.aic7xxx create mode 100644 drivers/scsi/aic7xxx/Makefile create mode 100644 drivers/scsi/aic7xxx/aic7770.c create mode 100644 drivers/scsi/aic7xxx/aic7770_osm.c create mode 100644 drivers/scsi/aic7xxx/aic79xx.h create mode 100644 drivers/scsi/aic7xxx/aic79xx.reg create mode 100644 drivers/scsi/aic7xxx/aic79xx.seq create mode 100644 drivers/scsi/aic7xxx/aic79xx_core.c create mode 100644 drivers/scsi/aic7xxx/aic79xx_inline.h create mode 100644 drivers/scsi/aic7xxx/aic79xx_osm.c create mode 100644 drivers/scsi/aic7xxx/aic79xx_osm.h create mode 100644 drivers/scsi/aic7xxx/aic79xx_osm_pci.c create mode 100644 drivers/scsi/aic7xxx/aic79xx_pci.c create mode 100644 drivers/scsi/aic7xxx/aic79xx_pci.h create mode 100644 drivers/scsi/aic7xxx/aic79xx_proc.c create mode 100644 drivers/scsi/aic7xxx/aic79xx_reg.h_shipped create mode 100644 drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped create mode 100644 drivers/scsi/aic7xxx/aic79xx_seq.h_shipped create mode 100644 drivers/scsi/aic7xxx/aic7xxx.h create mode 100644 drivers/scsi/aic7xxx/aic7xxx.reg create mode 100644 drivers/scsi/aic7xxx/aic7xxx.seq create mode 100644 drivers/scsi/aic7xxx/aic7xxx_93cx6.c create mode 100644 drivers/scsi/aic7xxx/aic7xxx_93cx6.h create mode 100644 drivers/scsi/aic7xxx/aic7xxx_core.c create mode 100644 drivers/scsi/aic7xxx/aic7xxx_inline.h create mode 100644 drivers/scsi/aic7xxx/aic7xxx_osm.c create mode 100644 drivers/scsi/aic7xxx/aic7xxx_osm.h create mode 100644 drivers/scsi/aic7xxx/aic7xxx_osm_pci.c create mode 100644 drivers/scsi/aic7xxx/aic7xxx_pci.c create mode 100644 drivers/scsi/aic7xxx/aic7xxx_pci.h create mode 100644 drivers/scsi/aic7xxx/aic7xxx_proc.c create mode 100644 drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped create mode 100644 drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped create mode 100644 drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped create mode 100644 drivers/scsi/aic7xxx/aicasm/Makefile create mode 100644 drivers/scsi/aic7xxx/aicasm/aicasm.c create mode 100644 drivers/scsi/aic7xxx/aicasm/aicasm.h create mode 100644 drivers/scsi/aic7xxx/aicasm/aicasm_gram.y create mode 100644 drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h create mode 100644 drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y create mode 100644 drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l create mode 100644 drivers/scsi/aic7xxx/aicasm/aicasm_scan.l create mode 100644 drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c create mode 100644 drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h create mode 100644 drivers/scsi/aic7xxx/aiclib.h create mode 100644 
drivers/scsi/aic7xxx/cam.h create mode 100644 drivers/scsi/aic7xxx/queue.h create mode 100644 drivers/scsi/aic7xxx/scsi_iu.h create mode 100644 drivers/scsi/aic7xxx/scsi_message.h create mode 100644 drivers/scsi/aic94xx/Kconfig create mode 100644 drivers/scsi/aic94xx/Makefile create mode 100644 drivers/scsi/aic94xx/aic94xx.h create mode 100644 drivers/scsi/aic94xx/aic94xx_dev.c create mode 100644 drivers/scsi/aic94xx/aic94xx_dump.c create mode 100644 drivers/scsi/aic94xx/aic94xx_dump.h create mode 100644 drivers/scsi/aic94xx/aic94xx_hwi.c create mode 100644 drivers/scsi/aic94xx/aic94xx_hwi.h create mode 100644 drivers/scsi/aic94xx/aic94xx_init.c create mode 100644 drivers/scsi/aic94xx/aic94xx_reg.c create mode 100644 drivers/scsi/aic94xx/aic94xx_reg.h create mode 100644 drivers/scsi/aic94xx/aic94xx_reg_def.h create mode 100644 drivers/scsi/aic94xx/aic94xx_sas.h create mode 100644 drivers/scsi/aic94xx/aic94xx_scb.c create mode 100644 drivers/scsi/aic94xx/aic94xx_sds.c create mode 100644 drivers/scsi/aic94xx/aic94xx_sds.h create mode 100644 drivers/scsi/aic94xx/aic94xx_seq.c create mode 100644 drivers/scsi/aic94xx/aic94xx_seq.h create mode 100644 drivers/scsi/aic94xx/aic94xx_task.c create mode 100644 drivers/scsi/aic94xx/aic94xx_tmf.c create mode 100644 drivers/scsi/am53c974.c create mode 100644 drivers/scsi/arcmsr/Makefile create mode 100644 drivers/scsi/arcmsr/arcmsr.h create mode 100644 drivers/scsi/arcmsr/arcmsr_attr.c create mode 100644 drivers/scsi/arcmsr/arcmsr_hba.c create mode 100644 drivers/scsi/arm/Kconfig create mode 100644 drivers/scsi/arm/Makefile create mode 100644 drivers/scsi/arm/acornscsi-io.S create mode 100644 drivers/scsi/arm/acornscsi.c create mode 100644 drivers/scsi/arm/acornscsi.h create mode 100644 drivers/scsi/arm/arm_scsi.h create mode 100644 drivers/scsi/arm/arxescsi.c create mode 100644 drivers/scsi/arm/cumana_1.c create mode 100644 drivers/scsi/arm/cumana_2.c create mode 100644 drivers/scsi/arm/eesox.c create mode 100644 drivers/scsi/arm/fas216.c create mode 100644 drivers/scsi/arm/fas216.h create mode 100644 drivers/scsi/arm/msgqueue.c create mode 100644 drivers/scsi/arm/msgqueue.h create mode 100644 drivers/scsi/arm/oak.c create mode 100644 drivers/scsi/arm/powertec.c create mode 100644 drivers/scsi/arm/queue.c create mode 100644 drivers/scsi/arm/queue.h create mode 100644 drivers/scsi/atari_scsi.c create mode 100644 drivers/scsi/atp870u.c create mode 100644 drivers/scsi/atp870u.h create mode 100644 drivers/scsi/be2iscsi/Kconfig create mode 100644 drivers/scsi/be2iscsi/Makefile create mode 100644 drivers/scsi/be2iscsi/be.h create mode 100644 drivers/scsi/be2iscsi/be_cmds.c create mode 100644 drivers/scsi/be2iscsi/be_cmds.h create mode 100644 drivers/scsi/be2iscsi/be_iscsi.c create mode 100644 drivers/scsi/be2iscsi/be_iscsi.h create mode 100644 drivers/scsi/be2iscsi/be_main.c create mode 100644 drivers/scsi/be2iscsi/be_main.h create mode 100644 drivers/scsi/be2iscsi/be_mgmt.c create mode 100644 drivers/scsi/be2iscsi/be_mgmt.h create mode 100644 drivers/scsi/bfa/Makefile create mode 100644 drivers/scsi/bfa/bfa.h create mode 100644 drivers/scsi/bfa/bfa_core.c create mode 100644 drivers/scsi/bfa/bfa_cs.h create mode 100644 drivers/scsi/bfa/bfa_defs.h create mode 100644 drivers/scsi/bfa/bfa_defs_fcs.h create mode 100644 drivers/scsi/bfa/bfa_defs_svc.h create mode 100644 drivers/scsi/bfa/bfa_fc.h create mode 100644 drivers/scsi/bfa/bfa_fcbuild.c create mode 100644 drivers/scsi/bfa/bfa_fcbuild.h create mode 100644 drivers/scsi/bfa/bfa_fcpim.c create mode 100644 
drivers/scsi/bfa/bfa_fcpim.h create mode 100644 drivers/scsi/bfa/bfa_fcs.c create mode 100644 drivers/scsi/bfa/bfa_fcs.h create mode 100644 drivers/scsi/bfa/bfa_fcs_fcpim.c create mode 100644 drivers/scsi/bfa/bfa_fcs_lport.c create mode 100644 drivers/scsi/bfa/bfa_fcs_rport.c create mode 100644 drivers/scsi/bfa/bfa_hw_cb.c create mode 100644 drivers/scsi/bfa/bfa_hw_ct.c create mode 100644 drivers/scsi/bfa/bfa_ioc.c create mode 100644 drivers/scsi/bfa/bfa_ioc.h create mode 100644 drivers/scsi/bfa/bfa_ioc_cb.c create mode 100644 drivers/scsi/bfa/bfa_ioc_ct.c create mode 100644 drivers/scsi/bfa/bfa_modules.h create mode 100644 drivers/scsi/bfa/bfa_plog.h create mode 100644 drivers/scsi/bfa/bfa_port.c create mode 100644 drivers/scsi/bfa/bfa_port.h create mode 100644 drivers/scsi/bfa/bfa_svc.c create mode 100644 drivers/scsi/bfa/bfa_svc.h create mode 100644 drivers/scsi/bfa/bfad.c create mode 100644 drivers/scsi/bfa/bfad_attr.c create mode 100644 drivers/scsi/bfa/bfad_bsg.c create mode 100644 drivers/scsi/bfa/bfad_bsg.h create mode 100644 drivers/scsi/bfa/bfad_debugfs.c create mode 100644 drivers/scsi/bfa/bfad_drv.h create mode 100644 drivers/scsi/bfa/bfad_im.c create mode 100644 drivers/scsi/bfa/bfad_im.h create mode 100644 drivers/scsi/bfa/bfi.h create mode 100644 drivers/scsi/bfa/bfi_ms.h create mode 100644 drivers/scsi/bfa/bfi_reg.h create mode 100644 drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h create mode 100644 drivers/scsi/bnx2fc/Kconfig create mode 100644 drivers/scsi/bnx2fc/Makefile create mode 100644 drivers/scsi/bnx2fc/bnx2fc.h create mode 100644 drivers/scsi/bnx2fc/bnx2fc_constants.h create mode 100644 drivers/scsi/bnx2fc/bnx2fc_debug.c create mode 100644 drivers/scsi/bnx2fc/bnx2fc_debug.h create mode 100644 drivers/scsi/bnx2fc/bnx2fc_els.c create mode 100644 drivers/scsi/bnx2fc/bnx2fc_fcoe.c create mode 100644 drivers/scsi/bnx2fc/bnx2fc_hwi.c create mode 100644 drivers/scsi/bnx2fc/bnx2fc_io.c create mode 100644 drivers/scsi/bnx2fc/bnx2fc_tgt.c create mode 100644 drivers/scsi/bnx2i/57xx_iscsi_constants.h create mode 100644 drivers/scsi/bnx2i/57xx_iscsi_hsi.h create mode 100644 drivers/scsi/bnx2i/Kconfig create mode 100644 drivers/scsi/bnx2i/Makefile create mode 100644 drivers/scsi/bnx2i/bnx2i.h create mode 100644 drivers/scsi/bnx2i/bnx2i_hwi.c create mode 100644 drivers/scsi/bnx2i/bnx2i_init.c create mode 100644 drivers/scsi/bnx2i/bnx2i_iscsi.c create mode 100644 drivers/scsi/bnx2i/bnx2i_sysfs.c create mode 100644 drivers/scsi/bvme6000_scsi.c create mode 100644 drivers/scsi/ch.c create mode 100644 drivers/scsi/constants.c create mode 100644 drivers/scsi/csiostor/Kconfig create mode 100644 drivers/scsi/csiostor/Makefile create mode 100644 drivers/scsi/csiostor/csio_attr.c create mode 100644 drivers/scsi/csiostor/csio_defs.h create mode 100644 drivers/scsi/csiostor/csio_hw.c create mode 100644 drivers/scsi/csiostor/csio_hw.h create mode 100644 drivers/scsi/csiostor/csio_hw_chip.h create mode 100644 drivers/scsi/csiostor/csio_hw_t5.c create mode 100644 drivers/scsi/csiostor/csio_init.c create mode 100644 drivers/scsi/csiostor/csio_init.h create mode 100644 drivers/scsi/csiostor/csio_isr.c create mode 100644 drivers/scsi/csiostor/csio_lnode.c create mode 100644 drivers/scsi/csiostor/csio_lnode.h create mode 100644 drivers/scsi/csiostor/csio_mb.c create mode 100644 drivers/scsi/csiostor/csio_mb.h create mode 100644 drivers/scsi/csiostor/csio_rnode.c create mode 100644 drivers/scsi/csiostor/csio_rnode.h create mode 100644 drivers/scsi/csiostor/csio_scsi.c create mode 100644 
drivers/scsi/csiostor/csio_scsi.h create mode 100644 drivers/scsi/csiostor/csio_wr.c create mode 100644 drivers/scsi/csiostor/csio_wr.h create mode 100644 drivers/scsi/csiostor/t4fw_api_stor.h create mode 100644 drivers/scsi/cxgbi/Kconfig create mode 100644 drivers/scsi/cxgbi/Makefile create mode 100644 drivers/scsi/cxgbi/cxgb3i/Kbuild create mode 100644 drivers/scsi/cxgbi/cxgb3i/Kconfig create mode 100644 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c create mode 100644 drivers/scsi/cxgbi/cxgb3i/cxgb3i.h create mode 100644 drivers/scsi/cxgbi/cxgb4i/Kbuild create mode 100644 drivers/scsi/cxgbi/cxgb4i/Kconfig create mode 100644 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c create mode 100644 drivers/scsi/cxgbi/cxgb4i/cxgb4i.h create mode 100644 drivers/scsi/cxgbi/libcxgbi.c create mode 100644 drivers/scsi/cxgbi/libcxgbi.h create mode 100644 drivers/scsi/cxlflash/Kconfig create mode 100644 drivers/scsi/cxlflash/Makefile create mode 100644 drivers/scsi/cxlflash/backend.h create mode 100644 drivers/scsi/cxlflash/common.h create mode 100644 drivers/scsi/cxlflash/cxl_hw.c create mode 100644 drivers/scsi/cxlflash/lunmgt.c create mode 100644 drivers/scsi/cxlflash/main.c create mode 100644 drivers/scsi/cxlflash/main.h create mode 100644 drivers/scsi/cxlflash/ocxl_hw.c create mode 100644 drivers/scsi/cxlflash/ocxl_hw.h create mode 100644 drivers/scsi/cxlflash/sislite.h create mode 100644 drivers/scsi/cxlflash/superpipe.c create mode 100644 drivers/scsi/cxlflash/superpipe.h create mode 100644 drivers/scsi/cxlflash/vlun.c create mode 100644 drivers/scsi/cxlflash/vlun.h create mode 100644 drivers/scsi/dc395x.c create mode 100644 drivers/scsi/dc395x.h create mode 100644 drivers/scsi/device_handler/Kconfig create mode 100644 drivers/scsi/device_handler/Makefile create mode 100644 drivers/scsi/device_handler/scsi_dh_alua.c create mode 100644 drivers/scsi/device_handler/scsi_dh_emc.c create mode 100644 drivers/scsi/device_handler/scsi_dh_hp_sw.c create mode 100644 drivers/scsi/device_handler/scsi_dh_rdac.c create mode 100644 drivers/scsi/dmx3191d.c create mode 100644 drivers/scsi/elx/Kconfig create mode 100644 drivers/scsi/elx/Makefile create mode 100644 drivers/scsi/elx/efct/efct_driver.c create mode 100644 drivers/scsi/elx/efct/efct_driver.h create mode 100644 drivers/scsi/elx/efct/efct_hw.c create mode 100644 drivers/scsi/elx/efct/efct_hw.h create mode 100644 drivers/scsi/elx/efct/efct_hw_queues.c create mode 100644 drivers/scsi/elx/efct/efct_io.c create mode 100644 drivers/scsi/elx/efct/efct_io.h create mode 100644 drivers/scsi/elx/efct/efct_lio.c create mode 100644 drivers/scsi/elx/efct/efct_lio.h create mode 100644 drivers/scsi/elx/efct/efct_scsi.c create mode 100644 drivers/scsi/elx/efct/efct_scsi.h create mode 100644 drivers/scsi/elx/efct/efct_unsol.c create mode 100644 drivers/scsi/elx/efct/efct_unsol.h create mode 100644 drivers/scsi/elx/efct/efct_xport.c create mode 100644 drivers/scsi/elx/efct/efct_xport.h create mode 100644 drivers/scsi/elx/include/efc_common.h create mode 100644 drivers/scsi/elx/libefc/efc.h create mode 100644 drivers/scsi/elx/libefc/efc_cmds.c create mode 100644 drivers/scsi/elx/libefc/efc_cmds.h create mode 100644 drivers/scsi/elx/libefc/efc_device.c create mode 100644 drivers/scsi/elx/libefc/efc_device.h create mode 100644 drivers/scsi/elx/libefc/efc_domain.c create mode 100644 drivers/scsi/elx/libefc/efc_domain.h create mode 100644 drivers/scsi/elx/libefc/efc_els.c create mode 100644 drivers/scsi/elx/libefc/efc_els.h create mode 100644 drivers/scsi/elx/libefc/efc_fabric.c create mode 100644 
drivers/scsi/elx/libefc/efc_fabric.h create mode 100644 drivers/scsi/elx/libefc/efc_node.c create mode 100644 drivers/scsi/elx/libefc/efc_node.h create mode 100644 drivers/scsi/elx/libefc/efc_nport.c create mode 100644 drivers/scsi/elx/libefc/efc_nport.h create mode 100644 drivers/scsi/elx/libefc/efc_sm.c create mode 100644 drivers/scsi/elx/libefc/efc_sm.h create mode 100644 drivers/scsi/elx/libefc/efclib.c create mode 100644 drivers/scsi/elx/libefc/efclib.h create mode 100644 drivers/scsi/elx/libefc_sli/sli4.c create mode 100644 drivers/scsi/elx/libefc_sli/sli4.h create mode 100644 drivers/scsi/esas2r/Kconfig create mode 100644 drivers/scsi/esas2r/Makefile create mode 100644 drivers/scsi/esas2r/atioctl.h create mode 100644 drivers/scsi/esas2r/atvda.h create mode 100644 drivers/scsi/esas2r/esas2r.h create mode 100644 drivers/scsi/esas2r/esas2r_disc.c create mode 100644 drivers/scsi/esas2r/esas2r_flash.c create mode 100644 drivers/scsi/esas2r/esas2r_init.c create mode 100644 drivers/scsi/esas2r/esas2r_int.c create mode 100644 drivers/scsi/esas2r/esas2r_io.c create mode 100644 drivers/scsi/esas2r/esas2r_ioctl.c create mode 100644 drivers/scsi/esas2r/esas2r_log.c create mode 100644 drivers/scsi/esas2r/esas2r_log.h create mode 100644 drivers/scsi/esas2r/esas2r_main.c create mode 100644 drivers/scsi/esas2r/esas2r_targdb.c create mode 100644 drivers/scsi/esas2r/esas2r_vda.c create mode 100644 drivers/scsi/esp_scsi.c create mode 100644 drivers/scsi/esp_scsi.h create mode 100644 drivers/scsi/fcoe/Makefile create mode 100644 drivers/scsi/fcoe/fcoe.c create mode 100644 drivers/scsi/fcoe/fcoe.h create mode 100644 drivers/scsi/fcoe/fcoe_ctlr.c create mode 100644 drivers/scsi/fcoe/fcoe_sysfs.c create mode 100644 drivers/scsi/fcoe/fcoe_transport.c create mode 100644 drivers/scsi/fcoe/libfcoe.h create mode 100644 drivers/scsi/fdomain.c create mode 100644 drivers/scsi/fdomain.h create mode 100644 drivers/scsi/fdomain_isa.c create mode 100644 drivers/scsi/fdomain_pci.c create mode 100644 drivers/scsi/fnic/Makefile create mode 100644 drivers/scsi/fnic/cq_desc.h create mode 100644 drivers/scsi/fnic/cq_enet_desc.h create mode 100644 drivers/scsi/fnic/cq_exch_desc.h create mode 100644 drivers/scsi/fnic/fcpio.h create mode 100644 drivers/scsi/fnic/fnic.h create mode 100644 drivers/scsi/fnic/fnic_attrs.c create mode 100644 drivers/scsi/fnic/fnic_debugfs.c create mode 100644 drivers/scsi/fnic/fnic_fcs.c create mode 100644 drivers/scsi/fnic/fnic_fip.h create mode 100644 drivers/scsi/fnic/fnic_io.h create mode 100644 drivers/scsi/fnic/fnic_isr.c create mode 100644 drivers/scsi/fnic/fnic_main.c create mode 100644 drivers/scsi/fnic/fnic_res.c create mode 100644 drivers/scsi/fnic/fnic_res.h create mode 100644 drivers/scsi/fnic/fnic_scsi.c create mode 100644 drivers/scsi/fnic/fnic_stats.h create mode 100644 drivers/scsi/fnic/fnic_trace.c create mode 100644 drivers/scsi/fnic/fnic_trace.h create mode 100644 drivers/scsi/fnic/rq_enet_desc.h create mode 100644 drivers/scsi/fnic/vnic_cq.c create mode 100644 drivers/scsi/fnic/vnic_cq.h create mode 100644 drivers/scsi/fnic/vnic_cq_copy.h create mode 100644 drivers/scsi/fnic/vnic_dev.c create mode 100644 drivers/scsi/fnic/vnic_dev.h create mode 100644 drivers/scsi/fnic/vnic_devcmd.h create mode 100644 drivers/scsi/fnic/vnic_intr.c create mode 100644 drivers/scsi/fnic/vnic_intr.h create mode 100644 drivers/scsi/fnic/vnic_nic.h create mode 100644 drivers/scsi/fnic/vnic_resource.h create mode 100644 drivers/scsi/fnic/vnic_rq.c create mode 100644 drivers/scsi/fnic/vnic_rq.h create 
mode 100644 drivers/scsi/fnic/vnic_scsi.h create mode 100644 drivers/scsi/fnic/vnic_stats.h create mode 100644 drivers/scsi/fnic/vnic_wq.c create mode 100644 drivers/scsi/fnic/vnic_wq.h create mode 100644 drivers/scsi/fnic/vnic_wq_copy.c create mode 100644 drivers/scsi/fnic/vnic_wq_copy.h create mode 100644 drivers/scsi/fnic/wq_enet_desc.h create mode 100644 drivers/scsi/g_NCR5380.c create mode 100644 drivers/scsi/gvp11.c create mode 100644 drivers/scsi/gvp11.h create mode 100644 drivers/scsi/hisi_sas/Kconfig create mode 100644 drivers/scsi/hisi_sas/Makefile create mode 100644 drivers/scsi/hisi_sas/hisi_sas.h create mode 100644 drivers/scsi/hisi_sas/hisi_sas_main.c create mode 100644 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c create mode 100644 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c create mode 100644 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c create mode 100644 drivers/scsi/hosts.c create mode 100644 drivers/scsi/hpsa.c create mode 100644 drivers/scsi/hpsa.h create mode 100644 drivers/scsi/hpsa_cmd.h create mode 100644 drivers/scsi/hptiop.c create mode 100644 drivers/scsi/hptiop.h create mode 100644 drivers/scsi/ibmvscsi/Makefile create mode 100644 drivers/scsi/ibmvscsi/ibmvfc.c create mode 100644 drivers/scsi/ibmvscsi/ibmvfc.h create mode 100644 drivers/scsi/ibmvscsi/ibmvscsi.c create mode 100644 drivers/scsi/ibmvscsi/ibmvscsi.h create mode 100644 drivers/scsi/ibmvscsi_tgt/Makefile create mode 100644 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c create mode 100644 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h create mode 100644 drivers/scsi/ibmvscsi_tgt/libsrp.c create mode 100644 drivers/scsi/ibmvscsi_tgt/libsrp.h create mode 100644 drivers/scsi/imm.c create mode 100644 drivers/scsi/imm.h create mode 100644 drivers/scsi/initio.c create mode 100644 drivers/scsi/initio.h create mode 100644 drivers/scsi/ipr.c create mode 100644 drivers/scsi/ipr.h create mode 100644 drivers/scsi/ips.c create mode 100644 drivers/scsi/ips.h create mode 100644 drivers/scsi/isci/Makefile create mode 100644 drivers/scsi/isci/host.c create mode 100644 drivers/scsi/isci/host.h create mode 100644 drivers/scsi/isci/init.c create mode 100644 drivers/scsi/isci/isci.h create mode 100644 drivers/scsi/isci/phy.c create mode 100644 drivers/scsi/isci/phy.h create mode 100644 drivers/scsi/isci/port.c create mode 100644 drivers/scsi/isci/port.h create mode 100644 drivers/scsi/isci/port_config.c create mode 100644 drivers/scsi/isci/probe_roms.c create mode 100644 drivers/scsi/isci/probe_roms.h create mode 100644 drivers/scsi/isci/registers.h create mode 100644 drivers/scsi/isci/remote_device.c create mode 100644 drivers/scsi/isci/remote_device.h create mode 100644 drivers/scsi/isci/remote_node_context.c create mode 100644 drivers/scsi/isci/remote_node_context.h create mode 100644 drivers/scsi/isci/remote_node_table.c create mode 100644 drivers/scsi/isci/remote_node_table.h create mode 100644 drivers/scsi/isci/request.c create mode 100644 drivers/scsi/isci/request.h create mode 100644 drivers/scsi/isci/sas.h create mode 100644 drivers/scsi/isci/scu_completion_codes.h create mode 100644 drivers/scsi/isci/scu_event_codes.h create mode 100644 drivers/scsi/isci/scu_remote_node_context.h create mode 100644 drivers/scsi/isci/scu_task_context.h create mode 100644 drivers/scsi/isci/task.c create mode 100644 drivers/scsi/isci/task.h create mode 100644 drivers/scsi/isci/unsolicited_frame_control.c create mode 100644 drivers/scsi/isci/unsolicited_frame_control.h create mode 100644 drivers/scsi/iscsi_boot_sysfs.c create mode 100644 drivers/scsi/iscsi_tcp.c 
create mode 100644 drivers/scsi/iscsi_tcp.h create mode 100644 drivers/scsi/jazz_esp.c create mode 100644 drivers/scsi/lasi700.c create mode 100644 drivers/scsi/libfc/Makefile create mode 100644 drivers/scsi/libfc/fc_disc.c create mode 100644 drivers/scsi/libfc/fc_elsct.c create mode 100644 drivers/scsi/libfc/fc_encode.h create mode 100644 drivers/scsi/libfc/fc_exch.c create mode 100644 drivers/scsi/libfc/fc_fcp.c create mode 100644 drivers/scsi/libfc/fc_frame.c create mode 100644 drivers/scsi/libfc/fc_libfc.c create mode 100644 drivers/scsi/libfc/fc_libfc.h create mode 100644 drivers/scsi/libfc/fc_lport.c create mode 100644 drivers/scsi/libfc/fc_npiv.c create mode 100644 drivers/scsi/libfc/fc_rport.c create mode 100644 drivers/scsi/libiscsi.c create mode 100644 drivers/scsi/libiscsi_tcp.c create mode 100644 drivers/scsi/libsas/Kconfig create mode 100644 drivers/scsi/libsas/Makefile create mode 100644 drivers/scsi/libsas/sas_ata.c create mode 100644 drivers/scsi/libsas/sas_discover.c create mode 100644 drivers/scsi/libsas/sas_event.c create mode 100644 drivers/scsi/libsas/sas_expander.c create mode 100644 drivers/scsi/libsas/sas_host_smp.c create mode 100644 drivers/scsi/libsas/sas_init.c create mode 100644 drivers/scsi/libsas/sas_internal.h create mode 100644 drivers/scsi/libsas/sas_phy.c create mode 100644 drivers/scsi/libsas/sas_port.c create mode 100644 drivers/scsi/libsas/sas_scsi_host.c create mode 100644 drivers/scsi/libsas/sas_task.c create mode 100644 drivers/scsi/lpfc/Makefile create mode 100644 drivers/scsi/lpfc/lpfc.h create mode 100644 drivers/scsi/lpfc/lpfc_attr.c create mode 100644 drivers/scsi/lpfc/lpfc_attr.h create mode 100644 drivers/scsi/lpfc/lpfc_bsg.c create mode 100644 drivers/scsi/lpfc/lpfc_bsg.h create mode 100644 drivers/scsi/lpfc/lpfc_compat.h create mode 100644 drivers/scsi/lpfc/lpfc_crtn.h create mode 100644 drivers/scsi/lpfc/lpfc_ct.c create mode 100644 drivers/scsi/lpfc/lpfc_debugfs.c create mode 100644 drivers/scsi/lpfc/lpfc_debugfs.h create mode 100644 drivers/scsi/lpfc/lpfc_disc.h create mode 100644 drivers/scsi/lpfc/lpfc_els.c create mode 100644 drivers/scsi/lpfc/lpfc_hbadisc.c create mode 100644 drivers/scsi/lpfc/lpfc_hw.h create mode 100644 drivers/scsi/lpfc/lpfc_hw4.h create mode 100644 drivers/scsi/lpfc/lpfc_ids.h create mode 100644 drivers/scsi/lpfc/lpfc_init.c create mode 100644 drivers/scsi/lpfc/lpfc_logmsg.h create mode 100644 drivers/scsi/lpfc/lpfc_mbox.c create mode 100644 drivers/scsi/lpfc/lpfc_mem.c create mode 100644 drivers/scsi/lpfc/lpfc_nl.h create mode 100644 drivers/scsi/lpfc/lpfc_nportdisc.c create mode 100644 drivers/scsi/lpfc/lpfc_nvme.c create mode 100644 drivers/scsi/lpfc/lpfc_nvme.h create mode 100644 drivers/scsi/lpfc/lpfc_nvmet.c create mode 100644 drivers/scsi/lpfc/lpfc_scsi.c create mode 100644 drivers/scsi/lpfc/lpfc_scsi.h create mode 100644 drivers/scsi/lpfc/lpfc_sli.c create mode 100644 drivers/scsi/lpfc/lpfc_sli.h create mode 100644 drivers/scsi/lpfc/lpfc_sli4.h create mode 100644 drivers/scsi/lpfc/lpfc_version.h create mode 100644 drivers/scsi/lpfc/lpfc_vmid.c create mode 100644 drivers/scsi/lpfc/lpfc_vport.c create mode 100644 drivers/scsi/lpfc/lpfc_vport.h create mode 100644 drivers/scsi/mac53c94.c create mode 100644 drivers/scsi/mac53c94.h create mode 100644 drivers/scsi/mac_esp.c create mode 100644 drivers/scsi/mac_scsi.c create mode 100644 drivers/scsi/megaraid.c create mode 100644 drivers/scsi/megaraid.h create mode 100644 drivers/scsi/megaraid/Kconfig.megaraid create mode 100644 drivers/scsi/megaraid/Makefile create 
mode 100644 drivers/scsi/megaraid/mbox_defs.h create mode 100644 drivers/scsi/megaraid/mega_common.h create mode 100644 drivers/scsi/megaraid/megaraid_ioctl.h create mode 100644 drivers/scsi/megaraid/megaraid_mbox.c create mode 100644 drivers/scsi/megaraid/megaraid_mbox.h create mode 100644 drivers/scsi/megaraid/megaraid_mm.c create mode 100644 drivers/scsi/megaraid/megaraid_mm.h create mode 100644 drivers/scsi/megaraid/megaraid_sas.h create mode 100644 drivers/scsi/megaraid/megaraid_sas_base.c create mode 100644 drivers/scsi/megaraid/megaraid_sas_debugfs.c create mode 100644 drivers/scsi/megaraid/megaraid_sas_fp.c create mode 100644 drivers/scsi/megaraid/megaraid_sas_fusion.c create mode 100644 drivers/scsi/megaraid/megaraid_sas_fusion.h create mode 100644 drivers/scsi/mesh.c create mode 100644 drivers/scsi/mesh.h create mode 100644 drivers/scsi/mpi3mr/Kconfig create mode 100644 drivers/scsi/mpi3mr/Makefile create mode 100644 drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h create mode 100644 drivers/scsi/mpi3mr/mpi/mpi30_image.h create mode 100644 drivers/scsi/mpi3mr/mpi/mpi30_init.h create mode 100644 drivers/scsi/mpi3mr/mpi/mpi30_ioc.h create mode 100644 drivers/scsi/mpi3mr/mpi/mpi30_pci.h create mode 100644 drivers/scsi/mpi3mr/mpi/mpi30_sas.h create mode 100644 drivers/scsi/mpi3mr/mpi/mpi30_transport.h create mode 100644 drivers/scsi/mpi3mr/mpi3mr.h create mode 100644 drivers/scsi/mpi3mr/mpi3mr_app.c create mode 100644 drivers/scsi/mpi3mr/mpi3mr_debug.h create mode 100644 drivers/scsi/mpi3mr/mpi3mr_fw.c create mode 100644 drivers/scsi/mpi3mr/mpi3mr_os.c create mode 100644 drivers/scsi/mpi3mr/mpi3mr_transport.c create mode 100644 drivers/scsi/mpt3sas/Kconfig create mode 100644 drivers/scsi/mpt3sas/Makefile create mode 100644 drivers/scsi/mpt3sas/mpi/mpi2.h create mode 100644 drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h create mode 100644 drivers/scsi/mpt3sas/mpi/mpi2_image.h create mode 100644 drivers/scsi/mpt3sas/mpi/mpi2_init.h create mode 100644 drivers/scsi/mpt3sas/mpi/mpi2_ioc.h create mode 100644 drivers/scsi/mpt3sas/mpi/mpi2_pci.h create mode 100644 drivers/scsi/mpt3sas/mpi/mpi2_raid.h create mode 100644 drivers/scsi/mpt3sas/mpi/mpi2_sas.h create mode 100644 drivers/scsi/mpt3sas/mpi/mpi2_tool.h create mode 100644 drivers/scsi/mpt3sas/mpi/mpi2_type.h create mode 100644 drivers/scsi/mpt3sas/mpt3sas_base.c create mode 100644 drivers/scsi/mpt3sas/mpt3sas_base.h create mode 100644 drivers/scsi/mpt3sas/mpt3sas_config.c create mode 100644 drivers/scsi/mpt3sas/mpt3sas_ctl.c create mode 100644 drivers/scsi/mpt3sas/mpt3sas_ctl.h create mode 100644 drivers/scsi/mpt3sas/mpt3sas_debug.h create mode 100644 drivers/scsi/mpt3sas/mpt3sas_debugfs.c create mode 100644 drivers/scsi/mpt3sas/mpt3sas_scsih.c create mode 100644 drivers/scsi/mpt3sas/mpt3sas_transport.c create mode 100644 drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c create mode 100644 drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h create mode 100644 drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h create mode 100644 drivers/scsi/mpt3sas/mpt3sas_warpdrive.c create mode 100644 drivers/scsi/mvme147.c create mode 100644 drivers/scsi/mvme147.h create mode 100644 drivers/scsi/mvme16x_scsi.c create mode 100644 drivers/scsi/mvsas/Kconfig create mode 100644 drivers/scsi/mvsas/Makefile create mode 100644 drivers/scsi/mvsas/mv_64xx.c create mode 100644 drivers/scsi/mvsas/mv_64xx.h create mode 100644 drivers/scsi/mvsas/mv_94xx.c create mode 100644 drivers/scsi/mvsas/mv_94xx.h create mode 100644 drivers/scsi/mvsas/mv_chips.h create mode 100644 drivers/scsi/mvsas/mv_defs.h 
create mode 100644 drivers/scsi/mvsas/mv_init.c create mode 100644 drivers/scsi/mvsas/mv_sas.c create mode 100644 drivers/scsi/mvsas/mv_sas.h create mode 100644 drivers/scsi/mvumi.c create mode 100644 drivers/scsi/mvumi.h create mode 100644 drivers/scsi/myrb.c create mode 100644 drivers/scsi/myrb.h create mode 100644 drivers/scsi/myrs.c create mode 100644 drivers/scsi/myrs.h create mode 100644 drivers/scsi/ncr53c8xx.c create mode 100644 drivers/scsi/ncr53c8xx.h create mode 100644 drivers/scsi/nsp32.c create mode 100644 drivers/scsi/nsp32.h create mode 100644 drivers/scsi/nsp32_debug.c create mode 100644 drivers/scsi/nsp32_io.h create mode 100644 drivers/scsi/pcmcia/Kconfig create mode 100644 drivers/scsi/pcmcia/Makefile create mode 100644 drivers/scsi/pcmcia/aha152x_core.c create mode 100644 drivers/scsi/pcmcia/aha152x_stub.c create mode 100644 drivers/scsi/pcmcia/fdomain_cs.c create mode 100644 drivers/scsi/pcmcia/nsp_cs.c create mode 100644 drivers/scsi/pcmcia/nsp_cs.h create mode 100644 drivers/scsi/pcmcia/nsp_debug.c create mode 100644 drivers/scsi/pcmcia/nsp_io.h create mode 100644 drivers/scsi/pcmcia/nsp_message.c create mode 100644 drivers/scsi/pcmcia/qlogic_stub.c create mode 100644 drivers/scsi/pcmcia/sym53c500_cs.c create mode 100644 drivers/scsi/pm8001/Makefile create mode 100644 drivers/scsi/pm8001/pm8001_chips.h create mode 100644 drivers/scsi/pm8001/pm8001_ctl.c create mode 100644 drivers/scsi/pm8001/pm8001_ctl.h create mode 100644 drivers/scsi/pm8001/pm8001_defs.h create mode 100644 drivers/scsi/pm8001/pm8001_hwi.c create mode 100644 drivers/scsi/pm8001/pm8001_hwi.h create mode 100644 drivers/scsi/pm8001/pm8001_init.c create mode 100644 drivers/scsi/pm8001/pm8001_sas.c create mode 100644 drivers/scsi/pm8001/pm8001_sas.h create mode 100644 drivers/scsi/pm8001/pm80xx_hwi.c create mode 100644 drivers/scsi/pm8001/pm80xx_hwi.h create mode 100644 drivers/scsi/pm8001/pm80xx_tracepoints.c create mode 100644 drivers/scsi/pm8001/pm80xx_tracepoints.h create mode 100644 drivers/scsi/pmcraid.c create mode 100644 drivers/scsi/pmcraid.h create mode 100644 drivers/scsi/ppa.c create mode 100644 drivers/scsi/ppa.h create mode 100644 drivers/scsi/ps3rom.c create mode 100644 drivers/scsi/qedf/Kconfig create mode 100644 drivers/scsi/qedf/Makefile create mode 100644 drivers/scsi/qedf/drv_fcoe_fw_funcs.c create mode 100644 drivers/scsi/qedf/drv_fcoe_fw_funcs.h create mode 100644 drivers/scsi/qedf/drv_scsi_fw_funcs.c create mode 100644 drivers/scsi/qedf/drv_scsi_fw_funcs.h create mode 100644 drivers/scsi/qedf/qedf.h create mode 100644 drivers/scsi/qedf/qedf_attr.c create mode 100644 drivers/scsi/qedf/qedf_dbg.c create mode 100644 drivers/scsi/qedf/qedf_dbg.h create mode 100644 drivers/scsi/qedf/qedf_debugfs.c create mode 100644 drivers/scsi/qedf/qedf_els.c create mode 100644 drivers/scsi/qedf/qedf_fip.c create mode 100644 drivers/scsi/qedf/qedf_hsi.h create mode 100644 drivers/scsi/qedf/qedf_io.c create mode 100644 drivers/scsi/qedf/qedf_main.c create mode 100644 drivers/scsi/qedf/qedf_version.h create mode 100644 drivers/scsi/qedi/Kconfig create mode 100644 drivers/scsi/qedi/Makefile create mode 100644 drivers/scsi/qedi/qedi.h create mode 100644 drivers/scsi/qedi/qedi_dbg.c create mode 100644 drivers/scsi/qedi/qedi_dbg.h create mode 100644 drivers/scsi/qedi/qedi_debugfs.c create mode 100644 drivers/scsi/qedi/qedi_fw.c create mode 100644 drivers/scsi/qedi/qedi_fw_api.c create mode 100644 drivers/scsi/qedi/qedi_fw_iscsi.h create mode 100644 drivers/scsi/qedi/qedi_fw_scsi.h create mode 100644 
drivers/scsi/qedi/qedi_gbl.h create mode 100644 drivers/scsi/qedi/qedi_hsi.h create mode 100644 drivers/scsi/qedi/qedi_iscsi.c create mode 100644 drivers/scsi/qedi/qedi_iscsi.h create mode 100644 drivers/scsi/qedi/qedi_main.c create mode 100644 drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h create mode 100644 drivers/scsi/qedi/qedi_sysfs.c create mode 100644 drivers/scsi/qedi/qedi_version.h create mode 100644 drivers/scsi/qla1280.c create mode 100644 drivers/scsi/qla1280.h create mode 100644 drivers/scsi/qla2xxx/Kconfig create mode 100644 drivers/scsi/qla2xxx/Makefile create mode 100644 drivers/scsi/qla2xxx/qla_attr.c create mode 100644 drivers/scsi/qla2xxx/qla_bsg.c create mode 100644 drivers/scsi/qla2xxx/qla_bsg.h create mode 100644 drivers/scsi/qla2xxx/qla_dbg.c create mode 100644 drivers/scsi/qla2xxx/qla_dbg.h create mode 100644 drivers/scsi/qla2xxx/qla_def.h create mode 100644 drivers/scsi/qla2xxx/qla_devtbl.h create mode 100644 drivers/scsi/qla2xxx/qla_dfs.c create mode 100644 drivers/scsi/qla2xxx/qla_dsd.h create mode 100644 drivers/scsi/qla2xxx/qla_edif.c create mode 100644 drivers/scsi/qla2xxx/qla_edif.h create mode 100644 drivers/scsi/qla2xxx/qla_edif_bsg.h create mode 100644 drivers/scsi/qla2xxx/qla_fw.h create mode 100644 drivers/scsi/qla2xxx/qla_gbl.h create mode 100644 drivers/scsi/qla2xxx/qla_gs.c create mode 100644 drivers/scsi/qla2xxx/qla_init.c create mode 100644 drivers/scsi/qla2xxx/qla_inline.h create mode 100644 drivers/scsi/qla2xxx/qla_iocb.c create mode 100644 drivers/scsi/qla2xxx/qla_isr.c create mode 100644 drivers/scsi/qla2xxx/qla_mbx.c create mode 100644 drivers/scsi/qla2xxx/qla_mid.c create mode 100644 drivers/scsi/qla2xxx/qla_mr.c create mode 100644 drivers/scsi/qla2xxx/qla_mr.h create mode 100644 drivers/scsi/qla2xxx/qla_nvme.c create mode 100644 drivers/scsi/qla2xxx/qla_nvme.h create mode 100644 drivers/scsi/qla2xxx/qla_nx.c create mode 100644 drivers/scsi/qla2xxx/qla_nx.h create mode 100644 drivers/scsi/qla2xxx/qla_nx2.c create mode 100644 drivers/scsi/qla2xxx/qla_nx2.h create mode 100644 drivers/scsi/qla2xxx/qla_os.c create mode 100644 drivers/scsi/qla2xxx/qla_settings.h create mode 100644 drivers/scsi/qla2xxx/qla_sup.c create mode 100644 drivers/scsi/qla2xxx/qla_target.c create mode 100644 drivers/scsi/qla2xxx/qla_target.h create mode 100644 drivers/scsi/qla2xxx/qla_tmpl.c create mode 100644 drivers/scsi/qla2xxx/qla_tmpl.h create mode 100644 drivers/scsi/qla2xxx/qla_version.h create mode 100644 drivers/scsi/qla2xxx/tcm_qla2xxx.c create mode 100644 drivers/scsi/qla2xxx/tcm_qla2xxx.h create mode 100644 drivers/scsi/qla4xxx/Kconfig create mode 100644 drivers/scsi/qla4xxx/Makefile create mode 100644 drivers/scsi/qla4xxx/ql4_83xx.c create mode 100644 drivers/scsi/qla4xxx/ql4_83xx.h create mode 100644 drivers/scsi/qla4xxx/ql4_attr.c create mode 100644 drivers/scsi/qla4xxx/ql4_bsg.c create mode 100644 drivers/scsi/qla4xxx/ql4_bsg.h create mode 100644 drivers/scsi/qla4xxx/ql4_dbg.c create mode 100644 drivers/scsi/qla4xxx/ql4_dbg.h create mode 100644 drivers/scsi/qla4xxx/ql4_def.h create mode 100644 drivers/scsi/qla4xxx/ql4_fw.h create mode 100644 drivers/scsi/qla4xxx/ql4_glbl.h create mode 100644 drivers/scsi/qla4xxx/ql4_init.c create mode 100644 drivers/scsi/qla4xxx/ql4_inline.h create mode 100644 drivers/scsi/qla4xxx/ql4_iocb.c create mode 100644 drivers/scsi/qla4xxx/ql4_isr.c create mode 100644 drivers/scsi/qla4xxx/ql4_mbx.c create mode 100644 drivers/scsi/qla4xxx/ql4_nvram.c create mode 100644 drivers/scsi/qla4xxx/ql4_nvram.h create mode 100644 
drivers/scsi/qla4xxx/ql4_nx.c create mode 100644 drivers/scsi/qla4xxx/ql4_nx.h create mode 100644 drivers/scsi/qla4xxx/ql4_os.c create mode 100644 drivers/scsi/qla4xxx/ql4_version.h create mode 100644 drivers/scsi/qlogicfas.c create mode 100644 drivers/scsi/qlogicfas408.c create mode 100644 drivers/scsi/qlogicfas408.h create mode 100644 drivers/scsi/qlogicpti.c create mode 100644 drivers/scsi/qlogicpti.h create mode 100644 drivers/scsi/raid_class.c create mode 100644 drivers/scsi/script_asm.pl create mode 100644 drivers/scsi/scsi.c create mode 100644 drivers/scsi/scsi_bsg.c create mode 100644 drivers/scsi/scsi_common.c create mode 100644 drivers/scsi/scsi_debug.c create mode 100644 drivers/scsi/scsi_debugfs.c create mode 100644 drivers/scsi/scsi_debugfs.h create mode 100644 drivers/scsi/scsi_devinfo.c create mode 100644 drivers/scsi/scsi_dh.c create mode 100644 drivers/scsi/scsi_error.c create mode 100644 drivers/scsi/scsi_ioctl.c create mode 100644 drivers/scsi/scsi_lib.c create mode 100644 drivers/scsi/scsi_lib_dma.c create mode 100644 drivers/scsi/scsi_logging.c create mode 100644 drivers/scsi/scsi_logging.h create mode 100644 drivers/scsi/scsi_netlink.c create mode 100644 drivers/scsi/scsi_pm.c create mode 100644 drivers/scsi/scsi_priv.h create mode 100644 drivers/scsi/scsi_proc.c create mode 100644 drivers/scsi/scsi_sas_internal.h create mode 100644 drivers/scsi/scsi_scan.c create mode 100644 drivers/scsi/scsi_sysctl.c create mode 100644 drivers/scsi/scsi_sysfs.c create mode 100644 drivers/scsi/scsi_trace.c create mode 100644 drivers/scsi/scsi_transport_api.h create mode 100644 drivers/scsi/scsi_transport_fc.c create mode 100644 drivers/scsi/scsi_transport_iscsi.c create mode 100644 drivers/scsi/scsi_transport_sas.c create mode 100644 drivers/scsi/scsi_transport_spi.c create mode 100644 drivers/scsi/scsi_transport_srp.c create mode 100644 drivers/scsi/scsicam.c create mode 100644 drivers/scsi/sd.c create mode 100644 drivers/scsi/sd.h create mode 100644 drivers/scsi/sd_dif.c create mode 100644 drivers/scsi/sd_trace.h create mode 100644 drivers/scsi/sd_zbc.c create mode 100644 drivers/scsi/sense_codes.h create mode 100644 drivers/scsi/ses.c create mode 100644 drivers/scsi/sg.c create mode 100644 drivers/scsi/sgiwd93.c create mode 100644 drivers/scsi/sim710.c create mode 100644 drivers/scsi/smartpqi/Kconfig create mode 100644 drivers/scsi/smartpqi/Makefile create mode 100644 drivers/scsi/smartpqi/smartpqi.h create mode 100644 drivers/scsi/smartpqi/smartpqi_init.c create mode 100644 drivers/scsi/smartpqi/smartpqi_sas_transport.c create mode 100644 drivers/scsi/smartpqi/smartpqi_sis.c create mode 100644 drivers/scsi/smartpqi/smartpqi_sis.h create mode 100644 drivers/scsi/sni_53c710.c create mode 100644 drivers/scsi/snic/Makefile create mode 100644 drivers/scsi/snic/cq_desc.h create mode 100644 drivers/scsi/snic/cq_enet_desc.h create mode 100644 drivers/scsi/snic/snic.h create mode 100644 drivers/scsi/snic/snic_attrs.c create mode 100644 drivers/scsi/snic/snic_ctl.c create mode 100644 drivers/scsi/snic/snic_debugfs.c create mode 100644 drivers/scsi/snic/snic_disc.c create mode 100644 drivers/scsi/snic/snic_disc.h create mode 100644 drivers/scsi/snic/snic_fwint.h create mode 100644 drivers/scsi/snic/snic_io.c create mode 100644 drivers/scsi/snic/snic_io.h create mode 100644 drivers/scsi/snic/snic_isr.c create mode 100644 drivers/scsi/snic/snic_main.c create mode 100644 drivers/scsi/snic/snic_res.c create mode 100644 drivers/scsi/snic/snic_res.h create mode 100644 
drivers/scsi/snic/snic_scsi.c create mode 100644 drivers/scsi/snic/snic_stats.h create mode 100644 drivers/scsi/snic/snic_trc.c create mode 100644 drivers/scsi/snic/snic_trc.h create mode 100644 drivers/scsi/snic/vnic_cq.c create mode 100644 drivers/scsi/snic/vnic_cq.h create mode 100644 drivers/scsi/snic/vnic_cq_fw.h create mode 100644 drivers/scsi/snic/vnic_dev.c create mode 100644 drivers/scsi/snic/vnic_dev.h create mode 100644 drivers/scsi/snic/vnic_devcmd.h create mode 100644 drivers/scsi/snic/vnic_intr.c create mode 100644 drivers/scsi/snic/vnic_intr.h create mode 100644 drivers/scsi/snic/vnic_resource.h create mode 100644 drivers/scsi/snic/vnic_snic.h create mode 100644 drivers/scsi/snic/vnic_stats.h create mode 100644 drivers/scsi/snic/vnic_wq.c create mode 100644 drivers/scsi/snic/vnic_wq.h create mode 100644 drivers/scsi/snic/wq_enet_desc.h create mode 100644 drivers/scsi/sr.c create mode 100644 drivers/scsi/sr.h create mode 100644 drivers/scsi/sr_ioctl.c create mode 100644 drivers/scsi/sr_vendor.c create mode 100644 drivers/scsi/st.c create mode 100644 drivers/scsi/st.h create mode 100644 drivers/scsi/st_options.h create mode 100644 drivers/scsi/stex.c create mode 100644 drivers/scsi/storvsc_drv.c create mode 100644 drivers/scsi/sun3_scsi.c create mode 100644 drivers/scsi/sun3_scsi_vme.c create mode 100644 drivers/scsi/sun3x_esp.c create mode 100644 drivers/scsi/sun_esp.c create mode 100644 drivers/scsi/sym53c8xx_2/Makefile create mode 100644 drivers/scsi/sym53c8xx_2/sym53c8xx.h create mode 100644 drivers/scsi/sym53c8xx_2/sym_defs.h create mode 100644 drivers/scsi/sym53c8xx_2/sym_fw.c create mode 100644 drivers/scsi/sym53c8xx_2/sym_fw.h create mode 100644 drivers/scsi/sym53c8xx_2/sym_fw1.h create mode 100644 drivers/scsi/sym53c8xx_2/sym_fw2.h create mode 100644 drivers/scsi/sym53c8xx_2/sym_glue.c create mode 100644 drivers/scsi/sym53c8xx_2/sym_glue.h create mode 100644 drivers/scsi/sym53c8xx_2/sym_hipd.c create mode 100644 drivers/scsi/sym53c8xx_2/sym_hipd.h create mode 100644 drivers/scsi/sym53c8xx_2/sym_malloc.c create mode 100644 drivers/scsi/sym53c8xx_2/sym_misc.h create mode 100644 drivers/scsi/sym53c8xx_2/sym_nvram.c create mode 100644 drivers/scsi/sym53c8xx_2/sym_nvram.h create mode 100644 drivers/scsi/virtio_scsi.c create mode 100644 drivers/scsi/vmw_pvscsi.c create mode 100644 drivers/scsi/vmw_pvscsi.h create mode 100644 drivers/scsi/wd33c93.c create mode 100644 drivers/scsi/wd33c93.h create mode 100644 drivers/scsi/wd719x.c create mode 100644 drivers/scsi/wd719x.h create mode 100644 drivers/scsi/xen-scsifront.c create mode 100644 drivers/scsi/zalon.c create mode 100644 drivers/scsi/zorro7xx.c create mode 100644 drivers/scsi/zorro_esp.c diff --git a/drivers/scsi/.gitignore b/drivers/scsi/.gitignore new file mode 100644 index 000000000..5f65cb75f --- /dev/null +++ b/drivers/scsi/.gitignore @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +53c700_d.h +scsi_devinfo_tbl.c diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c new file mode 100644 index 000000000..f925f8664 --- /dev/null +++ b/drivers/scsi/3w-9xxx.c @@ -0,0 +1,2304 @@ +/* + 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux. + + Written By: Adam Radford + Modifications By: Tom Couch + + Copyright (C) 2004-2009 Applied Micro Circuits Corporation. + Copyright (C) 2010 LSI Corporation. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. + + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Bugs/Comments/Suggestions should be mailed to: + aradford@gmail.com + + Note: This version of the driver does not contain a bundled firmware + image. + + History + ------- + 2.26.02.000 - Driver cleanup for kernel submission. + 2.26.02.001 - Replace schedule_timeout() calls with msleep(). + 2.26.02.002 - Add support for PAE mode. + Add lun support. + Fix twa_remove() to free irq handler/unregister_chrdev() + before shutting down card. + Change to new 'change_queue_depth' api. + Fix 'handled=1' ISR usage, remove bogus IRQ check. + Remove un-needed eh_abort handler. + Add support for embedded firmware error strings. + 2.26.02.003 - Correctly handle single sgl's with use_sg=1. + 2.26.02.004 - Add support for 9550SX controllers. + 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher. + 2.26.02.006 - Fix 9550SX pchip reset timeout. + Add big endian support. + 2.26.02.007 - Disable local interrupts during kmap/unmap_atomic(). + 2.26.02.008 - Free irq handler in __twa_shutdown(). + Serialize reset code. + Add support for 9650SE controllers. + 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails. + 2.26.02.010 - Add support for 9690SA controllers. + 2.26.02.011 - Increase max AENs drained to 256. + Add MSI support and "use_msi" module parameter. + Fix bug in twa_get_param() on 4GB+. + Use pci_resource_len() for ioremap(). + 2.26.02.012 - Add power management support. + 2.26.02.013 - Fix bug in twa_load_sgl(). + 2.26.02.014 - Force 60 second timeout default. 
+*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "3w-9xxx.h" + +/* Globals */ +#define TW_DRIVER_VERSION "2.26.02.014" +static DEFINE_MUTEX(twa_chrdev_mutex); +static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; +static unsigned int twa_device_extension_count; +static int twa_major = -1; +extern struct timezone sys_tz; + +/* Module parameters */ +MODULE_AUTHOR ("LSI"); +MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(TW_DRIVER_VERSION); + +static int use_msi = 0; +module_param(use_msi, int, S_IRUGO); +MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0"); + +/* Function prototypes */ +static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header); +static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id); +static char *twa_aen_severity_lookup(unsigned char severity_code); +static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id); +static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +static int twa_chrdev_open(struct inode *inode, struct file *file); +static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host); +static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id); +static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id); +static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits, + u32 set_features, unsigned short current_fw_srl, + unsigned short current_fw_arch_id, + unsigned short current_fw_branch, + unsigned short current_fw_build, + unsigned short *fw_on_ctlr_srl, + unsigned short *fw_on_ctlr_arch_id, + unsigned short *fw_on_ctlr_branch, + unsigned short *fw_on_ctlr_build, + u32 *init_connect_result); +static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length); +static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds); +static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds); +static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal); +static int twa_reset_device_extension(TW_Device_Extension *tw_dev); +static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset); +static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, + unsigned char *cdb, int use_sg, + TW_SG_Entry *sglistarg); +static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id); +static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code); + +/* Functions */ + +/* Show some statistics about the card */ +static ssize_t twa_show_stats(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + unsigned long flags = 0; + ssize_t len; + + spin_lock_irqsave(tw_dev->host->host_lock, flags); + len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n" + "Current commands posted: %4d\n" + "Max commands posted: %4d\n" + "Current pending commands: %4d\n" + "Max pending commands: %4d\n" + "Last sgl length: %4d\n" + "Max sgl length: %4d\n" + "Last sector count: %4d\n" + "Max sector count: 
%4d\n" + "SCSI Host Resets: %4d\n" + "AEN's: %4d\n", + TW_DRIVER_VERSION, + tw_dev->posted_request_count, + tw_dev->max_posted_request_count, + tw_dev->pending_request_count, + tw_dev->max_pending_request_count, + tw_dev->sgl_entries, + tw_dev->max_sgl_entries, + tw_dev->sector_count, + tw_dev->max_sector_count, + tw_dev->num_resets, + tw_dev->aen_count); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + return len; +} /* End twa_show_stats() */ + +/* Create sysfs 'stats' entry */ +static struct device_attribute twa_host_stats_attr = { + .attr = { + .name = "stats", + .mode = S_IRUGO, + }, + .show = twa_show_stats +}; + +/* Host attributes initializer */ +static struct attribute *twa_host_attrs[] = { + &twa_host_stats_attr.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(twa_host); + +/* File operations struct for character device */ +static const struct file_operations twa_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = twa_chrdev_ioctl, + .open = twa_chrdev_open, + .release = NULL, + .llseek = noop_llseek, +}; + +/* + * The controllers use an inline buffer instead of a mapped SGL for small, + * single entry buffers. Note that we treat a zero-length transfer like + * a mapped SGL. + */ +static bool twa_command_mapped(struct scsi_cmnd *cmd) +{ + return scsi_sg_count(cmd) != 1 || + scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH; +} + +/* This function will complete an aen request from the isr */ +static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id) +{ + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Command_Apache_Header *header; + unsigned short aen; + int retval = 1; + + header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; + tw_dev->posted_request_count--; + aen = le16_to_cpu(header->status_block.error); + full_command_packet = tw_dev->command_packet_virt[request_id]; + command_packet = &full_command_packet->command.oldcommand; + + /* First check for internal completion of set param for time sync */ + if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) { + /* Keep reading the queue in case there are more aen's */ + if (twa_aen_read_queue(tw_dev, request_id)) + goto out2; + else { + retval = 0; + goto out; + } + } + + switch (aen) { + case TW_AEN_QUEUE_EMPTY: + /* Quit reading the queue if this is the last one */ + break; + case TW_AEN_SYNC_TIME_WITH_HOST: + twa_aen_sync_time(tw_dev, request_id); + retval = 0; + goto out; + default: + twa_aen_queue_event(tw_dev, header); + + /* If there are more aen's, keep reading the queue */ + if (twa_aen_read_queue(tw_dev, request_id)) + goto out2; + else { + retval = 0; + goto out; + } + } + retval = 0; +out2: + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags); +out: + return retval; +} /* End twa_aen_complete() */ + +/* This function will drain aen queue */ +static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset) +{ + int request_id = 0; + unsigned char cdb[TW_MAX_CDB_LEN]; + TW_SG_Entry sglist[1]; + int finished = 0, count = 0; + TW_Command_Full *full_command_packet; + TW_Command_Apache_Header *header; + unsigned short aen; + int first_reset = 0, queue = 0, retval = 1; + + if (no_check_reset) + first_reset = 0; + else + first_reset = 1; + + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + + /* Initialize cdb */ + memset(&cdb, 0, TW_MAX_CDB_LEN); + cdb[0] = REQUEST_SENSE; /* opcode */ 
+ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ + + /* Initialize sglist */ + memset(&sglist, 0, sizeof(TW_SG_Entry)); + sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE); + sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); + + if (tw_dev->generic_buffer_phys[request_id] & TW_ALIGNMENT_9000_SGL) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain"); + goto out; + } + + /* Mark internal command */ + tw_dev->srb[request_id] = NULL; + + do { + /* Send command to the board */ + if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense"); + goto out; + } + + /* Now poll for completion */ + if (twa_poll_response(tw_dev, request_id, 30)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue"); + tw_dev->posted_request_count--; + goto out; + } + + tw_dev->posted_request_count--; + header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; + aen = le16_to_cpu(header->status_block.error); + queue = 0; + count++; + + switch (aen) { + case TW_AEN_QUEUE_EMPTY: + if (first_reset != 1) + goto out; + else + finished = 1; + break; + case TW_AEN_SOFT_RESET: + if (first_reset == 0) + first_reset = 1; + else + queue = 1; + break; + case TW_AEN_SYNC_TIME_WITH_HOST: + break; + default: + queue = 1; + } + + /* Now queue an event info */ + if (queue) + twa_aen_queue_event(tw_dev, header); + } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN)); + + if (count == TW_MAX_AEN_DRAIN) + goto out; + + retval = 0; +out: + tw_dev->state[request_id] = TW_S_INITIAL; + return retval; +} /* End twa_aen_drain_queue() */ + +/* This function will queue an event */ +static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header) +{ + u32 local_time; + TW_Event *event; + unsigned short aen; + char host[16]; + char *error_str; + + tw_dev->aen_count++; + + /* Fill out event info */ + event = tw_dev->event_queue[tw_dev->error_index]; + + /* Check for clobber */ + host[0] = '\0'; + if (tw_dev->host) { + sprintf(host, " scsi%d:", tw_dev->host->host_no); + if (event->retrieved == TW_AEN_NOT_RETRIEVED) + tw_dev->aen_clobber = 1; + } + + aen = le16_to_cpu(header->status_block.error); + memset(event, 0, sizeof(TW_Event)); + + event->severity = TW_SEV_OUT(header->status_block.severity__reserved); + /* event->time_stamp_sec overflows in y2106 */ + local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60)); + event->time_stamp_sec = local_time; + event->aen_code = aen; + event->retrieved = TW_AEN_NOT_RETRIEVED; + event->sequence_id = tw_dev->error_sequence_id; + tw_dev->error_sequence_id++; + + /* Check for embedded error string */ + error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]); + + header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0'; + event->parameter_len = strlen(header->err_specific_desc); + memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str)))); + if (event->severity != TW_AEN_SEVERITY_DEBUG) + printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n", + host, + twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)), + TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, + error_str[0] == '\0' ? 
twa_string_lookup(twa_aen_table, aen) : error_str, + header->err_specific_desc); + else + tw_dev->aen_count--; + + if ((tw_dev->error_index + 1) == TW_Q_LENGTH) + tw_dev->event_queue_wrapped = 1; + tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH; +} /* End twa_aen_queue_event() */ + +/* This function will read the aen queue from the isr */ +static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id) +{ + unsigned char cdb[TW_MAX_CDB_LEN]; + TW_SG_Entry sglist[1]; + TW_Command_Full *full_command_packet; + int retval = 1; + + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + + /* Initialize cdb */ + memset(&cdb, 0, TW_MAX_CDB_LEN); + cdb[0] = REQUEST_SENSE; /* opcode */ + cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ + + /* Initialize sglist */ + memset(&sglist, 0, sizeof(TW_SG_Entry)); + sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE); + sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); + + /* Mark internal command */ + tw_dev->srb[request_id] = NULL; + + /* Now post the command packet */ + if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue"); + goto out; + } + retval = 0; +out: + return retval; +} /* End twa_aen_read_queue() */ + +/* This function will look up an AEN severity string */ +static char *twa_aen_severity_lookup(unsigned char severity_code) +{ + char *retval = NULL; + + if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) || + (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG)) + goto out; + + retval = twa_aen_severity_table[severity_code]; +out: + return retval; +} /* End twa_aen_severity_lookup() */ + +/* This function will sync firmware time with the host time */ +static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id) +{ + u32 schedulertime; + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Param_Apache *param; + time64_t local_time; + + /* Fill out the command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + command_packet = &full_command_packet->command.oldcommand; + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM); + command_packet->request_id = request_id; + command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); + command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE); + command_packet->size = TW_COMMAND_SIZE; + command_packet->byte6_offset.parameter_count = cpu_to_le16(1); + + /* Setup the param */ + param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; + memset(param, 0, TW_SECTOR_SIZE); + param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */ + param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */ + param->parameter_size_bytes = cpu_to_le16(4); + + /* Convert system time in UTC to local time seconds since last + Sunday 12:00AM */ + local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60)); + div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime); + + memcpy(param->data, &(__le32){cpu_to_le32(schedulertime)}, sizeof(__le32)); + + /* Mark internal command */ + tw_dev->srb[request_id] = NULL; + + /* Now post the command */ + twa_post_command_packet(tw_dev, request_id, 1); +} /* End twa_aen_sync_time() */ + +/* This function will 
allocate memory and check if it is correctly aligned */ +static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which) +{ + int i; + dma_addr_t dma_handle; + unsigned long *cpu_addr; + int retval = 1; + + cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, + size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL); + if (!cpu_addr) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); + goto out; + } + + if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory"); + dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH, + cpu_addr, dma_handle); + goto out; + } + + memset(cpu_addr, 0, size*TW_Q_LENGTH); + + for (i = 0; i < TW_Q_LENGTH; i++) { + switch(which) { + case 0: + tw_dev->command_packet_phys[i] = dma_handle+(i*size); + tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size)); + break; + case 1: + tw_dev->generic_buffer_phys[i] = dma_handle+(i*size); + tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size)); + break; + } + } + retval = 0; +out: + return retval; +} /* End twa_allocate_memory() */ + +/* This function will check the status register for unexpected bits */ +static int twa_check_bits(u32 status_reg_value) +{ + int retval = 1; + + if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS) + goto out; + if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0) + goto out; + + retval = 0; +out: + return retval; +} /* End twa_check_bits() */ + +/* This function will check the srl and decide if we are compatible */ +static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed) +{ + int retval = 1; + unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0; + unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0; + u32 init_connect_result = 0; + + if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS, + TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL, + TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH, + TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl, + &fw_on_ctlr_arch_id, &fw_on_ctlr_branch, + &fw_on_ctlr_build, &init_connect_result)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL"); + goto out; + } + + tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl; + tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch; + tw_dev->tw_compat_info.working_build = fw_on_ctlr_build; + + /* Try base mode compatibility */ + if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) { + if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS, + TW_EXTENDED_INIT_CONNECT, + TW_BASE_FW_SRL, TW_9000_ARCH_ID, + TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD, + &fw_on_ctlr_srl, &fw_on_ctlr_arch_id, + &fw_on_ctlr_branch, &fw_on_ctlr_build, + &init_connect_result)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL"); + goto out; + } + if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) { + if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware"); + } else { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver"); + } + goto out; + } + tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL; + tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH; + tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD; + } + + /* Load rest of compatibility struct */ + 
strscpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, + sizeof(tw_dev->tw_compat_info.driver_version)); + tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL; + tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH; + tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD; + tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL; + tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH; + tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD; + tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl; + tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch; + tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build; + + retval = 0; +out: + return retval; +} /* End twa_check_srl() */ + +/* This function handles ioctl for the character device */ +static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct inode *inode = file_inode(file); + long timeout; + unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0; + dma_addr_t dma_handle; + int request_id = 0; + unsigned int sequence_id = 0; + unsigned char event_index, start_index; + TW_Ioctl_Driver_Command driver_command; + TW_Ioctl_Buf_Apache *tw_ioctl; + TW_Lock *tw_lock; + TW_Command_Full *full_command_packet; + TW_Compatibility_Info *tw_compat_info; + TW_Event *event; + ktime_t current_time; + TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)]; + int retval = TW_IOCTL_ERROR_OS_EFAULT; + void __user *argp = (void __user *)arg; + + mutex_lock(&twa_chrdev_mutex); + + /* Only let one of these through at a time */ + if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { + retval = TW_IOCTL_ERROR_OS_EINTR; + goto out; + } + + /* First copy down the driver command */ + if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command))) + goto out2; + + /* Check data buffer size */ + if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) { + retval = TW_IOCTL_ERROR_OS_EINVAL; + goto out2; + } + + /* Hardware can only do multiple of 512 byte transfers */ + data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511; + + /* Now allocate ioctl buf memory */ + cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, + sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted, + &dma_handle, GFP_KERNEL); + if (!cpu_addr) { + retval = TW_IOCTL_ERROR_OS_ENOMEM; + goto out2; + } + + tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr; + + /* Now copy down the entire ioctl */ + if (copy_from_user(tw_ioctl, argp, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length)) + goto out3; + + /* See which ioctl we are doing */ + switch (cmd) { + case TW_IOCTL_FIRMWARE_PASS_THROUGH: + spin_lock_irqsave(tw_dev->host->host_lock, flags); + twa_get_request_id(tw_dev, &request_id); + + /* Flag internal command */ + tw_dev->srb[request_id] = NULL; + + /* Flag chrdev ioctl */ + tw_dev->chrdev_request_id = request_id; + + full_command_packet = &tw_ioctl->firmware_command; + + /* Load request id and sglist for both command types */ + twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted); + + memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full)); + + /* Now post the command packet to the controller */ + twa_post_command_packet(tw_dev, request_id, 1); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + + timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ; + + /* Now wait for command to complete */ + timeout = 
wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout); + + /* We timed out, and didn't get an interrupt */ + if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) { + /* Now we need to reset the board */ + printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n", + tw_dev->host->host_no, TW_DRIVER, 0x37, + cmd); + retval = TW_IOCTL_ERROR_OS_EIO; + twa_reset_device_extension(tw_dev); + goto out3; + } + + /* Now copy in the command packet response */ + memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full)); + + /* Now complete the io */ + spin_lock_irqsave(tw_dev->host->host_lock, flags); + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + break; + case TW_IOCTL_GET_COMPATIBILITY_INFO: + tw_ioctl->driver_command.status = 0; + /* Copy compatibility struct into ioctl data buffer */ + tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer; + memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info)); + break; + case TW_IOCTL_GET_LAST_EVENT: + if (tw_dev->event_queue_wrapped) { + if (tw_dev->aen_clobber) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; + tw_dev->aen_clobber = 0; + } else + tw_ioctl->driver_command.status = 0; + } else { + if (!tw_dev->error_index) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + tw_ioctl->driver_command.status = 0; + } + event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH; + memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); + tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; + break; + case TW_IOCTL_GET_FIRST_EVENT: + if (tw_dev->event_queue_wrapped) { + if (tw_dev->aen_clobber) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; + tw_dev->aen_clobber = 0; + } else + tw_ioctl->driver_command.status = 0; + event_index = tw_dev->error_index; + } else { + if (!tw_dev->error_index) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + tw_ioctl->driver_command.status = 0; + event_index = 0; + } + memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); + tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; + break; + case TW_IOCTL_GET_NEXT_EVENT: + event = (TW_Event *)tw_ioctl->data_buffer; + sequence_id = event->sequence_id; + tw_ioctl->driver_command.status = 0; + + if (tw_dev->event_queue_wrapped) { + if (tw_dev->aen_clobber) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; + tw_dev->aen_clobber = 0; + } + start_index = tw_dev->error_index; + } else { + if (!tw_dev->error_index) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + start_index = 0; + } + event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH; + + if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) { + if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER) + tw_dev->aen_clobber = 1; + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); + tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; + 
break; + case TW_IOCTL_GET_PREVIOUS_EVENT: + event = (TW_Event *)tw_ioctl->data_buffer; + sequence_id = event->sequence_id; + tw_ioctl->driver_command.status = 0; + + if (tw_dev->event_queue_wrapped) { + if (tw_dev->aen_clobber) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; + tw_dev->aen_clobber = 0; + } + start_index = tw_dev->error_index; + } else { + if (!tw_dev->error_index) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + start_index = 0; + } + event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH; + + if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) { + if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER) + tw_dev->aen_clobber = 1; + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); + tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; + break; + case TW_IOCTL_GET_LOCK: + tw_lock = (TW_Lock *)tw_ioctl->data_buffer; + current_time = ktime_get(); + + if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || + ktime_after(current_time, tw_dev->ioctl_time)) { + tw_dev->ioctl_sem_lock = 1; + tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec); + tw_ioctl->driver_command.status = 0; + tw_lock->time_remaining_msec = tw_lock->timeout_msec; + } else { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED; + tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time); + } + break; + case TW_IOCTL_RELEASE_LOCK: + if (tw_dev->ioctl_sem_lock == 1) { + tw_dev->ioctl_sem_lock = 0; + tw_ioctl->driver_command.status = 0; + } else { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED; + } + break; + default: + retval = TW_IOCTL_ERROR_OS_ENOTTY; + goto out3; + } + + /* Now copy the entire response to userspace */ + if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0) + retval = 0; +out3: + /* Now free ioctl buf memory */ + dma_free_coherent(&tw_dev->tw_pci_dev->dev, + sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted, + cpu_addr, dma_handle); +out2: + mutex_unlock(&tw_dev->ioctl_lock); +out: + mutex_unlock(&twa_chrdev_mutex); + return retval; +} /* End twa_chrdev_ioctl() */ + +/* This function handles open for the character device */ +/* NOTE that this function will race with remove. 
*/ +static int twa_chrdev_open(struct inode *inode, struct file *file) +{ + unsigned int minor_number; + int retval = TW_IOCTL_ERROR_OS_ENODEV; + + if (!capable(CAP_SYS_ADMIN)) { + retval = -EACCES; + goto out; + } + + minor_number = iminor(inode); + if (minor_number >= twa_device_extension_count) + goto out; + retval = 0; +out: + return retval; +} /* End twa_chrdev_open() */ + +/* This function will print readable messages from status register errors */ +static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value) +{ + int retval = 1; + + /* Check for various error conditions and handle them appropriately */ + if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing"); + writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); + } + + if (status_reg_value & TW_STATUS_PCI_ABORT) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing"); + writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev)); + pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT); + } + + if (status_reg_value & TW_STATUS_QUEUE_ERROR) { + if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) && + (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) || + (!test_bit(TW_IN_RESET, &tw_dev->flags))) + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing"); + writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); + } + + if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) { + if (tw_dev->reset_print == 0) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing"); + tw_dev->reset_print = 1; + } + goto out; + } + retval = 0; +out: + return retval; +} /* End twa_decode_bits() */ + +/* This function will empty the response queue */ +static int twa_empty_response_queue(TW_Device_Extension *tw_dev) +{ + u32 status_reg_value; + int count = 0, retval = 1; + + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + + while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) { + readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + count++; + } + if (count == TW_MAX_RESPONSE_DRAIN) + goto out; + + retval = 0; +out: + return retval; +} /* End twa_empty_response_queue() */ + +/* This function will clear the pchip/response queue on 9550SX */ +static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev) +{ + u32 response_que_value = 0; + unsigned long before; + int retval = 1; + + if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) { + before = jiffies; + while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) { + response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev)); + msleep(1); + if (time_after(jiffies, before + HZ * 30)) + goto out; + } + /* P-chip settle time */ + msleep(500); + retval = 0; + } else + retval = 0; +out: + return retval; +} /* End twa_empty_response_queue_large() */ + +/* This function passes sense keys from firmware to scsi layer */ +static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host) +{ + TW_Command_Full *full_command_packet; + unsigned short error; + int retval = 1; + char *error_str; + + full_command_packet = tw_dev->command_packet_virt[request_id]; + + /* Check for embedded error string */ + error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 
1]); + + /* Don't print error for Logical unit not supported during rollcall */ + error = le16_to_cpu(full_command_packet->header.status_block.error); + if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) { + if (print_host) + printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n", + tw_dev->host->host_no, + TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error, + error_str[0] ? error_str : twa_string_lookup(twa_error_table, error), + full_command_packet->header.err_specific_desc); + else + printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n", + TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error, + error_str[0] ? error_str : twa_string_lookup(twa_error_table, error), + full_command_packet->header.err_specific_desc); + } + + if (copy_sense) { + memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH); + tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1); + retval = TW_ISR_DONT_RESULT; + goto out; + } + retval = 0; +out: + return retval; +} /* End twa_fill_sense() */ + +/* This function will free up device extension resources */ +static void twa_free_device_extension(TW_Device_Extension *tw_dev) +{ + if (tw_dev->command_packet_virt[0]) + dma_free_coherent(&tw_dev->tw_pci_dev->dev, + sizeof(TW_Command_Full) * TW_Q_LENGTH, + tw_dev->command_packet_virt[0], + tw_dev->command_packet_phys[0]); + + if (tw_dev->generic_buffer_virt[0]) + dma_free_coherent(&tw_dev->tw_pci_dev->dev, + TW_SECTOR_SIZE * TW_Q_LENGTH, + tw_dev->generic_buffer_virt[0], + tw_dev->generic_buffer_phys[0]); + + kfree(tw_dev->event_queue[0]); +} /* End twa_free_device_extension() */ + +/* This function will free a request id */ +static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id) +{ + tw_dev->free_queue[tw_dev->free_tail] = request_id; + tw_dev->state[request_id] = TW_S_FINISHED; + tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH; +} /* End twa_free_request_id() */ + +/* This function will get parameter table entries from the firmware */ +static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes) +{ + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Param_Apache *param; + void *retval = NULL; + + /* Setup the command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + command_packet = &full_command_packet->command.oldcommand; + + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM); + command_packet->size = TW_COMMAND_SIZE; + command_packet->request_id = request_id; + command_packet->byte6_offset.block_count = cpu_to_le16(1); + + /* Now setup the param */ + param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; + memset(param, 0, TW_SECTOR_SIZE); + param->table_id = cpu_to_le16(table_id | 0x8000); + param->parameter_id = cpu_to_le16(parameter_id); + param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes); + + command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); + command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE); + + /* Post the command packet to the board */ + twa_post_command_packet(tw_dev, request_id, 1); + + /* Poll for completion */ + if (twa_poll_response(tw_dev, request_id, 30)) + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param") + else 
+ retval = (void *)&(param->data[0]); + + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_INITIAL; + + return retval; +} /* End twa_get_param() */ + +/* This function will assign an available request id */ +static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id) +{ + *request_id = tw_dev->free_queue[tw_dev->free_head]; + tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH; + tw_dev->state[*request_id] = TW_S_STARTED; +} /* End twa_get_request_id() */ + +/* This function will send an initconnection command to controller */ +static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits, + u32 set_features, unsigned short current_fw_srl, + unsigned short current_fw_arch_id, + unsigned short current_fw_branch, + unsigned short current_fw_build, + unsigned short *fw_on_ctlr_srl, + unsigned short *fw_on_ctlr_arch_id, + unsigned short *fw_on_ctlr_branch, + unsigned short *fw_on_ctlr_build, + u32 *init_connect_result) +{ + TW_Command_Full *full_command_packet; + TW_Initconnect *tw_initconnect; + int request_id = 0, retval = 1; + + /* Initialize InitConnection command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + full_command_packet->header.header_desc.size_header = 128; + + tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand; + tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION); + tw_initconnect->request_id = request_id; + tw_initconnect->message_credits = cpu_to_le16(message_credits); + + /* Turn on 64-bit sgl support if we need to */ + set_features |= sizeof(dma_addr_t) > 4 ? 1 : 0; + + tw_initconnect->features = cpu_to_le32(set_features); + + if (set_features & TW_EXTENDED_INIT_CONNECT) { + tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED; + tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl); + tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id); + tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch); + tw_initconnect->fw_build = cpu_to_le16(current_fw_build); + } else + tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE; + + /* Send command packet to the board */ + twa_post_command_packet(tw_dev, request_id, 1); + + /* Poll for completion */ + if (twa_poll_response(tw_dev, request_id, 30)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection"); + } else { + if (set_features & TW_EXTENDED_INIT_CONNECT) { + *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl); + *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id); + *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch); + *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build); + *init_connect_result = le32_to_cpu(tw_initconnect->result); + } + retval = 0; + } + + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_INITIAL; + + return retval; +} /* End twa_initconnection() */ + +/* This function will initialize the fields of a device extension */ +static int twa_initialize_device_extension(TW_Device_Extension *tw_dev) +{ + int i, retval = 1; + + /* Initialize command packet buffers */ + if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed"); + goto out; + } + + /* Initialize generic buffer */ + if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed"); + goto out; + } + + /* Allocate event 
info space */ + tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL); + if (!tw_dev->event_queue[0]) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed"); + goto out; + } + + + for (i = 0; i < TW_Q_LENGTH; i++) { + tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event))); + tw_dev->free_queue[i] = i; + tw_dev->state[i] = TW_S_INITIAL; + } + + tw_dev->pending_head = TW_Q_START; + tw_dev->pending_tail = TW_Q_START; + tw_dev->free_head = TW_Q_START; + tw_dev->free_tail = TW_Q_START; + tw_dev->error_sequence_id = 1; + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + + mutex_init(&tw_dev->ioctl_lock); + init_waitqueue_head(&tw_dev->ioctl_wqueue); + + retval = 0; +out: + return retval; +} /* End twa_initialize_device_extension() */ + +/* This function is the interrupt service routine */ +static irqreturn_t twa_interrupt(int irq, void *dev_instance) +{ + int request_id, error = 0; + u32 status_reg_value; + TW_Response_Queue response_que; + TW_Command_Full *full_command_packet; + TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance; + int handled = 0; + + /* Get the per adapter lock */ + spin_lock(tw_dev->host->host_lock); + + /* Read the registers */ + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + + /* Check if this is our interrupt, otherwise bail */ + if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT)) + goto twa_interrupt_bail; + + handled = 1; + + /* If we are resetting, bail */ + if (test_bit(TW_IN_RESET, &tw_dev->flags)) + goto twa_interrupt_bail; + + /* Check controller for errors */ + if (twa_check_bits(status_reg_value)) { + if (twa_decode_bits(tw_dev, status_reg_value)) { + TW_CLEAR_ALL_INTERRUPTS(tw_dev); + goto twa_interrupt_bail; + } + } + + /* Handle host interrupt */ + if (status_reg_value & TW_STATUS_HOST_INTERRUPT) + TW_CLEAR_HOST_INTERRUPT(tw_dev); + + /* Handle attention interrupt */ + if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) { + TW_CLEAR_ATTENTION_INTERRUPT(tw_dev); + if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) { + twa_get_request_id(tw_dev, &request_id); + + error = twa_aen_read_queue(tw_dev, request_id); + if (error) { + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags); + } + } + } + + /* Handle command interrupt */ + if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) { + TW_MASK_COMMAND_INTERRUPT(tw_dev); + /* Drain as many pending commands as we can */ + while (tw_dev->pending_request_count > 0) { + request_id = tw_dev->pending_queue[tw_dev->pending_head]; + if (tw_dev->state[request_id] != TW_S_PENDING) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending"); + TW_CLEAR_ALL_INTERRUPTS(tw_dev); + goto twa_interrupt_bail; + } + if (twa_post_command_packet(tw_dev, request_id, 1)==0) { + tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH; + tw_dev->pending_request_count--; + } else { + /* If we get here, we will continue re-posting on the next command interrupt */ + break; + } + } + } + + /* Handle response interrupt */ + if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) { + + /* Drain the response queue from the board */ + while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) { + /* Complete the response */ + response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); + request_id = TW_RESID_OUT(response_que.response_id); + full_command_packet = 
tw_dev->command_packet_virt[request_id]; + error = 0; + /* Check for command packet errors */ + if (full_command_packet->command.newcommand.status != 0) { + if (tw_dev->srb[request_id] != NULL) { + error = twa_fill_sense(tw_dev, request_id, 1, 1); + } else { + /* Skip ioctl error prints */ + if (request_id != tw_dev->chrdev_request_id) { + error = twa_fill_sense(tw_dev, request_id, 0, 1); + } + } + } + + /* Check for correct state */ + if (tw_dev->state[request_id] != TW_S_POSTED) { + if (tw_dev->srb[request_id] != NULL) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted"); + TW_CLEAR_ALL_INTERRUPTS(tw_dev); + goto twa_interrupt_bail; + } + } + + /* Check for internal command completion */ + if (tw_dev->srb[request_id] == NULL) { + if (request_id != tw_dev->chrdev_request_id) { + if (twa_aen_complete(tw_dev, request_id)) + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt"); + } else { + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + wake_up(&tw_dev->ioctl_wqueue); + } + } else { + struct scsi_cmnd *cmd; + + cmd = tw_dev->srb[request_id]; + + twa_scsiop_execute_scsi_complete(tw_dev, request_id); + /* If no error command was a success */ + if (error == 0) { + cmd->result = (DID_OK << 16); + } + + /* If error, command failed */ + if (error == 1) { + /* Ask for a host reset */ + cmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION; + } + + /* Report residual bytes for single sgl */ + if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) { + u32 length = le32_to_cpu(full_command_packet->command.newcommand.sg_list[0].length); + + if (length < scsi_bufflen(cmd)) + scsi_set_resid(cmd, scsi_bufflen(cmd) - length); + } + + /* Now complete the io */ + if (twa_command_mapped(cmd)) + scsi_dma_unmap(cmd); + scsi_done(cmd); + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + tw_dev->posted_request_count--; + } + + /* Check for valid status after each drain */ + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + if (twa_check_bits(status_reg_value)) { + if (twa_decode_bits(tw_dev, status_reg_value)) { + TW_CLEAR_ALL_INTERRUPTS(tw_dev); + goto twa_interrupt_bail; + } + } + } + } + +twa_interrupt_bail: + spin_unlock(tw_dev->host->host_lock); + return IRQ_RETVAL(handled); +} /* End twa_interrupt() */ + +/* This function will load the request id and various sgls for ioctls */ +static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length) +{ + TW_Command *oldcommand; + TW_Command_Apache *newcommand; + TW_SG_Entry *sgl; + unsigned int pae = 0; + + if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4)) + pae = 1; + + if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) { + newcommand = &full_command_packet->command.newcommand; + newcommand->request_id__lunl = + TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id); + if (length) { + newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache)); + newcommand->sg_list[0].length = cpu_to_le32(length); + } + newcommand->sgl_entries__lunh = + TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 
1 : 0); + } else { + oldcommand = &full_command_packet->command.oldcommand; + oldcommand->request_id = request_id; + + if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) { + /* Load the sg list */ + if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA) + sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae); + else + sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset)); + sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache)); + sgl->length = cpu_to_le32(length); + + oldcommand->size += pae; + } + } +} /* End twa_load_sgl() */ + +/* This function will poll for a response interrupt of a request */ +static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) +{ + int retval = 1, found = 0, response_request_id; + TW_Response_Queue response_queue; + TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id]; + + if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) { + response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); + response_request_id = TW_RESID_OUT(response_queue.response_id); + if (request_id != response_request_id) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response"); + goto out; + } + if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) { + if (full_command_packet->command.newcommand.status != 0) { + /* bad response */ + twa_fill_sense(tw_dev, request_id, 0, 0); + goto out; + } + found = 1; + } else { + if (full_command_packet->command.oldcommand.status != 0) { + /* bad response */ + twa_fill_sense(tw_dev, request_id, 0, 0); + goto out; + } + found = 1; + } + } + + if (found) + retval = 0; +out: + return retval; +} /* End twa_poll_response() */ + +/* This function will poll the status register for a flag */ +static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds) +{ + u32 status_reg_value; + unsigned long before; + int retval = 1; + + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + before = jiffies; + + if (twa_check_bits(status_reg_value)) + twa_decode_bits(tw_dev, status_reg_value); + + while ((status_reg_value & flag) != flag) { + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + + if (twa_check_bits(status_reg_value)) + twa_decode_bits(tw_dev, status_reg_value); + + if (time_after(jiffies, before + HZ * seconds)) + goto out; + + msleep(50); + } + retval = 0; +out: + return retval; +} /* End twa_poll_status() */ + +/* This function will poll the status register for disappearance of a flag */ +static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds) +{ + u32 status_reg_value; + unsigned long before; + int retval = 1; + + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + before = jiffies; + + if (twa_check_bits(status_reg_value)) + twa_decode_bits(tw_dev, status_reg_value); + + while ((status_reg_value & flag) != 0) { + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + if (twa_check_bits(status_reg_value)) + twa_decode_bits(tw_dev, status_reg_value); + + if (time_after(jiffies, before + HZ * seconds)) + goto out; + + msleep(50); + } + retval = 0; +out: + return retval; +} /* End twa_poll_status_gone() */ + +/* This function will attempt to post a command packet to the board */ +static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal) +{ + u32 status_reg_value; + dma_addr_t command_que_value; + int retval = 1; 
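/*
 * Editor's note -- illustrative sketch only, not part of the driver or of
 * this patch.  The writes below program the command packet's bus address
 * into the controller's command queue port; on the large-queue parts
 * (9650SE/9690SA) and on 64-bit DMA configurations the address is split
 * into two 32-bit writes, low dword first and the high dword at offset
 * 0x4.  A minimal stand-alone version of that split, assuming the usual
 * writel()/lower_32_bits()/upper_32_bits() kernel helpers:
 *
 *	static void post_addr64(void __iomem *port, u64 bus_addr)
 *	{
 *		writel(lower_32_bits(bus_addr), port);		// low dword
 *		writel(upper_32_bits(bus_addr), port + 0x4);	// high dword
 *	}
 */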
+ + command_que_value = tw_dev->command_packet_phys[request_id]; + + /* For 9650SE write low 4 bytes first */ + if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) || + (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) { + command_que_value += TW_COMMAND_OFFSET; + writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev)); + } + + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + + if (twa_check_bits(status_reg_value)) + twa_decode_bits(tw_dev, status_reg_value); + + if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) { + + /* Only pend internal driver commands */ + if (!internal) { + retval = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + + /* Couldn't post the command packet, so we do it later */ + if (tw_dev->state[request_id] != TW_S_PENDING) { + tw_dev->state[request_id] = TW_S_PENDING; + tw_dev->pending_request_count++; + if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) { + tw_dev->max_pending_request_count = tw_dev->pending_request_count; + } + tw_dev->pending_queue[tw_dev->pending_tail] = request_id; + tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH; + } + TW_UNMASK_COMMAND_INTERRUPT(tw_dev); + goto out; + } else { + if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) || + (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) { + /* Now write upper 4 bytes */ + writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4); + } else { + if (sizeof(dma_addr_t) > 4) { + command_que_value += TW_COMMAND_OFFSET; + writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev)); + writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4); + } else { + writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev)); + } + } + tw_dev->state[request_id] = TW_S_POSTED; + tw_dev->posted_request_count++; + if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) { + tw_dev->max_posted_request_count = tw_dev->posted_request_count; + } + } + retval = 0; +out: + return retval; +} /* End twa_post_command_packet() */ + +/* This function will reset a device extension */ +static int twa_reset_device_extension(TW_Device_Extension *tw_dev) +{ + int i = 0; + int retval = 1; + unsigned long flags = 0; + + set_bit(TW_IN_RESET, &tw_dev->flags); + TW_DISABLE_INTERRUPTS(tw_dev); + TW_MASK_COMMAND_INTERRUPT(tw_dev); + spin_lock_irqsave(tw_dev->host->host_lock, flags); + + /* Abort all requests that are in progress */ + for (i = 0; i < TW_Q_LENGTH; i++) { + if ((tw_dev->state[i] != TW_S_FINISHED) && + (tw_dev->state[i] != TW_S_INITIAL) && + (tw_dev->state[i] != TW_S_COMPLETED)) { + if (tw_dev->srb[i]) { + struct scsi_cmnd *cmd = tw_dev->srb[i]; + + cmd->result = (DID_RESET << 16); + if (twa_command_mapped(cmd)) + scsi_dma_unmap(cmd); + scsi_done(cmd); + } + } + } + + /* Reset queues and counts */ + for (i = 0; i < TW_Q_LENGTH; i++) { + tw_dev->free_queue[i] = i; + tw_dev->state[i] = TW_S_INITIAL; + } + tw_dev->free_head = TW_Q_START; + tw_dev->free_tail = TW_Q_START; + tw_dev->posted_request_count = 0; + tw_dev->pending_request_count = 0; + tw_dev->pending_head = TW_Q_START; + tw_dev->pending_tail = TW_Q_START; + tw_dev->reset_print = 0; + + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + + if (twa_reset_sequence(tw_dev, 1)) + goto out; + + TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); + clear_bit(TW_IN_RESET, &tw_dev->flags); + 
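/*
 * Editor's note -- illustrative sketch only, not part of the driver or of
 * this patch.  Request ids are handed out from the ring of free slots that
 * the loop above just re-seeded with 0..TW_Q_LENGTH-1 (see also
 * twa_get_request_id()).  A self-contained model of that allocator, with
 * hypothetical names standing in for free_queue[]/free_head/free_tail:
 *
 *	#define RING_LEN 256			// mirrors TW_Q_LENGTH
 *
 *	static unsigned char ring[RING_LEN];	// seeded with 0..RING_LEN-1
 *	static unsigned int head, tail;
 *
 *	static int get_id(void)			// cf. twa_get_request_id()
 *	{
 *		int id = ring[head];
 *
 *		head = (head + 1) % RING_LEN;
 *		return id;
 *	}
 *
 *	static void put_id(int id)		// cf. twa_free_request_id()
 *	{
 *		ring[tail] = id;
 *		tail = (tail + 1) % RING_LEN;
 *	}
 */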
tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + + retval = 0; +out: + return retval; +} /* End twa_reset_device_extension() */ + +/* This function will reset a controller */ +static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset) +{ + int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset; + + while (tries < TW_MAX_RESET_TRIES) { + if (do_soft_reset) { + TW_SOFT_RESET(tw_dev); + /* Clear pchip/response queue on 9550SX */ + if (twa_empty_response_queue_large(tw_dev)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } + } + + /* Make sure controller is in a good state */ + if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } + + /* Empty response queue */ + if (twa_empty_response_queue(tw_dev)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } + + flashed = 0; + + /* Check for compatibility/flash */ + if (twa_check_srl(tw_dev, &flashed)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } else { + if (flashed) { + tries++; + continue; + } + } + + /* Drain the AEN queue */ + if (twa_aen_drain_queue(tw_dev, soft_reset)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } + + /* If we got here, controller is in a good state */ + retval = 0; + goto out; + } +out: + return retval; +} /* End twa_reset_sequence() */ + +/* This funciton returns unit geometry in cylinders/heads/sectors */ +static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) +{ + int heads, sectors, cylinders; + + if (capacity >= 0x200000) { + heads = 255; + sectors = 63; + cylinders = sector_div(capacity, heads * sectors); + } else { + heads = 64; + sectors = 32; + cylinders = sector_div(capacity, heads * sectors); + } + + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + + return 0; +} /* End twa_scsi_biosparam() */ + +/* This is the new scsi eh reset function */ +static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt) +{ + TW_Device_Extension *tw_dev = NULL; + int retval = FAILED; + + tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; + + tw_dev->num_resets++; + + sdev_printk(KERN_WARNING, SCpnt->device, + "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n", + TW_DRIVER, 0x2c, SCpnt->cmnd[0]); + + /* Make sure we are not issuing an ioctl or resetting from ioctl */ + mutex_lock(&tw_dev->ioctl_lock); + + /* Now reset the card and some of the device extension data */ + if (twa_reset_device_extension(tw_dev)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset"); + goto out; + } + + retval = SUCCESS; +out: + mutex_unlock(&tw_dev->ioctl_lock); + return retval; +} /* End twa_scsi_eh_reset() */ + +/* This is the main scsi queue function to handle scsi opcodes */ +static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt) +{ + void (*done)(struct scsi_cmnd *) = scsi_done; + int request_id, retval; + TW_Device_Extension *tw_dev = (TW_Device_Extension 
*)SCpnt->device->host->hostdata; + + /* If we are resetting due to timed out ioctl, report as busy */ + if (test_bit(TW_IN_RESET, &tw_dev->flags)) { + retval = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + + /* Check if this FW supports luns */ + if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) { + SCpnt->result = (DID_BAD_TARGET << 16); + done(SCpnt); + retval = 0; + goto out; + } + + /* Get a free request id */ + twa_get_request_id(tw_dev, &request_id); + + /* Save the scsi command for use by the ISR */ + tw_dev->srb[request_id] = SCpnt; + + retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); + switch (retval) { + case SCSI_MLQUEUE_HOST_BUSY: + if (twa_command_mapped(SCpnt)) + scsi_dma_unmap(SCpnt); + twa_free_request_id(tw_dev, request_id); + break; + case 1: + SCpnt->result = (DID_ERROR << 16); + if (twa_command_mapped(SCpnt)) + scsi_dma_unmap(SCpnt); + done(SCpnt); + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + retval = 0; + } +out: + return retval; +} /* End twa_scsi_queue() */ + +static DEF_SCSI_QCMD(twa_scsi_queue) + +/* This function hands scsi cdb's to the firmware */ +static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, + unsigned char *cdb, int use_sg, + TW_SG_Entry *sglistarg) +{ + TW_Command_Full *full_command_packet; + TW_Command_Apache *command_packet; + u32 num_sectors = 0x0; + int i, sg_count; + struct scsi_cmnd *srb = NULL; + struct scatterlist *sg; + int retval = 1; + + if (tw_dev->srb[request_id]) + srb = tw_dev->srb[request_id]; + + /* Initialize command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + full_command_packet->header.header_desc.size_header = 128; + full_command_packet->header.status_block.error = 0; + full_command_packet->header.status_block.severity__reserved = 0; + + command_packet = &full_command_packet->command.newcommand; + command_packet->status = 0; + command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI); + + /* We forced 16 byte cdb use earlier */ + if (!cdb) + memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN); + else + memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN); + + if (srb) { + command_packet->unit = srb->device->id; + command_packet->request_id__lunl = + TW_REQ_LUN_IN(srb->device->lun, request_id); + } else { + command_packet->request_id__lunl = + TW_REQ_LUN_IN(0, request_id); + command_packet->unit = 0; + } + + command_packet->sgl_offset = 16; + + if (!sglistarg) { + /* Map sglist from scsi layer to cmd packet */ + + if (scsi_sg_count(srb)) { + if (!twa_command_mapped(srb)) { + if (srb->sc_data_direction == DMA_TO_DEVICE || + srb->sc_data_direction == DMA_BIDIRECTIONAL) + scsi_sg_copy_to_buffer(srb, + tw_dev->generic_buffer_virt[request_id], + TW_SECTOR_SIZE); + command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); + command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH); + } else { + sg_count = scsi_dma_map(srb); + if (sg_count < 0) + goto out; + + scsi_for_each_sg(srb, sg, sg_count, i) { + command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg)); + command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg)); + if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi"); + goto out; + } + } + } + command_packet->sgl_entries__lunh = TW_REQ_LUN_IN((srb->device->lun >> 4), 
scsi_sg_count(tw_dev->srb[request_id])); + } + } else { + /* Internal cdb post */ + for (i = 0; i < use_sg; i++) { + command_packet->sg_list[i].address = sglistarg[i].address; + command_packet->sg_list[i].length = sglistarg[i].length; + if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post"); + goto out; + } + } + command_packet->sgl_entries__lunh = TW_REQ_LUN_IN(0, use_sg); + } + + if (srb) { + if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6) + num_sectors = (u32)srb->cmnd[4]; + + if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10) + num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8); + } + + /* Update sector statistic */ + tw_dev->sector_count = num_sectors; + if (tw_dev->sector_count > tw_dev->max_sector_count) + tw_dev->max_sector_count = tw_dev->sector_count; + + /* Update SG statistics */ + if (srb) { + tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]); + if (tw_dev->sgl_entries > tw_dev->max_sgl_entries) + tw_dev->max_sgl_entries = tw_dev->sgl_entries; + } + + /* Now post the command to the board */ + if (srb) { + retval = twa_post_command_packet(tw_dev, request_id, 0); + } else { + twa_post_command_packet(tw_dev, request_id, 1); + retval = 0; + } +out: + return retval; +} /* End twa_scsiop_execute_scsi() */ + +/* This function completes an execute scsi operation */ +static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id) +{ + struct scsi_cmnd *cmd = tw_dev->srb[request_id]; + + if (!twa_command_mapped(cmd) && + (cmd->sc_data_direction == DMA_FROM_DEVICE || + cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { + if (scsi_sg_count(cmd) == 1) { + void *buf = tw_dev->generic_buffer_virt[request_id]; + + scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE); + } + } +} /* End twa_scsiop_execute_scsi_complete() */ + +/* This function tells the controller to shut down */ +static void __twa_shutdown(TW_Device_Extension *tw_dev) +{ + /* Disable interrupts */ + TW_DISABLE_INTERRUPTS(tw_dev); + + /* Free up the IRQ */ + free_irq(tw_dev->tw_pci_dev->irq, tw_dev); + + printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no); + + /* Tell the card we are shutting down */ + if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed"); + } else { + printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n"); + } + + /* Clear all interrupts just before exit */ + TW_CLEAR_ALL_INTERRUPTS(tw_dev); +} /* End __twa_shutdown() */ + +/* Wrapper for __twa_shutdown */ +static void twa_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + + __twa_shutdown(tw_dev); +} /* End twa_shutdown() */ + +/* This function will look up a string */ +static char *twa_string_lookup(twa_message_type *table, unsigned int code) +{ + int index; + + for (index = 0; ((code != table[index].code) && + (table[index].text != (char *)0)); index++); + return(table[index].text); +} /* End twa_string_lookup() */ + +/* This function gets called when a disk is coming on-line */ +static int twa_slave_configure(struct scsi_device *sdev) +{ + /* Force 60 second timeout */ + blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); + + return 0; +} /* End twa_slave_configure() */ + +static const struct scsi_host_template driver_template = { + .module = THIS_MODULE, + 
.name = "3ware 9000 Storage Controller", + .queuecommand = twa_scsi_queue, + .eh_host_reset_handler = twa_scsi_eh_reset, + .bios_param = twa_scsi_biosparam, + .change_queue_depth = scsi_change_queue_depth, + .can_queue = TW_Q_LENGTH-2, + .slave_configure = twa_slave_configure, + .this_id = -1, + .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH, + .max_sectors = TW_MAX_SECTORS, + .cmd_per_lun = TW_MAX_CMDS_PER_LUN, + .shost_groups = twa_host_groups, + .emulated = 1, + .no_write_same = 1, +}; + +/* This function will probe and initialize a card */ +static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) +{ + struct Scsi_Host *host = NULL; + TW_Device_Extension *tw_dev; + unsigned long mem_addr, mem_len; + int retval; + + retval = pci_enable_device(pdev); + if (retval) { + TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device"); + return -ENODEV; + } + + pci_set_master(pdev); + pci_try_set_mwi(pdev); + + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) { + TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask"); + retval = -ENODEV; + goto out_disable_device; + } + + host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension)); + if (!host) { + TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension"); + retval = -ENOMEM; + goto out_disable_device; + } + tw_dev = (TW_Device_Extension *)host->hostdata; + + /* Save values to device extension */ + tw_dev->host = host; + tw_dev->tw_pci_dev = pdev; + + if (twa_initialize_device_extension(tw_dev)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension"); + retval = -ENOMEM; + goto out_free_device_extension; + } + + /* Request IO regions */ + retval = pci_request_regions(pdev, "3w-9xxx"); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region"); + goto out_free_device_extension; + } + + if (pdev->device == PCI_DEVICE_ID_3WARE_9000) { + mem_addr = pci_resource_start(pdev, 1); + mem_len = pci_resource_len(pdev, 1); + } else { + mem_addr = pci_resource_start(pdev, 2); + mem_len = pci_resource_len(pdev, 2); + } + + /* Save base address */ + tw_dev->base_addr = ioremap(mem_addr, mem_len); + if (!tw_dev->base_addr) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap"); + retval = -ENOMEM; + goto out_release_mem_region; + } + + /* Disable interrupts on the card */ + TW_DISABLE_INTERRUPTS(tw_dev); + + /* Initialize the card */ + if (twa_reset_sequence(tw_dev, 0)) { + retval = -ENOMEM; + goto out_iounmap; + } + + /* Set host specific parameters */ + if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) || + (pdev->device == PCI_DEVICE_ID_3WARE_9690SA)) + host->max_id = TW_MAX_UNITS_9650SE; + else + host->max_id = TW_MAX_UNITS; + + host->max_cmd_len = TW_MAX_CDB_LEN; + + /* Channels aren't supported by adapter */ + host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl); + host->max_channel = 0; + + /* Register the card with the kernel SCSI layer */ + retval = scsi_add_host(host, &pdev->dev); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed"); + goto out_iounmap; + } + + pci_set_drvdata(pdev, host); + + printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n", + host->host_no, mem_addr, pdev->irq); + printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n", + host->host_no, + (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE, + 
TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH), + (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE, + TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH), + le32_to_cpu(*(__le32 *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE, + TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH))); + + /* Try to enable MSI */ + if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) && + !pci_enable_msi(pdev)) + set_bit(TW_USING_MSI, &tw_dev->flags); + + /* Now setup the interrupt handler */ + retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ"); + goto out_remove_host; + } + + twa_device_extension_list[twa_device_extension_count] = tw_dev; + twa_device_extension_count++; + + /* Re-enable interrupts on the card */ + TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); + + /* Finally, scan the host */ + scsi_scan_host(host); + + if (twa_major == -1) { + if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0) + TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device"); + } + return 0; + +out_remove_host: + if (test_bit(TW_USING_MSI, &tw_dev->flags)) + pci_disable_msi(pdev); + scsi_remove_host(host); +out_iounmap: + iounmap(tw_dev->base_addr); +out_release_mem_region: + pci_release_regions(pdev); +out_free_device_extension: + twa_free_device_extension(tw_dev); + scsi_host_put(host); +out_disable_device: + pci_disable_device(pdev); + + return retval; +} /* End twa_probe() */ + +/* This function is called to remove a device */ +static void twa_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + + scsi_remove_host(tw_dev->host); + + /* Unregister character device */ + if (twa_major >= 0) { + unregister_chrdev(twa_major, "twa"); + twa_major = -1; + } + + /* Shutdown the card */ + __twa_shutdown(tw_dev); + + /* Disable MSI if enabled */ + if (test_bit(TW_USING_MSI, &tw_dev->flags)) + pci_disable_msi(pdev); + + /* Free IO remapping */ + iounmap(tw_dev->base_addr); + + /* Free up the mem region */ + pci_release_regions(pdev); + + /* Free up device extension resources */ + twa_free_device_extension(tw_dev); + + scsi_host_put(tw_dev->host); + pci_disable_device(pdev); + twa_device_extension_count--; +} /* End twa_remove() */ + +/* This function is called on PCI suspend */ +static int __maybe_unused twa_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct Scsi_Host *host = pci_get_drvdata(pdev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + + printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no); + + TW_DISABLE_INTERRUPTS(tw_dev); + free_irq(tw_dev->tw_pci_dev->irq, tw_dev); + + if (test_bit(TW_USING_MSI, &tw_dev->flags)) + pci_disable_msi(pdev); + + /* Tell the card we are shutting down */ + if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend"); + } else { + printk(KERN_WARNING "3w-9xxx: Suspend complete.\n"); + } + TW_CLEAR_ALL_INTERRUPTS(tw_dev); + + return 0; +} /* End twa_suspend() */ + +/* This function is called on PCI resume */ +static int __maybe_unused twa_resume(struct device *dev) +{ + int retval = 0; + struct pci_dev *pdev = to_pci_dev(dev); + struct Scsi_Host *host = pci_get_drvdata(pdev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + + printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", 
tw_dev->host->host_no); + + pci_try_set_mwi(pdev); + + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) { + TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume"); + retval = -ENODEV; + goto out_disable_device; + } + + /* Initialize the card */ + if (twa_reset_sequence(tw_dev, 0)) { + retval = -ENODEV; + goto out_disable_device; + } + + /* Now setup the interrupt handler */ + retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume"); + retval = -ENODEV; + goto out_disable_device; + } + + /* Now enable MSI if enabled */ + if (test_bit(TW_USING_MSI, &tw_dev->flags)) + pci_enable_msi(pdev); + + /* Re-enable interrupts on the card */ + TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); + + printk(KERN_WARNING "3w-9xxx: Resume complete.\n"); + return 0; + +out_disable_device: + scsi_remove_host(host); + + return retval; +} /* End twa_resume() */ + +/* PCI Devices supported by this driver */ +static struct pci_device_id twa_pci_tbl[] = { + { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { } +}; +MODULE_DEVICE_TABLE(pci, twa_pci_tbl); + +static SIMPLE_DEV_PM_OPS(twa_pm_ops, twa_suspend, twa_resume); + +/* pci_driver initializer */ +static struct pci_driver twa_driver = { + .name = "3w-9xxx", + .id_table = twa_pci_tbl, + .probe = twa_probe, + .remove = twa_remove, + .driver.pm = &twa_pm_ops, + .shutdown = twa_shutdown +}; + +/* This function is called on driver initialization */ +static int __init twa_init(void) +{ + printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION); + + return pci_register_driver(&twa_driver); +} /* End twa_init() */ + +/* This function is called on driver exit */ +static void __exit twa_exit(void) +{ + pci_unregister_driver(&twa_driver); +} /* End twa_exit() */ + +module_init(twa_init); +module_exit(twa_exit); + diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h new file mode 100644 index 000000000..0b23b0422 --- /dev/null +++ b/drivers/scsi/3w-9xxx.h @@ -0,0 +1,695 @@ +/* + 3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux. + + Written By: Adam Radford + Modifications By: Tom Couch + + Copyright (C) 2004-2009 Applied Micro Circuits Corporation. + Copyright (C) 2010 LSI Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. + + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Bugs/Comments/Suggestions should be mailed to: + aradford@gmail.com +*/ + +#ifndef _3W_9XXX_H +#define _3W_9XXX_H + +/* AEN string type */ +typedef struct TAG_twa_message_type { + unsigned int code; + char *text; +} twa_message_type; + +/* AEN strings */ +static twa_message_type twa_aen_table[] = { + {0x0000, "AEN queue empty"}, + {0x0001, "Controller reset occurred"}, + {0x0002, "Degraded unit detected"}, + {0x0003, "Controller error occurred"}, + {0x0004, "Background rebuild failed"}, + {0x0005, "Background rebuild done"}, + {0x0006, "Incomplete unit detected"}, + {0x0007, "Background initialize done"}, + {0x0008, "Unclean shutdown detected"}, + {0x0009, "Drive timeout detected"}, + {0x000A, "Drive error detected"}, + {0x000B, "Rebuild started"}, + {0x000C, "Background initialize started"}, + {0x000D, "Entire logical unit was deleted"}, + {0x000E, "Background initialize failed"}, + {0x000F, "SMART attribute exceeded threshold"}, + {0x0010, "Power supply reported AC under range"}, + {0x0011, "Power supply reported DC out of range"}, + {0x0012, "Power supply reported a malfunction"}, + {0x0013, "Power supply predicted malfunction"}, + {0x0014, "Battery charge is below threshold"}, + {0x0015, "Fan speed is below threshold"}, + {0x0016, "Temperature sensor is above threshold"}, + {0x0017, "Power supply was removed"}, + {0x0018, "Power supply was inserted"}, + {0x0019, "Drive was removed from a bay"}, + {0x001A, "Drive was inserted into a bay"}, + {0x001B, "Drive bay cover door was opened"}, + {0x001C, "Drive bay cover door was closed"}, + {0x001D, "Product case was opened"}, + {0x0020, "Prepare for shutdown (power-off)"}, + {0x0021, "Downgrade UDMA mode to lower speed"}, + {0x0022, "Upgrade UDMA mode to higher speed"}, + {0x0023, "Sector repair completed"}, + {0x0024, "Sbuf memory test failed"}, + {0x0025, "Error flushing cached write data to array"}, + {0x0026, "Drive reported data ECC error"}, + {0x0027, "DCB has checksum error"}, + {0x0028, "DCB version is unsupported"}, + {0x0029, "Background verify started"}, + {0x002A, "Background verify failed"}, + {0x002B, "Background verify done"}, + {0x002C, "Bad sector overwritten during rebuild"}, + {0x002D, "Background rebuild error on source drive"}, + {0x002E, "Replace failed because replacement drive too small"}, + {0x002F, "Verify failed because array was never initialized"}, + {0x0030, "Unsupported ATA drive"}, + {0x0031, "Synchronize host/controller 
time"}, + {0x0032, "Spare capacity is inadequate for some units"}, + {0x0033, "Background migration started"}, + {0x0034, "Background migration failed"}, + {0x0035, "Background migration done"}, + {0x0036, "Verify detected and fixed data/parity mismatch"}, + {0x0037, "SO-DIMM incompatible"}, + {0x0038, "SO-DIMM not detected"}, + {0x0039, "Corrected Sbuf ECC error"}, + {0x003A, "Drive power on reset detected"}, + {0x003B, "Background rebuild paused"}, + {0x003C, "Background initialize paused"}, + {0x003D, "Background verify paused"}, + {0x003E, "Background migration paused"}, + {0x003F, "Corrupt flash file system detected"}, + {0x0040, "Flash file system repaired"}, + {0x0041, "Unit number assignments were lost"}, + {0x0042, "Error during read of primary DCB"}, + {0x0043, "Latent error found in backup DCB"}, + {0x00FC, "Recovered/finished array membership update"}, + {0x00FD, "Handler lockup"}, + {0x00FE, "Retrying PCI transfer"}, + {0x00FF, "AEN queue is full"}, + {0xFFFFFFFF, (char*) 0} +}; + +/* AEN severity table */ +static char *twa_aen_severity_table[] = +{ + "None", "ERROR", "WARNING", "INFO", "DEBUG", (char*) 0 +}; + +/* Error strings */ +static twa_message_type twa_error_table[] = { + {0x0100, "SGL entry contains zero data"}, + {0x0101, "Invalid command opcode"}, + {0x0102, "SGL entry has unaligned address"}, + {0x0103, "SGL size does not match command"}, + {0x0104, "SGL entry has illegal length"}, + {0x0105, "Command packet is not aligned"}, + {0x0106, "Invalid request ID"}, + {0x0107, "Duplicate request ID"}, + {0x0108, "ID not locked"}, + {0x0109, "LBA out of range"}, + {0x010A, "Logical unit not supported"}, + {0x010B, "Parameter table does not exist"}, + {0x010C, "Parameter index does not exist"}, + {0x010D, "Invalid field in CDB"}, + {0x010E, "Specified port has invalid drive"}, + {0x010F, "Parameter item size mismatch"}, + {0x0110, "Failed memory allocation"}, + {0x0111, "Memory request too large"}, + {0x0112, "Out of memory segments"}, + {0x0113, "Invalid address to deallocate"}, + {0x0114, "Out of memory"}, + {0x0115, "Out of heap"}, + {0x0120, "Double degrade"}, + {0x0121, "Drive not degraded"}, + {0x0122, "Reconstruct error"}, + {0x0123, "Replace not accepted"}, + {0x0124, "Replace drive capacity too small"}, + {0x0125, "Sector count not allowed"}, + {0x0126, "No spares left"}, + {0x0127, "Reconstruct error"}, + {0x0128, "Unit is offline"}, + {0x0129, "Cannot update status to DCB"}, + {0x0130, "Invalid stripe handle"}, + {0x0131, "Handle that was not locked"}, + {0x0132, "Handle that was not empty"}, + {0x0133, "Handle has different owner"}, + {0x0140, "IPR has parent"}, + {0x0150, "Illegal Pbuf address alignment"}, + {0x0151, "Illegal Pbuf transfer length"}, + {0x0152, "Illegal Sbuf address alignment"}, + {0x0153, "Illegal Sbuf transfer length"}, + {0x0160, "Command packet too large"}, + {0x0161, "SGL exceeds maximum length"}, + {0x0162, "SGL has too many entries"}, + {0x0170, "Insufficient resources for rebuilder"}, + {0x0171, "Verify error (data != parity)"}, + {0x0180, "Requested segment not in directory of this DCB"}, + {0x0181, "DCB segment has unsupported version"}, + {0x0182, "DCB segment has checksum error"}, + {0x0183, "DCB support (settings) segment invalid"}, + {0x0184, "DCB UDB (unit descriptor block) segment invalid"}, + {0x0185, "DCB GUID (globally unique identifier) segment invalid"}, + {0x01A0, "Could not clear Sbuf"}, + {0x01C0, "Flash identify failed"}, + {0x01C1, "Flash out of bounds"}, + {0x01C2, "Flash verify error"}, + {0x01C3, "Flash file object 
not found"}, + {0x01C4, "Flash file already present"}, + {0x01C5, "Flash file system full"}, + {0x01C6, "Flash file not present"}, + {0x01C7, "Flash file size error"}, + {0x01C8, "Bad flash file checksum"}, + {0x01CA, "Corrupt flash file system detected"}, + {0x01D0, "Invalid field in parameter list"}, + {0x01D1, "Parameter list length error"}, + {0x01D2, "Parameter item is not changeable"}, + {0x01D3, "Parameter item is not saveable"}, + {0x0200, "UDMA CRC error"}, + {0x0201, "Internal CRC error"}, + {0x0202, "Data ECC error"}, + {0x0203, "ADP level 1 error"}, + {0x0204, "Port timeout"}, + {0x0205, "Drive power on reset"}, + {0x0206, "ADP level 2 error"}, + {0x0207, "Soft reset failed"}, + {0x0208, "Drive not ready"}, + {0x0209, "Unclassified port error"}, + {0x020A, "Drive aborted command"}, + {0x0210, "Internal CRC error"}, + {0x0211, "PCI abort error"}, + {0x0212, "PCI parity error"}, + {0x0213, "Port handler error"}, + {0x0214, "Token interrupt count error"}, + {0x0215, "Timeout waiting for PCI transfer"}, + {0x0216, "Corrected buffer ECC"}, + {0x0217, "Uncorrected buffer ECC"}, + {0x0230, "Unsupported command during flash recovery"}, + {0x0231, "Next image buffer expected"}, + {0x0232, "Binary image architecture incompatible"}, + {0x0233, "Binary image has no signature"}, + {0x0234, "Binary image has bad checksum"}, + {0x0235, "Image downloaded overflowed buffer"}, + {0x0240, "I2C device not found"}, + {0x0241, "I2C transaction aborted"}, + {0x0242, "SO-DIMM parameter(s) incompatible using defaults"}, + {0x0243, "SO-DIMM unsupported"}, + {0x0248, "SPI transfer status error"}, + {0x0249, "SPI transfer timeout error"}, + {0x0250, "Invalid unit descriptor size in CreateUnit"}, + {0x0251, "Unit descriptor size exceeds data buffer in CreateUnit"}, + {0x0252, "Invalid value in CreateUnit descriptor"}, + {0x0253, "Inadequate disk space to support descriptor in CreateUnit"}, + {0x0254, "Unable to create data channel for this unit descriptor"}, + {0x0255, "CreateUnit descriptor specifies a drive already in use"}, + {0x0256, "Unable to write configuration to all disks during CreateUnit"}, + {0x0257, "CreateUnit does not support this descriptor version"}, + {0x0258, "Invalid subunit for RAID 0 or 5 in CreateUnit"}, + {0x0259, "Too many descriptors in CreateUnit"}, + {0x025A, "Invalid configuration specified in CreateUnit descriptor"}, + {0x025B, "Invalid LBA offset specified in CreateUnit descriptor"}, + {0x025C, "Invalid stripelet size specified in CreateUnit descriptor"}, + {0x0260, "SMART attribute exceeded threshold"}, + {0xFFFFFFFF, (char*) 0} +}; + +/* Control register bit definitions */ +#define TW_CONTROL_CLEAR_HOST_INTERRUPT 0x00080000 +#define TW_CONTROL_CLEAR_ATTENTION_INTERRUPT 0x00040000 +#define TW_CONTROL_MASK_COMMAND_INTERRUPT 0x00020000 +#define TW_CONTROL_MASK_RESPONSE_INTERRUPT 0x00010000 +#define TW_CONTROL_UNMASK_COMMAND_INTERRUPT 0x00008000 +#define TW_CONTROL_UNMASK_RESPONSE_INTERRUPT 0x00004000 +#define TW_CONTROL_CLEAR_ERROR_STATUS 0x00000200 +#define TW_CONTROL_ISSUE_SOFT_RESET 0x00000100 +#define TW_CONTROL_ENABLE_INTERRUPTS 0x00000080 +#define TW_CONTROL_DISABLE_INTERRUPTS 0x00000040 +#define TW_CONTROL_ISSUE_HOST_INTERRUPT 0x00000020 +#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000 +#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000 +#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000 + +/* Status register bit definitions */ +#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000 +#define TW_STATUS_MINOR_VERSION_MASK 0x0F000000 +#define TW_STATUS_PCI_PARITY_ERROR 0x00800000 
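/*
 * Editor's note -- illustrative only, not part of the driver or of this
 * patch.  The composite masks defined a few lines below are plain unions
 * of the individual status bits, i.e.
 *
 *	TW_STATUS_VALID_INTERRUPT  (0x00DF0000)
 *		== PCI_PARITY_ERROR | QUEUE_ERROR | PCI_ABORT
 *		 | HOST_INTERRUPT | ATTENTION_INTERRUPT
 *		 | COMMAND_INTERRUPT | RESPONSE_INTERRUPT
 *
 *	TW_STATUS_UNEXPECTED_BITS  (0x00F00000)
 *		== PCI_PARITY_ERROR | QUEUE_ERROR
 *		 | MICROCONTROLLER_ERROR | PCI_ABORT
 *
 * which is why twa_interrupt() can decide whether an interrupt belongs to
 * this adapter with a single mask test of the status register.
 */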
+#define TW_STATUS_QUEUE_ERROR 0x00400000 +#define TW_STATUS_MICROCONTROLLER_ERROR 0x00200000 +#define TW_STATUS_PCI_ABORT 0x00100000 +#define TW_STATUS_HOST_INTERRUPT 0x00080000 +#define TW_STATUS_ATTENTION_INTERRUPT 0x00040000 +#define TW_STATUS_COMMAND_INTERRUPT 0x00020000 +#define TW_STATUS_RESPONSE_INTERRUPT 0x00010000 +#define TW_STATUS_COMMAND_QUEUE_FULL 0x00008000 +#define TW_STATUS_RESPONSE_QUEUE_EMPTY 0x00004000 +#define TW_STATUS_MICROCONTROLLER_READY 0x00002000 +#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000 +#define TW_STATUS_EXPECTED_BITS 0x00002000 +#define TW_STATUS_UNEXPECTED_BITS 0x00F00000 +#define TW_STATUS_VALID_INTERRUPT 0x00DF0000 + +/* PCI related defines */ +#define TW_PCI_CLEAR_PARITY_ERRORS 0xc100 +#define TW_PCI_CLEAR_PCI_ABORT 0x2000 + +/* Command packet opcodes used by the driver */ +#define TW_OP_INIT_CONNECTION 0x1 +#define TW_OP_GET_PARAM 0x12 +#define TW_OP_SET_PARAM 0x13 +#define TW_OP_EXECUTE_SCSI 0x10 +#define TW_OP_DOWNLOAD_FIRMWARE 0x16 +#define TW_OP_RESET 0x1C + +/* Asynchronous Event Notification (AEN) codes used by the driver */ +#define TW_AEN_QUEUE_EMPTY 0x0000 +#define TW_AEN_SOFT_RESET 0x0001 +#define TW_AEN_SYNC_TIME_WITH_HOST 0x031 +#define TW_AEN_SEVERITY_ERROR 0x1 +#define TW_AEN_SEVERITY_DEBUG 0x4 +#define TW_AEN_NOT_RETRIEVED 0x1 +#define TW_AEN_RETRIEVED 0x2 + +/* Command state defines */ +#define TW_S_INITIAL 0x1 /* Initial state */ +#define TW_S_STARTED 0x2 /* Id in use */ +#define TW_S_POSTED 0x4 /* Posted to the controller */ +#define TW_S_PENDING 0x8 /* Waiting to be posted in isr */ +#define TW_S_COMPLETED 0x10 /* Completed by isr */ +#define TW_S_FINISHED 0x20 /* I/O completely done */ + +/* Compatibility defines */ +#define TW_9000_ARCH_ID 0x5 +#define TW_CURRENT_DRIVER_SRL 35 +#define TW_CURRENT_DRIVER_BUILD 0 +#define TW_CURRENT_DRIVER_BRANCH 0 + +/* Misc defines */ +#define TW_9550SX_DRAIN_COMPLETED 0xFFFF +#define TW_SECTOR_SIZE 512 +#define TW_ALIGNMENT_9000 4 /* 4 bytes */ +#define TW_ALIGNMENT_9000_SGL 0x3 +#define TW_MAX_UNITS 16 +#define TW_MAX_UNITS_9650SE 32 +#define TW_INIT_MESSAGE_CREDITS 0x100 +#define TW_INIT_COMMAND_PACKET_SIZE 0x3 +#define TW_INIT_COMMAND_PACKET_SIZE_EXTENDED 0x6 +#define TW_EXTENDED_INIT_CONNECT 0x2 +#define TW_BUNDLED_FW_SAFE_TO_FLASH 0x4 +#define TW_CTLR_FW_RECOMMENDS_FLASH 0x8 +#define TW_CTLR_FW_COMPATIBLE 0x2 +#define TW_BASE_FW_SRL 24 +#define TW_BASE_FW_BRANCH 0 +#define TW_BASE_FW_BUILD 1 +#define TW_FW_SRL_LUNS_SUPPORTED 28 +#define TW_Q_LENGTH 256 +#define TW_Q_START 0 +#define TW_MAX_SLOT 32 +#define TW_MAX_RESET_TRIES 2 +#define TW_MAX_CMDS_PER_LUN 254 +#define TW_MAX_RESPONSE_DRAIN 256 +#define TW_MAX_AEN_DRAIN 255 +#define TW_IN_RESET 2 +#define TW_USING_MSI 3 +#define TW_IN_ATTENTION_LOOP 4 +#define TW_MAX_SECTORS 256 +#define TW_AEN_WAIT_TIME 1000 +#define TW_IOCTL_WAIT_TIME (1 * HZ) /* 1 second */ +#define TW_MAX_CDB_LEN 16 +#define TW_ISR_DONT_COMPLETE 2 +#define TW_ISR_DONT_RESULT 3 +#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */ +#define TW_IOCTL_CHRDEV_FREE -1 +#define TW_COMMAND_OFFSET 128 /* 128 bytes */ +#define TW_VERSION_TABLE 0x0402 +#define TW_TIMEKEEP_TABLE 0x040A +#define TW_INFORMATION_TABLE 0x0403 +#define TW_PARAM_FWVER 3 +#define TW_PARAM_FWVER_LENGTH 16 +#define TW_PARAM_BIOSVER 4 +#define TW_PARAM_BIOSVER_LENGTH 16 +#define TW_PARAM_PORTCOUNT 3 +#define TW_PARAM_PORTCOUNT_LENGTH 1 +#define TW_MIN_SGL_LENGTH 0x200 /* 512 bytes */ +#define TW_MAX_SENSE_LENGTH 256 +#define TW_EVENT_SOURCE_AEN 0x1000 +#define TW_EVENT_SOURCE_COMMAND 0x1001 +#define 
TW_EVENT_SOURCE_PCHIP 0x1002 +#define TW_EVENT_SOURCE_DRIVER 0x1003 +#define TW_IOCTL_GET_COMPATIBILITY_INFO 0x101 +#define TW_IOCTL_GET_LAST_EVENT 0x102 +#define TW_IOCTL_GET_FIRST_EVENT 0x103 +#define TW_IOCTL_GET_NEXT_EVENT 0x104 +#define TW_IOCTL_GET_PREVIOUS_EVENT 0x105 +#define TW_IOCTL_GET_LOCK 0x106 +#define TW_IOCTL_RELEASE_LOCK 0x107 +#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 +#define TW_IOCTL_ERROR_STATUS_NOT_LOCKED 0x1001 // Not locked +#define TW_IOCTL_ERROR_STATUS_LOCKED 0x1002 // Already locked +#define TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS 0x1003 // No more events +#define TW_IOCTL_ERROR_STATUS_AEN_CLOBBER 0x1004 // AEN clobber occurred +#define TW_IOCTL_ERROR_OS_EFAULT -EFAULT // Bad address +#define TW_IOCTL_ERROR_OS_EINTR -EINTR // Interrupted system call +#define TW_IOCTL_ERROR_OS_EINVAL -EINVAL // Invalid argument +#define TW_IOCTL_ERROR_OS_ENOMEM -ENOMEM // Out of memory +#define TW_IOCTL_ERROR_OS_ERESTARTSYS -ERESTARTSYS // Restart system call +#define TW_IOCTL_ERROR_OS_EIO -EIO // I/O error +#define TW_IOCTL_ERROR_OS_ENOTTY -ENOTTY // Not a typewriter +#define TW_IOCTL_ERROR_OS_ENODEV -ENODEV // No such device +#define TW_ALLOCATION_LENGTH 128 +#define TW_SENSE_DATA_LENGTH 18 +#define TW_STATUS_CHECK_CONDITION 2 +#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a +#define TW_ERROR_UNIT_OFFLINE 0x128 +#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3 +#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4 +#define TW_MESSAGE_SOURCE_LINUX_DRIVER 6 +#define TW_DRIVER TW_MESSAGE_SOURCE_LINUX_DRIVER +#define TW_MESSAGE_SOURCE_LINUX_OS 9 +#define TW_OS TW_MESSAGE_SOURCE_LINUX_OS +#ifndef PCI_DEVICE_ID_3WARE_9000 +#define PCI_DEVICE_ID_3WARE_9000 0x1002 +#endif +#ifndef PCI_DEVICE_ID_3WARE_9550SX +#define PCI_DEVICE_ID_3WARE_9550SX 0x1003 +#endif +#ifndef PCI_DEVICE_ID_3WARE_9650SE +#define PCI_DEVICE_ID_3WARE_9650SE 0x1004 +#endif +#ifndef PCI_DEVICE_ID_3WARE_9690SA +#define PCI_DEVICE_ID_3WARE_9690SA 0x1005 +#endif + +/* Bitmask macros to eliminate bitfields */ + +/* opcode: 5, reserved: 3 */ +#define TW_OPRES_IN(x,y) ((x << 5) | (y & 0x1f)) +#define TW_OP_OUT(x) (x & 0x1f) + +/* opcode: 5, sgloffset: 3 */ +#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f)) +#define TW_SGL_OUT(x) ((x >> 5) & 0x7) + +/* severity: 3, reserved: 5 */ +#define TW_SEV_OUT(x) (x & 0x7) + +/* reserved_1: 4, response_id: 8, reserved_2: 20 */ +#define TW_RESID_OUT(x) ((x >> 4) & 0xff) + +/* request_id: 12, lun: 4 */ +#define TW_REQ_LUN_IN(lun, request_id) \ + cpu_to_le16(((lun << 12) & 0xf000) | (request_id & 0xfff)) +#define TW_LUN_OUT(lun) ((le16_to_cpu(lun) >> 12) & 0xf) + +/* Macros */ +#define TW_CONTROL_REG_ADDR(x) (x->base_addr) +#define TW_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0x4) +#define TW_COMMAND_QUEUE_REG_ADDR(x) \ + (sizeof(dma_addr_t) > 4 ? 
((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8)) +#define TW_COMMAND_QUEUE_REG_ADDR_LARGE(x) \ + ((unsigned char __iomem *)x->base_addr + 0x20) +#define TW_RESPONSE_QUEUE_REG_ADDR(x) \ + ((unsigned char __iomem *)x->base_addr + 0xC) +#define TW_RESPONSE_QUEUE_REG_ADDR_LARGE(x) \ + ((unsigned char __iomem *)x->base_addr + 0x30) +#define TW_CLEAR_ALL_INTERRUPTS(x) \ + (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x))) +#define TW_CLEAR_ATTENTION_INTERRUPT(x) \ + (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x))) +#define TW_CLEAR_HOST_INTERRUPT(x) \ + (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x))) +#define TW_DISABLE_INTERRUPTS(x) \ + (writel(TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x))) +#define TW_ENABLE_AND_CLEAR_INTERRUPTS(x) \ + (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \ + TW_CONTROL_UNMASK_RESPONSE_INTERRUPT | \ + TW_CONTROL_ENABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x))) +#define TW_MASK_COMMAND_INTERRUPT(x) \ + (writel(TW_CONTROL_MASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x))) +#define TW_UNMASK_COMMAND_INTERRUPT(x) \ + (writel(TW_CONTROL_UNMASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x))) +#define TW_SOFT_RESET(x) (writel(TW_CONTROL_ISSUE_SOFT_RESET | \ + TW_CONTROL_CLEAR_HOST_INTERRUPT | \ + TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \ + TW_CONTROL_MASK_COMMAND_INTERRUPT | \ + TW_CONTROL_MASK_RESPONSE_INTERRUPT | \ + TW_CONTROL_CLEAR_ERROR_STATUS | \ + TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x))) +#define TW_PRINTK(h,a,b,c) { \ +if (h) \ +printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \ +else \ +printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \ +} +#define TW_MAX_LUNS(srl) (srl < TW_FW_SRL_LUNS_SUPPORTED ? 1 : 16) +#define TW_COMMAND_SIZE (sizeof(dma_addr_t) > 4 ? 5 : 4) +#define TW_APACHE_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 72 : 109) +#define TW_ESCALADE_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 41 : 62) +#define TW_PADDING_LENGTH (sizeof(dma_addr_t) > 4 ? 
8 : 0) + +#if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) +typedef __le64 twa_addr_t; +#define TW_CPU_TO_SGL(x) cpu_to_le64(x) +#else +typedef __le32 twa_addr_t; +#define TW_CPU_TO_SGL(x) cpu_to_le32(x) +#endif + +/* Scatter Gather List Entry */ +typedef struct TAG_TW_SG_Entry { + twa_addr_t address; + __le32 length; +} __packed TW_SG_Entry; + +/* Command Packet */ +typedef struct TW_Command { + u8 opcode__sgloffset; + u8 size; + u8 request_id; + u8 unit__hostid; + /* Second DWORD */ + u8 status; + u8 flags; + union { + __le16 block_count; + __le16 parameter_count; + } byte6_offset; + union { + struct { + __le32 lba; + TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH]; + twa_addr_t padding; + } io; + struct { + TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH]; + __le32 padding; + twa_addr_t padding2; + } param; + } byte8_offset; +} TW_Command; + +/* Command Packet for 9000+ controllers */ +typedef struct TAG_TW_Command_Apache { + u8 opcode__reserved; + u8 unit; + __le16 request_id__lunl; + u8 status; + u8 sgl_offset; + __le16 sgl_entries__lunh; + u8 cdb[16]; + TW_SG_Entry sg_list[TW_APACHE_MAX_SGL_LENGTH]; + u8 padding[TW_PADDING_LENGTH]; +} TW_Command_Apache; + +/* New command packet header */ +typedef struct TAG_TW_Command_Apache_Header { + unsigned char sense_data[TW_SENSE_DATA_LENGTH]; + struct { + u8 reserved[4]; + __le16 error; + u8 padding; + u8 severity__reserved; + } status_block; + unsigned char err_specific_desc[98]; + struct { + u8 size_header; + u8 reserved[2]; + u8 size_sense; + } header_desc; +} TW_Command_Apache_Header; + +/* This struct is a union of the 2 command packets */ +typedef struct TAG_TW_Command_Full { + TW_Command_Apache_Header header; + union { + TW_Command oldcommand; + TW_Command_Apache newcommand; + } command; +} TW_Command_Full; + +/* Initconnection structure */ +typedef struct TAG_TW_Initconnect { + u8 opcode__reserved; + u8 size; + u8 request_id; + u8 res2; + u8 status; + u8 flags; + __le16 message_credits; + __le32 features; + __le16 fw_srl; + __le16 fw_arch_id; + __le16 fw_branch; + __le16 fw_build; + __le32 result; +} TW_Initconnect; + +/* Event info structure */ +typedef struct TAG_TW_Event +{ + unsigned int sequence_id; + unsigned int time_stamp_sec; + unsigned short aen_code; + unsigned char severity; + unsigned char retrieved; + unsigned char repeat_count; + unsigned char parameter_len; + unsigned char parameter_data[98]; +} TW_Event; + +typedef struct TAG_TW_Ioctl_Driver_Command { + unsigned int control_code; + unsigned int status; + unsigned int unique_id; + unsigned int sequence_id; + unsigned int os_specific; + unsigned int buffer_length; +} TW_Ioctl_Driver_Command; + +typedef struct TAG_TW_Ioctl_Apache { + TW_Ioctl_Driver_Command driver_command; + char padding[488]; + TW_Command_Full firmware_command; + char data_buffer[]; +} TW_Ioctl_Buf_Apache; + +/* Lock structure for ioctl get/release lock */ +typedef struct TAG_TW_Lock { + unsigned long timeout_msec; + unsigned long time_remaining_msec; + unsigned long force_flag; +} TW_Lock; + +/* GetParam descriptor */ +typedef struct { + __le16 table_id; + __le16 parameter_id; + __le16 parameter_size_bytes; + __le16 actual_parameter_size_bytes; + u8 data[]; +} TW_Param_Apache, *PTW_Param_Apache; + +/* Response queue */ +typedef union TAG_TW_Response_Queue { + u32 response_id; + u32 value; +} TW_Response_Queue; + +/* Compatibility information structure */ +typedef struct TAG_TW_Compatibility_Info +{ + char driver_version[32]; + unsigned short working_srl; + unsigned short working_branch; + unsigned short 
working_build; + unsigned short driver_srl_high; + unsigned short driver_branch_high; + unsigned short driver_build_high; + unsigned short driver_srl_low; + unsigned short driver_branch_low; + unsigned short driver_build_low; + unsigned short fw_on_ctlr_srl; + unsigned short fw_on_ctlr_branch; + unsigned short fw_on_ctlr_build; +} TW_Compatibility_Info; + +typedef struct TAG_TW_Device_Extension { + u32 __iomem *base_addr; + unsigned long *generic_buffer_virt[TW_Q_LENGTH]; + dma_addr_t generic_buffer_phys[TW_Q_LENGTH]; + TW_Command_Full *command_packet_virt[TW_Q_LENGTH]; + dma_addr_t command_packet_phys[TW_Q_LENGTH]; + struct pci_dev *tw_pci_dev; + struct scsi_cmnd *srb[TW_Q_LENGTH]; + unsigned char free_queue[TW_Q_LENGTH]; + unsigned char free_head; + unsigned char free_tail; + unsigned char pending_queue[TW_Q_LENGTH]; + unsigned char pending_head; + unsigned char pending_tail; + int state[TW_Q_LENGTH]; + unsigned int posted_request_count; + unsigned int max_posted_request_count; + unsigned int pending_request_count; + unsigned int max_pending_request_count; + unsigned int max_sgl_entries; + unsigned int sgl_entries; + unsigned int num_resets; + unsigned int sector_count; + unsigned int max_sector_count; + unsigned int aen_count; + struct Scsi_Host *host; + long flags; + int reset_print; + TW_Event *event_queue[TW_Q_LENGTH]; + unsigned char error_index; + unsigned char event_queue_wrapped; + unsigned int error_sequence_id; + int ioctl_sem_lock; + ktime_t ioctl_time; + int chrdev_request_id; + wait_queue_head_t ioctl_wqueue; + struct mutex ioctl_lock; + char aen_clobber; + TW_Compatibility_Info tw_compat_info; +} TW_Device_Extension; + +#endif /* _3W_9XXX_H */ + diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c new file mode 100644 index 000000000..55989eaa2 --- /dev/null +++ b/drivers/scsi/3w-sas.c @@ -0,0 +1,1857 @@ +/* + 3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux. + + Written By: Adam Radford + + Copyright (C) 2009 LSI Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. 
+ + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Controllers supported by this driver: + + LSI 3ware 9750 6Gb/s SAS/SATA-RAID + + Bugs/Comments/Suggestions should be mailed to: + aradford@gmail.com + + History + ------- + 3.26.02.000 - Initial driver release. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "3w-sas.h" + +/* Globals */ +#define TW_DRIVER_VERSION "3.26.02.000" +static DEFINE_MUTEX(twl_chrdev_mutex); +static TW_Device_Extension *twl_device_extension_list[TW_MAX_SLOT]; +static unsigned int twl_device_extension_count; +static int twl_major = -1; +extern struct timezone sys_tz; + +/* Module parameters */ +MODULE_AUTHOR ("LSI"); +MODULE_DESCRIPTION ("LSI 3ware SAS/SATA-RAID Linux Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(TW_DRIVER_VERSION); + +static int use_msi; +module_param(use_msi, int, S_IRUGO); +MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0"); + +/* Function prototypes */ +static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset); + +/* Functions */ + +/* This function returns AENs through sysfs */ +static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *outbuf, loff_t offset, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct Scsi_Host *shost = class_to_shost(dev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata; + unsigned long flags = 0; + ssize_t ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + spin_lock_irqsave(tw_dev->host->host_lock, flags); + ret = memory_read_from_buffer(outbuf, count, &offset, tw_dev->event_queue[0], sizeof(TW_Event) * TW_Q_LENGTH); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + + return ret; +} /* End twl_sysfs_aen_read() */ + +/* aen_read sysfs attribute initializer */ +static struct bin_attribute twl_sysfs_aen_read_attr = { + .attr = { + .name = "3ware_aen_read", + .mode = S_IRUSR, + }, + .size = 0, + .read = twl_sysfs_aen_read +}; + +/* This function returns driver compatibility info through sysfs */ +static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *outbuf, loff_t offset, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct Scsi_Host *shost = class_to_shost(dev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata; + unsigned long flags = 0; + ssize_t ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + spin_lock_irqsave(tw_dev->host->host_lock, flags); + ret = memory_read_from_buffer(outbuf, count, &offset, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info)); + 
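+	/*
+	 * Editor's note (added, not part of the original source): this binary
+	 * attribute hangs off the Scsi_Host class device, so it is expected to
+	 * appear as /sys/class/scsi_host/host<N>/3ware_compat_info (the exact
+	 * path is an assumption for illustration). A quick way to dump it from
+	 * userspace, assuming that path:
+	 *
+	 *   hexdump -C /sys/class/scsi_host/host0/3ware_compat_info
+	 *
+	 * memory_read_from_buffer() clamps offset/count against the structure
+	 * size, so short or repeated reads need no extra bookkeeping here.
+	 */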
spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + + return ret; +} /* End twl_sysfs_compat_info() */ + +/* compat_info sysfs attribute initializer */ +static struct bin_attribute twl_sysfs_compat_info_attr = { + .attr = { + .name = "3ware_compat_info", + .mode = S_IRUSR, + }, + .size = 0, + .read = twl_sysfs_compat_info +}; + +/* Show some statistics about the card */ +static ssize_t twl_show_stats(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + unsigned long flags = 0; + ssize_t len; + + spin_lock_irqsave(tw_dev->host->host_lock, flags); + len = snprintf(buf, PAGE_SIZE, "3w-sas Driver version: %s\n" + "Current commands posted: %4d\n" + "Max commands posted: %4d\n" + "Last sgl length: %4d\n" + "Max sgl length: %4d\n" + "Last sector count: %4d\n" + "Max sector count: %4d\n" + "SCSI Host Resets: %4d\n" + "AEN's: %4d\n", + TW_DRIVER_VERSION, + tw_dev->posted_request_count, + tw_dev->max_posted_request_count, + tw_dev->sgl_entries, + tw_dev->max_sgl_entries, + tw_dev->sector_count, + tw_dev->max_sector_count, + tw_dev->num_resets, + tw_dev->aen_count); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + return len; +} /* End twl_show_stats() */ + +/* stats sysfs attribute initializer */ +static struct device_attribute twl_host_stats_attr = { + .attr = { + .name = "3ware_stats", + .mode = S_IRUGO, + }, + .show = twl_show_stats +}; + +/* Host attributes initializer */ +static struct attribute *twl_host_attrs[] = { + &twl_host_stats_attr.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(twl_host); + +/* This function will look up an AEN severity string */ +static char *twl_aen_severity_lookup(unsigned char severity_code) +{ + char *retval = NULL; + + if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) || + (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG)) + goto out; + + retval = twl_aen_severity_table[severity_code]; +out: + return retval; +} /* End twl_aen_severity_lookup() */ + +/* This function will queue an event */ +static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header) +{ + u32 local_time; + TW_Event *event; + unsigned short aen; + char host[16]; + char *error_str; + + tw_dev->aen_count++; + + /* Fill out event info */ + event = tw_dev->event_queue[tw_dev->error_index]; + + host[0] = '\0'; + if (tw_dev->host) + sprintf(host, " scsi%d:", tw_dev->host->host_no); + + aen = le16_to_cpu(header->status_block.error); + memset(event, 0, sizeof(TW_Event)); + + event->severity = TW_SEV_OUT(header->status_block.severity__reserved); + /* event->time_stamp_sec overflows in y2106 */ + local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60)); + event->time_stamp_sec = local_time; + event->aen_code = aen; + event->retrieved = TW_AEN_NOT_RETRIEVED; + event->sequence_id = tw_dev->error_sequence_id; + tw_dev->error_sequence_id++; + + /* Check for embedded error string */ + error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]); + + header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0'; + event->parameter_len = strlen(header->err_specific_desc); + memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + 1 + strlen(error_str)); + if (event->severity != TW_AEN_SEVERITY_DEBUG) + printk(KERN_WARNING "3w-sas:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n", + host, + twl_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)), + 
TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, error_str, + header->err_specific_desc); + else + tw_dev->aen_count--; + + tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH; +} /* End twl_aen_queue_event() */ + +/* This function will attempt to post a command packet to the board */ +static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id) +{ + dma_addr_t command_que_value; + + command_que_value = tw_dev->command_packet_phys[request_id]; + command_que_value += TW_COMMAND_OFFSET; + + /* First write upper 4 bytes */ + writel((u32)((u64)command_que_value >> 32), TWL_HIBQPH_REG_ADDR(tw_dev)); + /* Then the lower 4 bytes */ + writel((u32)(command_que_value | TWL_PULL_MODE), TWL_HIBQPL_REG_ADDR(tw_dev)); + + tw_dev->state[request_id] = TW_S_POSTED; + tw_dev->posted_request_count++; + if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) + tw_dev->max_posted_request_count = tw_dev->posted_request_count; + + return 0; +} /* End twl_post_command_packet() */ + +/* This function hands scsi cdb's to the firmware */ +static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, + unsigned char *cdb, int use_sg, + TW_SG_Entry_ISO *sglistarg) +{ + TW_Command_Full *full_command_packet; + TW_Command_Apache *command_packet; + int i, sg_count; + struct scsi_cmnd *srb = NULL; + struct scatterlist *sg; + int retval = 1; + + if (tw_dev->srb[request_id]) + srb = tw_dev->srb[request_id]; + + /* Initialize command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + full_command_packet->header.header_desc.size_header = 128; + full_command_packet->header.status_block.error = 0; + full_command_packet->header.status_block.severity__reserved = 0; + + command_packet = &full_command_packet->command.newcommand; + command_packet->status = 0; + command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI); + + /* We forced 16 byte cdb use earlier */ + if (!cdb) + memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN); + else + memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN); + + if (srb) { + command_packet->unit = srb->device->id; + command_packet->request_id__lunl = + cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id)); + } else { + command_packet->request_id__lunl = + cpu_to_le16(TW_REQ_LUN_IN(0, request_id)); + command_packet->unit = 0; + } + + command_packet->sgl_offset = 16; + + if (!sglistarg) { + /* Map sglist from scsi layer to cmd packet */ + if (scsi_sg_count(srb)) { + sg_count = scsi_dma_map(srb); + if (sg_count <= 0) + goto out; + + scsi_for_each_sg(srb, sg, sg_count, i) { + command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg)); + command_packet->sg_list[i].length = TW_CPU_TO_SGL(sg_dma_len(sg)); + } + command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id]))); + } + } else { + /* Internal cdb post */ + for (i = 0; i < use_sg; i++) { + command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address); + command_packet->sg_list[i].length = TW_CPU_TO_SGL(sglistarg[i].length); + } + command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg)); + } + + /* Update some stats */ + if (srb) { + tw_dev->sector_count = scsi_bufflen(srb) / 512; + if (tw_dev->sector_count > tw_dev->max_sector_count) + tw_dev->max_sector_count = tw_dev->sector_count; + tw_dev->sgl_entries = scsi_sg_count(srb); + if (tw_dev->sgl_entries > tw_dev->max_sgl_entries) + tw_dev->max_sgl_entries = tw_dev->sgl_entries; + } + + /* Now post the 
command to the board */ + retval = twl_post_command_packet(tw_dev, request_id); + +out: + return retval; +} /* End twl_scsiop_execute_scsi() */ + +/* This function will read the aen queue from the isr */ +static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id) +{ + unsigned char cdb[TW_MAX_CDB_LEN]; + TW_SG_Entry_ISO sglist[1]; + TW_Command_Full *full_command_packet; + int retval = 1; + + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + + /* Initialize cdb */ + memset(&cdb, 0, TW_MAX_CDB_LEN); + cdb[0] = REQUEST_SENSE; /* opcode */ + cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ + + /* Initialize sglist */ + memset(&sglist, 0, sizeof(TW_SG_Entry_ISO)); + sglist[0].length = TW_SECTOR_SIZE; + sglist[0].address = tw_dev->generic_buffer_phys[request_id]; + + /* Mark internal command */ + tw_dev->srb[request_id] = NULL; + + /* Now post the command packet */ + if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Post failed while reading AEN queue"); + goto out; + } + retval = 0; +out: + return retval; +} /* End twl_aen_read_queue() */ + +/* This function will sync firmware time with the host time */ +static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id) +{ + u32 schedulertime; + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Param_Apache *param; + time64_t local_time; + + /* Fill out the command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + command_packet = &full_command_packet->command.oldcommand; + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM); + command_packet->request_id = request_id; + command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); + command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE); + command_packet->size = TW_COMMAND_SIZE; + command_packet->byte6_offset.parameter_count = cpu_to_le16(1); + + /* Setup the param */ + param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; + memset(param, 0, TW_SECTOR_SIZE); + param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */ + param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */ + param->parameter_size_bytes = cpu_to_le16(4); + + /* Convert system time in UTC to local time seconds since last + Sunday 12:00AM */ + local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60)); + div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime); + schedulertime = cpu_to_le32(schedulertime); + + memcpy(param->data, &schedulertime, sizeof(u32)); + + /* Mark internal command */ + tw_dev->srb[request_id] = NULL; + + /* Now post the command */ + twl_post_command_packet(tw_dev, request_id); +} /* End twl_aen_sync_time() */ + +/* This function will assign an available request id */ +static void twl_get_request_id(TW_Device_Extension *tw_dev, int *request_id) +{ + *request_id = tw_dev->free_queue[tw_dev->free_head]; + tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH; + tw_dev->state[*request_id] = TW_S_STARTED; +} /* End twl_get_request_id() */ + +/* This function will free a request id */ +static void twl_free_request_id(TW_Device_Extension *tw_dev, int request_id) +{ + tw_dev->free_queue[tw_dev->free_tail] = request_id; + tw_dev->state[request_id] = TW_S_FINISHED; + tw_dev->free_tail = 
(tw_dev->free_tail + 1) % TW_Q_LENGTH; +} /* End twl_free_request_id() */ + +/* This function will complete an aen request from the isr */ +static int twl_aen_complete(TW_Device_Extension *tw_dev, int request_id) +{ + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Command_Apache_Header *header; + unsigned short aen; + int retval = 1; + + header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; + tw_dev->posted_request_count--; + aen = le16_to_cpu(header->status_block.error); + full_command_packet = tw_dev->command_packet_virt[request_id]; + command_packet = &full_command_packet->command.oldcommand; + + /* First check for internal completion of set param for time sync */ + if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) { + /* Keep reading the queue in case there are more aen's */ + if (twl_aen_read_queue(tw_dev, request_id)) + goto out2; + else { + retval = 0; + goto out; + } + } + + switch (aen) { + case TW_AEN_QUEUE_EMPTY: + /* Quit reading the queue if this is the last one */ + break; + case TW_AEN_SYNC_TIME_WITH_HOST: + twl_aen_sync_time(tw_dev, request_id); + retval = 0; + goto out; + default: + twl_aen_queue_event(tw_dev, header); + + /* If there are more aen's, keep reading the queue */ + if (twl_aen_read_queue(tw_dev, request_id)) + goto out2; + else { + retval = 0; + goto out; + } + } + retval = 0; +out2: + tw_dev->state[request_id] = TW_S_COMPLETED; + twl_free_request_id(tw_dev, request_id); + clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags); +out: + return retval; +} /* End twl_aen_complete() */ + +/* This function will poll for a response */ +static int twl_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) +{ + unsigned long before; + dma_addr_t mfa; + u32 regh, regl; + u32 response; + int retval = 1; + int found = 0; + + before = jiffies; + + while (!found) { + if (sizeof(dma_addr_t) > 4) { + regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev)); + regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev)); + mfa = ((u64)regh << 32) | regl; + } else + mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev)); + + response = (u32)mfa; + + if (TW_RESID_OUT(response) == request_id) + found = 1; + + if (time_after(jiffies, before + HZ * seconds)) + goto out; + + msleep(50); + } + retval = 0; +out: + return retval; +} /* End twl_poll_response() */ + +/* This function will drain the aen queue */ +static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset) +{ + int request_id = 0; + unsigned char cdb[TW_MAX_CDB_LEN]; + TW_SG_Entry_ISO sglist[1]; + int finished = 0, count = 0; + TW_Command_Full *full_command_packet; + TW_Command_Apache_Header *header; + unsigned short aen; + int first_reset = 0, queue = 0, retval = 1; + + if (no_check_reset) + first_reset = 0; + else + first_reset = 1; + + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + + /* Initialize cdb */ + memset(&cdb, 0, TW_MAX_CDB_LEN); + cdb[0] = REQUEST_SENSE; /* opcode */ + cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ + + /* Initialize sglist */ + memset(&sglist, 0, sizeof(TW_SG_Entry_ISO)); + sglist[0].length = TW_SECTOR_SIZE; + sglist[0].address = tw_dev->generic_buffer_phys[request_id]; + + /* Mark internal command */ + tw_dev->srb[request_id] = NULL; + + do { + /* Send command to the board */ + if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "Error posting request sense"); + goto out; + } + + /* Now poll for 
completion */ + if (twl_poll_response(tw_dev, request_id, 30)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "No valid response while draining AEN queue"); + tw_dev->posted_request_count--; + goto out; + } + + tw_dev->posted_request_count--; + header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; + aen = le16_to_cpu(header->status_block.error); + queue = 0; + count++; + + switch (aen) { + case TW_AEN_QUEUE_EMPTY: + if (first_reset != 1) + goto out; + else + finished = 1; + break; + case TW_AEN_SOFT_RESET: + if (first_reset == 0) + first_reset = 1; + else + queue = 1; + break; + case TW_AEN_SYNC_TIME_WITH_HOST: + break; + default: + queue = 1; + } + + /* Now queue an event info */ + if (queue) + twl_aen_queue_event(tw_dev, header); + } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN)); + + if (count == TW_MAX_AEN_DRAIN) + goto out; + + retval = 0; +out: + tw_dev->state[request_id] = TW_S_INITIAL; + return retval; +} /* End twl_aen_drain_queue() */ + +/* This function will allocate memory and check if it is correctly aligned */ +static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which) +{ + int i; + dma_addr_t dma_handle; + unsigned long *cpu_addr; + int retval = 1; + + cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, + size * TW_Q_LENGTH, &dma_handle, + GFP_KERNEL); + if (!cpu_addr) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); + goto out; + } + + for (i = 0; i < TW_Q_LENGTH; i++) { + switch(which) { + case 0: + tw_dev->command_packet_phys[i] = dma_handle+(i*size); + tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size)); + break; + case 1: + tw_dev->generic_buffer_phys[i] = dma_handle+(i*size); + tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size)); + break; + case 2: + tw_dev->sense_buffer_phys[i] = dma_handle+(i*size); + tw_dev->sense_buffer_virt[i] = (TW_Command_Apache_Header *)((unsigned char *)cpu_addr + (i*size)); + break; + } + } + retval = 0; +out: + return retval; +} /* End twl_allocate_memory() */ + +/* This function will load the request id and various sgls for ioctls */ +static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length) +{ + TW_Command *oldcommand; + TW_Command_Apache *newcommand; + TW_SG_Entry_ISO *sgl; + unsigned int pae = 0; + + if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4)) + pae = 1; + + if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) { + newcommand = &full_command_packet->command.newcommand; + newcommand->request_id__lunl = + cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id)); + if (length) { + newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache)); + newcommand->sg_list[0].length = TW_CPU_TO_SGL(length); + } + newcommand->sgl_entries__lunh = + cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0)); + } else { + oldcommand = &full_command_packet->command.oldcommand; + oldcommand->request_id = request_id; + + if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) { + /* Load the sg list */ + sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 
1 : 0)); + sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache)); + sgl->length = TW_CPU_TO_SGL(length); + oldcommand->size += pae; + oldcommand->size += sizeof(dma_addr_t) > 4 ? 1 : 0; + } + } +} /* End twl_load_sgl() */ + +/* This function handles ioctl for the character device + This interface is used by smartmontools open source software */ +static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long timeout; + unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0; + dma_addr_t dma_handle; + int request_id = 0; + TW_Ioctl_Driver_Command driver_command; + struct inode *inode = file_inode(file); + TW_Ioctl_Buf_Apache *tw_ioctl; + TW_Command_Full *full_command_packet; + TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)]; + int retval = -EFAULT; + void __user *argp = (void __user *)arg; + + mutex_lock(&twl_chrdev_mutex); + + /* Only let one of these through at a time */ + if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { + retval = -EINTR; + goto out; + } + + /* First copy down the driver command */ + if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command))) + goto out2; + + /* Check data buffer size */ + if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) { + retval = -EINVAL; + goto out2; + } + + /* Hardware can only do multiple of 512 byte transfers */ + data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511; + + /* Now allocate ioctl buf memory */ + cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_Ioctl_Buf_Apache), &dma_handle, GFP_KERNEL); + if (!cpu_addr) { + retval = -ENOMEM; + goto out2; + } + + tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr; + + /* Now copy down the entire ioctl */ + if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache))) + goto out3; + + /* See which ioctl we are doing */ + switch (cmd) { + case TW_IOCTL_FIRMWARE_PASS_THROUGH: + spin_lock_irqsave(tw_dev->host->host_lock, flags); + twl_get_request_id(tw_dev, &request_id); + + /* Flag internal command */ + tw_dev->srb[request_id] = NULL; + + /* Flag chrdev ioctl */ + tw_dev->chrdev_request_id = request_id; + + full_command_packet = (TW_Command_Full *)&tw_ioctl->firmware_command; + + /* Load request id and sglist for both command types */ + twl_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted); + + memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full)); + + /* Now post the command packet to the controller */ + twl_post_command_packet(tw_dev, request_id); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + + timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ; + + /* Now wait for command to complete */ + timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout); + + /* We timed out, and didn't get an interrupt */ + if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) { + /* Now we need to reset the board */ + printk(KERN_WARNING "3w-sas: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n", + tw_dev->host->host_no, TW_DRIVER, 0x6, + cmd); + retval = -EIO; + twl_reset_device_extension(tw_dev, 1); + goto out3; + } + + /* Now copy in the command packet response */ + memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full)); + + /* Now complete the io */ + 
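+	/*
+	 * Editor's note (assumption, not in the original comment): host_lock is
+	 * re-taken around this completion step because the free-request-id queue
+	 * and posted_request_count are normally updated with that lock held by
+	 * the interrupt handler and the reset path.
+	 */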
spin_lock_irqsave(tw_dev->host->host_lock, flags); + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_COMPLETED; + twl_free_request_id(tw_dev, request_id); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + break; + default: + retval = -ENOTTY; + goto out3; + } + + /* Now copy the entire response to userspace */ + if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0) + retval = 0; +out3: + /* Now free ioctl buf memory */ + dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_Ioctl_Buf_Apache), cpu_addr, dma_handle); +out2: + mutex_unlock(&tw_dev->ioctl_lock); +out: + mutex_unlock(&twl_chrdev_mutex); + return retval; +} /* End twl_chrdev_ioctl() */ + +/* This function handles open for the character device */ +static int twl_chrdev_open(struct inode *inode, struct file *file) +{ + unsigned int minor_number; + int retval = -ENODEV; + + if (!capable(CAP_SYS_ADMIN)) { + retval = -EACCES; + goto out; + } + + minor_number = iminor(inode); + if (minor_number >= twl_device_extension_count) + goto out; + retval = 0; +out: + return retval; +} /* End twl_chrdev_open() */ + +/* File operations struct for character device */ +static const struct file_operations twl_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = twl_chrdev_ioctl, + .open = twl_chrdev_open, + .release = NULL, + .llseek = noop_llseek, +}; + +/* This function passes sense data from firmware to scsi layer */ +static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, int copy_sense, int print_host) +{ + TW_Command_Apache_Header *header; + TW_Command_Full *full_command_packet; + unsigned short error; + char *error_str; + + header = tw_dev->sense_buffer_virt[i]; + full_command_packet = tw_dev->command_packet_virt[request_id]; + + /* Get embedded firmware error string */ + error_str = &(header->err_specific_desc[strlen(header->err_specific_desc) + 1]); + + /* Don't print error for Logical unit not supported during rollcall */ + error = le16_to_cpu(header->status_block.error); + if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE) && (error != TW_ERROR_INVALID_FIELD_IN_CDB)) { + if (print_host) + printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n", + tw_dev->host->host_no, + TW_MESSAGE_SOURCE_CONTROLLER_ERROR, + header->status_block.error, + error_str, + header->err_specific_desc); + else + printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s:%s.\n", + TW_MESSAGE_SOURCE_CONTROLLER_ERROR, + header->status_block.error, + error_str, + header->err_specific_desc); + } + + if (copy_sense) { + memcpy(tw_dev->srb[request_id]->sense_buffer, header->sense_data, TW_SENSE_DATA_LENGTH); + tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1); + goto out; + } +out: + return 1; +} /* End twl_fill_sense() */ + +/* This function will free up device extension resources */ +static void twl_free_device_extension(TW_Device_Extension *tw_dev) +{ + if (tw_dev->command_packet_virt[0]) + dma_free_coherent(&tw_dev->tw_pci_dev->dev, + sizeof(TW_Command_Full)*TW_Q_LENGTH, + tw_dev->command_packet_virt[0], + tw_dev->command_packet_phys[0]); + + if (tw_dev->generic_buffer_virt[0]) + dma_free_coherent(&tw_dev->tw_pci_dev->dev, + TW_SECTOR_SIZE*TW_Q_LENGTH, + tw_dev->generic_buffer_virt[0], + tw_dev->generic_buffer_phys[0]); + + if (tw_dev->sense_buffer_virt[0]) + dma_free_coherent(&tw_dev->tw_pci_dev->dev, + sizeof(TW_Command_Apache_Header)* + TW_Q_LENGTH, + 
tw_dev->sense_buffer_virt[0], + tw_dev->sense_buffer_phys[0]); + + kfree(tw_dev->event_queue[0]); +} /* End twl_free_device_extension() */ + +/* This function will get parameter table entries from the firmware */ +static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes) +{ + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Param_Apache *param; + void *retval = NULL; + + /* Setup the command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + command_packet = &full_command_packet->command.oldcommand; + + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM); + command_packet->size = TW_COMMAND_SIZE; + command_packet->request_id = request_id; + command_packet->byte6_offset.block_count = cpu_to_le16(1); + + /* Now setup the param */ + param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; + memset(param, 0, TW_SECTOR_SIZE); + param->table_id = cpu_to_le16(table_id | 0x8000); + param->parameter_id = cpu_to_le16(parameter_id); + param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes); + + command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); + command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE); + + /* Post the command packet to the board */ + twl_post_command_packet(tw_dev, request_id); + + /* Poll for completion */ + if (twl_poll_response(tw_dev, request_id, 30)) + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "No valid response during get param") + else + retval = (void *)&(param->data[0]); + + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_INITIAL; + + return retval; +} /* End twl_get_param() */ + +/* This function will send an initconnection command to controller */ +static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits, + u32 set_features, unsigned short current_fw_srl, + unsigned short current_fw_arch_id, + unsigned short current_fw_branch, + unsigned short current_fw_build, + unsigned short *fw_on_ctlr_srl, + unsigned short *fw_on_ctlr_arch_id, + unsigned short *fw_on_ctlr_branch, + unsigned short *fw_on_ctlr_build, + u32 *init_connect_result) +{ + TW_Command_Full *full_command_packet; + TW_Initconnect *tw_initconnect; + int request_id = 0, retval = 1; + + /* Initialize InitConnection command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + full_command_packet->header.header_desc.size_header = 128; + + tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand; + tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION); + tw_initconnect->request_id = request_id; + tw_initconnect->message_credits = cpu_to_le16(message_credits); + tw_initconnect->features = set_features; + + /* Turn on 64-bit sgl support if we need to */ + tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 
1 : 0; + + tw_initconnect->features = cpu_to_le32(tw_initconnect->features); + + if (set_features & TW_EXTENDED_INIT_CONNECT) { + tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED; + tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl); + tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id); + tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch); + tw_initconnect->fw_build = cpu_to_le16(current_fw_build); + } else + tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE; + + /* Send command packet to the board */ + twl_post_command_packet(tw_dev, request_id); + + /* Poll for completion */ + if (twl_poll_response(tw_dev, request_id, 30)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x8, "No valid response during init connection"); + } else { + if (set_features & TW_EXTENDED_INIT_CONNECT) { + *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl); + *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id); + *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch); + *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build); + *init_connect_result = le32_to_cpu(tw_initconnect->result); + } + retval = 0; + } + + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_INITIAL; + + return retval; +} /* End twl_initconnection() */ + +/* This function will initialize the fields of a device extension */ +static int twl_initialize_device_extension(TW_Device_Extension *tw_dev) +{ + int i, retval = 1; + + /* Initialize command packet buffers */ + if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x9, "Command packet memory allocation failed"); + goto out; + } + + /* Initialize generic buffer */ + if (twl_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Generic memory allocation failed"); + goto out; + } + + /* Allocate sense buffers */ + if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Apache_Header), 2)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xb, "Sense buffer allocation failed"); + goto out; + } + + /* Allocate event info space */ + tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL); + if (!tw_dev->event_queue[0]) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "Event info memory allocation failed"); + goto out; + } + + for (i = 0; i < TW_Q_LENGTH; i++) { + tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event))); + tw_dev->free_queue[i] = i; + tw_dev->state[i] = TW_S_INITIAL; + } + + tw_dev->free_head = TW_Q_START; + tw_dev->free_tail = TW_Q_START; + tw_dev->error_sequence_id = 1; + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + + mutex_init(&tw_dev->ioctl_lock); + init_waitqueue_head(&tw_dev->ioctl_wqueue); + + retval = 0; +out: + return retval; +} /* End twl_initialize_device_extension() */ + +/* This function will handle attention interrupts */ +static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev) +{ + int retval = 1; + u32 request_id, doorbell; + + /* Read doorbell status */ + doorbell = readl(TWL_HOBDB_REG_ADDR(tw_dev)); + + /* Check for controller errors */ + if (doorbell & TWL_DOORBELL_CONTROLLER_ERROR) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "Microcontroller Error: clearing"); + goto out; + } + + /* Check if we need to perform an AEN drain */ + if (doorbell & TWL_DOORBELL_ATTENTION_INTERRUPT) { + if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) { + twl_get_request_id(tw_dev, &request_id); + if (twl_aen_read_queue(tw_dev, request_id)) { + 
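+				/*
+				 * (Comment added for clarity.) The AEN read could not be
+				 * posted: hand the request id back and drop out of the
+				 * attention loop so a later attention interrupt can retry
+				 * the drain.
+				 */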
tw_dev->state[request_id] = TW_S_COMPLETED; + twl_free_request_id(tw_dev, request_id); + clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags); + } + } + } + + retval = 0; +out: + /* Clear doorbell interrupt */ + TWL_CLEAR_DB_INTERRUPT(tw_dev); + + /* Make sure the clear was flushed by reading it back */ + readl(TWL_HOBDBC_REG_ADDR(tw_dev)); + + return retval; +} /* End twl_handle_attention_interrupt() */ + +/* Interrupt service routine */ +static irqreturn_t twl_interrupt(int irq, void *dev_instance) +{ + TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance; + int i, handled = 0, error = 0; + dma_addr_t mfa = 0; + u32 reg, regl, regh, response, request_id = 0; + struct scsi_cmnd *cmd; + TW_Command_Full *full_command_packet; + + spin_lock(tw_dev->host->host_lock); + + /* Read host interrupt status */ + reg = readl(TWL_HISTAT_REG_ADDR(tw_dev)); + + /* Check if this is our interrupt, otherwise bail */ + if (!(reg & TWL_HISTATUS_VALID_INTERRUPT)) + goto twl_interrupt_bail; + + handled = 1; + + /* If we are resetting, bail */ + if (test_bit(TW_IN_RESET, &tw_dev->flags)) + goto twl_interrupt_bail; + + /* Attention interrupt */ + if (reg & TWL_HISTATUS_ATTENTION_INTERRUPT) { + if (twl_handle_attention_interrupt(tw_dev)) { + TWL_MASK_INTERRUPTS(tw_dev); + goto twl_interrupt_bail; + } + } + + /* Response interrupt */ + while (reg & TWL_HISTATUS_RESPONSE_INTERRUPT) { + if (sizeof(dma_addr_t) > 4) { + regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev)); + regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev)); + mfa = ((u64)regh << 32) | regl; + } else + mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev)); + + error = 0; + response = (u32)mfa; + + /* Check for command packet error */ + if (!TW_NOTMFA_OUT(response)) { + for (i = 0; i < TW_Q_LENGTH; i++) { + if (tw_dev->sense_buffer_phys[i] == mfa) { + request_id = le16_to_cpu(tw_dev->sense_buffer_virt[i]->header_desc.request_id); + if (tw_dev->srb[request_id] != NULL) + error = twl_fill_sense(tw_dev, i, request_id, 1, 1); + else { + /* Skip ioctl error prints */ + if (request_id != tw_dev->chrdev_request_id) + error = twl_fill_sense(tw_dev, i, request_id, 0, 1); + else + memcpy(tw_dev->command_packet_virt[request_id], tw_dev->sense_buffer_virt[i], sizeof(TW_Command_Apache_Header)); + } + + /* Now re-post the sense buffer */ + writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev)); + writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev)); + break; + } + } + } else + request_id = TW_RESID_OUT(response); + + full_command_packet = tw_dev->command_packet_virt[request_id]; + + /* Check for correct state */ + if (tw_dev->state[request_id] != TW_S_POSTED) { + if (tw_dev->srb[request_id] != NULL) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Received a request id that wasn't posted"); + TWL_MASK_INTERRUPTS(tw_dev); + goto twl_interrupt_bail; + } + } + + /* Check for internal command completion */ + if (tw_dev->srb[request_id] == NULL) { + if (request_id != tw_dev->chrdev_request_id) { + if (twl_aen_complete(tw_dev, request_id)) + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "Error completing AEN during attention interrupt"); + } else { + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + wake_up(&tw_dev->ioctl_wqueue); + } + } else { + cmd = tw_dev->srb[request_id]; + + if (!error) + cmd->result = (DID_OK << 16); + + /* Report residual bytes for single sgl */ + if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) { + if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id])) + scsi_set_resid(cmd, 
scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length); + } + + /* Now complete the io */ + scsi_dma_unmap(cmd); + scsi_done(cmd); + tw_dev->state[request_id] = TW_S_COMPLETED; + twl_free_request_id(tw_dev, request_id); + tw_dev->posted_request_count--; + } + + /* Check for another response interrupt */ + reg = readl(TWL_HISTAT_REG_ADDR(tw_dev)); + } + +twl_interrupt_bail: + spin_unlock(tw_dev->host->host_lock); + return IRQ_RETVAL(handled); +} /* End twl_interrupt() */ + +/* This function will poll for a register change */ +static int twl_poll_register(TW_Device_Extension *tw_dev, void *reg, u32 value, u32 result, int seconds) +{ + unsigned long before; + int retval = 1; + u32 reg_value; + + reg_value = readl(reg); + before = jiffies; + + while ((reg_value & value) != result) { + reg_value = readl(reg); + if (time_after(jiffies, before + HZ * seconds)) + goto out; + msleep(50); + } + retval = 0; +out: + return retval; +} /* End twl_poll_register() */ + +/* This function will reset a controller */ +static int twl_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset) +{ + int retval = 1; + int i = 0; + u32 status = 0; + unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0; + unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0; + u32 init_connect_result = 0; + int tries = 0; + int do_soft_reset = soft_reset; + + while (tries < TW_MAX_RESET_TRIES) { + /* Do a soft reset if one is needed */ + if (do_soft_reset) { + TWL_SOFT_RESET(tw_dev); + + /* Make sure controller is in a good state */ + if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, 0x0, 30)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Controller never went non-ready during reset sequence"); + tries++; + continue; + } + if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, TWL_CONTROLLER_READY, 60)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x11, "Controller not ready during reset sequence"); + tries++; + continue; + } + } + + /* Initconnect */ + if (twl_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS, + TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL, + TW_9750_ARCH_ID, TW_CURRENT_DRIVER_BRANCH, + TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl, + &fw_on_ctlr_arch_id, &fw_on_ctlr_branch, + &fw_on_ctlr_build, &init_connect_result)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x12, "Initconnection failed while checking SRL"); + do_soft_reset = 1; + tries++; + continue; + } + + /* Load sense buffers */ + while (i < TW_Q_LENGTH) { + writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev)); + writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev)); + + /* Check status for over-run after each write */ + status = readl(TWL_STATUS_REG_ADDR(tw_dev)); + if (!(status & TWL_STATUS_OVERRUN_SUBMIT)) + i++; + } + + /* Now check status */ + status = readl(TWL_STATUS_REG_ADDR(tw_dev)); + if (status) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "Bad controller status after loading sense buffers"); + do_soft_reset = 1; + tries++; + continue; + } + + /* Drain the AEN queue */ + if (twl_aen_drain_queue(tw_dev, soft_reset)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x14, "AEN drain failed during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } + + /* Load rest of compatibility struct */ + strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION)); + tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL; + tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH; + 
tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD; + tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL; + tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH; + tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD; + tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl; + tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch; + tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build; + + /* If we got here, controller is in a good state */ + retval = 0; + goto out; + } +out: + return retval; +} /* End twl_reset_sequence() */ + +/* This function will reset a device extension */ +static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset) +{ + int i = 0, retval = 1; + unsigned long flags = 0; + + /* Block SCSI requests while we are resetting */ + if (ioctl_reset) + scsi_block_requests(tw_dev->host); + + set_bit(TW_IN_RESET, &tw_dev->flags); + TWL_MASK_INTERRUPTS(tw_dev); + TWL_CLEAR_DB_INTERRUPT(tw_dev); + + spin_lock_irqsave(tw_dev->host->host_lock, flags); + + /* Abort all requests that are in progress */ + for (i = 0; i < TW_Q_LENGTH; i++) { + if ((tw_dev->state[i] != TW_S_FINISHED) && + (tw_dev->state[i] != TW_S_INITIAL) && + (tw_dev->state[i] != TW_S_COMPLETED)) { + struct scsi_cmnd *cmd = tw_dev->srb[i]; + + if (cmd) { + cmd->result = (DID_RESET << 16); + scsi_dma_unmap(cmd); + scsi_done(cmd); + } + } + } + + /* Reset queues and counts */ + for (i = 0; i < TW_Q_LENGTH; i++) { + tw_dev->free_queue[i] = i; + tw_dev->state[i] = TW_S_INITIAL; + } + tw_dev->free_head = TW_Q_START; + tw_dev->free_tail = TW_Q_START; + tw_dev->posted_request_count = 0; + + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + + if (twl_reset_sequence(tw_dev, 1)) + goto out; + + TWL_UNMASK_INTERRUPTS(tw_dev); + + clear_bit(TW_IN_RESET, &tw_dev->flags); + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + + retval = 0; +out: + if (ioctl_reset) + scsi_unblock_requests(tw_dev->host); + return retval; +} /* End twl_reset_device_extension() */ + +/* This funciton returns unit geometry in cylinders/heads/sectors */ +static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) +{ + int heads, sectors; + + if (capacity >= 0x200000) { + heads = 255; + sectors = 63; + } else { + heads = 64; + sectors = 32; + } + + geom[0] = heads; + geom[1] = sectors; + geom[2] = sector_div(capacity, heads * sectors); /* cylinders */ + + return 0; +} /* End twl_scsi_biosparam() */ + +/* This is the new scsi eh reset function */ +static int twl_scsi_eh_reset(struct scsi_cmnd *SCpnt) +{ + TW_Device_Extension *tw_dev = NULL; + int retval = FAILED; + + tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; + + tw_dev->num_resets++; + + sdev_printk(KERN_WARNING, SCpnt->device, + "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n", + TW_DRIVER, 0x2c, SCpnt->cmnd[0]); + + /* Make sure we are not issuing an ioctl or resetting from ioctl */ + mutex_lock(&tw_dev->ioctl_lock); + + /* Now reset the card and some of the device extension data */ + if (twl_reset_device_extension(tw_dev, 0)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "Controller reset failed during scsi host reset"); + goto out; + } + + retval = SUCCESS; +out: + mutex_unlock(&tw_dev->ioctl_lock); + return retval; +} /* End twl_scsi_eh_reset() */ + +/* This is the main scsi queue function to handle scsi opcodes */ +static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt) +{ + void (*done)(struct scsi_cmnd *) = scsi_done; + int 
request_id, retval; + TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; + + /* If we are resetting due to timed out ioctl, report as busy */ + if (test_bit(TW_IN_RESET, &tw_dev->flags)) { + retval = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + + /* Get a free request id */ + twl_get_request_id(tw_dev, &request_id); + + /* Save the scsi command for use by the ISR */ + tw_dev->srb[request_id] = SCpnt; + + retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); + if (retval) { + tw_dev->state[request_id] = TW_S_COMPLETED; + twl_free_request_id(tw_dev, request_id); + SCpnt->result = (DID_ERROR << 16); + done(SCpnt); + retval = 0; + } +out: + return retval; +} /* End twl_scsi_queue() */ + +static DEF_SCSI_QCMD(twl_scsi_queue) + +/* This function tells the controller to shut down */ +static void __twl_shutdown(TW_Device_Extension *tw_dev) +{ + /* Disable interrupts */ + TWL_MASK_INTERRUPTS(tw_dev); + + /* Free up the IRQ */ + free_irq(tw_dev->tw_pci_dev->irq, tw_dev); + + printk(KERN_WARNING "3w-sas: Shutting down host %d.\n", tw_dev->host->host_no); + + /* Tell the card we are shutting down */ + if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Connection shutdown failed"); + } else { + printk(KERN_WARNING "3w-sas: Shutdown complete.\n"); + } + + /* Clear doorbell interrupt just before exit */ + TWL_CLEAR_DB_INTERRUPT(tw_dev); +} /* End __twl_shutdown() */ + +/* Wrapper for __twl_shutdown */ +static void twl_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + TW_Device_Extension *tw_dev; + + if (!host) + return; + + tw_dev = (TW_Device_Extension *)host->hostdata; + + if (tw_dev->online) + __twl_shutdown(tw_dev); +} /* End twl_shutdown() */ + +/* This function configures unit settings when a unit is coming on-line */ +static int twl_slave_configure(struct scsi_device *sdev) +{ + /* Force 60 second timeout */ + blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); + + return 0; +} /* End twl_slave_configure() */ + +static const struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .name = "3w-sas", + .queuecommand = twl_scsi_queue, + .eh_host_reset_handler = twl_scsi_eh_reset, + .bios_param = twl_scsi_biosparam, + .change_queue_depth = scsi_change_queue_depth, + .can_queue = TW_Q_LENGTH-2, + .slave_configure = twl_slave_configure, + .this_id = -1, + .sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH, + .max_sectors = TW_MAX_SECTORS, + .cmd_per_lun = TW_MAX_CMDS_PER_LUN, + .shost_groups = twl_host_groups, + .emulated = 1, + .no_write_same = 1, +}; + +/* This function will probe and initialize a card */ +static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) +{ + struct Scsi_Host *host = NULL; + TW_Device_Extension *tw_dev; + int retval = -ENODEV; + int *ptr_phycount, phycount=0; + + retval = pci_enable_device(pdev); + if (retval) { + TW_PRINTK(host, TW_DRIVER, 0x17, "Failed to enable pci device"); + goto out_disable_device; + } + + pci_set_master(pdev); + pci_try_set_mwi(pdev); + + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) { + TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask"); + retval = -ENODEV; + goto out_disable_device; + } + + host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension)); + if (!host) { + TW_PRINTK(host, TW_DRIVER, 0x19, "Failed to allocate memory for device extension"); + retval = -ENOMEM; + goto out_disable_device; + } + tw_dev = 
shost_priv(host); + + /* Save values to device extension */ + tw_dev->host = host; + tw_dev->tw_pci_dev = pdev; + + if (twl_initialize_device_extension(tw_dev)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension"); + retval = -ENOMEM; + goto out_free_device_extension; + } + + /* Request IO regions */ + retval = pci_request_regions(pdev, "3w-sas"); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Failed to get mem region"); + goto out_free_device_extension; + } + + /* Save base address, use region 1 */ + tw_dev->base_addr = pci_iomap(pdev, 1, 0); + if (!tw_dev->base_addr) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap"); + retval = -ENOMEM; + goto out_release_mem_region; + } + + /* Disable interrupts on the card */ + TWL_MASK_INTERRUPTS(tw_dev); + + /* Initialize the card */ + if (twl_reset_sequence(tw_dev, 0)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe"); + retval = -ENOMEM; + goto out_iounmap; + } + + /* Set host specific parameters */ + host->max_id = TW_MAX_UNITS; + host->max_cmd_len = TW_MAX_CDB_LEN; + host->max_lun = TW_MAX_LUNS; + host->max_channel = 0; + + /* Register the card with the kernel SCSI layer */ + retval = scsi_add_host(host, &pdev->dev); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "scsi add host failed"); + goto out_iounmap; + } + + pci_set_drvdata(pdev, host); + + printk(KERN_WARNING "3w-sas: scsi%d: Found an LSI 3ware %s Controller at 0x%llx, IRQ: %d.\n", + host->host_no, + (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE, + TW_PARAM_MODEL, TW_PARAM_MODEL_LENGTH), + (u64)pci_resource_start(pdev, 1), pdev->irq); + + ptr_phycount = twl_get_param(tw_dev, 2, TW_PARAM_PHY_SUMMARY_TABLE, + TW_PARAM_PHYCOUNT, TW_PARAM_PHYCOUNT_LENGTH); + if (ptr_phycount) + phycount = le32_to_cpu(*(int *)ptr_phycount); + + printk(KERN_WARNING "3w-sas: scsi%d: Firmware %s, BIOS %s, Phys: %d.\n", + host->host_no, + (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE, + TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH), + (char *)twl_get_param(tw_dev, 2, TW_VERSION_TABLE, + TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH), + phycount); + + /* Try to enable MSI */ + if (use_msi && !pci_enable_msi(pdev)) + set_bit(TW_USING_MSI, &tw_dev->flags); + + /* Now setup the interrupt handler */ + retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Error requesting IRQ"); + goto out_remove_host; + } + + twl_device_extension_list[twl_device_extension_count] = tw_dev; + twl_device_extension_count++; + + /* Re-enable interrupts on the card */ + TWL_UNMASK_INTERRUPTS(tw_dev); + + /* Finally, scan the host */ + scsi_scan_host(host); + + /* Add sysfs binary files */ + if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr)) + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Failed to create sysfs binary file: 3ware_aen_read"); + if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr)) + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Failed to create sysfs binary file: 3ware_compat_info"); + + if (twl_major == -1) { + if ((twl_major = register_chrdev (0, "twl", &twl_fops)) < 0) + TW_PRINTK(host, TW_DRIVER, 0x22, "Failed to register character device"); + } + tw_dev->online = 1; + return 0; + +out_remove_host: + if (test_bit(TW_USING_MSI, &tw_dev->flags)) + pci_disable_msi(pdev); + scsi_remove_host(host); +out_iounmap: + iounmap(tw_dev->base_addr); +out_release_mem_region: + pci_release_regions(pdev); 
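+	/*
+	 * (Comment added for clarity.) Error unwind continues below: free the
+	 * coherent buffers and event queue, drop the Scsi_Host reference, then
+	 * disable the PCI device before returning the error code.
+	 */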
+out_free_device_extension: + twl_free_device_extension(tw_dev); + scsi_host_put(host); +out_disable_device: + pci_disable_device(pdev); + + return retval; +} /* End twl_probe() */ + +/* This function is called to remove a device */ +static void twl_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + TW_Device_Extension *tw_dev; + + if (!host) + return; + + tw_dev = (TW_Device_Extension *)host->hostdata; + + if (!tw_dev->online) + return; + + /* Remove sysfs binary files */ + sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr); + sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr); + + scsi_remove_host(tw_dev->host); + + /* Unregister character device */ + if (twl_major >= 0) { + unregister_chrdev(twl_major, "twl"); + twl_major = -1; + } + + /* Shutdown the card */ + __twl_shutdown(tw_dev); + + /* Disable MSI if enabled */ + if (test_bit(TW_USING_MSI, &tw_dev->flags)) + pci_disable_msi(pdev); + + /* Free IO remapping */ + iounmap(tw_dev->base_addr); + + /* Free up the mem region */ + pci_release_regions(pdev); + + /* Free up device extension resources */ + twl_free_device_extension(tw_dev); + + scsi_host_put(tw_dev->host); + pci_disable_device(pdev); + twl_device_extension_count--; +} /* End twl_remove() */ + +/* This function is called on PCI suspend */ +static int __maybe_unused twl_suspend(struct device *dev) +{ + struct Scsi_Host *host = dev_get_drvdata(dev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + + printk(KERN_WARNING "3w-sas: Suspending host %d.\n", tw_dev->host->host_no); + /* Disable interrupts */ + TWL_MASK_INTERRUPTS(tw_dev); + + free_irq(tw_dev->tw_pci_dev->irq, tw_dev); + + /* Tell the card we are shutting down */ + if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x23, "Connection shutdown failed during suspend"); + } else { + printk(KERN_WARNING "3w-sas: Suspend complete.\n"); + } + + /* Clear doorbell interrupt */ + TWL_CLEAR_DB_INTERRUPT(tw_dev); + + return 0; +} /* End twl_suspend() */ + +/* This function is called on PCI resume */ +static int __maybe_unused twl_resume(struct device *dev) +{ + int retval = 0; + struct pci_dev *pdev = to_pci_dev(dev); + struct Scsi_Host *host = pci_get_drvdata(pdev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + + printk(KERN_WARNING "3w-sas: Resuming host %d.\n", tw_dev->host->host_no); + pci_try_set_mwi(pdev); + + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) { + TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume"); + retval = -ENODEV; + goto out_disable_device; + } + + /* Initialize the card */ + if (twl_reset_sequence(tw_dev, 0)) { + retval = -ENODEV; + goto out_disable_device; + } + + /* Now setup the interrupt handler */ + retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Error requesting IRQ during resume"); + retval = -ENODEV; + goto out_disable_device; + } + + /* Now enable MSI if enabled */ + if (test_bit(TW_USING_MSI, &tw_dev->flags)) + pci_enable_msi(pdev); + + /* Re-enable interrupts on the card */ + TWL_UNMASK_INTERRUPTS(tw_dev); + + printk(KERN_WARNING "3w-sas: Resume complete.\n"); + return 0; + +out_disable_device: + scsi_remove_host(host); + + return retval; +} /* End twl_resume() */ + +/* PCI Devices supported by this driver */ +static struct pci_device_id twl_pci_tbl[] = { + { 
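+	/* 9750-series is the only controller claimed here; see the "Controllers
+	   supported by this driver" note in the file header comment. */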
PCI_VDEVICE(3WARE, PCI_DEVICE_ID_3WARE_9750) }, + { } +}; +MODULE_DEVICE_TABLE(pci, twl_pci_tbl); + +static SIMPLE_DEV_PM_OPS(twl_pm_ops, twl_suspend, twl_resume); + +/* pci_driver initializer */ +static struct pci_driver twl_driver = { + .name = "3w-sas", + .id_table = twl_pci_tbl, + .probe = twl_probe, + .remove = twl_remove, + .driver.pm = &twl_pm_ops, + .shutdown = twl_shutdown +}; + +/* This function is called on driver initialization */ +static int __init twl_init(void) +{ + printk(KERN_INFO "LSI 3ware SAS/SATA-RAID Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION); + + return pci_register_driver(&twl_driver); +} /* End twl_init() */ + +/* This function is called on driver exit */ +static void __exit twl_exit(void) +{ + pci_unregister_driver(&twl_driver); +} /* End twl_exit() */ + +module_init(twl_init); +module_exit(twl_exit); + diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h new file mode 100644 index 000000000..096dec29e --- /dev/null +++ b/drivers/scsi/3w-sas.h @@ -0,0 +1,405 @@ +/* + 3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux. + + Written By: Adam Radford + + Copyright (C) 2009 LSI Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. 
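The tail of 3w-sas.c above registers with the PCI core through an id table, a struct pci_driver carrying the PM ops, and module_init()/module_exit() wrappers whose only extra work is the version banner printk. For a driver that needs no such banner, the same boilerplate can be collapsed with the module_pci_driver() helper; the sketch below is a generic illustration with made-up callback names, not a proposed change to this patch.

#include <linux/module.h>
#include <linux/pci.h>

/* Hypothetical stand-ins for twl_probe()/twl_remove() */
static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void demo_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static const struct pci_device_id demo_ids[] = {
	{ PCI_VDEVICE(3WARE, 0x1010) },	/* same vendor/device as the 9750 entry above */
	{ }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static struct pci_driver demo_driver = {
	.name     = "demo",
	.id_table = demo_ids,
	.probe    = demo_probe,
	.remove   = demo_remove,
};

/* Expands to module_init()/module_exit() wrappers around
 * pci_register_driver()/pci_unregister_driver(). */
module_pci_driver(demo_driver);

MODULE_LICENSE("GPL");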
+ + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Bugs/Comments/Suggestions should be mailed to: + aradford@gmail.com +*/ + +#ifndef _3W_SAS_H +#define _3W_SAS_H + +/* AEN severity table */ +static char *twl_aen_severity_table[] = +{ + "None", "ERROR", "WARNING", "INFO", "DEBUG", NULL +}; + +/* Liberator register offsets */ +#define TWL_STATUS 0x0 /* Status */ +#define TWL_HIBDB 0x20 /* Inbound doorbell */ +#define TWL_HISTAT 0x30 /* Host interrupt status */ +#define TWL_HIMASK 0x34 /* Host interrupt mask */ +#define TWL_HOBDB 0x9C /* Outbound doorbell */ +#define TWL_HOBDBC 0xA0 /* Outbound doorbell clear */ +#define TWL_SCRPD3 0xBC /* Scratchpad */ +#define TWL_HIBQPL 0xC0 /* Host inbound Q low */ +#define TWL_HIBQPH 0xC4 /* Host inbound Q high */ +#define TWL_HOBQPL 0xC8 /* Host outbound Q low */ +#define TWL_HOBQPH 0xCC /* Host outbound Q high */ +#define TWL_HISTATUS_VALID_INTERRUPT 0xC +#define TWL_HISTATUS_ATTENTION_INTERRUPT 0x4 +#define TWL_HISTATUS_RESPONSE_INTERRUPT 0x8 +#define TWL_STATUS_OVERRUN_SUBMIT 0x2000 +#define TWL_ISSUE_SOFT_RESET 0x100 +#define TWL_CONTROLLER_READY 0x2000 +#define TWL_DOORBELL_CONTROLLER_ERROR 0x200000 +#define TWL_DOORBELL_ATTENTION_INTERRUPT 0x40000 +#define TWL_PULL_MODE 0x1 + +/* Command packet opcodes used by the driver */ +#define TW_OP_INIT_CONNECTION 0x1 +#define TW_OP_GET_PARAM 0x12 +#define TW_OP_SET_PARAM 0x13 +#define TW_OP_EXECUTE_SCSI 0x10 + +/* Asynchronous Event Notification (AEN) codes used by the driver */ +#define TW_AEN_QUEUE_EMPTY 0x0000 +#define TW_AEN_SOFT_RESET 0x0001 +#define TW_AEN_SYNC_TIME_WITH_HOST 0x031 +#define TW_AEN_SEVERITY_ERROR 0x1 +#define TW_AEN_SEVERITY_DEBUG 0x4 +#define TW_AEN_NOT_RETRIEVED 0x1 + +/* Command state defines */ +#define TW_S_INITIAL 0x1 /* Initial state */ +#define TW_S_STARTED 0x2 /* Id in use */ +#define TW_S_POSTED 0x4 /* Posted to the controller */ +#define TW_S_COMPLETED 0x8 /* Completed by isr */ +#define TW_S_FINISHED 0x10 /* I/O completely done */ + +/* Compatibility defines */ +#define TW_9750_ARCH_ID 10 +#define TW_CURRENT_DRIVER_SRL 40 +#define TW_CURRENT_DRIVER_BUILD 0 +#define TW_CURRENT_DRIVER_BRANCH 0 + +/* Misc defines */ +#define TW_SECTOR_SIZE 512 +#define TW_MAX_UNITS 32 +#define TW_INIT_MESSAGE_CREDITS 0x100 +#define TW_INIT_COMMAND_PACKET_SIZE 0x3 +#define TW_INIT_COMMAND_PACKET_SIZE_EXTENDED 0x6 +#define TW_EXTENDED_INIT_CONNECT 0x2 +#define TW_BASE_FW_SRL 24 +#define TW_BASE_FW_BRANCH 0 +#define TW_BASE_FW_BUILD 1 +#define TW_Q_LENGTH 256 +#define TW_Q_START 0 +#define TW_MAX_SLOT 32 +#define TW_MAX_RESET_TRIES 2 +#define TW_MAX_CMDS_PER_LUN 254 +#define TW_MAX_AEN_DRAIN 255 +#define TW_IN_RESET 2 +#define TW_USING_MSI 3 +#define TW_IN_ATTENTION_LOOP 4 +#define TW_MAX_SECTORS 256 +#define TW_MAX_CDB_LEN 16 +#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */ +#define TW_IOCTL_CHRDEV_FREE -1 +#define 
TW_COMMAND_OFFSET 128 /* 128 bytes */ +#define TW_VERSION_TABLE 0x0402 +#define TW_TIMEKEEP_TABLE 0x040A +#define TW_INFORMATION_TABLE 0x0403 +#define TW_PARAM_FWVER 3 +#define TW_PARAM_FWVER_LENGTH 16 +#define TW_PARAM_BIOSVER 4 +#define TW_PARAM_BIOSVER_LENGTH 16 +#define TW_PARAM_MODEL 8 +#define TW_PARAM_MODEL_LENGTH 16 +#define TW_PARAM_PHY_SUMMARY_TABLE 1 +#define TW_PARAM_PHYCOUNT 2 +#define TW_PARAM_PHYCOUNT_LENGTH 1 +#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 // Used by smartmontools +#define TW_ALLOCATION_LENGTH 128 +#define TW_SENSE_DATA_LENGTH 18 +#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a +#define TW_ERROR_INVALID_FIELD_IN_CDB 0x10d +#define TW_ERROR_UNIT_OFFLINE 0x128 +#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3 +#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4 +#define TW_DRIVER 6 +#ifndef PCI_DEVICE_ID_3WARE_9750 +#define PCI_DEVICE_ID_3WARE_9750 0x1010 +#endif + +/* Bitmask macros to eliminate bitfields */ + +/* opcode: 5, reserved: 3 */ +#define TW_OPRES_IN(x,y) ((x << 5) | (y & 0x1f)) +#define TW_OP_OUT(x) (x & 0x1f) + +/* opcode: 5, sgloffset: 3 */ +#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f)) +#define TW_SGL_OUT(x) ((x >> 5) & 0x7) + +/* severity: 3, reserved: 5 */ +#define TW_SEV_OUT(x) (x & 0x7) + +/* not_mfa: 1, reserved: 7, status: 8, request_id: 16 */ +#define TW_RESID_OUT(x) ((x >> 16) & 0xffff) +#define TW_NOTMFA_OUT(x) (x & 0x1) + +/* request_id: 12, lun: 4 */ +#define TW_REQ_LUN_IN(lun, request_id) \ + (((lun << 12) & 0xf000) | (request_id & 0xfff)) +#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf) + +/* Register access macros */ +#define TWL_STATUS_REG_ADDR(x) \ + ((unsigned char __iomem *)x->base_addr + TWL_STATUS) +#define TWL_HOBQPL_REG_ADDR(x) \ + ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL) +#define TWL_HOBQPH_REG_ADDR(x) \ + ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH) +#define TWL_HOBDB_REG_ADDR(x) \ + ((unsigned char __iomem *)x->base_addr + TWL_HOBDB) +#define TWL_HOBDBC_REG_ADDR(x) \ + ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC) +#define TWL_HIMASK_REG_ADDR(x) \ + ((unsigned char __iomem *)x->base_addr + TWL_HIMASK) +#define TWL_HISTAT_REG_ADDR(x) \ + ((unsigned char __iomem *)x->base_addr + TWL_HISTAT) +#define TWL_HIBQPH_REG_ADDR(x) \ + ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH) +#define TWL_HIBQPL_REG_ADDR(x) \ + ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL) +#define TWL_HIBDB_REG_ADDR(x) \ + ((unsigned char __iomem *)x->base_addr + TWL_HIBDB) +#define TWL_SCRPD3_REG_ADDR(x) \ + ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3) +#define TWL_MASK_INTERRUPTS(x) \ + (writel(~0, TWL_HIMASK_REG_ADDR(tw_dev))) +#define TWL_UNMASK_INTERRUPTS(x) \ + (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(tw_dev))) +#define TWL_CLEAR_DB_INTERRUPT(x) \ + (writel(~0, TWL_HOBDBC_REG_ADDR(tw_dev))) +#define TWL_SOFT_RESET(x) \ + (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(tw_dev))) + +/* Macros */ +#define TW_PRINTK(h,a,b,c) { \ +if (h) \ +printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \ +else \ +printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \ +} +#define TW_MAX_LUNS 16 +#define TW_COMMAND_SIZE (sizeof(dma_addr_t) > 4 ? 6 : 4) +#define TW_LIBERATOR_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 46 : 92) +#define TW_LIBERATOR_MAX_SGL_LENGTH_OLD (sizeof(dma_addr_t) > 4 ? 47 : 94) +#define TW_PADDING_LENGTH_LIBERATOR 136 +#define TW_PADDING_LENGTH_LIBERATOR_OLD 132 +#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? 
cpu_to_le64(x) : cpu_to_le32(x)) + +#pragma pack(1) + +/* SGL entry */ +typedef struct TAG_TW_SG_Entry_ISO { + dma_addr_t address; + dma_addr_t length; +} TW_SG_Entry_ISO; + +/* Old Command Packet with ISO SGL */ +typedef struct TW_Command { + unsigned char opcode__sgloffset; + unsigned char size; + unsigned char request_id; + unsigned char unit__hostid; + /* Second DWORD */ + unsigned char status; + unsigned char flags; + union { + unsigned short block_count; + unsigned short parameter_count; + } byte6_offset; + union { + struct { + u32 lba; + TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD]; + unsigned char padding[TW_PADDING_LENGTH_LIBERATOR_OLD]; + } io; + struct { + TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD]; + u32 padding; + unsigned char padding2[TW_PADDING_LENGTH_LIBERATOR_OLD]; + } param; + } byte8_offset; +} TW_Command; + +/* New Command Packet with ISO SGL */ +typedef struct TAG_TW_Command_Apache { + unsigned char opcode__reserved; + unsigned char unit; + unsigned short request_id__lunl; + unsigned char status; + unsigned char sgl_offset; + unsigned short sgl_entries__lunh; + unsigned char cdb[16]; + TW_SG_Entry_ISO sg_list[TW_LIBERATOR_MAX_SGL_LENGTH]; + unsigned char padding[TW_PADDING_LENGTH_LIBERATOR]; +} TW_Command_Apache; + +/* New command packet header */ +typedef struct TAG_TW_Command_Apache_Header { + unsigned char sense_data[TW_SENSE_DATA_LENGTH]; + struct { + char reserved[4]; + unsigned short error; + unsigned char padding; + unsigned char severity__reserved; + } status_block; + unsigned char err_specific_desc[98]; + struct { + unsigned char size_header; + unsigned short request_id; + unsigned char size_sense; + } header_desc; +} TW_Command_Apache_Header; + +/* This struct is a union of the 2 command packets */ +typedef struct TAG_TW_Command_Full { + TW_Command_Apache_Header header; + union { + TW_Command oldcommand; + TW_Command_Apache newcommand; + } command; +} TW_Command_Full; + +/* Initconnection structure */ +typedef struct TAG_TW_Initconnect { + unsigned char opcode__reserved; + unsigned char size; + unsigned char request_id; + unsigned char res2; + unsigned char status; + unsigned char flags; + unsigned short message_credits; + u32 features; + unsigned short fw_srl; + unsigned short fw_arch_id; + unsigned short fw_branch; + unsigned short fw_build; + u32 result; +} TW_Initconnect; + +/* Event info structure */ +typedef struct TAG_TW_Event +{ + unsigned int sequence_id; + unsigned int time_stamp_sec; + unsigned short aen_code; + unsigned char severity; + unsigned char retrieved; + unsigned char repeat_count; + unsigned char parameter_len; + unsigned char parameter_data[98]; +} TW_Event; + +typedef struct TAG_TW_Ioctl_Driver_Command { + unsigned int control_code; + unsigned int status; + unsigned int unique_id; + unsigned int sequence_id; + unsigned int os_specific; + unsigned int buffer_length; +} TW_Ioctl_Driver_Command; + +typedef struct TAG_TW_Ioctl_Apache { + TW_Ioctl_Driver_Command driver_command; + char padding[488]; + TW_Command_Full firmware_command; + char data_buffer[]; +} TW_Ioctl_Buf_Apache; + +/* GetParam descriptor */ +typedef struct { + unsigned short table_id; + unsigned short parameter_id; + unsigned short parameter_size_bytes; + unsigned short actual_parameter_size_bytes; + unsigned char data[]; +} TW_Param_Apache; + +/* Compatibility information structure */ +typedef struct TAG_TW_Compatibility_Info +{ + char driver_version[32]; + unsigned short working_srl; + unsigned short working_branch; + unsigned short working_build; + 
unsigned short driver_srl_high; + unsigned short driver_branch_high; + unsigned short driver_build_high; + unsigned short driver_srl_low; + unsigned short driver_branch_low; + unsigned short driver_build_low; + unsigned short fw_on_ctlr_srl; + unsigned short fw_on_ctlr_branch; + unsigned short fw_on_ctlr_build; +} TW_Compatibility_Info; + +#pragma pack() + +typedef struct TAG_TW_Device_Extension { + void __iomem *base_addr; + unsigned long *generic_buffer_virt[TW_Q_LENGTH]; + dma_addr_t generic_buffer_phys[TW_Q_LENGTH]; + TW_Command_Full *command_packet_virt[TW_Q_LENGTH]; + dma_addr_t command_packet_phys[TW_Q_LENGTH]; + TW_Command_Apache_Header *sense_buffer_virt[TW_Q_LENGTH]; + dma_addr_t sense_buffer_phys[TW_Q_LENGTH]; + struct pci_dev *tw_pci_dev; + struct scsi_cmnd *srb[TW_Q_LENGTH]; + unsigned char free_queue[TW_Q_LENGTH]; + unsigned char free_head; + unsigned char free_tail; + int state[TW_Q_LENGTH]; + unsigned int posted_request_count; + unsigned int max_posted_request_count; + unsigned int max_sgl_entries; + unsigned int sgl_entries; + unsigned int num_resets; + unsigned int sector_count; + unsigned int max_sector_count; + unsigned int aen_count; + struct Scsi_Host *host; + long flags; + TW_Event *event_queue[TW_Q_LENGTH]; + unsigned char error_index; + unsigned int error_sequence_id; + int chrdev_request_id; + wait_queue_head_t ioctl_wqueue; + struct mutex ioctl_lock; + TW_Compatibility_Info tw_compat_info; + char online; +} TW_Device_Extension; + +#endif /* _3W_SAS_H */ + diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c new file mode 100644 index 000000000..f39c9ec2e --- /dev/null +++ b/drivers/scsi/3w-xxxx.c @@ -0,0 +1,2430 @@ +/* + 3w-xxxx.c -- 3ware Storage Controller device driver for Linux. + + Written By: Adam Radford + Modifications By: Joel Jacobson + Arnaldo Carvalho de Melo + Brad Strand + + Copyright (C) 1999-2010 3ware Inc. + + Kernel compatibility By: Andre Hedrick + Non-Copyright (C) 2000 Andre Hedrick + + Further tiny build fixes and trivial hoovering Alan Cox + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. 
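The header just shown packs request id and LUN (and similarly opcode and SGL offset) into single fields with explicit shift-and-mask macros rather than C bitfields, so the layout seen by the firmware never depends on compiler bitfield ordering. The user-space check below copies three of those macros verbatim and round-trips arbitrary test values through them.

#include <assert.h>
#include <stdio.h>

/* Copied from 3w-sas.h above; the test values below are arbitrary. */
#define TW_REQ_LUN_IN(lun, request_id) \
	(((lun << 12) & 0xf000) | (request_id & 0xfff))
#define TW_LUN_OUT(lun)		((lun >> 12) & 0xf)
#define TW_RESID_OUT(x)		((x >> 16) & 0xffff)

int main(void)
{
	unsigned int lun = 5, request_id = 0x2a7;
	unsigned int packed = TW_REQ_LUN_IN(lun, request_id);
	unsigned int response = request_id << 16;

	/* Low 12 bits carry the request id, the top 4 bits the LUN */
	assert(packed == 0x52a7);
	assert(TW_LUN_OUT(packed) == lun);
	assert((packed & 0xfff) == request_id);

	/* Response words return the request id in bits 16..31 */
	assert(TW_RESID_OUT(response) == request_id);

	printf("packed=0x%04x lun=%u id=0x%03x\n",
	       packed, TW_LUN_OUT(packed), packed & 0xfff);
	return 0;
}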
+ + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Bugs/Comments/Suggestions should be mailed to: + + aradford@gmail.com + + + History + ------- + 0.1.000 - Initial release. + 0.4.000 - Added support for Asynchronous Event Notification through + ioctls for 3DM. + 1.0.000 - Added DPO & FUA bit support for WRITE_10 & WRITE_6 cdb + to disable drive write-cache before writes. + 1.1.000 - Fixed performance bug with DPO & FUA not existing for WRITE_6. + 1.2.000 - Added support for clean shutdown notification/feature table. + 1.02.00.001 - Added support for full command packet posts through ioctls + for 3DM. + Bug fix so hot spare drives don't show up. + 1.02.00.002 - Fix bug with tw_setfeature() call that caused oops on some + systems. + 08/21/00 - release previously allocated resources on failure at + tw_allocate_memory (acme) + 1.02.00.003 - Fix tw_interrupt() to report error to scsi layer when + controller status is non-zero. + Added handling of request_sense opcode. + Fix possible null pointer dereference in + tw_reset_device_extension() + 1.02.00.004 - Add support for device id of 3ware 7000 series controllers. + Make tw_setfeature() call with interrupts disabled. + Register interrupt handler before enabling interrupts. + Clear attention interrupt before draining aen queue. + 1.02.00.005 - Allocate bounce buffers and custom queue depth for raid5 for + 6000 and 5000 series controllers. + Reduce polling mdelays causing problems on some systems. + Fix use_sg = 1 calculation bug. + Check for scsi_register returning NULL. + Add aen count to /proc/scsi/3w-xxxx. + Remove aen code unit masking in tw_aen_complete(). + 1.02.00.006 - Remove unit from printk in tw_scsi_eh_abort(), causing + possible oops. + Fix possible null pointer dereference in tw_scsi_queue() + if done function pointer was invalid. + 1.02.00.007 - Fix possible null pointer dereferences in tw_ioctl(). + Remove check for invalid done function pointer from + tw_scsi_queue(). + 1.02.00.008 - Set max sectors per io to TW_MAX_SECTORS in tw_findcards(). + Add tw_decode_error() for printing readable error messages. + Print some useful information on certain aen codes. + Add tw_decode_bits() for interpreting status register output. + Make scsi_set_pci_device() for kernels >= 2.4.4 + Fix bug where aen's could be lost before a reset. + Re-add spinlocks in tw_scsi_detect(). + Fix possible null pointer dereference in tw_aen_drain_queue() + during initialization. + Clear pci parity errors during initialization and during io. + 1.02.00.009 - Remove redundant increment in tw_state_request_start(). + Add ioctl support for direct ATA command passthru. + Add entire aen code string list. + 1.02.00.010 - Cleanup queueing code, fix jbod thoughput. + Fix get_param for specific units. + 1.02.00.011 - Fix bug in tw_aen_complete() where aen's could be lost. 
+ Fix tw_aen_drain_queue() to display useful info at init. + Set tw_host->max_id for 12 port cards. + Add ioctl support for raw command packet post from userspace + with sglist fragments (parameter and io). + 1.02.00.012 - Fix read capacity to under report by 1 sector to fix get + last sector ioctl. + 1.02.00.013 - Fix bug where more AEN codes weren't coming out during + driver initialization. + Improved handling of PCI aborts. + 1.02.00.014 - Fix bug in tw_findcards() where AEN code could be lost. + Increase timeout in tw_aen_drain_queue() to 30 seconds. + 1.02.00.015 - Re-write raw command post with data ioctl method. + Remove raid5 bounce buffers for raid5 for 6XXX for kernel 2.5 + Add tw_map/unmap_scsi_sg/single_data() for kernel 2.5 + Replace io_request_lock with host_lock for kernel 2.5 + Set max_cmd_len to 16 for 3dm for kernel 2.5 + 1.02.00.016 - Set host->max_sectors back up to 256. + 1.02.00.017 - Modified pci parity error handling/clearing from config space + during initialization. + 1.02.00.018 - Better handling of request sense opcode and sense information + for failed commands. Add tw_decode_sense(). + Replace all mdelay()'s with scsi_sleep(). + 1.02.00.019 - Revert mdelay's and scsi_sleep's, this caused problems on + some SMP systems. + 1.02.00.020 - Add pci_set_dma_mask(), rewrite kmalloc()/virt_to_bus() to + pci_alloc/free_consistent(). + Better alignment checking in tw_allocate_memory(). + Cleanup tw_initialize_device_extension(). + 1.02.00.021 - Bump cmd_per_lun in SHT to 255 for better jbod performance. + Improve handling of errors in tw_interrupt(). + Add handling/clearing of controller queue error. + Empty stale responses before draining aen queue. + Fix tw_scsi_eh_abort() to not reset on every io abort. + Set can_queue in SHT to 255 to prevent hang from AEN. + 1.02.00.022 - Fix possible null pointer dereference in tw_scsi_release(). + 1.02.00.023 - Fix bug in tw_aen_drain_queue() where unit # was always zero. + 1.02.00.024 - Add severity levels to AEN strings. + 1.02.00.025 - Fix command interrupt spurious error messages. + Fix bug in raw command post with data ioctl method. + Fix bug where rollcall sometimes failed with cable errors. + Print unit # on all command timeouts. + 1.02.00.026 - Fix possible infinite retry bug with power glitch induced + drive timeouts. + Cleanup some AEN severity levels. + 1.02.00.027 - Add drive not supported AEN code for SATA controllers. + Remove spurious unknown ioctl error message. + 1.02.00.028 - Fix bug where multiple controllers with no units were the + same card number. + Fix bug where cards were being shut down more than once. + 1.02.00.029 - Add missing pci_free_consistent() in tw_allocate_memory(). + Replace pci_map_single() with pci_map_page() for highmem. + Check for tw_setfeature() failure. + 1.02.00.030 - Make driver 64-bit clean. + 1.02.00.031 - Cleanup polling timeouts/routines in several places. + Add support for mode sense opcode. + Add support for cache mode page. + Add support for synchronize cache opcode. + 1.02.00.032 - Fix small multicard rollcall bug. + Make driver stay loaded with no units for hot add/swap. + Add support for "twe" character device for ioctls. + Clean up request_id queueing code. + Fix tw_scsi_queue() spinlocks. + 1.02.00.033 - Fix tw_aen_complete() to not queue 'queue empty' AEN's. + Initialize queues correctly when loading with no valid units. + 1.02.00.034 - Fix tw_decode_bits() to handle multiple errors. + Add support for user configurable cmd_per_lun. 
+ Add support for sht->slave_configure(). + 1.02.00.035 - Improve tw_allocate_memory() memory allocation. + Fix tw_chrdev_ioctl() to sleep correctly. + 1.02.00.036 - Increase character ioctl timeout to 60 seconds. + 1.02.00.037 - Fix tw_ioctl() to handle all non-data ATA passthru cmds + for 'smartmontools' support. + 1.26.00.038 - Roll driver minor version to 26 to denote kernel 2.6. + Add support for cmds_per_lun module parameter. + 1.26.00.039 - Fix bug in tw_chrdev_ioctl() polling code. + Fix data_buffer_length usage in tw_chrdev_ioctl(). + Update contact information. + 1.26.02.000 - Convert driver to pci_driver format. + 1.26.02.001 - Increase max ioctl buffer size to 512 sectors. + Make tw_scsi_queue() return 0 for 'Unknown scsi opcode'. + Fix tw_remove() to free irq handler/unregister_chrdev() + before shutting down card. + Change to new 'change_queue_depth' api. + Fix 'handled=1' ISR usage, remove bogus IRQ check. + 1.26.02.002 - Free irq handler in __tw_shutdown(). + Turn on RCD bit for caching mode page. + Serialize reset code. + 1.26.02.003 - Force 60 second timeout default. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "3w-xxxx.h" + +/* Globals */ +#define TW_DRIVER_VERSION "1.26.02.003" +static DEFINE_MUTEX(tw_mutex); +static TW_Device_Extension *tw_device_extension_list[TW_MAX_SLOT]; +static int tw_device_extension_count = 0; +static int twe_major = -1; + +/* Module parameters */ +MODULE_AUTHOR("LSI"); +MODULE_DESCRIPTION("3ware Storage Controller Linux Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(TW_DRIVER_VERSION); + +/* Function prototypes */ +static int tw_reset_device_extension(TW_Device_Extension *tw_dev); + +/* Functions */ + +/* This function will check the status register for unexpected bits */ +static int tw_check_bits(u32 status_reg_value) +{ + if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS) { + dprintk(KERN_WARNING "3w-xxxx: tw_check_bits(): No expected bits (0x%x).\n", status_reg_value); + return 1; + } + if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0) { + dprintk(KERN_WARNING "3w-xxxx: tw_check_bits(): Found unexpected bits (0x%x).\n", status_reg_value); + return 1; + } + + return 0; +} /* End tw_check_bits() */ + +/* This function will print readable messages from status register errors */ +static int tw_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value, int print_host) +{ + char host[16]; + + dprintk(KERN_WARNING "3w-xxxx: tw_decode_bits()\n"); + + if (print_host) + sprintf(host, " scsi%d:", tw_dev->host->host_no); + else + host[0] = '\0'; + + if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) { + printk(KERN_WARNING "3w-xxxx:%s PCI Parity Error: clearing.\n", host); + outl(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); + } + + if (status_reg_value & TW_STATUS_PCI_ABORT) { + printk(KERN_WARNING "3w-xxxx:%s PCI Abort: clearing.\n", host); + outl(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev)); + pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT); + } + + if (status_reg_value & TW_STATUS_QUEUE_ERROR) { + printk(KERN_WARNING "3w-xxxx:%s Controller Queue Error: clearing.\n", host); + outl(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); + } + + if (status_reg_value & TW_STATUS_SBUF_WRITE_ERROR) { + printk(KERN_WARNING "3w-xxxx:%s SBUF Write Error: clearing.\n", host); + 
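tw_check_bits() above validates the status register against two masks, one of bits that must always be set and one of bits that must never be. The self-contained snippet below mirrors that test; the mask values are invented for the example and are not the driver's real TW_STATUS_* constants (those live in 3w-xxxx.h, which is not reproduced here).

#include <stdio.h>

/* Example masks only; not the real TW_STATUS_EXPECTED/UNEXPECTED_BITS. */
#define EXPECTED_BITS	0x00c00000u	/* must always be set */
#define UNEXPECTED_BITS	0x003f0000u	/* must never be set  */

static int check_bits(unsigned int status)
{
	if ((status & EXPECTED_BITS) != EXPECTED_BITS)
		return 1;		/* a mandatory bit is missing */
	if (status & UNEXPECTED_BITS)
		return 1;		/* an abnormal bit is raised */
	return 0;
}

int main(void)
{
	printf("%d\n", check_bits(0x00c00000));	/* 0: healthy */
	printf("%d\n", check_bits(0x00800000));	/* 1: expected bit missing */
	printf("%d\n", check_bits(0x00c10000));	/* 1: unexpected bit set */
	return 0;
}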
outl(TW_CONTROL_CLEAR_SBUF_WRITE_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); + } + + if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) { + if (tw_dev->reset_print == 0) { + printk(KERN_WARNING "3w-xxxx:%s Microcontroller Error: clearing.\n", host); + tw_dev->reset_print = 1; + } + return 1; + } + + return 0; +} /* End tw_decode_bits() */ + +/* This function will poll the status register for a flag */ +static int tw_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds) +{ + u32 status_reg_value; + unsigned long before; + int retval = 1; + + status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev)); + before = jiffies; + + if (tw_check_bits(status_reg_value)) + tw_decode_bits(tw_dev, status_reg_value, 0); + + while ((status_reg_value & flag) != flag) { + status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev)); + + if (tw_check_bits(status_reg_value)) + tw_decode_bits(tw_dev, status_reg_value, 0); + + if (time_after(jiffies, before + HZ * seconds)) + goto out; + + msleep(50); + } + retval = 0; +out: + return retval; +} /* End tw_poll_status() */ + +/* This function will poll the status register for disappearance of a flag */ +static int tw_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds) +{ + u32 status_reg_value; + unsigned long before; + int retval = 1; + + status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev)); + before = jiffies; + + if (tw_check_bits(status_reg_value)) + tw_decode_bits(tw_dev, status_reg_value, 0); + + while ((status_reg_value & flag) != 0) { + status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev)); + + if (tw_check_bits(status_reg_value)) + tw_decode_bits(tw_dev, status_reg_value, 0); + + if (time_after(jiffies, before + HZ * seconds)) + goto out; + + msleep(50); + } + retval = 0; +out: + return retval; +} /* End tw_poll_status_gone() */ + +/* This function will attempt to post a command packet to the board */ +static int tw_post_command_packet(TW_Device_Extension *tw_dev, int request_id) +{ + u32 status_reg_value; + unsigned long command_que_value; + + dprintk(KERN_NOTICE "3w-xxxx: tw_post_command_packet()\n"); + command_que_value = tw_dev->command_packet_physical_address[request_id]; + status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev)); + + if (tw_check_bits(status_reg_value)) { + dprintk(KERN_WARNING "3w-xxxx: tw_post_command_packet(): Unexpected bits.\n"); + tw_decode_bits(tw_dev, status_reg_value, 1); + } + + if ((status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL) == 0) { + /* We successfully posted the command packet */ + outl(command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev)); + tw_dev->state[request_id] = TW_S_POSTED; + tw_dev->posted_request_count++; + if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) { + tw_dev->max_posted_request_count = tw_dev->posted_request_count; + } + } else { + /* Couldn't post the command packet, so we do it in the isr */ + if (tw_dev->state[request_id] != TW_S_PENDING) { + tw_dev->state[request_id] = TW_S_PENDING; + tw_dev->pending_request_count++; + if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) { + tw_dev->max_pending_request_count = tw_dev->pending_request_count; + } + tw_dev->pending_queue[tw_dev->pending_tail] = request_id; + if (tw_dev->pending_tail == TW_Q_LENGTH-1) { + tw_dev->pending_tail = TW_Q_START; + } else { + tw_dev->pending_tail = tw_dev->pending_tail + 1; + } + } + TW_UNMASK_COMMAND_INTERRUPT(tw_dev); + return 1; + } + return 0; +} /* End tw_post_command_packet() */ + +/* This function will return valid sense buffer information for failed cmds */ +static int 
tw_decode_sense(TW_Device_Extension *tw_dev, int request_id, int fill_sense) +{ + int i; + TW_Command *command; + + dprintk(KERN_WARNING "3w-xxxx: tw_decode_sense()\n"); + command = (TW_Command *)tw_dev->command_packet_virtual_address[request_id]; + + printk(KERN_WARNING "3w-xxxx: scsi%d: Command failed: status = 0x%x, flags = 0x%x, unit #%d.\n", tw_dev->host->host_no, command->status, command->flags, TW_UNIT_OUT(command->unit__hostid)); + + /* Attempt to return intelligent sense information */ + if (fill_sense) { + if ((command->status == 0xc7) || (command->status == 0xcb)) { + for (i = 0; i < ARRAY_SIZE(tw_sense_table); i++) { + if (command->flags == tw_sense_table[i][0]) { + + /* Valid bit and 'current errors' */ + tw_dev->srb[request_id]->sense_buffer[0] = (0x1 << 7 | 0x70); + + /* Sense key */ + tw_dev->srb[request_id]->sense_buffer[2] = tw_sense_table[i][1]; + + /* Additional sense length */ + tw_dev->srb[request_id]->sense_buffer[7] = 0xa; /* 10 bytes */ + + /* Additional sense code */ + tw_dev->srb[request_id]->sense_buffer[12] = tw_sense_table[i][2]; + + /* Additional sense code qualifier */ + tw_dev->srb[request_id]->sense_buffer[13] = tw_sense_table[i][3]; + + tw_dev->srb[request_id]->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION; + return TW_ISR_DONT_RESULT; /* Special case for isr to not over-write result */ + } + } + } + + /* If no table match, error so we get a reset */ + return 1; + } + + return 0; +} /* End tw_decode_sense() */ + +/* This function will report controller error status */ +static int tw_check_errors(TW_Device_Extension *tw_dev) +{ + u32 status_reg_value; + + status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev)); + + if (TW_STATUS_ERRORS(status_reg_value) || tw_check_bits(status_reg_value)) { + tw_decode_bits(tw_dev, status_reg_value, 0); + return 1; + } + + return 0; +} /* End tw_check_errors() */ + +/* This function will empty the response que */ +static void tw_empty_response_que(TW_Device_Extension *tw_dev) +{ + u32 status_reg_value; + + status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev)); + + while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) { + inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); + status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev)); + } +} /* End tw_empty_response_que() */ + +/* This function will free a request_id */ +static void tw_state_request_finish(TW_Device_Extension *tw_dev, int request_id) +{ + tw_dev->free_queue[tw_dev->free_tail] = request_id; + tw_dev->state[request_id] = TW_S_FINISHED; + tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH; +} /* End tw_state_request_finish() */ + +/* This function will assign an available request_id */ +static void tw_state_request_start(TW_Device_Extension *tw_dev, int *request_id) +{ + *request_id = tw_dev->free_queue[tw_dev->free_head]; + tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH; + tw_dev->state[*request_id] = TW_S_STARTED; +} /* End tw_state_request_start() */ + +/* Show some statistics about the card */ +static ssize_t tw_show_stats(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + unsigned long flags = 0; + ssize_t len; + + spin_lock_irqsave(tw_dev->host->host_lock, flags); + len = snprintf(buf, PAGE_SIZE, "3w-xxxx Driver version: %s\n" + "Current commands posted: %4d\n" + "Max commands posted: %4d\n" + "Current pending commands: %4d\n" + "Max pending commands: %4d\n" + "Last sgl length: %4d\n" + "Max sgl length: %4d\n" + 
"Last sector count: %4d\n" + "Max sector count: %4d\n" + "SCSI Host Resets: %4d\n" + "AEN's: %4d\n", + TW_DRIVER_VERSION, + tw_dev->posted_request_count, + tw_dev->max_posted_request_count, + tw_dev->pending_request_count, + tw_dev->max_pending_request_count, + tw_dev->sgl_entries, + tw_dev->max_sgl_entries, + tw_dev->sector_count, + tw_dev->max_sector_count, + tw_dev->num_resets, + tw_dev->aen_count); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + return len; +} /* End tw_show_stats() */ + +/* Create sysfs 'stats' entry */ +static struct device_attribute tw_host_stats_attr = { + .attr = { + .name = "stats", + .mode = S_IRUGO, + }, + .show = tw_show_stats +}; + +/* Host attributes initializer */ +static struct attribute *tw_host_attrs[] = { + &tw_host_stats_attr.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(tw_host); + +/* This function will read the aen queue from the isr */ +static int tw_aen_read_queue(TW_Device_Extension *tw_dev, int request_id) +{ + TW_Command *command_packet; + TW_Param *param; + unsigned long command_que_value; + u32 status_reg_value; + unsigned long param_value = 0; + + dprintk(KERN_NOTICE "3w-xxxx: tw_aen_read_queue()\n"); + + status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev)); + if (tw_check_bits(status_reg_value)) { + dprintk(KERN_WARNING "3w-xxxx: tw_aen_read_queue(): Unexpected bits.\n"); + tw_decode_bits(tw_dev, status_reg_value, 1); + return 1; + } + if (tw_dev->command_packet_virtual_address[request_id] == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_aen_read_queue(): Bad command packet virtual address.\n"); + return 1; + } + command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id]; + memset(command_packet, 0, sizeof(TW_Sector)); + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM); + command_packet->size = 4; + command_packet->request_id = request_id; + command_packet->status = 0; + command_packet->flags = 0; + command_packet->byte6.parameter_count = 1; + command_que_value = tw_dev->command_packet_physical_address[request_id]; + if (command_que_value == 0) { + printk(KERN_WARNING "3w-xxxx: tw_aen_read_queue(): Bad command packet physical address.\n"); + return 1; + } + /* Now setup the param */ + if (tw_dev->alignment_virtual_address[request_id] == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_aen_read_queue(): Bad alignment virtual address.\n"); + return 1; + } + param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; + memset(param, 0, sizeof(TW_Sector)); + param->table_id = 0x401; /* AEN table */ + param->parameter_id = 2; /* Unit code */ + param->parameter_size_bytes = 2; + param_value = tw_dev->alignment_physical_address[request_id]; + if (param_value == 0) { + printk(KERN_WARNING "3w-xxxx: tw_aen_read_queue(): Bad alignment physical address.\n"); + return 1; + } + command_packet->byte8.param.sgl[0].address = param_value; + command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector); + + /* Now post the command packet */ + if ((status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL) == 0) { + dprintk(KERN_WARNING "3w-xxxx: tw_aen_read_queue(): Post succeeded.\n"); + tw_dev->srb[request_id] = NULL; /* Flag internal command */ + tw_dev->state[request_id] = TW_S_POSTED; + outl(command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev)); + } else { + printk(KERN_WARNING "3w-xxxx: tw_aen_read_queue(): Post failed, will retry.\n"); + return 1; + } + + return 0; +} /* End tw_aen_read_queue() */ + +/* This function will complete an aen request from the isr */ +static int tw_aen_complete(TW_Device_Extension *tw_dev, 
int request_id) +{ + TW_Param *param; + unsigned short aen; + int error = 0, table_max = 0; + + dprintk(KERN_WARNING "3w-xxxx: tw_aen_complete()\n"); + if (tw_dev->alignment_virtual_address[request_id] == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_aen_complete(): Bad alignment virtual address.\n"); + return 1; + } + param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; + aen = *(unsigned short *)(param->data); + dprintk(KERN_NOTICE "3w-xxxx: tw_aen_complete(): Queue'd code 0x%x\n", aen); + + /* Print some useful info when certain aen codes come out */ + if (aen == 0x0ff) { + printk(KERN_WARNING "3w-xxxx: scsi%d: AEN: INFO: AEN queue overflow.\n", tw_dev->host->host_no); + } else { + table_max = ARRAY_SIZE(tw_aen_string); + if ((aen & 0x0ff) < table_max) { + if ((tw_aen_string[aen & 0xff][strlen(tw_aen_string[aen & 0xff])-1]) == '#') { + printk(KERN_WARNING "3w-xxxx: scsi%d: AEN: %s%d.\n", tw_dev->host->host_no, tw_aen_string[aen & 0xff], aen >> 8); + } else { + if (aen != 0x0) + printk(KERN_WARNING "3w-xxxx: scsi%d: AEN: %s.\n", tw_dev->host->host_no, tw_aen_string[aen & 0xff]); + } + } else { + printk(KERN_WARNING "3w-xxxx: scsi%d: Received AEN %d.\n", tw_dev->host->host_no, aen); + } + } + if (aen != TW_AEN_QUEUE_EMPTY) { + tw_dev->aen_count++; + + /* Now queue the code */ + tw_dev->aen_queue[tw_dev->aen_tail] = aen; + if (tw_dev->aen_tail == TW_Q_LENGTH - 1) { + tw_dev->aen_tail = TW_Q_START; + } else { + tw_dev->aen_tail = tw_dev->aen_tail + 1; + } + if (tw_dev->aen_head == tw_dev->aen_tail) { + if (tw_dev->aen_head == TW_Q_LENGTH - 1) { + tw_dev->aen_head = TW_Q_START; + } else { + tw_dev->aen_head = tw_dev->aen_head + 1; + } + } + + error = tw_aen_read_queue(tw_dev, request_id); + if (error) { + printk(KERN_WARNING "3w-xxxx: scsi%d: Error completing AEN.\n", tw_dev->host->host_no); + tw_dev->state[request_id] = TW_S_COMPLETED; + tw_state_request_finish(tw_dev, request_id); + } + } else { + tw_dev->state[request_id] = TW_S_COMPLETED; + tw_state_request_finish(tw_dev, request_id); + } + + return 0; +} /* End tw_aen_complete() */ + +/* This function will drain the aen queue after a soft reset */ +static int tw_aen_drain_queue(TW_Device_Extension *tw_dev) +{ + TW_Command *command_packet; + TW_Param *param; + int request_id = 0; + unsigned long command_que_value; + unsigned long param_value; + TW_Response_Queue response_queue; + unsigned short aen; + unsigned short aen_code; + int finished = 0; + int first_reset = 0; + int queue = 0; + int found = 0, table_max = 0; + + dprintk(KERN_NOTICE "3w-xxxx: tw_aen_drain_queue()\n"); + + if (tw_poll_status(tw_dev, TW_STATUS_ATTENTION_INTERRUPT | TW_STATUS_MICROCONTROLLER_READY, 30)) { + dprintk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): No attention interrupt for card %d.\n", tw_device_extension_count); + return 1; + } + TW_CLEAR_ATTENTION_INTERRUPT(tw_dev); + + /* Empty response queue */ + tw_empty_response_que(tw_dev); + + /* Initialize command packet */ + if (tw_dev->command_packet_virtual_address[request_id] == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): Bad command packet virtual address.\n"); + return 1; + } + command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id]; + memset(command_packet, 0, sizeof(TW_Sector)); + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM); + command_packet->size = 4; + command_packet->request_id = request_id; + command_packet->status = 0; + command_packet->flags = 0; + command_packet->byte6.parameter_count = 1; + command_que_value = 
tw_dev->command_packet_physical_address[request_id]; + if (command_que_value == 0) { + printk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): Bad command packet physical address.\n"); + return 1; + } + + /* Now setup the param */ + if (tw_dev->alignment_virtual_address[request_id] == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): Bad alignment virtual address.\n"); + return 1; + } + param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; + memset(param, 0, sizeof(TW_Sector)); + param->table_id = 0x401; /* AEN table */ + param->parameter_id = 2; /* Unit code */ + param->parameter_size_bytes = 2; + param_value = tw_dev->alignment_physical_address[request_id]; + if (param_value == 0) { + printk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): Bad alignment physical address.\n"); + return 1; + } + command_packet->byte8.param.sgl[0].address = param_value; + command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector); + + /* Now drain the controller's aen queue */ + do { + /* Post command packet */ + outl(command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev)); + + /* Now poll for completion */ + if (tw_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, 30) == 0) { + response_queue.value = inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); + request_id = TW_RESID_OUT(response_queue.response_id); + + if (request_id != 0) { + /* Unexpected request id */ + printk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): Unexpected request id.\n"); + return 1; + } + + if (command_packet->status != 0) { + if (command_packet->flags != TW_AEN_TABLE_UNDEFINED) { + /* Bad response */ + tw_decode_sense(tw_dev, request_id, 0); + return 1; + } else { + /* We know this is a 3w-1x00, and doesn't support aen's */ + return 0; + } + } + + /* Now check the aen */ + aen = *(unsigned short *)(param->data); + aen_code = (aen & 0x0ff); + queue = 0; + switch (aen_code) { + case TW_AEN_QUEUE_EMPTY: + dprintk(KERN_WARNING "3w-xxxx: AEN: %s.\n", tw_aen_string[aen & 0xff]); + if (first_reset != 1) { + return 1; + } else { + finished = 1; + } + break; + case TW_AEN_SOFT_RESET: + if (first_reset == 0) { + first_reset = 1; + } else { + printk(KERN_WARNING "3w-xxxx: AEN: %s.\n", tw_aen_string[aen & 0xff]); + tw_dev->aen_count++; + queue = 1; + } + break; + default: + if (aen == 0x0ff) { + printk(KERN_WARNING "3w-xxxx: AEN: INFO: AEN queue overflow.\n"); + } else { + table_max = ARRAY_SIZE(tw_aen_string); + if ((aen & 0x0ff) < table_max) { + if ((tw_aen_string[aen & 0xff][strlen(tw_aen_string[aen & 0xff])-1]) == '#') { + printk(KERN_WARNING "3w-xxxx: AEN: %s%d.\n", tw_aen_string[aen & 0xff], aen >> 8); + } else { + printk(KERN_WARNING "3w-xxxx: AEN: %s.\n", tw_aen_string[aen & 0xff]); + } + } else + printk(KERN_WARNING "3w-xxxx: Received AEN %d.\n", aen); + } + tw_dev->aen_count++; + queue = 1; + } + + /* Now put the aen on the aen_queue */ + if (queue == 1) { + tw_dev->aen_queue[tw_dev->aen_tail] = aen; + if (tw_dev->aen_tail == TW_Q_LENGTH - 1) { + tw_dev->aen_tail = TW_Q_START; + } else { + tw_dev->aen_tail = tw_dev->aen_tail + 1; + } + if (tw_dev->aen_head == tw_dev->aen_tail) { + if (tw_dev->aen_head == TW_Q_LENGTH - 1) { + tw_dev->aen_head = TW_Q_START; + } else { + tw_dev->aen_head = tw_dev->aen_head + 1; + } + } + } + found = 1; + } + if (found == 0) { + printk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): Response never received.\n"); + return 1; + } + } while (finished == 0); + + return 0; +} /* End tw_aen_drain_queue() */ + +/* This function will allocate memory */ +static int tw_allocate_memory(TW_Device_Extension 
*tw_dev, int size, int which) +{ + int i; + dma_addr_t dma_handle; + unsigned long *cpu_addr = NULL; + + dprintk(KERN_NOTICE "3w-xxxx: tw_allocate_memory()\n"); + + cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, + size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL); + if (cpu_addr == NULL) { + printk(KERN_WARNING "3w-xxxx: dma_alloc_coherent() failed.\n"); + return 1; + } + + if ((unsigned long)cpu_addr % (tw_dev->tw_pci_dev->device == TW_DEVICE_ID ? TW_ALIGNMENT_6000 : TW_ALIGNMENT_7000)) { + printk(KERN_WARNING "3w-xxxx: Couldn't allocate correctly aligned memory.\n"); + dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH, + cpu_addr, dma_handle); + return 1; + } + + memset(cpu_addr, 0, size*TW_Q_LENGTH); + + for (i=0;icommand_packet_physical_address[i] = dma_handle+(i*size); + tw_dev->command_packet_virtual_address[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size)); + break; + case 1: + tw_dev->alignment_physical_address[i] = dma_handle+(i*size); + tw_dev->alignment_virtual_address[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size)); + break; + default: + printk(KERN_WARNING "3w-xxxx: tw_allocate_memory(): case slip in tw_allocate_memory()\n"); + return 1; + } + } + + return 0; +} /* End tw_allocate_memory() */ + +/* This function handles ioctl for the character device */ +static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int request_id; + dma_addr_t dma_handle; + unsigned short tw_aen_code; + unsigned long flags; + unsigned int data_buffer_length = 0; + unsigned long data_buffer_length_adjusted = 0; + struct inode *inode = file_inode(file); + unsigned long *cpu_addr; + long timeout; + TW_New_Ioctl *tw_ioctl; + TW_Passthru *passthru; + TW_Device_Extension *tw_dev = tw_device_extension_list[iminor(inode)]; + int retval = -EFAULT; + void __user *argp = (void __user *)arg; + + dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl()\n"); + + mutex_lock(&tw_mutex); + /* Only let one of these through at a time */ + if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { + mutex_unlock(&tw_mutex); + return -EINTR; + } + + /* First copy down the buffer length */ + if (copy_from_user(&data_buffer_length, argp, sizeof(unsigned int))) + goto out; + + /* Check size */ + if (data_buffer_length > TW_MAX_IOCTL_SECTORS * 512) { + retval = -EINVAL; + goto out; + } + + /* Hardware can only do multiple of 512 byte transfers */ + data_buffer_length_adjusted = (data_buffer_length + 511) & ~511; + + /* Now allocate ioctl buf memory */ + cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_New_Ioctl), &dma_handle, GFP_KERNEL); + if (cpu_addr == NULL) { + retval = -ENOMEM; + goto out; + } + + tw_ioctl = (TW_New_Ioctl *)cpu_addr; + + /* Now copy down the entire ioctl */ + if (copy_from_user(tw_ioctl, argp, data_buffer_length + sizeof(TW_New_Ioctl))) + goto out2; + + passthru = (TW_Passthru *)&tw_ioctl->firmware_command; + + /* See which ioctl we are doing */ + switch (cmd) { + case TW_OP_NOP: + dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl(): caught TW_OP_NOP.\n"); + break; + case TW_OP_AEN_LISTEN: + dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl(): caught TW_AEN_LISTEN.\n"); + memset(tw_ioctl->data_buffer, 0, data_buffer_length); + + spin_lock_irqsave(tw_dev->host->host_lock, flags); + if (tw_dev->aen_head == tw_dev->aen_tail) { + tw_aen_code = TW_AEN_QUEUE_EMPTY; + } else { + tw_aen_code = tw_dev->aen_queue[tw_dev->aen_head]; + if (tw_dev->aen_head == TW_Q_LENGTH - 1) { + tw_dev->aen_head = TW_Q_START; + } 
else { + tw_dev->aen_head = tw_dev->aen_head + 1; + } + } + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + memcpy(tw_ioctl->data_buffer, &tw_aen_code, sizeof(tw_aen_code)); + break; + case TW_CMD_PACKET_WITH_DATA: + dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl(): caught TW_CMD_PACKET_WITH_DATA.\n"); + spin_lock_irqsave(tw_dev->host->host_lock, flags); + + tw_state_request_start(tw_dev, &request_id); + + /* Flag internal command */ + tw_dev->srb[request_id] = NULL; + + /* Flag chrdev ioctl */ + tw_dev->chrdev_request_id = request_id; + + tw_ioctl->firmware_command.request_id = request_id; + + /* Load the sg list */ + switch (TW_SGL_OUT(tw_ioctl->firmware_command.opcode__sgloffset)) { + case 2: + tw_ioctl->firmware_command.byte8.param.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl); + tw_ioctl->firmware_command.byte8.param.sgl[0].length = data_buffer_length_adjusted; + break; + case 3: + tw_ioctl->firmware_command.byte8.io.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl); + tw_ioctl->firmware_command.byte8.io.sgl[0].length = data_buffer_length_adjusted; + break; + case 5: + passthru->sg_list[0].address = dma_handle + sizeof(TW_New_Ioctl); + passthru->sg_list[0].length = data_buffer_length_adjusted; + break; + } + + memcpy(tw_dev->command_packet_virtual_address[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command)); + + /* Now post the command packet to the controller */ + tw_post_command_packet(tw_dev, request_id); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + + timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ; + + /* Now wait for the command to complete */ + timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout); + + /* We timed out, and didn't get an interrupt */ + if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) { + /* Now we need to reset the board */ + printk(KERN_WARNING "3w-xxxx: scsi%d: Character ioctl (0x%x) timed out, resetting card.\n", tw_dev->host->host_no, cmd); + retval = -EIO; + if (tw_reset_device_extension(tw_dev)) { + printk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl(): Reset failed for card %d.\n", tw_dev->host->host_no); + } + goto out2; + } + + /* Now copy in the command packet response */ + memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virtual_address[request_id], sizeof(TW_Command)); + + /* Now complete the io */ + spin_lock_irqsave(tw_dev->host->host_lock, flags); + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_COMPLETED; + tw_state_request_finish(tw_dev, request_id); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + break; + default: + retval = -ENOTTY; + goto out2; + } + + /* Now copy the response to userspace */ + if (copy_to_user(argp, tw_ioctl, sizeof(TW_New_Ioctl) + data_buffer_length)) + goto out2; + retval = 0; +out2: + /* Now free ioctl buf memory */ + dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_New_Ioctl), cpu_addr, dma_handle); +out: + mutex_unlock(&tw_dev->ioctl_lock); + mutex_unlock(&tw_mutex); + return retval; +} /* End tw_chrdev_ioctl() */ + +/* This function handles open for the character device */ +/* NOTE that this function races with remove. 
*/ +static int tw_chrdev_open(struct inode *inode, struct file *file) +{ + unsigned int minor_number; + + dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n"); + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + minor_number = iminor(inode); + if (minor_number >= tw_device_extension_count) + return -ENODEV; + + return 0; +} /* End tw_chrdev_open() */ + +/* File operations struct for character device */ +static const struct file_operations tw_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = tw_chrdev_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .open = tw_chrdev_open, + .release = NULL, + .llseek = noop_llseek, +}; + +/* This function will free up device extension resources */ +static void tw_free_device_extension(TW_Device_Extension *tw_dev) +{ + dprintk(KERN_NOTICE "3w-xxxx: tw_free_device_extension()\n"); + + /* Free command packet and generic buffer memory */ + if (tw_dev->command_packet_virtual_address[0]) + dma_free_coherent(&tw_dev->tw_pci_dev->dev, + sizeof(TW_Command) * TW_Q_LENGTH, + tw_dev->command_packet_virtual_address[0], + tw_dev->command_packet_physical_address[0]); + + if (tw_dev->alignment_virtual_address[0]) + dma_free_coherent(&tw_dev->tw_pci_dev->dev, + sizeof(TW_Sector) * TW_Q_LENGTH, + tw_dev->alignment_virtual_address[0], + tw_dev->alignment_physical_address[0]); +} /* End tw_free_device_extension() */ + +/* This function will send an initconnection command to controller */ +static int tw_initconnection(TW_Device_Extension *tw_dev, int message_credits) +{ + unsigned long command_que_value; + TW_Command *command_packet; + TW_Response_Queue response_queue; + int request_id = 0; + + dprintk(KERN_NOTICE "3w-xxxx: tw_initconnection()\n"); + + /* Initialize InitConnection command packet */ + if (tw_dev->command_packet_virtual_address[request_id] == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_initconnection(): Bad command packet virtual address.\n"); + return 1; + } + + command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id]; + memset(command_packet, 0, sizeof(TW_Sector)); + command_packet->opcode__sgloffset = TW_OPSGL_IN(0, TW_OP_INIT_CONNECTION); + command_packet->size = TW_INIT_COMMAND_PACKET_SIZE; + command_packet->request_id = request_id; + command_packet->status = 0x0; + command_packet->flags = 0x0; + command_packet->byte6.message_credits = message_credits; + command_packet->byte8.init_connection.response_queue_pointer = 0x0; + command_que_value = tw_dev->command_packet_physical_address[request_id]; + + if (command_que_value == 0) { + printk(KERN_WARNING "3w-xxxx: tw_initconnection(): Bad command packet physical address.\n"); + return 1; + } + + /* Send command packet to the board */ + outl(command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev)); + + /* Poll for completion */ + if (tw_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, 30) == 0) { + response_queue.value = inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); + request_id = TW_RESID_OUT(response_queue.response_id); + + if (request_id != 0) { + /* unexpected request id */ + printk(KERN_WARNING "3w-xxxx: tw_initconnection(): Unexpected request id.\n"); + return 1; + } + if (command_packet->status != 0) { + /* bad response */ + tw_decode_sense(tw_dev, request_id, 0); + return 1; + } + } + return 0; +} /* End tw_initconnection() */ + +/* Set a value in the features table */ +static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size, + unsigned char *val) +{ + TW_Param *param; + TW_Command *command_packet; + TW_Response_Queue response_queue; + int request_id = 0; + 
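The TW_AEN_LISTEN ioctl above pops event codes from a fixed ring that tw_aen_complete() fills from interrupt context, bumping the head so that the oldest entry is dropped when the ring wraps onto it. Below is a user-space model of that ring; the queue length of 256 and the 0x0000 "queue empty" code match TW_Q_LENGTH and TW_AEN_QUEUE_EMPTY, everything else is illustrative.

#include <stdio.h>

#define Q_LENGTH	256		/* mirrors TW_Q_LENGTH */
#define QUEUE_EMPTY	0x0000		/* mirrors TW_AEN_QUEUE_EMPTY */

static unsigned short aen_queue[Q_LENGTH];
static int aen_head, aen_tail;

/* Enqueue at the tail; if the tail catches the head, drop the oldest code. */
static void aen_push(unsigned short code)
{
	aen_queue[aen_tail] = code;
	aen_tail = (aen_tail + 1) % Q_LENGTH;
	if (aen_head == aen_tail)
		aen_head = (aen_head + 1) % Q_LENGTH;
}

/* Dequeue from the head; report "queue empty" when head meets tail. */
static unsigned short aen_pop(void)
{
	unsigned short code;

	if (aen_head == aen_tail)
		return QUEUE_EMPTY;
	code = aen_queue[aen_head];
	aen_head = (aen_head + 1) % Q_LENGTH;
	return code;
}

int main(void)
{
	unsigned int a, b, c;

	aen_push(0x0001);		/* e.g. TW_AEN_SOFT_RESET */
	aen_push(0x0031);		/* e.g. TW_AEN_SYNC_TIME_WITH_HOST */
	a = aen_pop();
	b = aen_pop();
	c = aen_pop();			/* ring is empty again */
	printf("0x%04x 0x%04x 0x%04x\n", a, b, c);	/* 0x0001 0x0031 0x0000 */
	return 0;
}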
unsigned long command_que_value; + unsigned long param_value; + + /* Initialize SetParam command packet */ + if (tw_dev->command_packet_virtual_address[request_id] == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Bad command packet virtual address.\n"); + return 1; + } + command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id]; + memset(command_packet, 0, sizeof(TW_Sector)); + param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; + + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM); + param->table_id = 0x404; /* Features table */ + param->parameter_id = parm; + param->parameter_size_bytes = param_size; + memcpy(param->data, val, param_size); + + param_value = tw_dev->alignment_physical_address[request_id]; + if (param_value == 0) { + printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Bad alignment physical address.\n"); + tw_dev->state[request_id] = TW_S_COMPLETED; + tw_state_request_finish(tw_dev, request_id); + tw_dev->srb[request_id]->result = (DID_OK << 16); + scsi_done(tw_dev->srb[request_id]); + } + command_packet->byte8.param.sgl[0].address = param_value; + command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector); + + command_packet->size = 4; + command_packet->request_id = request_id; + command_packet->byte6.parameter_count = 1; + + command_que_value = tw_dev->command_packet_physical_address[request_id]; + if (command_que_value == 0) { + printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Bad command packet physical address.\n"); + return 1; + } + + /* Send command packet to the board */ + outl(command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev)); + + /* Poll for completion */ + if (tw_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, 30) == 0) { + response_queue.value = inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); + request_id = TW_RESID_OUT(response_queue.response_id); + + if (request_id != 0) { + /* unexpected request id */ + printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Unexpected request id.\n"); + return 1; + } + if (command_packet->status != 0) { + /* bad response */ + tw_decode_sense(tw_dev, request_id, 0); + return 1; + } + } + + return 0; +} /* End tw_setfeature() */ + +/* This function will reset a controller */ +static int tw_reset_sequence(TW_Device_Extension *tw_dev) +{ + int error = 0; + int tries = 0; + unsigned char c = 1; + + /* Reset the board */ + while (tries < TW_MAX_RESET_TRIES) { + TW_SOFT_RESET(tw_dev); + + error = tw_aen_drain_queue(tw_dev); + if (error) { + printk(KERN_WARNING "3w-xxxx: scsi%d: AEN drain failed, retrying.\n", tw_dev->host->host_no); + tries++; + continue; + } + + /* Check for controller errors */ + if (tw_check_errors(tw_dev)) { + printk(KERN_WARNING "3w-xxxx: scsi%d: Controller errors found, retrying.\n", tw_dev->host->host_no); + tries++; + continue; + } + + /* Now the controller is in a good state */ + break; + } + + if (tries >= TW_MAX_RESET_TRIES) { + printk(KERN_WARNING "3w-xxxx: scsi%d: Controller errors, card not responding, check all cabling.\n", tw_dev->host->host_no); + return 1; + } + + error = tw_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS); + if (error) { + printk(KERN_WARNING "3w-xxxx: scsi%d: Connection initialization failed.\n", tw_dev->host->host_no); + return 1; + } + + error = tw_setfeature(tw_dev, 2, 1, &c); + if (error) { + printk(KERN_WARNING "3w-xxxx: Unable to set features for card, probable old firmware or card.\n"); + } + + return 0; +} /* End tw_reset_sequence() */ + +/* This function will initialize the fields of a device extension */ 
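Request ids follow a second ring of the same length: tw_state_request_start() hands out the id at free_head and tw_state_request_finish() returns a completed id at free_tail, with the initialization and reset routines that follow seeding the ring with ids 0..TW_Q_LENGTH-1. A compact user-space model of that free list (only the queue length is taken from the driver; the rest is illustrative):

#include <assert.h>
#include <stdio.h>

#define Q_LENGTH 256			/* mirrors TW_Q_LENGTH */

static int free_queue[Q_LENGTH];
static int free_head, free_tail;

static void init_ring(void)
{
	for (int i = 0; i < Q_LENGTH; i++)
		free_queue[i] = i;	/* every id starts out free */
	free_head = free_tail = 0;
}

/* Pop the next free id from the head of the ring. */
static int request_start(void)
{
	int id = free_queue[free_head];

	free_head = (free_head + 1) % Q_LENGTH;
	return id;
}

/* Push a finished id back at the tail for later reuse. */
static void request_finish(int id)
{
	free_queue[free_tail] = id;
	free_tail = (free_tail + 1) % Q_LENGTH;
}

int main(void)
{
	init_ring();
	int a = request_start();
	int b = request_start();

	assert(a == 0 && b == 1);
	request_finish(a);		/* id 0 rejoins the free list */
	printf("allocated %d and %d, released %d\n", a, b, a);
	return 0;
}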
+static int tw_initialize_device_extension(TW_Device_Extension *tw_dev) +{ + int i, error=0; + + dprintk(KERN_NOTICE "3w-xxxx: tw_initialize_device_extension()\n"); + + /* Initialize command packet buffers */ + error = tw_allocate_memory(tw_dev, sizeof(TW_Command), 0); + if (error) { + printk(KERN_WARNING "3w-xxxx: Command packet memory allocation failed.\n"); + return 1; + } + + /* Initialize generic buffer */ + error = tw_allocate_memory(tw_dev, sizeof(TW_Sector), 1); + if (error) { + printk(KERN_WARNING "3w-xxxx: Generic memory allocation failed.\n"); + return 1; + } + + for (i=0;i<TW_Q_LENGTH;i++) { + tw_dev->free_queue[i] = i; + tw_dev->state[i] = TW_S_INITIAL; + } + + tw_dev->pending_head = TW_Q_START; + tw_dev->pending_tail = TW_Q_START; + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + + mutex_init(&tw_dev->ioctl_lock); + init_waitqueue_head(&tw_dev->ioctl_wqueue); + + return 0; +} /* End tw_initialize_device_extension() */ + +/* This function will reset a device extension */ +static int tw_reset_device_extension(TW_Device_Extension *tw_dev) +{ + int i = 0; + struct scsi_cmnd *srb; + unsigned long flags = 0; + + dprintk(KERN_NOTICE "3w-xxxx: tw_reset_device_extension()\n"); + + set_bit(TW_IN_RESET, &tw_dev->flags); + TW_DISABLE_INTERRUPTS(tw_dev); + TW_MASK_COMMAND_INTERRUPT(tw_dev); + spin_lock_irqsave(tw_dev->host->host_lock, flags); + + /* Abort all requests that are in progress */ + for (i=0;i<TW_Q_LENGTH;i++) { + if ((tw_dev->state[i] != TW_S_FINISHED) && + (tw_dev->state[i] != TW_S_INITIAL) && + (tw_dev->state[i] != TW_S_COMPLETED)) { + srb = tw_dev->srb[i]; + if (srb != NULL) { + srb->result = (DID_RESET << 16); + scsi_dma_unmap(srb); + scsi_done(srb); + } + } + } + + /* Reset queues and counts */ + for (i=0;i<TW_Q_LENGTH;i++) { + tw_dev->free_queue[i] = i; + tw_dev->state[i] = TW_S_INITIAL; + } + tw_dev->free_head = TW_Q_START; + tw_dev->free_tail = TW_Q_START; + tw_dev->posted_request_count = 0; + tw_dev->pending_request_count = 0; + tw_dev->pending_head = TW_Q_START; + tw_dev->pending_tail = TW_Q_START; + tw_dev->reset_print = 0; + + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + + if (tw_reset_sequence(tw_dev)) { + printk(KERN_WARNING "3w-xxxx: scsi%d: Reset sequence failed.\n", tw_dev->host->host_no); + return 1; + } + + TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); + clear_bit(TW_IN_RESET, &tw_dev->flags); + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + + return 0; +} /* End tw_reset_device_extension() */ + +/* This function returns unit geometry in cylinders/heads/sectors */ +static int tw_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int geom[]) +{ + int heads, sectors, cylinders; + + dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_biosparam()\n"); + + heads = 64; + sectors = 32; + cylinders = sector_div(capacity, heads * sectors); + + if (capacity >= 0x200000) { + heads = 255; + sectors = 63; + cylinders = sector_div(capacity, heads * sectors); + } + + dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_biosparam(): heads = %d, sectors = %d, cylinders = %d\n", heads, sectors, cylinders); + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + + return 0; +} /* End tw_scsi_biosparam() */ + +/* This is the new scsi eh reset function */ +static int tw_scsi_eh_reset(struct scsi_cmnd *SCpnt) +{ + TW_Device_Extension *tw_dev=NULL; + int retval = FAILED; + + tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; + + tw_dev->num_resets++; + + sdev_printk(KERN_WARNING, SCpnt->device, + "WARNING: Command (0x%x) timed out, resetting card.\n", + SCpnt->cmnd[0]); + + /* Make sure we are not issuing an ioctl or resetting
from ioctl */ + mutex_lock(&tw_dev->ioctl_lock); + + /* Now reset the card and some of the device extension data */ + if (tw_reset_device_extension(tw_dev)) { + printk(KERN_WARNING "3w-xxxx: scsi%d: Reset failed.\n", tw_dev->host->host_no); + goto out; + } + + retval = SUCCESS; +out: + mutex_unlock(&tw_dev->ioctl_lock); + return retval; +} /* End tw_scsi_eh_reset() */ + +/* This function handles scsi inquiry commands */ +static int tw_scsiop_inquiry(TW_Device_Extension *tw_dev, int request_id) +{ + TW_Param *param; + TW_Command *command_packet; + unsigned long command_que_value; + unsigned long param_value; + + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_inquiry()\n"); + + /* Initialize command packet */ + command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id]; + if (command_packet == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_inquiry(): Bad command packet virtual address.\n"); + return 1; + } + memset(command_packet, 0, sizeof(TW_Sector)); + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM); + command_packet->size = 4; + command_packet->request_id = request_id; + command_packet->status = 0; + command_packet->flags = 0; + command_packet->byte6.parameter_count = 1; + + /* Now setup the param */ + if (tw_dev->alignment_virtual_address[request_id] == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_inquiry(): Bad alignment virtual address.\n"); + return 1; + } + param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; + memset(param, 0, sizeof(TW_Sector)); + param->table_id = 3; /* unit summary table */ + param->parameter_id = 3; /* unitsstatus parameter */ + param->parameter_size_bytes = TW_MAX_UNITS; + param_value = tw_dev->alignment_physical_address[request_id]; + if (param_value == 0) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_inquiry(): Bad alignment physical address.\n"); + return 1; + } + + command_packet->byte8.param.sgl[0].address = param_value; + command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector); + command_que_value = tw_dev->command_packet_physical_address[request_id]; + if (command_que_value == 0) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_inquiry(): Bad command packet physical address.\n"); + return 1; + } + + /* Now try to post the command packet */ + tw_post_command_packet(tw_dev, request_id); + + return 0; +} /* End tw_scsiop_inquiry() */ + +static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id, + void *data, unsigned int len) +{ + scsi_sg_copy_from_buffer(tw_dev->srb[request_id], data, len); +} + +/* This function is called by the isr to complete an inquiry command */ +static int tw_scsiop_inquiry_complete(TW_Device_Extension *tw_dev, int request_id) +{ + unsigned char *is_unit_present; + unsigned char request_buffer[36]; + TW_Param *param; + + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_inquiry_complete()\n"); + + memset(request_buffer, 0, sizeof(request_buffer)); + request_buffer[0] = TYPE_DISK; /* Peripheral device type */ + request_buffer[1] = 0; /* Device type modifier */ + request_buffer[2] = 0; /* No ansi/iso compliance */ + request_buffer[4] = 31; /* Additional length */ + memcpy(&request_buffer[8], "3ware ", 8); /* Vendor ID */ + sprintf(&request_buffer[16], "Logical Disk %-2d ", tw_dev->srb[request_id]->device->id); + memcpy(&request_buffer[32], TW_DRIVER_VERSION, 3); + tw_transfer_internal(tw_dev, request_id, request_buffer, + sizeof(request_buffer)); + + param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; + if (param == NULL) { + printk(KERN_WARNING "3w-xxxx: 
tw_scsiop_inquiry_complete(): Bad alignment virtual address.\n"); + return 1; + } + is_unit_present = &(param->data[0]); + + if (is_unit_present[tw_dev->srb[request_id]->device->id] & TW_UNIT_ONLINE) { + tw_dev->is_unit_present[tw_dev->srb[request_id]->device->id] = 1; + } else { + tw_dev->is_unit_present[tw_dev->srb[request_id]->device->id] = 0; + tw_dev->srb[request_id]->result = (DID_BAD_TARGET << 16); + return TW_ISR_DONT_RESULT; + } + + return 0; +} /* End tw_scsiop_inquiry_complete() */ + +/* This function handles scsi mode_sense commands */ +static int tw_scsiop_mode_sense(TW_Device_Extension *tw_dev, int request_id) +{ + TW_Param *param; + TW_Command *command_packet; + unsigned long command_que_value; + unsigned long param_value; + + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_mode_sense()\n"); + + /* Only page control = 0, page code = 0x8 (cache page) supported */ + if (tw_dev->srb[request_id]->cmnd[2] != 0x8) { + tw_dev->state[request_id] = TW_S_COMPLETED; + tw_state_request_finish(tw_dev, request_id); + tw_dev->srb[request_id]->result = (DID_OK << 16); + scsi_done(tw_dev->srb[request_id]); + return 0; + } + + /* Now read firmware cache setting for this unit */ + command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id]; + if (command_packet == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_mode_sense(): Bad command packet virtual address.\n"); + return 1; + } + + /* Setup the command packet */ + memset(command_packet, 0, sizeof(TW_Sector)); + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM); + command_packet->size = 4; + command_packet->request_id = request_id; + command_packet->status = 0; + command_packet->flags = 0; + command_packet->byte6.parameter_count = 1; + + /* Setup the param */ + if (tw_dev->alignment_virtual_address[request_id] == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_mode_sense(): Bad alignment virtual address.\n"); + return 1; + } + + param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; + memset(param, 0, sizeof(TW_Sector)); + param->table_id = TW_UNIT_INFORMATION_TABLE_BASE + tw_dev->srb[request_id]->device->id; + param->parameter_id = 7; /* unit flags */ + param->parameter_size_bytes = 1; + param_value = tw_dev->alignment_physical_address[request_id]; + if (param_value == 0) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_mode_sense(): Bad alignment physical address.\n"); + return 1; + } + + command_packet->byte8.param.sgl[0].address = param_value; + command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector); + command_que_value = tw_dev->command_packet_physical_address[request_id]; + if (command_que_value == 0) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_mode_sense(): Bad command packet physical address.\n"); + return 1; + } + + /* Now try to post the command packet */ + tw_post_command_packet(tw_dev, request_id); + + return 0; +} /* End tw_scsiop_mode_sense() */ + +/* This function is called by the isr to complete a mode sense command */ +static int tw_scsiop_mode_sense_complete(TW_Device_Extension *tw_dev, int request_id) +{ + TW_Param *param; + unsigned char *flags; + unsigned char request_buffer[8]; + + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_mode_sense_complete()\n"); + + param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; + if (param == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_mode_sense_complete(): Bad alignment virtual address.\n"); + return 1; + } + flags = (char *)&(param->data[0]); + memset(request_buffer, 0, sizeof(request_buffer)); + + request_buffer[0] = 0xf; 
/* mode data length */ + request_buffer[1] = 0; /* default medium type */ + request_buffer[2] = 0x10; /* dpo/fua support on */ + request_buffer[3] = 0; /* no block descriptors */ + request_buffer[4] = 0x8; /* caching page */ + request_buffer[5] = 0xa; /* page length */ + if (*flags & 0x1) + request_buffer[6] = 0x5; /* WCE on, RCD on */ + else + request_buffer[6] = 0x1; /* WCE off, RCD on */ + tw_transfer_internal(tw_dev, request_id, request_buffer, + sizeof(request_buffer)); + + return 0; +} /* End tw_scsiop_mode_sense_complete() */ + +/* This function handles scsi read_capacity commands */ +static int tw_scsiop_read_capacity(TW_Device_Extension *tw_dev, int request_id) +{ + TW_Param *param; + TW_Command *command_packet; + unsigned long command_que_value; + unsigned long param_value; + + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity()\n"); + + /* Initialize command packet */ + command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id]; + + if (command_packet == NULL) { + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity(): Bad command packet virtual address.\n"); + return 1; + } + memset(command_packet, 0, sizeof(TW_Sector)); + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM); + command_packet->size = 4; + command_packet->request_id = request_id; + command_packet->unit__hostid = TW_UNITHOST_IN(0, tw_dev->srb[request_id]->device->id); + command_packet->status = 0; + command_packet->flags = 0; + command_packet->byte6.block_count = 1; + + /* Now setup the param */ + if (tw_dev->alignment_virtual_address[request_id] == NULL) { + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity(): Bad alignment virtual address.\n"); + return 1; + } + param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; + memset(param, 0, sizeof(TW_Sector)); + param->table_id = TW_UNIT_INFORMATION_TABLE_BASE + + tw_dev->srb[request_id]->device->id; + param->parameter_id = 4; /* unitcapacity parameter */ + param->parameter_size_bytes = 4; + param_value = tw_dev->alignment_physical_address[request_id]; + if (param_value == 0) { + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity(): Bad alignment physical address.\n"); + return 1; + } + + command_packet->byte8.param.sgl[0].address = param_value; + command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector); + command_que_value = tw_dev->command_packet_physical_address[request_id]; + if (command_que_value == 0) { + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity(): Bad command packet physical address.\n"); + return 1; + } + + /* Now try to post the command to the board */ + tw_post_command_packet(tw_dev, request_id); + + return 0; +} /* End tw_scsiop_read_capacity() */ + +/* This function is called by the isr to complete a readcapacity command */ +static int tw_scsiop_read_capacity_complete(TW_Device_Extension *tw_dev, int request_id) +{ + unsigned char *param_data; + u32 capacity; + char buff[8]; + TW_Param *param; + + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity_complete()\n"); + + memset(buff, 0, sizeof(buff)); + param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; + if (param == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_capacity_complete(): Bad alignment virtual address.\n"); + return 1; + } + param_data = &(param->data[0]); + + capacity = (param_data[3] << 24) | (param_data[2] << 16) | + (param_data[1] << 8) | param_data[0]; + + /* Subtract one sector to fix get last sector ioctl */ + capacity -= 1; + + dprintk(KERN_NOTICE "3w-xxxx: 
tw_scsiop_read_capacity_complete(): Capacity = 0x%x.\n", capacity); + + /* Number of LBA's */ + buff[0] = (capacity >> 24); + buff[1] = (capacity >> 16) & 0xff; + buff[2] = (capacity >> 8) & 0xff; + buff[3] = capacity & 0xff; + + /* Block size in bytes (512) */ + buff[4] = (TW_BLOCK_SIZE >> 24); + buff[5] = (TW_BLOCK_SIZE >> 16) & 0xff; + buff[6] = (TW_BLOCK_SIZE >> 8) & 0xff; + buff[7] = TW_BLOCK_SIZE & 0xff; + + tw_transfer_internal(tw_dev, request_id, buff, sizeof(buff)); + + return 0; +} /* End tw_scsiop_read_capacity_complete() */ + +/* This function handles scsi read or write commands */ +static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id) +{ + TW_Command *command_packet; + unsigned long command_que_value; + u32 lba = 0x0, num_sectors = 0x0; + int i, use_sg; + struct scsi_cmnd *srb; + struct scatterlist *sglist, *sg; + + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write()\n"); + + srb = tw_dev->srb[request_id]; + + sglist = scsi_sglist(srb); + if (!sglist) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_write(): Request buffer NULL.\n"); + return 1; + } + + /* Initialize command packet */ + command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id]; + if (command_packet == NULL) { + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write(): Bad command packet virtual address.\n"); + return 1; + } + + if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == READ_10) { + command_packet->opcode__sgloffset = TW_OPSGL_IN(3, TW_OP_READ); + } else { + command_packet->opcode__sgloffset = TW_OPSGL_IN(3, TW_OP_WRITE); + } + + command_packet->size = 3; + command_packet->request_id = request_id; + command_packet->unit__hostid = TW_UNITHOST_IN(0, srb->device->id); + command_packet->status = 0; + command_packet->flags = 0; + + if (srb->cmnd[0] == WRITE_10) { + if ((srb->cmnd[1] & 0x8) || (srb->cmnd[1] & 0x10)) + command_packet->flags = 1; + } + + if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6) { + lba = ((u32)srb->cmnd[1] << 16) | ((u32)srb->cmnd[2] << 8) | (u32)srb->cmnd[3]; + num_sectors = (u32)srb->cmnd[4]; + } else { + lba = ((u32)srb->cmnd[2] << 24) | ((u32)srb->cmnd[3] << 16) | ((u32)srb->cmnd[4] << 8) | (u32)srb->cmnd[5]; + num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8); + } + + /* Update sector statistic */ + tw_dev->sector_count = num_sectors; + if (tw_dev->sector_count > tw_dev->max_sector_count) + tw_dev->max_sector_count = tw_dev->sector_count; + + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write(): lba = 0x%x num_sectors = 0x%x\n", lba, num_sectors); + command_packet->byte8.io.lba = lba; + command_packet->byte6.block_count = num_sectors; + + use_sg = scsi_dma_map(srb); + if (use_sg <= 0) + return 1; + + scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) { + command_packet->byte8.io.sgl[i].address = sg_dma_address(sg); + command_packet->byte8.io.sgl[i].length = sg_dma_len(sg); + command_packet->size+=2; + } + + /* Update SG statistics */ + tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]); + if (tw_dev->sgl_entries > tw_dev->max_sgl_entries) + tw_dev->max_sgl_entries = tw_dev->sgl_entries; + + command_que_value = tw_dev->command_packet_physical_address[request_id]; + if (command_que_value == 0) { + dprintk(KERN_WARNING "3w-xxxx: tw_scsiop_read_write(): Bad command packet physical address.\n"); + return 1; + } + + /* Now try to post the command to the board */ + tw_post_command_packet(tw_dev, request_id); + + return 0; +} /* End tw_scsiop_read_write() */ + +/* This function will handle the request sense scsi command 
*/ +static int tw_scsiop_request_sense(TW_Device_Extension *tw_dev, int request_id) +{ + char request_buffer[18]; + + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_request_sense()\n"); + + memset(request_buffer, 0, sizeof(request_buffer)); + request_buffer[0] = 0x70; /* Immediate fixed format */ + request_buffer[7] = 10; /* minimum size per SPC: 18 bytes */ + /* leave all other fields zero, giving effectively NO_SENSE return */ + tw_transfer_internal(tw_dev, request_id, request_buffer, + sizeof(request_buffer)); + + tw_dev->state[request_id] = TW_S_COMPLETED; + tw_state_request_finish(tw_dev, request_id); + + /* If we got a request_sense, we probably want a reset, return error */ + tw_dev->srb[request_id]->result = (DID_ERROR << 16); + scsi_done(tw_dev->srb[request_id]); + + return 0; +} /* End tw_scsiop_request_sense() */ + +/* This function will handle synchronize cache scsi command */ +static int tw_scsiop_synchronize_cache(TW_Device_Extension *tw_dev, int request_id) +{ + TW_Command *command_packet; + unsigned long command_que_value; + + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_synchronize_cache()\n"); + + /* Send firmware flush command for this unit */ + command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id]; + if (command_packet == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_synchronize_cache(): Bad command packet virtual address.\n"); + return 1; + } + + /* Setup the command packet */ + memset(command_packet, 0, sizeof(TW_Sector)); + command_packet->opcode__sgloffset = TW_OPSGL_IN(0, TW_OP_FLUSH_CACHE); + command_packet->size = 2; + command_packet->request_id = request_id; + command_packet->unit__hostid = TW_UNITHOST_IN(0, tw_dev->srb[request_id]->device->id); + command_packet->status = 0; + command_packet->flags = 0; + command_packet->byte6.parameter_count = 1; + command_que_value = tw_dev->command_packet_physical_address[request_id]; + if (command_que_value == 0) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_synchronize_cache(): Bad command packet physical address.\n"); + return 1; + } + + /* Now try to post the command packet */ + tw_post_command_packet(tw_dev, request_id); + + return 0; +} /* End tw_scsiop_synchronize_cache() */ + +/* This function will handle test unit ready scsi command */ +static int tw_scsiop_test_unit_ready(TW_Device_Extension *tw_dev, int request_id) +{ + TW_Param *param; + TW_Command *command_packet; + unsigned long command_que_value; + unsigned long param_value; + + dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_test_unit_ready()\n"); + + /* Initialize command packet */ + command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id]; + if (command_packet == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_test_unit_ready(): Bad command packet virtual address.\n"); + return 1; + } + memset(command_packet, 0, sizeof(TW_Sector)); + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM); + command_packet->size = 4; + command_packet->request_id = request_id; + command_packet->status = 0; + command_packet->flags = 0; + command_packet->byte6.parameter_count = 1; + + /* Now setup the param */ + if (tw_dev->alignment_virtual_address[request_id] == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_test_unit_ready(): Bad alignment virtual address.\n"); + return 1; + } + param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; + memset(param, 0, sizeof(TW_Sector)); + param->table_id = 3; /* unit summary table */ + param->parameter_id = 3; /* unitsstatus parameter */ + param->parameter_size_bytes = 
TW_MAX_UNITS; + param_value = tw_dev->alignment_physical_address[request_id]; + if (param_value == 0) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_test_unit_ready(): Bad alignment physical address.\n"); + return 1; + } + + command_packet->byte8.param.sgl[0].address = param_value; + command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector); + command_que_value = tw_dev->command_packet_physical_address[request_id]; + if (command_que_value == 0) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_test_unit_ready(): Bad command packet physical address.\n"); + return 1; + } + + /* Now try to post the command packet */ + tw_post_command_packet(tw_dev, request_id); + + return 0; +} /* End tw_scsiop_test_unit_ready() */ + +/* This function is called by the isr to complete a testunitready command */ +static int tw_scsiop_test_unit_ready_complete(TW_Device_Extension *tw_dev, int request_id) +{ + unsigned char *is_unit_present; + TW_Param *param; + + dprintk(KERN_WARNING "3w-xxxx: tw_scsiop_test_unit_ready_complete()\n"); + + param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; + if (param == NULL) { + printk(KERN_WARNING "3w-xxxx: tw_scsiop_test_unit_ready_complete(): Bad alignment virtual address.\n"); + return 1; + } + is_unit_present = &(param->data[0]); + + if (is_unit_present[tw_dev->srb[request_id]->device->id] & TW_UNIT_ONLINE) { + tw_dev->is_unit_present[tw_dev->srb[request_id]->device->id] = 1; + } else { + tw_dev->is_unit_present[tw_dev->srb[request_id]->device->id] = 0; + tw_dev->srb[request_id]->result = (DID_BAD_TARGET << 16); + return TW_ISR_DONT_RESULT; + } + + return 0; +} /* End tw_scsiop_test_unit_ready_complete() */ + +/* This is the main scsi queue function to handle scsi opcodes */ +static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt) +{ + void (*done)(struct scsi_cmnd *) = scsi_done; + unsigned char *command = SCpnt->cmnd; + int request_id = 0; + int retval = 1; + TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; + + /* If we are resetting due to timed out ioctl, report as busy */ + if (test_bit(TW_IN_RESET, &tw_dev->flags)) + return SCSI_MLQUEUE_HOST_BUSY; + + /* Queue the command and get a request id */ + tw_state_request_start(tw_dev, &request_id); + + /* Save the scsi command for use by the ISR */ + tw_dev->srb[request_id] = SCpnt; + + switch (*command) { + case READ_10: + case READ_6: + case WRITE_10: + case WRITE_6: + dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught READ/WRITE.\n"); + retval = tw_scsiop_read_write(tw_dev, request_id); + break; + case TEST_UNIT_READY: + dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught TEST_UNIT_READY.\n"); + retval = tw_scsiop_test_unit_ready(tw_dev, request_id); + break; + case INQUIRY: + dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught INQUIRY.\n"); + retval = tw_scsiop_inquiry(tw_dev, request_id); + break; + case READ_CAPACITY: + dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught READ_CAPACITY.\n"); + retval = tw_scsiop_read_capacity(tw_dev, request_id); + break; + case REQUEST_SENSE: + dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught REQUEST_SENSE.\n"); + retval = tw_scsiop_request_sense(tw_dev, request_id); + break; + case MODE_SENSE: + dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught MODE_SENSE.\n"); + retval = tw_scsiop_mode_sense(tw_dev, request_id); + break; + case SYNCHRONIZE_CACHE: + dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught SYNCHRONIZE_CACHE.\n"); + retval = tw_scsiop_synchronize_cache(tw_dev, request_id); + break; + case TW_IOCTL: + 
printk(KERN_WARNING "3w-xxxx: SCSI_IOCTL_SEND_COMMAND deprecated, please update your 3ware tools.\n"); + break; + default: + printk(KERN_NOTICE "3w-xxxx: scsi%d: Unknown scsi opcode: 0x%x\n", tw_dev->host->host_no, *command); + tw_dev->state[request_id] = TW_S_COMPLETED; + tw_state_request_finish(tw_dev, request_id); + scsi_build_sense(SCpnt, 1, ILLEGAL_REQUEST, 0x20, 0); + done(SCpnt); + retval = 0; + } + if (retval) { + tw_dev->state[request_id] = TW_S_COMPLETED; + tw_state_request_finish(tw_dev, request_id); + SCpnt->result = (DID_ERROR << 16); + done(SCpnt); + retval = 0; + } + return retval; +} /* End tw_scsi_queue() */ + +static DEF_SCSI_QCMD(tw_scsi_queue) + +/* This function is the interrupt service routine */ +static irqreturn_t tw_interrupt(int irq, void *dev_instance) +{ + int request_id; + u32 status_reg_value; + TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance; + TW_Response_Queue response_que; + int error = 0, retval = 0; + TW_Command *command_packet; + int handled = 0; + + /* Get the host lock for io completions */ + spin_lock(tw_dev->host->host_lock); + + /* Read the registers */ + status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev)); + + /* Check if this is our interrupt, otherwise bail */ + if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT)) + goto tw_interrupt_bail; + + handled = 1; + + /* If we are resetting, bail */ + if (test_bit(TW_IN_RESET, &tw_dev->flags)) + goto tw_interrupt_bail; + + /* Check controller for errors */ + if (tw_check_bits(status_reg_value)) { + dprintk(KERN_WARNING "3w-xxxx: tw_interrupt(): Unexpected bits.\n"); + if (tw_decode_bits(tw_dev, status_reg_value, 1)) { + TW_CLEAR_ALL_INTERRUPTS(tw_dev); + goto tw_interrupt_bail; + } + } + + /* Handle host interrupt */ + if (status_reg_value & TW_STATUS_HOST_INTERRUPT) { + dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): Received host interrupt.\n"); + TW_CLEAR_HOST_INTERRUPT(tw_dev); + } + + /* Handle attention interrupt */ + if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) { + dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): Received attention interrupt.\n"); + TW_CLEAR_ATTENTION_INTERRUPT(tw_dev); + tw_state_request_start(tw_dev, &request_id); + error = tw_aen_read_queue(tw_dev, request_id); + if (error) { + printk(KERN_WARNING "3w-xxxx: scsi%d: Error reading aen queue.\n", tw_dev->host->host_no); + tw_dev->state[request_id] = TW_S_COMPLETED; + tw_state_request_finish(tw_dev, request_id); + } + } + + /* Handle command interrupt */ + if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) { + /* Drain as many pending commands as we can */ + while (tw_dev->pending_request_count > 0) { + request_id = tw_dev->pending_queue[tw_dev->pending_head]; + if (tw_dev->state[request_id] != TW_S_PENDING) { + printk(KERN_WARNING "3w-xxxx: scsi%d: Found request id that wasn't pending.\n", tw_dev->host->host_no); + break; + } + if (tw_post_command_packet(tw_dev, request_id)==0) { + if (tw_dev->pending_head == TW_Q_LENGTH-1) { + tw_dev->pending_head = TW_Q_START; + } else { + tw_dev->pending_head = tw_dev->pending_head + 1; + } + tw_dev->pending_request_count--; + } else { + /* If we get here, we will continue re-posting on the next command interrupt */ + break; + } + } + /* If there are no more pending requests, we mask command interrupt */ + if (tw_dev->pending_request_count == 0) + TW_MASK_COMMAND_INTERRUPT(tw_dev); + } + + /* Handle response interrupt */ + if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) { + /* Drain the response queue from the board */ + while ((status_reg_value & 
TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) { + /* Read response queue register */ + response_que.value = inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); + request_id = TW_RESID_OUT(response_que.response_id); + command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id]; + error = 0; + + /* Check for bad response */ + if (command_packet->status != 0) { + /* If internal command, don't error, don't fill sense */ + if (tw_dev->srb[request_id] == NULL) { + tw_decode_sense(tw_dev, request_id, 0); + } else { + error = tw_decode_sense(tw_dev, request_id, 1); + } + } + + /* Check for correct state */ + if (tw_dev->state[request_id] != TW_S_POSTED) { + if (tw_dev->srb[request_id] != NULL) { + printk(KERN_WARNING "3w-xxxx: scsi%d: Received a request id that wasn't posted.\n", tw_dev->host->host_no); + error = 1; + } + } + + dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): Response queue request id: %d.\n", request_id); + + /* Check for internal command completion */ + if (tw_dev->srb[request_id] == NULL) { + dprintk(KERN_WARNING "3w-xxxx: tw_interrupt(): Found internally posted command.\n"); + /* Check for chrdev ioctl completion */ + if (request_id != tw_dev->chrdev_request_id) { + retval = tw_aen_complete(tw_dev, request_id); + if (retval) { + printk(KERN_WARNING "3w-xxxx: scsi%d: Error completing aen.\n", tw_dev->host->host_no); + } + } else { + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + wake_up(&tw_dev->ioctl_wqueue); + } + } else { + switch (tw_dev->srb[request_id]->cmnd[0]) { + case READ_10: + case READ_6: + dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): caught READ_10/READ_6\n"); + break; + case WRITE_10: + case WRITE_6: + dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): caught WRITE_10/WRITE_6\n"); + break; + case TEST_UNIT_READY: + dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): caught TEST_UNIT_READY\n"); + error = tw_scsiop_test_unit_ready_complete(tw_dev, request_id); + break; + case INQUIRY: + dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): caught INQUIRY\n"); + error = tw_scsiop_inquiry_complete(tw_dev, request_id); + break; + case READ_CAPACITY: + dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): caught READ_CAPACITY\n"); + error = tw_scsiop_read_capacity_complete(tw_dev, request_id); + break; + case MODE_SENSE: + dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): caught MODE_SENSE\n"); + error = tw_scsiop_mode_sense_complete(tw_dev, request_id); + break; + case SYNCHRONIZE_CACHE: + dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): caught SYNCHRONIZE_CACHE\n"); + break; + default: + printk(KERN_WARNING "3w-xxxx: case slip in tw_interrupt()\n"); + error = 1; + } + + /* If no error command was a success */ + if (error == 0) { + tw_dev->srb[request_id]->result = (DID_OK << 16); + } + + /* If error, command failed */ + if (error == 1) { + /* Ask for a host reset */ + tw_dev->srb[request_id]->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION; + } + + /* Now complete the io */ + if ((error != TW_ISR_DONT_COMPLETE)) { + scsi_dma_unmap(tw_dev->srb[request_id]); + scsi_done(tw_dev->srb[request_id]); + tw_dev->state[request_id] = TW_S_COMPLETED; + tw_state_request_finish(tw_dev, request_id); + tw_dev->posted_request_count--; + } + } + + /* Check for valid status after each drain */ + status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev)); + if (tw_check_bits(status_reg_value)) { + dprintk(KERN_WARNING "3w-xxxx: tw_interrupt(): Unexpected bits.\n"); + if (tw_decode_bits(tw_dev, status_reg_value, 1)) { + TW_CLEAR_ALL_INTERRUPTS(tw_dev); + goto tw_interrupt_bail; + } + } + } + } + +tw_interrupt_bail: + 
spin_unlock(tw_dev->host->host_lock); + return IRQ_RETVAL(handled); +} /* End tw_interrupt() */ + +/* This function tells the controller to shut down */ +static void __tw_shutdown(TW_Device_Extension *tw_dev) +{ + /* Disable interrupts */ + TW_DISABLE_INTERRUPTS(tw_dev); + + /* Free up the IRQ */ + free_irq(tw_dev->tw_pci_dev->irq, tw_dev); + + printk(KERN_WARNING "3w-xxxx: Shutting down host %d.\n", tw_dev->host->host_no); + + /* Tell the card we are shutting down */ + if (tw_initconnection(tw_dev, 1)) { + printk(KERN_WARNING "3w-xxxx: Connection shutdown failed.\n"); + } else { + printk(KERN_WARNING "3w-xxxx: Shutdown complete.\n"); + } + + /* Clear all interrupts just before exit */ + TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); +} /* End __tw_shutdown() */ + +/* Wrapper for __tw_shutdown */ +static void tw_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + + __tw_shutdown(tw_dev); +} /* End tw_shutdown() */ + +/* This function gets called when a disk is coming online */ +static int tw_slave_configure(struct scsi_device *sdev) +{ + /* Force 60 second timeout */ + blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); + + return 0; +} /* End tw_slave_configure() */ + +static const struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .name = "3ware Storage Controller", + .queuecommand = tw_scsi_queue, + .eh_host_reset_handler = tw_scsi_eh_reset, + .bios_param = tw_scsi_biosparam, + .change_queue_depth = scsi_change_queue_depth, + .can_queue = TW_Q_LENGTH-2, + .slave_configure = tw_slave_configure, + .this_id = -1, + .sg_tablesize = TW_MAX_SGL_LENGTH, + .max_sectors = TW_MAX_SECTORS, + .cmd_per_lun = TW_MAX_CMDS_PER_LUN, + .shost_groups = tw_host_groups, + .emulated = 1, + .no_write_same = 1, +}; + +/* This function will probe and initialize a card */ +static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) +{ + struct Scsi_Host *host = NULL; + TW_Device_Extension *tw_dev; + int retval; + + retval = pci_enable_device(pdev); + if (retval) { + printk(KERN_WARNING "3w-xxxx: Failed to enable pci device."); + goto out_disable_device; + } + + pci_set_master(pdev); + + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) { + printk(KERN_WARNING "3w-xxxx: Failed to set dma mask."); + goto out_disable_device; + } + + host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension)); + if (!host) { + printk(KERN_WARNING "3w-xxxx: Failed to allocate memory for device extension."); + retval = -ENOMEM; + goto out_disable_device; + } + tw_dev = (TW_Device_Extension *)host->hostdata; + + /* Save values to device extension */ + tw_dev->host = host; + tw_dev->tw_pci_dev = pdev; + + if (tw_initialize_device_extension(tw_dev)) { + printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension."); + retval = -ENOMEM; + goto out_free_device_extension; + } + + /* Request IO regions */ + retval = pci_request_regions(pdev, "3w-xxxx"); + if (retval) { + printk(KERN_WARNING "3w-xxxx: Failed to get mem region."); + goto out_free_device_extension; + } + + /* Save base address */ + tw_dev->base_addr = pci_resource_start(pdev, 0); + if (!tw_dev->base_addr) { + printk(KERN_WARNING "3w-xxxx: Failed to get io address."); + retval = -ENOMEM; + goto out_release_mem_region; + } + + /* Disable interrupts on the card */ + TW_DISABLE_INTERRUPTS(tw_dev); + + /* Initialize the card */ + if (tw_reset_sequence(tw_dev)) { + retval = -EINVAL; + goto 
out_release_mem_region; + } + + /* Set host specific parameters */ + host->max_id = TW_MAX_UNITS; + host->max_cmd_len = TW_MAX_CDB_LEN; + + /* Luns and channels aren't supported by adapter */ + host->max_lun = 0; + host->max_channel = 0; + + /* Register the card with the kernel SCSI layer */ + retval = scsi_add_host(host, &pdev->dev); + if (retval) { + printk(KERN_WARNING "3w-xxxx: scsi add host failed"); + goto out_release_mem_region; + } + + pci_set_drvdata(pdev, host); + + printk(KERN_WARNING "3w-xxxx: scsi%d: Found a 3ware Storage Controller at 0x%x, IRQ: %d.\n", host->host_no, tw_dev->base_addr, pdev->irq); + + /* Now setup the interrupt handler */ + retval = request_irq(pdev->irq, tw_interrupt, IRQF_SHARED, "3w-xxxx", tw_dev); + if (retval) { + printk(KERN_WARNING "3w-xxxx: Error requesting IRQ."); + goto out_remove_host; + } + + tw_device_extension_list[tw_device_extension_count] = tw_dev; + tw_device_extension_count++; + + /* Re-enable interrupts on the card */ + TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); + + /* Finally, scan the host */ + scsi_scan_host(host); + + if (twe_major == -1) { + if ((twe_major = register_chrdev (0, "twe", &tw_fops)) < 0) + printk(KERN_WARNING "3w-xxxx: Failed to register character device."); + } + return 0; + +out_remove_host: + scsi_remove_host(host); +out_release_mem_region: + pci_release_regions(pdev); +out_free_device_extension: + tw_free_device_extension(tw_dev); + scsi_host_put(host); +out_disable_device: + pci_disable_device(pdev); + + return retval; +} /* End tw_probe() */ + +/* This function is called to remove a device */ +static void tw_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + + scsi_remove_host(tw_dev->host); + + /* Unregister character device */ + if (twe_major >= 0) { + unregister_chrdev(twe_major, "twe"); + twe_major = -1; + } + + /* Shutdown the card */ + __tw_shutdown(tw_dev); + + /* Free up the mem region */ + pci_release_regions(pdev); + + /* Free up device extension resources */ + tw_free_device_extension(tw_dev); + + scsi_host_put(tw_dev->host); + pci_disable_device(pdev); + tw_device_extension_count--; +} /* End tw_remove() */ + +/* PCI Devices supported by this driver */ +static struct pci_device_id tw_pci_tbl[] = { + { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_1000, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_7000, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { } +}; +MODULE_DEVICE_TABLE(pci, tw_pci_tbl); + +/* pci_driver initializer */ +static struct pci_driver tw_driver = { + .name = "3w-xxxx", + .id_table = tw_pci_tbl, + .probe = tw_probe, + .remove = tw_remove, + .shutdown = tw_shutdown, +}; + +/* This function is called on driver initialization */ +static int __init tw_init(void) +{ + printk(KERN_WARNING "3ware Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION); + + return pci_register_driver(&tw_driver); +} /* End tw_init() */ + +/* This function is called on driver exit */ +static void __exit tw_exit(void) +{ + pci_unregister_driver(&tw_driver); +} /* End tw_exit() */ + +module_init(tw_init); +module_exit(tw_exit); + diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h new file mode 100644 index 000000000..120a087bd --- /dev/null +++ b/drivers/scsi/3w-xxxx.h @@ -0,0 +1,439 @@ +/* + 3w-xxxx.h -- 3ware Storage Controller device driver for Linux. 
+ + Written By: Adam Radford + Modifications By: Joel Jacobson + Arnaldo Carvalho de Melo + Brad Strand + + Copyright (C) 1999-2010 3ware Inc. + + Kernel compatibility By: Andre Hedrick + Non-Copyright (C) 2000 Andre Hedrick + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. + + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Bugs/Comments/Suggestions should be mailed to: + + aradford@gmail.com + + For more information, goto: + http://www.lsi.com +*/ + +#ifndef _3W_XXXX_H +#define _3W_XXXX_H + +#include + +/* AEN strings */ +static char *tw_aen_string[] = { + [0x000] = "INFO: AEN queue empty", + [0x001] = "INFO: Soft reset occurred", + [0x002] = "ERROR: Unit degraded: Unit #", + [0x003] = "ERROR: Controller error", + [0x004] = "ERROR: Rebuild failed: Unit #", + [0x005] = "INFO: Rebuild complete: Unit #", + [0x006] = "ERROR: Incomplete unit detected: Unit #", + [0x007] = "INFO: Initialization complete: Unit #", + [0x008] = "WARNING: Unclean shutdown detected: Unit #", + [0x009] = "WARNING: ATA port timeout: Port #", + [0x00A] = "ERROR: Drive error: Port #", + [0x00B] = "INFO: Rebuild started: Unit #", + [0x00C] = "INFO: Initialization started: Unit #", + [0x00D] = "ERROR: Logical unit deleted: Unit #", + [0x00F] = "WARNING: SMART threshold exceeded: Port #", + [0x021] = "WARNING: ATA UDMA downgrade: Port #", + [0x022] = "WARNING: ATA UDMA upgrade: Port #", + [0x023] = "WARNING: Sector repair occurred: Port #", + [0x024] = "ERROR: SBUF integrity check failure", + [0x025] = "ERROR: Lost cached write: Port #", + [0x026] = "ERROR: Drive ECC error detected: Port #", + [0x027] = "ERROR: DCB checksum error: Port #", + [0x028] = "ERROR: DCB unsupported version: Port #", + [0x029] = "INFO: Verify started: Unit #", + [0x02A] = "ERROR: Verify failed: Port #", + [0x02B] = "INFO: Verify complete: Unit 
#", + [0x02C] = "WARNING: Overwrote bad sector during rebuild: Port #", + [0x02D] = "ERROR: Encountered bad sector during rebuild: Port #", + [0x02E] = "ERROR: Replacement drive is too small: Port #", + [0x02F] = "WARNING: Verify error: Unit not previously initialized: Unit #", + [0x030] = "ERROR: Drive not supported: Port #" +}; + +/* + Sense key lookup table + Format: ESDC/flags,SenseKey,AdditionalSenseCode,AdditionalSenseCodeQualifier +*/ +static unsigned char tw_sense_table[][4] = +{ + /* Codes for newer firmware */ + // ATA Error SCSI Error + {0x01, 0x03, 0x13, 0x00}, // Address mark not found Address mark not found for data field + {0x04, 0x0b, 0x00, 0x00}, // Aborted command Aborted command + {0x10, 0x0b, 0x14, 0x00}, // ID not found Recorded entity not found + {0x40, 0x03, 0x11, 0x00}, // Uncorrectable ECC error Unrecovered read error + {0x61, 0x04, 0x00, 0x00}, // Device fault Hardware error + {0x84, 0x0b, 0x47, 0x00}, // Data CRC error SCSI parity error + {0xd0, 0x0b, 0x00, 0x00}, // Device busy Aborted command + {0xd1, 0x0b, 0x00, 0x00}, // Device busy Aborted command + {0x37, 0x02, 0x04, 0x00}, // Unit offline Not ready + {0x09, 0x02, 0x04, 0x00}, // Unrecovered disk error Not ready + + /* Codes for older firmware */ + // 3ware Error SCSI Error + {0x51, 0x0b, 0x00, 0x00} // Unspecified Aborted command +}; + +/* Control register bit definitions */ +#define TW_CONTROL_CLEAR_HOST_INTERRUPT 0x00080000 +#define TW_CONTROL_CLEAR_ATTENTION_INTERRUPT 0x00040000 +#define TW_CONTROL_MASK_COMMAND_INTERRUPT 0x00020000 +#define TW_CONTROL_MASK_RESPONSE_INTERRUPT 0x00010000 +#define TW_CONTROL_UNMASK_COMMAND_INTERRUPT 0x00008000 +#define TW_CONTROL_UNMASK_RESPONSE_INTERRUPT 0x00004000 +#define TW_CONTROL_CLEAR_ERROR_STATUS 0x00000200 +#define TW_CONTROL_ISSUE_SOFT_RESET 0x00000100 +#define TW_CONTROL_ENABLE_INTERRUPTS 0x00000080 +#define TW_CONTROL_DISABLE_INTERRUPTS 0x00000040 +#define TW_CONTROL_ISSUE_HOST_INTERRUPT 0x00000020 +#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000 +#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000 +#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000 +#define TW_CONTROL_CLEAR_SBUF_WRITE_ERROR 0x00000008 + +/* Status register bit definitions */ +#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000 +#define TW_STATUS_MINOR_VERSION_MASK 0x0F000000 +#define TW_STATUS_PCI_PARITY_ERROR 0x00800000 +#define TW_STATUS_QUEUE_ERROR 0x00400000 +#define TW_STATUS_MICROCONTROLLER_ERROR 0x00200000 +#define TW_STATUS_PCI_ABORT 0x00100000 +#define TW_STATUS_HOST_INTERRUPT 0x00080000 +#define TW_STATUS_ATTENTION_INTERRUPT 0x00040000 +#define TW_STATUS_COMMAND_INTERRUPT 0x00020000 +#define TW_STATUS_RESPONSE_INTERRUPT 0x00010000 +#define TW_STATUS_COMMAND_QUEUE_FULL 0x00008000 +#define TW_STATUS_RESPONSE_QUEUE_EMPTY 0x00004000 +#define TW_STATUS_MICROCONTROLLER_READY 0x00002000 +#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000 +#define TW_STATUS_ALL_INTERRUPTS 0x000F0000 +#define TW_STATUS_CLEARABLE_BITS 0x00D00000 +#define TW_STATUS_EXPECTED_BITS 0x00002000 +#define TW_STATUS_UNEXPECTED_BITS 0x00F00008 +#define TW_STATUS_SBUF_WRITE_ERROR 0x00000008 +#define TW_STATUS_VALID_INTERRUPT 0x00DF0008 + +/* RESPONSE QUEUE BIT DEFINITIONS */ +#define TW_RESPONSE_ID_MASK 0x00000FF0 + +/* PCI related defines */ +#define TW_IO_ADDRESS_RANGE 0x10 +#define TW_DEVICE_NAME "3ware Storage Controller" +#define TW_VENDOR_ID (0x13C1) /* 3ware */ +#define TW_DEVICE_ID (0x1000) /* Storage Controller */ +#define TW_DEVICE_ID2 (0x1001) /* 7000 series controller */ +#define TW_NUMDEVICES 2 +#define 
TW_PCI_CLEAR_PARITY_ERRORS 0xc100 +#define TW_PCI_CLEAR_PCI_ABORT 0x2000 + +/* Command packet opcodes */ +#define TW_OP_NOP 0x0 +#define TW_OP_INIT_CONNECTION 0x1 +#define TW_OP_READ 0x2 +#define TW_OP_WRITE 0x3 +#define TW_OP_VERIFY 0x4 +#define TW_OP_GET_PARAM 0x12 +#define TW_OP_SET_PARAM 0x13 +#define TW_OP_SECTOR_INFO 0x1a +#define TW_OP_AEN_LISTEN 0x1c +#define TW_OP_FLUSH_CACHE 0x0e +#define TW_CMD_PACKET 0x1d +#define TW_CMD_PACKET_WITH_DATA 0x1f + +/* Asynchronous Event Notification (AEN) Codes */ +#define TW_AEN_QUEUE_EMPTY 0x0000 +#define TW_AEN_SOFT_RESET 0x0001 +#define TW_AEN_DEGRADED_MIRROR 0x0002 +#define TW_AEN_CONTROLLER_ERROR 0x0003 +#define TW_AEN_REBUILD_FAIL 0x0004 +#define TW_AEN_REBUILD_DONE 0x0005 +#define TW_AEN_QUEUE_FULL 0x00ff +#define TW_AEN_TABLE_UNDEFINED 0x15 +#define TW_AEN_APORT_TIMEOUT 0x0009 +#define TW_AEN_DRIVE_ERROR 0x000A +#define TW_AEN_SMART_FAIL 0x000F +#define TW_AEN_SBUF_FAIL 0x0024 + +/* Misc defines */ +#define TW_ALIGNMENT_6000 64 /* 64 bytes */ +#define TW_ALIGNMENT_7000 4 /* 4 bytes */ +#define TW_MAX_UNITS 16 +#define TW_COMMAND_ALIGNMENT_MASK 0x1ff +#define TW_INIT_MESSAGE_CREDITS 0x100 +#define TW_INIT_COMMAND_PACKET_SIZE 0x3 +#define TW_POLL_MAX_RETRIES 20000 +#define TW_MAX_SGL_LENGTH 62 +#define TW_ATA_PASS_SGL_MAX 60 +#define TW_Q_LENGTH 256 +#define TW_Q_START 0 +#define TW_MAX_SLOT 32 +#define TW_MAX_PCI_BUSES 255 +#define TW_MAX_RESET_TRIES 3 +#define TW_UNIT_INFORMATION_TABLE_BASE 0x300 +#define TW_MAX_CMDS_PER_LUN 254 /* 254 for io, 1 for + chrdev ioctl, one for + internal aen post */ +#define TW_BLOCK_SIZE 0x200 /* 512-byte blocks */ +#define TW_IOCTL 0x80 +#define TW_UNIT_ONLINE 1 +#define TW_IN_INTR 1 +#define TW_IN_RESET 2 +#define TW_IN_CHRDEV_IOCTL 3 +#define TW_MAX_SECTORS 256 +#define TW_MAX_IOCTL_SECTORS 512 +#define TW_AEN_WAIT_TIME 1000 +#define TW_IOCTL_WAIT_TIME (1 * HZ) /* 1 second */ +#define TW_ISR_DONT_COMPLETE 2 +#define TW_ISR_DONT_RESULT 3 +#define TW_IOCTL_TIMEOUT 25 /* 25 seconds */ +#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */ +#define TW_IOCTL_CHRDEV_FREE -1 +#define TW_MAX_CDB_LEN 16 + +/* Bitmask macros to eliminate bitfields */ + +/* opcode: 5, sgloffset: 3 */ +#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f)) +#define TW_SGL_OUT(x) ((x >> 5) & 0x7) + +/* reserved_1: 4, response_id: 8, reserved_2: 20 */ +#define TW_RESID_OUT(x) ((x >> 4) & 0xff) + +/* unit: 4, host_id: 4 */ +#define TW_UNITHOST_IN(x,y) ((x << 4) | ( y & 0xf)) +#define TW_UNIT_OUT(x) (x & 0xf) + +/* Macros */ +#define TW_CONTROL_REG_ADDR(x) (x->base_addr) +#define TW_STATUS_REG_ADDR(x) (x->base_addr + 0x4) +#define TW_COMMAND_QUEUE_REG_ADDR(x) (x->base_addr + 0x8) +#define TW_RESPONSE_QUEUE_REG_ADDR(x) (x->base_addr + 0xC) +#define TW_CLEAR_ALL_INTERRUPTS(x) \ + (outl(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x))) +#define TW_CLEAR_ATTENTION_INTERRUPT(x) \ + (outl(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x))) +#define TW_CLEAR_HOST_INTERRUPT(x) \ + (outl(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x))) +#define TW_DISABLE_INTERRUPTS(x) \ + (outl(TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x))) +#define TW_ENABLE_AND_CLEAR_INTERRUPTS(x) \ + (outl(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \ + TW_CONTROL_UNMASK_RESPONSE_INTERRUPT | \ + TW_CONTROL_ENABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x))) +#define TW_MASK_COMMAND_INTERRUPT(x) \ + (outl(TW_CONTROL_MASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x))) +#define TW_UNMASK_COMMAND_INTERRUPT(x) \ + (outl(TW_CONTROL_UNMASK_COMMAND_INTERRUPT, 
TW_CONTROL_REG_ADDR(x))) +#define TW_SOFT_RESET(x) (outl(TW_CONTROL_ISSUE_SOFT_RESET | \ + TW_CONTROL_CLEAR_HOST_INTERRUPT | \ + TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \ + TW_CONTROL_MASK_COMMAND_INTERRUPT | \ + TW_CONTROL_MASK_RESPONSE_INTERRUPT | \ + TW_CONTROL_CLEAR_ERROR_STATUS | \ + TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x))) +#define TW_STATUS_ERRORS(x) \ + (((x & TW_STATUS_PCI_ABORT) || \ + (x & TW_STATUS_PCI_PARITY_ERROR) || \ + (x & TW_STATUS_QUEUE_ERROR) || \ + (x & TW_STATUS_MICROCONTROLLER_ERROR)) && \ + (x & TW_STATUS_MICROCONTROLLER_READY)) + +#ifdef TW_DEBUG +#define dprintk(msg...) printk(msg) +#else +#define dprintk(msg...) do { } while(0) +#endif + +#pragma pack(1) + +/* Scatter Gather List Entry */ +typedef struct TAG_TW_SG_Entry { + u32 address; + u32 length; +} TW_SG_Entry; + +typedef unsigned char TW_Sector[512]; + +/* Command Packet */ +typedef struct TW_Command { + unsigned char opcode__sgloffset; + unsigned char size; + unsigned char request_id; + unsigned char unit__hostid; + /* Second DWORD */ + unsigned char status; + unsigned char flags; + union { + unsigned short block_count; + unsigned short parameter_count; + unsigned short message_credits; + } byte6; + union { + struct { + u32 lba; + TW_SG_Entry sgl[TW_MAX_SGL_LENGTH]; + u32 padding; /* pad to 512 bytes */ + } io; + struct { + TW_SG_Entry sgl[TW_MAX_SGL_LENGTH]; + u32 padding[2]; + } param; + struct { + u32 response_queue_pointer; + u32 padding[125]; + } init_connection; + struct { + char version[504]; + } ioctl_miniport_version; + } byte8; +} TW_Command; + +#pragma pack() + +typedef struct TAG_TW_Ioctl { + unsigned char opcode; + unsigned short table_id; + unsigned char parameter_id; + unsigned char parameter_size_bytes; + unsigned char unit_index; + unsigned char data[1]; +} TW_Ioctl; + +#pragma pack(1) + +/* Structure for new chardev ioctls */ +typedef struct TAG_TW_New_Ioctl { + unsigned int data_buffer_length; + unsigned char padding [508]; + TW_Command firmware_command; + char data_buffer[]; +} TW_New_Ioctl; + +/* GetParam descriptor */ +typedef struct { + unsigned short table_id; + unsigned char parameter_id; + unsigned char parameter_size_bytes; + unsigned char data[1]; +} TW_Param, *PTW_Param; + +/* Response queue */ +typedef union TAG_TW_Response_Queue { + u32 response_id; + u32 value; +} TW_Response_Queue; + +typedef int TW_Cmd_State; + +#define TW_S_INITIAL 0x1 /* Initial state */ +#define TW_S_STARTED 0x2 /* Id in use */ +#define TW_S_POSTED 0x4 /* Posted to the controller */ +#define TW_S_PENDING 0x8 /* Waiting to be posted in isr */ +#define TW_S_COMPLETED 0x10 /* Completed by isr */ +#define TW_S_FINISHED 0x20 /* I/O completely done */ +#define TW_START_MASK (TW_S_STARTED | TW_S_POSTED | TW_S_PENDING | TW_S_COMPLETED) + +/* Command header for ATA pass-thru */ +typedef struct TAG_TW_Passthru +{ + unsigned char opcode__sgloffset; + unsigned char size; + unsigned char request_id; + unsigned char aport__hostid; + unsigned char status; + unsigned char flags; + unsigned short param; + unsigned short features; + unsigned short sector_count; + unsigned short sector_num; + unsigned short cylinder_lo; + unsigned short cylinder_hi; + unsigned char drive_head; + unsigned char command; + TW_SG_Entry sg_list[TW_ATA_PASS_SGL_MAX]; + unsigned char padding[12]; +} TW_Passthru; + +#pragma pack() + +typedef struct TAG_TW_Device_Extension { + u32 base_addr; + unsigned long *alignment_virtual_address[TW_Q_LENGTH]; + unsigned long alignment_physical_address[TW_Q_LENGTH]; + int 
is_unit_present[TW_MAX_UNITS]; + unsigned long *command_packet_virtual_address[TW_Q_LENGTH]; + unsigned long command_packet_physical_address[TW_Q_LENGTH]; + struct pci_dev *tw_pci_dev; + struct scsi_cmnd *srb[TW_Q_LENGTH]; + unsigned char free_queue[TW_Q_LENGTH]; + unsigned char free_head; + unsigned char free_tail; + unsigned char pending_queue[TW_Q_LENGTH]; + unsigned char pending_head; + unsigned char pending_tail; + TW_Cmd_State state[TW_Q_LENGTH]; + u32 posted_request_count; + u32 max_posted_request_count; + u32 request_count_marked_pending; + u32 pending_request_count; + u32 max_pending_request_count; + u32 max_sgl_entries; + u32 sgl_entries; + u32 num_resets; + u32 sector_count; + u32 max_sector_count; + u32 aen_count; + struct Scsi_Host *host; + struct mutex ioctl_lock; + unsigned short aen_queue[TW_Q_LENGTH]; + unsigned char aen_head; + unsigned char aen_tail; + volatile long flags; /* long req'd for set_bit --RR */ + int reset_print; + volatile int chrdev_request_id; + wait_queue_head_t ioctl_wqueue; +} TW_Device_Extension; + +#endif /* _3W_XXXX_H */ diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c new file mode 100644 index 000000000..857be0f3a --- /dev/null +++ b/drivers/scsi/53c700.c @@ -0,0 +1,2118 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* NCR (or Symbios) 53c700 and 53c700-66 Driver + * + * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com +**----------------------------------------------------------------------------- +** +** +**----------------------------------------------------------------------------- + */ + +/* Notes: + * + * This driver is designed exclusively for these chips (virtually the + * earliest of the scripts engine chips). They need their own drivers + * because they are missing so many of the scripts and snazzy register + * features of their elder brothers (the 710, 720 and 770). + * + * The 700 is the lowliest of the line, it can only do async SCSI. + * The 700-66 can at least do synchronous SCSI up to 10MHz. + * + * The 700 chip has no host bus interface logic of its own. However, + * it is usually mapped to a location with well defined register + * offsets. Therefore, if you can determine the base address and the + * irq your board incorporating this chip uses, you can probably use + * this driver to run it (although you'll probably have to write a + * minimal wrapper for the purpose---see the NCR_D700 driver for + * details about how to do this). + * + * + * TODO List: + * + * 1. Better statistics in the proc fs + * + * 2. Implement message queue (queues SCSI messages like commands) and make + * the abort and device reset functions use them. + * */ + +/* CHANGELOG + * + * Version 2.8 + * + * Fixed bad bug affecting tag starvation processing (previously the + * driver would hang the system if too many tags starved. Also fixed + * bad bug having to do with 10 byte command processing and REQUEST + * SENSE (the command would loop forever getting a transfer length + * mismatch in the CMD phase). + * + * Version 2.7 + * + * Fixed scripts problem which caused certain devices (notably CDRWs) + * to hang on initial INQUIRY. Updated NCR_700_readl/writel to use + * __raw_readl/writel for parisc compatibility (Thomas + * Bogendoerfer). Added missing SCp->request_bufflen initialisation + * for sense requests (Ryan Bradetich). + * + * Version 2.6 + * + * Following test of the 64 bit parisc kernel by Richard Hirst, + * several problems have now been corrected. Also adds support for + * consistent memory allocation. 
+ * + * Version 2.5 + * + * More Compatibility changes for 710 (now actually works). Enhanced + * support for odd clock speeds which constrain SDTR negotiations. + * correct cacheline separation for scsi messages and status for + * incoherent architectures. Use of the pci mapping functions on + * buffers to begin support for 64 bit drivers. + * + * Version 2.4 + * + * Added support for the 53c710 chip (in 53c700 emulation mode only---no + * special 53c710 instructions or registers are used). + * + * Version 2.3 + * + * More endianness/cache coherency changes. + * + * Better bad device handling (handles devices lying about tag + * queueing support and devices which fail to provide sense data on + * contingent allegiance conditions) + * + * Many thanks to Richard Hirst for patiently + * debugging this driver on the parisc architecture and suggesting + * many improvements and bug fixes. + * + * Thanks also go to Linuxcare Inc. for providing several PARISC + * machines for me to debug the driver on. + * + * Version 2.2 + * + * Made the driver mem or io mapped; added endian invariance; added + * dma cache flushing operations for architectures which need it; + * added support for more varied clocking speeds. + * + * Version 2.1 + * + * Initial modularisation from the D700. See NCR_D700.c for the rest of + * the changelog. + * */ +#define NCR_700_VERSION "2.8" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "53c700.h" + +/* NOTE: For 64 bit drivers there are points in the code where we use + * a non dereferenceable pointer to point to a structure in dma-able + * memory (which is 32 bits) so that we can use all of the structure + * operations but take the address at the end. 
This macro allows us + * to truncate the 64 bit pointer down to 32 bits without the compiler + * complaining */ +#define to32bit(x) ((__u32)((unsigned long)(x))) + +#ifdef NCR_700_DEBUG +#define STATIC +#else +#define STATIC static +#endif + +MODULE_AUTHOR("James Bottomley"); +MODULE_DESCRIPTION("53c700 and 53c700-66 Driver"); +MODULE_LICENSE("GPL"); + +/* This is the script */ +#include "53c700_d.h" + + +STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *); +STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt); +STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt); +STATIC void NCR_700_chip_setup(struct Scsi_Host *host); +STATIC void NCR_700_chip_reset(struct Scsi_Host *host); +STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt); +STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt); +STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt); +static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth); + +STATIC const struct attribute_group *NCR_700_dev_groups[]; + +STATIC struct scsi_transport_template *NCR_700_transport_template = NULL; + +static char *NCR_700_phase[] = { + "", + "after selection", + "before command phase", + "after command phase", + "after status phase", + "after data in phase", + "after data out phase", + "during data phase", +}; + +static char *NCR_700_condition[] = { + "", + "NOT MSG_OUT", + "UNEXPECTED PHASE", + "NOT MSG_IN", + "UNEXPECTED MSG", + "MSG_IN", + "SDTR_MSG RECEIVED", + "REJECT_MSG RECEIVED", + "DISCONNECT_MSG RECEIVED", + "MSG_OUT", + "DATA_IN", + +}; + +static char *NCR_700_fatal_messages[] = { + "unexpected message after reselection", + "still MSG_OUT after message injection", + "not MSG_IN after selection", + "Illegal message length received", +}; + +static char *NCR_700_SBCL_bits[] = { + "IO ", + "CD ", + "MSG ", + "ATN ", + "SEL ", + "BSY ", + "ACK ", + "REQ ", +}; + +static char *NCR_700_SBCL_to_phase[] = { + "DATA_OUT", + "DATA_IN", + "CMD_OUT", + "STATE", + "ILLEGAL PHASE", + "ILLEGAL PHASE", + "MSG OUT", + "MSG IN", +}; + +/* This translates the SDTR message offset and period to a value + * which can be loaded into the SXFER_REG. + * + * NOTE: According to SCSI-2, the true transfer period (in ns) is + * actually four times this period value */ +static inline __u8 +NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata, + __u8 offset, __u8 period) +{ + int XFERP; + + __u8 min_xferp = (hostdata->chip710 + ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP); + __u8 max_offset = (hostdata->chip710 + ? 
NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET); + + if(offset == 0) + return 0; + + if(period < hostdata->min_period) { + printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, NCR_700_MIN_PERIOD*4); + period = hostdata->min_period; + } + XFERP = (period*4 * hostdata->sync_clock)/1000 - 4; + if(offset > max_offset) { + printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n", + offset, max_offset); + offset = max_offset; + } + if(XFERP < min_xferp) { + XFERP = min_xferp; + } + return (offset & 0x0f) | (XFERP & 0x07)<<4; +} + +static inline __u8 +NCR_700_get_SXFER(struct scsi_device *SDp) +{ + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0]; + + return NCR_700_offset_period_to_sxfer(hostdata, + spi_offset(SDp->sdev_target), + spi_period(SDp->sdev_target)); +} + +static inline dma_addr_t virt_to_dma(struct NCR_700_Host_Parameters *h, void *p) +{ + return h->pScript + ((uintptr_t)p - (uintptr_t)h->script); +} + +static inline void dma_sync_to_dev(struct NCR_700_Host_Parameters *h, + void *addr, size_t size) +{ + if (h->noncoherent) + dma_sync_single_for_device(h->dev, virt_to_dma(h, addr), + size, DMA_BIDIRECTIONAL); +} + +static inline void dma_sync_from_dev(struct NCR_700_Host_Parameters *h, + void *addr, size_t size) +{ + if (h->noncoherent) + dma_sync_single_for_device(h->dev, virt_to_dma(h, addr), size, + DMA_BIDIRECTIONAL); +} + +struct Scsi_Host * +NCR_700_detect(struct scsi_host_template *tpnt, + struct NCR_700_Host_Parameters *hostdata, struct device *dev) +{ + dma_addr_t pScript, pSlots; + __u8 *memory; + __u32 *script; + struct Scsi_Host *host; + static int banner = 0; + int j; + + if (tpnt->sdev_groups == NULL) + tpnt->sdev_groups = NCR_700_dev_groups; + + memory = dma_alloc_coherent(dev, TOTAL_MEM_SIZE, &pScript, GFP_KERNEL); + if (!memory) { + hostdata->noncoherent = 1; + memory = dma_alloc_noncoherent(dev, TOTAL_MEM_SIZE, &pScript, + DMA_BIDIRECTIONAL, GFP_KERNEL); + } + if (!memory) { + printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n"); + return NULL; + } + + script = (__u32 *)memory; + hostdata->msgin = memory + MSGIN_OFFSET; + hostdata->msgout = memory + MSGOUT_OFFSET; + hostdata->status = memory + STATUS_OFFSET; + hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET); + hostdata->dev = dev; + + pSlots = pScript + SLOTS_OFFSET; + + /* Fill in the missing routines from the host template */ + tpnt->queuecommand = NCR_700_queuecommand; + tpnt->eh_abort_handler = NCR_700_abort; + tpnt->eh_host_reset_handler = NCR_700_host_reset; + tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST; + tpnt->sg_tablesize = NCR_700_SG_SEGMENTS; + tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN; + tpnt->slave_configure = NCR_700_slave_configure; + tpnt->slave_destroy = NCR_700_slave_destroy; + tpnt->slave_alloc = NCR_700_slave_alloc; + tpnt->change_queue_depth = NCR_700_change_queue_depth; + + if(tpnt->name == NULL) + tpnt->name = "53c700"; + if(tpnt->proc_name == NULL) + tpnt->proc_name = "53c700"; + + host = scsi_host_alloc(tpnt, 4); + if (!host) + return NULL; + memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot) + * NCR_700_COMMAND_SLOTS_PER_HOST); + for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) { + dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0] + - (unsigned long)&hostdata->slots[0].SG[0]); + hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset)); + if(j == 0) + 
hostdata->free_list = &hostdata->slots[j]; + else + hostdata->slots[j-1].ITL_forw = &hostdata->slots[j]; + hostdata->slots[j].state = NCR_700_SLOT_FREE; + } + + for (j = 0; j < ARRAY_SIZE(SCRIPT); j++) + script[j] = bS_to_host(SCRIPT[j]); + + /* adjust all labels to be bus physical */ + for (j = 0; j < PATCHES; j++) + script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]); + /* now patch up fixed addresses. */ + script_patch_32(hostdata, script, MessageLocation, + pScript + MSGOUT_OFFSET); + script_patch_32(hostdata, script, StatusAddress, + pScript + STATUS_OFFSET); + script_patch_32(hostdata, script, ReceiveMsgAddress, + pScript + MSGIN_OFFSET); + + hostdata->script = script; + hostdata->pScript = pScript; + dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE); + hostdata->state = NCR_700_HOST_FREE; + hostdata->cmd = NULL; + host->max_id = 8; + host->max_lun = NCR_700_MAX_LUNS; + BUG_ON(NCR_700_transport_template == NULL); + host->transportt = NCR_700_transport_template; + host->unique_id = (unsigned long)hostdata->base; + hostdata->eh_complete = NULL; + host->hostdata[0] = (unsigned long)hostdata; + /* kick the chip */ + NCR_700_writeb(0xff, host, CTEST9_REG); + if (hostdata->chip710) + hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f; + else + hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f; + hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0); + if (banner == 0) { + printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n"); + banner = 1; + } + printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no, + hostdata->chip710 ? "53c710" : + (hostdata->fast ? "53c700-66" : "53c700"), + hostdata->rev, hostdata->differential ? + "(Differential)" : ""); + /* reset the chip */ + NCR_700_chip_reset(host); + + if (scsi_add_host(host, dev)) { + dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n"); + scsi_host_put(host); + return NULL; + } + + spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD : + SPI_SIGNAL_SE; + + return host; +} + +int +NCR_700_release(struct Scsi_Host *host) +{ + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)host->hostdata[0]; + + if (hostdata->noncoherent) + dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE, + hostdata->script, hostdata->pScript, + DMA_BIDIRECTIONAL); + else + dma_free_coherent(hostdata->dev, TOTAL_MEM_SIZE, + hostdata->script, hostdata->pScript); + return 1; +} + +static inline __u8 +NCR_700_identify(int can_disconnect, __u8 lun) +{ + return IDENTIFY_BASE | + ((can_disconnect) ? 0x40 : 0) | + (lun & NCR_700_LUN_MASK); +} + +/* + * Function : static int data_residual (Scsi_Host *host) + * + * Purpose : return residual data count of what's in the chip. If you + * really want to know what this function is doing, it's almost a + * direct transcription of the algorithm described in the 53c710 + * guide, except that the DBC and DFIFO registers are only 6 bits + * wide on a 53c700. 
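+ * + * Worked example (hypothetical register values, for illustration only): on a 53c700 with DFIFO reading 0x23 and the low six bits of DBC reading 0x1f, the FIFO holds (0x23 - 0x1f) & 0x3f = 4 bytes; for an asynchronous receive with a byte still latched in SIDL one more is added, giving a residual of 5.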
+ * + * Inputs : host - SCSI host */ +static inline int +NCR_700_data_residual (struct Scsi_Host *host) { + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)host->hostdata[0]; + int count, synchronous = 0; + unsigned int ddir; + + if(hostdata->chip710) { + count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) - + (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f; + } else { + count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) - + (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f; + } + + if(hostdata->fast) + synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f; + + /* get the data direction */ + ddir = NCR_700_readb(host, CTEST0_REG) & 0x01; + + if (ddir) { + /* Receive */ + if (synchronous) + count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4; + else + if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL) + ++count; + } else { + /* Send */ + __u8 sstat = NCR_700_readb(host, SSTAT1_REG); + if (sstat & SODL_REG_FULL) + ++count; + if (synchronous && (sstat & SODR_REG_FULL)) + ++count; + } +#ifdef NCR_700_DEBUG + if(count) + printk("RESIDUAL IS %d (ddir %d)\n", count, ddir); +#endif + return count; +} + +/* print out the SCSI wires and corresponding phase from the SBCL register + * in the chip */ +static inline char * +sbcl_to_string(__u8 sbcl) +{ + int i; + static char ret[256]; + + ret[0]='\0'; + for(i=0; i<8; i++) { + if((1<free_list; + + if(slot == NULL) { + /* sanity check */ + if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST) + printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST); + return NULL; + } + + if(slot->state != NCR_700_SLOT_FREE) + /* should panic! */ + printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n"); + + + hostdata->free_list = slot->ITL_forw; + slot->ITL_forw = NULL; + + + /* NOTE: set the state to busy here, not queued, since this + * indicates the slot is in use and cannot be run by the IRQ + * finish routine. If we cannot queue the command when it + * is properly build, we then change to NCR_700_SLOT_QUEUED */ + slot->state = NCR_700_SLOT_BUSY; + slot->flags = 0; + hostdata->command_slot_count++; + + return slot; +} + +STATIC void +free_slot(struct NCR_700_command_slot *slot, + struct NCR_700_Host_Parameters *hostdata) +{ + if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) { + printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot); + } + if(slot->state == NCR_700_SLOT_FREE) { + printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot); + } + + slot->resume_offset = 0; + slot->cmnd = NULL; + slot->state = NCR_700_SLOT_FREE; + slot->ITL_forw = hostdata->free_list; + hostdata->free_list = slot; + hostdata->command_slot_count--; +} + + +/* This routine really does very little. 
The command is indexed on + the ITL and (if tagged) the ITLQ lists in _queuecommand */ +STATIC void +save_for_reselection(struct NCR_700_Host_Parameters *hostdata, + struct scsi_cmnd *SCp, __u32 dsp) +{ + /* Its just possible that this gets executed twice */ + if(SCp != NULL) { + struct NCR_700_command_slot *slot = + (struct NCR_700_command_slot *)SCp->host_scribble; + + slot->resume_offset = dsp; + } + hostdata->state = NCR_700_HOST_FREE; + hostdata->cmd = NULL; +} + +STATIC inline void +NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp, + struct NCR_700_command_slot *slot) +{ + if(SCp->sc_data_direction != DMA_NONE && + SCp->sc_data_direction != DMA_BIDIRECTIONAL) + scsi_dma_unmap(SCp); +} + +STATIC inline void +NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata, + struct scsi_cmnd *SCp, int result) +{ + hostdata->state = NCR_700_HOST_FREE; + hostdata->cmd = NULL; + + if(SCp != NULL) { + struct NCR_700_command_slot *slot = + (struct NCR_700_command_slot *)SCp->host_scribble; + + dma_unmap_single(hostdata->dev, slot->pCmd, + MAX_COMMAND_SIZE, DMA_TO_DEVICE); + if (slot->flags == NCR_700_FLAG_AUTOSENSE) { + char *cmnd = NCR_700_get_sense_cmnd(SCp->device); + + dma_unmap_single(hostdata->dev, slot->dma_handle, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + /* restore the old result if the request sense was + * successful */ + if (result == 0) + result = cmnd[7]; + /* restore the original length */ + SCp->cmd_len = cmnd[8]; + } else + NCR_700_unmap(hostdata, SCp, slot); + + free_slot(slot, hostdata); +#ifdef NCR_700_DEBUG + if(NCR_700_get_depth(SCp->device) == 0 || + NCR_700_get_depth(SCp->device) > SCp->device->queue_depth) + printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n", + NCR_700_get_depth(SCp->device)); +#endif /* NCR_700_DEBUG */ + NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1); + + SCp->host_scribble = NULL; + SCp->result = result; + scsi_done(SCp); + } else { + printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n"); + } +} + + +STATIC void +NCR_700_internal_bus_reset(struct Scsi_Host *host) +{ + /* Bus reset */ + NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG); + udelay(50); + NCR_700_writeb(0, host, SCNTL1_REG); + +} + +STATIC void +NCR_700_chip_setup(struct Scsi_Host *host) +{ + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)host->hostdata[0]; + __u8 min_period; + __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP); + + if(hostdata->chip710) { + __u8 burst_disable = 0; + __u8 burst_length = 0; + + switch (hostdata->burst_length) { + case 1: + burst_length = BURST_LENGTH_1; + break; + case 2: + burst_length = BURST_LENGTH_2; + break; + case 4: + burst_length = BURST_LENGTH_4; + break; + case 8: + burst_length = BURST_LENGTH_8; + break; + default: + burst_disable = BURST_DISABLE; + break; + } + hostdata->dcntl_extra |= COMPAT_700_MODE; + + NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG); + NCR_700_writeb(burst_length | hostdata->dmode_extra, + host, DMODE_710_REG); + NCR_700_writeb(burst_disable | hostdata->ctest7_extra | + (hostdata->differential ? DIFF : 0), + host, CTEST7_REG); + NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG); + NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY + | AUTO_ATN, host, SCNTL0_REG); + } else { + NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra, + host, DMODE_700_REG); + NCR_700_writeb(hostdata->differential ? 
+ DIFF : 0, host, CTEST7_REG); + if(hostdata->fast) { + /* this is for 700-66, does nothing on 700 */ + NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION + | GENERATE_RECEIVE_PARITY, host, + CTEST8_REG); + } else { + NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY + | PARITY | AUTO_ATN, host, SCNTL0_REG); + } + } + + NCR_700_writeb(1 << host->this_id, host, SCID_REG); + NCR_700_writeb(0, host, SBCL_REG); + NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG); + + NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT + | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG); + + NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG); + NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG); + if(hostdata->clock > 75) { + printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75Mhz is the maximum this chip can be driven at\n", hostdata->clock); + /* do the best we can, but the async clock will be out + * of spec: sync divider 2, async divider 3 */ + DEBUG(("53c700: sync 2 async 3\n")); + NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG); + NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG); + hostdata->sync_clock = hostdata->clock/2; + } else if(hostdata->clock > 50 && hostdata->clock <= 75) { + /* sync divider 1.5, async divider 3 */ + DEBUG(("53c700: sync 1.5 async 3\n")); + NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG); + NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG); + hostdata->sync_clock = hostdata->clock*2; + hostdata->sync_clock /= 3; + + } else if(hostdata->clock > 37 && hostdata->clock <= 50) { + /* sync divider 1, async divider 2 */ + DEBUG(("53c700: sync 1 async 2\n")); + NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG); + NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG); + hostdata->sync_clock = hostdata->clock; + } else if(hostdata->clock > 25 && hostdata->clock <=37) { + /* sync divider 1, async divider 1.5 */ + DEBUG(("53c700: sync 1 async 1.5\n")); + NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG); + NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG); + hostdata->sync_clock = hostdata->clock; + } else { + DEBUG(("53c700: sync 1 async 1\n")); + NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG); + NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG); + /* sync divider 1, async divider 1 */ + hostdata->sync_clock = hostdata->clock; + } + /* Calculate the actual minimum period that can be supported + * by our synchronous clock speed. See the 710 manual for + * exact details of this calculation which is based on a + * setting of the SXFER register */ + min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock); + hostdata->min_period = NCR_700_MIN_PERIOD; + if(min_period > NCR_700_MIN_PERIOD) + hostdata->min_period = min_period; +} + +STATIC void +NCR_700_chip_reset(struct Scsi_Host *host) +{ + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)host->hostdata[0]; + if(hostdata->chip710) { + NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG); + udelay(100); + + NCR_700_writeb(0, host, ISTAT_REG); + } else { + NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG); + udelay(100); + + NCR_700_writeb(0, host, DCNTL_REG); + } + + mdelay(1000); + + NCR_700_chip_setup(host); +} + +/* The heart of the message processing engine is that the instruction + * immediately after the INT is the normal case (and so must be CLEAR + * ACK). 
If we want to do something else, we call that routine in + * scripts and set temp to be the normal case + 8 (skipping the CLEAR + * ACK) so that the routine returns correctly to resume its activity + * */ +STATIC __u32 +process_extended_message(struct Scsi_Host *host, + struct NCR_700_Host_Parameters *hostdata, + struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps) +{ + __u32 resume_offset = dsp, temp = dsp + 8; + __u8 pun = 0xff, lun = 0xff; + + if(SCp != NULL) { + pun = SCp->device->id; + lun = SCp->device->lun; + } + + switch(hostdata->msgin[2]) { + case A_SDTR_MSG: + if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) { + struct scsi_target *starget = SCp->device->sdev_target; + __u8 period = hostdata->msgin[3]; + __u8 offset = hostdata->msgin[4]; + + if(offset == 0 || period == 0) { + offset = 0; + period = 0; + } + + spi_offset(starget) = offset; + spi_period(starget) = period; + + if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) { + spi_display_xfer_agreement(starget); + NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION); + } + + NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC); + NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION); + + NCR_700_writeb(NCR_700_get_SXFER(SCp->device), + host, SXFER_REG); + + } else { + /* SDTR message out of the blue, reject it */ + shost_printk(KERN_WARNING, host, + "Unexpected SDTR msg\n"); + hostdata->msgout[0] = A_REJECT_MSG; + dma_sync_to_dev(hostdata, hostdata->msgout, 1); + script_patch_16(hostdata, hostdata->script, + MessageCount, 1); + /* SendMsgOut returns, so set up the return + * address */ + resume_offset = hostdata->pScript + Ent_SendMessageWithATN; + } + break; + + case A_WDTR_MSG: + printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n", + host->host_no, pun, lun); + hostdata->msgout[0] = A_REJECT_MSG; + dma_sync_to_dev(hostdata, hostdata->msgout, 1); + script_patch_16(hostdata, hostdata->script, MessageCount, 1); + resume_offset = hostdata->pScript + Ent_SendMessageWithATN; + + break; + + default: + printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ", + host->host_no, pun, lun, + NCR_700_phase[(dsps & 0xf00) >> 8]); + spi_print_msg(hostdata->msgin); + printk("\n"); + /* just reject it */ + hostdata->msgout[0] = A_REJECT_MSG; + dma_sync_to_dev(hostdata, hostdata->msgout, 1); + script_patch_16(hostdata, hostdata->script, MessageCount, 1); + /* SendMsgOut returns, so set up the return + * address */ + resume_offset = hostdata->pScript + Ent_SendMessageWithATN; + } + NCR_700_writel(temp, host, TEMP_REG); + return resume_offset; +} + +STATIC __u32 +process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata, + struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps) +{ + /* work out where to return to */ + __u32 temp = dsp + 8, resume_offset = dsp; + __u8 pun = 0xff, lun = 0xff; + + if(SCp != NULL) { + pun = SCp->device->id; + lun = SCp->device->lun; + } + +#ifdef NCR_700_DEBUG + printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun, + NCR_700_phase[(dsps & 0xf00) >> 8]); + spi_print_msg(hostdata->msgin); + printk("\n"); +#endif + + switch(hostdata->msgin[0]) { + + case A_EXTENDED_MSG: + resume_offset = process_extended_message(host, hostdata, SCp, + dsp, dsps); + break; + + case A_REJECT_MSG: + if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) { + /* Rejected our sync negotiation attempt */ + spi_period(SCp->device->sdev_target) = + 
spi_offset(SCp->device->sdev_target) = 0; + NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC); + NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION); + } else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) { + /* rejected our first simple tag message */ + scmd_printk(KERN_WARNING, SCp, + "Rejected first tag queue attempt, turning off tag queueing\n"); + /* we're done negotiating */ + NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION); + hostdata->tag_negotiated &= ~(1<device->tagged_supported = 0; + SCp->device->simple_tags = 0; + scsi_change_queue_depth(SCp->device, host->cmd_per_lun); + } else { + shost_printk(KERN_WARNING, host, + "(%d:%d) Unexpected REJECT Message %s\n", + pun, lun, + NCR_700_phase[(dsps & 0xf00) >> 8]); + /* however, just ignore it */ + } + break; + + case A_PARITY_ERROR_MSG: + printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no, + pun, lun); + NCR_700_internal_bus_reset(host); + break; + case A_SIMPLE_TAG_MSG: + printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no, + pun, lun, hostdata->msgin[1], + NCR_700_phase[(dsps & 0xf00) >> 8]); + /* just ignore it */ + break; + default: + printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ", + host->host_no, pun, lun, + NCR_700_phase[(dsps & 0xf00) >> 8]); + + spi_print_msg(hostdata->msgin); + printk("\n"); + /* just reject it */ + hostdata->msgout[0] = A_REJECT_MSG; + dma_sync_to_dev(hostdata, hostdata->msgout, 1); + script_patch_16(hostdata, hostdata->script, MessageCount, 1); + /* SendMsgOut returns, so set up the return + * address */ + resume_offset = hostdata->pScript + Ent_SendMessageWithATN; + + break; + } + NCR_700_writel(temp, host, TEMP_REG); + /* set us up to receive another message */ + dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE); + return resume_offset; +} + +STATIC __u32 +process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp, + struct Scsi_Host *host, + struct NCR_700_Host_Parameters *hostdata) +{ + __u32 resume_offset = 0; + __u8 pun = 0xff, lun=0xff; + + if(SCp != NULL) { + pun = SCp->device->id; + lun = SCp->device->lun; + } + + if(dsps == A_GOOD_STATUS_AFTER_STATUS) { + DEBUG((" COMMAND COMPLETE, status=%02x\n", + hostdata->status[0])); + /* OK, if TCQ still under negotiation, we now know it works */ + if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) + NCR_700_set_tag_neg_state(SCp->device, + NCR_700_FINISHED_TAG_NEGOTIATION); + + /* check for contingent allegiance conditions */ + if (hostdata->status[0] == SAM_STAT_CHECK_CONDITION || + hostdata->status[0] == SAM_STAT_COMMAND_TERMINATED) { + struct NCR_700_command_slot *slot = + (struct NCR_700_command_slot *)SCp->host_scribble; + if(slot->flags == NCR_700_FLAG_AUTOSENSE) { + /* OOPS: bad device, returning another + * contingent allegiance condition */ + scmd_printk(KERN_ERR, SCp, + "broken device is looping in contingent allegiance: ignoring\n"); + NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]); + } else { + char *cmnd = + NCR_700_get_sense_cmnd(SCp->device); +#ifdef NCR_DEBUG + scsi_print_command(SCp); + printk(" cmd %p has status %d, requesting sense\n", + SCp, hostdata->status[0]); +#endif + /* we can destroy the command here + * because the contingent allegiance + * condition will cause a retry which + * will re-copy the command from the + * saved data_cmnd. 
We also unmap any + * data associated with the command + * here */ + NCR_700_unmap(hostdata, SCp, slot); + dma_unmap_single(hostdata->dev, slot->pCmd, + MAX_COMMAND_SIZE, + DMA_TO_DEVICE); + + cmnd[0] = REQUEST_SENSE; + cmnd[1] = (lun & 0x7) << 5; + cmnd[2] = 0; + cmnd[3] = 0; + cmnd[4] = SCSI_SENSE_BUFFERSIZE; + cmnd[5] = 0; + /* Here's a quiet hack: the + * REQUEST_SENSE command is six bytes, + * so store a flag indicating that + * this was an internal sense request + * and the original status at the end + * of the command */ + cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC; + cmnd[7] = hostdata->status[0]; + cmnd[8] = SCp->cmd_len; + SCp->cmd_len = 6; /* command length for + * REQUEST_SENSE */ + slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE); + slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE); + slot->SG[0].pAddr = bS_to_host(slot->dma_handle); + slot->SG[1].ins = bS_to_host(SCRIPT_RETURN); + slot->SG[1].pAddr = 0; + slot->resume_offset = hostdata->pScript; + dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG[0])*2); + dma_sync_from_dev(hostdata, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE); + + /* queue the command for reissue */ + slot->state = NCR_700_SLOT_QUEUED; + slot->flags = NCR_700_FLAG_AUTOSENSE; + hostdata->state = NCR_700_HOST_FREE; + hostdata->cmd = NULL; + } + } else { + // Currently rely on the mid layer evaluation + // of the tag queuing capability + // + //if(status_byte(hostdata->status[0]) == GOOD && + // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) { + // /* Piggy back the tag queueing support + // * on this command */ + // dma_sync_single_for_cpu(hostdata->dev, + // slot->dma_handle, + // SCp->request_bufflen, + // DMA_FROM_DEVICE); + // if(((char *)SCp->request_buffer)[7] & 0x02) { + // scmd_printk(KERN_INFO, SCp, + // "Enabling Tag Command Queuing\n"); + // hostdata->tag_negotiated |= (1<device, NCR_700_DEV_BEGIN_TAG_QUEUEING); + // } else { + // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING); + // hostdata->tag_negotiated &= ~(1<status[0]); + } + } else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) { + __u8 i = (dsps & 0xf00) >> 8; + + scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n", + NCR_700_phase[i], + sbcl_to_string(NCR_700_readb(host, SBCL_REG))); + scmd_printk(KERN_ERR, SCp, " len = %d, cmd =", + SCp->cmd_len); + scsi_print_command(SCp); + + NCR_700_internal_bus_reset(host); + } else if((dsps & 0xfffff000) == A_FATAL) { + int i = (dsps & 0xfff); + + printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n", + host->host_no, pun, lun, NCR_700_fatal_messages[i]); + if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) { + printk(KERN_ERR " msg begins %02x %02x\n", + hostdata->msgin[0], hostdata->msgin[1]); + } + NCR_700_internal_bus_reset(host); + } else if((dsps & 0xfffff0f0) == A_DISCONNECT) { +#ifdef NCR_700_DEBUG + __u8 i = (dsps & 0xf00) >> 8; + + printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n", + host->host_no, pun, lun, + i, NCR_700_phase[i]); +#endif + save_for_reselection(hostdata, SCp, dsp); + + } else if(dsps == A_RESELECTION_IDENTIFIED) { + __u8 lun; + struct NCR_700_command_slot *slot; + __u8 reselection_id = hostdata->reselection_id; + struct scsi_device *SDp; + + lun = hostdata->msgin[0] & 0x1f; + + hostdata->reselection_id = 0xff; + DEBUG(("scsi%d: (%d:%d) RESELECTED!\n", + host->host_no, reselection_id, lun)); + /* clear the reselection indicator */ + SDp = 
__scsi_device_lookup(host, 0, reselection_id, lun); + if(unlikely(SDp == NULL)) { + printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n", + host->host_no, reselection_id, lun); + BUG(); + } + if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) { + struct scsi_cmnd *SCp; + + SCp = scsi_host_find_tag(SDp->host, hostdata->msgin[2]); + if(unlikely(SCp == NULL)) { + printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n", + host->host_no, reselection_id, lun, hostdata->msgin[2]); + BUG(); + } + + slot = (struct NCR_700_command_slot *)SCp->host_scribble; + DDEBUG(KERN_DEBUG, SDp, + "reselection is tag %d, slot %p(%d)\n", + hostdata->msgin[2], slot, slot->tag); + } else { + struct NCR_700_Device_Parameters *p = SDp->hostdata; + struct scsi_cmnd *SCp = p->current_cmnd; + + if(unlikely(SCp == NULL)) { + sdev_printk(KERN_ERR, SDp, + "no saved request for untagged cmd\n"); + BUG(); + } + slot = (struct NCR_700_command_slot *)SCp->host_scribble; + } + + if(slot == NULL) { + printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n", + host->host_no, reselection_id, lun, + hostdata->msgin[0], hostdata->msgin[1], + hostdata->msgin[2]); + } else { + if(hostdata->state != NCR_700_HOST_BUSY) + printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n", + host->host_no); + resume_offset = slot->resume_offset; + hostdata->cmd = slot->cmnd; + + /* re-patch for this command */ + script_patch_32_abs(hostdata, hostdata->script, + CommandAddress, slot->pCmd); + script_patch_16(hostdata, hostdata->script, + CommandCount, slot->cmnd->cmd_len); + script_patch_32_abs(hostdata, hostdata->script, + SGScriptStartAddress, + to32bit(&slot->pSG[0].ins)); + + /* Note: setting SXFER only works if we're + * still in the MESSAGE phase, so it is vital + * that ACK is still asserted when we process + * the reselection message. The resume offset + * should therefore always clear ACK */ + NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device), + host, SXFER_REG); + dma_sync_from_dev(hostdata, hostdata->msgin, + MSG_ARRAY_SIZE); + dma_sync_to_dev(hostdata, hostdata->msgout, + MSG_ARRAY_SIZE); + /* I'm just being paranoid here, the command should + * already have been flushed from the cache */ + dma_sync_to_dev(hostdata, slot->cmnd->cmnd, + slot->cmnd->cmd_len); + + + + } + } else if(dsps == A_RESELECTED_DURING_SELECTION) { + + /* This section is full of debugging code because I've + * never managed to reach it. 
I think what happens is + * that, because the 700 runs with selection + * interrupts enabled the whole time that we take a + * selection interrupt before we manage to get to the + * reselected script interrupt */ + + __u8 reselection_id = NCR_700_readb(host, SFBR_REG); + struct NCR_700_command_slot *slot; + + /* Take out our own ID */ + reselection_id &= ~(1<this_id); + + /* I've never seen this happen, so keep this as a printk rather + * than a debug */ + printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n", + host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count); + + { + /* FIXME: DEBUGGING CODE */ + __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]); + int i; + + for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) { + if(SG >= to32bit(&hostdata->slots[i].pSG[0]) + && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS])) + break; + } + printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset); + SCp = hostdata->slots[i].cmnd; + } + + if(SCp != NULL) { + slot = (struct NCR_700_command_slot *)SCp->host_scribble; + /* change slot from busy to queued to redo command */ + slot->state = NCR_700_SLOT_QUEUED; + } + hostdata->cmd = NULL; + + if(reselection_id == 0) { + if(hostdata->reselection_id == 0xff) { + printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no); + return 0; + } else { + printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n", + host->host_no); + reselection_id = hostdata->reselection_id; + } + } else { + + /* convert to real ID */ + reselection_id = bitmap_to_number(reselection_id); + } + hostdata->reselection_id = reselection_id; + /* just in case we have a stale simple tag message, clear it */ + hostdata->msgin[1] = 0; + dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE); + if(hostdata->tag_negotiated & (1<pScript + Ent_GetReselectionWithTag; + } else { + resume_offset = hostdata->pScript + Ent_GetReselectionData; + } + } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) { + /* we've just disconnected from the bus, do nothing since + * a return here will re-run the queued command slot + * that may have been interrupted by the initial selection */ + DEBUG((" SELECTION COMPLETED\n")); + } else if((dsps & 0xfffff0f0) == A_MSG_IN) { + resume_offset = process_message(host, hostdata, SCp, + dsp, dsps); + } else if((dsps & 0xfffff000) == 0) { + __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8; + printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n", + host->host_no, pun, lun, NCR_700_condition[i], + NCR_700_phase[j], dsp - hostdata->pScript); + if(SCp != NULL) { + struct scatterlist *sg; + + scsi_print_command(SCp); + scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) { + printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr); + } + } + NCR_700_internal_bus_reset(host); + } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) { + printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n", + host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript); + resume_offset = dsp; + } else { + printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 
0x%x at %04x\n", + host->host_no, pun, lun, dsps, dsp - hostdata->pScript); + NCR_700_internal_bus_reset(host); + } + return resume_offset; +} + +/* We run the 53c700 with selection interrupts always enabled. This + * means that the chip may be selected as soon as the bus frees. On a + * busy bus, this can be before the scripts engine finishes its + * processing. Therefore, part of the selection processing has to be + * to find out what the scripts engine is doing and complete the + * function if necessary (i.e. process the pending disconnect or save + * the interrupted initial selection */ +STATIC inline __u32 +process_selection(struct Scsi_Host *host, __u32 dsp) +{ + __u8 id = 0; /* Squash compiler warning */ + int count = 0; + __u32 resume_offset = 0; + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)host->hostdata[0]; + struct scsi_cmnd *SCp = hostdata->cmd; + __u8 sbcl; + + for(count = 0; count < 5; count++) { + id = NCR_700_readb(host, hostdata->chip710 ? + CTEST9_REG : SFBR_REG); + + /* Take out our own ID */ + id &= ~(1<this_id); + if(id != 0) + break; + udelay(5); + } + sbcl = NCR_700_readb(host, SBCL_REG); + if((sbcl & SBCL_IO) == 0) { + /* mark as having been selected rather than reselected */ + id = 0xff; + } else { + /* convert to real ID */ + hostdata->reselection_id = id = bitmap_to_number(id); + DEBUG(("scsi%d: Reselected by %d\n", + host->host_no, id)); + } + if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) { + struct NCR_700_command_slot *slot = + (struct NCR_700_command_slot *)SCp->host_scribble; + DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset)); + + switch(dsp - hostdata->pScript) { + case Ent_Disconnect1: + case Ent_Disconnect2: + save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript); + break; + case Ent_Disconnect3: + case Ent_Disconnect4: + save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript); + break; + case Ent_Disconnect5: + case Ent_Disconnect6: + save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript); + break; + case Ent_Disconnect7: + case Ent_Disconnect8: + save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript); + break; + case Ent_Finish1: + case Ent_Finish2: + process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata); + break; + + default: + slot->state = NCR_700_SLOT_QUEUED; + break; + } + } + hostdata->state = NCR_700_HOST_BUSY; + hostdata->cmd = NULL; + /* clear any stale simple tag message */ + hostdata->msgin[1] = 0; + dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE); + + if(id == 0xff) { + /* Selected as target, Ignore */ + resume_offset = hostdata->pScript + Ent_SelectedAsTarget; + } else if(hostdata->tag_negotiated & (1<pScript + Ent_GetReselectionWithTag; + } else { + resume_offset = hostdata->pScript + Ent_GetReselectionData; + } + return resume_offset; +} + +static inline void +NCR_700_clear_fifo(struct Scsi_Host *host) { + const struct NCR_700_Host_Parameters *hostdata + = (struct NCR_700_Host_Parameters *)host->hostdata[0]; + if(hostdata->chip710) { + NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG); + } else { + NCR_700_writeb(CLR_FIFO, host, DFIFO_REG); + } +} + +static inline void +NCR_700_flush_fifo(struct Scsi_Host *host) { + const struct NCR_700_Host_Parameters *hostdata + = (struct NCR_700_Host_Parameters *)host->hostdata[0]; + if(hostdata->chip710) { + 
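/* 710: flush the DMA FIFO through CTEST8 (pulse the flush bit, wait briefly, then clear it); the 700/700-66 branch below does the same through DFIFO */ +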
NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG); + udelay(10); + NCR_700_writeb(0, host, CTEST8_REG); + } else { + NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG); + udelay(10); + NCR_700_writeb(0, host, DFIFO_REG); + } +} + + +/* The queue lock with interrupts disabled must be held on entry to + * this function */ +STATIC int +NCR_700_start_command(struct scsi_cmnd *SCp) +{ + struct NCR_700_command_slot *slot = + (struct NCR_700_command_slot *)SCp->host_scribble; + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; + __u16 count = 1; /* for IDENTIFY message */ + u8 lun = SCp->device->lun; + + if(hostdata->state != NCR_700_HOST_FREE) { + /* keep this inside the lock to close the race window where + * the running command finishes on another CPU while we don't + * change the state to queued on this one */ + slot->state = NCR_700_SLOT_QUEUED; + + DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n", + SCp->device->host->host_no, slot->cmnd, slot)); + return 0; + } + hostdata->state = NCR_700_HOST_BUSY; + hostdata->cmd = SCp; + slot->state = NCR_700_SLOT_BUSY; + /* keep interrupts disabled until we have the command correctly + * set up so we cannot take a selection interrupt */ + + hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE && + slot->flags != NCR_700_FLAG_AUTOSENSE), + lun); + /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure + * if the negotiated transfer parameters still hold, so + * always renegotiate them */ + if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE || + slot->flags == NCR_700_FLAG_AUTOSENSE) { + NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC); + } + + /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status. 
+ * If a contingent allegiance condition exists, the device + * will refuse all tags, so send the request sense as untagged + * */ + if((hostdata->tag_negotiated & (1<tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE && + slot->flags != NCR_700_FLAG_AUTOSENSE)) { + count += spi_populate_tag_msg(&hostdata->msgout[count], SCp); + } + + if(hostdata->fast && + NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) { + count += spi_populate_sync_msg(&hostdata->msgout[count], + spi_period(SCp->device->sdev_target), + spi_offset(SCp->device->sdev_target)); + NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION); + } + + script_patch_16(hostdata, hostdata->script, MessageCount, count); + + script_patch_ID(hostdata, hostdata->script, Device_ID, 1<script, CommandAddress, + slot->pCmd); + script_patch_16(hostdata, hostdata->script, CommandCount, SCp->cmd_len); + /* finally plumb the beginning of the SG list into the script + * */ + script_patch_32_abs(hostdata, hostdata->script, + SGScriptStartAddress, to32bit(&slot->pSG[0].ins)); + NCR_700_clear_fifo(SCp->device->host); + + if(slot->resume_offset == 0) + slot->resume_offset = hostdata->pScript; + /* now perform all the writebacks and invalidates */ + dma_sync_to_dev(hostdata, hostdata->msgout, count); + dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE); + dma_sync_to_dev(hostdata, SCp->cmnd, SCp->cmd_len); + dma_sync_from_dev(hostdata, hostdata->status, 1); + + /* set the synchronous period/offset */ + NCR_700_writeb(NCR_700_get_SXFER(SCp->device), + SCp->device->host, SXFER_REG); + NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG); + NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG); + + return 1; +} + +irqreturn_t +NCR_700_intr(int irq, void *dev_id) +{ + struct Scsi_Host *host = (struct Scsi_Host *)dev_id; + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)host->hostdata[0]; + __u8 istat; + __u32 resume_offset = 0; + __u8 pun = 0xff, lun = 0xff; + unsigned long flags; + int handled = 0; + + /* Use the host lock to serialise access to the 53c700 + * hardware. Note: In future, we may need to take the queue + * lock to enter the done routines. When that happens, we + * need to ensure that for this driver, the host lock and the + * queue lock point to the same thing. */ + spin_lock_irqsave(host->host_lock, flags); + if((istat = NCR_700_readb(host, ISTAT_REG)) + & (SCSI_INT_PENDING | DMA_INT_PENDING)) { + __u32 dsps; + __u8 sstat0 = 0, dstat = 0; + __u32 dsp; + struct scsi_cmnd *SCp = hostdata->cmd; + + handled = 1; + + if(istat & SCSI_INT_PENDING) { + udelay(10); + + sstat0 = NCR_700_readb(host, SSTAT0_REG); + } + + if(istat & DMA_INT_PENDING) { + udelay(10); + + dstat = NCR_700_readb(host, DSTAT_REG); + } + + dsps = NCR_700_readl(host, DSPS_REG); + dsp = NCR_700_readl(host, DSP_REG); + + DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n", + host->host_no, istat, sstat0, dstat, + (dsp - (__u32)(hostdata->pScript))/4, + dsp, dsps)); + + if(SCp != NULL) { + pun = SCp->device->id; + lun = SCp->device->lun; + } + + if(sstat0 & SCSI_RESET_DETECTED) { + struct scsi_device *SDp; + int i; + + hostdata->state = NCR_700_HOST_BUSY; + + printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n", + host->host_no, SCp, SCp == NULL ? 
NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript); + + scsi_report_bus_reset(host, 0); + + /* clear all the negotiated parameters */ + __shost_for_each_device(SDp, host) + NCR_700_clear_flag(SDp, ~0); + + /* clear all the slots and their pending commands */ + for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) { + struct scsi_cmnd *SCp; + struct NCR_700_command_slot *slot = + &hostdata->slots[i]; + + if(slot->state == NCR_700_SLOT_FREE) + continue; + + SCp = slot->cmnd; + printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n", + slot, SCp); + free_slot(slot, hostdata); + SCp->host_scribble = NULL; + NCR_700_set_depth(SCp->device, 0); + /* NOTE: deadlock potential here: we + * rely on mid-layer guarantees that + * scsi_done won't try to issue the + * command again otherwise we'll + * deadlock on the + * hostdata->state_lock */ + SCp->result = DID_RESET << 16; + scsi_done(SCp); + } + mdelay(25); + NCR_700_chip_setup(host); + + hostdata->state = NCR_700_HOST_FREE; + hostdata->cmd = NULL; + /* signal back if this was an eh induced reset */ + if(hostdata->eh_complete != NULL) + complete(hostdata->eh_complete); + goto out_unlock; + } else if(sstat0 & SELECTION_TIMEOUT) { + DEBUG(("scsi%d: (%d:%d) selection timeout\n", + host->host_no, pun, lun)); + NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16); + } else if(sstat0 & PHASE_MISMATCH) { + struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL : + (struct NCR_700_command_slot *)SCp->host_scribble; + + if(dsp == Ent_SendMessage + 8 + hostdata->pScript) { + /* It wants to reply to some part of + * our message */ +#ifdef NCR_700_DEBUG + __u32 temp = NCR_700_readl(host, TEMP_REG); + int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host)); + printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG))); +#endif + resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch; + } else if (slot && dsp >= to32bit(&slot->pSG[0].ins) && + dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) { + int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff; + int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List); + int residual = NCR_700_data_residual(host); + int i; +#ifdef NCR_700_DEBUG + __u32 naddr = NCR_700_readl(host, DNAD_REG); + + printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n", + host->host_no, pun, lun, + SGcount, data_transfer); + scsi_print_command(SCp); + if(residual) { + printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n", + host->host_no, pun, lun, + SGcount, data_transfer, residual); + } +#endif + data_transfer += residual; + + if(data_transfer != 0) { + int count; + __u32 pAddr; + + SGcount--; + + count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff); + DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer)); + slot->SG[SGcount].ins &= bS_to_host(0xff000000); + slot->SG[SGcount].ins |= bS_to_host(data_transfer); + pAddr = bS_to_cpu(slot->SG[SGcount].pAddr); + pAddr += (count - data_transfer); +#ifdef NCR_700_DEBUG + if(pAddr != naddr) { + printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual); + } +#endif 
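+ /* the interrupted SG element now carries only the bytes still outstanding, and pAddr points just past the data already moved, so store it back and the transfer can resume from the right place */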
+ slot->SG[SGcount].pAddr = bS_to_host(pAddr); + } + /* set the executed moves to nops */ + for(i=0; iSG[i].ins = bS_to_host(SCRIPT_NOP); + slot->SG[i].pAddr = 0; + } + dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG)); + /* and pretend we disconnected after + * the command phase */ + resume_offset = hostdata->pScript + Ent_MsgInDuringData; + /* make sure all the data is flushed */ + NCR_700_flush_fifo(host); + } else { + __u8 sbcl = NCR_700_readb(host, SBCL_REG); + printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n", + host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl)); + NCR_700_internal_bus_reset(host); + } + + } else if(sstat0 & SCSI_GROSS_ERROR) { + printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n", + host->host_no, pun, lun); + NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16); + } else if(sstat0 & PARITY_ERROR) { + printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n", + host->host_no, pun, lun); + NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16); + } else if(dstat & SCRIPT_INT_RECEIVED) { + DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n", + host->host_no, pun, lun)); + resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata); + } else if(dstat & (ILGL_INST_DETECTED)) { + printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n" + " Please email James.Bottomley@HansenPartnership.com with the details\n", + host->host_no, pun, lun, + dsp, dsp - hostdata->pScript); + NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16); + } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) { + printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n", + host->host_no, pun, lun, dstat); + NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16); + } + + + /* NOTE: selection interrupt processing MUST occur + * after script interrupt processing to correctly cope + * with the case where we process a disconnect and + * then get reselected before we process the + * disconnection */ + if(sstat0 & SELECTED) { + /* FIXME: It currently takes at least FOUR + * interrupts to complete a command that + * disconnects: one for the disconnect, one + * for the reselection, one to get the + * reselection data and one to complete the + * command. If we guess the reselected + * command here and prepare it, we only need + * to get a reselection data interrupt if we + * guessed wrongly. Since the interrupt + * overhead is much greater than the command + * setup, this would be an efficient + * optimisation particularly as we probably + * only have one outstanding command on a + * target most of the time */ + + resume_offset = process_selection(host, dsp); + + } + + } + + if(resume_offset) { + if(hostdata->state != NCR_700_HOST_BUSY) { + printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n", + host->host_no, resume_offset, resume_offset - hostdata->pScript); + hostdata->state = NCR_700_HOST_BUSY; + } + + DEBUG(("Attempting to resume at %x\n", resume_offset)); + NCR_700_clear_fifo(host); + NCR_700_writel(resume_offset, host, DSP_REG); + } + /* There is probably a technical no-no about this: If we're a + * shared interrupt and we got this interrupt because the + * other device needs servicing not us, we're still going to + * check our queued commands here---of course, there shouldn't + * be any outstanding.... 
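+ * + * The scan below starts from saved_slot_position so that queued slots are issued round-robin rather than always favouring the low-numbered slots, and at most one queued command is started per pass.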
*/ + if(hostdata->state == NCR_700_HOST_FREE) { + int i; + + for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) { + /* fairness: always run the queue from the last + * position we left off */ + int j = (i + hostdata->saved_slot_position) + % NCR_700_COMMAND_SLOTS_PER_HOST; + + if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED) + continue; + if(NCR_700_start_command(hostdata->slots[j].cmnd)) { + DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n", + host->host_no, &hostdata->slots[j], + hostdata->slots[j].cmnd)); + hostdata->saved_slot_position = j + 1; + } + + break; + } + } + out_unlock: + spin_unlock_irqrestore(host->host_lock, flags); + return IRQ_RETVAL(handled); +} + +static int NCR_700_queuecommand_lck(struct scsi_cmnd *SCp) +{ + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; + __u32 move_ins; + struct NCR_700_command_slot *slot; + + if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) { + /* We're over our allocation, this should never happen + * since we report the max allocation to the mid layer */ + printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no); + return 1; + } + /* check for untagged commands. We cannot have any outstanding + * commands if we accept them. Commands could be untagged because: + * + * - The tag negotiated bitmap is clear + * - The blk layer sent and untagged command + */ + if(NCR_700_get_depth(SCp->device) != 0 + && (!(hostdata->tag_negotiated & (1<flags & SCMD_TAGGED))) { + CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n", + NCR_700_get_depth(SCp->device)); + return SCSI_MLQUEUE_DEVICE_BUSY; + } + if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) { + CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n", + NCR_700_get_depth(SCp->device)); + return SCSI_MLQUEUE_DEVICE_BUSY; + } + NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1); + + /* begin the command here */ + /* no need to check for NULL, test for command_slot_count above + * ensures a slot is free */ + slot = find_empty_slot(hostdata); + + slot->cmnd = SCp; + + SCp->host_scribble = (unsigned char *)slot; + +#ifdef NCR_700_DEBUG + printk("53c700: scsi%d, command ", SCp->device->host->host_no); + scsi_print_command(SCp); +#endif + if ((SCp->flags & SCMD_TAGGED) + && (hostdata->tag_negotiated &(1<device) == NCR_700_START_TAG_NEGOTIATION) { + scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n"); + hostdata->tag_negotiated |= (1<device, NCR_700_DURING_TAG_NEGOTIATION); + } + + /* here we may have to process an untagged command. The gate + * above ensures that this will be the only one outstanding, + * so clear the tag negotiated bit. 
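+ * (tag_negotiated is a per-target bitmap, so clearing it here drops every LUN on that target back to untagged operation; hence the FIXME below.)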
+ * + * FIXME: This will royally screw up on multiple LUN devices + * */ + if (!(SCp->flags & SCMD_TAGGED) + && (hostdata->tag_negotiated &(1<tag_negotiated &= ~(1<tag_negotiated & (1<device->simple_tags) { + slot->tag = scsi_cmd_to_rq(SCp)->tag; + CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n", + slot->tag, slot); + } else { + struct NCR_700_Device_Parameters *p = SCp->device->hostdata; + + slot->tag = SCSI_NO_TAG; + /* save current command for reselection */ + p->current_cmnd = SCp; + } + /* sanity check: some of the commands generated by the mid-layer + * have an eccentric idea of their sc_data_direction */ + if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) && + SCp->sc_data_direction != DMA_NONE) { +#ifdef NCR_700_DEBUG + printk("53c700: Command"); + scsi_print_command(SCp); + printk("Has wrong data direction %d\n", SCp->sc_data_direction); +#endif + SCp->sc_data_direction = DMA_NONE; + } + + switch (SCp->cmnd[0]) { + case REQUEST_SENSE: + /* clear the internal sense magic */ + SCp->cmnd[6] = 0; + fallthrough; + default: + /* OK, get it from the command */ + switch(SCp->sc_data_direction) { + case DMA_BIDIRECTIONAL: + default: + printk(KERN_ERR "53c700: Unknown command for data direction "); + scsi_print_command(SCp); + + move_ins = 0; + break; + case DMA_NONE: + move_ins = 0; + break; + case DMA_FROM_DEVICE: + move_ins = SCRIPT_MOVE_DATA_IN; + break; + case DMA_TO_DEVICE: + move_ins = SCRIPT_MOVE_DATA_OUT; + break; + } + } + + /* now build the scatter gather list */ + if(move_ins != 0) { + int i; + int sg_count; + dma_addr_t vPtr = 0; + struct scatterlist *sg; + __u32 count = 0; + + sg_count = scsi_dma_map(SCp); + BUG_ON(sg_count < 0); + + scsi_for_each_sg(SCp, sg, sg_count, i) { + vPtr = sg_dma_address(sg); + count = sg_dma_len(sg); + + slot->SG[i].ins = bS_to_host(move_ins | count); + DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n", + i, count, slot->SG[i].ins, (unsigned long)vPtr)); + slot->SG[i].pAddr = bS_to_host(vPtr); + } + slot->SG[i].ins = bS_to_host(SCRIPT_RETURN); + slot->SG[i].pAddr = 0; + dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG)); + DEBUG((" SETTING %p to %x\n", + (&slot->pSG[i].ins), + slot->SG[i].ins)); + } + slot->resume_offset = 0; + slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd, + MAX_COMMAND_SIZE, DMA_TO_DEVICE); + NCR_700_start_command(SCp); + return 0; +} + +STATIC DEF_SCSI_QCMD(NCR_700_queuecommand) + +STATIC int +NCR_700_abort(struct scsi_cmnd * SCp) +{ + struct NCR_700_command_slot *slot; + + scmd_printk(KERN_INFO, SCp, "abort command\n"); + + slot = (struct NCR_700_command_slot *)SCp->host_scribble; + + if(slot == NULL) + /* no outstanding command to abort */ + return SUCCESS; + if(SCp->cmnd[0] == TEST_UNIT_READY) { + /* FIXME: This is because of a problem in the new + * error handler. When it is in error recovery, it + * will send a TUR to a device it thinks may still be + * showing a problem. If the TUR isn't responded to, + * it will abort it and mark the device off line. + * Unfortunately, it does no other error recovery, so + * this would leave us with an outstanding command + * occupying a slot. Rather than allow this to + * happen, we issue a bus reset to force all + * outstanding commands to terminate here. 
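+ * Returning FAILED below then lets the mid-layer carry on with its normal escalation, which for this driver ends up in NCR_700_host_reset.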
*/ + NCR_700_internal_bus_reset(SCp->device->host); + /* still drop through and return failed */ + } + return FAILED; + +} + +STATIC int +NCR_700_host_reset(struct scsi_cmnd * SCp) +{ + DECLARE_COMPLETION_ONSTACK(complete); + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; + + scmd_printk(KERN_INFO, SCp, + "New error handler wants HOST reset, cmd %p\n\t", SCp); + scsi_print_command(SCp); + + /* In theory, eh_complete should always be null because the + * eh is single threaded, but just in case we're handling a + * reset via sg or something */ + spin_lock_irq(SCp->device->host->host_lock); + while (hostdata->eh_complete != NULL) { + spin_unlock_irq(SCp->device->host->host_lock); + msleep_interruptible(100); + spin_lock_irq(SCp->device->host->host_lock); + } + + hostdata->eh_complete = &complete; + NCR_700_internal_bus_reset(SCp->device->host); + NCR_700_chip_reset(SCp->device->host); + + spin_unlock_irq(SCp->device->host->host_lock); + wait_for_completion(&complete); + spin_lock_irq(SCp->device->host->host_lock); + + hostdata->eh_complete = NULL; + /* Revalidate the transport parameters of the failing device */ + if(hostdata->fast) + spi_schedule_dv_device(SCp->device); + + spin_unlock_irq(SCp->device->host->host_lock); + return SUCCESS; +} + +STATIC void +NCR_700_set_period(struct scsi_target *STp, int period) +{ + struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent); + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)SHp->hostdata[0]; + + if(!hostdata->fast) + return; + + if(period < hostdata->min_period) + period = hostdata->min_period; + + spi_period(STp) = period; + spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC | + NCR_700_DEV_BEGIN_SYNC_NEGOTIATION); + spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION; +} + +STATIC void +NCR_700_set_offset(struct scsi_target *STp, int offset) +{ + struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent); + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)SHp->hostdata[0]; + int max_offset = hostdata->chip710 + ? 
NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET; + + if(!hostdata->fast) + return; + + if(offset > max_offset) + offset = max_offset; + + /* if we're currently async, make sure the period is reasonable */ + if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period || + spi_period(STp) > 0xff)) + spi_period(STp) = hostdata->min_period; + + spi_offset(STp) = offset; + spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC | + NCR_700_DEV_BEGIN_SYNC_NEGOTIATION); + spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION; +} + +STATIC int +NCR_700_slave_alloc(struct scsi_device *SDp) +{ + SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters), + GFP_KERNEL); + + if (!SDp->hostdata) + return -ENOMEM; + + return 0; +} + +STATIC int +NCR_700_slave_configure(struct scsi_device *SDp) +{ + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0]; + + /* to do here: allocate memory; build a queue_full list */ + if(SDp->tagged_supported) { + scsi_change_queue_depth(SDp, NCR_700_DEFAULT_TAGS); + NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION); + } + + if(hostdata->fast) { + /* Find the correct offset and period via domain validation */ + if (!spi_initial_dv(SDp->sdev_target)) + spi_dv_device(SDp); + } else { + spi_offset(SDp->sdev_target) = 0; + spi_period(SDp->sdev_target) = 0; + } + return 0; +} + +STATIC void +NCR_700_slave_destroy(struct scsi_device *SDp) +{ + kfree(SDp->hostdata); + SDp->hostdata = NULL; +} + +static int +NCR_700_change_queue_depth(struct scsi_device *SDp, int depth) +{ + if (depth > NCR_700_MAX_TAGS) + depth = NCR_700_MAX_TAGS; + return scsi_change_queue_depth(SDp, depth); +} + +static ssize_t +NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_device *SDp = to_scsi_device(dev); + + return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp)); +} + +static struct device_attribute NCR_700_active_tags_attr = { + .attr = { + .name = "active_tags", + .mode = S_IRUGO, + }, + .show = NCR_700_show_active_tags, +}; + +STATIC struct attribute *NCR_700_dev_attrs[] = { + &NCR_700_active_tags_attr.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(NCR_700_dev); + +EXPORT_SYMBOL(NCR_700_detect); +EXPORT_SYMBOL(NCR_700_release); +EXPORT_SYMBOL(NCR_700_intr); + +static struct spi_function_template NCR_700_transport_functions = { + .set_period = NCR_700_set_period, + .show_period = 1, + .set_offset = NCR_700_set_offset, + .show_offset = 1, +}; + +static int __init NCR_700_init(void) +{ + NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions); + if(!NCR_700_transport_template) + return -ENODEV; + return 0; +} + +static void __exit NCR_700_exit(void) +{ + spi_release_transport(NCR_700_transport_template); +} + +module_init(NCR_700_init); +module_exit(NCR_700_exit); + diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h new file mode 100644 index 000000000..2df347ca9 --- /dev/null +++ b/drivers/scsi/53c700.h @@ -0,0 +1,528 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Driver for 53c700 and 53c700-66 chips from NCR and Symbios + * + * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com + */ + +#ifndef _53C700_H +#define _53C700_H + +#include +#include + +#include +#include + +/* Turn on for general debugging---too verbose for normal use */ +#undef NCR_700_DEBUG +/* Debug the tag queues, checking hash queue allocation and deallocation + * and search for duplicate tags */ +#undef NCR_700_TAG_DEBUG + +#ifdef NCR_700_DEBUG +#define DEBUG(x) printk x +#define 
DDEBUG(prefix, sdev, fmt, a...) \ + sdev_printk(prefix, sdev, fmt, ##a) +#define CDEBUG(prefix, scmd, fmt, a...) \ + scmd_printk(prefix, scmd, fmt, ##a) +#else +#define DEBUG(x) do {} while (0) +#define DDEBUG(prefix, scmd, fmt, a...) do {} while (0) +#define CDEBUG(prefix, scmd, fmt, a...) do {} while (0) +#endif + +/* The number of available command slots */ +#define NCR_700_COMMAND_SLOTS_PER_HOST 64 +/* The maximum number of Scatter Gathers we allow */ +#define NCR_700_SG_SEGMENTS 32 +/* The maximum number of luns (make this of the form 2^n) */ +#define NCR_700_MAX_LUNS 32 +#define NCR_700_LUN_MASK (NCR_700_MAX_LUNS - 1) +/* Maximum number of tags the driver ever allows per device */ +#define NCR_700_MAX_TAGS 16 +/* Tag depth the driver starts out with (can be altered in sysfs) */ +#define NCR_700_DEFAULT_TAGS 4 +/* This is the default number of commands per LUN in the untagged case. + * two is a good value because it means we can have one command active and + * one command fully prepared and waiting + */ +#define NCR_700_CMD_PER_LUN 2 +/* magic byte identifying an internally generated REQUEST_SENSE command */ +#define NCR_700_INTERNAL_SENSE_MAGIC 0x42 + +struct NCR_700_Host_Parameters; + +/* These are the externally used routines */ +struct Scsi_Host *NCR_700_detect(struct scsi_host_template *, + struct NCR_700_Host_Parameters *, struct device *); +int NCR_700_release(struct Scsi_Host *host); +irqreturn_t NCR_700_intr(int, void *); + + +enum NCR_700_Host_State { + NCR_700_HOST_BUSY, + NCR_700_HOST_FREE, +}; + +struct NCR_700_SG_List { + /* The following is a script fragment to move the buffer onto the + * bus and then link the next fragment or return */ + #define SCRIPT_MOVE_DATA_IN 0x09000000 + #define SCRIPT_MOVE_DATA_OUT 0x08000000 + __u32 ins; + __u32 pAddr; + #define SCRIPT_NOP 0x80000000 + #define SCRIPT_RETURN 0x90080000 +}; + +struct NCR_700_Device_Parameters { + /* space for creating a request sense command. Really, except + * for the annoying SCSI-2 requirement for LUN information in + * cmnd[1], this could be in static storage */ + unsigned char cmnd[MAX_COMMAND_SIZE]; + __u8 depth; + struct scsi_cmnd *current_cmnd; /* currently active command */ +}; + + +/* The SYNC negotiation sequence looks like: + * + * If DEV_NEGOTIATED_SYNC not set, tack and SDTR message on to the + * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTIATION + * If we get an SDTR reply, work out the SXFER parameters, squirrel + * them away here, clear DEV_BEGIN_SYNC_NEGOTIATION and set + * DEV_NEGOTIATED_SYNC. 
If we get a REJECT msg, squirrel + * + * + * 0:7 SXFER_REG negotiated value for this device + * 8:15 Current queue depth + * 16 negotiated SYNC flag + * 17 begin SYNC negotiation flag + * 18 device supports tag queueing */ +#define NCR_700_DEV_NEGOTIATED_SYNC (1<<16) +#define NCR_700_DEV_BEGIN_SYNC_NEGOTIATION (1<<17) +#define NCR_700_DEV_PRINT_SYNC_NEGOTIATION (1<<19) + +static inline char *NCR_700_get_sense_cmnd(struct scsi_device *SDp) +{ + struct NCR_700_Device_Parameters *hostdata = SDp->hostdata; + + return hostdata->cmnd; +} + +static inline void +NCR_700_set_depth(struct scsi_device *SDp, __u8 depth) +{ + struct NCR_700_Device_Parameters *hostdata = SDp->hostdata; + + hostdata->depth = depth; +} +static inline __u8 +NCR_700_get_depth(struct scsi_device *SDp) +{ + struct NCR_700_Device_Parameters *hostdata = SDp->hostdata; + + return hostdata->depth; +} +static inline int +NCR_700_is_flag_set(struct scsi_device *SDp, __u32 flag) +{ + return (spi_flags(SDp->sdev_target) & flag) == flag; +} +static inline int +NCR_700_is_flag_clear(struct scsi_device *SDp, __u32 flag) +{ + return (spi_flags(SDp->sdev_target) & flag) == 0; +} +static inline void +NCR_700_set_flag(struct scsi_device *SDp, __u32 flag) +{ + spi_flags(SDp->sdev_target) |= flag; +} +static inline void +NCR_700_clear_flag(struct scsi_device *SDp, __u32 flag) +{ + spi_flags(SDp->sdev_target) &= ~flag; +} + +enum NCR_700_tag_neg_state { + NCR_700_START_TAG_NEGOTIATION = 0, + NCR_700_DURING_TAG_NEGOTIATION = 1, + NCR_700_FINISHED_TAG_NEGOTIATION = 2, +}; + +static inline enum NCR_700_tag_neg_state +NCR_700_get_tag_neg_state(struct scsi_device *SDp) +{ + return (enum NCR_700_tag_neg_state)((spi_flags(SDp->sdev_target)>>20) & 0x3); +} + +static inline void +NCR_700_set_tag_neg_state(struct scsi_device *SDp, + enum NCR_700_tag_neg_state state) +{ + /* clear the slot */ + spi_flags(SDp->sdev_target) &= ~(0x3 << 20); + spi_flags(SDp->sdev_target) |= ((__u32)state) << 20; +} + +struct NCR_700_command_slot { + struct NCR_700_SG_List SG[NCR_700_SG_SEGMENTS+1]; + struct NCR_700_SG_List *pSG; + #define NCR_700_SLOT_MASK 0xFC + #define NCR_700_SLOT_MAGIC 0xb8 + #define NCR_700_SLOT_FREE (0|NCR_700_SLOT_MAGIC) /* slot may be used */ + #define NCR_700_SLOT_BUSY (1|NCR_700_SLOT_MAGIC) /* slot has command active on HA */ + #define NCR_700_SLOT_QUEUED (2|NCR_700_SLOT_MAGIC) /* slot has command to be made active on HA */ + __u8 state; + #define NCR_700_FLAG_AUTOSENSE 0x01 + __u8 flags; + __u8 pad1[2]; /* Needed for m68k where min alignment is 2 bytes */ + int tag; + __u32 resume_offset; + struct scsi_cmnd *cmnd; + /* The pci_mapped address of the actual command in cmnd */ + dma_addr_t pCmd; + __u32 temp; + /* if this command is a pci_single mapping, holds the dma address + * for later unmapping in the done routine */ + dma_addr_t dma_handle; + /* historical remnant, now used to link free commands */ + struct NCR_700_command_slot *ITL_forw; +}; + +struct NCR_700_Host_Parameters { + /* These must be filled in by the calling driver */ + int clock; /* board clock speed in MHz */ + void __iomem *base; /* the base for the port (copied to host) */ + struct device *dev; + __u32 dmode_extra; /* adjustable bus settings */ + __u32 dcntl_extra; /* adjustable bus settings */ + __u32 ctest7_extra; /* adjustable bus settings */ + __u32 differential:1; /* if we are differential */ +#ifdef CONFIG_53C700_LE_ON_BE + /* This option is for HP only. 
Set it if your chip is wired for + * little endian on this platform (which is big endian) */ + __u32 force_le_on_be:1; +#endif + __u32 chip710:1; /* set if really a 710 not 700 */ + __u32 burst_length:4; /* set to 0 to disable 710 bursting */ + __u32 noncoherent:1; /* needs to use non-coherent DMA */ + + /* NOTHING BELOW HERE NEEDS ALTERING */ + __u32 fast:1; /* if we can alter the SCSI bus clock + speed (so can negiotiate sync) */ + int sync_clock; /* The speed of the SYNC core */ + + __u32 *script; /* pointer to script location */ + __u32 pScript; /* physical mem addr of script */ + + enum NCR_700_Host_State state; /* protected by state lock */ + struct scsi_cmnd *cmd; + /* Note: pScript contains the single consistent block of + * memory. All the msgin, msgout and status are allocated in + * this memory too (at separate cache lines). TOTAL_MEM_SIZE + * represents the total size of this area */ +#define MSG_ARRAY_SIZE 8 +#define MSGOUT_OFFSET (L1_CACHE_ALIGN(sizeof(SCRIPT))) + __u8 *msgout; +#define MSGIN_OFFSET (MSGOUT_OFFSET + L1_CACHE_ALIGN(MSG_ARRAY_SIZE)) + __u8 *msgin; +#define STATUS_OFFSET (MSGIN_OFFSET + L1_CACHE_ALIGN(MSG_ARRAY_SIZE)) + __u8 *status; +#define SLOTS_OFFSET (STATUS_OFFSET + L1_CACHE_ALIGN(MSG_ARRAY_SIZE)) + struct NCR_700_command_slot *slots; +#define TOTAL_MEM_SIZE (SLOTS_OFFSET + L1_CACHE_ALIGN(sizeof(struct NCR_700_command_slot) * NCR_700_COMMAND_SLOTS_PER_HOST)) + int saved_slot_position; + int command_slot_count; /* protected by state lock */ + __u8 tag_negotiated; + __u8 rev; + __u8 reselection_id; + __u8 min_period; + + /* Free list, singly linked by ITL_forw elements */ + struct NCR_700_command_slot *free_list; + /* Completion for waited for ops, like reset, abort or + * device reset. + * + * NOTE: relies on single threading in the error handler to + * have only one outstanding at once */ + struct completion *eh_complete; +}; + +/* + * 53C700 Register Interface - the offset from the Selected base + * I/O address */ +#ifdef CONFIG_53C700_LE_ON_BE +#define bE (hostdata->force_le_on_be ? 0 : 3) +#define bSWAP (hostdata->force_le_on_be) +#define bEBus (!hostdata->force_le_on_be) +#elif defined(__BIG_ENDIAN) +#define bE 3 +#define bSWAP 0 +#elif defined(__LITTLE_ENDIAN) +#define bE 0 +#define bSWAP 0 +#else +#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined, did you include byteorder.h?" +#endif +#ifndef bEBus +#ifdef CONFIG_53C700_BE_BUS +#define bEBus 1 +#else +#define bEBus 0 +#endif +#endif +#define bS_to_cpu(x) (bSWAP ? le32_to_cpu(x) : (x)) +#define bS_to_host(x) (bSWAP ? 
cpu_to_le32(x) : (x)) + +/* NOTE: These registers are in the LE register space only, the required byte + * swapping is done by the NCR_700_{read|write}[b] functions */ +#define SCNTL0_REG 0x00 +#define FULL_ARBITRATION 0xc0 +#define PARITY 0x08 +#define ENABLE_PARITY 0x04 +#define AUTO_ATN 0x02 +#define SCNTL1_REG 0x01 +#define SLOW_BUS 0x80 +#define ENABLE_SELECT 0x20 +#define ASSERT_RST 0x08 +#define ASSERT_EVEN_PARITY 0x04 +#define SDID_REG 0x02 +#define SIEN_REG 0x03 +#define PHASE_MM_INT 0x80 +#define FUNC_COMP_INT 0x40 +#define SEL_TIMEOUT_INT 0x20 +#define SELECT_INT 0x10 +#define GROSS_ERR_INT 0x08 +#define UX_DISC_INT 0x04 +#define RST_INT 0x02 +#define PAR_ERR_INT 0x01 +#define SCID_REG 0x04 +#define SXFER_REG 0x05 +#define ASYNC_OPERATION 0x00 +#define SODL_REG 0x06 +#define SOCL_REG 0x07 +#define SFBR_REG 0x08 +#define SIDL_REG 0x09 +#define SBDL_REG 0x0A +#define SBCL_REG 0x0B +/* read bits */ +#define SBCL_IO 0x01 +/*write bits */ +#define SYNC_DIV_AS_ASYNC 0x00 +#define SYNC_DIV_1_0 0x01 +#define SYNC_DIV_1_5 0x02 +#define SYNC_DIV_2_0 0x03 +#define DSTAT_REG 0x0C +#define ILGL_INST_DETECTED 0x01 +#define WATCH_DOG_INTERRUPT 0x02 +#define SCRIPT_INT_RECEIVED 0x04 +#define ABORTED 0x10 +#define SSTAT0_REG 0x0D +#define PARITY_ERROR 0x01 +#define SCSI_RESET_DETECTED 0x02 +#define UNEXPECTED_DISCONNECT 0x04 +#define SCSI_GROSS_ERROR 0x08 +#define SELECTED 0x10 +#define SELECTION_TIMEOUT 0x20 +#define FUNCTION_COMPLETE 0x40 +#define PHASE_MISMATCH 0x80 +#define SSTAT1_REG 0x0E +#define SIDL_REG_FULL 0x80 +#define SODR_REG_FULL 0x40 +#define SODL_REG_FULL 0x20 +#define SSTAT2_REG 0x0F +#define CTEST0_REG 0x14 +#define BTB_TIMER_DISABLE 0x40 +#define CTEST1_REG 0x15 +#define CTEST2_REG 0x16 +#define CTEST3_REG 0x17 +#define CTEST4_REG 0x18 +#define DISABLE_FIFO 0x00 +#define SLBE 0x10 +#define SFWR 0x08 +#define BYTE_LANE0 0x04 +#define BYTE_LANE1 0x05 +#define BYTE_LANE2 0x06 +#define BYTE_LANE3 0x07 +#define SCSI_ZMODE 0x20 +#define ZMODE 0x40 +#define CTEST5_REG 0x19 +#define MASTER_CONTROL 0x10 +#define DMA_DIRECTION 0x08 +#define CTEST7_REG 0x1B +#define BURST_DISABLE 0x80 /* 710 only */ +#define SEL_TIMEOUT_DISABLE 0x10 /* 710 only */ +#define DFP 0x08 +#define EVP 0x04 +#define CTEST7_TT1 0x02 +#define DIFF 0x01 +#define CTEST6_REG 0x1A +#define TEMP_REG 0x1C +#define DFIFO_REG 0x20 +#define FLUSH_DMA_FIFO 0x80 +#define CLR_FIFO 0x40 +#define ISTAT_REG 0x21 +#define ABORT_OPERATION 0x80 +#define SOFTWARE_RESET_710 0x40 +#define DMA_INT_PENDING 0x01 +#define SCSI_INT_PENDING 0x02 +#define CONNECTED 0x08 +#define CTEST8_REG 0x22 +#define LAST_DIS_ENBL 0x01 +#define SHORTEN_FILTERING 0x04 +#define ENABLE_ACTIVE_NEGATION 0x10 +#define GENERATE_RECEIVE_PARITY 0x20 +#define CLR_FIFO_710 0x04 +#define FLUSH_DMA_FIFO_710 0x08 +#define CTEST9_REG 0x23 +#define DBC_REG 0x24 +#define DCMD_REG 0x27 +#define DNAD_REG 0x28 +#define DIEN_REG 0x39 +#define BUS_FAULT 0x20 +#define ABORT_INT 0x10 +#define INT_INST_INT 0x04 +#define WD_INT 0x02 +#define ILGL_INST_INT 0x01 +#define DCNTL_REG 0x3B +#define SOFTWARE_RESET 0x01 +#define COMPAT_700_MODE 0x01 +#define SCRPTS_16BITS 0x20 +#define EA_710 0x20 +#define ASYNC_DIV_2_0 0x00 +#define ASYNC_DIV_1_5 0x40 +#define ASYNC_DIV_1_0 0x80 +#define ASYNC_DIV_3_0 0xc0 +#define DMODE_710_REG 0x38 +#define DMODE_700_REG 0x34 +#define BURST_LENGTH_1 0x00 +#define BURST_LENGTH_2 0x40 +#define BURST_LENGTH_4 0x80 +#define BURST_LENGTH_8 0xC0 +#define DMODE_FC1 0x10 +#define DMODE_FC2 0x20 +#define BW16 32 +#define MODE_286 16 +#define IO_XFER 8 +#define 
FIXED_ADDR 4 + +#define DSP_REG 0x2C +#define DSPS_REG 0x30 + +/* Parameters to begin SDTR negotiations. Empirically, I find that + * the 53c700-66 cannot handle an offset >8, so don't change this */ +#define NCR_700_MAX_OFFSET 8 +/* Was hoping the max offset would be greater for the 710, but + * empirically it seems to be 8 also */ +#define NCR_710_MAX_OFFSET 8 +#define NCR_700_MIN_XFERP 1 +#define NCR_710_MIN_XFERP 0 +#define NCR_700_MIN_PERIOD 25 /* for SDTR message, 100ns */ + +#define script_patch_32(h, script, symbol, value) \ +{ \ + int i; \ + dma_addr_t da = value; \ + for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \ + __u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + da; \ + (script)[A_##symbol##_used[i]] = bS_to_host(val); \ + dma_sync_to_dev((h), &(script)[A_##symbol##_used[i]], 4); \ + DEBUG((" script, patching %s at %d to %pad\n", \ + #symbol, A_##symbol##_used[i], &da)); \ + } \ +} + +#define script_patch_32_abs(h, script, symbol, value) \ +{ \ + int i; \ + dma_addr_t da = value; \ + for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \ + (script)[A_##symbol##_used[i]] = bS_to_host(da); \ + dma_sync_to_dev((h), &(script)[A_##symbol##_used[i]], 4); \ + DEBUG((" script, patching %s at %d to %pad\n", \ + #symbol, A_##symbol##_used[i], &da)); \ + } \ +} + +/* Used for patching the SCSI ID in the SELECT instruction */ +#define script_patch_ID(h, script, symbol, value) \ +{ \ + int i; \ + for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \ + __u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]); \ + val &= 0xff00ffff; \ + val |= ((value) & 0xff) << 16; \ + (script)[A_##symbol##_used[i]] = bS_to_host(val); \ + dma_sync_to_dev((h), &(script)[A_##symbol##_used[i]], 4); \ + DEBUG((" script, patching ID field %s at %d to 0x%x\n", \ + #symbol, A_##symbol##_used[i], val)); \ + } \ +} + +#define script_patch_16(h, script, symbol, value) \ +{ \ + int i; \ + for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \ + __u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]); \ + val &= 0xffff0000; \ + val |= ((value) & 0xffff); \ + (script)[A_##symbol##_used[i]] = bS_to_host(val); \ + dma_sync_to_dev((h), &(script)[A_##symbol##_used[i]], 4); \ + DEBUG((" script, patching short field %s at %d to 0x%x\n", \ + #symbol, A_##symbol##_used[i], val)); \ + } \ +} + + +static inline __u8 +NCR_700_readb(struct Scsi_Host *host, __u32 reg) +{ + const struct NCR_700_Host_Parameters *hostdata + = (struct NCR_700_Host_Parameters *)host->hostdata[0]; + + return ioread8(hostdata->base + (reg^bE)); +} + +static inline __u32 +NCR_700_readl(struct Scsi_Host *host, __u32 reg) +{ + const struct NCR_700_Host_Parameters *hostdata + = (struct NCR_700_Host_Parameters *)host->hostdata[0]; + __u32 value = bEBus ? ioread32be(hostdata->base + reg) : + ioread32(hostdata->base + reg); +#if 1 + /* sanity check the register */ + BUG_ON((reg & 0x3) != 0); +#endif + + return value; +} + +static inline void +NCR_700_writeb(__u8 value, struct Scsi_Host *host, __u32 reg) +{ + const struct NCR_700_Host_Parameters *hostdata + = (struct NCR_700_Host_Parameters *)host->hostdata[0]; + + iowrite8(value, hostdata->base + (reg^bE)); +} + +static inline void +NCR_700_writel(__u32 value, struct Scsi_Host *host, __u32 reg) +{ + const struct NCR_700_Host_Parameters *hostdata + = (struct NCR_700_Host_Parameters *)host->hostdata[0]; + +#if 1 + /* sanity check the register */ + BUG_ON((reg & 0x3) != 0); +#endif + + bEBus ? 
iowrite32be(value, hostdata->base + reg): + iowrite32(value, hostdata->base + reg); +} + +#endif diff --git a/drivers/scsi/53c700.scr b/drivers/scsi/53c700.scr new file mode 100644 index 000000000..ec822e3b7 --- /dev/null +++ b/drivers/scsi/53c700.scr @@ -0,0 +1,411 @@ +; Script for the NCR (or symbios) 53c700 and 53c700-66 chip +; +; Copyright (C) 2001 James.Bottomley@HansenPartnership.com +;;----------------------------------------------------------------------------- +;; +;; This program is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 2 of the License, or +;; (at your option) any later version. +;; +;; This program is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with this program; if not, write to the Free Software +;; Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +;; +;;----------------------------------------------------------------------------- +; +; This script is designed to be modified for the particular command in +; operation. The particular variables pertaining to the commands are: +; +ABSOLUTE Device_ID = 0 ; ID of target for command +ABSOLUTE MessageCount = 0 ; Number of bytes in message +ABSOLUTE MessageLocation = 0 ; Addr of message +ABSOLUTE CommandCount = 0 ; Number of bytes in command +ABSOLUTE CommandAddress = 0 ; Addr of Command +ABSOLUTE StatusAddress = 0 ; Addr to receive status return +ABSOLUTE ReceiveMsgAddress = 0 ; Addr to receive msg +; +; This is the magic component for handling scatter-gather. Each of the +; SG components is preceded by a script fragment which moves the +; necessary amount of data and jumps to the next SG segment. The final +; SG segment jumps back to . However, this address is the first SG script +; segment. 
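+;
+; (Driver side: the queuecommand path in 53c700.c builds this fragment list
+; in slot->SG[], encoding each DMA segment as SCRIPT_MOVE_DATA_IN
+; (0x09000000) or SCRIPT_MOVE_DATA_OUT (0x08000000) OR'ed with the byte
+; count, followed by the segment's bus address, and terminating the list
+; with SCRIPT_RETURN (0x90080000) so control comes back to the instruction
+; after the CALL SGScriptStartAddress in DataIn/DataOut below.)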
+; +ABSOLUTE SGScriptStartAddress = 0 + +; The following represent status interrupts we use 3 hex digits for +; this: 0xPRS where + +; P: +ABSOLUTE AFTER_SELECTION = 0x100 +ABSOLUTE BEFORE_CMD = 0x200 +ABSOLUTE AFTER_CMD = 0x300 +ABSOLUTE AFTER_STATUS = 0x400 +ABSOLUTE AFTER_DATA_IN = 0x500 +ABSOLUTE AFTER_DATA_OUT = 0x600 +ABSOLUTE DURING_DATA_IN = 0x700 + +; R: +ABSOLUTE NOT_MSG_OUT = 0x10 +ABSOLUTE UNEXPECTED_PHASE = 0x20 +ABSOLUTE NOT_MSG_IN = 0x30 +ABSOLUTE UNEXPECTED_MSG = 0x40 +ABSOLUTE MSG_IN = 0x50 +ABSOLUTE SDTR_MSG_R = 0x60 +ABSOLUTE REJECT_MSG_R = 0x70 +ABSOLUTE DISCONNECT = 0x80 +ABSOLUTE MSG_OUT = 0x90 +ABSOLUTE WDTR_MSG_R = 0xA0 + +; S: +ABSOLUTE GOOD_STATUS = 0x1 + +; Combinations, since the script assembler can't process | +ABSOLUTE NOT_MSG_OUT_AFTER_SELECTION = 0x110 +ABSOLUTE UNEXPECTED_PHASE_BEFORE_CMD = 0x220 +ABSOLUTE UNEXPECTED_PHASE_AFTER_CMD = 0x320 +ABSOLUTE NOT_MSG_IN_AFTER_STATUS = 0x430 +ABSOLUTE GOOD_STATUS_AFTER_STATUS = 0x401 +ABSOLUTE UNEXPECTED_PHASE_AFTER_DATA_IN = 0x520 +ABSOLUTE UNEXPECTED_PHASE_AFTER_DATA_OUT = 0x620 +ABSOLUTE UNEXPECTED_MSG_BEFORE_CMD = 0x240 +ABSOLUTE MSG_IN_BEFORE_CMD = 0x250 +ABSOLUTE MSG_IN_AFTER_CMD = 0x350 +ABSOLUTE SDTR_MSG_BEFORE_CMD = 0x260 +ABSOLUTE REJECT_MSG_BEFORE_CMD = 0x270 +ABSOLUTE DISCONNECT_AFTER_CMD = 0x380 +ABSOLUTE SDTR_MSG_AFTER_CMD = 0x360 +ABSOLUTE WDTR_MSG_AFTER_CMD = 0x3A0 +ABSOLUTE MSG_IN_AFTER_STATUS = 0x440 +ABSOLUTE DISCONNECT_AFTER_DATA = 0x580 +ABSOLUTE MSG_IN_AFTER_DATA_IN = 0x550 +ABSOLUTE MSG_IN_AFTER_DATA_OUT = 0x650 +ABSOLUTE MSG_OUT_AFTER_DATA_IN = 0x590 +ABSOLUTE DATA_IN_AFTER_DATA_IN = 0x5a0 +ABSOLUTE MSG_IN_DURING_DATA_IN = 0x750 +ABSOLUTE DISCONNECT_DURING_DATA = 0x780 + +; +; Other interrupt conditions +; +ABSOLUTE RESELECTED_DURING_SELECTION = 0x1000 +ABSOLUTE COMPLETED_SELECTION_AS_TARGET = 0x1001 +ABSOLUTE RESELECTION_IDENTIFIED = 0x1003 +; +; Fatal interrupt conditions. 
If you add to this, also add to the +; array of corresponding messages +; +ABSOLUTE FATAL = 0x2000 +ABSOLUTE FATAL_UNEXPECTED_RESELECTION_MSG = 0x2000 +ABSOLUTE FATAL_SEND_MSG = 0x2001 +ABSOLUTE FATAL_NOT_MSG_IN_AFTER_SELECTION = 0x2002 +ABSOLUTE FATAL_ILLEGAL_MSG_LENGTH = 0x2003 + +ABSOLUTE DEBUG_INTERRUPT = 0x3000 +ABSOLUTE DEBUG_INTERRUPT1 = 0x3001 +ABSOLUTE DEBUG_INTERRUPT2 = 0x3002 +ABSOLUTE DEBUG_INTERRUPT3 = 0x3003 +ABSOLUTE DEBUG_INTERRUPT4 = 0x3004 +ABSOLUTE DEBUG_INTERRUPT5 = 0x3005 +ABSOLUTE DEBUG_INTERRUPT6 = 0x3006 + + +; +; SCSI Messages we interpret in the script +; +ABSOLUTE COMMAND_COMPLETE_MSG = 0x00 +ABSOLUTE EXTENDED_MSG = 0x01 +ABSOLUTE SDTR_MSG = 0x01 +ABSOLUTE SAVE_DATA_PTRS_MSG = 0x02 +ABSOLUTE RESTORE_DATA_PTRS_MSG = 0x03 +ABSOLUTE WDTR_MSG = 0x03 +ABSOLUTE DISCONNECT_MSG = 0x04 +ABSOLUTE REJECT_MSG = 0x07 +ABSOLUTE PARITY_ERROR_MSG = 0x09 +ABSOLUTE SIMPLE_TAG_MSG = 0x20 +ABSOLUTE IDENTIFY_MSG = 0x80 +ABSOLUTE IDENTIFY_MSG_MASK = 0x7F +ABSOLUTE TWO_BYTE_MSG = 0x20 +ABSOLUTE TWO_BYTE_MSG_MASK = 0x0F + +; This is where the script begins + +ENTRY StartUp + +StartUp: + SELECT ATN Device_ID, Reselect + JUMP Finish, WHEN STATUS + JUMP SendIdentifyMsg, IF MSG_OUT + INT NOT_MSG_OUT_AFTER_SELECTION + +Reselect: + WAIT RESELECT SelectedAsTarget + INT RESELECTED_DURING_SELECTION, WHEN MSG_IN + INT FATAL_NOT_MSG_IN_AFTER_SELECTION + + ENTRY GetReselectionData +GetReselectionData: + MOVE 1, ReceiveMsgAddress, WHEN MSG_IN + INT RESELECTION_IDENTIFIED + + ENTRY GetReselectionWithTag +GetReselectionWithTag: + MOVE 3, ReceiveMsgAddress, WHEN MSG_IN + INT RESELECTION_IDENTIFIED + + ENTRY SelectedAsTarget +SelectedAsTarget: +; Basically tell the selecting device that there's nothing here + SET TARGET + DISCONNECT + CLEAR TARGET + INT COMPLETED_SELECTION_AS_TARGET +; +; These are the messaging entries +; +; Send a message. Message count should be correctly patched + ENTRY SendMessage +SendMessage: + MOVE MessageCount, MessageLocation, WHEN MSG_OUT +ResumeSendMessage: + RETURN, WHEN NOT MSG_OUT + INT FATAL_SEND_MSG + + ENTRY SendMessagePhaseMismatch +SendMessagePhaseMismatch: + CLEAR ACK + JUMP ResumeSendMessage +; +; Receive a message. 
Need to identify the message to +; receive it correctly + ENTRY ReceiveMessage +ReceiveMessage: + MOVE 1, ReceiveMsgAddress, WHEN MSG_IN +; +; Use this entry if we've just tried to look at the first byte +; of the message and want to process it further +ProcessReceiveMessage: + JUMP ReceiveExtendedMessage, IF EXTENDED_MSG + RETURN, IF NOT TWO_BYTE_MSG, AND MASK TWO_BYTE_MSG_MASK + CLEAR ACK + MOVE 1, ReceiveMsgAddress + 1, WHEN MSG_IN + RETURN +ReceiveExtendedMessage: + CLEAR ACK + MOVE 1, ReceiveMsgAddress + 1, WHEN MSG_IN + JUMP Receive1Byte, IF 0x01 + JUMP Receive2Byte, IF 0x02 + JUMP Receive3Byte, IF 0x03 + JUMP Receive4Byte, IF 0x04 + JUMP Receive5Byte, IF 0x05 + INT FATAL_ILLEGAL_MSG_LENGTH +Receive1Byte: + CLEAR ACK + MOVE 1, ReceiveMsgAddress + 2, WHEN MSG_IN + RETURN +Receive2Byte: + CLEAR ACK + MOVE 2, ReceiveMsgAddress + 2, WHEN MSG_IN + RETURN +Receive3Byte: + CLEAR ACK + MOVE 3, ReceiveMsgAddress + 2, WHEN MSG_IN + RETURN +Receive4Byte: + CLEAR ACK + MOVE 4, ReceiveMsgAddress + 2, WHEN MSG_IN + RETURN +Receive5Byte: + CLEAR ACK + MOVE 5, ReceiveMsgAddress + 2, WHEN MSG_IN + RETURN +; +; Come here from the message processor to ignore the message +; + ENTRY IgnoreMessage +IgnoreMessage: + CLEAR ACK + RETURN +; +; Come here to send a reply to a message +; + ENTRY SendMessageWithATN +SendMessageWithATN: + SET ATN + CLEAR ACK + JUMP SendMessage + +SendIdentifyMsg: + CALL SendMessage + CLEAR ATN + +IgnoreMsgBeforeCommand: + CLEAR ACK + ENTRY SendCommand +SendCommand: + JUMP Finish, WHEN STATUS + JUMP MsgInBeforeCommand, IF MSG_IN + INT UNEXPECTED_PHASE_BEFORE_CMD, IF NOT CMD + MOVE CommandCount, CommandAddress, WHEN CMD +ResumeSendCommand: + JUMP Finish, WHEN STATUS + JUMP MsgInAfterCmd, IF MSG_IN + JUMP DataIn, IF DATA_IN + JUMP DataOut, IF DATA_OUT + INT UNEXPECTED_PHASE_AFTER_CMD + +IgnoreMsgDuringData: + CLEAR ACK + ; fall through to MsgInDuringData + +Entry MsgInDuringData +MsgInDuringData: +; +; Could be we have nothing more to transfer +; + JUMP Finish, WHEN STATUS + MOVE 1, ReceiveMsgAddress, WHEN MSG_IN + JUMP DisconnectDuringDataIn, IF DISCONNECT_MSG + JUMP IgnoreMsgDuringData, IF SAVE_DATA_PTRS_MSG + JUMP IgnoreMsgDuringData, IF RESTORE_DATA_PTRS_MSG + INT MSG_IN_DURING_DATA_IN + +MsgInAfterCmd: + MOVE 1, ReceiveMsgAddress, WHEN MSG_IN + JUMP DisconnectAfterCmd, IF DISCONNECT_MSG + JUMP IgnoreMsgInAfterCmd, IF SAVE_DATA_PTRS_MSG + JUMP IgnoreMsgInAfterCmd, IF RESTORE_DATA_PTRS_MSG + CALL ProcessReceiveMessage + INT MSG_IN_AFTER_CMD + CLEAR ACK + JUMP ResumeSendCommand + +IgnoreMsgInAfterCmd: + CLEAR ACK + JUMP ResumeSendCommand + +DisconnectAfterCmd: + CLEAR ACK + WAIT DISCONNECT + ENTRY Disconnect1 +Disconnect1: + INT DISCONNECT_AFTER_CMD + ENTRY Disconnect2 +Disconnect2: +; We return here after a reselection + CLEAR ACK + JUMP ResumeSendCommand + +MsgInBeforeCommand: + MOVE 1, ReceiveMsgAddress, WHEN MSG_IN + JUMP IgnoreMsgBeforeCommand, IF SAVE_DATA_PTRS_MSG + JUMP IgnoreMsgBeforeCommand, IF RESTORE_DATA_PTRS_MSG + CALL ProcessReceiveMessage + INT MSG_IN_BEFORE_CMD + CLEAR ACK + JUMP SendCommand + +DataIn: + CALL SGScriptStartAddress +ResumeDataIn: + JUMP Finish, WHEN STATUS + JUMP MsgInAfterDataIn, IF MSG_IN + JUMP DataInAfterDataIn, if DATA_IN + INT MSG_OUT_AFTER_DATA_IN, if MSG_OUT + INT UNEXPECTED_PHASE_AFTER_DATA_IN + +DataInAfterDataIn: + INT DATA_IN_AFTER_DATA_IN + JUMP ResumeDataIn + +DataOut: + CALL SGScriptStartAddress +ResumeDataOut: + JUMP Finish, WHEN STATUS + JUMP MsgInAfterDataOut, IF MSG_IN + INT UNEXPECTED_PHASE_AFTER_DATA_OUT + +MsgInAfterDataIn: + 
MOVE 1, ReceiveMsgAddress, WHEN MSG_IN + JUMP DisconnectAfterDataIn, IF DISCONNECT_MSG + JUMP IgnoreMsgAfterData, IF SAVE_DATA_PTRS_MSG + JUMP IgnoreMsgAfterData, IF RESTORE_DATA_PTRS_MSG + CALL ProcessReceiveMessage + INT MSG_IN_AFTER_DATA_IN + CLEAR ACK + JUMP ResumeDataIn + +DisconnectDuringDataIn: + CLEAR ACK + WAIT DISCONNECT + ENTRY Disconnect3 +Disconnect3: + INT DISCONNECT_DURING_DATA + ENTRY Disconnect4 +Disconnect4: +; we return here after a reselection + CLEAR ACK + JUMP ResumeSendCommand + + +DisconnectAfterDataIn: + CLEAR ACK + WAIT DISCONNECT + ENTRY Disconnect5 +Disconnect5: + INT DISCONNECT_AFTER_DATA + ENTRY Disconnect6 +Disconnect6: +; we return here after a reselection + CLEAR ACK + JUMP ResumeDataIn + +MsgInAfterDataOut: + MOVE 1, ReceiveMsgAddress, WHEN MSG_IN + JUMP DisconnectAfterDataOut, if DISCONNECT_MSG + JUMP IgnoreMsgAfterData, IF SAVE_DATA_PTRS_MSG + JUMP IgnoreMsgAfterData, IF RESTORE_DATA_PTRS_MSG + CALL ProcessReceiveMessage + INT MSG_IN_AFTER_DATA_OUT + CLEAR ACK + JUMP ResumeDataOut + +IgnoreMsgAfterData: + CLEAR ACK +; Data in and out do the same thing on resume, so pick one + JUMP ResumeDataIn + +DisconnectAfterDataOut: + CLEAR ACK + WAIT DISCONNECT + ENTRY Disconnect7 +Disconnect7: + INT DISCONNECT_AFTER_DATA + ENTRY Disconnect8 +Disconnect8: +; we return here after a reselection + CLEAR ACK + JUMP ResumeDataOut + +Finish: + MOVE 1, StatusAddress, WHEN STATUS + INT NOT_MSG_IN_AFTER_STATUS, WHEN NOT MSG_IN + MOVE 1, ReceiveMsgAddress, WHEN MSG_IN + JUMP FinishCommandComplete, IF COMMAND_COMPLETE_MSG + CALL ProcessReceiveMessage + INT MSG_IN_AFTER_STATUS + ENTRY FinishCommandComplete +FinishCommandComplete: + CLEAR ACK + WAIT DISCONNECT + ENTRY Finish1 +Finish1: + INT GOOD_STATUS_AFTER_STATUS + ENTRY Finish2 +Finish2: + diff --git a/drivers/scsi/53c700_d.h_shipped b/drivers/scsi/53c700_d.h_shipped new file mode 100644 index 000000000..aa623da33 --- /dev/null +++ b/drivers/scsi/53c700_d.h_shipped @@ -0,0 +1,1329 @@ +/* DO NOT EDIT - Generated automatically by script_asm.pl */ +static u32 SCRIPT[] = { +/* +; Script for the NCR (or symbios) 53c700 and 53c700-66 chip +; +; Copyright (C) 2001 James.Bottomley@HansenPartnership.com +;;----------------------------------------------------------------------------- +;; +;; This program is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 2 of the License, or +;; (at your option) any later version. +;; +;; This program is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with this program; if not, write to the Free Software +;; Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +;; +;;----------------------------------------------------------------------------- +; +; This script is designed to be modified for the particular command in +; operation. 
The particular variables pertaining to the commands are: +; +ABSOLUTE Device_ID = 0 ; ID of target for command +ABSOLUTE MessageCount = 0 ; Number of bytes in message +ABSOLUTE MessageLocation = 0 ; Addr of message +ABSOLUTE CommandCount = 0 ; Number of bytes in command +ABSOLUTE CommandAddress = 0 ; Addr of Command +ABSOLUTE StatusAddress = 0 ; Addr to receive status return +ABSOLUTE ReceiveMsgAddress = 0 ; Addr to receive msg +; +; This is the magic component for handling scatter-gather. Each of the +; SG components is preceded by a script fragment which moves the +; necessary amount of data and jumps to the next SG segment. The final +; SG segment jumps back to . However, this address is the first SG script +; segment. +; +ABSOLUTE SGScriptStartAddress = 0 + +; The following represent status interrupts we use 3 hex digits for +; this: 0xPRS where + +; P: +ABSOLUTE AFTER_SELECTION = 0x100 +ABSOLUTE BEFORE_CMD = 0x200 +ABSOLUTE AFTER_CMD = 0x300 +ABSOLUTE AFTER_STATUS = 0x400 +ABSOLUTE AFTER_DATA_IN = 0x500 +ABSOLUTE AFTER_DATA_OUT = 0x600 +ABSOLUTE DURING_DATA_IN = 0x700 + +; R: +ABSOLUTE NOT_MSG_OUT = 0x10 +ABSOLUTE UNEXPECTED_PHASE = 0x20 +ABSOLUTE NOT_MSG_IN = 0x30 +ABSOLUTE UNEXPECTED_MSG = 0x40 +ABSOLUTE MSG_IN = 0x50 +ABSOLUTE SDTR_MSG_R = 0x60 +ABSOLUTE REJECT_MSG_R = 0x70 +ABSOLUTE DISCONNECT = 0x80 +ABSOLUTE MSG_OUT = 0x90 +ABSOLUTE WDTR_MSG_R = 0xA0 + +; S: +ABSOLUTE GOOD_STATUS = 0x1 + +; Combinations, since the script assembler can't process | +ABSOLUTE NOT_MSG_OUT_AFTER_SELECTION = 0x110 +ABSOLUTE UNEXPECTED_PHASE_BEFORE_CMD = 0x220 +ABSOLUTE UNEXPECTED_PHASE_AFTER_CMD = 0x320 +ABSOLUTE NOT_MSG_IN_AFTER_STATUS = 0x430 +ABSOLUTE GOOD_STATUS_AFTER_STATUS = 0x401 +ABSOLUTE UNEXPECTED_PHASE_AFTER_DATA_IN = 0x520 +ABSOLUTE UNEXPECTED_PHASE_AFTER_DATA_OUT = 0x620 +ABSOLUTE UNEXPECTED_MSG_BEFORE_CMD = 0x240 +ABSOLUTE MSG_IN_BEFORE_CMD = 0x250 +ABSOLUTE MSG_IN_AFTER_CMD = 0x350 +ABSOLUTE SDTR_MSG_BEFORE_CMD = 0x260 +ABSOLUTE REJECT_MSG_BEFORE_CMD = 0x270 +ABSOLUTE DISCONNECT_AFTER_CMD = 0x380 +ABSOLUTE SDTR_MSG_AFTER_CMD = 0x360 +ABSOLUTE WDTR_MSG_AFTER_CMD = 0x3A0 +ABSOLUTE MSG_IN_AFTER_STATUS = 0x440 +ABSOLUTE DISCONNECT_AFTER_DATA = 0x580 +ABSOLUTE MSG_IN_AFTER_DATA_IN = 0x550 +ABSOLUTE MSG_IN_AFTER_DATA_OUT = 0x650 +ABSOLUTE MSG_OUT_AFTER_DATA_IN = 0x590 +ABSOLUTE DATA_IN_AFTER_DATA_IN = 0x5a0 +ABSOLUTE MSG_IN_DURING_DATA_IN = 0x750 +ABSOLUTE DISCONNECT_DURING_DATA = 0x780 + +; +; Other interrupt conditions +; +ABSOLUTE RESELECTED_DURING_SELECTION = 0x1000 +ABSOLUTE COMPLETED_SELECTION_AS_TARGET = 0x1001 +ABSOLUTE RESELECTION_IDENTIFIED = 0x1003 +; +; Fatal interrupt conditions. 
If you add to this, also add to the +; array of corresponding messages +; +ABSOLUTE FATAL = 0x2000 +ABSOLUTE FATAL_UNEXPECTED_RESELECTION_MSG = 0x2000 +ABSOLUTE FATAL_SEND_MSG = 0x2001 +ABSOLUTE FATAL_NOT_MSG_IN_AFTER_SELECTION = 0x2002 +ABSOLUTE FATAL_ILLEGAL_MSG_LENGTH = 0x2003 + +ABSOLUTE DEBUG_INTERRUPT = 0x3000 +ABSOLUTE DEBUG_INTERRUPT1 = 0x3001 +ABSOLUTE DEBUG_INTERRUPT2 = 0x3002 +ABSOLUTE DEBUG_INTERRUPT3 = 0x3003 +ABSOLUTE DEBUG_INTERRUPT4 = 0x3004 +ABSOLUTE DEBUG_INTERRUPT5 = 0x3005 +ABSOLUTE DEBUG_INTERRUPT6 = 0x3006 + + +; +; SCSI Messages we interpret in the script +; +ABSOLUTE COMMAND_COMPLETE_MSG = 0x00 +ABSOLUTE EXTENDED_MSG = 0x01 +ABSOLUTE SDTR_MSG = 0x01 +ABSOLUTE SAVE_DATA_PTRS_MSG = 0x02 +ABSOLUTE RESTORE_DATA_PTRS_MSG = 0x03 +ABSOLUTE WDTR_MSG = 0x03 +ABSOLUTE DISCONNECT_MSG = 0x04 +ABSOLUTE REJECT_MSG = 0x07 +ABSOLUTE PARITY_ERROR_MSG = 0x09 +ABSOLUTE SIMPLE_TAG_MSG = 0x20 +ABSOLUTE IDENTIFY_MSG = 0x80 +ABSOLUTE IDENTIFY_MSG_MASK = 0x7F +ABSOLUTE TWO_BYTE_MSG = 0x20 +ABSOLUTE TWO_BYTE_MSG_MASK = 0x0F + +; This is where the script begins + +ENTRY StartUp + +StartUp: + SELECT ATN Device_ID, Reselect + +at 0x00000000 : */ 0x41000000,0x00000020, +/* + JUMP Finish, WHEN STATUS + +at 0x00000002 : */ 0x830b0000,0x00000460, +/* + JUMP SendIdentifyMsg, IF MSG_OUT + +at 0x00000004 : */ 0x860a0000,0x000001b0, +/* + INT NOT_MSG_OUT_AFTER_SELECTION + +at 0x00000006 : */ 0x98080000,0x00000110, +/* + +Reselect: + WAIT RESELECT SelectedAsTarget + +at 0x00000008 : */ 0x50000000,0x00000058, +/* + INT RESELECTED_DURING_SELECTION, WHEN MSG_IN + +at 0x0000000a : */ 0x9f0b0000,0x00001000, +/* + INT FATAL_NOT_MSG_IN_AFTER_SELECTION + +at 0x0000000c : */ 0x98080000,0x00002002, +/* + + ENTRY GetReselectionData +GetReselectionData: + MOVE 1, ReceiveMsgAddress, WHEN MSG_IN + +at 0x0000000e : */ 0x0f000001,0x00000000, +/* + INT RESELECTION_IDENTIFIED + +at 0x00000010 : */ 0x98080000,0x00001003, +/* + + ENTRY GetReselectionWithTag +GetReselectionWithTag: + MOVE 3, ReceiveMsgAddress, WHEN MSG_IN + +at 0x00000012 : */ 0x0f000003,0x00000000, +/* + INT RESELECTION_IDENTIFIED + +at 0x00000014 : */ 0x98080000,0x00001003, +/* + + ENTRY SelectedAsTarget +SelectedAsTarget: +; Basically tell the selecting device that there's nothing here + SET TARGET + +at 0x00000016 : */ 0x58000200,0x00000000, +/* + DISCONNECT + +at 0x00000018 : */ 0x48000000,0x00000000, +/* + CLEAR TARGET + +at 0x0000001a : */ 0x60000200,0x00000000, +/* + INT COMPLETED_SELECTION_AS_TARGET + +at 0x0000001c : */ 0x98080000,0x00001001, +/* +; +; These are the messaging entries +; +; Send a message. Message count should be correctly patched + ENTRY SendMessage +SendMessage: + MOVE MessageCount, MessageLocation, WHEN MSG_OUT + +at 0x0000001e : */ 0x0e000000,0x00000000, +/* +ResumeSendMessage: + RETURN, WHEN NOT MSG_OUT + +at 0x00000020 : */ 0x96030000,0x00000000, +/* + INT FATAL_SEND_MSG + +at 0x00000022 : */ 0x98080000,0x00002001, +/* + + ENTRY SendMessagePhaseMismatch +SendMessagePhaseMismatch: + CLEAR ACK + +at 0x00000024 : */ 0x60000040,0x00000000, +/* + JUMP ResumeSendMessage + +at 0x00000026 : */ 0x80080000,0x00000080, +/* +; +; Receive a message. 
Need to identify the message to +; receive it correctly + ENTRY ReceiveMessage +ReceiveMessage: + MOVE 1, ReceiveMsgAddress, WHEN MSG_IN + +at 0x00000028 : */ 0x0f000001,0x00000000, +/* +; +; Use this entry if we've just tried to look at the first byte +; of the message and want to process it further +ProcessReceiveMessage: + JUMP ReceiveExtendedMessage, IF EXTENDED_MSG + +at 0x0000002a : */ 0x800c0001,0x000000d0, +/* + RETURN, IF NOT TWO_BYTE_MSG, AND MASK TWO_BYTE_MSG_MASK + +at 0x0000002c : */ 0x90040f20,0x00000000, +/* + CLEAR ACK + +at 0x0000002e : */ 0x60000040,0x00000000, +/* + MOVE 1, ReceiveMsgAddress + 1, WHEN MSG_IN + +at 0x00000030 : */ 0x0f000001,0x00000001, +/* + RETURN + +at 0x00000032 : */ 0x90080000,0x00000000, +/* +ReceiveExtendedMessage: + CLEAR ACK + +at 0x00000034 : */ 0x60000040,0x00000000, +/* + MOVE 1, ReceiveMsgAddress + 1, WHEN MSG_IN + +at 0x00000036 : */ 0x0f000001,0x00000001, +/* + JUMP Receive1Byte, IF 0x01 + +at 0x00000038 : */ 0x800c0001,0x00000110, +/* + JUMP Receive2Byte, IF 0x02 + +at 0x0000003a : */ 0x800c0002,0x00000128, +/* + JUMP Receive3Byte, IF 0x03 + +at 0x0000003c : */ 0x800c0003,0x00000140, +/* + JUMP Receive4Byte, IF 0x04 + +at 0x0000003e : */ 0x800c0004,0x00000158, +/* + JUMP Receive5Byte, IF 0x05 + +at 0x00000040 : */ 0x800c0005,0x00000170, +/* + INT FATAL_ILLEGAL_MSG_LENGTH + +at 0x00000042 : */ 0x98080000,0x00002003, +/* +Receive1Byte: + CLEAR ACK + +at 0x00000044 : */ 0x60000040,0x00000000, +/* + MOVE 1, ReceiveMsgAddress + 2, WHEN MSG_IN + +at 0x00000046 : */ 0x0f000001,0x00000002, +/* + RETURN + +at 0x00000048 : */ 0x90080000,0x00000000, +/* +Receive2Byte: + CLEAR ACK + +at 0x0000004a : */ 0x60000040,0x00000000, +/* + MOVE 2, ReceiveMsgAddress + 2, WHEN MSG_IN + +at 0x0000004c : */ 0x0f000002,0x00000002, +/* + RETURN + +at 0x0000004e : */ 0x90080000,0x00000000, +/* +Receive3Byte: + CLEAR ACK + +at 0x00000050 : */ 0x60000040,0x00000000, +/* + MOVE 3, ReceiveMsgAddress + 2, WHEN MSG_IN + +at 0x00000052 : */ 0x0f000003,0x00000002, +/* + RETURN + +at 0x00000054 : */ 0x90080000,0x00000000, +/* +Receive4Byte: + CLEAR ACK + +at 0x00000056 : */ 0x60000040,0x00000000, +/* + MOVE 4, ReceiveMsgAddress + 2, WHEN MSG_IN + +at 0x00000058 : */ 0x0f000004,0x00000002, +/* + RETURN + +at 0x0000005a : */ 0x90080000,0x00000000, +/* +Receive5Byte: + CLEAR ACK + +at 0x0000005c : */ 0x60000040,0x00000000, +/* + MOVE 5, ReceiveMsgAddress + 2, WHEN MSG_IN + +at 0x0000005e : */ 0x0f000005,0x00000002, +/* + RETURN + +at 0x00000060 : */ 0x90080000,0x00000000, +/* +; +; Come here from the message processor to ignore the message +; + ENTRY IgnoreMessage +IgnoreMessage: + CLEAR ACK + +at 0x00000062 : */ 0x60000040,0x00000000, +/* + RETURN + +at 0x00000064 : */ 0x90080000,0x00000000, +/* +; +; Come here to send a reply to a message +; + ENTRY SendMessageWithATN +SendMessageWithATN: + SET ATN + +at 0x00000066 : */ 0x58000008,0x00000000, +/* + CLEAR ACK + +at 0x00000068 : */ 0x60000040,0x00000000, +/* + JUMP SendMessage + +at 0x0000006a : */ 0x80080000,0x00000078, +/* + +SendIdentifyMsg: + CALL SendMessage + +at 0x0000006c : */ 0x88080000,0x00000078, +/* + CLEAR ATN + +at 0x0000006e : */ 0x60000008,0x00000000, +/* + +IgnoreMsgBeforeCommand: + CLEAR ACK + +at 0x00000070 : */ 0x60000040,0x00000000, +/* + ENTRY SendCommand +SendCommand: + JUMP Finish, WHEN STATUS + +at 0x00000072 : */ 0x830b0000,0x00000460, +/* + JUMP MsgInBeforeCommand, IF MSG_IN + +at 0x00000074 : */ 0x870a0000,0x000002c0, +/* + INT UNEXPECTED_PHASE_BEFORE_CMD, IF NOT CMD + +at 0x00000076 : */ 
0x9a020000,0x00000220, +/* + MOVE CommandCount, CommandAddress, WHEN CMD + +at 0x00000078 : */ 0x0a000000,0x00000000, +/* +ResumeSendCommand: + JUMP Finish, WHEN STATUS + +at 0x0000007a : */ 0x830b0000,0x00000460, +/* + JUMP MsgInAfterCmd, IF MSG_IN + +at 0x0000007c : */ 0x870a0000,0x00000248, +/* + JUMP DataIn, IF DATA_IN + +at 0x0000007e : */ 0x810a0000,0x000002f8, +/* + JUMP DataOut, IF DATA_OUT + +at 0x00000080 : */ 0x800a0000,0x00000338, +/* + INT UNEXPECTED_PHASE_AFTER_CMD + +at 0x00000082 : */ 0x98080000,0x00000320, +/* + +IgnoreMsgDuringData: + CLEAR ACK + +at 0x00000084 : */ 0x60000040,0x00000000, +/* + ; fall through to MsgInDuringData + +Entry MsgInDuringData +MsgInDuringData: +; +; Could be we have nothing more to transfer +; + JUMP Finish, WHEN STATUS + +at 0x00000086 : */ 0x830b0000,0x00000460, +/* + MOVE 1, ReceiveMsgAddress, WHEN MSG_IN + +at 0x00000088 : */ 0x0f000001,0x00000000, +/* + JUMP DisconnectDuringDataIn, IF DISCONNECT_MSG + +at 0x0000008a : */ 0x800c0004,0x00000398, +/* + JUMP IgnoreMsgDuringData, IF SAVE_DATA_PTRS_MSG + +at 0x0000008c : */ 0x800c0002,0x00000210, +/* + JUMP IgnoreMsgDuringData, IF RESTORE_DATA_PTRS_MSG + +at 0x0000008e : */ 0x800c0003,0x00000210, +/* + INT MSG_IN_DURING_DATA_IN + +at 0x00000090 : */ 0x98080000,0x00000750, +/* + +MsgInAfterCmd: + MOVE 1, ReceiveMsgAddress, WHEN MSG_IN + +at 0x00000092 : */ 0x0f000001,0x00000000, +/* + JUMP DisconnectAfterCmd, IF DISCONNECT_MSG + +at 0x00000094 : */ 0x800c0004,0x00000298, +/* + JUMP IgnoreMsgInAfterCmd, IF SAVE_DATA_PTRS_MSG + +at 0x00000096 : */ 0x800c0002,0x00000288, +/* + JUMP IgnoreMsgInAfterCmd, IF RESTORE_DATA_PTRS_MSG + +at 0x00000098 : */ 0x800c0003,0x00000288, +/* + CALL ProcessReceiveMessage + +at 0x0000009a : */ 0x88080000,0x000000a8, +/* + INT MSG_IN_AFTER_CMD + +at 0x0000009c : */ 0x98080000,0x00000350, +/* + CLEAR ACK + +at 0x0000009e : */ 0x60000040,0x00000000, +/* + JUMP ResumeSendCommand + +at 0x000000a0 : */ 0x80080000,0x000001e8, +/* + +IgnoreMsgInAfterCmd: + CLEAR ACK + +at 0x000000a2 : */ 0x60000040,0x00000000, +/* + JUMP ResumeSendCommand + +at 0x000000a4 : */ 0x80080000,0x000001e8, +/* + +DisconnectAfterCmd: + CLEAR ACK + +at 0x000000a6 : */ 0x60000040,0x00000000, +/* + WAIT DISCONNECT + +at 0x000000a8 : */ 0x48000000,0x00000000, +/* + ENTRY Disconnect1 +Disconnect1: + INT DISCONNECT_AFTER_CMD + +at 0x000000aa : */ 0x98080000,0x00000380, +/* + ENTRY Disconnect2 +Disconnect2: +; We return here after a reselection + CLEAR ACK + +at 0x000000ac : */ 0x60000040,0x00000000, +/* + JUMP ResumeSendCommand + +at 0x000000ae : */ 0x80080000,0x000001e8, +/* + +MsgInBeforeCommand: + MOVE 1, ReceiveMsgAddress, WHEN MSG_IN + +at 0x000000b0 : */ 0x0f000001,0x00000000, +/* + JUMP IgnoreMsgBeforeCommand, IF SAVE_DATA_PTRS_MSG + +at 0x000000b2 : */ 0x800c0002,0x000001c0, +/* + JUMP IgnoreMsgBeforeCommand, IF RESTORE_DATA_PTRS_MSG + +at 0x000000b4 : */ 0x800c0003,0x000001c0, +/* + CALL ProcessReceiveMessage + +at 0x000000b6 : */ 0x88080000,0x000000a8, +/* + INT MSG_IN_BEFORE_CMD + +at 0x000000b8 : */ 0x98080000,0x00000250, +/* + CLEAR ACK + +at 0x000000ba : */ 0x60000040,0x00000000, +/* + JUMP SendCommand + +at 0x000000bc : */ 0x80080000,0x000001c8, +/* + +DataIn: + CALL SGScriptStartAddress + +at 0x000000be : */ 0x88080000,0x00000000, +/* +ResumeDataIn: + JUMP Finish, WHEN STATUS + +at 0x000000c0 : */ 0x830b0000,0x00000460, +/* + JUMP MsgInAfterDataIn, IF MSG_IN + +at 0x000000c2 : */ 0x870a0000,0x00000358, +/* + JUMP DataInAfterDataIn, if DATA_IN + +at 0x000000c4 : */ 0x810a0000,0x00000328, +/* 
+ INT MSG_OUT_AFTER_DATA_IN, if MSG_OUT + +at 0x000000c6 : */ 0x9e0a0000,0x00000590, +/* + INT UNEXPECTED_PHASE_AFTER_DATA_IN + +at 0x000000c8 : */ 0x98080000,0x00000520, +/* + +DataInAfterDataIn: + INT DATA_IN_AFTER_DATA_IN + +at 0x000000ca : */ 0x98080000,0x000005a0, +/* + JUMP ResumeDataIn + +at 0x000000cc : */ 0x80080000,0x00000300, +/* + +DataOut: + CALL SGScriptStartAddress + +at 0x000000ce : */ 0x88080000,0x00000000, +/* +ResumeDataOut: + JUMP Finish, WHEN STATUS + +at 0x000000d0 : */ 0x830b0000,0x00000460, +/* + JUMP MsgInAfterDataOut, IF MSG_IN + +at 0x000000d2 : */ 0x870a0000,0x000003e8, +/* + INT UNEXPECTED_PHASE_AFTER_DATA_OUT + +at 0x000000d4 : */ 0x98080000,0x00000620, +/* + +MsgInAfterDataIn: + MOVE 1, ReceiveMsgAddress, WHEN MSG_IN + +at 0x000000d6 : */ 0x0f000001,0x00000000, +/* + JUMP DisconnectAfterDataIn, IF DISCONNECT_MSG + +at 0x000000d8 : */ 0x800c0004,0x000003c0, +/* + JUMP IgnoreMsgAfterData, IF SAVE_DATA_PTRS_MSG + +at 0x000000da : */ 0x800c0002,0x00000428, +/* + JUMP IgnoreMsgAfterData, IF RESTORE_DATA_PTRS_MSG + +at 0x000000dc : */ 0x800c0003,0x00000428, +/* + CALL ProcessReceiveMessage + +at 0x000000de : */ 0x88080000,0x000000a8, +/* + INT MSG_IN_AFTER_DATA_IN + +at 0x000000e0 : */ 0x98080000,0x00000550, +/* + CLEAR ACK + +at 0x000000e2 : */ 0x60000040,0x00000000, +/* + JUMP ResumeDataIn + +at 0x000000e4 : */ 0x80080000,0x00000300, +/* + +DisconnectDuringDataIn: + CLEAR ACK + +at 0x000000e6 : */ 0x60000040,0x00000000, +/* + WAIT DISCONNECT + +at 0x000000e8 : */ 0x48000000,0x00000000, +/* + ENTRY Disconnect3 +Disconnect3: + INT DISCONNECT_DURING_DATA + +at 0x000000ea : */ 0x98080000,0x00000780, +/* + ENTRY Disconnect4 +Disconnect4: +; we return here after a reselection + CLEAR ACK + +at 0x000000ec : */ 0x60000040,0x00000000, +/* + JUMP ResumeSendCommand + +at 0x000000ee : */ 0x80080000,0x000001e8, +/* + + +DisconnectAfterDataIn: + CLEAR ACK + +at 0x000000f0 : */ 0x60000040,0x00000000, +/* + WAIT DISCONNECT + +at 0x000000f2 : */ 0x48000000,0x00000000, +/* + ENTRY Disconnect5 +Disconnect5: + INT DISCONNECT_AFTER_DATA + +at 0x000000f4 : */ 0x98080000,0x00000580, +/* + ENTRY Disconnect6 +Disconnect6: +; we return here after a reselection + CLEAR ACK + +at 0x000000f6 : */ 0x60000040,0x00000000, +/* + JUMP ResumeDataIn + +at 0x000000f8 : */ 0x80080000,0x00000300, +/* + +MsgInAfterDataOut: + MOVE 1, ReceiveMsgAddress, WHEN MSG_IN + +at 0x000000fa : */ 0x0f000001,0x00000000, +/* + JUMP DisconnectAfterDataOut, if DISCONNECT_MSG + +at 0x000000fc : */ 0x800c0004,0x00000438, +/* + JUMP IgnoreMsgAfterData, IF SAVE_DATA_PTRS_MSG + +at 0x000000fe : */ 0x800c0002,0x00000428, +/* + JUMP IgnoreMsgAfterData, IF RESTORE_DATA_PTRS_MSG + +at 0x00000100 : */ 0x800c0003,0x00000428, +/* + CALL ProcessReceiveMessage + +at 0x00000102 : */ 0x88080000,0x000000a8, +/* + INT MSG_IN_AFTER_DATA_OUT + +at 0x00000104 : */ 0x98080000,0x00000650, +/* + CLEAR ACK + +at 0x00000106 : */ 0x60000040,0x00000000, +/* + JUMP ResumeDataOut + +at 0x00000108 : */ 0x80080000,0x00000340, +/* + +IgnoreMsgAfterData: + CLEAR ACK + +at 0x0000010a : */ 0x60000040,0x00000000, +/* +; Data in and out do the same thing on resume, so pick one + JUMP ResumeDataIn + +at 0x0000010c : */ 0x80080000,0x00000300, +/* + +DisconnectAfterDataOut: + CLEAR ACK + +at 0x0000010e : */ 0x60000040,0x00000000, +/* + WAIT DISCONNECT + +at 0x00000110 : */ 0x48000000,0x00000000, +/* + ENTRY Disconnect7 +Disconnect7: + INT DISCONNECT_AFTER_DATA + +at 0x00000112 : */ 0x98080000,0x00000580, +/* + ENTRY Disconnect8 +Disconnect8: +; we return 
here after a reselection + CLEAR ACK + +at 0x00000114 : */ 0x60000040,0x00000000, +/* + JUMP ResumeDataOut + +at 0x00000116 : */ 0x80080000,0x00000340, +/* + +Finish: + MOVE 1, StatusAddress, WHEN STATUS + +at 0x00000118 : */ 0x0b000001,0x00000000, +/* + INT NOT_MSG_IN_AFTER_STATUS, WHEN NOT MSG_IN + +at 0x0000011a : */ 0x9f030000,0x00000430, +/* + MOVE 1, ReceiveMsgAddress, WHEN MSG_IN + +at 0x0000011c : */ 0x0f000001,0x00000000, +/* + JUMP FinishCommandComplete, IF COMMAND_COMPLETE_MSG + +at 0x0000011e : */ 0x800c0000,0x00000490, +/* + CALL ProcessReceiveMessage + +at 0x00000120 : */ 0x88080000,0x000000a8, +/* + INT MSG_IN_AFTER_STATUS + +at 0x00000122 : */ 0x98080000,0x00000440, +/* + ENTRY FinishCommandComplete +FinishCommandComplete: + CLEAR ACK + +at 0x00000124 : */ 0x60000040,0x00000000, +/* + WAIT DISCONNECT + +at 0x00000126 : */ 0x48000000,0x00000000, +/* + ENTRY Finish1 +Finish1: + INT GOOD_STATUS_AFTER_STATUS + +at 0x00000128 : */ 0x98080000,0x00000401, +}; + +#define A_AFTER_CMD 0x00000300 +static u32 A_AFTER_CMD_used[] __attribute((unused)) = { +}; + +#define A_AFTER_DATA_IN 0x00000500 +static u32 A_AFTER_DATA_IN_used[] __attribute((unused)) = { +}; + +#define A_AFTER_DATA_OUT 0x00000600 +static u32 A_AFTER_DATA_OUT_used[] __attribute((unused)) = { +}; + +#define A_AFTER_SELECTION 0x00000100 +static u32 A_AFTER_SELECTION_used[] __attribute((unused)) = { +}; + +#define A_AFTER_STATUS 0x00000400 +static u32 A_AFTER_STATUS_used[] __attribute((unused)) = { +}; + +#define A_BEFORE_CMD 0x00000200 +static u32 A_BEFORE_CMD_used[] __attribute((unused)) = { +}; + +#define A_COMMAND_COMPLETE_MSG 0x00000000 +static u32 A_COMMAND_COMPLETE_MSG_used[] __attribute((unused)) = { + 0x0000011e, +}; + +#define A_COMPLETED_SELECTION_AS_TARGET 0x00001001 +static u32 A_COMPLETED_SELECTION_AS_TARGET_used[] __attribute((unused)) = { + 0x0000001d, +}; + +#define A_CommandAddress 0x00000000 +static u32 A_CommandAddress_used[] __attribute((unused)) = { + 0x00000079, +}; + +#define A_CommandCount 0x00000000 +static u32 A_CommandCount_used[] __attribute((unused)) = { + 0x00000078, +}; + +#define A_DATA_IN_AFTER_DATA_IN 0x000005a0 +static u32 A_DATA_IN_AFTER_DATA_IN_used[] __attribute((unused)) = { + 0x000000cb, +}; + +#define A_DEBUG_INTERRUPT 0x00003000 +static u32 A_DEBUG_INTERRUPT_used[] __attribute((unused)) = { +}; + +#define A_DEBUG_INTERRUPT1 0x00003001 +static u32 A_DEBUG_INTERRUPT1_used[] __attribute((unused)) = { +}; + +#define A_DEBUG_INTERRUPT2 0x00003002 +static u32 A_DEBUG_INTERRUPT2_used[] __attribute((unused)) = { +}; + +#define A_DEBUG_INTERRUPT3 0x00003003 +static u32 A_DEBUG_INTERRUPT3_used[] __attribute((unused)) = { +}; + +#define A_DEBUG_INTERRUPT4 0x00003004 +static u32 A_DEBUG_INTERRUPT4_used[] __attribute((unused)) = { +}; + +#define A_DEBUG_INTERRUPT5 0x00003005 +static u32 A_DEBUG_INTERRUPT5_used[] __attribute((unused)) = { +}; + +#define A_DEBUG_INTERRUPT6 0x00003006 +static u32 A_DEBUG_INTERRUPT6_used[] __attribute((unused)) = { +}; + +#define A_DISCONNECT 0x00000080 +static u32 A_DISCONNECT_used[] __attribute((unused)) = { +}; + +#define A_DISCONNECT_AFTER_CMD 0x00000380 +static u32 A_DISCONNECT_AFTER_CMD_used[] __attribute((unused)) = { + 0x000000ab, +}; + +#define A_DISCONNECT_AFTER_DATA 0x00000580 +static u32 A_DISCONNECT_AFTER_DATA_used[] __attribute((unused)) = { + 0x000000f5, + 0x00000113, +}; + +#define A_DISCONNECT_DURING_DATA 0x00000780 +static u32 A_DISCONNECT_DURING_DATA_used[] __attribute((unused)) = { + 0x000000eb, +}; + +#define A_DISCONNECT_MSG 0x00000004 
+static u32 A_DISCONNECT_MSG_used[] __attribute((unused)) = { + 0x0000008a, + 0x00000094, + 0x000000d8, + 0x000000fc, +}; + +#define A_DURING_DATA_IN 0x00000700 +static u32 A_DURING_DATA_IN_used[] __attribute((unused)) = { +}; + +#define A_Device_ID 0x00000000 +static u32 A_Device_ID_used[] __attribute((unused)) = { + 0x00000000, +}; + +#define A_EXTENDED_MSG 0x00000001 +static u32 A_EXTENDED_MSG_used[] __attribute((unused)) = { + 0x0000002a, +}; + +#define A_FATAL 0x00002000 +static u32 A_FATAL_used[] __attribute((unused)) = { +}; + +#define A_FATAL_ILLEGAL_MSG_LENGTH 0x00002003 +static u32 A_FATAL_ILLEGAL_MSG_LENGTH_used[] __attribute((unused)) = { + 0x00000043, +}; + +#define A_FATAL_NOT_MSG_IN_AFTER_SELECTION 0x00002002 +static u32 A_FATAL_NOT_MSG_IN_AFTER_SELECTION_used[] __attribute((unused)) = { + 0x0000000d, +}; + +#define A_FATAL_SEND_MSG 0x00002001 +static u32 A_FATAL_SEND_MSG_used[] __attribute((unused)) = { + 0x00000023, +}; + +#define A_FATAL_UNEXPECTED_RESELECTION_MSG 0x00002000 +static u32 A_FATAL_UNEXPECTED_RESELECTION_MSG_used[] __attribute((unused)) = { +}; + +#define A_GOOD_STATUS 0x00000001 +static u32 A_GOOD_STATUS_used[] __attribute((unused)) = { +}; + +#define A_GOOD_STATUS_AFTER_STATUS 0x00000401 +static u32 A_GOOD_STATUS_AFTER_STATUS_used[] __attribute((unused)) = { + 0x00000129, +}; + +#define A_IDENTIFY_MSG 0x00000080 +static u32 A_IDENTIFY_MSG_used[] __attribute((unused)) = { +}; + +#define A_IDENTIFY_MSG_MASK 0x0000007f +static u32 A_IDENTIFY_MSG_MASK_used[] __attribute((unused)) = { +}; + +#define A_MSG_IN 0x00000050 +static u32 A_MSG_IN_used[] __attribute((unused)) = { +}; + +#define A_MSG_IN_AFTER_CMD 0x00000350 +static u32 A_MSG_IN_AFTER_CMD_used[] __attribute((unused)) = { + 0x0000009d, +}; + +#define A_MSG_IN_AFTER_DATA_IN 0x00000550 +static u32 A_MSG_IN_AFTER_DATA_IN_used[] __attribute((unused)) = { + 0x000000e1, +}; + +#define A_MSG_IN_AFTER_DATA_OUT 0x00000650 +static u32 A_MSG_IN_AFTER_DATA_OUT_used[] __attribute((unused)) = { + 0x00000105, +}; + +#define A_MSG_IN_AFTER_STATUS 0x00000440 +static u32 A_MSG_IN_AFTER_STATUS_used[] __attribute((unused)) = { + 0x00000123, +}; + +#define A_MSG_IN_BEFORE_CMD 0x00000250 +static u32 A_MSG_IN_BEFORE_CMD_used[] __attribute((unused)) = { + 0x000000b9, +}; + +#define A_MSG_IN_DURING_DATA_IN 0x00000750 +static u32 A_MSG_IN_DURING_DATA_IN_used[] __attribute((unused)) = { + 0x00000091, +}; + +#define A_MSG_OUT 0x00000090 +static u32 A_MSG_OUT_used[] __attribute((unused)) = { +}; + +#define A_MSG_OUT_AFTER_DATA_IN 0x00000590 +static u32 A_MSG_OUT_AFTER_DATA_IN_used[] __attribute((unused)) = { + 0x000000c7, +}; + +#define A_MessageCount 0x00000000 +static u32 A_MessageCount_used[] __attribute((unused)) = { + 0x0000001e, +}; + +#define A_MessageLocation 0x00000000 +static u32 A_MessageLocation_used[] __attribute((unused)) = { + 0x0000001f, +}; + +#define A_NOT_MSG_IN 0x00000030 +static u32 A_NOT_MSG_IN_used[] __attribute((unused)) = { +}; + +#define A_NOT_MSG_IN_AFTER_STATUS 0x00000430 +static u32 A_NOT_MSG_IN_AFTER_STATUS_used[] __attribute((unused)) = { + 0x0000011b, +}; + +#define A_NOT_MSG_OUT 0x00000010 +static u32 A_NOT_MSG_OUT_used[] __attribute((unused)) = { +}; + +#define A_NOT_MSG_OUT_AFTER_SELECTION 0x00000110 +static u32 A_NOT_MSG_OUT_AFTER_SELECTION_used[] __attribute((unused)) = { + 0x00000007, +}; + +#define A_PARITY_ERROR_MSG 0x00000009 +static u32 A_PARITY_ERROR_MSG_used[] __attribute((unused)) = { +}; + +#define A_REJECT_MSG 0x00000007 +static u32 A_REJECT_MSG_used[] __attribute((unused)) = { +}; + 
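+/*
+ * Illustrative sketch only (hypothetical helper, not part of the generated
+ * file): each A_<symbol>_used[] table above lists the SCRIPT[] slots that
+ * reference that symbol.  The script_patch_32() macro in 53c700.h walks
+ * such a table and adds a bus address to every listed slot, conceptually:
+ *
+ *	static void example_patch_32(u32 *script, const u32 *used,
+ *				     size_t n, u32 bus_addr)
+ *	{
+ *		size_t i;
+ *
+ *		for (i = 0; i < n; i++)
+ *			script[used[i]] += bus_addr;
+ *	}
+ *
+ * The real macro additionally byte-swaps each word via bS_to_cpu() and
+ * bS_to_host() and calls dma_sync_to_dev() on every patched slot.
+ */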
+#define A_REJECT_MSG_BEFORE_CMD 0x00000270 +static u32 A_REJECT_MSG_BEFORE_CMD_used[] __attribute((unused)) = { +}; + +#define A_REJECT_MSG_R 0x00000070 +static u32 A_REJECT_MSG_R_used[] __attribute((unused)) = { +}; + +#define A_RESELECTED_DURING_SELECTION 0x00001000 +static u32 A_RESELECTED_DURING_SELECTION_used[] __attribute((unused)) = { + 0x0000000b, +}; + +#define A_RESELECTION_IDENTIFIED 0x00001003 +static u32 A_RESELECTION_IDENTIFIED_used[] __attribute((unused)) = { + 0x00000011, + 0x00000015, +}; + +#define A_RESTORE_DATA_PTRS_MSG 0x00000003 +static u32 A_RESTORE_DATA_PTRS_MSG_used[] __attribute((unused)) = { + 0x0000008e, + 0x00000098, + 0x000000b4, + 0x000000dc, + 0x00000100, +}; + +#define A_ReceiveMsgAddress 0x00000000 +static u32 A_ReceiveMsgAddress_used[] __attribute((unused)) = { + 0x0000000f, + 0x00000013, + 0x00000029, + 0x00000031, + 0x00000037, + 0x00000047, + 0x0000004d, + 0x00000053, + 0x00000059, + 0x0000005f, + 0x00000089, + 0x00000093, + 0x000000b1, + 0x000000d7, + 0x000000fb, + 0x0000011d, +}; + +#define A_SAVE_DATA_PTRS_MSG 0x00000002 +static u32 A_SAVE_DATA_PTRS_MSG_used[] __attribute((unused)) = { + 0x0000008c, + 0x00000096, + 0x000000b2, + 0x000000da, + 0x000000fe, +}; + +#define A_SDTR_MSG 0x00000001 +static u32 A_SDTR_MSG_used[] __attribute((unused)) = { +}; + +#define A_SDTR_MSG_AFTER_CMD 0x00000360 +static u32 A_SDTR_MSG_AFTER_CMD_used[] __attribute((unused)) = { +}; + +#define A_SDTR_MSG_BEFORE_CMD 0x00000260 +static u32 A_SDTR_MSG_BEFORE_CMD_used[] __attribute((unused)) = { +}; + +#define A_SDTR_MSG_R 0x00000060 +static u32 A_SDTR_MSG_R_used[] __attribute((unused)) = { +}; + +#define A_SGScriptStartAddress 0x00000000 +static u32 A_SGScriptStartAddress_used[] __attribute((unused)) = { + 0x000000bf, + 0x000000cf, +}; + +#define A_SIMPLE_TAG_MSG 0x00000020 +static u32 A_SIMPLE_TAG_MSG_used[] __attribute((unused)) = { +}; + +#define A_StatusAddress 0x00000000 +static u32 A_StatusAddress_used[] __attribute((unused)) = { + 0x00000119, +}; + +#define A_TWO_BYTE_MSG 0x00000020 +static u32 A_TWO_BYTE_MSG_used[] __attribute((unused)) = { + 0x0000002c, +}; + +#define A_TWO_BYTE_MSG_MASK 0x0000000f +static u32 A_TWO_BYTE_MSG_MASK_used[] __attribute((unused)) = { + 0x0000002c, +}; + +#define A_UNEXPECTED_MSG 0x00000040 +static u32 A_UNEXPECTED_MSG_used[] __attribute((unused)) = { +}; + +#define A_UNEXPECTED_MSG_BEFORE_CMD 0x00000240 +static u32 A_UNEXPECTED_MSG_BEFORE_CMD_used[] __attribute((unused)) = { +}; + +#define A_UNEXPECTED_PHASE 0x00000020 +static u32 A_UNEXPECTED_PHASE_used[] __attribute((unused)) = { +}; + +#define A_UNEXPECTED_PHASE_AFTER_CMD 0x00000320 +static u32 A_UNEXPECTED_PHASE_AFTER_CMD_used[] __attribute((unused)) = { + 0x00000083, +}; + +#define A_UNEXPECTED_PHASE_AFTER_DATA_IN 0x00000520 +static u32 A_UNEXPECTED_PHASE_AFTER_DATA_IN_used[] __attribute((unused)) = { + 0x000000c9, +}; + +#define A_UNEXPECTED_PHASE_AFTER_DATA_OUT 0x00000620 +static u32 A_UNEXPECTED_PHASE_AFTER_DATA_OUT_used[] __attribute((unused)) = { + 0x000000d5, +}; + +#define A_UNEXPECTED_PHASE_BEFORE_CMD 0x00000220 +static u32 A_UNEXPECTED_PHASE_BEFORE_CMD_used[] __attribute((unused)) = { + 0x00000077, +}; + +#define A_WDTR_MSG 0x00000003 +static u32 A_WDTR_MSG_used[] __attribute((unused)) = { +}; + +#define A_WDTR_MSG_AFTER_CMD 0x000003a0 +static u32 A_WDTR_MSG_AFTER_CMD_used[] __attribute((unused)) = { +}; + +#define A_WDTR_MSG_R 0x000000a0 +static u32 A_WDTR_MSG_R_used[] __attribute((unused)) = { +}; + +#define Ent_Disconnect1 0x000002a8 +#define Ent_Disconnect2 0x000002b0 
+#define Ent_Disconnect3 0x000003a8 +#define Ent_Disconnect4 0x000003b0 +#define Ent_Disconnect5 0x000003d0 +#define Ent_Disconnect6 0x000003d8 +#define Ent_Disconnect7 0x00000448 +#define Ent_Disconnect8 0x00000450 +#define Ent_Finish1 0x000004a0 +#define Ent_Finish2 0x000004a8 +#define Ent_FinishCommandComplete 0x00000490 +#define Ent_GetReselectionData 0x00000038 +#define Ent_GetReselectionWithTag 0x00000048 +#define Ent_IgnoreMessage 0x00000188 +#define Ent_MsgInDuringData 0x00000218 +#define Ent_ReceiveMessage 0x000000a0 +#define Ent_SelectedAsTarget 0x00000058 +#define Ent_SendCommand 0x000001c8 +#define Ent_SendMessage 0x00000078 +#define Ent_SendMessagePhaseMismatch 0x00000090 +#define Ent_SendMessageWithATN 0x00000198 +#define Ent_StartUp 0x00000000 +static u32 LABELPATCHES[] __attribute((unused)) = { + 0x00000001, + 0x00000003, + 0x00000005, + 0x00000009, + 0x00000027, + 0x0000002b, + 0x00000039, + 0x0000003b, + 0x0000003d, + 0x0000003f, + 0x00000041, + 0x0000006b, + 0x0000006d, + 0x00000073, + 0x00000075, + 0x0000007b, + 0x0000007d, + 0x0000007f, + 0x00000081, + 0x00000087, + 0x0000008b, + 0x0000008d, + 0x0000008f, + 0x00000095, + 0x00000097, + 0x00000099, + 0x0000009b, + 0x000000a1, + 0x000000a5, + 0x000000af, + 0x000000b3, + 0x000000b5, + 0x000000b7, + 0x000000bd, + 0x000000c1, + 0x000000c3, + 0x000000c5, + 0x000000cd, + 0x000000d1, + 0x000000d3, + 0x000000d9, + 0x000000db, + 0x000000dd, + 0x000000df, + 0x000000e5, + 0x000000ef, + 0x000000f9, + 0x000000fd, + 0x000000ff, + 0x00000101, + 0x00000103, + 0x00000109, + 0x0000010d, + 0x00000117, + 0x0000011f, + 0x00000121, +}; + +static struct { + u32 offset; + void *address; +} EXTERNAL_PATCHES[] __attribute((unused)) = { +}; + +static u32 INSTRUCTIONS __attribute((unused)) = 149; +static u32 PATCHES __attribute((unused)) = 56; +static u32 EXTERNAL_PATCHES_LEN __attribute((unused)) = 0; diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c new file mode 100644 index 000000000..72ceaf650 --- /dev/null +++ b/drivers/scsi/BusLogic.c @@ -0,0 +1,3736 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + + Linux Driver for BusLogic MultiMaster and FlashPoint SCSI Host Adapters + + Copyright 1995-1998 by Leonard N. Zubkoff + + + The author respectfully requests that any modifications to this software be + sent directly to him for evaluation and testing. + + Special thanks to Wayne Yen, Jin-Lon Hon, and Alex Win of BusLogic, whose + advice has been invaluable, to David Gentzel, for writing the original Linux + BusLogic driver, and to Paul Gortmaker, for being such a dedicated test site. + + Finally, special thanks to Mylex/BusLogic for making the FlashPoint SCCB + Manager available as freely redistributable source code. + +*/ + +#define blogic_drvr_version "2.1.17" +#define blogic_drvr_date "12 September 2013" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include "BusLogic.h" +#include "FlashPoint.c" + +#ifndef FAILURE +#define FAILURE (-1) +#endif + +static const struct scsi_host_template blogic_template; + +/* + blogic_drvr_options_count is a count of the number of BusLogic Driver + Options specifications provided via the Linux Kernel Command Line or via + the Loadable Kernel Module Installation Facility. 
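+
+  As a purely illustrative example (the option names here are assumptions;
+  the option parsing code later in this file is authoritative), such a
+  specification is passed as a single string, e.g. a module load along the
+  lines of
+
+      modprobe BusLogic BusLogic="QueueDepth:28,BusSettleTime:2"
+
+  or the equivalent "BusLogic=..." kernel command line argument; each parsed
+  specification increments this count.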
+*/ + +static int blogic_drvr_options_count; + + +/* + blogic_drvr_options is an array of Driver Options structures representing + BusLogic Driver Options specifications provided via the Linux Kernel Command + Line or via the Loadable Kernel Module Installation Facility. +*/ + +static struct blogic_drvr_options blogic_drvr_options[BLOGIC_MAX_ADAPTERS]; + + +/* + BusLogic can be assigned a string by insmod. +*/ + +MODULE_LICENSE("GPL"); +#ifdef MODULE +static char *BusLogic; +module_param(BusLogic, charp, 0); +#endif + + +/* + blogic_probe_options is a set of Probe Options to be applied across + all BusLogic Host Adapters. +*/ + +static struct blogic_probe_options blogic_probe_options; + + +/* + blogic_global_options is a set of Global Options to be applied across + all BusLogic Host Adapters. +*/ + +static struct blogic_global_options blogic_global_options; + +static LIST_HEAD(blogic_host_list); + +/* + blogic_probeinfo_count is the number of entries in blogic_probeinfo_list. +*/ + +static int blogic_probeinfo_count; + + +/* + blogic_probeinfo_list is the list of I/O Addresses and Bus Probe Information + to be checked for potential BusLogic Host Adapters. It is initialized by + interrogating the PCI Configuration Space on PCI machines as well as from the + list of standard BusLogic I/O Addresses. +*/ + +static struct blogic_probeinfo *blogic_probeinfo_list; + + +/* + blogic_cmd_failure_reason holds a string identifying the reason why a + call to blogic_cmd failed. It is only non-NULL when blogic_cmd + returns a failure code. +*/ + +static char *blogic_cmd_failure_reason; + +/* + blogic_announce_drvr announces the Driver Version and Date, Author's + Name, Copyright Notice, and Electronic Mail Address. +*/ + +static void blogic_announce_drvr(struct blogic_adapter *adapter) +{ + blogic_announce("***** BusLogic SCSI Driver Version " blogic_drvr_version " of " blogic_drvr_date " *****\n", adapter); + blogic_announce("Copyright 1995-1998 by Leonard N. Zubkoff \n", adapter); +} + + +/* + blogic_drvr_info returns the Host Adapter Name to identify this SCSI + Driver and Host Adapter. +*/ + +static const char *blogic_drvr_info(struct Scsi_Host *host) +{ + struct blogic_adapter *adapter = + (struct blogic_adapter *) host->hostdata; + return adapter->full_model; +} + +/* + blogic_init_ccbs initializes a group of Command Control Blocks (CCBs) + for Host Adapter from the blk_size bytes located at blk_pointer. The newly + created CCBs are added to Host Adapter's free list. +*/ + +static void blogic_init_ccbs(struct blogic_adapter *adapter, void *blk_pointer, + int blk_size, dma_addr_t blkp) +{ + struct blogic_ccb *ccb = (struct blogic_ccb *) blk_pointer; + unsigned int offset = 0; + memset(blk_pointer, 0, blk_size); + ccb->allocgrp_head = blkp; + ccb->allocgrp_size = blk_size; + while ((blk_size -= sizeof(struct blogic_ccb)) >= 0) { + ccb->status = BLOGIC_CCB_FREE; + ccb->adapter = adapter; + ccb->dma_handle = (u32) blkp + offset; + if (blogic_flashpoint_type(adapter)) { + ccb->callback = blogic_qcompleted_ccb; + ccb->base_addr = adapter->fpinfo.base_addr; + } + ccb->next = adapter->free_ccbs; + ccb->next_all = adapter->all_ccbs; + adapter->free_ccbs = ccb; + adapter->all_ccbs = ccb; + adapter->alloc_ccbs++; + ccb++; + offset += sizeof(struct blogic_ccb); + } +} + + +/* + blogic_create_initccbs allocates the initial CCBs for Host Adapter. 
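+
+  A minimal sketch of the group-allocation pattern (mirroring the loop in
+  blogic_create_initccbs below; illustrative only, error handling elided):
+
+      blk_size = BLOGIC_CCB_GRP_ALLOCSIZE * sizeof(struct blogic_ccb);
+      blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
+                                       blk_size, &blkp, GFP_KERNEL);
+      blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp);
+
+  Each call carves one DMA-coherent block into BLOGIC_CCB_GRP_ALLOCSIZE CCBs,
+  pushes them onto the free list, and records the block's bus address in
+  allocgrp_head so that blogic_destroy_ccbs can later free whole groups.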
+*/ + +static bool __init blogic_create_initccbs(struct blogic_adapter *adapter) +{ + int blk_size = BLOGIC_CCB_GRP_ALLOCSIZE * sizeof(struct blogic_ccb); + void *blk_pointer; + dma_addr_t blkp; + + while (adapter->alloc_ccbs < adapter->initccbs) { + blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev, + blk_size, &blkp, GFP_KERNEL); + if (blk_pointer == NULL) { + blogic_err("UNABLE TO ALLOCATE CCB GROUP - DETACHING\n", + adapter); + return false; + } + blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp); + } + return true; +} + + +/* + blogic_destroy_ccbs deallocates the CCBs for Host Adapter. +*/ + +static void blogic_destroy_ccbs(struct blogic_adapter *adapter) +{ + struct blogic_ccb *next_ccb = adapter->all_ccbs, *ccb, *lastccb = NULL; + adapter->all_ccbs = NULL; + adapter->free_ccbs = NULL; + while ((ccb = next_ccb) != NULL) { + next_ccb = ccb->next_all; + if (ccb->allocgrp_head) { + if (lastccb) + dma_free_coherent(&adapter->pci_device->dev, + lastccb->allocgrp_size, lastccb, + lastccb->allocgrp_head); + lastccb = ccb; + } + } + if (lastccb) + dma_free_coherent(&adapter->pci_device->dev, + lastccb->allocgrp_size, lastccb, + lastccb->allocgrp_head); +} + + +/* + blogic_create_addlccbs allocates Additional CCBs for Host Adapter. If + allocation fails and there are no remaining CCBs available, the Driver Queue + Depth is decreased to a known safe value to avoid potential deadlocks when + multiple host adapters share the same IRQ Channel. +*/ + +static void blogic_create_addlccbs(struct blogic_adapter *adapter, + int addl_ccbs, bool print_success) +{ + int blk_size = BLOGIC_CCB_GRP_ALLOCSIZE * sizeof(struct blogic_ccb); + int prev_alloc = adapter->alloc_ccbs; + void *blk_pointer; + dma_addr_t blkp; + if (addl_ccbs <= 0) + return; + while (adapter->alloc_ccbs - prev_alloc < addl_ccbs) { + blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev, + blk_size, &blkp, GFP_KERNEL); + if (blk_pointer == NULL) + break; + blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp); + } + if (adapter->alloc_ccbs > prev_alloc) { + if (print_success) + blogic_notice("Allocated %d additional CCBs (total now %d)\n", adapter, adapter->alloc_ccbs - prev_alloc, adapter->alloc_ccbs); + return; + } + blogic_notice("Failed to allocate additional CCBs\n", adapter); + if (adapter->drvr_qdepth > adapter->alloc_ccbs - adapter->tgt_count) { + adapter->drvr_qdepth = adapter->alloc_ccbs - adapter->tgt_count; + adapter->scsi_host->can_queue = adapter->drvr_qdepth; + } +} + +/* + blogic_alloc_ccb allocates a CCB from Host Adapter's free list, + allocating more memory from the Kernel if necessary. The Host Adapter's + Lock should already have been acquired by the caller. +*/ + +static struct blogic_ccb *blogic_alloc_ccb(struct blogic_adapter *adapter) +{ + static unsigned long serial; + struct blogic_ccb *ccb; + ccb = adapter->free_ccbs; + if (ccb != NULL) { + ccb->serial = ++serial; + adapter->free_ccbs = ccb->next; + ccb->next = NULL; + if (adapter->free_ccbs == NULL) + blogic_create_addlccbs(adapter, adapter->inc_ccbs, + true); + return ccb; + } + blogic_create_addlccbs(adapter, adapter->inc_ccbs, true); + ccb = adapter->free_ccbs; + if (ccb == NULL) + return NULL; + ccb->serial = ++serial; + adapter->free_ccbs = ccb->next; + ccb->next = NULL; + return ccb; +} + + +/* + blogic_dealloc_ccb deallocates a CCB, returning it to the Host Adapter's + free list. The Host Adapter's Lock should already have been acquired by the + caller. 
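+
+  A hedged usage sketch (locking shown only schematically; the real callers
+  appear later in this file):
+
+      spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+      ccb = blogic_alloc_ccb(adapter);
+      ...fill in and issue the command described by the CCB...
+      blogic_dealloc_ccb(ccb, 1);
+      spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);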
+*/ + +static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap) +{ + struct blogic_adapter *adapter = ccb->adapter; + + if (ccb->command != NULL) + scsi_dma_unmap(ccb->command); + if (dma_unmap) + dma_unmap_single(&adapter->pci_device->dev, ccb->sensedata, + ccb->sense_datalen, DMA_FROM_DEVICE); + + ccb->command = NULL; + ccb->status = BLOGIC_CCB_FREE; + ccb->next = adapter->free_ccbs; + adapter->free_ccbs = ccb; +} + + +/* + blogic_cmd sends the command opcode to adapter, optionally + providing paramlen bytes of param and receiving at most + replylen bytes of reply; any excess reply data is received but + discarded. + + On success, this function returns the number of reply bytes read from + the Host Adapter (including any discarded data); on failure, it returns + -1 if the command was invalid, or -2 if a timeout occurred. + + blogic_cmd is called exclusively during host adapter detection and + initialization, so performance and latency are not critical, and exclusive + access to the Host Adapter hardware is assumed. Once the host adapter and + driver are initialized, the only Host Adapter command that is issued is the + single byte Execute Mailbox Command operation code, which does not require + waiting for the Host Adapter Ready bit to be set in the Status Register. +*/ + +static int blogic_cmd(struct blogic_adapter *adapter, enum blogic_opcode opcode, + void *param, int paramlen, void *reply, int replylen) +{ + unsigned char *param_p = (unsigned char *) param; + unsigned char *reply_p = (unsigned char *) reply; + union blogic_stat_reg statusreg; + union blogic_int_reg intreg; + unsigned long processor_flag = 0; + int reply_b = 0, result; + long timeout; + /* + Clear out the Reply Data if provided. + */ + if (replylen > 0) + memset(reply, 0, replylen); + /* + If the IRQ Channel has not yet been acquired, then interrupts + must be disabled while issuing host adapter commands since a + Command Complete interrupt could occur if the IRQ Channel was + previously enabled by another BusLogic Host Adapter or another + driver sharing the same IRQ Channel. + */ + if (!adapter->irq_acquired) + local_irq_save(processor_flag); + /* + Wait for the Host Adapter Ready bit to be set and the + Command/Parameter Register Busy bit to be reset in the Status + Register. + */ + timeout = 10000; + while (--timeout >= 0) { + statusreg.all = blogic_rdstatus(adapter); + if (statusreg.sr.adapter_ready && !statusreg.sr.cmd_param_busy) + break; + udelay(100); + } + if (timeout < 0) { + blogic_cmd_failure_reason = + "Timeout waiting for Host Adapter Ready"; + result = -2; + goto done; + } + /* + Write the opcode to the Command/Parameter Register. + */ + adapter->adapter_cmd_complete = false; + blogic_setcmdparam(adapter, opcode); + /* + Write any additional Parameter Bytes. + */ + timeout = 10000; + while (paramlen > 0 && --timeout >= 0) { + /* + Wait 100 microseconds to give the Host Adapter enough + time to determine whether the last value written to the + Command/Parameter Register was valid or not. If the + Command Complete bit is set in the Interrupt Register, + then the Command Invalid bit in the Status Register will + be reset if the Operation Code or Parameter was valid + and the command has completed, or set if the Operation + Code or Parameter was invalid. If the Data In Register + Ready bit is set in the Status Register, then the + Operation Code was valid, and data is waiting to be read + back from the Host Adapter. 
Otherwise, wait for the + Command/Parameter Register Busy bit in the Status + Register to be reset. + */ + udelay(100); + intreg.all = blogic_rdint(adapter); + statusreg.all = blogic_rdstatus(adapter); + if (intreg.ir.cmd_complete) + break; + if (adapter->adapter_cmd_complete) + break; + if (statusreg.sr.datain_ready) + break; + if (statusreg.sr.cmd_param_busy) + continue; + blogic_setcmdparam(adapter, *param_p++); + paramlen--; + } + if (timeout < 0) { + blogic_cmd_failure_reason = + "Timeout waiting for Parameter Acceptance"; + result = -2; + goto done; + } + /* + The Modify I/O Address command does not cause a Command Complete + Interrupt. + */ + if (opcode == BLOGIC_MOD_IOADDR) { + statusreg.all = blogic_rdstatus(adapter); + if (statusreg.sr.cmd_invalid) { + blogic_cmd_failure_reason = + "Modify I/O Address Invalid"; + result = -1; + goto done; + } + if (blogic_global_options.trace_config) + blogic_notice("blogic_cmd(%02X) Status = %02X: (Modify I/O Address)\n", adapter, opcode, statusreg.all); + result = 0; + goto done; + } + /* + Select an appropriate timeout value for awaiting command completion. + */ + switch (opcode) { + case BLOGIC_INQ_DEV0TO7: + case BLOGIC_INQ_DEV8TO15: + case BLOGIC_INQ_DEV: + /* Approximately 60 seconds. */ + timeout = 60 * 10000; + break; + default: + /* Approximately 1 second. */ + timeout = 10000; + break; + } + /* + Receive any Reply Bytes, waiting for either the Command + Complete bit to be set in the Interrupt Register, or for the + Interrupt Handler to set the Host Adapter Command Completed + bit in the Host Adapter structure. + */ + while (--timeout >= 0) { + intreg.all = blogic_rdint(adapter); + statusreg.all = blogic_rdstatus(adapter); + if (intreg.ir.cmd_complete) + break; + if (adapter->adapter_cmd_complete) + break; + if (statusreg.sr.datain_ready) { + if (++reply_b <= replylen) + *reply_p++ = blogic_rddatain(adapter); + else + blogic_rddatain(adapter); + } + if (opcode == BLOGIC_FETCH_LOCALRAM && + statusreg.sr.adapter_ready) + break; + udelay(100); + } + if (timeout < 0) { + blogic_cmd_failure_reason = + "Timeout waiting for Command Complete"; + result = -2; + goto done; + } + /* + Clear any pending Command Complete Interrupt. + */ + blogic_intreset(adapter); + /* + Provide tracing information if requested. + */ + if (blogic_global_options.trace_config) { + int i; + blogic_notice("blogic_cmd(%02X) Status = %02X: %2d ==> %2d:", + adapter, opcode, statusreg.all, replylen, + reply_b); + if (replylen > reply_b) + replylen = reply_b; + for (i = 0; i < replylen; i++) + blogic_notice(" %02X", adapter, + ((unsigned char *) reply)[i]); + blogic_notice("\n", adapter); + } + /* + Process Command Invalid conditions. + */ + if (statusreg.sr.cmd_invalid) { + /* + Some early BusLogic Host Adapters may not recover + properly from a Command Invalid condition, so if this + appears to be the case, a Soft Reset is issued to the + Host Adapter. Potentially invalid commands are never + attempted after Mailbox Initialization is performed, + so there should be no Host Adapter state lost by a + Soft Reset in response to a Command Invalid condition. 
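+
+	  From a caller's point of view such a failure surfaces as a negative
+	  return value (or a short reply count), so the detection code simply
+	  compares the result against the expected reply length, e.g.
+
+	      if (blogic_cmd(adapter, BLOGIC_INQ_EXTSETUP, &req_replylen,
+	                     sizeof(req_replylen), &ext_setupinfo,
+	                     sizeof(ext_setupinfo)) != sizeof(ext_setupinfo))
+	              ...reject this board...
+
+	  which is how blogic_checkadapter later rejects boards that merely
+	  happen to decode these I/O ports.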
+ */ + udelay(1000); + statusreg.all = blogic_rdstatus(adapter); + if (statusreg.sr.cmd_invalid || statusreg.sr.rsvd || + statusreg.sr.datain_ready || + statusreg.sr.cmd_param_busy || + !statusreg.sr.adapter_ready || + !statusreg.sr.init_reqd || + statusreg.sr.diag_active || + statusreg.sr.diag_failed) { + blogic_softreset(adapter); + udelay(1000); + } + blogic_cmd_failure_reason = "Command Invalid"; + result = -1; + goto done; + } + /* + Handle Excess Parameters Supplied conditions. + */ + if (paramlen > 0) { + blogic_cmd_failure_reason = "Excess Parameters Supplied"; + result = -1; + goto done; + } + /* + Indicate the command completed successfully. + */ + blogic_cmd_failure_reason = NULL; + result = reply_b; + /* + Restore the interrupt status if necessary and return. + */ +done: + if (!adapter->irq_acquired) + local_irq_restore(processor_flag); + return result; +} + + +/* + blogic_sort_probeinfo sorts a section of blogic_probeinfo_list in order + of increasing PCI Bus and Device Number. +*/ + +static void __init blogic_sort_probeinfo(struct blogic_probeinfo + *probeinfo_list, int probeinfo_cnt) +{ + int last_exchange = probeinfo_cnt - 1, bound, j; + + while (last_exchange > 0) { + bound = last_exchange; + last_exchange = 0; + for (j = 0; j < bound; j++) { + struct blogic_probeinfo *probeinfo1 = + &probeinfo_list[j]; + struct blogic_probeinfo *probeinfo2 = + &probeinfo_list[j + 1]; + if (probeinfo1->bus > probeinfo2->bus || + (probeinfo1->bus == probeinfo2->bus && + (probeinfo1->dev > probeinfo2->dev))) { + struct blogic_probeinfo tmp_probeinfo; + + memcpy(&tmp_probeinfo, probeinfo1, + sizeof(struct blogic_probeinfo)); + memcpy(probeinfo1, probeinfo2, + sizeof(struct blogic_probeinfo)); + memcpy(probeinfo2, &tmp_probeinfo, + sizeof(struct blogic_probeinfo)); + last_exchange = j; + } + } + } +} + + +/* + blogic_init_mm_probeinfo initializes the list of I/O Address + and Bus Probe Information to be checked for potential BusLogic MultiMaster + SCSI Host Adapters by interrogating the PCI Configuration Space on PCI + machines as well as from the list of standard BusLogic MultiMaster ISA + I/O Addresses. It returns the number of PCI MultiMaster Host Adapters found. +*/ + +static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter) +{ + struct blogic_probeinfo *pr_probeinfo = + &blogic_probeinfo_list[blogic_probeinfo_count]; + int nonpr_mmindex = blogic_probeinfo_count + 1; + int nonpr_mmcount = 0, mmcount = 0; + bool force_scan_order = false; + bool force_scan_order_checked = false; + struct pci_dev *pci_device = NULL; + int i; + if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS) + return 0; + blogic_probeinfo_count++; + /* + Iterate over the MultiMaster PCI Host Adapters. For each + enumerated host adapter, determine whether its ISA Compatible + I/O Port is enabled and if so, whether it is assigned the + Primary I/O Address. A host adapter that is assigned the + Primary I/O Address will always be the preferred boot device. + The MultiMaster BIOS will first recognize a host adapter at + the Primary I/O Address, then any other PCI host adapters, + and finally any host adapters located at the remaining + standard ISA I/O Addresses. When a PCI host adapter is found + with its ISA Compatible I/O Port enabled, a command is issued + to disable the ISA Compatible I/O Port, and it is noted that the + particular standard ISA I/O Address need not be probed. 
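+
+	  The disable itself is the Modify I/O Address command issued a little
+	  further down, essentially:
+
+	      mod_ioaddr_req = BLOGIC_IO_DISABLE;
+	      blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
+	                 sizeof(mod_ioaddr_req), NULL, 0);
+
+	  which only affects the ISA-compatible alias, not the PCI-assigned
+	  I/O address that the driver actually uses.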
+ */ + pr_probeinfo->io_addr = 0; + while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, + PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, + pci_device)) != NULL) { + struct blogic_adapter *host_adapter = adapter; + struct blogic_adapter_info adapter_info; + enum blogic_isa_ioport mod_ioaddr_req; + unsigned char bus; + unsigned char device; + unsigned int irq_ch; + unsigned long base_addr0; + unsigned long base_addr1; + unsigned long io_addr; + unsigned long pci_addr; + + if (pci_enable_device(pci_device)) + continue; + + if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32))) + continue; + + bus = pci_device->bus->number; + device = pci_device->devfn >> 3; + irq_ch = pci_device->irq; + io_addr = base_addr0 = pci_resource_start(pci_device, 0); + pci_addr = base_addr1 = pci_resource_start(pci_device, 1); + + if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) { + blogic_err("BusLogic: Base Address0 0x%lX not I/O for MultiMaster Host Adapter\n", NULL, base_addr0); + blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr); + continue; + } + if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) { + blogic_err("BusLogic: Base Address1 0x%lX not Memory for MultiMaster Host Adapter\n", NULL, base_addr1); + blogic_err("at PCI Bus %d Device %d PCI Address 0x%lX\n", NULL, bus, device, pci_addr); + continue; + } + if (irq_ch == 0) { + blogic_err("BusLogic: IRQ Channel %d invalid for MultiMaster Host Adapter\n", NULL, irq_ch); + blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr); + continue; + } + if (blogic_global_options.trace_probe) { + blogic_notice("BusLogic: PCI MultiMaster Host Adapter detected at\n", NULL); + blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX\n", NULL, bus, device, io_addr, pci_addr); + } + /* + Issue the Inquire PCI Host Adapter Information command to determine + the ISA Compatible I/O Port. If the ISA Compatible I/O Port is + known and enabled, note that the particular Standard ISA I/O + Address should not be probed. + */ + host_adapter->io_addr = io_addr; + blogic_intreset(host_adapter); + if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0, + &adapter_info, sizeof(adapter_info)) != + sizeof(adapter_info)) + adapter_info.isa_port = BLOGIC_IO_DISABLE; + /* + Issue the Modify I/O Address command to disable the + ISA Compatible I/O Port. On PCI Host Adapters, the + Modify I/O Address command allows modification of the + ISA compatible I/O Address that the Host Adapter + responds to; it does not affect the PCI compliant + I/O Address assigned at system initialization. + */ + mod_ioaddr_req = BLOGIC_IO_DISABLE; + blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req, + sizeof(mod_ioaddr_req), NULL, 0); + /* + For the first MultiMaster Host Adapter enumerated, + issue the Fetch Host Adapter Local RAM command to read + byte 45 of the AutoSCSI area, for the setting of the + "Use Bus And Device # For PCI Scanning Seq." option. + Issue the Inquire Board ID command since this option is + only valid for the BT-948/958/958D. 
+ */ + if (!force_scan_order_checked) { + struct blogic_fetch_localram fetch_localram; + struct blogic_autoscsi_byte45 autoscsi_byte45; + struct blogic_board_id id; + + fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45; + fetch_localram.count = sizeof(autoscsi_byte45); + blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM, + &fetch_localram, sizeof(fetch_localram), + &autoscsi_byte45, + sizeof(autoscsi_byte45)); + blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0, + &id, sizeof(id)); + if (id.fw_ver_digit1 == '5') + force_scan_order = + autoscsi_byte45.force_scan_order; + force_scan_order_checked = true; + } + /* + Determine whether this MultiMaster Host Adapter has its + ISA Compatible I/O Port enabled and is assigned the + Primary I/O Address. If it does, then it is the Primary + MultiMaster Host Adapter and must be recognized first. + If it does not, then it is added to the list for probing + after any Primary MultiMaster Host Adapter is probed. + */ + if (adapter_info.isa_port == BLOGIC_IO_330) { + pr_probeinfo->adapter_type = BLOGIC_MULTIMASTER; + pr_probeinfo->adapter_bus_type = BLOGIC_PCI_BUS; + pr_probeinfo->io_addr = io_addr; + pr_probeinfo->pci_addr = pci_addr; + pr_probeinfo->bus = bus; + pr_probeinfo->dev = device; + pr_probeinfo->irq_ch = irq_ch; + pr_probeinfo->pci_device = pci_dev_get(pci_device); + mmcount++; + } else if (blogic_probeinfo_count < BLOGIC_MAX_ADAPTERS) { + struct blogic_probeinfo *probeinfo = + &blogic_probeinfo_list[blogic_probeinfo_count++]; + probeinfo->adapter_type = BLOGIC_MULTIMASTER; + probeinfo->adapter_bus_type = BLOGIC_PCI_BUS; + probeinfo->io_addr = io_addr; + probeinfo->pci_addr = pci_addr; + probeinfo->bus = bus; + probeinfo->dev = device; + probeinfo->irq_ch = irq_ch; + probeinfo->pci_device = pci_dev_get(pci_device); + nonpr_mmcount++; + mmcount++; + } else + blogic_warn("BusLogic: Too many Host Adapters detected\n", NULL); + } + /* + If the AutoSCSI "Use Bus And Device # For PCI Scanning Seq." + option is ON for the first enumerated MultiMaster Host Adapter, + and if that host adapter is a BT-948/958/958D, then the + MultiMaster BIOS will recognize MultiMaster Host Adapters in + the order of increasing PCI Bus and Device Number. In that case, + sort the probe information into the same order the BIOS uses. + If this option is OFF, then the MultiMaster BIOS will recognize + MultiMaster Host Adapters in the order they are enumerated by + the PCI BIOS, and hence no sorting is necessary. + */ + if (force_scan_order) + blogic_sort_probeinfo(&blogic_probeinfo_list[nonpr_mmindex], + nonpr_mmcount); + /* + Iterate over the older non-compliant MultiMaster PCI Host Adapters, + noting the PCI bus location and assigned IRQ Channel. 
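+
+	  Note that the loop below does not add new probe entries; it only
+	  backfills the PCI bus, device, and IRQ fields of an existing entry
+	  whose adapter_type is BLOGIC_MULTIMASTER and whose io_addr matches
+	  the device's first PCI I/O resource.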
+ */ + pci_device = NULL; + while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, + PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC, + pci_device)) != NULL) { + unsigned char bus; + unsigned char device; + unsigned int irq_ch; + unsigned long io_addr; + + if (pci_enable_device(pci_device)) + continue; + + if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32))) + continue; + + bus = pci_device->bus->number; + device = pci_device->devfn >> 3; + irq_ch = pci_device->irq; + io_addr = pci_resource_start(pci_device, 0); + + if (io_addr == 0 || irq_ch == 0) + continue; + for (i = 0; i < blogic_probeinfo_count; i++) { + struct blogic_probeinfo *probeinfo = + &blogic_probeinfo_list[i]; + if (probeinfo->io_addr == io_addr && + probeinfo->adapter_type == BLOGIC_MULTIMASTER) { + probeinfo->adapter_bus_type = BLOGIC_PCI_BUS; + probeinfo->pci_addr = 0; + probeinfo->bus = bus; + probeinfo->dev = device; + probeinfo->irq_ch = irq_ch; + probeinfo->pci_device = pci_dev_get(pci_device); + break; + } + } + } + return mmcount; +} + + +/* + blogic_init_fp_probeinfo initializes the list of I/O Address + and Bus Probe Information to be checked for potential BusLogic FlashPoint + Host Adapters by interrogating the PCI Configuration Space. It returns the + number of FlashPoint Host Adapters found. +*/ + +static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter) +{ + int fpindex = blogic_probeinfo_count, fpcount = 0; + struct pci_dev *pci_device = NULL; + /* + Interrogate PCI Configuration Space for any FlashPoint Host Adapters. + */ + while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, + PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT, + pci_device)) != NULL) { + unsigned char bus; + unsigned char device; + unsigned int irq_ch; + unsigned long base_addr0; + unsigned long base_addr1; + unsigned long io_addr; + unsigned long pci_addr; + + if (pci_enable_device(pci_device)) + continue; + + if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32))) + continue; + + bus = pci_device->bus->number; + device = pci_device->devfn >> 3; + irq_ch = pci_device->irq; + io_addr = base_addr0 = pci_resource_start(pci_device, 0); + pci_addr = base_addr1 = pci_resource_start(pci_device, 1); +#ifdef CONFIG_SCSI_FLASHPOINT + if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) { + blogic_err("BusLogic: Base Address0 0x%lX not I/O for FlashPoint Host Adapter\n", NULL, base_addr0); + blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr); + continue; + } + if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) { + blogic_err("BusLogic: Base Address1 0x%lX not Memory for FlashPoint Host Adapter\n", NULL, base_addr1); + blogic_err("at PCI Bus %d Device %d PCI Address 0x%lX\n", NULL, bus, device, pci_addr); + continue; + } + if (irq_ch == 0) { + blogic_err("BusLogic: IRQ Channel %d invalid for FlashPoint Host Adapter\n", NULL, irq_ch); + blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr); + continue; + } + if (blogic_global_options.trace_probe) { + blogic_notice("BusLogic: FlashPoint Host Adapter detected at\n", NULL); + blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX\n", NULL, bus, device, io_addr, pci_addr); + } + if (blogic_probeinfo_count < BLOGIC_MAX_ADAPTERS) { + struct blogic_probeinfo *probeinfo = + &blogic_probeinfo_list[blogic_probeinfo_count++]; + probeinfo->adapter_type = BLOGIC_FLASHPOINT; + probeinfo->adapter_bus_type = BLOGIC_PCI_BUS; + probeinfo->io_addr = io_addr; + probeinfo->pci_addr = pci_addr; + probeinfo->bus = bus; + 
probeinfo->dev = device; + probeinfo->irq_ch = irq_ch; + probeinfo->pci_device = pci_dev_get(pci_device); + fpcount++; + } else + blogic_warn("BusLogic: Too many Host Adapters detected\n", NULL); +#else + blogic_err("BusLogic: FlashPoint Host Adapter detected at PCI Bus %d Device %d\n", NULL, bus, device); + blogic_err("BusLogic: I/O Address 0x%lX PCI Address 0x%lX, irq %d, but FlashPoint\n", NULL, io_addr, pci_addr, irq_ch); + blogic_err("BusLogic: support was omitted in this kernel configuration.\n", NULL); +#endif + } + /* + The FlashPoint BIOS will scan for FlashPoint Host Adapters in the order of + increasing PCI Bus and Device Number, so sort the probe information into + the same order the BIOS uses. + */ + blogic_sort_probeinfo(&blogic_probeinfo_list[fpindex], fpcount); + return fpcount; +} + + +/* + blogic_init_probeinfo_list initializes the list of I/O Address and Bus + Probe Information to be checked for potential BusLogic SCSI Host Adapters by + interrogating the PCI Configuration Space on PCI machines as well as from the + list of standard BusLogic MultiMaster ISA I/O Addresses. By default, if both + FlashPoint and PCI MultiMaster Host Adapters are present, this driver will + probe for FlashPoint Host Adapters first unless the BIOS primary disk is + controlled by the first PCI MultiMaster Host Adapter, in which case + MultiMaster Host Adapters will be probed first. The BusLogic Driver Options + specifications "MultiMasterFirst" and "FlashPointFirst" can be used to force + a particular probe order. +*/ + +static void __init blogic_init_probeinfo_list(struct blogic_adapter *adapter) +{ + /* + If a PCI BIOS is present, interrogate it for MultiMaster and + FlashPoint Host Adapters; otherwise, default to the standard + ISA MultiMaster probe. + */ + if (!blogic_probe_options.noprobe_pci) { + if (blogic_probe_options.multimaster_first) { + blogic_init_mm_probeinfo(adapter); + blogic_init_fp_probeinfo(adapter); + } else if (blogic_probe_options.flashpoint_first) { + blogic_init_fp_probeinfo(adapter); + blogic_init_mm_probeinfo(adapter); + } else { + int fpcount = blogic_init_fp_probeinfo(adapter); + int mmcount = blogic_init_mm_probeinfo(adapter); + if (fpcount > 0 && mmcount > 0) { + struct blogic_probeinfo *probeinfo = + &blogic_probeinfo_list[fpcount]; + struct blogic_adapter *myadapter = adapter; + struct blogic_fetch_localram fetch_localram; + struct blogic_bios_drvmap d0_mapbyte; + + while (probeinfo->adapter_bus_type != + BLOGIC_PCI_BUS) + probeinfo++; + myadapter->io_addr = probeinfo->io_addr; + fetch_localram.offset = + BLOGIC_BIOS_BASE + BLOGIC_BIOS_DRVMAP; + fetch_localram.count = sizeof(d0_mapbyte); + blogic_cmd(myadapter, BLOGIC_FETCH_LOCALRAM, + &fetch_localram, + sizeof(fetch_localram), + &d0_mapbyte, + sizeof(d0_mapbyte)); + /* + If the Map Byte for BIOS Drive 0 indicates + that BIOS Drive 0 is controlled by this + PCI MultiMaster Host Adapter, then reverse + the probe order so that MultiMaster Host + Adapters are probed before FlashPoint Host + Adapters. 
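+
+	  As a small worked example: with one FlashPoint and one MultiMaster
+	  adapter (fpcount == 1, mmcount == 1) the list goes from
+	  { FlashPoint, MultiMaster } to { MultiMaster, FlashPoint }, which is
+	  what the three memcpy calls below implement via a temporary copy of
+	  the whole probe list.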
+ */ + if (d0_mapbyte.diskgeom != BLOGIC_BIOS_NODISK) { + struct blogic_probeinfo saved_probeinfo[BLOGIC_MAX_ADAPTERS]; + int mmcount = blogic_probeinfo_count - fpcount; + + memcpy(saved_probeinfo, + blogic_probeinfo_list, + blogic_probeinfo_count * sizeof(struct blogic_probeinfo)); + memcpy(&blogic_probeinfo_list[0], + &saved_probeinfo[fpcount], + mmcount * sizeof(struct blogic_probeinfo)); + memcpy(&blogic_probeinfo_list[mmcount], + &saved_probeinfo[0], + fpcount * sizeof(struct blogic_probeinfo)); + } + } + } + } +} + + +/* + blogic_failure prints a standardized error message, and then returns false. +*/ + +static bool blogic_failure(struct blogic_adapter *adapter, char *msg) +{ + blogic_announce_drvr(adapter); + if (adapter->adapter_bus_type == BLOGIC_PCI_BUS) { + blogic_err("While configuring BusLogic PCI Host Adapter at\n", + adapter); + blogic_err("Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX:\n", adapter, adapter->bus, adapter->dev, adapter->io_addr, adapter->pci_addr); + } else + blogic_err("While configuring BusLogic Host Adapter at I/O Address 0x%lX:\n", adapter, adapter->io_addr); + blogic_err("%s FAILED - DETACHING\n", adapter, msg); + if (blogic_cmd_failure_reason != NULL) + blogic_err("ADDITIONAL FAILURE INFO - %s\n", adapter, + blogic_cmd_failure_reason); + return false; +} + + +/* + blogic_probe probes for a BusLogic Host Adapter. +*/ + +static bool __init blogic_probe(struct blogic_adapter *adapter) +{ + union blogic_stat_reg statusreg; + union blogic_int_reg intreg; + union blogic_geo_reg georeg; + /* + FlashPoint Host Adapters are Probed by the FlashPoint SCCB Manager. + */ + if (blogic_flashpoint_type(adapter)) { + struct fpoint_info *fpinfo = &adapter->fpinfo; + fpinfo->base_addr = (u32) adapter->io_addr; + fpinfo->irq_ch = adapter->irq_ch; + fpinfo->present = false; + if (!(FlashPoint_ProbeHostAdapter(fpinfo) == 0 && + fpinfo->present)) { + blogic_err("BusLogic: FlashPoint Host Adapter detected at PCI Bus %d Device %d\n", adapter, adapter->bus, adapter->dev); + blogic_err("BusLogic: I/O Address 0x%lX PCI Address 0x%lX, but FlashPoint\n", adapter, adapter->io_addr, adapter->pci_addr); + blogic_err("BusLogic: Probe Function failed to validate it.\n", adapter); + return false; + } + if (blogic_global_options.trace_probe) + blogic_notice("BusLogic_Probe(0x%lX): FlashPoint Found\n", adapter, adapter->io_addr); + /* + Indicate the Host Adapter Probe completed successfully. + */ + return true; + } + /* + Read the Status, Interrupt, and Geometry Registers to test if there are I/O + ports that respond, and to check the values to determine if they are from a + BusLogic Host Adapter. A nonexistent I/O port will return 0xFF, in which + case there is definitely no BusLogic Host Adapter at this base I/O Address. + The test here is a subset of that used by the BusLogic Host Adapter BIOS. + */ + statusreg.all = blogic_rdstatus(adapter); + intreg.all = blogic_rdint(adapter); + georeg.all = blogic_rdgeom(adapter); + if (blogic_global_options.trace_probe) + blogic_notice("BusLogic_Probe(0x%lX): Status 0x%02X, Interrupt 0x%02X, Geometry 0x%02X\n", adapter, adapter->io_addr, statusreg.all, intreg.all, georeg.all); + if (statusreg.all == 0 || statusreg.sr.diag_active || + statusreg.sr.cmd_param_busy || statusreg.sr.rsvd || + statusreg.sr.cmd_invalid || intreg.ir.rsvd != 0) + return false; + /* + Check the undocumented Geometry Register to test if there is + an I/O port that responded. 
Adaptec Host Adapters do not + implement the Geometry Register, so this test helps serve to + avoid incorrectly recognizing an Adaptec 1542A or 1542B as a + BusLogic. Unfortunately, the Adaptec 1542C series does respond + to the Geometry Register I/O port, but it will be rejected + later when the Inquire Extended Setup Information command is + issued in blogic_checkadapter. The AMI FastDisk Host Adapter + is a BusLogic clone that implements the same interface as + earlier BusLogic Host Adapters, including the undocumented + commands, and is therefore supported by this driver. However, + the AMI FastDisk always returns 0x00 upon reading the Geometry + Register, so the extended translation option should always be + left disabled on the AMI FastDisk. + */ + if (georeg.all == 0xFF) + return false; + /* + Indicate the Host Adapter Probe completed successfully. + */ + return true; +} + + +/* + blogic_hwreset issues a Hardware Reset to the Host Adapter + and waits for Host Adapter Diagnostics to complete. If hard_reset is true, a + Hard Reset is performed which also initiates a SCSI Bus Reset. Otherwise, a + Soft Reset is performed which only resets the Host Adapter without forcing a + SCSI Bus Reset. +*/ + +static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset) +{ + union blogic_stat_reg statusreg; + int timeout; + /* + FlashPoint Host Adapters are Hard Reset by the FlashPoint + SCCB Manager. + */ + if (blogic_flashpoint_type(adapter)) { + struct fpoint_info *fpinfo = &adapter->fpinfo; + fpinfo->softreset = !hard_reset; + fpinfo->report_underrun = true; + adapter->cardhandle = + FlashPoint_HardwareResetHostAdapter(fpinfo); + if (adapter->cardhandle == (void *)FPOINT_BADCARD_HANDLE) + return false; + /* + Indicate the Host Adapter Hard Reset completed successfully. + */ + return true; + } + /* + Issue a Hard Reset or Soft Reset Command to the Host Adapter. + The Host Adapter should respond by setting Diagnostic Active in + the Status Register. + */ + if (hard_reset) + blogic_hardreset(adapter); + else + blogic_softreset(adapter); + /* + Wait until Diagnostic Active is set in the Status Register. + */ + timeout = 5 * 10000; + while (--timeout >= 0) { + statusreg.all = blogic_rdstatus(adapter); + if (statusreg.sr.diag_active) + break; + udelay(100); + } + if (blogic_global_options.trace_hw_reset) + blogic_notice("BusLogic_HardwareReset(0x%lX): Diagnostic Active, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all); + if (timeout < 0) + return false; + /* + Wait 100 microseconds to allow completion of any initial diagnostic + activity which might leave the contents of the Status Register + unpredictable. + */ + udelay(100); + /* + Wait until Diagnostic Active is reset in the Status Register. + */ + timeout = 10 * 10000; + while (--timeout >= 0) { + statusreg.all = blogic_rdstatus(adapter); + if (!statusreg.sr.diag_active) + break; + udelay(100); + } + if (blogic_global_options.trace_hw_reset) + blogic_notice("BusLogic_HardwareReset(0x%lX): Diagnostic Completed, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all); + if (timeout < 0) + return false; + /* + Wait until at least one of the Diagnostic Failure, Host Adapter + Ready, or Data In Register Ready bits is set in the Status Register. 
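+
+	  Taken together, the reset handshake is three polled phases, each
+	  sampled every 100 microseconds: wait for Diagnostic Active to be set
+	  (5 * 10000 polls, roughly 5 seconds), wait for it to clear again
+	  (10 * 10000 polls, roughly 10 seconds), and finally this wait for
+	  Diagnostic Failure, Host Adapter Ready, or Data In Register Ready
+	  (10000 polls, roughly 1 second).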
+ */ + timeout = 10000; + while (--timeout >= 0) { + statusreg.all = blogic_rdstatus(adapter); + if (statusreg.sr.diag_failed || statusreg.sr.adapter_ready || + statusreg.sr.datain_ready) + break; + udelay(100); + } + if (blogic_global_options.trace_hw_reset) + blogic_notice("BusLogic_HardwareReset(0x%lX): Host Adapter Ready, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all); + if (timeout < 0) + return false; + /* + If Diagnostic Failure is set or Host Adapter Ready is reset, + then an error occurred during the Host Adapter diagnostics. + If Data In Register Ready is set, then there is an Error Code + available. + */ + if (statusreg.sr.diag_failed || !statusreg.sr.adapter_ready) { + blogic_cmd_failure_reason = NULL; + blogic_failure(adapter, "HARD RESET DIAGNOSTICS"); + blogic_err("HOST ADAPTER STATUS REGISTER = %02X\n", adapter, + statusreg.all); + if (statusreg.sr.datain_ready) + blogic_err("HOST ADAPTER ERROR CODE = %d\n", adapter, + blogic_rddatain(adapter)); + return false; + } + /* + Indicate the Host Adapter Hard Reset completed successfully. + */ + return true; +} + + +/* + blogic_checkadapter checks to be sure this really is a BusLogic + Host Adapter. +*/ + +static bool __init blogic_checkadapter(struct blogic_adapter *adapter) +{ + struct blogic_ext_setup ext_setupinfo; + unsigned char req_replylen; + bool result = true; + /* + FlashPoint Host Adapters do not require this protection. + */ + if (blogic_flashpoint_type(adapter)) + return true; + /* + Issue the Inquire Extended Setup Information command. Only genuine + BusLogic Host Adapters and true clones support this command. + Adaptec 1542C series Host Adapters that respond to the Geometry + Register I/O port will fail this command. + */ + req_replylen = sizeof(ext_setupinfo); + if (blogic_cmd(adapter, BLOGIC_INQ_EXTSETUP, &req_replylen, + sizeof(req_replylen), &ext_setupinfo, + sizeof(ext_setupinfo)) != sizeof(ext_setupinfo)) + result = false; + /* + Provide tracing information if requested and return. + */ + if (blogic_global_options.trace_probe) + blogic_notice("BusLogic_Check(0x%lX): MultiMaster %s\n", adapter, + adapter->io_addr, + (result ? "Found" : "Not Found")); + return result; +} + + +/* + blogic_rdconfig reads the Configuration Information + from Host Adapter and initializes the Host Adapter structure. +*/ + +static bool __init blogic_rdconfig(struct blogic_adapter *adapter) +{ + struct blogic_board_id id; + struct blogic_config config; + struct blogic_setup_info setupinfo; + struct blogic_ext_setup ext_setupinfo; + unsigned char model[5]; + unsigned char fw_ver_digit3; + unsigned char fw_ver_letter; + struct blogic_adapter_info adapter_info; + struct blogic_fetch_localram fetch_localram; + struct blogic_autoscsi autoscsi; + union blogic_geo_reg georeg; + unsigned char req_replylen; + unsigned char *tgt, ch; + int tgt_id, i; + /* + Configuration Information for FlashPoint Host Adapters is + provided in the fpoint_info structure by the FlashPoint + SCCB Manager's Probe Function. Initialize fields in the + Host Adapter structure from the fpoint_info structure. 
+ */ + if (blogic_flashpoint_type(adapter)) { + struct fpoint_info *fpinfo = &adapter->fpinfo; + tgt = adapter->model; + *tgt++ = 'B'; + *tgt++ = 'T'; + *tgt++ = '-'; + for (i = 0; i < sizeof(fpinfo->model); i++) + *tgt++ = fpinfo->model[i]; + *tgt++ = '\0'; + strcpy(adapter->fw_ver, FLASHPOINT_FW_VER); + adapter->scsi_id = fpinfo->scsi_id; + adapter->ext_trans_enable = fpinfo->ext_trans_enable; + adapter->parity = fpinfo->parity; + adapter->reset_enabled = !fpinfo->softreset; + adapter->level_int = true; + adapter->wide = fpinfo->wide; + adapter->differential = false; + adapter->scam = true; + adapter->ultra = true; + adapter->ext_lun = true; + adapter->terminfo_valid = true; + adapter->low_term = fpinfo->low_term; + adapter->high_term = fpinfo->high_term; + adapter->scam_enabled = fpinfo->scam_enabled; + adapter->scam_lev2 = fpinfo->scam_lev2; + adapter->drvr_sglimit = BLOGIC_SG_LIMIT; + adapter->maxdev = (adapter->wide ? 16 : 8); + adapter->maxlun = 32; + adapter->initccbs = 4 * BLOGIC_CCB_GRP_ALLOCSIZE; + adapter->inc_ccbs = BLOGIC_CCB_GRP_ALLOCSIZE; + adapter->drvr_qdepth = 255; + adapter->adapter_qdepth = adapter->drvr_qdepth; + adapter->sync_ok = fpinfo->sync_ok; + adapter->fast_ok = fpinfo->fast_ok; + adapter->ultra_ok = fpinfo->ultra_ok; + adapter->wide_ok = fpinfo->wide_ok; + adapter->discon_ok = fpinfo->discon_ok; + adapter->tagq_ok = 0xFFFF; + goto common; + } + /* + Issue the Inquire Board ID command. + */ + if (blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id, + sizeof(id)) != sizeof(id)) + return blogic_failure(adapter, "INQUIRE BOARD ID"); + /* + Issue the Inquire Configuration command. + */ + if (blogic_cmd(adapter, BLOGIC_INQ_CONFIG, NULL, 0, &config, + sizeof(config)) + != sizeof(config)) + return blogic_failure(adapter, "INQUIRE CONFIGURATION"); + /* + Issue the Inquire Setup Information command. + */ + req_replylen = sizeof(setupinfo); + if (blogic_cmd(adapter, BLOGIC_INQ_SETUPINFO, &req_replylen, + sizeof(req_replylen), &setupinfo, + sizeof(setupinfo)) != sizeof(setupinfo)) + return blogic_failure(adapter, "INQUIRE SETUP INFORMATION"); + /* + Issue the Inquire Extended Setup Information command. + */ + req_replylen = sizeof(ext_setupinfo); + if (blogic_cmd(adapter, BLOGIC_INQ_EXTSETUP, &req_replylen, + sizeof(req_replylen), &ext_setupinfo, + sizeof(ext_setupinfo)) != sizeof(ext_setupinfo)) + return blogic_failure(adapter, + "INQUIRE EXTENDED SETUP INFORMATION"); + /* + Issue the Inquire Firmware Version 3rd Digit command. + */ + fw_ver_digit3 = '\0'; + if (id.fw_ver_digit1 > '0') + if (blogic_cmd(adapter, BLOGIC_INQ_FWVER_D3, NULL, 0, + &fw_ver_digit3, + sizeof(fw_ver_digit3)) != sizeof(fw_ver_digit3)) + return blogic_failure(adapter, + "INQUIRE FIRMWARE 3RD DIGIT"); + /* + Issue the Inquire Host Adapter Model Number command. 
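+
+	  As a worked example of the string assembly that follows (the digits
+	  and letter are illustrative): model bytes such as "958 " become
+	  adapter->model "BT-958", and firmware digits '5', '0', '7' plus a
+	  version letter 'B' become adapter->fw_ver "5.07B".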
+ */ + if (ext_setupinfo.bus_type == 'A' && id.fw_ver_digit1 == '2') + /* BusLogic BT-542B ISA 2.xx */ + strcpy(model, "542B"); + else if (ext_setupinfo.bus_type == 'E' && id.fw_ver_digit1 == '2' && + (id.fw_ver_digit2 <= '1' || (id.fw_ver_digit2 == '2' && + fw_ver_digit3 == '0'))) + /* BusLogic BT-742A EISA 2.1x or 2.20 */ + strcpy(model, "742A"); + else if (ext_setupinfo.bus_type == 'E' && id.fw_ver_digit1 == '0') + /* AMI FastDisk EISA Series 441 0.x */ + strcpy(model, "747A"); + else { + req_replylen = sizeof(model); + if (blogic_cmd(adapter, BLOGIC_INQ_MODELNO, &req_replylen, + sizeof(req_replylen), &model, + sizeof(model)) != sizeof(model)) + return blogic_failure(adapter, + "INQUIRE HOST ADAPTER MODEL NUMBER"); + } + /* + BusLogic MultiMaster Host Adapters can be identified by their + model number and the major version number of their firmware + as follows: + + 5.xx BusLogic "W" Series Host Adapters: + BT-948/958/958D + 4.xx BusLogic "C" Series Host Adapters: + BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF + 3.xx BusLogic "S" Series Host Adapters: + BT-747S/747D/757S/757D/445S/545S/542D + BT-542B/742A (revision H) + 2.xx BusLogic "A" Series Host Adapters: + BT-542B/742A (revision G and below) + 0.xx AMI FastDisk VLB/EISA BusLogic Clone Host Adapter + */ + /* + Save the Model Name and Host Adapter Name in the Host Adapter + structure. + */ + tgt = adapter->model; + *tgt++ = 'B'; + *tgt++ = 'T'; + *tgt++ = '-'; + for (i = 0; i < sizeof(model); i++) { + ch = model[i]; + if (ch == ' ' || ch == '\0') + break; + *tgt++ = ch; + } + *tgt++ = '\0'; + /* + Save the Firmware Version in the Host Adapter structure. + */ + tgt = adapter->fw_ver; + *tgt++ = id.fw_ver_digit1; + *tgt++ = '.'; + *tgt++ = id.fw_ver_digit2; + if (fw_ver_digit3 != ' ' && fw_ver_digit3 != '\0') + *tgt++ = fw_ver_digit3; + *tgt = '\0'; + /* + Issue the Inquire Firmware Version Letter command. + */ + if (strcmp(adapter->fw_ver, "3.3") >= 0) { + if (blogic_cmd(adapter, BLOGIC_INQ_FWVER_LETTER, NULL, 0, + &fw_ver_letter, + sizeof(fw_ver_letter)) != sizeof(fw_ver_letter)) + return blogic_failure(adapter, + "INQUIRE FIRMWARE VERSION LETTER"); + if (fw_ver_letter != ' ' && fw_ver_letter != '\0') + *tgt++ = fw_ver_letter; + *tgt = '\0'; + } + /* + Save the Host Adapter SCSI ID in the Host Adapter structure. + */ + adapter->scsi_id = config.id; + /* + Determine the Bus Type and save it in the Host Adapter structure, + determine and save the IRQ Channel if necessary, and determine + and save the DMA Channel for ISA Host Adapters. + */ + adapter->adapter_bus_type = + blogic_adater_bus_types[adapter->model[3] - '4']; + if (adapter->irq_ch == 0) { + if (config.irq_ch9) + adapter->irq_ch = 9; + else if (config.irq_ch10) + adapter->irq_ch = 10; + else if (config.irq_ch11) + adapter->irq_ch = 11; + else if (config.irq_ch12) + adapter->irq_ch = 12; + else if (config.irq_ch14) + adapter->irq_ch = 14; + else if (config.irq_ch15) + adapter->irq_ch = 15; + } + /* + Determine whether Extended Translation is enabled and save it in + the Host Adapter structure. + */ + georeg.all = blogic_rdgeom(adapter); + adapter->ext_trans_enable = georeg.gr.ext_trans_enable; + /* + Save the Scatter Gather Limits, Level Sensitive Interrupt flag, Wide + SCSI flag, Differential SCSI flag, SCAM Supported flag, and + Ultra SCSI flag in the Host Adapter structure. 
+ */ + adapter->adapter_sglimit = ext_setupinfo.sg_limit; + adapter->drvr_sglimit = adapter->adapter_sglimit; + if (adapter->adapter_sglimit > BLOGIC_SG_LIMIT) + adapter->drvr_sglimit = BLOGIC_SG_LIMIT; + if (ext_setupinfo.misc.level_int) + adapter->level_int = true; + adapter->wide = ext_setupinfo.wide; + adapter->differential = ext_setupinfo.differential; + adapter->scam = ext_setupinfo.scam; + adapter->ultra = ext_setupinfo.ultra; + /* + Determine whether Extended LUN Format CCBs are supported and save the + information in the Host Adapter structure. + */ + if (adapter->fw_ver[0] == '5' || (adapter->fw_ver[0] == '4' && + adapter->wide)) + adapter->ext_lun = true; + /* + Issue the Inquire PCI Host Adapter Information command to read the + Termination Information from "W" series MultiMaster Host Adapters. + */ + if (adapter->fw_ver[0] == '5') { + if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0, + &adapter_info, + sizeof(adapter_info)) != sizeof(adapter_info)) + return blogic_failure(adapter, + "INQUIRE PCI HOST ADAPTER INFORMATION"); + /* + Save the Termination Information in the Host Adapter + structure. + */ + if (adapter_info.genericinfo_valid) { + adapter->terminfo_valid = true; + adapter->low_term = adapter_info.low_term; + adapter->high_term = adapter_info.high_term; + } + } + /* + Issue the Fetch Host Adapter Local RAM command to read the + AutoSCSI data from "W" and "C" series MultiMaster Host Adapters. + */ + if (adapter->fw_ver[0] >= '4') { + fetch_localram.offset = BLOGIC_AUTOSCSI_BASE; + fetch_localram.count = sizeof(autoscsi); + if (blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM, &fetch_localram, + sizeof(fetch_localram), &autoscsi, + sizeof(autoscsi)) != sizeof(autoscsi)) + return blogic_failure(adapter, + "FETCH HOST ADAPTER LOCAL RAM"); + /* + Save the Parity Checking Enabled, Bus Reset Enabled, + and Termination Information in the Host Adapter structure. + */ + adapter->parity = autoscsi.parity; + adapter->reset_enabled = autoscsi.reset_enabled; + if (adapter->fw_ver[0] == '4') { + adapter->terminfo_valid = true; + adapter->low_term = autoscsi.low_term; + adapter->high_term = autoscsi.high_term; + } + /* + Save the Wide Permitted, Fast Permitted, Synchronous + Permitted, Disconnect Permitted, Ultra Permitted, and + SCAM Information in the Host Adapter structure. + */ + adapter->wide_ok = autoscsi.wide_ok; + adapter->fast_ok = autoscsi.fast_ok; + adapter->sync_ok = autoscsi.sync_ok; + adapter->discon_ok = autoscsi.discon_ok; + if (adapter->ultra) + adapter->ultra_ok = autoscsi.ultra_ok; + if (adapter->scam) { + adapter->scam_enabled = autoscsi.scam_enabled; + adapter->scam_lev2 = autoscsi.scam_lev2; + } + } + /* + Initialize fields in the Host Adapter structure for "S" and "A" + series MultiMaster Host Adapters. + */ + if (adapter->fw_ver[0] < '4') { + if (setupinfo.sync) { + adapter->sync_ok = 0xFF; + if (adapter->adapter_bus_type == BLOGIC_EISA_BUS) { + if (ext_setupinfo.misc.fast_on_eisa) + adapter->fast_ok = 0xFF; + if (strcmp(adapter->model, "BT-757") == 0) + adapter->wide_ok = 0xFF; + } + } + adapter->discon_ok = 0xFF; + adapter->parity = setupinfo.parity; + adapter->reset_enabled = true; + } + /* + Determine the maximum number of Target IDs and Logical Units + supported by this driver for Wide and Narrow Host Adapters. + */ + adapter->maxdev = (adapter->wide ? 16 : 8); + adapter->maxlun = (adapter->ext_lun ? 
32 : 8); + /* + Select appropriate values for the Mailbox Count, Driver Queue Depth, + Initial CCBs, and Incremental CCBs variables based on whether + or not Strict Round Robin Mode is supported. If Strict Round + Robin Mode is supported, then there is no performance degradation + in using the maximum possible number of Outgoing and Incoming + Mailboxes and allowing the Tagged and Untagged Queue Depths to + determine the actual utilization. If Strict Round Robin Mode is + not supported, then the Host Adapter must scan all the Outgoing + Mailboxes whenever an Outgoing Mailbox entry is made, which can + cause a substantial performance penalty. The host adapters + actually have room to store the following number of CCBs + internally; that is, they can internally queue and manage this + many active commands on the SCSI bus simultaneously. Performance + measurements demonstrate that the Driver Queue Depth should be + set to the Mailbox Count, rather than the Host Adapter Queue + Depth (internal CCB capacity), as it is more efficient to have the + queued commands waiting in Outgoing Mailboxes if necessary than + to block the process in the higher levels of the SCSI Subsystem. + + 192 BT-948/958/958D + 100 BT-946C/956C/956CD/747C/757C/757CD/445C + 50 BT-545C/540CF + 30 BT-747S/747D/757S/757D/445S/545S/542D/542B/742A + */ + if (adapter->fw_ver[0] == '5') + adapter->adapter_qdepth = 192; + else if (adapter->fw_ver[0] == '4') + adapter->adapter_qdepth = 100; + else + adapter->adapter_qdepth = 30; + if (strcmp(adapter->fw_ver, "3.31") >= 0) { + adapter->strict_rr = true; + adapter->mbox_count = BLOGIC_MAX_MAILBOX; + } else { + adapter->strict_rr = false; + adapter->mbox_count = 32; + } + adapter->drvr_qdepth = adapter->mbox_count; + adapter->initccbs = 4 * BLOGIC_CCB_GRP_ALLOCSIZE; + adapter->inc_ccbs = BLOGIC_CCB_GRP_ALLOCSIZE; + /* + Tagged Queuing support is available and operates properly on + all "W" series MultiMaster Host Adapters, on "C" series + MultiMaster Host Adapters with firmware version 4.22 and above, + and on "S" series MultiMaster Host Adapters with firmware version + 3.35 and above. + */ + adapter->tagq_ok = 0; + switch (adapter->fw_ver[0]) { + case '5': + adapter->tagq_ok = 0xFFFF; + break; + case '4': + if (strcmp(adapter->fw_ver, "4.22") >= 0) + adapter->tagq_ok = 0xFFFF; + break; + case '3': + if (strcmp(adapter->fw_ver, "3.35") >= 0) + adapter->tagq_ok = 0xFFFF; + break; + } + /* + Determine the Host Adapter BIOS Address if the BIOS is enabled and + save it in the Host Adapter structure. The BIOS is disabled if the + bios_addr is 0. + */ + adapter->bios_addr = ext_setupinfo.bios_addr << 12; + /* + BusLogic BT-445S Host Adapters prior to board revision E have a + hardware bug whereby when the BIOS is enabled, transfers to/from + the same address range the BIOS occupies modulo 16MB are handled + incorrectly. Only properly functioning BT-445S Host Adapters + have firmware version 3.37. + */ + if (adapter->bios_addr > 0 && + strcmp(adapter->model, "BT-445S") == 0 && + strcmp(adapter->fw_ver, "3.37") < 0) + return blogic_failure(adapter, "Too old firmware"); + /* + Initialize parameters common to MultiMaster and FlashPoint + Host Adapters. + */ +common: + /* + Initialize the Host Adapter Full Model Name from the Model Name. 
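+
+	  For example, adapter->model "BT-958" yields the full model name
+	  "BusLogic BT-958"; this is also the string later handed to
+	  request_irq() and returned by blogic_drvr_info().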
+ */ + strcpy(adapter->full_model, "BusLogic "); + strcat(adapter->full_model, adapter->model); + /* + Select an appropriate value for the Tagged Queue Depth either from a + BusLogic Driver Options specification, or based on whether this Host + Adapter requires that ISA Bounce Buffers be used. The Tagged Queue + Depth is left at 0 for automatic determination in + BusLogic_SelectQueueDepths. Initialize the Untagged Queue Depth. + */ + for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) { + unsigned char qdepth = 0; + if (adapter->drvr_opts != NULL && + adapter->drvr_opts->qdepth[tgt_id] > 0) + qdepth = adapter->drvr_opts->qdepth[tgt_id]; + adapter->qdepth[tgt_id] = qdepth; + } + adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH; + if (adapter->drvr_opts != NULL) + adapter->common_qdepth = adapter->drvr_opts->common_qdepth; + if (adapter->common_qdepth > 0 && + adapter->common_qdepth < adapter->untag_qdepth) + adapter->untag_qdepth = adapter->common_qdepth; + /* + Tagged Queuing is only allowed if Disconnect/Reconnect is permitted. + Therefore, mask the Tagged Queuing Permitted Default bits with the + Disconnect/Reconnect Permitted bits. + */ + adapter->tagq_ok &= adapter->discon_ok; + /* + Combine the default Tagged Queuing Permitted bits with any + BusLogic Driver Options Tagged Queuing specification. + */ + if (adapter->drvr_opts != NULL) + adapter->tagq_ok = (adapter->drvr_opts->tagq_ok & + adapter->drvr_opts->tagq_ok_mask) | + (adapter->tagq_ok & ~adapter->drvr_opts->tagq_ok_mask); + + /* + Select an appropriate value for Bus Settle Time either from a + BusLogic Driver Options specification, or from + BLOGIC_BUS_SETTLE_TIME. + */ + if (adapter->drvr_opts != NULL && + adapter->drvr_opts->bus_settle_time > 0) + adapter->bus_settle_time = adapter->drvr_opts->bus_settle_time; + else + adapter->bus_settle_time = BLOGIC_BUS_SETTLE_TIME; + /* + Indicate reading the Host Adapter Configuration completed + successfully. + */ + return true; +} + + +/* + blogic_reportconfig reports the configuration of Host Adapter. +*/ + +static bool __init blogic_reportconfig(struct blogic_adapter *adapter) +{ + unsigned short alltgt_mask = (1 << adapter->maxdev) - 1; + unsigned short sync_ok, fast_ok; + unsigned short ultra_ok, wide_ok; + unsigned short discon_ok, tagq_ok; + bool common_syncneg, common_tagq_depth; + char syncstr[BLOGIC_MAXDEV + 1]; + char widestr[BLOGIC_MAXDEV + 1]; + char discon_str[BLOGIC_MAXDEV + 1]; + char tagq_str[BLOGIC_MAXDEV + 1]; + char *syncmsg = syncstr; + char *widemsg = widestr; + char *discon_msg = discon_str; + char *tagq_msg = tagq_str; + int tgt_id; + + blogic_info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n", adapter, adapter->model, blogic_adapter_busnames[adapter->adapter_bus_type], (adapter->wide ? " Wide" : ""), (adapter->differential ? " Differential" : ""), (adapter->ultra ? " Ultra" : "")); + blogic_info(" Firmware Version: %s, I/O Address: 0x%lX, IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? 
"Level" : "Edge")); + if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) { + blogic_info(" DMA Channel: None, ", adapter); + if (adapter->bios_addr > 0) + blogic_info("BIOS Address: 0x%X, ", adapter, + adapter->bios_addr); + else + blogic_info("BIOS Address: None, ", adapter); + } else { + blogic_info(" PCI Bus: %d, Device: %d, Address: ", adapter, + adapter->bus, adapter->dev); + if (adapter->pci_addr > 0) + blogic_info("0x%lX, ", adapter, adapter->pci_addr); + else + blogic_info("Unassigned, ", adapter); + } + blogic_info("Host Adapter SCSI ID: %d\n", adapter, adapter->scsi_id); + blogic_info(" Parity Checking: %s, Extended Translation: %s\n", + adapter, (adapter->parity ? "Enabled" : "Disabled"), + (adapter->ext_trans_enable ? "Enabled" : "Disabled")); + alltgt_mask &= ~(1 << adapter->scsi_id); + sync_ok = adapter->sync_ok & alltgt_mask; + fast_ok = adapter->fast_ok & alltgt_mask; + ultra_ok = adapter->ultra_ok & alltgt_mask; + if ((blogic_multimaster_type(adapter) && + (adapter->fw_ver[0] >= '4' || + adapter->adapter_bus_type == BLOGIC_EISA_BUS)) || + blogic_flashpoint_type(adapter)) { + common_syncneg = false; + if (sync_ok == 0) { + syncmsg = "Disabled"; + common_syncneg = true; + } else if (sync_ok == alltgt_mask) { + if (fast_ok == 0) { + syncmsg = "Slow"; + common_syncneg = true; + } else if (fast_ok == alltgt_mask) { + if (ultra_ok == 0) { + syncmsg = "Fast"; + common_syncneg = true; + } else if (ultra_ok == alltgt_mask) { + syncmsg = "Ultra"; + common_syncneg = true; + } + } + } + if (!common_syncneg) { + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + syncstr[tgt_id] = ((!(sync_ok & (1 << tgt_id))) ? 'N' : (!(fast_ok & (1 << tgt_id)) ? 'S' : (!(ultra_ok & (1 << tgt_id)) ? 'F' : 'U'))); + syncstr[adapter->scsi_id] = '#'; + syncstr[adapter->maxdev] = '\0'; + } + } else + syncmsg = (sync_ok == 0 ? "Disabled" : "Enabled"); + wide_ok = adapter->wide_ok & alltgt_mask; + if (wide_ok == 0) + widemsg = "Disabled"; + else if (wide_ok == alltgt_mask) + widemsg = "Enabled"; + else { + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + widestr[tgt_id] = ((wide_ok & (1 << tgt_id)) ? 'Y' : 'N'); + widestr[adapter->scsi_id] = '#'; + widestr[adapter->maxdev] = '\0'; + } + discon_ok = adapter->discon_ok & alltgt_mask; + if (discon_ok == 0) + discon_msg = "Disabled"; + else if (discon_ok == alltgt_mask) + discon_msg = "Enabled"; + else { + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + discon_str[tgt_id] = ((discon_ok & (1 << tgt_id)) ? 'Y' : 'N'); + discon_str[adapter->scsi_id] = '#'; + discon_str[adapter->maxdev] = '\0'; + } + tagq_ok = adapter->tagq_ok & alltgt_mask; + if (tagq_ok == 0) + tagq_msg = "Disabled"; + else if (tagq_ok == alltgt_mask) + tagq_msg = "Enabled"; + else { + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + tagq_str[tgt_id] = ((tagq_ok & (1 << tgt_id)) ? 
'Y' : 'N'); + tagq_str[adapter->scsi_id] = '#'; + tagq_str[adapter->maxdev] = '\0'; + } + blogic_info(" Synchronous Negotiation: %s, Wide Negotiation: %s\n", + adapter, syncmsg, widemsg); + blogic_info(" Disconnect/Reconnect: %s, Tagged Queuing: %s\n", adapter, + discon_msg, tagq_msg); + if (blogic_multimaster_type(adapter)) { + blogic_info(" Scatter/Gather Limit: %d of %d segments, Mailboxes: %d\n", adapter, adapter->drvr_sglimit, adapter->adapter_sglimit, adapter->mbox_count); + blogic_info(" Driver Queue Depth: %d, Host Adapter Queue Depth: %d\n", adapter, adapter->drvr_qdepth, adapter->adapter_qdepth); + } else + blogic_info(" Driver Queue Depth: %d, Scatter/Gather Limit: %d segments\n", adapter, adapter->drvr_qdepth, adapter->drvr_sglimit); + blogic_info(" Tagged Queue Depth: ", adapter); + common_tagq_depth = true; + for (tgt_id = 1; tgt_id < adapter->maxdev; tgt_id++) + if (adapter->qdepth[tgt_id] != adapter->qdepth[0]) { + common_tagq_depth = false; + break; + } + if (common_tagq_depth) { + if (adapter->qdepth[0] > 0) + blogic_info("%d", adapter, adapter->qdepth[0]); + else + blogic_info("Automatic", adapter); + } else + blogic_info("Individual", adapter); + blogic_info(", Untagged Queue Depth: %d\n", adapter, + adapter->untag_qdepth); + if (adapter->terminfo_valid) { + if (adapter->wide) + blogic_info(" SCSI Bus Termination: %s", adapter, + (adapter->low_term ? (adapter->high_term ? "Both Enabled" : "Low Enabled") : (adapter->high_term ? "High Enabled" : "Both Disabled"))); + else + blogic_info(" SCSI Bus Termination: %s", adapter, + (adapter->low_term ? "Enabled" : "Disabled")); + if (adapter->scam) + blogic_info(", SCAM: %s", adapter, + (adapter->scam_enabled ? (adapter->scam_lev2 ? "Enabled, Level 2" : "Enabled, Level 1") : "Disabled")); + blogic_info("\n", adapter); + } + /* + Indicate reporting the Host Adapter configuration completed + successfully. + */ + return true; +} + + +/* + blogic_getres acquires the system resources necessary to use + Host Adapter. +*/ + +static bool __init blogic_getres(struct blogic_adapter *adapter) +{ + if (adapter->irq_ch == 0) { + blogic_err("NO LEGAL INTERRUPT CHANNEL ASSIGNED - DETACHING\n", + adapter); + return false; + } + /* + Acquire shared access to the IRQ Channel. + */ + if (request_irq(adapter->irq_ch, blogic_inthandler, IRQF_SHARED, + adapter->full_model, adapter) < 0) { + blogic_err("UNABLE TO ACQUIRE IRQ CHANNEL %d - DETACHING\n", + adapter, adapter->irq_ch); + return false; + } + adapter->irq_acquired = true; + /* + Indicate the System Resource Acquisition completed successfully, + */ + return true; +} + + +/* + blogic_relres releases any system resources previously acquired + by blogic_getres. +*/ + +static void blogic_relres(struct blogic_adapter *adapter) +{ + /* + Release shared access to the IRQ Channel. + */ + if (adapter->irq_acquired) + free_irq(adapter->irq_ch, adapter); + /* + Release any allocated memory structs not released elsewhere + */ + if (adapter->mbox_space) + dma_free_coherent(&adapter->pci_device->dev, adapter->mbox_sz, + adapter->mbox_space, adapter->mbox_space_handle); + pci_dev_put(adapter->pci_device); + adapter->mbox_space = NULL; + adapter->mbox_space_handle = 0; + adapter->mbox_sz = 0; +} + + +/* + blogic_initadapter initializes Host Adapter. This is the only + function called during SCSI Host Adapter detection which modifies the state + of the Host Adapter from its initial power on or hard reset state. 
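+
+  For MultiMaster Host Adapters, a single DMA coherent region is allocated
+  holding the Outgoing Mailboxes followed immediately by the Incoming
+  Mailboxes, the Host Adapter is pointed at that region with the Initialize
+  Extended Mailbox command, and Strict Round Robin Mode and the Extended LUN
+  CCB Format are enabled when the firmware supports them.  FlashPoint Host
+  Adapters do not use mailboxes and skip that setup entirely.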
+*/ + +static bool blogic_initadapter(struct blogic_adapter *adapter) +{ + struct blogic_extmbox_req extmbox_req; + enum blogic_rr_req rr_req; + enum blogic_setccb_fmt setccb_fmt; + int tgt_id; + + /* + Initialize the pointers to the first and last CCBs that are + queued for completion processing. + */ + adapter->firstccb = NULL; + adapter->lastccb = NULL; + + /* + Initialize the Bus Device Reset Pending CCB, Tagged Queuing Active, + Command Successful Flag, Active Commands, and Commands Since Reset + for each Target Device. + */ + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) { + adapter->bdr_pend[tgt_id] = NULL; + adapter->tgt_flags[tgt_id].tagq_active = false; + adapter->tgt_flags[tgt_id].cmd_good = false; + adapter->active_cmds[tgt_id] = 0; + adapter->cmds_since_rst[tgt_id] = 0; + } + + /* + FlashPoint Host Adapters do not use Outgoing and Incoming Mailboxes. + */ + if (blogic_flashpoint_type(adapter)) + goto done; + + /* + Initialize the Outgoing and Incoming Mailbox pointers. + */ + adapter->mbox_sz = adapter->mbox_count * (sizeof(struct blogic_outbox) + sizeof(struct blogic_inbox)); + adapter->mbox_space = dma_alloc_coherent(&adapter->pci_device->dev, + adapter->mbox_sz, &adapter->mbox_space_handle, + GFP_KERNEL); + if (adapter->mbox_space == NULL) + return blogic_failure(adapter, "MAILBOX ALLOCATION"); + adapter->first_outbox = (struct blogic_outbox *) adapter->mbox_space; + adapter->last_outbox = adapter->first_outbox + adapter->mbox_count - 1; + adapter->next_outbox = adapter->first_outbox; + adapter->first_inbox = (struct blogic_inbox *) (adapter->last_outbox + 1); + adapter->last_inbox = adapter->first_inbox + adapter->mbox_count - 1; + adapter->next_inbox = adapter->first_inbox; + + /* + Initialize the Outgoing and Incoming Mailbox structures. + */ + memset(adapter->first_outbox, 0, + adapter->mbox_count * sizeof(struct blogic_outbox)); + memset(adapter->first_inbox, 0, + adapter->mbox_count * sizeof(struct blogic_inbox)); + + /* + Initialize the Host Adapter's Pointer to the Outgoing/Incoming + Mailboxes. + */ + extmbox_req.mbox_count = adapter->mbox_count; + extmbox_req.base_mbox_addr = (u32) adapter->mbox_space_handle; + if (blogic_cmd(adapter, BLOGIC_INIT_EXT_MBOX, &extmbox_req, + sizeof(extmbox_req), NULL, 0) < 0) + return blogic_failure(adapter, "MAILBOX INITIALIZATION"); + /* + Enable Strict Round Robin Mode if supported by the Host Adapter. In + Strict Round Robin Mode, the Host Adapter only looks at the next + Outgoing Mailbox for each new command, rather than scanning + through all the Outgoing Mailboxes to find any that have new + commands in them. Strict Round Robin Mode is significantly more + efficient. + */ + if (adapter->strict_rr) { + rr_req = BLOGIC_STRICT_RR_MODE; + if (blogic_cmd(adapter, BLOGIC_STRICT_RR, &rr_req, + sizeof(rr_req), NULL, 0) < 0) + return blogic_failure(adapter, + "ENABLE STRICT ROUND ROBIN MODE"); + } + + /* + For Host Adapters that support Extended LUN Format CCBs, issue the + Set CCB Format command to allow 32 Logical Units per Target Device. + */ + if (adapter->ext_lun) { + setccb_fmt = BLOGIC_EXT_LUN_CCB; + if (blogic_cmd(adapter, BLOGIC_SETCCB_FMT, &setccb_fmt, + sizeof(setccb_fmt), NULL, 0) < 0) + return blogic_failure(adapter, "SET CCB FORMAT"); + } + + /* + Announce Successful Initialization. 
+ */ +done: + if (!adapter->adapter_initd) { + blogic_info("*** %s Initialized Successfully ***\n", adapter, + adapter->full_model); + blogic_info("\n", adapter); + } else + blogic_warn("*** %s Initialized Successfully ***\n", adapter, + adapter->full_model); + adapter->adapter_initd = true; + + /* + Indicate the Host Adapter Initialization completed successfully. + */ + return true; +} + + +/* + blogic_inquiry inquires about the Target Devices accessible + through Host Adapter. +*/ + +static bool __init blogic_inquiry(struct blogic_adapter *adapter) +{ + u16 installed_devs; + u8 installed_devs0to7[8]; + struct blogic_setup_info setupinfo; + u8 sync_period[BLOGIC_MAXDEV]; + unsigned char req_replylen; + int tgt_id; + + /* + Wait a few seconds between the Host Adapter Hard Reset which + initiates a SCSI Bus Reset and issuing any SCSI Commands. Some + SCSI devices get confused if they receive SCSI Commands too soon + after a SCSI Bus Reset. + */ + blogic_delay(adapter->bus_settle_time); + /* + FlashPoint Host Adapters do not provide for Target Device Inquiry. + */ + if (blogic_flashpoint_type(adapter)) + return true; + /* + Inhibit the Target Device Inquiry if requested. + */ + if (adapter->drvr_opts != NULL && adapter->drvr_opts->stop_tgt_inquiry) + return true; + /* + Issue the Inquire Target Devices command for host adapters with + firmware version 4.25 or later, or the Inquire Installed Devices + ID 0 to 7 command for older host adapters. This is necessary to + force Synchronous Transfer Negotiation so that the Inquire Setup + Information and Inquire Synchronous Period commands will return + valid data. The Inquire Target Devices command is preferable to + Inquire Installed Devices ID 0 to 7 since it only probes Logical + Unit 0 of each Target Device. + */ + if (strcmp(adapter->fw_ver, "4.25") >= 0) { + + /* + Issue a Inquire Target Devices command. Inquire Target + Devices only tests Logical Unit 0 of each Target Device + unlike the Inquire Installed Devices commands which test + Logical Units 0 - 7. Two bytes are returned, where byte + 0 bit 0 set indicates that Target Device 0 exists, and so on. + */ + + if (blogic_cmd(adapter, BLOGIC_INQ_DEV, NULL, 0, + &installed_devs, sizeof(installed_devs)) + != sizeof(installed_devs)) + return blogic_failure(adapter, "INQUIRE TARGET DEVICES"); + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + adapter->tgt_flags[tgt_id].tgt_exists = + (installed_devs & (1 << tgt_id) ? true : false); + } else { + + /* + Issue an Inquire Installed Devices command. For each + Target Device, a byte is returned where bit 0 set + indicates that Logical Unit 0 * exists, bit 1 set + indicates that Logical Unit 1 exists, and so on. + */ + + if (blogic_cmd(adapter, BLOGIC_INQ_DEV0TO7, NULL, 0, + &installed_devs0to7, sizeof(installed_devs0to7)) + != sizeof(installed_devs0to7)) + return blogic_failure(adapter, + "INQUIRE INSTALLED DEVICES ID 0 TO 7"); + for (tgt_id = 0; tgt_id < 8; tgt_id++) + adapter->tgt_flags[tgt_id].tgt_exists = + installed_devs0to7[tgt_id] != 0; + } + /* + Issue the Inquire Setup Information command. + */ + req_replylen = sizeof(setupinfo); + if (blogic_cmd(adapter, BLOGIC_INQ_SETUPINFO, &req_replylen, + sizeof(req_replylen), &setupinfo, sizeof(setupinfo)) + != sizeof(setupinfo)) + return blogic_failure(adapter, "INQUIRE SETUP INFORMATION"); + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + adapter->sync_offset[tgt_id] = (tgt_id < 8 ? 
setupinfo.sync0to7[tgt_id].offset : setupinfo.sync8to15[tgt_id - 8].offset); + if (strcmp(adapter->fw_ver, "5.06L") >= 0) + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + adapter->tgt_flags[tgt_id].wide_active = (tgt_id < 8 ? (setupinfo.wide_tx_active0to7 & (1 << tgt_id) ? true : false) : (setupinfo.wide_tx_active8to15 & (1 << (tgt_id - 8)) ? true : false)); + /* + Issue the Inquire Synchronous Period command. + */ + if (adapter->fw_ver[0] >= '3') { + + /* Issue a Inquire Synchronous Period command. For each + Target Device, a byte is returned which represents the + Synchronous Transfer Period in units of 10 nanoseconds. + */ + + req_replylen = sizeof(sync_period); + if (blogic_cmd(adapter, BLOGIC_INQ_SYNC_PERIOD, &req_replylen, + sizeof(req_replylen), &sync_period, + sizeof(sync_period)) != sizeof(sync_period)) + return blogic_failure(adapter, + "INQUIRE SYNCHRONOUS PERIOD"); + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + adapter->sync_period[tgt_id] = sync_period[tgt_id]; + } else + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + if (setupinfo.sync0to7[tgt_id].offset > 0) + adapter->sync_period[tgt_id] = 20 + 5 * setupinfo.sync0to7[tgt_id].tx_period; + /* + Indicate the Target Device Inquiry completed successfully. + */ + return true; +} + +/* + blogic_inithoststruct initializes the fields in the SCSI Host + structure. The base, io_port, n_io_ports, irq, and dma_channel fields in the + SCSI Host structure are intentionally left uninitialized, as this driver + handles acquisition and release of these resources explicitly, as well as + ensuring exclusive access to the Host Adapter hardware and data structures + through explicit acquisition and release of the Host Adapter's Lock. +*/ + +static void __init blogic_inithoststruct(struct blogic_adapter *adapter, + struct Scsi_Host *host) +{ + host->max_id = adapter->maxdev; + host->max_lun = adapter->maxlun; + host->max_channel = 0; + host->unique_id = adapter->io_addr; + host->this_id = adapter->scsi_id; + host->can_queue = adapter->drvr_qdepth; + host->sg_tablesize = adapter->drvr_sglimit; + host->cmd_per_lun = adapter->untag_qdepth; +} + +/* + blogic_slaveconfig will actually set the queue depth on individual + scsi devices as they are permanently added to the device chain. We + shamelessly rip off the SelectQueueDepths code to make this work mostly + like it used to. Since we don't get called once at the end of the scan + but instead get called for each device, we have to do things a bit + differently. 
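+
+  For each device, if the Target Device supports Tagged Queuing and Tagged
+  Queuing is permitted for it, the Queue Depth from the Driver Options is
+  used, or BLOGIC_MAX_AUTO_TAG_DEPTH when none was specified; otherwise the
+  Untagged Queue Depth applies.  Additional CCBs are allocated whenever the
+  sum of the Queue Depths of all existing Target Devices exceeds the number
+  of CCBs already allocated.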
+*/ +static int blogic_slaveconfig(struct scsi_device *dev) +{ + struct blogic_adapter *adapter = + (struct blogic_adapter *) dev->host->hostdata; + int tgt_id = dev->id; + int qdepth = adapter->qdepth[tgt_id]; + + if (adapter->tgt_flags[tgt_id].tagq_ok && + (adapter->tagq_ok & (1 << tgt_id))) { + if (qdepth == 0) + qdepth = BLOGIC_MAX_AUTO_TAG_DEPTH; + adapter->qdepth[tgt_id] = qdepth; + scsi_change_queue_depth(dev, qdepth); + } else { + adapter->tagq_ok &= ~(1 << tgt_id); + qdepth = adapter->untag_qdepth; + adapter->qdepth[tgt_id] = qdepth; + scsi_change_queue_depth(dev, qdepth); + } + qdepth = 0; + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + if (adapter->tgt_flags[tgt_id].tgt_exists) + qdepth += adapter->qdepth[tgt_id]; + if (qdepth > adapter->alloc_ccbs) + blogic_create_addlccbs(adapter, qdepth - adapter->alloc_ccbs, + false); + return 0; +} + +/* + blogic_init probes for BusLogic Host Adapters at the standard + I/O Addresses where they may be located, initializing, registering, and + reporting the configuration of each BusLogic Host Adapter it finds. It + returns the number of BusLogic Host Adapters successfully initialized and + registered. +*/ + +static int __init blogic_init(void) +{ + int drvr_optindex = 0, probeindex; + struct blogic_adapter *adapter; + int ret = 0; + +#ifdef MODULE + if (BusLogic) + blogic_setup(BusLogic); +#endif + + if (blogic_probe_options.noprobe) + return -ENODEV; + blogic_probeinfo_list = + kcalloc(BLOGIC_MAX_ADAPTERS, sizeof(struct blogic_probeinfo), + GFP_KERNEL); + if (blogic_probeinfo_list == NULL) { + blogic_err("BusLogic: Unable to allocate Probe Info List\n", + NULL); + return -ENOMEM; + } + + adapter = kzalloc(sizeof(struct blogic_adapter), GFP_KERNEL); + if (adapter == NULL) { + kfree(blogic_probeinfo_list); + blogic_err("BusLogic: Unable to allocate Prototype Host Adapter\n", NULL); + return -ENOMEM; + } + +#ifdef MODULE + if (BusLogic != NULL) + blogic_setup(BusLogic); +#endif + blogic_init_probeinfo_list(adapter); + for (probeindex = 0; probeindex < blogic_probeinfo_count; probeindex++) { + struct blogic_probeinfo *probeinfo = + &blogic_probeinfo_list[probeindex]; + struct blogic_adapter *myadapter = adapter; + struct Scsi_Host *host; + + if (probeinfo->io_addr == 0) + continue; + memset(myadapter, 0, sizeof(struct blogic_adapter)); + myadapter->adapter_type = probeinfo->adapter_type; + myadapter->adapter_bus_type = probeinfo->adapter_bus_type; + myadapter->io_addr = probeinfo->io_addr; + myadapter->pci_addr = probeinfo->pci_addr; + myadapter->bus = probeinfo->bus; + myadapter->dev = probeinfo->dev; + myadapter->pci_device = probeinfo->pci_device; + myadapter->irq_ch = probeinfo->irq_ch; + myadapter->addr_count = + blogic_adapter_addr_count[myadapter->adapter_type]; + + /* + Make sure region is free prior to probing. + */ + if (!request_region(myadapter->io_addr, myadapter->addr_count, + "BusLogic")) + continue; + /* + Probe the Host Adapter. If unsuccessful, abort further + initialization. + */ + if (!blogic_probe(myadapter)) { + release_region(myadapter->io_addr, + myadapter->addr_count); + continue; + } + /* + Hard Reset the Host Adapter. If unsuccessful, abort further + initialization. + */ + if (!blogic_hwreset(myadapter, true)) { + release_region(myadapter->io_addr, + myadapter->addr_count); + continue; + } + /* + Check the Host Adapter. If unsuccessful, abort further + initialization. 
+ */ + if (!blogic_checkadapter(myadapter)) { + release_region(myadapter->io_addr, + myadapter->addr_count); + continue; + } + /* + Initialize the Driver Options field if provided. + */ + if (drvr_optindex < blogic_drvr_options_count) + myadapter->drvr_opts = + &blogic_drvr_options[drvr_optindex++]; + /* + Announce the Driver Version and Date, Author's Name, + Copyright Notice, and Electronic Mail Address. + */ + blogic_announce_drvr(myadapter); + /* + Register the SCSI Host structure. + */ + + host = scsi_host_alloc(&blogic_template, + sizeof(struct blogic_adapter)); + if (host == NULL) { + release_region(myadapter->io_addr, + myadapter->addr_count); + continue; + } + myadapter = (struct blogic_adapter *) host->hostdata; + memcpy(myadapter, adapter, sizeof(struct blogic_adapter)); + myadapter->scsi_host = host; + myadapter->host_no = host->host_no; + /* + Add Host Adapter to the end of the list of registered + BusLogic Host Adapters. + */ + list_add_tail(&myadapter->host_list, &blogic_host_list); + + /* + Read the Host Adapter Configuration, Configure the Host + Adapter, Acquire the System Resources necessary to use + the Host Adapter, then Create the Initial CCBs, Initialize + the Host Adapter, and finally perform Target Device + Inquiry. From this point onward, any failure will be + assumed to be due to a problem with the Host Adapter, + rather than due to having mistakenly identified this port + as belonging to a BusLogic Host Adapter. The I/O Address + range will not be released, thereby preventing it from + being incorrectly identified as any other type of Host + Adapter. + */ + if (blogic_rdconfig(myadapter) && + blogic_reportconfig(myadapter) && + blogic_getres(myadapter) && + blogic_create_initccbs(myadapter) && + blogic_initadapter(myadapter) && + blogic_inquiry(myadapter)) { + /* + Initialization has been completed successfully. + Release and re-register usage of the I/O Address + range so that the Model Name of the Host Adapter + will appear, and initialize the SCSI Host structure. + */ + release_region(myadapter->io_addr, + myadapter->addr_count); + if (!request_region(myadapter->io_addr, + myadapter->addr_count, + myadapter->full_model)) { + printk(KERN_WARNING + "BusLogic: Release and re-register of " + "port 0x%04lx failed \n", + (unsigned long)myadapter->io_addr); + blogic_destroy_ccbs(myadapter); + blogic_relres(myadapter); + list_del(&myadapter->host_list); + scsi_host_put(host); + ret = -ENOMEM; + } else { + blogic_inithoststruct(myadapter, + host); + if (scsi_add_host(host, myadapter->pci_device + ? &myadapter->pci_device->dev + : NULL)) { + printk(KERN_WARNING + "BusLogic: scsi_add_host()" + "failed!\n"); + blogic_destroy_ccbs(myadapter); + blogic_relres(myadapter); + list_del(&myadapter->host_list); + scsi_host_put(host); + ret = -ENODEV; + } else + scsi_scan_host(host); + } + } else { + /* + An error occurred during Host Adapter Configuration + Querying, Host Adapter Configuration, Resource + Acquisition, CCB Creation, Host Adapter + Initialization, or Target Device Inquiry, so + remove Host Adapter from the list of registered + BusLogic Host Adapters, destroy the CCBs, Release + the System Resources, and Unregister the SCSI + Host. 
+ */ + blogic_destroy_ccbs(myadapter); + blogic_relres(myadapter); + list_del(&myadapter->host_list); + scsi_host_put(host); + ret = -ENODEV; + } + } + kfree(adapter); + kfree(blogic_probeinfo_list); + blogic_probeinfo_list = NULL; + return ret; +} + + +/* + blogic_deladapter releases all resources previously acquired to + support a specific Host Adapter, including the I/O Address range, and + unregisters the BusLogic Host Adapter. +*/ + +static int __exit blogic_deladapter(struct blogic_adapter *adapter) +{ + struct Scsi_Host *host = adapter->scsi_host; + + scsi_remove_host(host); + + /* + FlashPoint Host Adapters must first be released by the FlashPoint + SCCB Manager. + */ + if (blogic_flashpoint_type(adapter)) + FlashPoint_ReleaseHostAdapter(adapter->cardhandle); + /* + Destroy the CCBs and release any system resources acquired to + support Host Adapter. + */ + blogic_destroy_ccbs(adapter); + blogic_relres(adapter); + /* + Release usage of the I/O Address range. + */ + release_region(adapter->io_addr, adapter->addr_count); + /* + Remove Host Adapter from the list of registered BusLogic + Host Adapters. + */ + list_del(&adapter->host_list); + + scsi_host_put(host); + return 0; +} + + +/* + blogic_qcompleted_ccb queues CCB for completion processing. +*/ + +static void blogic_qcompleted_ccb(struct blogic_ccb *ccb) +{ + struct blogic_adapter *adapter = ccb->adapter; + + ccb->status = BLOGIC_CCB_COMPLETE; + ccb->next = NULL; + if (adapter->firstccb == NULL) { + adapter->firstccb = ccb; + adapter->lastccb = ccb; + } else { + adapter->lastccb->next = ccb; + adapter->lastccb = ccb; + } + adapter->active_cmds[ccb->tgt_id]--; +} + + +/* + blogic_resultcode computes a SCSI Subsystem Result Code from + the Host Adapter Status and Target Device Status. +*/ + +static int blogic_resultcode(struct blogic_adapter *adapter, + enum blogic_adapter_status adapter_status, + enum blogic_tgt_status tgt_status) +{ + int hoststatus; + + switch (adapter_status) { + case BLOGIC_CMD_CMPLT_NORMAL: + case BLOGIC_LINK_CMD_CMPLT: + case BLOGIC_LINK_CMD_CMPLT_FLAG: + hoststatus = DID_OK; + break; + case BLOGIC_SELECT_TIMEOUT: + hoststatus = DID_TIME_OUT; + break; + case BLOGIC_INVALID_OUTBOX_CODE: + case BLOGIC_INVALID_CMD_CODE: + case BLOGIC_BAD_CMD_PARAM: + blogic_warn("BusLogic Driver Protocol Error 0x%02X\n", + adapter, adapter_status); + fallthrough; + case BLOGIC_DATA_UNDERRUN: + case BLOGIC_DATA_OVERRUN: + case BLOGIC_NOEXPECT_BUSFREE: + case BLOGIC_LINKCCB_BADLUN: + case BLOGIC_AUTOREQSENSE_FAIL: + case BLOGIC_TAGQUEUE_REJECT: + case BLOGIC_BAD_MSG_RCVD: + case BLOGIC_HW_FAIL: + case BLOGIC_BAD_RECONNECT: + case BLOGIC_ABRT_QUEUE: + case BLOGIC_ADAPTER_SW_ERROR: + case BLOGIC_HW_TIMEOUT: + case BLOGIC_PARITY_ERR: + hoststatus = DID_ERROR; + break; + case BLOGIC_INVALID_BUSPHASE: + case BLOGIC_NORESPONSE_TO_ATN: + case BLOGIC_HW_RESET: + case BLOGIC_RST_FROM_OTHERDEV: + case BLOGIC_HW_BDR: + hoststatus = DID_RESET; + break; + default: + blogic_warn("Unknown Host Adapter Status 0x%02X\n", adapter, + adapter_status); + hoststatus = DID_ERROR; + break; + } + return (hoststatus << 16) | tgt_status; +} + +/* + * turn the dma address from an inbox into a ccb pointer + * This is rather inefficient. 
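+ * The lookup walks the list of all CCBs allocated for the Host Adapter and
+ * compares each CCB's DMA handle against the address in the Incoming
+ * Mailbox, so every completed mailbox entry costs a pass over the CCB list.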
+ */ +static struct blogic_ccb * +blogic_inbox_to_ccb(struct blogic_adapter *adapter, struct blogic_inbox *inbox) +{ + struct blogic_ccb *ccb; + + for (ccb = adapter->all_ccbs; ccb; ccb = ccb->next_all) + if (inbox->ccb == ccb->dma_handle) + break; + + return ccb; +} + +/* + blogic_scan_inbox scans the Incoming Mailboxes saving any + Incoming Mailbox entries for completion processing. +*/ +static void blogic_scan_inbox(struct blogic_adapter *adapter) +{ + /* + Scan through the Incoming Mailboxes in Strict Round Robin + fashion, saving any completed CCBs for further processing. It + is essential that for each CCB and SCSI Command issued, command + completion processing is performed exactly once. Therefore, + only Incoming Mailboxes with completion code Command Completed + Without Error, Command Completed With Error, or Command Aborted + At Host Request are saved for completion processing. When an + Incoming Mailbox has a completion code of Aborted Command Not + Found, the CCB had already completed or been aborted before the + current Abort request was processed, and so completion processing + has already occurred and no further action should be taken. + */ + struct blogic_inbox *next_inbox = adapter->next_inbox; + enum blogic_cmplt_code comp_code; + + while ((comp_code = next_inbox->comp_code) != BLOGIC_INBOX_FREE) { + struct blogic_ccb *ccb = blogic_inbox_to_ccb(adapter, next_inbox); + if (!ccb) { + /* + * This should never happen, unless the CCB list is + * corrupted in memory. + */ + blogic_warn("Could not find CCB for dma address %x\n", adapter, next_inbox->ccb); + } else if (comp_code != BLOGIC_CMD_NOTFOUND) { + if (ccb->status == BLOGIC_CCB_ACTIVE || + ccb->status == BLOGIC_CCB_RESET) { + /* + Save the Completion Code for this CCB and + queue the CCB for completion processing. + */ + ccb->comp_code = comp_code; + blogic_qcompleted_ccb(ccb); + } else { + /* + If a CCB ever appears in an Incoming Mailbox + and is not marked as status Active or Reset, + then there is most likely a bug in + the Host Adapter firmware. + */ + blogic_warn("Illegal CCB #%ld status %d in Incoming Mailbox\n", adapter, ccb->serial, ccb->status); + } + } + next_inbox->comp_code = BLOGIC_INBOX_FREE; + if (++next_inbox > adapter->last_inbox) + next_inbox = adapter->first_inbox; + } + adapter->next_inbox = next_inbox; +} + + +/* + blogic_process_ccbs iterates over the completed CCBs for Host + Adapter setting the SCSI Command Result Codes, deallocating the CCBs, and + calling the SCSI Subsystem Completion Routines. The Host Adapter's Lock + should already have been acquired by the caller. +*/ + +static void blogic_process_ccbs(struct blogic_adapter *adapter) +{ + if (adapter->processing_ccbs) + return; + adapter->processing_ccbs = true; + while (adapter->firstccb != NULL) { + struct blogic_ccb *ccb = adapter->firstccb; + struct scsi_cmnd *command = ccb->command; + adapter->firstccb = ccb->next; + if (adapter->firstccb == NULL) + adapter->lastccb = NULL; + /* + Process the Completed CCB. + */ + if (ccb->opcode == BLOGIC_BDR) { + int tgt_id = ccb->tgt_id; + + blogic_warn("Bus Device Reset CCB #%ld to Target %d Completed\n", adapter, ccb->serial, tgt_id); + blogic_inc_count(&adapter->tgt_stats[tgt_id].bdr_done); + adapter->tgt_flags[tgt_id].tagq_active = false; + adapter->cmds_since_rst[tgt_id] = 0; + adapter->last_resetdone[tgt_id] = jiffies; + /* + Place CCB back on the Host Adapter's free list. 
+ */ + blogic_dealloc_ccb(ccb, 1); +#if 0 /* this needs to be redone different for new EH */ + /* + Bus Device Reset CCBs have the command field + non-NULL only when a Bus Device Reset was requested + for a command that did not have a currently active + CCB in the Host Adapter (i.e., a Synchronous Bus + Device Reset), and hence would not have its + Completion Routine called otherwise. + */ + while (command != NULL) { + struct scsi_cmnd *nxt_cmd = + command->reset_chain; + command->reset_chain = NULL; + command->result = DID_RESET << 16; + scsi_done(command); + command = nxt_cmd; + } +#endif + /* + Iterate over the CCBs for this Host Adapter + performing completion processing for any CCBs + marked as Reset for this Target. + */ + for (ccb = adapter->all_ccbs; ccb != NULL; + ccb = ccb->next_all) + if (ccb->status == BLOGIC_CCB_RESET && + ccb->tgt_id == tgt_id) { + command = ccb->command; + blogic_dealloc_ccb(ccb, 1); + adapter->active_cmds[tgt_id]--; + command->result = DID_RESET << 16; + scsi_done(command); + } + adapter->bdr_pend[tgt_id] = NULL; + } else { + /* + Translate the Completion Code, Host Adapter Status, + and Target Device Status into a SCSI Subsystem + Result Code. + */ + switch (ccb->comp_code) { + case BLOGIC_INBOX_FREE: + case BLOGIC_CMD_NOTFOUND: + case BLOGIC_INVALID_CCB: + blogic_warn("CCB #%ld to Target %d Impossible State\n", adapter, ccb->serial, ccb->tgt_id); + break; + case BLOGIC_CMD_COMPLETE_GOOD: + adapter->tgt_stats[ccb->tgt_id] + .cmds_complete++; + adapter->tgt_flags[ccb->tgt_id] + .cmd_good = true; + command->result = DID_OK << 16; + break; + case BLOGIC_CMD_ABORT_BY_HOST: + blogic_warn("CCB #%ld to Target %d Aborted\n", + adapter, ccb->serial, ccb->tgt_id); + blogic_inc_count(&adapter->tgt_stats[ccb->tgt_id].aborts_done); + command->result = DID_ABORT << 16; + break; + case BLOGIC_CMD_COMPLETE_ERROR: + command->result = blogic_resultcode(adapter, + ccb->adapter_status, ccb->tgt_status); + if (ccb->adapter_status != BLOGIC_SELECT_TIMEOUT) { + adapter->tgt_stats[ccb->tgt_id] + .cmds_complete++; + if (blogic_global_options.trace_err) { + int i; + blogic_notice("CCB #%ld Target %d: Result %X Host " + "Adapter Status %02X Target Status %02X\n", adapter, ccb->serial, ccb->tgt_id, command->result, ccb->adapter_status, ccb->tgt_status); + blogic_notice("CDB ", adapter); + for (i = 0; i < ccb->cdblen; i++) + blogic_notice(" %02X", adapter, ccb->cdb[i]); + blogic_notice("\n", adapter); + blogic_notice("Sense ", adapter); + for (i = 0; i < ccb->sense_datalen; i++) + blogic_notice(" %02X", adapter, command->sense_buffer[i]); + blogic_notice("\n", adapter); + } + } + break; + } + /* + When an INQUIRY command completes normally, save the + CmdQue (Tagged Queuing Supported) and WBus16 (16 Bit + Wide Data Transfers Supported) bits. + */ + if (ccb->cdb[0] == INQUIRY && ccb->cdb[1] == 0 && + ccb->adapter_status == BLOGIC_CMD_CMPLT_NORMAL) { + struct blogic_tgt_flags *tgt_flags = + &adapter->tgt_flags[ccb->tgt_id]; + struct scsi_inquiry *inquiry = + (struct scsi_inquiry *) scsi_sglist(command); + tgt_flags->tgt_exists = true; + tgt_flags->tagq_ok = inquiry->CmdQue; + tgt_flags->wide_ok = inquiry->WBus16; + } + /* + Place CCB back on the Host Adapter's free list. + */ + blogic_dealloc_ccb(ccb, 1); + /* + Call the SCSI Command Completion Routine. + */ + scsi_done(command); + } + } + adapter->processing_ccbs = false; +} + + +/* + blogic_inthandler handles hardware interrupts from BusLogic Host + Adapters. 
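+
+  For MultiMaster Host Adapters the Interrupt Register is read and
+  acknowledged, and an Incoming Mailbox Loaded interrupt causes the Incoming
+  Mailboxes to be scanned; for FlashPoint Host Adapters the SCCB Manager is
+  asked to handle the interrupt.  Any completed CCBs are then processed, and
+  the Host Adapter is reset if an External SCSI Bus Reset or an Internal
+  FlashPoint Error was reported.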
+*/ + +static irqreturn_t blogic_inthandler(int irq_ch, void *devid) +{ + struct blogic_adapter *adapter = (struct blogic_adapter *) devid; + unsigned long processor_flag; + /* + Acquire exclusive access to Host Adapter. + */ + spin_lock_irqsave(adapter->scsi_host->host_lock, processor_flag); + /* + Handle Interrupts appropriately for each Host Adapter type. + */ + if (blogic_multimaster_type(adapter)) { + union blogic_int_reg intreg; + /* + Read the Host Adapter Interrupt Register. + */ + intreg.all = blogic_rdint(adapter); + if (intreg.ir.int_valid) { + /* + Acknowledge the interrupt and reset the Host Adapter + Interrupt Register. + */ + blogic_intreset(adapter); + /* + Process valid External SCSI Bus Reset and Incoming + Mailbox Loaded Interrupts. Command Complete + Interrupts are noted, and Outgoing Mailbox Available + Interrupts are ignored, as they are never enabled. + */ + if (intreg.ir.ext_busreset) + adapter->adapter_extreset = true; + else if (intreg.ir.mailin_loaded) + blogic_scan_inbox(adapter); + else if (intreg.ir.cmd_complete) + adapter->adapter_cmd_complete = true; + } + } else { + /* + Check if there is a pending interrupt for this Host Adapter. + */ + if (FlashPoint_InterruptPending(adapter->cardhandle)) + switch (FlashPoint_HandleInterrupt(adapter->cardhandle)) { + case FPOINT_NORMAL_INT: + break; + case FPOINT_EXT_RESET: + adapter->adapter_extreset = true; + break; + case FPOINT_INTERN_ERR: + blogic_warn("Internal FlashPoint Error detected - Resetting Host Adapter\n", adapter); + adapter->adapter_intern_err = true; + break; + } + } + /* + Process any completed CCBs. + */ + if (adapter->firstccb != NULL) + blogic_process_ccbs(adapter); + /* + Reset the Host Adapter if requested. + */ + if (adapter->adapter_extreset) { + blogic_warn("Resetting %s due to External SCSI Bus Reset\n", adapter, adapter->full_model); + blogic_inc_count(&adapter->ext_resets); + blogic_resetadapter(adapter, false); + adapter->adapter_extreset = false; + } else if (adapter->adapter_intern_err) { + blogic_warn("Resetting %s due to Host Adapter Internal Error\n", adapter, adapter->full_model); + blogic_inc_count(&adapter->adapter_intern_errors); + blogic_resetadapter(adapter, true); + adapter->adapter_intern_err = false; + } + /* + Release exclusive access to Host Adapter. + */ + spin_unlock_irqrestore(adapter->scsi_host->host_lock, processor_flag); + return IRQ_HANDLED; +} + + +/* + blogic_write_outbox places CCB and Action Code into an Outgoing + Mailbox for execution by Host Adapter. The Host Adapter's Lock should + already have been acquired by the caller. +*/ + +static bool blogic_write_outbox(struct blogic_adapter *adapter, + enum blogic_action action, struct blogic_ccb *ccb) +{ + struct blogic_outbox *next_outbox; + + next_outbox = adapter->next_outbox; + if (next_outbox->action == BLOGIC_OUTBOX_FREE) { + ccb->status = BLOGIC_CCB_ACTIVE; + /* + The CCB field must be written before the Action Code field + since the Host Adapter is operating asynchronously and the + locking code does not protect against simultaneous access + by the Host Adapter. 
+ */ + next_outbox->ccb = ccb->dma_handle; + next_outbox->action = action; + blogic_execmbox(adapter); + if (++next_outbox > adapter->last_outbox) + next_outbox = adapter->first_outbox; + adapter->next_outbox = next_outbox; + if (action == BLOGIC_MBOX_START) { + adapter->active_cmds[ccb->tgt_id]++; + if (ccb->opcode != BLOGIC_BDR) + adapter->tgt_stats[ccb->tgt_id].cmds_tried++; + } + return true; + } + return false; +} + +/* Error Handling (EH) support */ + +static int blogic_hostreset(struct scsi_cmnd *SCpnt) +{ + struct blogic_adapter *adapter = + (struct blogic_adapter *) SCpnt->device->host->hostdata; + + unsigned int id = SCpnt->device->id; + struct blogic_tgt_stats *stats = &adapter->tgt_stats[id]; + int rc; + + spin_lock_irq(SCpnt->device->host->host_lock); + + blogic_inc_count(&stats->adapter_reset_req); + + rc = blogic_resetadapter(adapter, false); + spin_unlock_irq(SCpnt->device->host->host_lock); + return rc; +} + +/* + blogic_qcmd creates a CCB for Command and places it into an + Outgoing Mailbox for execution by the associated Host Adapter. +*/ + +static int blogic_qcmd_lck(struct scsi_cmnd *command) +{ + void (*comp_cb)(struct scsi_cmnd *) = scsi_done; + struct blogic_adapter *adapter = + (struct blogic_adapter *) command->device->host->hostdata; + struct blogic_tgt_flags *tgt_flags = + &adapter->tgt_flags[command->device->id]; + struct blogic_tgt_stats *tgt_stats = adapter->tgt_stats; + unsigned char *cdb = command->cmnd; + int cdblen = command->cmd_len; + int tgt_id = command->device->id; + int lun = command->device->lun; + int buflen = scsi_bufflen(command); + int count; + struct blogic_ccb *ccb; + dma_addr_t sense_buf; + + /* + SCSI REQUEST_SENSE commands will be executed automatically by the + Host Adapter for any errors, so they should not be executed + explicitly unless the Sense Data is zero indicating that no error + occurred. + */ + if (cdb[0] == REQUEST_SENSE && command->sense_buffer[0] != 0) { + command->result = DID_OK << 16; + comp_cb(command); + return 0; + } + /* + Allocate a CCB from the Host Adapter's free list. In the unlikely + event that there are none available and memory allocation fails, + wait 1 second and try again. If that fails, the Host Adapter is + probably hung so signal an error as a Host Adapter Hard Reset + should be initiated soon. + */ + ccb = blogic_alloc_ccb(adapter); + if (ccb == NULL) { + spin_unlock_irq(adapter->scsi_host->host_lock); + blogic_delay(1); + spin_lock_irq(adapter->scsi_host->host_lock); + ccb = blogic_alloc_ccb(adapter); + if (ccb == NULL) { + command->result = DID_ERROR << 16; + comp_cb(command); + return 0; + } + } + + /* + Initialize the fields in the BusLogic Command Control Block (CCB). 
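+	   The Command's data buffer is mapped with scsi_dma_map; when
+	   scatter/gather segments are present a Scatter/Gather CCB is built
+	   whose Data Pointer refers to the segment list embedded in the CCB,
+	   and otherwise a standard CCB with no data pointer is used.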
+ */ + count = scsi_dma_map(command); + BUG_ON(count < 0); + if (count) { + struct scatterlist *sg; + int i; + + ccb->opcode = BLOGIC_INITIATOR_CCB_SG; + ccb->datalen = count * sizeof(struct blogic_sg_seg); + if (blogic_multimaster_type(adapter)) + ccb->data = (unsigned int) ccb->dma_handle + + ((unsigned long) &ccb->sglist - + (unsigned long) ccb); + else + ccb->data = virt_to_32bit_virt(ccb->sglist); + + scsi_for_each_sg(command, sg, count, i) { + ccb->sglist[i].segbytes = sg_dma_len(sg); + ccb->sglist[i].segdata = sg_dma_address(sg); + } + } else if (!count) { + ccb->opcode = BLOGIC_INITIATOR_CCB; + ccb->datalen = buflen; + ccb->data = 0; + } + + switch (cdb[0]) { + case READ_6: + case READ_10: + ccb->datadir = BLOGIC_DATAIN_CHECKED; + tgt_stats[tgt_id].read_cmds++; + blogic_addcount(&tgt_stats[tgt_id].bytesread, buflen); + blogic_incszbucket(tgt_stats[tgt_id].read_sz_buckets, buflen); + break; + case WRITE_6: + case WRITE_10: + ccb->datadir = BLOGIC_DATAOUT_CHECKED; + tgt_stats[tgt_id].write_cmds++; + blogic_addcount(&tgt_stats[tgt_id].byteswritten, buflen); + blogic_incszbucket(tgt_stats[tgt_id].write_sz_buckets, buflen); + break; + default: + ccb->datadir = BLOGIC_UNCHECKED_TX; + break; + } + ccb->cdblen = cdblen; + ccb->adapter_status = 0; + ccb->tgt_status = 0; + ccb->tgt_id = tgt_id; + ccb->lun = lun; + ccb->tag_enable = false; + ccb->legacytag_enable = false; + /* + BusLogic recommends that after a Reset the first couple of + commands that are sent to a Target Device be sent in a non + Tagged Queue fashion so that the Host Adapter and Target Device + can establish Synchronous and Wide Transfer before Queue Tag + messages can interfere with the Synchronous and Wide Negotiation + messages. By waiting to enable Tagged Queuing until after the + first BLOGIC_MAX_TAG_DEPTH commands have been queued, it is + assured that after a Reset any pending commands are requeued + before Tagged Queuing is enabled and that the Tagged Queuing + message will not occur while the partition table is being printed. + In addition, some devices do not properly handle the transition + from non-tagged to tagged commands, so it is necessary to wait + until there are no pending commands for a target device + before queuing tagged commands. + */ + if (adapter->cmds_since_rst[tgt_id]++ >= BLOGIC_MAX_TAG_DEPTH && + !tgt_flags->tagq_active && + adapter->active_cmds[tgt_id] == 0 + && tgt_flags->tagq_ok && + (adapter->tagq_ok & (1 << tgt_id))) { + tgt_flags->tagq_active = true; + blogic_notice("Tagged Queuing now active for Target %d\n", + adapter, tgt_id); + } + if (tgt_flags->tagq_active) { + enum blogic_queuetag queuetag = BLOGIC_SIMPLETAG; + /* + When using Tagged Queuing with Simple Queue Tags, it + appears that disk drive controllers do not guarantee that + a queued command will not remain in a disconnected state + indefinitely if commands that read or write nearer the + head position continue to arrive without interruption. + Therefore, for each Target Device this driver keeps track + of the last time either the queue was empty or an Ordered + Queue Tag was issued. If more than 4 seconds (one fifth + of the 20 second disk timeout) have elapsed since this + last sequence point, this command will be issued with an + Ordered Queue Tag rather than a Simple Queue Tag, which + forces the Target Device to complete all previously + queued commands before this command may be executed. 
+ */ + if (adapter->active_cmds[tgt_id] == 0) + adapter->last_seqpoint[tgt_id] = jiffies; + else if (time_after(jiffies, + adapter->last_seqpoint[tgt_id] + 4 * HZ)) { + adapter->last_seqpoint[tgt_id] = jiffies; + queuetag = BLOGIC_ORDEREDTAG; + } + if (adapter->ext_lun) { + ccb->tag_enable = true; + ccb->queuetag = queuetag; + } else { + ccb->legacytag_enable = true; + ccb->legacy_tag = queuetag; + } + } + memcpy(ccb->cdb, cdb, cdblen); + ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE; + ccb->command = command; + sense_buf = dma_map_single(&adapter->pci_device->dev, + command->sense_buffer, ccb->sense_datalen, + DMA_FROM_DEVICE); + if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) { + blogic_err("DMA mapping for sense data buffer failed\n", + adapter); + blogic_dealloc_ccb(ccb, 0); + return SCSI_MLQUEUE_HOST_BUSY; + } + ccb->sensedata = sense_buf; + if (blogic_multimaster_type(adapter)) { + /* + Place the CCB in an Outgoing Mailbox. The higher levels + of the SCSI Subsystem should not attempt to queue more + commands than can be placed in Outgoing Mailboxes, so + there should always be one free. In the unlikely event + that there are none available, wait 1 second and try + again. If that fails, the Host Adapter is probably hung + so signal an error as a Host Adapter Hard Reset should + be initiated soon. + */ + if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, ccb)) { + spin_unlock_irq(adapter->scsi_host->host_lock); + blogic_warn("Unable to write Outgoing Mailbox - Pausing for 1 second\n", adapter); + blogic_delay(1); + spin_lock_irq(adapter->scsi_host->host_lock); + if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, + ccb)) { + blogic_warn("Still unable to write Outgoing Mailbox - Host Adapter Dead?\n", adapter); + blogic_dealloc_ccb(ccb, 1); + command->result = DID_ERROR << 16; + scsi_done(command); + } + } + } else { + /* + Call the FlashPoint SCCB Manager to start execution of + the CCB. + */ + ccb->status = BLOGIC_CCB_ACTIVE; + adapter->active_cmds[tgt_id]++; + tgt_stats[tgt_id].cmds_tried++; + FlashPoint_StartCCB(adapter->cardhandle, ccb); + /* + The Command may have already completed and + blogic_qcompleted_ccb been called, or it may still be + pending. + */ + if (ccb->status == BLOGIC_CCB_COMPLETE) + blogic_process_ccbs(adapter); + } + return 0; +} + +static DEF_SCSI_QCMD(blogic_qcmd) + +#if 0 +/* + blogic_abort aborts Command if possible. +*/ + +static int blogic_abort(struct scsi_cmnd *command) +{ + struct blogic_adapter *adapter = + (struct blogic_adapter *) command->device->host->hostdata; + + int tgt_id = command->device->id; + struct blogic_ccb *ccb; + blogic_inc_count(&adapter->tgt_stats[tgt_id].aborts_request); + + /* + Attempt to find an Active CCB for this Command. If no Active + CCB for this Command is found, then no Abort is necessary. + */ + for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all) + if (ccb->command == command) + break; + if (ccb == NULL) { + blogic_warn("Unable to Abort Command to Target %d - No CCB Found\n", adapter, tgt_id); + return SUCCESS; + } else if (ccb->status == BLOGIC_CCB_COMPLETE) { + blogic_warn("Unable to Abort Command to Target %d - CCB Completed\n", adapter, tgt_id); + return SUCCESS; + } else if (ccb->status == BLOGIC_CCB_RESET) { + blogic_warn("Unable to Abort Command to Target %d - CCB Reset\n", adapter, tgt_id); + return SUCCESS; + } + if (blogic_multimaster_type(adapter)) { + /* + Attempt to Abort this CCB. 
MultiMaster Firmware versions + prior to 5.xx do not generate Abort Tag messages, but only + generate the non-tagged Abort message. Since non-tagged + commands are not sent by the Host Adapter until the queue + of outstanding tagged commands has completed, and the + Abort message is treated as a non-tagged command, it is + effectively impossible to abort commands when Tagged + Queuing is active. Firmware version 5.xx does generate + Abort Tag messages, so it is possible to abort commands + when Tagged Queuing is active. + */ + if (adapter->tgt_flags[tgt_id].tagq_active && + adapter->fw_ver[0] < '5') { + blogic_warn("Unable to Abort CCB #%ld to Target %d - Abort Tag Not Supported\n", adapter, ccb->serial, tgt_id); + return FAILURE; + } else if (blogic_write_outbox(adapter, BLOGIC_MBOX_ABORT, + ccb)) { + blogic_warn("Aborting CCB #%ld to Target %d\n", + adapter, ccb->serial, tgt_id); + blogic_inc_count(&adapter->tgt_stats[tgt_id].aborts_tried); + return SUCCESS; + } else { + blogic_warn("Unable to Abort CCB #%ld to Target %d - No Outgoing Mailboxes\n", adapter, ccb->serial, tgt_id); + return FAILURE; + } + } else { + /* + Call the FlashPoint SCCB Manager to abort execution of + the CCB. + */ + blogic_warn("Aborting CCB #%ld to Target %d\n", adapter, + ccb->serial, tgt_id); + blogic_inc_count(&adapter->tgt_stats[tgt_id].aborts_tried); + FlashPoint_AbortCCB(adapter->cardhandle, ccb); + /* + The Abort may have already been completed and + blogic_qcompleted_ccb been called, or it + may still be pending. + */ + if (ccb->status == BLOGIC_CCB_COMPLETE) + blogic_process_ccbs(adapter); + return SUCCESS; + } + return SUCCESS; +} + +#endif +/* + blogic_resetadapter resets Host Adapter if possible, marking all + currently executing SCSI Commands as having been Reset. +*/ + +static int blogic_resetadapter(struct blogic_adapter *adapter, bool hard_reset) +{ + struct blogic_ccb *ccb; + int tgt_id; + + /* + * Attempt to Reset and Reinitialize the Host Adapter. + */ + + if (!(blogic_hwreset(adapter, hard_reset) && + blogic_initadapter(adapter))) { + blogic_err("Resetting %s Failed\n", adapter, + adapter->full_model); + return FAILURE; + } + + /* + * Deallocate all currently executing CCBs. + */ + + for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all) + if (ccb->status == BLOGIC_CCB_ACTIVE) + blogic_dealloc_ccb(ccb, 1); + /* + * Wait a few seconds between the Host Adapter Hard Reset which + * initiates a SCSI Bus Reset and issuing any SCSI Commands. Some + * SCSI devices get confused if they receive SCSI Commands too soon + * after a SCSI Bus Reset. + */ + + if (hard_reset) { + spin_unlock_irq(adapter->scsi_host->host_lock); + blogic_delay(adapter->bus_settle_time); + spin_lock_irq(adapter->scsi_host->host_lock); + } + + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) { + adapter->last_resettried[tgt_id] = jiffies; + adapter->last_resetdone[tgt_id] = jiffies; + } + return SUCCESS; +} + +/* + blogic_diskparam returns the Heads/Sectors/Cylinders BIOS Disk + Parameters for Disk. The default disk geometry is 64 heads, 32 sectors, and + the appropriate number of cylinders so as not to exceed drive capacity. In + order for disks equal to or larger than 1 GB to be addressable by the BIOS + without exceeding the BIOS limitation of 1024 cylinders, Extended Translation + may be enabled in AutoSCSI on FlashPoint Host Adapters and on "W" and "C" + series MultiMaster Host Adapters, or by a dip switch setting on "S" and "A" + series MultiMaster Host Adapters. 
With Extended Translation enabled, drives + between 1 GB inclusive and 2 GB exclusive are given a disk geometry of 128 + heads and 32 sectors, and drives above 2 GB inclusive are given a disk + geometry of 255 heads and 63 sectors. However, if the BIOS detects that the + Extended Translation setting does not match the geometry in the partition + table, then the translation inferred from the partition table will be used by + the BIOS, and a warning may be displayed. +*/ + +static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev, + sector_t capacity, int *params) +{ + struct blogic_adapter *adapter = + (struct blogic_adapter *) sdev->host->hostdata; + struct bios_diskparam *diskparam = (struct bios_diskparam *) params; + unsigned char *buf; + + if (adapter->ext_trans_enable && capacity >= 2 * 1024 * 1024 /* 1 GB in 512 byte sectors */) { + if (capacity >= 4 * 1024 * 1024 /* 2 GB in 512 byte sectors */) { + diskparam->heads = 255; + diskparam->sectors = 63; + } else { + diskparam->heads = 128; + diskparam->sectors = 32; + } + } else { + diskparam->heads = 64; + diskparam->sectors = 32; + } + diskparam->cylinders = (unsigned long) capacity / (diskparam->heads * diskparam->sectors); + buf = scsi_bios_ptable(dev); + if (buf == NULL) + return 0; + /* + If the boot sector partition table flag is valid, search for + a partition table entry whose end_head matches one of the + standard BusLogic geometry translations (64/32, 128/32, or 255/63). + */ + if (*(unsigned short *) (buf + 64) == MSDOS_LABEL_MAGIC) { + struct msdos_partition *part1_entry = + (struct msdos_partition *)buf; + struct msdos_partition *part_entry = part1_entry; + int saved_cyl = diskparam->cylinders, part_no; + unsigned char part_end_head = 0, part_end_sector = 0; + + for (part_no = 0; part_no < 4; part_no++) { + part_end_head = part_entry->end_head; + part_end_sector = part_entry->end_sector & 0x3F; + if (part_end_head == 64 - 1) { + diskparam->heads = 64; + diskparam->sectors = 32; + break; + } else if (part_end_head == 128 - 1) { + diskparam->heads = 128; + diskparam->sectors = 32; + break; + } else if (part_end_head == 255 - 1) { + diskparam->heads = 255; + diskparam->sectors = 63; + break; + } + part_entry++; + } + if (part_no == 4) { + part_end_head = part1_entry->end_head; + part_end_sector = part1_entry->end_sector & 0x3F; + } + diskparam->cylinders = (unsigned long) capacity / (diskparam->heads * diskparam->sectors); + if (part_no < 4 && part_end_sector == diskparam->sectors) { + if (diskparam->cylinders != saved_cyl) + blogic_warn("Adopting Geometry %d/%d from Partition Table\n", adapter, diskparam->heads, diskparam->sectors); + } else if (part_end_head > 0 || part_end_sector > 0) { + blogic_warn("Warning: Partition Table appears to have Geometry %d/%d which is\n", adapter, part_end_head + 1, part_end_sector); + blogic_warn("not compatible with current BusLogic Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors); + } + } + kfree(buf); + return 0; +} + + +/* + BugLogic_ProcDirectoryInfo implements /proc/scsi/BusLogic/. 
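+
+  Writing to the file clears the accumulated statistics, while reading it
+  reports the Driver Queue Depth, the number of allocated CCBs, and the per
+  Target Device data transfer and error recovery statistics.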
+*/ + +static int blogic_write_info(struct Scsi_Host *shost, char *procbuf, + int bytes_avail) +{ + struct blogic_adapter *adapter = + (struct blogic_adapter *) shost->hostdata; + struct blogic_tgt_stats *tgt_stats; + + tgt_stats = adapter->tgt_stats; + adapter->ext_resets = 0; + adapter->adapter_intern_errors = 0; + memset(tgt_stats, 0, BLOGIC_MAXDEV * sizeof(struct blogic_tgt_stats)); + return 0; +} + +static int blogic_show_info(struct seq_file *m, struct Scsi_Host *shost) +{ + struct blogic_adapter *adapter = (struct blogic_adapter *) shost->hostdata; + struct blogic_tgt_stats *tgt_stats; + int tgt; + + tgt_stats = adapter->tgt_stats; + seq_write(m, adapter->msgbuf, adapter->msgbuflen); + seq_printf(m, "\n\ +Current Driver Queue Depth: %d\n\ +Currently Allocated CCBs: %d\n", adapter->drvr_qdepth, adapter->alloc_ccbs); + seq_puts(m, "\n\n\ + DATA TRANSFER STATISTICS\n\ +\n\ +Target Tagged Queuing Queue Depth Active Attempted Completed\n\ +====== ============== =========== ====== ========= =========\n"); + for (tgt = 0; tgt < adapter->maxdev; tgt++) { + struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt]; + if (!tgt_flags->tgt_exists) + continue; + seq_printf(m, " %2d %s", tgt, (tgt_flags->tagq_ok ? (tgt_flags->tagq_active ? " Active" : (adapter->tagq_ok & (1 << tgt) + ? " Permitted" : " Disabled")) + : "Not Supported")); + seq_printf(m, + " %3d %3u %9u %9u\n", adapter->qdepth[tgt], adapter->active_cmds[tgt], tgt_stats[tgt].cmds_tried, tgt_stats[tgt].cmds_complete); + } + seq_puts(m, "\n\ +Target Read Commands Write Commands Total Bytes Read Total Bytes Written\n\ +====== ============= ============== =================== ===================\n"); + for (tgt = 0; tgt < adapter->maxdev; tgt++) { + struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt]; + if (!tgt_flags->tgt_exists) + continue; + seq_printf(m, " %2d %9u %9u", tgt, tgt_stats[tgt].read_cmds, tgt_stats[tgt].write_cmds); + if (tgt_stats[tgt].bytesread.billions > 0) + seq_printf(m, " %9u%09u", tgt_stats[tgt].bytesread.billions, tgt_stats[tgt].bytesread.units); + else + seq_printf(m, " %9u", tgt_stats[tgt].bytesread.units); + if (tgt_stats[tgt].byteswritten.billions > 0) + seq_printf(m, " %9u%09u\n", tgt_stats[tgt].byteswritten.billions, tgt_stats[tgt].byteswritten.units); + else + seq_printf(m, " %9u\n", tgt_stats[tgt].byteswritten.units); + } + seq_puts(m, "\n\ +Target Command 0-1KB 1-2KB 2-4KB 4-8KB 8-16KB\n\ +====== ======= ========= ========= ========= ========= =========\n"); + for (tgt = 0; tgt < adapter->maxdev; tgt++) { + struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt]; + if (!tgt_flags->tgt_exists) + continue; + seq_printf(m, + " %2d Read %9u %9u %9u %9u %9u\n", tgt, + tgt_stats[tgt].read_sz_buckets[0], + tgt_stats[tgt].read_sz_buckets[1], tgt_stats[tgt].read_sz_buckets[2], tgt_stats[tgt].read_sz_buckets[3], tgt_stats[tgt].read_sz_buckets[4]); + seq_printf(m, + " %2d Write %9u %9u %9u %9u %9u\n", tgt, + tgt_stats[tgt].write_sz_buckets[0], + tgt_stats[tgt].write_sz_buckets[1], tgt_stats[tgt].write_sz_buckets[2], tgt_stats[tgt].write_sz_buckets[3], tgt_stats[tgt].write_sz_buckets[4]); + } + seq_puts(m, "\n\ +Target Command 16-32KB 32-64KB 64-128KB 128-256KB 256KB+\n\ +====== ======= ========= ========= ========= ========= =========\n"); + for (tgt = 0; tgt < adapter->maxdev; tgt++) { + struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt]; + if (!tgt_flags->tgt_exists) + continue; + seq_printf(m, + " %2d Read %9u %9u %9u %9u %9u\n", tgt, + tgt_stats[tgt].read_sz_buckets[5], + 
tgt_stats[tgt].read_sz_buckets[6], tgt_stats[tgt].read_sz_buckets[7], tgt_stats[tgt].read_sz_buckets[8], tgt_stats[tgt].read_sz_buckets[9]); + seq_printf(m, + " %2d Write %9u %9u %9u %9u %9u\n", tgt, + tgt_stats[tgt].write_sz_buckets[5], + tgt_stats[tgt].write_sz_buckets[6], tgt_stats[tgt].write_sz_buckets[7], tgt_stats[tgt].write_sz_buckets[8], tgt_stats[tgt].write_sz_buckets[9]); + } + seq_puts(m, "\n\n\ + ERROR RECOVERY STATISTICS\n\ +\n\ + Command Aborts Bus Device Resets Host Adapter Resets\n\ +Target Requested Completed Requested Completed Requested Completed\n\ + ID \\\\\\\\ Attempted //// \\\\\\\\ Attempted //// \\\\\\\\ Attempted ////\n\ +====== ===== ===== ===== ===== ===== ===== ===== ===== =====\n"); + for (tgt = 0; tgt < adapter->maxdev; tgt++) { + struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt]; + if (!tgt_flags->tgt_exists) + continue; + seq_printf(m, " %2d %5d %5d %5d %5d %5d %5d %5d %5d %5d\n", + tgt, tgt_stats[tgt].aborts_request, + tgt_stats[tgt].aborts_tried, + tgt_stats[tgt].aborts_done, + tgt_stats[tgt].bdr_request, + tgt_stats[tgt].bdr_tried, + tgt_stats[tgt].bdr_done, + tgt_stats[tgt].adapter_reset_req, + tgt_stats[tgt].adapter_reset_attempt, + tgt_stats[tgt].adapter_reset_done); + } + seq_printf(m, "\nExternal Host Adapter Resets: %d\n", adapter->ext_resets); + seq_printf(m, "Host Adapter Internal Errors: %d\n", adapter->adapter_intern_errors); + return 0; +} + + +/* + blogic_msg prints Driver Messages. +*/ +__printf(2, 4) +static void blogic_msg(enum blogic_msglevel msglevel, char *fmt, + struct blogic_adapter *adapter, ...) +{ + static char buf[BLOGIC_LINEBUF_SIZE]; + static bool begin = true; + va_list args; + int len = 0; + + va_start(args, adapter); + len = vscnprintf(buf, sizeof(buf), fmt, args); + va_end(args); + if (msglevel == BLOGIC_ANNOUNCE_LEVEL) { + static int msglines = 0; + strcpy(&adapter->msgbuf[adapter->msgbuflen], buf); + adapter->msgbuflen += len; + if (++msglines <= 2) + printk("%sscsi: %s", blogic_msglevelmap[msglevel], buf); + } else if (msglevel == BLOGIC_INFO_LEVEL) { + strcpy(&adapter->msgbuf[adapter->msgbuflen], buf); + adapter->msgbuflen += len; + if (begin) { + if (buf[0] != '\n' || len > 1) + printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf); + } else + pr_cont("%s", buf); + } else { + if (begin) { + if (adapter != NULL && adapter->adapter_initd) + printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf); + else + printk("%s%s", blogic_msglevelmap[msglevel], buf); + } else + pr_cont("%s", buf); + } + begin = (buf[len - 1] == '\n'); +} + + +/* + blogic_parse parses an individual option keyword. It returns true + and updates the pointer if the keyword is recognized and false otherwise. +*/ + +static bool __init blogic_parse(char **str, char *keyword) +{ + char *pointer = *str; + while (*keyword != '\0') { + char strch = *pointer++; + char keywordch = *keyword++; + if (strch >= 'A' && strch <= 'Z') + strch += 'a' - 'Z'; + if (keywordch >= 'A' && keywordch <= 'Z') + keywordch += 'a' - 'Z'; + if (strch != keywordch) + return false; + } + *str = pointer; + return true; +} + + +/* + blogic_parseopts handles processing of BusLogic Driver Options + specifications. + + BusLogic Driver Options may be specified either via the Linux Kernel Command + Line or via the Loadable Kernel Module Installation Facility. 
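+  For example, a command line such as
+  "BusLogic=QueueDepth:7,TaggedQueuing:Enable,BusSettleTime:10" would request
+  a common Queue Depth of 7 for every Target Device, permit Tagged Queuing on
+  all Target Devices, and select a 10 second Bus Settle Time for the first
+  Host Adapter.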
Driver Options + for multiple host adapters may be specified either by separating the option + strings by a semicolon, or by specifying multiple "BusLogic=" strings on the + command line. Individual option specifications for a single host adapter are + separated by commas. The Probing and Debugging Options apply to all host + adapters whereas the remaining options apply individually only to the + selected host adapter. + + The BusLogic Driver Probing Options are described in + . +*/ + +static int __init blogic_parseopts(char *options) +{ + while (true) { + struct blogic_drvr_options *drvr_opts = + &blogic_drvr_options[blogic_drvr_options_count++]; + int tgt_id; + + memset(drvr_opts, 0, sizeof(struct blogic_drvr_options)); + while (*options != '\0' && *options != ';') { + if (blogic_parse(&options, "NoProbePCI")) + blogic_probe_options.noprobe_pci = true; + else if (blogic_parse(&options, "NoProbe")) + blogic_probe_options.noprobe = true; + else if (blogic_parse(&options, "NoSortPCI")) + blogic_probe_options.nosort_pci = true; + else if (blogic_parse(&options, "MultiMasterFirst")) + blogic_probe_options.multimaster_first = true; + else if (blogic_parse(&options, "FlashPointFirst")) + blogic_probe_options.flashpoint_first = true; + /* Tagged Queuing Options. */ + else if (blogic_parse(&options, "QueueDepth:[") || + blogic_parse(&options, "QD:[")) { + for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) { + unsigned short qdepth = simple_strtoul(options, &options, 0); + if (qdepth > BLOGIC_MAX_TAG_DEPTH) { + blogic_err("BusLogic: Invalid Driver Options (invalid Queue Depth %d)\n", NULL, qdepth); + return 0; + } + drvr_opts->qdepth[tgt_id] = qdepth; + if (*options == ',') + options++; + else if (*options == ']') + break; + else { + blogic_err("BusLogic: Invalid Driver Options (',' or ']' expected at '%s')\n", NULL, options); + return 0; + } + } + if (*options != ']') { + blogic_err("BusLogic: Invalid Driver Options (']' expected at '%s')\n", NULL, options); + return 0; + } else + options++; + } else if (blogic_parse(&options, "QueueDepth:") || blogic_parse(&options, "QD:")) { + unsigned short qdepth = simple_strtoul(options, &options, 0); + if (qdepth == 0 || + qdepth > BLOGIC_MAX_TAG_DEPTH) { + blogic_err("BusLogic: Invalid Driver Options (invalid Queue Depth %d)\n", NULL, qdepth); + return 0; + } + drvr_opts->common_qdepth = qdepth; + for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) + drvr_opts->qdepth[tgt_id] = qdepth; + } else if (blogic_parse(&options, "TaggedQueuing:") || + blogic_parse(&options, "TQ:")) { + if (blogic_parse(&options, "Default")) { + drvr_opts->tagq_ok = 0x0000; + drvr_opts->tagq_ok_mask = 0x0000; + } else if (blogic_parse(&options, "Enable")) { + drvr_opts->tagq_ok = 0xFFFF; + drvr_opts->tagq_ok_mask = 0xFFFF; + } else if (blogic_parse(&options, "Disable")) { + drvr_opts->tagq_ok = 0x0000; + drvr_opts->tagq_ok_mask = 0xFFFF; + } else { + unsigned short tgt_bit; + for (tgt_id = 0, tgt_bit = 1; + tgt_id < BLOGIC_MAXDEV; + tgt_id++, tgt_bit <<= 1) + switch (*options++) { + case 'Y': + drvr_opts->tagq_ok |= tgt_bit; + drvr_opts->tagq_ok_mask |= tgt_bit; + break; + case 'N': + drvr_opts->tagq_ok &= ~tgt_bit; + drvr_opts->tagq_ok_mask |= tgt_bit; + break; + case 'X': + break; + default: + options--; + tgt_id = BLOGIC_MAXDEV; + break; + } + } + } + /* Miscellaneous Options. 
*/ + else if (blogic_parse(&options, "BusSettleTime:") || + blogic_parse(&options, "BST:")) { + unsigned short bus_settle_time = + simple_strtoul(options, &options, 0); + if (bus_settle_time > 5 * 60) { + blogic_err("BusLogic: Invalid Driver Options (invalid Bus Settle Time %d)\n", NULL, bus_settle_time); + return 0; + } + drvr_opts->bus_settle_time = bus_settle_time; + } else if (blogic_parse(&options, + "InhibitTargetInquiry")) + drvr_opts->stop_tgt_inquiry = true; + /* Debugging Options. */ + else if (blogic_parse(&options, "TraceProbe")) + blogic_global_options.trace_probe = true; + else if (blogic_parse(&options, "TraceHardwareReset")) + blogic_global_options.trace_hw_reset = true; + else if (blogic_parse(&options, "TraceConfiguration")) + blogic_global_options.trace_config = true; + else if (blogic_parse(&options, "TraceErrors")) + blogic_global_options.trace_err = true; + else if (blogic_parse(&options, "Debug")) { + blogic_global_options.trace_probe = true; + blogic_global_options.trace_hw_reset = true; + blogic_global_options.trace_config = true; + blogic_global_options.trace_err = true; + } + if (*options == ',') + options++; + else if (*options != ';' && *options != '\0') { + blogic_err("BusLogic: Unexpected Driver Option '%s' ignored\n", NULL, options); + *options = '\0'; + } + } + if (!(blogic_drvr_options_count == 0 || + blogic_probeinfo_count == 0 || + blogic_drvr_options_count == blogic_probeinfo_count)) { + blogic_err("BusLogic: Invalid Driver Options (all or no I/O Addresses must be specified)\n", NULL); + return 0; + } + /* + Tagged Queuing is disabled when the Queue Depth is 1 since queuing + multiple commands is not possible. + */ + for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) + if (drvr_opts->qdepth[tgt_id] == 1) { + unsigned short tgt_bit = 1 << tgt_id; + drvr_opts->tagq_ok &= ~tgt_bit; + drvr_opts->tagq_ok_mask |= tgt_bit; + } + if (*options == ';') + options++; + if (*options == '\0') + return 0; + } + return 1; +} + +/* + Get it all started +*/ + +static const struct scsi_host_template blogic_template = { + .module = THIS_MODULE, + .proc_name = "BusLogic", + .write_info = blogic_write_info, + .show_info = blogic_show_info, + .name = "BusLogic", + .info = blogic_drvr_info, + .queuecommand = blogic_qcmd, + .slave_configure = blogic_slaveconfig, + .bios_param = blogic_diskparam, + .eh_host_reset_handler = blogic_hostreset, +#if 0 + .eh_abort_handler = blogic_abort, +#endif + .max_sectors = 128, +}; + +/* + blogic_setup handles processing of Kernel Command Line Arguments. +*/ + +static int __init blogic_setup(char *str) +{ + int ints[3]; + + (void) get_options(str, ARRAY_SIZE(ints), ints); + + if (ints[0] != 0) { + blogic_err("BusLogic: Obsolete Command Line Entry Format Ignored\n", NULL); + return 0; + } + if (str == NULL || *str == '\0') + return 0; + return blogic_parseopts(str); +} + +/* + * Exit function. Deletes all hosts associated with this driver. 
+ */ + +static void __exit blogic_exit(void) +{ + struct blogic_adapter *ha, *next; + + list_for_each_entry_safe(ha, next, &blogic_host_list, host_list) + blogic_deladapter(ha); +} + +__setup("BusLogic=", blogic_setup); + +#ifdef MODULE +/*static struct pci_device_id blogic_pci_tbl[] = { + { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { } +};*/ +static const struct pci_device_id blogic_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER)}, + {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC)}, + {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT)}, + {0, }, +}; +#endif +MODULE_DEVICE_TABLE(pci, blogic_pci_tbl); + +module_init(blogic_init); +module_exit(blogic_exit); diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h new file mode 100644 index 000000000..7d1ec10f2 --- /dev/null +++ b/drivers/scsi/BusLogic.h @@ -0,0 +1,1284 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + + Linux Driver for BusLogic MultiMaster and FlashPoint SCSI Host Adapters + + Copyright 1995-1998 by Leonard N. Zubkoff + + + The author respectfully requests that any modifications to this software be + sent directly to him for evaluation and testing. + + Special thanks to Wayne Yen, Jin-Lon Hon, and Alex Win of BusLogic, whose + advice has been invaluable, to David Gentzel, for writing the original Linux + BusLogic driver, and to Paul Gortmaker, for being such a dedicated test site. + + Finally, special thanks to Mylex/BusLogic for making the FlashPoint SCCB + Manager available as freely redistributable source code. + +*/ + +#ifndef _BUSLOGIC_H +#define _BUSLOGIC_H + + +#ifndef PACKED +#define PACKED __attribute__((packed)) +#endif + +/* + Define the maximum number of BusLogic Host Adapters supported by this driver. +*/ + +#define BLOGIC_MAX_ADAPTERS 16 + + +/* + Define the maximum number of Target Devices supported by this driver. +*/ + +#define BLOGIC_MAXDEV 16 + + +/* + Define the maximum number of Scatter/Gather Segments used by this driver. + For optimal performance, it is important that this limit be at least as + large as the largest single request generated by the I/O Subsystem. +*/ + +#define BLOGIC_SG_LIMIT 128 + + +/* + Define the maximum, maximum automatic, minimum automatic, and default Queue + Depth to allow for Target Devices depending on whether or not they support + Tagged Queuing and whether or not ISA Bounce Buffers are required. +*/ + +#define BLOGIC_MAX_TAG_DEPTH 64 +#define BLOGIC_MAX_AUTO_TAG_DEPTH 28 +#define BLOGIC_MIN_AUTO_TAG_DEPTH 7 +#define BLOGIC_TAG_DEPTH_BB 3 +#define BLOGIC_UNTAG_DEPTH 3 +#define BLOGIC_UNTAG_DEPTH_BB 2 + + +/* + Define the default amount of time in seconds to wait between a Host Adapter + Hard Reset which initiates a SCSI Bus Reset and issuing any SCSI commands. + Some SCSI devices get confused if they receive SCSI commands too soon after + a SCSI Bus Reset. +*/ + +#define BLOGIC_BUS_SETTLE_TIME 2 + + +/* + Define the maximum number of Mailboxes that should be used for MultiMaster + Host Adapters. This number is chosen to be larger than the maximum Host + Adapter Queue Depth and small enough so that the Host Adapter structure + does not cross an allocation block size boundary. 
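+
+  As a rough sizing illustration (using the 8 byte Outgoing and Incoming
+  Mailbox layouts defined later in this header), the memory needed for the
+  maximum mailbox count is
+
+      211 * (sizeof(struct blogic_outbox) + sizeof(struct blogic_inbox))
+          == 211 * (8 + 8) == 3376 bytes
+
+  which stays comfortably below a 4 KB allocation block.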
+*/ + +#define BLOGIC_MAX_MAILBOX 211 + + +/* + Define the number of CCBs that should be allocated as a group to optimize + Kernel memory allocation. +*/ + +#define BLOGIC_CCB_GRP_ALLOCSIZE 7 + + +/* + Define the Host Adapter Line and Message Buffer Sizes. +*/ + +#define BLOGIC_LINEBUF_SIZE 100 +#define BLOGIC_MSGBUF_SIZE 9700 + + +/* + Define the Driver Message Levels. +*/ + +enum blogic_msglevel { + BLOGIC_ANNOUNCE_LEVEL = 0, + BLOGIC_INFO_LEVEL = 1, + BLOGIC_NOTICE_LEVEL = 2, + BLOGIC_WARN_LEVEL = 3, + BLOGIC_ERR_LEVEL = 4 +}; + +static char *blogic_msglevelmap[] = { KERN_NOTICE, KERN_NOTICE, KERN_NOTICE, KERN_WARNING, KERN_ERR }; + + +/* + Define Driver Message macros. +*/ + +#define blogic_announce(format, args...) \ + blogic_msg(BLOGIC_ANNOUNCE_LEVEL, format, ##args) + +#define blogic_info(format, args...) \ + blogic_msg(BLOGIC_INFO_LEVEL, format, ##args) + +#define blogic_notice(format, args...) \ + blogic_msg(BLOGIC_NOTICE_LEVEL, format, ##args) + +#define blogic_warn(format, args...) \ + blogic_msg(BLOGIC_WARN_LEVEL, format, ##args) + +#define blogic_err(format, args...) \ + blogic_msg(BLOGIC_ERR_LEVEL, format, ##args) + + +/* + Define the types of BusLogic Host Adapters that are supported and the number + of I/O Addresses required by each type. +*/ + +enum blogic_adapter_type { + BLOGIC_MULTIMASTER = 1, + BLOGIC_FLASHPOINT = 2 +} PACKED; + +#define BLOGIC_MULTIMASTER_ADDR_COUNT 4 +#define BLOGIC_FLASHPOINT_ADDR_COUNT 256 + +static int blogic_adapter_addr_count[3] = { 0, BLOGIC_MULTIMASTER_ADDR_COUNT, BLOGIC_FLASHPOINT_ADDR_COUNT }; + + +/* + Define macros for testing the Host Adapter Type. +*/ + +#ifdef CONFIG_SCSI_FLASHPOINT + +#define blogic_multimaster_type(adapter) \ + (adapter->adapter_type == BLOGIC_MULTIMASTER) + +#define blogic_flashpoint_type(adapter) \ + (adapter->adapter_type == BLOGIC_FLASHPOINT) + +#else + +#define blogic_multimaster_type(adapter) (true) +#define blogic_flashpoint_type(adapter) (false) + +#endif + + +/* + Define the possible Host Adapter Bus Types. +*/ + +enum blogic_adapter_bus_type { + BLOGIC_UNKNOWN_BUS = 0, + BLOGIC_ISA_BUS = 1, + BLOGIC_EISA_BUS = 2, + BLOGIC_PCI_BUS = 3, + BLOGIC_VESA_BUS = 4, + BLOGIC_MCA_BUS = 5 +} PACKED; + +static char *blogic_adapter_busnames[] = { "Unknown", "ISA", "EISA", "PCI", "VESA", "MCA" }; + +static enum blogic_adapter_bus_type blogic_adater_bus_types[] = { + BLOGIC_VESA_BUS, /* BT-4xx */ + BLOGIC_ISA_BUS, /* BT-5xx */ + BLOGIC_MCA_BUS, /* BT-6xx */ + BLOGIC_EISA_BUS, /* BT-7xx */ + BLOGIC_UNKNOWN_BUS, /* BT-8xx */ + BLOGIC_PCI_BUS /* BT-9xx */ +}; + +/* + Define the possible Host Adapter BIOS Disk Geometry Translations. +*/ + +enum blogic_bios_diskgeometry { + BLOGIC_BIOS_NODISK = 0, + BLOGIC_BIOS_DISK64x32 = 1, + BLOGIC_BIOS_DISK128x32 = 2, + BLOGIC_BIOS_DISK255x63 = 3 +} PACKED; + + +/* + Define a 10^18 Statistics Byte Counter data type. +*/ + +struct blogic_byte_count { + unsigned int units; + unsigned int billions; +}; + + +/* + Define the structure for I/O Address and Bus Probing Information. +*/ + +struct blogic_probeinfo { + enum blogic_adapter_type adapter_type; + enum blogic_adapter_bus_type adapter_bus_type; + unsigned long io_addr; + unsigned long pci_addr; + struct pci_dev *pci_device; + unsigned char bus; + unsigned char dev; + unsigned char irq_ch; +}; + +/* + Define the Probe Options. 
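+
+  Each flag corresponds to a keyword understood by blogic_parseopts(), so a
+  kernel command line such as
+
+      BusLogic=NoProbePCI,MultiMasterFirst
+
+  has the same effect as setting
+
+      blogic_probe_options.noprobe_pci = true;
+      blogic_probe_options.multimaster_first = true;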
+*/ + +struct blogic_probe_options { + bool noprobe:1; /* Bit 0 */ + bool noprobe_pci:1; /* Bit 2 */ + bool nosort_pci:1; /* Bit 3 */ + bool multimaster_first:1; /* Bit 4 */ + bool flashpoint_first:1; /* Bit 5 */ +}; + +/* + Define the Global Options. +*/ + +struct blogic_global_options { + bool trace_probe:1; /* Bit 0 */ + bool trace_hw_reset:1; /* Bit 1 */ + bool trace_config:1; /* Bit 2 */ + bool trace_err:1; /* Bit 3 */ +}; + +/* + Define the BusLogic SCSI Host Adapter I/O Register Offsets. +*/ + +#define BLOGIC_CNTRL_REG 0 /* WO register */ +#define BLOGIC_STATUS_REG 0 /* RO register */ +#define BLOGIC_CMD_PARM_REG 1 /* WO register */ +#define BLOGIC_DATAIN_REG 1 /* RO register */ +#define BLOGIC_INT_REG 2 /* RO register */ +#define BLOGIC_GEOMETRY_REG 3 /* RO register */ + +/* + Define the structure of the write-only Control Register. +*/ + +union blogic_cntrl_reg { + unsigned char all; + struct { + unsigned char:4; /* Bits 0-3 */ + bool bus_reset:1; /* Bit 4 */ + bool int_reset:1; /* Bit 5 */ + bool soft_reset:1; /* Bit 6 */ + bool hard_reset:1; /* Bit 7 */ + } cr; +}; + +/* + Define the structure of the read-only Status Register. +*/ + +union blogic_stat_reg { + unsigned char all; + struct { + bool cmd_invalid:1; /* Bit 0 */ + bool rsvd:1; /* Bit 1 */ + bool datain_ready:1; /* Bit 2 */ + bool cmd_param_busy:1; /* Bit 3 */ + bool adapter_ready:1; /* Bit 4 */ + bool init_reqd:1; /* Bit 5 */ + bool diag_failed:1; /* Bit 6 */ + bool diag_active:1; /* Bit 7 */ + } sr; +}; + +/* + Define the structure of the read-only Interrupt Register. +*/ + +union blogic_int_reg { + unsigned char all; + struct { + bool mailin_loaded:1; /* Bit 0 */ + bool mailout_avail:1; /* Bit 1 */ + bool cmd_complete:1; /* Bit 2 */ + bool ext_busreset:1; /* Bit 3 */ + unsigned char rsvd:3; /* Bits 4-6 */ + bool int_valid:1; /* Bit 7 */ + } ir; +}; + +/* + Define the structure of the read-only Geometry Register. +*/ + +union blogic_geo_reg { + unsigned char all; + struct { + enum blogic_bios_diskgeometry d0_geo:2; /* Bits 0-1 */ + enum blogic_bios_diskgeometry d1_geo:2; /* Bits 2-3 */ + unsigned char:3; /* Bits 4-6 */ + bool ext_trans_enable:1; /* Bit 7 */ + } gr; +}; + +/* + Define the BusLogic SCSI Host Adapter Command Register Operation Codes. 
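+
+  An operation code is issued by writing it to the Command/Parameter
+  Register; blogic_execmbox() later in this header, for example, is simply
+
+      blogic_setcmdparam(adapter, BLOGIC_EXEC_MBOX_CMD);
+
+  Commands that carry parameter or reply bytes go through the driver's
+  command helper in BusLogic.c, which feeds the additional bytes through
+  the same register and reads any reply from the Data In Register.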
+*/ + +enum blogic_opcode { + BLOGIC_TEST_CMP_COMPLETE = 0x00, + BLOGIC_INIT_MBOX = 0x01, + BLOGIC_EXEC_MBOX_CMD = 0x02, + BLOGIC_EXEC_BIOS_CMD = 0x03, + BLOGIC_GET_BOARD_ID = 0x04, + BLOGIC_ENABLE_OUTBOX_AVAIL_INT = 0x05, + BLOGIC_SET_SELECT_TIMEOUT = 0x06, + BLOGIC_SET_PREEMPT_TIME = 0x07, + BLOGIC_SET_TIMEOFF_BUS = 0x08, + BLOGIC_SET_TXRATE = 0x09, + BLOGIC_INQ_DEV0TO7 = 0x0A, + BLOGIC_INQ_CONFIG = 0x0B, + BLOGIC_TGT_MODE = 0x0C, + BLOGIC_INQ_SETUPINFO = 0x0D, + BLOGIC_WRITE_LOCALRAM = 0x1A, + BLOGIC_READ_LOCALRAM = 0x1B, + BLOGIC_WRITE_BUSMASTER_FIFO = 0x1C, + BLOGIC_READ_BUSMASTER_FIFO = 0x1D, + BLOGIC_ECHO_CMDDATA = 0x1F, + BLOGIC_ADAPTER_DIAG = 0x20, + BLOGIC_SET_OPTIONS = 0x21, + BLOGIC_INQ_DEV8TO15 = 0x23, + BLOGIC_INQ_DEV = 0x24, + BLOGIC_DISABLE_INT = 0x25, + BLOGIC_INIT_EXT_MBOX = 0x81, + BLOGIC_EXEC_SCS_CMD = 0x83, + BLOGIC_INQ_FWVER_D3 = 0x84, + BLOGIC_INQ_FWVER_LETTER = 0x85, + BLOGIC_INQ_PCI_INFO = 0x86, + BLOGIC_INQ_MODELNO = 0x8B, + BLOGIC_INQ_SYNC_PERIOD = 0x8C, + BLOGIC_INQ_EXTSETUP = 0x8D, + BLOGIC_STRICT_RR = 0x8F, + BLOGIC_STORE_LOCALRAM = 0x90, + BLOGIC_FETCH_LOCALRAM = 0x91, + BLOGIC_STORE_TO_EEPROM = 0x92, + BLOGIC_LOAD_AUTOSCSICODE = 0x94, + BLOGIC_MOD_IOADDR = 0x95, + BLOGIC_SETCCB_FMT = 0x96, + BLOGIC_WRITE_INQBUF = 0x9A, + BLOGIC_READ_INQBUF = 0x9B, + BLOGIC_FLASH_LOAD = 0xA7, + BLOGIC_READ_SCAMDATA = 0xA8, + BLOGIC_WRITE_SCAMDATA = 0xA9 +}; + +/* + Define the Inquire Board ID reply structure. +*/ + +struct blogic_board_id { + unsigned char type; /* Byte 0 */ + unsigned char custom_features; /* Byte 1 */ + unsigned char fw_ver_digit1; /* Byte 2 */ + unsigned char fw_ver_digit2; /* Byte 3 */ +}; + +/* + Define the Inquire Configuration reply structure. +*/ + +struct blogic_config { + unsigned char:5; /* Byte 0 Bits 0-4 */ + bool dma_ch5:1; /* Byte 0 Bit 5 */ + bool dma_ch6:1; /* Byte 0 Bit 6 */ + bool dma_ch7:1; /* Byte 0 Bit 7 */ + bool irq_ch9:1; /* Byte 1 Bit 0 */ + bool irq_ch10:1; /* Byte 1 Bit 1 */ + bool irq_ch11:1; /* Byte 1 Bit 2 */ + bool irq_ch12:1; /* Byte 1 Bit 3 */ + unsigned char:1; /* Byte 1 Bit 4 */ + bool irq_ch14:1; /* Byte 1 Bit 5 */ + bool irq_ch15:1; /* Byte 1 Bit 6 */ + unsigned char:1; /* Byte 1 Bit 7 */ + unsigned char id:4; /* Byte 2 Bits 0-3 */ + unsigned char:4; /* Byte 2 Bits 4-7 */ +}; + +/* + Define the Inquire Setup Information reply structure. +*/ + +struct blogic_syncval { + unsigned char offset:4; /* Bits 0-3 */ + unsigned char tx_period:3; /* Bits 4-6 */ + bool sync:1; /* Bit 7 */ +}; + +struct blogic_setup_info { + bool sync:1; /* Byte 0 Bit 0 */ + bool parity:1; /* Byte 0 Bit 1 */ + unsigned char:6; /* Byte 0 Bits 2-7 */ + unsigned char tx_rate; /* Byte 1 */ + unsigned char preempt_time; /* Byte 2 */ + unsigned char timeoff_bus; /* Byte 3 */ + unsigned char mbox_count; /* Byte 4 */ + unsigned char mbox_addr[3]; /* Bytes 5-7 */ + struct blogic_syncval sync0to7[8]; /* Bytes 8-15 */ + unsigned char disconnect_ok0to7; /* Byte 16 */ + unsigned char sig; /* Byte 17 */ + unsigned char char_d; /* Byte 18 */ + unsigned char bus_type; /* Byte 19 */ + unsigned char wide_tx_ok0to7; /* Byte 20 */ + unsigned char wide_tx_active0to7; /* Byte 21 */ + struct blogic_syncval sync8to15[8]; /* Bytes 22-29 */ + unsigned char disconnect_ok8to15; /* Byte 30 */ + unsigned char:8; /* Byte 31 */ + unsigned char wide_tx_ok8to15; /* Byte 32 */ + unsigned char wide_tx_active8to15; /* Byte 33 */ +}; + +/* + Define the Initialize Extended Mailbox request structure. 
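+
+  During initialization the driver fills in one of these with the mailbox
+  count and the bus (DMA) address of the mailbox area, roughly:
+
+      struct blogic_extmbox_req extmbox_req;
+
+      extmbox_req.mbox_count = adapter->mbox_count;
+      extmbox_req.base_mbox_addr = (u32) adapter->mbox_space_handle;
+
+  and passes it to the adapter with the BLOGIC_INIT_EXT_MBOX operation code.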
+*/ + +struct blogic_extmbox_req { + unsigned char mbox_count; /* Byte 0 */ + u32 base_mbox_addr; /* Bytes 1-4 */ +} PACKED; + + +/* + Define the Inquire PCI Host Adapter Information reply type. The ISA + Compatible I/O Port values are defined here and are also used with + the Modify I/O Address command. +*/ + +enum blogic_isa_ioport { + BLOGIC_IO_330 = 0, + BLOGIC_IO_334 = 1, + BLOGIC_IO_230 = 2, + BLOGIC_IO_234 = 3, + BLOGIC_IO_130 = 4, + BLOGIC_IO_134 = 5, + BLOGIC_IO_DISABLE = 6, + BLOGIC_IO_DISABLE2 = 7 +} PACKED; + +struct blogic_adapter_info { + enum blogic_isa_ioport isa_port; /* Byte 0 */ + unsigned char irq_ch; /* Byte 1 */ + bool low_term:1; /* Byte 2 Bit 0 */ + bool high_term:1; /* Byte 2 Bit 1 */ + unsigned char:2; /* Byte 2 Bits 2-3 */ + bool JP1:1; /* Byte 2 Bit 4 */ + bool JP2:1; /* Byte 2 Bit 5 */ + bool JP3:1; /* Byte 2 Bit 6 */ + bool genericinfo_valid:1; /* Byte 2 Bit 7 */ + unsigned char:8; /* Byte 3 */ +}; + +/* + Define the Inquire Extended Setup Information reply structure. +*/ + +struct blogic_ext_setup { + unsigned char bus_type; /* Byte 0 */ + unsigned char bios_addr; /* Byte 1 */ + unsigned short sg_limit; /* Bytes 2-3 */ + unsigned char mbox_count; /* Byte 4 */ + u32 base_mbox_addr; /* Bytes 5-8 */ + struct { + unsigned char:2; /* Byte 9 Bits 0-1 */ + bool fast_on_eisa:1; /* Byte 9 Bit 2 */ + unsigned char:3; /* Byte 9 Bits 3-5 */ + bool level_int:1; /* Byte 9 Bit 6 */ + unsigned char:1; /* Byte 9 Bit 7 */ + } misc; + unsigned char fw_rev[3]; /* Bytes 10-12 */ + bool wide:1; /* Byte 13 Bit 0 */ + bool differential:1; /* Byte 13 Bit 1 */ + bool scam:1; /* Byte 13 Bit 2 */ + bool ultra:1; /* Byte 13 Bit 3 */ + bool smart_term:1; /* Byte 13 Bit 4 */ + unsigned char:3; /* Byte 13 Bits 5-7 */ +} PACKED; + +/* + Define the Enable Strict Round Robin Mode request type. +*/ + +enum blogic_rr_req { + BLOGIC_AGGRESSIVE_RR = 0, + BLOGIC_STRICT_RR_MODE = 1 +} PACKED; + + +/* + Define the Fetch Host Adapter Local RAM request type. +*/ + +#define BLOGIC_BIOS_BASE 0 +#define BLOGIC_AUTOSCSI_BASE 64 + +struct blogic_fetch_localram { + unsigned char offset; /* Byte 0 */ + unsigned char count; /* Byte 1 */ +}; + +/* + Define the Host Adapter Local RAM AutoSCSI structure. 
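+
+  Individual AutoSCSI bytes can be read back with the Fetch Host Adapter
+  Local RAM command above; byte 45 (the scan order flag, see
+  struct blogic_autoscsi_byte45 below), for instance, is described by a
+  request of roughly this form:
+
+      struct blogic_fetch_localram fetch_localram;
+
+      fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;
+      fetch_localram.count = sizeof(struct blogic_autoscsi_byte45);
+
+  with the single byte returned in the command's reply buffer.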
+*/ + +struct blogic_autoscsi { + unsigned char factory_sig[2]; /* Bytes 0-1 */ + unsigned char info_bytes; /* Byte 2 */ + unsigned char adapter_type[6]; /* Bytes 3-8 */ + unsigned char:8; /* Byte 9 */ + bool floppy:1; /* Byte 10 Bit 0 */ + bool floppy_sec:1; /* Byte 10 Bit 1 */ + bool level_int:1; /* Byte 10 Bit 2 */ + unsigned char:2; /* Byte 10 Bits 3-4 */ + unsigned char systemram_bios:3; /* Byte 10 Bits 5-7 */ + unsigned char dma_ch:7; /* Byte 11 Bits 0-6 */ + bool dma_autoconf:1; /* Byte 11 Bit 7 */ + unsigned char irq_ch:7; /* Byte 12 Bits 0-6 */ + bool irq_autoconf:1; /* Byte 12 Bit 7 */ + unsigned char dma_tx_rate; /* Byte 13 */ + unsigned char scsi_id; /* Byte 14 */ + bool low_term:1; /* Byte 15 Bit 0 */ + bool parity:1; /* Byte 15 Bit 1 */ + bool high_term:1; /* Byte 15 Bit 2 */ + bool noisy_cable:1; /* Byte 15 Bit 3 */ + bool fast_sync_neg:1; /* Byte 15 Bit 4 */ + bool reset_enabled:1; /* Byte 15 Bit 5 */ + bool:1; /* Byte 15 Bit 6 */ + bool active_negation:1; /* Byte 15 Bit 7 */ + unsigned char bus_on_delay; /* Byte 16 */ + unsigned char bus_off_delay; /* Byte 17 */ + bool bios_enabled:1; /* Byte 18 Bit 0 */ + bool int19_redir_enabled:1; /* Byte 18 Bit 1 */ + bool ext_trans_enable:1; /* Byte 18 Bit 2 */ + bool removable_as_fixed:1; /* Byte 18 Bit 3 */ + bool:1; /* Byte 18 Bit 4 */ + bool morethan2_drives:1; /* Byte 18 Bit 5 */ + bool bios_int:1; /* Byte 18 Bit 6 */ + bool floptical:1; /* Byte 19 Bit 7 */ + unsigned short dev_enabled; /* Bytes 19-20 */ + unsigned short wide_ok; /* Bytes 21-22 */ + unsigned short fast_ok; /* Bytes 23-24 */ + unsigned short sync_ok; /* Bytes 25-26 */ + unsigned short discon_ok; /* Bytes 27-28 */ + unsigned short send_start_unit; /* Bytes 29-30 */ + unsigned short ignore_bios_scan; /* Bytes 31-32 */ + unsigned char pci_int_pin:2; /* Byte 33 Bits 0-1 */ + unsigned char adapter_ioport:2; /* Byte 33 Bits 2-3 */ + bool strict_rr_enabled:1; /* Byte 33 Bit 4 */ + bool vesabus_33mhzplus:1; /* Byte 33 Bit 5 */ + bool vesa_burst_write:1; /* Byte 33 Bit 6 */ + bool vesa_burst_read:1; /* Byte 33 Bit 7 */ + unsigned short ultra_ok; /* Bytes 34-35 */ + unsigned int:32; /* Bytes 36-39 */ + unsigned char:8; /* Byte 40 */ + unsigned char autoscsi_maxlun; /* Byte 41 */ + bool:1; /* Byte 42 Bit 0 */ + bool scam_dominant:1; /* Byte 42 Bit 1 */ + bool scam_enabled:1; /* Byte 42 Bit 2 */ + bool scam_lev2:1; /* Byte 42 Bit 3 */ + unsigned char:4; /* Byte 42 Bits 4-7 */ + bool int13_exten:1; /* Byte 43 Bit 0 */ + bool:1; /* Byte 43 Bit 1 */ + bool cd_boot:1; /* Byte 43 Bit 2 */ + unsigned char:5; /* Byte 43 Bits 3-7 */ + unsigned char boot_id:4; /* Byte 44 Bits 0-3 */ + unsigned char boot_ch:4; /* Byte 44 Bits 4-7 */ + unsigned char force_scan_order:1; /* Byte 45 Bit 0 */ + unsigned char:7; /* Byte 45 Bits 1-7 */ + unsigned short nontagged_to_alt_ok; /* Bytes 46-47 */ + unsigned short reneg_sync_on_check; /* Bytes 48-49 */ + unsigned char rsvd[10]; /* Bytes 50-59 */ + unsigned char manuf_diag[2]; /* Bytes 60-61 */ + unsigned short cksum; /* Bytes 62-63 */ +} PACKED; + +/* + Define the Host Adapter Local RAM Auto SCSI Byte 45 structure. +*/ + +struct blogic_autoscsi_byte45 { + unsigned char force_scan_order:1; /* Bit 0 */ + unsigned char:7; /* Bits 1-7 */ +}; + +/* + Define the Host Adapter Local RAM BIOS Drive Map Byte structure. 
+*/ + +#define BLOGIC_BIOS_DRVMAP 17 + +struct blogic_bios_drvmap { + unsigned char tgt_idbit3:1; /* Bit 0 */ + unsigned char:2; /* Bits 1-2 */ + enum blogic_bios_diskgeometry diskgeom:2; /* Bits 3-4 */ + unsigned char tgt_id:3; /* Bits 5-7 */ +}; + +/* + Define the Set CCB Format request type. Extended LUN Format CCBs are + necessary to support more than 8 Logical Units per Target Device. +*/ + +enum blogic_setccb_fmt { + BLOGIC_LEGACY_LUN_CCB = 0, + BLOGIC_EXT_LUN_CCB = 1 +} PACKED; + +/* + Define the Outgoing Mailbox Action Codes. +*/ + +enum blogic_action { + BLOGIC_OUTBOX_FREE = 0x00, + BLOGIC_MBOX_START = 0x01, + BLOGIC_MBOX_ABORT = 0x02 +} PACKED; + + +/* + Define the Incoming Mailbox Completion Codes. The MultiMaster Firmware + only uses codes 0 - 4. The FlashPoint SCCB Manager has no mailboxes, so + completion codes are stored in the CCB; it only uses codes 1, 2, 4, and 5. +*/ + +enum blogic_cmplt_code { + BLOGIC_INBOX_FREE = 0x00, + BLOGIC_CMD_COMPLETE_GOOD = 0x01, + BLOGIC_CMD_ABORT_BY_HOST = 0x02, + BLOGIC_CMD_NOTFOUND = 0x03, + BLOGIC_CMD_COMPLETE_ERROR = 0x04, + BLOGIC_INVALID_CCB = 0x05 +} PACKED; + +/* + Define the Command Control Block (CCB) Opcodes. +*/ + +enum blogic_ccb_opcode { + BLOGIC_INITIATOR_CCB = 0x00, + BLOGIC_TGT_CCB = 0x01, + BLOGIC_INITIATOR_CCB_SG = 0x02, + BLOGIC_INITIATOR_CCBB_RESIDUAL = 0x03, + BLOGIC_INITIATOR_CCB_SG_RESIDUAL = 0x04, + BLOGIC_BDR = 0x81 +} PACKED; + + +/* + Define the CCB Data Direction Codes. +*/ + +enum blogic_datadir { + BLOGIC_UNCHECKED_TX = 0, + BLOGIC_DATAIN_CHECKED = 1, + BLOGIC_DATAOUT_CHECKED = 2, + BLOGIC_NOTX = 3 +}; + + +/* + Define the Host Adapter Status Codes. The MultiMaster Firmware does not + return status code 0x0C; it uses 0x12 for both overruns and underruns. +*/ + +enum blogic_adapter_status { + BLOGIC_CMD_CMPLT_NORMAL = 0x00, + BLOGIC_LINK_CMD_CMPLT = 0x0A, + BLOGIC_LINK_CMD_CMPLT_FLAG = 0x0B, + BLOGIC_DATA_UNDERRUN = 0x0C, + BLOGIC_SELECT_TIMEOUT = 0x11, + BLOGIC_DATA_OVERRUN = 0x12, + BLOGIC_NOEXPECT_BUSFREE = 0x13, + BLOGIC_INVALID_BUSPHASE = 0x14, + BLOGIC_INVALID_OUTBOX_CODE = 0x15, + BLOGIC_INVALID_CMD_CODE = 0x16, + BLOGIC_LINKCCB_BADLUN = 0x17, + BLOGIC_BAD_CMD_PARAM = 0x1A, + BLOGIC_AUTOREQSENSE_FAIL = 0x1B, + BLOGIC_TAGQUEUE_REJECT = 0x1C, + BLOGIC_BAD_MSG_RCVD = 0x1D, + BLOGIC_HW_FAIL = 0x20, + BLOGIC_NORESPONSE_TO_ATN = 0x21, + BLOGIC_HW_RESET = 0x22, + BLOGIC_RST_FROM_OTHERDEV = 0x23, + BLOGIC_BAD_RECONNECT = 0x24, + BLOGIC_HW_BDR = 0x25, + BLOGIC_ABRT_QUEUE = 0x26, + BLOGIC_ADAPTER_SW_ERROR = 0x27, + BLOGIC_HW_TIMEOUT = 0x30, + BLOGIC_PARITY_ERR = 0x34 +} PACKED; + + +/* + Define the SCSI Target Device Status Codes. +*/ + +enum blogic_tgt_status { + BLOGIC_OP_GOOD = 0x00, + BLOGIC_CHECKCONDITION = 0x02, + BLOGIC_DEVBUSY = 0x08 +} PACKED; + +/* + Define the Queue Tag Codes. +*/ + +enum blogic_queuetag { + BLOGIC_SIMPLETAG = 0, + BLOGIC_HEADTAG = 1, + BLOGIC_ORDEREDTAG = 2, + BLOGIC_RSVDTAG = 3 +}; + +/* + Define the SCSI Command Descriptor Block (CDB). +*/ + +#define BLOGIC_CDB_MAXLEN 12 + + +/* + Define the Scatter/Gather Segment structure required by the MultiMaster + Firmware Interface and the FlashPoint SCCB Manager. +*/ + +struct blogic_sg_seg { + u32 segbytes; /* Bytes 0-3 */ + u32 segdata; /* Bytes 4-7 */ +}; + +/* + Define the Driver CCB Status Codes. +*/ + +enum blogic_ccb_status { + BLOGIC_CCB_FREE = 0, + BLOGIC_CCB_ACTIVE = 1, + BLOGIC_CCB_COMPLETE = 2, + BLOGIC_CCB_RESET = 3 +} PACKED; + + +/* + Define the 32 Bit Mode Command Control Block (CCB) structure. 
The first 40 + bytes are defined by and common to both the MultiMaster Firmware and the + FlashPoint SCCB Manager. The next 60 bytes are defined by the FlashPoint + SCCB Manager. The remaining components are defined by the Linux BusLogic + Driver. Extended LUN Format CCBs differ from Legacy LUN Format 32 Bit Mode + CCBs only in having the TagEnable and QueueTag fields moved from byte 17 to + byte 1, and the Logical Unit field in byte 17 expanded to 6 bits. In theory, + Extended LUN Format CCBs can support up to 64 Logical Units, but in practice + many devices will respond improperly to Logical Units between 32 and 63, and + the SCSI-2 specification defines Bit 5 as LUNTAR. Extended LUN Format CCBs + are used by recent versions of the MultiMaster Firmware, as well as by the + FlashPoint SCCB Manager; the FlashPoint SCCB Manager only supports 32 Logical + Units. Since 64 Logical Units are unlikely to be needed in practice, and + since they are problematic for the above reasons, and since limiting them to + 5 bits simplifies the CCB structure definition, this driver only supports + 32 Logical Units per Target Device. +*/ + +struct blogic_ccb { + /* + MultiMaster Firmware and FlashPoint SCCB Manager Common Portion. + */ + enum blogic_ccb_opcode opcode; /* Byte 0 */ + unsigned char:3; /* Byte 1 Bits 0-2 */ + enum blogic_datadir datadir:2; /* Byte 1 Bits 3-4 */ + bool tag_enable:1; /* Byte 1 Bit 5 */ + enum blogic_queuetag queuetag:2; /* Byte 1 Bits 6-7 */ + unsigned char cdblen; /* Byte 2 */ + unsigned char sense_datalen; /* Byte 3 */ + u32 datalen; /* Bytes 4-7 */ + u32 data; /* Bytes 8-11 */ + unsigned char:8; /* Byte 12 */ + unsigned char:8; /* Byte 13 */ + enum blogic_adapter_status adapter_status; /* Byte 14 */ + enum blogic_tgt_status tgt_status; /* Byte 15 */ + unsigned char tgt_id; /* Byte 16 */ + unsigned char lun:5; /* Byte 17 Bits 0-4 */ + bool legacytag_enable:1; /* Byte 17 Bit 5 */ + enum blogic_queuetag legacy_tag:2; /* Byte 17 Bits 6-7 */ + unsigned char cdb[BLOGIC_CDB_MAXLEN]; /* Bytes 18-29 */ + unsigned char:8; /* Byte 30 */ + unsigned char:8; /* Byte 31 */ + u32 rsvd_int; /* Bytes 32-35 */ + u32 sensedata; /* Bytes 36-39 */ + /* + FlashPoint SCCB Manager Defined Portion. + */ + void (*callback) (struct blogic_ccb *); /* Bytes 40-43 */ + u32 base_addr; /* Bytes 44-47 */ + enum blogic_cmplt_code comp_code; /* Byte 48 */ +#ifdef CONFIG_SCSI_FLASHPOINT + unsigned char:8; /* Byte 49 */ + u16 os_flags; /* Bytes 50-51 */ + unsigned char private[24]; /* Bytes 52-99 */ + void *rsvd1; + void *rsvd2; + unsigned char private2[16]; +#endif + /* + BusLogic Linux Driver Defined Portion. + */ + dma_addr_t allocgrp_head; + unsigned int allocgrp_size; + u32 dma_handle; + enum blogic_ccb_status status; + unsigned long serial; + struct scsi_cmnd *command; + struct blogic_adapter *adapter; + struct blogic_ccb *next; + struct blogic_ccb *next_all; + struct blogic_sg_seg sglist[BLOGIC_SG_LIMIT]; +}; + +/* + Define the 32 Bit Mode Outgoing Mailbox structure. +*/ + +struct blogic_outbox { + u32 ccb; /* Bytes 0-3 */ + u32:24; /* Bytes 4-6 */ + enum blogic_action action; /* Byte 7 */ +}; + +/* + Define the 32 Bit Mode Incoming Mailbox structure. +*/ + +struct blogic_inbox { + u32 ccb; /* Bytes 0-3 */ + enum blogic_adapter_status adapter_status; /* Byte 4 */ + enum blogic_tgt_status tgt_status; /* Byte 5 */ + unsigned char:8; /* Byte 6 */ + enum blogic_cmplt_code comp_code; /* Byte 7 */ +}; + + +/* + Define the BusLogic Driver Options structure. 
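+
+  tagq_ok and tagq_ok_mask together form a per-target override: a set bit
+  in tagq_ok_mask means an explicit setting was given for that target, and
+  the matching bit in tagq_ok is that setting. After blogic_parseopts()
+  handles the option string "TQ:YNX", for example,
+
+      drvr_opts->tagq_ok      == 0x0001    (target 0 explicitly enabled)
+      drvr_opts->tagq_ok_mask == 0x0003    (targets 0 and 1 explicitly set;
+                                            target 2 keeps the adapter default)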
+*/ + +struct blogic_drvr_options { + unsigned short tagq_ok; + unsigned short tagq_ok_mask; + unsigned short bus_settle_time; + unsigned short stop_tgt_inquiry; + unsigned char common_qdepth; + unsigned char qdepth[BLOGIC_MAXDEV]; +}; + +/* + Define the Host Adapter Target Flags structure. +*/ + +struct blogic_tgt_flags { + bool tgt_exists:1; + bool tagq_ok:1; + bool wide_ok:1; + bool tagq_active:1; + bool wide_active:1; + bool cmd_good:1; + bool tgt_info_in:1; +}; + +/* + Define the Host Adapter Target Statistics structure. +*/ + +#define BLOGIC_SZ_BUCKETS 10 + +struct blogic_tgt_stats { + unsigned int cmds_tried; + unsigned int cmds_complete; + unsigned int read_cmds; + unsigned int write_cmds; + struct blogic_byte_count bytesread; + struct blogic_byte_count byteswritten; + unsigned int read_sz_buckets[BLOGIC_SZ_BUCKETS]; + unsigned int write_sz_buckets[BLOGIC_SZ_BUCKETS]; + unsigned short aborts_request; + unsigned short aborts_tried; + unsigned short aborts_done; + unsigned short bdr_request; + unsigned short bdr_tried; + unsigned short bdr_done; + unsigned short adapter_reset_req; + unsigned short adapter_reset_attempt; + unsigned short adapter_reset_done; +}; + +/* + Define the FlashPoint Card Handle data type. +*/ + +#define FPOINT_BADCARD_HANDLE 0xFFFFFFFFL + + +/* + Define the FlashPoint Information structure. This structure is defined + by the FlashPoint SCCB Manager. +*/ + +struct fpoint_info { + u32 base_addr; /* Bytes 0-3 */ + bool present; /* Byte 4 */ + unsigned char irq_ch; /* Byte 5 */ + unsigned char scsi_id; /* Byte 6 */ + unsigned char scsi_lun; /* Byte 7 */ + u16 fw_rev; /* Bytes 8-9 */ + u16 sync_ok; /* Bytes 10-11 */ + u16 fast_ok; /* Bytes 12-13 */ + u16 ultra_ok; /* Bytes 14-15 */ + u16 discon_ok; /* Bytes 16-17 */ + u16 wide_ok; /* Bytes 18-19 */ + bool parity:1; /* Byte 20 Bit 0 */ + bool wide:1; /* Byte 20 Bit 1 */ + bool softreset:1; /* Byte 20 Bit 2 */ + bool ext_trans_enable:1; /* Byte 20 Bit 3 */ + bool low_term:1; /* Byte 20 Bit 4 */ + bool high_term:1; /* Byte 20 Bit 5 */ + bool report_underrun:1; /* Byte 20 Bit 6 */ + bool scam_enabled:1; /* Byte 20 Bit 7 */ + bool scam_lev2:1; /* Byte 21 Bit 0 */ + unsigned char:7; /* Byte 21 Bits 1-7 */ + unsigned char family; /* Byte 22 */ + unsigned char bus_type; /* Byte 23 */ + unsigned char model[3]; /* Bytes 24-26 */ + unsigned char relative_cardnum; /* Byte 27 */ + unsigned char rsvd[4]; /* Bytes 28-31 */ + u32 os_rsvd; /* Bytes 32-35 */ + unsigned char translation_info[4]; /* Bytes 36-39 */ + u32 rsvd2[5]; /* Bytes 40-59 */ + u32 sec_range; /* Bytes 60-63 */ +}; + +/* + Define the BusLogic Driver Host Adapter structure. 
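+
+  One of these lives in the hostdata area of each adapter's Scsi_Host, so
+  the driver entry points recover it with
+
+      struct blogic_adapter *adapter =
+          (struct blogic_adapter *) shost->hostdata;
+
+  as blogic_show_info() and blogic_write_info() in BusLogic.c do.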
+*/ + +struct blogic_adapter { + struct Scsi_Host *scsi_host; + struct pci_dev *pci_device; + enum blogic_adapter_type adapter_type; + enum blogic_adapter_bus_type adapter_bus_type; + unsigned long io_addr; + unsigned long pci_addr; + unsigned short addr_count; + unsigned char host_no; + unsigned char model[9]; + unsigned char fw_ver[6]; + unsigned char full_model[18]; + unsigned char bus; + unsigned char dev; + unsigned char irq_ch; + unsigned char scsi_id; + bool irq_acquired:1; + bool ext_trans_enable:1; + bool parity:1; + bool reset_enabled:1; + bool level_int:1; + bool wide:1; + bool differential:1; + bool scam:1; + bool ultra:1; + bool ext_lun:1; + bool terminfo_valid:1; + bool low_term:1; + bool high_term:1; + bool strict_rr:1; + bool scam_enabled:1; + bool scam_lev2:1; + bool adapter_initd:1; + bool adapter_extreset:1; + bool adapter_intern_err:1; + bool processing_ccbs; + volatile bool adapter_cmd_complete; + unsigned short adapter_sglimit; + unsigned short drvr_sglimit; + unsigned short maxdev; + unsigned short maxlun; + unsigned short mbox_count; + unsigned short initccbs; + unsigned short inc_ccbs; + unsigned short alloc_ccbs; + unsigned short drvr_qdepth; + unsigned short adapter_qdepth; + unsigned short untag_qdepth; + unsigned short common_qdepth; + unsigned short bus_settle_time; + unsigned short sync_ok; + unsigned short fast_ok; + unsigned short ultra_ok; + unsigned short wide_ok; + unsigned short discon_ok; + unsigned short tagq_ok; + unsigned short ext_resets; + unsigned short adapter_intern_errors; + unsigned short tgt_count; + unsigned short msgbuflen; + u32 bios_addr; + struct blogic_drvr_options *drvr_opts; + struct fpoint_info fpinfo; + void *cardhandle; + struct list_head host_list; + struct blogic_ccb *all_ccbs; + struct blogic_ccb *free_ccbs; + struct blogic_ccb *firstccb; + struct blogic_ccb *lastccb; + struct blogic_ccb *bdr_pend[BLOGIC_MAXDEV]; + struct blogic_tgt_flags tgt_flags[BLOGIC_MAXDEV]; + unsigned char qdepth[BLOGIC_MAXDEV]; + unsigned char sync_period[BLOGIC_MAXDEV]; + unsigned char sync_offset[BLOGIC_MAXDEV]; + unsigned char active_cmds[BLOGIC_MAXDEV]; + unsigned int cmds_since_rst[BLOGIC_MAXDEV]; + unsigned long last_seqpoint[BLOGIC_MAXDEV]; + unsigned long last_resettried[BLOGIC_MAXDEV]; + unsigned long last_resetdone[BLOGIC_MAXDEV]; + struct blogic_outbox *first_outbox; + struct blogic_outbox *last_outbox; + struct blogic_outbox *next_outbox; + struct blogic_inbox *first_inbox; + struct blogic_inbox *last_inbox; + struct blogic_inbox *next_inbox; + struct blogic_tgt_stats tgt_stats[BLOGIC_MAXDEV]; + unsigned char *mbox_space; + dma_addr_t mbox_space_handle; + unsigned int mbox_sz; + unsigned long ccb_offset; + char msgbuf[BLOGIC_MSGBUF_SIZE]; +}; + +/* + Define a structure for the BIOS Disk Parameters. +*/ + +struct bios_diskparam { + int heads; + int sectors; + int cylinders; +}; + +/* + Define a structure for the SCSI Inquiry command results. 
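+
+  The driver is mainly interested in the CmdQue and WBus16 capability bits,
+  which it copies into the per-target flags once an INQUIRY command
+  completes, along the lines of
+
+      tgt_flags->tagq_ok = inquiry->CmdQue;
+      tgt_flags->wide_ok = inquiry->WBus16;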
+*/ + +struct scsi_inquiry { + unsigned char devtype:5; /* Byte 0 Bits 0-4 */ + unsigned char dev_qual:3; /* Byte 0 Bits 5-7 */ + unsigned char dev_modifier:7; /* Byte 1 Bits 0-6 */ + bool rmb:1; /* Byte 1 Bit 7 */ + unsigned char ansi_ver:3; /* Byte 2 Bits 0-2 */ + unsigned char ecma_ver:3; /* Byte 2 Bits 3-5 */ + unsigned char iso_ver:2; /* Byte 2 Bits 6-7 */ + unsigned char resp_fmt:4; /* Byte 3 Bits 0-3 */ + unsigned char:2; /* Byte 3 Bits 4-5 */ + bool TrmIOP:1; /* Byte 3 Bit 6 */ + bool AENC:1; /* Byte 3 Bit 7 */ + unsigned char addl_len; /* Byte 4 */ + unsigned char:8; /* Byte 5 */ + unsigned char:8; /* Byte 6 */ + bool SftRe:1; /* Byte 7 Bit 0 */ + bool CmdQue:1; /* Byte 7 Bit 1 */ + bool:1; /* Byte 7 Bit 2 */ + bool linked:1; /* Byte 7 Bit 3 */ + bool sync:1; /* Byte 7 Bit 4 */ + bool WBus16:1; /* Byte 7 Bit 5 */ + bool WBus32:1; /* Byte 7 Bit 6 */ + bool RelAdr:1; /* Byte 7 Bit 7 */ + unsigned char vendor[8]; /* Bytes 8-15 */ + unsigned char product[16]; /* Bytes 16-31 */ + unsigned char product_rev[4]; /* Bytes 32-35 */ +}; + + +/* + Define functions to provide an abstraction for reading and writing the + Host Adapter I/O Registers. +*/ + +static inline void blogic_busreset(struct blogic_adapter *adapter) +{ + union blogic_cntrl_reg cr; + cr.all = 0; + cr.cr.bus_reset = true; + outb(cr.all, adapter->io_addr + BLOGIC_CNTRL_REG); +} + +static inline void blogic_intreset(struct blogic_adapter *adapter) +{ + union blogic_cntrl_reg cr; + cr.all = 0; + cr.cr.int_reset = true; + outb(cr.all, adapter->io_addr + BLOGIC_CNTRL_REG); +} + +static inline void blogic_softreset(struct blogic_adapter *adapter) +{ + union blogic_cntrl_reg cr; + cr.all = 0; + cr.cr.soft_reset = true; + outb(cr.all, adapter->io_addr + BLOGIC_CNTRL_REG); +} + +static inline void blogic_hardreset(struct blogic_adapter *adapter) +{ + union blogic_cntrl_reg cr; + cr.all = 0; + cr.cr.hard_reset = true; + outb(cr.all, adapter->io_addr + BLOGIC_CNTRL_REG); +} + +static inline unsigned char blogic_rdstatus(struct blogic_adapter *adapter) +{ + return inb(adapter->io_addr + BLOGIC_STATUS_REG); +} + +static inline void blogic_setcmdparam(struct blogic_adapter *adapter, + unsigned char value) +{ + outb(value, adapter->io_addr + BLOGIC_CMD_PARM_REG); +} + +static inline unsigned char blogic_rddatain(struct blogic_adapter *adapter) +{ + return inb(adapter->io_addr + BLOGIC_DATAIN_REG); +} + +static inline unsigned char blogic_rdint(struct blogic_adapter *adapter) +{ + return inb(adapter->io_addr + BLOGIC_INT_REG); +} + +static inline unsigned char blogic_rdgeom(struct blogic_adapter *adapter) +{ + return inb(adapter->io_addr + BLOGIC_GEOMETRY_REG); +} + +/* + blogic_execmbox issues an Execute Mailbox Command, which + notifies the Host Adapter that an entry has been made in an Outgoing + Mailbox. +*/ + +static inline void blogic_execmbox(struct blogic_adapter *adapter) +{ + blogic_setcmdparam(adapter, BLOGIC_EXEC_MBOX_CMD); +} + +/* + blogic_delay waits for Seconds to elapse. +*/ + +static inline void blogic_delay(int seconds) +{ + mdelay(1000 * seconds); +} + +/* + virt_to_32bit_virt maps between Kernel Virtual Addresses and + 32 bit Kernel Virtual Addresses. This avoids compilation warnings + on 64 bit architectures. +*/ + +static inline u32 virt_to_32bit_virt(void *virt_addr) +{ + return (u32) (unsigned long) virt_addr; +} + +/* + blogic_inc_count increments counter by 1, stopping at + 65535 rather than wrapping around to 0. 
+*/ + +static inline void blogic_inc_count(unsigned short *count) +{ + if (*count < 65535) + (*count)++; +} + +/* + blogic_addcount increments Byte Counter by Amount. +*/ + +static inline void blogic_addcount(struct blogic_byte_count *bytecount, + unsigned int amount) +{ + bytecount->units += amount; + if (bytecount->units > 999999999) { + bytecount->units -= 1000000000; + bytecount->billions++; + } +} + +/* + blogic_incszbucket increments the Bucket for Amount. +*/ + +static inline void blogic_incszbucket(unsigned int *cmdsz_buckets, + unsigned int amount) +{ + int index = 0; + if (amount < 8 * 1024) { + if (amount < 2 * 1024) + index = (amount < 1 * 1024 ? 0 : 1); + else + index = (amount < 4 * 1024 ? 2 : 3); + } else if (amount < 128 * 1024) { + if (amount < 32 * 1024) + index = (amount < 16 * 1024 ? 4 : 5); + else + index = (amount < 64 * 1024 ? 6 : 7); + } else + index = (amount < 256 * 1024 ? 8 : 9); + cmdsz_buckets[index]++; +} + +/* + Define the version number of the FlashPoint Firmware (SCCB Manager). +*/ + +#define FLASHPOINT_FW_VER "5.02" + +/* + Define the possible return values from FlashPoint_HandleInterrupt. +*/ + +#define FPOINT_NORMAL_INT 0x00 +#define FPOINT_INTERN_ERR 0xFE +#define FPOINT_EXT_RESET 0xFF + +/* + Define prototypes for the forward referenced BusLogic Driver + Internal Functions. +*/ + +static const char *blogic_drvr_info(struct Scsi_Host *); +static int blogic_qcmd(struct Scsi_Host *h, struct scsi_cmnd *); +static int blogic_diskparam(struct scsi_device *, struct block_device *, sector_t, int *); +static int blogic_slaveconfig(struct scsi_device *); +static void blogic_qcompleted_ccb(struct blogic_ccb *); +static irqreturn_t blogic_inthandler(int, void *); +static int blogic_resetadapter(struct blogic_adapter *, bool hard_reset); +static void blogic_msg(enum blogic_msglevel, char *, struct blogic_adapter *, ...); +static int __init blogic_setup(char *); + +#endif /* _BUSLOGIC_H */ diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c new file mode 100644 index 000000000..3d9c56ac8 --- /dev/null +++ b/drivers/scsi/FlashPoint.c @@ -0,0 +1,7560 @@ +/* + + FlashPoint.c -- FlashPoint SCCB Manager for Linux + + This file contains the FlashPoint SCCB Manager from BusLogic's FlashPoint + Driver Developer's Kit, with minor modifications by Leonard N. Zubkoff for + Linux compatibility. It was provided by BusLogic in the form of 16 separate + source files, which would have unnecessarily cluttered the scsi directory, so + the individual files have been combined into this single file. + + Copyright 1995-1996 by Mylex Corporation. All Rights Reserved + + This file is available under both the GNU General Public License + and a BSD-style copyright; see LICENSE.FlashPoint for details. 
+ +*/ + + +#ifdef CONFIG_SCSI_FLASHPOINT + +#define MAX_CARDS 8 +#undef BUSTYPE_PCI + +#define CRCMASK 0xA001 + +#define FAILURE 0xFFFFFFFFL + +struct sccb; +typedef void (*CALL_BK_FN) (struct sccb *); + +struct sccb_mgr_info { + u32 si_baseaddr; + unsigned char si_present; + unsigned char si_intvect; + unsigned char si_id; + unsigned char si_lun; + u16 si_fw_revision; + u16 si_per_targ_init_sync; + u16 si_per_targ_fast_nego; + u16 si_per_targ_ultra_nego; + u16 si_per_targ_no_disc; + u16 si_per_targ_wide_nego; + u16 si_mflags; + unsigned char si_card_family; + unsigned char si_bustype; + unsigned char si_card_model[3]; + unsigned char si_relative_cardnum; + unsigned char si_reserved[4]; + u32 si_OS_reserved; + unsigned char si_XlatInfo[4]; + u32 si_reserved2[5]; + u32 si_secondary_range; +}; + +#define SCSI_PARITY_ENA 0x0001 +#define LOW_BYTE_TERM 0x0010 +#define HIGH_BYTE_TERM 0x0020 +#define BUSTYPE_PCI 0x3 + +#define SUPPORT_16TAR_32LUN 0x0002 +#define SOFT_RESET 0x0004 +#define EXTENDED_TRANSLATION 0x0008 +#define POST_ALL_UNDERRRUNS 0x0040 +#define FLAG_SCAM_ENABLED 0x0080 +#define FLAG_SCAM_LEVEL2 0x0100 + +#define HARPOON_FAMILY 0x02 + +/* SCCB struct used for both SCCB and UCB manager compiles! + * The UCB Manager treats the SCCB as it's 'native hardware structure' + */ + +/*#pragma pack(1)*/ +struct sccb { + unsigned char OperationCode; + unsigned char ControlByte; + unsigned char CdbLength; + unsigned char RequestSenseLength; + u32 DataLength; + void *DataPointer; + unsigned char CcbRes[2]; + unsigned char HostStatus; + unsigned char TargetStatus; + unsigned char TargID; + unsigned char Lun; + unsigned char Cdb[12]; + unsigned char CcbRes1; + unsigned char Reserved1; + u32 Reserved2; + u32 SensePointer; + + CALL_BK_FN SccbCallback; /* VOID (*SccbCallback)(); */ + u32 SccbIOPort; /* Identifies board base port */ + unsigned char SccbStatus; + unsigned char SCCBRes2; + u16 SccbOSFlags; + + u32 Sccb_XferCnt; /* actual transfer count */ + u32 Sccb_ATC; + u32 SccbVirtDataPtr; /* virtual addr for OS/2 */ + u32 Sccb_res1; + u16 Sccb_MGRFlags; + u16 Sccb_sgseg; + unsigned char Sccb_scsimsg; /* identify msg for selection */ + unsigned char Sccb_tag; + unsigned char Sccb_scsistat; + unsigned char Sccb_idmsg; /* image of last msg in */ + struct sccb *Sccb_forwardlink; + struct sccb *Sccb_backlink; + u32 Sccb_savedATC; + unsigned char Save_Cdb[6]; + unsigned char Save_CdbLen; + unsigned char Sccb_XferState; + u32 Sccb_SGoffset; +}; + +#pragma pack() + +#define SCATTER_GATHER_COMMAND 0x02 +#define RESIDUAL_COMMAND 0x03 +#define RESIDUAL_SG_COMMAND 0x04 +#define RESET_COMMAND 0x81 + +#define F_USE_CMD_Q 0x20 /*Inidcates TAGGED command. */ +#define TAG_TYPE_MASK 0xC0 /*Type of tag msg to send. 
*/ +#define SCCB_DATA_XFER_OUT 0x10 /* Write */ +#define SCCB_DATA_XFER_IN 0x08 /* Read */ + +#define NO_AUTO_REQUEST_SENSE 0x01 /* No Request Sense Buffer */ + +#define BUS_FREE_ST 0 +#define SELECT_ST 1 +#define SELECT_BDR_ST 2 /* Select w\ Bus Device Reset */ +#define SELECT_SN_ST 3 /* Select w\ Sync Nego */ +#define SELECT_WN_ST 4 /* Select w\ Wide Data Nego */ +#define SELECT_Q_ST 5 /* Select w\ Tagged Q'ing */ +#define COMMAND_ST 6 +#define DATA_OUT_ST 7 +#define DATA_IN_ST 8 +#define DISCONNECT_ST 9 +#define ABORT_ST 11 + +#define F_HOST_XFER_DIR 0x01 +#define F_ALL_XFERRED 0x02 +#define F_SG_XFER 0x04 +#define F_AUTO_SENSE 0x08 +#define F_ODD_BALL_CNT 0x10 +#define F_NO_DATA_YET 0x80 + +#define F_STATUSLOADED 0x01 +#define F_DEV_SELECTED 0x04 + +#define SCCB_COMPLETE 0x00 /* SCCB completed without error */ +#define SCCB_DATA_UNDER_RUN 0x0C +#define SCCB_SELECTION_TIMEOUT 0x11 /* Set SCSI selection timed out */ +#define SCCB_DATA_OVER_RUN 0x12 +#define SCCB_PHASE_SEQUENCE_FAIL 0x14 /* Target bus phase sequence failure */ + +#define SCCB_GROSS_FW_ERR 0x27 /* Major problem! */ +#define SCCB_BM_ERR 0x30 /* BusMaster error. */ +#define SCCB_PARITY_ERR 0x34 /* SCSI parity error */ + +#define SCCB_IN_PROCESS 0x00 +#define SCCB_SUCCESS 0x01 +#define SCCB_ABORT 0x02 +#define SCCB_ERROR 0x04 + +#define ORION_FW_REV 3110 + +#define QUEUE_DEPTH 254+1 /*1 for Normal disconnect 32 for Q'ing. */ + +#define MAX_MB_CARDS 4 /* Max. no of cards suppoerted on Mother Board */ + +#define MAX_SCSI_TAR 16 +#define MAX_LUN 32 +#define LUN_MASK 0x1f + +#define SG_BUF_CNT 16 /*Number of prefetched elements. */ + +#define SG_ELEMENT_SIZE 8 /*Eight byte per element. */ + +#define RD_HARPOON(ioport) inb((u32)ioport) +#define RDW_HARPOON(ioport) inw((u32)ioport) +#define RD_HARP32(ioport,offset,data) (data = inl((u32)(ioport + offset))) +#define WR_HARPOON(ioport,val) outb((u8) val, (u32)ioport) +#define WRW_HARPOON(ioport,val) outw((u16)val, (u32)ioport) +#define WR_HARP32(ioport,offset,data) outl(data, (u32)(ioport + offset)) + +#define TAR_SYNC_MASK (BIT(7)+BIT(6)) +#define SYNC_TRYING BIT(6) +#define SYNC_SUPPORTED (BIT(7)+BIT(6)) + +#define TAR_WIDE_MASK (BIT(5)+BIT(4)) +#define WIDE_ENABLED BIT(4) +#define WIDE_NEGOCIATED BIT(5) + +#define TAR_TAG_Q_MASK (BIT(3)+BIT(2)) +#define TAG_Q_TRYING BIT(2) +#define TAG_Q_REJECT BIT(3) + +#define TAR_ALLOW_DISC BIT(0) + +#define EE_SYNC_MASK (BIT(0)+BIT(1)) +#define EE_SYNC_5MB BIT(0) +#define EE_SYNC_10MB BIT(1) +#define EE_SYNC_20MB (BIT(0)+BIT(1)) + +#define EE_WIDE_SCSI BIT(7) + +struct sccb_mgr_tar_info { + + struct sccb *TarSelQ_Head; + struct sccb *TarSelQ_Tail; + unsigned char TarLUN_CA; /*Contingent Allgiance */ + unsigned char TarTagQ_Cnt; + unsigned char TarSelQ_Cnt; + unsigned char TarStatus; + unsigned char TarEEValue; + unsigned char TarSyncCtrl; + unsigned char TarReserved[2]; /* for alignment */ + unsigned char LunDiscQ_Idx[MAX_LUN]; + unsigned char TarLUNBusy[MAX_LUN]; +}; + +struct nvram_info { + unsigned char niModel; /* Model No. of card */ + unsigned char niCardNo; /* Card no. 
*/ + u32 niBaseAddr; /* Port Address of card */ + unsigned char niSysConf; /* Adapter Configuration byte - + Byte 16 of eeprom map */ + unsigned char niScsiConf; /* SCSI Configuration byte - + Byte 17 of eeprom map */ + unsigned char niScamConf; /* SCAM Configuration byte - + Byte 20 of eeprom map */ + unsigned char niAdapId; /* Host Adapter ID - + Byte 24 of eerpom map */ + unsigned char niSyncTbl[MAX_SCSI_TAR / 2]; /* Sync/Wide byte + of targets */ + unsigned char niScamTbl[MAX_SCSI_TAR][4]; /* Compressed Scam name + string of Targets */ +}; + +#define MODEL_LT 1 +#define MODEL_DL 2 +#define MODEL_LW 3 +#define MODEL_DW 4 + +struct sccb_card { + struct sccb *currentSCCB; + struct sccb_mgr_info *cardInfo; + + u32 ioPort; + + unsigned short cmdCounter; + unsigned char discQCount; + unsigned char tagQ_Lst; + unsigned char cardIndex; + unsigned char scanIndex; + unsigned char globalFlags; + unsigned char ourId; + struct nvram_info *pNvRamInfo; + struct sccb *discQ_Tbl[QUEUE_DEPTH]; + +}; + +#define F_TAG_STARTED 0x01 +#define F_CONLUN_IO 0x02 +#define F_DO_RENEGO 0x04 +#define F_NO_FILTER 0x08 +#define F_GREEN_PC 0x10 +#define F_HOST_XFER_ACT 0x20 +#define F_NEW_SCCB_CMD 0x40 +#define F_UPDATE_EEPROM 0x80 + +#define ID_STRING_LENGTH 32 +#define TYPE_CODE0 0x63 /*Level2 Mstr (bits 7-6), */ + +#define SLV_TYPE_CODE0 0xA3 /*Priority Bit set (bits 7-6), */ + +#define ASSIGN_ID 0x00 +#define SET_P_FLAG 0x01 +#define CFG_CMPLT 0x03 +#define DOM_MSTR 0x0F +#define SYNC_PTRN 0x1F + +#define ID_0_7 0x18 +#define ID_8_F 0x11 +#define MISC_CODE 0x14 +#define CLR_P_FLAG 0x18 + +#define INIT_SELTD 0x01 +#define LEVEL2_TAR 0x02 + +enum scam_id_st { ID0, ID1, ID2, ID3, ID4, ID5, ID6, ID7, ID8, ID9, ID10, ID11, + ID12, + ID13, ID14, ID15, ID_UNUSED, ID_UNASSIGNED, ID_ASSIGNED, LEGACY, + CLR_PRIORITY, NO_ID_AVAIL +}; + +typedef struct SCCBscam_info { + + unsigned char id_string[ID_STRING_LENGTH]; + enum scam_id_st state; + +} SCCBSCAM_INFO; + + +#define SMIDENT 0x80 +#define DISC_PRIV 0x40 + +#define SM8BIT 0x00 +#define SM16BIT 0x01 + +#define SIX_BYTE_CMD 0x06 +#define TWELVE_BYTE_CMD 0x0C + +#define ASYNC 0x00 +#define MAX_OFFSET 0x0F /* Maxbyteoffset for Sync Xfers */ + +#define EEPROM_WD_CNT 256 + +#define EEPROM_CHECK_SUM 0 +#define FW_SIGNATURE 2 +#define MODEL_NUMB_0 4 +#define MODEL_NUMB_2 6 +#define MODEL_NUMB_4 8 +#define SYSTEM_CONFIG 16 +#define SCSI_CONFIG 17 +#define BIOS_CONFIG 18 +#define SCAM_CONFIG 20 +#define ADAPTER_SCSI_ID 24 + +#define IGNORE_B_SCAN 32 +#define SEND_START_ENA 34 +#define DEVICE_ENABLE 36 + +#define SYNC_RATE_TBL 38 +#define SYNC_RATE_TBL01 38 +#define SYNC_RATE_TBL23 40 +#define SYNC_RATE_TBL45 42 +#define SYNC_RATE_TBL67 44 +#define SYNC_RATE_TBL89 46 +#define SYNC_RATE_TBLab 48 +#define SYNC_RATE_TBLcd 50 +#define SYNC_RATE_TBLef 52 + +#define EE_SCAMBASE 256 + +#define SCAM_ENABLED BIT(2) +#define SCAM_LEVEL2 BIT(3) + +#define RENEGO_ENA BIT(10) +#define CONNIO_ENA BIT(11) +#define GREEN_PC_ENA BIT(12) + +#define AUTO_RATE_00 00 +#define AUTO_RATE_05 01 +#define AUTO_RATE_10 02 +#define AUTO_RATE_20 03 + +#define WIDE_NEGO_BIT BIT(7) +#define DISC_ENABLE_BIT BIT(6) + +#define hp_vendor_id_0 0x00 /* LSB */ +#define ORION_VEND_0 0x4B + +#define hp_vendor_id_1 0x01 /* MSB */ +#define ORION_VEND_1 0x10 + +#define hp_device_id_0 0x02 /* LSB */ +#define ORION_DEV_0 0x30 + +#define hp_device_id_1 0x03 /* MSB */ +#define ORION_DEV_1 0x81 + + /* Sub Vendor ID and Sub Device ID only available in + Harpoon Version 2 and higher */ + +#define hp_sub_device_id_0 0x06 /* LSB */ + 
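+
+/*
+  The vendor and device ID offsets above are how the SCCB Manager's probe
+  routine recognizes a FlashPoint (Orion) chip; the check is essentially
+
+      if (RD_HARPOON(ioport + hp_vendor_id_0) != ORION_VEND_0 ||
+          RD_HARPOON(ioport + hp_vendor_id_1) != ORION_VEND_1 ||
+          RD_HARPOON(ioport + hp_device_id_0) != ORION_DEV_0 ||
+          RD_HARPOON(ioport + hp_device_id_1) != ORION_DEV_1)
+              return (int)FAILURE;
+*/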
+#define hp_semaphore 0x0C +#define SCCB_MGR_ACTIVE BIT(0) +#define TICKLE_ME BIT(1) +#define SCCB_MGR_PRESENT BIT(3) +#define BIOS_IN_USE BIT(4) + +#define hp_sys_ctrl 0x0F + +#define STOP_CLK BIT(0) /*Turn off BusMaster Clock */ +#define DRVR_RST BIT(1) /*Firmware Reset to 80C15 chip */ +#define HALT_MACH BIT(3) /*Halt State Machine */ +#define HARD_ABORT BIT(4) /*Hard Abort */ + +#define hp_host_blk_cnt 0x13 + +#define XFER_BLK64 0x06 /* 1 1 0 64 byte per block */ + +#define BM_THRESHOLD 0x40 /* PCI mode can only xfer 16 bytes */ + +#define hp_int_mask 0x17 + +#define INT_CMD_COMPL BIT(0) /* DMA command complete */ +#define INT_EXT_STATUS BIT(1) /* Extended Status Set */ + +#define hp_xfer_cnt_lo 0x18 +#define hp_xfer_cnt_hi 0x1A +#define hp_xfer_cmd 0x1B + +#define XFER_HOST_DMA 0x00 /* 0 0 0 Transfer Host -> DMA */ +#define XFER_DMA_HOST 0x01 /* 0 0 1 Transfer DMA -> Host */ + +#define XFER_HOST_AUTO 0x00 /* 0 0 Auto Transfer Size */ + +#define XFER_DMA_8BIT 0x20 /* 0 1 8 BIT Transfer Size */ + +#define DISABLE_INT BIT(7) /*Do not interrupt at end of cmd. */ + +#define HOST_WRT_CMD ((DISABLE_INT + XFER_HOST_DMA + XFER_HOST_AUTO + XFER_DMA_8BIT)) +#define HOST_RD_CMD ((DISABLE_INT + XFER_DMA_HOST + XFER_HOST_AUTO + XFER_DMA_8BIT)) + +#define hp_host_addr_lo 0x1C +#define hp_host_addr_hmi 0x1E + +#define hp_ee_ctrl 0x22 + +#define EXT_ARB_ACK BIT(7) +#define SCSI_TERM_ENA_H BIT(6) /* SCSI high byte terminator */ +#define SEE_MS BIT(5) +#define SEE_CS BIT(3) +#define SEE_CLK BIT(2) +#define SEE_DO BIT(1) +#define SEE_DI BIT(0) + +#define EE_READ 0x06 +#define EE_WRITE 0x05 +#define EWEN 0x04 +#define EWEN_ADDR 0x03C0 +#define EWDS 0x04 +#define EWDS_ADDR 0x0000 + +#define hp_bm_ctrl 0x26 + +#define SCSI_TERM_ENA_L BIT(0) /*Enable/Disable external terminators */ +#define FLUSH_XFER_CNTR BIT(1) /*Flush transfer counter */ +#define FORCE1_XFER BIT(5) /*Always xfer one byte in byte mode */ +#define FAST_SINGLE BIT(6) /*?? */ + +#define BMCTRL_DEFAULT (FORCE1_XFER|FAST_SINGLE|SCSI_TERM_ENA_L) + +#define hp_sg_addr 0x28 +#define hp_page_ctrl 0x29 + +#define SCATTER_EN BIT(0) +#define SGRAM_ARAM BIT(1) +#define G_INT_DISABLE BIT(3) /* Enable/Disable all Interrupts */ +#define NARROW_SCSI_CARD BIT(4) /* NARROW/WIDE SCSI config pin */ + +#define hp_pci_stat_cfg 0x2D + +#define REC_MASTER_ABORT BIT(5) /*received Master abort */ + +#define hp_rev_num 0x33 + +#define hp_stack_data 0x34 +#define hp_stack_addr 0x35 + +#define hp_ext_status 0x36 + +#define BM_FORCE_OFF BIT(0) /*Bus Master is forced to get off */ +#define PCI_TGT_ABORT BIT(0) /*PCI bus master transaction aborted */ +#define PCI_DEV_TMOUT BIT(1) /*PCI Device Time out */ +#define CMD_ABORTED BIT(4) /*Command aborted */ +#define BM_PARITY_ERR BIT(5) /*parity error on data received */ +#define PIO_OVERRUN BIT(6) /*Slave data overrun */ +#define BM_CMD_BUSY BIT(7) /*Bus master transfer command busy */ +#define BAD_EXT_STATUS (BM_FORCE_OFF | PCI_DEV_TMOUT | CMD_ABORTED | \ + BM_PARITY_ERR | PIO_OVERRUN) + +#define hp_int_status 0x37 + +#define EXT_STATUS_ON BIT(1) /*Extended status is valid */ +#define SCSI_INTERRUPT BIT(2) /*Global indication of a SCSI int. 
*/ +#define INT_ASSERTED BIT(5) /* */ + +#define hp_fifo_cnt 0x38 + +#define hp_intena 0x40 + +#define RESET BIT(7) +#define PROG_HLT BIT(6) +#define PARITY BIT(5) +#define FIFO BIT(4) +#define SEL BIT(3) +#define SCAM_SEL BIT(2) +#define RSEL BIT(1) +#define TIMEOUT BIT(0) +#define BUS_FREE BIT(15) +#define XFER_CNT_0 BIT(14) +#define PHASE BIT(13) +#define IUNKWN BIT(12) +#define ICMD_COMP BIT(11) +#define ITICKLE BIT(10) +#define IDO_STRT BIT(9) +#define ITAR_DISC BIT(8) +#define AUTO_INT (BIT(12)+BIT(11)+BIT(10)+BIT(9)+BIT(8)) +#define CLR_ALL_INT 0xFFFF +#define CLR_ALL_INT_1 0xFF00 + +#define hp_intstat 0x42 + +#define hp_scsisig 0x44 + +#define SCSI_SEL BIT(7) +#define SCSI_BSY BIT(6) +#define SCSI_REQ BIT(5) +#define SCSI_ACK BIT(4) +#define SCSI_ATN BIT(3) +#define SCSI_CD BIT(2) +#define SCSI_MSG BIT(1) +#define SCSI_IOBIT BIT(0) + +#define S_SCSI_PHZ (BIT(2)+BIT(1)+BIT(0)) +#define S_MSGO_PH (BIT(2)+BIT(1) ) +#define S_MSGI_PH (BIT(2)+BIT(1)+BIT(0)) +#define S_DATAI_PH ( BIT(0)) +#define S_DATAO_PH 0x00 +#define S_ILL_PH ( BIT(1) ) + +#define hp_scsictrl_0 0x45 + +#define SEL_TAR BIT(6) +#define ENA_ATN BIT(4) +#define ENA_RESEL BIT(2) +#define SCSI_RST BIT(1) +#define ENA_SCAM_SEL BIT(0) + +#define hp_portctrl_0 0x46 + +#define SCSI_PORT BIT(7) +#define SCSI_INBIT BIT(6) +#define DMA_PORT BIT(5) +#define DMA_RD BIT(4) +#define HOST_PORT BIT(3) +#define HOST_WRT BIT(2) +#define SCSI_BUS_EN BIT(1) +#define START_TO BIT(0) + +#define hp_scsireset 0x47 + +#define SCSI_INI BIT(6) +#define SCAM_EN BIT(5) +#define DMA_RESET BIT(3) +#define HPSCSI_RESET BIT(2) +#define PROG_RESET BIT(1) +#define FIFO_CLR BIT(0) + +#define hp_xfercnt_0 0x48 +#define hp_xfercnt_2 0x4A + +#define hp_fifodata_0 0x4C +#define hp_addstat 0x4E + +#define SCAM_TIMER BIT(7) +#define SCSI_MODE8 BIT(3) +#define SCSI_PAR_ERR BIT(0) + +#define hp_prgmcnt_0 0x4F + +#define hp_selfid_0 0x50 +#define hp_selfid_1 0x51 +#define hp_arb_id 0x52 + +#define hp_select_id 0x53 + +#define hp_synctarg_base 0x54 +#define hp_synctarg_12 0x54 +#define hp_synctarg_13 0x55 +#define hp_synctarg_14 0x56 +#define hp_synctarg_15 0x57 + +#define hp_synctarg_8 0x58 +#define hp_synctarg_9 0x59 +#define hp_synctarg_10 0x5A +#define hp_synctarg_11 0x5B + +#define hp_synctarg_4 0x5C +#define hp_synctarg_5 0x5D +#define hp_synctarg_6 0x5E +#define hp_synctarg_7 0x5F + +#define hp_synctarg_0 0x60 +#define hp_synctarg_1 0x61 +#define hp_synctarg_2 0x62 +#define hp_synctarg_3 0x63 + +#define NARROW_SCSI BIT(4) +#define DEFAULT_OFFSET 0x0F + +#define hp_autostart_0 0x64 +#define hp_autostart_1 0x65 +#define hp_autostart_3 0x67 + +#define AUTO_IMMED BIT(5) +#define SELECT BIT(6) +#define END_DATA (BIT(7)+BIT(6)) + +#define hp_gp_reg_0 0x68 +#define hp_gp_reg_1 0x69 +#define hp_gp_reg_3 0x6B + +#define hp_seltimeout 0x6C + +#define TO_4ms 0x67 /* 3.9959ms */ + +#define TO_5ms 0x03 /* 4.9152ms */ +#define TO_10ms 0x07 /* 11.xxxms */ +#define TO_250ms 0x99 /* 250.68ms */ +#define TO_290ms 0xB1 /* 289.99ms */ + +#define hp_clkctrl_0 0x6D + +#define PWR_DWN BIT(6) +#define ACTdeassert BIT(4) +#define CLK_40MHZ (BIT(1) + BIT(0)) + +#define CLKCTRL_DEFAULT (ACTdeassert | CLK_40MHZ) + +#define hp_fiforead 0x6E +#define hp_fifowrite 0x6F + +#define hp_offsetctr 0x70 +#define hp_xferstat 0x71 + +#define FIFO_EMPTY BIT(6) + +#define hp_portctrl_1 0x72 + +#define CHK_SCSI_P BIT(3) +#define HOST_MODE8 BIT(0) + +#define hp_xfer_pad 0x73 + +#define ID_UNLOCK BIT(3) + +#define hp_scsidata_0 0x74 +#define hp_scsidata_1 0x75 + +#define hp_aramBase 0x80 +#define 
BIOS_DATA_OFFSET 0x60 +#define BIOS_RELATIVE_CARD 0x64 + +#define AR3 (BIT(9) + BIT(8)) +#define SDATA BIT(10) + +#define CRD_OP BIT(11) /* Cmp Reg. w/ Data */ + +#define CRR_OP BIT(12) /* Cmp Reg. w. Reg. */ + +#define CPE_OP (BIT(14)+BIT(11)) /* Cmp SCSI phs & Branch EQ */ + +#define CPN_OP (BIT(14)+BIT(12)) /* Cmp SCSI phs & Branch NOT EQ */ + +#define ADATA_OUT 0x00 +#define ADATA_IN BIT(8) +#define ACOMMAND BIT(10) +#define ASTATUS (BIT(10)+BIT(8)) +#define AMSG_OUT (BIT(10)+BIT(9)) +#define AMSG_IN (BIT(10)+BIT(9)+BIT(8)) + +#define BRH_OP BIT(13) /* Branch */ + +#define ALWAYS 0x00 +#define EQUAL BIT(8) +#define NOT_EQ BIT(9) + +#define TCB_OP (BIT(13)+BIT(11)) /* Test condition & branch */ + +#define FIFO_0 BIT(10) + +#define MPM_OP BIT(15) /* Match phase and move data */ + +#define MRR_OP BIT(14) /* Move DReg. to Reg. */ + +#define S_IDREG (BIT(2)+BIT(1)+BIT(0)) + +#define D_AR0 0x00 +#define D_AR1 BIT(0) +#define D_BUCKET (BIT(2) + BIT(1) + BIT(0)) + +#define RAT_OP (BIT(14)+BIT(13)+BIT(11)) + +#define SSI_OP (BIT(15)+BIT(11)) + +#define SSI_ITAR_DISC (ITAR_DISC >> 8) +#define SSI_IDO_STRT (IDO_STRT >> 8) + +#define SSI_ICMD_COMP (ICMD_COMP >> 8) +#define SSI_ITICKLE (ITICKLE >> 8) + +#define SSI_IUNKWN (IUNKWN >> 8) +#define SSI_INO_CC (IUNKWN >> 8) +#define SSI_IRFAIL (IUNKWN >> 8) + +#define NP 0x10 /*Next Phase */ +#define NTCMD 0x02 /*Non- Tagged Command start */ +#define CMDPZ 0x04 /*Command phase */ +#define DINT 0x12 /*Data Out/In interrupt */ +#define DI 0x13 /*Data Out */ +#define DC 0x19 /*Disconnect Message */ +#define ST 0x1D /*Status Phase */ +#define UNKNWN 0x24 /*Unknown bus action */ +#define CC 0x25 /*Command Completion failure */ +#define TICK 0x26 /*New target reselected us. */ +#define SELCHK 0x28 /*Select & Check SCSI ID latch reg */ + +#define ID_MSG_STRT hp_aramBase + 0x00 +#define NON_TAG_ID_MSG hp_aramBase + 0x06 +#define CMD_STRT hp_aramBase + 0x08 +#define SYNC_MSGS hp_aramBase + 0x08 + +#define TAG_STRT 0x00 +#define DISCONNECT_START 0x10/2 +#define END_DATA_START 0x14/2 +#define CMD_ONLY_STRT CMDPZ/2 +#define SELCHK_STRT SELCHK/2 + +#define GET_XFER_CNT(port, xfercnt) {RD_HARP32(port,hp_xfercnt_0,xfercnt); xfercnt &= 0xFFFFFF;} +/* #define GET_XFER_CNT(port, xfercnt) (xfercnt = RD_HARPOON(port+hp_xfercnt_2), \ + xfercnt <<= 16,\ + xfercnt |= RDW_HARPOON((unsigned short)(port+hp_xfercnt_0))) + */ +#define HP_SETUP_ADDR_CNT(port,addr,count) (WRW_HARPOON((port+hp_host_addr_lo), (unsigned short)(addr & 0x0000FFFFL)),\ + addr >>= 16,\ + WRW_HARPOON((port+hp_host_addr_hmi), (unsigned short)(addr & 0x0000FFFFL)),\ + WR_HARP32(port,hp_xfercnt_0,count),\ + WRW_HARPOON((port+hp_xfer_cnt_lo), (unsigned short)(count & 0x0000FFFFL)),\ + count >>= 16,\ + WR_HARPOON(port+hp_xfer_cnt_hi, (count & 0xFF))) + +#define ACCEPT_MSG(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\ + WR_HARPOON(port+hp_scsisig, S_ILL_PH);} + +#define ACCEPT_MSG_ATN(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\ + WR_HARPOON(port+hp_scsisig, (S_ILL_PH|SCSI_ATN));} + +#define DISABLE_AUTO(port) (WR_HARPOON(port+hp_scsireset, PROG_RESET),\ + WR_HARPOON(port+hp_scsireset, 0x00)) + +#define ARAM_ACCESS(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \ + (RD_HARPOON(p_port+hp_page_ctrl) | SGRAM_ARAM))) + +#define SGRAM_ACCESS(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \ + (RD_HARPOON(p_port+hp_page_ctrl) & ~SGRAM_ARAM))) + +#define MDISABLE_INT(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \ + (RD_HARPOON(p_port+hp_page_ctrl) | G_INT_DISABLE))) + +#define MENABLE_INT(p_port) 
(WR_HARPOON(p_port+hp_page_ctrl, \ + (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE))) + +static unsigned char FPT_sisyncn(u32 port, unsigned char p_card, + unsigned char syncFlag); +static void FPT_ssel(u32 port, unsigned char p_card); +static void FPT_sres(u32 port, unsigned char p_card, + struct sccb_card *pCurrCard); +static void FPT_shandem(u32 port, unsigned char p_card, + struct sccb *pCurrSCCB); +static void FPT_stsyncn(u32 port, unsigned char p_card); +static void FPT_sisyncr(u32 port, unsigned char sync_pulse, + unsigned char offset); +static void FPT_sssyncv(u32 p_port, unsigned char p_id, + unsigned char p_sync_value, + struct sccb_mgr_tar_info *currTar_Info); +static void FPT_sresb(u32 port, unsigned char p_card); +static void FPT_sxfrp(u32 p_port, unsigned char p_card); +static void FPT_schkdd(u32 port, unsigned char p_card); +static unsigned char FPT_RdStack(u32 port, unsigned char index); +static void FPT_WrStack(u32 portBase, unsigned char index, + unsigned char data); +static unsigned char FPT_ChkIfChipInitialized(u32 ioPort); + +static void FPT_SendMsg(u32 port, unsigned char message); +static void FPT_queueFlushTargSccb(unsigned char p_card, unsigned char thisTarg, + unsigned char error_code); + +static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card); +static void FPT_RNVRamData(struct nvram_info *pNvRamInfo); + +static unsigned char FPT_siwidn(u32 port, unsigned char p_card); +static void FPT_stwidn(u32 port, unsigned char p_card); +static void FPT_siwidr(u32 port, unsigned char width); + +static void FPT_queueSelectFail(struct sccb_card *pCurrCard, + unsigned char p_card); +static void FPT_queueDisconnect(struct sccb *p_SCCB, unsigned char p_card); +static void FPT_queueCmdComplete(struct sccb_card *pCurrCard, + struct sccb *p_SCCB, unsigned char p_card); +static void FPT_queueSearchSelect(struct sccb_card *pCurrCard, + unsigned char p_card); +static void FPT_queueFlushSccb(unsigned char p_card, unsigned char error_code); +static void FPT_queueAddSccb(struct sccb *p_SCCB, unsigned char card); +static unsigned char FPT_queueFindSccb(struct sccb *p_SCCB, + unsigned char p_card); +static void FPT_utilUpdateResidual(struct sccb *p_SCCB); +static unsigned short FPT_CalcCrc16(unsigned char buffer[]); +static unsigned char FPT_CalcLrc(unsigned char buffer[]); + +static void FPT_Wait1Second(u32 p_port); +static void FPT_Wait(u32 p_port, unsigned char p_delay); +static void FPT_utilEEWriteOnOff(u32 p_port, unsigned char p_mode); +static void FPT_utilEEWrite(u32 p_port, unsigned short ee_data, + unsigned short ee_addr); +static unsigned short FPT_utilEERead(u32 p_port, + unsigned short ee_addr); +static unsigned short FPT_utilEEReadOrg(u32 p_port, + unsigned short ee_addr); +static void FPT_utilEESendCmdAddr(u32 p_port, unsigned char ee_cmd, + unsigned short ee_addr); + +static void FPT_phaseDataOut(u32 port, unsigned char p_card); +static void FPT_phaseDataIn(u32 port, unsigned char p_card); +static void FPT_phaseCommand(u32 port, unsigned char p_card); +static void FPT_phaseStatus(u32 port, unsigned char p_card); +static void FPT_phaseMsgOut(u32 port, unsigned char p_card); +static void FPT_phaseMsgIn(u32 port, unsigned char p_card); +static void FPT_phaseIllegal(u32 port, unsigned char p_card); + +static void FPT_phaseDecode(u32 port, unsigned char p_card); +static void FPT_phaseChkFifo(u32 port, unsigned char p_card); +static void FPT_phaseBusFree(u32 p_port, unsigned char p_card); + +static void FPT_XbowInit(u32 port, unsigned char scamFlg); +static void 
FPT_BusMasterInit(u32 p_port); +static void FPT_DiagEEPROM(u32 p_port); + +static void FPT_dataXferProcessor(u32 port, + struct sccb_card *pCurrCard); +static void FPT_busMstrSGDataXferStart(u32 port, + struct sccb *pCurrSCCB); +static void FPT_busMstrDataXferStart(u32 port, + struct sccb *pCurrSCCB); +static void FPT_hostDataXferAbort(u32 port, unsigned char p_card, + struct sccb *pCurrSCCB); +static void FPT_hostDataXferRestart(struct sccb *currSCCB); + +static unsigned char FPT_SccbMgr_bad_isr(u32 p_port, + unsigned char p_card, + struct sccb_card *pCurrCard, + unsigned short p_int); + +static void FPT_SccbMgrTableInitAll(void); +static void FPT_SccbMgrTableInitCard(struct sccb_card *pCurrCard, + unsigned char p_card); +static void FPT_SccbMgrTableInitTarget(unsigned char p_card, + unsigned char target); + +static void FPT_scini(unsigned char p_card, unsigned char p_our_id, + unsigned char p_power_up); + +static int FPT_scarb(u32 p_port, unsigned char p_sel_type); +static void FPT_scbusf(u32 p_port); +static void FPT_scsel(u32 p_port); +static void FPT_scasid(unsigned char p_card, u32 p_port); +static unsigned char FPT_scxferc(u32 p_port, unsigned char p_data); +static unsigned char FPT_scsendi(u32 p_port, + unsigned char p_id_string[]); +static unsigned char FPT_sciso(u32 p_port, + unsigned char p_id_string[]); +static void FPT_scwirod(u32 p_port, unsigned char p_data_bit); +static void FPT_scwiros(u32 p_port, unsigned char p_data_bit); +static unsigned char FPT_scvalq(unsigned char p_quintet); +static unsigned char FPT_scsell(u32 p_port, unsigned char targ_id); +static void FPT_scwtsel(u32 p_port); +static void FPT_inisci(unsigned char p_card, u32 p_port, + unsigned char p_our_id); +static void FPT_scsavdi(unsigned char p_card, u32 p_port); +static unsigned char FPT_scmachid(unsigned char p_card, + unsigned char p_id_string[]); + +static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card); +static void FPT_autoLoadDefaultMap(u32 p_port); + +static struct sccb_mgr_tar_info FPT_sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR] = + { {{0}} }; +static struct sccb_card FPT_BL_Card[MAX_CARDS] = { {0} }; +static SCCBSCAM_INFO FPT_scamInfo[MAX_SCSI_TAR] = { {{0}} }; +static struct nvram_info FPT_nvRamInfo[MAX_MB_CARDS] = { {0} }; + +static unsigned char FPT_mbCards = 0; +static unsigned char FPT_scamHAString[] = + { 0x63, 0x07, 'B', 'U', 'S', 'L', 'O', 'G', 'I', 'C', + ' ', 'B', 'T', '-', '9', '3', '0', + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 +}; + +static unsigned short FPT_default_intena = 0; + +static void (*FPT_s_PhaseTbl[8]) (u32, unsigned char) = { +0}; + +/*--------------------------------------------------------------------- + * + * Function: FlashPoint_ProbeHostAdapter + * + * Description: Setup and/or Search for cards and return info to caller. 
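+ *              The probe checks the Orion vendor/device ID registers,
+ *              determines whether the chip was already set up by the BIOS
+ *              (FPT_ChkIfChipInitialized via the stack registers), pulls
+ *              the configuration from NVRAM or from the serial EEPROM,
+ *              expands the per-target sync/wide/disconnect settings into
+ *              the si_per_targ_* bit maps, and fills in the automation
+ *              phase table FPT_s_PhaseTbl.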
+ * + *---------------------------------------------------------------------*/ + +static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo) +{ + static unsigned char first_time = 1; + + unsigned char i, j, id, ScamFlg; + unsigned short temp, temp2, temp3, temp4, temp5, temp6; + u32 ioport; + struct nvram_info *pCurrNvRam; + + ioport = pCardInfo->si_baseaddr; + + if (RD_HARPOON(ioport + hp_vendor_id_0) != ORION_VEND_0) + return (int)FAILURE; + + if ((RD_HARPOON(ioport + hp_vendor_id_1) != ORION_VEND_1)) + return (int)FAILURE; + + if ((RD_HARPOON(ioport + hp_device_id_0) != ORION_DEV_0)) + return (int)FAILURE; + + if ((RD_HARPOON(ioport + hp_device_id_1) != ORION_DEV_1)) + return (int)FAILURE; + + if (RD_HARPOON(ioport + hp_rev_num) != 0x0f) { + +/* For new Harpoon then check for sub_device ID LSB + the bits(0-3) must be all ZERO for compatible with + current version of SCCBMgr, else skip this Harpoon + device. */ + + if (RD_HARPOON(ioport + hp_sub_device_id_0) & 0x0f) + return (int)FAILURE; + } + + if (first_time) { + FPT_SccbMgrTableInitAll(); + first_time = 0; + FPT_mbCards = 0; + } + + if (FPT_RdStack(ioport, 0) != 0x00) { + if (FPT_ChkIfChipInitialized(ioport) == 0) { + pCurrNvRam = NULL; + WR_HARPOON(ioport + hp_semaphore, 0x00); + FPT_XbowInit(ioport, 0); /*Must Init the SCSI before attempting */ + FPT_DiagEEPROM(ioport); + } else { + if (FPT_mbCards < MAX_MB_CARDS) { + pCurrNvRam = &FPT_nvRamInfo[FPT_mbCards]; + FPT_mbCards++; + pCurrNvRam->niBaseAddr = ioport; + FPT_RNVRamData(pCurrNvRam); + } else + return (int)FAILURE; + } + } else + pCurrNvRam = NULL; + + WR_HARPOON(ioport + hp_clkctrl_0, CLKCTRL_DEFAULT); + WR_HARPOON(ioport + hp_sys_ctrl, 0x00); + + if (pCurrNvRam) + pCardInfo->si_id = pCurrNvRam->niAdapId; + else + pCardInfo->si_id = + (unsigned + char)(FPT_utilEERead(ioport, + (ADAPTER_SCSI_ID / + 2)) & (unsigned char)0x0FF); + + pCardInfo->si_lun = 0x00; + pCardInfo->si_fw_revision = ORION_FW_REV; + temp2 = 0x0000; + temp3 = 0x0000; + temp4 = 0x0000; + temp5 = 0x0000; + temp6 = 0x0000; + + for (id = 0; id < (16 / 2); id++) { + + if (pCurrNvRam) { + temp = (unsigned short)pCurrNvRam->niSyncTbl[id]; + temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) + + (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000)); + } else + temp = + FPT_utilEERead(ioport, + (unsigned short)((SYNC_RATE_TBL / 2) + + id)); + + for (i = 0; i < 2; temp >>= 8, i++) { + + temp2 >>= 1; + temp3 >>= 1; + temp4 >>= 1; + temp5 >>= 1; + temp6 >>= 1; + switch (temp & 0x3) { + case AUTO_RATE_20: /* Synchronous, 20 mega-transfers/second */ + temp6 |= 0x8000; + fallthrough; + case AUTO_RATE_10: /* Synchronous, 10 mega-transfers/second */ + temp5 |= 0x8000; + fallthrough; + case AUTO_RATE_05: /* Synchronous, 5 mega-transfers/second */ + temp2 |= 0x8000; + fallthrough; + case AUTO_RATE_00: /* Asynchronous */ + break; + } + + if (temp & DISC_ENABLE_BIT) + temp3 |= 0x8000; + + if (temp & WIDE_NEGO_BIT) + temp4 |= 0x8000; + + } + } + + pCardInfo->si_per_targ_init_sync = temp2; + pCardInfo->si_per_targ_no_disc = temp3; + pCardInfo->si_per_targ_wide_nego = temp4; + pCardInfo->si_per_targ_fast_nego = temp5; + pCardInfo->si_per_targ_ultra_nego = temp6; + + if (pCurrNvRam) + i = pCurrNvRam->niSysConf; + else + i = (unsigned + char)(FPT_utilEERead(ioport, (SYSTEM_CONFIG / 2))); + + if (pCurrNvRam) + ScamFlg = pCurrNvRam->niScamConf; + else + ScamFlg = + (unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2); + + pCardInfo->si_mflags = 0x0000; + + if (i & 0x01) + pCardInfo->si_mflags |= SCSI_PARITY_ENA; + + if (!(i & 
0x02)) + pCardInfo->si_mflags |= SOFT_RESET; + + if (i & 0x10) + pCardInfo->si_mflags |= EXTENDED_TRANSLATION; + + if (ScamFlg & SCAM_ENABLED) + pCardInfo->si_mflags |= FLAG_SCAM_ENABLED; + + if (ScamFlg & SCAM_LEVEL2) + pCardInfo->si_mflags |= FLAG_SCAM_LEVEL2; + + j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L); + if (i & 0x04) { + j |= SCSI_TERM_ENA_L; + } + WR_HARPOON(ioport + hp_bm_ctrl, j); + + j = (RD_HARPOON(ioport + hp_ee_ctrl) & ~SCSI_TERM_ENA_H); + if (i & 0x08) { + j |= SCSI_TERM_ENA_H; + } + WR_HARPOON(ioport + hp_ee_ctrl, j); + + if (!(RD_HARPOON(ioport + hp_page_ctrl) & NARROW_SCSI_CARD)) + + pCardInfo->si_mflags |= SUPPORT_16TAR_32LUN; + + pCardInfo->si_card_family = HARPOON_FAMILY; + pCardInfo->si_bustype = BUSTYPE_PCI; + + if (pCurrNvRam) { + pCardInfo->si_card_model[0] = '9'; + switch (pCurrNvRam->niModel & 0x0f) { + case MODEL_LT: + pCardInfo->si_card_model[1] = '3'; + pCardInfo->si_card_model[2] = '0'; + break; + case MODEL_LW: + pCardInfo->si_card_model[1] = '5'; + pCardInfo->si_card_model[2] = '0'; + break; + case MODEL_DL: + pCardInfo->si_card_model[1] = '3'; + pCardInfo->si_card_model[2] = '2'; + break; + case MODEL_DW: + pCardInfo->si_card_model[1] = '5'; + pCardInfo->si_card_model[2] = '2'; + break; + } + } else { + temp = FPT_utilEERead(ioport, (MODEL_NUMB_0 / 2)); + pCardInfo->si_card_model[0] = (unsigned char)(temp >> 8); + temp = FPT_utilEERead(ioport, (MODEL_NUMB_2 / 2)); + + pCardInfo->si_card_model[1] = (unsigned char)(temp & 0x00FF); + pCardInfo->si_card_model[2] = (unsigned char)(temp >> 8); + } + + if (pCardInfo->si_card_model[1] == '3') { + if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7)) + pCardInfo->si_mflags |= LOW_BYTE_TERM; + } else if (pCardInfo->si_card_model[2] == '0') { + temp = RD_HARPOON(ioport + hp_xfer_pad); + WR_HARPOON(ioport + hp_xfer_pad, (temp & ~BIT(4))); + if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7)) + pCardInfo->si_mflags |= LOW_BYTE_TERM; + WR_HARPOON(ioport + hp_xfer_pad, (temp | BIT(4))); + if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7)) + pCardInfo->si_mflags |= HIGH_BYTE_TERM; + WR_HARPOON(ioport + hp_xfer_pad, temp); + } else { + temp = RD_HARPOON(ioport + hp_ee_ctrl); + temp2 = RD_HARPOON(ioport + hp_xfer_pad); + WR_HARPOON(ioport + hp_ee_ctrl, (temp | SEE_CS)); + WR_HARPOON(ioport + hp_xfer_pad, (temp2 | BIT(4))); + temp3 = 0; + for (i = 0; i < 8; i++) { + temp3 <<= 1; + if (!(RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))) + temp3 |= 1; + WR_HARPOON(ioport + hp_xfer_pad, (temp2 & ~BIT(4))); + WR_HARPOON(ioport + hp_xfer_pad, (temp2 | BIT(4))); + } + WR_HARPOON(ioport + hp_ee_ctrl, temp); + WR_HARPOON(ioport + hp_xfer_pad, temp2); + if (!(temp3 & BIT(7))) + pCardInfo->si_mflags |= LOW_BYTE_TERM; + if (!(temp3 & BIT(6))) + pCardInfo->si_mflags |= HIGH_BYTE_TERM; + } + + ARAM_ACCESS(ioport); + + for (i = 0; i < 4; i++) { + + pCardInfo->si_XlatInfo[i] = + RD_HARPOON(ioport + hp_aramBase + BIOS_DATA_OFFSET + i); + } + + /* return with -1 if no sort, else return with + logical card number sorted by BIOS (zero-based) */ + + pCardInfo->si_relative_cardnum = + (unsigned + char)(RD_HARPOON(ioport + hp_aramBase + BIOS_RELATIVE_CARD) - 1); + + SGRAM_ACCESS(ioport); + + FPT_s_PhaseTbl[0] = FPT_phaseDataOut; + FPT_s_PhaseTbl[1] = FPT_phaseDataIn; + FPT_s_PhaseTbl[2] = FPT_phaseIllegal; + FPT_s_PhaseTbl[3] = FPT_phaseIllegal; + FPT_s_PhaseTbl[4] = FPT_phaseCommand; + FPT_s_PhaseTbl[5] = FPT_phaseStatus; + FPT_s_PhaseTbl[6] = FPT_phaseMsgOut; + FPT_s_PhaseTbl[7] = FPT_phaseMsgIn; + + pCardInfo->si_present = 0x01; + + return 0; +} + 
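For reference, here is a minimal, self-contained sketch of how the packed per-target sync byte is expanded in FlashPoint_ProbeHostAdapter above. One NVRAM/EEPROM byte carries the settings for two targets; the spreading expression (copied verbatim from the driver) moves each 2-bit rate field into the low bits of its own byte so the loop can peel the two targets off with temp >>= 8. The sample value, the rate_name labels and the printout are illustrative only; the option bits in the upper part of each byte are the ones the driver tests with DISC_ENABLE_BIT and WIDE_NEGO_BIT, whose values are defined earlier in the file.

#include <stdio.h>

/* Labels follow the comments on the AUTO_RATE_* cases in the driver's
 * switch (temp & 0x3): asynchronous, 5, 10 and 20 mega-transfers/second. */
static const char *rate_name[4] = { "async", "5 MT/s", "10 MT/s", "20 MT/s" };

int main(void)
{
	/* Hypothetical packed byte: two targets, two rate bits plus two
	 * option bits (disconnect / wide negotiation) per target. */
	unsigned short temp = 0x5B;
	int i;

	/* Spreading expression from the driver: afterwards the low byte holds
	 * the first target's fields and the high byte the second target's. */
	temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) +
	       (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000));

	for (i = 0; i < 2; temp >>= 8, i++)
		printf("target %d: rate=%s option-bits=0x%02x\n",
		       i, rate_name[temp & 0x3], (unsigned)(temp & 0xc0));

	return 0;
}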
+/*--------------------------------------------------------------------- + * + * Function: FlashPoint_HardwareResetHostAdapter + * + * Description: Setup adapter for normal operation (hard reset). + * + *---------------------------------------------------------------------*/ + +static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info + *pCardInfo) +{ + struct sccb_card *CurrCard = NULL; + struct nvram_info *pCurrNvRam; + unsigned char i, j, thisCard, ScamFlg; + unsigned short temp, sync_bit_map, id; + u32 ioport; + + ioport = pCardInfo->si_baseaddr; + + for (thisCard = 0; thisCard <= MAX_CARDS; thisCard++) { + + if (thisCard == MAX_CARDS) + return (void *)FAILURE; + + if (FPT_BL_Card[thisCard].ioPort == ioport) { + + CurrCard = &FPT_BL_Card[thisCard]; + FPT_SccbMgrTableInitCard(CurrCard, thisCard); + break; + } + + else if (FPT_BL_Card[thisCard].ioPort == 0x00) { + + FPT_BL_Card[thisCard].ioPort = ioport; + CurrCard = &FPT_BL_Card[thisCard]; + + if (FPT_mbCards) + for (i = 0; i < FPT_mbCards; i++) { + if (CurrCard->ioPort == + FPT_nvRamInfo[i].niBaseAddr) + CurrCard->pNvRamInfo = + &FPT_nvRamInfo[i]; + } + FPT_SccbMgrTableInitCard(CurrCard, thisCard); + CurrCard->cardIndex = thisCard; + CurrCard->cardInfo = pCardInfo; + + break; + } + } + + pCurrNvRam = CurrCard->pNvRamInfo; + + if (pCurrNvRam) { + ScamFlg = pCurrNvRam->niScamConf; + } else { + ScamFlg = + (unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2); + } + + FPT_BusMasterInit(ioport); + FPT_XbowInit(ioport, ScamFlg); + + FPT_autoLoadDefaultMap(ioport); + + for (i = 0, id = 0x01; i != pCardInfo->si_id; i++, id <<= 1) { + } + + WR_HARPOON(ioport + hp_selfid_0, id); + WR_HARPOON(ioport + hp_selfid_1, 0x00); + WR_HARPOON(ioport + hp_arb_id, pCardInfo->si_id); + CurrCard->ourId = pCardInfo->si_id; + + i = (unsigned char)pCardInfo->si_mflags; + if (i & SCSI_PARITY_ENA) + WR_HARPOON(ioport + hp_portctrl_1, (HOST_MODE8 | CHK_SCSI_P)); + + j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L); + if (i & LOW_BYTE_TERM) + j |= SCSI_TERM_ENA_L; + WR_HARPOON(ioport + hp_bm_ctrl, j); + + j = (RD_HARPOON(ioport + hp_ee_ctrl) & ~SCSI_TERM_ENA_H); + if (i & HIGH_BYTE_TERM) + j |= SCSI_TERM_ENA_H; + WR_HARPOON(ioport + hp_ee_ctrl, j); + + if (!(pCardInfo->si_mflags & SOFT_RESET)) { + + FPT_sresb(ioport, thisCard); + + FPT_scini(thisCard, pCardInfo->si_id, 0); + } + + if (pCardInfo->si_mflags & POST_ALL_UNDERRRUNS) + CurrCard->globalFlags |= F_NO_FILTER; + + if (pCurrNvRam) { + if (pCurrNvRam->niSysConf & 0x10) + CurrCard->globalFlags |= F_GREEN_PC; + } else { + if (FPT_utilEERead(ioport, (SYSTEM_CONFIG / 2)) & GREEN_PC_ENA) + CurrCard->globalFlags |= F_GREEN_PC; + } + + /* Set global flag to indicate Re-Negotiation to be done on all + ckeck condition */ + if (pCurrNvRam) { + if (pCurrNvRam->niScsiConf & 0x04) + CurrCard->globalFlags |= F_DO_RENEGO; + } else { + if (FPT_utilEERead(ioport, (SCSI_CONFIG / 2)) & RENEGO_ENA) + CurrCard->globalFlags |= F_DO_RENEGO; + } + + if (pCurrNvRam) { + if (pCurrNvRam->niScsiConf & 0x08) + CurrCard->globalFlags |= F_CONLUN_IO; + } else { + if (FPT_utilEERead(ioport, (SCSI_CONFIG / 2)) & CONNIO_ENA) + CurrCard->globalFlags |= F_CONLUN_IO; + } + + temp = pCardInfo->si_per_targ_no_disc; + + for (i = 0, id = 1; i < MAX_SCSI_TAR; i++, id <<= 1) { + + if (temp & id) + FPT_sccbMgrTbl[thisCard][i].TarStatus |= TAR_ALLOW_DISC; + } + + sync_bit_map = 0x0001; + + for (id = 0; id < (MAX_SCSI_TAR / 2); id++) { + + if (pCurrNvRam) { + temp = (unsigned short)pCurrNvRam->niSyncTbl[id]; + temp = ((temp & 0x03) + 
((temp << 4) & 0xc0)) + + (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000)); + } else + temp = + FPT_utilEERead(ioport, + (unsigned short)((SYNC_RATE_TBL / 2) + + id)); + + for (i = 0; i < 2; temp >>= 8, i++) { + + if (pCardInfo->si_per_targ_init_sync & sync_bit_map) { + + FPT_sccbMgrTbl[thisCard][id * 2 + + i].TarEEValue = + (unsigned char)temp; + } + + else { + FPT_sccbMgrTbl[thisCard][id * 2 + + i].TarStatus |= + SYNC_SUPPORTED; + FPT_sccbMgrTbl[thisCard][id * 2 + + i].TarEEValue = + (unsigned char)(temp & ~EE_SYNC_MASK); + } + +/* if ((pCardInfo->si_per_targ_wide_nego & sync_bit_map) || + (id*2+i >= 8)){ +*/ + if (pCardInfo->si_per_targ_wide_nego & sync_bit_map) { + + FPT_sccbMgrTbl[thisCard][id * 2 + + i].TarEEValue |= + EE_WIDE_SCSI; + + } + + else { /* NARROW SCSI */ + FPT_sccbMgrTbl[thisCard][id * 2 + + i].TarStatus |= + WIDE_NEGOCIATED; + } + + sync_bit_map <<= 1; + + } + } + + WR_HARPOON((ioport + hp_semaphore), + (unsigned char)(RD_HARPOON((ioport + hp_semaphore)) | + SCCB_MGR_PRESENT)); + + return (void *)CurrCard; +} + +static void FlashPoint_ReleaseHostAdapter(void *pCurrCard) +{ + unsigned char i; + u32 portBase; + u32 regOffset; + u32 scamData; + u32 *pScamTbl; + struct nvram_info *pCurrNvRam; + + pCurrNvRam = ((struct sccb_card *)pCurrCard)->pNvRamInfo; + + if (pCurrNvRam) { + FPT_WrStack(pCurrNvRam->niBaseAddr, 0, pCurrNvRam->niModel); + FPT_WrStack(pCurrNvRam->niBaseAddr, 1, pCurrNvRam->niSysConf); + FPT_WrStack(pCurrNvRam->niBaseAddr, 2, pCurrNvRam->niScsiConf); + FPT_WrStack(pCurrNvRam->niBaseAddr, 3, pCurrNvRam->niScamConf); + FPT_WrStack(pCurrNvRam->niBaseAddr, 4, pCurrNvRam->niAdapId); + + for (i = 0; i < MAX_SCSI_TAR / 2; i++) + FPT_WrStack(pCurrNvRam->niBaseAddr, + (unsigned char)(i + 5), + pCurrNvRam->niSyncTbl[i]); + + portBase = pCurrNvRam->niBaseAddr; + + for (i = 0; i < MAX_SCSI_TAR; i++) { + regOffset = hp_aramBase + 64 + i * 4; + pScamTbl = (u32 *)&pCurrNvRam->niScamTbl[i]; + scamData = *pScamTbl; + WR_HARP32(portBase, regOffset, scamData); + } + + } else { + FPT_WrStack(((struct sccb_card *)pCurrCard)->ioPort, 0, 0); + } +} + +static void FPT_RNVRamData(struct nvram_info *pNvRamInfo) +{ + unsigned char i; + u32 portBase; + u32 regOffset; + u32 scamData; + u32 *pScamTbl; + + pNvRamInfo->niModel = FPT_RdStack(pNvRamInfo->niBaseAddr, 0); + pNvRamInfo->niSysConf = FPT_RdStack(pNvRamInfo->niBaseAddr, 1); + pNvRamInfo->niScsiConf = FPT_RdStack(pNvRamInfo->niBaseAddr, 2); + pNvRamInfo->niScamConf = FPT_RdStack(pNvRamInfo->niBaseAddr, 3); + pNvRamInfo->niAdapId = FPT_RdStack(pNvRamInfo->niBaseAddr, 4); + + for (i = 0; i < MAX_SCSI_TAR / 2; i++) + pNvRamInfo->niSyncTbl[i] = + FPT_RdStack(pNvRamInfo->niBaseAddr, (unsigned char)(i + 5)); + + portBase = pNvRamInfo->niBaseAddr; + + for (i = 0; i < MAX_SCSI_TAR; i++) { + regOffset = hp_aramBase + 64 + i * 4; + RD_HARP32(portBase, regOffset, scamData); + pScamTbl = (u32 *)&pNvRamInfo->niScamTbl[i]; + *pScamTbl = scamData; + } + +} + +static unsigned char FPT_RdStack(u32 portBase, unsigned char index) +{ + WR_HARPOON(portBase + hp_stack_addr, index); + return RD_HARPOON(portBase + hp_stack_data); +} + +static void FPT_WrStack(u32 portBase, unsigned char index, unsigned char data) +{ + WR_HARPOON(portBase + hp_stack_addr, index); + WR_HARPOON(portBase + hp_stack_data, data); +} + +static unsigned char FPT_ChkIfChipInitialized(u32 ioPort) +{ + if ((RD_HARPOON(ioPort + hp_arb_id) & 0x0f) != FPT_RdStack(ioPort, 4)) + return 0; + if ((RD_HARPOON(ioPort + hp_clkctrl_0) & CLKCTRL_DEFAULT) + != CLKCTRL_DEFAULT) + return 0; + if 
((RD_HARPOON(ioPort + hp_seltimeout) == TO_250ms) || + (RD_HARPOON(ioPort + hp_seltimeout) == TO_290ms)) + return 1; + return 0; + +} + +/*--------------------------------------------------------------------- + * + * Function: FlashPoint_StartCCB + * + * Description: Start a command pointed to by p_Sccb. When the + * command is completed it will be returned via the + * callback function. + * + *---------------------------------------------------------------------*/ +static void FlashPoint_StartCCB(void *curr_card, struct sccb *p_Sccb) +{ + u32 ioport; + unsigned char thisCard, lun; + struct sccb *pSaveSccb; + CALL_BK_FN callback; + struct sccb_card *pCurrCard = curr_card; + + thisCard = pCurrCard->cardIndex; + ioport = pCurrCard->ioPort; + + if ((p_Sccb->TargID >= MAX_SCSI_TAR) || (p_Sccb->Lun >= MAX_LUN)) { + + p_Sccb->HostStatus = SCCB_COMPLETE; + p_Sccb->SccbStatus = SCCB_ERROR; + callback = (CALL_BK_FN) p_Sccb->SccbCallback; + if (callback) + callback(p_Sccb); + + return; + } + + FPT_sinits(p_Sccb, thisCard); + + if (!pCurrCard->cmdCounter) { + WR_HARPOON(ioport + hp_semaphore, + (RD_HARPOON(ioport + hp_semaphore) + | SCCB_MGR_ACTIVE)); + + if (pCurrCard->globalFlags & F_GREEN_PC) { + WR_HARPOON(ioport + hp_clkctrl_0, CLKCTRL_DEFAULT); + WR_HARPOON(ioport + hp_sys_ctrl, 0x00); + } + } + + pCurrCard->cmdCounter++; + + if (RD_HARPOON(ioport + hp_semaphore) & BIOS_IN_USE) { + + WR_HARPOON(ioport + hp_semaphore, + (RD_HARPOON(ioport + hp_semaphore) + | TICKLE_ME)); + if (p_Sccb->OperationCode == RESET_COMMAND) { + pSaveSccb = + pCurrCard->currentSCCB; + pCurrCard->currentSCCB = p_Sccb; + FPT_queueSelectFail(&FPT_BL_Card[thisCard], thisCard); + pCurrCard->currentSCCB = + pSaveSccb; + } else { + FPT_queueAddSccb(p_Sccb, thisCard); + } + } + + else if ((RD_HARPOON(ioport + hp_page_ctrl) & G_INT_DISABLE)) { + + if (p_Sccb->OperationCode == RESET_COMMAND) { + pSaveSccb = + pCurrCard->currentSCCB; + pCurrCard->currentSCCB = p_Sccb; + FPT_queueSelectFail(&FPT_BL_Card[thisCard], thisCard); + pCurrCard->currentSCCB = + pSaveSccb; + } else { + FPT_queueAddSccb(p_Sccb, thisCard); + } + } + + else { + + MDISABLE_INT(ioport); + + if ((pCurrCard->globalFlags & F_CONLUN_IO) && + ((FPT_sccbMgrTbl[thisCard][p_Sccb->TargID]. + TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)) + lun = p_Sccb->Lun; + else + lun = 0; + if ((pCurrCard->currentSCCB == NULL) && + (FPT_sccbMgrTbl[thisCard][p_Sccb->TargID].TarSelQ_Cnt == 0) + && (FPT_sccbMgrTbl[thisCard][p_Sccb->TargID].TarLUNBusy[lun] + == 0)) { + + pCurrCard->currentSCCB = p_Sccb; + FPT_ssel(p_Sccb->SccbIOPort, thisCard); + } + + else { + + if (p_Sccb->OperationCode == RESET_COMMAND) { + pSaveSccb = pCurrCard->currentSCCB; + pCurrCard->currentSCCB = p_Sccb; + FPT_queueSelectFail(&FPT_BL_Card[thisCard], + thisCard); + pCurrCard->currentSCCB = pSaveSccb; + } else { + FPT_queueAddSccb(p_Sccb, thisCard); + } + } + + MENABLE_INT(ioport); + } + +} + +/*--------------------------------------------------------------------- + * + * Function: FlashPoint_AbortCCB + * + * Description: Abort the command pointed to by p_Sccb. When the + * command is completed it will be returned via the + * callback function. 
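+ *              If the SCCB is still queued for selection it is removed
+ *              (FPT_queueFindSccb) and completed at once with SCCB_ABORT
+ *              through its callback.  The current command or a
+ *              disconnected untagged command is simply flagged SCCB_ABORT;
+ *              a disconnected tagged command is additionally marked
+ *              ABORT_ST and the target is selected again to deliver an
+ *              ABORT_TASK message.  Returns 0 when the abort was accepted,
+ *              -1 otherwise.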
+ * + *---------------------------------------------------------------------*/ +static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb) +{ + u32 ioport; + + unsigned char thisCard; + CALL_BK_FN callback; + struct sccb *pSaveSCCB; + struct sccb_mgr_tar_info *currTar_Info; + + ioport = ((struct sccb_card *)pCurrCard)->ioPort; + + thisCard = ((struct sccb_card *)pCurrCard)->cardIndex; + + if (!(RD_HARPOON(ioport + hp_page_ctrl) & G_INT_DISABLE)) { + + if (FPT_queueFindSccb(p_Sccb, thisCard)) { + + ((struct sccb_card *)pCurrCard)->cmdCounter--; + + if (!((struct sccb_card *)pCurrCard)->cmdCounter) + WR_HARPOON(ioport + hp_semaphore, + (RD_HARPOON(ioport + hp_semaphore) + & (unsigned + char)(~(SCCB_MGR_ACTIVE | + TICKLE_ME)))); + + p_Sccb->SccbStatus = SCCB_ABORT; + callback = p_Sccb->SccbCallback; + callback(p_Sccb); + + return 0; + } + + else { + if (((struct sccb_card *)pCurrCard)->currentSCCB == + p_Sccb) { + p_Sccb->SccbStatus = SCCB_ABORT; + return 0; + + } + + else { + if (p_Sccb->Sccb_tag) { + MDISABLE_INT(ioport); + if (((struct sccb_card *)pCurrCard)-> + discQ_Tbl[p_Sccb->Sccb_tag] == + p_Sccb) { + p_Sccb->SccbStatus = SCCB_ABORT; + p_Sccb->Sccb_scsistat = + ABORT_ST; + p_Sccb->Sccb_scsimsg = + ABORT_TASK; + + if (((struct sccb_card *) + pCurrCard)->currentSCCB == + NULL) { + ((struct sccb_card *) + pCurrCard)-> + currentSCCB = p_Sccb; + FPT_ssel(ioport, + thisCard); + } else { + pSaveSCCB = + ((struct sccb_card + *)pCurrCard)-> + currentSCCB; + ((struct sccb_card *) + pCurrCard)-> + currentSCCB = p_Sccb; + FPT_queueSelectFail((struct sccb_card *)pCurrCard, thisCard); + ((struct sccb_card *) + pCurrCard)-> + currentSCCB = pSaveSCCB; + } + } + MENABLE_INT(ioport); + return 0; + } else { + currTar_Info = + &FPT_sccbMgrTbl[thisCard][p_Sccb-> + TargID]; + + if (FPT_BL_Card[thisCard]. + discQ_Tbl[currTar_Info-> + LunDiscQ_Idx[p_Sccb->Lun]] + == p_Sccb) { + p_Sccb->SccbStatus = SCCB_ABORT; + return 0; + } + } + } + } + } + return -1; +} + +/*--------------------------------------------------------------------- + * + * Function: FlashPoint_InterruptPending + * + * Description: Do a quick check to determine if there is a pending + * interrupt for this card and disable the IRQ Pin if so. + * + *---------------------------------------------------------------------*/ +static unsigned char FlashPoint_InterruptPending(void *pCurrCard) +{ + u32 ioport; + + ioport = ((struct sccb_card *)pCurrCard)->ioPort; + + if (RD_HARPOON(ioport + hp_int_status) & INT_ASSERTED) { + return 1; + } + + else + + return 0; +} + +/*--------------------------------------------------------------------- + * + * Function: FlashPoint_HandleInterrupt + * + * Description: This is our entry point when an interrupt is generated + * by the card and the upper level driver passes it on to + * us. 
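+ *              The handler loops on hp_intstat and dispatches on command
+ *              completion, target disconnect, reselection, data-out start,
+ *              unknown-phase/program-halt, transfer-count-zero, bus-free
+ *              and tickle interrupts; FIFO, timeout, reset, SCAM-selection
+ *              and bus-master errors go to FPT_SccbMgr_bad_isr.  A return
+ *              value of 0xFE flags the rev D/E disconnect timing problem,
+ *              after which the caller is expected to reset the adapter.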
+ * + *---------------------------------------------------------------------*/ +static int FlashPoint_HandleInterrupt(void *pcard) +{ + struct sccb *currSCCB; + unsigned char thisCard, result, bm_status; + unsigned short hp_int; + unsigned char i, target; + struct sccb_card *pCurrCard = pcard; + u32 ioport; + + thisCard = pCurrCard->cardIndex; + ioport = pCurrCard->ioPort; + + MDISABLE_INT(ioport); + + if (RD_HARPOON(ioport + hp_int_status) & EXT_STATUS_ON) + bm_status = RD_HARPOON(ioport + hp_ext_status) & + (unsigned char)BAD_EXT_STATUS; + else + bm_status = 0; + + WR_HARPOON(ioport + hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT)); + + while ((hp_int = RDW_HARPOON((ioport + hp_intstat)) & + FPT_default_intena) | bm_status) { + + currSCCB = pCurrCard->currentSCCB; + + if (hp_int & (FIFO | TIMEOUT | RESET | SCAM_SEL) || bm_status) { + result = + FPT_SccbMgr_bad_isr(ioport, thisCard, pCurrCard, + hp_int); + WRW_HARPOON((ioport + hp_intstat), + (FIFO | TIMEOUT | RESET | SCAM_SEL)); + bm_status = 0; + + if (result) { + + MENABLE_INT(ioport); + return result; + } + } + + else if (hp_int & ICMD_COMP) { + + if (!(hp_int & BUS_FREE)) { + /* Wait for the BusFree before starting a new command. We + must also check for being reselected since the BusFree + may not show up if another device reselects us in 1.5us or + less. SRR Wednesday, 3/8/1995. + */ + while (! + (RDW_HARPOON((ioport + hp_intstat)) & + (BUS_FREE | RSEL))) ; + } + + if (pCurrCard->globalFlags & F_HOST_XFER_ACT) + + FPT_phaseChkFifo(ioport, thisCard); + +/* WRW_HARPOON((ioport+hp_intstat), + (BUS_FREE | ICMD_COMP | ITAR_DISC | XFER_CNT_0)); + */ + + WRW_HARPOON((ioport + hp_intstat), CLR_ALL_INT_1); + + FPT_autoCmdCmplt(ioport, thisCard); + + } + + else if (hp_int & ITAR_DISC) { + + if (pCurrCard->globalFlags & F_HOST_XFER_ACT) + FPT_phaseChkFifo(ioport, thisCard); + + if (RD_HARPOON(ioport + hp_gp_reg_1) == + SAVE_POINTERS) { + + WR_HARPOON(ioport + hp_gp_reg_1, 0x00); + currSCCB->Sccb_XferState |= F_NO_DATA_YET; + + currSCCB->Sccb_savedATC = currSCCB->Sccb_ATC; + } + + currSCCB->Sccb_scsistat = DISCONNECT_ST; + FPT_queueDisconnect(currSCCB, thisCard); + + /* Wait for the BusFree before starting a new command. We + must also check for being reselected since the BusFree + may not show up if another device reselects us in 1.5us or + less. SRR Wednesday, 3/8/1995. + */ + while (! + (RDW_HARPOON((ioport + hp_intstat)) & + (BUS_FREE | RSEL)) + && !((RDW_HARPOON((ioport + hp_intstat)) & PHASE) + && RD_HARPOON((ioport + hp_scsisig)) == + (SCSI_BSY | SCSI_REQ | SCSI_CD | SCSI_MSG | + SCSI_IOBIT))) ; + + /* + The additional loop exit condition above detects a timing problem + with the revision D/E harpoon chips. The caller should reset the + host adapter to recover when 0xFE is returned. + */ + if (! 
+ (RDW_HARPOON((ioport + hp_intstat)) & + (BUS_FREE | RSEL))) { + MENABLE_INT(ioport); + return 0xFE; + } + + WRW_HARPOON((ioport + hp_intstat), + (BUS_FREE | ITAR_DISC)); + + pCurrCard->globalFlags |= F_NEW_SCCB_CMD; + + } + + else if (hp_int & RSEL) { + + WRW_HARPOON((ioport + hp_intstat), + (PROG_HLT | RSEL | PHASE | BUS_FREE)); + + if (RDW_HARPOON((ioport + hp_intstat)) & ITAR_DISC) { + if (pCurrCard->globalFlags & F_HOST_XFER_ACT) + FPT_phaseChkFifo(ioport, thisCard); + + if (RD_HARPOON(ioport + hp_gp_reg_1) == + SAVE_POINTERS) { + WR_HARPOON(ioport + hp_gp_reg_1, 0x00); + currSCCB->Sccb_XferState |= + F_NO_DATA_YET; + currSCCB->Sccb_savedATC = + currSCCB->Sccb_ATC; + } + + WRW_HARPOON((ioport + hp_intstat), + (BUS_FREE | ITAR_DISC)); + currSCCB->Sccb_scsistat = DISCONNECT_ST; + FPT_queueDisconnect(currSCCB, thisCard); + } + + FPT_sres(ioport, thisCard, pCurrCard); + FPT_phaseDecode(ioport, thisCard); + + } + + else if ((hp_int & IDO_STRT) && (!(hp_int & BUS_FREE))) { + + WRW_HARPOON((ioport + hp_intstat), + (IDO_STRT | XFER_CNT_0)); + FPT_phaseDecode(ioport, thisCard); + + } + + else if ((hp_int & IUNKWN) || (hp_int & PROG_HLT)) { + WRW_HARPOON((ioport + hp_intstat), + (PHASE | IUNKWN | PROG_HLT)); + if ((RD_HARPOON(ioport + hp_prgmcnt_0) & (unsigned char) + 0x3f) < (unsigned char)SELCHK) { + FPT_phaseDecode(ioport, thisCard); + } else { + /* Harpoon problem some SCSI target device respond to selection + with short BUSY pulse (<400ns) this will make the Harpoon is not able + to latch the correct Target ID into reg. x53. + The work around require to correct this reg. But when write to this + reg. (0x53) also increment the FIFO write addr reg (0x6f), thus we + need to read this reg first then restore it later. After update to 0x53 */ + + i = (unsigned + char)(RD_HARPOON(ioport + hp_fifowrite)); + target = + (unsigned + char)(RD_HARPOON(ioport + hp_gp_reg_3)); + WR_HARPOON(ioport + hp_xfer_pad, + (unsigned char)ID_UNLOCK); + WR_HARPOON(ioport + hp_select_id, + (unsigned char)(target | target << + 4)); + WR_HARPOON(ioport + hp_xfer_pad, + (unsigned char)0x00); + WR_HARPOON(ioport + hp_fifowrite, i); + WR_HARPOON(ioport + hp_autostart_3, + (AUTO_IMMED + TAG_STRT)); + } + } + + else if (hp_int & XFER_CNT_0) { + + WRW_HARPOON((ioport + hp_intstat), XFER_CNT_0); + + FPT_schkdd(ioport, thisCard); + + } + + else if (hp_int & BUS_FREE) { + + WRW_HARPOON((ioport + hp_intstat), BUS_FREE); + + if (pCurrCard->globalFlags & F_HOST_XFER_ACT) { + + FPT_hostDataXferAbort(ioport, thisCard, + currSCCB); + } + + FPT_phaseBusFree(ioport, thisCard); + } + + else if (hp_int & ITICKLE) { + + WRW_HARPOON((ioport + hp_intstat), ITICKLE); + pCurrCard->globalFlags |= F_NEW_SCCB_CMD; + } + + if (((struct sccb_card *)pCurrCard)-> + globalFlags & F_NEW_SCCB_CMD) { + + pCurrCard->globalFlags &= ~F_NEW_SCCB_CMD; + + if (pCurrCard->currentSCCB == NULL) + FPT_queueSearchSelect(pCurrCard, thisCard); + + if (pCurrCard->currentSCCB != NULL) { + pCurrCard->globalFlags &= ~F_NEW_SCCB_CMD; + FPT_ssel(ioport, thisCard); + } + + break; + + } + + } /*end while */ + + MENABLE_INT(ioport); + + return 0; +} + +/*--------------------------------------------------------------------- + * + * Function: Sccb_bad_isr + * + * Description: Some type of interrupt has occurred which is slightly + * out of the ordinary. We will now decode it fully, in + * this routine. This is broken up in an attempt to save + * processing time. 
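+ *              Covers bus-master/PCI error status, SCSI bus reset (the
+ *              Xbow and SCAM state are re-initialized and 0xFF is
+ *              returned), FIFO interrupts, selection timeouts (the
+ *              selecting SCCB completes with SCCB_SELECTION_TIMEOUT and
+ *              any sync/wide agreement for that target is dropped) and
+ *              SCAM selection.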
+ * + *---------------------------------------------------------------------*/ +static unsigned char FPT_SccbMgr_bad_isr(u32 p_port, unsigned char p_card, + struct sccb_card *pCurrCard, + unsigned short p_int) +{ + unsigned char temp, ScamFlg; + struct sccb_mgr_tar_info *currTar_Info; + struct nvram_info *pCurrNvRam; + + if (RD_HARPOON(p_port + hp_ext_status) & + (BM_FORCE_OFF | PCI_DEV_TMOUT | BM_PARITY_ERR | PIO_OVERRUN)) { + + if (pCurrCard->globalFlags & F_HOST_XFER_ACT) { + + FPT_hostDataXferAbort(p_port, p_card, + pCurrCard->currentSCCB); + } + + if (RD_HARPOON(p_port + hp_pci_stat_cfg) & REC_MASTER_ABORT) + { + WR_HARPOON(p_port + hp_pci_stat_cfg, + (RD_HARPOON(p_port + hp_pci_stat_cfg) & + ~REC_MASTER_ABORT)); + + WR_HARPOON(p_port + hp_host_blk_cnt, 0x00); + + } + + if (pCurrCard->currentSCCB != NULL) { + + if (!pCurrCard->currentSCCB->HostStatus) + pCurrCard->currentSCCB->HostStatus = + SCCB_BM_ERR; + + FPT_sxfrp(p_port, p_card); + + temp = (unsigned char)(RD_HARPOON(p_port + hp_ee_ctrl) & + (EXT_ARB_ACK | SCSI_TERM_ENA_H)); + WR_HARPOON(p_port + hp_ee_ctrl, + ((unsigned char)temp | SEE_MS | SEE_CS)); + WR_HARPOON(p_port + hp_ee_ctrl, temp); + + if (! + (RDW_HARPOON((p_port + hp_intstat)) & + (BUS_FREE | RESET))) { + FPT_phaseDecode(p_port, p_card); + } + } + } + + else if (p_int & RESET) { + + WR_HARPOON(p_port + hp_clkctrl_0, CLKCTRL_DEFAULT); + WR_HARPOON(p_port + hp_sys_ctrl, 0x00); + if (pCurrCard->currentSCCB != NULL) { + + if (pCurrCard->globalFlags & F_HOST_XFER_ACT) + + FPT_hostDataXferAbort(p_port, p_card, + pCurrCard->currentSCCB); + } + + DISABLE_AUTO(p_port); + + FPT_sresb(p_port, p_card); + + while (RD_HARPOON(p_port + hp_scsictrl_0) & SCSI_RST) { + } + + pCurrNvRam = pCurrCard->pNvRamInfo; + if (pCurrNvRam) { + ScamFlg = pCurrNvRam->niScamConf; + } else { + ScamFlg = + (unsigned char)FPT_utilEERead(p_port, + SCAM_CONFIG / 2); + } + + FPT_XbowInit(p_port, ScamFlg); + + FPT_scini(p_card, pCurrCard->ourId, 0); + + return 0xFF; + } + + else if (p_int & FIFO) { + + WRW_HARPOON((p_port + hp_intstat), FIFO); + + if (pCurrCard->currentSCCB != NULL) + FPT_sxfrp(p_port, p_card); + } + + else if (p_int & TIMEOUT) { + + DISABLE_AUTO(p_port); + + WRW_HARPOON((p_port + hp_intstat), + (PROG_HLT | TIMEOUT | SEL | BUS_FREE | PHASE | + IUNKWN)); + + pCurrCard->currentSCCB->HostStatus = SCCB_SELECTION_TIMEOUT; + + currTar_Info = + &FPT_sccbMgrTbl[p_card][pCurrCard->currentSCCB->TargID]; + if ((pCurrCard->globalFlags & F_CONLUN_IO) + && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != + TAG_Q_TRYING)) + currTar_Info->TarLUNBusy[pCurrCard->currentSCCB->Lun] = + 0; + else + currTar_Info->TarLUNBusy[0] = 0; + + if (currTar_Info->TarEEValue & EE_SYNC_MASK) { + currTar_Info->TarSyncCtrl = 0; + currTar_Info->TarStatus &= ~TAR_SYNC_MASK; + } + + if (currTar_Info->TarEEValue & EE_WIDE_SCSI) { + currTar_Info->TarStatus &= ~TAR_WIDE_MASK; + } + + FPT_sssyncv(p_port, pCurrCard->currentSCCB->TargID, NARROW_SCSI, + currTar_Info); + + FPT_queueCmdComplete(pCurrCard, pCurrCard->currentSCCB, p_card); + + } + + else if (p_int & SCAM_SEL) { + + FPT_scarb(p_port, LEVEL2_TAR); + FPT_scsel(p_port); + FPT_scasid(p_card, p_port); + + FPT_scbusf(p_port); + + WRW_HARPOON((p_port + hp_intstat), SCAM_SEL); + } + + return 0x00; +} + +/*--------------------------------------------------------------------- + * + * Function: SccbMgrTableInit + * + * Description: Initialize all Sccb manager data structures. 
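+ *              (This header is shared by the three initializers that
+ *              follow: FPT_SccbMgrTableInitAll resets every per-card
+ *              structure in FPT_BL_Card, and the card- and target-level
+ *              routines below narrow that down.)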
+ * + *---------------------------------------------------------------------*/ + +static void FPT_SccbMgrTableInitAll(void) +{ + unsigned char thisCard; + + for (thisCard = 0; thisCard < MAX_CARDS; thisCard++) { + FPT_SccbMgrTableInitCard(&FPT_BL_Card[thisCard], thisCard); + + FPT_BL_Card[thisCard].ioPort = 0x00; + FPT_BL_Card[thisCard].cardInfo = NULL; + FPT_BL_Card[thisCard].cardIndex = 0xFF; + FPT_BL_Card[thisCard].ourId = 0x00; + FPT_BL_Card[thisCard].pNvRamInfo = NULL; + } +} + +/*--------------------------------------------------------------------- + * + * Function: SccbMgrTableInit + * + * Description: Initialize all Sccb manager data structures. + * + *---------------------------------------------------------------------*/ + +static void FPT_SccbMgrTableInitCard(struct sccb_card *pCurrCard, + unsigned char p_card) +{ + unsigned char scsiID, qtag; + + for (qtag = 0; qtag < QUEUE_DEPTH; qtag++) { + FPT_BL_Card[p_card].discQ_Tbl[qtag] = NULL; + } + + for (scsiID = 0; scsiID < MAX_SCSI_TAR; scsiID++) { + FPT_sccbMgrTbl[p_card][scsiID].TarStatus = 0; + FPT_sccbMgrTbl[p_card][scsiID].TarEEValue = 0; + FPT_SccbMgrTableInitTarget(p_card, scsiID); + } + + pCurrCard->scanIndex = 0x00; + pCurrCard->currentSCCB = NULL; + pCurrCard->globalFlags = 0x00; + pCurrCard->cmdCounter = 0x00; + pCurrCard->tagQ_Lst = 0x01; + pCurrCard->discQCount = 0; + +} + +/*--------------------------------------------------------------------- + * + * Function: SccbMgrTableInit + * + * Description: Initialize all Sccb manager data structures. + * + *---------------------------------------------------------------------*/ + +static void FPT_SccbMgrTableInitTarget(unsigned char p_card, + unsigned char target) +{ + + unsigned char lun, qtag; + struct sccb_mgr_tar_info *currTar_Info; + + currTar_Info = &FPT_sccbMgrTbl[p_card][target]; + + currTar_Info->TarSelQ_Cnt = 0; + currTar_Info->TarSyncCtrl = 0; + + currTar_Info->TarSelQ_Head = NULL; + currTar_Info->TarSelQ_Tail = NULL; + currTar_Info->TarTagQ_Cnt = 0; + currTar_Info->TarLUN_CA = 0; + + for (lun = 0; lun < MAX_LUN; lun++) { + currTar_Info->TarLUNBusy[lun] = 0; + currTar_Info->LunDiscQ_Idx[lun] = 0; + } + + for (qtag = 0; qtag < QUEUE_DEPTH; qtag++) { + if (FPT_BL_Card[p_card].discQ_Tbl[qtag] != NULL) { + if (FPT_BL_Card[p_card].discQ_Tbl[qtag]->TargID == + target) { + FPT_BL_Card[p_card].discQ_Tbl[qtag] = NULL; + FPT_BL_Card[p_card].discQCount--; + } + } + } +} + +/*--------------------------------------------------------------------- + * + * Function: sfetm + * + * Description: Read in a message byte from the SCSI bus, and check + * for a parity error. 
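+ *              (Implemented as FPT_sfm.)  The byte is taken from the SCSI
+ *              data register after waiting for REQ with a bounded poll of
+ *              20000 iterations; a timeout forces the returned byte to 0.
+ *              On a detected parity error the remaining message bytes are
+ *              flushed with ATN asserted, Sccb_scsimsg is set to
+ *              MSG_PARITY_ERROR and 0 is returned so the caller can ask
+ *              for a retransmission.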
+ * + *---------------------------------------------------------------------*/ + +static unsigned char FPT_sfm(u32 port, struct sccb *pCurrSCCB) +{ + unsigned char message; + unsigned short TimeOutLoop; + + TimeOutLoop = 0; + while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) && + (TimeOutLoop++ < 20000)) { + } + + WR_HARPOON(port + hp_portctrl_0, SCSI_PORT); + + message = RD_HARPOON(port + hp_scsidata_0); + + WR_HARPOON(port + hp_scsisig, SCSI_ACK + S_MSGI_PH); + + if (TimeOutLoop > 20000) + message = 0x00; /* force message byte = 0 if Time Out on Req */ + + if ((RDW_HARPOON((port + hp_intstat)) & PARITY) && + (RD_HARPOON(port + hp_addstat) & SCSI_PAR_ERR)) { + WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH)); + WR_HARPOON(port + hp_xferstat, 0); + WR_HARPOON(port + hp_fiforead, 0); + WR_HARPOON(port + hp_fifowrite, 0); + if (pCurrSCCB != NULL) { + pCurrSCCB->Sccb_scsimsg = MSG_PARITY_ERROR; + } + message = 0x00; + do { + ACCEPT_MSG_ATN(port); + TimeOutLoop = 0; + while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) && + (TimeOutLoop++ < 20000)) { + } + if (TimeOutLoop > 20000) { + WRW_HARPOON((port + hp_intstat), PARITY); + return message; + } + if ((RD_HARPOON(port + hp_scsisig) & S_SCSI_PHZ) != + S_MSGI_PH) { + WRW_HARPOON((port + hp_intstat), PARITY); + return message; + } + WR_HARPOON(port + hp_portctrl_0, SCSI_PORT); + + RD_HARPOON(port + hp_scsidata_0); + + WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH)); + + } while (1); + + } + WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH)); + WR_HARPOON(port + hp_xferstat, 0); + WR_HARPOON(port + hp_fiforead, 0); + WR_HARPOON(port + hp_fifowrite, 0); + return message; +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_ssel + * + * Description: Load up automation and select target device. 
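+ *              Depending on the SCCB this builds a target-reset or
+ *              abort-tag message sequence, starts a wide or synchronous
+ *              negotiation, or loads the identify message, optional queue
+ *              tag and CDB into the automation RAM as MPM_OP instructions.
+ *              A free discQ_Tbl slot is reserved first, and selection is
+ *              started through hp_autostart_3 / hp_scsictrl_0.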
+ * + *---------------------------------------------------------------------*/ + +static void FPT_ssel(u32 port, unsigned char p_card) +{ + + unsigned char auto_loaded, i, target, *theCCB; + + u32 cdb_reg; + struct sccb_card *CurrCard; + struct sccb *currSCCB; + struct sccb_mgr_tar_info *currTar_Info; + unsigned char lastTag, lun; + + CurrCard = &FPT_BL_Card[p_card]; + currSCCB = CurrCard->currentSCCB; + target = currSCCB->TargID; + currTar_Info = &FPT_sccbMgrTbl[p_card][target]; + lastTag = CurrCard->tagQ_Lst; + + ARAM_ACCESS(port); + + if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_REJECT) + currSCCB->ControlByte &= ~F_USE_CMD_Q; + + if (((CurrCard->globalFlags & F_CONLUN_IO) && + ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) + + lun = currSCCB->Lun; + else + lun = 0; + + if (CurrCard->globalFlags & F_TAG_STARTED) { + if (!(currSCCB->ControlByte & F_USE_CMD_Q)) { + if ((currTar_Info->TarLUN_CA == 0) + && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) + == TAG_Q_TRYING)) { + + if (currTar_Info->TarTagQ_Cnt != 0) { + currTar_Info->TarLUNBusy[lun] = 1; + FPT_queueSelectFail(CurrCard, p_card); + SGRAM_ACCESS(port); + return; + } + + else { + currTar_Info->TarLUNBusy[lun] = 1; + } + + } + /*End non-tagged */ + else { + currTar_Info->TarLUNBusy[lun] = 1; + } + + } + /*!Use cmd Q Tagged */ + else { + if (currTar_Info->TarLUN_CA == 1) { + FPT_queueSelectFail(CurrCard, p_card); + SGRAM_ACCESS(port); + return; + } + + currTar_Info->TarLUNBusy[lun] = 1; + + } /*else use cmd Q tagged */ + + } + /*if glob tagged started */ + else { + currTar_Info->TarLUNBusy[lun] = 1; + } + + if ((((CurrCard->globalFlags & F_CONLUN_IO) && + ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)) + || (!(currSCCB->ControlByte & F_USE_CMD_Q)))) { + if (CurrCard->discQCount >= QUEUE_DEPTH) { + currTar_Info->TarLUNBusy[lun] = 1; + FPT_queueSelectFail(CurrCard, p_card); + SGRAM_ACCESS(port); + return; + } + for (i = 1; i < QUEUE_DEPTH; i++) { + if (++lastTag >= QUEUE_DEPTH) + lastTag = 1; + if (CurrCard->discQ_Tbl[lastTag] == NULL) { + CurrCard->tagQ_Lst = lastTag; + currTar_Info->LunDiscQ_Idx[lun] = lastTag; + CurrCard->discQ_Tbl[lastTag] = currSCCB; + CurrCard->discQCount++; + break; + } + } + if (i == QUEUE_DEPTH) { + currTar_Info->TarLUNBusy[lun] = 1; + FPT_queueSelectFail(CurrCard, p_card); + SGRAM_ACCESS(port); + return; + } + } + + auto_loaded = 0; + + WR_HARPOON(port + hp_select_id, target); + WR_HARPOON(port + hp_gp_reg_3, target); /* Use by new automation logic */ + + if (currSCCB->OperationCode == RESET_COMMAND) { + WRW_HARPOON((port + ID_MSG_STRT), (MPM_OP + AMSG_OUT + + (currSCCB-> + Sccb_idmsg & ~DISC_PRIV))); + + WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + NP); + + currSCCB->Sccb_scsimsg = TARGET_RESET; + + WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT)); + auto_loaded = 1; + currSCCB->Sccb_scsistat = SELECT_BDR_ST; + + if (currTar_Info->TarEEValue & EE_SYNC_MASK) { + currTar_Info->TarSyncCtrl = 0; + currTar_Info->TarStatus &= ~TAR_SYNC_MASK; + } + + if (currTar_Info->TarEEValue & EE_WIDE_SCSI) { + currTar_Info->TarStatus &= ~TAR_WIDE_MASK; + } + + FPT_sssyncv(port, target, NARROW_SCSI, currTar_Info); + FPT_SccbMgrTableInitTarget(p_card, target); + + } + + else if (currSCCB->Sccb_scsistat == ABORT_ST) { + WRW_HARPOON((port + ID_MSG_STRT), (MPM_OP + AMSG_OUT + + (currSCCB-> + Sccb_idmsg & ~DISC_PRIV))); + + WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ); + + WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + + (((unsigned + 
char)(currSCCB-> + ControlByte & + TAG_TYPE_MASK) + >> 6) | (unsigned char) + 0x20))); + WRW_HARPOON((port + SYNC_MSGS + 2), + (MPM_OP + AMSG_OUT + currSCCB->Sccb_tag)); + WRW_HARPOON((port + SYNC_MSGS + 4), (BRH_OP + ALWAYS + NP)); + + WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT)); + auto_loaded = 1; + + } + + else if (!(currTar_Info->TarStatus & WIDE_NEGOCIATED)) { + auto_loaded = FPT_siwidn(port, p_card); + currSCCB->Sccb_scsistat = SELECT_WN_ST; + } + + else if (!((currTar_Info->TarStatus & TAR_SYNC_MASK) + == SYNC_SUPPORTED)) { + auto_loaded = FPT_sisyncn(port, p_card, 0); + currSCCB->Sccb_scsistat = SELECT_SN_ST; + } + + if (!auto_loaded) { + + if (currSCCB->ControlByte & F_USE_CMD_Q) { + + CurrCard->globalFlags |= F_TAG_STARTED; + + if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) + == TAG_Q_REJECT) { + currSCCB->ControlByte &= ~F_USE_CMD_Q; + + /* Fix up the start instruction with a jump to + Non-Tag-CMD handling */ + WRW_HARPOON((port + ID_MSG_STRT), + BRH_OP + ALWAYS + NTCMD); + + WRW_HARPOON((port + NON_TAG_ID_MSG), + (MPM_OP + AMSG_OUT + + currSCCB->Sccb_idmsg)); + + WR_HARPOON(port + hp_autostart_3, + (SELECT + SELCHK_STRT)); + + /* Setup our STATE so we know what happened when + the wheels fall off. */ + currSCCB->Sccb_scsistat = SELECT_ST; + + currTar_Info->TarLUNBusy[lun] = 1; + } + + else { + WRW_HARPOON((port + ID_MSG_STRT), + (MPM_OP + AMSG_OUT + + currSCCB->Sccb_idmsg)); + + WRW_HARPOON((port + ID_MSG_STRT + 2), + (MPM_OP + AMSG_OUT + + (((unsigned char)(currSCCB-> + ControlByte & + TAG_TYPE_MASK) + >> 6) | (unsigned char)0x20))); + + for (i = 1; i < QUEUE_DEPTH; i++) { + if (++lastTag >= QUEUE_DEPTH) + lastTag = 1; + if (CurrCard->discQ_Tbl[lastTag] == + NULL) { + WRW_HARPOON((port + + ID_MSG_STRT + 6), + (MPM_OP + AMSG_OUT + + lastTag)); + CurrCard->tagQ_Lst = lastTag; + currSCCB->Sccb_tag = lastTag; + CurrCard->discQ_Tbl[lastTag] = + currSCCB; + CurrCard->discQCount++; + break; + } + } + + if (i == QUEUE_DEPTH) { + currTar_Info->TarLUNBusy[lun] = 1; + FPT_queueSelectFail(CurrCard, p_card); + SGRAM_ACCESS(port); + return; + } + + currSCCB->Sccb_scsistat = SELECT_Q_ST; + + WR_HARPOON(port + hp_autostart_3, + (SELECT + SELCHK_STRT)); + } + } + + else { + + WRW_HARPOON((port + ID_MSG_STRT), + BRH_OP + ALWAYS + NTCMD); + + WRW_HARPOON((port + NON_TAG_ID_MSG), + (MPM_OP + AMSG_OUT + currSCCB->Sccb_idmsg)); + + currSCCB->Sccb_scsistat = SELECT_ST; + + WR_HARPOON(port + hp_autostart_3, + (SELECT + SELCHK_STRT)); + } + + theCCB = (unsigned char *)&currSCCB->Cdb[0]; + + cdb_reg = port + CMD_STRT; + + for (i = 0; i < currSCCB->CdbLength; i++) { + WRW_HARPOON(cdb_reg, (MPM_OP + ACOMMAND + *theCCB)); + cdb_reg += 2; + theCCB++; + } + + if (currSCCB->CdbLength != TWELVE_BYTE_CMD) + WRW_HARPOON(cdb_reg, (BRH_OP + ALWAYS + NP)); + + } + /* auto_loaded */ + WRW_HARPOON((port + hp_fiforead), (unsigned short)0x00); + WR_HARPOON(port + hp_xferstat, 0x00); + + WRW_HARPOON((port + hp_intstat), (PROG_HLT | TIMEOUT | SEL | BUS_FREE)); + + WR_HARPOON(port + hp_portctrl_0, (SCSI_PORT)); + + if (!(currSCCB->Sccb_MGRFlags & F_DEV_SELECTED)) { + WR_HARPOON(port + hp_scsictrl_0, + (SEL_TAR | ENA_ATN | ENA_RESEL | ENA_SCAM_SEL)); + } else { + +/* auto_loaded = (RD_HARPOON(port+hp_autostart_3) & (unsigned char)0x1F); + auto_loaded |= AUTO_IMMED; */ + auto_loaded = AUTO_IMMED; + + DISABLE_AUTO(port); + + WR_HARPOON(port + hp_autostart_3, auto_loaded); + } + + SGRAM_ACCESS(port); +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_sres + * + * 
Description: Hookup the correct CCB and handle the incoming messages. + * + *---------------------------------------------------------------------*/ + +static void FPT_sres(u32 port, unsigned char p_card, + struct sccb_card *pCurrCard) +{ + + unsigned char our_target, message, lun = 0, tag, msgRetryCount; + + struct sccb_mgr_tar_info *currTar_Info; + struct sccb *currSCCB; + + if (pCurrCard->currentSCCB != NULL) { + currTar_Info = + &FPT_sccbMgrTbl[p_card][pCurrCard->currentSCCB->TargID]; + DISABLE_AUTO(port); + + WR_HARPOON((port + hp_scsictrl_0), (ENA_RESEL | ENA_SCAM_SEL)); + + currSCCB = pCurrCard->currentSCCB; + if (currSCCB->Sccb_scsistat == SELECT_WN_ST) { + currTar_Info->TarStatus &= ~TAR_WIDE_MASK; + currSCCB->Sccb_scsistat = BUS_FREE_ST; + } + if (currSCCB->Sccb_scsistat == SELECT_SN_ST) { + currTar_Info->TarStatus &= ~TAR_SYNC_MASK; + currSCCB->Sccb_scsistat = BUS_FREE_ST; + } + if (((pCurrCard->globalFlags & F_CONLUN_IO) && + ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != + TAG_Q_TRYING))) { + currTar_Info->TarLUNBusy[currSCCB->Lun] = 0; + if (currSCCB->Sccb_scsistat != ABORT_ST) { + pCurrCard->discQCount--; + pCurrCard->discQ_Tbl[currTar_Info-> + LunDiscQ_Idx[currSCCB-> + Lun]] + = NULL; + } + } else { + currTar_Info->TarLUNBusy[0] = 0; + if (currSCCB->Sccb_tag) { + if (currSCCB->Sccb_scsistat != ABORT_ST) { + pCurrCard->discQCount--; + pCurrCard->discQ_Tbl[currSCCB-> + Sccb_tag] = NULL; + } + } else { + if (currSCCB->Sccb_scsistat != ABORT_ST) { + pCurrCard->discQCount--; + pCurrCard->discQ_Tbl[currTar_Info-> + LunDiscQ_Idx[0]] = + NULL; + } + } + } + + FPT_queueSelectFail(&FPT_BL_Card[p_card], p_card); + } + + WRW_HARPOON((port + hp_fiforead), (unsigned short)0x00); + + our_target = (unsigned char)(RD_HARPOON(port + hp_select_id) >> 4); + currTar_Info = &FPT_sccbMgrTbl[p_card][our_target]; + + msgRetryCount = 0; + do { + + currTar_Info = &FPT_sccbMgrTbl[p_card][our_target]; + tag = 0; + + while (!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) { + if (!(RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) { + + WRW_HARPOON((port + hp_intstat), PHASE); + return; + } + } + + WRW_HARPOON((port + hp_intstat), PHASE); + if ((RD_HARPOON(port + hp_scsisig) & S_SCSI_PHZ) == S_MSGI_PH) { + + message = FPT_sfm(port, pCurrCard->currentSCCB); + if (message) { + + if (message <= (0x80 | LUN_MASK)) { + lun = message & (unsigned char)LUN_MASK; + + if ((currTar_Info-> + TarStatus & TAR_TAG_Q_MASK) == + TAG_Q_TRYING) { + if (currTar_Info->TarTagQ_Cnt != + 0) { + + if (! + (currTar_Info-> + TarLUN_CA)) { + ACCEPT_MSG(port); /*Release the ACK for ID msg. */ + + message = + FPT_sfm + (port, + pCurrCard-> + currentSCCB); + if (message) { + ACCEPT_MSG + (port); + } + + else + message + = 0; + + if (message != + 0) { + tag = + FPT_sfm + (port, + pCurrCard-> + currentSCCB); + + if (! + (tag)) + message + = + 0; + } + + } + /*C.A. exists! */ + } + /*End Q cnt != 0 */ + } + /*End Tag cmds supported! */ + } + /*End valid ID message. */ + else { + + ACCEPT_MSG_ATN(port); + } + + } + /* End good id message. */ + else { + + message = 0; + } + } else { + ACCEPT_MSG_ATN(port); + + while (! + (RDW_HARPOON((port + hp_intstat)) & + (PHASE | RESET)) + && !(RD_HARPOON(port + hp_scsisig) & SCSI_REQ) + && (RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) ; + + return; + } + + if (message == 0) { + msgRetryCount++; + if (msgRetryCount == 1) { + FPT_SendMsg(port, MSG_PARITY_ERROR); + } else { + FPT_SendMsg(port, TARGET_RESET); + + FPT_sssyncv(port, our_target, NARROW_SCSI, + currTar_Info); + + if (FPT_sccbMgrTbl[p_card][our_target]. 
+ TarEEValue & EE_SYNC_MASK) { + + FPT_sccbMgrTbl[p_card][our_target]. + TarStatus &= ~TAR_SYNC_MASK; + + } + + if (FPT_sccbMgrTbl[p_card][our_target]. + TarEEValue & EE_WIDE_SCSI) { + + FPT_sccbMgrTbl[p_card][our_target]. + TarStatus &= ~TAR_WIDE_MASK; + } + + FPT_queueFlushTargSccb(p_card, our_target, + SCCB_COMPLETE); + FPT_SccbMgrTableInitTarget(p_card, our_target); + return; + } + } + } while (message == 0); + + if (((pCurrCard->globalFlags & F_CONLUN_IO) && + ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) { + currTar_Info->TarLUNBusy[lun] = 1; + pCurrCard->currentSCCB = + pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[lun]]; + if (pCurrCard->currentSCCB != NULL) { + ACCEPT_MSG(port); + } else { + ACCEPT_MSG_ATN(port); + } + } else { + currTar_Info->TarLUNBusy[0] = 1; + + if (tag) { + if (pCurrCard->discQ_Tbl[tag] != NULL) { + pCurrCard->currentSCCB = + pCurrCard->discQ_Tbl[tag]; + currTar_Info->TarTagQ_Cnt--; + ACCEPT_MSG(port); + } else { + ACCEPT_MSG_ATN(port); + } + } else { + pCurrCard->currentSCCB = + pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[0]]; + if (pCurrCard->currentSCCB != NULL) { + ACCEPT_MSG(port); + } else { + ACCEPT_MSG_ATN(port); + } + } + } + + if (pCurrCard->currentSCCB != NULL) { + if (pCurrCard->currentSCCB->Sccb_scsistat == ABORT_ST) { + /* During Abort Tag command, the target could have got re-selected + and completed the command. Check the select Q and remove the CCB + if it is in the Select Q */ + FPT_queueFindSccb(pCurrCard->currentSCCB, p_card); + } + } + + while (!(RDW_HARPOON((port + hp_intstat)) & (PHASE | RESET)) && + !(RD_HARPOON(port + hp_scsisig) & SCSI_REQ) && + (RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) ; +} + +static void FPT_SendMsg(u32 port, unsigned char message) +{ + while (!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) { + if (!(RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) { + + WRW_HARPOON((port + hp_intstat), PHASE); + return; + } + } + + WRW_HARPOON((port + hp_intstat), PHASE); + if ((RD_HARPOON(port + hp_scsisig) & S_SCSI_PHZ) == S_MSGO_PH) { + WRW_HARPOON((port + hp_intstat), + (BUS_FREE | PHASE | XFER_CNT_0)); + + WR_HARPOON(port + hp_portctrl_0, SCSI_BUS_EN); + + WR_HARPOON(port + hp_scsidata_0, message); + + WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH)); + + ACCEPT_MSG(port); + + WR_HARPOON(port + hp_portctrl_0, 0x00); + + if ((message == ABORT_TASK_SET) || (message == TARGET_RESET) || + (message == ABORT_TASK)) { + while (! + (RDW_HARPOON((port + hp_intstat)) & + (BUS_FREE | PHASE))) { + } + + if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) { + WRW_HARPOON((port + hp_intstat), BUS_FREE); + } + } + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_sdecm + * + * Description: Determine the proper response to the message from the + * target device. 
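+ *              Recognized messages: RESTORE_POINTERS, COMMAND_COMPLETE,
+ *              NOP/IDENTIFY and the recovery messages, MESSAGE_REJECT
+ *              (which drops the pending sync, wide or tagged-queuing
+ *              negotiation), EXTENDED_MESSAGE (handed to FPT_shandem) and
+ *              IGNORE_WIDE_RESIDUE; anything else is answered with
+ *              MESSAGE_REJECT.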
+ * + *---------------------------------------------------------------------*/ +static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card) +{ + struct sccb *currSCCB; + struct sccb_card *CurrCard; + struct sccb_mgr_tar_info *currTar_Info; + + CurrCard = &FPT_BL_Card[p_card]; + currSCCB = CurrCard->currentSCCB; + + currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID]; + + if (message == RESTORE_POINTERS) { + if (!(currSCCB->Sccb_XferState & F_NO_DATA_YET)) { + currSCCB->Sccb_ATC = currSCCB->Sccb_savedATC; + + FPT_hostDataXferRestart(currSCCB); + } + + ACCEPT_MSG(port); + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + } + + else if (message == COMMAND_COMPLETE) { + + if (currSCCB->Sccb_scsistat == SELECT_Q_ST) { + currTar_Info->TarStatus &= + ~(unsigned char)TAR_TAG_Q_MASK; + currTar_Info->TarStatus |= (unsigned char)TAG_Q_REJECT; + } + + ACCEPT_MSG(port); + + } + + else if ((message == NOP) || (message >= IDENTIFY_BASE) || + (message == INITIATE_RECOVERY) || + (message == RELEASE_RECOVERY)) { + + ACCEPT_MSG(port); + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + } + + else if (message == MESSAGE_REJECT) { + + if ((currSCCB->Sccb_scsistat == SELECT_SN_ST) || + (currSCCB->Sccb_scsistat == SELECT_WN_ST) || + ((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_TRYING) + || ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == + TAG_Q_TRYING)) + { + WRW_HARPOON((port + hp_intstat), BUS_FREE); + + ACCEPT_MSG(port); + + while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) && + (!(RDW_HARPOON((port + hp_intstat)) & BUS_FREE))) + { + } + + if (currSCCB->Lun == 0x00) { + if (currSCCB->Sccb_scsistat == SELECT_SN_ST) { + + currTar_Info->TarStatus |= + (unsigned char)SYNC_SUPPORTED; + + currTar_Info->TarEEValue &= + ~EE_SYNC_MASK; + } + + else if (currSCCB->Sccb_scsistat == + SELECT_WN_ST) { + + currTar_Info->TarStatus = + (currTar_Info-> + TarStatus & ~WIDE_ENABLED) | + WIDE_NEGOCIATED; + + currTar_Info->TarEEValue &= + ~EE_WIDE_SCSI; + + } + + else if ((currTar_Info-> + TarStatus & TAR_TAG_Q_MASK) == + TAG_Q_TRYING) { + currTar_Info->TarStatus = + (currTar_Info-> + TarStatus & ~(unsigned char) + TAR_TAG_Q_MASK) | TAG_Q_REJECT; + + currSCCB->ControlByte &= ~F_USE_CMD_Q; + CurrCard->discQCount--; + CurrCard->discQ_Tbl[currSCCB-> + Sccb_tag] = NULL; + currSCCB->Sccb_tag = 0x00; + + } + } + + if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) { + + if (currSCCB->Lun == 0x00) { + WRW_HARPOON((port + hp_intstat), + BUS_FREE); + CurrCard->globalFlags |= F_NEW_SCCB_CMD; + } + } + + else { + + if ((CurrCard->globalFlags & F_CONLUN_IO) && + ((currTar_Info-> + TarStatus & TAR_TAG_Q_MASK) != + TAG_Q_TRYING)) + currTar_Info->TarLUNBusy[currSCCB-> + Lun] = 1; + else + currTar_Info->TarLUNBusy[0] = 1; + + currSCCB->ControlByte &= + ~(unsigned char)F_USE_CMD_Q; + + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + + } + } + + else { + ACCEPT_MSG(port); + + while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) && + (!(RDW_HARPOON((port + hp_intstat)) & BUS_FREE))) + { + } + + if (!(RDW_HARPOON((port + hp_intstat)) & BUS_FREE)) { + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + } + } + } + + else if (message == EXTENDED_MESSAGE) { + + ACCEPT_MSG(port); + FPT_shandem(port, p_card, currSCCB); + } + + else if (message == IGNORE_WIDE_RESIDUE) { + + ACCEPT_MSG(port); /* ACK the RESIDUE MSG */ + + message = FPT_sfm(port, currSCCB); + + if (currSCCB->Sccb_scsimsg != MSG_PARITY_ERROR) + ACCEPT_MSG(port); + 
WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + } + + else { + + currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL; + currSCCB->Sccb_scsimsg = MESSAGE_REJECT; + + ACCEPT_MSG_ATN(port); + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_shandem + * + * Description: Decide what to do with the extended message. + * + *---------------------------------------------------------------------*/ +static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB) +{ + unsigned char length, message; + + length = FPT_sfm(port, pCurrSCCB); + if (length) { + + ACCEPT_MSG(port); + message = FPT_sfm(port, pCurrSCCB); + if (message) { + + if (message == EXTENDED_SDTR) { + + if (length == 0x03) { + + ACCEPT_MSG(port); + FPT_stsyncn(port, p_card); + } else { + + pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT; + ACCEPT_MSG_ATN(port); + } + } else if (message == EXTENDED_WDTR) { + + if (length == 0x02) { + + ACCEPT_MSG(port); + FPT_stwidn(port, p_card); + } else { + + pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT; + ACCEPT_MSG_ATN(port); + + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + + DISCONNECT_START)); + } + } else { + + pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT; + ACCEPT_MSG_ATN(port); + + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + } + } else { + if (pCurrSCCB->Sccb_scsimsg != MSG_PARITY_ERROR) + ACCEPT_MSG(port); + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + } + } else { + if (pCurrSCCB->Sccb_scsimsg == MSG_PARITY_ERROR) + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_sisyncn + * + * Description: Read in a message byte from the SCSI bus, and check + * for a parity error. 
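+ *              When synchronous transfer has not yet been settled, this
+ *              routine builds the outgoing five-byte SDTR
+ *              (01h 03h 01h <period> <offset>) in automation RAM, taking the
+ *              period from the target's EE_SYNC_xxMB setting and the offset
+ *              from DEFAULT_OFFSET, and returns 1 to show a negotiation was
+ *              started; otherwise the target is marked SYNC_SUPPORTED and 0
+ *              is returned.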
+ * + *---------------------------------------------------------------------*/ + +static unsigned char FPT_sisyncn(u32 port, unsigned char p_card, + unsigned char syncFlag) +{ + struct sccb *currSCCB; + struct sccb_mgr_tar_info *currTar_Info; + + currSCCB = FPT_BL_Card[p_card].currentSCCB; + currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID]; + + if (!((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_TRYING)) { + + WRW_HARPOON((port + ID_MSG_STRT), + (MPM_OP + AMSG_OUT + + (currSCCB-> + Sccb_idmsg & ~(unsigned char)DISC_PRIV))); + + WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ); + + WRW_HARPOON((port + SYNC_MSGS + 0), + (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE)); + WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03)); + WRW_HARPOON((port + SYNC_MSGS + 4), + (MPM_OP + AMSG_OUT + EXTENDED_SDTR)); + + if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB) + + WRW_HARPOON((port + SYNC_MSGS + 6), + (MPM_OP + AMSG_OUT + 12)); + + else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == + EE_SYNC_10MB) + + WRW_HARPOON((port + SYNC_MSGS + 6), + (MPM_OP + AMSG_OUT + 25)); + + else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == + EE_SYNC_5MB) + + WRW_HARPOON((port + SYNC_MSGS + 6), + (MPM_OP + AMSG_OUT + 50)); + + else + WRW_HARPOON((port + SYNC_MSGS + 6), + (MPM_OP + AMSG_OUT + 00)); + + WRW_HARPOON((port + SYNC_MSGS + 8), (RAT_OP)); + WRW_HARPOON((port + SYNC_MSGS + 10), + (MPM_OP + AMSG_OUT + DEFAULT_OFFSET)); + WRW_HARPOON((port + SYNC_MSGS + 12), (BRH_OP + ALWAYS + NP)); + + if (syncFlag == 0) { + WR_HARPOON(port + hp_autostart_3, + (SELECT + SELCHK_STRT)); + currTar_Info->TarStatus = + ((currTar_Info-> + TarStatus & ~(unsigned char)TAR_SYNC_MASK) | + (unsigned char)SYNC_TRYING); + } else { + WR_HARPOON(port + hp_autostart_3, + (AUTO_IMMED + CMD_ONLY_STRT)); + } + + return 1; + } + + else { + + currTar_Info->TarStatus |= (unsigned char)SYNC_SUPPORTED; + currTar_Info->TarEEValue &= ~EE_SYNC_MASK; + return 0; + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_stsyncn + * + * Description: The has sent us a Sync Nego message so handle it as + * necessary. + * + *---------------------------------------------------------------------*/ +static void FPT_stsyncn(u32 port, unsigned char p_card) +{ + unsigned char sync_msg, offset, sync_reg, our_sync_msg; + struct sccb *currSCCB; + struct sccb_mgr_tar_info *currTar_Info; + + currSCCB = FPT_BL_Card[p_card].currentSCCB; + currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID]; + + sync_msg = FPT_sfm(port, currSCCB); + + if ((sync_msg == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) { + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + return; + } + + ACCEPT_MSG(port); + + offset = FPT_sfm(port, currSCCB); + + if ((offset == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) { + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + return; + } + + if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB) + + our_sync_msg = 12; /* Setup our Message to 20mb/s */ + + else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_10MB) + + our_sync_msg = 25; /* Setup our Message to 10mb/s */ + + else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_5MB) + + our_sync_msg = 50; /* Setup our Message to 5mb/s */ + else + + our_sync_msg = 0; /* Message = Async */ + + if (sync_msg < our_sync_msg) { + sync_msg = our_sync_msg; /*if faster, then set to max. 
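+                                    (both values are SDTR transfer period
+                                    factors in 4 ns units, so a larger
+                                    number means a slower rate)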
*/ + } + + if (offset == ASYNC) + sync_msg = ASYNC; + + if (offset > MAX_OFFSET) + offset = MAX_OFFSET; + + sync_reg = 0x00; + + if (sync_msg > 12) + + sync_reg = 0x20; /* Use 10MB/s */ + + if (sync_msg > 25) + + sync_reg = 0x40; /* Use 6.6MB/s */ + + if (sync_msg > 38) + + sync_reg = 0x60; /* Use 5MB/s */ + + if (sync_msg > 50) + + sync_reg = 0x80; /* Use 4MB/s */ + + if (sync_msg > 62) + + sync_reg = 0xA0; /* Use 3.33MB/s */ + + if (sync_msg > 75) + + sync_reg = 0xC0; /* Use 2.85MB/s */ + + if (sync_msg > 87) + + sync_reg = 0xE0; /* Use 2.5MB/s */ + + if (sync_msg > 100) { + + sync_reg = 0x00; /* Use ASYNC */ + offset = 0x00; + } + + if (currTar_Info->TarStatus & WIDE_ENABLED) + + sync_reg |= offset; + + else + + sync_reg |= (offset | NARROW_SCSI); + + FPT_sssyncv(port, currSCCB->TargID, sync_reg, currTar_Info); + + if (currSCCB->Sccb_scsistat == SELECT_SN_ST) { + + ACCEPT_MSG(port); + + currTar_Info->TarStatus = ((currTar_Info->TarStatus & + ~(unsigned char)TAR_SYNC_MASK) | + (unsigned char)SYNC_SUPPORTED); + + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + } + + else { + + ACCEPT_MSG_ATN(port); + + FPT_sisyncr(port, sync_msg, offset); + + currTar_Info->TarStatus = ((currTar_Info->TarStatus & + ~(unsigned char)TAR_SYNC_MASK) | + (unsigned char)SYNC_SUPPORTED); + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_sisyncr + * + * Description: Answer the targets sync message. + * + *---------------------------------------------------------------------*/ +static void FPT_sisyncr(u32 port, unsigned char sync_pulse, + unsigned char offset) +{ + ARAM_ACCESS(port); + WRW_HARPOON((port + SYNC_MSGS + 0), + (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE)); + WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03)); + WRW_HARPOON((port + SYNC_MSGS + 4), + (MPM_OP + AMSG_OUT + EXTENDED_SDTR)); + WRW_HARPOON((port + SYNC_MSGS + 6), (MPM_OP + AMSG_OUT + sync_pulse)); + WRW_HARPOON((port + SYNC_MSGS + 8), (RAT_OP)); + WRW_HARPOON((port + SYNC_MSGS + 10), (MPM_OP + AMSG_OUT + offset)); + WRW_HARPOON((port + SYNC_MSGS + 12), (BRH_OP + ALWAYS + NP)); + SGRAM_ACCESS(port); + + WR_HARPOON(port + hp_portctrl_0, SCSI_PORT); + WRW_HARPOON((port + hp_intstat), CLR_ALL_INT_1); + + WR_HARPOON(port + hp_autostart_3, (AUTO_IMMED + CMD_ONLY_STRT)); + + while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | AUTO_INT))) { + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_siwidn + * + * Description: Read in a message byte from the SCSI bus, and check + * for a parity error. 
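+ *              When wide transfer has not yet been negotiated, this routine
+ *              builds the outgoing WDTR request for a 16-bit bus
+ *              (01h 02h 03h <SM16BIT>) in automation RAM, starts the
+ *              selection sequence and returns 1; otherwise the target is
+ *              marked WIDE_NEGOCIATED and 0 is returned.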
+ * + *---------------------------------------------------------------------*/ + +static unsigned char FPT_siwidn(u32 port, unsigned char p_card) +{ + struct sccb *currSCCB; + struct sccb_mgr_tar_info *currTar_Info; + + currSCCB = FPT_BL_Card[p_card].currentSCCB; + currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID]; + + if (!((currTar_Info->TarStatus & TAR_WIDE_MASK) == WIDE_NEGOCIATED)) { + + WRW_HARPOON((port + ID_MSG_STRT), + (MPM_OP + AMSG_OUT + + (currSCCB-> + Sccb_idmsg & ~(unsigned char)DISC_PRIV))); + + WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ); + + WRW_HARPOON((port + SYNC_MSGS + 0), + (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE)); + WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02)); + WRW_HARPOON((port + SYNC_MSGS + 4), + (MPM_OP + AMSG_OUT + EXTENDED_WDTR)); + WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP)); + WRW_HARPOON((port + SYNC_MSGS + 8), + (MPM_OP + AMSG_OUT + SM16BIT)); + WRW_HARPOON((port + SYNC_MSGS + 10), (BRH_OP + ALWAYS + NP)); + + WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT)); + + currTar_Info->TarStatus = ((currTar_Info->TarStatus & + ~(unsigned char)TAR_WIDE_MASK) | + (unsigned char)WIDE_ENABLED); + + return 1; + } + + else { + + currTar_Info->TarStatus = ((currTar_Info->TarStatus & + ~(unsigned char)TAR_WIDE_MASK) | + WIDE_NEGOCIATED); + + currTar_Info->TarEEValue &= ~EE_WIDE_SCSI; + return 0; + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_stwidn + * + * Description: The has sent us a Wide Nego message so handle it as + * necessary. + * + *---------------------------------------------------------------------*/ +static void FPT_stwidn(u32 port, unsigned char p_card) +{ + unsigned char width; + struct sccb *currSCCB; + struct sccb_mgr_tar_info *currTar_Info; + + currSCCB = FPT_BL_Card[p_card].currentSCCB; + currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID]; + + width = FPT_sfm(port, currSCCB); + + if ((width == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) { + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + return; + } + + if (!(currTar_Info->TarEEValue & EE_WIDE_SCSI)) + width = 0; + + if (width) { + currTar_Info->TarStatus |= WIDE_ENABLED; + width = 0; + } else { + width = NARROW_SCSI; + currTar_Info->TarStatus &= ~WIDE_ENABLED; + } + + FPT_sssyncv(port, currSCCB->TargID, width, currTar_Info); + + if (currSCCB->Sccb_scsistat == SELECT_WN_ST) { + + currTar_Info->TarStatus |= WIDE_NEGOCIATED; + + if (! + ((currTar_Info->TarStatus & TAR_SYNC_MASK) == + SYNC_SUPPORTED)) { + ACCEPT_MSG_ATN(port); + ARAM_ACCESS(port); + FPT_sisyncn(port, p_card, 1); + currSCCB->Sccb_scsistat = SELECT_SN_ST; + SGRAM_ACCESS(port); + } else { + ACCEPT_MSG(port); + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + } + } + + else { + + ACCEPT_MSG_ATN(port); + + if (currTar_Info->TarEEValue & EE_WIDE_SCSI) + width = SM16BIT; + else + width = SM8BIT; + + FPT_siwidr(port, width); + + currTar_Info->TarStatus |= (WIDE_NEGOCIATED | WIDE_ENABLED); + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_siwidr + * + * Description: Answer the targets Wide nego message. 
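+ *              The reply (01h 02h 03h <width>) is loaded into the SYNC_MSGS
+ *              area of automation RAM and sent with a CMD_ONLY_STRT, waiting
+ *              for the automation to signal BUS_FREE or AUTO_INT.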
+ * + *---------------------------------------------------------------------*/ +static void FPT_siwidr(u32 port, unsigned char width) +{ + ARAM_ACCESS(port); + WRW_HARPOON((port + SYNC_MSGS + 0), + (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE)); + WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02)); + WRW_HARPOON((port + SYNC_MSGS + 4), + (MPM_OP + AMSG_OUT + EXTENDED_WDTR)); + WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP)); + WRW_HARPOON((port + SYNC_MSGS + 8), (MPM_OP + AMSG_OUT + width)); + WRW_HARPOON((port + SYNC_MSGS + 10), (BRH_OP + ALWAYS + NP)); + SGRAM_ACCESS(port); + + WR_HARPOON(port + hp_portctrl_0, SCSI_PORT); + WRW_HARPOON((port + hp_intstat), CLR_ALL_INT_1); + + WR_HARPOON(port + hp_autostart_3, (AUTO_IMMED + CMD_ONLY_STRT)); + + while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | AUTO_INT))) { + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_sssyncv + * + * Description: Write the desired value to the Sync Register for the + * ID specified. + * + *---------------------------------------------------------------------*/ +static void FPT_sssyncv(u32 p_port, unsigned char p_id, + unsigned char p_sync_value, + struct sccb_mgr_tar_info *currTar_Info) +{ + unsigned char index; + + index = p_id; + + switch (index) { + + case 0: + index = 12; /* hp_synctarg_0 */ + break; + case 1: + index = 13; /* hp_synctarg_1 */ + break; + case 2: + index = 14; /* hp_synctarg_2 */ + break; + case 3: + index = 15; /* hp_synctarg_3 */ + break; + case 4: + index = 8; /* hp_synctarg_4 */ + break; + case 5: + index = 9; /* hp_synctarg_5 */ + break; + case 6: + index = 10; /* hp_synctarg_6 */ + break; + case 7: + index = 11; /* hp_synctarg_7 */ + break; + case 8: + index = 4; /* hp_synctarg_8 */ + break; + case 9: + index = 5; /* hp_synctarg_9 */ + break; + case 10: + index = 6; /* hp_synctarg_10 */ + break; + case 11: + index = 7; /* hp_synctarg_11 */ + break; + case 12: + index = 0; /* hp_synctarg_12 */ + break; + case 13: + index = 1; /* hp_synctarg_13 */ + break; + case 14: + index = 2; /* hp_synctarg_14 */ + break; + case 15: + index = 3; /* hp_synctarg_15 */ + + } + + WR_HARPOON(p_port + hp_synctarg_base + index, p_sync_value); + + currTar_Info->TarSyncCtrl = p_sync_value; +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_sresb + * + * Description: Reset the desired card's SCSI bus. 
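+ *              The reset line is held while the selection time-out counter
+ *              runs (TO_5ms), after which every target's sync/wide state is
+ *              cleared back to narrow/async, the per-target tables are
+ *              re-initialized and the card's disconnect queue is emptied.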
+ * + *---------------------------------------------------------------------*/ +static void FPT_sresb(u32 port, unsigned char p_card) +{ + unsigned char scsiID, i; + + struct sccb_mgr_tar_info *currTar_Info; + + WR_HARPOON(port + hp_page_ctrl, + (RD_HARPOON(port + hp_page_ctrl) | G_INT_DISABLE)); + WRW_HARPOON((port + hp_intstat), CLR_ALL_INT); + + WR_HARPOON(port + hp_scsictrl_0, SCSI_RST); + + scsiID = RD_HARPOON(port + hp_seltimeout); + WR_HARPOON(port + hp_seltimeout, TO_5ms); + WRW_HARPOON((port + hp_intstat), TIMEOUT); + + WR_HARPOON(port + hp_portctrl_0, (SCSI_PORT | START_TO)); + + while (!(RDW_HARPOON((port + hp_intstat)) & TIMEOUT)) { + } + + WR_HARPOON(port + hp_seltimeout, scsiID); + + WR_HARPOON(port + hp_scsictrl_0, ENA_SCAM_SEL); + + FPT_Wait(port, TO_5ms); + + WRW_HARPOON((port + hp_intstat), CLR_ALL_INT); + + WR_HARPOON(port + hp_int_mask, (RD_HARPOON(port + hp_int_mask) | 0x00)); + + for (scsiID = 0; scsiID < MAX_SCSI_TAR; scsiID++) { + currTar_Info = &FPT_sccbMgrTbl[p_card][scsiID]; + + if (currTar_Info->TarEEValue & EE_SYNC_MASK) { + currTar_Info->TarSyncCtrl = 0; + currTar_Info->TarStatus &= ~TAR_SYNC_MASK; + } + + if (currTar_Info->TarEEValue & EE_WIDE_SCSI) { + currTar_Info->TarStatus &= ~TAR_WIDE_MASK; + } + + FPT_sssyncv(port, scsiID, NARROW_SCSI, currTar_Info); + + FPT_SccbMgrTableInitTarget(p_card, scsiID); + } + + FPT_BL_Card[p_card].scanIndex = 0x00; + FPT_BL_Card[p_card].currentSCCB = NULL; + FPT_BL_Card[p_card].globalFlags &= ~(F_TAG_STARTED | F_HOST_XFER_ACT + | F_NEW_SCCB_CMD); + FPT_BL_Card[p_card].cmdCounter = 0x00; + FPT_BL_Card[p_card].discQCount = 0x00; + FPT_BL_Card[p_card].tagQ_Lst = 0x01; + + for (i = 0; i < QUEUE_DEPTH; i++) + FPT_BL_Card[p_card].discQ_Tbl[i] = NULL; + + WR_HARPOON(port + hp_page_ctrl, + (RD_HARPOON(port + hp_page_ctrl) & ~G_INT_DISABLE)); + +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_ssenss + * + * Description: Setup for the Auto Sense command. + * + *---------------------------------------------------------------------*/ +static void FPT_ssenss(struct sccb_card *pCurrCard) +{ + unsigned char i; + struct sccb *currSCCB; + + currSCCB = pCurrCard->currentSCCB; + + currSCCB->Save_CdbLen = currSCCB->CdbLength; + + for (i = 0; i < 6; i++) { + + currSCCB->Save_Cdb[i] = currSCCB->Cdb[i]; + } + + currSCCB->CdbLength = SIX_BYTE_CMD; + currSCCB->Cdb[0] = REQUEST_SENSE; + currSCCB->Cdb[1] = currSCCB->Cdb[1] & (unsigned char)0xE0; /*Keep LUN. */ + currSCCB->Cdb[2] = 0x00; + currSCCB->Cdb[3] = 0x00; + currSCCB->Cdb[4] = currSCCB->RequestSenseLength; + currSCCB->Cdb[5] = 0x00; + + currSCCB->Sccb_XferCnt = (u32)currSCCB->RequestSenseLength; + + currSCCB->Sccb_ATC = 0x00; + + currSCCB->Sccb_XferState |= F_AUTO_SENSE; + + currSCCB->Sccb_XferState &= ~F_SG_XFER; + + currSCCB->Sccb_idmsg = currSCCB->Sccb_idmsg & ~(unsigned char)DISC_PRIV; + + currSCCB->ControlByte = 0x00; + + currSCCB->Sccb_MGRFlags &= F_STATUSLOADED; +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_sxfrp + * + * Description: Transfer data into the bit bucket until the device + * decides to switch phase. 
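+ *              Any active host bus-master transfer is aborted first; inbound
+ *              bytes are then read and discarded (or the outbound FIFO is
+ *              padded with 0xFA) until the target changes phase, goes bus
+ *              free, or a reset is seen.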
+ * + *---------------------------------------------------------------------*/ + +static void FPT_sxfrp(u32 p_port, unsigned char p_card) +{ + unsigned char curr_phz; + + DISABLE_AUTO(p_port); + + if (FPT_BL_Card[p_card].globalFlags & F_HOST_XFER_ACT) { + + FPT_hostDataXferAbort(p_port, p_card, + FPT_BL_Card[p_card].currentSCCB); + + } + + /* If the Automation handled the end of the transfer then do not + match the phase or we will get out of sync with the ISR. */ + + if (RDW_HARPOON((p_port + hp_intstat)) & + (BUS_FREE | XFER_CNT_0 | AUTO_INT)) + return; + + WR_HARPOON(p_port + hp_xfercnt_0, 0x00); + + curr_phz = RD_HARPOON(p_port + hp_scsisig) & (unsigned char)S_SCSI_PHZ; + + WRW_HARPOON((p_port + hp_intstat), XFER_CNT_0); + + WR_HARPOON(p_port + hp_scsisig, curr_phz); + + while (!(RDW_HARPOON((p_port + hp_intstat)) & (BUS_FREE | RESET)) && + (curr_phz == + (RD_HARPOON(p_port + hp_scsisig) & (unsigned char)S_SCSI_PHZ))) + { + if (curr_phz & (unsigned char)SCSI_IOBIT) { + WR_HARPOON(p_port + hp_portctrl_0, + (SCSI_PORT | HOST_PORT | SCSI_INBIT)); + + if (!(RD_HARPOON(p_port + hp_xferstat) & FIFO_EMPTY)) { + RD_HARPOON(p_port + hp_fifodata_0); + } + } else { + WR_HARPOON(p_port + hp_portctrl_0, + (SCSI_PORT | HOST_PORT | HOST_WRT)); + if (RD_HARPOON(p_port + hp_xferstat) & FIFO_EMPTY) { + WR_HARPOON(p_port + hp_fifodata_0, 0xFA); + } + } + } /* End of While loop for padding data I/O phase */ + + while (!(RDW_HARPOON((p_port + hp_intstat)) & (BUS_FREE | RESET))) { + if (RD_HARPOON(p_port + hp_scsisig) & SCSI_REQ) + break; + } + + WR_HARPOON(p_port + hp_portctrl_0, + (SCSI_PORT | HOST_PORT | SCSI_INBIT)); + while (!(RD_HARPOON(p_port + hp_xferstat) & FIFO_EMPTY)) { + RD_HARPOON(p_port + hp_fifodata_0); + } + + if (!(RDW_HARPOON((p_port + hp_intstat)) & (BUS_FREE | RESET))) { + WR_HARPOON(p_port + hp_autostart_0, + (AUTO_IMMED + DISCONNECT_START)); + while (!(RDW_HARPOON((p_port + hp_intstat)) & AUTO_INT)) { + } + + if (RDW_HARPOON((p_port + hp_intstat)) & + (ICMD_COMP | ITAR_DISC)) + while (! + (RDW_HARPOON((p_port + hp_intstat)) & + (BUS_FREE | RSEL))) ; + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_schkdd + * + * Description: Make sure data has been flushed from both FIFOs and abort + * the operations if necessary. 
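+ *              The completed byte count is folded into Sccb_ATC (holding one
+ *              byte back when F_ODD_BALL_CNT is set), a parity interrupt is
+ *              latched as SCCB_PARITY_ERR, and any data still outstanding is
+ *              either re-entered as a data phase or padded out through
+ *              FPT_sxfrp().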
+ * + *---------------------------------------------------------------------*/ + +static void FPT_schkdd(u32 port, unsigned char p_card) +{ + unsigned short TimeOutLoop; + unsigned char sPhase; + + struct sccb *currSCCB; + + currSCCB = FPT_BL_Card[p_card].currentSCCB; + + if ((currSCCB->Sccb_scsistat != DATA_OUT_ST) && + (currSCCB->Sccb_scsistat != DATA_IN_ST)) { + return; + } + + if (currSCCB->Sccb_XferState & F_ODD_BALL_CNT) { + + currSCCB->Sccb_ATC += (currSCCB->Sccb_XferCnt - 1); + + currSCCB->Sccb_XferCnt = 1; + + currSCCB->Sccb_XferState &= ~F_ODD_BALL_CNT; + WRW_HARPOON((port + hp_fiforead), (unsigned short)0x00); + WR_HARPOON(port + hp_xferstat, 0x00); + } + + else { + + currSCCB->Sccb_ATC += currSCCB->Sccb_XferCnt; + + currSCCB->Sccb_XferCnt = 0; + } + + if ((RDW_HARPOON((port + hp_intstat)) & PARITY) && + (currSCCB->HostStatus == SCCB_COMPLETE)) { + + currSCCB->HostStatus = SCCB_PARITY_ERR; + WRW_HARPOON((port + hp_intstat), PARITY); + } + + FPT_hostDataXferAbort(port, p_card, currSCCB); + + while (RD_HARPOON(port + hp_scsisig) & SCSI_ACK) { + } + + TimeOutLoop = 0; + + while (RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY) { + if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) { + return; + } + if (RD_HARPOON(port + hp_offsetctr) & (unsigned char)0x1F) { + break; + } + if (RDW_HARPOON((port + hp_intstat)) & RESET) { + return; + } + if ((RD_HARPOON(port + hp_scsisig) & SCSI_REQ) + || (TimeOutLoop++ > 0x3000)) + break; + } + + sPhase = RD_HARPOON(port + hp_scsisig) & (SCSI_BSY | S_SCSI_PHZ); + if ((!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY)) || + (RD_HARPOON(port + hp_offsetctr) & (unsigned char)0x1F) || + (sPhase == (SCSI_BSY | S_DATAO_PH)) || + (sPhase == (SCSI_BSY | S_DATAI_PH))) { + + WR_HARPOON(port + hp_portctrl_0, SCSI_PORT); + + if (!(currSCCB->Sccb_XferState & F_ALL_XFERRED)) { + if (currSCCB->Sccb_XferState & F_HOST_XFER_DIR) { + FPT_phaseDataIn(port, p_card); + } + + else { + FPT_phaseDataOut(port, p_card); + } + } else { + FPT_sxfrp(port, p_card); + if (!(RDW_HARPOON((port + hp_intstat)) & + (BUS_FREE | ICMD_COMP | ITAR_DISC | RESET))) { + WRW_HARPOON((port + hp_intstat), AUTO_INT); + FPT_phaseDecode(port, p_card); + } + } + + } + + else { + WR_HARPOON(port + hp_portctrl_0, 0x00); + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_sinits + * + * Description: Setup SCCB manager fields in this SCCB. 
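+ *              Per-request state (ATC, tag, status, flags) is cleared,
+ *              scatter/gather requests are flagged with F_SG_XFER, tagged
+ *              queuing is attempted only if the target has not rejected it,
+ *              and the IDENTIFY message is built with or without disconnect
+ *              privilege from TAR_ALLOW_DISC / TAG_Q_TRYING.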
+ * + *---------------------------------------------------------------------*/ + +static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card) +{ + struct sccb_mgr_tar_info *currTar_Info; + + if ((p_sccb->TargID >= MAX_SCSI_TAR) || (p_sccb->Lun >= MAX_LUN)) { + return; + } + currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID]; + + p_sccb->Sccb_XferState = 0x00; + p_sccb->Sccb_XferCnt = p_sccb->DataLength; + + if ((p_sccb->OperationCode == SCATTER_GATHER_COMMAND) || + (p_sccb->OperationCode == RESIDUAL_SG_COMMAND)) { + + p_sccb->Sccb_SGoffset = 0; + p_sccb->Sccb_XferState = F_SG_XFER; + p_sccb->Sccb_XferCnt = 0x00; + } + + if (p_sccb->DataLength == 0x00) + + p_sccb->Sccb_XferState |= F_ALL_XFERRED; + + if (p_sccb->ControlByte & F_USE_CMD_Q) { + if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_REJECT) + p_sccb->ControlByte &= ~F_USE_CMD_Q; + + else + currTar_Info->TarStatus |= TAG_Q_TRYING; + } + +/* For !single SCSI device in system & device allow Disconnect + or command is tag_q type then send Cmd with Disconnect Enable + else send Cmd with Disconnect Disable */ + +/* + if (((!(FPT_BL_Card[p_card].globalFlags & F_SINGLE_DEVICE)) && + (currTar_Info->TarStatus & TAR_ALLOW_DISC)) || + (currTar_Info->TarStatus & TAG_Q_TRYING)) { +*/ + if ((currTar_Info->TarStatus & TAR_ALLOW_DISC) || + (currTar_Info->TarStatus & TAG_Q_TRYING)) { + p_sccb->Sccb_idmsg = IDENTIFY(true, p_sccb->Lun); + } else { + p_sccb->Sccb_idmsg = IDENTIFY(false, p_sccb->Lun); + } + + p_sccb->HostStatus = 0x00; + p_sccb->TargetStatus = 0x00; + p_sccb->Sccb_tag = 0x00; + p_sccb->Sccb_MGRFlags = 0x00; + p_sccb->Sccb_sgseg = 0x00; + p_sccb->Sccb_ATC = 0x00; + p_sccb->Sccb_savedATC = 0x00; +/* + p_sccb->SccbVirtDataPtr = 0x00; + p_sccb->Sccb_forwardlink = NULL; + p_sccb->Sccb_backlink = NULL; + */ + p_sccb->Sccb_scsistat = BUS_FREE_ST; + p_sccb->SccbStatus = SCCB_IN_PROCESS; + p_sccb->Sccb_scsimsg = NOP; + +} + +/*--------------------------------------------------------------------- + * + * Function: Phase Decode + * + * Description: Determine the phase and call the appropriate function. + * + *---------------------------------------------------------------------*/ + +static void FPT_phaseDecode(u32 p_port, unsigned char p_card) +{ + unsigned char phase_ref; + void (*phase) (u32, unsigned char); + + DISABLE_AUTO(p_port); + + phase_ref = + (unsigned char)(RD_HARPOON(p_port + hp_scsisig) & S_SCSI_PHZ); + + phase = FPT_s_PhaseTbl[phase_ref]; + + (*phase) (p_port, p_card); /* Call the correct phase func */ +} + +/*--------------------------------------------------------------------- + * + * Function: Data Out Phase + * + * Description: Start up both the BusMaster and Xbow. 
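+ *              The SCCB enters DATA_OUT_ST, the automation END_DATA sequence
+ *              is armed and the transfer is handed to FPT_dataXferProcessor();
+ *              a zero transfer count on a data-out request is reported as
+ *              SCCB_DATA_OVER_RUN and the phase is padded out via FPT_sxfrp().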
+ * + *---------------------------------------------------------------------*/ + +static void FPT_phaseDataOut(u32 port, unsigned char p_card) +{ + + struct sccb *currSCCB; + + currSCCB = FPT_BL_Card[p_card].currentSCCB; + if (currSCCB == NULL) { + return; /* Exit if No SCCB record */ + } + + currSCCB->Sccb_scsistat = DATA_OUT_ST; + currSCCB->Sccb_XferState &= ~(F_HOST_XFER_DIR | F_NO_DATA_YET); + + WR_HARPOON(port + hp_portctrl_0, SCSI_PORT); + + WRW_HARPOON((port + hp_intstat), XFER_CNT_0); + + WR_HARPOON(port + hp_autostart_0, (END_DATA + END_DATA_START)); + + FPT_dataXferProcessor(port, &FPT_BL_Card[p_card]); + + if (currSCCB->Sccb_XferCnt == 0) { + + if ((currSCCB->ControlByte & SCCB_DATA_XFER_OUT) && + (currSCCB->HostStatus == SCCB_COMPLETE)) + currSCCB->HostStatus = SCCB_DATA_OVER_RUN; + + FPT_sxfrp(port, p_card); + if (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | RESET))) + FPT_phaseDecode(port, p_card); + } +} + +/*--------------------------------------------------------------------- + * + * Function: Data In Phase + * + * Description: Startup the BusMaster and the XBOW. + * + *---------------------------------------------------------------------*/ + +static void FPT_phaseDataIn(u32 port, unsigned char p_card) +{ + + struct sccb *currSCCB; + + currSCCB = FPT_BL_Card[p_card].currentSCCB; + + if (currSCCB == NULL) { + return; /* Exit if No SCCB record */ + } + + currSCCB->Sccb_scsistat = DATA_IN_ST; + currSCCB->Sccb_XferState |= F_HOST_XFER_DIR; + currSCCB->Sccb_XferState &= ~F_NO_DATA_YET; + + WR_HARPOON(port + hp_portctrl_0, SCSI_PORT); + + WRW_HARPOON((port + hp_intstat), XFER_CNT_0); + + WR_HARPOON(port + hp_autostart_0, (END_DATA + END_DATA_START)); + + FPT_dataXferProcessor(port, &FPT_BL_Card[p_card]); + + if (currSCCB->Sccb_XferCnt == 0) { + + if ((currSCCB->ControlByte & SCCB_DATA_XFER_IN) && + (currSCCB->HostStatus == SCCB_COMPLETE)) + currSCCB->HostStatus = SCCB_DATA_OVER_RUN; + + FPT_sxfrp(port, p_card); + if (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | RESET))) + FPT_phaseDecode(port, p_card); + + } +} + +/*--------------------------------------------------------------------- + * + * Function: Command Phase + * + * Description: Load the CDB into the automation and start it up. 
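+ *              Each CDB byte is written into automation RAM at CMD_STRT as an
+ *              MPM_OP + ACOMMAND instruction, CDBs shorter than twelve bytes
+ *              are terminated with a branch, and the automation is started
+ *              with CMD_ONLY_STRT.  A RESET_COMMAND SCCB is sent as six zero
+ *              bytes with its HostStatus preset to SCCB_PHASE_SEQUENCE_FAIL.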
+ * + *---------------------------------------------------------------------*/ + +static void FPT_phaseCommand(u32 p_port, unsigned char p_card) +{ + struct sccb *currSCCB; + u32 cdb_reg; + unsigned char i; + + currSCCB = FPT_BL_Card[p_card].currentSCCB; + + if (currSCCB->OperationCode == RESET_COMMAND) { + + currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL; + currSCCB->CdbLength = SIX_BYTE_CMD; + } + + WR_HARPOON(p_port + hp_scsisig, 0x00); + + ARAM_ACCESS(p_port); + + cdb_reg = p_port + CMD_STRT; + + for (i = 0; i < currSCCB->CdbLength; i++) { + + if (currSCCB->OperationCode == RESET_COMMAND) + + WRW_HARPOON(cdb_reg, (MPM_OP + ACOMMAND + 0x00)); + + else + WRW_HARPOON(cdb_reg, + (MPM_OP + ACOMMAND + currSCCB->Cdb[i])); + cdb_reg += 2; + } + + if (currSCCB->CdbLength != TWELVE_BYTE_CMD) + WRW_HARPOON(cdb_reg, (BRH_OP + ALWAYS + NP)); + + WR_HARPOON(p_port + hp_portctrl_0, (SCSI_PORT)); + + currSCCB->Sccb_scsistat = COMMAND_ST; + + WR_HARPOON(p_port + hp_autostart_3, (AUTO_IMMED | CMD_ONLY_STRT)); + SGRAM_ACCESS(p_port); +} + +/*--------------------------------------------------------------------- + * + * Function: Status phase + * + * Description: Bring in the status and command complete message bytes + * + *---------------------------------------------------------------------*/ + +static void FPT_phaseStatus(u32 port, unsigned char p_card) +{ + /* Start-up the automation to finish off this command and let the + isr handle the interrupt for command complete when it comes in. + We could wait here for the interrupt to be generated? + */ + + WR_HARPOON(port + hp_scsisig, 0x00); + + WR_HARPOON(port + hp_autostart_0, (AUTO_IMMED + END_DATA_START)); +} + +/*--------------------------------------------------------------------- + * + * Function: Phase Message Out + * + * Description: Send out our message (if we have one) and handle whatever + * else is involed. + * + *---------------------------------------------------------------------*/ + +static void FPT_phaseMsgOut(u32 port, unsigned char p_card) +{ + unsigned char message, scsiID; + struct sccb *currSCCB; + struct sccb_mgr_tar_info *currTar_Info; + + currSCCB = FPT_BL_Card[p_card].currentSCCB; + + if (currSCCB != NULL) { + + message = currSCCB->Sccb_scsimsg; + scsiID = currSCCB->TargID; + + if (message == TARGET_RESET) { + + currTar_Info = &FPT_sccbMgrTbl[p_card][scsiID]; + currTar_Info->TarSyncCtrl = 0; + FPT_sssyncv(port, scsiID, NARROW_SCSI, currTar_Info); + + if (FPT_sccbMgrTbl[p_card][scsiID]. + TarEEValue & EE_SYNC_MASK) { + + FPT_sccbMgrTbl[p_card][scsiID].TarStatus &= + ~TAR_SYNC_MASK; + + } + + if (FPT_sccbMgrTbl[p_card][scsiID]. 
+ TarEEValue & EE_WIDE_SCSI) { + + FPT_sccbMgrTbl[p_card][scsiID].TarStatus &= + ~TAR_WIDE_MASK; + } + + FPT_queueFlushSccb(p_card, SCCB_COMPLETE); + FPT_SccbMgrTableInitTarget(p_card, scsiID); + } else if (currSCCB->Sccb_scsistat == ABORT_ST) { + currSCCB->HostStatus = SCCB_COMPLETE; + if (FPT_BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] != + NULL) { + FPT_BL_Card[p_card].discQ_Tbl[currSCCB-> + Sccb_tag] = NULL; + FPT_sccbMgrTbl[p_card][scsiID].TarTagQ_Cnt--; + } + + } + + else if (currSCCB->Sccb_scsistat < COMMAND_ST) { + + if (message == NOP) { + currSCCB->Sccb_MGRFlags |= F_DEV_SELECTED; + + FPT_ssel(port, p_card); + return; + } + } else { + + if (message == ABORT_TASK_SET) + + FPT_queueFlushSccb(p_card, SCCB_COMPLETE); + } + + } else { + message = ABORT_TASK_SET; + } + + WRW_HARPOON((port + hp_intstat), (BUS_FREE | PHASE | XFER_CNT_0)); + + WR_HARPOON(port + hp_portctrl_0, SCSI_BUS_EN); + + WR_HARPOON(port + hp_scsidata_0, message); + + WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH)); + + ACCEPT_MSG(port); + + WR_HARPOON(port + hp_portctrl_0, 0x00); + + if ((message == ABORT_TASK_SET) || (message == TARGET_RESET) || + (message == ABORT_TASK)) { + + while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | PHASE))) { + } + + if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) { + WRW_HARPOON((port + hp_intstat), BUS_FREE); + + if (currSCCB != NULL) { + + if ((FPT_BL_Card[p_card]. + globalFlags & F_CONLUN_IO) + && + ((FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarStatus & TAR_TAG_Q_MASK) != + TAG_Q_TRYING)) + FPT_sccbMgrTbl[p_card][currSCCB-> + TargID]. + TarLUNBusy[currSCCB->Lun] = 0; + else + FPT_sccbMgrTbl[p_card][currSCCB-> + TargID]. + TarLUNBusy[0] = 0; + + FPT_queueCmdComplete(&FPT_BL_Card[p_card], + currSCCB, p_card); + } + + else { + FPT_BL_Card[p_card].globalFlags |= + F_NEW_SCCB_CMD; + } + } + + else { + + FPT_sxfrp(port, p_card); + } + } + + else { + + if (message == MSG_PARITY_ERROR) { + currSCCB->Sccb_scsimsg = NOP; + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + } else { + FPT_sxfrp(port, p_card); + } + } +} + +/*--------------------------------------------------------------------- + * + * Function: Message In phase + * + * Description: Bring in the message and determine what to do with it. + * + *---------------------------------------------------------------------*/ + +static void FPT_phaseMsgIn(u32 port, unsigned char p_card) +{ + unsigned char message; + struct sccb *currSCCB; + + currSCCB = FPT_BL_Card[p_card].currentSCCB; + + if (FPT_BL_Card[p_card].globalFlags & F_HOST_XFER_ACT) { + + FPT_phaseChkFifo(port, p_card); + } + + message = RD_HARPOON(port + hp_scsidata_0); + if ((message == DISCONNECT) || (message == SAVE_POINTERS)) { + + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + END_DATA_START)); + + } + + else { + + message = FPT_sfm(port, currSCCB); + if (message) { + + FPT_sdecm(message, port, p_card); + + } else { + if (currSCCB->Sccb_scsimsg != MSG_PARITY_ERROR) + ACCEPT_MSG(port); + WR_HARPOON(port + hp_autostart_1, + (AUTO_IMMED + DISCONNECT_START)); + } + } + +} + +/*--------------------------------------------------------------------- + * + * Function: Illegal phase + * + * Description: Target switched to some illegal phase, so all we can do + * is report an error back to the host (if that is possible) + * and send an ABORT message to the misbehaving target. 
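+ *              The SCCB is marked SCCB_PHASE_SEQUENCE_FAIL / ABORT_ST and
+ *              ABORT_TASK_SET is queued as the outgoing message with ATN
+ *              raised.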
+ * + *---------------------------------------------------------------------*/ + +static void FPT_phaseIllegal(u32 port, unsigned char p_card) +{ + struct sccb *currSCCB; + + currSCCB = FPT_BL_Card[p_card].currentSCCB; + + WR_HARPOON(port + hp_scsisig, RD_HARPOON(port + hp_scsisig)); + if (currSCCB != NULL) { + + currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL; + currSCCB->Sccb_scsistat = ABORT_ST; + currSCCB->Sccb_scsimsg = ABORT_TASK_SET; + } + + ACCEPT_MSG_ATN(port); +} + +/*--------------------------------------------------------------------- + * + * Function: Phase Check FIFO + * + * Description: Make sure data has been flushed from both FIFOs and abort + * the operations if necessary. + * + *---------------------------------------------------------------------*/ + +static void FPT_phaseChkFifo(u32 port, unsigned char p_card) +{ + u32 xfercnt; + struct sccb *currSCCB; + + currSCCB = FPT_BL_Card[p_card].currentSCCB; + + if (currSCCB->Sccb_scsistat == DATA_IN_ST) { + + while ((!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY)) && + (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY)) { + } + + if (!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY)) { + currSCCB->Sccb_ATC += currSCCB->Sccb_XferCnt; + + currSCCB->Sccb_XferCnt = 0; + + if ((RDW_HARPOON((port + hp_intstat)) & PARITY) && + (currSCCB->HostStatus == SCCB_COMPLETE)) { + currSCCB->HostStatus = SCCB_PARITY_ERR; + WRW_HARPOON((port + hp_intstat), PARITY); + } + + FPT_hostDataXferAbort(port, p_card, currSCCB); + + FPT_dataXferProcessor(port, &FPT_BL_Card[p_card]); + + while ((!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY)) + && (RD_HARPOON(port + hp_ext_status) & + BM_CMD_BUSY)) { + } + + } + } + + /*End Data In specific code. */ + GET_XFER_CNT(port, xfercnt); + + WR_HARPOON(port + hp_xfercnt_0, 0x00); + + WR_HARPOON(port + hp_portctrl_0, 0x00); + + currSCCB->Sccb_ATC += (currSCCB->Sccb_XferCnt - xfercnt); + + currSCCB->Sccb_XferCnt = xfercnt; + + if ((RDW_HARPOON((port + hp_intstat)) & PARITY) && + (currSCCB->HostStatus == SCCB_COMPLETE)) { + + currSCCB->HostStatus = SCCB_PARITY_ERR; + WRW_HARPOON((port + hp_intstat), PARITY); + } + + FPT_hostDataXferAbort(port, p_card, currSCCB); + + WR_HARPOON(port + hp_fifowrite, 0x00); + WR_HARPOON(port + hp_fiforead, 0x00); + WR_HARPOON(port + hp_xferstat, 0x00); + + WRW_HARPOON((port + hp_intstat), XFER_CNT_0); +} + +/*--------------------------------------------------------------------- + * + * Function: Phase Bus Free + * + * Description: We just went bus free so figure out if it was + * because of command complete or from a disconnect. + * + *---------------------------------------------------------------------*/ +static void FPT_phaseBusFree(u32 port, unsigned char p_card) +{ + struct sccb *currSCCB; + + currSCCB = FPT_BL_Card[p_card].currentSCCB; + + if (currSCCB != NULL) { + + DISABLE_AUTO(port); + + if (currSCCB->OperationCode == RESET_COMMAND) { + + if ((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) && + ((FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)) + FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarLUNBusy[currSCCB->Lun] = 0; + else + FPT_sccbMgrTbl[p_card][currSCCB->TargID]. 
+ TarLUNBusy[0] = 0; + + FPT_queueCmdComplete(&FPT_BL_Card[p_card], currSCCB, + p_card); + + FPT_queueSearchSelect(&FPT_BL_Card[p_card], p_card); + + } + + else if (currSCCB->Sccb_scsistat == SELECT_SN_ST) { + FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus |= + (unsigned char)SYNC_SUPPORTED; + FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= + ~EE_SYNC_MASK; + } + + else if (currSCCB->Sccb_scsistat == SELECT_WN_ST) { + FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus = + (FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarStatus & ~WIDE_ENABLED) | WIDE_NEGOCIATED; + + FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= + ~EE_WIDE_SCSI; + } + + else if (currSCCB->Sccb_scsistat == SELECT_Q_ST) { + /* Make sure this is not a phony BUS_FREE. If we were + reselected or if BUSY is NOT on then this is a + valid BUS FREE. SRR Wednesday, 5/10/1995. */ + + if ((!(RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) || + (RDW_HARPOON((port + hp_intstat)) & RSEL)) { + FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarStatus &= ~TAR_TAG_Q_MASK; + FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarStatus |= TAG_Q_REJECT; + } + + else { + return; + } + } + + else { + + currSCCB->Sccb_scsistat = BUS_FREE_ST; + + if (!currSCCB->HostStatus) { + currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL; + } + + if ((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) && + ((FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)) + FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarLUNBusy[currSCCB->Lun] = 0; + else + FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarLUNBusy[0] = 0; + + FPT_queueCmdComplete(&FPT_BL_Card[p_card], currSCCB, + p_card); + return; + } + + FPT_BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD; + + } /*end if !=null */ +} + +/*--------------------------------------------------------------------- + * + * Function: Auto Load Default Map + * + * Description: Load the Automation RAM with the default map values. 
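+ *              The map holds the IDENTIFY and tag message-out slots, twelve
+ *              CDB byte slots, and the branch targets for data-in/data-out,
+ *              save-data-pointers, disconnect, status and command-complete
+ *              handling, each path ending in an SSI_OP interrupt back to the
+ *              manager.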
+ * + *---------------------------------------------------------------------*/ +static void FPT_autoLoadDefaultMap(u32 p_port) +{ + u32 map_addr; + + ARAM_ACCESS(p_port); + map_addr = p_port + hp_aramBase; + + WRW_HARPOON(map_addr, (MPM_OP + AMSG_OUT + 0xC0)); /*ID MESSAGE */ + map_addr += 2; + WRW_HARPOON(map_addr, (MPM_OP + AMSG_OUT + 0x20)); /*SIMPLE TAG QUEUEING MSG */ + map_addr += 2; + WRW_HARPOON(map_addr, RAT_OP); /*RESET ATTENTION */ + map_addr += 2; + WRW_HARPOON(map_addr, (MPM_OP + AMSG_OUT + 0x00)); /*TAG ID MSG */ + map_addr += 2; + WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 0 */ + map_addr += 2; + WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 1 */ + map_addr += 2; + WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 2 */ + map_addr += 2; + WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 3 */ + map_addr += 2; + WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 4 */ + map_addr += 2; + WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 5 */ + map_addr += 2; + WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 6 */ + map_addr += 2; + WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 7 */ + map_addr += 2; + WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 8 */ + map_addr += 2; + WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 9 */ + map_addr += 2; + WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 10 */ + map_addr += 2; + WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 11 */ + map_addr += 2; + WRW_HARPOON(map_addr, (CPE_OP + ADATA_OUT + DINT)); /*JUMP IF DATA OUT */ + map_addr += 2; + WRW_HARPOON(map_addr, (TCB_OP + FIFO_0 + DI)); /*JUMP IF NO DATA IN FIFO */ + map_addr += 2; /*This means AYNC DATA IN */ + WRW_HARPOON(map_addr, (SSI_OP + SSI_IDO_STRT)); /*STOP AND INTERRUPT */ + map_addr += 2; + WRW_HARPOON(map_addr, (CPE_OP + ADATA_IN + DINT)); /*JUMP IF NOT DATA IN PHZ */ + map_addr += 2; + WRW_HARPOON(map_addr, (CPN_OP + AMSG_IN + ST)); /*IF NOT MSG IN CHECK 4 DATA IN */ + map_addr += 2; + WRW_HARPOON(map_addr, (CRD_OP + SDATA + 0x02)); /*SAVE DATA PTR MSG? */ + map_addr += 2; + WRW_HARPOON(map_addr, (BRH_OP + NOT_EQ + DC)); /*GO CHECK FOR DISCONNECT MSG */ + map_addr += 2; + WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_AR1)); /*SAVE DATA PTRS MSG */ + map_addr += 2; + WRW_HARPOON(map_addr, (CPN_OP + AMSG_IN + ST)); /*IF NOT MSG IN CHECK DATA IN */ + map_addr += 2; + WRW_HARPOON(map_addr, (CRD_OP + SDATA + 0x04)); /*DISCONNECT MSG? */ + map_addr += 2; + WRW_HARPOON(map_addr, (BRH_OP + NOT_EQ + UNKNWN)); /*UKNKNOWN MSG */ + map_addr += 2; + WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_BUCKET)); /*XFER DISCONNECT MSG */ + map_addr += 2; + WRW_HARPOON(map_addr, (SSI_OP + SSI_ITAR_DISC)); /*STOP AND INTERRUPT */ + map_addr += 2; + WRW_HARPOON(map_addr, (CPN_OP + ASTATUS + UNKNWN)); /*JUMP IF NOT STATUS PHZ. */ + map_addr += 2; + WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_AR0)); /*GET STATUS BYTE */ + map_addr += 2; + WRW_HARPOON(map_addr, (CPN_OP + AMSG_IN + CC)); /*ERROR IF NOT MSG IN PHZ */ + map_addr += 2; + WRW_HARPOON(map_addr, (CRD_OP + SDATA + 0x00)); /*CHECK FOR CMD COMPLETE MSG. */ + map_addr += 2; + WRW_HARPOON(map_addr, (BRH_OP + NOT_EQ + CC)); /*ERROR IF NOT CMD COMPLETE MSG. 
*/ + map_addr += 2; + WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_BUCKET)); /*GET CMD COMPLETE MSG */ + map_addr += 2; + WRW_HARPOON(map_addr, (SSI_OP + SSI_ICMD_COMP)); /*END OF COMMAND */ + map_addr += 2; + + WRW_HARPOON(map_addr, (SSI_OP + SSI_IUNKWN)); /*RECEIVED UNKNOWN MSG BYTE */ + map_addr += 2; + WRW_HARPOON(map_addr, (SSI_OP + SSI_INO_CC)); /*NO COMMAND COMPLETE AFTER STATUS */ + map_addr += 2; + WRW_HARPOON(map_addr, (SSI_OP + SSI_ITICKLE)); /*BIOS Tickled the Mgr */ + map_addr += 2; + WRW_HARPOON(map_addr, (SSI_OP + SSI_IRFAIL)); /*EXPECTED ID/TAG MESSAGES AND */ + map_addr += 2; /* DIDN'T GET ONE */ + WRW_HARPOON(map_addr, (CRR_OP + AR3 + S_IDREG)); /* comp SCSI SEL ID & AR3 */ + map_addr += 2; + WRW_HARPOON(map_addr, (BRH_OP + EQUAL + 0x00)); /*SEL ID OK then Conti. */ + map_addr += 2; + WRW_HARPOON(map_addr, (SSI_OP + SSI_INO_CC)); /*NO COMMAND COMPLETE AFTER STATUS */ + + SGRAM_ACCESS(p_port); +} + +/*--------------------------------------------------------------------- + * + * Function: Auto Command Complete + * + * Description: Post command back to host and find another command + * to execute. + * + *---------------------------------------------------------------------*/ + +static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card) +{ + struct sccb *currSCCB; + unsigned char status_byte; + + currSCCB = FPT_BL_Card[p_card].currentSCCB; + + status_byte = RD_HARPOON(p_port + hp_gp_reg_0); + + FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUN_CA = 0; + + if (status_byte != SAM_STAT_GOOD) { + + if (status_byte == SAM_STAT_TASK_SET_FULL) { + + if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) && + ((FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) { + FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarLUNBusy[currSCCB->Lun] = 1; + if (FPT_BL_Card[p_card].discQCount != 0) + FPT_BL_Card[p_card].discQCount--; + FPT_BL_Card[p_card]. + discQ_Tbl[FPT_sccbMgrTbl[p_card] + [currSCCB->TargID]. + LunDiscQ_Idx[currSCCB->Lun]] = + NULL; + } else { + FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarLUNBusy[0] = 1; + if (currSCCB->Sccb_tag) { + if (FPT_BL_Card[p_card].discQCount != 0) + FPT_BL_Card[p_card]. + discQCount--; + FPT_BL_Card[p_card].discQ_Tbl[currSCCB-> + Sccb_tag] + = NULL; + } else { + if (FPT_BL_Card[p_card].discQCount != 0) + FPT_BL_Card[p_card]. + discQCount--; + FPT_BL_Card[p_card]. + discQ_Tbl[FPT_sccbMgrTbl[p_card] + [currSCCB->TargID]. + LunDiscQ_Idx[0]] = NULL; + } + } + + currSCCB->Sccb_MGRFlags |= F_STATUSLOADED; + + FPT_queueSelectFail(&FPT_BL_Card[p_card], p_card); + + return; + } + + if (currSCCB->Sccb_scsistat == SELECT_SN_ST) { + FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus |= + (unsigned char)SYNC_SUPPORTED; + + FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= + ~EE_SYNC_MASK; + FPT_BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD; + + if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) && + ((FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) { + FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarLUNBusy[currSCCB->Lun] = 1; + if (FPT_BL_Card[p_card].discQCount != 0) + FPT_BL_Card[p_card].discQCount--; + FPT_BL_Card[p_card]. + discQ_Tbl[FPT_sccbMgrTbl[p_card] + [currSCCB->TargID]. + LunDiscQ_Idx[currSCCB->Lun]] = + NULL; + } else { + FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarLUNBusy[0] = 1; + if (currSCCB->Sccb_tag) { + if (FPT_BL_Card[p_card].discQCount != 0) + FPT_BL_Card[p_card]. 
+ discQCount--; + FPT_BL_Card[p_card].discQ_Tbl[currSCCB-> + Sccb_tag] + = NULL; + } else { + if (FPT_BL_Card[p_card].discQCount != 0) + FPT_BL_Card[p_card]. + discQCount--; + FPT_BL_Card[p_card]. + discQ_Tbl[FPT_sccbMgrTbl[p_card] + [currSCCB->TargID]. + LunDiscQ_Idx[0]] = NULL; + } + } + return; + + } + + if (currSCCB->Sccb_scsistat == SELECT_WN_ST) { + + FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus = + (FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarStatus & ~WIDE_ENABLED) | WIDE_NEGOCIATED; + + FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= + ~EE_WIDE_SCSI; + FPT_BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD; + + if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) && + ((FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) { + FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarLUNBusy[currSCCB->Lun] = 1; + if (FPT_BL_Card[p_card].discQCount != 0) + FPT_BL_Card[p_card].discQCount--; + FPT_BL_Card[p_card]. + discQ_Tbl[FPT_sccbMgrTbl[p_card] + [currSCCB->TargID]. + LunDiscQ_Idx[currSCCB->Lun]] = + NULL; + } else { + FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarLUNBusy[0] = 1; + if (currSCCB->Sccb_tag) { + if (FPT_BL_Card[p_card].discQCount != 0) + FPT_BL_Card[p_card]. + discQCount--; + FPT_BL_Card[p_card].discQ_Tbl[currSCCB-> + Sccb_tag] + = NULL; + } else { + if (FPT_BL_Card[p_card].discQCount != 0) + FPT_BL_Card[p_card]. + discQCount--; + FPT_BL_Card[p_card]. + discQ_Tbl[FPT_sccbMgrTbl[p_card] + [currSCCB->TargID]. + LunDiscQ_Idx[0]] = NULL; + } + } + return; + + } + + if (status_byte == SAM_STAT_CHECK_CONDITION) { + if (FPT_BL_Card[p_card].globalFlags & F_DO_RENEGO) { + if (FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarEEValue & EE_SYNC_MASK) { + FPT_sccbMgrTbl[p_card][currSCCB-> + TargID]. + TarStatus &= ~TAR_SYNC_MASK; + } + if (FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarEEValue & EE_WIDE_SCSI) { + FPT_sccbMgrTbl[p_card][currSCCB-> + TargID]. + TarStatus &= ~TAR_WIDE_MASK; + } + } + } + + if (!(currSCCB->Sccb_XferState & F_AUTO_SENSE)) { + + currSCCB->SccbStatus = SCCB_ERROR; + currSCCB->TargetStatus = status_byte; + + if (status_byte == SAM_STAT_CHECK_CONDITION) { + + FPT_sccbMgrTbl[p_card][currSCCB->TargID]. + TarLUN_CA = 1; + + if (currSCCB->RequestSenseLength != + NO_AUTO_REQUEST_SENSE) { + + if (currSCCB->RequestSenseLength == 0) + currSCCB->RequestSenseLength = + 14; + + FPT_ssenss(&FPT_BL_Card[p_card]); + FPT_BL_Card[p_card].globalFlags |= + F_NEW_SCCB_CMD; + + if (((FPT_BL_Card[p_card]. + globalFlags & F_CONLUN_IO) + && + ((FPT_sccbMgrTbl[p_card] + [currSCCB->TargID]. + TarStatus & TAR_TAG_Q_MASK) != + TAG_Q_TRYING))) { + FPT_sccbMgrTbl[p_card] + [currSCCB->TargID]. + TarLUNBusy[currSCCB->Lun] = + 1; + if (FPT_BL_Card[p_card]. + discQCount != 0) + FPT_BL_Card[p_card]. + discQCount--; + FPT_BL_Card[p_card]. + discQ_Tbl[FPT_sccbMgrTbl + [p_card] + [currSCCB-> + TargID]. + LunDiscQ_Idx + [currSCCB->Lun]] = + NULL; + } else { + FPT_sccbMgrTbl[p_card] + [currSCCB->TargID]. + TarLUNBusy[0] = 1; + if (currSCCB->Sccb_tag) { + if (FPT_BL_Card[p_card]. + discQCount != 0) + FPT_BL_Card + [p_card]. + discQCount--; + FPT_BL_Card[p_card]. + discQ_Tbl[currSCCB-> + Sccb_tag] + = NULL; + } else { + if (FPT_BL_Card[p_card]. + discQCount != 0) + FPT_BL_Card + [p_card]. + discQCount--; + FPT_BL_Card[p_card]. + discQ_Tbl + [FPT_sccbMgrTbl + [p_card][currSCCB-> + TargID]. + LunDiscQ_Idx[0]] = + NULL; + } + } + return; + } + } + } + } + + if ((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) && + ((FPT_sccbMgrTbl[p_card][currSCCB->TargID]. 
+ TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)) + FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB-> + Lun] = 0; + else + FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = 0; + + FPT_queueCmdComplete(&FPT_BL_Card[p_card], currSCCB, p_card); +} + +#define SHORT_WAIT 0x0000000F +#define LONG_WAIT 0x0000FFFFL + +/*--------------------------------------------------------------------- + * + * Function: Data Transfer Processor + * + * Description: This routine performs two tasks. + * (1) Start data transfer by calling HOST_DATA_XFER_START + * function. Once data transfer is started, (2) Depends + * on the type of data transfer mode Scatter/Gather mode + * or NON Scatter/Gather mode. In NON Scatter/Gather mode, + * this routine checks Sccb_MGRFlag (F_HOST_XFER_ACT bit) for + * data transfer done. In Scatter/Gather mode, this routine + * checks bus master command complete and dual rank busy + * bit to keep chaining SC transfer command. Similarly, + * in Scatter/Gather mode, it checks Sccb_MGRFlag + * (F_HOST_XFER_ACT bit) for data transfer done. + * + *---------------------------------------------------------------------*/ + +static void FPT_dataXferProcessor(u32 port, struct sccb_card *pCurrCard) +{ + struct sccb *currSCCB; + + currSCCB = pCurrCard->currentSCCB; + + if (currSCCB->Sccb_XferState & F_SG_XFER) { + if (pCurrCard->globalFlags & F_HOST_XFER_ACT) + { + currSCCB->Sccb_sgseg += (unsigned char)SG_BUF_CNT; + currSCCB->Sccb_SGoffset = 0x00; + } + pCurrCard->globalFlags |= F_HOST_XFER_ACT; + + FPT_busMstrSGDataXferStart(port, currSCCB); + } + + else { + if (!(pCurrCard->globalFlags & F_HOST_XFER_ACT)) { + pCurrCard->globalFlags |= F_HOST_XFER_ACT; + + FPT_busMstrDataXferStart(port, currSCCB); + } + } +} + +/*--------------------------------------------------------------------- + * + * Function: BusMaster Scatter Gather Data Transfer Start + * + * Description: + * + *---------------------------------------------------------------------*/ +static void FPT_busMstrSGDataXferStart(u32 p_port, struct sccb *pcurrSCCB) +{ + u32 count, addr, tmpSGCnt; + unsigned int sg_index; + unsigned char sg_count, i; + u32 reg_offset; + struct blogic_sg_seg *segp; + + if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) + count = ((u32)HOST_RD_CMD) << 24; + else + count = ((u32)HOST_WRT_CMD) << 24; + + sg_count = 0; + tmpSGCnt = 0; + sg_index = pcurrSCCB->Sccb_sgseg; + reg_offset = hp_aramBase; + + i = (unsigned char)(RD_HARPOON(p_port + hp_page_ctrl) & + ~(SGRAM_ARAM | SCATTER_EN)); + + WR_HARPOON(p_port + hp_page_ctrl, i); + + while ((sg_count < (unsigned char)SG_BUF_CNT) && + ((sg_index * (unsigned int)SG_ELEMENT_SIZE) < + pcurrSCCB->DataLength)) { + + segp = (struct blogic_sg_seg *)(pcurrSCCB->DataPointer) + + sg_index; + tmpSGCnt += segp->segbytes; + count |= segp->segbytes; + addr = segp->segdata; + + if ((!sg_count) && (pcurrSCCB->Sccb_SGoffset)) { + addr += + ((count & 0x00FFFFFFL) - pcurrSCCB->Sccb_SGoffset); + count = + (count & 0xFF000000L) | pcurrSCCB->Sccb_SGoffset; + tmpSGCnt = count & 0x00FFFFFFL; + } + + WR_HARP32(p_port, reg_offset, addr); + reg_offset += 4; + + WR_HARP32(p_port, reg_offset, count); + reg_offset += 4; + + count &= 0xFF000000L; + sg_index++; + sg_count++; + + } /*End While */ + + pcurrSCCB->Sccb_XferCnt = tmpSGCnt; + + WR_HARPOON(p_port + hp_sg_addr, (sg_count << 4)); + + if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) { + + WR_HARP32(p_port, hp_xfercnt_0, tmpSGCnt); + + WR_HARPOON(p_port + hp_portctrl_0, + (DMA_PORT | SCSI_PORT | SCSI_INBIT)); + WR_HARPOON(p_port + 
hp_scsisig, S_DATAI_PH); + } + + else { + + if ((!(RD_HARPOON(p_port + hp_synctarg_0) & NARROW_SCSI)) && + (tmpSGCnt & 0x000000001)) { + + pcurrSCCB->Sccb_XferState |= F_ODD_BALL_CNT; + tmpSGCnt--; + } + + WR_HARP32(p_port, hp_xfercnt_0, tmpSGCnt); + + WR_HARPOON(p_port + hp_portctrl_0, + (SCSI_PORT | DMA_PORT | DMA_RD)); + WR_HARPOON(p_port + hp_scsisig, S_DATAO_PH); + } + + WR_HARPOON(p_port + hp_page_ctrl, (unsigned char)(i | SCATTER_EN)); + +} + +/*--------------------------------------------------------------------- + * + * Function: BusMaster Data Transfer Start + * + * Description: + * + *---------------------------------------------------------------------*/ +static void FPT_busMstrDataXferStart(u32 p_port, struct sccb *pcurrSCCB) +{ + u32 addr, count; + + if (!(pcurrSCCB->Sccb_XferState & F_AUTO_SENSE)) { + + count = pcurrSCCB->Sccb_XferCnt; + + addr = (u32)(unsigned long)pcurrSCCB->DataPointer + pcurrSCCB->Sccb_ATC; + } + + else { + addr = pcurrSCCB->SensePointer; + count = pcurrSCCB->RequestSenseLength; + + } + + HP_SETUP_ADDR_CNT(p_port, addr, count); + + if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) { + + WR_HARPOON(p_port + hp_portctrl_0, + (DMA_PORT | SCSI_PORT | SCSI_INBIT)); + WR_HARPOON(p_port + hp_scsisig, S_DATAI_PH); + + WR_HARPOON(p_port + hp_xfer_cmd, + (XFER_DMA_HOST | XFER_HOST_AUTO | XFER_DMA_8BIT)); + } + + else { + + WR_HARPOON(p_port + hp_portctrl_0, + (SCSI_PORT | DMA_PORT | DMA_RD)); + WR_HARPOON(p_port + hp_scsisig, S_DATAO_PH); + + WR_HARPOON(p_port + hp_xfer_cmd, + (XFER_HOST_DMA | XFER_HOST_AUTO | XFER_DMA_8BIT)); + + } +} + +/*--------------------------------------------------------------------- + * + * Function: BusMaster Timeout Handler + * + * Description: This function is called after a bus master command busy time + * out is detected. This routines issue halt state machine + * with a software time out for command busy. If command busy + * is still asserted at the end of the time out, it issues + * hard abort with another software time out. It hard abort + * command busy is also time out, it'll just give up. + * + *---------------------------------------------------------------------*/ +static unsigned char FPT_busMstrTimeOut(u32 p_port) +{ + unsigned long timeout; + + timeout = LONG_WAIT; + + WR_HARPOON(p_port + hp_sys_ctrl, HALT_MACH); + + while ((!(RD_HARPOON(p_port + hp_ext_status) & CMD_ABORTED)) + && timeout--) { + } + + if (RD_HARPOON(p_port + hp_ext_status) & BM_CMD_BUSY) { + WR_HARPOON(p_port + hp_sys_ctrl, HARD_ABORT); + + timeout = LONG_WAIT; + while ((RD_HARPOON(p_port + hp_ext_status) & BM_CMD_BUSY) + && timeout--) { + } + } + + RD_HARPOON(p_port + hp_int_status); /*Clear command complete */ + + if (RD_HARPOON(p_port + hp_ext_status) & BM_CMD_BUSY) { + return 1; + } + + else { + return 0; + } +} + +/*--------------------------------------------------------------------- + * + * Function: Host Data Transfer Abort + * + * Description: Abort any in progress transfer. 
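+ *              The bus-master transfer counter is flushed (FLUSH_XFER_CNTR),
+ *              FPT_busMstrTimeOut() is used if the engine stays busy, a bad
+ *              external status is recorded as SCCB_BM_ERR, and for
+ *              scatter/gather requests Sccb_sgseg/Sccb_SGoffset are rewound
+ *              from the remaining count so the transfer can be restarted.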
+ * + *---------------------------------------------------------------------*/ +static void FPT_hostDataXferAbort(u32 port, unsigned char p_card, + struct sccb *pCurrSCCB) +{ + + unsigned long timeout; + unsigned long remain_cnt; + u32 sg_ptr; + struct blogic_sg_seg *segp; + + FPT_BL_Card[p_card].globalFlags &= ~F_HOST_XFER_ACT; + + if (pCurrSCCB->Sccb_XferState & F_AUTO_SENSE) { + + if (!(RD_HARPOON(port + hp_int_status) & INT_CMD_COMPL)) { + + WR_HARPOON(port + hp_bm_ctrl, + (RD_HARPOON(port + hp_bm_ctrl) | + FLUSH_XFER_CNTR)); + timeout = LONG_WAIT; + + while ((RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) + && timeout--) { + } + + WR_HARPOON(port + hp_bm_ctrl, + (RD_HARPOON(port + hp_bm_ctrl) & + ~FLUSH_XFER_CNTR)); + + if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) { + + if (FPT_busMstrTimeOut(port)) { + + if (pCurrSCCB->HostStatus == 0x00) + + pCurrSCCB->HostStatus = + SCCB_BM_ERR; + + } + + if (RD_HARPOON(port + hp_int_status) & + INT_EXT_STATUS) + + if (RD_HARPOON(port + hp_ext_status) & + BAD_EXT_STATUS) + + if (pCurrSCCB->HostStatus == + 0x00) + { + pCurrSCCB->HostStatus = + SCCB_BM_ERR; + } + } + } + } + + else if (pCurrSCCB->Sccb_XferCnt) { + + if (pCurrSCCB->Sccb_XferState & F_SG_XFER) { + + WR_HARPOON(port + hp_page_ctrl, + (RD_HARPOON(port + hp_page_ctrl) & + ~SCATTER_EN)); + + WR_HARPOON(port + hp_sg_addr, 0x00); + + sg_ptr = pCurrSCCB->Sccb_sgseg + SG_BUF_CNT; + + if (sg_ptr > + (unsigned int)(pCurrSCCB->DataLength / + SG_ELEMENT_SIZE)) { + + sg_ptr = (u32)(pCurrSCCB->DataLength / + SG_ELEMENT_SIZE); + } + + remain_cnt = pCurrSCCB->Sccb_XferCnt; + + while (remain_cnt < 0x01000000L) { + + sg_ptr--; + segp = (struct blogic_sg_seg *)(pCurrSCCB-> + DataPointer) + (sg_ptr * 2); + if (remain_cnt > (unsigned long)segp->segbytes) + remain_cnt -= + (unsigned long)segp->segbytes; + else + break; + } + + if (remain_cnt < 0x01000000L) { + + pCurrSCCB->Sccb_SGoffset = remain_cnt; + + pCurrSCCB->Sccb_sgseg = (unsigned short)sg_ptr; + + if ((unsigned long)(sg_ptr * SG_ELEMENT_SIZE) == + pCurrSCCB->DataLength && (remain_cnt == 0)) + + pCurrSCCB->Sccb_XferState |= + F_ALL_XFERRED; + } + + else { + + if (pCurrSCCB->HostStatus == 0x00) { + + pCurrSCCB->HostStatus = + SCCB_GROSS_FW_ERR; + } + } + } + + if (!(pCurrSCCB->Sccb_XferState & F_HOST_XFER_DIR)) { + + if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) { + + FPT_busMstrTimeOut(port); + } + + else { + + if (RD_HARPOON(port + hp_int_status) & + INT_EXT_STATUS) { + + if (RD_HARPOON(port + hp_ext_status) & + BAD_EXT_STATUS) { + + if (pCurrSCCB->HostStatus == + 0x00) { + + pCurrSCCB->HostStatus = + SCCB_BM_ERR; + } + } + } + + } + } + + else { + + if ((RD_HARPOON(port + hp_fifo_cnt)) >= BM_THRESHOLD) { + + timeout = SHORT_WAIT; + + while ((RD_HARPOON(port + hp_ext_status) & + BM_CMD_BUSY) + && ((RD_HARPOON(port + hp_fifo_cnt)) >= + BM_THRESHOLD) && timeout--) { + } + } + + if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) { + + WR_HARPOON(port + hp_bm_ctrl, + (RD_HARPOON(port + hp_bm_ctrl) | + FLUSH_XFER_CNTR)); + + timeout = LONG_WAIT; + + while ((RD_HARPOON(port + hp_ext_status) & + BM_CMD_BUSY) && timeout--) { + } + + WR_HARPOON(port + hp_bm_ctrl, + (RD_HARPOON(port + hp_bm_ctrl) & + ~FLUSH_XFER_CNTR)); + + if (RD_HARPOON(port + hp_ext_status) & + BM_CMD_BUSY) { + + if (pCurrSCCB->HostStatus == 0x00) { + + pCurrSCCB->HostStatus = + SCCB_BM_ERR; + } + + FPT_busMstrTimeOut(port); + } + } + + if (RD_HARPOON(port + hp_int_status) & INT_EXT_STATUS) { + + if (RD_HARPOON(port + hp_ext_status) & + BAD_EXT_STATUS) { + + if 
(pCurrSCCB->HostStatus == 0x00) { + + pCurrSCCB->HostStatus = + SCCB_BM_ERR; + } + } + } + } + + } + + else { + + if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) { + + timeout = LONG_WAIT; + + while ((RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) + && timeout--) { + } + + if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) { + + if (pCurrSCCB->HostStatus == 0x00) { + + pCurrSCCB->HostStatus = SCCB_BM_ERR; + } + + FPT_busMstrTimeOut(port); + } + } + + if (RD_HARPOON(port + hp_int_status) & INT_EXT_STATUS) { + + if (RD_HARPOON(port + hp_ext_status) & BAD_EXT_STATUS) { + + if (pCurrSCCB->HostStatus == 0x00) { + + pCurrSCCB->HostStatus = SCCB_BM_ERR; + } + } + + } + + if (pCurrSCCB->Sccb_XferState & F_SG_XFER) { + + WR_HARPOON(port + hp_page_ctrl, + (RD_HARPOON(port + hp_page_ctrl) & + ~SCATTER_EN)); + + WR_HARPOON(port + hp_sg_addr, 0x00); + + pCurrSCCB->Sccb_sgseg += SG_BUF_CNT; + + pCurrSCCB->Sccb_SGoffset = 0x00; + + if ((u32)(pCurrSCCB->Sccb_sgseg * SG_ELEMENT_SIZE) >= + pCurrSCCB->DataLength) { + + pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED; + pCurrSCCB->Sccb_sgseg = + (unsigned short)(pCurrSCCB->DataLength / + SG_ELEMENT_SIZE); + } + } + + else { + if (!(pCurrSCCB->Sccb_XferState & F_AUTO_SENSE)) + pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED; + } + } + + WR_HARPOON(port + hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT)); +} + +/*--------------------------------------------------------------------- + * + * Function: Host Data Transfer Restart + * + * Description: Reset the available count due to a restore data + * pointers message. + * + *---------------------------------------------------------------------*/ +static void FPT_hostDataXferRestart(struct sccb *currSCCB) +{ + unsigned long data_count; + unsigned int sg_index; + struct blogic_sg_seg *segp; + + if (currSCCB->Sccb_XferState & F_SG_XFER) { + + currSCCB->Sccb_XferCnt = 0; + + sg_index = 0xffff; /*Index by long words into sg list. */ + data_count = 0; /*Running count of SG xfer counts. */ + + + while (data_count < currSCCB->Sccb_ATC) { + + sg_index++; + segp = (struct blogic_sg_seg *)(currSCCB->DataPointer) + + (sg_index * 2); + data_count += segp->segbytes; + } + + if (data_count == currSCCB->Sccb_ATC) { + + currSCCB->Sccb_SGoffset = 0; + sg_index++; + } + + else { + currSCCB->Sccb_SGoffset = + data_count - currSCCB->Sccb_ATC; + } + + currSCCB->Sccb_sgseg = (unsigned short)sg_index; + } + + else { + currSCCB->Sccb_XferCnt = + currSCCB->DataLength - currSCCB->Sccb_ATC; + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_scini + * + * Description: Setup all data structures necessary for SCAM selection. + * + *---------------------------------------------------------------------*/ + +static void FPT_scini(unsigned char p_card, unsigned char p_our_id, + unsigned char p_power_up) +{ + + unsigned char loser, assigned_id; + u32 p_port; + + unsigned char i, k, ScamFlg; + struct sccb_card *currCard; + struct nvram_info *pCurrNvRam; + + currCard = &FPT_BL_Card[p_card]; + p_port = currCard->ioPort; + pCurrNvRam = currCard->pNvRamInfo; + + if (pCurrNvRam) { + ScamFlg = pCurrNvRam->niScamConf; + i = pCurrNvRam->niSysConf; + } else { + ScamFlg = + (unsigned char)FPT_utilEERead(p_port, SCAM_CONFIG / 2); + i = (unsigned + char)(FPT_utilEERead(p_port, (SYSTEM_CONFIG / 2))); + } + if (!(i & 0x02)) /* check if reset bus in AutoSCSI parameter set */ + return; + + FPT_inisci(p_card, p_port, p_our_id); + + /* Force to wait 1 sec after SCSI bus reset. 
Some SCAM device FW + too slow to return to SCAM selection */ + + /* if (p_power_up) + FPT_Wait1Second(p_port); + else + FPT_Wait(p_port, TO_250ms); */ + + FPT_Wait1Second(p_port); + + if ((ScamFlg & SCAM_ENABLED) && (ScamFlg & SCAM_LEVEL2)) { + while (!(FPT_scarb(p_port, INIT_SELTD))) { + } + + FPT_scsel(p_port); + + do { + FPT_scxferc(p_port, SYNC_PTRN); + FPT_scxferc(p_port, DOM_MSTR); + loser = + FPT_scsendi(p_port, + &FPT_scamInfo[p_our_id].id_string[0]); + } while (loser == 0xFF); + + FPT_scbusf(p_port); + + if ((p_power_up) && (!loser)) { + FPT_sresb(p_port, p_card); + FPT_Wait(p_port, TO_250ms); + + while (!(FPT_scarb(p_port, INIT_SELTD))) { + } + + FPT_scsel(p_port); + + do { + FPT_scxferc(p_port, SYNC_PTRN); + FPT_scxferc(p_port, DOM_MSTR); + loser = + FPT_scsendi(p_port, + &FPT_scamInfo[p_our_id]. + id_string[0]); + } while (loser == 0xFF); + + FPT_scbusf(p_port); + } + } + + else { + loser = 0; + } + + if (!loser) { + + FPT_scamInfo[p_our_id].state = ID_ASSIGNED; + + if (ScamFlg & SCAM_ENABLED) { + + for (i = 0; i < MAX_SCSI_TAR; i++) { + if ((FPT_scamInfo[i].state == ID_UNASSIGNED) || + (FPT_scamInfo[i].state == ID_UNUSED)) { + if (FPT_scsell(p_port, i)) { + FPT_scamInfo[i].state = LEGACY; + if ((FPT_scamInfo[i]. + id_string[0] != 0xFF) + || (FPT_scamInfo[i]. + id_string[1] != 0xFA)) { + + FPT_scamInfo[i]. + id_string[0] = 0xFF; + FPT_scamInfo[i]. + id_string[1] = 0xFA; + if (pCurrNvRam == NULL) + currCard-> + globalFlags + |= + F_UPDATE_EEPROM; + } + } + } + } + + FPT_sresb(p_port, p_card); + FPT_Wait1Second(p_port); + while (!(FPT_scarb(p_port, INIT_SELTD))) { + } + FPT_scsel(p_port); + FPT_scasid(p_card, p_port); + } + + } + + else if ((loser) && (ScamFlg & SCAM_ENABLED)) { + FPT_scamInfo[p_our_id].id_string[0] = SLV_TYPE_CODE0; + assigned_id = 0; + FPT_scwtsel(p_port); + + do { + while (FPT_scxferc(p_port, 0x00) != SYNC_PTRN) { + } + + i = FPT_scxferc(p_port, 0x00); + if (i == ASSIGN_ID) { + if (! + (FPT_scsendi + (p_port, + &FPT_scamInfo[p_our_id].id_string[0]))) { + i = FPT_scxferc(p_port, 0x00); + if (FPT_scvalq(i)) { + k = FPT_scxferc(p_port, 0x00); + + if (FPT_scvalq(k)) { + currCard->ourId = + ((unsigned char)(i + << + 3) + + + (k & + (unsigned char)7)) + & (unsigned char) + 0x3F; + FPT_inisci(p_card, + p_port, + p_our_id); + FPT_scamInfo[currCard-> + ourId]. + state = ID_ASSIGNED; + FPT_scamInfo[currCard-> + ourId]. + id_string[0] + = SLV_TYPE_CODE0; + assigned_id = 1; + } + } + } + } + + else if (i == SET_P_FLAG) { + if (!(FPT_scsendi(p_port, + &FPT_scamInfo[p_our_id]. 
+ id_string[0]))) + FPT_scamInfo[p_our_id].id_string[0] |= + 0x80; + } + } while (!assigned_id); + + while (FPT_scxferc(p_port, 0x00) != CFG_CMPLT) { + } + } + + if (ScamFlg & SCAM_ENABLED) { + FPT_scbusf(p_port); + if (currCard->globalFlags & F_UPDATE_EEPROM) { + FPT_scsavdi(p_card, p_port); + currCard->globalFlags &= ~F_UPDATE_EEPROM; + } + } + +/* + for (i=0,k=0; i < MAX_SCSI_TAR; i++) + { + if ((FPT_scamInfo[i].state == ID_ASSIGNED) || + (FPT_scamInfo[i].state == LEGACY)) + k++; + } + + if (k==2) + currCard->globalFlags |= F_SINGLE_DEVICE; + else + currCard->globalFlags &= ~F_SINGLE_DEVICE; +*/ +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_scarb + * + * Description: Gain control of the bus and wait SCAM select time (250ms) + * + *---------------------------------------------------------------------*/ + +static int FPT_scarb(u32 p_port, unsigned char p_sel_type) +{ + if (p_sel_type == INIT_SELTD) { + + while (RD_HARPOON(p_port + hp_scsisig) & (SCSI_SEL | SCSI_BSY)) { + } + + if (RD_HARPOON(p_port + hp_scsisig) & SCSI_SEL) + return 0; + + if (RD_HARPOON(p_port + hp_scsidata_0) != 00) + return 0; + + WR_HARPOON(p_port + hp_scsisig, + (RD_HARPOON(p_port + hp_scsisig) | SCSI_BSY)); + + if (RD_HARPOON(p_port + hp_scsisig) & SCSI_SEL) { + + WR_HARPOON(p_port + hp_scsisig, + (RD_HARPOON(p_port + hp_scsisig) & + ~SCSI_BSY)); + return 0; + } + + WR_HARPOON(p_port + hp_scsisig, + (RD_HARPOON(p_port + hp_scsisig) | SCSI_SEL)); + + if (RD_HARPOON(p_port + hp_scsidata_0) != 00) { + + WR_HARPOON(p_port + hp_scsisig, + (RD_HARPOON(p_port + hp_scsisig) & + ~(SCSI_BSY | SCSI_SEL))); + return 0; + } + } + + WR_HARPOON(p_port + hp_clkctrl_0, (RD_HARPOON(p_port + hp_clkctrl_0) + & ~ACTdeassert)); + WR_HARPOON(p_port + hp_scsireset, SCAM_EN); + WR_HARPOON(p_port + hp_scsidata_0, 0x00); + WR_HARPOON(p_port + hp_scsidata_1, 0x00); + WR_HARPOON(p_port + hp_portctrl_0, SCSI_BUS_EN); + + WR_HARPOON(p_port + hp_scsisig, + (RD_HARPOON(p_port + hp_scsisig) | SCSI_MSG)); + + WR_HARPOON(p_port + hp_scsisig, (RD_HARPOON(p_port + hp_scsisig) + & ~SCSI_BSY)); + + FPT_Wait(p_port, TO_250ms); + + return 1; +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_scbusf + * + * Description: Release the SCSI bus and disable SCAM selection. + * + *---------------------------------------------------------------------*/ + +static void FPT_scbusf(u32 p_port) +{ + WR_HARPOON(p_port + hp_page_ctrl, + (RD_HARPOON(p_port + hp_page_ctrl) | G_INT_DISABLE)); + + WR_HARPOON(p_port + hp_scsidata_0, 0x00); + + WR_HARPOON(p_port + hp_portctrl_0, (RD_HARPOON(p_port + hp_portctrl_0) + & ~SCSI_BUS_EN)); + + WR_HARPOON(p_port + hp_scsisig, 0x00); + + WR_HARPOON(p_port + hp_scsireset, (RD_HARPOON(p_port + hp_scsireset) + & ~SCAM_EN)); + + WR_HARPOON(p_port + hp_clkctrl_0, (RD_HARPOON(p_port + hp_clkctrl_0) + | ACTdeassert)); + + WRW_HARPOON((p_port + hp_intstat), (BUS_FREE | AUTO_INT | SCAM_SEL)); + + WR_HARPOON(p_port + hp_page_ctrl, + (RD_HARPOON(p_port + hp_page_ctrl) & ~G_INT_DISABLE)); +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_scasid + * + * Description: Assign an ID to all the SCAM devices. 
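FPT_scarb follows a check, assert, re-check pattern: make sure the bus looks free, claim it, then immediately verify that no other initiator got there first, backing off on any conflict. Below is a minimal model of that pattern only, assuming a toy in-memory bus in place of the Harpoon signal and data registers; the real routine also enables SCAM mode, drives SCSI_MSG and waits the 250 ms SCAM selection time after winning.

#include <stdbool.h>
#include <stdio.h>

#define SIG_BSY 0x01u
#define SIG_SEL 0x02u

/* Toy bus model used only for this sketch. */
struct toy_bus {
	unsigned char sig;    /* currently asserted control signals */
	unsigned char data;   /* current data-bus contents          */
};

/* Check, assert, re-check: back off and report failure the moment
 * another initiator appears on the bus. */
static bool scam_arbitrate_sketch(struct toy_bus *bus)
{
	if (bus->sig & (SIG_SEL | SIG_BSY))
		return false;                 /* bus not free (the driver spins here instead) */
	if (bus->data != 0)
		return false;                 /* somebody is already driving data */

	bus->sig |= SIG_BSY;                  /* claim the bus */
	if (bus->sig & SIG_SEL) {             /* lost the race: back off */
		bus->sig &= ~SIG_BSY;
		return false;
	}

	bus->sig |= SIG_SEL;                  /* complete the claim */
	if (bus->data != 0) {                 /* re-check after asserting */
		bus->sig &= ~(SIG_BSY | SIG_SEL);
		return false;
	}
	return true;                          /* won: SCAM selection can proceed */
}

int main(void)
{
	struct toy_bus bus = { 0, 0 };

	printf("arbitration %s\n", scam_arbitrate_sketch(&bus) ? "won" : "lost");
	return 0;
}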
+ * + *---------------------------------------------------------------------*/ + +static void FPT_scasid(unsigned char p_card, u32 p_port) +{ + unsigned char temp_id_string[ID_STRING_LENGTH]; + + unsigned char i, k, scam_id; + unsigned char crcBytes[3]; + struct nvram_info *pCurrNvRam; + unsigned short *pCrcBytes; + + pCurrNvRam = FPT_BL_Card[p_card].pNvRamInfo; + + i = 0; + + while (!i) { + + for (k = 0; k < ID_STRING_LENGTH; k++) { + temp_id_string[k] = (unsigned char)0x00; + } + + FPT_scxferc(p_port, SYNC_PTRN); + FPT_scxferc(p_port, ASSIGN_ID); + + if (!(FPT_sciso(p_port, &temp_id_string[0]))) { + if (pCurrNvRam) { + pCrcBytes = (unsigned short *)&crcBytes[0]; + *pCrcBytes = FPT_CalcCrc16(&temp_id_string[0]); + crcBytes[2] = FPT_CalcLrc(&temp_id_string[0]); + temp_id_string[1] = crcBytes[2]; + temp_id_string[2] = crcBytes[0]; + temp_id_string[3] = crcBytes[1]; + for (k = 4; k < ID_STRING_LENGTH; k++) + temp_id_string[k] = (unsigned char)0x00; + } + i = FPT_scmachid(p_card, temp_id_string); + + if (i == CLR_PRIORITY) { + FPT_scxferc(p_port, MISC_CODE); + FPT_scxferc(p_port, CLR_P_FLAG); + i = 0; /*Not the last ID yet. */ + } + + else if (i != NO_ID_AVAIL) { + if (i < 8) + FPT_scxferc(p_port, ID_0_7); + else + FPT_scxferc(p_port, ID_8_F); + + scam_id = (i & (unsigned char)0x07); + + for (k = 1; k < 0x08; k <<= 1) + if (!(k & i)) + scam_id += 0x08; /*Count number of zeros in DB0-3. */ + + FPT_scxferc(p_port, scam_id); + + i = 0; /*Not the last ID yet. */ + } + } + + else { + i = 1; + } + + } /*End while */ + + FPT_scxferc(p_port, SYNC_PTRN); + FPT_scxferc(p_port, CFG_CMPLT); +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_scsel + * + * Description: Select all the SCAM devices. + * + *---------------------------------------------------------------------*/ + +static void FPT_scsel(u32 p_port) +{ + + WR_HARPOON(p_port + hp_scsisig, SCSI_SEL); + FPT_scwiros(p_port, SCSI_MSG); + + WR_HARPOON(p_port + hp_scsisig, (SCSI_SEL | SCSI_BSY)); + + WR_HARPOON(p_port + hp_scsisig, + (SCSI_SEL | SCSI_BSY | SCSI_IOBIT | SCSI_CD)); + WR_HARPOON(p_port + hp_scsidata_0, + (unsigned char)(RD_HARPOON(p_port + hp_scsidata_0) | + (unsigned char)(BIT(7) + BIT(6)))); + + WR_HARPOON(p_port + hp_scsisig, (SCSI_BSY | SCSI_IOBIT | SCSI_CD)); + FPT_scwiros(p_port, SCSI_SEL); + + WR_HARPOON(p_port + hp_scsidata_0, + (unsigned char)(RD_HARPOON(p_port + hp_scsidata_0) & + ~(unsigned char)BIT(6))); + FPT_scwirod(p_port, BIT(6)); + + WR_HARPOON(p_port + hp_scsisig, + (SCSI_SEL | SCSI_BSY | SCSI_IOBIT | SCSI_CD)); +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_scxferc + * + * Description: Handshake the p_data (DB4-0) across the bus. + * + *---------------------------------------------------------------------*/ + +static unsigned char FPT_scxferc(u32 p_port, unsigned char p_data) +{ + unsigned char curr_data, ret_data; + + curr_data = p_data | BIT(7) | BIT(5); /*Start with DB7 & DB5 asserted. */ + + WR_HARPOON(p_port + hp_scsidata_0, curr_data); + + curr_data &= ~BIT(7); + + WR_HARPOON(p_port + hp_scsidata_0, curr_data); + + FPT_scwirod(p_port, BIT(7)); /*Wait for DB7 to be released. 
*/ + while (!(RD_HARPOON(p_port + hp_scsidata_0) & BIT(5))) ; + + ret_data = (RD_HARPOON(p_port + hp_scsidata_0) & (unsigned char)0x1F); + + curr_data |= BIT(6); + + WR_HARPOON(p_port + hp_scsidata_0, curr_data); + + curr_data &= ~BIT(5); + + WR_HARPOON(p_port + hp_scsidata_0, curr_data); + + FPT_scwirod(p_port, BIT(5)); /*Wait for DB5 to be released. */ + + curr_data &= ~(BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0)); /*Release data bits */ + curr_data |= BIT(7); + + WR_HARPOON(p_port + hp_scsidata_0, curr_data); + + curr_data &= ~BIT(6); + + WR_HARPOON(p_port + hp_scsidata_0, curr_data); + + FPT_scwirod(p_port, BIT(6)); /*Wait for DB6 to be released. */ + + return ret_data; +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_scsendi + * + * Description: Transfer our Identification string to determine if we + * will be the dominant master. + * + *---------------------------------------------------------------------*/ + +static unsigned char FPT_scsendi(u32 p_port, unsigned char p_id_string[]) +{ + unsigned char ret_data, byte_cnt, bit_cnt, defer; + + defer = 0; + + for (byte_cnt = 0; byte_cnt < ID_STRING_LENGTH; byte_cnt++) { + + for (bit_cnt = 0x80; bit_cnt != 0; bit_cnt >>= 1) { + + if (defer) + ret_data = FPT_scxferc(p_port, 00); + + else if (p_id_string[byte_cnt] & bit_cnt) + + ret_data = FPT_scxferc(p_port, 02); + + else { + + ret_data = FPT_scxferc(p_port, 01); + if (ret_data & 02) + defer = 1; + } + + if ((ret_data & 0x1C) == 0x10) + return 0x00; /*End of isolation stage, we won! */ + + if (ret_data & 0x1C) + return 0xFF; + + if ((defer) && (!(ret_data & 0x1F))) + return 0x01; /*End of isolation stage, we lost. */ + + } /*bit loop */ + + } /*byte loop */ + + if (defer) + return 0x01; /*We lost */ + else + return 0; /*We WON! Yeeessss! */ +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_sciso + * + * Description: Transfer the Identification string. + * + *---------------------------------------------------------------------*/ + +static unsigned char FPT_sciso(u32 p_port, unsigned char p_id_string[]) +{ + unsigned char ret_data, the_data, byte_cnt, bit_cnt; + + the_data = 0; + + for (byte_cnt = 0; byte_cnt < ID_STRING_LENGTH; byte_cnt++) { + + for (bit_cnt = 0; bit_cnt < 8; bit_cnt++) { + + ret_data = FPT_scxferc(p_port, 0); + + if (ret_data & 0xFC) + return 0xFF; + + else { + + the_data <<= 1; + if (ret_data & BIT(1)) { + the_data |= 1; + } + } + + if ((ret_data & 0x1F) == 0) { +/* + if(bit_cnt != 0 || bit_cnt != 8) + { + byte_cnt = 0; + bit_cnt = 0; + FPT_scxferc(p_port, SYNC_PTRN); + FPT_scxferc(p_port, ASSIGN_ID); + continue; + } +*/ + if (byte_cnt) + return 0x00; + else + return 0xFF; + } + + } /*bit loop */ + + p_id_string[byte_cnt] = the_data; + + } /*byte loop */ + + return 0; +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_scwirod + * + * Description: Sample the SCSI data bus making sure the signal has been + * deasserted for the correct number of consecutive samples. 
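The isolation exchange in FPT_scsendi effectively elects the initiator whose ID string is largest when compared bit by bit, most significant bit first, starting at byte 0: a participant defers the first time it drives a 0 and sees a 1 on the bus, and a deferred participant has lost. The sketch below models only that outcome for two participants, not the wire handshake done by FPT_scxferc; the ID_STRING_LENGTH value and the sample strings are assumptions.

#include <stdio.h>

#define ID_STRING_LENGTH 32   /* assumed length for this sketch only */

/* Decide which of two ID strings would dominate a two-party isolation:
 * compare bit by bit, MSB first, byte 0 first; the side still driving
 * a 1 at the first difference keeps going, the other defers. */
static int isolation_winner(const unsigned char a[ID_STRING_LENGTH],
			    const unsigned char b[ID_STRING_LENGTH])
{
	for (int byte = 0; byte < ID_STRING_LENGTH; byte++) {
		for (unsigned char bit = 0x80; bit != 0; bit >>= 1) {
			int bit_a = (a[byte] & bit) != 0;
			int bit_b = (b[byte] & bit) != 0;

			if (bit_a != bit_b)
				return bit_a ? 0 : 1;
		}
	}
	return 0;   /* identical strings: neither side defers (treated as a tie here) */
}

int main(void)
{
	unsigned char a[ID_STRING_LENGTH] = { 0x80, 0x01 };   /* arbitrary example strings */
	unsigned char b[ID_STRING_LENGTH] = { 0x40, 0xFF };

	printf("string %c dominates\n", isolation_winner(a, b) == 0 ? 'a' : 'b');
	return 0;
}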
+ * + *---------------------------------------------------------------------*/ + +static void FPT_scwirod(u32 p_port, unsigned char p_data_bit) +{ + unsigned char i; + + i = 0; + while (i < MAX_SCSI_TAR) { + + if (RD_HARPOON(p_port + hp_scsidata_0) & p_data_bit) + + i = 0; + + else + + i++; + + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_scwiros + * + * Description: Sample the SCSI Signal lines making sure the signal has been + * deasserted for the correct number of consecutive samples. + * + *---------------------------------------------------------------------*/ + +static void FPT_scwiros(u32 p_port, unsigned char p_data_bit) +{ + unsigned char i; + + i = 0; + while (i < MAX_SCSI_TAR) { + + if (RD_HARPOON(p_port + hp_scsisig) & p_data_bit) + + i = 0; + + else + + i++; + + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_scvalq + * + * Description: Make sure we received a valid data byte. + * + *---------------------------------------------------------------------*/ + +static unsigned char FPT_scvalq(unsigned char p_quintet) +{ + unsigned char count; + + for (count = 1; count < 0x08; count <<= 1) { + if (!(p_quintet & count)) + p_quintet -= 0x80; + } + + if (p_quintet & 0x18) + return 0; + + else + return 1; +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_scsell + * + * Description: Select the specified device ID using a selection timeout + * less than 4ms. If somebody responds then it is a legacy + * drive and this ID must be marked as such. + * + *---------------------------------------------------------------------*/ + +static unsigned char FPT_scsell(u32 p_port, unsigned char targ_id) +{ + unsigned long i; + + WR_HARPOON(p_port + hp_page_ctrl, + (RD_HARPOON(p_port + hp_page_ctrl) | G_INT_DISABLE)); + + ARAM_ACCESS(p_port); + + WR_HARPOON(p_port + hp_addstat, + (RD_HARPOON(p_port + hp_addstat) | SCAM_TIMER)); + WR_HARPOON(p_port + hp_seltimeout, TO_4ms); + + for (i = p_port + CMD_STRT; i < p_port + CMD_STRT + 12; i += 2) { + WRW_HARPOON(i, (MPM_OP + ACOMMAND)); + } + WRW_HARPOON(i, (BRH_OP + ALWAYS + NP)); + + WRW_HARPOON((p_port + hp_intstat), + (RESET | TIMEOUT | SEL | BUS_FREE | AUTO_INT)); + + WR_HARPOON(p_port + hp_select_id, targ_id); + + WR_HARPOON(p_port + hp_portctrl_0, SCSI_PORT); + WR_HARPOON(p_port + hp_autostart_3, (SELECT | CMD_ONLY_STRT)); + WR_HARPOON(p_port + hp_scsictrl_0, (SEL_TAR | ENA_RESEL)); + + while (!(RDW_HARPOON((p_port + hp_intstat)) & + (RESET | PROG_HLT | TIMEOUT | AUTO_INT))) { + } + + if (RDW_HARPOON((p_port + hp_intstat)) & RESET) + FPT_Wait(p_port, TO_250ms); + + DISABLE_AUTO(p_port); + + WR_HARPOON(p_port + hp_addstat, + (RD_HARPOON(p_port + hp_addstat) & ~SCAM_TIMER)); + WR_HARPOON(p_port + hp_seltimeout, TO_290ms); + + SGRAM_ACCESS(p_port); + + if (RDW_HARPOON((p_port + hp_intstat)) & (RESET | TIMEOUT)) { + + WRW_HARPOON((p_port + hp_intstat), + (RESET | TIMEOUT | SEL | BUS_FREE | PHASE)); + + WR_HARPOON(p_port + hp_page_ctrl, + (RD_HARPOON(p_port + hp_page_ctrl) & + ~G_INT_DISABLE)); + + return 0; /*No legacy device */ + } + + else { + + while (!(RDW_HARPOON((p_port + hp_intstat)) & BUS_FREE)) { + if (RD_HARPOON(p_port + hp_scsisig) & SCSI_REQ) { + WR_HARPOON(p_port + hp_scsisig, + (SCSI_ACK + S_ILL_PH)); + ACCEPT_MSG(p_port); + } + } + + WRW_HARPOON((p_port + hp_intstat), CLR_ALL_INT_1); + + WR_HARPOON(p_port + hp_page_ctrl, + (RD_HARPOON(p_port + hp_page_ctrl) & + ~G_INT_DISABLE)); + + 
return 1; /*Found one of them oldies! */ + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_scwtsel + * + * Description: Wait to be selected by another SCAM initiator. + * + *---------------------------------------------------------------------*/ + +static void FPT_scwtsel(u32 p_port) +{ + while (!(RDW_HARPOON((p_port + hp_intstat)) & SCAM_SEL)) { + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_inisci + * + * Description: Setup the data Structure with the info from the EEPROM. + * + *---------------------------------------------------------------------*/ + +static void FPT_inisci(unsigned char p_card, u32 p_port, unsigned char p_our_id) +{ + unsigned char i, k, max_id; + unsigned short ee_data; + struct nvram_info *pCurrNvRam; + + pCurrNvRam = FPT_BL_Card[p_card].pNvRamInfo; + + if (RD_HARPOON(p_port + hp_page_ctrl) & NARROW_SCSI_CARD) + max_id = 0x08; + + else + max_id = 0x10; + + if (pCurrNvRam) { + for (i = 0; i < max_id; i++) { + + for (k = 0; k < 4; k++) + FPT_scamInfo[i].id_string[k] = + pCurrNvRam->niScamTbl[i][k]; + for (k = 4; k < ID_STRING_LENGTH; k++) + FPT_scamInfo[i].id_string[k] = + (unsigned char)0x00; + + if (FPT_scamInfo[i].id_string[0] == 0x00) + FPT_scamInfo[i].state = ID_UNUSED; /*Default to unused ID. */ + else + FPT_scamInfo[i].state = ID_UNASSIGNED; /*Default to unassigned ID. */ + + } + } else { + for (i = 0; i < max_id; i++) { + for (k = 0; k < ID_STRING_LENGTH; k += 2) { + ee_data = + FPT_utilEERead(p_port, + (unsigned + short)((EE_SCAMBASE / 2) + + (unsigned short)(i * + ((unsigned short)ID_STRING_LENGTH / 2)) + (unsigned short)(k / 2))); + FPT_scamInfo[i].id_string[k] = + (unsigned char)ee_data; + ee_data >>= 8; + FPT_scamInfo[i].id_string[k + 1] = + (unsigned char)ee_data; + } + + if ((FPT_scamInfo[i].id_string[0] == 0x00) || + (FPT_scamInfo[i].id_string[0] == 0xFF)) + + FPT_scamInfo[i].state = ID_UNUSED; /*Default to unused ID. */ + + else + FPT_scamInfo[i].state = ID_UNASSIGNED; /*Default to unassigned ID. */ + + } + } + for (k = 0; k < ID_STRING_LENGTH; k++) + FPT_scamInfo[p_our_id].id_string[k] = FPT_scamHAString[k]; + +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_scmachid + * + * Description: Match the Device ID string with our values stored in + * the EEPROM. 
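In the EEPROM path of FPT_inisci, each 16-bit word holds two consecutive ID-string bytes, low byte first, and the table entry for target i starts at word offset EE_SCAMBASE / 2 + i * (ID_STRING_LENGTH / 2). Here is a sketch of that unpacking over a fake in-memory EEPROM; the ID_STRING_LENGTH and EE_SCAMBASE values below are placeholders, and ee_read() stands in for FPT_utilEERead.

#include <stdio.h>

#define ID_STRING_LENGTH 32        /* assumption for the sketch          */
#define EE_SCAMBASE      0x100     /* placeholder byte offset            */

/* Fake EEPROM image addressed by 16-bit word, standing in for FPT_utilEERead(). */
static unsigned short fake_eeprom[0x200];

static unsigned short ee_read(unsigned short word_addr)
{
	return fake_eeprom[word_addr];
}

/* Unpack the stored ID string of SCAM table entry 'target': the low byte
 * of each word is the even string byte, the high byte the odd one. */
static void read_id_string(unsigned char target, unsigned char id_string[ID_STRING_LENGTH])
{
	unsigned short base = (EE_SCAMBASE / 2) +
			      (unsigned short)(target * (ID_STRING_LENGTH / 2));

	for (int k = 0; k < ID_STRING_LENGTH; k += 2) {
		unsigned short w = ee_read(base + k / 2);

		id_string[k]     = (unsigned char)w;          /* low byte  */
		id_string[k + 1] = (unsigned char)(w >> 8);   /* high byte */
	}
}

int main(void)
{
	unsigned char id[ID_STRING_LENGTH];

	fake_eeprom[EE_SCAMBASE / 2] = 0xFA0F;   /* word 0 of target 0: bytes 0x0F, 0xFA */
	read_id_string(0, id);
	printf("id_string[0]=0x%02X id_string[1]=0x%02X\n", id[0], id[1]);
	return 0;
}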
+ * + *---------------------------------------------------------------------*/ + +static unsigned char FPT_scmachid(unsigned char p_card, + unsigned char p_id_string[]) +{ + + unsigned char i, k, match; + + for (i = 0; i < MAX_SCSI_TAR; i++) { + + match = 1; + + for (k = 0; k < ID_STRING_LENGTH; k++) { + if (p_id_string[k] != FPT_scamInfo[i].id_string[k]) + match = 0; + } + + if (match) { + FPT_scamInfo[i].state = ID_ASSIGNED; + return i; + } + + } + + if (p_id_string[0] & BIT(5)) + i = 8; + else + i = MAX_SCSI_TAR; + + if (((p_id_string[0] & 0x06) == 0x02) + || ((p_id_string[0] & 0x06) == 0x04)) + match = p_id_string[1] & (unsigned char)0x1F; + else + match = 7; + + while (i > 0) { + i--; + + if (FPT_scamInfo[match].state == ID_UNUSED) { + for (k = 0; k < ID_STRING_LENGTH; k++) { + FPT_scamInfo[match].id_string[k] = + p_id_string[k]; + } + + FPT_scamInfo[match].state = ID_ASSIGNED; + + if (FPT_BL_Card[p_card].pNvRamInfo == NULL) + FPT_BL_Card[p_card].globalFlags |= + F_UPDATE_EEPROM; + return match; + + } + + match--; + + if (match == 0xFF) { + if (p_id_string[0] & BIT(5)) + match = 7; + else + match = MAX_SCSI_TAR - 1; + } + } + + if (p_id_string[0] & BIT(7)) { + return CLR_PRIORITY; + } + + if (p_id_string[0] & BIT(5)) + i = 8; + else + i = MAX_SCSI_TAR; + + if (((p_id_string[0] & 0x06) == 0x02) + || ((p_id_string[0] & 0x06) == 0x04)) + match = p_id_string[1] & (unsigned char)0x1F; + else + match = 7; + + while (i > 0) { + + i--; + + if (FPT_scamInfo[match].state == ID_UNASSIGNED) { + for (k = 0; k < ID_STRING_LENGTH; k++) { + FPT_scamInfo[match].id_string[k] = + p_id_string[k]; + } + + FPT_scamInfo[match].id_string[0] |= BIT(7); + FPT_scamInfo[match].state = ID_ASSIGNED; + if (FPT_BL_Card[p_card].pNvRamInfo == NULL) + FPT_BL_Card[p_card].globalFlags |= + F_UPDATE_EEPROM; + return match; + + } + + match--; + + if (match == 0xFF) { + if (p_id_string[0] & BIT(5)) + match = 7; + else + match = MAX_SCSI_TAR - 1; + } + } + + return NO_ID_AVAIL; +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_scsavdi + * + * Description: Save off the device SCAM ID strings. + * + *---------------------------------------------------------------------*/ + +static void FPT_scsavdi(unsigned char p_card, u32 p_port) +{ + unsigned char i, k, max_id; + unsigned short ee_data, sum_data; + + sum_data = 0x0000; + + for (i = 1; i < EE_SCAMBASE / 2; i++) { + sum_data += FPT_utilEERead(p_port, i); + } + + FPT_utilEEWriteOnOff(p_port, 1); /* Enable write access to the EEPROM */ + + if (RD_HARPOON(p_port + hp_page_ctrl) & NARROW_SCSI_CARD) + max_id = 0x08; + + else + max_id = 0x10; + + for (i = 0; i < max_id; i++) { + + for (k = 0; k < ID_STRING_LENGTH; k += 2) { + ee_data = FPT_scamInfo[i].id_string[k + 1]; + ee_data <<= 8; + ee_data |= FPT_scamInfo[i].id_string[k]; + sum_data += ee_data; + FPT_utilEEWrite(p_port, ee_data, + (unsigned short)((EE_SCAMBASE / 2) + + (unsigned short)(i * + ((unsigned short)ID_STRING_LENGTH / 2)) + (unsigned short)(k / 2))); + } + } + + FPT_utilEEWrite(p_port, sum_data, EEPROM_CHECK_SUM / 2); + FPT_utilEEWriteOnOff(p_port, 0); /* Turn off write access */ +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_XbowInit + * + * Description: Setup the Xbow for normal operation. 
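When no stored string matches, FPT_scmachid derives a preferred ID from the incoming string and then walks downward through the table, wrapping at the top of the allowed range (eight IDs or MAX_SCSI_TAR, depending on a bit in the string), first looking for an unused slot and then, in a second pass, for an unassigned one. The ring search itself reduces to something like the sketch below; MAX_SCSI_TAR and the example table are assumptions.

#include <stdio.h>

#define MAX_SCSI_TAR 16   /* assumption for the sketch (wide bus) */

enum slot_state { ID_UNUSED, ID_UNASSIGNED, ID_ASSIGNED, LEGACY };

/* Walk downward from 'preferred' (which must be below MAX_SCSI_TAR),
 * wrapping back to 'limit - 1' after 0, and return the first slot whose
 * state matches 'wanted'; -1 if the whole range is exhausted. */
static int find_slot(const enum slot_state table[MAX_SCSI_TAR],
		     unsigned char preferred, unsigned char limit,
		     enum slot_state wanted)
{
	unsigned char idx = preferred;

	for (unsigned char tries = limit; tries > 0; tries--) {
		if (table[idx] == wanted)
			return idx;
		if (idx == 0)
			idx = limit - 1;      /* wrap, like the 0xFF check in the driver */
		else
			idx--;
	}
	return -1;                            /* nothing free: NO_ID_AVAIL in the driver */
}

int main(void)
{
	enum slot_state table[MAX_SCSI_TAR] = { ID_ASSIGNED };   /* slot 0 taken, rest unused */

	printf("free slot: %d\n", find_slot(table, 2, 8, ID_UNUSED));
	return 0;
}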
+ * + *---------------------------------------------------------------------*/ + +static void FPT_XbowInit(u32 port, unsigned char ScamFlg) +{ + unsigned char i; + + i = RD_HARPOON(port + hp_page_ctrl); + WR_HARPOON(port + hp_page_ctrl, (unsigned char)(i | G_INT_DISABLE)); + + WR_HARPOON(port + hp_scsireset, 0x00); + WR_HARPOON(port + hp_portctrl_1, HOST_MODE8); + + WR_HARPOON(port + hp_scsireset, (DMA_RESET | HPSCSI_RESET | PROG_RESET | + FIFO_CLR)); + + WR_HARPOON(port + hp_scsireset, SCSI_INI); + + WR_HARPOON(port + hp_clkctrl_0, CLKCTRL_DEFAULT); + + WR_HARPOON(port + hp_scsisig, 0x00); /* Clear any signals we might */ + WR_HARPOON(port + hp_scsictrl_0, ENA_SCAM_SEL); + + WRW_HARPOON((port + hp_intstat), CLR_ALL_INT); + + FPT_default_intena = RESET | RSEL | PROG_HLT | TIMEOUT | + BUS_FREE | XFER_CNT_0 | AUTO_INT; + + if ((ScamFlg & SCAM_ENABLED) && (ScamFlg & SCAM_LEVEL2)) + FPT_default_intena |= SCAM_SEL; + + WRW_HARPOON((port + hp_intena), FPT_default_intena); + + WR_HARPOON(port + hp_seltimeout, TO_290ms); + + /* Turn on SCSI_MODE8 for narrow cards to fix the + strapping issue with the DUAL CHANNEL card */ + if (RD_HARPOON(port + hp_page_ctrl) & NARROW_SCSI_CARD) + WR_HARPOON(port + hp_addstat, SCSI_MODE8); + + WR_HARPOON(port + hp_page_ctrl, i); + +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_BusMasterInit + * + * Description: Initialize the BusMaster for normal operations. + * + *---------------------------------------------------------------------*/ + +static void FPT_BusMasterInit(u32 p_port) +{ + + WR_HARPOON(p_port + hp_sys_ctrl, DRVR_RST); + WR_HARPOON(p_port + hp_sys_ctrl, 0x00); + + WR_HARPOON(p_port + hp_host_blk_cnt, XFER_BLK64); + + WR_HARPOON(p_port + hp_bm_ctrl, (BMCTRL_DEFAULT)); + + WR_HARPOON(p_port + hp_ee_ctrl, (SCSI_TERM_ENA_H)); + + RD_HARPOON(p_port + hp_int_status); /*Clear interrupts. */ + WR_HARPOON(p_port + hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT)); + WR_HARPOON(p_port + hp_page_ctrl, (RD_HARPOON(p_port + hp_page_ctrl) & + ~SCATTER_EN)); +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_DiagEEPROM + * + * Description: Verfiy checksum and 'Key' and initialize the EEPROM if + * necessary. + * + *---------------------------------------------------------------------*/ + +static void FPT_DiagEEPROM(u32 p_port) +{ + unsigned short index, temp, max_wd_cnt; + + if (RD_HARPOON(p_port + hp_page_ctrl) & NARROW_SCSI_CARD) + max_wd_cnt = EEPROM_WD_CNT; + else + max_wd_cnt = EEPROM_WD_CNT * 2; + + temp = FPT_utilEERead(p_port, FW_SIGNATURE / 2); + + if (temp == 0x4641) { + + for (index = 2; index < max_wd_cnt; index++) { + + temp += FPT_utilEERead(p_port, index); + + } + + if (temp == FPT_utilEERead(p_port, EEPROM_CHECK_SUM / 2)) { + + return; /*EEPROM is Okay so return now! 
*/ + } + } + + FPT_utilEEWriteOnOff(p_port, (unsigned char)1); + + for (index = 0; index < max_wd_cnt; index++) { + + FPT_utilEEWrite(p_port, 0x0000, index); + } + + temp = 0; + + FPT_utilEEWrite(p_port, 0x4641, FW_SIGNATURE / 2); + temp += 0x4641; + FPT_utilEEWrite(p_port, 0x3920, MODEL_NUMB_0 / 2); + temp += 0x3920; + FPT_utilEEWrite(p_port, 0x3033, MODEL_NUMB_2 / 2); + temp += 0x3033; + FPT_utilEEWrite(p_port, 0x2020, MODEL_NUMB_4 / 2); + temp += 0x2020; + FPT_utilEEWrite(p_port, 0x70D3, SYSTEM_CONFIG / 2); + temp += 0x70D3; + FPT_utilEEWrite(p_port, 0x0010, BIOS_CONFIG / 2); + temp += 0x0010; + FPT_utilEEWrite(p_port, 0x0003, SCAM_CONFIG / 2); + temp += 0x0003; + FPT_utilEEWrite(p_port, 0x0007, ADAPTER_SCSI_ID / 2); + temp += 0x0007; + + FPT_utilEEWrite(p_port, 0x0000, IGNORE_B_SCAN / 2); + temp += 0x0000; + FPT_utilEEWrite(p_port, 0x0000, SEND_START_ENA / 2); + temp += 0x0000; + FPT_utilEEWrite(p_port, 0x0000, DEVICE_ENABLE / 2); + temp += 0x0000; + + FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL01 / 2); + temp += 0x4242; + FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL23 / 2); + temp += 0x4242; + FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL45 / 2); + temp += 0x4242; + FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL67 / 2); + temp += 0x4242; + FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL89 / 2); + temp += 0x4242; + FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLab / 2); + temp += 0x4242; + FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLcd / 2); + temp += 0x4242; + FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLef / 2); + temp += 0x4242; + + FPT_utilEEWrite(p_port, 0x6C46, 64 / 2); /*PRODUCT ID */ + temp += 0x6C46; + FPT_utilEEWrite(p_port, 0x7361, 66 / 2); /* FlashPoint LT */ + temp += 0x7361; + FPT_utilEEWrite(p_port, 0x5068, 68 / 2); + temp += 0x5068; + FPT_utilEEWrite(p_port, 0x696F, 70 / 2); + temp += 0x696F; + FPT_utilEEWrite(p_port, 0x746E, 72 / 2); + temp += 0x746E; + FPT_utilEEWrite(p_port, 0x4C20, 74 / 2); + temp += 0x4C20; + FPT_utilEEWrite(p_port, 0x2054, 76 / 2); + temp += 0x2054; + FPT_utilEEWrite(p_port, 0x2020, 78 / 2); + temp += 0x2020; + + index = ((EE_SCAMBASE / 2) + (7 * 16)); + FPT_utilEEWrite(p_port, (0x0700 + TYPE_CODE0), index); + temp += (0x0700 + TYPE_CODE0); + index++; + FPT_utilEEWrite(p_port, 0x5542, index); /*Vendor ID code */ + temp += 0x5542; /* BUSLOGIC */ + index++; + FPT_utilEEWrite(p_port, 0x4C53, index); + temp += 0x4C53; + index++; + FPT_utilEEWrite(p_port, 0x474F, index); + temp += 0x474F; + index++; + FPT_utilEEWrite(p_port, 0x4349, index); + temp += 0x4349; + index++; + FPT_utilEEWrite(p_port, 0x5442, index); /*Vendor unique code */ + temp += 0x5442; /* BT- 930 */ + index++; + FPT_utilEEWrite(p_port, 0x202D, index); + temp += 0x202D; + index++; + FPT_utilEEWrite(p_port, 0x3339, index); + temp += 0x3339; + index++; /*Serial # */ + FPT_utilEEWrite(p_port, 0x2030, index); /* 01234567 */ + temp += 0x2030; + index++; + FPT_utilEEWrite(p_port, 0x5453, index); + temp += 0x5453; + index++; + FPT_utilEEWrite(p_port, 0x5645, index); + temp += 0x5645; + index++; + FPT_utilEEWrite(p_port, 0x2045, index); + temp += 0x2045; + index++; + FPT_utilEEWrite(p_port, 0x202F, index); + temp += 0x202F; + index++; + FPT_utilEEWrite(p_port, 0x4F4A, index); + temp += 0x4F4A; + index++; + FPT_utilEEWrite(p_port, 0x204E, index); + temp += 0x204E; + index++; + FPT_utilEEWrite(p_port, 0x3539, index); + temp += 0x3539; + + FPT_utilEEWrite(p_port, temp, EEPROM_CHECK_SUM / 2); + + FPT_utilEEWriteOnOff(p_port, (unsigned char)0); + +} + 
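Both FPT_DiagEEPROM and FPT_scsavdi rely on a plain 16-bit additive checksum: the configuration words are summed with natural modulo-65536 wrap-around and the result is compared against, or written to, the word at EEPROM_CHECK_SUM / 2. The sketch below shows only that arithmetic over an in-memory copy; the exact word range is illustrative, since the driver derives it from the signature word and the card width.

#include <stdbool.h>
#include <stdio.h>

/* Sum 'count' 16-bit words; unsigned short arithmetic gives the same
 * modulo-65536 wrap-around the driver relies on. */
static unsigned short ee_sum(const unsigned short *words, unsigned int count)
{
	unsigned short sum = 0;

	while (count--)
		sum += *words++;
	return sum;
}

/* Verify an image whose last word holds the checksum of the words before it.
 * This is only the arithmetic; the driver also checks a signature word first. */
static bool ee_image_ok(const unsigned short *image, unsigned int nwords)
{
	return ee_sum(image, nwords - 1) == image[nwords - 1];
}

int main(void)
{
	unsigned short image[4] = { 0x4641, 0x3920, 0x70D3 };   /* a few of the default words */

	image[3] = ee_sum(image, 3);                             /* write side: store the sum  */
	printf("checksum ok: %d\n", ee_image_ok(image, 4));      /* verify side: recompute     */
	return 0;
}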
+/*--------------------------------------------------------------------- + * + * Function: Queue Search Select + * + * Description: Try to find a new command to execute. + * + *---------------------------------------------------------------------*/ + +static void FPT_queueSearchSelect(struct sccb_card *pCurrCard, + unsigned char p_card) +{ + unsigned char scan_ptr, lun; + struct sccb_mgr_tar_info *currTar_Info; + struct sccb *pOldSccb; + + scan_ptr = pCurrCard->scanIndex; + do { + currTar_Info = &FPT_sccbMgrTbl[p_card][scan_ptr]; + if ((pCurrCard->globalFlags & F_CONLUN_IO) && + ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != + TAG_Q_TRYING)) { + if (currTar_Info->TarSelQ_Cnt != 0) { + + scan_ptr++; + if (scan_ptr == MAX_SCSI_TAR) + scan_ptr = 0; + + for (lun = 0; lun < MAX_LUN; lun++) { + if (currTar_Info->TarLUNBusy[lun] == 0) { + + pCurrCard->currentSCCB = + currTar_Info->TarSelQ_Head; + pOldSccb = NULL; + + while ((pCurrCard-> + currentSCCB != NULL) + && (lun != + pCurrCard-> + currentSCCB->Lun)) { + pOldSccb = + pCurrCard-> + currentSCCB; + pCurrCard->currentSCCB = + (struct sccb + *)(pCurrCard-> + currentSCCB)-> + Sccb_forwardlink; + } + if (pCurrCard->currentSCCB == + NULL) + continue; + if (pOldSccb != NULL) { + pOldSccb-> + Sccb_forwardlink = + (struct sccb + *)(pCurrCard-> + currentSCCB)-> + Sccb_forwardlink; + pOldSccb-> + Sccb_backlink = + (struct sccb + *)(pCurrCard-> + currentSCCB)-> + Sccb_backlink; + currTar_Info-> + TarSelQ_Cnt--; + } else { + currTar_Info-> + TarSelQ_Head = + (struct sccb + *)(pCurrCard-> + currentSCCB)-> + Sccb_forwardlink; + + if (currTar_Info-> + TarSelQ_Head == + NULL) { + currTar_Info-> + TarSelQ_Tail + = NULL; + currTar_Info-> + TarSelQ_Cnt + = 0; + } else { + currTar_Info-> + TarSelQ_Cnt--; + currTar_Info-> + TarSelQ_Head-> + Sccb_backlink + = + (struct sccb + *)NULL; + } + } + pCurrCard->scanIndex = scan_ptr; + + pCurrCard->globalFlags |= + F_NEW_SCCB_CMD; + + break; + } + } + } + + else { + scan_ptr++; + if (scan_ptr == MAX_SCSI_TAR) { + scan_ptr = 0; + } + } + + } else { + if ((currTar_Info->TarSelQ_Cnt != 0) && + (currTar_Info->TarLUNBusy[0] == 0)) { + + pCurrCard->currentSCCB = + currTar_Info->TarSelQ_Head; + + currTar_Info->TarSelQ_Head = + (struct sccb *)(pCurrCard->currentSCCB)-> + Sccb_forwardlink; + + if (currTar_Info->TarSelQ_Head == NULL) { + currTar_Info->TarSelQ_Tail = NULL; + currTar_Info->TarSelQ_Cnt = 0; + } else { + currTar_Info->TarSelQ_Cnt--; + currTar_Info->TarSelQ_Head-> + Sccb_backlink = (struct sccb *)NULL; + } + + scan_ptr++; + if (scan_ptr == MAX_SCSI_TAR) + scan_ptr = 0; + + pCurrCard->scanIndex = scan_ptr; + + pCurrCard->globalFlags |= F_NEW_SCCB_CMD; + + break; + } + + else { + scan_ptr++; + if (scan_ptr == MAX_SCSI_TAR) { + scan_ptr = 0; + } + } + } + } while (scan_ptr != pCurrCard->scanIndex); +} + +/*--------------------------------------------------------------------- + * + * Function: Queue Select Fail + * + * Description: Add the current SCCB to the head of the Queue. 
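FPT_queueSearchSelect keeps one select queue per target (a doubly linked list of SCCBs with head, tail and count) and round-robins a scan index across the targets, popping the head of the first eligible queue and remembering where to resume so every target gets a turn. Ignoring the LUN and busy filtering, the scan reduces to roughly the following; the struct layout here is a simplified stand-in, not the driver's sccb_mgr_tar_info.

#include <stdio.h>

#define MAX_SCSI_TAR 16   /* assumption for the sketch */

struct cmd {
	struct cmd *next;
	int tag;
};

struct target_q {
	struct cmd *head;
	struct cmd *tail;
	int cnt;
};

/* Round-robin over the per-target queues starting at *scan_index, pop the
 * head of the first non-empty queue, and leave *scan_index just past the
 * chosen target so the next call starts with its neighbour. */
static struct cmd *pick_next(struct target_q q[MAX_SCSI_TAR], unsigned char *scan_index)
{
	unsigned char start = *scan_index, idx = *scan_index;

	do {
		struct target_q *tq = &q[idx];

		idx = (unsigned char)((idx + 1) % MAX_SCSI_TAR);
		if (tq->cnt) {
			struct cmd *c = tq->head;

			tq->head = c->next;
			if (--tq->cnt == 0)
				tq->tail = NULL;
			*scan_index = idx;        /* fairness: resume after this target */
			return c;
		}
	} while (idx != start);

	return NULL;                              /* every queue was empty */
}

int main(void)
{
	struct target_q q[MAX_SCSI_TAR] = { 0 };
	struct cmd a = { 0, 42 };
	unsigned char scan = 0;
	struct cmd *picked;

	q[3].head = q[3].tail = &a;
	q[3].cnt = 1;
	picked = pick_next(q, &scan);
	printf("picked tag %d, next scan index %u\n", picked ? picked->tag : -1, scan);
	return 0;
}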
+ * + *---------------------------------------------------------------------*/ + +static void FPT_queueSelectFail(struct sccb_card *pCurrCard, + unsigned char p_card) +{ + unsigned char thisTarg; + struct sccb_mgr_tar_info *currTar_Info; + + if (pCurrCard->currentSCCB != NULL) { + thisTarg = + (unsigned char)(((struct sccb *)(pCurrCard->currentSCCB))-> + TargID); + currTar_Info = &FPT_sccbMgrTbl[p_card][thisTarg]; + + pCurrCard->currentSCCB->Sccb_backlink = (struct sccb *)NULL; + + pCurrCard->currentSCCB->Sccb_forwardlink = + currTar_Info->TarSelQ_Head; + + if (currTar_Info->TarSelQ_Cnt == 0) { + currTar_Info->TarSelQ_Tail = pCurrCard->currentSCCB; + } + + else { + currTar_Info->TarSelQ_Head->Sccb_backlink = + pCurrCard->currentSCCB; + } + + currTar_Info->TarSelQ_Head = pCurrCard->currentSCCB; + + pCurrCard->currentSCCB = NULL; + currTar_Info->TarSelQ_Cnt++; + } +} + +/*--------------------------------------------------------------------- + * + * Function: Queue Command Complete + * + * Description: Call the callback function with the current SCCB. + * + *---------------------------------------------------------------------*/ + +static void FPT_queueCmdComplete(struct sccb_card *pCurrCard, + struct sccb *p_sccb, unsigned char p_card) +{ + + unsigned char i, SCSIcmd; + CALL_BK_FN callback; + struct sccb_mgr_tar_info *currTar_Info; + + SCSIcmd = p_sccb->Cdb[0]; + + if (!(p_sccb->Sccb_XferState & F_ALL_XFERRED)) { + + if ((p_sccb-> + ControlByte & (SCCB_DATA_XFER_OUT | SCCB_DATA_XFER_IN)) + && (p_sccb->HostStatus == SCCB_COMPLETE) + && (p_sccb->TargetStatus != SAM_STAT_CHECK_CONDITION)) + + if ((SCSIcmd == READ_6) || + (SCSIcmd == WRITE_6) || + (SCSIcmd == READ_10) || + (SCSIcmd == WRITE_10) || + (SCSIcmd == WRITE_VERIFY) || + (SCSIcmd == START_STOP) || + (pCurrCard->globalFlags & F_NO_FILTER) + ) + p_sccb->HostStatus = SCCB_DATA_UNDER_RUN; + } + + if (p_sccb->SccbStatus == SCCB_IN_PROCESS) { + if (p_sccb->HostStatus || p_sccb->TargetStatus) + p_sccb->SccbStatus = SCCB_ERROR; + else + p_sccb->SccbStatus = SCCB_SUCCESS; + } + + if (p_sccb->Sccb_XferState & F_AUTO_SENSE) { + + p_sccb->CdbLength = p_sccb->Save_CdbLen; + for (i = 0; i < 6; i++) { + p_sccb->Cdb[i] = p_sccb->Save_Cdb[i]; + } + } + + if ((p_sccb->OperationCode == RESIDUAL_SG_COMMAND) || + (p_sccb->OperationCode == RESIDUAL_COMMAND)) { + + FPT_utilUpdateResidual(p_sccb); + } + + pCurrCard->cmdCounter--; + if (!pCurrCard->cmdCounter) { + + if (pCurrCard->globalFlags & F_GREEN_PC) { + WR_HARPOON(pCurrCard->ioPort + hp_clkctrl_0, + (PWR_DWN | CLKCTRL_DEFAULT)); + WR_HARPOON(pCurrCard->ioPort + hp_sys_ctrl, STOP_CLK); + } + + WR_HARPOON(pCurrCard->ioPort + hp_semaphore, + (RD_HARPOON(pCurrCard->ioPort + hp_semaphore) & + ~SCCB_MGR_ACTIVE)); + + } + + if (pCurrCard->discQCount != 0) { + currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID]; + if (((pCurrCard->globalFlags & F_CONLUN_IO) && + ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != + TAG_Q_TRYING))) { + pCurrCard->discQCount--; + pCurrCard->discQ_Tbl[currTar_Info-> + LunDiscQ_Idx[p_sccb->Lun]] = NULL; + } else { + if (p_sccb->Sccb_tag) { + pCurrCard->discQCount--; + pCurrCard->discQ_Tbl[p_sccb->Sccb_tag] = NULL; + } else { + pCurrCard->discQCount--; + pCurrCard->discQ_Tbl[currTar_Info-> + LunDiscQ_Idx[0]] = NULL; + } + } + + } + + callback = (CALL_BK_FN) p_sccb->SccbCallback; + callback(p_sccb); + pCurrCard->globalFlags |= F_NEW_SCCB_CMD; + pCurrCard->currentSCCB = NULL; +} + +/*--------------------------------------------------------------------- + * + * Function: Queue Disconnect 
+ * + * Description: Add SCCB to our disconnect array. + * + *---------------------------------------------------------------------*/ +static void FPT_queueDisconnect(struct sccb *p_sccb, unsigned char p_card) +{ + struct sccb_mgr_tar_info *currTar_Info; + + currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID]; + + if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) && + ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) { + FPT_BL_Card[p_card].discQ_Tbl[currTar_Info-> + LunDiscQ_Idx[p_sccb->Lun]] = + p_sccb; + } else { + if (p_sccb->Sccb_tag) { + FPT_BL_Card[p_card].discQ_Tbl[p_sccb->Sccb_tag] = + p_sccb; + FPT_sccbMgrTbl[p_card][p_sccb->TargID].TarLUNBusy[0] = + 0; + FPT_sccbMgrTbl[p_card][p_sccb->TargID].TarTagQ_Cnt++; + } else { + FPT_BL_Card[p_card].discQ_Tbl[currTar_Info-> + LunDiscQ_Idx[0]] = p_sccb; + } + } + FPT_BL_Card[p_card].currentSCCB = NULL; +} + +/*--------------------------------------------------------------------- + * + * Function: Queue Flush SCCB + * + * Description: Flush all SCCB's back to the host driver for this target. + * + *---------------------------------------------------------------------*/ + +static void FPT_queueFlushSccb(unsigned char p_card, unsigned char error_code) +{ + unsigned char qtag, thisTarg; + struct sccb *currSCCB; + struct sccb_mgr_tar_info *currTar_Info; + + currSCCB = FPT_BL_Card[p_card].currentSCCB; + if (currSCCB != NULL) { + thisTarg = (unsigned char)currSCCB->TargID; + currTar_Info = &FPT_sccbMgrTbl[p_card][thisTarg]; + + for (qtag = 0; qtag < QUEUE_DEPTH; qtag++) { + + if (FPT_BL_Card[p_card].discQ_Tbl[qtag] && + (FPT_BL_Card[p_card].discQ_Tbl[qtag]->TargID == + thisTarg)) { + + FPT_BL_Card[p_card].discQ_Tbl[qtag]-> + HostStatus = (unsigned char)error_code; + + FPT_queueCmdComplete(&FPT_BL_Card[p_card], + FPT_BL_Card[p_card]. + discQ_Tbl[qtag], p_card); + + FPT_BL_Card[p_card].discQ_Tbl[qtag] = NULL; + currTar_Info->TarTagQ_Cnt--; + + } + } + } + +} + +/*--------------------------------------------------------------------- + * + * Function: Queue Flush Target SCCB + * + * Description: Flush all SCCB's back to the host driver for this target. + * + *---------------------------------------------------------------------*/ + +static void FPT_queueFlushTargSccb(unsigned char p_card, unsigned char thisTarg, + unsigned char error_code) +{ + unsigned char qtag; + struct sccb_mgr_tar_info *currTar_Info; + + currTar_Info = &FPT_sccbMgrTbl[p_card][thisTarg]; + + for (qtag = 0; qtag < QUEUE_DEPTH; qtag++) { + + if (FPT_BL_Card[p_card].discQ_Tbl[qtag] && + (FPT_BL_Card[p_card].discQ_Tbl[qtag]->TargID == thisTarg)) { + + FPT_BL_Card[p_card].discQ_Tbl[qtag]->HostStatus = + (unsigned char)error_code; + + FPT_queueCmdComplete(&FPT_BL_Card[p_card], + FPT_BL_Card[p_card]. 
+ discQ_Tbl[qtag], p_card); + + FPT_BL_Card[p_card].discQ_Tbl[qtag] = NULL; + currTar_Info->TarTagQ_Cnt--; + + } + } + +} + +static void FPT_queueAddSccb(struct sccb *p_SCCB, unsigned char p_card) +{ + struct sccb_mgr_tar_info *currTar_Info; + currTar_Info = &FPT_sccbMgrTbl[p_card][p_SCCB->TargID]; + + p_SCCB->Sccb_forwardlink = NULL; + + p_SCCB->Sccb_backlink = currTar_Info->TarSelQ_Tail; + + if (currTar_Info->TarSelQ_Cnt == 0) { + + currTar_Info->TarSelQ_Head = p_SCCB; + } + + else { + + currTar_Info->TarSelQ_Tail->Sccb_forwardlink = p_SCCB; + } + + currTar_Info->TarSelQ_Tail = p_SCCB; + currTar_Info->TarSelQ_Cnt++; +} + +/*--------------------------------------------------------------------- + * + * Function: Queue Find SCCB + * + * Description: Search the target select Queue for this SCCB, and + * remove it if found. + * + *---------------------------------------------------------------------*/ + +static unsigned char FPT_queueFindSccb(struct sccb *p_SCCB, + unsigned char p_card) +{ + struct sccb *q_ptr; + struct sccb_mgr_tar_info *currTar_Info; + + currTar_Info = &FPT_sccbMgrTbl[p_card][p_SCCB->TargID]; + + q_ptr = currTar_Info->TarSelQ_Head; + + while (q_ptr != NULL) { + + if (q_ptr == p_SCCB) { + + if (currTar_Info->TarSelQ_Head == q_ptr) { + + currTar_Info->TarSelQ_Head = + q_ptr->Sccb_forwardlink; + } + + if (currTar_Info->TarSelQ_Tail == q_ptr) { + + currTar_Info->TarSelQ_Tail = + q_ptr->Sccb_backlink; + } + + if (q_ptr->Sccb_forwardlink != NULL) { + q_ptr->Sccb_forwardlink->Sccb_backlink = + q_ptr->Sccb_backlink; + } + + if (q_ptr->Sccb_backlink != NULL) { + q_ptr->Sccb_backlink->Sccb_forwardlink = + q_ptr->Sccb_forwardlink; + } + + currTar_Info->TarSelQ_Cnt--; + + return 1; + } + + else { + q_ptr = q_ptr->Sccb_forwardlink; + } + } + + return 0; + +} + +/*--------------------------------------------------------------------- + * + * Function: Utility Update Residual Count + * + * Description: Update the XferCnt to the remaining byte count. + * If we transferred all the data then just write zero. + * If Non-SG transfer then report Total Cnt - Actual Transfer + * Cnt. For SG transfers add the count fields of all + * remaining SG elements, as well as any partial remaining + * element. + * + *---------------------------------------------------------------------*/ + +static void FPT_utilUpdateResidual(struct sccb *p_SCCB) +{ + unsigned long partial_cnt; + unsigned int sg_index; + struct blogic_sg_seg *segp; + + if (p_SCCB->Sccb_XferState & F_ALL_XFERRED) { + + p_SCCB->DataLength = 0x0000; + } + + else if (p_SCCB->Sccb_XferState & F_SG_XFER) { + + partial_cnt = 0x0000; + + sg_index = p_SCCB->Sccb_sgseg; + + + if (p_SCCB->Sccb_SGoffset) { + + partial_cnt = p_SCCB->Sccb_SGoffset; + sg_index++; + } + + while (((unsigned long)sg_index * + (unsigned long)SG_ELEMENT_SIZE) < p_SCCB->DataLength) { + segp = (struct blogic_sg_seg *)(p_SCCB->DataPointer) + + (sg_index * 2); + partial_cnt += segp->segbytes; + sg_index++; + } + + p_SCCB->DataLength = partial_cnt; + } + + else { + + p_SCCB->DataLength -= p_SCCB->Sccb_ATC; + } +} + +/*--------------------------------------------------------------------- + * + * Function: Wait 1 Second + * + * Description: Wait for 1 second. 
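For a scatter/gather command, FPT_utilUpdateResidual reports the bytes left in the element that was interrupted (Sccb_SGoffset) plus the full byte counts of every element after it. The same arithmetic over a plain array of segment lengths, with the driver's struct blogic_sg_seg walk left out, looks like this:

#include <stdio.h>

/* Residual bytes for a scatter/gather transfer: whatever is left of the
 * element being worked on, plus every element after it. */
static unsigned long sg_residual(const unsigned long seg_bytes[], unsigned int nsegs,
				 unsigned int current_seg, unsigned long left_in_current)
{
	unsigned long residual = left_in_current;

	for (unsigned int i = current_seg + 1; i < nsegs; i++)
		residual += seg_bytes[i];
	return residual;
}

int main(void)
{
	unsigned long segs[3] = { 4096, 4096, 2048 };

	/* Stopped with 3072 bytes left in the second element: 3072 + 2048 remain. */
	printf("residual = %lu\n", sg_residual(segs, 3, 1, 3072));
	return 0;
}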
+ * + *---------------------------------------------------------------------*/ + +static void FPT_Wait1Second(u32 p_port) +{ + unsigned char i; + + for (i = 0; i < 4; i++) { + + FPT_Wait(p_port, TO_250ms); + + if ((RD_HARPOON(p_port + hp_scsictrl_0) & SCSI_RST)) + break; + + if ((RDW_HARPOON((p_port + hp_intstat)) & SCAM_SEL)) + break; + } +} + +/*--------------------------------------------------------------------- + * + * Function: FPT_Wait + * + * Description: Wait the desired delay. + * + *---------------------------------------------------------------------*/ + +static void FPT_Wait(u32 p_port, unsigned char p_delay) +{ + unsigned char old_timer; + unsigned char green_flag; + + old_timer = RD_HARPOON(p_port + hp_seltimeout); + + green_flag = RD_HARPOON(p_port + hp_clkctrl_0); + WR_HARPOON(p_port + hp_clkctrl_0, CLKCTRL_DEFAULT); + + WR_HARPOON(p_port + hp_seltimeout, p_delay); + WRW_HARPOON((p_port + hp_intstat), TIMEOUT); + WRW_HARPOON((p_port + hp_intena), (FPT_default_intena & ~TIMEOUT)); + + WR_HARPOON(p_port + hp_portctrl_0, + (RD_HARPOON(p_port + hp_portctrl_0) | START_TO)); + + while (!(RDW_HARPOON((p_port + hp_intstat)) & TIMEOUT)) { + + if ((RD_HARPOON(p_port + hp_scsictrl_0) & SCSI_RST)) + break; + + if ((RDW_HARPOON((p_port + hp_intstat)) & SCAM_SEL)) + break; + } + + WR_HARPOON(p_port + hp_portctrl_0, + (RD_HARPOON(p_port + hp_portctrl_0) & ~START_TO)); + + WRW_HARPOON((p_port + hp_intstat), TIMEOUT); + WRW_HARPOON((p_port + hp_intena), FPT_default_intena); + + WR_HARPOON(p_port + hp_clkctrl_0, green_flag); + + WR_HARPOON(p_port + hp_seltimeout, old_timer); +} + +/*--------------------------------------------------------------------- + * + * Function: Enable/Disable Write to EEPROM + * + * Description: The EEPROM must first be enabled for writes + * A total of 9 clocks are needed. + * + *---------------------------------------------------------------------*/ + +static void FPT_utilEEWriteOnOff(u32 p_port, unsigned char p_mode) +{ + unsigned char ee_value; + + ee_value = + (unsigned char)(RD_HARPOON(p_port + hp_ee_ctrl) & + (EXT_ARB_ACK | SCSI_TERM_ENA_H)); + + if (p_mode) + + FPT_utilEESendCmdAddr(p_port, EWEN, EWEN_ADDR); + + else + + FPT_utilEESendCmdAddr(p_port, EWDS, EWDS_ADDR); + + WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS)); /*Turn off CS */ + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); /*Turn off Master Select */ +} + +/*--------------------------------------------------------------------- + * + * Function: Write EEPROM + * + * Description: Write a word to the EEPROM at the specified + * address. + * + *---------------------------------------------------------------------*/ + +static void FPT_utilEEWrite(u32 p_port, unsigned short ee_data, + unsigned short ee_addr) +{ + + unsigned char ee_value; + unsigned short i; + + ee_value = + (unsigned + char)((RD_HARPOON(p_port + hp_ee_ctrl) & + (EXT_ARB_ACK | SCSI_TERM_ENA_H)) | (SEE_MS | SEE_CS)); + + FPT_utilEESendCmdAddr(p_port, EE_WRITE, ee_addr); + + ee_value |= (SEE_MS + SEE_CS); + + for (i = 0x8000; i != 0; i >>= 1) { + + if (i & ee_data) + ee_value |= SEE_DO; + else + ee_value &= ~SEE_DO; + + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + ee_value |= SEE_CLK; /* Clock data! 
*/ + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + ee_value &= ~SEE_CLK; + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + } + ee_value &= (EXT_ARB_ACK | SCSI_TERM_ENA_H); + WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS)); + + FPT_Wait(p_port, TO_10ms); + + WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS | SEE_CS)); /* Set CS to EEPROM */ + WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS)); /* Turn off CS */ + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); /* Turn off Master Select */ +} + +/*--------------------------------------------------------------------- + * + * Function: Read EEPROM + * + * Description: Read a word from the EEPROM at the desired + * address. + * + *---------------------------------------------------------------------*/ + +static unsigned short FPT_utilEERead(u32 p_port, + unsigned short ee_addr) +{ + unsigned short i, ee_data1, ee_data2; + + i = 0; + ee_data1 = FPT_utilEEReadOrg(p_port, ee_addr); + do { + ee_data2 = FPT_utilEEReadOrg(p_port, ee_addr); + + if (ee_data1 == ee_data2) + return ee_data1; + + ee_data1 = ee_data2; + i++; + + } while (i < 4); + + return ee_data1; +} + +/*--------------------------------------------------------------------- + * + * Function: Read EEPROM Original + * + * Description: Read a word from the EEPROM at the desired + * address. + * + *---------------------------------------------------------------------*/ + +static unsigned short FPT_utilEEReadOrg(u32 p_port, unsigned short ee_addr) +{ + + unsigned char ee_value; + unsigned short i, ee_data; + + ee_value = + (unsigned + char)((RD_HARPOON(p_port + hp_ee_ctrl) & + (EXT_ARB_ACK | SCSI_TERM_ENA_H)) | (SEE_MS | SEE_CS)); + + FPT_utilEESendCmdAddr(p_port, EE_READ, ee_addr); + + ee_value |= (SEE_MS + SEE_CS); + ee_data = 0; + + for (i = 1; i <= 16; i++) { + + ee_value |= SEE_CLK; /* Clock data! */ + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + ee_value &= ~SEE_CLK; + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + + ee_data <<= 1; + + if (RD_HARPOON(p_port + hp_ee_ctrl) & SEE_DI) + ee_data |= 1; + } + + ee_value &= ~(SEE_MS + SEE_CS); + WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS)); /*Turn off CS */ + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); /*Turn off Master Select */ + + return ee_data; +} + +/*--------------------------------------------------------------------- + * + * Function: Send EE command and Address to the EEPROM + * + * Description: Transfers the correct command and sends the address + * to the eeprom. + * + *---------------------------------------------------------------------*/ + +static void FPT_utilEESendCmdAddr(u32 p_port, unsigned char ee_cmd, + unsigned short ee_addr) +{ + unsigned char ee_value; + unsigned char narrow_flg; + + unsigned short i; + + narrow_flg = + (unsigned char)(RD_HARPOON(p_port + hp_page_ctrl) & + NARROW_SCSI_CARD); + + ee_value = SEE_MS; + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + + ee_value |= SEE_CS; /* Set CS to EEPROM */ + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + + for (i = 0x04; i != 0; i >>= 1) { + + if (i & ee_cmd) + ee_value |= SEE_DO; + else + ee_value &= ~SEE_DO; + + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + ee_value |= SEE_CLK; /* Clock data! 
*/ + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + ee_value &= ~SEE_CLK; + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + } + + if (narrow_flg) + i = 0x0080; + + else + i = 0x0200; + + while (i != 0) { + + if (i & ee_addr) + ee_value |= SEE_DO; + else + ee_value &= ~SEE_DO; + + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + ee_value |= SEE_CLK; /* Clock data! */ + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + ee_value &= ~SEE_CLK; + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + WR_HARPOON(p_port + hp_ee_ctrl, ee_value); + + i >>= 1; + } +} + +static unsigned short FPT_CalcCrc16(unsigned char buffer[]) +{ + unsigned short crc = 0; + int i, j; + unsigned short ch; + for (i = 0; i < ID_STRING_LENGTH; i++) { + ch = (unsigned short)buffer[i]; + for (j = 0; j < 8; j++) { + if ((crc ^ ch) & 1) + crc = (crc >> 1) ^ CRCMASK; + else + crc >>= 1; + ch >>= 1; + } + } + return crc; +} + +static unsigned char FPT_CalcLrc(unsigned char buffer[]) +{ + int i; + unsigned char lrc; + lrc = 0; + for (i = 0; i < ID_STRING_LENGTH; i++) + lrc ^= buffer[i]; + return lrc; +} + +/* + The following inline definitions avoid type conflicts. +*/ + +static inline unsigned char +FlashPoint__ProbeHostAdapter(struct fpoint_info *FlashPointInfo) +{ + return FlashPoint_ProbeHostAdapter((struct sccb_mgr_info *) + FlashPointInfo); +} + +static inline void * +FlashPoint__HardwareResetHostAdapter(struct fpoint_info *FlashPointInfo) +{ + return FlashPoint_HardwareResetHostAdapter((struct sccb_mgr_info *) + FlashPointInfo); +} + +static inline void +FlashPoint__ReleaseHostAdapter(void *CardHandle) +{ + FlashPoint_ReleaseHostAdapter(CardHandle); +} + +static inline void +FlashPoint__StartCCB(void *CardHandle, struct blogic_ccb *CCB) +{ + FlashPoint_StartCCB(CardHandle, (struct sccb *)CCB); +} + +static inline void +FlashPoint__AbortCCB(void *CardHandle, struct blogic_ccb *CCB) +{ + FlashPoint_AbortCCB(CardHandle, (struct sccb *)CCB); +} + +static inline bool +FlashPoint__InterruptPending(void *CardHandle) +{ + return FlashPoint_InterruptPending(CardHandle); +} + +static inline int +FlashPoint__HandleInterrupt(void *CardHandle) +{ + return FlashPoint_HandleInterrupt(CardHandle); +} + +#define FlashPoint_ProbeHostAdapter FlashPoint__ProbeHostAdapter +#define FlashPoint_HardwareResetHostAdapter FlashPoint__HardwareResetHostAdapter +#define FlashPoint_ReleaseHostAdapter FlashPoint__ReleaseHostAdapter +#define FlashPoint_StartCCB FlashPoint__StartCCB +#define FlashPoint_AbortCCB FlashPoint__AbortCCB +#define FlashPoint_InterruptPending FlashPoint__InterruptPending +#define FlashPoint_HandleInterrupt FlashPoint__HandleInterrupt + +#else /* !CONFIG_SCSI_FLASHPOINT */ + +/* + Define prototypes for the FlashPoint SCCB Manager Functions. 
+*/ + +extern unsigned char FlashPoint_ProbeHostAdapter(struct fpoint_info *); +extern void *FlashPoint_HardwareResetHostAdapter(struct fpoint_info *); +extern void FlashPoint_StartCCB(void *, struct blogic_ccb *); +extern int FlashPoint_AbortCCB(void *, struct blogic_ccb *); +extern bool FlashPoint_InterruptPending(void *); +extern int FlashPoint_HandleInterrupt(void *); +extern void FlashPoint_ReleaseHostAdapter(void *); + +#endif /* CONFIG_SCSI_FLASHPOINT */ diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig new file mode 100644 index 000000000..695a57d89 --- /dev/null +++ b/drivers/scsi/Kconfig @@ -0,0 +1,1527 @@ +# SPDX-License-Identifier: GPL-2.0-only +menu "SCSI device support" + +config SCSI_MOD + tristate + default y if SCSI=n || SCSI=y + default m if SCSI=m + depends on BLOCK + +config RAID_ATTRS + tristate "RAID Transport Class" + default n + depends on BLOCK + depends on SCSI_MOD + help + Provides RAID + +config SCSI_COMMON + tristate + +config SCSI + tristate "SCSI device support" + depends on BLOCK + select SCSI_DMA if HAS_DMA + select SG_POOL + select SCSI_COMMON + select BLK_DEV_BSG_COMMON if BLK_DEV_BSG + help + If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or + any other SCSI device under Linux, say Y and make sure that you know + the name of your SCSI host adapter (the card inside your computer + that "speaks" the SCSI protocol, also called SCSI controller), + because you will be asked for it. + + You also need to say Y here if you have a device which speaks + the SCSI protocol. Examples of this include the parallel port + version of the IOMEGA ZIP drive, USB storage devices, Fibre + Channel, and FireWire storage. + + To compile this driver as a module, choose M here and read + . + The module will be called scsi_mod. + + However, do not compile this as a module if your root file system + (the one containing the directory /) is located on a SCSI device. + +config SCSI_DMA + bool + default n + +config SCSI_ESP_PIO + bool + +config SCSI_NETLINK + bool + default n + depends on NET + +config SCSI_PROC_FS + bool "legacy /proc/scsi/ support" + depends on SCSI && PROC_FS + default y + help + This option enables support for the various files in + /proc/scsi. In Linux 2.6 this has been superseded by + files in sysfs but many legacy applications rely on this. + + If unsure say Y. + +comment "SCSI support type (disk, tape, CD-ROM)" + depends on SCSI + +config BLK_DEV_SD + tristate "SCSI disk support" + depends on SCSI + select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY + help + If you want to use SCSI hard disks, Fibre Channel disks, + Serial ATA (SATA) or Parallel ATA (PATA) hard disks, + USB storage or the SCSI or parallel port version of + the IOMEGA ZIP drive, say Y and read the SCSI-HOWTO, + the Disk-HOWTO and the Multi-Disk-HOWTO, available from + . This is NOT for SCSI + CD-ROMs. + + To compile this driver as a module, choose M here and read + . + The module will be called sd_mod. + + Do not compile this driver as a module if your root file system + (the one containing the directory /) is located on a SCSI disk. + In this case, do not compile the driver for your SCSI host adapter + (below) as a module either. + +config CHR_DEV_ST + tristate "SCSI tape support" + depends on SCSI + help + If you want to use a SCSI tape drive under Linux, say Y and read the + SCSI-HOWTO, available from + , and + in the kernel source. This is NOT + for SCSI CD-ROMs. + + To compile this driver as a module, choose M here and read + . The module will be called st. 
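Before the Kconfig hunk continues, one detail of the FlashPoint EEPROM helpers shown above is worth spelling out: FPT_utilEERead() does not trust a single bit-banged read, it re-reads the word until two consecutive reads agree, giving up after a handful of retries. The sketch below isolates that retry idiom; it is a minimal standalone illustration, and the read_word callback, stable_ee_read name and types are hypothetical stand-ins, not identifiers from the driver.

```c
/*
 * Sketch of the "read until two consecutive values agree" idiom used by
 * FPT_utilEERead() above.  read_word() stands in for the bit-banged access
 * (FPT_utilEEReadOrg in the driver); it is a hypothetical callback.
 */
typedef unsigned short (*ee_read_fn)(unsigned long port, unsigned short addr);

static unsigned short stable_ee_read(ee_read_fn read_word,
				     unsigned long port, unsigned short addr)
{
	unsigned short prev, cur;
	int retries;

	prev = read_word(port, addr);
	for (retries = 0; retries < 4; retries++) {
		cur = read_word(port, addr);
		if (cur == prev)	/* two consecutive reads agree */
			return cur;
		prev = cur;		/* keep comparing against the newer value */
	}
	return prev;			/* never stabilised: return the last read */
}
```

As in the driver, the bound keeps a flaky serial interface from stalling initialisation; after the retry budget is spent the last value read is returned unchanged.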
+ +config BLK_DEV_SR + tristate "SCSI CDROM support" + depends on SCSI && BLK_DEV + select CDROM + help + If you want to use a CD or DVD drive attached to your computer + by SCSI, FireWire, USB or ATAPI, say Y and read the SCSI-HOWTO + and the CDROM-HOWTO at . + + Make sure to say Y or M to "ISO 9660 CD-ROM file system support". + + To compile this driver as a module, choose M here and read + . + The module will be called sr_mod. + +config CHR_DEV_SG + tristate "SCSI generic support" + depends on SCSI + help + If you want to use SCSI scanners, synthesizers or CD-writers or just + about anything having "SCSI" in its name other than hard disks, + CD-ROMs or tapes, say Y here. These won't be supported by the kernel + directly, so you need some additional software which knows how to + talk to these devices using the SCSI protocol: + + For scanners, look at SANE (). For CD + writer software look at Cdrtools + () + and for burning a "disk at once": CDRDAO + (). Cdparanoia is a high + quality digital reader of audio CDs (). + For other devices, it's possible that you'll have to write the + driver software yourself. Please read the file + for more information. + + To compile this driver as a module, choose M here and read + . The module will be called sg. + + If unsure, say N. + +config BLK_DEV_BSG + bool "/dev/bsg support (SG v4)" + depends on SCSI + default y + help + Saying Y here will enable generic SG (SCSI generic) v4 support + for any SCSI device. + + This option is required by UDEV to access device serial numbers, etc. + + If unsure, say Y. + +config CHR_DEV_SCH + tristate "SCSI media changer support" + depends on SCSI + help + This is a driver for SCSI media changers. Most common devices are + tape libraries and MOD/CDROM jukeboxes. *Real* jukeboxes, you + don't need this for those tiny 6-slot cdrom changers. Media + changers are listed as "Type: Medium Changer" in /proc/scsi/scsi. + If you have such hardware and want to use it with linux, say Y + here. Check for details. + + If you want to compile this as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read and + . The module will be called ch.o. + If unsure, say N. + +config SCSI_ENCLOSURE + tristate "SCSI Enclosure Support" + depends on SCSI && ENCLOSURE_SERVICES + depends on m || SCSI_SAS_ATTRS != m + help + Enclosures are devices sitting on or in SCSI backplanes that + manage devices. If you have a disk cage, the chances are that + it has an enclosure device. Selecting this option will just allow + certain enclosure conditions to be reported and is not required. + +config SCSI_CONSTANTS + bool "Verbose SCSI error reporting (kernel size += 36K)" + depends on SCSI + help + The error messages regarding your SCSI hardware will be easier to + understand if you say Y here; it will enlarge your kernel by about + 36 KB. If in doubt, say Y. + +config SCSI_LOGGING + bool "SCSI logging facility" + depends on SCSI + help + This turns on a logging facility that can be used to debug a number + of SCSI related problems. + + If you say Y here, no logging output will appear by default, but you + can enable logging by saying Y to "/proc file system support" and + "Sysctl support" below and executing the command + + echo > /proc/sys/dev/scsi/logging_level + + where is a four byte value representing the logging type + and logging level for each type of logging selected. + + There are a number of logging types and you can find them in the + source at . 
The logging levels + are also described in that file and they determine the verbosity of + the logging for each logging type. + + If you say N here, it may be harder to track down some types of SCSI + problems. If you say Y here your kernel will be somewhat larger, but + there should be no noticeable performance impact as long as you have + logging turned off. + +config SCSI_SCAN_ASYNC + bool "Asynchronous SCSI scanning" + depends on SCSI + help + The SCSI subsystem can probe for devices while the rest of the + system continues booting, and even probe devices on different + busses in parallel, leading to a significant speed-up. + + You can override this choice by specifying "scsi_mod.scan=sync" + or async on the kernel's command line. + + Note that this setting also affects whether resuming from + system suspend will be performed asynchronously. + +menu "SCSI Transports" + depends on SCSI + +config SCSI_SPI_ATTRS + tristate "Parallel SCSI (SPI) Transport Attributes" + depends on SCSI + help + If you wish to export transport-specific information about + each attached SCSI device to sysfs, say Y. Otherwise, say N. + +config SCSI_FC_ATTRS + tristate "FiberChannel Transport Attributes" + depends on SCSI && NET + select BLK_DEV_BSGLIB + select SCSI_NETLINK + help + If you wish to export transport-specific information about + each attached FiberChannel device to sysfs, say Y. + Otherwise, say N. + +config SCSI_ISCSI_ATTRS + tristate "iSCSI Transport Attributes" + depends on SCSI && NET + select BLK_DEV_BSGLIB + help + If you wish to export transport-specific information about + each attached iSCSI device to sysfs, say Y. + Otherwise, say N. + +config SCSI_SAS_ATTRS + tristate "SAS Transport Attributes" + depends on SCSI + select BLK_DEV_BSGLIB + help + If you wish to export transport-specific information about + each attached SAS device to sysfs, say Y. + +source "drivers/scsi/libsas/Kconfig" + +config SCSI_SRP_ATTRS + tristate "SRP Transport Attributes" + depends on SCSI + help + If you wish to export transport-specific information about + each attached SRP device to sysfs, say Y. + +endmenu + +menuconfig SCSI_LOWLEVEL + bool "SCSI low-level drivers" + depends on SCSI!=n + default y + +if SCSI_LOWLEVEL && SCSI + +config ISCSI_TCP + tristate "iSCSI Initiator over TCP/IP" + depends on SCSI && INET + select CRYPTO + select CRYPTO_MD5 + select CRYPTO_CRC32C + select SCSI_ISCSI_ATTRS + help + The iSCSI Driver provides a host with the ability to access storage + through an IP network. The driver uses the iSCSI protocol to transport + SCSI requests and responses over a TCP/IP network between the host + (the "initiator") and "targets". Architecturally, the iSCSI driver + combines with the host's TCP/IP stack, network drivers, and Network + Interface Card (NIC) to provide the same functions as a SCSI or a + Fibre Channel (FC) adapter driver with a Host Bus Adapter (HBA). + + To compile this driver as a module, choose M here: the + module will be called iscsi_tcp. + + The userspace component needed to initialize the driver, documentation, + and sample configuration files can be found here: + + http://open-iscsi.org + +config ISCSI_BOOT_SYSFS + tristate "iSCSI Boot Sysfs Interface" + default n + help + This option enables support for exposing iSCSI boot information + via sysfs to userspace. If you wish to export this information, + say Y. Otherwise, say N. 
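A few entries back, CONFIG_SCSI_LOGGING describes the value echoed into /proc/sys/dev/scsi/logging_level as a 32-bit word that packs a small level for each logging type. The userspace sketch below shows one way such a word can be assembled, assuming the 3-bits-per-type layout used by drivers/scsi/scsi_logging.h; the type index and the LOG_* names here are illustrative assumptions, and the authoritative shift values live in that header.

```c
/*
 * Assembling a logging_level word, assuming 3 bits of level per logging
 * type as in drivers/scsi/scsi_logging.h.  LOG_BITS_PER_TYPE, the type
 * index used in main() and set_log_level() are illustrative assumptions.
 */
#include <stdio.h>

#define LOG_BITS_PER_TYPE	3
#define LOG_LEVEL_MASK		((1u << LOG_BITS_PER_TYPE) - 1)

static unsigned int set_log_level(unsigned int word, unsigned int type,
				  unsigned int level)
{
	unsigned int shift = type * LOG_BITS_PER_TYPE;

	word &= ~(LOG_LEVEL_MASK << shift);		/* clear the old level */
	word |= (level & LOG_LEVEL_MASK) << shift;	/* install the new one */
	return word;
}

int main(void)
{
	unsigned int word = 0;

	word = set_log_level(word, 2, 3);	/* e.g. third logging type, level 3 */
	/* This number is what the help text means by "echo <value> ...". */
	printf("echo %u > /proc/sys/dev/scsi/logging_level\n", word);
	return 0;
}
```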
+ +source "drivers/scsi/cxgbi/Kconfig" +source "drivers/scsi/bnx2i/Kconfig" +source "drivers/scsi/bnx2fc/Kconfig" +source "drivers/scsi/be2iscsi/Kconfig" +source "drivers/scsi/cxlflash/Kconfig" + +config SGIWD93_SCSI + tristate "SGI WD93C93 SCSI Driver" + depends on SGI_HAS_WD93 && SCSI + help + If you have a Western Digital WD93 SCSI controller on + an SGI MIPS system, say Y. Otherwise, say N. + +config BLK_DEV_3W_XXXX_RAID + tristate "3ware 5/6/7/8xxx ATA-RAID support" + depends on PCI && HAS_IOPORT && SCSI + help + 3ware is the only hardware ATA-Raid product in Linux to date. + This card is 2,4, or 8 channel master mode support only. + SCSI support required!!! + + + + Please read the comments at the top of + . + +config SCSI_HPSA + tristate "HP Smart Array SCSI driver" + depends on PCI && SCSI + select CHECK_SIGNATURE + select SCSI_SAS_ATTRS + help + This driver supports HP Smart Array Controllers (circa 2009). + It is a SCSI alternative to the cciss driver, which is a block + driver. Anyone wishing to use HP Smart Array controllers who + would prefer the devices be presented to linux as SCSI devices, + rather than as generic block devices should say Y here. + +config SCSI_3W_9XXX + tristate "3ware 9xxx SATA-RAID support" + depends on PCI && SCSI + help + This driver supports the 9000 series 3ware SATA-RAID cards. + + + + Please read the comments at the top of + . + +config SCSI_3W_SAS + tristate "3ware 97xx SAS/SATA-RAID support" + depends on PCI && SCSI + help + This driver supports the LSI 3ware 9750 6Gb/s SAS/SATA-RAID cards. + + + + Please read the comments at the top of + . + +config SCSI_ACARD + tristate "ACARD SCSI support" + depends on PCI && HAS_IOPORT && SCSI + help + This driver supports the ACARD SCSI host adapter. + Support Chip + To compile this driver as a module, choose M here: the + module will be called atp870u. + +config SCSI_AHA152X + tristate "Adaptec AHA152X/2825 support" + depends on ISA && SCSI + select SCSI_SPI_ATTRS + select CHECK_SIGNATURE + help + This is a driver for the AHA-1510, AHA-1520, AHA-1522, and AHA-2825 + SCSI host adapters. It also works for the AVA-1505, but the IRQ etc. + must be manually specified in this case. + + It is explained in section 3.3 of the SCSI-HOWTO, available from + . You might also want to + read the file . + + To compile this driver as a module, choose M here: the + module will be called aha152x. + +config SCSI_AHA1542 + tristate "Adaptec AHA1542 support" + depends on ISA && SCSI && ISA_DMA_API + help + This is support for a SCSI host adapter. It is explained in section + 3.4 of the SCSI-HOWTO, available from + . Note that Trantor was + purchased by Adaptec, and some former Trantor products are being + sold under the Adaptec name. If it doesn't work out of the box, you + may have to change some settings in . + + To compile this driver as a module, choose M here: the + module will be called aha1542. + +config SCSI_AHA1740 + tristate "Adaptec AHA1740 support" + depends on EISA && SCSI + help + This is support for a SCSI host adapter. It is explained in section + 3.5 of the SCSI-HOWTO, available from + . If it doesn't work out + of the box, you may have to change some settings in + . + + To compile this driver as a module, choose M here: the + module will be called aha1740. + +config SCSI_AACRAID + tristate "Adaptec AACRAID support" + depends on SCSI && PCI + help + This driver supports a variety of Dell, HP, Adaptec, IBM and + ICP storage products. For a list of supported products, refer + to . 
+ + To compile this driver as a module, choose M here: the module + will be called aacraid. + + +source "drivers/scsi/aic7xxx/Kconfig.aic7xxx" +source "drivers/scsi/aic7xxx/Kconfig.aic79xx" +source "drivers/scsi/aic94xx/Kconfig" +source "drivers/scsi/hisi_sas/Kconfig" +source "drivers/scsi/mvsas/Kconfig" + +config SCSI_MVUMI + tristate "Marvell UMI driver" + depends on SCSI && PCI + help + Module for Marvell Universal Message Interface(UMI) driver + + To compile this driver as a module, choose M here: the + module will be called mvumi. + +config SCSI_ADVANSYS + tristate "AdvanSys SCSI support" + depends on SCSI + depends on (ISA || EISA || PCI) && HAS_IOPORT + depends on ISA_DMA_API || !ISA + help + This is a driver for all SCSI host adapters manufactured by + AdvanSys. It is documented in the kernel source in + . + + To compile this driver as a module, choose M here: the + module will be called advansys. + +config SCSI_ARCMSR + tristate "ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Adapter" + depends on PCI && SCSI + help + This driver supports all of ARECA's SATA/SAS RAID controller cards. + This is an ARECA-maintained driver by Erich Chen. + If you have any problems, please mail to: . + Areca supports Linux RAID config tools. + Please link + + To compile this driver as a module, choose M here: the + module will be called arcmsr (modprobe arcmsr). + +source "drivers/scsi/esas2r/Kconfig" +source "drivers/scsi/megaraid/Kconfig.megaraid" +source "drivers/scsi/mpt3sas/Kconfig" +source "drivers/scsi/mpi3mr/Kconfig" +source "drivers/scsi/smartpqi/Kconfig" + +config SCSI_HPTIOP + tristate "HighPoint RocketRAID 3xxx/4xxx Controller support" + depends on SCSI && PCI + help + This option enables support for HighPoint RocketRAID 3xxx/4xxx + controllers. + + To compile this driver as a module, choose M here; the module + will be called hptiop. If unsure, say N. + +config SCSI_BUSLOGIC + tristate "BusLogic SCSI support" + depends on SCSI && PCI && HAS_IOPORT + help + This is support for BusLogic MultiMaster and FlashPoint SCSI Host + Adapters. Consult the SCSI-HOWTO, available from + , and the files + and + for more information. + Note that support for FlashPoint is only available for 32-bit + x86 configurations. + + To compile this driver as a module, choose M here: the + module will be called BusLogic. + +config SCSI_FLASHPOINT + bool "FlashPoint support" + depends on SCSI_BUSLOGIC && PCI && HAS_IOPORT + help + This option allows you to add FlashPoint support to the + BusLogic SCSI driver. The FlashPoint SCCB Manager code is + substantial, so users of MultiMaster Host Adapters may not + wish to include it. + +config SCSI_MYRB + tristate "Mylex DAC960/DAC1100 PCI RAID Controller (Block Interface)" + depends on PCI + select RAID_ATTRS + help + This driver adds support for the Mylex DAC960, AcceleRAID, and + eXtremeRAID PCI RAID controllers. This driver supports the + older, block based interface. + This driver is a reimplementation of the original DAC960 + driver. If you have used the DAC960 driver you should enable + this module. + + To compile this driver as a module, choose M here: the + module will be called myrb. + +config SCSI_MYRS + tristate "Mylex DAC960/DAC1100 PCI RAID Controller (SCSI Interface)" + depends on PCI + depends on !CPU_BIG_ENDIAN || COMPILE_TEST + select RAID_ATTRS + help + This driver adds support for the Mylex DAC960, AcceleRAID, and + eXtremeRAID PCI RAID controllers. This driver supports the + newer, SCSI-based interface only. 
+ This driver is a reimplementation of the original DAC960 + driver. If you have used the DAC960 driver you should enable + this module. + + To compile this driver as a module, choose M here: the + module will be called myrs. + +config VMWARE_PVSCSI + tristate "VMware PVSCSI driver support" + depends on PCI && SCSI && X86 + help + This driver supports VMware's para virtualized SCSI HBA. + To compile this driver as a module, choose M here: the + module will be called vmw_pvscsi. + +config XEN_SCSI_FRONTEND + tristate "XEN SCSI frontend driver" + depends on SCSI && XEN + select XEN_XENBUS_FRONTEND + help + The XEN SCSI frontend driver allows the kernel to access SCSI Devices + within another guest OS (usually Dom0). + Only needed if the kernel is running in a XEN guest and generic + SCSI access to a device is needed. + +config HYPERV_STORAGE + tristate "Microsoft Hyper-V virtual storage driver" + depends on SCSI && HYPERV + depends on m || SCSI_FC_ATTRS != m + default HYPERV + help + Select this option to enable the Hyper-V virtual storage driver. + +config LIBFC + tristate "LibFC module" + depends on SCSI_FC_ATTRS + select CRC32 + help + Fibre Channel library module + +config LIBFCOE + tristate "LibFCoE module" + depends on LIBFC + help + Library for Fibre Channel over Ethernet module + +config FCOE + tristate "FCoE module" + depends on PCI + depends on LIBFCOE + help + Fibre Channel over Ethernet module + +config FCOE_FNIC + tristate "Cisco FNIC Driver" + depends on PCI && X86 + depends on LIBFCOE + help + This is support for the Cisco PCI-Express FCoE HBA. + + To compile this driver as a module, choose M here and read + . + The module will be called fnic. + +config SCSI_SNIC + tristate "Cisco SNIC Driver" + depends on PCI && SCSI + help + This is support for the Cisco PCI-Express SCSI HBA. + + To compile this driver as a module, choose M here and read + . + The module will be called snic. + +config SCSI_SNIC_DEBUG_FS + bool "Cisco SNIC Driver Debugfs Support" + depends on SCSI_SNIC && DEBUG_FS + help + This enables to list debugging information from SNIC Driver + available via debugfs file system + +config SCSI_DMX3191D + tristate "DMX3191D SCSI support" + depends on PCI && HAS_IOPORT && SCSI + select SCSI_SPI_ATTRS + help + This is support for Domex DMX3191D SCSI Host Adapters. + + To compile this driver as a module, choose M here: the + module will be called dmx3191d. + +config SCSI_FDOMAIN + tristate + depends on SCSI + +config SCSI_FDOMAIN_PCI + tristate "Future Domain TMC-3260/AHA-2920A PCI SCSI support" + depends on PCI && HAS_IOPORT && SCSI + select SCSI_FDOMAIN + help + This is support for Future Domain's PCI SCSI host adapters (TMC-3260) + and other adapters with PCI bus based on the Future Domain chipsets + (Adaptec AHA-2920A). + + NOTE: Newer Adaptec AHA-2920C boards use the Adaptec AIC-7850 chip + and should use the aic7xxx driver ("Adaptec AIC7xxx chipset SCSI + controller support"). This Future Domain driver works with the older + Adaptec AHA-2920A boards with a Future Domain chip on them. + + To compile this driver as a module, choose M here: the + module will be called fdomain_pci. 
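CONFIG_SCSI_FLASHPOINT above only decides whether the SCCB Manager code is built into the BusLogic driver; the glue at the end of FlashPoint.c earlier in this patch is what hides that choice behind one set of function names. When the option is enabled, inline wrappers cast BusLogic's structures to the manager's equivalent layouts and #defines move the public names onto those wrappers; when it is disabled, the same names are declared as plain extern prototypes. The sketch below condenses that idiom; the struct layouts, CONFIG_OPTIONAL_BACKEND and all function names are placeholders, not the driver's identifiers.

```c
/*
 * Condensed sketch of the FlashPoint glue idiom: behind one public name,
 * either an inline wrapper bridging two equivalent struct layouts, or a
 * plain extern prototype when the optional code is configured out.
 * Every identifier here is a hypothetical placeholder.
 */
struct frontend_info { int io_base; };	/* what callers pass around */
struct backend_info  { int io_base; };	/* what the optional code expects */

#ifdef CONFIG_OPTIONAL_BACKEND

unsigned char backend_probe(struct backend_info *info);	/* real worker */

static inline unsigned char frontend__probe(struct frontend_info *info)
{
	/* The layouts are kept equivalent, so a cast bridges the types. */
	return backend_probe((struct backend_info *)info);
}
#define frontend_probe frontend__probe	/* public name lands on the wrapper */

#else  /* optional code configured out */

extern unsigned char frontend_probe(struct frontend_info *info);

#endif
```

The payoff is that callers compile against a single prototype regardless of the configuration, and no translation layer survives at run time because the wrappers are inline.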
+ +config SCSI_FDOMAIN_ISA + tristate "Future Domain 16xx ISA SCSI support" + depends on ISA && SCSI + select CHECK_SIGNATURE + select SCSI_FDOMAIN + help + This is support for Future Domain's 16-bit SCSI host adapters + (TMC-1660/1680, TMC-1650/1670, TMC-1610M/MER/MEX) and other adapters + with ISA bus based on the Future Domain chipsets (Quantum ISA-200S, + ISA-250MG; and at least one IBM board). + + To compile this driver as a module, choose M here: the + module will be called fdomain_isa. + +config SCSI_ISCI + tristate "Intel(R) C600 Series Chipset SAS Controller" + depends on PCI && SCSI + depends on X86 + select SCSI_SAS_LIBSAS + help + This driver supports the 6Gb/s SAS capabilities of the storage + control unit found in the Intel(R) C600 series chipset. + +config SCSI_GENERIC_NCR5380 + tristate "Generic NCR5380/53c400 SCSI ISA card support" + depends on ISA && SCSI && HAS_IOPORT_MAP + select SCSI_SPI_ATTRS + help + This is a driver for old ISA card SCSI controllers based on a + NCR 5380, 53C80, 53C400, 53C400A, or DTC 436 device. + Most boards such as the Trantor T130 fit this category, as do + various 8-bit and 16-bit ISA cards bundled with SCSI scanners. + + To compile this driver as a module, choose M here: the + module will be called g_NCR5380. + +config SCSI_IPS + tristate "IBM ServeRAID support" + depends on PCI && HAS_IOPORT && SCSI + help + This is support for the IBM ServeRAID hardware RAID controllers. + See + and + for more information. If this driver does not work correctly + without modification please contact the author by email at + . + + To compile this driver as a module, choose M here: the + module will be called ips. + +config SCSI_IBMVSCSI + tristate "IBM Virtual SCSI support" + depends on PPC_PSERIES + select SCSI_SRP_ATTRS + help + This is the IBM POWER Virtual SCSI Client + + To compile this driver as a module, choose M here: the + module will be called ibmvscsi. + +config SCSI_IBMVSCSIS + tristate "IBM Virtual SCSI Server support" + depends on PPC_PSERIES && TARGET_CORE && SCSI && PCI + help + This is the IBM POWER Virtual SCSI Target Server + This driver uses the SRP protocol for communication between servers + guest and/or the host that run on the same server. + More information on VSCSI protocol can be found at www.power.org + + The userspace configuration needed to initialize the driver can be + be found here: + + https://github.com/powervm/ibmvscsis/wiki/Configuration + + To compile this driver as a module, choose M here: the + module will be called ibmvscsis. + +config SCSI_IBMVFC + tristate "IBM Virtual FC support" + depends on PPC_PSERIES && SCSI + depends on SCSI_FC_ATTRS + help + This is the IBM POWER Virtual FC Client + + To compile this driver as a module, choose M here: the + module will be called ibmvfc. + +config SCSI_IBMVFC_TRACE + bool "enable driver internal trace" + depends on SCSI_IBMVFC + default y + help + If you say Y here, the driver will trace all commands issued + to the adapter. Performance impact is minimal. Trace can be + dumped using /sys/class/scsi_host/hostXX/trace. + +config SCSI_INITIO + tristate "Initio 9100U(W) support" + depends on PCI && HAS_IOPORT && SCSI + help + This is support for the Initio 91XXU(W) SCSI host adapter. Please + read the SCSI-HOWTO, available from + . + + To compile this driver as a module, choose M here: the + module will be called initio. 
+ +config SCSI_INIA100 + tristate "Initio INI-A100U2W support" + depends on PCI && HAS_IOPORT && SCSI + help + This is support for the Initio INI-A100U2W SCSI host adapter. + Please read the SCSI-HOWTO, available from + . + + To compile this driver as a module, choose M here: the + module will be called a100u2w. + +config SCSI_PPA + tristate "IOMEGA parallel port (ppa - older drives)" + depends on SCSI && PARPORT_PC + depends on HAS_IOPORT + help + This driver supports older versions of IOMEGA's parallel port ZIP + drive (a 100 MB removable media device). + + Note that you can say N here if you have the SCSI version of the ZIP + drive: it will be supported automatically if you said Y to the + generic "SCSI disk support", above. + + If you have the ZIP Plus drive or a more recent parallel port ZIP + drive (if the supplied cable with the drive is labeled "AutoDetect") + then you should say N here and Y to "IOMEGA parallel port (imm - + newer drives)", below. + + For more information about this driver and how to use it you should + read the file . You should also read + the SCSI-HOWTO, which is available from + . If you use this driver, + you will still be able to use the parallel port for other tasks, + such as a printer; it is safe to compile both drivers into the + kernel. + + To compile this driver as a module, choose M here: the + module will be called ppa. + +config SCSI_IMM + tristate "IOMEGA parallel port (imm - newer drives)" + depends on SCSI && PARPORT_PC + help + This driver supports newer versions of IOMEGA's parallel port ZIP + drive (a 100 MB removable media device). + + Note that you can say N here if you have the SCSI version of the ZIP + drive: it will be supported automatically if you said Y to the + generic "SCSI disk support", above. + + If you have the ZIP Plus drive or a more recent parallel port ZIP + drive (if the supplied cable with the drive is labeled "AutoDetect") + then you should say Y here; if you have an older ZIP drive, say N + here and Y to "IOMEGA Parallel Port (ppa - older drives)", above. + + For more information about this driver and how to use it you should + read the file . You should also read + the SCSI-HOWTO, which is available from + . If you use this driver, + you will still be able to use the parallel port for other tasks, + such as a printer; it is safe to compile both drivers into the + kernel. + + To compile this driver as a module, choose M here: the + module will be called imm. + +config SCSI_IZIP_EPP16 + bool "ppa/imm option - Use slow (but safe) EPP-16" + depends on SCSI_IMM + help + EPP (Enhanced Parallel Port) is a standard for parallel ports which + allows them to act as expansion buses that can handle up to 64 + peripheral devices. + + Some parallel port chipsets are slower than their motherboard, and + so we have to control the state of the chipset's FIFO queue every + now and then to avoid data loss. This will be done if you say Y + here. + + Generally, saying Y is the safe option and slows things down a bit. + +config SCSI_IZIP_SLOW_CTR + bool "ppa/imm option - Assume slow parport control register" + depends on SCSI_PPA || SCSI_IMM + help + Some parallel ports are known to have excessive delays between + changing the parallel port control register and good data being + available on the parallel port data/status register. This option + forces a small delay (1.0 usec to be exact) after changing the + control register to let things settle out. 
Enabling this option may + result in a big drop in performance but some very old parallel ports + (found in 386 vintage machines) will not work properly. + + Generally, saying N is fine. + +config SCSI_LASI700 + tristate "HP Lasi SCSI support for 53c700/710" + depends on GSC && SCSI + select SCSI_SPI_ATTRS + help + This is a driver for the SCSI controller in the Lasi chip found in + many PA-RISC workstations & servers. If you do not know whether you + have a Lasi chip, it is safe to say "Y" here. + +config SCSI_SNI_53C710 + tristate "SNI RM SCSI support for 53c710" + depends on SNI_RM && SCSI + select SCSI_SPI_ATTRS + select 53C700_LE_ON_BE + help + This is a driver for the onboard SCSI controller found in older + SNI RM workstations & servers. + +config 53C700_LE_ON_BE + bool + depends on SCSI_LASI700 || SCSI_SNI_53C710 + default y + +config SCSI_STEX + tristate "Promise SuperTrak EX Series support" + depends on PCI && SCSI + help + This driver supports Promise SuperTrak EX series storage controllers. + + Promise provides Linux RAID configuration utility for these + controllers. Please visit to download. + + To compile this driver as a module, choose M here: the + module will be called stex. + +config 53C700_BE_BUS + bool + depends on SCSI_A4000T || SCSI_ZORRO7XX || MVME16x_SCSI || BVME6000_SCSI + default y + +config SCSI_SYM53C8XX_2 + tristate "SYM53C8XX Version 2 SCSI support" + depends on PCI && SCSI + select SCSI_SPI_ATTRS + help + This driver supports the whole NCR53C8XX/SYM53C8XX family of + PCI-SCSI controllers. It also supports the subset of LSI53C10XX + Ultra-160 controllers that are based on the SYM53C8XX SCRIPTS + language. It does not support LSI53C10XX Ultra-320 PCI-X SCSI + controllers; you need to use the Fusion MPT driver for that. + + Please read for more + information. + +config SCSI_SYM53C8XX_DMA_ADDRESSING_MODE + int "DMA addressing mode" + depends on SCSI_SYM53C8XX_2 + default "1" + help + This option only applies to PCI-SCSI chips that are PCI DAC + capable (875A, 895A, 896, 1010-33, 1010-66, 1000). + + When set to 0, the driver will program the chip to only perform + 32-bit DMA. When set to 1, the chip will be able to perform DMA + to addresses up to 1TB. When set to 2, the driver supports the + full 64-bit DMA address range, but can only address 16 segments + of 4 GB each. This limits the total addressable range to 64 GB. + + Most machines with less than 4GB of memory should use a setting + of 0 for best performance. If your machine has 4GB of memory + or more, you should set this option to 1 (the default). + + The still experimental value 2 (64 bit DMA addressing with 16 + x 4GB segments limitation) can be used on systems that require + PCI address bits past bit 39 to be set for the addressing of + memory using PCI DAC cycles. + +config SCSI_SYM53C8XX_DEFAULT_TAGS + int "Default tagged command queue depth" + depends on SCSI_SYM53C8XX_2 + default "16" + help + This is the default value of the command queue depth the + driver will announce to the generic SCSI layer for devices + that support tagged command queueing. This value can be changed + from the boot command line. This is a soft limit that cannot + exceed CONFIG_SCSI_SYM53C8XX_MAX_TAGS. + +config SCSI_SYM53C8XX_MAX_TAGS + int "Maximum number of queued commands" + depends on SCSI_SYM53C8XX_2 + default "64" + help + This option allows you to specify the maximum number of commands + that can be queued to any device, when tagged command queuing is + possible. 
The driver supports up to 256 queued commands per device. + This value is used as a compiled-in hard limit. + +config SCSI_SYM53C8XX_MMIO + bool "Use memory mapped IO" + depends on SCSI_SYM53C8XX_2 + default y + help + Memory mapped IO is faster than Port IO. Most people should + answer Y here, but some machines may have problems. If you have + to answer N here, please report the problem to the maintainer. + +config SCSI_IPR + tristate "IBM Power Linux RAID adapter support" + depends on PCI && SCSI + select FW_LOADER + select IRQ_POLL + select SGL_ALLOC + help + This driver supports the IBM Power Linux family RAID adapters. + This includes IBM pSeries 5712, 5703, 5709, and 570A, as well + as IBM iSeries 5702, 5703, 5709, and 570A. + +config SCSI_IPR_TRACE + bool "enable driver internal trace" + depends on SCSI_IPR + default y + help + If you say Y here, the driver will trace all commands issued + to the adapter. Performance impact is minimal. Trace can be + dumped using /sys/bus/class/scsi_host/hostXX/trace. + +config SCSI_IPR_DUMP + bool "enable adapter dump support" + depends on SCSI_IPR + default y + help + If you say Y here, the driver will support adapter crash dump. + If you enable this support, the iprdump daemon can be used + to capture adapter failure analysis information. + +config SCSI_ZALON + tristate "Zalon SCSI support" + depends on GSC && SCSI + select SCSI_SPI_ATTRS + help + The Zalon is a GSC/HSC bus interface chip that sits between the + PA-RISC processor and the NCR 53c720 SCSI controller on C100, + C110, J200, J210 and some D, K & R-class machines. It's also + used on the add-in Bluefish, Barracuda & Shrike SCSI cards. + Say Y here if you have one of these machines or cards. + +config SCSI_NCR53C8XX_DEFAULT_TAGS + int "default tagged command queue depth" + depends on SCSI_ZALON + default "8" + help + "Tagged command queuing" is a feature of SCSI-2 which improves + performance: the host adapter can send several SCSI commands to a + device's queue even if previous commands haven't finished yet. + Because the device is intelligent, it can optimize its operations + (like head positioning) based on its own request queue. Some SCSI + devices don't implement this properly; if you want to disable this + feature, enter 0 or 1 here (it doesn't matter which). + + The default value is 8 and should be supported by most hard disks. + This value can be overridden from the boot command line using the + 'tags' option as follows (example): + 'ncr53c8xx=tags:4/t2t3q16/t0u2q10' will set default queue depth to + 4, set queue depth to 16 for target 2 and target 3 on controller 0 + and set queue depth to 10 for target 0 / lun 2 on controller 1. + + The normal answer therefore is to go with the default 8 and to use + a boot command line option for devices that need to use a different + command queue depth. + + There is no safe option other than using good SCSI devices. + +config SCSI_NCR53C8XX_MAX_TAGS + int "maximum number of queued commands" + depends on SCSI_ZALON + default "32" + help + This option allows you to specify the maximum number of commands + that can be queued to any device, when tagged command queuing is + possible. The default value is 32. Minimum is 2, maximum is 64. + Modern hard disks are able to support 64 tags and even more, but + do not seem to be faster when more than 32 tags are being used. 
+ + So, the normal answer here is to go with the default value 32 unless + you are using very large hard disks with large cache (>= 1 MB) that + are able to take advantage of more than 32 tagged commands. + + There is no safe option and the default answer is recommended. + +config SCSI_NCR53C8XX_SYNC + int "synchronous transfers frequency in MHz" + depends on SCSI_ZALON + default "20" + help + The SCSI Parallel Interface-2 Standard defines 5 classes of transfer + rates: FAST-5, FAST-10, FAST-20, FAST-40 and FAST-80. The numbers + are respectively the maximum data transfer rates in mega-transfers + per second for each class. For example, a FAST-20 Wide 16 device is + able to transfer data at 20 million 16 bit packets per second for a + total rate of 40 MB/s. + + You may specify 0 if you want to only use asynchronous data + transfers. This is the safest and slowest option. Otherwise, specify + a value between 5 and 80, depending on the capability of your SCSI + controller. The higher the number, the faster the data transfer. + Note that 80 should normally be ok since the driver decreases the + value automatically according to the controller's capabilities. + + Your answer to this question is ignored for controllers with NVRAM, + since the driver will get this information from the user set-up. It + also can be overridden using a boot setup option, as follows + (example): 'ncr53c8xx=sync:12' will allow the driver to negotiate + for FAST-20 synchronous data transfer (20 mega-transfers per + second). + + The normal answer therefore is not to go with the default but to + select the maximum value 80 allowing the driver to use the maximum + value supported by each controller. If this causes problems with + your SCSI devices, you should come back and decrease the value. + + There is no safe option other than using good cabling, right + terminations and SCSI conformant devices. + +config SCSI_NCR53C8XX_NO_DISCONNECT + bool "not allow targets to disconnect" + depends on SCSI_ZALON && SCSI_NCR53C8XX_DEFAULT_TAGS=0 + help + This option is only provided for safety if you suspect some SCSI + device of yours to not support properly the target-disconnect + feature. In that case, you would say Y here. In general however, to + not allow targets to disconnect is not reasonable if there is more + than 1 device on a SCSI bus. The normal answer therefore is N. + +config SCSI_QLOGIC_FAS + tristate "Qlogic FAS SCSI support" + depends on ISA && SCSI + help + This is a driver for the ISA, VLB, and PCMCIA versions of the Qlogic + FastSCSI! cards as well as any other card based on the FASXX chip + (including the Control Concepts SCSI/IDE/SIO/PIO/FDC cards). + + This driver does NOT support the PCI versions of these cards. The + PCI versions are supported by the Qlogic ISP driver ("Qlogic ISP + SCSI support"), below. + + Information about this driver is contained in + . You should also read the + SCSI-HOWTO, available from + . + + To compile this driver as a module, choose M here: the + module will be called qlogicfas. + +config SCSI_QLOGIC_1280 + tristate "Qlogic QLA 1240/1x80/1x160 SCSI support" + depends on PCI && SCSI + help + Say Y if you have a QLogic ISP1240/1x80/1x160 SCSI host adapter. + + To compile this driver as a module, choose M here: the + module will be called qla1280. + +config SCSI_QLOGICPTI + tristate "PTI Qlogic, ISP Driver" + depends on SBUS && SCSI + help + This driver supports SBUS SCSI controllers from PTI or QLogic. 
These + controllers are known under Solaris as qpti and in the openprom as + PTI,ptisp or QLGC,isp. Note that PCI QLogic SCSI controllers are + driven by a different driver. + + To compile this driver as a module, choose M here: the + module will be called qlogicpti. + +source "drivers/scsi/qla2xxx/Kconfig" +source "drivers/scsi/qla4xxx/Kconfig" +source "drivers/scsi/qedi/Kconfig" +source "drivers/scsi/qedf/Kconfig" + +config SCSI_LPFC + tristate "Emulex LightPulse Fibre Channel Support" + depends on PCI && SCSI + depends on CPU_FREQ + depends on SCSI_FC_ATTRS + depends on NVME_TARGET_FC || NVME_TARGET_FC=n + depends on NVME_FC || NVME_FC=n + select CRC_T10DIF + select IRQ_POLL + help + This lpfc driver supports the Emulex LightPulse + Family of Fibre Channel PCI host adapters. + +config SCSI_LPFC_DEBUG_FS + bool "Emulex LightPulse Fibre Channel debugfs Support" + depends on SCSI_LPFC && DEBUG_FS + help + This makes debugging information from the lpfc driver + available via the debugfs filesystem. + +source "drivers/scsi/elx/Kconfig" + +config SCSI_SIM710 + tristate "Simple 53c710 SCSI support (Compaq, NCR machines)" + depends on EISA && SCSI + select SCSI_SPI_ATTRS + help + This driver is for NCR53c710 based SCSI host adapters. + + It currently supports Compaq EISA cards. + +config SCSI_DC395x + tristate "Tekram DC395(U/UW/F) and DC315(U) SCSI support" + depends on PCI && HAS_IOPORT && SCSI + select SCSI_SPI_ATTRS + help + This driver supports PCI SCSI host adapters based on the ASIC + TRM-S1040 chip, e.g Tekram DC395(U/UW/F) and DC315(U) variants. + + This driver works, but is still in experimental status. So better + have a bootable disk and a backup in case of emergency. + + Documentation can be found in . + + To compile this driver as a module, choose M here: the + module will be called dc395x. + +config SCSI_AM53C974 + tristate "Tekram DC390(T) and Am53/79C974 SCSI support (new driver)" + depends on PCI && SCSI + select SCSI_SPI_ATTRS + help + This driver supports PCI SCSI host adapters based on the Am53C974A + chip, e.g. Tekram DC390(T), DawiControl 2974 and some onboard + PCscsi/PCnet (Am53/79C974) solutions. + This is a new implementation base on the generic esp_scsi driver. + + Note that this driver does NOT support Tekram DC390W/U/F, which are + based on NCR/Symbios chips. Use "NCR53C8XX SCSI support" for those. + + To compile this driver as a module, choose M here: the + module will be called am53c974. + +config SCSI_NSP32 + tristate "Workbit NinjaSCSI-32Bi/UDE support" + depends on PCI && SCSI && !64BIT && HAS_IOPORT + help + This is support for the Workbit NinjaSCSI-32Bi/UDE PCI/Cardbus + SCSI host adapter. Please read the SCSI-HOWTO, available from + . + + To compile this driver as a module, choose M here: the + module will be called nsp32. + +config SCSI_WD719X + tristate "Western Digital WD7193/7197/7296 support" + depends on PCI && SCSI + select EEPROM_93CX6 + help + This is a driver for Western Digital WD7193, WD7197 and WD7296 PCI + SCSI controllers (based on WD33C296A chip). + +config SCSI_DEBUG + tristate "SCSI debugging host and device simulator" + depends on SCSI + select CRC_T10DIF + help + This pseudo driver simulates one or more hosts (SCSI initiators), + each with one or more targets, each with one or more logical units. + Defaults to one of each, creating a small RAM disk device. Many + parameters found in the /sys/bus/pseudo/drivers/scsi_debug + directory can be tweaked at run time. + See for more information. + Mainly used for testing and best as a module. 
If unsure, say N. + +config SCSI_MESH + tristate "MESH (Power Mac internal SCSI) support" + depends on PPC32 && PPC_PMAC && SCSI + help + Many Power Macintoshes and clones have a MESH (Macintosh Enhanced + SCSI Hardware) SCSI bus adaptor (the 7200 doesn't, but all of the + other Power Macintoshes do). Say Y to include support for this SCSI + adaptor. + + To compile this driver as a module, choose M here: the + module will be called mesh. + +config SCSI_MESH_SYNC_RATE + int "maximum synchronous transfer rate (MB/s) (0 = async)" + depends on SCSI_MESH + default "5" + help + On Power Macintoshes (and clones) where the MESH SCSI bus adaptor + drives a bus which is entirely internal to the machine (such as the + 7500, 7600, 8500, etc.), the MESH is capable of synchronous + operation at up to 10 MB/s. On machines where the SCSI bus + controlled by the MESH can have external devices connected, it is + usually rated at 5 MB/s. 5 is a safe value here unless you know the + MESH SCSI bus is internal only; in that case you can say 10. Say 0 + to disable synchronous operation. + +config SCSI_MESH_RESET_DELAY_MS + int "initial bus reset delay (ms) (0 = no reset)" + depends on SCSI_MESH + default "4000" + +config SCSI_MAC53C94 + tristate "53C94 (Power Mac external SCSI) support" + depends on PPC32 && PPC_PMAC && SCSI + help + On Power Macintoshes (and clones) with two SCSI buses, the external + SCSI bus is usually controlled by a 53C94 SCSI bus adaptor. Older + machines which only have one SCSI bus, such as the 7200, also use + the 53C94. Say Y to include support for the 53C94. + + To compile this driver as a module, choose M here: the + module will be called mac53c94. + +source "drivers/scsi/arm/Kconfig" + +config JAZZ_ESP + bool "MIPS JAZZ FAS216 SCSI support" + depends on MACH_JAZZ && SCSI + select SCSI_SPI_ATTRS + help + This is the driver for the onboard SCSI host adapter of MIPS Magnum + 4000, Acer PICA, Olivetti M700-10 and a few other identical OEM + systems. + +config A3000_SCSI + tristate "A3000 WD33C93A support" + depends on AMIGA && SCSI + help + If you have an Amiga 3000 and have SCSI devices connected to the + built-in SCSI controller, say Y. Otherwise, say N. + + To compile this driver as a module, choose M here: the + module will be called a3000. + +config A2091_SCSI + tristate "A2091/A590 WD33C93A support" + depends on ZORRO && SCSI + help + If you have a Commodore A2091 SCSI controller, say Y. Otherwise, + say N. + + To compile this driver as a module, choose M here: the + module will be called a2091. + +config GVP11_SCSI + tristate "GVP Series II WD33C93A support" + depends on ZORRO && SCSI + help + If you have a Great Valley Products Series II SCSI controller, + answer Y. Also say Y if you have a later model of GVP SCSI + controller (such as the GVP A4008 or a Combo board). Otherwise, + answer N. This driver does NOT work for the T-Rex series of + accelerators from TekMagic and GVP-M. + + To compile this driver as a module, choose M here: the + module will be called gvp11. + +config SCSI_A4000T + tristate "A4000T NCR53c710 SCSI support" + depends on AMIGA && SCSI + select SCSI_SPI_ATTRS + help + If you have an Amiga 4000T and have SCSI devices connected to the + built-in SCSI controller, say Y. Otherwise, say N. + + To compile this driver as a module, choose M here: the + module will be called a4000t. 
+ +config SCSI_ZORRO7XX + tristate "Zorro NCR53c710 SCSI support" + depends on ZORRO && SCSI + select SCSI_SPI_ATTRS + help + Support for various NCR53c710-based SCSI controllers on Zorro + expansion boards for the Amiga. + This includes: + - the Amiga 4091 Zorro III SCSI-2 controller, + - the MacroSystem Development's WarpEngine Amiga SCSI-2 controller + (info at + ), + - the SCSI controller on the Phase5 Blizzard PowerUP 603e+ + accelerator card for the Amiga 1200, + - the SCSI controller on the GVP Turbo 040/060 accelerator. + +config SCSI_ZORRO_ESP + tristate "Zorro ESP SCSI support" + depends on ZORRO && SCSI + select SCSI_SPI_ATTRS + select SCSI_ESP_PIO + help + Support for various NCR53C9x (ESP) based SCSI controllers on Zorro + expansion boards for the Amiga. + This includes: + - the Phase5 Blizzard 1230 II and IV SCSI controllers, + - the Phase5 Blizzard 2060 SCSI controller, + - the Phase5 Blizzard Cyberstorm and Cyberstorm II SCSI + controllers, + - the Fastlane Zorro III SCSI controller. + +config ATARI_SCSI + tristate "Atari native SCSI support" + depends on ATARI && SCSI + select SCSI_SPI_ATTRS + help + If you have an Atari with built-in NCR5380 SCSI controller (TT, + Falcon, ...) say Y to get it supported. Of course also, if you have + a compatible SCSI controller (e.g. for Medusa). + + To compile this driver as a module, choose M here: the module will + be called atari_scsi. If you also enable NVRAM support, the SCSI + host's ID is taken from the setting in TT RTC NVRAM. + + This driver supports both styles of NCR integration into the + system: the TT style (separate DMA), and the Falcon style (via + ST-DMA, replacing ACSI). It does NOT support other schemes, like + in the Hades (without DMA). + +config MAC_SCSI + tristate "Macintosh NCR5380 SCSI" + depends on MAC && SCSI + select SCSI_SPI_ATTRS + help + This is the NCR 5380 SCSI controller included on most of the 68030 + based Macintoshes. If you have one of these say Y and read the + SCSI-HOWTO, available from + . + +config SCSI_MAC_ESP + tristate "Macintosh NCR53c9[46] SCSI" + depends on MAC && SCSI + select SCSI_SPI_ATTRS + select SCSI_ESP_PIO + help + This is the NCR 53c9x SCSI controller found on most of the 68040 + based Macintoshes. + + To compile this driver as a module, choose M here: the module + will be called mac_esp. + +config MVME147_SCSI + bool "WD33C93 SCSI driver for MVME147" + depends on MVME147 && SCSI=y + select SCSI_SPI_ATTRS + help + Support for the on-board SCSI controller on the Motorola MVME147 + single-board computer. + +config MVME16x_SCSI + tristate "NCR53C710 SCSI driver for MVME16x" + depends on MVME16x && SCSI + select SCSI_SPI_ATTRS + help + The Motorola MVME162, 166, 167, 172 and 177 boards use the NCR53C710 + SCSI controller chip. Almost everyone using one of these boards + will want to say Y to this question. + +config BVME6000_SCSI + tristate "NCR53C710 SCSI driver for BVME6000" + depends on BVME6000 && SCSI + select SCSI_SPI_ATTRS + help + The BVME4000 and BVME6000 boards from BVM Ltd use the NCR53C710 + SCSI controller chip. Almost everyone using one of these boards + will want to say Y to this question. + +config SUN3_SCSI + tristate "Sun3 NCR5380 SCSI" + depends on SUN3 && SCSI + select SCSI_SPI_ATTRS + help + This option will enable support for the OBIO (onboard io) NCR5380 + SCSI controller found in the Sun 3/50 and 3/60, as well as for + "Sun3" type VME scsi controllers also based on the NCR5380. + General Linux information on the Sun 3 series (now discontinued) + is at . 
+ +config SUN3X_ESP + bool "Sun3x ESP SCSI" + depends on SUN3X && SCSI=y + select SCSI_SPI_ATTRS + help + The ESP was an on-board SCSI controller used on Sun 3/80 + machines. Say Y here to compile in support for it. + +config SCSI_SUNESP + tristate "Sparc ESP Scsi Driver" + depends on SBUS && SCSI + select SCSI_SPI_ATTRS + help + This is the driver for the Sun ESP SCSI host adapter. The ESP + chipset is present in most SPARC SBUS-based computers and + supports the Emulex family of ESP SCSI chips (esp100, esp100A, + esp236, fas101, fas236) as well as the Qlogic fas366 SCSI chip. + + To compile this driver as a module, choose M here: the + module will be called sun_esp. + +config ZFCP + tristate "FCP host bus adapter driver for IBM mainframes" + depends on S390 && QDIO && SCSI + depends on SCSI_FC_ATTRS + help + If you want to access SCSI devices attached to your IBM mainframe by + means of Fibre Channel Protocol host bus adapters say Y. + + Supported HBAs include different models of the FICON Express and FCP + Express I/O cards. + + For a more complete list, and for more details about setup and + operation refer to the IBM publication "Device Drivers, Features, and + Commands", SC33-8411. + + This driver is also available as a module. This module will be + called zfcp. If you want to compile it as a module, say M here + and read . + +config SCSI_PMCRAID + tristate "PMC SIERRA Linux MaxRAID adapter support" + depends on PCI && SCSI && NET + select SGL_ALLOC + help + This driver supports the PMC SIERRA MaxRAID adapters. + +config SCSI_PM8001 + tristate "PMC-Sierra SPC 8001 SAS/SATA Based Host Adapter driver" + depends on PCI && SCSI + select SCSI_SAS_LIBSAS + help + This driver supports PMC-Sierra PCIE SAS/SATA 8x6G SPC 8001 chip + based host adapters. + +config SCSI_BFA_FC + tristate "Brocade BFA Fibre Channel Support" + depends on PCI && SCSI + depends on SCSI_FC_ATTRS + help + This bfa driver supports all Brocade PCIe FC/FCOE host adapters. + + To compile this driver as a module, choose M here. The module will + be called bfa. + +config SCSI_VIRTIO + tristate "virtio-scsi support" + depends on VIRTIO + help + This is the virtual HBA driver for virtio. If the kernel will + be used in a virtual machine, say Y or M. + +source "drivers/scsi/csiostor/Kconfig" + +source "drivers/scsi/pcmcia/Kconfig" + +endif # SCSI_LOWLEVEL + +source "drivers/scsi/device_handler/Kconfig" + +endmenu diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile new file mode 100644 index 000000000..f055bfd54 --- /dev/null +++ b/drivers/scsi/Makefile @@ -0,0 +1,207 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for linux/drivers/scsi +# +# 30 May 2000, Christoph Hellwig +# Rewritten to use lists instead of if-statements. +# +# 20 Sep 2000, Torben Mathiasen +# Changed link order to reflect new scsi initialization. +# +# *!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*! +# The link order must be, SCSI Core, SCSI HBA drivers, and +# lastly SCSI peripheral drivers (disk/tape/cdrom/etc.) to +# satisfy certain initialization assumptions in the SCSI layer. +# *!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*! 
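The banner comment above insists on a fixed link order: SCSI core first, HBA drivers next, peripheral drivers last. For a non-modular build that ordering is what actually sequences initialisation, because built-in initcalls registered at the same level run in the order their object files are linked. The sketch below illustrates that dependency under stated assumptions; the two "files", their functions and messages are hypothetical, and the only claim taken from the kernel is that same-level initcalls fire in link order.

```c
/*
 * Why the Makefile ordering matters when everything is built in: initcalls
 * at the same level run in the order their objects are linked, so the core
 * object must come before any HBA object.  File names and functions below
 * are hypothetical illustrations only.
 */

/* hypothetical drivers/scsi/example_core.c -- linked first, runs first */
#include <linux/init.h>
#include <linux/printk.h>

static int __init example_core_init(void)
{
	pr_info("example core ready\n");	/* HBAs may register from now on */
	return 0;
}
device_initcall(example_core_init);

/* hypothetical drivers/scsi/example_hba.c -- linked later, runs after */
static int __init example_hba_init(void)
{
	pr_info("example HBA registering with the core\n");
	return 0;
}
device_initcall(example_hba_init);
```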
+ + +CFLAGS_aha152x.o = -DAHA152X_STAT -DAUTOCONF + +obj-$(CONFIG_PCMCIA) += pcmcia/ + +obj-$(CONFIG_SCSI) += scsi_mod.o +obj-$(CONFIG_SCSI_COMMON) += scsi_common.o + +obj-$(CONFIG_RAID_ATTRS) += raid_class.o + +# --- NOTE ORDERING HERE --- +# For kernel non-modular link, transport attributes need to +# be initialised before drivers +# -------------------------- +obj-$(CONFIG_SCSI_SPI_ATTRS) += scsi_transport_spi.o +obj-$(CONFIG_SCSI_FC_ATTRS) += scsi_transport_fc.o +obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o +obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o +obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/ +obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o +obj-$(CONFIG_SCSI_DH) += device_handler/ + +obj-$(CONFIG_LIBFC) += libfc/ +obj-$(CONFIG_LIBFCOE) += fcoe/ +obj-$(CONFIG_FCOE) += fcoe/ +obj-$(CONFIG_FCOE_FNIC) += fnic/ +obj-$(CONFIG_SCSI_SNIC) += snic/ +obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/ +obj-$(CONFIG_QEDF) += qedf/ +obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o +obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o +obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o +obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o +obj-$(CONFIG_SCSI_ZORRO7XX) += 53c700.o zorro7xx.o +obj-$(CONFIG_SCSI_ZORRO_ESP) += esp_scsi.o zorro_esp.o +obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o +obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o +obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o +obj-$(CONFIG_MVME147_SCSI) += mvme147.o wd33c93.o +obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o +obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o +obj-$(CONFIG_MAC_SCSI) += mac_scsi.o +obj-$(CONFIG_SCSI_MAC_ESP) += esp_scsi.o mac_esp.o +obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o +obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o +obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o +obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o +obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o +obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o +obj-$(CONFIG_SCSI_ARCMSR) += arcmsr/ +obj-$(CONFIG_SCSI_AHA152X) += aha152x.o +obj-$(CONFIG_SCSI_AHA1542) += aha1542.o +obj-$(CONFIG_SCSI_AHA1740) += aha1740.o +obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx/ +obj-$(CONFIG_SCSI_AIC79XX) += aic7xxx/ +obj-$(CONFIG_SCSI_AACRAID) += aacraid/ +obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/ +obj-$(CONFIG_SCSI_PM8001) += pm8001/ +obj-$(CONFIG_SCSI_ISCI) += isci/ +obj-$(CONFIG_SCSI_IPS) += ips.o +obj-$(CONFIG_SCSI_FDOMAIN) += fdomain.o +obj-$(CONFIG_SCSI_FDOMAIN_PCI) += fdomain_pci.o +obj-$(CONFIG_SCSI_FDOMAIN_ISA) += fdomain_isa.o +obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o +obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o +obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o +obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o +obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ +obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/ +obj-$(CONFIG_SCSI_LPFC) += lpfc/ +obj-$(CONFIG_SCSI_EFCT) += elx/ +obj-$(CONFIG_SCSI_BFA_FC) += bfa/ +obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/ +obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o +obj-$(CONFIG_SCSI_HPSA) += hpsa.o +obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi/ +obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/ +obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o +obj-$(CONFIG_SCSI_DC395x) += dc395x.o +obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o +obj-$(CONFIG_CXLFLASH) += cxlflash/ +obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o +obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ +obj-$(CONFIG_MEGARAID_SAS) += megaraid/ +obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/ +obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr/ +obj-$(CONFIG_SCSI_ACARD) += 
atp870u.o +obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o +obj-$(CONFIG_SCSI_INITIO) += initio.o +obj-$(CONFIG_SCSI_INIA100) += a100u2w.o +obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o +obj-$(CONFIG_SCSI_MESH) += mesh.o +obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o +obj-$(CONFIG_SCSI_MYRB) += myrb.o +obj-$(CONFIG_SCSI_MYRS) += myrs.o +obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o +obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o +obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o +obj-$(CONFIG_SCSI_PPA) += ppa.o +obj-$(CONFIG_SCSI_IMM) += imm.o +obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o +obj-$(CONFIG_SUN3X_ESP) += esp_scsi.o sun3x_esp.o +obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o +obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o +obj-$(CONFIG_SCSI_NSP32) += nsp32.o +obj-$(CONFIG_SCSI_IPR) += ipr.o +obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/ +obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi_tgt/ +obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/ +obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o +obj-$(CONFIG_SCSI_STEX) += stex.o +obj-$(CONFIG_SCSI_MVSAS) += mvsas/ +obj-$(CONFIG_SCSI_MVUMI) += mvumi.o +obj-$(CONFIG_PS3_ROM) += ps3rom.o +obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ +obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ +obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ +obj-$(CONFIG_QEDI) += libiscsi.o qedi/ +obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/ +obj-$(CONFIG_SCSI_ESAS2R) += esas2r/ +obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o +obj-$(CONFIG_SCSI_VIRTIO) += virtio_scsi.o +obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o +obj-$(CONFIG_XEN_SCSI_FRONTEND) += xen-scsifront.o +obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o +obj-$(CONFIG_SCSI_WD719X) += wd719x.o + +obj-$(CONFIG_ARM) += arm/ + +obj-$(CONFIG_CHR_DEV_ST) += st.o +obj-$(CONFIG_BLK_DEV_SD) += sd_mod.o +obj-$(CONFIG_BLK_DEV_SR) += sr_mod.o +obj-$(CONFIG_CHR_DEV_SG) += sg.o +obj-$(CONFIG_CHR_DEV_SCH) += ch.o +obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o + +obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas/ + +# This goes last, so that "real" scsi devices probe earlier +obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o +scsi_mod-y += scsi.o hosts.o scsi_ioctl.o \ + scsicam.o scsi_error.o scsi_lib.o +scsi_mod-$(CONFIG_SCSI_CONSTANTS) += constants.o +scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o +scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o +scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o +scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o +scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o +scsi_mod-$(CONFIG_BLK_DEBUG_FS) += scsi_debugfs.o +scsi_mod-y += scsi_trace.o scsi_logging.o +scsi_mod-$(CONFIG_PM) += scsi_pm.o +scsi_mod-$(CONFIG_SCSI_DH) += scsi_dh.o +scsi_mod-$(CONFIG_BLK_DEV_BSG) += scsi_bsg.o + +hv_storvsc-y := storvsc_drv.o + +sd_mod-objs := sd.o +sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o +sd_mod-$(CONFIG_BLK_DEV_ZONED) += sd_zbc.o + +sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o +ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \ + := -DCONFIG_NCR53C8XX_PREFETCH -DSCSI_NCR_BIG_ENDIAN \ + -DCONFIG_SCSI_NCR53C8XX_NO_WORD_TRANSFERS +CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m) +zalon7xx-objs := zalon.o ncr53c8xx.o + +# Files generated that shall be removed upon make clean +clean-files := 53c700_d.h 53c700_u.h + +$(obj)/53c700.o: $(obj)/53c700_d.h + +$(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c + +quiet_cmd_bflags = GEN $@ + cmd_bflags = sed -n 's/.*define *BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@ + +$(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h FORCE + $(call if_changed,bflags) + +targets += 
scsi_devinfo_tbl.c + +# If you want to play with the firmware, uncomment +# GENERATE_FIRMWARE := 1 + +ifdef GENERATE_FIRMWARE + +$(obj)/53c700_d.h: $(src)/53c700.scr $(src)/script_asm.pl + $(PERL) -s $(src)/script_asm.pl -ncr7x0_family $@ $(@:_d.h=_u.h) < $< + +endif diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c new file mode 100644 index 000000000..cea3a79d5 --- /dev/null +++ b/drivers/scsi/NCR5380.c @@ -0,0 +1,2411 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NCR 5380 generic driver routines. These should make it *trivial* + * to implement 5380 SCSI drivers under Linux with a non-Trantor + * architecture. + * + * Note that these routines also work with NCR53c400 family chips. + * + * Copyright 1993, Drew Eckhardt + * Visionary Computing + * (Unix and Linux consulting and custom programming) + * drew@colorado.edu + * +1 (303) 666-5836 + * + * For more information, please consult + * + * NCR 5380 Family + * SCSI Protocol Controller + * Databook + * + * NCR Microelectronics + * 1635 Aeroplaza Drive + * Colorado Springs, CO 80916 + * 1+ (719) 578-3400 + * 1+ (800) 334-5454 + */ + +/* + * With contributions from Ray Van Tassle, Ingmar Baumgart, + * Ronald van Cuijlenborg, Alan Cox and others. + */ + +/* Ported to Atari by Roman Hodek and others. */ + +/* Adapted for the Sun 3 by Sam Creasey. */ + +/* + * Design + * + * This is a generic 5380 driver. To use it on a different platform, + * one simply writes appropriate system-specific macros (i.e., data + * transfer - some PCs will use the I/O bus, 68Ks must use + * memory-mapped access) and drops this file in their 'C' wrapper. + * + * As far as command queueing goes, two queues are maintained for + * each 5380 in the system - commands that haven't been issued yet, + * and commands that are currently executing. This means that an + * unlimited number of commands may be queued, letting + * more commands propagate from the higher driver levels, giving higher + * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported, + * allowing multiple commands to propagate all the way to a SCSI-II device + * while a command is already executing. + * + * + * Issues specific to the NCR5380 : + * + * When used in a PIO or pseudo-DMA mode, the NCR5380 is a braindead + * piece of hardware that requires you to sit in a loop polling for + * the REQ signal as long as you are connected. Some devices are + * brain dead (i.e., many TEXEL CD-ROM drives) and won't disconnect + * while doing long seek operations. [...] These + * broken devices are the exception rather than the rule and I'd rather + * spend my time optimizing for the normal case. + * + * Architecture : + * + * At the heart of the design is a coroutine, NCR5380_main, + * which is started from a workqueue for each NCR5380 host in the + * system. It attempts to establish I_T_L or I_T_L_Q nexuses by + * removing the commands from the issue queue and calling + * NCR5380_select() if a nexus is not established. + * + * Once a nexus is established, the NCR5380_information_transfer() + * phase goes through the various phases as instructed by the target. + * If the target goes into MSG IN and sends a DISCONNECT message, + * the command structure is placed into the per-instance disconnected + * queue, and NCR5380_main tries to find more work. If the target is + * idle for too long, the system will try to sleep. + * + * If a command has disconnected, eventually an interrupt will trigger, + * calling NCR5380_intr() which will in turn call NCR5380_reselect + * to reestablish a nexus.
This will run main if necessary. + * + * On command termination, the done function will be called as + * appropriate. + * + * The command data pointer is initialized after the command is connected + * in NCR5380_select, and set as appropriate in NCR5380_information_transfer. + * Note that in violation of the standard, an implicit SAVE POINTERS operation + * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS. + */ + +/* + * Using this file : + * This file is a skeleton Linux SCSI driver for the NCR 5380 series + * of chips. To use it, you write architecture-specific functions + * and macros and include this file in your driver. + * + * These macros MUST be defined : + * + * NCR5380_read(register) - read from the specified register + * + * NCR5380_write(register, value) - write to the specified register + * + * NCR5380_implementation_fields - additional fields needed for this + * specific implementation of the NCR5380 + * + * Either real DMA *or* pseudo DMA may be implemented + * + * NCR5380_dma_xfer_len - determine size of DMA/PDMA transfer + * NCR5380_dma_send_setup - execute DMA/PDMA from memory to 5380 + * NCR5380_dma_recv_setup - execute DMA/PDMA from 5380 to memory + * NCR5380_dma_residual - residual byte count + * + * The generic driver is initialized by calling NCR5380_init(instance), + * after setting the appropriate host-specific fields and ID. + */ + +#ifndef NCR5380_io_delay +#define NCR5380_io_delay(x) +#endif + +#ifndef NCR5380_acquire_dma_irq +#define NCR5380_acquire_dma_irq(x) (1) +#endif + +#ifndef NCR5380_release_dma_irq +#define NCR5380_release_dma_irq(x) +#endif + +static unsigned int disconnect_mask = ~0; +module_param(disconnect_mask, int, 0444); + +static int do_abort(struct Scsi_Host *, unsigned int); +static void do_reset(struct Scsi_Host *); +static void bus_reset_cleanup(struct Scsi_Host *); + +/** + * initialize_SCp - init the scsi pointer field + * @cmd: command block to set up + * + * Set up the internal fields in the SCSI command. + */ + +static inline void initialize_SCp(struct scsi_cmnd *cmd) +{ + struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd); + + if (scsi_bufflen(cmd)) { + ncmd->buffer = scsi_sglist(cmd); + ncmd->ptr = sg_virt(ncmd->buffer); + ncmd->this_residual = ncmd->buffer->length; + } else { + ncmd->buffer = NULL; + ncmd->ptr = NULL; + ncmd->this_residual = 0; + } + + ncmd->status = 0; + ncmd->message = 0; +} + +static inline void advance_sg_buffer(struct NCR5380_cmd *ncmd) +{ + struct scatterlist *s = ncmd->buffer; + + if (!ncmd->this_residual && s && !sg_is_last(s)) { + ncmd->buffer = sg_next(s); + ncmd->ptr = sg_virt(ncmd->buffer); + ncmd->this_residual = ncmd->buffer->length; + } +} + +static inline void set_resid_from_SCp(struct scsi_cmnd *cmd) +{ + struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd); + int resid = ncmd->this_residual; + struct scatterlist *s = ncmd->buffer; + + if (s) + while (!sg_is_last(s)) { + s = sg_next(s); + resid += s->length; + } + scsi_set_resid(cmd, resid); +} + +/** + * NCR5380_poll_politely2 - wait for two chip register values + * @hostdata: host private data + * @reg1: 5380 register to poll + * @bit1: Bitmask to check + * @val1: Expected value + * @reg2: Second 5380 register to poll + * @bit2: Second bitmask to check + * @val2: Second expected value + * @wait: Time-out in jiffies, 0 if sleeping is not allowed + * + * Polls the chip in a reasonably efficient manner waiting for an + * event to occur. After a short quick poll we begin to yield the CPU + * (if possible).
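 *
 * [Editorial aside -- not part of this patch. The "Using this file" notes
 * above describe the wrapper interface; as a rough, hypothetical sketch, a
 * minimal PIO-only port-I/O wrapper could define something like the
 * following before including this file (see g_NCR5380.c or mac_scsi.c for
 * real examples; the use of inb()/outb() on hostdata->io_port is only an
 * assumption for illustration, and the macro arities are taken from how
 * they are invoked below):
 *
 *	#define NCR5380_read(reg)		inb(hostdata->io_port + (reg))
 *	#define NCR5380_write(reg, value)	outb((value), hostdata->io_port + (reg))
 *	#define NCR5380_dma_xfer_len(hd, cmd)		(0)	(returning 0 forces PIO)
 *	#define NCR5380_dma_recv_setup(hd, dst, len)	(0)
 *	#define NCR5380_dma_send_setup(hd, src, len)	(0)
 *	#define NCR5380_dma_residual(hd)		(0)
 *	#include "NCR5380.c"
 *
 * plus an NCR5380_implementation_fields definition for whatever extra
 * per-host state the board needs.]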
In irq contexts the time-out is arbitrarily limited. + * Callers may hold locks as long as they are held in irq mode. + * + * Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT. + */ + +static int NCR5380_poll_politely2(struct NCR5380_hostdata *hostdata, + unsigned int reg1, u8 bit1, u8 val1, + unsigned int reg2, u8 bit2, u8 val2, + unsigned long wait) +{ + unsigned long n = hostdata->poll_loops; + unsigned long deadline = jiffies + wait; + + do { + if ((NCR5380_read(reg1) & bit1) == val1) + return 0; + if ((NCR5380_read(reg2) & bit2) == val2) + return 0; + cpu_relax(); + } while (n--); + + if (!wait) + return -ETIMEDOUT; + + /* Repeatedly sleep for 1 ms until deadline */ + while (time_is_after_jiffies(deadline)) { + schedule_timeout_uninterruptible(1); + if ((NCR5380_read(reg1) & bit1) == val1) + return 0; + if ((NCR5380_read(reg2) & bit2) == val2) + return 0; + } + + return -ETIMEDOUT; +} + +#if NDEBUG +static struct { + unsigned char mask; + const char *name; +} signals[] = { + {SR_DBP, "PARITY"}, + {SR_RST, "RST"}, + {SR_BSY, "BSY"}, + {SR_REQ, "REQ"}, + {SR_MSG, "MSG"}, + {SR_CD, "CD"}, + {SR_IO, "IO"}, + {SR_SEL, "SEL"}, + {0, NULL} +}, +basrs[] = { + {BASR_END_DMA_TRANSFER, "END OF DMA"}, + {BASR_DRQ, "DRQ"}, + {BASR_PARITY_ERROR, "PARITY ERROR"}, + {BASR_IRQ, "IRQ"}, + {BASR_PHASE_MATCH, "PHASE MATCH"}, + {BASR_BUSY_ERROR, "BUSY ERROR"}, + {BASR_ATN, "ATN"}, + {BASR_ACK, "ACK"}, + {0, NULL} +}, +icrs[] = { + {ICR_ASSERT_RST, "ASSERT RST"}, + {ICR_ARBITRATION_PROGRESS, "ARB. IN PROGRESS"}, + {ICR_ARBITRATION_LOST, "LOST ARB."}, + {ICR_ASSERT_ACK, "ASSERT ACK"}, + {ICR_ASSERT_BSY, "ASSERT BSY"}, + {ICR_ASSERT_SEL, "ASSERT SEL"}, + {ICR_ASSERT_ATN, "ASSERT ATN"}, + {ICR_ASSERT_DATA, "ASSERT DATA"}, + {0, NULL} +}, +mrs[] = { + {MR_BLOCK_DMA_MODE, "BLOCK DMA MODE"}, + {MR_TARGET, "TARGET"}, + {MR_ENABLE_PAR_CHECK, "PARITY CHECK"}, + {MR_ENABLE_PAR_INTR, "PARITY INTR"}, + {MR_ENABLE_EOP_INTR, "EOP INTR"}, + {MR_MONITOR_BSY, "MONITOR BSY"}, + {MR_DMA_MODE, "DMA MODE"}, + {MR_ARBITRATE, "ARBITRATE"}, + {0, NULL} +}; + +/** + * NCR5380_print - print scsi bus signals + * @instance: adapter state to dump + * + * Print the SCSI bus signals for debugging purposes + */ + +static void NCR5380_print(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned char status, basr, mr, icr, i; + + status = NCR5380_read(STATUS_REG); + mr = NCR5380_read(MODE_REG); + icr = NCR5380_read(INITIATOR_COMMAND_REG); + basr = NCR5380_read(BUS_AND_STATUS_REG); + + printk(KERN_DEBUG "SR = 0x%02x : ", status); + for (i = 0; signals[i].mask; ++i) + if (status & signals[i].mask) + printk(KERN_CONT "%s, ", signals[i].name); + printk(KERN_CONT "\nBASR = 0x%02x : ", basr); + for (i = 0; basrs[i].mask; ++i) + if (basr & basrs[i].mask) + printk(KERN_CONT "%s, ", basrs[i].name); + printk(KERN_CONT "\nICR = 0x%02x : ", icr); + for (i = 0; icrs[i].mask; ++i) + if (icr & icrs[i].mask) + printk(KERN_CONT "%s, ", icrs[i].name); + printk(KERN_CONT "\nMR = 0x%02x : ", mr); + for (i = 0; mrs[i].mask; ++i) + if (mr & mrs[i].mask) + printk(KERN_CONT "%s, ", mrs[i].name); + printk(KERN_CONT "\n"); +} + +static struct { + unsigned char value; + const char *name; +} phases[] = { + {PHASE_DATAOUT, "DATAOUT"}, + {PHASE_DATAIN, "DATAIN"}, + {PHASE_CMDOUT, "CMDOUT"}, + {PHASE_STATIN, "STATIN"}, + {PHASE_MSGOUT, "MSGOUT"}, + {PHASE_MSGIN, "MSGIN"}, + {PHASE_UNKNOWN, "UNKNOWN"} +}; + +/** + * NCR5380_print_phase - show SCSI phase + * @instance: adapter to dump + * + * Print the 
current SCSI phase for debugging purposes + */ + +static void NCR5380_print_phase(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned char status; + int i; + + status = NCR5380_read(STATUS_REG); + if (!(status & SR_REQ)) + shost_printk(KERN_DEBUG, instance, "REQ not asserted, phase unknown.\n"); + else { + for (i = 0; (phases[i].value != PHASE_UNKNOWN) && + (phases[i].value != (status & PHASE_MASK)); ++i) + ; + shost_printk(KERN_DEBUG, instance, "phase %s\n", phases[i].name); + } +} +#endif + +/** + * NCR5380_info - report driver and host information + * @instance: relevant scsi host instance + * + * For use as the host template info() handler. + */ + +static const char *NCR5380_info(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + return hostdata->info; +} + +/** + * NCR5380_init - initialise an NCR5380 + * @instance: adapter to configure + * @flags: control flags + * + * Initializes *instance and corresponding 5380 chip, + * with flags OR'd into the initial flags value. + * + * Notes : I assume that the host, hostno, and id bits have been + * set correctly. I don't care about the irq and other fields. + * + * Returns 0 for success + */ + +static int NCR5380_init(struct Scsi_Host *instance, int flags) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int i; + unsigned long deadline; + unsigned long accesses_per_ms; + + instance->max_lun = 7; + + hostdata->host = instance; + hostdata->id_mask = 1 << instance->this_id; + hostdata->id_higher_mask = 0; + for (i = hostdata->id_mask; i <= 0x80; i <<= 1) + if (i > hostdata->id_mask) + hostdata->id_higher_mask |= i; + for (i = 0; i < 8; ++i) + hostdata->busy[i] = 0; + hostdata->dma_len = 0; + + spin_lock_init(&hostdata->lock); + hostdata->connected = NULL; + hostdata->sensing = NULL; + INIT_LIST_HEAD(&hostdata->autosense); + INIT_LIST_HEAD(&hostdata->unissued); + INIT_LIST_HEAD(&hostdata->disconnected); + + hostdata->flags = flags; + + INIT_WORK(&hostdata->main_task, NCR5380_main); + hostdata->work_q = alloc_workqueue("ncr5380_%d", + WQ_UNBOUND | WQ_MEM_RECLAIM, + 0, instance->host_no); + if (!hostdata->work_q) + return -ENOMEM; + + snprintf(hostdata->info, sizeof(hostdata->info), + "%s, irq %d, io_port 0x%lx, base 0x%lx, can_queue %d, cmd_per_lun %d, sg_tablesize %d, this_id %d, flags { %s%s%s}", + instance->hostt->name, instance->irq, hostdata->io_port, + hostdata->base, instance->can_queue, instance->cmd_per_lun, + instance->sg_tablesize, instance->this_id, + hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "", + hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "", + hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : ""); + + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + NCR5380_write(MODE_REG, MR_BASE); + NCR5380_write(TARGET_COMMAND_REG, 0); + NCR5380_write(SELECT_ENABLE_REG, 0); + + /* Calibrate register polling loop */ + i = 0; + deadline = jiffies + 1; + do { + cpu_relax(); + } while (time_is_after_jiffies(deadline)); + deadline += msecs_to_jiffies(256); + do { + NCR5380_read(STATUS_REG); + ++i; + cpu_relax(); + } while (time_is_after_jiffies(deadline)); + accesses_per_ms = i / 256; + hostdata->poll_loops = NCR5380_REG_POLL_TIME * accesses_per_ms / 2; + + return 0; +} + +/** + * NCR5380_maybe_reset_bus - Detect and correct bus wedge problems. + * @instance: adapter to check + * + * If the system crashed, it may have crashed with a connected target and + * the SCSI bus busy. Check for BUS FREE phase. 
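 *
 * [Editorial aside -- not part of this patch. For context, a wrapper
 * driver's probe path typically calls NCR5380_init() and this function in
 * sequence, roughly as sketched here; "foo_template", "base", "irq" and the
 * goto labels are placeholders, and the flags value is board-specific
 * (e.g. FLAG_NO_PSEUDO_DMA). The call order mirrors existing users such as
 * g_NCR5380.c:
 *
 *	instance = scsi_host_alloc(&foo_template,
 *				   sizeof(struct NCR5380_hostdata));
 *	if (!instance)
 *		return -ENOMEM;
 *	hostdata = shost_priv(instance);
 *	hostdata->io_port = base;
 *	instance->irq = irq;
 *	error = NCR5380_init(instance, flags);
 *	if (error)
 *		goto out_put;
 *	NCR5380_maybe_reset_bus(instance);
 *	error = scsi_add_host(instance, dev);
 *	if (error)
 *		goto out_exit;
 *	scsi_scan_host(instance);
 *
 * with NCR5380_exit() and scsi_host_put() on the error paths.]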
If not, try to abort the + * currently established nexus, which we know nothing about. Failing that + * do a bus reset. + * + * Note that a bus reset will cause the chip to assert IRQ. + * + * Returns 0 if successful, otherwise -ENXIO. + */ + +static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int pass; + + for (pass = 1; (NCR5380_read(STATUS_REG) & SR_BSY) && pass <= 6; ++pass) { + switch (pass) { + case 1: + case 3: + case 5: + shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n"); + NCR5380_poll_politely(hostdata, + STATUS_REG, SR_BSY, 0, 5 * HZ); + break; + case 2: + shost_printk(KERN_ERR, instance, "bus busy, attempting abort\n"); + do_abort(instance, 1); + break; + case 4: + shost_printk(KERN_ERR, instance, "bus busy, attempting reset\n"); + do_reset(instance); + /* Wait after a reset; the SCSI standard calls for + * 250ms, we wait 500ms to be on the safe side. + * But some Toshiba CD-ROMs need ten times that. + */ + if (hostdata->flags & FLAG_TOSHIBA_DELAY) + msleep(2500); + else + msleep(500); + break; + case 6: + shost_printk(KERN_ERR, instance, "bus locked solid\n"); + return -ENXIO; + } + } + return 0; +} + +/** + * NCR5380_exit - remove an NCR5380 + * @instance: adapter to remove + * + * Assumes that no more work can be queued (e.g. by NCR5380_intr). + */ + +static void NCR5380_exit(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + cancel_work_sync(&hostdata->main_task); + destroy_workqueue(hostdata->work_q); +} + +/** + * complete_cmd - finish processing a command and return it to the SCSI ML + * @instance: the host instance + * @cmd: command to complete + */ + +static void complete_cmd(struct Scsi_Host *instance, + struct scsi_cmnd *cmd) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + dsprintk(NDEBUG_QUEUES, instance, "complete_cmd: cmd %p\n", cmd); + + if (hostdata->sensing == cmd) { + /* Autosense processing ends here */ + if (get_status_byte(cmd) != SAM_STAT_GOOD) { + scsi_eh_restore_cmnd(cmd, &hostdata->ses); + } else { + scsi_eh_restore_cmnd(cmd, &hostdata->ses); + set_status_byte(cmd, SAM_STAT_CHECK_CONDITION); + } + hostdata->sensing = NULL; + } + + scsi_done(cmd); +} + +/** + * NCR5380_queue_command - queue a command + * @instance: the relevant SCSI adapter + * @cmd: SCSI command + * + * cmd is added to the per-instance issue queue, with minor + * twiddling done to the host specific fields of cmd. If the + * main coroutine is not running, it is restarted. + */ + +static int NCR5380_queue_command(struct Scsi_Host *instance, + struct scsi_cmnd *cmd) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd); + unsigned long flags; + +#if (NDEBUG & NDEBUG_NO_WRITE) + switch (cmd->cmnd[0]) { + case WRITE_6: + case WRITE_10: + shost_printk(KERN_DEBUG, instance, "WRITE attempted with NDEBUG_NO_WRITE set\n"); + cmd->result = (DID_ERROR << 16); + scsi_done(cmd); + return 0; + } +#endif /* (NDEBUG & NDEBUG_NO_WRITE) */ + + cmd->result = 0; + + spin_lock_irqsave(&hostdata->lock, flags); + + if (!NCR5380_acquire_dma_irq(instance)) { + spin_unlock_irqrestore(&hostdata->lock, flags); + + return SCSI_MLQUEUE_HOST_BUSY; + } + + /* + * Insert the cmd into the issue queue. 
Note that REQUEST SENSE + * commands are added to the head of the queue since any command will + * clear the contingent allegiance condition that exists and the + * sense data is only guaranteed to be valid while the condition exists. + */ + + if (cmd->cmnd[0] == REQUEST_SENSE) + list_add(&ncmd->list, &hostdata->unissued); + else + list_add_tail(&ncmd->list, &hostdata->unissued); + + spin_unlock_irqrestore(&hostdata->lock, flags); + + dsprintk(NDEBUG_QUEUES, instance, "command %p added to %s of queue\n", + cmd, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); + + /* Kick off command processing */ + queue_work(hostdata->work_q, &hostdata->main_task); + return 0; +} + +static inline void maybe_release_dma_irq(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + /* Caller does the locking needed to set & test these data atomically */ + if (list_empty(&hostdata->disconnected) && + list_empty(&hostdata->unissued) && + list_empty(&hostdata->autosense) && + !hostdata->connected && + !hostdata->selecting) { + NCR5380_release_dma_irq(instance); + } +} + +/** + * dequeue_next_cmd - dequeue a command for processing + * @instance: the scsi host instance + * + * Priority is given to commands on the autosense queue. These commands + * need autosense because of a CHECK CONDITION result. + * + * Returns a command pointer if a command is found for a target that is + * not already busy. Otherwise returns NULL. + */ + +static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + struct NCR5380_cmd *ncmd; + struct scsi_cmnd *cmd; + + if (hostdata->sensing || list_empty(&hostdata->autosense)) { + list_for_each_entry(ncmd, &hostdata->unissued, list) { + cmd = NCR5380_to_scmd(ncmd); + dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n", + cmd, scmd_id(cmd), hostdata->busy[scmd_id(cmd)], cmd->device->lun); + + if (!(hostdata->busy[scmd_id(cmd)] & (1 << cmd->device->lun))) { + list_del(&ncmd->list); + dsprintk(NDEBUG_QUEUES, instance, + "dequeue: removed %p from issue queue\n", cmd); + return cmd; + } + } + } else { + /* Autosense processing begins here */ + ncmd = list_first_entry(&hostdata->autosense, + struct NCR5380_cmd, list); + list_del(&ncmd->list); + cmd = NCR5380_to_scmd(ncmd); + dsprintk(NDEBUG_QUEUES, instance, + "dequeue: removed %p from autosense queue\n", cmd); + scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); + hostdata->sensing = cmd; + return cmd; + } + return NULL; +} + +static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd); + + if (hostdata->sensing == cmd) { + scsi_eh_restore_cmnd(cmd, &hostdata->ses); + list_add(&ncmd->list, &hostdata->autosense); + hostdata->sensing = NULL; + } else + list_add(&ncmd->list, &hostdata->unissued); +} + +/** + * NCR5380_main - NCR state machines + * + * NCR5380_main is a coroutine that runs as long as more work can + * be done on the NCR5380 host adapters in a system. Both + * NCR5380_queue_command() and NCR5380_intr() will try to start it + * in case it is not running. 
+ */ + +static void NCR5380_main(struct work_struct *work) +{ + struct NCR5380_hostdata *hostdata = + container_of(work, struct NCR5380_hostdata, main_task); + struct Scsi_Host *instance = hostdata->host; + int done; + + do { + done = 1; + + spin_lock_irq(&hostdata->lock); + while (!hostdata->connected && !hostdata->selecting) { + struct scsi_cmnd *cmd = dequeue_next_cmd(instance); + + if (!cmd) + break; + + dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd); + + /* + * Attempt to establish an I_T_L nexus here. + * On success, instance->hostdata->connected is set. + * On failure, we must add the command back to the + * issue queue so we can keep trying. + */ + /* + * REQUEST SENSE commands are issued without tagged + * queueing, even on SCSI-II devices because the + * contingent allegiance condition exists for the + * entire unit. + */ + + if (!NCR5380_select(instance, cmd)) { + dsprintk(NDEBUG_MAIN, instance, "main: select complete\n"); + } else { + dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance, + "main: select failed, returning %p to queue\n", cmd); + requeue_cmd(instance, cmd); + } + } + if (hostdata->connected && !hostdata->dma_len) { + dsprintk(NDEBUG_MAIN, instance, "main: performing information transfer\n"); + NCR5380_information_transfer(instance); + done = 0; + } + if (!hostdata->connected) { + NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); + maybe_release_dma_irq(instance); + } + spin_unlock_irq(&hostdata->lock); + if (!done) + cond_resched(); + } while (!done); +} + +/* + * NCR5380_dma_complete - finish DMA transfer + * @instance: the scsi host instance + * + * Called by the interrupt handler when DMA finishes or a phase + * mismatch occurs (which would end the DMA transfer). + */ + +static void NCR5380_dma_complete(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(hostdata->connected); + int transferred; + unsigned char **data; + int *count; + int saved_data = 0, overrun = 0; + unsigned char p; + + if (hostdata->read_overruns) { + p = ncmd->phase; + if (p & SR_IO) { + udelay(10); + if ((NCR5380_read(BUS_AND_STATUS_REG) & + (BASR_PHASE_MATCH | BASR_ACK)) == + (BASR_PHASE_MATCH | BASR_ACK)) { + saved_data = NCR5380_read(INPUT_DATA_REG); + overrun = 1; + dsprintk(NDEBUG_DMA, instance, "read overrun handled\n"); + } + } + } + +#ifdef CONFIG_SUN3 + if (sun3scsi_dma_finish(hostdata->connected->sc_data_direction)) { + pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", + instance->host_no); + BUG(); + } + + if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == + (BASR_PHASE_MATCH | BASR_ACK)) { + pr_err("scsi%d: BASR %02x\n", instance->host_no, + NCR5380_read(BUS_AND_STATUS_REG)); + pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n", + instance->host_no); + BUG(); + } +#endif + + NCR5380_write(MODE_REG, MR_BASE); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + + transferred = hostdata->dma_len - NCR5380_dma_residual(hostdata); + hostdata->dma_len = 0; + + data = (unsigned char **)&ncmd->ptr; + count = &ncmd->this_residual; + *data += transferred; + *count -= transferred; + + if (hostdata->read_overruns) { + int cnt, toPIO; + + if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { + cnt = toPIO = hostdata->read_overruns; + if (overrun) { + dsprintk(NDEBUG_DMA, instance, + "Got an input overrun, using saved byte\n"); + *(*data)++ = saved_data; + 
(*count)--; + cnt--; + toPIO--; + } + if (toPIO > 0) { + dsprintk(NDEBUG_DMA, instance, + "Doing %d byte PIO to 0x%p\n", cnt, *data); + NCR5380_transfer_pio(instance, &p, &cnt, data, 0); + *count -= toPIO - cnt; + } + } + } +} + +/** + * NCR5380_intr - generic NCR5380 irq handler + * @irq: interrupt number + * @dev_id: device info + * + * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses + * from the disconnected queue, and restarting NCR5380_main() + * as required. + * + * The chip can assert IRQ in any of six different conditions. The IRQ flag + * is then cleared by reading the Reset Parity/Interrupt Register (RPIR). + * Three of these six conditions are latched in the Bus and Status Register: + * - End of DMA (cleared by ending DMA Mode) + * - Parity error (cleared by reading RPIR) + * - Loss of BSY (cleared by reading RPIR) + * Two conditions have flag bits that are not latched: + * - Bus phase mismatch (non-maskable in DMA Mode, cleared by ending DMA Mode) + * - Bus reset (non-maskable) + * The remaining condition has no flag bit at all: + * - Selection/reselection + * + * Hence, establishing the cause(s) of any interrupt is partly guesswork. + * In "The DP8490 and DP5380 Comparison Guide", National Semiconductor + * claimed that "the design of the [DP8490] interrupt logic ensures + * interrupts will not be lost (they can be on the DP5380)." + * The L5380/53C80 datasheet from LOGIC Devices has more details. + * + * Checking for bus reset by reading RST is futile because of interrupt + * latency, but a bus reset will reset chip logic. Checking for parity error + * is unnecessary because that interrupt is never enabled. A Loss of BSY + * condition will clear DMA Mode. We can tell when this occurs because the + * Busy Monitor interrupt is enabled together with DMA Mode. + */ + +static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id) +{ + struct Scsi_Host *instance = dev_id; + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int handled = 0; + unsigned char basr; + unsigned long flags; + + spin_lock_irqsave(&hostdata->lock, flags); + + basr = NCR5380_read(BUS_AND_STATUS_REG); + if (basr & BASR_IRQ) { + unsigned char mr = NCR5380_read(MODE_REG); + unsigned char sr = NCR5380_read(STATUS_REG); + + dsprintk(NDEBUG_INTR, instance, "IRQ %d, BASR 0x%02x, SR 0x%02x, MR 0x%02x\n", + irq, basr, sr, mr); + + if ((mr & MR_DMA_MODE) || (mr & MR_MONITOR_BSY)) { + /* Probably End of DMA, Phase Mismatch or Loss of BSY. + * We ack IRQ after clearing Mode Register. Workarounds + * for End of DMA errata need to happen in DMA Mode. 
+ */ + + dsprintk(NDEBUG_INTR, instance, "interrupt in DMA mode\n"); + + if (hostdata->connected) { + NCR5380_dma_complete(instance); + queue_work(hostdata->work_q, &hostdata->main_task); + } else { + NCR5380_write(MODE_REG, MR_BASE); + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + } + } else if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) && + (sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) == (SR_SEL | SR_IO)) { + /* Probably reselected */ + NCR5380_write(SELECT_ENABLE_REG, 0); + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + + dsprintk(NDEBUG_INTR, instance, "interrupt with SEL and IO\n"); + + if (!hostdata->connected) { + NCR5380_reselect(instance); + queue_work(hostdata->work_q, &hostdata->main_task); + } + if (!hostdata->connected) + NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); + } else { + /* Probably Bus Reset */ + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + + if (sr & SR_RST) { + /* Certainly Bus Reset */ + shost_printk(KERN_WARNING, instance, + "bus reset interrupt\n"); + bus_reset_cleanup(instance); + } else { + dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n"); + } +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_DMA_ENABLE; +#endif + } + handled = 1; + } else { + dsprintk(NDEBUG_INTR, instance, "interrupt without IRQ bit\n"); +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_DMA_ENABLE; +#endif + } + + spin_unlock_irqrestore(&hostdata->lock, flags); + + return IRQ_RETVAL(handled); +} + +/** + * NCR5380_select - attempt arbitration and selection for a given command + * @instance: the Scsi_Host instance + * @cmd: the scsi_cmnd to execute + * + * This routine establishes an I_T_L nexus for a SCSI command. This involves + * ARBITRATION, SELECTION and MESSAGE OUT phases and an IDENTIFY message. + * + * Returns true if the operation should be retried. + * Returns false if it should not be retried. + * + * Side effects : + * If bus busy, arbitration failed, etc, NCR5380_select() will exit + * with registers as they should have been on entry - ie + * SELECT_ENABLE will be set appropriately, the NCR5380 + * will cease to drive any SCSI bus signals. + * + * If successful : the I_T_L nexus will be established, and + * hostdata->connected will be set to cmd. + * SELECT interrupt will be disabled. + * + * If failed (no target) : scsi_done() will be called, and the + * cmd->result host byte set to DID_BAD_TARGET. + */ + +static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) + __releases(&hostdata->lock) __acquires(&hostdata->lock) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned char tmp[3], phase; + unsigned char *data; + int len; + int err; + bool ret = true; + bool can_disconnect = instance->irq != NO_IRQ && + cmd->cmnd[0] != REQUEST_SENSE && + (disconnect_mask & BIT(scmd_id(cmd))); + + NCR5380_dprint(NDEBUG_ARBITRATION, instance); + dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n", + instance->this_id); + + /* + * Arbitration and selection phases are slow and involve dropping the + * lock, so we have to watch out for EH. An exception handler may + * change 'selecting' to NULL. This function will then return false + * so that the caller will forget about 'cmd'. (During information + * transfer phases, EH may change 'connected' to NULL.) + */ + hostdata->selecting = cmd; + + /* + * Set the phase bits to 0, otherwise the NCR5380 won't drive the + * data bus during SELECTION. + */ + + NCR5380_write(TARGET_COMMAND_REG, 0); + + /* + * Start arbitration. 
+ */ + + NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); + NCR5380_write(MODE_REG, MR_ARBITRATE); + + /* The chip now waits for BUS FREE phase. Then after the 800 ns + * Bus Free Delay, arbitration will begin. + */ + + spin_unlock_irq(&hostdata->lock); + err = NCR5380_poll_politely2(hostdata, MODE_REG, MR_ARBITRATE, 0, + INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS, + ICR_ARBITRATION_PROGRESS, HZ); + spin_lock_irq(&hostdata->lock); + if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) { + /* Reselection interrupt */ + goto out; + } + if (!hostdata->selecting) { + /* Command was aborted */ + NCR5380_write(MODE_REG, MR_BASE); + return false; + } + if (err < 0) { + NCR5380_write(MODE_REG, MR_BASE); + shost_printk(KERN_ERR, instance, + "select: arbitration timeout\n"); + goto out; + } + spin_unlock_irq(&hostdata->lock); + + /* The SCSI-2 arbitration delay is 2.4 us */ + udelay(3); + + /* Check for lost arbitration */ + if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || + (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || + (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { + NCR5380_write(MODE_REG, MR_BASE); + dsprintk(NDEBUG_ARBITRATION, instance, "lost arbitration, deasserting MR_ARBITRATE\n"); + spin_lock_irq(&hostdata->lock); + goto out; + } + + /* After/during arbitration, BSY should be asserted. + * IBM DPES-31080 Version S31Q works now + * Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) + */ + NCR5380_write(INITIATOR_COMMAND_REG, + ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY); + + /* + * Again, bus clear + bus settle time is 1.2us, however, this is + * a minimum so we'll udelay ceil(1.2) + */ + + if (hostdata->flags & FLAG_TOSHIBA_DELAY) + udelay(15); + else + udelay(2); + + spin_lock_irq(&hostdata->lock); + + /* NCR5380_reselect() clears MODE_REG after a reselection interrupt */ + if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) + goto out; + + if (!hostdata->selecting) { + NCR5380_write(MODE_REG, MR_BASE); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + return false; + } + + dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n"); + + /* + * Now that we have won arbitration, start Selection process, asserting + * the host and target ID's on the SCSI bus. + */ + + NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask | (1 << scmd_id(cmd))); + + /* + * Raise ATN while SEL is true before BSY goes false from arbitration, + * since this is the only way to guarantee that we'll get a MESSAGE OUT + * phase immediately after selection. + */ + + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY | + ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL); + NCR5380_write(MODE_REG, MR_BASE); + + /* + * Reselect interrupts must be turned off prior to the dropping of BSY, + * otherwise we will trigger an interrupt. + */ + NCR5380_write(SELECT_ENABLE_REG, 0); + + spin_unlock_irq(&hostdata->lock); + + /* + * The initiator shall then wait at least two deskew delays and release + * the BSY signal. + */ + udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */ + + /* Reset BSY */ + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | + ICR_ASSERT_ATN | ICR_ASSERT_SEL); + + /* + * Something weird happens when we cease to drive BSY - looks + * like the board/chip is letting us do another read before the + * appropriate propagation delay has expired, and we're confusing + * a BSY signal from ourselves as the target's response to SELECTION. 
+ * + * A small delay (the 'C++' frontend breaks the pipeline with an + * unnecessary jump, making it work on my 386-33/Trantor T128, the + * tighter 'C' code breaks and requires this) solves the problem - + * the 1 us delay is arbitrary, and only used because this delay will + * be the same on other platforms and since it works here, it should + * work there. + * + * wingel suggests that this could be due to failing to wait + * one deskew delay. + */ + + udelay(1); + + dsprintk(NDEBUG_SELECTION, instance, "selecting target %d\n", scmd_id(cmd)); + + /* + * The SCSI specification calls for a 250 ms timeout for the actual + * selection. + */ + + err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_BSY, SR_BSY, + msecs_to_jiffies(250)); + + if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) { + spin_lock_irq(&hostdata->lock); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + NCR5380_reselect(instance); + shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n"); + goto out; + } + + if (err < 0) { + spin_lock_irq(&hostdata->lock); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + + /* Can't touch cmd if it has been reclaimed by the scsi ML */ + if (!hostdata->selecting) + return false; + + cmd->result = DID_BAD_TARGET << 16; + complete_cmd(instance, cmd); + dsprintk(NDEBUG_SELECTION, instance, + "target did not respond within 250ms\n"); + ret = false; + goto out; + } + + /* + * No less than two deskew delays after the initiator detects the + * BSY signal is true, it shall release the SEL signal and may + * change the DATA BUS. -wingel + */ + + udelay(1); + + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); + + /* + * Since we followed the SCSI spec, and raised ATN while SEL + * was true but before BSY was false during selection, the information + * transfer phase should be a MESSAGE OUT phase so that we can send the + * IDENTIFY message. 
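 *
 * [Editorial aside -- not part of this patch. The IDENTIFY byte built a few
 * lines below by the IDENTIFY() macro follows the SCSI-2 message format,
 * essentially
 *
 *	0x80 | (can_disconnect ? 0x40 : 0) | (lun & 0x07)
 *
 * so, for example, IDENTIFY(1, 2) is 0xc2: bit 7 marks an IDENTIFY message,
 * bit 6 grants disconnect privilege, and the low bits carry the LUN.]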
+ */ + + /* Wait for start of REQ/ACK handshake */ + + err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ); + spin_lock_irq(&hostdata->lock); + if (err < 0) { + shost_printk(KERN_ERR, instance, "select: REQ timeout\n"); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + goto out; + } + if (!hostdata->selecting) { + do_abort(instance, 0); + return false; + } + + dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n", + scmd_id(cmd)); + tmp[0] = IDENTIFY(can_disconnect, cmd->device->lun); + + len = 1; + data = tmp; + phase = PHASE_MSGOUT; + NCR5380_transfer_pio(instance, &phase, &len, &data, 0); + if (len) { + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + cmd->result = DID_ERROR << 16; + complete_cmd(instance, cmd); + dsprintk(NDEBUG_SELECTION, instance, "IDENTIFY message transfer failed\n"); + ret = false; + goto out; + } + + dsprintk(NDEBUG_SELECTION, instance, "nexus established.\n"); + + hostdata->connected = cmd; + hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun; + +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_INTR; +#endif + + initialize_SCp(cmd); + + ret = false; + +out: + if (!hostdata->selecting) + return false; + hostdata->selecting = NULL; + return ret; +} + +/* + * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, + * unsigned char *phase, int *count, unsigned char **data) + * + * Purpose : transfers data in given phase using polled I/O + * + * Inputs : instance - instance of driver, *phase - pointer to + * what phase is expected, *count - pointer to number of + * bytes to transfer, **data - pointer to data pointer, + * can_sleep - 1 or 0 when sleeping is permitted or not, respectively. + * + * Returns : -1 when different phase is entered without transferring + * maximum number of bytes, 0 if all bytes are transferred or exit + * is in same phase. + * + * Also, *phase, *count, *data are modified in place. + * + * XXX Note : handling for bus free may be useful. + */ + +/* + * Note : this code is not as quick as it could be, however it + * IS 100% reliable, and for the actual data transfer where speed + * counts, we will always do a pseudo DMA or DMA transfer. 
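 *
 * [Editorial aside -- not part of this patch. For readers new to the bus
 * protocol: the PHASE_* values used below are, as far as I can tell from
 * NCR5380.h, just the MSG, C/D and I/O status bits sampled from the STATUS
 * REGISTER, so "matching the phase" means matching those three signals.
 * From the SCSI spec the combinations are:
 *
 *	MSG C/D I/O	phase
 *	 0   0   0	DATA OUT
 *	 0   0   1	DATA IN
 *	 0   1   0	COMMAND
 *	 0   1   1	STATUS
 *	 1   1   0	MESSAGE OUT
 *	 1   1   1	MESSAGE IN
 *
 * and PHASE_SR_TO_TCR() maps those bits into the TARGET COMMAND REGISTER
 * positions so the chip will drive the bus.]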
+ */ + +static int NCR5380_transfer_pio(struct Scsi_Host *instance, + unsigned char *phase, int *count, + unsigned char **data, unsigned int can_sleep) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned char p = *phase, tmp; + int c = *count; + unsigned char *d = *data; + + /* + * The NCR5380 chip will only drive the SCSI bus when the + * phase specified in the appropriate bits of the TARGET COMMAND + * REGISTER match the STATUS REGISTER + */ + + NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); + + do { + /* + * Wait for assertion of REQ, after which the phase bits will be + * valid + */ + + if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, + HZ * can_sleep) < 0) + break; + + dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n"); + + /* Check for phase mismatch */ + if ((NCR5380_read(STATUS_REG) & PHASE_MASK) != p) { + dsprintk(NDEBUG_PIO, instance, "phase mismatch\n"); + NCR5380_dprint_phase(NDEBUG_PIO, instance); + break; + } + + /* Do actual transfer from SCSI bus to / from memory */ + if (!(p & SR_IO)) + NCR5380_write(OUTPUT_DATA_REG, *d); + else + *d = NCR5380_read(CURRENT_SCSI_DATA_REG); + + ++d; + + /* + * The SCSI standard suggests that in MSGOUT phase, the initiator + * should drop ATN on the last byte of the message phase + * after REQ has been asserted for the handshake but before + * the initiator raises ACK. + */ + + if (!(p & SR_IO)) { + if (!((p & SR_MSG) && c > 1)) { + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); + NCR5380_dprint(NDEBUG_PIO, instance); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | + ICR_ASSERT_DATA | ICR_ASSERT_ACK); + } else { + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | + ICR_ASSERT_DATA | ICR_ASSERT_ATN); + NCR5380_dprint(NDEBUG_PIO, instance); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | + ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); + } + } else { + NCR5380_dprint(NDEBUG_PIO, instance); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); + } + + if (NCR5380_poll_politely(hostdata, + STATUS_REG, SR_REQ, 0, 5 * HZ * can_sleep) < 0) + break; + + dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n"); + +/* + * We have several special cases to consider during REQ/ACK handshaking : + * 1. We were in MSGOUT phase, and we are on the last byte of the + * message. ATN must be dropped as ACK is dropped. + * + * 2. We are in a MSGIN phase, and we are on the last byte of the + * message. We must exit with ACK asserted, so that the calling + * code may raise ATN before dropping ACK to reject the message. + * + * 3. ACK and ATN are clear and the target may proceed as normal. + */ + if (!(p == PHASE_MSGIN && c == 1)) { + if (p == PHASE_MSGOUT && c > 1) + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); + else + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + } + } while (--c); + + dsprintk(NDEBUG_PIO, instance, "residual %d\n", c); + + *count = c; + *data = d; + tmp = NCR5380_read(STATUS_REG); + /* The phase read from the bus is valid if either REQ is (already) + * asserted or if ACK hasn't been released yet. The latter applies if + * we're in MSG IN, DATA IN or STATUS and all bytes have been received. 
+ */ + if ((tmp & SR_REQ) || ((tmp & SR_IO) && c == 0)) + *phase = tmp & PHASE_MASK; + else + *phase = PHASE_UNKNOWN; + + if (!c || (*phase == p)) + return 0; + else + return -1; +} + +/** + * do_reset - issue a reset command + * @instance: adapter to reset + * + * Issue a reset sequence to the NCR5380 and try and get the bus + * back into sane shape. + * + * This clears the reset interrupt flag because there may be no handler for + * it. When the driver is initialized, the NCR5380_intr() handler has not yet + * been installed. And when in EH we may have released the ST DMA interrupt. + */ + +static void do_reset(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata __maybe_unused *hostdata = shost_priv(instance); + unsigned long flags; + + local_irq_save(flags); + NCR5380_write(TARGET_COMMAND_REG, + PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK)); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST); + udelay(50); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); + local_irq_restore(flags); +} + +/** + * do_abort - abort the currently established nexus by going to + * MESSAGE OUT phase and sending an ABORT message. + * @instance: relevant scsi host instance + * @can_sleep: 1 or 0 when sleeping is permitted or not, respectively + * + * Returns 0 on success, negative error code on failure. + */ + +static int do_abort(struct Scsi_Host *instance, unsigned int can_sleep) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned char *msgptr, phase, tmp; + int len; + int rc; + + /* Request message out phase */ + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); + + /* + * Wait for the target to indicate a valid phase by asserting + * REQ. Once this happens, we'll have either a MSGOUT phase + * and can immediately send the ABORT message, or we'll have some + * other phase and will have to source/sink data. + * + * We really don't care what value was on the bus or what value + * the target sees, so we just handshake. + */ + + rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, + 10 * HZ * can_sleep); + if (rc < 0) + goto out; + + tmp = NCR5380_read(STATUS_REG) & PHASE_MASK; + + NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); + + if (tmp != PHASE_MSGOUT) { + NCR5380_write(INITIATOR_COMMAND_REG, + ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK); + rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, + 3 * HZ * can_sleep); + if (rc < 0) + goto out; + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); + } + + tmp = ABORT; + msgptr = &tmp; + len = 1; + phase = PHASE_MSGOUT; + NCR5380_transfer_pio(instance, &phase, &len, &msgptr, can_sleep); + if (len) + rc = -ENXIO; + + /* + * If we got here, and the command completed successfully, + * we're about to go into bus free state. + */ + +out: + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + return rc; +} + +/* + * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, + * unsigned char *phase, int *count, unsigned char **data) + * + * Purpose : transfers data in given phase using either real + * or pseudo DMA. + * + * Inputs : instance - instance of driver, *phase - pointer to + * what phase is expected, *count - pointer to number of + * bytes to transfer, **data - pointer to data pointer. + * + * Returns : -1 when different phase is entered without transferring + * maximum number of bytes, 0 if all bytes or transferred or exit + * is in same phase. 
+ * + * Also, *phase, *count, *data are modified in place. + */ + + +static int NCR5380_transfer_dma(struct Scsi_Host *instance, + unsigned char *phase, int *count, + unsigned char **data) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int c = *count; + unsigned char p = *phase; + unsigned char *d = *data; + unsigned char tmp; + int result = 0; + + if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) { + *phase = tmp; + return -1; + } + + NCR5380_to_ncmd(hostdata->connected)->phase = p; + + if (p & SR_IO) { + if (hostdata->read_overruns) + c -= hostdata->read_overruns; + else if (hostdata->flags & FLAG_DMA_FIXUP) + --c; + } + + dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n", + (p & SR_IO) ? "receive" : "send", c, d); + +#ifdef CONFIG_SUN3 + /* send start chain */ + sun3scsi_dma_start(c, *data); +#endif + + NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); + NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | + MR_ENABLE_EOP_INTR); + + if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) { + /* On the Medusa, it is a must to initialize the DMA before + * starting the NCR. This is also the cleaner way for the TT. + */ + if (p & SR_IO) + result = NCR5380_dma_recv_setup(hostdata, d, c); + else + result = NCR5380_dma_send_setup(hostdata, d, c); + } + + /* + * On the PAS16 at least I/O recovery delays are not needed here. + * Everyone else seems to want them. + */ + + if (p & SR_IO) { + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + NCR5380_io_delay(1); + NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); + } else { + NCR5380_io_delay(1); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); + NCR5380_io_delay(1); + NCR5380_write(START_DMA_SEND_REG, 0); + NCR5380_io_delay(1); + } + +#ifdef CONFIG_SUN3 +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_DMA_ENABLE; +#endif + sun3_dma_active = 1; +#endif + + if (hostdata->flags & FLAG_LATE_DMA_SETUP) { + /* On the Falcon, the DMA setup must be done after the last + * NCR access, else the DMA setup gets trashed! + */ + if (p & SR_IO) + result = NCR5380_dma_recv_setup(hostdata, d, c); + else + result = NCR5380_dma_send_setup(hostdata, d, c); + } + + /* On failure, NCR5380_dma_xxxx_setup() returns a negative int. */ + if (result < 0) + return result; + + /* For real DMA, result is the byte count. DMA interrupt is expected. */ + if (result > 0) { + hostdata->dma_len = result; + return 0; + } + + /* The result is zero iff pseudo DMA send/receive was completed. */ + hostdata->dma_len = c; + +/* + * A note regarding the DMA errata workarounds for early NMOS silicon. + * + * For DMA sends, we want to wait until the last byte has been + * transferred out over the bus before we turn off DMA mode. Alas, there + * seems to be no terribly good way of doing this on a 5380 under all + * conditions. For non-scatter-gather operations, we can wait until REQ + * and ACK both go false, or until a phase mismatch occurs. Gather-sends + * are nastier, since the device will be expecting more data than we + * are prepared to send it, and REQ will remain asserted. On a 53C8[01] we + * could test Last Byte Sent to assure transfer (I imagine this is precisely + * why this signal was added to the newer chips) but on the older 538[01] + * this signal does not exist. The workaround for this lack is a watchdog; + * we bail out of the wait-loop after a modest amount of wait-time if + * the usual exit conditions are not met. 
Not a terribly clean or + * correct solution :-% + * + * DMA receive is equally tricky due to a nasty characteristic of the NCR5380. + * If the chip is in DMA receive mode, it will respond to a target's + * REQ by latching the SCSI data into the INPUT DATA register and asserting + * ACK, even if it has _already_ been notified by the DMA controller that + * the current DMA transfer has completed! If the NCR5380 is then taken + * out of DMA mode, this already-acknowledged byte is lost. This is + * not a problem for "one DMA transfer per READ command", because + * the situation will never arise... either all of the data is DMA'ed + * properly, or the target switches to MESSAGE IN phase to signal a + * disconnection (either operation bringing the DMA to a clean halt). + * However, in order to handle scatter-receive, we must work around the + * problem. The chosen fix is to DMA fewer bytes, then check for the + * condition before taking the NCR5380 out of DMA mode. One or two extra + * bytes are transferred via PIO as necessary to fill out the original + * request. + */ + + if (hostdata->flags & FLAG_DMA_FIXUP) { + if (p & SR_IO) { + /* + * The workaround was to transfer fewer bytes than we + * intended to with the pseudo-DMA read function, wait for + * the chip to latch the last byte, read it, and then disable + * pseudo-DMA mode. + * + * After REQ is asserted, the NCR5380 asserts DRQ and ACK. + * REQ is deasserted when ACK is asserted, and not reasserted + * until ACK goes false. Since the NCR5380 won't lower ACK + * until DACK is asserted, which won't happen unless we twiddle + * the DMA port or we take the NCR5380 out of DMA mode, we + * can guarantee that we won't handshake another extra + * byte. + */ + + if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, + BASR_DRQ, BASR_DRQ, 0) < 0) { + result = -1; + shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n"); + } + if (NCR5380_poll_politely(hostdata, STATUS_REG, + SR_REQ, 0, 0) < 0) { + result = -1; + shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n"); + } + d[*count - 1] = NCR5380_read(INPUT_DATA_REG); + } else { + /* + * Wait for the last byte to be sent. If REQ is being asserted for + * the byte we're interested, we'll ACK it and it will go false. + */ + if (NCR5380_poll_politely2(hostdata, + BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, + BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0) < 0) { + result = -1; + shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n"); + } + } + } + + NCR5380_dma_complete(instance); + return result; +} + +/* + * Function : NCR5380_information_transfer (struct Scsi_Host *instance) + * + * Purpose : run through the various SCSI phases and do as the target + * directs us to. Operates on the currently connected command, + * instance->connected. + * + * Inputs : instance, instance for which we are doing commands + * + * Side effects : SCSI things happen, the disconnected queue will be + * modified if a command disconnects, *instance->connected will + * change. + * + * XXX Note : we need to watch for bus free or a reset condition here + * to recover from an unexpected bus free condition. 
+ */ + +static void NCR5380_information_transfer(struct Scsi_Host *instance) + __releases(&hostdata->lock) __acquires(&hostdata->lock) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned char msgout = NOP; + int sink = 0; + int len; + int transfersize; + unsigned char *data; + unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; + struct scsi_cmnd *cmd; + +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_INTR; +#endif + + while ((cmd = hostdata->connected)) { + struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd); + + tmp = NCR5380_read(STATUS_REG); + /* We only have a valid SCSI phase when REQ is asserted */ + if (tmp & SR_REQ) { + phase = (tmp & PHASE_MASK); + if (phase != old_phase) { + old_phase = phase; + NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); + } +#ifdef CONFIG_SUN3 + if (phase == PHASE_CMDOUT && + sun3_dma_setup_done != cmd) { + int count; + + advance_sg_buffer(ncmd); + + count = sun3scsi_dma_xfer_len(hostdata, cmd); + + if (count > 0) { + if (cmd->sc_data_direction == DMA_TO_DEVICE) + sun3scsi_dma_send_setup(hostdata, + ncmd->ptr, count); + else + sun3scsi_dma_recv_setup(hostdata, + ncmd->ptr, count); + sun3_dma_setup_done = cmd; + } +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_INTR; +#endif + } +#endif /* CONFIG_SUN3 */ + + if (sink && (phase != PHASE_MSGOUT)) { + NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); + + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | + ICR_ASSERT_ACK); + while (NCR5380_read(STATUS_REG) & SR_REQ) + ; + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | + ICR_ASSERT_ATN); + sink = 0; + continue; + } + + switch (phase) { + case PHASE_DATAOUT: +#if (NDEBUG & NDEBUG_NO_DATAOUT) + shost_printk(KERN_DEBUG, instance, "NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n"); + sink = 1; + do_abort(instance, 0); + cmd->result = DID_ERROR << 16; + complete_cmd(instance, cmd); + hostdata->connected = NULL; + hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); + return; +#endif + case PHASE_DATAIN: + /* + * If there is no room left in the current buffer in the + * scatter-gather list, move onto the next one. + */ + + advance_sg_buffer(ncmd); + dsprintk(NDEBUG_INFORMATION, instance, + "this residual %d, sg ents %d\n", + ncmd->this_residual, + sg_nents(ncmd->buffer)); + + /* + * The preferred transfer method is going to be + * PSEUDO-DMA for systems that are strictly PIO, + * since we can let the hardware do the handshaking. + * + * For this to work, we need to know the transfersize + * ahead of time, since the pseudo-DMA code will sit + * in an unconditional loop. + */ + + transfersize = 0; + if (!cmd->device->borken) + transfersize = NCR5380_dma_xfer_len(hostdata, cmd); + + if (transfersize > 0) { + len = transfersize; + if (NCR5380_transfer_dma(instance, &phase, + &len, (unsigned char **)&ncmd->ptr)) { + /* + * If the watchdog timer fires, all future + * accesses to this device will use the + * polled-IO. + */ + scmd_printk(KERN_INFO, cmd, + "switching to slow handshake\n"); + cmd->device->borken = 1; + do_reset(instance); + bus_reset_cleanup(instance); + } + } else { + /* Transfer a small chunk so that the + * irq mode lock is not held too long. 
+ */ + transfersize = min(ncmd->this_residual, + NCR5380_PIO_CHUNK_SIZE); + len = transfersize; + NCR5380_transfer_pio(instance, &phase, &len, + (unsigned char **)&ncmd->ptr, + 0); + ncmd->this_residual -= transfersize - len; + } +#ifdef CONFIG_SUN3 + if (sun3_dma_setup_done == cmd) + sun3_dma_setup_done = NULL; +#endif + return; + case PHASE_MSGIN: + len = 1; + data = &tmp; + NCR5380_transfer_pio(instance, &phase, &len, &data, 0); + ncmd->message = tmp; + + switch (tmp) { + case ABORT: + set_host_byte(cmd, DID_ABORT); + fallthrough; + case COMMAND_COMPLETE: + /* Accept message by clearing ACK */ + sink = 1; + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + dsprintk(NDEBUG_QUEUES, instance, + "COMMAND COMPLETE %p target %d lun %llu\n", + cmd, scmd_id(cmd), cmd->device->lun); + + hostdata->connected = NULL; + hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); + + set_status_byte(cmd, ncmd->status); + + set_resid_from_SCp(cmd); + + if (cmd->cmnd[0] == REQUEST_SENSE) + complete_cmd(instance, cmd); + else { + if (ncmd->status == SAM_STAT_CHECK_CONDITION || + ncmd->status == SAM_STAT_COMMAND_TERMINATED) { + dsprintk(NDEBUG_QUEUES, instance, "autosense: adding cmd %p to tail of autosense queue\n", + cmd); + list_add_tail(&ncmd->list, + &hostdata->autosense); + } else + complete_cmd(instance, cmd); + } + + /* + * Restore phase bits to 0 so an interrupted selection, + * arbitration can resume. + */ + NCR5380_write(TARGET_COMMAND_REG, 0); + + return; + case MESSAGE_REJECT: + /* Accept message by clearing ACK */ + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + switch (hostdata->last_message) { + case HEAD_OF_QUEUE_TAG: + case ORDERED_QUEUE_TAG: + case SIMPLE_QUEUE_TAG: + cmd->device->simple_tags = 0; + hostdata->busy[cmd->device->id] |= (1 << (cmd->device->lun & 0xFF)); + break; + default: + break; + } + break; + case DISCONNECT: + /* Accept message by clearing ACK */ + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + hostdata->connected = NULL; + list_add(&ncmd->list, &hostdata->disconnected); + dsprintk(NDEBUG_INFORMATION | NDEBUG_QUEUES, + instance, "connected command %p for target %d lun %llu moved to disconnected queue\n", + cmd, scmd_id(cmd), cmd->device->lun); + + /* + * Restore phase bits to 0 so an interrupted selection, + * arbitration can resume. + */ + NCR5380_write(TARGET_COMMAND_REG, 0); + +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_DMA_ENABLE; +#endif + return; + /* + * The SCSI data pointer is *IMPLICITLY* saved on a disconnect + * operation, in violation of the SCSI spec so we can safely + * ignore SAVE/RESTORE pointers calls. + * + * Unfortunately, some disks violate the SCSI spec and + * don't issue the required SAVE_POINTERS message before + * disconnecting, and we have to break spec to remain + * compatible. + */ + case SAVE_POINTERS: + case RESTORE_POINTERS: + /* Accept message by clearing ACK */ + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + break; + case EXTENDED_MESSAGE: + /* + * Start the message buffer with the EXTENDED_MESSAGE + * byte, since spi_print_msg() wants the whole thing. 
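 *
 * [Editorial aside -- not part of this patch. On the wire an extended
 * message is EXTENDED_MESSAGE (0x01), a length byte, a code byte, then the
 * arguments; a typical SDTR from a target might be
 *
 *	0x01 0x03 0x01 0x32 0x08
 *
 * i.e. length 3, code EXTENDED_SDTR, transfer period factor 0x32 (200 ns)
 * and REQ/ACK offset 8. The code below first reads the length and code
 * bytes, then "length - 1" remaining bytes into extended_msg[3..].]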
+ */ + extended_msg[0] = EXTENDED_MESSAGE; + /* Accept first byte by clearing ACK */ + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + + spin_unlock_irq(&hostdata->lock); + + dsprintk(NDEBUG_EXTENDED, instance, "receiving extended message\n"); + + len = 2; + data = extended_msg + 1; + phase = PHASE_MSGIN; + NCR5380_transfer_pio(instance, &phase, &len, &data, 1); + dsprintk(NDEBUG_EXTENDED, instance, "length %d, code 0x%02x\n", + (int)extended_msg[1], + (int)extended_msg[2]); + + if (!len && extended_msg[1] > 0 && + extended_msg[1] <= sizeof(extended_msg) - 2) { + /* Accept third byte by clearing ACK */ + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + len = extended_msg[1] - 1; + data = extended_msg + 3; + phase = PHASE_MSGIN; + + NCR5380_transfer_pio(instance, &phase, &len, &data, 1); + dsprintk(NDEBUG_EXTENDED, instance, "message received, residual %d\n", + len); + + switch (extended_msg[2]) { + case EXTENDED_SDTR: + case EXTENDED_WDTR: + tmp = 0; + } + } else if (len) { + shost_printk(KERN_ERR, instance, "error receiving extended message\n"); + tmp = 0; + } else { + shost_printk(KERN_NOTICE, instance, "extended message code %02x length %d is too long\n", + extended_msg[2], extended_msg[1]); + tmp = 0; + } + + spin_lock_irq(&hostdata->lock); + if (!hostdata->connected) + return; + + /* Reject message */ + fallthrough; + default: + /* + * If we get something weird that we aren't expecting, + * log it. + */ + if (tmp == EXTENDED_MESSAGE) + scmd_printk(KERN_INFO, cmd, + "rejecting unknown extended message code %02x, length %d\n", + extended_msg[2], extended_msg[1]); + else if (tmp) + scmd_printk(KERN_INFO, cmd, + "rejecting unknown message code %02x\n", + tmp); + + msgout = MESSAGE_REJECT; + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); + break; + } /* switch (tmp) */ + break; + case PHASE_MSGOUT: + len = 1; + data = &msgout; + hostdata->last_message = msgout; + NCR5380_transfer_pio(instance, &phase, &len, &data, 0); + if (msgout == ABORT) { + hostdata->connected = NULL; + hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); + cmd->result = DID_ERROR << 16; + complete_cmd(instance, cmd); + return; + } + msgout = NOP; + break; + case PHASE_CMDOUT: + len = cmd->cmd_len; + data = cmd->cmnd; + /* + * XXX for performance reasons, on machines with a + * PSEUDO-DMA architecture we should probably + * use the dma transfer function. + */ + NCR5380_transfer_pio(instance, &phase, &len, &data, 0); + break; + case PHASE_STATIN: + len = 1; + data = &tmp; + NCR5380_transfer_pio(instance, &phase, &len, &data, 0); + ncmd->status = tmp; + break; + default: + shost_printk(KERN_ERR, instance, "unknown phase\n"); + NCR5380_dprint(NDEBUG_ANY, instance); + } /* switch(phase) */ + } else { + spin_unlock_irq(&hostdata->lock); + NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ); + spin_lock_irq(&hostdata->lock); + } + } +} + +/* + * Function : void NCR5380_reselect (struct Scsi_Host *instance) + * + * Purpose : does reselection, initializing the instance->connected + * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q + * nexus has been reestablished, + * + * Inputs : instance - this instance of the NCR5380. + */ + +static void NCR5380_reselect(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned char target_mask; + unsigned char lun; + unsigned char msg[3]; + struct NCR5380_cmd *ncmd; + struct scsi_cmnd *tmp; + + /* + * Disable arbitration, etc. 
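The extended-message handling above reads the two header bytes first and only fetches the remaining bytes when the announced length fits the 10-byte buffer; anything longer is rejected. A small stand-alone sketch of that bounds check, assuming the same buffer size, follows (illustrative only).

#include <assert.h>
#include <stddef.h>

#define EXTENDED_MESSAGE 0x01

/* An extended message is { 0x01, length, code, args... }; "length" counts
 * the code byte plus its arguments, so it must fit after the two header
 * bytes: length > 0 && length <= bufsize - 2, the same bound as above.
 */
static int extended_msg_fits(unsigned char length, size_t bufsize)
{
	return length > 0 && length <= bufsize - 2;
}

int main(void)
{
	unsigned char extended_msg[10];

	extended_msg[0] = EXTENDED_MESSAGE;
	assert(extended_msg_fits(3, sizeof(extended_msg)));	/* e.g. SDTR */
	assert(!extended_msg_fits(0, sizeof(extended_msg)));
	assert(!extended_msg_fits(9, sizeof(extended_msg)));	/* too long */
	return 0;
}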
since the host adapter obviously + * lost, and tell an interrupted NCR5380_select() to restart. + */ + + NCR5380_write(MODE_REG, MR_BASE); + + target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); + if (!target_mask || target_mask & (target_mask - 1)) { + shost_printk(KERN_WARNING, instance, + "reselect: bad target_mask 0x%02x\n", target_mask); + return; + } + + /* + * At this point, we have detected that our SCSI ID is on the bus, + * SEL is true and BSY was false for at least one bus settle delay + * (400 ns). + * + * We must assert BSY ourselves, until the target drops the SEL + * signal. + */ + + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); + if (NCR5380_poll_politely(hostdata, + STATUS_REG, SR_SEL, 0, 0) < 0) { + shost_printk(KERN_ERR, instance, "reselect: !SEL timeout\n"); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + return; + } + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + + /* + * Wait for target to go into MSGIN. + */ + + if (NCR5380_poll_politely(hostdata, + STATUS_REG, SR_REQ, SR_REQ, 0) < 0) { + if ((NCR5380_read(STATUS_REG) & (SR_BSY | SR_SEL)) == 0) + /* BUS FREE phase */ + return; + shost_printk(KERN_ERR, instance, "reselect: REQ timeout\n"); + do_abort(instance, 0); + return; + } + +#ifdef CONFIG_SUN3 + /* acknowledge toggle to MSGIN */ + NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN)); + + /* peek at the byte without really hitting the bus */ + msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG); +#else + { + int len = 1; + unsigned char *data = msg; + unsigned char phase = PHASE_MSGIN; + + NCR5380_transfer_pio(instance, &phase, &len, &data, 0); + + if (len) { + do_abort(instance, 0); + return; + } + } +#endif /* CONFIG_SUN3 */ + + if (!(msg[0] & 0x80)) { + shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got "); + spi_print_msg(msg); + printk("\n"); + do_abort(instance, 0); + return; + } + lun = msg[0] & 0x07; + + /* + * We need to add code for SCSI-II to track which devices have + * I_T_L_Q nexuses established, and which have simple I_T_L + * nexuses so we can chose to do additional data transfer. + */ + + /* + * Find the command corresponding to the I_T_L or I_T_L_Q nexus we + * just reestablished, and remove it from the disconnected queue. + */ + + tmp = NULL; + list_for_each_entry(ncmd, &hostdata->disconnected, list) { + struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); + + if (target_mask == (1 << scmd_id(cmd)) && + lun == (u8)cmd->device->lun) { + list_del(&ncmd->list); + tmp = cmd; + break; + } + } + + if (tmp) { + dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance, + "reselect: removed %p from disconnected queue\n", tmp); + } else { + int target = ffs(target_mask) - 1; + + shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n", + target_mask, lun); + /* + * Since we have an established nexus that we can't do anything + * with, we must abort it. 
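Reselection above hinges on two decodes: the data bus must carry exactly one target ID bit once the host's own id_mask is removed (checked with the target_mask & (target_mask - 1) trick), and the low three bits of the IDENTIFY byte name the LUN. The stand-alone sketch below models both checks; decode_reselect() is a hypothetical helper, not part of the driver.

#include <assert.h>
#include <strings.h>	/* ffs() */

static int decode_reselect(unsigned char bus_data, unsigned char id_mask,
			   unsigned char identify, int *target, int *lun)
{
	unsigned char target_mask = bus_data & ~id_mask;

	if (!target_mask || (target_mask & (target_mask - 1)))
		return -1;		/* zero bits or more than one bit set */
	if (!(identify & 0x80))
		return -1;		/* not an IDENTIFY message */
	*target = ffs(target_mask) - 1;	/* bit position -> target number */
	*lun = identify & 0x07;
	return 0;
}

int main(void)
{
	int target, lun;

	/* host ID 7 (0x80), reselected by target 2 (0x04), IDENTIFY for LUN 1 */
	assert(decode_reselect(0x80 | 0x04, 0x80, 0x80 | 0x01, &target, &lun) == 0);
	assert(target == 2 && lun == 1);
	/* two target bits at once is rejected */
	assert(decode_reselect(0x80 | 0x04 | 0x02, 0x80, 0x81, &target, &lun) < 0);
	return 0;
}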
+ */ + if (do_abort(instance, 0) == 0) + hostdata->busy[target] &= ~(1 << lun); + return; + } + +#ifdef CONFIG_SUN3 + if (sun3_dma_setup_done != tmp) { + int count; + + advance_sg_buffer(ncmd); + + count = sun3scsi_dma_xfer_len(hostdata, tmp); + + if (count > 0) { + if (tmp->sc_data_direction == DMA_TO_DEVICE) + sun3scsi_dma_send_setup(hostdata, + ncmd->ptr, count); + else + sun3scsi_dma_recv_setup(hostdata, + ncmd->ptr, count); + sun3_dma_setup_done = tmp; + } + } + + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); +#endif /* CONFIG_SUN3 */ + + /* Accept message by clearing ACK */ + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + + hostdata->connected = tmp; + dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu\n", + scmd_id(tmp), tmp->device->lun); +} + +/** + * list_find_cmd - test for presence of a command in a linked list + * @haystack: list of commands + * @needle: command to search for + */ + +static bool list_find_cmd(struct list_head *haystack, + struct scsi_cmnd *needle) +{ + struct NCR5380_cmd *ncmd; + + list_for_each_entry(ncmd, haystack, list) + if (NCR5380_to_scmd(ncmd) == needle) + return true; + return false; +} + +/** + * list_remove_cmd - remove a command from linked list + * @haystack: list of commands + * @needle: command to remove + */ + +static bool list_del_cmd(struct list_head *haystack, + struct scsi_cmnd *needle) +{ + if (list_find_cmd(haystack, needle)) { + struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(needle); + + list_del(&ncmd->list); + return true; + } + return false; +} + +/** + * NCR5380_abort - scsi host eh_abort_handler() method + * @cmd: the command to be aborted + * + * Try to abort a given command by removing it from queues and/or sending + * the target an abort message. This may not succeed in causing a target + * to abort the command. Nonetheless, the low-level driver must forget about + * the command because the mid-layer reclaims it and it may be re-issued. + * + * The normal path taken by a command is as follows. For EH we trace this + * same path to locate and abort the command. + * + * unissued -> selecting -> [unissued -> selecting ->]... connected -> + * [disconnected -> connected ->]... + * [autosense -> connected ->] done + * + * If cmd was not found at all then presumably it has already been completed, + * in which case return SUCCESS to try to avoid further EH measures. + * + * If the command has not completed yet, we must not fail to find it. + * We have no option but to forget the aborted command (even if it still + * lacks sense data). The mid-layer may re-issue a command that is in error + * recovery (see scsi_send_eh_cmnd), but the logic and data structures in + * this driver are such that a command can appear on one queue only. + * + * The lock protects driver data structures, but EH handlers also use it + * to serialize their own execution and prevent their own re-entry. 
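list_find_cmd() and list_del_cmd() above simply walk a queue and unlink the node that maps back to the wanted scsi_cmnd. The sketch below models the same find-then-unlink semantics with a plain singly linked list and an integer ID standing in for the command pointer; it is illustrative only, not the kernel's list_head API.

#include <assert.h>
#include <stddef.h>

struct cmd_node {
	int id;				/* stands in for the scsi_cmnd pointer */
	struct cmd_node *next;
};

/* Return 1 and unlink the node only if the command is actually queued. */
static int list_del_cmd(struct cmd_node **head, int needle)
{
	for (struct cmd_node **p = head; *p; p = &(*p)->next) {
		if ((*p)->id == needle) {
			*p = (*p)->next;	/* unlink */
			return 1;
		}
	}
	return 0;				/* not on this queue */
}

int main(void)
{
	struct cmd_node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct cmd_node *head = &a;

	assert(list_del_cmd(&head, 2));		/* found and removed */
	assert(!list_del_cmd(&head, 2));	/* second attempt fails */
	return 0;
}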
+ */ + +static int NCR5380_abort(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *instance = cmd->device->host; + struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned long flags; + int result = SUCCESS; + + spin_lock_irqsave(&hostdata->lock, flags); + +#if (NDEBUG & NDEBUG_ANY) + scmd_printk(KERN_INFO, cmd, __func__); +#endif + NCR5380_dprint(NDEBUG_ANY, instance); + NCR5380_dprint_phase(NDEBUG_ANY, instance); + + if (list_del_cmd(&hostdata->unissued, cmd)) { + dsprintk(NDEBUG_ABORT, instance, + "abort: removed %p from issue queue\n", cmd); + cmd->result = DID_ABORT << 16; + scsi_done(cmd); /* No tag or busy flag to worry about */ + goto out; + } + + if (hostdata->selecting == cmd) { + dsprintk(NDEBUG_ABORT, instance, + "abort: cmd %p == selecting\n", cmd); + hostdata->selecting = NULL; + cmd->result = DID_ABORT << 16; + complete_cmd(instance, cmd); + goto out; + } + + if (list_del_cmd(&hostdata->disconnected, cmd)) { + dsprintk(NDEBUG_ABORT, instance, + "abort: removed %p from disconnected list\n", cmd); + /* Can't call NCR5380_select() and send ABORT because that + * means releasing the lock. Need a bus reset. + */ + set_host_byte(cmd, DID_ERROR); + complete_cmd(instance, cmd); + result = FAILED; + goto out; + } + + if (hostdata->connected == cmd) { + dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd); + hostdata->connected = NULL; + hostdata->dma_len = 0; + if (do_abort(instance, 0) < 0) { + set_host_byte(cmd, DID_ERROR); + complete_cmd(instance, cmd); + result = FAILED; + goto out; + } + set_host_byte(cmd, DID_ABORT); + complete_cmd(instance, cmd); + goto out; + } + + if (list_del_cmd(&hostdata->autosense, cmd)) { + dsprintk(NDEBUG_ABORT, instance, + "abort: removed %p from sense queue\n", cmd); + complete_cmd(instance, cmd); + } + +out: + if (result == FAILED) + dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd); + else { + hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); + dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd); + } + + queue_work(hostdata->work_q, &hostdata->main_task); + spin_unlock_irqrestore(&hostdata->lock, flags); + + return result; +} + + +static void bus_reset_cleanup(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int i; + struct NCR5380_cmd *ncmd; + + /* reset NCR registers */ + NCR5380_write(MODE_REG, MR_BASE); + NCR5380_write(TARGET_COMMAND_REG, 0); + NCR5380_write(SELECT_ENABLE_REG, 0); + + /* After the reset, there are no more connected or disconnected commands + * and no busy units; so clear the low-level status here to avoid + * conflicts when the mid-level code tries to wake up the affected + * commands! 
+ */ + + if (hostdata->selecting) { + hostdata->selecting->result = DID_RESET << 16; + complete_cmd(instance, hostdata->selecting); + hostdata->selecting = NULL; + } + + list_for_each_entry(ncmd, &hostdata->disconnected, list) { + struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); + + set_host_byte(cmd, DID_RESET); + complete_cmd(instance, cmd); + } + INIT_LIST_HEAD(&hostdata->disconnected); + + list_for_each_entry(ncmd, &hostdata->autosense, list) { + struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); + + scsi_done(cmd); + } + INIT_LIST_HEAD(&hostdata->autosense); + + if (hostdata->connected) { + set_host_byte(hostdata->connected, DID_RESET); + complete_cmd(instance, hostdata->connected); + hostdata->connected = NULL; + } + + for (i = 0; i < 8; ++i) + hostdata->busy[i] = 0; + hostdata->dma_len = 0; + + queue_work(hostdata->work_q, &hostdata->main_task); +} + +/** + * NCR5380_host_reset - reset the SCSI host + * @cmd: SCSI command undergoing EH + * + * Returns SUCCESS + */ + +static int NCR5380_host_reset(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *instance = cmd->device->host; + struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned long flags; + struct NCR5380_cmd *ncmd; + + spin_lock_irqsave(&hostdata->lock, flags); + +#if (NDEBUG & NDEBUG_ANY) + shost_printk(KERN_INFO, instance, __func__); +#endif + NCR5380_dprint(NDEBUG_ANY, instance); + NCR5380_dprint_phase(NDEBUG_ANY, instance); + + list_for_each_entry(ncmd, &hostdata->unissued, list) { + struct scsi_cmnd *scmd = NCR5380_to_scmd(ncmd); + + scmd->result = DID_RESET << 16; + scsi_done(scmd); + } + INIT_LIST_HEAD(&hostdata->unissued); + + do_reset(instance); + bus_reset_cleanup(instance); + + spin_unlock_irqrestore(&hostdata->lock, flags); + + return SUCCESS; +} diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h new file mode 100644 index 000000000..8dc2be421 --- /dev/null +++ b/drivers/scsi/NCR5380.h @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * NCR 5380 defines + * + * Copyright 1993, Drew Eckhardt + * Visionary Computing + * (Unix consulting and custom programming) + * drew@colorado.edu + * +1 (303) 666-5836 + * + * For more information, please consult + * + * NCR 5380 Family + * SCSI Protocol Controller + * Databook + * NCR Microelectronics + * 1635 Aeroplaza Drive + * Colorado Springs, CO 80916 + * 1+ (719) 578-3400 + * 1+ (800) 334-5454 + */ + +#ifndef NCR5380_H +#define NCR5380_H + +#include +#include +#include +#include +#include +#include +#include + +#define NDEBUG_ARBITRATION 0x1 +#define NDEBUG_AUTOSENSE 0x2 +#define NDEBUG_DMA 0x4 +#define NDEBUG_HANDSHAKE 0x8 +#define NDEBUG_INFORMATION 0x10 +#define NDEBUG_INIT 0x20 +#define NDEBUG_INTR 0x40 +#define NDEBUG_LINKED 0x80 +#define NDEBUG_MAIN 0x100 +#define NDEBUG_NO_DATAOUT 0x200 +#define NDEBUG_NO_WRITE 0x400 +#define NDEBUG_PIO 0x800 +#define NDEBUG_PSEUDO_DMA 0x1000 +#define NDEBUG_QUEUES 0x2000 +#define NDEBUG_RESELECTION 0x4000 +#define NDEBUG_SELECTION 0x8000 +#define NDEBUG_USLEEP 0x10000 +#define NDEBUG_LAST_BYTE_SENT 0x20000 +#define NDEBUG_RESTART_SELECT 0x40000 +#define NDEBUG_EXTENDED 0x80000 +#define NDEBUG_C400_PREAD 0x100000 +#define NDEBUG_C400_PWRITE 0x200000 +#define NDEBUG_LISTS 0x400000 +#define NDEBUG_ABORT 0x800000 +#define NDEBUG_TAGS 0x1000000 +#define NDEBUG_MERGING 0x2000000 + +#define NDEBUG_ANY 0xFFFFFFFFUL + +/* + * The contents of the OUTPUT DATA register are asserted on the bus when + * either arbitration is occurring or the phase-indicating signals ( + * IO, CD, MSG) in the TARGET COMMAND register and 
the ASSERT DATA + * bit in the INITIATOR COMMAND register is set. + */ + +#define OUTPUT_DATA_REG 0 /* wo DATA lines on SCSI bus */ +#define CURRENT_SCSI_DATA_REG 0 /* ro same */ + +#define INITIATOR_COMMAND_REG 1 /* rw */ +#define ICR_ASSERT_RST 0x80 /* rw Set to assert RST */ +#define ICR_ARBITRATION_PROGRESS 0x40 /* ro Indicates arbitration complete */ +#define ICR_TRI_STATE 0x40 /* wo Set to tri-state drivers */ +#define ICR_ARBITRATION_LOST 0x20 /* ro Indicates arbitration lost */ +#define ICR_DIFF_ENABLE 0x20 /* wo Set to enable diff. drivers */ +#define ICR_ASSERT_ACK 0x10 /* rw ini Set to assert ACK */ +#define ICR_ASSERT_BSY 0x08 /* rw Set to assert BSY */ +#define ICR_ASSERT_SEL 0x04 /* rw Set to assert SEL */ +#define ICR_ASSERT_ATN 0x02 /* rw Set to assert ATN */ +#define ICR_ASSERT_DATA 0x01 /* rw SCSI_DATA_REG is asserted */ + +#define ICR_BASE 0 + +#define MODE_REG 2 +/* + * Note : BLOCK_DMA code will keep DRQ asserted for the duration of the + * transfer, causing the chip to hog the bus. You probably don't want + * this. + */ +#define MR_BLOCK_DMA_MODE 0x80 /* rw block mode DMA */ +#define MR_TARGET 0x40 /* rw target mode */ +#define MR_ENABLE_PAR_CHECK 0x20 /* rw enable parity checking */ +#define MR_ENABLE_PAR_INTR 0x10 /* rw enable bad parity interrupt */ +#define MR_ENABLE_EOP_INTR 0x08 /* rw enable eop interrupt */ +#define MR_MONITOR_BSY 0x04 /* rw enable int on unexpected bsy fail */ +#define MR_DMA_MODE 0x02 /* rw DMA / pseudo DMA mode */ +#define MR_ARBITRATE 0x01 /* rw start arbitration */ + +#define MR_BASE 0 + +#define TARGET_COMMAND_REG 3 +#define TCR_LAST_BYTE_SENT 0x80 /* ro DMA done */ +#define TCR_ASSERT_REQ 0x08 /* tgt rw assert REQ */ +#define TCR_ASSERT_MSG 0x04 /* tgt rw assert MSG */ +#define TCR_ASSERT_CD 0x02 /* tgt rw assert CD */ +#define TCR_ASSERT_IO 0x01 /* tgt rw assert IO */ + +#define STATUS_REG 4 /* ro */ +/* + * Note : a set bit indicates an active signal, driven by us or another + * device. + */ +#define SR_RST 0x80 +#define SR_BSY 0x40 +#define SR_REQ 0x20 +#define SR_MSG 0x10 +#define SR_CD 0x08 +#define SR_IO 0x04 +#define SR_SEL 0x02 +#define SR_DBP 0x01 + +/* + * Setting a bit in this register will cause an interrupt to be generated when + * BSY is false and SEL true and this bit is asserted on the bus. 
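Given the SR_* status bits above, the current bus phase is just the MSG/CD/IO subset of the status register, and it is only meaningful while REQ is asserted. The following is a small stand-alone decoder using the same bit values; it is a sketch for illustration, not driver code.

#include <assert.h>
#include <string.h>

#define SR_REQ		0x20
#define SR_MSG		0x10
#define SR_CD		0x08
#define SR_IO		0x04
#define PHASE_MASK	(SR_MSG | SR_CD | SR_IO)

static const char *phase_name(unsigned char status)
{
	if (!(status & SR_REQ))
		return "unknown";		/* phase lines not valid yet */
	switch (status & PHASE_MASK) {
	case 0:				return "DATA OUT";
	case SR_IO:			return "DATA IN";
	case SR_CD:			return "COMMAND";
	case SR_CD | SR_IO:		return "STATUS";
	case SR_MSG | SR_CD:		return "MESSAGE OUT";
	case SR_MSG | SR_CD | SR_IO:	return "MESSAGE IN";
	default:			return "reserved";
	}
}

int main(void)
{
	assert(!strcmp(phase_name(SR_REQ | SR_MSG | SR_CD | SR_IO), "MESSAGE IN"));
	assert(!strcmp(phase_name(0), "unknown"));
	return 0;
}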
+ */ +#define SELECT_ENABLE_REG 4 /* wo */ + +#define BUS_AND_STATUS_REG 5 /* ro */ +#define BASR_END_DMA_TRANSFER 0x80 /* ro set on end of transfer */ +#define BASR_DRQ 0x40 /* ro mirror of DRQ pin */ +#define BASR_PARITY_ERROR 0x20 /* ro parity error detected */ +#define BASR_IRQ 0x10 /* ro mirror of IRQ pin */ +#define BASR_PHASE_MATCH 0x08 /* ro Set when MSG CD IO match TCR */ +#define BASR_BUSY_ERROR 0x04 /* ro Unexpected change to inactive state */ +#define BASR_ATN 0x02 /* ro BUS status */ +#define BASR_ACK 0x01 /* ro BUS status */ + +/* Write any value to this register to start a DMA send */ +#define START_DMA_SEND_REG 5 /* wo */ + +/* + * Used in DMA transfer mode, data is latched from the SCSI bus on + * the falling edge of REQ (ini) or ACK (tgt) + */ +#define INPUT_DATA_REG 6 /* ro */ + +/* Write any value to this register to start a DMA receive */ +#define START_DMA_TARGET_RECEIVE_REG 6 /* wo */ + +/* Read this register to clear interrupt conditions */ +#define RESET_PARITY_INTERRUPT_REG 7 /* ro */ + +/* Write any value to this register to start an ini mode DMA receive */ +#define START_DMA_INITIATOR_RECEIVE_REG 7 /* wo */ + +/* NCR 53C400(A) Control Status Register bits: */ +#define CSR_RESET 0x80 /* wo Resets 53c400 */ +#define CSR_53C80_REG 0x80 /* ro 5380 registers busy */ +#define CSR_TRANS_DIR 0x40 /* rw Data transfer direction */ +#define CSR_SCSI_BUFF_INTR 0x20 /* rw Enable int on transfer ready */ +#define CSR_53C80_INTR 0x10 /* rw Enable 53c80 interrupts */ +#define CSR_SHARED_INTR 0x08 /* rw Interrupt sharing */ +#define CSR_HOST_BUF_NOT_RDY 0x04 /* ro Is Host buffer ready */ +#define CSR_SCSI_BUF_RDY 0x02 /* ro SCSI buffer read */ +#define CSR_GATED_53C80_IRQ 0x01 /* ro Last block xferred */ + +#define CSR_BASE CSR_53C80_INTR + +/* Note : PHASE_* macros are based on the values of the STATUS register */ +#define PHASE_MASK (SR_MSG | SR_CD | SR_IO) + +#define PHASE_DATAOUT 0 +#define PHASE_DATAIN SR_IO +#define PHASE_CMDOUT SR_CD +#define PHASE_STATIN (SR_CD | SR_IO) +#define PHASE_MSGOUT (SR_MSG | SR_CD) +#define PHASE_MSGIN (SR_MSG | SR_CD | SR_IO) +#define PHASE_UNKNOWN 0xff + +/* + * Convert status register phase to something we can use to set phase in + * the target register so we can get phase mismatch interrupts on DMA + * transfers. 
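The comment above explains why the status-register phase can be copied into the target command register with a single shift: the PHASE_SR_TO_TCR() macro defined just below relies on MSG/CD/IO sitting at 0x10/0x08/0x04 in the status register and at 0x04/0x02/0x01 in the TCR. A quick self-contained check of that equivalence:

#include <assert.h>

#define SR_MSG		0x10
#define SR_CD		0x08
#define SR_IO		0x04

#define TCR_ASSERT_MSG	0x04
#define TCR_ASSERT_CD	0x02
#define TCR_ASSERT_IO	0x01

#define PHASE_SR_TO_TCR(phase) ((phase) >> 2)

int main(void)
{
	/* MESSAGE IN: all three bits line up after the shift */
	assert(PHASE_SR_TO_TCR(SR_MSG | SR_CD | SR_IO) ==
	       (TCR_ASSERT_MSG | TCR_ASSERT_CD | TCR_ASSERT_IO));
	/* DATA IN: only IO set */
	assert(PHASE_SR_TO_TCR(SR_IO) == TCR_ASSERT_IO);
	/* DATA OUT: no phase bits at all */
	assert(PHASE_SR_TO_TCR(0) == 0);
	return 0;
}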
+ */ + +#define PHASE_SR_TO_TCR(phase) ((phase) >> 2) + +#ifndef NO_IRQ +#define NO_IRQ 0 +#endif + +#define FLAG_DMA_FIXUP 1 /* Use DMA errata workarounds */ +#define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */ +#define FLAG_LATE_DMA_SETUP 32 /* Setup NCR before DMA H/W */ +#define FLAG_TOSHIBA_DELAY 128 /* Allow for borken CD-ROMs */ + +struct NCR5380_hostdata { + NCR5380_implementation_fields; /* Board-specific data */ + u8 __iomem *io; /* Remapped 5380 address */ + u8 __iomem *pdma_io; /* Remapped PDMA address */ + unsigned long poll_loops; /* Register polling limit */ + spinlock_t lock; /* Protects this struct */ + struct scsi_cmnd *connected; /* Currently connected cmnd */ + struct list_head disconnected; /* Waiting for reconnect */ + struct Scsi_Host *host; /* SCSI host backpointer */ + struct workqueue_struct *work_q; /* SCSI host work queue */ + struct work_struct main_task; /* Work item for main loop */ + int flags; /* Board-specific quirks */ + int dma_len; /* Requested length of DMA */ + int read_overruns; /* Transfer size reduction for DMA erratum */ + unsigned long io_port; /* Device IO port */ + unsigned long base; /* Device base address */ + struct list_head unissued; /* Waiting to be issued */ + struct scsi_cmnd *selecting; /* Cmnd to be connected */ + struct list_head autosense; /* Priority cmnd queue */ + struct scsi_cmnd *sensing; /* Cmnd needing autosense */ + struct scsi_eh_save ses; /* Cmnd state saved for EH */ + unsigned char busy[8]; /* Index = target, bit = lun */ + unsigned char id_mask; /* 1 << Host ID */ + unsigned char id_higher_mask; /* All bits above id_mask */ + unsigned char last_message; /* Last Message Out */ + unsigned long region_size; /* Size of address/port range */ + char info[168]; /* Host banner message */ +}; + +struct NCR5380_cmd { + char *ptr; + int this_residual; + struct scatterlist *buffer; + int status; + int message; + int phase; + struct list_head list; +}; + +#define NCR5380_PIO_CHUNK_SIZE 256 + +/* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */ +#define NCR5380_REG_POLL_TIME 10 + +static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr) +{ + return ((struct scsi_cmnd *)ncmd_ptr) - 1; +} + +static inline struct NCR5380_cmd *NCR5380_to_ncmd(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +#ifndef NDEBUG +#define NDEBUG (0) +#endif + +#define dprintk(flg, fmt, ...) \ + do { if ((NDEBUG) & (flg)) \ + printk(KERN_DEBUG fmt, ## __VA_ARGS__); } while (0) + +#define dsprintk(flg, host, fmt, ...) 
\ + do { if ((NDEBUG) & (flg)) \ + shost_printk(KERN_DEBUG, host, fmt, ## __VA_ARGS__); \ + } while (0) + +#if NDEBUG +#define NCR5380_dprint(flg, arg) \ + do { if ((NDEBUG) & (flg)) NCR5380_print(arg); } while (0) +#define NCR5380_dprint_phase(flg, arg) \ + do { if ((NDEBUG) & (flg)) NCR5380_print_phase(arg); } while (0) +static void NCR5380_print_phase(struct Scsi_Host *instance); +static void NCR5380_print(struct Scsi_Host *instance); +#else +#define NCR5380_dprint(flg, arg) do {} while (0) +#define NCR5380_dprint_phase(flg, arg) do {} while (0) +#endif + +static int NCR5380_init(struct Scsi_Host *instance, int flags); +static int NCR5380_maybe_reset_bus(struct Scsi_Host *); +static void NCR5380_exit(struct Scsi_Host *instance); +static void NCR5380_information_transfer(struct Scsi_Host *instance); +static irqreturn_t NCR5380_intr(int irq, void *dev_id); +static void NCR5380_main(struct work_struct *work); +static const char *NCR5380_info(struct Scsi_Host *instance); +static void NCR5380_reselect(struct Scsi_Host *instance); +static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *); +static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); +static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data, + unsigned int can_sleep); +static int NCR5380_poll_politely2(struct NCR5380_hostdata *, + unsigned int, u8, u8, + unsigned int, u8, u8, unsigned long); + +static inline int NCR5380_poll_politely(struct NCR5380_hostdata *hostdata, + unsigned int reg, u8 bit, u8 val, + unsigned long wait) +{ + if ((NCR5380_read(reg) & bit) == val) + return 0; + + return NCR5380_poll_politely2(hostdata, reg, bit, val, + reg, bit, val, wait); +} + +static int NCR5380_dma_xfer_len(struct NCR5380_hostdata *, + struct scsi_cmnd *); +static int NCR5380_dma_send_setup(struct NCR5380_hostdata *, + unsigned char *, int); +static int NCR5380_dma_recv_setup(struct NCR5380_hostdata *, + unsigned char *, int); +static int NCR5380_dma_residual(struct NCR5380_hostdata *); + +static inline int NCR5380_dma_xfer_none(struct NCR5380_hostdata *hostdata, + struct scsi_cmnd *cmd) +{ + return 0; +} + +static inline int NCR5380_dma_setup_none(struct NCR5380_hostdata *hostdata, + unsigned char *data, int count) +{ + return 0; +} + +static inline int NCR5380_dma_residual_none(struct NCR5380_hostdata *hostdata) +{ + return 0; +} + +#endif /* NCR5380_H */ diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c new file mode 100644 index 000000000..b95147fb1 --- /dev/null +++ b/drivers/scsi/a100u2w.c @@ -0,0 +1,1226 @@ +/* + * Initio A100 device driver for Linux. + * + * Copyright (c) 1994-1998 Initio Corporation + * Copyright (c) 2003-2004 Christoph Hellwig + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Revision History: + * 07/02/98 hl - v.91n Initial drivers. + * 09/14/98 hl - v1.01 Support new Kernel. + * 09/22/98 hl - v1.01a Support reset. + * 09/24/98 hl - v1.01b Fixed reset. + * 10/05/98 hl - v1.02 split the source code and release. + * 12/19/98 bv - v1.02a Use spinlocks for 2.1.95 and up + * 01/31/99 bv - v1.02b Use mdelay instead of waitForPause + * 08/08/99 bv - v1.02c Use waitForPause again. + * 06/25/02 Doug Ledford - v1.02d + * - Remove limit on number of controllers + * - Port to DMA mapping API + * - Clean up interrupt handler registration + * - Fix memory leaks + * - Fix allocation of scsi host structs and private data + * 11/18/03 Christoph Hellwig + * - Port to new probing API + * - Fix some more leaks in init failure cases + * 9/28/04 Christoph Hellwig + * - merge the two source files + * - remove internal queueing code + * 14/06/07 Alan Cox + * - Grand cleanup and Linuxisation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include "a100u2w.h" + + +static struct orc_scb *__orc_alloc_scb(struct orc_host * host); +static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb); + +static struct orc_nvram nvram, *nvramp = &nvram; + +static u8 default_nvram[64] = +{ +/*----------header -------------*/ + 0x01, /* 0x00: Sub System Vendor ID 0 */ + 0x11, /* 0x01: Sub System Vendor ID 1 */ + 0x60, /* 0x02: Sub System ID 0 */ + 0x10, /* 0x03: Sub System ID 1 */ + 0x00, /* 0x04: SubClass */ + 0x01, /* 0x05: Vendor ID 0 */ + 0x11, /* 0x06: Vendor ID 1 */ + 0x60, /* 0x07: Device ID 0 */ + 0x10, /* 0x08: Device ID 1 */ + 0x00, /* 0x09: Reserved */ + 0x00, /* 0x0A: Reserved */ + 0x01, /* 0x0B: Revision of Data Structure */ + /* -- Host Adapter Structure --- */ + 0x01, /* 0x0C: Number Of SCSI Channel */ + 0x01, /* 0x0D: BIOS Configuration 1 */ + 0x00, /* 0x0E: BIOS Configuration 2 */ + 0x00, /* 0x0F: BIOS Configuration 3 */ + /* --- SCSI Channel 0 Configuration --- */ + 0x07, /* 0x10: H/A ID */ + 0x83, /* 0x11: Channel Configuration */ + 0x20, /* 0x12: MAX TAG per target */ + 0x0A, /* 0x13: SCSI Reset Recovering time */ + 0x00, /* 0x14: Channel Configuration4 */ + 0x00, /* 0x15: Channel Configuration5 */ + /* SCSI Channel 0 Target Configuration */ + /* 0x16-0x25 */ + 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, + 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, + /* --- SCSI Channel 1 Configuration --- */ + 0x07, /* 0x26: H/A ID */ + 0x83, /* 0x27: Channel Configuration */ + 0x20, /* 0x28: MAX TAG per target */ + 0x0A, /* 0x29: SCSI Reset Recovering time */ + 0x00, /* 0x2A: Channel Configuration4 */ + 0x00, /* 0x2B: Channel Configuration5 */ 
+ /* SCSI Channel 1 Target Configuration */ + /* 0x2C-0x3B */ + 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, + 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, + 0x00, /* 0x3C: Reserved */ + 0x00, /* 0x3D: Reserved */ + 0x00, /* 0x3E: Reserved */ + 0x00 /* 0x3F: Checksum */ +}; + + +static u8 wait_chip_ready(struct orc_host * host) +{ + int i; + + for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */ + if (inb(host->base + ORC_HCTRL) & HOSTSTOP) /* Wait HOSTSTOP set */ + return 1; + msleep(100); + } + return 0; +} + +static u8 wait_firmware_ready(struct orc_host * host) +{ + int i; + + for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */ + if (inb(host->base + ORC_HSTUS) & RREADY) /* Wait READY set */ + return 1; + msleep(100); /* wait 100ms before try again */ + } + return 0; +} + +/***************************************************************************/ +static u8 wait_scsi_reset_done(struct orc_host * host) +{ + int i; + + for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */ + if (!(inb(host->base + ORC_HCTRL) & SCSIRST)) /* Wait SCSIRST done */ + return 1; + mdelay(100); /* wait 100ms before try again */ + } + return 0; +} + +/***************************************************************************/ +static u8 wait_HDO_off(struct orc_host * host) +{ + int i; + + for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */ + if (!(inb(host->base + ORC_HCTRL) & HDO)) /* Wait HDO off */ + return 1; + mdelay(100); /* wait 100ms before try again */ + } + return 0; +} + +/***************************************************************************/ +static u8 wait_hdi_set(struct orc_host * host, u8 * data) +{ + int i; + + for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */ + if ((*data = inb(host->base + ORC_HSTUS)) & HDI) + return 1; /* Wait HDI set */ + mdelay(100); /* wait 100ms before try again */ + } + return 0; +} + +/***************************************************************************/ +static unsigned short orc_read_fwrev(struct orc_host * host) +{ + u16 version; + u8 data; + + outb(ORC_CMD_VERSION, host->base + ORC_HDATA); + outb(HDO, host->base + ORC_HCTRL); + if (wait_HDO_off(host) == 0) /* Wait HDO off */ + return 0; + + if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */ + return 0; + version = inb(host->base + ORC_HDATA); + outb(data, host->base + ORC_HSTUS); /* Clear HDI */ + + if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */ + return 0; + version |= inb(host->base + ORC_HDATA) << 8; + outb(data, host->base + ORC_HSTUS); /* Clear HDI */ + + return version; +} + +/***************************************************************************/ +static u8 orc_nv_write(struct orc_host * host, unsigned char address, unsigned char value) +{ + outb(ORC_CMD_SET_NVM, host->base + ORC_HDATA); /* Write command */ + outb(HDO, host->base + ORC_HCTRL); + if (wait_HDO_off(host) == 0) /* Wait HDO off */ + return 0; + + outb(address, host->base + ORC_HDATA); /* Write address */ + outb(HDO, host->base + ORC_HCTRL); + if (wait_HDO_off(host) == 0) /* Wait HDO off */ + return 0; + + outb(value, host->base + ORC_HDATA); /* Write value */ + outb(HDO, host->base + ORC_HCTRL); + if (wait_HDO_off(host) == 0) /* Wait HDO off */ + return 0; + + return 1; +} + +/***************************************************************************/ +static u8 orc_nv_read(struct orc_host * host, u8 address, u8 *ptr) +{ + unsigned char data; + + outb(ORC_CMD_GET_NVM, host->base + ORC_HDATA); /* Write command */ + outb(HDO, host->base + 
ORC_HCTRL); + if (wait_HDO_off(host) == 0) /* Wait HDO off */ + return 0; + + outb(address, host->base + ORC_HDATA); /* Write address */ + outb(HDO, host->base + ORC_HCTRL); + if (wait_HDO_off(host) == 0) /* Wait HDO off */ + return 0; + + if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */ + return 0; + *ptr = inb(host->base + ORC_HDATA); + outb(data, host->base + ORC_HSTUS); /* Clear HDI */ + + return 1; + +} + +/** + * orc_exec_scb - Queue an SCB with the HA + * @host: host adapter the SCB belongs to + * @scb: SCB to queue for execution + */ + +static void orc_exec_scb(struct orc_host * host, struct orc_scb * scb) +{ + scb->status = ORCSCB_POST; + outb(scb->scbidx, host->base + ORC_PQUEUE); +} + + +/** + * se2_rd_all - read SCSI parameters from EEPROM + * @host: Host whose EEPROM is being loaded + * + * Read SCSI H/A configuration parameters from serial EEPROM + */ + +static int se2_rd_all(struct orc_host * host) +{ + int i; + u8 *np, chksum = 0; + + np = (u8 *) nvramp; + for (i = 0; i < 64; i++, np++) { /* <01> */ + if (orc_nv_read(host, (u8) i, np) == 0) + return -1; + } + + /*------ Is ckecksum ok ? ------*/ + np = (u8 *) nvramp; + for (i = 0; i < 63; i++) + chksum += *np++; + + if (nvramp->CheckSum != (u8) chksum) + return -1; + return 1; +} + +/** + * se2_update_all - update the EEPROM + * @host: Host whose EEPROM is being updated + * + * Update changed bytes in the EEPROM image. + */ + +static void se2_update_all(struct orc_host * host) +{ /* setup default pattern */ + int i; + u8 *np, *np1, chksum = 0; + + /* Calculate checksum first */ + np = (u8 *) default_nvram; + for (i = 0; i < 63; i++) + chksum += *np++; + *np = chksum; + + np = (u8 *) default_nvram; + np1 = (u8 *) nvramp; + for (i = 0; i < 64; i++, np++, np1++) { + if (*np != *np1) + orc_nv_write(host, (u8) i, *np); + } +} + +/** + * read_eeprom - load EEPROM + * @host: Host EEPROM to read + * + * Read the EEPROM for a given host. If it is invalid or fails + * the restore the defaults and use them. + */ + +static void read_eeprom(struct orc_host * host) +{ + if (se2_rd_all(host) != 1) { + se2_update_all(host); /* setup default pattern */ + se2_rd_all(host); /* load again */ + } +} + + +/** + * orc_load_firmware - initialise firmware + * @host: Host to set up + * + * Load the firmware from the EEPROM into controller SRAM. This + * is basically a 4K block copy and then a 4K block read to check + * correctness. 
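se2_rd_all() and se2_update_all() above agree on a simple integrity rule: the 64th NVRAM byte must equal the 8-bit sum of the first 63. A stand-alone model of that checksum follows (illustrative only).

#include <assert.h>
#include <string.h>

/* 8-bit sum over the first 63 bytes, wrapping modulo 256 as in the driver. */
static unsigned char nvram_checksum(const unsigned char *img)
{
	unsigned char sum = 0;

	for (int i = 0; i < 63; i++)
		sum += img[i];
	return sum;
}

int main(void)
{
	unsigned char img[64];

	memset(img, 0xC8, sizeof(img));
	img[63] = nvram_checksum(img);		/* what se2_update_all() stores */
	assert(img[63] == (unsigned char)(0xC8 * 63));
	assert(nvram_checksum(img) == img[63]);	/* what se2_rd_all() verifies */
	return 0;
}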
The rest is convulted by the indirect interfaces + * in the hardware + */ + +static u8 orc_load_firmware(struct orc_host * host) +{ + u32 data32; + u16 bios_addr; + u16 i; + u8 *data32_ptr, data; + + + /* Set up the EEPROM for access */ + + data = inb(host->base + ORC_GCFG); + outb(data | EEPRG, host->base + ORC_GCFG); /* Enable EEPROM programming */ + outb(0x00, host->base + ORC_EBIOSADR2); + outw(0x0000, host->base + ORC_EBIOSADR0); + if (inb(host->base + ORC_EBIOSDATA) != 0x55) { + outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */ + return 0; + } + outw(0x0001, host->base + ORC_EBIOSADR0); + if (inb(host->base + ORC_EBIOSDATA) != 0xAA) { + outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */ + return 0; + } + + outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Enable SRAM programming */ + data32_ptr = (u8 *) & data32; + data32 = cpu_to_le32(0); /* Initial FW address to 0 */ + outw(0x0010, host->base + ORC_EBIOSADR0); + *data32_ptr = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ + outw(0x0011, host->base + ORC_EBIOSADR0); + *(data32_ptr + 1) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ + outw(0x0012, host->base + ORC_EBIOSADR0); + *(data32_ptr + 2) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ + outw(*(data32_ptr + 2), host->base + ORC_EBIOSADR2); + outl(le32_to_cpu(data32), host->base + ORC_FWBASEADR); /* Write FW address */ + + /* Copy the code from the BIOS to the SRAM */ + + udelay(500); /* Required on Sun Ultra 5 ... 350 -> failures */ + bios_addr = (u16) le32_to_cpu(data32); /* FW code locate at BIOS address + ? */ + for (i = 0, data32_ptr = (u8 *) & data32; /* Download the code */ + i < 0x1000; /* Firmware code size = 4K */ + i++, bios_addr++) { + outw(bios_addr, host->base + ORC_EBIOSADR0); + *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ + if ((i % 4) == 3) { + outl(le32_to_cpu(data32), host->base + ORC_RISCRAM); /* Write every 4 bytes */ + data32_ptr = (u8 *) & data32; + } + } + + /* Go back and check they match */ + + outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Reset program count 0 */ + bios_addr -= 0x1000; /* Reset the BIOS address */ + for (i = 0, data32_ptr = (u8 *) & data32; /* Check the code */ + i < 0x1000; /* Firmware code size = 4K */ + i++, bios_addr++) { + outw(bios_addr, host->base + ORC_EBIOSADR0); + *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ + if ((i % 4) == 3) { + if (inl(host->base + ORC_RISCRAM) != le32_to_cpu(data32)) { + outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */ + outb(data, host->base + ORC_GCFG); /*Disable EEPROM programming */ + return 0; + } + data32_ptr = (u8 *) & data32; + } + } + + /* Success */ + outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */ + outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */ + return 1; +} + +/***************************************************************************/ +static void setup_SCBs(struct orc_host * host) +{ + struct orc_scb *scb; + int i; + struct orc_extended_scb *escb; + dma_addr_t escb_phys; + + /* Setup SCB base and SCB Size registers */ + outb(ORC_MAXQUEUE, host->base + ORC_SCBSIZE); /* Total number of SCBs */ + /* SCB base address 0 */ + outl(host->scb_phys, host->base + ORC_SCBBASE0); + /* SCB base address 1 */ + outl(host->scb_phys, host->base + ORC_SCBBASE1); + + /* setup scatter list address with one buffer */ + scb = host->scb_virt; + escb = host->escb_virt; + + for (i = 0; i < ORC_MAXQUEUE; i++) { + escb_phys = 
(host->escb_phys + (sizeof(struct orc_extended_scb) * i)); + scb->sg_addr = cpu_to_le32((u32) escb_phys); + scb->sense_addr = cpu_to_le32((u32) escb_phys); + scb->escb = escb; + scb->scbidx = i; + scb++; + escb++; + } +} + +/** + * init_alloc_map - initialise allocation map + * @host: host map to configure + * + * Initialise the allocation maps for this device. If the device + * is not quiescent the caller must hold the allocation lock + */ + +static void init_alloc_map(struct orc_host * host) +{ + u8 i, j; + + for (i = 0; i < MAX_CHANNELS; i++) { + for (j = 0; j < 8; j++) { + host->allocation_map[i][j] = 0xffffffff; + } + } +} + +/** + * init_orchid - initialise the host adapter + * @host:host adapter to initialise + * + * Initialise the controller and if necessary load the firmware. + * + * Returns -1 if the initialisation fails. + */ + +static int init_orchid(struct orc_host * host) +{ + u8 *ptr; + u16 revision; + u8 i; + + init_alloc_map(host); + outb(0xFF, host->base + ORC_GIMSK); /* Disable all interrupts */ + + if (inb(host->base + ORC_HSTUS) & RREADY) { /* Orchid is ready */ + revision = orc_read_fwrev(host); + if (revision == 0xFFFF) { + outb(DEVRST, host->base + ORC_HCTRL); /* Reset Host Adapter */ + if (wait_chip_ready(host) == 0) + return -1; + orc_load_firmware(host); /* Download FW */ + setup_SCBs(host); /* Setup SCB base and SCB Size registers */ + outb(0x00, host->base + ORC_HCTRL); /* clear HOSTSTOP */ + if (wait_firmware_ready(host) == 0) + return -1; + /* Wait for firmware ready */ + } else { + setup_SCBs(host); /* Setup SCB base and SCB Size registers */ + } + } else { /* Orchid is not Ready */ + outb(DEVRST, host->base + ORC_HCTRL); /* Reset Host Adapter */ + if (wait_chip_ready(host) == 0) + return -1; + orc_load_firmware(host); /* Download FW */ + setup_SCBs(host); /* Setup SCB base and SCB Size registers */ + outb(HDO, host->base + ORC_HCTRL); /* Do Hardware Reset & */ + + /* clear HOSTSTOP */ + if (wait_firmware_ready(host) == 0) /* Wait for firmware ready */ + return -1; + } + + /* Load an EEProm copy into RAM */ + /* Assumes single threaded at this point */ + read_eeprom(host); + + if (nvramp->revision != 1) + return -1; + + host->scsi_id = nvramp->scsi_id; + host->BIOScfg = nvramp->BIOSConfig1; + host->max_targets = MAX_TARGETS; + ptr = (u8 *) & (nvramp->Target00Config); + for (i = 0; i < 16; ptr++, i++) { + host->target_flag[i] = *ptr; + host->max_tags[i] = ORC_MAXTAGS; + } + + if (nvramp->SCSI0Config & NCC_BUSRESET) + host->flags |= HCF_SCSI_RESET; + outb(0xFB, host->base + ORC_GIMSK); /* enable RP FIFO interrupt */ + return 0; +} + +/** + * orc_reset_scsi_bus - perform bus reset + * @host: host being reset + * + * Perform a full bus reset on the adapter. 
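orc_load_firmware() above assembles the 4 KiB image four bytes at a time into a 32-bit word, flushing the word to controller SRAM on every fourth byte and then re-reading the whole range to verify it. The sketch below models that byte-packing and verification with a plain array in place of the ORC_RISCRAM port; the real driver's little-endian conversions and port I/O are elided, so this is a model only.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define FW_SIZE 16			/* the real image is 4 KiB */

static uint32_t sram[FW_SIZE / 4];	/* stands in for the SRAM data port */

static void load_and_verify(const uint8_t *eeprom)
{
	uint32_t word = 0;
	uint8_t *p = (uint8_t *)&word;

	for (int i = 0; i < FW_SIZE; i++) {
		*p++ = eeprom[i];		/* one byte per "port read" */
		if ((i % 4) == 3) {		/* word full: write it out */
			sram[i / 4] = word;
			p = (uint8_t *)&word;
		}
	}
	/* verification pass, as the driver does after the download */
	assert(memcmp(sram, eeprom, FW_SIZE) == 0);
}

int main(void)
{
	uint8_t eeprom[FW_SIZE];

	for (int i = 0; i < FW_SIZE; i++)
		eeprom[i] = (uint8_t)i;
	load_and_verify(eeprom);
	return 0;
}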
+ */ + +static int orc_reset_scsi_bus(struct orc_host * host) +{ /* I need Host Control Block Information */ + unsigned long flags; + + spin_lock_irqsave(&host->allocation_lock, flags); + + init_alloc_map(host); + /* reset scsi bus */ + outb(SCSIRST, host->base + ORC_HCTRL); + /* FIXME: We can spend up to a second with the lock held and + interrupts off here */ + if (wait_scsi_reset_done(host) == 0) { + spin_unlock_irqrestore(&host->allocation_lock, flags); + return FAILED; + } else { + spin_unlock_irqrestore(&host->allocation_lock, flags); + return SUCCESS; + } +} + +/** + * orc_device_reset - device reset handler + * @host: host to reset + * @cmd: command causing the reset + * @target: target device + * + * Reset registers, reset a hanging bus and kill active and disconnected + * commands for target w/o soft reset + */ + +static int orc_device_reset(struct orc_host * host, struct scsi_cmnd *cmd, unsigned int target) +{ /* I need Host Control Block Information */ + struct orc_scb *scb; + struct orc_extended_scb *escb; + struct orc_scb *host_scb; + u8 i; + unsigned long flags; + + spin_lock_irqsave(&(host->allocation_lock), flags); + scb = (struct orc_scb *) NULL; + escb = (struct orc_extended_scb *) NULL; + + /* setup scatter list address with one buffer */ + host_scb = host->scb_virt; + + /* FIXME: is this safe if we then fail to issue the reset or race + a completion ? */ + init_alloc_map(host); + + /* Find the scb corresponding to the command */ + for (i = 0; i < ORC_MAXQUEUE; i++) { + escb = host_scb->escb; + if (host_scb->status && escb->srb == cmd) + break; + host_scb++; + } + + if (i == ORC_MAXQUEUE) { + printk(KERN_ERR "Unable to Reset - No SCB Found\n"); + spin_unlock_irqrestore(&(host->allocation_lock), flags); + return FAILED; + } + + /* Allocate a new SCB for the reset command to the firmware */ + if ((scb = __orc_alloc_scb(host)) == NULL) { + /* Can't happen.. */ + spin_unlock_irqrestore(&(host->allocation_lock), flags); + return FAILED; + } + + /* Reset device is handled by the firmware, we fill in an SCB and + fire it at the controller, it does the rest */ + scb->opcode = ORC_BUSDEVRST; + scb->target = target; + scb->hastat = 0; + scb->tastat = 0; + scb->status = 0x0; + scb->link = 0xFF; + scb->reserved0 = 0; + scb->reserved1 = 0; + scb->xferlen = cpu_to_le32(0); + scb->sg_len = cpu_to_le32(0); + + escb->srb = NULL; + escb->srb = cmd; + orc_exec_scb(host, scb); /* Start execute SCB */ + spin_unlock_irqrestore(&host->allocation_lock, flags); + return SUCCESS; +} + +/** + * __orc_alloc_scb - allocate an SCB + * @host: host to allocate from + * + * Allocate an SCB and return a pointer to the SCB object. NULL + * is returned if no SCB is free. The caller must already hold + * the allocator lock at this point. + */ + + +static struct orc_scb *__orc_alloc_scb(struct orc_host * host) +{ + u8 channel; + unsigned long idx; + u8 index; + u8 i; + + channel = host->index; + for (i = 0; i < 8; i++) { + for (index = 0; index < 32; index++) { + if ((host->allocation_map[channel][i] >> index) & 0x01) { + host->allocation_map[channel][i] &= ~(1 << index); + idx = index + 32 * i; + /* + * Translate the index to a structure instance + */ + return host->scb_virt + idx; + } + } + } + return NULL; +} + +/** + * orc_alloc_scb - allocate an SCB + * @host: host to allocate from + * + * Allocate an SCB and return a pointer to the SCB object. NULL + * is returned if no SCB is free. 
+ */ + +static struct orc_scb *orc_alloc_scb(struct orc_host * host) +{ + struct orc_scb *scb; + unsigned long flags; + + spin_lock_irqsave(&host->allocation_lock, flags); + scb = __orc_alloc_scb(host); + spin_unlock_irqrestore(&host->allocation_lock, flags); + return scb; +} + +/** + * orc_release_scb - release an SCB + * @host: host owning the SCB + * @scb: SCB that is now free + * + * Called to return a completed SCB to the allocation pool. Before + * calling the SCB must be out of use on both the host and the HA. + */ + +static void orc_release_scb(struct orc_host *host, struct orc_scb *scb) +{ + unsigned long flags; + u8 index, i, channel; + + spin_lock_irqsave(&(host->allocation_lock), flags); + channel = host->index; /* Channel */ + index = scb->scbidx; + i = index / 32; + index %= 32; + host->allocation_map[channel][i] |= (1 << index); + spin_unlock_irqrestore(&(host->allocation_lock), flags); +} + +/* + * orchid_abort_scb - abort a command + * + * Abort a queued command that has been passed to the firmware layer + * if possible. This is all handled by the firmware. We aks the firmware + * and it either aborts the command or fails + */ + +static int orchid_abort_scb(struct orc_host * host, struct orc_scb * scb) +{ + unsigned char data, status; + + outb(ORC_CMD_ABORT_SCB, host->base + ORC_HDATA); /* Write command */ + outb(HDO, host->base + ORC_HCTRL); + if (wait_HDO_off(host) == 0) /* Wait HDO off */ + return 0; + + outb(scb->scbidx, host->base + ORC_HDATA); /* Write address */ + outb(HDO, host->base + ORC_HCTRL); + if (wait_HDO_off(host) == 0) /* Wait HDO off */ + return 0; + + if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */ + return 0; + status = inb(host->base + ORC_HDATA); + outb(data, host->base + ORC_HSTUS); /* Clear HDI */ + + if (status == 1) /* 0 - Successfully */ + return 0; /* 1 - Fail */ + return 1; +} + +static int inia100_abort_cmd(struct orc_host * host, struct scsi_cmnd *cmd) +{ + struct orc_extended_scb *escb; + struct orc_scb *scb; + u8 i; + unsigned long flags; + + spin_lock_irqsave(&(host->allocation_lock), flags); + + scb = host->scb_virt; + + /* Walk the queue until we find the SCB that belongs to the command + block. This isn't a performance critical path so a walk in the park + here does no harm */ + + for (i = 0; i < ORC_MAXQUEUE; i++, scb++) { + escb = scb->escb; + if (scb->status && escb->srb == cmd) { + if (scb->tag_msg == 0) { + goto out; + } else { + /* Issue an ABORT to the firmware */ + if (orchid_abort_scb(host, scb)) { + escb->srb = NULL; + spin_unlock_irqrestore(&host->allocation_lock, flags); + return SUCCESS; + } else + goto out; + } + } + } +out: + spin_unlock_irqrestore(&host->allocation_lock, flags); + return FAILED; +} + +/** + * orc_interrupt - IRQ processing + * @host: Host causing the interrupt + * + * This function is called from the IRQ handler and protected + * by the host lock. While the controller reports that there are + * scb's for processing we pull them off the controller, turn the + * index into a host address pointer to the scb and call the scb + * handler. 
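__orc_alloc_scb() and orc_release_scb() above treat allocation_map[] as a 256-bit free map: eight 32-bit words, one bit per SCB, a set bit meaning free. The following is a minimal user-space model of that allocator with a single channel; it is a sketch, not the driver's structures.

#include <assert.h>
#include <stdint.h>

static uint32_t alloc_map[8] = {
	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
};

/* Clear the first set bit and return its index, or -1 if none is free. */
static int scb_alloc(void)
{
	for (int i = 0; i < 8; i++)
		for (int bit = 0; bit < 32; bit++)
			if ((alloc_map[i] >> bit) & 1) {
				alloc_map[i] &= ~(1u << bit);
				return bit + 32 * i;	/* SCB index */
			}
	return -1;
}

/* Set the bit again; same word/bit split as orc_release_scb(). */
static void scb_release(int idx)
{
	alloc_map[idx / 32] |= 1u << (idx % 32);
}

int main(void)
{
	int a = scb_alloc(), b = scb_alloc();

	assert(a == 0 && b == 1);
	scb_release(a);
	assert(scb_alloc() == 0);	/* freed slot is reused first */
	return 0;
}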
+ * + * Returns IRQ_HANDLED if any SCBs were processed, IRQ_NONE otherwise + */ + +static irqreturn_t orc_interrupt(struct orc_host * host) +{ + u8 scb_index; + struct orc_scb *scb; + + /* Check if we have an SCB queued for servicing */ + if (inb(host->base + ORC_RQUEUECNT) == 0) + return IRQ_NONE; + + do { + /* Get the SCB index of the SCB to service */ + scb_index = inb(host->base + ORC_RQUEUE); + + /* Translate it back to a host pointer */ + scb = (struct orc_scb *) ((unsigned long) host->scb_virt + (unsigned long) (sizeof(struct orc_scb) * scb_index)); + scb->status = 0x0; + /* Process the SCB */ + inia100_scb_handler(host, scb); + } while (inb(host->base + ORC_RQUEUECNT)); + return IRQ_HANDLED; +} /* End of I1060Interrupt() */ + +/** + * inia100_build_scb - build SCB + * @host: host owing the control block + * @scb: control block to use + * @cmd: Mid layer command + * + * Build a host adapter control block from the SCSI mid layer command + */ + +static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struct scsi_cmnd * cmd) +{ /* Create corresponding SCB */ + struct scatterlist *sg; + struct orc_sgent *sgent; /* Pointer to SG list */ + int i, count_sg; + struct orc_extended_scb *escb; + + /* Links between the escb, scb and Linux scsi midlayer cmd */ + escb = scb->escb; + escb->srb = cmd; + sgent = NULL; + + /* Set up the SCB to do a SCSI command block */ + scb->opcode = ORC_EXECSCSI; + scb->flags = SCF_NO_DCHK; /* Clear done bit */ + scb->target = cmd->device->id; + scb->lun = cmd->device->lun; + scb->reserved0 = 0; + scb->reserved1 = 0; + scb->sg_len = cpu_to_le32(0); + + scb->xferlen = cpu_to_le32((u32) scsi_bufflen(cmd)); + sgent = (struct orc_sgent *) & escb->sglist[0]; + + count_sg = scsi_dma_map(cmd); + if (count_sg < 0) + return count_sg; + BUG_ON(count_sg > TOTAL_SG_ENTRY); + + /* Build the scatter gather lists */ + if (count_sg) { + scb->sg_len = cpu_to_le32((u32) (count_sg * 8)); + scsi_for_each_sg(cmd, sg, count_sg, i) { + sgent->base = cpu_to_le32((u32) sg_dma_address(sg)); + sgent->length = cpu_to_le32((u32) sg_dma_len(sg)); + sgent++; + } + } else { + scb->sg_len = cpu_to_le32(0); + sgent->base = cpu_to_le32(0); + sgent->length = cpu_to_le32(0); + } + scb->sg_addr = (u32) scb->sense_addr; /* sense_addr is already little endian */ + scb->hastat = 0; + scb->tastat = 0; + scb->link = 0xFF; + scb->sense_len = SENSE_SIZE; + scb->cdb_len = cmd->cmd_len; + if (scb->cdb_len >= IMAX_CDB) { + printk("max cdb length= %x\n", cmd->cmd_len); + scb->cdb_len = IMAX_CDB; + } + scb->ident = (u8)(cmd->device->lun & 0xff) | DISC_ALLOW; + if (cmd->device->tagged_supported) { /* Tag Support */ + scb->tag_msg = SIMPLE_QUEUE_TAG; /* Do simple tag only */ + } else { + scb->tag_msg = 0; /* No tag support */ + } + memcpy(scb->cdb, cmd->cmnd, scb->cdb_len); + return 0; +} + +/** + * inia100_queue_lck - queue command with host + * @cmd: Command block + * + * Called by the mid layer to queue a command. 
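inia100_build_scb() above converts each DMA-mapped segment into an 8-byte firmware scatter-gather entry (a 32-bit bus address plus a 32-bit length) and reports the table size in bytes via sg_len. The stand-alone sketch below models that layout; fake_segment and build_sg_table() are made-up stand-ins for the scatterlist API, and the driver's cpu_to_le32() conversions are noted but not performed.

#include <assert.h>
#include <stdint.h>

struct sgent {			/* mirrors struct orc_sgent */
	uint32_t base;		/* DMA address (little endian on the wire) */
	uint32_t length;	/* segment length in bytes */
};

struct fake_segment { uint32_t addr, len; };	/* stands in for scatterlist */

/* Fill one entry per segment and return the table size in bytes (count * 8). */
static uint32_t build_sg_table(struct sgent *table,
			       const struct fake_segment *segs, int count)
{
	for (int i = 0; i < count; i++) {
		table[i].base = segs[i].addr;	/* cpu_to_le32() in the driver */
		table[i].length = segs[i].len;
	}
	return (uint32_t)count * sizeof(struct sgent);
}

int main(void)
{
	struct fake_segment segs[2] = { { 0x1000, 512 }, { 0x2000, 1024 } };
	struct sgent table[32];

	assert(build_sg_table(table, segs, 2) == 16);
	assert(table[1].base == 0x2000 && table[1].length == 1024);
	return 0;
}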
Process the command + * block, build the host specific scb structures and if there is room + * queue the command down to the controller + */ +static int inia100_queue_lck(struct scsi_cmnd *cmd) +{ + struct orc_scb *scb; + struct orc_host *host; /* Point to Host adapter control block */ + + host = (struct orc_host *) cmd->device->host->hostdata; + /* Get free SCSI control block */ + if ((scb = orc_alloc_scb(host)) == NULL) + return SCSI_MLQUEUE_HOST_BUSY; + + if (inia100_build_scb(host, scb, cmd)) { + orc_release_scb(host, scb); + return SCSI_MLQUEUE_HOST_BUSY; + } + orc_exec_scb(host, scb); /* Start execute SCB */ + return 0; +} + +static DEF_SCSI_QCMD(inia100_queue) + +/***************************************************************************** + Function name : inia100_abort + Description : Abort a queued command. + (commands that are on the bus can't be aborted easily) + Input : host - Pointer to host adapter structure + Output : None. + Return : pSRB - Pointer to SCSI request block. +*****************************************************************************/ +static int inia100_abort(struct scsi_cmnd * cmd) +{ + struct orc_host *host; + + host = (struct orc_host *) cmd->device->host->hostdata; + return inia100_abort_cmd(host, cmd); +} + +/***************************************************************************** + Function name : inia100_reset + Description : Reset registers, reset a hanging bus and + kill active and disconnected commands for target w/o soft reset + Input : host - Pointer to host adapter structure + Output : None. + Return : pSRB - Pointer to SCSI request block. +*****************************************************************************/ +static int inia100_bus_reset(struct scsi_cmnd * cmd) +{ /* I need Host Control Block Information */ + struct orc_host *host; + host = (struct orc_host *) cmd->device->host->hostdata; + return orc_reset_scsi_bus(host); +} + +/***************************************************************************** + Function name : inia100_device_reset + Description : Reset the device + Input : host - Pointer to host adapter structure + Output : None. + Return : pSRB - Pointer to SCSI request block. +*****************************************************************************/ +static int inia100_device_reset(struct scsi_cmnd * cmd) +{ /* I need Host Control Block Information */ + struct orc_host *host; + host = (struct orc_host *) cmd->device->host->hostdata; + return orc_device_reset(host, cmd, scmd_id(cmd)); + +} + +/** + * inia100_scb_handler - interrupt callback + * @host: Host causing the interrupt + * @scb: SCB the controller returned as needing processing + * + * Perform completion processing on a control block. Do the conversions + * from host to SCSI midlayer error coding, save any sense data and + * the complete with the midlayer and recycle the scb. 
+ */ + +static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb) +{ + struct scsi_cmnd *cmd; /* Pointer to SCSI request block */ + struct orc_extended_scb *escb; + + escb = scb->escb; + if ((cmd = (struct scsi_cmnd *) escb->srb) == NULL) { + printk(KERN_ERR "inia100_scb_handler: SRB pointer is empty\n"); + orc_release_scb(host, scb); /* Release SCB for current channel */ + return; + } + escb->srb = NULL; + + switch (scb->hastat) { + case 0x0: + case 0xa: /* Linked command complete without error and linked normally */ + case 0xb: /* Linked command complete without error interrupt generated */ + scb->hastat = 0; + break; + + case 0x11: /* Selection time out-The initiator selection or target + reselection was not complete within the SCSI Time out period */ + scb->hastat = DID_TIME_OUT; + break; + + case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus + phase sequence was requested by the target. The host adapter + will generate a SCSI Reset Condition, notifying the host with + a SCRD interrupt */ + scb->hastat = DID_RESET; + break; + + case 0x1a: /* SCB Aborted. 07/21/98 */ + scb->hastat = DID_ABORT; + break; + + case 0x12: /* Data overrun/underrun-The target attempted to transfer more data + than was allocated by the Data Length field or the sum of the + Scatter / Gather Data Length fields. */ + case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */ + case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid. */ + + default: + printk(KERN_DEBUG "inia100: %x %x\n", scb->hastat, scb->tastat); + scb->hastat = DID_ERROR; /* Couldn't find any better */ + break; + } + + if (scb->tastat == 2) { /* Check condition */ + memcpy((unsigned char *) &cmd->sense_buffer[0], + (unsigned char *) &escb->sglist[0], SENSE_SIZE); + } + cmd->result = scb->tastat | (scb->hastat << 16); + scsi_dma_unmap(cmd); + scsi_done(cmd); /* Notify system DONE */ + orc_release_scb(host, scb); /* Release SCB for current channel */ +} + +/** + * inia100_intr - interrupt handler + * @irqno: Interrupt value + * @devid: Host adapter + * + * Entry point for IRQ handling. All the real work is performed + * by orc_interrupt. 
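inia100_scb_handler() above folds the firmware's adapter status into a midlayer host byte and composes the result word as tastat | (host byte << 16). The sketch below restates that mapping; the DID_* values are copied from the kernel's SCSI definitions, and hastat_to_host_byte() is an invented name used only for illustration.

#include <assert.h>

#define DID_OK		0x00
#define DID_TIME_OUT	0x03
#define DID_ABORT	0x05
#define DID_ERROR	0x07
#define DID_RESET	0x08

static int hastat_to_host_byte(unsigned char hastat)
{
	switch (hastat) {
	case 0x00: case 0x0a: case 0x0b:	/* complete, incl. linked cmds */
		return DID_OK;
	case 0x11:				/* selection timeout */
		return DID_TIME_OUT;
	case 0x14:				/* bus phase sequence failure */
		return DID_RESET;
	case 0x1a:				/* SCB aborted */
		return DID_ABORT;
	default:				/* overrun, bus free, bad CCB... */
		return DID_ERROR;
	}
}

int main(void)
{
	unsigned char tastat = 0x02;	/* CHECK CONDITION from the target */
	int result = tastat | (hastat_to_host_byte(0x00) << 16);

	assert(result == 0x02);
	assert((hastat_to_host_byte(0x11) << 16) == 0x030000);
	return 0;
}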
+ */ +static irqreturn_t inia100_intr(int irqno, void *devid) +{ + struct Scsi_Host *shost = (struct Scsi_Host *)devid; + struct orc_host *host = (struct orc_host *)shost->hostdata; + unsigned long flags; + irqreturn_t res; + + spin_lock_irqsave(shost->host_lock, flags); + res = orc_interrupt(host); + spin_unlock_irqrestore(shost->host_lock, flags); + + return res; +} + +static const struct scsi_host_template inia100_template = { + .proc_name = "inia100", + .name = inia100_REVID, + .queuecommand = inia100_queue, + .eh_abort_handler = inia100_abort, + .eh_bus_reset_handler = inia100_bus_reset, + .eh_device_reset_handler = inia100_device_reset, + .can_queue = 1, + .this_id = 1, + .sg_tablesize = SG_ALL, +}; + +static int inia100_probe_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct Scsi_Host *shost; + struct orc_host *host; + unsigned long port, bios; + int error = -ENODEV; + u32 sz; + + if (pci_enable_device(pdev)) + goto out; + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { + printk(KERN_WARNING "Unable to set 32bit DMA " + "on inia100 adapter, ignoring.\n"); + goto out_disable_device; + } + + pci_set_master(pdev); + + port = pci_resource_start(pdev, 0); + if (!request_region(port, 256, "inia100")) { + printk(KERN_WARNING "inia100: io port 0x%lx, is busy.\n", port); + goto out_disable_device; + } + + /* <02> read from base address + 0x50 offset to get the bios value. */ + bios = inw(port + 0x50); + + + shost = scsi_host_alloc(&inia100_template, sizeof(struct orc_host)); + if (!shost) + goto out_release_region; + + host = (struct orc_host *)shost->hostdata; + host->pdev = pdev; + host->base = port; + host->BIOScfg = bios; + spin_lock_init(&host->allocation_lock); + + /* Get total memory needed for SCB */ + sz = ORC_MAXQUEUE * sizeof(struct orc_scb); + host->scb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->scb_phys, + GFP_KERNEL); + if (!host->scb_virt) { + printk("inia100: SCB memory allocation error\n"); + goto out_host_put; + } + + /* Get total memory needed for ESCB */ + sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb); + host->escb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->escb_phys, + GFP_KERNEL); + if (!host->escb_virt) { + printk("inia100: ESCB memory allocation error\n"); + goto out_free_scb_array; + } + + if (init_orchid(host)) { /* Initialize orchid chip */ + printk("inia100: initial orchid fail!!\n"); + goto out_free_escb_array; + } + + shost->io_port = host->base; + shost->n_io_port = 0xff; + shost->can_queue = ORC_MAXQUEUE; + shost->unique_id = shost->io_port; + shost->max_id = host->max_targets; + shost->max_lun = 16; + shost->irq = pdev->irq; + shost->this_id = host->scsi_id; /* Assign HCS index */ + shost->sg_tablesize = TOTAL_SG_ENTRY; + + /* Initial orc chip */ + error = request_irq(pdev->irq, inia100_intr, IRQF_SHARED, + "inia100", shost); + if (error < 0) { + printk(KERN_WARNING "inia100: unable to get irq %d\n", + pdev->irq); + goto out_free_escb_array; + } + + pci_set_drvdata(pdev, shost); + + error = scsi_add_host(shost, &pdev->dev); + if (error) + goto out_free_irq; + + scsi_scan_host(shost); + return 0; + +out_free_irq: + free_irq(shost->irq, shost); +out_free_escb_array: + dma_free_coherent(&pdev->dev, + ORC_MAXQUEUE * sizeof(struct orc_extended_scb), + host->escb_virt, host->escb_phys); +out_free_scb_array: + dma_free_coherent(&pdev->dev, + ORC_MAXQUEUE * sizeof(struct orc_scb), + host->scb_virt, host->scb_phys); +out_host_put: + scsi_host_put(shost); +out_release_region: + release_region(port, 256); +out_disable_device: + 
pci_disable_device(pdev); +out: + return error; +} + +static void inia100_remove_one(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct orc_host *host = (struct orc_host *)shost->hostdata; + + scsi_remove_host(shost); + + free_irq(shost->irq, shost); + dma_free_coherent(&pdev->dev, + ORC_MAXQUEUE * sizeof(struct orc_extended_scb), + host->escb_virt, host->escb_phys); + dma_free_coherent(&pdev->dev, + ORC_MAXQUEUE * sizeof(struct orc_scb), + host->scb_virt, host->scb_phys); + release_region(shost->io_port, 256); + + scsi_host_put(shost); +} + +static struct pci_device_id inia100_pci_tbl[] = { + {PCI_VENDOR_ID_INIT, 0x1060, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, inia100_pci_tbl); + +static struct pci_driver inia100_pci_driver = { + .name = "inia100", + .id_table = inia100_pci_tbl, + .probe = inia100_probe_one, + .remove = inia100_remove_one, +}; + +module_pci_driver(inia100_pci_driver); + +MODULE_DESCRIPTION("Initio A100U2W SCSI driver"); +MODULE_AUTHOR("Initio Corporation"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/scsi/a100u2w.h b/drivers/scsi/a100u2w.h new file mode 100644 index 000000000..d40e0c528 --- /dev/null +++ b/drivers/scsi/a100u2w.h @@ -0,0 +1,371 @@ +/* + * Initio A100 device driver for Linux. + * + * Copyright (c) 1994-1998 Initio Corporation + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Revision History: + * 06/18/98 HL, Initial production Version 1.02 + * 12/19/98 bv, Use spinlocks for 2.1.95 and up + * 06/25/02 Doug Ledford + * - This and the i60uscsi.h file are almost identical, + * merged them into a single header used by both .c files. 
+ * 14/06/07 Alan Cox + * - Grand cleanup and Linuxisation + */ + +#define inia100_REVID "Initio INI-A100U2W SCSI device driver; Revision: 1.02d" + +#if 1 +#define ORC_MAXQUEUE 245 +#define ORC_MAXTAGS 64 +#else +#define ORC_MAXQUEUE 25 +#define ORC_MAXTAGS 8 +#endif + +#define TOTAL_SG_ENTRY 32 +#define MAX_TARGETS 16 +#define IMAX_CDB 15 +#define SENSE_SIZE 14 + +/************************************************************************/ +/* Scatter-Gather Element Structure */ +/************************************************************************/ +struct orc_sgent { + u32 base; /* Data Pointer */ + u32 length; /* Data Length */ +}; + +/* SCSI related definition */ +#define DISC_NOT_ALLOW 0x80 /* Disconnect is not allowed */ +#define DISC_ALLOW 0xC0 /* Disconnect is allowed */ + + +#define ORC_OFFSET_SCB 16 +#define ORC_MAX_SCBS 250 +#define MAX_CHANNELS 2 +#define MAX_ESCB_ELE 64 +#define TCF_DRV_255_63 0x0400 + +/********************************************************/ +/* Orchid Host Command Set */ +/********************************************************/ +#define ORC_CMD_NOP 0x00 /* Host command - NOP */ +#define ORC_CMD_VERSION 0x01 /* Host command - Get F/W version */ +#define ORC_CMD_ECHO 0x02 /* Host command - ECHO */ +#define ORC_CMD_SET_NVM 0x03 /* Host command - Set NVRAM */ +#define ORC_CMD_GET_NVM 0x04 /* Host command - Get NVRAM */ +#define ORC_CMD_GET_BUS_STATUS 0x05 /* Host command - Get SCSI bus status */ +#define ORC_CMD_ABORT_SCB 0x06 /* Host command - Abort SCB */ +#define ORC_CMD_ISSUE_SCB 0x07 /* Host command - Issue SCB */ + +/********************************************************/ +/* Orchid Register Set */ +/********************************************************/ +#define ORC_GINTS 0xA0 /* Global Interrupt Status */ +#define QINT 0x04 /* Reply Queue Interrupt */ +#define ORC_GIMSK 0xA1 /* Global Interrupt MASK */ +#define MQINT 0x04 /* Mask Reply Queue Interrupt */ +#define ORC_GCFG 0xA2 /* Global Configure */ +#define EEPRG 0x01 /* Enable EEPROM programming */ +#define ORC_GSTAT 0xA3 /* Global status */ +#define WIDEBUS 0x10 /* Wide SCSI Devices connected */ +#define ORC_HDATA 0xA4 /* Host Data */ +#define ORC_HCTRL 0xA5 /* Host Control */ +#define SCSIRST 0x80 /* SCSI bus reset */ +#define HDO 0x40 /* Host data out */ +#define HOSTSTOP 0x02 /* Host stop RISC engine */ +#define DEVRST 0x01 /* Device reset */ +#define ORC_HSTUS 0xA6 /* Host Status */ +#define HDI 0x02 /* Host data in */ +#define RREADY 0x01 /* RISC engine is ready to receive */ +#define ORC_NVRAM 0xA7 /* Nvram port address */ +#define SE2CS 0x008 +#define SE2CLK 0x004 +#define SE2DO 0x002 +#define SE2DI 0x001 +#define ORC_PQUEUE 0xA8 /* Posting queue FIFO */ +#define ORC_PQCNT 0xA9 /* Posting queue FIFO Cnt */ +#define ORC_RQUEUE 0xAA /* Reply queue FIFO */ +#define ORC_RQUEUECNT 0xAB /* Reply queue FIFO Cnt */ +#define ORC_FWBASEADR 0xAC /* Firmware base address */ + +#define ORC_EBIOSADR0 0xB0 /* External Bios address */ +#define ORC_EBIOSADR1 0xB1 /* External Bios address */ +#define ORC_EBIOSADR2 0xB2 /* External Bios address */ +#define ORC_EBIOSDATA 0xB3 /* External Bios address */ + +#define ORC_SCBSIZE 0xB7 /* SCB size register */ +#define ORC_SCBBASE0 0xB8 /* SCB base address 0 */ +#define ORC_SCBBASE1 0xBC /* SCB base address 1 */ + +#define ORC_RISCCTL 0xE0 /* RISC Control */ +#define PRGMRST 0x002 +#define DOWNLOAD 0x001 +#define ORC_PRGMCTR0 0xE2 /* RISC program counter */ +#define ORC_PRGMCTR1 0xE3 /* RISC program counter */ +#define ORC_RISCRAM 0xEC /* RISC RAM data port 
4 bytes */ + +struct orc_extended_scb { /* Extended SCB */ + struct orc_sgent sglist[TOTAL_SG_ENTRY]; /*0 Start of SG list */ + struct scsi_cmnd *srb; /*50 SRB Pointer */ +}; + +/*********************************************************************** + SCSI Control Block + + 0x40 bytes long, the last 8 are user bytes +************************************************************************/ +struct orc_scb { /* Scsi_Ctrl_Blk */ + u8 opcode; /*00 SCB command code&residual */ + u8 flags; /*01 SCB Flags */ + u8 target; /*02 Target Id */ + u8 lun; /*03 Lun */ + u32 reserved0; /*04 Reserved for ORCHID must 0 */ + u32 xferlen; /*08 Data Transfer Length */ + u32 reserved1; /*0C Reserved for ORCHID must 0 */ + u32 sg_len; /*10 SG list # * 8 */ + u32 sg_addr; /*14 SG List Buf physical Addr */ + u32 sg_addrhigh; /*18 SG Buffer high physical Addr */ + u8 hastat; /*1C Host Status */ + u8 tastat; /*1D Target Status */ + u8 status; /*1E SCB status */ + u8 link; /*1F Link pointer, default 0xFF */ + u8 sense_len; /*20 Sense Allocation Length */ + u8 cdb_len; /*21 CDB Length */ + u8 ident; /*22 Identify */ + u8 tag_msg; /*23 Tag Message */ + u8 cdb[IMAX_CDB]; /*24 SCSI CDBs */ + u8 scbidx; /*3C Index for this ORCSCB */ + u32 sense_addr; /*34 Sense Buffer physical Addr */ + + struct orc_extended_scb *escb; /*38 Extended SCB Pointer */ + /* 64bit pointer or 32bit pointer + reserved ? */ +#ifndef CONFIG_64BIT + u8 reserved2[4]; /*3E Reserved for Driver use */ +#endif +}; + +/* Opcodes of ORCSCB_Opcode */ +#define ORC_EXECSCSI 0x00 /* SCSI initiator command with residual */ +#define ORC_BUSDEVRST 0x01 /* SCSI Bus Device Reset */ + +/* Status of ORCSCB_Status */ +#define ORCSCB_COMPLETE 0x00 /* SCB request completed */ +#define ORCSCB_POST 0x01 /* SCB is posted by the HOST */ + +/* Bit Definition for ORCSCB_Flags */ +#define SCF_DISINT 0x01 /* Disable HOST interrupt */ +#define SCF_DIR 0x18 /* Direction bits */ +#define SCF_NO_DCHK 0x00 /* Direction determined by SCSI */ +#define SCF_DIN 0x08 /* From Target to Initiator */ +#define SCF_DOUT 0x10 /* From Initiator to Target */ +#define SCF_NO_XF 0x18 /* No data transfer */ +#define SCF_POLL 0x40 + +/* Error Codes for ORCSCB_HaStat */ +#define HOST_SEL_TOUT 0x11 +#define HOST_DO_DU 0x12 +#define HOST_BUS_FREE 0x13 +#define HOST_BAD_PHAS 0x14 +#define HOST_INV_CMD 0x16 +#define HOST_SCSI_RST 0x1B +#define HOST_DEV_RST 0x1C + + +/* Error Codes for ORCSCB_TaStat */ +#define TARGET_CHK_COND 0x02 +#define TARGET_BUSY 0x08 +#define TARGET_TAG_FULL 0x28 + + +/*********************************************************************** + Target Device Control Structure +**********************************************************************/ + +struct orc_target { + u8 TCS_DrvDASD; /* 6 */ + u8 TCS_DrvSCSI; /* 7 */ + u8 TCS_DrvHead; /* 8 */ + u16 TCS_DrvFlags; /* 4 */ + u8 TCS_DrvSector; /* 7 */ +}; + +/* Bit Definition for TCF_DrvFlags */ +#define TCS_DF_NODASD_SUPT 0x20 /* Suppress OS/2 DASD Mgr support */ +#define TCS_DF_NOSCSI_SUPT 0x40 /* Suppress OS/2 SCSI Mgr support */ + + +/*********************************************************************** + Host Adapter Control Structure +************************************************************************/ +struct orc_host { + unsigned long base; /* Base address */ + u8 index; /* Index (Channel)*/ + u8 scsi_id; /* H/A SCSI ID */ + u8 BIOScfg; /*BIOS configuration */ + u8 flags; + u8 max_targets; /* SCSI0MAXTags */ + struct orc_scb *scb_virt; /* Virtual Pointer to SCB array */ + dma_addr_t scb_phys; /* Scb Physical address 
*/ + struct orc_extended_scb *escb_virt; /* Virtual pointer to ESCB Scatter list */ + dma_addr_t escb_phys; /* scatter list Physical address */ + u8 target_flag[16]; /* target configuration, TCF_EN_TAG */ + u8 max_tags[16]; /* ORC_MAX_SCBS */ + u32 allocation_map[MAX_CHANNELS][8]; /* Max STB is 256, So 256/32 */ + spinlock_t allocation_lock; + struct pci_dev *pdev; +}; + +/* Bit Definition for HCS_Flags */ + +#define HCF_SCSI_RESET 0x01 /* SCSI BUS RESET */ +#define HCF_PARITY 0x02 /* parity card */ +#define HCF_LVDS 0x10 /* parity card */ + +/* Bit Definition for TargetFlag */ + +#define TCF_EN_255 0x08 +#define TCF_EN_TAG 0x10 +#define TCF_BUSY 0x20 +#define TCF_DISCONNECT 0x40 +#define TCF_SPIN_UP 0x80 + +/* Bit Definition for HCS_AFlags */ +#define HCS_AF_IGNORE 0x01 /* Adapter ignore */ +#define HCS_AF_DISABLE_RESET 0x10 /* Adapter disable reset */ +#define HCS_AF_DISABLE_ADPT 0x80 /* Adapter disable */ + +struct orc_nvram { +/*----------header ---------------*/ + u8 SubVendorID0; /* 00 - Sub Vendor ID */ + u8 SubVendorID1; /* 00 - Sub Vendor ID */ + u8 SubSysID0; /* 02 - Sub System ID */ + u8 SubSysID1; /* 02 - Sub System ID */ + u8 SubClass; /* 04 - Sub Class */ + u8 VendorID0; /* 05 - Vendor ID */ + u8 VendorID1; /* 05 - Vendor ID */ + u8 DeviceID0; /* 07 - Device ID */ + u8 DeviceID1; /* 07 - Device ID */ + u8 Reserved0[2]; /* 09 - Reserved */ + u8 revision; /* 0B - revision of data structure */ + /* ----Host Adapter Structure ---- */ + u8 NumOfCh; /* 0C - Number of SCSI channel */ + u8 BIOSConfig1; /* 0D - BIOS configuration 1 */ + u8 BIOSConfig2; /* 0E - BIOS boot channel&target ID */ + u8 BIOSConfig3; /* 0F - BIOS configuration 3 */ + /* ----SCSI channel Structure ---- */ + /* from "CTRL-I SCSI Host Adapter SetUp menu " */ + u8 scsi_id; /* 10 - Channel 0 SCSI ID */ + u8 SCSI0Config; /* 11 - Channel 0 SCSI configuration */ + u8 SCSI0MaxTags; /* 12 - Channel 0 Maximum tags */ + u8 SCSI0ResetTime; /* 13 - Channel 0 Reset recovering time */ + u8 ReservedforChannel0[2]; /* 14 - Reserved */ + + /* ----SCSI target Structure ---- */ + /* from "CTRL-I SCSI device SetUp menu " */ + u8 Target00Config; /* 16 - Channel 0 Target 0 config */ + u8 Target01Config; /* 17 - Channel 0 Target 1 config */ + u8 Target02Config; /* 18 - Channel 0 Target 2 config */ + u8 Target03Config; /* 19 - Channel 0 Target 3 config */ + u8 Target04Config; /* 1A - Channel 0 Target 4 config */ + u8 Target05Config; /* 1B - Channel 0 Target 5 config */ + u8 Target06Config; /* 1C - Channel 0 Target 6 config */ + u8 Target07Config; /* 1D - Channel 0 Target 7 config */ + u8 Target08Config; /* 1E - Channel 0 Target 8 config */ + u8 Target09Config; /* 1F - Channel 0 Target 9 config */ + u8 Target0AConfig; /* 20 - Channel 0 Target A config */ + u8 Target0BConfig; /* 21 - Channel 0 Target B config */ + u8 Target0CConfig; /* 22 - Channel 0 Target C config */ + u8 Target0DConfig; /* 23 - Channel 0 Target D config */ + u8 Target0EConfig; /* 24 - Channel 0 Target E config */ + u8 Target0FConfig; /* 25 - Channel 0 Target F config */ + + u8 SCSI1Id; /* 26 - Channel 1 SCSI ID */ + u8 SCSI1Config; /* 27 - Channel 1 SCSI configuration */ + u8 SCSI1MaxTags; /* 28 - Channel 1 Maximum tags */ + u8 SCSI1ResetTime; /* 29 - Channel 1 Reset recovering time */ + u8 ReservedforChannel1[2]; /* 2A - Reserved */ + + /* ----SCSI target Structure ---- */ + /* from "CTRL-I SCSI device SetUp menu " */ + u8 Target10Config; /* 2C - Channel 1 Target 0 config */ + u8 Target11Config; /* 2D - Channel 1 Target 1 config */ + u8 Target12Config; /* 2E - 
Channel 1 Target 2 config */ + u8 Target13Config; /* 2F - Channel 1 Target 3 config */ + u8 Target14Config; /* 30 - Channel 1 Target 4 config */ + u8 Target15Config; /* 31 - Channel 1 Target 5 config */ + u8 Target16Config; /* 32 - Channel 1 Target 6 config */ + u8 Target17Config; /* 33 - Channel 1 Target 7 config */ + u8 Target18Config; /* 34 - Channel 1 Target 8 config */ + u8 Target19Config; /* 35 - Channel 1 Target 9 config */ + u8 Target1AConfig; /* 36 - Channel 1 Target A config */ + u8 Target1BConfig; /* 37 - Channel 1 Target B config */ + u8 Target1CConfig; /* 38 - Channel 1 Target C config */ + u8 Target1DConfig; /* 39 - Channel 1 Target D config */ + u8 Target1EConfig; /* 3A - Channel 1 Target E config */ + u8 Target1FConfig; /* 3B - Channel 1 Target F config */ + u8 reserved[3]; /* 3C - Reserved */ + /* ---------- CheckSum ---------- */ + u8 CheckSum; /* 3F - Checksum of NVRam */ +}; + +/* Bios Configuration for nvram->BIOSConfig1 */ +#define NBC_BIOSENABLE 0x01 /* BIOS enable */ +#define NBC_CDROM 0x02 /* Support bootable CDROM */ +#define NBC_REMOVABLE 0x04 /* Support removable drive */ + +/* Bios Configuration for nvram->BIOSConfig2 */ +#define NBB_TARGET_MASK 0x0F /* Boot SCSI target ID number */ +#define NBB_CHANL_MASK 0xF0 /* Boot SCSI channel number */ + +/* Bit definition for nvram->SCSIConfig */ +#define NCC_BUSRESET 0x01 /* Reset SCSI bus at power up */ +#define NCC_PARITYCHK 0x02 /* SCSI parity enable */ +#define NCC_LVDS 0x10 /* Enable LVDS */ +#define NCC_ACTTERM1 0x20 /* Enable active terminator 1 */ +#define NCC_ACTTERM2 0x40 /* Enable active terminator 2 */ +#define NCC_AUTOTERM 0x80 /* Enable auto termination */ + +/* Bit definition for nvram->TargetxConfig */ +#define NTC_PERIOD 0x07 /* Maximum Sync. Speed */ +#define NTC_1GIGA 0x08 /* 255 head / 63 sectors (64/32) */ +#define NTC_NO_SYNC 0x10 /* NO SYNC. NEGO */ +#define NTC_NO_WIDESYNC 0x20 /* NO WIDE SYNC. NEGO */ +#define NTC_DISC_ENABLE 0x40 /* Enable SCSI disconnect */ +#define NTC_SPINUP 0x80 /* Start disk drive */ + +/* Default NVRam values */ +#define NBC_DEFAULT (NBC_ENABLE) +#define NCC_DEFAULT (NCC_BUSRESET | NCC_AUTOTERM | NCC_PARITYCHK) +#define NCC_MAX_TAGS 0x20 /* Maximum tags per target */ +#define NCC_RESET_TIME 0x0A /* SCSI RESET recovering time */ +#define NTC_DEFAULT (NTC_1GIGA | NTC_NO_WIDESYNC | NTC_DISC_ENABLE) + diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c new file mode 100644 index 000000000..204448bfd --- /dev/null +++ b/drivers/scsi/a2091.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include "wd33c93.h" +#include "a2091.h" + + +struct a2091_hostdata { + struct WD33C93_hostdata wh; + struct a2091_scsiregs *regs; + struct device *dev; +}; + +#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE) + +static irqreturn_t a2091_intr(int irq, void *data) +{ + struct Scsi_Host *instance = data; + struct a2091_hostdata *hdata = shost_priv(instance); + unsigned int status = hdata->regs->ISTR; + unsigned long flags; + + if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS)) + return IRQ_NONE; + + spin_lock_irqsave(instance->host_lock, flags); + wd33c93_intr(instance); + spin_unlock_irqrestore(instance->host_lock, flags); + return IRQ_HANDLED; +} + +static int dma_setup(struct scsi_cmnd *cmd, int dir_in) +{ + struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); + unsigned long len = scsi_pointer->this_residual; + struct Scsi_Host *instance = cmd->device->host; + struct a2091_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct a2091_scsiregs *regs = hdata->regs; + unsigned short cntr = CNTR_PDMD | CNTR_INTEN; + dma_addr_t addr; + + addr = dma_map_single(hdata->dev, scsi_pointer->ptr, + len, DMA_DIR(dir_in)); + if (dma_mapping_error(hdata->dev, addr)) { + dev_warn(hdata->dev, "cannot map SCSI data block %p\n", + scsi_pointer->ptr); + return 1; + } + scsi_pointer->dma_handle = addr; + + /* don't allow DMA if the physical address is bad */ + if (addr & A2091_XFER_MASK) { + /* drop useless mapping */ + dma_unmap_single(hdata->dev, scsi_pointer->dma_handle, + scsi_pointer->this_residual, + DMA_DIR(dir_in)); + scsi_pointer->dma_handle = (dma_addr_t) NULL; + + wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff; + wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, + GFP_KERNEL); + + /* can't allocate memory; use PIO */ + if (!wh->dma_bounce_buffer) { + wh->dma_bounce_len = 0; + return 1; + } + + if (!dir_in) { + /* copy to bounce buffer for a write */ + memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr, + scsi_pointer->this_residual); + } + + /* will flush/invalidate cache for us */ + addr = dma_map_single(hdata->dev, wh->dma_bounce_buffer, + wh->dma_bounce_len, DMA_DIR(dir_in)); + /* can't map buffer; use PIO */ + if (dma_mapping_error(hdata->dev, addr)) { + dev_warn(hdata->dev, "cannot map bounce buffer %p\n", + wh->dma_bounce_buffer); + return 1; + } + + /* the bounce buffer may not be in the first 16M of physmem */ + if (addr & A2091_XFER_MASK) { + /* we could use chipmem... 
maybe later */ + kfree(wh->dma_bounce_buffer); + wh->dma_bounce_buffer = NULL; + wh->dma_bounce_len = 0; + return 1; + } + + scsi_pointer->dma_handle = addr; + } + + /* setup dma direction */ + if (!dir_in) + cntr |= CNTR_DDIR; + + /* remember direction */ + wh->dma_dir = dir_in; + + regs->CNTR = cntr; + + /* setup DMA *physical* address */ + regs->ACR = addr; + + /* no more cache flush here - dma_map_single() takes care */ + + /* start DMA */ + regs->ST_DMA = 1; + + /* return success */ + return 0; +} + +static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, + int status) +{ + struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt); + struct a2091_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct a2091_scsiregs *regs = hdata->regs; + + /* disable SCSI interrupts */ + unsigned short cntr = CNTR_PDMD; + + if (!wh->dma_dir) + cntr |= CNTR_DDIR; + + /* disable SCSI interrupts */ + regs->CNTR = cntr; + + /* flush if we were reading */ + if (wh->dma_dir) { + regs->FLUSH = 1; + while (!(regs->ISTR & ISTR_FE_FLG)) + ; + } + + /* clear a possible interrupt */ + regs->CINT = 1; + + /* stop DMA */ + regs->SP_DMA = 1; + + /* restore the CONTROL bits (minus the direction flag) */ + regs->CNTR = CNTR_PDMD | CNTR_INTEN; + + dma_unmap_single(hdata->dev, scsi_pointer->dma_handle, + scsi_pointer->this_residual, + DMA_DIR(wh->dma_dir)); + + /* copy from a bounce buffer, if necessary */ + if (status && wh->dma_bounce_buffer) { + if (wh->dma_dir) + memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer, + scsi_pointer->this_residual); + kfree(wh->dma_bounce_buffer); + wh->dma_bounce_buffer = NULL; + wh->dma_bounce_len = 0; + } +} + +static const struct scsi_host_template a2091_scsi_template = { + .module = THIS_MODULE, + .name = "Commodore A2091/A590 SCSI", + .show_info = wd33c93_show_info, + .write_info = wd33c93_write_info, + .proc_name = "A2901", + .queuecommand = wd33c93_queuecommand, + .eh_abort_handler = wd33c93_abort, + .eh_host_reset_handler = wd33c93_host_reset, + .can_queue = CAN_QUEUE, + .this_id = 7, + .sg_tablesize = SG_ALL, + .cmd_per_lun = CMD_PER_LUN, + .dma_boundary = PAGE_SIZE - 1, + .cmd_size = sizeof(struct scsi_pointer), +}; + +static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent) +{ + struct Scsi_Host *instance; + int error; + struct a2091_scsiregs *regs; + wd33c93_regs wdregs; + struct a2091_hostdata *hdata; + + if (dma_set_mask_and_coherent(&z->dev, DMA_BIT_MASK(24))) { + dev_warn(&z->dev, "cannot use 24 bit DMA\n"); + return -ENODEV; + } + + if (!request_mem_region(z->resource.start, 256, "wd33c93")) + return -EBUSY; + + instance = scsi_host_alloc(&a2091_scsi_template, + sizeof(struct a2091_hostdata)); + if (!instance) { + error = -ENOMEM; + goto fail_alloc; + } + + instance->irq = IRQ_AMIGA_PORTS; + instance->unique_id = z->slotaddr; + + regs = ZTWO_VADDR(z->resource.start); + regs->DAWR = DAWR_A2091; + + wdregs.SASR = ®s->SASR; + wdregs.SCMD = ®s->SCMD; + + hdata = shost_priv(instance); + hdata->dev = &z->dev; + hdata->wh.no_sync = 0xff; + hdata->wh.fast = 0; + hdata->wh.dma_mode = CTRL_DMA; + hdata->regs = regs; + + wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_8_10); + error = request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, + "A2091 SCSI", instance); + if (error) + goto fail_irq; + + regs->CNTR = CNTR_PDMD | CNTR_INTEN; + + error = scsi_add_host(instance, NULL); + if (error) + goto fail_host; + + zorro_set_drvdata(z, instance); + + scsi_scan_host(instance); + return 0; + 
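+	/*
+	 * Note (descriptive comment, not functional): the error unwind
+	 * below mirrors the setup order in reverse -- free the shared
+	 * IRQ, drop the host reference, then release the MMIO region
+	 * claimed at the top of the probe routine.
+	 */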
+fail_host: + free_irq(IRQ_AMIGA_PORTS, instance); +fail_irq: + scsi_host_put(instance); +fail_alloc: + release_mem_region(z->resource.start, 256); + return error; +} + +static void a2091_remove(struct zorro_dev *z) +{ + struct Scsi_Host *instance = zorro_get_drvdata(z); + struct a2091_hostdata *hdata = shost_priv(instance); + + hdata->regs->CNTR = 0; + scsi_remove_host(instance); + free_irq(IRQ_AMIGA_PORTS, instance); + scsi_host_put(instance); + release_mem_region(z->resource.start, 256); +} + +static struct zorro_device_id a2091_zorro_tbl[] = { + { ZORRO_PROD_CBM_A590_A2091_1 }, + { ZORRO_PROD_CBM_A590_A2091_2 }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, a2091_zorro_tbl); + +static struct zorro_driver a2091_driver = { + .name = "a2091", + .id_table = a2091_zorro_tbl, + .probe = a2091_probe, + .remove = a2091_remove, +}; + +static int __init a2091_init(void) +{ + return zorro_register_driver(&a2091_driver); +} +module_init(a2091_init); + +static void __exit a2091_exit(void) +{ + zorro_unregister_driver(&a2091_driver); +} +module_exit(a2091_exit); + +MODULE_DESCRIPTION("Commodore A2091/A590 SCSI"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h new file mode 100644 index 000000000..8d8a4074a --- /dev/null +++ b/drivers/scsi/a2091.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef A2091_H +#define A2091_H + +/* $Id: a2091.h,v 1.4 1997/01/19 23:07:09 davem Exp $ + * + * Header file for the Commodore A2091 Zorro II SCSI controller for Linux + * + * Written and (C) 1993, Hamish Macdonald, see a2091.c for more info + * + */ + +#include + +#ifndef CMD_PER_LUN +#define CMD_PER_LUN 2 +#endif + +#ifndef CAN_QUEUE +#define CAN_QUEUE 16 +#endif + +/* + * if the transfer address ANDed with this results in a non-zero + * result, then we can't use DMA. + */ +#define A2091_XFER_MASK (0xff000001) + +struct a2091_scsiregs { + unsigned char pad1[64]; + volatile unsigned short ISTR; + volatile unsigned short CNTR; + unsigned char pad2[60]; + volatile unsigned int WTC; + volatile unsigned long ACR; + unsigned char pad3[6]; + volatile unsigned short DAWR; + unsigned char pad4; + volatile unsigned char SASR; + unsigned char pad5; + volatile unsigned char SCMD; + unsigned char pad6[76]; + volatile unsigned short ST_DMA; + volatile unsigned short SP_DMA; + volatile unsigned short CINT; + unsigned char pad7[2]; + volatile unsigned short FLUSH; +}; + +#define DAWR_A2091 (3) + +/* CNTR bits. */ +#define CNTR_TCEN (1<<7) +#define CNTR_PREST (1<<6) +#define CNTR_PDMD (1<<5) +#define CNTR_INTEN (1<<4) +#define CNTR_DDIR (1<<3) + +/* ISTR bits. */ +#define ISTR_INTX (1<<8) +#define ISTR_INT_F (1<<7) +#define ISTR_INTS (1<<6) +#define ISTR_E_INT (1<<5) +#define ISTR_INT_P (1<<4) +#define ISTR_UE_INT (1<<3) +#define ISTR_OE_INT (1<<2) +#define ISTR_FF_FLG (1<<1) +#define ISTR_FE_FLG (1<<0) + +#endif /* A2091_H */ diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c new file mode 100644 index 000000000..c3028726b --- /dev/null +++ b/drivers/scsi/a3000.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include "wd33c93.h" +#include "a3000.h" + + +struct a3000_hostdata { + struct WD33C93_hostdata wh; + struct a3000_scsiregs *regs; + struct device *dev; +}; + +#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE) + +static irqreturn_t a3000_intr(int irq, void *data) +{ + struct Scsi_Host *instance = data; + struct a3000_hostdata *hdata = shost_priv(instance); + unsigned int status = hdata->regs->ISTR; + unsigned long flags; + + if (!(status & ISTR_INT_P)) + return IRQ_NONE; + if (status & ISTR_INTS) { + spin_lock_irqsave(instance->host_lock, flags); + wd33c93_intr(instance); + spin_unlock_irqrestore(instance->host_lock, flags); + return IRQ_HANDLED; + } + pr_warn("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status); + return IRQ_NONE; +} + +static int dma_setup(struct scsi_cmnd *cmd, int dir_in) +{ + struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); + unsigned long len = scsi_pointer->this_residual; + struct Scsi_Host *instance = cmd->device->host; + struct a3000_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct a3000_scsiregs *regs = hdata->regs; + unsigned short cntr = CNTR_PDMD | CNTR_INTEN; + dma_addr_t addr; + + addr = dma_map_single(hdata->dev, scsi_pointer->ptr, + len, DMA_DIR(dir_in)); + if (dma_mapping_error(hdata->dev, addr)) { + dev_warn(hdata->dev, "cannot map SCSI data block %p\n", + scsi_pointer->ptr); + return 1; + } + scsi_pointer->dma_handle = addr; + + /* + * if the physical address has the wrong alignment, or if + * physical address is bad, or if it is a write and at the + * end of a physical memory chunk, then allocate a bounce + * buffer + * MSch 20220629 - only wrong alignment tested - bounce + * buffer returned by kmalloc is guaranteed to be aligned + */ + if (addr & A3000_XFER_MASK) { + WARN_ONCE(1, "Invalid alignment for DMA!"); + /* drop useless mapping */ + dma_unmap_single(hdata->dev, scsi_pointer->dma_handle, + scsi_pointer->this_residual, + DMA_DIR(dir_in)); + + wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff; + wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, + GFP_KERNEL); + + /* can't allocate memory; use PIO */ + if (!wh->dma_bounce_buffer) { + wh->dma_bounce_len = 0; + scsi_pointer->dma_handle = (dma_addr_t) NULL; + return 1; + } + + if (!dir_in) { + /* copy to bounce buffer for a write */ + memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr, + scsi_pointer->this_residual); + } + + addr = dma_map_single(hdata->dev, scsi_pointer->ptr, + len, DMA_DIR(dir_in)); + if (dma_mapping_error(hdata->dev, addr)) { + dev_warn(hdata->dev, + "cannot map SCSI data block %p\n", + scsi_pointer->ptr); + return 1; + } + scsi_pointer->dma_handle = addr; + } + + /* setup dma direction */ + if (!dir_in) + cntr |= CNTR_DDIR; + + /* remember direction */ + wh->dma_dir = dir_in; + + regs->CNTR = cntr; + + /* setup DMA *physical* address */ + regs->ACR = addr; + + /* no more cache flush here - dma_map_single() takes care */ + + /* start DMA */ + mb(); /* make sure setup is completed */ + regs->ST_DMA = 1; + mb(); /* make sure DMA has started before next IO */ + + /* return success */ + return 0; +} + +static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, + int status) +{ + struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt); + struct a3000_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct a3000_scsiregs *regs = hdata->regs; + + /* disable SCSI interrupts */ + unsigned short cntr = CNTR_PDMD; + + if (!wh->dma_dir) + cntr |= CNTR_DDIR; + + regs->CNTR = cntr; + mb(); /* make sure CNTR is updated before next IO */ + + /* flush if we were reading */ + if (wh->dma_dir) { + regs->FLUSH = 1; + mb(); /* don't 
allow prefetch */ + while (!(regs->ISTR & ISTR_FE_FLG)) + barrier(); + mb(); /* no IO until FLUSH is done */ + } + + /* clear a possible interrupt */ + /* I think that this CINT is only necessary if you are + * using the terminal count features. HM 7 Mar 1994 + */ + regs->CINT = 1; + + /* stop DMA */ + regs->SP_DMA = 1; + mb(); /* make sure DMA is stopped before next IO */ + + /* restore the CONTROL bits (minus the direction flag) */ + regs->CNTR = CNTR_PDMD | CNTR_INTEN; + mb(); /* make sure CNTR is updated before next IO */ + + dma_unmap_single(hdata->dev, scsi_pointer->dma_handle, + scsi_pointer->this_residual, + DMA_DIR(wh->dma_dir)); + + /* copy from a bounce buffer, if necessary */ + if (status && wh->dma_bounce_buffer) { + if (SCpnt) { + if (wh->dma_dir && SCpnt) + memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer, + scsi_pointer->this_residual); + kfree(wh->dma_bounce_buffer); + wh->dma_bounce_buffer = NULL; + wh->dma_bounce_len = 0; + } else { + kfree(wh->dma_bounce_buffer); + wh->dma_bounce_buffer = NULL; + wh->dma_bounce_len = 0; + } + } +} + +static const struct scsi_host_template amiga_a3000_scsi_template = { + .module = THIS_MODULE, + .name = "Amiga 3000 built-in SCSI", + .show_info = wd33c93_show_info, + .write_info = wd33c93_write_info, + .proc_name = "A3000", + .queuecommand = wd33c93_queuecommand, + .eh_abort_handler = wd33c93_abort, + .eh_host_reset_handler = wd33c93_host_reset, + .can_queue = CAN_QUEUE, + .this_id = 7, + .sg_tablesize = SG_ALL, + .cmd_per_lun = CMD_PER_LUN, + .cmd_size = sizeof(struct scsi_pointer), +}; + +static int __init amiga_a3000_scsi_probe(struct platform_device *pdev) +{ + struct resource *res; + struct Scsi_Host *instance; + int error; + struct a3000_scsiregs *regs; + wd33c93_regs wdregs; + struct a3000_hostdata *hdata; + + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + dev_warn(&pdev->dev, "cannot use 32 bit DMA\n"); + return -ENODEV; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + if (!request_mem_region(res->start, resource_size(res), "wd33c93")) + return -EBUSY; + + instance = scsi_host_alloc(&amiga_a3000_scsi_template, + sizeof(struct a3000_hostdata)); + if (!instance) { + error = -ENOMEM; + goto fail_alloc; + } + + instance->irq = IRQ_AMIGA_PORTS; + + regs = ZTWO_VADDR(res->start); + regs->DAWR = DAWR_A3000; + + wdregs.SASR = ®s->SASR; + wdregs.SCMD = ®s->SCMD; + + hdata = shost_priv(instance); + hdata->dev = &pdev->dev; + hdata->wh.no_sync = 0xff; + hdata->wh.fast = 0; + hdata->wh.dma_mode = CTRL_DMA; + hdata->regs = regs; + + wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_12_15); + error = request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, + "A3000 SCSI", instance); + if (error) + goto fail_irq; + + regs->CNTR = CNTR_PDMD | CNTR_INTEN; + + error = scsi_add_host(instance, NULL); + if (error) + goto fail_host; + + platform_set_drvdata(pdev, instance); + + scsi_scan_host(instance); + return 0; + +fail_host: + free_irq(IRQ_AMIGA_PORTS, instance); +fail_irq: + scsi_host_put(instance); +fail_alloc: + release_mem_region(res->start, resource_size(res)); + return error; +} + +static int __exit amiga_a3000_scsi_remove(struct platform_device *pdev) +{ + struct Scsi_Host *instance = platform_get_drvdata(pdev); + struct a3000_hostdata *hdata = shost_priv(instance); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + hdata->regs->CNTR = 0; + scsi_remove_host(instance); + free_irq(IRQ_AMIGA_PORTS, instance); + scsi_host_put(instance); + 
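+	/*
+	 * Note (descriptive comment, not functional): the MMIO region is
+	 * released last; DMA interrupts were already disabled via CNTR
+	 * above and the shared IRQ handler has been freed, so nothing can
+	 * touch the board registers any more.
+	 */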
release_mem_region(res->start, resource_size(res)); + return 0; +} + +static struct platform_driver amiga_a3000_scsi_driver = { + .remove = __exit_p(amiga_a3000_scsi_remove), + .driver = { + .name = "amiga-a3000-scsi", + }, +}; + +module_platform_driver_probe(amiga_a3000_scsi_driver, amiga_a3000_scsi_probe); + +MODULE_DESCRIPTION("Amiga 3000 built-in SCSI"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:amiga-a3000-scsi"); diff --git a/drivers/scsi/a3000.h b/drivers/scsi/a3000.h new file mode 100644 index 000000000..5cb3e7535 --- /dev/null +++ b/drivers/scsi/a3000.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef A3000_H +#define A3000_H + +/* $Id: a3000.h,v 1.4 1997/01/19 23:07:10 davem Exp $ + * + * Header file for the Amiga 3000 built-in SCSI controller for Linux + * + * Written and (C) 1993, Hamish Macdonald, see a3000.c for more info + * + */ + +#include + +#ifndef CMD_PER_LUN +#define CMD_PER_LUN 2 +#endif + +#ifndef CAN_QUEUE +#define CAN_QUEUE 16 +#endif + +/* + * if the transfer address ANDed with this results in a non-zero + * result, then we can't use DMA. + */ +#define A3000_XFER_MASK (0x00000003) + +struct a3000_scsiregs { + unsigned char pad1[2]; + volatile unsigned short DAWR; + volatile unsigned int WTC; + unsigned char pad2[2]; + volatile unsigned short CNTR; + volatile unsigned long ACR; + unsigned char pad3[2]; + volatile unsigned short ST_DMA; + unsigned char pad4[2]; + volatile unsigned short FLUSH; + unsigned char pad5[2]; + volatile unsigned short CINT; + unsigned char pad6[2]; + volatile unsigned short ISTR; + unsigned char pad7[30]; + volatile unsigned short SP_DMA; + unsigned char pad8; + volatile unsigned char SASR; + unsigned char pad9; + volatile unsigned char SCMD; +}; + +#define DAWR_A3000 (3) + +/* CNTR bits. */ +#define CNTR_TCEN (1<<5) +#define CNTR_PREST (1<<4) +#define CNTR_PDMD (1<<3) +#define CNTR_INTEN (1<<2) +#define CNTR_DDIR (1<<1) +#define CNTR_IO_DX (1<<0) + +/* ISTR bits. */ +#define ISTR_INTX (1<<8) +#define ISTR_INT_F (1<<7) +#define ISTR_INTS (1<<6) +#define ISTR_E_INT (1<<5) +#define ISTR_INT_P (1<<4) +#define ISTR_UE_INT (1<<3) +#define ISTR_OE_INT (1<<2) +#define ISTR_FF_FLG (1<<1) +#define ISTR_FE_FLG (1<<0) + +#endif /* A3000_H */ diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c new file mode 100644 index 000000000..5e575afce --- /dev/null +++ b/drivers/scsi/a4000t.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux. + * Amiga Technologies A4000T SCSI controller. + * + * Written 1997 by Alan Hourihane + * plus modifications of the 53c7xx.c driver to support the Amiga. 
+ * + * Rewritten to use 53c700.c by Kars de Jong + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "53c700.h" + + +static struct scsi_host_template a4000t_scsi_driver_template = { + .name = "A4000T builtin SCSI", + .proc_name = "A4000t", + .this_id = 7, + .module = THIS_MODULE, +}; + + +#define A4000T_SCSI_OFFSET 0x40 + +static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev) +{ + struct resource *res; + phys_addr_t scsi_addr; + struct NCR_700_Host_Parameters *hostdata; + struct Scsi_Host *host; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + if (!request_mem_region(res->start, resource_size(res), + "A4000T builtin SCSI")) + return -EBUSY; + + hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), + GFP_KERNEL); + if (!hostdata) { + dev_err(&pdev->dev, "Failed to allocate host data\n"); + goto out_release; + } + + scsi_addr = res->start + A4000T_SCSI_OFFSET; + + /* Fill in the required pieces of hostdata */ + hostdata->base = ZTWO_VADDR(scsi_addr); + hostdata->clock = 50; + hostdata->chip710 = 1; + hostdata->dmode_extra = DMODE_FC2; + hostdata->dcntl_extra = EA_710; + + /* and register the chip */ + host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata, + &pdev->dev); + if (!host) { + dev_err(&pdev->dev, + "No host detected; board configuration problem?\n"); + goto out_free; + } + + host->this_id = 7; + host->base = scsi_addr; + host->irq = IRQ_AMIGA_PORTS; + + if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi", + host)) { + dev_err(&pdev->dev, "request_irq failed\n"); + goto out_put_host; + } + + platform_set_drvdata(pdev, host); + scsi_scan_host(host); + return 0; + + out_put_host: + scsi_host_put(host); + out_free: + kfree(hostdata); + out_release: + release_mem_region(res->start, resource_size(res)); + return -ENODEV; +} + +static int __exit amiga_a4000t_scsi_remove(struct platform_device *pdev) +{ + struct Scsi_Host *host = platform_get_drvdata(pdev); + struct NCR_700_Host_Parameters *hostdata = shost_priv(host); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + scsi_remove_host(host); + NCR_700_release(host); + kfree(hostdata); + free_irq(host->irq, host); + release_mem_region(res->start, resource_size(res)); + return 0; +} + +static struct platform_driver amiga_a4000t_scsi_driver = { + .remove = __exit_p(amiga_a4000t_scsi_remove), + .driver = { + .name = "amiga-a4000t-scsi", + }, +}; + +module_platform_driver_probe(amiga_a4000t_scsi_driver, amiga_a4000t_scsi_probe); + +MODULE_AUTHOR("Alan Hourihane / " + "Kars de Jong "); +MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:amiga-a4000t-scsi"); diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile new file mode 100644 index 000000000..8f0eec682 --- /dev/null +++ b/drivers/scsi/aacraid/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Adaptec aacraid + +obj-$(CONFIG_SCSI_AACRAID) := aacraid.o + +aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \ + dpcsup.o rx.o sa.o rkt.o nark.o src.o diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c new file mode 100644 index 000000000..70e1cac19 --- /dev/null +++ b/drivers/scsi/aacraid/aachba.c @@ -0,0 +1,4164 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Adaptec AAC series RAID controller driver + * (c) Copyright 2001 Red Hat Inc. + * + * based on the old aacraid driver that is.. 
+ * Adaptec aacraid device driver for Linux. + * + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) + * + * Module Name: + * aachba.c + * + * Abstract: Contains Interfaces to manage IOs. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include "aacraid.h" + +/* values for inqd_pdt: Peripheral device type in plain English */ +#define INQD_PDT_DA 0x00 /* Direct-access (DISK) device */ +#define INQD_PDT_PROC 0x03 /* Processor device */ +#define INQD_PDT_CHNGR 0x08 /* Changer (jukebox, scsi2) */ +#define INQD_PDT_COMM 0x09 /* Communication device (scsi2) */ +#define INQD_PDT_NOLUN2 0x1f /* Unknown Device (scsi2) */ +#define INQD_PDT_NOLUN 0x7f /* Logical Unit Not Present */ + +#define INQD_PDT_DMASK 0x1F /* Peripheral Device Type Mask */ +#define INQD_PDT_QMASK 0xE0 /* Peripheral Device Qualifer Mask */ + +/* + * Sense codes + */ + +#define SENCODE_NO_SENSE 0x00 +#define SENCODE_END_OF_DATA 0x00 +#define SENCODE_BECOMING_READY 0x04 +#define SENCODE_INIT_CMD_REQUIRED 0x04 +#define SENCODE_UNRECOVERED_READ_ERROR 0x11 +#define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A +#define SENCODE_INVALID_COMMAND 0x20 +#define SENCODE_LBA_OUT_OF_RANGE 0x21 +#define SENCODE_INVALID_CDB_FIELD 0x24 +#define SENCODE_LUN_NOT_SUPPORTED 0x25 +#define SENCODE_INVALID_PARAM_FIELD 0x26 +#define SENCODE_PARAM_NOT_SUPPORTED 0x26 +#define SENCODE_PARAM_VALUE_INVALID 0x26 +#define SENCODE_RESET_OCCURRED 0x29 +#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x3E +#define SENCODE_INQUIRY_DATA_CHANGED 0x3F +#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x39 +#define SENCODE_DIAGNOSTIC_FAILURE 0x40 +#define SENCODE_INTERNAL_TARGET_FAILURE 0x44 +#define SENCODE_INVALID_MESSAGE_ERROR 0x49 +#define SENCODE_LUN_FAILED_SELF_CONFIG 0x4c +#define SENCODE_OVERLAPPED_COMMAND 0x4E + +/* + * Additional sense codes + */ + +#define ASENCODE_NO_SENSE 0x00 +#define ASENCODE_END_OF_DATA 0x05 +#define ASENCODE_BECOMING_READY 0x01 +#define ASENCODE_INIT_CMD_REQUIRED 0x02 +#define ASENCODE_PARAM_LIST_LENGTH_ERROR 0x00 +#define ASENCODE_INVALID_COMMAND 0x00 +#define ASENCODE_LBA_OUT_OF_RANGE 0x00 +#define ASENCODE_INVALID_CDB_FIELD 0x00 +#define ASENCODE_LUN_NOT_SUPPORTED 0x00 +#define ASENCODE_INVALID_PARAM_FIELD 0x00 +#define ASENCODE_PARAM_NOT_SUPPORTED 0x01 +#define ASENCODE_PARAM_VALUE_INVALID 0x02 +#define ASENCODE_RESET_OCCURRED 0x00 +#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x00 +#define ASENCODE_INQUIRY_DATA_CHANGED 0x03 +#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x00 +#define ASENCODE_DIAGNOSTIC_FAILURE 0x80 +#define ASENCODE_INTERNAL_TARGET_FAILURE 0x00 +#define ASENCODE_INVALID_MESSAGE_ERROR 0x00 +#define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00 +#define ASENCODE_OVERLAPPED_COMMAND 0x00 + +#define BYTE0(x) (unsigned char)(x) +#define BYTE1(x) (unsigned char)((x) >> 8) +#define BYTE2(x) (unsigned char)((x) >> 16) +#define BYTE3(x) (unsigned char)((x) >> 24) + +/* MODE_SENSE data format */ +typedef struct { + struct { + u8 data_length; + u8 med_type; + u8 dev_par; + u8 bd_length; + } __attribute__((packed)) hd; + struct { + u8 dens_code; + u8 block_count[3]; + u8 reserved; + u8 block_length[3]; + } __attribute__((packed)) bd; + u8 mpc_buf[3]; +} __attribute__((packed)) aac_modep_data; + +/* MODE_SENSE_10 data format */ +typedef struct { + struct { + u8 data_length[2]; + u8 med_type; + u8 dev_par; + u8 rsrvd[2]; + u8 bd_length[2]; + 
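+		/* data_length and bd_length are 16-bit big-endian
+		 * fields of the MODE SENSE(10) parameter header */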
} __attribute__((packed)) hd; + struct { + u8 dens_code; + u8 block_count[3]; + u8 reserved; + u8 block_length[3]; + } __attribute__((packed)) bd; + u8 mpc_buf[3]; +} __attribute__((packed)) aac_modep10_data; + +/*------------------------------------------------------------------------------ + * S T R U C T S / T Y P E D E F S + *----------------------------------------------------------------------------*/ +/* SCSI inquiry data */ +struct inquiry_data { + u8 inqd_pdt; /* Peripheral qualifier | Peripheral Device Type */ + u8 inqd_dtq; /* RMB | Device Type Qualifier */ + u8 inqd_ver; /* ISO version | ECMA version | ANSI-approved version */ + u8 inqd_rdf; /* AENC | TrmIOP | Response data format */ + u8 inqd_len; /* Additional length (n-4) */ + u8 inqd_pad1[2];/* Reserved - must be zero */ + u8 inqd_pad2; /* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */ + u8 inqd_vid[8]; /* Vendor ID */ + u8 inqd_pid[16];/* Product ID */ + u8 inqd_prl[4]; /* Product Revision Level */ +}; + +/* Added for VPD 0x83 */ +struct tvpd_id_descriptor_type_1 { + u8 codeset:4; /* VPD_CODE_SET */ + u8 reserved:4; + u8 identifiertype:4; /* VPD_IDENTIFIER_TYPE */ + u8 reserved2:4; + u8 reserved3; + u8 identifierlength; + u8 venid[8]; + u8 productid[16]; + u8 serialnumber[8]; /* SN in ASCII */ + +}; + +struct tvpd_id_descriptor_type_2 { + u8 codeset:4; /* VPD_CODE_SET */ + u8 reserved:4; + u8 identifiertype:4; /* VPD_IDENTIFIER_TYPE */ + u8 reserved2:4; + u8 reserved3; + u8 identifierlength; + struct teu64id { + u32 Serial; + /* The serial number supposed to be 40 bits, + * bit we only support 32, so make the last byte zero. */ + u8 reserved; + u8 venid[3]; + } eu64id; + +}; + +struct tvpd_id_descriptor_type_3 { + u8 codeset : 4; /* VPD_CODE_SET */ + u8 reserved : 4; + u8 identifiertype : 4; /* VPD_IDENTIFIER_TYPE */ + u8 reserved2 : 4; + u8 reserved3; + u8 identifierlength; + u8 Identifier[16]; +}; + +struct tvpd_page83 { + u8 DeviceType:5; + u8 DeviceTypeQualifier:3; + u8 PageCode; + u8 reserved; + u8 PageLength; + struct tvpd_id_descriptor_type_1 type1; + struct tvpd_id_descriptor_type_2 type2; + struct tvpd_id_descriptor_type_3 type3; +}; + +/* + * M O D U L E G L O B A L S + */ + +static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap); +static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg); +static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg); +static long aac_build_sgraw2(struct scsi_cmnd *scsicmd, + struct aac_raw_io2 *rio2, int sg_max); +static long aac_build_sghba(struct scsi_cmnd *scsicmd, + struct aac_hba_cmd_req *hbacmd, + int sg_max, u64 sg_address); +static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, + int pages, int nseg, int nseg_new); +static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd); +static int aac_send_srb_fib(struct scsi_cmnd* scsicmd); +static int aac_send_hba_fib(struct scsi_cmnd *scsicmd); +#ifdef AAC_DETAILED_STATUS_INFO +static char *aac_get_status_string(u32 status); +#endif + +/* + * Non dasd selection is handled entirely in aachba now + */ + +static int nondasd = -1; +static int aac_cache = 2; /* WCE=0 to avoid performance problems */ +static int dacmode = -1; +int aac_msi; +int aac_commit = -1; +int startup_timeout = 180; +int aif_timeout = 120; +int aac_sync_mode; /* Only Sync. transfer - disabled */ +static int aac_convert_sgl = 1; /* convert non-conformable s/g list - enabled */ + +module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(aac_sync_mode, "Force sync. 
transfer mode" + " 0=off, 1=on"); +module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list" + " 0=off, 1=on"); +module_param(nondasd, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices." + " 0=off, 1=on"); +module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n" + "\tbit 0 - Disable FUA in WRITE SCSI commands\n" + "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n" + "\tbit 2 - Disable only if Battery is protecting Cache"); +module_param(dacmode, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC." + " 0=off, 1=on"); +module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the" + " adapter for foreign arrays.\n" + "This is typically needed in systems that do not have a BIOS." + " 0=off, 1=on"); +module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(msi, "IRQ handling." + " 0=PIC(default), 1=MSI, 2=MSI-X)"); +module_param(startup_timeout, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for" + " adapter to have its kernel up and\n" + "running. This is typically adjusted for large systems that do not" + " have a BIOS."); +module_param(aif_timeout, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for" + " applications to pick up AIFs before\n" + "deregistering them. This is typically adjusted for heavily burdened" + " systems."); + +int aac_fib_dump; +module_param(aac_fib_dump, int, 0644); +MODULE_PARM_DESC(aac_fib_dump, "Dump controller fibs prior to IOP_RESET 0=off, 1=on"); + +int numacb = -1; +module_param(numacb, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control" + " blocks (FIB) allocated. Valid values are 512 and down. Default is" + " to use suggestion from Firmware."); + +static int acbsize = -1; +module_param(acbsize, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)" + " size. Valid values are 512, 2048, 4096 and 8192. Default is to use" + " suggestion from Firmware."); + +int update_interval = 30 * 60; +module_param(update_interval, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync" + " updates issued to adapter."); + +int check_interval = 60; +module_param(check_interval, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health" + " checks."); + +int aac_check_reset = 1; +module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the" + " adapter. a value of -1 forces the reset to adapters programmed to" + " ignore it."); + +int expose_physicals = -1; +module_param(expose_physicals, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays." 
+ " -1=protect 0=off, 1=on"); + +int aac_reset_devices; +module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization."); + +static int aac_wwn = 1; +module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n" + "\t0 - Disable\n" + "\t1 - Array Meta Data Signature (default)\n" + "\t2 - Adapter Serial Number"); + + +static inline int aac_valid_context(struct scsi_cmnd *scsicmd, + struct fib *fibptr) { + struct scsi_device *device; + + if (unlikely(!scsicmd)) { + dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n")); + aac_fib_complete(fibptr); + return 0; + } + aac_priv(scsicmd)->owner = AAC_OWNER_MIDLEVEL; + device = scsicmd->device; + if (unlikely(!device)) { + dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n")); + aac_fib_complete(fibptr); + return 0; + } + return 1; +} + +/** + * aac_get_config_status - check the adapter configuration + * @dev: aac driver data + * @commit_flag: force sending CT_COMMIT_CONFIG + * + * Query config status, and commit the configuration if needed. + */ +int aac_get_config_status(struct aac_dev *dev, int commit_flag) +{ + int status = 0; + struct fib * fibptr; + + if (!(fibptr = aac_fib_alloc(dev))) + return -ENOMEM; + + aac_fib_init(fibptr); + { + struct aac_get_config_status *dinfo; + dinfo = (struct aac_get_config_status *) fib_data(fibptr); + + dinfo->command = cpu_to_le32(VM_ContainerConfig); + dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS); + dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data)); + } + + status = aac_fib_send(ContainerCommand, + fibptr, + sizeof (struct aac_get_config_status), + FsaNormal, + 1, 1, + NULL, NULL); + if (status < 0) { + printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n"); + } else { + struct aac_get_config_status_resp *reply + = (struct aac_get_config_status_resp *) fib_data(fibptr); + dprintk((KERN_WARNING + "aac_get_config_status: response=%d status=%d action=%d\n", + le32_to_cpu(reply->response), + le32_to_cpu(reply->status), + le32_to_cpu(reply->data.action))); + if ((le32_to_cpu(reply->response) != ST_OK) || + (le32_to_cpu(reply->status) != CT_OK) || + (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) { + printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n"); + status = -EINVAL; + } + } + /* Do not set XferState to zero unless receives a response from F/W */ + if (status >= 0) + aac_fib_complete(fibptr); + + /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ + if (status >= 0) { + if ((aac_commit == 1) || commit_flag) { + struct aac_commit_config * dinfo; + aac_fib_init(fibptr); + dinfo = (struct aac_commit_config *) fib_data(fibptr); + + dinfo->command = cpu_to_le32(VM_ContainerConfig); + dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG); + + status = aac_fib_send(ContainerCommand, + fibptr, + sizeof (struct aac_commit_config), + FsaNormal, + 1, 1, + NULL, NULL); + /* Do not set XferState to zero unless + * receives a response from F/W */ + if (status >= 0) + aac_fib_complete(fibptr); + } else if (aac_commit == 0) { + printk(KERN_WARNING + "aac_get_config_status: Foreign device configurations are being ignored\n"); + } + } + /* FIB should be freed only after getting the response from the F/W */ + if (status != -ERESTARTSYS) + aac_fib_free(fibptr); + return status; +} + +static void aac_expose_phy_device(struct scsi_cmnd *scsicmd) +{ + char inq_data; + 
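+	/*
+	 * Byte 0 of the INQUIRY response holds the peripheral qualifier
+	 * (bits 7-5) and the device type (bits 4-0).  If qualifier bit 5
+	 * is set for a TYPE_DISK device, clear it in place so the drive
+	 * reports as currently connected and is exposed to the midlayer.
+	 */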
scsi_sg_copy_to_buffer(scsicmd, &inq_data, sizeof(inq_data)); + if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) { + inq_data &= 0xdf; + scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data)); + } +} + +/** + * aac_get_containers - list containers + * @dev: aac driver data + * + * Make a list of all containers on this controller + */ +int aac_get_containers(struct aac_dev *dev) +{ + struct fsa_dev_info *fsa_dev_ptr; + u32 index; + int status = 0; + struct fib * fibptr; + struct aac_get_container_count *dinfo; + struct aac_get_container_count_resp *dresp; + int maximum_num_containers = MAXIMUM_NUM_CONTAINERS; + + if (!(fibptr = aac_fib_alloc(dev))) + return -ENOMEM; + + aac_fib_init(fibptr); + dinfo = (struct aac_get_container_count *) fib_data(fibptr); + dinfo->command = cpu_to_le32(VM_ContainerConfig); + dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT); + + status = aac_fib_send(ContainerCommand, + fibptr, + sizeof (struct aac_get_container_count), + FsaNormal, + 1, 1, + NULL, NULL); + if (status >= 0) { + dresp = (struct aac_get_container_count_resp *)fib_data(fibptr); + maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); + if (fibptr->dev->supplement_adapter_info.supported_options2 & + AAC_OPTION_SUPPORTED_240_VOLUMES) { + maximum_num_containers = + le32_to_cpu(dresp->MaxSimpleVolumes); + } + aac_fib_complete(fibptr); + } + /* FIB should be freed only after getting the response from the F/W */ + if (status != -ERESTARTSYS) + aac_fib_free(fibptr); + + if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) + maximum_num_containers = MAXIMUM_NUM_CONTAINERS; + if (dev->fsa_dev == NULL || + dev->maximum_num_containers != maximum_num_containers) { + + fsa_dev_ptr = dev->fsa_dev; + + dev->fsa_dev = kcalloc(maximum_num_containers, + sizeof(*fsa_dev_ptr), GFP_KERNEL); + + kfree(fsa_dev_ptr); + fsa_dev_ptr = NULL; + + + if (!dev->fsa_dev) + return -ENOMEM; + + dev->maximum_num_containers = maximum_num_containers; + } + for (index = 0; index < dev->maximum_num_containers; index++) { + dev->fsa_dev[index].devname[0] = '\0'; + dev->fsa_dev[index].valid = 0; + + status = aac_probe_container(dev, index); + + if (status < 0) { + printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n"); + break; + } + } + return status; +} + +static void aac_scsi_done(struct scsi_cmnd *scmd) +{ + if (scmd->device->request_queue) { + /* SCSI command has been submitted by the SCSI mid-layer. */ + scsi_done(scmd); + } else { + /* SCSI command has been submitted by aac_probe_container(). */ + aac_probe_container_scsi_done(scmd); + } +} + +static void get_container_name_callback(void *context, struct fib * fibptr) +{ + struct aac_get_name_resp * get_name_reply; + struct scsi_cmnd * scsicmd; + + scsicmd = (struct scsi_cmnd *) context; + + if (!aac_valid_context(scsicmd, fibptr)) + return; + + dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies)); + BUG_ON(fibptr == NULL); + + get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr); + /* Failure is irrelevant, using default value instead */ + if ((le32_to_cpu(get_name_reply->status) == CT_OK) + && (get_name_reply->data[0] != '\0')) { + char *sp = get_name_reply->data; + int data_size = sizeof_field(struct aac_get_name_resp, data); + + sp[data_size - 1] = '\0'; + while (*sp == ' ') + ++sp; + if (*sp) { + struct inquiry_data inq; + char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)]; + int count = sizeof(d); + char *dp = d; + do { + *dp++ = (*sp) ? 
*sp++ : ' '; + } while (--count > 0); + + scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq)); + memcpy(inq.inqd_pid, d, sizeof(d)); + scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq)); + } + } + + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + + aac_fib_complete(fibptr); + aac_scsi_done(scsicmd); +} + +/* + * aac_get_container_name - get container name, none blocking. + */ +static int aac_get_container_name(struct scsi_cmnd * scsicmd) +{ + int status; + int data_size; + struct aac_get_name *dinfo; + struct fib * cmd_fibcontext; + struct aac_dev * dev; + + dev = (struct aac_dev *)scsicmd->device->host->hostdata; + + data_size = sizeof_field(struct aac_get_name_resp, data); + + cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); + + aac_fib_init(cmd_fibcontext); + dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); + aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; + + dinfo->command = cpu_to_le32(VM_ContainerConfig); + dinfo->type = cpu_to_le32(CT_READ_NAME); + dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); + dinfo->count = cpu_to_le32(data_size - 1); + + status = aac_fib_send(ContainerCommand, + cmd_fibcontext, + sizeof(struct aac_get_name_resp), + FsaNormal, + 0, 1, + (fib_callback)get_container_name_callback, + (void *) scsicmd); + + /* + * Check that the command queued to the controller + */ + if (status == -EINPROGRESS) + return 0; + + printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status); + aac_fib_complete(cmd_fibcontext); + return -1; +} + +static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd) +{ + struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev; + + if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1)) + return aac_scsi_cmd(scsicmd); + + scsicmd->result = DID_NO_CONNECT << 16; + aac_scsi_done(scsicmd); + return 0; +} + +static void _aac_probe_container2(void * context, struct fib * fibptr) +{ + struct fsa_dev_info *fsa_dev_ptr; + int (*callback)(struct scsi_cmnd *); + struct scsi_cmnd *scsicmd = context; + struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd); + int i; + + + if (!aac_valid_context(scsicmd, fibptr)) + return; + + cmd_priv->status = 0; + fsa_dev_ptr = fibptr->dev->fsa_dev; + if (fsa_dev_ptr) { + struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr); + __le32 sup_options2; + + fsa_dev_ptr += scmd_id(scsicmd); + sup_options2 = + fibptr->dev->supplement_adapter_info.supported_options2; + + if ((le32_to_cpu(dresp->status) == ST_OK) && + (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && + (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { + if (!(sup_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE)) { + dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200; + fsa_dev_ptr->block_size = 0x200; + } else { + fsa_dev_ptr->block_size = + le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size); + } + for (i = 0; i < 16; i++) + fsa_dev_ptr->identifier[i] = + dresp->mnt[0].fileinfo.bdevinfo + .identifier[i]; + fsa_dev_ptr->valid = 1; + /* sense_key holds the current state of the spin-up */ + if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY)) + fsa_dev_ptr->sense_data.sense_key = NOT_READY; + else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY) + fsa_dev_ptr->sense_data.sense_key = NO_SENSE; + fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol); + fsa_dev_ptr->size + = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) + + (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32); + fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0); + } + if ((fsa_dev_ptr->valid 
& 1) == 0) + fsa_dev_ptr->valid = 0; + cmd_priv->status = le32_to_cpu(dresp->count); + } + aac_fib_complete(fibptr); + aac_fib_free(fibptr); + callback = cmd_priv->callback; + cmd_priv->callback = NULL; + (*callback)(scsicmd); + return; +} + +static void _aac_probe_container1(void * context, struct fib * fibptr) +{ + struct scsi_cmnd * scsicmd; + struct aac_mount * dresp; + struct aac_query_mount *dinfo; + int status; + + dresp = (struct aac_mount *) fib_data(fibptr); + if (!aac_supports_2T(fibptr->dev)) { + dresp->mnt[0].capacityhigh = 0; + if ((le32_to_cpu(dresp->status) == ST_OK) && + (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) { + _aac_probe_container2(context, fibptr); + return; + } + } + scsicmd = (struct scsi_cmnd *) context; + + if (!aac_valid_context(scsicmd, fibptr)) + return; + + aac_fib_init(fibptr); + + dinfo = (struct aac_query_mount *)fib_data(fibptr); + + if (fibptr->dev->supplement_adapter_info.supported_options2 & + AAC_OPTION_VARIABLE_BLOCK_SIZE) + dinfo->command = cpu_to_le32(VM_NameServeAllBlk); + else + dinfo->command = cpu_to_le32(VM_NameServe64); + + dinfo->count = cpu_to_le32(scmd_id(scsicmd)); + dinfo->type = cpu_to_le32(FT_FILESYS); + aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; + + status = aac_fib_send(ContainerCommand, + fibptr, + sizeof(struct aac_query_mount), + FsaNormal, + 0, 1, + _aac_probe_container2, + (void *) scsicmd); + /* + * Check that the command queued to the controller + */ + if (status < 0 && status != -EINPROGRESS) { + /* Inherit results from VM_NameServe, if any */ + dresp->status = cpu_to_le32(ST_OK); + _aac_probe_container2(context, fibptr); + } +} + +static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *)) +{ + struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd); + struct fib * fibptr; + int status = -ENOMEM; + + if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) { + struct aac_query_mount *dinfo; + + aac_fib_init(fibptr); + + dinfo = (struct aac_query_mount *)fib_data(fibptr); + + if (fibptr->dev->supplement_adapter_info.supported_options2 & + AAC_OPTION_VARIABLE_BLOCK_SIZE) + dinfo->command = cpu_to_le32(VM_NameServeAllBlk); + else + dinfo->command = cpu_to_le32(VM_NameServe); + + dinfo->count = cpu_to_le32(scmd_id(scsicmd)); + dinfo->type = cpu_to_le32(FT_FILESYS); + cmd_priv->callback = callback; + cmd_priv->owner = AAC_OWNER_FIRMWARE; + + status = aac_fib_send(ContainerCommand, + fibptr, + sizeof(struct aac_query_mount), + FsaNormal, + 0, 1, + _aac_probe_container1, + (void *) scsicmd); + /* + * Check that the command queued to the controller + */ + if (status == -EINPROGRESS) + return 0; + + if (status < 0) { + cmd_priv->callback = NULL; + aac_fib_complete(fibptr); + aac_fib_free(fibptr); + } + } + if (status < 0) { + struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev; + if (fsa_dev_ptr) { + fsa_dev_ptr += scmd_id(scsicmd); + if ((fsa_dev_ptr->valid & 1) == 0) { + fsa_dev_ptr->valid = 0; + return (*callback)(scsicmd); + } + } + } + return status; +} + +/** + * aac_probe_container_callback1 - query a logical volume + * @scsicmd: the scsi command block + * + * Queries the controller about the given volume. The volume information + * is updated in the struct fsa_dev_info structure rather than returned. 
+ */ +static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd) +{ + scsicmd->device = NULL; + return 0; +} + +static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd) +{ + aac_probe_container_callback1(scsi_cmnd); +} + +int aac_probe_container(struct aac_dev *dev, int cid) +{ + struct aac_cmd_priv *cmd_priv; + struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd) + sizeof(*cmd_priv), GFP_KERNEL); + struct scsi_device *scsidev = kzalloc(sizeof(*scsidev), GFP_KERNEL); + int status; + + if (!scsicmd || !scsidev) { + kfree(scsicmd); + kfree(scsidev); + return -ENOMEM; + } + + scsicmd->device = scsidev; + scsidev->sdev_state = 0; + scsidev->id = cid; + scsidev->host = dev->scsi_host_ptr; + + if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0) + while (scsicmd->device == scsidev) + schedule(); + kfree(scsidev); + cmd_priv = aac_priv(scsicmd); + status = cmd_priv->status; + kfree(scsicmd); + return status; +} + +/* Local Structure to set SCSI inquiry data strings */ +struct scsi_inq { + char vid[8]; /* Vendor ID */ + char pid[16]; /* Product ID */ + char prl[4]; /* Product Revision Level */ +}; + +/** + * inqstrcpy - string merge + * @a: string to copy from + * @b: string to copy to + * + * Copy a String from one location to another + * without copying \0 + */ + +static void inqstrcpy(char *a, char *b) +{ + + while (*a != (char)0) + *b++ = *a++; +} + +static char *container_types[] = { + "None", + "Volume", + "Mirror", + "Stripe", + "RAID5", + "SSRW", + "SSRO", + "Morph", + "Legacy", + "RAID4", + "RAID10", + "RAID00", + "V-MIRRORS", + "PSEUDO R4", + "RAID50", + "RAID5D", + "RAID5D0", + "RAID1E", + "RAID6", + "RAID60", + "Unknown" +}; + +char * get_container_type(unsigned tindex) +{ + if (tindex >= ARRAY_SIZE(container_types)) + tindex = ARRAY_SIZE(container_types) - 1; + return container_types[tindex]; +} + +/* Function: setinqstr + * + * Arguments: [1] pointer to void [1] int + * + * Purpose: Sets SCSI inquiry data strings for vendor, product + * and revision level. Allows strings to be set in platform dependent + * files instead of in OS dependent driver source. 
+ */ + +static void setinqstr(struct aac_dev *dev, void *data, int tindex) +{ + struct scsi_inq *str; + struct aac_supplement_adapter_info *sup_adap_info; + + sup_adap_info = &dev->supplement_adapter_info; + str = (struct scsi_inq *)(data); /* cast data to scsi inq block */ + memset(str, ' ', sizeof(*str)); + + if (sup_adap_info->adapter_type_text[0]) { + int c; + char *cp; + char *cname = kmemdup(sup_adap_info->adapter_type_text, + sizeof(sup_adap_info->adapter_type_text), + GFP_ATOMIC); + if (!cname) + return; + + cp = cname; + if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C')) + inqstrcpy("SMC", str->vid); + else { + c = sizeof(str->vid); + while (*cp && *cp != ' ' && --c) + ++cp; + c = *cp; + *cp = '\0'; + inqstrcpy(cname, str->vid); + *cp = c; + while (*cp && *cp != ' ') + ++cp; + } + while (*cp == ' ') + ++cp; + /* last six chars reserved for vol type */ + if (strlen(cp) > sizeof(str->pid)) + cp[sizeof(str->pid)] = '\0'; + inqstrcpy (cp, str->pid); + + kfree(cname); + } else { + struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype); + + inqstrcpy (mp->vname, str->vid); + /* last six chars reserved for vol type */ + inqstrcpy (mp->model, str->pid); + } + + if (tindex < ARRAY_SIZE(container_types)){ + char *findit = str->pid; + + for ( ; *findit != ' '; findit++); /* walk till we find a space */ + /* RAID is superfluous in the context of a RAID device */ + if (memcmp(findit-4, "RAID", 4) == 0) + *(findit -= 4) = ' '; + if (((findit - str->pid) + strlen(container_types[tindex])) + < (sizeof(str->pid) + sizeof(str->prl))) + inqstrcpy (container_types[tindex], findit + 1); + } + inqstrcpy ("V1.0", str->prl); +} + +static void build_vpd83_type3(struct tvpd_page83 *vpdpage83data, + struct aac_dev *dev, struct scsi_cmnd *scsicmd) +{ + int container; + + vpdpage83data->type3.codeset = 1; + vpdpage83data->type3.identifiertype = 3; + vpdpage83data->type3.identifierlength = sizeof(vpdpage83data->type3) + - 4; + + for (container = 0; container < dev->maximum_num_containers; + container++) { + + if (scmd_id(scsicmd) == container) { + memcpy(vpdpage83data->type3.Identifier, + dev->fsa_dev[container].identifier, + 16); + break; + } + } +} + +static void get_container_serial_callback(void *context, struct fib * fibptr) +{ + struct aac_get_serial_resp * get_serial_reply; + struct scsi_cmnd * scsicmd; + + BUG_ON(fibptr == NULL); + + scsicmd = (struct scsi_cmnd *) context; + if (!aac_valid_context(scsicmd, fibptr)) + return; + + get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr); + /* Failure is irrelevant, using default value instead */ + if (le32_to_cpu(get_serial_reply->status) == CT_OK) { + /*Check to see if it's for VPD 0x83 or 0x80 */ + if (scsicmd->cmnd[2] == 0x83) { + /* vpd page 0x83 - Device Identification Page */ + struct aac_dev *dev; + int i; + struct tvpd_page83 vpdpage83data; + + dev = (struct aac_dev *)scsicmd->device->host->hostdata; + + memset(((u8 *)&vpdpage83data), 0, + sizeof(vpdpage83data)); + + /* DIRECT_ACCESS_DEVIC */ + vpdpage83data.DeviceType = 0; + /* DEVICE_CONNECTED */ + vpdpage83data.DeviceTypeQualifier = 0; + /* VPD_DEVICE_IDENTIFIERS */ + vpdpage83data.PageCode = 0x83; + vpdpage83data.reserved = 0; + vpdpage83data.PageLength = + sizeof(vpdpage83data.type1) + + sizeof(vpdpage83data.type2); + + /* VPD 83 Type 3 is not supported for ARC */ + if (dev->sa_firmware) + vpdpage83data.PageLength += + sizeof(vpdpage83data.type3); + + /* T10 Vendor Identifier Field Format */ + /* VpdcodesetAscii */ + vpdpage83data.type1.codeset = 2; + /* 
VpdIdentifierTypeVendorId */ + vpdpage83data.type1.identifiertype = 1; + vpdpage83data.type1.identifierlength = + sizeof(vpdpage83data.type1) - 4; + + /* "ADAPTEC " for adaptec */ + memcpy(vpdpage83data.type1.venid, + "ADAPTEC ", + sizeof(vpdpage83data.type1.venid)); + memcpy(vpdpage83data.type1.productid, + "ARRAY ", + sizeof( + vpdpage83data.type1.productid)); + + /* Convert to ascii based serial number. + * The LSB is the end. + */ + for (i = 0; i < 8; i++) { + u8 temp = + (u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF); + if (temp > 0x9) { + vpdpage83data.type1.serialnumber[i] = + 'A' + (temp - 0xA); + } else { + vpdpage83data.type1.serialnumber[i] = + '0' + temp; + } + } + + /* VpdCodeSetBinary */ + vpdpage83data.type2.codeset = 1; + /* VpdidentifiertypeEUI64 */ + vpdpage83data.type2.identifiertype = 2; + vpdpage83data.type2.identifierlength = + sizeof(vpdpage83data.type2) - 4; + + vpdpage83data.type2.eu64id.venid[0] = 0xD0; + vpdpage83data.type2.eu64id.venid[1] = 0; + vpdpage83data.type2.eu64id.venid[2] = 0; + + vpdpage83data.type2.eu64id.Serial = + get_serial_reply->uid; + vpdpage83data.type2.eu64id.reserved = 0; + + /* + * VpdIdentifierTypeFCPHName + * VPD 0x83 Type 3 not supported for ARC + */ + if (dev->sa_firmware) { + build_vpd83_type3(&vpdpage83data, + dev, scsicmd); + } + + /* Move the inquiry data to the response buffer. */ + scsi_sg_copy_from_buffer(scsicmd, &vpdpage83data, + sizeof(vpdpage83data)); + } else { + /* It must be for VPD 0x80 */ + char sp[13]; + /* EVPD bit set */ + sp[0] = INQD_PDT_DA; + sp[1] = scsicmd->cmnd[2]; + sp[2] = 0; + sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X", + le32_to_cpu(get_serial_reply->uid)); + scsi_sg_copy_from_buffer(scsicmd, sp, + sizeof(sp)); + } + } + + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + + aac_fib_complete(fibptr); + aac_scsi_done(scsicmd); +} + +/* + * aac_get_container_serial - get container serial, none blocking. + */ +static int aac_get_container_serial(struct scsi_cmnd * scsicmd) +{ + int status; + struct aac_get_serial *dinfo; + struct fib * cmd_fibcontext; + struct aac_dev * dev; + + dev = (struct aac_dev *)scsicmd->device->host->hostdata; + + cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); + + aac_fib_init(cmd_fibcontext); + dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext); + + dinfo->command = cpu_to_le32(VM_ContainerConfig); + dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID); + dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); + aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; + + status = aac_fib_send(ContainerCommand, + cmd_fibcontext, + sizeof(struct aac_get_serial_resp), + FsaNormal, + 0, 1, + (fib_callback) get_container_serial_callback, + (void *) scsicmd); + + /* + * Check that the command queued to the controller + */ + if (status == -EINPROGRESS) + return 0; + + printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status); + aac_fib_complete(cmd_fibcontext); + return -1; +} + +/* Function: setinqserial + * + * Arguments: [1] pointer to void [1] int + * + * Purpose: Sets SCSI Unit Serial number. + * This is a fake. We should read a proper + * serial number from the container. But + * without docs it's quite hard to do it :-) + * So this will have to do in the meantime. + */ + +static int setinqserial(struct aac_dev *dev, void *data, int cid) +{ + /* + * This breaks array migration. 
+ */ + return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X", + le32_to_cpu(dev->adapter_info.serial[0]), cid); +} + +static inline void set_sense(struct sense_data *sense_data, u8 sense_key, + u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer) +{ + u8 *sense_buf = (u8 *)sense_data; + /* Sense data valid, err code 70h */ + sense_buf[0] = 0x70; /* No info field */ + sense_buf[1] = 0; /* Segment number, always zero */ + + sense_buf[2] = sense_key; /* Sense key */ + + sense_buf[12] = sense_code; /* Additional sense code */ + sense_buf[13] = a_sense_code; /* Additional sense code qualifier */ + + if (sense_key == ILLEGAL_REQUEST) { + sense_buf[7] = 10; /* Additional sense length */ + + sense_buf[15] = bit_pointer; + /* Illegal parameter is in the parameter block */ + if (sense_code == SENCODE_INVALID_CDB_FIELD) + sense_buf[15] |= 0xc0;/* Std sense key specific field */ + /* Illegal parameter is in the CDB block */ + sense_buf[16] = field_pointer >> 8; /* MSB */ + sense_buf[17] = field_pointer; /* LSB */ + } else + sense_buf[7] = 6; /* Additional sense length */ +} + +static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba) +{ + if (lba & 0xffffffff00000000LL) { + int cid = scmd_id(cmd); + dprintk((KERN_DEBUG "aacraid: Illegal lba\n")); + cmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; + set_sense(&dev->fsa_dev[cid].sense_data, + HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); + memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), + SCSI_SENSE_BUFFERSIZE)); + aac_scsi_done(cmd); + return 1; + } + return 0; +} + +static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba) +{ + return 0; +} + +static void io_callback(void *context, struct fib * fibptr); + +static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) +{ + struct aac_dev *dev = fib->dev; + u16 fibsize, command; + long ret; + + aac_fib_init(fib); + if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 || + dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) && + !dev->sync_mode) { + struct aac_raw_io2 *readcmd2; + readcmd2 = (struct aac_raw_io2 *) fib_data(fib); + memset(readcmd2, 0, sizeof(struct aac_raw_io2)); + readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff)); + readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); + readcmd2->byteCount = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); + readcmd2->cid = cpu_to_le16(scmd_id(cmd)); + readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ); + ret = aac_build_sgraw2(cmd, readcmd2, + dev->scsi_host_ptr->sg_tablesize); + if (ret < 0) + return ret; + command = ContainerRawIo2; + fibsize = struct_size(readcmd2, sge, + le32_to_cpu(readcmd2->sgeCnt)); + } else { + struct aac_raw_io *readcmd; + readcmd = (struct aac_raw_io *) fib_data(fib); + readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); + readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); + readcmd->count = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); + readcmd->cid = cpu_to_le16(scmd_id(cmd)); + readcmd->flags = cpu_to_le16(RIO_TYPE_READ); + readcmd->bpTotal = 0; + readcmd->bpComplete = 0; + ret = aac_build_sgraw(cmd, &readcmd->sg); + if (ret < 0) + return ret; + command = ContainerRawIo; + fibsize = sizeof(struct aac_raw_io) + + ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw)); + } + + BUG_ON(fibsize > (fib->dev->max_fib_size - 
sizeof(struct aac_fibhdr))); + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(command, + fib, + fibsize, + FsaNormal, + 0, 1, + (fib_callback) io_callback, + (void *) cmd); +} + +static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) +{ + u16 fibsize; + struct aac_read64 *readcmd; + long ret; + + aac_fib_init(fib); + readcmd = (struct aac_read64 *) fib_data(fib); + readcmd->command = cpu_to_le32(VM_CtHostRead64); + readcmd->cid = cpu_to_le16(scmd_id(cmd)); + readcmd->sector_count = cpu_to_le16(count); + readcmd->block = cpu_to_le32((u32)(lba&0xffffffff)); + readcmd->pad = 0; + readcmd->flags = 0; + + ret = aac_build_sg64(cmd, &readcmd->sg); + if (ret < 0) + return ret; + fibsize = sizeof(struct aac_read64) + + ((le32_to_cpu(readcmd->sg.count) - 1) * + sizeof (struct sgentry64)); + BUG_ON (fibsize > (fib->dev->max_fib_size - + sizeof(struct aac_fibhdr))); + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(ContainerCommand64, + fib, + fibsize, + FsaNormal, + 0, 1, + (fib_callback) io_callback, + (void *) cmd); +} + +static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) +{ + u16 fibsize; + struct aac_read *readcmd; + struct aac_dev *dev = fib->dev; + long ret; + + aac_fib_init(fib); + readcmd = (struct aac_read *) fib_data(fib); + readcmd->command = cpu_to_le32(VM_CtBlockRead); + readcmd->cid = cpu_to_le32(scmd_id(cmd)); + readcmd->block = cpu_to_le32((u32)(lba&0xffffffff)); + readcmd->count = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); + + ret = aac_build_sg(cmd, &readcmd->sg); + if (ret < 0) + return ret; + fibsize = sizeof(struct aac_read) + + ((le32_to_cpu(readcmd->sg.count) - 1) * + sizeof (struct sgentry)); + BUG_ON (fibsize > (fib->dev->max_fib_size - + sizeof(struct aac_fibhdr))); + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(ContainerCommand, + fib, + fibsize, + FsaNormal, + 0, 1, + (fib_callback) io_callback, + (void *) cmd); +} + +static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua) +{ + struct aac_dev *dev = fib->dev; + u16 fibsize, command; + long ret; + + aac_fib_init(fib); + if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 || + dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) && + !dev->sync_mode) { + struct aac_raw_io2 *writecmd2; + writecmd2 = (struct aac_raw_io2 *) fib_data(fib); + memset(writecmd2, 0, sizeof(struct aac_raw_io2)); + writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff)); + writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); + writecmd2->byteCount = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); + writecmd2->cid = cpu_to_le16(scmd_id(cmd)); + writecmd2->flags = (fua && ((aac_cache & 5) != 1) && + (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ? 
+ cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) : + cpu_to_le16(RIO2_IO_TYPE_WRITE); + ret = aac_build_sgraw2(cmd, writecmd2, + dev->scsi_host_ptr->sg_tablesize); + if (ret < 0) + return ret; + command = ContainerRawIo2; + fibsize = struct_size(writecmd2, sge, + le32_to_cpu(writecmd2->sgeCnt)); + } else { + struct aac_raw_io *writecmd; + writecmd = (struct aac_raw_io *) fib_data(fib); + writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); + writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); + writecmd->count = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); + writecmd->cid = cpu_to_le16(scmd_id(cmd)); + writecmd->flags = (fua && ((aac_cache & 5) != 1) && + (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ? + cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) : + cpu_to_le16(RIO_TYPE_WRITE); + writecmd->bpTotal = 0; + writecmd->bpComplete = 0; + ret = aac_build_sgraw(cmd, &writecmd->sg); + if (ret < 0) + return ret; + command = ContainerRawIo; + fibsize = sizeof(struct aac_raw_io) + + ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw)); + } + + BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(command, + fib, + fibsize, + FsaNormal, + 0, 1, + (fib_callback) io_callback, + (void *) cmd); +} + +static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua) +{ + u16 fibsize; + struct aac_write64 *writecmd; + long ret; + + aac_fib_init(fib); + writecmd = (struct aac_write64 *) fib_data(fib); + writecmd->command = cpu_to_le32(VM_CtHostWrite64); + writecmd->cid = cpu_to_le16(scmd_id(cmd)); + writecmd->sector_count = cpu_to_le16(count); + writecmd->block = cpu_to_le32((u32)(lba&0xffffffff)); + writecmd->pad = 0; + writecmd->flags = 0; + + ret = aac_build_sg64(cmd, &writecmd->sg); + if (ret < 0) + return ret; + fibsize = sizeof(struct aac_write64) + + ((le32_to_cpu(writecmd->sg.count) - 1) * + sizeof (struct sgentry64)); + BUG_ON (fibsize > (fib->dev->max_fib_size - + sizeof(struct aac_fibhdr))); + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(ContainerCommand64, + fib, + fibsize, + FsaNormal, + 0, 1, + (fib_callback) io_callback, + (void *) cmd); +} + +static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua) +{ + u16 fibsize; + struct aac_write *writecmd; + struct aac_dev *dev = fib->dev; + long ret; + + aac_fib_init(fib); + writecmd = (struct aac_write *) fib_data(fib); + writecmd->command = cpu_to_le32(VM_CtBlockWrite); + writecmd->cid = cpu_to_le32(scmd_id(cmd)); + writecmd->block = cpu_to_le32((u32)(lba&0xffffffff)); + writecmd->count = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); + writecmd->sg.count = cpu_to_le32(1); + /* ->stable is not used - it did mean which type of write */ + + ret = aac_build_sg(cmd, &writecmd->sg); + if (ret < 0) + return ret; + fibsize = sizeof(struct aac_write) + + ((le32_to_cpu(writecmd->sg.count) - 1) * + sizeof (struct sgentry)); + BUG_ON (fibsize > (fib->dev->max_fib_size - + sizeof(struct aac_fibhdr))); + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(ContainerCommand, + fib, + fibsize, + FsaNormal, + 0, 1, + (fib_callback) io_callback, + (void *) cmd); +} + +static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd) +{ + struct aac_srb * srbcmd; + u32 flag; + u32 timeout; + struct aac_dev *dev = fib->dev; + + aac_fib_init(fib); + switch(cmd->sc_data_direction){ 
+ case DMA_TO_DEVICE: + flag = SRB_DataOut; + break; + case DMA_BIDIRECTIONAL: + flag = SRB_DataIn | SRB_DataOut; + break; + case DMA_FROM_DEVICE: + flag = SRB_DataIn; + break; + case DMA_NONE: + default: /* shuts up some versions of gcc */ + flag = SRB_NoDataXfer; + break; + } + + srbcmd = (struct aac_srb*) fib_data(fib); + srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); + srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd))); + srbcmd->id = cpu_to_le32(scmd_id(cmd)); + srbcmd->lun = cpu_to_le32(cmd->device->lun); + srbcmd->flags = cpu_to_le32(flag); + timeout = scsi_cmd_to_rq(cmd)->timeout / HZ; + if (timeout == 0) + timeout = (dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT); + srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds + srbcmd->retry_limit = 0; /* Obsolete parameter */ + srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len); + return srbcmd; +} + +static struct aac_hba_cmd_req *aac_construct_hbacmd(struct fib *fib, + struct scsi_cmnd *cmd) +{ + struct aac_hba_cmd_req *hbacmd; + struct aac_dev *dev; + int bus, target; + u64 address; + + dev = (struct aac_dev *)cmd->device->host->hostdata; + + hbacmd = (struct aac_hba_cmd_req *)fib->hw_fib_va; + memset(hbacmd, 0, 96); /* sizeof(*hbacmd) is not necessary */ + /* iu_type is a parameter of aac_hba_send */ + switch (cmd->sc_data_direction) { + case DMA_TO_DEVICE: + hbacmd->byte1 = 2; + break; + case DMA_FROM_DEVICE: + case DMA_BIDIRECTIONAL: + hbacmd->byte1 = 1; + break; + case DMA_NONE: + default: + break; + } + hbacmd->lun[1] = cpu_to_le32(cmd->device->lun); + + bus = aac_logical_to_phys(scmd_channel(cmd)); + target = scmd_id(cmd); + hbacmd->it_nexus = dev->hba_map[bus][target].rmw_nexus; + + /* we fill in reply_qid later in aac_src_deliver_message */ + /* we fill in iu_type, request_id later in aac_hba_send */ + /* we fill in emb_data_desc_count later in aac_build_sghba */ + + memcpy(hbacmd->cdb, cmd->cmnd, cmd->cmd_len); + hbacmd->data_length = cpu_to_le32(scsi_bufflen(cmd)); + + address = (u64)fib->hw_error_pa; + hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32)); + hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff)); + hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); + + return hbacmd; +} + +static void aac_srb_callback(void *context, struct fib * fibptr); + +static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd) +{ + u16 fibsize; + struct aac_srb * srbcmd = aac_scsi_common(fib, cmd); + long ret; + + ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg); + if (ret < 0) + return ret; + srbcmd->count = cpu_to_le32(scsi_bufflen(cmd)); + + memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); + memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len); + /* + * Build Scatter/Gather list + */ + fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) + + ((le32_to_cpu(srbcmd->sg.count) & 0xff) * + sizeof (struct sgentry64)); + BUG_ON (fibsize > (fib->dev->max_fib_size - + sizeof(struct aac_fibhdr))); + + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(ScsiPortCommand64, fib, + fibsize, FsaNormal, 0, 1, + (fib_callback) aac_srb_callback, + (void *) cmd); +} + +static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd) +{ + u16 fibsize; + struct aac_srb * srbcmd = aac_scsi_common(fib, cmd); + long ret; + + ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg); + if (ret < 0) + return ret; + srbcmd->count = cpu_to_le32(scsi_bufflen(cmd)); + + memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); + memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len); + /* + * Build Scatter/Gather 
list + */ + fibsize = sizeof (struct aac_srb) + + (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) * + sizeof (struct sgentry)); + BUG_ON (fibsize > (fib->dev->max_fib_size - + sizeof(struct aac_fibhdr))); + + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1, + (fib_callback) aac_srb_callback, (void *) cmd); +} + +static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd) +{ + if ((sizeof(dma_addr_t) > 4) && fib->dev->needs_dac && + (fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) + return FAILED; + return aac_scsi_32(fib, cmd); +} + +static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd) +{ + struct aac_hba_cmd_req *hbacmd = aac_construct_hbacmd(fib, cmd); + struct aac_dev *dev; + long ret; + + dev = (struct aac_dev *)cmd->device->host->hostdata; + + ret = aac_build_sghba(cmd, hbacmd, + dev->scsi_host_ptr->sg_tablesize, (u64)fib->hw_sgl_pa); + if (ret < 0) + return ret; + + /* + * Now send the HBA command to the adapter + */ + fib->hbacmd_size = 64 + le32_to_cpu(hbacmd->emb_data_desc_count) * + sizeof(struct aac_hba_sgl); + + return aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib, + (fib_callback) aac_hba_callback, + (void *) cmd); +} + +static int aac_send_safw_bmic_cmd(struct aac_dev *dev, + struct aac_srb_unit *srbu, void *xfer_buf, int xfer_len) +{ + struct fib *fibptr; + dma_addr_t addr; + int rcode; + int fibsize; + struct aac_srb *srb; + struct aac_srb_reply *srb_reply; + struct sgmap64 *sg64; + u32 vbus; + u32 vid; + + if (!dev->sa_firmware) + return 0; + + /* allocate FIB */ + fibptr = aac_fib_alloc(dev); + if (!fibptr) + return -ENOMEM; + + aac_fib_init(fibptr); + fibptr->hw_fib_va->header.XferState &= + ~cpu_to_le32(FastResponseCapable); + + fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) + + sizeof(struct sgentry64); + + /* allocate DMA buffer for response */ + addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(&dev->pdev->dev, addr)) { + rcode = -ENOMEM; + goto fib_error; + } + + srb = fib_data(fibptr); + memcpy(srb, &srbu->srb, sizeof(struct aac_srb)); + + vbus = (u32)le16_to_cpu( + dev->supplement_adapter_info.virt_device_bus); + vid = (u32)le16_to_cpu( + dev->supplement_adapter_info.virt_device_target); + + /* set the common request fields */ + srb->channel = cpu_to_le32(vbus); + srb->id = cpu_to_le32(vid); + srb->lun = 0; + srb->function = cpu_to_le32(SRBF_ExecuteScsi); + srb->timeout = 0; + srb->retry_limit = 0; + srb->cdb_size = cpu_to_le32(16); + srb->count = cpu_to_le32(xfer_len); + + sg64 = (struct sgmap64 *)&srb->sg; + sg64->count = cpu_to_le32(1); + sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr)); + sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr)); + sg64->sg[0].count = cpu_to_le32(xfer_len); + + /* + * Copy the updated data for other dumping or other usage if needed + */ + memcpy(&srbu->srb, srb, sizeof(struct aac_srb)); + + /* issue request to the controller */ + rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, FsaNormal, + 1, 1, NULL, NULL); + + if (rcode == -ERESTARTSYS) + rcode = -ERESTART; + + if (unlikely(rcode < 0)) + goto bmic_error; + + srb_reply = (struct aac_srb_reply *)fib_data(fibptr); + memcpy(&srbu->srb_reply, srb_reply, sizeof(struct aac_srb_reply)); + +bmic_error: + dma_unmap_single(&dev->pdev->dev, addr, xfer_len, DMA_BIDIRECTIONAL); +fib_error: + aac_fib_complete(fibptr); + aac_fib_free(fibptr); + return rcode; +} + +static void aac_set_safw_target_qd(struct aac_dev *dev, int 
bus, int target) +{ + + struct aac_ciss_identify_pd *identify_resp; + + if (dev->hba_map[bus][target].devtype != AAC_DEVTYPE_NATIVE_RAW) + return; + + identify_resp = dev->hba_map[bus][target].safw_identify_resp; + if (identify_resp == NULL) { + dev->hba_map[bus][target].qd_limit = 32; + return; + } + + if (identify_resp->current_queue_depth_limit <= 0 || + identify_resp->current_queue_depth_limit > 255) + dev->hba_map[bus][target].qd_limit = 32; + else + dev->hba_map[bus][target].qd_limit = + identify_resp->current_queue_depth_limit; +} + +static int aac_issue_safw_bmic_identify(struct aac_dev *dev, + struct aac_ciss_identify_pd **identify_resp, u32 bus, u32 target) +{ + int rcode = -ENOMEM; + int datasize; + struct aac_srb_unit srbu; + struct aac_srb *srbcmd; + struct aac_ciss_identify_pd *identify_reply; + + datasize = sizeof(struct aac_ciss_identify_pd); + identify_reply = kmalloc(datasize, GFP_KERNEL); + if (!identify_reply) + goto out; + + memset(&srbu, 0, sizeof(struct aac_srb_unit)); + + srbcmd = &srbu.srb; + srbcmd->flags = cpu_to_le32(SRB_DataIn); + srbcmd->cdb[0] = 0x26; + srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF); + srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE; + + rcode = aac_send_safw_bmic_cmd(dev, &srbu, identify_reply, datasize); + if (unlikely(rcode < 0)) + goto mem_free_all; + + *identify_resp = identify_reply; + +out: + return rcode; +mem_free_all: + kfree(identify_reply); + goto out; +} + +static inline void aac_free_safw_ciss_luns(struct aac_dev *dev) +{ + kfree(dev->safw_phys_luns); + dev->safw_phys_luns = NULL; +} + +/** + * aac_get_safw_ciss_luns() - Process topology change + * @dev: aac_dev structure + * + * Execute a CISS REPORT PHYS LUNS and process the results into + * the current hba_map. + */ +static int aac_get_safw_ciss_luns(struct aac_dev *dev) +{ + int rcode = -ENOMEM; + int datasize; + struct aac_srb *srbcmd; + struct aac_srb_unit srbu; + struct aac_ciss_phys_luns_resp *phys_luns; + + datasize = sizeof(struct aac_ciss_phys_luns_resp) + + (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun); + phys_luns = kmalloc(datasize, GFP_KERNEL); + if (phys_luns == NULL) + goto out; + + memset(&srbu, 0, sizeof(struct aac_srb_unit)); + + srbcmd = &srbu.srb; + srbcmd->flags = cpu_to_le32(SRB_DataIn); + srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS; + srbcmd->cdb[1] = 2; /* extended reporting */ + srbcmd->cdb[8] = (u8)(datasize >> 8); + srbcmd->cdb[9] = (u8)(datasize); + + rcode = aac_send_safw_bmic_cmd(dev, &srbu, phys_luns, datasize); + if (unlikely(rcode < 0)) + goto mem_free_all; + + if (phys_luns->resp_flag != 2) { + rcode = -ENOMSG; + goto mem_free_all; + } + + dev->safw_phys_luns = phys_luns; + +out: + return rcode; +mem_free_all: + kfree(phys_luns); + goto out; +} + +static inline u32 aac_get_safw_phys_lun_count(struct aac_dev *dev) +{ + return get_unaligned_be32(&dev->safw_phys_luns->list_length[0])/24; +} + +static inline u32 aac_get_safw_phys_bus(struct aac_dev *dev, int lun) +{ + return dev->safw_phys_luns->lun[lun].level2[1] & 0x3f; +} + +static inline u32 aac_get_safw_phys_target(struct aac_dev *dev, int lun) +{ + return dev->safw_phys_luns->lun[lun].level2[0]; +} + +static inline u32 aac_get_safw_phys_expose_flag(struct aac_dev *dev, int lun) +{ + return dev->safw_phys_luns->lun[lun].bus >> 6; +} + +static inline u32 aac_get_safw_phys_attribs(struct aac_dev *dev, int lun) +{ + return dev->safw_phys_luns->lun[lun].node_ident[9]; +} + +static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun) +{ + return *((u32 
*)&dev->safw_phys_luns->lun[lun].node_ident[12]); +} + +static inline void aac_free_safw_identify_resp(struct aac_dev *dev, + int bus, int target) +{ + kfree(dev->hba_map[bus][target].safw_identify_resp); + dev->hba_map[bus][target].safw_identify_resp = NULL; +} + +static inline void aac_free_safw_all_identify_resp(struct aac_dev *dev, + int lun_count) +{ + int luns; + int i; + u32 bus; + u32 target; + + luns = aac_get_safw_phys_lun_count(dev); + + if (luns < lun_count) + lun_count = luns; + else if (lun_count < 0) + lun_count = luns; + + for (i = 0; i < lun_count; i++) { + bus = aac_get_safw_phys_bus(dev, i); + target = aac_get_safw_phys_target(dev, i); + + aac_free_safw_identify_resp(dev, bus, target); + } +} + +static int aac_get_safw_attr_all_targets(struct aac_dev *dev) +{ + int i; + int rcode = 0; + u32 lun_count; + u32 bus; + u32 target; + struct aac_ciss_identify_pd *identify_resp = NULL; + + lun_count = aac_get_safw_phys_lun_count(dev); + + for (i = 0; i < lun_count; ++i) { + + bus = aac_get_safw_phys_bus(dev, i); + target = aac_get_safw_phys_target(dev, i); + + rcode = aac_issue_safw_bmic_identify(dev, + &identify_resp, bus, target); + + if (unlikely(rcode < 0)) + goto free_identify_resp; + + dev->hba_map[bus][target].safw_identify_resp = identify_resp; + } + +out: + return rcode; +free_identify_resp: + aac_free_safw_all_identify_resp(dev, i); + goto out; +} + +/** + * aac_set_safw_attr_all_targets- update current hba map with data from FW + * @dev: aac_dev structure + * + * Update our hba map with the information gathered from the FW + */ +static void aac_set_safw_attr_all_targets(struct aac_dev *dev) +{ + /* ok and extended reporting */ + u32 lun_count, nexus; + u32 i, bus, target; + u8 expose_flag, attribs; + + lun_count = aac_get_safw_phys_lun_count(dev); + + dev->scan_counter++; + + for (i = 0; i < lun_count; ++i) { + + bus = aac_get_safw_phys_bus(dev, i); + target = aac_get_safw_phys_target(dev, i); + expose_flag = aac_get_safw_phys_expose_flag(dev, i); + attribs = aac_get_safw_phys_attribs(dev, i); + nexus = aac_get_safw_phys_nexus(dev, i); + + if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS) + continue; + + if (expose_flag != 0) { + dev->hba_map[bus][target].devtype = + AAC_DEVTYPE_RAID_MEMBER; + continue; + } + + if (nexus != 0 && (attribs & 8)) { + dev->hba_map[bus][target].devtype = + AAC_DEVTYPE_NATIVE_RAW; + dev->hba_map[bus][target].rmw_nexus = + nexus; + } else + dev->hba_map[bus][target].devtype = + AAC_DEVTYPE_ARC_RAW; + + dev->hba_map[bus][target].scan_counter = dev->scan_counter; + + aac_set_safw_target_qd(dev, bus, target); + } +} + +static int aac_setup_safw_targets(struct aac_dev *dev) +{ + int rcode = 0; + + rcode = aac_get_containers(dev); + if (unlikely(rcode < 0)) + goto out; + + rcode = aac_get_safw_ciss_luns(dev); + if (unlikely(rcode < 0)) + goto out; + + rcode = aac_get_safw_attr_all_targets(dev); + if (unlikely(rcode < 0)) + goto free_ciss_luns; + + aac_set_safw_attr_all_targets(dev); + + aac_free_safw_all_identify_resp(dev, -1); +free_ciss_luns: + aac_free_safw_ciss_luns(dev); +out: + return rcode; +} + +int aac_setup_safw_adapter(struct aac_dev *dev) +{ + return aac_setup_safw_targets(dev); +} + +int aac_get_adapter_info(struct aac_dev* dev) +{ + struct fib* fibptr; + int rcode; + u32 tmp, bus, target; + struct aac_adapter_info *info; + struct aac_bus_info *command; + struct aac_bus_info_response *bus_info; + + if (!(fibptr = aac_fib_alloc(dev))) + return -ENOMEM; + + aac_fib_init(fibptr); + info = (struct aac_adapter_info *) 
fib_data(fibptr); + memset(info,0,sizeof(*info)); + + rcode = aac_fib_send(RequestAdapterInfo, + fibptr, + sizeof(*info), + FsaNormal, + -1, 1, /* First `interrupt' command uses special wait */ + NULL, + NULL); + + if (rcode < 0) { + /* FIB should be freed only after + * getting the response from the F/W */ + if (rcode != -ERESTARTSYS) { + aac_fib_complete(fibptr); + aac_fib_free(fibptr); + } + return rcode; + } + memcpy(&dev->adapter_info, info, sizeof(*info)); + + dev->supplement_adapter_info.virt_device_bus = 0xffff; + if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) { + struct aac_supplement_adapter_info * sinfo; + + aac_fib_init(fibptr); + + sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr); + + memset(sinfo,0,sizeof(*sinfo)); + + rcode = aac_fib_send(RequestSupplementAdapterInfo, + fibptr, + sizeof(*sinfo), + FsaNormal, + 1, 1, + NULL, + NULL); + + if (rcode >= 0) + memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo)); + if (rcode == -ERESTARTSYS) { + fibptr = aac_fib_alloc(dev); + if (!fibptr) + return -ENOMEM; + } + + } + + /* reset all previous mapped devices (i.e. for init. after IOP_RESET) */ + for (bus = 0; bus < AAC_MAX_BUSES; bus++) { + for (target = 0; target < AAC_MAX_TARGETS; target++) { + dev->hba_map[bus][target].devtype = 0; + dev->hba_map[bus][target].qd_limit = 0; + } + } + + /* + * GetBusInfo + */ + + aac_fib_init(fibptr); + + bus_info = (struct aac_bus_info_response *) fib_data(fibptr); + + memset(bus_info, 0, sizeof(*bus_info)); + + command = (struct aac_bus_info *)bus_info; + + command->Command = cpu_to_le32(VM_Ioctl); + command->ObjType = cpu_to_le32(FT_DRIVE); + command->MethodId = cpu_to_le32(1); + command->CtlCmd = cpu_to_le32(GetBusInfo); + + rcode = aac_fib_send(ContainerCommand, + fibptr, + sizeof (*bus_info), + FsaNormal, + 1, 1, + NULL, NULL); + + /* reasoned default */ + dev->maximum_num_physicals = 16; + if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) { + dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus); + dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount); + } + + if (!dev->in_reset) { + char buffer[16]; + tmp = le32_to_cpu(dev->adapter_info.kernelrev); + printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n", + dev->name, + dev->id, + tmp>>24, + (tmp>>16)&0xff, + tmp&0xff, + le32_to_cpu(dev->adapter_info.kernelbuild), + (int)sizeof(dev->supplement_adapter_info.build_date), + dev->supplement_adapter_info.build_date); + tmp = le32_to_cpu(dev->adapter_info.monitorrev); + printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n", + dev->name, dev->id, + tmp>>24,(tmp>>16)&0xff,tmp&0xff, + le32_to_cpu(dev->adapter_info.monitorbuild)); + tmp = le32_to_cpu(dev->adapter_info.biosrev); + printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n", + dev->name, dev->id, + tmp>>24,(tmp>>16)&0xff,tmp&0xff, + le32_to_cpu(dev->adapter_info.biosbuild)); + buffer[0] = '\0'; + if (aac_get_serial_number( + shost_to_class(dev->scsi_host_ptr), buffer)) + printk(KERN_INFO "%s%d: serial %s", + dev->name, dev->id, buffer); + if (dev->supplement_adapter_info.vpd_info.tsid[0]) { + printk(KERN_INFO "%s%d: TSID %.*s\n", + dev->name, dev->id, + (int)sizeof(dev->supplement_adapter_info + .vpd_info.tsid), + dev->supplement_adapter_info.vpd_info.tsid); + } + if (!aac_check_reset || ((aac_check_reset == 1) && + (dev->supplement_adapter_info.supported_options2 & + AAC_OPTION_IGNORE_RESET))) { + printk(KERN_INFO "%s%d: Reset Adapter Ignored\n", + dev->name, dev->id); + } + } + + dev->cache_protected = 0; + dev->jbod = 
((dev->supplement_adapter_info.feature_bits & + AAC_FEATURE_JBOD) != 0); + dev->nondasd_support = 0; + dev->raid_scsi_mode = 0; + if(dev->adapter_info.options & AAC_OPT_NONDASD) + dev->nondasd_support = 1; + + /* + * If the firmware supports ROMB RAID/SCSI mode and we are currently + * in RAID/SCSI mode, set the flag. For now if in this mode we will + * force nondasd support on. If we decide to allow the non-dasd flag + * additional changes changes will have to be made to support + * RAID/SCSI. the function aac_scsi_cmd in this module will have to be + * changed to support the new dev->raid_scsi_mode flag instead of + * leaching off of the dev->nondasd_support flag. Also in linit.c the + * function aac_detect will have to be modified where it sets up the + * max number of channels based on the aac->nondasd_support flag only. + */ + if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) && + (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) { + dev->nondasd_support = 1; + dev->raid_scsi_mode = 1; + } + if (dev->raid_scsi_mode != 0) + printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n", + dev->name, dev->id); + + if (nondasd != -1) + dev->nondasd_support = (nondasd!=0); + if (dev->nondasd_support && !dev->in_reset) + printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id); + + if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32)) + dev->needs_dac = 1; + dev->dac_support = 0; + if ((sizeof(dma_addr_t) > 4) && dev->needs_dac && + (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) { + if (!dev->in_reset) + printk(KERN_INFO "%s%d: 64bit support enabled.\n", + dev->name, dev->id); + dev->dac_support = 1; + } + + if(dacmode != -1) { + dev->dac_support = (dacmode!=0); + } + + /* avoid problems with AAC_QUIRK_SCSI_32 controllers */ + if (dev->dac_support && (aac_get_driver_ident(dev->cardtype)->quirks + & AAC_QUIRK_SCSI_32)) { + dev->nondasd_support = 0; + dev->jbod = 0; + expose_physicals = 0; + } + + if (dev->dac_support) { + if (!dma_set_mask(&dev->pdev->dev, DMA_BIT_MASK(64))) { + if (!dev->in_reset) + dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n"); + } else if (!dma_set_mask(&dev->pdev->dev, DMA_BIT_MASK(32))) { + dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n"); + dev->dac_support = 0; + } else { + dev_info(&dev->pdev->dev, "No suitable DMA available\n"); + rcode = -ENOMEM; + } + } + /* + * Deal with configuring for the individualized limits of each packet + * interface. + */ + dev->a_ops.adapter_scsi = (dev->dac_support) + ? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32) + ? aac_scsi_32_64 + : aac_scsi_64) + : aac_scsi_32; + if (dev->raw_io_interface) { + dev->a_ops.adapter_bounds = (dev->raw_io_64) + ? 
aac_bounds_64 + : aac_bounds_32; + dev->a_ops.adapter_read = aac_read_raw_io; + dev->a_ops.adapter_write = aac_write_raw_io; + } else { + dev->a_ops.adapter_bounds = aac_bounds_32; + dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - + sizeof(struct aac_fibhdr) - + sizeof(struct aac_write) + sizeof(struct sgentry)) / + sizeof(struct sgentry); + if (dev->dac_support) { + dev->a_ops.adapter_read = aac_read_block64; + dev->a_ops.adapter_write = aac_write_block64; + /* + * 38 scatter gather elements + */ + dev->scsi_host_ptr->sg_tablesize = + (dev->max_fib_size - + sizeof(struct aac_fibhdr) - + sizeof(struct aac_write64) + + sizeof(struct sgentry64)) / + sizeof(struct sgentry64); + } else { + dev->a_ops.adapter_read = aac_read_block; + dev->a_ops.adapter_write = aac_write_block; + } + dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; + if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { + /* + * Worst case size that could cause sg overflow when + * we break up SG elements that are larger than 64KB. + * Would be nice if we could tell the SCSI layer what + * the maximum SG element size can be. Worst case is + * (sg_tablesize-1) 4KB elements with one 64KB + * element. + * 32bit -> 468 or 238KB 64bit -> 424 or 212KB + */ + dev->scsi_host_ptr->max_sectors = + (dev->scsi_host_ptr->sg_tablesize * 8) + 112; + } + } + if (!dev->sync_mode && dev->sa_firmware && + dev->scsi_host_ptr->sg_tablesize > HBA_MAX_SG_SEPARATE) + dev->scsi_host_ptr->sg_tablesize = dev->sg_tablesize = + HBA_MAX_SG_SEPARATE; + + /* FIB should be freed only after getting the response from the F/W */ + if (rcode != -ERESTARTSYS) { + aac_fib_complete(fibptr); + aac_fib_free(fibptr); + } + + return rcode; +} + + +static void io_callback(void *context, struct fib * fibptr) +{ + struct aac_dev *dev; + struct aac_read_reply *readreply; + struct scsi_cmnd *scsicmd; + u32 cid; + + scsicmd = (struct scsi_cmnd *) context; + + if (!aac_valid_context(scsicmd, fibptr)) + return; + + dev = fibptr->dev; + cid = scmd_id(scsicmd); + + if (nblank(dprintk(x))) { + u64 lba; + switch (scsicmd->cmnd[0]) { + case WRITE_6: + case READ_6: + lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | + (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; + break; + case WRITE_16: + case READ_16: + lba = ((u64)scsicmd->cmnd[2] << 56) | + ((u64)scsicmd->cmnd[3] << 48) | + ((u64)scsicmd->cmnd[4] << 40) | + ((u64)scsicmd->cmnd[5] << 32) | + ((u64)scsicmd->cmnd[6] << 24) | + (scsicmd->cmnd[7] << 16) | + (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; + break; + case WRITE_12: + case READ_12: + lba = ((u64)scsicmd->cmnd[2] << 24) | + (scsicmd->cmnd[3] << 16) | + (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; + break; + default: + lba = ((u64)scsicmd->cmnd[2] << 24) | + (scsicmd->cmnd[3] << 16) | + (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; + break; + } + printk(KERN_DEBUG + "io_callback[cpu %d]: lba = %llu, t = %ld.\n", + smp_processor_id(), (unsigned long long)lba, jiffies); + } + + BUG_ON(fibptr == NULL); + + scsi_dma_unmap(scsicmd); + + readreply = (struct aac_read_reply *)fib_data(fibptr); + switch (le32_to_cpu(readreply->status)) { + case ST_OK: + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE; + break; + case ST_NOT_READY: + scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; + set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY, + SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0); + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), + 
SCSI_SENSE_BUFFERSIZE)); + break; + case ST_MEDERR: + scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; + set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR, + SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0); + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), + SCSI_SENSE_BUFFERSIZE)); + break; + default: +#ifdef AAC_DETAILED_STATUS_INFO + printk(KERN_WARNING "io_callback: io failed, status = %d\n", + le32_to_cpu(readreply->status)); +#endif + scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; + set_sense(&dev->fsa_dev[cid].sense_data, + HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), + SCSI_SENSE_BUFFERSIZE)); + break; + } + aac_fib_complete(fibptr); + + aac_scsi_done(scsicmd); +} + +static int aac_read(struct scsi_cmnd * scsicmd) +{ + u64 lba; + u32 count; + int status; + struct aac_dev *dev; + struct fib * cmd_fibcontext; + int cid; + + dev = (struct aac_dev *)scsicmd->device->host->hostdata; + /* + * Get block address and transfer length + */ + switch (scsicmd->cmnd[0]) { + case READ_6: + dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd))); + + lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | + (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; + count = scsicmd->cmnd[4]; + + if (count == 0) + count = 256; + break; + case READ_16: + dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd))); + + lba = ((u64)scsicmd->cmnd[2] << 56) | + ((u64)scsicmd->cmnd[3] << 48) | + ((u64)scsicmd->cmnd[4] << 40) | + ((u64)scsicmd->cmnd[5] << 32) | + ((u64)scsicmd->cmnd[6] << 24) | + (scsicmd->cmnd[7] << 16) | + (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; + count = (scsicmd->cmnd[10] << 24) | + (scsicmd->cmnd[11] << 16) | + (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13]; + break; + case READ_12: + dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd))); + + lba = ((u64)scsicmd->cmnd[2] << 24) | + (scsicmd->cmnd[3] << 16) | + (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; + count = (scsicmd->cmnd[6] << 24) | + (scsicmd->cmnd[7] << 16) | + (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; + break; + default: + dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd))); + + lba = ((u64)scsicmd->cmnd[2] << 24) | + (scsicmd->cmnd[3] << 16) | + (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; + count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; + break; + } + + if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) { + cid = scmd_id(scsicmd); + dprintk((KERN_DEBUG "aacraid: Illegal lba\n")); + scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; + set_sense(&dev->fsa_dev[cid].sense_data, + ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), + SCSI_SENSE_BUFFERSIZE)); + aac_scsi_done(scsicmd); + return 0; + } + + dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n", + smp_processor_id(), (unsigned long long)lba, jiffies)); + if (aac_adapter_bounds(dev,scsicmd,lba)) + return 0; + /* + * Alocate and initialize a Fib + */ + cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); + aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; + status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count); + + 
/* + * Check that the command queued to the controller + */ + if (status == -EINPROGRESS) + return 0; + + printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status); + /* + * For some reason, the Fib didn't queue, return QUEUE_FULL + */ + scsicmd->result = DID_OK << 16 | SAM_STAT_TASK_SET_FULL; + aac_scsi_done(scsicmd); + aac_fib_complete(cmd_fibcontext); + aac_fib_free(cmd_fibcontext); + return 0; +} + +static int aac_write(struct scsi_cmnd * scsicmd) +{ + u64 lba; + u32 count; + int fua; + int status; + struct aac_dev *dev; + struct fib * cmd_fibcontext; + int cid; + + dev = (struct aac_dev *)scsicmd->device->host->hostdata; + /* + * Get block address and transfer length + */ + if (scsicmd->cmnd[0] == WRITE_6) /* 6 byte command */ + { + lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; + count = scsicmd->cmnd[4]; + if (count == 0) + count = 256; + fua = 0; + } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */ + dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd))); + + lba = ((u64)scsicmd->cmnd[2] << 56) | + ((u64)scsicmd->cmnd[3] << 48) | + ((u64)scsicmd->cmnd[4] << 40) | + ((u64)scsicmd->cmnd[5] << 32) | + ((u64)scsicmd->cmnd[6] << 24) | + (scsicmd->cmnd[7] << 16) | + (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; + count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) | + (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13]; + fua = scsicmd->cmnd[1] & 0x8; + } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */ + dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd))); + + lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) + | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; + count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16) + | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; + fua = scsicmd->cmnd[1] & 0x8; + } else { + dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd))); + lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; + count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; + fua = scsicmd->cmnd[1] & 0x8; + } + + if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) { + cid = scmd_id(scsicmd); + dprintk((KERN_DEBUG "aacraid: Illegal lba\n")); + scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; + set_sense(&dev->fsa_dev[cid].sense_data, + ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), + SCSI_SENSE_BUFFERSIZE)); + aac_scsi_done(scsicmd); + return 0; + } + + dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n", + smp_processor_id(), (unsigned long long)lba, jiffies)); + if (aac_adapter_bounds(dev,scsicmd,lba)) + return 0; + /* + * Allocate and initialize a Fib then setup a BlockWrite command + */ + cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); + aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; + status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); + + /* + * Check that the command queued to the controller + */ + if (status == -EINPROGRESS) + return 0; + + printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status); + /* + * For some reason, the Fib didn't queue, return QUEUE_FULL + */ + scsicmd->result = DID_OK << 16 | SAM_STAT_TASK_SET_FULL; + aac_scsi_done(scsicmd); + + aac_fib_complete(cmd_fibcontext); + 
aac_fib_free(cmd_fibcontext); + return 0; +} + +static void synchronize_callback(void *context, struct fib *fibptr) +{ + struct aac_synchronize_reply *synchronizereply; + struct scsi_cmnd *cmd = context; + + if (!aac_valid_context(cmd, fibptr)) + return; + + dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n", + smp_processor_id(), jiffies)); + BUG_ON(fibptr == NULL); + + + synchronizereply = fib_data(fibptr); + if (le32_to_cpu(synchronizereply->status) == CT_OK) + cmd->result = DID_OK << 16 | SAM_STAT_GOOD; + else { + struct scsi_device *sdev = cmd->device; + struct aac_dev *dev = fibptr->dev; + u32 cid = sdev_id(sdev); + printk(KERN_WARNING + "synchronize_callback: synchronize failed, status = %d\n", + le32_to_cpu(synchronizereply->status)); + cmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; + set_sense(&dev->fsa_dev[cid].sense_data, + HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); + memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), + SCSI_SENSE_BUFFERSIZE)); + } + + aac_fib_complete(fibptr); + aac_fib_free(fibptr); + aac_scsi_done(cmd); +} + +static int aac_synchronize(struct scsi_cmnd *scsicmd) +{ + int status; + struct fib *cmd_fibcontext; + struct aac_synchronize *synchronizecmd; + struct scsi_device *sdev = scsicmd->device; + struct aac_dev *aac; + + aac = (struct aac_dev *)sdev->host->hostdata; + if (aac->in_reset) + return SCSI_MLQUEUE_HOST_BUSY; + + /* + * Allocate and initialize a Fib + */ + cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd); + + aac_fib_init(cmd_fibcontext); + + synchronizecmd = fib_data(cmd_fibcontext); + synchronizecmd->command = cpu_to_le32(VM_ContainerConfig); + synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE); + synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd)); + synchronizecmd->count = + cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data)); + aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; + + /* + * Now send the Fib to the adapter + */ + status = aac_fib_send(ContainerCommand, + cmd_fibcontext, + sizeof(struct aac_synchronize), + FsaNormal, + 0, 1, + (fib_callback)synchronize_callback, + (void *)scsicmd); + + /* + * Check that the command queued to the controller + */ + if (status == -EINPROGRESS) + return 0; + + printk(KERN_WARNING + "aac_synchronize: aac_fib_send failed with status: %d.\n", status); + aac_fib_complete(cmd_fibcontext); + aac_fib_free(cmd_fibcontext); + return SCSI_MLQUEUE_HOST_BUSY; +} + +static void aac_start_stop_callback(void *context, struct fib *fibptr) +{ + struct scsi_cmnd *scsicmd = context; + + if (!aac_valid_context(scsicmd, fibptr)) + return; + + BUG_ON(fibptr == NULL); + + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + + aac_fib_complete(fibptr); + aac_fib_free(fibptr); + aac_scsi_done(scsicmd); +} + +static int aac_start_stop(struct scsi_cmnd *scsicmd) +{ + int status; + struct fib *cmd_fibcontext; + struct aac_power_management *pmcmd; + struct scsi_device *sdev = scsicmd->device; + struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; + + if (!(aac->supplement_adapter_info.supported_options2 & + AAC_OPTION_POWER_MANAGEMENT)) { + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + aac_scsi_done(scsicmd); + return 0; + } + + if (aac->in_reset) + return SCSI_MLQUEUE_HOST_BUSY; + + /* + * Allocate and initialize a Fib + */ + cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd); + + aac_fib_init(cmd_fibcontext); + + pmcmd = fib_data(cmd_fibcontext); + pmcmd->command = 
cpu_to_le32(VM_ContainerConfig); + pmcmd->type = cpu_to_le32(CT_POWER_MANAGEMENT); + /* Eject bit ignored, not relevant */ + pmcmd->sub = (scsicmd->cmnd[4] & 1) ? + cpu_to_le32(CT_PM_START_UNIT) : cpu_to_le32(CT_PM_STOP_UNIT); + pmcmd->cid = cpu_to_le32(sdev_id(sdev)); + pmcmd->parm = (scsicmd->cmnd[1] & 1) ? + cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0; + aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; + + /* + * Now send the Fib to the adapter + */ + status = aac_fib_send(ContainerCommand, + cmd_fibcontext, + sizeof(struct aac_power_management), + FsaNormal, + 0, 1, + (fib_callback)aac_start_stop_callback, + (void *)scsicmd); + + /* + * Check that the command queued to the controller + */ + if (status == -EINPROGRESS) + return 0; + + aac_fib_complete(cmd_fibcontext); + aac_fib_free(cmd_fibcontext); + return SCSI_MLQUEUE_HOST_BUSY; +} + +/** + * aac_scsi_cmd() - Process SCSI command + * @scsicmd: SCSI command block + * + * Emulate a SCSI command and queue the required request for the + * aacraid firmware. + */ + +int aac_scsi_cmd(struct scsi_cmnd * scsicmd) +{ + u32 cid, bus; + struct Scsi_Host *host = scsicmd->device->host; + struct aac_dev *dev = (struct aac_dev *)host->hostdata; + struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev; + + if (fsa_dev_ptr == NULL) + return -1; + /* + * If the bus, id or lun is out of range, return fail + * Test does not apply to ID 16, the pseudo id for the controller + * itself. + */ + cid = scmd_id(scsicmd); + if (cid != host->this_id) { + if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) { + if((cid >= dev->maximum_num_containers) || + (scsicmd->device->lun != 0)) { + scsicmd->result = DID_NO_CONNECT << 16; + goto scsi_done_ret; + } + + /* + * If the target container doesn't exist, it may have + * been newly created + */ + if (((fsa_dev_ptr[cid].valid & 1) == 0) || + (fsa_dev_ptr[cid].sense_data.sense_key == + NOT_READY)) { + switch (scsicmd->cmnd[0]) { + case SERVICE_ACTION_IN_16: + if (!(dev->raw_io_interface) || + !(dev->raw_io_64) || + ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) + break; + fallthrough; + case INQUIRY: + case READ_CAPACITY: + case TEST_UNIT_READY: + if (dev->in_reset) + return -1; + return _aac_probe_container(scsicmd, + aac_probe_container_callback2); + default: + break; + } + } + } else { /* check for physical non-dasd devices */ + bus = aac_logical_to_phys(scmd_channel(scsicmd)); + + if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS && + dev->hba_map[bus][cid].devtype + == AAC_DEVTYPE_NATIVE_RAW) { + if (dev->in_reset) + return -1; + return aac_send_hba_fib(scsicmd); + } else if (dev->nondasd_support || expose_physicals || + dev->jbod) { + if (dev->in_reset) + return -1; + return aac_send_srb_fib(scsicmd); + } else { + scsicmd->result = DID_NO_CONNECT << 16; + goto scsi_done_ret; + } + } + } + /* + * else Command for the controller itself + */ + else if ((scsicmd->cmnd[0] != INQUIRY) && /* only INQUIRY & TUR cmnd supported for controller */ + (scsicmd->cmnd[0] != TEST_UNIT_READY)) + { + dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0])); + scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; + set_sense(&dev->fsa_dev[cid].sense_data, + ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND, + ASENCODE_INVALID_COMMAND, 0, 0); + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), + SCSI_SENSE_BUFFERSIZE)); + goto scsi_done_ret; + } + + switch (scsicmd->cmnd[0]) { + case READ_6: + case READ_10: + case READ_12: + case 
READ_16: + if (dev->in_reset) + return -1; + return aac_read(scsicmd); + + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + if (dev->in_reset) + return -1; + return aac_write(scsicmd); + + case SYNCHRONIZE_CACHE: + if (((aac_cache & 6) == 6) && dev->cache_protected) { + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + break; + } + /* Issue FIB to tell Firmware to flush it's cache */ + if ((aac_cache & 6) != 2) + return aac_synchronize(scsicmd); + fallthrough; + case INQUIRY: + { + struct inquiry_data inq_data; + + dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid)); + memset(&inq_data, 0, sizeof (struct inquiry_data)); + + if ((scsicmd->cmnd[1] & 0x1) && aac_wwn) { + char *arr = (char *)&inq_data; + + /* EVPD bit set */ + arr[0] = (scmd_id(scsicmd) == host->this_id) ? + INQD_PDT_PROC : INQD_PDT_DA; + if (scsicmd->cmnd[2] == 0) { + /* supported vital product data pages */ + arr[3] = 3; + arr[4] = 0x0; + arr[5] = 0x80; + arr[6] = 0x83; + arr[1] = scsicmd->cmnd[2]; + scsi_sg_copy_from_buffer(scsicmd, &inq_data, + sizeof(inq_data)); + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + } else if (scsicmd->cmnd[2] == 0x80) { + /* unit serial number page */ + arr[3] = setinqserial(dev, &arr[4], + scmd_id(scsicmd)); + arr[1] = scsicmd->cmnd[2]; + scsi_sg_copy_from_buffer(scsicmd, &inq_data, + sizeof(inq_data)); + if (aac_wwn != 2) + return aac_get_container_serial( + scsicmd); + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + } else if (scsicmd->cmnd[2] == 0x83) { + /* vpd page 0x83 - Device Identification Page */ + char *sno = (char *)&inq_data; + sno[3] = setinqserial(dev, &sno[4], + scmd_id(scsicmd)); + if (aac_wwn != 2) + return aac_get_container_serial( + scsicmd); + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + } else { + /* vpd page not implemented */ + scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; + set_sense(&dev->fsa_dev[cid].sense_data, + ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD, + ASENCODE_NO_SENSE, 7, 2); + memcpy(scsicmd->sense_buffer, + &dev->fsa_dev[cid].sense_data, + min_t(size_t, + sizeof(dev->fsa_dev[cid].sense_data), + SCSI_SENSE_BUFFERSIZE)); + } + break; + } + inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */ + inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */ + inq_data.inqd_len = 31; + /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */ + inq_data.inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */ + /* + * Set the Vendor, Product, and Revision Level + * see: .c i.e. 
aac.c + */ + if (cid == host->this_id) { + setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types)); + inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */ + scsi_sg_copy_from_buffer(scsicmd, &inq_data, + sizeof(inq_data)); + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + break; + } + if (dev->in_reset) + return -1; + setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type); + inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */ + scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data)); + return aac_get_container_name(scsicmd); + } + case SERVICE_ACTION_IN_16: + if (!(dev->raw_io_interface) || + !(dev->raw_io_64) || + ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) + break; + { + u64 capacity; + char cp[13]; + unsigned int alloc_len; + + dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n")); + capacity = fsa_dev_ptr[cid].size - 1; + cp[0] = (capacity >> 56) & 0xff; + cp[1] = (capacity >> 48) & 0xff; + cp[2] = (capacity >> 40) & 0xff; + cp[3] = (capacity >> 32) & 0xff; + cp[4] = (capacity >> 24) & 0xff; + cp[5] = (capacity >> 16) & 0xff; + cp[6] = (capacity >> 8) & 0xff; + cp[7] = (capacity >> 0) & 0xff; + cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff; + cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff; + cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff; + cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff; + cp[12] = 0; + + alloc_len = ((scsicmd->cmnd[10] << 24) + + (scsicmd->cmnd[11] << 16) + + (scsicmd->cmnd[12] << 8) + scsicmd->cmnd[13]); + + alloc_len = min_t(size_t, alloc_len, sizeof(cp)); + scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len); + if (alloc_len < scsi_bufflen(scsicmd)) + scsi_set_resid(scsicmd, + scsi_bufflen(scsicmd) - alloc_len); + + /* Do not cache partition table for arrays */ + scsicmd->device->removable = 1; + + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + break; + } + + case READ_CAPACITY: + { + u32 capacity; + char cp[8]; + + dprintk((KERN_DEBUG "READ CAPACITY command.\n")); + if (fsa_dev_ptr[cid].size <= 0x100000000ULL) + capacity = fsa_dev_ptr[cid].size - 1; + else + capacity = (u32)-1; + + cp[0] = (capacity >> 24) & 0xff; + cp[1] = (capacity >> 16) & 0xff; + cp[2] = (capacity >> 8) & 0xff; + cp[3] = (capacity >> 0) & 0xff; + cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff; + cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff; + cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff; + cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff; + scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp)); + /* Do not cache partition table for arrays */ + scsicmd->device->removable = 1; + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + break; + } + + case MODE_SENSE: + { + int mode_buf_length = 4; + u32 capacity; + aac_modep_data mpd; + + if (fsa_dev_ptr[cid].size <= 0x100000000ULL) + capacity = fsa_dev_ptr[cid].size - 1; + else + capacity = (u32)-1; + + dprintk((KERN_DEBUG "MODE SENSE command.\n")); + memset((char *)&mpd, 0, sizeof(aac_modep_data)); + + /* Mode data length */ + mpd.hd.data_length = sizeof(mpd.hd) - 1; + /* Medium type - default */ + mpd.hd.med_type = 0; + /* Device-specific param, + bit 8: 0/1 = write enabled/protected + bit 4: 0/1 = FUA enabled */ + mpd.hd.dev_par = 0; + + if (dev->raw_io_interface && ((aac_cache & 5) != 1)) + mpd.hd.dev_par = 0x10; + if (scsicmd->cmnd[1] & 0x8) + mpd.hd.bd_length = 0; /* Block descriptor length */ + else { + mpd.hd.bd_length = sizeof(mpd.bd); + mpd.hd.data_length += mpd.hd.bd_length; + mpd.bd.block_length[0] = + (fsa_dev_ptr[cid].block_size >> 16) & 0xff; + 
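+			/*
+			 * The MODE SENSE block descriptor reports the container's
+			 * logical block size and block count as 24-bit big-endian
+			 * fields; counts above 0xffffff are clamped further down.
+			 */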
mpd.bd.block_length[1] = + (fsa_dev_ptr[cid].block_size >> 8) & 0xff; + mpd.bd.block_length[2] = + fsa_dev_ptr[cid].block_size & 0xff; + + mpd.mpc_buf[0] = scsicmd->cmnd[2]; + if (scsicmd->cmnd[2] == 0x1C) { + /* page length */ + mpd.mpc_buf[1] = 0xa; + /* Mode data length */ + mpd.hd.data_length = 23; + } else { + /* Mode data length */ + mpd.hd.data_length = 15; + } + + if (capacity > 0xffffff) { + mpd.bd.block_count[0] = 0xff; + mpd.bd.block_count[1] = 0xff; + mpd.bd.block_count[2] = 0xff; + } else { + mpd.bd.block_count[0] = (capacity >> 16) & 0xff; + mpd.bd.block_count[1] = (capacity >> 8) & 0xff; + mpd.bd.block_count[2] = capacity & 0xff; + } + } + if (((scsicmd->cmnd[2] & 0x3f) == 8) || + ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) { + mpd.hd.data_length += 3; + mpd.mpc_buf[0] = 8; + mpd.mpc_buf[1] = 1; + mpd.mpc_buf[2] = ((aac_cache & 6) == 2) + ? 0 : 0x04; /* WCE */ + mode_buf_length = sizeof(mpd); + } + + if (mode_buf_length > scsicmd->cmnd[4]) + mode_buf_length = scsicmd->cmnd[4]; + else + mode_buf_length = sizeof(mpd); + scsi_sg_copy_from_buffer(scsicmd, + (char *)&mpd, + mode_buf_length); + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + break; + } + case MODE_SENSE_10: + { + u32 capacity; + int mode_buf_length = 8; + aac_modep10_data mpd10; + + if (fsa_dev_ptr[cid].size <= 0x100000000ULL) + capacity = fsa_dev_ptr[cid].size - 1; + else + capacity = (u32)-1; + + dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n")); + memset((char *)&mpd10, 0, sizeof(aac_modep10_data)); + /* Mode data length (MSB) */ + mpd10.hd.data_length[0] = 0; + /* Mode data length (LSB) */ + mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1; + /* Medium type - default */ + mpd10.hd.med_type = 0; + /* Device-specific param, + bit 8: 0/1 = write enabled/protected + bit 4: 0/1 = FUA enabled */ + mpd10.hd.dev_par = 0; + + if (dev->raw_io_interface && ((aac_cache & 5) != 1)) + mpd10.hd.dev_par = 0x10; + mpd10.hd.rsrvd[0] = 0; /* reserved */ + mpd10.hd.rsrvd[1] = 0; /* reserved */ + if (scsicmd->cmnd[1] & 0x8) { + /* Block descriptor length (MSB) */ + mpd10.hd.bd_length[0] = 0; + /* Block descriptor length (LSB) */ + mpd10.hd.bd_length[1] = 0; + } else { + mpd10.hd.bd_length[0] = 0; + mpd10.hd.bd_length[1] = sizeof(mpd10.bd); + + mpd10.hd.data_length[1] += mpd10.hd.bd_length[1]; + + mpd10.bd.block_length[0] = + (fsa_dev_ptr[cid].block_size >> 16) & 0xff; + mpd10.bd.block_length[1] = + (fsa_dev_ptr[cid].block_size >> 8) & 0xff; + mpd10.bd.block_length[2] = + fsa_dev_ptr[cid].block_size & 0xff; + + if (capacity > 0xffffff) { + mpd10.bd.block_count[0] = 0xff; + mpd10.bd.block_count[1] = 0xff; + mpd10.bd.block_count[2] = 0xff; + } else { + mpd10.bd.block_count[0] = + (capacity >> 16) & 0xff; + mpd10.bd.block_count[1] = + (capacity >> 8) & 0xff; + mpd10.bd.block_count[2] = + capacity & 0xff; + } + } + if (((scsicmd->cmnd[2] & 0x3f) == 8) || + ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) { + mpd10.hd.data_length[1] += 3; + mpd10.mpc_buf[0] = 8; + mpd10.mpc_buf[1] = 1; + mpd10.mpc_buf[2] = ((aac_cache & 6) == 2) + ? 
0 : 0x04; /* WCE */ + mode_buf_length = sizeof(mpd10); + if (mode_buf_length > scsicmd->cmnd[8]) + mode_buf_length = scsicmd->cmnd[8]; + } + scsi_sg_copy_from_buffer(scsicmd, + (char *)&mpd10, + mode_buf_length); + + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + break; + } + case REQUEST_SENSE: + dprintk((KERN_DEBUG "REQUEST SENSE command.\n")); + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + sizeof(struct sense_data)); + memset(&dev->fsa_dev[cid].sense_data, 0, + sizeof(struct sense_data)); + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + break; + + case ALLOW_MEDIUM_REMOVAL: + dprintk((KERN_DEBUG "LOCK command.\n")); + if (scsicmd->cmnd[4]) + fsa_dev_ptr[cid].locked = 1; + else + fsa_dev_ptr[cid].locked = 0; + + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + break; + /* + * These commands are all No-Ops + */ + case TEST_UNIT_READY: + if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) { + scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; + set_sense(&dev->fsa_dev[cid].sense_data, + NOT_READY, SENCODE_BECOMING_READY, + ASENCODE_BECOMING_READY, 0, 0); + memcpy(scsicmd->sense_buffer, + &dev->fsa_dev[cid].sense_data, + min_t(size_t, + sizeof(dev->fsa_dev[cid].sense_data), + SCSI_SENSE_BUFFERSIZE)); + break; + } + fallthrough; + case RESERVE: + case RELEASE: + case REZERO_UNIT: + case REASSIGN_BLOCKS: + case SEEK_10: + scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; + break; + + case START_STOP: + return aac_start_stop(scsicmd); + + default: + /* + * Unhandled commands + */ + dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", + scsicmd->cmnd[0])); + scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; + set_sense(&dev->fsa_dev[cid].sense_data, + ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND, + ASENCODE_INVALID_COMMAND, 0, 0); + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + min_t(size_t, + sizeof(dev->fsa_dev[cid].sense_data), + SCSI_SENSE_BUFFERSIZE)); + } + +scsi_done_ret: + + aac_scsi_done(scsicmd); + return 0; +} + +static int query_disk(struct aac_dev *dev, void __user *arg) +{ + struct aac_query_disk qd; + struct fsa_dev_info *fsa_dev_ptr; + + fsa_dev_ptr = dev->fsa_dev; + if (!fsa_dev_ptr) + return -EBUSY; + if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) + return -EFAULT; + if (qd.cnum == -1) { + if (qd.id < 0 || qd.id >= dev->maximum_num_containers) + return -EINVAL; + qd.cnum = qd.id; + } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) { + if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers) + return -EINVAL; + qd.instance = dev->scsi_host_ptr->host_no; + qd.bus = 0; + qd.id = CONTAINER_TO_ID(qd.cnum); + qd.lun = CONTAINER_TO_LUN(qd.cnum); + } + else return -EINVAL; + + qd.valid = fsa_dev_ptr[qd.cnum].valid != 0; + qd.locked = fsa_dev_ptr[qd.cnum].locked; + qd.deleted = fsa_dev_ptr[qd.cnum].deleted; + + if (fsa_dev_ptr[qd.cnum].devname[0] == '\0') + qd.unmapped = 1; + else + qd.unmapped = 0; + + strscpy(qd.name, fsa_dev_ptr[qd.cnum].devname, + min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1)); + + if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk))) + return -EFAULT; + return 0; +} + +static int force_delete_disk(struct aac_dev *dev, void __user *arg) +{ + struct aac_delete_disk dd; + struct fsa_dev_info *fsa_dev_ptr; + + fsa_dev_ptr = dev->fsa_dev; + if (!fsa_dev_ptr) + return -EBUSY; + + if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk))) + return -EFAULT; + + if (dd.cnum >= dev->maximum_num_containers) + return -EINVAL; + /* + * Mark this container as 
being deleted. + */ + fsa_dev_ptr[dd.cnum].deleted = 1; + /* + * Mark the container as no longer valid + */ + fsa_dev_ptr[dd.cnum].valid = 0; + return 0; +} + +static int delete_disk(struct aac_dev *dev, void __user *arg) +{ + struct aac_delete_disk dd; + struct fsa_dev_info *fsa_dev_ptr; + + fsa_dev_ptr = dev->fsa_dev; + if (!fsa_dev_ptr) + return -EBUSY; + + if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk))) + return -EFAULT; + + if (dd.cnum >= dev->maximum_num_containers) + return -EINVAL; + /* + * If the container is locked, it can not be deleted by the API. + */ + if (fsa_dev_ptr[dd.cnum].locked) + return -EBUSY; + else { + /* + * Mark the container as no longer being valid. + */ + fsa_dev_ptr[dd.cnum].valid = 0; + fsa_dev_ptr[dd.cnum].devname[0] = '\0'; + return 0; + } +} + +int aac_dev_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg) +{ + switch (cmd) { + case FSACTL_QUERY_DISK: + return query_disk(dev, arg); + case FSACTL_DELETE_DISK: + return delete_disk(dev, arg); + case FSACTL_FORCE_DELETE_DISK: + return force_delete_disk(dev, arg); + case FSACTL_GET_CONTAINERS: + return aac_get_containers(dev); + default: + return -ENOTTY; + } +} + +/** + * aac_srb_callback + * @context: the context set in the fib - here it is scsi cmd + * @fibptr: pointer to the fib + * + * Handles the completion of a scsi command to a non dasd device + */ +static void aac_srb_callback(void *context, struct fib * fibptr) +{ + struct aac_srb_reply *srbreply; + struct scsi_cmnd *scsicmd; + + scsicmd = (struct scsi_cmnd *) context; + + if (!aac_valid_context(scsicmd, fibptr)) + return; + + BUG_ON(fibptr == NULL); + + srbreply = (struct aac_srb_reply *) fib_data(fibptr); + + scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */ + + if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) { + /* fast response */ + srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS); + srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD); + } else { + /* + * Calculate resid for sg + */ + scsi_set_resid(scsicmd, scsi_bufflen(scsicmd) + - le32_to_cpu(srbreply->data_xfer_length)); + } + + + scsi_dma_unmap(scsicmd); + + /* expose physical device if expose_physicald flag is on */ + if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01) + && expose_physicals > 0) + aac_expose_phy_device(scsicmd); + + /* + * First check the fib status + */ + + if (le32_to_cpu(srbreply->status) != ST_OK) { + int len; + + pr_warn("aac_srb_callback: srb failed, status = %d\n", + le32_to_cpu(srbreply->status)); + len = min_t(u32, le32_to_cpu(srbreply->sense_data_size), + SCSI_SENSE_BUFFERSIZE); + scsicmd->result = DID_ERROR << 16 | SAM_STAT_CHECK_CONDITION; + memcpy(scsicmd->sense_buffer, + srbreply->sense_data, len); + } + + /* + * Next check the srb status + */ + switch ((le32_to_cpu(srbreply->srb_status))&0x3f) { + case SRB_STATUS_ERROR_RECOVERY: + case SRB_STATUS_PENDING: + case SRB_STATUS_SUCCESS: + scsicmd->result = DID_OK << 16; + break; + case SRB_STATUS_DATA_OVERRUN: + switch (scsicmd->cmnd[0]) { + case READ_6: + case WRITE_6: + case READ_10: + case WRITE_10: + case READ_12: + case WRITE_12: + case READ_16: + case WRITE_16: + if (le32_to_cpu(srbreply->data_xfer_length) + < scsicmd->underflow) + pr_warn("aacraid: SCSI CMD underflow\n"); + else + pr_warn("aacraid: SCSI CMD Data Overrun\n"); + scsicmd->result = DID_ERROR << 16; + break; + case INQUIRY: + scsicmd->result = DID_OK << 16; + break; + default: + scsicmd->result = DID_OK << 16; + break; + } + break; + case SRB_STATUS_ABORTED: + scsicmd->result = 
DID_ABORT << 16; + break; + case SRB_STATUS_ABORT_FAILED: + /* + * Not sure about this one - but assuming the + * hba was trying to abort for some reason + */ + scsicmd->result = DID_ERROR << 16; + break; + case SRB_STATUS_PARITY_ERROR: + scsicmd->result = DID_PARITY << 16; + break; + case SRB_STATUS_NO_DEVICE: + case SRB_STATUS_INVALID_PATH_ID: + case SRB_STATUS_INVALID_TARGET_ID: + case SRB_STATUS_INVALID_LUN: + case SRB_STATUS_SELECTION_TIMEOUT: + scsicmd->result = DID_NO_CONNECT << 16; + break; + + case SRB_STATUS_COMMAND_TIMEOUT: + case SRB_STATUS_TIMEOUT: + scsicmd->result = DID_TIME_OUT << 16; + break; + + case SRB_STATUS_BUSY: + scsicmd->result = DID_BUS_BUSY << 16; + break; + + case SRB_STATUS_BUS_RESET: + scsicmd->result = DID_RESET << 16; + break; + + case SRB_STATUS_MESSAGE_REJECTED: + scsicmd->result = DID_ERROR << 16; + break; + case SRB_STATUS_REQUEST_FLUSHED: + case SRB_STATUS_ERROR: + case SRB_STATUS_INVALID_REQUEST: + case SRB_STATUS_REQUEST_SENSE_FAILED: + case SRB_STATUS_NO_HBA: + case SRB_STATUS_UNEXPECTED_BUS_FREE: + case SRB_STATUS_PHASE_SEQUENCE_FAILURE: + case SRB_STATUS_BAD_SRB_BLOCK_LENGTH: + case SRB_STATUS_DELAYED_RETRY: + case SRB_STATUS_BAD_FUNCTION: + case SRB_STATUS_NOT_STARTED: + case SRB_STATUS_NOT_IN_USE: + case SRB_STATUS_FORCE_ABORT: + case SRB_STATUS_DOMAIN_VALIDATION_FAIL: + default: +#ifdef AAC_DETAILED_STATUS_INFO + pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n", + le32_to_cpu(srbreply->srb_status) & 0x3F, + aac_get_status_string( + le32_to_cpu(srbreply->srb_status) & 0x3F), + scsicmd->cmnd[0], + le32_to_cpu(srbreply->scsi_status)); +#endif + /* + * When the CC bit is SET by the host in ATA pass thru CDB, + * driver is supposed to return DID_OK + * + * When the CC bit is RESET by the host, driver should + * return DID_ERROR + */ + if ((scsicmd->cmnd[0] == ATA_12) + || (scsicmd->cmnd[0] == ATA_16)) { + + if (scsicmd->cmnd[2] & (0x01 << 5)) { + scsicmd->result = DID_OK << 16; + } else { + scsicmd->result = DID_ERROR << 16; + } + } else { + scsicmd->result = DID_ERROR << 16; + } + break; + } + if (le32_to_cpu(srbreply->scsi_status) + == SAM_STAT_CHECK_CONDITION) { + int len; + + scsicmd->result |= SAM_STAT_CHECK_CONDITION; + len = min_t(u32, le32_to_cpu(srbreply->sense_data_size), + SCSI_SENSE_BUFFERSIZE); +#ifdef AAC_DETAILED_STATUS_INFO + pr_warn("aac_srb_callback: check condition, status = %d len=%d\n", + le32_to_cpu(srbreply->status), len); +#endif + memcpy(scsicmd->sense_buffer, + srbreply->sense_data, len); + } + + /* + * OR in the scsi status (already shifted up a bit) + */ + scsicmd->result |= le32_to_cpu(srbreply->scsi_status); + + aac_fib_complete(fibptr); + aac_scsi_done(scsicmd); +} + +static void hba_resp_task_complete(struct aac_dev *dev, + struct scsi_cmnd *scsicmd, + struct aac_hba_resp *err) { + + scsicmd->result = err->status; + /* set residual count */ + scsi_set_resid(scsicmd, le32_to_cpu(err->residual_count)); + + switch (err->status) { + case SAM_STAT_GOOD: + scsicmd->result |= DID_OK << 16; + break; + case SAM_STAT_CHECK_CONDITION: + { + int len; + + len = min_t(u8, err->sense_response_data_len, + SCSI_SENSE_BUFFERSIZE); + if (len) + memcpy(scsicmd->sense_buffer, + err->sense_response_buf, len); + scsicmd->result |= DID_OK << 16; + break; + } + case SAM_STAT_BUSY: + scsicmd->result |= DID_BUS_BUSY << 16; + break; + case SAM_STAT_TASK_ABORTED: + scsicmd->result |= DID_ABORT << 16; + break; + case SAM_STAT_RESERVATION_CONFLICT: + case SAM_STAT_TASK_SET_FULL: + default: + scsicmd->result |= DID_ERROR << 16; + 
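+		/*
+		 * RESERVATION CONFLICT, TASK SET FULL and any unrecognised SAM
+		 * status are all reported to the midlayer as DID_ERROR.
+		 */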
break; + } +} + +static void hba_resp_task_failure(struct aac_dev *dev, + struct scsi_cmnd *scsicmd, + struct aac_hba_resp *err) +{ + switch (err->status) { + case HBA_RESP_STAT_HBAMODE_DISABLED: + { + u32 bus, cid; + + bus = aac_logical_to_phys(scmd_channel(scsicmd)); + cid = scmd_id(scsicmd); + if (dev->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) { + dev->hba_map[bus][cid].devtype = AAC_DEVTYPE_ARC_RAW; + dev->hba_map[bus][cid].rmw_nexus = 0xffffffff; + } + scsicmd->result = DID_NO_CONNECT << 16; + break; + } + case HBA_RESP_STAT_IO_ERROR: + case HBA_RESP_STAT_NO_PATH_TO_DEVICE: + scsicmd->result = DID_OK << 16 | SAM_STAT_BUSY; + break; + case HBA_RESP_STAT_IO_ABORTED: + scsicmd->result = DID_ABORT << 16; + break; + case HBA_RESP_STAT_INVALID_DEVICE: + scsicmd->result = DID_NO_CONNECT << 16; + break; + case HBA_RESP_STAT_UNDERRUN: + /* UNDERRUN is OK */ + scsicmd->result = DID_OK << 16; + break; + case HBA_RESP_STAT_OVERRUN: + default: + scsicmd->result = DID_ERROR << 16; + break; + } +} + +/** + * aac_hba_callback + * @context: the context set in the fib - here it is scsi cmd + * @fibptr: pointer to the fib + * + * Handles the completion of a native HBA scsi command + */ +void aac_hba_callback(void *context, struct fib *fibptr) +{ + struct aac_dev *dev; + struct scsi_cmnd *scsicmd; + + struct aac_hba_resp *err = + &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err; + + scsicmd = (struct scsi_cmnd *) context; + + if (!aac_valid_context(scsicmd, fibptr)) + return; + + WARN_ON(fibptr == NULL); + dev = fibptr->dev; + + if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)) + scsi_dma_unmap(scsicmd); + + if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) { + /* fast response */ + scsicmd->result = DID_OK << 16; + goto out; + } + + switch (err->service_response) { + case HBA_RESP_SVCRES_TASK_COMPLETE: + hba_resp_task_complete(dev, scsicmd, err); + break; + case HBA_RESP_SVCRES_FAILURE: + hba_resp_task_failure(dev, scsicmd, err); + break; + case HBA_RESP_SVCRES_TMF_REJECTED: + scsicmd->result = DID_ERROR << 16; + break; + case HBA_RESP_SVCRES_TMF_LUN_INVALID: + scsicmd->result = DID_NO_CONNECT << 16; + break; + case HBA_RESP_SVCRES_TMF_COMPLETE: + case HBA_RESP_SVCRES_TMF_SUCCEEDED: + scsicmd->result = DID_OK << 16; + break; + default: + scsicmd->result = DID_ERROR << 16; + break; + } + +out: + aac_fib_complete(fibptr); + + if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) + aac_priv(scsicmd)->sent_command = 1; + else + aac_scsi_done(scsicmd); +} + +/** + * aac_send_srb_fib + * @scsicmd: the scsi command block + * + * This routine will form a FIB and fill in the aac_srb from the + * scsicmd passed in. 
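+ *
+ *	Returns 0 when the command was queued to the adapter (or completed
+ *	immediately for an out-of-range device) and -1 when the FIB could
+ *	not be sent; in that case the FIB is completed and freed before
+ *	returning.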
+ */ +static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) +{ + struct fib* cmd_fibcontext; + struct aac_dev* dev; + int status; + + dev = (struct aac_dev *)scsicmd->device->host->hostdata; + if (scmd_id(scsicmd) >= dev->maximum_num_physicals || + scsicmd->device->lun > 7) { + scsicmd->result = DID_NO_CONNECT << 16; + aac_scsi_done(scsicmd); + return 0; + } + + /* + * Allocate and initialize a Fib then setup a BlockWrite command + */ + cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); + aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; + status = aac_adapter_scsi(cmd_fibcontext, scsicmd); + + /* + * Check that the command queued to the controller + */ + if (status == -EINPROGRESS) + return 0; + + printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status); + aac_fib_complete(cmd_fibcontext); + aac_fib_free(cmd_fibcontext); + + return -1; +} + +/** + * aac_send_hba_fib + * @scsicmd: the scsi command block + * + * This routine will form a FIB and fill in the aac_hba_cmd_req from the + * scsicmd passed in. + */ +static int aac_send_hba_fib(struct scsi_cmnd *scsicmd) +{ + struct fib *cmd_fibcontext; + struct aac_dev *dev; + int status; + + dev = shost_priv(scsicmd->device->host); + if (scmd_id(scsicmd) >= dev->maximum_num_physicals || + scsicmd->device->lun > AAC_MAX_LUN - 1) { + scsicmd->result = DID_NO_CONNECT << 16; + aac_scsi_done(scsicmd); + return 0; + } + + /* + * Allocate and initialize a Fib then setup a BlockWrite command + */ + cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); + if (!cmd_fibcontext) + return -1; + + aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; + status = aac_adapter_hba(cmd_fibcontext, scsicmd); + + /* + * Check that the command queued to the controller + */ + if (status == -EINPROGRESS) + return 0; + + pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n", + status); + aac_fib_complete(cmd_fibcontext); + aac_fib_free(cmd_fibcontext); + + return -1; +} + + +static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg) +{ + unsigned long byte_count = 0; + int nseg; + struct scatterlist *sg; + int i; + + // Get rid of old data + psg->count = 0; + psg->sg[0].addr = 0; + psg->sg[0].count = 0; + + nseg = scsi_dma_map(scsicmd); + if (nseg <= 0) + return nseg; + + psg->count = cpu_to_le32(nseg); + + scsi_for_each_sg(scsicmd, sg, nseg, i) { + psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg)); + psg->sg[i].count = cpu_to_le32(sg_dma_len(sg)); + byte_count += sg_dma_len(sg); + } + /* hba wants the size to be exact */ + if (byte_count > scsi_bufflen(scsicmd)) { + u32 temp = le32_to_cpu(psg->sg[i-1].count) - + (byte_count - scsi_bufflen(scsicmd)); + psg->sg[i-1].count = cpu_to_le32(temp); + byte_count = scsi_bufflen(scsicmd); + } + /* Check for command underflow */ + if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { + printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", + byte_count, scsicmd->underflow); + } + + return byte_count; +} + + +static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg) +{ + unsigned long byte_count = 0; + u64 addr; + int nseg; + struct scatterlist *sg; + int i; + + // Get rid of old data + psg->count = 0; + psg->sg[0].addr[0] = 0; + psg->sg[0].addr[1] = 0; + psg->sg[0].count = 0; + + nseg = scsi_dma_map(scsicmd); + if (nseg <= 0) + return nseg; + + scsi_for_each_sg(scsicmd, sg, nseg, i) { + int count = sg_dma_len(sg); + addr = sg_dma_address(sg); + psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); + psg->sg[i].addr[1] = cpu_to_le32(addr>>32); + psg->sg[i].count = 
cpu_to_le32(count); + byte_count += count; + } + psg->count = cpu_to_le32(nseg); + /* hba wants the size to be exact */ + if (byte_count > scsi_bufflen(scsicmd)) { + u32 temp = le32_to_cpu(psg->sg[i-1].count) - + (byte_count - scsi_bufflen(scsicmd)); + psg->sg[i-1].count = cpu_to_le32(temp); + byte_count = scsi_bufflen(scsicmd); + } + /* Check for command underflow */ + if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { + printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", + byte_count, scsicmd->underflow); + } + + return byte_count; +} + +static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg) +{ + unsigned long byte_count = 0; + int nseg; + struct scatterlist *sg; + int i; + + // Get rid of old data + psg->count = 0; + psg->sg[0].next = 0; + psg->sg[0].prev = 0; + psg->sg[0].addr[0] = 0; + psg->sg[0].addr[1] = 0; + psg->sg[0].count = 0; + psg->sg[0].flags = 0; + + nseg = scsi_dma_map(scsicmd); + if (nseg <= 0) + return nseg; + + scsi_for_each_sg(scsicmd, sg, nseg, i) { + int count = sg_dma_len(sg); + u64 addr = sg_dma_address(sg); + psg->sg[i].next = 0; + psg->sg[i].prev = 0; + psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32)); + psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff)); + psg->sg[i].count = cpu_to_le32(count); + psg->sg[i].flags = 0; + byte_count += count; + } + psg->count = cpu_to_le32(nseg); + /* hba wants the size to be exact */ + if (byte_count > scsi_bufflen(scsicmd)) { + u32 temp = le32_to_cpu(psg->sg[i-1].count) - + (byte_count - scsi_bufflen(scsicmd)); + psg->sg[i-1].count = cpu_to_le32(temp); + byte_count = scsi_bufflen(scsicmd); + } + /* Check for command underflow */ + if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { + printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", + byte_count, scsicmd->underflow); + } + + return byte_count; +} + +static long aac_build_sgraw2(struct scsi_cmnd *scsicmd, + struct aac_raw_io2 *rio2, int sg_max) +{ + unsigned long byte_count = 0; + int nseg; + struct scatterlist *sg; + int i, conformable = 0; + u32 min_size = PAGE_SIZE, cur_size; + + nseg = scsi_dma_map(scsicmd); + if (nseg <= 0) + return nseg; + + scsi_for_each_sg(scsicmd, sg, nseg, i) { + int count = sg_dma_len(sg); + u64 addr = sg_dma_address(sg); + + BUG_ON(i >= sg_max); + rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32)); + rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff)); + cur_size = cpu_to_le32(count); + rio2->sge[i].length = cur_size; + rio2->sge[i].flags = 0; + if (i == 0) { + conformable = 1; + rio2->sgeFirstSize = cur_size; + } else if (i == 1) { + rio2->sgeNominalSize = cur_size; + min_size = cur_size; + } else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) { + conformable = 0; + if (cur_size < min_size) + min_size = cur_size; + } + byte_count += count; + } + + /* hba wants the size to be exact */ + if (byte_count > scsi_bufflen(scsicmd)) { + u32 temp = le32_to_cpu(rio2->sge[i-1].length) - + (byte_count - scsi_bufflen(scsicmd)); + rio2->sge[i-1].length = cpu_to_le32(temp); + byte_count = scsi_bufflen(scsicmd); + } + + rio2->sgeCnt = cpu_to_le32(nseg); + rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212); + /* not conformable: evaluate required sg elements */ + if (!conformable) { + int j, nseg_new = nseg, err_found; + for (i = min_size / PAGE_SIZE; i >= 1; --i) { + err_found = 0; + nseg_new = 2; + for (j = 1; j < nseg - 1; ++j) { + if (rio2->sge[j].length % (i*PAGE_SIZE)) { + err_found = 1; + break; + } + nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE)); + } + if 
(!err_found) + break; + } + if (i > 0 && nseg_new <= sg_max) { + int ret = aac_convert_sgraw2(rio2, i, nseg, nseg_new); + + if (ret < 0) + return ret; + } + } else + rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT); + + /* Check for command underflow */ + if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { + printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", + byte_count, scsicmd->underflow); + } + + return byte_count; +} + +static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new) +{ + struct sge_ieee1212 *sge; + int i, j, pos; + u32 addr_low; + + if (aac_convert_sgl == 0) + return 0; + + sge = kmalloc_array(nseg_new, sizeof(*sge), GFP_ATOMIC); + if (sge == NULL) + return -ENOMEM; + + for (i = 1, pos = 1; i < nseg-1; ++i) { + for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) { + addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE; + sge[pos].addrLow = addr_low; + sge[pos].addrHigh = rio2->sge[i].addrHigh; + if (addr_low < rio2->sge[i].addrLow) + sge[pos].addrHigh++; + sge[pos].length = pages * PAGE_SIZE; + sge[pos].flags = 0; + pos++; + } + } + sge[pos] = rio2->sge[nseg-1]; + memcpy(&rio2->sge[1], &sge[1], (nseg_new-1)*sizeof(struct sge_ieee1212)); + + kfree(sge); + rio2->sgeCnt = cpu_to_le32(nseg_new); + rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT); + rio2->sgeNominalSize = pages * PAGE_SIZE; + return 0; +} + +static long aac_build_sghba(struct scsi_cmnd *scsicmd, + struct aac_hba_cmd_req *hbacmd, + int sg_max, + u64 sg_address) +{ + unsigned long byte_count = 0; + int nseg; + struct scatterlist *sg; + int i; + u32 cur_size; + struct aac_hba_sgl *sge; + + nseg = scsi_dma_map(scsicmd); + if (nseg <= 0) { + byte_count = nseg; + goto out; + } + + if (nseg > HBA_MAX_SG_EMBEDDED) + sge = &hbacmd->sge[2]; + else + sge = &hbacmd->sge[0]; + + scsi_for_each_sg(scsicmd, sg, nseg, i) { + int count = sg_dma_len(sg); + u64 addr = sg_dma_address(sg); + + WARN_ON(i >= sg_max); + sge->addr_hi = cpu_to_le32((u32)(addr>>32)); + sge->addr_lo = cpu_to_le32((u32)(addr & 0xffffffff)); + cur_size = cpu_to_le32(count); + sge->len = cur_size; + sge->flags = 0; + byte_count += count; + sge++; + } + + sge--; + /* hba wants the size to be exact */ + if (byte_count > scsi_bufflen(scsicmd)) { + u32 temp; + + temp = le32_to_cpu(sge->len) - byte_count + - scsi_bufflen(scsicmd); + sge->len = cpu_to_le32(temp); + byte_count = scsi_bufflen(scsicmd); + } + + if (nseg <= HBA_MAX_SG_EMBEDDED) { + hbacmd->emb_data_desc_count = cpu_to_le32(nseg); + sge->flags = cpu_to_le32(0x40000000); + } else { + /* not embedded */ + hbacmd->sge[0].flags = cpu_to_le32(0x80000000); + hbacmd->emb_data_desc_count = (u8)cpu_to_le32(1); + hbacmd->sge[0].addr_hi = (u32)cpu_to_le32(sg_address >> 32); + hbacmd->sge[0].addr_lo = + cpu_to_le32((u32)(sg_address & 0xffffffff)); + } + + /* Check for command underflow */ + if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { + pr_warn("aacraid: cmd len %08lX cmd underflow %08X\n", + byte_count, scsicmd->underflow); + } +out: + return byte_count; +} + +#ifdef AAC_DETAILED_STATUS_INFO + +struct aac_srb_status_info { + u32 status; + char *str; +}; + + +static struct aac_srb_status_info srb_status_info[] = { + { SRB_STATUS_PENDING, "Pending Status"}, + { SRB_STATUS_SUCCESS, "Success"}, + { SRB_STATUS_ABORTED, "Aborted Command"}, + { SRB_STATUS_ABORT_FAILED, "Abort Failed"}, + { SRB_STATUS_ERROR, "Error Event"}, + { SRB_STATUS_BUSY, "Device Busy"}, + { SRB_STATUS_INVALID_REQUEST, "Invalid Request"}, + { 
SRB_STATUS_INVALID_PATH_ID, "Invalid Path ID"}, + { SRB_STATUS_NO_DEVICE, "No Device"}, + { SRB_STATUS_TIMEOUT, "Timeout"}, + { SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"}, + { SRB_STATUS_COMMAND_TIMEOUT, "Command Timeout"}, + { SRB_STATUS_MESSAGE_REJECTED, "Message Rejected"}, + { SRB_STATUS_BUS_RESET, "Bus Reset"}, + { SRB_STATUS_PARITY_ERROR, "Parity Error"}, + { SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"}, + { SRB_STATUS_NO_HBA, "No HBA"}, + { SRB_STATUS_DATA_OVERRUN, "Data Overrun/Data Underrun"}, + { SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"}, + { SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"}, + { SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"}, + { SRB_STATUS_REQUEST_FLUSHED, "Request Flushed"}, + { SRB_STATUS_DELAYED_RETRY, "Delayed Retry"}, + { SRB_STATUS_INVALID_LUN, "Invalid LUN"}, + { SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"}, + { SRB_STATUS_BAD_FUNCTION, "Bad Function"}, + { SRB_STATUS_ERROR_RECOVERY, "Error Recovery"}, + { SRB_STATUS_NOT_STARTED, "Not Started"}, + { SRB_STATUS_NOT_IN_USE, "Not In Use"}, + { SRB_STATUS_FORCE_ABORT, "Force Abort"}, + { SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"}, + { 0xff, "Unknown Error"} +}; + +char *aac_get_status_string(u32 status) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(srb_status_info); i++) + if (srb_status_info[i].status == status) + return srb_status_info[i].str; + + return "Bad Status Code"; +} + +#endif diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h new file mode 100644 index 000000000..7d5a15507 --- /dev/null +++ b/drivers/scsi/aacraid/aacraid.h @@ -0,0 +1,2786 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Adaptec AAC series RAID controller driver + * (c) Copyright 2001 Red Hat Inc. + * + * based on the old aacraid driver that is.. + * Adaptec aacraid device driver for Linux. + * + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. 
(aacraid@microsemi.com) + * + * Module Name: + * aacraid.h + * + * Abstract: Contains all routines for control of the aacraid driver + */ + +#ifndef _AACRAID_H_ +#define _AACRAID_H_ +#ifndef dprintk +# define dprintk(x) +#endif +/* eg: if (nblank(dprintk(x))) */ +#define _nblank(x) #x +#define nblank(x) _nblank(x)[0] + +#include +#include +#include +#include +#include + +/*------------------------------------------------------------------------------ + * D E F I N E S + *----------------------------------------------------------------------------*/ + +#define AAC_MAX_MSIX 32 /* vectors */ +#define AAC_PCI_MSI_ENABLE 0x8000 + +enum { + AAC_ENABLE_INTERRUPT = 0x0, + AAC_DISABLE_INTERRUPT, + AAC_ENABLE_MSIX, + AAC_DISABLE_MSIX, + AAC_CLEAR_AIF_BIT, + AAC_CLEAR_SYNC_BIT, + AAC_ENABLE_INTX +}; + +#define AAC_INT_MODE_INTX (1<<0) +#define AAC_INT_MODE_MSI (1<<1) +#define AAC_INT_MODE_AIF (1<<2) +#define AAC_INT_MODE_SYNC (1<<3) +#define AAC_INT_MODE_MSIX (1<<16) + +#define AAC_INT_ENABLE_TYPE1_INTX 0xfffffffb +#define AAC_INT_ENABLE_TYPE1_MSIX 0xfffffffa +#define AAC_INT_DISABLE_ALL 0xffffffff + +/* Bit definitions in IOA->Host Interrupt Register */ +#define PMC_TRANSITION_TO_OPERATIONAL (1<<31) +#define PMC_IOARCB_TRANSFER_FAILED (1<<28) +#define PMC_IOA_UNIT_CHECK (1<<27) +#define PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE (1<<26) +#define PMC_CRITICAL_IOA_OP_IN_PROGRESS (1<<25) +#define PMC_IOARRIN_LOST (1<<4) +#define PMC_SYSTEM_BUS_MMIO_ERROR (1<<3) +#define PMC_IOA_PROCESSOR_IN_ERROR_STATE (1<<2) +#define PMC_HOST_RRQ_VALID (1<<1) +#define PMC_OPERATIONAL_STATUS (1<<31) +#define PMC_ALLOW_MSIX_VECTOR0 (1<<0) + +#define PMC_IOA_ERROR_INTERRUPTS (PMC_IOARCB_TRANSFER_FAILED | \ + PMC_IOA_UNIT_CHECK | \ + PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE | \ + PMC_IOARRIN_LOST | \ + PMC_SYSTEM_BUS_MMIO_ERROR | \ + PMC_IOA_PROCESSOR_IN_ERROR_STATE) + +#define PMC_ALL_INTERRUPT_BITS (PMC_IOA_ERROR_INTERRUPTS | \ + PMC_HOST_RRQ_VALID | \ + PMC_TRANSITION_TO_OPERATIONAL | \ + PMC_ALLOW_MSIX_VECTOR0) +#define PMC_GLOBAL_INT_BIT2 0x00000004 +#define PMC_GLOBAL_INT_BIT0 0x00000001 + +#ifndef AAC_DRIVER_BUILD +# define AAC_DRIVER_BUILD 50983 +# define AAC_DRIVER_BRANCH "-custom" +#endif +#define MAXIMUM_NUM_CONTAINERS 32 + +#define AAC_NUM_MGT_FIB 8 +#define AAC_NUM_IO_FIB (1024 - AAC_NUM_MGT_FIB) +#define AAC_NUM_FIB (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB) + +#define AAC_MAX_LUN 256 + +#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff) +#define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)256) + +#define AAC_DEBUG_INSTRUMENT_AIF_DELETE + +#define AAC_MAX_NATIVE_TARGETS 1024 +/* Thor: 5 phys. 
buses: #0: empty, 1-4: 256 targets each */ +#define AAC_MAX_BUSES 5 +#define AAC_MAX_TARGETS 256 +#define AAC_BUS_TARGET_LOOP (AAC_MAX_BUSES * AAC_MAX_TARGETS) +#define AAC_MAX_NATIVE_SIZE 2048 +#define FW_ERROR_BUFFER_SIZE 512 +#define AAC_SA_TIMEOUT 180 +#define AAC_ARC_TIMEOUT 60 + +#define get_bus_number(x) (x/AAC_MAX_TARGETS) +#define get_target_number(x) (x%AAC_MAX_TARGETS) + +/* Thor AIF events */ +#define SA_AIF_HOTPLUG (1<<1) +#define SA_AIF_HARDWARE (1<<2) +#define SA_AIF_PDEV_CHANGE (1<<4) +#define SA_AIF_LDEV_CHANGE (1<<5) +#define SA_AIF_BPSTAT_CHANGE (1<<30) +#define SA_AIF_BPCFG_CHANGE (1U<<31) + +#define HBA_MAX_SG_EMBEDDED 28 +#define HBA_MAX_SG_SEPARATE 90 +#define HBA_SENSE_DATA_LEN_MAX 32 +#define HBA_REQUEST_TAG_ERROR_FLAG 0x00000002 +#define HBA_SGL_FLAGS_EXT 0x80000000UL + +struct aac_hba_sgl { + u32 addr_lo; /* Lower 32-bits of SGL element address */ + u32 addr_hi; /* Upper 32-bits of SGL element address */ + u32 len; /* Length of SGL element in bytes */ + u32 flags; /* SGL element flags */ +}; + +enum { + HBA_IU_TYPE_SCSI_CMD_REQ = 0x40, + HBA_IU_TYPE_SCSI_TM_REQ = 0x41, + HBA_IU_TYPE_SATA_REQ = 0x42, + HBA_IU_TYPE_RESP = 0x60, + HBA_IU_TYPE_COALESCED_RESP = 0x61, + HBA_IU_TYPE_INT_COALESCING_CFG_REQ = 0x70 +}; + +enum { + HBA_CMD_BYTE1_DATA_DIR_IN = 0x1, + HBA_CMD_BYTE1_DATA_DIR_OUT = 0x2, + HBA_CMD_BYTE1_DATA_TYPE_DDR = 0x4, + HBA_CMD_BYTE1_CRYPTO_ENABLE = 0x8 +}; + +enum { + HBA_CMD_BYTE1_BITOFF_DATA_DIR_IN = 0x0, + HBA_CMD_BYTE1_BITOFF_DATA_DIR_OUT, + HBA_CMD_BYTE1_BITOFF_DATA_TYPE_DDR, + HBA_CMD_BYTE1_BITOFF_CRYPTO_ENABLE +}; + +enum { + HBA_RESP_DATAPRES_NO_DATA = 0x0, + HBA_RESP_DATAPRES_RESPONSE_DATA, + HBA_RESP_DATAPRES_SENSE_DATA +}; + +enum { + HBA_RESP_SVCRES_TASK_COMPLETE = 0x0, + HBA_RESP_SVCRES_FAILURE, + HBA_RESP_SVCRES_TMF_COMPLETE, + HBA_RESP_SVCRES_TMF_SUCCEEDED, + HBA_RESP_SVCRES_TMF_REJECTED, + HBA_RESP_SVCRES_TMF_LUN_INVALID +}; + +enum { + HBA_RESP_STAT_IO_ERROR = 0x1, + HBA_RESP_STAT_IO_ABORTED, + HBA_RESP_STAT_NO_PATH_TO_DEVICE, + HBA_RESP_STAT_INVALID_DEVICE, + HBA_RESP_STAT_HBAMODE_DISABLED = 0xE, + HBA_RESP_STAT_UNDERRUN = 0x51, + HBA_RESP_STAT_OVERRUN = 0x75 +}; + +struct aac_hba_cmd_req { + u8 iu_type; /* HBA information unit type */ + /* + * byte1: + * [1:0] DIR - 0=No data, 0x1 = IN, 0x2 = OUT + * [2] TYPE - 0=PCI, 1=DDR + * [3] CRYPTO_ENABLE - 0=Crypto disabled, 1=Crypto enabled + */ + u8 byte1; + u8 reply_qid; /* Host reply queue to post response to */ + u8 reserved1; + __le32 it_nexus; /* Device handle for the request */ + __le32 request_id; /* Sender context */ + /* Lower 32-bits of tweak value for crypto enabled IOs */ + __le32 tweak_value_lo; + u8 cdb[16]; /* SCSI CDB of the command */ + u8 lun[8]; /* SCSI LUN of the command */ + + /* Total data length in bytes to be read/written (if any) */ + __le32 data_length; + + /* [2:0] Task Attribute, [6:3] Command Priority */ + u8 attr_prio; + + /* Number of SGL elements embedded in the HBA req */ + u8 emb_data_desc_count; + + __le16 dek_index; /* DEK index for crypto enabled IOs */ + + /* Lower 32-bits of reserved error data target location on the host */ + __le32 error_ptr_lo; + + /* Upper 32-bits of reserved error data target location on the host */ + __le32 error_ptr_hi; + + /* Length of reserved error data area on the host in bytes */ + __le32 error_length; + + /* Upper 32-bits of tweak value for crypto enabled IOs */ + __le32 tweak_value_hi; + + struct aac_hba_sgl sge[HBA_MAX_SG_SEPARATE+2]; /* SG list space */ + + /* + * structure must not exceed + * 
AAC_MAX_NATIVE_SIZE-FW_ERROR_BUFFER_SIZE + */ +}; + +/* Task Management Functions (TMF) */ +#define HBA_TMF_ABORT_TASK 0x01 +#define HBA_TMF_LUN_RESET 0x08 + +struct aac_hba_tm_req { + u8 iu_type; /* HBA information unit type */ + u8 reply_qid; /* Host reply queue to post response to */ + u8 tmf; /* Task management function */ + u8 reserved1; + + __le32 it_nexus; /* Device handle for the command */ + + u8 lun[8]; /* SCSI LUN */ + + /* Used to hold sender context. */ + __le32 request_id; /* Sender context */ + __le32 reserved2; + + /* Request identifier of managed task */ + __le32 managed_request_id; /* Sender context being managed */ + __le32 reserved3; + + /* Lower 32-bits of reserved error data target location on the host */ + __le32 error_ptr_lo; + /* Upper 32-bits of reserved error data target location on the host */ + __le32 error_ptr_hi; + /* Length of reserved error data area on the host in bytes */ + __le32 error_length; +}; + +struct aac_hba_reset_req { + u8 iu_type; /* HBA information unit type */ + /* 0 - reset specified device, 1 - reset all devices */ + u8 reset_type; + u8 reply_qid; /* Host reply queue to post response to */ + u8 reserved1; + + __le32 it_nexus; /* Device handle for the command */ + __le32 request_id; /* Sender context */ + /* Lower 32-bits of reserved error data target location on the host */ + __le32 error_ptr_lo; + /* Upper 32-bits of reserved error data target location on the host */ + __le32 error_ptr_hi; + /* Length of reserved error data area on the host in bytes */ + __le32 error_length; +}; + +struct aac_hba_resp { + u8 iu_type; /* HBA information unit type */ + u8 reserved1[3]; + __le32 request_identifier; /* sender context */ + __le32 reserved2; + u8 service_response; /* SCSI service response */ + u8 status; /* SCSI status */ + u8 datapres; /* [1:0] - data present, [7:2] - reserved */ + u8 sense_response_data_len; /* Sense/response data length */ + __le32 residual_count; /* Residual data length in bytes */ + /* Sense/response data */ + u8 sense_response_buf[HBA_SENSE_DATA_LEN_MAX]; +}; + +struct aac_native_hba { + union { + struct aac_hba_cmd_req cmd; + struct aac_hba_tm_req tmr; + u8 cmd_bytes[AAC_MAX_NATIVE_SIZE-FW_ERROR_BUFFER_SIZE]; + } cmd; + union { + struct aac_hba_resp err; + u8 resp_bytes[FW_ERROR_BUFFER_SIZE]; + } resp; +}; + +#define CISS_REPORT_PHYSICAL_LUNS 0xc3 +#define WRITE_HOST_WELLNESS 0xa5 +#define CISS_IDENTIFY_PHYSICAL_DEVICE 0x15 +#define BMIC_IN 0x26 +#define BMIC_OUT 0x27 + +struct aac_ciss_phys_luns_resp { + u8 list_length[4]; /* LUN list length (N-7, big endian) */ + u8 resp_flag; /* extended response_flag */ + u8 reserved[3]; + struct _ciss_lun { + u8 tid[3]; /* Target ID */ + u8 bus; /* Bus, flag (bits 6,7) */ + u8 level3[2]; + u8 level2[2]; + u8 node_ident[16]; /* phys. node identifier */ + } lun[1]; /* List of phys. 
devices */ +}; + +/* + * Interrupts + */ +#define AAC_MAX_HRRQ 64 + +struct aac_ciss_identify_pd { + u8 scsi_bus; /* SCSI Bus number on controller */ + u8 scsi_id; /* SCSI ID on this bus */ + u16 block_size; /* sector size in bytes */ + u32 total_blocks; /* number for sectors on drive */ + u32 reserved_blocks; /* controller reserved (RIS) */ + u8 model[40]; /* Physical Drive Model */ + u8 serial_number[40]; /* Drive Serial Number */ + u8 firmware_revision[8]; /* drive firmware revision */ + u8 scsi_inquiry_bits; /* inquiry byte 7 bits */ + u8 compaq_drive_stamp; /* 0 means drive not stamped */ + u8 last_failure_reason; + + u8 flags; + u8 more_flags; + u8 scsi_lun; /* SCSI LUN for phys drive */ + u8 yet_more_flags; + u8 even_more_flags; + u32 spi_speed_rules; /* SPI Speed :Ultra disable diagnose */ + u8 phys_connector[2]; /* connector number on controller */ + u8 phys_box_on_bus; /* phys enclosure this drive resides */ + u8 phys_bay_in_box; /* phys drv bay this drive resides */ + u32 rpm; /* Drive rotational speed in rpm */ + u8 device_type; /* type of drive */ + u8 sata_version; /* only valid when drive_type is SATA */ + u64 big_total_block_count; + u64 ris_starting_lba; + u32 ris_size; + u8 wwid[20]; + u8 controller_phy_map[32]; + u16 phy_count; + u8 phy_connected_dev_type[256]; + u8 phy_to_drive_bay_num[256]; + u16 phy_to_attached_dev_index[256]; + u8 box_index; + u8 spitfire_support; + u16 extra_physical_drive_flags; + u8 negotiated_link_rate[256]; + u8 phy_to_phy_map[256]; + u8 redundant_path_present_map; + u8 redundant_path_failure_map; + u8 active_path_number; + u16 alternate_paths_phys_connector[8]; + u8 alternate_paths_phys_box_on_port[8]; + u8 multi_lun_device_lun_count; + u8 minimum_good_fw_revision[8]; + u8 unique_inquiry_bytes[20]; + u8 current_temperature_degreesC; + u8 temperature_threshold_degreesC; + u8 max_temperature_degreesC; + u8 logical_blocks_per_phys_block_exp; /* phyblocksize = 512 * 2^exp */ + u16 current_queue_depth_limit; + u8 switch_name[10]; + u16 switch_port; + u8 alternate_paths_switch_name[40]; + u8 alternate_paths_switch_port[8]; + u16 power_on_hours; /* valid only if gas gauge supported */ + u16 percent_endurance_used; /* valid only if gas gauge supported. */ + u8 drive_authentication; + u8 smart_carrier_authentication; + u8 smart_carrier_app_fw_version; + u8 smart_carrier_bootloader_fw_version; + u8 SanitizeSecureEraseSupport; + u8 DriveKeyFlags; + u8 encryption_key_name[64]; + u32 misc_drive_flags; + u16 dek_index; + u16 drive_encryption_flags; + u8 sanitize_maximum_time[6]; + u8 connector_info_mode; + u8 connector_info_number[4]; + u8 long_connector_name[64]; + u8 device_unique_identifier[16]; + u8 padto_2K[17]; +} __packed; + +/* + * These macros convert from physical channels to virtual channels + */ +#define CONTAINER_CHANNEL (0) +#define NATIVE_CHANNEL (1) +#define CONTAINER_TO_CHANNEL(cont) (CONTAINER_CHANNEL) +#define CONTAINER_TO_ID(cont) (cont) +#define CONTAINER_TO_LUN(cont) (0) +#define ENCLOSURE_CHANNEL (3) + +#define PMC_DEVICE_S6 0x28b +#define PMC_DEVICE_S7 0x28c +#define PMC_DEVICE_S8 0x28d + +#define aac_phys_to_logical(x) ((x)+1) +#define aac_logical_to_phys(x) ((x)?(x)-1:0) + +/* + * These macros are for keeping track of + * character device state. 
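+ * Both are negative values, distinct from any valid character device
+ * major number.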
+ */ +#define AAC_CHARDEV_UNREGISTERED (-1) +#define AAC_CHARDEV_NEEDS_REINIT (-2) + +/* #define AAC_DETAILED_STATUS_INFO */ + +struct diskparm +{ + int heads; + int sectors; + int cylinders; +}; + + +/* + * Firmware constants + */ + +#define CT_NONE 0 +#define CT_OK 218 +#define FT_FILESYS 8 /* ADAPTEC's "FSA"(tm) filesystem */ +#define FT_DRIVE 9 /* physical disk - addressable in scsi by bus/id/lun */ + +/* + * Host side memory scatter gather list + * Used by the adapter for read, write, and readdirplus operations + * We have separate 32 and 64 bit version because even + * on 64 bit systems not all cards support the 64 bit version + */ +struct sgentry { + __le32 addr; /* 32-bit address. */ + __le32 count; /* Length. */ +}; + +struct user_sgentry { + u32 addr; /* 32-bit address. */ + u32 count; /* Length. */ +}; + +struct sgentry64 { + __le32 addr[2]; /* 64-bit addr. 2 pieces for data alignment */ + __le32 count; /* Length. */ +}; + +struct user_sgentry64 { + u32 addr[2]; /* 64-bit addr. 2 pieces for data alignment */ + u32 count; /* Length. */ +}; + +struct sgentryraw { + __le32 next; /* reserved for F/W use */ + __le32 prev; /* reserved for F/W use */ + __le32 addr[2]; + __le32 count; + __le32 flags; /* reserved for F/W use */ +}; + +struct user_sgentryraw { + u32 next; /* reserved for F/W use */ + u32 prev; /* reserved for F/W use */ + u32 addr[2]; + u32 count; + u32 flags; /* reserved for F/W use */ +}; + +struct sge_ieee1212 { + u32 addrLow; + u32 addrHigh; + u32 length; + u32 flags; +}; + +/* + * SGMAP + * + * This is the SGMAP structure for all commands that use + * 32-bit addressing. + */ + +struct sgmap { + __le32 count; + struct sgentry sg[1]; +}; + +struct user_sgmap { + u32 count; + struct user_sgentry sg[1]; +}; + +struct sgmap64 { + __le32 count; + struct sgentry64 sg[1]; +}; + +struct user_sgmap64 { + u32 count; + struct user_sgentry64 sg[1]; +}; + +struct sgmapraw { + __le32 count; + struct sgentryraw sg[1]; +}; + +struct user_sgmapraw { + u32 count; + struct user_sgentryraw sg[1]; +}; + +struct creation_info +{ + u8 buildnum; /* e.g., 588 */ + u8 usec; /* e.g., 588 */ + u8 via; /* e.g., 1 = FSU, + * 2 = API + */ + u8 year; /* e.g., 1997 = 97 */ + __le32 date; /* + * unsigned Month :4; // 1 - 12 + * unsigned Day :6; // 1 - 32 + * unsigned Hour :6; // 0 - 23 + * unsigned Minute :6; // 0 - 60 + * unsigned Second :6; // 0 - 60 + */ + __le32 serial[2]; /* e.g., 0x1DEADB0BFAFAF001 */ +}; + + +/* + * Define all the constants needed for the communication interface + */ + +/* + * Define how many queue entries each queue will have and the total + * number of entries for the entire communication interface. Also define + * how many queues we support. + * + * This has to match the controller + */ + +#define NUMBER_OF_COMM_QUEUES 8 // 4 command; 4 response +#define HOST_HIGH_CMD_ENTRIES 4 +#define HOST_NORM_CMD_ENTRIES 8 +#define ADAP_HIGH_CMD_ENTRIES 4 +#define ADAP_NORM_CMD_ENTRIES 512 +#define HOST_HIGH_RESP_ENTRIES 4 +#define HOST_NORM_RESP_ENTRIES 512 +#define ADAP_HIGH_RESP_ENTRIES 4 +#define ADAP_NORM_RESP_ENTRIES 8 + +#define TOTAL_QUEUE_ENTRIES \ + (HOST_NORM_CMD_ENTRIES + HOST_HIGH_CMD_ENTRIES + ADAP_NORM_CMD_ENTRIES + ADAP_HIGH_CMD_ENTRIES + \ + HOST_NORM_RESP_ENTRIES + HOST_HIGH_RESP_ENTRIES + ADAP_NORM_RESP_ENTRIES + ADAP_HIGH_RESP_ENTRIES) + + +/* + * Set the queues on a 16 byte alignment + */ + +#define QUEUE_ALIGNMENT 16 + +/* + * The queue headers define the Communication Region queues. 
These + * are physically contiguous and accessible by both the adapter and the + * host. Even though all queue headers are in the same contiguous block + * they will be represented as individual units in the data structures. + */ + +struct aac_entry { + __le32 size; /* Size in bytes of Fib which this QE points to */ + __le32 addr; /* Receiver address of the FIB */ +}; + +/* + * The adapter assumes the ProducerIndex and ConsumerIndex are grouped + * adjacently and in that order. + */ + +struct aac_qhdr { + __le64 header_addr;/* Address to hand the adapter to access + to this queue head */ + __le32 *producer; /* The producer index for this queue (host address) */ + __le32 *consumer; /* The consumer index for this queue (host address) */ +}; + +/* + * Define all the events which the adapter would like to notify + * the host of. + */ + +#define HostNormCmdQue 1 /* Change in host normal priority command queue */ +#define HostHighCmdQue 2 /* Change in host high priority command queue */ +#define HostNormRespQue 3 /* Change in host normal priority response queue */ +#define HostHighRespQue 4 /* Change in host high priority response queue */ +#define AdapNormRespNotFull 5 +#define AdapHighRespNotFull 6 +#define AdapNormCmdNotFull 7 +#define AdapHighCmdNotFull 8 +#define SynchCommandComplete 9 +#define AdapInternalError 0xfe /* The adapter detected an internal error shutting down */ + +/* + * Define all the events the host wishes to notify the + * adapter of. The first four values much match the Qid the + * corresponding queue. + */ + +#define AdapNormCmdQue 2 +#define AdapHighCmdQue 3 +#define AdapNormRespQue 6 +#define AdapHighRespQue 7 +#define HostShutdown 8 +#define HostPowerFail 9 +#define FatalCommError 10 +#define HostNormRespNotFull 11 +#define HostHighRespNotFull 12 +#define HostNormCmdNotFull 13 +#define HostHighCmdNotFull 14 +#define FastIo 15 +#define AdapPrintfDone 16 + +/* + * Define all the queues that the adapter and host use to communicate + * Number them to match the physical queue layout. + */ + +enum aac_queue_types { + HostNormCmdQueue = 0, /* Adapter to host normal priority command traffic */ + HostHighCmdQueue, /* Adapter to host high priority command traffic */ + AdapNormCmdQueue, /* Host to adapter normal priority command traffic */ + AdapHighCmdQueue, /* Host to adapter high priority command traffic */ + HostNormRespQueue, /* Adapter to host normal priority response traffic */ + HostHighRespQueue, /* Adapter to host high priority response traffic */ + AdapNormRespQueue, /* Host to adapter normal priority response traffic */ + AdapHighRespQueue /* Host to adapter high priority response traffic */ +}; + +/* + * Assign type values to the FSA communication data structures + */ + +#define FIB_MAGIC 0x0001 +#define FIB_MAGIC2 0x0004 +#define FIB_MAGIC2_64 0x0005 + +/* + * Define the priority levels the FSA communication routines support. + */ + +#define FsaNormal 1 + +/* transport FIB header (PMC) */ +struct aac_fib_xporthdr { + __le64 HostAddress; /* FIB host address w/o xport header */ + __le32 Size; /* FIB size excluding xport header */ + __le32 Handle; /* driver handle to reference the FIB */ + __le64 Reserved[2]; +}; + +#define ALIGN32 32 + +/* + * Define the FIB. The FIB is the where all the requested data and + * command information are put to the application on the FSA adapter. 
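+ * A hardware FIB is a fixed 512-byte block: a struct aac_fibhdr header
+ * followed by the command-specific payload (see struct hw_fib below).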
+ */ + +struct aac_fibhdr { + __le32 XferState; /* Current transfer state for this CCB */ + __le16 Command; /* Routing information for the destination */ + u8 StructType; /* Type FIB */ + u8 Unused; /* Unused */ + __le16 Size; /* Size of this FIB in bytes */ + __le16 SenderSize; /* Size of the FIB in the sender + (for response sizing) */ + __le32 SenderFibAddress; /* Host defined data in the FIB */ + union { + __le32 ReceiverFibAddress;/* Logical address of this FIB for + the adapter (old) */ + __le32 SenderFibAddressHigh;/* upper 32bit of phys. FIB address */ + __le32 TimeStamp; /* otherwise timestamp for FW internal use */ + } u; + __le32 Handle; /* FIB handle used for MSGU commnunication */ + u32 Previous; /* FW internal use */ + u32 Next; /* FW internal use */ +}; + +struct hw_fib { + struct aac_fibhdr header; + u8 data[512-sizeof(struct aac_fibhdr)]; // Command specific data +}; + +/* + * FIB commands + */ + +#define TestCommandResponse 1 +#define TestAdapterCommand 2 +/* + * Lowlevel and comm commands + */ +#define LastTestCommand 100 +#define ReinitHostNormCommandQueue 101 +#define ReinitHostHighCommandQueue 102 +#define ReinitHostHighRespQueue 103 +#define ReinitHostNormRespQueue 104 +#define ReinitAdapNormCommandQueue 105 +#define ReinitAdapHighCommandQueue 107 +#define ReinitAdapHighRespQueue 108 +#define ReinitAdapNormRespQueue 109 +#define InterfaceShutdown 110 +#define DmaCommandFib 120 +#define StartProfile 121 +#define TermProfile 122 +#define SpeedTest 123 +#define TakeABreakPt 124 +#define RequestPerfData 125 +#define SetInterruptDefTimer 126 +#define SetInterruptDefCount 127 +#define GetInterruptDefStatus 128 +#define LastCommCommand 129 +/* + * Filesystem commands + */ +#define NuFileSystem 300 +#define UFS 301 +#define HostFileSystem 302 +#define LastFileSystemCommand 303 +/* + * Container Commands + */ +#define ContainerCommand 500 +#define ContainerCommand64 501 +#define ContainerRawIo 502 +#define ContainerRawIo2 503 +/* + * Scsi Port commands (scsi passthrough) + */ +#define ScsiPortCommand 600 +#define ScsiPortCommand64 601 +/* + * Misc house keeping and generic adapter initiated commands + */ +#define AifRequest 700 +#define CheckRevision 701 +#define FsaHostShutdown 702 +#define RequestAdapterInfo 703 +#define IsAdapterPaused 704 +#define SendHostTime 705 +#define RequestSupplementAdapterInfo 706 +#define LastMiscCommand 707 + +/* + * Commands that will target the failover level on the FSA adapter + */ + +enum fib_xfer_state { + HostOwned = (1<<0), + AdapterOwned = (1<<1), + FibInitialized = (1<<2), + FibEmpty = (1<<3), + AllocatedFromPool = (1<<4), + SentFromHost = (1<<5), + SentFromAdapter = (1<<6), + ResponseExpected = (1<<7), + NoResponseExpected = (1<<8), + AdapterProcessed = (1<<9), + HostProcessed = (1<<10), + HighPriority = (1<<11), + NormalPriority = (1<<12), + Async = (1<<13), + AsyncIo = (1<<13), // rpbfix: remove with new regime + PageFileIo = (1<<14), // rpbfix: remove with new regime + ShutdownRequest = (1<<15), + LazyWrite = (1<<16), // rpbfix: remove with new regime + AdapterMicroFib = (1<<17), + BIOSFibPath = (1<<18), + FastResponseCapable = (1<<19), + ApiFib = (1<<20), /* Its an API Fib */ + /* PMC NEW COMM: There is no more AIF data pending */ + NoMoreAifDataAvailable = (1<<21) +}; + +/* + * The following defines needs to be updated any time there is an + * incompatible change made to the aac_init structure. 
+ */ + +#define ADAPTER_INIT_STRUCT_REVISION 3 +#define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science +#define ADAPTER_INIT_STRUCT_REVISION_6 6 /* PMC src */ +#define ADAPTER_INIT_STRUCT_REVISION_7 7 /* Denali */ +#define ADAPTER_INIT_STRUCT_REVISION_8 8 // Thor + +union aac_init +{ + struct _r7 { + __le32 init_struct_revision; + __le32 no_of_msix_vectors; + __le32 fsrev; + __le32 comm_header_address; + __le32 fast_io_comm_area_address; + __le32 adapter_fibs_physical_address; + __le32 adapter_fibs_virtual_address; + __le32 adapter_fibs_size; + __le32 adapter_fib_align; + __le32 printfbuf; + __le32 printfbufsiz; + /* number of 4k pages of host phys. mem. */ + __le32 host_phys_mem_pages; + /* number of seconds since 1970. */ + __le32 host_elapsed_seconds; + /* ADAPTER_INIT_STRUCT_REVISION_4 begins here */ + __le32 init_flags; /* flags for supported features */ +#define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001 +#define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010 +#define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020 +#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000040 +#define INITFLAGS_FAST_JBOD_SUPPORTED 0x00000080 +#define INITFLAGS_NEW_COMM_TYPE2_SUPPORTED 0x00000100 +#define INITFLAGS_DRIVER_SUPPORTS_HBA_MODE 0x00000400 + __le32 max_io_commands; /* max outstanding commands */ + __le32 max_io_size; /* largest I/O command */ + __le32 max_fib_size; /* largest FIB to adapter */ + /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */ + __le32 max_num_aif; /* max number of aif */ + /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */ + /* Host RRQ (response queue) for SRC */ + __le32 host_rrq_addr_low; + __le32 host_rrq_addr_high; + } r7; + struct _r8 { + /* ADAPTER_INIT_STRUCT_REVISION_8 */ + __le32 init_struct_revision; + __le32 rr_queue_count; + __le32 host_elapsed_seconds; /* number of secs since 1970. 
*/ + __le32 init_flags; + __le32 max_io_size; /* largest I/O command */ + __le32 max_num_aif; /* max number of aif */ + __le32 reserved1; + __le32 reserved2; + struct _rrq { + __le32 host_addr_low; + __le32 host_addr_high; + __le16 msix_id; + __le16 element_count; + __le16 comp_thresh; + __le16 unused; + } rrq[1]; /* up to 64 RRQ addresses */ + } r8; +}; + +enum aac_log_level { + LOG_AAC_INIT = 10, + LOG_AAC_INFORMATIONAL = 20, + LOG_AAC_WARNING = 30, + LOG_AAC_LOW_ERROR = 40, + LOG_AAC_MEDIUM_ERROR = 50, + LOG_AAC_HIGH_ERROR = 60, + LOG_AAC_PANIC = 70, + LOG_AAC_DEBUG = 80, + LOG_AAC_WINDBG_PRINT = 90 +}; + +#define FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT 0x030b +#define FSAFS_NTC_FIB_CONTEXT 0x030c + +struct aac_dev; +struct fib; +struct scsi_cmnd; + +struct adapter_ops +{ + /* Low level operations */ + void (*adapter_interrupt)(struct aac_dev *dev); + void (*adapter_notify)(struct aac_dev *dev, u32 event); + void (*adapter_disable_int)(struct aac_dev *dev); + void (*adapter_enable_int)(struct aac_dev *dev); + int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4); + int (*adapter_check_health)(struct aac_dev *dev); + int (*adapter_restart)(struct aac_dev *dev, int bled, u8 reset_type); + void (*adapter_start)(struct aac_dev *dev); + /* Transport operations */ + int (*adapter_ioremap)(struct aac_dev * dev, u32 size); + irq_handler_t adapter_intr; + /* Packet operations */ + int (*adapter_deliver)(struct fib * fib); + int (*adapter_bounds)(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba); + int (*adapter_read)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count); + int (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua); + int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd); + /* Administrative operations */ + int (*adapter_comm)(struct aac_dev * dev, int comm); +}; + +/* + * Define which interrupt handler needs to be installed + */ + +struct aac_driver_ident +{ + int (*init)(struct aac_dev *dev); + char * name; + char * vname; + char * model; + u16 channels; + int quirks; +}; +/* + * Some adapter firmware needs communication memory + * below 2gig. This tells the init function to set the + * dma mask such that fib memory will be allocated where the + * adapter firmware can get to it. + */ +#define AAC_QUIRK_31BIT 0x0001 + +/* + * Some adapter firmware, when the raid card's cache is turned off, can not + * split up scatter gathers in order to deal with the limits of the + * underlying CHIM. This limit is 34 scatter gather elements. + */ +#define AAC_QUIRK_34SG 0x0002 + +/* + * This adapter is a slave (no Firmware) + */ +#define AAC_QUIRK_SLAVE 0x0004 + +/* + * This adapter is a master. + */ +#define AAC_QUIRK_MASTER 0x0008 + +/* + * Some adapter firmware perform poorly when it must split up scatter gathers + * in order to deal with the limits of the underlying CHIM. This limit in this + * class of adapters is 17 scatter gather elements. + */ +#define AAC_QUIRK_17SG 0x0010 + +/* + * Some adapter firmware does not support 64 bit scsi passthrough + * commands. + */ +#define AAC_QUIRK_SCSI_32 0x0020 + +/* + * SRC based adapters support the AifReqEvent functions + */ +#define AAC_QUIRK_SRC 0x0040 + +/* + * The adapter interface specs all queues to be located in the same + * physically contiguous block. 
The host structure that defines the + * commuication queues will assume they are each a separate physically + * contiguous memory region that will support them all being one big + * contiguous block. + * There is a command and response queue for each level and direction of + * commuication. These regions are accessed by both the host and adapter. + */ + +struct aac_queue { + u64 logical; /*address we give the adapter */ + struct aac_entry *base; /*system virtual address */ + struct aac_qhdr headers; /*producer,consumer q headers*/ + u32 entries; /*Number of queue entries */ + wait_queue_head_t qfull; /*Event to wait on if q full */ + wait_queue_head_t cmdready; /*Cmd ready from the adapter */ + /* This is only valid for adapter to host command queues. */ + spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */ + spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */ + struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */ + /* only valid for command queues which receive entries from the adapter. */ + /* Number of entries on outstanding queue. */ + atomic_t numpending; + struct aac_dev * dev; /* Back pointer to adapter structure */ +}; + +/* + * Message queues. The order here is important, see also the + * queue type ordering + */ + +struct aac_queue_block +{ + struct aac_queue queue[8]; +}; + +/* + * SaP1 Message Unit Registers + */ + +struct sa_drawbridge_CSR { + /* Offset | Name */ + __le32 reserved[10]; /* 00h-27h | Reserved */ + u8 LUT_Offset; /* 28h | Lookup Table Offset */ + u8 reserved1[3]; /* 29h-2bh | Reserved */ + __le32 LUT_Data; /* 2ch | Looup Table Data */ + __le32 reserved2[26]; /* 30h-97h | Reserved */ + __le16 PRICLEARIRQ; /* 98h | Primary Clear Irq */ + __le16 SECCLEARIRQ; /* 9ah | Secondary Clear Irq */ + __le16 PRISETIRQ; /* 9ch | Primary Set Irq */ + __le16 SECSETIRQ; /* 9eh | Secondary Set Irq */ + __le16 PRICLEARIRQMASK;/* a0h | Primary Clear Irq Mask */ + __le16 SECCLEARIRQMASK;/* a2h | Secondary Clear Irq Mask */ + __le16 PRISETIRQMASK; /* a4h | Primary Set Irq Mask */ + __le16 SECSETIRQMASK; /* a6h | Secondary Set Irq Mask */ + __le32 MAILBOX0; /* a8h | Scratchpad 0 */ + __le32 MAILBOX1; /* ach | Scratchpad 1 */ + __le32 MAILBOX2; /* b0h | Scratchpad 2 */ + __le32 MAILBOX3; /* b4h | Scratchpad 3 */ + __le32 MAILBOX4; /* b8h | Scratchpad 4 */ + __le32 MAILBOX5; /* bch | Scratchpad 5 */ + __le32 MAILBOX6; /* c0h | Scratchpad 6 */ + __le32 MAILBOX7; /* c4h | Scratchpad 7 */ + __le32 ROM_Setup_Data; /* c8h | Rom Setup and Data */ + __le32 ROM_Control_Addr;/* cch | Rom Control and Address */ + __le32 reserved3[12]; /* d0h-ffh | reserved */ + __le32 LUT[64]; /* 100h-1ffh | Lookup Table Entries */ +}; + +#define Mailbox0 SaDbCSR.MAILBOX0 +#define Mailbox1 SaDbCSR.MAILBOX1 +#define Mailbox2 SaDbCSR.MAILBOX2 +#define Mailbox3 SaDbCSR.MAILBOX3 +#define Mailbox4 SaDbCSR.MAILBOX4 +#define Mailbox5 SaDbCSR.MAILBOX5 +#define Mailbox6 SaDbCSR.MAILBOX6 +#define Mailbox7 SaDbCSR.MAILBOX7 + +#define DoorbellReg_p SaDbCSR.PRISETIRQ +#define DoorbellReg_s SaDbCSR.SECSETIRQ +#define DoorbellClrReg_p SaDbCSR.PRICLEARIRQ + + +#define DOORBELL_0 0x0001 +#define DOORBELL_1 0x0002 +#define DOORBELL_2 0x0004 +#define DOORBELL_3 0x0008 +#define DOORBELL_4 0x0010 +#define DOORBELL_5 0x0020 +#define DOORBELL_6 0x0040 + + +#define PrintfReady DOORBELL_5 +#define PrintfDone DOORBELL_5 + +struct sa_registers { + struct sa_drawbridge_CSR SaDbCSR; /* 98h - c4h */ +}; + + +#define 
SA_INIT_NUM_MSIXVECTORS 1 +#define SA_MINIPORT_REVISION SA_INIT_NUM_MSIXVECTORS + +#define sa_readw(AEP, CSR) readl(&((AEP)->regs.sa->CSR)) +#define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR)) +#define sa_writew(AEP, CSR, value) writew(value, &((AEP)->regs.sa->CSR)) +#define sa_writel(AEP, CSR, value) writel(value, &((AEP)->regs.sa->CSR)) + +/* + * Rx Message Unit Registers + */ + +struct rx_mu_registers { + /* Local | PCI*| Name */ + __le32 ARSR; /* 1300h | 00h | APIC Register Select Register */ + __le32 reserved0; /* 1304h | 04h | Reserved */ + __le32 AWR; /* 1308h | 08h | APIC Window Register */ + __le32 reserved1; /* 130Ch | 0Ch | Reserved */ + __le32 IMRx[2]; /* 1310h | 10h | Inbound Message Registers */ + __le32 OMRx[2]; /* 1318h | 18h | Outbound Message Registers */ + __le32 IDR; /* 1320h | 20h | Inbound Doorbell Register */ + __le32 IISR; /* 1324h | 24h | Inbound Interrupt + Status Register */ + __le32 IIMR; /* 1328h | 28h | Inbound Interrupt + Mask Register */ + __le32 ODR; /* 132Ch | 2Ch | Outbound Doorbell Register */ + __le32 OISR; /* 1330h | 30h | Outbound Interrupt + Status Register */ + __le32 OIMR; /* 1334h | 34h | Outbound Interrupt + Mask Register */ + __le32 reserved2; /* 1338h | 38h | Reserved */ + __le32 reserved3; /* 133Ch | 3Ch | Reserved */ + __le32 InboundQueue;/* 1340h | 40h | Inbound Queue Port relative to firmware */ + __le32 OutboundQueue;/*1344h | 44h | Outbound Queue Port relative to firmware */ + /* * Must access through ATU Inbound + Translation Window */ +}; + +struct rx_inbound { + __le32 Mailbox[8]; +}; + +#define INBOUNDDOORBELL_0 0x00000001 +#define INBOUNDDOORBELL_1 0x00000002 +#define INBOUNDDOORBELL_2 0x00000004 +#define INBOUNDDOORBELL_3 0x00000008 +#define INBOUNDDOORBELL_4 0x00000010 +#define INBOUNDDOORBELL_5 0x00000020 +#define INBOUNDDOORBELL_6 0x00000040 + +#define OUTBOUNDDOORBELL_0 0x00000001 +#define OUTBOUNDDOORBELL_1 0x00000002 +#define OUTBOUNDDOORBELL_2 0x00000004 +#define OUTBOUNDDOORBELL_3 0x00000008 +#define OUTBOUNDDOORBELL_4 0x00000010 + +#define InboundDoorbellReg MUnit.IDR +#define OutboundDoorbellReg MUnit.ODR + +struct rx_registers { + struct rx_mu_registers MUnit; /* 1300h - 1347h */ + __le32 reserved1[2]; /* 1348h - 134ch */ + struct rx_inbound IndexRegs; +}; + +#define rx_readb(AEP, CSR) readb(&((AEP)->regs.rx->CSR)) +#define rx_readl(AEP, CSR) readl(&((AEP)->regs.rx->CSR)) +#define rx_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rx->CSR)) +#define rx_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rx->CSR)) + +/* + * Rkt Message Unit Registers (same as Rx, except a larger reserve region) + */ + +#define rkt_mu_registers rx_mu_registers +#define rkt_inbound rx_inbound + +struct rkt_registers { + struct rkt_mu_registers MUnit; /* 1300h - 1347h */ + __le32 reserved1[1006]; /* 1348h - 22fch */ + struct rkt_inbound IndexRegs; /* 2300h - */ +}; + +#define rkt_readb(AEP, CSR) readb(&((AEP)->regs.rkt->CSR)) +#define rkt_readl(AEP, CSR) readl(&((AEP)->regs.rkt->CSR)) +#define rkt_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rkt->CSR)) +#define rkt_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rkt->CSR)) + +/* + * PMC SRC message unit registers + */ + +#define src_inbound rx_inbound + +struct src_mu_registers { + /* PCI*| Name */ + __le32 reserved0[6]; /* 00h | Reserved */ + __le32 IOAR[2]; /* 18h | IOA->host interrupt register */ + __le32 IDR; /* 20h | Inbound Doorbell Register */ + __le32 IISR; /* 24h | Inbound Int. 
Status Register */ + __le32 reserved1[3]; /* 28h | Reserved */ + __le32 OIMR; /* 34h | Outbound Int. Mask Register */ + __le32 reserved2[25]; /* 38h | Reserved */ + __le32 ODR_R; /* 9ch | Outbound Doorbell Read */ + __le32 ODR_C; /* a0h | Outbound Doorbell Clear */ + __le32 reserved3[3]; /* a4h | Reserved */ + __le32 SCR0; /* b0h | Scratchpad 0 */ + __le32 reserved4[2]; /* b4h | Reserved */ + __le32 OMR; /* bch | Outbound Message Register */ + __le32 IQ_L; /* c0h | Inbound Queue (Low address) */ + __le32 IQ_H; /* c4h | Inbound Queue (High address) */ + __le32 ODR_MSI; /* c8h | MSI register for sync./AIF */ + __le32 reserved5; /* cch | Reserved */ + __le32 IQN_L; /* d0h | Inbound (native cmd) low */ + __le32 IQN_H; /* d4h | Inbound (native cmd) high */ +}; + +struct src_registers { + struct src_mu_registers MUnit; /* 00h - cbh */ + union { + struct { + __le32 reserved1[130786]; /* d8h - 7fc5fh */ + struct src_inbound IndexRegs; /* 7fc60h */ + } tupelo; + struct { + __le32 reserved1[970]; /* d8h - fffh */ + struct src_inbound IndexRegs; /* 1000h */ + } denali; + } u; +}; + +#define src_readb(AEP, CSR) readb(&((AEP)->regs.src.bar0->CSR)) +#define src_readl(AEP, CSR) readl(&((AEP)->regs.src.bar0->CSR)) +#define src_writeb(AEP, CSR, value) writeb(value, \ + &((AEP)->regs.src.bar0->CSR)) +#define src_writel(AEP, CSR, value) writel(value, \ + &((AEP)->regs.src.bar0->CSR)) +#if defined(writeq) +#define src_writeq(AEP, CSR, value) writeq(value, \ + &((AEP)->regs.src.bar0->CSR)) +#endif + +#define SRC_ODR_SHIFT 12 +#define SRC_IDR_SHIFT 9 +#define SRC_MSI_READ_MASK 0x1000 + +typedef void (*fib_callback)(void *ctxt, struct fib *fibctx); + +struct aac_fib_context { + s16 type; // used for verification of structure + s16 size; + u32 unique; // unique value representing this context + ulong jiffies; // used for cleanup - dmb changed to ulong + struct list_head next; // used to link context's into a linked list + struct completion completion; // this is used to wait for the next fib to arrive. + int wait; // Set to true when thread is in WaitForSingleObject + unsigned long count; // total number of FIBs on FibList + struct list_head fib_list; // this holds fibs and their attachd hw_fibs +}; + +struct sense_data { + u8 error_code; /* 70h (current errors), 71h(deferred errors) */ + u8 valid:1; /* A valid bit of one indicates that the information */ + /* field contains valid information as defined in the + * SCSI-2 Standard. + */ + u8 segment_number; /* Only used for COPY, COMPARE, or COPY AND VERIFY Commands */ + u8 sense_key:4; /* Sense Key */ + u8 reserved:1; + u8 ILI:1; /* Incorrect Length Indicator */ + u8 EOM:1; /* End Of Medium - reserved for random access devices */ + u8 filemark:1; /* Filemark - reserved for random access devices */ + + u8 information[4]; /* for direct-access devices, contains the unsigned + * logical block address or residue associated with + * the sense key + */ + u8 add_sense_len; /* number of additional sense bytes to follow this field */ + u8 cmnd_info[4]; /* not used */ + u8 ASC; /* Additional Sense Code */ + u8 ASCQ; /* Additional Sense Code Qualifier */ + u8 FRUC; /* Field Replaceable Unit Code - not used */ + u8 bit_ptr:3; /* indicates which byte of the CDB or parameter data + * was in error + */ + u8 BPV:1; /* bit pointer valid (BPV): 1- indicates that + * the bit_ptr field has valid value + */ + u8 reserved2:2; + u8 CD:1; /* command data bit: 1- illegal parameter in CDB. + * 0- illegal parameter in data. 
+ */ + u8 SKSV:1; + u8 field_ptr[2]; /* byte of the CDB or parameter data in error */ +}; + +struct fsa_dev_info { + u64 last; + u64 size; + u32 type; + u32 config_waiting_on; + unsigned long config_waiting_stamp; + u16 queue_depth; + u8 config_needed; + u8 valid; + u8 ro; + u8 locked; + u8 deleted; + char devname[8]; + struct sense_data sense_data; + u32 block_size; + u8 identifier[16]; +}; + +struct fib { + void *next; /* this is used by the allocator */ + s16 type; + s16 size; + /* + * The Adapter that this I/O is destined for. + */ + struct aac_dev *dev; + /* + * This is the event the sendfib routine will wait on if the + * caller did not pass one and this is synch io. + */ + struct completion event_wait; + spinlock_t event_lock; + + u32 done; /* gets set to 1 when fib is complete */ + fib_callback callback; + void *callback_data; + u32 flags; // u32 dmb was ulong + /* + * And for the internal issue/reply queues (we may be able + * to merge these two) + */ + struct list_head fiblink; + void *data; + u32 vector_no; + struct hw_fib *hw_fib_va; /* also used for native */ + dma_addr_t hw_fib_pa; /* physical address of hw_fib*/ + dma_addr_t hw_sgl_pa; /* extra sgl for native */ + dma_addr_t hw_error_pa; /* error buffer for native */ + u32 hbacmd_size; /* cmd size for native */ +}; + +#define AAC_INIT 0 +#define AAC_RESCAN 1 + +#define AAC_DEVTYPE_RAID_MEMBER 1 +#define AAC_DEVTYPE_ARC_RAW 2 +#define AAC_DEVTYPE_NATIVE_RAW 3 + +#define AAC_RESCAN_DELAY (10 * HZ) + +struct aac_hba_map_info { + __le32 rmw_nexus; /* nexus for native HBA devices */ + u8 devtype; /* device type */ + s8 reset_state; /* 0 - no reset, 1..x - */ + /* after xth TM LUN reset */ + u16 qd_limit; + u32 scan_counter; + struct aac_ciss_identify_pd *safw_identify_resp; +}; + +/* + * Adapter Information Block + * + * This is returned by the RequestAdapterInfo block + */ + +struct aac_adapter_info +{ + __le32 platform; + __le32 cpu; + __le32 subcpu; + __le32 clock; + __le32 execmem; + __le32 buffermem; + __le32 totalmem; + __le32 kernelrev; + __le32 kernelbuild; + __le32 monitorrev; + __le32 monitorbuild; + __le32 hwrev; + __le32 hwbuild; + __le32 biosrev; + __le32 biosbuild; + __le32 cluster; + __le32 clusterchannelmask; + __le32 serial[2]; + __le32 battery; + __le32 options; + __le32 OEM; +}; + +struct aac_supplement_adapter_info +{ + u8 adapter_type_text[17+1]; + u8 pad[2]; + __le32 flash_memory_byte_size; + __le32 flash_image_id; + __le32 max_number_ports; + __le32 version; + __le32 feature_bits; + u8 slot_number; + u8 reserved_pad0[3]; + u8 build_date[12]; + __le32 current_number_ports; + struct { + u8 assembly_pn[8]; + u8 fru_pn[8]; + u8 battery_fru_pn[8]; + u8 ec_version_string[8]; + u8 tsid[12]; + } vpd_info; + __le32 flash_firmware_revision; + __le32 flash_firmware_build; + __le32 raid_type_morph_options; + __le32 flash_firmware_boot_revision; + __le32 flash_firmware_boot_build; + u8 mfg_pcba_serial_no[12]; + u8 mfg_wwn_name[8]; + __le32 supported_options2; + __le32 struct_expansion; + /* StructExpansion == 1 */ + __le32 feature_bits3; + __le32 supported_performance_modes; + u8 host_bus_type; /* uses HOST_BUS_TYPE_xxx defines */ + u8 host_bus_width; /* actual width in bits or links */ + u16 host_bus_speed; /* actual bus speed/link rate in MHz */ + u8 max_rrc_drives; /* max. number of ITP-RRC drives/pool */ + u8 max_disk_xtasks; /* max. 
possible num of DiskX Tasks */ + + u8 cpld_ver_loaded; + u8 cpld_ver_in_flash; + + __le64 max_rrc_capacity; + __le32 compiled_max_hist_log_level; + u8 custom_board_name[12]; + u16 supported_cntlr_mode; /* identify supported controller mode */ + u16 reserved_for_future16; + __le32 supported_options3; /* reserved for future options */ + + __le16 virt_device_bus; /* virt. SCSI device for Thor */ + __le16 virt_device_target; + __le16 virt_device_lun; + __le16 unused; + __le32 reserved_for_future_growth[68]; + +}; +#define AAC_FEATURE_FALCON cpu_to_le32(0x00000010) +#define AAC_FEATURE_JBOD cpu_to_le32(0x08000000) +/* SupportedOptions2 */ +#define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001) +#define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002) +#define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004) +#define AAC_OPTION_DOORBELL_RESET cpu_to_le32(0x00004000) +/* 4KB sector size */ +#define AAC_OPTION_VARIABLE_BLOCK_SIZE cpu_to_le32(0x00040000) +/* 240 simple volume support */ +#define AAC_OPTION_SUPPORTED_240_VOLUMES cpu_to_le32(0x10000000) +/* + * Supports FIB dump sync command send prior to IOP_RESET + */ +#define AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP cpu_to_le32(0x00004000) +#define AAC_SIS_VERSION_V3 3 +#define AAC_SIS_SLOT_UNKNOWN 0xFF + +#define GetBusInfo 0x00000009 +struct aac_bus_info { + __le32 Command; /* VM_Ioctl */ + __le32 ObjType; /* FT_DRIVE */ + __le32 MethodId; /* 1 = SCSI Layer */ + __le32 ObjectId; /* Handle */ + __le32 CtlCmd; /* GetBusInfo */ +}; + +struct aac_bus_info_response { + __le32 Status; /* ST_OK */ + __le32 ObjType; + __le32 MethodId; /* unused */ + __le32 ObjectId; /* unused */ + __le32 CtlCmd; /* unused */ + __le32 ProbeComplete; + __le32 BusCount; + __le32 TargetsPerBus; + u8 InitiatorBusId[10]; + u8 BusValid[10]; +}; + +/* + * Battery platforms + */ +#define AAC_BAT_REQ_PRESENT (1) +#define AAC_BAT_REQ_NOTPRESENT (2) +#define AAC_BAT_OPT_PRESENT (3) +#define AAC_BAT_OPT_NOTPRESENT (4) +#define AAC_BAT_NOT_SUPPORTED (5) +/* + * cpu types + */ +#define AAC_CPU_SIMULATOR (1) +#define AAC_CPU_I960 (2) +#define AAC_CPU_STRONGARM (3) + +/* + * Supported Options + */ +#define AAC_OPT_SNAPSHOT cpu_to_le32(1) +#define AAC_OPT_CLUSTERS cpu_to_le32(1<<1) +#define AAC_OPT_WRITE_CACHE cpu_to_le32(1<<2) +#define AAC_OPT_64BIT_DATA cpu_to_le32(1<<3) +#define AAC_OPT_HOST_TIME_FIB cpu_to_le32(1<<4) +#define AAC_OPT_RAID50 cpu_to_le32(1<<5) +#define AAC_OPT_4GB_WINDOW cpu_to_le32(1<<6) +#define AAC_OPT_SCSI_UPGRADEABLE cpu_to_le32(1<<7) +#define AAC_OPT_SOFT_ERR_REPORT cpu_to_le32(1<<8) +#define AAC_OPT_SUPPORTED_RECONDITION cpu_to_le32(1<<9) +#define AAC_OPT_SGMAP_HOST64 cpu_to_le32(1<<10) +#define AAC_OPT_ALARM cpu_to_le32(1<<11) +#define AAC_OPT_NONDASD cpu_to_le32(1<<12) +#define AAC_OPT_SCSI_MANAGED cpu_to_le32(1<<13) +#define AAC_OPT_RAID_SCSI_MODE cpu_to_le32(1<<14) +#define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16) +#define AAC_OPT_NEW_COMM cpu_to_le32(1<<17) +#define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18) +#define AAC_OPT_EXTENDED cpu_to_le32(1<<23) +#define AAC_OPT_NATIVE_HBA cpu_to_le32(1<<25) +#define AAC_OPT_NEW_COMM_TYPE1 cpu_to_le32(1<<28) +#define AAC_OPT_NEW_COMM_TYPE2 cpu_to_le32(1<<29) +#define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30) +#define AAC_OPT_NEW_COMM_TYPE4 cpu_to_le32(1<<31) + +#define AAC_COMM_PRODUCER 0 +#define AAC_COMM_MESSAGE 1 +#define AAC_COMM_MESSAGE_TYPE1 3 +#define AAC_COMM_MESSAGE_TYPE2 4 +#define AAC_COMM_MESSAGE_TYPE3 5 + +#define AAC_EXTOPT_SA_FIRMWARE cpu_to_le32(1<<1) +#define AAC_EXTOPT_SOFT_RESET 
cpu_to_le32(1<<16) + +/* MSIX context */ +struct aac_msix_ctx { + int vector_no; + struct aac_dev *dev; +}; + +struct aac_dev +{ + struct list_head entry; + const char *name; + int id; + + /* + * negotiated FIB settings + */ + unsigned int max_fib_size; + unsigned int sg_tablesize; + unsigned int max_num_aif; + + unsigned int max_cmd_size; /* max_fib_size or MAX_NATIVE */ + + /* + * Map for 128 fib objects (64k) + */ + dma_addr_t hw_fib_pa; /* also used for native cmd */ + struct hw_fib *hw_fib_va; /* also used for native cmd */ + struct hw_fib *aif_base_va; + /* + * Fib Headers + */ + struct fib *fibs; + + struct fib *free_fib; + spinlock_t fib_lock; + + struct mutex ioctl_mutex; + struct mutex scan_mutex; + struct aac_queue_block *queues; + /* + * The user API will use an IOCTL to register itself to receive + * FIBs from the adapter. The following list is used to keep + * track of all the threads that have requested these FIBs. The + * mutex is used to synchronize access to all data associated + * with the adapter fibs. + */ + struct list_head fib_list; + + struct adapter_ops a_ops; + unsigned long fsrev; /* Main driver's revision number */ + + resource_size_t base_start; /* main IO base */ + resource_size_t dbg_base; /* address of UART + * debug buffer */ + + resource_size_t base_size, dbg_size; /* Size of + * mapped in region */ + /* + * Holds initialization info + * to communicate with adapter + */ + union aac_init *init; + dma_addr_t init_pa; /* Holds physical address of the init struct */ + /* response queue (if AAC_COMM_MESSAGE_TYPE1) */ + __le32 *host_rrq; + dma_addr_t host_rrq_pa; /* phys. address */ + /* index into rrq buffer */ + u32 host_rrq_idx[AAC_MAX_MSIX]; + atomic_t rrq_outstanding[AAC_MAX_MSIX]; + u32 fibs_pushed_no; + struct pci_dev *pdev; /* Our PCI interface */ + /* pointer to buffer used for printf's from the adapter */ + void *printfbuf; + void *comm_addr; /* Base address of Comm area */ + dma_addr_t comm_phys; /* Physical Address of Comm area */ + size_t comm_size; + + struct Scsi_Host *scsi_host_ptr; + int maximum_num_containers; + int maximum_num_physicals; + int maximum_num_channels; + struct fsa_dev_info *fsa_dev; + struct task_struct *thread; + struct delayed_work safw_rescan_work; + struct delayed_work src_reinit_aif_worker; + int cardtype; + /* + *This lock will protect the two 32-bit + *writes to the Inbound Queue + */ + spinlock_t iq_lock; + + /* + * The following is the device specific extension. 
+ */ +#ifndef AAC_MIN_FOOTPRINT_SIZE +# define AAC_MIN_FOOTPRINT_SIZE 8192 +# define AAC_MIN_SRC_BAR0_SIZE 0x400000 +# define AAC_MIN_SRC_BAR1_SIZE 0x800 +# define AAC_MIN_SRCV_BAR0_SIZE 0x100000 +# define AAC_MIN_SRCV_BAR1_SIZE 0x400 +#endif + union + { + struct sa_registers __iomem *sa; + struct rx_registers __iomem *rx; + struct rkt_registers __iomem *rkt; + struct { + struct src_registers __iomem *bar0; + char __iomem *bar1; + } src; + } regs; + volatile void __iomem *base, *dbg_base_mapped; + volatile struct rx_inbound __iomem *IndexRegs; + u32 OIMR; /* Mask Register Cache */ + /* + * AIF thread states + */ + u32 aif_thread; + struct aac_adapter_info adapter_info; + struct aac_supplement_adapter_info supplement_adapter_info; + /* These are in adapter info but they are in the io flow so + * lets break them out so we don't have to do an AND to check them + */ + u8 nondasd_support; + u8 jbod; + u8 cache_protected; + u8 dac_support; + u8 needs_dac; + u8 raid_scsi_mode; + u8 comm_interface; + u8 raw_io_interface; + u8 raw_io_64; + u8 printf_enabled; + u8 in_reset; + u8 in_soft_reset; + u8 msi; + u8 sa_firmware; + int management_fib_count; + spinlock_t manage_lock; + spinlock_t sync_lock; + int sync_mode; + struct fib *sync_fib; + struct list_head sync_fib_list; + u32 doorbell_mask; + u32 max_msix; /* max. MSI-X vectors */ + u32 vector_cap; /* MSI-X vector capab.*/ + int msi_enabled; /* MSI/MSI-X enabled */ + atomic_t msix_counter; + u32 scan_counter; + struct msix_entry msixentry[AAC_MAX_MSIX]; + struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */ + struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS]; + struct aac_ciss_phys_luns_resp *safw_phys_luns; + u8 adapter_shutdown; + u32 handle_pci_error; + bool init_reset; + u8 soft_reset_support; +}; + +#define aac_adapter_interrupt(dev) \ + (dev)->a_ops.adapter_interrupt(dev) + +#define aac_adapter_notify(dev, event) \ + (dev)->a_ops.adapter_notify(dev, event) + +#define aac_adapter_disable_int(dev) \ + (dev)->a_ops.adapter_disable_int(dev) + +#define aac_adapter_enable_int(dev) \ + (dev)->a_ops.adapter_enable_int(dev) + +#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ + (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) + +#define aac_adapter_restart(dev, bled, reset_type) \ + ((dev)->a_ops.adapter_restart(dev, bled, reset_type)) + +#define aac_adapter_start(dev) \ + ((dev)->a_ops.adapter_start(dev)) + +#define aac_adapter_ioremap(dev, size) \ + (dev)->a_ops.adapter_ioremap(dev, size) + +#define aac_adapter_deliver(fib) \ + ((fib)->dev)->a_ops.adapter_deliver(fib) + +#define aac_adapter_bounds(dev,cmd,lba) \ + dev->a_ops.adapter_bounds(dev,cmd,lba) + +#define aac_adapter_read(fib,cmd,lba,count) \ + ((fib)->dev)->a_ops.adapter_read(fib,cmd,lba,count) + +#define aac_adapter_write(fib,cmd,lba,count,fua) \ + ((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count,fua) + +#define aac_adapter_scsi(fib,cmd) \ + ((fib)->dev)->a_ops.adapter_scsi(fib,cmd) + +#define aac_adapter_comm(dev,comm) \ + (dev)->a_ops.adapter_comm(dev, comm) + +#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001) +#define FIB_CONTEXT_FLAG (0x00000002) +#define FIB_CONTEXT_FLAG_WAIT (0x00000004) +#define FIB_CONTEXT_FLAG_FASTRESP (0x00000008) +#define FIB_CONTEXT_FLAG_NATIVE_HBA (0x00000010) +#define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF (0x00000020) +#define FIB_CONTEXT_FLAG_SCSI_CMD (0x00000040) +#define FIB_CONTEXT_FLAG_EH_RESET (0x00000080) + +/* + * Define the command values + */ + 
+#define Null 0 +#define GetAttributes 1 +#define SetAttributes 2 +#define Lookup 3 +#define ReadLink 4 +#define Read 5 +#define Write 6 +#define Create 7 +#define MakeDirectory 8 +#define SymbolicLink 9 +#define MakeNode 10 +#define Removex 11 +#define RemoveDirectoryx 12 +#define Rename 13 +#define Link 14 +#define ReadDirectory 15 +#define ReadDirectoryPlus 16 +#define FileSystemStatus 17 +#define FileSystemInfo 18 +#define PathConfigure 19 +#define Commit 20 +#define Mount 21 +#define UnMount 22 +#define Newfs 23 +#define FsCheck 24 +#define FsSync 25 +#define SimReadWrite 26 +#define SetFileSystemStatus 27 +#define BlockRead 28 +#define BlockWrite 29 +#define NvramIoctl 30 +#define FsSyncWait 31 +#define ClearArchiveBit 32 +#define SetAcl 33 +#define GetAcl 34 +#define AssignAcl 35 +#define FaultInsertion 36 /* Fault Insertion Command */ +#define CrazyCache 37 /* Crazycache */ + +#define MAX_FSACOMMAND_NUM 38 + + +/* + * Define the status returns. These are very unixlike although + * most are not in fact used + */ + +#define ST_OK 0 +#define ST_PERM 1 +#define ST_NOENT 2 +#define ST_IO 5 +#define ST_NXIO 6 +#define ST_E2BIG 7 +#define ST_MEDERR 8 +#define ST_ACCES 13 +#define ST_EXIST 17 +#define ST_XDEV 18 +#define ST_NODEV 19 +#define ST_NOTDIR 20 +#define ST_ISDIR 21 +#define ST_INVAL 22 +#define ST_FBIG 27 +#define ST_NOSPC 28 +#define ST_ROFS 30 +#define ST_MLINK 31 +#define ST_WOULDBLOCK 35 +#define ST_NAMETOOLONG 63 +#define ST_NOTEMPTY 66 +#define ST_DQUOT 69 +#define ST_STALE 70 +#define ST_REMOTE 71 +#define ST_NOT_READY 72 +#define ST_BADHANDLE 10001 +#define ST_NOT_SYNC 10002 +#define ST_BAD_COOKIE 10003 +#define ST_NOTSUPP 10004 +#define ST_TOOSMALL 10005 +#define ST_SERVERFAULT 10006 +#define ST_BADTYPE 10007 +#define ST_JUKEBOX 10008 +#define ST_NOTMOUNTED 10009 +#define ST_MAINTMODE 10010 +#define ST_STALEACL 10011 + +/* + * On writes how does the client want the data written. 
+ */ + +#define CACHE_CSTABLE 1 +#define CACHE_UNSTABLE 2 + +/* + * Lets the client know at which level the data was committed on + * a write request + */ + +#define CMFILE_SYNCH_NVRAM 1 +#define CMDATA_SYNCH_NVRAM 2 +#define CMFILE_SYNCH 3 +#define CMDATA_SYNCH 4 +#define CMUNSTABLE 5 + +#define RIO_TYPE_WRITE 0x0000 +#define RIO_TYPE_READ 0x0001 +#define RIO_SUREWRITE 0x0008 + +#define RIO2_IO_TYPE 0x0003 +#define RIO2_IO_TYPE_WRITE 0x0000 +#define RIO2_IO_TYPE_READ 0x0001 +#define RIO2_IO_TYPE_VERIFY 0x0002 +#define RIO2_IO_ERROR 0x0004 +#define RIO2_IO_SUREWRITE 0x0008 +#define RIO2_SGL_CONFORMANT 0x0010 +#define RIO2_SG_FORMAT 0xF000 +#define RIO2_SG_FORMAT_ARC 0x0000 +#define RIO2_SG_FORMAT_SRL 0x1000 +#define RIO2_SG_FORMAT_IEEE1212 0x2000 + +struct aac_read +{ + __le32 command; + __le32 cid; + __le32 block; + __le32 count; + struct sgmap sg; // Must be last in struct because it is variable +}; + +struct aac_read64 +{ + __le32 command; + __le16 cid; + __le16 sector_count; + __le32 block; + __le16 pad; + __le16 flags; + struct sgmap64 sg; // Must be last in struct because it is variable +}; + +struct aac_read_reply +{ + __le32 status; + __le32 count; +}; + +struct aac_write +{ + __le32 command; + __le32 cid; + __le32 block; + __le32 count; + __le32 stable; // Not used + struct sgmap sg; // Must be last in struct because it is variable +}; + +struct aac_write64 +{ + __le32 command; + __le16 cid; + __le16 sector_count; + __le32 block; + __le16 pad; + __le16 flags; + struct sgmap64 sg; // Must be last in struct because it is variable +}; +struct aac_write_reply +{ + __le32 status; + __le32 count; + __le32 committed; +}; + +struct aac_raw_io +{ + __le32 block[2]; + __le32 count; + __le16 cid; + __le16 flags; /* 00 W, 01 R */ + __le16 bpTotal; /* reserved for F/W use */ + __le16 bpComplete; /* reserved for F/W use */ + struct sgmapraw sg; +}; + +struct aac_raw_io2 { + __le32 blockLow; + __le32 blockHigh; + __le32 byteCount; + __le16 cid; + __le16 flags; /* RIO2 flags */ + __le32 sgeFirstSize; /* size of first sge el. */ + __le32 sgeNominalSize; /* size of 2nd sge el. 
(if conformant) */ + u8 sgeCnt; /* only 8 bits required */ + u8 bpTotal; /* reserved for F/W use */ + u8 bpComplete; /* reserved for F/W use */ + u8 sgeFirstIndex; /* reserved for F/W use */ + u8 unused[4]; + struct sge_ieee1212 sge[]; +}; + +#define CT_FLUSH_CACHE 129 +struct aac_synchronize { + __le32 command; /* VM_ContainerConfig */ + __le32 type; /* CT_FLUSH_CACHE */ + __le32 cid; + __le32 parm1; + __le32 parm2; + __le32 parm3; + __le32 parm4; + __le32 count; /* sizeof(((struct aac_synchronize_reply *)NULL)->data) */ +}; + +struct aac_synchronize_reply { + __le32 dummy0; + __le32 dummy1; + __le32 status; /* CT_OK */ + __le32 parm1; + __le32 parm2; + __le32 parm3; + __le32 parm4; + __le32 parm5; + u8 data[16]; +}; + +#define CT_POWER_MANAGEMENT 245 +#define CT_PM_START_UNIT 2 +#define CT_PM_STOP_UNIT 3 +#define CT_PM_UNIT_IMMEDIATE 1 +struct aac_power_management { + __le32 command; /* VM_ContainerConfig */ + __le32 type; /* CT_POWER_MANAGEMENT */ + __le32 sub; /* CT_PM_* */ + __le32 cid; + __le32 parm; /* CT_PM_sub_* */ +}; + +#define CT_PAUSE_IO 65 +#define CT_RELEASE_IO 66 +struct aac_pause { + __le32 command; /* VM_ContainerConfig */ + __le32 type; /* CT_PAUSE_IO */ + __le32 timeout; /* 10ms ticks */ + __le32 min; + __le32 noRescan; + __le32 parm3; + __le32 parm4; + __le32 count; /* sizeof(((struct aac_pause_reply *)NULL)->data) */ +}; + +struct aac_srb +{ + __le32 function; + __le32 channel; + __le32 id; + __le32 lun; + __le32 timeout; + __le32 flags; + __le32 count; // Data xfer size + __le32 retry_limit; + __le32 cdb_size; + u8 cdb[16]; + struct sgmap sg; +}; + +/* + * This and associated data structs are used by the + * ioctl caller and are in cpu order. + */ +struct user_aac_srb +{ + u32 function; + u32 channel; + u32 id; + u32 lun; + u32 timeout; + u32 flags; + u32 count; // Data xfer size + u32 retry_limit; + u32 cdb_size; + u8 cdb[16]; + struct user_sgmap sg; +}; + +#define AAC_SENSE_BUFFERSIZE 30 + +struct aac_srb_reply +{ + __le32 status; + __le32 srb_status; + __le32 scsi_status; + __le32 data_xfer_length; + __le32 sense_data_size; + u8 sense_data[AAC_SENSE_BUFFERSIZE]; // Can this be SCSI_SENSE_BUFFERSIZE +}; + +struct aac_srb_unit { + struct aac_srb srb; + struct aac_srb_reply srb_reply; +}; + +/* + * SRB Flags + */ +#define SRB_NoDataXfer 0x0000 +#define SRB_DisableDisconnect 0x0004 +#define SRB_DisableSynchTransfer 0x0008 +#define SRB_BypassFrozenQueue 0x0010 +#define SRB_DisableAutosense 0x0020 +#define SRB_DataIn 0x0040 +#define SRB_DataOut 0x0080 + +/* + * SRB Functions - set in aac_srb->function + */ +#define SRBF_ExecuteScsi 0x0000 +#define SRBF_ClaimDevice 0x0001 +#define SRBF_IO_Control 0x0002 +#define SRBF_ReceiveEvent 0x0003 +#define SRBF_ReleaseQueue 0x0004 +#define SRBF_AttachDevice 0x0005 +#define SRBF_ReleaseDevice 0x0006 +#define SRBF_Shutdown 0x0007 +#define SRBF_Flush 0x0008 +#define SRBF_AbortCommand 0x0010 +#define SRBF_ReleaseRecovery 0x0011 +#define SRBF_ResetBus 0x0012 +#define SRBF_ResetDevice 0x0013 +#define SRBF_TerminateIO 0x0014 +#define SRBF_FlushQueue 0x0015 +#define SRBF_RemoveDevice 0x0016 +#define SRBF_DomainValidation 0x0017 + +/* + * SRB SCSI Status - set in aac_srb->scsi_status + */ +#define SRB_STATUS_PENDING 0x00 +#define SRB_STATUS_SUCCESS 0x01 +#define SRB_STATUS_ABORTED 0x02 +#define SRB_STATUS_ABORT_FAILED 0x03 +#define SRB_STATUS_ERROR 0x04 +#define SRB_STATUS_BUSY 0x05 +#define SRB_STATUS_INVALID_REQUEST 0x06 +#define SRB_STATUS_INVALID_PATH_ID 0x07 +#define SRB_STATUS_NO_DEVICE 0x08 +#define SRB_STATUS_TIMEOUT 0x09 +#define 
SRB_STATUS_SELECTION_TIMEOUT 0x0A +#define SRB_STATUS_COMMAND_TIMEOUT 0x0B +#define SRB_STATUS_MESSAGE_REJECTED 0x0D +#define SRB_STATUS_BUS_RESET 0x0E +#define SRB_STATUS_PARITY_ERROR 0x0F +#define SRB_STATUS_REQUEST_SENSE_FAILED 0x10 +#define SRB_STATUS_NO_HBA 0x11 +#define SRB_STATUS_DATA_OVERRUN 0x12 +#define SRB_STATUS_UNEXPECTED_BUS_FREE 0x13 +#define SRB_STATUS_PHASE_SEQUENCE_FAILURE 0x14 +#define SRB_STATUS_BAD_SRB_BLOCK_LENGTH 0x15 +#define SRB_STATUS_REQUEST_FLUSHED 0x16 +#define SRB_STATUS_DELAYED_RETRY 0x17 +#define SRB_STATUS_INVALID_LUN 0x20 +#define SRB_STATUS_INVALID_TARGET_ID 0x21 +#define SRB_STATUS_BAD_FUNCTION 0x22 +#define SRB_STATUS_ERROR_RECOVERY 0x23 +#define SRB_STATUS_NOT_STARTED 0x24 +#define SRB_STATUS_NOT_IN_USE 0x30 +#define SRB_STATUS_FORCE_ABORT 0x31 +#define SRB_STATUS_DOMAIN_VALIDATION_FAIL 0x32 + +/* + * Object-Server / Volume-Manager Dispatch Classes + */ + +#define VM_Null 0 +#define VM_NameServe 1 +#define VM_ContainerConfig 2 +#define VM_Ioctl 3 +#define VM_FilesystemIoctl 4 +#define VM_CloseAll 5 +#define VM_CtBlockRead 6 +#define VM_CtBlockWrite 7 +#define VM_SliceBlockRead 8 /* raw access to configured "storage objects" */ +#define VM_SliceBlockWrite 9 +#define VM_DriveBlockRead 10 /* raw access to physical devices */ +#define VM_DriveBlockWrite 11 +#define VM_EnclosureMgt 12 /* enclosure management */ +#define VM_Unused 13 /* used to be diskset management */ +#define VM_CtBlockVerify 14 +#define VM_CtPerf 15 /* performance test */ +#define VM_CtBlockRead64 16 +#define VM_CtBlockWrite64 17 +#define VM_CtBlockVerify64 18 +#define VM_CtHostRead64 19 +#define VM_CtHostWrite64 20 +#define VM_DrvErrTblLog 21 +#define VM_NameServe64 22 +#define VM_NameServeAllBlk 30 + +#define MAX_VMCOMMAND_NUM 23 /* used for sizing stats array - leave last */ + +/* + * Descriptive information (eg, vital stats) + * that a content manager might report. The + * FileArray filesystem component is one example + * of a content manager. Raw mode might be + * another. + */ + +struct aac_fsinfo { + __le32 fsTotalSize; /* Consumed by fs, incl. 
metadata */ + __le32 fsBlockSize; + __le32 fsFragSize; + __le32 fsMaxExtendSize; + __le32 fsSpaceUnits; + __le32 fsMaxNumFiles; + __le32 fsNumFreeFiles; + __le32 fsInodeDensity; +}; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */ + +struct aac_blockdevinfo { + __le32 block_size; + __le32 logical_phys_map; + u8 identifier[16]; +}; + +union aac_contentinfo { + struct aac_fsinfo filesys; + struct aac_blockdevinfo bdevinfo; +}; + +/* + * Query for Container Configuration Status + */ + +#define CT_GET_CONFIG_STATUS 147 +struct aac_get_config_status { + __le32 command; /* VM_ContainerConfig */ + __le32 type; /* CT_GET_CONFIG_STATUS */ + __le32 parm1; + __le32 parm2; + __le32 parm3; + __le32 parm4; + __le32 parm5; + __le32 count; /* sizeof(((struct aac_get_config_status_resp *)NULL)->data) */ +}; + +#define CFACT_CONTINUE 0 +#define CFACT_PAUSE 1 +#define CFACT_ABORT 2 +struct aac_get_config_status_resp { + __le32 response; /* ST_OK */ + __le32 dummy0; + __le32 status; /* CT_OK */ + __le32 parm1; + __le32 parm2; + __le32 parm3; + __le32 parm4; + __le32 parm5; + struct { + __le32 action; /* CFACT_CONTINUE, CFACT_PAUSE or CFACT_ABORT */ + __le16 flags; + __le16 count; + } data; +}; + +/* + * Accept the configuration as-is + */ + +#define CT_COMMIT_CONFIG 152 + +struct aac_commit_config { + __le32 command; /* VM_ContainerConfig */ + __le32 type; /* CT_COMMIT_CONFIG */ +}; + +/* + * Query for Container Configuration Status + */ + +#define CT_GET_CONTAINER_COUNT 4 +struct aac_get_container_count { + __le32 command; /* VM_ContainerConfig */ + __le32 type; /* CT_GET_CONTAINER_COUNT */ +}; + +struct aac_get_container_count_resp { + __le32 response; /* ST_OK */ + __le32 dummy0; + __le32 MaxContainers; + __le32 ContainerSwitchEntries; + __le32 MaxPartitions; + __le32 MaxSimpleVolumes; +}; + + +/* + * Query for "mountable" objects, ie, objects that are typically + * associated with a drive letter on the client (host) side. + */ + +struct aac_mntent { + __le32 oid; + u8 name[16]; /* if applicable */ + struct creation_info create_info; /* if applicable */ + __le32 capacity; + __le32 vol; /* substrate structure */ + __le32 obj; /* FT_FILESYS, etc. */ + __le32 state; /* unready for mounting, + readonly, etc. 
*/ + union aac_contentinfo fileinfo; /* Info specific to content + manager (eg, filesystem) */ + __le32 altoid; /* != oid <==> snapshot or + broken mirror exists */ + __le32 capacityhigh; +}; + +#define FSCS_NOTCLEAN 0x0001 /* fsck is necessary before mounting */ +#define FSCS_READONLY 0x0002 /* possible result of broken mirror */ +#define FSCS_HIDDEN 0x0004 /* should be ignored - set during a clear */ +#define FSCS_NOT_READY 0x0008 /* Array spinning up to fulfil request */ + +struct aac_query_mount { + __le32 command; + __le32 type; + __le32 count; +}; + +struct aac_mount { + __le32 status; + __le32 type; /* should be same as that requested */ + __le32 count; + struct aac_mntent mnt[1]; +}; + +#define CT_READ_NAME 130 +struct aac_get_name { + __le32 command; /* VM_ContainerConfig */ + __le32 type; /* CT_READ_NAME */ + __le32 cid; + __le32 parm1; + __le32 parm2; + __le32 parm3; + __le32 parm4; + __le32 count; /* sizeof(((struct aac_get_name_resp *)NULL)->data) */ +}; + +struct aac_get_name_resp { + __le32 dummy0; + __le32 dummy1; + __le32 status; /* CT_OK */ + __le32 parm1; + __le32 parm2; + __le32 parm3; + __le32 parm4; + __le32 parm5; + u8 data[17]; +}; + +#define CT_CID_TO_32BITS_UID 165 +struct aac_get_serial { + __le32 command; /* VM_ContainerConfig */ + __le32 type; /* CT_CID_TO_32BITS_UID */ + __le32 cid; +}; + +struct aac_get_serial_resp { + __le32 dummy0; + __le32 dummy1; + __le32 status; /* CT_OK */ + __le32 uid; +}; + +/* + * The following command is sent to shut down each container. + */ + +struct aac_close { + __le32 command; + __le32 cid; +}; + +struct aac_query_disk +{ + s32 cnum; + s32 bus; + s32 id; + s32 lun; + u32 valid; + u32 locked; + u32 deleted; + s32 instance; + s8 name[10]; + u32 unmapped; +}; + +struct aac_delete_disk { + u32 disknum; + u32 cnum; +}; + +struct fib_ioctl +{ + u32 fibctx; + s32 wait; + char __user *fib; +}; + +struct revision +{ + u32 compat; + __le32 version; + __le32 build; +}; + + +/* + * Ugly - non Linux like ioctl coding for back compat. + */ + +#define CTL_CODE(function, method) ( \ + (4<< 16) | ((function) << 2) | (method) \ +) + +/* + * Define the method codes for how buffers are passed for I/O and FS + * controls + */ + +#define METHOD_BUFFERED 0 +#define METHOD_NEITHER 3 + +/* + * Filesystem ioctls + */ + +#define FSACTL_SENDFIB CTL_CODE(2050, METHOD_BUFFERED) +#define FSACTL_SEND_RAW_SRB CTL_CODE(2067, METHOD_BUFFERED) +#define FSACTL_DELETE_DISK 0x163 +#define FSACTL_QUERY_DISK 0x173 +#define FSACTL_OPEN_GET_ADAPTER_FIB CTL_CODE(2100, METHOD_BUFFERED) +#define FSACTL_GET_NEXT_ADAPTER_FIB CTL_CODE(2101, METHOD_BUFFERED) +#define FSACTL_CLOSE_GET_ADAPTER_FIB CTL_CODE(2102, METHOD_BUFFERED) +#define FSACTL_MINIPORT_REV_CHECK CTL_CODE(2107, METHOD_BUFFERED) +#define FSACTL_GET_PCI_INFO CTL_CODE(2119, METHOD_BUFFERED) +#define FSACTL_FORCE_DELETE_DISK CTL_CODE(2120, METHOD_NEITHER) +#define FSACTL_GET_CONTAINERS 2131 +#define FSACTL_SEND_LARGE_FIB CTL_CODE(2138, METHOD_BUFFERED) +#define FSACTL_RESET_IOP CTL_CODE(2140, METHOD_BUFFERED) +#define FSACTL_GET_HBA_INFO CTL_CODE(2150, METHOD_BUFFERED) +/* flags defined for IOP & HW SOFT RESET */ +#define HW_IOP_RESET 0x01 +#define HW_SOFT_RESET 0x02 +#define IOP_HWSOFT_RESET (HW_IOP_RESET | HW_SOFT_RESET) +/* HW Soft Reset register offset */ +#define IBW_SWR_OFFSET 0x4000 +#define SOFT_RESET_TIME 60 + + + +struct aac_common +{ + /* + * If this value is set to 1 then interrupt moderation will occur + * in the base commuication support. 
+ */ + u32 irq_mod; + u32 peak_fibs; + u32 zero_fibs; + u32 fib_timeouts; + /* + * Statistical counters in debug mode + */ +#ifdef DBG + u32 FibsSent; + u32 FibRecved; + u32 NativeSent; + u32 NativeRecved; + u32 NoResponseSent; + u32 NoResponseRecved; + u32 AsyncSent; + u32 AsyncRecved; + u32 NormalSent; + u32 NormalRecved; +#endif +}; + +extern struct aac_common aac_config; + +/* + * This is for management ioctl purpose only. + */ +struct aac_hba_info { + + u8 driver_name[50]; + u8 adapter_number; + u8 system_io_bus_number; + u8 device_number; + u32 function_number; + u32 vendor_id; + u32 device_id; + u32 sub_vendor_id; + u32 sub_system_id; + u32 mapped_base_address_size; + u32 base_physical_address_high_part; + u32 base_physical_address_low_part; + + u32 max_command_size; + u32 max_fib_size; + u32 max_scatter_gather_from_os; + u32 max_scatter_gather_to_fw; + u32 max_outstanding_fibs; + + u32 queue_start_threshold; + u32 queue_dump_threshold; + u32 max_io_size_queued; + u32 outstanding_io; + + u32 firmware_build_number; + u32 bios_build_number; + u32 driver_build_number; + u32 serial_number_high_part; + u32 serial_number_low_part; + u32 supported_options; + u32 feature_bits; + u32 currentnumber_ports; + + u8 new_comm_interface:1; + u8 new_commands_supported:1; + u8 disable_passthrough:1; + u8 expose_non_dasd:1; + u8 queue_allowed:1; + u8 bled_check_enabled:1; + u8 reserved1:1; + u8 reserted2:1; + + u32 reserved3[10]; + +}; + +/* + * The following macro is used when sending and receiving FIBs. It is + * only used for debugging. + */ + +#ifdef DBG +#define FIB_COUNTER_INCREMENT(counter) (counter)++ +#else +#define FIB_COUNTER_INCREMENT(counter) +#endif + +/* + * Adapter direct commands + * Monitor/Kernel API + */ + +#define BREAKPOINT_REQUEST 0x00000004 +#define INIT_STRUCT_BASE_ADDRESS 0x00000005 +#define READ_PERMANENT_PARAMETERS 0x0000000a +#define WRITE_PERMANENT_PARAMETERS 0x0000000b +#define HOST_CRASHING 0x0000000d +#define SEND_SYNCHRONOUS_FIB 0x0000000c +#define COMMAND_POST_RESULTS 0x00000014 +#define GET_ADAPTER_PROPERTIES 0x00000019 +#define GET_DRIVER_BUFFER_PROPERTIES 0x00000023 +#define RCV_TEMP_READINGS 0x00000025 +#define GET_COMM_PREFERRED_SETTINGS 0x00000026 +#define IOP_RESET_FW_FIB_DUMP 0x00000034 +#define DROP_IO 0x00000035 +#define IOP_RESET 0x00001000 +#define IOP_RESET_ALWAYS 0x00001001 +#define RE_INIT_ADAPTER 0x000000ee + +#define IOP_SRC_RESET_MASK 0x00000100 + +/* + * Adapter Status Register + * + * Phase Staus mailbox is 32bits: + * <31:16> = Phase Status + * <15:0> = Phase + * + * The adapter reports is present state through the phase. Only + * a single phase should be ever be set. Each phase can have multiple + * phase status bits to provide more detailed information about the + * state of the board. Care should be taken to ensure that any phase + * status bits that are set when changing the phase are also valid + * for the new phase or be cleared out. Adapter software (monitor, + * iflash, kernel) is responsible for properly maintining the phase + * status mailbox when it is running. + * + * MONKER_API Phases + * + * Phases are bit oriented. 
It is NOT valid to have multiple bits set + */ + +#define SELF_TEST_FAILED 0x00000004 +#define MONITOR_PANIC 0x00000020 +#define KERNEL_BOOTING 0x00000040 +#define KERNEL_UP_AND_RUNNING 0x00000080 +#define KERNEL_PANIC 0x00000100 +#define FLASH_UPD_PENDING 0x00002000 +#define FLASH_UPD_SUCCESS 0x00004000 +#define FLASH_UPD_FAILED 0x00008000 +#define INVALID_OMR 0xffffffff +#define FWUPD_TIMEOUT (5 * 60) + +/* + * Doorbell bit defines + */ + +#define DoorBellSyncCmdAvailable (1<<0) /* Host -> Adapter */ +#define DoorBellPrintfDone (1<<5) /* Host -> Adapter */ +#define DoorBellAdapterNormCmdReady (1<<1) /* Adapter -> Host */ +#define DoorBellAdapterNormRespReady (1<<2) /* Adapter -> Host */ +#define DoorBellAdapterNormCmdNotFull (1<<3) /* Adapter -> Host */ +#define DoorBellAdapterNormRespNotFull (1<<4) /* Adapter -> Host */ +#define DoorBellPrintfReady (1<<5) /* Adapter -> Host */ +#define DoorBellAifPending (1<<6) /* Adapter -> Host */ + +/* PMC specific outbound doorbell bits */ +#define PmDoorBellResponseSent (1<<1) /* Adapter -> Host */ + +/* + * For FIB communication, we need all of the following things + * to send back to the user. + */ + +#define AifCmdEventNotify 1 /* Notify of event */ +#define AifEnConfigChange 3 /* Adapter configuration change */ +#define AifEnContainerChange 4 /* Container configuration change */ +#define AifEnDeviceFailure 5 /* SCSI device failed */ +#define AifEnEnclosureManagement 13 /* EM_DRIVE_* */ +#define EM_DRIVE_INSERTION 31 +#define EM_DRIVE_REMOVAL 32 +#define EM_SES_DRIVE_INSERTION 33 +#define EM_SES_DRIVE_REMOVAL 26 +#define AifEnBatteryEvent 14 /* Change in Battery State */ +#define AifEnAddContainer 15 /* A new array was created */ +#define AifEnDeleteContainer 16 /* A container was deleted */ +#define AifEnExpEvent 23 /* Firmware Event Log */ +#define AifExeFirmwarePanic 3 /* Firmware Event Panic */ +#define AifHighPriority 3 /* Highest Priority Event */ +#define AifEnAddJBOD 30 /* JBOD created */ +#define AifEnDeleteJBOD 31 /* JBOD deleted */ + +#define AifBuManagerEvent 42 /* Bu management*/ +#define AifBuCacheDataLoss 10 +#define AifBuCacheDataRecover 11 + +#define AifCmdJobProgress 2 /* Progress report */ +#define AifJobCtrZero 101 /* Array Zero progress */ +#define AifJobStsSuccess 1 /* Job completes */ +#define AifJobStsRunning 102 /* Job running */ +#define AifCmdAPIReport 3 /* Report from other user of API */ +#define AifCmdDriverNotify 4 /* Notify host driver of event */ +#define AifDenMorphComplete 200 /* A morph operation completed */ +#define AifDenVolumeExtendComplete 201 /* A volume extend completed */ +#define AifReqJobList 100 /* Gets back complete job list */ +#define AifReqJobsForCtr 101 /* Gets back jobs for specific container */ +#define AifReqJobsForScsi 102 /* Gets back jobs for specific SCSI device */ +#define AifReqJobReport 103 /* Gets back a specific job report or list of them */ +#define AifReqTerminateJob 104 /* Terminates job */ +#define AifReqSuspendJob 105 /* Suspends a job */ +#define AifReqResumeJob 106 /* Resumes a job */ +#define AifReqSendAPIReport 107 /* API generic report requests */ +#define AifReqAPIJobStart 108 /* Start a job from the API */ +#define AifReqAPIJobUpdate 109 /* Update a job report from the API */ +#define AifReqAPIJobFinish 110 /* Finish a job from the API */ + +/* PMC NEW COMM: Request the event data */ +#define AifReqEvent 200 +#define AifRawDeviceRemove 203 /* RAW device deleted */ +#define AifNativeDeviceAdd 204 /* native HBA device added */ +#define AifNativeDeviceRemove 205 /* native 
HBA device removed */ + + +/* + * Adapter Initiated FIB command structures. Start with the adapter + * initiated FIBs that really come from the adapter, and get responded + * to by the host. + */ + +struct aac_aifcmd { + __le32 command; /* Tell host what type of notify this is */ + __le32 seqnum; /* To allow ordering of reports (if necessary) */ + u8 data[]; /* Undefined length (from kernel viewpoint) */ +}; + +/** + * Convert capacity to cylinders + * accounting for the fact capacity could be a 64 bit value + * + */ +static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor) +{ + sector_div(capacity, divisor); + return capacity; +} + +static inline int aac_pci_offline(struct aac_dev *dev) +{ + return pci_channel_offline(dev->pdev) || dev->handle_pci_error; +} + +static inline int aac_adapter_check_health(struct aac_dev *dev) +{ + if (unlikely(aac_pci_offline(dev))) + return -1; + + return (dev)->a_ops.adapter_check_health(dev); +} + + +int aac_scan_host(struct aac_dev *dev); + +static inline void aac_schedule_safw_scan_worker(struct aac_dev *dev) +{ + schedule_delayed_work(&dev->safw_rescan_work, AAC_RESCAN_DELAY); +} + +static inline void aac_schedule_src_reinit_aif_worker(struct aac_dev *dev) +{ + schedule_delayed_work(&dev->src_reinit_aif_worker, AAC_RESCAN_DELAY); +} + +static inline void aac_safw_rescan_worker(struct work_struct *work) +{ + struct aac_dev *dev = container_of(to_delayed_work(work), + struct aac_dev, safw_rescan_work); + + wait_event(dev->scsi_host_ptr->host_wait, + !scsi_host_in_recovery(dev->scsi_host_ptr)); + + aac_scan_host(dev); +} + +static inline void aac_cancel_rescan_worker(struct aac_dev *dev) +{ + cancel_delayed_work_sync(&dev->safw_rescan_work); + cancel_delayed_work_sync(&dev->src_reinit_aif_worker); +} + +enum aac_cmd_owner { + AAC_OWNER_MIDLEVEL = 0x101, + AAC_OWNER_LOWLEVEL = 0x102, + AAC_OWNER_ERROR_HANDLER = 0x103, + AAC_OWNER_FIRMWARE = 0x106, +}; + +struct aac_cmd_priv { + int (*callback)(struct scsi_cmnd *); + int status; + enum aac_cmd_owner owner; + bool sent_command; +}; + +static inline struct aac_cmd_priv *aac_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +void aac_safw_rescan_worker(struct work_struct *work); +void aac_src_reinit_aif_worker(struct work_struct *work); +int aac_acquire_irq(struct aac_dev *dev); +void aac_free_irq(struct aac_dev *dev); +int aac_setup_safw_adapter(struct aac_dev *dev); +const char *aac_driverinfo(struct Scsi_Host *); +void aac_fib_vector_assign(struct aac_dev *dev); +struct fib *aac_fib_alloc(struct aac_dev *dev); +struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd); +int aac_fib_setup(struct aac_dev *dev); +void aac_fib_map_free(struct aac_dev *dev); +void aac_fib_free(struct fib * context); +void aac_fib_init(struct fib * context); +void aac_printf(struct aac_dev *dev, u32 val); +int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt); +int aac_hba_send(u8 command, struct fib *context, + fib_callback callback, void *ctxt); +int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry); +void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); +int aac_fib_complete(struct fib * context); +void aac_hba_callback(void *context, struct fib *fibptr); +#define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data) +struct aac_dev *aac_init_adapter(struct aac_dev *dev); +void aac_src_access_devreg(struct aac_dev *dev, int mode); 
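+/*
+ * Illustrative sketch only -- not part of the upstream header.  It shows
+ * one plausible synchronous FIB round trip built from the helpers declared
+ * above; the function name, the payload/size arguments and the minimal
+ * error handling are hypothetical, and the block is guarded out so it is
+ * never compiled.
+ */
+#if 0
+static inline int aac_example_sync_fib(struct aac_dev *dev, u16 command,
+				       void *payload, unsigned long size)
+{
+	struct fib *fibptr;
+	int status;
+
+	fibptr = aac_fib_alloc(dev);		/* take a FIB from the pool */
+	if (!fibptr)
+		return -ENOMEM;
+
+	aac_fib_init(fibptr);			/* reset the FIB header state */
+	memcpy(fib_data(fibptr), payload, size);	/* fill the data area */
+
+	/* wait == 1: block until the adapter posts its response */
+	status = aac_fib_send(command, fibptr, size, FsaNormal, 1, 1,
+			      NULL, NULL);
+
+	aac_fib_complete(fibptr);		/* reclaim queue resources */
+	aac_fib_free(fibptr);			/* return the FIB to the pool */
+	return status;
+}
+#endif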
+void aac_set_intx_mode(struct aac_dev *dev); +int aac_get_config_status(struct aac_dev *dev, int commit_flag); +int aac_get_containers(struct aac_dev *dev); +int aac_scsi_cmd(struct scsi_cmnd *cmd); +int aac_dev_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg); +#ifndef shost_to_class +#define shost_to_class(shost) &shost->shost_dev +#endif +ssize_t aac_get_serial_number(struct device *dev, char *buf); +int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg); +int aac_rx_init(struct aac_dev *dev); +int aac_rkt_init(struct aac_dev *dev); +int aac_nark_init(struct aac_dev *dev); +int aac_sa_init(struct aac_dev *dev); +int aac_src_init(struct aac_dev *dev); +int aac_srcv_init(struct aac_dev *dev); +int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify); +void aac_define_int_mode(struct aac_dev *dev); +unsigned int aac_response_normal(struct aac_queue * q); +unsigned int aac_command_normal(struct aac_queue * q); +unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index, + int isAif, int isFastResponse, + struct hw_fib *aif_fib); +int aac_reset_adapter(struct aac_dev *dev, int forced, u8 reset_type); +int aac_check_health(struct aac_dev * dev); +int aac_command_thread(void *data); +int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); +int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size); +struct aac_driver_ident* aac_get_driver_ident(int devtype); +int aac_get_adapter_info(struct aac_dev* dev); +int aac_send_shutdown(struct aac_dev *dev); +int aac_probe_container(struct aac_dev *dev, int cid); +int _aac_rx_init(struct aac_dev *dev); +int aac_rx_select_comm(struct aac_dev *dev, int comm); +int aac_rx_deliver_producer(struct fib * fib); +void aac_reinit_aif(struct aac_dev *aac, unsigned int index); + +static inline int aac_is_src(struct aac_dev *dev) +{ + u16 device = dev->pdev->device; + + if (device == PMC_DEVICE_S6 || + device == PMC_DEVICE_S7 || + device == PMC_DEVICE_S8) + return 1; + return 0; +} + +static inline int aac_supports_2T(struct aac_dev *dev) +{ + return (dev->adapter_info.options & AAC_OPT_NEW_COMM_64); +} + +char * get_container_type(unsigned type); +extern int numacb; +extern char aac_driver_version[]; +extern int startup_timeout; +extern int aif_timeout; +extern int expose_physicals; +extern int aac_reset_devices; +extern int aac_msi; +extern int aac_commit; +extern int update_interval; +extern int check_interval; +extern int aac_check_reset; +extern int aac_fib_dump; +#endif diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c new file mode 100644 index 000000000..e7cc927ed --- /dev/null +++ b/drivers/scsi/aacraid/commctrl.c @@ -0,0 +1,1121 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Adaptec AAC series RAID controller driver + * (c) Copyright 2001 Red Hat Inc. + * + * based on the old aacraid driver that is.. + * Adaptec aacraid device driver for Linux. + * + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. 
(aacraid@microsemi.com) + * + * Module Name: + * commctrl.c + * + * Abstract: Contains all routines for control of the AFA comm layer + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* ssleep prototype */ +#include +#include +#include + +#include "aacraid.h" + +# define AAC_DEBUG_PREAMBLE KERN_INFO +# define AAC_DEBUG_POSTAMBLE +/** + * ioctl_send_fib - send a FIB from userspace + * @dev: adapter is being processed + * @arg: arguments to the ioctl call + * + * This routine sends a fib to the adapter on behalf of a user level + * program. + */ +static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) +{ + struct hw_fib * kfib; + struct fib *fibptr; + struct hw_fib * hw_fib = (struct hw_fib *)0; + dma_addr_t hw_fib_pa = (dma_addr_t)0LL; + unsigned int size, osize; + int retval; + + if (dev->in_reset) { + return -EBUSY; + } + fibptr = aac_fib_alloc(dev); + if(fibptr == NULL) { + return -ENOMEM; + } + + kfib = fibptr->hw_fib_va; + /* + * First copy in the header so that we can check the size field. + */ + if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) { + aac_fib_free(fibptr); + return -EFAULT; + } + /* + * Since we copy based on the fib header size, make sure that we + * will not overrun the buffer when we copy the memory. Return + * an error if we would. + */ + osize = size = le16_to_cpu(kfib->header.Size) + + sizeof(struct aac_fibhdr); + if (size < le16_to_cpu(kfib->header.SenderSize)) + size = le16_to_cpu(kfib->header.SenderSize); + if (size > dev->max_fib_size) { + dma_addr_t daddr; + + if (size > 2048) { + retval = -EINVAL; + goto cleanup; + } + + kfib = dma_alloc_coherent(&dev->pdev->dev, size, &daddr, + GFP_KERNEL); + if (!kfib) { + retval = -ENOMEM; + goto cleanup; + } + + /* Highjack the hw_fib */ + hw_fib = fibptr->hw_fib_va; + hw_fib_pa = fibptr->hw_fib_pa; + fibptr->hw_fib_va = kfib; + fibptr->hw_fib_pa = daddr; + memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size); + memcpy(kfib, hw_fib, dev->max_fib_size); + } + + if (copy_from_user(kfib, arg, size)) { + retval = -EFAULT; + goto cleanup; + } + + /* Sanity check the second copy */ + if ((osize != le16_to_cpu(kfib->header.Size) + + sizeof(struct aac_fibhdr)) + || (size < le16_to_cpu(kfib->header.SenderSize))) { + retval = -EINVAL; + goto cleanup; + } + + if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) { + aac_adapter_interrupt(dev); + /* + * Since we didn't really send a fib, zero out the state to allow + * cleanup code not to assert. + */ + kfib->header.XferState = 0; + } else { + retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr, + le16_to_cpu(kfib->header.Size) , FsaNormal, + 1, 1, NULL, NULL); + if (retval) { + goto cleanup; + } + if (aac_fib_complete(fibptr) != 0) { + retval = -EINVAL; + goto cleanup; + } + } + /* + * Make sure that the size returned by the adapter (which includes + * the header) is less than or equal to the size of a fib, so we + * don't corrupt application data. Then copy that size to the user + * buffer. (Don't try to add the header information again, since it + * was already included by the adapter.) 
+ */ + + retval = 0; + if (copy_to_user(arg, (void *)kfib, size)) + retval = -EFAULT; +cleanup: + if (hw_fib) { + dma_free_coherent(&dev->pdev->dev, size, kfib, + fibptr->hw_fib_pa); + fibptr->hw_fib_pa = hw_fib_pa; + fibptr->hw_fib_va = hw_fib; + } + if (retval != -ERESTARTSYS) + aac_fib_free(fibptr); + return retval; +} + +/** + * open_getadapter_fib - Get the next fib + * @dev: adapter is being processed + * @arg: arguments to the open call + * + * This routine will get the next Fib, if available, from the AdapterFibContext + * passed in from the user. + */ +static int open_getadapter_fib(struct aac_dev * dev, void __user *arg) +{ + struct aac_fib_context * fibctx; + int status; + + fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL); + if (fibctx == NULL) { + status = -ENOMEM; + } else { + unsigned long flags; + struct list_head * entry; + struct aac_fib_context * context; + + fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT; + fibctx->size = sizeof(struct aac_fib_context); + /* + * Yes yes, I know this could be an index, but we have a + * better guarantee of uniqueness for the locked loop below. + * Without the aid of a persistent history, this also helps + * reduce the chance that the opaque context would be reused. + */ + fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF); + /* + * Initialize the mutex used to wait for the next AIF. + */ + init_completion(&fibctx->completion); + fibctx->wait = 0; + /* + * Initialize the fibs and set the count of fibs on + * the list to 0. + */ + fibctx->count = 0; + INIT_LIST_HEAD(&fibctx->fib_list); + fibctx->jiffies = jiffies/HZ; + /* + * Now add this context onto the adapter's + * AdapterFibContext list. + */ + spin_lock_irqsave(&dev->fib_lock, flags); + /* Ensure that we have a unique identifier */ + entry = dev->fib_list.next; + while (entry != &dev->fib_list) { + context = list_entry(entry, struct aac_fib_context, next); + if (context->unique == fibctx->unique) { + /* Not unique (32 bits) */ + fibctx->unique++; + entry = dev->fib_list.next; + } else { + entry = entry->next; + } + } + list_add_tail(&fibctx->next, &dev->fib_list); + spin_unlock_irqrestore(&dev->fib_lock, flags); + if (copy_to_user(arg, &fibctx->unique, + sizeof(fibctx->unique))) { + status = -EFAULT; + } else { + status = 0; + } + } + return status; +} + +struct compat_fib_ioctl { + u32 fibctx; + s32 wait; + compat_uptr_t fib; +}; + +/** + * next_getadapter_fib - get the next fib + * @dev: adapter to use + * @arg: ioctl argument + * + * This routine will get the next Fib, if available, from the AdapterFibContext + * passed in from the user. 
+ */ +static int next_getadapter_fib(struct aac_dev * dev, void __user *arg) +{ + struct fib_ioctl f; + struct fib *fib; + struct aac_fib_context *fibctx; + int status; + struct list_head * entry; + unsigned long flags; + + if (in_compat_syscall()) { + struct compat_fib_ioctl cf; + + if (copy_from_user(&cf, arg, sizeof(struct compat_fib_ioctl))) + return -EFAULT; + + f.fibctx = cf.fibctx; + f.wait = cf.wait; + f.fib = compat_ptr(cf.fib); + } else { + if (copy_from_user(&f, arg, sizeof(struct fib_ioctl))) + return -EFAULT; + } + /* + * Verify that the HANDLE passed in was a valid AdapterFibContext + * + * Search the list of AdapterFibContext addresses on the adapter + * to be sure this is a valid address + */ + spin_lock_irqsave(&dev->fib_lock, flags); + entry = dev->fib_list.next; + fibctx = NULL; + + while (entry != &dev->fib_list) { + fibctx = list_entry(entry, struct aac_fib_context, next); + /* + * Extract the AdapterFibContext from the Input parameters. + */ + if (fibctx->unique == f.fibctx) { /* We found a winner */ + break; + } + entry = entry->next; + fibctx = NULL; + } + if (!fibctx) { + spin_unlock_irqrestore(&dev->fib_lock, flags); + dprintk ((KERN_INFO "Fib Context not found\n")); + return -EINVAL; + } + + if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) || + (fibctx->size != sizeof(struct aac_fib_context))) { + spin_unlock_irqrestore(&dev->fib_lock, flags); + dprintk ((KERN_INFO "Fib Context corrupt?\n")); + return -EINVAL; + } + status = 0; + /* + * If there are no fibs to send back, then either wait or return + * -EAGAIN + */ +return_fib: + if (!list_empty(&fibctx->fib_list)) { + /* + * Pull the next fib from the fibs + */ + entry = fibctx->fib_list.next; + list_del(entry); + + fib = list_entry(entry, struct fib, fiblink); + fibctx->count--; + spin_unlock_irqrestore(&dev->fib_lock, flags); + if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) { + kfree(fib->hw_fib_va); + kfree(fib); + return -EFAULT; + } + /* + * Free the space occupied by this copy of the fib. + */ + kfree(fib->hw_fib_va); + kfree(fib); + status = 0; + } else { + spin_unlock_irqrestore(&dev->fib_lock, flags); + /* If someone killed the AIF aacraid thread, restart it */ + status = !dev->aif_thread; + if (status && !dev->in_reset && dev->queues && dev->fsa_dev) { + /* Be paranoid, be very paranoid! */ + kthread_stop(dev->thread); + ssleep(1); + dev->aif_thread = 0; + dev->thread = kthread_run(aac_command_thread, dev, + "%s", dev->name); + ssleep(1); + } + if (f.wait) { + if (wait_for_completion_interruptible(&fibctx->completion) < 0) { + status = -ERESTARTSYS; + } else { + /* Lock again and retry */ + spin_lock_irqsave(&dev->fib_lock, flags); + goto return_fib; + } + } else { + status = -EAGAIN; + } + } + fibctx->jiffies = jiffies/HZ; + return status; +} + +int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx) +{ + struct fib *fib; + + /* + * First free any FIBs that have not been consumed. + */ + while (!list_empty(&fibctx->fib_list)) { + struct list_head * entry; + /* + * Pull the next fib from the fibs + */ + entry = fibctx->fib_list.next; + list_del(entry); + fib = list_entry(entry, struct fib, fiblink); + fibctx->count--; + /* + * Free the space occupied by this copy of the fib. 
+ */ + kfree(fib->hw_fib_va); + kfree(fib); + } + /* + * Remove the Context from the AdapterFibContext List + */ + list_del(&fibctx->next); + /* + * Invalidate context + */ + fibctx->type = 0; + /* + * Free the space occupied by the Context + */ + kfree(fibctx); + return 0; +} + +/** + * close_getadapter_fib - close down user fib context + * @dev: adapter + * @arg: ioctl arguments + * + * This routine will close down the fibctx passed in from the user. + */ + +static int close_getadapter_fib(struct aac_dev * dev, void __user *arg) +{ + struct aac_fib_context *fibctx; + int status; + unsigned long flags; + struct list_head * entry; + + /* + * Verify that the HANDLE passed in was a valid AdapterFibContext + * + * Search the list of AdapterFibContext addresses on the adapter + * to be sure this is a valid address + */ + + entry = dev->fib_list.next; + fibctx = NULL; + + while(entry != &dev->fib_list) { + fibctx = list_entry(entry, struct aac_fib_context, next); + /* + * Extract the fibctx from the input parameters + */ + if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */ + break; + entry = entry->next; + fibctx = NULL; + } + + if (!fibctx) + return 0; /* Already gone */ + + if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) || + (fibctx->size != sizeof(struct aac_fib_context))) + return -EINVAL; + spin_lock_irqsave(&dev->fib_lock, flags); + status = aac_close_fib_context(dev, fibctx); + spin_unlock_irqrestore(&dev->fib_lock, flags); + return status; +} + +/** + * check_revision - close down user fib context + * @dev: adapter + * @arg: ioctl arguments + * + * This routine returns the driver version. + * Under Linux, there have been no version incompatibilities, so this is + * simple! + */ + +static int check_revision(struct aac_dev *dev, void __user *arg) +{ + struct revision response; + char *driver_version = aac_driver_version; + u32 version; + + response.compat = 1; + version = (simple_strtol(driver_version, + &driver_version, 10) << 24) | 0x00000400; + version += simple_strtol(driver_version + 1, &driver_version, 10) << 16; + version += simple_strtol(driver_version + 1, NULL, 10); + response.version = cpu_to_le32(version); +# ifdef AAC_DRIVER_BUILD + response.build = cpu_to_le32(AAC_DRIVER_BUILD); +# else + response.build = cpu_to_le32(9999); +# endif + + if (copy_to_user(arg, &response, sizeof(response))) + return -EFAULT; + return 0; +} + + +/** + * aac_send_raw_srb() + * @dev: adapter is being processed + * @arg: arguments to the send call + */ +static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) +{ + struct fib* srbfib; + int status; + struct aac_srb *srbcmd = NULL; + struct aac_hba_cmd_req *hbacmd = NULL; + struct user_aac_srb *user_srbcmd = NULL; + struct user_aac_srb __user *user_srb = arg; + struct aac_srb_reply __user *user_reply; + u32 chn; + u32 fibsize = 0; + u32 flags = 0; + s32 rcode = 0; + u32 data_dir; + void __user *sg_user[HBA_MAX_SG_EMBEDDED]; + void *sg_list[HBA_MAX_SG_EMBEDDED]; + u32 sg_count[HBA_MAX_SG_EMBEDDED]; + u32 sg_indx = 0; + u32 byte_count = 0; + u32 actual_fibsize64, actual_fibsize = 0; + int i; + int is_native_device; + u64 address; + + + if (dev->in_reset) { + dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n")); + return -EBUSY; + } + if (!capable(CAP_SYS_ADMIN)){ + dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n")); + return -EPERM; + } + /* + * Allocate and initialize a Fib then setup a SRB command + */ + if (!(srbfib = aac_fib_alloc(dev))) { + return -ENOMEM; + } + + memset(sg_list, 0, 
sizeof(sg_list)); /* cleanup may take issue */ + if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){ + dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n")); + rcode = -EFAULT; + goto cleanup; + } + + if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) || + (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) { + rcode = -EINVAL; + goto cleanup; + } + + user_srbcmd = memdup_user(user_srb, fibsize); + if (IS_ERR(user_srbcmd)) { + rcode = PTR_ERR(user_srbcmd); + user_srbcmd = NULL; + goto cleanup; + } + + flags = user_srbcmd->flags; /* from user in cpu order */ + switch (flags & (SRB_DataIn | SRB_DataOut)) { + case SRB_DataOut: + data_dir = DMA_TO_DEVICE; + break; + case (SRB_DataIn | SRB_DataOut): + data_dir = DMA_BIDIRECTIONAL; + break; + case SRB_DataIn: + data_dir = DMA_FROM_DEVICE; + break; + default: + data_dir = DMA_NONE; + } + if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) { + dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n", + user_srbcmd->sg.count)); + rcode = -EINVAL; + goto cleanup; + } + if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) { + dprintk((KERN_DEBUG"aacraid:SG with no direction specified\n")); + rcode = -EINVAL; + goto cleanup; + } + actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) + + ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry)); + actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) * + (sizeof(struct sgentry64) - sizeof(struct sgentry)); + /* User made a mistake - should not continue */ + if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) { + dprintk((KERN_DEBUG"aacraid: Bad Size specified in " + "Raw SRB command calculated fibsize=%lu;%lu " + "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu " + "issued fibsize=%d\n", + actual_fibsize, actual_fibsize64, user_srbcmd->sg.count, + sizeof(struct aac_srb), sizeof(struct sgentry), + sizeof(struct sgentry64), fibsize)); + rcode = -EINVAL; + goto cleanup; + } + + chn = user_srbcmd->channel; + if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS && + dev->hba_map[chn][user_srbcmd->id].devtype == + AAC_DEVTYPE_NATIVE_RAW) { + is_native_device = 1; + hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va; + memset(hbacmd, 0, 96); /* sizeof(*hbacmd) is not necessary */ + + /* iu_type is a parameter of aac_hba_send */ + switch (data_dir) { + case DMA_TO_DEVICE: + hbacmd->byte1 = 2; + break; + case DMA_FROM_DEVICE: + case DMA_BIDIRECTIONAL: + hbacmd->byte1 = 1; + break; + case DMA_NONE: + default: + break; + } + hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun); + hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus; + + /* + * we fill in reply_qid later in aac_src_deliver_message + * we fill in iu_type, request_id later in aac_hba_send + * we fill in emb_data_desc_count, data_length later + * in sg list build + */ + + memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb)); + + address = (u64)srbfib->hw_error_pa; + hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32)); + hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff)); + hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); + hbacmd->emb_data_desc_count = + cpu_to_le32(user_srbcmd->sg.count); + srbfib->hbacmd_size = 64 + + user_srbcmd->sg.count * sizeof(struct aac_hba_sgl); + + } else { + is_native_device = 0; + aac_fib_init(srbfib); + + /* raw_srb FIB is not FastResponseCapable */ + srbfib->hw_fib_va->header.XferState &= + ~cpu_to_le32(FastResponseCapable); + + srbcmd = (struct aac_srb *) fib_data(srbfib); + + 
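        /*
         * This branch handles targets that are not AAC_DEVTYPE_NATIVE_RAW.
         * Instead of the aac_hba_cmd_req built above, the user request is
         * repackaged as a legacy struct aac_srb in the FIB data area; the
         * assignments below convert each multi-byte field from the user's
         * CPU-order copy to the little-endian layout the firmware expects.
         */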
// Fix up srb for endian and force some values + + srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this + srbcmd->channel = cpu_to_le32(user_srbcmd->channel); + srbcmd->id = cpu_to_le32(user_srbcmd->id); + srbcmd->lun = cpu_to_le32(user_srbcmd->lun); + srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout); + srbcmd->flags = cpu_to_le32(flags); + srbcmd->retry_limit = 0; // Obsolete parameter + srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size); + memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb)); + } + + byte_count = 0; + if (is_native_device) { + struct user_sgmap *usg32 = &user_srbcmd->sg; + struct user_sgmap64 *usg64 = + (struct user_sgmap64 *)&user_srbcmd->sg; + + for (i = 0; i < usg32->count; i++) { + void *p; + u64 addr; + + sg_count[i] = (actual_fibsize64 == fibsize) ? + usg64->sg[i].count : usg32->sg[i].count; + if (sg_count[i] > + (dev->scsi_host_ptr->max_sectors << 9)) { + pr_err("aacraid: upsg->sg[%d].count=%u>%u\n", + i, sg_count[i], + dev->scsi_host_ptr->max_sectors << 9); + rcode = -EINVAL; + goto cleanup; + } + + p = kmalloc(sg_count[i], GFP_KERNEL); + if (!p) { + rcode = -ENOMEM; + goto cleanup; + } + + if (actual_fibsize64 == fibsize) { + addr = (u64)usg64->sg[i].addr[0]; + addr += ((u64)usg64->sg[i].addr[1]) << 32; + } else { + addr = (u64)usg32->sg[i].addr; + } + + sg_user[i] = (void __user *)(uintptr_t)addr; + sg_list[i] = p; // save so we can clean up later + sg_indx = i; + + if (flags & SRB_DataOut) { + if (copy_from_user(p, sg_user[i], + sg_count[i])) { + rcode = -EFAULT; + goto cleanup; + } + } + addr = dma_map_single(&dev->pdev->dev, p, sg_count[i], + data_dir); + hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32)); + hbacmd->sge[i].addr_lo = cpu_to_le32( + (u32)(addr & 0xffffffff)); + hbacmd->sge[i].len = cpu_to_le32(sg_count[i]); + hbacmd->sge[i].flags = 0; + byte_count += sg_count[i]; + } + + if (usg32->count > 0) /* embedded sglist */ + hbacmd->sge[usg32->count-1].flags = + cpu_to_le32(0x40000000); + hbacmd->data_length = cpu_to_le32(byte_count); + + status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib, + NULL, NULL); + + } else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) { + struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg; + struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg; + + /* + * This should also catch if user used the 32 bit sgmap + */ + if (actual_fibsize64 == fibsize) { + actual_fibsize = actual_fibsize64; + for (i = 0; i < upsg->count; i++) { + u64 addr; + void* p; + + sg_count[i] = upsg->sg[i].count; + if (sg_count[i] > + ((dev->adapter_info.options & + AAC_OPT_NEW_COMM) ? 
+ (dev->scsi_host_ptr->max_sectors << 9) : + 65536)) { + rcode = -EINVAL; + goto cleanup; + } + + p = kmalloc(sg_count[i], GFP_KERNEL); + if(!p) { + dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", + sg_count[i], i, upsg->count)); + rcode = -ENOMEM; + goto cleanup; + } + addr = (u64)upsg->sg[i].addr[0]; + addr += ((u64)upsg->sg[i].addr[1]) << 32; + sg_user[i] = (void __user *)(uintptr_t)addr; + sg_list[i] = p; // save so we can clean up later + sg_indx = i; + + if (flags & SRB_DataOut) { + if (copy_from_user(p, sg_user[i], + sg_count[i])){ + dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); + rcode = -EFAULT; + goto cleanup; + } + } + addr = dma_map_single(&dev->pdev->dev, p, + sg_count[i], data_dir); + + psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); + psg->sg[i].addr[1] = cpu_to_le32(addr>>32); + byte_count += sg_count[i]; + psg->sg[i].count = cpu_to_le32(sg_count[i]); + } + } else { + struct user_sgmap* usg; + usg = kmemdup(upsg, + actual_fibsize - sizeof(struct aac_srb) + + sizeof(struct sgmap), GFP_KERNEL); + if (!usg) { + dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n")); + rcode = -ENOMEM; + goto cleanup; + } + actual_fibsize = actual_fibsize64; + + for (i = 0; i < usg->count; i++) { + u64 addr; + void* p; + + sg_count[i] = usg->sg[i].count; + if (sg_count[i] > + ((dev->adapter_info.options & + AAC_OPT_NEW_COMM) ? + (dev->scsi_host_ptr->max_sectors << 9) : + 65536)) { + kfree(usg); + rcode = -EINVAL; + goto cleanup; + } + + p = kmalloc(sg_count[i], GFP_KERNEL); + if(!p) { + dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", + sg_count[i], i, usg->count)); + kfree(usg); + rcode = -ENOMEM; + goto cleanup; + } + sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr; + sg_list[i] = p; // save so we can clean up later + sg_indx = i; + + if (flags & SRB_DataOut) { + if (copy_from_user(p, sg_user[i], + sg_count[i])) { + kfree (usg); + dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); + rcode = -EFAULT; + goto cleanup; + } + } + addr = dma_map_single(&dev->pdev->dev, p, + sg_count[i], data_dir); + + psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); + psg->sg[i].addr[1] = cpu_to_le32(addr>>32); + byte_count += sg_count[i]; + psg->sg[i].count = cpu_to_le32(sg_count[i]); + } + kfree (usg); + } + srbcmd->count = cpu_to_le32(byte_count); + if (user_srbcmd->sg.count) + psg->count = cpu_to_le32(sg_indx+1); + else + psg->count = 0; + status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); + } else { + struct user_sgmap* upsg = &user_srbcmd->sg; + struct sgmap* psg = &srbcmd->sg; + + if (actual_fibsize64 == fibsize) { + struct user_sgmap64* usg = (struct user_sgmap64 *)upsg; + for (i = 0; i < upsg->count; i++) { + uintptr_t addr; + void* p; + + sg_count[i] = usg->sg[i].count; + if (sg_count[i] > + ((dev->adapter_info.options & + AAC_OPT_NEW_COMM) ? 
+ (dev->scsi_host_ptr->max_sectors << 9) : + 65536)) { + rcode = -EINVAL; + goto cleanup; + } + p = kmalloc(sg_count[i], GFP_KERNEL); + if (!p) { + dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", + sg_count[i], i, usg->count)); + rcode = -ENOMEM; + goto cleanup; + } + addr = (u64)usg->sg[i].addr[0]; + addr += ((u64)usg->sg[i].addr[1]) << 32; + sg_user[i] = (void __user *)addr; + sg_list[i] = p; // save so we can clean up later + sg_indx = i; + + if (flags & SRB_DataOut) { + if (copy_from_user(p, sg_user[i], + sg_count[i])){ + dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); + rcode = -EFAULT; + goto cleanup; + } + } + addr = dma_map_single(&dev->pdev->dev, p, + usg->sg[i].count, + data_dir); + + psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff); + byte_count += usg->sg[i].count; + psg->sg[i].count = cpu_to_le32(sg_count[i]); + } + } else { + for (i = 0; i < upsg->count; i++) { + dma_addr_t addr; + void* p; + + sg_count[i] = upsg->sg[i].count; + if (sg_count[i] > + ((dev->adapter_info.options & + AAC_OPT_NEW_COMM) ? + (dev->scsi_host_ptr->max_sectors << 9) : + 65536)) { + rcode = -EINVAL; + goto cleanup; + } + p = kmalloc(sg_count[i], GFP_KERNEL); + if (!p) { + dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", + sg_count[i], i, upsg->count)); + rcode = -ENOMEM; + goto cleanup; + } + sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr; + sg_list[i] = p; // save so we can clean up later + sg_indx = i; + + if (flags & SRB_DataOut) { + if (copy_from_user(p, sg_user[i], + sg_count[i])) { + dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); + rcode = -EFAULT; + goto cleanup; + } + } + addr = dma_map_single(&dev->pdev->dev, p, + sg_count[i], data_dir); + + psg->sg[i].addr = cpu_to_le32(addr); + byte_count += sg_count[i]; + psg->sg[i].count = cpu_to_le32(sg_count[i]); + } + } + srbcmd->count = cpu_to_le32(byte_count); + if (user_srbcmd->sg.count) + psg->count = cpu_to_le32(sg_indx+1); + else + psg->count = 0; + status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); + } + + if (status == -ERESTARTSYS) { + rcode = -ERESTARTSYS; + goto cleanup; + } + + if (status != 0) { + dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n")); + rcode = -ENXIO; + goto cleanup; + } + + if (flags & SRB_DataIn) { + for(i = 0 ; i <= sg_indx; i++){ + if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) { + dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n")); + rcode = -EFAULT; + goto cleanup; + + } + } + } + + user_reply = arg + fibsize; + if (is_native_device) { + struct aac_hba_resp *err = + &((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err; + struct aac_srb_reply reply; + + memset(&reply, 0, sizeof(reply)); + reply.status = ST_OK; + if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) { + /* fast response */ + reply.srb_status = SRB_STATUS_SUCCESS; + reply.scsi_status = 0; + reply.data_xfer_length = byte_count; + reply.sense_data_size = 0; + memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE); + } else { + reply.srb_status = err->service_response; + reply.scsi_status = err->status; + reply.data_xfer_length = byte_count - + le32_to_cpu(err->residual_count); + reply.sense_data_size = err->sense_response_data_len; + memcpy(reply.sense_data, err->sense_response_buf, + AAC_SENSE_BUFFERSIZE); + } + if (copy_to_user(user_reply, &reply, + sizeof(struct aac_srb_reply))) { + dprintk((KERN_DEBUG"aacraid: Copy to user failed\n")); + 
rcode = -EFAULT; + goto cleanup; + } + } else { + struct aac_srb_reply *reply; + + reply = (struct aac_srb_reply *) fib_data(srbfib); + if (copy_to_user(user_reply, reply, + sizeof(struct aac_srb_reply))) { + dprintk((KERN_DEBUG"aacraid: Copy to user failed\n")); + rcode = -EFAULT; + goto cleanup; + } + } + +cleanup: + kfree(user_srbcmd); + if (rcode != -ERESTARTSYS) { + for (i = 0; i <= sg_indx; i++) + kfree(sg_list[i]); + aac_fib_complete(srbfib); + aac_fib_free(srbfib); + } + + return rcode; +} + +struct aac_pci_info { + u32 bus; + u32 slot; +}; + + +static int aac_get_pci_info(struct aac_dev* dev, void __user *arg) +{ + struct aac_pci_info pci_info; + + pci_info.bus = dev->pdev->bus->number; + pci_info.slot = PCI_SLOT(dev->pdev->devfn); + + if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) { + dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n")); + return -EFAULT; + } + return 0; +} + +static int aac_get_hba_info(struct aac_dev *dev, void __user *arg) +{ + struct aac_hba_info hbainfo; + + memset(&hbainfo, 0, sizeof(hbainfo)); + hbainfo.adapter_number = (u8) dev->id; + hbainfo.system_io_bus_number = dev->pdev->bus->number; + hbainfo.device_number = (dev->pdev->devfn >> 3); + hbainfo.function_number = (dev->pdev->devfn & 0x0007); + + hbainfo.vendor_id = dev->pdev->vendor; + hbainfo.device_id = dev->pdev->device; + hbainfo.sub_vendor_id = dev->pdev->subsystem_vendor; + hbainfo.sub_system_id = dev->pdev->subsystem_device; + + if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) { + dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n")); + return -EFAULT; + } + + return 0; +} + +struct aac_reset_iop { + u8 reset_type; +}; + +static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg) +{ + struct aac_reset_iop reset; + int retval; + + if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop))) + return -EFAULT; + + dev->adapter_shutdown = 1; + + mutex_unlock(&dev->ioctl_mutex); + retval = aac_reset_adapter(dev, 0, reset.reset_type); + mutex_lock(&dev->ioctl_mutex); + + return retval; +} + +int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg) +{ + int status; + + mutex_lock(&dev->ioctl_mutex); + + if (dev->adapter_shutdown) { + status = -EACCES; + goto cleanup; + } + + /* + * HBA gets first crack + */ + + status = aac_dev_ioctl(dev, cmd, arg); + if (status != -ENOTTY) + goto cleanup; + + switch (cmd) { + case FSACTL_MINIPORT_REV_CHECK: + status = check_revision(dev, arg); + break; + case FSACTL_SEND_LARGE_FIB: + case FSACTL_SENDFIB: + status = ioctl_send_fib(dev, arg); + break; + case FSACTL_OPEN_GET_ADAPTER_FIB: + status = open_getadapter_fib(dev, arg); + break; + case FSACTL_GET_NEXT_ADAPTER_FIB: + status = next_getadapter_fib(dev, arg); + break; + case FSACTL_CLOSE_GET_ADAPTER_FIB: + status = close_getadapter_fib(dev, arg); + break; + case FSACTL_SEND_RAW_SRB: + status = aac_send_raw_srb(dev,arg); + break; + case FSACTL_GET_PCI_INFO: + status = aac_get_pci_info(dev,arg); + break; + case FSACTL_GET_HBA_INFO: + status = aac_get_hba_info(dev, arg); + break; + case FSACTL_RESET_IOP: + status = aac_send_reset_adapter(dev, arg); + break; + + default: + status = -ENOTTY; + break; + } + +cleanup: + mutex_unlock(&dev->ioctl_mutex); + + return status; +} + diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c new file mode 100644 index 000000000..bd99c5492 --- /dev/null +++ b/drivers/scsi/aacraid/comminit.c @@ -0,0 +1,660 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Adaptec AAC series 
RAID controller driver + * (c) Copyright 2001 Red Hat Inc. + * + * based on the old aacraid driver that is.. + * Adaptec aacraid device driver for Linux. + * + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) + * + * Module Name: + * comminit.c + * + * Abstract: This supports the initialization of the host adapter commuication interface. + * This is a platform dependent module for the pci cyclone board. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "aacraid.h" + +struct aac_common aac_config = { + .irq_mod = 1 +}; + +static inline int aac_is_msix_mode(struct aac_dev *dev) +{ + u32 status = 0; + + if (aac_is_src(dev)) + status = src_readl(dev, MUnit.OMR); + return (status & AAC_INT_MODE_MSIX); +} + +static inline void aac_change_to_intx(struct aac_dev *dev) +{ + aac_src_access_devreg(dev, AAC_DISABLE_MSIX); + aac_src_access_devreg(dev, AAC_ENABLE_INTX); +} + +static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign) +{ + unsigned char *base; + unsigned long size, align; + const unsigned long fibsize = dev->max_fib_size; + const unsigned long printfbufsiz = 256; + unsigned long host_rrq_size, aac_init_size; + union aac_init *init; + dma_addr_t phys; + unsigned long aac_max_hostphysmempages; + + if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) || + (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) || + (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 && + !dev->sa_firmware)) { + host_rrq_size = + (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + * sizeof(u32); + aac_init_size = sizeof(union aac_init); + } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 && + dev->sa_firmware) { + host_rrq_size = (dev->scsi_host_ptr->can_queue + + AAC_NUM_MGT_FIB) * sizeof(u32) * AAC_MAX_MSIX; + aac_init_size = sizeof(union aac_init) + + (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq); + } else { + host_rrq_size = 0; + aac_init_size = sizeof(union aac_init); + } + size = fibsize + aac_init_size + commsize + commalign + + printfbufsiz + host_rrq_size; + + base = dma_alloc_coherent(&dev->pdev->dev, size, &phys, GFP_KERNEL); + if (base == NULL) { + printk(KERN_ERR "aacraid: unable to create mapping.\n"); + return 0; + } + + dev->comm_addr = (void *)base; + dev->comm_phys = phys; + dev->comm_size = size; + + if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) || + (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) || + (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)) { + dev->host_rrq = (u32 *)(base + fibsize); + dev->host_rrq_pa = phys + fibsize; + memset(dev->host_rrq, 0, host_rrq_size); + } + + dev->init = (union aac_init *)(base + fibsize + host_rrq_size); + dev->init_pa = phys + fibsize + host_rrq_size; + + init = dev->init; + + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { + int i; + u64 addr; + + init->r8.init_struct_revision = + cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_8); + init->r8.init_flags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED | + INITFLAGS_DRIVER_USES_UTC_TIME | + INITFLAGS_DRIVER_SUPPORTS_PM); + init->r8.init_flags |= + cpu_to_le32(INITFLAGS_DRIVER_SUPPORTS_HBA_MODE); + init->r8.rr_queue_count = cpu_to_le32(dev->max_msix); + init->r8.max_io_size = + cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); + init->r8.max_num_aif = init->r8.reserved1 = + init->r8.reserved2 = 0; + + for (i = 0; i < dev->max_msix; i++) { + addr = 
(u64)dev->host_rrq_pa + dev->vector_cap * i * + sizeof(u32); + init->r8.rrq[i].host_addr_high = cpu_to_le32( + upper_32_bits(addr)); + init->r8.rrq[i].host_addr_low = cpu_to_le32( + lower_32_bits(addr)); + init->r8.rrq[i].msix_id = i; + init->r8.rrq[i].element_count = cpu_to_le16( + (u16)dev->vector_cap); + init->r8.rrq[i].comp_thresh = + init->r8.rrq[i].unused = 0; + } + + pr_warn("aacraid: Comm Interface type3 enabled\n"); + } else { + init->r7.init_struct_revision = + cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION); + if (dev->max_fib_size != sizeof(struct hw_fib)) + init->r7.init_struct_revision = + cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4); + init->r7.no_of_msix_vectors = cpu_to_le32(SA_MINIPORT_REVISION); + init->r7.fsrev = cpu_to_le32(dev->fsrev); + + /* + * Adapter Fibs are the first thing allocated so that they + * start page aligned + */ + dev->aif_base_va = (struct hw_fib *)base; + + init->r7.adapter_fibs_virtual_address = 0; + init->r7.adapter_fibs_physical_address = cpu_to_le32((u32)phys); + init->r7.adapter_fibs_size = cpu_to_le32(fibsize); + init->r7.adapter_fib_align = cpu_to_le32(sizeof(struct hw_fib)); + + /* + * number of 4k pages of host physical memory. The aacraid fw + * needs this number to be less than 4gb worth of pages. New + * firmware doesn't have any issues with the mapping system, but + * older Firmware did, and had *troubles* dealing with the math + * overloading past 32 bits, thus we must limit this field. + */ + aac_max_hostphysmempages = + dma_get_required_mask(&dev->pdev->dev) >> 12; + if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES) + init->r7.host_phys_mem_pages = + cpu_to_le32(aac_max_hostphysmempages); + else + init->r7.host_phys_mem_pages = + cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES); + + init->r7.init_flags = + cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME | + INITFLAGS_DRIVER_SUPPORTS_PM); + init->r7.max_io_commands = + cpu_to_le32(dev->scsi_host_ptr->can_queue + + AAC_NUM_MGT_FIB); + init->r7.max_io_size = + cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); + init->r7.max_fib_size = cpu_to_le32(dev->max_fib_size); + init->r7.max_num_aif = cpu_to_le32(dev->max_num_aif); + + if (dev->comm_interface == AAC_COMM_MESSAGE) { + init->r7.init_flags |= + cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); + pr_warn("aacraid: Comm Interface enabled\n"); + } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { + init->r7.init_struct_revision = + cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6); + init->r7.init_flags |= + cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED | + INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | + INITFLAGS_FAST_JBOD_SUPPORTED); + init->r7.host_rrq_addr_high = + cpu_to_le32(upper_32_bits(dev->host_rrq_pa)); + init->r7.host_rrq_addr_low = + cpu_to_le32(lower_32_bits(dev->host_rrq_pa)); + pr_warn("aacraid: Comm Interface type1 enabled\n"); + } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { + init->r7.init_struct_revision = + cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7); + init->r7.init_flags |= + cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED | + INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | + INITFLAGS_FAST_JBOD_SUPPORTED); + init->r7.host_rrq_addr_high = + cpu_to_le32(upper_32_bits(dev->host_rrq_pa)); + init->r7.host_rrq_addr_low = + cpu_to_le32(lower_32_bits(dev->host_rrq_pa)); + init->r7.no_of_msix_vectors = + cpu_to_le32(dev->max_msix); + /* must be the COMM_PREFERRED_SETTINGS values */ + pr_warn("aacraid: Comm Interface type2 enabled\n"); + } + } + + /* + * Increment the base address by the amount already used + */ + base = base + fibsize + host_rrq_size + 
aac_init_size; + phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size + + aac_init_size); + + /* + * Align the beginning of Headers to commalign + */ + align = (commalign - ((uintptr_t)(base) & (commalign - 1))); + base = base + align; + phys = phys + align; + /* + * Fill in addresses of the Comm Area Headers and Queues + */ + *commaddr = base; + if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) + init->r7.comm_header_address = cpu_to_le32((u32)phys); + /* + * Increment the base address by the size of the CommArea + */ + base = base + commsize; + phys = phys + commsize; + /* + * Place the Printf buffer area after the Fast I/O comm area. + */ + dev->printfbuf = (void *)base; + if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) { + init->r7.printfbuf = cpu_to_le32(phys); + init->r7.printfbufsiz = cpu_to_le32(printfbufsiz); + } + memset(base, 0, printfbufsiz); + return 1; +} + +static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize) +{ + atomic_set(&q->numpending, 0); + q->dev = dev; + init_waitqueue_head(&q->cmdready); + INIT_LIST_HEAD(&q->cmdq); + init_waitqueue_head(&q->qfull); + spin_lock_init(&q->lockdata); + q->lock = &q->lockdata; + q->headers.producer = (__le32 *)mem; + q->headers.consumer = (__le32 *)(mem+1); + *(q->headers.producer) = cpu_to_le32(qsize); + *(q->headers.consumer) = cpu_to_le32(qsize); + q->entries = qsize; +} + +static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data) +{ + int *active = data; + + if (aac_priv(cmd)->owner == AAC_OWNER_FIRMWARE) + *active = *active + 1; + return true; +} +static void aac_wait_for_io_completion(struct aac_dev *aac) +{ + int i = 0, active; + + for (i = 60; i; --i) { + + active = 0; + scsi_host_busy_iter(aac->scsi_host_ptr, + wait_for_io_iter, &active); + /* + * We can exit If all the commands are complete + */ + if (active == 0) + break; + dev_info(&aac->pdev->dev, + "Wait for %d commands to complete\n", active); + ssleep(1); + } + if (active) + dev_err(&aac->pdev->dev, + "%d outstanding commands during shutdown\n", active); +} + +/** + * aac_send_shutdown - shutdown an adapter + * @dev: Adapter to shutdown + * + * This routine will send a VM_CloseAll (shutdown) request to the adapter. + */ + +int aac_send_shutdown(struct aac_dev * dev) +{ + struct fib * fibctx; + struct aac_close *cmd; + int status = 0; + + if (aac_adapter_check_health(dev)) + return status; + + if (!dev->adapter_shutdown) { + mutex_lock(&dev->ioctl_mutex); + dev->adapter_shutdown = 1; + mutex_unlock(&dev->ioctl_mutex); + } + + aac_wait_for_io_completion(dev); + + fibctx = aac_fib_alloc(dev); + if (!fibctx) + return -ENOMEM; + aac_fib_init(fibctx); + + cmd = (struct aac_close *) fib_data(fibctx); + cmd->command = cpu_to_le32(VM_CloseAll); + cmd->cid = cpu_to_le32(0xfffffffe); + + status = aac_fib_send(ContainerCommand, + fibctx, + sizeof(struct aac_close), + FsaNormal, + -2 /* Timeout silently */, 1, + NULL, NULL); + + if (status >= 0) + aac_fib_complete(fibctx); + /* FIB should be freed only after getting the response from the F/W */ + if (status != -ERESTARTSYS) + aac_fib_free(fibctx); + if (aac_is_src(dev) && + dev->msi_enabled) + aac_set_intx_mode(dev); + return status; +} + +/** + * aac_comm_init - Initialise FSA data structures + * @dev: Adapter to initialise + * + * Initializes the data structures that are required for the FSA commuication + * interface to operate. + * Returns + * 1 - if we were able to init the commuication interface. + * 0 - If there were errors initing. This is a fatal error. 
+ */ + +static int aac_comm_init(struct aac_dev * dev) +{ + unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2; + unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES; + u32 *headers; + struct aac_entry * queues; + unsigned long size; + struct aac_queue_block * comm = dev->queues; + /* + * Now allocate and initialize the zone structures used as our + * pool of FIB context records. The size of the zone is based + * on the system memory size. We also initialize the mutex used + * to protect the zone. + */ + spin_lock_init(&dev->fib_lock); + + /* + * Allocate the physically contiguous space for the commuication + * queue headers. + */ + + size = hdrsize + queuesize; + + if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT)) + return -ENOMEM; + + queues = (struct aac_entry *)(((ulong)headers) + hdrsize); + + /* Adapter to Host normal priority Command queue */ + comm->queue[HostNormCmdQueue].base = queues; + aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES); + queues += HOST_NORM_CMD_ENTRIES; + headers += 2; + + /* Adapter to Host high priority command queue */ + comm->queue[HostHighCmdQueue].base = queues; + aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES); + + queues += HOST_HIGH_CMD_ENTRIES; + headers +=2; + + /* Host to adapter normal priority command queue */ + comm->queue[AdapNormCmdQueue].base = queues; + aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES); + + queues += ADAP_NORM_CMD_ENTRIES; + headers += 2; + + /* host to adapter high priority command queue */ + comm->queue[AdapHighCmdQueue].base = queues; + aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES); + + queues += ADAP_HIGH_CMD_ENTRIES; + headers += 2; + + /* adapter to host normal priority response queue */ + comm->queue[HostNormRespQueue].base = queues; + aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES); + queues += HOST_NORM_RESP_ENTRIES; + headers += 2; + + /* adapter to host high priority response queue */ + comm->queue[HostHighRespQueue].base = queues; + aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES); + + queues += HOST_HIGH_RESP_ENTRIES; + headers += 2; + + /* host to adapter normal priority response queue */ + comm->queue[AdapNormRespQueue].base = queues; + aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES); + + queues += ADAP_NORM_RESP_ENTRIES; + headers += 2; + + /* host to adapter high priority response queue */ + comm->queue[AdapHighRespQueue].base = queues; + aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES); + + comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock; + comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock; + comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock; + comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock; + + return 0; +} + +void aac_define_int_mode(struct aac_dev *dev) +{ + int i, msi_count, min_msix; + + msi_count = i = 0; + /* max. 
vectors from GET_COMM_PREFERRED_SETTINGS */ + if (dev->max_msix == 0 || + dev->pdev->device == PMC_DEVICE_S6 || + dev->sync_mode) { + dev->max_msix = 1; + dev->vector_cap = + dev->scsi_host_ptr->can_queue + + AAC_NUM_MGT_FIB; + return; + } + + /* Don't bother allocating more MSI-X vectors than cpus */ + msi_count = min(dev->max_msix, + (unsigned int)num_online_cpus()); + + dev->max_msix = msi_count; + + if (msi_count > AAC_MAX_MSIX) + msi_count = AAC_MAX_MSIX; + + if (msi_count > 1 && + pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { + min_msix = 2; + i = pci_alloc_irq_vectors(dev->pdev, + min_msix, msi_count, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); + if (i > 0) { + dev->msi_enabled = 1; + msi_count = i; + } else { + dev->msi_enabled = 0; + dev_err(&dev->pdev->dev, + "MSIX not supported!! Will try INTX 0x%x.\n", i); + } + } + + if (!dev->msi_enabled) + dev->max_msix = msi_count = 1; + else { + if (dev->max_msix > msi_count) + dev->max_msix = msi_count; + } + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 && dev->sa_firmware) + dev->vector_cap = dev->scsi_host_ptr->can_queue + + AAC_NUM_MGT_FIB; + else + dev->vector_cap = (dev->scsi_host_ptr->can_queue + + AAC_NUM_MGT_FIB) / msi_count; + +} +struct aac_dev *aac_init_adapter(struct aac_dev *dev) +{ + u32 status[5]; + struct Scsi_Host * host = dev->scsi_host_ptr; + extern int aac_sync_mode; + + /* + * Check the preferred comm settings, defaults from template. + */ + dev->management_fib_count = 0; + spin_lock_init(&dev->manage_lock); + spin_lock_init(&dev->sync_lock); + spin_lock_init(&dev->iq_lock); + dev->max_fib_size = sizeof(struct hw_fib); + dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size + - sizeof(struct aac_fibhdr) + - sizeof(struct aac_write) + sizeof(struct sgentry)) + / sizeof(struct sgentry); + dev->comm_interface = AAC_COMM_PRODUCER; + dev->raw_io_interface = dev->raw_io_64 = 0; + + + /* + * Enable INTX mode, if not done already Enabled + */ + if (aac_is_msix_mode(dev)) { + aac_change_to_intx(dev); + dev_info(&dev->pdev->dev, "Changed firmware to INTX mode"); + } + + if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, + 0, 0, 0, 0, 0, 0, + status+0, status+1, status+2, status+3, status+4)) && + (status[0] == 0x00000001)) { + dev->doorbell_mask = status[3]; + if (status[1] & AAC_OPT_NEW_COMM_64) + dev->raw_io_64 = 1; + dev->sync_mode = aac_sync_mode; + if (dev->a_ops.adapter_comm && + (status[1] & AAC_OPT_NEW_COMM)) { + dev->comm_interface = AAC_COMM_MESSAGE; + dev->raw_io_interface = 1; + if ((status[1] & AAC_OPT_NEW_COMM_TYPE1)) { + /* driver supports TYPE1 (Tupelo) */ + dev->comm_interface = AAC_COMM_MESSAGE_TYPE1; + } else if (status[1] & AAC_OPT_NEW_COMM_TYPE2) { + /* driver supports TYPE2 (Denali, Yosemite) */ + dev->comm_interface = AAC_COMM_MESSAGE_TYPE2; + } else if (status[1] & AAC_OPT_NEW_COMM_TYPE3) { + /* driver supports TYPE3 (Yosemite, Thor) */ + dev->comm_interface = AAC_COMM_MESSAGE_TYPE3; + } else if (status[1] & AAC_OPT_NEW_COMM_TYPE4) { + /* not supported TYPE - switch to sync. 
mode */ + dev->comm_interface = AAC_COMM_MESSAGE_TYPE2; + dev->sync_mode = 1; + } + } + if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) && + (status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE))) + dev->sa_firmware = 1; + else + dev->sa_firmware = 0; + + if (status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)) + dev->soft_reset_support = 1; + else + dev->soft_reset_support = 0; + + if ((dev->comm_interface == AAC_COMM_MESSAGE) && + (status[2] > dev->base_size)) { + aac_adapter_ioremap(dev, 0); + dev->base_size = status[2]; + if (aac_adapter_ioremap(dev, status[2])) { + /* remap failed, go back ... */ + dev->comm_interface = AAC_COMM_PRODUCER; + if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) { + printk(KERN_WARNING + "aacraid: unable to map adapter.\n"); + return NULL; + } + } + } + } + dev->max_msix = 0; + dev->msi_enabled = 0; + dev->adapter_shutdown = 0; + if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS, + 0, 0, 0, 0, 0, 0, + status+0, status+1, status+2, status+3, status+4)) + && (status[0] == 0x00000001)) { + /* + * status[1] >> 16 maximum command size in KB + * status[1] & 0xFFFF maximum FIB size + * status[2] >> 16 maximum SG elements to driver + * status[2] & 0xFFFF maximum SG elements from driver + * status[3] & 0xFFFF maximum number FIBs outstanding + */ + host->max_sectors = (status[1] >> 16) << 1; + /* Multiple of 32 for PMC */ + dev->max_fib_size = status[1] & 0xFFE0; + host->sg_tablesize = status[2] >> 16; + dev->sg_tablesize = status[2] & 0xFFFF; + if (aac_is_src(dev)) { + if (host->can_queue > (status[3] >> 16) - + AAC_NUM_MGT_FIB) + host->can_queue = (status[3] >> 16) - + AAC_NUM_MGT_FIB; + } else if (host->can_queue > (status[3] & 0xFFFF) - + AAC_NUM_MGT_FIB) + host->can_queue = (status[3] & 0xFFFF) - + AAC_NUM_MGT_FIB; + + dev->max_num_aif = status[4] & 0xFFFF; + } + if (numacb > 0) { + if (numacb < host->can_queue) + host->can_queue = numacb; + else + pr_warn("numacb=%d ignored\n", numacb); + } + + if (aac_is_src(dev)) + aac_define_int_mode(dev); + /* + * Ok now init the communication subsystem + */ + + dev->queues = kzalloc(sizeof(struct aac_queue_block), GFP_KERNEL); + if (dev->queues == NULL) { + printk(KERN_ERR "Error could not allocate comm region.\n"); + return NULL; + } + + if (aac_comm_init(dev)<0){ + kfree(dev->queues); + return NULL; + } + /* + * Initialize the list of fibs + */ + if (aac_fib_setup(dev) < 0) { + kfree(dev->queues); + return NULL; + } + + INIT_LIST_HEAD(&dev->fib_list); + INIT_LIST_HEAD(&dev->sync_fib_list); + + return dev; +} + diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c new file mode 100644 index 000000000..25cee03d7 --- /dev/null +++ b/drivers/scsi/aacraid/commsup.c @@ -0,0 +1,2582 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Adaptec AAC series RAID controller driver + * (c) Copyright 2001 Red Hat Inc. + * + * based on the old aacraid driver that is.. + * Adaptec aacraid device driver for Linux. + * + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) + * + * Module Name: + * commsup.c + * + * Abstract: Contain all routines that are required for FSA host/adapter + * communication. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "aacraid.h" + +/** + * fib_map_alloc - allocate the fib objects + * @dev: Adapter to allocate for + * + * Allocate and map the shared PCI space for the FIB blocks used to + * talk to the Adaptec firmware. + */ + +static int fib_map_alloc(struct aac_dev *dev) +{ + if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE) + dev->max_cmd_size = AAC_MAX_NATIVE_SIZE; + else + dev->max_cmd_size = dev->max_fib_size; + if (dev->max_fib_size < AAC_MAX_NATIVE_SIZE) { + dev->max_cmd_size = AAC_MAX_NATIVE_SIZE; + } else { + dev->max_cmd_size = dev->max_fib_size; + } + + dprintk((KERN_INFO + "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n", + &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue, + AAC_NUM_MGT_FIB, &dev->hw_fib_pa)); + dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev, + (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) + * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1), + &dev->hw_fib_pa, GFP_KERNEL); + if (dev->hw_fib_va == NULL) + return -ENOMEM; + return 0; +} + +/** + * aac_fib_map_free - free the fib objects + * @dev: Adapter to free + * + * Free the PCI mappings and the memory allocated for FIB blocks + * on this adapter. + */ + +void aac_fib_map_free(struct aac_dev *dev) +{ + size_t alloc_size; + size_t fib_size; + int num_fibs; + + if(!dev->hw_fib_va || !dev->max_cmd_size) + return; + + num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB; + fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr); + alloc_size = fib_size * num_fibs + ALIGN32 - 1; + + dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va, + dev->hw_fib_pa); + + dev->hw_fib_va = NULL; + dev->hw_fib_pa = 0; +} + +void aac_fib_vector_assign(struct aac_dev *dev) +{ + u32 i = 0; + u32 vector = 1; + struct fib *fibptr = NULL; + + for (i = 0, fibptr = &dev->fibs[i]; + i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); + i++, fibptr++) { + if ((dev->max_msix == 1) || + (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1) + - dev->vector_cap))) { + fibptr->vector_no = 0; + } else { + fibptr->vector_no = vector; + vector++; + if (vector == dev->max_msix) + vector = 1; + } + } +} + +/** + * aac_fib_setup - setup the fibs + * @dev: Adapter to set up + * + * Allocate the PCI space for the fibs, map it and then initialise the + * fib area, the unmapped fib data and also the free list + */ + +int aac_fib_setup(struct aac_dev * dev) +{ + struct fib *fibptr; + struct hw_fib *hw_fib; + dma_addr_t hw_fib_pa; + int i; + u32 max_cmds; + + while (((i = fib_map_alloc(dev)) == -ENOMEM) + && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) { + max_cmds = (dev->scsi_host_ptr->can_queue+AAC_NUM_MGT_FIB) >> 1; + dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB; + if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) + dev->init->r7.max_io_commands = cpu_to_le32(max_cmds); + } + if (i<0) + return -ENOMEM; + + memset(dev->hw_fib_va, 0, + (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) * + (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); + + /* 32 byte alignment for PMC */ + hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1); + hw_fib = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + + (hw_fib_pa - dev->hw_fib_pa)); + + /* add Xport header */ + hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + + sizeof(struct aac_fib_xporthdr)); 
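        /*
         * Layout note (derived from the allocation in fib_map_alloc): the
         * coherent buffer holds can_queue + AAC_NUM_MGT_FIB slots, each
         * consisting of a struct aac_fib_xporthdr immediately followed by
         * max_cmd_size bytes of hw_fib, with the start of the pool aligned
         * up to a 32-byte (ALIGN32) boundary.  hw_fib and hw_fib_pa are
         * advanced in lockstep below, so each fib's virtual and DMA
         * addresses always refer to the same slot, just past its transport
         * header.
         */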
+ hw_fib_pa += sizeof(struct aac_fib_xporthdr); + + /* + * Initialise the fibs + */ + for (i = 0, fibptr = &dev->fibs[i]; + i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); + i++, fibptr++) + { + fibptr->flags = 0; + fibptr->size = sizeof(struct fib); + fibptr->dev = dev; + fibptr->hw_fib_va = hw_fib; + fibptr->data = (void *) fibptr->hw_fib_va->data; + fibptr->next = fibptr+1; /* Forward chain the fibs */ + init_completion(&fibptr->event_wait); + spin_lock_init(&fibptr->event_lock); + hw_fib->header.XferState = cpu_to_le32(0xffffffff); + hw_fib->header.SenderSize = + cpu_to_le16(dev->max_fib_size); /* ?? max_cmd_size */ + fibptr->hw_fib_pa = hw_fib_pa; + fibptr->hw_sgl_pa = hw_fib_pa + + offsetof(struct aac_hba_cmd_req, sge[2]); + /* + * one element is for the ptr to the separate sg list, + * second element for 32 byte alignment + */ + fibptr->hw_error_pa = hw_fib_pa + + offsetof(struct aac_native_hba, resp.resp_bytes[0]); + + hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + + dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)); + hw_fib_pa = hw_fib_pa + + dev->max_cmd_size + sizeof(struct aac_fib_xporthdr); + } + + /* + *Assign vector numbers to fibs + */ + aac_fib_vector_assign(dev); + + /* + * Add the fib chain to the free list + */ + dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL; + /* + * Set 8 fibs aside for management tools + */ + dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue]; + return 0; +} + +/** + * aac_fib_alloc_tag-allocate a fib using tags + * @dev: Adapter to allocate the fib for + * @scmd: SCSI command + * + * Allocate a fib from the adapter fib pool using tags + * from the blk layer. + */ + +struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd) +{ + struct fib *fibptr; + + fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag]; + /* + * Null out fields that depend on being zero at the start of + * each I/O + */ + fibptr->hw_fib_va->header.XferState = 0; + fibptr->type = FSAFS_NTC_FIB_CONTEXT; + fibptr->callback_data = NULL; + fibptr->callback = NULL; + fibptr->flags = 0; + + return fibptr; +} + +/** + * aac_fib_alloc - allocate a fib + * @dev: Adapter to allocate the fib for + * + * Allocate a fib from the adapter fib pool. If the pool is empty we + * return NULL. 
+ */ + +struct fib *aac_fib_alloc(struct aac_dev *dev) +{ + struct fib * fibptr; + unsigned long flags; + spin_lock_irqsave(&dev->fib_lock, flags); + fibptr = dev->free_fib; + if(!fibptr){ + spin_unlock_irqrestore(&dev->fib_lock, flags); + return fibptr; + } + dev->free_fib = fibptr->next; + spin_unlock_irqrestore(&dev->fib_lock, flags); + /* + * Set the proper node type code and node byte size + */ + fibptr->type = FSAFS_NTC_FIB_CONTEXT; + fibptr->size = sizeof(struct fib); + /* + * Null out fields that depend on being zero at the start of + * each I/O + */ + fibptr->hw_fib_va->header.XferState = 0; + fibptr->flags = 0; + fibptr->callback = NULL; + fibptr->callback_data = NULL; + + return fibptr; +} + +/** + * aac_fib_free - free a fib + * @fibptr: fib to free up + * + * Frees up a fib and places it on the appropriate queue + */ + +void aac_fib_free(struct fib *fibptr) +{ + unsigned long flags; + + if (fibptr->done == 2) + return; + + spin_lock_irqsave(&fibptr->dev->fib_lock, flags); + if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) + aac_config.fib_timeouts++; + if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) && + fibptr->hw_fib_va->header.XferState != 0) { + printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", + (void*)fibptr, + le32_to_cpu(fibptr->hw_fib_va->header.XferState)); + } + fibptr->next = fibptr->dev->free_fib; + fibptr->dev->free_fib = fibptr; + spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags); +} + +/** + * aac_fib_init - initialise a fib + * @fibptr: The fib to initialize + * + * Set up the generic fib fields ready for use + */ + +void aac_fib_init(struct fib *fibptr) +{ + struct hw_fib *hw_fib = fibptr->hw_fib_va; + + memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr)); + hw_fib->header.StructType = FIB_MAGIC; + hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size); + hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable); + hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa); + hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size); +} + +/** + * fib_dealloc - deallocate a fib + * @fibptr: fib to deallocate + * + * Will deallocate and return to the free pool the FIB pointed to by the + * caller. + */ + +static void fib_dealloc(struct fib * fibptr) +{ + struct hw_fib *hw_fib = fibptr->hw_fib_va; + hw_fib->header.XferState = 0; +} + +/* + * Commuication primitives define and support the queuing method we use to + * support host to adapter commuication. All queue accesses happen through + * these routines and are the only routines which have a knowledge of the + * how these queues are implemented. + */ + +/** + * aac_get_entry - get a queue entry + * @dev: Adapter + * @qid: Queue Number + * @entry: Entry return + * @index: Index return + * @nonotify: notification control + * + * With a priority the routine returns a queue entry if the queue has free entries. If the queue + * is full(no free entries) than no entry is returned and the function returns 0 otherwise 1 is + * returned. + */ + +static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify) +{ + struct aac_queue * q; + unsigned long idx; + + /* + * All of the queues wrap when they reach the end, so we check + * to see if they have reached the end and if they have we just + * set the index back to zero. This is a wrap. You could or off + * the high bits in all updates but this is a bit faster I think. 
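+ * The queue is treated as full when advancing the producer index by one
+ * would make it equal to the consumer index.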
+ */ + + q = &dev->queues->queue[qid]; + + idx = *index = le32_to_cpu(*(q->headers.producer)); + /* Interrupt Moderation, only interrupt for first two entries */ + if (idx != le32_to_cpu(*(q->headers.consumer))) { + if (--idx == 0) { + if (qid == AdapNormCmdQueue) + idx = ADAP_NORM_CMD_ENTRIES; + else + idx = ADAP_NORM_RESP_ENTRIES; + } + if (idx != le32_to_cpu(*(q->headers.consumer))) + *nonotify = 1; + } + + if (qid == AdapNormCmdQueue) { + if (*index >= ADAP_NORM_CMD_ENTRIES) + *index = 0; /* Wrap to front of the Producer Queue. */ + } else { + if (*index >= ADAP_NORM_RESP_ENTRIES) + *index = 0; /* Wrap to front of the Producer Queue. */ + } + + /* Queue is full */ + if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { + printk(KERN_WARNING "Queue %d full, %u outstanding.\n", + qid, atomic_read(&q->numpending)); + return 0; + } else { + *entry = q->base + *index; + return 1; + } +} + +/** + * aac_queue_get - get the next free QE + * @dev: Adapter + * @index: Returned index + * @qid: Queue number + * @hw_fib: Fib to associate with the queue entry + * @wait: Wait if queue full + * @fibptr: Driver fib object to go with fib + * @nonotify: Don't notify the adapter + * + * Gets the next free QE off the requested priorty adapter command + * queue and associates the Fib with the QE. The QE represented by + * index is ready to insert on the queue when this routine returns + * success. + */ + +int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify) +{ + struct aac_entry * entry = NULL; + int map = 0; + + if (qid == AdapNormCmdQueue) { + /* if no entries wait for some if caller wants to */ + while (!aac_get_entry(dev, qid, &entry, index, nonotify)) { + printk(KERN_ERR "GetEntries failed\n"); + } + /* + * Setup queue entry with a command, status and fib mapped + */ + entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size)); + map = 1; + } else { + while (!aac_get_entry(dev, qid, &entry, index, nonotify)) { + /* if no entries wait for some if caller wants to */ + } + /* + * Setup queue entry with command, status and fib mapped + */ + entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size)); + entry->addr = hw_fib->header.SenderFibAddress; + /* Restore adapters pointer to the FIB */ + hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter now where to find its data */ + map = 0; + } + /* + * If MapFib is true than we need to map the Fib and put pointers + * in the queue entry. + */ + if (map) + entry->addr = cpu_to_le32(fibptr->hw_fib_pa); + return 0; +} + +/* + * Define the highest level of host to adapter communication routines. + * These routines will support host to adapter FS commuication. These + * routines have no knowledge of the commuication method used. This level + * sends and receives FIBs. This level has no knowledge of how these FIBs + * get passed back and forth. + */ + +/** + * aac_fib_send - send a fib to the adapter + * @command: Command to send + * @fibptr: The fib + * @size: Size of fib data area + * @priority: Priority of Fib + * @wait: Async/sync select + * @reply: True if a reply is wanted + * @callback: Called with reply + * @callback_data: Passed to callback + * + * Sends the requested FIB to the adapter and optionally will wait for a + * response FIB. If the caller does not wish to wait for a response than + * an event to wait on must be supplied. This event will be set when a + * response FIB is received from the adapter. 
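+ *
+ * Returns 0 when the FIB completed (or needed no response), -EINPROGRESS
+ * for an asynchronous send that expects a reply, or a negative error code
+ * on failure. A typical synchronous call looks like (illustrative only):
+ *
+ *	status = aac_fib_send(ContainerCommand, fibptr, sizeof(*cmd),
+ *			      FsaNormal, 1, 1, NULL, NULL);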
+ */ + +int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, + int priority, int wait, int reply, fib_callback callback, + void *callback_data) +{ + struct aac_dev * dev = fibptr->dev; + struct hw_fib * hw_fib = fibptr->hw_fib_va; + unsigned long flags = 0; + unsigned long mflags = 0; + unsigned long sflags = 0; + + if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) + return -EBUSY; + + if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)) + return -EINVAL; + + /* + * There are 5 cases with the wait and response requested flags. + * The only invalid cases are if the caller requests to wait and + * does not request a response and if the caller does not want a + * response and the Fib is not allocated from pool. If a response + * is not requested the Fib will just be deallocaed by the DPC + * routine when the response comes back from the adapter. No + * further processing will be done besides deleting the Fib. We + * will have a debug mode where the adapter can notify the host + * it had a problem and the host can log that fact. + */ + fibptr->flags = 0; + if (wait && !reply) { + return -EINVAL; + } else if (!wait && reply) { + hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected); + FIB_COUNTER_INCREMENT(aac_config.AsyncSent); + } else if (!wait && !reply) { + hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected); + FIB_COUNTER_INCREMENT(aac_config.NoResponseSent); + } else if (wait && reply) { + hw_fib->header.XferState |= cpu_to_le32(ResponseExpected); + FIB_COUNTER_INCREMENT(aac_config.NormalSent); + } + /* + * Map the fib into 32bits by using the fib number + */ + + hw_fib->header.SenderFibAddress = + cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2); + + /* use the same shifted value for handle to be compatible + * with the new native hba command handle + */ + hw_fib->header.Handle = + cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); + + /* + * Set FIB state to indicate where it came from and if we want a + * response from the adapter. Also load the command from the + * caller. + * + * Map the hw fib pointer as a 32bit value + */ + hw_fib->header.Command = cpu_to_le16(command); + hw_fib->header.XferState |= cpu_to_le32(SentFromHost); + /* + * Set the size of the Fib we want to send to the adapter + */ + hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size); + if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) { + return -EMSGSIZE; + } + /* + * Get a queue entry connect the FIB to it and send an notify + * the adapter a command is ready. + */ + hw_fib->header.XferState |= cpu_to_le32(NormalPriority); + + /* + * Fill in the Callback and CallbackContext if we are not + * going to wait. 
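+ * For asynchronous sends the callback is invoked later from the
+ * response-handling path once the adapter completes this FIB.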
+ */ + if (!wait) { + fibptr->callback = callback; + fibptr->callback_data = callback_data; + fibptr->flags = FIB_CONTEXT_FLAG; + } + + fibptr->done = 0; + + FIB_COUNTER_INCREMENT(aac_config.FibsSent); + + dprintk((KERN_DEBUG "Fib contents:.\n")); + dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command))); + dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command))); + dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState))); + dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib_va)); + dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa)); + dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr)); + + if (!dev->queues) + return -EBUSY; + + if (wait) { + + spin_lock_irqsave(&dev->manage_lock, mflags); + if (dev->management_fib_count >= AAC_NUM_MGT_FIB) { + printk(KERN_INFO "No management Fibs Available:%d\n", + dev->management_fib_count); + spin_unlock_irqrestore(&dev->manage_lock, mflags); + return -EBUSY; + } + dev->management_fib_count++; + spin_unlock_irqrestore(&dev->manage_lock, mflags); + spin_lock_irqsave(&fibptr->event_lock, flags); + } + + if (dev->sync_mode) { + if (wait) + spin_unlock_irqrestore(&fibptr->event_lock, flags); + spin_lock_irqsave(&dev->sync_lock, sflags); + if (dev->sync_fib) { + list_add_tail(&fibptr->fiblink, &dev->sync_fib_list); + spin_unlock_irqrestore(&dev->sync_lock, sflags); + } else { + dev->sync_fib = fibptr; + spin_unlock_irqrestore(&dev->sync_lock, sflags); + aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB, + (u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0, + NULL, NULL, NULL, NULL, NULL); + } + if (wait) { + fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; + if (wait_for_completion_interruptible(&fibptr->event_wait)) { + fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT; + return -EFAULT; + } + return 0; + } + return -EINPROGRESS; + } + + if (aac_adapter_deliver(fibptr) != 0) { + printk(KERN_ERR "aac_fib_send: returned -EBUSY\n"); + if (wait) { + spin_unlock_irqrestore(&fibptr->event_lock, flags); + spin_lock_irqsave(&dev->manage_lock, mflags); + dev->management_fib_count--; + spin_unlock_irqrestore(&dev->manage_lock, mflags); + } + return -EBUSY; + } + + + /* + * If the caller wanted us to wait for response wait now. + */ + + if (wait) { + spin_unlock_irqrestore(&fibptr->event_lock, flags); + /* Only set for first known interruptable command */ + if (wait < 0) { + /* + * *VERY* Dangerous to time out a command, the + * assumption is made that we have no hope of + * functioning because an interrupt routing or other + * hardware failure has occurred. 
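+ * The polling loop below waits for completion for up to three minutes
+ * before giving up and reporting a timeout.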
+ */ + unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */ + while (!try_wait_for_completion(&fibptr->event_wait)) { + int blink; + if (time_is_before_eq_jiffies(timeout)) { + struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue]; + atomic_dec(&q->numpending); + if (wait == -1) { + printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n" + "Usually a result of a PCI interrupt routing problem;\n" + "update mother board BIOS or consider utilizing one of\n" + "the SAFE mode kernel options (acpi, apic etc)\n"); + } + return -ETIMEDOUT; + } + + if (unlikely(aac_pci_offline(dev))) + return -EFAULT; + + if ((blink = aac_adapter_check_health(dev)) > 0) { + if (wait == -1) { + printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n" + "Usually a result of a serious unrecoverable hardware problem\n", + blink); + } + return -EFAULT; + } + /* + * Allow other processes / CPUS to use core + */ + schedule(); + } + } else if (wait_for_completion_interruptible(&fibptr->event_wait)) { + /* Do nothing ... satisfy + * wait_for_completion_interruptible must_check */ + } + + spin_lock_irqsave(&fibptr->event_lock, flags); + if (fibptr->done == 0) { + fibptr->done = 2; /* Tell interrupt we aborted */ + spin_unlock_irqrestore(&fibptr->event_lock, flags); + return -ERESTARTSYS; + } + spin_unlock_irqrestore(&fibptr->event_lock, flags); + BUG_ON(fibptr->done == 0); + + if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) + return -ETIMEDOUT; + return 0; + } + /* + * If the user does not want a response than return success otherwise + * return pending + */ + if (reply) + return -EINPROGRESS; + else + return 0; +} + +int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, + void *callback_data) +{ + struct aac_dev *dev = fibptr->dev; + int wait; + unsigned long flags = 0; + unsigned long mflags = 0; + struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *) + fibptr->hw_fib_va; + + fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA); + if (callback) { + wait = 0; + fibptr->callback = callback; + fibptr->callback_data = callback_data; + } else + wait = 1; + + + hbacmd->iu_type = command; + + if (command == HBA_IU_TYPE_SCSI_CMD_REQ) { + /* bit1 of request_id must be 0 */ + hbacmd->request_id = + cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); + fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD; + } else + return -EINVAL; + + + if (wait) { + spin_lock_irqsave(&dev->manage_lock, mflags); + if (dev->management_fib_count >= AAC_NUM_MGT_FIB) { + spin_unlock_irqrestore(&dev->manage_lock, mflags); + return -EBUSY; + } + dev->management_fib_count++; + spin_unlock_irqrestore(&dev->manage_lock, mflags); + spin_lock_irqsave(&fibptr->event_lock, flags); + } + + if (aac_adapter_deliver(fibptr) != 0) { + if (wait) { + spin_unlock_irqrestore(&fibptr->event_lock, flags); + spin_lock_irqsave(&dev->manage_lock, mflags); + dev->management_fib_count--; + spin_unlock_irqrestore(&dev->manage_lock, mflags); + } + return -EBUSY; + } + FIB_COUNTER_INCREMENT(aac_config.NativeSent); + + if (wait) { + + spin_unlock_irqrestore(&fibptr->event_lock, flags); + + if (unlikely(aac_pci_offline(dev))) + return -EFAULT; + + fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; + if (wait_for_completion_interruptible(&fibptr->event_wait)) + fibptr->done = 2; + fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT); + + spin_lock_irqsave(&fibptr->event_lock, flags); + if ((fibptr->done == 0) || (fibptr->done == 2)) { + fibptr->done = 2; /* Tell interrupt we aborted */ + 
spin_unlock_irqrestore(&fibptr->event_lock, flags); + return -ERESTARTSYS; + } + spin_unlock_irqrestore(&fibptr->event_lock, flags); + WARN_ON(fibptr->done == 0); + + if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) + return -ETIMEDOUT; + + return 0; + } + + return -EINPROGRESS; +} + +/** + * aac_consumer_get - get the top of the queue + * @dev: Adapter + * @q: Queue + * @entry: Return entry + * + * Will return a pointer to the entry on the top of the queue requested that + * we are a consumer of, and return the address of the queue entry. It does + * not change the state of the queue. + */ + +int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry) +{ + u32 index; + int status; + if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) { + status = 0; + } else { + /* + * The consumer index must be wrapped if we have reached + * the end of the queue, else we just use the entry + * pointed to by the header index + */ + if (le32_to_cpu(*q->headers.consumer) >= q->entries) + index = 0; + else + index = le32_to_cpu(*q->headers.consumer); + *entry = q->base + index; + status = 1; + } + return(status); +} + +/** + * aac_consumer_free - free consumer entry + * @dev: Adapter + * @q: Queue + * @qid: Queue ident + * + * Frees up the current top of the queue we are a consumer of. If the + * queue was full notify the producer that the queue is no longer full. + */ + +void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid) +{ + int wasfull = 0; + u32 notify; + + if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer)) + wasfull = 1; + + if (le32_to_cpu(*q->headers.consumer) >= q->entries) + *q->headers.consumer = cpu_to_le32(1); + else + le32_add_cpu(q->headers.consumer, 1); + + if (wasfull) { + switch (qid) { + + case HostNormCmdQueue: + notify = HostNormCmdNotFull; + break; + case HostNormRespQueue: + notify = HostNormRespNotFull; + break; + default: + BUG(); + return; + } + aac_adapter_notify(dev, notify); + } +} + +/** + * aac_fib_adapter_complete - complete adapter issued fib + * @fibptr: fib to complete + * @size: size of fib + * + * Will do all necessary work to complete a FIB that was sent from + * the adapter. + */ + +int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) +{ + struct hw_fib * hw_fib = fibptr->hw_fib_va; + struct aac_dev * dev = fibptr->dev; + struct aac_queue * q; + unsigned long nointr = 0; + unsigned long qflags; + + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 || + dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 || + dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { + kfree(hw_fib); + return 0; + } + + if (hw_fib->header.XferState == 0) { + if (dev->comm_interface == AAC_COMM_MESSAGE) + kfree(hw_fib); + return 0; + } + /* + * If we plan to do anything check the structure type first. + */ + if (hw_fib->header.StructType != FIB_MAGIC && + hw_fib->header.StructType != FIB_MAGIC2 && + hw_fib->header.StructType != FIB_MAGIC2_64) { + if (dev->comm_interface == AAC_COMM_MESSAGE) + kfree(hw_fib); + return -EINVAL; + } + /* + * This block handles the case where the adapter had sent us a + * command and we have finished processing the command. We + * call completeFib when we are done processing the command + * and want to send a response back to the adapter. This will + * send the completed cdb to the adapter. 
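+ * The response is placed on AdapNormRespQueue and the adapter is only
+ * notified when interrupt moderation permits it.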
+ */ + if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) { + if (dev->comm_interface == AAC_COMM_MESSAGE) { + kfree (hw_fib); + } else { + u32 index; + hw_fib->header.XferState |= cpu_to_le32(HostProcessed); + if (size) { + size += sizeof(struct aac_fibhdr); + if (size > le16_to_cpu(hw_fib->header.SenderSize)) + return -EMSGSIZE; + hw_fib->header.Size = cpu_to_le16(size); + } + q = &dev->queues->queue[AdapNormRespQueue]; + spin_lock_irqsave(q->lock, qflags); + aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr); + *(q->headers.producer) = cpu_to_le32(index + 1); + spin_unlock_irqrestore(q->lock, qflags); + if (!(nointr & (int)aac_config.irq_mod)) + aac_adapter_notify(dev, AdapNormRespQueue); + } + } else { + printk(KERN_WARNING "aac_fib_adapter_complete: " + "Unknown xferstate detected.\n"); + BUG(); + } + return 0; +} + +/** + * aac_fib_complete - fib completion handler + * @fibptr: FIB to complete + * + * Will do all necessary work to complete a FIB. + */ + +int aac_fib_complete(struct fib *fibptr) +{ + struct hw_fib * hw_fib = fibptr->hw_fib_va; + + if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) { + fib_dealloc(fibptr); + return 0; + } + + /* + * Check for a fib which has already been completed or with a + * status wait timeout + */ + + if (hw_fib->header.XferState == 0 || fibptr->done == 2) + return 0; + /* + * If we plan to do anything check the structure type first. + */ + + if (hw_fib->header.StructType != FIB_MAGIC && + hw_fib->header.StructType != FIB_MAGIC2 && + hw_fib->header.StructType != FIB_MAGIC2_64) + return -EINVAL; + /* + * This block completes a cdb which orginated on the host and we + * just need to deallocate the cdb or reinit it. At this point the + * command is complete that we had sent to the adapter and this + * cdb could be reused. 
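+ * Note that the fib is not returned to the free pool here; callers that
+ * own the fib still need to call aac_fib_free() themselves.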
+ */ + + if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && + (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) + { + fib_dealloc(fibptr); + } + else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost)) + { + /* + * This handles the case when the host has aborted the I/O + * to the adapter because the adapter is not responding + */ + fib_dealloc(fibptr); + } else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) { + fib_dealloc(fibptr); + } else { + BUG(); + } + return 0; +} + +/** + * aac_printf - handle printf from firmware + * @dev: Adapter + * @val: Message info + * + * Print a message passed to us by the controller firmware on the + * Adaptec board + */ + +void aac_printf(struct aac_dev *dev, u32 val) +{ + char *cp = dev->printfbuf; + if (dev->printf_enabled) + { + int length = val & 0xffff; + int level = (val >> 16) & 0xffff; + + /* + * The size of the printfbuf is set in port.c + * There is no variable or define for it + */ + if (length > 255) + length = 255; + if (cp[length] != 0) + cp[length] = 0; + if (level == LOG_AAC_HIGH_ERROR) + printk(KERN_WARNING "%s:%s", dev->name, cp); + else + printk(KERN_INFO "%s:%s", dev->name, cp); + } + memset(cp, 0, 256); +} + +static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index) +{ + return le32_to_cpu(((__le32 *)aifcmd->data)[index]); +} + + +static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd) +{ + switch (aac_aif_data(aifcmd, 1)) { + case AifBuCacheDataLoss: + if (aac_aif_data(aifcmd, 2)) + dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n", + aac_aif_data(aifcmd, 2)); + else + dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n"); + break; + case AifBuCacheDataRecover: + if (aac_aif_data(aifcmd, 2)) + dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n", + aac_aif_data(aifcmd, 2)); + else + dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n"); + break; + } +} + +#define AIF_SNIFF_TIMEOUT (500*HZ) +/** + * aac_handle_aif - Handle a message from the firmware + * @dev: Which adapter this fib is from + * @fibptr: Pointer to fibptr from adapter + * + * This routine handles a driver notify fib from the adapter and + * dispatches it to the appropriate routine for handling. + */ +static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) +{ + struct hw_fib * hw_fib = fibptr->hw_fib_va; + struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data; + u32 channel, id, lun, container; + struct scsi_device *device; + enum { + NOTHING, + DELETE, + ADD, + CHANGE + } device_config_needed = NOTHING; + + /* Sniff for container changes */ + + if (!dev || !dev->fsa_dev) + return; + container = channel = id = lun = (u32)-1; + + /* + * We have set this up to try and minimize the number of + * re-configures that take place. As a result of this when + * certain AIF's come in we will set a flag waiting for another + * type of AIF before setting the re-config flag. 
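+ * The per-container config_waiting_on/config_needed fields record that
+ * intermediate state until the follow-up AIF arrives or times out.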
+ */ + switch (le32_to_cpu(aifcmd->command)) { + case AifCmdDriverNotify: + switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) { + case AifRawDeviceRemove: + container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); + if ((container >> 28)) { + container = (u32)-1; + break; + } + channel = (container >> 24) & 0xF; + if (channel >= dev->maximum_num_channels) { + container = (u32)-1; + break; + } + id = container & 0xFFFF; + if (id >= dev->maximum_num_physicals) { + container = (u32)-1; + break; + } + lun = (container >> 16) & 0xFF; + container = (u32)-1; + channel = aac_phys_to_logical(channel); + device_config_needed = DELETE; + break; + + /* + * Morph or Expand complete + */ + case AifDenMorphComplete: + case AifDenVolumeExtendComplete: + container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); + if (container >= dev->maximum_num_containers) + break; + + /* + * Find the scsi_device associated with the SCSI + * address. Make sure we have the right array, and if + * so set the flag to initiate a new re-config once we + * see an AifEnConfigChange AIF come through. + */ + + if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) { + device = scsi_device_lookup(dev->scsi_host_ptr, + CONTAINER_TO_CHANNEL(container), + CONTAINER_TO_ID(container), + CONTAINER_TO_LUN(container)); + if (device) { + dev->fsa_dev[container].config_needed = CHANGE; + dev->fsa_dev[container].config_waiting_on = AifEnConfigChange; + dev->fsa_dev[container].config_waiting_stamp = jiffies; + scsi_device_put(device); + } + } + } + + /* + * If we are waiting on something and this happens to be + * that thing then set the re-configure flag. + */ + if (container != (u32)-1) { + if (container >= dev->maximum_num_containers) + break; + if ((dev->fsa_dev[container].config_waiting_on == + le32_to_cpu(*(__le32 *)aifcmd->data)) && + time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) + dev->fsa_dev[container].config_waiting_on = 0; + } else for (container = 0; + container < dev->maximum_num_containers; ++container) { + if ((dev->fsa_dev[container].config_waiting_on == + le32_to_cpu(*(__le32 *)aifcmd->data)) && + time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) + dev->fsa_dev[container].config_waiting_on = 0; + } + break; + + case AifCmdEventNotify: + switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) { + case AifEnBatteryEvent: + dev->cache_protected = + (((__le32 *)aifcmd->data)[1] == cpu_to_le32(3)); + break; + /* + * Add an Array. + */ + case AifEnAddContainer: + container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); + if (container >= dev->maximum_num_containers) + break; + dev->fsa_dev[container].config_needed = ADD; + dev->fsa_dev[container].config_waiting_on = + AifEnConfigChange; + dev->fsa_dev[container].config_waiting_stamp = jiffies; + break; + + /* + * Delete an Array. + */ + case AifEnDeleteContainer: + container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); + if (container >= dev->maximum_num_containers) + break; + dev->fsa_dev[container].config_needed = DELETE; + dev->fsa_dev[container].config_waiting_on = + AifEnConfigChange; + dev->fsa_dev[container].config_waiting_stamp = jiffies; + break; + + /* + * Container change detected. If we currently are not + * waiting on something else, setup to wait on a Config Change. 
+ */ + case AifEnContainerChange: + container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); + if (container >= dev->maximum_num_containers) + break; + if (dev->fsa_dev[container].config_waiting_on && + time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) + break; + dev->fsa_dev[container].config_needed = CHANGE; + dev->fsa_dev[container].config_waiting_on = + AifEnConfigChange; + dev->fsa_dev[container].config_waiting_stamp = jiffies; + break; + + case AifEnConfigChange: + break; + + case AifEnAddJBOD: + case AifEnDeleteJBOD: + container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); + if ((container >> 28)) { + container = (u32)-1; + break; + } + channel = (container >> 24) & 0xF; + if (channel >= dev->maximum_num_channels) { + container = (u32)-1; + break; + } + id = container & 0xFFFF; + if (id >= dev->maximum_num_physicals) { + container = (u32)-1; + break; + } + lun = (container >> 16) & 0xFF; + container = (u32)-1; + channel = aac_phys_to_logical(channel); + device_config_needed = + (((__le32 *)aifcmd->data)[0] == + cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE; + if (device_config_needed == ADD) { + device = scsi_device_lookup(dev->scsi_host_ptr, + channel, + id, + lun); + if (device) { + scsi_remove_device(device); + scsi_device_put(device); + } + } + break; + + case AifEnEnclosureManagement: + /* + * If in JBOD mode, automatic exposure of new + * physical target to be suppressed until configured. + */ + if (dev->jbod) + break; + switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) { + case EM_DRIVE_INSERTION: + case EM_DRIVE_REMOVAL: + case EM_SES_DRIVE_INSERTION: + case EM_SES_DRIVE_REMOVAL: + container = le32_to_cpu( + ((__le32 *)aifcmd->data)[2]); + if ((container >> 28)) { + container = (u32)-1; + break; + } + channel = (container >> 24) & 0xF; + if (channel >= dev->maximum_num_channels) { + container = (u32)-1; + break; + } + id = container & 0xFFFF; + lun = (container >> 16) & 0xFF; + container = (u32)-1; + if (id >= dev->maximum_num_physicals) { + /* legacy dev_t ? */ + if ((0x2000 <= id) || lun || channel || + ((channel = (id >> 7) & 0x3F) >= + dev->maximum_num_channels)) + break; + lun = (id >> 4) & 7; + id &= 0xF; + } + channel = aac_phys_to_logical(channel); + device_config_needed = + ((((__le32 *)aifcmd->data)[3] + == cpu_to_le32(EM_DRIVE_INSERTION)) || + (((__le32 *)aifcmd->data)[3] + == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ? + ADD : DELETE; + break; + } + break; + case AifBuManagerEvent: + aac_handle_aif_bu(dev, aifcmd); + break; + } + + /* + * If we are waiting on something and this happens to be + * that thing then set the re-configure flag. + */ + if (container != (u32)-1) { + if (container >= dev->maximum_num_containers) + break; + if ((dev->fsa_dev[container].config_waiting_on == + le32_to_cpu(*(__le32 *)aifcmd->data)) && + time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) + dev->fsa_dev[container].config_waiting_on = 0; + } else for (container = 0; + container < dev->maximum_num_containers; ++container) { + if ((dev->fsa_dev[container].config_waiting_on == + le32_to_cpu(*(__le32 *)aifcmd->data)) && + time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) + dev->fsa_dev[container].config_waiting_on = 0; + } + break; + + case AifCmdJobProgress: + /* + * These are job progress AIF's. When a Clear is being + * done on a container it is initially created then hidden from + * the OS. 
When the clear completes we don't get a config + * change so we monitor the job status complete on a clear then + * wait for a container change. + */ + + if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) && + (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] || + ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) { + for (container = 0; + container < dev->maximum_num_containers; + ++container) { + /* + * Stomp on all config sequencing for all + * containers? + */ + dev->fsa_dev[container].config_waiting_on = + AifEnContainerChange; + dev->fsa_dev[container].config_needed = ADD; + dev->fsa_dev[container].config_waiting_stamp = + jiffies; + } + } + if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) && + ((__le32 *)aifcmd->data)[6] == 0 && + ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) { + for (container = 0; + container < dev->maximum_num_containers; + ++container) { + /* + * Stomp on all config sequencing for all + * containers? + */ + dev->fsa_dev[container].config_waiting_on = + AifEnContainerChange; + dev->fsa_dev[container].config_needed = DELETE; + dev->fsa_dev[container].config_waiting_stamp = + jiffies; + } + } + break; + } + + container = 0; +retry_next: + if (device_config_needed == NOTHING) { + for (; container < dev->maximum_num_containers; ++container) { + if ((dev->fsa_dev[container].config_waiting_on == 0) && + (dev->fsa_dev[container].config_needed != NOTHING) && + time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) { + device_config_needed = + dev->fsa_dev[container].config_needed; + dev->fsa_dev[container].config_needed = NOTHING; + channel = CONTAINER_TO_CHANNEL(container); + id = CONTAINER_TO_ID(container); + lun = CONTAINER_TO_LUN(container); + break; + } + } + } + if (device_config_needed == NOTHING) + return; + + /* + * If we decided that a re-configuration needs to be done, + * schedule it here on the way out the door, please close the door + * behind you. + */ + + /* + * Find the scsi_device associated with the SCSI address, + * and mark it as changed, invalidating the cache. This deals + * with changes to existing device IDs. + */ + + if (!dev || !dev->scsi_host_ptr) + return; + /* + * force reload of disk info via aac_probe_container + */ + if ((channel == CONTAINER_CHANNEL) && + (device_config_needed != NOTHING)) { + if (dev->fsa_dev[container].valid == 1) + dev->fsa_dev[container].valid = 2; + aac_probe_container(dev, container); + } + device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun); + if (device) { + switch (device_config_needed) { + case DELETE: +#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE)) + scsi_remove_device(device); +#else + if (scsi_device_online(device)) { + scsi_device_set_state(device, SDEV_OFFLINE); + sdev_printk(KERN_INFO, device, + "Device offlined - %s\n", + (channel == CONTAINER_CHANNEL) ? + "array deleted" : + "enclosure services event"); + } +#endif + break; + case ADD: + if (!scsi_device_online(device)) { + sdev_printk(KERN_INFO, device, + "Device online - %s\n", + (channel == CONTAINER_CHANNEL) ? 
+ "array created" : + "enclosure services event"); + scsi_device_set_state(device, SDEV_RUNNING); + } + fallthrough; + case CHANGE: + if ((channel == CONTAINER_CHANNEL) + && (!dev->fsa_dev[container].valid)) { +#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE)) + scsi_remove_device(device); +#else + if (!scsi_device_online(device)) + break; + scsi_device_set_state(device, SDEV_OFFLINE); + sdev_printk(KERN_INFO, device, + "Device offlined - %s\n", + "array failed"); +#endif + break; + } + scsi_rescan_device(device); + break; + + default: + break; + } + scsi_device_put(device); + device_config_needed = NOTHING; + } + if (device_config_needed == ADD) + scsi_add_device(dev->scsi_host_ptr, channel, id, lun); + if (channel == CONTAINER_CHANNEL) { + container++; + device_config_needed = NOTHING; + goto retry_next; + } +} + +static void aac_schedule_bus_scan(struct aac_dev *aac) +{ + if (aac->sa_firmware) + aac_schedule_safw_scan_worker(aac); + else + aac_schedule_src_reinit_aif_worker(aac); +} + +static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) +{ + int index, quirks; + int retval; + struct Scsi_Host *host = aac->scsi_host_ptr; + int jafo = 0; + int bled; + u64 dmamask; + int num_of_fibs = 0; + + /* + * Assumptions: + * - host is locked, unless called by the aacraid thread. + * (a matter of convenience, due to legacy issues surrounding + * eh_host_adapter_reset). + * - in_reset is asserted, so no new i/o is getting to the + * card. + * - The card is dead, or will be very shortly ;-/ so no new + * commands are completing in the interrupt service. + */ + aac_adapter_disable_int(aac); + if (aac->thread && aac->thread->pid != current->pid) { + spin_unlock_irq(host->host_lock); + kthread_stop(aac->thread); + aac->thread = NULL; + jafo = 1; + } + + /* + * If a positive health, means in a known DEAD PANIC + * state and the adapter could be reset to `try again'. + */ + bled = forced ? 0 : aac_adapter_check_health(aac); + retval = aac_adapter_restart(aac, bled, reset_type); + + if (retval) + goto out; + + /* + * Loop through the fibs, close the synchronous FIBS + */ + retval = 1; + num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB; + for (index = 0; index < num_of_fibs; index++) { + + struct fib *fib = &aac->fibs[index]; + __le32 XferState = fib->hw_fib_va->header.XferState; + bool is_response_expected = false; + + if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) && + (XferState & cpu_to_le32(ResponseExpected))) + is_response_expected = true; + + if (is_response_expected + || fib->flags & FIB_CONTEXT_FLAG_WAIT) { + unsigned long flagv; + spin_lock_irqsave(&fib->event_lock, flagv); + complete(&fib->event_wait); + spin_unlock_irqrestore(&fib->event_lock, flagv); + schedule(); + retval = 0; + } + } + /* Give some extra time for ioctls to complete. */ + if (retval == 0) + ssleep(2); + index = aac->cardtype; + + /* + * Re-initialize the adapter, first free resources, then carefully + * apply the initialization sequence to come back again. Only risk + * is a change in Firmware dropping cache, it is assumed the caller + * will ensure that i/o is queisced and the card is flushed in that + * case. 
+ */ + aac_free_irq(aac); + aac_fib_map_free(aac); + dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr, + aac->comm_phys); + aac_adapter_ioremap(aac, 0); + aac->comm_addr = NULL; + aac->comm_phys = 0; + kfree(aac->queues); + aac->queues = NULL; + kfree(aac->fsa_dev); + aac->fsa_dev = NULL; + + dmamask = DMA_BIT_MASK(32); + quirks = aac_get_driver_ident(index)->quirks; + if (quirks & AAC_QUIRK_31BIT) + retval = dma_set_mask(&aac->pdev->dev, dmamask); + else if (!(quirks & AAC_QUIRK_SRC)) + retval = dma_set_mask(&aac->pdev->dev, dmamask); + else + retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask); + + if (quirks & AAC_QUIRK_31BIT && !retval) { + dmamask = DMA_BIT_MASK(31); + retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask); + } + + if (retval) + goto out; + + if ((retval = (*(aac_get_driver_ident(index)->init))(aac))) + goto out; + + if (jafo) { + aac->thread = kthread_run(aac_command_thread, aac, "%s", + aac->name); + if (IS_ERR(aac->thread)) { + retval = PTR_ERR(aac->thread); + aac->thread = NULL; + goto out; + } + } + (void)aac_get_adapter_info(aac); + if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) { + host->sg_tablesize = 34; + host->max_sectors = (host->sg_tablesize * 8) + 112; + } + if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) { + host->sg_tablesize = 17; + host->max_sectors = (host->sg_tablesize * 8) + 112; + } + aac_get_config_status(aac, 1); + aac_get_containers(aac); + /* + * This is where the assumption that the Adapter is quiesced + * is important. + */ + scsi_host_complete_all_commands(host, DID_RESET); + + retval = 0; +out: + aac->in_reset = 0; + + /* + * Issue bus rescan to catch any configuration that might have + * occurred + */ + if (!retval && !is_kdump_kernel()) { + dev_info(&aac->pdev->dev, "Scheduling bus rescan\n"); + aac_schedule_bus_scan(aac); + } + + if (jafo) { + spin_lock_irq(host->host_lock); + } + return retval; +} + +int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) +{ + unsigned long flagv = 0; + int retval, unblock_retval; + struct Scsi_Host *host = aac->scsi_host_ptr; + int bled; + + if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) + return -EBUSY; + + if (aac->in_reset) { + spin_unlock_irqrestore(&aac->fib_lock, flagv); + return -EBUSY; + } + aac->in_reset = 1; + spin_unlock_irqrestore(&aac->fib_lock, flagv); + + /* + * Wait for all commands to complete to this specific + * target (block maximum 60 seconds). Although not necessary, + * it does make us a good storage citizen. + */ + scsi_host_block(host); + + /* Quiesce build, flush cache, write through mode */ + if (forced < 2) + aac_send_shutdown(aac); + spin_lock_irqsave(host->host_lock, flagv); + bled = forced ? 
forced : + (aac_check_reset != 0 && aac_check_reset != 1); + retval = _aac_reset_adapter(aac, bled, reset_type); + spin_unlock_irqrestore(host->host_lock, flagv); + + unblock_retval = scsi_host_unblock(host, SDEV_RUNNING); + if (!retval) + retval = unblock_retval; + if ((forced < 2) && (retval == -ENODEV)) { + /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */ + struct fib * fibctx = aac_fib_alloc(aac); + if (fibctx) { + struct aac_pause *cmd; + int status; + + aac_fib_init(fibctx); + + cmd = (struct aac_pause *) fib_data(fibctx); + + cmd->command = cpu_to_le32(VM_ContainerConfig); + cmd->type = cpu_to_le32(CT_PAUSE_IO); + cmd->timeout = cpu_to_le32(1); + cmd->min = cpu_to_le32(1); + cmd->noRescan = cpu_to_le32(1); + cmd->count = cpu_to_le32(0); + + status = aac_fib_send(ContainerCommand, + fibctx, + sizeof(struct aac_pause), + FsaNormal, + -2 /* Timeout silently */, 1, + NULL, NULL); + + if (status >= 0) + aac_fib_complete(fibctx); + /* FIB should be freed only after getting + * the response from the F/W */ + if (status != -ERESTARTSYS) + aac_fib_free(fibctx); + } + } + + return retval; +} + +int aac_check_health(struct aac_dev * aac) +{ + int BlinkLED; + unsigned long time_now, flagv = 0; + struct list_head * entry; + + /* Extending the scope of fib_lock slightly to protect aac->in_reset */ + if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) + return 0; + + if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) { + spin_unlock_irqrestore(&aac->fib_lock, flagv); + return 0; /* OK */ + } + + aac->in_reset = 1; + + /* Fake up an AIF: + * aac_aifcmd.command = AifCmdEventNotify = 1 + * aac_aifcmd.seqnum = 0xFFFFFFFF + * aac_aifcmd.data[0] = AifEnExpEvent = 23 + * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3 + * aac.aifcmd.data[2] = AifHighPriority = 3 + * aac.aifcmd.data[3] = BlinkLED + */ + + time_now = jiffies/HZ; + entry = aac->fib_list.next; + + /* + * For each Context that is on the + * fibctxList, make a copy of the + * fib, and then set the event to wake up the + * thread that is waiting for it. + */ + while (entry != &aac->fib_list) { + /* + * Extract the fibctx + */ + struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next); + struct hw_fib * hw_fib; + struct fib * fib; + /* + * Check if the queue is getting + * backlogged + */ + if (fibctx->count > 20) { + /* + * It's *not* jiffies folks, + * but jiffies / HZ, so do not + * panic ... + */ + u32 time_last = fibctx->jiffies; + /* + * Has it been > 2 minutes + * since the last read off + * the queue? 
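+ * If so, assume the reader has gone away and close the
+ * context below instead of queueing more entries to it.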
+ */ + if ((time_now - time_last) > aif_timeout) { + entry = entry->next; + aac_close_fib_context(aac, fibctx); + continue; + } + } + /* + * Warning: no sleep allowed while + * holding spinlock + */ + hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC); + fib = kzalloc(sizeof(struct fib), GFP_ATOMIC); + if (fib && hw_fib) { + struct aac_aifcmd * aif; + + fib->hw_fib_va = hw_fib; + fib->dev = aac; + aac_fib_init(fib); + fib->type = FSAFS_NTC_FIB_CONTEXT; + fib->size = sizeof (struct fib); + fib->data = hw_fib->data; + aif = (struct aac_aifcmd *)hw_fib->data; + aif->command = cpu_to_le32(AifCmdEventNotify); + aif->seqnum = cpu_to_le32(0xFFFFFFFF); + ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent); + ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic); + ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority); + ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED); + + /* + * Put the FIB onto the + * fibctx's fibs + */ + list_add_tail(&fib->fiblink, &fibctx->fib_list); + fibctx->count++; + /* + * Set the event to wake up the + * thread that will waiting. + */ + complete(&fibctx->completion); + } else { + printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); + kfree(fib); + kfree(hw_fib); + } + entry = entry->next; + } + + spin_unlock_irqrestore(&aac->fib_lock, flagv); + + if (BlinkLED < 0) { + printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n", + aac->name, BlinkLED); + goto out; + } + + printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); + +out: + aac->in_reset = 0; + return BlinkLED; +} + +static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target) +{ + return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers; +} + +static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev, + int bus, + int target) +{ + if (bus != CONTAINER_CHANNEL) + bus = aac_phys_to_logical(bus); + + return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0); +} + +static int aac_add_safw_device(struct aac_dev *dev, int bus, int target) +{ + if (bus != CONTAINER_CHANNEL) + bus = aac_phys_to_logical(bus); + + return scsi_add_device(dev->scsi_host_ptr, bus, target, 0); +} + +static void aac_put_safw_scsi_device(struct scsi_device *sdev) +{ + if (sdev) + scsi_device_put(sdev); +} + +static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target) +{ + struct scsi_device *sdev; + + sdev = aac_lookup_safw_scsi_device(dev, bus, target); + scsi_remove_device(sdev); + aac_put_safw_scsi_device(sdev); +} + +static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev, + int bus, int target) +{ + return dev->hba_map[bus][target].scan_counter == dev->scan_counter; +} + +static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target) +{ + if (is_safw_raid_volume(dev, bus, target)) + return dev->fsa_dev[target].valid; + else + return aac_is_safw_scan_count_equal(dev, bus, target); +} + +static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target) +{ + int is_exposed = 0; + struct scsi_device *sdev; + + sdev = aac_lookup_safw_scsi_device(dev, bus, target); + if (sdev) + is_exposed = 1; + aac_put_safw_scsi_device(sdev); + + return is_exposed; +} + +static int aac_update_safw_host_devices(struct aac_dev *dev) +{ + int i; + int bus; + int target; + int is_exposed = 0; + int rcode = 0; + + rcode = aac_setup_safw_adapter(dev); + if (unlikely(rcode < 0)) { + goto out; + } + + for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) { + + bus = get_bus_number(i); + target = get_target_number(i); + + 
is_exposed = aac_is_safw_device_exposed(dev, bus, target); + + if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed) + aac_add_safw_device(dev, bus, target); + else if (!aac_is_safw_target_valid(dev, bus, target) && + is_exposed) + aac_remove_safw_device(dev, bus, target); + } +out: + return rcode; +} + +static int aac_scan_safw_host(struct aac_dev *dev) +{ + int rcode = 0; + + rcode = aac_update_safw_host_devices(dev); + if (rcode) + aac_schedule_safw_scan_worker(dev); + + return rcode; +} + +int aac_scan_host(struct aac_dev *dev) +{ + int rcode = 0; + + mutex_lock(&dev->scan_mutex); + if (dev->sa_firmware) + rcode = aac_scan_safw_host(dev); + else + scsi_scan_host(dev->scsi_host_ptr); + mutex_unlock(&dev->scan_mutex); + + return rcode; +} + +void aac_src_reinit_aif_worker(struct work_struct *work) +{ + struct aac_dev *dev = container_of(to_delayed_work(work), + struct aac_dev, src_reinit_aif_worker); + + wait_event(dev->scsi_host_ptr->host_wait, + !scsi_host_in_recovery(dev->scsi_host_ptr)); + aac_reinit_aif(dev, dev->cardtype); +} + +/** + * aac_handle_sa_aif - Handle a message from the firmware + * @dev: Which adapter this fib is from + * @fibptr: Pointer to fibptr from adapter + * + * This routine handles a driver notify fib from the adapter and + * dispatches it to the appropriate routine for handling. + */ +static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr) +{ + int i; + u32 events = 0; + + if (fibptr->hbacmd_size & SA_AIF_HOTPLUG) + events = SA_AIF_HOTPLUG; + else if (fibptr->hbacmd_size & SA_AIF_HARDWARE) + events = SA_AIF_HARDWARE; + else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE) + events = SA_AIF_PDEV_CHANGE; + else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE) + events = SA_AIF_LDEV_CHANGE; + else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE) + events = SA_AIF_BPSTAT_CHANGE; + else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE) + events = SA_AIF_BPCFG_CHANGE; + + switch (events) { + case SA_AIF_HOTPLUG: + case SA_AIF_HARDWARE: + case SA_AIF_PDEV_CHANGE: + case SA_AIF_LDEV_CHANGE: + case SA_AIF_BPCFG_CHANGE: + + aac_scan_host(dev); + + break; + + case SA_AIF_BPSTAT_CHANGE: + /* currently do nothing */ + break; + } + + for (i = 1; i <= 10; ++i) { + events = src_readl(dev, MUnit.IDR); + if (events & (1<<23)) { + pr_warn(" AIF not cleared by firmware - %d/%d)\n", + i, 10); + ssleep(1); + } + } +} + +static int get_fib_count(struct aac_dev *dev) +{ + unsigned int num = 0; + struct list_head *entry; + unsigned long flagv; + + /* + * Warning: no sleep allowed while + * holding spinlock. We take the estimate + * and pre-allocate a set of fibs outside the + * lock. 
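+ * The returned value is only an estimate; fillup_pools() may end up
+ * allocating fewer entries and the consumers tolerate that.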
+ */ + num = le32_to_cpu(dev->init->r7.adapter_fibs_size) + / sizeof(struct hw_fib); /* some extra */ + spin_lock_irqsave(&dev->fib_lock, flagv); + entry = dev->fib_list.next; + while (entry != &dev->fib_list) { + entry = entry->next; + ++num; + } + spin_unlock_irqrestore(&dev->fib_lock, flagv); + + return num; +} + +static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool, + struct fib **fib_pool, + unsigned int num) +{ + struct hw_fib **hw_fib_p; + struct fib **fib_p; + + hw_fib_p = hw_fib_pool; + fib_p = fib_pool; + while (hw_fib_p < &hw_fib_pool[num]) { + *(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL); + if (!(*(hw_fib_p++))) { + --hw_fib_p; + break; + } + + *(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL); + if (!(*(fib_p++))) { + kfree(*(--hw_fib_p)); + break; + } + } + + /* + * Get the actual number of allocated fibs + */ + num = hw_fib_p - hw_fib_pool; + return num; +} + +static void wakeup_fibctx_threads(struct aac_dev *dev, + struct hw_fib **hw_fib_pool, + struct fib **fib_pool, + struct fib *fib, + struct hw_fib *hw_fib, + unsigned int num) +{ + unsigned long flagv; + struct list_head *entry; + struct hw_fib **hw_fib_p; + struct fib **fib_p; + u32 time_now, time_last; + struct hw_fib *hw_newfib; + struct fib *newfib; + struct aac_fib_context *fibctx; + + time_now = jiffies/HZ; + spin_lock_irqsave(&dev->fib_lock, flagv); + entry = dev->fib_list.next; + /* + * For each Context that is on the + * fibctxList, make a copy of the + * fib, and then set the event to wake up the + * thread that is waiting for it. + */ + + hw_fib_p = hw_fib_pool; + fib_p = fib_pool; + while (entry != &dev->fib_list) { + /* + * Extract the fibctx + */ + fibctx = list_entry(entry, struct aac_fib_context, + next); + /* + * Check if the queue is getting + * backlogged + */ + if (fibctx->count > 20) { + /* + * It's *not* jiffies folks, + * but jiffies / HZ so do not + * panic ... + */ + time_last = fibctx->jiffies; + /* + * Has it been > 2 minutes + * since the last read off + * the queue? + */ + if ((time_now - time_last) > aif_timeout) { + entry = entry->next; + aac_close_fib_context(dev, fibctx); + continue; + } + } + /* + * Warning: no sleep allowed while + * holding spinlock + */ + if (hw_fib_p >= &hw_fib_pool[num]) { + pr_warn("aifd: didn't allocate NewFib\n"); + entry = entry->next; + continue; + } + + hw_newfib = *hw_fib_p; + *(hw_fib_p++) = NULL; + newfib = *fib_p; + *(fib_p++) = NULL; + /* + * Make the copy of the FIB + */ + memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib)); + memcpy(newfib, fib, sizeof(struct fib)); + newfib->hw_fib_va = hw_newfib; + /* + * Put the FIB onto the + * fibctx's fibs + */ + list_add_tail(&newfib->fiblink, &fibctx->fib_list); + fibctx->count++; + /* + * Set the event to wake up the + * thread that is waiting. 
+ */ + complete(&fibctx->completion); + + entry = entry->next; + } + /* + * Set the status of this FIB + */ + *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); + aac_fib_adapter_complete(fib, sizeof(u32)); + spin_unlock_irqrestore(&dev->fib_lock, flagv); + +} + +static void aac_process_events(struct aac_dev *dev) +{ + struct hw_fib *hw_fib; + struct fib *fib; + unsigned long flags; + spinlock_t *t_lock; + + t_lock = dev->queues->queue[HostNormCmdQueue].lock; + spin_lock_irqsave(t_lock, flags); + + while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) { + struct list_head *entry; + struct aac_aifcmd *aifcmd; + unsigned int num; + struct hw_fib **hw_fib_pool, **hw_fib_p; + struct fib **fib_pool, **fib_p; + + set_current_state(TASK_RUNNING); + + entry = dev->queues->queue[HostNormCmdQueue].cmdq.next; + list_del(entry); + + t_lock = dev->queues->queue[HostNormCmdQueue].lock; + spin_unlock_irqrestore(t_lock, flags); + + fib = list_entry(entry, struct fib, fiblink); + hw_fib = fib->hw_fib_va; + if (dev->sa_firmware) { + /* Thor AIF */ + aac_handle_sa_aif(dev, fib); + aac_fib_adapter_complete(fib, (u16)sizeof(u32)); + goto free_fib; + } + /* + * We will process the FIB here or pass it to a + * worker thread that is TBD. We Really can't + * do anything at this point since we don't have + * anything defined for this thread to do. + */ + memset(fib, 0, sizeof(struct fib)); + fib->type = FSAFS_NTC_FIB_CONTEXT; + fib->size = sizeof(struct fib); + fib->hw_fib_va = hw_fib; + fib->data = hw_fib->data; + fib->dev = dev; + /* + * We only handle AifRequest fibs from the adapter. + */ + + aifcmd = (struct aac_aifcmd *) hw_fib->data; + if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) { + /* Handle Driver Notify Events */ + aac_handle_aif(dev, fib); + *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); + aac_fib_adapter_complete(fib, (u16)sizeof(u32)); + goto free_fib; + } + /* + * The u32 here is important and intended. 
We are using + * 32bit wrapping time to fit the adapter field + */ + + /* Sniff events */ + if (aifcmd->command == cpu_to_le32(AifCmdEventNotify) + || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) { + aac_handle_aif(dev, fib); + } + + /* + * get number of fibs to process + */ + num = get_fib_count(dev); + if (!num) + goto free_fib; + + hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *), + GFP_KERNEL); + if (!hw_fib_pool) + goto free_fib; + + fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL); + if (!fib_pool) + goto free_hw_fib_pool; + + /* + * Fill up fib pointer pools with actual fibs + * and hw_fibs + */ + num = fillup_pools(dev, hw_fib_pool, fib_pool, num); + if (!num) + goto free_mem; + + /* + * wakeup the thread that is waiting for + * the response from fw (ioctl) + */ + wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool, + fib, hw_fib, num); + +free_mem: + /* Free up the remaining resources */ + hw_fib_p = hw_fib_pool; + fib_p = fib_pool; + while (hw_fib_p < &hw_fib_pool[num]) { + kfree(*hw_fib_p); + kfree(*fib_p); + ++fib_p; + ++hw_fib_p; + } + kfree(fib_pool); +free_hw_fib_pool: + kfree(hw_fib_pool); +free_fib: + kfree(fib); + t_lock = dev->queues->queue[HostNormCmdQueue].lock; + spin_lock_irqsave(t_lock, flags); + } + /* + * There are no more AIF's + */ + t_lock = dev->queues->queue[HostNormCmdQueue].lock; + spin_unlock_irqrestore(t_lock, flags); +} + +static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str, + u32 datasize) +{ + struct aac_srb *srbcmd; + struct sgmap64 *sg64; + dma_addr_t addr; + char *dma_buf; + struct fib *fibptr; + int ret = -ENOMEM; + u32 vbus, vid; + + fibptr = aac_fib_alloc(dev); + if (!fibptr) + goto out; + + dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr, + GFP_KERNEL); + if (!dma_buf) + goto fib_free_out; + + aac_fib_init(fibptr); + + vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus); + vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target); + + srbcmd = (struct aac_srb *)fib_data(fibptr); + + srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); + srbcmd->channel = cpu_to_le32(vbus); + srbcmd->id = cpu_to_le32(vid); + srbcmd->lun = 0; + srbcmd->flags = cpu_to_le32(SRB_DataOut); + srbcmd->timeout = cpu_to_le32(10); + srbcmd->retry_limit = 0; + srbcmd->cdb_size = cpu_to_le32(12); + srbcmd->count = cpu_to_le32(datasize); + + memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); + srbcmd->cdb[0] = BMIC_OUT; + srbcmd->cdb[6] = WRITE_HOST_WELLNESS; + memcpy(dma_buf, (char *)wellness_str, datasize); + + sg64 = (struct sgmap64 *)&srbcmd->sg; + sg64->count = cpu_to_le32(1); + sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16)); + sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff)); + sg64->sg[0].count = cpu_to_le32(datasize); + + ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb), + FsaNormal, 1, 1, NULL, NULL); + + dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr); + + /* + * Do not set XferState to zero unless + * receives a response from F/W + */ + if (ret >= 0) + aac_fib_complete(fibptr); + + /* + * FIB should be freed only after + * getting the response from the F/W + */ + if (ret != -ERESTARTSYS) + goto fib_free_out; + +out: + return ret; +fib_free_out: + aac_fib_free(fibptr); + goto out; +} + +static int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now) +{ + struct tm cur_tm; + char wellness_str[] = "TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ"; + u32 datasize = sizeof(wellness_str); + time64_t local_time; + int ret = 
-ENODEV; + + if (!dev->sa_firmware) + goto out; + + local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60)); + time64_to_tm(local_time, 0, &cur_tm); + cur_tm.tm_mon += 1; + cur_tm.tm_year += 1900; + wellness_str[8] = bin2bcd(cur_tm.tm_hour); + wellness_str[9] = bin2bcd(cur_tm.tm_min); + wellness_str[10] = bin2bcd(cur_tm.tm_sec); + wellness_str[12] = bin2bcd(cur_tm.tm_mon); + wellness_str[13] = bin2bcd(cur_tm.tm_mday); + wellness_str[14] = bin2bcd(cur_tm.tm_year / 100); + wellness_str[15] = bin2bcd(cur_tm.tm_year % 100); + + ret = aac_send_wellness_command(dev, wellness_str, datasize); + +out: + return ret; +} + +static int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now) +{ + int ret = -ENOMEM; + struct fib *fibptr; + __le32 *info; + + fibptr = aac_fib_alloc(dev); + if (!fibptr) + goto out; + + aac_fib_init(fibptr); + info = (__le32 *)fib_data(fibptr); + *info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */ + ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal, + 1, 1, NULL, NULL); + + /* + * Do not set XferState to zero unless + * receives a response from F/W + */ + if (ret >= 0) + aac_fib_complete(fibptr); + + /* + * FIB should be freed only after + * getting the response from the F/W + */ + if (ret != -ERESTARTSYS) + aac_fib_free(fibptr); + +out: + return ret; +} + +/** + * aac_command_thread - command processing thread + * @data: Adapter to monitor + * + * Waits on the commandready event in it's queue. When the event gets set + * it will pull FIBs off it's queue. It will continue to pull FIBs off + * until the queue is empty. When the queue is empty it will wait for + * more FIBs. + */ + +int aac_command_thread(void *data) +{ + struct aac_dev *dev = data; + DECLARE_WAITQUEUE(wait, current); + unsigned long next_jiffies = jiffies + HZ; + unsigned long next_check_jiffies = next_jiffies; + long difference = HZ; + + /* + * We can only have one thread per adapter for AIF's. + */ + if (dev->aif_thread) + return -EINVAL; + + /* + * Let the DPC know it has a place to send the AIF's to. 
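aac_send_safw_hostttime above encodes the local wall-clock time as packed BCD bytes at fixed offsets of the wellness buffer (the template string in this copy of the patch appears to have lost a leading "<HW>" marker, likely stripped as markup during extraction). The sketch below mimics only the encoding step in plain C; bcd() is a stand-in for the kernel's bin2bcd() and the offsets simply mirror the ones used above:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Userspace stand-in for the kernel's bin2bcd(): 0-99 -> packed BCD. */
static uint8_t bcd(unsigned int val)
{
    return (uint8_t)(((val / 10) << 4) | (val % 10));
}

int main(void)
{
    unsigned char wellness[24] = { 0 };   /* stands in for the template */
    time_t t = time(NULL);
    struct tm tm;

    localtime_r(&t, &tm);
    tm.tm_mon += 1;                       /* 1..12, as above */
    tm.tm_year += 1900;                   /* full four-digit year */

    wellness[8]  = bcd(tm.tm_hour);
    wellness[9]  = bcd(tm.tm_min);
    wellness[10] = bcd(tm.tm_sec);
    wellness[12] = bcd(tm.tm_mon);
    wellness[13] = bcd(tm.tm_mday);
    wellness[14] = bcd(tm.tm_year / 100);
    wellness[15] = bcd(tm.tm_year % 100);

    for (int i = 8; i <= 15; i++)
        printf("%02x ", wellness[i]);
    printf("\n");
    return 0;
}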
+ */ + dev->aif_thread = 1; + add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait); + set_current_state(TASK_INTERRUPTIBLE); + dprintk ((KERN_INFO "aac_command_thread start\n")); + while (1) { + + aac_process_events(dev); + + /* + * Background activity + */ + if ((time_before(next_check_jiffies,next_jiffies)) + && ((difference = next_check_jiffies - jiffies) <= 0)) { + next_check_jiffies = next_jiffies; + if (aac_adapter_check_health(dev) == 0) { + difference = ((long)(unsigned)check_interval) + * HZ; + next_check_jiffies = jiffies + difference; + } else if (!dev->queues) + break; + } + if (!time_before(next_check_jiffies,next_jiffies) + && ((difference = next_jiffies - jiffies) <= 0)) { + struct timespec64 now; + int ret; + + /* Don't even try to talk to adapter if its sick */ + ret = aac_adapter_check_health(dev); + if (ret || !dev->queues) + break; + next_check_jiffies = jiffies + + ((long)(unsigned)check_interval) + * HZ; + ktime_get_real_ts64(&now); + + /* Synchronize our watches */ + if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec) + && (now.tv_nsec > (NSEC_PER_SEC / HZ))) + difference = HZ + HZ / 2 - + now.tv_nsec / (NSEC_PER_SEC / HZ); + else { + if (now.tv_nsec > NSEC_PER_SEC / 2) + ++now.tv_sec; + + if (dev->sa_firmware) + ret = + aac_send_safw_hostttime(dev, &now); + else + ret = aac_send_hosttime(dev, &now); + + difference = (long)(unsigned)update_interval*HZ; + } + next_jiffies = jiffies + difference; + if (time_before(next_check_jiffies,next_jiffies)) + difference = next_check_jiffies - jiffies; + } + if (difference <= 0) + difference = 1; + set_current_state(TASK_INTERRUPTIBLE); + + if (kthread_should_stop()) + break; + + /* + * we probably want usleep_range() here instead of the + * jiffies computation + */ + schedule_timeout(difference); + + if (kthread_should_stop()) + break; + } + if (dev->queues) + remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait); + dev->aif_thread = 0; + return 0; +} + +int aac_acquire_irq(struct aac_dev *dev) +{ + int i; + int j; + int ret = 0; + + if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) { + for (i = 0; i < dev->max_msix; i++) { + dev->aac_msix[i].vector_no = i; + dev->aac_msix[i].dev = dev; + if (request_irq(pci_irq_vector(dev->pdev, i), + dev->a_ops.adapter_intr, + 0, "aacraid", &(dev->aac_msix[i]))) { + printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n", + dev->name, dev->id, i); + for (j = 0 ; j < i ; j++) + free_irq(pci_irq_vector(dev->pdev, j), + &(dev->aac_msix[j])); + pci_disable_msix(dev->pdev); + ret = -1; + } + } + } else { + dev->aac_msix[0].vector_no = 0; + dev->aac_msix[0].dev = dev; + + if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, + IRQF_SHARED, "aacraid", + &(dev->aac_msix[0])) < 0) { + if (dev->msi) + pci_disable_msi(dev->pdev); + printk(KERN_ERR "%s%d: Interrupt unavailable.\n", + dev->name, dev->id); + ret = -1; + } + } + return ret; +} + +void aac_free_irq(struct aac_dev *dev) +{ + int i; + + if (aac_is_src(dev)) { + if (dev->max_msix > 1) { + for (i = 0; i < dev->max_msix; i++) + free_irq(pci_irq_vector(dev->pdev, i), + &(dev->aac_msix[i])); + } else { + free_irq(dev->pdev->irq, &(dev->aac_msix[0])); + } + } else { + free_irq(dev->pdev->irq, dev); + } + if (dev->msi) + pci_disable_msi(dev->pdev); + else if (dev->max_msix > 1) + pci_disable_msix(dev->pdev); +} diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c new file mode 100644 index 000000000..fbe334c59 --- /dev/null +++ b/drivers/scsi/aacraid/dpcsup.c @@ -0,0 
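In aac_acquire_irq above, one handler is registered per MSI-X vector; if any request_irq() fails, the vectors already claimed are released again before MSI-X is disabled. The same acquire-or-unwind shape as a self-contained sketch (the claim/release helpers are invented for illustration, and the sketch bails out on first failure to keep it short):

#include <stdio.h>

#define NVEC 4

/* Pretend per-vector registration; vector 2 fails to force the unwind. */
static int claim_vector(int i)
{
    if (i == 2)
        return -1;
    printf("claimed vector %d\n", i);
    return 0;
}

static void release_vector(int i)
{
    printf("released vector %d\n", i);
}

int main(void)
{
    int i, j;

    for (i = 0; i < NVEC; i++) {
        if (claim_vector(i)) {
            /* Release only what was successfully claimed (j < i),
             * mirroring the free_irq() loop above. */
            for (j = 0; j < i; j++)
                release_vector(j);
            return 1;
        }
    }
    return 0;
}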
+1,456 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Adaptec AAC series RAID controller driver + * (c) Copyright 2001 Red Hat Inc. + * + * based on the old aacraid driver that is.. + * Adaptec aacraid device driver for Linux. + * + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) + * + * Module Name: + * dpcsup.c + * + * Abstract: All DPC processing routines for the cyclone board occur here. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "aacraid.h" + +/** + * aac_response_normal - Handle command replies + * @q: Queue to read from + * + * This DPC routine will be run when the adapter interrupts us to let us + * know there is a response on our normal priority queue. We will pull off + * all QE there are and wake up all the waiters before exiting. We will + * take a spinlock out on the queue before operating on it. + */ + +unsigned int aac_response_normal(struct aac_queue * q) +{ + struct aac_dev * dev = q->dev; + struct aac_entry *entry; + struct hw_fib * hwfib; + struct fib * fib; + int consumed = 0; + unsigned long flags, mflags; + + spin_lock_irqsave(q->lock, flags); + /* + * Keep pulling response QEs off the response queue and waking + * up the waiters until there are no more QEs. We then return + * back to the system. If no response was requested we just + * deallocate the Fib here and continue. + */ + while(aac_consumer_get(dev, q, &entry)) + { + int fast; + u32 index = le32_to_cpu(entry->addr); + fast = index & 0x01; + fib = &dev->fibs[index >> 2]; + hwfib = fib->hw_fib_va; + + aac_consumer_free(dev, q, HostNormRespQueue); + /* + * Remove this fib from the Outstanding I/O queue. + * But only if it has not already been timed out. + * + * If the fib has been timed out already, then just + * continue. The caller has already been notified that + * the fib timed out. + */ + atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); + + if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) { + spin_unlock_irqrestore(q->lock, flags); + aac_fib_complete(fib); + aac_fib_free(fib); + spin_lock_irqsave(q->lock, flags); + continue; + } + spin_unlock_irqrestore(q->lock, flags); + + if (fast) { + /* + * Doctor the fib + */ + *(__le32 *)hwfib->data = cpu_to_le32(ST_OK); + hwfib->header.XferState |= cpu_to_le32(AdapterProcessed); + fib->flags |= FIB_CONTEXT_FLAG_FASTRESP; + } + + FIB_COUNTER_INCREMENT(aac_config.FibRecved); + + if (hwfib->header.Command == cpu_to_le16(NuFileSystem)) + { + __le32 *pstatus = (__le32 *)hwfib->data; + if (*pstatus & cpu_to_le32(0xffff0000)) + *pstatus = cpu_to_le32(ST_OK); + } + if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) + { + if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected)) { + FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved); + } else { + FIB_COUNTER_INCREMENT(aac_config.AsyncRecved); + } + /* + * NOTE: we cannot touch the fib after this + * call, because it may have been deallocated. 
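A small clarification of the fast-path decode at the top of aac_response_normal above: the queue entry's addr field doubles as a handle in which bit 0 marks a fast response and the higher bits, shifted right by two, select the fib from the adapter's fib array. Illustrative sketch only, with struct and field names invented for the demo:

#include <stdint.h>
#include <stdio.h>

struct demo_fib { int id; };

int main(void)
{
    struct demo_fib fibs[16];
    uint32_t handle = (5u << 2) | 0x01;   /* fib #5, fast-response bit set */
    int fast = handle & 0x01;
    struct demo_fib *fib = &fibs[handle >> 2];

    for (int i = 0; i < 16; i++)
        fibs[i].id = i;

    printf("fast=%d, fib->id=%d\n", fast, fib->id);   /* fast=1, id=5 */
    return 0;
}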
+ */ + fib->callback(fib->callback_data, fib); + } else { + unsigned long flagv; + spin_lock_irqsave(&fib->event_lock, flagv); + if (!fib->done) { + fib->done = 1; + complete(&fib->event_wait); + } + spin_unlock_irqrestore(&fib->event_lock, flagv); + + spin_lock_irqsave(&dev->manage_lock, mflags); + dev->management_fib_count--; + spin_unlock_irqrestore(&dev->manage_lock, mflags); + + FIB_COUNTER_INCREMENT(aac_config.NormalRecved); + if (fib->done == 2) { + spin_lock_irqsave(&fib->event_lock, flagv); + fib->done = 0; + spin_unlock_irqrestore(&fib->event_lock, flagv); + aac_fib_complete(fib); + aac_fib_free(fib); + } + } + consumed++; + spin_lock_irqsave(q->lock, flags); + } + + if (consumed > aac_config.peak_fibs) + aac_config.peak_fibs = consumed; + if (consumed == 0) + aac_config.zero_fibs++; + + spin_unlock_irqrestore(q->lock, flags); + return 0; +} + + +/** + * aac_command_normal - handle commands + * @q: queue to process + * + * This DPC routine will be queued when the adapter interrupts us to + * let us know there is a command on our normal priority queue. We will + * pull off all QE there are and wake up all the waiters before exiting. + * We will take a spinlock out on the queue before operating on it. + */ + +unsigned int aac_command_normal(struct aac_queue *q) +{ + struct aac_dev * dev = q->dev; + struct aac_entry *entry; + unsigned long flags; + + spin_lock_irqsave(q->lock, flags); + + /* + * Keep pulling response QEs off the response queue and waking + * up the waiters until there are no more QEs. We then return + * back to the system. + */ + while(aac_consumer_get(dev, q, &entry)) + { + struct fib fibctx; + struct hw_fib * hw_fib; + u32 index; + struct fib *fib = &fibctx; + + index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib); + hw_fib = &dev->aif_base_va[index]; + + /* + * Allocate a FIB at all costs. For non queued stuff + * we can just use the stack so we are happy. 
We need + * a fib object in order to manage the linked lists + */ + if (dev->aif_thread) + if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL) + fib = &fibctx; + + memset(fib, 0, sizeof(struct fib)); + INIT_LIST_HEAD(&fib->fiblink); + fib->type = FSAFS_NTC_FIB_CONTEXT; + fib->size = sizeof(struct fib); + fib->hw_fib_va = hw_fib; + fib->data = hw_fib->data; + fib->dev = dev; + + + if (dev->aif_thread && fib != &fibctx) { + list_add_tail(&fib->fiblink, &q->cmdq); + aac_consumer_free(dev, q, HostNormCmdQueue); + wake_up_interruptible(&q->cmdready); + } else { + aac_consumer_free(dev, q, HostNormCmdQueue); + spin_unlock_irqrestore(q->lock, flags); + /* + * Set the status of this FIB + */ + *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); + aac_fib_adapter_complete(fib, sizeof(u32)); + spin_lock_irqsave(q->lock, flags); + } + } + spin_unlock_irqrestore(q->lock, flags); + return 0; +} + +/* + * + * aac_aif_callback + * @context: the context set in the fib - here it is scsi cmd + * @fibptr: pointer to the fib + * + * Handles the AIFs - new method (SRC) + * + */ + +static void aac_aif_callback(void *context, struct fib * fibptr) +{ + struct fib *fibctx; + struct aac_dev *dev; + struct aac_aifcmd *cmd; + + fibctx = (struct fib *)context; + BUG_ON(fibptr == NULL); + dev = fibptr->dev; + + if ((fibptr->hw_fib_va->header.XferState & + cpu_to_le32(NoMoreAifDataAvailable)) || + dev->sa_firmware) { + aac_fib_complete(fibptr); + aac_fib_free(fibptr); + return; + } + + aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va); + + aac_fib_init(fibctx); + cmd = (struct aac_aifcmd *) fib_data(fibctx); + cmd->command = cpu_to_le32(AifReqEvent); + + aac_fib_send(AifRequest, + fibctx, + sizeof(struct hw_fib)-sizeof(struct aac_fibhdr), + FsaNormal, + 0, 1, + (fib_callback)aac_aif_callback, fibctx); +} + + +/* + * aac_intr_normal - Handle command replies + * @dev: Device + * @index: completion reference + * + * This DPC routine will be run when the adapter interrupts us to let us + * know there is a response on our normal priority queue. We will pull off + * all QE there are and wake up all the waiters before exiting. + */ +unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif, + int isFastResponse, struct hw_fib *aif_fib) +{ + unsigned long mflags; + dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); + if (isAif == 1) { /* AIF - common */ + struct hw_fib * hw_fib; + struct fib * fib; + struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue]; + unsigned long flags; + + /* + * Allocate a FIB. For non queued stuff we can just use + * the stack so we are happy. We need a fib object in order to + * manage the linked lists. 
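aac_command_normal above first points fib at a stack object and only upgrades it to a kmalloc()'d one when the AIF thread is running, falling back to the stack copy if the atomic allocation fails; the later fib != &fibctx test is what separates "queue it for the thread" from "answer it inline". A compact userspace analogue of that pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct item { int data; };

/* Try the heap so the object can outlive this function and be queued
 * for a worker; if allocation fails, fall back to a stack copy and
 * handle it inline.  Comparing against &local tells the cases apart. */
int main(void)
{
    struct item local;
    struct item *it = malloc(sizeof(*it));

    if (!it)
        it = &local;
    memset(it, 0, sizeof(*it));
    it->data = 42;

    if (it != &local) {
        printf("heap object: would be queued to the worker thread\n");
        free(it);               /* in the driver the worker frees it */
    } else {
        printf("stack fallback: handled inline\n");
    }
    return 0;
}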
+ */ + if ((!dev->aif_thread) + || (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC)))) + return 1; + if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) { + kfree (fib); + return 1; + } + if (dev->sa_firmware) { + fib->hbacmd_size = index; /* store event type */ + } else if (aif_fib != NULL) { + memcpy(hw_fib, aif_fib, sizeof(struct hw_fib)); + } else { + memcpy(hw_fib, (struct hw_fib *) + (((uintptr_t)(dev->regs.sa)) + index), + sizeof(struct hw_fib)); + } + INIT_LIST_HEAD(&fib->fiblink); + fib->type = FSAFS_NTC_FIB_CONTEXT; + fib->size = sizeof(struct fib); + fib->hw_fib_va = hw_fib; + fib->data = hw_fib->data; + fib->dev = dev; + + spin_lock_irqsave(q->lock, flags); + list_add_tail(&fib->fiblink, &q->cmdq); + wake_up_interruptible(&q->cmdready); + spin_unlock_irqrestore(q->lock, flags); + return 1; + } else if (isAif == 2) { /* AIF - new (SRC) */ + struct fib *fibctx; + struct aac_aifcmd *cmd; + + fibctx = aac_fib_alloc(dev); + if (!fibctx) + return 1; + aac_fib_init(fibctx); + + cmd = (struct aac_aifcmd *) fib_data(fibctx); + cmd->command = cpu_to_le32(AifReqEvent); + + return aac_fib_send(AifRequest, + fibctx, + sizeof(struct hw_fib)-sizeof(struct aac_fibhdr), + FsaNormal, + 0, 1, + (fib_callback)aac_aif_callback, fibctx); + } else { + struct fib *fib = &dev->fibs[index]; + int start_callback = 0; + + /* + * Remove this fib from the Outstanding I/O queue. + * But only if it has not already been timed out. + * + * If the fib has been timed out already, then just + * continue. The caller has already been notified that + * the fib timed out. + */ + atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); + + if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) { + aac_fib_complete(fib); + aac_fib_free(fib); + return 0; + } + + FIB_COUNTER_INCREMENT(aac_config.FibRecved); + + if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) { + + if (isFastResponse) + fib->flags |= FIB_CONTEXT_FLAG_FASTRESP; + + if (fib->callback) { + start_callback = 1; + } else { + unsigned long flagv; + int completed = 0; + + dprintk((KERN_INFO "event_wait up\n")); + spin_lock_irqsave(&fib->event_lock, flagv); + if (fib->done == 2) { + fib->done = 1; + completed = 1; + } else { + fib->done = 1; + complete(&fib->event_wait); + } + spin_unlock_irqrestore(&fib->event_lock, flagv); + + spin_lock_irqsave(&dev->manage_lock, mflags); + dev->management_fib_count--; + spin_unlock_irqrestore(&dev->manage_lock, + mflags); + + FIB_COUNTER_INCREMENT(aac_config.NativeRecved); + if (completed) + aac_fib_complete(fib); + } + } else { + struct hw_fib *hwfib = fib->hw_fib_va; + + if (isFastResponse) { + /* Doctor the fib */ + *(__le32 *)hwfib->data = cpu_to_le32(ST_OK); + hwfib->header.XferState |= + cpu_to_le32(AdapterProcessed); + fib->flags |= FIB_CONTEXT_FLAG_FASTRESP; + } + + if (hwfib->header.Command == + cpu_to_le16(NuFileSystem)) { + __le32 *pstatus = (__le32 *)hwfib->data; + + if (*pstatus & cpu_to_le32(0xffff0000)) + *pstatus = cpu_to_le32(ST_OK); + } + if (hwfib->header.XferState & + cpu_to_le32(NoResponseExpected | Async)) { + if (hwfib->header.XferState & cpu_to_le32( + NoResponseExpected)) { + FIB_COUNTER_INCREMENT( + aac_config.NoResponseRecved); + } else { + FIB_COUNTER_INCREMENT( + aac_config.AsyncRecved); + } + start_callback = 1; + } else { + unsigned long flagv; + int completed = 0; + + dprintk((KERN_INFO "event_wait up\n")); + spin_lock_irqsave(&fib->event_lock, flagv); + if (fib->done == 2) { + fib->done = 1; + completed = 1; + } else { + fib->done = 1; + complete(&fib->event_wait); + } + 
spin_unlock_irqrestore(&fib->event_lock, flagv); + + spin_lock_irqsave(&dev->manage_lock, mflags); + dev->management_fib_count--; + spin_unlock_irqrestore(&dev->manage_lock, + mflags); + + FIB_COUNTER_INCREMENT(aac_config.NormalRecved); + if (completed) + aac_fib_complete(fib); + } + } + + + if (start_callback) { + /* + * NOTE: we cannot touch the fib after this + * call, because it may have been deallocated. + */ + if (likely(fib->callback && fib->callback_data)) { + fib->callback(fib->callback_data, fib); + } else { + aac_fib_complete(fib); + aac_fib_free(fib); + } + + } + return 0; + } +} diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c new file mode 100644 index 000000000..68f4dbcff --- /dev/null +++ b/drivers/scsi/aacraid/linit.c @@ -0,0 +1,2074 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Adaptec AAC series RAID controller driver + * (c) Copyright 2001 Red Hat Inc. + * + * based on the old aacraid driver that is.. + * Adaptec aacraid device driver for Linux. + * + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) + * + * Module Name: + * linit.c + * + * Abstract: Linux Driver entry module for Adaptec RAID Array Controller + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "aacraid.h" + +#define AAC_DRIVER_VERSION "1.2.1" +#ifndef AAC_DRIVER_BRANCH +#define AAC_DRIVER_BRANCH "" +#endif +#define AAC_DRIVERNAME "aacraid" + +#ifdef AAC_DRIVER_BUILD +#define _str(x) #x +#define str(x) _str(x) +#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH +#else +#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH +#endif + +MODULE_AUTHOR("Red Hat Inc and Adaptec"); +MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, " + "Adaptec Advanced Raid Products, " + "HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(AAC_DRIVER_FULL_VERSION); + +static DEFINE_MUTEX(aac_mutex); +static LIST_HEAD(aac_devices); +static int aac_cfg_major = AAC_CHARDEV_UNREGISTERED; +char aac_driver_version[] = AAC_DRIVER_FULL_VERSION; + +/* + * Because of the way Linux names scsi devices, the order in this table has + * become important. Check for on-board Raid first, add-in cards second. + * + * Note: The last field is used to index into aac_drivers below. 
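As the comment above notes, the last (driver_data) field of each entry in the PCI table that follows is an index into the aac_drivers[] ident table further below; that is how probe code finds the right init routine and naming strings for a matched board. A condensed illustration of the scheme, with the structures trimmed to the fields that matter and hypothetical values:

#include <stdio.h>

/* Trimmed stand-ins for pci_device_id / aac_driver_ident. */
struct id_entry { unsigned vendor, device; unsigned long driver_data; };
struct ident    { const char *name; const char *model; };

static const struct ident idents[] = {
    { "percraid", "PERCRAID" },
    { "aacraid",  "catapult" },
};

static const struct id_entry ids[] = {
    { 0x1028, 0x0001, 0 },   /* -> idents[0] */
    { 0x9005, 0x0283, 1 },   /* -> idents[1] */
};

int main(void)
{
    for (size_t i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
        const struct ident *d = &idents[ids[i].driver_data];

        printf("%04x:%04x -> %s (%s)\n",
               ids[i].vendor, ids[i].device, d->name, d->model);
    }
    return 0;
}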
+ */ +static const struct pci_device_id aac_pci_tbl[] = { + { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */ + { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */ + { 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si */ + { 0x1028, 0x0004, 0x1028, 0x00d0, 0, 0, 3 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */ + { 0x1028, 0x0002, 0x1028, 0x00d1, 0, 0, 4 }, /* PERC 3/Di (Viper/PERC3DiV) */ + { 0x1028, 0x0002, 0x1028, 0x00d9, 0, 0, 5 }, /* PERC 3/Di (Lexus/PERC3DiL) */ + { 0x1028, 0x000a, 0x1028, 0x0106, 0, 0, 6 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */ + { 0x1028, 0x000a, 0x1028, 0x011b, 0, 0, 7 }, /* PERC 3/Di (Dagger/PERC3DiD) */ + { 0x1028, 0x000a, 0x1028, 0x0121, 0, 0, 8 }, /* PERC 3/Di (Boxster/PERC3DiB) */ + { 0x9005, 0x0283, 0x9005, 0x0283, 0, 0, 9 }, /* catapult */ + { 0x9005, 0x0284, 0x9005, 0x0284, 0, 0, 10 }, /* tomcat */ + { 0x9005, 0x0285, 0x9005, 0x0286, 0, 0, 11 }, /* Adaptec 2120S (Crusader) */ + { 0x9005, 0x0285, 0x9005, 0x0285, 0, 0, 12 }, /* Adaptec 2200S (Vulcan) */ + { 0x9005, 0x0285, 0x9005, 0x0287, 0, 0, 13 }, /* Adaptec 2200S (Vulcan-2m) */ + { 0x9005, 0x0285, 0x17aa, 0x0286, 0, 0, 14 }, /* Legend S220 (Legend Crusader) */ + { 0x9005, 0x0285, 0x17aa, 0x0287, 0, 0, 15 }, /* Legend S230 (Legend Vulcan) */ + + { 0x9005, 0x0285, 0x9005, 0x0288, 0, 0, 16 }, /* Adaptec 3230S (Harrier) */ + { 0x9005, 0x0285, 0x9005, 0x0289, 0, 0, 17 }, /* Adaptec 3240S (Tornado) */ + { 0x9005, 0x0285, 0x9005, 0x028a, 0, 0, 18 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */ + { 0x9005, 0x0285, 0x9005, 0x028b, 0, 0, 19 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */ + { 0x9005, 0x0286, 0x9005, 0x028c, 0, 0, 20 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */ + { 0x9005, 0x0286, 0x9005, 0x028d, 0, 0, 21 }, /* ASR-2130S (Lancer) */ + { 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */ + { 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */ + { 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */ + { 0x9005, 0x0286, 0x9005, 0x029e, 0, 0, 25 }, /* ICP9024RO (Lancer) */ + { 0x9005, 0x0286, 0x9005, 0x029f, 0, 0, 26 }, /* ICP9014RO (Lancer) */ + { 0x9005, 0x0286, 0x9005, 0x02a0, 0, 0, 27 }, /* ICP9047MA (Lancer) */ + { 0x9005, 0x0286, 0x9005, 0x02a1, 0, 0, 28 }, /* ICP9087MA (Lancer) */ + { 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5445AU (Hurricane44) */ + { 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */ + { 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */ + { 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */ + { 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */ + { 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */ + { 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */ + { 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */ + { 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */ + { 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */ + { 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */ + { 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */ + { 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */ + { 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */ 
+ { 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */ + { 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */ + { 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005 */ + { 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */ + { 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */ + { 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */ + { 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */ + { 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000 (BlackBird) */ + { 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */ + { 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */ + { 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-3800 (Hurricane44) */ + + { 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/ + { 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/ + { 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/ + { 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */ + { 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */ + + { 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */ + { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */ + { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */ + { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */ + { 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */ + { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */ + { 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */ + { 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */ + { 0,} +}; +MODULE_DEVICE_TABLE(pci, aac_pci_tbl); + +/* + * dmb - For now we add the number of channels to this structure. + * In the future we should add a fib that reports the number of channels + * for the card. 
At that time we can remove the channels from here + */ +static struct aac_driver_ident aac_drivers[] = { + { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */ + { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */ + { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si */ + { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */ + { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */ + { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */ + { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */ + { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */ + { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */ + { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */ + { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */ + { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */ + { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */ + + { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020ZCR ", 2 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025ZCR ", 2 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */ + { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2230S PCI-X ", 2 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */ + { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2130S PCI-X ", 1 }, /* ASR-2130S (Lancer) */ + { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2820SA ", 1 }, /* AAR-2820SA (Intruder) */ + { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2620SA ", 1 }, /* AAR-2620SA (Intruder) */ + { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2420SA ", 1 }, /* AAR-2420SA (Intruder) */ + { aac_rkt_init, "aacraid", "ICP ", "ICP9024RO ", 2 }, /* ICP9024RO (Lancer) */ + { aac_rkt_init, "aacraid", "ICP ", "ICP9014RO ", 1 }, /* ICP9014RO (Lancer) */ + { aac_rkt_init, "aacraid", "ICP ", "ICP9047MA ", 1 }, /* ICP9047MA (Lancer) */ + { aac_rkt_init, "aacraid", "ICP ", "ICP9087MA ", 1 }, /* ICP9087MA (Lancer) */ 
+ { aac_rkt_init, "aacraid", "ICP ", "ICP5445AU ", 1 }, /* ICP5445AU (Hurricane44) */ + { aac_rx_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */ + { aac_rx_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */ + { aac_rkt_init, "aacraid", "ICP ", "ICP9067MA ", 1 }, /* ICP9067MA (Intruder-6) */ + { NULL , "aacraid", "ADAPTEC ", "Themisto ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */ + { aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */ + { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025SA ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2410SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */ + { aac_rx_init, "aacraid", "DELL ", "CERC SR2 ", 1, AAC_QUIRK_17SG }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2810SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-21610SA SATA", 1, AAC_QUIRK_17SG }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2026ZCR ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2610SA ", 1 }, /* SATA 6Ch (Bearcat) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005 ", 1 }, /* ASR-4005 */ + { aac_rx_init, "ServeRAID","IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */ + { aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */ + { aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000 ", 1 }, /* ASR-4000 (BlackBird & AvonPark) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */ + { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-3800 ", 1 }, /* ASR-3800 (Hurricane44) */ + + { aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/ + { aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/ + { aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/ + { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */ + { aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */ + + { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */ + { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */ + { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ + { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ + { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */ + { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */ + { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */ + { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */ 
+}; + +/** + * aac_queuecommand - queue a SCSI command + * @shost: Scsi host to queue command on + * @cmd: SCSI command to queue + * + * Queues a command for execution by the associated Host Adapter. + * + * TODO: unify with aac_scsi_cmd(). + */ + +static int aac_queuecommand(struct Scsi_Host *shost, + struct scsi_cmnd *cmd) +{ + aac_priv(cmd)->owner = AAC_OWNER_LOWLEVEL; + + return aac_scsi_cmd(cmd) ? FAILED : 0; +} + +/** + * aac_info - Returns the host adapter name + * @shost: Scsi host to report on + * + * Returns a static string describing the device in question + */ + +static const char *aac_info(struct Scsi_Host *shost) +{ + struct aac_dev *dev = (struct aac_dev *)shost->hostdata; + return aac_drivers[dev->cardtype].name; +} + +/** + * aac_get_driver_ident + * @devtype: index into lookup table + * + * Returns a pointer to the entry in the driver lookup table. + */ + +struct aac_driver_ident* aac_get_driver_ident(int devtype) +{ + return &aac_drivers[devtype]; +} + +/** + * aac_biosparm - return BIOS parameters for disk + * @sdev: The scsi device corresponding to the disk + * @bdev: the block device corresponding to the disk + * @capacity: the sector capacity of the disk + * @geom: geometry block to fill in + * + * Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk. + * The default disk geometry is 64 heads, 32 sectors, and the appropriate + * number of cylinders so as not to exceed drive capacity. In order for + * disks equal to or larger than 1 GB to be addressable by the BIOS + * without exceeding the BIOS limitation of 1024 cylinders, Extended + * Translation should be enabled. With Extended Translation enabled, + * drives between 1 GB inclusive and 2 GB exclusive are given a disk + * geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive + * are given a disk geometry of 255 heads and 63 sectors. However, if + * the BIOS detects that the Extended Translation setting does not match + * the geometry in the partition table, then the translation inferred + * from the partition table will be used by the BIOS, and a warning may + * be displayed. + */ + +static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int *geom) +{ + struct diskparm *param = (struct diskparm *)geom; + unsigned char *buf; + + dprintk((KERN_DEBUG "aac_biosparm.\n")); + + /* + * Assuming extended translation is enabled - #REVISIT# + */ + if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */ + if(capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */ + param->heads = 255; + param->sectors = 63; + } else { + param->heads = 128; + param->sectors = 32; + } + } else { + param->heads = 64; + param->sectors = 32; + } + + param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors); + + /* + * Read the first 1024 bytes from the disk device, if the boot + * sector partition table is valid, search for a partition table + * entry whose end_head matches one of the standard geometry + * translations ( 64/32, 128/32, 255/63 ). 
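aac_biosparm above picks a BIOS geometry purely from capacity: 64/32 below 1 GB, 128/32 from 1 GB, and 255/63 from 2 GB, with cylinders derived from capacity / (heads * sectors) via cap_to_cyls() (the macro itself is not shown in this hunk, so the sketch below just divides). A standalone restatement of that selection:

#include <stdint.h>
#include <stdio.h>

/* Capacity is in 512-byte sectors, as in aac_biosparm(); cap_to_cyls()
 * is approximated here by a plain divide. */
static void pick_geometry(uint64_t capacity, int *heads, int *sectors,
                          uint64_t *cylinders)
{
    if (capacity >= 4ULL * 1024 * 1024) {          /* >= 2 GB */
        *heads = 255;
        *sectors = 63;
    } else if (capacity >= 2ULL * 1024 * 1024) {   /* >= 1 GB */
        *heads = 128;
        *sectors = 32;
    } else {
        *heads = 64;
        *sectors = 32;
    }
    *cylinders = capacity / (uint64_t)(*heads * *sectors);
}

int main(void)
{
    uint64_t cyl;
    int heads, sectors;

    pick_geometry(8ULL * 1024 * 1024, &heads, &sectors, &cyl);  /* 4 GB disk */
    printf("heads=%d sectors=%d cylinders=%llu\n",
           heads, sectors, (unsigned long long)cyl);
    return 0;
}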
+ */ + buf = scsi_bios_ptable(bdev); + if (!buf) + return 0; + if (*(__le16 *)(buf + 0x40) == cpu_to_le16(MSDOS_LABEL_MAGIC)) { + struct msdos_partition *first = (struct msdos_partition *)buf; + struct msdos_partition *entry = first; + int saved_cylinders = param->cylinders; + int num; + unsigned char end_head, end_sec; + + for(num = 0; num < 4; num++) { + end_head = entry->end_head; + end_sec = entry->end_sector & 0x3f; + + if(end_head == 63) { + param->heads = 64; + param->sectors = 32; + break; + } else if(end_head == 127) { + param->heads = 128; + param->sectors = 32; + break; + } else if(end_head == 254) { + param->heads = 255; + param->sectors = 63; + break; + } + entry++; + } + + if (num == 4) { + end_head = first->end_head; + end_sec = first->end_sector & 0x3f; + } + + param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors); + if (num < 4 && end_sec == param->sectors) { + if (param->cylinders != saved_cylinders) { + dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n", + param->heads, param->sectors, num)); + } + } else if (end_head > 0 || end_sec > 0) { + dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n", + end_head + 1, end_sec, num)); + dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n", + param->heads, param->sectors)); + } + } + kfree(buf); + return 0; +} + +/** + * aac_slave_configure - compute queue depths + * @sdev: SCSI device we are considering + * + * Selects queue depths for each target device based on the host adapter's + * total capacity and the queue depth supported by the target device. + * A queue depth of one automatically disables tagged queueing. + */ + +static int aac_slave_configure(struct scsi_device *sdev) +{ + struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; + int chn, tid; + unsigned int depth = 0; + unsigned int set_timeout = 0; + int timeout = 0; + bool set_qd_dev_type = false; + u8 devtype = 0; + + chn = aac_logical_to_phys(sdev_channel(sdev)); + tid = sdev_id(sdev); + if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) { + devtype = aac->hba_map[chn][tid].devtype; + + if (devtype == AAC_DEVTYPE_NATIVE_RAW) { + depth = aac->hba_map[chn][tid].qd_limit; + set_timeout = 1; + goto common_config; + } + if (devtype == AAC_DEVTYPE_ARC_RAW) { + set_qd_dev_type = true; + set_timeout = 1; + goto common_config; + } + } + + if (aac->jbod && (sdev->type == TYPE_DISK)) + sdev->removable = 1; + + if (sdev->type == TYPE_DISK + && sdev_channel(sdev) != CONTAINER_CHANNEL + && (!aac->jbod || sdev->inq_periph_qual) + && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) { + + if (expose_physicals == 0) + return -ENXIO; + + if (expose_physicals < 0) + sdev->no_uld_attach = 1; + } + + if (sdev->tagged_supported + && sdev->type == TYPE_DISK + && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) + && !sdev->no_uld_attach) { + + struct scsi_device * dev; + struct Scsi_Host *host = sdev->host; + unsigned num_lsu = 0; + unsigned num_one = 0; + unsigned cid; + + set_timeout = 1; + + for (cid = 0; cid < aac->maximum_num_containers; ++cid) + if (aac->fsa_dev[cid].valid) + ++num_lsu; + + __shost_for_each_device(dev, host) { + if (dev->tagged_supported + && dev->type == TYPE_DISK + && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) + && !dev->no_uld_attach) { + if ((sdev_channel(dev) != CONTAINER_CHANNEL) + || !aac->fsa_dev[sdev_id(dev)].valid) { + ++num_lsu; + } + } else { + ++num_one; + } + } + + if (num_lsu == 0) + ++num_lsu; + + depth = 
(host->can_queue - num_one) / num_lsu; + + if (sdev_channel(sdev) != NATIVE_CHANNEL) + goto common_config; + + set_qd_dev_type = true; + + } + +common_config: + + /* + * Check if SATA drive + */ + if (set_qd_dev_type) { + if (strncmp(sdev->vendor, "ATA", 3) == 0) + depth = 32; + else + depth = 64; + } + + /* + * Firmware has an individual device recovery time typically + * of 35 seconds, give us a margin. Thor devices can take longer in + * error recovery, hence different value. + */ + if (set_timeout) { + timeout = aac->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT; + blk_queue_rq_timeout(sdev->request_queue, timeout * HZ); + } + + if (depth > 256) + depth = 256; + else if (depth < 1) + depth = 1; + + scsi_change_queue_depth(sdev, depth); + + sdev->tagged_supported = 1; + + return 0; +} + +/** + * aac_change_queue_depth - alter queue depths + * @sdev: SCSI device we are considering + * @depth: desired queue depth + * + * Alters queue depths for target device based on the host adapter's + * total capacity and the queue depth supported by the target device. + */ + +static int aac_change_queue_depth(struct scsi_device *sdev, int depth) +{ + struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata); + int chn, tid, is_native_device = 0; + + chn = aac_logical_to_phys(sdev_channel(sdev)); + tid = sdev_id(sdev); + if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && + aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW) + is_native_device = 1; + + if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && + (sdev_channel(sdev) == CONTAINER_CHANNEL)) { + struct scsi_device * dev; + struct Scsi_Host *host = sdev->host; + unsigned num = 0; + + __shost_for_each_device(dev, host) { + if (dev->tagged_supported && (dev->type == TYPE_DISK) && + (sdev_channel(dev) == CONTAINER_CHANNEL)) + ++num; + ++num; + } + if (num >= host->can_queue) + num = host->can_queue - 1; + if (depth > (host->can_queue - num)) + depth = host->can_queue - num; + if (depth > 256) + depth = 256; + else if (depth < 2) + depth = 2; + return scsi_change_queue_depth(sdev, depth); + } else if (is_native_device) { + scsi_change_queue_depth(sdev, aac->hba_map[chn][tid].qd_limit); + } else { + scsi_change_queue_depth(sdev, 1); + } + return sdev->queue_depth; +} + +static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata); + if (sdev_channel(sdev) != CONTAINER_CHANNEL) + return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach + ? "Hidden\n" : + ((aac->jbod && (sdev->type == TYPE_DISK)) ? 
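The depth chosen in aac_slave_configure above is essentially the host queue shared across the logical units, overridden with fixed values of 32 (SATA) or 64 (other raw pass-through devices), and finally clamped to the 1..256 range. A simplified, self-contained restatement of that arithmetic (the parameter names are invented for the sketch):

#include <stdio.h>
#include <string.h>

/* Share the host queue depth among the logical units, override for raw
 * pass-through devices by vendor, then clamp to [1, 256]. */
static int pick_depth(int can_queue, int num_one, int num_lsu,
                      int raw_passthrough, const char *vendor)
{
    int depth = (can_queue - num_one) / (num_lsu ? num_lsu : 1);

    if (raw_passthrough)
        depth = strncmp(vendor, "ATA", 3) == 0 ? 32 : 64;

    if (depth > 256)
        depth = 256;
    else if (depth < 1)
        depth = 1;
    return depth;
}

int main(void)
{
    printf("container depth = %d\n", pick_depth(512, 4, 8, 0, ""));
    printf("SATA raw depth  = %d\n", pick_depth(512, 4, 8, 1, "ATA"));
    return 0;
}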
"JBOD\n" : "")); + return snprintf(buf, PAGE_SIZE, "%s\n", + get_container_type(aac->fsa_dev[sdev_id(sdev)].type)); +} + +static struct device_attribute aac_raid_level_attr = { + .attr = { + .name = "level", + .mode = S_IRUGO, + }, + .show = aac_show_raid_level +}; + +static ssize_t aac_show_unique_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata); + unsigned char sn[16]; + + memset(sn, 0, sizeof(sn)); + + if (sdev_channel(sdev) == CONTAINER_CHANNEL) + memcpy(sn, aac->fsa_dev[sdev_id(sdev)].identifier, sizeof(sn)); + + return snprintf(buf, 16 * 2 + 2, + "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n", + sn[0], sn[1], sn[2], sn[3], + sn[4], sn[5], sn[6], sn[7], + sn[8], sn[9], sn[10], sn[11], + sn[12], sn[13], sn[14], sn[15]); +} + +static struct device_attribute aac_unique_id_attr = { + .attr = { + .name = "unique_id", + .mode = 0444, + }, + .show = aac_show_unique_id +}; + + + +static struct attribute *aac_dev_attrs[] = { + &aac_raid_level_attr.attr, + &aac_unique_id_attr.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(aac_dev); + +static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd, + void __user *arg) +{ + int retval; + struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + retval = aac_adapter_check_health(dev); + if (retval) + return -EBUSY; + return aac_do_ioctl(dev, cmd, arg); +} + +struct fib_count_data { + int mlcnt; + int llcnt; + int ehcnt; + int fwcnt; + int krlcnt; +}; + +static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data) +{ + struct fib_count_data *fib_count = data; + + switch (aac_priv(scmnd)->owner) { + case AAC_OWNER_FIRMWARE: + fib_count->fwcnt++; + break; + case AAC_OWNER_ERROR_HANDLER: + fib_count->ehcnt++; + break; + case AAC_OWNER_LOWLEVEL: + fib_count->llcnt++; + break; + case AAC_OWNER_MIDLEVEL: + fib_count->mlcnt++; + break; + default: + fib_count->krlcnt++; + break; + } + return true; +} + +/* Called during SCSI EH, so we don't need to block requests */ +static int get_num_of_incomplete_fibs(struct aac_dev *aac) +{ + struct Scsi_Host *shost = aac->scsi_host_ptr; + struct device *ctrl_dev; + struct fib_count_data fcnt = { }; + + scsi_host_busy_iter(shost, fib_count_iter, &fcnt); + + ctrl_dev = &aac->pdev->dev; + + dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", fcnt.mlcnt); + dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", fcnt.llcnt); + dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", fcnt.ehcnt); + dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fcnt.fwcnt); + dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", fcnt.krlcnt); + + return fcnt.mlcnt + fcnt.llcnt + fcnt.ehcnt + fcnt.fwcnt; +} + +static int aac_eh_abort(struct scsi_cmnd* cmd) +{ + struct aac_cmd_priv *cmd_priv = aac_priv(cmd); + struct scsi_device * dev = cmd->device; + struct Scsi_Host * host = dev->host; + struct aac_dev * aac = (struct aac_dev *)host->hostdata; + int count, found; + u32 bus, cid; + int ret = FAILED; + + if (aac_adapter_check_health(aac)) + return ret; + + bus = aac_logical_to_phys(scmd_channel(cmd)); + cid = scmd_id(cmd); + if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) { + struct fib *fib; + struct aac_hba_tm_req *tmf; + int status; + u64 address; + + pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n", + AAC_DRIVERNAME, + host->host_no, sdev_channel(dev), sdev_id(dev), (int)dev->lun); + + found = 0; + for (count = 
0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { + fib = &aac->fibs[count]; + if (*(u8 *)fib->hw_fib_va != 0 && + (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) && + (fib->callback_data == cmd)) { + found = 1; + break; + } + } + if (!found) + return ret; + + /* start a HBA_TMF_ABORT_TASK TMF request */ + fib = aac_fib_alloc(aac); + if (!fib) + return ret; + + tmf = (struct aac_hba_tm_req *)fib->hw_fib_va; + memset(tmf, 0, sizeof(*tmf)); + tmf->tmf = HBA_TMF_ABORT_TASK; + tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus; + tmf->lun[1] = cmd->device->lun; + + address = (u64)fib->hw_error_pa; + tmf->error_ptr_hi = cpu_to_le32((u32)(address >> 32)); + tmf->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff)); + tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); + + fib->hbacmd_size = sizeof(*tmf); + cmd_priv->sent_command = 0; + + status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib, + (fib_callback) aac_hba_callback, + (void *) cmd); + if (status != -EINPROGRESS) { + aac_fib_complete(fib); + aac_fib_free(fib); + return ret; + } + /* Wait up to 15 secs for completion */ + for (count = 0; count < 15; ++count) { + if (cmd_priv->sent_command) { + ret = SUCCESS; + break; + } + msleep(1000); + } + + if (ret != SUCCESS) + pr_err("%s: Host adapter abort request timed out\n", + AAC_DRIVERNAME); + } else { + pr_err( + "%s: Host adapter abort request.\n" + "%s: Outstanding commands on (%d,%d,%d,%d):\n", + AAC_DRIVERNAME, AAC_DRIVERNAME, + host->host_no, sdev_channel(dev), sdev_id(dev), + (int)dev->lun); + switch (cmd->cmnd[0]) { + case SERVICE_ACTION_IN_16: + if (!(aac->raw_io_interface) || + !(aac->raw_io_64) || + ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) + break; + fallthrough; + case INQUIRY: + case READ_CAPACITY: + /* + * Mark associated FIB to not complete, + * eh handler does this + */ + for (count = 0; + count < (host->can_queue + AAC_NUM_MGT_FIB); + ++count) { + struct fib *fib = &aac->fibs[count]; + + if (fib->hw_fib_va->header.XferState && + (fib->flags & FIB_CONTEXT_FLAG) && + (fib->callback_data == cmd)) { + fib->flags |= + FIB_CONTEXT_FLAG_TIMED_OUT; + cmd_priv->owner = + AAC_OWNER_ERROR_HANDLER; + ret = SUCCESS; + } + } + break; + case TEST_UNIT_READY: + /* + * Mark associated FIB to not complete, + * eh handler does this + */ + for (count = 0; + count < (host->can_queue + AAC_NUM_MGT_FIB); + ++count) { + struct scsi_cmnd *command; + struct fib *fib = &aac->fibs[count]; + + command = fib->callback_data; + + if ((fib->hw_fib_va->header.XferState & + cpu_to_le32 + (Async | NoResponseExpected)) && + (fib->flags & FIB_CONTEXT_FLAG) && + ((command)) && + (command->device == cmd->device)) { + fib->flags |= + FIB_CONTEXT_FLAG_TIMED_OUT; + aac_priv(command)->owner = + AAC_OWNER_ERROR_HANDLER; + if (command == cmd) + ret = SUCCESS; + } + } + break; + } + } + return ret; +} + +static u8 aac_eh_tmf_lun_reset_fib(struct aac_hba_map_info *info, + struct fib *fib, u64 tmf_lun) +{ + struct aac_hba_tm_req *tmf; + u64 address; + + /* start a HBA_TMF_LUN_RESET TMF request */ + tmf = (struct aac_hba_tm_req *)fib->hw_fib_va; + memset(tmf, 0, sizeof(*tmf)); + tmf->tmf = HBA_TMF_LUN_RESET; + tmf->it_nexus = info->rmw_nexus; + int_to_scsilun(tmf_lun, (struct scsi_lun *)tmf->lun); + + address = (u64)fib->hw_error_pa; + tmf->error_ptr_hi = cpu_to_le32 + ((u32)(address >> 32)); + tmf->error_ptr_lo = cpu_to_le32 + ((u32)(address & 0xffffffff)); + tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); + fib->hbacmd_size = sizeof(*tmf); + + return HBA_IU_TYPE_SCSI_TM_REQ; +} + +static u8 
aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info, + struct fib *fib) +{ + struct aac_hba_reset_req *rst; + u64 address; + + /* already tried, start a hard reset now */ + rst = (struct aac_hba_reset_req *)fib->hw_fib_va; + memset(rst, 0, sizeof(*rst)); + rst->it_nexus = info->rmw_nexus; + + address = (u64)fib->hw_error_pa; + rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32)); + rst->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff)); + rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); + fib->hbacmd_size = sizeof(*rst); + + return HBA_IU_TYPE_SATA_REQ; +} + +static void aac_tmf_callback(void *context, struct fib *fibptr) +{ + struct aac_hba_resp *err = + &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err; + struct aac_hba_map_info *info = context; + int res; + + switch (err->service_response) { + case HBA_RESP_SVCRES_TMF_REJECTED: + res = -1; + break; + case HBA_RESP_SVCRES_TMF_LUN_INVALID: + res = 0; + break; + case HBA_RESP_SVCRES_TMF_COMPLETE: + case HBA_RESP_SVCRES_TMF_SUCCEEDED: + res = 0; + break; + default: + res = -2; + break; + } + aac_fib_complete(fibptr); + + info->reset_state = res; +} + +/* + * aac_eh_dev_reset - Device reset command handling + * @scsi_cmd: SCSI command block causing the reset + * + */ +static int aac_eh_dev_reset(struct scsi_cmnd *cmd) +{ + struct scsi_device * dev = cmd->device; + struct Scsi_Host * host = dev->host; + struct aac_dev * aac = (struct aac_dev *)host->hostdata; + struct aac_hba_map_info *info; + int count; + u32 bus, cid; + struct fib *fib; + int ret = FAILED; + int status; + u8 command; + + bus = aac_logical_to_phys(scmd_channel(cmd)); + cid = scmd_id(cmd); + + if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS) + return FAILED; + + info = &aac->hba_map[bus][cid]; + + if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW && + !(info->reset_state > 0))) + return FAILED; + + pr_err("%s: Host device reset request. SCSI hang ?\n", + AAC_DRIVERNAME); + + fib = aac_fib_alloc(aac); + if (!fib) + return ret; + + /* start a HBA_TMF_LUN_RESET TMF request */ + command = aac_eh_tmf_lun_reset_fib(info, fib, dev->lun); + + info->reset_state = 1; + + status = aac_hba_send(command, fib, + (fib_callback) aac_tmf_callback, + (void *) info); + if (status != -EINPROGRESS) { + info->reset_state = 0; + aac_fib_complete(fib); + aac_fib_free(fib); + return ret; + } + /* Wait up to 15 seconds for completion */ + for (count = 0; count < 15; ++count) { + if (info->reset_state == 0) { + ret = info->reset_state == 0 ? SUCCESS : FAILED; + break; + } + msleep(1000); + } + + return ret; +} + +/* + * aac_eh_target_reset - Target reset command handling + * @scsi_cmd: SCSI command block causing the reset + * + */ +static int aac_eh_target_reset(struct scsi_cmnd *cmd) +{ + struct scsi_device * dev = cmd->device; + struct Scsi_Host * host = dev->host; + struct aac_dev * aac = (struct aac_dev *)host->hostdata; + struct aac_hba_map_info *info; + int count; + u32 bus, cid; + int ret = FAILED; + struct fib *fib; + int status; + u8 command; + + bus = aac_logical_to_phys(scmd_channel(cmd)); + cid = scmd_id(cmd); + + if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS) + return FAILED; + + info = &aac->hba_map[bus][cid]; + + if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW && + !(info->reset_state > 0))) + return FAILED; + + pr_err("%s: Host target reset request. 
SCSI hang ?\n", + AAC_DRIVERNAME); + + fib = aac_fib_alloc(aac); + if (!fib) + return ret; + + + /* already tried, start a hard reset now */ + command = aac_eh_tmf_hard_reset_fib(info, fib); + + info->reset_state = 2; + + status = aac_hba_send(command, fib, + (fib_callback) aac_tmf_callback, + (void *) info); + + if (status != -EINPROGRESS) { + info->reset_state = 0; + aac_fib_complete(fib); + aac_fib_free(fib); + return ret; + } + + /* Wait up to 15 seconds for completion */ + for (count = 0; count < 15; ++count) { + if (info->reset_state <= 0) { + ret = info->reset_state == 0 ? SUCCESS : FAILED; + break; + } + msleep(1000); + } + + return ret; +} + +/* + * aac_eh_bus_reset - Bus reset command handling + * @scsi_cmd: SCSI command block causing the reset + * + */ +static int aac_eh_bus_reset(struct scsi_cmnd* cmd) +{ + struct scsi_device * dev = cmd->device; + struct Scsi_Host * host = dev->host; + struct aac_dev * aac = (struct aac_dev *)host->hostdata; + int count; + u32 cmd_bus; + int status = 0; + + + cmd_bus = aac_logical_to_phys(scmd_channel(cmd)); + /* Mark the assoc. FIB to not complete, eh handler does this */ + for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { + struct fib *fib = &aac->fibs[count]; + + if (fib->hw_fib_va->header.XferState && + (fib->flags & FIB_CONTEXT_FLAG) && + (fib->flags & FIB_CONTEXT_FLAG_SCSI_CMD)) { + struct aac_hba_map_info *info; + u32 bus, cid; + + cmd = (struct scsi_cmnd *)fib->callback_data; + bus = aac_logical_to_phys(scmd_channel(cmd)); + if (bus != cmd_bus) + continue; + cid = scmd_id(cmd); + info = &aac->hba_map[bus][cid]; + if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || + info->devtype != AAC_DEVTYPE_NATIVE_RAW) { + fib->flags |= FIB_CONTEXT_FLAG_EH_RESET; + aac_priv(cmd)->owner = AAC_OWNER_ERROR_HANDLER; + } + } + } + + pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME); + + /* + * Check the health of the controller + */ + status = aac_adapter_check_health(aac); + if (status) + dev_err(&aac->pdev->dev, "Adapter health - %d\n", status); + + count = get_num_of_incomplete_fibs(aac); + return (count == 0) ? SUCCESS : FAILED; +} + +/* + * aac_eh_host_reset - Host reset command handling + * @scsi_cmd: SCSI command block causing the reset + * + */ +static int aac_eh_host_reset(struct scsi_cmnd *cmd) +{ + struct scsi_device * dev = cmd->device; + struct Scsi_Host * host = dev->host; + struct aac_dev * aac = (struct aac_dev *)host->hostdata; + int ret = FAILED; + __le32 supported_options2 = 0; + bool is_mu_reset; + bool is_ignore_reset; + bool is_doorbell_reset; + + /* + * Check if reset is supported by the firmware + */ + supported_options2 = aac->supplement_adapter_info.supported_options2; + is_mu_reset = supported_options2 & AAC_OPTION_MU_RESET; + is_doorbell_reset = supported_options2 & AAC_OPTION_DOORBELL_RESET; + is_ignore_reset = supported_options2 & AAC_OPTION_IGNORE_RESET; + /* + * This adapter needs a blind reset, only do so for + * Adapters that support a register, instead of a commanded, + * reset. 
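Both EH reset handlers above send the TMF fib asynchronously and then poll info->reset_state once a second for up to 15 seconds, relying on aac_tmf_callback to drop the state to zero (success) or a negative value (rejected). A stripped-down userspace analogue of that wait loop, with the firmware callback simulated:

#include <stdio.h>
#include <unistd.h>

static volatile int reset_state = 1;    /* 1 = request still outstanding */

/* Stand-in for the TMF callback flipping the state on completion. */
static void fake_callback(void)
{
    reset_state = 0;
}

int main(void)
{
    int count, ret = -1;

    for (count = 0; count < 15; ++count) {
        if (count == 2)                 /* pretend the firmware answered */
            fake_callback();
        if (reset_state <= 0) {
            ret = (reset_state == 0) ? 0 : -1;
            break;
        }
        sleep(1);
    }
    printf("%s\n", ret == 0 ? "SUCCESS" : "FAILED");
    return 0;
}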
+ */ + if ((is_mu_reset || is_doorbell_reset) + && aac_check_reset + && (aac_check_reset != -1 || !is_ignore_reset)) { + /* Bypass wait for command quiesce */ + if (aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET) == 0) + ret = SUCCESS; + } + /* + * Reset EH state + */ + if (ret == SUCCESS) { + int bus, cid; + struct aac_hba_map_info *info; + + for (bus = 0; bus < AAC_MAX_BUSES; bus++) { + for (cid = 0; cid < AAC_MAX_TARGETS; cid++) { + info = &aac->hba_map[bus][cid]; + if (info->devtype == AAC_DEVTYPE_NATIVE_RAW) + info->reset_state = 0; + } + } + } + return ret; +} + +/** + * aac_cfg_open - open a configuration file + * @inode: inode being opened + * @file: file handle attached + * + * Called when the configuration device is opened. Does the needed + * set up on the handle and then returns + * + * Bugs: This needs extending to check a given adapter is present + * so we can support hot plugging, and to ref count adapters. + */ + +static int aac_cfg_open(struct inode *inode, struct file *file) +{ + struct aac_dev *aac; + unsigned minor_number = iminor(inode); + int err = -ENODEV; + + mutex_lock(&aac_mutex); /* BKL pushdown: nothing else protects this list */ + list_for_each_entry(aac, &aac_devices, entry) { + if (aac->id == minor_number) { + file->private_data = aac; + err = 0; + break; + } + } + mutex_unlock(&aac_mutex); + + return err; +} + +/** + * aac_cfg_ioctl - AAC configuration request + * @file: file handle + * @cmd: ioctl command code + * @arg: argument + * + * Handles a configuration ioctl. Currently this involves wrapping it + * up and feeding it into the nasty windowsalike glue layer. + * + * Bugs: Needs locking against parallel ioctls lower down + * Bugs: Needs to handle hot plugging + */ + +static long aac_cfg_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct aac_dev *aac = (struct aac_dev *)file->private_data; + + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + return aac_do_ioctl(aac, cmd, (void __user *)arg); +} + +static ssize_t aac_show_model(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; + int len; + + if (dev->supplement_adapter_info.adapter_type_text[0]) { + char *cp = dev->supplement_adapter_info.adapter_type_text; + while (*cp && *cp != ' ') + ++cp; + while (*cp == ' ') + ++cp; + len = snprintf(buf, PAGE_SIZE, "%s\n", cp); + } else + len = snprintf(buf, PAGE_SIZE, "%s\n", + aac_drivers[dev->cardtype].model); + return len; +} + +static ssize_t aac_show_vendor(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; + struct aac_supplement_adapter_info *sup_adap_info; + int len; + + sup_adap_info = &dev->supplement_adapter_info; + if (sup_adap_info->adapter_type_text[0]) { + char *cp = sup_adap_info->adapter_type_text; + while (*cp && *cp != ' ') + ++cp; + len = snprintf(buf, PAGE_SIZE, "%.*s\n", + (int)(cp - (char *)sup_adap_info->adapter_type_text), + sup_adap_info->adapter_type_text); + } else + len = snprintf(buf, PAGE_SIZE, "%s\n", + aac_drivers[dev->cardtype].vname); + return len; +} + +static ssize_t aac_show_flags(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + int len = 0; + struct aac_dev *dev = (struct aac_dev*)class_to_shost(cdev)->hostdata; + + if (nblank(dprintk(x))) + len = snprintf(buf, PAGE_SIZE, "dprintk\n"); +#ifdef AAC_DETAILED_STATUS_INFO + len += scnprintf(buf + len, PAGE_SIZE - len, + "AAC_DETAILED_STATUS_INFO\n"); 
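aac_show_flags above appends to the sysfs buffer with scnprintf() while tracking len; unlike snprintf(), scnprintf() returns the number of characters actually written, so the remaining-space argument can never go negative. A userspace stand-in that shows why that return-value convention matters for this accumulation pattern:

#include <stdarg.h>
#include <stdio.h>

/* Minimal userspace stand-in for the kernel's scnprintf(): returns the
 * number of characters actually written (excluding the NUL), never more
 * than size - 1, so "len +=" accumulation stays bounded. */
static int scnprintf_demo(char *buf, size_t size, const char *fmt, ...)
{
    va_list ap;
    int n;

    if (size == 0)
        return 0;
    va_start(ap, fmt);
    n = vsnprintf(buf, size, fmt, ap);
    va_end(ap);
    if (n < 0)
        return 0;
    return n < (int)size ? n : (int)size - 1;
}

int main(void)
{
    char buf[64];   /* stands in for the PAGE_SIZE sysfs buffer */
    int len = 0;

    len += scnprintf_demo(buf + len, sizeof(buf) - len, "dprintk\n");
    len += scnprintf_demo(buf + len, sizeof(buf) - len, "SUPPORTED_JBOD\n");
    len += scnprintf_demo(buf + len, sizeof(buf) - len, "PCI_HAS_MSI\n");

    fputs(buf, stdout);
    return 0;
}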
+#endif + if (dev->raw_io_interface && dev->raw_io_64) + len += scnprintf(buf + len, PAGE_SIZE - len, + "SAI_READ_CAPACITY_16\n"); + if (dev->jbod) + len += scnprintf(buf + len, PAGE_SIZE - len, + "SUPPORTED_JBOD\n"); + if (dev->supplement_adapter_info.supported_options2 & + AAC_OPTION_POWER_MANAGEMENT) + len += scnprintf(buf + len, PAGE_SIZE - len, + "SUPPORTED_POWER_MANAGEMENT\n"); + if (dev->msi) + len += scnprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n"); + return len; +} + +static ssize_t aac_show_kernel_version(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; + int len, tmp; + + tmp = le32_to_cpu(dev->adapter_info.kernelrev); + len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n", + tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff, + le32_to_cpu(dev->adapter_info.kernelbuild)); + return len; +} + +static ssize_t aac_show_monitor_version(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; + int len, tmp; + + tmp = le32_to_cpu(dev->adapter_info.monitorrev); + len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n", + tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff, + le32_to_cpu(dev->adapter_info.monitorbuild)); + return len; +} + +static ssize_t aac_show_bios_version(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; + int len, tmp; + + tmp = le32_to_cpu(dev->adapter_info.biosrev); + len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n", + tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff, + le32_to_cpu(dev->adapter_info.biosbuild)); + return len; +} + +static ssize_t aac_show_driver_version(struct device *device, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", aac_driver_version); +} + +static ssize_t aac_show_serial_number(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; + int len = 0; + + if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0) + len = snprintf(buf, 16, "%06X\n", + le32_to_cpu(dev->adapter_info.serial[0])); + if (len && + !memcmp(&dev->supplement_adapter_info.mfg_pcba_serial_no[ + sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no)-len], + buf, len-1)) + len = snprintf(buf, 16, "%.*s\n", + (int)sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no), + dev->supplement_adapter_info.mfg_pcba_serial_no); + + return min(len, 16); +} + +static ssize_t aac_show_max_channel(struct device *device, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", + class_to_shost(device)->max_channel); +} + +static ssize_t aac_show_max_id(struct device *device, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", + class_to_shost(device)->max_id); +} + +static ssize_t aac_store_reset_adapter(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int retval = -EACCES; + + if (!capable(CAP_SYS_ADMIN)) + return retval; + + retval = aac_reset_adapter(shost_priv(class_to_shost(device)), + buf[0] == '!', IOP_HWSOFT_RESET); + if (retval >= 0) + retval = count; + + return retval; +} + +static ssize_t aac_show_reset_adapter(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; + int len, tmp; + + tmp = 
aac_adapter_check_health(dev); + if ((tmp == 0) && dev->in_reset) + tmp = -EBUSY; + len = snprintf(buf, PAGE_SIZE, "0x%x\n", tmp); + return len; +} + +static struct device_attribute aac_model = { + .attr = { + .name = "model", + .mode = S_IRUGO, + }, + .show = aac_show_model, +}; +static struct device_attribute aac_vendor = { + .attr = { + .name = "vendor", + .mode = S_IRUGO, + }, + .show = aac_show_vendor, +}; +static struct device_attribute aac_flags = { + .attr = { + .name = "flags", + .mode = S_IRUGO, + }, + .show = aac_show_flags, +}; +static struct device_attribute aac_kernel_version = { + .attr = { + .name = "hba_kernel_version", + .mode = S_IRUGO, + }, + .show = aac_show_kernel_version, +}; +static struct device_attribute aac_monitor_version = { + .attr = { + .name = "hba_monitor_version", + .mode = S_IRUGO, + }, + .show = aac_show_monitor_version, +}; +static struct device_attribute aac_bios_version = { + .attr = { + .name = "hba_bios_version", + .mode = S_IRUGO, + }, + .show = aac_show_bios_version, +}; +static struct device_attribute aac_lld_version = { + .attr = { + .name = "driver_version", + .mode = 0444, + }, + .show = aac_show_driver_version, +}; +static struct device_attribute aac_serial_number = { + .attr = { + .name = "serial_number", + .mode = S_IRUGO, + }, + .show = aac_show_serial_number, +}; +static struct device_attribute aac_max_channel = { + .attr = { + .name = "max_channel", + .mode = S_IRUGO, + }, + .show = aac_show_max_channel, +}; +static struct device_attribute aac_max_id = { + .attr = { + .name = "max_id", + .mode = S_IRUGO, + }, + .show = aac_show_max_id, +}; +static struct device_attribute aac_reset = { + .attr = { + .name = "reset_host", + .mode = S_IWUSR|S_IRUGO, + }, + .store = aac_store_reset_adapter, + .show = aac_show_reset_adapter, +}; + +static struct attribute *aac_host_attrs[] = { + &aac_model.attr, + &aac_vendor.attr, + &aac_flags.attr, + &aac_kernel_version.attr, + &aac_monitor_version.attr, + &aac_bios_version.attr, + &aac_lld_version.attr, + &aac_serial_number.attr, + &aac_max_channel.attr, + &aac_max_id.attr, + &aac_reset.attr, + NULL +}; + +ATTRIBUTE_GROUPS(aac_host); + +ssize_t aac_get_serial_number(struct device *device, char *buf) +{ + return aac_show_serial_number(device, &aac_serial_number, buf); +} + +static const struct file_operations aac_cfg_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = aac_cfg_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = aac_cfg_ioctl, +#endif + .open = aac_cfg_open, + .llseek = noop_llseek, +}; + +static const struct scsi_host_template aac_driver_template = { + .module = THIS_MODULE, + .name = "AAC", + .proc_name = AAC_DRIVERNAME, + .info = aac_info, + .ioctl = aac_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = aac_ioctl, +#endif + .queuecommand = aac_queuecommand, + .bios_param = aac_biosparm, + .shost_groups = aac_host_groups, + .slave_configure = aac_slave_configure, + .change_queue_depth = aac_change_queue_depth, + .sdev_groups = aac_dev_groups, + .eh_abort_handler = aac_eh_abort, + .eh_device_reset_handler = aac_eh_dev_reset, + .eh_target_reset_handler = aac_eh_target_reset, + .eh_bus_reset_handler = aac_eh_bus_reset, + .eh_host_reset_handler = aac_eh_host_reset, + .can_queue = AAC_NUM_IO_FIB, + .this_id = MAXIMUM_NUM_CONTAINERS, + .sg_tablesize = 16, + .max_sectors = 128, +#if (AAC_NUM_IO_FIB > 256) + .cmd_per_lun = 256, +#else + .cmd_per_lun = AAC_NUM_IO_FIB, +#endif + .emulated = 1, + .no_write_same = 1, + .cmd_size = sizeof(struct aac_cmd_priv), +}; + +static void __aac_shutdown(struct aac_dev * 
aac) +{ + int i; + + mutex_lock(&aac->ioctl_mutex); + aac->adapter_shutdown = 1; + mutex_unlock(&aac->ioctl_mutex); + + if (aac->aif_thread) { + int i; + /* Clear out events first */ + for (i = 0; i < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++) { + struct fib *fib = &aac->fibs[i]; + if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) && + (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) + complete(&fib->event_wait); + } + kthread_stop(aac->thread); + aac->thread = NULL; + } + + aac_send_shutdown(aac); + + aac_adapter_disable_int(aac); + + if (aac_is_src(aac)) { + if (aac->max_msix > 1) { + for (i = 0; i < aac->max_msix; i++) { + free_irq(pci_irq_vector(aac->pdev, i), + &(aac->aac_msix[i])); + } + } else { + free_irq(aac->pdev->irq, + &(aac->aac_msix[0])); + } + } else { + free_irq(aac->pdev->irq, aac); + } + if (aac->msi) + pci_disable_msi(aac->pdev); + else if (aac->max_msix > 1) + pci_disable_msix(aac->pdev); +} +static void aac_init_char(void) +{ + aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops); + if (aac_cfg_major < 0) { + pr_err("aacraid: unable to register \"aac\" device.\n"); + } +} + +void aac_reinit_aif(struct aac_dev *aac, unsigned int index) +{ + /* + * Firmware may send a AIF messages very early and the Driver may have + * ignored as it is not fully ready to process the messages. Send + * AIF to firmware so that if there are any unprocessed events they + * can be processed now. + */ + if (aac_drivers[index].quirks & AAC_QUIRK_SRC) + aac_intr_normal(aac, 0, 2, 0, NULL); + +} + +static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + unsigned index = id->driver_data; + struct Scsi_Host *shost; + struct aac_dev *aac; + struct list_head *insert = &aac_devices; + int error; + int unique_id = 0; + u64 dmamask; + int mask_bits = 0; + extern int aac_sync_mode; + + /* + * Only series 7 needs freset. 
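+ *
+ * Series 7 (PMC_DEVICE_S7) boards are flagged with pdev->needs_freset
+ * below so that PCI error recovery performs a fundamental reset on
+ * them; all other boards keep the default behaviour.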
+ */ + if (pdev->device == PMC_DEVICE_S7) + pdev->needs_freset = 1; + + list_for_each_entry(aac, &aac_devices, entry) { + if (aac->id > unique_id) + break; + insert = &aac->entry; + unique_id++; + } + + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + error = pci_enable_device(pdev); + if (error) + goto out; + + if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) { + error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (error) { + dev_err(&pdev->dev, "PCI 32 BIT dma mask set failed"); + goto out_disable_pdev; + } + } + + /* + * If the quirk31 bit is set, the adapter needs adapter + * to driver communication memory to be allocated below 2gig + */ + if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) { + dmamask = DMA_BIT_MASK(31); + mask_bits = 31; + } else { + dmamask = DMA_BIT_MASK(32); + mask_bits = 32; + } + + error = dma_set_coherent_mask(&pdev->dev, dmamask); + if (error) { + dev_err(&pdev->dev, "PCI %d B consistent dma mask set failed\n" + , mask_bits); + goto out_disable_pdev; + } + + pci_set_master(pdev); + + shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev)); + if (!shost) { + error = -ENOMEM; + goto out_disable_pdev; + } + + shost->irq = pdev->irq; + shost->unique_id = unique_id; + shost->max_cmd_len = 16; + + if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT) + aac_init_char(); + + aac = (struct aac_dev *)shost->hostdata; + aac->base_start = pci_resource_start(pdev, 0); + aac->scsi_host_ptr = shost; + aac->pdev = pdev; + aac->name = aac_driver_template.name; + aac->id = shost->unique_id; + aac->cardtype = index; + INIT_LIST_HEAD(&aac->entry); + + if (aac_reset_devices || reset_devices) + aac->init_reset = true; + + aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB, + sizeof(struct fib), + GFP_KERNEL); + if (!aac->fibs) { + error = -ENOMEM; + goto out_free_host; + } + + spin_lock_init(&aac->fib_lock); + + mutex_init(&aac->ioctl_mutex); + mutex_init(&aac->scan_mutex); + + INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker); + INIT_DELAYED_WORK(&aac->src_reinit_aif_worker, + aac_src_reinit_aif_worker); + /* + * Map in the registers from the adapter. + */ + aac->base_size = AAC_MIN_FOOTPRINT_SIZE; + if ((*aac_drivers[index].init)(aac)) { + error = -ENODEV; + goto out_unmap; + } + + if (aac->sync_mode) { + if (aac_sync_mode) + printk(KERN_INFO "%s%d: Sync. mode enforced " + "by driver parameter. This will cause " + "a significant performance decrease!\n", + aac->name, + aac->id); + else + printk(KERN_INFO "%s%d: Async. mode not supported " + "by current driver, sync. mode enforced." 
+ "\nPlease update driver to get full performance.\n", + aac->name, + aac->id); + } + + /* + * Start any kernel threads needed + */ + aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME); + if (IS_ERR(aac->thread)) { + printk(KERN_ERR "aacraid: Unable to create command thread.\n"); + error = PTR_ERR(aac->thread); + aac->thread = NULL; + goto out_deinit; + } + + aac->maximum_num_channels = aac_drivers[index].channels; + error = aac_get_adapter_info(aac); + if (error < 0) + goto out_deinit; + + /* + * Lets override negotiations and drop the maximum SG limit to 34 + */ + if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) && + (shost->sg_tablesize > 34)) { + shost->sg_tablesize = 34; + shost->max_sectors = (shost->sg_tablesize * 8) + 112; + } + + if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) && + (shost->sg_tablesize > 17)) { + shost->sg_tablesize = 17; + shost->max_sectors = (shost->sg_tablesize * 8) + 112; + } + + if (aac->adapter_info.options & AAC_OPT_NEW_COMM) + shost->max_segment_size = shost->max_sectors << 9; + else + shost->max_segment_size = 65536; + + /* + * Firmware printf works only with older firmware. + */ + if (aac_drivers[index].quirks & AAC_QUIRK_34SG) + aac->printf_enabled = 1; + else + aac->printf_enabled = 0; + + /* + * max channel will be the physical channels plus 1 virtual channel + * all containers are on the virtual channel 0 (CONTAINER_CHANNEL) + * physical channels are address by their actual physical number+1 + */ + if (aac->nondasd_support || expose_physicals || aac->jbod) + shost->max_channel = aac->maximum_num_channels; + else + shost->max_channel = 0; + + aac_get_config_status(aac, 0); + aac_get_containers(aac); + list_add(&aac->entry, insert); + + shost->max_id = aac->maximum_num_containers; + if (shost->max_id < aac->maximum_num_physicals) + shost->max_id = aac->maximum_num_physicals; + if (shost->max_id < MAXIMUM_NUM_CONTAINERS) + shost->max_id = MAXIMUM_NUM_CONTAINERS; + else + shost->this_id = shost->max_id; + + if (!aac->sa_firmware && aac_drivers[index].quirks & AAC_QUIRK_SRC) + aac_intr_normal(aac, 0, 2, 0, NULL); + + /* + * dmb - we may need to move the setting of these parms somewhere else once + * we get a fib that can report the actual numbers + */ + shost->max_lun = AAC_MAX_LUN; + + pci_set_drvdata(pdev, shost); + + error = scsi_add_host(shost, &pdev->dev); + if (error) + goto out_deinit; + + aac_scan_host(aac); + + pci_save_state(pdev); + + return 0; + + out_deinit: + __aac_shutdown(aac); + out_unmap: + aac_fib_map_free(aac); + if (aac->comm_addr) + dma_free_coherent(&aac->pdev->dev, aac->comm_size, + aac->comm_addr, aac->comm_phys); + kfree(aac->queues); + aac_adapter_ioremap(aac, 0); + kfree(aac->fibs); + kfree(aac->fsa_dev); + out_free_host: + scsi_host_put(shost); + out_disable_pdev: + pci_disable_device(pdev); + out: + return error; +} + +static void aac_release_resources(struct aac_dev *aac) +{ + aac_adapter_disable_int(aac); + aac_free_irq(aac); +} + +static int aac_acquire_resources(struct aac_dev *dev) +{ + unsigned long status; + /* + * First clear out all interrupts. Then enable the one's that we + * can handle. 
+ */ + while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING) + || status == 0xffffffff) + msleep(20); + + aac_adapter_disable_int(dev); + aac_adapter_enable_int(dev); + + + if (aac_is_src(dev)) + aac_define_int_mode(dev); + + if (dev->msi_enabled) + aac_src_access_devreg(dev, AAC_ENABLE_MSIX); + + if (aac_acquire_irq(dev)) + goto error_iounmap; + + aac_adapter_enable_int(dev); + + /*max msix may change after EEH + * Re-assign vectors to fibs + */ + aac_fib_vector_assign(dev); + + if (!dev->sync_mode) { + /* After EEH recovery or suspend resume, max_msix count + * may change, therefore updating in init as well. + */ + dev->init->r7.no_of_msix_vectors = cpu_to_le32(dev->max_msix); + aac_adapter_start(dev); + } + return 0; + +error_iounmap: + return -1; + +} + +static int __maybe_unused aac_suspend(struct device *dev) +{ + struct Scsi_Host *shost = dev_get_drvdata(dev); + struct aac_dev *aac = (struct aac_dev *)shost->hostdata; + + scsi_host_block(shost); + aac_cancel_rescan_worker(aac); + aac_send_shutdown(aac); + + aac_release_resources(aac); + + return 0; +} + +static int __maybe_unused aac_resume(struct device *dev) +{ + struct Scsi_Host *shost = dev_get_drvdata(dev); + struct aac_dev *aac = (struct aac_dev *)shost->hostdata; + + if (aac_acquire_resources(aac)) + goto fail_device; + /* + * reset this flag to unblock ioctl() as it was set at + * aac_send_shutdown() to block ioctls from upperlayer + */ + aac->adapter_shutdown = 0; + scsi_host_unblock(shost, SDEV_RUNNING); + + return 0; + +fail_device: + printk(KERN_INFO "%s%d: resume failed.\n", aac->name, aac->id); + scsi_host_put(shost); + return -ENODEV; +} + +static void aac_shutdown(struct pci_dev *dev) +{ + struct Scsi_Host *shost = pci_get_drvdata(dev); + + scsi_host_block(shost); + __aac_shutdown((struct aac_dev *)shost->hostdata); +} + +static void aac_remove_one(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct aac_dev *aac = (struct aac_dev *)shost->hostdata; + + aac_cancel_rescan_worker(aac); + scsi_remove_host(shost); + + __aac_shutdown(aac); + aac_fib_map_free(aac); + dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr, + aac->comm_phys); + kfree(aac->queues); + + aac_adapter_ioremap(aac, 0); + + kfree(aac->fibs); + kfree(aac->fsa_dev); + + list_del(&aac->entry); + scsi_host_put(shost); + pci_disable_device(pdev); + if (list_empty(&aac_devices)) { + unregister_chrdev(aac_cfg_major, "aac"); + aac_cfg_major = AAC_CHARDEV_NEEDS_REINIT; + } +} + +static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev, + pci_channel_state_t error) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct aac_dev *aac = shost_priv(shost); + + dev_err(&pdev->dev, "aacraid: PCI error detected %x\n", error); + + switch (error) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + aac->handle_pci_error = 1; + + scsi_host_block(shost); + aac_cancel_rescan_worker(aac); + scsi_host_complete_all_commands(shost, DID_NO_CONNECT); + aac_release_resources(aac); + + aac_adapter_ioremap(aac, 0); + + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + aac->handle_pci_error = 1; + + scsi_host_complete_all_commands(shost, DID_NO_CONNECT); + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t aac_pci_mmio_enabled(struct pci_dev *pdev) +{ + dev_err(&pdev->dev, "aacraid: PCI error - mmio enabled\n"); + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t 
aac_pci_slot_reset(struct pci_dev *pdev) +{ + dev_err(&pdev->dev, "aacraid: PCI error - slot reset\n"); + pci_restore_state(pdev); + if (pci_enable_device(pdev)) { + dev_warn(&pdev->dev, + "aacraid: failed to enable slave\n"); + goto fail_device; + } + + pci_set_master(pdev); + + if (pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, "pci_enable_device_mem failed\n"); + goto fail_device; + } + + return PCI_ERS_RESULT_RECOVERED; + +fail_device: + dev_err(&pdev->dev, "aacraid: PCI error - slot reset failed\n"); + return PCI_ERS_RESULT_DISCONNECT; +} + + +static void aac_pci_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct aac_dev *aac = (struct aac_dev *)shost_priv(shost); + + if (aac_adapter_ioremap(aac, aac->base_size)) { + + dev_err(&pdev->dev, "aacraid: ioremap failed\n"); + /* remap failed, go back ... */ + aac->comm_interface = AAC_COMM_PRODUCER; + if (aac_adapter_ioremap(aac, AAC_MIN_FOOTPRINT_SIZE)) { + dev_warn(&pdev->dev, + "aacraid: unable to map adapter.\n"); + + return; + } + } + + msleep(10000); + + aac_acquire_resources(aac); + + /* + * reset this flag to unblock ioctl() as it was set + * at aac_send_shutdown() to block ioctls from upperlayer + */ + aac->adapter_shutdown = 0; + aac->handle_pci_error = 0; + + scsi_host_unblock(shost, SDEV_RUNNING); + aac_scan_host(aac); + pci_save_state(pdev); + + dev_err(&pdev->dev, "aacraid: PCI error - resume\n"); +} + +static struct pci_error_handlers aac_pci_err_handler = { + .error_detected = aac_pci_error_detected, + .mmio_enabled = aac_pci_mmio_enabled, + .slot_reset = aac_pci_slot_reset, + .resume = aac_pci_resume, +}; + +static SIMPLE_DEV_PM_OPS(aac_pm_ops, aac_suspend, aac_resume); + +static struct pci_driver aac_pci_driver = { + .name = AAC_DRIVERNAME, + .id_table = aac_pci_tbl, + .probe = aac_probe_one, + .remove = aac_remove_one, + .driver.pm = &aac_pm_ops, + .shutdown = aac_shutdown, + .err_handler = &aac_pci_err_handler, +}; + +static int __init aac_init(void) +{ + int error; + + printk(KERN_INFO "Adaptec %s driver %s\n", + AAC_DRIVERNAME, aac_driver_version); + + error = pci_register_driver(&aac_pci_driver); + if (error < 0) + return error; + + aac_init_char(); + + + return 0; +} + +static void __exit aac_exit(void) +{ + if (aac_cfg_major > -1) + unregister_chrdev(aac_cfg_major, "aac"); + pci_unregister_driver(&aac_pci_driver); +} + +module_init(aac_init); +module_exit(aac_exit); diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c new file mode 100644 index 000000000..4745a99fb --- /dev/null +++ b/drivers/scsi/aacraid/nark.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Adaptec AAC series RAID controller driver + * + * based on the old aacraid driver that is.. + * Adaptec aacraid device driver for Linux. + * + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. 
(aacraid@microsemi.com) + * + * Module Name: + * nark.c + * + * Abstract: Hardware Device Interface for NEMER/ARK + */ + +#include +#include + +#include + +#include "aacraid.h" + +/** + * aac_nark_ioremap + * @dev: device to ioremap + * @size: mapping resize request + * + */ +static int aac_nark_ioremap(struct aac_dev * dev, u32 size) +{ + if (!size) { + iounmap(dev->regs.rx); + dev->regs.rx = NULL; + iounmap(dev->base); + dev->base = NULL; + return 0; + } + dev->base_start = pci_resource_start(dev->pdev, 2); + dev->regs.rx = ioremap((u64)pci_resource_start(dev->pdev, 0) | + ((u64)pci_resource_start(dev->pdev, 1) << 32), + sizeof(struct rx_registers) - sizeof(struct rx_inbound)); + dev->base = NULL; + if (dev->regs.rx == NULL) + return -1; + dev->base = ioremap(dev->base_start, size); + if (dev->base == NULL) { + iounmap(dev->regs.rx); + dev->regs.rx = NULL; + return -1; + } + dev->IndexRegs = &((struct rx_registers __iomem *)dev->base)->IndexRegs; + return 0; +} + +/** + * aac_nark_init - initialize an NEMER/ARK Split Bar card + * @dev: device to configure + * + */ + +int aac_nark_init(struct aac_dev * dev) +{ + /* + * Fill in the function dispatch table. + */ + dev->a_ops.adapter_ioremap = aac_nark_ioremap; + dev->a_ops.adapter_comm = aac_rx_select_comm; + + return _aac_rx_init(dev); +} diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c new file mode 100644 index 000000000..8ebc67e54 --- /dev/null +++ b/drivers/scsi/aacraid/rkt.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Adaptec AAC series RAID controller driver + * (c) Copyright 2001 Red Hat Inc. + * + * based on the old aacraid driver that is.. + * Adaptec aacraid device driver for Linux. + * + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) + * + * Module Name: + * rkt.c + * + * Abstract: Hardware miniport for Drawbridge specific hardware functions. + */ + +#include + +#include + +#include "aacraid.h" + +#define AAC_NUM_IO_FIB_RKT (246 - AAC_NUM_MGT_FIB) + +/** + * aac_rkt_select_comm - Select communications method + * @dev: Adapter + * @comm: communications method + */ + +static int aac_rkt_select_comm(struct aac_dev *dev, int comm) +{ + int retval; + retval = aac_rx_select_comm(dev, comm); + if (comm == AAC_COMM_MESSAGE) { + /* + * FIB Setup has already been done, but we can minimize the + * damage by at least ensuring the OS never issues more + * commands than we can handle. The Rocket adapters currently + * can only handle 246 commands and 8 AIFs at the same time, + * and in fact do notify us accordingly if we negotiate the + * FIB size. The problem that causes us to add this check is + * to ensure that we do not overdo it with the adapter when a + * hard coded FIB override is being utilized. This special + * case warrants this half baked, but convenient, check here. 
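+ *
+ * Concretely: if the SCSI host's can_queue exceeds
+ * AAC_NUM_IO_FIB_RKT (246 minus the management FIBs), can_queue is
+ * clamped to that value and max_io_commands in the init structure is
+ * set back to the full 246 below.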
+ */ + if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) { + dev->init->r7.max_io_commands = + cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB); + dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT; + } + } + return retval; +} + +/** + * aac_rkt_ioremap + * @dev: device to ioremap + * @size: mapping resize request + * + */ +static int aac_rkt_ioremap(struct aac_dev * dev, u32 size) +{ + if (!size) { + iounmap(dev->regs.rkt); + return 0; + } + dev->base = dev->regs.rkt = ioremap(dev->base_start, size); + if (dev->base == NULL) + return -1; + dev->IndexRegs = &dev->regs.rkt->IndexRegs; + return 0; +} + +/** + * aac_rkt_init - initialize an i960 based AAC card + * @dev: device to configure + * + * Allocate and set up resources for the i960 based AAC variants. The + * device_interface in the commregion will be allocated and linked + * to the comm region. + */ + +int aac_rkt_init(struct aac_dev *dev) +{ + /* + * Fill in the function dispatch table. + */ + dev->a_ops.adapter_ioremap = aac_rkt_ioremap; + dev->a_ops.adapter_comm = aac_rkt_select_comm; + + return _aac_rx_init(dev); +} diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c new file mode 100644 index 000000000..e06ff83b6 --- /dev/null +++ b/drivers/scsi/aacraid/rx.c @@ -0,0 +1,683 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Adaptec AAC series RAID controller driver + * (c) Copyright 2001 Red Hat Inc. + * + * based on the old aacraid driver that is.. + * Adaptec aacraid device driver for Linux. + * + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) + * + * Module Name: + * rx.c + * + * Abstract: Hardware miniport for Drawbridge specific hardware functions. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "aacraid.h" + +static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id) +{ + struct aac_dev *dev = dev_id; + unsigned long bellbits; + u8 intstat = rx_readb(dev, MUnit.OISR); + + /* + * Read mask and invert because drawbridge is reversed. + * This allows us to only service interrupts that have + * been enabled. + * Check to see if this is our interrupt. 
If it isn't just return + */ + if (likely(intstat & ~(dev->OIMR))) { + bellbits = rx_readl(dev, OutboundDoorbellReg); + if (unlikely(bellbits & DoorBellPrintfReady)) { + aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5])); + rx_writel(dev, MUnit.ODR,DoorBellPrintfReady); + rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone); + } + else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) { + rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady); + aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); + } + else if (likely(bellbits & DoorBellAdapterNormRespReady)) { + rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady); + aac_response_normal(&dev->queues->queue[HostNormRespQueue]); + } + else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) { + rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); + } + else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) { + rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); + rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull); + } + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +static irqreturn_t aac_rx_intr_message(int irq, void *dev_id) +{ + int isAif, isFastResponse, isSpecial; + struct aac_dev *dev = dev_id; + u32 Index = rx_readl(dev, MUnit.OutboundQueue); + if (unlikely(Index == 0xFFFFFFFFL)) + Index = rx_readl(dev, MUnit.OutboundQueue); + if (likely(Index != 0xFFFFFFFFL)) { + do { + isAif = isFastResponse = isSpecial = 0; + if (Index & 0x00000002L) { + isAif = 1; + if (Index == 0xFFFFFFFEL) + isSpecial = 1; + Index &= ~0x00000002L; + } else { + if (Index & 0x00000001L) + isFastResponse = 1; + Index >>= 2; + } + if (!isSpecial) { + if (unlikely(aac_intr_normal(dev, + Index, isAif, + isFastResponse, NULL))) { + rx_writel(dev, + MUnit.OutboundQueue, + Index); + rx_writel(dev, + MUnit.ODR, + DoorBellAdapterNormRespReady); + } + } + Index = rx_readl(dev, MUnit.OutboundQueue); + } while (Index != 0xFFFFFFFFL); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +/** + * aac_rx_disable_interrupt - Disable interrupts + * @dev: Adapter + */ + +static void aac_rx_disable_interrupt(struct aac_dev *dev) +{ + rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff); +} + +/** + * aac_rx_enable_interrupt_producer - Enable interrupts + * @dev: Adapter + */ + +static void aac_rx_enable_interrupt_producer(struct aac_dev *dev) +{ + rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb); +} + +/** + * aac_rx_enable_interrupt_message - Enable interrupts + * @dev: Adapter + */ + +static void aac_rx_enable_interrupt_message(struct aac_dev *dev) +{ + rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); +} + +/** + * rx_sync_cmd - send a command and wait + * @dev: Adapter + * @command: Command to execute + * @p1: first parameter + * @p2: second parameter + * @p3: third parameter + * @p4: forth parameter + * @p5: fifth parameter + * @p6: sixth parameter + * @status: adapter status + * @r1: first return value + * @r2: second return value + * @r3: third return value + * @r4: forth return value + * + * This routine will send a synchronous command to the adapter and wait + * for its completion. 
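+ *
+ * The handshake used below: the command goes into Mailbox 0 and the
+ * parameters into the following mailboxes, doorbell interrupts are
+ * masked, INBOUNDDOORBELL_0 tells the monitor a new synchronous
+ * command is pending, and completion is polled on OUTBOUNDDOORBELL_0
+ * for up to 30 seconds before the status and return values are read
+ * back from the mailboxes. Returns 0 on success or -ETIMEDOUT if the
+ * monitor never answers.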
+ */ + +static int rx_sync_cmd(struct aac_dev *dev, u32 command, + u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, + u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4) +{ + unsigned long start; + int ok; + /* + * Write the command into Mailbox 0 + */ + writel(command, &dev->IndexRegs->Mailbox[0]); + /* + * Write the parameters into Mailboxes 1 - 6 + */ + writel(p1, &dev->IndexRegs->Mailbox[1]); + writel(p2, &dev->IndexRegs->Mailbox[2]); + writel(p3, &dev->IndexRegs->Mailbox[3]); + writel(p4, &dev->IndexRegs->Mailbox[4]); + /* + * Clear the synch command doorbell to start on a clean slate. + */ + rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0); + /* + * Disable doorbell interrupts + */ + rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff); + /* + * Force the completion of the mask register write before issuing + * the interrupt. + */ + rx_readb (dev, MUnit.OIMR); + /* + * Signal that there is a new synch command + */ + rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0); + + ok = 0; + start = jiffies; + + /* + * Wait up to 30 seconds + */ + while (time_before(jiffies, start+30*HZ)) + { + udelay(5); /* Delay 5 microseconds to let Mon960 get info. */ + /* + * Mon960 will set doorbell0 bit when it has completed the command. + */ + if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) { + /* + * Clear the doorbell. + */ + rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0); + ok = 1; + break; + } + /* + * Yield the processor in case we are slow + */ + msleep(1); + } + if (unlikely(ok != 1)) { + /* + * Restore interrupt mask even though we timed out + */ + aac_adapter_enable_int(dev); + return -ETIMEDOUT; + } + /* + * Pull the synch status from Mailbox 0. + */ + if (status) + *status = readl(&dev->IndexRegs->Mailbox[0]); + if (r1) + *r1 = readl(&dev->IndexRegs->Mailbox[1]); + if (r2) + *r2 = readl(&dev->IndexRegs->Mailbox[2]); + if (r3) + *r3 = readl(&dev->IndexRegs->Mailbox[3]); + if (r4) + *r4 = readl(&dev->IndexRegs->Mailbox[4]); + /* + * Clear the synch command doorbell. + */ + rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0); + /* + * Restore interrupt mask + */ + aac_adapter_enable_int(dev); + return 0; + +} + +/** + * aac_rx_interrupt_adapter - interrupt adapter + * @dev: Adapter + * + * Send an interrupt to the i960 and breakpoint it. + */ + +static void aac_rx_interrupt_adapter(struct aac_dev *dev) +{ + rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); +} + +/** + * aac_rx_notify_adapter - send an event to the adapter + * @dev: Adapter + * @event: Event to send + * + * Notify the i960 that something it probably cares about has + * happened. 
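+ *
+ * Each event maps to a single inbound doorbell bit in MUnit.IDR (for
+ * example AdapNormCmdQue rings INBOUNDDOORBELL_1). HostShutdown is
+ * intentionally a no-op here and an unknown event is treated as a
+ * driver bug.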
+ */ + +static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event) +{ + switch (event) { + + case AdapNormCmdQue: + rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1); + break; + case HostNormRespNotFull: + rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4); + break; + case AdapNormRespQue: + rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2); + break; + case HostNormCmdNotFull: + rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3); + break; + case HostShutdown: + break; + case FastIo: + rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6); + break; + case AdapPrintfDone: + rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5); + break; + default: + BUG(); + break; + } +} + +/** + * aac_rx_start_adapter - activate adapter + * @dev: Adapter + * + * Start up processing on an i960 based AAC adapter + */ + +static void aac_rx_start_adapter(struct aac_dev *dev) +{ + union aac_init *init; + + init = dev->init; + init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds()); + // We can only use a 32 bit address here + rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, + 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); +} + +/** + * aac_rx_check_health + * @dev: device to check if healthy + * + * Will attempt to determine if the specified adapter is alive and + * capable of handling requests, returning 0 if alive. + */ +static int aac_rx_check_health(struct aac_dev *dev) +{ + u32 status = rx_readl(dev, MUnit.OMRx[0]); + + /* + * Check to see if the board failed any self tests. + */ + if (unlikely(status & SELF_TEST_FAILED)) + return -1; + /* + * Check to see if the board panic'd. + */ + if (unlikely(status & KERNEL_PANIC)) { + char * buffer; + struct POSTSTATUS { + __le32 Post_Command; + __le32 Post_Address; + } * post; + dma_addr_t paddr, baddr; + int ret; + + if (likely((status & 0xFF000000L) == 0xBC000000L)) + return (status >> 16) & 0xFF; + buffer = dma_alloc_coherent(&dev->pdev->dev, 512, &baddr, + GFP_KERNEL); + ret = -2; + if (unlikely(buffer == NULL)) + return ret; + post = dma_alloc_coherent(&dev->pdev->dev, + sizeof(struct POSTSTATUS), &paddr, + GFP_KERNEL); + if (unlikely(post == NULL)) { + dma_free_coherent(&dev->pdev->dev, 512, buffer, baddr); + return ret; + } + memset(buffer, 0, 512); + post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS); + post->Post_Address = cpu_to_le32(baddr); + rx_writel(dev, MUnit.IMRx[0], paddr); + rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0, + NULL, NULL, NULL, NULL, NULL); + dma_free_coherent(&dev->pdev->dev, sizeof(struct POSTSTATUS), + post, paddr); + if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) { + ret = (hex_to_bin(buffer[2]) << 4) + + hex_to_bin(buffer[3]); + } + dma_free_coherent(&dev->pdev->dev, 512, buffer, baddr); + return ret; + } + /* + * Wait for the adapter to be up and running. + */ + if (unlikely(!(status & KERNEL_UP_AND_RUNNING))) + return -3; + /* + * Everything is OK + */ + return 0; +} + +/** + * aac_rx_deliver_producer + * @fib: fib to issue + * + * Will send a fib, returning 0 if successful. 
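+ *
+ * The FIB is placed on AdapNormCmdQueue and the producer index is
+ * advanced; the adapter is then notified unless the nointr flag
+ * returned by aac_queue_get() combines with aac_config.irq_mod to
+ * suppress the doorbell.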
+ */ +int aac_rx_deliver_producer(struct fib * fib) +{ + struct aac_dev *dev = fib->dev; + struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; + u32 Index; + unsigned long nointr = 0; + + aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr); + + atomic_inc(&q->numpending); + *(q->headers.producer) = cpu_to_le32(Index + 1); + if (!(nointr & aac_config.irq_mod)) + aac_adapter_notify(dev, AdapNormCmdQueue); + + return 0; +} + +/** + * aac_rx_deliver_message + * @fib: fib to issue + * + * Will send a fib, returning 0 if successful. + */ +static int aac_rx_deliver_message(struct fib * fib) +{ + struct aac_dev *dev = fib->dev; + struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; + u32 Index; + u64 addr; + volatile void __iomem *device; + + unsigned long count = 10000000L; /* 50 seconds */ + atomic_inc(&q->numpending); + for(;;) { + Index = rx_readl(dev, MUnit.InboundQueue); + if (unlikely(Index == 0xFFFFFFFFL)) + Index = rx_readl(dev, MUnit.InboundQueue); + if (likely(Index != 0xFFFFFFFFL)) + break; + if (--count == 0) { + atomic_dec(&q->numpending); + return -ETIMEDOUT; + } + udelay(5); + } + device = dev->base + Index; + addr = fib->hw_fib_pa; + writel((u32)(addr & 0xffffffff), device); + device += sizeof(u32); + writel((u32)(addr >> 32), device); + device += sizeof(u32); + writel(le16_to_cpu(fib->hw_fib_va->header.Size), device); + rx_writel(dev, MUnit.InboundQueue, Index); + return 0; +} + +/** + * aac_rx_ioremap + * @dev: adapter + * @size: mapping resize request + * + */ +static int aac_rx_ioremap(struct aac_dev * dev, u32 size) +{ + if (!size) { + iounmap(dev->regs.rx); + return 0; + } + dev->base = dev->regs.rx = ioremap(dev->base_start, size); + if (dev->base == NULL) + return -1; + dev->IndexRegs = &dev->regs.rx->IndexRegs; + return 0; +} + +static int aac_rx_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) +{ + u32 var = 0; + + if (!(dev->supplement_adapter_info.supported_options2 & + AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) { + if (bled) + printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", + dev->name, dev->id, bled); + else { + bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, + 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL); + if (!bled && (var != 0x00000001) && (var != 0x3803000F)) + bled = -EINVAL; + } + if (bled && (bled != -ETIMEDOUT)) + bled = aac_adapter_sync_cmd(dev, IOP_RESET, + 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL); + + if (bled && (bled != -ETIMEDOUT)) + return -EINVAL; + } + if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */ + rx_writel(dev, MUnit.reserved2, 3); + msleep(5000); /* Delay 5 seconds */ + var = 0x00000001; + } + if (bled && (var != 0x00000001)) + return -EINVAL; + ssleep(5); + if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) + return -ENODEV; + if (startup_timeout < 300) + startup_timeout = 300; + return 0; +} + +/** + * aac_rx_select_comm - Select communications method + * @dev: Adapter + * @comm: communications method + */ + +int aac_rx_select_comm(struct aac_dev *dev, int comm) +{ + switch (comm) { + case AAC_COMM_PRODUCER: + dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer; + dev->a_ops.adapter_intr = aac_rx_intr_producer; + dev->a_ops.adapter_deliver = aac_rx_deliver_producer; + break; + case AAC_COMM_MESSAGE: + dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message; + dev->a_ops.adapter_intr = aac_rx_intr_message; + dev->a_ops.adapter_deliver = aac_rx_deliver_message; + break; + default: + return 1; + } + return 0; +} + +/** + * 
_aac_rx_init - initialize an i960 based AAC card + * @dev: device to configure + * + * Allocate and set up resources for the i960 based AAC variants. The + * device_interface in the commregion will be allocated and linked + * to the comm region. + */ + +int _aac_rx_init(struct aac_dev *dev) +{ + unsigned long start; + unsigned long status; + int restart = 0; + int instance = dev->id; + const char * name = dev->name; + + if (aac_adapter_ioremap(dev, dev->base_size)) { + printk(KERN_WARNING "%s: unable to map adapter.\n", name); + goto error_iounmap; + } + + /* Failure to reset here is an option ... */ + dev->a_ops.adapter_sync_cmd = rx_sync_cmd; + dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt; + dev->OIMR = status = rx_readb (dev, MUnit.OIMR); + + if (((status & 0x0c) != 0x0c) || dev->init_reset) { + dev->init_reset = false; + if (!aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) { + /* Make sure the Hardware FIFO is empty */ + while ((++restart < 512) && + (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL)); + } + } + + /* + * Check to see if the board panic'd while booting. + */ + status = rx_readl(dev, MUnit.OMRx[0]); + if (status & KERNEL_PANIC) { + if (aac_rx_restart_adapter(dev, + aac_rx_check_health(dev), IOP_HWSOFT_RESET)) + goto error_iounmap; + ++restart; + } + /* + * Check to see if the board failed any self tests. + */ + status = rx_readl(dev, MUnit.OMRx[0]); + if (status & SELF_TEST_FAILED) { + printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance); + goto error_iounmap; + } + /* + * Check to see if the monitor panic'd while booting. + */ + if (status & MONITOR_PANIC) { + printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance); + goto error_iounmap; + } + start = jiffies; + /* + * Wait for the adapter to be up and running. Wait up to 3 minutes + */ + while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING)) + { + if ((restart && + (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || + time_after(jiffies, start+HZ*startup_timeout)) { + printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", + dev->name, instance, status); + goto error_iounmap; + } + if (!restart && + ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) || + time_after(jiffies, start + HZ * + ((startup_timeout > 60) + ? (startup_timeout - 60) + : (startup_timeout / 2))))) { + if (likely(!aac_rx_restart_adapter(dev, + aac_rx_check_health(dev), IOP_HWSOFT_RESET))) + start = jiffies; + ++restart; + } + msleep(1); + } + if (restart && aac_commit) + aac_commit = 1; + /* + * Fill in the common function dispatch table. + */ + dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter; + dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt; + dev->a_ops.adapter_notify = aac_rx_notify_adapter; + dev->a_ops.adapter_sync_cmd = rx_sync_cmd; + dev->a_ops.adapter_check_health = aac_rx_check_health; + dev->a_ops.adapter_restart = aac_rx_restart_adapter; + dev->a_ops.adapter_start = aac_rx_start_adapter; + + /* + * First clear out all interrupts. Then enable the one's that we + * can handle. + */ + aac_adapter_comm(dev, AAC_COMM_PRODUCER); + aac_adapter_disable_int(dev); + rx_writel(dev, MUnit.ODR, 0xffffffff); + aac_adapter_enable_int(dev); + + if (aac_init_adapter(dev) == NULL) + goto error_iounmap; + aac_adapter_comm(dev, dev->comm_interface); + dev->sync_mode = 0; /* sync. 
mode not supported */ + dev->msi = aac_msi && !pci_enable_msi(dev->pdev); + if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, + IRQF_SHARED, "aacraid", dev) < 0) { + if (dev->msi) + pci_disable_msi(dev->pdev); + printk(KERN_ERR "%s%d: Interrupt unavailable.\n", + name, instance); + goto error_iounmap; + } + dev->dbg_base = dev->base_start; + dev->dbg_base_mapped = dev->base; + dev->dbg_size = dev->base_size; + + aac_adapter_enable_int(dev); + /* + * Tell the adapter that all is configured, and it can + * start accepting requests + */ + aac_rx_start_adapter(dev); + + return 0; + +error_iounmap: + + return -1; +} + +int aac_rx_init(struct aac_dev *dev) +{ + /* + * Fill in the function dispatch table. + */ + dev->a_ops.adapter_ioremap = aac_rx_ioremap; + dev->a_ops.adapter_comm = aac_rx_select_comm; + + return _aac_rx_init(dev); +} diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c new file mode 100644 index 000000000..c9a1dad2f --- /dev/null +++ b/drivers/scsi/aacraid/sa.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Adaptec AAC series RAID controller driver + * (c) Copyright 2001 Red Hat Inc. + * + * based on the old aacraid driver that is.. + * Adaptec aacraid device driver for Linux. + * + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) + * + * Module Name: + * sa.c + * + * Abstract: Drawbridge specific support functions + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "aacraid.h" + +static irqreturn_t aac_sa_intr(int irq, void *dev_id) +{ + struct aac_dev *dev = dev_id; + unsigned short intstat, mask; + + intstat = sa_readw(dev, DoorbellReg_p); + /* + * Read mask and invert because drawbridge is reversed. + * This allows us to only service interrupts that have been enabled. + */ + mask = ~(sa_readw(dev, SaDbCSR.PRISETIRQMASK)); + + /* Check to see if this is our interrupt. If it isn't just return */ + + if (intstat & mask) { + if (intstat & PrintfReady) { + aac_printf(dev, sa_readl(dev, Mailbox5)); + sa_writew(dev, DoorbellClrReg_p, PrintfReady); /* clear PrintfReady */ + sa_writew(dev, DoorbellReg_s, PrintfDone); + } else if (intstat & DOORBELL_1) { // dev -> Host Normal Command Ready + sa_writew(dev, DoorbellClrReg_p, DOORBELL_1); + aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); + } else if (intstat & DOORBELL_2) { // dev -> Host Normal Response Ready + sa_writew(dev, DoorbellClrReg_p, DOORBELL_2); + aac_response_normal(&dev->queues->queue[HostNormRespQueue]); + } else if (intstat & DOORBELL_3) { // dev -> Host Normal Command Not Full + sa_writew(dev, DoorbellClrReg_p, DOORBELL_3); + } else if (intstat & DOORBELL_4) { // dev -> Host Normal Response Not Full + sa_writew(dev, DoorbellClrReg_p, DOORBELL_4); + } + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +/** + * aac_sa_disable_interrupt - disable interrupt + * @dev: Which adapter to enable. + */ + +static void aac_sa_disable_interrupt (struct aac_dev *dev) +{ + sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff); +} + +/** + * aac_sa_enable_interrupt - enable interrupt + * @dev: Which adapter to enable. 
+ */ + +static void aac_sa_enable_interrupt (struct aac_dev *dev) +{ + sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | + DOORBELL_2 | DOORBELL_3 | DOORBELL_4)); +} + +/** + * aac_sa_notify_adapter - handle adapter notification + * @dev: Adapter that notification is for + * @event: Event to notidy + * + * Notify the adapter of an event + */ + +static void aac_sa_notify_adapter(struct aac_dev *dev, u32 event) +{ + switch (event) { + + case AdapNormCmdQue: + sa_writew(dev, DoorbellReg_s,DOORBELL_1); + break; + case HostNormRespNotFull: + sa_writew(dev, DoorbellReg_s,DOORBELL_4); + break; + case AdapNormRespQue: + sa_writew(dev, DoorbellReg_s,DOORBELL_2); + break; + case HostNormCmdNotFull: + sa_writew(dev, DoorbellReg_s,DOORBELL_3); + break; + case HostShutdown: + /* + sa_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, 0, 0, + NULL, NULL, NULL, NULL, NULL); + */ + break; + case FastIo: + sa_writew(dev, DoorbellReg_s,DOORBELL_6); + break; + case AdapPrintfDone: + sa_writew(dev, DoorbellReg_s,DOORBELL_5); + break; + default: + BUG(); + break; + } +} + + +/** + * sa_sync_cmd - send a command and wait + * @dev: Adapter + * @command: Command to execute + * @p1: first parameter + * @p2: second parameter + * @p3: third parameter + * @p4: forth parameter + * @p5: fifth parameter + * @p6: sixth parameter + * @ret: adapter status + * @r1: first return value + * @r2: second return value + * @r3: third return value + * @r4: forth return value + * + * This routine will send a synchronous command to the adapter and wait + * for its completion. + */ +static int sa_sync_cmd(struct aac_dev *dev, u32 command, + u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, + u32 *ret, u32 *r1, u32 *r2, u32 *r3, u32 *r4) +{ + unsigned long start; + int ok; + /* + * Write the Command into Mailbox 0 + */ + sa_writel(dev, Mailbox0, command); + /* + * Write the parameters into Mailboxes 1 - 4 + */ + sa_writel(dev, Mailbox1, p1); + sa_writel(dev, Mailbox2, p2); + sa_writel(dev, Mailbox3, p3); + sa_writel(dev, Mailbox4, p4); + + /* + * Clear the synch command doorbell to start on a clean slate. + */ + sa_writew(dev, DoorbellClrReg_p, DOORBELL_0); + /* + * Signal that there is a new synch command + */ + sa_writew(dev, DoorbellReg_s, DOORBELL_0); + + ok = 0; + start = jiffies; + + while(time_before(jiffies, start+30*HZ)) + { + /* + * Delay 5uS so that the monitor gets access + */ + udelay(5); + /* + * Mon110 will set doorbell0 bit when it has + * completed the command. + */ + if(sa_readw(dev, DoorbellReg_p) & DOORBELL_0) { + ok = 1; + break; + } + msleep(1); + } + + if (ok != 1) + return -ETIMEDOUT; + /* + * Clear the synch command doorbell. + */ + sa_writew(dev, DoorbellClrReg_p, DOORBELL_0); + /* + * Pull the synch status from Mailbox 0. + */ + if (ret) + *ret = sa_readl(dev, Mailbox0); + if (r1) + *r1 = sa_readl(dev, Mailbox1); + if (r2) + *r2 = sa_readl(dev, Mailbox2); + if (r3) + *r3 = sa_readl(dev, Mailbox3); + if (r4) + *r4 = sa_readl(dev, Mailbox4); + return 0; +} + +/** + * aac_sa_interrupt_adapter - interrupt an adapter + * @dev: Which adapter to enable. + * + * Breakpoint an adapter. + */ + +static void aac_sa_interrupt_adapter (struct aac_dev *dev) +{ + sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, + NULL, NULL, NULL, NULL, NULL); +} + +/** + * aac_sa_start_adapter - activate adapter + * @dev: Adapter + * + * Start up processing on an ARM based AAC adapter + */ + +static void aac_sa_start_adapter(struct aac_dev *dev) +{ + union aac_init *init; + /* + * Fill in the remaining pieces of the init. 
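+ * Only the host's elapsed-seconds stamp is still missing at this
+ * point; it is filled in here and INIT_STRUCT_BASE_ADDRESS is then
+ * issued with the 32-bit physical address of the init structure.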
+ */ + init = dev->init; + init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds()); + /* We can only use a 32 bit address here */ + sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, + (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0, + NULL, NULL, NULL, NULL, NULL); +} + +static int aac_sa_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) +{ + return -EINVAL; +} + +/** + * aac_sa_check_health + * @dev: device to check if healthy + * + * Will attempt to determine if the specified adapter is alive and + * capable of handling requests, returning 0 if alive. + */ +static int aac_sa_check_health(struct aac_dev *dev) +{ + long status = sa_readl(dev, Mailbox7); + + /* + * Check to see if the board failed any self tests. + */ + if (status & SELF_TEST_FAILED) + return -1; + /* + * Check to see if the board panic'd while booting. + */ + if (status & KERNEL_PANIC) + return -2; + /* + * Wait for the adapter to be up and running. Wait up to 3 minutes + */ + if (!(status & KERNEL_UP_AND_RUNNING)) + return -3; + /* + * Everything is OK + */ + return 0; +} + +/** + * aac_sa_ioremap + * @dev: device to ioremap + * @size: mapping resize request + * + */ +static int aac_sa_ioremap(struct aac_dev * dev, u32 size) +{ + if (!size) { + iounmap(dev->regs.sa); + return 0; + } + dev->base = dev->regs.sa = ioremap(dev->base_start, size); + return (dev->base == NULL) ? -1 : 0; +} + +/** + * aac_sa_init - initialize an ARM based AAC card + * @dev: device to configure + * + * Allocate and set up resources for the ARM based AAC variants. The + * device_interface in the commregion will be allocated and linked + * to the comm region. + */ + +int aac_sa_init(struct aac_dev *dev) +{ + unsigned long start; + unsigned long status; + int instance; + const char *name; + + instance = dev->id; + name = dev->name; + + /* + * Fill in the function dispatch table. + */ + + dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter; + dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt; + dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt; + dev->a_ops.adapter_notify = aac_sa_notify_adapter; + dev->a_ops.adapter_sync_cmd = sa_sync_cmd; + dev->a_ops.adapter_check_health = aac_sa_check_health; + dev->a_ops.adapter_restart = aac_sa_restart_adapter; + dev->a_ops.adapter_start = aac_sa_start_adapter; + dev->a_ops.adapter_intr = aac_sa_intr; + dev->a_ops.adapter_deliver = aac_rx_deliver_producer; + dev->a_ops.adapter_ioremap = aac_sa_ioremap; + + if (aac_sa_ioremap(dev, dev->base_size)) { + printk(KERN_WARNING "%s: unable to map adapter.\n", name); + goto error_iounmap; + } + + /* + * Check to see if the board failed any self tests. + */ + if (sa_readl(dev, Mailbox7) & SELF_TEST_FAILED) { + printk(KERN_WARNING "%s%d: adapter self-test failed.\n", name, instance); + goto error_iounmap; + } + /* + * Check to see if the board panic'd while booting. + */ + if (sa_readl(dev, Mailbox7) & KERNEL_PANIC) { + printk(KERN_WARNING "%s%d: adapter kernel panic'd.\n", name, instance); + goto error_iounmap; + } + start = jiffies; + /* + * Wait for the adapter to be up and running. Wait up to 3 minutes. + */ + while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) { + if (time_after(jiffies, start+startup_timeout*HZ)) { + status = sa_readl(dev, Mailbox7); + printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %lx.\n", + name, instance, status); + goto error_iounmap; + } + msleep(1); + } + + /* + * First clear out all interrupts. Then enable the one's that + * we can handle. 
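+ *
+ * aac_adapter_disable_int() masks every interrupt source via
+ * PRISETIRQMASK; aac_adapter_enable_int() then unmasks only the
+ * printf and doorbell interrupts this driver actually services.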
+ */ + aac_adapter_disable_int(dev); + aac_adapter_enable_int(dev); + + if(aac_init_adapter(dev) == NULL) + goto error_irq; + dev->sync_mode = 0; /* sync. mode not supported */ + if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, + IRQF_SHARED, "aacraid", (void *)dev) < 0) { + printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", + name, instance); + goto error_iounmap; + } + dev->dbg_base = dev->base_start; + dev->dbg_base_mapped = dev->base; + dev->dbg_size = dev->base_size; + + aac_adapter_enable_int(dev); + + /* + * Tell the adapter that all is configure, and it can start + * accepting requests + */ + aac_sa_start_adapter(dev); + return 0; + +error_irq: + aac_sa_disable_interrupt(dev); + free_irq(dev->pdev->irq, (void *)dev); + +error_iounmap: + + return -1; +} + diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c new file mode 100644 index 000000000..11ef58204 --- /dev/null +++ b/drivers/scsi/aacraid/src.c @@ -0,0 +1,1436 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Adaptec AAC series RAID controller driver + * (c) Copyright 2001 Red Hat Inc. + * + * based on the old aacraid driver that is.. + * Adaptec aacraid device driver for Linux. + * + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) + * + * Module Name: + * src.c + * + * Abstract: Hardware Device Interface for PMC SRC based controllers + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "aacraid.h" + +static int aac_src_get_sync_status(struct aac_dev *dev); + +static irqreturn_t aac_src_intr_message(int irq, void *dev_id) +{ + struct aac_msix_ctx *ctx; + struct aac_dev *dev; + unsigned long bellbits, bellbits_shifted; + int vector_no; + int isFastResponse, mode; + u32 index, handle; + + ctx = (struct aac_msix_ctx *)dev_id; + dev = ctx->dev; + vector_no = ctx->vector_no; + + if (dev->msi_enabled) { + mode = AAC_INT_MODE_MSI; + if (vector_no == 0) { + bellbits = src_readl(dev, MUnit.ODR_MSI); + if (bellbits & 0x40000) + mode |= AAC_INT_MODE_AIF; + if (bellbits & 0x1000) + mode |= AAC_INT_MODE_SYNC; + } + } else { + mode = AAC_INT_MODE_INTX; + bellbits = src_readl(dev, MUnit.ODR_R); + if (bellbits & PmDoorBellResponseSent) { + bellbits = PmDoorBellResponseSent; + src_writel(dev, MUnit.ODR_C, bellbits); + src_readl(dev, MUnit.ODR_C); + } else { + bellbits_shifted = (bellbits >> SRC_ODR_SHIFT); + src_writel(dev, MUnit.ODR_C, bellbits); + src_readl(dev, MUnit.ODR_C); + + if (bellbits_shifted & DoorBellAifPending) + mode |= AAC_INT_MODE_AIF; + else if (bellbits_shifted & OUTBOUNDDOORBELL_0) + mode |= AAC_INT_MODE_SYNC; + } + } + + if (mode & AAC_INT_MODE_SYNC) { + unsigned long sflags; + struct list_head *entry; + int send_it = 0; + extern int aac_sync_mode; + + if (!aac_sync_mode && !dev->msi_enabled) { + src_writel(dev, MUnit.ODR_C, bellbits); + src_readl(dev, MUnit.ODR_C); + } + + if (dev->sync_fib) { + if (dev->sync_fib->callback) + dev->sync_fib->callback(dev->sync_fib->callback_data, + dev->sync_fib); + spin_lock_irqsave(&dev->sync_fib->event_lock, sflags); + if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) { + dev->management_fib_count--; + complete(&dev->sync_fib->event_wait); + } + spin_unlock_irqrestore(&dev->sync_fib->event_lock, + sflags); + spin_lock_irqsave(&dev->sync_lock, sflags); + if (!list_empty(&dev->sync_fib_list)) { + entry = dev->sync_fib_list.next; + dev->sync_fib = list_entry(entry, + 
struct fib, + fiblink); + list_del(entry); + send_it = 1; + } else { + dev->sync_fib = NULL; + } + spin_unlock_irqrestore(&dev->sync_lock, sflags); + if (send_it) { + aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB, + (u32)dev->sync_fib->hw_fib_pa, + 0, 0, 0, 0, 0, + NULL, NULL, NULL, NULL, NULL); + } + } + if (!dev->msi_enabled) + mode = 0; + + } + + if (mode & AAC_INT_MODE_AIF) { + /* handle AIF */ + if (dev->sa_firmware) { + u32 events = src_readl(dev, MUnit.SCR0); + + aac_intr_normal(dev, events, 1, 0, NULL); + writel(events, &dev->IndexRegs->Mailbox[0]); + src_writel(dev, MUnit.IDR, 1 << 23); + } else { + if (dev->aif_thread && dev->fsa_dev) + aac_intr_normal(dev, 0, 2, 0, NULL); + } + if (dev->msi_enabled) + aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT); + mode = 0; + } + + if (mode) { + index = dev->host_rrq_idx[vector_no]; + + for (;;) { + isFastResponse = 0; + /* remove toggle bit (31) */ + handle = le32_to_cpu((dev->host_rrq[index]) + & 0x7fffffff); + /* check fast response bits (30, 1) */ + if (handle & 0x40000000) + isFastResponse = 1; + handle &= 0x0000ffff; + if (handle == 0) + break; + handle >>= 2; + if (dev->msi_enabled && dev->max_msix > 1) + atomic_dec(&dev->rrq_outstanding[vector_no]); + aac_intr_normal(dev, handle, 0, isFastResponse, NULL); + dev->host_rrq[index++] = 0; + if (index == (vector_no + 1) * dev->vector_cap) + index = vector_no * dev->vector_cap; + dev->host_rrq_idx[vector_no] = index; + } + mode = 0; + } + + return IRQ_HANDLED; +} + +/** + * aac_src_disable_interrupt - Disable interrupts + * @dev: Adapter + */ + +static void aac_src_disable_interrupt(struct aac_dev *dev) +{ + src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); +} + +/** + * aac_src_enable_interrupt_message - Enable interrupts + * @dev: Adapter + */ + +static void aac_src_enable_interrupt_message(struct aac_dev *dev) +{ + aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT); +} + +/** + * src_sync_cmd - send a command and wait + * @dev: Adapter + * @command: Command to execute + * @p1: first parameter + * @p2: second parameter + * @p3: third parameter + * @p4: forth parameter + * @p5: fifth parameter + * @p6: sixth parameter + * @status: adapter status + * @r1: first return value + * @r2: second return valu + * @r3: third return value + * @r4: forth return value + * + * This routine will send a synchronous command to the adapter and wait + * for its completion. + */ + +static int src_sync_cmd(struct aac_dev *dev, u32 command, + u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, + u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4) +{ + unsigned long start; + unsigned long delay; + int ok; + + /* + * Write the command into Mailbox 0 + */ + writel(command, &dev->IndexRegs->Mailbox[0]); + /* + * Write the parameters into Mailboxes 1 - 6 + */ + writel(p1, &dev->IndexRegs->Mailbox[1]); + writel(p2, &dev->IndexRegs->Mailbox[2]); + writel(p3, &dev->IndexRegs->Mailbox[3]); + writel(p4, &dev->IndexRegs->Mailbox[4]); + + /* + * Clear the synch command doorbell to start on a clean slate. + */ + if (!dev->msi_enabled) + src_writel(dev, + MUnit.ODR_C, + OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); + + /* + * Disable doorbell interrupts + */ + src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); + + /* + * Force the completion of the mask register write before issuing + * the interrupt. 
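+ * The read-back of MUnit.OIMR below flushes the posted write so the
+ * mask is guaranteed to be in effect before the inbound doorbell
+ * (MUnit.IDR) is rung.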
+ */ + src_readl(dev, MUnit.OIMR); + + /* + * Signal that there is a new synch command + */ + src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT); + + if ((!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) && + !dev->in_soft_reset) { + ok = 0; + start = jiffies; + + if (command == IOP_RESET_ALWAYS) { + /* Wait up to 10 sec */ + delay = 10*HZ; + } else { + /* Wait up to 5 minutes */ + delay = 300*HZ; + } + while (time_before(jiffies, start+delay)) { + udelay(5); /* Delay 5 microseconds to let Mon960 get info. */ + /* + * Mon960 will set doorbell0 bit when it has completed the command. + */ + if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) { + /* + * Clear the doorbell. + */ + if (dev->msi_enabled) + aac_src_access_devreg(dev, + AAC_CLEAR_SYNC_BIT); + else + src_writel(dev, + MUnit.ODR_C, + OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); + ok = 1; + break; + } + /* + * Yield the processor in case we are slow + */ + msleep(1); + } + if (unlikely(ok != 1)) { + /* + * Restore interrupt mask even though we timed out + */ + aac_adapter_enable_int(dev); + return -ETIMEDOUT; + } + /* + * Pull the synch status from Mailbox 0. + */ + if (status) + *status = readl(&dev->IndexRegs->Mailbox[0]); + if (r1) + *r1 = readl(&dev->IndexRegs->Mailbox[1]); + if (r2) + *r2 = readl(&dev->IndexRegs->Mailbox[2]); + if (r3) + *r3 = readl(&dev->IndexRegs->Mailbox[3]); + if (r4) + *r4 = readl(&dev->IndexRegs->Mailbox[4]); + if (command == GET_COMM_PREFERRED_SETTINGS) + dev->max_msix = + readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF; + /* + * Clear the synch command doorbell. + */ + if (!dev->msi_enabled) + src_writel(dev, + MUnit.ODR_C, + OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); + } + + /* + * Restore interrupt mask + */ + aac_adapter_enable_int(dev); + return 0; +} + +/** + * aac_src_interrupt_adapter - interrupt adapter + * @dev: Adapter + * + * Send an interrupt to the i960 and breakpoint it. + */ + +static void aac_src_interrupt_adapter(struct aac_dev *dev) +{ + src_sync_cmd(dev, BREAKPOINT_REQUEST, + 0, 0, 0, 0, 0, 0, + NULL, NULL, NULL, NULL, NULL); +} + +/** + * aac_src_notify_adapter - send an event to the adapter + * @dev: Adapter + * @event: Event to send + * + * Notify the i960 that something it probably cares about has + * happened. 
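
[Editor's note] src_sync_cmd above is a mailbox-plus-doorbell handshake: the command goes into Mailbox 0 and its parameters into the following mailboxes, an inbound doorbell bit tells the firmware to look, and the driver then polls the outbound doorbell (up to 10 seconds for IOP_RESET_ALWAYS, otherwise up to 5 minutes) before reading the results back out of the mailboxes. The fragment below is a minimal user-space sketch of that poll-with-deadline pattern against a simulated status word; none of these names are driver or kernel API.

/* Minimal user-space sketch of the poll-with-deadline pattern used by
 * src_sync_cmd. The "register" is a plain variable; names are illustrative. */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static volatile unsigned int fake_outbound_doorbell;
#define DOORBELL_DONE 0x1u

/* Poll until the done bit appears or timeout_sec elapses. */
static int wait_for_doorbell(double timeout_sec)
{
    struct timespec start, now;

    clock_gettime(CLOCK_MONOTONIC, &start);
    for (;;) {
        if (fake_outbound_doorbell & DOORBELL_DONE) {
            fake_outbound_doorbell &= ~DOORBELL_DONE;   /* "clear" the doorbell */
            return 0;
        }
        clock_gettime(CLOCK_MONOTONIC, &now);
        if ((now.tv_sec - start.tv_sec) +
            (now.tv_nsec - start.tv_nsec) / 1e9 > timeout_sec)
            return -1;                                  /* timed out */
        usleep(1000);                                   /* yield, as the driver does */
    }
}

int main(void)
{
    fake_outbound_doorbell = DOORBELL_DONE;             /* pretend firmware answered */
    printf("sync cmd %s\n", wait_for_doorbell(1.0) ? "timed out" : "completed");
    return 0;
}
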
+ */ + +static void aac_src_notify_adapter(struct aac_dev *dev, u32 event) +{ + switch (event) { + + case AdapNormCmdQue: + src_writel(dev, MUnit.ODR_C, + INBOUNDDOORBELL_1 << SRC_ODR_SHIFT); + break; + case HostNormRespNotFull: + src_writel(dev, MUnit.ODR_C, + INBOUNDDOORBELL_4 << SRC_ODR_SHIFT); + break; + case AdapNormRespQue: + src_writel(dev, MUnit.ODR_C, + INBOUNDDOORBELL_2 << SRC_ODR_SHIFT); + break; + case HostNormCmdNotFull: + src_writel(dev, MUnit.ODR_C, + INBOUNDDOORBELL_3 << SRC_ODR_SHIFT); + break; + case FastIo: + src_writel(dev, MUnit.ODR_C, + INBOUNDDOORBELL_6 << SRC_ODR_SHIFT); + break; + case AdapPrintfDone: + src_writel(dev, MUnit.ODR_C, + INBOUNDDOORBELL_5 << SRC_ODR_SHIFT); + break; + default: + BUG(); + break; + } +} + +/** + * aac_src_start_adapter - activate adapter + * @dev: Adapter + * + * Start up processing on an i960 based AAC adapter + */ + +static void aac_src_start_adapter(struct aac_dev *dev) +{ + union aac_init *init; + int i; + + /* reset host_rrq_idx first */ + for (i = 0; i < dev->max_msix; i++) { + dev->host_rrq_idx[i] = i * dev->vector_cap; + atomic_set(&dev->rrq_outstanding[i], 0); + } + atomic_set(&dev->msix_counter, 0); + dev->fibs_pushed_no = 0; + + init = dev->init; + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { + init->r8.host_elapsed_seconds = + cpu_to_le32(ktime_get_real_seconds()); + src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, + lower_32_bits(dev->init_pa), + upper_32_bits(dev->init_pa), + sizeof(struct _r8) + + (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq), + 0, 0, 0, NULL, NULL, NULL, NULL, NULL); + } else { + init->r7.host_elapsed_seconds = + cpu_to_le32(ktime_get_real_seconds()); + // We can only use a 32 bit address here + src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, + (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0, + NULL, NULL, NULL, NULL, NULL); + } + +} + +/** + * aac_src_check_health + * @dev: device to check if healthy + * + * Will attempt to determine if the specified adapter is alive and + * capable of handling requests, returning 0 if alive. + */ +static int aac_src_check_health(struct aac_dev *dev) +{ + u32 status = src_readl(dev, MUnit.OMR); + + /* + * Check to see if the board panic'd. + */ + if (unlikely(status & KERNEL_PANIC)) + goto err_blink; + + /* + * Check to see if the board failed any self tests. + */ + if (unlikely(status & SELF_TEST_FAILED)) + goto err_out; + + /* + * Check to see if the board failed any self tests. + */ + if (unlikely(status & MONITOR_PANIC)) + goto err_out; + + /* + * Wait for the adapter to be up and running. + */ + if (unlikely(!(status & KERNEL_UP_AND_RUNNING))) + return -3; + /* + * Everything is OK + */ + return 0; + +err_out: + return -1; + +err_blink: + return (status >> 16) & 0xFF; +} + +static inline u32 aac_get_vector(struct aac_dev *dev) +{ + return atomic_inc_return(&dev->msix_counter)%dev->max_msix; +} + +/** + * aac_src_deliver_message + * @fib: fib to issue + * + * Will send a fib, returning 0 if successful. + */ +static int aac_src_deliver_message(struct fib *fib) +{ + struct aac_dev *dev = fib->dev; + struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; + u32 fibsize; + dma_addr_t address; + struct aac_fib_xporthdr *pFibX; + int native_hba; +#if !defined(writeq) + unsigned long flags; +#endif + + u16 vector_no; + + atomic_inc(&q->numpending); + + native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 
1 : 0; + + + if (dev->msi_enabled && dev->max_msix > 1 && + (native_hba || fib->hw_fib_va->header.Command != AifRequest)) { + + if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) + && dev->sa_firmware) + vector_no = aac_get_vector(dev); + else + vector_no = fib->vector_no; + + if (native_hba) { + if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) { + struct aac_hba_tm_req *tm_req; + + tm_req = (struct aac_hba_tm_req *) + fib->hw_fib_va; + if (tm_req->iu_type == + HBA_IU_TYPE_SCSI_TM_REQ) { + ((struct aac_hba_tm_req *) + fib->hw_fib_va)->reply_qid + = vector_no; + ((struct aac_hba_tm_req *) + fib->hw_fib_va)->request_id + += (vector_no << 16); + } else { + ((struct aac_hba_reset_req *) + fib->hw_fib_va)->reply_qid + = vector_no; + ((struct aac_hba_reset_req *) + fib->hw_fib_va)->request_id + += (vector_no << 16); + } + } else { + ((struct aac_hba_cmd_req *) + fib->hw_fib_va)->reply_qid + = vector_no; + ((struct aac_hba_cmd_req *) + fib->hw_fib_va)->request_id + += (vector_no << 16); + } + } else { + fib->hw_fib_va->header.Handle += (vector_no << 16); + } + } else { + vector_no = 0; + } + + atomic_inc(&dev->rrq_outstanding[vector_no]); + + if (native_hba) { + address = fib->hw_fib_pa; + fibsize = (fib->hbacmd_size + 127) / 128 - 1; + if (fibsize > 31) + fibsize = 31; + address |= fibsize; +#if defined(writeq) + src_writeq(dev, MUnit.IQN_L, (u64)address); +#else + spin_lock_irqsave(&fib->dev->iq_lock, flags); + src_writel(dev, MUnit.IQN_H, + upper_32_bits(address) & 0xffffffff); + src_writel(dev, MUnit.IQN_L, address & 0xffffffff); + spin_unlock_irqrestore(&fib->dev->iq_lock, flags); +#endif + } else { + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 || + dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { + /* Calculate the amount to the fibsize bits */ + fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size) + + 127) / 128 - 1; + /* New FIB header, 32-bit */ + address = fib->hw_fib_pa; + fib->hw_fib_va->header.StructType = FIB_MAGIC2; + fib->hw_fib_va->header.SenderFibAddress = + cpu_to_le32((u32)address); + fib->hw_fib_va->header.u.TimeStamp = 0; + WARN_ON(upper_32_bits(address) != 0L); + } else { + /* Calculate the amount to the fibsize bits */ + fibsize = (sizeof(struct aac_fib_xporthdr) + + le16_to_cpu(fib->hw_fib_va->header.Size) + + 127) / 128 - 1; + /* Fill XPORT header */ + pFibX = (struct aac_fib_xporthdr *) + ((unsigned char *)fib->hw_fib_va - + sizeof(struct aac_fib_xporthdr)); + pFibX->Handle = fib->hw_fib_va->header.Handle; + pFibX->HostAddress = + cpu_to_le64((u64)fib->hw_fib_pa); + pFibX->Size = cpu_to_le32( + le16_to_cpu(fib->hw_fib_va->header.Size)); + address = fib->hw_fib_pa - + (u64)sizeof(struct aac_fib_xporthdr); + } + if (fibsize > 31) + fibsize = 31; + address |= fibsize; + +#if defined(writeq) + src_writeq(dev, MUnit.IQ_L, (u64)address); +#else + spin_lock_irqsave(&fib->dev->iq_lock, flags); + src_writel(dev, MUnit.IQ_H, + upper_32_bits(address) & 0xffffffff); + src_writel(dev, MUnit.IQ_L, address & 0xffffffff); + spin_unlock_irqrestore(&fib->dev->iq_lock, flags); +#endif + } + return 0; +} + +/** + * aac_src_ioremap + * @dev: device ioremap + * @size: mapping resize request + * + */ +static int aac_src_ioremap(struct aac_dev *dev, u32 size) +{ + if (!size) { + iounmap(dev->regs.src.bar1); + dev->regs.src.bar1 = NULL; + iounmap(dev->regs.src.bar0); + dev->base = dev->regs.src.bar0 = NULL; + return 0; + } + dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2), + AAC_MIN_SRC_BAR1_SIZE); + dev->base = NULL; + if (dev->regs.src.bar1 == NULL) + return -1; + 
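
[Editor's note] aac_src_deliver_message above folds the request size into the inbound-queue value it writes: the size is converted to 128-byte units minus one ((size + 127) / 128 - 1), capped at 31, and OR'd into the low bits of the FIB's DMA address, which is then written with writeq or as two 32-bit halves under a spin lock when writeq is not available. A small stand-alone illustration of that encoding follows; the helper names are made up for the example and the address is assumed to be suitably aligned.

/* Illustration of packing a size code into the low bits of a DMA address,
 * as aac_src_deliver_message does before writing the inbound queue register. */
#include <stdio.h>
#include <stdint.h>

static uint64_t pack_iq_entry(uint64_t dma_addr, unsigned int size_bytes)
{
    unsigned int size_code = (size_bytes + 127) / 128 - 1; /* 128-byte units, minus one */

    if (size_code > 31)
        size_code = 31;              /* the field is capped at 31 */
    return dma_addr | size_code;     /* low bits assumed free for the size code */
}

int main(void)
{
    uint64_t entry = pack_iq_entry(0x12345000ull, 512);

    /* 512 bytes -> (512 + 127) / 128 - 1 = 3 */
    printf("entry = 0x%llx (size code %llu)\n",
           (unsigned long long)entry, (unsigned long long)(entry & 0x1f));
    return 0;
}
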
dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size); + if (dev->base == NULL) { + iounmap(dev->regs.src.bar1); + dev->regs.src.bar1 = NULL; + return -1; + } + dev->IndexRegs = &((struct src_registers __iomem *) + dev->base)->u.tupelo.IndexRegs; + return 0; +} + +/** + * aac_srcv_ioremap + * @dev: device ioremap + * @size: mapping resize request + * + */ +static int aac_srcv_ioremap(struct aac_dev *dev, u32 size) +{ + if (!size) { + iounmap(dev->regs.src.bar0); + dev->base = dev->regs.src.bar0 = NULL; + return 0; + } + + dev->regs.src.bar1 = + ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE); + dev->base = NULL; + if (dev->regs.src.bar1 == NULL) + return -1; + dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size); + if (dev->base == NULL) { + iounmap(dev->regs.src.bar1); + dev->regs.src.bar1 = NULL; + return -1; + } + dev->IndexRegs = &((struct src_registers __iomem *) + dev->base)->u.denali.IndexRegs; + return 0; +} + +void aac_set_intx_mode(struct aac_dev *dev) +{ + if (dev->msi_enabled) { + aac_src_access_devreg(dev, AAC_ENABLE_INTX); + dev->msi_enabled = 0; + msleep(5000); /* Delay 5 seconds */ + } +} + +static void aac_clear_omr(struct aac_dev *dev) +{ + u32 omr_value = 0; + + omr_value = src_readl(dev, MUnit.OMR); + + /* + * Check for PCI Errors or Kernel Panic + */ + if ((omr_value == INVALID_OMR) || (omr_value & KERNEL_PANIC)) + omr_value = 0; + + /* + * Preserve MSIX Value if any + */ + src_writel(dev, MUnit.OMR, omr_value & AAC_INT_MODE_MSIX); + src_readl(dev, MUnit.OMR); +} + +static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev) +{ + __le32 supported_options3; + + if (!aac_fib_dump) + return; + + supported_options3 = dev->supplement_adapter_info.supported_options3; + if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP)) + return; + + aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP, + 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); +} + +static bool aac_is_ctrl_up_and_running(struct aac_dev *dev) +{ + bool ctrl_up = true; + unsigned long status, start; + bool is_up = false; + + start = jiffies; + do { + schedule(); + status = src_readl(dev, MUnit.OMR); + + if (status == 0xffffffff) + status = 0; + + if (status & KERNEL_BOOTING) { + start = jiffies; + continue; + } + + if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) { + ctrl_up = false; + break; + } + + is_up = status & KERNEL_UP_AND_RUNNING; + + } while (!is_up); + + return ctrl_up; +} + +static void aac_src_drop_io(struct aac_dev *dev) +{ + if (!dev->soft_reset_support) + return; + + aac_adapter_sync_cmd(dev, DROP_IO, + 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); +} + +static void aac_notify_fw_of_iop_reset(struct aac_dev *dev) +{ + aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL, + NULL, NULL, NULL, NULL); + aac_src_drop_io(dev); +} + +static void aac_send_iop_reset(struct aac_dev *dev) +{ + aac_dump_fw_fib_iop_reset(dev); + + aac_notify_fw_of_iop_reset(dev); + + aac_set_intx_mode(dev); + + aac_clear_omr(dev); + + src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK); + + msleep(5000); +} + +static void aac_send_hardware_soft_reset(struct aac_dev *dev) +{ + u_int32_t val; + + aac_clear_omr(dev); + val = readl(((char *)(dev->base) + IBW_SWR_OFFSET)); + val |= 0x01; + writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET)); + msleep_interruptible(20000); +} + +static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) +{ + bool is_ctrl_up; + int ret = 0; + + if (bled < 0) + goto invalid_out; + + if (bled) + dev_err(&dev->pdev->dev, 
"adapter kernel panic'd %x.\n", bled); + + /* + * When there is a BlinkLED, IOP_RESET has not effect + */ + if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET) + reset_type &= ~HW_IOP_RESET; + + dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; + + dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type); + + if (reset_type & HW_IOP_RESET) { + dev_info(&dev->pdev->dev, "Issuing IOP reset\n"); + aac_send_iop_reset(dev); + + /* + * Creates a delay or wait till up and running comes thru + */ + is_ctrl_up = aac_is_ctrl_up_and_running(dev); + if (!is_ctrl_up) + dev_err(&dev->pdev->dev, "IOP reset failed\n"); + else { + dev_info(&dev->pdev->dev, "IOP reset succeeded\n"); + goto set_startup; + } + } + + if (!dev->sa_firmware) { + dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n"); + ret = -ENODEV; + goto out; + } + + if (reset_type & HW_SOFT_RESET) { + dev_info(&dev->pdev->dev, "Issuing SOFT reset\n"); + aac_send_hardware_soft_reset(dev); + dev->msi_enabled = 0; + + is_ctrl_up = aac_is_ctrl_up_and_running(dev); + if (!is_ctrl_up) { + dev_err(&dev->pdev->dev, "SOFT reset failed\n"); + ret = -ENODEV; + goto out; + } else + dev_info(&dev->pdev->dev, "SOFT reset succeeded\n"); + } + +set_startup: + if (startup_timeout < 300) + startup_timeout = 300; + +out: + return ret; + +invalid_out: + if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC) + ret = -ENODEV; +goto out; +} + +/** + * aac_src_select_comm - Select communications method + * @dev: Adapter + * @comm: communications method + */ +static int aac_src_select_comm(struct aac_dev *dev, int comm) +{ + switch (comm) { + case AAC_COMM_MESSAGE: + dev->a_ops.adapter_intr = aac_src_intr_message; + dev->a_ops.adapter_deliver = aac_src_deliver_message; + break; + default: + return 1; + } + return 0; +} + +/** + * aac_src_init - initialize an Cardinal Frey Bar card + * @dev: device to configure + * + */ + +int aac_src_init(struct aac_dev *dev) +{ + unsigned long start; + unsigned long status; + int restart = 0; + int instance = dev->id; + const char *name = dev->name; + + dev->a_ops.adapter_ioremap = aac_src_ioremap; + dev->a_ops.adapter_comm = aac_src_select_comm; + + dev->base_size = AAC_MIN_SRC_BAR0_SIZE; + if (aac_adapter_ioremap(dev, dev->base_size)) { + printk(KERN_WARNING "%s: unable to map adapter.\n", name); + goto error_iounmap; + } + + /* Failure to reset here is an option ... */ + dev->a_ops.adapter_sync_cmd = src_sync_cmd; + dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; + + if (dev->init_reset) { + dev->init_reset = false; + if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) + ++restart; + } + + /* + * Check to see if the board panic'd while booting. + */ + status = src_readl(dev, MUnit.OMR); + if (status & KERNEL_PANIC) { + if (aac_src_restart_adapter(dev, + aac_src_check_health(dev), IOP_HWSOFT_RESET)) + goto error_iounmap; + ++restart; + } + /* + * Check to see if the board failed any self tests. + */ + status = src_readl(dev, MUnit.OMR); + if (status & SELF_TEST_FAILED) { + printk(KERN_ERR "%s%d: adapter self-test failed.\n", + dev->name, instance); + goto error_iounmap; + } + /* + * Check to see if the monitor panic'd while booting. + */ + if (status & MONITOR_PANIC) { + printk(KERN_ERR "%s%d: adapter monitor panic.\n", + dev->name, instance); + goto error_iounmap; + } + start = jiffies; + /* + * Wait for the adapter to be up and running. 
Wait up to 3 minutes + */ + while (!((status = src_readl(dev, MUnit.OMR)) & + KERNEL_UP_AND_RUNNING)) { + if ((restart && + (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || + time_after(jiffies, start+HZ*startup_timeout)) { + printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", + dev->name, instance, status); + goto error_iounmap; + } + if (!restart && + ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) || + time_after(jiffies, start + HZ * + ((startup_timeout > 60) + ? (startup_timeout - 60) + : (startup_timeout / 2))))) { + if (likely(!aac_src_restart_adapter(dev, + aac_src_check_health(dev), IOP_HWSOFT_RESET))) + start = jiffies; + ++restart; + } + msleep(1); + } + if (restart && aac_commit) + aac_commit = 1; + /* + * Fill in the common function dispatch table. + */ + dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter; + dev->a_ops.adapter_disable_int = aac_src_disable_interrupt; + dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; + dev->a_ops.adapter_notify = aac_src_notify_adapter; + dev->a_ops.adapter_sync_cmd = src_sync_cmd; + dev->a_ops.adapter_check_health = aac_src_check_health; + dev->a_ops.adapter_restart = aac_src_restart_adapter; + dev->a_ops.adapter_start = aac_src_start_adapter; + + /* + * First clear out all interrupts. Then enable the one's that we + * can handle. + */ + aac_adapter_comm(dev, AAC_COMM_MESSAGE); + aac_adapter_disable_int(dev); + src_writel(dev, MUnit.ODR_C, 0xffffffff); + aac_adapter_enable_int(dev); + + if (aac_init_adapter(dev) == NULL) + goto error_iounmap; + if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1) + goto error_iounmap; + + dev->msi = !pci_enable_msi(dev->pdev); + + dev->aac_msix[0].vector_no = 0; + dev->aac_msix[0].dev = dev; + + if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, + IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) { + + if (dev->msi) + pci_disable_msi(dev->pdev); + + printk(KERN_ERR "%s%d: Interrupt unavailable.\n", + name, instance); + goto error_iounmap; + } + dev->dbg_base = pci_resource_start(dev->pdev, 2); + dev->dbg_base_mapped = dev->regs.src.bar1; + dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE; + dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; + + aac_adapter_enable_int(dev); + + if (!dev->sync_mode) { + /* + * Tell the adapter that all is configured, and it can + * start accepting requests + */ + aac_src_start_adapter(dev); + } + return 0; + +error_iounmap: + + return -1; +} + +static int aac_src_wait_sync(struct aac_dev *dev, int *status) +{ + unsigned long start = jiffies; + unsigned long usecs = 0; + int delay = 5 * HZ; + int rc = 1; + + while (time_before(jiffies, start+delay)) { + /* + * Delay 5 microseconds to let Mon960 get info. + */ + udelay(5); + + /* + * Mon960 will set doorbell0 bit when it has completed the + * command. + */ + if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) { + /* + * Clear: the doorbell. + */ + if (dev->msi_enabled) + aac_src_access_devreg(dev, AAC_CLEAR_SYNC_BIT); + else + src_writel(dev, MUnit.ODR_C, + OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); + rc = 0; + + break; + } + + /* + * Yield the processor in case we are slow + */ + usecs = 1 * USEC_PER_MSEC; + usleep_range(usecs, usecs + 50); + } + /* + * Pull the synch status from Mailbox 0. 
+ */ + if (status && !rc) { + status[0] = readl(&dev->IndexRegs->Mailbox[0]); + status[1] = readl(&dev->IndexRegs->Mailbox[1]); + status[2] = readl(&dev->IndexRegs->Mailbox[2]); + status[3] = readl(&dev->IndexRegs->Mailbox[3]); + status[4] = readl(&dev->IndexRegs->Mailbox[4]); + } + + return rc; +} + +/** + * aac_src_soft_reset - perform soft reset to speed up + * access + * + * Assumptions: That the controller is in a state where we can + * bring it back to life with an init struct. We can only use + * fast sync commands, as the timeout is 5 seconds. + * + * @dev: device to configure + * + */ + +static int aac_src_soft_reset(struct aac_dev *dev) +{ + u32 status_omr = src_readl(dev, MUnit.OMR); + u32 status[5]; + int rc = 1; + int state = 0; + char *state_str[7] = { + "GET_ADAPTER_PROPERTIES Failed", + "GET_ADAPTER_PROPERTIES timeout", + "SOFT_RESET not supported", + "DROP_IO Failed", + "DROP_IO timeout", + "Check Health failed" + }; + + if (status_omr == INVALID_OMR) + return 1; // pcie hosed + + if (!(status_omr & KERNEL_UP_AND_RUNNING)) + return 1; // not up and running + + /* + * We go into soft reset mode to allow us to handle response + */ + dev->in_soft_reset = 1; + dev->msi_enabled = status_omr & AAC_INT_MODE_MSIX; + + /* Get adapter properties */ + rc = aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0, + 0, 0, 0, status+0, status+1, status+2, status+3, status+4); + if (rc) + goto out; + + state++; + if (aac_src_wait_sync(dev, status)) { + rc = 1; + goto out; + } + + state++; + if (!(status[1] & le32_to_cpu(AAC_OPT_EXTENDED) && + (status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)))) { + rc = 2; + goto out; + } + + if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) && + (status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE))) + dev->sa_firmware = 1; + + state++; + rc = aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0, + status+0, status+1, status+2, status+3, status+4); + + if (rc) + goto out; + + state++; + if (aac_src_wait_sync(dev, status)) { + rc = 3; + goto out; + } + + if (status[1]) + dev_err(&dev->pdev->dev, "%s: %d outstanding I/O pending\n", + __func__, status[1]); + + state++; + rc = aac_src_check_health(dev); + +out: + dev->in_soft_reset = 0; + dev->msi_enabled = 0; + if (rc) + dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__, + state_str[state], rc); + + return rc; +} +/** + * aac_srcv_init - initialize an SRCv card + * @dev: device to configure + * + */ + +int aac_srcv_init(struct aac_dev *dev) +{ + unsigned long start; + unsigned long status; + int restart = 0; + int instance = dev->id; + const char *name = dev->name; + + dev->a_ops.adapter_ioremap = aac_srcv_ioremap; + dev->a_ops.adapter_comm = aac_src_select_comm; + + dev->base_size = AAC_MIN_SRCV_BAR0_SIZE; + if (aac_adapter_ioremap(dev, dev->base_size)) { + printk(KERN_WARNING "%s: unable to map adapter.\n", name); + goto error_iounmap; + } + + /* Failure to reset here is an option ... */ + dev->a_ops.adapter_sync_cmd = src_sync_cmd; + dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; + + if (dev->init_reset) { + dev->init_reset = false; + if (aac_src_soft_reset(dev)) { + aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET); + ++restart; + } + } + + /* + * Check to see if flash update is running. + * Wait for the adapter to be up and running. 
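
[Editor's note] aac_src_soft_reset above advances a stage counter before each step (get adapter properties, wait for sync status, option check, DROP_IO, wait again, health check) so that on failure it can report which stage failed by indexing the state_str message table. A tiny stand-alone sketch of that stage-counter-plus-message-table idiom, with stage names invented for the example:

/* Sketch of the "stage counter + message table" failure reporting used by
 * aac_src_soft_reset. Stage names and the failing step are invented here. */
#include <stdio.h>

static int step_ok(int step) { return step != 2; }  /* pretend stage 2 fails */

int main(void)
{
    static const char *stage_str[] = {
        "probe failed",
        "handshake failed",
        "configuration failed",
        "start failed",
    };
    int state;
    int rc = 0;

    for (state = 0; state < 4; state++) {   /* state indexes the stage being attempted */
        if (!step_ok(state)) {
            rc = 1;
            break;
        }
    }
    if (rc)
        fprintf(stderr, "reset: %s (stage %d)\n", stage_str[state], state);
    return rc;
}
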
Wait up to 5 minutes + */ + status = src_readl(dev, MUnit.OMR); + if (status & FLASH_UPD_PENDING) { + start = jiffies; + do { + status = src_readl(dev, MUnit.OMR); + if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) { + printk(KERN_ERR "%s%d: adapter flash update failed.\n", + dev->name, instance); + goto error_iounmap; + } + } while (!(status & FLASH_UPD_SUCCESS) && + !(status & FLASH_UPD_FAILED)); + /* Delay 10 seconds. + * Because right now FW is doing a soft reset, + * do not read scratch pad register at this time + */ + ssleep(10); + } + /* + * Check to see if the board panic'd while booting. + */ + status = src_readl(dev, MUnit.OMR); + if (status & KERNEL_PANIC) { + if (aac_src_restart_adapter(dev, + aac_src_check_health(dev), IOP_HWSOFT_RESET)) + goto error_iounmap; + ++restart; + } + /* + * Check to see if the board failed any self tests. + */ + status = src_readl(dev, MUnit.OMR); + if (status & SELF_TEST_FAILED) { + printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance); + goto error_iounmap; + } + /* + * Check to see if the monitor panic'd while booting. + */ + if (status & MONITOR_PANIC) { + printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance); + goto error_iounmap; + } + + start = jiffies; + /* + * Wait for the adapter to be up and running. Wait up to 3 minutes + */ + do { + status = src_readl(dev, MUnit.OMR); + if (status == INVALID_OMR) + status = 0; + + if ((restart && + (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || + time_after(jiffies, start+HZ*startup_timeout)) { + printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", + dev->name, instance, status); + goto error_iounmap; + } + if (!restart && + ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) || + time_after(jiffies, start + HZ * + ((startup_timeout > 60) + ? (startup_timeout - 60) + : (startup_timeout / 2))))) { + if (likely(!aac_src_restart_adapter(dev, + aac_src_check_health(dev), IOP_HWSOFT_RESET))) + start = jiffies; + ++restart; + } + msleep(1); + } while (!(status & KERNEL_UP_AND_RUNNING)); + + if (restart && aac_commit) + aac_commit = 1; + /* + * Fill in the common function dispatch table. + */ + dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter; + dev->a_ops.adapter_disable_int = aac_src_disable_interrupt; + dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; + dev->a_ops.adapter_notify = aac_src_notify_adapter; + dev->a_ops.adapter_sync_cmd = src_sync_cmd; + dev->a_ops.adapter_check_health = aac_src_check_health; + dev->a_ops.adapter_restart = aac_src_restart_adapter; + dev->a_ops.adapter_start = aac_src_start_adapter; + + /* + * First clear out all interrupts. Then enable the one's that we + * can handle. 
+ */ + aac_adapter_comm(dev, AAC_COMM_MESSAGE); + aac_adapter_disable_int(dev); + src_writel(dev, MUnit.ODR_C, 0xffffffff); + aac_adapter_enable_int(dev); + + if (aac_init_adapter(dev) == NULL) + goto error_iounmap; + if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) && + (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)) + goto error_iounmap; + if (dev->msi_enabled) + aac_src_access_devreg(dev, AAC_ENABLE_MSIX); + + if (aac_acquire_irq(dev)) + goto error_iounmap; + + dev->dbg_base = pci_resource_start(dev->pdev, 2); + dev->dbg_base_mapped = dev->regs.src.bar1; + dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE; + dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; + + aac_adapter_enable_int(dev); + + if (!dev->sync_mode) { + /* + * Tell the adapter that all is configured, and it can + * start accepting requests + */ + aac_src_start_adapter(dev); + } + return 0; + +error_iounmap: + + return -1; +} + +void aac_src_access_devreg(struct aac_dev *dev, int mode) +{ + u_int32_t val; + + switch (mode) { + case AAC_ENABLE_INTERRUPT: + src_writel(dev, + MUnit.OIMR, + dev->OIMR = (dev->msi_enabled ? + AAC_INT_ENABLE_TYPE1_MSIX : + AAC_INT_ENABLE_TYPE1_INTX)); + break; + + case AAC_DISABLE_INTERRUPT: + src_writel(dev, + MUnit.OIMR, + dev->OIMR = AAC_INT_DISABLE_ALL); + break; + + case AAC_ENABLE_MSIX: + /* set bit 6 */ + val = src_readl(dev, MUnit.IDR); + val |= 0x40; + src_writel(dev, MUnit.IDR, val); + src_readl(dev, MUnit.IDR); + /* unmask int. */ + val = PMC_ALL_INTERRUPT_BITS; + src_writel(dev, MUnit.IOAR, val); + val = src_readl(dev, MUnit.OIMR); + src_writel(dev, + MUnit.OIMR, + val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0))); + break; + + case AAC_DISABLE_MSIX: + /* reset bit 6 */ + val = src_readl(dev, MUnit.IDR); + val &= ~0x40; + src_writel(dev, MUnit.IDR, val); + src_readl(dev, MUnit.IDR); + break; + + case AAC_CLEAR_AIF_BIT: + /* set bit 5 */ + val = src_readl(dev, MUnit.IDR); + val |= 0x20; + src_writel(dev, MUnit.IDR, val); + src_readl(dev, MUnit.IDR); + break; + + case AAC_CLEAR_SYNC_BIT: + /* set bit 4 */ + val = src_readl(dev, MUnit.IDR); + val |= 0x10; + src_writel(dev, MUnit.IDR, val); + src_readl(dev, MUnit.IDR); + break; + + case AAC_ENABLE_INTX: + /* set bit 7 */ + val = src_readl(dev, MUnit.IDR); + val |= 0x80; + src_writel(dev, MUnit.IDR, val); + src_readl(dev, MUnit.IDR); + /* unmask int. */ + val = PMC_ALL_INTERRUPT_BITS; + src_writel(dev, MUnit.IOAR, val); + src_readl(dev, MUnit.IOAR); + val = src_readl(dev, MUnit.OIMR); + src_writel(dev, MUnit.OIMR, + val & (~(PMC_GLOBAL_INT_BIT2))); + break; + + default: + break; + } +} + +static int aac_src_get_sync_status(struct aac_dev *dev) +{ + int msix_val = 0; + int legacy_val = 0; + + msix_val = src_readl(dev, MUnit.ODR_MSI) & SRC_MSI_READ_MASK ? 1 : 0; + + if (!dev->msi_enabled) { + /* + * if Legacy int status indicates cmd is not complete + * sample MSIx register to see if it indiactes cmd complete, + * if yes set the controller in MSIx mode and consider cmd + * completed + */ + legacy_val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT; + if (!(legacy_val & 1) && msix_val) + dev->msi_enabled = 1; + return legacy_val; + } + + return msix_val; +} diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c new file mode 100644 index 000000000..ab066bb27 --- /dev/null +++ b/drivers/scsi/advansys.c @@ -0,0 +1,11552 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters + * + * Copyright (c) 1995-2000 Advanced System Products, Inc. 
+ * Copyright (c) 2000-2001 ConnectCom Solutions, Inc. + * Copyright (c) 2007 Matthew Wilcox + * Copyright (c) 2014 Hannes Reinecke + * All Rights Reserved. + */ + +/* + * As of March 8, 2000 Advanced System Products, Inc. (AdvanSys) + * changed its name to ConnectCom Solutions, Inc. + * On June 18, 2001 Initio Corp. acquired ConnectCom's SCSI assets + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +#define DRV_NAME "advansys" +#define ASC_VERSION "3.5" /* AdvanSys Driver Version */ + +/* FIXME: + * + * 1. Use scsi_transport_spi + * 2. advansys_info is not safe against multiple simultaneous callers + * 3. Add module_param to override ISA/VLB ioport array + */ + +/* Enable driver /proc statistics. */ +#define ADVANSYS_STATS + +/* Enable driver tracing. */ +#undef ADVANSYS_DEBUG + +typedef unsigned char uchar; + +#define isodd_word(val) ((((uint)val) & (uint)0x0001) != 0) + +#define PCI_VENDOR_ID_ASP 0x10cd +#define PCI_DEVICE_ID_ASP_1200A 0x1100 +#define PCI_DEVICE_ID_ASP_ABP940 0x1200 +#define PCI_DEVICE_ID_ASP_ABP940U 0x1300 +#define PCI_DEVICE_ID_ASP_ABP940UW 0x2300 +#define PCI_DEVICE_ID_38C0800_REV1 0x2500 +#define PCI_DEVICE_ID_38C1600_REV1 0x2700 + +#define PortAddr unsigned int /* port address size */ +#define inp(port) inb(port) +#define outp(port, byte) outb((byte), (port)) + +#define inpw(port) inw(port) +#define outpw(port, word) outw((word), (port)) + +#define ASC_MAX_SG_QUEUE 7 +#define ASC_MAX_SG_LIST 255 + +#define ASC_CS_TYPE unsigned short + +#define ASC_IS_EISA (0x0002) +#define ASC_IS_PCI (0x0004) +#define ASC_IS_PCI_ULTRA (0x0104) +#define ASC_IS_PCMCIA (0x0008) +#define ASC_IS_MCA (0x0020) +#define ASC_IS_VL (0x0040) +#define ASC_IS_WIDESCSI_16 (0x0100) +#define ASC_IS_WIDESCSI_32 (0x0200) +#define ASC_IS_BIG_ENDIAN (0x8000) + +#define ASC_CHIP_MIN_VER_VL (0x01) +#define ASC_CHIP_MAX_VER_VL (0x07) +#define ASC_CHIP_MIN_VER_PCI (0x09) +#define ASC_CHIP_MAX_VER_PCI (0x0F) +#define ASC_CHIP_VER_PCI_BIT (0x08) +#define ASC_CHIP_VER_ASYN_BUG (0x21) +#define ASC_CHIP_VER_PCI 0x08 +#define ASC_CHIP_VER_PCI_ULTRA_3150 (ASC_CHIP_VER_PCI | 0x02) +#define ASC_CHIP_VER_PCI_ULTRA_3050 (ASC_CHIP_VER_PCI | 0x03) +#define ASC_CHIP_MIN_VER_EISA (0x41) +#define ASC_CHIP_MAX_VER_EISA (0x47) +#define ASC_CHIP_VER_EISA_BIT (0x40) +#define ASC_CHIP_LATEST_VER_EISA ((ASC_CHIP_MIN_VER_EISA - 1) + 3) +#define ASC_MAX_VL_DMA_COUNT (0x07FFFFFFL) +#define ASC_MAX_PCI_DMA_COUNT (0xFFFFFFFFL) + +#define ASC_SCSI_ID_BITS 3 +#define ASC_SCSI_TIX_TYPE uchar +#define ASC_ALL_DEVICE_BIT_SET 0xFF +#define ASC_SCSI_BIT_ID_TYPE uchar +#define ASC_MAX_TID 7 +#define ASC_MAX_LUN 7 +#define ASC_SCSI_WIDTH_BIT_SET 0xFF +#define ASC_MAX_SENSE_LEN 32 +#define ASC_MIN_SENSE_LEN 14 +#define ASC_SCSI_RESET_HOLD_TIME_US 60 + +/* + * Narrow boards only support 12-byte commands, while wide boards + * extend to 16-byte commands. 
+ */ +#define ASC_MAX_CDB_LEN 12 +#define ADV_MAX_CDB_LEN 16 + +#define MS_SDTR_LEN 0x03 +#define MS_WDTR_LEN 0x02 + +#define ASC_SG_LIST_PER_Q 7 +#define QS_FREE 0x00 +#define QS_READY 0x01 +#define QS_DISC1 0x02 +#define QS_DISC2 0x04 +#define QS_BUSY 0x08 +#define QS_ABORTED 0x40 +#define QS_DONE 0x80 +#define QC_NO_CALLBACK 0x01 +#define QC_SG_SWAP_QUEUE 0x02 +#define QC_SG_HEAD 0x04 +#define QC_DATA_IN 0x08 +#define QC_DATA_OUT 0x10 +#define QC_URGENT 0x20 +#define QC_MSG_OUT 0x40 +#define QC_REQ_SENSE 0x80 +#define QCSG_SG_XFER_LIST 0x02 +#define QCSG_SG_XFER_MORE 0x04 +#define QCSG_SG_XFER_END 0x08 +#define QD_IN_PROGRESS 0x00 +#define QD_NO_ERROR 0x01 +#define QD_ABORTED_BY_HOST 0x02 +#define QD_WITH_ERROR 0x04 +#define QD_INVALID_REQUEST 0x80 +#define QD_INVALID_HOST_NUM 0x81 +#define QD_INVALID_DEVICE 0x82 +#define QD_ERR_INTERNAL 0xFF +#define QHSTA_NO_ERROR 0x00 +#define QHSTA_M_SEL_TIMEOUT 0x11 +#define QHSTA_M_DATA_OVER_RUN 0x12 +#define QHSTA_M_DATA_UNDER_RUN 0x12 +#define QHSTA_M_UNEXPECTED_BUS_FREE 0x13 +#define QHSTA_M_BAD_BUS_PHASE_SEQ 0x14 +#define QHSTA_D_QDONE_SG_LIST_CORRUPTED 0x21 +#define QHSTA_D_ASC_DVC_ERROR_CODE_SET 0x22 +#define QHSTA_D_HOST_ABORT_FAILED 0x23 +#define QHSTA_D_EXE_SCSI_Q_FAILED 0x24 +#define QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT 0x25 +#define QHSTA_D_ASPI_NO_BUF_POOL 0x26 +#define QHSTA_M_WTM_TIMEOUT 0x41 +#define QHSTA_M_BAD_CMPL_STATUS_IN 0x42 +#define QHSTA_M_NO_AUTO_REQ_SENSE 0x43 +#define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44 +#define QHSTA_M_TARGET_STATUS_BUSY 0x45 +#define QHSTA_M_BAD_TAG_CODE 0x46 +#define QHSTA_M_BAD_QUEUE_FULL_OR_BUSY 0x47 +#define QHSTA_M_HUNG_REQ_SCSI_BUS_RESET 0x48 +#define QHSTA_D_LRAM_CMP_ERROR 0x81 +#define QHSTA_M_MICRO_CODE_ERROR_HALT 0xA1 +#define ASC_FLAG_SCSIQ_REQ 0x01 +#define ASC_FLAG_BIOS_SCSIQ_REQ 0x02 +#define ASC_FLAG_BIOS_ASYNC_IO 0x04 +#define ASC_FLAG_SRB_LINEAR_ADDR 0x08 +#define ASC_FLAG_WIN16 0x10 +#define ASC_FLAG_WIN32 0x20 +#define ASC_FLAG_DOS_VM_CALLBACK 0x80 +#define ASC_TAG_FLAG_EXTRA_BYTES 0x10 +#define ASC_TAG_FLAG_DISABLE_DISCONNECT 0x04 +#define ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX 0x08 +#define ASC_TAG_FLAG_DISABLE_CHK_COND_INT_HOST 0x40 +#define ASC_SCSIQ_CPY_BEG 4 +#define ASC_SCSIQ_SGHD_CPY_BEG 2 +#define ASC_SCSIQ_B_FWD 0 +#define ASC_SCSIQ_B_BWD 1 +#define ASC_SCSIQ_B_STATUS 2 +#define ASC_SCSIQ_B_QNO 3 +#define ASC_SCSIQ_B_CNTL 4 +#define ASC_SCSIQ_B_SG_QUEUE_CNT 5 +#define ASC_SCSIQ_D_DATA_ADDR 8 +#define ASC_SCSIQ_D_DATA_CNT 12 +#define ASC_SCSIQ_B_SENSE_LEN 20 +#define ASC_SCSIQ_DONE_INFO_BEG 22 +#define ASC_SCSIQ_D_SRBPTR 22 +#define ASC_SCSIQ_B_TARGET_IX 26 +#define ASC_SCSIQ_B_CDB_LEN 28 +#define ASC_SCSIQ_B_TAG_CODE 29 +#define ASC_SCSIQ_W_VM_ID 30 +#define ASC_SCSIQ_DONE_STATUS 32 +#define ASC_SCSIQ_HOST_STATUS 33 +#define ASC_SCSIQ_SCSI_STATUS 34 +#define ASC_SCSIQ_CDB_BEG 36 +#define ASC_SCSIQ_DW_REMAIN_XFER_ADDR 56 +#define ASC_SCSIQ_DW_REMAIN_XFER_CNT 60 +#define ASC_SCSIQ_B_FIRST_SG_WK_QP 48 +#define ASC_SCSIQ_B_SG_WK_QP 49 +#define ASC_SCSIQ_B_SG_WK_IX 50 +#define ASC_SCSIQ_W_ALT_DC1 52 +#define ASC_SCSIQ_B_LIST_CNT 6 +#define ASC_SCSIQ_B_CUR_LIST_CNT 7 +#define ASC_SGQ_B_SG_CNTL 4 +#define ASC_SGQ_B_SG_HEAD_QP 5 +#define ASC_SGQ_B_SG_LIST_CNT 6 +#define ASC_SGQ_B_SG_CUR_LIST_CNT 7 +#define ASC_SGQ_LIST_BEG 8 +#define ASC_DEF_SCSI1_QNG 4 +#define ASC_MAX_SCSI1_QNG 4 +#define ASC_DEF_SCSI2_QNG 16 +#define ASC_MAX_SCSI2_QNG 32 +#define ASC_TAG_CODE_MASK 0x23 +#define ASC_STOP_REQ_RISC_STOP 0x01 +#define ASC_STOP_ACK_RISC_STOP 0x03 +#define ASC_STOP_CLEAN_UP_BUSY_Q 0x10 
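
[Editor's note] The ASC_TIDLUN_TO_IX and ASC_TIX_TO_* macros just below pack a target id and LUN into a single "target index" byte: with ASC_SCSI_ID_BITS equal to 3, the target id occupies the low three bits and the LUN is shifted in above it, and the LUN is recovered by shifting right by ASC_SCSI_ID_BITS and masking with ASC_MAX_LUN. A stand-alone sketch of that layout, with helper names invented for the example:

/* Sketch of the target-index packing used by the ASC_TIDLUN_TO_IX /
 * ASC_TIX_TO_LUN macros below: tid in the low 3 bits, LUN above it. */
#include <stdio.h>

#define SCSI_ID_BITS 3          /* mirrors ASC_SCSI_ID_BITS */
#define MAX_TID      7          /* mirrors ASC_MAX_TID */
#define MAX_LUN      7          /* mirrors ASC_MAX_LUN */

static unsigned char tidlun_to_ix(unsigned tid, unsigned lun)
{
    return (unsigned char)((tid & MAX_TID) | (lun << SCSI_ID_BITS));
}

static unsigned tix_to_tid(unsigned char tix) { return tix & MAX_TID; }

static unsigned tix_to_lun(unsigned char tix)
{
    return (tix >> SCSI_ID_BITS) & MAX_LUN;
}

int main(void)
{
    unsigned char tix = tidlun_to_ix(5, 2);   /* tid 5, lun 2 -> 0x15 */

    printf("tix=0x%02x tid=%u lun=%u\n",
           (unsigned)tix, tix_to_tid(tix), tix_to_lun(tix));
    return 0;
}
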
+#define ASC_STOP_CLEAN_UP_DISC_Q 0x20 +#define ASC_STOP_HOST_REQ_RISC_HALT 0x40 +#define ASC_TIDLUN_TO_IX(tid, lun) (ASC_SCSI_TIX_TYPE)((tid) + ((lun)<> ASC_SCSI_ID_BITS) & ASC_MAX_LUN) +#define ASC_QNO_TO_QADDR(q_no) ((ASC_QADR_BEG)+((int)(q_no) << 6)) + +typedef struct asc_scsiq_1 { + uchar status; + uchar q_no; + uchar cntl; + uchar sg_queue_cnt; + uchar target_id; + uchar target_lun; + __le32 data_addr; + __le32 data_cnt; + __le32 sense_addr; + uchar sense_len; + uchar extra_bytes; +} ASC_SCSIQ_1; + +typedef struct asc_scsiq_2 { + u32 srb_tag; + uchar target_ix; + uchar flag; + uchar cdb_len; + uchar tag_code; + ushort vm_id; +} ASC_SCSIQ_2; + +typedef struct asc_scsiq_3 { + uchar done_stat; + uchar host_stat; + uchar scsi_stat; + uchar scsi_msg; +} ASC_SCSIQ_3; + +typedef struct asc_scsiq_4 { + uchar cdb[ASC_MAX_CDB_LEN]; + uchar y_first_sg_list_qp; + uchar y_working_sg_qp; + uchar y_working_sg_ix; + uchar y_res; + ushort x_req_count; + ushort x_reconnect_rtn; + __le32 x_saved_data_addr; + __le32 x_saved_data_cnt; +} ASC_SCSIQ_4; + +typedef struct asc_q_done_info { + ASC_SCSIQ_2 d2; + ASC_SCSIQ_3 d3; + uchar q_status; + uchar q_no; + uchar cntl; + uchar sense_len; + uchar extra_bytes; + uchar res; + u32 remain_bytes; +} ASC_QDONE_INFO; + +typedef struct asc_sg_list { + __le32 addr; + __le32 bytes; +} ASC_SG_LIST; + +typedef struct asc_sg_head { + ushort entry_cnt; + ushort queue_cnt; + ushort entry_to_copy; + ushort res; + ASC_SG_LIST sg_list[]; +} ASC_SG_HEAD; + +typedef struct asc_scsi_q { + ASC_SCSIQ_1 q1; + ASC_SCSIQ_2 q2; + uchar *cdbptr; + ASC_SG_HEAD *sg_head; + ushort remain_sg_entry_cnt; + ushort next_sg_index; +} ASC_SCSI_Q; + +typedef struct asc_scsi_bios_req_q { + ASC_SCSIQ_1 r1; + ASC_SCSIQ_2 r2; + uchar *cdbptr; + ASC_SG_HEAD *sg_head; + uchar *sense_ptr; + ASC_SCSIQ_3 r3; + uchar cdb[ASC_MAX_CDB_LEN]; + uchar sense[ASC_MIN_SENSE_LEN]; +} ASC_SCSI_BIOS_REQ_Q; + +typedef struct asc_risc_q { + uchar fwd; + uchar bwd; + ASC_SCSIQ_1 i1; + ASC_SCSIQ_2 i2; + ASC_SCSIQ_3 i3; + ASC_SCSIQ_4 i4; +} ASC_RISC_Q; + +typedef struct asc_sg_list_q { + uchar seq_no; + uchar q_no; + uchar cntl; + uchar sg_head_qp; + uchar sg_list_cnt; + uchar sg_cur_list_cnt; +} ASC_SG_LIST_Q; + +typedef struct asc_risc_sg_list_q { + uchar fwd; + uchar bwd; + ASC_SG_LIST_Q sg; + ASC_SG_LIST sg_list[7]; +} ASC_RISC_SG_LIST_Q; + +#define ASCQ_ERR_Q_STATUS 0x0D +#define ASCQ_ERR_CUR_QNG 0x17 +#define ASCQ_ERR_SG_Q_LINKS 0x18 +#define ASCQ_ERR_ISR_RE_ENTRY 0x1A +#define ASCQ_ERR_CRITICAL_RE_ENTRY 0x1B +#define ASCQ_ERR_ISR_ON_CRITICAL 0x1C + +/* + * Warning code values are set in ASC_DVC_VAR 'warn_code'. + */ +#define ASC_WARN_NO_ERROR 0x0000 +#define ASC_WARN_IO_PORT_ROTATE 0x0001 +#define ASC_WARN_EEPROM_CHKSUM 0x0002 +#define ASC_WARN_IRQ_MODIFIED 0x0004 +#define ASC_WARN_AUTO_CONFIG 0x0008 +#define ASC_WARN_CMD_QNG_CONFLICT 0x0010 +#define ASC_WARN_EEPROM_RECOVER 0x0020 +#define ASC_WARN_CFG_MSW_RECOVER 0x0040 + +/* + * Error code values are set in {ASC/ADV}_DVC_VAR 'err_code'. 
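
[Editor's note] The asc_syn_xfer_period and asc_syn_ultra_xfer_period tables above encode the limited set of synchronous transfer rates the narrow chip supports as an index in the range 0..7, or 0..15 on Ultra-capable chips; turning a negotiated index back into a period factor is a bounds-checked table lookup, as in the small sketch below. The table values are copied from above and the units are left exactly as the driver states them; the function name is illustrative.

/* Bounds-checked lookup of a synchronous transfer period factor by index,
 * using the tables shown above (non-Ultra: 8 entries, Ultra: 16 entries). */
#include <stdio.h>

static const unsigned char syn_xfer_period[8] = {
    25, 30, 35, 40, 50, 60, 70, 85
};
static const unsigned char syn_ultra_xfer_period[16] = {
    12, 19, 25, 32, 38, 44, 50, 57, 63, 69, 75, 82, 88, 94, 100, 107
};

static int period_for_index(unsigned idx, int ultra)
{
    if (ultra)
        return idx < 16 ? syn_ultra_xfer_period[idx] : -1;
    return idx < 8 ? syn_xfer_period[idx] : -1;
}

int main(void)
{
    printf("index 2: non-ultra %d, ultra %d\n",
           period_for_index(2, 0), period_for_index(2, 1));
    return 0;
}
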
+ */ +#define ASC_IERR_NO_CARRIER 0x0001 /* No more carrier memory */ +#define ASC_IERR_MCODE_CHKSUM 0x0002 /* micro code check sum error */ +#define ASC_IERR_SET_PC_ADDR 0x0004 +#define ASC_IERR_START_STOP_CHIP 0x0008 /* start/stop chip failed */ +#define ASC_IERR_ILLEGAL_CONNECTION 0x0010 /* Illegal cable connection */ +#define ASC_IERR_SINGLE_END_DEVICE 0x0020 /* SE device on DIFF bus */ +#define ASC_IERR_REVERSED_CABLE 0x0040 /* Narrow flat cable reversed */ +#define ASC_IERR_SET_SCSI_ID 0x0080 /* set SCSI ID failed */ +#define ASC_IERR_HVD_DEVICE 0x0100 /* HVD device on LVD port */ +#define ASC_IERR_BAD_SIGNATURE 0x0200 /* signature not found */ +#define ASC_IERR_NO_BUS_TYPE 0x0400 +#define ASC_IERR_BIST_PRE_TEST 0x0800 /* BIST pre-test error */ +#define ASC_IERR_BIST_RAM_TEST 0x1000 /* BIST RAM test error */ +#define ASC_IERR_BAD_CHIPTYPE 0x2000 /* Invalid chip_type setting */ + +#define ASC_DEF_MAX_TOTAL_QNG (0xF0) +#define ASC_MIN_TAG_Q_PER_DVC (0x04) +#define ASC_MIN_FREE_Q (0x02) +#define ASC_MIN_TOTAL_QNG ((ASC_MAX_SG_QUEUE)+(ASC_MIN_FREE_Q)) +#define ASC_MAX_TOTAL_QNG 240 +#define ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG 16 +#define ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG 8 +#define ASC_MAX_PCI_INRAM_TOTAL_QNG 20 +#define ASC_MAX_INRAM_TAG_QNG 16 +#define ASC_IOADR_GAP 0x10 +#define ASC_SYN_MAX_OFFSET 0x0F +#define ASC_DEF_SDTR_OFFSET 0x0F +#define ASC_SDTR_ULTRA_PCI_10MB_INDEX 0x02 +#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41 + +/* The narrow chip only supports a limited selection of transfer rates. + * These are encoded in the range 0..7 or 0..15 depending whether the chip + * is Ultra-capable or not. These tables let us convert from one to the other. + */ +static const unsigned char asc_syn_xfer_period[8] = { + 25, 30, 35, 40, 50, 60, 70, 85 +}; + +static const unsigned char asc_syn_ultra_xfer_period[16] = { + 12, 19, 25, 32, 38, 44, 50, 57, 63, 69, 75, 82, 88, 94, 100, 107 +}; + +typedef struct ext_msg { + uchar msg_type; + uchar msg_len; + uchar msg_req; + union { + struct { + uchar sdtr_xfer_period; + uchar sdtr_req_ack_offset; + } sdtr; + struct { + uchar wdtr_width; + } wdtr; + struct { + uchar mdp_b3; + uchar mdp_b2; + uchar mdp_b1; + uchar mdp_b0; + } mdp; + } u_ext_msg; + uchar res; +} EXT_MSG; + +#define xfer_period u_ext_msg.sdtr.sdtr_xfer_period +#define req_ack_offset u_ext_msg.sdtr.sdtr_req_ack_offset +#define wdtr_width u_ext_msg.wdtr.wdtr_width +#define mdp_b3 u_ext_msg.mdp_b3 +#define mdp_b2 u_ext_msg.mdp_b2 +#define mdp_b1 u_ext_msg.mdp_b1 +#define mdp_b0 u_ext_msg.mdp_b0 + +typedef struct asc_dvc_cfg { + ASC_SCSI_BIT_ID_TYPE can_tagged_qng; + ASC_SCSI_BIT_ID_TYPE cmd_qng_enabled; + ASC_SCSI_BIT_ID_TYPE disc_enable; + ASC_SCSI_BIT_ID_TYPE sdtr_enable; + uchar chip_scsi_id; + uchar chip_version; + ushort mcode_date; + ushort mcode_version; + uchar max_tag_qng[ASC_MAX_TID + 1]; + uchar sdtr_period_offset[ASC_MAX_TID + 1]; + uchar adapter_info[6]; +} ASC_DVC_CFG; + +#define ASC_DEF_DVC_CNTL 0xFFFF +#define ASC_DEF_CHIP_SCSI_ID 7 +#define ASC_DEF_ISA_DMA_SPEED 4 +#define ASC_INIT_STATE_BEG_GET_CFG 0x0001 +#define ASC_INIT_STATE_END_GET_CFG 0x0002 +#define ASC_INIT_STATE_BEG_SET_CFG 0x0004 +#define ASC_INIT_STATE_END_SET_CFG 0x0008 +#define ASC_INIT_STATE_BEG_LOAD_MC 0x0010 +#define ASC_INIT_STATE_END_LOAD_MC 0x0020 +#define ASC_INIT_STATE_BEG_INQUIRY 0x0040 +#define ASC_INIT_STATE_END_INQUIRY 0x0080 +#define ASC_INIT_RESET_SCSI_DONE 0x0100 +#define ASC_INIT_STATE_WITHOUT_EEP 0x8000 +#define ASC_BUG_FIX_IF_NOT_DWB 0x0001 +#define ASC_BUG_FIX_ASYN_USE_SYN 0x0002 +#define 
ASC_MIN_TAGGED_CMD 7 +#define ASC_MAX_SCSI_RESET_WAIT 30 +#define ASC_OVERRUN_BSIZE 64 + +struct asc_dvc_var; /* Forward Declaration. */ + +typedef struct asc_dvc_var { + PortAddr iop_base; + ushort err_code; + ushort dvc_cntl; + ushort bug_fix_cntl; + ushort bus_type; + ASC_SCSI_BIT_ID_TYPE init_sdtr; + ASC_SCSI_BIT_ID_TYPE sdtr_done; + ASC_SCSI_BIT_ID_TYPE use_tagged_qng; + ASC_SCSI_BIT_ID_TYPE unit_not_ready; + ASC_SCSI_BIT_ID_TYPE queue_full_or_busy; + ASC_SCSI_BIT_ID_TYPE start_motor; + uchar *overrun_buf; + dma_addr_t overrun_dma; + uchar scsi_reset_wait; + uchar chip_no; + bool is_in_int; + uchar max_total_qng; + uchar cur_total_qng; + uchar in_critical_cnt; + uchar last_q_shortage; + ushort init_state; + uchar cur_dvc_qng[ASC_MAX_TID + 1]; + uchar max_dvc_qng[ASC_MAX_TID + 1]; + ASC_SCSI_Q *scsiq_busy_head[ASC_MAX_TID + 1]; + ASC_SCSI_Q *scsiq_busy_tail[ASC_MAX_TID + 1]; + const uchar *sdtr_period_tbl; + ASC_DVC_CFG *cfg; + ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer_always; + char redo_scam; + ushort res2; + uchar dos_int13_table[ASC_MAX_TID + 1]; + unsigned int max_dma_count; + ASC_SCSI_BIT_ID_TYPE no_scam; + ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer; + uchar min_sdtr_index; + uchar max_sdtr_index; + struct asc_board *drv_ptr; + unsigned int uc_break; +} ASC_DVC_VAR; + +typedef struct asc_dvc_inq_info { + uchar type[ASC_MAX_TID + 1][ASC_MAX_LUN + 1]; +} ASC_DVC_INQ_INFO; + +typedef struct asc_cap_info { + u32 lba; + u32 blk_size; +} ASC_CAP_INFO; + +typedef struct asc_cap_info_array { + ASC_CAP_INFO cap_info[ASC_MAX_TID + 1][ASC_MAX_LUN + 1]; +} ASC_CAP_INFO_ARRAY; + +#define ASC_MCNTL_NO_SEL_TIMEOUT (ushort)0x0001 +#define ASC_MCNTL_NULL_TARGET (ushort)0x0002 +#define ASC_CNTL_INITIATOR (ushort)0x0001 +#define ASC_CNTL_BIOS_GT_1GB (ushort)0x0002 +#define ASC_CNTL_BIOS_GT_2_DISK (ushort)0x0004 +#define ASC_CNTL_BIOS_REMOVABLE (ushort)0x0008 +#define ASC_CNTL_NO_SCAM (ushort)0x0010 +#define ASC_CNTL_INT_MULTI_Q (ushort)0x0080 +#define ASC_CNTL_NO_LUN_SUPPORT (ushort)0x0040 +#define ASC_CNTL_NO_VERIFY_COPY (ushort)0x0100 +#define ASC_CNTL_RESET_SCSI (ushort)0x0200 +#define ASC_CNTL_INIT_INQUIRY (ushort)0x0400 +#define ASC_CNTL_INIT_VERBOSE (ushort)0x0800 +#define ASC_CNTL_SCSI_PARITY (ushort)0x1000 +#define ASC_CNTL_BURST_MODE (ushort)0x2000 +#define ASC_CNTL_SDTR_ENABLE_ULTRA (ushort)0x4000 +#define ASC_EEP_DVC_CFG_BEG_VL 2 +#define ASC_EEP_MAX_DVC_ADDR_VL 15 +#define ASC_EEP_DVC_CFG_BEG 32 +#define ASC_EEP_MAX_DVC_ADDR 45 +#define ASC_EEP_MAX_RETRY 20 + +/* + * These macros keep the chip SCSI id bitfields in board order. C bitfields + * aren't portable between big and little-endian platforms so they are not used. 
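
[Editor's note] The note above about keeping the chip SCSI id bitfields in board order, instead of relying on non-portable C bitfields, refers to the ASC_EEP_GET/SET accessors defined just after it: the EEPROM's id_speed byte keeps the chip SCSI id in its low nibble and the ISA DMA speed in its high nibble, and the macros mask and shift accordingly. A small self-contained equivalent, with names invented for the example:

/* Equivalent of the ASC_EEP_* id_speed accessors below: SCSI id in the low
 * nibble, ISA DMA speed in the high nibble of a single EEPROM byte. */
#include <stdio.h>

struct eep_cfg { unsigned char id_speed; };

static unsigned get_chip_id(const struct eep_cfg *c) { return c->id_speed & 0x0f; }
static unsigned get_dma_spd(const struct eep_cfg *c) { return (c->id_speed & 0xf0) >> 4; }

static void set_chip_id(struct eep_cfg *c, unsigned sid)
{
    c->id_speed = (unsigned char)((c->id_speed & 0xf0) | (sid & 0x0f));
}

static void set_dma_spd(struct eep_cfg *c, unsigned spd)
{
    c->id_speed = (unsigned char)((c->id_speed & 0x0f) | ((spd & 0x0f) << 4));
}

int main(void)
{
    struct eep_cfg cfg = { 0 };

    set_chip_id(&cfg, 7);
    set_dma_spd(&cfg, 4);
    printf("id_speed=0x%02x id=%u dma=%u\n",
           (unsigned)cfg.id_speed, get_chip_id(&cfg), get_dma_spd(&cfg));
    return 0;
}
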
+ */ + +#define ASC_EEP_GET_CHIP_ID(cfg) ((cfg)->id_speed & 0x0f) +#define ASC_EEP_GET_DMA_SPD(cfg) (((cfg)->id_speed & 0xf0) >> 4) +#define ASC_EEP_SET_CHIP_ID(cfg, sid) \ + ((cfg)->id_speed = ((cfg)->id_speed & 0xf0) | ((sid) & ASC_MAX_TID)) +#define ASC_EEP_SET_DMA_SPD(cfg, spd) \ + ((cfg)->id_speed = ((cfg)->id_speed & 0x0f) | ((spd) & 0x0f) << 4) + +typedef struct asceep_config { + ushort cfg_lsw; + ushort cfg_msw; + uchar init_sdtr; + uchar disc_enable; + uchar use_cmd_qng; + uchar start_motor; + uchar max_total_qng; + uchar max_tag_qng; + uchar bios_scan; + uchar power_up_wait; + uchar no_scam; + uchar id_speed; /* low order 4 bits is chip scsi id */ + /* high order 4 bits is isa dma speed */ + uchar dos_int13_table[ASC_MAX_TID + 1]; + uchar adapter_info[6]; + ushort cntl; + ushort chksum; +} ASCEEP_CONFIG; + +#define ASC_EEP_CMD_READ 0x80 +#define ASC_EEP_CMD_WRITE 0x40 +#define ASC_EEP_CMD_WRITE_ABLE 0x30 +#define ASC_EEP_CMD_WRITE_DISABLE 0x00 +#define ASCV_MSGOUT_BEG 0x0000 +#define ASCV_MSGOUT_SDTR_PERIOD (ASCV_MSGOUT_BEG+3) +#define ASCV_MSGOUT_SDTR_OFFSET (ASCV_MSGOUT_BEG+4) +#define ASCV_BREAK_SAVED_CODE (ushort)0x0006 +#define ASCV_MSGIN_BEG (ASCV_MSGOUT_BEG+8) +#define ASCV_MSGIN_SDTR_PERIOD (ASCV_MSGIN_BEG+3) +#define ASCV_MSGIN_SDTR_OFFSET (ASCV_MSGIN_BEG+4) +#define ASCV_SDTR_DATA_BEG (ASCV_MSGIN_BEG+8) +#define ASCV_SDTR_DONE_BEG (ASCV_SDTR_DATA_BEG+8) +#define ASCV_MAX_DVC_QNG_BEG (ushort)0x0020 +#define ASCV_BREAK_ADDR (ushort)0x0028 +#define ASCV_BREAK_NOTIFY_COUNT (ushort)0x002A +#define ASCV_BREAK_CONTROL (ushort)0x002C +#define ASCV_BREAK_HIT_COUNT (ushort)0x002E + +#define ASCV_ASCDVC_ERR_CODE_W (ushort)0x0030 +#define ASCV_MCODE_CHKSUM_W (ushort)0x0032 +#define ASCV_MCODE_SIZE_W (ushort)0x0034 +#define ASCV_STOP_CODE_B (ushort)0x0036 +#define ASCV_DVC_ERR_CODE_B (ushort)0x0037 +#define ASCV_OVERRUN_PADDR_D (ushort)0x0038 +#define ASCV_OVERRUN_BSIZE_D (ushort)0x003C +#define ASCV_HALTCODE_W (ushort)0x0040 +#define ASCV_CHKSUM_W (ushort)0x0042 +#define ASCV_MC_DATE_W (ushort)0x0044 +#define ASCV_MC_VER_W (ushort)0x0046 +#define ASCV_NEXTRDY_B (ushort)0x0048 +#define ASCV_DONENEXT_B (ushort)0x0049 +#define ASCV_USE_TAGGED_QNG_B (ushort)0x004A +#define ASCV_SCSIBUSY_B (ushort)0x004B +#define ASCV_Q_DONE_IN_PROGRESS_B (ushort)0x004C +#define ASCV_CURCDB_B (ushort)0x004D +#define ASCV_RCLUN_B (ushort)0x004E +#define ASCV_BUSY_QHEAD_B (ushort)0x004F +#define ASCV_DISC1_QHEAD_B (ushort)0x0050 +#define ASCV_DISC_ENABLE_B (ushort)0x0052 +#define ASCV_CAN_TAGGED_QNG_B (ushort)0x0053 +#define ASCV_HOSTSCSI_ID_B (ushort)0x0055 +#define ASCV_MCODE_CNTL_B (ushort)0x0056 +#define ASCV_NULL_TARGET_B (ushort)0x0057 +#define ASCV_FREE_Q_HEAD_W (ushort)0x0058 +#define ASCV_DONE_Q_TAIL_W (ushort)0x005A +#define ASCV_FREE_Q_HEAD_B (ushort)(ASCV_FREE_Q_HEAD_W+1) +#define ASCV_DONE_Q_TAIL_B (ushort)(ASCV_DONE_Q_TAIL_W+1) +#define ASCV_HOST_FLAG_B (ushort)0x005D +#define ASCV_TOTAL_READY_Q_B (ushort)0x0064 +#define ASCV_VER_SERIAL_B (ushort)0x0065 +#define ASCV_HALTCODE_SAVED_W (ushort)0x0066 +#define ASCV_WTM_FLAG_B (ushort)0x0068 +#define ASCV_RISC_FLAG_B (ushort)0x006A +#define ASCV_REQ_SG_LIST_QP (ushort)0x006B +#define ASC_HOST_FLAG_IN_ISR 0x01 +#define ASC_HOST_FLAG_ACK_INT 0x02 +#define ASC_RISC_FLAG_GEN_INT 0x01 +#define ASC_RISC_FLAG_REQ_SG_LIST 0x02 +#define IOP_CTRL (0x0F) +#define IOP_STATUS (0x0E) +#define IOP_INT_ACK IOP_STATUS +#define IOP_REG_IFC (0x0D) +#define IOP_SYN_OFFSET (0x0B) +#define IOP_EXTRA_CONTROL (0x0D) +#define IOP_REG_PC (0x0C) +#define IOP_RAM_ADDR 
(0x0A) +#define IOP_RAM_DATA (0x08) +#define IOP_EEP_DATA (0x06) +#define IOP_EEP_CMD (0x07) +#define IOP_VERSION (0x03) +#define IOP_CONFIG_HIGH (0x04) +#define IOP_CONFIG_LOW (0x02) +#define IOP_SIG_BYTE (0x01) +#define IOP_SIG_WORD (0x00) +#define IOP_REG_DC1 (0x0E) +#define IOP_REG_DC0 (0x0C) +#define IOP_REG_SB (0x0B) +#define IOP_REG_DA1 (0x0A) +#define IOP_REG_DA0 (0x08) +#define IOP_REG_SC (0x09) +#define IOP_DMA_SPEED (0x07) +#define IOP_REG_FLAG (0x07) +#define IOP_FIFO_H (0x06) +#define IOP_FIFO_L (0x04) +#define IOP_REG_ID (0x05) +#define IOP_REG_QP (0x03) +#define IOP_REG_IH (0x02) +#define IOP_REG_IX (0x01) +#define IOP_REG_AX (0x00) +#define IFC_REG_LOCK (0x00) +#define IFC_REG_UNLOCK (0x09) +#define IFC_WR_EN_FILTER (0x10) +#define IFC_RD_NO_EEPROM (0x10) +#define IFC_SLEW_RATE (0x20) +#define IFC_ACT_NEG (0x40) +#define IFC_INP_FILTER (0x80) +#define IFC_INIT_DEFAULT (IFC_ACT_NEG | IFC_REG_UNLOCK) +#define SC_SEL (uchar)(0x80) +#define SC_BSY (uchar)(0x40) +#define SC_ACK (uchar)(0x20) +#define SC_REQ (uchar)(0x10) +#define SC_ATN (uchar)(0x08) +#define SC_IO (uchar)(0x04) +#define SC_CD (uchar)(0x02) +#define SC_MSG (uchar)(0x01) +#define SEC_SCSI_CTL (uchar)(0x80) +#define SEC_ACTIVE_NEGATE (uchar)(0x40) +#define SEC_SLEW_RATE (uchar)(0x20) +#define SEC_ENABLE_FILTER (uchar)(0x10) +#define ASC_HALT_EXTMSG_IN (ushort)0x8000 +#define ASC_HALT_CHK_CONDITION (ushort)0x8100 +#define ASC_HALT_SS_QUEUE_FULL (ushort)0x8200 +#define ASC_HALT_DISABLE_ASYN_USE_SYN_FIX (ushort)0x8300 +#define ASC_HALT_ENABLE_ASYN_USE_SYN_FIX (ushort)0x8400 +#define ASC_HALT_SDTR_REJECTED (ushort)0x4000 +#define ASC_HALT_HOST_COPY_SG_LIST_TO_RISC ( ushort )0x2000 +#define ASC_MAX_QNO 0xF8 +#define ASC_DATA_SEC_BEG (ushort)0x0080 +#define ASC_DATA_SEC_END (ushort)0x0080 +#define ASC_CODE_SEC_BEG (ushort)0x0080 +#define ASC_CODE_SEC_END (ushort)0x0080 +#define ASC_QADR_BEG (0x4000) +#define ASC_QADR_USED (ushort)(ASC_MAX_QNO * 64) +#define ASC_QADR_END (ushort)0x7FFF +#define ASC_QLAST_ADR (ushort)0x7FC0 +#define ASC_QBLK_SIZE 0x40 +#define ASC_BIOS_DATA_QBEG 0xF8 +#define ASC_MIN_ACTIVE_QNO 0x01 +#define ASC_QLINK_END 0xFF +#define ASC_EEPROM_WORDS 0x10 +#define ASC_MAX_MGS_LEN 0x10 +#define ASC_BIOS_ADDR_DEF 0xDC00 +#define ASC_BIOS_SIZE 0x3800 +#define ASC_BIOS_RAM_OFF 0x3800 +#define ASC_BIOS_RAM_SIZE 0x800 +#define ASC_BIOS_MIN_ADDR 0xC000 +#define ASC_BIOS_MAX_ADDR 0xEC00 +#define ASC_BIOS_BANK_SIZE 0x0400 +#define ASC_MCODE_START_ADDR 0x0080 +#define ASC_CFG0_HOST_INT_ON 0x0020 +#define ASC_CFG0_BIOS_ON 0x0040 +#define ASC_CFG0_VERA_BURST_ON 0x0080 +#define ASC_CFG0_SCSI_PARITY_ON 0x0800 +#define ASC_CFG1_SCSI_TARGET_ON 0x0080 +#define ASC_CFG1_LRAM_8BITS_ON 0x0800 +#define ASC_CFG_MSW_CLR_MASK 0x3080 +#define CSW_TEST1 (ASC_CS_TYPE)0x8000 +#define CSW_AUTO_CONFIG (ASC_CS_TYPE)0x4000 +#define CSW_RESERVED1 (ASC_CS_TYPE)0x2000 +#define CSW_IRQ_WRITTEN (ASC_CS_TYPE)0x1000 +#define CSW_33MHZ_SELECTED (ASC_CS_TYPE)0x0800 +#define CSW_TEST2 (ASC_CS_TYPE)0x0400 +#define CSW_TEST3 (ASC_CS_TYPE)0x0200 +#define CSW_RESERVED2 (ASC_CS_TYPE)0x0100 +#define CSW_DMA_DONE (ASC_CS_TYPE)0x0080 +#define CSW_FIFO_RDY (ASC_CS_TYPE)0x0040 +#define CSW_EEP_READ_DONE (ASC_CS_TYPE)0x0020 +#define CSW_HALTED (ASC_CS_TYPE)0x0010 +#define CSW_SCSI_RESET_ACTIVE (ASC_CS_TYPE)0x0008 +#define CSW_PARITY_ERR (ASC_CS_TYPE)0x0004 +#define CSW_SCSI_RESET_LATCH (ASC_CS_TYPE)0x0002 +#define CSW_INT_PENDING (ASC_CS_TYPE)0x0001 +#define CIW_CLR_SCSI_RESET_INT (ASC_CS_TYPE)0x1000 +#define CIW_INT_ACK (ASC_CS_TYPE)0x0100 +#define 
CIW_TEST1 (ASC_CS_TYPE)0x0200 +#define CIW_TEST2 (ASC_CS_TYPE)0x0400 +#define CIW_SEL_33MHZ (ASC_CS_TYPE)0x0800 +#define CIW_IRQ_ACT (ASC_CS_TYPE)0x1000 +#define CC_CHIP_RESET (uchar)0x80 +#define CC_SCSI_RESET (uchar)0x40 +#define CC_HALT (uchar)0x20 +#define CC_SINGLE_STEP (uchar)0x10 +#define CC_DMA_ABLE (uchar)0x08 +#define CC_TEST (uchar)0x04 +#define CC_BANK_ONE (uchar)0x02 +#define CC_DIAG (uchar)0x01 +#define ASC_1000_ID0W 0x04C1 +#define ASC_1000_ID0W_FIX 0x00C1 +#define ASC_1000_ID1B 0x25 +#define ASC_EISA_REV_IOP_MASK (0x0C83) +#define ASC_EISA_CFG_IOP_MASK (0x0C86) +#define ASC_GET_EISA_SLOT(iop) (PortAddr)((iop) & 0xF000) +#define INS_HALTINT (ushort)0x6281 +#define INS_HALT (ushort)0x6280 +#define INS_SINT (ushort)0x6200 +#define INS_RFLAG_WTM (ushort)0x7380 +#define ASC_MC_SAVE_CODE_WSIZE 0x500 +#define ASC_MC_SAVE_DATA_WSIZE 0x40 + +typedef struct asc_mc_saved { + ushort data[ASC_MC_SAVE_DATA_WSIZE]; + ushort code[ASC_MC_SAVE_CODE_WSIZE]; +} ASC_MC_SAVED; + +#define AscGetQDoneInProgress(port) AscReadLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B) +#define AscPutQDoneInProgress(port, val) AscWriteLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B, val) +#define AscGetVarFreeQHead(port) AscReadLramWord((port), ASCV_FREE_Q_HEAD_W) +#define AscGetVarDoneQTail(port) AscReadLramWord((port), ASCV_DONE_Q_TAIL_W) +#define AscPutVarFreeQHead(port, val) AscWriteLramWord((port), ASCV_FREE_Q_HEAD_W, val) +#define AscPutVarDoneQTail(port, val) AscWriteLramWord((port), ASCV_DONE_Q_TAIL_W, val) +#define AscGetRiscVarFreeQHead(port) AscReadLramByte((port), ASCV_NEXTRDY_B) +#define AscGetRiscVarDoneQTail(port) AscReadLramByte((port), ASCV_DONENEXT_B) +#define AscPutRiscVarFreeQHead(port, val) AscWriteLramByte((port), ASCV_NEXTRDY_B, val) +#define AscPutRiscVarDoneQTail(port, val) AscWriteLramByte((port), ASCV_DONENEXT_B, val) +#define AscPutMCodeSDTRDoneAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)id), (data)) +#define AscGetMCodeSDTRDoneAtID(port, id) AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)id)) +#define AscPutMCodeInitSDTRAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)id), data) +#define AscGetMCodeInitSDTRAtID(port, id) AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)id)) +#define AscGetChipSignatureByte(port) (uchar)inp((port)+IOP_SIG_BYTE) +#define AscGetChipSignatureWord(port) (ushort)inpw((port)+IOP_SIG_WORD) +#define AscGetChipVerNo(port) (uchar)inp((port)+IOP_VERSION) +#define AscGetChipCfgLsw(port) (ushort)inpw((port)+IOP_CONFIG_LOW) +#define AscGetChipCfgMsw(port) (ushort)inpw((port)+IOP_CONFIG_HIGH) +#define AscSetChipCfgLsw(port, data) outpw((port)+IOP_CONFIG_LOW, data) +#define AscSetChipCfgMsw(port, data) outpw((port)+IOP_CONFIG_HIGH, data) +#define AscGetChipEEPCmd(port) (uchar)inp((port)+IOP_EEP_CMD) +#define AscSetChipEEPCmd(port, data) outp((port)+IOP_EEP_CMD, data) +#define AscGetChipEEPData(port) (ushort)inpw((port)+IOP_EEP_DATA) +#define AscSetChipEEPData(port, data) outpw((port)+IOP_EEP_DATA, data) +#define AscGetChipLramAddr(port) (ushort)inpw((PortAddr)((port)+IOP_RAM_ADDR)) +#define AscSetChipLramAddr(port, addr) outpw((PortAddr)((port)+IOP_RAM_ADDR), addr) +#define AscGetChipLramData(port) (ushort)inpw((port)+IOP_RAM_DATA) +#define AscSetChipLramData(port, data) outpw((port)+IOP_RAM_DATA, data) +#define AscGetChipIFC(port) (uchar)inp((port)+IOP_REG_IFC) +#define AscSetChipIFC(port, data) outp((port)+IOP_REG_IFC, data) +#define 
AscGetChipStatus(port) (ASC_CS_TYPE)inpw((port)+IOP_STATUS) +#define AscSetChipStatus(port, cs_val) outpw((port)+IOP_STATUS, cs_val) +#define AscGetChipControl(port) (uchar)inp((port)+IOP_CTRL) +#define AscSetChipControl(port, cc_val) outp((port)+IOP_CTRL, cc_val) +#define AscGetChipSyn(port) (uchar)inp((port)+IOP_SYN_OFFSET) +#define AscSetChipSyn(port, data) outp((port)+IOP_SYN_OFFSET, data) +#define AscSetPCAddr(port, data) outpw((port)+IOP_REG_PC, data) +#define AscGetPCAddr(port) (ushort)inpw((port)+IOP_REG_PC) +#define AscIsIntPending(port) (AscGetChipStatus(port) & (CSW_INT_PENDING | CSW_SCSI_RESET_LATCH)) +#define AscGetChipScsiID(port) ((AscGetChipCfgLsw(port) >> 8) & ASC_MAX_TID) +#define AscGetExtraControl(port) (uchar)inp((port)+IOP_EXTRA_CONTROL) +#define AscSetExtraControl(port, data) outp((port)+IOP_EXTRA_CONTROL, data) +#define AscReadChipAX(port) (ushort)inpw((port)+IOP_REG_AX) +#define AscWriteChipAX(port, data) outpw((port)+IOP_REG_AX, data) +#define AscReadChipIX(port) (uchar)inp((port)+IOP_REG_IX) +#define AscWriteChipIX(port, data) outp((port)+IOP_REG_IX, data) +#define AscReadChipIH(port) (ushort)inpw((port)+IOP_REG_IH) +#define AscWriteChipIH(port, data) outpw((port)+IOP_REG_IH, data) +#define AscReadChipQP(port) (uchar)inp((port)+IOP_REG_QP) +#define AscWriteChipQP(port, data) outp((port)+IOP_REG_QP, data) +#define AscReadChipFIFO_L(port) (ushort)inpw((port)+IOP_REG_FIFO_L) +#define AscWriteChipFIFO_L(port, data) outpw((port)+IOP_REG_FIFO_L, data) +#define AscReadChipFIFO_H(port) (ushort)inpw((port)+IOP_REG_FIFO_H) +#define AscWriteChipFIFO_H(port, data) outpw((port)+IOP_REG_FIFO_H, data) +#define AscReadChipDmaSpeed(port) (uchar)inp((port)+IOP_DMA_SPEED) +#define AscWriteChipDmaSpeed(port, data) outp((port)+IOP_DMA_SPEED, data) +#define AscReadChipDA0(port) (ushort)inpw((port)+IOP_REG_DA0) +#define AscWriteChipDA0(port) outpw((port)+IOP_REG_DA0, data) +#define AscReadChipDA1(port) (ushort)inpw((port)+IOP_REG_DA1) +#define AscWriteChipDA1(port) outpw((port)+IOP_REG_DA1, data) +#define AscReadChipDC0(port) (ushort)inpw((port)+IOP_REG_DC0) +#define AscWriteChipDC0(port) outpw((port)+IOP_REG_DC0, data) +#define AscReadChipDC1(port) (ushort)inpw((port)+IOP_REG_DC1) +#define AscWriteChipDC1(port) outpw((port)+IOP_REG_DC1, data) +#define AscReadChipDvcID(port) (uchar)inp((port)+IOP_REG_ID) +#define AscWriteChipDvcID(port, data) outp((port)+IOP_REG_ID, data) + +#define AdvPortAddr void __iomem * /* Virtual memory address size */ + +/* + * Define Adv Library required memory access macros. + */ +#define ADV_MEM_READB(addr) readb(addr) +#define ADV_MEM_READW(addr) readw(addr) +#define ADV_MEM_WRITEB(addr, byte) writeb(byte, addr) +#define ADV_MEM_WRITEW(addr, word) writew(word, addr) +#define ADV_MEM_WRITEDW(addr, dword) writel(dword, addr) + +/* + * Define total number of simultaneous maximum element scatter-gather + * request blocks per wide adapter. ASC_DEF_MAX_HOST_QNG (253) is the + * maximum number of outstanding commands per wide host adapter. Each + * command uses one or more ADV_SG_BLOCK each with 15 scatter-gather + * elements. Allow each command to have at least one ADV_SG_BLOCK structure. + * This allows about 15 commands to have the maximum 17 ADV_SG_BLOCK + * structures or 255 scatter-gather elements. + */ +#define ADV_TOT_SG_BLOCK ASC_DEF_MAX_HOST_QNG + +/* + * Define maximum number of scatter-gather elements per request. 
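
[Editor's note] The comment above works out the wide-adapter scatter-gather budget: each ADV_SG_BLOCK carries 15 elements, so a request using the maximum of 255 elements needs ceil(255 / 15) = 17 blocks, and with ASC_DEF_MAX_HOST_QNG (253) blocks reserved in total, roughly 15 such maximal commands can be outstanding at once. The same arithmetic as a tiny check, using only the constants quoted in that comment:

/* Quick check of the scatter-gather block arithmetic described above. */
#include <stdio.h>

#define DEF_MAX_HOST_QNG   253   /* total SG blocks reserved (ADV_TOT_SG_BLOCK) */
#define MAX_SG_LIST        255   /* max SG elements per request */
#define SG_PER_BLOCK        15   /* elements per ADV_SG_BLOCK */

int main(void)
{
    int blocks_per_max_req = (MAX_SG_LIST + SG_PER_BLOCK - 1) / SG_PER_BLOCK;
    int max_sized_cmds     = DEF_MAX_HOST_QNG / blocks_per_max_req;

    printf("%d blocks per maximal request, ~%d such commands at once\n",
           blocks_per_max_req, max_sized_cmds);   /* prints 17 and 14 (about 15) */
    return 0;
}
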
+ */ +#define ADV_MAX_SG_LIST 255 +#define NO_OF_SG_PER_BLOCK 15 + +#define ADV_EEP_DVC_CFG_BEGIN (0x00) +#define ADV_EEP_DVC_CFG_END (0x15) +#define ADV_EEP_DVC_CTL_BEGIN (0x16) /* location of OEM name */ +#define ADV_EEP_MAX_WORD_ADDR (0x1E) + +#define ADV_EEP_DELAY_MS 100 + +#define ADV_EEPROM_BIG_ENDIAN 0x8000 /* EEPROM Bit 15 */ +#define ADV_EEPROM_BIOS_ENABLE 0x4000 /* EEPROM Bit 14 */ +/* + * For the ASC3550 Bit 13 is Termination Polarity control bit. + * For later ICs Bit 13 controls whether the CIS (Card Information + * Service Section) is loaded from EEPROM. + */ +#define ADV_EEPROM_TERM_POL 0x2000 /* EEPROM Bit 13 */ +#define ADV_EEPROM_CIS_LD 0x2000 /* EEPROM Bit 13 */ +/* + * ASC38C1600 Bit 11 + * + * If EEPROM Bit 11 is 0 for Function 0, then Function 0 will specify + * INT A in the PCI Configuration Space Int Pin field. If it is 1, then + * Function 0 will specify INT B. + * + * If EEPROM Bit 11 is 0 for Function 1, then Function 1 will specify + * INT B in the PCI Configuration Space Int Pin field. If it is 1, then + * Function 1 will specify INT A. + */ +#define ADV_EEPROM_INTAB 0x0800 /* EEPROM Bit 11 */ + +typedef struct adveep_3550_config { + /* Word Offset, Description */ + + ushort cfg_lsw; /* 00 power up initialization */ + /* bit 13 set - Term Polarity Control */ + /* bit 14 set - BIOS Enable */ + /* bit 15 set - Big Endian Mode */ + ushort cfg_msw; /* 01 unused */ + ushort disc_enable; /* 02 disconnect enable */ + ushort wdtr_able; /* 03 Wide DTR able */ + ushort sdtr_able; /* 04 Synchronous DTR able */ + ushort start_motor; /* 05 send start up motor */ + ushort tagqng_able; /* 06 tag queuing able */ + ushort bios_scan; /* 07 BIOS device control */ + ushort scam_tolerant; /* 08 no scam */ + + uchar adapter_scsi_id; /* 09 Host Adapter ID */ + uchar bios_boot_delay; /* power up wait */ + + uchar scsi_reset_delay; /* 10 reset delay */ + uchar bios_id_lun; /* first boot device scsi id & lun */ + /* high nibble is lun */ + /* low nibble is scsi id */ + + uchar termination; /* 11 0 - automatic */ + /* 1 - low off / high off */ + /* 2 - low off / high on */ + /* 3 - low on / high on */ + /* There is no low on / high off */ + + uchar reserved1; /* reserved byte (not used) */ + + ushort bios_ctrl; /* 12 BIOS control bits */ + /* bit 0 BIOS don't act as initiator. */ + /* bit 1 BIOS > 1 GB support */ + /* bit 2 BIOS > 2 Disk Support */ + /* bit 3 BIOS don't support removables */ + /* bit 4 BIOS support bootable CD */ + /* bit 5 BIOS scan enabled */ + /* bit 6 BIOS support multiple LUNs */ + /* bit 7 BIOS display of message */ + /* bit 8 SCAM disabled */ + /* bit 9 Reset SCSI bus during init. */ + /* bit 10 */ + /* bit 11 No verbose initialization. 
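The cfg_lsw control bits defined above are plain bit tests against EEPROM word 0. A minimal standalone sketch follows; the helper name and its is_asc3550 flag are illustrative and not driver code, and the constant values are copied from the definitions above.

#include <stdio.h>

/* cfg_lsw control bits, values copied from the definitions above. */
#define ADV_EEPROM_BIG_ENDIAN   0x8000	/* Bit 15 */
#define ADV_EEPROM_BIOS_ENABLE  0x4000	/* Bit 14 */
#define ADV_EEPROM_TERM_POL     0x2000	/* Bit 13, ASC3550 only */
#define ADV_EEPROM_CIS_LD       0x2000	/* Bit 13, later ICs */
#define ADV_EEPROM_INTAB        0x0800	/* Bit 11 */

/* Illustrative decode of EEPROM word 0; is_asc3550 selects the bit-13 meaning. */
static void decode_cfg_lsw(unsigned short cfg_lsw, int is_asc3550)
{
	printf("BIOS enable:   %s\n", (cfg_lsw & ADV_EEPROM_BIOS_ENABLE) ? "yes" : "no");
	printf("big endian:    %s\n", (cfg_lsw & ADV_EEPROM_BIG_ENDIAN) ? "yes" : "no");
	if (is_asc3550)
		printf("term. polarity: %s\n",
		       (cfg_lsw & ADV_EEPROM_TERM_POL) ? "set" : "clear");
	else
		printf("load CIS:      %s\n", (cfg_lsw & ADV_EEPROM_CIS_LD) ? "yes" : "no");
	printf("INT pin swap:  %s\n", (cfg_lsw & ADV_EEPROM_INTAB) ? "yes" : "no");
}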
*/ + /* bit 12 SCSI parity enabled */ + /* bit 13 */ + /* bit 14 */ + /* bit 15 */ + ushort ultra_able; /* 13 ULTRA speed able */ + ushort reserved2; /* 14 reserved */ + uchar max_host_qng; /* 15 maximum host queuing */ + uchar max_dvc_qng; /* maximum per device queuing */ + ushort dvc_cntl; /* 16 control bit for driver */ + ushort bug_fix; /* 17 control bit for bug fix */ + ushort serial_number_word1; /* 18 Board serial number word 1 */ + ushort serial_number_word2; /* 19 Board serial number word 2 */ + ushort serial_number_word3; /* 20 Board serial number word 3 */ + ushort check_sum; /* 21 EEP check sum */ + uchar oem_name[16]; /* 22 OEM name */ + ushort dvc_err_code; /* 30 last device driver error code */ + ushort adv_err_code; /* 31 last uc and Adv Lib error code */ + ushort adv_err_addr; /* 32 last uc error address */ + ushort saved_dvc_err_code; /* 33 saved last dev. driver error code */ + ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */ + ushort saved_adv_err_addr; /* 35 saved last uc error address */ + ushort num_of_err; /* 36 number of error */ +} ADVEEP_3550_CONFIG; + +typedef struct adveep_38C0800_config { + /* Word Offset, Description */ + + ushort cfg_lsw; /* 00 power up initialization */ + /* bit 13 set - Load CIS */ + /* bit 14 set - BIOS Enable */ + /* bit 15 set - Big Endian Mode */ + ushort cfg_msw; /* 01 unused */ + ushort disc_enable; /* 02 disconnect enable */ + ushort wdtr_able; /* 03 Wide DTR able */ + ushort sdtr_speed1; /* 04 SDTR Speed TID 0-3 */ + ushort start_motor; /* 05 send start up motor */ + ushort tagqng_able; /* 06 tag queuing able */ + ushort bios_scan; /* 07 BIOS device control */ + ushort scam_tolerant; /* 08 no scam */ + + uchar adapter_scsi_id; /* 09 Host Adapter ID */ + uchar bios_boot_delay; /* power up wait */ + + uchar scsi_reset_delay; /* 10 reset delay */ + uchar bios_id_lun; /* first boot device scsi id & lun */ + /* high nibble is lun */ + /* low nibble is scsi id */ + + uchar termination_se; /* 11 0 - automatic */ + /* 1 - low off / high off */ + /* 2 - low off / high on */ + /* 3 - low on / high on */ + /* There is no low on / high off */ + + uchar termination_lvd; /* 11 0 - automatic */ + /* 1 - low off / high off */ + /* 2 - low off / high on */ + /* 3 - low on / high on */ + /* There is no low on / high off */ + + ushort bios_ctrl; /* 12 BIOS control bits */ + /* bit 0 BIOS don't act as initiator. */ + /* bit 1 BIOS > 1 GB support */ + /* bit 2 BIOS > 2 Disk Support */ + /* bit 3 BIOS don't support removables */ + /* bit 4 BIOS support bootable CD */ + /* bit 5 BIOS scan enabled */ + /* bit 6 BIOS support multiple LUNs */ + /* bit 7 BIOS display of message */ + /* bit 8 SCAM disabled */ + /* bit 9 Reset SCSI bus during init. */ + /* bit 10 */ + /* bit 11 No verbose initialization. 
*/ + /* bit 12 SCSI parity enabled */ + /* bit 13 */ + /* bit 14 */ + /* bit 15 */ + ushort sdtr_speed2; /* 13 SDTR speed TID 4-7 */ + ushort sdtr_speed3; /* 14 SDTR speed TID 8-11 */ + uchar max_host_qng; /* 15 maximum host queueing */ + uchar max_dvc_qng; /* maximum per device queuing */ + ushort dvc_cntl; /* 16 control bit for driver */ + ushort sdtr_speed4; /* 17 SDTR speed 4 TID 12-15 */ + ushort serial_number_word1; /* 18 Board serial number word 1 */ + ushort serial_number_word2; /* 19 Board serial number word 2 */ + ushort serial_number_word3; /* 20 Board serial number word 3 */ + ushort check_sum; /* 21 EEP check sum */ + uchar oem_name[16]; /* 22 OEM name */ + ushort dvc_err_code; /* 30 last device driver error code */ + ushort adv_err_code; /* 31 last uc and Adv Lib error code */ + ushort adv_err_addr; /* 32 last uc error address */ + ushort saved_dvc_err_code; /* 33 saved last dev. driver error code */ + ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */ + ushort saved_adv_err_addr; /* 35 saved last uc error address */ + ushort reserved36; /* 36 reserved */ + ushort reserved37; /* 37 reserved */ + ushort reserved38; /* 38 reserved */ + ushort reserved39; /* 39 reserved */ + ushort reserved40; /* 40 reserved */ + ushort reserved41; /* 41 reserved */ + ushort reserved42; /* 42 reserved */ + ushort reserved43; /* 43 reserved */ + ushort reserved44; /* 44 reserved */ + ushort reserved45; /* 45 reserved */ + ushort reserved46; /* 46 reserved */ + ushort reserved47; /* 47 reserved */ + ushort reserved48; /* 48 reserved */ + ushort reserved49; /* 49 reserved */ + ushort reserved50; /* 50 reserved */ + ushort reserved51; /* 51 reserved */ + ushort reserved52; /* 52 reserved */ + ushort reserved53; /* 53 reserved */ + ushort reserved54; /* 54 reserved */ + ushort reserved55; /* 55 reserved */ + ushort cisptr_lsw; /* 56 CIS PTR LSW */ + ushort cisprt_msw; /* 57 CIS PTR MSW */ + ushort subsysvid; /* 58 SubSystem Vendor ID */ + ushort subsysid; /* 59 SubSystem ID */ + ushort reserved60; /* 60 reserved */ + ushort reserved61; /* 61 reserved */ + ushort reserved62; /* 62 reserved */ + ushort reserved63; /* 63 reserved */ +} ADVEEP_38C0800_CONFIG; + +typedef struct adveep_38C1600_config { + /* Word Offset, Description */ + + ushort cfg_lsw; /* 00 power up initialization */ + /* bit 11 set - Func. 0 INTB, Func. 1 INTA */ + /* clear - Func. 0 INTA, Func. 
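The four sdtr_speed words cover four target IDs each, which implies a 4-bit speed field per TID. A hedged standalone sketch of that lookup follows; the assumption that the lowest TID of each group occupies the low nibble is ours, not stated in the header.

/* Value copied from the definitions elsewhere in this header. */
#define ADV_MAX_TID 15

/*
 * Illustrative per-target SDTR speed lookup. The word selection follows the
 * field comments (speed1: TID 0-3 ... speed4: TID 12-15); the nibble order
 * within each word is an assumption for the example.
 */
static unsigned char sdtr_speed_for_tid(unsigned short speed1, unsigned short speed2,
					unsigned short speed3, unsigned short speed4,
					unsigned int tid)
{
	unsigned short word;

	switch ((tid & ADV_MAX_TID) / 4) {
	case 0:  word = speed1; break;	/* TID 0-3 */
	case 1:  word = speed2; break;	/* TID 4-7 */
	case 2:  word = speed3; break;	/* TID 8-11 */
	default: word = speed4; break;	/* TID 12-15 */
	}
	return (word >> ((tid & 0x3) * 4)) & 0xF;
}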
1 INTB */ + /* bit 13 set - Load CIS */ + /* bit 14 set - BIOS Enable */ + /* bit 15 set - Big Endian Mode */ + ushort cfg_msw; /* 01 unused */ + ushort disc_enable; /* 02 disconnect enable */ + ushort wdtr_able; /* 03 Wide DTR able */ + ushort sdtr_speed1; /* 04 SDTR Speed TID 0-3 */ + ushort start_motor; /* 05 send start up motor */ + ushort tagqng_able; /* 06 tag queuing able */ + ushort bios_scan; /* 07 BIOS device control */ + ushort scam_tolerant; /* 08 no scam */ + + uchar adapter_scsi_id; /* 09 Host Adapter ID */ + uchar bios_boot_delay; /* power up wait */ + + uchar scsi_reset_delay; /* 10 reset delay */ + uchar bios_id_lun; /* first boot device scsi id & lun */ + /* high nibble is lun */ + /* low nibble is scsi id */ + + uchar termination_se; /* 11 0 - automatic */ + /* 1 - low off / high off */ + /* 2 - low off / high on */ + /* 3 - low on / high on */ + /* There is no low on / high off */ + + uchar termination_lvd; /* 11 0 - automatic */ + /* 1 - low off / high off */ + /* 2 - low off / high on */ + /* 3 - low on / high on */ + /* There is no low on / high off */ + + ushort bios_ctrl; /* 12 BIOS control bits */ + /* bit 0 BIOS don't act as initiator. */ + /* bit 1 BIOS > 1 GB support */ + /* bit 2 BIOS > 2 Disk Support */ + /* bit 3 BIOS don't support removables */ + /* bit 4 BIOS support bootable CD */ + /* bit 5 BIOS scan enabled */ + /* bit 6 BIOS support multiple LUNs */ + /* bit 7 BIOS display of message */ + /* bit 8 SCAM disabled */ + /* bit 9 Reset SCSI bus during init. */ + /* bit 10 Basic Integrity Checking disabled */ + /* bit 11 No verbose initialization. */ + /* bit 12 SCSI parity enabled */ + /* bit 13 AIPP (Asyn. Info. Ph. Prot.) dis. */ + /* bit 14 */ + /* bit 15 */ + ushort sdtr_speed2; /* 13 SDTR speed TID 4-7 */ + ushort sdtr_speed3; /* 14 SDTR speed TID 8-11 */ + uchar max_host_qng; /* 15 maximum host queueing */ + uchar max_dvc_qng; /* maximum per device queuing */ + ushort dvc_cntl; /* 16 control bit for driver */ + ushort sdtr_speed4; /* 17 SDTR speed 4 TID 12-15 */ + ushort serial_number_word1; /* 18 Board serial number word 1 */ + ushort serial_number_word2; /* 19 Board serial number word 2 */ + ushort serial_number_word3; /* 20 Board serial number word 3 */ + ushort check_sum; /* 21 EEP check sum */ + uchar oem_name[16]; /* 22 OEM name */ + ushort dvc_err_code; /* 30 last device driver error code */ + ushort adv_err_code; /* 31 last uc and Adv Lib error code */ + ushort adv_err_addr; /* 32 last uc error address */ + ushort saved_dvc_err_code; /* 33 saved last dev. 
driver error code */ + ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */ + ushort saved_adv_err_addr; /* 35 saved last uc error address */ + ushort reserved36; /* 36 reserved */ + ushort reserved37; /* 37 reserved */ + ushort reserved38; /* 38 reserved */ + ushort reserved39; /* 39 reserved */ + ushort reserved40; /* 40 reserved */ + ushort reserved41; /* 41 reserved */ + ushort reserved42; /* 42 reserved */ + ushort reserved43; /* 43 reserved */ + ushort reserved44; /* 44 reserved */ + ushort reserved45; /* 45 reserved */ + ushort reserved46; /* 46 reserved */ + ushort reserved47; /* 47 reserved */ + ushort reserved48; /* 48 reserved */ + ushort reserved49; /* 49 reserved */ + ushort reserved50; /* 50 reserved */ + ushort reserved51; /* 51 reserved */ + ushort reserved52; /* 52 reserved */ + ushort reserved53; /* 53 reserved */ + ushort reserved54; /* 54 reserved */ + ushort reserved55; /* 55 reserved */ + ushort cisptr_lsw; /* 56 CIS PTR LSW */ + ushort cisprt_msw; /* 57 CIS PTR MSW */ + ushort subsysvid; /* 58 SubSystem Vendor ID */ + ushort subsysid; /* 59 SubSystem ID */ + ushort reserved60; /* 60 reserved */ + ushort reserved61; /* 61 reserved */ + ushort reserved62; /* 62 reserved */ + ushort reserved63; /* 63 reserved */ +} ADVEEP_38C1600_CONFIG; + +/* + * EEPROM Commands + */ +#define ASC_EEP_CMD_DONE 0x0200 + +/* bios_ctrl */ +#define BIOS_CTRL_BIOS 0x0001 +#define BIOS_CTRL_EXTENDED_XLAT 0x0002 +#define BIOS_CTRL_GT_2_DISK 0x0004 +#define BIOS_CTRL_BIOS_REMOVABLE 0x0008 +#define BIOS_CTRL_BOOTABLE_CD 0x0010 +#define BIOS_CTRL_MULTIPLE_LUN 0x0040 +#define BIOS_CTRL_DISPLAY_MSG 0x0080 +#define BIOS_CTRL_NO_SCAM 0x0100 +#define BIOS_CTRL_RESET_SCSI_BUS 0x0200 +#define BIOS_CTRL_INIT_VERBOSE 0x0800 +#define BIOS_CTRL_SCSI_PARITY 0x1000 +#define BIOS_CTRL_AIPP_DIS 0x2000 + +#define ADV_3550_MEMSIZE 0x2000 /* 8 KB Internal Memory */ + +#define ADV_38C0800_MEMSIZE 0x4000 /* 16 KB Internal Memory */ + +/* + * XXX - Since ASC38C1600 Rev.3 has a local RAM failure issue, there is + * a special 16K Adv Library and Microcode version. After the issue is + * resolved, should restore 32K support. + * + * #define ADV_38C1600_MEMSIZE 0x8000L * 32 KB Internal Memory * + */ +#define ADV_38C1600_MEMSIZE 0x4000 /* 16 KB Internal Memory */ + +/* + * Byte I/O register address from base of 'iop_base'. 
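A small standalone sketch of dumping the bios_ctrl word (EEPROM word 12) against the BIOS_CTRL_* flags above; the table contents are the flag values copied from the definitions, and the printout format is illustrative only.

#include <stdio.h>

/* bios_ctrl flag values copied from the definitions above. */
static const struct {
	unsigned short bit;
	const char *name;
} bios_ctrl_bits[] = {
	{ 0x0001, "BIOS_CTRL_BIOS" },
	{ 0x0002, "BIOS_CTRL_EXTENDED_XLAT" },
	{ 0x0004, "BIOS_CTRL_GT_2_DISK" },
	{ 0x0008, "BIOS_CTRL_BIOS_REMOVABLE" },
	{ 0x0010, "BIOS_CTRL_BOOTABLE_CD" },
	{ 0x0040, "BIOS_CTRL_MULTIPLE_LUN" },
	{ 0x0080, "BIOS_CTRL_DISPLAY_MSG" },
	{ 0x0100, "BIOS_CTRL_NO_SCAM" },
	{ 0x0200, "BIOS_CTRL_RESET_SCSI_BUS" },
	{ 0x0800, "BIOS_CTRL_INIT_VERBOSE" },
	{ 0x1000, "BIOS_CTRL_SCSI_PARITY" },
	{ 0x2000, "BIOS_CTRL_AIPP_DIS" },
};

/* Illustrative dump of the flags set in a bios_ctrl value. */
static void print_bios_ctrl(unsigned short bios_ctrl)
{
	size_t i;

	for (i = 0; i < sizeof(bios_ctrl_bits) / sizeof(bios_ctrl_bits[0]); i++)
		if (bios_ctrl & bios_ctrl_bits[i].bit)
			printf("  %s\n", bios_ctrl_bits[i].name);
}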
+ */ +#define IOPB_INTR_STATUS_REG 0x00 +#define IOPB_CHIP_ID_1 0x01 +#define IOPB_INTR_ENABLES 0x02 +#define IOPB_CHIP_TYPE_REV 0x03 +#define IOPB_RES_ADDR_4 0x04 +#define IOPB_RES_ADDR_5 0x05 +#define IOPB_RAM_DATA 0x06 +#define IOPB_RES_ADDR_7 0x07 +#define IOPB_FLAG_REG 0x08 +#define IOPB_RES_ADDR_9 0x09 +#define IOPB_RISC_CSR 0x0A +#define IOPB_RES_ADDR_B 0x0B +#define IOPB_RES_ADDR_C 0x0C +#define IOPB_RES_ADDR_D 0x0D +#define IOPB_SOFT_OVER_WR 0x0E +#define IOPB_RES_ADDR_F 0x0F +#define IOPB_MEM_CFG 0x10 +#define IOPB_RES_ADDR_11 0x11 +#define IOPB_GPIO_DATA 0x12 +#define IOPB_RES_ADDR_13 0x13 +#define IOPB_FLASH_PAGE 0x14 +#define IOPB_RES_ADDR_15 0x15 +#define IOPB_GPIO_CNTL 0x16 +#define IOPB_RES_ADDR_17 0x17 +#define IOPB_FLASH_DATA 0x18 +#define IOPB_RES_ADDR_19 0x19 +#define IOPB_RES_ADDR_1A 0x1A +#define IOPB_RES_ADDR_1B 0x1B +#define IOPB_RES_ADDR_1C 0x1C +#define IOPB_RES_ADDR_1D 0x1D +#define IOPB_RES_ADDR_1E 0x1E +#define IOPB_RES_ADDR_1F 0x1F +#define IOPB_DMA_CFG0 0x20 +#define IOPB_DMA_CFG1 0x21 +#define IOPB_TICKLE 0x22 +#define IOPB_DMA_REG_WR 0x23 +#define IOPB_SDMA_STATUS 0x24 +#define IOPB_SCSI_BYTE_CNT 0x25 +#define IOPB_HOST_BYTE_CNT 0x26 +#define IOPB_BYTE_LEFT_TO_XFER 0x27 +#define IOPB_BYTE_TO_XFER_0 0x28 +#define IOPB_BYTE_TO_XFER_1 0x29 +#define IOPB_BYTE_TO_XFER_2 0x2A +#define IOPB_BYTE_TO_XFER_3 0x2B +#define IOPB_ACC_GRP 0x2C +#define IOPB_RES_ADDR_2D 0x2D +#define IOPB_DEV_ID 0x2E +#define IOPB_RES_ADDR_2F 0x2F +#define IOPB_SCSI_DATA 0x30 +#define IOPB_RES_ADDR_31 0x31 +#define IOPB_RES_ADDR_32 0x32 +#define IOPB_SCSI_DATA_HSHK 0x33 +#define IOPB_SCSI_CTRL 0x34 +#define IOPB_RES_ADDR_35 0x35 +#define IOPB_RES_ADDR_36 0x36 +#define IOPB_RES_ADDR_37 0x37 +#define IOPB_RAM_BIST 0x38 +#define IOPB_PLL_TEST 0x39 +#define IOPB_PCI_INT_CFG 0x3A +#define IOPB_RES_ADDR_3B 0x3B +#define IOPB_RFIFO_CNT 0x3C +#define IOPB_RES_ADDR_3D 0x3D +#define IOPB_RES_ADDR_3E 0x3E +#define IOPB_RES_ADDR_3F 0x3F + +/* + * Word I/O register address from base of 'iop_base'. + */ +#define IOPW_CHIP_ID_0 0x00 /* CID0 */ +#define IOPW_CTRL_REG 0x02 /* CC */ +#define IOPW_RAM_ADDR 0x04 /* LA */ +#define IOPW_RAM_DATA 0x06 /* LD */ +#define IOPW_RES_ADDR_08 0x08 +#define IOPW_RISC_CSR 0x0A /* CSR */ +#define IOPW_SCSI_CFG0 0x0C /* CFG0 */ +#define IOPW_SCSI_CFG1 0x0E /* CFG1 */ +#define IOPW_RES_ADDR_10 0x10 +#define IOPW_SEL_MASK 0x12 /* SM */ +#define IOPW_RES_ADDR_14 0x14 +#define IOPW_FLASH_ADDR 0x16 /* FA */ +#define IOPW_RES_ADDR_18 0x18 +#define IOPW_EE_CMD 0x1A /* EC */ +#define IOPW_EE_DATA 0x1C /* ED */ +#define IOPW_SFIFO_CNT 0x1E /* SFC */ +#define IOPW_RES_ADDR_20 0x20 +#define IOPW_Q_BASE 0x22 /* QB */ +#define IOPW_QP 0x24 /* QP */ +#define IOPW_IX 0x26 /* IX */ +#define IOPW_SP 0x28 /* SP */ +#define IOPW_PC 0x2A /* PC */ +#define IOPW_RES_ADDR_2C 0x2C +#define IOPW_RES_ADDR_2E 0x2E +#define IOPW_SCSI_DATA 0x30 /* SD */ +#define IOPW_SCSI_DATA_HSHK 0x32 /* SDH */ +#define IOPW_SCSI_CTRL 0x34 /* SC */ +#define IOPW_HSHK_CFG 0x36 /* HCFG */ +#define IOPW_SXFR_STATUS 0x36 /* SXS */ +#define IOPW_SXFR_CNTL 0x38 /* SXL */ +#define IOPW_SXFR_CNTH 0x3A /* SXH */ +#define IOPW_RES_ADDR_3C 0x3C +#define IOPW_RFIFO_DATA 0x3E /* RFD */ + +/* + * Doubleword I/O register address from base of 'iop_base'. 
+ */ +#define IOPDW_RES_ADDR_0 0x00 +#define IOPDW_RAM_DATA 0x04 +#define IOPDW_RES_ADDR_8 0x08 +#define IOPDW_RES_ADDR_C 0x0C +#define IOPDW_RES_ADDR_10 0x10 +#define IOPDW_COMMA 0x14 +#define IOPDW_COMMB 0x18 +#define IOPDW_RES_ADDR_1C 0x1C +#define IOPDW_SDMA_ADDR0 0x20 +#define IOPDW_SDMA_ADDR1 0x24 +#define IOPDW_SDMA_COUNT 0x28 +#define IOPDW_SDMA_ERROR 0x2C +#define IOPDW_RDMA_ADDR0 0x30 +#define IOPDW_RDMA_ADDR1 0x34 +#define IOPDW_RDMA_COUNT 0x38 +#define IOPDW_RDMA_ERROR 0x3C + +#define ADV_CHIP_ID_BYTE 0x25 +#define ADV_CHIP_ID_WORD 0x04C1 + +#define ADV_INTR_ENABLE_HOST_INTR 0x01 +#define ADV_INTR_ENABLE_SEL_INTR 0x02 +#define ADV_INTR_ENABLE_DPR_INTR 0x04 +#define ADV_INTR_ENABLE_RTA_INTR 0x08 +#define ADV_INTR_ENABLE_RMA_INTR 0x10 +#define ADV_INTR_ENABLE_RST_INTR 0x20 +#define ADV_INTR_ENABLE_DPE_INTR 0x40 +#define ADV_INTR_ENABLE_GLOBAL_INTR 0x80 + +#define ADV_INTR_STATUS_INTRA 0x01 +#define ADV_INTR_STATUS_INTRB 0x02 +#define ADV_INTR_STATUS_INTRC 0x04 + +#define ADV_RISC_CSR_STOP (0x0000) +#define ADV_RISC_TEST_COND (0x2000) +#define ADV_RISC_CSR_RUN (0x4000) +#define ADV_RISC_CSR_SINGLE_STEP (0x8000) + +#define ADV_CTRL_REG_HOST_INTR 0x0100 +#define ADV_CTRL_REG_SEL_INTR 0x0200 +#define ADV_CTRL_REG_DPR_INTR 0x0400 +#define ADV_CTRL_REG_RTA_INTR 0x0800 +#define ADV_CTRL_REG_RMA_INTR 0x1000 +#define ADV_CTRL_REG_RES_BIT14 0x2000 +#define ADV_CTRL_REG_DPE_INTR 0x4000 +#define ADV_CTRL_REG_POWER_DONE 0x8000 +#define ADV_CTRL_REG_ANY_INTR 0xFF00 + +#define ADV_CTRL_REG_CMD_RESET 0x00C6 +#define ADV_CTRL_REG_CMD_WR_IO_REG 0x00C5 +#define ADV_CTRL_REG_CMD_RD_IO_REG 0x00C4 +#define ADV_CTRL_REG_CMD_WR_PCI_CFG_SPACE 0x00C3 +#define ADV_CTRL_REG_CMD_RD_PCI_CFG_SPACE 0x00C2 + +#define ADV_TICKLE_NOP 0x00 +#define ADV_TICKLE_A 0x01 +#define ADV_TICKLE_B 0x02 +#define ADV_TICKLE_C 0x03 + +#define AdvIsIntPending(port) \ + (AdvReadWordRegister(port, IOPW_CTRL_REG) & ADV_CTRL_REG_HOST_INTR) + +/* + * SCSI_CFG0 Register bit definitions + */ +#define TIMER_MODEAB 0xC000 /* Watchdog, Second, and Select. Timer Ctrl. */ +#define PARITY_EN 0x2000 /* Enable SCSI Parity Error detection */ +#define EVEN_PARITY 0x1000 /* Select Even Parity */ +#define WD_LONG 0x0800 /* Watchdog Interval, 1: 57 min, 0: 13 sec */ +#define QUEUE_128 0x0400 /* Queue Size, 1: 128 byte, 0: 64 byte */ +#define PRIM_MODE 0x0100 /* Primitive SCSI mode */ +#define SCAM_EN 0x0080 /* Enable SCAM selection */ +#define SEL_TMO_LONG 0x0040 /* Sel/Resel Timeout, 1: 400 ms, 0: 1.6 ms */ +#define CFRM_ID 0x0020 /* SCAM id sel. confirm., 1: fast, 0: 6.4 ms */ +#define OUR_ID_EN 0x0010 /* Enable OUR_ID bits */ +#define OUR_ID 0x000F /* SCSI ID */ + +/* + * SCSI_CFG1 Register bit definitions + */ +#define BIG_ENDIAN 0x8000 /* Enable Big Endian Mode MIO:15, EEP:15 */ +#define TERM_POL 0x2000 /* Terminator Polarity Ctrl. 
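Putting the chip-ID and control-register constants together: a standalone sketch of the two checks the Adv library makes, fed with raw register values a caller would have read through the IOPB_CHIP_ID_1, IOPW_CHIP_ID_0 and IOPW_CTRL_REG offsets. The helper names are illustrative, not from the driver.

/* Constants copied from the definitions above. */
#define ADV_CHIP_ID_BYTE       0x25
#define ADV_CHIP_ID_WORD       0x04C1
#define ADV_CTRL_REG_HOST_INTR 0x0100

/* Same test as the AdvFindSignature() macro, on already-read register values. */
static int condor_signature_ok(unsigned char chip_id_1, unsigned short chip_id_0)
{
	return chip_id_1 == ADV_CHIP_ID_BYTE && chip_id_0 == ADV_CHIP_ID_WORD;
}

/* Same test as AdvIsIntPending(), on an already-read IOPW_CTRL_REG value. */
static int host_interrupt_pending(unsigned short ctrl_reg)
{
	return (ctrl_reg & ADV_CTRL_REG_HOST_INTR) != 0;
}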
MIO:13, EEP:13 */ +#define SLEW_RATE 0x1000 /* SCSI output buffer slew rate */ +#define FILTER_SEL 0x0C00 /* Filter Period Selection */ +#define FLTR_DISABLE 0x0000 /* Input Filtering Disabled */ +#define FLTR_11_TO_20NS 0x0800 /* Input Filtering 11ns to 20ns */ +#define FLTR_21_TO_39NS 0x0C00 /* Input Filtering 21ns to 39ns */ +#define ACTIVE_DBL 0x0200 /* Disable Active Negation */ +#define DIFF_MODE 0x0100 /* SCSI differential Mode (Read-Only) */ +#define DIFF_SENSE 0x0080 /* 1: No SE cables, 0: SE cable (Read-Only) */ +#define TERM_CTL_SEL 0x0040 /* Enable TERM_CTL_H and TERM_CTL_L */ +#define TERM_CTL 0x0030 /* External SCSI Termination Bits */ +#define TERM_CTL_H 0x0020 /* Enable External SCSI Upper Termination */ +#define TERM_CTL_L 0x0010 /* Enable External SCSI Lower Termination */ +#define CABLE_DETECT 0x000F /* External SCSI Cable Connection Status */ + +/* + * Addendum for ASC-38C0800 Chip + * + * The ASC-38C1600 Chip uses the same definitions except that the + * bus mode override bits [12:10] have been moved to byte register + * offset 0xE (IOPB_SOFT_OVER_WR) bits [12:10]. The [12:10] bits in + * SCSI_CFG1 are read-only and always available. Bit 14 (DIS_TERM_DRV) + * is not needed. The [12:10] bits in IOPB_SOFT_OVER_WR are write-only. + * Also each ASC-38C1600 function or channel uses only cable bits [5:4] + * and [1:0]. Bits [14], [7:6], [3:2] are unused. + */ +#define DIS_TERM_DRV 0x4000 /* 1: Read c_det[3:0], 0: cannot read */ +#define HVD_LVD_SE 0x1C00 /* Device Detect Bits */ +#define HVD 0x1000 /* HVD Device Detect */ +#define LVD 0x0800 /* LVD Device Detect */ +#define SE 0x0400 /* SE Device Detect */ +#define TERM_LVD 0x00C0 /* LVD Termination Bits */ +#define TERM_LVD_HI 0x0080 /* Enable LVD Upper Termination */ +#define TERM_LVD_LO 0x0040 /* Enable LVD Lower Termination */ +#define TERM_SE 0x0030 /* SE Termination Bits */ +#define TERM_SE_HI 0x0020 /* Enable SE Upper Termination */ +#define TERM_SE_LO 0x0010 /* Enable SE Lower Termination */ +#define C_DET_LVD 0x000C /* LVD Cable Detect Bits */ +#define C_DET3 0x0008 /* Cable Detect for LVD External Wide */ +#define C_DET2 0x0004 /* Cable Detect for LVD Internal Wide */ +#define C_DET_SE 0x0003 /* SE Cable Detect Bits */ +#define C_DET1 0x0002 /* Cable Detect for SE Internal Wide */ +#define C_DET0 0x0001 /* Cable Detect for SE Internal Narrow */ + +#define CABLE_ILLEGAL_A 0x7 + /* x 0 0 0 | on on | Illegal (all 3 connectors are used) */ + +#define CABLE_ILLEGAL_B 0xB + /* 0 x 0 0 | on on | Illegal (all 3 connectors are used) */ + +/* + * MEM_CFG Register bit definitions + */ +#define BIOS_EN 0x40 /* BIOS Enable MIO:14,EEP:14 */ +#define FAST_EE_CLK 0x20 /* Diagnostic Bit */ +#define RAM_SZ 0x1C /* Specify size of RAM to RISC */ +#define RAM_SZ_2KB 0x00 /* 2 KB */ +#define RAM_SZ_4KB 0x04 /* 4 KB */ +#define RAM_SZ_8KB 0x08 /* 8 KB */ +#define RAM_SZ_16KB 0x0C /* 16 KB */ +#define RAM_SZ_32KB 0x10 /* 32 KB */ +#define RAM_SZ_64KB 0x14 /* 64 KB */ + +/* + * DMA_CFG0 Register bit definitions + * + * This register is only accessible to the host. 
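A hedged sketch of classifying the detected bus from a SCSI_CFG1 value using the ASC-38C0800 device-detect bits above; the precedence order chosen here is an assumption, the header only names the bits.

/* SCSI_CFG1 device-detect bits for the ASC-38C0800, copied from above. */
#define HVD_LVD_SE 0x1C00
#define HVD        0x1000
#define LVD        0x0800
#define SE         0x0400

/* Illustrative bus-mode classification; HVD is checked first by assumption. */
static const char *bus_mode_name(unsigned short scsi_cfg1)
{
	if (scsi_cfg1 & HVD)
		return "HVD";
	if (scsi_cfg1 & LVD)
		return "LVD";
	if (scsi_cfg1 & SE)
		return "SE";
	return "unknown";
}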
+ */ +#define BC_THRESH_ENB 0x80 /* PCI DMA Start Conditions */ +#define FIFO_THRESH 0x70 /* PCI DMA FIFO Threshold */ +#define FIFO_THRESH_16B 0x00 /* 16 bytes */ +#define FIFO_THRESH_32B 0x20 /* 32 bytes */ +#define FIFO_THRESH_48B 0x30 /* 48 bytes */ +#define FIFO_THRESH_64B 0x40 /* 64 bytes */ +#define FIFO_THRESH_80B 0x50 /* 80 bytes (default) */ +#define FIFO_THRESH_96B 0x60 /* 96 bytes */ +#define FIFO_THRESH_112B 0x70 /* 112 bytes */ +#define START_CTL 0x0C /* DMA start conditions */ +#define START_CTL_TH 0x00 /* Wait threshold level (default) */ +#define START_CTL_ID 0x04 /* Wait SDMA/SBUS idle */ +#define START_CTL_THID 0x08 /* Wait threshold and SDMA/SBUS idle */ +#define START_CTL_EMFU 0x0C /* Wait SDMA FIFO empty/full */ +#define READ_CMD 0x03 /* Memory Read Method */ +#define READ_CMD_MR 0x00 /* Memory Read */ +#define READ_CMD_MRL 0x02 /* Memory Read Long */ +#define READ_CMD_MRM 0x03 /* Memory Read Multiple (default) */ + +/* + * ASC-38C0800 RAM BIST Register bit definitions + */ +#define RAM_TEST_MODE 0x80 +#define PRE_TEST_MODE 0x40 +#define NORMAL_MODE 0x00 +#define RAM_TEST_DONE 0x10 +#define RAM_TEST_STATUS 0x0F +#define RAM_TEST_HOST_ERROR 0x08 +#define RAM_TEST_INTRAM_ERROR 0x04 +#define RAM_TEST_RISC_ERROR 0x02 +#define RAM_TEST_SCSI_ERROR 0x01 +#define RAM_TEST_SUCCESS 0x00 +#define PRE_TEST_VALUE 0x05 +#define NORMAL_VALUE 0x00 + +/* + * ASC38C1600 Definitions + * + * IOPB_PCI_INT_CFG Bit Field Definitions + */ + +#define INTAB_LD 0x80 /* Value loaded from EEPROM Bit 11. */ + +/* + * Bit 1 can be set to change the interrupt for the Function to operate in + * Totem Pole mode. By default Bit 1 is 0 and the interrupt operates in + * Open Drain mode. Both functions of the ASC38C1600 must be set to the same + * mode, otherwise the operating mode is undefined. + */ +#define TOTEMPOLE 0x02 + +/* + * Bit 0 can be used to change the Int Pin for the Function. The value is + * 0 by default for both Functions with Function 0 using INT A and Function + * B using INT B. For Function 0 if set, INT B is used. For Function 1 if set, + * INT A is used. + * + * EEPROM Word 0 Bit 11 for each Function may change the initial Int Pin + * value specified in the PCI Configuration Space. + */ +#define INTAB 0x01 + +/* + * Adv Library Status Definitions + */ +#define ADV_TRUE 1 +#define ADV_FALSE 0 +#define ADV_SUCCESS 1 +#define ADV_BUSY 0 +#define ADV_ERROR (-1) + +/* + * ADV_DVC_VAR 'warn_code' values + */ +#define ASC_WARN_BUSRESET_ERROR 0x0001 /* SCSI Bus Reset error */ +#define ASC_WARN_EEPROM_CHKSUM 0x0002 /* EEP check sum error */ +#define ASC_WARN_EEPROM_TERMINATION 0x0004 /* EEP termination bad field */ +#define ASC_WARN_ERROR 0xFFFF /* ADV_ERROR return */ + +#define ADV_MAX_TID 15 /* max. target identifier */ +#define ADV_MAX_LUN 7 /* max. logical unit number */ + +/* + * Fixed locations of microcode operating variables. 
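A standalone sketch of composing a DMA_CFG0 byte from the three field settings marked "(default)" above and extracting one field back out. Whether the driver actually programs exactly this combination is not stated in this header; the helper names are illustrative.

/* DMA_CFG0 field values copied from the definitions above. */
#define FIFO_THRESH      0x70
#define FIFO_THRESH_80B  0x50	/* default */
#define START_CTL        0x0C
#define START_CTL_TH     0x00	/* default */
#define READ_CMD         0x03
#define READ_CMD_MRM     0x03	/* default */

/* Illustrative composition of the documented default field values (0x53). */
static unsigned char dma_cfg0_defaults(void)
{
	return FIFO_THRESH_80B | START_CTL_TH | READ_CMD_MRM;
}

/* Extract the FIFO threshold field, e.g. 0x50 means the 80-byte threshold. */
static unsigned char dma_cfg0_fifo_thresh(unsigned char cfg)
{
	return cfg & FIFO_THRESH;
}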
+ */ +#define ASC_MC_CODE_BEGIN_ADDR 0x0028 /* microcode start address */ +#define ASC_MC_CODE_END_ADDR 0x002A /* microcode end address */ +#define ASC_MC_CODE_CHK_SUM 0x002C /* microcode code checksum */ +#define ASC_MC_VERSION_DATE 0x0038 /* microcode version */ +#define ASC_MC_VERSION_NUM 0x003A /* microcode number */ +#define ASC_MC_BIOSMEM 0x0040 /* BIOS RISC Memory Start */ +#define ASC_MC_BIOSLEN 0x0050 /* BIOS RISC Memory Length */ +#define ASC_MC_BIOS_SIGNATURE 0x0058 /* BIOS Signature 0x55AA */ +#define ASC_MC_BIOS_VERSION 0x005A /* BIOS Version (2 bytes) */ +#define ASC_MC_SDTR_SPEED1 0x0090 /* SDTR Speed for TID 0-3 */ +#define ASC_MC_SDTR_SPEED2 0x0092 /* SDTR Speed for TID 4-7 */ +#define ASC_MC_SDTR_SPEED3 0x0094 /* SDTR Speed for TID 8-11 */ +#define ASC_MC_SDTR_SPEED4 0x0096 /* SDTR Speed for TID 12-15 */ +#define ASC_MC_CHIP_TYPE 0x009A +#define ASC_MC_INTRB_CODE 0x009B +#define ASC_MC_WDTR_ABLE 0x009C +#define ASC_MC_SDTR_ABLE 0x009E +#define ASC_MC_TAGQNG_ABLE 0x00A0 +#define ASC_MC_DISC_ENABLE 0x00A2 +#define ASC_MC_IDLE_CMD_STATUS 0x00A4 +#define ASC_MC_IDLE_CMD 0x00A6 +#define ASC_MC_IDLE_CMD_PARAMETER 0x00A8 +#define ASC_MC_DEFAULT_SCSI_CFG0 0x00AC +#define ASC_MC_DEFAULT_SCSI_CFG1 0x00AE +#define ASC_MC_DEFAULT_MEM_CFG 0x00B0 +#define ASC_MC_DEFAULT_SEL_MASK 0x00B2 +#define ASC_MC_SDTR_DONE 0x00B6 +#define ASC_MC_NUMBER_OF_QUEUED_CMD 0x00C0 +#define ASC_MC_NUMBER_OF_MAX_CMD 0x00D0 +#define ASC_MC_DEVICE_HSHK_CFG_TABLE 0x0100 +#define ASC_MC_CONTROL_FLAG 0x0122 /* Microcode control flag. */ +#define ASC_MC_WDTR_DONE 0x0124 +#define ASC_MC_CAM_MODE_MASK 0x015E /* CAM mode TID bitmask. */ +#define ASC_MC_ICQ 0x0160 +#define ASC_MC_IRQ 0x0164 +#define ASC_MC_PPR_ABLE 0x017A + +/* + * BIOS LRAM variable absolute offsets. + */ +#define BIOS_CODESEG 0x54 +#define BIOS_CODELEN 0x56 +#define BIOS_SIGNATURE 0x58 +#define BIOS_VERSION 0x5A + +/* + * Microcode Control Flags + * + * Flags set by the Adv Library in RISC variable 'control_flag' (0x122) + * and handled by the microcode. + */ +#define CONTROL_FLAG_IGNORE_PERR 0x0001 /* Ignore DMA Parity Errors */ +#define CONTROL_FLAG_ENABLE_AIPP 0x0002 /* Enabled AIPP checking. */ + +/* + * ASC_MC_DEVICE_HSHK_CFG_TABLE microcode table or HSHK_CFG register format + */ +#define HSHK_CFG_WIDE_XFR 0x8000 +#define HSHK_CFG_RATE 0x0F00 +#define HSHK_CFG_OFFSET 0x001F + +#define ASC_DEF_MAX_HOST_QNG 0xFD /* Max. number of host commands (253) */ +#define ASC_DEF_MIN_HOST_QNG 0x10 /* Min. number of host commands (16) */ +#define ASC_DEF_MAX_DVC_QNG 0x3F /* Max. number commands per device (63) */ +#define ASC_DEF_MIN_DVC_QNG 0x04 /* Min. number commands per device (4) */ + +#define ASC_QC_DATA_CHECK 0x01 /* Require ASC_QC_DATA_OUT set or clear. */ +#define ASC_QC_DATA_OUT 0x02 /* Data out DMA transfer. */ +#define ASC_QC_START_MOTOR 0x04 /* Send auto-start motor before request. */ +#define ASC_QC_NO_OVERRUN 0x08 /* Don't report overrun. */ +#define ASC_QC_FREEZE_TIDQ 0x10 /* Freeze TID queue after request. XXX TBD */ + +#define ASC_QSC_NO_DISC 0x01 /* Don't allow disconnect for request. */ +#define ASC_QSC_NO_TAGMSG 0x02 /* Don't allow tag queuing for request. */ +#define ASC_QSC_NO_SYNC 0x04 /* Don't use Synch. transfer on request. */ +#define ASC_QSC_NO_WIDE 0x08 /* Don't use Wide transfer on request. */ +#define ASC_QSC_REDO_DTR 0x10 /* Renegotiate WDTR/SDTR before request. */ +/* + * Note: If a Tag Message is to be sent and neither ASC_QSC_HEAD_TAG or + * ASC_QSC_ORDERED_TAG is set, then a Simple Tag Message (0x20) is used. 
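The ASC_MC_DEVICE_HSHK_CFG_TABLE entries and the HSHK_CFG register share the field layout above. A standalone sketch of unpacking one entry follows; reading the 4-bit field as a rate code and offset 0 as asynchronous are conventional SCSI interpretations assumed for the example, not spelled out in this header.

/* HSHK_CFG fields, copied from the definitions above. */
#define HSHK_CFG_WIDE_XFR 0x8000
#define HSHK_CFG_RATE     0x0F00
#define HSHK_CFG_OFFSET   0x001F

struct hshk_cfg {
	int wide;		/* wide transfers negotiated */
	unsigned char rate;	/* 4-bit synchronous rate code (assumed meaning) */
	unsigned char offset;	/* 5-bit REQ/ACK offset, 0 taken as async */
};

/* Illustrative unpacking of one handshake configuration word. */
static struct hshk_cfg unpack_hshk_cfg(unsigned short word)
{
	struct hshk_cfg c;

	c.wide = (word & HSHK_CFG_WIDE_XFR) != 0;
	c.rate = (word & HSHK_CFG_RATE) >> 8;
	c.offset = word & HSHK_CFG_OFFSET;
	return c;
}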
+ */ +#define ASC_QSC_HEAD_TAG 0x40 /* Use Head Tag Message (0x21). */ +#define ASC_QSC_ORDERED_TAG 0x80 /* Use Ordered Tag Message (0x22). */ + +/* + * All fields here are accessed by the board microcode and need to be + * little-endian. + */ +typedef struct adv_carr_t { + __le32 carr_va; /* Carrier Virtual Address */ + __le32 carr_pa; /* Carrier Physical Address */ + __le32 areq_vpa; /* ADV_SCSI_REQ_Q Virtual or Physical Address */ + /* + * next_vpa [31:4] Carrier Virtual or Physical Next Pointer + * + * next_vpa [3:1] Reserved Bits + * next_vpa [0] Done Flag set in Response Queue. + */ + __le32 next_vpa; +} ADV_CARR_T; + +/* + * Mask used to eliminate low 4 bits of carrier 'next_vpa' field. + */ +#define ADV_NEXT_VPA_MASK 0xFFFFFFF0 + +#define ADV_RQ_DONE 0x00000001 +#define ADV_RQ_GOOD 0x00000002 +#define ADV_CQ_STOPPER 0x00000000 + +#define ADV_GET_CARRP(carrp) ((carrp) & ADV_NEXT_VPA_MASK) + +/* + * Each carrier is 64 bytes, and we need three additional + * carrier for icq, irq, and the termination carrier. + */ +#define ADV_CARRIER_COUNT (ASC_DEF_MAX_HOST_QNG + 3) + +#define ADV_CARRIER_BUFSIZE \ + (ADV_CARRIER_COUNT * sizeof(ADV_CARR_T)) + +#define ADV_CHIP_ASC3550 0x01 /* Ultra-Wide IC */ +#define ADV_CHIP_ASC38C0800 0x02 /* Ultra2-Wide/LVD IC */ +#define ADV_CHIP_ASC38C1600 0x03 /* Ultra3-Wide/LVD2 IC */ + +/* + * Adapter temporary configuration structure + * + * This structure can be discarded after initialization. Don't add + * fields here needed after initialization. + * + * Field naming convention: + * + * *_enable indicates the field enables or disables a feature. The + * value of the field is never reset. + */ +typedef struct adv_dvc_cfg { + ushort disc_enable; /* enable disconnection */ + uchar chip_version; /* chip version */ + uchar termination; /* Term. Ctrl. bits 6-5 of SCSI_CFG1 register */ + ushort control_flag; /* Microcode Control Flag */ + ushort mcode_date; /* Microcode date */ + ushort mcode_version; /* Microcode version */ + ushort serial1; /* EEPROM serial number word 1 */ + ushort serial2; /* EEPROM serial number word 2 */ + ushort serial3; /* EEPROM serial number word 3 */ +} ADV_DVC_CFG; + +struct adv_dvc_var; +struct adv_scsi_req_q; + +typedef struct adv_sg_block { + uchar reserved1; + uchar reserved2; + uchar reserved3; + uchar sg_cnt; /* Valid entries in block. */ + __le32 sg_ptr; /* Pointer to next sg block. */ + struct { + __le32 sg_addr; /* SG element address. */ + __le32 sg_count; /* SG element count. */ + } sg_list[NO_OF_SG_PER_BLOCK]; +} ADV_SG_BLOCK; + +/* + * ADV_SCSI_REQ_Q - microcode request structure + * + * All fields in this structure up to byte 60 are used by the microcode. + * The microcode makes assumptions about the size and ordering of fields + * in this structure. Do not change the structure definition here without + * coordinating the change with the microcode. + * + * All fields accessed by microcode must be maintained in little_endian + * order. + */ +typedef struct adv_scsi_req_q { + uchar cntl; /* Ucode flags and state (ASC_MC_QC_*). */ + uchar target_cmd; + uchar target_id; /* Device target identifier. */ + uchar target_lun; /* Device target logical unit number. */ + __le32 data_addr; /* Data buffer physical address. */ + __le32 data_cnt; /* Data count. Ucode sets to residual. */ + __le32 sense_addr; + __le32 carr_pa; + uchar mflag; + uchar sense_len; + uchar cdb_len; /* SCSI CDB length. Must <= 16 bytes. */ + uchar scsi_cntl; + uchar done_status; /* Completion status. */ + uchar scsi_status; /* SCSI status byte. 
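A standalone sketch of how the low bits of a carrier's next_vpa are separated from the pointer bits, mirroring ADV_GET_CARRP() and the ADV_RQ_DONE flag above. Plain uint32_t stands in for the driver's __le32 handling, which would go through le32_to_cpu() first.

#include <stdint.h>

/* Carrier 'next_vpa' encoding, values copied from the definitions above. */
#define ADV_NEXT_VPA_MASK 0xFFFFFFF0u
#define ADV_RQ_DONE       0x00000001u

/* Has the microcode marked this response-queue carrier done? */
static int carrier_done(uint32_t next_vpa)
{
	return (next_vpa & ADV_RQ_DONE) != 0;
}

/* Strip the flag bits to recover the next-carrier value, as ADV_GET_CARRP() does. */
static uint32_t carrier_next(uint32_t next_vpa)
{
	return next_vpa & ADV_NEXT_VPA_MASK;
}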
*/ + uchar host_status; /* Ucode host status. */ + uchar sg_working_ix; + uchar cdb[12]; /* SCSI CDB bytes 0-11. */ + __le32 sg_real_addr; /* SG list physical address. */ + __le32 scsiq_rptr; + uchar cdb16[4]; /* SCSI CDB bytes 12-15. */ + __le32 scsiq_ptr; + __le32 carr_va; + /* + * End of microcode structure - 60 bytes. The rest of the structure + * is used by the Adv Library and ignored by the microcode. + */ + u32 srb_tag; + ADV_SG_BLOCK *sg_list_ptr; /* SG list virtual address. */ +} ADV_SCSI_REQ_Q; + +/* + * The following two structures are used to process Wide Board requests. + * + * The ADV_SCSI_REQ_Q structure in adv_req_t is passed to the Adv Library + * and microcode with the ADV_SCSI_REQ_Q field 'srb_tag' set to the + * SCSI request tag. The adv_req_t structure 'cmndp' field in turn points + * to the Mid-Level SCSI request structure. + * + * Zero or more ADV_SG_BLOCK are used with each ADV_SCSI_REQ_Q. Each + * ADV_SG_BLOCK structure holds 15 scatter-gather elements. Under Linux + * up to 255 scatter-gather elements may be used per request or + * ADV_SCSI_REQ_Q. + * + * Both structures must be 32 byte aligned. + */ +typedef struct adv_sgblk { + ADV_SG_BLOCK sg_block; /* Sgblock structure. */ + dma_addr_t sg_addr; /* Physical address */ + struct adv_sgblk *next_sgblkp; /* Next scatter-gather structure. */ +} adv_sgblk_t; + +typedef struct adv_req { + ADV_SCSI_REQ_Q scsi_req_q; /* Adv Library request structure. */ + uchar align[24]; /* Request structure padding. */ + struct scsi_cmnd *cmndp; /* Mid-Level SCSI command pointer. */ + dma_addr_t req_addr; + adv_sgblk_t *sgblkp; /* Adv Library scatter-gather pointer. */ +} adv_req_t __aligned(32); + +/* + * Adapter operation variable structure. + * + * One structure is required per host adapter. + * + * Field naming convention: + * + * *_able indicates both whether a feature should be enabled or disabled + * and whether a device is capable of the feature. At initialization + * this field may be set, but later if a device is found to be incapable + * of the feature, the field is cleared. + */ +typedef struct adv_dvc_var { + AdvPortAddr iop_base; /* I/O port address */ + ushort err_code; /* fatal error code */ + ushort bios_ctrl; /* BIOS control word, EEPROM word 12 */ + ushort wdtr_able; /* try WDTR for a device */ + ushort sdtr_able; /* try SDTR for a device */ + ushort ultra_able; /* try SDTR Ultra speed for a device */ + ushort sdtr_speed1; /* EEPROM SDTR Speed for TID 0-3 */ + ushort sdtr_speed2; /* EEPROM SDTR Speed for TID 4-7 */ + ushort sdtr_speed3; /* EEPROM SDTR Speed for TID 8-11 */ + ushort sdtr_speed4; /* EEPROM SDTR Speed for TID 12-15 */ + ushort tagqng_able; /* try tagged queuing with a device */ + ushort ppr_able; /* PPR message capable per TID bitmask. */ + uchar max_dvc_qng; /* maximum number of tagged commands per device */ + ushort start_motor; /* start motor command allowed */ + uchar scsi_reset_wait; /* delay in seconds after scsi bus reset */ + uchar chip_no; /* should be assigned by caller */ + uchar max_host_qng; /* maximum number of Q'ed command allowed */ + ushort no_scam; /* scam_tolerant of EEPROM */ + struct asc_board *drv_ptr; /* driver pointer to private structure */ + uchar chip_scsi_id; /* chip SCSI target ID */ + uchar chip_type; + uchar bist_err_code; + ADV_CARR_T *carrier; + ADV_CARR_T *carr_freelist; /* Carrier free list. */ + dma_addr_t carrier_addr; + ADV_CARR_T *icq_sp; /* Initiator command queue stopper pointer. */ + ADV_CARR_T *irq_sp; /* Initiator response queue stopper pointer. 
*/ + ushort carr_pending_cnt; /* Count of pending carriers. */ + /* + * Note: The following fields will not be used after initialization. The + * driver may discard the buffer after initialization is done. + */ + ADV_DVC_CFG *cfg; /* temporary configuration structure */ +} ADV_DVC_VAR; + +/* + * Microcode idle loop commands + */ +#define IDLE_CMD_COMPLETED 0 +#define IDLE_CMD_STOP_CHIP 0x0001 +#define IDLE_CMD_STOP_CHIP_SEND_INT 0x0002 +#define IDLE_CMD_SEND_INT 0x0004 +#define IDLE_CMD_ABORT 0x0008 +#define IDLE_CMD_DEVICE_RESET 0x0010 +#define IDLE_CMD_SCSI_RESET_START 0x0020 /* Assert SCSI Bus Reset */ +#define IDLE_CMD_SCSI_RESET_END 0x0040 /* Deassert SCSI Bus Reset */ +#define IDLE_CMD_SCSIREQ 0x0080 + +#define IDLE_CMD_STATUS_SUCCESS 0x0001 +#define IDLE_CMD_STATUS_FAILURE 0x0002 + +/* + * AdvSendIdleCmd() flag definitions. + */ +#define ADV_NOWAIT 0x01 + +/* + * Wait loop time out values. + */ +#define SCSI_WAIT_100_MSEC 100UL /* 100 milliseconds */ +#define SCSI_US_PER_MSEC 1000 /* microseconds per millisecond */ +#define SCSI_MAX_RETRY 10 /* retry count */ + +#define ADV_ASYNC_RDMA_FAILURE 0x01 /* Fatal RDMA failure. */ +#define ADV_ASYNC_SCSI_BUS_RESET_DET 0x02 /* Detected SCSI Bus Reset. */ +#define ADV_ASYNC_CARRIER_READY_FAILURE 0x03 /* Carrier Ready failure. */ +#define ADV_RDMA_IN_CARR_AND_Q_INVALID 0x04 /* RDMAed-in data invalid. */ + +#define ADV_HOST_SCSI_BUS_RESET 0x80 /* Host Initiated SCSI Bus Reset. */ + +/* Read byte from a register. */ +#define AdvReadByteRegister(iop_base, reg_off) \ + (ADV_MEM_READB((iop_base) + (reg_off))) + +/* Write byte to a register. */ +#define AdvWriteByteRegister(iop_base, reg_off, byte) \ + (ADV_MEM_WRITEB((iop_base) + (reg_off), (byte))) + +/* Read word (2 bytes) from a register. */ +#define AdvReadWordRegister(iop_base, reg_off) \ + (ADV_MEM_READW((iop_base) + (reg_off))) + +/* Write word (2 bytes) to a register. */ +#define AdvWriteWordRegister(iop_base, reg_off, word) \ + (ADV_MEM_WRITEW((iop_base) + (reg_off), (word))) + +/* Write dword (4 bytes) to a register. */ +#define AdvWriteDWordRegister(iop_base, reg_off, dword) \ + (ADV_MEM_WRITEDW((iop_base) + (reg_off), (dword))) + +/* Read byte from LRAM. */ +#define AdvReadByteLram(iop_base, addr, byte) \ +do { \ + ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \ + (byte) = ADV_MEM_READB((iop_base) + IOPB_RAM_DATA); \ +} while (0) + +/* Write byte to LRAM. */ +#define AdvWriteByteLram(iop_base, addr, byte) \ + (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \ + ADV_MEM_WRITEB((iop_base) + IOPB_RAM_DATA, (byte))) + +/* Read word (2 bytes) from LRAM. */ +#define AdvReadWordLram(iop_base, addr, word) \ +do { \ + ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \ + (word) = (ADV_MEM_READW((iop_base) + IOPW_RAM_DATA)); \ +} while (0) + +/* Write word (2 bytes) to LRAM. */ +#define AdvWriteWordLram(iop_base, addr, word) \ + (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \ + ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word))) + +/* Write little-endian double word (4 bytes) to LRAM */ +/* Because of unspecified C language ordering don't use auto-increment. 
*/ +#define AdvWriteDWordLramNoSwap(iop_base, addr, dword) \ + ((ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \ + ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \ + cpu_to_le16((ushort) ((dword) & 0xFFFF)))), \ + (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr) + 2), \ + ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \ + cpu_to_le16((ushort) ((dword >> 16) & 0xFFFF))))) + +/* Read word (2 bytes) from LRAM assuming that the address is already set. */ +#define AdvReadWordAutoIncLram(iop_base) \ + (ADV_MEM_READW((iop_base) + IOPW_RAM_DATA)) + +/* Write word (2 bytes) to LRAM assuming that the address is already set. */ +#define AdvWriteWordAutoIncLram(iop_base, word) \ + (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word))) + +/* + * Define macro to check for Condor signature. + * + * Evaluate to ADV_TRUE if a Condor chip is found the specified port + * address 'iop_base'. Otherwise evalue to ADV_FALSE. + */ +#define AdvFindSignature(iop_base) \ + (((AdvReadByteRegister((iop_base), IOPB_CHIP_ID_1) == \ + ADV_CHIP_ID_BYTE) && \ + (AdvReadWordRegister((iop_base), IOPW_CHIP_ID_0) == \ + ADV_CHIP_ID_WORD)) ? ADV_TRUE : ADV_FALSE) + +/* + * Define macro to Return the version number of the chip at 'iop_base'. + * + * The second parameter 'bus_type' is currently unused. + */ +#define AdvGetChipVersion(iop_base, bus_type) \ + AdvReadByteRegister((iop_base), IOPB_CHIP_TYPE_REV) + +/* + * Abort an SRB in the chip's RISC Memory. The 'srb_tag' argument must + * match the ADV_SCSI_REQ_Q 'srb_tag' field. + * + * If the request has not yet been sent to the device it will simply be + * aborted from RISC memory. If the request is disconnected it will be + * aborted on reselection by sending an Abort Message to the target ID. + * + * Return value: + * ADV_TRUE(1) - Queue was successfully aborted. + * ADV_FALSE(0) - Queue was not found on the active queue list. + */ +#define AdvAbortQueue(asc_dvc, srb_tag) \ + AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_ABORT, \ + (ADV_DCNT) (srb_tag)) + +/* + * Send a Bus Device Reset Message to the specified target ID. + * + * All outstanding commands will be purged if sending the + * Bus Device Reset Message is successful. + * + * Return Value: + * ADV_TRUE(1) - All requests on the target are purged. + * ADV_FALSE(0) - Couldn't issue Bus Device Reset Message; Requests + * are not purged. + */ +#define AdvResetDevice(asc_dvc, target_id) \ + AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_DEVICE_RESET, \ + (ADV_DCNT) (target_id)) + +/* + * SCSI Wide Type definition. + */ +#define ADV_SCSI_BIT_ID_TYPE ushort + +/* + * AdvInitScsiTarget() 'cntl_flag' options. + */ +#define ADV_SCAN_LUN 0x01 +#define ADV_CAPINFO_NOLUN 0x02 + +/* + * Convert target id to target id bit mask. + */ +#define ADV_TID_TO_TIDMASK(tid) (0x01 << ((tid) & ADV_MAX_TID)) + +/* + * ADV_SCSI_REQ_Q 'done_status' and 'host_status' return values. + */ + +#define QD_NO_STATUS 0x00 /* Request not completed yet. 
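The AdvWriteDWordLramNoSwap() macro above writes the low word to 'addr' and the high word to 'addr + 2' as two explicit address/data pairs, so no auto-increment ordering is relied on. A standalone sketch of that ordering follows, with a callback standing in for the ADV_MEM_WRITEW() register accesses; the driver additionally applies cpu_to_le16() to each half.

#include <stdint.h>

/* Stand-in for the LRAM address/data register write done by ADV_MEM_WRITEW(). */
typedef void (*write_lram_word_fn)(void *ctx, uint16_t addr, uint16_t word);

/* Illustrative low-word-first split of a 32-bit value into two LRAM words. */
static void write_dword_lram_no_swap(write_lram_word_fn write_word, void *ctx,
				     uint16_t addr, uint32_t dword)
{
	write_word(ctx, addr, (uint16_t)(dword & 0xFFFF));		/* low word */
	write_word(ctx, addr + 2, (uint16_t)((dword >> 16) & 0xFFFF));	/* high word */
}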
*/ +#define QD_NO_ERROR 0x01 +#define QD_ABORTED_BY_HOST 0x02 +#define QD_WITH_ERROR 0x04 + +#define QHSTA_NO_ERROR 0x00 +#define QHSTA_M_SEL_TIMEOUT 0x11 +#define QHSTA_M_DATA_OVER_RUN 0x12 +#define QHSTA_M_UNEXPECTED_BUS_FREE 0x13 +#define QHSTA_M_QUEUE_ABORTED 0x15 +#define QHSTA_M_SXFR_SDMA_ERR 0x16 /* SXFR_STATUS SCSI DMA Error */ +#define QHSTA_M_SXFR_SXFR_PERR 0x17 /* SXFR_STATUS SCSI Bus Parity Error */ +#define QHSTA_M_RDMA_PERR 0x18 /* RISC PCI DMA parity error */ +#define QHSTA_M_SXFR_OFF_UFLW 0x19 /* SXFR_STATUS Offset Underflow */ +#define QHSTA_M_SXFR_OFF_OFLW 0x20 /* SXFR_STATUS Offset Overflow */ +#define QHSTA_M_SXFR_WD_TMO 0x21 /* SXFR_STATUS Watchdog Timeout */ +#define QHSTA_M_SXFR_DESELECTED 0x22 /* SXFR_STATUS Deselected */ +/* Note: QHSTA_M_SXFR_XFR_OFLW is identical to QHSTA_M_DATA_OVER_RUN. */ +#define QHSTA_M_SXFR_XFR_OFLW 0x12 /* SXFR_STATUS Transfer Overflow */ +#define QHSTA_M_SXFR_XFR_PH_ERR 0x24 /* SXFR_STATUS Transfer Phase Error */ +#define QHSTA_M_SXFR_UNKNOWN_ERROR 0x25 /* SXFR_STATUS Unknown Error */ +#define QHSTA_M_SCSI_BUS_RESET 0x30 /* Request aborted from SBR */ +#define QHSTA_M_SCSI_BUS_RESET_UNSOL 0x31 /* Request aborted from unsol. SBR */ +#define QHSTA_M_BUS_DEVICE_RESET 0x32 /* Request aborted from BDR */ +#define QHSTA_M_DIRECTION_ERR 0x35 /* Data Phase mismatch */ +#define QHSTA_M_DIRECTION_ERR_HUNG 0x36 /* Data Phase mismatch and bus hang */ +#define QHSTA_M_WTM_TIMEOUT 0x41 +#define QHSTA_M_BAD_CMPL_STATUS_IN 0x42 +#define QHSTA_M_NO_AUTO_REQ_SENSE 0x43 +#define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44 +#define QHSTA_M_INVALID_DEVICE 0x45 /* Bad target ID */ +#define QHSTA_M_FROZEN_TIDQ 0x46 /* TID Queue frozen. */ +#define QHSTA_M_SGBACKUP_ERROR 0x47 /* Scatter-Gather backup error */ + +/* Return the address that is aligned at the next doubleword >= to 'addr'. */ +#define ADV_32BALIGN(addr) (((ulong) (addr) + 0x1F) & ~0x1F) + +/* + * Total contiguous memory needed for driver SG blocks. + * + * ADV_MAX_SG_LIST must be defined by a driver. It is the maximum + * number of scatter-gather elements the driver supports in a + * single request. + */ + +#define ADV_SG_LIST_MAX_BYTE_SIZE \ + (sizeof(ADV_SG_BLOCK) * \ + ((ADV_MAX_SG_LIST + (NO_OF_SG_PER_BLOCK - 1))/NO_OF_SG_PER_BLOCK)) + +/* struct asc_board flags */ +#define ASC_IS_WIDE_BOARD 0x04 /* AdvanSys Wide Board */ + +#define ASC_NARROW_BOARD(boardp) (((boardp)->flags & ASC_IS_WIDE_BOARD) == 0) + +#define NO_ISA_DMA 0xff /* No ISA DMA Channel Used */ + +#define ASC_INFO_SIZE 128 /* advansys_info() line size */ + +/* Asc Library return codes */ +#define ASC_TRUE 1 +#define ASC_FALSE 0 +#define ASC_NOERROR 1 +#define ASC_BUSY 0 +#define ASC_ERROR (-1) + +#define ASC_STATS(shost, counter) ASC_STATS_ADD(shost, counter, 1) +#ifndef ADVANSYS_STATS +#define ASC_STATS_ADD(shost, counter, count) +#else /* ADVANSYS_STATS */ +#define ASC_STATS_ADD(shost, counter, count) \ + (((struct asc_board *) shost_priv(shost))->asc_stats.counter += (count)) +#endif /* ADVANSYS_STATS */ + +/* If the result wraps when calculating tenths, return 0. */ +#define ASC_TENTHS(num, den) \ + (((10 * ((num)/(den))) > (((num) * 10)/(den))) ? \ + 0 : ((((num) * 10)/(den)) - (10 * ((num)/(den))))) + +/* + * Display a message to the console. 
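Two of the helper calculations above, written out as standalone functions: the rounded-up block count behind ADV_SG_LIST_MAX_BYTE_SIZE and the 32-byte rounding done by ADV_32BALIGN(). The function names are illustrative.

#include <stdio.h>

/* Values copied from the definitions above. */
#define ADV_MAX_SG_LIST     255
#define NO_OF_SG_PER_BLOCK  15

/* Number of 15-element ADV_SG_BLOCK structures needed for sg_cnt elements. */
static unsigned int sg_blocks_needed(unsigned int sg_cnt)
{
	return (sg_cnt + NO_OF_SG_PER_BLOCK - 1) / NO_OF_SG_PER_BLOCK;
}

/* Same rounding idea as ADV_32BALIGN(): bump an address up to a 32-byte boundary. */
static unsigned long align32(unsigned long addr)
{
	return (addr + 0x1F) & ~0x1FUL;
}

int main(void)
{
	/* 255 elements need 17 blocks, matching the ADV_TOT_SG_BLOCK comment. */
	printf("%u blocks, aligned 0x%lx\n",
	       sg_blocks_needed(ADV_MAX_SG_LIST), align32(0x1001));
	return 0;
}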
+ */ +#define ASC_PRINT(s) \ + { \ + printk("advansys: "); \ + printk(s); \ + } + +#define ASC_PRINT1(s, a1) \ + { \ + printk("advansys: "); \ + printk((s), (a1)); \ + } + +#define ASC_PRINT2(s, a1, a2) \ + { \ + printk("advansys: "); \ + printk((s), (a1), (a2)); \ + } + +#define ASC_PRINT3(s, a1, a2, a3) \ + { \ + printk("advansys: "); \ + printk((s), (a1), (a2), (a3)); \ + } + +#define ASC_PRINT4(s, a1, a2, a3, a4) \ + { \ + printk("advansys: "); \ + printk((s), (a1), (a2), (a3), (a4)); \ + } + +#ifndef ADVANSYS_DEBUG + +#define ASC_DBG(lvl, s...) +#define ASC_DBG_PRT_SCSI_HOST(lvl, s) +#define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp) +#define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) +#define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone) +#define ADV_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) +#define ASC_DBG_PRT_HEX(lvl, name, start, length) +#define ASC_DBG_PRT_CDB(lvl, cdb, len) +#define ASC_DBG_PRT_SENSE(lvl, sense, len) +#define ASC_DBG_PRT_INQUIRY(lvl, inq, len) + +#else /* ADVANSYS_DEBUG */ + +/* + * Debugging Message Levels: + * 0: Errors Only + * 1: High-Level Tracing + * 2-N: Verbose Tracing + */ + +#define ASC_DBG(lvl, format, arg...) { \ + if (asc_dbglvl >= (lvl)) \ + printk(KERN_DEBUG "%s: %s: " format, DRV_NAME, \ + __func__ , ## arg); \ +} + +#define ASC_DBG_PRT_SCSI_HOST(lvl, s) \ + { \ + if (asc_dbglvl >= (lvl)) { \ + asc_prt_scsi_host(s); \ + } \ + } + +#define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp) \ + { \ + if (asc_dbglvl >= (lvl)) { \ + asc_prt_asc_scsi_q(scsiqp); \ + } \ + } + +#define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone) \ + { \ + if (asc_dbglvl >= (lvl)) { \ + asc_prt_asc_qdone_info(qdone); \ + } \ + } + +#define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) \ + { \ + if (asc_dbglvl >= (lvl)) { \ + asc_prt_adv_scsi_req_q(scsiqp); \ + } \ + } + +#define ASC_DBG_PRT_HEX(lvl, name, start, length) \ + { \ + if (asc_dbglvl >= (lvl)) { \ + asc_prt_hex((name), (start), (length)); \ + } \ + } + +#define ASC_DBG_PRT_CDB(lvl, cdb, len) \ + ASC_DBG_PRT_HEX((lvl), "CDB", (uchar *) (cdb), (len)); + +#define ASC_DBG_PRT_SENSE(lvl, sense, len) \ + ASC_DBG_PRT_HEX((lvl), "SENSE", (uchar *) (sense), (len)); + +#define ASC_DBG_PRT_INQUIRY(lvl, inq, len) \ + ASC_DBG_PRT_HEX((lvl), "INQUIRY", (uchar *) (inq), (len)); +#endif /* ADVANSYS_DEBUG */ + +#ifdef ADVANSYS_STATS + +/* Per board statistics structure */ +struct asc_stats { + /* Driver Entrypoint Statistics */ + unsigned int queuecommand; /* # calls to advansys_queuecommand() */ + unsigned int reset; /* # calls to advansys_eh_bus_reset() */ + unsigned int biosparam; /* # calls to advansys_biosparam() */ + unsigned int interrupt; /* # advansys_interrupt() calls */ + unsigned int callback; /* # calls to asc/adv_isr_callback() */ + unsigned int done; /* # calls to request's scsi_done function */ + unsigned int build_error; /* # asc/adv_build_req() ASC_ERROR returns. */ + unsigned int adv_build_noreq; /* # adv_build_req() adv_req_t alloc. fail. */ + unsigned int adv_build_nosg; /* # adv_build_req() adv_sgblk_t alloc. fail. */ + /* AscExeScsiQueue()/AdvExeScsiQueue() Statistics */ + unsigned int exe_noerror; /* # ASC_NOERROR returns. */ + unsigned int exe_busy; /* # ASC_BUSY returns. */ + unsigned int exe_error; /* # ASC_ERROR returns. */ + unsigned int exe_unknown; /* # unknown returns. */ + /* Data Transfer Statistics */ + unsigned int xfer_cnt; /* # I/O requests received */ + unsigned int xfer_elem; /* # scatter-gather elements */ + unsigned int xfer_sect; /* # 512-byte blocks */ +}; +#endif /* ADVANSYS_STATS */ + +/* + * Structure allocated for each board. 
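A standalone sketch of how ASC_TENTHS() can be combined with the asc_stats counters, for example to report the average number of 512-byte blocks per I/O request with one decimal digit; the output format and function name are illustrative only.

#include <stdio.h>

/* Copied from the definition above: tenths of num/den, or 0 if the math wraps. */
#define ASC_TENTHS(num, den) \
	(((10 * ((num)/(den))) > (((num) * 10)/(den))) ? \
	0 : ((((num) * 10)/(den)) - (10 * ((num)/(den)))))

/* Illustrative "whole.tenths" statistic built from xfer_sect and xfer_cnt. */
static void print_avg_sectors(unsigned int xfer_sect, unsigned int xfer_cnt)
{
	if (xfer_cnt == 0)
		return;
	printf("avg blocks/request: %u.%u\n",
	       xfer_sect / xfer_cnt, ASC_TENTHS(xfer_sect, xfer_cnt));
}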
+ * + * This structure is allocated by scsi_host_alloc() at the end + * of the 'Scsi_Host' structure starting at the 'hostdata' + * field. It is guaranteed to be allocated from DMA-able memory. + */ +struct asc_board { + struct device *dev; + struct Scsi_Host *shost; + uint flags; /* Board flags */ + unsigned int irq; + union { + ASC_DVC_VAR asc_dvc_var; /* Narrow board */ + ADV_DVC_VAR adv_dvc_var; /* Wide board */ + } dvc_var; + union { + ASC_DVC_CFG asc_dvc_cfg; /* Narrow board */ + ADV_DVC_CFG adv_dvc_cfg; /* Wide board */ + } dvc_cfg; + ushort asc_n_io_port; /* Number I/O ports. */ + ADV_SCSI_BIT_ID_TYPE init_tidmask; /* Target init./valid mask */ + ushort reqcnt[ADV_MAX_TID + 1]; /* Starvation request count */ + ADV_SCSI_BIT_ID_TYPE queue_full; /* Queue full mask */ + ushort queue_full_cnt[ADV_MAX_TID + 1]; /* Queue full count */ + union { + ASCEEP_CONFIG asc_eep; /* Narrow EEPROM config. */ + ADVEEP_3550_CONFIG adv_3550_eep; /* 3550 EEPROM config. */ + ADVEEP_38C0800_CONFIG adv_38C0800_eep; /* 38C0800 EEPROM config. */ + ADVEEP_38C1600_CONFIG adv_38C1600_eep; /* 38C1600 EEPROM config. */ + } eep_config; + /* /proc/scsi/advansys/[0...] */ +#ifdef ADVANSYS_STATS + struct asc_stats asc_stats; /* Board statistics */ +#endif /* ADVANSYS_STATS */ + /* + * The following fields are used only for Narrow Boards. + */ + uchar sdtr_data[ASC_MAX_TID + 1]; /* SDTR information */ + /* + * The following fields are used only for Wide Boards. + */ + void __iomem *ioremap_addr; /* I/O Memory remap address. */ + ushort ioport; /* I/O Port address. */ + adv_req_t *adv_reqp; /* Request structures. */ + dma_addr_t adv_reqp_addr; + size_t adv_reqp_size; + struct dma_pool *adv_sgblk_pool; /* Scatter-gather structures. */ + ushort bios_signature; /* BIOS Signature. */ + ushort bios_version; /* BIOS Version. */ + ushort bios_codeseg; /* BIOS Code Segment. */ + ushort bios_codelen; /* BIOS Code Segment Length. 
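A standalone sketch of per-target bitmask handling such as the board's init_tidmask or the ADV_DVC_VAR '*_able' fields, using ADV_TID_TO_TIDMASK() as defined earlier: mark a target, unmark it, test it. The helper names are not from the driver.

/* Copied from the definitions above. */
#define ADV_MAX_TID 15
#define ADV_TID_TO_TIDMASK(tid) (0x01 << ((tid) & ADV_MAX_TID))

static void tidmask_set(unsigned short *mask, unsigned int tid)
{
	*mask |= ADV_TID_TO_TIDMASK(tid);
}

static void tidmask_clear(unsigned short *mask, unsigned int tid)
{
	*mask &= ~ADV_TID_TO_TIDMASK(tid);
}

static int tidmask_test(unsigned short mask, unsigned int tid)
{
	return (mask & ADV_TID_TO_TIDMASK(tid)) != 0;
}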
*/ +}; + +#define asc_dvc_to_board(asc_dvc) container_of(asc_dvc, struct asc_board, \ + dvc_var.asc_dvc_var) +#define adv_dvc_to_board(adv_dvc) container_of(adv_dvc, struct asc_board, \ + dvc_var.adv_dvc_var) +#define adv_dvc_to_pdev(adv_dvc) to_pci_dev(adv_dvc_to_board(adv_dvc)->dev) + +struct advansys_cmd { + dma_addr_t dma_handle; +}; + +static struct advansys_cmd *advansys_cmd(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +#ifdef ADVANSYS_DEBUG +static int asc_dbglvl = 3; + +/* + * asc_prt_asc_dvc_var() + */ +static void asc_prt_asc_dvc_var(ASC_DVC_VAR *h) +{ + printk("ASC_DVC_VAR at addr 0x%lx\n", (ulong)h); + + printk(" iop_base 0x%x, err_code 0x%x, dvc_cntl 0x%x, bug_fix_cntl " + "%d,\n", h->iop_base, h->err_code, h->dvc_cntl, h->bug_fix_cntl); + + printk(" bus_type %d, init_sdtr 0x%x,\n", h->bus_type, + (unsigned)h->init_sdtr); + + printk(" sdtr_done 0x%x, use_tagged_qng 0x%x, unit_not_ready 0x%x, " + "chip_no 0x%x,\n", (unsigned)h->sdtr_done, + (unsigned)h->use_tagged_qng, (unsigned)h->unit_not_ready, + (unsigned)h->chip_no); + + printk(" queue_full_or_busy 0x%x, start_motor 0x%x, scsi_reset_wait " + "%u,\n", (unsigned)h->queue_full_or_busy, + (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait); + + printk(" is_in_int %u, max_total_qng %u, cur_total_qng %u, " + "in_critical_cnt %u,\n", (unsigned)h->is_in_int, + (unsigned)h->max_total_qng, (unsigned)h->cur_total_qng, + (unsigned)h->in_critical_cnt); + + printk(" last_q_shortage %u, init_state 0x%x, no_scam 0x%x, " + "pci_fix_asyn_xfer 0x%x,\n", (unsigned)h->last_q_shortage, + (unsigned)h->init_state, (unsigned)h->no_scam, + (unsigned)h->pci_fix_asyn_xfer); + + printk(" cfg 0x%lx\n", (ulong)h->cfg); +} + +/* + * asc_prt_asc_dvc_cfg() + */ +static void asc_prt_asc_dvc_cfg(ASC_DVC_CFG *h) +{ + printk("ASC_DVC_CFG at addr 0x%lx\n", (ulong)h); + + printk(" can_tagged_qng 0x%x, cmd_qng_enabled 0x%x,\n", + h->can_tagged_qng, h->cmd_qng_enabled); + printk(" disc_enable 0x%x, sdtr_enable 0x%x,\n", + h->disc_enable, h->sdtr_enable); + + printk(" chip_scsi_id %d, chip_version %d,\n", + h->chip_scsi_id, h->chip_version); + + printk(" mcode_date 0x%x, mcode_version %d\n", + h->mcode_date, h->mcode_version); +} + +/* + * asc_prt_adv_dvc_var() + * + * Display an ADV_DVC_VAR structure. + */ +static void asc_prt_adv_dvc_var(ADV_DVC_VAR *h) +{ + printk(" ADV_DVC_VAR at addr 0x%lx\n", (ulong)h); + + printk(" iop_base 0x%lx, err_code 0x%x, ultra_able 0x%x\n", + (ulong)h->iop_base, h->err_code, (unsigned)h->ultra_able); + + printk(" sdtr_able 0x%x, wdtr_able 0x%x\n", + (unsigned)h->sdtr_able, (unsigned)h->wdtr_able); + + printk(" start_motor 0x%x, scsi_reset_wait 0x%x\n", + (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait); + + printk(" max_host_qng %u, max_dvc_qng %u, carr_freelist 0x%p\n", + (unsigned)h->max_host_qng, (unsigned)h->max_dvc_qng, + h->carr_freelist); + + printk(" icq_sp 0x%p, irq_sp 0x%p\n", h->icq_sp, h->irq_sp); + + printk(" no_scam 0x%x, tagqng_able 0x%x\n", + (unsigned)h->no_scam, (unsigned)h->tagqng_able); + + printk(" chip_scsi_id 0x%x, cfg 0x%lx\n", + (unsigned)h->chip_scsi_id, (ulong)h->cfg); +} + +/* + * asc_prt_adv_dvc_cfg() + * + * Display an ADV_DVC_CFG structure. 
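The asc_dvc_to_board()/adv_dvc_to_board() accessors above rely on container_of() over the embedded dvc_var member. A standalone reduction of that pattern follows, with simplified stand-in types and a userspace definition of container_of(); none of these names are the driver's.

#include <stddef.h>

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_dvc_var { int err_code; };

struct demo_board {
	int flags;
	struct demo_dvc_var dvc_var;	/* embedded, like dvc_var.adv_dvc_var */
};

/* Recover the enclosing per-board structure from the embedded library structure. */
static struct demo_board *dvc_to_board(struct demo_dvc_var *dvc)
{
	return container_of(dvc, struct demo_board, dvc_var);
}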
+ */ +static void asc_prt_adv_dvc_cfg(ADV_DVC_CFG *h) +{ + printk(" ADV_DVC_CFG at addr 0x%lx\n", (ulong)h); + + printk(" disc_enable 0x%x, termination 0x%x\n", + h->disc_enable, h->termination); + + printk(" chip_version 0x%x, mcode_date 0x%x\n", + h->chip_version, h->mcode_date); + + printk(" mcode_version 0x%x, control_flag 0x%x\n", + h->mcode_version, h->control_flag); +} + +/* + * asc_prt_scsi_host() + */ +static void asc_prt_scsi_host(struct Scsi_Host *s) +{ + struct asc_board *boardp = shost_priv(s); + + printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev)); + printk(" host_busy %d, host_no %d,\n", + scsi_host_busy(s), s->host_no); + + printk(" base 0x%lx, io_port 0x%lx, irq %d,\n", + (ulong)s->base, (ulong)s->io_port, boardp->irq); + + printk(" dma_channel %d, this_id %d, can_queue %d,\n", + s->dma_channel, s->this_id, s->can_queue); + + printk(" cmd_per_lun %d, sg_tablesize %d\n", + s->cmd_per_lun, s->sg_tablesize); + + if (ASC_NARROW_BOARD(boardp)) { + asc_prt_asc_dvc_var(&boardp->dvc_var.asc_dvc_var); + asc_prt_asc_dvc_cfg(&boardp->dvc_cfg.asc_dvc_cfg); + } else { + asc_prt_adv_dvc_var(&boardp->dvc_var.adv_dvc_var); + asc_prt_adv_dvc_cfg(&boardp->dvc_cfg.adv_dvc_cfg); + } +} + +/* + * asc_prt_hex() + * + * Print hexadecimal output in 4 byte groupings 32 bytes + * or 8 double-words per line. + */ +static void asc_prt_hex(char *f, uchar *s, int l) +{ + int i; + int j; + int k; + int m; + + printk("%s: (%d bytes)\n", f, l); + + for (i = 0; i < l; i += 32) { + + /* Display a maximum of 8 double-words per line. */ + if ((k = (l - i) / 4) >= 8) { + k = 8; + m = 0; + } else { + m = (l - i) % 4; + } + + for (j = 0; j < k; j++) { + printk(" %2.2X%2.2X%2.2X%2.2X", + (unsigned)s[i + (j * 4)], + (unsigned)s[i + (j * 4) + 1], + (unsigned)s[i + (j * 4) + 2], + (unsigned)s[i + (j * 4) + 3]); + } + + switch (m) { + case 0: + default: + break; + case 1: + printk(" %2.2X", (unsigned)s[i + (j * 4)]); + break; + case 2: + printk(" %2.2X%2.2X", + (unsigned)s[i + (j * 4)], + (unsigned)s[i + (j * 4) + 1]); + break; + case 3: + printk(" %2.2X%2.2X%2.2X", + (unsigned)s[i + (j * 4) + 1], + (unsigned)s[i + (j * 4) + 2], + (unsigned)s[i + (j * 4) + 3]); + break; + } + + printk("\n"); + } +} + +/* + * asc_prt_asc_scsi_q() + */ +static void asc_prt_asc_scsi_q(ASC_SCSI_Q *q) +{ + ASC_SG_HEAD *sgp; + int i; + + printk("ASC_SCSI_Q at addr 0x%lx\n", (ulong)q); + + printk + (" target_ix 0x%x, target_lun %u, srb_tag 0x%x, tag_code 0x%x,\n", + q->q2.target_ix, q->q1.target_lun, q->q2.srb_tag, + q->q2.tag_code); + + printk + (" data_addr 0x%lx, data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n", + (ulong)le32_to_cpu(q->q1.data_addr), + (ulong)le32_to_cpu(q->q1.data_cnt), + (ulong)le32_to_cpu(q->q1.sense_addr), q->q1.sense_len); + + printk(" cdbptr 0x%lx, cdb_len %u, sg_head 0x%lx, sg_queue_cnt %u\n", + (ulong)q->cdbptr, q->q2.cdb_len, + (ulong)q->sg_head, q->q1.sg_queue_cnt); + + if (q->sg_head) { + sgp = q->sg_head; + printk("ASC_SG_HEAD at addr 0x%lx\n", (ulong)sgp); + printk(" entry_cnt %u, queue_cnt %u\n", sgp->entry_cnt, + sgp->queue_cnt); + for (i = 0; i < sgp->entry_cnt; i++) { + printk(" [%u]: addr 0x%lx, bytes %lu\n", + i, (ulong)le32_to_cpu(sgp->sg_list[i].addr), + (ulong)le32_to_cpu(sgp->sg_list[i].bytes)); + } + + } +} + +/* + * asc_prt_asc_qdone_info() + */ +static void asc_prt_asc_qdone_info(ASC_QDONE_INFO *q) +{ + printk("ASC_QDONE_INFO at addr 0x%lx\n", (ulong)q); + printk(" srb_tag 0x%x, target_ix %u, cdb_len %u, tag_code %u,\n", + q->d2.srb_tag, q->d2.target_ix, q->d2.cdb_len, + 
q->d2.tag_code); + printk + (" done_stat 0x%x, host_stat 0x%x, scsi_stat 0x%x, scsi_msg 0x%x\n", + q->d3.done_stat, q->d3.host_stat, q->d3.scsi_stat, q->d3.scsi_msg); +} + +/* + * asc_prt_adv_sgblock() + * + * Display an ADV_SG_BLOCK structure. + */ +static void asc_prt_adv_sgblock(int sgblockno, ADV_SG_BLOCK *b) +{ + int i; + + printk(" ADV_SG_BLOCK at addr 0x%lx (sgblockno %d)\n", + (ulong)b, sgblockno); + printk(" sg_cnt %u, sg_ptr 0x%x\n", + b->sg_cnt, (u32)le32_to_cpu(b->sg_ptr)); + BUG_ON(b->sg_cnt > NO_OF_SG_PER_BLOCK); + if (b->sg_ptr != 0) + BUG_ON(b->sg_cnt != NO_OF_SG_PER_BLOCK); + for (i = 0; i < b->sg_cnt; i++) { + printk(" [%u]: sg_addr 0x%x, sg_count 0x%x\n", + i, (u32)le32_to_cpu(b->sg_list[i].sg_addr), + (u32)le32_to_cpu(b->sg_list[i].sg_count)); + } +} + +/* + * asc_prt_adv_scsi_req_q() + * + * Display an ADV_SCSI_REQ_Q structure. + */ +static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q) +{ + int sg_blk_cnt; + struct adv_sg_block *sg_ptr; + adv_sgblk_t *sgblkp; + + printk("ADV_SCSI_REQ_Q at addr 0x%lx\n", (ulong)q); + + printk(" target_id %u, target_lun %u, srb_tag 0x%x\n", + q->target_id, q->target_lun, q->srb_tag); + + printk(" cntl 0x%x, data_addr 0x%lx\n", + q->cntl, (ulong)le32_to_cpu(q->data_addr)); + + printk(" data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n", + (ulong)le32_to_cpu(q->data_cnt), + (ulong)le32_to_cpu(q->sense_addr), q->sense_len); + + printk + (" cdb_len %u, done_status 0x%x, host_status 0x%x, scsi_status 0x%x\n", + q->cdb_len, q->done_status, q->host_status, q->scsi_status); + + printk(" sg_working_ix 0x%x, target_cmd %u\n", + q->sg_working_ix, q->target_cmd); + + printk(" scsiq_rptr 0x%lx, sg_real_addr 0x%lx, sg_list_ptr 0x%lx\n", + (ulong)le32_to_cpu(q->scsiq_rptr), + (ulong)le32_to_cpu(q->sg_real_addr), (ulong)q->sg_list_ptr); + + /* Display the request's ADV_SG_BLOCK structures. */ + if (q->sg_list_ptr != NULL) { + sgblkp = container_of(q->sg_list_ptr, adv_sgblk_t, sg_block); + sg_blk_cnt = 0; + while (sgblkp) { + sg_ptr = &sgblkp->sg_block; + asc_prt_adv_sgblock(sg_blk_cnt, sg_ptr); + if (sg_ptr->sg_ptr == 0) { + break; + } + sgblkp = sgblkp->next_sgblkp; + sg_blk_cnt++; + } + } +} +#endif /* ADVANSYS_DEBUG */ + +/* + * advansys_info() + * + * Return suitable for printing on the console with the argument + * adapter's configuration information. + * + * Note: The information line should not exceed ASC_INFO_SIZE bytes, + * otherwise the static 'info' array will be overrun. 
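+ *
+ * The returned line has the following form (illustrative values only,
+ * shown here for a narrow PCI Ultra board):
+ *
+ *   AdvanSys SCSI <version>: PCI Ultra: IO 0xE800-0xE80F, IRQ 0xB
+ *
+ * Wide boards report their PCIMEM range instead of an I/O port range.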
+ */ +static const char *advansys_info(struct Scsi_Host *shost) +{ + static char info[ASC_INFO_SIZE]; + struct asc_board *boardp = shost_priv(shost); + ASC_DVC_VAR *asc_dvc_varp; + ADV_DVC_VAR *adv_dvc_varp; + char *busname; + char *widename = NULL; + + if (ASC_NARROW_BOARD(boardp)) { + asc_dvc_varp = &boardp->dvc_var.asc_dvc_var; + ASC_DBG(1, "begin\n"); + + if (asc_dvc_varp->bus_type & ASC_IS_VL) { + busname = "VL"; + } else if (asc_dvc_varp->bus_type & ASC_IS_EISA) { + busname = "EISA"; + } else if (asc_dvc_varp->bus_type & ASC_IS_PCI) { + if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA) + == ASC_IS_PCI_ULTRA) { + busname = "PCI Ultra"; + } else { + busname = "PCI"; + } + } else { + busname = "?"; + shost_printk(KERN_ERR, shost, "unknown bus " + "type %d\n", asc_dvc_varp->bus_type); + } + sprintf(info, + "AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X", + ASC_VERSION, busname, (ulong)shost->io_port, + (ulong)shost->io_port + ASC_IOADR_GAP - 1, + boardp->irq); + } else { + /* + * Wide Adapter Information + * + * Memory-mapped I/O is used instead of I/O space to access + * the adapter, but display the I/O Port range. The Memory + * I/O address is displayed through the driver /proc file. + */ + adv_dvc_varp = &boardp->dvc_var.adv_dvc_var; + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { + widename = "Ultra-Wide"; + } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { + widename = "Ultra2-Wide"; + } else { + widename = "Ultra3-Wide"; + } + sprintf(info, + "AdvanSys SCSI %s: PCI %s: PCIMEM 0x%lX-0x%lX, IRQ 0x%X", + ASC_VERSION, widename, (ulong)adv_dvc_varp->iop_base, + (ulong)adv_dvc_varp->iop_base + boardp->asc_n_io_port - 1, boardp->irq); + } + BUG_ON(strlen(info) >= ASC_INFO_SIZE); + ASC_DBG(1, "end\n"); + return info; +} + +#ifdef CONFIG_PROC_FS + +/* + * asc_prt_board_devices() + * + * Print driver information for devices attached to the board. + */ +static void asc_prt_board_devices(struct seq_file *m, struct Scsi_Host *shost) +{ + struct asc_board *boardp = shost_priv(shost); + int chip_scsi_id; + int i; + + seq_printf(m, + "\nDevice Information for AdvanSys SCSI Host %d:\n", + shost->host_no); + + if (ASC_NARROW_BOARD(boardp)) { + chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id; + } else { + chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id; + } + + seq_puts(m, "Target IDs Detected:"); + for (i = 0; i <= ADV_MAX_TID; i++) { + if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) + seq_printf(m, " %X,", i); + } + seq_printf(m, " (%X=Host Adapter)\n", chip_scsi_id); +} + +/* + * Display Wide Board BIOS Information. + */ +static void asc_prt_adv_bios(struct seq_file *m, struct Scsi_Host *shost) +{ + struct asc_board *boardp = shost_priv(shost); + ushort major, minor, letter; + + seq_puts(m, "\nROM BIOS Version: "); + + /* + * If the BIOS saved a valid signature, then fill in + * the BIOS code segment base address. + */ + if (boardp->bios_signature != 0x55AA) { + seq_puts(m, "Disabled or Pre-3.1\n" + "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n" + "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n"); + } else { + major = (boardp->bios_version >> 12) & 0xF; + minor = (boardp->bios_version >> 8) & 0xF; + letter = (boardp->bios_version & 0xFF); + + seq_printf(m, "%d.%d%c\n", + major, minor, + letter >= 26 ? '?' : letter + 'A'); + /* + * Current available ROM BIOS release is 3.1I for UW + * and 3.2I for U2W. This code doesn't differentiate + * UW and U2W boards. 
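+ * The letter digit is stored as an offset from 'A' (0 = 'A',
+ * 8 = 'I'), so the test below flags any BIOS older than 3.1I.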
+ */ + if (major < 3 || (major <= 3 && minor < 1) || + (major <= 3 && minor <= 1 && letter < ('I' - 'A'))) { + seq_puts(m, "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n" + "ftp://ftp.connectcom.net/pub\n"); + } + } +} + +/* + * Add serial number to information bar if signature AAh + * is found in at bit 15-9 (7 bits) of word 1. + * + * Serial Number consists fo 12 alpha-numeric digits. + * + * 1 - Product type (A,B,C,D..) Word0: 15-13 (3 bits) + * 2 - MFG Location (A,B,C,D..) Word0: 12-10 (3 bits) + * 3-4 - Product ID (0-99) Word0: 9-0 (10 bits) + * 5 - Product revision (A-J) Word0: " " + * + * Signature Word1: 15-9 (7 bits) + * 6 - Year (0-9) Word1: 8-6 (3 bits) & Word2: 15 (1 bit) + * 7-8 - Week of the year (1-52) Word1: 5-0 (6 bits) + * + * 9-12 - Serial Number (A001-Z999) Word2: 14-0 (15 bits) + * + * Note 1: Only production cards will have a serial number. + * + * Note 2: Signature is most significant 7 bits (0xFE). + * + * Returns ASC_TRUE if serial number found, otherwise returns ASC_FALSE. + */ +static int asc_get_eeprom_string(ushort *serialnum, uchar *cp) +{ + ushort w, num; + + if ((serialnum[1] & 0xFE00) != ((ushort)0xAA << 8)) { + return ASC_FALSE; + } else { + /* + * First word - 6 digits. + */ + w = serialnum[0]; + + /* Product type - 1st digit. */ + if ((*cp = 'A' + ((w & 0xE000) >> 13)) == 'H') { + /* Product type is P=Prototype */ + *cp += 0x8; + } + cp++; + + /* Manufacturing location - 2nd digit. */ + *cp++ = 'A' + ((w & 0x1C00) >> 10); + + /* Product ID - 3rd, 4th digits. */ + num = w & 0x3FF; + *cp++ = '0' + (num / 100); + num %= 100; + *cp++ = '0' + (num / 10); + + /* Product revision - 5th digit. */ + *cp++ = 'A' + (num % 10); + + /* + * Second word + */ + w = serialnum[1]; + + /* + * Year - 6th digit. + * + * If bit 15 of third word is set, then the + * last digit of the year is greater than 7. + */ + if (serialnum[2] & 0x8000) { + *cp++ = '8' + ((w & 0x1C0) >> 6); + } else { + *cp++ = '0' + ((w & 0x1C0) >> 6); + } + + /* Week of year - 7th, 8th digits. */ + num = w & 0x003F; + *cp++ = '0' + num / 10; + num %= 10; + *cp++ = '0' + num; + + /* + * Third word + */ + w = serialnum[2] & 0x7FFF; + + /* Serial number - 9th digit. */ + *cp++ = 'A' + (w / 1000); + + /* 10th, 11th, 12th digits. */ + num = w % 1000; + *cp++ = '0' + num / 100; + num %= 100; + *cp++ = '0' + num / 10; + num %= 10; + *cp++ = '0' + num; + + *cp = '\0'; /* Null Terminate the string. */ + return ASC_TRUE; + } +} + +/* + * asc_prt_asc_board_eeprom() + * + * Print board EEPROM configuration. 
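+ *
+ * The serial number shown here is decoded from ep->adapter_info[]
+ * by asc_get_eeprom_string() above; boards without an EEPROM are
+ * recognized by the 0xBB marker in adapter_info[5] and reported as
+ * using default settings.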
+ */ +static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost) +{ + struct asc_board *boardp = shost_priv(shost); + ASCEEP_CONFIG *ep; + int i; + uchar serialstr[13]; + + ep = &boardp->eep_config.asc_eep; + + seq_printf(m, + "\nEEPROM Settings for AdvanSys SCSI Host %d:\n", + shost->host_no); + + if (asc_get_eeprom_string((ushort *)&ep->adapter_info[0], serialstr) + == ASC_TRUE) + seq_printf(m, " Serial Number: %s\n", serialstr); + else if (ep->adapter_info[5] == 0xBB) + seq_puts(m, + " Default Settings Used for EEPROM-less Adapter.\n"); + else + seq_puts(m, " Serial Number Signature Not Present.\n"); + + seq_printf(m, + " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", + ASC_EEP_GET_CHIP_ID(ep), ep->max_total_qng, + ep->max_tag_qng); + + seq_printf(m, + " cntl 0x%x, no_scam 0x%x\n", ep->cntl, ep->no_scam); + + seq_puts(m, " Target ID: "); + for (i = 0; i <= ASC_MAX_TID; i++) + seq_printf(m, " %d", i); + + seq_puts(m, "\n Disconnects: "); + for (i = 0; i <= ASC_MAX_TID; i++) + seq_printf(m, " %c", + (ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + + seq_puts(m, "\n Command Queuing: "); + for (i = 0; i <= ASC_MAX_TID; i++) + seq_printf(m, " %c", + (ep->use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + + seq_puts(m, "\n Start Motor: "); + for (i = 0; i <= ASC_MAX_TID; i++) + seq_printf(m, " %c", + (ep->start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + + seq_puts(m, "\n Synchronous Transfer:"); + for (i = 0; i <= ASC_MAX_TID; i++) + seq_printf(m, " %c", + (ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + seq_putc(m, '\n'); +} + +/* + * asc_prt_adv_board_eeprom() + * + * Print board EEPROM configuration. + */ +static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost) +{ + struct asc_board *boardp = shost_priv(shost); + ADV_DVC_VAR *adv_dvc_varp; + int i; + char *termstr; + uchar serialstr[13]; + ADVEEP_3550_CONFIG *ep_3550 = NULL; + ADVEEP_38C0800_CONFIG *ep_38C0800 = NULL; + ADVEEP_38C1600_CONFIG *ep_38C1600 = NULL; + ushort word; + ushort *wordp; + ushort sdtr_speed = 0; + + adv_dvc_varp = &boardp->dvc_var.adv_dvc_var; + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { + ep_3550 = &boardp->eep_config.adv_3550_eep; + } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { + ep_38C0800 = &boardp->eep_config.adv_38C0800_eep; + } else { + ep_38C1600 = &boardp->eep_config.adv_38C1600_eep; + } + + seq_printf(m, + "\nEEPROM Settings for AdvanSys SCSI Host %d:\n", + shost->host_no); + + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { + wordp = &ep_3550->serial_number_word1; + } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { + wordp = &ep_38C0800->serial_number_word1; + } else { + wordp = &ep_38C1600->serial_number_word1; + } + + if (asc_get_eeprom_string(wordp, serialstr) == ASC_TRUE) + seq_printf(m, " Serial Number: %s\n", serialstr); + else + seq_puts(m, " Serial Number Signature Not Present.\n"); + + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) + seq_printf(m, + " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", + ep_3550->adapter_scsi_id, + ep_3550->max_host_qng, ep_3550->max_dvc_qng); + else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) + seq_printf(m, + " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", + ep_38C0800->adapter_scsi_id, + ep_38C0800->max_host_qng, + ep_38C0800->max_dvc_qng); + else + seq_printf(m, + " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", + ep_38C1600->adapter_scsi_id, + ep_38C1600->max_host_qng, + 
ep_38C1600->max_dvc_qng); + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { + word = ep_3550->termination; + } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { + word = ep_38C0800->termination_lvd; + } else { + word = ep_38C1600->termination_lvd; + } + switch (word) { + case 1: + termstr = "Low Off/High Off"; + break; + case 2: + termstr = "Low Off/High On"; + break; + case 3: + termstr = "Low On/High On"; + break; + default: + case 0: + termstr = "Automatic"; + break; + } + + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) + seq_printf(m, + " termination: %u (%s), bios_ctrl: 0x%x\n", + ep_3550->termination, termstr, + ep_3550->bios_ctrl); + else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) + seq_printf(m, + " termination: %u (%s), bios_ctrl: 0x%x\n", + ep_38C0800->termination_lvd, termstr, + ep_38C0800->bios_ctrl); + else + seq_printf(m, + " termination: %u (%s), bios_ctrl: 0x%x\n", + ep_38C1600->termination_lvd, termstr, + ep_38C1600->bios_ctrl); + + seq_puts(m, " Target ID: "); + for (i = 0; i <= ADV_MAX_TID; i++) + seq_printf(m, " %X", i); + seq_putc(m, '\n'); + + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { + word = ep_3550->disc_enable; + } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { + word = ep_38C0800->disc_enable; + } else { + word = ep_38C1600->disc_enable; + } + seq_puts(m, " Disconnects: "); + for (i = 0; i <= ADV_MAX_TID; i++) + seq_printf(m, " %c", + (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + seq_putc(m, '\n'); + + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { + word = ep_3550->tagqng_able; + } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { + word = ep_38C0800->tagqng_able; + } else { + word = ep_38C1600->tagqng_able; + } + seq_puts(m, " Command Queuing: "); + for (i = 0; i <= ADV_MAX_TID; i++) + seq_printf(m, " %c", + (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + seq_putc(m, '\n'); + + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { + word = ep_3550->start_motor; + } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { + word = ep_38C0800->start_motor; + } else { + word = ep_38C1600->start_motor; + } + seq_puts(m, " Start Motor: "); + for (i = 0; i <= ADV_MAX_TID; i++) + seq_printf(m, " %c", + (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + seq_putc(m, '\n'); + + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { + seq_puts(m, " Synchronous Transfer:"); + for (i = 0; i <= ADV_MAX_TID; i++) + seq_printf(m, " %c", + (ep_3550->sdtr_able & ADV_TID_TO_TIDMASK(i)) ? + 'Y' : 'N'); + seq_putc(m, '\n'); + } + + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { + seq_puts(m, " Ultra Transfer: "); + for (i = 0; i <= ADV_MAX_TID; i++) + seq_printf(m, " %c", + (ep_3550->ultra_able & ADV_TID_TO_TIDMASK(i)) + ? 'Y' : 'N'); + seq_putc(m, '\n'); + } + + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { + word = ep_3550->wdtr_able; + } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { + word = ep_38C0800->wdtr_able; + } else { + word = ep_38C1600->wdtr_able; + } + seq_puts(m, " Wide Transfer: "); + for (i = 0; i <= ADV_MAX_TID; i++) + seq_printf(m, " %c", + (word & ADV_TID_TO_TIDMASK(i)) ? 
'Y' : 'N'); + seq_putc(m, '\n'); + + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800 || + adv_dvc_varp->chip_type == ADV_CHIP_ASC38C1600) { + seq_puts(m, " Synchronous Transfer Speed (Mhz):\n "); + for (i = 0; i <= ADV_MAX_TID; i++) { + char *speed_str; + + if (i == 0) { + sdtr_speed = adv_dvc_varp->sdtr_speed1; + } else if (i == 4) { + sdtr_speed = adv_dvc_varp->sdtr_speed2; + } else if (i == 8) { + sdtr_speed = adv_dvc_varp->sdtr_speed3; + } else if (i == 12) { + sdtr_speed = adv_dvc_varp->sdtr_speed4; + } + switch (sdtr_speed & ADV_MAX_TID) { + case 0: + speed_str = "Off"; + break; + case 1: + speed_str = " 5"; + break; + case 2: + speed_str = " 10"; + break; + case 3: + speed_str = " 20"; + break; + case 4: + speed_str = " 40"; + break; + case 5: + speed_str = " 80"; + break; + default: + speed_str = "Unk"; + break; + } + seq_printf(m, "%X:%s ", i, speed_str); + if (i == 7) + seq_puts(m, "\n "); + sdtr_speed >>= 4; + } + seq_putc(m, '\n'); + } +} + +/* + * asc_prt_driver_conf() + */ +static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost) +{ + struct asc_board *boardp = shost_priv(shost); + + seq_printf(m, + "\nLinux Driver Configuration and Information for AdvanSys SCSI Host %d:\n", + shost->host_no); + + seq_printf(m, + " host_busy %d, max_id %u, max_lun %llu, max_channel %u\n", + scsi_host_busy(shost), shost->max_id, + shost->max_lun, shost->max_channel); + + seq_printf(m, + " unique_id %d, can_queue %d, this_id %d, sg_tablesize %u, cmd_per_lun %u\n", + shost->unique_id, shost->can_queue, shost->this_id, + shost->sg_tablesize, shost->cmd_per_lun); + + seq_printf(m, + " flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n", + boardp->flags, shost->last_reset, jiffies, + boardp->asc_n_io_port); + + seq_printf(m, " io_port 0x%lx\n", shost->io_port); +} + +/* + * asc_prt_asc_board_info() + * + * Print dynamic board configuration information. + */ +static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost) +{ + struct asc_board *boardp = shost_priv(shost); + int chip_scsi_id; + ASC_DVC_VAR *v; + ASC_DVC_CFG *c; + int i; + int renegotiate = 0; + + v = &boardp->dvc_var.asc_dvc_var; + c = &boardp->dvc_cfg.asc_dvc_cfg; + chip_scsi_id = c->chip_scsi_id; + + seq_printf(m, + "\nAsc Library Configuration and Statistics for AdvanSys SCSI Host %d:\n", + shost->host_no); + + seq_printf(m, " chip_version %u, mcode_date 0x%x, " + "mcode_version 0x%x, err_code %u\n", + c->chip_version, c->mcode_date, c->mcode_version, + v->err_code); + + /* Current number of commands waiting for the host. */ + seq_printf(m, + " Total Command Pending: %d\n", v->cur_total_qng); + + seq_puts(m, " Command Queuing:"); + for (i = 0; i <= ASC_MAX_TID; i++) { + if ((chip_scsi_id == i) || + ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { + continue; + } + seq_printf(m, " %X:%c", + i, + (v->use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + } + + /* Current number of commands waiting for a device. */ + seq_puts(m, "\n Command Queue Pending:"); + for (i = 0; i <= ASC_MAX_TID; i++) { + if ((chip_scsi_id == i) || + ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { + continue; + } + seq_printf(m, " %X:%u", i, v->cur_dvc_qng[i]); + } + + /* Current limit on number of commands that can be sent to a device. 
*/ + seq_puts(m, "\n Command Queue Limit:"); + for (i = 0; i <= ASC_MAX_TID; i++) { + if ((chip_scsi_id == i) || + ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { + continue; + } + seq_printf(m, " %X:%u", i, v->max_dvc_qng[i]); + } + + /* Indicate whether the device has returned queue full status. */ + seq_puts(m, "\n Command Queue Full:"); + for (i = 0; i <= ASC_MAX_TID; i++) { + if ((chip_scsi_id == i) || + ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { + continue; + } + if (boardp->queue_full & ADV_TID_TO_TIDMASK(i)) + seq_printf(m, " %X:Y-%d", + i, boardp->queue_full_cnt[i]); + else + seq_printf(m, " %X:N", i); + } + + seq_puts(m, "\n Synchronous Transfer:"); + for (i = 0; i <= ASC_MAX_TID; i++) { + if ((chip_scsi_id == i) || + ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { + continue; + } + seq_printf(m, " %X:%c", + i, + (v->sdtr_done & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + } + seq_putc(m, '\n'); + + for (i = 0; i <= ASC_MAX_TID; i++) { + uchar syn_period_ix; + + if ((chip_scsi_id == i) || + ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0) || + ((v->init_sdtr & ADV_TID_TO_TIDMASK(i)) == 0)) { + continue; + } + + seq_printf(m, " %X:", i); + + if ((boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET) == 0) { + seq_puts(m, " Asynchronous"); + } else { + syn_period_ix = + (boardp->sdtr_data[i] >> 4) & (v->max_sdtr_index - + 1); + + seq_printf(m, + " Transfer Period Factor: %d (%d.%d Mhz),", + v->sdtr_period_tbl[syn_period_ix], + 250 / v->sdtr_period_tbl[syn_period_ix], + ASC_TENTHS(250, + v->sdtr_period_tbl[syn_period_ix])); + + seq_printf(m, " REQ/ACK Offset: %d", + boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET); + } + + if ((v->sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) { + seq_puts(m, "*\n"); + renegotiate = 1; + } else { + seq_putc(m, '\n'); + } + } + + if (renegotiate) { + seq_puts(m, " * = Re-negotiation pending before next command.\n"); + } +} + +/* + * asc_prt_adv_board_info() + * + * Print dynamic board configuration information. + */ +static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost) +{ + struct asc_board *boardp = shost_priv(shost); + int i; + ADV_DVC_VAR *v; + ADV_DVC_CFG *c; + AdvPortAddr iop_base; + ushort chip_scsi_id; + ushort lramword; + uchar lrambyte; + ushort tagqng_able; + ushort sdtr_able, wdtr_able; + ushort wdtr_done, sdtr_done; + ushort period = 0; + int renegotiate = 0; + + v = &boardp->dvc_var.adv_dvc_var; + c = &boardp->dvc_cfg.adv_dvc_cfg; + iop_base = v->iop_base; + chip_scsi_id = v->chip_scsi_id; + + seq_printf(m, + "\nAdv Library Configuration and Statistics for AdvanSys SCSI Host %d:\n", + shost->host_no); + + seq_printf(m, + " iop_base 0x%p, cable_detect: %X, err_code %u\n", + v->iop_base, + AdvReadWordRegister(iop_base,IOPW_SCSI_CFG1) & CABLE_DETECT, + v->err_code); + + seq_printf(m, " chip_version %u, mcode_date 0x%x, " + "mcode_version 0x%x\n", c->chip_version, + c->mcode_date, c->mcode_version); + + AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); + seq_puts(m, " Queuing Enabled:"); + for (i = 0; i <= ADV_MAX_TID; i++) { + if ((chip_scsi_id == i) || + ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { + continue; + } + + seq_printf(m, " %X:%c", + i, + (tagqng_able & ADV_TID_TO_TIDMASK(i)) ? 
'Y' : 'N'); + } + + seq_puts(m, "\n Queue Limit:"); + for (i = 0; i <= ADV_MAX_TID; i++) { + if ((chip_scsi_id == i) || + ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { + continue; + } + + AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + i, + lrambyte); + + seq_printf(m, " %X:%d", i, lrambyte); + } + + seq_puts(m, "\n Command Pending:"); + for (i = 0; i <= ADV_MAX_TID; i++) { + if ((chip_scsi_id == i) || + ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { + continue; + } + + AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_QUEUED_CMD + i, + lrambyte); + + seq_printf(m, " %X:%d", i, lrambyte); + } + seq_putc(m, '\n'); + + AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); + seq_puts(m, " Wide Enabled:"); + for (i = 0; i <= ADV_MAX_TID; i++) { + if ((chip_scsi_id == i) || + ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { + continue; + } + + seq_printf(m, " %X:%c", + i, + (wdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + } + seq_putc(m, '\n'); + + AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, wdtr_done); + seq_puts(m, " Transfer Bit Width:"); + for (i = 0; i <= ADV_MAX_TID; i++) { + if ((chip_scsi_id == i) || + ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { + continue; + } + + AdvReadWordLram(iop_base, + ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i), + lramword); + + seq_printf(m, " %X:%d", + i, (lramword & 0x8000) ? 16 : 8); + + if ((wdtr_able & ADV_TID_TO_TIDMASK(i)) && + (wdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) { + seq_putc(m, '*'); + renegotiate = 1; + } + } + seq_putc(m, '\n'); + + AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); + seq_puts(m, " Synchronous Enabled:"); + for (i = 0; i <= ADV_MAX_TID; i++) { + if ((chip_scsi_id == i) || + ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { + continue; + } + + seq_printf(m, " %X:%c", + i, + (sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + } + seq_putc(m, '\n'); + + AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, sdtr_done); + for (i = 0; i <= ADV_MAX_TID; i++) { + + AdvReadWordLram(iop_base, + ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i), + lramword); + lramword &= ~0x8000; + + if ((chip_scsi_id == i) || + ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0) || + ((sdtr_able & ADV_TID_TO_TIDMASK(i)) == 0)) { + continue; + } + + seq_printf(m, " %X:", i); + + if ((lramword & 0x1F) == 0) { /* Check for REQ/ACK Offset 0. */ + seq_puts(m, " Asynchronous"); + } else { + seq_puts(m, " Transfer Period Factor: "); + + if ((lramword & 0x1F00) == 0x1100) { /* 80 Mhz */ + seq_puts(m, "9 (80.0 Mhz),"); + } else if ((lramword & 0x1F00) == 0x1000) { /* 40 Mhz */ + seq_puts(m, "10 (40.0 Mhz),"); + } else { /* 20 Mhz or below. */ + + period = (((lramword >> 8) * 25) + 50) / 4; + + if (period == 0) { /* Should never happen. */ + seq_printf(m, "%d (? 
Mhz), ", period); + } else { + seq_printf(m, + "%d (%d.%d Mhz),", + period, 250 / period, + ASC_TENTHS(250, period)); + } + } + + seq_printf(m, " REQ/ACK Offset: %d", + lramword & 0x1F); + } + + if ((sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) { + seq_puts(m, "*\n"); + renegotiate = 1; + } else { + seq_putc(m, '\n'); + } + } + + if (renegotiate) { + seq_puts(m, " * = Re-negotiation pending before next command.\n"); + } +} + +#ifdef ADVANSYS_STATS +/* + * asc_prt_board_stats() + */ +static void asc_prt_board_stats(struct seq_file *m, struct Scsi_Host *shost) +{ + struct asc_board *boardp = shost_priv(shost); + struct asc_stats *s = &boardp->asc_stats; + + seq_printf(m, + "\nLinux Driver Statistics for AdvanSys SCSI Host %d:\n", + shost->host_no); + + seq_printf(m, + " queuecommand %u, reset %u, biosparam %u, interrupt %u\n", + s->queuecommand, s->reset, s->biosparam, + s->interrupt); + + seq_printf(m, + " callback %u, done %u, build_error %u, build_noreq %u, build_nosg %u\n", + s->callback, s->done, s->build_error, + s->adv_build_noreq, s->adv_build_nosg); + + seq_printf(m, + " exe_noerror %u, exe_busy %u, exe_error %u, exe_unknown %u\n", + s->exe_noerror, s->exe_busy, s->exe_error, + s->exe_unknown); + + /* + * Display data transfer statistics. + */ + if (s->xfer_cnt > 0) { + seq_printf(m, " xfer_cnt %u, xfer_elem %u, ", + s->xfer_cnt, s->xfer_elem); + + seq_printf(m, "xfer_bytes %u.%01u kb\n", + s->xfer_sect / 2, ASC_TENTHS(s->xfer_sect, 2)); + + /* Scatter gather transfer statistics */ + seq_printf(m, " avg_num_elem %u.%01u, ", + s->xfer_elem / s->xfer_cnt, + ASC_TENTHS(s->xfer_elem, s->xfer_cnt)); + + seq_printf(m, "avg_elem_size %u.%01u kb, ", + (s->xfer_sect / 2) / s->xfer_elem, + ASC_TENTHS((s->xfer_sect / 2), s->xfer_elem)); + + seq_printf(m, "avg_xfer_size %u.%01u kb\n", + (s->xfer_sect / 2) / s->xfer_cnt, + ASC_TENTHS((s->xfer_sect / 2), s->xfer_cnt)); + } +} +#endif /* ADVANSYS_STATS */ + +/* + * advansys_show_info() - /proc/scsi/advansys/{0,1,2,3,...} + * + * m: seq_file to print into + * shost: Scsi_Host + * + * Return the number of bytes read from or written to a + * /proc/scsi/advansys/[0...] file. + */ +static int +advansys_show_info(struct seq_file *m, struct Scsi_Host *shost) +{ + struct asc_board *boardp = shost_priv(shost); + + ASC_DBG(1, "begin\n"); + + /* + * User read of /proc/scsi/advansys/[0...] file. + */ + + /* + * Get board configuration information. + * + * advansys_info() returns the board string from its own static buffer. + */ + /* Copy board information. */ + seq_printf(m, "%s\n", (char *)advansys_info(shost)); + /* + * Display Wide Board BIOS Information. + */ + if (!ASC_NARROW_BOARD(boardp)) + asc_prt_adv_bios(m, shost); + + /* + * Display driver information for each device attached to the board. + */ + asc_prt_board_devices(m, shost); + + /* + * Display EEPROM configuration for the board. + */ + if (ASC_NARROW_BOARD(boardp)) + asc_prt_asc_board_eeprom(m, shost); + else + asc_prt_adv_board_eeprom(m, shost); + + /* + * Display driver configuration and information for the board. + */ + asc_prt_driver_conf(m, shost); + +#ifdef ADVANSYS_STATS + /* + * Display driver statistics for the board. + */ + asc_prt_board_stats(m, shost); +#endif /* ADVANSYS_STATS */ + + /* + * Display Asc Library dynamic configuration information + * for the board. 
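+ * For narrow boards this comes from the Asc Library's ASC_DVC_VAR;
+ * for wide boards it is read back from RISC LRAM by
+ * asc_prt_adv_board_info().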
+ */ + if (ASC_NARROW_BOARD(boardp)) + asc_prt_asc_board_info(m, shost); + else + asc_prt_adv_board_info(m, shost); + return 0; +} +#endif /* CONFIG_PROC_FS */ + +static void asc_scsi_done(struct scsi_cmnd *scp) +{ + scsi_dma_unmap(scp); + ASC_STATS(scp->device->host, done); + scsi_done(scp); +} + +static void AscSetBank(PortAddr iop_base, uchar bank) +{ + uchar val; + + val = AscGetChipControl(iop_base) & + (~ + (CC_SINGLE_STEP | CC_TEST | CC_DIAG | CC_SCSI_RESET | + CC_CHIP_RESET)); + if (bank == 1) { + val |= CC_BANK_ONE; + } else if (bank == 2) { + val |= CC_DIAG | CC_BANK_ONE; + } else { + val &= ~CC_BANK_ONE; + } + AscSetChipControl(iop_base, val); +} + +static void AscSetChipIH(PortAddr iop_base, ushort ins_code) +{ + AscSetBank(iop_base, 1); + AscWriteChipIH(iop_base, ins_code); + AscSetBank(iop_base, 0); +} + +static int AscStartChip(PortAddr iop_base) +{ + AscSetChipControl(iop_base, 0); + if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) { + return (0); + } + return (1); +} + +static bool AscStopChip(PortAddr iop_base) +{ + uchar cc_val; + + cc_val = + AscGetChipControl(iop_base) & + (~(CC_SINGLE_STEP | CC_TEST | CC_DIAG)); + AscSetChipControl(iop_base, (uchar)(cc_val | CC_HALT)); + AscSetChipIH(iop_base, INS_HALT); + AscSetChipIH(iop_base, INS_RFLAG_WTM); + if ((AscGetChipStatus(iop_base) & CSW_HALTED) == 0) { + return false; + } + return true; +} + +static bool AscIsChipHalted(PortAddr iop_base) +{ + if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) { + if ((AscGetChipControl(iop_base) & CC_HALT) != 0) { + return true; + } + } + return false; +} + +static bool AscResetChipAndScsiBus(ASC_DVC_VAR *asc_dvc) +{ + PortAddr iop_base; + int i = 10; + + iop_base = asc_dvc->iop_base; + while ((AscGetChipStatus(iop_base) & CSW_SCSI_RESET_ACTIVE) + && (i-- > 0)) { + mdelay(100); + } + AscStopChip(iop_base); + AscSetChipControl(iop_base, CC_CHIP_RESET | CC_SCSI_RESET | CC_HALT); + udelay(60); + AscSetChipIH(iop_base, INS_RFLAG_WTM); + AscSetChipIH(iop_base, INS_HALT); + AscSetChipControl(iop_base, CC_CHIP_RESET | CC_HALT); + AscSetChipControl(iop_base, CC_HALT); + mdelay(200); + AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT); + AscSetChipStatus(iop_base, 0); + return (AscIsChipHalted(iop_base)); +} + +static int AscFindSignature(PortAddr iop_base) +{ + ushort sig_word; + + ASC_DBG(1, "AscGetChipSignatureByte(0x%x) 0x%x\n", + iop_base, AscGetChipSignatureByte(iop_base)); + if (AscGetChipSignatureByte(iop_base) == (uchar)ASC_1000_ID1B) { + ASC_DBG(1, "AscGetChipSignatureWord(0x%x) 0x%x\n", + iop_base, AscGetChipSignatureWord(iop_base)); + sig_word = AscGetChipSignatureWord(iop_base); + if ((sig_word == (ushort)ASC_1000_ID0W) || + (sig_word == (ushort)ASC_1000_ID0W_FIX)) { + return (1); + } + } + return (0); +} + +static void AscEnableInterrupt(PortAddr iop_base) +{ + ushort cfg; + + cfg = AscGetChipCfgLsw(iop_base); + AscSetChipCfgLsw(iop_base, cfg | ASC_CFG0_HOST_INT_ON); +} + +static void AscDisableInterrupt(PortAddr iop_base) +{ + ushort cfg; + + cfg = AscGetChipCfgLsw(iop_base); + AscSetChipCfgLsw(iop_base, cfg & (~ASC_CFG0_HOST_INT_ON)); +} + +static uchar AscReadLramByte(PortAddr iop_base, ushort addr) +{ + unsigned char byte_data; + unsigned short word_data; + + if (isodd_word(addr)) { + AscSetChipLramAddr(iop_base, addr - 1); + word_data = AscGetChipLramData(iop_base); + byte_data = (word_data >> 8) & 0xFF; + } else { + AscSetChipLramAddr(iop_base, addr); + word_data = AscGetChipLramData(iop_base); + byte_data = word_data & 0xFF; + } + return byte_data; +} + +static ushort 
AscReadLramWord(PortAddr iop_base, ushort addr) +{ + ushort word_data; + + AscSetChipLramAddr(iop_base, addr); + word_data = AscGetChipLramData(iop_base); + return (word_data); +} + +static void +AscMemWordSetLram(PortAddr iop_base, ushort s_addr, ushort set_wval, int words) +{ + int i; + + AscSetChipLramAddr(iop_base, s_addr); + for (i = 0; i < words; i++) { + AscSetChipLramData(iop_base, set_wval); + } +} + +static void AscWriteLramWord(PortAddr iop_base, ushort addr, ushort word_val) +{ + AscSetChipLramAddr(iop_base, addr); + AscSetChipLramData(iop_base, word_val); +} + +static void AscWriteLramByte(PortAddr iop_base, ushort addr, uchar byte_val) +{ + ushort word_data; + + if (isodd_word(addr)) { + addr--; + word_data = AscReadLramWord(iop_base, addr); + word_data &= 0x00FF; + word_data |= (((ushort)byte_val << 8) & 0xFF00); + } else { + word_data = AscReadLramWord(iop_base, addr); + word_data &= 0xFF00; + word_data |= ((ushort)byte_val & 0x00FF); + } + AscWriteLramWord(iop_base, addr, word_data); +} + +/* + * Copy 2 bytes to LRAM. + * + * The source data is assumed to be in little-endian order in memory + * and is maintained in little-endian order when written to LRAM. + */ +static void +AscMemWordCopyPtrToLram(PortAddr iop_base, ushort s_addr, + const uchar *s_buffer, int words) +{ + int i; + + AscSetChipLramAddr(iop_base, s_addr); + for (i = 0; i < 2 * words; i += 2) { + /* + * On a little-endian system the second argument below + * produces a little-endian ushort which is written to + * LRAM in little-endian order. On a big-endian system + * the second argument produces a big-endian ushort which + * is "transparently" byte-swapped by outpw() and written + * in little-endian order to LRAM. + */ + outpw(iop_base + IOP_RAM_DATA, + ((ushort)s_buffer[i + 1] << 8) | s_buffer[i]); + } +} + +/* + * Copy 4 bytes to LRAM. + * + * The source data is assumed to be in little-endian order in memory + * and is maintained in little-endian order when written to LRAM. + */ +static void +AscMemDWordCopyPtrToLram(PortAddr iop_base, + ushort s_addr, uchar *s_buffer, int dwords) +{ + int i; + + AscSetChipLramAddr(iop_base, s_addr); + for (i = 0; i < 4 * dwords; i += 4) { + outpw(iop_base + IOP_RAM_DATA, ((ushort)s_buffer[i + 1] << 8) | s_buffer[i]); /* LSW */ + outpw(iop_base + IOP_RAM_DATA, ((ushort)s_buffer[i + 3] << 8) | s_buffer[i + 2]); /* MSW */ + } +} + +/* + * Copy 2 bytes from LRAM. + * + * The source data is assumed to be in little-endian order in LRAM + * and is maintained in little-endian order when written to memory. 
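+ *
+ * For example, if an LRAM word holds the bytes 0xEF 0xBE (the value
+ * 0xBEEF), the copy stores d_buffer[i] = 0xEF and d_buffer[i + 1] =
+ * 0xBE on both little- and big-endian hosts, since inpw() returns
+ * the word in host order.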
+ */ +static void +AscMemWordCopyPtrFromLram(PortAddr iop_base, + ushort s_addr, uchar *d_buffer, int words) +{ + int i; + ushort word; + + AscSetChipLramAddr(iop_base, s_addr); + for (i = 0; i < 2 * words; i += 2) { + word = inpw(iop_base + IOP_RAM_DATA); + d_buffer[i] = word & 0xff; + d_buffer[i + 1] = (word >> 8) & 0xff; + } +} + +static u32 AscMemSumLramWord(PortAddr iop_base, ushort s_addr, int words) +{ + u32 sum = 0; + int i; + + for (i = 0; i < words; i++, s_addr += 2) { + sum += AscReadLramWord(iop_base, s_addr); + } + return (sum); +} + +static void AscInitLram(ASC_DVC_VAR *asc_dvc) +{ + uchar i; + ushort s_addr; + PortAddr iop_base; + + iop_base = asc_dvc->iop_base; + AscMemWordSetLram(iop_base, ASC_QADR_BEG, 0, + (ushort)(((int)(asc_dvc->max_total_qng + 2 + 1) * + 64) >> 1)); + i = ASC_MIN_ACTIVE_QNO; + s_addr = ASC_QADR_BEG + ASC_QBLK_SIZE; + AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD), + (uchar)(i + 1)); + AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD), + (uchar)(asc_dvc->max_total_qng)); + AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO), + (uchar)i); + i++; + s_addr += ASC_QBLK_SIZE; + for (; i < asc_dvc->max_total_qng; i++, s_addr += ASC_QBLK_SIZE) { + AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD), + (uchar)(i + 1)); + AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD), + (uchar)(i - 1)); + AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO), + (uchar)i); + } + AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD), + (uchar)ASC_QLINK_END); + AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD), + (uchar)(asc_dvc->max_total_qng - 1)); + AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO), + (uchar)asc_dvc->max_total_qng); + i++; + s_addr += ASC_QBLK_SIZE; + for (; i <= (uchar)(asc_dvc->max_total_qng + 3); + i++, s_addr += ASC_QBLK_SIZE) { + AscWriteLramByte(iop_base, + (ushort)(s_addr + (ushort)ASC_SCSIQ_B_FWD), i); + AscWriteLramByte(iop_base, + (ushort)(s_addr + (ushort)ASC_SCSIQ_B_BWD), i); + AscWriteLramByte(iop_base, + (ushort)(s_addr + (ushort)ASC_SCSIQ_B_QNO), i); + } +} + +static u32 +AscLoadMicroCode(PortAddr iop_base, ushort s_addr, + const uchar *mcode_buf, ushort mcode_size) +{ + u32 chksum; + ushort mcode_word_size; + ushort mcode_chksum; + + /* Write the microcode buffer starting at LRAM address 0. 
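+ * (the caller passes s_addr == 0). A checksum of the whole image is
+ * returned to the caller, and a separate checksum of the code section
+ * is stored at ASCV_MCODE_CHKSUM_W along with the image size.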
*/ + mcode_word_size = (ushort)(mcode_size >> 1); + AscMemWordSetLram(iop_base, s_addr, 0, mcode_word_size); + AscMemWordCopyPtrToLram(iop_base, s_addr, mcode_buf, mcode_word_size); + + chksum = AscMemSumLramWord(iop_base, s_addr, mcode_word_size); + ASC_DBG(1, "chksum 0x%lx\n", (ulong)chksum); + mcode_chksum = (ushort)AscMemSumLramWord(iop_base, + (ushort)ASC_CODE_SEC_BEG, + (ushort)((mcode_size - + s_addr - (ushort) + ASC_CODE_SEC_BEG) / + 2)); + ASC_DBG(1, "mcode_chksum 0x%lx\n", (ulong)mcode_chksum); + AscWriteLramWord(iop_base, ASCV_MCODE_CHKSUM_W, mcode_chksum); + AscWriteLramWord(iop_base, ASCV_MCODE_SIZE_W, mcode_size); + return chksum; +} + +static void AscInitQLinkVar(ASC_DVC_VAR *asc_dvc) +{ + PortAddr iop_base; + int i; + ushort lram_addr; + + iop_base = asc_dvc->iop_base; + AscPutRiscVarFreeQHead(iop_base, 1); + AscPutRiscVarDoneQTail(iop_base, asc_dvc->max_total_qng); + AscPutVarFreeQHead(iop_base, 1); + AscPutVarDoneQTail(iop_base, asc_dvc->max_total_qng); + AscWriteLramByte(iop_base, ASCV_BUSY_QHEAD_B, + (uchar)((int)asc_dvc->max_total_qng + 1)); + AscWriteLramByte(iop_base, ASCV_DISC1_QHEAD_B, + (uchar)((int)asc_dvc->max_total_qng + 2)); + AscWriteLramByte(iop_base, (ushort)ASCV_TOTAL_READY_Q_B, + asc_dvc->max_total_qng); + AscWriteLramWord(iop_base, ASCV_ASCDVC_ERR_CODE_W, 0); + AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); + AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, 0); + AscWriteLramByte(iop_base, ASCV_SCSIBUSY_B, 0); + AscWriteLramByte(iop_base, ASCV_WTM_FLAG_B, 0); + AscPutQDoneInProgress(iop_base, 0); + lram_addr = ASC_QADR_BEG; + for (i = 0; i < 32; i++, lram_addr += 2) { + AscWriteLramWord(iop_base, lram_addr, 0); + } +} + +static int AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc) +{ + int i; + int warn_code; + PortAddr iop_base; + __le32 phy_addr; + __le32 phy_size; + struct asc_board *board = asc_dvc_to_board(asc_dvc); + + iop_base = asc_dvc->iop_base; + warn_code = 0; + for (i = 0; i <= ASC_MAX_TID; i++) { + AscPutMCodeInitSDTRAtID(iop_base, i, + asc_dvc->cfg->sdtr_period_offset[i]); + } + + AscInitQLinkVar(asc_dvc); + AscWriteLramByte(iop_base, ASCV_DISC_ENABLE_B, + asc_dvc->cfg->disc_enable); + AscWriteLramByte(iop_base, ASCV_HOSTSCSI_ID_B, + ASC_TID_TO_TARGET_ID(asc_dvc->cfg->chip_scsi_id)); + + /* Ensure overrun buffer is aligned on an 8 byte boundary. 
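+ * The buffer is then mapped for DMA and its bus address and size are
+ * passed to the microcode through the ASCV_OVERRUN_PADDR_D and
+ * ASCV_OVERRUN_BSIZE_D LRAM variables as little-endian dwords.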
*/ + BUG_ON((unsigned long)asc_dvc->overrun_buf & 7); + asc_dvc->overrun_dma = dma_map_single(board->dev, asc_dvc->overrun_buf, + ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(board->dev, asc_dvc->overrun_dma)) { + warn_code = -ENOMEM; + goto err_dma_map; + } + phy_addr = cpu_to_le32(asc_dvc->overrun_dma); + AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_PADDR_D, + (uchar *)&phy_addr, 1); + phy_size = cpu_to_le32(ASC_OVERRUN_BSIZE); + AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_BSIZE_D, + (uchar *)&phy_size, 1); + + asc_dvc->cfg->mcode_date = + AscReadLramWord(iop_base, (ushort)ASCV_MC_DATE_W); + asc_dvc->cfg->mcode_version = + AscReadLramWord(iop_base, (ushort)ASCV_MC_VER_W); + + AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR); + if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) { + asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR; + warn_code = -EINVAL; + goto err_mcode_start; + } + if (AscStartChip(iop_base) != 1) { + asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP; + warn_code = -EIO; + goto err_mcode_start; + } + + return warn_code; + +err_mcode_start: + dma_unmap_single(board->dev, asc_dvc->overrun_dma, + ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); +err_dma_map: + asc_dvc->overrun_dma = 0; + return warn_code; +} + +static int AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc) +{ + const struct firmware *fw; + const char fwname[] = "advansys/mcode.bin"; + int err; + unsigned long chksum; + int warn_code; + PortAddr iop_base; + + iop_base = asc_dvc->iop_base; + warn_code = 0; + if ((asc_dvc->dvc_cntl & ASC_CNTL_RESET_SCSI) && + !(asc_dvc->init_state & ASC_INIT_RESET_SCSI_DONE)) { + AscResetChipAndScsiBus(asc_dvc); + mdelay(asc_dvc->scsi_reset_wait * 1000); /* XXX: msleep? */ + } + asc_dvc->init_state |= ASC_INIT_STATE_BEG_LOAD_MC; + if (asc_dvc->err_code != 0) + return ASC_ERROR; + if (!AscFindSignature(asc_dvc->iop_base)) { + asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE; + return warn_code; + } + AscDisableInterrupt(iop_base); + AscInitLram(asc_dvc); + + err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev); + if (err) { + printk(KERN_ERR "Failed to load image \"%s\" err %d\n", + fwname, err); + asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM; + return err; + } + if (fw->size < 4) { + printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", + fw->size, fwname); + release_firmware(fw); + asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM; + return -EINVAL; + } + chksum = (fw->data[3] << 24) | (fw->data[2] << 16) | + (fw->data[1] << 8) | fw->data[0]; + ASC_DBG(1, "_asc_mcode_chksum 0x%lx\n", (ulong)chksum); + if (AscLoadMicroCode(iop_base, 0, &fw->data[4], + fw->size - 4) != chksum) { + asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM; + release_firmware(fw); + return warn_code; + } + release_firmware(fw); + warn_code |= AscInitMicroCodeVar(asc_dvc); + if (!asc_dvc->overrun_dma) + return warn_code; + asc_dvc->init_state |= ASC_INIT_STATE_END_LOAD_MC; + AscEnableInterrupt(iop_base); + return warn_code; +} + +/* + * Load the Microcode + * + * Write the microcode image to RISC memory starting at address 0. + * + * The microcode is stored compressed in the following format: + * + * 254 word (508 byte) table indexed by byte code followed + * by the following byte codes: + * + * 1-Byte Code: + * 00: Emit word 0 in table. + * 01: Emit word 1 in table. + * . + * FD: Emit word 253 in table. + * + * Multi-Byte Code: + * FE WW WW: (3 byte code) Word to emit is the next word WW WW. + * FF BB WW WW: (4 byte code) Emit BB count times next word WW WW. 
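+ *
+ * For example (illustrative bytes only), the sequence
+ *
+ *   02 FE 34 12 FF 03 00 00
+ *
+ * expands to table word 2, the literal word 0x1234, and then the word
+ * 0x0000 repeated three times; words are stored least significant
+ * byte first.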
+ * + * Returns 0 or an error if the checksum doesn't match + */ +static int AdvLoadMicrocode(AdvPortAddr iop_base, const unsigned char *buf, + int size, int memsize, int chksum) +{ + int i, j, end, len = 0; + u32 sum; + + AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0); + + for (i = 253 * 2; i < size; i++) { + if (buf[i] == 0xff) { + unsigned short word = (buf[i + 3] << 8) | buf[i + 2]; + for (j = 0; j < buf[i + 1]; j++) { + AdvWriteWordAutoIncLram(iop_base, word); + len += 2; + } + i += 3; + } else if (buf[i] == 0xfe) { + unsigned short word = (buf[i + 2] << 8) | buf[i + 1]; + AdvWriteWordAutoIncLram(iop_base, word); + i += 2; + len += 2; + } else { + unsigned int off = buf[i] * 2; + unsigned short word = (buf[off + 1] << 8) | buf[off]; + AdvWriteWordAutoIncLram(iop_base, word); + len += 2; + } + } + + end = len; + + while (len < memsize) { + AdvWriteWordAutoIncLram(iop_base, 0); + len += 2; + } + + /* Verify the microcode checksum. */ + sum = 0; + AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0); + + for (len = 0; len < end; len += 2) { + sum += AdvReadWordAutoIncLram(iop_base); + } + + if (sum != chksum) + return ASC_IERR_MCODE_CHKSUM; + + return 0; +} + +static void AdvBuildCarrierFreelist(struct adv_dvc_var *adv_dvc) +{ + off_t carr_offset = 0, next_offset; + dma_addr_t carr_paddr; + int carr_num = ADV_CARRIER_BUFSIZE / sizeof(ADV_CARR_T), i; + + for (i = 0; i < carr_num; i++) { + carr_offset = i * sizeof(ADV_CARR_T); + /* Get physical address of the carrier 'carrp'. */ + carr_paddr = adv_dvc->carrier_addr + carr_offset; + + adv_dvc->carrier[i].carr_pa = cpu_to_le32(carr_paddr); + adv_dvc->carrier[i].carr_va = cpu_to_le32(carr_offset); + adv_dvc->carrier[i].areq_vpa = 0; + next_offset = carr_offset + sizeof(ADV_CARR_T); + if (i == carr_num) + next_offset = ~0; + adv_dvc->carrier[i].next_vpa = cpu_to_le32(next_offset); + } + /* + * We cannot have a carrier with 'carr_va' of '0', as + * a reference to this carrier would be interpreted as + * list termination. + * So start at carrier 1 with the freelist. + */ + adv_dvc->carr_freelist = &adv_dvc->carrier[1]; +} + +static ADV_CARR_T *adv_get_carrier(struct adv_dvc_var *adv_dvc, u32 offset) +{ + int index; + + BUG_ON(offset > ADV_CARRIER_BUFSIZE); + + index = offset / sizeof(ADV_CARR_T); + return &adv_dvc->carrier[index]; +} + +static ADV_CARR_T *adv_get_next_carrier(struct adv_dvc_var *adv_dvc) +{ + ADV_CARR_T *carrp = adv_dvc->carr_freelist; + u32 next_vpa = le32_to_cpu(carrp->next_vpa); + + if (next_vpa == 0 || next_vpa == ~0) { + ASC_DBG(1, "invalid vpa offset 0x%x\n", next_vpa); + return NULL; + } + + adv_dvc->carr_freelist = adv_get_carrier(adv_dvc, next_vpa); + /* + * insert stopper carrier to terminate list + */ + carrp->next_vpa = cpu_to_le32(ADV_CQ_STOPPER); + + return carrp; +} + +/* + * 'offset' is the index in the request pointer array + */ +static adv_req_t * adv_get_reqp(struct adv_dvc_var *adv_dvc, u32 offset) +{ + struct asc_board *boardp = adv_dvc->drv_ptr; + + BUG_ON(offset > adv_dvc->max_host_qng); + return &boardp->adv_reqp[offset]; +} + +/* + * Send an idle command to the chip and wait for completion. + * + * Command completion is polled for once per microsecond. + * + * The function can be called from anywhere including an interrupt handler. + * But the function is not re-entrant, so it uses the DvcEnter/LeaveCritical() + * functions to prevent reentrancy. 
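+ *
+ * The poll is bounded at roughly 100 ms (SCSI_WAIT_100_MSEC); a
+ * timeout is treated as a fatal condition and triggers BUG().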
+ * + * Return Values: + * ADV_TRUE - command completed successfully + * ADV_FALSE - command failed + * ADV_ERROR - command timed out + */ +static int +AdvSendIdleCmd(ADV_DVC_VAR *asc_dvc, + ushort idle_cmd, u32 idle_cmd_parameter) +{ + int result, i, j; + AdvPortAddr iop_base; + + iop_base = asc_dvc->iop_base; + + /* + * Clear the idle command status which is set by the microcode + * to a non-zero value to indicate when the command is completed. + * The non-zero result is one of the IDLE_CMD_STATUS_* values + */ + AdvWriteWordLram(iop_base, ASC_MC_IDLE_CMD_STATUS, (ushort)0); + + /* + * Write the idle command value after the idle command parameter + * has been written to avoid a race condition. If the order is not + * followed, the microcode may process the idle command before the + * parameters have been written to LRAM. + */ + AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IDLE_CMD_PARAMETER, + cpu_to_le32(idle_cmd_parameter)); + AdvWriteWordLram(iop_base, ASC_MC_IDLE_CMD, idle_cmd); + + /* + * Tickle the RISC to tell it to process the idle command. + */ + AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_B); + if (asc_dvc->chip_type == ADV_CHIP_ASC3550) { + /* + * Clear the tickle value. In the ASC-3550 the RISC flag + * command 'clr_tickle_b' does not work unless the host + * value is cleared. + */ + AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_NOP); + } + + /* Wait for up to 100 millisecond for the idle command to timeout. */ + for (i = 0; i < SCSI_WAIT_100_MSEC; i++) { + /* Poll once each microsecond for command completion. */ + for (j = 0; j < SCSI_US_PER_MSEC; j++) { + AdvReadWordLram(iop_base, ASC_MC_IDLE_CMD_STATUS, + result); + if (result != 0) + return result; + udelay(1); + } + } + + BUG(); /* The idle command should never timeout. */ + return ADV_ERROR; +} + +/* + * Reset SCSI Bus and purge all outstanding requests. + * + * Return Value: + * ADV_TRUE(1) - All requests are purged and SCSI Bus is reset. + * ADV_FALSE(0) - Microcode command failed. + * ADV_ERROR(-1) - Microcode command timed-out. Microcode or IC + * may be hung which requires driver recovery. + */ +static int AdvResetSB(ADV_DVC_VAR *asc_dvc) +{ + int status; + + /* + * Send the SCSI Bus Reset idle start idle command which asserts + * the SCSI Bus Reset signal. + */ + status = AdvSendIdleCmd(asc_dvc, (ushort)IDLE_CMD_SCSI_RESET_START, 0L); + if (status != ADV_TRUE) { + return status; + } + + /* + * Delay for the specified SCSI Bus Reset hold time. + * + * The hold time delay is done on the host because the RISC has no + * microsecond accurate timer. + */ + udelay(ASC_SCSI_RESET_HOLD_TIME_US); + + /* + * Send the SCSI Bus Reset end idle command which de-asserts + * the SCSI Bus Reset signal and purges any pending requests. + */ + status = AdvSendIdleCmd(asc_dvc, (ushort)IDLE_CMD_SCSI_RESET_END, 0L); + if (status != ADV_TRUE) { + return status; + } + + mdelay(asc_dvc->scsi_reset_wait * 1000); /* XXX: msleep? */ + + return status; +} + +/* + * Initialize the ASC-3550. + * + * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR. + * + * For a non-fatal error return a warning code. If there are no warnings + * then 0 is returned. + * + * Needed after initialization for error recovery. 
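+ *
+ * In outline: save the BIOS region of RISC LRAM and the per-TID
+ * negotiation state, load and verify the advansys/3550.bin microcode,
+ * restore the BIOS region, program the microcode default register
+ * values (SCSI_CFG0/1, MEM_CFG, SEL_MASK), build the carrier freelist
+ * and the ICQ/IRQ carriers, start the RISC, and finally reset the
+ * SCSI bus if the EEPROM asks for it and no live BIOS configuration
+ * has to be preserved.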
+ */ +static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc) +{ + const struct firmware *fw; + const char fwname[] = "advansys/3550.bin"; + AdvPortAddr iop_base; + ushort warn_code; + int begin_addr; + int end_addr; + ushort code_sum; + int word; + int i; + int err; + unsigned long chksum; + ushort scsi_cfg1; + uchar tid; + ushort bios_mem[ASC_MC_BIOSLEN / 2]; /* BIOS RISC Memory 0x40-0x8F. */ + ushort wdtr_able = 0, sdtr_able, tagqng_able; + uchar max_cmd[ADV_MAX_TID + 1]; + + /* If there is already an error, don't continue. */ + if (asc_dvc->err_code != 0) + return ADV_ERROR; + + /* + * The caller must set 'chip_type' to ADV_CHIP_ASC3550. + */ + if (asc_dvc->chip_type != ADV_CHIP_ASC3550) { + asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE; + return ADV_ERROR; + } + + warn_code = 0; + iop_base = asc_dvc->iop_base; + + /* + * Save the RISC memory BIOS region before writing the microcode. + * The BIOS may already be loaded and using its RISC LRAM region + * so its region must be saved and restored. + * + * Note: This code makes the assumption, which is currently true, + * that a chip reset does not clear RISC LRAM. + */ + for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { + AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), + bios_mem[i]); + } + + /* + * Save current per TID negotiated values. + */ + if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == 0x55AA) { + ushort bios_version, major, minor; + + bios_version = + bios_mem[(ASC_MC_BIOS_VERSION - ASC_MC_BIOSMEM) / 2]; + major = (bios_version >> 12) & 0xF; + minor = (bios_version >> 8) & 0xF; + if (major < 3 || (major == 3 && minor == 1)) { + /* BIOS 3.1 and earlier location of 'wdtr_able' variable. */ + AdvReadWordLram(iop_base, 0x120, wdtr_able); + } else { + AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); + } + } + AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); + AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); + for (tid = 0; tid <= ADV_MAX_TID; tid++) { + AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, + max_cmd[tid]); + } + + err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev); + if (err) { + printk(KERN_ERR "Failed to load image \"%s\" err %d\n", + fwname, err); + asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; + return err; + } + if (fw->size < 4) { + printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", + fw->size, fwname); + release_firmware(fw); + asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; + return -EINVAL; + } + chksum = (fw->data[3] << 24) | (fw->data[2] << 16) | + (fw->data[1] << 8) | fw->data[0]; + asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4], + fw->size - 4, ADV_3550_MEMSIZE, + chksum); + release_firmware(fw); + if (asc_dvc->err_code) + return ADV_ERROR; + + /* + * Restore the RISC memory BIOS region. + */ + for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { + AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), + bios_mem[i]); + } + + /* + * Calculate and write the microcode code checksum to the microcode + * code checksum location ASC_MC_CODE_CHK_SUM (0x2C). + */ + AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr); + AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr); + code_sum = 0; + AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr); + for (word = begin_addr; word < end_addr; word += 2) { + code_sum += AdvReadWordAutoIncLram(iop_base); + } + AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum); + + /* + * Read and save microcode version and date. 
+ */ + AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE, + asc_dvc->cfg->mcode_date); + AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM, + asc_dvc->cfg->mcode_version); + + /* + * Set the chip type to indicate the ASC3550. + */ + AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC3550); + + /* + * If the PCI Configuration Command Register "Parity Error Response + * Control" Bit was clear (0), then set the microcode variable + * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode + * to ignore DMA parity errors. + */ + if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) { + AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); + word |= CONTROL_FLAG_IGNORE_PERR; + AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); + } + + /* + * For ASC-3550, setting the START_CTL_EMFU [3:2] bits sets a FIFO + * threshold of 128 bytes. This register is only accessible to the host. + */ + AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0, + START_CTL_EMFU | READ_CMD_MRM); + + /* + * Microcode operating variables for WDTR, SDTR, and command tag + * queuing will be set in slave_configure() based on what a + * device reports it is capable of in Inquiry byte 7. + * + * If SCSI Bus Resets have been disabled, then directly set + * SDTR and WDTR from the EEPROM configuration. This will allow + * the BIOS and warm boot to work without a SCSI bus hang on + * the Inquiry caused by host and target mismatched DTR values. + * Without the SCSI Bus Reset, before an Inquiry a device can't + * be assumed to be in Asynchronous, Narrow mode. + */ + if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) { + AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, + asc_dvc->wdtr_able); + AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, + asc_dvc->sdtr_able); + } + + /* + * Set microcode operating variables for SDTR_SPEED1, SDTR_SPEED2, + * SDTR_SPEED3, and SDTR_SPEED4 based on the ULTRA EEPROM per TID + * bitmask. These values determine the maximum SDTR speed negotiated + * with a device. + * + * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2, + * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them + * without determining here whether the device supports SDTR. + * + * 4-bit speed SDTR speed name + * =========== =============== + * 0000b (0x0) SDTR disabled + * 0001b (0x1) 5 Mhz + * 0010b (0x2) 10 Mhz + * 0011b (0x3) 20 Mhz (Ultra) + * 0100b (0x4) 40 Mhz (LVD/Ultra2) + * 0101b (0x5) 80 Mhz (LVD2/Ultra3) + * 0110b (0x6) Undefined + * . + * 1111b (0xF) Undefined + */ + word = 0; + for (tid = 0; tid <= ADV_MAX_TID; tid++) { + if (ADV_TID_TO_TIDMASK(tid) & asc_dvc->ultra_able) { + /* Set Ultra speed for TID 'tid'. */ + word |= (0x3 << (4 * (tid % 4))); + } else { + /* Set Fast speed for TID 'tid'. */ + word |= (0x2 << (4 * (tid % 4))); + } + if (tid == 3) { /* Check if done with sdtr_speed1. */ + AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, word); + word = 0; + } else if (tid == 7) { /* Check if done with sdtr_speed2. */ + AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, word); + word = 0; + } else if (tid == 11) { /* Check if done with sdtr_speed3. */ + AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, word); + word = 0; + } else if (tid == 15) { /* Check if done with sdtr_speed4. */ + AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, word); + /* End of loop. */ + } + } + + /* + * Set microcode operating variable for the disconnect per TID bitmask. + */ + AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE, + asc_dvc->cfg->disc_enable); + + /* + * Set SCSI_CFG0 Microcode Default Value. 
+ * + * The microcode will set the SCSI_CFG0 register using this value + * after it is started below. + */ + AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0, + PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN | + asc_dvc->chip_scsi_id); + + /* + * Determine SCSI_CFG1 Microcode Default Value. + * + * The microcode will set the SCSI_CFG1 register using this value + * after it is started below. + */ + + /* Read current SCSI_CFG1 Register value. */ + scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); + + /* + * If all three connectors are in use, return an error. + */ + if ((scsi_cfg1 & CABLE_ILLEGAL_A) == 0 || + (scsi_cfg1 & CABLE_ILLEGAL_B) == 0) { + asc_dvc->err_code |= ASC_IERR_ILLEGAL_CONNECTION; + return ADV_ERROR; + } + + /* + * If the internal narrow cable is reversed all of the SCSI_CTRL + * register signals will be set. Check for and return an error if + * this condition is found. + */ + if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) { + asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE; + return ADV_ERROR; + } + + /* + * If this is a differential board and a single-ended device + * is attached to one of the connectors, return an error. + */ + if ((scsi_cfg1 & DIFF_MODE) && (scsi_cfg1 & DIFF_SENSE) == 0) { + asc_dvc->err_code |= ASC_IERR_SINGLE_END_DEVICE; + return ADV_ERROR; + } + + /* + * If automatic termination control is enabled, then set the + * termination value based on a table listed in a_condor.h. + * + * If manual termination was specified with an EEPROM setting + * then 'termination' was set-up in AdvInitFrom3550EEPROM() and + * is ready to be 'ored' into SCSI_CFG1. + */ + if (asc_dvc->cfg->termination == 0) { + /* + * The software always controls termination by setting TERM_CTL_SEL. + * If TERM_CTL_SEL were set to 0, the hardware would set termination. + */ + asc_dvc->cfg->termination |= TERM_CTL_SEL; + + switch (scsi_cfg1 & CABLE_DETECT) { + /* TERM_CTL_H: on, TERM_CTL_L: on */ + case 0x3: + case 0x7: + case 0xB: + case 0xD: + case 0xE: + case 0xF: + asc_dvc->cfg->termination |= (TERM_CTL_H | TERM_CTL_L); + break; + + /* TERM_CTL_H: on, TERM_CTL_L: off */ + case 0x1: + case 0x5: + case 0x9: + case 0xA: + case 0xC: + asc_dvc->cfg->termination |= TERM_CTL_H; + break; + + /* TERM_CTL_H: off, TERM_CTL_L: off */ + case 0x2: + case 0x6: + break; + } + } + + /* + * Clear any set TERM_CTL_H and TERM_CTL_L bits. + */ + scsi_cfg1 &= ~TERM_CTL; + + /* + * Invert the TERM_CTL_H and TERM_CTL_L bits and then + * set 'scsi_cfg1'. The TERM_POL bit does not need to be + * referenced, because the hardware internally inverts + * the Termination High and Low bits if TERM_POL is set. + */ + scsi_cfg1 |= (TERM_CTL_SEL | (~asc_dvc->cfg->termination & TERM_CTL)); + + /* + * Set SCSI_CFG1 Microcode Default Value + * + * Set filter value and possibly modified termination control + * bits in the Microcode SCSI_CFG1 Register Value. + * + * The microcode will set the SCSI_CFG1 register using this value + * after it is started below. + */ + AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, + FLTR_DISABLE | scsi_cfg1); + + /* + * Set MEM_CFG Microcode Default Value + * + * The microcode will set the MEM_CFG register using this value + * after it is started below. + * + * MEM_CFG may be accessed as a word or byte, but only bits 0-7 + * are defined. + * + * ASC-3550 has 8KB internal memory. 
+ */ + AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG, + BIOS_EN | RAM_SZ_8KB); + + /* + * Set SEL_MASK Microcode Default Value + * + * The microcode will set the SEL_MASK register using this value + * after it is started below. + */ + AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK, + ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id)); + + AdvBuildCarrierFreelist(asc_dvc); + + /* + * Set-up the Host->RISC Initiator Command Queue (ICQ). + */ + + asc_dvc->icq_sp = adv_get_next_carrier(asc_dvc); + if (!asc_dvc->icq_sp) { + asc_dvc->err_code |= ASC_IERR_NO_CARRIER; + return ADV_ERROR; + } + + /* + * Set RISC ICQ physical address start value. + */ + AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa); + + /* + * Set-up the RISC->Host Initiator Response Queue (IRQ). + */ + asc_dvc->irq_sp = adv_get_next_carrier(asc_dvc); + if (!asc_dvc->irq_sp) { + asc_dvc->err_code |= ASC_IERR_NO_CARRIER; + return ADV_ERROR; + } + + /* + * Set RISC IRQ physical address start value. + */ + AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa); + asc_dvc->carr_pending_cnt = 0; + + AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES, + (ADV_INTR_ENABLE_HOST_INTR | + ADV_INTR_ENABLE_GLOBAL_INTR)); + + AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word); + AdvWriteWordRegister(iop_base, IOPW_PC, word); + + /* finally, finally, gentlemen, start your engine */ + AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN); + + /* + * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus + * Resets should be performed. The RISC has to be running + * to issue a SCSI Bus Reset. + */ + if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) { + /* + * If the BIOS Signature is present in memory, restore the + * BIOS Handshake Configuration Table and do not perform + * a SCSI Bus Reset. + */ + if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == + 0x55AA) { + /* + * Restore per TID negotiated values. + */ + AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); + AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); + AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, + tagqng_able); + for (tid = 0; tid <= ADV_MAX_TID; tid++) { + AdvWriteByteLram(iop_base, + ASC_MC_NUMBER_OF_MAX_CMD + tid, + max_cmd[tid]); + } + } else { + if (AdvResetSB(asc_dvc) != ADV_TRUE) { + warn_code = ASC_WARN_BUSRESET_ERROR; + } + } + } + + return warn_code; +} + +/* + * Initialize the ASC-38C0800. + * + * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR. + * + * For a non-fatal error return a warning code. If there are no warnings + * then 0 is returned. + * + * Needed after initialization for error recovery. + */ +static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc) +{ + const struct firmware *fw; + const char fwname[] = "advansys/38C0800.bin"; + AdvPortAddr iop_base; + ushort warn_code; + int begin_addr; + int end_addr; + ushort code_sum; + int word; + int i; + int err; + unsigned long chksum; + ushort scsi_cfg1; + uchar byte; + uchar tid; + ushort bios_mem[ASC_MC_BIOSLEN / 2]; /* BIOS RISC Memory 0x40-0x8F. */ + ushort wdtr_able, sdtr_able, tagqng_able; + uchar max_cmd[ADV_MAX_TID + 1]; + + /* If there is already an error, don't continue. */ + if (asc_dvc->err_code != 0) + return ADV_ERROR; + + /* + * The caller must set 'chip_type' to ADV_CHIP_ASC38C0800. 
+ */ + if (asc_dvc->chip_type != ADV_CHIP_ASC38C0800) { + asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE; + return ADV_ERROR; + } + + warn_code = 0; + iop_base = asc_dvc->iop_base; + + /* + * Save the RISC memory BIOS region before writing the microcode. + * The BIOS may already be loaded and using its RISC LRAM region + * so its region must be saved and restored. + * + * Note: This code makes the assumption, which is currently true, + * that a chip reset does not clear RISC LRAM. + */ + for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { + AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), + bios_mem[i]); + } + + /* + * Save current per TID negotiated values. + */ + AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); + AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); + AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); + for (tid = 0; tid <= ADV_MAX_TID; tid++) { + AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, + max_cmd[tid]); + } + + /* + * RAM BIST (RAM Built-In Self Test) + * + * Address : I/O base + offset 0x38h register (byte). + * Function: Bit 7-6(RW) : RAM mode + * Normal Mode : 0x00 + * Pre-test Mode : 0x40 + * RAM Test Mode : 0x80 + * Bit 5 : unused + * Bit 4(RO) : Done bit + * Bit 3-0(RO) : Status + * Host Error : 0x08 + * Int_RAM Error : 0x04 + * RISC Error : 0x02 + * SCSI Error : 0x01 + * No Error : 0x00 + * + * Note: RAM BIST code should be put right here, before loading the + * microcode and after saving the RISC memory BIOS region. + */ + + /* + * LRAM Pre-test + * + * Write PRE_TEST_MODE (0x40) to register and wait for 10 milliseconds. + * If Done bit not set or low nibble not PRE_TEST_VALUE (0x05), return + * an error. Reset to NORMAL_MODE (0x00) and do again. If cannot reset + * to NORMAL_MODE, return an error too. + */ + for (i = 0; i < 2; i++) { + AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, PRE_TEST_MODE); + mdelay(10); /* Wait for 10ms before reading back. */ + byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST); + if ((byte & RAM_TEST_DONE) == 0 + || (byte & 0x0F) != PRE_TEST_VALUE) { + asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST; + return ADV_ERROR; + } + + AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE); + mdelay(10); /* Wait for 10ms before reading back. */ + if (AdvReadByteRegister(iop_base, IOPB_RAM_BIST) + != NORMAL_VALUE) { + asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST; + return ADV_ERROR; + } + } + + /* + * LRAM Test - It takes about 1.5 ms to run through the test. + * + * Write RAM_TEST_MODE (0x80) to register and wait for 10 milliseconds. + * If Done bit not set or Status not 0, save register byte, set the + * err_code, and return an error. + */ + AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, RAM_TEST_MODE); + mdelay(10); /* Wait for 10ms before checking status. */ + + byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST); + if ((byte & RAM_TEST_DONE) == 0 || (byte & RAM_TEST_STATUS) != 0) { + /* Get here if Done bit not set or Status not 0. */ + asc_dvc->bist_err_code = byte; /* for BIOS display message */ + asc_dvc->err_code = ASC_IERR_BIST_RAM_TEST; + return ADV_ERROR; + } + + /* We need to reset back to normal mode after LRAM test passes. 
*/ + AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE); + + err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev); + if (err) { + printk(KERN_ERR "Failed to load image \"%s\" err %d\n", + fwname, err); + asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; + return err; + } + if (fw->size < 4) { + printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", + fw->size, fwname); + release_firmware(fw); + asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; + return -EINVAL; + } + chksum = (fw->data[3] << 24) | (fw->data[2] << 16) | + (fw->data[1] << 8) | fw->data[0]; + asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4], + fw->size - 4, ADV_38C0800_MEMSIZE, + chksum); + release_firmware(fw); + if (asc_dvc->err_code) + return ADV_ERROR; + + /* + * Restore the RISC memory BIOS region. + */ + for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { + AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), + bios_mem[i]); + } + + /* + * Calculate and write the microcode code checksum to the microcode + * code checksum location ASC_MC_CODE_CHK_SUM (0x2C). + */ + AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr); + AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr); + code_sum = 0; + AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr); + for (word = begin_addr; word < end_addr; word += 2) { + code_sum += AdvReadWordAutoIncLram(iop_base); + } + AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum); + + /* + * Read microcode version and date. + */ + AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE, + asc_dvc->cfg->mcode_date); + AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM, + asc_dvc->cfg->mcode_version); + + /* + * Set the chip type to indicate the ASC38C0800. + */ + AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC38C0800); + + /* + * Write 1 to bit 14 'DIS_TERM_DRV' in the SCSI_CFG1 register. + * When DIS_TERM_DRV set to 1, C_DET[3:0] will reflect current + * cable detection and then we are able to read C_DET[3:0]. + * + * Note: We will reset DIS_TERM_DRV to 0 in the 'Set SCSI_CFG1 + * Microcode Default Value' section below. + */ + scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); + AdvWriteWordRegister(iop_base, IOPW_SCSI_CFG1, + scsi_cfg1 | DIS_TERM_DRV); + + /* + * If the PCI Configuration Command Register "Parity Error Response + * Control" Bit was clear (0), then set the microcode variable + * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode + * to ignore DMA parity errors. + */ + if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) { + AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); + word |= CONTROL_FLAG_IGNORE_PERR; + AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); + } + + /* + * For ASC-38C0800, set FIFO_THRESH_80B [6:4] bits and START_CTL_TH [3:2] + * bits for the default FIFO threshold. + * + * Note: ASC-38C0800 FIFO threshold has been changed to 256 bytes. + * + * For DMA Errata #4 set the BC_THRESH_ENB bit. + */ + AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0, + BC_THRESH_ENB | FIFO_THRESH_80B | START_CTL_TH | + READ_CMD_MRM); + + /* + * Microcode operating variables for WDTR, SDTR, and command tag + * queuing will be set in slave_configure() based on what a + * device reports it is capable of in Inquiry byte 7. + * + * If SCSI Bus Resets have been disabled, then directly set + * SDTR and WDTR from the EEPROM configuration. This will allow + * the BIOS and warm boot to work without a SCSI bus hang on + * the Inquiry caused by host and target mismatched DTR values. 
+ * Without the SCSI Bus Reset, before an Inquiry a device can't + * be assumed to be in Asynchronous, Narrow mode. + */ + if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) { + AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, + asc_dvc->wdtr_able); + AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, + asc_dvc->sdtr_able); + } + + /* + * Set microcode operating variables for DISC and SDTR_SPEED1, + * SDTR_SPEED2, SDTR_SPEED3, and SDTR_SPEED4 based on the EEPROM + * configuration values. + * + * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2, + * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them + * without determining here whether the device supports SDTR. + */ + AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE, + asc_dvc->cfg->disc_enable); + AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, asc_dvc->sdtr_speed1); + AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, asc_dvc->sdtr_speed2); + AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, asc_dvc->sdtr_speed3); + AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, asc_dvc->sdtr_speed4); + + /* + * Set SCSI_CFG0 Microcode Default Value. + * + * The microcode will set the SCSI_CFG0 register using this value + * after it is started below. + */ + AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0, + PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN | + asc_dvc->chip_scsi_id); + + /* + * Determine SCSI_CFG1 Microcode Default Value. + * + * The microcode will set the SCSI_CFG1 register using this value + * after it is started below. + */ + + /* Read current SCSI_CFG1 Register value. */ + scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); + + /* + * If the internal narrow cable is reversed all of the SCSI_CTRL + * register signals will be set. Check for and return an error if + * this condition is found. + */ + if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) { + asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE; + return ADV_ERROR; + } + + /* + * All kind of combinations of devices attached to one of four + * connectors are acceptable except HVD device attached. For example, + * LVD device can be attached to SE connector while SE device attached + * to LVD connector. If LVD device attached to SE connector, it only + * runs up to Ultra speed. + * + * If an HVD device is attached to one of LVD connectors, return an + * error. However, there is no way to detect HVD device attached to + * SE connectors. + */ + if (scsi_cfg1 & HVD) { + asc_dvc->err_code = ASC_IERR_HVD_DEVICE; + return ADV_ERROR; + } + + /* + * If either SE or LVD automatic termination control is enabled, then + * set the termination value based on a table listed in a_condor.h. + * + * If manual termination was specified with an EEPROM setting then + * 'termination' was set-up in AdvInitFrom38C0800EEPROM() and is ready + * to be 'ored' into SCSI_CFG1. + */ + if ((asc_dvc->cfg->termination & TERM_SE) == 0) { + /* SE automatic termination control is enabled. */ + switch (scsi_cfg1 & C_DET_SE) { + /* TERM_SE_HI: on, TERM_SE_LO: on */ + case 0x1: + case 0x2: + case 0x3: + asc_dvc->cfg->termination |= TERM_SE; + break; + + /* TERM_SE_HI: on, TERM_SE_LO: off */ + case 0x0: + asc_dvc->cfg->termination |= TERM_SE_HI; + break; + } + } + + if ((asc_dvc->cfg->termination & TERM_LVD) == 0) { + /* LVD automatic termination control is enabled. 
*/ + switch (scsi_cfg1 & C_DET_LVD) { + /* TERM_LVD_HI: on, TERM_LVD_LO: on */ + case 0x4: + case 0x8: + case 0xC: + asc_dvc->cfg->termination |= TERM_LVD; + break; + + /* TERM_LVD_HI: off, TERM_LVD_LO: off */ + case 0x0: + break; + } + } + + /* + * Clear any set TERM_SE and TERM_LVD bits. + */ + scsi_cfg1 &= (~TERM_SE & ~TERM_LVD); + + /* + * Invert the TERM_SE and TERM_LVD bits and then set 'scsi_cfg1'. + */ + scsi_cfg1 |= (~asc_dvc->cfg->termination & 0xF0); + + /* + * Clear BIG_ENDIAN, DIS_TERM_DRV, Terminator Polarity and HVD/LVD/SE + * bits and set possibly modified termination control bits in the + * Microcode SCSI_CFG1 Register Value. + */ + scsi_cfg1 &= (~BIG_ENDIAN & ~DIS_TERM_DRV & ~TERM_POL & ~HVD_LVD_SE); + + /* + * Set SCSI_CFG1 Microcode Default Value + * + * Set possibly modified termination control and reset DIS_TERM_DRV + * bits in the Microcode SCSI_CFG1 Register Value. + * + * The microcode will set the SCSI_CFG1 register using this value + * after it is started below. + */ + AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, scsi_cfg1); + + /* + * Set MEM_CFG Microcode Default Value + * + * The microcode will set the MEM_CFG register using this value + * after it is started below. + * + * MEM_CFG may be accessed as a word or byte, but only bits 0-7 + * are defined. + * + * ASC-38C0800 has 16KB internal memory. + */ + AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG, + BIOS_EN | RAM_SZ_16KB); + + /* + * Set SEL_MASK Microcode Default Value + * + * The microcode will set the SEL_MASK register using this value + * after it is started below. + */ + AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK, + ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id)); + + AdvBuildCarrierFreelist(asc_dvc); + + /* + * Set-up the Host->RISC Initiator Command Queue (ICQ). + */ + + asc_dvc->icq_sp = adv_get_next_carrier(asc_dvc); + if (!asc_dvc->icq_sp) { + ASC_DBG(0, "Failed to get ICQ carrier\n"); + asc_dvc->err_code |= ASC_IERR_NO_CARRIER; + return ADV_ERROR; + } + + /* + * Set RISC ICQ physical address start value. + * carr_pa is LE, must be native before write + */ + AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa); + + /* + * Set-up the RISC->Host Initiator Response Queue (IRQ). + */ + asc_dvc->irq_sp = adv_get_next_carrier(asc_dvc); + if (!asc_dvc->irq_sp) { + ASC_DBG(0, "Failed to get IRQ carrier\n"); + asc_dvc->err_code |= ASC_IERR_NO_CARRIER; + return ADV_ERROR; + } + + /* + * Set RISC IRQ physical address start value. + * + * carr_pa is LE, must be native before write * + */ + AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa); + asc_dvc->carr_pending_cnt = 0; + + AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES, + (ADV_INTR_ENABLE_HOST_INTR | + ADV_INTR_ENABLE_GLOBAL_INTR)); + + AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word); + AdvWriteWordRegister(iop_base, IOPW_PC, word); + + /* finally, finally, gentlemen, start your engine */ + AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN); + + /* + * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus + * Resets should be performed. The RISC has to be running + * to issue a SCSI Bus Reset. + */ + if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) { + /* + * If the BIOS Signature is present in memory, restore the + * BIOS Handshake Configuration Table and do not perform + * a SCSI Bus Reset. + */ + if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == + 0x55AA) { + /* + * Restore per TID negotiated values. 
+ */ + AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); + AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); + AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, + tagqng_able); + for (tid = 0; tid <= ADV_MAX_TID; tid++) { + AdvWriteByteLram(iop_base, + ASC_MC_NUMBER_OF_MAX_CMD + tid, + max_cmd[tid]); + } + } else { + if (AdvResetSB(asc_dvc) != ADV_TRUE) { + warn_code = ASC_WARN_BUSRESET_ERROR; + } + } + } + + return warn_code; +} + +/* + * Initialize the ASC-38C1600. + * + * On failure set the ASC_DVC_VAR field 'err_code' and return ADV_ERROR. + * + * For a non-fatal error return a warning code. If there are no warnings + * then 0 is returned. + * + * Needed after initialization for error recovery. + */ +static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc) +{ + const struct firmware *fw; + const char fwname[] = "advansys/38C1600.bin"; + AdvPortAddr iop_base; + ushort warn_code; + int begin_addr; + int end_addr; + ushort code_sum; + long word; + int i; + int err; + unsigned long chksum; + ushort scsi_cfg1; + uchar byte; + uchar tid; + ushort bios_mem[ASC_MC_BIOSLEN / 2]; /* BIOS RISC Memory 0x40-0x8F. */ + ushort wdtr_able, sdtr_able, ppr_able, tagqng_able; + uchar max_cmd[ASC_MAX_TID + 1]; + + /* If there is already an error, don't continue. */ + if (asc_dvc->err_code != 0) { + return ADV_ERROR; + } + + /* + * The caller must set 'chip_type' to ADV_CHIP_ASC38C1600. + */ + if (asc_dvc->chip_type != ADV_CHIP_ASC38C1600) { + asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE; + return ADV_ERROR; + } + + warn_code = 0; + iop_base = asc_dvc->iop_base; + + /* + * Save the RISC memory BIOS region before writing the microcode. + * The BIOS may already be loaded and using its RISC LRAM region + * so its region must be saved and restored. + * + * Note: This code makes the assumption, which is currently true, + * that a chip reset does not clear RISC LRAM. + */ + for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { + AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), + bios_mem[i]); + } + + /* + * Save current per TID negotiated values. + */ + AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); + AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); + AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able); + AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); + for (tid = 0; tid <= ASC_MAX_TID; tid++) { + AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, + max_cmd[tid]); + } + + /* + * RAM BIST (Built-In Self Test) + * + * Address : I/O base + offset 0x38h register (byte). + * Function: Bit 7-6(RW) : RAM mode + * Normal Mode : 0x00 + * Pre-test Mode : 0x40 + * RAM Test Mode : 0x80 + * Bit 5 : unused + * Bit 4(RO) : Done bit + * Bit 3-0(RO) : Status + * Host Error : 0x08 + * Int_RAM Error : 0x04 + * RISC Error : 0x02 + * SCSI Error : 0x01 + * No Error : 0x00 + * + * Note: RAM BIST code should be put right here, before loading the + * microcode and after saving the RISC memory BIOS region. + */ + + /* + * LRAM Pre-test + * + * Write PRE_TEST_MODE (0x40) to register and wait for 10 milliseconds. + * If Done bit not set or low nibble not PRE_TEST_VALUE (0x05), return + * an error. Reset to NORMAL_MODE (0x00) and do again. If cannot reset + * to NORMAL_MODE, return an error too. + */ + for (i = 0; i < 2; i++) { + AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, PRE_TEST_MODE); + mdelay(10); /* Wait for 10ms before reading back. 
*/ + byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST); + if ((byte & RAM_TEST_DONE) == 0 + || (byte & 0x0F) != PRE_TEST_VALUE) { + asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST; + return ADV_ERROR; + } + + AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE); + mdelay(10); /* Wait for 10ms before reading back. */ + if (AdvReadByteRegister(iop_base, IOPB_RAM_BIST) + != NORMAL_VALUE) { + asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST; + return ADV_ERROR; + } + } + + /* + * LRAM Test - It takes about 1.5 ms to run through the test. + * + * Write RAM_TEST_MODE (0x80) to register and wait for 10 milliseconds. + * If Done bit not set or Status not 0, save register byte, set the + * err_code, and return an error. + */ + AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, RAM_TEST_MODE); + mdelay(10); /* Wait for 10ms before checking status. */ + + byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST); + if ((byte & RAM_TEST_DONE) == 0 || (byte & RAM_TEST_STATUS) != 0) { + /* Get here if Done bit not set or Status not 0. */ + asc_dvc->bist_err_code = byte; /* for BIOS display message */ + asc_dvc->err_code = ASC_IERR_BIST_RAM_TEST; + return ADV_ERROR; + } + + /* We need to reset back to normal mode after LRAM test passes. */ + AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE); + + err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev); + if (err) { + printk(KERN_ERR "Failed to load image \"%s\" err %d\n", + fwname, err); + asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; + return err; + } + if (fw->size < 4) { + printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", + fw->size, fwname); + release_firmware(fw); + asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; + return -EINVAL; + } + chksum = (fw->data[3] << 24) | (fw->data[2] << 16) | + (fw->data[1] << 8) | fw->data[0]; + asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4], + fw->size - 4, ADV_38C1600_MEMSIZE, + chksum); + release_firmware(fw); + if (asc_dvc->err_code) + return ADV_ERROR; + + /* + * Restore the RISC memory BIOS region. + */ + for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { + AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), + bios_mem[i]); + } + + /* + * Calculate and write the microcode code checksum to the microcode + * code checksum location ASC_MC_CODE_CHK_SUM (0x2C). + */ + AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr); + AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr); + code_sum = 0; + AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr); + for (word = begin_addr; word < end_addr; word += 2) { + code_sum += AdvReadWordAutoIncLram(iop_base); + } + AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum); + + /* + * Read microcode version and date. + */ + AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE, + asc_dvc->cfg->mcode_date); + AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM, + asc_dvc->cfg->mcode_version); + + /* + * Set the chip type to indicate the ASC38C1600. + */ + AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC38C1600); + + /* + * Write 1 to bit 14 'DIS_TERM_DRV' in the SCSI_CFG1 register. + * When DIS_TERM_DRV set to 1, C_DET[3:0] will reflect current + * cable detection and then we are able to read C_DET[3:0]. + * + * Note: We will reset DIS_TERM_DRV to 0 in the 'Set SCSI_CFG1 + * Microcode Default Value' section below. 
+ */ + scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); + AdvWriteWordRegister(iop_base, IOPW_SCSI_CFG1, + scsi_cfg1 | DIS_TERM_DRV); + + /* + * If the PCI Configuration Command Register "Parity Error Response + * Control" Bit was clear (0), then set the microcode variable + * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode + * to ignore DMA parity errors. + */ + if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) { + AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); + word |= CONTROL_FLAG_IGNORE_PERR; + AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); + } + + /* + * If the BIOS control flag AIPP (Asynchronous Information + * Phase Protection) disable bit is not set, then set the firmware + * 'control_flag' CONTROL_FLAG_ENABLE_AIPP bit to enable + * AIPP checking and encoding. + */ + if ((asc_dvc->bios_ctrl & BIOS_CTRL_AIPP_DIS) == 0) { + AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); + word |= CONTROL_FLAG_ENABLE_AIPP; + AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); + } + + /* + * For ASC-38C1600 use DMA_CFG0 default values: FIFO_THRESH_80B [6:4], + * and START_CTL_TH [3:2]. + */ + AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0, + FIFO_THRESH_80B | START_CTL_TH | READ_CMD_MRM); + + /* + * Microcode operating variables for WDTR, SDTR, and command tag + * queuing will be set in slave_configure() based on what a + * device reports it is capable of in Inquiry byte 7. + * + * If SCSI Bus Resets have been disabled, then directly set + * SDTR and WDTR from the EEPROM configuration. This will allow + * the BIOS and warm boot to work without a SCSI bus hang on + * the Inquiry caused by host and target mismatched DTR values. + * Without the SCSI Bus Reset, before an Inquiry a device can't + * be assumed to be in Asynchronous, Narrow mode. + */ + if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) { + AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, + asc_dvc->wdtr_able); + AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, + asc_dvc->sdtr_able); + } + + /* + * Set microcode operating variables for DISC and SDTR_SPEED1, + * SDTR_SPEED2, SDTR_SPEED3, and SDTR_SPEED4 based on the EEPROM + * configuration values. + * + * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2, + * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them + * without determining here whether the device supports SDTR. + */ + AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE, + asc_dvc->cfg->disc_enable); + AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, asc_dvc->sdtr_speed1); + AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, asc_dvc->sdtr_speed2); + AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, asc_dvc->sdtr_speed3); + AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, asc_dvc->sdtr_speed4); + + /* + * Set SCSI_CFG0 Microcode Default Value. + * + * The microcode will set the SCSI_CFG0 register using this value + * after it is started below. + */ + AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0, + PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN | + asc_dvc->chip_scsi_id); + + /* + * Calculate SCSI_CFG1 Microcode Default Value. + * + * The microcode will set the SCSI_CFG1 register using this value + * after it is started below. + * + * Each ASC-38C1600 function has only two cable detect bits. + * The bus mode override bits are in IOPB_SOFT_OVER_WR. + */ + scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); + + /* + * If the cable is reversed all of the SCSI_CTRL register signals + * will be set. 
Check for and return an error if this condition is + * found. + */ + if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) { + asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE; + return ADV_ERROR; + } + + /* + * Each ASC-38C1600 function has two connectors. Only an HVD device + * can not be connected to either connector. An LVD device or SE device + * may be connected to either connecor. If an SE device is connected, + * then at most Ultra speed (20 Mhz) can be used on both connectors. + * + * If an HVD device is attached, return an error. + */ + if (scsi_cfg1 & HVD) { + asc_dvc->err_code |= ASC_IERR_HVD_DEVICE; + return ADV_ERROR; + } + + /* + * Each function in the ASC-38C1600 uses only the SE cable detect and + * termination because there are two connectors for each function. Each + * function may use either LVD or SE mode. Corresponding the SE automatic + * termination control EEPROM bits are used for each function. Each + * function has its own EEPROM. If SE automatic control is enabled for + * the function, then set the termination value based on a table listed + * in a_condor.h. + * + * If manual termination is specified in the EEPROM for the function, + * then 'termination' was set-up in AscInitFrom38C1600EEPROM() and is + * ready to be 'ored' into SCSI_CFG1. + */ + if ((asc_dvc->cfg->termination & TERM_SE) == 0) { + struct pci_dev *pdev = adv_dvc_to_pdev(asc_dvc); + /* SE automatic termination control is enabled. */ + switch (scsi_cfg1 & C_DET_SE) { + /* TERM_SE_HI: on, TERM_SE_LO: on */ + case 0x1: + case 0x2: + case 0x3: + asc_dvc->cfg->termination |= TERM_SE; + break; + + case 0x0: + if (PCI_FUNC(pdev->devfn) == 0) { + /* Function 0 - TERM_SE_HI: off, TERM_SE_LO: off */ + } else { + /* Function 1 - TERM_SE_HI: on, TERM_SE_LO: off */ + asc_dvc->cfg->termination |= TERM_SE_HI; + } + break; + } + } + + /* + * Clear any set TERM_SE bits. + */ + scsi_cfg1 &= ~TERM_SE; + + /* + * Invert the TERM_SE bits and then set 'scsi_cfg1'. + */ + scsi_cfg1 |= (~asc_dvc->cfg->termination & TERM_SE); + + /* + * Clear Big Endian and Terminator Polarity bits and set possibly + * modified termination control bits in the Microcode SCSI_CFG1 + * Register Value. + * + * Big Endian bit is not used even on big endian machines. + */ + scsi_cfg1 &= (~BIG_ENDIAN & ~DIS_TERM_DRV & ~TERM_POL); + + /* + * Set SCSI_CFG1 Microcode Default Value + * + * Set possibly modified termination control bits in the Microcode + * SCSI_CFG1 Register Value. + * + * The microcode will set the SCSI_CFG1 register using this value + * after it is started below. + */ + AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, scsi_cfg1); + + /* + * Set MEM_CFG Microcode Default Value + * + * The microcode will set the MEM_CFG register using this value + * after it is started below. + * + * MEM_CFG may be accessed as a word or byte, but only bits 0-7 + * are defined. + * + * ASC-38C1600 has 32KB internal memory. + * + * XXX - Since ASC38C1600 Rev.3 has a Local RAM failure issue, we come + * out a special 16K Adv Library and Microcode version. After the issue + * resolved, we should turn back to the 32K support. Both a_condor.h and + * mcode.sas files also need to be updated. + * + * AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG, + * BIOS_EN | RAM_SZ_32KB); + */ + AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG, + BIOS_EN | RAM_SZ_16KB); + + /* + * Set SEL_MASK Microcode Default Value + * + * The microcode will set the SEL_MASK register using this value + * after it is started below. 
+ */ + AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK, + ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id)); + + AdvBuildCarrierFreelist(asc_dvc); + + /* + * Set-up the Host->RISC Initiator Command Queue (ICQ). + */ + asc_dvc->icq_sp = adv_get_next_carrier(asc_dvc); + if (!asc_dvc->icq_sp) { + asc_dvc->err_code |= ASC_IERR_NO_CARRIER; + return ADV_ERROR; + } + + /* + * Set RISC ICQ physical address start value. Initialize the + * COMMA register to the same value otherwise the RISC will + * prematurely detect a command is available. + */ + AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa); + AdvWriteDWordRegister(iop_base, IOPDW_COMMA, + le32_to_cpu(asc_dvc->icq_sp->carr_pa)); + + /* + * Set-up the RISC->Host Initiator Response Queue (IRQ). + */ + asc_dvc->irq_sp = adv_get_next_carrier(asc_dvc); + if (!asc_dvc->irq_sp) { + asc_dvc->err_code |= ASC_IERR_NO_CARRIER; + return ADV_ERROR; + } + + /* + * Set RISC IRQ physical address start value. + */ + AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa); + asc_dvc->carr_pending_cnt = 0; + + AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES, + (ADV_INTR_ENABLE_HOST_INTR | + ADV_INTR_ENABLE_GLOBAL_INTR)); + AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word); + AdvWriteWordRegister(iop_base, IOPW_PC, word); + + /* finally, finally, gentlemen, start your engine */ + AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN); + + /* + * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus + * Resets should be performed. The RISC has to be running + * to issue a SCSI Bus Reset. + */ + if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) { + /* + * If the BIOS Signature is present in memory, restore the + * per TID microcode operating variables. + */ + if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == + 0x55AA) { + /* + * Restore per TID negotiated values. + */ + AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); + AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); + AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able); + AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, + tagqng_able); + for (tid = 0; tid <= ASC_MAX_TID; tid++) { + AdvWriteByteLram(iop_base, + ASC_MC_NUMBER_OF_MAX_CMD + tid, + max_cmd[tid]); + } + } else { + if (AdvResetSB(asc_dvc) != ADV_TRUE) { + warn_code = ASC_WARN_BUSRESET_ERROR; + } + } + } + + return warn_code; +} + +/* + * Reset chip and SCSI Bus. + * + * Return Value: + * ADV_TRUE(1) - Chip re-initialization and SCSI Bus Reset successful. + * ADV_FALSE(0) - Chip re-initialization and SCSI Bus Reset failure. + */ +static int AdvResetChipAndSB(ADV_DVC_VAR *asc_dvc) +{ + int status; + ushort wdtr_able, sdtr_able, tagqng_able; + ushort ppr_able = 0; + uchar tid, max_cmd[ADV_MAX_TID + 1]; + AdvPortAddr iop_base; + ushort bios_sig; + + iop_base = asc_dvc->iop_base; + + /* + * Save current per TID negotiated values. + */ + AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); + AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); + if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) { + AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able); + } + AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); + for (tid = 0; tid <= ADV_MAX_TID; tid++) { + AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, + max_cmd[tid]); + } + + /* + * Force the AdvInitAsc3550/38C0800Driver() function to + * perform a SCSI Bus Reset by clearing the BIOS signature word. 
+ * The initialization functions assumes a SCSI Bus Reset is not + * needed if the BIOS signature word is present. + */ + AdvReadWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, bios_sig); + AdvWriteWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, 0); + + /* + * Stop chip and reset it. + */ + AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_STOP); + AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, ADV_CTRL_REG_CMD_RESET); + mdelay(100); + AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, + ADV_CTRL_REG_CMD_WR_IO_REG); + + /* + * Reset Adv Library error code, if any, and try + * re-initializing the chip. + */ + asc_dvc->err_code = 0; + if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) { + status = AdvInitAsc38C1600Driver(asc_dvc); + } else if (asc_dvc->chip_type == ADV_CHIP_ASC38C0800) { + status = AdvInitAsc38C0800Driver(asc_dvc); + } else { + status = AdvInitAsc3550Driver(asc_dvc); + } + + /* Translate initialization return value to status value. */ + if (status == 0) { + status = ADV_TRUE; + } else { + status = ADV_FALSE; + } + + /* + * Restore the BIOS signature word. + */ + AdvWriteWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, bios_sig); + + /* + * Restore per TID negotiated values. + */ + AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); + AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); + if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) { + AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able); + } + AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); + for (tid = 0; tid <= ADV_MAX_TID; tid++) { + AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, + max_cmd[tid]); + } + + return status; +} + +/* + * adv_async_callback() - Adv Library asynchronous event callback function. + */ +static void adv_async_callback(ADV_DVC_VAR *adv_dvc_varp, uchar code) +{ + switch (code) { + case ADV_ASYNC_SCSI_BUS_RESET_DET: + /* + * The firmware detected a SCSI Bus reset. + */ + ASC_DBG(0, "ADV_ASYNC_SCSI_BUS_RESET_DET\n"); + break; + + case ADV_ASYNC_RDMA_FAILURE: + /* + * Handle RDMA failure by resetting the SCSI Bus and + * possibly the chip if it is unresponsive. Log the error + * with a unique code. + */ + ASC_DBG(0, "ADV_ASYNC_RDMA_FAILURE\n"); + AdvResetChipAndSB(adv_dvc_varp); + break; + + case ADV_HOST_SCSI_BUS_RESET: + /* + * Host generated SCSI bus reset occurred. + */ + ASC_DBG(0, "ADV_HOST_SCSI_BUS_RESET\n"); + break; + + default: + ASC_DBG(0, "unknown code 0x%x\n", code); + break; + } +} + +/* + * adv_isr_callback() - Second Level Interrupt Handler called by AdvISR(). + * + * Callback function for the Wide SCSI Adv Library. + */ +static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp) +{ + struct asc_board *boardp = adv_dvc_varp->drv_ptr; + adv_req_t *reqp; + adv_sgblk_t *sgblkp; + struct scsi_cmnd *scp; + u32 resid_cnt; + dma_addr_t sense_addr; + + ASC_DBG(1, "adv_dvc_varp 0x%p, scsiqp 0x%p\n", + adv_dvc_varp, scsiqp); + ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp); + + /* + * Get the adv_req_t structure for the command that has been + * completed. The adv_req_t structure actually contains the + * completed ADV_SCSI_REQ_Q structure. 
+ */ + scp = scsi_host_find_tag(boardp->shost, scsiqp->srb_tag); + + ASC_DBG(1, "scp 0x%p\n", scp); + if (scp == NULL) { + ASC_PRINT + ("adv_isr_callback: scp is NULL; adv_req_t dropped.\n"); + return; + } + ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len); + + reqp = (adv_req_t *)scp->host_scribble; + ASC_DBG(1, "reqp 0x%lx\n", (ulong)reqp); + if (reqp == NULL) { + ASC_PRINT("adv_isr_callback: reqp is NULL\n"); + return; + } + /* + * Remove backreferences to avoid duplicate + * command completions. + */ + scp->host_scribble = NULL; + reqp->cmndp = NULL; + + ASC_STATS(boardp->shost, callback); + ASC_DBG(1, "shost 0x%p\n", boardp->shost); + + sense_addr = le32_to_cpu(scsiqp->sense_addr); + dma_unmap_single(boardp->dev, sense_addr, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + + /* + * 'done_status' contains the command's ending status. + */ + scp->result = 0; + switch (scsiqp->done_status) { + case QD_NO_ERROR: + ASC_DBG(2, "QD_NO_ERROR\n"); + + /* + * Check for an underrun condition. + * + * If there was no error and an underrun condition, then + * then return the number of underrun bytes. + */ + resid_cnt = le32_to_cpu(scsiqp->data_cnt); + if (scsi_bufflen(scp) != 0 && resid_cnt != 0 && + resid_cnt <= scsi_bufflen(scp)) { + ASC_DBG(1, "underrun condition %lu bytes\n", + (ulong)resid_cnt); + scsi_set_resid(scp, resid_cnt); + } + break; + + case QD_WITH_ERROR: + ASC_DBG(2, "QD_WITH_ERROR\n"); + switch (scsiqp->host_status) { + case QHSTA_NO_ERROR: + set_status_byte(scp, scsiqp->scsi_status); + if (scsiqp->scsi_status == SAM_STAT_CHECK_CONDITION) { + ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n"); + ASC_DBG_PRT_SENSE(2, scp->sense_buffer, + SCSI_SENSE_BUFFERSIZE); + } + break; + + default: + /* Some other QHSTA error occurred. */ + ASC_DBG(1, "host_status 0x%x\n", scsiqp->host_status); + set_host_byte(scp, DID_BAD_TARGET); + break; + } + break; + + case QD_ABORTED_BY_HOST: + ASC_DBG(1, "QD_ABORTED_BY_HOST\n"); + set_status_byte(scp, scsiqp->scsi_status); + set_host_byte(scp, DID_ABORT); + break; + + default: + ASC_DBG(1, "done_status 0x%x\n", scsiqp->done_status); + set_status_byte(scp, scsiqp->scsi_status); + set_host_byte(scp, DID_ERROR); + break; + } + + /* + * If the 'init_tidmask' bit isn't already set for the target and the + * current request finished normally, then set the bit for the target + * to indicate that a device is present. + */ + if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->device->id)) == 0 && + scsiqp->done_status == QD_NO_ERROR && + scsiqp->host_status == QHSTA_NO_ERROR) { + boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->device->id); + } + + asc_scsi_done(scp); + + /* + * Free all 'adv_sgblk_t' structures allocated for the request. + */ + while ((sgblkp = reqp->sgblkp) != NULL) { + /* Remove 'sgblkp' from the request list. */ + reqp->sgblkp = sgblkp->next_sgblkp; + + dma_pool_free(boardp->adv_sgblk_pool, sgblkp, + sgblkp->sg_addr); + } + + ASC_DBG(1, "done\n"); +} + +/* + * Adv Library Interrupt Service Routine + * + * This function is called by a driver's interrupt service routine. + * The function disables and re-enables interrupts. + * + * When a microcode idle command is completed, the ADV_DVC_VAR + * 'idle_cmd_done' field is set to ADV_TRUE. + * + * Note: AdvISR() can be called when interrupts are disabled or even + * when there is no hardware interrupt condition present. It will + * always check for completed idle commands and microcode requests. 
+ * This is an important feature that shouldn't be changed because it + * allows commands to be completed from polling mode loops. + * + * Return: + * ADV_TRUE(1) - interrupt was pending + * ADV_FALSE(0) - no interrupt was pending + */ +static int AdvISR(ADV_DVC_VAR *asc_dvc) +{ + AdvPortAddr iop_base; + uchar int_stat; + ADV_CARR_T *free_carrp; + __le32 irq_next_vpa; + ADV_SCSI_REQ_Q *scsiq; + adv_req_t *reqp; + + iop_base = asc_dvc->iop_base; + + /* Reading the register clears the interrupt. */ + int_stat = AdvReadByteRegister(iop_base, IOPB_INTR_STATUS_REG); + + if ((int_stat & (ADV_INTR_STATUS_INTRA | ADV_INTR_STATUS_INTRB | + ADV_INTR_STATUS_INTRC)) == 0) { + return ADV_FALSE; + } + + /* + * Notify the driver of an asynchronous microcode condition by + * calling the adv_async_callback function. The function + * is passed the microcode ASC_MC_INTRB_CODE byte value. + */ + if (int_stat & ADV_INTR_STATUS_INTRB) { + uchar intrb_code; + + AdvReadByteLram(iop_base, ASC_MC_INTRB_CODE, intrb_code); + + if (asc_dvc->chip_type == ADV_CHIP_ASC3550 || + asc_dvc->chip_type == ADV_CHIP_ASC38C0800) { + if (intrb_code == ADV_ASYNC_CARRIER_READY_FAILURE && + asc_dvc->carr_pending_cnt != 0) { + AdvWriteByteRegister(iop_base, IOPB_TICKLE, + ADV_TICKLE_A); + if (asc_dvc->chip_type == ADV_CHIP_ASC3550) { + AdvWriteByteRegister(iop_base, + IOPB_TICKLE, + ADV_TICKLE_NOP); + } + } + } + + adv_async_callback(asc_dvc, intrb_code); + } + + /* + * Check if the IRQ stopper carrier contains a completed request. + */ + while (((irq_next_vpa = + le32_to_cpu(asc_dvc->irq_sp->next_vpa)) & ADV_RQ_DONE) != 0) { + /* + * Get a pointer to the newly completed ADV_SCSI_REQ_Q structure. + * The RISC will have set 'areq_vpa' to a virtual address. + * + * The firmware will have copied the ADV_SCSI_REQ_Q.scsiq_ptr + * field to the carrier ADV_CARR_T.areq_vpa field. The conversion + * below complements the conversion of ADV_SCSI_REQ_Q.scsiq_ptr' + * in AdvExeScsiQueue(). + */ + u32 pa_offset = le32_to_cpu(asc_dvc->irq_sp->areq_vpa); + ASC_DBG(1, "irq_sp %p areq_vpa %u\n", + asc_dvc->irq_sp, pa_offset); + reqp = adv_get_reqp(asc_dvc, pa_offset); + scsiq = &reqp->scsi_req_q; + + /* + * Request finished with good status and the queue was not + * DMAed to host memory by the firmware. Set all status fields + * to indicate good status. + */ + if ((irq_next_vpa & ADV_RQ_GOOD) != 0) { + scsiq->done_status = QD_NO_ERROR; + scsiq->host_status = scsiq->scsi_status = 0; + scsiq->data_cnt = 0L; + } + + /* + * Advance the stopper pointer to the next carrier + * ignoring the lower four bits. Free the previous + * stopper carrier. + */ + free_carrp = asc_dvc->irq_sp; + asc_dvc->irq_sp = adv_get_carrier(asc_dvc, + ADV_GET_CARRP(irq_next_vpa)); + + free_carrp->next_vpa = asc_dvc->carr_freelist->carr_va; + asc_dvc->carr_freelist = free_carrp; + asc_dvc->carr_pending_cnt--; + + /* + * Clear request microcode control flag. + */ + scsiq->cntl = 0; + + /* + * Notify the driver of the completed request by passing + * the ADV_SCSI_REQ_Q pointer to its callback function. + */ + adv_isr_callback(asc_dvc, scsiq); + /* + * Note: After the driver callback function is called, 'scsiq' + * can no longer be referenced. + * + * Fall through and continue processing other completed + * requests... 
+ */ + } + return ADV_TRUE; +} + +static int AscSetLibErrorCode(ASC_DVC_VAR *asc_dvc, ushort err_code) +{ + if (asc_dvc->err_code == 0) { + asc_dvc->err_code = err_code; + AscWriteLramWord(asc_dvc->iop_base, ASCV_ASCDVC_ERR_CODE_W, + err_code); + } + return err_code; +} + +static void AscAckInterrupt(PortAddr iop_base) +{ + uchar host_flag; + uchar risc_flag; + ushort loop; + + loop = 0; + do { + risc_flag = AscReadLramByte(iop_base, ASCV_RISC_FLAG_B); + if (loop++ > 0x7FFF) { + break; + } + } while ((risc_flag & ASC_RISC_FLAG_GEN_INT) != 0); + host_flag = + AscReadLramByte(iop_base, + ASCV_HOST_FLAG_B) & (~ASC_HOST_FLAG_ACK_INT); + AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, + (uchar)(host_flag | ASC_HOST_FLAG_ACK_INT)); + AscSetChipStatus(iop_base, CIW_INT_ACK); + loop = 0; + while (AscGetChipStatus(iop_base) & CSW_INT_PENDING) { + AscSetChipStatus(iop_base, CIW_INT_ACK); + if (loop++ > 3) { + break; + } + } + AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag); +} + +static uchar AscGetSynPeriodIndex(ASC_DVC_VAR *asc_dvc, uchar syn_time) +{ + const uchar *period_table; + int max_index; + int min_index; + int i; + + period_table = asc_dvc->sdtr_period_tbl; + max_index = (int)asc_dvc->max_sdtr_index; + min_index = (int)asc_dvc->min_sdtr_index; + if ((syn_time <= period_table[max_index])) { + for (i = min_index; i < (max_index - 1); i++) { + if (syn_time <= period_table[i]) { + return (uchar)i; + } + } + return (uchar)max_index; + } else { + return (uchar)(max_index + 1); + } +} + +static uchar +AscMsgOutSDTR(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar sdtr_offset) +{ + PortAddr iop_base = asc_dvc->iop_base; + uchar sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period); + EXT_MSG sdtr_buf = { + .msg_type = EXTENDED_MESSAGE, + .msg_len = MS_SDTR_LEN, + .msg_req = EXTENDED_SDTR, + .xfer_period = sdtr_period, + .req_ack_offset = sdtr_offset, + }; + sdtr_offset &= ASC_SYN_MAX_OFFSET; + + if (sdtr_period_index <= asc_dvc->max_sdtr_index) { + AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG, + (uchar *)&sdtr_buf, + sizeof(EXT_MSG) >> 1); + return ((sdtr_period_index << 4) | sdtr_offset); + } else { + sdtr_buf.req_ack_offset = 0; + AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG, + (uchar *)&sdtr_buf, + sizeof(EXT_MSG) >> 1); + return 0; + } +} + +static uchar +AscCalSDTRData(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar syn_offset) +{ + uchar byte; + uchar sdtr_period_ix; + + sdtr_period_ix = AscGetSynPeriodIndex(asc_dvc, sdtr_period); + if (sdtr_period_ix > asc_dvc->max_sdtr_index) + return 0xFF; + byte = (sdtr_period_ix << 4) | (syn_offset & ASC_SYN_MAX_OFFSET); + return byte; +} + +static bool AscSetChipSynRegAtID(PortAddr iop_base, uchar id, uchar sdtr_data) +{ + ASC_SCSI_BIT_ID_TYPE org_id; + int i; + bool sta = true; + + AscSetBank(iop_base, 1); + org_id = AscReadChipDvcID(iop_base); + for (i = 0; i <= ASC_MAX_TID; i++) { + if (org_id == (0x01 << i)) + break; + } + org_id = (ASC_SCSI_BIT_ID_TYPE) i; + AscWriteChipDvcID(iop_base, id); + if (AscReadChipDvcID(iop_base) == (0x01 << id)) { + AscSetBank(iop_base, 0); + AscSetChipSyn(iop_base, sdtr_data); + if (AscGetChipSyn(iop_base) != sdtr_data) { + sta = false; + } + } else { + sta = false; + } + AscSetBank(iop_base, 1); + AscWriteChipDvcID(iop_base, org_id); + AscSetBank(iop_base, 0); + return (sta); +} + +static void AscSetChipSDTR(PortAddr iop_base, uchar sdtr_data, uchar tid_no) +{ + AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data); + AscPutMCodeSDTRDoneAtID(iop_base, tid_no, sdtr_data); +} + +static void 
AscIsrChipHalted(ASC_DVC_VAR *asc_dvc) +{ + EXT_MSG ext_msg; + EXT_MSG out_msg; + ushort halt_q_addr; + bool sdtr_accept; + ushort int_halt_code; + ASC_SCSI_BIT_ID_TYPE scsi_busy; + ASC_SCSI_BIT_ID_TYPE target_id; + PortAddr iop_base; + uchar tag_code; + uchar q_status; + uchar halt_qp; + uchar sdtr_data; + uchar target_ix; + uchar q_cntl, tid_no; + uchar cur_dvc_qng; + uchar asyn_sdtr; + uchar scsi_status; + struct asc_board *boardp; + + BUG_ON(!asc_dvc->drv_ptr); + boardp = asc_dvc->drv_ptr; + + iop_base = asc_dvc->iop_base; + int_halt_code = AscReadLramWord(iop_base, ASCV_HALTCODE_W); + + halt_qp = AscReadLramByte(iop_base, ASCV_CURCDB_B); + halt_q_addr = ASC_QNO_TO_QADDR(halt_qp); + target_ix = AscReadLramByte(iop_base, + (ushort)(halt_q_addr + + (ushort)ASC_SCSIQ_B_TARGET_IX)); + q_cntl = AscReadLramByte(iop_base, + (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL)); + tid_no = ASC_TIX_TO_TID(target_ix); + target_id = (uchar)ASC_TID_TO_TARGET_ID(tid_no); + if (asc_dvc->pci_fix_asyn_xfer & target_id) { + asyn_sdtr = ASYN_SDTR_DATA_FIX_PCI_REV_AB; + } else { + asyn_sdtr = 0; + } + if (int_halt_code == ASC_HALT_DISABLE_ASYN_USE_SYN_FIX) { + if (asc_dvc->pci_fix_asyn_xfer & target_id) { + AscSetChipSDTR(iop_base, 0, tid_no); + boardp->sdtr_data[tid_no] = 0; + } + AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); + return; + } else if (int_halt_code == ASC_HALT_ENABLE_ASYN_USE_SYN_FIX) { + if (asc_dvc->pci_fix_asyn_xfer & target_id) { + AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); + boardp->sdtr_data[tid_no] = asyn_sdtr; + } + AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); + return; + } else if (int_halt_code == ASC_HALT_EXTMSG_IN) { + AscMemWordCopyPtrFromLram(iop_base, + ASCV_MSGIN_BEG, + (uchar *)&ext_msg, + sizeof(EXT_MSG) >> 1); + + if (ext_msg.msg_type == EXTENDED_MESSAGE && + ext_msg.msg_req == EXTENDED_SDTR && + ext_msg.msg_len == MS_SDTR_LEN) { + sdtr_accept = true; + if ((ext_msg.req_ack_offset > ASC_SYN_MAX_OFFSET)) { + + sdtr_accept = false; + ext_msg.req_ack_offset = ASC_SYN_MAX_OFFSET; + } + if ((ext_msg.xfer_period < + asc_dvc->sdtr_period_tbl[asc_dvc->min_sdtr_index]) + || (ext_msg.xfer_period > + asc_dvc->sdtr_period_tbl[asc_dvc-> + max_sdtr_index])) { + sdtr_accept = false; + ext_msg.xfer_period = + asc_dvc->sdtr_period_tbl[asc_dvc-> + min_sdtr_index]; + } + if (sdtr_accept) { + sdtr_data = + AscCalSDTRData(asc_dvc, ext_msg.xfer_period, + ext_msg.req_ack_offset); + if (sdtr_data == 0xFF) { + + q_cntl |= QC_MSG_OUT; + asc_dvc->init_sdtr &= ~target_id; + asc_dvc->sdtr_done &= ~target_id; + AscSetChipSDTR(iop_base, asyn_sdtr, + tid_no); + boardp->sdtr_data[tid_no] = asyn_sdtr; + } + } + if (ext_msg.req_ack_offset == 0) { + + q_cntl &= ~QC_MSG_OUT; + asc_dvc->init_sdtr &= ~target_id; + asc_dvc->sdtr_done &= ~target_id; + AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); + } else { + if (sdtr_accept && (q_cntl & QC_MSG_OUT)) { + q_cntl &= ~QC_MSG_OUT; + asc_dvc->sdtr_done |= target_id; + asc_dvc->init_sdtr |= target_id; + asc_dvc->pci_fix_asyn_xfer &= + ~target_id; + sdtr_data = + AscCalSDTRData(asc_dvc, + ext_msg.xfer_period, + ext_msg. + req_ack_offset); + AscSetChipSDTR(iop_base, sdtr_data, + tid_no); + boardp->sdtr_data[tid_no] = sdtr_data; + } else { + q_cntl |= QC_MSG_OUT; + AscMsgOutSDTR(asc_dvc, + ext_msg.xfer_period, + ext_msg.req_ack_offset); + asc_dvc->pci_fix_asyn_xfer &= + ~target_id; + sdtr_data = + AscCalSDTRData(asc_dvc, + ext_msg.xfer_period, + ext_msg. 
+ req_ack_offset); + AscSetChipSDTR(iop_base, sdtr_data, + tid_no); + boardp->sdtr_data[tid_no] = sdtr_data; + asc_dvc->sdtr_done |= target_id; + asc_dvc->init_sdtr |= target_id; + } + } + + AscWriteLramByte(iop_base, + (ushort)(halt_q_addr + + (ushort)ASC_SCSIQ_B_CNTL), + q_cntl); + AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); + return; + } else if (ext_msg.msg_type == EXTENDED_MESSAGE && + ext_msg.msg_req == EXTENDED_WDTR && + ext_msg.msg_len == MS_WDTR_LEN) { + + ext_msg.wdtr_width = 0; + AscMemWordCopyPtrToLram(iop_base, + ASCV_MSGOUT_BEG, + (uchar *)&ext_msg, + sizeof(EXT_MSG) >> 1); + q_cntl |= QC_MSG_OUT; + AscWriteLramByte(iop_base, + (ushort)(halt_q_addr + + (ushort)ASC_SCSIQ_B_CNTL), + q_cntl); + AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); + return; + } else { + + ext_msg.msg_type = MESSAGE_REJECT; + AscMemWordCopyPtrToLram(iop_base, + ASCV_MSGOUT_BEG, + (uchar *)&ext_msg, + sizeof(EXT_MSG) >> 1); + q_cntl |= QC_MSG_OUT; + AscWriteLramByte(iop_base, + (ushort)(halt_q_addr + + (ushort)ASC_SCSIQ_B_CNTL), + q_cntl); + AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); + return; + } + } else if (int_halt_code == ASC_HALT_CHK_CONDITION) { + + q_cntl |= QC_REQ_SENSE; + + if ((asc_dvc->init_sdtr & target_id) != 0) { + + asc_dvc->sdtr_done &= ~target_id; + + sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no); + q_cntl |= QC_MSG_OUT; + AscMsgOutSDTR(asc_dvc, + asc_dvc-> + sdtr_period_tbl[(sdtr_data >> 4) & + (uchar)(asc_dvc-> + max_sdtr_index - + 1)], + (uchar)(sdtr_data & (uchar) + ASC_SYN_MAX_OFFSET)); + } + + AscWriteLramByte(iop_base, + (ushort)(halt_q_addr + + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); + + tag_code = AscReadLramByte(iop_base, + (ushort)(halt_q_addr + (ushort) + ASC_SCSIQ_B_TAG_CODE)); + tag_code &= 0xDC; + if ((asc_dvc->pci_fix_asyn_xfer & target_id) + && !(asc_dvc->pci_fix_asyn_xfer_always & target_id) + ) { + + tag_code |= (ASC_TAG_FLAG_DISABLE_DISCONNECT + | ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX); + + } + AscWriteLramByte(iop_base, + (ushort)(halt_q_addr + + (ushort)ASC_SCSIQ_B_TAG_CODE), + tag_code); + + q_status = AscReadLramByte(iop_base, + (ushort)(halt_q_addr + (ushort) + ASC_SCSIQ_B_STATUS)); + q_status |= (QS_READY | QS_BUSY); + AscWriteLramByte(iop_base, + (ushort)(halt_q_addr + + (ushort)ASC_SCSIQ_B_STATUS), + q_status); + + scsi_busy = AscReadLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B); + scsi_busy &= ~target_id; + AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy); + + AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); + return; + } else if (int_halt_code == ASC_HALT_SDTR_REJECTED) { + + AscMemWordCopyPtrFromLram(iop_base, + ASCV_MSGOUT_BEG, + (uchar *)&out_msg, + sizeof(EXT_MSG) >> 1); + + if ((out_msg.msg_type == EXTENDED_MESSAGE) && + (out_msg.msg_len == MS_SDTR_LEN) && + (out_msg.msg_req == EXTENDED_SDTR)) { + + asc_dvc->init_sdtr &= ~target_id; + asc_dvc->sdtr_done &= ~target_id; + AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); + boardp->sdtr_data[tid_no] = asyn_sdtr; + } + q_cntl &= ~QC_MSG_OUT; + AscWriteLramByte(iop_base, + (ushort)(halt_q_addr + + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); + AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); + return; + } else if (int_halt_code == ASC_HALT_SS_QUEUE_FULL) { + + scsi_status = AscReadLramByte(iop_base, + (ushort)((ushort)halt_q_addr + + (ushort) + ASC_SCSIQ_SCSI_STATUS)); + cur_dvc_qng = + AscReadLramByte(iop_base, + (ushort)((ushort)ASC_QADR_BEG + + (ushort)target_ix)); + if ((cur_dvc_qng > 0) && (asc_dvc->cur_dvc_qng[tid_no] > 0)) { + + scsi_busy = AscReadLramByte(iop_base, + 
(ushort)ASCV_SCSIBUSY_B); + scsi_busy |= target_id; + AscWriteLramByte(iop_base, + (ushort)ASCV_SCSIBUSY_B, scsi_busy); + asc_dvc->queue_full_or_busy |= target_id; + + if (scsi_status == SAM_STAT_TASK_SET_FULL) { + if (cur_dvc_qng > ASC_MIN_TAGGED_CMD) { + cur_dvc_qng -= 1; + asc_dvc->max_dvc_qng[tid_no] = + cur_dvc_qng; + + AscWriteLramByte(iop_base, + (ushort)((ushort) + ASCV_MAX_DVC_QNG_BEG + + (ushort) + tid_no), + cur_dvc_qng); + + /* + * Set the device queue depth to the + * number of active requests when the + * QUEUE FULL condition was encountered. + */ + boardp->queue_full |= target_id; + boardp->queue_full_cnt[tid_no] = + cur_dvc_qng; + } + } + } + AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); + return; + } + return; +} + +/* + * void + * DvcGetQinfo(PortAddr iop_base, ushort s_addr, uchar *inbuf, int words) + * + * Calling/Exit State: + * none + * + * Description: + * Input an ASC_QDONE_INFO structure from the chip + */ +static void +DvcGetQinfo(PortAddr iop_base, ushort s_addr, uchar *inbuf, int words) +{ + int i; + ushort word; + + AscSetChipLramAddr(iop_base, s_addr); + for (i = 0; i < 2 * words; i += 2) { + if (i == 10) { + continue; + } + word = inpw(iop_base + IOP_RAM_DATA); + inbuf[i] = word & 0xff; + inbuf[i + 1] = (word >> 8) & 0xff; + } + ASC_DBG_PRT_HEX(2, "DvcGetQinfo", inbuf, 2 * words); +} + +static uchar +_AscCopyLramScsiDoneQ(PortAddr iop_base, + ushort q_addr, + ASC_QDONE_INFO *scsiq, unsigned int max_dma_count) +{ + ushort _val; + uchar sg_queue_cnt; + + DvcGetQinfo(iop_base, + q_addr + ASC_SCSIQ_DONE_INFO_BEG, + (uchar *)scsiq, + (sizeof(ASC_SCSIQ_2) + sizeof(ASC_SCSIQ_3)) / 2); + + _val = AscReadLramWord(iop_base, + (ushort)(q_addr + (ushort)ASC_SCSIQ_B_STATUS)); + scsiq->q_status = (uchar)_val; + scsiq->q_no = (uchar)(_val >> 8); + _val = AscReadLramWord(iop_base, + (ushort)(q_addr + (ushort)ASC_SCSIQ_B_CNTL)); + scsiq->cntl = (uchar)_val; + sg_queue_cnt = (uchar)(_val >> 8); + _val = AscReadLramWord(iop_base, + (ushort)(q_addr + + (ushort)ASC_SCSIQ_B_SENSE_LEN)); + scsiq->sense_len = (uchar)_val; + scsiq->extra_bytes = (uchar)(_val >> 8); + + /* + * Read high word of remain bytes from alternate location. + */ + scsiq->remain_bytes = (((u32)AscReadLramWord(iop_base, + (ushort)(q_addr + + (ushort) + ASC_SCSIQ_W_ALT_DC1))) + << 16); + /* + * Read low word of remain bytes from original location. + */ + scsiq->remain_bytes += AscReadLramWord(iop_base, + (ushort)(q_addr + (ushort) + ASC_SCSIQ_DW_REMAIN_XFER_CNT)); + + scsiq->remain_bytes &= max_dma_count; + return sg_queue_cnt; +} + +/* + * asc_isr_callback() - Second Level Interrupt Handler called by AscISR(). + * + * Interrupt callback function for the Narrow SCSI Asc Library. + */ +static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep) +{ + struct asc_board *boardp = asc_dvc_varp->drv_ptr; + u32 srb_tag; + struct scsi_cmnd *scp; + + ASC_DBG(1, "asc_dvc_varp 0x%p, qdonep 0x%p\n", asc_dvc_varp, qdonep); + ASC_DBG_PRT_ASC_QDONE_INFO(2, qdonep); + + /* + * Decrease the srb_tag by 1 to find the SCSI command + */ + srb_tag = qdonep->d2.srb_tag - 1; + scp = scsi_host_find_tag(boardp->shost, srb_tag); + if (!scp) + return; + + ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len); + + ASC_STATS(boardp->shost, callback); + + dma_unmap_single(boardp->dev, advansys_cmd(scp)->dma_handle, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + /* + * 'qdonep' contains the command's ending status. 
+ */ + scp->result = 0; + switch (qdonep->d3.done_stat) { + case QD_NO_ERROR: + ASC_DBG(2, "QD_NO_ERROR\n"); + + /* + * Check for an underrun condition. + * + * If there was no error and an underrun condition, then + * return the number of underrun bytes. + */ + if (scsi_bufflen(scp) != 0 && qdonep->remain_bytes != 0 && + qdonep->remain_bytes <= scsi_bufflen(scp)) { + ASC_DBG(1, "underrun condition %u bytes\n", + (unsigned)qdonep->remain_bytes); + scsi_set_resid(scp, qdonep->remain_bytes); + } + break; + + case QD_WITH_ERROR: + ASC_DBG(2, "QD_WITH_ERROR\n"); + switch (qdonep->d3.host_stat) { + case QHSTA_NO_ERROR: + set_status_byte(scp, qdonep->d3.scsi_stat); + if (qdonep->d3.scsi_stat == SAM_STAT_CHECK_CONDITION) { + ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n"); + ASC_DBG_PRT_SENSE(2, scp->sense_buffer, + SCSI_SENSE_BUFFERSIZE); + } + break; + + default: + /* QHSTA error occurred */ + ASC_DBG(1, "host_stat 0x%x\n", qdonep->d3.host_stat); + set_host_byte(scp, DID_BAD_TARGET); + break; + } + break; + + case QD_ABORTED_BY_HOST: + ASC_DBG(1, "QD_ABORTED_BY_HOST\n"); + set_status_byte(scp, qdonep->d3.scsi_stat); + set_host_byte(scp, DID_ABORT); + break; + + default: + ASC_DBG(1, "done_stat 0x%x\n", qdonep->d3.done_stat); + set_status_byte(scp, qdonep->d3.scsi_stat); + set_host_byte(scp, DID_ERROR); + break; + } + + /* + * If the 'init_tidmask' bit isn't already set for the target and the + * current request finished normally, then set the bit for the target + * to indicate that a device is present. + */ + if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->device->id)) == 0 && + qdonep->d3.done_stat == QD_NO_ERROR && + qdonep->d3.host_stat == QHSTA_NO_ERROR) { + boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->device->id); + } + + asc_scsi_done(scp); +} + +static int AscIsrQDone(ASC_DVC_VAR *asc_dvc) +{ + uchar next_qp; + uchar n_q_used; + uchar sg_list_qp; + uchar sg_queue_cnt; + uchar q_cnt; + uchar done_q_tail; + uchar tid_no; + ASC_SCSI_BIT_ID_TYPE scsi_busy; + ASC_SCSI_BIT_ID_TYPE target_id; + PortAddr iop_base; + ushort q_addr; + ushort sg_q_addr; + uchar cur_target_qng; + ASC_QDONE_INFO scsiq_buf; + ASC_QDONE_INFO *scsiq; + bool false_overrun; + + iop_base = asc_dvc->iop_base; + n_q_used = 1; + scsiq = (ASC_QDONE_INFO *)&scsiq_buf; + done_q_tail = (uchar)AscGetVarDoneQTail(iop_base); + q_addr = ASC_QNO_TO_QADDR(done_q_tail); + next_qp = AscReadLramByte(iop_base, + (ushort)(q_addr + (ushort)ASC_SCSIQ_B_FWD)); + if (next_qp != ASC_QLINK_END) { + AscPutVarDoneQTail(iop_base, next_qp); + q_addr = ASC_QNO_TO_QADDR(next_qp); + sg_queue_cnt = _AscCopyLramScsiDoneQ(iop_base, q_addr, scsiq, + asc_dvc->max_dma_count); + AscWriteLramByte(iop_base, + (ushort)(q_addr + + (ushort)ASC_SCSIQ_B_STATUS), + (uchar)(scsiq-> + q_status & (uchar)~(QS_READY | + QS_ABORTED))); + tid_no = ASC_TIX_TO_TID(scsiq->d2.target_ix); + target_id = ASC_TIX_TO_TARGET_ID(scsiq->d2.target_ix); + if ((scsiq->cntl & QC_SG_HEAD) != 0) { + sg_q_addr = q_addr; + sg_list_qp = next_qp; + for (q_cnt = 0; q_cnt < sg_queue_cnt; q_cnt++) { + sg_list_qp = AscReadLramByte(iop_base, + (ushort)(sg_q_addr + + (ushort) + ASC_SCSIQ_B_FWD)); + sg_q_addr = ASC_QNO_TO_QADDR(sg_list_qp); + if (sg_list_qp == ASC_QLINK_END) { + AscSetLibErrorCode(asc_dvc, + ASCQ_ERR_SG_Q_LINKS); + scsiq->d3.done_stat = QD_WITH_ERROR; + scsiq->d3.host_stat = + QHSTA_D_QDONE_SG_LIST_CORRUPTED; + goto FATAL_ERR_QDONE; + } + AscWriteLramByte(iop_base, + (ushort)(sg_q_addr + (ushort) + ASC_SCSIQ_B_STATUS), + QS_FREE); + } + n_q_used = sg_queue_cnt + 1; + 
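/*
+ * Move the done queue tail to the last SG list queue in the
+ * chain; n_q_used above counts those queues plus the head queue.
+ */
+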
AscPutVarDoneQTail(iop_base, sg_list_qp); + } + if (asc_dvc->queue_full_or_busy & target_id) { + cur_target_qng = AscReadLramByte(iop_base, + (ushort)((ushort) + ASC_QADR_BEG + + (ushort) + scsiq->d2. + target_ix)); + if (cur_target_qng < asc_dvc->max_dvc_qng[tid_no]) { + scsi_busy = AscReadLramByte(iop_base, (ushort) + ASCV_SCSIBUSY_B); + scsi_busy &= ~target_id; + AscWriteLramByte(iop_base, + (ushort)ASCV_SCSIBUSY_B, + scsi_busy); + asc_dvc->queue_full_or_busy &= ~target_id; + } + } + if (asc_dvc->cur_total_qng >= n_q_used) { + asc_dvc->cur_total_qng -= n_q_used; + if (asc_dvc->cur_dvc_qng[tid_no] != 0) { + asc_dvc->cur_dvc_qng[tid_no]--; + } + } else { + AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CUR_QNG); + scsiq->d3.done_stat = QD_WITH_ERROR; + goto FATAL_ERR_QDONE; + } + if ((scsiq->d2.srb_tag == 0UL) || + ((scsiq->q_status & QS_ABORTED) != 0)) { + return (0x11); + } else if (scsiq->q_status == QS_DONE) { + /* + * This is also curious. + * false_overrun will _always_ be set to 'false' + */ + false_overrun = false; + if (scsiq->extra_bytes != 0) { + scsiq->remain_bytes += scsiq->extra_bytes; + } + if (scsiq->d3.done_stat == QD_WITH_ERROR) { + if (scsiq->d3.host_stat == + QHSTA_M_DATA_OVER_RUN) { + if ((scsiq-> + cntl & (QC_DATA_IN | QC_DATA_OUT)) + == 0) { + scsiq->d3.done_stat = + QD_NO_ERROR; + scsiq->d3.host_stat = + QHSTA_NO_ERROR; + } else if (false_overrun) { + scsiq->d3.done_stat = + QD_NO_ERROR; + scsiq->d3.host_stat = + QHSTA_NO_ERROR; + } + } else if (scsiq->d3.host_stat == + QHSTA_M_HUNG_REQ_SCSI_BUS_RESET) { + AscStopChip(iop_base); + AscSetChipControl(iop_base, + (uchar)(CC_SCSI_RESET + | CC_HALT)); + udelay(60); + AscSetChipControl(iop_base, CC_HALT); + AscSetChipStatus(iop_base, + CIW_CLR_SCSI_RESET_INT); + AscSetChipStatus(iop_base, 0); + AscSetChipControl(iop_base, 0); + } + } + if ((scsiq->cntl & QC_NO_CALLBACK) == 0) { + asc_isr_callback(asc_dvc, scsiq); + } else { + if ((AscReadLramByte(iop_base, + (ushort)(q_addr + (ushort) + ASC_SCSIQ_CDB_BEG)) + == START_STOP)) { + asc_dvc->unit_not_ready &= ~target_id; + if (scsiq->d3.done_stat != QD_NO_ERROR) { + asc_dvc->start_motor &= + ~target_id; + } + } + } + return (1); + } else { + AscSetLibErrorCode(asc_dvc, ASCQ_ERR_Q_STATUS); + FATAL_ERR_QDONE: + if ((scsiq->cntl & QC_NO_CALLBACK) == 0) { + asc_isr_callback(asc_dvc, scsiq); + } + return (0x80); + } + } + return (0); +} + +static int AscISR(ASC_DVC_VAR *asc_dvc) +{ + ASC_CS_TYPE chipstat; + PortAddr iop_base; + ushort saved_ram_addr; + uchar ctrl_reg; + uchar saved_ctrl_reg; + int int_pending; + int status; + uchar host_flag; + + iop_base = asc_dvc->iop_base; + int_pending = ASC_FALSE; + + if (AscIsIntPending(iop_base) == 0) + return int_pending; + + if ((asc_dvc->init_state & ASC_INIT_STATE_END_LOAD_MC) == 0) { + return ASC_ERROR; + } + if (asc_dvc->in_critical_cnt != 0) { + AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_ON_CRITICAL); + return ASC_ERROR; + } + if (asc_dvc->is_in_int) { + AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_RE_ENTRY); + return ASC_ERROR; + } + asc_dvc->is_in_int = true; + ctrl_reg = AscGetChipControl(iop_base); + saved_ctrl_reg = ctrl_reg & (~(CC_SCSI_RESET | CC_CHIP_RESET | + CC_SINGLE_STEP | CC_DIAG | CC_TEST)); + chipstat = AscGetChipStatus(iop_base); + if (chipstat & CSW_SCSI_RESET_LATCH) { + if (!(asc_dvc->bus_type & (ASC_IS_VL | ASC_IS_EISA))) { + int i = 10; + int_pending = ASC_TRUE; + asc_dvc->sdtr_done = 0; + saved_ctrl_reg &= (uchar)(~CC_HALT); + while ((AscGetChipStatus(iop_base) & + CSW_SCSI_RESET_ACTIVE) && (i-- > 0)) { + mdelay(100); + } + 
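/*
+ * Reset released (or wait timed out): pulse a chip reset and
+ * clear the latched SCSI reset interrupt before continuing.
+ */
+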
AscSetChipControl(iop_base, (CC_CHIP_RESET | CC_HALT)); + AscSetChipControl(iop_base, CC_HALT); + AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT); + AscSetChipStatus(iop_base, 0); + chipstat = AscGetChipStatus(iop_base); + } + } + saved_ram_addr = AscGetChipLramAddr(iop_base); + host_flag = AscReadLramByte(iop_base, + ASCV_HOST_FLAG_B) & + (uchar)(~ASC_HOST_FLAG_IN_ISR); + AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, + (uchar)(host_flag | (uchar)ASC_HOST_FLAG_IN_ISR)); + if ((chipstat & CSW_INT_PENDING) || (int_pending)) { + AscAckInterrupt(iop_base); + int_pending = ASC_TRUE; + if ((chipstat & CSW_HALTED) && (ctrl_reg & CC_SINGLE_STEP)) { + AscIsrChipHalted(asc_dvc); + saved_ctrl_reg &= (uchar)(~CC_HALT); + } else { + if ((asc_dvc->dvc_cntl & ASC_CNTL_INT_MULTI_Q) != 0) { + while (((status = + AscIsrQDone(asc_dvc)) & 0x01) != 0) { + } + } else { + do { + if ((status = + AscIsrQDone(asc_dvc)) == 1) { + break; + } + } while (status == 0x11); + } + if ((status & 0x80) != 0) + int_pending = ASC_ERROR; + } + } + AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag); + AscSetChipLramAddr(iop_base, saved_ram_addr); + AscSetChipControl(iop_base, saved_ctrl_reg); + asc_dvc->is_in_int = false; + return int_pending; +} + +/* + * advansys_reset() + * + * Reset the host associated with the command 'scp'. + * + * This function runs its own thread. Interrupts must be blocked but + * sleeping is allowed and no locking other than for host structures is + * required. Returns SUCCESS or FAILED. + */ +static int advansys_reset(struct scsi_cmnd *scp) +{ + struct Scsi_Host *shost = scp->device->host; + struct asc_board *boardp = shost_priv(shost); + unsigned long flags; + int status; + int ret = SUCCESS; + + ASC_DBG(1, "0x%p\n", scp); + + ASC_STATS(shost, reset); + + scmd_printk(KERN_INFO, scp, "SCSI host reset started...\n"); + + if (ASC_NARROW_BOARD(boardp)) { + ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var; + + /* Reset the chip and SCSI bus. */ + ASC_DBG(1, "before AscInitAsc1000Driver()\n"); + status = AscInitAsc1000Driver(asc_dvc); + + /* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */ + if (asc_dvc->err_code || !asc_dvc->overrun_dma) { + scmd_printk(KERN_INFO, scp, "SCSI host reset error: " + "0x%x, status: 0x%x\n", asc_dvc->err_code, + status); + ret = FAILED; + } else if (status) { + scmd_printk(KERN_INFO, scp, "SCSI host reset warning: " + "0x%x\n", status); + } else { + scmd_printk(KERN_INFO, scp, "SCSI host reset " + "successful\n"); + } + + ASC_DBG(1, "after AscInitAsc1000Driver()\n"); + } else { + /* + * If the suggest reset bus flags are set, then reset the bus. + * Otherwise only reset the device. + */ + ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var; + + /* + * Reset the chip and SCSI bus. + */ + ASC_DBG(1, "before AdvResetChipAndSB()\n"); + switch (AdvResetChipAndSB(adv_dvc)) { + case ASC_TRUE: + scmd_printk(KERN_INFO, scp, "SCSI host reset " + "successful\n"); + break; + case ASC_FALSE: + default: + scmd_printk(KERN_INFO, scp, "SCSI host reset error\n"); + ret = FAILED; + break; + } + spin_lock_irqsave(shost->host_lock, flags); + AdvISR(adv_dvc); + spin_unlock_irqrestore(shost->host_lock, flags); + } + + ASC_DBG(1, "ret %d\n", ret); + + return ret; +} + +/* + * advansys_biosparam() + * + * Translate disk drive geometry if the "BIOS greater than 1 GB" + * support is enabled for a drive. 
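+ *
+ * For example, a 4 GiB disk (0x800000 512-byte sectors) maps to
+ * roughly 522 cylinders with the extended 255 heads x 63 sectors
+ * translation, while the default 64 x 32 translation would need
+ * 4096 cylinders and exceed the old 1024-cylinder BIOS limit.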
+ * + * ip (information pointer) is an int array with the following definition: + * ip[0]: heads + * ip[1]: sectors + * ip[2]: cylinders + */ +static int +advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int ip[]) +{ + struct asc_board *boardp = shost_priv(sdev->host); + + ASC_DBG(1, "begin\n"); + ASC_STATS(sdev->host, biosparam); + if (ASC_NARROW_BOARD(boardp)) { + if ((boardp->dvc_var.asc_dvc_var.dvc_cntl & + ASC_CNTL_BIOS_GT_1GB) && capacity > 0x200000) { + ip[0] = 255; + ip[1] = 63; + } else { + ip[0] = 64; + ip[1] = 32; + } + } else { + if ((boardp->dvc_var.adv_dvc_var.bios_ctrl & + BIOS_CTRL_EXTENDED_XLAT) && capacity > 0x200000) { + ip[0] = 255; + ip[1] = 63; + } else { + ip[0] = 64; + ip[1] = 32; + } + } + ip[2] = (unsigned long)capacity / (ip[0] * ip[1]); + ASC_DBG(1, "end\n"); + return 0; +} + +/* + * First-level interrupt handler. + * + * 'dev_id' is a pointer to the interrupting adapter's Scsi_Host. + */ +static irqreturn_t advansys_interrupt(int irq, void *dev_id) +{ + struct Scsi_Host *shost = dev_id; + struct asc_board *boardp = shost_priv(shost); + irqreturn_t result = IRQ_NONE; + unsigned long flags; + + ASC_DBG(2, "boardp 0x%p\n", boardp); + spin_lock_irqsave(shost->host_lock, flags); + if (ASC_NARROW_BOARD(boardp)) { + if (AscIsIntPending(shost->io_port)) { + result = IRQ_HANDLED; + ASC_STATS(shost, interrupt); + ASC_DBG(1, "before AscISR()\n"); + AscISR(&boardp->dvc_var.asc_dvc_var); + } + } else { + ASC_DBG(1, "before AdvISR()\n"); + if (AdvISR(&boardp->dvc_var.adv_dvc_var)) { + result = IRQ_HANDLED; + ASC_STATS(shost, interrupt); + } + } + spin_unlock_irqrestore(shost->host_lock, flags); + + ASC_DBG(1, "end\n"); + return result; +} + +static bool AscHostReqRiscHalt(PortAddr iop_base) +{ + int count = 0; + bool sta = false; + uchar saved_stop_code; + + if (AscIsChipHalted(iop_base)) + return true; + saved_stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B); + AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, + ASC_STOP_HOST_REQ_RISC_HALT | ASC_STOP_REQ_RISC_STOP); + do { + if (AscIsChipHalted(iop_base)) { + sta = true; + break; + } + mdelay(100); + } while (count++ < 20); + AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, saved_stop_code); + return sta; +} + +static bool +AscSetRunChipSynRegAtID(PortAddr iop_base, uchar tid_no, uchar sdtr_data) +{ + bool sta = false; + + if (AscHostReqRiscHalt(iop_base)) { + sta = AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data); + AscStartChip(iop_base); + } + return sta; +} + +static void AscAsyncFix(ASC_DVC_VAR *asc_dvc, struct scsi_device *sdev) +{ + char type = sdev->type; + ASC_SCSI_BIT_ID_TYPE tid_bits = 1 << sdev->id; + + if (!(asc_dvc->bug_fix_cntl & ASC_BUG_FIX_ASYN_USE_SYN)) + return; + if (asc_dvc->init_sdtr & tid_bits) + return; + + if ((type == TYPE_ROM) && (strncmp(sdev->vendor, "HP ", 3) == 0)) + asc_dvc->pci_fix_asyn_xfer_always |= tid_bits; + + asc_dvc->pci_fix_asyn_xfer |= tid_bits; + if ((type == TYPE_PROCESSOR) || (type == TYPE_SCANNER) || + (type == TYPE_ROM) || (type == TYPE_TAPE)) + asc_dvc->pci_fix_asyn_xfer &= ~tid_bits; + + if (asc_dvc->pci_fix_asyn_xfer & tid_bits) + AscSetRunChipSynRegAtID(asc_dvc->iop_base, sdev->id, + ASYN_SDTR_DATA_FIX_PCI_REV_AB); +} + +static void +advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc) +{ + ASC_SCSI_BIT_ID_TYPE tid_bit = 1 << sdev->id; + ASC_SCSI_BIT_ID_TYPE orig_use_tagged_qng = asc_dvc->use_tagged_qng; + + if (sdev->lun == 0) { + ASC_SCSI_BIT_ID_TYPE orig_init_sdtr = asc_dvc->init_sdtr; + if 
((asc_dvc->cfg->sdtr_enable & tid_bit) && sdev->sdtr) { + asc_dvc->init_sdtr |= tid_bit; + } else { + asc_dvc->init_sdtr &= ~tid_bit; + } + + if (orig_init_sdtr != asc_dvc->init_sdtr) + AscAsyncFix(asc_dvc, sdev); + } + + if (sdev->tagged_supported) { + if (asc_dvc->cfg->cmd_qng_enabled & tid_bit) { + if (sdev->lun == 0) { + asc_dvc->cfg->can_tagged_qng |= tid_bit; + asc_dvc->use_tagged_qng |= tid_bit; + } + scsi_change_queue_depth(sdev, + asc_dvc->max_dvc_qng[sdev->id]); + } + } else { + if (sdev->lun == 0) { + asc_dvc->cfg->can_tagged_qng &= ~tid_bit; + asc_dvc->use_tagged_qng &= ~tid_bit; + } + } + + if ((sdev->lun == 0) && + (orig_use_tagged_qng != asc_dvc->use_tagged_qng)) { + AscWriteLramByte(asc_dvc->iop_base, ASCV_DISC_ENABLE_B, + asc_dvc->cfg->disc_enable); + AscWriteLramByte(asc_dvc->iop_base, ASCV_USE_TAGGED_QNG_B, + asc_dvc->use_tagged_qng); + AscWriteLramByte(asc_dvc->iop_base, ASCV_CAN_TAGGED_QNG_B, + asc_dvc->cfg->can_tagged_qng); + + asc_dvc->max_dvc_qng[sdev->id] = + asc_dvc->cfg->max_tag_qng[sdev->id]; + AscWriteLramByte(asc_dvc->iop_base, + (ushort)(ASCV_MAX_DVC_QNG_BEG + sdev->id), + asc_dvc->max_dvc_qng[sdev->id]); + } +} + +/* + * Wide Transfers + * + * If the EEPROM enabled WDTR for the device and the device supports wide + * bus (16 bit) transfers, then turn on the device's 'wdtr_able' bit and + * write the new value to the microcode. + */ +static void +advansys_wide_enable_wdtr(AdvPortAddr iop_base, unsigned short tidmask) +{ + unsigned short cfg_word; + AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word); + if ((cfg_word & tidmask) != 0) + return; + + cfg_word |= tidmask; + AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word); + + /* + * Clear the microcode SDTR and WDTR negotiation done indicators for + * the target to cause it to negotiate with the new setting set above. + * WDTR when accepted causes the target to enter asynchronous mode, so + * SDTR must be negotiated. + */ + AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word); + cfg_word &= ~tidmask; + AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word); + AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word); + cfg_word &= ~tidmask; + AdvWriteWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word); +} + +/* + * Synchronous Transfers + * + * If the EEPROM enabled SDTR for the device and the device + * supports synchronous transfers, then turn on the device's + * 'sdtr_able' bit. Write the new value to the microcode. + */ +static void +advansys_wide_enable_sdtr(AdvPortAddr iop_base, unsigned short tidmask) +{ + unsigned short cfg_word; + AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word); + if ((cfg_word & tidmask) != 0) + return; + + cfg_word |= tidmask; + AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word); + + /* + * Clear the microcode "SDTR negotiation" done indicator for the + * target to cause it to negotiate with the new setting set above. + */ + AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word); + cfg_word &= ~tidmask; + AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word); +} + +/* + * PPR (Parallel Protocol Request) Capable + * + * If the device supports DT mode, then it must be PPR capable. + * The PPR message will be used in place of the SDTR and WDTR + * messages to negotiate synchronous speed and offset, transfer + * width, and protocol options. 
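+ * In this driver PPR is only enabled for the ASC-38C1600 chip; see
+ * advansys_wide_slave_configure().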
+ */ +static void advansys_wide_enable_ppr(ADV_DVC_VAR *adv_dvc, + AdvPortAddr iop_base, unsigned short tidmask) +{ + AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, adv_dvc->ppr_able); + adv_dvc->ppr_able |= tidmask; + AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, adv_dvc->ppr_able); +} + +static void +advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc) +{ + AdvPortAddr iop_base = adv_dvc->iop_base; + unsigned short tidmask = 1 << sdev->id; + + if (sdev->lun == 0) { + /* + * Handle WDTR, SDTR, and Tag Queuing. If the feature + * is enabled in the EEPROM and the device supports the + * feature, then enable it in the microcode. + */ + + if ((adv_dvc->wdtr_able & tidmask) && sdev->wdtr) + advansys_wide_enable_wdtr(iop_base, tidmask); + if ((adv_dvc->sdtr_able & tidmask) && sdev->sdtr) + advansys_wide_enable_sdtr(iop_base, tidmask); + if (adv_dvc->chip_type == ADV_CHIP_ASC38C1600 && sdev->ppr) + advansys_wide_enable_ppr(adv_dvc, iop_base, tidmask); + + /* + * Tag Queuing is disabled for the BIOS which runs in polled + * mode and would see no benefit from Tag Queuing. Also by + * disabling Tag Queuing in the BIOS devices with Tag Queuing + * bugs will at least work with the BIOS. + */ + if ((adv_dvc->tagqng_able & tidmask) && + sdev->tagged_supported) { + unsigned short cfg_word; + AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, cfg_word); + cfg_word |= tidmask; + AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, + cfg_word); + AdvWriteByteLram(iop_base, + ASC_MC_NUMBER_OF_MAX_CMD + sdev->id, + adv_dvc->max_dvc_qng); + } + } + + if ((adv_dvc->tagqng_able & tidmask) && sdev->tagged_supported) + scsi_change_queue_depth(sdev, adv_dvc->max_dvc_qng); +} + +/* + * Set the number of commands to queue per device for the + * specified host adapter. + */ +static int advansys_slave_configure(struct scsi_device *sdev) +{ + struct asc_board *boardp = shost_priv(sdev->host); + + if (ASC_NARROW_BOARD(boardp)) + advansys_narrow_slave_configure(sdev, + &boardp->dvc_var.asc_dvc_var); + else + advansys_wide_slave_configure(sdev, + &boardp->dvc_var.adv_dvc_var); + + return 0; +} + +static __le32 asc_get_sense_buffer_dma(struct scsi_cmnd *scp) +{ + struct asc_board *board = shost_priv(scp->device->host); + struct advansys_cmd *acmd = advansys_cmd(scp); + + acmd->dma_handle = dma_map_single(board->dev, scp->sense_buffer, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(board->dev, acmd->dma_handle)) { + ASC_DBG(1, "failed to map sense buffer\n"); + return 0; + } + return cpu_to_le32(acmd->dma_handle); +} + +static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, + struct asc_scsi_q *asc_scsi_q) +{ + struct asc_dvc_var *asc_dvc = &boardp->dvc_var.asc_dvc_var; + int use_sg; + u32 srb_tag; + + memset(asc_scsi_q, 0, sizeof(*asc_scsi_q)); + + /* + * Set the srb_tag to the command tag + 1, as + * srb_tag '0' is used internally by the chip. + */ + srb_tag = scsi_cmd_to_rq(scp)->tag + 1; + asc_scsi_q->q2.srb_tag = srb_tag; + + /* + * Build the ASC_SCSI_Q request. 
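+ * The q1/q2 fields filled in below are later copied into the chip's
+ * local RAM by AscPutReadyQueue().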
+ */ + asc_scsi_q->cdbptr = &scp->cmnd[0]; + asc_scsi_q->q2.cdb_len = scp->cmd_len; + asc_scsi_q->q1.target_id = ASC_TID_TO_TARGET_ID(scp->device->id); + asc_scsi_q->q1.target_lun = scp->device->lun; + asc_scsi_q->q2.target_ix = + ASC_TIDLUN_TO_IX(scp->device->id, scp->device->lun); + asc_scsi_q->q1.sense_addr = asc_get_sense_buffer_dma(scp); + asc_scsi_q->q1.sense_len = SCSI_SENSE_BUFFERSIZE; + if (!asc_scsi_q->q1.sense_addr) + return ASC_BUSY; + + /* + * If there are any outstanding requests for the current target, + * then every 255th request send an ORDERED request. This heuristic + * tries to retain the benefit of request sorting while preventing + * request starvation. 255 is the max number of tags or pending commands + * a device may have outstanding. + * + * The request count is incremented below for every successfully + * started request. + * + */ + if ((asc_dvc->cur_dvc_qng[scp->device->id] > 0) && + (boardp->reqcnt[scp->device->id] % 255) == 0) { + asc_scsi_q->q2.tag_code = ORDERED_QUEUE_TAG; + } else { + asc_scsi_q->q2.tag_code = SIMPLE_QUEUE_TAG; + } + + /* Build ASC_SCSI_Q */ + use_sg = scsi_dma_map(scp); + if (use_sg < 0) { + ASC_DBG(1, "failed to map sglist\n"); + return ASC_BUSY; + } else if (use_sg > 0) { + int sgcnt; + struct scatterlist *slp; + struct asc_sg_head *asc_sg_head; + + if (use_sg > scp->device->host->sg_tablesize) { + scmd_printk(KERN_ERR, scp, "use_sg %d > " + "sg_tablesize %d\n", use_sg, + scp->device->host->sg_tablesize); + scsi_dma_unmap(scp); + set_host_byte(scp, DID_ERROR); + return ASC_ERROR; + } + + asc_sg_head = kzalloc(struct_size(asc_sg_head, sg_list, use_sg), + GFP_ATOMIC); + if (!asc_sg_head) { + scsi_dma_unmap(scp); + set_host_byte(scp, DID_SOFT_ERROR); + return ASC_ERROR; + } + + asc_scsi_q->q1.cntl |= QC_SG_HEAD; + asc_scsi_q->sg_head = asc_sg_head; + asc_scsi_q->q1.data_cnt = 0; + asc_scsi_q->q1.data_addr = 0; + /* This is a byte value, otherwise it would need to be swapped. */ + asc_sg_head->entry_cnt = asc_scsi_q->q1.sg_queue_cnt = use_sg; + ASC_STATS_ADD(scp->device->host, xfer_elem, + asc_sg_head->entry_cnt); + + /* + * Convert scatter-gather list into ASC_SG_HEAD list. + */ + scsi_for_each_sg(scp, slp, use_sg, sgcnt) { + asc_sg_head->sg_list[sgcnt].addr = + cpu_to_le32(sg_dma_address(slp)); + asc_sg_head->sg_list[sgcnt].bytes = + cpu_to_le32(sg_dma_len(slp)); + ASC_STATS_ADD(scp->device->host, xfer_sect, + DIV_ROUND_UP(sg_dma_len(slp), 512)); + } + } + + ASC_STATS(scp->device->host, xfer_cnt); + + ASC_DBG_PRT_ASC_SCSI_Q(2, asc_scsi_q); + ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len); + + return ASC_NOERROR; +} + +/* + * Build scatter-gather list for Adv Library (Wide Board). + * + * Additional ADV_SG_BLOCK structures will need to be allocated + * if the total number of scatter-gather elements exceeds + * NO_OF_SG_PER_BLOCK (15). The ADV_SG_BLOCK structures are + * assumed to be physically contiguous. + * + * Return: + * ADV_SUCCESS(1) - SG List successfully created + * ADV_ERROR(-1) - SG List creation failed + */ +static int +adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, + ADV_SCSI_REQ_Q *scsiqp, struct scsi_cmnd *scp, int use_sg) +{ + adv_sgblk_t *sgblkp, *prev_sgblkp; + struct scatterlist *slp; + int sg_elem_cnt; + ADV_SG_BLOCK *sg_block, *prev_sg_block; + dma_addr_t sgblk_paddr; + int i; + + slp = scsi_sglist(scp); + sg_elem_cnt = use_sg; + prev_sgblkp = NULL; + prev_sg_block = NULL; + reqp->sgblkp = NULL; + + for (;;) { + /* + * Allocate a 'adv_sgblk_t' structure from the board free + * list. 
One 'adv_sgblk_t' structure holds NO_OF_SG_PER_BLOCK + * (15) scatter-gather elements. + */ + sgblkp = dma_pool_alloc(boardp->adv_sgblk_pool, GFP_ATOMIC, + &sgblk_paddr); + if (!sgblkp) { + ASC_DBG(1, "no free adv_sgblk_t\n"); + ASC_STATS(scp->device->host, adv_build_nosg); + + /* + * Allocation failed. Free 'adv_sgblk_t' structures + * already allocated for the request. + */ + while ((sgblkp = reqp->sgblkp) != NULL) { + /* Remove 'sgblkp' from the request list. */ + reqp->sgblkp = sgblkp->next_sgblkp; + sgblkp->next_sgblkp = NULL; + dma_pool_free(boardp->adv_sgblk_pool, sgblkp, + sgblkp->sg_addr); + } + return ASC_BUSY; + } + /* Complete 'adv_sgblk_t' board allocation. */ + sgblkp->sg_addr = sgblk_paddr; + sgblkp->next_sgblkp = NULL; + sg_block = &sgblkp->sg_block; + + /* + * Check if this is the first 'adv_sgblk_t' for the + * request. + */ + if (reqp->sgblkp == NULL) { + /* Request's first scatter-gather block. */ + reqp->sgblkp = sgblkp; + + /* + * Set ADV_SCSI_REQ_T ADV_SG_BLOCK virtual and physical + * address pointers. + */ + scsiqp->sg_list_ptr = sg_block; + scsiqp->sg_real_addr = cpu_to_le32(sgblk_paddr); + } else { + /* Request's second or later scatter-gather block. */ + prev_sgblkp->next_sgblkp = sgblkp; + + /* + * Point the previous ADV_SG_BLOCK structure to + * the newly allocated ADV_SG_BLOCK structure. + */ + prev_sg_block->sg_ptr = cpu_to_le32(sgblk_paddr); + } + + for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) { + sg_block->sg_list[i].sg_addr = + cpu_to_le32(sg_dma_address(slp)); + sg_block->sg_list[i].sg_count = + cpu_to_le32(sg_dma_len(slp)); + ASC_STATS_ADD(scp->device->host, xfer_sect, + DIV_ROUND_UP(sg_dma_len(slp), 512)); + + if (--sg_elem_cnt == 0) { + /* + * Last ADV_SG_BLOCK and scatter-gather entry. + */ + sg_block->sg_cnt = i + 1; + sg_block->sg_ptr = 0L; /* Last ADV_SG_BLOCK in list. */ + return ADV_SUCCESS; + } + slp = sg_next(slp); + } + sg_block->sg_cnt = NO_OF_SG_PER_BLOCK; + prev_sg_block = sg_block; + prev_sgblkp = sgblkp; + } +} + +/* + * Build a request structure for the Adv Library (Wide Board). + * + * If an adv_req_t can not be allocated to issue the request, + * then return ASC_BUSY. If an error occurs, then return ASC_ERROR. + * + * Multi-byte fields in the ADV_SCSI_REQ_Q that are used by the + * microcode for DMA addresses or math operations are byte swapped + * to little-endian order. + */ +static int +adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, + adv_req_t **adv_reqpp) +{ + u32 srb_tag = scsi_cmd_to_rq(scp)->tag; + adv_req_t *reqp; + ADV_SCSI_REQ_Q *scsiqp; + int ret; + int use_sg; + dma_addr_t sense_addr; + + /* + * Allocate an adv_req_t structure from the board to execute + * the command. + */ + reqp = &boardp->adv_reqp[srb_tag]; + if (reqp->cmndp && reqp->cmndp != scp ) { + ASC_DBG(1, "no free adv_req_t\n"); + ASC_STATS(scp->device->host, adv_build_noreq); + return ASC_BUSY; + } + + reqp->req_addr = boardp->adv_reqp_addr + (srb_tag * sizeof(adv_req_t)); + + scsiqp = &reqp->scsi_req_q; + + /* + * Initialize the structure. + */ + scsiqp->cntl = scsiqp->scsi_cntl = scsiqp->done_status = 0; + + /* + * Set the srb_tag to the command tag. + */ + scsiqp->srb_tag = srb_tag; + + /* + * Set 'host_scribble' to point to the adv_req_t structure. + */ + reqp->cmndp = scp; + scp->host_scribble = (void *)reqp; + + /* + * Build the ADV_SCSI_REQ_Q request. + */ + + /* Set CDB length and copy it to the request structure. */ + scsiqp->cdb_len = scp->cmd_len; + /* Copy first 12 CDB bytes to cdb[]. 
*/ + memcpy(scsiqp->cdb, scp->cmnd, scp->cmd_len < 12 ? scp->cmd_len : 12); + /* Copy last 4 CDB bytes, if present, to cdb16[]. */ + if (scp->cmd_len > 12) { + int cdb16_len = scp->cmd_len - 12; + + memcpy(scsiqp->cdb16, &scp->cmnd[12], cdb16_len); + } + + scsiqp->target_id = scp->device->id; + scsiqp->target_lun = scp->device->lun; + + sense_addr = dma_map_single(boardp->dev, scp->sense_buffer, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(boardp->dev, sense_addr)) { + ASC_DBG(1, "failed to map sense buffer\n"); + ASC_STATS(scp->device->host, adv_build_noreq); + return ASC_BUSY; + } + scsiqp->sense_addr = cpu_to_le32(sense_addr); + scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE; + + /* Build ADV_SCSI_REQ_Q */ + + use_sg = scsi_dma_map(scp); + if (use_sg < 0) { + ASC_DBG(1, "failed to map SG list\n"); + ASC_STATS(scp->device->host, adv_build_noreq); + return ASC_BUSY; + } else if (use_sg == 0) { + /* Zero-length transfer */ + reqp->sgblkp = NULL; + scsiqp->data_cnt = 0; + + scsiqp->data_addr = 0; + scsiqp->sg_list_ptr = NULL; + scsiqp->sg_real_addr = 0; + } else { + if (use_sg > ADV_MAX_SG_LIST) { + scmd_printk(KERN_ERR, scp, "use_sg %d > " + "ADV_MAX_SG_LIST %d\n", use_sg, + scp->device->host->sg_tablesize); + scsi_dma_unmap(scp); + set_host_byte(scp, DID_ERROR); + reqp->cmndp = NULL; + scp->host_scribble = NULL; + + return ASC_ERROR; + } + + scsiqp->data_cnt = cpu_to_le32(scsi_bufflen(scp)); + + ret = adv_get_sglist(boardp, reqp, scsiqp, scp, use_sg); + if (ret != ADV_SUCCESS) { + scsi_dma_unmap(scp); + set_host_byte(scp, DID_ERROR); + reqp->cmndp = NULL; + scp->host_scribble = NULL; + + return ret; + } + + ASC_STATS_ADD(scp->device->host, xfer_elem, use_sg); + } + + ASC_STATS(scp->device->host, xfer_cnt); + + ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp); + ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len); + + *adv_reqpp = reqp; + + return ASC_NOERROR; +} + +static int AscSgListToQueue(int sg_list) +{ + int n_sg_list_qs; + + n_sg_list_qs = ((sg_list - 1) / ASC_SG_LIST_PER_Q); + if (((sg_list - 1) % ASC_SG_LIST_PER_Q) != 0) + n_sg_list_qs++; + return n_sg_list_qs + 1; +} + +static uint +AscGetNumOfFreeQueue(ASC_DVC_VAR *asc_dvc, uchar target_ix, uchar n_qs) +{ + uint cur_used_qs; + uint cur_free_qs; + ASC_SCSI_BIT_ID_TYPE target_id; + uchar tid_no; + + target_id = ASC_TIX_TO_TARGET_ID(target_ix); + tid_no = ASC_TIX_TO_TID(target_ix); + if ((asc_dvc->unit_not_ready & target_id) || + (asc_dvc->queue_full_or_busy & target_id)) { + return 0; + } + if (n_qs == 1) { + cur_used_qs = (uint) asc_dvc->cur_total_qng + + (uint) asc_dvc->last_q_shortage + (uint) ASC_MIN_FREE_Q; + } else { + cur_used_qs = (uint) asc_dvc->cur_total_qng + + (uint) ASC_MIN_FREE_Q; + } + if ((uint) (cur_used_qs + n_qs) <= (uint) asc_dvc->max_total_qng) { + cur_free_qs = (uint) asc_dvc->max_total_qng - cur_used_qs; + if (asc_dvc->cur_dvc_qng[tid_no] >= + asc_dvc->max_dvc_qng[tid_no]) { + return 0; + } + return cur_free_qs; + } + if (n_qs > 1) { + if ((n_qs > asc_dvc->last_q_shortage) + && (n_qs <= (asc_dvc->max_total_qng - ASC_MIN_FREE_Q))) { + asc_dvc->last_q_shortage = n_qs; + } + } + return 0; +} + +static uchar AscAllocFreeQueue(PortAddr iop_base, uchar free_q_head) +{ + ushort q_addr; + uchar next_qp; + uchar q_status; + + q_addr = ASC_QNO_TO_QADDR(free_q_head); + q_status = (uchar)AscReadLramByte(iop_base, + (ushort)(q_addr + + ASC_SCSIQ_B_STATUS)); + next_qp = AscReadLramByte(iop_base, (ushort)(q_addr + ASC_SCSIQ_B_FWD)); + if (((q_status & QS_READY) == 0) && (next_qp != ASC_QLINK_END)) + return next_qp; + return 
ASC_QLINK_END; +} + +static uchar +AscAllocMultipleFreeQueue(PortAddr iop_base, uchar free_q_head, uchar n_free_q) +{ + uchar i; + + for (i = 0; i < n_free_q; i++) { + free_q_head = AscAllocFreeQueue(iop_base, free_q_head); + if (free_q_head == ASC_QLINK_END) + break; + } + return free_q_head; +} + +/* + * void + * DvcPutScsiQ(PortAddr iop_base, ushort s_addr, uchar *outbuf, int words) + * + * Calling/Exit State: + * none + * + * Description: + * Output an ASC_SCSI_Q structure to the chip + */ +static void +DvcPutScsiQ(PortAddr iop_base, ushort s_addr, uchar *outbuf, int words) +{ + int i; + + ASC_DBG_PRT_HEX(2, "DvcPutScsiQ", outbuf, 2 * words); + AscSetChipLramAddr(iop_base, s_addr); + for (i = 0; i < 2 * words; i += 2) { + if (i == 4 || i == 20) { + continue; + } + outpw(iop_base + IOP_RAM_DATA, + ((ushort)outbuf[i + 1] << 8) | outbuf[i]); + } +} + +static int AscPutReadyQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no) +{ + ushort q_addr; + uchar tid_no; + uchar sdtr_data; + uchar syn_period_ix; + uchar syn_offset; + PortAddr iop_base; + + iop_base = asc_dvc->iop_base; + if (((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) && + ((asc_dvc->sdtr_done & scsiq->q1.target_id) == 0)) { + tid_no = ASC_TIX_TO_TID(scsiq->q2.target_ix); + sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no); + syn_period_ix = + (sdtr_data >> 4) & (asc_dvc->max_sdtr_index - 1); + syn_offset = sdtr_data & ASC_SYN_MAX_OFFSET; + AscMsgOutSDTR(asc_dvc, + asc_dvc->sdtr_period_tbl[syn_period_ix], + syn_offset); + scsiq->q1.cntl |= QC_MSG_OUT; + } + q_addr = ASC_QNO_TO_QADDR(q_no); + if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) { + scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG; + } + scsiq->q1.status = QS_FREE; + AscMemWordCopyPtrToLram(iop_base, + q_addr + ASC_SCSIQ_CDB_BEG, + (uchar *)scsiq->cdbptr, scsiq->q2.cdb_len >> 1); + + DvcPutScsiQ(iop_base, + q_addr + ASC_SCSIQ_CPY_BEG, + (uchar *)&scsiq->q1.cntl, + ((sizeof(ASC_SCSIQ_1) + sizeof(ASC_SCSIQ_2)) / 2) - 1); + AscWriteLramWord(iop_base, + (ushort)(q_addr + (ushort)ASC_SCSIQ_B_STATUS), + (ushort)(((ushort)scsiq->q1. + q_no << 8) | (ushort)QS_READY)); + return 1; +} + +static int +AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no) +{ + int sta; + int i; + ASC_SG_HEAD *sg_head; + ASC_SG_LIST_Q scsi_sg_q; + __le32 saved_data_addr; + __le32 saved_data_cnt; + PortAddr iop_base; + ushort sg_list_dwords; + ushort sg_index; + ushort sg_entry_cnt; + ushort q_addr; + uchar next_qp; + + iop_base = asc_dvc->iop_base; + sg_head = scsiq->sg_head; + saved_data_addr = scsiq->q1.data_addr; + saved_data_cnt = scsiq->q1.data_cnt; + scsiq->q1.data_addr = cpu_to_le32(sg_head->sg_list[0].addr); + scsiq->q1.data_cnt = cpu_to_le32(sg_head->sg_list[0].bytes); + /* + * Set sg_entry_cnt to be the number of SG elements that + * will fit in the allocated SG queues. It is minus 1, because + * the first SG element is handled above. 
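+ * Each additional SG list queue holds up to ASC_SG_LIST_PER_Q
+ * entries, chained together in the loop below.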
+ */ + sg_entry_cnt = sg_head->entry_cnt - 1; + + if (sg_entry_cnt != 0) { + scsiq->q1.cntl |= QC_SG_HEAD; + q_addr = ASC_QNO_TO_QADDR(q_no); + sg_index = 1; + scsiq->q1.sg_queue_cnt = sg_head->queue_cnt; + scsi_sg_q.sg_head_qp = q_no; + scsi_sg_q.cntl = QCSG_SG_XFER_LIST; + for (i = 0; i < sg_head->queue_cnt; i++) { + scsi_sg_q.seq_no = i + 1; + if (sg_entry_cnt > ASC_SG_LIST_PER_Q) { + sg_list_dwords = (uchar)(ASC_SG_LIST_PER_Q * 2); + sg_entry_cnt -= ASC_SG_LIST_PER_Q; + if (i == 0) { + scsi_sg_q.sg_list_cnt = + ASC_SG_LIST_PER_Q; + scsi_sg_q.sg_cur_list_cnt = + ASC_SG_LIST_PER_Q; + } else { + scsi_sg_q.sg_list_cnt = + ASC_SG_LIST_PER_Q - 1; + scsi_sg_q.sg_cur_list_cnt = + ASC_SG_LIST_PER_Q - 1; + } + } else { + scsi_sg_q.cntl |= QCSG_SG_XFER_END; + sg_list_dwords = sg_entry_cnt << 1; + if (i == 0) { + scsi_sg_q.sg_list_cnt = sg_entry_cnt; + scsi_sg_q.sg_cur_list_cnt = + sg_entry_cnt; + } else { + scsi_sg_q.sg_list_cnt = + sg_entry_cnt - 1; + scsi_sg_q.sg_cur_list_cnt = + sg_entry_cnt - 1; + } + sg_entry_cnt = 0; + } + next_qp = AscReadLramByte(iop_base, + (ushort)(q_addr + + ASC_SCSIQ_B_FWD)); + scsi_sg_q.q_no = next_qp; + q_addr = ASC_QNO_TO_QADDR(next_qp); + AscMemWordCopyPtrToLram(iop_base, + q_addr + ASC_SCSIQ_SGHD_CPY_BEG, + (uchar *)&scsi_sg_q, + sizeof(ASC_SG_LIST_Q) >> 1); + AscMemDWordCopyPtrToLram(iop_base, + q_addr + ASC_SGQ_LIST_BEG, + (uchar *)&sg_head-> + sg_list[sg_index], + sg_list_dwords); + sg_index += ASC_SG_LIST_PER_Q; + scsiq->next_sg_index = sg_index; + } + } else { + scsiq->q1.cntl &= ~QC_SG_HEAD; + } + sta = AscPutReadyQueue(asc_dvc, scsiq, q_no); + scsiq->q1.data_addr = saved_data_addr; + scsiq->q1.data_cnt = saved_data_cnt; + return (sta); +} + +static int +AscSendScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar n_q_required) +{ + PortAddr iop_base; + uchar free_q_head; + uchar next_qp; + uchar tid_no; + uchar target_ix; + int sta; + + iop_base = asc_dvc->iop_base; + target_ix = scsiq->q2.target_ix; + tid_no = ASC_TIX_TO_TID(target_ix); + sta = 0; + free_q_head = (uchar)AscGetVarFreeQHead(iop_base); + if (n_q_required > 1) { + next_qp = AscAllocMultipleFreeQueue(iop_base, free_q_head, + (uchar)n_q_required); + if (next_qp != ASC_QLINK_END) { + asc_dvc->last_q_shortage = 0; + scsiq->sg_head->queue_cnt = n_q_required - 1; + scsiq->q1.q_no = free_q_head; + sta = AscPutReadySgListQueue(asc_dvc, scsiq, + free_q_head); + } + } else if (n_q_required == 1) { + next_qp = AscAllocFreeQueue(iop_base, free_q_head); + if (next_qp != ASC_QLINK_END) { + scsiq->q1.q_no = free_q_head; + sta = AscPutReadyQueue(asc_dvc, scsiq, free_q_head); + } + } + if (sta == 1) { + AscPutVarFreeQHead(iop_base, next_qp); + asc_dvc->cur_total_qng += n_q_required; + asc_dvc->cur_dvc_qng[tid_no]++; + } + return sta; +} + +#define ASC_SYN_OFFSET_ONE_DISABLE_LIST 16 +static uchar _syn_offset_one_disable_cmd[ASC_SYN_OFFSET_ONE_DISABLE_LIST] = { + INQUIRY, + REQUEST_SENSE, + READ_CAPACITY, + READ_TOC, + MODE_SELECT, + MODE_SENSE, + MODE_SELECT_10, + MODE_SENSE_10, + 0xFF, + 0xFF, + 0xFF, + 0xFF, + 0xFF, + 0xFF, + 0xFF, + 0xFF +}; + +static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq) +{ + PortAddr iop_base; + int sta; + int n_q_required; + bool disable_syn_offset_one_fix; + int i; + u32 addr; + ushort sg_entry_cnt = 0; + ushort sg_entry_cnt_minus_one = 0; + uchar target_ix; + uchar tid_no; + uchar sdtr_data; + uchar extra_bytes; + uchar scsi_cmd; + uchar disable_cmd; + ASC_SG_HEAD *sg_head; + unsigned long data_cnt; + + iop_base = asc_dvc->iop_base; + sg_head = scsiq->sg_head; 
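+ /* Bail out early if the library has already recorded an error. */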
+ if (asc_dvc->err_code != 0) + return ASC_ERROR; + scsiq->q1.q_no = 0; + if ((scsiq->q2.tag_code & ASC_TAG_FLAG_EXTRA_BYTES) == 0) { + scsiq->q1.extra_bytes = 0; + } + sta = 0; + target_ix = scsiq->q2.target_ix; + tid_no = ASC_TIX_TO_TID(target_ix); + n_q_required = 1; + if (scsiq->cdbptr[0] == REQUEST_SENSE) { + if ((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) { + asc_dvc->sdtr_done &= ~scsiq->q1.target_id; + sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no); + AscMsgOutSDTR(asc_dvc, + asc_dvc-> + sdtr_period_tbl[(sdtr_data >> 4) & + (uchar)(asc_dvc-> + max_sdtr_index - + 1)], + (uchar)(sdtr_data & (uchar) + ASC_SYN_MAX_OFFSET)); + scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT); + } + } + if (asc_dvc->in_critical_cnt != 0) { + AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CRITICAL_RE_ENTRY); + return ASC_ERROR; + } + asc_dvc->in_critical_cnt++; + if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) { + if ((sg_entry_cnt = sg_head->entry_cnt) == 0) { + asc_dvc->in_critical_cnt--; + return ASC_ERROR; + } + if (sg_entry_cnt > ASC_MAX_SG_LIST) { + asc_dvc->in_critical_cnt--; + return ASC_ERROR; + } + if (sg_entry_cnt == 1) { + scsiq->q1.data_addr = cpu_to_le32(sg_head->sg_list[0].addr); + scsiq->q1.data_cnt = cpu_to_le32(sg_head->sg_list[0].bytes); + scsiq->q1.cntl &= ~(QC_SG_HEAD | QC_SG_SWAP_QUEUE); + } + sg_entry_cnt_minus_one = sg_entry_cnt - 1; + } + scsi_cmd = scsiq->cdbptr[0]; + disable_syn_offset_one_fix = false; + if ((asc_dvc->pci_fix_asyn_xfer & scsiq->q1.target_id) && + !(asc_dvc->pci_fix_asyn_xfer_always & scsiq->q1.target_id)) { + if (scsiq->q1.cntl & QC_SG_HEAD) { + data_cnt = 0; + for (i = 0; i < sg_entry_cnt; i++) { + data_cnt += le32_to_cpu(sg_head->sg_list[i]. + bytes); + } + } else { + data_cnt = le32_to_cpu(scsiq->q1.data_cnt); + } + if (data_cnt != 0UL) { + if (data_cnt < 512UL) { + disable_syn_offset_one_fix = true; + } else { + for (i = 0; i < ASC_SYN_OFFSET_ONE_DISABLE_LIST; + i++) { + disable_cmd = + _syn_offset_one_disable_cmd[i]; + if (disable_cmd == 0xFF) { + break; + } + if (scsi_cmd == disable_cmd) { + disable_syn_offset_one_fix = + true; + break; + } + } + } + } + } + if (disable_syn_offset_one_fix) { + scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG; + scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX | + ASC_TAG_FLAG_DISABLE_DISCONNECT); + } else { + scsiq->q2.tag_code &= 0x27; + } + if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) { + if (asc_dvc->bug_fix_cntl) { + if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) { + if ((scsi_cmd == READ_6) || + (scsi_cmd == READ_10)) { + addr = le32_to_cpu(sg_head-> + sg_list + [sg_entry_cnt_minus_one]. + addr) + + le32_to_cpu(sg_head-> + sg_list + [sg_entry_cnt_minus_one]. + bytes); + extra_bytes = + (uchar)((ushort)addr & 0x0003); + if ((extra_bytes != 0) + && + ((scsiq->q2. + tag_code & + ASC_TAG_FLAG_EXTRA_BYTES) + == 0)) { + scsiq->q2.tag_code |= + ASC_TAG_FLAG_EXTRA_BYTES; + scsiq->q1.extra_bytes = + extra_bytes; + data_cnt = + le32_to_cpu(sg_head-> + sg_list + [sg_entry_cnt_minus_one]. + bytes); + data_cnt -= extra_bytes; + sg_head-> + sg_list + [sg_entry_cnt_minus_one]. 
+ bytes = + cpu_to_le32(data_cnt); + } + } + } + } + sg_head->entry_to_copy = sg_head->entry_cnt; + n_q_required = AscSgListToQueue(sg_entry_cnt); + if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, n_q_required) >= + (uint) n_q_required) + || ((scsiq->q1.cntl & QC_URGENT) != 0)) { + if ((sta = + AscSendScsiQueue(asc_dvc, scsiq, + n_q_required)) == 1) { + asc_dvc->in_critical_cnt--; + return (sta); + } + } + } else { + if (asc_dvc->bug_fix_cntl) { + if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) { + if ((scsi_cmd == READ_6) || + (scsi_cmd == READ_10)) { + addr = + le32_to_cpu(scsiq->q1.data_addr) + + le32_to_cpu(scsiq->q1.data_cnt); + extra_bytes = + (uchar)((ushort)addr & 0x0003); + if ((extra_bytes != 0) + && + ((scsiq->q2. + tag_code & + ASC_TAG_FLAG_EXTRA_BYTES) + == 0)) { + data_cnt = + le32_to_cpu(scsiq->q1. + data_cnt); + if (((ushort)data_cnt & 0x01FF) + == 0) { + scsiq->q2.tag_code |= + ASC_TAG_FLAG_EXTRA_BYTES; + data_cnt -= extra_bytes; + scsiq->q1.data_cnt = + cpu_to_le32 + (data_cnt); + scsiq->q1.extra_bytes = + extra_bytes; + } + } + } + } + } + n_q_required = 1; + if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, 1) >= 1) || + ((scsiq->q1.cntl & QC_URGENT) != 0)) { + if ((sta = AscSendScsiQueue(asc_dvc, scsiq, + n_q_required)) == 1) { + asc_dvc->in_critical_cnt--; + return (sta); + } + } + } + asc_dvc->in_critical_cnt--; + return (sta); +} + +/* + * AdvExeScsiQueue() - Send a request to the RISC microcode program. + * + * Allocate a carrier structure, point the carrier to the ADV_SCSI_REQ_Q, + * add the carrier to the ICQ (Initiator Command Queue), and tickle the + * RISC to notify it a new command is ready to be executed. + * + * If 'done_status' is not set to QD_DO_RETRY, then 'error_retry' will be + * set to SCSI_MAX_RETRY. + * + * Multi-byte fields in the ADV_SCSI_REQ_Q that are used by the microcode + * for DMA addresses or math operations are byte swapped to little-endian + * order. + * + * Return: + * ADV_SUCCESS(1) - The request was successfully queued. + * ADV_BUSY(0) - Resource unavailable; Retry again after pending + * request completes. + * ADV_ERROR(-1) - Invalid ADV_SCSI_REQ_Q request structure + * host IC error. + */ +static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, adv_req_t *reqp) +{ + AdvPortAddr iop_base; + ADV_CARR_T *new_carrp; + ADV_SCSI_REQ_Q *scsiq = &reqp->scsi_req_q; + + /* + * The ADV_SCSI_REQ_Q 'target_id' field should never exceed ADV_MAX_TID. + */ + if (scsiq->target_id > ADV_MAX_TID) { + scsiq->host_status = QHSTA_M_INVALID_DEVICE; + scsiq->done_status = QD_WITH_ERROR; + return ADV_ERROR; + } + + iop_base = asc_dvc->iop_base; + + /* + * Allocate a carrier ensuring at least one carrier always + * remains on the freelist and initialize fields. + */ + new_carrp = adv_get_next_carrier(asc_dvc); + if (!new_carrp) { + ASC_DBG(1, "No free carriers\n"); + return ADV_BUSY; + } + + asc_dvc->carr_pending_cnt++; + + /* Save virtual and physical address of ADV_SCSI_REQ_Q and carrier. */ + scsiq->scsiq_ptr = cpu_to_le32(scsiq->srb_tag); + scsiq->scsiq_rptr = cpu_to_le32(reqp->req_addr); + + scsiq->carr_va = asc_dvc->icq_sp->carr_va; + scsiq->carr_pa = asc_dvc->icq_sp->carr_pa; + + /* + * Use the current stopper to send the ADV_SCSI_REQ_Q command to + * the microcode. The newly allocated stopper will become the new + * stopper. + */ + asc_dvc->icq_sp->areq_vpa = scsiq->scsiq_rptr; + + /* + * Set the 'next_vpa' pointer for the old stopper to be the + * physical address of the new stopper. The RISC can only + * follow physical addresses. 
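+ * Once next_vpa is written, the old stopper becomes an ordinary
+ * carrier and the newly allocated carrier terminates the ICQ.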
+ */ + asc_dvc->icq_sp->next_vpa = new_carrp->carr_pa; + + /* + * Set the host adapter stopper pointer to point to the new carrier. + */ + asc_dvc->icq_sp = new_carrp; + + if (asc_dvc->chip_type == ADV_CHIP_ASC3550 || + asc_dvc->chip_type == ADV_CHIP_ASC38C0800) { + /* + * Tickle the RISC to tell it to read its Command Queue Head pointer. + */ + AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_A); + if (asc_dvc->chip_type == ADV_CHIP_ASC3550) { + /* + * Clear the tickle value. In the ASC-3550 the RISC flag + * command 'clr_tickle_a' does not work unless the host + * value is cleared. + */ + AdvWriteByteRegister(iop_base, IOPB_TICKLE, + ADV_TICKLE_NOP); + } + } else if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) { + /* + * Notify the RISC a carrier is ready by writing the physical + * address of the new carrier stopper to the COMMA register. + */ + AdvWriteDWordRegister(iop_base, IOPDW_COMMA, + le32_to_cpu(new_carrp->carr_pa)); + } + + return ADV_SUCCESS; +} + +/* + * Execute a single 'struct scsi_cmnd'. + */ +static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp) +{ + int ret, err_code; + struct asc_board *boardp = shost_priv(scp->device->host); + + ASC_DBG(1, "scp 0x%p\n", scp); + + if (ASC_NARROW_BOARD(boardp)) { + ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var; + struct asc_scsi_q asc_scsi_q; + + ret = asc_build_req(boardp, scp, &asc_scsi_q); + if (ret != ASC_NOERROR) { + ASC_STATS(scp->device->host, build_error); + return ret; + } + + ret = AscExeScsiQueue(asc_dvc, &asc_scsi_q); + kfree(asc_scsi_q.sg_head); + err_code = asc_dvc->err_code; + } else { + ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var; + adv_req_t *adv_reqp; + + switch (adv_build_req(boardp, scp, &adv_reqp)) { + case ASC_NOERROR: + ASC_DBG(3, "adv_build_req ASC_NOERROR\n"); + break; + case ASC_BUSY: + ASC_DBG(1, "adv_build_req ASC_BUSY\n"); + /* + * The asc_stats fields 'adv_build_noreq' and + * 'adv_build_nosg' count wide board busy conditions. + * They are updated in adv_build_req and + * adv_get_sglist, respectively. + */ + return ASC_BUSY; + case ASC_ERROR: + default: + ASC_DBG(1, "adv_build_req ASC_ERROR\n"); + ASC_STATS(scp->device->host, build_error); + return ASC_ERROR; + } + + ret = AdvExeScsiQueue(adv_dvc, adv_reqp); + err_code = adv_dvc->err_code; + } + + switch (ret) { + case ASC_NOERROR: + ASC_STATS(scp->device->host, exe_noerror); + /* + * Increment monotonically increasing per device + * successful request counter. Wrapping doesn't matter. + */ + boardp->reqcnt[scp->device->id]++; + ASC_DBG(1, "ExeScsiQueue() ASC_NOERROR\n"); + break; + case ASC_BUSY: + ASC_DBG(1, "ExeScsiQueue() ASC_BUSY\n"); + ASC_STATS(scp->device->host, exe_busy); + break; + case ASC_ERROR: + scmd_printk(KERN_ERR, scp, "ExeScsiQueue() ASC_ERROR, " + "err_code 0x%x\n", err_code); + ASC_STATS(scp->device->host, exe_error); + set_host_byte(scp, DID_ERROR); + break; + default: + scmd_printk(KERN_ERR, scp, "ExeScsiQueue() unknown, " + "err_code 0x%x\n", err_code); + ASC_STATS(scp->device->host, exe_unknown); + set_host_byte(scp, DID_ERROR); + break; + } + + ASC_DBG(1, "end\n"); + return ret; +} + +/* + * advansys_queuecommand() - interrupt-driven I/O entrypoint. + * + * This function always returns 0. Command return status is saved + * in the 'scp' result field. 
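+ * (When the driver cannot accept the command, ASC_BUSY is reported
+ * to the midlayer as SCSI_MLQUEUE_HOST_BUSY so that it is retried.)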
+ */ +static int advansys_queuecommand_lck(struct scsi_cmnd *scp) +{ + struct Scsi_Host *shost = scp->device->host; + int asc_res, result = 0; + + ASC_STATS(shost, queuecommand); + + asc_res = asc_execute_scsi_cmnd(scp); + + switch (asc_res) { + case ASC_NOERROR: + break; + case ASC_BUSY: + result = SCSI_MLQUEUE_HOST_BUSY; + break; + case ASC_ERROR: + default: + asc_scsi_done(scp); + break; + } + + return result; +} + +static DEF_SCSI_QCMD(advansys_queuecommand) + +static ushort AscGetEisaChipCfg(PortAddr iop_base) +{ + PortAddr eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) | + (PortAddr) (ASC_EISA_CFG_IOP_MASK); + return inpw(eisa_cfg_iop); +} + +/* + * Return the BIOS address of the adapter at the specified + * I/O port and with the specified bus type. + */ +static unsigned short AscGetChipBiosAddress(PortAddr iop_base, + unsigned short bus_type) +{ + unsigned short cfg_lsw; + unsigned short bios_addr; + + /* + * The PCI BIOS is re-located by the motherboard BIOS. Because + * of this the driver can not determine where a PCI BIOS is + * loaded and executes. + */ + if (bus_type & ASC_IS_PCI) + return 0; + + if ((bus_type & ASC_IS_EISA) != 0) { + cfg_lsw = AscGetEisaChipCfg(iop_base); + cfg_lsw &= 0x000F; + bios_addr = ASC_BIOS_MIN_ADDR + cfg_lsw * ASC_BIOS_BANK_SIZE; + return bios_addr; + } + + cfg_lsw = AscGetChipCfgLsw(iop_base); + bios_addr = ASC_BIOS_MIN_ADDR + (cfg_lsw >> 12) * ASC_BIOS_BANK_SIZE; + return bios_addr; +} + +static uchar AscSetChipScsiID(PortAddr iop_base, uchar new_host_id) +{ + ushort cfg_lsw; + + if (AscGetChipScsiID(iop_base) == new_host_id) { + return (new_host_id); + } + cfg_lsw = AscGetChipCfgLsw(iop_base); + cfg_lsw &= 0xF8FF; + cfg_lsw |= (ushort)((new_host_id & ASC_MAX_TID) << 8); + AscSetChipCfgLsw(iop_base, cfg_lsw); + return (AscGetChipScsiID(iop_base)); +} + +static unsigned char AscGetChipScsiCtrl(PortAddr iop_base) +{ + unsigned char sc; + + AscSetBank(iop_base, 1); + sc = inp(iop_base + IOP_REG_SC); + AscSetBank(iop_base, 0); + return sc; +} + +static unsigned char AscGetChipVersion(PortAddr iop_base, + unsigned short bus_type) +{ + if (bus_type & ASC_IS_EISA) { + PortAddr eisa_iop; + unsigned char revision; + eisa_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) | + (PortAddr) ASC_EISA_REV_IOP_MASK; + revision = inp(eisa_iop); + return ASC_CHIP_MIN_VER_EISA - 1 + revision; + } + return AscGetChipVerNo(iop_base); +} + +static int AscStopQueueExe(PortAddr iop_base) +{ + int count = 0; + + if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) == 0) { + AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, + ASC_STOP_REQ_RISC_STOP); + do { + if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) & + ASC_STOP_ACK_RISC_STOP) { + return (1); + } + mdelay(100); + } while (count++ < 20); + } + return (0); +} + +static unsigned int AscGetMaxDmaCount(ushort bus_type) +{ + if (bus_type & (ASC_IS_EISA | ASC_IS_VL)) + return ASC_MAX_VL_DMA_COUNT; + return ASC_MAX_PCI_DMA_COUNT; +} + +static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc) +{ + int i; + PortAddr iop_base; + uchar chip_version; + + iop_base = asc_dvc->iop_base; + asc_dvc->err_code = 0; + if ((asc_dvc->bus_type & + (ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) { + asc_dvc->err_code |= ASC_IERR_NO_BUS_TYPE; + } + AscSetChipControl(iop_base, CC_HALT); + AscSetChipStatus(iop_base, 0); + asc_dvc->bug_fix_cntl = 0; + asc_dvc->pci_fix_asyn_xfer = 0; + asc_dvc->pci_fix_asyn_xfer_always = 0; + /* asc_dvc->init_state initialized in AscInitGetConfig(). 
*/ + asc_dvc->sdtr_done = 0; + asc_dvc->cur_total_qng = 0; + asc_dvc->is_in_int = false; + asc_dvc->in_critical_cnt = 0; + asc_dvc->last_q_shortage = 0; + asc_dvc->use_tagged_qng = 0; + asc_dvc->no_scam = 0; + asc_dvc->unit_not_ready = 0; + asc_dvc->queue_full_or_busy = 0; + asc_dvc->redo_scam = 0; + asc_dvc->res2 = 0; + asc_dvc->min_sdtr_index = 0; + asc_dvc->cfg->can_tagged_qng = 0; + asc_dvc->cfg->cmd_qng_enabled = 0; + asc_dvc->dvc_cntl = ASC_DEF_DVC_CNTL; + asc_dvc->init_sdtr = 0; + asc_dvc->max_total_qng = ASC_DEF_MAX_TOTAL_QNG; + asc_dvc->scsi_reset_wait = 3; + asc_dvc->start_motor = ASC_SCSI_WIDTH_BIT_SET; + asc_dvc->max_dma_count = AscGetMaxDmaCount(asc_dvc->bus_type); + asc_dvc->cfg->sdtr_enable = ASC_SCSI_WIDTH_BIT_SET; + asc_dvc->cfg->disc_enable = ASC_SCSI_WIDTH_BIT_SET; + asc_dvc->cfg->chip_scsi_id = ASC_DEF_CHIP_SCSI_ID; + chip_version = AscGetChipVersion(iop_base, asc_dvc->bus_type); + asc_dvc->cfg->chip_version = chip_version; + asc_dvc->sdtr_period_tbl = asc_syn_xfer_period; + asc_dvc->max_sdtr_index = 7; + if ((asc_dvc->bus_type & ASC_IS_PCI) && + (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3150)) { + asc_dvc->bus_type = ASC_IS_PCI_ULTRA; + asc_dvc->sdtr_period_tbl = asc_syn_ultra_xfer_period; + asc_dvc->max_sdtr_index = 15; + if (chip_version == ASC_CHIP_VER_PCI_ULTRA_3150) { + AscSetExtraControl(iop_base, + (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE)); + } else if (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3050) { + AscSetExtraControl(iop_base, + (SEC_ACTIVE_NEGATE | + SEC_ENABLE_FILTER)); + } + } + if (asc_dvc->bus_type == ASC_IS_PCI) { + AscSetExtraControl(iop_base, + (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE)); + } + + for (i = 0; i <= ASC_MAX_TID; i++) { + asc_dvc->cur_dvc_qng[i] = 0; + asc_dvc->max_dvc_qng[i] = ASC_MAX_SCSI1_QNG; + asc_dvc->scsiq_busy_head[i] = (ASC_SCSI_Q *)0L; + asc_dvc->scsiq_busy_tail[i] = (ASC_SCSI_Q *)0L; + asc_dvc->cfg->max_tag_qng[i] = ASC_MAX_INRAM_TAG_QNG; + } +} + +static int AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg) +{ + int retry; + + for (retry = 0; retry < ASC_EEP_MAX_RETRY; retry++) { + unsigned char read_back; + AscSetChipEEPCmd(iop_base, cmd_reg); + mdelay(1); + read_back = AscGetChipEEPCmd(iop_base); + if (read_back == cmd_reg) + return 1; + } + return 0; +} + +static void AscWaitEEPRead(void) +{ + mdelay(1); +} + +static ushort AscReadEEPWord(PortAddr iop_base, uchar addr) +{ + ushort read_wval; + uchar cmd_reg; + + AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE); + AscWaitEEPRead(); + cmd_reg = addr | ASC_EEP_CMD_READ; + AscWriteEEPCmdReg(iop_base, cmd_reg); + AscWaitEEPRead(); + read_wval = AscGetChipEEPData(iop_base); + AscWaitEEPRead(); + return read_wval; +} + +static ushort AscGetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, + ushort bus_type) +{ + ushort wval; + ushort sum; + ushort *wbuf; + int cfg_beg; + int cfg_end; + int uchar_end_in_config = ASC_EEP_MAX_DVC_ADDR - 2; + int s_addr; + + wbuf = (ushort *)cfg_buf; + sum = 0; + /* Read two config words; Byte-swapping done by AscReadEEPWord(). 
*/ + for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) { + *wbuf = AscReadEEPWord(iop_base, (uchar)s_addr); + sum += *wbuf; + } + if (bus_type & ASC_IS_VL) { + cfg_beg = ASC_EEP_DVC_CFG_BEG_VL; + cfg_end = ASC_EEP_MAX_DVC_ADDR_VL; + } else { + cfg_beg = ASC_EEP_DVC_CFG_BEG; + cfg_end = ASC_EEP_MAX_DVC_ADDR; + } + for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) { + wval = AscReadEEPWord(iop_base, (uchar)s_addr); + if (s_addr <= uchar_end_in_config) { + /* + * Swap all char fields - must unswap bytes already swapped + * by AscReadEEPWord(). + */ + *wbuf = le16_to_cpu(wval); + } else { + /* Don't swap word field at the end - cntl field. */ + *wbuf = wval; + } + sum += wval; /* Checksum treats all EEPROM data as words. */ + } + /* + * Read the checksum word which will be compared against 'sum' + * by the caller. Word field already swapped. + */ + *wbuf = AscReadEEPWord(iop_base, (uchar)s_addr); + return sum; +} + +static int AscTestExternalLram(ASC_DVC_VAR *asc_dvc) +{ + PortAddr iop_base; + ushort q_addr; + ushort saved_word; + int sta; + + iop_base = asc_dvc->iop_base; + sta = 0; + q_addr = ASC_QNO_TO_QADDR(241); + saved_word = AscReadLramWord(iop_base, q_addr); + AscSetChipLramAddr(iop_base, q_addr); + AscSetChipLramData(iop_base, 0x55AA); + mdelay(10); + AscSetChipLramAddr(iop_base, q_addr); + if (AscGetChipLramData(iop_base) == 0x55AA) { + sta = 1; + AscWriteLramWord(iop_base, q_addr, saved_word); + } + return (sta); +} + +static void AscWaitEEPWrite(void) +{ + mdelay(20); +} + +static int AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg) +{ + ushort read_back; + int retry; + + retry = 0; + while (true) { + AscSetChipEEPData(iop_base, data_reg); + mdelay(1); + read_back = AscGetChipEEPData(iop_base); + if (read_back == data_reg) { + return (1); + } + if (retry++ > ASC_EEP_MAX_RETRY) { + return (0); + } + } +} + +static ushort AscWriteEEPWord(PortAddr iop_base, uchar addr, ushort word_val) +{ + ushort read_wval; + + read_wval = AscReadEEPWord(iop_base, addr); + if (read_wval != word_val) { + AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_ABLE); + AscWaitEEPRead(); + AscWriteEEPDataReg(iop_base, word_val); + AscWaitEEPRead(); + AscWriteEEPCmdReg(iop_base, + (uchar)((uchar)ASC_EEP_CMD_WRITE | addr)); + AscWaitEEPWrite(); + AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE); + AscWaitEEPRead(); + return (AscReadEEPWord(iop_base, addr)); + } + return (read_wval); +} + +static int AscSetEEPConfigOnce(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, + ushort bus_type) +{ + int n_error; + ushort *wbuf; + ushort word; + ushort sum; + int s_addr; + int cfg_beg; + int cfg_end; + int uchar_end_in_config = ASC_EEP_MAX_DVC_ADDR - 2; + + wbuf = (ushort *)cfg_buf; + n_error = 0; + sum = 0; + /* Write two config words; AscWriteEEPWord() will swap bytes. */ + for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) { + sum += *wbuf; + if (*wbuf != AscWriteEEPWord(iop_base, (uchar)s_addr, *wbuf)) { + n_error++; + } + } + if (bus_type & ASC_IS_VL) { + cfg_beg = ASC_EEP_DVC_CFG_BEG_VL; + cfg_end = ASC_EEP_MAX_DVC_ADDR_VL; + } else { + cfg_beg = ASC_EEP_DVC_CFG_BEG; + cfg_end = ASC_EEP_MAX_DVC_ADDR; + } + for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) { + if (s_addr <= uchar_end_in_config) { + /* + * This is a char field. Swap char fields before they are + * swapped again by AscWriteEEPWord(). + */ + word = cpu_to_le16(*wbuf); + if (word != + AscWriteEEPWord(iop_base, (uchar)s_addr, word)) { + n_error++; + } + } else { + /* Don't swap word field at the end - cntl field. 
*/ + if (*wbuf != + AscWriteEEPWord(iop_base, (uchar)s_addr, *wbuf)) { + n_error++; + } + } + sum += *wbuf; /* Checksum calculated from word values. */ + } + /* Write checksum word. It will be swapped by AscWriteEEPWord(). */ + *wbuf = sum; + if (sum != AscWriteEEPWord(iop_base, (uchar)s_addr, sum)) { + n_error++; + } + + /* Read EEPROM back again. */ + wbuf = (ushort *)cfg_buf; + /* + * Read two config words; Byte-swapping done by AscReadEEPWord(). + */ + for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) { + if (*wbuf != AscReadEEPWord(iop_base, (uchar)s_addr)) { + n_error++; + } + } + if (bus_type & ASC_IS_VL) { + cfg_beg = ASC_EEP_DVC_CFG_BEG_VL; + cfg_end = ASC_EEP_MAX_DVC_ADDR_VL; + } else { + cfg_beg = ASC_EEP_DVC_CFG_BEG; + cfg_end = ASC_EEP_MAX_DVC_ADDR; + } + for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) { + if (s_addr <= uchar_end_in_config) { + /* + * Swap all char fields. Must unswap bytes already swapped + * by AscReadEEPWord(). + */ + word = + le16_to_cpu(AscReadEEPWord + (iop_base, (uchar)s_addr)); + } else { + /* Don't swap word field at the end - cntl field. */ + word = AscReadEEPWord(iop_base, (uchar)s_addr); + } + if (*wbuf != word) { + n_error++; + } + } + /* Read checksum; Byte swapping not needed. */ + if (AscReadEEPWord(iop_base, (uchar)s_addr) != sum) { + n_error++; + } + return n_error; +} + +static int AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, + ushort bus_type) +{ + int retry; + int n_error; + + retry = 0; + while (true) { + if ((n_error = AscSetEEPConfigOnce(iop_base, cfg_buf, + bus_type)) == 0) { + break; + } + if (++retry > ASC_EEP_MAX_RETRY) { + break; + } + } + return n_error; +} + +static int AscInitFromEEP(ASC_DVC_VAR *asc_dvc) +{ + ASCEEP_CONFIG eep_config_buf; + ASCEEP_CONFIG *eep_config; + PortAddr iop_base; + ushort chksum; + ushort warn_code; + ushort cfg_msw, cfg_lsw; + int i; + int write_eep = 0; + + iop_base = asc_dvc->iop_base; + warn_code = 0; + AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0x00FE); + AscStopQueueExe(iop_base); + if ((AscStopChip(iop_base)) || + (AscGetChipScsiCtrl(iop_base) != 0)) { + asc_dvc->init_state |= ASC_INIT_RESET_SCSI_DONE; + AscResetChipAndScsiBus(asc_dvc); + mdelay(asc_dvc->scsi_reset_wait * 1000); /* XXX: msleep? 
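 * (editor's note: mdelay() busy-waits, so this holds the CPU for
 * scsi_reset_wait seconds during initialization; msleep() would let
 * the scheduler run instead, which is what the XXX above is about)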
*/ + } + if (!AscIsChipHalted(iop_base)) { + asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP; + return (warn_code); + } + AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR); + if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) { + asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR; + return (warn_code); + } + eep_config = (ASCEEP_CONFIG *)&eep_config_buf; + cfg_msw = AscGetChipCfgMsw(iop_base); + cfg_lsw = AscGetChipCfgLsw(iop_base); + if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) { + cfg_msw &= ~ASC_CFG_MSW_CLR_MASK; + warn_code |= ASC_WARN_CFG_MSW_RECOVER; + AscSetChipCfgMsw(iop_base, cfg_msw); + } + chksum = AscGetEEPConfig(iop_base, eep_config, asc_dvc->bus_type); + ASC_DBG(1, "chksum 0x%x\n", chksum); + if (chksum == 0) { + chksum = 0xaa55; + } + if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) { + warn_code |= ASC_WARN_AUTO_CONFIG; + if (asc_dvc->cfg->chip_version == 3) { + if (eep_config->cfg_lsw != cfg_lsw) { + warn_code |= ASC_WARN_EEPROM_RECOVER; + eep_config->cfg_lsw = + AscGetChipCfgLsw(iop_base); + } + if (eep_config->cfg_msw != cfg_msw) { + warn_code |= ASC_WARN_EEPROM_RECOVER; + eep_config->cfg_msw = + AscGetChipCfgMsw(iop_base); + } + } + } + eep_config->cfg_msw &= ~ASC_CFG_MSW_CLR_MASK; + eep_config->cfg_lsw |= ASC_CFG0_HOST_INT_ON; + ASC_DBG(1, "eep_config->chksum 0x%x\n", eep_config->chksum); + if (chksum != eep_config->chksum) { + if (AscGetChipVersion(iop_base, asc_dvc->bus_type) == + ASC_CHIP_VER_PCI_ULTRA_3050) { + ASC_DBG(1, "chksum error ignored; EEPROM-less board\n"); + eep_config->init_sdtr = 0xFF; + eep_config->disc_enable = 0xFF; + eep_config->start_motor = 0xFF; + eep_config->use_cmd_qng = 0; + eep_config->max_total_qng = 0xF0; + eep_config->max_tag_qng = 0x20; + eep_config->cntl = 0xBFFF; + ASC_EEP_SET_CHIP_ID(eep_config, 7); + eep_config->no_scam = 0; + eep_config->adapter_info[0] = 0; + eep_config->adapter_info[1] = 0; + eep_config->adapter_info[2] = 0; + eep_config->adapter_info[3] = 0; + eep_config->adapter_info[4] = 0; + /* Indicate EEPROM-less board. 
*/ + eep_config->adapter_info[5] = 0xBB; + } else { + ASC_PRINT + ("AscInitFromEEP: EEPROM checksum error; Will try to re-write EEPROM.\n"); + write_eep = 1; + warn_code |= ASC_WARN_EEPROM_CHKSUM; + } + } + asc_dvc->cfg->sdtr_enable = eep_config->init_sdtr; + asc_dvc->cfg->disc_enable = eep_config->disc_enable; + asc_dvc->cfg->cmd_qng_enabled = eep_config->use_cmd_qng; + asc_dvc->start_motor = eep_config->start_motor; + asc_dvc->dvc_cntl = eep_config->cntl; + asc_dvc->no_scam = eep_config->no_scam; + asc_dvc->cfg->adapter_info[0] = eep_config->adapter_info[0]; + asc_dvc->cfg->adapter_info[1] = eep_config->adapter_info[1]; + asc_dvc->cfg->adapter_info[2] = eep_config->adapter_info[2]; + asc_dvc->cfg->adapter_info[3] = eep_config->adapter_info[3]; + asc_dvc->cfg->adapter_info[4] = eep_config->adapter_info[4]; + asc_dvc->cfg->adapter_info[5] = eep_config->adapter_info[5]; + if (!AscTestExternalLram(asc_dvc)) { + if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == + ASC_IS_PCI_ULTRA)) { + eep_config->max_total_qng = + ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG; + eep_config->max_tag_qng = + ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG; + } else { + eep_config->cfg_msw |= 0x0800; + cfg_msw |= 0x0800; + AscSetChipCfgMsw(iop_base, cfg_msw); + eep_config->max_total_qng = ASC_MAX_PCI_INRAM_TOTAL_QNG; + eep_config->max_tag_qng = ASC_MAX_INRAM_TAG_QNG; + } + } else { + } + if (eep_config->max_total_qng < ASC_MIN_TOTAL_QNG) { + eep_config->max_total_qng = ASC_MIN_TOTAL_QNG; + } + if (eep_config->max_total_qng > ASC_MAX_TOTAL_QNG) { + eep_config->max_total_qng = ASC_MAX_TOTAL_QNG; + } + if (eep_config->max_tag_qng > eep_config->max_total_qng) { + eep_config->max_tag_qng = eep_config->max_total_qng; + } + if (eep_config->max_tag_qng < ASC_MIN_TAG_Q_PER_DVC) { + eep_config->max_tag_qng = ASC_MIN_TAG_Q_PER_DVC; + } + asc_dvc->max_total_qng = eep_config->max_total_qng; + if ((eep_config->use_cmd_qng & eep_config->disc_enable) != + eep_config->use_cmd_qng) { + eep_config->disc_enable = eep_config->use_cmd_qng; + warn_code |= ASC_WARN_CMD_QNG_CONFLICT; + } + ASC_EEP_SET_CHIP_ID(eep_config, + ASC_EEP_GET_CHIP_ID(eep_config) & ASC_MAX_TID); + asc_dvc->cfg->chip_scsi_id = ASC_EEP_GET_CHIP_ID(eep_config); + if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) && + !(asc_dvc->dvc_cntl & ASC_CNTL_SDTR_ENABLE_ULTRA)) { + asc_dvc->min_sdtr_index = ASC_SDTR_ULTRA_PCI_10MB_INDEX; + } + + for (i = 0; i <= ASC_MAX_TID; i++) { + asc_dvc->dos_int13_table[i] = eep_config->dos_int13_table[i]; + asc_dvc->cfg->max_tag_qng[i] = eep_config->max_tag_qng; + asc_dvc->cfg->sdtr_period_offset[i] = + (uchar)(ASC_DEF_SDTR_OFFSET | + (asc_dvc->min_sdtr_index << 4)); + } + eep_config->cfg_msw = AscGetChipCfgMsw(iop_base); + if (write_eep) { + if ((i = AscSetEEPConfig(iop_base, eep_config, + asc_dvc->bus_type)) != 0) { + ASC_PRINT1 + ("AscInitFromEEP: Failed to re-write EEPROM with %d errors.\n", + i); + } else { + ASC_PRINT + ("AscInitFromEEP: Successfully re-wrote EEPROM.\n"); + } + } + return (warn_code); +} + +static int AscInitGetConfig(struct Scsi_Host *shost) +{ + struct asc_board *board = shost_priv(shost); + ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var; + unsigned short warn_code = 0; + + asc_dvc->init_state = ASC_INIT_STATE_BEG_GET_CFG; + if (asc_dvc->err_code != 0) + return asc_dvc->err_code; + + if (AscFindSignature(asc_dvc->iop_base)) { + AscInitAscDvcVar(asc_dvc); + warn_code = AscInitFromEEP(asc_dvc); + asc_dvc->init_state |= ASC_INIT_STATE_END_GET_CFG; + if (asc_dvc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT) + 
asc_dvc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT; + } else { + asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE; + } + + switch (warn_code) { + case 0: /* No error */ + break; + case ASC_WARN_IO_PORT_ROTATE: + shost_printk(KERN_WARNING, shost, "I/O port address " + "modified\n"); + break; + case ASC_WARN_AUTO_CONFIG: + shost_printk(KERN_WARNING, shost, "I/O port increment switch " + "enabled\n"); + break; + case ASC_WARN_EEPROM_CHKSUM: + shost_printk(KERN_WARNING, shost, "EEPROM checksum error\n"); + break; + case ASC_WARN_IRQ_MODIFIED: + shost_printk(KERN_WARNING, shost, "IRQ modified\n"); + break; + case ASC_WARN_CMD_QNG_CONFLICT: + shost_printk(KERN_WARNING, shost, "tag queuing enabled w/o " + "disconnects\n"); + break; + default: + shost_printk(KERN_WARNING, shost, "unknown warning: 0x%x\n", + warn_code); + break; + } + + if (asc_dvc->err_code != 0) + shost_printk(KERN_ERR, shost, "error 0x%x at init_state " + "0x%x\n", asc_dvc->err_code, asc_dvc->init_state); + + return asc_dvc->err_code; +} + +static int AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *shost) +{ + struct asc_board *board = shost_priv(shost); + ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var; + PortAddr iop_base = asc_dvc->iop_base; + unsigned short cfg_msw; + unsigned short warn_code = 0; + + asc_dvc->init_state |= ASC_INIT_STATE_BEG_SET_CFG; + if (asc_dvc->err_code != 0) + return asc_dvc->err_code; + if (!AscFindSignature(asc_dvc->iop_base)) { + asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE; + return asc_dvc->err_code; + } + + cfg_msw = AscGetChipCfgMsw(iop_base); + if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) { + cfg_msw &= ~ASC_CFG_MSW_CLR_MASK; + warn_code |= ASC_WARN_CFG_MSW_RECOVER; + AscSetChipCfgMsw(iop_base, cfg_msw); + } + if ((asc_dvc->cfg->cmd_qng_enabled & asc_dvc->cfg->disc_enable) != + asc_dvc->cfg->cmd_qng_enabled) { + asc_dvc->cfg->disc_enable = asc_dvc->cfg->cmd_qng_enabled; + warn_code |= ASC_WARN_CMD_QNG_CONFLICT; + } + if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) { + warn_code |= ASC_WARN_AUTO_CONFIG; + } +#ifdef CONFIG_PCI + if (asc_dvc->bus_type & ASC_IS_PCI) { + cfg_msw &= 0xFFC0; + AscSetChipCfgMsw(iop_base, cfg_msw); + if ((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) { + } else { + if ((pdev->device == PCI_DEVICE_ID_ASP_1200A) || + (pdev->device == PCI_DEVICE_ID_ASP_ABP940)) { + asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_IF_NOT_DWB; + asc_dvc->bug_fix_cntl |= + ASC_BUG_FIX_ASYN_USE_SYN; + } + } + } else +#endif /* CONFIG_PCI */ + if (AscSetChipScsiID(iop_base, asc_dvc->cfg->chip_scsi_id) != + asc_dvc->cfg->chip_scsi_id) { + asc_dvc->err_code |= ASC_IERR_SET_SCSI_ID; + } + + asc_dvc->init_state |= ASC_INIT_STATE_END_SET_CFG; + + switch (warn_code) { + case 0: /* No error. 
*/ + break; + case ASC_WARN_IO_PORT_ROTATE: + shost_printk(KERN_WARNING, shost, "I/O port address " + "modified\n"); + break; + case ASC_WARN_AUTO_CONFIG: + shost_printk(KERN_WARNING, shost, "I/O port increment switch " + "enabled\n"); + break; + case ASC_WARN_EEPROM_CHKSUM: + shost_printk(KERN_WARNING, shost, "EEPROM checksum error\n"); + break; + case ASC_WARN_IRQ_MODIFIED: + shost_printk(KERN_WARNING, shost, "IRQ modified\n"); + break; + case ASC_WARN_CMD_QNG_CONFLICT: + shost_printk(KERN_WARNING, shost, "tag queuing w/o " + "disconnects\n"); + break; + default: + shost_printk(KERN_WARNING, shost, "unknown warning: 0x%x\n", + warn_code); + break; + } + + if (asc_dvc->err_code != 0) + shost_printk(KERN_ERR, shost, "error 0x%x at init_state " + "0x%x\n", asc_dvc->err_code, asc_dvc->init_state); + + return asc_dvc->err_code; +} + +/* + * EEPROM Configuration. + * + * All drivers should use this structure to set the default EEPROM + * configuration. The BIOS now uses this structure when it is built. + * Additional structure information can be found in a_condor.h where + * the structure is defined. + * + * The *_Field_IsChar structs are needed to correct for endianness. + * These values are read from the board 16 bits at a time directly + * into the structs. Because some fields are char, the values will be + * in the wrong order. The *_Field_IsChar tells when to flip the + * bytes. Data read and written to PCI memory is automatically swapped + * on big-endian platforms so char fields read as words are actually being + * unswapped on big-endian platforms. + */ +#ifdef CONFIG_PCI +static ADVEEP_3550_CONFIG Default_3550_EEPROM_Config = { + ADV_EEPROM_BIOS_ENABLE, /* cfg_lsw */ + 0x0000, /* cfg_msw */ + 0xFFFF, /* disc_enable */ + 0xFFFF, /* wdtr_able */ + 0xFFFF, /* sdtr_able */ + 0xFFFF, /* start_motor */ + 0xFFFF, /* tagqng_able */ + 0xFFFF, /* bios_scan */ + 0, /* scam_tolerant */ + 7, /* adapter_scsi_id */ + 0, /* bios_boot_delay */ + 3, /* scsi_reset_delay */ + 0, /* bios_id_lun */ + 0, /* termination */ + 0, /* reserved1 */ + 0xFFE7, /* bios_ctrl */ + 0xFFFF, /* ultra_able */ + 0, /* reserved2 */ + ASC_DEF_MAX_HOST_QNG, /* max_host_qng */ + ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */ + 0, /* dvc_cntl */ + 0, /* bug_fix */ + 0, /* serial_number_word1 */ + 0, /* serial_number_word2 */ + 0, /* serial_number_word3 */ + 0, /* check_sum */ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + , /* oem_name[16] */ + 0, /* dvc_err_code */ + 0, /* adv_err_code */ + 0, /* adv_err_addr */ + 0, /* saved_dvc_err_code */ + 0, /* saved_adv_err_code */ + 0, /* saved_adv_err_addr */ + 0 /* num_of_err */ +}; + +static ADVEEP_3550_CONFIG ADVEEP_3550_Config_Field_IsChar = { + 0, /* cfg_lsw */ + 0, /* cfg_msw */ + 0, /* -disc_enable */ + 0, /* wdtr_able */ + 0, /* sdtr_able */ + 0, /* start_motor */ + 0, /* tagqng_able */ + 0, /* bios_scan */ + 0, /* scam_tolerant */ + 1, /* adapter_scsi_id */ + 1, /* bios_boot_delay */ + 1, /* scsi_reset_delay */ + 1, /* bios_id_lun */ + 1, /* termination */ + 1, /* reserved1 */ + 0, /* bios_ctrl */ + 0, /* ultra_able */ + 0, /* reserved2 */ + 1, /* max_host_qng */ + 1, /* max_dvc_qng */ + 0, /* dvc_cntl */ + 0, /* bug_fix */ + 0, /* serial_number_word1 */ + 0, /* serial_number_word2 */ + 0, /* serial_number_word3 */ + 0, /* check_sum */ + {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} + , /* oem_name[16] */ + 0, /* dvc_err_code */ + 0, /* adv_err_code */ + 0, /* adv_err_addr */ + 0, /* saved_dvc_err_code */ + 0, /* saved_adv_err_code */ + 0, /* saved_adv_err_addr */ + 0 /* 
num_of_err */ +}; + +static ADVEEP_38C0800_CONFIG Default_38C0800_EEPROM_Config = { + ADV_EEPROM_BIOS_ENABLE, /* 00 cfg_lsw */ + 0x0000, /* 01 cfg_msw */ + 0xFFFF, /* 02 disc_enable */ + 0xFFFF, /* 03 wdtr_able */ + 0x4444, /* 04 sdtr_speed1 */ + 0xFFFF, /* 05 start_motor */ + 0xFFFF, /* 06 tagqng_able */ + 0xFFFF, /* 07 bios_scan */ + 0, /* 08 scam_tolerant */ + 7, /* 09 adapter_scsi_id */ + 0, /* bios_boot_delay */ + 3, /* 10 scsi_reset_delay */ + 0, /* bios_id_lun */ + 0, /* 11 termination_se */ + 0, /* termination_lvd */ + 0xFFE7, /* 12 bios_ctrl */ + 0x4444, /* 13 sdtr_speed2 */ + 0x4444, /* 14 sdtr_speed3 */ + ASC_DEF_MAX_HOST_QNG, /* 15 max_host_qng */ + ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */ + 0, /* 16 dvc_cntl */ + 0x4444, /* 17 sdtr_speed4 */ + 0, /* 18 serial_number_word1 */ + 0, /* 19 serial_number_word2 */ + 0, /* 20 serial_number_word3 */ + 0, /* 21 check_sum */ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + , /* 22-29 oem_name[16] */ + 0, /* 30 dvc_err_code */ + 0, /* 31 adv_err_code */ + 0, /* 32 adv_err_addr */ + 0, /* 33 saved_dvc_err_code */ + 0, /* 34 saved_adv_err_code */ + 0, /* 35 saved_adv_err_addr */ + 0, /* 36 reserved */ + 0, /* 37 reserved */ + 0, /* 38 reserved */ + 0, /* 39 reserved */ + 0, /* 40 reserved */ + 0, /* 41 reserved */ + 0, /* 42 reserved */ + 0, /* 43 reserved */ + 0, /* 44 reserved */ + 0, /* 45 reserved */ + 0, /* 46 reserved */ + 0, /* 47 reserved */ + 0, /* 48 reserved */ + 0, /* 49 reserved */ + 0, /* 50 reserved */ + 0, /* 51 reserved */ + 0, /* 52 reserved */ + 0, /* 53 reserved */ + 0, /* 54 reserved */ + 0, /* 55 reserved */ + 0, /* 56 cisptr_lsw */ + 0, /* 57 cisprt_msw */ + PCI_VENDOR_ID_ASP, /* 58 subsysvid */ + PCI_DEVICE_ID_38C0800_REV1, /* 59 subsysid */ + 0, /* 60 reserved */ + 0, /* 61 reserved */ + 0, /* 62 reserved */ + 0 /* 63 reserved */ +}; + +static ADVEEP_38C0800_CONFIG ADVEEP_38C0800_Config_Field_IsChar = { + 0, /* 00 cfg_lsw */ + 0, /* 01 cfg_msw */ + 0, /* 02 disc_enable */ + 0, /* 03 wdtr_able */ + 0, /* 04 sdtr_speed1 */ + 0, /* 05 start_motor */ + 0, /* 06 tagqng_able */ + 0, /* 07 bios_scan */ + 0, /* 08 scam_tolerant */ + 1, /* 09 adapter_scsi_id */ + 1, /* bios_boot_delay */ + 1, /* 10 scsi_reset_delay */ + 1, /* bios_id_lun */ + 1, /* 11 termination_se */ + 1, /* termination_lvd */ + 0, /* 12 bios_ctrl */ + 0, /* 13 sdtr_speed2 */ + 0, /* 14 sdtr_speed3 */ + 1, /* 15 max_host_qng */ + 1, /* max_dvc_qng */ + 0, /* 16 dvc_cntl */ + 0, /* 17 sdtr_speed4 */ + 0, /* 18 serial_number_word1 */ + 0, /* 19 serial_number_word2 */ + 0, /* 20 serial_number_word3 */ + 0, /* 21 check_sum */ + {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} + , /* 22-29 oem_name[16] */ + 0, /* 30 dvc_err_code */ + 0, /* 31 adv_err_code */ + 0, /* 32 adv_err_addr */ + 0, /* 33 saved_dvc_err_code */ + 0, /* 34 saved_adv_err_code */ + 0, /* 35 saved_adv_err_addr */ + 0, /* 36 reserved */ + 0, /* 37 reserved */ + 0, /* 38 reserved */ + 0, /* 39 reserved */ + 0, /* 40 reserved */ + 0, /* 41 reserved */ + 0, /* 42 reserved */ + 0, /* 43 reserved */ + 0, /* 44 reserved */ + 0, /* 45 reserved */ + 0, /* 46 reserved */ + 0, /* 47 reserved */ + 0, /* 48 reserved */ + 0, /* 49 reserved */ + 0, /* 50 reserved */ + 0, /* 51 reserved */ + 0, /* 52 reserved */ + 0, /* 53 reserved */ + 0, /* 54 reserved */ + 0, /* 55 reserved */ + 0, /* 56 cisptr_lsw */ + 0, /* 57 cisprt_msw */ + 0, /* 58 subsysvid */ + 0, /* 59 subsysid */ + 0, /* 60 reserved */ + 0, /* 61 reserved */ + 0, /* 62 reserved */ + 0 /* 63 reserved */ +}; + +static ADVEEP_38C1600_CONFIG 
Default_38C1600_EEPROM_Config = { + ADV_EEPROM_BIOS_ENABLE, /* 00 cfg_lsw */ + 0x0000, /* 01 cfg_msw */ + 0xFFFF, /* 02 disc_enable */ + 0xFFFF, /* 03 wdtr_able */ + 0x5555, /* 04 sdtr_speed1 */ + 0xFFFF, /* 05 start_motor */ + 0xFFFF, /* 06 tagqng_able */ + 0xFFFF, /* 07 bios_scan */ + 0, /* 08 scam_tolerant */ + 7, /* 09 adapter_scsi_id */ + 0, /* bios_boot_delay */ + 3, /* 10 scsi_reset_delay */ + 0, /* bios_id_lun */ + 0, /* 11 termination_se */ + 0, /* termination_lvd */ + 0xFFE7, /* 12 bios_ctrl */ + 0x5555, /* 13 sdtr_speed2 */ + 0x5555, /* 14 sdtr_speed3 */ + ASC_DEF_MAX_HOST_QNG, /* 15 max_host_qng */ + ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */ + 0, /* 16 dvc_cntl */ + 0x5555, /* 17 sdtr_speed4 */ + 0, /* 18 serial_number_word1 */ + 0, /* 19 serial_number_word2 */ + 0, /* 20 serial_number_word3 */ + 0, /* 21 check_sum */ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + , /* 22-29 oem_name[16] */ + 0, /* 30 dvc_err_code */ + 0, /* 31 adv_err_code */ + 0, /* 32 adv_err_addr */ + 0, /* 33 saved_dvc_err_code */ + 0, /* 34 saved_adv_err_code */ + 0, /* 35 saved_adv_err_addr */ + 0, /* 36 reserved */ + 0, /* 37 reserved */ + 0, /* 38 reserved */ + 0, /* 39 reserved */ + 0, /* 40 reserved */ + 0, /* 41 reserved */ + 0, /* 42 reserved */ + 0, /* 43 reserved */ + 0, /* 44 reserved */ + 0, /* 45 reserved */ + 0, /* 46 reserved */ + 0, /* 47 reserved */ + 0, /* 48 reserved */ + 0, /* 49 reserved */ + 0, /* 50 reserved */ + 0, /* 51 reserved */ + 0, /* 52 reserved */ + 0, /* 53 reserved */ + 0, /* 54 reserved */ + 0, /* 55 reserved */ + 0, /* 56 cisptr_lsw */ + 0, /* 57 cisprt_msw */ + PCI_VENDOR_ID_ASP, /* 58 subsysvid */ + PCI_DEVICE_ID_38C1600_REV1, /* 59 subsysid */ + 0, /* 60 reserved */ + 0, /* 61 reserved */ + 0, /* 62 reserved */ + 0 /* 63 reserved */ +}; + +static ADVEEP_38C1600_CONFIG ADVEEP_38C1600_Config_Field_IsChar = { + 0, /* 00 cfg_lsw */ + 0, /* 01 cfg_msw */ + 0, /* 02 disc_enable */ + 0, /* 03 wdtr_able */ + 0, /* 04 sdtr_speed1 */ + 0, /* 05 start_motor */ + 0, /* 06 tagqng_able */ + 0, /* 07 bios_scan */ + 0, /* 08 scam_tolerant */ + 1, /* 09 adapter_scsi_id */ + 1, /* bios_boot_delay */ + 1, /* 10 scsi_reset_delay */ + 1, /* bios_id_lun */ + 1, /* 11 termination_se */ + 1, /* termination_lvd */ + 0, /* 12 bios_ctrl */ + 0, /* 13 sdtr_speed2 */ + 0, /* 14 sdtr_speed3 */ + 1, /* 15 max_host_qng */ + 1, /* max_dvc_qng */ + 0, /* 16 dvc_cntl */ + 0, /* 17 sdtr_speed4 */ + 0, /* 18 serial_number_word1 */ + 0, /* 19 serial_number_word2 */ + 0, /* 20 serial_number_word3 */ + 0, /* 21 check_sum */ + {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} + , /* 22-29 oem_name[16] */ + 0, /* 30 dvc_err_code */ + 0, /* 31 adv_err_code */ + 0, /* 32 adv_err_addr */ + 0, /* 33 saved_dvc_err_code */ + 0, /* 34 saved_adv_err_code */ + 0, /* 35 saved_adv_err_addr */ + 0, /* 36 reserved */ + 0, /* 37 reserved */ + 0, /* 38 reserved */ + 0, /* 39 reserved */ + 0, /* 40 reserved */ + 0, /* 41 reserved */ + 0, /* 42 reserved */ + 0, /* 43 reserved */ + 0, /* 44 reserved */ + 0, /* 45 reserved */ + 0, /* 46 reserved */ + 0, /* 47 reserved */ + 0, /* 48 reserved */ + 0, /* 49 reserved */ + 0, /* 50 reserved */ + 0, /* 51 reserved */ + 0, /* 52 reserved */ + 0, /* 53 reserved */ + 0, /* 54 reserved */ + 0, /* 55 reserved */ + 0, /* 56 cisptr_lsw */ + 0, /* 57 cisprt_msw */ + 0, /* 58 subsysvid */ + 0, /* 59 subsysid */ + 0, /* 60 reserved */ + 0, /* 61 reserved */ + 0, /* 62 reserved */ + 0 /* 63 reserved */ +}; + +/* + * Wait for EEPROM command to complete + */ +static void 
AdvWaitEEPCmd(AdvPortAddr iop_base) +{ + int eep_delay_ms; + + for (eep_delay_ms = 0; eep_delay_ms < ADV_EEP_DELAY_MS; eep_delay_ms++) { + if (AdvReadWordRegister(iop_base, IOPW_EE_CMD) & + ASC_EEP_CMD_DONE) { + break; + } + mdelay(1); + } + if ((AdvReadWordRegister(iop_base, IOPW_EE_CMD) & ASC_EEP_CMD_DONE) == + 0) + BUG(); +} + +/* + * Read the EEPROM from specified location + */ +static ushort AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr) +{ + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, + ASC_EEP_CMD_READ | eep_word_addr); + AdvWaitEEPCmd(iop_base); + return AdvReadWordRegister(iop_base, IOPW_EE_DATA); +} + +/* + * Write the EEPROM from 'cfg_buf'. + */ +static void AdvSet3550EEPConfig(AdvPortAddr iop_base, + ADVEEP_3550_CONFIG *cfg_buf) +{ + ushort *wbuf; + ushort addr, chksum; + ushort *charfields; + + wbuf = (ushort *)cfg_buf; + charfields = (ushort *)&ADVEEP_3550_Config_Field_IsChar; + chksum = 0; + + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE); + AdvWaitEEPCmd(iop_base); + + /* + * Write EEPROM from word 0 to word 20. + */ + for (addr = ADV_EEP_DVC_CFG_BEGIN; + addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) { + ushort word; + + if (*charfields++) { + word = cpu_to_le16(*wbuf); + } else { + word = *wbuf; + } + chksum += *wbuf; /* Checksum is calculated from word values. */ + AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word); + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, + ASC_EEP_CMD_WRITE | addr); + AdvWaitEEPCmd(iop_base); + mdelay(ADV_EEP_DELAY_MS); + } + + /* + * Write EEPROM checksum at word 21. + */ + AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum); + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr); + AdvWaitEEPCmd(iop_base); + wbuf++; + charfields++; + + /* + * Write EEPROM OEM name at words 22 to 29. + */ + for (addr = ADV_EEP_DVC_CTL_BEGIN; + addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) { + ushort word; + + if (*charfields++) { + word = cpu_to_le16(*wbuf); + } else { + word = *wbuf; + } + AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word); + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, + ASC_EEP_CMD_WRITE | addr); + AdvWaitEEPCmd(iop_base); + } + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE); + AdvWaitEEPCmd(iop_base); +} + +/* + * Write the EEPROM from 'cfg_buf'. + */ +static void AdvSet38C0800EEPConfig(AdvPortAddr iop_base, + ADVEEP_38C0800_CONFIG *cfg_buf) +{ + ushort *wbuf; + ushort *charfields; + ushort addr, chksum; + + wbuf = (ushort *)cfg_buf; + charfields = (ushort *)&ADVEEP_38C0800_Config_Field_IsChar; + chksum = 0; + + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE); + AdvWaitEEPCmd(iop_base); + + /* + * Write EEPROM from word 0 to word 20. + */ + for (addr = ADV_EEP_DVC_CFG_BEGIN; + addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) { + ushort word; + + if (*charfields++) { + word = cpu_to_le16(*wbuf); + } else { + word = *wbuf; + } + chksum += *wbuf; /* Checksum is calculated from word values. */ + AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word); + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, + ASC_EEP_CMD_WRITE | addr); + AdvWaitEEPCmd(iop_base); + mdelay(ADV_EEP_DELAY_MS); + } + + /* + * Write EEPROM checksum at word 21. + */ + AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum); + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr); + AdvWaitEEPCmd(iop_base); + wbuf++; + charfields++; + + /* + * Write EEPROM OEM name at words 22 to 29. 
+ */ + for (addr = ADV_EEP_DVC_CTL_BEGIN; + addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) { + ushort word; + + if (*charfields++) { + word = cpu_to_le16(*wbuf); + } else { + word = *wbuf; + } + AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word); + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, + ASC_EEP_CMD_WRITE | addr); + AdvWaitEEPCmd(iop_base); + } + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE); + AdvWaitEEPCmd(iop_base); +} + +/* + * Write the EEPROM from 'cfg_buf'. + */ +static void AdvSet38C1600EEPConfig(AdvPortAddr iop_base, + ADVEEP_38C1600_CONFIG *cfg_buf) +{ + ushort *wbuf; + ushort *charfields; + ushort addr, chksum; + + wbuf = (ushort *)cfg_buf; + charfields = (ushort *)&ADVEEP_38C1600_Config_Field_IsChar; + chksum = 0; + + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE); + AdvWaitEEPCmd(iop_base); + + /* + * Write EEPROM from word 0 to word 20. + */ + for (addr = ADV_EEP_DVC_CFG_BEGIN; + addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) { + ushort word; + + if (*charfields++) { + word = cpu_to_le16(*wbuf); + } else { + word = *wbuf; + } + chksum += *wbuf; /* Checksum is calculated from word values. */ + AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word); + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, + ASC_EEP_CMD_WRITE | addr); + AdvWaitEEPCmd(iop_base); + mdelay(ADV_EEP_DELAY_MS); + } + + /* + * Write EEPROM checksum at word 21. + */ + AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum); + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr); + AdvWaitEEPCmd(iop_base); + wbuf++; + charfields++; + + /* + * Write EEPROM OEM name at words 22 to 29. + */ + for (addr = ADV_EEP_DVC_CTL_BEGIN; + addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) { + ushort word; + + if (*charfields++) { + word = cpu_to_le16(*wbuf); + } else { + word = *wbuf; + } + AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word); + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, + ASC_EEP_CMD_WRITE | addr); + AdvWaitEEPCmd(iop_base); + } + AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE); + AdvWaitEEPCmd(iop_base); +} + +/* + * Read EEPROM configuration into the specified buffer. + * + * Return a checksum based on the EEPROM configuration read. + */ +static ushort AdvGet3550EEPConfig(AdvPortAddr iop_base, + ADVEEP_3550_CONFIG *cfg_buf) +{ + ushort wval, chksum; + ushort *wbuf; + int eep_addr; + ushort *charfields; + + charfields = (ushort *)&ADVEEP_3550_Config_Field_IsChar; + wbuf = (ushort *)cfg_buf; + chksum = 0; + + for (eep_addr = ADV_EEP_DVC_CFG_BEGIN; + eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) { + wval = AdvReadEEPWord(iop_base, eep_addr); + chksum += wval; /* Checksum is calculated from word values. */ + if (*charfields++) { + *wbuf = le16_to_cpu(wval); + } else { + *wbuf = wval; + } + } + /* Read checksum word. */ + *wbuf = AdvReadEEPWord(iop_base, eep_addr); + wbuf++; + charfields++; + + /* Read rest of EEPROM not covered by the checksum. */ + for (eep_addr = ADV_EEP_DVC_CTL_BEGIN; + eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) { + *wbuf = AdvReadEEPWord(iop_base, eep_addr); + if (*charfields++) { + *wbuf = le16_to_cpu(*wbuf); + } + } + return chksum; +} + +/* + * Read EEPROM configuration into the specified buffer. + * + * Return a checksum based on the EEPROM configuration read. 
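 * (An illustrative sketch of the byte-order handling follows; it is an
 *  editorial addition, not part of the upstream driver.)
 */

/*
 * Editor's sketch: the "parallel IsChar map" technique used by the
 * AdvGet.../AdvSet... EEPROM helpers, reduced to its essentials.  The
 * function name and parameters are hypothetical; only le16_to_cpu()
 * and the ushort typedef come from the surrounding code.
 */
static void example_fixup_char_fields(ushort *words, const ushort *is_char,
				      int count)
{
	int i;

	for (i = 0; i < count; i++) {
		/*
		 * Entries flagged in the parallel map hold two byte-wide
		 * fields that were read as one little-endian word, so
		 * they must be unswapped on big-endian hosts; plain word
		 * fields are left as read.
		 */
		if (is_char[i])
			words[i] = le16_to_cpu(words[i]);
	}
}

/*
 * Read EEPROM configuration into the specified buffer.
 *
 * Return a checksum based on the EEPROM configuration read.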
+ */ +static ushort AdvGet38C0800EEPConfig(AdvPortAddr iop_base, + ADVEEP_38C0800_CONFIG *cfg_buf) +{ + ushort wval, chksum; + ushort *wbuf; + int eep_addr; + ushort *charfields; + + charfields = (ushort *)&ADVEEP_38C0800_Config_Field_IsChar; + wbuf = (ushort *)cfg_buf; + chksum = 0; + + for (eep_addr = ADV_EEP_DVC_CFG_BEGIN; + eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) { + wval = AdvReadEEPWord(iop_base, eep_addr); + chksum += wval; /* Checksum is calculated from word values. */ + if (*charfields++) { + *wbuf = le16_to_cpu(wval); + } else { + *wbuf = wval; + } + } + /* Read checksum word. */ + *wbuf = AdvReadEEPWord(iop_base, eep_addr); + wbuf++; + charfields++; + + /* Read rest of EEPROM not covered by the checksum. */ + for (eep_addr = ADV_EEP_DVC_CTL_BEGIN; + eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) { + *wbuf = AdvReadEEPWord(iop_base, eep_addr); + if (*charfields++) { + *wbuf = le16_to_cpu(*wbuf); + } + } + return chksum; +} + +/* + * Read EEPROM configuration into the specified buffer. + * + * Return a checksum based on the EEPROM configuration read. + */ +static ushort AdvGet38C1600EEPConfig(AdvPortAddr iop_base, + ADVEEP_38C1600_CONFIG *cfg_buf) +{ + ushort wval, chksum; + ushort *wbuf; + int eep_addr; + ushort *charfields; + + charfields = (ushort *)&ADVEEP_38C1600_Config_Field_IsChar; + wbuf = (ushort *)cfg_buf; + chksum = 0; + + for (eep_addr = ADV_EEP_DVC_CFG_BEGIN; + eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) { + wval = AdvReadEEPWord(iop_base, eep_addr); + chksum += wval; /* Checksum is calculated from word values. */ + if (*charfields++) { + *wbuf = le16_to_cpu(wval); + } else { + *wbuf = wval; + } + } + /* Read checksum word. */ + *wbuf = AdvReadEEPWord(iop_base, eep_addr); + wbuf++; + charfields++; + + /* Read rest of EEPROM not covered by the checksum. */ + for (eep_addr = ADV_EEP_DVC_CTL_BEGIN; + eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) { + *wbuf = AdvReadEEPWord(iop_base, eep_addr); + if (*charfields++) { + *wbuf = le16_to_cpu(*wbuf); + } + } + return chksum; +} + +/* + * Read the board's EEPROM configuration. Set fields in ADV_DVC_VAR and + * ADV_DVC_CFG based on the EEPROM settings. The chip is stopped while + * all of this is done. + * + * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR. + * + * For a non-fatal error return a warning code. If there are no warnings + * then 0 is returned. + * + * Note: Chip is stopped on entry. + */ +static int AdvInitFrom3550EEP(ADV_DVC_VAR *asc_dvc) +{ + AdvPortAddr iop_base; + ushort warn_code; + ADVEEP_3550_CONFIG eep_config; + + iop_base = asc_dvc->iop_base; + + warn_code = 0; + + /* + * Read the board's EEPROM configuration. + * + * Set default values if a bad checksum is found. + */ + if (AdvGet3550EEPConfig(iop_base, &eep_config) != eep_config.check_sum) { + warn_code |= ASC_WARN_EEPROM_CHKSUM; + + /* + * Set EEPROM default values. + */ + memcpy(&eep_config, &Default_3550_EEPROM_Config, + sizeof(ADVEEP_3550_CONFIG)); + + /* + * Assume the 6 byte board serial number that was read from + * EEPROM is correct even if the EEPROM checksum failed. + */ + eep_config.serial_number_word3 = + AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1); + + eep_config.serial_number_word2 = + AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2); + + eep_config.serial_number_word1 = + AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3); + + AdvSet3550EEPConfig(iop_base, &eep_config); + } + /* + * Set ASC_DVC_VAR and ASC_DVC_CFG variables from the + * EEPROM configuration that was read. 
+ * + * This is the mapping of EEPROM fields to Adv Library fields. + */ + asc_dvc->wdtr_able = eep_config.wdtr_able; + asc_dvc->sdtr_able = eep_config.sdtr_able; + asc_dvc->ultra_able = eep_config.ultra_able; + asc_dvc->tagqng_able = eep_config.tagqng_able; + asc_dvc->cfg->disc_enable = eep_config.disc_enable; + asc_dvc->max_host_qng = eep_config.max_host_qng; + asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; + asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ADV_MAX_TID); + asc_dvc->start_motor = eep_config.start_motor; + asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay; + asc_dvc->bios_ctrl = eep_config.bios_ctrl; + asc_dvc->no_scam = eep_config.scam_tolerant; + asc_dvc->cfg->serial1 = eep_config.serial_number_word1; + asc_dvc->cfg->serial2 = eep_config.serial_number_word2; + asc_dvc->cfg->serial3 = eep_config.serial_number_word3; + + /* + * Set the host maximum queuing (max. 253, min. 16) and the per device + * maximum queuing (max. 63, min. 4). + */ + if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) { + eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; + } else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) { + /* If the value is zero, assume it is uninitialized. */ + if (eep_config.max_host_qng == 0) { + eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; + } else { + eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG; + } + } + + if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) { + eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; + } else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) { + /* If the value is zero, assume it is uninitialized. */ + if (eep_config.max_dvc_qng == 0) { + eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; + } else { + eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG; + } + } + + /* + * If 'max_dvc_qng' is greater than 'max_host_qng', then + * set 'max_dvc_qng' to 'max_host_qng'. + */ + if (eep_config.max_dvc_qng > eep_config.max_host_qng) { + eep_config.max_dvc_qng = eep_config.max_host_qng; + } + + /* + * Set ADV_DVC_VAR 'max_host_qng' and ADV_DVC_VAR 'max_dvc_qng' + * values based on possibly adjusted EEPROM values. + */ + asc_dvc->max_host_qng = eep_config.max_host_qng; + asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; + + /* + * If the EEPROM 'termination' field is set to automatic (0), then set + * the ADV_DVC_CFG 'termination' field to automatic also. + * + * If the termination is specified with a non-zero 'termination' + * value check that a legal value is set and set the ADV_DVC_CFG + * 'termination' field appropriately. + */ + if (eep_config.termination == 0) { + asc_dvc->cfg->termination = 0; /* auto termination */ + } else { + /* Enable manual control with low off / high off. */ + if (eep_config.termination == 1) { + asc_dvc->cfg->termination = TERM_CTL_SEL; + + /* Enable manual control with low off / high on. */ + } else if (eep_config.termination == 2) { + asc_dvc->cfg->termination = TERM_CTL_SEL | TERM_CTL_H; + + /* Enable manual control with low on / high on. */ + } else if (eep_config.termination == 3) { + asc_dvc->cfg->termination = + TERM_CTL_SEL | TERM_CTL_H | TERM_CTL_L; + } else { + /* + * The EEPROM 'termination' field contains a bad value. Use + * automatic termination instead. + */ + asc_dvc->cfg->termination = 0; + warn_code |= ASC_WARN_EEPROM_TERMINATION; + } + } + + return warn_code; +} + +/* + * Read the board's EEPROM configuration. Set fields in ADV_DVC_VAR and + * ADV_DVC_CFG based on the EEPROM settings. The chip is stopped while + * all of this is done. 
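 * (An illustrative sketch of the per-target SDTR speed encoding that
 *  this function decodes follows below; it is an editorial addition,
 *  not part of the upstream driver.)
 */

/*
 * Editor's sketch: how the four 16-bit sdtr_speed words map onto the 16
 * target IDs.  Each word carries four 4-bit speed codes, low nibble
 * first, so TIDs 0..3 live in sdtr_speed1, TIDs 4..7 in sdtr_speed2,
 * and so on.  The function name and the 'speed[4]' parameter are
 * hypothetical.
 */
static int example_sdtr_speed_nibble(const ushort speed[4], int tid)
{
	/* Word index is tid / 4; nibble position within that word is tid % 4. */
	return (speed[tid / 4] >> ((tid % 4) * 4)) & 0xF;
}

/*
 * Read the board's EEPROM configuration. Set fields in ADV_DVC_VAR and
 * ADV_DVC_CFG based on the EEPROM settings. The chip is stopped while
 * all of this is done.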
+ * + * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR. + * + * For a non-fatal error return a warning code. If there are no warnings + * then 0 is returned. + * + * Note: Chip is stopped on entry. + */ +static int AdvInitFrom38C0800EEP(ADV_DVC_VAR *asc_dvc) +{ + AdvPortAddr iop_base; + ushort warn_code; + ADVEEP_38C0800_CONFIG eep_config; + uchar tid, termination; + ushort sdtr_speed = 0; + + iop_base = asc_dvc->iop_base; + + warn_code = 0; + + /* + * Read the board's EEPROM configuration. + * + * Set default values if a bad checksum is found. + */ + if (AdvGet38C0800EEPConfig(iop_base, &eep_config) != + eep_config.check_sum) { + warn_code |= ASC_WARN_EEPROM_CHKSUM; + + /* + * Set EEPROM default values. + */ + memcpy(&eep_config, &Default_38C0800_EEPROM_Config, + sizeof(ADVEEP_38C0800_CONFIG)); + + /* + * Assume the 6 byte board serial number that was read from + * EEPROM is correct even if the EEPROM checksum failed. + */ + eep_config.serial_number_word3 = + AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1); + + eep_config.serial_number_word2 = + AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2); + + eep_config.serial_number_word1 = + AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3); + + AdvSet38C0800EEPConfig(iop_base, &eep_config); + } + /* + * Set ADV_DVC_VAR and ADV_DVC_CFG variables from the + * EEPROM configuration that was read. + * + * This is the mapping of EEPROM fields to Adv Library fields. + */ + asc_dvc->wdtr_able = eep_config.wdtr_able; + asc_dvc->sdtr_speed1 = eep_config.sdtr_speed1; + asc_dvc->sdtr_speed2 = eep_config.sdtr_speed2; + asc_dvc->sdtr_speed3 = eep_config.sdtr_speed3; + asc_dvc->sdtr_speed4 = eep_config.sdtr_speed4; + asc_dvc->tagqng_able = eep_config.tagqng_able; + asc_dvc->cfg->disc_enable = eep_config.disc_enable; + asc_dvc->max_host_qng = eep_config.max_host_qng; + asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; + asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ADV_MAX_TID); + asc_dvc->start_motor = eep_config.start_motor; + asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay; + asc_dvc->bios_ctrl = eep_config.bios_ctrl; + asc_dvc->no_scam = eep_config.scam_tolerant; + asc_dvc->cfg->serial1 = eep_config.serial_number_word1; + asc_dvc->cfg->serial2 = eep_config.serial_number_word2; + asc_dvc->cfg->serial3 = eep_config.serial_number_word3; + + /* + * For every Target ID if any of its 'sdtr_speed[1234]' bits + * are set, then set an 'sdtr_able' bit for it. + */ + asc_dvc->sdtr_able = 0; + for (tid = 0; tid <= ADV_MAX_TID; tid++) { + if (tid == 0) { + sdtr_speed = asc_dvc->sdtr_speed1; + } else if (tid == 4) { + sdtr_speed = asc_dvc->sdtr_speed2; + } else if (tid == 8) { + sdtr_speed = asc_dvc->sdtr_speed3; + } else if (tid == 12) { + sdtr_speed = asc_dvc->sdtr_speed4; + } + if (sdtr_speed & ADV_MAX_TID) { + asc_dvc->sdtr_able |= (1 << tid); + } + sdtr_speed >>= 4; + } + + /* + * Set the host maximum queuing (max. 253, min. 16) and the per device + * maximum queuing (max. 63, min. 4). + */ + if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) { + eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; + } else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) { + /* If the value is zero, assume it is uninitialized. 
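 * (editor's note: the net effect of this block is a clamp of
 * max_host_qng to [ASC_DEF_MIN_HOST_QNG, ASC_DEF_MAX_HOST_QNG], with a
 * zero value mapped to the default maximum; max_dvc_qng is treated the
 * same way just below)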
*/ + if (eep_config.max_host_qng == 0) { + eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; + } else { + eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG; + } + } + + if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) { + eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; + } else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) { + /* If the value is zero, assume it is uninitialized. */ + if (eep_config.max_dvc_qng == 0) { + eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; + } else { + eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG; + } + } + + /* + * If 'max_dvc_qng' is greater than 'max_host_qng', then + * set 'max_dvc_qng' to 'max_host_qng'. + */ + if (eep_config.max_dvc_qng > eep_config.max_host_qng) { + eep_config.max_dvc_qng = eep_config.max_host_qng; + } + + /* + * Set ADV_DVC_VAR 'max_host_qng' and ADV_DVC_VAR 'max_dvc_qng' + * values based on possibly adjusted EEPROM values. + */ + asc_dvc->max_host_qng = eep_config.max_host_qng; + asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; + + /* + * If the EEPROM 'termination' field is set to automatic (0), then set + * the ADV_DVC_CFG 'termination' field to automatic also. + * + * If the termination is specified with a non-zero 'termination' + * value check that a legal value is set and set the ADV_DVC_CFG + * 'termination' field appropriately. + */ + if (eep_config.termination_se == 0) { + termination = 0; /* auto termination for SE */ + } else { + /* Enable manual control with low off / high off. */ + if (eep_config.termination_se == 1) { + termination = 0; + + /* Enable manual control with low off / high on. */ + } else if (eep_config.termination_se == 2) { + termination = TERM_SE_HI; + + /* Enable manual control with low on / high on. */ + } else if (eep_config.termination_se == 3) { + termination = TERM_SE; + } else { + /* + * The EEPROM 'termination_se' field contains a bad value. + * Use automatic termination instead. + */ + termination = 0; + warn_code |= ASC_WARN_EEPROM_TERMINATION; + } + } + + if (eep_config.termination_lvd == 0) { + asc_dvc->cfg->termination = termination; /* auto termination for LVD */ + } else { + /* Enable manual control with low off / high off. */ + if (eep_config.termination_lvd == 1) { + asc_dvc->cfg->termination = termination; + + /* Enable manual control with low off / high on. */ + } else if (eep_config.termination_lvd == 2) { + asc_dvc->cfg->termination = termination | TERM_LVD_HI; + + /* Enable manual control with low on / high on. */ + } else if (eep_config.termination_lvd == 3) { + asc_dvc->cfg->termination = termination | TERM_LVD; + } else { + /* + * The EEPROM 'termination_lvd' field contains a bad value. + * Use automatic termination instead. + */ + asc_dvc->cfg->termination = termination; + warn_code |= ASC_WARN_EEPROM_TERMINATION; + } + } + + return warn_code; +} + +/* + * Read the board's EEPROM configuration. Set fields in ASC_DVC_VAR and + * ASC_DVC_CFG based on the EEPROM settings. The chip is stopped while + * all of this is done. + * + * On failure set the ASC_DVC_VAR field 'err_code' and return ADV_ERROR. + * + * For a non-fatal error return a warning code. If there are no warnings + * then 0 is returned. + * + * Note: Chip is stopped on entry. + */ +static int AdvInitFrom38C1600EEP(ADV_DVC_VAR *asc_dvc) +{ + AdvPortAddr iop_base; + ushort warn_code; + ADVEEP_38C1600_CONFIG eep_config; + uchar tid, termination; + ushort sdtr_speed = 0; + + iop_base = asc_dvc->iop_base; + + warn_code = 0; + + /* + * Read the board's EEPROM configuration. + * + * Set default values if a bad checksum is found. 
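 * (editor's note: unlike the 3550/38C0800 paths, the 38C1600 defaults
 *  are further adjusted below for PCI function 1 boards: BIOS_ENABLE
 *  is cleared and, depending on the GPIO 0 input, INTAB may also be
 *  cleared in cfg_lsw)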
+ */ + if (AdvGet38C1600EEPConfig(iop_base, &eep_config) != + eep_config.check_sum) { + struct pci_dev *pdev = adv_dvc_to_pdev(asc_dvc); + warn_code |= ASC_WARN_EEPROM_CHKSUM; + + /* + * Set EEPROM default values. + */ + memcpy(&eep_config, &Default_38C1600_EEPROM_Config, + sizeof(ADVEEP_38C1600_CONFIG)); + + if (PCI_FUNC(pdev->devfn) != 0) { + u8 ints; + /* + * Disable Bit 14 (BIOS_ENABLE) to fix SPARC Ultra 60 + * and old Mac system booting problem. The Expansion + * ROM must be disabled in Function 1 for these systems + */ + eep_config.cfg_lsw &= ~ADV_EEPROM_BIOS_ENABLE; + /* + * Clear the INTAB (bit 11) if the GPIO 0 input + * indicates the Function 1 interrupt line is wired + * to INTB. + * + * Set/Clear Bit 11 (INTAB) from the GPIO bit 0 input: + * 1 - Function 1 interrupt line wired to INT A. + * 0 - Function 1 interrupt line wired to INT B. + * + * Note: Function 0 is always wired to INTA. + * Put all 5 GPIO bits in input mode and then read + * their input values. + */ + AdvWriteByteRegister(iop_base, IOPB_GPIO_CNTL, 0); + ints = AdvReadByteRegister(iop_base, IOPB_GPIO_DATA); + if ((ints & 0x01) == 0) + eep_config.cfg_lsw &= ~ADV_EEPROM_INTAB; + } + + /* + * Assume the 6 byte board serial number that was read from + * EEPROM is correct even if the EEPROM checksum failed. + */ + eep_config.serial_number_word3 = + AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1); + eep_config.serial_number_word2 = + AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2); + eep_config.serial_number_word1 = + AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3); + + AdvSet38C1600EEPConfig(iop_base, &eep_config); + } + + /* + * Set ASC_DVC_VAR and ASC_DVC_CFG variables from the + * EEPROM configuration that was read. + * + * This is the mapping of EEPROM fields to Adv Library fields. + */ + asc_dvc->wdtr_able = eep_config.wdtr_able; + asc_dvc->sdtr_speed1 = eep_config.sdtr_speed1; + asc_dvc->sdtr_speed2 = eep_config.sdtr_speed2; + asc_dvc->sdtr_speed3 = eep_config.sdtr_speed3; + asc_dvc->sdtr_speed4 = eep_config.sdtr_speed4; + asc_dvc->ppr_able = 0; + asc_dvc->tagqng_able = eep_config.tagqng_able; + asc_dvc->cfg->disc_enable = eep_config.disc_enable; + asc_dvc->max_host_qng = eep_config.max_host_qng; + asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; + asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ASC_MAX_TID); + asc_dvc->start_motor = eep_config.start_motor; + asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay; + asc_dvc->bios_ctrl = eep_config.bios_ctrl; + asc_dvc->no_scam = eep_config.scam_tolerant; + + /* + * For every Target ID if any of its 'sdtr_speed[1234]' bits + * are set, then set an 'sdtr_able' bit for it. + */ + asc_dvc->sdtr_able = 0; + for (tid = 0; tid <= ASC_MAX_TID; tid++) { + if (tid == 0) { + sdtr_speed = asc_dvc->sdtr_speed1; + } else if (tid == 4) { + sdtr_speed = asc_dvc->sdtr_speed2; + } else if (tid == 8) { + sdtr_speed = asc_dvc->sdtr_speed3; + } else if (tid == 12) { + sdtr_speed = asc_dvc->sdtr_speed4; + } + if (sdtr_speed & ASC_MAX_TID) { + asc_dvc->sdtr_able |= (1 << tid); + } + sdtr_speed >>= 4; + } + + /* + * Set the host maximum queuing (max. 253, min. 16) and the per device + * maximum queuing (max. 63, min. 4). + */ + if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) { + eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; + } else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) { + /* If the value is zero, assume it is uninitialized. 
*/ + if (eep_config.max_host_qng == 0) { + eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; + } else { + eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG; + } + } + + if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) { + eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; + } else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) { + /* If the value is zero, assume it is uninitialized. */ + if (eep_config.max_dvc_qng == 0) { + eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; + } else { + eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG; + } + } + + /* + * If 'max_dvc_qng' is greater than 'max_host_qng', then + * set 'max_dvc_qng' to 'max_host_qng'. + */ + if (eep_config.max_dvc_qng > eep_config.max_host_qng) { + eep_config.max_dvc_qng = eep_config.max_host_qng; + } + + /* + * Set ASC_DVC_VAR 'max_host_qng' and ASC_DVC_VAR 'max_dvc_qng' + * values based on possibly adjusted EEPROM values. + */ + asc_dvc->max_host_qng = eep_config.max_host_qng; + asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; + + /* + * If the EEPROM 'termination' field is set to automatic (0), then set + * the ASC_DVC_CFG 'termination' field to automatic also. + * + * If the termination is specified with a non-zero 'termination' + * value check that a legal value is set and set the ASC_DVC_CFG + * 'termination' field appropriately. + */ + if (eep_config.termination_se == 0) { + termination = 0; /* auto termination for SE */ + } else { + /* Enable manual control with low off / high off. */ + if (eep_config.termination_se == 1) { + termination = 0; + + /* Enable manual control with low off / high on. */ + } else if (eep_config.termination_se == 2) { + termination = TERM_SE_HI; + + /* Enable manual control with low on / high on. */ + } else if (eep_config.termination_se == 3) { + termination = TERM_SE; + } else { + /* + * The EEPROM 'termination_se' field contains a bad value. + * Use automatic termination instead. + */ + termination = 0; + warn_code |= ASC_WARN_EEPROM_TERMINATION; + } + } + + if (eep_config.termination_lvd == 0) { + asc_dvc->cfg->termination = termination; /* auto termination for LVD */ + } else { + /* Enable manual control with low off / high off. */ + if (eep_config.termination_lvd == 1) { + asc_dvc->cfg->termination = termination; + + /* Enable manual control with low off / high on. */ + } else if (eep_config.termination_lvd == 2) { + asc_dvc->cfg->termination = termination | TERM_LVD_HI; + + /* Enable manual control with low on / high on. */ + } else if (eep_config.termination_lvd == 3) { + asc_dvc->cfg->termination = termination | TERM_LVD; + } else { + /* + * The EEPROM 'termination_lvd' field contains a bad value. + * Use automatic termination instead. + */ + asc_dvc->cfg->termination = termination; + warn_code |= ASC_WARN_EEPROM_TERMINATION; + } + } + + return warn_code; +} + +/* + * Initialize the ADV_DVC_VAR structure. + * + * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR. + * + * For a non-fatal error return a warning code. If there are no warnings + * then 0 is returned. + */ +static int AdvInitGetConfig(struct pci_dev *pdev, struct Scsi_Host *shost) +{ + struct asc_board *board = shost_priv(shost); + ADV_DVC_VAR *asc_dvc = &board->dvc_var.adv_dvc_var; + unsigned short warn_code = 0; + AdvPortAddr iop_base = asc_dvc->iop_base; + u16 cmd; + int status; + + asc_dvc->err_code = 0; + + /* + * Save the state of the PCI Configuration Command Register + * "Parity Error Response Control" Bit. 
If the bit is clear (0), + * in AdvInitAsc3550/38C0800Driver() tell the microcode to ignore + * DMA parity errors. + */ + asc_dvc->cfg->control_flag = 0; + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + if ((cmd & PCI_COMMAND_PARITY) == 0) + asc_dvc->cfg->control_flag |= CONTROL_FLAG_IGNORE_PERR; + + asc_dvc->cfg->chip_version = + AdvGetChipVersion(iop_base, asc_dvc->bus_type); + + ASC_DBG(1, "iopb_chip_id_1: 0x%x 0x%x\n", + (ushort)AdvReadByteRegister(iop_base, IOPB_CHIP_ID_1), + (ushort)ADV_CHIP_ID_BYTE); + + ASC_DBG(1, "iopw_chip_id_0: 0x%x 0x%x\n", + (ushort)AdvReadWordRegister(iop_base, IOPW_CHIP_ID_0), + (ushort)ADV_CHIP_ID_WORD); + + /* + * Reset the chip to start and allow register writes. + */ + if (AdvFindSignature(iop_base) == 0) { + asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE; + return ADV_ERROR; + } else { + /* + * The caller must set 'chip_type' to a valid setting. + */ + if (asc_dvc->chip_type != ADV_CHIP_ASC3550 && + asc_dvc->chip_type != ADV_CHIP_ASC38C0800 && + asc_dvc->chip_type != ADV_CHIP_ASC38C1600) { + asc_dvc->err_code |= ASC_IERR_BAD_CHIPTYPE; + return ADV_ERROR; + } + + /* + * Reset Chip. + */ + AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, + ADV_CTRL_REG_CMD_RESET); + mdelay(100); + AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, + ADV_CTRL_REG_CMD_WR_IO_REG); + + if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) { + status = AdvInitFrom38C1600EEP(asc_dvc); + } else if (asc_dvc->chip_type == ADV_CHIP_ASC38C0800) { + status = AdvInitFrom38C0800EEP(asc_dvc); + } else { + status = AdvInitFrom3550EEP(asc_dvc); + } + warn_code |= status; + } + + if (warn_code != 0) + shost_printk(KERN_WARNING, shost, "warning: 0x%x\n", warn_code); + + if (asc_dvc->err_code) + shost_printk(KERN_ERR, shost, "error code 0x%x\n", + asc_dvc->err_code); + + return asc_dvc->err_code; +} +#endif + +static const struct scsi_host_template advansys_template = { + .proc_name = DRV_NAME, +#ifdef CONFIG_PROC_FS + .show_info = advansys_show_info, +#endif + .name = DRV_NAME, + .info = advansys_info, + .queuecommand = advansys_queuecommand, + .eh_host_reset_handler = advansys_reset, + .bios_param = advansys_biosparam, + .slave_configure = advansys_slave_configure, + .cmd_size = sizeof(struct advansys_cmd), +}; + +static int advansys_wide_init_chip(struct Scsi_Host *shost) +{ + struct asc_board *board = shost_priv(shost); + struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var; + size_t sgblk_pool_size; + int warn_code, err_code; + + /* + * Allocate buffer carrier structures. The total size + * is about 8 KB, so allocate all at once. + */ + adv_dvc->carrier = dma_alloc_coherent(board->dev, + ADV_CARRIER_BUFSIZE, &adv_dvc->carrier_addr, GFP_KERNEL); + ASC_DBG(1, "carrier 0x%p\n", adv_dvc->carrier); + + if (!adv_dvc->carrier) + goto kmalloc_failed; + + /* + * Allocate up to 'max_host_qng' request structures for the Wide + * board. The total size is about 16 KB, so allocate all at once. + * If the allocation fails decrement and try again. 
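 * (editor's note: in the code below a failed dma_alloc_coherent() just
 *  takes the kmalloc_failed error path; no decrement-and-retry loop is
 *  actually present in this version)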
+ */ + board->adv_reqp_size = adv_dvc->max_host_qng * sizeof(adv_req_t); + if (board->adv_reqp_size & 0x1f) { + ASC_DBG(1, "unaligned reqp %lu bytes\n", sizeof(adv_req_t)); + board->adv_reqp_size = ADV_32BALIGN(board->adv_reqp_size); + } + board->adv_reqp = dma_alloc_coherent(board->dev, board->adv_reqp_size, + &board->adv_reqp_addr, GFP_KERNEL); + + if (!board->adv_reqp) + goto kmalloc_failed; + + ASC_DBG(1, "reqp 0x%p, req_cnt %d, bytes %lu\n", board->adv_reqp, + adv_dvc->max_host_qng, board->adv_reqp_size); + + /* + * Allocate up to ADV_TOT_SG_BLOCK request structures for + * the Wide board. Each structure is about 136 bytes. + */ + sgblk_pool_size = sizeof(adv_sgblk_t) * ADV_TOT_SG_BLOCK; + board->adv_sgblk_pool = dma_pool_create("adv_sgblk", board->dev, + sgblk_pool_size, 32, 0); + + ASC_DBG(1, "sg_cnt %d * %lu = %lu bytes\n", ADV_TOT_SG_BLOCK, + sizeof(adv_sgblk_t), sgblk_pool_size); + + if (!board->adv_sgblk_pool) + goto kmalloc_failed; + + if (adv_dvc->chip_type == ADV_CHIP_ASC3550) { + ASC_DBG(2, "AdvInitAsc3550Driver()\n"); + warn_code = AdvInitAsc3550Driver(adv_dvc); + } else if (adv_dvc->chip_type == ADV_CHIP_ASC38C0800) { + ASC_DBG(2, "AdvInitAsc38C0800Driver()\n"); + warn_code = AdvInitAsc38C0800Driver(adv_dvc); + } else { + ASC_DBG(2, "AdvInitAsc38C1600Driver()\n"); + warn_code = AdvInitAsc38C1600Driver(adv_dvc); + } + err_code = adv_dvc->err_code; + + if (warn_code || err_code) { + shost_printk(KERN_WARNING, shost, "error: warn 0x%x, error " + "0x%x\n", warn_code, err_code); + } + + goto exit; + + kmalloc_failed: + shost_printk(KERN_ERR, shost, "error: kmalloc() failed\n"); + err_code = ADV_ERROR; + exit: + return err_code; +} + +static void advansys_wide_free_mem(struct asc_board *board) +{ + struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var; + + if (adv_dvc->carrier) { + dma_free_coherent(board->dev, ADV_CARRIER_BUFSIZE, + adv_dvc->carrier, adv_dvc->carrier_addr); + adv_dvc->carrier = NULL; + } + if (board->adv_reqp) { + dma_free_coherent(board->dev, board->adv_reqp_size, + board->adv_reqp, board->adv_reqp_addr); + board->adv_reqp = NULL; + } + if (board->adv_sgblk_pool) { + dma_pool_destroy(board->adv_sgblk_pool); + board->adv_sgblk_pool = NULL; + } +} + +static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop, + int bus_type) +{ + struct pci_dev *pdev; + struct asc_board *boardp = shost_priv(shost); + ASC_DVC_VAR *asc_dvc_varp = NULL; + ADV_DVC_VAR *adv_dvc_varp = NULL; + int share_irq, warn_code, ret; + + pdev = (bus_type == ASC_IS_PCI) ? 
to_pci_dev(boardp->dev) : NULL; + + if (ASC_NARROW_BOARD(boardp)) { + ASC_DBG(1, "narrow board\n"); + asc_dvc_varp = &boardp->dvc_var.asc_dvc_var; + asc_dvc_varp->bus_type = bus_type; + asc_dvc_varp->drv_ptr = boardp; + asc_dvc_varp->cfg = &boardp->dvc_cfg.asc_dvc_cfg; + asc_dvc_varp->iop_base = iop; + } else { +#ifdef CONFIG_PCI + adv_dvc_varp = &boardp->dvc_var.adv_dvc_var; + adv_dvc_varp->drv_ptr = boardp; + adv_dvc_varp->cfg = &boardp->dvc_cfg.adv_dvc_cfg; + if (pdev->device == PCI_DEVICE_ID_ASP_ABP940UW) { + ASC_DBG(1, "wide board ASC-3550\n"); + adv_dvc_varp->chip_type = ADV_CHIP_ASC3550; + } else if (pdev->device == PCI_DEVICE_ID_38C0800_REV1) { + ASC_DBG(1, "wide board ASC-38C0800\n"); + adv_dvc_varp->chip_type = ADV_CHIP_ASC38C0800; + } else { + ASC_DBG(1, "wide board ASC-38C1600\n"); + adv_dvc_varp->chip_type = ADV_CHIP_ASC38C1600; + } + + boardp->asc_n_io_port = pci_resource_len(pdev, 1); + boardp->ioremap_addr = pci_ioremap_bar(pdev, 1); + if (!boardp->ioremap_addr) { + shost_printk(KERN_ERR, shost, "ioremap(%lx, %d) " + "returned NULL\n", + (long)pci_resource_start(pdev, 1), + boardp->asc_n_io_port); + ret = -ENODEV; + goto err_shost; + } + adv_dvc_varp->iop_base = (AdvPortAddr)boardp->ioremap_addr; + ASC_DBG(1, "iop_base: 0x%p\n", adv_dvc_varp->iop_base); + + /* + * Even though it isn't used to access wide boards, other + * than for the debug line below, save I/O Port address so + * that it can be reported. + */ + boardp->ioport = iop; + + ASC_DBG(1, "iopb_chip_id_1 0x%x, iopw_chip_id_0 0x%x\n", + (ushort)inp(iop + 1), (ushort)inpw(iop)); +#endif /* CONFIG_PCI */ + } + + if (ASC_NARROW_BOARD(boardp)) { + /* + * Set the board bus type and PCI IRQ before + * calling AscInitGetConfig(). + */ + switch (asc_dvc_varp->bus_type) { +#ifdef CONFIG_ISA + case ASC_IS_VL: + share_irq = 0; + break; + case ASC_IS_EISA: + share_irq = IRQF_SHARED; + break; +#endif /* CONFIG_ISA */ +#ifdef CONFIG_PCI + case ASC_IS_PCI: + share_irq = IRQF_SHARED; + break; +#endif /* CONFIG_PCI */ + default: + shost_printk(KERN_ERR, shost, "unknown adapter type: " + "%d\n", asc_dvc_varp->bus_type); + share_irq = 0; + break; + } + + /* + * NOTE: AscInitGetConfig() may change the board's + * bus_type value. The bus_type value should no + * longer be used. If the bus_type field must be + * referenced only use the bit-wise AND operator "&". + */ + ASC_DBG(2, "AscInitGetConfig()\n"); + ret = AscInitGetConfig(shost) ? -ENODEV : 0; + } else { +#ifdef CONFIG_PCI + /* + * For Wide boards set PCI information before calling + * AdvInitGetConfig(). + */ + share_irq = IRQF_SHARED; + ASC_DBG(2, "AdvInitGetConfig()\n"); + + ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0; +#else + share_irq = 0; + ret = -ENODEV; +#endif /* CONFIG_PCI */ + } + + if (ret) + goto err_unmap; + + /* + * Save the EEPROM configuration so that it can be displayed + * from /proc/scsi/advansys/[0...]. + */ + if (ASC_NARROW_BOARD(boardp)) { + + ASCEEP_CONFIG *ep; + + /* + * Set the adapter's target id bit in the 'init_tidmask' field. + */ + boardp->init_tidmask |= + ADV_TID_TO_TIDMASK(asc_dvc_varp->cfg->chip_scsi_id); + + /* + * Save EEPROM settings for the board. 
+ */ + ep = &boardp->eep_config.asc_eep; + + ep->init_sdtr = asc_dvc_varp->cfg->sdtr_enable; + ep->disc_enable = asc_dvc_varp->cfg->disc_enable; + ep->use_cmd_qng = asc_dvc_varp->cfg->cmd_qng_enabled; + ASC_EEP_SET_DMA_SPD(ep, ASC_DEF_ISA_DMA_SPEED); + ep->start_motor = asc_dvc_varp->start_motor; + ep->cntl = asc_dvc_varp->dvc_cntl; + ep->no_scam = asc_dvc_varp->no_scam; + ep->max_total_qng = asc_dvc_varp->max_total_qng; + ASC_EEP_SET_CHIP_ID(ep, asc_dvc_varp->cfg->chip_scsi_id); + /* 'max_tag_qng' is set to the same value for every device. */ + ep->max_tag_qng = asc_dvc_varp->cfg->max_tag_qng[0]; + ep->adapter_info[0] = asc_dvc_varp->cfg->adapter_info[0]; + ep->adapter_info[1] = asc_dvc_varp->cfg->adapter_info[1]; + ep->adapter_info[2] = asc_dvc_varp->cfg->adapter_info[2]; + ep->adapter_info[3] = asc_dvc_varp->cfg->adapter_info[3]; + ep->adapter_info[4] = asc_dvc_varp->cfg->adapter_info[4]; + ep->adapter_info[5] = asc_dvc_varp->cfg->adapter_info[5]; + + /* + * Modify board configuration. + */ + ASC_DBG(2, "AscInitSetConfig()\n"); + ret = AscInitSetConfig(pdev, shost) ? -ENODEV : 0; + if (ret) + goto err_unmap; + } else { + ADVEEP_3550_CONFIG *ep_3550; + ADVEEP_38C0800_CONFIG *ep_38C0800; + ADVEEP_38C1600_CONFIG *ep_38C1600; + + /* + * Save Wide EEP Configuration Information. + */ + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { + ep_3550 = &boardp->eep_config.adv_3550_eep; + + ep_3550->adapter_scsi_id = adv_dvc_varp->chip_scsi_id; + ep_3550->max_host_qng = adv_dvc_varp->max_host_qng; + ep_3550->max_dvc_qng = adv_dvc_varp->max_dvc_qng; + ep_3550->termination = adv_dvc_varp->cfg->termination; + ep_3550->disc_enable = adv_dvc_varp->cfg->disc_enable; + ep_3550->bios_ctrl = adv_dvc_varp->bios_ctrl; + ep_3550->wdtr_able = adv_dvc_varp->wdtr_able; + ep_3550->sdtr_able = adv_dvc_varp->sdtr_able; + ep_3550->ultra_able = adv_dvc_varp->ultra_able; + ep_3550->tagqng_able = adv_dvc_varp->tagqng_able; + ep_3550->start_motor = adv_dvc_varp->start_motor; + ep_3550->scsi_reset_delay = + adv_dvc_varp->scsi_reset_wait; + ep_3550->serial_number_word1 = + adv_dvc_varp->cfg->serial1; + ep_3550->serial_number_word2 = + adv_dvc_varp->cfg->serial2; + ep_3550->serial_number_word3 = + adv_dvc_varp->cfg->serial3; + } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { + ep_38C0800 = &boardp->eep_config.adv_38C0800_eep; + + ep_38C0800->adapter_scsi_id = + adv_dvc_varp->chip_scsi_id; + ep_38C0800->max_host_qng = adv_dvc_varp->max_host_qng; + ep_38C0800->max_dvc_qng = adv_dvc_varp->max_dvc_qng; + ep_38C0800->termination_lvd = + adv_dvc_varp->cfg->termination; + ep_38C0800->disc_enable = + adv_dvc_varp->cfg->disc_enable; + ep_38C0800->bios_ctrl = adv_dvc_varp->bios_ctrl; + ep_38C0800->wdtr_able = adv_dvc_varp->wdtr_able; + ep_38C0800->tagqng_able = adv_dvc_varp->tagqng_able; + ep_38C0800->sdtr_speed1 = adv_dvc_varp->sdtr_speed1; + ep_38C0800->sdtr_speed2 = adv_dvc_varp->sdtr_speed2; + ep_38C0800->sdtr_speed3 = adv_dvc_varp->sdtr_speed3; + ep_38C0800->sdtr_speed4 = adv_dvc_varp->sdtr_speed4; + ep_38C0800->tagqng_able = adv_dvc_varp->tagqng_able; + ep_38C0800->start_motor = adv_dvc_varp->start_motor; + ep_38C0800->scsi_reset_delay = + adv_dvc_varp->scsi_reset_wait; + ep_38C0800->serial_number_word1 = + adv_dvc_varp->cfg->serial1; + ep_38C0800->serial_number_word2 = + adv_dvc_varp->cfg->serial2; + ep_38C0800->serial_number_word3 = + adv_dvc_varp->cfg->serial3; + } else { + ep_38C1600 = &boardp->eep_config.adv_38C1600_eep; + + ep_38C1600->adapter_scsi_id = + adv_dvc_varp->chip_scsi_id; + ep_38C1600->max_host_qng 
= adv_dvc_varp->max_host_qng; + ep_38C1600->max_dvc_qng = adv_dvc_varp->max_dvc_qng; + ep_38C1600->termination_lvd = + adv_dvc_varp->cfg->termination; + ep_38C1600->disc_enable = + adv_dvc_varp->cfg->disc_enable; + ep_38C1600->bios_ctrl = adv_dvc_varp->bios_ctrl; + ep_38C1600->wdtr_able = adv_dvc_varp->wdtr_able; + ep_38C1600->tagqng_able = adv_dvc_varp->tagqng_able; + ep_38C1600->sdtr_speed1 = adv_dvc_varp->sdtr_speed1; + ep_38C1600->sdtr_speed2 = adv_dvc_varp->sdtr_speed2; + ep_38C1600->sdtr_speed3 = adv_dvc_varp->sdtr_speed3; + ep_38C1600->sdtr_speed4 = adv_dvc_varp->sdtr_speed4; + ep_38C1600->tagqng_able = adv_dvc_varp->tagqng_able; + ep_38C1600->start_motor = adv_dvc_varp->start_motor; + ep_38C1600->scsi_reset_delay = + adv_dvc_varp->scsi_reset_wait; + ep_38C1600->serial_number_word1 = + adv_dvc_varp->cfg->serial1; + ep_38C1600->serial_number_word2 = + adv_dvc_varp->cfg->serial2; + ep_38C1600->serial_number_word3 = + adv_dvc_varp->cfg->serial3; + } + + /* + * Set the adapter's target id bit in the 'init_tidmask' field. + */ + boardp->init_tidmask |= + ADV_TID_TO_TIDMASK(adv_dvc_varp->chip_scsi_id); + } + + /* + * Channels are numbered beginning with 0. For AdvanSys one host + * structure supports one channel. Multi-channel boards have a + * separate host structure for each channel. + */ + shost->max_channel = 0; + if (ASC_NARROW_BOARD(boardp)) { + shost->max_id = ASC_MAX_TID + 1; + shost->max_lun = ASC_MAX_LUN + 1; + shost->max_cmd_len = ASC_MAX_CDB_LEN; + + shost->io_port = asc_dvc_varp->iop_base; + boardp->asc_n_io_port = ASC_IOADR_GAP; + shost->this_id = asc_dvc_varp->cfg->chip_scsi_id; + + /* Set maximum number of queues the adapter can handle. */ + shost->can_queue = asc_dvc_varp->max_total_qng; + } else { + shost->max_id = ADV_MAX_TID + 1; + shost->max_lun = ADV_MAX_LUN + 1; + shost->max_cmd_len = ADV_MAX_CDB_LEN; + + /* + * Save the I/O Port address and length even though + * I/O ports are not used to access Wide boards. + * Instead the Wide boards are accessed with + * PCI Memory Mapped I/O. + */ + shost->io_port = iop; + + shost->this_id = adv_dvc_varp->chip_scsi_id; + + /* Set maximum number of queues the adapter can handle. */ + shost->can_queue = adv_dvc_varp->max_host_qng; + } + + /* + * Set the maximum number of scatter-gather elements the + * adapter can handle. + */ + if (ASC_NARROW_BOARD(boardp)) { + /* + * Allow two commands with 'sg_tablesize' scatter-gather + * elements to be executed simultaneously. This value is + * the theoretical hardware limit. It may be decreased + * below. + */ + shost->sg_tablesize = + (((asc_dvc_varp->max_total_qng - 2) / 2) * + ASC_SG_LIST_PER_Q) + 1; + } else { + shost->sg_tablesize = ADV_MAX_SG_LIST; + } + + /* + * The value of 'sg_tablesize' can not exceed the SCSI + * mid-level driver definition of SG_ALL. SG_ALL also + * must not be exceeded, because it is used to define the + * size of the scatter-gather table in 'struct asc_sg_head'. + */ + if (shost->sg_tablesize > SG_ALL) { + shost->sg_tablesize = SG_ALL; + } + + ASC_DBG(1, "sg_tablesize: %d\n", shost->sg_tablesize); + + /* BIOS start address. */ + if (ASC_NARROW_BOARD(boardp)) { + shost->base = AscGetChipBiosAddress(asc_dvc_varp->iop_base, + asc_dvc_varp->bus_type); + } else { + /* + * Fill-in BIOS board variables. The Wide BIOS saves + * information in LRAM that is used by the driver. 
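For narrow boards, the scatter-gather limit computed above reserves two queue entries, halves the remainder so two commands can be outstanding at once, multiplies by ASC_SG_LIST_PER_Q elements per queue entry, adds one, and finally clamps to the mid-layer's SG_ALL. A small sketch of that arithmetic follows; the helper name and the example numbers are illustrative, only the formula is taken from the code above.

/* Sketch only: the narrow-board sg_tablesize computation with the
 * SG_ALL clamp applied by the driver. With a hypothetical
 * max_total_qng of 20 and 7 SG elements per queue entry this gives
 * ((20 - 2) / 2) * 7 + 1 = 64. */
static unsigned int narrow_sg_tablesize_sketch(unsigned int max_total_qng,
                                               unsigned int sg_per_q,
                                               unsigned int sg_all)
{
        unsigned int n = ((max_total_qng - 2) / 2) * sg_per_q + 1;

        return n > sg_all ? sg_all : n;
}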
+ */ + AdvReadWordLram(adv_dvc_varp->iop_base, + BIOS_SIGNATURE, boardp->bios_signature); + AdvReadWordLram(adv_dvc_varp->iop_base, + BIOS_VERSION, boardp->bios_version); + AdvReadWordLram(adv_dvc_varp->iop_base, + BIOS_CODESEG, boardp->bios_codeseg); + AdvReadWordLram(adv_dvc_varp->iop_base, + BIOS_CODELEN, boardp->bios_codelen); + + ASC_DBG(1, "bios_signature 0x%x, bios_version 0x%x\n", + boardp->bios_signature, boardp->bios_version); + + ASC_DBG(1, "bios_codeseg 0x%x, bios_codelen 0x%x\n", + boardp->bios_codeseg, boardp->bios_codelen); + + /* + * If the BIOS saved a valid signature, then fill in + * the BIOS code segment base address. + */ + if (boardp->bios_signature == 0x55AA) { + /* + * Convert x86 realmode code segment to a linear + * address by shifting left 4. + */ + shost->base = ((ulong)boardp->bios_codeseg << 4); + } else { + shost->base = 0; + } + } + + /* + * Register Board Resources - I/O Port, DMA, IRQ + */ + + /* Register DMA Channel for Narrow boards. */ + shost->dma_channel = NO_ISA_DMA; /* Default to no ISA DMA. */ + + /* Register IRQ Number. */ + ASC_DBG(2, "request_irq(%d, %p)\n", boardp->irq, shost); + + ret = request_irq(boardp->irq, advansys_interrupt, share_irq, + DRV_NAME, shost); + + if (ret) { + if (ret == -EBUSY) { + shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x " + "already in use\n", boardp->irq); + } else if (ret == -EINVAL) { + shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x " + "not valid\n", boardp->irq); + } else { + shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x " + "failed with %d\n", boardp->irq, ret); + } + goto err_unmap; + } + + /* + * Initialize board RISC chip and enable interrupts. + */ + if (ASC_NARROW_BOARD(boardp)) { + ASC_DBG(2, "AscInitAsc1000Driver()\n"); + + asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL); + if (!asc_dvc_varp->overrun_buf) { + ret = -ENOMEM; + goto err_free_irq; + } + warn_code = AscInitAsc1000Driver(asc_dvc_varp); + + if (warn_code || asc_dvc_varp->err_code) { + shost_printk(KERN_ERR, shost, "error: init_state 0x%x, " + "warn 0x%x, error 0x%x\n", + asc_dvc_varp->init_state, warn_code, + asc_dvc_varp->err_code); + if (!asc_dvc_varp->overrun_dma) { + ret = -ENODEV; + goto err_free_mem; + } + } + } else { + if (advansys_wide_init_chip(shost)) { + ret = -ENODEV; + goto err_free_mem; + } + } + + ASC_DBG_PRT_SCSI_HOST(2, shost); + + ret = scsi_add_host(shost, boardp->dev); + if (ret) + goto err_free_mem; + + scsi_scan_host(shost); + return 0; + + err_free_mem: + if (ASC_NARROW_BOARD(boardp)) { + if (asc_dvc_varp->overrun_dma) + dma_unmap_single(boardp->dev, asc_dvc_varp->overrun_dma, + ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); + kfree(asc_dvc_varp->overrun_buf); + } else + advansys_wide_free_mem(boardp); + err_free_irq: + free_irq(boardp->irq, shost); + err_unmap: + if (boardp->ioremap_addr) + iounmap(boardp->ioremap_addr); +#ifdef CONFIG_PCI + err_shost: +#endif + return ret; +} + +/* + * advansys_release() + * + * Release resources allocated for a single AdvanSys adapter. 
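When the BIOS left the 0x55AA signature in LRAM, the code above turns the saved real-mode code segment into the linear address reported through shost->base by shifting it left four bits (segment times 16). A one-line sketch of that conversion; the helper name and the example segment value are illustrative.

/* Sketch only: x86 real-mode segment -> linear address, as used for
 * shost->base. A BIOS code segment of 0xC800 maps to linear 0xC8000. */
static unsigned long bios_codeseg_to_linear_sketch(unsigned short codeseg)
{
        return (unsigned long)codeseg << 4;
}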
+ */ +static int advansys_release(struct Scsi_Host *shost) +{ + struct asc_board *board = shost_priv(shost); + ASC_DBG(1, "begin\n"); + scsi_remove_host(shost); + free_irq(board->irq, shost); + + if (ASC_NARROW_BOARD(board)) { + dma_unmap_single(board->dev, + board->dvc_var.asc_dvc_var.overrun_dma, + ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); + kfree(board->dvc_var.asc_dvc_var.overrun_buf); + } else { + iounmap(board->ioremap_addr); + advansys_wide_free_mem(board); + } + scsi_host_put(shost); + ASC_DBG(1, "end\n"); + return 0; +} + +#define ASC_IOADR_TABLE_MAX_IX 11 + +static PortAddr _asc_def_iop_base[ASC_IOADR_TABLE_MAX_IX] = { + 0x100, 0x0110, 0x120, 0x0130, 0x140, 0x0150, 0x0190, + 0x0210, 0x0230, 0x0250, 0x0330 +}; + +static void advansys_vlb_remove(struct device *dev, unsigned int id) +{ + int ioport = _asc_def_iop_base[id]; + advansys_release(dev_get_drvdata(dev)); + release_region(ioport, ASC_IOADR_GAP); +} + +/* + * The VLB IRQ number is found in bits 2 to 4 of the CfgLsw. It decodes as: + * 000: invalid + * 001: 10 + * 010: 11 + * 011: 12 + * 100: invalid + * 101: 14 + * 110: 15 + * 111: invalid + */ +static unsigned int advansys_vlb_irq_no(PortAddr iop_base) +{ + unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base); + unsigned int chip_irq = ((cfg_lsw >> 2) & 0x07) + 9; + if ((chip_irq < 10) || (chip_irq == 13) || (chip_irq > 15)) + return 0; + return chip_irq; +} + +static int advansys_vlb_probe(struct device *dev, unsigned int id) +{ + int err = -ENODEV; + PortAddr iop_base = _asc_def_iop_base[id]; + struct Scsi_Host *shost; + struct asc_board *board; + + if (!request_region(iop_base, ASC_IOADR_GAP, DRV_NAME)) { + ASC_DBG(1, "I/O port 0x%x busy\n", iop_base); + return -ENODEV; + } + ASC_DBG(1, "probing I/O port 0x%x\n", iop_base); + if (!AscFindSignature(iop_base)) + goto release_region; + /* + * I don't think this condition can actually happen, but the old + * driver did it, and the chances of finding a VLB setup in 2007 + * to do testing with is slight to none. + */ + if (AscGetChipVersion(iop_base, ASC_IS_VL) > ASC_CHIP_MAX_VER_VL) + goto release_region; + + err = -ENOMEM; + shost = scsi_host_alloc(&advansys_template, sizeof(*board)); + if (!shost) + goto release_region; + + board = shost_priv(shost); + board->irq = advansys_vlb_irq_no(iop_base); + board->dev = dev; + board->shost = shost; + + err = advansys_board_found(shost, iop_base, ASC_IS_VL); + if (err) + goto free_host; + + dev_set_drvdata(dev, shost); + return 0; + + free_host: + scsi_host_put(shost); + release_region: + release_region(iop_base, ASC_IOADR_GAP); + return -ENODEV; +} + +static struct isa_driver advansys_vlb_driver = { + .probe = advansys_vlb_probe, + .remove = advansys_vlb_remove, + .driver = { + .owner = THIS_MODULE, + .name = "advansys_vlb", + }, +}; + +static struct eisa_device_id advansys_eisa_table[] = { + { "ABP7401" }, + { "ABP7501" }, + { "" } +}; + +MODULE_DEVICE_TABLE(eisa, advansys_eisa_table); + +/* + * EISA is a little more tricky than PCI; each EISA device may have two + * channels, and this driver is written to make each channel its own Scsi_Host + */ +struct eisa_scsi_data { + struct Scsi_Host *host[2]; +}; + +/* + * The EISA IRQ number is found in bits 8 to 10 of the CfgLsw. 
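advansys_vlb_irq_no() above implements the decode table given in its comment: bits 2 to 4 of the CfgLsw plus 9, with the results 9, 13 and 16 rejected as invalid. The standalone sketch below repeats only that arithmetic; the helper name is illustrative.

/* Sketch only: VLB IRQ decode from CfgLsw bits 2..4.
 * Field 0 -> 9 (invalid), 1 -> 10, 2 -> 11, 3 -> 12,
 * 4 -> 13 (invalid), 5 -> 14, 6 -> 15, 7 -> 16 (invalid). */
static unsigned int vlb_irq_decode_sketch(unsigned short cfg_lsw)
{
        unsigned int irq = ((cfg_lsw >> 2) & 0x07) + 9;

        if (irq < 10 || irq == 13 || irq > 15)
                return 0;       /* 0 means no usable IRQ */
        return irq;
}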
It decodes as: + * 000: 10 + * 001: 11 + * 010: 12 + * 011: invalid + * 100: 14 + * 101: 15 + * 110: invalid + * 111: invalid + */ +static unsigned int advansys_eisa_irq_no(struct eisa_device *edev) +{ + unsigned short cfg_lsw = inw(edev->base_addr + 0xc86); + unsigned int chip_irq = ((cfg_lsw >> 8) & 0x07) + 10; + if ((chip_irq == 13) || (chip_irq > 15)) + return 0; + return chip_irq; +} + +static int advansys_eisa_probe(struct device *dev) +{ + int i, ioport, irq = 0; + int err; + struct eisa_device *edev = to_eisa_device(dev); + struct eisa_scsi_data *data; + + err = -ENOMEM; + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto fail; + ioport = edev->base_addr + 0xc30; + + err = -ENODEV; + for (i = 0; i < 2; i++, ioport += 0x20) { + struct asc_board *board; + struct Scsi_Host *shost; + if (!request_region(ioport, ASC_IOADR_GAP, DRV_NAME)) { + printk(KERN_WARNING "Region %x-%x busy\n", ioport, + ioport + ASC_IOADR_GAP - 1); + continue; + } + if (!AscFindSignature(ioport)) { + release_region(ioport, ASC_IOADR_GAP); + continue; + } + + /* + * I don't know why we need to do this for EISA chips, but + * not for any others. It looks to be equivalent to + * AscGetChipCfgMsw, but I may have overlooked something, + * so I'm not converting it until I get an EISA board to + * test with. + */ + inw(ioport + 4); + + if (!irq) + irq = advansys_eisa_irq_no(edev); + + err = -ENOMEM; + shost = scsi_host_alloc(&advansys_template, sizeof(*board)); + if (!shost) + goto release_region; + + board = shost_priv(shost); + board->irq = irq; + board->dev = dev; + board->shost = shost; + + err = advansys_board_found(shost, ioport, ASC_IS_EISA); + if (!err) { + data->host[i] = shost; + continue; + } + + scsi_host_put(shost); + release_region: + release_region(ioport, ASC_IOADR_GAP); + break; + } + + if (err) + goto free_data; + dev_set_drvdata(dev, data); + return 0; + + free_data: + kfree(data->host[0]); + kfree(data->host[1]); + kfree(data); + fail: + return err; +} + +static int advansys_eisa_remove(struct device *dev) +{ + int i; + struct eisa_scsi_data *data = dev_get_drvdata(dev); + + for (i = 0; i < 2; i++) { + int ioport; + struct Scsi_Host *shost = data->host[i]; + if (!shost) + continue; + ioport = shost->io_port; + advansys_release(shost); + release_region(ioport, ASC_IOADR_GAP); + } + + kfree(data); + return 0; +} + +static struct eisa_driver advansys_eisa_driver = { + .id_table = advansys_eisa_table, + .driver = { + .name = DRV_NAME, + .probe = advansys_eisa_probe, + .remove = advansys_eisa_remove, + } +}; + +/* PCI Devices supported by this driver */ +static struct pci_device_id advansys_pci_tbl[] = { + {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940U, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940UW, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C0800_REV1, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C1600_REV1, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {} +}; + +MODULE_DEVICE_TABLE(pci, advansys_pci_tbl); + +static void advansys_set_latency(struct pci_dev *pdev) +{ + if ((pdev->device == PCI_DEVICE_ID_ASP_1200A) || + (pdev->device == PCI_DEVICE_ID_ASP_ABP940)) { + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0); + } else { + u8 latency; + pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency); + if (latency < 0x20) + 
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x20); + } +} + +static int advansys_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int err, ioport; + struct Scsi_Host *shost; + struct asc_board *board; + + err = pci_enable_device(pdev); + if (err) + goto fail; + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto disable_device; + pci_set_master(pdev); + advansys_set_latency(pdev); + + err = -ENODEV; + if (pci_resource_len(pdev, 0) == 0) + goto release_region; + + ioport = pci_resource_start(pdev, 0); + + err = -ENOMEM; + shost = scsi_host_alloc(&advansys_template, sizeof(*board)); + if (!shost) + goto release_region; + + board = shost_priv(shost); + board->irq = pdev->irq; + board->dev = &pdev->dev; + board->shost = shost; + + if (pdev->device == PCI_DEVICE_ID_ASP_ABP940UW || + pdev->device == PCI_DEVICE_ID_38C0800_REV1 || + pdev->device == PCI_DEVICE_ID_38C1600_REV1) { + board->flags |= ASC_IS_WIDE_BOARD; + } + + err = advansys_board_found(shost, ioport, ASC_IS_PCI); + if (err) + goto free_host; + + pci_set_drvdata(pdev, shost); + return 0; + + free_host: + scsi_host_put(shost); + release_region: + pci_release_regions(pdev); + disable_device: + pci_disable_device(pdev); + fail: + return err; +} + +static void advansys_pci_remove(struct pci_dev *pdev) +{ + advansys_release(pci_get_drvdata(pdev)); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver advansys_pci_driver = { + .name = DRV_NAME, + .id_table = advansys_pci_tbl, + .probe = advansys_pci_probe, + .remove = advansys_pci_remove, +}; + +static int __init advansys_init(void) +{ + int error; + + error = isa_register_driver(&advansys_vlb_driver, + ASC_IOADR_TABLE_MAX_IX); + if (error) + goto fail; + + error = eisa_driver_register(&advansys_eisa_driver); + if (error) + goto unregister_vlb; + + error = pci_register_driver(&advansys_pci_driver); + if (error) + goto unregister_eisa; + + return 0; + + unregister_eisa: + eisa_driver_unregister(&advansys_eisa_driver); + unregister_vlb: + isa_unregister_driver(&advansys_vlb_driver); + fail: + return error; +} + +static void __exit advansys_exit(void) +{ + pci_unregister_driver(&advansys_pci_driver); + eisa_driver_unregister(&advansys_eisa_driver); + isa_unregister_driver(&advansys_vlb_driver); +} + +module_init(advansys_init); +module_exit(advansys_exit); + +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("advansys/mcode.bin"); +MODULE_FIRMWARE("advansys/3550.bin"); +MODULE_FIRMWARE("advansys/38C0800.bin"); +MODULE_FIRMWARE("advansys/38C1600.bin"); diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c new file mode 100644 index 000000000..055adb349 --- /dev/null +++ b/drivers/scsi/aha152x.c @@ -0,0 +1,3434 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* aha152x.c -- Adaptec AHA-152x driver + * Author: Jürgen E. Fischer, fischer@norbit.de + * Copyright 1993-2004 Jürgen E. 
Fischer + * + * $Id: aha152x.c,v 2.7 2004/01/24 11:42:59 fischer Exp $ + * + * $Log: aha152x.c,v $ + * Revision 2.7 2004/01/24 11:42:59 fischer + * - gather code that is not used by PCMCIA at the end + * - move request_region for !PCMCIA case to detection + * - migration to new scsi host api (remove legacy code) + * - free host scribble before scsi_done + * - fix error handling + * - one isapnp device added to id_table + * + * Revision 2.6 2003/10/30 20:52:47 fischer + * - interfaces changes for kernel 2.6 + * - aha152x_probe_one introduced for pcmcia stub + * - fixed pnpdev handling + * - instead of allocation a new one, reuse command for request sense after check condition and reset + * - fixes race in is_complete + * + * Revision 2.5 2002/04/14 11:24:53 fischer + * - isapnp support + * - abort fixed + * - 2.5 support + * + * Revision 2.4 2000/12/16 12:53:56 fischer + * - allow REQUEST SENSE to be queued + * - handle shared PCI interrupts + * + * Revision 2.3 2000/11/04 16:40:26 fischer + * - handle data overruns + * - extend timeout for data phases + * + * Revision 2.2 2000/08/08 19:54:53 fischer + * - minor changes + * + * Revision 2.1 2000/05/17 16:23:17 fischer + * - signature update + * - fix for data out w/o scatter gather + * + * Revision 2.0 1999/12/25 15:07:32 fischer + * - interrupt routine completly reworked + * - basic support for new eh code + * + * Revision 1.21 1999/11/10 23:46:36 fischer + * - default to synchronous operation + * - synchronous negotiation fixed + * - added timeout to loops + * - debugging output can be controlled through procfs + * + * Revision 1.20 1999/11/07 18:37:31 fischer + * - synchronous operation works + * - resid support for sg driver + * + * Revision 1.19 1999/11/02 22:39:59 fischer + * - moved leading comments to README.aha152x + * - new additional module parameters + * - updates for 2.3 + * - support for the Tripace TC1550 controller + * - interrupt handling changed + * + * Revision 1.18 1996/09/07 20:10:40 fischer + * - fixed can_queue handling (multiple outstanding commands working again) + * + * Revision 1.17 1996/08/17 16:05:14 fischer + * - biosparam improved + * - interrupt verification + * - updated documentation + * - cleanups + * + * Revision 1.16 1996/06/09 00:04:56 root + * - added configuration symbols for insmod (aha152x/aha152x1) + * + * Revision 1.15 1996/04/30 14:52:06 fischer + * - proc info fixed + * - support for extended translation for >1GB disks + * + * Revision 1.14 1996/01/17 15:11:20 fischer + * - fixed lockup in MESSAGE IN phase after reconnection + * + * Revision 1.13 1996/01/09 02:15:53 fischer + * - some cleanups + * - moved request_irq behind controller initialization + * (to avoid spurious interrupts) + * + * Revision 1.12 1995/12/16 12:26:07 fischer + * - barrier()s added + * - configurable RESET delay added + * + * Revision 1.11 1995/12/06 21:18:35 fischer + * - some minor updates + * + * Revision 1.10 1995/07/22 19:18:45 fischer + * - support for 2 controllers + * - started synchronous data transfers (not working yet) + * + * Revision 1.9 1995/03/18 09:20:24 root + * - patches for PCMCIA and modules + * + * Revision 1.8 1995/01/21 22:07:19 root + * - snarf_region => request_region + * - aha152x_intr interface change + * + * Revision 1.7 1995/01/02 23:19:36 root + * - updated COMMAND_SIZE to cmd_len + * - changed sti() to restore_flags() + * - fixed some #ifdef which generated warnings + * + * Revision 1.6 1994/11/24 20:35:27 root + * - problem with odd number of bytes in fifo fixed + * + * Revision 1.5 
1994/10/30 14:39:56 root + * - abort code fixed + * - debugging improved + * + * Revision 1.4 1994/09/12 11:33:01 root + * - irqaction to request_irq + * - abortion updated + * + * Revision 1.3 1994/08/04 13:53:05 root + * - updates for mid-level-driver changes + * - accept unexpected BUSFREE phase as error condition + * - parity check now configurable + * + * Revision 1.2 1994/07/03 12:56:36 root + * - cleaned up debugging code + * - more tweaking on reset delays + * - updated abort/reset code (pretty untested...) + * + * Revision 1.1 1994/05/28 21:18:49 root + * - update for mid-level interface change (abort-reset) + * - delays after resets adjusted for some slow devices + * + * Revision 1.0 1994/03/25 12:52:00 root + * - Fixed "more data than expected" problem + * - added new BIOS signatures + * + * Revision 0.102 1994/01/31 20:44:12 root + * - minor changes in insw/outsw handling + * + * Revision 0.101 1993/12/13 01:16:27 root + * - fixed STATUS phase (non-GOOD stati were dropped sometimes; + * fixes problems with CD-ROM sector size detection & media change) + * + * Revision 0.100 1993/12/10 16:58:47 root + * - fix for unsuccessful selections in case of non-continuous id assignments + * on the scsi bus. + * + * Revision 0.99 1993/10/24 16:19:59 root + * - fixed DATA IN (rare read errors gone) + * + * Revision 0.98 1993/10/17 12:54:44 root + * - fixed some recent fixes (shame on me) + * - moved initialization of scratch area to aha152x_queue + * + * Revision 0.97 1993/10/09 18:53:53 root + * - DATA IN fixed. Rarely left data in the fifo. + * + * Revision 0.96 1993/10/03 00:53:59 root + * - minor changes on DATA IN + * + * Revision 0.95 1993/09/24 10:36:01 root + * - change handling of MSGI after reselection + * - fixed sti/cli + * - minor changes + * + * Revision 0.94 1993/09/18 14:08:22 root + * - fixed bug in multiple outstanding command code + * - changed detection + * - support for kernel command line configuration + * - reset corrected + * - changed message handling + * + * Revision 0.93 1993/09/15 20:41:19 root + * - fixed bugs with multiple outstanding commands + * + * Revision 0.92 1993/09/13 02:46:33 root + * - multiple outstanding commands work (no problems with IBM drive) + * + * Revision 0.91 1993/09/12 20:51:46 root + * added multiple outstanding commands + * (some problem with this $%&? IBM device remain) + * + * Revision 0.9 1993/09/12 11:11:22 root + * - corrected auto-configuration + * - changed the auto-configuration (added some '#define's) + * - added support for dis-/reconnection + * + * Revision 0.8 1993/09/06 23:09:39 root + * - added support for the drive activity light + * - minor changes + * + * Revision 0.7 1993/09/05 14:30:15 root + * - improved phase detection + * - now using the new snarf_region code of 0.99pl13 + * + * Revision 0.6 1993/09/02 11:01:38 root + * first public release; added some signatures and biosparam() + * + * Revision 0.5 1993/08/30 10:23:30 root + * fixed timing problems with my IBM drive + * + * Revision 0.4 1993/08/29 14:06:52 root + * fixed some problems with timeouts due incomplete commands + * + * Revision 0.3 1993/08/28 15:55:03 root + * writing data works too. mounted and worked on a dos partition + * + * Revision 0.2 1993/08/27 22:42:07 root + * reading data works. Mounted a msdos partition. + * + * Revision 0.1 1993/08/25 13:38:30 root + * first "damn thing doesn't work" version + * + * Revision 0.0 1993/08/14 19:54:25 root + * empty function bodies; detect() works. 
+ * + ************************************************************************** + + see Documentation/scsi/aha152x.rst for configuration details + + **************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "aha152x.h" + +static LIST_HEAD(aha152x_host_list); + + +/* DEFINES */ + +/* For PCMCIA cards, always use AUTOCONF */ +#if defined(AHA152X_PCMCIA) || defined(MODULE) +#if !defined(AUTOCONF) +#define AUTOCONF +#endif +#endif + +#if !defined(AUTOCONF) && !defined(SETUP0) +#error define AUTOCONF or SETUP0 +#endif + +#define DO_LOCK(flags) spin_lock_irqsave(&QLOCK,flags) +#define DO_UNLOCK(flags) spin_unlock_irqrestore(&QLOCK,flags) + +#define LEAD "(scsi%d:%d:%d) " +#define INFO_LEAD KERN_INFO LEAD +#define CMDINFO(cmd) \ + (cmd) ? ((cmd)->device->host->host_no) : -1, \ + (cmd) ? ((cmd)->device->id & 0x0f) : -1, \ + (cmd) ? ((u8)(cmd)->device->lun & 0x07) : -1 + +static inline void +CMD_INC_RESID(struct scsi_cmnd *cmd, int inc) +{ + scsi_set_resid(cmd, scsi_get_resid(cmd) + inc); +} + +#define DELAY_DEFAULT 1000 + +#if defined(AHA152X_PCMCIA) +#define IRQ_MIN 0 +#define IRQ_MAX 16 +#else +#define IRQ_MIN 9 +#if defined(__PPC) +#define IRQ_MAX (nr_irqs-1) +#else +#define IRQ_MAX 12 +#endif +#endif + +enum { + not_issued = 0x0001, /* command not yet issued */ + selecting = 0x0002, /* target is being selected */ + identified = 0x0004, /* IDENTIFY was sent */ + disconnected = 0x0008, /* target disconnected */ + completed = 0x0010, /* target sent COMMAND COMPLETE */ + aborted = 0x0020, /* ABORT was sent */ + resetted = 0x0040, /* BUS DEVICE RESET was sent */ + spiordy = 0x0080, /* waiting for SPIORDY to raise */ + syncneg = 0x0100, /* synchronous negotiation in progress */ + aborting = 0x0200, /* ABORT is pending */ + resetting = 0x0400, /* BUS DEVICE RESET is pending */ + check_condition = 0x0800, /* requesting sense after CHECK CONDITION */ +}; + +struct aha152x_cmd_priv { + char *ptr; + int this_residual; + struct scatterlist *buffer; + int status; + int message; + int sent_command; + int phase; +}; + +static struct aha152x_cmd_priv *aha152x_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +MODULE_AUTHOR("Jürgen Fischer"); +MODULE_DESCRIPTION(AHA152X_REVID); +MODULE_LICENSE("GPL"); + +#if !defined(AHA152X_PCMCIA) +#if defined(MODULE) +static int io[] = {0, 0}; +module_param_hw_array(io, int, ioport, NULL, 0); +MODULE_PARM_DESC(io,"base io address of controller"); + +static int irq[] = {0, 0}; +module_param_hw_array(irq, int, irq, NULL, 0); +MODULE_PARM_DESC(irq,"interrupt for controller"); + +static int scsiid[] = {7, 7}; +module_param_array(scsiid, int, NULL, 0); +MODULE_PARM_DESC(scsiid,"scsi id of controller"); + +static int reconnect[] = {1, 1}; +module_param_array(reconnect, int, NULL, 0); +MODULE_PARM_DESC(reconnect,"allow targets to disconnect"); + +static int parity[] = {1, 1}; +module_param_array(parity, int, NULL, 0); +MODULE_PARM_DESC(parity,"use scsi parity"); + +static int sync[] = {1, 1}; +module_param_array(sync, int, NULL, 0); +MODULE_PARM_DESC(sync,"use synchronous transfers"); + +static int delay[] = {DELAY_DEFAULT, DELAY_DEFAULT}; +module_param_array(delay, int, NULL, 0); +MODULE_PARM_DESC(delay,"scsi reset delay"); + +static int exttrans[] = {0, 0}; 
+module_param_array(exttrans, int, NULL, 0); +MODULE_PARM_DESC(exttrans,"use extended translation"); + +static int aha152x[] = {0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0}; +module_param_array(aha152x, int, NULL, 0); +MODULE_PARM_DESC(aha152x, "parameters for first controller"); + +static int aha152x1[] = {0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0}; +module_param_array(aha152x1, int, NULL, 0); +MODULE_PARM_DESC(aha152x1, "parameters for second controller"); +#endif /* MODULE */ + +#ifdef __ISAPNP__ +static struct isapnp_device_id id_table[] = { + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1502), 0 }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1505), 0 }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1510), 0 }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1515), 0 }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1520), 0 }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x2015), 0 }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1522), 0 }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x2215), 0 }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1530), 0 }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x3015), 0 }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1532), 0 }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x3215), 0 }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x6360), 0 }, + { ISAPNP_DEVICE_SINGLE_END, } +}; +MODULE_DEVICE_TABLE(isapnp, id_table); +#endif /* ISAPNP */ + +#endif /* !AHA152X_PCMCIA */ + +static const struct scsi_host_template aha152x_driver_template; + +/* + * internal states of the host + * + */ +enum aha152x_state { + idle=0, + unknown, + seldo, + seldi, + selto, + busfree, + msgo, + cmd, + msgi, + status, + datai, + datao, + parerr, + rsti, + maxstate +}; + +/* + * current state information of the host + * + */ +struct aha152x_hostdata { + struct scsi_cmnd *issue_SC; + /* pending commands to issue */ + + struct scsi_cmnd *current_SC; + /* current command on the bus */ + + struct scsi_cmnd *disconnected_SC; + /* commands that disconnected */ + + struct scsi_cmnd *done_SC; + /* command that was completed */ + + spinlock_t lock; + /* host lock */ + +#if defined(AHA152X_STAT) + int total_commands; + int disconnections; + int busfree_without_any_action; + int busfree_without_old_command; + int busfree_without_new_command; + int busfree_without_done_command; + int busfree_with_check_condition; + int count[maxstate]; + int count_trans[maxstate]; + unsigned long time[maxstate]; +#endif + + int commands; /* current number of commands */ + + int reconnect; /* disconnection allowed */ + int parity; /* parity checking enabled */ + int synchronous; /* synchronous transferes enabled */ + int delay; /* reset out delay */ + int ext_trans; /* extended translation enabled */ + + int swint; /* software-interrupt was fired during detect() */ + int service; /* bh needs to be run */ + int in_intr; /* bh is running */ + + /* current state, + previous state, + last state different from current state */ + enum aha152x_state state, prevstate, laststate; + + int target; + /* reconnecting target */ + + unsigned char syncrate[8]; + /* current 
synchronous transfer agreements */ + + unsigned char syncneg[8]; + /* 0: no negotiation; + * 1: negotiation in progress; + * 2: negotiation completed + */ + + int cmd_i; + /* number of sent bytes of current command */ + + int msgi_len; + /* number of received message bytes */ + unsigned char msgi[256]; + /* received message bytes */ + + int msgo_i, msgo_len; + /* number of sent bytes and length of current messages */ + unsigned char msgo[256]; + /* pending messages */ + + int data_len; + /* number of sent/received bytes in dataphase */ + + unsigned long io_port0; + unsigned long io_port1; + +#ifdef __ISAPNP__ + struct pnp_dev *pnpdev; +#endif + struct list_head host_list; +}; + + +/* + * host specific command extension + * + */ +struct aha152x_scdata { + struct scsi_cmnd *next; /* next sc in queue */ + struct completion *done;/* semaphore to block on */ + struct scsi_eh_save ses; +}; + +/* access macros for hostdata */ + +#define HOSTDATA(shpnt) ((struct aha152x_hostdata *) &shpnt->hostdata) + +#define HOSTNO ((shpnt)->host_no) + +#define CURRENT_SC (HOSTDATA(shpnt)->current_SC) +#define DONE_SC (HOSTDATA(shpnt)->done_SC) +#define ISSUE_SC (HOSTDATA(shpnt)->issue_SC) +#define DISCONNECTED_SC (HOSTDATA(shpnt)->disconnected_SC) +#define QLOCK (HOSTDATA(shpnt)->lock) +#define QLOCKER (HOSTDATA(shpnt)->locker) +#define QLOCKERL (HOSTDATA(shpnt)->lockerl) + +#define STATE (HOSTDATA(shpnt)->state) +#define PREVSTATE (HOSTDATA(shpnt)->prevstate) +#define LASTSTATE (HOSTDATA(shpnt)->laststate) + +#define RECONN_TARGET (HOSTDATA(shpnt)->target) + +#define CMD_I (HOSTDATA(shpnt)->cmd_i) + +#define MSGO(i) (HOSTDATA(shpnt)->msgo[i]) +#define MSGO_I (HOSTDATA(shpnt)->msgo_i) +#define MSGOLEN (HOSTDATA(shpnt)->msgo_len) +#define ADDMSGO(x) (MSGOLEN<256 ? (void)(MSGO(MSGOLEN++)=x) : aha152x_error(shpnt,"MSGO overflow")) + +#define MSGI(i) (HOSTDATA(shpnt)->msgi[i]) +#define MSGILEN (HOSTDATA(shpnt)->msgi_len) +#define ADDMSGI(x) (MSGILEN<256 ? 
(void)(MSGI(MSGILEN++)=x) : aha152x_error(shpnt,"MSGI overflow")) + +#define DATA_LEN (HOSTDATA(shpnt)->data_len) + +#define SYNCRATE (HOSTDATA(shpnt)->syncrate[CURRENT_SC->device->id]) +#define SYNCNEG (HOSTDATA(shpnt)->syncneg[CURRENT_SC->device->id]) + +#define DELAY (HOSTDATA(shpnt)->delay) +#define EXT_TRANS (HOSTDATA(shpnt)->ext_trans) +#define TC1550 (HOSTDATA(shpnt)->tc1550) +#define RECONNECT (HOSTDATA(shpnt)->reconnect) +#define PARITY (HOSTDATA(shpnt)->parity) +#define SYNCHRONOUS (HOSTDATA(shpnt)->synchronous) + +#define HOSTIOPORT0 (HOSTDATA(shpnt)->io_port0) +#define HOSTIOPORT1 (HOSTDATA(shpnt)->io_port1) + +#define SCDATA(SCpnt) ((struct aha152x_scdata *) (SCpnt)->host_scribble) +#define SCNEXT(SCpnt) SCDATA(SCpnt)->next +#define SCSEM(SCpnt) SCDATA(SCpnt)->done + +#define SG_ADDRESS(buffer) ((char *) sg_virt((buffer))) + +/* state handling */ +static void seldi_run(struct Scsi_Host *shpnt); +static void seldo_run(struct Scsi_Host *shpnt); +static void selto_run(struct Scsi_Host *shpnt); +static void busfree_run(struct Scsi_Host *shpnt); + +static void msgo_init(struct Scsi_Host *shpnt); +static void msgo_run(struct Scsi_Host *shpnt); +static void msgo_end(struct Scsi_Host *shpnt); + +static void cmd_init(struct Scsi_Host *shpnt); +static void cmd_run(struct Scsi_Host *shpnt); +static void cmd_end(struct Scsi_Host *shpnt); + +static void datai_init(struct Scsi_Host *shpnt); +static void datai_run(struct Scsi_Host *shpnt); +static void datai_end(struct Scsi_Host *shpnt); + +static void datao_init(struct Scsi_Host *shpnt); +static void datao_run(struct Scsi_Host *shpnt); +static void datao_end(struct Scsi_Host *shpnt); + +static void status_run(struct Scsi_Host *shpnt); + +static void msgi_run(struct Scsi_Host *shpnt); +static void msgi_end(struct Scsi_Host *shpnt); + +static void parerr_run(struct Scsi_Host *shpnt); +static void rsti_run(struct Scsi_Host *shpnt); + +static void is_complete(struct Scsi_Host *shpnt); + +/* + * driver states + * + */ +static struct { + char *name; + void (*init)(struct Scsi_Host *); + void (*run)(struct Scsi_Host *); + void (*end)(struct Scsi_Host *); + int spio; +} states[] = { + { "idle", NULL, NULL, NULL, 0}, + { "unknown", NULL, NULL, NULL, 0}, + { "seldo", NULL, seldo_run, NULL, 0}, + { "seldi", NULL, seldi_run, NULL, 0}, + { "selto", NULL, selto_run, NULL, 0}, + { "busfree", NULL, busfree_run, NULL, 0}, + { "msgo", msgo_init, msgo_run, msgo_end, 1}, + { "cmd", cmd_init, cmd_run, cmd_end, 1}, + { "msgi", NULL, msgi_run, msgi_end, 1}, + { "status", NULL, status_run, NULL, 1}, + { "datai", datai_init, datai_run, datai_end, 0}, + { "datao", datao_init, datao_run, datao_end, 0}, + { "parerr", NULL, parerr_run, NULL, 0}, + { "rsti", NULL, rsti_run, NULL, 0}, +}; + +/* setup & interrupt */ +static irqreturn_t intr(int irq, void *dev_id); +static void reset_ports(struct Scsi_Host *shpnt); +static void aha152x_error(struct Scsi_Host *shpnt, char *msg); +static void done(struct Scsi_Host *shpnt, unsigned char status_byte, + unsigned char host_byte); + +/* diagnostics */ +static void show_command(struct scsi_cmnd * ptr); +static void show_queues(struct Scsi_Host *shpnt); +static void disp_enintr(struct Scsi_Host *shpnt); + + +/* + * queue services: + * + */ +static inline void append_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC) +{ + struct scsi_cmnd *end; + + SCNEXT(new_SC) = NULL; + if (!*SC) + *SC = new_SC; + else { + for (end = *SC; SCNEXT(end); end = SCNEXT(end)) + ; + SCNEXT(end) = new_SC; + } +} + +static inline struct scsi_cmnd 
*remove_first_SC(struct scsi_cmnd ** SC) +{ + struct scsi_cmnd *ptr; + + ptr = *SC; + if (ptr) { + *SC = SCNEXT(*SC); + SCNEXT(ptr)=NULL; + } + return ptr; +} + +static inline struct scsi_cmnd *remove_lun_SC(struct scsi_cmnd ** SC, + int target, int lun) +{ + struct scsi_cmnd *ptr, *prev; + + for (ptr = *SC, prev = NULL; + ptr && ((ptr->device->id != target) || (ptr->device->lun != lun)); + prev = ptr, ptr = SCNEXT(ptr)) + ; + + if (ptr) { + if (prev) + SCNEXT(prev) = SCNEXT(ptr); + else + *SC = SCNEXT(ptr); + + SCNEXT(ptr)=NULL; + } + + return ptr; +} + +static inline struct scsi_cmnd *remove_SC(struct scsi_cmnd **SC, + struct scsi_cmnd *SCp) +{ + struct scsi_cmnd *ptr, *prev; + + for (ptr = *SC, prev = NULL; + ptr && SCp!=ptr; + prev = ptr, ptr = SCNEXT(ptr)) + ; + + if (ptr) { + if (prev) + SCNEXT(prev) = SCNEXT(ptr); + else + *SC = SCNEXT(ptr); + + SCNEXT(ptr)=NULL; + } + + return ptr; +} + +static irqreturn_t swintr(int irqno, void *dev_id) +{ + struct Scsi_Host *shpnt = dev_id; + + HOSTDATA(shpnt)->swint++; + + SETPORT(DMACNTRL0, INTEN); + return IRQ_HANDLED; +} + +struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup) +{ + struct Scsi_Host *shpnt; + + shpnt = scsi_host_alloc(&aha152x_driver_template, sizeof(struct aha152x_hostdata)); + if (!shpnt) { + printk(KERN_ERR "aha152x: scsi_host_alloc failed\n"); + return NULL; + } + + memset(HOSTDATA(shpnt), 0, sizeof *HOSTDATA(shpnt)); + INIT_LIST_HEAD(&HOSTDATA(shpnt)->host_list); + + /* need to have host registered before triggering any interrupt */ + list_add_tail(&HOSTDATA(shpnt)->host_list, &aha152x_host_list); + + shpnt->io_port = setup->io_port; + shpnt->n_io_port = IO_RANGE; + shpnt->irq = setup->irq; + + if (!setup->tc1550) { + HOSTIOPORT0 = setup->io_port; + HOSTIOPORT1 = setup->io_port; + } else { + HOSTIOPORT0 = setup->io_port+0x10; + HOSTIOPORT1 = setup->io_port-0x10; + } + + spin_lock_init(&QLOCK); + RECONNECT = setup->reconnect; + SYNCHRONOUS = setup->synchronous; + PARITY = setup->parity; + DELAY = setup->delay; + EXT_TRANS = setup->ext_trans; + + SETPORT(SCSIID, setup->scsiid << 4); + shpnt->this_id = setup->scsiid; + + if (setup->reconnect) + shpnt->can_queue = AHA152X_MAXQUEUE; + + /* RESET OUT */ + printk("aha152x: resetting bus...\n"); + SETPORT(SCSISEQ, SCSIRSTO); + mdelay(256); + SETPORT(SCSISEQ, 0); + mdelay(DELAY); + + reset_ports(shpnt); + + printk(KERN_INFO + "aha152x%d%s: " + "vital data: rev=%x, " + "io=0x%03lx (0x%03lx/0x%03lx), " + "irq=%d, " + "scsiid=%d, " + "reconnect=%s, " + "parity=%s, " + "synchronous=%s, " + "delay=%d, " + "extended translation=%s\n", + shpnt->host_no, setup->tc1550 ? " (tc1550 mode)" : "", + GETPORT(REV) & 0x7, + shpnt->io_port, HOSTIOPORT0, HOSTIOPORT1, + shpnt->irq, + shpnt->this_id, + RECONNECT ? "enabled" : "disabled", + PARITY ? "enabled" : "disabled", + SYNCHRONOUS ? "enabled" : "disabled", + DELAY, + EXT_TRANS ? 
"enabled" : "disabled"); + + /* not expecting any interrupts */ + SETPORT(SIMODE0, 0); + SETPORT(SIMODE1, 0); + + if (request_irq(shpnt->irq, swintr, IRQF_SHARED, "aha152x", shpnt)) { + printk(KERN_ERR "aha152x%d: irq %d busy.\n", shpnt->host_no, shpnt->irq); + goto out_host_put; + } + + HOSTDATA(shpnt)->swint = 0; + + printk(KERN_INFO "aha152x%d: trying software interrupt, ", shpnt->host_no); + + mb(); + SETPORT(DMACNTRL0, SWINT|INTEN); + mdelay(1000); + free_irq(shpnt->irq, shpnt); + + if (!HOSTDATA(shpnt)->swint) { + if (TESTHI(DMASTAT, INTSTAT)) { + printk("lost.\n"); + } else { + printk("failed.\n"); + } + + SETPORT(DMACNTRL0, INTEN); + + printk(KERN_ERR "aha152x%d: irq %d possibly wrong. " + "Please verify.\n", shpnt->host_no, shpnt->irq); + goto out_host_put; + } + printk("ok.\n"); + + + /* clear interrupts */ + SETPORT(SSTAT0, 0x7f); + SETPORT(SSTAT1, 0xef); + + if (request_irq(shpnt->irq, intr, IRQF_SHARED, "aha152x", shpnt)) { + printk(KERN_ERR "aha152x%d: failed to reassign irq %d.\n", shpnt->host_no, shpnt->irq); + goto out_host_put; + } + + if( scsi_add_host(shpnt, NULL) ) { + free_irq(shpnt->irq, shpnt); + printk(KERN_ERR "aha152x%d: failed to add host.\n", shpnt->host_no); + goto out_host_put; + } + + scsi_scan_host(shpnt); + + return shpnt; + +out_host_put: + list_del(&HOSTDATA(shpnt)->host_list); + scsi_host_put(shpnt); + + return NULL; +} + +void aha152x_release(struct Scsi_Host *shpnt) +{ + if (!shpnt) + return; + + scsi_remove_host(shpnt); + if (shpnt->irq) + free_irq(shpnt->irq, shpnt); + +#if !defined(AHA152X_PCMCIA) + if (shpnt->io_port) + release_region(shpnt->io_port, IO_RANGE); +#endif + +#ifdef __ISAPNP__ + if (HOSTDATA(shpnt)->pnpdev) + pnp_device_detach(HOSTDATA(shpnt)->pnpdev); +#endif + + list_del(&HOSTDATA(shpnt)->host_list); + scsi_host_put(shpnt); +} + + +/* + * setup controller to generate interrupts depending + * on current state (lock has to be acquired) + * + */ +static int setup_expected_interrupts(struct Scsi_Host *shpnt) +{ + if(CURRENT_SC) { + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); + + acp->phase |= 1 << 16; + + if (acp->phase & selecting) { + SETPORT(SSTAT1, SELTO); + SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0)); + SETPORT(SIMODE1, ENSELTIMO); + } else { + SETPORT(SIMODE0, (acp->phase & spiordy) ? ENSPIORDY : 0); + SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE); + } + } else if(STATE==seldi) { + SETPORT(SIMODE0, 0); + SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE); + } else { + SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0); + SETPORT(SIMODE1, ENSCSIRST | ( (ISSUE_SC||DONE_SC) ? ENBUSFREE : 0)); + } + + if(!HOSTDATA(shpnt)->in_intr) + SETBITS(DMACNTRL0, INTEN); + + return TESTHI(DMASTAT, INTSTAT); +} + + +/* + * Queue a command and setup interrupts for a free bus. 
+ */ +static int aha152x_internal_queue(struct scsi_cmnd *SCpnt, + struct completion *complete, int phase) +{ + struct aha152x_cmd_priv *acp = aha152x_priv(SCpnt); + struct Scsi_Host *shpnt = SCpnt->device->host; + unsigned long flags; + + acp->phase = not_issued | phase; + acp->status = 0x1; /* Illegal status by SCSI standard */ + acp->message = 0; + acp->sent_command = 0; + + if (acp->phase & (resetting | check_condition)) { + if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) { + scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n"); + return FAILED; + } + } else { + SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC); + if(!SCpnt->host_scribble) { + scmd_printk(KERN_ERR, SCpnt, "allocation failed\n"); + return FAILED; + } + } + + SCNEXT(SCpnt) = NULL; + SCSEM(SCpnt) = complete; + + /* setup scratch area + SCp.ptr : buffer pointer + SCp.this_residual : buffer length + SCp.buffer : next buffer + SCp.phase : current state of the command */ + + if ((phase & resetting) || !scsi_sglist(SCpnt)) { + acp->ptr = NULL; + acp->this_residual = 0; + scsi_set_resid(SCpnt, 0); + acp->buffer = NULL; + } else { + scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); + acp->buffer = scsi_sglist(SCpnt); + acp->ptr = SG_ADDRESS(acp->buffer); + acp->this_residual = acp->buffer->length; + } + + DO_LOCK(flags); + +#if defined(AHA152X_STAT) + HOSTDATA(shpnt)->total_commands++; +#endif + + /* Turn led on, when this is the first command. */ + HOSTDATA(shpnt)->commands++; + if (HOSTDATA(shpnt)->commands==1) + SETPORT(PORTA, 1); + + append_SC(&ISSUE_SC, SCpnt); + + if(!HOSTDATA(shpnt)->in_intr) + setup_expected_interrupts(shpnt); + + DO_UNLOCK(flags); + + return 0; +} + +/* + * queue a command + * + */ +static int aha152x_queue_lck(struct scsi_cmnd *SCpnt) +{ + return aha152x_internal_queue(SCpnt, NULL, 0); +} + +static DEF_SCSI_QCMD(aha152x_queue) + + +/* + * + */ +static void reset_done(struct scsi_cmnd *SCpnt) +{ + if(SCSEM(SCpnt)) { + complete(SCSEM(SCpnt)); + } else { + printk(KERN_ERR "aha152x: reset_done w/o completion\n"); + } +} + +static void aha152x_scsi_done(struct scsi_cmnd *SCpnt) +{ + if (aha152x_priv(SCpnt)->phase & resetting) + reset_done(SCpnt); + else + scsi_done(SCpnt); +} + +/* + * Abort a command + * + */ +static int aha152x_abort(struct scsi_cmnd *SCpnt) +{ + struct Scsi_Host *shpnt = SCpnt->device->host; + struct scsi_cmnd *ptr; + unsigned long flags; + + DO_LOCK(flags); + + ptr=remove_SC(&ISSUE_SC, SCpnt); + + if(ptr) { + HOSTDATA(shpnt)->commands--; + if (!HOSTDATA(shpnt)->commands) + SETPORT(PORTA, 0); + DO_UNLOCK(flags); + + kfree(SCpnt->host_scribble); + SCpnt->host_scribble=NULL; + + return SUCCESS; + } + + DO_UNLOCK(flags); + + /* + * FIXME: + * for current command: queue ABORT for message out and raise ATN + * for disconnected command: pseudo SC with ABORT message or ABORT on reselection? 
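aha152x_internal_queue() above primes the per-command scratch area so the data phases can walk the scatter-gather list: the cursor points at the first segment, ptr at its kernel mapping, and this_residual holds the bytes still to transfer in that segment. The sketch below shows just that initialization; the structure and function names are illustrative stand-ins, not driver code.

#include <linux/scatterlist.h>

/* Sketch only: how the first SG segment is latched into the
 * per-command cursor (buffer/ptr/this_residual) before the data phase. */
struct sg_cursor_sketch {
        struct scatterlist *sg;         /* current segment */
        char *ptr;                      /* kernel address within it */
        int this_residual;              /* bytes left in this segment */
};

static void sg_cursor_init_sketch(struct sg_cursor_sketch *c,
                                  struct scatterlist *sgl)
{
        c->sg = sgl;
        c->ptr = sg_virt(sgl);
        c->this_residual = sgl->length;
}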
+ * + */ + + scmd_printk(KERN_ERR, SCpnt, + "cannot abort running or disconnected command\n"); + + return FAILED; +} + +/* + * Reset a device + * + */ +static int aha152x_device_reset(struct scsi_cmnd * SCpnt) +{ + struct Scsi_Host *shpnt = SCpnt->device->host; + DECLARE_COMPLETION(done); + int ret, issued, disconnected; + unsigned char old_cmd_len = SCpnt->cmd_len; + unsigned long flags; + unsigned long timeleft; + + if(CURRENT_SC==SCpnt) { + scmd_printk(KERN_ERR, SCpnt, "cannot reset current device\n"); + return FAILED; + } + + DO_LOCK(flags); + issued = remove_SC(&ISSUE_SC, SCpnt) == NULL; + disconnected = issued && remove_SC(&DISCONNECTED_SC, SCpnt); + DO_UNLOCK(flags); + + SCpnt->cmd_len = 0; + + aha152x_internal_queue(SCpnt, &done, resetting); + + timeleft = wait_for_completion_timeout(&done, 100*HZ); + if (!timeleft) { + /* remove command from issue queue */ + DO_LOCK(flags); + remove_SC(&ISSUE_SC, SCpnt); + DO_UNLOCK(flags); + } + + SCpnt->cmd_len = old_cmd_len; + + DO_LOCK(flags); + + if (aha152x_priv(SCpnt)->phase & resetted) { + HOSTDATA(shpnt)->commands--; + if (!HOSTDATA(shpnt)->commands) + SETPORT(PORTA, 0); + kfree(SCpnt->host_scribble); + SCpnt->host_scribble=NULL; + + ret = SUCCESS; + } else { + /* requeue */ + if(!issued) { + append_SC(&ISSUE_SC, SCpnt); + } else if(disconnected) { + append_SC(&DISCONNECTED_SC, SCpnt); + } + + ret = FAILED; + } + + DO_UNLOCK(flags); + return ret; +} + +static void free_hard_reset_SCs(struct Scsi_Host *shpnt, + struct scsi_cmnd **SCs) +{ + struct scsi_cmnd *ptr; + + ptr=*SCs; + while(ptr) { + struct scsi_cmnd *next; + + if(SCDATA(ptr)) { + next = SCNEXT(ptr); + } else { + scmd_printk(KERN_DEBUG, ptr, + "queue corrupted at %p\n", ptr); + next = NULL; + } + + if (!ptr->device->soft_reset) { + remove_SC(SCs, ptr); + HOSTDATA(shpnt)->commands--; + kfree(ptr->host_scribble); + ptr->host_scribble=NULL; + } + + ptr = next; + } +} + +/* + * Reset the bus + * + * AIC-6260 has a hard reset (MRST signal), but apparently + * one cannot trigger it via software. So live with + * a soft reset; no-one seemed to have cared. 
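aha152x_device_reset() above queues the BUS DEVICE RESET through aha152x_internal_queue() with a private completion and then blocks on it for up to 100 seconds; a zero return from wait_for_completion_timeout() means the reset never completed and the command is pulled back off the issue queue. A minimal sketch of that wait pattern, with an illustrative helper name and error convention:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Sketch only: block on an asynchronously signalled completion with a
 * timeout, as done for the queued BUS DEVICE RESET. */
static int wait_for_reset_sketch(struct completion *done)
{
        unsigned long timeleft = wait_for_completion_timeout(done, 100 * HZ);

        if (!timeleft)
                return -ETIMEDOUT;      /* caller must dequeue the command */
        return 0;
}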
+ */ +static int aha152x_bus_reset_host(struct Scsi_Host *shpnt) +{ + unsigned long flags; + + DO_LOCK(flags); + + free_hard_reset_SCs(shpnt, &ISSUE_SC); + free_hard_reset_SCs(shpnt, &DISCONNECTED_SC); + + SETPORT(SCSISEQ, SCSIRSTO); + mdelay(256); + SETPORT(SCSISEQ, 0); + mdelay(DELAY); + + setup_expected_interrupts(shpnt); + if(HOSTDATA(shpnt)->commands==0) + SETPORT(PORTA, 0); + + DO_UNLOCK(flags); + + return SUCCESS; +} + +/* + * Reset the bus + * + */ +static int aha152x_bus_reset(struct scsi_cmnd *SCpnt) +{ + return aha152x_bus_reset_host(SCpnt->device->host); +} + +/* + * Restore default values to the AIC-6260 registers and reset the fifos + * + */ +static void reset_ports(struct Scsi_Host *shpnt) +{ + unsigned long flags; + + /* disable interrupts */ + SETPORT(DMACNTRL0, RSTFIFO); + + SETPORT(SCSISEQ, 0); + + SETPORT(SXFRCTL1, 0); + SETPORT(SCSISIG, 0); + SETRATE(0); + + /* clear all interrupt conditions */ + SETPORT(SSTAT0, 0x7f); + SETPORT(SSTAT1, 0xef); + + SETPORT(SSTAT4, SYNCERR | FWERR | FRERR); + + SETPORT(DMACNTRL0, 0); + SETPORT(DMACNTRL1, 0); + + SETPORT(BRSTCNTRL, 0xf1); + + /* clear SCSI fifos and transfer count */ + SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT); + SETPORT(SXFRCTL0, CH1); + + DO_LOCK(flags); + setup_expected_interrupts(shpnt); + DO_UNLOCK(flags); +} + +/* + * Reset the host (bus and controller) + * + */ +int aha152x_host_reset_host(struct Scsi_Host *shpnt) +{ + aha152x_bus_reset_host(shpnt); + reset_ports(shpnt); + + return SUCCESS; +} + +/* + * Return the "logical geometry" + * + */ +static int aha152x_biosparam(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int *info_array) +{ + struct Scsi_Host *shpnt = sdev->host; + + /* try default translation */ + info_array[0] = 64; + info_array[1] = 32; + info_array[2] = (unsigned long)capacity / (64 * 32); + + /* for disks >1GB do some guessing */ + if (info_array[2] >= 1024) { + int info[3]; + + /* try to figure out the geometry from the partition table */ + if (scsicam_bios_param(bdev, capacity, info) < 0 || + !((info[0] == 64 && info[1] == 32) || (info[0] == 255 && info[1] == 63))) { + if (EXT_TRANS) { + printk(KERN_NOTICE + "aha152x: unable to verify geometry for disk with >1GB.\n" + " using extended translation.\n"); + info_array[0] = 255; + info_array[1] = 63; + info_array[2] = (unsigned long)capacity / (255 * 63); + } else { + printk(KERN_NOTICE + "aha152x: unable to verify geometry for disk with >1GB.\n" + " Using default translation. 
Please verify yourself.\n" + " Perhaps you need to enable extended translation in the driver.\n" + " See Documentation/scsi/aha152x.rst for details.\n"); + } + } else { + info_array[0] = info[0]; + info_array[1] = info[1]; + info_array[2] = info[2]; + + if (info[0] == 255 && !EXT_TRANS) { + printk(KERN_NOTICE + "aha152x: current partition table is using extended translation.\n" + " using it also, although it's not explicitly enabled.\n"); + } + } + } + + return 0; +} + +/* + * Internal done function + * + */ +static void done(struct Scsi_Host *shpnt, unsigned char status_byte, + unsigned char host_byte) +{ + if (CURRENT_SC) { + if(DONE_SC) + scmd_printk(KERN_ERR, CURRENT_SC, + "there's already a completed command %p " + "- will cause abort\n", DONE_SC); + + DONE_SC = CURRENT_SC; + CURRENT_SC = NULL; + set_status_byte(DONE_SC, status_byte); + set_host_byte(DONE_SC, host_byte); + } else + printk(KERN_ERR "aha152x: done() called outside of command\n"); +} + +static struct work_struct aha152x_tq; + +/* + * Run service completions on the card with interrupts enabled. + * + */ +static void run(struct work_struct *work) +{ + struct aha152x_hostdata *hd; + + list_for_each_entry(hd, &aha152x_host_list, host_list) { + struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata); + + is_complete(shost); + } +} + +/* + * Interrupt handler + * + */ +static irqreturn_t intr(int irqno, void *dev_id) +{ + struct Scsi_Host *shpnt = dev_id; + unsigned long flags; + unsigned char rev, dmacntrl0; + + /* + * Read a couple of registers that are known to not be all 1's. If + * we read all 1's (-1), that means that either: + * + * a. The host adapter chip has gone bad, and we cannot control it, + * OR + * b. The host adapter is a PCMCIA card that has been ejected + * + * In either case, we cannot do anything with the host adapter at + * this point in time. So just ignore the interrupt and return. + * In the latter case, the interrupt might actually be meant for + * someone else sharing this IRQ, and that driver will handle it. + */ + rev = GETPORT(REV); + dmacntrl0 = GETPORT(DMACNTRL0); + if ((rev == 0xFF) && (dmacntrl0 == 0xFF)) + return IRQ_NONE; + + if( TESTLO(DMASTAT, INTSTAT) ) + return IRQ_NONE; + + /* no more interrupts from the controller, while we're busy. 
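aha152x_biosparam() above reports a 64-head, 32-sector geometry by default and switches to the 255 by 63 extended translation for disks whose default geometry would exceed 1024 cylinders (when extended translation is enabled). The sketch below shows only that arithmetic and deliberately omits the partition-table probe via scsicam_bios_param() that the real function also performs; the helper name is illustrative.

/* Sketch only: the two CHS translations used by aha152x_biosparam().
 * info[] = { heads, sectors per track, cylinders }. */
static void bios_geometry_sketch(unsigned long capacity, int ext_trans,
                                 int info[3])
{
        info[0] = 64;
        info[1] = 32;
        info[2] = capacity / (64 * 32);

        if (info[2] >= 1024 && ext_trans) {
                info[0] = 255;
                info[1] = 63;
                info[2] = capacity / (255 * 63);
        }
}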
+ INTEN is restored by the BH handler */ + CLRBITS(DMACNTRL0, INTEN); + + DO_LOCK(flags); + if( HOSTDATA(shpnt)->service==0 ) { + HOSTDATA(shpnt)->service=1; + + /* Poke the BH handler */ + INIT_WORK(&aha152x_tq, run); + schedule_work(&aha152x_tq); + } + DO_UNLOCK(flags); + + return IRQ_HANDLED; +} + +/* + * busfree phase + * - handle completition/disconnection/error of current command + * - start selection for next command (if any) + */ +static void busfree_run(struct Scsi_Host *shpnt) +{ + unsigned long flags; +#if defined(AHA152X_STAT) + int action=0; +#endif + + SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT); + SETPORT(SXFRCTL0, CH1); + + SETPORT(SSTAT1, CLRBUSFREE); + + if(CURRENT_SC) { + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); + +#if defined(AHA152X_STAT) + action++; +#endif + acp->phase &= ~syncneg; + + if (acp->phase & completed) { + /* target sent COMMAND COMPLETE */ + done(shpnt, acp->status, DID_OK); + + } else if (acp->phase & aborted) { + done(shpnt, acp->status, DID_ABORT); + + } else if (acp->phase & resetted) { + done(shpnt, acp->status, DID_RESET); + + } else if (acp->phase & disconnected) { + /* target sent DISCONNECT */ +#if defined(AHA152X_STAT) + HOSTDATA(shpnt)->disconnections++; +#endif + append_SC(&DISCONNECTED_SC, CURRENT_SC); + acp->phase |= 1 << 16; + CURRENT_SC = NULL; + + } else { + done(shpnt, SAM_STAT_GOOD, DID_ERROR); + } +#if defined(AHA152X_STAT) + } else { + HOSTDATA(shpnt)->busfree_without_old_command++; +#endif + } + + DO_LOCK(flags); + + if(DONE_SC) { +#if defined(AHA152X_STAT) + action++; +#endif + + if (aha152x_priv(DONE_SC)->phase & check_condition) { + struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC; + struct aha152x_scdata *sc = SCDATA(cmd); + + scsi_eh_restore_cmnd(cmd, &sc->ses); + + aha152x_priv(cmd)->status = SAM_STAT_CHECK_CONDITION; + + HOSTDATA(shpnt)->commands--; + if (!HOSTDATA(shpnt)->commands) + SETPORT(PORTA, 0); /* turn led off */ + } else if (aha152x_priv(DONE_SC)->status == SAM_STAT_CHECK_CONDITION) { +#if defined(AHA152X_STAT) + HOSTDATA(shpnt)->busfree_with_check_condition++; +#endif + + if (!(aha152x_priv(DONE_SC)->phase & not_issued)) { + struct aha152x_scdata *sc; + struct scsi_cmnd *ptr = DONE_SC; + DONE_SC=NULL; + + sc = SCDATA(ptr); + /* It was allocated in aha152x_internal_queue? */ + BUG_ON(!sc); + scsi_eh_prep_cmnd(ptr, &sc->ses, NULL, 0, ~0); + + DO_UNLOCK(flags); + aha152x_internal_queue(ptr, NULL, check_condition); + DO_LOCK(flags); + } + } + + if (DONE_SC) { + struct scsi_cmnd *ptr = DONE_SC; + DONE_SC=NULL; + + /* turn led off, when no commands are in the driver */ + HOSTDATA(shpnt)->commands--; + if (!HOSTDATA(shpnt)->commands) + SETPORT(PORTA, 0); /* turn led off */ + + if (!(aha152x_priv(ptr)->phase & resetting)) { + kfree(ptr->host_scribble); + ptr->host_scribble=NULL; + } + + DO_UNLOCK(flags); + aha152x_scsi_done(ptr); + DO_LOCK(flags); + } + + DONE_SC=NULL; +#if defined(AHA152X_STAT) + } else { + HOSTDATA(shpnt)->busfree_without_done_command++; +#endif + } + + if(ISSUE_SC) + CURRENT_SC = remove_first_SC(&ISSUE_SC); + + DO_UNLOCK(flags); + + if(CURRENT_SC) { + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); + +#if defined(AHA152X_STAT) + action++; +#endif + acp->phase |= selecting; + + /* clear selection timeout */ + SETPORT(SSTAT1, SELTO); + + SETPORT(SCSIID, (shpnt->this_id << OID_) | CURRENT_SC->device->id); + SETPORT(SXFRCTL1, (PARITY ? ENSPCHK : 0 ) | ENSTIMER); + SETPORT(SCSISEQ, ENSELO | ENAUTOATNO | (DISCONNECTED_SC ? 
ENRESELI : 0)); + } else { +#if defined(AHA152X_STAT) + HOSTDATA(shpnt)->busfree_without_new_command++; +#endif + SETPORT(SCSISEQ, DISCONNECTED_SC ? ENRESELI : 0); + } + +#if defined(AHA152X_STAT) + if(!action) + HOSTDATA(shpnt)->busfree_without_any_action++; +#endif +} + +/* + * Selection done (OUT) + * - queue IDENTIFY message and SDTR to selected target for message out + * (ATN asserted automagically via ENAUTOATNO in busfree()) + */ +static void seldo_run(struct Scsi_Host *shpnt) +{ + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); + + SETPORT(SCSISIG, 0); + SETPORT(SSTAT1, CLRBUSFREE); + SETPORT(SSTAT1, CLRPHASECHG); + + acp->phase &= ~(selecting | not_issued); + + SETPORT(SCSISEQ, 0); + + if (TESTLO(SSTAT0, SELDO)) { + scmd_printk(KERN_ERR, CURRENT_SC, + "aha152x: passing bus free condition\n"); + done(shpnt, SAM_STAT_GOOD, DID_NO_CONNECT); + return; + } + + SETPORT(SSTAT0, CLRSELDO); + + ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun)); + + if (acp->phase & aborting) { + ADDMSGO(ABORT); + } else if (acp->phase & resetting) { + ADDMSGO(BUS_DEVICE_RESET); + } else if (SYNCNEG==0 && SYNCHRONOUS) { + acp->phase |= syncneg; + MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8); + SYNCNEG=1; /* negotiation in progress */ + } + + SETRATE(SYNCRATE); +} + +/* + * Selection timeout + * - return command to mid-level with failure cause + * + */ +static void selto_run(struct Scsi_Host *shpnt) +{ + struct aha152x_cmd_priv *acp; + + SETPORT(SCSISEQ, 0); + SETPORT(SSTAT1, CLRSELTIMO); + + if (!CURRENT_SC) + return; + + acp = aha152x_priv(CURRENT_SC); + acp->phase &= ~selecting; + + if (acp->phase & aborted) + done(shpnt, SAM_STAT_GOOD, DID_ABORT); + else if (TESTLO(SSTAT0, SELINGO)) + done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY); + else + /* ARBITRATION won, but SELECTION failed */ + done(shpnt, SAM_STAT_GOOD, DID_NO_CONNECT); +} + +/* + * Selection in done + * - put current command back to issue queue + * (reconnection of a disconnected nexus instead + * of successful selection out) + * + */ +static void seldi_run(struct Scsi_Host *shpnt) +{ + int selid; + int target; + unsigned long flags; + + SETPORT(SCSISIG, 0); + SETPORT(SSTAT0, CLRSELDI); + SETPORT(SSTAT1, CLRBUSFREE); + SETPORT(SSTAT1, CLRPHASECHG); + + if(CURRENT_SC) { + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); + + if (!(acp->phase & not_issued)) + scmd_printk(KERN_ERR, CURRENT_SC, + "command should not have been issued yet\n"); + + DO_LOCK(flags); + append_SC(&ISSUE_SC, CURRENT_SC); + DO_UNLOCK(flags); + + CURRENT_SC = NULL; + } + + if (!DISCONNECTED_SC) + return; + + RECONN_TARGET=-1; + + selid = GETPORT(SELID) & ~(1 << shpnt->this_id); + + if (selid==0) { + shost_printk(KERN_INFO, shpnt, + "target id unknown (%02x)\n", selid); + return; + } + + for(target=7; !(selid & (1 << target)); target--) + ; + + if(selid & ~(1 << target)) { + shost_printk(KERN_INFO, shpnt, + "multiple targets reconnected (%02x)\n", selid); + } + + + SETPORT(SCSIID, (shpnt->this_id << OID_) | target); + SETPORT(SCSISEQ, 0); + + SETRATE(HOSTDATA(shpnt)->syncrate[target]); + + RECONN_TARGET=target; +} + +/* + * message in phase + * - handle initial message after reconnection to identify + * reconnecting nexus + * - queue command on DISCONNECTED_SC on DISCONNECT message + * - set completed flag on COMMAND COMPLETE + * (other completition code moved to busfree_run) + * - handle response to SDTR + * - clear synchronous transfer agreements on BUS RESET + * + * FIXME: what about SAVE POINTERS, RESTORE POINTERS? 
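+ *         (right now both are accepted as empty cases and ignored)
+ *
+ * Worked example for the SDTR arithmetic below: a target requesting a
+ * transfer period factor of 25 (25 * 4 = 100 ns) gives
+ * ticks = (25 * 4 + 49) / 50 = 2 ticks of 50 ns, so the rate field put
+ * into SCSIRATE is ticks - 2 = 0 and only the req/ack offset from the
+ * message is kept.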
+ * + */ +static void msgi_run(struct Scsi_Host *shpnt) +{ + for(;;) { + struct aha152x_cmd_priv *acp; + int sstat1 = GETPORT(SSTAT1); + + if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT)) + return; + + if (TESTLO(SSTAT0, SPIORDY)) + return; + + ADDMSGI(GETPORT(SCSIDAT)); + + if(!CURRENT_SC) { + if(LASTSTATE!=seldi) { + shost_printk(KERN_ERR, shpnt, + "message in w/o current command" + " not after reselection\n"); + } + + /* + * Handle reselection + */ + if(!(MSGI(0) & IDENTIFY_BASE)) { + shost_printk(KERN_ERR, shpnt, + "target didn't identify after reselection\n"); + continue; + } + + CURRENT_SC = remove_lun_SC(&DISCONNECTED_SC, RECONN_TARGET, MSGI(0) & 0x3f); + + if (!CURRENT_SC) { + show_queues(shpnt); + shost_printk(KERN_ERR, shpnt, + "no disconnected command" + " for target %d/%d\n", + RECONN_TARGET, MSGI(0) & 0x3f); + continue; + } + + acp = aha152x_priv(CURRENT_SC); + acp->message = MSGI(0); + acp->phase &= ~disconnected; + + MSGILEN=0; + + /* next message if any */ + continue; + } + + acp = aha152x_priv(CURRENT_SC); + acp->message = MSGI(0); + + switch (MSGI(0)) { + case DISCONNECT: + if (!RECONNECT) + scmd_printk(KERN_WARNING, CURRENT_SC, + "target was not allowed to disconnect\n"); + + acp->phase |= disconnected; + break; + + case COMMAND_COMPLETE: + acp->phase |= completed; + break; + + case MESSAGE_REJECT: + if (SYNCNEG==1) { + scmd_printk(KERN_INFO, CURRENT_SC, + "Synchronous Data Transfer Request" + " was rejected\n"); + SYNCNEG=2; /* negotiation completed */ + } else + scmd_printk(KERN_INFO, CURRENT_SC, + "inbound message (MESSAGE REJECT)\n"); + break; + + case SAVE_POINTERS: + break; + + case RESTORE_POINTERS: + break; + + case EXTENDED_MESSAGE: + if(MSGILEN<2 || MSGILEN<MSGI(1)+2) { + /* not yet completed */ + continue; + } + + switch (MSGI(2)) { + case EXTENDED_SDTR: + { + long ticks; + + if (!HOSTDATA(shpnt)->synchronous) + break; + + printk(INFO_LEAD, CMDINFO(CURRENT_SC)); + spi_print_msg(&MSGI(0)); + printk("\n"); + + ticks = (MSGI(3) * 4 + 49) / 50; + + if (syncneg) { + /* negotiation in progress */ + if (ticks > 9 || MSGI(4) < 1 || MSGI(4) > 8) { + ADDMSGO(MESSAGE_REJECT); + scmd_printk(KERN_INFO, + CURRENT_SC, + "received Synchronous Data Transfer Request invalid - rejected\n"); + break; + } + + SYNCRATE |= ((ticks - 2) << 4) + MSGI(4); + } else if (ticks <= 9 && MSGI(4) >= 1) { + ADDMSGO(EXTENDED_MESSAGE); + ADDMSGO(3); + ADDMSGO(EXTENDED_SDTR); + if (ticks < 4) { + ticks = 4; + ADDMSGO(50); + } else + ADDMSGO(MSGI(3)); + + if (MSGI(4) > 8) + MSGI(4) = 8; + + ADDMSGO(MSGI(4)); + + SYNCRATE |= ((ticks - 2) << 4) + MSGI(4); + } else { + /* requested SDTR is too slow, do it asynchronously */ + scmd_printk(KERN_INFO, + CURRENT_SC, + "Synchronous Data Transfer Request too slow - Rejecting\n"); + ADDMSGO(MESSAGE_REJECT); + } + + /* negotiation completed */ + SYNCNEG=2; + SETRATE(SYNCRATE); + } + break; + + case BUS_DEVICE_RESET: + { + int i; + + for(i=0; i<8; i++) { + HOSTDATA(shpnt)->syncrate[i]=0; + HOSTDATA(shpnt)->syncneg[i]=0; + } + + } + break; + + case EXTENDED_MODIFY_DATA_POINTER: + case EXTENDED_EXTENDED_IDENTIFY: + case EXTENDED_WDTR: + default: + ADDMSGO(MESSAGE_REJECT); + break; + } + break; + } + + MSGILEN=0; + } +} + +static void msgi_end(struct Scsi_Host *shpnt) +{ + if(MSGILEN>0) + scmd_printk(KERN_WARNING, CURRENT_SC, + "target left before message completed (%d)\n", + MSGILEN); + + if (MSGOLEN > 0 && !(GETPORT(SSTAT1) & BUSFREE)) + SETPORT(SCSISIG, P_MSGI | SIG_ATNO); +} + +/* + * message out phase + * + */ +static void msgo_init(struct Scsi_Host *shpnt) +{ + if(MSGOLEN==0) { + if ((aha152x_priv(CURRENT_SC)->phase & syncneg) && + SYNCNEG == 2 && SYNCRATE == 0) {
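+ /*
+ * nothing was queued for MESSAGE OUT: the one case handled specially is a
+ * completed SDTR negotiation that ended up asynchronous (SYNCNEG==2,
+ * SYNCRATE==0), where we simply send IDENTIFY again; any other unexpected
+ * MESSAGE OUT phase is answered with MESSAGE REJECT below.
+ */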
ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun)); + } else { + scmd_printk(KERN_INFO, CURRENT_SC, + "unexpected MESSAGE OUT phase; rejecting\n"); + ADDMSGO(MESSAGE_REJECT); + } + } + +} + +/* + * message out phase + * + */ +static void msgo_run(struct Scsi_Host *shpnt) +{ + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); + + while(MSGO_I<MSGOLEN) { + if (TESTLO(SSTAT0, SPIORDY)) + return; + + if (MSGO_I==MSGOLEN-1) { + /* Leave MESSAGE OUT after transfer */ + SETPORT(SSTAT1, CLRATNO); + } + + if (MSGO(MSGO_I) & IDENTIFY_BASE) + acp->phase |= identified; + + if (MSGO(MSGO_I)==ABORT) + acp->phase |= aborted; + + if (MSGO(MSGO_I)==BUS_DEVICE_RESET) + acp->phase |= resetted; + + SETPORT(SCSIDAT, MSGO(MSGO_I++)); + } +} + +static void msgo_end(struct Scsi_Host *shpnt) +{ + if(MSGO_I<MSGOLEN) + scmd_printk(KERN_ERR, CURRENT_SC, + "message sent incompletely (%d/%d)\n", + MSGO_I, MSGOLEN); + + MSGO_I=0; + MSGOLEN=0; +} + +static void cmd_init(struct Scsi_Host *shpnt) +{ + if (aha152x_priv(CURRENT_SC)->sent_command) { + scmd_printk(KERN_ERR, CURRENT_SC, + "command already sent\n"); + done(shpnt, SAM_STAT_GOOD, DID_ERROR); + return; + } + + CMD_I=0; +} + +/* + * command phase + * + */ +static void cmd_run(struct Scsi_Host *shpnt) +{ + while(CMD_I<CURRENT_SC->cmd_len) { + if (TESTLO(SSTAT0, SPIORDY)) + return; + + SETPORT(SCSIDAT, CURRENT_SC->cmnd[CMD_I++]); + } +} + +static void cmd_end(struct Scsi_Host *shpnt) +{ + if(CMD_I<CURRENT_SC->cmd_len) + scmd_printk(KERN_ERR, CURRENT_SC, + "command sent incompletely (%d/%d)\n", + CMD_I, CURRENT_SC->cmd_len); + else + aha152x_priv(CURRENT_SC)->sent_command++; +} + +/* + * status phase + * + */ +static void status_run(struct Scsi_Host *shpnt) +{ + if (TESTLO(SSTAT0, SPIORDY)) + return; + + aha152x_priv(CURRENT_SC)->status = GETPORT(SCSIDAT); + +} + +/* + * data in phase + * + */ +static void datai_init(struct Scsi_Host *shpnt) +{ + SETPORT(DMACNTRL0, RSTFIFO); + SETPORT(DMACNTRL0, RSTFIFO|ENDMA); + + SETPORT(SXFRCTL0, CH1|CLRSTCNT); + SETPORT(SXFRCTL0, CH1|SCSIEN|DMAEN); + + SETPORT(SIMODE0, 0); + SETPORT(SIMODE1, ENSCSIPERR | ENSCSIRST | ENPHASEMIS | ENBUSFREE); + + DATA_LEN=0; +} + +static void datai_run(struct Scsi_Host *shpnt) +{ + struct aha152x_cmd_priv *acp; + unsigned long the_time; + int fifodata, data_count; + + /* + * loop while the phase persists or the fifos are not empty + * + */ + while(TESTLO(DMASTAT, INTSTAT) || TESTLO(DMASTAT, DFIFOEMP) || TESTLO(SSTAT2, SEMPTY)) { + /* FIXME: maybe this should be done by setting up + * STCNT to trigger ENSWRAP interrupt, instead of + * polling for DFIFOFULL + */ + the_time=jiffies + 100*HZ; + while(TESTLO(DMASTAT, DFIFOFULL|INTSTAT) && time_before(jiffies,the_time)) + barrier(); + + if(TESTLO(DMASTAT, DFIFOFULL|INTSTAT)) { + scmd_printk(KERN_ERR, CURRENT_SC, "datai timeout\n"); + break; + } + + if(TESTHI(DMASTAT, DFIFOFULL)) { + fifodata = 128; + } else { + the_time=jiffies + 100*HZ; + while(TESTLO(SSTAT2, SEMPTY) && time_before(jiffies,the_time)) + barrier(); + + if(TESTLO(SSTAT2, SEMPTY)) { + scmd_printk(KERN_ERR, CURRENT_SC, + "datai sempty timeout"); + break; + } + + fifodata = GETPORT(FIFOSTAT); + } + + acp = aha152x_priv(CURRENT_SC); + if (acp->this_residual > 0) { + while (fifodata > 0 && acp->this_residual > 0) { + data_count = fifodata > acp->this_residual ?
+ acp->this_residual : fifodata; + fifodata -= data_count; + + if (data_count & 1) { + SETPORT(DMACNTRL0, ENDMA|_8BIT); + *acp->ptr++ = GETPORT(DATAPORT); + acp->this_residual--; + DATA_LEN++; + SETPORT(DMACNTRL0, ENDMA); + } + + if (data_count > 1) { + data_count >>= 1; + insw(DATAPORT, acp->ptr, data_count); + acp->ptr += 2 * data_count; + acp->this_residual -= 2 * data_count; + DATA_LEN += 2 * data_count; + } + + if (acp->this_residual == 0 && + !sg_is_last(acp->buffer)) { + /* advance to next buffer */ + acp->buffer = sg_next(acp->buffer); + acp->ptr = SG_ADDRESS(acp->buffer); + acp->this_residual = acp->buffer->length; + } + } + } else if (fifodata > 0) { + scmd_printk(KERN_ERR, CURRENT_SC, + "no buffers left for %d(%d) bytes" + " (data overrun!?)\n", + fifodata, GETPORT(FIFOSTAT)); + SETPORT(DMACNTRL0, ENDMA|_8BIT); + while(fifodata>0) { + GETPORT(DATAPORT); + fifodata--; + DATA_LEN++; + } + SETPORT(DMACNTRL0, ENDMA|_8BIT); + } + } + + if(TESTLO(DMASTAT, INTSTAT) || + TESTLO(DMASTAT, DFIFOEMP) || + TESTLO(SSTAT2, SEMPTY) || + GETPORT(FIFOSTAT)>0) { + /* + * something went wrong, if there's something left in the fifos + * or the phase didn't change + */ + scmd_printk(KERN_ERR, CURRENT_SC, + "fifos should be empty and phase should have changed\n"); + } + + if(DATA_LEN!=GETSTCNT()) { + scmd_printk(KERN_ERR, CURRENT_SC, + "manual transfer count differs from automatic " + "(count=%d;stcnt=%d;diff=%d;fifostat=%d)", + DATA_LEN, GETSTCNT(), GETSTCNT()-DATA_LEN, + GETPORT(FIFOSTAT)); + mdelay(10000); + } +} + +static void datai_end(struct Scsi_Host *shpnt) +{ + CMD_INC_RESID(CURRENT_SC, -GETSTCNT()); + + SETPORT(SXFRCTL0, CH1|CLRSTCNT); + SETPORT(DMACNTRL0, 0); +} + +/* + * data out phase + * + */ +static void datao_init(struct Scsi_Host *shpnt) +{ + SETPORT(DMACNTRL0, WRITE_READ | RSTFIFO); + SETPORT(DMACNTRL0, WRITE_READ | ENDMA); + + SETPORT(SXFRCTL0, CH1|CLRSTCNT); + SETPORT(SXFRCTL0, CH1|SCSIEN|DMAEN); + + SETPORT(SIMODE0, 0); + SETPORT(SIMODE1, ENSCSIPERR | ENSCSIRST | ENPHASEMIS | ENBUSFREE ); + + DATA_LEN = scsi_get_resid(CURRENT_SC); +} + +static void datao_run(struct Scsi_Host *shpnt) +{ + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); + unsigned long the_time; + int data_count; + + /* until phase changes or all data sent */ + while (TESTLO(DMASTAT, INTSTAT) && acp->this_residual > 0) { + data_count = 128; + if (data_count > acp->this_residual) + data_count = acp->this_residual; + + if(TESTLO(DMASTAT, DFIFOEMP)) { + scmd_printk(KERN_ERR, CURRENT_SC, + "datao fifo not empty (%d)", + GETPORT(FIFOSTAT)); + break; + } + + if(data_count & 1) { + SETPORT(DMACNTRL0,WRITE_READ|ENDMA|_8BIT); + SETPORT(DATAPORT, *acp->ptr++); + acp->this_residual--; + CMD_INC_RESID(CURRENT_SC, -1); + SETPORT(DMACNTRL0,WRITE_READ|ENDMA); + } + + if(data_count > 1) { + data_count >>= 1; + outsw(DATAPORT, acp->ptr, data_count); + acp->ptr += 2 * data_count; + acp->this_residual -= 2 * data_count; + CMD_INC_RESID(CURRENT_SC, -2 * data_count); + } + + if (acp->this_residual == 0 && !sg_is_last(acp->buffer)) { + /* advance to next buffer */ + acp->buffer = sg_next(acp->buffer); + acp->ptr = SG_ADDRESS(acp->buffer); + acp->this_residual = acp->buffer->length; + } + + the_time=jiffies + 100*HZ; + while(TESTLO(DMASTAT, DFIFOEMP|INTSTAT) && time_before(jiffies,the_time)) + barrier(); + + if(TESTLO(DMASTAT, DFIFOEMP|INTSTAT)) { + scmd_printk(KERN_ERR, CURRENT_SC, "dataout timeout\n"); + break; + } + } +} + +static void datao_end(struct Scsi_Host *shpnt) +{ + struct aha152x_cmd_priv *acp = 
aha152x_priv(CURRENT_SC); + + if(TESTLO(DMASTAT, DFIFOEMP)) { + u32 datao_cnt = GETSTCNT(); + int datao_out = DATA_LEN - scsi_get_resid(CURRENT_SC); + int done; + struct scatterlist *sg = scsi_sglist(CURRENT_SC); + + CMD_INC_RESID(CURRENT_SC, datao_out - datao_cnt); + + done = scsi_bufflen(CURRENT_SC) - scsi_get_resid(CURRENT_SC); + /* Locate the first SG entry not yet sent */ + while (done > 0 && !sg_is_last(sg)) { + if (done < sg->length) + break; + done -= sg->length; + sg = sg_next(sg); + } + + acp->buffer = sg; + acp->ptr = SG_ADDRESS(acp->buffer) + done; + acp->this_residual = acp->buffer->length - done; + } + + SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT); + SETPORT(SXFRCTL0, CH1); + + SETPORT(DMACNTRL0, 0); +} + +/* + * figure out what state we're in + * + */ +static int update_state(struct Scsi_Host *shpnt) +{ + int dataphase=0; + unsigned int stat0 = GETPORT(SSTAT0); + unsigned int stat1 = GETPORT(SSTAT1); + + PREVSTATE = STATE; + STATE=unknown; + + if(stat1 & SCSIRSTI) { + STATE=rsti; + SETPORT(SCSISEQ,0); + SETPORT(SSTAT1,SCSIRSTI); + } else if (stat0 & SELDI && PREVSTATE == busfree) { + STATE=seldi; + } else if (stat0 & SELDO && CURRENT_SC && + (aha152x_priv(CURRENT_SC)->phase & selecting)) { + STATE=seldo; + } else if(stat1 & SELTO) { + STATE=selto; + } else if(stat1 & BUSFREE) { + STATE=busfree; + SETPORT(SSTAT1,BUSFREE); + } else if(stat1 & SCSIPERR) { + STATE=parerr; + SETPORT(SSTAT1,SCSIPERR); + } else if(stat1 & REQINIT) { + switch(GETPORT(SCSISIG) & P_MASK) { + case P_MSGI: STATE=msgi; break; + case P_MSGO: STATE=msgo; break; + case P_DATAO: STATE=datao; break; + case P_DATAI: STATE=datai; break; + case P_STATUS: STATE=status; break; + case P_CMD: STATE=cmd; break; + } + dataphase=1; + } + + if((stat0 & SELDI) && STATE!=seldi && !dataphase) { + scmd_printk(KERN_INFO, CURRENT_SC, "reselection missed?"); + } + + if(STATE!=PREVSTATE) { + LASTSTATE=PREVSTATE; + } + + return dataphase; +} + +/* + * handle parity error + * + * FIXME: in which phase? + * + */ +static void parerr_run(struct Scsi_Host *shpnt) +{ + scmd_printk(KERN_ERR, CURRENT_SC, "parity error\n"); + done(shpnt, SAM_STAT_GOOD, DID_PARITY); +} + +/* + * handle reset in + * + */ +static void rsti_run(struct Scsi_Host *shpnt) +{ + struct scsi_cmnd *ptr; + + shost_printk(KERN_NOTICE, shpnt, "scsi reset in\n"); + + ptr=DISCONNECTED_SC; + while(ptr) { + struct scsi_cmnd *next = SCNEXT(ptr); + + if (!ptr->device->soft_reset) { + remove_SC(&DISCONNECTED_SC, ptr); + + kfree(ptr->host_scribble); + ptr->host_scribble=NULL; + + set_host_byte(ptr, DID_RESET); + aha152x_scsi_done(ptr); + } + + ptr = next; + } + + if(CURRENT_SC && !CURRENT_SC->device->soft_reset) + done(shpnt, SAM_STAT_GOOD, DID_RESET); +} + + +/* + * bottom-half handler + * + */ +static void is_complete(struct Scsi_Host *shpnt) +{ + int dataphase; + unsigned long flags; + int pending; + + if(!shpnt) + return; + + DO_LOCK(flags); + + if( HOSTDATA(shpnt)->service==0 ) { + DO_UNLOCK(flags); + return; + } + + HOSTDATA(shpnt)->service = 0; + + if(HOSTDATA(shpnt)->in_intr) { + DO_UNLOCK(flags); + /* aha152x_error never returns.. 
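+ (it dumps the queues via show_queues() and then panics)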
*/ + aha152x_error(shpnt, "bottom-half already running!?"); + } + HOSTDATA(shpnt)->in_intr++; + + /* + * loop while there are interrupt conditions pending + * + */ + do { + unsigned long start = jiffies; + DO_UNLOCK(flags); + + dataphase=update_state(shpnt); + + /* + * end previous state + * + */ + if(PREVSTATE!=STATE && states[PREVSTATE].end) + states[PREVSTATE].end(shpnt); + + /* + * disable SPIO mode if previous phase used it + * and this one doesn't + * + */ + if(states[PREVSTATE].spio && !states[STATE].spio) { + SETPORT(SXFRCTL0, CH1); + SETPORT(DMACNTRL0, 0); + if(CURRENT_SC) + aha152x_priv(CURRENT_SC)->phase &= ~spiordy; + } + + /* + * accept current dataphase phase + * + */ + if(dataphase) { + SETPORT(SSTAT0, REQINIT); + SETPORT(SCSISIG, GETPORT(SCSISIG) & P_MASK); + SETPORT(SSTAT1, PHASECHG); + } + + /* + * enable SPIO mode if previous didn't use it + * and this one does + * + */ + if(!states[PREVSTATE].spio && states[STATE].spio) { + SETPORT(DMACNTRL0, 0); + SETPORT(SXFRCTL0, CH1|SPIOEN); + if(CURRENT_SC) + aha152x_priv(CURRENT_SC)->phase |= spiordy; + } + + /* + * initialize for new state + * + */ + if(PREVSTATE!=STATE && states[STATE].init) + states[STATE].init(shpnt); + + /* + * handle current state + * + */ + if(states[STATE].run) + states[STATE].run(shpnt); + else + scmd_printk(KERN_ERR, CURRENT_SC, + "unexpected state (%x)\n", STATE); + + /* + * setup controller to interrupt on + * the next expected condition and + * loop if it's already there + * + */ + DO_LOCK(flags); + pending=setup_expected_interrupts(shpnt); +#if defined(AHA152X_STAT) + HOSTDATA(shpnt)->count[STATE]++; + if(PREVSTATE!=STATE) + HOSTDATA(shpnt)->count_trans[STATE]++; + HOSTDATA(shpnt)->time[STATE] += jiffies-start; +#endif + + } while(pending); + + /* + * enable interrupts and leave bottom-half + * + */ + HOSTDATA(shpnt)->in_intr--; + SETBITS(DMACNTRL0, INTEN); + DO_UNLOCK(flags); +} + + +/* + * Dump the current driver status and panic + */ +static void aha152x_error(struct Scsi_Host *shpnt, char *msg) +{ + shost_printk(KERN_EMERG, shpnt, "%s\n", msg); + show_queues(shpnt); + panic("aha152x panic\n"); +} + +/* + * display enabled interrupts + */ +static void disp_enintr(struct Scsi_Host *shpnt) +{ + int s0, s1; + + s0 = GETPORT(SIMODE0); + s1 = GETPORT(SIMODE1); + + shost_printk(KERN_DEBUG, shpnt, + "enabled interrupts (%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n", + (s0 & ENSELDO) ? "ENSELDO " : "", + (s0 & ENSELDI) ? "ENSELDI " : "", + (s0 & ENSELINGO) ? "ENSELINGO " : "", + (s0 & ENSWRAP) ? "ENSWRAP " : "", + (s0 & ENSDONE) ? "ENSDONE " : "", + (s0 & ENSPIORDY) ? "ENSPIORDY " : "", + (s0 & ENDMADONE) ? "ENDMADONE " : "", + (s1 & ENSELTIMO) ? "ENSELTIMO " : "", + (s1 & ENATNTARG) ? "ENATNTARG " : "", + (s1 & ENPHASEMIS) ? "ENPHASEMIS " : "", + (s1 & ENBUSFREE) ? "ENBUSFREE " : "", + (s1 & ENSCSIPERR) ? "ENSCSIPERR " : "", + (s1 & ENPHASECHG) ? "ENPHASECHG " : "", + (s1 & ENREQINIT) ? "ENREQINIT " : ""); +} + +/* + * Show the command data of a command + */ +static void show_command(struct scsi_cmnd *ptr) +{ + const int phase = aha152x_priv(ptr)->phase; + + scsi_print_command(ptr); + scmd_printk(KERN_DEBUG, ptr, + "request_bufflen=%d; resid=%d; " + "phase |%s%s%s%s%s%s%s%s%s; next=0x%p", + scsi_bufflen(ptr), scsi_get_resid(ptr), + phase & not_issued ? "not issued|" : "", + phase & selecting ? "selecting|" : "", + phase & identified ? "identified|" : "", + phase & disconnected ? "disconnected|" : "", + phase & completed ? "completed|" : "", + phase & spiordy ? "spiordy|" : "", + phase & syncneg ? 
"syncneg|" : "", + phase & aborted ? "aborted|" : "", + phase & resetted ? "resetted|" : "", + SCDATA(ptr) ? SCNEXT(ptr) : NULL); +} + +/* + * Dump the queued data + */ +static void show_queues(struct Scsi_Host *shpnt) +{ + struct scsi_cmnd *ptr; + unsigned long flags; + + DO_LOCK(flags); + printk(KERN_DEBUG "\nqueue status:\nissue_SC:\n"); + for (ptr = ISSUE_SC; ptr; ptr = SCNEXT(ptr)) + show_command(ptr); + DO_UNLOCK(flags); + + printk(KERN_DEBUG "current_SC:\n"); + if (CURRENT_SC) + show_command(CURRENT_SC); + else + printk(KERN_DEBUG "none\n"); + + printk(KERN_DEBUG "disconnected_SC:\n"); + for (ptr = DISCONNECTED_SC; ptr; ptr = SCDATA(ptr) ? SCNEXT(ptr) : NULL) + show_command(ptr); + + disp_enintr(shpnt); +} + +static void get_command(struct seq_file *m, struct scsi_cmnd * ptr) +{ + struct aha152x_cmd_priv *acp = aha152x_priv(ptr); + const int phase = acp->phase; + int i; + + seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ", + ptr, ptr->device->id, (u8)ptr->device->lun); + + for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++) + seq_printf(m, "0x%02x ", ptr->cmnd[i]); + + seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |", + scsi_get_resid(ptr), acp->this_residual, + sg_nents(acp->buffer) - 1); + + if (phase & not_issued) + seq_puts(m, "not issued|"); + if (phase & selecting) + seq_puts(m, "selecting|"); + if (phase & disconnected) + seq_puts(m, "disconnected|"); + if (phase & aborted) + seq_puts(m, "aborted|"); + if (phase & identified) + seq_puts(m, "identified|"); + if (phase & completed) + seq_puts(m, "completed|"); + if (phase & spiordy) + seq_puts(m, "spiordy|"); + if (phase & syncneg) + seq_puts(m, "syncneg|"); + seq_printf(m, "; next=0x%p\n", SCNEXT(ptr)); +} + +static void get_ports(struct seq_file *m, struct Scsi_Host *shpnt) +{ + int s; + + seq_printf(m, "\n%s: %s(%s) ", CURRENT_SC ? "on bus" : "waiting", states[STATE].name, states[PREVSTATE].name); + + s = GETPORT(SCSISEQ); + seq_puts(m, "SCSISEQ( "); + if (s & TEMODEO) + seq_puts(m, "TARGET MODE "); + if (s & ENSELO) + seq_puts(m, "SELO "); + if (s & ENSELI) + seq_puts(m, "SELI "); + if (s & ENRESELI) + seq_puts(m, "RESELI "); + if (s & ENAUTOATNO) + seq_puts(m, "AUTOATNO "); + if (s & ENAUTOATNI) + seq_puts(m, "AUTOATNI "); + if (s & ENAUTOATNP) + seq_puts(m, "AUTOATNP "); + if (s & SCSIRSTO) + seq_puts(m, "SCSIRSTO "); + seq_puts(m, ");"); + + seq_puts(m, " SCSISIG("); + s = GETPORT(SCSISIG); + switch (s & P_MASK) { + case P_DATAO: + seq_puts(m, "DATA OUT"); + break; + case P_DATAI: + seq_puts(m, "DATA IN"); + break; + case P_CMD: + seq_puts(m, "COMMAND"); + break; + case P_STATUS: + seq_puts(m, "STATUS"); + break; + case P_MSGO: + seq_puts(m, "MESSAGE OUT"); + break; + case P_MSGI: + seq_puts(m, "MESSAGE IN"); + break; + default: + seq_puts(m, "*invalid*"); + break; + } + + seq_puts(m, "); "); + + seq_printf(m, "INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? 
"hi" : "lo"); + + seq_puts(m, "SSTAT( "); + s = GETPORT(SSTAT0); + if (s & TARGET) + seq_puts(m, "TARGET "); + if (s & SELDO) + seq_puts(m, "SELDO "); + if (s & SELDI) + seq_puts(m, "SELDI "); + if (s & SELINGO) + seq_puts(m, "SELINGO "); + if (s & SWRAP) + seq_puts(m, "SWRAP "); + if (s & SDONE) + seq_puts(m, "SDONE "); + if (s & SPIORDY) + seq_puts(m, "SPIORDY "); + if (s & DMADONE) + seq_puts(m, "DMADONE "); + + s = GETPORT(SSTAT1); + if (s & SELTO) + seq_puts(m, "SELTO "); + if (s & ATNTARG) + seq_puts(m, "ATNTARG "); + if (s & SCSIRSTI) + seq_puts(m, "SCSIRSTI "); + if (s & PHASEMIS) + seq_puts(m, "PHASEMIS "); + if (s & BUSFREE) + seq_puts(m, "BUSFREE "); + if (s & SCSIPERR) + seq_puts(m, "SCSIPERR "); + if (s & PHASECHG) + seq_puts(m, "PHASECHG "); + if (s & REQINIT) + seq_puts(m, "REQINIT "); + seq_puts(m, "); "); + + + seq_puts(m, "SSTAT( "); + + s = GETPORT(SSTAT0) & GETPORT(SIMODE0); + + if (s & TARGET) + seq_puts(m, "TARGET "); + if (s & SELDO) + seq_puts(m, "SELDO "); + if (s & SELDI) + seq_puts(m, "SELDI "); + if (s & SELINGO) + seq_puts(m, "SELINGO "); + if (s & SWRAP) + seq_puts(m, "SWRAP "); + if (s & SDONE) + seq_puts(m, "SDONE "); + if (s & SPIORDY) + seq_puts(m, "SPIORDY "); + if (s & DMADONE) + seq_puts(m, "DMADONE "); + + s = GETPORT(SSTAT1) & GETPORT(SIMODE1); + + if (s & SELTO) + seq_puts(m, "SELTO "); + if (s & ATNTARG) + seq_puts(m, "ATNTARG "); + if (s & SCSIRSTI) + seq_puts(m, "SCSIRSTI "); + if (s & PHASEMIS) + seq_puts(m, "PHASEMIS "); + if (s & BUSFREE) + seq_puts(m, "BUSFREE "); + if (s & SCSIPERR) + seq_puts(m, "SCSIPERR "); + if (s & PHASECHG) + seq_puts(m, "PHASECHG "); + if (s & REQINIT) + seq_puts(m, "REQINIT "); + seq_puts(m, "); "); + + seq_puts(m, "SXFRCTL0( "); + + s = GETPORT(SXFRCTL0); + if (s & SCSIEN) + seq_puts(m, "SCSIEN "); + if (s & DMAEN) + seq_puts(m, "DMAEN "); + if (s & CH1) + seq_puts(m, "CH1 "); + if (s & CLRSTCNT) + seq_puts(m, "CLRSTCNT "); + if (s & SPIOEN) + seq_puts(m, "SPIOEN "); + if (s & CLRCH1) + seq_puts(m, "CLRCH1 "); + seq_puts(m, "); "); + + seq_puts(m, "SIGNAL( "); + + s = GETPORT(SCSISIG); + if (s & SIG_ATNI) + seq_puts(m, "ATNI "); + if (s & SIG_SELI) + seq_puts(m, "SELI "); + if (s & SIG_BSYI) + seq_puts(m, "BSYI "); + if (s & SIG_REQI) + seq_puts(m, "REQI "); + if (s & SIG_ACKI) + seq_puts(m, "ACKI "); + seq_puts(m, "); "); + + seq_printf(m, "SELID(%02x), ", GETPORT(SELID)); + + seq_printf(m, "STCNT(%d), ", GETSTCNT()); + + seq_puts(m, "SSTAT2( "); + + s = GETPORT(SSTAT2); + if (s & SOFFSET) + seq_puts(m, "SOFFSET "); + if (s & SEMPTY) + seq_puts(m, "SEMPTY "); + if (s & SFULL) + seq_puts(m, "SFULL "); + seq_printf(m, "); SFCNT (%d); ", s & (SFULL | SFCNT)); + + s = GETPORT(SSTAT3); + seq_printf(m, "SCSICNT (%d), OFFCNT(%d), ", (s & 0xf0) >> 4, s & 0x0f); + + seq_puts(m, "SSTAT4( "); + s = GETPORT(SSTAT4); + if (s & SYNCERR) + seq_puts(m, "SYNCERR "); + if (s & FWERR) + seq_puts(m, "FWERR "); + if (s & FRERR) + seq_puts(m, "FRERR "); + seq_puts(m, "); "); + + seq_puts(m, "DMACNTRL0( "); + s = GETPORT(DMACNTRL0); + seq_printf(m, "%s ", s & _8BIT ? "8BIT" : "16BIT"); + seq_printf(m, "%s ", s & DMA ? "DMA" : "PIO"); + seq_printf(m, "%s ", s & WRITE_READ ? 
"WRITE" : "READ"); + if (s & ENDMA) + seq_puts(m, "ENDMA "); + if (s & INTEN) + seq_puts(m, "INTEN "); + if (s & RSTFIFO) + seq_puts(m, "RSTFIFO "); + if (s & SWINT) + seq_puts(m, "SWINT "); + seq_puts(m, "); "); + + seq_puts(m, "DMASTAT( "); + s = GETPORT(DMASTAT); + if (s & ATDONE) + seq_puts(m, "ATDONE "); + if (s & WORDRDY) + seq_puts(m, "WORDRDY "); + if (s & DFIFOFULL) + seq_puts(m, "DFIFOFULL "); + if (s & DFIFOEMP) + seq_puts(m, "DFIFOEMP "); + seq_puts(m, ")\n"); + + seq_puts(m, "enabled interrupts( "); + + s = GETPORT(SIMODE0); + if (s & ENSELDO) + seq_puts(m, "ENSELDO "); + if (s & ENSELDI) + seq_puts(m, "ENSELDI "); + if (s & ENSELINGO) + seq_puts(m, "ENSELINGO "); + if (s & ENSWRAP) + seq_puts(m, "ENSWRAP "); + if (s & ENSDONE) + seq_puts(m, "ENSDONE "); + if (s & ENSPIORDY) + seq_puts(m, "ENSPIORDY "); + if (s & ENDMADONE) + seq_puts(m, "ENDMADONE "); + + s = GETPORT(SIMODE1); + if (s & ENSELTIMO) + seq_puts(m, "ENSELTIMO "); + if (s & ENATNTARG) + seq_puts(m, "ENATNTARG "); + if (s & ENPHASEMIS) + seq_puts(m, "ENPHASEMIS "); + if (s & ENBUSFREE) + seq_puts(m, "ENBUSFREE "); + if (s & ENSCSIPERR) + seq_puts(m, "ENSCSIPERR "); + if (s & ENPHASECHG) + seq_puts(m, "ENPHASECHG "); + if (s & ENREQINIT) + seq_puts(m, "ENREQINIT "); + seq_puts(m, ")\n"); +} + +static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length) +{ + if(!shpnt || !buffer || length<8 || strncmp("aha152x ", buffer, 8)!=0) + return -EINVAL; + +#if defined(AHA152X_STAT) + if(length>13 && strncmp("reset", buffer+8, 5)==0) { + int i; + + HOSTDATA(shpnt)->total_commands=0; + HOSTDATA(shpnt)->disconnections=0; + HOSTDATA(shpnt)->busfree_without_any_action=0; + HOSTDATA(shpnt)->busfree_without_old_command=0; + HOSTDATA(shpnt)->busfree_without_new_command=0; + HOSTDATA(shpnt)->busfree_without_done_command=0; + HOSTDATA(shpnt)->busfree_with_check_condition=0; + for (i = idle; icount[i]=0; + HOSTDATA(shpnt)->count_trans[i]=0; + HOSTDATA(shpnt)->time[i]=0; + } + + shost_printk(KERN_INFO, shpnt, "aha152x: stats reset.\n"); + + } else +#endif + { + return -EINVAL; + } + + + return length; +} + +static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt) +{ + int i; + struct scsi_cmnd *ptr; + unsigned long flags; + + seq_puts(m, AHA152X_REVID "\n"); + + seq_printf(m, "ioports 0x%04lx to 0x%04lx\n", + shpnt->io_port, shpnt->io_port + shpnt->n_io_port - 1); + seq_printf(m, "interrupt 0x%02x\n", shpnt->irq); + seq_printf(m, "disconnection/reconnection %s\n", + RECONNECT ? "enabled" : "disabled"); + seq_printf(m, "parity checking %s\n", + PARITY ? "enabled" : "disabled"); + seq_printf(m, "synchronous transfers %s\n", + SYNCHRONOUS ? 
"enabled" : "disabled"); + seq_printf(m, "%d commands currently queued\n", HOSTDATA(shpnt)->commands); + + if(SYNCHRONOUS) { + seq_puts(m, "synchronously operating targets (tick=50 ns):\n"); + for (i = 0; i < 8; i++) + if (HOSTDATA(shpnt)->syncrate[i] & 0x7f) + seq_printf(m, "target %d: period %dT/%dns; req/ack offset %d\n", + i, + (((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2), + (((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2) * 50, + HOSTDATA(shpnt)->syncrate[i] & 0x0f); + } + seq_puts(m, "\nqueue status:\n"); + DO_LOCK(flags); + if (ISSUE_SC) { + seq_puts(m, "not yet issued commands:\n"); + for (ptr = ISSUE_SC; ptr; ptr = SCNEXT(ptr)) + get_command(m, ptr); + } else + seq_puts(m, "no not yet issued commands\n"); + DO_UNLOCK(flags); + + if (CURRENT_SC) { + seq_puts(m, "current command:\n"); + get_command(m, CURRENT_SC); + } else + seq_puts(m, "no current command\n"); + + if (DISCONNECTED_SC) { + seq_puts(m, "disconnected commands:\n"); + for (ptr = DISCONNECTED_SC; ptr; ptr = SCNEXT(ptr)) + get_command(m, ptr); + } else + seq_puts(m, "no disconnected commands\n"); + + get_ports(m, shpnt); + +#if defined(AHA152X_STAT) + seq_printf(m, "statistics:\n" + "total commands: %d\n" + "disconnections: %d\n" + "busfree with check condition: %d\n" + "busfree without old command: %d\n" + "busfree without new command: %d\n" + "busfree without done command: %d\n" + "busfree without any action: %d\n" + "state " + "transitions " + "count " + "time\n", + HOSTDATA(shpnt)->total_commands, + HOSTDATA(shpnt)->disconnections, + HOSTDATA(shpnt)->busfree_with_check_condition, + HOSTDATA(shpnt)->busfree_without_old_command, + HOSTDATA(shpnt)->busfree_without_new_command, + HOSTDATA(shpnt)->busfree_without_done_command, + HOSTDATA(shpnt)->busfree_without_any_action); + for(i=0; icount_trans[i], + HOSTDATA(shpnt)->count[i], + HOSTDATA(shpnt)->time[i]); + } +#endif + return 0; +} + +static int aha152x_adjust_queue(struct scsi_device *device) +{ + blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH); + return 0; +} + +static const struct scsi_host_template aha152x_driver_template = { + .module = THIS_MODULE, + .name = AHA152X_REVID, + .proc_name = "aha152x", + .show_info = aha152x_show_info, + .write_info = aha152x_set_info, + .queuecommand = aha152x_queue, + .eh_abort_handler = aha152x_abort, + .eh_device_reset_handler = aha152x_device_reset, + .eh_bus_reset_handler = aha152x_bus_reset, + .bios_param = aha152x_biosparam, + .can_queue = 1, + .this_id = 7, + .sg_tablesize = SG_ALL, + .dma_boundary = PAGE_SIZE - 1, + .slave_alloc = aha152x_adjust_queue, + .cmd_size = sizeof(struct aha152x_cmd_priv), +}; + +#if !defined(AHA152X_PCMCIA) +static int setup_count; +static struct aha152x_setup setup[2]; + +/* possible i/o addresses for the AIC-6260; default first */ +static unsigned short ports[] = { 0x340, 0x140 }; + +#if !defined(SKIP_BIOSTEST) +/* possible locations for the Adaptec BIOS; defaults first */ +static unsigned int addresses[] = +{ + 0xdc000, /* default first */ + 0xc8000, + 0xcc000, + 0xd0000, + 0xd4000, + 0xd8000, + 0xe0000, + 0xeb800, /* VTech Platinum SMP */ + 0xf0000, +}; + +/* signatures for various AIC-6[23]60 based controllers. + The point in detecting signatures is to avoid useless and maybe + harmful probes on ports. I'm not sure that all listed boards pass + auto-configuration. For those which fail the BIOS signature is + obsolete, because user intervention to supply the configuration is + needed anyway. 
May be an information whether or not the BIOS supports + extended translation could be also useful here. */ +static struct signature { + unsigned char *signature; + int sig_offset; + int sig_length; +} signatures[] = +{ + { "Adaptec AHA-1520 BIOS", 0x102e, 21 }, + /* Adaptec 152x */ + { "Adaptec AHA-1520B", 0x000b, 17 }, + /* Adaptec 152x rev B */ + { "Adaptec AHA-1520B", 0x0026, 17 }, + /* Iomega Jaz Jet ISA (AIC6370Q) */ + { "Adaptec ASW-B626 BIOS", 0x1029, 21 }, + /* on-board controller */ + { "Adaptec BIOS: ASW-B626", 0x000f, 22 }, + /* on-board controller */ + { "Adaptec ASW-B626 S2", 0x2e6c, 19 }, + /* on-board controller */ + { "Adaptec BIOS:AIC-6360", 0x000c, 21 }, + /* on-board controller */ + { "ScsiPro SP-360 BIOS", 0x2873, 19 }, + /* ScsiPro-Controller */ + { "GA-400 LOCAL BUS SCSI BIOS", 0x102e, 26 }, + /* Gigabyte Local-Bus-SCSI */ + { "Adaptec BIOS:AVA-282X", 0x000c, 21 }, + /* Adaptec 282x */ + { "Adaptec IBM Dock II SCSI", 0x2edd, 24 }, + /* IBM Thinkpad Dock II */ + { "Adaptec BIOS:AHA-1532P", 0x001c, 22 }, + /* IBM Thinkpad Dock II SCSI */ + { "DTC3520A Host Adapter BIOS", 0x318a, 26 }, + /* DTC 3520A ISA SCSI */ +}; +#endif /* !SKIP_BIOSTEST */ + +/* + * Test, if port_base is valid. + * + */ +static int aha152x_porttest(int io_port) +{ + int i; + + SETPORT(io_port + O_DMACNTRL1, 0); /* reset stack pointer */ + for (i = 0; i < 16; i++) + SETPORT(io_port + O_STACK, i); + + SETPORT(io_port + O_DMACNTRL1, 0); /* reset stack pointer */ + for (i = 0; i < 16 && GETPORT(io_port + O_STACK) == i; i++) + ; + + return (i == 16); +} + +static int tc1550_porttest(int io_port) +{ + int i; + + SETPORT(io_port + O_TC_DMACNTRL1, 0); /* reset stack pointer */ + for (i = 0; i < 16; i++) + SETPORT(io_port + O_STACK, i); + + SETPORT(io_port + O_TC_DMACNTRL1, 0); /* reset stack pointer */ + for (i = 0; i < 16 && GETPORT(io_port + O_TC_STACK) == i; i++) + ; + + return (i == 16); +} + + +static int checksetup(struct aha152x_setup *setup) +{ + int i; + for (i = 0; i < ARRAY_SIZE(ports) && (setup->io_port != ports[i]); i++) + ; + + if (i == ARRAY_SIZE(ports)) + return 0; + + if (!request_region(setup->io_port, IO_RANGE, "aha152x")) { + printk(KERN_ERR "aha152x: io port 0x%x busy.\n", setup->io_port); + return 0; + } + + if( aha152x_porttest(setup->io_port) ) { + setup->tc1550=0; + } else if( tc1550_porttest(setup->io_port) ) { + setup->tc1550=1; + } else { + release_region(setup->io_port, IO_RANGE); + return 0; + } + + release_region(setup->io_port, IO_RANGE); + + if ((setup->irq < IRQ_MIN) || (setup->irq > IRQ_MAX)) + return 0; + + if ((setup->scsiid < 0) || (setup->scsiid > 7)) + return 0; + + if ((setup->reconnect < 0) || (setup->reconnect > 1)) + return 0; + + if ((setup->parity < 0) || (setup->parity > 1)) + return 0; + + if ((setup->synchronous < 0) || (setup->synchronous > 1)) + return 0; + + if ((setup->ext_trans < 0) || (setup->ext_trans > 1)) + return 0; + + + return 1; +} + + +static int __init aha152x_init(void) +{ + int i, j, ok; +#if defined(AUTOCONF) + aha152x_config conf; +#endif +#ifdef __ISAPNP__ + struct pnp_dev *dev=NULL, *pnpdev[2] = {NULL, NULL}; +#endif + + if ( setup_count ) { + printk(KERN_INFO "aha152x: processing commandline: "); + + for (i = 0; ipnpdev=pnpdev[i]; + pnpdev[i]=NULL; +#endif + } + } else { + printk(KERN_ERR "aha152x: io port 0x%x busy.\n", setup[i].io_port); + } + +#if defined(__ISAPNP__) + if( pnpdev[i] ) + pnp_device_detach(pnpdev[i]); +#endif + } + + return 0; +} + +static void __exit aha152x_exit(void) +{ + struct aha152x_hostdata *hd, *tmp; + + 
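+ /*
+ * use the _safe variant: aha152x_release() tears each host down (and is
+ * expected to unlink it from aha152x_host_list) while we are still
+ * walking the list.
+ */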
list_for_each_entry_safe(hd, tmp, &aha152x_host_list, host_list) { + struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata); + + aha152x_release(shost); + } +} + +module_init(aha152x_init); +module_exit(aha152x_exit); + +#if !defined(MODULE) +static int __init aha152x_setup(char *str) +{ + int ints[10]; + + get_options(str, ARRAY_SIZE(ints), ints); + + if(setup_count>=ARRAY_SIZE(setup)) { + printk(KERN_ERR "aha152x: you can only configure up to two controllers\n"); + return 1; + } + + setup[setup_count].conf = str; + setup[setup_count].io_port = ints[0] >= 1 ? ints[1] : 0x340; + setup[setup_count].irq = ints[0] >= 2 ? ints[2] : 11; + setup[setup_count].scsiid = ints[0] >= 3 ? ints[3] : 7; + setup[setup_count].reconnect = ints[0] >= 4 ? ints[4] : 1; + setup[setup_count].parity = ints[0] >= 5 ? ints[5] : 1; + setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 1; + setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT; + setup[setup_count].ext_trans = ints[0] >= 8 ? ints[8] : 0; + if (ints[0] > 8) + printk(KERN_NOTICE "aha152x: usage: aha152x=[,[," + "[,[,[,[,[,]]]]]]]\n"); + else + setup_count++; + + return 1; +} +__setup("aha152x=", aha152x_setup); +#endif + +#endif /* !AHA152X_PCMCIA */ diff --git a/drivers/scsi/aha152x.h b/drivers/scsi/aha152x.h new file mode 100644 index 000000000..efd01877d --- /dev/null +++ b/drivers/scsi/aha152x.h @@ -0,0 +1,338 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _AHA152X_H +#define _AHA152X_H + +/* + * $Id: aha152x.h,v 2.7 2004/01/24 11:39:03 fischer Exp $ + */ + +/* number of queueable commands + (unless we support more than 1 cmd_per_lun this should do) */ +#define AHA152X_MAXQUEUE 7 + +#define AHA152X_REVID "Adaptec 152x SCSI driver; $Revision: 2.7 $" + +/* port addresses */ +#define SCSISEQ (HOSTIOPORT0+0x00) /* SCSI sequence control */ +#define SXFRCTL0 (HOSTIOPORT0+0x01) /* SCSI transfer control 0 */ +#define SXFRCTL1 (HOSTIOPORT0+0x02) /* SCSI transfer control 1 */ +#define SCSISIG (HOSTIOPORT0+0x03) /* SCSI signal in/out */ +#define SCSIRATE (HOSTIOPORT0+0x04) /* SCSI rate control */ +#define SELID (HOSTIOPORT0+0x05) /* selection/reselection ID */ +#define SCSIID SELID /* SCSI ID */ +#define SCSIDAT (HOSTIOPORT0+0x06) /* SCSI latched data */ +#define SCSIBUS (HOSTIOPORT0+0x07) /* SCSI data bus */ +#define STCNT0 (HOSTIOPORT0+0x08) /* SCSI transfer count 0 */ +#define STCNT1 (HOSTIOPORT0+0x09) /* SCSI transfer count 1 */ +#define STCNT2 (HOSTIOPORT0+0x0a) /* SCSI transfer count 2 */ +#define SSTAT0 (HOSTIOPORT0+0x0b) /* SCSI interrupt status 0 */ +#define SSTAT1 (HOSTIOPORT0+0x0c) /* SCSI interrupt status 1 */ +#define SSTAT2 (HOSTIOPORT0+0x0d) /* SCSI interrupt status 2 */ +#define SCSITEST (HOSTIOPORT0+0x0e) /* SCSI test control */ +#define SSTAT3 SCSITEST /* SCSI interrupt status 3 */ +#define SSTAT4 (HOSTIOPORT0+0x0f) /* SCSI status 4 */ +#define SIMODE0 (HOSTIOPORT1+0x10) /* SCSI interrupt mode 0 */ +#define SIMODE1 (HOSTIOPORT1+0x11) /* SCSI interrupt mode 1 */ +#define DMACNTRL0 (HOSTIOPORT1+0x12) /* DMA control 0 */ +#define DMACNTRL1 (HOSTIOPORT1+0x13) /* DMA control 1 */ +#define DMASTAT (HOSTIOPORT1+0x14) /* DMA status */ +#define FIFOSTAT (HOSTIOPORT1+0x15) /* FIFO status */ +#define DATAPORT (HOSTIOPORT1+0x16) /* DATA port */ +#define BRSTCNTRL (HOSTIOPORT1+0x18) /* burst control */ +#define PORTA (HOSTIOPORT1+0x1a) /* PORT A */ +#define PORTB (HOSTIOPORT1+0x1b) /* PORT B */ +#define REV (HOSTIOPORT1+0x1c) /* revision */ +#define STACK (HOSTIOPORT1+0x1d) /* stack */ +#define TEST 
(HOSTIOPORT1+0x1e) /* test register */ + +#define IO_RANGE 0x20 + +/* used in aha152x_porttest */ +#define O_PORTA 0x1a /* PORT A */ +#define O_PORTB 0x1b /* PORT B */ +#define O_DMACNTRL1 0x13 /* DMA control 1 */ +#define O_STACK 0x1d /* stack */ + +/* used in tc1550_porttest */ +#define O_TC_PORTA 0x0a /* PORT A */ +#define O_TC_PORTB 0x0b /* PORT B */ +#define O_TC_DMACNTRL1 0x03 /* DMA control 1 */ +#define O_TC_STACK 0x0d /* stack */ + +/* bits and bitmasks to ports */ + +/* SCSI sequence control */ +#define TEMODEO 0x80 +#define ENSELO 0x40 +#define ENSELI 0x20 +#define ENRESELI 0x10 +#define ENAUTOATNO 0x08 +#define ENAUTOATNI 0x04 +#define ENAUTOATNP 0x02 +#define SCSIRSTO 0x01 + +/* SCSI transfer control 0 */ +#define SCSIEN 0x80 +#define DMAEN 0x40 +#define CH1 0x20 +#define CLRSTCNT 0x10 +#define SPIOEN 0x08 +#define CLRCH1 0x02 + +/* SCSI transfer control 1 */ +#define BITBUCKET 0x80 +#define SWRAPEN 0x40 +#define ENSPCHK 0x20 +#define STIMESEL 0x18 /* mask */ +#define STIMESEL_ 3 +#define ENSTIMER 0x04 +#define BYTEALIGN 0x02 + +/* SCSI signal IN */ +#define SIG_CDI 0x80 +#define SIG_IOI 0x40 +#define SIG_MSGI 0x20 +#define SIG_ATNI 0x10 +#define SIG_SELI 0x08 +#define SIG_BSYI 0x04 +#define SIG_REQI 0x02 +#define SIG_ACKI 0x01 + +/* SCSI Phases */ +#define P_MASK (SIG_MSGI|SIG_CDI|SIG_IOI) +#define P_DATAO (0) +#define P_DATAI (SIG_IOI) +#define P_CMD (SIG_CDI) +#define P_STATUS (SIG_CDI|SIG_IOI) +#define P_MSGO (SIG_MSGI|SIG_CDI) +#define P_MSGI (SIG_MSGI|SIG_CDI|SIG_IOI) + +/* SCSI signal OUT */ +#define SIG_CDO 0x80 +#define SIG_IOO 0x40 +#define SIG_MSGO 0x20 +#define SIG_ATNO 0x10 +#define SIG_SELO 0x08 +#define SIG_BSYO 0x04 +#define SIG_REQO 0x02 +#define SIG_ACKO 0x01 + +/* SCSI rate control */ +#define SXFR 0x70 /* mask */ +#define SXFR_ 4 +#define SOFS 0x0f /* mask */ + +/* SCSI ID */ +#define OID 0x70 +#define OID_ 4 +#define TID 0x07 + +/* SCSI transfer count */ +#define GETSTCNT() ( (GETPORT(STCNT2)<<16) \ + + (GETPORT(STCNT1)<< 8) \ + + GETPORT(STCNT0) ) + +#define SETSTCNT(X) { SETPORT(STCNT2, ((X) & 0xFF0000) >> 16); \ + SETPORT(STCNT1, ((X) & 0x00FF00) >> 8); \ + SETPORT(STCNT0, ((X) & 0x0000FF) ); } + +/* SCSI interrupt status */ +#define TARGET 0x80 +#define SELDO 0x40 +#define SELDI 0x20 +#define SELINGO 0x10 +#define SWRAP 0x08 +#define SDONE 0x04 +#define SPIORDY 0x02 +#define DMADONE 0x01 + +#define SETSDONE 0x80 +#define CLRSELDO 0x40 +#define CLRSELDI 0x20 +#define CLRSELINGO 0x10 +#define CLRSWRAP 0x08 +#define CLRSDONE 0x04 +#define CLRSPIORDY 0x02 +#define CLRDMADONE 0x01 + +/* SCSI status 1 */ +#define SELTO 0x80 +#define ATNTARG 0x40 +#define SCSIRSTI 0x20 +#define PHASEMIS 0x10 +#define BUSFREE 0x08 +#define SCSIPERR 0x04 +#define PHASECHG 0x02 +#define REQINIT 0x01 + +#define CLRSELTIMO 0x80 +#define CLRATNO 0x40 +#define CLRSCSIRSTI 0x20 +#define CLRBUSFREE 0x08 +#define CLRSCSIPERR 0x04 +#define CLRPHASECHG 0x02 +#define CLRREQINIT 0x01 + +/* SCSI status 2 */ +#define SOFFSET 0x20 +#define SEMPTY 0x10 +#define SFULL 0x08 +#define SFCNT 0x07 /* mask */ + +/* SCSI status 3 */ +#define SCSICNT 0xf0 /* mask */ +#define SCSICNT_ 4 +#define OFFCNT 0x0f /* mask */ + +/* SCSI TEST control */ +#define SCTESTU 0x08 +#define SCTESTD 0x04 +#define STCTEST 0x01 + +/* SCSI status 4 */ +#define SYNCERR 0x04 +#define FWERR 0x02 +#define FRERR 0x01 + +#define CLRSYNCERR 0x04 +#define CLRFWERR 0x02 +#define CLRFRERR 0x01 + +/* SCSI interrupt mode 0 */ +#define ENSELDO 0x40 +#define ENSELDI 0x20 +#define ENSELINGO 0x10 +#define ENSWRAP 0x08 +#define ENSDONE 
0x04 +#define ENSPIORDY 0x02 +#define ENDMADONE 0x01 + +/* SCSI interrupt mode 1 */ +#define ENSELTIMO 0x80 +#define ENATNTARG 0x40 +#define ENSCSIRST 0x20 +#define ENPHASEMIS 0x10 +#define ENBUSFREE 0x08 +#define ENSCSIPERR 0x04 +#define ENPHASECHG 0x02 +#define ENREQINIT 0x01 + +/* DMA control 0 */ +#define ENDMA 0x80 +#define _8BIT 0x40 +#define DMA 0x20 +#define WRITE_READ 0x08 +#define INTEN 0x04 +#define RSTFIFO 0x02 +#define SWINT 0x01 + +/* DMA control 1 */ +#define PWRDWN 0x80 +#define STK 0x07 /* mask */ + +/* DMA status */ +#define ATDONE 0x80 +#define WORDRDY 0x40 +#define INTSTAT 0x20 +#define DFIFOFULL 0x10 +#define DFIFOEMP 0x08 + +/* BURST control */ +#define BON 0xf0 +#define BOFF 0x0f + +/* TEST REGISTER */ +#define BOFFTMR 0x40 +#define BONTMR 0x20 +#define STCNTH 0x10 +#define STCNTM 0x08 +#define STCNTL 0x04 +#define SCSIBLK 0x02 +#define DMABLK 0x01 + +/* On the AHA-152x board PORTA and PORTB contain + some information about the board's configuration. */ +typedef union { + struct { + unsigned reserved:2; /* reserved */ + unsigned tardisc:1; /* Target disconnect: 0=disabled, 1=enabled */ + unsigned syncneg:1; /* Initial sync neg: 0=disabled, 1=enabled */ + unsigned msgclasses:2; /* Message classes + 0=#4 + 1=#0, #1, #2, #3, #4 + 2=#0, #3, #4 + 3=#0, #4 + */ + unsigned boot:1; /* boot: 0=disabled, 1=enabled */ + unsigned dma:1; /* Transfer mode: 0=PIO; 1=DMA */ + unsigned id:3; /* SCSI-id */ + unsigned irq:2; /* IRQ-Channel: 0,3=12, 1=10, 2=11 */ + unsigned dmachan:2; /* DMA-Channel: 0=0, 1=5, 2=6, 3=7 */ + unsigned parity:1; /* SCSI-parity: 1=enabled 0=disabled */ + } fields; + unsigned short port; +} aha152x_config ; + +#define cf_parity fields.parity +#define cf_dmachan fields.dmachan +#define cf_irq fields.irq +#define cf_id fields.id +#define cf_dma fields.dma +#define cf_boot fields.boot +#define cf_msgclasses fields.msgclasses +#define cf_syncneg fields.syncneg +#define cf_tardisc fields.tardisc +#define cf_port port + +/* Some macros to manipulate ports and their bits */ + +#define SETPORT(PORT, VAL) outb( (VAL), (PORT) ) +#define GETPORT(PORT) inb( PORT ) +#define SETBITS(PORT, BITS) outb( (inb(PORT) | (BITS)), (PORT) ) +#define CLRBITS(PORT, BITS) outb( (inb(PORT) & ~(BITS)), (PORT) ) +#define TESTHI(PORT, BITS) ((inb(PORT) & (BITS)) == (BITS)) +#define TESTLO(PORT, BITS) ((inb(PORT) & (BITS)) == 0) + +#define SETRATE(RATE) SETPORT(SCSIRATE,(RATE) & 0x7f) + +#if defined(AHA152X_DEBUG) +enum { + debug_procinfo = 0x0001, + debug_queue = 0x0002, + debug_locking = 0x0004, + debug_intr = 0x0008, + debug_selection = 0x0010, + debug_msgo = 0x0020, + debug_msgi = 0x0040, + debug_status = 0x0080, + debug_cmd = 0x0100, + debug_datai = 0x0200, + debug_datao = 0x0400, + debug_eh = 0x0800, + debug_done = 0x1000, + debug_phases = 0x2000, +}; +#endif + +/* for the pcmcia stub */ +struct aha152x_setup { + int io_port; + int irq; + int scsiid; + int reconnect; + int parity; + int synchronous; + int delay; + int ext_trans; + int tc1550; +#if defined(AHA152X_DEBUG) + int debug; +#endif + char *conf; +}; + +struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *); +void aha152x_release(struct Scsi_Host *); +int aha152x_host_reset_host(struct Scsi_Host *); + +#endif /* _AHA152X_H */ diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c new file mode 100644 index 000000000..9503996c6 --- /dev/null +++ b/drivers/scsi/aha1542.c @@ -0,0 +1,1168 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for Adaptec AHA-1542 SCSI host adapters + * + * Copyright (C) 1992 Tommy 
Thorn + * Copyright (C) 1993, 1994, 1995 Eric Youngdale + * Copyright (C) 2015 Ondrej Zary + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "aha1542.h" + +#define MAXBOARDS 4 + +static bool isapnp = 1; +module_param(isapnp, bool, 0); +MODULE_PARM_DESC(isapnp, "enable PnP support (default=1)"); + +static int io[MAXBOARDS] = { 0x330, 0x334, 0, 0 }; +module_param_hw_array(io, int, ioport, NULL, 0); +MODULE_PARM_DESC(io, "base IO address of controller (0x130,0x134,0x230,0x234,0x330,0x334, default=0x330,0x334)"); + +/* time AHA spends on the AT-bus during data transfer */ +static int bus_on[MAXBOARDS] = { -1, -1, -1, -1 }; /* power-on default: 11us */ +module_param_array(bus_on, int, NULL, 0); +MODULE_PARM_DESC(bus_on, "bus on time [us] (2-15, default=-1 [HW default: 11])"); + +/* time AHA spends off the bus (not to monopolize it) during data transfer */ +static int bus_off[MAXBOARDS] = { -1, -1, -1, -1 }; /* power-on default: 4us */ +module_param_array(bus_off, int, NULL, 0); +MODULE_PARM_DESC(bus_off, "bus off time [us] (1-64, default=-1 [HW default: 4])"); + +/* default is jumper selected (J1 on 1542A), factory default = 5 MB/s */ +static int dma_speed[MAXBOARDS] = { -1, -1, -1, -1 }; +module_param_array(dma_speed, int, NULL, 0); +MODULE_PARM_DESC(dma_speed, "DMA speed [MB/s] (5,6,7,8,10, default=-1 [by jumper])"); + +#define BIOS_TRANSLATION_6432 1 /* Default case these days */ +#define BIOS_TRANSLATION_25563 2 /* Big disk case */ + +struct aha1542_hostdata { + /* This will effectively start both of them at the first mailbox */ + int bios_translation; /* Mapping bios uses - for compatibility */ + int aha1542_last_mbi_used; + int aha1542_last_mbo_used; + struct scsi_cmnd *int_cmds[AHA1542_MAILBOXES]; + struct mailbox *mb; + dma_addr_t mb_handle; + struct ccb *ccb; + dma_addr_t ccb_handle; +}; + +#define AHA1542_MAX_SECTORS 16 + +struct aha1542_cmd { + /* bounce buffer */ + void *data_buffer; + dma_addr_t data_buffer_handle; +}; + +static inline void aha1542_intr_reset(u16 base) +{ + outb(IRST, CONTROL(base)); +} + +static inline bool wait_mask(u16 port, u8 mask, u8 allof, u8 noneof, int timeout) +{ + bool delayed = true; + + if (timeout == 0) { + timeout = 3000000; + delayed = false; + } + + while (1) { + u8 bits = inb(port) & mask; + if ((bits & allof) == allof && ((bits & noneof) == 0)) + break; + if (delayed) + mdelay(1); + if (--timeout == 0) + return false; + } + + return true; +} + +static int aha1542_outb(unsigned int base, u8 val) +{ + if (!wait_mask(STATUS(base), CDF, 0, CDF, 0)) + return 1; + outb(val, DATA(base)); + + return 0; +} + +static int aha1542_out(unsigned int base, u8 *buf, int len) +{ + while (len--) { + if (!wait_mask(STATUS(base), CDF, 0, CDF, 0)) + return 1; + outb(*buf++, DATA(base)); + } + if (!wait_mask(INTRFLAGS(base), INTRMASK, HACC, 0, 0)) + return 1; + + return 0; +} + +/* + * Only used at boot time, so we do not need to worry about latency as much + * here + */ + +static int aha1542_in(unsigned int base, u8 *buf, int len, int timeout) +{ + while (len--) { + if (!wait_mask(STATUS(base), DF, DF, 0, timeout)) + return 1; + *buf++ = inb(DATA(base)); + } + return 0; +} + +static int makecode(unsigned hosterr, unsigned scsierr) +{ + switch (hosterr) { + case 0x0: + case 0xa: /* Linked command complete without error and linked normally */ + case 0xb: /* Linked command complete without error, interrupt generated */ + hosterr = 0; + break; + 
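+	/*
+	 * makecode() folds both bytes into scsi_cmnd->result: the SCSI status
+	 * stays in the low byte and the DID_* host code goes into bits 16-23,
+	 * e.g. a selection timeout below ends up as DID_TIME_OUT << 16.
+	 */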
+ case 0x11: /* Selection time out-The initiator selection or target + * reselection was not complete within the SCSI Time out period + */ + hosterr = DID_TIME_OUT; + break; + + case 0x12: /* Data overrun/underrun-The target attempted to transfer more data + * than was allocated by the Data Length field or the sum of the + * Scatter / Gather Data Length fields. + */ + + case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */ + + case 0x15: /* MBO command was not 00, 01 or 02-The first byte of the CB was + * invalid. This usually indicates a software failure. + */ + + case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid. + * This usually indicates a software failure. + */ + + case 0x17: /* Linked CCB does not have the same LUN-A subsequent CCB of a set + * of linked CCB's does not specify the same logical unit number as + * the first. + */ + case 0x18: /* Invalid Target Direction received from Host-The direction of a + * Target Mode CCB was invalid. + */ + + case 0x19: /* Duplicate CCB Received in Target Mode-More than once CCB was + * received to service data transfer between the same target LUN + * and initiator SCSI ID in the same direction. + */ + + case 0x1a: /* Invalid CCB or Segment List Parameter-A segment list with a zero + * length segment or invalid segment list boundaries was received. + * A CCB parameter was invalid. + */ +#ifdef DEBUG + printk("Aha1542: %x %x\n", hosterr, scsierr); +#endif + hosterr = DID_ERROR; /* Couldn't find any better */ + break; + + case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus + * phase sequence was requested by the target. The host adapter + * will generate a SCSI Reset Condition, notifying the host with + * a SCRD interrupt + */ + hosterr = DID_RESET; + break; + default: + printk(KERN_ERR "aha1542: makecode: unknown hoststatus %x\n", hosterr); + break; + } + return scsierr | (hosterr << 16); +} + +static int aha1542_test_port(struct Scsi_Host *sh) +{ + int i; + + /* Quick and dirty test for presence of the card. */ + if (inb(STATUS(sh->io_port)) == 0xff) + return 0; + + /* Reset the adapter. I ought to make a hard reset, but it's not really necessary */ + + /* In case some other card was probing here, reset interrupts */ + aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */ + + outb(SRST | IRST /*|SCRST */ , CONTROL(sh->io_port)); + + mdelay(20); /* Wait a little bit for things to settle down. 
*/ + + /* Expect INIT and IDLE, any of the others are bad */ + if (!wait_mask(STATUS(sh->io_port), STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0)) + return 0; + + /* Shouldn't have generated any interrupts during reset */ + if (inb(INTRFLAGS(sh->io_port)) & INTRMASK) + return 0; + + /* + * Perform a host adapter inquiry instead so we do not need to set + * up the mailboxes ahead of time + */ + + aha1542_outb(sh->io_port, CMD_INQUIRY); + + for (i = 0; i < 4; i++) { + if (!wait_mask(STATUS(sh->io_port), DF, DF, 0, 0)) + return 0; + (void)inb(DATA(sh->io_port)); + } + + /* Reading port should reset DF */ + if (inb(STATUS(sh->io_port)) & DF) + return 0; + + /* When HACC, command is completed, and we're though testing */ + if (!wait_mask(INTRFLAGS(sh->io_port), HACC, HACC, 0, 0)) + return 0; + + /* Clear interrupts */ + outb(IRST, CONTROL(sh->io_port)); + + return 1; +} + +static void aha1542_free_cmd(struct scsi_cmnd *cmd) +{ + struct aha1542_cmd *acmd = scsi_cmd_priv(cmd); + + if (cmd->sc_data_direction == DMA_FROM_DEVICE) { + struct request *rq = scsi_cmd_to_rq(cmd); + void *buf = acmd->data_buffer; + struct req_iterator iter; + struct bio_vec bv; + + rq_for_each_segment(bv, rq, iter) { + memcpy_to_bvec(&bv, buf); + buf += bv.bv_len; + } + } + + scsi_dma_unmap(cmd); +} + +static irqreturn_t aha1542_interrupt(int irq, void *dev_id) +{ + struct Scsi_Host *sh = dev_id; + struct aha1542_hostdata *aha1542 = shost_priv(sh); + int errstatus, mbi, mbo, mbistatus; + int number_serviced; + unsigned long flags; + struct scsi_cmnd *tmp_cmd; + int flag; + struct mailbox *mb = aha1542->mb; + struct ccb *ccb = aha1542->ccb; + +#ifdef DEBUG + { + flag = inb(INTRFLAGS(sh->io_port)); + shost_printk(KERN_DEBUG, sh, "aha1542_intr_handle: "); + if (!(flag & ANYINTR)) + printk("no interrupt?"); + if (flag & MBIF) + printk("MBIF "); + if (flag & MBOA) + printk("MBOF "); + if (flag & HACC) + printk("HACC "); + if (flag & SCRD) + printk("SCRD "); + printk("status %02x\n", inb(STATUS(sh->io_port))); + } +#endif + number_serviced = 0; + + spin_lock_irqsave(sh->host_lock, flags); + while (1) { + flag = inb(INTRFLAGS(sh->io_port)); + + /* + * Check for unusual interrupts. If any of these happen, we should + * probably do something special, but for now just printing a message + * is sufficient. A SCSI reset detected is something that we really + * need to deal with in some way. + */ + if (flag & ~MBIF) { + if (flag & MBOA) + printk("MBOF "); + if (flag & HACC) + printk("HACC "); + if (flag & SCRD) + printk("SCRD "); + } + aha1542_intr_reset(sh->io_port); + + mbi = aha1542->aha1542_last_mbi_used + 1; + if (mbi >= 2 * AHA1542_MAILBOXES) + mbi = AHA1542_MAILBOXES; + + do { + if (mb[mbi].status != 0) + break; + mbi++; + if (mbi >= 2 * AHA1542_MAILBOXES) + mbi = AHA1542_MAILBOXES; + } while (mbi != aha1542->aha1542_last_mbi_used); + + if (mb[mbi].status == 0) { + spin_unlock_irqrestore(sh->host_lock, flags); + /* Hmm, no mail. 
Must have read it the last time around */ + if (!number_serviced) + shost_printk(KERN_WARNING, sh, "interrupt received, but no mail.\n"); + return IRQ_HANDLED; + } + + mbo = (scsi2int(mb[mbi].ccbptr) - (unsigned long)aha1542->ccb_handle) / sizeof(struct ccb); + mbistatus = mb[mbi].status; + mb[mbi].status = 0; + aha1542->aha1542_last_mbi_used = mbi; + +#ifdef DEBUG + if (ccb[mbo].tarstat | ccb[mbo].hastat) + shost_printk(KERN_DEBUG, sh, "aha1542_command: returning %x (status %d)\n", + ccb[mbo].tarstat + ((int) ccb[mbo].hastat << 16), mb[mbi].status); +#endif + + if (mbistatus == 3) + continue; /* Aborted command not found */ + +#ifdef DEBUG + shost_printk(KERN_DEBUG, sh, "...done %d %d\n", mbo, mbi); +#endif + + tmp_cmd = aha1542->int_cmds[mbo]; + + if (!tmp_cmd) { + spin_unlock_irqrestore(sh->host_lock, flags); + shost_printk(KERN_WARNING, sh, "Unexpected interrupt\n"); + shost_printk(KERN_WARNING, sh, "tarstat=%x, hastat=%x idlun=%x ccb#=%d\n", ccb[mbo].tarstat, + ccb[mbo].hastat, ccb[mbo].idlun, mbo); + return IRQ_HANDLED; + } + aha1542_free_cmd(tmp_cmd); + /* + * Fetch the sense data, and tuck it away, in the required slot. The + * Adaptec automatically fetches it, and there is no guarantee that + * we will still have it in the cdb when we come back + */ + if (ccb[mbo].tarstat == 2) + memcpy(tmp_cmd->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen], + SCSI_SENSE_BUFFERSIZE); + + + /* is there mail :-) */ + + /* more error checking left out here */ + if (mbistatus != 1) + /* This is surely wrong, but I don't know what's right */ + errstatus = makecode(ccb[mbo].hastat, ccb[mbo].tarstat); + else + errstatus = 0; + +#ifdef DEBUG + if (errstatus) + shost_printk(KERN_DEBUG, sh, "(aha1542 error:%x %x %x) ", errstatus, + ccb[mbo].hastat, ccb[mbo].tarstat); + if (ccb[mbo].tarstat == 2) + print_hex_dump_bytes("sense: ", DUMP_PREFIX_NONE, &ccb[mbo].cdb[ccb[mbo].cdblen], 12); + if (errstatus) + printk("aha1542_intr_handle: returning %6x\n", errstatus); +#endif + tmp_cmd->result = errstatus; + aha1542->int_cmds[mbo] = NULL; /* This effectively frees up the mailbox slot, as + * far as queuecommand is concerned + */ + scsi_done(tmp_cmd); + number_serviced++; + } +} + +static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) +{ + struct aha1542_cmd *acmd = scsi_cmd_priv(cmd); + struct aha1542_hostdata *aha1542 = shost_priv(sh); + u8 direction; + u8 target = cmd->device->id; + u8 lun = cmd->device->lun; + unsigned long flags; + int bufflen = scsi_bufflen(cmd); + int mbo; + struct mailbox *mb = aha1542->mb; + struct ccb *ccb = aha1542->ccb; + + if (*cmd->cmnd == REQUEST_SENSE) { + /* Don't do the command - we have the sense data already */ + cmd->result = 0; + scsi_done(cmd); + return 0; + } +#ifdef DEBUG + { + int i = -1; + if (*cmd->cmnd == READ_10 || *cmd->cmnd == WRITE_10) + i = xscsi2int(cmd->cmnd + 2); + else if (*cmd->cmnd == READ_6 || *cmd->cmnd == WRITE_6) + i = scsi2int(cmd->cmnd + 2); + shost_printk(KERN_DEBUG, sh, "aha1542_queuecommand: dev %d cmd %02x pos %d len %d", + target, *cmd->cmnd, i, bufflen); + print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len); + } +#endif + + if (cmd->sc_data_direction == DMA_TO_DEVICE) { + struct request *rq = scsi_cmd_to_rq(cmd); + void *buf = acmd->data_buffer; + struct req_iterator iter; + struct bio_vec bv; + + rq_for_each_segment(bv, rq, iter) { + memcpy_from_bvec(buf, &bv); + buf += bv.bv_len; + } + } + + /* + * Use the outgoing mailboxes in a round-robin fashion, because this + * is how the host adapter will scan 
for them + */ + + spin_lock_irqsave(sh->host_lock, flags); + mbo = aha1542->aha1542_last_mbo_used + 1; + if (mbo >= AHA1542_MAILBOXES) + mbo = 0; + + do { + if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL) + break; + mbo++; + if (mbo >= AHA1542_MAILBOXES) + mbo = 0; + } while (mbo != aha1542->aha1542_last_mbo_used); + + if (mb[mbo].status || aha1542->int_cmds[mbo]) + panic("Unable to find empty mailbox for aha1542.\n"); + + aha1542->int_cmds[mbo] = cmd; /* This will effectively prevent someone else from + * screwing with this cdb. + */ + + aha1542->aha1542_last_mbo_used = mbo; + +#ifdef DEBUG + shost_printk(KERN_DEBUG, sh, "Sending command (%d)...", mbo); +#endif + + /* This gets trashed for some reason */ + any2scsi(mb[mbo].ccbptr, aha1542->ccb_handle + mbo * sizeof(*ccb)); + + memset(&ccb[mbo], 0, sizeof(struct ccb)); + + ccb[mbo].cdblen = cmd->cmd_len; + + direction = 0; + if (*cmd->cmnd == READ_10 || *cmd->cmnd == READ_6) + direction = 8; + else if (*cmd->cmnd == WRITE_10 || *cmd->cmnd == WRITE_6) + direction = 16; + + memcpy(ccb[mbo].cdb, cmd->cmnd, ccb[mbo].cdblen); + ccb[mbo].op = 0; /* SCSI Initiator Command */ + any2scsi(ccb[mbo].datalen, bufflen); + if (bufflen) + any2scsi(ccb[mbo].dataptr, acmd->data_buffer_handle); + else + any2scsi(ccb[mbo].dataptr, 0); + ccb[mbo].idlun = (target & 7) << 5 | direction | (lun & 7); /*SCSI Target Id */ + ccb[mbo].rsalen = 16; + ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0; + ccb[mbo].commlinkid = 0; + +#ifdef DEBUG + print_hex_dump_bytes("sending: ", DUMP_PREFIX_NONE, &ccb[mbo], sizeof(ccb[mbo]) - 10); + printk("aha1542_queuecommand: now waiting for interrupt "); +#endif + mb[mbo].status = 1; + aha1542_outb(cmd->device->host->io_port, CMD_START_SCSI); + spin_unlock_irqrestore(sh->host_lock, flags); + + return 0; +} + +/* Initialize mailboxes */ +static void setup_mailboxes(struct Scsi_Host *sh) +{ + struct aha1542_hostdata *aha1542 = shost_priv(sh); + u8 mb_cmd[5] = { CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0}; + int i; + + for (i = 0; i < AHA1542_MAILBOXES; i++) { + aha1542->mb[i].status = 0; + any2scsi(aha1542->mb[i].ccbptr, + aha1542->ccb_handle + i * sizeof(struct ccb)); + aha1542->mb[AHA1542_MAILBOXES + i].status = 0; + } + aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */ + any2scsi(mb_cmd + 2, aha1542->mb_handle); + if (aha1542_out(sh->io_port, mb_cmd, 5)) + shost_printk(KERN_ERR, sh, "failed setting up mailboxes\n"); + aha1542_intr_reset(sh->io_port); +} + +static int aha1542_getconfig(struct Scsi_Host *sh) +{ + u8 inquiry_result[3]; + int i; + i = inb(STATUS(sh->io_port)); + if (i & DF) { + i = inb(DATA(sh->io_port)); + } + aha1542_outb(sh->io_port, CMD_RETCONF); + aha1542_in(sh->io_port, inquiry_result, 3, 0); + if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0)) + shost_printk(KERN_ERR, sh, "error querying board settings\n"); + aha1542_intr_reset(sh->io_port); + switch (inquiry_result[0]) { + case 0x80: + sh->dma_channel = 7; + break; + case 0x40: + sh->dma_channel = 6; + break; + case 0x20: + sh->dma_channel = 5; + break; + case 0x01: + sh->dma_channel = 0; + break; + case 0: + /* + * This means that the adapter, although Adaptec 1542 compatible, doesn't use a DMA channel. + * Currently only aware of the BusLogic BT-445S VL-Bus adapter which needs this. 
+ */ + sh->dma_channel = 0xFF; + break; + default: + shost_printk(KERN_ERR, sh, "Unable to determine DMA channel.\n"); + return -1; + } + switch (inquiry_result[1]) { + case 0x40: + sh->irq = 15; + break; + case 0x20: + sh->irq = 14; + break; + case 0x8: + sh->irq = 12; + break; + case 0x4: + sh->irq = 11; + break; + case 0x2: + sh->irq = 10; + break; + case 0x1: + sh->irq = 9; + break; + default: + shost_printk(KERN_ERR, sh, "Unable to determine IRQ level.\n"); + return -1; + } + sh->this_id = inquiry_result[2] & 7; + return 0; +} + +/* + * This function should only be called for 1542C boards - we can detect + * the special firmware settings and unlock the board + */ + +static int aha1542_mbenable(struct Scsi_Host *sh) +{ + static u8 mbenable_cmd[3]; + static u8 mbenable_result[2]; + int retval; + + retval = BIOS_TRANSLATION_6432; + + aha1542_outb(sh->io_port, CMD_EXTBIOS); + if (aha1542_in(sh->io_port, mbenable_result, 2, 100)) + return retval; + if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 100)) + goto fail; + aha1542_intr_reset(sh->io_port); + + if ((mbenable_result[0] & 0x08) || mbenable_result[1]) { + mbenable_cmd[0] = CMD_MBENABLE; + mbenable_cmd[1] = 0; + mbenable_cmd[2] = mbenable_result[1]; + + if ((mbenable_result[0] & 0x08) && (mbenable_result[1] & 0x03)) + retval = BIOS_TRANSLATION_25563; + + if (aha1542_out(sh->io_port, mbenable_cmd, 3)) + goto fail; + } + while (0) { +fail: + shost_printk(KERN_ERR, sh, "Mailbox init failed\n"); + } + aha1542_intr_reset(sh->io_port); + return retval; +} + +/* Query the board to find out if it is a 1542 or a 1740, or whatever. */ +static int aha1542_query(struct Scsi_Host *sh) +{ + struct aha1542_hostdata *aha1542 = shost_priv(sh); + u8 inquiry_result[4]; + int i; + i = inb(STATUS(sh->io_port)); + if (i & DF) { + i = inb(DATA(sh->io_port)); + } + aha1542_outb(sh->io_port, CMD_INQUIRY); + aha1542_in(sh->io_port, inquiry_result, 4, 0); + if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0)) + shost_printk(KERN_ERR, sh, "error querying card type\n"); + aha1542_intr_reset(sh->io_port); + + aha1542->bios_translation = BIOS_TRANSLATION_6432; /* Default case */ + + /* + * For an AHA1740 series board, we ignore the board since there is a + * hardware bug which can lead to wrong blocks being returned if the board + * is operating in the 1542 emulation mode. Since there is an extended mode + * driver, we simply ignore the board and let the 1740 driver pick it up. 
+ */ + + if (inquiry_result[0] == 0x43) { + shost_printk(KERN_INFO, sh, "Emulation mode not supported for AHA-1740 hardware, use aha1740 driver instead.\n"); + return 1; + } + + /* + * Always call this - boards that do not support extended bios translation + * will ignore the command, and we will set the proper default + */ + + aha1542->bios_translation = aha1542_mbenable(sh); + + return 0; +} + +static u8 dma_speed_hw(int dma_speed) +{ + switch (dma_speed) { + case 5: + return 0x00; + case 6: + return 0x04; + case 7: + return 0x01; + case 8: + return 0x02; + case 10: + return 0x03; + } + + return 0xff; /* invalid */ +} + +/* Set the Bus on/off-times as not to ruin floppy performance */ +static void aha1542_set_bus_times(struct Scsi_Host *sh, int bus_on, int bus_off, int dma_speed) +{ + if (bus_on > 0) { + u8 oncmd[] = { CMD_BUSON_TIME, clamp(bus_on, 2, 15) }; + + aha1542_intr_reset(sh->io_port); + if (aha1542_out(sh->io_port, oncmd, 2)) + goto fail; + } + + if (bus_off > 0) { + u8 offcmd[] = { CMD_BUSOFF_TIME, clamp(bus_off, 1, 64) }; + + aha1542_intr_reset(sh->io_port); + if (aha1542_out(sh->io_port, offcmd, 2)) + goto fail; + } + + if (dma_speed_hw(dma_speed) != 0xff) { + u8 dmacmd[] = { CMD_DMASPEED, dma_speed_hw(dma_speed) }; + + aha1542_intr_reset(sh->io_port); + if (aha1542_out(sh->io_port, dmacmd, 2)) + goto fail; + } + aha1542_intr_reset(sh->io_port); + return; +fail: + shost_printk(KERN_ERR, sh, "setting bus on/off-time failed\n"); + aha1542_intr_reset(sh->io_port); +} + +/* return non-zero on detection */ +static struct Scsi_Host *aha1542_hw_init(const struct scsi_host_template *tpnt, + struct device *pdev, int indx) +{ + unsigned int base_io = io[indx]; + struct Scsi_Host *sh; + struct aha1542_hostdata *aha1542; + char dma_info[] = "no DMA"; + + if (base_io == 0) + return NULL; + + if (!request_region(base_io, AHA1542_REGION_SIZE, "aha1542")) + return NULL; + + sh = scsi_host_alloc(tpnt, sizeof(struct aha1542_hostdata)); + if (!sh) + goto release; + aha1542 = shost_priv(sh); + + sh->unique_id = base_io; + sh->io_port = base_io; + sh->n_io_port = AHA1542_REGION_SIZE; + aha1542->aha1542_last_mbi_used = 2 * AHA1542_MAILBOXES - 1; + aha1542->aha1542_last_mbo_used = AHA1542_MAILBOXES - 1; + + if (!aha1542_test_port(sh)) + goto unregister; + + aha1542_set_bus_times(sh, bus_on[indx], bus_off[indx], dma_speed[indx]); + if (aha1542_query(sh)) + goto unregister; + if (aha1542_getconfig(sh) == -1) + goto unregister; + + if (sh->dma_channel != 0xFF) + snprintf(dma_info, sizeof(dma_info), "DMA %d", sh->dma_channel); + shost_printk(KERN_INFO, sh, "Adaptec AHA-1542 (SCSI-ID %d) at IO 0x%x, IRQ %d, %s\n", + sh->this_id, base_io, sh->irq, dma_info); + if (aha1542->bios_translation == BIOS_TRANSLATION_25563) + shost_printk(KERN_INFO, sh, "Using extended bios translation\n"); + + if (dma_set_mask_and_coherent(pdev, DMA_BIT_MASK(24)) < 0) + goto unregister; + + aha1542->mb = dma_alloc_coherent(pdev, + AHA1542_MAILBOXES * 2 * sizeof(struct mailbox), + &aha1542->mb_handle, GFP_KERNEL); + if (!aha1542->mb) + goto unregister; + + aha1542->ccb = dma_alloc_coherent(pdev, + AHA1542_MAILBOXES * sizeof(struct ccb), + &aha1542->ccb_handle, GFP_KERNEL); + if (!aha1542->ccb) + goto free_mb; + + setup_mailboxes(sh); + + if (request_irq(sh->irq, aha1542_interrupt, 0, "aha1542", sh)) { + shost_printk(KERN_ERR, sh, "Unable to allocate IRQ.\n"); + goto free_ccb; + } + if (sh->dma_channel != 0xFF) { + if (request_dma(sh->dma_channel, "aha1542")) { + shost_printk(KERN_ERR, sh, "Unable to allocate DMA channel.\n"); + 
goto free_irq; + } + if (sh->dma_channel == 0 || sh->dma_channel >= 5) { + set_dma_mode(sh->dma_channel, DMA_MODE_CASCADE); + enable_dma(sh->dma_channel); + } + } + + if (scsi_add_host(sh, pdev)) + goto free_dma; + + scsi_scan_host(sh); + + return sh; + +free_dma: + if (sh->dma_channel != 0xff) + free_dma(sh->dma_channel); +free_irq: + free_irq(sh->irq, sh); +free_ccb: + dma_free_coherent(pdev, AHA1542_MAILBOXES * sizeof(struct ccb), + aha1542->ccb, aha1542->ccb_handle); +free_mb: + dma_free_coherent(pdev, AHA1542_MAILBOXES * 2 * sizeof(struct mailbox), + aha1542->mb, aha1542->mb_handle); +unregister: + scsi_host_put(sh); +release: + release_region(base_io, AHA1542_REGION_SIZE); + + return NULL; +} + +static int aha1542_release(struct Scsi_Host *sh) +{ + struct aha1542_hostdata *aha1542 = shost_priv(sh); + struct device *dev = sh->dma_dev; + + scsi_remove_host(sh); + if (sh->dma_channel != 0xff) + free_dma(sh->dma_channel); + dma_free_coherent(dev, AHA1542_MAILBOXES * sizeof(struct ccb), + aha1542->ccb, aha1542->ccb_handle); + dma_free_coherent(dev, AHA1542_MAILBOXES * 2 * sizeof(struct mailbox), + aha1542->mb, aha1542->mb_handle); + if (sh->irq) + free_irq(sh->irq, sh); + if (sh->io_port && sh->n_io_port) + release_region(sh->io_port, sh->n_io_port); + scsi_host_put(sh); + return 0; +} + + +/* + * This is a device reset. This is handled by sending a special command + * to the device. + */ +static int aha1542_dev_reset(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *sh = cmd->device->host; + struct aha1542_hostdata *aha1542 = shost_priv(sh); + unsigned long flags; + struct mailbox *mb = aha1542->mb; + u8 target = cmd->device->id; + u8 lun = cmd->device->lun; + int mbo; + struct ccb *ccb = aha1542->ccb; + + spin_lock_irqsave(sh->host_lock, flags); + mbo = aha1542->aha1542_last_mbo_used + 1; + if (mbo >= AHA1542_MAILBOXES) + mbo = 0; + + do { + if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL) + break; + mbo++; + if (mbo >= AHA1542_MAILBOXES) + mbo = 0; + } while (mbo != aha1542->aha1542_last_mbo_used); + + if (mb[mbo].status || aha1542->int_cmds[mbo]) + panic("Unable to find empty mailbox for aha1542.\n"); + + aha1542->int_cmds[mbo] = cmd; /* This will effectively + * prevent someone else from + * screwing with this cdb. + */ + + aha1542->aha1542_last_mbo_used = mbo; + + /* This gets trashed for some reason */ + any2scsi(mb[mbo].ccbptr, aha1542->ccb_handle + mbo * sizeof(*ccb)); + + memset(&ccb[mbo], 0, sizeof(struct ccb)); + + ccb[mbo].op = 0x81; /* BUS DEVICE RESET */ + + ccb[mbo].idlun = (target & 7) << 5 | (lun & 7); /*SCSI Target Id */ + + ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0; + ccb[mbo].commlinkid = 0; + + /* + * Now tell the 1542 to flush all pending commands for this + * target + */ + aha1542_outb(sh->io_port, CMD_START_SCSI); + spin_unlock_irqrestore(sh->host_lock, flags); + + scmd_printk(KERN_WARNING, cmd, + "Trying device reset for target\n"); + + return SUCCESS; +} + +static int aha1542_reset(struct scsi_cmnd *cmd, u8 reset_cmd) +{ + struct Scsi_Host *sh = cmd->device->host; + struct aha1542_hostdata *aha1542 = shost_priv(sh); + unsigned long flags; + int i; + + spin_lock_irqsave(sh->host_lock, flags); + /* + * This does a scsi reset for all devices on the bus. + * In principle, we could also reset the 1542 - should + * we do this? Try this first, and we can add that later + * if it turns out to be useful. 
+ */ + outb(reset_cmd, CONTROL(cmd->device->host->io_port)); + + if (!wait_mask(STATUS(cmd->device->host->io_port), + STATMASK, IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0)) { + spin_unlock_irqrestore(sh->host_lock, flags); + return FAILED; + } + + /* + * We need to do this too before the 1542 can interact with + * us again after host reset. + */ + if (reset_cmd & HRST) + setup_mailboxes(cmd->device->host); + + /* + * Now try to pick up the pieces. For all pending commands, + * free any internal data structures, and basically clear things + * out. We do not try and restart any commands or anything - + * the strategy handler takes care of that crap. + */ + shost_printk(KERN_WARNING, cmd->device->host, "Sent BUS RESET to scsi host %d\n", cmd->device->host->host_no); + + for (i = 0; i < AHA1542_MAILBOXES; i++) { + if (aha1542->int_cmds[i] != NULL) { + struct scsi_cmnd *tmp_cmd; + tmp_cmd = aha1542->int_cmds[i]; + + if (tmp_cmd->device->soft_reset) { + /* + * If this device implements the soft reset option, + * then it is still holding onto the command, and + * may yet complete it. In this case, we don't + * flush the data. + */ + continue; + } + aha1542_free_cmd(tmp_cmd); + aha1542->int_cmds[i] = NULL; + aha1542->mb[i].status = 0; + } + } + + spin_unlock_irqrestore(sh->host_lock, flags); + return SUCCESS; +} + +static int aha1542_bus_reset(struct scsi_cmnd *cmd) +{ + return aha1542_reset(cmd, SCRST); +} + +static int aha1542_host_reset(struct scsi_cmnd *cmd) +{ + return aha1542_reset(cmd, HRST | SCRST); +} + +static int aha1542_biosparam(struct scsi_device *sdev, + struct block_device *bdev, sector_t capacity, int geom[]) +{ + struct aha1542_hostdata *aha1542 = shost_priv(sdev->host); + + if (capacity >= 0x200000 && + aha1542->bios_translation == BIOS_TRANSLATION_25563) { + /* Please verify that this is the same as what DOS returns */ + geom[0] = 255; /* heads */ + geom[1] = 63; /* sectors */ + } else { + geom[0] = 64; /* heads */ + geom[1] = 32; /* sectors */ + } + geom[2] = sector_div(capacity, geom[0] * geom[1]); /* cylinders */ + + return 0; +} +MODULE_LICENSE("GPL"); + +static int aha1542_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd) +{ + struct aha1542_cmd *acmd = scsi_cmd_priv(cmd); + + acmd->data_buffer = dma_alloc_coherent(shost->dma_dev, + SECTOR_SIZE * AHA1542_MAX_SECTORS, + &acmd->data_buffer_handle, GFP_KERNEL); + if (!acmd->data_buffer) + return -ENOMEM; + return 0; +} + +static int aha1542_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd) +{ + struct aha1542_cmd *acmd = scsi_cmd_priv(cmd); + + dma_free_coherent(shost->dma_dev, SECTOR_SIZE * AHA1542_MAX_SECTORS, + acmd->data_buffer, acmd->data_buffer_handle); + return 0; +} + +static const struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .proc_name = "aha1542", + .name = "Adaptec 1542", + .cmd_size = sizeof(struct aha1542_cmd), + .queuecommand = aha1542_queuecommand, + .eh_device_reset_handler= aha1542_dev_reset, + .eh_bus_reset_handler = aha1542_bus_reset, + .eh_host_reset_handler = aha1542_host_reset, + .bios_param = aha1542_biosparam, + .init_cmd_priv = aha1542_init_cmd_priv, + .exit_cmd_priv = aha1542_exit_cmd_priv, + .can_queue = AHA1542_MAILBOXES, + .this_id = 7, + .max_sectors = AHA1542_MAX_SECTORS, + .sg_tablesize = SG_ALL, +}; + +static int aha1542_isa_match(struct device *pdev, unsigned int ndev) +{ + struct Scsi_Host *sh = aha1542_hw_init(&driver_template, pdev, ndev); + + if (!sh) + return 0; + + dev_set_drvdata(pdev, sh); + return 1; +} + +static void 
aha1542_isa_remove(struct device *pdev, + unsigned int ndev) +{ + aha1542_release(dev_get_drvdata(pdev)); + dev_set_drvdata(pdev, NULL); +} + +static struct isa_driver aha1542_isa_driver = { + .match = aha1542_isa_match, + .remove = aha1542_isa_remove, + .driver = { + .name = "aha1542" + }, +}; +static int isa_registered; + +#ifdef CONFIG_PNP +static const struct pnp_device_id aha1542_pnp_ids[] = { + { .id = "ADP1542" }, + { .id = "" } +}; +MODULE_DEVICE_TABLE(pnp, aha1542_pnp_ids); + +static int aha1542_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id) +{ + int indx; + struct Scsi_Host *sh; + + for (indx = 0; indx < ARRAY_SIZE(io); indx++) { + if (io[indx]) + continue; + + if (pnp_activate_dev(pdev) < 0) + continue; + + io[indx] = pnp_port_start(pdev, 0); + + /* + * The card can be queried for its DMA, we have + * the DMA set up that is enough + */ + + dev_info(&pdev->dev, "ISAPnP found an AHA1535 at I/O 0x%03X", io[indx]); + } + + sh = aha1542_hw_init(&driver_template, &pdev->dev, indx); + if (!sh) + return -ENODEV; + + pnp_set_drvdata(pdev, sh); + return 0; +} + +static void aha1542_pnp_remove(struct pnp_dev *pdev) +{ + aha1542_release(pnp_get_drvdata(pdev)); + pnp_set_drvdata(pdev, NULL); +} + +static struct pnp_driver aha1542_pnp_driver = { + .name = "aha1542", + .id_table = aha1542_pnp_ids, + .probe = aha1542_pnp_probe, + .remove = aha1542_pnp_remove, +}; +static int pnp_registered; +#endif /* CONFIG_PNP */ + +static int __init aha1542_init(void) +{ + int ret = 0; + +#ifdef CONFIG_PNP + if (isapnp) { + ret = pnp_register_driver(&aha1542_pnp_driver); + if (!ret) + pnp_registered = 1; + } +#endif + ret = isa_register_driver(&aha1542_isa_driver, MAXBOARDS); + if (!ret) + isa_registered = 1; + +#ifdef CONFIG_PNP + if (pnp_registered) + ret = 0; +#endif + if (isa_registered) + ret = 0; + + return ret; +} + +static void __exit aha1542_exit(void) +{ +#ifdef CONFIG_PNP + if (pnp_registered) + pnp_unregister_driver(&aha1542_pnp_driver); +#endif + if (isa_registered) + isa_unregister_driver(&aha1542_isa_driver); +} + +module_init(aha1542_init); +module_exit(aha1542_exit); diff --git a/drivers/scsi/aha1542.h b/drivers/scsi/aha1542.h new file mode 100644 index 000000000..92a5f9896 --- /dev/null +++ b/drivers/scsi/aha1542.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _AHA1542_H_ +#define _AHA1542_H_ + +#include + +/* I/O Port interface 4.2 */ +/* READ */ +#define STATUS(base) base +#define STST BIT(7) /* Self Test in Progress */ +#define DIAGF BIT(6) /* Internal Diagnostic Failure */ +#define INIT BIT(5) /* Mailbox Initialization Required */ +#define IDLE BIT(4) /* SCSI Host Adapter Idle */ +#define CDF BIT(3) /* Command/Data Out Port Full */ +#define DF BIT(2) /* Data In Port Full */ +/* BIT(1) is reserved */ +#define INVDCMD BIT(0) /* Invalid H A Command */ +#define STATMASK (STST | DIAGF | INIT | IDLE | CDF | DF | INVDCMD) + +#define INTRFLAGS(base) (STATUS(base)+2) +#define ANYINTR BIT(7) /* Any Interrupt */ +#define SCRD BIT(3) /* SCSI Reset Detected */ +#define HACC BIT(2) /* HA Command Complete */ +#define MBOA BIT(1) /* MBO Empty */ +#define MBIF BIT(0) /* MBI Full */ +#define INTRMASK (ANYINTR | SCRD | HACC | MBOA | MBIF) + +/* WRITE */ +#define CONTROL(base) STATUS(base) +#define HRST BIT(7) /* Hard Reset */ +#define SRST BIT(6) /* Soft Reset */ +#define IRST BIT(5) /* Interrupt Reset */ +#define SCRST BIT(4) /* SCSI Bus Reset */ + +/* READ/WRITE */ +#define DATA(base) (STATUS(base)+1) +#define CMD_NOP 0x00 /* No Operation */ +#define CMD_MBINIT 0x01 
/* Mailbox Initialization */ +#define CMD_START_SCSI 0x02 /* Start SCSI Command */ +#define CMD_INQUIRY 0x04 /* Adapter Inquiry */ +#define CMD_EMBOI 0x05 /* Enable MailBox Out Interrupt */ +#define CMD_BUSON_TIME 0x07 /* Set Bus-On Time */ +#define CMD_BUSOFF_TIME 0x08 /* Set Bus-Off Time */ +#define CMD_DMASPEED 0x09 /* Set AT Bus Transfer Speed */ +#define CMD_RETDEVS 0x0a /* Return Installed Devices */ +#define CMD_RETCONF 0x0b /* Return Configuration Data */ +#define CMD_RETSETUP 0x0d /* Return Setup Data */ +#define CMD_ECHO 0x1f /* ECHO Command Data */ + +#define CMD_EXTBIOS 0x28 /* Return extend bios information only 1542C */ +#define CMD_MBENABLE 0x29 /* Set Mailbox Interface enable only 1542C */ + +/* Mailbox Definition 5.2.1 and 5.2.2 */ +struct mailbox { + u8 status; /* Command/Status */ + u8 ccbptr[3]; /* msb, .., lsb */ +}; + +/* This is used with scatter-gather */ +struct chain { + u8 datalen[3]; /* Size of this part of chain */ + u8 dataptr[3]; /* Location of data */ +}; + +/* These belong in scsi.h also */ +static inline void any2scsi(u8 *p, u32 v) +{ + p[0] = v >> 16; + p[1] = v >> 8; + p[2] = v; +} + +#define scsi2int(up) ( (((long)*(up)) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) ) + +#define xscsi2int(up) ( (((long)(up)[0]) << 24) + (((long)(up)[1]) << 16) \ + + (((long)(up)[2]) << 8) + ((long)(up)[3]) ) + +#define MAX_CDB 12 +#define MAX_SENSE 14 + +/* Command Control Block (CCB), 5.3 */ +struct ccb { + u8 op; /* Command Control Block Operation Code: */ + /* 0x00: SCSI Initiator CCB, 0x01: SCSI Target CCB, */ + /* 0x02: SCSI Initiator CCB with Scatter/Gather, */ + /* 0x81: SCSI Bus Device Reset CCB */ + u8 idlun; /* Address and Direction Control: */ + /* Bits 7-5: op=0, 2: Target ID, op=1: Initiator ID */ + /* Bit 4: Outbound data transfer, length is checked */ + /* Bit 3: Inbound data transfer, length is checked */ + /* Bits 2-0: Logical Unit Number */ + u8 cdblen; /* SCSI Command Length */ + u8 rsalen; /* Request Sense Allocation Length/Disable Auto Sense */ + u8 datalen[3]; /* Data Length (MSB, ..., LSB) */ + u8 dataptr[3]; /* Data Pointer (MSB, ..., LSB) */ + u8 linkptr[3]; /* Link Pointer (MSB, ..., LSB) */ + u8 commlinkid; /* Command Linking Identifier */ + u8 hastat; /* Host Adapter Status (HASTAT) */ + u8 tarstat; /* Target Device Status (TARSTAT) */ + u8 reserved[2]; + u8 cdb[MAX_CDB + MAX_SENSE]; /* SCSI Command Descriptor Block */ + /* followed by the Auto Sense data */ +}; + +#define AHA1542_REGION_SIZE 4 +#define AHA1542_MAILBOXES 8 + +#endif /* _AHA1542_H_ */ diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c new file mode 100644 index 000000000..3d18945ab --- /dev/null +++ b/drivers/scsi/aha1740.c @@ -0,0 +1,684 @@ +/* $Id$ + * 1993/03/31 + * linux/kernel/aha1740.c + * + * Based loosely on aha1542.c which is + * Copyright (C) 1992 Tommy Thorn and + * Modified by Eric Youngdale + * + * This file is aha1740.c, written and + * Copyright (C) 1992,1993 Brad McLean + * brad@saturn.gaylord.com or brad@bradpc.gaylord.com. + * + * Modifications to makecode and queuecommand + * for proper handling of multiple devices courteously + * provided by Michael Weller, March, 1993 + * + * Multiple adapter support, extended translation detection, + * update to current scsi subsystem changes, proc fs support, + * working (!) module support based on patches from Andreas Arens, + * by Andreas Degert , 2/1997 + * + * aha1740_makecode may still need even more work + * if it doesn't work for your devices, take a look. 
+ * + * Reworked for new_eh and new locking by Alan Cox + * + * Converted to EISA and generic DMA APIs by Marc Zyngier + * , 4/2003. + * + * Shared interrupt support added by Rask Ingemann Lambertsen + * , 10/2003 + * + * For the avoidance of doubt the "preferred form" of this code is one which + * is in an open non patent encumbered format. Where cryptographic key signing + * forms part of the process of creating an executable the information + * including keys needed to generate an equivalently functional executable + * are deemed to be part of the source code. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include "aha1740.h" + +/* IF YOU ARE HAVING PROBLEMS WITH THIS DRIVER, AND WANT TO WATCH + IT WORK, THEN: +#define DEBUG +*/ +#ifdef DEBUG +#define DEB(x) x +#else +#define DEB(x) +#endif + +struct aha1740_hostdata { + struct eisa_device *edev; + unsigned int translation; + unsigned int last_ecb_used; + dma_addr_t ecb_dma_addr; + struct ecb ecb[AHA1740_ECBS]; +}; + +struct aha1740_sg { + struct aha1740_chain sg_chain[AHA1740_SCATTER]; + dma_addr_t sg_dma_addr; + dma_addr_t buf_dma_addr; +}; + +#define HOSTDATA(host) ((struct aha1740_hostdata *) &host->hostdata) + +static inline struct ecb *ecb_dma_to_cpu (struct Scsi_Host *host, + dma_addr_t dma) +{ + struct aha1740_hostdata *hdata = HOSTDATA (host); + dma_addr_t offset; + + offset = dma - hdata->ecb_dma_addr; + + return (struct ecb *)(((char *) hdata->ecb) + (unsigned int) offset); +} + +static inline dma_addr_t ecb_cpu_to_dma (struct Scsi_Host *host, void *cpu) +{ + struct aha1740_hostdata *hdata = HOSTDATA (host); + dma_addr_t offset; + + offset = (char *) cpu - (char *) hdata->ecb; + + return hdata->ecb_dma_addr + offset; +} + +static int aha1740_show_info(struct seq_file *m, struct Scsi_Host *shpnt) +{ + struct aha1740_hostdata *host = HOSTDATA(shpnt); + seq_printf(m, "aha174x at IO:%lx, IRQ %d, SLOT %d.\n" + "Extended translation %sabled.\n", + shpnt->io_port, shpnt->irq, host->edev->slot, + host->translation ? "en" : "dis"); + return 0; +} + +static int aha1740_makecode(unchar *sense, unchar *status) +{ + struct statusword + { + ushort don:1, /* Command Done - No Error */ + du:1, /* Data underrun */ + :1, qf:1, /* Queue full */ + sc:1, /* Specification Check */ + dor:1, /* Data overrun */ + ch:1, /* Chaining Halted */ + intr:1, /* Interrupt issued */ + asa:1, /* Additional Status Available */ + sns:1, /* Sense information Stored */ + :1, ini:1, /* Initialization Required */ + me:1, /* Major error or exception */ + :1, eca:1, /* Extended Contingent alliance */ + :1; + } status_word; + int retval = DID_OK; + + status_word = * (struct statusword *) status; +#ifdef DEBUG + printk("makecode from %x,%x,%x,%x %x,%x,%x,%x", + status[0], status[1], status[2], status[3], + sense[0], sense[1], sense[2], sense[3]); +#endif + if (!status_word.don) { /* Anything abnormal was detected */ + if ( (status[1]&0x18) || status_word.sc ) { + /*Additional info available*/ + /* Use the supplied info for further diagnostics */ + switch ( status[2] ) { + case 0x12: + if ( status_word.dor ) + retval=DID_ERROR; /* It's an Overrun */ + /* If not overrun, assume underrun and + * ignore it! 
*/ + break; + case 0x00: /* No info, assume no error, should + * not occur */ + break; + case 0x11: + case 0x21: + retval=DID_TIME_OUT; + break; + case 0x0a: + retval=DID_BAD_TARGET; + break; + case 0x04: + case 0x05: + retval=DID_ABORT; + /* Either by this driver or the + * AHA1740 itself */ + break; + default: + retval=DID_ERROR; /* No further + * diagnostics + * possible */ + } + } else { + /* Michael suggests, and Brad concurs: */ + if ( status_word.qf ) { + retval = DID_TIME_OUT; /* forces a redo */ + /* I think this specific one should + * not happen -Brad */ + printk("aha1740.c: WARNING: AHA1740 queue overflow!\n"); + } else + if ( status[0]&0x60 ) { + /* Didn't find a better error */ + retval = DID_ERROR; + } + /* In any other case return DID_OK so for example + CONDITION_CHECKS make it through to the appropriate + device driver */ + } + } + /* Under all circumstances supply the target status -Michael */ + return status[3] | retval << 16; +} + +static int aha1740_test_port(unsigned int base) +{ + if ( inb(PORTADR(base)) & PORTADDR_ENH ) + return 1; /* Okay, we're all set */ + + printk("aha174x: Board detected, but not in enhanced mode, so disabled it.\n"); + return 0; +} + +/* A "high" level interrupt handler */ +static irqreturn_t aha1740_intr_handle(int irq, void *dev_id) +{ + struct Scsi_Host *host = (struct Scsi_Host *) dev_id; + void (*my_done)(struct scsi_cmnd *); + int errstatus, adapstat; + int number_serviced; + struct ecb *ecbptr; + struct scsi_cmnd *SCtmp; + unsigned int base; + unsigned long flags; + int handled = 0; + struct aha1740_sg *sgptr; + struct eisa_device *edev; + + if (!host) + panic("aha1740.c: Irq from unknown host!\n"); + spin_lock_irqsave(host->host_lock, flags); + base = host->io_port; + number_serviced = 0; + edev = HOSTDATA(host)->edev; + + while(inb(G2STAT(base)) & G2STAT_INTPEND) { + handled = 1; + DEB(printk("aha1740_intr top of loop.\n")); + adapstat = inb(G2INTST(base)); + ecbptr = ecb_dma_to_cpu (host, inl(MBOXIN0(base))); + outb(G2CNTRL_IRST,G2CNTRL(base)); /* interrupt reset */ + + switch ( adapstat & G2INTST_MASK ) { + case G2INTST_CCBRETRY: + case G2INTST_CCBERROR: + case G2INTST_CCBGOOD: + /* Host Ready -> Mailbox in complete */ + outb(G2CNTRL_HRDY,G2CNTRL(base)); + if (!ecbptr) { + printk("Aha1740 null ecbptr in interrupt (%x,%x,%x,%d)\n", + inb(G2STAT(base)),adapstat, + inb(G2INTST(base)), number_serviced++); + continue; + } + SCtmp = ecbptr->SCpnt; + if (!SCtmp) { + printk("Aha1740 null SCtmp in interrupt (%x,%x,%x,%d)\n", + inb(G2STAT(base)),adapstat, + inb(G2INTST(base)), number_serviced++); + continue; + } + sgptr = (struct aha1740_sg *) SCtmp->host_scribble; + scsi_dma_unmap(SCtmp); + + /* Free the sg block */ + dma_free_coherent (&edev->dev, + sizeof (struct aha1740_sg), + SCtmp->host_scribble, + sgptr->sg_dma_addr); + + /* Fetch the sense data, and tuck it away, in + the required slot. 
The Adaptec + automatically fetches it, and there is no + guarantee that we will still have it in the + cdb when we come back */ + if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR ) { + memcpy_and_pad(SCtmp->sense_buffer, + SCSI_SENSE_BUFFERSIZE, + ecbptr->sense, + sizeof(ecbptr->sense), + 0); + errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status); + } else + errstatus = 0; + DEB(if (errstatus) + printk("aha1740_intr_handle: returning %6x\n", + errstatus)); + SCtmp->result = errstatus; + my_done = ecbptr->done; + memset(ecbptr,0,sizeof(struct ecb)); + if ( my_done ) + my_done(SCtmp); + break; + + case G2INTST_HARDFAIL: + printk(KERN_ALERT "aha1740 hardware failure!\n"); + panic("aha1740.c"); /* Goodbye */ + + case G2INTST_ASNEVENT: + printk("aha1740 asynchronous event: %02x %02x %02x %02x %02x\n", + adapstat, + inb(MBOXIN0(base)), + inb(MBOXIN1(base)), + inb(MBOXIN2(base)), + inb(MBOXIN3(base))); /* Say What? */ + /* Host Ready -> Mailbox in complete */ + outb(G2CNTRL_HRDY,G2CNTRL(base)); + break; + + case G2INTST_CMDGOOD: + /* set immediate command success flag here: */ + break; + + case G2INTST_CMDERROR: + /* Set immediate command failure flag here: */ + break; + } + number_serviced++; + } + + spin_unlock_irqrestore(host->host_lock, flags); + return IRQ_RETVAL(handled); +} + +static int aha1740_queuecommand_lck(struct scsi_cmnd *SCpnt) +{ + void (*done)(struct scsi_cmnd *) = scsi_done; + unchar direction; + unchar *cmd = (unchar *) SCpnt->cmnd; + unchar target = scmd_id(SCpnt); + struct aha1740_hostdata *host = HOSTDATA(SCpnt->device->host); + unsigned long flags; + dma_addr_t sg_dma; + struct aha1740_sg *sgptr; + int ecbno, nseg; + DEB(int i); + + if(*cmd == REQUEST_SENSE) { + SCpnt->result = 0; + done(SCpnt); + return 0; + } + +#ifdef DEBUG + if (*cmd == READ_10 || *cmd == WRITE_10) + i = xscsi2int(cmd+2); + else if (*cmd == READ_6 || *cmd == WRITE_6) + i = scsi2int(cmd+2); + else + i = -1; + printk("aha1740_queuecommand: dev %d cmd %02x pos %d len %d ", + target, *cmd, i, bufflen); + printk("scsi cmd:"); + for (i = 0; i < SCpnt->cmd_len; i++) printk("%02x ", cmd[i]); + printk("\n"); +#endif + + /* locate an available ecb */ + spin_lock_irqsave(SCpnt->device->host->host_lock, flags); + ecbno = host->last_ecb_used + 1; /* An optimization */ + if (ecbno >= AHA1740_ECBS) + ecbno = 0; + do { + if (!host->ecb[ecbno].cmdw) + break; + ecbno++; + if (ecbno >= AHA1740_ECBS) + ecbno = 0; + } while (ecbno != host->last_ecb_used); + + if (host->ecb[ecbno].cmdw) + panic("Unable to find empty ecb for aha1740.\n"); + + host->ecb[ecbno].cmdw = AHA1740CMD_INIT; /* SCSI Initiator Command + doubles as reserved flag */ + + host->last_ecb_used = ecbno; + spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags); + +#ifdef DEBUG + printk("Sending command (%d %x)...", ecbno, done); +#endif + + host->ecb[ecbno].cdblen = SCpnt->cmd_len; /* SCSI Command + * Descriptor Block + * Length */ + + direction = 0; + if (*cmd == READ_10 || *cmd == READ_6) + direction = 1; + else if (*cmd == WRITE_10 || *cmd == WRITE_6) + direction = 0; + + memcpy(host->ecb[ecbno].cdb, cmd, SCpnt->cmd_len); + + SCpnt->host_scribble = dma_alloc_coherent (&host->edev->dev, + sizeof (struct aha1740_sg), + &sg_dma, GFP_ATOMIC); + if(SCpnt->host_scribble == NULL) { + printk(KERN_WARNING "aha1740: out of memory in queuecommand!\n"); + return 1; + } + sgptr = (struct aha1740_sg *) SCpnt->host_scribble; + sgptr->sg_dma_addr = sg_dma; + + nseg = scsi_dma_map(SCpnt); + BUG_ON(nseg < 0); + if (nseg) { + struct scatterlist *sg; + struct 
aha1740_chain * cptr; + int i; + DEB(unsigned char * ptr); + + host->ecb[ecbno].sg = 1; /* SCSI Initiator Command + * w/scatter-gather*/ + cptr = sgptr->sg_chain; + scsi_for_each_sg(SCpnt, sg, nseg, i) { + cptr[i].datalen = sg_dma_len (sg); + cptr[i].dataptr = sg_dma_address (sg); + } + host->ecb[ecbno].datalen = nseg * sizeof(struct aha1740_chain); + host->ecb[ecbno].dataptr = sg_dma; +#ifdef DEBUG + printk("cptr %x: ",cptr); + ptr = (unsigned char *) cptr; + for(i=0;i<24;i++) printk("%02x ", ptr[i]); +#endif + } else { + host->ecb[ecbno].datalen = 0; + host->ecb[ecbno].dataptr = 0; + } + host->ecb[ecbno].lun = SCpnt->device->lun; + host->ecb[ecbno].ses = 1; /* Suppress underrun errors */ + host->ecb[ecbno].dir = direction; + host->ecb[ecbno].ars = 1; /* Yes, get the sense on an error */ + host->ecb[ecbno].senselen = 12; + host->ecb[ecbno].senseptr = ecb_cpu_to_dma (SCpnt->device->host, + host->ecb[ecbno].sense); + host->ecb[ecbno].statusptr = ecb_cpu_to_dma (SCpnt->device->host, + host->ecb[ecbno].status); + host->ecb[ecbno].done = done; + host->ecb[ecbno].SCpnt = SCpnt; +#ifdef DEBUG + { + int i; + printk("aha1740_command: sending.. "); + for (i = 0; i < sizeof(host->ecb[ecbno]) - 10; i++) + printk("%02x ", ((unchar *)&host->ecb[ecbno])[i]); + } + printk("\n"); +#endif + if (done) { + /* The Adaptec Spec says the card is so fast that the loops + will only be executed once in the code below. Even if this + was true with the fastest processors when the spec was + written, it doesn't seem to be true with today's fast + processors. We print a warning if the code is executed more + often than LOOPCNT_WARN. If this happens, it should be + investigated. If the count reaches LOOPCNT_MAX, we assume + something is broken; since there is no way to return an + error (the return value is ignored by the mid-level scsi + layer) we have to panic (and maybe that's the best thing we + can do then anyhow). */ + +#define LOOPCNT_WARN 10 /* excessive mbxout wait -> syslog-msg */ +#define LOOPCNT_MAX 1000000 /* mbxout deadlock -> panic() after ~ 2 sec. */ + int loopcnt; + unsigned int base = SCpnt->device->host->io_port; + DEB(printk("aha1740[%d] critical section\n",ecbno)); + + spin_lock_irqsave(SCpnt->device->host->host_lock, flags); + for (loopcnt = 0; ; loopcnt++) { + if (inb(G2STAT(base)) & G2STAT_MBXOUT) break; + if (loopcnt == LOOPCNT_WARN) { + printk("aha1740[%d]_mbxout wait!\n",ecbno); + } + if (loopcnt == LOOPCNT_MAX) + panic("aha1740.c: mbxout busy!\n"); + } + outl (ecb_cpu_to_dma (SCpnt->device->host, host->ecb + ecbno), + MBOXOUT0(base)); + for (loopcnt = 0; ; loopcnt++) { + if (! (inb(G2STAT(base)) & G2STAT_BUSY)) break; + if (loopcnt == LOOPCNT_WARN) { + printk("aha1740[%d]_attn wait!\n",ecbno); + } + if (loopcnt == LOOPCNT_MAX) + panic("aha1740.c: attn wait failed!\n"); + } + outb(ATTN_START | (target & 7), ATTN(base)); /* Start it up */ + spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags); + DEB(printk("aha1740[%d] request queued.\n",ecbno)); + } else + printk(KERN_ALERT "aha1740_queuecommand: done can't be NULL\n"); + return 0; +} + +static DEF_SCSI_QCMD(aha1740_queuecommand) + +/* Query the board for its irq_level and irq_type. Nothing else matters + in enhanced mode on an EISA bus. 
*/ + +static void aha1740_getconfig(unsigned int base, unsigned int *irq_level, + unsigned int *irq_type, + unsigned int *translation) +{ + static int intab[] = { 9, 10, 11, 12, 0, 14, 15, 0 }; + + *irq_level = intab[inb(INTDEF(base)) & 0x7]; + *irq_type = (inb(INTDEF(base)) & 0x8) >> 3; + *translation = inb(RESV1(base)) & 0x1; + outb(inb(INTDEF(base)) | 0x10, INTDEF(base)); +} + +static int aha1740_biosparam(struct scsi_device *sdev, + struct block_device *dev, + sector_t capacity, int* ip) +{ + int size = capacity; + int extended = HOSTDATA(sdev->host)->translation; + + DEB(printk("aha1740_biosparam\n")); + if (extended && (ip[2] > 1024)) { + ip[0] = 255; + ip[1] = 63; + ip[2] = size / (255 * 63); + } else { + ip[0] = 64; + ip[1] = 32; + ip[2] = size >> 11; + } + return 0; +} + +static int aha1740_eh_abort_handler (struct scsi_cmnd *dummy) +{ +/* + * From Alan Cox : + * The AHA1740 has firmware handled abort/reset handling. The "head in + * sand" kernel code is correct for once 8) + * + * So we define a dummy handler just to keep the kernel SCSI code as + * quiet as possible... + */ + + return SUCCESS; +} + +static const struct scsi_host_template aha1740_template = { + .module = THIS_MODULE, + .proc_name = "aha1740", + .show_info = aha1740_show_info, + .name = "Adaptec 174x (EISA)", + .queuecommand = aha1740_queuecommand, + .bios_param = aha1740_biosparam, + .can_queue = AHA1740_ECBS, + .this_id = 7, + .sg_tablesize = AHA1740_SCATTER, + .eh_abort_handler = aha1740_eh_abort_handler, +}; + +static int aha1740_probe (struct device *dev) +{ + int slotbase, rc; + unsigned int irq_level, irq_type, translation; + struct Scsi_Host *shpnt; + struct aha1740_hostdata *host; + struct eisa_device *edev = to_eisa_device (dev); + + DEB(printk("aha1740_probe: \n")); + + slotbase = edev->base_addr + EISA_VENDOR_ID_OFFSET; + if (!request_region(slotbase, SLOTSIZE, "aha1740")) /* See if in use */ + return -EBUSY; + if (!aha1740_test_port(slotbase)) + goto err_release_region; + aha1740_getconfig(slotbase,&irq_level,&irq_type,&translation); + if ((inb(G2STAT(slotbase)) & + (G2STAT_MBXOUT|G2STAT_BUSY)) != G2STAT_MBXOUT) { + /* If the card isn't ready, hard reset it */ + outb(G2CNTRL_HRST, G2CNTRL(slotbase)); + outb(0, G2CNTRL(slotbase)); + } + printk(KERN_INFO "Configuring slot %d at IO:%x, IRQ %u (%s)\n", + edev->slot, slotbase, irq_level, irq_type ? "edge" : "level"); + printk(KERN_INFO "aha174x: Extended translation %sabled.\n", + translation ? "en" : "dis"); + shpnt = scsi_host_alloc(&aha1740_template, + sizeof(struct aha1740_hostdata)); + if(shpnt == NULL) + goto err_release_region; + + shpnt->base = 0; + shpnt->io_port = slotbase; + shpnt->n_io_port = SLOTSIZE; + shpnt->irq = irq_level; + shpnt->dma_channel = 0xff; + host = HOSTDATA(shpnt); + host->edev = edev; + host->translation = translation; + host->ecb_dma_addr = dma_map_single (&edev->dev, host->ecb, + sizeof (host->ecb), + DMA_BIDIRECTIONAL); + if (!host->ecb_dma_addr) { + printk (KERN_ERR "aha1740_probe: Couldn't map ECB, giving up\n"); + goto err_host_put; + } + + DEB(printk("aha1740_probe: enable interrupt channel %d\n",irq_level)); + if (request_irq(irq_level,aha1740_intr_handle,irq_type ? 
0 : IRQF_SHARED, + "aha1740",shpnt)) { + printk(KERN_ERR "aha1740_probe: Unable to allocate IRQ %d.\n", + irq_level); + goto err_unmap; + } + + eisa_set_drvdata (edev, shpnt); + + rc = scsi_add_host (shpnt, dev); + if (rc) + goto err_irq; + + scsi_scan_host (shpnt); + return 0; + + err_irq: + free_irq(irq_level, shpnt); + err_unmap: + dma_unmap_single (&edev->dev, host->ecb_dma_addr, + sizeof (host->ecb), DMA_BIDIRECTIONAL); + err_host_put: + scsi_host_put (shpnt); + err_release_region: + release_region(slotbase, SLOTSIZE); + + return -ENODEV; +} + +static int aha1740_remove (struct device *dev) +{ + struct Scsi_Host *shpnt = dev_get_drvdata(dev); + struct aha1740_hostdata *host = HOSTDATA (shpnt); + + scsi_remove_host(shpnt); + + free_irq (shpnt->irq, shpnt); + dma_unmap_single (dev, host->ecb_dma_addr, + sizeof (host->ecb), DMA_BIDIRECTIONAL); + release_region (shpnt->io_port, SLOTSIZE); + + scsi_host_put (shpnt); + + return 0; +} + +static struct eisa_device_id aha1740_ids[] = { + { "ADP0000" }, /* 1740 */ + { "ADP0001" }, /* 1740A */ + { "ADP0002" }, /* 1742A */ + { "ADP0400" }, /* 1744 */ + { "" } +}; +MODULE_DEVICE_TABLE(eisa, aha1740_ids); + +static struct eisa_driver aha1740_driver = { + .id_table = aha1740_ids, + .driver = { + .name = "aha1740", + .probe = aha1740_probe, + .remove = aha1740_remove, + }, +}; + +static __init int aha1740_init (void) +{ + return eisa_driver_register (&aha1740_driver); +} + +static __exit void aha1740_exit (void) +{ + eisa_driver_unregister (&aha1740_driver); +} + +module_init (aha1740_init); +module_exit (aha1740_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/aha1740.h b/drivers/scsi/aha1740.h new file mode 100644 index 000000000..6eeed6da0 --- /dev/null +++ b/drivers/scsi/aha1740.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _AHA1740_H + +/* $Id$ + * + * Header file for the adaptec 1740 driver for Linux + * + * With minor revisions 3/31/93 + * Written and (C) 1992,1993 Brad McLean. 
See aha1740.c + * for more info + * + */ + +#include + +#define SLOTSIZE 0x5c + +/* EISA configuration registers & values */ +#define HID0(base) (base + 0x0) +#define HID1(base) (base + 0x1) +#define HID2(base) (base + 0x2) +#define HID3(base) (base + 0x3) +#define EBCNTRL(base) (base + 0x4) +#define PORTADR(base) (base + 0x40) +#define BIOSADR(base) (base + 0x41) +#define INTDEF(base) (base + 0x42) +#define SCSIDEF(base) (base + 0x43) +#define BUSDEF(base) (base + 0x44) +#define RESV0(base) (base + 0x45) +#define RESV1(base) (base + 0x46) +#define RESV2(base) (base + 0x47) + +#define HID_MFG "ADP" +#define HID_PRD 0 +#define HID_REV 2 +#define EBCNTRL_VALUE 1 +#define PORTADDR_ENH 0x80 +/* READ */ +#define G2INTST(base) (base + 0x56) +#define G2STAT(base) (base + 0x57) +#define MBOXIN0(base) (base + 0x58) +#define MBOXIN1(base) (base + 0x59) +#define MBOXIN2(base) (base + 0x5a) +#define MBOXIN3(base) (base + 0x5b) +#define G2STAT2(base) (base + 0x5c) + +#define G2INTST_MASK 0xf0 /* isolate the status */ +#define G2INTST_CCBGOOD 0x10 /* CCB Completed */ +#define G2INTST_CCBRETRY 0x50 /* CCB Completed with a retry */ +#define G2INTST_HARDFAIL 0x70 /* Adapter Hardware Failure */ +#define G2INTST_CMDGOOD 0xa0 /* Immediate command success */ +#define G2INTST_CCBERROR 0xc0 /* CCB Completed with error */ +#define G2INTST_ASNEVENT 0xd0 /* Asynchronous Event Notification */ +#define G2INTST_CMDERROR 0xe0 /* Immediate command error */ + +#define G2STAT_MBXOUT 4 /* Mailbox Out Empty Bit */ +#define G2STAT_INTPEND 2 /* Interrupt Pending Bit */ +#define G2STAT_BUSY 1 /* Busy Bit (attention pending) */ + +#define G2STAT2_READY 0 /* Host Ready Bit */ + +/* WRITE (and ReadBack) */ +#define MBOXOUT0(base) (base + 0x50) +#define MBOXOUT1(base) (base + 0x51) +#define MBOXOUT2(base) (base + 0x52) +#define MBOXOUT3(base) (base + 0x53) +#define ATTN(base) (base + 0x54) +#define G2CNTRL(base) (base + 0x55) + +#define ATTN_IMMED 0x10 /* Immediate Command */ +#define ATTN_START 0x40 /* Start CCB */ +#define ATTN_ABORT 0x50 /* Abort CCB */ + +#define G2CNTRL_HRST 0x80 /* Hard Reset */ +#define G2CNTRL_IRST 0x40 /* Clear EISA Interrupt */ +#define G2CNTRL_HRDY 0x20 /* Sets HOST ready */ + +/* This is used with scatter-gather */ +struct aha1740_chain { + u32 dataptr; /* Location of data */ + u32 datalen; /* Size of this part of chain */ +}; + +/* These belong in scsi.h */ +#define any2scsi(up, p) \ +(up)[0] = (((unsigned long)(p)) >> 16) ; \ +(up)[1] = (((unsigned long)(p)) >> 8); \ +(up)[2] = ((unsigned long)(p)); + +#define scsi2int(up) ( (((long)*(up)) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) ) + +#define xany2scsi(up, p) \ +(up)[0] = ((long)(p)) >> 24; \ +(up)[1] = ((long)(p)) >> 16; \ +(up)[2] = ((long)(p)) >> 8; \ +(up)[3] = ((long)(p)); + +#define xscsi2int(up) ( (((long)(up)[0]) << 24) + (((long)(up)[1]) << 16) \ + + (((long)(up)[2]) << 8) + ((long)(up)[3]) ) + +#define MAX_CDB 12 +#define MAX_SENSE 14 +#define MAX_STATUS 32 + +struct ecb { /* Enhanced Control Block 6.1 */ + u16 cmdw; /* Command Word */ + /* Flag Word 1 */ + u16 cne:1, /* Control Block Chaining */ + :6, di:1, /* Disable Interrupt */ + :2, ses:1, /* Suppress Underrun error */ + :1, sg:1, /* Scatter/Gather */ + :1, dsb:1, /* Disable Status Block */ + ars:1; /* Automatic Request Sense */ + /* Flag Word 2 */ + u16 lun:3, /* Logical Unit */ + tag:1, /* Tagged Queuing */ + tt:2, /* Tag Type */ + nd:1, /* No Disconnect */ + :1, dat:1, /* Data transfer - check direction */ + dir:1, /* Direction of transfer 1 = datain */ + st:1, /* Suppress 
Transfer */ + chk:1, /* Calculate Checksum */ + :2, rec:1,:1; /* Error Recovery */ + u16 nil0; /* nothing */ + u32 dataptr; /* Data or Scatter List ptr */ + u32 datalen; /* Data or Scatter List len */ + u32 statusptr; /* Status Block ptr */ + u32 linkptr; /* Chain Address */ + u32 nil1; /* nothing */ + u32 senseptr; /* Sense Info Pointer */ + u8 senselen; /* Sense Length */ + u8 cdblen; /* CDB Length */ + u16 datacheck; /* Data checksum */ + u8 cdb[MAX_CDB]; /* CDB area */ +/* Hardware defined portion ends here, rest is driver defined */ + u8 sense[MAX_SENSE]; /* Sense area */ + u8 status[MAX_STATUS]; /* Status area */ + struct scsi_cmnd *SCpnt; /* Link to the SCSI Command Block */ + void (*done) (struct scsi_cmnd *); /* Completion Function */ +}; + +#define AHA1740CMD_NOP 0x00 /* No OP */ +#define AHA1740CMD_INIT 0x01 /* Initiator SCSI Command */ +#define AHA1740CMD_DIAG 0x05 /* Run Diagnostic Command */ +#define AHA1740CMD_SCSI 0x06 /* Initialize SCSI */ +#define AHA1740CMD_SENSE 0x08 /* Read Sense Information */ +#define AHA1740CMD_DOWN 0x09 /* Download Firmware (yeah, I bet!) */ +#define AHA1740CMD_RINQ 0x0a /* Read Host Adapter Inquiry Data */ +#define AHA1740CMD_TARG 0x10 /* Target SCSI Command */ + +#define AHA1740_ECBS 32 +#define AHA1740_SCATTER 16 + +#endif diff --git a/drivers/scsi/aic7xxx/.gitignore b/drivers/scsi/aic7xxx/.gitignore new file mode 100644 index 000000000..9aa780221 --- /dev/null +++ b/drivers/scsi/aic7xxx/.gitignore @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +aic79xx_reg.h +aic79xx_reg_print.c +aic79xx_seq.h +aic7xxx_reg.h +aic7xxx_reg_print.c +aic7xxx_seq.h diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx new file mode 100644 index 000000000..4bc53eec4 --- /dev/null +++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# AIC79XX 2.5.X Kernel configuration File. +# $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Kconfig.aic79xx#4 $ +# +config SCSI_AIC79XX + tristate "Adaptec AIC79xx U320 support" + depends on PCI && HAS_IOPORT && SCSI + select SCSI_SPI_ATTRS + help + This driver supports all of Adaptec's Ultra 320 PCI-X + based SCSI controllers. + +config AIC79XX_CMDS_PER_DEVICE + int "Maximum number of TCQ commands per device" + depends on SCSI_AIC79XX + default "32" + help + Specify the number of commands you would like to allocate per SCSI + device when Tagged Command Queueing (TCQ) is enabled on that device. + + This is an upper bound value for the number of tagged transactions + to be used for any device. The aic7xxx driver will automatically + vary this number based on device behavior. For devices with a + fixed maximum, the driver will eventually lock to this maximum + and display a console message indicating this value. + + Due to resource allocation issues in the Linux SCSI mid-layer, using + a high number of commands per device may result in memory allocation + failures when many devices are attached to the system. For this reason, + the default is set to 32. Higher values may result in higher performance + on some devices. The upper bound is 253. 0 disables tagged queueing. + + Per device tag depth can be controlled via the kernel command line + "tag_info" option. See Documentation/scsi/aic79xx.rst for details. + +config AIC79XX_RESET_DELAY_MS + int "Initial bus reset delay in milli-seconds" + depends on SCSI_AIC79XX + default "5000" + help + The number of milliseconds to delay after an initial bus reset. 
+ The bus settle delay following all error recovery actions is + dictated by the SCSI layer and is not affected by this value. + + Default: 5000 (5 seconds) + +config AIC79XX_BUILD_FIRMWARE + bool "Build Adapter Firmware with Kernel Build" + depends on SCSI_AIC79XX && !PREVENT_FIRMWARE_BUILD + help + This option should only be enabled if you are modifying the firmware + source to the aic79xx driver and wish to have the generated firmware + include files updated during a normal kernel build. The assembler + for the firmware requires lex and yacc or their equivalents, as well + as the db v1 library. You may have to install additional packages + or modify the assembler Makefile or the files it includes if your + build environment is different than that of the author. + +config AIC79XX_DEBUG_ENABLE + bool "Compile in Debugging Code" + depends on SCSI_AIC79XX + default y + help + Compile in aic79xx debugging code that can be useful in diagnosing + driver errors. + +config AIC79XX_DEBUG_MASK + int "Debug code enable mask (16383 for all debugging)" + depends on SCSI_AIC79XX + default "0" + help + Bit mask of debug options that is only valid if the + CONFIG_AIC79XX_DEBUG_ENABLE option is enabled. The bits in this mask + are defined in the drivers/scsi/aic7xxx/aic79xx.h - search for the + variable ahd_debug in that file to find them. + +config AIC79XX_REG_PRETTY_PRINT + bool "Decode registers during diagnostics" + depends on SCSI_AIC79XX + default y + help + Compile in register value tables for the output of expanded register + contents in diagnostics. This make it much easier to understand debug + output without having to refer to a data book and/or the aic7xxx.reg + file. diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx new file mode 100644 index 000000000..f0425145a --- /dev/null +++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx @@ -0,0 +1,91 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# AIC7XXX and AIC79XX 2.5.X Kernel configuration File. +# $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Kconfig.aic7xxx#7 $ +# +config SCSI_AIC7XXX + tristate "Adaptec AIC7xxx Fast -> U160 support" + depends on (PCI || EISA) && HAS_IOPORT && SCSI + select SCSI_SPI_ATTRS + help + This driver supports all of Adaptec's Fast through Ultra 160 PCI + based SCSI controllers as well as the aic7770 based EISA and VLB + SCSI controllers (the 274x and 284x series). For AAA and ARO based + configurations, only SCSI functionality is provided. + + To compile this driver as a module, choose M here: the + module will be called aic7xxx. + +config AIC7XXX_CMDS_PER_DEVICE + int "Maximum number of TCQ commands per device" + depends on SCSI_AIC7XXX + default "32" + help + Specify the number of commands you would like to allocate per SCSI + device when Tagged Command Queueing (TCQ) is enabled on that device. + + This is an upper bound value for the number of tagged transactions + to be used for any device. The aic7xxx driver will automatically + vary this number based on device behavior. For devices with a + fixed maximum, the driver will eventually lock to this maximum + and display a console message indicating this value. + + Due to resource allocation issues in the Linux SCSI mid-layer, using + a high number of commands per device may result in memory allocation + failures when many devices are attached to the system. For this reason, + the default is set to 32. Higher values may result in higher performance + on some devices. The upper bound is 253. 0 disables tagged queueing. 
+ + Per device tag depth can be controlled via the kernel command line + "tag_info" option. See Documentation/scsi/aic7xxx.rst for details. + +config AIC7XXX_RESET_DELAY_MS + int "Initial bus reset delay in milli-seconds" + depends on SCSI_AIC7XXX + default "5000" + help + The number of milliseconds to delay after an initial bus reset. + The bus settle delay following all error recovery actions is + dictated by the SCSI layer and is not affected by this value. + + Default: 5000 (5 seconds) + +config AIC7XXX_BUILD_FIRMWARE + bool "Build Adapter Firmware with Kernel Build" + depends on SCSI_AIC7XXX && !PREVENT_FIRMWARE_BUILD + help + This option should only be enabled if you are modifying the firmware + source to the aic7xxx driver and wish to have the generated firmware + include files updated during a normal kernel build. The assembler + for the firmware requires lex and yacc or their equivalents, as well + as the db v1 library. You may have to install additional packages + or modify the assembler Makefile or the files it includes if your + build environment is different than that of the author. + +config AIC7XXX_DEBUG_ENABLE + bool "Compile in Debugging Code" + depends on SCSI_AIC7XXX + default y + help + Compile in aic7xxx debugging code that can be useful in diagnosing + driver errors. + +config AIC7XXX_DEBUG_MASK + int "Debug code enable mask (2047 for all debugging)" + depends on SCSI_AIC7XXX + default "0" + help + Bit mask of debug options that is only valid if the + CONFIG_AIC7XXX_DEBUG_ENABLE option is enabled. The bits in this mask + are defined in the drivers/scsi/aic7xxx/aic7xxx.h - search for the + variable ahc_debug in that file to find them. + +config AIC7XXX_REG_PRETTY_PRINT + bool "Decode registers during diagnostics" + depends on SCSI_AIC7XXX + default y + help + Compile in register value tables for the output of expanded register + contents in diagnostics. This make it much easier to understand debug + output without having to refer to a data book and/or the aic7xxx.reg + file. diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile new file mode 100644 index 000000000..e0188ecd8 --- /dev/null +++ b/drivers/scsi/aic7xxx/Makefile @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux aic7xxx SCSI driver. 
+# +# $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Makefile#8 $ +# + +# Let kbuild descend into aicasm when cleaning +subdir- += aicasm + +obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx.o +obj-$(CONFIG_SCSI_AIC79XX) += aic79xx.o + +# Core Fast -> U160 files +aic7xxx-y += aic7xxx_core.o \ + aic7xxx_93cx6.o +aic7xxx-$(CONFIG_EISA) += aic7770.o +aic7xxx-$(CONFIG_PCI) += aic7xxx_pci.o +aic7xxx-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) += aic7xxx_reg_print.o + +# Platform Specific Fast -> U160 Files +aic7xxx-y += aic7xxx_osm.o \ + aic7xxx_proc.o +aic7xxx-$(CONFIG_EISA) += aic7770_osm.o +aic7xxx-$(CONFIG_PCI) += aic7xxx_osm_pci.o + +# Core U320 files +aic79xx-y += aic79xx_core.o \ + aic79xx_pci.o +aic79xx-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) += aic79xx_reg_print.o + +# Platform Specific U320 Files +aic79xx-y += aic79xx_osm.o \ + aic79xx_proc.o \ + aic79xx_osm_pci.o + +ifdef WARNINGS_BECOME_ERRORS +ccflags-y += -Werror +endif + +# Files generated that shall be removed upon make clean +clean-files := aic7xxx_seq.h aic7xxx_reg.h aic7xxx_reg_print.c +clean-files += aic79xx_seq.h aic79xx_reg.h aic79xx_reg_print.c + +# Dependencies for generated files need to be listed explicitly + +$(addprefix $(obj)/,$(aic7xxx-y)): $(obj)/aic7xxx_seq.h $(obj)/aic7xxx_reg.h +$(addprefix $(obj)/,$(aic79xx-y)): $(obj)/aic79xx_seq.h $(obj)/aic79xx_reg.h + +aic7xxx-gen-$(CONFIG_AIC7XXX_BUILD_FIRMWARE) := $(obj)/aic7xxx_reg.h +aic7xxx-gen-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) += $(obj)/aic7xxx_reg_print.c + +aicasm-7xxx-opts-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) := \ + -p $(obj)/aic7xxx_reg_print.c -i aic7xxx_osm.h + +ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y) +$(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm + $(obj)/aicasm/aicasm -I$(srctree)/$(src) -r $(obj)/aic7xxx_reg.h \ + $(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \ + $(srctree)/$(src)/aic7xxx.seq + +$(aic7xxx-gen-y): $(objtree)/$(obj)/aic7xxx_seq.h + @true +else +$(obj)/aic7xxx_reg_print.c: $(src)/aic7xxx_reg_print.c_shipped +endif + +aic79xx-gen-$(CONFIG_AIC79XX_BUILD_FIRMWARE) := $(obj)/aic79xx_reg.h +aic79xx-gen-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) += $(obj)/aic79xx_reg_print.c + +aicasm-79xx-opts-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) := \ + -p $(obj)/aic79xx_reg_print.c -i aic79xx_osm.h + +ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y) +$(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm + $(obj)/aicasm/aicasm -I$(srctree)/$(src) -r $(obj)/aic79xx_reg.h \ + $(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \ + $(srctree)/$(src)/aic79xx.seq + +$(aic79xx-gen-y): $(objtree)/$(obj)/aic79xx_seq.h + @true +else +$(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped +endif + +$(obj)/aicasm/aicasm: $(srctree)/$(src)/aicasm/*.[chyl] + $(MAKE) -C $(srctree)/$(src)/aicasm OUTDIR=$(shell pwd)/$(obj)/aicasm/ diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c new file mode 100644 index 000000000..176704b24 --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7770.c @@ -0,0 +1,385 @@ +/* + * Product specific probe and attach routines for: + * 27/284X and aic7770 motherboard SCSI controllers + * + * Copyright (c) 1994-1998, 2000, 2001 Justin T. Gibbs. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aic7770.c#32 $ + * + * $FreeBSD$ + */ + +#include "aic7xxx_osm.h" +#include "aic7xxx_inline.h" +#include "aic7xxx_93cx6.h" + +#define ID_AIC7770 0x04907770 +#define ID_AHA_274x 0x04907771 +#define ID_AHA_284xB 0x04907756 /* BIOS enabled */ +#define ID_AHA_284x 0x04907757 /* BIOS disabled*/ +#define ID_OLV_274x 0x04907782 /* Olivetti OEM */ +#define ID_OLV_274xD 0x04907783 /* Olivetti OEM (Differential) */ + +static int aic7770_chip_init(struct ahc_softc *ahc); +static int aha2840_load_seeprom(struct ahc_softc *ahc); +static ahc_device_setup_t ahc_aic7770_VL_setup; +static ahc_device_setup_t ahc_aic7770_EISA_setup; +static ahc_device_setup_t ahc_aic7770_setup; + +struct aic7770_identity aic7770_ident_table[] = +{ + { + ID_AHA_274x, + 0xFFFFFFFF, + "Adaptec 274X SCSI adapter", + ahc_aic7770_EISA_setup + }, + { + ID_AHA_284xB, + 0xFFFFFFFE, + "Adaptec 284X SCSI adapter", + ahc_aic7770_VL_setup + }, + { + ID_AHA_284x, + 0xFFFFFFFE, + "Adaptec 284X SCSI adapter (BIOS Disabled)", + ahc_aic7770_VL_setup + }, + { + ID_OLV_274x, + 0xFFFFFFFF, + "Adaptec (Olivetti OEM) 274X SCSI adapter", + ahc_aic7770_EISA_setup + }, + { + ID_OLV_274xD, + 0xFFFFFFFF, + "Adaptec (Olivetti OEM) 274X Differential SCSI adapter", + ahc_aic7770_EISA_setup + }, + /* Generic chip probes for devices we don't know 'exactly' */ + { + ID_AIC7770, + 0xFFFFFFFF, + "Adaptec aic7770 SCSI adapter", + ahc_aic7770_EISA_setup + } +}; +const int ahc_num_aic7770_devs = ARRAY_SIZE(aic7770_ident_table); + +struct aic7770_identity * +aic7770_find_device(uint32_t id) +{ + struct aic7770_identity *entry; + int i; + + for (i = 0; i < ahc_num_aic7770_devs; i++) { + entry = &aic7770_ident_table[i]; + if (entry->full_id == (id & entry->id_mask)) + return (entry); + } + return (NULL); +} + +int +aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io) +{ + int error; + int have_seeprom; + u_int hostconf; + u_int irq; + u_int intdef; + + error = entry->setup(ahc); + have_seeprom = 0; + if (error != 0) + return (error); + + error = 
aic7770_map_registers(ahc, io); + if (error != 0) + return (error); + + /* + * Before we continue probing the card, ensure that + * its interrupts are *disabled*. We don't want + * a misstep to hang the machine in an interrupt + * storm. + */ + ahc_intr_enable(ahc, FALSE); + + ahc->description = entry->name; + error = ahc_softc_init(ahc); + if (error != 0) + return (error); + + ahc->bus_chip_init = aic7770_chip_init; + + error = ahc_reset(ahc, /*reinit*/FALSE); + if (error != 0) + return (error); + + /* Make sure we have a valid interrupt vector */ + intdef = ahc_inb(ahc, INTDEF); + irq = intdef & VECTOR; + switch (irq) { + case 9: + case 10: + case 11: + case 12: + case 14: + case 15: + break; + default: + printk("aic7770_config: invalid irq setting %d\n", intdef); + return (ENXIO); + } + + if ((intdef & EDGE_TRIG) != 0) + ahc->flags |= AHC_EDGE_INTERRUPT; + + switch (ahc->chip & (AHC_EISA|AHC_VL)) { + case AHC_EISA: + { + u_int biosctrl; + u_int scsiconf; + u_int scsiconf1; + + biosctrl = ahc_inb(ahc, HA_274_BIOSCTRL); + scsiconf = ahc_inb(ahc, SCSICONF); + scsiconf1 = ahc_inb(ahc, SCSICONF + 1); + + /* Get the primary channel information */ + if ((biosctrl & CHANNEL_B_PRIMARY) != 0) + ahc->flags |= 1; + + if ((biosctrl & BIOSMODE) == BIOSDISABLED) { + ahc->flags |= AHC_USEDEFAULTS; + } else { + if ((ahc->features & AHC_WIDE) != 0) { + ahc->our_id = scsiconf1 & HWSCSIID; + if (scsiconf & TERM_ENB) + ahc->flags |= AHC_TERM_ENB_A; + } else { + ahc->our_id = scsiconf & HSCSIID; + ahc->our_id_b = scsiconf1 & HSCSIID; + if (scsiconf & TERM_ENB) + ahc->flags |= AHC_TERM_ENB_A; + if (scsiconf1 & TERM_ENB) + ahc->flags |= AHC_TERM_ENB_B; + } + } + if ((ahc_inb(ahc, HA_274_BIOSGLOBAL) & HA_274_EXTENDED_TRANS)) + ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B; + break; + } + case AHC_VL: + { + have_seeprom = aha2840_load_seeprom(ahc); + break; + } + default: + break; + } + if (have_seeprom == 0) { + kfree(ahc->seep_config); + ahc->seep_config = NULL; + } + + /* + * Ensure autoflush is enabled + */ + ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~AUTOFLUSHDIS); + + /* Setup the FIFO threshold and the bus off time */ + hostconf = ahc_inb(ahc, HOSTCONF); + ahc_outb(ahc, BUSSPD, hostconf & DFTHRSH); + ahc_outb(ahc, BUSTIME, (hostconf << 2) & BOFF); + + ahc->bus_softc.aic7770_softc.busspd = hostconf & DFTHRSH; + ahc->bus_softc.aic7770_softc.bustime = (hostconf << 2) & BOFF; + + /* + * Generic aic7xxx initialization. + */ + error = ahc_init(ahc); + if (error != 0) + return (error); + + error = aic7770_map_int(ahc, irq); + if (error != 0) + return (error); + + ahc->init_level++; + + /* + * Enable the board's BUS drivers + */ + ahc_outb(ahc, BCTL, ENABLE); + return (0); +} + +static int +aic7770_chip_init(struct ahc_softc *ahc) +{ + ahc_outb(ahc, BUSSPD, ahc->bus_softc.aic7770_softc.busspd); + ahc_outb(ahc, BUSTIME, ahc->bus_softc.aic7770_softc.bustime); + ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~AUTOFLUSHDIS); + ahc_outb(ahc, BCTL, ENABLE); + return (ahc_chip_init(ahc)); +} + +/* + * Read the 284x SEEPROM. 
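+ *
+ * The 284x keeps its configuration in a C46 serial EEPROM that is
+ * bit-banged through the 2840-specific control/status registers; the
+ * seeprom_descriptor filled in below simply tells ahc_read_seeprom()
+ * which register offsets and bit masks drive the chip select, clock
+ * and data lines.  If the read fails or the checksum is bad, we fall
+ * back to AHC_USEDEFAULTS rather than trust partial data.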
+ */ +static int +aha2840_load_seeprom(struct ahc_softc *ahc) +{ + struct seeprom_descriptor sd; + struct seeprom_config *sc; + int have_seeprom; + uint8_t scsi_conf; + + sd.sd_ahc = ahc; + sd.sd_control_offset = SEECTL_2840; + sd.sd_status_offset = STATUS_2840; + sd.sd_dataout_offset = STATUS_2840; + sd.sd_chip = C46; + sd.sd_MS = 0; + sd.sd_RDY = EEPROM_TF; + sd.sd_CS = CS_2840; + sd.sd_CK = CK_2840; + sd.sd_DO = DO_2840; + sd.sd_DI = DI_2840; + sc = ahc->seep_config; + + if (bootverbose) + printk("%s: Reading SEEPROM...", ahc_name(ahc)); + have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc, + /*start_addr*/0, sizeof(*sc)/2); + + if (have_seeprom) { + + if (ahc_verify_cksum(sc) == 0) { + if(bootverbose) + printk ("checksum error\n"); + have_seeprom = 0; + } else if (bootverbose) { + printk("done.\n"); + } + } + + if (!have_seeprom) { + if (bootverbose) + printk("%s: No SEEPROM available\n", ahc_name(ahc)); + ahc->flags |= AHC_USEDEFAULTS; + } else { + /* + * Put the data we've collected down into SRAM + * where ahc_init will find it. + */ + int i; + int max_targ; + uint16_t discenable; + + max_targ = (ahc->features & AHC_WIDE) != 0 ? 16 : 8; + discenable = 0; + for (i = 0; i < max_targ; i++){ + uint8_t target_settings; + + target_settings = (sc->device_flags[i] & CFXFER) << 4; + if (sc->device_flags[i] & CFSYNCH) + target_settings |= SOFS; + if (sc->device_flags[i] & CFWIDEB) + target_settings |= WIDEXFER; + if (sc->device_flags[i] & CFDISC) + discenable |= (0x01 << i); + ahc_outb(ahc, TARG_SCSIRATE + i, target_settings); + } + ahc_outb(ahc, DISC_DSB, ~(discenable & 0xff)); + ahc_outb(ahc, DISC_DSB + 1, ~((discenable >> 8) & 0xff)); + + ahc->our_id = sc->brtime_id & CFSCSIID; + + scsi_conf = (ahc->our_id & 0x7); + if (sc->adapter_control & CFSPARITY) + scsi_conf |= ENSPCHK; + if (sc->adapter_control & CFRESETB) + scsi_conf |= RESET_SCSI; + + if (sc->bios_control & CF284XEXTEND) + ahc->flags |= AHC_EXTENDED_TRANS_A; + /* Set SCSICONF info */ + ahc_outb(ahc, SCSICONF, scsi_conf); + + if (sc->adapter_control & CF284XSTERM) + ahc->flags |= AHC_TERM_ENB_A; + } + return (have_seeprom); +} + +static int +ahc_aic7770_VL_setup(struct ahc_softc *ahc) +{ + int error; + + error = ahc_aic7770_setup(ahc); + ahc->chip |= AHC_VL; + return (error); +} + +static int +ahc_aic7770_EISA_setup(struct ahc_softc *ahc) +{ + int error; + + error = ahc_aic7770_setup(ahc); + ahc->chip |= AHC_EISA; + return (error); +} + +static int +ahc_aic7770_setup(struct ahc_softc *ahc) +{ + ahc->channel = 'A'; + ahc->channel_b = 'B'; + ahc->chip = AHC_AIC7770; + ahc->features = AHC_AIC7770_FE; + ahc->bugs |= AHC_TMODE_WIDEODD_BUG; + ahc->flags |= AHC_PAGESCBS; + ahc->instruction_ram_size = 448; + return (0); +} diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c new file mode 100644 index 000000000..bdd177e3d --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7770_osm.c @@ -0,0 +1,156 @@ +/* + * Linux driver attachment glue for aic7770 based controllers. + * + * Copyright (c) 2000-2003 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7770_osm.c#14 $ + */ + +#include "aic7xxx_osm.h" + +#include +#include + +int +aic7770_map_registers(struct ahc_softc *ahc, u_int port) +{ + /* + * Lock out other contenders for our i/o space. + */ + if (!request_region(port, AHC_EISA_IOSIZE, "aic7xxx")) + return (ENOMEM); + ahc->tag = BUS_SPACE_PIO; + ahc->bsh.ioport = port; + return (0); +} + +int +aic7770_map_int(struct ahc_softc *ahc, u_int irq) +{ + int error; + int shared; + + shared = 0; + if ((ahc->flags & AHC_EDGE_INTERRUPT) == 0) + shared = IRQF_SHARED; + + error = request_irq(irq, ahc_linux_isr, shared, "aic7xxx", ahc); + if (error == 0) + ahc->platform_data->irq = irq; + + return (-error); +} + +static int +aic7770_probe(struct device *dev) +{ + struct eisa_device *edev = to_eisa_device(dev); + u_int eisaBase = edev->base_addr+AHC_EISA_SLOT_OFFSET; + struct ahc_softc *ahc; + char buf[80]; + char *name; + int error; + + sprintf(buf, "ahc_eisa:%d", eisaBase >> 12); + name = kstrdup(buf, GFP_ATOMIC); + if (name == NULL) + return (ENOMEM); + ahc = ahc_alloc(&aic7xxx_driver_template, name); + if (ahc == NULL) + return (ENOMEM); + ahc->dev = dev; + error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data, + eisaBase); + if (error != 0) { + ahc->bsh.ioport = 0; + ahc_free(ahc); + return (error); + } + + dev_set_drvdata(dev, ahc); + + error = ahc_linux_register_host(ahc, &aic7xxx_driver_template); + return (error); +} + +static int +aic7770_remove(struct device *dev) +{ + struct ahc_softc *ahc = dev_get_drvdata(dev); + u_long s; + + if (ahc->platform_data && ahc->platform_data->host) + scsi_remove_host(ahc->platform_data->host); + + ahc_lock(ahc, &s); + ahc_intr_enable(ahc, FALSE); + ahc_unlock(ahc, &s); + + ahc_free(ahc); + return 0; +} + +static struct eisa_device_id aic7770_ids[] = { + { "ADP7771", 0 }, /* AHA 274x */ + { "ADP7756", 1 }, /* AHA 284x BIOS enabled */ + { "ADP7757", 2 }, /* AHA 284x BIOS disabled */ + { "ADP7782", 3 }, /* AHA 274x Olivetti OEM */ + { "ADP7783", 4 }, /* AHA 274x Olivetti OEM 
(Differential) */ + { "ADP7770", 5 }, /* AIC7770 generic */ + { "" } +}; +MODULE_DEVICE_TABLE(eisa, aic7770_ids); + +static struct eisa_driver aic7770_driver = { + .id_table = aic7770_ids, + .driver = { + .name = "aic7xxx", + .probe = aic7770_probe, + .remove = aic7770_remove, + } +}; + +int +ahc_linux_eisa_init(void) +{ + return eisa_driver_register(&aic7770_driver); +} + +void +ahc_linux_eisa_exit(void) +{ + eisa_driver_unregister(&aic7770_driver); +} diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h new file mode 100644 index 000000000..59e932181 --- /dev/null +++ b/drivers/scsi/aic7xxx/aic79xx.h @@ -0,0 +1,1465 @@ +/* + * Core definitions and data structures shareable across OS platforms. + * + * Copyright (c) 1994-2002 Justin T. Gibbs. + * Copyright (c) 2000-2002 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + * $Id: //depot/aic7xxx/aic7xxx/aic79xx.h#109 $ + * + * $FreeBSD$ + */ + +#ifndef _AIC79XX_H_ +#define _AIC79XX_H_ + +/* Register Definitions */ +#include "aic79xx_reg.h" + +/************************* Forward Declarations *******************************/ +struct ahd_platform_data; +struct scb_platform_data; + +/****************************** Useful Macros *********************************/ +#ifndef TRUE +#define TRUE 1 +#endif +#ifndef FALSE +#define FALSE 0 +#endif + +#define ALL_CHANNELS '\0' +#define ALL_TARGETS_MASK 0xFFFF +#define INITIATOR_WILDCARD (~0) +#define SCB_LIST_NULL 0xFF00 +#define SCB_LIST_NULL_LE (ahd_htole16(SCB_LIST_NULL)) +#define QOUTFIFO_ENTRY_VALID 0x80 +#define SCBID_IS_NULL(scbid) (((scbid) & 0xFF00 ) == SCB_LIST_NULL) + +#define SCSIID_TARGET(ahd, scsiid) \ + (((scsiid) & TID) >> TID_SHIFT) +#define SCSIID_OUR_ID(scsiid) \ + ((scsiid) & OID) +#define SCSIID_CHANNEL(ahd, scsiid) ('A') +#define SCB_IS_SCSIBUS_B(ahd, scb) (0) +#define SCB_GET_OUR_ID(scb) \ + SCSIID_OUR_ID((scb)->hscb->scsiid) +#define SCB_GET_TARGET(ahd, scb) \ + SCSIID_TARGET((ahd), (scb)->hscb->scsiid) +#define SCB_GET_CHANNEL(ahd, scb) \ + SCSIID_CHANNEL(ahd, (scb)->hscb->scsiid) +#define SCB_GET_LUN(scb) \ + ((scb)->hscb->lun) +#define SCB_GET_TARGET_OFFSET(ahd, scb) \ + SCB_GET_TARGET(ahd, scb) +#define SCB_GET_TARGET_MASK(ahd, scb) \ + (0x01 << (SCB_GET_TARGET_OFFSET(ahd, scb))) +#ifdef AHD_DEBUG +#define SCB_IS_SILENT(scb) \ + ((ahd_debug & AHD_SHOW_MASKED_ERRORS) == 0 \ + && (((scb)->flags & SCB_SILENT) != 0)) +#else +#define SCB_IS_SILENT(scb) \ + (((scb)->flags & SCB_SILENT) != 0) +#endif +/* + * TCLs have the following format: TTTTLLLLLLLL + */ +#define TCL_TARGET_OFFSET(tcl) \ + ((((tcl) >> 4) & TID) >> 4) +#define TCL_LUN(tcl) \ + (tcl & (AHD_NUM_LUNS - 1)) +#define BUILD_TCL(scsiid, lun) \ + ((lun) | (((scsiid) & TID) << 4)) +#define BUILD_TCL_RAW(target, channel, lun) \ + ((lun) | ((target) << 8)) + +#define SCB_GET_TAG(scb) \ + ahd_le16toh(scb->hscb->tag) + +#ifndef AHD_TARGET_MODE +#undef AHD_TMODE_ENABLE +#define AHD_TMODE_ENABLE 0 +#endif + +#define AHD_BUILD_COL_IDX(target, lun) \ + ((((u8)lun) << 4) | target) + +#define AHD_GET_SCB_COL_IDX(ahd, scb) \ + ((SCB_GET_LUN(scb) << 4) | SCB_GET_TARGET(ahd, scb)) + +#define AHD_SET_SCB_COL_IDX(scb, col_idx) \ +do { \ + (scb)->hscb->scsiid = ((col_idx) << TID_SHIFT) & TID; \ + (scb)->hscb->lun = ((col_idx) >> 4) & (AHD_NUM_LUNS_NONPKT-1); \ +} while (0) + +#define AHD_COPY_SCB_COL_IDX(dst, src) \ +do { \ + dst->hscb->scsiid = src->hscb->scsiid; \ + dst->hscb->lun = src->hscb->lun; \ +} while (0) + +#define AHD_NEVER_COL_IDX 0xFFFF + +/**************************** Driver Constants ********************************/ +/* + * The maximum number of supported targets. + */ +#define AHD_NUM_TARGETS 16 + +/* + * The maximum number of supported luns. + * The identify message only supports 64 luns in non-packetized transfers. + * You can have 2^64 luns when information unit transfers are enabled, + * but until we see a need to support that many, we support 256. + */ +#define AHD_NUM_LUNS_NONPKT 64 +#define AHD_NUM_LUNS 256 + +/* + * The maximum transfer per S/G segment. + */ +#define AHD_MAXTRANSFER_SIZE 0x00ffffff /* limited by 24bit counter */ + +/* + * The maximum amount of SCB storage in hardware on a controller. + * This value represents an upper bound. Due to software design, + * we may not be able to use this number. + */ +#define AHD_SCB_MAX 512 + +/* + * The maximum number of concurrent transactions supported per driver instance. 
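+ * (AHD_MAX_QUEUE below is simply aliased to AHD_SCB_MAX, i.e. 512.)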
+ * Sequencer Control Blocks (SCBs) store per-transaction information. + */ +#define AHD_MAX_QUEUE AHD_SCB_MAX + +/* + * Define the size of our QIN and QOUT FIFOs. They must be a power of 2 + * in size and accommodate as many transactions as can be queued concurrently. + */ +#define AHD_QIN_SIZE AHD_MAX_QUEUE +#define AHD_QOUT_SIZE AHD_MAX_QUEUE + +#define AHD_QIN_WRAP(x) ((x) & (AHD_QIN_SIZE-1)) +/* + * The maximum amount of SCB storage we allocate in host memory. + */ +#define AHD_SCB_MAX_ALLOC AHD_MAX_QUEUE + +/* + * Ring Buffer of incoming target commands. + * We allocate 256 to simplify the logic in the sequencer + * by using the natural wrap point of an 8bit counter. + */ +#define AHD_TMODE_CMDS 256 + +/* Reset line assertion time in us */ +#define AHD_BUSRESET_DELAY 25 + +/******************* Chip Characteristics/Operating Settings *****************/ +/* + * Chip Type + * The chip order is from least sophisticated to most sophisticated. + */ +typedef enum { + AHD_NONE = 0x0000, + AHD_CHIPID_MASK = 0x00FF, + AHD_AIC7901 = 0x0001, + AHD_AIC7902 = 0x0002, + AHD_AIC7901A = 0x0003, + AHD_PCI = 0x0100, /* Bus type PCI */ + AHD_PCIX = 0x0200, /* Bus type PCIX */ + AHD_BUS_MASK = 0x0F00 +} ahd_chip; + +/* + * Features available in each chip type. + */ +typedef enum { + AHD_FENONE = 0x00000, + AHD_WIDE = 0x00001,/* Wide Channel */ + AHD_AIC79XXB_SLOWCRC = 0x00002,/* SLOWCRC bit should be set */ + AHD_MULTI_FUNC = 0x00100,/* Multi-Function/Channel Device */ + AHD_TARGETMODE = 0x01000,/* Has tested target mode support */ + AHD_MULTIROLE = 0x02000,/* Space for two roles at a time */ + AHD_RTI = 0x04000,/* Retained Training Support */ + AHD_NEW_IOCELL_OPTS = 0x08000,/* More Signal knobs in the IOCELL */ + AHD_NEW_DFCNTRL_OPTS = 0x10000,/* SCSIENWRDIS bit */ + AHD_FAST_CDB_DELIVERY = 0x20000,/* CDB acks released to Output Sync */ + AHD_REMOVABLE = 0x00000,/* Hot-Swap supported - None so far*/ + AHD_AIC7901_FE = AHD_FENONE, + AHD_AIC7901A_FE = AHD_FENONE, + AHD_AIC7902_FE = AHD_MULTI_FUNC +} ahd_feature; + +/* + * Bugs in the silicon that we work around in software. + */ +typedef enum { + AHD_BUGNONE = 0x0000, + /* + * Rev A hardware fails to update LAST/CURR/NEXTSCB + * correctly in certain packetized selection cases. + */ + AHD_SENT_SCB_UPDATE_BUG = 0x0001, + /* The wrong SCB is accessed to check the abort pending bit. */ + AHD_ABORT_LQI_BUG = 0x0002, + /* Packetized bitbucket crosses packet boundaries. */ + AHD_PKT_BITBUCKET_BUG = 0x0004, + /* The selection timer runs twice as long as its setting. */ + AHD_LONG_SETIMO_BUG = 0x0008, + /* The Non-LQ CRC error status is delayed until phase change. */ + AHD_NLQICRC_DELAYED_BUG = 0x0010, + /* The chip must be reset for all outgoing bus resets. */ + AHD_SCSIRST_BUG = 0x0020, + /* Some PCIX fields must be saved and restored across chip reset. */ + AHD_PCIX_CHIPRST_BUG = 0x0040, + /* MMAPIO is not functional in PCI-X mode. */ + AHD_PCIX_MMAPIO_BUG = 0x0080, + /* Reads to SCBRAM fail to reset the discard timer. */ + AHD_PCIX_SCBRAM_RD_BUG = 0x0100, + /* Bug workarounds that can be disabled on non-PCIX busses. */ + AHD_PCIX_BUG_MASK = AHD_PCIX_CHIPRST_BUG + | AHD_PCIX_MMAPIO_BUG + | AHD_PCIX_SCBRAM_RD_BUG, + /* + * LQOSTOP0 status set even for forced selections with ATN + * to perform non-packetized message delivery. + */ + AHD_LQO_ATNO_BUG = 0x0200, + /* FIFO auto-flush does not always trigger. */ + AHD_AUTOFLUSH_BUG = 0x0400, + /* The CLRLQO registers are not self-clearing. 
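+	 * The driver clears them explicitly on chips with this bug.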
*/ + AHD_CLRLQO_AUTOCLR_BUG = 0x0800, + /* The PACKETIZED status bit refers to the previous connection. */ + AHD_PKTIZED_STATUS_BUG = 0x1000, + /* "Short Luns" are not placed into outgoing LQ packets correctly. */ + AHD_PKT_LUN_BUG = 0x2000, + /* + * Only the FIFO allocated to the non-packetized connection may + * be in use during a non-packetzied connection. + */ + AHD_NONPACKFIFO_BUG = 0x4000, + /* + * Writing to a DFF SCBPTR register may fail if concurent with + * a hardware write to the other DFF SCBPTR register. This is + * not currently a concern in our sequencer since all chips with + * this bug have the AHD_NONPACKFIFO_BUG and all writes of concern + * occur in non-packetized connections. + */ + AHD_MDFF_WSCBPTR_BUG = 0x8000, + /* SGHADDR updates are slow. */ + AHD_REG_SLOW_SETTLE_BUG = 0x10000, + /* + * Changing the MODE_PTR coincident with an interrupt that + * switches to a different mode will cause the interrupt to + * be in the mode written outside of interrupt context. + */ + AHD_SET_MODE_BUG = 0x20000, + /* Non-packetized busfree revision does not work. */ + AHD_BUSFREEREV_BUG = 0x40000, + /* + * Paced transfers are indicated with a non-standard PPR + * option bit in the neg table, 160MHz is indicated by + * sync factor 0x7, and the offset if off by a factor of 2. + */ + AHD_PACED_NEGTABLE_BUG = 0x80000, + /* LQOOVERRUN false positives. */ + AHD_LQOOVERRUN_BUG = 0x100000, + /* + * Controller write to INTSTAT will lose to a host + * write to CLRINT. + */ + AHD_INTCOLLISION_BUG = 0x200000, + /* + * The GEM318 violates the SCSI spec by not waiting + * the mandated bus settle delay between phase changes + * in some situations. Some aic79xx chip revs. are more + * strict in this regard and will treat REQ assertions + * that fall within the bus settle delay window as + * glitches. This flag tells the firmware to tolerate + * early REQ assertions. + */ + AHD_EARLY_REQ_BUG = 0x400000, + /* + * The LED does not stay on long enough in packetized modes. + */ + AHD_FAINT_LED_BUG = 0x800000 +} ahd_bug; + +/* + * Configuration specific settings. + * The driver determines these settings by probing the + * chip/controller's configuration. + */ +typedef enum { + AHD_FNONE = 0x00000, + AHD_BOOT_CHANNEL = 0x00001,/* We were set as the boot channel. */ + AHD_USEDEFAULTS = 0x00004,/* + * For cards without an seeprom + * or a BIOS to initialize the chip's + * SRAM, we use the default target + * settings. + */ + AHD_SEQUENCER_DEBUG = 0x00008, + AHD_RESET_BUS_A = 0x00010, + AHD_EXTENDED_TRANS_A = 0x00020, + AHD_TERM_ENB_A = 0x00040, + AHD_SPCHK_ENB_A = 0x00080, + AHD_STPWLEVEL_A = 0x00100, + AHD_INITIATORROLE = 0x00200,/* + * Allow initiator operations on + * this controller. + */ + AHD_TARGETROLE = 0x00400,/* + * Allow target operations on this + * controller. + */ + AHD_RESOURCE_SHORTAGE = 0x00800, + AHD_TQINFIFO_BLOCKED = 0x01000,/* Blocked waiting for ATIOs */ + AHD_INT50_SPEEDFLEX = 0x02000,/* + * Internal 50pin connector + * sits behind an aic3860 + */ + AHD_BIOS_ENABLED = 0x04000, + AHD_ALL_INTERRUPTS = 0x08000, + AHD_39BIT_ADDRESSING = 0x10000,/* Use 39 bit addressing scheme. */ + AHD_64BIT_ADDRESSING = 0x20000,/* Use 64 bit addressing scheme. */ + AHD_CURRENT_SENSING = 0x40000, + AHD_SCB_CONFIG_USED = 0x80000,/* No SEEPROM but SCB had info. 
*/ + AHD_HP_BOARD = 0x100000, + AHD_BUS_RESET_ACTIVE = 0x200000, + AHD_UPDATE_PEND_CMDS = 0x400000, + AHD_RUNNING_QOUTFIFO = 0x800000, + AHD_HAD_FIRST_SEL = 0x1000000 +} ahd_flag; + +/************************* Hardware SCB Definition ***************************/ + +/* + * The driver keeps up to MAX_SCB scb structures per card in memory. The SCB + * consists of a "hardware SCB" mirroring the fields available on the card + * and additional information the kernel stores for each transaction. + * + * To minimize space utilization, a portion of the hardware scb stores + * different data during different portions of a SCSI transaction. + * As initialized by the host driver for the initiator role, this area + * contains the SCSI cdb (or a pointer to the cdb) to be executed. After + * the cdb has been presented to the target, this area serves to store + * residual transfer information and the SCSI status byte. + * For the target role, the contents of this area do not change, but + * still serve a different purpose than for the initiator role. See + * struct target_data for details. + */ + +/* + * Status information embedded in the shared poriton of + * an SCB after passing the cdb to the target. The kernel + * driver will only read this data for transactions that + * complete abnormally. + */ +struct initiator_status { + uint32_t residual_datacnt; /* Residual in the current S/G seg */ + uint32_t residual_sgptr; /* The next S/G for this transfer */ + uint8_t scsi_status; /* Standard SCSI status byte */ +}; + +struct target_status { + uint32_t residual_datacnt; /* Residual in the current S/G seg */ + uint32_t residual_sgptr; /* The next S/G for this transfer */ + uint8_t scsi_status; /* SCSI status to give to initiator */ + uint8_t target_phases; /* Bitmap of phases to execute */ + uint8_t data_phase; /* Data-In or Data-Out */ + uint8_t initiator_tag; /* Initiator's transaction tag */ +}; + +/* + * Initiator mode SCB shared data area. + * If the embedded CDB is 12 bytes or less, we embed + * the sense buffer address in the SCB. This allows + * us to retrieve sense information without interrupting + * the host in packetized mode. + */ +typedef uint32_t sense_addr_t; +#define MAX_CDB_LEN 16 +#define MAX_CDB_LEN_WITH_SENSE_ADDR (MAX_CDB_LEN - sizeof(sense_addr_t)) +union initiator_data { + struct { + uint64_t cdbptr; + uint8_t cdblen; + } cdb_from_host; + uint8_t cdb[MAX_CDB_LEN]; + struct { + uint8_t cdb[MAX_CDB_LEN_WITH_SENSE_ADDR]; + sense_addr_t sense_addr; + } cdb_plus_saddr; +}; + +/* + * Target mode version of the shared data SCB segment. + */ +struct target_data { + uint32_t spare[2]; + uint8_t scsi_status; /* SCSI status to give to initiator */ + uint8_t target_phases; /* Bitmap of phases to execute */ + uint8_t data_phase; /* Data-In or Data-Out */ + uint8_t initiator_tag; /* Initiator's transaction tag */ +}; + +struct hardware_scb { +/*0*/ union { + union initiator_data idata; + struct target_data tdata; + struct initiator_status istatus; + struct target_status tstatus; + } shared_data; +/* + * A word about residuals. + * The scb is presented to the sequencer with the dataptr and datacnt + * fields initialized to the contents of the first S/G element to + * transfer. The sgptr field is initialized to the bus address for + * the S/G element that follows the first in the in core S/G array + * or'ed with the SG_FULL_RESID flag. 
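+ * (SG_PTR_MASK below strips these low-order flag bits whenever the
+ * bare bus address is needed.)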
Sgptr may point to an invalid + * S/G entry for this transfer (single S/G element transfer with the + * first elements address and length preloaded in the dataptr/datacnt + * fields). If no transfer is to occur, sgptr is set to SG_LIST_NULL. + * The SG_FULL_RESID flag ensures that the residual will be correctly + * noted even if no data transfers occur. Once the data phase is entered, + * the residual sgptr and datacnt are loaded from the sgptr and the + * datacnt fields. After each S/G element's dataptr and length are + * loaded into the hardware, the residual sgptr is advanced. After + * each S/G element is expired, its datacnt field is checked to see + * if the LAST_SEG flag is set. If so, SG_LIST_NULL is set in the + * residual sg ptr and the transfer is considered complete. If the + * sequencer determines that there is a residual in the tranfer, or + * there is non-zero status, it will set the SG_STATUS_VALID flag in + * sgptr and dma the scb back into host memory. To sumarize: + * + * Sequencer: + * o A residual has occurred if SG_FULL_RESID is set in sgptr, + * or residual_sgptr does not have SG_LIST_NULL set. + * + * o We are transferring the last segment if residual_datacnt has + * the SG_LAST_SEG flag set. + * + * Host: + * o A residual can only have occurred if a completed scb has the + * SG_STATUS_VALID flag set. Inspection of the SCSI status field, + * the residual_datacnt, and the residual_sgptr field will tell + * for sure. + * + * o residual_sgptr and sgptr refer to the "next" sg entry + * and so may point beyond the last valid sg entry for the + * transfer. + */ +#define SG_PTR_MASK 0xFFFFFFF8 +/*16*/ uint16_t tag; /* Reused by Sequencer. */ +/*18*/ uint8_t control; /* See SCB_CONTROL in aic79xx.reg for details */ +/*19*/ uint8_t scsiid; /* + * Selection out Id + * Our Id (bits 0-3) Their ID (bits 4-7) + */ +/*20*/ uint8_t lun; +/*21*/ uint8_t task_attribute; +/*22*/ uint8_t cdb_len; +/*23*/ uint8_t task_management; +/*24*/ uint64_t dataptr; +/*32*/ uint32_t datacnt; /* Byte 3 is spare. */ +/*36*/ uint32_t sgptr; +/*40*/ uint32_t hscb_busaddr; +/*44*/ uint32_t next_hscb_busaddr; +/********** Long lun field only downloaded for full 8 byte lun support ********/ +/*48*/ uint8_t pkt_long_lun[8]; +/******* Fields below are not Downloaded (Sequencer may use for scratch) ******/ +/*56*/ uint8_t spare[8]; +}; + +/************************ Kernel SCB Definitions ******************************/ +/* + * Some fields of the SCB are OS dependent. Here we collect the + * definitions for elements that all OS platforms need to include + * in there SCB definition. + */ + +/* + * Definition of a scatter/gather element as transferred to the controller. + * The aic7xxx chips only support a 24bit length. We use the top byte of + * the length to store additional address bits and a flag to indicate + * that a given segment terminates the transfer. This gives us an + * addressable range of 512GB on machines with 64bit PCI or with chips + * that can support dual address cycles on 32bit PCI busses. + */ +struct ahd_dma_seg { + uint32_t addr; + uint32_t len; +#define AHD_DMA_LAST_SEG 0x80000000 +#define AHD_SG_HIGH_ADDR_MASK 0x7F000000 +#define AHD_SG_LEN_MASK 0x00FFFFFF +}; + +struct ahd_dma64_seg { + uint64_t addr; + uint32_t len; + uint32_t pad; +}; + +struct map_node { + bus_dmamap_t dmamap; + dma_addr_t physaddr; + uint8_t *vaddr; + SLIST_ENTRY(map_node) links; +}; + +/* + * The current state of this SCB. 
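+ * Flags accumulate in scb->flags as the transaction progresses; the
+ * recovery related flags (SCB_DEVICE_RESET, SCB_ABORT, SCB_RECOVERY_SCB)
+ * mark commands that the error recovery code is driving.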
+ */ +typedef enum { + SCB_FLAG_NONE = 0x00000, + SCB_TRANSMISSION_ERROR = 0x00001,/* + * We detected a parity or CRC + * error that has effected the + * payload of the command. This + * flag is checked when normal + * status is returned to catch + * the case of a target not + * responding to our attempt + * to report the error. + */ + SCB_OTHERTCL_TIMEOUT = 0x00002,/* + * Another device was active + * during the first timeout for + * this SCB so we gave ourselves + * an additional timeout period + * in case it was hogging the + * bus. + */ + SCB_DEVICE_RESET = 0x00004, + SCB_SENSE = 0x00008, + SCB_CDB32_PTR = 0x00010, + SCB_RECOVERY_SCB = 0x00020, + SCB_AUTO_NEGOTIATE = 0x00040,/* Negotiate to achieve goal. */ + SCB_NEGOTIATE = 0x00080,/* Negotiation forced for command. */ + SCB_ABORT = 0x00100, + SCB_ACTIVE = 0x00200, + SCB_TARGET_IMMEDIATE = 0x00400, + SCB_PACKETIZED = 0x00800, + SCB_EXPECT_PPR_BUSFREE = 0x01000, + SCB_PKT_SENSE = 0x02000, + SCB_EXTERNAL_RESET = 0x04000,/* Device was reset externally */ + SCB_ON_COL_LIST = 0x08000, + SCB_SILENT = 0x10000 /* + * Be quiet about transmission type + * errors. They are expected and we + * don't want to upset the user. This + * flag is typically used during DV. + */ +} scb_flag; + +struct scb { + struct hardware_scb *hscb; + union { + SLIST_ENTRY(scb) sle; + LIST_ENTRY(scb) le; + TAILQ_ENTRY(scb) tqe; + } links; + union { + SLIST_ENTRY(scb) sle; + LIST_ENTRY(scb) le; + TAILQ_ENTRY(scb) tqe; + } links2; +#define pending_links links2.le +#define collision_links links2.le + struct scb *col_scb; + ahd_io_ctx_t io_ctx; + struct ahd_softc *ahd_softc; + scb_flag flags; + struct scb_platform_data *platform_data; + struct map_node *hscb_map; + struct map_node *sg_map; + struct map_node *sense_map; + void *sg_list; + uint8_t *sense_data; + dma_addr_t sg_list_busaddr; + dma_addr_t sense_busaddr; + u_int sg_count;/* How full ahd_dma_seg is */ +#define AHD_MAX_LQ_CRC_ERRORS 5 + u_int crc_retry_count; +}; + +TAILQ_HEAD(scb_tailq, scb); +BSD_LIST_HEAD(scb_list, scb); + +struct scb_data { + /* + * TAILQ of lists of free SCBs grouped by device + * collision domains. + */ + struct scb_tailq free_scbs; + + /* + * Per-device lists of SCBs whose tag ID would collide + * with an already active tag on the device. + */ + struct scb_list free_scb_lists[AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT]; + + /* + * SCBs that will not collide with any active device. + */ + struct scb_list any_dev_free_scb_list; + + /* + * Mapping from tag to SCB. + */ + struct scb *scbindex[AHD_SCB_MAX]; + + /* + * "Bus" addresses of our data structures. + */ + bus_dma_tag_t hscb_dmat; /* dmat for our hardware SCB array */ + bus_dma_tag_t sg_dmat; /* dmat for our sg segments */ + bus_dma_tag_t sense_dmat; /* dmat for our sense buffers */ + SLIST_HEAD(, map_node) hscb_maps; + SLIST_HEAD(, map_node) sg_maps; + SLIST_HEAD(, map_node) sense_maps; + int scbs_left; /* unallocated scbs in head map_node */ + int sgs_left; /* unallocated sgs in head map_node */ + int sense_left; /* unallocated sense in head map_node */ + uint16_t numscbs; + uint16_t maxhscbs; /* Number of SCBs on the card */ + uint8_t init_level; /* + * How far we've initialized + * this structure. + */ +}; + +/************************ Target Mode Definitions *****************************/ + +/* + * Connection descriptor for select-in requests in target mode. 
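+ * The firmware DMAs one of these into the host's targetcmds ring
+ * (AHD_TMODE_CMDS entries) for each incoming selection; the host
+ * notices new entries by peeking at the cmd_valid byte described
+ * below.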
+ */ +struct target_cmd { + uint8_t scsiid; /* Our ID and the initiator's ID */ + uint8_t identify; /* Identify message */ + uint8_t bytes[22]; /* + * Bytes contains any additional message + * bytes terminated by 0xFF. The remainder + * is the cdb to execute. + */ + uint8_t cmd_valid; /* + * When a command is complete, the firmware + * will set cmd_valid to all bits set. + * After the host has seen the command, + * the bits are cleared. This allows us + * to just peek at host memory to determine + * if more work is complete. cmd_valid is on + * an 8 byte boundary to simplify setting + * it on aic7880 hardware which only has + * limited direct access to the DMA FIFO. + */ + uint8_t pad[7]; +}; + +/* + * Number of events we can buffer up if we run out + * of immediate notify ccbs. + */ +#define AHD_TMODE_EVENT_BUFFER_SIZE 8 +struct ahd_tmode_event { + uint8_t initiator_id; + uint8_t event_type; /* MSG type or EVENT_TYPE_BUS_RESET */ +#define EVENT_TYPE_BUS_RESET 0xFF + uint8_t event_arg; +}; + +/* + * Per enabled lun target mode state. + * As this state is directly influenced by the host OS'es target mode + * environment, we let the OS module define it. Forward declare the + * structure here so we can store arrays of them, etc. in OS neutral + * data structures. + */ +#ifdef AHD_TARGET_MODE +struct ahd_tmode_lstate { + struct cam_path *path; + struct ccb_hdr_slist accept_tios; + struct ccb_hdr_slist immed_notifies; + struct ahd_tmode_event event_buffer[AHD_TMODE_EVENT_BUFFER_SIZE]; + uint8_t event_r_idx; + uint8_t event_w_idx; +}; +#else +struct ahd_tmode_lstate; +#endif + +/******************** Transfer Negotiation Datastructures *********************/ +#define AHD_TRANS_CUR 0x01 /* Modify current neogtiation status */ +#define AHD_TRANS_ACTIVE 0x03 /* Assume this target is on the bus */ +#define AHD_TRANS_GOAL 0x04 /* Modify negotiation goal */ +#define AHD_TRANS_USER 0x08 /* Modify user negotiation settings */ +#define AHD_PERIOD_10MHz 0x19 + +#define AHD_WIDTH_UNKNOWN 0xFF +#define AHD_PERIOD_UNKNOWN 0xFF +#define AHD_OFFSET_UNKNOWN 0xFF +#define AHD_PPR_OPTS_UNKNOWN 0xFF + +/* + * Transfer Negotiation Information. + */ +struct ahd_transinfo { + uint8_t protocol_version; /* SCSI Revision level */ + uint8_t transport_version; /* SPI Revision level */ + uint8_t width; /* Bus width */ + uint8_t period; /* Sync rate factor */ + uint8_t offset; /* Sync offset */ + uint8_t ppr_options; /* Parallel Protocol Request options */ +}; + +/* + * Per-initiator current, goal and user transfer negotiation information. */ +struct ahd_initiator_tinfo { + struct ahd_transinfo curr; + struct ahd_transinfo goal; + struct ahd_transinfo user; +}; + +/* + * Per enabled target ID state. + * Pointers to lun target state as well as sync/wide negotiation information + * for each initiator<->target mapping. For the initiator role we pretend + * that we are the target and the targets are the initiators since the + * negotiation is the same regardless of role. + */ +struct ahd_tmode_tstate { + struct ahd_tmode_lstate* enabled_luns[AHD_NUM_LUNS]; + struct ahd_initiator_tinfo transinfo[AHD_NUM_TARGETS]; + + /* + * Per initiator state bitmasks. + */ + uint16_t auto_negotiate;/* Auto Negotiation Required */ + uint16_t discenable; /* Disconnection allowed */ + uint16_t tagenable; /* Tagged Queuing allowed */ +}; + +/* + * Points of interest along the negotiated transfer scale. 
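+ * The values are SPI sync period factors: 0x8 is the paced 160MHz rate
+ * used for Ultra320, 0xa is Ultra2, 0x19 is the 100ns/10MHz "Fast"
+ * rate (hence AHD_PERIOD_10MHz above), and 0xFF means asynchronous.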
+ */ +#define AHD_SYNCRATE_160 0x8 +#define AHD_SYNCRATE_PACED 0x8 +#define AHD_SYNCRATE_DT 0x9 +#define AHD_SYNCRATE_ULTRA2 0xa +#define AHD_SYNCRATE_ULTRA 0xc +#define AHD_SYNCRATE_FAST 0x19 +#define AHD_SYNCRATE_MIN_DT AHD_SYNCRATE_FAST +#define AHD_SYNCRATE_SYNC 0x32 +#define AHD_SYNCRATE_MIN 0x60 +#define AHD_SYNCRATE_ASYNC 0xFF +#define AHD_SYNCRATE_MAX AHD_SYNCRATE_160 + +/* Safe and valid period for async negotiations. */ +#define AHD_ASYNC_XFER_PERIOD 0x44 + +/* + * In RevA, the synctable uses a 120MHz rate for the period + * factor 8 and 160MHz for the period factor 7. The 120MHz + * rate never made it into the official SCSI spec, so we must + * compensate when setting the negotiation table for Rev A + * parts. + */ +#define AHD_SYNCRATE_REVA_120 0x8 +#define AHD_SYNCRATE_REVA_160 0x7 + +/***************************** Lookup Tables **********************************/ +/* + * Phase -> name and message out response + * to parity errors in each phase table. + */ +struct ahd_phase_table_entry { + uint8_t phase; + uint8_t mesg_out; /* Message response to parity errors */ + const char *phasemsg; +}; + +/************************** Serial EEPROM Format ******************************/ + +struct seeprom_config { +/* + * Per SCSI ID Configuration Flags + */ + uint16_t device_flags[16]; /* words 0-15 */ +#define CFXFER 0x003F /* synchronous transfer rate */ +#define CFXFER_ASYNC 0x3F +#define CFQAS 0x0040 /* Negotiate QAS */ +#define CFPACKETIZED 0x0080 /* Negotiate Packetized Transfers */ +#define CFSTART 0x0100 /* send start unit SCSI command */ +#define CFINCBIOS 0x0200 /* include in BIOS scan */ +#define CFDISC 0x0400 /* enable disconnection */ +#define CFMULTILUNDEV 0x0800 /* Probe multiple luns in BIOS scan */ +#define CFWIDEB 0x1000 /* wide bus device */ +#define CFHOSTMANAGED 0x8000 /* Managed by a RAID controller */ + +/* + * BIOS Control Bits + */ + uint16_t bios_control; /* word 16 */ +#define CFSUPREM 0x0001 /* support all removeable drives */ +#define CFSUPREMB 0x0002 /* support removeable boot drives */ +#define CFBIOSSTATE 0x000C /* BIOS Action State */ +#define CFBS_DISABLED 0x00 +#define CFBS_ENABLED 0x04 +#define CFBS_DISABLED_SCAN 0x08 +#define CFENABLEDV 0x0010 /* Perform Domain Validation */ +#define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */ +#define CFSPARITY 0x0040 /* SCSI parity */ +#define CFEXTEND 0x0080 /* extended translation enabled */ +#define CFBOOTCD 0x0100 /* Support Bootable CD-ROM */ +#define CFMSG_LEVEL 0x0600 /* BIOS Message Level */ +#define CFMSG_VERBOSE 0x0000 +#define CFMSG_SILENT 0x0200 +#define CFMSG_DIAG 0x0400 +#define CFRESETB 0x0800 /* reset SCSI bus at boot */ +/* UNUSED 0xf000 */ + +/* + * Host Adapter Control Bits + */ + uint16_t adapter_control; /* word 17 */ +#define CFAUTOTERM 0x0001 /* Perform Auto termination */ +#define CFSTERM 0x0002 /* SCSI low byte termination */ +#define CFWSTERM 0x0004 /* SCSI high byte termination */ +#define CFSEAUTOTERM 0x0008 /* Ultra2 Perform secondary Auto Term*/ +#define CFSELOWTERM 0x0010 /* Ultra2 secondary low term */ +#define CFSEHIGHTERM 0x0020 /* Ultra2 secondary high term */ +#define CFSTPWLEVEL 0x0040 /* Termination level control */ +#define CFBIOSAUTOTERM 0x0080 /* Perform Auto termination */ +#define CFTERM_MENU 0x0100 /* BIOS displays termination menu */ +#define CFCLUSTERENB 0x8000 /* Cluster Enable */ + +/* + * Bus Release Time, Host Adapter ID + */ + uint16_t brtime_id; /* word 18 */ +#define CFSCSIID 0x000f /* host adapter SCSI ID */ +/* UNUSED 0x00f0 */ +#define CFBRTIME 0xff00 /* 
bus release time/PCI Latency Time */ + +/* + * Maximum targets + */ + uint16_t max_targets; /* word 19 */ +#define CFMAXTARG 0x00ff /* maximum targets */ +#define CFBOOTLUN 0x0f00 /* Lun to boot from */ +#define CFBOOTID 0xf000 /* Target to boot from */ + uint16_t res_1[10]; /* words 20-29 */ + uint16_t signature; /* BIOS Signature */ +#define CFSIGNATURE 0x400 + uint16_t checksum; /* word 31 */ +}; + +/* + * Vital Product Data used during POST and by the BIOS. + */ +struct vpd_config { + uint8_t bios_flags; +#define VPDMASTERBIOS 0x0001 +#define VPDBOOTHOST 0x0002 + uint8_t reserved_1[21]; + uint8_t resource_type; + uint8_t resource_len[2]; + uint8_t resource_data[8]; + uint8_t vpd_tag; + uint16_t vpd_len; + uint8_t vpd_keyword[2]; + uint8_t length; + uint8_t revision; + uint8_t device_flags; + uint8_t termination_menus[2]; + uint8_t fifo_threshold; + uint8_t end_tag; + uint8_t vpd_checksum; + uint16_t default_target_flags; + uint16_t default_bios_flags; + uint16_t default_ctrl_flags; + uint8_t default_irq; + uint8_t pci_lattime; + uint8_t max_target; + uint8_t boot_lun; + uint16_t signature; + uint8_t reserved_2; + uint8_t checksum; + uint8_t reserved_3[4]; +}; + +/****************************** Flexport Logic ********************************/ +#define FLXADDR_TERMCTL 0x0 +#define FLX_TERMCTL_ENSECHIGH 0x8 +#define FLX_TERMCTL_ENSECLOW 0x4 +#define FLX_TERMCTL_ENPRIHIGH 0x2 +#define FLX_TERMCTL_ENPRILOW 0x1 +#define FLXADDR_ROMSTAT_CURSENSECTL 0x1 +#define FLX_ROMSTAT_SEECFG 0xF0 +#define FLX_ROMSTAT_EECFG 0x0F +#define FLX_ROMSTAT_SEE_93C66 0x00 +#define FLX_ROMSTAT_SEE_NONE 0xF0 +#define FLX_ROMSTAT_EE_512x8 0x0 +#define FLX_ROMSTAT_EE_1MBx8 0x1 +#define FLX_ROMSTAT_EE_2MBx8 0x2 +#define FLX_ROMSTAT_EE_4MBx8 0x3 +#define FLX_ROMSTAT_EE_16MBx8 0x4 +#define CURSENSE_ENB 0x1 +#define FLXADDR_FLEXSTAT 0x2 +#define FLX_FSTAT_BUSY 0x1 +#define FLXADDR_CURRENT_STAT 0x4 +#define FLX_CSTAT_SEC_HIGH 0xC0 +#define FLX_CSTAT_SEC_LOW 0x30 +#define FLX_CSTAT_PRI_HIGH 0x0C +#define FLX_CSTAT_PRI_LOW 0x03 +#define FLX_CSTAT_MASK 0x03 +#define FLX_CSTAT_SHIFT 2 +#define FLX_CSTAT_OKAY 0x0 +#define FLX_CSTAT_OVER 0x1 +#define FLX_CSTAT_UNDER 0x2 +#define FLX_CSTAT_INVALID 0x3 + +int ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf, + u_int start_addr, u_int count, int bstream); + +int ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf, + u_int start_addr, u_int count); +int ahd_verify_cksum(struct seeprom_config *sc); +int ahd_acquire_seeprom(struct ahd_softc *ahd); +void ahd_release_seeprom(struct ahd_softc *ahd); + +/**************************** Message Buffer *********************************/ +typedef enum { + MSG_FLAG_NONE = 0x00, + MSG_FLAG_EXPECT_PPR_BUSFREE = 0x01, + MSG_FLAG_IU_REQ_CHANGED = 0x02, + MSG_FLAG_EXPECT_IDE_BUSFREE = 0x04, + MSG_FLAG_EXPECT_QASREJ_BUSFREE = 0x08, + MSG_FLAG_PACKETIZED = 0x10 +} ahd_msg_flags; + +typedef enum { + MSG_TYPE_NONE = 0x00, + MSG_TYPE_INITIATOR_MSGOUT = 0x01, + MSG_TYPE_INITIATOR_MSGIN = 0x02, + MSG_TYPE_TARGET_MSGOUT = 0x03, + MSG_TYPE_TARGET_MSGIN = 0x04 +} ahd_msg_type; + +typedef enum { + MSGLOOP_IN_PROG, + MSGLOOP_MSGCOMPLETE, + MSGLOOP_TERMINATED +} msg_loop_stat; + +/*********************** Software Configuration Structure *********************/ +struct ahd_suspend_channel_state { + uint8_t scsiseq; + uint8_t sxfrctl0; + uint8_t sxfrctl1; + uint8_t simode0; + uint8_t simode1; + uint8_t seltimer; + uint8_t seqctl; +}; + +struct ahd_suspend_pci_state { + uint32_t devconfig; + uint8_t command; + uint8_t csize_lattime; +}; + +struct 
ahd_suspend_state { + struct ahd_suspend_channel_state channel[2]; + struct ahd_suspend_pci_state pci_state; + uint8_t optionmode; + uint8_t dscommand0; + uint8_t dspcistatus; + /* hsmailbox */ + uint8_t crccontrol1; + uint8_t scbbaddr; + /* Host and sequencer SCB counts */ + uint8_t dff_thrsh; + uint8_t *scratch_ram; + uint8_t *btt; +}; + +typedef void (*ahd_bus_intr_t)(struct ahd_softc *); + +typedef enum { + AHD_MODE_DFF0, + AHD_MODE_DFF1, + AHD_MODE_CCHAN, + AHD_MODE_SCSI, + AHD_MODE_CFG, + AHD_MODE_UNKNOWN +} ahd_mode; + +#define AHD_MK_MSK(x) (0x01 << (x)) +#define AHD_MODE_DFF0_MSK AHD_MK_MSK(AHD_MODE_DFF0) +#define AHD_MODE_DFF1_MSK AHD_MK_MSK(AHD_MODE_DFF1) +#define AHD_MODE_CCHAN_MSK AHD_MK_MSK(AHD_MODE_CCHAN) +#define AHD_MODE_SCSI_MSK AHD_MK_MSK(AHD_MODE_SCSI) +#define AHD_MODE_CFG_MSK AHD_MK_MSK(AHD_MODE_CFG) +#define AHD_MODE_UNKNOWN_MSK AHD_MK_MSK(AHD_MODE_UNKNOWN) +#define AHD_MODE_ANY_MSK (~0) + +typedef uint8_t ahd_mode_state; + +struct ahd_completion +{ + uint16_t tag; + uint8_t sg_status; + uint8_t valid_tag; +}; + +struct ahd_softc { + bus_space_tag_t tags[2]; + bus_space_handle_t bshs[2]; + struct scb_data scb_data; + + struct hardware_scb *next_queued_hscb; + struct map_node *next_queued_hscb_map; + + /* + * SCBs that have been sent to the controller + */ + BSD_LIST_HEAD(, scb) pending_scbs; + + /* + * Current register window mode information. + */ + ahd_mode dst_mode; + ahd_mode src_mode; + + /* + * Saved register window mode information + * used for restore on next unpause. + */ + ahd_mode saved_dst_mode; + ahd_mode saved_src_mode; + + /* + * Platform specific data. + */ + struct ahd_platform_data *platform_data; + + /* + * Platform specific device information. + */ + ahd_dev_softc_t dev_softc; + + /* + * Bus specific device information. + */ + ahd_bus_intr_t bus_intr; + + /* + * Target mode related state kept on a per enabled lun basis. + * Targets that are not enabled will have null entries. + * As an initiator, we keep one target entry for our initiator + * ID to store our sync/wide transfer settings. + */ + struct ahd_tmode_tstate *enabled_targets[AHD_NUM_TARGETS]; + + /* + * The black hole device responsible for handling requests for + * disabled luns on enabled targets. + */ + struct ahd_tmode_lstate *black_hole; + + /* + * Device instance currently on the bus awaiting a continue TIO + * for a command that was not given the disconnect priveledge. + */ + struct ahd_tmode_lstate *pending_device; + + /* + * Timer handles for timer driven callbacks. + */ + struct timer_list stat_timer; + + /* + * Statistics. + */ +#define AHD_STAT_UPDATE_US 250000 /* 250ms */ +#define AHD_STAT_BUCKETS 4 + u_int cmdcmplt_bucket; + uint32_t cmdcmplt_counts[AHD_STAT_BUCKETS]; + uint32_t cmdcmplt_total; + + /* + * Card characteristics + */ + ahd_chip chip; + ahd_feature features; + ahd_bug bugs; + ahd_flag flags; + struct seeprom_config *seep_config; + + /* Command Queues */ + struct ahd_completion *qoutfifo; + uint16_t qoutfifonext; + uint16_t qoutfifonext_valid_tag; + uint16_t qinfifonext; + uint16_t qinfifo[AHD_SCB_MAX]; + + /* + * Our qfreeze count. The sequencer compares + * this value with its own counter to determine + * whether to allow selections to occur. + */ + uint16_t qfreeze_cnt; + + /* Values to store in the SEQCTL register for pause and unpause */ + uint8_t unpause; + uint8_t pause; + + /* Critical Section Data */ + struct cs *critical_sections; + u_int num_critical_sections; + + /* Buffer for handling packetized bitbucket. 
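+	 * Overrun data from packetized connections is DMA'd into this
+	 * buffer and discarded.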
*/ + uint8_t *overrun_buf; + + /* Links for chaining softcs */ + TAILQ_ENTRY(ahd_softc) links; + + /* Channel Names ('A', 'B', etc.) */ + char channel; + + /* Initiator Bus ID */ + uint8_t our_id; + + /* + * Target incoming command FIFO. + */ + struct target_cmd *targetcmds; + uint8_t tqinfifonext; + + /* + * Cached version of the hs_mailbox so we can avoid + * pausing the sequencer during mailbox updates. + */ + uint8_t hs_mailbox; + + /* + * Incoming and outgoing message handling. + */ + uint8_t send_msg_perror; + ahd_msg_flags msg_flags; + ahd_msg_type msg_type; + uint8_t msgout_buf[12];/* Message we are sending */ + uint8_t msgin_buf[12];/* Message we are receiving */ + u_int msgout_len; /* Length of message to send */ + u_int msgout_index; /* Current index in msgout */ + u_int msgin_index; /* Current index in msgin */ + + /* + * Mapping information for data structures shared + * between the sequencer and kernel. + */ + bus_dma_tag_t parent_dmat; + bus_dma_tag_t shared_data_dmat; + struct map_node shared_data_map; + + /* Information saved through suspend/resume cycles */ + struct ahd_suspend_state suspend_state; + + /* Number of enabled target mode device on this card */ + u_int enabled_luns; + + /* Initialization level of this data structure */ + u_int init_level; + + /* PCI cacheline size. */ + u_int pci_cachesize; + + /* IO Cell Parameters */ + uint8_t iocell_opts[AHD_NUM_PER_DEV_ANNEXCOLS]; + + u_int stack_size; + uint16_t *saved_stack; + + /* Per-Unit descriptive information */ + const char *description; + const char *bus_description; + char *name; + int unit; + + /* Selection Timer settings */ + int seltime; + + /* + * Interrupt coalescing settings. + */ +#define AHD_INT_COALESCING_TIMER_DEFAULT 250 /*us*/ +#define AHD_INT_COALESCING_MAXCMDS_DEFAULT 10 +#define AHD_INT_COALESCING_MAXCMDS_MAX 127 +#define AHD_INT_COALESCING_MINCMDS_DEFAULT 5 +#define AHD_INT_COALESCING_MINCMDS_MAX 127 +#define AHD_INT_COALESCING_THRESHOLD_DEFAULT 2000 +#define AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT 1000 + u_int int_coalescing_timer; + u_int int_coalescing_maxcmds; + u_int int_coalescing_mincmds; + u_int int_coalescing_threshold; + u_int int_coalescing_stop_threshold; + + uint16_t user_discenable;/* Disconnection allowed */ + uint16_t user_tagenable;/* Tagged Queuing allowed */ +}; + +/*************************** IO Cell Configuration ****************************/ +#define AHD_PRECOMP_SLEW_INDEX \ + (AHD_ANNEXCOL_PRECOMP_SLEW - AHD_ANNEXCOL_PER_DEV0) + +#define AHD_AMPLITUDE_INDEX \ + (AHD_ANNEXCOL_AMPLITUDE - AHD_ANNEXCOL_PER_DEV0) + +#define AHD_SET_SLEWRATE(ahd, new_slew) \ +do { \ + (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_SLEWRATE_MASK; \ + (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] |= \ + (((new_slew) << AHD_SLEWRATE_SHIFT) & AHD_SLEWRATE_MASK); \ +} while (0) + +#define AHD_SET_PRECOMP(ahd, new_pcomp) \ +do { \ + (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK; \ + (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] |= \ + (((new_pcomp) << AHD_PRECOMP_SHIFT) & AHD_PRECOMP_MASK); \ +} while (0) + +#define AHD_SET_AMPLITUDE(ahd, new_amp) \ +do { \ + (ahd)->iocell_opts[AHD_AMPLITUDE_INDEX] &= ~AHD_AMPLITUDE_MASK; \ + (ahd)->iocell_opts[AHD_AMPLITUDE_INDEX] |= \ + (((new_amp) << AHD_AMPLITUDE_SHIFT) & AHD_AMPLITUDE_MASK); \ +} while (0) + +/************************ Active Device Information ***************************/ +typedef enum { + ROLE_UNKNOWN, + ROLE_INITIATOR, + ROLE_TARGET +} role_t; + +struct ahd_devinfo { + int our_scsiid; + int target_offset; + uint16_t 
target_mask; + u_int target; + u_int lun; + char channel; + role_t role; /* + * Only guaranteed to be correct if not + * in the busfree state. + */ +}; + +/****************************** PCI Structures ********************************/ +#define AHD_PCI_IOADDR0 PCIR_BAR(0) /* I/O BAR*/ +#define AHD_PCI_MEMADDR PCIR_BAR(1) /* Memory BAR */ +#define AHD_PCI_IOADDR1 PCIR_BAR(3) /* Second I/O BAR */ + +typedef int (ahd_device_setup_t)(struct ahd_softc *); + +struct ahd_pci_identity { + uint64_t full_id; + uint64_t id_mask; + const char *name; + ahd_device_setup_t *setup; +}; + +/***************************** VL/EISA Declarations ***************************/ +struct aic7770_identity { + uint32_t full_id; + uint32_t id_mask; + const char *name; + ahd_device_setup_t *setup; +}; +extern struct aic7770_identity aic7770_ident_table []; +extern const int ahd_num_aic7770_devs; + +#define AHD_EISA_SLOT_OFFSET 0xc00 +#define AHD_EISA_IOSIZE 0x100 + +/*************************** Function Declarations ****************************/ +/******************************************************************************/ + +/***************************** PCI Front End *********************************/ +const struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t); +int ahd_pci_config(struct ahd_softc *, + const struct ahd_pci_identity *); +int ahd_pci_test_register_access(struct ahd_softc *); +void __maybe_unused ahd_pci_suspend(struct ahd_softc *); +void __maybe_unused ahd_pci_resume(struct ahd_softc *); + +/************************** SCB and SCB queue management **********************/ +void ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, + struct scb *scb); + +/****************************** Initialization ********************************/ +struct ahd_softc *ahd_alloc(void *platform_arg, char *name); +int ahd_softc_init(struct ahd_softc *); +void ahd_controller_info(struct ahd_softc *ahd, char *buf); +int ahd_init(struct ahd_softc *ahd); +int __maybe_unused ahd_suspend(struct ahd_softc *ahd); +void __maybe_unused ahd_resume(struct ahd_softc *ahd); +int ahd_default_config(struct ahd_softc *ahd); +int ahd_parse_vpddata(struct ahd_softc *ahd, + struct vpd_config *vpd); +int ahd_parse_cfgdata(struct ahd_softc *ahd, + struct seeprom_config *sc); +void ahd_intr_enable(struct ahd_softc *ahd, int enable); +void ahd_pause_and_flushwork(struct ahd_softc *ahd); +void ahd_set_unit(struct ahd_softc *, int); +void ahd_set_name(struct ahd_softc *, char *); +struct scb *ahd_get_scb(struct ahd_softc *ahd, u_int col_idx); +void ahd_free_scb(struct ahd_softc *ahd, struct scb *scb); +void ahd_free(struct ahd_softc *ahd); +int ahd_reset(struct ahd_softc *ahd, int reinit); +int ahd_write_flexport(struct ahd_softc *ahd, + u_int addr, u_int value); +int ahd_read_flexport(struct ahd_softc *ahd, u_int addr, + uint8_t *value); + +/***************************** Error Recovery *********************************/ +typedef enum { + SEARCH_COMPLETE, + SEARCH_COUNT, + SEARCH_REMOVE, + SEARCH_PRINT +} ahd_search_action; +int ahd_search_qinfifo(struct ahd_softc *ahd, int target, + char channel, int lun, u_int tag, + role_t role, uint32_t status, + ahd_search_action action); +int ahd_search_disc_list(struct ahd_softc *ahd, int target, + char channel, int lun, u_int tag, + int stop_on_first, int remove, + int save_state); +int ahd_reset_channel(struct ahd_softc *ahd, char channel, + int initiate_reset); +/*************************** Utility Functions ********************************/ +void ahd_compile_devinfo(struct ahd_devinfo *devinfo, + 
u_int our_id, u_int target, + u_int lun, char channel, + role_t role); +/************************** Transfer Negotiation ******************************/ +void ahd_find_syncrate(struct ahd_softc *ahd, u_int *period, + u_int *ppr_options, u_int maxsync); +/* + * Negotiation types. These are used to qualify if we should renegotiate + * even if our goal and current transport parameters are identical. + */ +typedef enum { + AHD_NEG_TO_GOAL, /* Renegotiate only if goal and curr differ. */ + AHD_NEG_IF_NON_ASYNC, /* Renegotiate so long as goal is non-async. */ + AHD_NEG_ALWAYS /* Renegotiat even if goal is async. */ +} ahd_neg_type; +int ahd_update_neg_request(struct ahd_softc*, + struct ahd_devinfo*, + struct ahd_tmode_tstate*, + struct ahd_initiator_tinfo*, + ahd_neg_type); +void ahd_set_width(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + u_int width, u_int type, int paused); +void ahd_set_syncrate(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + u_int period, u_int offset, + u_int ppr_options, + u_int type, int paused); +typedef enum { + AHD_QUEUE_NONE, + AHD_QUEUE_BASIC, + AHD_QUEUE_TAGGED +} ahd_queue_alg; + +/**************************** Target Mode *************************************/ +#ifdef AHD_TARGET_MODE +void ahd_send_lstate_events(struct ahd_softc *, + struct ahd_tmode_lstate *); +void ahd_handle_en_lun(struct ahd_softc *ahd, + struct cam_sim *sim, union ccb *ccb); +cam_status ahd_find_tmode_devs(struct ahd_softc *ahd, + struct cam_sim *sim, union ccb *ccb, + struct ahd_tmode_tstate **tstate, + struct ahd_tmode_lstate **lstate, + int notfound_failure); +#ifndef AHD_TMODE_ENABLE +#define AHD_TMODE_ENABLE 0 +#endif +#endif +/******************************* Debug ***************************************/ +#ifdef AHD_DEBUG +extern uint32_t ahd_debug; +#define AHD_SHOW_MISC 0x00001 +#define AHD_SHOW_SENSE 0x00002 +#define AHD_SHOW_RECOVERY 0x00004 +#define AHD_DUMP_SEEPROM 0x00008 +#define AHD_SHOW_TERMCTL 0x00010 +#define AHD_SHOW_MEMORY 0x00020 +#define AHD_SHOW_MESSAGES 0x00040 +#define AHD_SHOW_MODEPTR 0x00080 +#define AHD_SHOW_SELTO 0x00100 +#define AHD_SHOW_FIFOS 0x00200 +#define AHD_SHOW_QFULL 0x00400 +#define AHD_SHOW_DV 0x00800 +#define AHD_SHOW_MASKED_ERRORS 0x01000 +#define AHD_SHOW_QUEUE 0x02000 +#define AHD_SHOW_TQIN 0x04000 +#define AHD_SHOW_SG 0x08000 +#define AHD_SHOW_INT_COALESCING 0x10000 +#define AHD_DEBUG_SEQUENCER 0x20000 +#endif +void ahd_print_devinfo(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo); +void ahd_dump_card_state(struct ahd_softc *ahd); +int ahd_print_register(const ahd_reg_parse_entry_t *table, + u_int num_entries, + const char *name, + u_int address, + u_int value, + u_int *cur_column, + u_int wrap_point); +#endif /* _AIC79XX_H_ */ diff --git a/drivers/scsi/aic7xxx/aic79xx.reg b/drivers/scsi/aic7xxx/aic79xx.reg new file mode 100644 index 000000000..7e12c31cc --- /dev/null +++ b/drivers/scsi/aic7xxx/aic79xx.reg @@ -0,0 +1,4281 @@ +/* + * Aic79xx register and scratch ram definitions. + * + * Copyright (c) 1994-2001, 2004 Justin T. Gibbs. + * Copyright (c) 2000-2002 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $FreeBSD$ + */ +VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $" + +/* + * This file is processed by the aic7xxx_asm utility for use in assembling + * firmware for the aic79xx family of SCSI host adapters as well as to generate + * a C header file for use in the kernel portion of the Aic79xx driver. + */ + +/* Register window Modes */ +#define M_DFF0 0 +#define M_DFF1 1 +#define M_CCHAN 2 +#define M_SCSI 3 +#define M_CFG 4 +#define M_DST_SHIFT 4 + +#define MK_MODE(src, dst) ((src) | ((dst) << M_DST_SHIFT)) +#define SET_MODE(src, dst) \ + SET_SRC_MODE src; \ + SET_DST_MODE dst; \ + if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { \ + mvi MK_MODE(src, dst) call set_mode_work_around; \ + } else { \ + mvi MODE_PTR, MK_MODE(src, dst); \ + } + +#define RESTORE_MODE(mode) \ + if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { \ + mov mode call set_mode_work_around; \ + } else { \ + mov MODE_PTR, mode; \ + } + +#define SET_SEQINTCODE(code) \ + if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { \ + mvi code call set_seqint_work_around; \ + } else { \ + mvi SEQINTCODE, code; \ + } + +/* + * Registers marked "dont_generate_debug_code" are not (yet) referenced + * from the driver code, and this keyword inhibit generation + * of debug code for them. + * + * REG_PRETTY_PRINT config will complain if dont_generate_debug_code + * is added to the register which is referenced in the driver. + * Unreferenced register with no dont_generate_debug_code will result + * in dead code. No warning is issued. + */ + +/* + * Mode Pointer + * Controls which of the 5, 512byte, address spaces should be used + * as the source and destination of any register accesses in our + * register window. 
+ */ +register MODE_PTR { + address 0x000 + access_mode RW + field DST_MODE 0x70 + field SRC_MODE 0x07 + mode_pointer + dont_generate_debug_code +} + +const SRC_MODE_SHIFT 0 +const DST_MODE_SHIFT 4 + +/* + * Host Interrupt Status + */ +register INTSTAT { + address 0x001 + access_mode RW + field HWERRINT 0x80 + field BRKADRINT 0x40 + field SWTMINT 0x20 + field PCIINT 0x10 + field SCSIINT 0x08 + field SEQINT 0x04 + field CMDCMPLT 0x02 + field SPLTINT 0x01 + mask INT_PEND 0xFF +} + +/* + * Sequencer Interrupt Code + */ +register SEQINTCODE { + address 0x002 + access_mode RW + field { + NO_SEQINT, /* No seqint pending. */ + BAD_PHASE, /* unknown scsi bus phase */ + SEND_REJECT, /* sending a message reject */ + PROTO_VIOLATION, /* Protocol Violation */ + NO_MATCH, /* no cmd match for reconnect */ + IGN_WIDE_RES, /* Complex IGN Wide Res Msg */ + PDATA_REINIT, /* + * Returned to data phase + * that requires data + * transfer pointers to be + * recalculated from the + * transfer residual. + */ + HOST_MSG_LOOP, /* + * The bus is ready for the + * host to perform another + * message transaction. This + * mechanism is used for things + * like sync/wide negotiation + * that require a kernel based + * message state engine. + */ + BAD_STATUS, /* Bad status from target */ + DATA_OVERRUN, /* + * Target attempted to write + * beyond the bounds of its + * command. + */ + MKMSG_FAILED, /* + * Target completed command + * without honoring our ATN + * request to issue a message. + */ + MISSED_BUSFREE, /* + * The sequencer never saw + * the bus go free after + * either a command complete + * or disconnect message. + */ + DUMP_CARD_STATE, + ILLEGAL_PHASE, + INVALID_SEQINT, + CFG4ISTAT_INTR, + STATUS_OVERRUN, + CFG4OVERRUN, + ENTERING_NONPACK, + TASKMGMT_FUNC_COMPLETE, /* + * Task management function + * request completed with + * an expected busfree. + */ + TASKMGMT_CMD_CMPLT_OKAY, /* + * A command with a non-zero + * task management function + * has completed via the normal + * command completion method + * for commands with a zero + * task management function. + * This happens when an attempt + * to abort a command loses + * the race for the command to + * complete normally. + */ + TRACEPOINT0, + TRACEPOINT1, + TRACEPOINT2, + TRACEPOINT3, + SAW_HWERR, + BAD_SCB_STATUS + } + dont_generate_debug_code +} + +/* + * Clear Host Interrupt + */ +register CLRINT { + address 0x003 + access_mode WO + count 19 + field CLRHWERRINT 0x80 /* Rev B or greater */ + field CLRBRKADRINT 0x40 + field CLRSWTMINT 0x20 + field CLRPCIINT 0x10 + field CLRSCSIINT 0x08 + field CLRSEQINT 0x04 + field CLRCMDINT 0x02 + field CLRSPLTINT 0x01 + dont_generate_debug_code +} + +/* + * Error Register + */ +register ERROR { + address 0x004 + access_mode RO + field CIOPARERR 0x80 + field CIOACCESFAIL 0x40 /* Rev B or greater */ + field MPARERR 0x20 + field DPARERR 0x10 + field SQPARERR 0x08 + field ILLOPCODE 0x04 + field DSCTMOUT 0x02 + dont_generate_debug_code +} + +/* + * Clear Error + */ +register CLRERR { + address 0x004 + access_mode WO + field CLRCIOPARERR 0x80 + field CLRCIOACCESFAIL 0x40 /* Rev B or greater */ + field CLRMPARERR 0x20 + field CLRDPARERR 0x10 + field CLRSQPARERR 0x08 + field CLRILLOPCODE 0x04 + field CLRDSCTMOUT 0x02 +} + +/* + * Host Control Register + * Overall host control of the device. 
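As a minimal sketch of how the mode window above is driven from the host side, the following packs and unpacks MODE_PTR values using the mode numbers, masks and shifts defined in this file; the mk_mode()/unpack_mode() helpers and the main() harness are illustrative stand-ins, not part of the driver.

/* Sketch: packing/unpacking MODE_PTR values from the definitions above.
 * Helper names are stand-ins, not the driver's real API. */
#include <stdio.h>
#include <stdint.h>

#define M_DFF0        0
#define M_DFF1        1
#define M_CCHAN       2
#define M_SCSI        3
#define M_CFG         4
#define M_DST_SHIFT   4

#define SRC_MODE_MASK 0x07	/* field SRC_MODE in MODE_PTR */
#define DST_MODE_MASK 0x70	/* field DST_MODE in MODE_PTR */

static uint8_t mk_mode(uint8_t src, uint8_t dst)
{
	/* Same packing as the MK_MODE() assembler macro. */
	return (src & SRC_MODE_MASK) | ((dst << M_DST_SHIFT) & DST_MODE_MASK);
}

static void unpack_mode(uint8_t mode_ptr, uint8_t *src, uint8_t *dst)
{
	*src = mode_ptr & SRC_MODE_MASK;
	*dst = (mode_ptr & DST_MODE_MASK) >> M_DST_SHIFT;
}

int main(void)
{
	uint8_t src, dst;
	uint8_t mode = mk_mode(M_CCHAN, M_SCSI);

	unpack_mode(mode, &src, &dst);
	printf("MODE_PTR=0x%02x src=%u dst=%u\n", mode, src, dst);
	return 0;
}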
+ */ +register HCNTRL { + address 0x005 + access_mode RW + count 12 + field SEQ_RESET 0x80 /* Rev B or greater */ + field POWRDN 0x40 + field SWINT 0x10 + field SWTIMER_START_B 0x08 /* Rev B or greater */ + field PAUSE 0x04 + field INTEN 0x02 + field CHIPRST 0x01 + field CHIPRSTACK 0x01 + dont_generate_debug_code +} + +/* + * Host New SCB Queue Offset + */ +register HNSCB_QOFF { + address 0x006 + access_mode RW + size 2 + count 2 + dont_generate_debug_code +} + +/* + * Host Empty SCB Queue Offset + */ +register HESCB_QOFF { + address 0x008 + access_mode RW + count 2 + dont_generate_debug_code +} + +/* + * Host Mailbox + */ +register HS_MAILBOX { + address 0x00B + access_mode RW + mask HOST_TQINPOS 0x80 /* Boundary at either 0 or 128 */ + mask ENINT_COALESCE 0x40 /* Perform interrupt coalescing */ +} + +/* + * Sequencer Interrupt Status + */ +register SEQINTSTAT { + address 0x00C + count 1 + access_mode RO + field SEQ_SWTMRTO 0x10 + field SEQ_SEQINT 0x08 + field SEQ_SCSIINT 0x04 + field SEQ_PCIINT 0x02 + field SEQ_SPLTINT 0x01 +} + +/* + * Clear SEQ Interrupt + */ +register CLRSEQINTSTAT { + address 0x00C + access_mode WO + field CLRSEQ_SWTMRTO 0x10 + field CLRSEQ_SEQINT 0x08 + field CLRSEQ_SCSIINT 0x04 + field CLRSEQ_PCIINT 0x02 + field CLRSEQ_SPLTINT 0x01 + dont_generate_debug_code +} + +/* + * Software Timer + */ +register SWTIMER { + address 0x00E + access_mode RW + size 2 + dont_generate_debug_code +} + +/* + * SEQ New SCB Queue Offset + */ +register SNSCB_QOFF { + address 0x010 + access_mode RW + size 2 + modes M_CCHAN + dont_generate_debug_code +} + +/* + * SEQ Empty SCB Queue Offset + */ +register SESCB_QOFF { + address 0x012 + count 2 + access_mode RW + modes M_CCHAN + dont_generate_debug_code +} + +/* + * SEQ Done SCB Queue Offset + */ +register SDSCB_QOFF { + address 0x014 + access_mode RW + modes M_CCHAN + size 2 + dont_generate_debug_code +} + +/* + * Queue Offset Control & Status + */ +register QOFF_CTLSTA { + address 0x016 + access_mode RW + modes M_CCHAN + field EMPTY_SCB_AVAIL 0x80 + field NEW_SCB_AVAIL 0x40 + field SDSCB_ROLLOVR 0x20 + field HS_MAILBOX_ACT 0x10 + field SCB_QSIZE 0x0F { + SCB_QSIZE_4, + SCB_QSIZE_8, + SCB_QSIZE_16, + SCB_QSIZE_32, + SCB_QSIZE_64, + SCB_QSIZE_128, + SCB_QSIZE_256, + SCB_QSIZE_512, + SCB_QSIZE_1024, + SCB_QSIZE_2048, + SCB_QSIZE_4096, + SCB_QSIZE_8192, + SCB_QSIZE_16384 + } + dont_generate_debug_code +} + +/* + * Interrupt Control + */ +register INTCTL { + address 0x018 + access_mode RW + field SWTMINTMASK 0x80 + field SWTMINTEN 0x40 + field SWTIMER_START 0x20 + field AUTOCLRCMDINT 0x10 + field PCIINTEN 0x08 + field SCSIINTEN 0x04 + field SEQINTEN 0x02 + field SPLTINTEN 0x01 +} + +/* + * Data FIFO Control + */ +register DFCNTRL { + address 0x019 + access_mode RW + modes M_DFF0, M_DFF1 + count 11 + field PRELOADEN 0x80 + field SCSIENWRDIS 0x40 /* Rev B only. 
*/ + field SCSIEN 0x20 + field SCSIENACK 0x20 + field HDMAEN 0x08 + field HDMAENACK 0x08 + field DIRECTION 0x04 + field DIRECTIONACK 0x04 + field FIFOFLUSH 0x02 + field FIFOFLUSHACK 0x02 + field DIRECTIONEN 0x01 +} + +/* + * Device Space Command 0 + */ +register DSCOMMAND0 { + address 0x019 + count 1 + access_mode RW + modes M_CFG + field CACHETHEN 0x80 /* Cache Threshold enable */ + field DPARCKEN 0x40 /* Data Parity Check Enable */ + field MPARCKEN 0x20 /* Memory Parity Check Enable */ + field EXTREQLCK 0x10 /* External Request Lock */ + field DISABLE_TWATE 0x02 /* Rev B or greater */ + field CIOPARCKEN 0x01 /* Internal bus parity error enable */ + dont_generate_debug_code +} + +/* + * Data FIFO Status + */ +register DFSTATUS { + address 0x01A + access_mode RO + modes M_DFF0, M_DFF1 + field PRELOAD_AVAIL 0x80 + field PKT_PRELOAD_AVAIL 0x40 + field MREQPEND 0x10 + field HDONE 0x08 + field DFTHRESH 0x04 + field FIFOFULL 0x02 + field FIFOEMP 0x01 +} + +/* + * S/G Cache Pointer + */ +register SG_CACHE_PRE { + address 0x01B + access_mode WO + modes M_DFF0, M_DFF1 + field SG_ADDR_MASK 0xf8 + field ODD_SEG 0x04 + field LAST_SEG 0x02 + dont_generate_debug_code +} + +register SG_CACHE_SHADOW { + address 0x01B + access_mode RO + modes M_DFF0, M_DFF1 + field SG_ADDR_MASK 0xf8 + field ODD_SEG 0x04 + field LAST_SEG 0x02 + field LAST_SEG_DONE 0x01 +} + +/* + * Arbiter Control + */ +register ARBCTL { + address 0x01B + access_mode RW + modes M_CFG + field RESET_HARB 0x80 + field RETRY_SWEN 0x08 + field USE_TIME 0x07 +} + +/* + * Data Channel Host Address + */ +register HADDR { + address 0x070 + access_mode RW + size 8 + modes M_DFF0, M_DFF1 + dont_generate_debug_code +} + +/* + * Host Overlay DMA Address + */ +register HODMAADR { + address 0x070 + access_mode RW + size 8 + modes M_SCSI +} + +/* + * PCI PLL Delay. 
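Data-path teardown code would typically poll DFSTATUS before releasing a FIFO. A minimal sketch of that pattern follows, assuming a stand-in read_dfstatus() accessor in place of the driver's real MMIO helpers; only the bit values come from the DFSTATUS definition above.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define HDONE      0x08	/* DFSTATUS: host DMA engine done */
#define FIFOEMP    0x01	/* DFSTATUS: data FIFO empty */

/* Stand-in for an MMIO read of DFSTATUS; a real driver reads the chip. */
static uint8_t read_dfstatus(void)
{
	return HDONE | FIFOEMP;
}

static bool wait_fifo_empty(unsigned max_spins)
{
	while (max_spins--) {
		uint8_t dfstatus = read_dfstatus();

		if ((dfstatus & (HDONE | FIFOEMP)) == (HDONE | FIFOEMP))
			return true;
	}
	return false;	/* treat as a wedged FIFO */
}

int main(void)
{
	printf("fifo empty: %d\n", wait_fifo_empty(1000));
	return 0;
}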
+ */ +register PLLDELAY { + address 0x070 + access_mode RW + size 1 + modes M_CFG + field SPLIT_DROP_REQ 0x80 +} + +/* + * Data Channel Host Count + */ +register HCNT { + address 0x078 + access_mode RW + size 3 + modes M_DFF0, M_DFF1 + dont_generate_debug_code +} + +/* + * Host Overlay DMA Count + */ +register HODMACNT { + address 0x078 + access_mode RW + size 2 + modes M_SCSI +} + +/* + * Host Overlay DMA Enable + */ +register HODMAEN { + address 0x07A + access_mode RW + modes M_SCSI +} + +/* + * Scatter/Gather Host Address + */ +register SGHADDR { + address 0x07C + access_mode RW + size 8 + modes M_DFF0, M_DFF1 + dont_generate_debug_code +} + +/* + * SCB Host Address + */ +register SCBHADDR { + address 0x07C + access_mode RW + size 8 + modes M_CCHAN + dont_generate_debug_code +} + +/* + * Scatter/Gather Host Count + */ +register SGHCNT { + address 0x084 + access_mode RW + modes M_DFF0, M_DFF1 + dont_generate_debug_code +} + +/* + * SCB Host Count + */ +register SCBHCNT { + address 0x084 + access_mode RW + modes M_CCHAN + dont_generate_debug_code +} + +/* + * Data FIFO Threshold + */ +register DFF_THRSH { + address 0x088 + access_mode RW + modes M_CFG + count 1 + field WR_DFTHRSH 0x70 { + WR_DFTHRSH_MIN, + WR_DFTHRSH_25, + WR_DFTHRSH_50, + WR_DFTHRSH_63, + WR_DFTHRSH_75, + WR_DFTHRSH_85, + WR_DFTHRSH_90, + WR_DFTHRSH_MAX + } + field RD_DFTHRSH 0x07 { + RD_DFTHRSH_MIN, + RD_DFTHRSH_25, + RD_DFTHRSH_50, + RD_DFTHRSH_63, + RD_DFTHRSH_75, + RD_DFTHRSH_85, + RD_DFTHRSH_90, + RD_DFTHRSH_MAX + } + dont_generate_debug_code +} + +/* + * ROM Address + */ +register ROMADDR { + address 0x08A + access_mode RW + size 3 +} + +/* + * ROM Control + */ +register ROMCNTRL { + address 0x08D + access_mode RW + field ROMOP 0xE0 + field ROMSPD 0x18 + field REPEAT 0x02 + field RDY 0x01 +} + +/* + * ROM Data + */ +register ROMDATA { + address 0x08E + access_mode RW +} + +/* + * Data Channel Receive Message 0 + */ +register DCHRXMSG0 { + address 0x090 + access_mode RO + modes M_DFF0, M_DFF1 + field CDNUM 0xF8 + field CFNUM 0x07 +} + +/* + * CMC Receive Message 0 + */ +register CMCRXMSG0 { + address 0x090 + access_mode RO + modes M_CCHAN + field CDNUM 0xF8 + field CFNUM 0x07 +} + +/* + * Overlay Receive Message 0 + */ +register OVLYRXMSG0 { + address 0x090 + access_mode RO + modes M_SCSI + field CDNUM 0xF8 + field CFNUM 0x07 +} + +/* + * Relaxed Order Enable + */ +register ROENABLE { + address 0x090 + access_mode RW + modes M_CFG + field MSIROEN 0x20 + field OVLYROEN 0x10 + field CMCROEN 0x08 + field SGROEN 0x04 + field DCH1ROEN 0x02 + field DCH0ROEN 0x01 +} + +/* + * Data Channel Receive Message 1 + */ +register DCHRXMSG1 { + address 0x091 + access_mode RO + modes M_DFF0, M_DFF1 + field CBNUM 0xFF +} + +/* + * CMC Receive Message 1 + */ +register CMCRXMSG1 { + address 0x091 + access_mode RO + modes M_CCHAN + field CBNUM 0xFF +} + +/* + * Overlay Receive Message 1 + */ +register OVLYRXMSG1 { + address 0x091 + access_mode RO + modes M_SCSI + field CBNUM 0xFF +} + +/* + * No Snoop Enable + */ +register NSENABLE { + address 0x091 + access_mode RW + modes M_CFG + field MSINSEN 0x20 + field OVLYNSEN 0x10 + field CMCNSEN 0x08 + field SGNSEN 0x04 + field DCH1NSEN 0x02 + field DCH0NSEN 0x01 +} + +/* + * Data Channel Receive Message 2 + */ +register DCHRXMSG2 { + address 0x092 + access_mode RO + modes M_DFF0, M_DFF1 + field MINDEX 0xFF +} + +/* + * CMC Receive Message 2 + */ +register CMCRXMSG2 { + address 0x092 + access_mode RO + modes M_CCHAN + field MINDEX 0xFF +} + +/* + * Overlay Receive Message 2 + */ +register 
OVLYRXMSG2 { + address 0x092 + access_mode RO + modes M_SCSI + field MINDEX 0xFF +} + +/* + * Outstanding Split Transactions + */ +register OST { + address 0x092 + access_mode RW + modes M_CFG +} + +/* + * Data Channel Receive Message 3 + */ +register DCHRXMSG3 { + address 0x093 + access_mode RO + modes M_DFF0, M_DFF1 + field MCLASS 0x0F +} + +/* + * CMC Receive Message 3 + */ +register CMCRXMSG3 { + address 0x093 + access_mode RO + modes M_CCHAN + field MCLASS 0x0F +} + +/* + * Overlay Receive Message 3 + */ +register OVLYRXMSG3 { + address 0x093 + access_mode RO + modes M_SCSI + field MCLASS 0x0F +} + +/* + * PCI-X Control + */ +register PCIXCTL { + address 0x093 + access_mode RW + modes M_CFG + count 1 + field SERRPULSE 0x80 + field UNEXPSCIEN 0x20 + field SPLTSMADIS 0x10 + field SPLTSTADIS 0x08 + field SRSPDPEEN 0x04 + field TSCSERREN 0x02 + field CMPABCDIS 0x01 + dont_generate_debug_code +} + +/* + * CMC Sequencer Byte Count + */ +register CMCSEQBCNT { + address 0x094 + access_mode RO + modes M_CCHAN +} + +/* + * Overlay Sequencer Byte Count + */ +register OVLYSEQBCNT { + address 0x094 + access_mode RO + modes M_SCSI +} + +/* + * Data Channel Sequencer Byte Count + */ +register DCHSEQBCNT { + address 0x094 + access_mode RO + size 2 + modes M_DFF0, M_DFF1 +} + +/* + * Data Channel Split Status 0 + */ +register DCHSPLTSTAT0 { + address 0x096 + access_mode RW + modes M_DFF0, M_DFF1 + count 2 + field STAETERM 0x80 + field SCBCERR 0x40 + field SCADERR 0x20 + field SCDATBUCKET 0x10 + field CNTNOTCMPLT 0x08 + field RXOVRUN 0x04 + field RXSCEMSG 0x02 + field RXSPLTRSP 0x01 + dont_generate_debug_code +} + +/* + * CMC Split Status 0 + */ +register CMCSPLTSTAT0 { + address 0x096 + access_mode RW + modes M_CCHAN + field STAETERM 0x80 + field SCBCERR 0x40 + field SCADERR 0x20 + field SCDATBUCKET 0x10 + field CNTNOTCMPLT 0x08 + field RXOVRUN 0x04 + field RXSCEMSG 0x02 + field RXSPLTRSP 0x01 +} + +/* + * Overlay Split Status 0 + */ +register OVLYSPLTSTAT0 { + address 0x096 + access_mode RW + modes M_SCSI + field STAETERM 0x80 + field SCBCERR 0x40 + field SCADERR 0x20 + field SCDATBUCKET 0x10 + field CNTNOTCMPLT 0x08 + field RXOVRUN 0x04 + field RXSCEMSG 0x02 + field RXSPLTRSP 0x01 +} + +/* + * Data Channel Split Status 1 + */ +register DCHSPLTSTAT1 { + address 0x097 + access_mode RW + modes M_DFF0, M_DFF1 + count 2 + field RXDATABUCKET 0x01 + dont_generate_debug_code +} + +/* + * CMC Split Status 1 + */ +register CMCSPLTSTAT1 { + address 0x097 + access_mode RW + modes M_CCHAN + field RXDATABUCKET 0x01 +} + +/* + * Overlay Split Status 1 + */ +register OVLYSPLTSTAT1 { + address 0x097 + access_mode RW + modes M_SCSI + field RXDATABUCKET 0x01 +} + +/* + * S/G Receive Message 0 + */ +register SGRXMSG0 { + address 0x098 + access_mode RO + modes M_DFF0, M_DFF1 + field CDNUM 0xF8 + field CFNUM 0x07 +} + +/* + * S/G Receive Message 1 + */ +register SGRXMSG1 { + address 0x099 + access_mode RO + modes M_DFF0, M_DFF1 + field CBNUM 0xFF +} + +/* + * S/G Receive Message 2 + */ +register SGRXMSG2 { + address 0x09A + access_mode RO + modes M_DFF0, M_DFF1 + field MINDEX 0xFF +} + +/* + * S/G Receive Message 3 + */ +register SGRXMSG3 { + address 0x09B + access_mode RO + modes M_DFF0, M_DFF1 + field MCLASS 0x0F +} + +/* + * Slave Split Out Address 0 + */ +register SLVSPLTOUTADR0 { + address 0x098 + access_mode RO + modes M_SCSI + field LOWER_ADDR 0x7F +} + +/* + * Slave Split Out Address 1 + */ +register SLVSPLTOUTADR1 { + address 0x099 + access_mode RO + modes M_SCSI + field REQ_DNUM 0xF8 + field REQ_FNUM 0x07 +} + 
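All of the *RXMSG0-3 banks above (DCH, CMC, OVLY and SG variants) share one layout: byte 0 carries a device number (CDNUM) and function number (CFNUM), byte 1 a bus number (CBNUM), byte 2 a message index (MINDEX) and the low nibble of byte 3 a message class (MCLASS). A small decoding sketch, with an illustrative struct and helper name:

#include <stdint.h>
#include <stdio.h>

#define CDNUM_MASK  0xF8	/* RXMSG0: device number, bits 7:3 */
#define CDNUM_SHIFT 3
#define CFNUM_MASK  0x07	/* RXMSG0: function number */
#define MCLASS_MASK 0x0F	/* RXMSG3: message class */

struct splt_msg {
	uint8_t dev, func, bus, index, mclass;
};

static struct splt_msg decode_rxmsg(uint8_t m0, uint8_t m1,
				    uint8_t m2, uint8_t m3)
{
	struct splt_msg msg = {
		.dev    = (m0 & CDNUM_MASK) >> CDNUM_SHIFT,
		.func   = m0 & CFNUM_MASK,
		.bus    = m1,		/* RXMSG1: CBNUM covers the byte */
		.index  = m2,		/* RXMSG2: MINDEX covers the byte */
		.mclass = m3 & MCLASS_MASK,
	};
	return msg;
}

int main(void)
{
	struct splt_msg m = decode_rxmsg(0x4B, 0x02, 0x10, 0x05);

	printf("bus %u dev %u func %u class %u index %u\n",
	       m.bus, m.dev, m.func, m.mclass, m.index);
	return 0;
}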
+/* + * Slave Split Out Address 2 + */ +register SLVSPLTOUTADR2 { + address 0x09A + access_mode RO + modes M_SCSI + field REQ_BNUM 0xFF +} + +/* + * Slave Split Out Address 3 + */ +register SLVSPLTOUTADR3 { + address 0x09B + access_mode RO + modes M_SCSI + field RLXORD 020 + field TAG_NUM 0x1F +} + +/* + * SG Sequencer Byte Count + */ +register SGSEQBCNT { + address 0x09C + access_mode RO + modes M_DFF0, M_DFF1 +} + +/* + * Slave Split Out Attribute 0 + */ +register SLVSPLTOUTATTR0 { + address 0x09C + access_mode RO + modes M_SCSI + field LOWER_BCNT 0xFF +} + +/* + * Slave Split Out Attribute 1 + */ +register SLVSPLTOUTATTR1 { + address 0x09D + access_mode RO + modes M_SCSI + field CMPLT_DNUM 0xF8 + field CMPLT_FNUM 0x07 +} + +/* + * Slave Split Out Attribute 2 + */ +register SLVSPLTOUTATTR2 { + address 0x09E + access_mode RO + size 2 + modes M_SCSI + field CMPLT_BNUM 0xFF +} +/* + * S/G Split Status 0 + */ +register SGSPLTSTAT0 { + address 0x09E + access_mode RW + modes M_DFF0, M_DFF1 + count 2 + field STAETERM 0x80 + field SCBCERR 0x40 + field SCADERR 0x20 + field SCDATBUCKET 0x10 + field CNTNOTCMPLT 0x08 + field RXOVRUN 0x04 + field RXSCEMSG 0x02 + field RXSPLTRSP 0x01 + dont_generate_debug_code +} + +/* + * S/G Split Status 1 + */ +register SGSPLTSTAT1 { + address 0x09F + access_mode RW + modes M_DFF0, M_DFF1 + count 2 + field RXDATABUCKET 0x01 + dont_generate_debug_code +} + +/* + * Special Function + */ +register SFUNCT { + address 0x09f + access_mode RW + modes M_CFG + field TEST_GROUP 0xF0 + field TEST_NUM 0x0F + dont_generate_debug_code +} + +/* + * Data FIFO 0 PCI Status + */ +register DF0PCISTAT { + address 0x0A0 + access_mode RW + modes M_CFG + count 1 + field DPE 0x80 + field SSE 0x40 + field RMA 0x20 + field RTA 0x10 + field SCAAPERR 0x08 + field RDPERR 0x04 + field TWATERR 0x02 + field DPR 0x01 + dont_generate_debug_code +} + +/* + * Data FIFO 1 PCI Status + */ +register DF1PCISTAT { + address 0x0A1 + access_mode RW + modes M_CFG + field DPE 0x80 + field SSE 0x40 + field RMA 0x20 + field RTA 0x10 + field SCAAPERR 0x08 + field RDPERR 0x04 + field TWATERR 0x02 + field DPR 0x01 +} + +/* + * S/G PCI Status + */ +register SGPCISTAT { + address 0x0A2 + access_mode RW + modes M_CFG + field DPE 0x80 + field SSE 0x40 + field RMA 0x20 + field RTA 0x10 + field SCAAPERR 0x08 + field RDPERR 0x04 + field DPR 0x01 +} + +/* + * CMC PCI Status + */ +register CMCPCISTAT { + address 0x0A3 + access_mode RW + modes M_CFG + field DPE 0x80 + field SSE 0x40 + field RMA 0x20 + field RTA 0x10 + field SCAAPERR 0x08 + field RDPERR 0x04 + field TWATERR 0x02 + field DPR 0x01 +} + +/* + * Overlay PCI Status + */ +register OVLYPCISTAT { + address 0x0A4 + access_mode RW + modes M_CFG + field DPE 0x80 + field SSE 0x40 + field RMA 0x20 + field RTA 0x10 + field SCAAPERR 0x08 + field RDPERR 0x04 + field DPR 0x01 +} + +/* + * PCI Status for MSI Master DMA Transfer + */ +register MSIPCISTAT { + address 0x0A6 + access_mode RW + modes M_CFG + field SSE 0x40 + field RMA 0x20 + field RTA 0x10 + field CLRPENDMSI 0x08 + field TWATERR 0x02 + field DPR 0x01 +} + +/* + * PCI Status for Target + */ +register TARGPCISTAT { + address 0x0A7 + access_mode RW + modes M_CFG + count 5 + field DPE 0x80 + field SSE 0x40 + field STA 0x08 + field TWATERR 0x02 + dont_generate_debug_code +} + +/* + * LQ Packet In + * The last LQ Packet received + */ +register LQIN { + address 0x020 + access_mode RW + size 20 + count 2 + modes M_DFF0, M_DFF1, M_SCSI + dont_generate_debug_code +} + +/* + * SCB Type Pointer + * SCB offset for Target Mode 
SCB type information + */ +register TYPEPTR { + address 0x020 + access_mode RW + modes M_CFG +} + +/* + * Queue Tag Pointer + * SCB offset to the Two Byte tag identifier used for target mode. + */ +register TAGPTR { + address 0x021 + access_mode RW + modes M_CFG +} + +/* + * Logical Unit Number Pointer + * SCB offset to the LSB (little endian) of the lun field. + */ +register LUNPTR { + address 0x022 + access_mode RW + modes M_CFG + count 2 + dont_generate_debug_code +} + +/* + * Data Length Pointer + * SCB offset for the 4 byte data length field in target mode. + */ +register DATALENPTR { + address 0x023 + access_mode RW + modes M_CFG +} + +/* + * Status Length Pointer + * SCB offset to the two byte status field in target SCBs. + */ +register STATLENPTR { + address 0x024 + access_mode RW + modes M_CFG +} + +/* + * Command Length Pointer + * Scb offset for the CDB length field in initiator SCBs. + */ +register CMDLENPTR { + address 0x025 + access_mode RW + modes M_CFG + count 1 + dont_generate_debug_code +} + +/* + * Task Attribute Pointer + * Scb offset for the byte field specifying the attribute byte + * to be used in command packets. + */ +register ATTRPTR { + address 0x026 + access_mode RW + modes M_CFG + count 1 + dont_generate_debug_code +} + +/* + * Task Management Flags Pointer + * Scb offset for the byte field specifying the attribute flags + * byte to be used in command packets. + */ +register FLAGPTR { + address 0x027 + access_mode RW + modes M_CFG + count 1 + dont_generate_debug_code +} + +/* + * Command Pointer + * Scb offset for the first byte in the CDB for initiator SCBs. + */ +register CMDPTR { + address 0x028 + access_mode RW + modes M_CFG + count 1 + dont_generate_debug_code +} + +/* + * Queue Next Pointer + * Scb offset for the 2 byte "next scb link". + */ +register QNEXTPTR { + address 0x029 + access_mode RW + modes M_CFG + count 1 + dont_generate_debug_code +} + +/* + * SCSI ID Pointer + * Scb offset to the value to place in the SCSIID register + * during target mode connections. + */ +register IDPTR { + address 0x02A + access_mode RW + modes M_CFG +} + +/* + * Command Aborted Byte Pointer + * Offset to the SCB flags field that includes the + * "SCB aborted" status bit. + */ +register ABRTBYTEPTR { + address 0x02B + access_mode RW + modes M_CFG + count 1 + dont_generate_debug_code +} + +/* + * Command Aborted Bit Pointer + * Bit offset in the SCB flags field for "SCB aborted" status. + */ +register ABRTBITPTR { + address 0x02C + access_mode RW + modes M_CFG + count 1 + dont_generate_debug_code +} + +/* + * Rev B or greater. + */ +register MAXCMDBYTES { + address 0x02D + access_mode RW + modes M_CFG +} + +/* + * Rev B or greater. + */ +register MAXCMD2RCV { + address 0x02E + access_mode RW + modes M_CFG +} + +/* + * Rev B or greater. + */ +register SHORTTHRESH { + address 0x02F + access_mode RW + modes M_CFG +} + +/* + * Logical Unit Number Length + * The length, in bytes, of the SCB lun field. + */ +register LUNLEN { + address 0x030 + access_mode RW + modes M_CFG + count 2 + mask ILUNLEN 0x0F + mask TLUNLEN 0xF0 + dont_generate_debug_code +} +const LUNLEN_SINGLE_LEVEL_LUN 0xF + +/* + * CDB Limit + * The size, in bytes, of the embedded CDB field in initator SCBs. + */ +register CDBLIMIT { + address 0x031 + access_mode RW + modes M_CFG + count 1 + dont_generate_debug_code +} + +/* + * Maximum Commands + * The maximum number of commands to issue during a + * single packetized connection. 
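Each of the *PTR registers above is loaded, in mode CFG, with a byte offset into the host's SCB so the packetized hardware can locate the CDB, lun and queue-link fields on its own. The sketch below shows that setup using a cut-down, hypothetical SCB layout and a stand-in write_cfg_reg() in place of the driver's real SCB structure and accessors.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define LUNPTR     0x022
#define CMDLENPTR  0x025
#define CMDPTR     0x028
#define QNEXTPTR   0x029

struct demo_scb {		/* hypothetical SCB layout */
	uint8_t  cdb[16];
	uint8_t  lun[8];
	uint8_t  cdb_len;
	uint16_t next_scb;
};

static void write_cfg_reg(uint16_t address, uint8_t value)
{
	/* Stand-in for a mode-CFG register write. */
	printf("reg 0x%03x <- 0x%02x\n", address, value);
}

int main(void)
{
	/* Each pointer register holds its field's offset within the SCB. */
	write_cfg_reg(LUNPTR,    offsetof(struct demo_scb, lun));
	write_cfg_reg(CMDLENPTR, offsetof(struct demo_scb, cdb_len));
	write_cfg_reg(CMDPTR,    offsetof(struct demo_scb, cdb));
	write_cfg_reg(QNEXTPTR,  offsetof(struct demo_scb, next_scb));
	return 0;
}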
+ */ +register MAXCMD { + address 0x032 + access_mode RW + modes M_CFG + count 9 + dont_generate_debug_code +} + +/* + * Maximum Command Counter + * The number of commands already sent during this connection + */ +register MAXCMDCNT { + address 0x033 + access_mode RW + modes M_CFG + dont_generate_debug_code +} + +/* + * LQ Packet Reserved Bytes + * The bytes to be sent in the currently reserved fileds + * of all LQ packets. + */ +register LQRSVD01 { + address 0x034 + access_mode RW + modes M_SCSI +} +register LQRSVD16 { + address 0x035 + access_mode RW + modes M_SCSI +} +register LQRSVD17 { + address 0x036 + access_mode RW + modes M_SCSI +} + +/* + * Command Reserved 0 + * The byte to be sent for the reserved byte 0 of + * outgoing command packets. + */ +register CMDRSVD0 { + address 0x037 + access_mode RW + modes M_CFG +} + +/* + * LQ Manager Control 0 + */ +register LQCTL0 { + address 0x038 + access_mode RW + modes M_CFG + field LQITARGCLT 0xC0 + field LQIINITGCLT 0x30 + field LQ0TARGCLT 0x0C + field LQ0INITGCLT 0x03 +} + +/* + * LQ Manager Control 1 + */ +register LQCTL1 { + address 0x038 + access_mode RW + modes M_DFF0, M_DFF1, M_SCSI + count 2 + field PCI2PCI 0x04 + field SINGLECMD 0x02 + field ABORTPENDING 0x01 + dont_generate_debug_code +} + +/* + * LQ Manager Control 2 + */ +register LQCTL2 { + address 0x039 + access_mode RW + modes M_DFF0, M_DFF1, M_SCSI + count 5 + field LQIRETRY 0x80 + field LQICONTINUE 0x40 + field LQITOIDLE 0x20 + field LQIPAUSE 0x10 + field LQORETRY 0x08 + field LQOCONTINUE 0x04 + field LQOTOIDLE 0x02 + field LQOPAUSE 0x01 + dont_generate_debug_code +} + +/* + * SCSI RAM BIST0 + */ +register SCSBIST0 { + address 0x039 + access_mode RW + modes M_CFG + field GSBISTERR 0x40 + field GSBISTDONE 0x20 + field GSBISTRUN 0x10 + field OSBISTERR 0x04 + field OSBISTDONE 0x02 + field OSBISTRUN 0x01 +} + +/* + * SCSI Sequence Control0 + */ +register SCSISEQ0 { + address 0x03A + access_mode RW + modes M_DFF0, M_DFF1, M_SCSI + field TEMODEO 0x80 + field ENSELO 0x40 + field ENARBO 0x20 + field FORCEBUSFREE 0x10 + field SCSIRSTO 0x01 +} + +/* + * SCSI RAM BIST 1 + */ +register SCSBIST1 { + address 0x03A + access_mode RW + modes M_CFG + field NTBISTERR 0x04 + field NTBISTDONE 0x02 + field NTBISTRUN 0x01 +} + +/* + * SCSI Sequence Control 1 + */ +register SCSISEQ1 { + address 0x03B + access_mode RW + modes M_DFF0, M_DFF1, M_SCSI + count 8 + field MANUALCTL 0x40 + field ENSELI 0x20 + field ENRSELI 0x10 + field MANUALP 0x0C + field ENAUTOATNP 0x02 + field ALTSTIM 0x01 +} + +/* + * SCSI Transfer Control 0 + */ +register SXFRCTL0 { + address 0x03C + access_mode RW + modes M_SCSI + field DFON 0x80 + field DFPEXP 0x40 + field BIOSCANCELEN 0x10 + field SPIOEN 0x08 + dont_generate_debug_code +} + +/* + * SCSI Transfer Control 1 + */ +register SXFRCTL1 { + address 0x03D + access_mode RW + modes M_SCSI + field BITBUCKET 0x80 + field ENSACHK 0x40 + field ENSPCHK 0x20 + field STIMESEL 0x18 + field ENSTIMER 0x04 + field ACTNEGEN 0x02 + field STPWEN 0x01 + dont_generate_debug_code +} + +/* + * SCSI Transfer Control 2 + */ +register SXFRCTL2 { + address 0x03E + access_mode RW + modes M_SCSI + field AUTORSTDIS 0x10 + field CMDDMAEN 0x08 + field ASU 0x07 +} + +/* + * SCSI Bus Initiator IDs + * Bitmask of observed initiators on the bus. + */ +register BUSINITID { + address 0x03C + access_mode RW + modes M_CFG + size 2 +} + +/* + * Data Length Counters + * Packet byte counter. 
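SCSISEQ1 above gates selection-in and reselection-in, so turning a single enable on is an ordinary read-modify-write. A sketch follows, with a simulated register array standing in for the chip; the register address and bit values come from the SCSISEQ1 definition above.

#include <stdint.h>
#include <stdio.h>

#define SCSISEQ1  0x03B
#define ENSELI    0x20
#define ENRSELI   0x10

static uint8_t regs[0x100];	/* simulated register space */

static uint8_t reg_read(uint16_t a)             { return regs[a]; }
static void    reg_write(uint16_t a, uint8_t v) { regs[a] = v; }

static void enable_reselection(void)
{
	uint8_t scsiseq1 = reg_read(SCSISEQ1);

	reg_write(SCSISEQ1, scsiseq1 | ENRSELI);
}

int main(void)
{
	regs[SCSISEQ1] = ENSELI;	/* pretend selection-in was already on */
	enable_reselection();
	printf("SCSISEQ1 = 0x%02x\n", reg_read(SCSISEQ1));
	return 0;
}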
+ */ +register DLCOUNT { + address 0x03C + access_mode RW + modes M_DFF0, M_DFF1 + size 3 +} + +/* + * Data FIFO Status + */ +register DFFSTAT { + address 0x03F + access_mode RW + modes M_SCSI + field FIFO1FREE 0x20 + field FIFO0FREE 0x10 + /* + * On the B, this enum only works + * in the read direction. For writes, + * you must use the B version of the + * CURRFIFO_0 definition which is defined + * as a constant outside of this register + * definition to avoid confusing the + * register pretty printing code. + */ + enum CURRFIFO 0x03 { + CURRFIFO_0, + CURRFIFO_1, + CURRFIFO_NONE 0x3 + } +} + +const B_CURRFIFO_0 0x2 + +/* + * SCSI Bus Target IDs + * Bitmask of observed targets on the bus. + */ +register BUSTARGID { + address 0x03E + access_mode RW + modes M_CFG + size 2 +} + +/* + * SCSI Control Signal Out + */ +register SCSISIGO { + address 0x040 + access_mode RW + modes M_DFF0, M_DFF1, M_SCSI + field CDO 0x80 + field IOO 0x40 + field MSGO 0x20 + field ATNO 0x10 + field SELO 0x08 + field BSYO 0x04 + field REQO 0x02 + field ACKO 0x01 +/* + * Possible phases to write into SCSISIG0 + */ + enum PHASE_MASK CDO|IOO|MSGO { + P_DATAOUT 0x0, + P_DATAIN IOO, + P_DATAOUT_DT P_DATAOUT|MSGO, + P_DATAIN_DT P_DATAIN|MSGO, + P_COMMAND CDO, + P_MESGOUT CDO|MSGO, + P_STATUS CDO|IOO, + P_MESGIN CDO|IOO|MSGO + } + dont_generate_debug_code +} + +/* + * SCSI Control Signal In + */ +register SCSISIGI { + address 0x041 + access_mode RO + modes M_DFF0, M_DFF1, M_SCSI + field CDI 0x80 + field IOI 0x40 + field MSGI 0x20 + field ATNI 0x10 + field SELI 0x08 + field BSYI 0x04 + field REQI 0x02 + field ACKI 0x01 +/* + * Possible phases in SCSISIGI + */ + enum PHASE_MASK CDO|IOO|MSGO { + P_DATAOUT 0x0, + P_DATAIN IOO, + P_DATAOUT_DT P_DATAOUT|MSGO, + P_DATAIN_DT P_DATAIN|MSGO, + P_COMMAND CDO, + P_MESGOUT CDO|MSGO, + P_STATUS CDO|IOO, + P_MESGIN CDO|IOO|MSGO + } +} + +/* + * Multiple Target IDs + * Bitmask of ids to respond as a target. + */ +register MULTARGID { + address 0x040 + access_mode RW + modes M_CFG + size 2 + count 2 + dont_generate_debug_code +} + +/* + * SCSI Phase + */ +register SCSIPHASE { + address 0x042 + access_mode RO + modes M_DFF0, M_DFF1, M_SCSI + field STATUS_PHASE 0x20 + field COMMAND_PHASE 0x10 + field MSG_IN_PHASE 0x08 + field MSG_OUT_PHASE 0x04 + field DATA_PHASE_MASK 0x03 { + DATA_OUT_PHASE 0x01, + DATA_IN_PHASE 0x02 + } +} + +/* + * SCSI Data 0 Image + */ +register SCSIDAT0_IMG { + address 0x043 + access_mode RW + modes M_DFF0, M_DFF1, M_SCSI +} + +/* + * SCSI Latched Data + */ +register SCSIDAT { + address 0x044 + access_mode RW + modes M_DFF0, M_DFF1, M_SCSI + size 2 + dont_generate_debug_code +} + +/* + * SCSI Data Bus + */ +register SCSIBUS { + address 0x046 + access_mode RW + modes M_DFF0, M_DFF1, M_SCSI + size 2 +} + +/* + * Target ID In + */ +register TARGIDIN { + address 0x048 + access_mode RO + modes M_DFF0, M_DFF1, M_SCSI + count 2 + field CLKOUT 0x80 + field TARGID 0x0F + dont_generate_debug_code +} + +/* + * Selection/Reselection ID + * Upper four bits are the device id. The ONEBIT is set when the re/selecting + * device did not set its own ID. + */ +register SELID { + address 0x049 + access_mode RW + modes M_DFF0, M_DFF1, M_SCSI + field SELID_MASK 0xf0 + field ONEBIT 0x08 +} + +/* + * SCSI Block Control + * Controls Bus type and channel selection. SELWIDE allows for the + * coexistence of 8bit and 16bit devices on a wide bus. 
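Because SCSISIGO/SCSISIGI use the PHASE_MASK encoding above, the current bus phase can be named from just the CDI, IOI and MSGI bits. A small decoder sketch, illustrative only; the raw byte would come from a real SCSISIGI read:

#include <stdint.h>
#include <stdio.h>

#define CDI  0x80
#define IOI  0x40
#define MSGI 0x20
#define PHASE_MASK (CDI | IOI | MSGI)

static const char *phase_name(uint8_t scsisigi)
{
	switch (scsisigi & PHASE_MASK) {
	case 0x00:        return "DATA OUT";
	case IOI:         return "DATA IN";
	case MSGI:        return "DATA OUT (DT)";
	case IOI | MSGI:  return "DATA IN (DT)";
	case CDI:         return "COMMAND";
	case CDI | MSGI:  return "MESSAGE OUT";
	case CDI | IOI:   return "STATUS";
	default:          return "MESSAGE IN";	/* CDI|IOI|MSGI */
	}
}

int main(void)
{
	printf("%s\n", phase_name(0xE2));	/* CDI|IOI|MSGI plus REQI */
	return 0;
}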
+ */ +register SBLKCTL { + address 0x04A + access_mode RW + modes M_DFF0, M_DFF1, M_SCSI + field DIAGLEDEN 0x80 + field DIAGLEDON 0x40 + field ENAB40 0x08 /* LVD transceiver active */ + field ENAB20 0x04 /* SE/HVD transceiver active */ + field SELWIDE 0x02 + dont_generate_debug_code +} + +/* + * Option Mode + */ +register OPTIONMODE { + address 0x04A + access_mode RW + modes M_CFG + count 4 + field BIOSCANCTL 0x80 + field AUTOACKEN 0x40 + field BIASCANCTL 0x20 + field BUSFREEREV 0x10 + field ENDGFORMCHK 0x04 + field AUTO_MSGOUT_DE 0x02 + mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE + dont_generate_debug_code +} + +/* + * SCSI Status 0 + */ +register SSTAT0 { + address 0x04B + access_mode RO + modes M_DFF0, M_DFF1, M_SCSI + field TARGET 0x80 /* Board acting as target */ + field SELDO 0x40 /* Selection Done */ + field SELDI 0x20 /* Board has been selected */ + field SELINGO 0x10 /* Selection In Progress */ + field IOERR 0x08 /* LVD Tranceiver mode changed */ + field OVERRUN 0x04 /* SCSI Offset overrun detected */ + field SPIORDY 0x02 /* SCSI PIO Ready */ + field ARBDO 0x01 /* Arbitration Done Out */ +} + +/* + * Clear SCSI Interrupt 0 + * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0. + */ +register CLRSINT0 { + address 0x04B + access_mode WO + modes M_DFF0, M_DFF1, M_SCSI + field CLRSELDO 0x40 + field CLRSELDI 0x20 + field CLRSELINGO 0x10 + field CLRIOERR 0x08 + field CLROVERRUN 0x04 + field CLRSPIORDY 0x02 + field CLRARBDO 0x01 + dont_generate_debug_code +} + +/* + * SCSI Interrupt Mode 0 + * Setting any bit will enable the corresponding function + * in SIMODE0 to interrupt via the IRQ pin. + */ +register SIMODE0 { + address 0x04B + access_mode RW + modes M_CFG + count 8 + field ENSELDO 0x40 + field ENSELDI 0x20 + field ENSELINGO 0x10 + field ENIOERR 0x08 + field ENOVERRUN 0x04 + field ENSPIORDY 0x02 + field ENARBDO 0x01 +} + +/* + * SCSI Status 1 + */ +register SSTAT1 { + address 0x04C + access_mode RO + modes M_DFF0, M_DFF1, M_SCSI + field SELTO 0x80 + field ATNTARG 0x40 + field SCSIRSTI 0x20 + field PHASEMIS 0x10 + field BUSFREE 0x08 + field SCSIPERR 0x04 + field STRB2FAST 0x02 + field REQINIT 0x01 +} + +/* + * Clear SCSI Interrupt 1 + * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT1. 
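SSTAT0 latches its conditions until the matching CLR* bit is written to CLRSINT0 at the same address, so the usual pattern is read, act, then acknowledge. The sketch below illustrates that pattern with a simulated latch in place of the hardware; the bit values are taken from the SSTAT0/CLRSINT0 definitions above.

#include <stdint.h>
#include <stdio.h>

#define SELDO     0x40	/* SSTAT0: selection out done */
#define SELDI     0x20	/* SSTAT0: selected as a target */
#define CLRSELDO  0x40	/* CLRSINT0 is the write-only alias at 0x04B */
#define CLRSELDI  0x20

static uint8_t sstat0_latch = SELDI;	/* simulated latched status */

static uint8_t read_sstat0(void)            { return sstat0_latch; }
static void    write_clrsint0(uint8_t bits) { sstat0_latch &= ~bits; }

int main(void)
{
	uint8_t status = read_sstat0();

	if (status & SELDI) {
		printf("selected as a target, acknowledging\n");
		write_clrsint0(CLRSELDI);
	}
	if (status & SELDO) {
		printf("selection out done, acknowledging\n");
		write_clrsint0(CLRSELDO);
	}
	printf("SSTAT0 now reads 0x%02x\n", read_sstat0());
	return 0;
}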
+ */ +register CLRSINT1 { + address 0x04C + access_mode WO + modes M_DFF0, M_DFF1, M_SCSI + field CLRSELTIMEO 0x80 + field CLRATNO 0x40 + field CLRSCSIRSTI 0x20 + field CLRBUSFREE 0x08 + field CLRSCSIPERR 0x04 + field CLRSTRB2FAST 0x02 + field CLRREQINIT 0x01 + dont_generate_debug_code +} + +/* + * SCSI Status 2 + */ +register SSTAT2 { + address 0x04d + access_mode RO + modes M_DFF0, M_DFF1, M_SCSI + field BUSFREETIME 0xc0 { + BUSFREE_LQO 0x40, + BUSFREE_DFF0 0x80, + BUSFREE_DFF1 0xC0 + } + field NONPACKREQ 0x20 + field EXP_ACTIVE 0x10 /* SCSI Expander Active */ + field BSYX 0x08 /* Busy Expander */ + field WIDE_RES 0x04 /* Modes 0 and 1 only */ + field SDONE 0x02 /* Modes 0 and 1 only */ + field DMADONE 0x01 /* Modes 0 and 1 only */ +} + +/* + * Clear SCSI Interrupt 2 + */ +register CLRSINT2 { + address 0x04D + access_mode WO + modes M_DFF0, M_DFF1, M_SCSI + field CLRNONPACKREQ 0x20 + field CLRWIDE_RES 0x04 /* Modes 0 and 1 only */ + field CLRSDONE 0x02 /* Modes 0 and 1 only */ + field CLRDMADONE 0x01 /* Modes 0 and 1 only */ + dont_generate_debug_code +} + +/* + * SCSI Interrupt Mode 2 + */ +register SIMODE2 { + address 0x04D + access_mode RW + modes M_CFG + field ENWIDE_RES 0x04 + field ENSDONE 0x02 + field ENDMADONE 0x01 +} + +/* + * Physical Error Diagnosis + */ +register PERRDIAG { + address 0x04E + access_mode RO + modes M_DFF0, M_DFF1, M_SCSI + count 3 + field HIZERO 0x80 + field HIPERR 0x40 + field PREVPHASE 0x20 + field PARITYERR 0x10 + field AIPERR 0x08 + field CRCERR 0x04 + field DGFORMERR 0x02 + field DTERR 0x01 +} + +/* + * LQI Manager Current State + */ +register LQISTATE { + address 0x04E + access_mode RO + modes M_CFG + count 6 + dont_generate_debug_code +} + +/* + * SCSI Offset Count + */ +register SOFFCNT { + address 0x04F + access_mode RO + modes M_DFF0, M_DFF1, M_SCSI + count 1 +} + +/* + * LQO Manager Current State + */ +register LQOSTATE { + address 0x04F + access_mode RO + modes M_CFG + count 2 + dont_generate_debug_code +} + +/* + * LQI Manager Status + */ +register LQISTAT0 { + address 0x050 + access_mode RO + modes M_DFF0, M_DFF1, M_SCSI + count 2 + field LQIATNQAS 0x20 + field LQICRCT1 0x10 + field LQICRCT2 0x08 + field LQIBADLQT 0x04 + field LQIATNLQ 0x02 + field LQIATNCMD 0x01 +} + +/* + * Clear LQI Interrupts 0 + */ +register CLRLQIINT0 { + address 0x050 + access_mode WO + modes M_DFF0, M_DFF1, M_SCSI + count 1 + field CLRLQIATNQAS 0x20 + field CLRLQICRCT1 0x10 + field CLRLQICRCT2 0x08 + field CLRLQIBADLQT 0x04 + field CLRLQIATNLQ 0x02 + field CLRLQIATNCMD 0x01 + dont_generate_debug_code +} + +/* + * LQI Manager Interrupt Mode 0 + */ +register LQIMODE0 { + address 0x050 + access_mode RW + modes M_CFG + count 3 + field ENLQIATNQASK 0x20 + field ENLQICRCT1 0x10 + field ENLQICRCT2 0x08 + field ENLQIBADLQT 0x04 + field ENLQIATNLQ 0x02 + field ENLQIATNCMD 0x01 + dont_generate_debug_code +} + +/* + * LQI Manager Status 1 + */ +register LQISTAT1 { + address 0x051 + access_mode RO + modes M_DFF0, M_DFF1, M_SCSI + count 3 + field LQIPHASE_LQ 0x80 + field LQIPHASE_NLQ 0x40 + field LQIABORT 0x20 + field LQICRCI_LQ 0x10 + field LQICRCI_NLQ 0x08 + field LQIBADLQI 0x04 + field LQIOVERI_LQ 0x02 + field LQIOVERI_NLQ 0x01 +} + +/* + * Clear LQI Manager Interrupts1 + */ +register CLRLQIINT1 { + address 0x051 + access_mode WO + modes M_DFF0, M_DFF1, M_SCSI + count 4 + field CLRLQIPHASE_LQ 0x80 + field CLRLQIPHASE_NLQ 0x40 + field CLRLIQABORT 0x20 + field CLRLQICRCI_LQ 0x10 + field CLRLQICRCI_NLQ 0x08 + field CLRLQIBADLQI 0x04 + field CLRLQIOVERI_LQ 0x02 + field CLRLQIOVERI_NLQ 
0x01 + dont_generate_debug_code +} + +/* + * LQI Manager Interrupt Mode 1 + */ +register LQIMODE1 { + address 0x051 + access_mode RW + modes M_CFG + count 4 + field ENLQIPHASE_LQ 0x80 /* LQIPHASE1 */ + field ENLQIPHASE_NLQ 0x40 /* LQIPHASE2 */ + field ENLIQABORT 0x20 + field ENLQICRCI_LQ 0x10 /* LQICRCI1 */ + field ENLQICRCI_NLQ 0x08 /* LQICRCI2 */ + field ENLQIBADLQI 0x04 + field ENLQIOVERI_LQ 0x02 /* LQIOVERI1 */ + field ENLQIOVERI_NLQ 0x01 /* LQIOVERI2 */ + dont_generate_debug_code +} + +/* + * LQI Manager Status 2 + */ +register LQISTAT2 { + address 0x052 + access_mode RO + modes M_DFF0, M_DFF1, M_SCSI + field PACKETIZED 0x80 + field LQIPHASE_OUTPKT 0x40 + field LQIWORKONLQ 0x20 + field LQIWAITFIFO 0x10 + field LQISTOPPKT 0x08 + field LQISTOPLQ 0x04 + field LQISTOPCMD 0x02 + field LQIGSAVAIL 0x01 +} + +/* + * SCSI Status 3 + */ +register SSTAT3 { + address 0x053 + access_mode RO + modes M_DFF0, M_DFF1, M_SCSI + count 3 + field NTRAMPERR 0x02 + field OSRAMPERR 0x01 +} + +/* + * Clear SCSI Status 3 + */ +register CLRSINT3 { + address 0x053 + access_mode WO + modes M_DFF0, M_DFF1, M_SCSI + count 3 + field CLRNTRAMPERR 0x02 + field CLROSRAMPERR 0x01 + dont_generate_debug_code +} + +/* + * SCSI Interrupt Mode 3 + */ +register SIMODE3 { + address 0x053 + access_mode RW + modes M_CFG + count 4 + field ENNTRAMPERR 0x02 + field ENOSRAMPERR 0x01 + dont_generate_debug_code +} + +/* + * LQO Manager Status 0 + */ +register LQOSTAT0 { + address 0x054 + access_mode RO + modes M_DFF0, M_DFF1, M_SCSI + count 2 + field LQOTARGSCBPERR 0x10 + field LQOSTOPT2 0x08 + field LQOATNLQ 0x04 + field LQOATNPKT 0x02 + field LQOTCRC 0x01 +} + +/* + * Clear LQO Manager interrupt 0 + */ +register CLRLQOINT0 { + address 0x054 + access_mode WO + modes M_DFF0, M_DFF1, M_SCSI + count 3 + field CLRLQOTARGSCBPERR 0x10 + field CLRLQOSTOPT2 0x08 + field CLRLQOATNLQ 0x04 + field CLRLQOATNPKT 0x02 + field CLRLQOTCRC 0x01 + dont_generate_debug_code +} + +/* + * LQO Manager Interrupt Mode 0 + */ +register LQOMODE0 { + address 0x054 + access_mode RW + modes M_CFG + count 4 + field ENLQOTARGSCBPERR 0x10 + field ENLQOSTOPT2 0x08 + field ENLQOATNLQ 0x04 + field ENLQOATNPKT 0x02 + field ENLQOTCRC 0x01 + dont_generate_debug_code +} + +/* + * LQO Manager Status 1 + */ +register LQOSTAT1 { + address 0x055 + access_mode RO + modes M_DFF0, M_DFF1, M_SCSI + field LQOINITSCBPERR 0x10 + field LQOSTOPI2 0x08 + field LQOBADQAS 0x04 + field LQOBUSFREE 0x02 + field LQOPHACHGINPKT 0x01 +} + +/* + * Clear LOQ Interrupt 1 + */ +register CLRLQOINT1 { + address 0x055 + access_mode WO + modes M_DFF0, M_DFF1, M_SCSI + count 7 + field CLRLQOINITSCBPERR 0x10 + field CLRLQOSTOPI2 0x08 + field CLRLQOBADQAS 0x04 + field CLRLQOBUSFREE 0x02 + field CLRLQOPHACHGINPKT 0x01 + dont_generate_debug_code +} + +/* + * LQO Manager Interrupt Mode 1 + */ +register LQOMODE1 { + address 0x055 + access_mode RW + modes M_CFG + count 4 + field ENLQOINITSCBPERR 0x10 + field ENLQOSTOPI2 0x08 + field ENLQOBADQAS 0x04 + field ENLQOBUSFREE 0x02 + field ENLQOPHACHGINPKT 0x01 + dont_generate_debug_code +} + +/* + * LQO Manager Status 2 + */ +register LQOSTAT2 { + address 0x056 + access_mode RO + modes M_DFF0, M_DFF1, M_SCSI + field LQOPKT 0xE0 + field LQOWAITFIFO 0x10 + field LQOPHACHGOUTPKT 0x02 /* outside of packet boundaries. 
*/ + field LQOSTOP0 0x01 /* Stopped after sending all packets */ +} + +/* + * Output Synchronizer Space Count + */ +register OS_SPACE_CNT { + address 0x056 + access_mode RO + modes M_CFG + count 2 + dont_generate_debug_code +} + +/* + * SCSI Interrupt Mode 1 + * Setting any bit will enable the corresponding function + * in SIMODE1 to interrupt via the IRQ pin. + */ +register SIMODE1 { + address 0x057 + access_mode RW + modes M_DFF0, M_DFF1, M_SCSI + field ENSELTIMO 0x80 + field ENATNTARG 0x40 + field ENSCSIRST 0x20 + field ENPHASEMIS 0x10 + field ENBUSFREE 0x08 + field ENSCSIPERR 0x04 + field ENSTRB2FAST 0x02 + field ENREQINIT 0x01 +} + +/* + * Good Status FIFO + */ +register GSFIFO { + address 0x058 + access_mode RO + size 2 + modes M_DFF0, M_DFF1, M_SCSI + dont_generate_debug_code +} + +/* + * Data FIFO SCSI Transfer Control + */ +register DFFSXFRCTL { + address 0x05A + access_mode RW + modes M_DFF0, M_DFF1 + field DFFBITBUCKET 0x08 + field CLRSHCNT 0x04 + field CLRCHN 0x02 + field RSTCHN 0x01 +} + +/* + * Next SCSI Control Block + */ +register NEXTSCB { + address 0x05A + access_mode RW + size 2 + modes M_SCSI + dont_generate_debug_code +} + +/* + * LQO SCSI Control + * (Rev B only.) + */ +register LQOSCSCTL { + address 0x05A + access_mode RW + size 1 + modes M_CFG + count 1 + field LQOH2A_VERSION 0x80 + field LQOBUSETDLY 0x40 + field LQONOHOLDLACK 0x02 + field LQONOCHKOVER 0x01 + dont_generate_debug_code +} + +/* + * SEQ Interrupts + */ +register SEQINTSRC { + address 0x05B + access_mode RO + modes M_DFF0, M_DFF1 + field CTXTDONE 0x40 + field SAVEPTRS 0x20 + field CFG4DATA 0x10 + field CFG4ISTAT 0x08 + field CFG4TSTAT 0x04 + field CFG4ICMD 0x02 + field CFG4TCMD 0x01 +} + +/* + * Clear Arp Interrupts + */ +register CLRSEQINTSRC { + address 0x05B + access_mode WO + modes M_DFF0, M_DFF1 + field CLRCTXTDONE 0x40 + field CLRSAVEPTRS 0x20 + field CLRCFG4DATA 0x10 + field CLRCFG4ISTAT 0x08 + field CLRCFG4TSTAT 0x04 + field CLRCFG4ICMD 0x02 + field CLRCFG4TCMD 0x01 + dont_generate_debug_code +} + +/* + * SEQ Interrupt Enabled (Shared) + */ +register SEQIMODE { + address 0x05C + access_mode RW + modes M_DFF0, M_DFF1 + field ENCTXTDONE 0x40 + field ENSAVEPTRS 0x20 + field ENCFG4DATA 0x10 + field ENCFG4ISTAT 0x08 + field ENCFG4TSTAT 0x04 + field ENCFG4ICMD 0x02 + field ENCFG4TCMD 0x01 +} + +/* + * Current SCSI Control Block + */ +register CURRSCB { + address 0x05C + access_mode RW + size 2 + modes M_SCSI + dont_generate_debug_code +} + +/* + * Data FIFO Status + */ +register MDFFSTAT { + address 0x05D + access_mode RO + modes M_DFF0, M_DFF1 + field SHCNTNEGATIVE 0x40 /* Rev B or higher */ + field SHCNTMINUS1 0x20 /* Rev B or higher */ + field LASTSDONE 0x10 + field SHVALID 0x08 + field DLZERO 0x04 /* FIFO data ends on packet boundary. 
*/ + field DATAINFIFO 0x02 + field FIFOFREE 0x01 +} + +/* + * CRC Control + */ +register CRCCONTROL { + address 0x05d + access_mode RW + modes M_CFG + field CRCVALCHKEN 0x40 +} + +/* + * SCSI Test Control + */ +register SCSITEST { + address 0x05E + access_mode RW + modes M_CFG + field CNTRTEST 0x08 + field SEL_TXPLL_DEBUG 0x04 +} + +/* + * Data FIFO Queue Tag + */ +register DFFTAG { + address 0x05E + access_mode RW + size 2 + modes M_DFF0, M_DFF1 +} + +/* + * Last SCSI Control Block + */ +register LASTSCB { + address 0x05E + access_mode RW + size 2 + modes M_SCSI + dont_generate_debug_code +} + +/* + * SCSI I/O Cell Power-down Control + */ +register IOPDNCTL { + address 0x05F + access_mode RW + modes M_CFG + field DISABLE_OE 0x80 + field PDN_IDIST 0x04 + field PDN_DIFFSENSE 0x01 +} + +/* + * Shadow Host Address. + */ +register SHADDR { + address 0x060 + access_mode RO + size 8 + modes M_DFF0, M_DFF1 + dont_generate_debug_code +} + +/* + * Data Group CRC Interval. + */ +register DGRPCRCI { + address 0x060 + access_mode RW + size 2 + modes M_CFG +} + +/* + * Data Transfer Negotiation Address + */ +register NEGOADDR { + address 0x060 + access_mode RW + modes M_SCSI + dont_generate_debug_code +} + +/* + * Data Transfer Negotiation Data - Period Byte + */ +register NEGPERIOD { + address 0x061 + access_mode RW + modes M_SCSI + count 1 + dont_generate_debug_code +} + +/* + * Packetized CRC Interval + */ +register PACKCRCI { + address 0x062 + access_mode RW + size 2 + modes M_CFG +} + +/* + * Data Transfer Negotiation Data - Offset Byte + */ +register NEGOFFSET { + address 0x062 + access_mode RW + modes M_SCSI + count 1 + dont_generate_debug_code +} + +/* + * Data Transfer Negotiation Data - PPR Options + */ +register NEGPPROPTS { + address 0x063 + access_mode RW + modes M_SCSI + count 1 + field PPROPT_PACE 0x08 + field PPROPT_QAS 0x04 + field PPROPT_DT 0x02 + field PPROPT_IUT 0x01 + dont_generate_debug_code +} + +/* + * Data Transfer Negotiation Data - Connection Options + */ +register NEGCONOPTS { + address 0x064 + access_mode RW + modes M_SCSI + field ENSNAPSHOT 0x40 + field RTI_WRTDIS 0x20 + field RTI_OVRDTRN 0x10 + field ENSLOWCRC 0x08 + field ENAUTOATNI 0x04 + field ENAUTOATNO 0x02 + field WIDEXFER 0x01 + dont_generate_debug_code +} + +/* + * Negotiation Table Annex Column Index. + */ +register ANNEXCOL { + address 0x065 + access_mode RW + modes M_SCSI + count 7 + dont_generate_debug_code +} + +/* + * SCSI Check + * (Rev. B only) + */ +register SCSCHKN { + address 0x066 + access_mode RW + modes M_CFG + count 1 + field BIDICHKDIS 0x80 + field STSELSKIDDIS 0x40 + field CURRFIFODEF 0x20 + field WIDERESEN 0x10 + field SDONEMSKDIS 0x08 + field DFFACTCLR 0x04 + field SHVALIDSTDIS 0x02 + field LSTSGCLRDIS 0x01 + dont_generate_debug_code +} + +const AHD_ANNEXCOL_PER_DEV0 4 +const AHD_NUM_PER_DEV_ANNEXCOLS 4 +const AHD_ANNEXCOL_PRECOMP_SLEW 4 +const AHD_PRECOMP_MASK 0x07 +const AHD_PRECOMP_SHIFT 0 +const AHD_PRECOMP_CUTBACK_17 0x04 +const AHD_PRECOMP_CUTBACK_29 0x06 +const AHD_PRECOMP_CUTBACK_37 0x07 +const AHD_SLEWRATE_MASK 0x78 +const AHD_SLEWRATE_SHIFT 3 +/* + * Rev A has only a single bit (high bit of field) of slew adjustment. + * Rev B has 4 bits. The current default happens to be the same for both. + */ +const AHD_SLEWRATE_DEF_REVA 0x08 +const AHD_SLEWRATE_DEF_REVB 0x08 + +/* Rev A does not have any amplitude setting. */ +const AHD_ANNEXCOL_AMPLITUDE 6 +const AHD_AMPLITUDE_MASK 0x7 +const AHD_AMPLITUDE_SHIFT 0 +const AHD_AMPLITUDE_DEF 0x7 + +/* + * Negotiation Table Annex Data Port. 
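The precomp/slew/amplitude constants above feed the AHD_SET_PRECOMP()/AHD_SET_SLEWRATE()/AHD_SET_AMPLITUDE() macros defined alongside the softc, which stage per-device bytes in iocell_opts[]; those bytes are then written out through the per-device annex columns (ANNEXCOL/ANNEXDAT). A self-contained sketch of the precomp case, with a stand-in struct in place of ahd_softc:

#include <stdint.h>
#include <stdio.h>

#define AHD_ANNEXCOL_PER_DEV0      4
#define AHD_NUM_PER_DEV_ANNEXCOLS  4
#define AHD_ANNEXCOL_PRECOMP_SLEW  4
#define AHD_PRECOMP_MASK        0x07
#define AHD_PRECOMP_SHIFT       0
#define AHD_PRECOMP_CUTBACK_29  0x06

#define AHD_PRECOMP_SLEW_INDEX \
	(AHD_ANNEXCOL_PRECOMP_SLEW - AHD_ANNEXCOL_PER_DEV0)

struct demo_softc {		/* stand-in for struct ahd_softc */
	uint8_t iocell_opts[AHD_NUM_PER_DEV_ANNEXCOLS];
};

#define AHD_SET_PRECOMP(ahd, new_pcomp)					\
do {									\
	(ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK; \
	(ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] |=			\
	    (((new_pcomp) << AHD_PRECOMP_SHIFT) & AHD_PRECOMP_MASK);	\
} while (0)

int main(void)
{
	struct demo_softc ahd = { .iocell_opts = { 0 } };

	/* Stage the cutback level named above; the byte is later pushed
	 * to the per-device annex column via ANNEXCOL/ANNEXDAT. */
	AHD_SET_PRECOMP(&ahd, AHD_PRECOMP_CUTBACK_29);
	printf("iocell_opts[%d] = 0x%02x\n", AHD_PRECOMP_SLEW_INDEX,
	       ahd.iocell_opts[AHD_PRECOMP_SLEW_INDEX]);
	return 0;
}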
+ */ +register ANNEXDAT { + address 0x066 + access_mode RW + modes M_SCSI + count 3 + dont_generate_debug_code +} + +/* + * Initiator's Own Id. + * The SCSI ID to use for Selection Out and seen during a reselection.. + */ +register IOWNID { + address 0x067 + access_mode RW + modes M_SCSI + dont_generate_debug_code +} + +/* + * 960MHz Phase-Locked Loop Control 0 + */ +register PLL960CTL0 { + address 0x068 + access_mode RW + modes M_CFG + field PLL_VCOSEL 0x80 + field PLL_PWDN 0x40 + field PLL_NS 0x30 + field PLL_ENLUD 0x08 + field PLL_ENLPF 0x04 + field PLL_DLPF 0x02 + field PLL_ENFBM 0x01 +} + +/* + * Target Own Id + */ +register TOWNID { + address 0x069 + access_mode RW + modes M_SCSI + count 2 + dont_generate_debug_code +} + +/* + * 960MHz Phase-Locked Loop Control 1 + */ +register PLL960CTL1 { + address 0x069 + access_mode RW + modes M_CFG + field PLL_CNTEN 0x80 + field PLL_CNTCLR 0x40 + field PLL_RST 0x01 +} + +/* + * Expander Signature + */ +register XSIG { + address 0x06A + access_mode RW + modes M_SCSI +} + +/* + * Shadow Byte Count + */ +register SHCNT { + address 0x068 + access_mode RW + size 3 + modes M_DFF0, M_DFF1 + dont_generate_debug_code +} + +/* + * Selection Out ID + */ +register SELOID { + address 0x06B + access_mode RW + modes M_SCSI +} + +/* + * 960-MHz Phase-Locked Loop Test Count + */ +register PLL960CNT0 { + address 0x06A + access_mode RO + size 2 + modes M_CFG +} + +/* + * 400-MHz Phase-Locked Loop Control 0 + */ +register PLL400CTL0 { + address 0x06C + access_mode RW + modes M_CFG + field PLL_VCOSEL 0x80 + field PLL_PWDN 0x40 + field PLL_NS 0x30 + field PLL_ENLUD 0x08 + field PLL_ENLPF 0x04 + field PLL_DLPF 0x02 + field PLL_ENFBM 0x01 +} + +/* + * Arbitration Fairness + */ +register FAIRNESS { + address 0x06C + access_mode RW + size 2 + modes M_SCSI +} + +/* + * 400-MHz Phase-Locked Loop Control 1 + */ +register PLL400CTL1 { + address 0x06D + access_mode RW + modes M_CFG + field PLL_CNTEN 0x80 + field PLL_CNTCLR 0x40 + field PLL_RST 0x01 +} + +/* + * Arbitration Unfairness + */ +register UNFAIRNESS { + address 0x06E + access_mode RW + size 2 + modes M_SCSI +} + +/* + * 400-MHz Phase-Locked Loop Test Count + */ +register PLL400CNT0 { + address 0x06E + access_mode RO + size 2 + modes M_CFG +} + +/* + * SCB Page Pointer + */ +register SCBPTR { + address 0x0A8 + access_mode RW + size 2 + modes M_DFF0, M_DFF1, M_CCHAN, M_SCSI + dont_generate_debug_code +} + +/* + * CMC SCB Array Count + * Number of bytes to transfer between CMC SCB memory and SCBRAM. + * Transfers must be 8byte aligned and sized. + */ +register CCSCBACNT { + address 0x0AB + access_mode RW + modes M_CCHAN +} + +/* + * SCB Autopointer + * SCB-Next Address Snooping logic. When an SCB is transferred to + * the card, the next SCB address to be used by the CMC array can + * be autoloaded from that transfer. + */ +register SCBAUTOPTR { + address 0x0AB + access_mode RW + modes M_CFG + count 1 + field AUSCBPTR_EN 0x80 + field SCBPTR_ADDR 0x38 + field SCBPTR_OFF 0x07 + dont_generate_debug_code +} + +/* + * CMC SG Ram Address Pointer + */ +register CCSGADDR { + address 0x0AC + access_mode RW + modes M_DFF0, M_DFF1 + dont_generate_debug_code +} + +/* + * CMC SCB RAM Address Pointer + */ +register CCSCBADDR { + address 0x0AC + access_mode RW + modes M_CCHAN + dont_generate_debug_code +} + +/* + * CMC SCB Ram Back-up Address Pointer + * Indicates the true stop location of transfers halted prior + * to SCBHCNT going to 0. 
+ */ +register CCSCBADR_BK { + address 0x0AC + access_mode RO + modes M_CFG +} + +/* + * CMC SG Control + */ +register CCSGCTL { + address 0x0AD + access_mode RW + modes M_DFF0, M_DFF1 + field CCSGDONE 0x80 + field SG_CACHE_AVAIL 0x10 + field CCSGENACK 0x08 + mask CCSGEN 0x0C + field SG_FETCH_REQ 0x02 + field CCSGRESET 0x01 +} + +/* + * CMD SCB Control + */ +register CCSCBCTL { + address 0x0AD + access_mode RW + modes M_CCHAN + field CCSCBDONE 0x80 + field ARRDONE 0x40 + field CCARREN 0x10 + field CCSCBEN 0x08 + field CCSCBDIR 0x04 + field CCSCBRESET 0x01 +} + +/* + * CMC Ram BIST + */ +register CMC_RAMBIST { + address 0x0AD + access_mode RW + modes M_CFG + field SG_ELEMENT_SIZE 0x80 + field SCBRAMBIST_FAIL 0x40 + field SG_BIST_FAIL 0x20 + field SG_BIST_EN 0x10 + field CMC_BUFFER_BIST_FAIL 0x02 + field CMC_BUFFER_BIST_EN 0x01 +} + +/* + * CMC SG RAM Data Port + */ +register CCSGRAM { + address 0x0B0 + access_mode RW + modes M_DFF0, M_DFF1 + dont_generate_debug_code +} + +/* + * CMC SCB RAM Data Port + */ +register CCSCBRAM { + address 0x0B0 + access_mode RW + modes M_CCHAN + dont_generate_debug_code +} + +/* + * Flex DMA Address. + */ +register FLEXADR { + address 0x0B0 + access_mode RW + size 3 + modes M_SCSI +} + +/* + * Flex DMA Byte Count + */ +register FLEXCNT { + address 0x0B3 + access_mode RW + size 2 + modes M_SCSI +} + +/* + * Flex DMA Status + */ +register FLEXDMASTAT { + address 0x0B5 + access_mode RW + modes M_SCSI + field FLEXDMAERR 0x02 + field FLEXDMADONE 0x01 +} + +/* + * Flex DMA Data Port + */ +register FLEXDATA { + address 0x0B6 + access_mode RW + modes M_SCSI +} + +/* + * Board Data + */ +register BRDDAT { + address 0x0B8 + access_mode RW + modes M_SCSI + count 2 + dont_generate_debug_code +} + +/* + * Board Control + */ +register BRDCTL { + address 0x0B9 + access_mode RW + modes M_SCSI + count 7 + field FLXARBACK 0x80 + field FLXARBREQ 0x40 + field BRDADDR 0x38 + field BRDEN 0x04 + field BRDRW 0x02 + field BRDSTB 0x01 + dont_generate_debug_code +} + +/* + * Serial EEPROM Address + */ +register SEEADR { + address 0x0BA + access_mode RW + modes M_SCSI + count 4 + dont_generate_debug_code +} + +/* + * Serial EEPROM Data + */ +register SEEDAT { + address 0x0BC + access_mode RW + size 2 + modes M_SCSI + count 4 + dont_generate_debug_code +} + +/* + * Serial EEPROM Status + */ +register SEESTAT { + address 0x0BE + access_mode RO + modes M_SCSI + count 1 + field INIT_DONE 0x80 + field SEEOPCODE 0x70 + field LDALTID_L 0x08 + field SEEARBACK 0x04 + field SEEBUSY 0x02 + field SEESTART 0x01 + dont_generate_debug_code +} + +/* + * Serial EEPROM Control + */ +register SEECTL { + address 0x0BE + access_mode RW + modes M_SCSI + count 4 + field SEEOPCODE 0x70 { + SEEOP_ERASE 0x70, + SEEOP_READ 0x60, + SEEOP_WRITE 0x50, + /* + * The following four commands use special + * addresses for differentiation. + */ + SEEOP_ERAL 0x40 + } + mask SEEOP_EWEN 0x40 + mask SEEOP_WALL 0x40 + mask SEEOP_EWDS 0x40 + field SEERST 0x02 + field SEESTART 0x01 + dont_generate_debug_code +} + +const SEEOP_ERAL_ADDR 0x80 +const SEEOP_EWEN_ADDR 0xC0 +const SEEOP_WRAL_ADDR 0x40 +const SEEOP_EWDS_ADDR 0x00 + +/* + * SCB Counter + */ +register SCBCNT { + address 0x0BF + access_mode RW + modes M_SCSI + dont_generate_debug_code +} + +/* + * Data FIFO Write Address + * Pointer to the next QWD location to be written to the data FIFO. 
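A word read from the serial EEPROM follows from the SEEADR/SEECTL/SEESTAT/SEEDAT definitions above: load the word address, start a SEEOP_READ, wait for SEEBUSY to drop, then fetch the 16-bit result. The sketch below simulates the register interface with trivial stubs, and the exact wait condition used by the real driver may differ.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define SEEADR     0x0BA
#define SEEDAT     0x0BC	/* 16-bit data port */
#define SEESTAT    0x0BE	/* read: status */
#define SEECTL     0x0BE	/* write: control */
#define SEEOP_READ 0x60
#define SEEBUSY    0x02
#define SEESTART   0x01

/* Trivial simulation of the register interface so the sketch runs. */
static void     outb_reg(uint16_t a, uint8_t v) { (void)a; (void)v; }
static uint8_t  inb_reg(uint16_t a)             { (void)a; return 0; }
static uint16_t inw_reg(uint16_t a)             { (void)a; return 0x1234; }

static bool seeprom_read_word(uint8_t word_addr, uint16_t *out)
{
	unsigned spins = 1000;

	outb_reg(SEEADR, word_addr);			/* word address */
	outb_reg(SEECTL, SEEOP_READ | SEESTART);	/* start the read */

	while (inb_reg(SEESTAT) & SEEBUSY)		/* wait for idle */
		if (--spins == 0)
			return false;

	*out = inw_reg(SEEDAT);				/* 16-bit result */
	return true;
}

int main(void)
{
	uint16_t word;

	if (seeprom_read_word(0, &word))
		printf("word 0 = 0x%04x\n", word);
	return 0;
}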
+ */ +register DFWADDR { + address 0x0C0 + access_mode RW + size 2 + modes M_DFF0, M_DFF1 + dont_generate_debug_code +} + +/* + * DSP Filter Control + */ +register DSPFLTRCTL { + address 0x0C0 + access_mode RW + modes M_CFG + field FLTRDISABLE 0x20 + field EDGESENSE 0x10 + field DSPFCNTSEL 0x0F +} + +/* + * DSP Data Channel Control + */ +register DSPDATACTL { + address 0x0C1 + access_mode RW + modes M_CFG + count 3 + field BYPASSENAB 0x80 + field DESQDIS 0x10 + field RCVROFFSTDIS 0x04 + field XMITOFFSTDIS 0x02 + dont_generate_debug_code +} + +/* + * Data FIFO Read Address + * Pointer to the next QWD location to be read from the data FIFO. + */ +register DFRADDR { + address 0x0C2 + access_mode RW + size 2 + modes M_DFF0, M_DFF1 +} + +/* + * DSP REQ Control + */ +register DSPREQCTL { + address 0x0C2 + access_mode RW + modes M_CFG + field MANREQCTL 0xC0 + field MANREQDLY 0x3F +} + +/* + * DSP ACK Control + */ +register DSPACKCTL { + address 0x0C3 + access_mode RW + modes M_CFG + field MANACKCTL 0xC0 + field MANACKDLY 0x3F +} + +/* + * Data FIFO Data + * Read/Write byte port into the data FIFO. The read and write + * FIFO pointers increment with each read and write respectively + * to this port. + */ +register DFDAT { + address 0x0C4 + access_mode RW + modes M_DFF0, M_DFF1 + dont_generate_debug_code +} + +/* + * DSP Channel Select + */ +register DSPSELECT { + address 0x0C4 + access_mode RW + modes M_CFG + count 1 + field AUTOINCEN 0x80 + field DSPSEL 0x1F + dont_generate_debug_code +} + +const NUMDSPS 0x14 + +/* + * Write Bias Control + */ +register WRTBIASCTL { + address 0x0C5 + access_mode WO + modes M_CFG + count 3 + field AUTOXBCDIS 0x80 + field XMITMANVAL 0x3F + dont_generate_debug_code +} + +/* + * Currently the WRTBIASCTL is the same as the default. + */ +const WRTBIASCTL_HP_DEFAULT 0x0 + +/* + * Receiver Bias Control + */ +register RCVRBIOSCTL { + address 0x0C6 + access_mode WO + modes M_CFG + field AUTORBCDIS 0x80 + field RCVRMANVAL 0x3F +} + +/* + * Write Bias Calculator + */ +register WRTBIASCALC { + address 0x0C7 + access_mode RO + modes M_CFG +} + +/* + * Data FIFO Pointers + * Contains the byte offset from DFWADDR and DWRADDR to the current + * FIFO write/read locations. + */ +register DFPTRS { + address 0x0C8 + access_mode RW + modes M_DFF0, M_DFF1 +} + +/* + * Receiver Bias Calculator + */ +register RCVRBIASCALC { + address 0x0C8 + access_mode RO + modes M_CFG +} + +/* + * Data FIFO Backup Read Pointer + * Contains the data FIFO address to be restored if the last + * data accessed from the data FIFO was not transferred successfully. + */ +register DFBKPTR { + address 0x0C9 + access_mode RW + size 2 + modes M_DFF0, M_DFF1 +} + +/* + * Skew Calculator + */ +register SKEWCALC { + address 0x0C9 + access_mode RO + modes M_CFG +} + +/* + * Data FIFO Debug Control + */ +register DFDBCTL { + address 0x0CB + access_mode RW + modes M_DFF0, M_DFF1 + field DFF_CIO_WR_RDY 0x20 + field DFF_CIO_RD_RDY 0x10 + field DFF_DIR_ERR 0x08 + field DFF_RAMBIST_FAIL 0x04 + field DFF_RAMBIST_DONE 0x02 + field DFF_RAMBIST_EN 0x01 +} + +/* + * Data FIFO Space Count + * Number of FIFO locations that are free. + */ +register DFSCNT { + address 0x0CC + access_mode RO + size 2 + modes M_DFF0, M_DFF1 +} + +/* + * Data FIFO Byte Count + * Number of filled FIFO locations. + */ +register DFBCNT { + address 0x0CE + access_mode RO + size 2 + modes M_DFF0, M_DFF1 +} + +/* + * Sequencer Program Overlay Address. + * Low address must be written prior to high address. 
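DSPSELECT together with the write-only bias registers above suggests how all NUMDSPS (0x14, i.e. 20) DSP channels get programmed: select a channel, write its value, repeat. AUTOINCEN can advance DSPSEL automatically; the explicit per-channel form is shown here. A hedged sketch with a toy register file and a made-up helper name, not the driver's initialization code:

#include <stdint.h>

#define DSPSELECT		0x0C4
#define WRTBIASCTL		0x0C5
#define NUMDSPS			0x14
#define WRTBIASCTL_HP_DEFAULT	0x0

static uint8_t regs[0x200];				/* toy register file */
static void reg_write(uint16_t a, uint8_t v) { regs[a] = v; }

void write_all_dsp_bias(void)				/* hypothetical helper */
{
	for (uint8_t i = 0; i < NUMDSPS; i++) {
		reg_write(DSPSELECT, i);		/* pick DSP channel i */
		reg_write(WRTBIASCTL, WRTBIASCTL_HP_DEFAULT);
	}
}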
+ */ +register OVLYADDR { + address 0x0D4 + modes M_SCSI + size 2 + access_mode RW +} + +/* + * Sequencer Control 0 + * Error detection mode, speed configuration, + * single step, breakpoints and program load. + */ +register SEQCTL0 { + address 0x0D6 + access_mode RW + count 11 + field PERRORDIS 0x80 + field PAUSEDIS 0x40 + field FAILDIS 0x20 + field FASTMODE 0x10 + field BRKADRINTEN 0x08 + field STEP 0x04 + field SEQRESET 0x02 + field LOADRAM 0x01 +} + +/* + * Sequencer Control 1 + * Instruction RAM Diagnostics + */ +register SEQCTL1 { + address 0x0D7 + access_mode RW + field OVRLAY_DATA_CHK 0x08 + field RAMBIST_DONE 0x04 + field RAMBIST_FAIL 0x02 + field RAMBIST_EN 0x01 +} + +/* + * Sequencer Flags + * Zero and Carry state of the ALU. + */ +register FLAGS { + address 0x0D8 + access_mode RO + count 23 + field ZERO 0x02 + field CARRY 0x01 + dont_generate_debug_code +} + +/* + * Sequencer Interrupt Control + */ +register SEQINTCTL { + address 0x0D9 + access_mode RW + field INTVEC1DSL 0x80 + field INT1_CONTEXT 0x20 + field SCS_SEQ_INT1M1 0x10 + field SCS_SEQ_INT1M0 0x08 + field INTMASK2 0x04 + field INTMASK1 0x02 + field IRET 0x01 +} + +/* + * Sequencer RAM Data Port + * Single byte window into the Sequencer Instruction Ram area starting + * at the address specified by OVLYADDR. To write a full instruction word, + * simply write four bytes in succession. OVLYADDR will increment after the + * most significant instrution byte (the byte with the parity bit) is written. + */ +register SEQRAM { + address 0x0DA + access_mode RW + count 2 + dont_generate_debug_code +} + +/* + * Sequencer Program Counter + * Low byte must be written prior to high byte. + */ +register PRGMCNT { + address 0x0DE + access_mode RW + size 2 + count 5 + dont_generate_debug_code +} + +/* + * Accumulator + */ +register ACCUM { + address 0x0E0 + access_mode RW + accumulator + dont_generate_debug_code +} + +/* + * Source Index Register + * Incrementing index for reads of SINDIR and the destination (low byte only) + * for any immediate operands passed in jmp, jc, jnc, call instructions. + * Example: + * mvi 0xFF call some_routine; + * + * Will set SINDEX[0] to 0xFF and call the routine "some_routine. + */ +register SINDEX { + address 0x0E2 + access_mode RW + size 2 + sindex + dont_generate_debug_code +} + +/* + * Destination Index Register + * Incrementing index for writes to DINDIR. Can be used as a scratch register. + */ +register DINDEX { + address 0x0E4 + access_mode RW + size 2 + dont_generate_debug_code +} + +/* + * Break Address + * Sequencer instruction breakpoint address address. + */ +register BRKADDR0 { + address 0x0E6 + access_mode RW +} + +register BRKADDR1 { + address 0x0E6 + access_mode RW + field BRKDIS 0x80 /* Disable Breakpoint */ +} + +/* + * All Ones + * All reads to this register return the value 0xFF. + */ +register ALLONES { + address 0x0E8 + access_mode RO + allones + dont_generate_debug_code +} + +/* + * All Zeros + * All reads to this register return the value 0. + */ +register ALLZEROS { + address 0x0EA + access_mode RO + allzeros + dont_generate_debug_code +} + +/* + * No Destination + * Writes to this register have no effect. + */ +register NONE { + address 0x0EA + access_mode WO + none + dont_generate_debug_code +} + +/* + * Source Index Indirect + * Reading this register is equivalent to reading (register_base + SINDEX) and + * incrementing SINDEX by 1. 
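The SEQCTL0/OVLYADDR/SEQRAM descriptions above outline how sequencer firmware is downloaded: assert LOADRAM, point OVLYADDR at instruction zero (low byte first), then stream each four-byte instruction through SEQRAM, which advances OVLYADDR once per instruction. A minimal sketch under those assumptions; the toy register file, accessor, and helper name are made up, and this is not the driver's download routine.

#include <stddef.h>
#include <stdint.h>

#define OVLYADDR	0x0D4
#define SEQCTL0		0x0D6
#define SEQRAM		0x0DA
#define PERRORDIS	0x80
#define FAILDIS		0x20
#define FASTMODE	0x10
#define LOADRAM		0x01

static uint8_t regs[0x200];				/* toy register file */
static void reg_write(uint16_t a, uint8_t v) { regs[a] = v; }

void load_seqram(const uint8_t *insns, size_t ninsns)	/* hypothetical helper */
{
	reg_write(SEQCTL0, PERRORDIS | FAILDIS | FASTMODE | LOADRAM);
	reg_write(OVLYADDR, 0);				/* low byte first */
	reg_write(OVLYADDR + 1, 0);
	for (size_t i = 0; i < ninsns * 4; i++)
		reg_write(SEQRAM, insns[i]);		/* 4 bytes per instruction */
	reg_write(SEQCTL0, PERRORDIS | FAILDIS | FASTMODE);
}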
+ */ +register SINDIR { + address 0x0EC + access_mode RO + dont_generate_debug_code +} + +/* + * Destination Index Indirect + * Writing this register is equivalent to writing to (register_base + DINDEX) + * and incrementing DINDEX by 1. + */ +register DINDIR { + address 0x0ED + access_mode WO + dont_generate_debug_code +} + +/* + * Function One + * 2's complement to bit value conversion. Write the 2's complement value + * (0-7 only) to the top nibble and retrieve the bit indexed by that value + * on the next read of this register. + * Example: + * Write 0x60 + * Read 0x40 + */ +register FUNCTION1 { + address 0x0F0 + access_mode RW +} + +/* + * Stack + * Window into the stack. Each stack location is 10 bits wide reported + * low byte followed by high byte. There are 8 stack locations. + */ +register STACK { + address 0x0F2 + access_mode RW + dont_generate_debug_code +} + +/* + * Interrupt Vector 1 Address + * Interrupt branch address for SCS SEQ_INT1 mode 0 and 1 interrupts. + */ +register INTVEC1_ADDR { + address 0x0F4 + access_mode RW + size 2 + modes M_CFG + count 1 + dont_generate_debug_code +} + +/* + * Current Address + * Address of the SEQRAM instruction currently executing instruction. + */ +register CURADDR { + address 0x0F4 + access_mode RW + size 2 + modes M_SCSI + count 2 + dont_generate_debug_code +} + +/* + * Interrupt Vector 2 Address + * Interrupt branch address for HST_SEQ_INT2 interrupts. + */ +register INTVEC2_ADDR { + address 0x0F6 + access_mode RW + size 2 + modes M_CFG + count 1 + dont_generate_debug_code +} + +/* + * Last Address + * Address of the SEQRAM instruction executed prior to the current instruction. + */ +register LASTADDR { + address 0x0F6 + access_mode RW + size 2 + modes M_SCSI +} + +register AHD_PCI_CONFIG_BASE { + address 0x100 + access_mode RW + size 256 + modes M_CFG +} + +/* ---------------------- Scratch RAM Offsets ------------------------- */ +scratch_ram { + /* Mode Specific */ + address 0x0A0 + size 8 + modes 0, 1, 2, 3 + REG0 { + size 2 + dont_generate_debug_code + } + REG1 { + size 2 + } + REG_ISR { + size 2 + dont_generate_debug_code + } + SG_STATE { + size 1 + field SEGS_AVAIL 0x01 + field LOADING_NEEDED 0x02 + field FETCH_INPROG 0x04 + } + /* + * Track whether the transfer byte count for + * the current data phase is odd. + */ + DATA_COUNT_ODD { + size 1 + } +} + +scratch_ram { + /* Mode Specific */ + address 0x0F8 + size 8 + modes 0, 1, 2, 3 + LONGJMP_ADDR { + size 2 + dont_generate_debug_code + } + ACCUM_SAVE { + size 1 + dont_generate_debug_code + } +} + + +scratch_ram { + address 0x100 + size 128 + modes 0, 1, 2, 3 + /* + * Per "other-id" execution queues. We use an array of + * tail pointers into lists of SCBs sorted by "other-id". + * The execution head pointer threads the head SCBs for + * each list. + */ + WAITING_SCB_TAILS { + size 32 + dont_generate_debug_code + } + WAITING_TID_HEAD { + size 2 + dont_generate_debug_code + } + WAITING_TID_TAIL { + size 2 + dont_generate_debug_code + } + /* + * SCBID of the next SCB in the new SCB queue. + */ + NEXT_QUEUED_SCB_ADDR { + size 4 + dont_generate_debug_code + } + /* + * head of list of SCBs that have + * completed but have not been + * put into the qoutfifo. + */ + COMPLETE_SCB_HEAD { + size 2 + dont_generate_debug_code + } + /* + * The list of completed SCBs in + * the active DMA. + */ + COMPLETE_SCB_DMAINPROG_HEAD { + size 2 + dont_generate_debug_code + } + /* + * head of list of SCBs that have + * completed but need to be uploaded + * to the host prior to being completed. 
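FUNCTION1 above maps a 3-bit index written to the top nibble onto a one-hot bit on the next read (write 0x60, read 0x40). A pure-software model of that mapping, purely for illustration:

#include <assert.h>
#include <stdint.h>

static uint8_t function1_model(uint8_t written)
{
	return (uint8_t)(1u << ((written >> 4) & 0x07));
}

int main(void)
{
	assert(function1_model(0x60) == 0x40);	/* the example in the comment */
	assert(function1_model(0x00) == 0x01);
	assert(function1_model(0x70) == 0x80);
	return 0;
}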
+ */ + COMPLETE_DMA_SCB_HEAD { + size 2 + dont_generate_debug_code + } + /* + * tail of list of SCBs that have + * completed but need to be uploaded + * to the host prior to being completed. + */ + COMPLETE_DMA_SCB_TAIL { + size 2 + dont_generate_debug_code + } + /* + * head of list of SCBs that have + * been uploaded to the host, but cannot + * be completed until the QFREEZE is in + * full effect (i.e. no selections pending). + */ + COMPLETE_ON_QFREEZE_HEAD { + size 2 + dont_generate_debug_code + } + /* + * Counting semaphore to prevent new select-outs + * The queue is frozen so long as the sequencer + * and kernel freeze counts differ. + */ + QFREEZE_COUNT { + size 2 + } + KERNEL_QFREEZE_COUNT { + size 2 + } + /* + * Mode to restore on legacy idle loop exit. + */ + SAVED_MODE { + size 1 + } + /* + * Single byte buffer used to designate the type or message + * to send to a target. + */ + MSG_OUT { + size 1 + dont_generate_debug_code + } + /* Parameters for DMA Logic */ + DMAPARAMS { + size 1 + count 8 + field PRELOADEN 0x80 + field WIDEODD 0x40 + field SCSIEN 0x20 + field SDMAEN 0x10 + field SDMAENACK 0x10 + field HDMAEN 0x08 + field HDMAENACK 0x08 + field DIRECTION 0x04 /* Set indicates PCI->SCSI */ + field FIFOFLUSH 0x02 + field FIFORESET 0x01 + dont_generate_debug_code + } + SEQ_FLAGS { + size 1 + field NOT_IDENTIFIED 0x80 + field NO_CDB_SENT 0x40 + field TARGET_CMD_IS_TAGGED 0x40 + field DPHASE 0x20 + /* Target flags */ + field TARG_CMD_PENDING 0x10 + field CMDPHASE_PENDING 0x08 + field DPHASE_PENDING 0x04 + field SPHASE_PENDING 0x02 + field NO_DISCONNECT 0x01 + } + /* + * Temporary storage for the + * target/channel/lun of a + * reconnecting target + */ + SAVED_SCSIID { + size 1 + dont_generate_debug_code + } + SAVED_LUN { + size 1 + dont_generate_debug_code + } + /* + * The last bus phase as seen by the sequencer. + */ + LASTPHASE { + size 1 + field CDI 0x80 + field IOI 0x40 + field MSGI 0x20 + field P_BUSFREE 0x01 + enum PHASE_MASK CDO|IOO|MSGO { + P_DATAOUT 0x0, + P_DATAIN IOO, + P_DATAOUT_DT P_DATAOUT|MSGO, + P_DATAIN_DT P_DATAIN|MSGO, + P_COMMAND CDO, + P_MESGOUT CDO|MSGO, + P_STATUS CDO|IOO, + P_MESGIN CDO|IOO|MSGO + } + } + /* + * Value to "or" into the SCBPTR[1] value to + * indicate that an entry in the QINFIFO is valid. + */ + QOUTFIFO_ENTRY_VALID_TAG { + size 1 + dont_generate_debug_code + } + /* + * Kernel and sequencer offsets into the queue of + * incoming target mode command descriptors. The + * queue is full when the KERNEL_TQINPOS == TQINPOS. + */ + KERNEL_TQINPOS { + size 1 + count 1 + dont_generate_debug_code + } + TQINPOS { + size 1 + count 8 + dont_generate_debug_code + } + /* + * Base address of our shared data with the kernel driver in host + * memory. This includes the qoutfifo and target mode + * incoming command queue. + */ + SHARED_DATA_ADDR { + size 4 + dont_generate_debug_code + } + /* + * Pointer to location in host memory for next + * position in the qoutfifo. + */ + QOUTFIFO_NEXT_ADDR { + size 4 + dont_generate_debug_code + } + ARG_1 { + size 1 + mask SEND_MSG 0x80 + mask SEND_SENSE 0x40 + mask SEND_REJ 0x20 + mask MSGOUT_PHASEMIS 0x10 + mask EXIT_MSG_LOOP 0x08 + mask CONT_MSG_LOOP_WRITE 0x04 + mask CONT_MSG_LOOP_READ 0x03 + mask CONT_MSG_LOOP_TARG 0x02 + alias RETURN_1 + dont_generate_debug_code + } + ARG_2 { + size 1 + count 1 + alias RETURN_2 + dont_generate_debug_code + } + + /* + * Snapshot of MSG_OUT taken after each message is sent. 
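The LASTPHASE encoding above packs the SCSI bus phase into the C/D, I/O and MSG signal positions. Below is a small decoder, assuming the output-signal bits (CDO/IOO/MSGO) occupy the same 0x80/0x40/0x20 positions as the CDI/IOI/MSGI fields listed in LASTPHASE; the function and strings are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define CDO		0x80
#define IOO		0x40
#define MSGO		0x20
#define P_BUSFREE	0x01

static const char *phase_name(uint8_t lastphase)
{
	if (lastphase & P_BUSFREE)
		return "BUS FREE";
	switch (lastphase & (CDO | IOO | MSGO)) {
	case 0:			return "DATA OUT";
	case IOO:		return "DATA IN";
	case MSGO:		return "DATA OUT (DT)";
	case IOO | MSGO:	return "DATA IN (DT)";
	case CDO:		return "COMMAND";
	case CDO | MSGO:	return "MESSAGE OUT";
	case CDO | IOO:		return "STATUS";
	default:		return "MESSAGE IN";	/* CDO|IOO|MSGO */
	}
}

int main(void)
{
	printf("%s\n", phase_name(CDO | IOO | MSGO));	/* MESSAGE IN */
	return 0;
}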
+ */ + LAST_MSG { + size 1 + dont_generate_debug_code + } + + /* + * Sequences the kernel driver has okayed for us. This allows + * the driver to do things like prevent initiator or target + * operations. + */ + SCSISEQ_TEMPLATE { + size 1 + count 7 + field MANUALCTL 0x40 + field ENSELI 0x20 + field ENRSELI 0x10 + field MANUALP 0x0C + field ENAUTOATNP 0x02 + field ALTSTIM 0x01 + dont_generate_debug_code + } + + /* + * The initiator specified tag for this target mode transaction. + */ + INITIATOR_TAG { + size 1 + count 1 + dont_generate_debug_code + } + + SEQ_FLAGS2 { + size 1 + field PENDING_MK_MESSAGE 0x01 + field TARGET_MSG_PENDING 0x02 + field SELECTOUT_QFROZEN 0x04 + } + + ALLOCFIFO_SCBPTR { + size 2 + dont_generate_debug_code + } + + /* + * The maximum amount of time to wait, when interrupt coalescing + * is enabled, before issuing a CMDCMPLT interrupt for a completed + * command. + */ + INT_COALESCING_TIMER { + size 2 + dont_generate_debug_code + } + + /* + * The maximum number of commands to coalesce into a single interrupt. + * Actually the 2's complement of that value to simplify sequencer + * code. + */ + INT_COALESCING_MAXCMDS { + size 1 + dont_generate_debug_code + } + + /* + * The minimum number of commands still outstanding required + * to continue coalescing (2's complement of value). + */ + INT_COALESCING_MINCMDS { + size 1 + dont_generate_debug_code + } + + /* + * Number of commands "in-flight". + */ + CMDS_PENDING { + size 2 + dont_generate_debug_code + } + + /* + * The count of commands that have been coalesced. + */ + INT_COALESCING_CMDCOUNT { + size 1 + dont_generate_debug_code + } + + /* + * Since the HS_MAIBOX is self clearing, copy its contents to + * this position in scratch ram every time it changes. + */ + LOCAL_HS_MAILBOX { + size 1 + dont_generate_debug_code + } + /* + * Target-mode CDB type to CDB length table used + * in non-packetized operation. + */ + CMDSIZE_TABLE { + size 8 + count 8 + dont_generate_debug_code + } + /* + * When an SCB with the MK_MESSAGE flag is + * queued to the controller, it cannot enter + * the waiting for selection list until the + * selections for any previously queued + * commands to that target complete. During + * the wait, the MK_MESSAGE SCB is queued + * here. + */ + MK_MESSAGE_SCB { + size 2 + } + /* + * Saved SCSIID of MK_MESSAGE_SCB to avoid + * an extra SCBPTR operation when deciding + * if the MK_MESSAGE_SCB can be run. + */ + MK_MESSAGE_SCSIID { + size 1 + } +} + +/************************* Hardware SCB Definition ****************************/ +scb { + address 0x180 + size 64 + modes 0, 1, 2, 3 + SCB_RESIDUAL_DATACNT { + size 4 + alias SCB_CDB_STORE + alias SCB_HOST_CDB_PTR + dont_generate_debug_code + } + SCB_RESIDUAL_SGPTR { + size 4 + field SG_ADDR_MASK 0xf8 /* In the last byte */ + field SG_OVERRUN_RESID 0x02 /* In the first byte */ + field SG_LIST_NULL 0x01 /* In the first byte */ + dont_generate_debug_code + } + SCB_SCSI_STATUS { + size 1 + alias SCB_HOST_CDB_LEN + dont_generate_debug_code + } + SCB_TARGET_PHASES { + size 1 + dont_generate_debug_code + } + SCB_TARGET_DATA_DIR { + size 1 + dont_generate_debug_code + } + SCB_TARGET_ITAG { + size 1 + dont_generate_debug_code + } + SCB_SENSE_BUSADDR { + /* + * Only valid if CDB length is less than 13 bytes or + * we are using a CDB pointer. Otherwise contains + * the last 4 bytes of embedded cdb information. 
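INT_COALESCING_MAXCMDS and INT_COALESCING_MINCMDS above are downloaded as 2's complement values so the sequencer can compare against them with a single 8-bit add and a carry test. A small stand-alone model of the MAXCMDS check; the variable names are illustrative.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int maxcmds = 8;				/* host-chosen limit */
	uint8_t coalescing_maxcmds = (uint8_t)-maxcmds;	/* downloaded form */

	for (int count = 6; count <= 9; count++) {
		/* The add sets carry when the 8-bit sum wraps past 0xFF,
		 * i.e. exactly when count >= maxcmds. */
		int carry = (coalescing_maxcmds + count) > 0xFF;
		printf("cmdcount=%d -> %s\n", count,
		       carry ? "issue CMDCMPLT" : "keep coalescing");
	}
	return 0;
}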
+ */ + size 4 + alias SCB_NEXT_COMPLETE + dont_generate_debug_code + } + SCB_TAG { + alias SCB_FIFO_USE_COUNT + size 2 + dont_generate_debug_code + } + SCB_CONTROL { + size 1 + field TARGET_SCB 0x80 + field DISCENB 0x40 + field TAG_ENB 0x20 + field MK_MESSAGE 0x10 + field STATUS_RCVD 0x08 + field DISCONNECTED 0x04 + field SCB_TAG_TYPE 0x03 + } + SCB_SCSIID { + size 1 + field TID 0xF0 + field OID 0x0F + } + SCB_LUN { + size 1 + field LID 0xff + dont_generate_debug_code + } + SCB_TASK_ATTRIBUTE { + size 1 + /* + * Overloaded field for non-packetized + * ignore wide residue message handling. + */ + field SCB_XFERLEN_ODD 0x01 + dont_generate_debug_code + } + SCB_CDB_LEN { + size 1 + field SCB_CDB_LEN_PTR 0x80 /* CDB in host memory */ + dont_generate_debug_code + } + SCB_TASK_MANAGEMENT { + size 1 + dont_generate_debug_code + } + SCB_DATAPTR { + size 8 + dont_generate_debug_code + } + SCB_DATACNT { + /* + * The last byte is really the high address bits for + * the data address. + */ + size 4 + field SG_LAST_SEG 0x80 /* In the fourth byte */ + field SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */ + dont_generate_debug_code + } + SCB_SGPTR { + size 4 + field SG_STATUS_VALID 0x04 /* In the first byte */ + field SG_FULL_RESID 0x02 /* In the first byte */ + field SG_LIST_NULL 0x01 /* In the first byte */ + dont_generate_debug_code + } + SCB_BUSADDR { + size 4 + dont_generate_debug_code + } + SCB_NEXT { + alias SCB_NEXT_SCB_BUSADDR + size 2 + dont_generate_debug_code + } + SCB_NEXT2 { + size 2 + dont_generate_debug_code + } + SCB_SPARE { + size 8 + alias SCB_PKT_LUN + } + SCB_DISCONNECTED_LISTS { + size 8 + dont_generate_debug_code + } +} + +/*********************************** Constants ********************************/ +const MK_MESSAGE_BIT_OFFSET 4 +const TID_SHIFT 4 +const TARGET_CMD_CMPLT 0xfe +const INVALID_ADDR 0x80 +#define SCB_LIST_NULL 0xff +#define QOUTFIFO_ENTRY_VALID_TOGGLE 0x80 + +const CCSGADDR_MAX 0x80 +const CCSCBADDR_MAX 0x80 +const CCSGRAM_MAXSEGS 16 + +/* Selection Timeout Timer Constants */ +const STIMESEL_SHIFT 3 +const STIMESEL_MIN 0x18 +const STIMESEL_BUG_ADJ 0x8 + +/* WDTR Message values */ +const BUS_8_BIT 0x00 +const BUS_16_BIT 0x01 +const BUS_32_BIT 0x02 + +/* Offset maximums */ +const MAX_OFFSET 0xfe +const MAX_OFFSET_PACED 0xfe +const MAX_OFFSET_PACED_BUG 0x7f +/* + * Some 160 devices incorrectly accept 0xfe as a + * sync offset, but will overrun this value. Limit + * to 0x7f for speed lower than U320 which will + * avoid the persistent sync offset overruns. + */ +const MAX_OFFSET_NON_PACED 0x7f +const HOST_MSG 0xff + +/* + * The size of our sense buffers. + * Sense buffer mapping can be handled in either of two ways. + * The first is to allocate a dmamap for each transaction. + * Depending on the architecture, dmamaps can be costly. The + * alternative is to statically map the buffers in much the same + * way we handle our scatter gather lists. The driver implements + * the later. + */ +const AHD_SENSE_BUFSIZE 256 + +/* Target mode command processing constants */ +const CMD_GROUP_CODE_SHIFT 0x05 + +const STATUS_BUSY 0x08 +const STATUS_QUEUE_FULL 0x28 +const STATUS_PKT_SENSE 0xFF +const TARGET_DATA_IN 1 + +const SCB_TRANSFER_SIZE_FULL_LUN 56 +const SCB_TRANSFER_SIZE_1BYTE_LUN 48 +/* PKT_OVERRUN_BUFSIZE must be a multiple of 256 less than 64K */ +const PKT_OVERRUN_BUFSIZE 512 + +/* + * Timer parameters. 
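For orientation, the 64-byte hardware SCB laid out above can be pictured as the packed structure below (offsets are relative to 0x180, aliases noted in comments). This is an illustrative mirror of the listing, not the driver's own structure definition.

#include <stdint.h>

struct hw_scb_sketch {			/* hypothetical name */
	uint8_t residual_datacnt[4];	/* 0x00: also CDB store / host CDB ptr */
	uint8_t residual_sgptr[4];	/* 0x04 */
	uint8_t scsi_status;		/* 0x08: also host CDB length */
	uint8_t target_phases;		/* 0x09 */
	uint8_t target_data_dir;	/* 0x0a */
	uint8_t target_itag;		/* 0x0b */
	uint8_t sense_busaddr[4];	/* 0x0c: also next-complete link */
	uint8_t tag[2];			/* 0x10: also FIFO use count */
	uint8_t control;		/* 0x12 */
	uint8_t scsiid;			/* 0x13 */
	uint8_t lun;			/* 0x14 */
	uint8_t task_attribute;		/* 0x15 */
	uint8_t cdb_len;		/* 0x16 */
	uint8_t task_management;	/* 0x17 */
	uint8_t dataptr[8];		/* 0x18 */
	uint8_t datacnt[4];		/* 0x20: high addr bits in last byte */
	uint8_t sgptr[4];		/* 0x24 */
	uint8_t busaddr[4];		/* 0x28 */
	uint8_t next[2];		/* 0x2c */
	uint8_t next2[2];		/* 0x2e */
	uint8_t spare[8];		/* 0x30: packetized LUN */
	uint8_t disconnected_lists[8];	/* 0x38 */
};

_Static_assert(sizeof(struct hw_scb_sketch) == 64, "SCB must be 64 bytes");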
+ */ +const AHD_TIMER_US_PER_TICK 25 +const AHD_TIMER_MAX_TICKS 0xFFFF +const AHD_TIMER_MAX_US (AHD_TIMER_MAX_TICKS * AHD_TIMER_US_PER_TICK) + +/* + * Downloaded (kernel inserted) constants + */ +const SG_PREFETCH_CNT download +const SG_PREFETCH_CNT_LIMIT download +const SG_PREFETCH_ALIGN_MASK download +const SG_PREFETCH_ADDR_MASK download +const SG_SIZEOF download +const PKT_OVERRUN_BUFOFFSET download +const SCB_TRANSFER_SIZE download +const CACHELINE_MASK download + +/* + * BIOS SCB offsets + */ +const NVRAM_SCB_OFFSET 0x2C diff --git a/drivers/scsi/aic7xxx/aic79xx.seq b/drivers/scsi/aic7xxx/aic79xx.seq new file mode 100644 index 000000000..3a36d9362 --- /dev/null +++ b/drivers/scsi/aic7xxx/aic79xx.seq @@ -0,0 +1,2290 @@ +/* + * Adaptec U320 device driver firmware for Linux and FreeBSD. + * + * Copyright (c) 1994-2001, 2004 Justin T. Gibbs. + * Copyright (c) 2000-2002 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $FreeBSD$ + */ + +VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $" +PATCH_ARG_LIST = "struct ahd_softc *ahd" +PREFIX = "ahd_" + +#include "aic79xx.reg" +#include "scsi_message.h" + +restart: +if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { + test SEQINTCODE, 0xFF jz idle_loop; + SET_SEQINTCODE(NO_SEQINT) +} + +idle_loop: + + if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { + /* + * Convert ERROR status into a sequencer + * interrupt to handle the case of an + * interrupt collision on the hardware + * setting of HWERR. 
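The timer constants above (AHD_TIMER_US_PER_TICK and AHD_TIMER_MAX_TICKS) give 25 microseconds per tick with a 16-bit tick count. A trivial conversion helper as a sketch; the function name is made up.

#include <stdint.h>
#include <stdio.h>

#define AHD_TIMER_US_PER_TICK	25
#define AHD_TIMER_MAX_TICKS	0xFFFF
#define AHD_TIMER_MAX_US	(AHD_TIMER_MAX_TICKS * AHD_TIMER_US_PER_TICK)

static uint16_t timer_us_to_ticks(uint32_t us)
{
	if (us > AHD_TIMER_MAX_US)
		us = AHD_TIMER_MAX_US;		/* clamp to the 16-bit timer */
	return (uint16_t)(us / AHD_TIMER_US_PER_TICK);
}

int main(void)
{
	printf("1000us -> %u ticks\n", timer_us_to_ticks(1000));	/* 40 */
	return 0;
}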
+ */ + test ERROR, 0xFF jz no_error_set; + SET_SEQINTCODE(SAW_HWERR) +no_error_set: + } + SET_MODE(M_SCSI, M_SCSI) + test SCSISEQ0, ENSELO|ENARBO jnz idle_loop_checkbus; + test SEQ_FLAGS2, SELECTOUT_QFROZEN jz check_waiting_list; + /* + * If the kernel has caught up with us, thaw the queue. + */ + mov A, KERNEL_QFREEZE_COUNT; + cmp QFREEZE_COUNT, A jne check_frozen_completions; + mov A, KERNEL_QFREEZE_COUNT[1]; + cmp QFREEZE_COUNT[1], A jne check_frozen_completions; + and SEQ_FLAGS2, ~SELECTOUT_QFROZEN; + jmp check_waiting_list; +check_frozen_completions: + test SSTAT0, SELDO|SELINGO jnz idle_loop_checkbus; +BEGIN_CRITICAL; + /* + * If we have completions stalled waiting for the qfreeze + * to take effect, move them over to the complete_scb list + * now that no selections are pending. + */ + cmp COMPLETE_ON_QFREEZE_HEAD[1],SCB_LIST_NULL je idle_loop_checkbus; + /* + * Find the end of the qfreeze list. The first element has + * to be treated specially. + */ + bmov SCBPTR, COMPLETE_ON_QFREEZE_HEAD, 2; + cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL je join_lists; + /* + * Now the normal loop. + */ + bmov SCBPTR, SCB_NEXT_COMPLETE, 2; + cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL jne . - 1; +join_lists: + bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2; + bmov COMPLETE_SCB_HEAD, COMPLETE_ON_QFREEZE_HEAD, 2; + mvi COMPLETE_ON_QFREEZE_HEAD[1], SCB_LIST_NULL; + jmp idle_loop_checkbus; +check_waiting_list: + cmp WAITING_TID_HEAD[1], SCB_LIST_NULL je idle_loop_checkbus; + /* + * ENSELO is cleared by a SELDO, so we must test for SELDO + * one last time. + */ + test SSTAT0, SELDO jnz select_out; + call start_selection; +idle_loop_checkbus: + test SSTAT0, SELDO jnz select_out; +END_CRITICAL; + test SSTAT0, SELDI jnz select_in; + test SCSIPHASE, ~DATA_PHASE_MASK jz idle_loop_check_nonpackreq; + test SCSISIGO, ATNO jz idle_loop_check_nonpackreq; + call unexpected_nonpkt_phase_find_ctxt; +idle_loop_check_nonpackreq: + test SSTAT2, NONPACKREQ jz . + 2; + call unexpected_nonpkt_phase_find_ctxt; + if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) { + /* + * On Rev A. hardware, the busy LED is only + * turned on automaically during selections + * and re-selections. Make the LED status + * more useful by forcing it to be on so + * long as one of our data FIFOs is active. + */ + and A, FIFO0FREE|FIFO1FREE, DFFSTAT; + cmp A, FIFO0FREE|FIFO1FREE jne . + 3; + and SBLKCTL, ~DIAGLEDEN|DIAGLEDON; + jmp . + 2; + or SBLKCTL, DIAGLEDEN|DIAGLEDON; + } + call idle_loop_gsfifo_in_scsi_mode; + call idle_loop_service_fifos; + call idle_loop_cchan; + jmp idle_loop; + +idle_loop_gsfifo: + SET_MODE(M_SCSI, M_SCSI) +BEGIN_CRITICAL; +idle_loop_gsfifo_in_scsi_mode: + test LQISTAT2, LQIGSAVAIL jz return; + /* + * We have received good status for this transaction. There may + * still be data in our FIFOs draining to the host. Complete + * the SCB only if all data has transferred to the host. + */ +good_status_IU_done: + bmov SCBPTR, GSFIFO, 2; + clr SCB_SCSI_STATUS; + /* + * If a command completed before an attempted task management + * function completed, notify the host after disabling any + * pending select-outs. + */ + test SCB_TASK_MANAGEMENT, 0xFF jz gsfifo_complete_normally; + test SSTAT0, SELDO|SELINGO jnz . + 2; + and SCSISEQ0, ~ENSELO; + SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY) +gsfifo_complete_normally: + or SCB_CONTROL, STATUS_RCVD; + + /* + * Since this status did not consume a FIFO, we have to + * be a bit more dilligent in how we check for FIFOs pertaining + * to this transaction. 
There are two states that a FIFO still + * transferring data may be in. + * + * 1) Configured and draining to the host, with a FIFO handler. + * 2) Pending cfg4data, fifo not empty. + * + * Case 1 can be detected by noticing a non-zero FIFO active + * count in the SCB. In this case, we allow the routine servicing + * the FIFO to complete the SCB. + * + * Case 2 implies either a pending or yet to occur save data + * pointers for this same context in the other FIFO. So, if + * we detect case 1, we will properly defer the post of the SCB + * and achieve the desired result. The pending cfg4data will + * notice that status has been received and complete the SCB. + */ + test SCB_FIFO_USE_COUNT, 0xFF jnz idle_loop_gsfifo_in_scsi_mode; + call complete; +END_CRITICAL; + jmp idle_loop_gsfifo_in_scsi_mode; + +idle_loop_service_fifos: + SET_MODE(M_DFF0, M_DFF0) +BEGIN_CRITICAL; + test LONGJMP_ADDR[1], INVALID_ADDR jnz idle_loop_next_fifo; + call longjmp; +END_CRITICAL; +idle_loop_next_fifo: + SET_MODE(M_DFF1, M_DFF1) +BEGIN_CRITICAL; + test LONGJMP_ADDR[1], INVALID_ADDR jz longjmp; +END_CRITICAL; +return: + ret; + +idle_loop_cchan: + SET_MODE(M_CCHAN, M_CCHAN) + test QOFF_CTLSTA, HS_MAILBOX_ACT jz hs_mailbox_empty; + or QOFF_CTLSTA, HS_MAILBOX_ACT; + mov LOCAL_HS_MAILBOX, HS_MAILBOX; +hs_mailbox_empty: +BEGIN_CRITICAL; + test CCSCBCTL, CCARREN|CCSCBEN jz scbdma_idle; + test CCSCBCTL, CCSCBDIR jnz fetch_new_scb_inprog; + test CCSCBCTL, CCSCBDONE jz return; + /* FALLTHROUGH */ +scbdma_tohost_done: + test CCSCBCTL, CCARREN jz fill_qoutfifo_dmadone; + /* + * An SCB has been successfully uploaded to the host. + * If the SCB was uploaded for some reason other than + * bad SCSI status (currently only for underruns), we + * queue the SCB for normal completion. Otherwise, we + * wait until any select-out activity has halted, and + * then queue the completion. + */ + and CCSCBCTL, ~(CCARREN|CCSCBEN); + bmov COMPLETE_DMA_SCB_HEAD, SCB_NEXT_COMPLETE, 2; + cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL jne . + 2; + mvi COMPLETE_DMA_SCB_TAIL[1], SCB_LIST_NULL; + test SCB_SCSI_STATUS, 0xff jz scbdma_queue_completion; + bmov SCB_NEXT_COMPLETE, COMPLETE_ON_QFREEZE_HEAD, 2; + bmov COMPLETE_ON_QFREEZE_HEAD, SCBPTR, 2 ret; +scbdma_queue_completion: + bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2; + bmov COMPLETE_SCB_HEAD, SCBPTR, 2 ret; +fill_qoutfifo_dmadone: + and CCSCBCTL, ~(CCARREN|CCSCBEN); + call qoutfifo_updated; + mvi COMPLETE_SCB_DMAINPROG_HEAD[1], SCB_LIST_NULL; + bmov QOUTFIFO_NEXT_ADDR, SCBHADDR, 4; + test QOFF_CTLSTA, SDSCB_ROLLOVR jz return; + bmov QOUTFIFO_NEXT_ADDR, SHARED_DATA_ADDR, 4; + xor QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID_TOGGLE ret; +END_CRITICAL; + +qoutfifo_updated: + /* + * If there are more commands waiting to be dma'ed + * to the host, always coalesce. Otherwise honor the + * host's wishes. + */ + cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count; + cmp COMPLETE_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count; + test LOCAL_HS_MAILBOX, ENINT_COALESCE jz issue_cmdcmplt; + + /* + * If we have relatively few commands outstanding, don't + * bother waiting for another command to complete. + */ + test CMDS_PENDING[1], 0xFF jnz coalesce_by_count; + /* Add -1 so that jnc means <= not just < */ + add A, -1, INT_COALESCING_MINCMDS; + add NONE, A, CMDS_PENDING; + jnc issue_cmdcmplt; + + /* + * If coalescing, only coalesce up to the limit + * provided by the host driver. 
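To summarize the decision flow in qoutfifo_updated above: completions are always batched while more of them are still queued for DMA; otherwise the host's coalescing enable and the minimum-outstanding threshold decide. A hedged C restatement, with the scratch-RAM variables replaced by plain parameters (MINCMDS is passed as a positive count here, while the sequencer stores its 2's complement):

#include <stdbool.h>
#include <stdint.h>

bool should_coalesce(bool more_dma_pending, bool more_complete_pending,
		     bool enint_coalesce, uint16_t cmds_pending,
		     uint8_t mincmds)
{
	if (more_dma_pending || more_complete_pending)
		return true;			/* always batch in this case */
	if (!enint_coalesce)
		return false;			/* host disabled coalescing */
	/* The sequencer adds (-mincmds - 1) and branches on carry; in C
	 * that is simply "strictly more than mincmds still outstanding". */
	return cmds_pending > mincmds;
}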
+ */ +coalesce_by_count: + mov A, INT_COALESCING_MAXCMDS; + add NONE, A, INT_COALESCING_CMDCOUNT; + jc issue_cmdcmplt; + /* + * If the timer is not currently active, + * fire it up. + */ + test INTCTL, SWTMINTMASK jz return; + bmov SWTIMER, INT_COALESCING_TIMER, 2; + mvi CLRSEQINTSTAT, CLRSEQ_SWTMRTO; + or INTCTL, SWTMINTEN|SWTIMER_START; + and INTCTL, ~SWTMINTMASK ret; + +issue_cmdcmplt: + mvi INTSTAT, CMDCMPLT; + clr INT_COALESCING_CMDCOUNT; + or INTCTL, SWTMINTMASK ret; + +BEGIN_CRITICAL; +fetch_new_scb_inprog: + test CCSCBCTL, ARRDONE jz return; +fetch_new_scb_done: + and CCSCBCTL, ~(CCARREN|CCSCBEN); + clr A; + add CMDS_PENDING, 1; + adc CMDS_PENDING[1], A; + if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { + /* + * "Short Luns" are not placed into outgoing LQ + * packets in the correct byte order. Use a full + * sized lun field instead and fill it with the + * one byte of lun information we support. + */ + mov SCB_PKT_LUN[6], SCB_LUN; + } + /* + * The FIFO use count field is shared with the + * tag set by the host so that our SCB dma engine + * knows the correct location to store the SCB. + * Set it to zero before processing the SCB. + */ + clr SCB_FIFO_USE_COUNT; + /* Update the next SCB address to download. */ + bmov NEXT_QUEUED_SCB_ADDR, SCB_NEXT_SCB_BUSADDR, 4; + /* + * NULL out the SCB links since these fields + * occupy the same location as SCB_NEXT_SCB_BUSADDR. + */ + mvi SCB_NEXT[1], SCB_LIST_NULL; + mvi SCB_NEXT2[1], SCB_LIST_NULL; + /* Increment our position in the QINFIFO. */ + mov NONE, SNSCB_QOFF; + + /* + * Save SCBID of this SCB in REG0 since + * SCBPTR will be clobbered during target + * list updates. We also record the SCB's + * flags so that we can refer to them even + * after SCBPTR has been changed. + */ + bmov REG0, SCBPTR, 2; + mov A, SCB_CONTROL; + + /* + * Find the tail SCB of the execution queue + * for this target. + */ + shr SINDEX, 3, SCB_SCSIID; + and SINDEX, ~0x1; + mvi SINDEX[1], (WAITING_SCB_TAILS >> 8); + bmov DINDEX, SINDEX, 2; + bmov SCBPTR, SINDIR, 2; + + /* + * Update the tail to point to the new SCB. + */ + bmov DINDIR, REG0, 2; + + /* + * If the queue was empty, queue this SCB as + * the first for this target. + */ + cmp SCBPTR[1], SCB_LIST_NULL je first_new_target_scb; + + /* + * SCBs that want to send messages must always be + * at the head of their per-target queue so that + * ATN can be asserted even if the current + * negotiation agreement is packetized. If the + * target queue is empty, the SCB can be queued + * immediately. If the queue is not empty, we must + * wait for it to empty before entering this SCB + * into the waiting for selection queue. Otherwise + * our batching and round-robin selection scheme + * could allow commands to be queued out of order. + * To simplify the implementation, we stop pulling + * new commands from the host until the MK_MESSAGE + * SCB can be queued to the waiting for selection + * list. + */ + test A, MK_MESSAGE jz batch_scb; + + /* + * If the last SCB is also a MK_MESSAGE SCB, then + * order is preserved even if we batch. + */ + test SCB_CONTROL, MK_MESSAGE jz batch_scb; + + /* + * Defer this SCB and stop fetching new SCBs until + * it can be queued. Since the SCB_SCSIID of the + * tail SCB must be the same as that of the newly + * queued SCB, there is no need to restore the SCBID + * here. + */ + or SEQ_FLAGS2, PENDING_MK_MESSAGE; + bmov MK_MESSAGE_SCB, REG0, 2; + mov MK_MESSAGE_SCSIID, SCB_SCSIID ret; + +batch_scb: + /* + * Otherwise just update the previous tail SCB to + * point to the new tail. 
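The execution-queue lookup above converts SCB_SCSIID into a byte offset inside WAITING_SCB_TAILS by computing (SCSIID >> 3) & ~1, which is just the target ID (the upper nibble) times the two-byte width of each tail pointer. A quick check of that identity:

#include <assert.h>
#include <stdint.h>

static uint8_t waiting_tail_offset(uint8_t scb_scsiid)
{
	return (uint8_t)((scb_scsiid >> 3) & ~1u);	/* == TID * 2 */
}

int main(void)
{
	for (uint8_t tid = 0; tid < 16; tid++) {
		uint8_t scsiid = (uint8_t)(tid << 4) | 0x0F;	/* any OID */
		assert(waiting_tail_offset(scsiid) == tid * 2);
	}
	return 0;
}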
+ */ + bmov SCB_NEXT, REG0, 2 ret; + +first_new_target_scb: + /* + * Append SCB to the tail of the waiting for + * selection list. + */ + cmp WAITING_TID_HEAD[1], SCB_LIST_NULL je first_new_scb; + bmov SCBPTR, WAITING_TID_TAIL, 2; + bmov SCB_NEXT2, REG0, 2; + bmov WAITING_TID_TAIL, REG0, 2 ret; +first_new_scb: + /* + * Whole list is empty, so the head of + * the list must be initialized too. + */ + bmov WAITING_TID_HEAD, REG0, 2; + bmov WAITING_TID_TAIL, REG0, 2 ret; +END_CRITICAL; + +scbdma_idle: + /* + * Don't bother downloading new SCBs to execute + * if select-outs are currently frozen or we have + * a MK_MESSAGE SCB waiting to enter the queue. + */ + test SEQ_FLAGS2, SELECTOUT_QFROZEN|PENDING_MK_MESSAGE + jnz scbdma_no_new_scbs; +BEGIN_CRITICAL; + test QOFF_CTLSTA, NEW_SCB_AVAIL jnz fetch_new_scb; +scbdma_no_new_scbs: + cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne dma_complete_scb; + cmp COMPLETE_SCB_HEAD[1], SCB_LIST_NULL je return; + /* FALLTHROUGH */ +fill_qoutfifo: + /* + * Keep track of the SCBs we are dmaing just + * in case the DMA fails or is aborted. + */ + bmov COMPLETE_SCB_DMAINPROG_HEAD, COMPLETE_SCB_HEAD, 2; + mvi CCSCBCTL, CCSCBRESET; + bmov SCBHADDR, QOUTFIFO_NEXT_ADDR, 4; + mov A, QOUTFIFO_NEXT_ADDR; + bmov SCBPTR, COMPLETE_SCB_HEAD, 2; +fill_qoutfifo_loop: + bmov CCSCBRAM, SCBPTR, 2; + mov CCSCBRAM, SCB_SGPTR[0]; + mov CCSCBRAM, QOUTFIFO_ENTRY_VALID_TAG; + mov NONE, SDSCB_QOFF; + inc INT_COALESCING_CMDCOUNT; + add CMDS_PENDING, -1; + adc CMDS_PENDING[1], -1; + cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL je fill_qoutfifo_done; + cmp CCSCBADDR, CCSCBADDR_MAX je fill_qoutfifo_done; + test QOFF_CTLSTA, SDSCB_ROLLOVR jnz fill_qoutfifo_done; + /* + * Don't cross an ADB or Cachline boundary when DMA'ing + * completion entries. In PCI mode, at least in 32/33 + * configurations, the SCB DMA engine may lose its place + * in the data-stream should the target force a retry on + * something other than an 8byte aligned boundary. In + * PCI-X mode, we do this to avoid split transactions since + * many chipsets seem to be unable to format proper split + * completions to continue the data transfer. + */ + add SINDEX, A, CCSCBADDR; + test SINDEX, CACHELINE_MASK jz fill_qoutfifo_done; + bmov SCBPTR, SCB_NEXT_COMPLETE, 2; + jmp fill_qoutfifo_loop; +fill_qoutfifo_done: + mov SCBHCNT, CCSCBADDR; + mvi CCSCBCTL, CCSCBEN|CCSCBRESET; + bmov COMPLETE_SCB_HEAD, SCB_NEXT_COMPLETE, 2; + mvi SCB_NEXT_COMPLETE[1], SCB_LIST_NULL ret; + +fetch_new_scb: + bmov SCBHADDR, NEXT_QUEUED_SCB_ADDR, 4; + mvi CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET jmp dma_scb; +dma_complete_scb: + bmov SCBPTR, COMPLETE_DMA_SCB_HEAD, 2; + bmov SCBHADDR, SCB_BUSADDR, 4; + mvi CCARREN|CCSCBEN|CCSCBRESET jmp dma_scb; + +/* + * Either post or fetch an SCB from host memory. The caller + * is responsible for polling for transfer completion. + * + * Prerequisits: Mode == M_CCHAN + * SINDEX contains CCSCBCTL flags + * SCBHADDR set to Host SCB address + * SCBPTR set to SCB src location on "push" operations + */ +SET_SRC_MODE M_CCHAN; +SET_DST_MODE M_CCHAN; +dma_scb: + mvi SCBHCNT, SCB_TRANSFER_SIZE; + mov CCSCBCTL, SINDEX ret; + +setjmp: + /* + * At least on the A, a return in the same + * instruction as the bmov results in a return + * to the caller, not to the new address at the + * top of the stack. Since we want the latter + * (we use setjmp to register a handler from an + * interrupt context but not invoke that handler + * until we return to our idle loop), use a + * separate ret instruction. 
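fill_qoutfifo above emits four bytes per completion: the two-byte SCB tag, the first byte of SCB_SGPTR, and the current valid tag, which the sequencer XORs with QOUTFIFO_ENTRY_VALID_TOGGLE each time the shared ring wraps. A hedged host-side sketch of consuming such entries; the structure and helper below are illustrative, not taken from the driver headers.

#include <stdint.h>

#define QOUTFIFO_ENTRY_VALID_TOGGLE 0x80

struct qoutfifo_entry_sketch {		/* hypothetical mirror of the 4 bytes */
	uint16_t tag;			/* SCBPTR of the completed command */
	uint8_t  sg_status;		/* SCB_SGPTR[0]: SG_STATUS_VALID etc. */
	uint8_t  valid_tag;		/* matches the expected toggle value */
};

/* Drain entries whose valid_tag matches, flipping the expectation on wrap. */
int drain_qoutfifo(struct qoutfifo_entry_sketch *ring, int nentries,
		   int *next, uint8_t *expected_valid)
{
	int completed = 0;

	while (ring[*next].valid_tag == *expected_valid) {
		/* ... hand ring[*next].tag back to the midlayer here ... */
		completed++;
		if (++(*next) == nentries) {
			*next = 0;
			*expected_valid ^= QOUTFIFO_ENTRY_VALID_TOGGLE;
		}
	}
	return completed;
}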
+ */ + bmov LONGJMP_ADDR, STACK, 2; + ret; +setjmp_inline: + bmov LONGJMP_ADDR, STACK, 2; +longjmp: + bmov STACK, LONGJMP_ADDR, 2 ret; +END_CRITICAL; + +/*************************** Chip Bug Work Arounds ****************************/ +/* + * Must disable interrupts when setting the mode pointer + * register as an interrupt occurring mid update will + * fail to store the new mode value for restoration on + * an iret. + */ +if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { +set_mode_work_around: + mvi SEQINTCTL, INTVEC1DSL; + mov MODE_PTR, SINDEX; + clr SEQINTCTL ret; +} + + +if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { +set_seqint_work_around: + mov SEQINTCODE, SINDEX; + mvi SEQINTCODE, NO_SEQINT ret; +} + +/************************ Packetized LongJmp Routines *************************/ +SET_SRC_MODE M_SCSI; +SET_DST_MODE M_SCSI; +start_selection: +BEGIN_CRITICAL; + if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) { + /* + * Razor #494 + * Rev A hardware fails to update LAST/CURR/NEXTSCB + * correctly after a packetized selection in several + * situations: + * + * 1) If only one command existed in the queue, the + * LAST/CURR/NEXTSCB are unchanged. + * + * 2) In a non QAS, protocol allowed phase change, + * the queue is shifted 1 too far. LASTSCB is + * the last SCB that was correctly processed. + * + * 3) In the QAS case, if the full list of commands + * was successfully sent, NEXTSCB is NULL and neither + * CURRSCB nor LASTSCB can be trusted. We must + * manually walk the list counting MAXCMDCNT elements + * to find the last SCB that was sent correctly. + * + * To simplify the workaround for this bug in SELDO + * handling, we initialize LASTSCB prior to enabling + * selection so we can rely on it even for case #1 above. + */ + bmov LASTSCB, WAITING_TID_HEAD, 2; + } + bmov CURRSCB, WAITING_TID_HEAD, 2; + bmov SCBPTR, WAITING_TID_HEAD, 2; + shr SELOID, 4, SCB_SCSIID; + /* + * If we want to send a message to the device, ensure + * we are selecting with atn regardless of our packetized + * agreement. Since SPI4 only allows target reset or PPR + * messages if this is a packetized connection, the change + * to our negotiation table entry for this selection will + * be cleared when the message is acted on. + */ + test SCB_CONTROL, MK_MESSAGE jz . + 3; + mov NEGOADDR, SELOID; + or NEGCONOPTS, ENAUTOATNO; + or SCSISEQ0, ENSELO ret; +END_CRITICAL; + +/* + * Allocate a FIFO for a non-packetized transaction. + * In RevA hardware, both FIFOs must be free before we + * can allocate a FIFO for a non-packetized transaction. + */ +allocate_fifo_loop: + /* + * Do whatever work is required to free a FIFO. + */ + call idle_loop_service_fifos; + SET_MODE(M_SCSI, M_SCSI) +allocate_fifo: + if ((ahd->bugs & AHD_NONPACKFIFO_BUG) != 0) { + and A, FIFO0FREE|FIFO1FREE, DFFSTAT; + cmp A, FIFO0FREE|FIFO1FREE jne allocate_fifo_loop; + } else { + test DFFSTAT, FIFO1FREE jnz allocate_fifo1; + test DFFSTAT, FIFO0FREE jz allocate_fifo_loop; + mvi DFFSTAT, B_CURRFIFO_0; + SET_MODE(M_DFF0, M_DFF0) + bmov SCBPTR, ALLOCFIFO_SCBPTR, 2 ret; + } +SET_SRC_MODE M_SCSI; +SET_DST_MODE M_SCSI; +allocate_fifo1: + mvi DFFSTAT, CURRFIFO_1; + SET_MODE(M_DFF1, M_DFF1) + bmov SCBPTR, ALLOCFIFO_SCBPTR, 2 ret; + +/* + * We have been reselected as an initiator + * or selected as a target. + */ +SET_SRC_MODE M_SCSI; +SET_DST_MODE M_SCSI; +select_in: + if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) { + /* + * On Rev A. hardware, the busy LED is only + * turned on automaically during selections + * and re-selections. 
Make the LED status + * more useful by forcing it to be on from + * the point of selection until our idle + * loop determines that neither of our FIFOs + * are busy. This handles the non-packetized + * case nicely as we will not return to the + * idle loop until the busfree at the end of + * each transaction. + */ + or SBLKCTL, DIAGLEDEN|DIAGLEDON; + } + if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { + /* + * Test to ensure that the bus has not + * already gone free prior to clearing + * any stale busfree status. This avoids + * a window whereby a busfree just after + * a selection could be missed. + */ + test SCSISIGI, BSYI jz . + 2; + mvi CLRSINT1,CLRBUSFREE; + or SIMODE1, ENBUSFREE; + } + or SXFRCTL0, SPIOEN; + and SAVED_SCSIID, SELID_MASK, SELID; + and A, OID, IOWNID; + or SAVED_SCSIID, A; + mvi CLRSINT0, CLRSELDI; + jmp ITloop; + +/* + * We have successfully selected out. + * + * Clear SELDO. + * Dequeue all SCBs sent from the waiting queue + * Requeue all SCBs *not* sent to the tail of the waiting queue + * Take Razor #494 into account for above. + * + * In Packetized Mode: + * Return to the idle loop. Our interrupt handler will take + * care of any incoming L_Qs. + * + * In Non-Packetize Mode: + * Continue to our normal state machine. + */ +SET_SRC_MODE M_SCSI; +SET_DST_MODE M_SCSI; +select_out: +BEGIN_CRITICAL; + if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) { + /* + * On Rev A. hardware, the busy LED is only + * turned on automaically during selections + * and re-selections. Make the LED status + * more useful by forcing it to be on from + * the point of re-selection until our idle + * loop determines that neither of our FIFOs + * are busy. This handles the non-packetized + * case nicely as we will not return to the + * idle loop until the busfree at the end of + * each transaction. + */ + or SBLKCTL, DIAGLEDEN|DIAGLEDON; + } + /* Clear out all SCBs that have been successfully sent. */ + if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) { + /* + * For packetized, the LQO manager clears ENSELO on + * the assertion of SELDO. If we are non-packetized, + * LASTSCB and CURRSCB are accurate. + */ + test SCSISEQ0, ENSELO jnz use_lastscb; + + /* + * The update is correct for LQOSTAT1 errors. All + * but LQOBUSFREE are handled by kernel interrupts. + * If we see LQOBUSFREE, return to the idle loop. + * Once we are out of the select_out critical section, + * the kernel will cleanup the LQOBUSFREE and we will + * eventually restart the selection if appropriate. + */ + test LQOSTAT1, LQOBUSFREE jnz idle_loop; + + /* + * On a phase change oustside of packet boundaries, + * LASTSCB points to the currently active SCB context + * on the bus. + */ + test LQOSTAT2, LQOPHACHGOUTPKT jnz use_lastscb; + + /* + * If the hardware has traversed the whole list, NEXTSCB + * will be NULL, CURRSCB and LASTSCB cannot be trusted, + * but MAXCMDCNT is accurate. If we stop part way through + * the list or only had one command to issue, NEXTSCB[1] is + * not NULL and LASTSCB is the last command to go out. + */ + cmp NEXTSCB[1], SCB_LIST_NULL jne use_lastscb; + + /* + * Brute force walk. 
+ */ + bmov SCBPTR, WAITING_TID_HEAD, 2; + mvi SEQINTCTL, INTVEC1DSL; + mvi MODE_PTR, MK_MODE(M_CFG, M_CFG); + mov A, MAXCMDCNT; + mvi MODE_PTR, MK_MODE(M_SCSI, M_SCSI); + clr SEQINTCTL; +find_lastscb_loop: + dec A; + test A, 0xFF jz found_last_sent_scb; + bmov SCBPTR, SCB_NEXT, 2; + jmp find_lastscb_loop; +use_lastscb: + bmov SCBPTR, LASTSCB, 2; +found_last_sent_scb: + bmov CURRSCB, SCBPTR, 2; +curscb_ww_done: + } else { + bmov SCBPTR, CURRSCB, 2; + } + + /* + * The whole list made it. Clear our tail pointer to indicate + * that the per-target selection queue is now empty. + */ + cmp SCB_NEXT[1], SCB_LIST_NULL je select_out_clear_tail; + + /* + * Requeue any SCBs not sent, to the tail of the waiting Q. + * We know that neither the per-TID list nor the list of + * TIDs is empty. Use this knowledge to our advantage and + * queue the remainder to the tail of the global execution + * queue. + */ + bmov REG0, SCB_NEXT, 2; +select_out_queue_remainder: + bmov SCBPTR, WAITING_TID_TAIL, 2; + bmov SCB_NEXT2, REG0, 2; + bmov WAITING_TID_TAIL, REG0, 2; + jmp select_out_inc_tid_q; + +select_out_clear_tail: + /* + * Queue any pending MK_MESSAGE SCB for this target now + * that the queue is empty. + */ + test SEQ_FLAGS2, PENDING_MK_MESSAGE jz select_out_no_mk_message_scb; + mov A, MK_MESSAGE_SCSIID; + cmp SCB_SCSIID, A jne select_out_no_mk_message_scb; + and SEQ_FLAGS2, ~PENDING_MK_MESSAGE; + bmov REG0, MK_MESSAGE_SCB, 2; + jmp select_out_queue_remainder; + +select_out_no_mk_message_scb: + /* + * Clear this target's execution tail and increment the queue. + */ + shr DINDEX, 3, SCB_SCSIID; + or DINDEX, 1; /* Want only the second byte */ + mvi DINDEX[1], ((WAITING_SCB_TAILS) >> 8); + mvi DINDIR, SCB_LIST_NULL; +select_out_inc_tid_q: + bmov SCBPTR, WAITING_TID_HEAD, 2; + bmov WAITING_TID_HEAD, SCB_NEXT2, 2; + cmp WAITING_TID_HEAD[1], SCB_LIST_NULL jne . + 2; + mvi WAITING_TID_TAIL[1], SCB_LIST_NULL; + bmov SCBPTR, CURRSCB, 2; + mvi CLRSINT0, CLRSELDO; + test LQOSTAT2, LQOPHACHGOUTPKT jnz unexpected_nonpkt_mode_cleared; + test LQOSTAT1, LQOPHACHGINPKT jnz unexpected_nonpkt_mode_cleared; + + /* + * If this is a packetized connection, return to our + * idle_loop and let our interrupt handler deal with + * any connection setup/teardown issues. The only + * exceptions are the case of MK_MESSAGE and task management + * SCBs. + */ + if ((ahd->bugs & AHD_LQO_ATNO_BUG) != 0) { + /* + * In the A, the LQO manager transitions to LQOSTOP0 even if + * we have selected out with ATN asserted and the target + * REQs in a non-packet phase. + */ + test SCB_CONTROL, MK_MESSAGE jz select_out_no_message; + test SCSISIGO, ATNO jnz select_out_non_packetized; +select_out_no_message: + } + test LQOSTAT2, LQOSTOP0 jz select_out_non_packetized; + test SCB_TASK_MANAGEMENT, 0xFF jz idle_loop; + SET_SEQINTCODE(TASKMGMT_FUNC_COMPLETE) + jmp idle_loop; + +select_out_non_packetized: + /* Non packetized request. */ + and SCSISEQ0, ~ENSELO; + if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { + /* + * Test to ensure that the bus has not + * already gone free prior to clearing + * any stale busfree status. This avoids + * a window whereby a busfree just after + * a selection could be missed. + */ + test SCSISIGI, BSYI jz . + 2; + mvi CLRSINT1,CLRBUSFREE; + or SIMODE1, ENBUSFREE; + } + mov SAVED_SCSIID, SCB_SCSIID; + mov SAVED_LUN, SCB_LUN; + mvi SEQ_FLAGS, NO_CDB_SENT; +END_CRITICAL; + or SXFRCTL0, SPIOEN; + + /* + * As soon as we get a successful selection, the target + * should go into the message out phase since we have ATN + * asserted. 
+ */ + mvi MSG_OUT, MSG_IDENTIFYFLAG; + + /* + * Main loop for information transfer phases. Wait for the + * target to assert REQ before checking MSG, C/D and I/O for + * the bus phase. + */ +mesgin_phasemis: +ITloop: + call phase_lock; + + mov A, LASTPHASE; + + test A, ~P_DATAIN_DT jz p_data; + cmp A,P_COMMAND je p_command; + cmp A,P_MESGOUT je p_mesgout; + cmp A,P_STATUS je p_status; + cmp A,P_MESGIN je p_mesgin; + + SET_SEQINTCODE(BAD_PHASE) + jmp ITloop; /* Try reading the bus again. */ + +/* + * Command phase. Set up the DMA registers and let 'er rip. + */ +p_command: + test SEQ_FLAGS, NOT_IDENTIFIED jz p_command_okay; + SET_SEQINTCODE(PROTO_VIOLATION) +p_command_okay: + test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) + jnz p_command_allocate_fifo; + /* + * Command retry. Free our current FIFO and + * re-allocate a FIFO so transfer state is + * reset. + */ +SET_SRC_MODE M_DFF1; +SET_DST_MODE M_DFF1; + mvi DFFSXFRCTL, RSTCHN|CLRSHCNT; + SET_MODE(M_SCSI, M_SCSI) +p_command_allocate_fifo: + bmov ALLOCFIFO_SCBPTR, SCBPTR, 2; + call allocate_fifo; +SET_SRC_MODE M_DFF1; +SET_DST_MODE M_DFF1; + add NONE, -17, SCB_CDB_LEN; + jnc p_command_embedded; +p_command_from_host: + bmov HADDR[0], SCB_HOST_CDB_PTR, 9; + mvi SG_CACHE_PRE, LAST_SEG; + mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN); + jmp p_command_xfer; +p_command_embedded: + bmov SHCNT[0], SCB_CDB_LEN, 1; + bmov DFDAT, SCB_CDB_STORE, 16; + mvi DFCNTRL, SCSIEN; +p_command_xfer: + and SEQ_FLAGS, ~NO_CDB_SENT; + if ((ahd->features & AHD_FAST_CDB_DELIVERY) != 0) { + /* + * To speed up CDB delivery in Rev B, all CDB acks + * are "released" to the output sync as soon as the + * command phase starts. There is only one problem + * with this approach. If the target changes phase + * before all data are sent, we have left over acks + * that can go out on the bus in a data phase. Due + * to other chip contraints, this only happens if + * the target goes to data-in, but if the acks go + * out before we can test SDONE, we'll think that + * the transfer has completed successfully. Work + * around this by taking advantage of the 400ns or + * 800ns dead time between command phase and the REQ + * of the new phase. If the transfer has completed + * successfully, SCSIEN should fall *long* before we + * see a phase change. We thus treat any phasemiss + * that occurs before SCSIEN falls as an incomplete + * transfer. + */ + test SSTAT1, PHASEMIS jnz p_command_xfer_failed; + test DFCNTRL, SCSIEN jnz . - 1; + } else { + test DFCNTRL, SCSIEN jnz .; + } + /* + * DMA Channel automatically disabled. + * Don't allow a data phase if the command + * was not fully transferred. + */ + test SSTAT2, SDONE jnz ITloop; +p_command_xfer_failed: + or SEQ_FLAGS, NO_CDB_SENT; + jmp ITloop; + + +/* + * Status phase. Wait for the data byte to appear, then read it + * and store it into the SCB. + */ +SET_SRC_MODE M_SCSI; +SET_DST_MODE M_SCSI; +p_status: + test SEQ_FLAGS,NOT_IDENTIFIED jnz mesgin_proto_violation; +p_status_okay: + mov SCB_SCSI_STATUS, SCSIDAT; + or SCB_CONTROL, STATUS_RCVD; + jmp ITloop; + +/* + * Message out phase. If MSG_OUT is MSG_IDENTIFYFLAG, build a full + * indentify message sequence and send it to the target. The host may + * override this behavior by setting the MK_MESSAGE bit in the SCB + * control byte. This will cause us to interrupt the host and allow + * it to handle the message phase completely on its own. 
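p_command above chooses between the embedded and host-resident CDB paths with an 8-bit "add -17, jnc" carry trick: no carry means the CDB is 16 bytes or shorter and lives in SCB_CDB_STORE, otherwise it is fetched through SCB_HOST_CDB_PTR. A stand-alone model of that test:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool cdb_is_embedded(uint8_t cdb_len)
{
	/* carry out of (cdb_len + 0xEF) is set only when cdb_len >= 17 */
	bool carry = ((unsigned)cdb_len + 0xEFu) > 0xFFu;
	return !carry;
}

int main(void)
{
	assert(cdb_is_embedded(6));	/* typical 6/10/12/16-byte CDBs */
	assert(cdb_is_embedded(16));
	assert(!cdb_is_embedded(17));	/* fetched via SCB_HOST_CDB_PTR */
	return 0;
}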
If the bit + * associated with this target is set, we will also interrupt the host, + * thereby allowing it to send a message on the next selection regardless + * of the transaction being sent. + * + * If MSG_OUT is == HOST_MSG, also interrupt the host and take a message. + * This is done to allow the host to send messages outside of an identify + * sequence while protecting the seqencer from testing the MK_MESSAGE bit + * on an SCB that might not be for the current nexus. (For example, a + * BDR message in response to a bad reselection would leave us pointed to + * an SCB that doesn't have anything to do with the current target). + * + * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag, + * bus device reset). + * + * When there are no messages to send, MSG_OUT should be set to MSG_NOOP, + * in case the target decides to put us in this phase for some strange + * reason. + */ +p_mesgout_retry: + /* Turn on ATN for the retry */ + mvi SCSISIGO, ATNO; +p_mesgout: + mov SINDEX, MSG_OUT; + cmp SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host; + test SCB_CONTROL,MK_MESSAGE jnz host_message_loop; +p_mesgout_identify: + or SINDEX, MSG_IDENTIFYFLAG|DISCENB, SCB_LUN; + test SCB_CONTROL, DISCENB jnz . + 2; + and SINDEX, ~DISCENB; +/* + * Send a tag message if TAG_ENB is set in the SCB control block. + * Use SCB_NONPACKET_TAG as the tag value. + */ +p_mesgout_tag: + test SCB_CONTROL,TAG_ENB jz p_mesgout_onebyte; + mov SCSIDAT, SINDEX; /* Send the identify message */ + call phase_lock; + cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; + and SCSIDAT,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL; + call phase_lock; + cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; + mov SCBPTR jmp p_mesgout_onebyte; +/* + * Interrupt the driver, and allow it to handle this message + * phase and any required retries. + */ +p_mesgout_from_host: + cmp SINDEX, HOST_MSG jne p_mesgout_onebyte; + jmp host_message_loop; + +p_mesgout_onebyte: + mvi CLRSINT1, CLRATNO; + mov SCSIDAT, SINDEX; + +/* + * If the next bus phase after ATN drops is message out, it means + * that the target is requesting that the last message(s) be resent. + */ + call phase_lock; + cmp LASTPHASE, P_MESGOUT je p_mesgout_retry; + +p_mesgout_done: + mvi CLRSINT1,CLRATNO; /* Be sure to turn ATNO off */ + mov LAST_MSG, MSG_OUT; + mvi MSG_OUT, MSG_NOOP; /* No message left */ + jmp ITloop; + +/* + * Message in phase. Bytes are read using Automatic PIO mode. + */ +p_mesgin: + /* read the 1st message byte */ + mvi ACCUM call inb_first; + + test A,MSG_IDENTIFYFLAG jnz mesgin_identify; + cmp A,MSG_DISCONNECT je mesgin_disconnect; + cmp A,MSG_SAVEDATAPOINTER je mesgin_sdptrs; + cmp ALLZEROS,A je mesgin_complete; + cmp A,MSG_RESTOREPOINTERS je mesgin_rdptrs; + cmp A,MSG_IGN_WIDE_RESIDUE je mesgin_ign_wide_residue; + cmp A,MSG_NOOP je mesgin_done; + +/* + * Pushed message loop to allow the kernel to + * run it's own message state engine. To avoid an + * extra nop instruction after signaling the kernel, + * we perform the phase_lock before checking to see + * if we should exit the loop and skip the phase_lock + * in the ITloop. Performing back to back phase_locks + * shouldn't hurt, but why do it twice... + */ +host_message_loop: + call phase_lock; /* Benign the first time through. */ + SET_SEQINTCODE(HOST_MSG_LOOP) + cmp RETURN_1, EXIT_MSG_LOOP je ITloop; + cmp RETURN_1, CONT_MSG_LOOP_WRITE jne . 
+ 3; + mov SCSIDAT, RETURN_2; + jmp host_message_loop; + /* Must be CONT_MSG_LOOP_READ */ + mov NONE, SCSIDAT; /* ACK Byte */ + jmp host_message_loop; + +mesgin_ign_wide_residue: + mov SAVED_MODE, MODE_PTR; + SET_MODE(M_SCSI, M_SCSI) + shr NEGOADDR, 4, SAVED_SCSIID; + mov A, NEGCONOPTS; + RESTORE_MODE(SAVED_MODE) + test A, WIDEXFER jz mesgin_reject; + /* Pull the residue byte */ + mvi REG0 call inb_next; + cmp REG0, 0x01 jne mesgin_reject; + test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz . + 2; + test SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jnz mesgin_done; + SET_SEQINTCODE(IGN_WIDE_RES) + jmp mesgin_done; + +mesgin_proto_violation: + SET_SEQINTCODE(PROTO_VIOLATION) + jmp mesgin_done; +mesgin_reject: + mvi MSG_MESSAGE_REJECT call mk_mesg; +mesgin_done: + mov NONE,SCSIDAT; /*dummy read from latch to ACK*/ + jmp ITloop; + +#define INDEX_DISC_LIST(scsiid, lun) \ + and A, 0xC0, scsiid; \ + or SCBPTR, A, lun; \ + clr SCBPTR[1]; \ + and SINDEX, 0x30, scsiid; \ + shr SINDEX, 3; /* Multiply by 2 */ \ + add SINDEX, (SCB_DISCONNECTED_LISTS & 0xFF); \ + mvi SINDEX[1], ((SCB_DISCONNECTED_LISTS >> 8) & 0xFF) + +mesgin_identify: + /* + * Determine whether a target is using tagged or non-tagged + * transactions by first looking at the transaction stored in + * the per-device, disconnected array. If there is no untagged + * transaction for this target, this must be a tagged transaction. + */ + and SAVED_LUN, MSG_IDENTIFY_LUNMASK, A; + INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN); + bmov DINDEX, SINDEX, 2; + bmov REG0, SINDIR, 2; + cmp REG0[1], SCB_LIST_NULL je snoop_tag; + /* Untagged. Clear the busy table entry and setup the SCB. */ + bmov DINDIR, ALLONES, 2; + bmov SCBPTR, REG0, 2; + jmp setup_SCB; + +/* + * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message. + * If we get one, we use the tag returned to find the proper + * SCB. After receiving the tag, look for the SCB at SCB locations tag and + * tag + 256. + */ +snoop_tag: + if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) { + or SEQ_FLAGS, 0x80; + } + mov NONE, SCSIDAT; /* ACK Identify MSG */ + call phase_lock; + if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) { + or SEQ_FLAGS, 0x1; + } + cmp LASTPHASE, P_MESGIN jne not_found_ITloop; + if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) { + or SEQ_FLAGS, 0x2; + } + cmp SCSIBUS, MSG_SIMPLE_Q_TAG jne not_found; +get_tag: + clr SCBPTR[1]; + mvi SCBPTR call inb_next; /* tag value */ +verify_scb: + test SCB_CONTROL,DISCONNECTED jz verify_other_scb; + mov A, SAVED_SCSIID; + cmp SCB_SCSIID, A jne verify_other_scb; + mov A, SAVED_LUN; + cmp SCB_LUN, A je setup_SCB_disconnected; +verify_other_scb: + xor SCBPTR[1], 1; + test SCBPTR[1], 0xFF jnz verify_scb; + jmp not_found; + +/* + * Ensure that the SCB the tag points to is for + * an SCB transaction to the reconnecting target. + */ +setup_SCB: + if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) { + or SEQ_FLAGS, 0x10; + } + test SCB_CONTROL,DISCONNECTED jz not_found; +setup_SCB_disconnected: + and SCB_CONTROL,~DISCONNECTED; + clr SEQ_FLAGS; /* make note of IDENTIFY */ + test SCB_SGPTR, SG_LIST_NULL jnz . + 3; + bmov ALLOCFIFO_SCBPTR, SCBPTR, 2; + call allocate_fifo; + /* See if the host wants to send a message upon reconnection */ + test SCB_CONTROL, MK_MESSAGE jz mesgin_done; + mvi HOST_MSG call mk_mesg; + jmp mesgin_done; + +not_found: + SET_SEQINTCODE(NO_MATCH) + jmp mesgin_done; + +not_found_ITloop: + SET_SEQINTCODE(NO_MATCH) + jmp ITloop; + +/* + * We received a "command complete" message. 
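p_mesgout_identify above assembles the IDENTIFY byte by OR-ing the identify flag with the SCB's LUN, reusing the DISCENB bit position (0x40) as the message's disconnect-privilege bit and clearing it when disconnection is not allowed; for tagged commands a queue tag message and the tag byte follow. A simplified stand-alone builder, showing only the simple queue tag case and masking the LUN to the classic 3-bit field, not the sequencer's exact byte stream:

#include <stdbool.h>
#include <stdint.h>

#define MSG_IDENTIFYFLAG 0x80
#define DISCENB          0x40	/* doubles as the DiscPriv bit */
#define MSG_SIMPLE_Q_TAG 0x20

/* Returns the number of message-out bytes placed in buf (1 or 3). */
int build_mesgout(uint8_t *buf, uint8_t lun, bool disc_allowed,
		  bool tagged, uint8_t tag)
{
	buf[0] = MSG_IDENTIFYFLAG | DISCENB | (lun & 0x07);
	if (!disc_allowed)
		buf[0] &= (uint8_t)~DISCENB;
	if (!tagged)
		return 1;
	buf[1] = MSG_SIMPLE_Q_TAG;	/* sequencer derives the tag type from SCB_CONTROL */
	buf[2] = tag;
	return 3;
}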
Put the SCB on the complete + * queue and trigger a completion interrupt via the idle loop. Before doing + * so, check to see if there is a residual or the status byte is something + * other than STATUS_GOOD (0). In either of these conditions, we upload the + * SCB back to the host so it can process this information. + */ +mesgin_complete: + + /* + * If ATN is raised, we still want to give the target a message. + * Perhaps there was a parity error on this last message byte. + * Either way, the target should take us to message out phase + * and then attempt to complete the command again. We should use a + * critical section here to guard against a timeout triggering + * for this command and setting ATN while we are still processing + * the completion. + test SCSISIGI, ATNI jnz mesgin_done; + */ + + /* + * If we are identified and have successfully sent the CDB, + * any status will do. Optimize this fast path. + */ + test SCB_CONTROL, STATUS_RCVD jz mesgin_proto_violation; + test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz complete_accepted; + + /* + * If the target never sent an identify message but instead went + * to mesgin to give an invalid message, let the host abort us. + */ + test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation; + + /* + * If we received good status but never successfully sent the + * cdb, abort the command. + */ + test SCB_SCSI_STATUS,0xff jnz complete_accepted; + test SEQ_FLAGS, NO_CDB_SENT jnz mesgin_proto_violation; +complete_accepted: + + /* + * See if we attempted to deliver a message but the target ignored us. + */ + test SCB_CONTROL, MK_MESSAGE jz complete_nomsg; + SET_SEQINTCODE(MKMSG_FAILED) +complete_nomsg: + call queue_scb_completion; + jmp await_busfree; + +BEGIN_CRITICAL; +freeze_queue: + /* Cancel any pending select-out. */ + test SSTAT0, SELDO|SELINGO jnz . + 2; + and SCSISEQ0, ~ENSELO; + mov ACCUM_SAVE, A; + clr A; + add QFREEZE_COUNT, 1; + adc QFREEZE_COUNT[1], A; + or SEQ_FLAGS2, SELECTOUT_QFROZEN; + mov A, ACCUM_SAVE ret; +END_CRITICAL; + +/* + * Complete the current FIFO's SCB if data for this same + * SCB is not transferring in the other FIFO. + */ +SET_SRC_MODE M_DFF1; +SET_DST_MODE M_DFF1; +pkt_complete_scb_if_fifos_idle: + bmov ARG_1, SCBPTR, 2; + mvi DFFSXFRCTL, CLRCHN; + SET_MODE(M_SCSI, M_SCSI) + bmov SCBPTR, ARG_1, 2; + test SCB_FIFO_USE_COUNT, 0xFF jnz return; +queue_scb_completion: + test SCB_SCSI_STATUS,0xff jnz bad_status; + /* + * Check for residuals + */ + test SCB_SGPTR, SG_LIST_NULL jnz complete; /* No xfer */ + test SCB_SGPTR, SG_FULL_RESID jnz upload_scb;/* Never xfered */ + test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jz upload_scb; +complete: +BEGIN_CRITICAL; + bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2; + bmov COMPLETE_SCB_HEAD, SCBPTR, 2 ret; +END_CRITICAL; +bad_status: + cmp SCB_SCSI_STATUS, STATUS_PKT_SENSE je upload_scb; + call freeze_queue; +upload_scb: + /* + * Restore SCB TAG since we reuse this field + * in the sequencer. We don't want to corrupt + * it on the host. + */ + bmov SCB_TAG, SCBPTR, 2; +BEGIN_CRITICAL; + or SCB_SGPTR, SG_STATUS_VALID; + mvi SCB_NEXT_COMPLETE[1], SCB_LIST_NULL; + cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne add_dma_scb_tail; + bmov COMPLETE_DMA_SCB_HEAD, SCBPTR, 2; + bmov COMPLETE_DMA_SCB_TAIL, SCBPTR, 2 ret; +add_dma_scb_tail: + bmov REG0, SCBPTR, 2; + bmov SCBPTR, COMPLETE_DMA_SCB_TAIL, 2; + bmov SCB_NEXT_COMPLETE, REG0, 2; + bmov COMPLETE_DMA_SCB_TAIL, REG0, 2 ret; +END_CRITICAL; + +/* + * Is it a disconnect message? Set a flag in the SCB to remind us + * and await the bus going free. 
If this is an untagged transaction + * store the SCB id for it in our untagged target table for lookup on + * a reselection. + */ +mesgin_disconnect: + /* + * If ATN is raised, we still want to give the target a message. + * Perhaps there was a parity error on this last message byte + * or we want to abort this command. Either way, the target + * should take us to message out phase and then attempt to + * disconnect again. + * XXX - Wait for more testing. + test SCSISIGI, ATNI jnz mesgin_done; + */ + test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT + jnz mesgin_proto_violation; + or SCB_CONTROL,DISCONNECTED; + test SCB_CONTROL, TAG_ENB jnz await_busfree; +queue_disc_scb: + bmov REG0, SCBPTR, 2; + INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN); + bmov DINDEX, SINDEX, 2; + bmov DINDIR, REG0, 2; + bmov SCBPTR, REG0, 2; + /* FALLTHROUGH */ +await_busfree: + and SIMODE1, ~ENBUSFREE; + if ((ahd->bugs & AHD_BUSFREEREV_BUG) == 0) { + /* + * In the BUSFREEREV_BUG case, the + * busfree status was cleared at the + * beginning of the connection. + */ + mvi CLRSINT1,CLRBUSFREE; + } + mov NONE, SCSIDAT; /* Ack the last byte */ + test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) + jnz await_busfree_not_m_dff; +SET_SRC_MODE M_DFF1; +SET_DST_MODE M_DFF1; +await_busfree_clrchn: + mvi DFFSXFRCTL, CLRCHN; +await_busfree_not_m_dff: + /* clear target specific flags */ + mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT; + test SSTAT1,REQINIT|BUSFREE jz .; + /* + * We only set BUSFREE status once either a new + * phase has been detected or we are really + * BUSFREE. This allows the driver to know + * that we are active on the bus even though + * no identified transaction exists should a + * timeout occur while awaiting busfree. + */ + mvi LASTPHASE, P_BUSFREE; + test SSTAT1, BUSFREE jnz idle_loop; + SET_SEQINTCODE(MISSED_BUSFREE) + + +/* + * Save data pointers message: + * Copying RAM values back to SCB, for Save Data Pointers message, but + * only if we've actually been into a data phase to change them. This + * protects against bogus data in scratch ram and the residual counts + * since they are only initialized when we go into data_in or data_out. + * Ack the message as soon as possible. + */ +SET_SRC_MODE M_DFF1; +SET_DST_MODE M_DFF1; +mesgin_sdptrs: + mov NONE,SCSIDAT; /*dummy read from latch to ACK*/ + test SEQ_FLAGS, DPHASE jz ITloop; + call save_pointers; + jmp ITloop; + +save_pointers: + /* + * If we are asked to save our position at the end of the + * transfer, just mark us at the end rather than perform a + * full save. + */ + test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz save_pointers_full; + or SCB_SGPTR, SG_LIST_NULL ret; + +save_pointers_full: + /* + * The SCB_DATAPTR becomes the current SHADDR. + * All other information comes directly from our residual + * state. + */ + bmov SCB_DATAPTR, SHADDR, 8; + bmov SCB_DATACNT, SCB_RESIDUAL_DATACNT, 8 ret; + +/* + * Restore pointers message? Data pointers are recopied from the + * SCB anytime we enter a data phase for the first time, so all + * we need to do is clear the DPHASE flag and let the data phase + * code do the rest. We also reset/reallocate the FIFO to make + * sure we have a clean start for the next data or command phase. 
+ */ +mesgin_rdptrs: + and SEQ_FLAGS, ~DPHASE; + test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz msgin_rdptrs_get_fifo; + mvi DFFSXFRCTL, RSTCHN|CLRSHCNT; + SET_MODE(M_SCSI, M_SCSI) +msgin_rdptrs_get_fifo: + call allocate_fifo; + jmp mesgin_done; + +phase_lock: + if ((ahd->bugs & AHD_EARLY_REQ_BUG) != 0) { + /* + * Don't ignore persistent REQ assertions just because + * they were asserted within the bus settle delay window. + * This allows us to tolerate devices like the GEM318 + * that violate the SCSI spec. We are careful not to + * count REQ while we are waiting for it to fall during + * an async phase due to our asserted ACK. Each + * sequencer instruction takes ~25ns, so the REQ must + * last at least 100ns in order to be counted as a true + * REQ. + */ + test SCSIPHASE, 0xFF jnz phase_locked; + test SCSISIGI, ACKI jnz phase_lock; + test SCSISIGI, REQI jz phase_lock; + test SCSIPHASE, 0xFF jnz phase_locked; + test SCSISIGI, ACKI jnz phase_lock; + test SCSISIGI, REQI jz phase_lock; +phase_locked: + } else { + test SCSIPHASE, 0xFF jz .; + } + test SSTAT1, SCSIPERR jnz phase_lock; +phase_lock_latch_phase: + and LASTPHASE, PHASE_MASK, SCSISIGI ret; + +/* + * Functions to read data in Automatic PIO mode. + * + * An ACK is not sent on input from the target until SCSIDATL is read from. + * So we wait until SCSIDATL is latched (the usual way), then read the data + * byte directly off the bus using SCSIBUSL. When we have pulled the ATN + * line, or we just want to acknowledge the byte, then we do a dummy read + * from SCISDATL. The SCSI spec guarantees that the target will hold the + * data byte on the bus until we send our ACK. + * + * The assumption here is that these are called in a particular sequence, + * and that REQ is already set when inb_first is called. inb_{first,next} + * use the same calling convention as inb. + */ +inb_next: + mov NONE,SCSIDAT; /*dummy read from latch to ACK*/ +inb_next_wait: + /* + * If there is a parity error, wait for the kernel to + * see the interrupt and prepare our message response + * before continuing. + */ + test SCSIPHASE, 0xFF jz .; + test SSTAT1, SCSIPERR jnz inb_next_wait; +inb_next_check_phase: + and LASTPHASE, PHASE_MASK, SCSISIGI; + cmp LASTPHASE, P_MESGIN jne mesgin_phasemis; +inb_first: + clr DINDEX[1]; + mov DINDEX,SINDEX; + mov DINDIR,SCSIBUS ret; /*read byte directly from bus*/ +inb_last: + mov NONE,SCSIDAT ret; /*dummy read from latch to ACK*/ + +mk_mesg: + mvi SCSISIGO, ATNO; + mov MSG_OUT,SINDEX ret; + +SET_SRC_MODE M_DFF1; +SET_DST_MODE M_DFF1; +disable_ccsgen: + test SG_STATE, FETCH_INPROG jz disable_ccsgen_fetch_done; + clr CCSGCTL; +disable_ccsgen_fetch_done: + clr SG_STATE ret; + +service_fifo: + /* + * Do we have any prefetch left??? + */ + test SG_STATE, SEGS_AVAIL jnz idle_sg_avail; + + /* + * Can this FIFO have access to the S/G cache yet? + */ + test CCSGCTL, SG_CACHE_AVAIL jz return; + + /* Did we just finish fetching segs? */ + test CCSGCTL, CCSGDONE jnz idle_sgfetch_complete; + + /* Are we actively fetching segments? */ + test CCSGCTL, CCSGENACK jnz return; + + /* + * Should the other FIFO get the S/G cache first? If + * both FIFOs have been allocated since we last checked + * any FIFO, it is important that we service a FIFO + * that is not actively on the bus first. This guarantees + * that a FIFO will be freed to handle snapshot requests for + * any FIFO that is still on the bus. Chips with RTI do not + * perform snapshots, so don't bother with this test there. 
+ */ + if ((ahd->features & AHD_RTI) == 0) { + /* + * If we're not still receiving SCSI data, + * it is safe to allocate the S/G cache to + * this FIFO. + */ + test DFCNTRL, SCSIEN jz idle_sgfetch_start; + + /* + * Switch to the other FIFO. Non-RTI chips + * also have the "set mode" bug, so we must + * disable interrupts during the switch. + */ + mvi SEQINTCTL, INTVEC1DSL; + xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1); + + /* + * If the other FIFO needs loading, then it + * must not have claimed the S/G cache yet + * (SG_CACHE_AVAIL would have been cleared in + * the original FIFO mode and we test this above). + * Return to the idle loop so we can process the + * FIFO not currently on the bus first. + */ + test SG_STATE, LOADING_NEEDED jz idle_sgfetch_okay; + clr SEQINTCTL ret; +idle_sgfetch_okay: + xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1); + clr SEQINTCTL; + } + +idle_sgfetch_start: + /* + * We fetch a "cacheline aligned" and sized amount of data + * so we don't end up referencing a non-existent page. + * Cacheline aligned is in quotes because the kernel will + * set the prefetch amount to a reasonable level if the + * cacheline size is unknown. + */ + bmov SGHADDR, SCB_RESIDUAL_SGPTR, 4; + mvi SGHCNT, SG_PREFETCH_CNT; + if ((ahd->bugs & AHD_REG_SLOW_SETTLE_BUG) != 0) { + /* + * Need two instructions between "touches" of SGHADDR. + */ + nop; + } + and SGHADDR[0], SG_PREFETCH_ALIGN_MASK, SCB_RESIDUAL_SGPTR; + mvi CCSGCTL, CCSGEN|CCSGRESET; + or SG_STATE, FETCH_INPROG ret; +idle_sgfetch_complete: + /* + * Guard against SG_CACHE_AVAIL activating during sg fetch + * request in the other FIFO. + */ + test SG_STATE, FETCH_INPROG jz return; + clr CCSGCTL; + and CCSGADDR, SG_PREFETCH_ADDR_MASK, SCB_RESIDUAL_SGPTR; + mvi SG_STATE, SEGS_AVAIL|LOADING_NEEDED; +idle_sg_avail: + /* Does the hardware have space for another SG entry? */ + test DFSTATUS, PRELOAD_AVAIL jz return; + /* + * On the A, preloading a segment before HDMAENACK + * comes true can clobber the shadow address of the + * first segment in the S/G FIFO. Wait until it is + * safe to proceed. + */ + if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) == 0) { + test DFCNTRL, HDMAENACK jz return; + } + if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { + bmov HADDR, CCSGRAM, 8; + } else { + bmov HADDR, CCSGRAM, 4; + } + bmov HCNT, CCSGRAM, 3; + bmov SCB_RESIDUAL_DATACNT[3], CCSGRAM, 1; + if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) { + and HADDR[4], SG_HIGH_ADDR_BITS, SCB_RESIDUAL_DATACNT[3]; + } + if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { + /* Skip 4 bytes of pad. */ + add CCSGADDR, 4; + } +sg_advance: + clr A; /* add sizeof(struct scatter) */ + add SCB_RESIDUAL_SGPTR[0],SG_SIZEOF; + adc SCB_RESIDUAL_SGPTR[1],A; + adc SCB_RESIDUAL_SGPTR[2],A; + adc SCB_RESIDUAL_SGPTR[3],A; + mov SINDEX, SCB_RESIDUAL_SGPTR[0]; + test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 3; + or SINDEX, LAST_SEG; + clr SG_STATE; + mov SG_CACHE_PRE, SINDEX; + if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) { + /* + * Use SCSIENWRDIS so that SCSIEN is never + * modified by this operation. + */ + or DFCNTRL, PRELOADEN|HDMAEN|SCSIENWRDIS; + } else { + or DFCNTRL, PRELOADEN|HDMAEN; + } + /* + * Do we have another segment in the cache? + */ + add NONE, SG_PREFETCH_CNT_LIMIT, CCSGADDR; + jnc return; + and SG_STATE, ~SEGS_AVAIL ret; + +/* + * Initialize the DMA address and counter from the SCB. + */ +load_first_seg: + bmov HADDR, SCB_DATAPTR, 11; + and REG_ISR, ~SG_FULL_RESID, SCB_SGPTR[0]; + test SCB_DATACNT[3], SG_LAST_SEG jz . 
+ 2; + or REG_ISR, LAST_SEG; + mov SG_CACHE_PRE, REG_ISR; + mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN); + /* + * Since we are entering a data phase, we will + * rely on the SCB_RESID* fields. Initialize the + * residual and clear the full residual flag. + */ + and SCB_SGPTR[0], ~SG_FULL_RESID; + bmov SCB_RESIDUAL_DATACNT[3], SCB_DATACNT[3], 5; + /* If we need more S/G elements, tell the idle loop */ + test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jnz . + 2; + mvi SG_STATE, LOADING_NEEDED ret; + clr SG_STATE ret; + +p_data_handle_xfer: + call setjmp; + test SG_STATE, LOADING_NEEDED jnz service_fifo; +p_data_clear_handler: + or LONGJMP_ADDR[1], INVALID_ADDR ret; + +p_data: + test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz p_data_allowed; + SET_SEQINTCODE(PROTO_VIOLATION) +p_data_allowed: + + test SEQ_FLAGS, DPHASE jz data_phase_initialize; + + /* + * If we re-enter the data phase after going through another + * phase, our transfer location has almost certainly been + * corrupted by the intervening, non-data, transfers. Ask + * the host driver to fix us up based on the transfer residual + * unless we already know that we should be bitbucketing. + */ + test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket; + SET_SEQINTCODE(PDATA_REINIT) + jmp data_phase_inbounds; + +p_data_bitbucket: + /* + * Turn on `Bit Bucket' mode, wait until the target takes + * us to another phase, and then notify the host. + */ + mov SAVED_MODE, MODE_PTR; + test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) + jnz bitbucket_not_m_dff; + /* + * Ensure that any FIFO contents are cleared out and the + * FIFO freed prior to starting the BITBUCKET. BITBUCKET + * doesn't discard data already in the FIFO. + */ + mvi DFFSXFRCTL, RSTCHN|CLRSHCNT; + SET_MODE(M_SCSI, M_SCSI) +bitbucket_not_m_dff: + or SXFRCTL1,BITBUCKET; + /* Wait for non-data phase. */ + test SCSIPHASE, ~DATA_PHASE_MASK jz .; + and SXFRCTL1, ~BITBUCKET; + RESTORE_MODE(SAVED_MODE) +SET_SRC_MODE M_DFF1; +SET_DST_MODE M_DFF1; + SET_SEQINTCODE(DATA_OVERRUN) + jmp ITloop; + +data_phase_initialize: + test SCB_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket; + call load_first_seg; +data_phase_inbounds: + /* We have seen a data phase at least once. */ + or SEQ_FLAGS, DPHASE; + mov SAVED_MODE, MODE_PTR; + test SG_STATE, LOADING_NEEDED jz data_group_dma_loop; + call p_data_handle_xfer; +data_group_dma_loop: + /* + * The transfer is complete if either the last segment + * completes or the target changes phase. Both conditions + * will clear SCSIEN. + */ + call idle_loop_service_fifos; + call idle_loop_cchan; + call idle_loop_gsfifo; + RESTORE_MODE(SAVED_MODE) + test DFCNTRL, SCSIEN jnz data_group_dma_loop; + +data_group_dmafinish: + /* + * The transfer has terminated either due to a phase + * change, and/or the completion of the last segment. + * We have two goals here. Do as much other work + * as possible while the data fifo drains on a read + * and respond as quickly as possible to the standard + * messages (save data pointers/disconnect and command + * complete) that usually follow a data phase. + */ + call calc_residual; + + /* + * Go ahead and shut down the DMA engine now. + */ + test DFCNTRL, DIRECTION jnz data_phase_finish; +data_group_fifoflush: + if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) { + or DFCNTRL, FIFOFLUSH; + } + /* + * We have enabled the auto-ack feature. This means + * that the controller may have already transferred + * some overrun bytes into the data FIFO and acked them + * on the bus. 
The only way to detect this situation is + * to wait for LAST_SEG_DONE to come true on a completed + * transfer and then test to see if the data FIFO is + * non-empty. We know there is more data yet to transfer + * if SG_LIST_NULL is not yet set, thus there cannot be + * an overrun. + */ + test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz data_phase_finish; + test SG_CACHE_SHADOW, LAST_SEG_DONE jz .; + test DFSTATUS, FIFOEMP jnz data_phase_finish; + /* Overrun */ + jmp p_data; +data_phase_finish: + /* + * If the target has left us in data phase, loop through + * the dma code again. We will only loop if there is a + * data overrun. + */ + if ((ahd->flags & AHD_TARGETROLE) != 0) { + test SSTAT0, TARGET jnz data_phase_done; + } + if ((ahd->flags & AHD_INITIATORROLE) != 0) { + test SSTAT1, REQINIT jz .; + test SCSIPHASE, DATA_PHASE_MASK jnz p_data; + } + +data_phase_done: + /* Kill off any pending prefetch */ + call disable_ccsgen; + or LONGJMP_ADDR[1], INVALID_ADDR; + + if ((ahd->flags & AHD_TARGETROLE) != 0) { + test SEQ_FLAGS, DPHASE_PENDING jz ITloop; + /* + and SEQ_FLAGS, ~DPHASE_PENDING; + * For data-in phases, wait for any pending acks from the + * initiator before changing phase. We only need to + * send Ignore Wide Residue messages for data-in phases. + test DFCNTRL, DIRECTION jz target_ITloop; + test SSTAT1, REQINIT jnz .; + test SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jz target_ITloop; + SET_MODE(M_SCSI, M_SCSI) + test NEGCONOPTS, WIDEXFER jz target_ITloop; + */ + /* + * Issue an Ignore Wide Residue Message. + mvi P_MESGIN|BSYO call change_phase; + mvi MSG_IGN_WIDE_RESIDUE call target_outb; + mvi 1 call target_outb; + jmp target_ITloop; + */ + } else { + jmp ITloop; + } + +/* + * We assume that, even though data may still be + * transferring to the host, that the SCSI side of + * the DMA engine is now in a static state. This + * allows us to update our notion of where we are + * in this transfer. + * + * If, by chance, we stopped before being able + * to fetch additional segments for this transfer, + * yet the last S/G was completely exhausted, + * call our idle loop until it is able to load + * another segment. This will allow us to immediately + * pickup on the next segment on the next data phase. + * + * If we happened to stop on the last segment, then + * our residual information is still correct from + * the idle loop and there is no need to perform + * any fixups. + */ +residual_before_last_seg: + test MDFFSTAT, SHVALID jnz sgptr_fixup; + /* + * Can never happen from an interrupt as the packetized + * hardware will only interrupt us once SHVALID or + * LAST_SEG_DONE. + */ + call idle_loop_service_fifos; + RESTORE_MODE(SAVED_MODE) + /* FALLTHROUGH */ +calc_residual: + test SG_CACHE_SHADOW, LAST_SEG jz residual_before_last_seg; + /* Record if we've consumed all S/G entries */ + test MDFFSTAT, SHVALID jz . + 2; + bmov SCB_RESIDUAL_DATACNT, SHCNT, 3 ret; + or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL ret; + +sgptr_fixup: + /* + * Fixup the residual next S/G pointer. The S/G preload + * feature of the chip allows us to load two elements + * in addition to the currently active element. We + * store the bottom byte of the next S/G pointer in + * the SG_CACHE_PTR register so we can restore the + * correct value when the DMA completes. If the next + * sg ptr value has advanced to the point where higher + * bytes in the address have been affected, fix them + * too. 
+ */ + test SG_CACHE_SHADOW, 0x80 jz sgptr_fixup_done; + test SCB_RESIDUAL_SGPTR[0], 0x80 jnz sgptr_fixup_done; + add SCB_RESIDUAL_SGPTR[1], -1; + adc SCB_RESIDUAL_SGPTR[2], -1; + adc SCB_RESIDUAL_SGPTR[3], -1; +sgptr_fixup_done: + and SCB_RESIDUAL_SGPTR[0], SG_ADDR_MASK, SG_CACHE_SHADOW; + clr SCB_RESIDUAL_DATACNT[3]; /* We are not the last seg */ + bmov SCB_RESIDUAL_DATACNT, SHCNT, 3 ret; + +export timer_isr: + call issue_cmdcmplt; + mvi CLRSEQINTSTAT, CLRSEQ_SWTMRTO; + if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { + /* + * In H2A4, the mode pointer is not saved + * for intvec2, but is restored on iret. + * This can lead to the restoration of a + * bogus mode ptr. Manually clear the + * intmask bits and do a normal return + * to compensate. + */ + and SEQINTCTL, ~(INTMASK2|INTMASK1) ret; + } else { + or SEQINTCTL, IRET ret; + } + +export seq_isr: + if ((ahd->features & AHD_RTI) == 0) { + /* + * On RevA Silicon, if the target returns us to data-out + * after we have already trained for data-out, it is + * possible for us to transition the free running clock to + * data-valid before the required 100ns P1 setup time (8 P1 + * assertions in fast-160 mode). This will only happen if + * this L-Q is a continuation of a data transfer for which + * we have already prefetched data into our FIFO (LQ/Data + * followed by LQ/Data for the same write transaction). + * This can cause some target implementations to miss the + * first few data transfers on the bus. We detect this + * situation by noticing that this is the first data transfer + * after an LQ (LQIWORKONLQ true), that the data transfer is + * a continuation of a transfer already setup in our FIFO + * (SAVEPTRS interrupt), and that the transaction is a write + * (DIRECTION set in DFCNTRL). The delay is performed by + * disabling SCSIEN until we see the first REQ from the + * target. + * + * First instruction in an ISR cannot be a branch on + * Rev A. Snapshot LQISTAT2 so the status is not missed + * and deffer the test by one instruction. + */ + mov REG_ISR, LQISTAT2; + test REG_ISR, LQIWORKONLQ jz main_isr; + test SEQINTSRC, SAVEPTRS jz main_isr; + test LONGJMP_ADDR[1], INVALID_ADDR jz saveptr_active_fifo; + /* + * Switch to the active FIFO after clearing the snapshot + * savepointer in the current FIFO. We do this so that + * a pending CTXTDONE or SAVEPTR is visible in the active + * FIFO. This status is the only way we can detect if we + * have lost the race (e.g. host paused us) and our attempts + * to disable the channel occurred after all REQs were + * already seen and acked (REQINIT never comes true). + */ + mvi DFFSXFRCTL, CLRCHN; + xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1); + test DFCNTRL, DIRECTION jz interrupt_return; + and DFCNTRL, ~SCSIEN; +snapshot_wait_data_valid: + test SEQINTSRC, (CTXTDONE|SAVEPTRS) jnz interrupt_return; + test SSTAT1, REQINIT jz snapshot_wait_data_valid; +snapshot_data_valid: + or DFCNTRL, SCSIEN; + or SEQINTCTL, IRET ret; +snapshot_saveptr: + mvi DFFSXFRCTL, CLRCHN; + or SEQINTCTL, IRET ret; +main_isr: + } + test SEQINTSRC, CFG4DATA jnz cfg4data_intr; + test SEQINTSRC, CFG4ISTAT jnz cfg4istat_intr; + test SEQINTSRC, SAVEPTRS jnz saveptr_intr; + test SEQINTSRC, CFG4ICMD jnz cfg4icmd_intr; + SET_SEQINTCODE(INVALID_SEQINT) + +/* + * There are two types of save pointers interrupts: + * The first is a snapshot save pointers where the current FIFO is not + * active and contains a snapshot of the current poniter information. + * This happens between packets in a stream for a single L_Q. 
Since we + * are not performing a pointer save, we can safely clear the channel + * so it can be used for other transactions. On RTI capable controllers, + * where snapshots can, and are, disabled, the code to handle this type + * of snapshot is not active. + * + * The second case is a save pointers on an active FIFO which occurs + * if the target changes to a new L_Q or busfrees/QASes and the transfer + * has a residual. This should occur coincident with a ctxtdone. We + * disable the interrupt and allow our active routine to handle the + * save. + */ +saveptr_intr: + if ((ahd->features & AHD_RTI) == 0) { + test LONGJMP_ADDR[1], INVALID_ADDR jnz snapshot_saveptr; + } +saveptr_active_fifo: + and SEQIMODE, ~ENSAVEPTRS; + or SEQINTCTL, IRET ret; + +cfg4data_intr: + test SCB_SGPTR[0], SG_LIST_NULL jnz pkt_handle_overrun_inc_use_count; + call load_first_seg; + call pkt_handle_xfer; + inc SCB_FIFO_USE_COUNT; +interrupt_return: + or SEQINTCTL, IRET ret; + +cfg4istat_intr: + call freeze_queue; + add NONE, -13, SCB_CDB_LEN; + jnc cfg4istat_have_sense_addr; + test SCB_CDB_LEN, SCB_CDB_LEN_PTR jnz cfg4istat_have_sense_addr; + /* + * Host sets up address/count and enables transfer. + */ + SET_SEQINTCODE(CFG4ISTAT_INTR) + jmp cfg4istat_setup_handler; +cfg4istat_have_sense_addr: + bmov HADDR, SCB_SENSE_BUSADDR, 4; + mvi HCNT[1], (AHD_SENSE_BUFSIZE >> 8); + mvi SG_CACHE_PRE, LAST_SEG; + mvi DFCNTRL, PRELOADEN|SCSIEN|HDMAEN; +cfg4istat_setup_handler: + /* + * Status pkt is transferring to host. + * Wait in idle loop for transfer to complete. + * If a command completed before an attempted + * task management function completed, notify the host. + */ + test SCB_TASK_MANAGEMENT, 0xFF jz cfg4istat_no_taskmgmt_func; + SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY) +cfg4istat_no_taskmgmt_func: + call pkt_handle_status; + or SEQINTCTL, IRET ret; + +cfg4icmd_intr: + /* + * In the case of DMAing a CDB from the host, the normal + * CDB buffer is formatted with an 8 byte address followed + * by a 1 byte count. + */ + bmov HADDR[0], SCB_HOST_CDB_PTR, 9; + mvi SG_CACHE_PRE, LAST_SEG; + mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN); + call pkt_handle_cdb; + or SEQINTCTL, IRET ret; + +/* + * See if the target has gone on in this context creating an + * overrun condition. For the write case, the hardware cannot + * ack bytes until data are provided. So, if the target begins + * another packet without changing contexts, implying we are + * not sitting on a packet boundary, we are in an overrun + * situation. For the read case, the hardware will continue to + * ack bytes into the FIFO, and may even ack the last overrun packet + * into the FIFO. If the FIFO should become non-empty, we are in + * a read overrun case. + */ +#define check_overrun \ + /* Not on a packet boundary. */ \ + test MDFFSTAT, DLZERO jz pkt_handle_overrun; \ + test DFSTATUS, FIFOEMP jz pkt_handle_overrun + +pkt_handle_xfer: + test SG_STATE, LOADING_NEEDED jz pkt_last_seg; + call setjmp; + test SEQINTSRC, SAVEPTRS jnz pkt_saveptrs; + test SCSIPHASE, ~DATA_PHASE_MASK jz . + 2; + test SCSISIGO, ATNO jnz . + 2; + test SSTAT2, NONPACKREQ jz pkt_service_fifo; + /* + * Defer handling of this NONPACKREQ until we + * can be sure it pertains to this FIFO. SAVEPTRS + * will not be asserted if the NONPACKREQ is for us, + * so we must simulate it if shadow is valid. If + * shadow is not valid, keep running this FIFO until we + * have satisfied the transfer by loading segments and + * waiting for either shadow valid or last_seg_done. 
+ */ + test MDFFSTAT, SHVALID jnz pkt_saveptrs; +pkt_service_fifo: + test SG_STATE, LOADING_NEEDED jnz service_fifo; +pkt_last_seg: + call setjmp; + test SEQINTSRC, SAVEPTRS jnz pkt_saveptrs; + test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_last_seg_done; + test SCSIPHASE, ~DATA_PHASE_MASK jz . + 2; + test SCSISIGO, ATNO jnz . + 2; + test SSTAT2, NONPACKREQ jz return; + test MDFFSTAT, SHVALID jz return; + /* FALLTHROUGH */ + +/* + * Either a SAVEPTRS interrupt condition is pending for this FIFO + * or we have a pending NONPACKREQ for this FIFO. We differentiate + * between the two by capturing the state of the SAVEPTRS interrupt + * prior to clearing this status and executing the common code for + * these two cases. + */ +pkt_saveptrs: +BEGIN_CRITICAL; + if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) { + or DFCNTRL, FIFOFLUSH; + } + mov REG0, SEQINTSRC; + call calc_residual; + call save_pointers; + mvi CLRSEQINTSRC, CLRSAVEPTRS; + call disable_ccsgen; + or SEQIMODE, ENSAVEPTRS; + test DFCNTRL, DIRECTION jnz pkt_saveptrs_check_status; + test DFSTATUS, FIFOEMP jnz pkt_saveptrs_check_status; + /* + * Keep a handler around for this FIFO until it drains + * to the host to guarantee that we don't complete the + * command to the host before the data arrives. + */ +pkt_saveptrs_wait_fifoemp: + call setjmp; + test DFSTATUS, FIFOEMP jz return; +pkt_saveptrs_check_status: + or LONGJMP_ADDR[1], INVALID_ADDR; + test REG0, SAVEPTRS jz unexpected_nonpkt_phase; + dec SCB_FIFO_USE_COUNT; + test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle; + mvi DFFSXFRCTL, CLRCHN ret; + +/* + * LAST_SEG_DONE status has been seen in the current FIFO. + * This indicates that all of the allowed data for this + * command has transferred across the SCSI and host buses. + * Check for overrun and see if we can complete this command. + */ +pkt_last_seg_done: + /* + * Mark transfer as completed. + */ + or SCB_SGPTR, SG_LIST_NULL; + + /* + * Wait for the current context to finish to verify that + * no overrun condition has occurred. + */ + test SEQINTSRC, CTXTDONE jnz pkt_ctxt_done; + call setjmp; +pkt_wait_ctxt_done_loop: + test SEQINTSRC, CTXTDONE jnz pkt_ctxt_done; + /* + * A sufficiently large overrun or a NONPACKREQ may + * prevent CTXTDONE from ever asserting, so we must + * poll for these statuses too. + */ + check_overrun; + test SSTAT2, NONPACKREQ jz return; + test SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase; + /* FALLTHROUGH */ + +pkt_ctxt_done: + check_overrun; + or LONGJMP_ADDR[1], INVALID_ADDR; + /* + * If status has been received, it is safe to skip + * the check to see if another FIFO is active because + * LAST_SEG_DONE has been observed. However, we check + * the FIFO anyway since it costs us only one extra + * instruction to leverage common code to perform the + * SCB completion. + */ + dec SCB_FIFO_USE_COUNT; + test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle; + mvi DFFSXFRCTL, CLRCHN ret; +END_CRITICAL; + +/* + * Must wait until CDB xfer is over before issuing the + * clear channel. + */ +pkt_handle_cdb: + call setjmp; + test SG_CACHE_SHADOW, LAST_SEG_DONE jz return; + or LONGJMP_ADDR[1], INVALID_ADDR; + mvi DFFSXFRCTL, CLRCHN ret; + +/* + * Watch over the status transfer. Our host sense buffer is + * large enough to take the maximum allowed status packet. + * None-the-less, we must still catch and report overruns to + * the host. Additionally, properly catch unexpected non-packet + * phases that are typically caused by CRC errors in status packet + * transmission. 
+ */ +pkt_handle_status: + call setjmp; + test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun; + test SEQINTSRC, CTXTDONE jz pkt_status_check_nonpackreq; + test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun; +pkt_status_IU_done: + if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) { + or DFCNTRL, FIFOFLUSH; + } + test DFSTATUS, FIFOEMP jz return; +BEGIN_CRITICAL; + or LONGJMP_ADDR[1], INVALID_ADDR; + mvi SCB_SCSI_STATUS, STATUS_PKT_SENSE; + or SCB_CONTROL, STATUS_RCVD; + jmp pkt_complete_scb_if_fifos_idle; +END_CRITICAL; +pkt_status_check_overrun: + /* + * Status PKT overruns are unceremoniously recovered with a + * bus reset. If we've overrun, let the host know so that + * recovery can be performed. + * + * LAST_SEG_DONE has been observed. If either CTXTDONE or + * a NONPACKREQ phase change has occurred and the FIFO is + * empty, there is no overrun. + */ + test DFSTATUS, FIFOEMP jz pkt_status_report_overrun; + test SEQINTSRC, CTXTDONE jz . + 2; + test DFSTATUS, FIFOEMP jnz pkt_status_IU_done; + test SCSIPHASE, ~DATA_PHASE_MASK jz return; + test DFSTATUS, FIFOEMP jnz pkt_status_check_nonpackreq; +pkt_status_report_overrun: + SET_SEQINTCODE(STATUS_OVERRUN) + /* SEQUENCER RESTARTED */ +pkt_status_check_nonpackreq: + /* + * CTXTDONE may be held off if a NONPACKREQ is associated with + * the current context. If a NONPACKREQ is observed, decide + * if it is for the current context. If it is for the current + * context, we must defer NONPACKREQ processing until all data + * has transferred to the host. + */ + test SCSIPHASE, ~DATA_PHASE_MASK jz return; + test SCSISIGO, ATNO jnz . + 2; + test SSTAT2, NONPACKREQ jz return; + test SEQINTSRC, CTXTDONE jnz pkt_status_IU_done; + test DFSTATUS, FIFOEMP jz return; + /* + * The unexpected nonpkt phase handler assumes that any + * data channel use will have a FIFO reference count. It + * turns out that the status handler doesn't need a reference + * count since the status received flag, and thus completion + * processing, cannot be set until the handler is finished. + * We increment the count here to make the nonpkt handler + * happy. + */ + inc SCB_FIFO_USE_COUNT; + /* FALLTHROUGH */ + +/* + * Nonpackreq is a polled status. It can come true in three situations: + * we have received an L_Q, we have sent one or more L_Qs, or there is no + * L_Q context associated with this REQ (REQ occurs immediately after a + * (re)selection). Routines that know the context responsible for this + * nonpackreq call directly into unexpected_nonpkt_phase. In the case of the + * top level idle loop, we exhaust all active contexts prior to determining that + * we simply do not have the full I_T_L_Q for this phase. + */ +unexpected_nonpkt_phase_find_ctxt: + /* + * This nonpackreq is most likely associated with one of the tags + * in a FIFO or an outgoing LQ. Only treat it as an I_T only + * nonpackreq if we've cleared out the FIFOs and handled any + * pending SELDO. 
+ */ +SET_SRC_MODE M_SCSI; +SET_DST_MODE M_SCSI; + and A, FIFO1FREE|FIFO0FREE, DFFSTAT; + cmp A, FIFO1FREE|FIFO0FREE jne return; + test SSTAT0, SELDO jnz return; + mvi SCBPTR[1], SCB_LIST_NULL; +unexpected_nonpkt_phase: + test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) + jnz unexpected_nonpkt_mode_cleared; +SET_SRC_MODE M_DFF0; +SET_DST_MODE M_DFF0; + or LONGJMP_ADDR[1], INVALID_ADDR; + dec SCB_FIFO_USE_COUNT; + mvi DFFSXFRCTL, CLRCHN; +unexpected_nonpkt_mode_cleared: + mvi CLRSINT2, CLRNONPACKREQ; + if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { + /* + * Test to ensure that the bus has not + * already gone free prior to clearing + * any stale busfree status. This avoids + * a window whereby a busfree just after + * a selection could be missed. + */ + test SCSISIGI, BSYI jz . + 2; + mvi CLRSINT1,CLRBUSFREE; + or SIMODE1, ENBUSFREE; + } + test SCSIPHASE, ~(MSG_IN_PHASE|MSG_OUT_PHASE) jnz illegal_phase; + SET_SEQINTCODE(ENTERING_NONPACK) + jmp ITloop; + +illegal_phase: + SET_SEQINTCODE(ILLEGAL_PHASE) + jmp ITloop; + +/* + * We have entered an overrun situation. If we have working + * BITBUCKET, flip that on and let the hardware eat any overrun + * data. Otherwise use an overrun buffer in the host to simulate + * BITBUCKET. + */ +pkt_handle_overrun_inc_use_count: + inc SCB_FIFO_USE_COUNT; +pkt_handle_overrun: + SET_SEQINTCODE(CFG4OVERRUN) + call freeze_queue; + if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) == 0) { + or DFFSXFRCTL, DFFBITBUCKET; +SET_SRC_MODE M_DFF1; +SET_DST_MODE M_DFF1; + } else { + call load_overrun_buf; + mvi DFCNTRL, (HDMAEN|SCSIEN|PRELOADEN); + } + call setjmp; + if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { + test DFSTATUS, PRELOAD_AVAIL jz overrun_load_done; + call load_overrun_buf; + or DFCNTRL, PRELOADEN; +overrun_load_done: + test SEQINTSRC, CTXTDONE jnz pkt_overrun_end; + } else { + test DFFSXFRCTL, DFFBITBUCKET jz pkt_overrun_end; + } + test SSTAT2, NONPACKREQ jz return; +pkt_overrun_end: + or SCB_RESIDUAL_SGPTR, SG_OVERRUN_RESID; + test SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase; + dec SCB_FIFO_USE_COUNT; + or LONGJMP_ADDR[1], INVALID_ADDR; + test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle; + mvi DFFSXFRCTL, CLRCHN ret; + +if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { +load_overrun_buf: + /* + * Load a dummy segment if preload space is available. + */ + mov HADDR[0], SHARED_DATA_ADDR; + add HADDR[1], PKT_OVERRUN_BUFOFFSET, SHARED_DATA_ADDR[1]; + mov ACCUM_SAVE, A; + clr A; + adc HADDR[2], A, SHARED_DATA_ADDR[2]; + adc HADDR[3], A, SHARED_DATA_ADDR[3]; + mov A, ACCUM_SAVE; + bmov HADDR[4], ALLZEROS, 4; + /* PKT_OVERRUN_BUFSIZE is a multiple of 256 */ + clr HCNT[0]; + mvi HCNT[1], ((PKT_OVERRUN_BUFSIZE >> 8) & 0xFF); + clr HCNT[2] ret; +} diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c new file mode 100644 index 000000000..3e3100dbf --- /dev/null +++ b/drivers/scsi/aic7xxx/aic79xx_core.c @@ -0,0 +1,10724 @@ +/* + * Core routines and tables shareable across OS platforms. + * + * Copyright (c) 1994-2002 Justin T. Gibbs. + * Copyright (c) 2000-2003 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $ + */ + +#include "aic79xx_osm.h" +#include "aic79xx_inline.h" +#include "aicasm/aicasm_insformat.h" + +/***************************** Lookup Tables **********************************/ +static const char *const ahd_chip_names[] = +{ + "NONE", + "aic7901", + "aic7902", + "aic7901A" +}; + +/* + * Hardware error codes. + */ +struct ahd_hard_error_entry { + uint8_t errno; + const char *errmesg; +}; + +static const struct ahd_hard_error_entry ahd_hard_errors[] = { + { DSCTMOUT, "Discard Timer has timed out" }, + { ILLOPCODE, "Illegal Opcode in sequencer program" }, + { SQPARERR, "Sequencer Parity Error" }, + { DPARERR, "Data-path Parity Error" }, + { MPARERR, "Scratch or SCB Memory Parity Error" }, + { CIOPARERR, "CIOBUS Parity Error" }, +}; +static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors); + +static const struct ahd_phase_table_entry ahd_phase_table[] = +{ + { P_DATAOUT, NOP, "in Data-out phase" }, + { P_DATAIN, INITIATOR_ERROR, "in Data-in phase" }, + { P_DATAOUT_DT, NOP, "in DT Data-out phase" }, + { P_DATAIN_DT, INITIATOR_ERROR, "in DT Data-in phase" }, + { P_COMMAND, NOP, "in Command phase" }, + { P_MESGOUT, NOP, "in Message-out phase" }, + { P_STATUS, INITIATOR_ERROR, "in Status phase" }, + { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, + { P_BUSFREE, NOP, "while idle" }, + { 0, NOP, "in unknown phase" } +}; + +/* + * In most cases we only wish to itterate over real phases, so + * exclude the last element from the count. 
+ */ +static const u_int num_phases = ARRAY_SIZE(ahd_phase_table) - 1; + +/* Our Sequencer Program */ +#include "aic79xx_seq.h" + +/**************************** Function Declarations ***************************/ +static void ahd_handle_transmission_error(struct ahd_softc *ahd); +static void ahd_handle_lqiphase_error(struct ahd_softc *ahd, + u_int lqistat1); +static int ahd_handle_pkt_busfree(struct ahd_softc *ahd, + u_int busfreetime); +static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd); +static void ahd_handle_proto_violation(struct ahd_softc *ahd); +static void ahd_force_renegotiation(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo); + +static struct ahd_tmode_tstate* + ahd_alloc_tstate(struct ahd_softc *ahd, + u_int scsi_id, char channel); +#ifdef AHD_TARGET_MODE +static void ahd_free_tstate(struct ahd_softc *ahd, + u_int scsi_id, char channel, int force); +#endif +static void ahd_devlimited_syncrate(struct ahd_softc *ahd, + struct ahd_initiator_tinfo *, + u_int *period, + u_int *ppr_options, + role_t role); +static void ahd_update_neg_table(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + struct ahd_transinfo *tinfo); +static void ahd_update_pending_scbs(struct ahd_softc *ahd); +static void ahd_fetch_devinfo(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo); +static void ahd_scb_devinfo(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + struct scb *scb); +static void ahd_setup_initiator_msgout(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + struct scb *scb); +static void ahd_build_transfer_msg(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo); +static void ahd_construct_sdtr(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + u_int period, u_int offset); +static void ahd_construct_wdtr(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + u_int bus_width); +static void ahd_construct_ppr(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + u_int period, u_int offset, + u_int bus_width, u_int ppr_options); +static void ahd_clear_msg_state(struct ahd_softc *ahd); +static void ahd_handle_message_phase(struct ahd_softc *ahd); +typedef enum { + AHDMSG_1B, + AHDMSG_2B, + AHDMSG_EXT +} ahd_msgtype; +static int ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, + u_int msgval, int full); +static int ahd_parse_msg(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo); +static int ahd_handle_msg_reject(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo); +static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo); +static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd); +static void ahd_handle_devreset(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + u_int lun, cam_status status, + char *message, int verbose_level); +#ifdef AHD_TARGET_MODE +static void ahd_setup_target_msgin(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + struct scb *scb); +#endif + +static u_int ahd_sglist_size(struct ahd_softc *ahd); +static u_int ahd_sglist_allocsize(struct ahd_softc *ahd); +static bus_dmamap_callback_t + ahd_dmamap_cb; +static void ahd_initialize_hscbs(struct ahd_softc *ahd); +static int ahd_init_scbdata(struct ahd_softc *ahd); +static void ahd_fini_scbdata(struct ahd_softc *ahd); +static void ahd_setup_iocell_workaround(struct ahd_softc *ahd); +static void ahd_iocell_first_selection(struct ahd_softc *ahd); +static void ahd_add_col_list(struct ahd_softc *ahd, + struct scb *scb, u_int col_idx); +static void ahd_rem_col_list(struct ahd_softc *ahd, + struct scb *scb); +static void ahd_chip_init(struct 
ahd_softc *ahd); +static void ahd_qinfifo_requeue(struct ahd_softc *ahd, + struct scb *prev_scb, + struct scb *scb); +static int ahd_qinfifo_count(struct ahd_softc *ahd); +static int ahd_search_scb_list(struct ahd_softc *ahd, int target, + char channel, int lun, u_int tag, + role_t role, uint32_t status, + ahd_search_action action, + u_int *list_head, u_int *list_tail, + u_int tid); +static void ahd_stitch_tid_list(struct ahd_softc *ahd, + u_int tid_prev, u_int tid_cur, + u_int tid_next); +static void ahd_add_scb_to_free_list(struct ahd_softc *ahd, + u_int scbid); +static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, + u_int prev, u_int next, u_int tid); +static void ahd_reset_current_bus(struct ahd_softc *ahd); +static void ahd_stat_timer(struct timer_list *t); +#ifdef AHD_DUMP_SEQ +static void ahd_dumpseq(struct ahd_softc *ahd); +#endif +static void ahd_loadseq(struct ahd_softc *ahd); +static int ahd_check_patch(struct ahd_softc *ahd, + const struct patch **start_patch, + u_int start_instr, u_int *skip_addr); +static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd, + u_int address); +static void ahd_download_instr(struct ahd_softc *ahd, + u_int instrptr, uint8_t *dconsts); +static int ahd_probe_stack_size(struct ahd_softc *ahd); +static int ahd_scb_active_in_fifo(struct ahd_softc *ahd, + struct scb *scb); +static void ahd_run_data_fifo(struct ahd_softc *ahd, + struct scb *scb); + +#ifdef AHD_TARGET_MODE +static void ahd_queue_lstate_event(struct ahd_softc *ahd, + struct ahd_tmode_lstate *lstate, + u_int initiator_id, + u_int event_type, + u_int event_arg); +static void ahd_update_scsiid(struct ahd_softc *ahd, + u_int targid_mask); +static int ahd_handle_target_cmd(struct ahd_softc *ahd, + struct target_cmd *cmd); +#endif + +static int ahd_abort_scbs(struct ahd_softc *ahd, int target, + char channel, int lun, u_int tag, + role_t role, uint32_t status); +static void ahd_alloc_scbs(struct ahd_softc *ahd); +static void ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, + u_int scbid); +static void ahd_calc_residual(struct ahd_softc *ahd, + struct scb *scb); +static void ahd_clear_critical_section(struct ahd_softc *ahd); +static void ahd_clear_intstat(struct ahd_softc *ahd); +static void ahd_enable_coalescing(struct ahd_softc *ahd, + int enable); +static u_int ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl); +static void ahd_freeze_devq(struct ahd_softc *ahd, + struct scb *scb); +static void ahd_handle_scb_status(struct ahd_softc *ahd, + struct scb *scb); +static const struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase); +static void ahd_shutdown(void *arg); +static void ahd_update_coalescing_values(struct ahd_softc *ahd, + u_int timer, + u_int maxcmds, + u_int mincmds); +static int ahd_verify_vpd_cksum(struct vpd_config *vpd); +static int ahd_wait_seeprom(struct ahd_softc *ahd); +static int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, + int target, char channel, int lun, + u_int tag, role_t role); + +static void ahd_reset_cmds_pending(struct ahd_softc *ahd); + +/*************************** Interrupt Services *******************************/ +static void ahd_run_qoutfifo(struct ahd_softc *ahd); +#ifdef AHD_TARGET_MODE +static void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused); +#endif +static void ahd_handle_hwerrint(struct ahd_softc *ahd); +static void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat); +static void ahd_handle_scsiint(struct ahd_softc *ahd, + u_int intstat); + +/************************ Sequencer Execution Control 
*************************/ +void +ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) +{ + if (ahd->src_mode == src && ahd->dst_mode == dst) + return; +#ifdef AHD_DEBUG + if (ahd->src_mode == AHD_MODE_UNKNOWN + || ahd->dst_mode == AHD_MODE_UNKNOWN) + panic("Setting mode prior to saving it.\n"); + if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) + printk("%s: Setting mode 0x%x\n", ahd_name(ahd), + ahd_build_mode_state(ahd, src, dst)); +#endif + ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst)); + ahd->src_mode = src; + ahd->dst_mode = dst; +} + +static void +ahd_update_modes(struct ahd_softc *ahd) +{ + ahd_mode_state mode_ptr; + ahd_mode src; + ahd_mode dst; + + mode_ptr = ahd_inb(ahd, MODE_PTR); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) + printk("Reading mode 0x%x\n", mode_ptr); +#endif + ahd_extract_mode_state(ahd, mode_ptr, &src, &dst); + ahd_known_modes(ahd, src, dst); +} + +static void +ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode, + ahd_mode dstmode, const char *file, int line) +{ +#ifdef AHD_DEBUG + if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0 + || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) { + panic("%s:%s:%d: Mode assertion failed.\n", + ahd_name(ahd), file, line); + } +#endif +} + +#define AHD_ASSERT_MODES(ahd, source, dest) \ + ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__); + +ahd_mode_state +ahd_save_modes(struct ahd_softc *ahd) +{ + if (ahd->src_mode == AHD_MODE_UNKNOWN + || ahd->dst_mode == AHD_MODE_UNKNOWN) + ahd_update_modes(ahd); + + return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode)); +} + +void +ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state) +{ + ahd_mode src; + ahd_mode dst; + + ahd_extract_mode_state(ahd, state, &src, &dst); + ahd_set_modes(ahd, src, dst); +} + +/* + * Determine whether the sequencer has halted code execution. + * Returns non-zero status if the sequencer is stopped. + */ +int +ahd_is_paused(struct ahd_softc *ahd) +{ + return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0); +} + +/* + * Request that the sequencer stop and wait, indefinitely, for it + * to stop. The sequencer will only acknowledge that it is paused + * once it has reached an instruction boundary and PAUSEDIS is + * cleared in the SEQCTL register. The sequencer may use PAUSEDIS + * for critical sections. + */ +void +ahd_pause(struct ahd_softc *ahd) +{ + ahd_outb(ahd, HCNTRL, ahd->pause); + + /* + * Since the sequencer can disable pausing in a critical section, we + * must loop until it actually stops. + */ + while (ahd_is_paused(ahd) == 0) + ; +} + +/* + * Allow the sequencer to continue program execution. + * We check here to ensure that no additional interrupt + * sources that would cause the sequencer to halt have been + * asserted. If, for example, a SCSI bus reset is detected + * while we are fielding a different, pausing, interrupt type, + * we don't want to release the sequencer before going back + * into our interrupt handler and dealing with this new + * condition. + */ +void +ahd_unpause(struct ahd_softc *ahd) +{ + /* + * Automatically restore our modes to those saved + * prior to the first change of the mode. 
+ */ + if (ahd->saved_src_mode != AHD_MODE_UNKNOWN + && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) { + if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0) + ahd_reset_cmds_pending(ahd); + ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); + } + + if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0) + ahd_outb(ahd, HCNTRL, ahd->unpause); + + ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN); +} + +/*********************** Scatter Gather List Handling *************************/ +void * +ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb, + void *sgptr, dma_addr_t addr, bus_size_t len, int last) +{ + scb->sg_count++; + if (sizeof(dma_addr_t) > 4 + && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) { + struct ahd_dma64_seg *sg; + + sg = (struct ahd_dma64_seg *)sgptr; + sg->addr = ahd_htole64(addr); + sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0)); + return (sg + 1); + } else { + struct ahd_dma_seg *sg; + + sg = (struct ahd_dma_seg *)sgptr; + sg->addr = ahd_htole32(addr & 0xFFFFFFFF); + sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000) + | (last ? AHD_DMA_LAST_SEG : 0)); + return (sg + 1); + } +} + +static void +ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb) +{ + /* XXX Handle target mode SCBs. */ + scb->crc_retry_count = 0; + if ((scb->flags & SCB_PACKETIZED) != 0) { + /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */ + scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE; + } else { + if (ahd_get_transfer_length(scb) & 0x01) + scb->hscb->task_attribute = SCB_XFERLEN_ODD; + else + scb->hscb->task_attribute = 0; + } + + if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR + || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0) + scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr = + ahd_htole32(scb->sense_busaddr); +} + +static void +ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb) +{ + /* + * Copy the first SG into the "current" data ponter area. + */ + if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { + struct ahd_dma64_seg *sg; + + sg = (struct ahd_dma64_seg *)scb->sg_list; + scb->hscb->dataptr = sg->addr; + scb->hscb->datacnt = sg->len; + } else { + struct ahd_dma_seg *sg; + uint32_t *dataptr_words; + + sg = (struct ahd_dma_seg *)scb->sg_list; + dataptr_words = (uint32_t*)&scb->hscb->dataptr; + dataptr_words[0] = sg->addr; + dataptr_words[1] = 0; + if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) { + uint64_t high_addr; + + high_addr = ahd_le32toh(sg->len) & 0x7F000000; + scb->hscb->dataptr |= ahd_htole64(high_addr << 8); + } + scb->hscb->datacnt = sg->len; + } + /* + * Note where to find the SG entries in bus space. + * We also set the full residual flag which the + * sequencer will clear as soon as a data transfer + * occurs. 
+ */ + scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID); +} + +static void +ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb) +{ + scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL); + scb->hscb->dataptr = 0; + scb->hscb->datacnt = 0; +} + +/************************** Memory mapping routines ***************************/ +static void * +ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr) +{ + dma_addr_t sg_offset; + + /* sg_list_phys points to entry 1, not 0 */ + sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd)); + return ((uint8_t *)scb->sg_list + sg_offset); +} + +static uint32_t +ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg) +{ + dma_addr_t sg_offset; + + /* sg_list_phys points to entry 1, not 0 */ + sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list) + - ahd_sg_size(ahd); + + return (scb->sg_list_busaddr + sg_offset); +} + +static void +ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op) +{ + ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat, + scb->hscb_map->dmamap, + /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr, + /*len*/sizeof(*scb->hscb), op); +} + +void +ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op) +{ + if (scb->sg_count == 0) + return; + + ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat, + scb->sg_map->dmamap, + /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd), + /*len*/ahd_sg_size(ahd) * scb->sg_count, op); +} + +static void +ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op) +{ + ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat, + scb->sense_map->dmamap, + /*offset*/scb->sense_busaddr, + /*len*/AHD_SENSE_BUFSIZE, op); +} + +#ifdef AHD_TARGET_MODE +static uint32_t +ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index) +{ + return (((uint8_t *)&ahd->targetcmds[index]) + - (uint8_t *)ahd->qoutfifo); +} +#endif + +/*********************** Miscellaneous Support Functions ***********************/ +/* + * Return pointers to the transfer negotiation information + * for the specified our_id/remote_id pair. + */ +struct ahd_initiator_tinfo * +ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id, + u_int remote_id, struct ahd_tmode_tstate **tstate) +{ + /* + * Transfer data structures are stored from the perspective + * of the target role. Since the parameters for a connection + * in the initiator role to a given target are the same as + * when the roles are reversed, we pretend we are the target. + */ + if (channel == 'B') + our_id += 8; + *tstate = ahd->enabled_targets[our_id]; + return (&(*tstate)->transinfo[remote_id]); +} + +uint16_t +ahd_inw(struct ahd_softc *ahd, u_int port) +{ + /* + * Read high byte first as some registers increment + * or have other side effects when the low byte is + * read. + */ + uint16_t r = ahd_inb(ahd, port+1) << 8; + return r | ahd_inb(ahd, port); +} + +void +ahd_outw(struct ahd_softc *ahd, u_int port, u_int value) +{ + /* + * Write low byte first to accommodate registers + * such as PRGMCNT where the order maters. 
+ */ + ahd_outb(ahd, port, value & 0xFF); + ahd_outb(ahd, port+1, (value >> 8) & 0xFF); +} + +uint32_t +ahd_inl(struct ahd_softc *ahd, u_int port) +{ + return ((ahd_inb(ahd, port)) + | (ahd_inb(ahd, port+1) << 8) + | (ahd_inb(ahd, port+2) << 16) + | (ahd_inb(ahd, port+3) << 24)); +} + +void +ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value) +{ + ahd_outb(ahd, port, (value) & 0xFF); + ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF); + ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF); + ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF); +} + +uint64_t +ahd_inq(struct ahd_softc *ahd, u_int port) +{ + return ((ahd_inb(ahd, port)) + | (ahd_inb(ahd, port+1) << 8) + | (ahd_inb(ahd, port+2) << 16) + | (ahd_inb(ahd, port+3) << 24) + | (((uint64_t)ahd_inb(ahd, port+4)) << 32) + | (((uint64_t)ahd_inb(ahd, port+5)) << 40) + | (((uint64_t)ahd_inb(ahd, port+6)) << 48) + | (((uint64_t)ahd_inb(ahd, port+7)) << 56)); +} + +void +ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value) +{ + ahd_outb(ahd, port, value & 0xFF); + ahd_outb(ahd, port+1, (value >> 8) & 0xFF); + ahd_outb(ahd, port+2, (value >> 16) & 0xFF); + ahd_outb(ahd, port+3, (value >> 24) & 0xFF); + ahd_outb(ahd, port+4, (value >> 32) & 0xFF); + ahd_outb(ahd, port+5, (value >> 40) & 0xFF); + ahd_outb(ahd, port+6, (value >> 48) & 0xFF); + ahd_outb(ahd, port+7, (value >> 56) & 0xFF); +} + +u_int +ahd_get_scbptr(struct ahd_softc *ahd) +{ + AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), + ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); + return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8)); +} + +void +ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr) +{ + AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), + ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); + ahd_outb(ahd, SCBPTR, scbptr & 0xFF); + ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF); +} + +#if 0 /* unused */ +static u_int +ahd_get_hnscb_qoff(struct ahd_softc *ahd) +{ + return (ahd_inw_atomic(ahd, HNSCB_QOFF)); +} +#endif + +static void +ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value) +{ + ahd_outw_atomic(ahd, HNSCB_QOFF, value); +} + +#if 0 /* unused */ +static u_int +ahd_get_hescb_qoff(struct ahd_softc *ahd) +{ + return (ahd_inb(ahd, HESCB_QOFF)); +} +#endif + +static void +ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value) +{ + ahd_outb(ahd, HESCB_QOFF, value); +} + +static u_int +ahd_get_snscb_qoff(struct ahd_softc *ahd) +{ + u_int oldvalue; + + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + oldvalue = ahd_inw(ahd, SNSCB_QOFF); + ahd_outw(ahd, SNSCB_QOFF, oldvalue); + return (oldvalue); +} + +static void +ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value) +{ + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + ahd_outw(ahd, SNSCB_QOFF, value); +} + +#if 0 /* unused */ +static u_int +ahd_get_sescb_qoff(struct ahd_softc *ahd) +{ + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + return (ahd_inb(ahd, SESCB_QOFF)); +} +#endif + +static void +ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value) +{ + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + ahd_outb(ahd, SESCB_QOFF, value); +} + +#if 0 /* unused */ +static u_int +ahd_get_sdscb_qoff(struct ahd_softc *ahd) +{ + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8)); +} +#endif + +static void +ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value) +{ + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + ahd_outb(ahd, 
SDSCB_QOFF, value & 0xFF); + ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF); +} + +u_int +ahd_inb_scbram(struct ahd_softc *ahd, u_int offset) +{ + u_int value; + + /* + * Workaround PCI-X Rev A. hardware bug. + * After a host read of SCB memory, the chip + * may become confused into thinking prefetch + * was required. This starts the discard timer + * running and can cause an unexpected discard + * timer interrupt. The work around is to read + * a normal register prior to the exhaustion of + * the discard timer. The mode pointer register + * has no side effects and so serves well for + * this purpose. + * + * Razor #528 + */ + value = ahd_inb(ahd, offset); + if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0) + ahd_inb(ahd, MODE_PTR); + return (value); +} + +u_int +ahd_inw_scbram(struct ahd_softc *ahd, u_int offset) +{ + return (ahd_inb_scbram(ahd, offset) + | (ahd_inb_scbram(ahd, offset+1) << 8)); +} + +static uint32_t +ahd_inl_scbram(struct ahd_softc *ahd, u_int offset) +{ + return (ahd_inw_scbram(ahd, offset) + | (ahd_inw_scbram(ahd, offset+2) << 16)); +} + +static uint64_t +ahd_inq_scbram(struct ahd_softc *ahd, u_int offset) +{ + return (ahd_inl_scbram(ahd, offset) + | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32); +} + +struct scb * +ahd_lookup_scb(struct ahd_softc *ahd, u_int tag) +{ + struct scb* scb; + + if (tag >= AHD_SCB_MAX) + return (NULL); + scb = ahd->scb_data.scbindex[tag]; + if (scb != NULL) + ahd_sync_scb(ahd, scb, + BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); + return (scb); +} + +static void +ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb) +{ + struct hardware_scb *q_hscb; + struct map_node *q_hscb_map; + uint32_t saved_hscb_busaddr; + + /* + * Our queuing method is a bit tricky. The card + * knows in advance which HSCB (by address) to download, + * and we can't disappoint it. To achieve this, the next + * HSCB to download is saved off in ahd->next_queued_hscb. + * When we are called to queue "an arbitrary scb", + * we copy the contents of the incoming HSCB to the one + * the sequencer knows about, swap HSCB pointers and + * finally assign the SCB to the tag indexed location + * in the scb_array. This makes sure that we can still + * locate the correct SCB by SCB_TAG. + */ + q_hscb = ahd->next_queued_hscb; + q_hscb_map = ahd->next_queued_hscb_map; + saved_hscb_busaddr = q_hscb->hscb_busaddr; + memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb)); + q_hscb->hscb_busaddr = saved_hscb_busaddr; + q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr; + + /* Now swap HSCB pointers. */ + ahd->next_queued_hscb = scb->hscb; + ahd->next_queued_hscb_map = scb->hscb_map; + scb->hscb = q_hscb; + scb->hscb_map = q_hscb_map; + + /* Now define the mapping from tag to SCB in the scbindex */ + ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb; +} + +/* + * Tell the sequencer about a new transaction to execute. + */ +void +ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb) +{ + ahd_swap_with_next_hscb(ahd, scb); + + if (SCBID_IS_NULL(SCB_GET_TAG(scb))) + panic("Attempt to queue invalid SCB tag %x\n", + SCB_GET_TAG(scb)); + + /* + * Keep a history of SCBs we've downloaded in the qinfifo. + */ + ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb); + ahd->qinfifonext++; + + if (scb->sg_count != 0) + ahd_setup_data_scb(ahd, scb); + else + ahd_setup_noxfer_scb(ahd, scb); + ahd_setup_scb_common(ahd, scb); + + /* + * Make sure our data is consistent from the + * perspective of the adapter. 
+ */ + ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_QUEUE) != 0) { + uint64_t host_dataptr; + + host_dataptr = ahd_le64toh(scb->hscb->dataptr); + printk("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n", + ahd_name(ahd), + SCB_GET_TAG(scb), scb->hscb->scsiid, + ahd_le32toh(scb->hscb->hscb_busaddr), + (u_int)((host_dataptr >> 32) & 0xFFFFFFFF), + (u_int)(host_dataptr & 0xFFFFFFFF), + ahd_le32toh(scb->hscb->datacnt)); + } +#endif + /* Tell the adapter about the newly queued SCB */ + ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); +} + +/************************** Interrupt Processing ******************************/ +static void +ahd_sync_qoutfifo(struct ahd_softc *ahd, int op) +{ + ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, + /*offset*/0, + /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op); +} + +static void +ahd_sync_tqinfifo(struct ahd_softc *ahd, int op) +{ +#ifdef AHD_TARGET_MODE + if ((ahd->flags & AHD_TARGETROLE) != 0) { + ahd_dmamap_sync(ahd, ahd->shared_data_dmat, + ahd->shared_data_map.dmamap, + ahd_targetcmd_offset(ahd, 0), + sizeof(struct target_cmd) * AHD_TMODE_CMDS, + op); + } +#endif +} + +/* + * See if the firmware has posted any completed commands + * into our in-core command complete fifos. + */ +#define AHD_RUN_QOUTFIFO 0x1 +#define AHD_RUN_TQINFIFO 0x2 +static u_int +ahd_check_cmdcmpltqueues(struct ahd_softc *ahd) +{ + u_int retval; + + retval = 0; + ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, + /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo), + /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD); + if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag + == ahd->qoutfifonext_valid_tag) + retval |= AHD_RUN_QOUTFIFO; +#ifdef AHD_TARGET_MODE + if ((ahd->flags & AHD_TARGETROLE) != 0 + && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) { + ahd_dmamap_sync(ahd, ahd->shared_data_dmat, + ahd->shared_data_map.dmamap, + ahd_targetcmd_offset(ahd, ahd->tqinfifofnext), + /*len*/sizeof(struct target_cmd), + BUS_DMASYNC_POSTREAD); + if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0) + retval |= AHD_RUN_TQINFIFO; + } +#endif + return (retval); +} + +/* + * Catch an interrupt from the adapter + */ +int +ahd_intr(struct ahd_softc *ahd) +{ + u_int intstat; + + if ((ahd->pause & INTEN) == 0) { + /* + * Our interrupt is not enabled on the chip + * and may be disabled for re-entrancy reasons, + * so just return. This is likely just a shared + * interrupt. + */ + return (0); + } + + /* + * Instead of directly reading the interrupt status register, + * infer the cause of the interrupt by checking our in-core + * completion queues. This avoids a costly PCI bus read in + * most cases. + */ + if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0 + && (ahd_check_cmdcmpltqueues(ahd) != 0)) + intstat = CMDCMPLT; + else + intstat = ahd_inb(ahd, INTSTAT); + + if ((intstat & INT_PEND) == 0) + return (0); + + if (intstat & CMDCMPLT) { + ahd_outb(ahd, CLRINT, CLRCMDINT); + + /* + * Ensure that the chip sees that we've cleared + * this interrupt before we walk the output fifo. + * Otherwise, we may, due to posted bus writes, + * clear the interrupt after we finish the scan, + * and after the sequencer has added new entries + * and asserted the interrupt again. + */ + if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { + if (ahd_is_paused(ahd)) { + /* + * Potentially lost SEQINT. + * If SEQINTCODE is non-zero, + * simulate the SEQINT. 
+ */ + if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT) + intstat |= SEQINT; + } + } else { + ahd_flush_device_writes(ahd); + } + ahd_run_qoutfifo(ahd); + ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++; + ahd->cmdcmplt_total++; +#ifdef AHD_TARGET_MODE + if ((ahd->flags & AHD_TARGETROLE) != 0) + ahd_run_tqinfifo(ahd, /*paused*/FALSE); +#endif + } + + /* + * Handle statuses that may invalidate our cached + * copy of INTSTAT separately. + */ + if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) { + /* Hot eject. Do nothing */ + } else if (intstat & HWERRINT) { + ahd_handle_hwerrint(ahd); + } else if ((intstat & (PCIINT|SPLTINT)) != 0) { + ahd->bus_intr(ahd); + } else { + + if ((intstat & SEQINT) != 0) + ahd_handle_seqint(ahd, intstat); + + if ((intstat & SCSIINT) != 0) + ahd_handle_scsiint(ahd, intstat); + } + return (1); +} + +/******************************** Private Inlines *****************************/ +static inline void +ahd_assert_atn(struct ahd_softc *ahd) +{ + ahd_outb(ahd, SCSISIGO, ATNO); +} + +/* + * Determine if the current connection has a packetized + * agreement. This does not necessarily mean that we + * are currently in a packetized transfer. We could + * just as easily be sending or receiving a message. + */ +static int +ahd_currently_packetized(struct ahd_softc *ahd) +{ + ahd_mode_state saved_modes; + int packetized; + + saved_modes = ahd_save_modes(ahd); + if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) { + /* + * The packetized bit refers to the last + * connection, not the current one. Check + * for non-zero LQISTATE instead. + */ + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + packetized = ahd_inb(ahd, LQISTATE) != 0; + } else { + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED; + } + ahd_restore_modes(ahd, saved_modes); + return (packetized); +} + +static inline int +ahd_set_active_fifo(struct ahd_softc *ahd) +{ + u_int active_fifo; + + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO; + switch (active_fifo) { + case 0: + case 1: + ahd_set_modes(ahd, active_fifo, active_fifo); + return (1); + default: + return (0); + } +} + +static inline void +ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl) +{ + ahd_busy_tcl(ahd, tcl, SCB_LIST_NULL); +} + +/* + * Determine whether the sequencer reported a residual + * for this SCB/transaction. 
+ */ +static inline void +ahd_update_residual(struct ahd_softc *ahd, struct scb *scb) +{ + uint32_t sgptr; + + sgptr = ahd_le32toh(scb->hscb->sgptr); + if ((sgptr & SG_STATUS_VALID) != 0) + ahd_calc_residual(ahd, scb); +} + +static inline void +ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb) +{ + uint32_t sgptr; + + sgptr = ahd_le32toh(scb->hscb->sgptr); + if ((sgptr & SG_STATUS_VALID) != 0) + ahd_handle_scb_status(ahd, scb); + else + ahd_done(ahd, scb); +} + + +/************************* Sequencer Execution Control ************************/ +/* + * Restart the sequencer program from address zero + */ +static void +ahd_restart(struct ahd_softc *ahd) +{ + + ahd_pause(ahd); + + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + + /* No more pending messages */ + ahd_clear_msg_state(ahd); + ahd_outb(ahd, SCSISIGO, 0); /* De-assert BSY */ + ahd_outb(ahd, MSG_OUT, NOP); /* No message to send */ + ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET); + ahd_outb(ahd, SEQINTCTL, 0); + ahd_outb(ahd, LASTPHASE, P_BUSFREE); + ahd_outb(ahd, SEQ_FLAGS, 0); + ahd_outb(ahd, SAVED_SCSIID, 0xFF); + ahd_outb(ahd, SAVED_LUN, 0xFF); + + /* + * Ensure that the sequencer's idea of TQINPOS + * matches our own. The sequencer increments TQINPOS + * only after it sees a DMA complete and a reset could + * occur before the increment leaving the kernel to believe + * the command arrived but the sequencer to not. + */ + ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); + + /* Always allow reselection */ + ahd_outb(ahd, SCSISEQ1, + ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); + ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); + + /* + * Clear any pending sequencer interrupt. It is no + * longer relevant since we're resetting the Program + * Counter. + */ + ahd_outb(ahd, CLRINT, CLRSEQINT); + + ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET); + ahd_unpause(ahd); +} + +static void +ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo) +{ + ahd_mode_state saved_modes; + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_FIFOS) != 0) + printk("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo); +#endif + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, fifo, fifo); + ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); + if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) + ahd_outb(ahd, CCSGCTL, CCSGRESET); + ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); + ahd_outb(ahd, SG_STATE, 0); + ahd_restore_modes(ahd, saved_modes); +} + +/************************* Input/Output Queues ********************************/ +/* + * Flush any completed commands that are sitting in the command + * complete queues down on the chip but have yet to be dma'ed back up. + */ +static void +ahd_flush_qoutfifo(struct ahd_softc *ahd) +{ + struct scb *scb; + ahd_mode_state saved_modes; + u_int saved_scbptr; + u_int ccscbctl; + u_int scbid; + u_int next_scbid; + + saved_modes = ahd_save_modes(ahd); + + /* + * Flush the good status FIFO for completed packetized commands. + */ + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + saved_scbptr = ahd_get_scbptr(ahd); + while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) { + u_int fifo_mode; + u_int i; + + scbid = ahd_inw(ahd, GSFIFO); + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + printk("%s: Warning - GSFIFO SCB %d invalid\n", + ahd_name(ahd), scbid); + continue; + } + /* + * Determine if this transaction is still active in + * any FIFO. If it is, we must flush that FIFO to + * the host before completing the command.
+ */ + fifo_mode = 0; +rescan_fifos: + for (i = 0; i < 2; i++) { + /* Toggle to the other mode. */ + fifo_mode ^= 1; + ahd_set_modes(ahd, fifo_mode, fifo_mode); + + if (ahd_scb_active_in_fifo(ahd, scb) == 0) + continue; + + ahd_run_data_fifo(ahd, scb); + + /* + * Running this FIFO may cause a CFG4DATA for + * this same transaction to assert in the other + * FIFO or a new snapshot SAVEPTRS interrupt + * in this FIFO. Even running a FIFO may not + * clear the transaction if we are still waiting + * for data to drain to the host. We must loop + * until the transaction is not active in either + * FIFO just to be sure. Reset our loop counter + * so we will visit both FIFOs again before + * declaring this transaction finished. We + * also delay a bit so that status has a chance + * to change before we look at this FIFO again. + */ + ahd_delay(200); + goto rescan_fifos; + } + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + ahd_set_scbptr(ahd, scbid); + if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0 + && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0 + || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR) + & SG_LIST_NULL) != 0)) { + u_int comp_head; + + /* + * The transfer completed with a residual. + * Place this SCB on the complete DMA list + * so that we update our in-core copy of the + * SCB before completing the command. + */ + ahd_outb(ahd, SCB_SCSI_STATUS, 0); + ahd_outb(ahd, SCB_SGPTR, + ahd_inb_scbram(ahd, SCB_SGPTR) + | SG_STATUS_VALID); + ahd_outw(ahd, SCB_TAG, scbid); + ahd_outw(ahd, SCB_NEXT_COMPLETE, SCB_LIST_NULL); + comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); + if (SCBID_IS_NULL(comp_head)) { + ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, scbid); + ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid); + } else { + u_int tail; + + tail = ahd_inw(ahd, COMPLETE_DMA_SCB_TAIL); + ahd_set_scbptr(ahd, tail); + ahd_outw(ahd, SCB_NEXT_COMPLETE, scbid); + ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid); + ahd_set_scbptr(ahd, scbid); + } + } else + ahd_complete_scb(ahd, scb); + } + ahd_set_scbptr(ahd, saved_scbptr); + + /* + * Setup for command channel portion of flush. + */ + ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); + + /* + * Wait for any inprogress DMA to complete and clear DMA state + * if this is for an SCB in the qinfifo. + */ + while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) { + + if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) { + if ((ccscbctl & ARRDONE) != 0) + break; + } else if ((ccscbctl & CCSCBDONE) != 0) + break; + ahd_delay(200); + } + /* + * We leave the sequencer to cleanup in the case of DMA's to + * update the qoutfifo. In all other cases (DMA's to the + * chip or a push of an SCB from the COMPLETE_DMA_SCB list), + * we disable the DMA engine so that the sequencer will not + * attempt to handle the DMA completion. + */ + if ((ccscbctl & CCSCBDIR) != 0 || (ccscbctl & ARRDONE) != 0) + ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN)); + + /* + * Complete any SCBs that just finished + * being DMA'ed into the qoutfifo. + */ + ahd_run_qoutfifo(ahd); + + saved_scbptr = ahd_get_scbptr(ahd); + /* + * Manually update/complete any completed SCBs that are waiting to be + * DMA'ed back up to the host. 
+ */ + scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); + while (!SCBID_IS_NULL(scbid)) { + uint8_t *hscb_ptr; + u_int i; + + ahd_set_scbptr(ahd, scbid); + next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + printk("%s: Warning - DMA-up and complete " + "SCB %d invalid\n", ahd_name(ahd), scbid); + continue; + } + hscb_ptr = (uint8_t *)scb->hscb; + for (i = 0; i < sizeof(struct hardware_scb); i++) + *hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i); + + ahd_complete_scb(ahd, scb); + scbid = next_scbid; + } + ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL); + ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL); + + scbid = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD); + while (!SCBID_IS_NULL(scbid)) { + + ahd_set_scbptr(ahd, scbid); + next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + printk("%s: Warning - Complete Qfrz SCB %d invalid\n", + ahd_name(ahd), scbid); + continue; + } + + ahd_complete_scb(ahd, scb); + scbid = next_scbid; + } + ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL); + + scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD); + while (!SCBID_IS_NULL(scbid)) { + + ahd_set_scbptr(ahd, scbid); + next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + printk("%s: Warning - Complete SCB %d invalid\n", + ahd_name(ahd), scbid); + continue; + } + + ahd_complete_scb(ahd, scb); + scbid = next_scbid; + } + ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL); + + /* + * Restore state. + */ + ahd_set_scbptr(ahd, saved_scbptr); + ahd_restore_modes(ahd, saved_modes); + ahd->flags |= AHD_UPDATE_PEND_CMDS; +} + +/* + * Determine if an SCB for a packetized transaction + * is active in a FIFO. + */ +static int +ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb) +{ + + /* + * The FIFO is only active for our transaction if + * the SCBPTR matches the SCB's ID and the firmware + * has installed a handler for the FIFO or we have + * a pending SAVEPTRS or CFG4DATA interrupt. + */ + if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb) + || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0 + && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0)) + return (0); + + return (1); +} + +/* + * Run a data fifo to completion for a transaction we know + * has completed across the SCSI bus (good status has been + * received). We are already set to the correct FIFO mode + * on entry to this routine. + * + * This function attempts to operate exactly as the firmware + * would when running this FIFO. Care must be taken to update + * this routine any time the firmware's FIFO algorithm is + * changed. + */ +static void +ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb) +{ + u_int seqintsrc; + + seqintsrc = ahd_inb(ahd, SEQINTSRC); + if ((seqintsrc & CFG4DATA) != 0) { + uint32_t datacnt; + uint32_t sgptr; + + /* + * Clear full residual flag. + */ + sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID; + ahd_outb(ahd, SCB_SGPTR, sgptr); + + /* + * Load datacnt and address. + */ + datacnt = ahd_inl_scbram(ahd, SCB_DATACNT); + if ((datacnt & AHD_DMA_LAST_SEG) != 0) { + sgptr |= LAST_SEG; + ahd_outb(ahd, SG_STATE, 0); + } else + ahd_outb(ahd, SG_STATE, LOADING_NEEDED); + ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR)); + ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK); + ahd_outb(ahd, SG_CACHE_PRE, sgptr); + ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN); + + /* + * Initialize Residual Fields. 
+ */ + ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24); + ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK); + + /* + * Mark the SCB as having a FIFO in use. + */ + ahd_outb(ahd, SCB_FIFO_USE_COUNT, + ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1); + + /* + * Install a "fake" handler for this FIFO. + */ + ahd_outw(ahd, LONGJMP_ADDR, 0); + + /* + * Notify the hardware that we have satisfied + * this sequencer interrupt. + */ + ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA); + } else if ((seqintsrc & SAVEPTRS) != 0) { + uint32_t sgptr; + uint32_t resid; + + if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) { + /* + * Snapshot Save Pointers. All that + * is necessary to clear the snapshot + * is a CLRCHN. + */ + goto clrchn; + } + + /* + * Disable S/G fetch so the DMA engine + * is available to future users. + */ + if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) + ahd_outb(ahd, CCSGCTL, 0); + ahd_outb(ahd, SG_STATE, 0); + + /* + * Flush the data FIFO. Strictly only + * necessary for Rev A parts. + */ + ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) | FIFOFLUSH); + + /* + * Calculate residual. + */ + sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); + resid = ahd_inl(ahd, SHCNT); + resid |= ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24; + ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid); + if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) { + /* + * Must back up to the correct S/G element. + * Typically this just means resetting our + * low byte to the offset in the SG_CACHE, + * but if we wrapped, we have to correct + * the other bytes of the sgptr too. + */ + if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0 + && (sgptr & 0x80) == 0) + sgptr -= 0x100; + sgptr &= ~0xFF; + sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW) + & SG_ADDR_MASK; + ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); + ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0); + } else if ((resid & AHD_SG_LEN_MASK) == 0) { + ahd_outb(ahd, SCB_RESIDUAL_SGPTR, + sgptr | SG_LIST_NULL); + } + /* + * Save Pointers. + */ + ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR)); + ahd_outl(ahd, SCB_DATACNT, resid); + ahd_outl(ahd, SCB_SGPTR, sgptr); + ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS); + ahd_outb(ahd, SEQIMODE, + ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS); + /* + * If the data is to the SCSI bus, we are + * done, otherwise wait for FIFOEMP. + */ + if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0) + goto clrchn; + } else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) { + uint32_t sgptr; + uint64_t data_addr; + uint32_t data_len; + u_int dfcntrl; + + /* + * Disable S/G fetch so the DMA engine + * is available to future users. We won't + * be using the DMA engine to load segments. + */ + if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) { + ahd_outb(ahd, CCSGCTL, 0); + ahd_outb(ahd, SG_STATE, LOADING_NEEDED); + } + + /* + * Wait for the DMA engine to notice that the + * host transfer is enabled and that there is + * space in the S/G FIFO for new segments before + * loading more segments. + */ + if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) != 0 + && (ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) { + + /* + * Determine the offset of the next S/G + * element to load.
+ */ + sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); + sgptr &= SG_PTR_MASK; + if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { + struct ahd_dma64_seg *sg; + + sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); + data_addr = sg->addr; + data_len = sg->len; + sgptr += sizeof(*sg); + } else { + struct ahd_dma_seg *sg; + + sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); + data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK; + data_addr <<= 8; + data_addr |= sg->addr; + data_len = sg->len; + sgptr += sizeof(*sg); + } + + /* + * Update residual information. + */ + ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, data_len >> 24); + ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); + + /* + * Load the S/G. + */ + if (data_len & AHD_DMA_LAST_SEG) { + sgptr |= LAST_SEG; + ahd_outb(ahd, SG_STATE, 0); + } + ahd_outq(ahd, HADDR, data_addr); + ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK); + ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF); + + /* + * Advertise the segment to the hardware. + */ + dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN; + if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) { + /* + * Use SCSIENWRDIS so that SCSIEN + * is never modified by this + * operation. + */ + dfcntrl |= SCSIENWRDIS; + } + ahd_outb(ahd, DFCNTRL, dfcntrl); + } + } else if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG_DONE) != 0) { + + /* + * Transfer completed to the end of SG list + * and has flushed to the host. + */ + ahd_outb(ahd, SCB_SGPTR, + ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL); + goto clrchn; + } else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) { +clrchn: + /* + * Clear any handler for this FIFO, decrement + * the FIFO use count for the SCB, and release + * the FIFO. + */ + ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); + ahd_outb(ahd, SCB_FIFO_USE_COUNT, + ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1); + ahd_outb(ahd, DFFSXFRCTL, CLRCHN); + } +} + +/* + * Look for entries in the QoutFIFO that have completed. + * The valid_tag completion field indicates the validity + * of the entry - the valid value toggles each time through + * the queue. We use the sg_status field in the completion + * entry to avoid referencing the hscb if the completion + * occurred with no errors and no residual. sg_status is + * a copy of the first byte (little endian) of the sgptr + * hscb field. + */ +static void +ahd_run_qoutfifo(struct ahd_softc *ahd) +{ + struct ahd_completion *completion; + struct scb *scb; + u_int scb_index; + + if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0) + panic("ahd_run_qoutfifo recursion"); + ahd->flags |= AHD_RUNNING_QOUTFIFO; + ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD); + for (;;) { + completion = &ahd->qoutfifo[ahd->qoutfifonext]; + + if (completion->valid_tag != ahd->qoutfifonext_valid_tag) + break; + + scb_index = ahd_le16toh(completion->tag); + scb = ahd_lookup_scb(ahd, scb_index); + if (scb == NULL) { + printk("%s: WARNING no command for scb %d " + "(cmdcmplt)\nQOUTPOS = %d\n", + ahd_name(ahd), scb_index, + ahd->qoutfifonext); + ahd_dump_card_state(ahd); + } else if ((completion->sg_status & SG_STATUS_VALID) != 0) { + ahd_handle_scb_status(ahd, scb); + } else { + ahd_done(ahd, scb); + } + + ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1); + if (ahd->qoutfifonext == 0) + ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID; + } + ahd->flags &= ~AHD_RUNNING_QOUTFIFO; +} + +/************************* Interrupt Handling *********************************/ +static void +ahd_handle_hwerrint(struct ahd_softc *ahd) +{ + /* + * Some catastrophic hardware error has occurred. 
+ * Print it for the user and disable the controller. + */ + int i; + int error; + + error = ahd_inb(ahd, ERROR); + for (i = 0; i < num_errors; i++) { + if ((error & ahd_hard_errors[i].errno) != 0) + printk("%s: hwerrint, %s\n", + ahd_name(ahd), ahd_hard_errors[i].errmesg); + } + + ahd_dump_card_state(ahd); + panic("BRKADRINT"); + + /* Tell everyone that this HBA is no longer available */ + ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS, + CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, + CAM_NO_HBA); + + /* Tell the system that this controller has gone away. */ + ahd_free(ahd); +} + +#ifdef AHD_DEBUG +static void +ahd_dump_sglist(struct scb *scb) +{ + int i; + + if (scb->sg_count > 0) { + if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) { + struct ahd_dma64_seg *sg_list; + + sg_list = (struct ahd_dma64_seg*)scb->sg_list; + for (i = 0; i < scb->sg_count; i++) { + uint64_t addr; + + addr = ahd_le64toh(sg_list[i].addr); + printk("sg[%d] - Addr 0x%x%x : Length %d%s\n", + i, + (uint32_t)((addr >> 32) & 0xFFFFFFFF), + (uint32_t)(addr & 0xFFFFFFFF), + sg_list[i].len & AHD_SG_LEN_MASK, + (sg_list[i].len & AHD_DMA_LAST_SEG) + ? " Last" : ""); + } + } else { + struct ahd_dma_seg *sg_list; + + sg_list = (struct ahd_dma_seg*)scb->sg_list; + for (i = 0; i < scb->sg_count; i++) { + uint32_t len; + + len = ahd_le32toh(sg_list[i].len); + printk("sg[%d] - Addr 0x%x%x : Length %d%s\n", + i, + (len & AHD_SG_HIGH_ADDR_MASK) >> 24, + ahd_le32toh(sg_list[i].addr), + len & AHD_SG_LEN_MASK, + len & AHD_DMA_LAST_SEG ? " Last" : ""); + } + } + } +} +#endif /* AHD_DEBUG */ + +static void +ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat) +{ + u_int seqintcode; + + /* + * Save the sequencer interrupt code and clear the SEQINT + * bit. We will unpause the sequencer, if appropriate, + * after servicing the request. + */ + seqintcode = ahd_inb(ahd, SEQINTCODE); + ahd_outb(ahd, CLRINT, CLRSEQINT); + if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { + /* + * Unpause the sequencer and let it clear + * SEQINT by writing NO_SEQINT to it. This + * will cause the sequencer to be paused again, + * which is the expected state of this routine. + */ + ahd_unpause(ahd); + while (!ahd_is_paused(ahd)) + ; + ahd_outb(ahd, CLRINT, CLRSEQINT); + } + ahd_update_modes(ahd); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MISC) != 0) + printk("%s: Handle Seqint Called for code %d\n", + ahd_name(ahd), seqintcode); +#endif + switch (seqintcode) { + case ENTERING_NONPACK: + { + struct scb *scb; + u_int scbid; + + AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), + ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + /* + * Somehow need to know if this + * is from a selection or reselection. + * From that, we can determine target + * ID so we at least have an I_T nexus. + */ + } else { + ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid); + ahd_outb(ahd, SAVED_LUN, scb->hscb->lun); + ahd_outb(ahd, SEQ_FLAGS, 0x0); + } + if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0 + && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) { + /* + * Phase change after read stream with + * CRC error with P0 asserted on last + * packet. 
+ */ +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) + printk("%s: Assuming LQIPHASE_NLQ with " + "P0 assertion\n", ahd_name(ahd)); +#endif + } +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) + printk("%s: Entering NONPACK\n", ahd_name(ahd)); +#endif + break; + } + case INVALID_SEQINT: + printk("%s: Invalid Sequencer interrupt occurred, " + "resetting channel.\n", + ahd_name(ahd)); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) + ahd_dump_card_state(ahd); +#endif + ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); + break; + case STATUS_OVERRUN: + { + struct scb *scb; + u_int scbid; + + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + if (scb != NULL) + ahd_print_path(ahd, scb); + else + printk("%s: ", ahd_name(ahd)); + printk("SCB %d Packetized Status Overrun", scbid); + ahd_dump_card_state(ahd); + ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); + break; + } + case CFG4ISTAT_INTR: + { + struct scb *scb; + u_int scbid; + + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + ahd_dump_card_state(ahd); + printk("CFG4ISTAT: Free SCB %d referenced", scbid); + panic("For safety"); + } + ahd_outq(ahd, HADDR, scb->sense_busaddr); + ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE); + ahd_outb(ahd, HCNT + 2, 0); + ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG); + ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN); + break; + } + case ILLEGAL_PHASE: + { + u_int bus_phase; + + bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; + printk("%s: ILLEGAL_PHASE 0x%x\n", + ahd_name(ahd), bus_phase); + + switch (bus_phase) { + case P_DATAOUT: + case P_DATAIN: + case P_DATAOUT_DT: + case P_DATAIN_DT: + case P_MESGOUT: + case P_STATUS: + case P_MESGIN: + ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); + printk("%s: Issued Bus Reset.\n", ahd_name(ahd)); + break; + case P_COMMAND: + { + struct ahd_devinfo devinfo; + struct scb *scb; + u_int scbid; + + /* + * If a target takes us into the command phase + * assume that it has been externally reset and + * has thus lost our previous packetized negotiation + * agreement. Since we have not sent an identify + * message and may not have fully qualified the + * connection, we change our command to TUR, assert + * ATN and ABORT the task when we go to message in + * phase. The OSM will see the REQUEUE_REQUEST + * status and retry the command. + */ + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + printk("Invalid phase with no valid SCB. 
" + "Resetting bus.\n"); + ahd_reset_channel(ahd, 'A', + /*Initiate Reset*/TRUE); + break; + } + ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb), + SCB_GET_TARGET(ahd, scb), + SCB_GET_LUN(scb), + SCB_GET_CHANNEL(ahd, scb), + ROLE_INITIATOR); + ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_ACTIVE, /*paused*/TRUE); + ahd_set_syncrate(ahd, &devinfo, /*period*/0, + /*offset*/0, /*ppr_options*/0, + AHD_TRANS_ACTIVE, /*paused*/TRUE); + /* Hand-craft TUR command */ + ahd_outb(ahd, SCB_CDB_STORE, 0); + ahd_outb(ahd, SCB_CDB_STORE+1, 0); + ahd_outb(ahd, SCB_CDB_STORE+2, 0); + ahd_outb(ahd, SCB_CDB_STORE+3, 0); + ahd_outb(ahd, SCB_CDB_STORE+4, 0); + ahd_outb(ahd, SCB_CDB_STORE+5, 0); + ahd_outb(ahd, SCB_CDB_LEN, 6); + scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE); + scb->hscb->control |= MK_MESSAGE; + ahd_outb(ahd, SCB_CONTROL, scb->hscb->control); + ahd_outb(ahd, MSG_OUT, HOST_MSG); + ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid); + /* + * The lun is 0, regardless of the SCB's lun + * as we have not sent an identify message. + */ + ahd_outb(ahd, SAVED_LUN, 0); + ahd_outb(ahd, SEQ_FLAGS, 0); + ahd_assert_atn(ahd); + scb->flags &= ~SCB_PACKETIZED; + scb->flags |= SCB_ABORT|SCB_EXTERNAL_RESET; + ahd_freeze_devq(ahd, scb); + ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); + ahd_freeze_scb(scb); + + /* Notify XPT */ + ahd_send_async(ahd, devinfo.channel, devinfo.target, + CAM_LUN_WILDCARD, AC_SENT_BDR); + + /* + * Allow the sequencer to continue with + * non-pack processing. + */ + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT); + if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { + ahd_outb(ahd, CLRLQOINT1, 0); + } +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { + ahd_print_path(ahd, scb); + printk("Unexpected command phase from " + "packetized target\n"); + } +#endif + break; + } + } + break; + } + case CFG4OVERRUN: + { + struct scb *scb; + u_int scb_index; + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { + printk("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd), + ahd_inb(ahd, MODE_PTR)); + } +#endif + scb_index = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scb_index); + if (scb == NULL) { + /* + * Attempt to transfer to an SCB that is + * not outstanding. + */ + ahd_assert_atn(ahd); + ahd_outb(ahd, MSG_OUT, HOST_MSG); + ahd->msgout_buf[0] = ABORT_TASK; + ahd->msgout_len = 1; + ahd->msgout_index = 0; + ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; + /* + * Clear status received flag to prevent any + * attempt to complete this bogus SCB. + */ + ahd_outb(ahd, SCB_CONTROL, + ahd_inb_scbram(ahd, SCB_CONTROL) + & ~STATUS_RCVD); + } + break; + } + case DUMP_CARD_STATE: + { + ahd_dump_card_state(ahd); + break; + } + case PDATA_REINIT: + { +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { + printk("%s: PDATA_REINIT - DFCNTRL = 0x%x " + "SG_CACHE_SHADOW = 0x%x\n", + ahd_name(ahd), ahd_inb(ahd, DFCNTRL), + ahd_inb(ahd, SG_CACHE_SHADOW)); + } +#endif + ahd_reinitialize_dataptrs(ahd); + break; + } + case HOST_MSG_LOOP: + { + struct ahd_devinfo devinfo; + + /* + * The sequencer has encountered a message phase + * that requires host assistance for completion. + * While handling the message phase(s), we will be + * notified by the sequencer after each byte is + * transferred so we can track bus phase changes. + * + * If this is the first time we've seen a HOST_MSG_LOOP + * interrupt, initialize the state of the host message + * loop. 
+ */ + ahd_fetch_devinfo(ahd, &devinfo); + if (ahd->msg_type == MSG_TYPE_NONE) { + struct scb *scb; + u_int scb_index; + u_int bus_phase; + + bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; + if (bus_phase != P_MESGIN + && bus_phase != P_MESGOUT) { + printk("ahd_intr: HOST_MSG_LOOP bad " + "phase 0x%x\n", bus_phase); + /* + * Probably transitioned to bus free before + * we got here. Just punt the message. + */ + ahd_dump_card_state(ahd); + ahd_clear_intstat(ahd); + ahd_restart(ahd); + return; + } + + scb_index = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scb_index); + if (devinfo.role == ROLE_INITIATOR) { + if (bus_phase == P_MESGOUT) + ahd_setup_initiator_msgout(ahd, + &devinfo, + scb); + else { + ahd->msg_type = + MSG_TYPE_INITIATOR_MSGIN; + ahd->msgin_index = 0; + } + } +#ifdef AHD_TARGET_MODE + else { + if (bus_phase == P_MESGOUT) { + ahd->msg_type = + MSG_TYPE_TARGET_MSGOUT; + ahd->msgin_index = 0; + } else + ahd_setup_target_msgin(ahd, + &devinfo, + scb); + } +#endif + } + + ahd_handle_message_phase(ahd); + break; + } + case NO_MATCH: + { + /* Ensure we don't leave the selection hardware on */ + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); + + printk("%s:%c:%d: no active SCB for reconnecting " + "target - issuing BUS DEVICE RESET\n", + ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4); + printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " + "REG0 == 0x%x ACCUM = 0x%x\n", + ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN), + ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM)); + printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " + "SINDEX == 0x%x\n", + ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd), + ahd_find_busy_tcl(ahd, + BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID), + ahd_inb(ahd, SAVED_LUN))), + ahd_inw(ahd, SINDEX)); + printk("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " + "SCB_CONTROL == 0x%x\n", + ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID), + ahd_inb_scbram(ahd, SCB_LUN), + ahd_inb_scbram(ahd, SCB_CONTROL)); + printk("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n", + ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI)); + printk("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0)); + printk("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0)); + ahd_dump_card_state(ahd); + ahd->msgout_buf[0] = TARGET_RESET; + ahd->msgout_len = 1; + ahd->msgout_index = 0; + ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; + ahd_outb(ahd, MSG_OUT, HOST_MSG); + ahd_assert_atn(ahd); + break; + } + case PROTO_VIOLATION: + { + ahd_handle_proto_violation(ahd); + break; + } + case IGN_WIDE_RES: + { + struct ahd_devinfo devinfo; + + ahd_fetch_devinfo(ahd, &devinfo); + ahd_handle_ign_wide_residue(ahd, &devinfo); + break; + } + case BAD_PHASE: + { + u_int lastphase; + + lastphase = ahd_inb(ahd, LASTPHASE); + printk("%s:%c:%d: unknown scsi bus phase %x, " + "lastphase = 0x%x. Attempting to continue\n", + ahd_name(ahd), 'A', + SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), + lastphase, ahd_inb(ahd, SCSISIGI)); + break; + } + case MISSED_BUSFREE: + { + u_int lastphase; + + lastphase = ahd_inb(ahd, LASTPHASE); + printk("%s:%c:%d: Missed busfree. " + "Lastphase = 0x%x, Curphase = 0x%x\n", + ahd_name(ahd), 'A', + SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), + lastphase, ahd_inb(ahd, SCSISIGI)); + ahd_restart(ahd); + return; + } + case DATA_OVERRUN: + { + /* + * When the sequencer detects an overrun, it + * places the controller in "BITBUCKET" mode + * and allows the target to complete its transfer. 
+ * Unfortunately, none of the counters get updated + * when the controller is in this mode, so we have + * no way of knowing how large the overrun was. + */ + struct scb *scb; + u_int scbindex; +#ifdef AHD_DEBUG + u_int lastphase; +#endif + + scbindex = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbindex); +#ifdef AHD_DEBUG + lastphase = ahd_inb(ahd, LASTPHASE); + if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { + ahd_print_path(ahd, scb); + printk("data overrun detected %s. Tag == 0x%x.\n", + ahd_lookup_phase_entry(lastphase)->phasemsg, + SCB_GET_TAG(scb)); + ahd_print_path(ahd, scb); + printk("%s seen Data Phase. Length = %ld. " + "NumSGs = %d.\n", + ahd_inb(ahd, SEQ_FLAGS) & DPHASE + ? "Have" : "Haven't", + ahd_get_transfer_length(scb), scb->sg_count); + ahd_dump_sglist(scb); + } +#endif + + /* + * Set this and it will take effect when the + * target does a command complete. + */ + ahd_freeze_devq(ahd, scb); + ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); + ahd_freeze_scb(scb); + break; + } + case MKMSG_FAILED: + { + struct ahd_devinfo devinfo; + struct scb *scb; + u_int scbid; + + ahd_fetch_devinfo(ahd, &devinfo); + printk("%s:%c:%d:%d: Attempt to issue message failed\n", + ahd_name(ahd), devinfo.channel, devinfo.target, + devinfo.lun); + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + if (scb != NULL + && (scb->flags & SCB_RECOVERY_SCB) != 0) + /* + * Ensure that we didn't put a second instance of this + * SCB into the QINFIFO. + */ + ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), + SCB_GET_CHANNEL(ahd, scb), + SCB_GET_LUN(scb), SCB_GET_TAG(scb), + ROLE_INITIATOR, /*status*/0, + SEARCH_REMOVE); + ahd_outb(ahd, SCB_CONTROL, + ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE); + break; + } + case TASKMGMT_FUNC_COMPLETE: + { + u_int scbid; + struct scb *scb; + + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + if (scb != NULL) { + u_int lun; + u_int tag; + cam_status error; + + ahd_print_path(ahd, scb); + printk("Task Management Func 0x%x Complete\n", + scb->hscb->task_management); + lun = CAM_LUN_WILDCARD; + tag = SCB_LIST_NULL; + + switch (scb->hscb->task_management) { + case SIU_TASKMGMT_ABORT_TASK: + tag = SCB_GET_TAG(scb); + fallthrough; + case SIU_TASKMGMT_ABORT_TASK_SET: + case SIU_TASKMGMT_CLEAR_TASK_SET: + lun = scb->hscb->lun; + error = CAM_REQ_ABORTED; + ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), + 'A', lun, tag, ROLE_INITIATOR, + error); + break; + case SIU_TASKMGMT_LUN_RESET: + lun = scb->hscb->lun; + fallthrough; + case SIU_TASKMGMT_TARGET_RESET: + { + struct ahd_devinfo devinfo; + + ahd_scb_devinfo(ahd, &devinfo, scb); + error = CAM_BDR_SENT; + ahd_handle_devreset(ahd, &devinfo, lun, + CAM_BDR_SENT, + lun != CAM_LUN_WILDCARD + ? "Lun Reset" + : "Target Reset", + /*verbose_level*/0); + break; + } + default: + panic("Unexpected TaskMgmt Func\n"); + break; + } + } + break; + } + case TASKMGMT_CMD_CMPLT_OKAY: + { + u_int scbid; + struct scb *scb; + + /* + * An ABORT TASK TMF failed to be delivered before + * the targeted command completed normally. + */ + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + if (scb != NULL) { + /* + * Remove the second instance of this SCB from + * the QINFIFO if it is still there. + */ + ahd_print_path(ahd, scb); + printk("SCB completes before TMF\n"); + /* + * Handle losing the race. Wait until any + * current selection completes. 
We will then + * set the TMF back to zero in this SCB so that + * the sequencer doesn't bother to issue another + * sequencer interrupt for its completion. + */ + while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0 + && (ahd_inb(ahd, SSTAT0) & SELDO) == 0 + && (ahd_inb(ahd, SSTAT1) & SELTO) == 0) + ; + ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0); + ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), + SCB_GET_CHANNEL(ahd, scb), + SCB_GET_LUN(scb), SCB_GET_TAG(scb), + ROLE_INITIATOR, /*status*/0, + SEARCH_REMOVE); + } + break; + } + case TRACEPOINT0: + case TRACEPOINT1: + case TRACEPOINT2: + case TRACEPOINT3: + printk("%s: Tracepoint %d\n", ahd_name(ahd), + seqintcode - TRACEPOINT0); + break; + case NO_SEQINT: + break; + case SAW_HWERR: + ahd_handle_hwerrint(ahd); + break; + default: + printk("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd), + seqintcode); + break; + } + /* + * The sequencer is paused immediately on + * a SEQINT, so we should restart it when + * we're done. + */ + ahd_unpause(ahd); +} + +static void +ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat) +{ + struct scb *scb; + u_int status0; + u_int status3; + u_int status; + u_int lqistat1; + u_int lqostat0; + u_int scbid; + u_int busfreetime; + + ahd_update_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + + status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR); + status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO); + status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); + lqistat1 = ahd_inb(ahd, LQISTAT1); + lqostat0 = ahd_inb(ahd, LQOSTAT0); + busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; + + /* + * Ignore external resets after a bus reset. + */ + if (((status & SCSIRSTI) != 0) && (ahd->flags & AHD_BUS_RESET_ACTIVE)) { + ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); + return; + } + + /* + * Clear bus reset flag + */ + ahd->flags &= ~AHD_BUS_RESET_ACTIVE; + + if ((status0 & (SELDI|SELDO)) != 0) { + u_int simode0; + + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + simode0 = ahd_inb(ahd, SIMODE0); + status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + } + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + if (scb != NULL + && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) + scb = NULL; + + if ((status0 & IOERR) != 0) { + u_int now_lvd; + + now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40; + printk("%s: Transceiver State Has Changed to %s mode\n", + ahd_name(ahd), now_lvd ? "LVD" : "SE"); + ahd_outb(ahd, CLRSINT0, CLRIOERR); + /* + * A change in I/O mode is equivalent to a bus reset. + */ + ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); + ahd_pause(ahd); + ahd_setup_iocell_workaround(ahd); + ahd_unpause(ahd); + } else if ((status0 & OVERRUN) != 0) { + + printk("%s: SCSI offset overrun detected. Resetting bus.\n", + ahd_name(ahd)); + ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); + } else if ((status & SCSIRSTI) != 0) { + + printk("%s: Someone reset channel A\n", ahd_name(ahd)); + ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE); + } else if ((status & SCSIPERR) != 0) { + + /* Make sure the sequencer is in a safe location. 
*/ + ahd_clear_critical_section(ahd); + + ahd_handle_transmission_error(ahd); + } else if (lqostat0 != 0) { + + printk("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0); + ahd_outb(ahd, CLRLQOINT0, lqostat0); + if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) + ahd_outb(ahd, CLRLQOINT1, 0); + } else if ((status & SELTO) != 0) { + /* Stop the selection */ + ahd_outb(ahd, SCSISEQ0, 0); + + /* Make sure the sequencer is in a safe location. */ + ahd_clear_critical_section(ahd); + + /* No more pending messages */ + ahd_clear_msg_state(ahd); + + /* Clear interrupt state */ + ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); + + /* + * Although the driver does not care about the + * 'Selection in Progress' status bit, the busy + * LED does. SELINGO is only cleared by a successful + * selection, so we must manually clear it to ensure + * the LED turns off just in case no future successful + * selections occur (e.g. no devices on the bus). + */ + ahd_outb(ahd, CLRSINT0, CLRSELINGO); + + scbid = ahd_inw(ahd, WAITING_TID_HEAD); + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + printk("%s: ahd_intr - referenced scb not " + "valid during SELTO scb(0x%x)\n", + ahd_name(ahd), scbid); + ahd_dump_card_state(ahd); + } else { + struct ahd_devinfo devinfo; +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_SELTO) != 0) { + ahd_print_path(ahd, scb); + printk("Saw Selection Timeout for SCB 0x%x\n", + scbid); + } +#endif + ahd_scb_devinfo(ahd, &devinfo, scb); + ahd_set_transaction_status(scb, CAM_SEL_TIMEOUT); + ahd_freeze_devq(ahd, scb); + + /* + * Cancel any pending transactions on the device + * now that it seems to be missing. This will + * also revert us to async/narrow transfers until + * we can renegotiate with the device. + */ + ahd_handle_devreset(ahd, &devinfo, + CAM_LUN_WILDCARD, + CAM_SEL_TIMEOUT, + "Selection Timeout", + /*verbose_level*/1); + } + ahd_outb(ahd, CLRINT, CLRSCSIINT); + ahd_iocell_first_selection(ahd); + ahd_unpause(ahd); + } else if ((status0 & (SELDI|SELDO)) != 0) { + + ahd_iocell_first_selection(ahd); + ahd_unpause(ahd); + } else if (status3 != 0) { + printk("%s: SCSI Cell parity error SSTAT3 == 0x%x\n", + ahd_name(ahd), status3); + ahd_outb(ahd, CLRSINT3, status3); + } else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) { + + /* Make sure the sequencer is in a safe location. */ + ahd_clear_critical_section(ahd); + + ahd_handle_lqiphase_error(ahd, lqistat1); + } else if ((lqistat1 & LQICRCI_NLQ) != 0) { + /* + * This status can be delayed during some + * streaming operations. The SCSIPHASE + * handler has already dealt with this case + * so just clear the error. + */ + ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ); + } else if ((status & BUSFREE) != 0 + || (lqistat1 & LQOBUSFREE) != 0) { + u_int lqostat1; + int restart; + int clear_fifo; + int packetized; + u_int mode; + + /* + * Clear our selection hardware as soon as possible. + * We may have an entry in the waiting Q for this target, + * that is affected by this busfree and we don't want to + * go about selecting the target while we handle the event. + */ + ahd_outb(ahd, SCSISEQ0, 0); + + /* Make sure the sequencer is in a safe location. */ + ahd_clear_critical_section(ahd); + + /* + * Determine what we were up to at the time of + * the busfree. + */ + mode = AHD_MODE_SCSI; + busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; + lqostat1 = ahd_inb(ahd, LQOSTAT1); + switch (busfreetime) { + case BUSFREE_DFF0: + case BUSFREE_DFF1: + { + mode = busfreetime == BUSFREE_DFF0 + ?
AHD_MODE_DFF0 : AHD_MODE_DFF1; + ahd_set_modes(ahd, mode, mode); + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + printk("%s: Invalid SCB %d in DFF%d " + "during unexpected busfree\n", + ahd_name(ahd), scbid, mode); + packetized = 0; + } else + packetized = (scb->flags & SCB_PACKETIZED) != 0; + clear_fifo = 1; + break; + } + case BUSFREE_LQO: + clear_fifo = 0; + packetized = 1; + break; + default: + clear_fifo = 0; + packetized = (lqostat1 & LQOBUSFREE) != 0; + if (!packetized + && ahd_inb(ahd, LASTPHASE) == P_BUSFREE + && (ahd_inb(ahd, SSTAT0) & SELDI) == 0 + && ((ahd_inb(ahd, SSTAT0) & SELDO) == 0 + || (ahd_inb(ahd, SCSISEQ0) & ENSELO) == 0)) + /* + * Assume packetized if we are not + * on the bus in a non-packetized + * capacity and any pending selection + * was a packetized selection. + */ + packetized = 1; + break; + } + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MISC) != 0) + printk("Saw Busfree. Busfreetime = 0x%x.\n", + busfreetime); +#endif + /* + * Busfrees that occur in non-packetized phases are + * handled by the nonpkt_busfree handler. + */ + if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) { + restart = ahd_handle_pkt_busfree(ahd, busfreetime); + } else { + packetized = 0; + restart = ahd_handle_nonpkt_busfree(ahd); + } + /* + * Clear the busfree interrupt status. The setting of + * the interrupt is a pulse, so in a perfect world, we + * would not need to muck with the ENBUSFREE logic. This + * would ensure that if the bus moves on to another + * connection, busfree protection is still in force. If + * BUSFREEREV is broken, however, we must manually clear + * the ENBUSFREE if the busfree occurred during a non-pack + * connection so that we don't get false positives during + * future, packetized, connections. + */ + ahd_outb(ahd, CLRSINT1, CLRBUSFREE); + if (packetized == 0 + && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0) + ahd_outb(ahd, SIMODE1, + ahd_inb(ahd, SIMODE1) & ~ENBUSFREE); + + if (clear_fifo) + ahd_clear_fifo(ahd, mode); + + ahd_clear_msg_state(ahd); + ahd_outb(ahd, CLRINT, CLRSCSIINT); + if (restart) { + ahd_restart(ahd); + } else { + ahd_unpause(ahd); + } + } else { + printk("%s: Missing case in ahd_handle_scsiint. status = %x\n", + ahd_name(ahd), status); + ahd_dump_card_state(ahd); + ahd_clear_intstat(ahd); + ahd_unpause(ahd); + } +} + +static void +ahd_handle_transmission_error(struct ahd_softc *ahd) +{ + struct scb *scb; + u_int scbid; + u_int lqistat1; + u_int msg_out; + u_int curphase; + u_int lastphase; + u_int perrdiag; + u_int cur_col; + int silent; + + scb = NULL; + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ); + ahd_inb(ahd, LQISTAT2); + if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0 + && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) { + u_int lqistate; + + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + lqistate = ahd_inb(ahd, LQISTATE); + if ((lqistate >= 0x1E && lqistate <= 0x24) + || (lqistate == 0x29)) { +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { + printk("%s: NLQCRC found via LQISTATE\n", + ahd_name(ahd)); + } +#endif + lqistat1 |= LQICRCI_NLQ; + } + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + } + + ahd_outb(ahd, CLRLQIINT1, lqistat1); + lastphase = ahd_inb(ahd, LASTPHASE); + curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; + perrdiag = ahd_inb(ahd, PERRDIAG); + msg_out = INITIATOR_ERROR; + ahd_outb(ahd, CLRSINT1, CLRSCSIPERR); + + /* + * Try to find the SCB associated with this error. 
+ */ + silent = FALSE; + if (lqistat1 == 0 + || (lqistat1 & LQICRCI_NLQ) != 0) { + if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0) + ahd_set_active_fifo(ahd); + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + if (scb != NULL && SCB_IS_SILENT(scb)) + silent = TRUE; + } + + cur_col = 0; + if (silent == FALSE) { + printk("%s: Transmission error detected\n", ahd_name(ahd)); + ahd_lqistat1_print(lqistat1, &cur_col, 50); + ahd_lastphase_print(lastphase, &cur_col, 50); + ahd_scsisigi_print(curphase, &cur_col, 50); + ahd_perrdiag_print(perrdiag, &cur_col, 50); + printk("\n"); + ahd_dump_card_state(ahd); + } + + if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) { + if (silent == FALSE) { + printk("%s: Gross protocol error during incoming " + "packet. lqistat1 == 0x%x. Resetting bus.\n", + ahd_name(ahd), lqistat1); + } + ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); + return; + } else if ((lqistat1 & LQICRCI_LQ) != 0) { + /* + * A CRC error has been detected on an incoming LQ. + * The bus is currently hung on the last ACK. + * Hit LQIRETRY to release the last ack, and + * wait for the sequencer to determine that ATNO + * is asserted while in message out to take us + * to our host message loop. No NONPACKREQ or + * LQIPHASE type errors will occur in this + * scenario. After this first LQIRETRY, the LQI + * manager will be in ISELO where it will + * happily sit until another packet phase begins. + * Unexpected bus free detection is enabled + * through any phases that occur after we release + * this last ack until the LQI manager sees a + * packet phase. This implies we may have to + * ignore a perfectly valid "unexpected busfree" + * after our "initiator detected error" message is + * sent. A busfree is the expected response after + * we tell the target that its L_Q was corrupted. + * (SPI4R09 10.7.3.3.3) + */ + ahd_outb(ahd, LQCTL2, LQIRETRY); + printk("LQIRetry for LQICRCI_LQ to release ACK\n"); + } else if ((lqistat1 & LQICRCI_NLQ) != 0) { + /* + * We detected a CRC error in a NON-LQ packet. + * The hardware has varying behavior in this situation + * depending on whether this packet was part of a + * stream or not. + * + * PKT by PKT mode: + * The hardware has already acked the complete packet. + * If the target honors our outstanding ATN condition, + * we should be (or soon will be) in MSGOUT phase. + * This will trigger the LQIPHASE_LQ status bit as the + * hardware was expecting another LQ. Unexpected + * busfree detection is enabled. Once LQIPHASE_LQ is + * true (first entry into host message loop is much + * the same), we must clear LQIPHASE_LQ and hit + * LQIRETRY so the hardware is ready to handle + * a future LQ. NONPACKREQ will not be asserted again + * once we hit LQIRETRY until another packet is + * processed. The target may either go busfree + * or start another packet in response to our message. + * + * Read Streaming P0 asserted: + * If we raise ATN and the target completes the entire + * stream (P0 asserted during the last packet), the + * hardware will ack all data and return to the ISTART + * state. When the target responds to our ATN condition, + * LQIPHASE_LQ will be asserted. We should respond to + * this with an LQIRETRY to prepare for any future + * packets. NONPACKREQ will not be asserted again + * once we hit LQIRETRY until another packet is + * processed. The target may either go busfree or + * start another packet in response to our message. + * Busfree detection is enabled.
+ * + * Read Streaming P0 not asserted: + * If we raise ATN and the target transitions to + * MSGOUT in or after a packet where P0 is not + * asserted, the hardware will assert LQIPHASE_NLQ. + * We should respond to the LQIPHASE_NLQ with an + * LQIRETRY. Should the target stay in a non-pkt + * phase after we send our message, the hardware + * will assert LQIPHASE_LQ. Recovery is then just as + * listed above for the read streaming with P0 asserted. + * Busfree detection is enabled. + */ + if (silent == FALSE) + printk("LQICRC_NLQ\n"); + if (scb == NULL) { + printk("%s: No SCB valid for LQICRC_NLQ. " + "Resetting bus\n", ahd_name(ahd)); + ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); + return; + } + } else if ((lqistat1 & LQIBADLQI) != 0) { + printk("Need to handle BADLQI!\n"); + ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); + return; + } else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) { + if ((curphase & ~P_DATAIN_DT) != 0) { + /* Ack the byte. So we can continue. */ + if (silent == FALSE) + printk("Acking %s to clear perror\n", + ahd_lookup_phase_entry(curphase)->phasemsg); + ahd_inb(ahd, SCSIDAT); + } + + if (curphase == P_MESGIN) + msg_out = MSG_PARITY_ERROR; + } + + /* + * We've set the hardware to assert ATN if we + * get a parity error on "in" phases, so all we + * need to do is stuff the message buffer with + * the appropriate message. "In" phases have set + * mesg_out to something other than NOP. + */ + ahd->send_msg_perror = msg_out; + if (scb != NULL && msg_out == INITIATOR_ERROR) + scb->flags |= SCB_TRANSMISSION_ERROR; + ahd_outb(ahd, MSG_OUT, HOST_MSG); + ahd_outb(ahd, CLRINT, CLRSCSIINT); + ahd_unpause(ahd); +} + +static void +ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1) +{ + /* + * Clear the sources of the interrupts. + */ + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + ahd_outb(ahd, CLRLQIINT1, lqistat1); + + /* + * If the "illegal" phase changes were in response + * to our ATN to flag a CRC error, AND we ended up + * on packet boundaries, clear the error, restart the + * LQI manager as appropriate, and go on our merry + * way toward sending the message. Otherwise, reset + * the bus to clear the error. + */ + ahd_set_active_fifo(ahd); + if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0 + && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) { + if ((lqistat1 & LQIPHASE_LQ) != 0) { + printk("LQIRETRY for LQIPHASE_LQ\n"); + ahd_outb(ahd, LQCTL2, LQIRETRY); + } else if ((lqistat1 & LQIPHASE_NLQ) != 0) { + printk("LQIRETRY for LQIPHASE_NLQ\n"); + ahd_outb(ahd, LQCTL2, LQIRETRY); + } else + panic("ahd_handle_lqiphase_error: No phase errors\n"); + ahd_dump_card_state(ahd); + ahd_outb(ahd, CLRINT, CLRSCSIINT); + ahd_unpause(ahd); + } else { + printk("Resetting Channel for LQI Phase error\n"); + ahd_dump_card_state(ahd); + ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); + } +} + +/* + * Packetized unexpected or expected busfree. + * Entered in mode based on busfreetime. + */ +static int +ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime) +{ + u_int lqostat1; + + AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), + ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); + lqostat1 = ahd_inb(ahd, LQOSTAT1); + if ((lqostat1 & LQOBUSFREE) != 0) { + struct scb *scb; + u_int scbid; + u_int saved_scbptr; + u_int waiting_h; + u_int waiting_t; + u_int next; + + /* + * The LQO manager detected an unexpected busfree + * either: + * + * 1) During an outgoing LQ. + * 2) After an outgoing LQ but before the first + * REQ of the command packet. 
+ * 3) During an outgoing command packet. + * + * In all cases, CURRSCB is pointing to the + * SCB that encountered the failure. Clean + * up the queue, clear SELDO and LQOBUSFREE, + * and allow the sequencer to restart the select + * out at its lesure. + */ + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + scbid = ahd_inw(ahd, CURRSCB); + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) + panic("SCB not valid during LQOBUSFREE"); + /* + * Clear the status. + */ + ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE); + if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) + ahd_outb(ahd, CLRLQOINT1, 0); + ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); + ahd_flush_device_writes(ahd); + ahd_outb(ahd, CLRSINT0, CLRSELDO); + + /* + * Return the LQO manager to its idle loop. It will + * not do this automatically if the busfree occurs + * after the first REQ of either the LQ or command + * packet or between the LQ and command packet. + */ + ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE); + + /* + * Update the waiting for selection queue so + * we restart on the correct SCB. + */ + waiting_h = ahd_inw(ahd, WAITING_TID_HEAD); + saved_scbptr = ahd_get_scbptr(ahd); + if (waiting_h != scbid) { + + ahd_outw(ahd, WAITING_TID_HEAD, scbid); + waiting_t = ahd_inw(ahd, WAITING_TID_TAIL); + if (waiting_t == waiting_h) { + ahd_outw(ahd, WAITING_TID_TAIL, scbid); + next = SCB_LIST_NULL; + } else { + ahd_set_scbptr(ahd, waiting_h); + next = ahd_inw_scbram(ahd, SCB_NEXT2); + } + ahd_set_scbptr(ahd, scbid); + ahd_outw(ahd, SCB_NEXT2, next); + } + ahd_set_scbptr(ahd, saved_scbptr); + if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) { + if (SCB_IS_SILENT(scb) == FALSE) { + ahd_print_path(ahd, scb); + printk("Probable outgoing LQ CRC error. " + "Retrying command\n"); + } + scb->crc_retry_count++; + } else { + ahd_set_transaction_status(scb, CAM_UNCOR_PARITY); + ahd_freeze_scb(scb); + ahd_freeze_devq(ahd, scb); + } + /* Return unpausing the sequencer. */ + return (0); + } else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) { + /* + * Ignore what are really parity errors that + * occur on the last REQ of a free running + * clock prior to going busfree. Some drives + * do not properly active negate just before + * going busfree resulting in a parity glitch. + */ + ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0) + printk("%s: Parity on last REQ detected " + "during busfree phase.\n", + ahd_name(ahd)); +#endif + /* Return unpausing the sequencer. */ + return (0); + } + if (ahd->src_mode != AHD_MODE_SCSI) { + u_int scbid; + struct scb *scb; + + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + ahd_print_path(ahd, scb); + printk("Unexpected PKT busfree condition\n"); + ahd_dump_card_state(ahd); + ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A', + SCB_GET_LUN(scb), SCB_GET_TAG(scb), + ROLE_INITIATOR, CAM_UNEXP_BUSFREE); + + /* Return restarting the sequencer. */ + return (1); + } + printk("%s: Unexpected PKT busfree condition\n", ahd_name(ahd)); + ahd_dump_card_state(ahd); + /* Restart the sequencer. */ + return (1); +} + +/* + * Non-packetized unexpected or expected busfree. + */ +static int +ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) +{ + struct ahd_devinfo devinfo; + struct scb *scb; + u_int lastphase; + u_int saved_scsiid; + u_int saved_lun; + u_int target; + u_int initiator_role_id; + u_int scbid; + u_int ppr_busfree; + int printerror; + + /* + * Look at what phase we were last in. 
If its message out, + * chances are pretty good that the busfree was in response + * to one of our abort requests. + */ + lastphase = ahd_inb(ahd, LASTPHASE); + saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); + saved_lun = ahd_inb(ahd, SAVED_LUN); + target = SCSIID_TARGET(ahd, saved_scsiid); + initiator_role_id = SCSIID_OUR_ID(saved_scsiid); + ahd_compile_devinfo(&devinfo, initiator_role_id, + target, saved_lun, 'A', ROLE_INITIATOR); + printerror = 1; + + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + if (scb != NULL + && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) + scb = NULL; + + ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0; + if (lastphase == P_MESGOUT) { + u_int tag; + + tag = SCB_LIST_NULL; + if (ahd_sent_msg(ahd, AHDMSG_1B, ABORT_TASK, TRUE) + || ahd_sent_msg(ahd, AHDMSG_1B, ABORT_TASK_SET, TRUE)) { + int found; + int sent_msg; + + if (scb == NULL) { + ahd_print_devinfo(ahd, &devinfo); + printk("Abort for unidentified " + "connection completed.\n"); + /* restart the sequencer. */ + return (1); + } + sent_msg = ahd->msgout_buf[ahd->msgout_index - 1]; + ahd_print_path(ahd, scb); + printk("SCB %d - Abort%s Completed.\n", + SCB_GET_TAG(scb), + sent_msg == ABORT_TASK ? "" : " Tag"); + + if (sent_msg == ABORT_TASK) + tag = SCB_GET_TAG(scb); + + if ((scb->flags & SCB_EXTERNAL_RESET) != 0) { + /* + * This abort is in response to an + * unexpected switch to command phase + * for a packetized connection. Since + * the identify message was never sent, + * "saved lun" is 0. We really want to + * abort only the SCB that encountered + * this error, which could have a different + * lun. The SCB will be retried so the OS + * will see the UA after renegotiating to + * packetized. + */ + tag = SCB_GET_TAG(scb); + saved_lun = scb->hscb->lun; + } + found = ahd_abort_scbs(ahd, target, 'A', saved_lun, + tag, ROLE_INITIATOR, + CAM_REQ_ABORTED); + printk("found == 0x%x\n", found); + printerror = 0; + } else if (ahd_sent_msg(ahd, AHDMSG_1B, + TARGET_RESET, TRUE)) { + ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD, + CAM_BDR_SENT, "Bus Device Reset", + /*verbose_level*/0); + printerror = 0; + } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, FALSE) + && ppr_busfree == 0) { + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + + /* + * PPR Rejected. + * + * If the previous negotiation was packetized, + * this could be because the device has been + * reset without our knowledge. Force our + * current negotiation to async and retry the + * negotiation. Otherwise retry the command + * with non-ppr negotiation. + */ +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printk("PPR negotiation rejected busfree.\n"); +#endif + tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, + devinfo.our_scsiid, + devinfo.target, &tstate); + if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ)!=0) { + ahd_set_width(ahd, &devinfo, + MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_CUR, + /*paused*/TRUE); + ahd_set_syncrate(ahd, &devinfo, + /*period*/0, /*offset*/0, + /*ppr_options*/0, + AHD_TRANS_CUR, + /*paused*/TRUE); + /* + * The expect PPR busfree handler below + * will effect the retry and necessary + * abort. + */ + } else { + tinfo->curr.transport_version = 2; + tinfo->goal.transport_version = 2; + tinfo->goal.ppr_options = 0; + if (scb != NULL) { + /* + * Remove any SCBs in the waiting + * for selection queue that may + * also be for this target so that + * command ordering is preserved. 
+ */ + ahd_freeze_devq(ahd, scb); + ahd_qinfifo_requeue_tail(ahd, scb); + } + printerror = 0; + } + } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_WDTR, FALSE) + && ppr_busfree == 0) { + /* + * Negotiation Rejected. Go-narrow and + * retry command. + */ +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printk("WDTR negotiation rejected busfree.\n"); +#endif + ahd_set_width(ahd, &devinfo, + MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_CUR|AHD_TRANS_GOAL, + /*paused*/TRUE); + if (scb != NULL) { + /* + * Remove any SCBs in the waiting for + * selection queue that may also be for + * this target so that command ordering + * is preserved. + */ + ahd_freeze_devq(ahd, scb); + ahd_qinfifo_requeue_tail(ahd, scb); + } + printerror = 0; + } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_SDTR, FALSE) + && ppr_busfree == 0) { + /* + * Negotiation Rejected. Go-async and + * retry command. + */ +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printk("SDTR negotiation rejected busfree.\n"); +#endif + ahd_set_syncrate(ahd, &devinfo, + /*period*/0, /*offset*/0, + /*ppr_options*/0, + AHD_TRANS_CUR|AHD_TRANS_GOAL, + /*paused*/TRUE); + if (scb != NULL) { + /* + * Remove any SCBs in the waiting for + * selection queue that may also be for + * this target so that command ordering + * is preserved. + */ + ahd_freeze_devq(ahd, scb); + ahd_qinfifo_requeue_tail(ahd, scb); + } + printerror = 0; + } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 + && ahd_sent_msg(ahd, AHDMSG_1B, + INITIATOR_ERROR, TRUE)) { + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printk("Expected IDE Busfree\n"); +#endif + printerror = 0; + } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE) + && ahd_sent_msg(ahd, AHDMSG_1B, + MESSAGE_REJECT, TRUE)) { + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printk("Expected QAS Reject Busfree\n"); +#endif + printerror = 0; + } + } + + /* + * The busfree required flag is honored at the end of + * the message phases. We check it last in case we + * had to send some other message that caused a busfree. + */ + if (scb != NULL && printerror != 0 + && (lastphase == P_MESGIN || lastphase == P_MESGOUT) + && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { + + ahd_freeze_devq(ahd, scb); + ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); + ahd_freeze_scb(scb); + if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) { + ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), + SCB_GET_CHANNEL(ahd, scb), + SCB_GET_LUN(scb), SCB_LIST_NULL, + ROLE_INITIATOR, CAM_REQ_ABORTED); + } else { +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printk("PPR Negotiation Busfree.\n"); +#endif + ahd_done(ahd, scb); + } + printerror = 0; + } + if (printerror != 0) { + int aborted; + + aborted = 0; + if (scb != NULL) { + u_int tag; + + if ((scb->hscb->control & TAG_ENB) != 0) + tag = SCB_GET_TAG(scb); + else + tag = SCB_LIST_NULL; + ahd_print_path(ahd, scb); + aborted = ahd_abort_scbs(ahd, target, 'A', + SCB_GET_LUN(scb), tag, + ROLE_INITIATOR, + CAM_UNEXP_BUSFREE); + } else { + /* + * We had not fully identified this connection, + * so we cannot abort anything. + */ + printk("%s: ", ahd_name(ahd)); + } + printk("Unexpected busfree %s, %d SCBs aborted, " + "PRGMCNT == 0x%x\n", + ahd_lookup_phase_entry(lastphase)->phasemsg, + aborted, + ahd_inw(ahd, PRGMCNT)); + ahd_dump_card_state(ahd); + if (lastphase != P_BUSFREE) + ahd_force_renegotiation(ahd, &devinfo); + } + /* Always restart the sequencer. 
*/ + return (1); +} + +static void +ahd_handle_proto_violation(struct ahd_softc *ahd) +{ + struct ahd_devinfo devinfo; + struct scb *scb; + u_int scbid; + u_int seq_flags; + u_int curphase; + u_int lastphase; + int found; + + ahd_fetch_devinfo(ahd, &devinfo); + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + seq_flags = ahd_inb(ahd, SEQ_FLAGS); + curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; + lastphase = ahd_inb(ahd, LASTPHASE); + if ((seq_flags & NOT_IDENTIFIED) != 0) { + + /* + * The reconnecting target either did not send an + * identify message, or did, but we didn't find an SCB + * to match. + */ + ahd_print_devinfo(ahd, &devinfo); + printk("Target did not send an IDENTIFY message. " + "LASTPHASE = 0x%x.\n", lastphase); + scb = NULL; + } else if (scb == NULL) { + /* + * We don't seem to have an SCB active for this + * transaction. Print an error and reset the bus. + */ + ahd_print_devinfo(ahd, &devinfo); + printk("No SCB found during protocol violation\n"); + goto proto_violation_reset; + } else { + ahd_set_transaction_status(scb, CAM_SEQUENCE_FAIL); + if ((seq_flags & NO_CDB_SENT) != 0) { + ahd_print_path(ahd, scb); + printk("No or incomplete CDB sent to device.\n"); + } else if ((ahd_inb_scbram(ahd, SCB_CONTROL) + & STATUS_RCVD) == 0) { + /* + * The target never bothered to provide status to + * us prior to completing the command. Since we don't + * know the disposition of this command, we must attempt + * to abort it. Assert ATN and prepare to send an abort + * message. + */ + ahd_print_path(ahd, scb); + printk("Completed command without status.\n"); + } else { + ahd_print_path(ahd, scb); + printk("Unknown protocol violation.\n"); + ahd_dump_card_state(ahd); + } + } + if ((lastphase & ~P_DATAIN_DT) == 0 + || lastphase == P_COMMAND) { +proto_violation_reset: + /* + * Target either went directly to data + * phase or didn't respond to our ATN. + * The only safe thing to do is to blow + * it away with a bus reset. + */ + found = ahd_reset_channel(ahd, 'A', TRUE); + printk("%s: Issued Channel %c Bus Reset. " + "%d SCBs aborted\n", ahd_name(ahd), 'A', found); + } else { + /* + * Leave the selection hardware off in case + * this abort attempt will affect yet to + * be sent commands. + */ + ahd_outb(ahd, SCSISEQ0, + ahd_inb(ahd, SCSISEQ0) & ~ENSELO); + ahd_assert_atn(ahd); + ahd_outb(ahd, MSG_OUT, HOST_MSG); + if (scb == NULL) { + ahd_print_devinfo(ahd, &devinfo); + ahd->msgout_buf[0] = ABORT_TASK; + ahd->msgout_len = 1; + ahd->msgout_index = 0; + ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; + } else { + ahd_print_path(ahd, scb); + scb->flags |= SCB_ABORT; + } + printk("Protocol violation %s. Attempting to abort.\n", + ahd_lookup_phase_entry(curphase)->phasemsg); + } +} + +/* + * Force renegotiation to occur the next time we initiate + * a command to the current device. 
+ */ +static void +ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) +{ + struct ahd_initiator_tinfo *targ_info; + struct ahd_tmode_tstate *tstate; + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { + ahd_print_devinfo(ahd, devinfo); + printk("Forcing renegotiation\n"); + } +#endif + targ_info = ahd_fetch_transinfo(ahd, + devinfo->channel, + devinfo->our_scsiid, + devinfo->target, + &tstate); + ahd_update_neg_request(ahd, devinfo, tstate, + targ_info, AHD_NEG_IF_NON_ASYNC); +} + +#define AHD_MAX_STEPS 2000 +static void +ahd_clear_critical_section(struct ahd_softc *ahd) +{ + ahd_mode_state saved_modes; + int stepping; + int steps; + int first_instr; + u_int simode0; + u_int simode1; + u_int simode3; + u_int lqimode0; + u_int lqimode1; + u_int lqomode0; + u_int lqomode1; + + if (ahd->num_critical_sections == 0) + return; + + stepping = FALSE; + steps = 0; + first_instr = 0; + simode0 = 0; + simode1 = 0; + simode3 = 0; + lqimode0 = 0; + lqimode1 = 0; + lqomode0 = 0; + lqomode1 = 0; + saved_modes = ahd_save_modes(ahd); + for (;;) { + struct cs *cs; + u_int seqaddr; + u_int i; + + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + seqaddr = ahd_inw(ahd, CURADDR); + + cs = ahd->critical_sections; + for (i = 0; i < ahd->num_critical_sections; i++, cs++) { + if (cs->begin < seqaddr && cs->end >= seqaddr) + break; + } + + if (i == ahd->num_critical_sections) + break; + + if (steps > AHD_MAX_STEPS) { + printk("%s: Infinite loop in critical section\n" + "%s: First Instruction 0x%x now 0x%x\n", + ahd_name(ahd), ahd_name(ahd), first_instr, + seqaddr); + ahd_dump_card_state(ahd); + panic("critical section loop"); + } + + steps++; +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MISC) != 0) + printk("%s: Single stepping at 0x%x\n", ahd_name(ahd), + seqaddr); +#endif + if (stepping == FALSE) { + + first_instr = seqaddr; + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + simode0 = ahd_inb(ahd, SIMODE0); + simode3 = ahd_inb(ahd, SIMODE3); + lqimode0 = ahd_inb(ahd, LQIMODE0); + lqimode1 = ahd_inb(ahd, LQIMODE1); + lqomode0 = ahd_inb(ahd, LQOMODE0); + lqomode1 = ahd_inb(ahd, LQOMODE1); + ahd_outb(ahd, SIMODE0, 0); + ahd_outb(ahd, SIMODE3, 0); + ahd_outb(ahd, LQIMODE0, 0); + ahd_outb(ahd, LQIMODE1, 0); + ahd_outb(ahd, LQOMODE0, 0); + ahd_outb(ahd, LQOMODE1, 0); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + simode1 = ahd_inb(ahd, SIMODE1); + /* + * We don't clear ENBUSFREE. Unfortunately + * we cannot re-enable busfree detection within + * the current connection, so we must leave it + * on while single stepping. + */ + ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE); + ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP); + stepping = TRUE; + } + ahd_outb(ahd, CLRSINT1, CLRBUSFREE); + ahd_outb(ahd, CLRINT, CLRSCSIINT); + ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); + ahd_outb(ahd, HCNTRL, ahd->unpause); + while (!ahd_is_paused(ahd)) + ahd_delay(200); + ahd_update_modes(ahd); + } + if (stepping) { + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + ahd_outb(ahd, SIMODE0, simode0); + ahd_outb(ahd, SIMODE3, simode3); + ahd_outb(ahd, LQIMODE0, lqimode0); + ahd_outb(ahd, LQIMODE1, lqimode1); + ahd_outb(ahd, LQOMODE0, lqomode0); + ahd_outb(ahd, LQOMODE1, lqomode1); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP); + ahd_outb(ahd, SIMODE1, simode1); + /* + * SCSIINT seems to glitch occasionally when + * the interrupt masks are restored. 
Clear SCSIINT + * one more time so that only persistent errors + * are seen as a real interrupt. + */ + ahd_outb(ahd, CLRINT, CLRSCSIINT); + } + ahd_restore_modes(ahd, saved_modes); +} + +/* + * Clear any pending interrupt status. + */ +static void +ahd_clear_intstat(struct ahd_softc *ahd) +{ + AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), + ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); + /* Clear any interrupt conditions this may have caused */ + ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2 + |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD); + ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT + |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI + |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ); + ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ + |CLRLQOATNPKT|CLRLQOTCRC); + ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS + |CLRLQOBUSFREE|CLRLQOPHACHGINPKT); + if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { + ahd_outb(ahd, CLRLQOINT0, 0); + ahd_outb(ahd, CLRLQOINT1, 0); + } + ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR); + ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI + |CLRBUSFREE|CLRSCSIPERR|CLRREQINIT); + ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO + |CLRIOERR|CLROVERRUN); + ahd_outb(ahd, CLRINT, CLRSCSIINT); +} + +/**************************** Debugging Routines ******************************/ +#ifdef AHD_DEBUG +uint32_t ahd_debug = AHD_DEBUG_OPTS; +#endif + +#if 0 +void +ahd_print_scb(struct scb *scb) +{ + struct hardware_scb *hscb; + int i; + + hscb = scb->hscb; + printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", + (void *)scb, + hscb->control, + hscb->scsiid, + hscb->lun, + hscb->cdb_len); + printk("Shared Data: "); + for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++) + printk("%#02x", hscb->shared_data.idata.cdb[i]); + printk(" dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n", + (uint32_t)((ahd_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF), + (uint32_t)(ahd_le64toh(hscb->dataptr) & 0xFFFFFFFF), + ahd_le32toh(hscb->datacnt), + ahd_le32toh(hscb->sgptr), + SCB_GET_TAG(scb)); + ahd_dump_sglist(scb); +} +#endif /* 0 */ + +/************************* Transfer Negotiation *******************************/ +/* + * Allocate per target mode instance (ID we respond to as a target) + * transfer negotiation data structures. + */ +static struct ahd_tmode_tstate * +ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel) +{ + struct ahd_tmode_tstate *master_tstate; + struct ahd_tmode_tstate *tstate; + int i; + + master_tstate = ahd->enabled_targets[ahd->our_id]; + if (ahd->enabled_targets[scsi_id] != NULL + && ahd->enabled_targets[scsi_id] != master_tstate) + panic("%s: ahd_alloc_tstate - Target already allocated", + ahd_name(ahd)); + tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC); + if (tstate == NULL) + return (NULL); + + /* + * If we have allocated a master tstate, copy user settings from + * the master tstate (taken from SRAM or the EEPROM) for this + * channel, but reset our current and goal settings to async/narrow + * until an initiator talks to us. 
+ */ + if (master_tstate != NULL) { + memcpy(tstate, master_tstate, sizeof(*tstate)); + memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); + for (i = 0; i < 16; i++) { + memset(&tstate->transinfo[i].curr, 0, + sizeof(tstate->transinfo[i].curr)); + memset(&tstate->transinfo[i].goal, 0, + sizeof(tstate->transinfo[i].goal)); + } + } else + memset(tstate, 0, sizeof(*tstate)); + ahd->enabled_targets[scsi_id] = tstate; + return (tstate); +} + +#ifdef AHD_TARGET_MODE +/* + * Free per target mode instance (ID we respond to as a target) + * transfer negotiation data structures. + */ +static void +ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force) +{ + struct ahd_tmode_tstate *tstate; + + /* + * Don't clean up our "master" tstate. + * It has our default user settings. + */ + if (scsi_id == ahd->our_id + && force == FALSE) + return; + + tstate = ahd->enabled_targets[scsi_id]; + kfree(tstate); + ahd->enabled_targets[scsi_id] = NULL; +} +#endif + +/* + * Called when we have an active connection to a target on the bus, + * this function finds the nearest period to the input period limited + * by the capabilities of the bus connectivity of and sync settings for + * the target. + */ +static void +ahd_devlimited_syncrate(struct ahd_softc *ahd, + struct ahd_initiator_tinfo *tinfo, + u_int *period, u_int *ppr_options, role_t role) +{ + struct ahd_transinfo *transinfo; + u_int maxsync; + + if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0 + && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) { + maxsync = AHD_SYNCRATE_PACED; + } else { + maxsync = AHD_SYNCRATE_ULTRA; + /* Can't do DT related options on an SE bus */ + *ppr_options &= MSG_EXT_PPR_QAS_REQ; + } + /* + * Never allow a value higher than our current goal + * period otherwise we may allow a target initiated + * negotiation to go above the limit as set by the + * user. In the case of an initiator initiated + * sync negotiation, we limit based on the user + * setting. This allows the system to still accept + * incoming negotiations even if target initiated + * negotiation is not performed. + */ + if (role == ROLE_TARGET) + transinfo = &tinfo->user; + else + transinfo = &tinfo->goal; + *ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN); + if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { + maxsync = max(maxsync, (u_int)AHD_SYNCRATE_ULTRA2); + *ppr_options &= ~MSG_EXT_PPR_DT_REQ; + } + if (transinfo->period == 0) { + *period = 0; + *ppr_options = 0; + } else { + *period = max(*period, (u_int)transinfo->period); + ahd_find_syncrate(ahd, period, ppr_options, maxsync); + } +} + +/* + * Look up the valid period to SCSIRATE conversion in our table. + * Return the period and offset that should be sent to the target + * if this was the beginning of an SDTR. + */ +void +ahd_find_syncrate(struct ahd_softc *ahd, u_int *period, + u_int *ppr_options, u_int maxsync) +{ + if (*period < maxsync) + *period = maxsync; + + if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0 + && *period > AHD_SYNCRATE_MIN_DT) + *ppr_options &= ~MSG_EXT_PPR_DT_REQ; + + if (*period > AHD_SYNCRATE_MIN) + *period = 0; + + /* Honor PPR option conformance rules. 
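The period values clamped in ahd_find_syncrate() are SPI transfer period factors rather than nanoseconds; the surrounding code only relies on smaller factors meaning faster timing, with factor 8 marking the paced 160 MHz rate and factor 9 the DT boundary (both facts stated elsewhere in this file). A minimal stand-alone sketch of the conventional factor-to-timing mapping, assuming the standard SPI encodings; illustration only, not driver source:

/* Illustration only: conventional SPI period factors referenced above. */
#include <stdio.h>

struct period_entry {
	unsigned int factor;	/* PPR/SDTR transfer period factor */
	const char *timing;	/* nominal REQ/ACK period */
};

static const struct period_entry period_table[] = {
	{ 0x08, "6.25 ns (paced, Ultra320)" },
	{ 0x09, "12.5 ns (DT, Ultra160)" },
	{ 0x0a, "25 ns (Ultra2)" },
	{ 0x0c, "50 ns (Fast-20)" },
	{ 0x19, "100 ns (Fast-10)" },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(period_table) / sizeof(period_table[0]); i++)
		printf("factor 0x%02x -> %s\n",
		       period_table[i].factor, period_table[i].timing);
	return 0;
}

Because larger factors are slower, the filters above only ever raise *period, so the clamping can never produce an agreement faster than the adapter or the user settings allow.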
*/ + if (*period > AHD_SYNCRATE_PACED) + *ppr_options &= ~MSG_EXT_PPR_RTI; + + if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0) + *ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ); + + if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0) + *ppr_options &= MSG_EXT_PPR_QAS_REQ; + + /* Skip all PACED only entries if IU is not available */ + if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0 + && *period < AHD_SYNCRATE_DT) + *period = AHD_SYNCRATE_DT; + + /* Skip all DT only entries if DT is not available */ + if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 + && *period < AHD_SYNCRATE_ULTRA2) + *period = AHD_SYNCRATE_ULTRA2; +} + +/* + * Truncate the given synchronous offset to a value the + * current adapter type and syncrate are capable of. + */ +static void +ahd_validate_offset(struct ahd_softc *ahd, + struct ahd_initiator_tinfo *tinfo, + u_int period, u_int *offset, int wide, + role_t role) +{ + u_int maxoffset; + + /* Limit offset to what we can do */ + if (period == 0) + maxoffset = 0; + else if (period <= AHD_SYNCRATE_PACED) { + if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) + maxoffset = MAX_OFFSET_PACED_BUG; + else + maxoffset = MAX_OFFSET_PACED; + } else + maxoffset = MAX_OFFSET_NON_PACED; + *offset = min(*offset, maxoffset); + if (tinfo != NULL) { + if (role == ROLE_TARGET) + *offset = min(*offset, (u_int)tinfo->user.offset); + else + *offset = min(*offset, (u_int)tinfo->goal.offset); + } +} + +/* + * Truncate the given transfer width parameter to a value the + * current adapter type is capable of. + */ +static void +ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, + u_int *bus_width, role_t role) +{ + switch (*bus_width) { + default: + if (ahd->features & AHD_WIDE) { + /* Respond Wide */ + *bus_width = MSG_EXT_WDTR_BUS_16_BIT; + break; + } + fallthrough; + case MSG_EXT_WDTR_BUS_8_BIT: + *bus_width = MSG_EXT_WDTR_BUS_8_BIT; + break; + } + if (tinfo != NULL) { + if (role == ROLE_TARGET) + *bus_width = min((u_int)tinfo->user.width, *bus_width); + else + *bus_width = min((u_int)tinfo->goal.width, *bus_width); + } +} + +/* + * Update the bitmask of targets for which the controller should + * negotiate with at the next convenient opportunity. This currently + * means the next time we send the initial identify messages for + * a new transaction. + */ +int +ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, + struct ahd_tmode_tstate *tstate, + struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type) +{ + u_int auto_negotiate_orig; + + auto_negotiate_orig = tstate->auto_negotiate; + if (neg_type == AHD_NEG_ALWAYS) { + /* + * Force our "current" settings to be + * unknown so that unless a bus reset + * occurs the need to renegotiate is + * recorded persistently. 
+ */ + if ((ahd->features & AHD_WIDE) != 0) + tinfo->curr.width = AHD_WIDTH_UNKNOWN; + tinfo->curr.period = AHD_PERIOD_UNKNOWN; + tinfo->curr.offset = AHD_OFFSET_UNKNOWN; + } + if (tinfo->curr.period != tinfo->goal.period + || tinfo->curr.width != tinfo->goal.width + || tinfo->curr.offset != tinfo->goal.offset + || tinfo->curr.ppr_options != tinfo->goal.ppr_options + || (neg_type == AHD_NEG_IF_NON_ASYNC + && (tinfo->goal.offset != 0 + || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT + || tinfo->goal.ppr_options != 0))) + tstate->auto_negotiate |= devinfo->target_mask; + else + tstate->auto_negotiate &= ~devinfo->target_mask; + + return (auto_negotiate_orig != tstate->auto_negotiate); +} + +/* + * Update the user/goal/curr tables of synchronous negotiation + * parameters as well as, in the case of a current or active update, + * any data structures on the host controller. In the case of an + * active update, the specified target is currently talking to us on + * the bus, so the transfer parameter update must take effect + * immediately. + */ +void +ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, + u_int period, u_int offset, u_int ppr_options, + u_int type, int paused) +{ + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + u_int old_period; + u_int old_offset; + u_int old_ppr; + int active; + int update_needed; + + active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; + update_needed = 0; + + if (period == 0 || offset == 0) { + period = 0; + offset = 0; + } + + tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, + devinfo->target, &tstate); + + if ((type & AHD_TRANS_USER) != 0) { + tinfo->user.period = period; + tinfo->user.offset = offset; + tinfo->user.ppr_options = ppr_options; + } + + if ((type & AHD_TRANS_GOAL) != 0) { + tinfo->goal.period = period; + tinfo->goal.offset = offset; + tinfo->goal.ppr_options = ppr_options; + } + + old_period = tinfo->curr.period; + old_offset = tinfo->curr.offset; + old_ppr = tinfo->curr.ppr_options; + + if ((type & AHD_TRANS_CUR) != 0 + && (old_period != period + || old_offset != offset + || old_ppr != ppr_options)) { + + update_needed++; + + tinfo->curr.period = period; + tinfo->curr.offset = offset; + tinfo->curr.ppr_options = ppr_options; + + ahd_send_async(ahd, devinfo->channel, devinfo->target, + CAM_LUN_WILDCARD, AC_TRANSFER_NEG); + if (bootverbose) { + if (offset != 0) { + int options; + + printk("%s: target %d synchronous with " + "period = 0x%x, offset = 0x%x", + ahd_name(ahd), devinfo->target, + period, offset); + options = 0; + if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) { + printk("(RDSTRM"); + options++; + } + if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { + printk("%s", options ? "|DT" : "(DT"); + options++; + } + if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { + printk("%s", options ? "|IU" : "(IU"); + options++; + } + if ((ppr_options & MSG_EXT_PPR_RTI) != 0) { + printk("%s", options ? "|RTI" : "(RTI"); + options++; + } + if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) { + printk("%s", options ? "|QAS" : "(QAS"); + options++; + } + if (options != 0) + printk(")\n"); + else + printk("\n"); + } else { + printk("%s: target %d using " + "asynchronous transfers%s\n", + ahd_name(ahd), devinfo->target, + (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0 + ? "(QAS)" : ""); + } + } + } + /* + * Always refresh the neg-table to handle the case of the + * sequencer setting the ENATNO bit for a MK_MESSAGE request. + * We will always renegotiate in that case if this is a + * packetized request. 
Also manage the busfree expected flag + * from this common routine so that we catch changes due to + * WDTR or SDTR messages. + */ + if ((type & AHD_TRANS_CUR) != 0) { + if (!paused) + ahd_pause(ahd); + ahd_update_neg_table(ahd, devinfo, &tinfo->curr); + if (!paused) + ahd_unpause(ahd); + if (ahd->msg_type != MSG_TYPE_NONE) { + if ((old_ppr & MSG_EXT_PPR_IU_REQ) + != (ppr_options & MSG_EXT_PPR_IU_REQ)) { +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { + ahd_print_devinfo(ahd, devinfo); + printk("Expecting IU Change busfree\n"); + } +#endif + ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE + | MSG_FLAG_IU_REQ_CHANGED; + } + if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) { +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printk("PPR with IU_REQ outstanding\n"); +#endif + ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE; + } + } + } + + update_needed += ahd_update_neg_request(ahd, devinfo, tstate, + tinfo, AHD_NEG_TO_GOAL); + + if (update_needed && active) + ahd_update_pending_scbs(ahd); +} + +/* + * Update the user/goal/curr tables of wide negotiation + * parameters as well as, in the case of a current or active update, + * any data structures on the host controller. In the case of an + * active update, the specified target is currently talking to us on + * the bus, so the transfer parameter update must take effect + * immediately. + */ +void +ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, + u_int width, u_int type, int paused) +{ + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + u_int oldwidth; + int active; + int update_needed; + + active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; + update_needed = 0; + tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, + devinfo->target, &tstate); + + if ((type & AHD_TRANS_USER) != 0) + tinfo->user.width = width; + + if ((type & AHD_TRANS_GOAL) != 0) + tinfo->goal.width = width; + + oldwidth = tinfo->curr.width; + if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) { + + update_needed++; + + tinfo->curr.width = width; + ahd_send_async(ahd, devinfo->channel, devinfo->target, + CAM_LUN_WILDCARD, AC_TRANSFER_NEG); + if (bootverbose) { + printk("%s: target %d using %dbit transfers\n", + ahd_name(ahd), devinfo->target, + 8 * (0x01 << width)); + } + } + + if ((type & AHD_TRANS_CUR) != 0) { + if (!paused) + ahd_pause(ahd); + ahd_update_neg_table(ahd, devinfo, &tinfo->curr); + if (!paused) + ahd_unpause(ahd); + } + + update_needed += ahd_update_neg_request(ahd, devinfo, tstate, + tinfo, AHD_NEG_TO_GOAL); + if (update_needed && active) + ahd_update_pending_scbs(ahd); + +} + +/* + * Update the current state of tagged queuing for a given target. 
+ */ +static void +ahd_set_tags(struct ahd_softc *ahd, struct scsi_cmnd *cmd, + struct ahd_devinfo *devinfo, ahd_queue_alg alg) +{ + struct scsi_device *sdev = cmd->device; + + ahd_platform_set_tags(ahd, sdev, devinfo, alg); + ahd_send_async(ahd, devinfo->channel, devinfo->target, + devinfo->lun, AC_TRANSFER_NEG); +} + +static void +ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, + struct ahd_transinfo *tinfo) +{ + ahd_mode_state saved_modes; + u_int period; + u_int ppr_opts; + u_int con_opts; + u_int offset; + u_int saved_negoaddr; + uint8_t iocell_opts[sizeof(ahd->iocell_opts)]; + + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + + saved_negoaddr = ahd_inb(ahd, NEGOADDR); + ahd_outb(ahd, NEGOADDR, devinfo->target); + period = tinfo->period; + offset = tinfo->offset; + memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts)); + ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ + |MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI); + con_opts = 0; + if (period == 0) + period = AHD_SYNCRATE_ASYNC; + if (period == AHD_SYNCRATE_160) { + + if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) { + /* + * When the SPI4 spec was finalized, PACE transfers + * was not made a configurable option in the PPR + * message. Instead it is assumed to be enabled for + * any syncrate faster than 80MHz. Nevertheless, + * Harpoon2A4 allows this to be configurable. + * + * Harpoon2A4 also assumes at most 2 data bytes per + * negotiated REQ/ACK offset. Paced transfers take + * 4, so we must adjust our offset. + */ + ppr_opts |= PPROPT_PACE; + offset *= 2; + + /* + * Harpoon2A assumed that there would be a + * fallback rate between 160MHz and 80MHz, + * so 7 is used as the period factor rather + * than 8 for 160MHz. + */ + period = AHD_SYNCRATE_REVA_160; + } + if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0) + iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= + ~AHD_PRECOMP_MASK; + } else { + /* + * Precomp should be disabled for non-paced transfers. + */ + iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK; + + if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0 + && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0 + && (ppr_opts & MSG_EXT_PPR_IU_REQ) == 0) { + /* + * Slow down our CRC interval to be + * compatible with non-packetized + * U160 devices that can't handle a + * CRC at full speed. + */ + con_opts |= ENSLOWCRC; + } + + if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) { + /* + * On H2A4, revert to a slower slewrate + * on non-paced transfers. + */ + iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= + ~AHD_SLEWRATE_MASK; + } + } + + ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW); + ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]); + ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE); + ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]); + + ahd_outb(ahd, NEGPERIOD, period); + ahd_outb(ahd, NEGPPROPTS, ppr_opts); + ahd_outb(ahd, NEGOFFSET, offset); + + if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT) + con_opts |= WIDEXFER; + + /* + * Slow down our CRC interval to be + * compatible with packetized U320 devices + * that can't handle a CRC at full speed + */ + if (ahd->features & AHD_AIC79XXB_SLOWCRC) { + con_opts |= ENSLOWCRC; + } + + /* + * During packetized transfers, the target will + * give us the opportunity to send command packets + * without us asserting attention. 
+ */ + if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0) + con_opts |= ENAUTOATNO; + ahd_outb(ahd, NEGCONOPTS, con_opts); + ahd_outb(ahd, NEGOADDR, saved_negoaddr); + ahd_restore_modes(ahd, saved_modes); +} + +/* + * When the transfer settings for a connection change, setup for + * negotiation in pending SCBs to effect the change as quickly as + * possible. We also cancel any negotiations that are scheduled + * for inflight SCBs that have not been started yet. + */ +static void +ahd_update_pending_scbs(struct ahd_softc *ahd) +{ + struct scb *pending_scb; + int pending_scb_count; + int paused; + u_int saved_scbptr; + ahd_mode_state saved_modes; + + /* + * Traverse the pending SCB list and ensure that all of the + * SCBs there have the proper settings. We can only safely + * clear the negotiation required flag (setting requires the + * execution queue to be modified) and this is only possible + * if we are not already attempting to select out for this + * SCB. For this reason, all callers only call this routine + * if we are changing the negotiation settings for the currently + * active transaction on the bus. + */ + pending_scb_count = 0; + LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { + struct ahd_devinfo devinfo; + struct ahd_tmode_tstate *tstate; + + ahd_scb_devinfo(ahd, &devinfo, pending_scb); + ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, + devinfo.target, &tstate); + if ((tstate->auto_negotiate & devinfo.target_mask) == 0 + && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { + pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; + pending_scb->hscb->control &= ~MK_MESSAGE; + } + ahd_sync_scb(ahd, pending_scb, + BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); + pending_scb_count++; + } + + if (pending_scb_count == 0) + return; + + if (ahd_is_paused(ahd)) { + paused = 1; + } else { + paused = 0; + ahd_pause(ahd); + } + + /* + * Force the sequencer to reinitialize the selection for + * the command at the head of the execution queue if it + * has already been setup. The negotiation changes may + * effect whether we select-out with ATN. It is only + * safe to clear ENSELO when the bus is not free and no + * selection is in progres or completed. 
+ */ + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + if ((ahd_inb(ahd, SCSISIGI) & BSYI) != 0 + && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0) + ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); + saved_scbptr = ahd_get_scbptr(ahd); + /* Ensure that the hscbs down on the card match the new information */ + LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { + u_int scb_tag; + u_int control; + + scb_tag = SCB_GET_TAG(pending_scb); + ahd_set_scbptr(ahd, scb_tag); + control = ahd_inb_scbram(ahd, SCB_CONTROL); + control &= ~MK_MESSAGE; + control |= pending_scb->hscb->control & MK_MESSAGE; + ahd_outb(ahd, SCB_CONTROL, control); + } + ahd_set_scbptr(ahd, saved_scbptr); + ahd_restore_modes(ahd, saved_modes); + + if (paused == 0) + ahd_unpause(ahd); +} + +/**************************** Pathing Information *****************************/ +static void +ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) +{ + ahd_mode_state saved_modes; + u_int saved_scsiid; + role_t role; + int our_id; + + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + + if (ahd_inb(ahd, SSTAT0) & TARGET) + role = ROLE_TARGET; + else + role = ROLE_INITIATOR; + + if (role == ROLE_TARGET + && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) { + /* We were selected, so pull our id from TARGIDIN */ + our_id = ahd_inb(ahd, TARGIDIN) & OID; + } else if (role == ROLE_TARGET) + our_id = ahd_inb(ahd, TOWNID); + else + our_id = ahd_inb(ahd, IOWNID); + + saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); + ahd_compile_devinfo(devinfo, + our_id, + SCSIID_TARGET(ahd, saved_scsiid), + ahd_inb(ahd, SAVED_LUN), + SCSIID_CHANNEL(ahd, saved_scsiid), + role); + ahd_restore_modes(ahd, saved_modes); +} + +void +ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) +{ + printk("%s:%c:%d:%d: ", ahd_name(ahd), 'A', + devinfo->target, devinfo->lun); +} + +static const struct ahd_phase_table_entry* +ahd_lookup_phase_entry(int phase) +{ + const struct ahd_phase_table_entry *entry; + const struct ahd_phase_table_entry *last_entry; + + /* + * num_phases doesn't include the default entry which + * will be returned if the phase doesn't match. + */ + last_entry = &ahd_phase_table[num_phases]; + for (entry = ahd_phase_table; entry < last_entry; entry++) { + if (phase == entry->phase) + break; + } + return (entry); +} + +void +ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target, + u_int lun, char channel, role_t role) +{ + devinfo->our_scsiid = our_id; + devinfo->target = target; + devinfo->lun = lun; + devinfo->target_offset = target; + devinfo->channel = channel; + devinfo->role = role; + if (channel == 'B') + devinfo->target_offset += 8; + devinfo->target_mask = (0x01 << devinfo->target_offset); +} + +static void +ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, + struct scb *scb) +{ + role_t role; + int our_id; + + our_id = SCSIID_OUR_ID(scb->hscb->scsiid); + role = ROLE_INITIATOR; + if ((scb->hscb->control & TARGET_SCB) != 0) + role = ROLE_TARGET; + ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb), + SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role); +} + + +/************************ Message Phase Processing ****************************/ +/* + * When an initiator transaction with the MK_MESSAGE flag either reconnects + * or enters the initial message out phase, we are interrupted. 
Fill our + * outgoing message buffer with the appropriate message and beging handing + * the message phase(s) manually. + */ +static void +ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, + struct scb *scb) +{ + /* + * To facilitate adding multiple messages together, + * each routine should increment the index and len + * variables instead of setting them explicitly. + */ + ahd->msgout_index = 0; + ahd->msgout_len = 0; + + if (ahd_currently_packetized(ahd)) + ahd->msg_flags |= MSG_FLAG_PACKETIZED; + + if (ahd->send_msg_perror + && ahd_inb(ahd, MSG_OUT) == HOST_MSG) { + ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror; + ahd->msgout_len++; + ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printk("Setting up for Parity Error delivery\n"); +#endif + return; + } else if (scb == NULL) { + printk("%s: WARNING. No pending message for " + "I_T msgin. Issuing NO-OP\n", ahd_name(ahd)); + ahd->msgout_buf[ahd->msgout_index++] = NOP; + ahd->msgout_len++; + ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; + return; + } + + if ((scb->flags & SCB_DEVICE_RESET) == 0 + && (scb->flags & SCB_PACKETIZED) == 0 + && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) { + u_int identify_msg; + + identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); + if ((scb->hscb->control & DISCENB) != 0) + identify_msg |= MSG_IDENTIFY_DISCFLAG; + ahd->msgout_buf[ahd->msgout_index++] = identify_msg; + ahd->msgout_len++; + + if ((scb->hscb->control & TAG_ENB) != 0) { + ahd->msgout_buf[ahd->msgout_index++] = + scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); + ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb); + ahd->msgout_len += 2; + } + } + + if (scb->flags & SCB_DEVICE_RESET) { + ahd->msgout_buf[ahd->msgout_index++] = TARGET_RESET; + ahd->msgout_len++; + ahd_print_path(ahd, scb); + printk("Bus Device Reset Message Sent\n"); + /* + * Clear our selection hardware in advance of + * the busfree. We may have an entry in the waiting + * Q for this target, and we don't want to go about + * selecting while we handle the busfree and blow it + * away. + */ + ahd_outb(ahd, SCSISEQ0, 0); + } else if ((scb->flags & SCB_ABORT) != 0) { + + if ((scb->hscb->control & TAG_ENB) != 0) { + ahd->msgout_buf[ahd->msgout_index++] = ABORT_TASK; + } else { + ahd->msgout_buf[ahd->msgout_index++] = ABORT_TASK_SET; + } + ahd->msgout_len++; + ahd_print_path(ahd, scb); + printk("Abort%s Message Sent\n", + (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); + /* + * Clear our selection hardware in advance of + * the busfree. We may have an entry in the waiting + * Q for this target, and we don't want to go about + * selecting while we handle the busfree and blow it + * away. + */ + ahd_outb(ahd, SCSISEQ0, 0); + } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { + ahd_build_transfer_msg(ahd, devinfo); + /* + * Clear our selection hardware in advance of potential + * PPR IU status change busfree. We may have an entry in + * the waiting Q for this target, and we don't want to go + * about selecting while we handle the busfree and blow + * it away. 
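For reference, the identify and tag bytes assembled earlier in this routine follow the standard SCSI encodings: IDENTIFY sets bit 7, bit 6 grants disconnect privilege, the low-order bits carry the LUN, and tagged commands append a two-byte SIMPLE QUEUE TAG pair. A stand-alone sketch with those constants written out literally and made-up LUN and tag values; illustration only, not driver source:

/* Illustration only: typical initiator message-out bytes for a tagged command. */
#include <stdio.h>

int main(void)
{
	unsigned char msgout[3];
	unsigned int lun = 2;		/* example LUN */
	unsigned int tag = 0x31;	/* example queue tag */

	msgout[0] = 0x80 | 0x40 | (lun & 0x07);	/* IDENTIFY | DiscPriv | LUN */
	msgout[1] = 0x20;			/* SIMPLE QUEUE TAG */
	msgout[2] = (unsigned char)tag;		/* tag value for this command */

	printf("msgout: %02x %02x %02x\n", msgout[0], msgout[1], msgout[2]);
	return 0;
}

The DiscPriv bit corresponds to the DISCENB check above, and the tag pair matches the TAG_ENB branch that appends the tag type followed by the SCB tag.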
+ */ + ahd_outb(ahd, SCSISEQ0, 0); + } else { + printk("ahd_intr: AWAITING_MSG for an SCB that " + "does not have a waiting message\n"); + printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, + devinfo->target_mask); + panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x " + "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control, + ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT), + scb->flags); + } + + /* + * Clear the MK_MESSAGE flag from the SCB so we aren't + * asked to send this message again. + */ + ahd_outb(ahd, SCB_CONTROL, + ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE); + scb->hscb->control &= ~MK_MESSAGE; + ahd->msgout_index = 0; + ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; +} + +/* + * Build an appropriate transfer negotiation message for the + * currently active target. + */ +static void +ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) +{ + /* + * We need to initiate transfer negotiations. + * If our current and goal settings are identical, + * we want to renegotiate due to a check condition. + */ + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + int dowide; + int dosync; + int doppr; + u_int period; + u_int ppr_options; + u_int offset; + + tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, + devinfo->target, &tstate); + /* + * Filter our period based on the current connection. + * If we can't perform DT transfers on this segment (not in LVD + * mode for instance), then our decision to issue a PPR message + * may change. + */ + period = tinfo->goal.period; + offset = tinfo->goal.offset; + ppr_options = tinfo->goal.ppr_options; + /* Target initiated PPR is not allowed in the SCSI spec */ + if (devinfo->role == ROLE_TARGET) + ppr_options = 0; + ahd_devlimited_syncrate(ahd, tinfo, &period, + &ppr_options, devinfo->role); + dowide = tinfo->curr.width != tinfo->goal.width; + dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; + /* + * Only use PPR if we have options that need it, even if the device + * claims to support it. There might be an expander in the way + * that doesn't. + */ + doppr = ppr_options != 0; + + if (!dowide && !dosync && !doppr) { + dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; + dosync = tinfo->goal.offset != 0; + } + + if (!dowide && !dosync && !doppr) { + /* + * Force async with a WDTR message if we have a wide bus, + * or just issue an SDTR with a 0 offset. + */ + if ((ahd->features & AHD_WIDE) != 0) + dowide = 1; + else + dosync = 1; + + if (bootverbose) { + ahd_print_devinfo(ahd, devinfo); + printk("Ensuring async\n"); + } + } + /* Target initiated PPR is not allowed in the SCSI spec */ + if (devinfo->role == ROLE_TARGET) + doppr = 0; + + /* + * Both the PPR message and SDTR message require the + * goal syncrate to be limited to what the target device + * is capable of handling (based on whether an LVD->SE + * expander is on the bus), so combine these two cases. + * Regardless, guarantee that if we are using WDTR and SDTR + * messages that WDTR comes first. + */ + if (doppr || (dosync && !dowide)) { + + offset = tinfo->goal.offset; + ahd_validate_offset(ahd, tinfo, period, &offset, + doppr ? 
tinfo->goal.width + : tinfo->curr.width, + devinfo->role); + if (doppr) { + ahd_construct_ppr(ahd, devinfo, period, offset, + tinfo->goal.width, ppr_options); + } else { + ahd_construct_sdtr(ahd, devinfo, period, offset); + } + } else { + ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width); + } +} + +/* + * Build a synchronous negotiation message in our message + * buffer based on the input parameters. + */ +static void +ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, + u_int period, u_int offset) +{ + if (offset == 0) + period = AHD_ASYNC_XFER_PERIOD; + ahd->msgout_index += spi_populate_sync_msg( + ahd->msgout_buf + ahd->msgout_index, period, offset); + ahd->msgout_len += 5; + if (bootverbose) { + printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", + ahd_name(ahd), devinfo->channel, devinfo->target, + devinfo->lun, period, offset); + } +} + +/* + * Build a wide negotiateion message in our message + * buffer based on the input parameters. + */ +static void +ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, + u_int bus_width) +{ + ahd->msgout_index += spi_populate_width_msg( + ahd->msgout_buf + ahd->msgout_index, bus_width); + ahd->msgout_len += 4; + if (bootverbose) { + printk("(%s:%c:%d:%d): Sending WDTR %x\n", + ahd_name(ahd), devinfo->channel, devinfo->target, + devinfo->lun, bus_width); + } +} + +/* + * Build a parallel protocol request message in our message + * buffer based on the input parameters. + */ +static void +ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, + u_int period, u_int offset, u_int bus_width, + u_int ppr_options) +{ + /* + * Always request precompensation from + * the other target if we are running + * at paced syncrates. + */ + if (period <= AHD_SYNCRATE_PACED) + ppr_options |= MSG_EXT_PPR_PCOMP_EN; + if (offset == 0) + period = AHD_ASYNC_XFER_PERIOD; + ahd->msgout_index += spi_populate_ppr_msg( + ahd->msgout_buf + ahd->msgout_index, period, offset, + bus_width, ppr_options); + ahd->msgout_len += 8; + if (bootverbose) { + printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " + "offset %x, ppr_options %x\n", ahd_name(ahd), + devinfo->channel, devinfo->target, devinfo->lun, + bus_width, period, offset, ppr_options); + } +} + +/* + * Clear any active message state. + */ +static void +ahd_clear_msg_state(struct ahd_softc *ahd) +{ + ahd_mode_state saved_modes; + + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + ahd->send_msg_perror = 0; + ahd->msg_flags = MSG_FLAG_NONE; + ahd->msgout_len = 0; + ahd->msgin_index = 0; + ahd->msg_type = MSG_TYPE_NONE; + if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) { + /* + * The target didn't care to respond to our + * message request, so clear ATN. + */ + ahd_outb(ahd, CLRSINT1, CLRATNO); + } + ahd_outb(ahd, MSG_OUT, NOP); + ahd_outb(ahd, SEQ_FLAGS2, + ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); + ahd_restore_modes(ahd, saved_modes); +} + +/* + * Manual message loop handler. 
+ */ +static void +ahd_handle_message_phase(struct ahd_softc *ahd) +{ + struct ahd_devinfo devinfo; + u_int bus_phase; + int end_session; + + ahd_fetch_devinfo(ahd, &devinfo); + end_session = FALSE; + bus_phase = ahd_inb(ahd, LASTPHASE); + + if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) { + printk("LQIRETRY for LQIPHASE_OUTPKT\n"); + ahd_outb(ahd, LQCTL2, LQIRETRY); + } +reswitch: + switch (ahd->msg_type) { + case MSG_TYPE_INITIATOR_MSGOUT: + { + int lastbyte; + int phasemis; + int msgdone; + + if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0) + panic("HOST_MSG_LOOP interrupt with no active message"); + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { + ahd_print_devinfo(ahd, &devinfo); + printk("INITIATOR_MSG_OUT"); + } +#endif + phasemis = bus_phase != P_MESGOUT; + if (phasemis) { +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { + printk(" PHASEMIS %s\n", + ahd_lookup_phase_entry(bus_phase) + ->phasemsg); + } +#endif + if (bus_phase == P_MESGIN) { + /* + * Change gears and see if + * this messages is of interest to + * us or should be passed back to + * the sequencer. + */ + ahd_outb(ahd, CLRSINT1, CLRATNO); + ahd->send_msg_perror = 0; + ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN; + ahd->msgin_index = 0; + goto reswitch; + } + end_session = TRUE; + break; + } + + if (ahd->send_msg_perror) { + ahd_outb(ahd, CLRSINT1, CLRATNO); + ahd_outb(ahd, CLRSINT1, CLRREQINIT); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printk(" byte 0x%x\n", ahd->send_msg_perror); +#endif + /* + * If we are notifying the target of a CRC error + * during packetized operations, the target is + * within its rights to acknowledge our message + * with a busfree. + */ + if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0 + && ahd->send_msg_perror == INITIATOR_ERROR) + ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE; + + ahd_outb(ahd, RETURN_2, ahd->send_msg_perror); + ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); + break; + } + + msgdone = ahd->msgout_index == ahd->msgout_len; + if (msgdone) { + /* + * The target has requested a retry. + * Re-assert ATN, reset our message index to + * 0, and try again. + */ + ahd->msgout_index = 0; + ahd_assert_atn(ahd); + } + + lastbyte = ahd->msgout_index == (ahd->msgout_len - 1); + if (lastbyte) { + /* Last byte is signified by dropping ATN */ + ahd_outb(ahd, CLRSINT1, CLRATNO); + } + + /* + * Clear our interrupt status and present + * the next byte on the bus. 
+ */ + ahd_outb(ahd, CLRSINT1, CLRREQINIT); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printk(" byte 0x%x\n", + ahd->msgout_buf[ahd->msgout_index]); +#endif + ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]); + ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); + break; + } + case MSG_TYPE_INITIATOR_MSGIN: + { + int phasemis; + int message_done; + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { + ahd_print_devinfo(ahd, &devinfo); + printk("INITIATOR_MSG_IN"); + } +#endif + phasemis = bus_phase != P_MESGIN; + if (phasemis) { +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { + printk(" PHASEMIS %s\n", + ahd_lookup_phase_entry(bus_phase) + ->phasemsg); + } +#endif + ahd->msgin_index = 0; + if (bus_phase == P_MESGOUT + && (ahd->send_msg_perror != 0 + || (ahd->msgout_len != 0 + && ahd->msgout_index == 0))) { + ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; + goto reswitch; + } + end_session = TRUE; + break; + } + + /* Pull the byte in without acking it */ + ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printk(" byte 0x%x\n", + ahd->msgin_buf[ahd->msgin_index]); +#endif + + message_done = ahd_parse_msg(ahd, &devinfo); + + if (message_done) { + /* + * Clear our incoming message buffer in case there + * is another message following this one. + */ + ahd->msgin_index = 0; + + /* + * If this message illicited a response, + * assert ATN so the target takes us to the + * message out phase. + */ + if (ahd->msgout_len != 0) { +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { + ahd_print_devinfo(ahd, &devinfo); + printk("Asserting ATN for response\n"); + } +#endif + ahd_assert_atn(ahd); + } + } else + ahd->msgin_index++; + + if (message_done == MSGLOOP_TERMINATED) { + end_session = TRUE; + } else { + /* Ack the byte */ + ahd_outb(ahd, CLRSINT1, CLRREQINIT); + ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ); + } + break; + } + case MSG_TYPE_TARGET_MSGIN: + { + int msgdone; + int msgout_request; + + /* + * By default, the message loop will continue. + */ + ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); + + if (ahd->msgout_len == 0) + panic("Target MSGIN with no active message"); + + /* + * If we interrupted a mesgout session, the initiator + * will not know this until our first REQ. So, we + * only honor mesgout requests after we've sent our + * first byte. + */ + if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0 + && ahd->msgout_index > 0) + msgout_request = TRUE; + else + msgout_request = FALSE; + + if (msgout_request) { + + /* + * Change gears and see if + * this messages is of interest to + * us or should be passed back to + * the sequencer. + */ + ahd->msg_type = MSG_TYPE_TARGET_MSGOUT; + ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO); + ahd->msgin_index = 0; + /* Dummy read to REQ for first byte */ + ahd_inb(ahd, SCSIDAT); + ahd_outb(ahd, SXFRCTL0, + ahd_inb(ahd, SXFRCTL0) | SPIOEN); + break; + } + + msgdone = ahd->msgout_index == ahd->msgout_len; + if (msgdone) { + ahd_outb(ahd, SXFRCTL0, + ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); + end_session = TRUE; + break; + } + + /* + * Present the next byte on the bus. + */ + ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); + ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]); + break; + } + case MSG_TYPE_TARGET_MSGOUT: + { + int lastbyte; + int msgdone; + + /* + * By default, the message loop will continue. 
+ */ + ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); + + /* + * The initiator signals that this is + * the last byte by dropping ATN. + */ + lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0; + + /* + * Read the latched byte, but turn off SPIOEN first + * so that we don't inadvertently cause a REQ for the + * next byte. + */ + ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); + ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT); + msgdone = ahd_parse_msg(ahd, &devinfo); + if (msgdone == MSGLOOP_TERMINATED) { + /* + * The message is *really* done in that it caused + * us to go to bus free. The sequencer has already + * been reset at this point, so pull the ejection + * handle. + */ + return; + } + + ahd->msgin_index++; + + /* + * XXX Read spec about initiator dropping ATN too soon + * and use msgdone to detect it. + */ + if (msgdone == MSGLOOP_MSGCOMPLETE) { + ahd->msgin_index = 0; + + /* + * If this message illicited a response, transition + * to the Message in phase and send it. + */ + if (ahd->msgout_len != 0) { + ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO); + ahd_outb(ahd, SXFRCTL0, + ahd_inb(ahd, SXFRCTL0) | SPIOEN); + ahd->msg_type = MSG_TYPE_TARGET_MSGIN; + ahd->msgin_index = 0; + break; + } + } + + if (lastbyte) + end_session = TRUE; + else { + /* Ask for the next byte. */ + ahd_outb(ahd, SXFRCTL0, + ahd_inb(ahd, SXFRCTL0) | SPIOEN); + } + + break; + } + default: + panic("Unknown REQINIT message type"); + } + + if (end_session) { + if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) { + printk("%s: Returning to Idle Loop\n", + ahd_name(ahd)); + ahd_clear_msg_state(ahd); + + /* + * Perform the equivalent of a clear_target_state. + */ + ahd_outb(ahd, LASTPHASE, P_BUSFREE); + ahd_outb(ahd, SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT); + ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET); + } else { + ahd_clear_msg_state(ahd); + ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP); + } + } +} + +/* + * See if we sent a particular extended message to the target. + * If "full" is true, return true only if the target saw the full + * message. If "full" is false, return true if the target saw at + * least the first byte of the message. + */ +static int +ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full) +{ + int found; + u_int index; + + found = FALSE; + index = 0; + + while (index < ahd->msgout_len) { + if (ahd->msgout_buf[index] == EXTENDED_MESSAGE) { + u_int end_index; + + end_index = index + 1 + ahd->msgout_buf[index + 1]; + if (ahd->msgout_buf[index+2] == msgval + && type == AHDMSG_EXT) { + + if (full) { + if (ahd->msgout_index > end_index) + found = TRUE; + } else if (ahd->msgout_index > index) + found = TRUE; + } + index = end_index; + } else if (ahd->msgout_buf[index] >= SIMPLE_QUEUE_TAG + && ahd->msgout_buf[index] <= IGNORE_WIDE_RESIDUE) { + + /* Skip tag type and tag id or residue param*/ + index += 2; + } else { + /* Single byte message */ + if (type == AHDMSG_1B + && ahd->msgout_index > index + && (ahd->msgout_buf[index] == msgval + || ((ahd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0 + && msgval == MSG_IDENTIFYFLAG))) + found = TRUE; + index++; + } + + if (found) + break; + } + return (found); +} + +/* + * Wait for a complete incoming message, parse it, and respond accordingly. 
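The scan in ahd_sent_msg() above relies on the shared preamble of SCSI extended messages (EXTENDED MESSAGE 0x01, a length byte, then the extended message code), which is why end_index is computed as index + 1 + the length byte, and the parser below keys off msgin_buf[1] and msgin_buf[2] in the same way. A hedged sketch of the three layouts this driver exchanges, with byte counts matching the 5/4/8 increments applied by the construct routines; the period, offset, width and option values are invented for illustration:

/* Illustration only: extended negotiation message layouts. */
#include <stdio.h>

int main(void)
{
	/* SDTR: 0x01, length 0x03, code 0x01, period factor, REQ/ACK offset */
	const unsigned char sdtr[5] = { 0x01, 0x03, 0x01, 0x0a, 0x3f };
	/* WDTR: 0x01, length 0x02, code 0x03, width exponent (1 = 16 bit)   */
	const unsigned char wdtr[4] = { 0x01, 0x02, 0x03, 0x01 };
	/* PPR:  0x01, length 0x06, code 0x04, period, reserved, offset,
	 *       width exponent, protocol options bit mask                   */
	const unsigned char ppr[8]  = { 0x01, 0x06, 0x04, 0x08, 0x00,
					0x7e, 0x01, 0x02 };

	/* end_index as computed by the scan above, for a message at index 0 */
	printf("SDTR end_index %d, WDTR %d, PPR %d\n",
	       1 + sdtr[1], 1 + wdtr[1], 1 + ppr[1]);
	return 0;
}

With a complete SDTR in the buffer, msgout_index advances past end_index (4 here, the offset byte), which is exactly the condition the full-message check above tests for.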
+ */ +static int +ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) +{ + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + int reject; + int done; + int response; + + done = MSGLOOP_IN_PROG; + response = FALSE; + reject = FALSE; + tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, + devinfo->target, &tstate); + + /* + * Parse as much of the message as is available, + * rejecting it if we don't support it. When + * the entire message is available and has been + * handled, return MSGLOOP_MSGCOMPLETE, indicating + * that we have parsed an entire message. + * + * In the case of extended messages, we accept the length + * byte outright and perform more checking once we know the + * extended message type. + */ + switch (ahd->msgin_buf[0]) { + case DISCONNECT: + case SAVE_POINTERS: + case COMMAND_COMPLETE: + case RESTORE_POINTERS: + case IGNORE_WIDE_RESIDUE: + /* + * End our message loop as these are messages + * the sequencer handles on its own. + */ + done = MSGLOOP_TERMINATED; + break; + case MESSAGE_REJECT: + response = ahd_handle_msg_reject(ahd, devinfo); + fallthrough; + case NOP: + done = MSGLOOP_MSGCOMPLETE; + break; + case EXTENDED_MESSAGE: + { + /* Wait for enough of the message to begin validation */ + if (ahd->msgin_index < 2) + break; + switch (ahd->msgin_buf[2]) { + case EXTENDED_SDTR: + { + u_int period; + u_int ppr_options; + u_int offset; + u_int saved_offset; + + if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) { + reject = TRUE; + break; + } + + /* + * Wait until we have both args before validating + * and acting on this message. + * + * Add one to MSG_EXT_SDTR_LEN to account for + * the extended message preamble. + */ + if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1)) + break; + + period = ahd->msgin_buf[3]; + ppr_options = 0; + saved_offset = offset = ahd->msgin_buf[4]; + ahd_devlimited_syncrate(ahd, tinfo, &period, + &ppr_options, devinfo->role); + ahd_validate_offset(ahd, tinfo, period, &offset, + tinfo->curr.width, devinfo->role); + if (bootverbose) { + printk("(%s:%c:%d:%d): Received " + "SDTR period %x, offset %x\n\t" + "Filtered to period %x, offset %x\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun, + ahd->msgin_buf[3], saved_offset, + period, offset); + } + ahd_set_syncrate(ahd, devinfo, period, + offset, ppr_options, + AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, + /*paused*/TRUE); + + /* + * See if we initiated Sync Negotiation + * and didn't have to fall down to async + * transfers. + */ + if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_SDTR, TRUE)) { + /* We started it */ + if (saved_offset != offset) { + /* Went too low - force async */ + reject = TRUE; + } + } else { + /* + * Send our own SDTR in reply + */ + if (bootverbose + && devinfo->role == ROLE_INITIATOR) { + printk("(%s:%c:%d:%d): Target " + "Initiated SDTR\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun); + } + ahd->msgout_index = 0; + ahd->msgout_len = 0; + ahd_construct_sdtr(ahd, devinfo, + period, offset); + ahd->msgout_index = 0; + response = TRUE; + } + done = MSGLOOP_MSGCOMPLETE; + break; + } + case EXTENDED_WDTR: + { + u_int bus_width; + u_int saved_width; + u_int sending_reply; + + sending_reply = FALSE; + if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) { + reject = TRUE; + break; + } + + /* + * Wait until we have our arg before validating + * and acting on this message. + * + * Add one to MSG_EXT_WDTR_LEN to account for + * the extended message preamble. 
+ */ + if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1)) + break; + + bus_width = ahd->msgin_buf[3]; + saved_width = bus_width; + ahd_validate_width(ahd, tinfo, &bus_width, + devinfo->role); + if (bootverbose) { + printk("(%s:%c:%d:%d): Received WDTR " + "%x filtered to %x\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun, + saved_width, bus_width); + } + + if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_WDTR, TRUE)) { + /* + * Don't send a WDTR back to the + * target, since we asked first. + * If the width went higher than our + * request, reject it. + */ + if (saved_width > bus_width) { + reject = TRUE; + printk("(%s:%c:%d:%d): requested %dBit " + "transfers. Rejecting...\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun, + 8 * (0x01 << bus_width)); + bus_width = 0; + } + } else { + /* + * Send our own WDTR in reply + */ + if (bootverbose + && devinfo->role == ROLE_INITIATOR) { + printk("(%s:%c:%d:%d): Target " + "Initiated WDTR\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun); + } + ahd->msgout_index = 0; + ahd->msgout_len = 0; + ahd_construct_wdtr(ahd, devinfo, bus_width); + ahd->msgout_index = 0; + response = TRUE; + sending_reply = TRUE; + } + /* + * After a wide message, we are async, but + * some devices don't seem to honor this portion + * of the spec. Force a renegotiation of the + * sync component of our transfer agreement even + * if our goal is async. By updating our width + * after forcing the negotiation, we avoid + * renegotiating for width. + */ + ahd_update_neg_request(ahd, devinfo, tstate, + tinfo, AHD_NEG_ALWAYS); + ahd_set_width(ahd, devinfo, bus_width, + AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, + /*paused*/TRUE); + if (sending_reply == FALSE && reject == FALSE) { + + /* + * We will always have an SDTR to send. + */ + ahd->msgout_index = 0; + ahd->msgout_len = 0; + ahd_build_transfer_msg(ahd, devinfo); + ahd->msgout_index = 0; + response = TRUE; + } + done = MSGLOOP_MSGCOMPLETE; + break; + } + case EXTENDED_PPR: + { + u_int period; + u_int offset; + u_int bus_width; + u_int ppr_options; + u_int saved_width; + u_int saved_offset; + u_int saved_ppr_options; + + if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) { + reject = TRUE; + break; + } + + /* + * Wait until we have all args before validating + * and acting on this message. + * + * Add one to MSG_EXT_PPR_LEN to account for + * the extended message preamble. + */ + if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1)) + break; + + period = ahd->msgin_buf[3]; + offset = ahd->msgin_buf[5]; + bus_width = ahd->msgin_buf[6]; + saved_width = bus_width; + ppr_options = ahd->msgin_buf[7]; + /* + * According to the spec, a DT only + * period factor with no DT option + * set implies async. + */ + if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 + && period <= 9) + offset = 0; + saved_ppr_options = ppr_options; + saved_offset = offset; + + /* + * Transfer options are only available if we + * are negotiating wide. + */ + if (bus_width == 0) + ppr_options &= MSG_EXT_PPR_QAS_REQ; + + ahd_validate_width(ahd, tinfo, &bus_width, + devinfo->role); + ahd_devlimited_syncrate(ahd, tinfo, &period, + &ppr_options, devinfo->role); + ahd_validate_offset(ahd, tinfo, period, &offset, + bus_width, devinfo->role); + + if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, TRUE)) { + /* + * If we are unable to do any of the + * requested options (we went too low), + * then we'll have to reject the message. 
+ */ + if (saved_width > bus_width + || saved_offset != offset + || saved_ppr_options != ppr_options) { + reject = TRUE; + period = 0; + offset = 0; + bus_width = 0; + ppr_options = 0; + } + } else { + if (devinfo->role != ROLE_TARGET) + printk("(%s:%c:%d:%d): Target " + "Initiated PPR\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun); + else + printk("(%s:%c:%d:%d): Initiator " + "Initiated PPR\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun); + ahd->msgout_index = 0; + ahd->msgout_len = 0; + ahd_construct_ppr(ahd, devinfo, period, offset, + bus_width, ppr_options); + ahd->msgout_index = 0; + response = TRUE; + } + if (bootverbose) { + printk("(%s:%c:%d:%d): Received PPR width %x, " + "period %x, offset %x,options %x\n" + "\tFiltered to width %x, period %x, " + "offset %x, options %x\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun, + saved_width, ahd->msgin_buf[3], + saved_offset, saved_ppr_options, + bus_width, period, offset, ppr_options); + } + ahd_set_width(ahd, devinfo, bus_width, + AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, + /*paused*/TRUE); + ahd_set_syncrate(ahd, devinfo, period, + offset, ppr_options, + AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, + /*paused*/TRUE); + + done = MSGLOOP_MSGCOMPLETE; + break; + } + default: + /* Unknown extended message. Reject it. */ + reject = TRUE; + break; + } + break; + } +#ifdef AHD_TARGET_MODE + case TARGET_RESET: + ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD, + CAM_BDR_SENT, + "Bus Device Reset Received", + /*verbose_level*/0); + ahd_restart(ahd); + done = MSGLOOP_TERMINATED; + break; + case ABORT_TASK: + case ABORT_TASK_SET: + case CLEAR_TASK_SET: + { + int tag; + + /* Target mode messages */ + if (devinfo->role != ROLE_TARGET) { + reject = TRUE; + break; + } + tag = SCB_LIST_NULL; + if (ahd->msgin_buf[0] == ABORT_TASK) + tag = ahd_inb(ahd, INITIATOR_TAG); + ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, + devinfo->lun, tag, ROLE_TARGET, + CAM_REQ_ABORTED); + + tstate = ahd->enabled_targets[devinfo->our_scsiid]; + if (tstate != NULL) { + struct ahd_tmode_lstate* lstate; + + lstate = tstate->enabled_luns[devinfo->lun]; + if (lstate != NULL) { + ahd_queue_lstate_event(ahd, lstate, + devinfo->our_scsiid, + ahd->msgin_buf[0], + /*arg*/tag); + ahd_send_lstate_events(ahd, lstate); + } + } + ahd_restart(ahd); + done = MSGLOOP_TERMINATED; + break; + } +#endif + case QAS_REQUEST: +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printk("%s: QAS request. SCSISIGI == 0x%x\n", + ahd_name(ahd), ahd_inb(ahd, SCSISIGI)); +#endif + ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE; + fallthrough; + case TERMINATE_IO_PROC: + default: + reject = TRUE; + break; + } + + if (reject) { + /* + * Setup to reject the message. + */ + ahd->msgout_index = 0; + ahd->msgout_len = 1; + ahd->msgout_buf[0] = MESSAGE_REJECT; + done = MSGLOOP_MSGCOMPLETE; + response = TRUE; + } + + if (done != MSGLOOP_IN_PROG && !response) + /* Clear the outgoing message buffer */ + ahd->msgout_len = 0; + + return (done); +} + +/* + * Process a message reject message. + */ +static int +ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) +{ + /* + * What we care about here is if we had an + * outstanding SDTR or WDTR message for this + * target. If we did, this is a signal that + * the target is refusing negotiation. 
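+ * Returns non-zero if a response message has been queued for + * transmission.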
+ */ + struct scb *scb; + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + u_int scb_index; + u_int last_msg; + int response = 0; + + scb_index = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scb_index); + tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, + devinfo->our_scsiid, + devinfo->target, &tstate); + /* Might be necessary */ + last_msg = ahd_inb(ahd, LAST_MSG); + + if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, /*full*/FALSE)) { + if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, /*full*/TRUE) + && tinfo->goal.period <= AHD_SYNCRATE_PACED) { + /* + * Target may not like our SPI-4 PPR Options. + * Attempt to negotiate 80MHz which will turn + * off these options. + */ + if (bootverbose) { + printk("(%s:%c:%d:%d): PPR Rejected. " + "Trying simple U160 PPR\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun); + } + tinfo->goal.period = AHD_SYNCRATE_DT; + tinfo->goal.ppr_options &= MSG_EXT_PPR_IU_REQ + | MSG_EXT_PPR_QAS_REQ + | MSG_EXT_PPR_DT_REQ; + } else { + /* + * Target does not support the PPR message. + * Attempt to negotiate SPI-2 style. + */ + if (bootverbose) { + printk("(%s:%c:%d:%d): PPR Rejected. " + "Trying WDTR/SDTR\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun); + } + tinfo->goal.ppr_options = 0; + tinfo->curr.transport_version = 2; + tinfo->goal.transport_version = 2; + } + ahd->msgout_index = 0; + ahd->msgout_len = 0; + ahd_build_transfer_msg(ahd, devinfo); + ahd->msgout_index = 0; + response = 1; + } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_WDTR, /*full*/FALSE)) { + + /* note 8bit xfers */ + printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using " + "8bit transfers\n", ahd_name(ahd), + devinfo->channel, devinfo->target, devinfo->lun); + ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, + /*paused*/TRUE); + /* + * No need to clear the sync rate. If the target + * did not accept the command, our syncrate is + * unaffected. If the target started the negotiation, + * but rejected our response, we already cleared the + * sync rate before sending our WDTR. + */ + if (tinfo->goal.offset != tinfo->curr.offset) { + + /* Start the sync negotiation */ + ahd->msgout_index = 0; + ahd->msgout_len = 0; + ahd_build_transfer_msg(ahd, devinfo); + ahd->msgout_index = 0; + response = 1; + } + } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_SDTR, /*full*/FALSE)) { + /* note asynch xfers and clear flag */ + ahd_set_syncrate(ahd, devinfo, /*period*/0, + /*offset*/0, /*ppr_options*/0, + AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, + /*paused*/TRUE); + printk("(%s:%c:%d:%d): refuses synchronous negotiation. " + "Using asynchronous transfers\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun); + } else if ((scb->hscb->control & SIMPLE_QUEUE_TAG) != 0) { + int tag_type; + int mask; + + tag_type = (scb->hscb->control & SIMPLE_QUEUE_TAG); + + if (tag_type == SIMPLE_QUEUE_TAG) { + printk("(%s:%c:%d:%d): refuses tagged commands. " + "Performing non-tagged I/O\n", ahd_name(ahd), + devinfo->channel, devinfo->target, devinfo->lun); + ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_NONE); + mask = ~0x23; + } else { + printk("(%s:%c:%d:%d): refuses %s tagged commands. " + "Performing simple queue tagged I/O only\n", + ahd_name(ahd), devinfo->channel, devinfo->target, + devinfo->lun, tag_type == ORDERED_QUEUE_TAG + ? 
"ordered" : "head of queue"); + ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_BASIC); + mask = ~0x03; + } + + /* + * Resend the identify for this CCB as the target + * may believe that the selection is invalid otherwise. + */ + ahd_outb(ahd, SCB_CONTROL, + ahd_inb_scbram(ahd, SCB_CONTROL) & mask); + scb->hscb->control &= mask; + ahd_set_transaction_tag(scb, /*enabled*/FALSE, + /*type*/SIMPLE_QUEUE_TAG); + ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG); + ahd_assert_atn(ahd); + ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), + SCB_GET_TAG(scb)); + + /* + * Requeue all tagged commands for this target + * currently in our possession so they can be + * converted to untagged commands. + */ + ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), + SCB_GET_CHANNEL(ahd, scb), + SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, + ROLE_INITIATOR, CAM_REQUEUE_REQ, + SEARCH_COMPLETE); + } else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_IDENTIFYFLAG, TRUE)) { + /* + * Most likely the device believes that we had + * previously negotiated packetized. + */ + ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE + | MSG_FLAG_IU_REQ_CHANGED; + + ahd_force_renegotiation(ahd, devinfo); + ahd->msgout_index = 0; + ahd->msgout_len = 0; + ahd_build_transfer_msg(ahd, devinfo); + ahd->msgout_index = 0; + response = 1; + } else { + /* + * Otherwise, we ignore it. + */ + printk("%s:%c:%d: Message reject for %x -- ignored\n", + ahd_name(ahd), devinfo->channel, devinfo->target, + last_msg); + } + return (response); +} + +/* + * Process an ingnore wide residue message. + */ +static void +ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) +{ + u_int scb_index; + struct scb *scb; + + scb_index = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scb_index); + /* + * XXX Actually check data direction in the sequencer? + * Perhaps add datadir to some spare bits in the hscb? + */ + if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0 + || ahd_get_transfer_dir(scb) != CAM_DIR_IN) { + /* + * Ignore the message if we haven't + * seen an appropriate data phase yet. + */ + } else { + /* + * If the residual occurred on the last + * transfer and the transfer request was + * expected to end on an odd count, do + * nothing. Otherwise, subtract a byte + * and update the residual count accordingly. + */ + uint32_t sgptr; + + sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR); + if ((sgptr & SG_LIST_NULL) != 0 + && (ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE) + & SCB_XFERLEN_ODD) != 0) { + /* + * If the residual occurred on the last + * transfer and the transfer request was + * expected to end on an odd count, do + * nothing. + */ + } else { + uint32_t data_cnt; + uint64_t data_addr; + uint32_t sglen; + + /* Pull in the rest of the sgptr */ + sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); + data_cnt = ahd_inl_scbram(ahd, SCB_RESIDUAL_DATACNT); + if ((sgptr & SG_LIST_NULL) != 0) { + /* + * The residual data count is not updated + * for the command run to completion case. + * Explicitly zero the count. + */ + data_cnt &= ~AHD_SG_LEN_MASK; + } + data_addr = ahd_inq(ahd, SHADDR); + data_cnt += 1; + data_addr -= 1; + sgptr &= SG_PTR_MASK; + if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { + struct ahd_dma64_seg *sg; + + sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); + + /* + * The residual sg ptr points to the next S/G + * to load so we must go back one. 
+ */ + sg--; + sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; + if (sg != scb->sg_list + && sglen < (data_cnt & AHD_SG_LEN_MASK)) { + + sg--; + sglen = ahd_le32toh(sg->len); + /* + * Preserve High Address and SG_LIST + * bits while setting the count to 1. + */ + data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); + data_addr = ahd_le64toh(sg->addr) + + (sglen & AHD_SG_LEN_MASK) + - 1; + + /* + * Increment sg so it points to the + * "next" sg. + */ + sg++; + sgptr = ahd_sg_virt_to_bus(ahd, scb, + sg); + } + } else { + struct ahd_dma_seg *sg; + + sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); + + /* + * The residual sg ptr points to the next S/G + * to load so we must go back one. + */ + sg--; + sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; + if (sg != scb->sg_list + && sglen < (data_cnt & AHD_SG_LEN_MASK)) { + + sg--; + sglen = ahd_le32toh(sg->len); + /* + * Preserve High Address and SG_LIST + * bits while setting the count to 1. + */ + data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); + data_addr = ahd_le32toh(sg->addr) + + (sglen & AHD_SG_LEN_MASK) + - 1; + + /* + * Increment sg so it points to the + * "next" sg. + */ + sg++; + sgptr = ahd_sg_virt_to_bus(ahd, scb, + sg); + } + } + /* + * Toggle the "oddness" of the transfer length + * to handle this mid-transfer ignore wide + * residue. This ensures that the oddness is + * correct for subsequent data transfers. + */ + ahd_outb(ahd, SCB_TASK_ATTRIBUTE, + ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE) + ^ SCB_XFERLEN_ODD); + + ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); + ahd_outl(ahd, SCB_RESIDUAL_DATACNT, data_cnt); + /* + * The FIFO's pointers will be updated if/when the + * sequencer re-enters a data phase. + */ + } + } +} + + +/* + * Reinitialize the data pointers for the active transfer + * based on its current residual. + */ +static void +ahd_reinitialize_dataptrs(struct ahd_softc *ahd) +{ + struct scb *scb; + ahd_mode_state saved_modes; + u_int scb_index; + u_int wait; + uint32_t sgptr; + uint32_t resid; + uint64_t dataptr; + + AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK, + AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK); + + scb_index = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scb_index); + + /* + * Release and reacquire the FIFO so we + * have a clean slate. + */ + ahd_outb(ahd, DFFSXFRCTL, CLRCHN); + wait = 1000; + while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE)) + ahd_delay(100); + if (wait == 0) { + ahd_print_path(ahd, scb); + printk("ahd_reinitialize_dataptrs: Forcing FIFO free.\n"); + ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); + } + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + ahd_outb(ahd, DFFSTAT, + ahd_inb(ahd, DFFSTAT) + | (saved_modes == 0x11 ? CURRFIFO_1 : CURRFIFO_0)); + + /* + * Determine initial values for data_addr and data_cnt + * for resuming the data phase. 
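+ * Both values are derived from the residual S/G pointer and + * residual data count saved in the SCB.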
+ */ + sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); + sgptr &= SG_PTR_MASK; + + resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16) + | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8) + | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT); + + if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { + struct ahd_dma64_seg *sg; + + sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); + + /* The residual sg_ptr always points to the next sg */ + sg--; + + dataptr = ahd_le64toh(sg->addr) + + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) + - resid; + ahd_outl(ahd, HADDR + 4, dataptr >> 32); + } else { + struct ahd_dma_seg *sg; + + sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); + + /* The residual sg_ptr always points to the next sg */ + sg--; + + dataptr = ahd_le32toh(sg->addr) + + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) + - resid; + ahd_outb(ahd, HADDR + 4, + (ahd_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24); + } + ahd_outl(ahd, HADDR, dataptr); + ahd_outb(ahd, HCNT + 2, resid >> 16); + ahd_outb(ahd, HCNT + 1, resid >> 8); + ahd_outb(ahd, HCNT, resid); +} + +/* + * Handle the effects of issuing a bus device reset message. + */ +static void +ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, + u_int lun, cam_status status, char *message, + int verbose_level) +{ +#ifdef AHD_TARGET_MODE + struct ahd_tmode_tstate* tstate; +#endif + int found; + + found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, + lun, SCB_LIST_NULL, devinfo->role, + status); + +#ifdef AHD_TARGET_MODE + /* + * Send an immediate notify ccb to all target mode peripheral + * drivers affected by this action. + */ + tstate = ahd->enabled_targets[devinfo->our_scsiid]; + if (tstate != NULL) { + u_int cur_lun; + u_int max_lun; + + if (lun != CAM_LUN_WILDCARD) { + cur_lun = 0; + max_lun = AHD_NUM_LUNS - 1; + } else { + cur_lun = lun; + max_lun = lun; + } + for (;cur_lun <= max_lun; cur_lun++) { + struct ahd_tmode_lstate* lstate; + + lstate = tstate->enabled_luns[cur_lun]; + if (lstate == NULL) + continue; + + ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid, + TARGET_RESET, /*arg*/0); + ahd_send_lstate_events(ahd, lstate); + } + } +#endif + + /* + * Go back to async/narrow transfers and renegotiate. + */ + ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_CUR, /*paused*/TRUE); + ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0, + /*ppr_options*/0, AHD_TRANS_CUR, + /*paused*/TRUE); + + if (status != CAM_SEL_TIMEOUT) + ahd_send_async(ahd, devinfo->channel, devinfo->target, + CAM_LUN_WILDCARD, AC_SENT_BDR); + + if (message != NULL && bootverbose) + printk("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd), + message, devinfo->channel, devinfo->target, found); +} + +#ifdef AHD_TARGET_MODE +static void +ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, + struct scb *scb) +{ + + /* + * To facilitate adding multiple messages together, + * each routine should increment the index and len + * variables instead of setting them explicitly.
+ */ + ahd->msgout_index = 0; + ahd->msgout_len = 0; + + if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) + ahd_build_transfer_msg(ahd, devinfo); + else + panic("ahd_intr: AWAITING target message with no message"); + + ahd->msgout_index = 0; + ahd->msg_type = MSG_TYPE_TARGET_MSGIN; +} +#endif +/**************************** Initialization **********************************/ +static u_int +ahd_sglist_size(struct ahd_softc *ahd) +{ + bus_size_t list_size; + + list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG; + if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) + list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG; + return (list_size); +} + +/* + * Calculate the optimum S/G List allocation size. S/G elements used + * for a given transaction must be physically contiguous. Assume the + * OS will allocate full pages to us, so it doesn't make sense to request + * less than a page. + */ +static u_int +ahd_sglist_allocsize(struct ahd_softc *ahd) +{ + bus_size_t sg_list_increment; + bus_size_t sg_list_size; + bus_size_t max_list_size; + bus_size_t best_list_size; + + /* Start out with the minimum required for AHD_NSEG. */ + sg_list_increment = ahd_sglist_size(ahd); + sg_list_size = sg_list_increment; + + /* Get us as close as possible to a page in size. */ + while ((sg_list_size + sg_list_increment) <= PAGE_SIZE) + sg_list_size += sg_list_increment; + + /* + * Try to reduce the amount of wastage by allocating + * multiple pages. + */ + best_list_size = sg_list_size; + max_list_size = roundup(sg_list_increment, PAGE_SIZE); + if (max_list_size < 4 * PAGE_SIZE) + max_list_size = 4 * PAGE_SIZE; + if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment)) + max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment); + while ((sg_list_size + sg_list_increment) <= max_list_size + && (sg_list_size % PAGE_SIZE) != 0) { + bus_size_t new_mod; + bus_size_t best_mod; + + sg_list_size += sg_list_increment; + new_mod = sg_list_size % PAGE_SIZE; + best_mod = best_list_size % PAGE_SIZE; + if (new_mod > best_mod || new_mod == 0) { + best_list_size = sg_list_size; + } + } + return (best_list_size); +} + +/* + * Allocate a controller structure for a new device + * and perform initial initialization.
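+ * This routine takes ownership of the "name" string: it is + * attached to the softc on success and freed before returning + * NULL on any failure.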
+ */ +struct ahd_softc * +ahd_alloc(void *platform_arg, char *name) +{ + struct ahd_softc *ahd; + + ahd = kzalloc(sizeof(*ahd), GFP_ATOMIC); + if (!ahd) { + printk("aic7xxx: cannot malloc softc!\n"); + kfree(name); + return NULL; + } + + ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC); + if (ahd->seep_config == NULL) { + kfree(ahd); + kfree(name); + return (NULL); + } + LIST_INIT(&ahd->pending_scbs); + /* We don't know our unit number until the OSM sets it */ + ahd->name = name; + ahd->unit = -1; + ahd->description = NULL; + ahd->bus_description = NULL; + ahd->channel = 'A'; + ahd->chip = AHD_NONE; + ahd->features = AHD_FENONE; + ahd->bugs = AHD_BUGNONE; + ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A + | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A; + timer_setup(&ahd->stat_timer, ahd_stat_timer, 0); + ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT; + ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT; + ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT; + ahd->int_coalescing_threshold = AHD_INT_COALESCING_THRESHOLD_DEFAULT; + ahd->int_coalescing_stop_threshold = + AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT; + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MEMORY) != 0) { + printk("%s: scb size = 0x%x, hscb size = 0x%x\n", + ahd_name(ahd), (u_int)sizeof(struct scb), + (u_int)sizeof(struct hardware_scb)); + } +#endif + if (ahd_platform_alloc(ahd, platform_arg) != 0) { + ahd_free(ahd); + ahd = NULL; + } + return (ahd); +} + +int +ahd_softc_init(struct ahd_softc *ahd) +{ + + ahd->unpause = 0; + ahd->pause = PAUSE; + return (0); +} + +void +ahd_set_unit(struct ahd_softc *ahd, int unit) +{ + ahd->unit = unit; +} + +void +ahd_set_name(struct ahd_softc *ahd, char *name) +{ + kfree(ahd->name); + ahd->name = name; +} + +void +ahd_free(struct ahd_softc *ahd) +{ + int i; + + switch (ahd->init_level) { + default: + case 5: + ahd_shutdown(ahd); + fallthrough; + case 4: + ahd_dmamap_unload(ahd, ahd->shared_data_dmat, + ahd->shared_data_map.dmamap); + fallthrough; + case 3: + ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo, + ahd->shared_data_map.dmamap); + ahd_dmamap_destroy(ahd, ahd->shared_data_dmat, + ahd->shared_data_map.dmamap); + fallthrough; + case 2: + ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat); + break; + case 1: + break; + case 0: + break; + } + + ahd_platform_free(ahd); + ahd_fini_scbdata(ahd); + for (i = 0; i < AHD_NUM_TARGETS; i++) { + struct ahd_tmode_tstate *tstate; + + tstate = ahd->enabled_targets[i]; + if (tstate != NULL) { +#ifdef AHD_TARGET_MODE + int j; + + for (j = 0; j < AHD_NUM_LUNS; j++) { + struct ahd_tmode_lstate *lstate; + + lstate = tstate->enabled_luns[j]; + if (lstate != NULL) { + xpt_free_path(lstate->path); + kfree(lstate); + } + } +#endif + kfree(tstate); + } + } +#ifdef AHD_TARGET_MODE + if (ahd->black_hole != NULL) { + xpt_free_path(ahd->black_hole->path); + kfree(ahd->black_hole); + } +#endif + kfree(ahd->name); + kfree(ahd->seep_config); + kfree(ahd->saved_stack); + kfree(ahd); + return; +} + +static void +ahd_shutdown(void *arg) +{ + struct ahd_softc *ahd; + + ahd = (struct ahd_softc *)arg; + + /* + * Stop periodic timer callbacks. + */ + del_timer_sync(&ahd->stat_timer); + + /* This will reset most registers to 0, but not all */ + ahd_reset(ahd, /*reinit*/FALSE); +} + +/* + * Reset the controller and record some information about it + * that is only available just after a reset. 
If "reinit" is + * non-zero, this reset occurred after initial configuration + * and the caller requests that the chip be fully reinitialized + * to a runable state. Chip interrupts are *not* enabled after + * a reinitialization. The caller must enable interrupts via + * ahd_intr_enable(). + */ +int +ahd_reset(struct ahd_softc *ahd, int reinit) +{ + u_int sxfrctl1; + int wait; + uint32_t cmd; + + /* + * Preserve the value of the SXFRCTL1 register for all channels. + * It contains settings that affect termination and we don't want + * to disturb the integrity of the bus. + */ + ahd_pause(ahd); + ahd_update_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + sxfrctl1 = ahd_inb(ahd, SXFRCTL1); + + cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); + if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) { + uint32_t mod_cmd; + + /* + * A4 Razor #632 + * During the assertion of CHIPRST, the chip + * does not disable its parity logic prior to + * the start of the reset. This may cause a + * parity error to be detected and thus a + * spurious SERR or PERR assertion. Disable + * PERR and SERR responses during the CHIPRST. + */ + mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN); + ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, + mod_cmd, /*bytes*/2); + } + ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause); + + /* + * Ensure that the reset has finished. We delay 1000us + * prior to reading the register to make sure the chip + * has sufficiently completed its reset to handle register + * accesses. + */ + wait = 1000; + do { + ahd_delay(1000); + } while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK)); + + if (wait == 0) { + printk("%s: WARNING - Failed chip reset! " + "Trying to initialize anyway.\n", ahd_name(ahd)); + } + ahd_outb(ahd, HCNTRL, ahd->pause); + + if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) { + /* + * Clear any latched PCI error status and restore + * previous SERR and PERR response enables. + */ + ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, + 0xFF, /*bytes*/1); + ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, + cmd, /*bytes*/2); + } + + /* + * Mode should be SCSI after a chip reset, but lets + * set it just to be safe. We touch the MODE_PTR + * register directly so as to bypass the lazy update + * code in ahd_set_modes(). + */ + ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + ahd_outb(ahd, MODE_PTR, + ahd_build_mode_state(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI)); + + /* + * Restore SXFRCTL1. + * + * We must always initialize STPWEN to 1 before we + * restore the saved values. STPWEN is initialized + * to a tri-state condition which can only be cleared + * by turning it on. + */ + ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN); + ahd_outb(ahd, SXFRCTL1, sxfrctl1); + + /* Determine chip configuration */ + ahd->features &= ~AHD_WIDE; + if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0) + ahd->features |= AHD_WIDE; + + /* + * If a recovery action has forced a chip reset, + * re-initialize the chip to our liking. 
+ */ + if (reinit != 0) + ahd_chip_init(ahd); + + return (0); +} + +/* + * Determine the number of SCBs available on the controller + */ +static int +ahd_probe_scbs(struct ahd_softc *ahd) { + int i; + + AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), + ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); + for (i = 0; i < AHD_SCB_MAX; i++) { + int j; + + ahd_set_scbptr(ahd, i); + ahd_outw(ahd, SCB_BASE, i); + for (j = 2; j < 64; j++) + ahd_outb(ahd, SCB_BASE+j, 0); + /* Start out life as unallocated (needing an abort) */ + ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE); + if (ahd_inw_scbram(ahd, SCB_BASE) != i) + break; + ahd_set_scbptr(ahd, 0); + if (ahd_inw_scbram(ahd, SCB_BASE) != 0) + break; + } + return (i); +} + +static void +ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) +{ + dma_addr_t *baddr; + + baddr = (dma_addr_t *)arg; + *baddr = segs->ds_addr; +} + +static void +ahd_initialize_hscbs(struct ahd_softc *ahd) +{ + int i; + + for (i = 0; i < ahd->scb_data.maxhscbs; i++) { + ahd_set_scbptr(ahd, i); + + /* Clear the control byte. */ + ahd_outb(ahd, SCB_CONTROL, 0); + + /* Set the next pointer */ + ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL); + } +} + +static int +ahd_init_scbdata(struct ahd_softc *ahd) +{ + struct scb_data *scb_data; + int i; + + scb_data = &ahd->scb_data; + TAILQ_INIT(&scb_data->free_scbs); + for (i = 0; i < AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT; i++) + LIST_INIT(&scb_data->free_scb_lists[i]); + LIST_INIT(&scb_data->any_dev_free_scb_list); + SLIST_INIT(&scb_data->hscb_maps); + SLIST_INIT(&scb_data->sg_maps); + SLIST_INIT(&scb_data->sense_maps); + + /* Determine the number of hardware SCBs and initialize them */ + scb_data->maxhscbs = ahd_probe_scbs(ahd); + if (scb_data->maxhscbs == 0) { + printk("%s: No SCB space found\n", ahd_name(ahd)); + return (ENXIO); + } + + ahd_initialize_hscbs(ahd); + + /* + * Create our DMA tags. These tags define the kinds of device + * accessible memory allocations and memory mappings we will + * need to perform during normal operation. + * + * Unless we need to further restrict the allocation, we rely + * on the restrictions of the parent dmat, hence the common + * use of MAXADDR and MAXSIZE. + */ + + /* DMA tag for our hardware scb structures */ + if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, + /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, + /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, + /*highaddr*/BUS_SPACE_MAXADDR, + /*filter*/NULL, /*filterarg*/NULL, + PAGE_SIZE, /*nsegments*/1, + /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, + /*flags*/0, &scb_data->hscb_dmat) != 0) { + goto error_exit; + } + + scb_data->init_level++; + + /* DMA tag for our S/G structures. */ + if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/8, + /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, + /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, + /*highaddr*/BUS_SPACE_MAXADDR, + /*filter*/NULL, /*filterarg*/NULL, + ahd_sglist_allocsize(ahd), /*nsegments*/1, + /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, + /*flags*/0, &scb_data->sg_dmat) != 0) { + goto error_exit; + } +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MEMORY) != 0) + printk("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd), + ahd_sglist_allocsize(ahd)); +#endif + + scb_data->init_level++; + + /* DMA tag for our sense buffers. 
We allocate in page sized chunks */ + if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, + /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, + /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, + /*highaddr*/BUS_SPACE_MAXADDR, + /*filter*/NULL, /*filterarg*/NULL, + PAGE_SIZE, /*nsegments*/1, + /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, + /*flags*/0, &scb_data->sense_dmat) != 0) { + goto error_exit; + } + + scb_data->init_level++; + + /* Perform initial CCB allocation */ + ahd_alloc_scbs(ahd); + + if (scb_data->numscbs == 0) { + printk("%s: ahd_init_scbdata - " + "Unable to allocate initial scbs\n", + ahd_name(ahd)); + goto error_exit; + } + + /* + * Note that we were successful + */ + return (0); + +error_exit: + + return (ENOMEM); +} + +static struct scb * +ahd_find_scb_by_tag(struct ahd_softc *ahd, u_int tag) +{ + struct scb *scb; + + /* + * Look on the pending list. + */ + LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { + if (SCB_GET_TAG(scb) == tag) + return (scb); + } + + /* + * Then on all of the collision free lists. + */ + TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { + struct scb *list_scb; + + list_scb = scb; + do { + if (SCB_GET_TAG(list_scb) == tag) + return (list_scb); + list_scb = LIST_NEXT(list_scb, collision_links); + } while (list_scb); + } + + /* + * And finally on the generic free list. + */ + LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { + if (SCB_GET_TAG(scb) == tag) + return (scb); + } + + return (NULL); +} + +static void +ahd_fini_scbdata(struct ahd_softc *ahd) +{ + struct scb_data *scb_data; + + scb_data = &ahd->scb_data; + if (scb_data == NULL) + return; + + switch (scb_data->init_level) { + default: + case 7: + { + struct map_node *sns_map; + + while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) { + SLIST_REMOVE_HEAD(&scb_data->sense_maps, links); + ahd_dmamap_unload(ahd, scb_data->sense_dmat, + sns_map->dmamap); + ahd_dmamem_free(ahd, scb_data->sense_dmat, + sns_map->vaddr, sns_map->dmamap); + kfree(sns_map); + } + ahd_dma_tag_destroy(ahd, scb_data->sense_dmat); + } + fallthrough; + case 6: + { + struct map_node *sg_map; + + while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) { + SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); + ahd_dmamap_unload(ahd, scb_data->sg_dmat, + sg_map->dmamap); + ahd_dmamem_free(ahd, scb_data->sg_dmat, + sg_map->vaddr, sg_map->dmamap); + kfree(sg_map); + } + ahd_dma_tag_destroy(ahd, scb_data->sg_dmat); + } + fallthrough; + case 5: + { + struct map_node *hscb_map; + + while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) { + SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links); + ahd_dmamap_unload(ahd, scb_data->hscb_dmat, + hscb_map->dmamap); + ahd_dmamem_free(ahd, scb_data->hscb_dmat, + hscb_map->vaddr, hscb_map->dmamap); + kfree(hscb_map); + } + ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat); + } + fallthrough; + case 4: + case 3: + case 2: + case 1: + case 0: + break; + } +} + +/* + * DSP filter Bypass must be enabled until the first selection + * after a change in bus mode (Razor #491 and #493). 
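+ * ahd_setup_iocell_workaround() arms the bypass and the selection + * interrupts used to detect that event; ahd_iocell_first_selection() + * tears the workaround back down once the first selection is seen.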
+ */ +static void +ahd_setup_iocell_workaround(struct ahd_softc *ahd) +{ + ahd_mode_state saved_modes; + + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL) + | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS); + ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI)); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MISC) != 0) + printk("%s: Setting up iocell workaround\n", ahd_name(ahd)); +#endif + ahd_restore_modes(ahd, saved_modes); + ahd->flags &= ~AHD_HAD_FIRST_SEL; +} + +static void +ahd_iocell_first_selection(struct ahd_softc *ahd) +{ + ahd_mode_state saved_modes; + u_int sblkctl; + + if ((ahd->flags & AHD_HAD_FIRST_SEL) != 0) + return; + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + sblkctl = ahd_inb(ahd, SBLKCTL); + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MISC) != 0) + printk("%s: iocell first selection\n", ahd_name(ahd)); +#endif + if ((sblkctl & ENAB40) != 0) { + ahd_outb(ahd, DSPDATACTL, + ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MISC) != 0) + printk("%s: BYPASS now disabled\n", ahd_name(ahd)); +#endif + } + ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI)); + ahd_outb(ahd, CLRINT, CLRSCSIINT); + ahd_restore_modes(ahd, saved_modes); + ahd->flags |= AHD_HAD_FIRST_SEL; +} + +/*************************** SCB Management ***********************************/ +static void +ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx) +{ + struct scb_list *free_list; + struct scb_tailq *free_tailq; + struct scb *first_scb; + + scb->flags |= SCB_ON_COL_LIST; + AHD_SET_SCB_COL_IDX(scb, col_idx); + free_list = &ahd->scb_data.free_scb_lists[col_idx]; + free_tailq = &ahd->scb_data.free_scbs; + first_scb = LIST_FIRST(free_list); + if (first_scb != NULL) { + LIST_INSERT_AFTER(first_scb, scb, collision_links); + } else { + LIST_INSERT_HEAD(free_list, scb, collision_links); + TAILQ_INSERT_TAIL(free_tailq, scb, links.tqe); + } +} + +static void +ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb) +{ + struct scb_list *free_list; + struct scb_tailq *free_tailq; + struct scb *first_scb; + u_int col_idx; + + scb->flags &= ~SCB_ON_COL_LIST; + col_idx = AHD_GET_SCB_COL_IDX(ahd, scb); + free_list = &ahd->scb_data.free_scb_lists[col_idx]; + free_tailq = &ahd->scb_data.free_scbs; + first_scb = LIST_FIRST(free_list); + if (first_scb == scb) { + struct scb *next_scb; + + /* + * Maintain order in the collision free + * lists for fairness if this device has + * other colliding tags active. + */ + next_scb = LIST_NEXT(scb, collision_links); + if (next_scb != NULL) { + TAILQ_INSERT_AFTER(free_tailq, scb, + next_scb, links.tqe); + } + TAILQ_REMOVE(free_tailq, scb, links.tqe); + } + LIST_REMOVE(scb, collision_links); +} + +/* + * Get a free scb. If there are none, see if we can allocate a new SCB. 
+ */ +struct scb * +ahd_get_scb(struct ahd_softc *ahd, u_int col_idx) +{ + struct scb *scb; + int tries; + + tries = 0; +look_again: + TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { + if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) { + ahd_rem_col_list(ahd, scb); + goto found; + } + } + if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL) { + + if (tries++ != 0) + return (NULL); + ahd_alloc_scbs(ahd); + goto look_again; + } + LIST_REMOVE(scb, links.le); + if (col_idx != AHD_NEVER_COL_IDX + && (scb->col_scb != NULL) + && (scb->col_scb->flags & SCB_ACTIVE) == 0) { + LIST_REMOVE(scb->col_scb, links.le); + ahd_add_col_list(ahd, scb->col_scb, col_idx); + } +found: + scb->flags |= SCB_ACTIVE; + return (scb); +} + +/* + * Return an SCB resource to the free list. + */ +void +ahd_free_scb(struct ahd_softc *ahd, struct scb *scb) +{ + /* Clean up for the next user */ + scb->flags = SCB_FLAG_NONE; + scb->hscb->control = 0; + ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL; + + if (scb->col_scb == NULL) { + + /* + * No collision possible. Just free normally. + */ + LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, + scb, links.le); + } else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) { + + /* + * The SCB we might have collided with is on + * a free collision list. Put both SCBs on + * the generic list. + */ + ahd_rem_col_list(ahd, scb->col_scb); + LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, + scb, links.le); + LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, + scb->col_scb, links.le); + } else if ((scb->col_scb->flags + & (SCB_PACKETIZED|SCB_ACTIVE)) == SCB_ACTIVE + && (scb->col_scb->hscb->control & TAG_ENB) != 0) { + + /* + * The SCB we might collide with on the next allocation + * is still active in a non-packetized, tagged, context. + * Put us on the SCB collision list. + */ + ahd_add_col_list(ahd, scb, + AHD_GET_SCB_COL_IDX(ahd, scb->col_scb)); + } else { + /* + * The SCB we might collide with on the next allocation + * is either active in a packetized context, or free. + * Since we can't collide, put this SCB on the generic + * free list. 
+ */ + LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, + scb, links.le); + } + + ahd_platform_scb_free(ahd, scb); +} + +static void +ahd_alloc_scbs(struct ahd_softc *ahd) +{ + struct scb_data *scb_data; + struct scb *next_scb; + struct hardware_scb *hscb; + struct map_node *hscb_map; + struct map_node *sg_map; + struct map_node *sense_map; + uint8_t *segs; + uint8_t *sense_data; + dma_addr_t hscb_busaddr; + dma_addr_t sg_busaddr; + dma_addr_t sense_busaddr; + int newcount; + int i; + + scb_data = &ahd->scb_data; + if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC) + /* Can't allocate any more */ + return; + + if (scb_data->scbs_left != 0) { + int offset; + + offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left; + hscb_map = SLIST_FIRST(&scb_data->hscb_maps); + hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset]; + hscb_busaddr = hscb_map->physaddr + (offset * sizeof(*hscb)); + } else { + hscb_map = kmalloc(sizeof(*hscb_map), GFP_ATOMIC); + + if (hscb_map == NULL) + return; + + /* Allocate the next batch of hardware SCBs */ + if (ahd_dmamem_alloc(ahd, scb_data->hscb_dmat, + (void **)&hscb_map->vaddr, + BUS_DMA_NOWAIT, &hscb_map->dmamap) != 0) { + kfree(hscb_map); + return; + } + + SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links); + + ahd_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap, + hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, + &hscb_map->physaddr, /*flags*/0); + + hscb = (struct hardware_scb *)hscb_map->vaddr; + hscb_busaddr = hscb_map->physaddr; + scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb); + } + + if (scb_data->sgs_left != 0) { + int offset; + + offset = ((ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd)) + - scb_data->sgs_left) * ahd_sglist_size(ahd); + sg_map = SLIST_FIRST(&scb_data->sg_maps); + segs = sg_map->vaddr + offset; + sg_busaddr = sg_map->physaddr + offset; + } else { + sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC); + + if (sg_map == NULL) + return; + + /* Allocate the next batch of S/G lists */ + if (ahd_dmamem_alloc(ahd, scb_data->sg_dmat, + (void **)&sg_map->vaddr, + BUS_DMA_NOWAIT, &sg_map->dmamap) != 0) { + kfree(sg_map); + return; + } + + SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); + + ahd_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap, + sg_map->vaddr, ahd_sglist_allocsize(ahd), + ahd_dmamap_cb, &sg_map->physaddr, /*flags*/0); + + segs = sg_map->vaddr; + sg_busaddr = sg_map->physaddr; + scb_data->sgs_left = + ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd); +#ifdef AHD_DEBUG + if (ahd_debug & AHD_SHOW_MEMORY) + printk("Mapped SG data\n"); +#endif + } + + if (scb_data->sense_left != 0) { + int offset; + + offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left); + sense_map = SLIST_FIRST(&scb_data->sense_maps); + sense_data = sense_map->vaddr + offset; + sense_busaddr = sense_map->physaddr + offset; + } else { + sense_map = kmalloc(sizeof(*sense_map), GFP_ATOMIC); + + if (sense_map == NULL) + return; + + /* Allocate the next batch of sense buffers */ + if (ahd_dmamem_alloc(ahd, scb_data->sense_dmat, + (void **)&sense_map->vaddr, + BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) { + kfree(sense_map); + return; + } + + SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links); + + ahd_dmamap_load(ahd, scb_data->sense_dmat, sense_map->dmamap, + sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, + &sense_map->physaddr, /*flags*/0); + + sense_data = sense_map->vaddr; + sense_busaddr = sense_map->physaddr; + scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE; +#ifdef AHD_DEBUG + if (ahd_debug & AHD_SHOW_MEMORY) + printk("Mapped 
sense data\n"); +#endif + } + + newcount = min(scb_data->sense_left, scb_data->scbs_left); + newcount = min(newcount, scb_data->sgs_left); + newcount = min(newcount, (AHD_SCB_MAX_ALLOC - scb_data->numscbs)); + for (i = 0; i < newcount; i++) { + struct scb_platform_data *pdata; + u_int col_tag; + + next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC); + if (next_scb == NULL) + break; + + pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC); + if (pdata == NULL) { + kfree(next_scb); + break; + } + next_scb->platform_data = pdata; + next_scb->hscb_map = hscb_map; + next_scb->sg_map = sg_map; + next_scb->sense_map = sense_map; + next_scb->sg_list = segs; + next_scb->sense_data = sense_data; + next_scb->sense_busaddr = sense_busaddr; + memset(hscb, 0, sizeof(*hscb)); + next_scb->hscb = hscb; + hscb->hscb_busaddr = ahd_htole32(hscb_busaddr); + + /* + * The sequencer always starts with the second entry. + * The first entry is embedded in the scb. + */ + next_scb->sg_list_busaddr = sg_busaddr; + if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) + next_scb->sg_list_busaddr + += sizeof(struct ahd_dma64_seg); + else + next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg); + next_scb->ahd_softc = ahd; + next_scb->flags = SCB_FLAG_NONE; + next_scb->hscb->tag = ahd_htole16(scb_data->numscbs); + col_tag = scb_data->numscbs ^ 0x100; + next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag); + if (next_scb->col_scb != NULL) + next_scb->col_scb->col_scb = next_scb; + ahd_free_scb(ahd, next_scb); + hscb++; + hscb_busaddr += sizeof(*hscb); + segs += ahd_sglist_size(ahd); + sg_busaddr += ahd_sglist_size(ahd); + sense_data += AHD_SENSE_BUFSIZE; + sense_busaddr += AHD_SENSE_BUFSIZE; + scb_data->numscbs++; + scb_data->sense_left--; + scb_data->scbs_left--; + scb_data->sgs_left--; + } +} + +void +ahd_controller_info(struct ahd_softc *ahd, char *buf) +{ + const char *speed; + const char *type; + int len; + + len = sprintf(buf, "%s: ", ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]); + buf += len; + + speed = "Ultra320 "; + if ((ahd->features & AHD_WIDE) != 0) { + type = "Wide "; + } else { + type = "Single "; + } + len = sprintf(buf, "%s%sChannel %c, SCSI Id=%d, ", + speed, type, ahd->channel, ahd->our_id); + buf += len; + + sprintf(buf, "%s, %d SCBs", ahd->bus_description, + ahd->scb_data.maxhscbs); +} + +static const char *channel_strings[] = { + "Primary Low", + "Primary High", + "Secondary Low", + "Secondary High" +}; + +static const char *termstat_strings[] = { + "Terminated Correctly", + "Over Terminated", + "Under Terminated", + "Not Configured" +}; + +/***************************** Timer Facilities *******************************/ +static void +ahd_timer_reset(struct timer_list *timer, int usec) +{ + del_timer(timer); + timer->expires = jiffies + (usec * HZ)/1000000; + add_timer(timer); +} + +/* + * Start the board, ready for normal operation + */ +int +ahd_init(struct ahd_softc *ahd) +{ + uint8_t *next_vaddr; + dma_addr_t next_baddr; + size_t driver_data_size; + int i; + int error; + u_int warn_user; + uint8_t current_sensing; + uint8_t fstat; + + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + + ahd->stack_size = ahd_probe_stack_size(ahd); + ahd->saved_stack = kmalloc_array(ahd->stack_size, sizeof(uint16_t), + GFP_ATOMIC); + if (ahd->saved_stack == NULL) + return (ENOMEM); + + /* + * Verify that the compiler hasn't over-aggressively + * padded important structures. 
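+ * The sequencer firmware relies on a 64 byte hardware SCB layout, + * so any compiler padding here is fatal.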
+ */ + if (sizeof(struct hardware_scb) != 64) + panic("Hardware SCB size is incorrect"); + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0) + ahd->flags |= AHD_SEQUENCER_DEBUG; +#endif + + /* + * Default to allowing initiator operations. + */ + ahd->flags |= AHD_INITIATORROLE; + + /* + * Only allow target mode features if this unit has them enabled. + */ + if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0) + ahd->features &= ~AHD_TARGETMODE; + + ahd->init_level++; + + /* + * DMA tag for our command fifos and other data in system memory + * the card's sequencer must be able to access. For initiator + * roles, we need to allocate space for the qoutfifo. When providing + * for the target mode role, we must additionally provide space for + * the incoming target command fifo. + */ + driver_data_size = AHD_SCB_MAX * sizeof(*ahd->qoutfifo) + + sizeof(struct hardware_scb); + if ((ahd->features & AHD_TARGETMODE) != 0) + driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd); + if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) + driver_data_size += PKT_OVERRUN_BUFSIZE; + if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, + /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, + /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, + /*highaddr*/BUS_SPACE_MAXADDR, + /*filter*/NULL, /*filterarg*/NULL, + driver_data_size, + /*nsegments*/1, + /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, + /*flags*/0, &ahd->shared_data_dmat) != 0) { + return (ENOMEM); + } + + ahd->init_level++; + + /* Allocation of driver data */ + if (ahd_dmamem_alloc(ahd, ahd->shared_data_dmat, + (void **)&ahd->shared_data_map.vaddr, + BUS_DMA_NOWAIT, + &ahd->shared_data_map.dmamap) != 0) { + return (ENOMEM); + } + + ahd->init_level++; + + /* And permanently map it in */ + ahd_dmamap_load(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, + ahd->shared_data_map.vaddr, driver_data_size, + ahd_dmamap_cb, &ahd->shared_data_map.physaddr, + /*flags*/0); + ahd->qoutfifo = (struct ahd_completion *)ahd->shared_data_map.vaddr; + next_vaddr = (uint8_t *)&ahd->qoutfifo[AHD_QOUT_SIZE]; + next_baddr = ahd->shared_data_map.physaddr + + AHD_QOUT_SIZE*sizeof(struct ahd_completion); + if ((ahd->features & AHD_TARGETMODE) != 0) { + ahd->targetcmds = (struct target_cmd *)next_vaddr; + next_vaddr += AHD_TMODE_CMDS * sizeof(struct target_cmd); + next_baddr += AHD_TMODE_CMDS * sizeof(struct target_cmd); + } + + if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { + ahd->overrun_buf = next_vaddr; + next_vaddr += PKT_OVERRUN_BUFSIZE; + next_baddr += PKT_OVERRUN_BUFSIZE; + } + + /* + * We need one SCB to serve as the "next SCB". Since the + * tag identifier in this SCB will never be used, there is + * no point in using a valid HSCB tag from an SCB pulled from + * the standard free pool. So, we allocate this "sentinel" + * specially from the DMA safe memory chunk used for the QOUTFIFO. + */ + ahd->next_queued_hscb = (struct hardware_scb *)next_vaddr; + ahd->next_queued_hscb_map = &ahd->shared_data_map; + ahd->next_queued_hscb->hscb_busaddr = ahd_htole32(next_baddr); + + ahd->init_level++; + + /* Allocate SCB data now that buffer_dmat is initialized */ + if (ahd_init_scbdata(ahd) != 0) + return (ENOMEM); + + if ((ahd->flags & AHD_INITIATORROLE) == 0) + ahd->flags &= ~AHD_RESET_BUS_A; + + /* + * Before committing these settings to the chip, give + * the OSM one last chance to modify our configuration. + */ + ahd_platform_init(ahd); + + /* Bring up the chip. 
*/ + ahd_chip_init(ahd); + + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + + if ((ahd->flags & AHD_CURRENT_SENSING) == 0) + goto init_done; + + /* + * Verify termination based on current draw and + * warn user if the bus is over/under terminated. + */ + error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, + CURSENSE_ENB); + if (error != 0) { + printk("%s: current sensing timeout 1\n", ahd_name(ahd)); + goto init_done; + } + for (i = 20, fstat = FLX_FSTAT_BUSY; + (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) { + error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat); + if (error != 0) { + printk("%s: current sensing timeout 2\n", + ahd_name(ahd)); + goto init_done; + } + } + if (i == 0) { + printk("%s: Timed out during current-sensing test\n", + ahd_name(ahd)); + goto init_done; + } + + /* Latch Current Sensing status. */ + error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing); + if (error != 0) { + printk("%s: current sensing timeout 3\n", ahd_name(ahd)); + goto init_done; + } + + /* Disable current sensing. */ + ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0); + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) { + printk("%s: current_sensing == 0x%x\n", + ahd_name(ahd), current_sensing); + } +#endif + warn_user = 0; + for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) { + u_int term_stat; + + term_stat = (current_sensing & FLX_CSTAT_MASK); + switch (term_stat) { + case FLX_CSTAT_OVER: + case FLX_CSTAT_UNDER: + warn_user++; + fallthrough; + case FLX_CSTAT_INVALID: + case FLX_CSTAT_OKAY: + if (warn_user == 0 && bootverbose == 0) + break; + printk("%s: %s Channel %s\n", ahd_name(ahd), + channel_strings[i], termstat_strings[term_stat]); + break; + } + } + if (warn_user) { + printk("%s: WARNING. Termination is not configured correctly.\n" + "%s: WARNING. SCSI bus operations may FAIL.\n", + ahd_name(ahd), ahd_name(ahd)); + } +init_done: + ahd_restart(ahd); + ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US); + return (0); +} + +/* + * (Re)initialize chip state after a chip reset. + */ +static void +ahd_chip_init(struct ahd_softc *ahd) +{ + uint32_t busaddr; + u_int sxfrctl1; + u_int scsiseq_template; + u_int wait; + u_int i; + u_int target; + + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + /* + * Take the LED out of diagnostic mode + */ + ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON)); + + /* + * Return HS_MAILBOX to its default value. + */ + ahd->hs_mailbox = 0; + ahd_outb(ahd, HS_MAILBOX, 0); + + /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */ + ahd_outb(ahd, IOWNID, ahd->our_id); + ahd_outb(ahd, TOWNID, ahd->our_id); + sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0; + sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0; + if ((ahd->bugs & AHD_LONG_SETIMO_BUG) + && (ahd->seltime != STIMESEL_MIN)) { + /* + * The selection timer duration is twice as long + * as it should be. Halve it by adding "1" to + * the user specified setting. + */ + sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ; + } else { + sxfrctl1 |= ahd->seltime; + } + + ahd_outb(ahd, SXFRCTL0, DFON); + ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN); + ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); + + /* + * Now that termination is set, wait for up + * to 500ms for our transceivers to settle. If + * the adapter does not have a cable attached, + * the transceivers may never settle, so don't + * complain if we fail here.
+ */ + for (wait = 10000; + (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; + wait--) + ahd_delay(100); + + /* Clear any false bus resets due to the transceivers settling */ + ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); + ahd_outb(ahd, CLRINT, CLRSCSIINT); + + /* Initialize mode specific S/G state. */ + for (i = 0; i < 2; i++) { + ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); + ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); + ahd_outb(ahd, SG_STATE, 0); + ahd_outb(ahd, CLRSEQINTSRC, 0xFF); + ahd_outb(ahd, SEQIMODE, + ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT + |ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD); + } + + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN); + ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75); + ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN); + ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR); + if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { + ahd_outb(ahd, OPTIONMODE, AUTOACKEN|AUTO_MSGOUT_DE); + } else { + ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE); + } + ahd_outb(ahd, SCSCHKN, CURRFIFODEF|WIDERESEN|SHVALIDSTDIS); + if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX) + /* + * Do not issue a target abort when a split completion + * error occurs. Let our PCIX interrupt handler deal + * with it instead. H2A4 Razor #625 + */ + ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS); + + if ((ahd->bugs & AHD_LQOOVERRUN_BUG) != 0) + ahd_outb(ahd, LQOSCSCTL, LQONOCHKOVER); + + /* + * Tweak IOCELL settings. + */ + if ((ahd->flags & AHD_HP_BOARD) != 0) { + for (i = 0; i < NUMDSPS; i++) { + ahd_outb(ahd, DSPSELECT, i); + ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_HP_DEFAULT); + } +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MISC) != 0) + printk("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd), + WRTBIASCTL_HP_DEFAULT); +#endif + } + ahd_setup_iocell_workaround(ahd); + + /* + * Enable LQI Manager interrupts. + */ + ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT + | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI + | ENLQIOVERI_LQ|ENLQIOVERI_NLQ); + ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC); + /* + * We choose to have the sequencer catch LQOPHCHGINPKT errors + * manually for the command phase at the start of a packetized + * selection case. ENLQOBUSFREE should be made redundant by + * the BUSFREE interrupt, but it seems that some LQOBUSFREE + * events fail to assert the BUSFREE interrupt so we must + * also enable LQOBUSFREE interrupts. + */ + ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE); + + /* + * Setup sequencer interrupt handlers. + */ + ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr)); + ahd_outw(ahd, INTVEC2_ADDR, ahd_resolve_seqaddr(ahd, LABEL_timer_isr)); + + /* + * Setup SCB Offset registers. 
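+ * These registers tell the sequencer where the fields it uses + * live within struct hardware_scb.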
+ */ + if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { + ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, + pkt_long_lun)); + } else { + ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun)); + } + ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len)); + ahd_outb(ahd, ATTRPTR, offsetof(struct hardware_scb, task_attribute)); + ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management)); + ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb, + shared_data.idata.cdb)); + ahd_outb(ahd, QNEXTPTR, + offsetof(struct hardware_scb, next_hscb_busaddr)); + ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET); + ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control)); + if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { + ahd_outb(ahd, LUNLEN, + sizeof(ahd->next_queued_hscb->pkt_long_lun) - 1); + } else { + ahd_outb(ahd, LUNLEN, LUNLEN_SINGLE_LEVEL_LUN); + } + ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1); + ahd_outb(ahd, MAXCMD, 0xFF); + ahd_outb(ahd, SCBAUTOPTR, + AUSCBPTR_EN | offsetof(struct hardware_scb, tag)); + + /* We haven't been enabled for target mode yet. */ + ahd_outb(ahd, MULTARGID, 0); + ahd_outb(ahd, MULTARGID + 1, 0); + + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + /* Initialize the negotiation table. */ + if ((ahd->features & AHD_NEW_IOCELL_OPTS) == 0) { + /* + * Clear the spare bytes in the neg table to avoid + * spurious parity errors. + */ + for (target = 0; target < AHD_NUM_TARGETS; target++) { + ahd_outb(ahd, NEGOADDR, target); + ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PER_DEV0); + for (i = 0; i < AHD_NUM_PER_DEV_ANNEXCOLS; i++) + ahd_outb(ahd, ANNEXDAT, 0); + } + } + for (target = 0; target < AHD_NUM_TARGETS; target++) { + struct ahd_devinfo devinfo; + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + + tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, + target, &tstate); + ahd_compile_devinfo(&devinfo, ahd->our_id, + target, CAM_LUN_WILDCARD, + 'A', ROLE_INITIATOR); + ahd_update_neg_table(ahd, &devinfo, &tinfo->curr); + } + + ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR); + ahd_outb(ahd, CLRINT, CLRSCSIINT); + +#ifdef NEEDS_MORE_TESTING + /* + * Always enable abort on incoming L_Qs if this feature is + * supported. We use this to catch invalid SCB references. + */ + if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0) + ahd_outb(ahd, LQCTL1, ABORTPENDING); + else +#endif + ahd_outb(ahd, LQCTL1, 0); + + /* All of our queues are empty */ + ahd->qoutfifonext = 0; + ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID; + ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID); + for (i = 0; i < AHD_QOUT_SIZE; i++) + ahd->qoutfifo[i].valid_tag = 0; + ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD); + + ahd->qinfifonext = 0; + for (i = 0; i < AHD_QIN_SIZE; i++) + ahd->qinfifo[i] = SCB_LIST_NULL; + + if ((ahd->features & AHD_TARGETMODE) != 0) { + /* All target command blocks start out invalid. */ + for (i = 0; i < AHD_TMODE_CMDS; i++) + ahd->targetcmds[i].cmd_valid = 0; + ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD); + ahd->tqinfifonext = 1; + ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1); + ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); + } + + /* Initialize Scratch Ram. 
*/ + ahd_outb(ahd, SEQ_FLAGS, 0); + ahd_outb(ahd, SEQ_FLAGS2, 0); + + /* We don't have any waiting selections */ + ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL); + ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL); + ahd_outw(ahd, MK_MESSAGE_SCB, SCB_LIST_NULL); + ahd_outw(ahd, MK_MESSAGE_SCSIID, 0xFF); + for (i = 0; i < AHD_NUM_TARGETS; i++) + ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL); + + /* + * Nobody is waiting to be DMAed into the QOUTFIFO. + */ + ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL); + ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL); + ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL); + ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL); + ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL); + + /* + * The Freeze Count is 0. + */ + ahd->qfreeze_cnt = 0; + ahd_outw(ahd, QFREEZE_COUNT, 0); + ahd_outw(ahd, KERNEL_QFREEZE_COUNT, 0); + + /* + * Tell the sequencer where it can find our arrays in memory. + */ + busaddr = ahd->shared_data_map.physaddr; + ahd_outl(ahd, SHARED_DATA_ADDR, busaddr); + ahd_outl(ahd, QOUTFIFO_NEXT_ADDR, busaddr); + + /* + * Setup the allowed SCSI Sequences based on operational mode. + * If we are a target, we'll enable select in operations once + * we've had a lun enabled. + */ + scsiseq_template = ENAUTOATNP; + if ((ahd->flags & AHD_INITIATORROLE) != 0) + scsiseq_template |= ENRSELI; + ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template); + + /* There are no busy SCBs yet. */ + for (target = 0; target < AHD_NUM_TARGETS; target++) { + int lun; + + for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++) + ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun)); + } + + /* + * Initialize the group code to command length table. + * Vendor Unique codes are set to 0 so we only capture + * the first byte of the cdb. These can be overridden + * when target mode is enabled. + */ + ahd_outb(ahd, CMDSIZE_TABLE, 5); + ahd_outb(ahd, CMDSIZE_TABLE + 1, 9); + ahd_outb(ahd, CMDSIZE_TABLE + 2, 9); + ahd_outb(ahd, CMDSIZE_TABLE + 3, 0); + ahd_outb(ahd, CMDSIZE_TABLE + 4, 15); + ahd_outb(ahd, CMDSIZE_TABLE + 5, 11); + ahd_outb(ahd, CMDSIZE_TABLE + 6, 0); + ahd_outb(ahd, CMDSIZE_TABLE + 7, 0); + + /* Tell the sequencer of our initial queue positions */ + ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); + ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512); + ahd->qinfifonext = 0; + ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); + ahd_set_hescb_qoff(ahd, 0); + ahd_set_snscb_qoff(ahd, 0); + ahd_set_sescb_qoff(ahd, 0); + ahd_set_sdscb_qoff(ahd, 0); + + /* + * Tell the sequencer which SCB will be the next one it receives. + */ + busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr); + ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); + + /* + * Default to coalescing disabled. + */ + ahd_outw(ahd, INT_COALESCING_CMDCOUNT, 0); + ahd_outw(ahd, CMDS_PENDING, 0); + ahd_update_coalescing_values(ahd, ahd->int_coalescing_timer, + ahd->int_coalescing_maxcmds, + ahd->int_coalescing_mincmds); + ahd_enable_coalescing(ahd, FALSE); + + ahd_loadseq(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + + if (ahd->features & AHD_AIC79XXB_SLOWCRC) { + u_int negodat3 = ahd_inb(ahd, NEGCONOPTS); + + negodat3 |= ENSLOWCRC; + ahd_outb(ahd, NEGCONOPTS, negodat3); + negodat3 = ahd_inb(ahd, NEGCONOPTS); + if (!(negodat3 & ENSLOWCRC)) + printk("aic79xx: failed to set the SLOWCRC bit\n"); + else + printk("aic79xx: SLOWCRC bit set\n"); + } +} + +/* + * Setup default device and controller settings. 
+ * This should only be called if our probe has + * determined that no configuration data is available. + */ +int +ahd_default_config(struct ahd_softc *ahd) +{ + int targ; + + ahd->our_id = 7; + + /* + * Allocate a tstate to house information for our + * initiator presence on the bus as well as the user + * data for any target mode initiator. + */ + if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { + printk("%s: unable to allocate ahd_tmode_tstate. " + "Failing attach\n", ahd_name(ahd)); + return (ENOMEM); + } + + for (targ = 0; targ < AHD_NUM_TARGETS; targ++) { + struct ahd_devinfo devinfo; + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + uint16_t target_mask; + + tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, + targ, &tstate); + /* + * We support SPC2 and SPI4. + */ + tinfo->user.protocol_version = 4; + tinfo->user.transport_version = 4; + + target_mask = 0x01 << targ; + ahd->user_discenable |= target_mask; + tstate->discenable |= target_mask; + ahd->user_tagenable |= target_mask; +#ifdef AHD_FORCE_160 + tinfo->user.period = AHD_SYNCRATE_DT; +#else + tinfo->user.period = AHD_SYNCRATE_160; +#endif + tinfo->user.offset = MAX_OFFSET; + tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM + | MSG_EXT_PPR_WR_FLOW + | MSG_EXT_PPR_HOLD_MCS + | MSG_EXT_PPR_IU_REQ + | MSG_EXT_PPR_QAS_REQ + | MSG_EXT_PPR_DT_REQ; + if ((ahd->features & AHD_RTI) != 0) + tinfo->user.ppr_options |= MSG_EXT_PPR_RTI; + + tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; + + /* + * Start out Async/Narrow/Untagged and with + * conservative protocol support. + */ + tinfo->goal.protocol_version = 2; + tinfo->goal.transport_version = 2; + tinfo->curr.protocol_version = 2; + tinfo->curr.transport_version = 2; + ahd_compile_devinfo(&devinfo, ahd->our_id, + targ, CAM_LUN_WILDCARD, + 'A', ROLE_INITIATOR); + tstate->tagenable &= ~target_mask; + ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); + ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, + /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, + /*paused*/TRUE); + } + return (0); +} + +/* + * Parse device configuration information. + */ +int +ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc) +{ + int targ; + int max_targ; + + max_targ = sc->max_targets & CFMAXTARG; + ahd->our_id = sc->brtime_id & CFSCSIID; + + /* + * Allocate a tstate to house information for our + * initiator presence on the bus as well as the user + * data for any target mode initiator. + */ + if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { + printk("%s: unable to allocate ahd_tmode_tstate. " + "Failing attach\n", ahd_name(ahd)); + return (ENOMEM); + } + + for (targ = 0; targ < max_targ; targ++) { + struct ahd_devinfo devinfo; + struct ahd_initiator_tinfo *tinfo; + struct ahd_transinfo *user_tinfo; + struct ahd_tmode_tstate *tstate; + uint16_t target_mask; + + tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, + targ, &tstate); + user_tinfo = &tinfo->user; + + /* + * We support SPC2 and SPI4. + */ + tinfo->user.protocol_version = 4; + tinfo->user.transport_version = 4; + + target_mask = 0x01 << targ; + ahd->user_discenable &= ~target_mask; + tstate->discenable &= ~target_mask; + ahd->user_tagenable &= ~target_mask; + if (sc->device_flags[targ] & CFDISC) { + tstate->discenable |= target_mask; + ahd->user_discenable |= target_mask; + ahd->user_tagenable |= target_mask; + } else { + /* + * Cannot be packetized without disconnection. 
+ */ + sc->device_flags[targ] &= ~CFPACKETIZED; + } + + user_tinfo->ppr_options = 0; + user_tinfo->period = (sc->device_flags[targ] & CFXFER); + if (user_tinfo->period < CFXFER_ASYNC) { + if (user_tinfo->period <= AHD_PERIOD_10MHz) + user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ; + user_tinfo->offset = MAX_OFFSET; + } else { + user_tinfo->offset = 0; + user_tinfo->period = AHD_ASYNC_XFER_PERIOD; + } +#ifdef AHD_FORCE_160 + if (user_tinfo->period <= AHD_SYNCRATE_160) + user_tinfo->period = AHD_SYNCRATE_DT; +#endif + + if ((sc->device_flags[targ] & CFPACKETIZED) != 0) { + user_tinfo->ppr_options |= MSG_EXT_PPR_RD_STRM + | MSG_EXT_PPR_WR_FLOW + | MSG_EXT_PPR_HOLD_MCS + | MSG_EXT_PPR_IU_REQ; + if ((ahd->features & AHD_RTI) != 0) + user_tinfo->ppr_options |= MSG_EXT_PPR_RTI; + } + + if ((sc->device_flags[targ] & CFQAS) != 0) + user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ; + + if ((sc->device_flags[targ] & CFWIDEB) != 0) + user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT; + else + user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT; +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MISC) != 0) + printk("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width, + user_tinfo->period, user_tinfo->offset, + user_tinfo->ppr_options); +#endif + /* + * Start out Async/Narrow/Untagged and with + * conservative protocol support. + */ + tstate->tagenable &= ~target_mask; + tinfo->goal.protocol_version = 2; + tinfo->goal.transport_version = 2; + tinfo->curr.protocol_version = 2; + tinfo->curr.transport_version = 2; + ahd_compile_devinfo(&devinfo, ahd->our_id, + targ, CAM_LUN_WILDCARD, + 'A', ROLE_INITIATOR); + ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); + ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, + /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, + /*paused*/TRUE); + } + + ahd->flags &= ~AHD_SPCHK_ENB_A; + if (sc->bios_control & CFSPARITY) + ahd->flags |= AHD_SPCHK_ENB_A; + + ahd->flags &= ~AHD_RESET_BUS_A; + if (sc->bios_control & CFRESETB) + ahd->flags |= AHD_RESET_BUS_A; + + ahd->flags &= ~AHD_EXTENDED_TRANS_A; + if (sc->bios_control & CFEXTEND) + ahd->flags |= AHD_EXTENDED_TRANS_A; + + ahd->flags &= ~AHD_BIOS_ENABLED; + if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED) + ahd->flags |= AHD_BIOS_ENABLED; + + ahd->flags &= ~AHD_STPWLEVEL_A; + if ((sc->adapter_control & CFSTPWLEVEL) != 0) + ahd->flags |= AHD_STPWLEVEL_A; + + return (0); +} + +/* + * Parse device configuration information. 
+ */ +int +ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd) +{ + int error; + + error = ahd_verify_vpd_cksum(vpd); + if (error == 0) + return (EINVAL); + if ((vpd->bios_flags & VPDBOOTHOST) != 0) + ahd->flags |= AHD_BOOT_CHANNEL; + return (0); +} + +void +ahd_intr_enable(struct ahd_softc *ahd, int enable) +{ + u_int hcntrl; + + hcntrl = ahd_inb(ahd, HCNTRL); + hcntrl &= ~INTEN; + ahd->pause &= ~INTEN; + ahd->unpause &= ~INTEN; + if (enable) { + hcntrl |= INTEN; + ahd->pause |= INTEN; + ahd->unpause |= INTEN; + } + ahd_outb(ahd, HCNTRL, hcntrl); +} + +static void +ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds, + u_int mincmds) +{ + if (timer > AHD_TIMER_MAX_US) + timer = AHD_TIMER_MAX_US; + ahd->int_coalescing_timer = timer; + + if (maxcmds > AHD_INT_COALESCING_MAXCMDS_MAX) + maxcmds = AHD_INT_COALESCING_MAXCMDS_MAX; + if (mincmds > AHD_INT_COALESCING_MINCMDS_MAX) + mincmds = AHD_INT_COALESCING_MINCMDS_MAX; + ahd->int_coalescing_maxcmds = maxcmds; + ahd_outw(ahd, INT_COALESCING_TIMER, timer / AHD_TIMER_US_PER_TICK); + ahd_outb(ahd, INT_COALESCING_MAXCMDS, -maxcmds); + ahd_outb(ahd, INT_COALESCING_MINCMDS, -mincmds); +} + +static void +ahd_enable_coalescing(struct ahd_softc *ahd, int enable) +{ + + ahd->hs_mailbox &= ~ENINT_COALESCE; + if (enable) + ahd->hs_mailbox |= ENINT_COALESCE; + ahd_outb(ahd, HS_MAILBOX, ahd->hs_mailbox); + ahd_flush_device_writes(ahd); + ahd_run_qoutfifo(ahd); +} + +/* + * Ensure that the card is paused in a location + * outside of all critical sections and that all + * pending work is completed prior to returning. + * This routine should only be called from outside + * an interrupt context. + */ +void +ahd_pause_and_flushwork(struct ahd_softc *ahd) +{ + u_int intstat; + u_int maxloops; + + maxloops = 1000; + ahd->flags |= AHD_ALL_INTERRUPTS; + ahd_pause(ahd); + /* + * Freeze the outgoing selections. We do this only + * until we are safely paused without further selections + * pending. + */ + ahd->qfreeze_cnt--; + ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); + ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) | SELECTOUT_QFROZEN); + do { + + ahd_unpause(ahd); + /* + * Give the sequencer some time to service + * any active selections. + */ + ahd_delay(500); + + ahd_intr(ahd); + ahd_pause(ahd); + intstat = ahd_inb(ahd, INTSTAT); + if ((intstat & INT_PEND) == 0) { + ahd_clear_critical_section(ahd); + intstat = ahd_inb(ahd, INTSTAT); + } + } while (--maxloops + && (intstat != 0xFF || (ahd->features & AHD_REMOVABLE) == 0) + && ((intstat & INT_PEND) != 0 + || (ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0 + || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0)); + + if (maxloops == 0) { + printk("Infinite interrupt loop, INTSTAT = %x", + ahd_inb(ahd, INTSTAT)); + } + ahd->qfreeze_cnt++; + ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); + + ahd_flush_qoutfifo(ahd); + + ahd->flags &= ~AHD_ALL_INTERRUPTS; +} + +int __maybe_unused +ahd_suspend(struct ahd_softc *ahd) +{ + ahd_pause_and_flushwork(ahd); + + if (LIST_FIRST(&ahd->pending_scbs) != NULL) { + ahd_unpause(ahd); + return (EBUSY); + } + ahd_shutdown(ahd); + return (0); +} + +void __maybe_unused +ahd_resume(struct ahd_softc *ahd) +{ + ahd_reset(ahd, /*reinit*/TRUE); + ahd_intr_enable(ahd, TRUE); + ahd_restart(ahd); +} + +/************************** Busy Target Table *********************************/ +/* + * Set SCBPTR to the SCB that contains the busy + * table entry for TCL. Return the offset into + * the SCB that contains the entry for TCL. 
+ * saved_scbid is dereferenced and set to the + * scbid that should be restored once manipualtion + * of the TCL entry is complete. + */ +static inline u_int +ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl) +{ + /* + * Index to the SCB that contains the busy entry. + */ + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + *saved_scbid = ahd_get_scbptr(ahd); + ahd_set_scbptr(ahd, TCL_LUN(tcl) + | ((TCL_TARGET_OFFSET(tcl) & 0xC) << 4)); + + /* + * And now calculate the SCB offset to the entry. + * Each entry is 2 bytes wide, hence the + * multiplication by 2. + */ + return (((TCL_TARGET_OFFSET(tcl) & 0x3) << 1) + SCB_DISCONNECTED_LISTS); +} + +/* + * Return the untagged transaction id for a given target/channel lun. + */ +static u_int +ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl) +{ + u_int scbid; + u_int scb_offset; + u_int saved_scbptr; + + scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl); + scbid = ahd_inw_scbram(ahd, scb_offset); + ahd_set_scbptr(ahd, saved_scbptr); + return (scbid); +} + +static void +ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid) +{ + u_int scb_offset; + u_int saved_scbptr; + + scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl); + ahd_outw(ahd, scb_offset, scbid); + ahd_set_scbptr(ahd, saved_scbptr); +} + +/************************** SCB and SCB queue management **********************/ +static int +ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target, + char channel, int lun, u_int tag, role_t role) +{ + int targ = SCB_GET_TARGET(ahd, scb); + char chan = SCB_GET_CHANNEL(ahd, scb); + int slun = SCB_GET_LUN(scb); + int match; + + match = ((chan == channel) || (channel == ALL_CHANNELS)); + if (match != 0) + match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); + if (match != 0) + match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); + if (match != 0) { +#ifdef AHD_TARGET_MODE + int group; + + group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); + if (role == ROLE_INITIATOR) { + match = (group != XPT_FC_GROUP_TMODE) + && ((tag == SCB_GET_TAG(scb)) + || (tag == SCB_LIST_NULL)); + } else if (role == ROLE_TARGET) { + match = (group == XPT_FC_GROUP_TMODE) + && ((tag == scb->io_ctx->csio.tag_id) + || (tag == SCB_LIST_NULL)); + } +#else /* !AHD_TARGET_MODE */ + match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL)); +#endif /* AHD_TARGET_MODE */ + } + + return match; +} + +static void +ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb) +{ + int target; + char channel; + int lun; + + target = SCB_GET_TARGET(ahd, scb); + lun = SCB_GET_LUN(scb); + channel = SCB_GET_CHANNEL(ahd, scb); + + ahd_search_qinfifo(ahd, target, channel, lun, + /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, + CAM_REQUEUE_REQ, SEARCH_COMPLETE); + + ahd_platform_freeze_devq(ahd, scb); +} + +void +ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb) +{ + struct scb *prev_scb; + ahd_mode_state saved_modes; + + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); + prev_scb = NULL; + if (ahd_qinfifo_count(ahd) != 0) { + u_int prev_tag; + u_int prev_pos; + + prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1); + prev_tag = ahd->qinfifo[prev_pos]; + prev_scb = ahd_lookup_scb(ahd, prev_tag); + } + ahd_qinfifo_requeue(ahd, prev_scb, scb); + ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); + ahd_restore_modes(ahd, saved_modes); +} + +static void +ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb, + struct scb *scb) +{ + if (prev_scb == NULL) { + uint32_t busaddr; + + busaddr = 
ahd_le32toh(scb->hscb->hscb_busaddr); + ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); + } else { + prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr; + ahd_sync_scb(ahd, prev_scb, + BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); + } + ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb); + ahd->qinfifonext++; + scb->hscb->next_hscb_busaddr = ahd->next_queued_hscb->hscb_busaddr; + ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); +} + +static int +ahd_qinfifo_count(struct ahd_softc *ahd) +{ + u_int qinpos; + u_int wrap_qinpos; + u_int wrap_qinfifonext; + + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + qinpos = ahd_get_snscb_qoff(ahd); + wrap_qinpos = AHD_QIN_WRAP(qinpos); + wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext); + if (wrap_qinfifonext >= wrap_qinpos) + return (wrap_qinfifonext - wrap_qinpos); + else + return (wrap_qinfifonext + + ARRAY_SIZE(ahd->qinfifo) - wrap_qinpos); +} + +static void +ahd_reset_cmds_pending(struct ahd_softc *ahd) +{ + struct scb *scb; + ahd_mode_state saved_modes; + u_int pending_cmds; + + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); + + /* + * Don't count any commands as outstanding that the + * sequencer has already marked for completion. + */ + ahd_flush_qoutfifo(ahd); + + pending_cmds = 0; + LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { + pending_cmds++; + } + ahd_outw(ahd, CMDS_PENDING, pending_cmds - ahd_qinfifo_count(ahd)); + ahd_restore_modes(ahd, saved_modes); + ahd->flags &= ~AHD_UPDATE_PEND_CMDS; +} + +static void +ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status) +{ + cam_status ostat; + cam_status cstat; + + ostat = ahd_get_transaction_status(scb); + if (ostat == CAM_REQ_INPROG) + ahd_set_transaction_status(scb, status); + cstat = ahd_get_transaction_status(scb); + if (cstat != CAM_REQ_CMP) + ahd_freeze_scb(scb); + ahd_done(ahd, scb); +} + +int +ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel, + int lun, u_int tag, role_t role, uint32_t status, + ahd_search_action action) +{ + struct scb *scb; + struct scb *mk_msg_scb; + struct scb *prev_scb; + ahd_mode_state saved_modes; + u_int qinstart; + u_int qinpos; + u_int qintail; + u_int tid_next; + u_int tid_prev; + u_int scbid; + u_int seq_flags2; + u_int savedscbptr; + uint32_t busaddr; + int found; + int targets; + + /* Must be in CCHAN mode */ + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); + + /* + * Halt any pending SCB DMA. The sequencer will reinitiate + * this dma if the qinfifo is not empty once we unpause. + */ + if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR)) + == (CCARREN|CCSCBEN|CCSCBDIR)) { + ahd_outb(ahd, CCSCBCTL, + ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN)); + while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0) + ; + } + /* Determine sequencer's position in the qinfifo. */ + qintail = AHD_QIN_WRAP(ahd->qinfifonext); + qinstart = ahd_get_snscb_qoff(ahd); + qinpos = AHD_QIN_WRAP(qinstart); + found = 0; + prev_scb = NULL; + + if (action == SEARCH_PRINT) { + printk("qinstart = %d qinfifonext = %d\nQINFIFO:", + qinstart, ahd->qinfifonext); + } + + /* + * Start with an empty queue. Entries that are not chosen + * for removal will be re-added to the queue as we go. 
+ */ + ahd->qinfifonext = qinstart; + busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr); + ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); + + while (qinpos != qintail) { + scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]); + if (scb == NULL) { + printk("qinpos = %d, SCB index = %d\n", + qinpos, ahd->qinfifo[qinpos]); + panic("Loop 1\n"); + } + + if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) { + /* + * We found an scb that needs to be acted on. + */ + found++; + switch (action) { + case SEARCH_COMPLETE: + if ((scb->flags & SCB_ACTIVE) == 0) + printk("Inactive SCB in qinfifo\n"); + ahd_done_with_status(ahd, scb, status); + fallthrough; + case SEARCH_REMOVE: + break; + case SEARCH_PRINT: + printk(" 0x%x", ahd->qinfifo[qinpos]); + fallthrough; + case SEARCH_COUNT: + ahd_qinfifo_requeue(ahd, prev_scb, scb); + prev_scb = scb; + break; + } + } else { + ahd_qinfifo_requeue(ahd, prev_scb, scb); + prev_scb = scb; + } + qinpos = AHD_QIN_WRAP(qinpos+1); + } + + ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); + + if (action == SEARCH_PRINT) + printk("\nWAITING_TID_QUEUES:\n"); + + /* + * Search waiting for selection lists. We traverse the + * list of "their ids" waiting for selection and, if + * appropriate, traverse the SCBs of each "their id" + * looking for matches. + */ + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + seq_flags2 = ahd_inb(ahd, SEQ_FLAGS2); + if ((seq_flags2 & PENDING_MK_MESSAGE) != 0) { + scbid = ahd_inw(ahd, MK_MESSAGE_SCB); + mk_msg_scb = ahd_lookup_scb(ahd, scbid); + } else + mk_msg_scb = NULL; + savedscbptr = ahd_get_scbptr(ahd); + tid_next = ahd_inw(ahd, WAITING_TID_HEAD); + tid_prev = SCB_LIST_NULL; + targets = 0; + for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) { + u_int tid_head; + u_int tid_tail; + + targets++; + if (targets > AHD_NUM_TARGETS) + panic("TID LIST LOOP"); + + if (scbid >= ahd->scb_data.numscbs) { + printk("%s: Waiting TID List inconsistency. " + "SCB index == 0x%x, yet numscbs == 0x%x.", + ahd_name(ahd), scbid, ahd->scb_data.numscbs); + ahd_dump_card_state(ahd); + panic("for safety"); + } + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + printk("%s: SCB = 0x%x Not Active!\n", + ahd_name(ahd), scbid); + panic("Waiting TID List traversal\n"); + } + ahd_set_scbptr(ahd, scbid); + tid_next = ahd_inw_scbram(ahd, SCB_NEXT2); + if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD, + SCB_LIST_NULL, ROLE_UNKNOWN) == 0) { + tid_prev = scbid; + continue; + } + + /* + * We found a list of scbs that needs to be searched. + */ + if (action == SEARCH_PRINT) + printk(" %d ( ", SCB_GET_TARGET(ahd, scb)); + tid_head = scbid; + found += ahd_search_scb_list(ahd, target, channel, + lun, tag, role, status, + action, &tid_head, &tid_tail, + SCB_GET_TARGET(ahd, scb)); + /* + * Check any MK_MESSAGE SCB that is still waiting to + * enter this target's waiting for selection queue. + */ + if (mk_msg_scb != NULL + && ahd_match_scb(ahd, mk_msg_scb, target, channel, + lun, tag, role)) { + + /* + * We found an scb that needs to be acted on. + */ + found++; + switch (action) { + case SEARCH_COMPLETE: + if ((mk_msg_scb->flags & SCB_ACTIVE) == 0) + printk("Inactive SCB pending MK_MSG\n"); + ahd_done_with_status(ahd, mk_msg_scb, status); + fallthrough; + case SEARCH_REMOVE: + { + u_int tail_offset; + + printk("Removing MK_MSG scb\n"); + + /* + * Reset our tail to the tail of the + * main per-target list. 
+ */ + tail_offset = WAITING_SCB_TAILS + + (2 * SCB_GET_TARGET(ahd, mk_msg_scb)); + ahd_outw(ahd, tail_offset, tid_tail); + + seq_flags2 &= ~PENDING_MK_MESSAGE; + ahd_outb(ahd, SEQ_FLAGS2, seq_flags2); + ahd_outw(ahd, CMDS_PENDING, + ahd_inw(ahd, CMDS_PENDING)-1); + mk_msg_scb = NULL; + break; + } + case SEARCH_PRINT: + printk(" 0x%x", SCB_GET_TAG(scb)); + fallthrough; + case SEARCH_COUNT: + break; + } + } + + if (mk_msg_scb != NULL + && SCBID_IS_NULL(tid_head) + && ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD, + SCB_LIST_NULL, ROLE_UNKNOWN)) { + + /* + * When removing the last SCB for a target + * queue with a pending MK_MESSAGE scb, we + * must queue the MK_MESSAGE scb. + */ + printk("Queueing mk_msg_scb\n"); + tid_head = ahd_inw(ahd, MK_MESSAGE_SCB); + seq_flags2 &= ~PENDING_MK_MESSAGE; + ahd_outb(ahd, SEQ_FLAGS2, seq_flags2); + mk_msg_scb = NULL; + } + if (tid_head != scbid) + ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next); + if (!SCBID_IS_NULL(tid_head)) + tid_prev = tid_head; + if (action == SEARCH_PRINT) + printk(")\n"); + } + + /* Restore saved state. */ + ahd_set_scbptr(ahd, savedscbptr); + ahd_restore_modes(ahd, saved_modes); + return (found); +} + +static int +ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, + int lun, u_int tag, role_t role, uint32_t status, + ahd_search_action action, u_int *list_head, + u_int *list_tail, u_int tid) +{ + struct scb *scb; + u_int scbid; + u_int next; + u_int prev; + int found; + + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + found = 0; + prev = SCB_LIST_NULL; + next = *list_head; + *list_tail = SCB_LIST_NULL; + for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) { + if (scbid >= ahd->scb_data.numscbs) { + printk("%s:SCB List inconsistency. " + "SCB == 0x%x, yet numscbs == 0x%x.", + ahd_name(ahd), scbid, ahd->scb_data.numscbs); + ahd_dump_card_state(ahd); + panic("for safety"); + } + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + printk("%s: SCB = %d Not Active!\n", + ahd_name(ahd), scbid); + panic("Waiting List traversal\n"); + } + ahd_set_scbptr(ahd, scbid); + *list_tail = scbid; + next = ahd_inw_scbram(ahd, SCB_NEXT); + if (ahd_match_scb(ahd, scb, target, channel, + lun, SCB_LIST_NULL, role) == 0) { + prev = scbid; + continue; + } + found++; + switch (action) { + case SEARCH_COMPLETE: + if ((scb->flags & SCB_ACTIVE) == 0) + printk("Inactive SCB in Waiting List\n"); + ahd_done_with_status(ahd, scb, status); + fallthrough; + case SEARCH_REMOVE: + ahd_rem_wscb(ahd, scbid, prev, next, tid); + *list_tail = prev; + if (SCBID_IS_NULL(prev)) + *list_head = next; + break; + case SEARCH_PRINT: + printk("0x%x ", scbid); + fallthrough; + case SEARCH_COUNT: + prev = scbid; + break; + } + if (found > AHD_SCB_MAX) + panic("SCB LIST LOOP"); + } + if (action == SEARCH_COMPLETE + || action == SEARCH_REMOVE) + ahd_outw(ahd, CMDS_PENDING, ahd_inw(ahd, CMDS_PENDING) - found); + return (found); +} + +static void +ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev, + u_int tid_cur, u_int tid_next) +{ + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + + if (SCBID_IS_NULL(tid_cur)) { + + /* Bypass current TID list */ + if (SCBID_IS_NULL(tid_prev)) { + ahd_outw(ahd, WAITING_TID_HEAD, tid_next); + } else { + ahd_set_scbptr(ahd, tid_prev); + ahd_outw(ahd, SCB_NEXT2, tid_next); + } + if (SCBID_IS_NULL(tid_next)) + ahd_outw(ahd, WAITING_TID_TAIL, tid_prev); + } else { + + /* Stitch through tid_cur */ + if (SCBID_IS_NULL(tid_prev)) { + ahd_outw(ahd, WAITING_TID_HEAD, tid_cur); + } 
else { + ahd_set_scbptr(ahd, tid_prev); + ahd_outw(ahd, SCB_NEXT2, tid_cur); + } + ahd_set_scbptr(ahd, tid_cur); + ahd_outw(ahd, SCB_NEXT2, tid_next); + + if (SCBID_IS_NULL(tid_next)) + ahd_outw(ahd, WAITING_TID_TAIL, tid_cur); + } +} + +/* + * Manipulate the waiting for selection list and return the + * scb that follows the one that we remove. + */ +static u_int +ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, + u_int prev, u_int next, u_int tid) +{ + u_int tail_offset; + + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + if (!SCBID_IS_NULL(prev)) { + ahd_set_scbptr(ahd, prev); + ahd_outw(ahd, SCB_NEXT, next); + } + + /* + * SCBs that have MK_MESSAGE set in them may + * cause the tail pointer to be updated without + * setting the next pointer of the previous tail. + * Only clear the tail if the removed SCB was + * the tail. + */ + tail_offset = WAITING_SCB_TAILS + (2 * tid); + if (SCBID_IS_NULL(next) + && ahd_inw(ahd, tail_offset) == scbid) + ahd_outw(ahd, tail_offset, prev); + + ahd_add_scb_to_free_list(ahd, scbid); + return (next); +} + +/* + * Add the SCB as selected by SCBPTR onto the on chip list of + * free hardware SCBs. This list is empty/unused if we are not + * performing SCB paging. + */ +static void +ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid) +{ +/* XXX Need some other mechanism to designate "free". */ + /* + * Invalidate the tag so that our abort + * routines don't think it's active. + ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL); + */ +} + +/******************************** Error Handling ******************************/ +/* + * Abort all SCBs that match the given description (target/channel/lun/tag), + * setting their status to the passed in status if the status has not already + * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer + * is paused before it is called. + */ +static int +ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel, + int lun, u_int tag, role_t role, uint32_t status) +{ + struct scb *scbp; + struct scb *scbp_next; + u_int i, j; + u_int maxtarget; + u_int minlun; + u_int maxlun; + int found; + ahd_mode_state saved_modes; + + /* restore this when we're done */ + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + + found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL, + role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); + + /* + * Clean out the busy target table for any untagged commands. + */ + i = 0; + maxtarget = 16; + if (target != CAM_TARGET_WILDCARD) { + i = target; + if (channel == 'B') + i += 8; + maxtarget = i + 1; + } + + if (lun == CAM_LUN_WILDCARD) { + minlun = 0; + maxlun = AHD_NUM_LUNS_NONPKT; + } else if (lun >= AHD_NUM_LUNS_NONPKT) { + minlun = maxlun = 0; + } else { + minlun = lun; + maxlun = lun + 1; + } + + if (role != ROLE_TARGET) { + for (;i < maxtarget; i++) { + for (j = minlun;j < maxlun; j++) { + u_int scbid; + u_int tcl; + + tcl = BUILD_TCL_RAW(i, 'A', j); + scbid = ahd_find_busy_tcl(ahd, tcl); + scbp = ahd_lookup_scb(ahd, scbid); + if (scbp == NULL + || ahd_match_scb(ahd, scbp, target, channel, + lun, tag, role) == 0) + continue; + ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(i, 'A', j)); + } + } + } + + /* + * Don't abort commands that have already completed, + * but haven't quite made it up to the host yet. + */ + ahd_flush_qoutfifo(ahd); + + /* + * Go through the pending CCB list and look for + * commands for this target that are still active. + * These are other tagged commands that were + * disconnected when the reset occurred. 
+ */ + scbp_next = LIST_FIRST(&ahd->pending_scbs); + while (scbp_next != NULL) { + scbp = scbp_next; + scbp_next = LIST_NEXT(scbp, pending_links); + if (ahd_match_scb(ahd, scbp, target, channel, lun, tag, role)) { + cam_status ostat; + + ostat = ahd_get_transaction_status(scbp); + if (ostat == CAM_REQ_INPROG) + ahd_set_transaction_status(scbp, status); + if (ahd_get_transaction_status(scbp) != CAM_REQ_CMP) + ahd_freeze_scb(scbp); + if ((scbp->flags & SCB_ACTIVE) == 0) + printk("Inactive SCB on pending list\n"); + ahd_done(ahd, scbp); + found++; + } + } + ahd_restore_modes(ahd, saved_modes); + ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status); + ahd->flags |= AHD_UPDATE_PEND_CMDS; + return found; +} + +static void +ahd_reset_current_bus(struct ahd_softc *ahd) +{ + uint8_t scsiseq; + + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST); + scsiseq = ahd_inb(ahd, SCSISEQ0) & ~(ENSELO|ENARBO|SCSIRSTO); + ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO); + ahd_flush_device_writes(ahd); + ahd_delay(AHD_BUSRESET_DELAY); + /* Turn off the bus reset */ + ahd_outb(ahd, SCSISEQ0, scsiseq); + ahd_flush_device_writes(ahd); + ahd_delay(AHD_BUSRESET_DELAY); + if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) { + /* + * 2A Razor #474 + * Certain chip state is not cleared for + * SCSI bus resets that we initiate, so + * we must reset the chip. + */ + ahd_reset(ahd, /*reinit*/TRUE); + ahd_intr_enable(ahd, /*enable*/TRUE); + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + } + + ahd_clear_intstat(ahd); +} + +int +ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset) +{ + struct ahd_devinfo caminfo; + u_int initiator; + u_int target; + u_int max_scsiid; + int found; + u_int fifo; + u_int next_fifo; + uint8_t scsiseq; + + /* + * Check if the last bus reset is cleared + */ + if (ahd->flags & AHD_BUS_RESET_ACTIVE) { + printk("%s: bus reset still active\n", + ahd_name(ahd)); + return 0; + } + ahd->flags |= AHD_BUS_RESET_ACTIVE; + + ahd->pending_device = NULL; + + ahd_compile_devinfo(&caminfo, + CAM_TARGET_WILDCARD, + CAM_TARGET_WILDCARD, + CAM_LUN_WILDCARD, + channel, ROLE_UNKNOWN); + ahd_pause(ahd); + + /* Make sure the sequencer is in a safe location. */ + ahd_clear_critical_section(ahd); + + /* + * Run our command complete fifos to ensure that we perform + * completion processing on any commands that 'completed' + * before the reset occurred. + */ + ahd_run_qoutfifo(ahd); +#ifdef AHD_TARGET_MODE + if ((ahd->flags & AHD_TARGETROLE) != 0) { + ahd_run_tqinfifo(ahd, /*paused*/TRUE); + } +#endif + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + + /* + * Disable selections so no automatic hardware + * functions will modify chip state. + */ + ahd_outb(ahd, SCSISEQ0, 0); + ahd_outb(ahd, SCSISEQ1, 0); + + /* + * Safely shut down our DMA engines. Always start with + * the FIFO that is not currently active (if any are + * actively connected). + */ + next_fifo = fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO; + if (next_fifo > CURRFIFO_1) + /* If disconneced, arbitrarily start with FIFO1. */ + next_fifo = fifo = 0; + do { + next_fifo ^= CURRFIFO_1; + ahd_set_modes(ahd, next_fifo, next_fifo); + ahd_outb(ahd, DFCNTRL, + ahd_inb(ahd, DFCNTRL) & ~(SCSIEN|HDMAEN)); + while ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) + ahd_delay(10); + /* + * Set CURRFIFO to the now inactive channel. 
+ */ + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + ahd_outb(ahd, DFFSTAT, next_fifo); + } while (next_fifo != fifo); + + /* + * Reset the bus if we are initiating this reset + */ + ahd_clear_msg_state(ahd); + ahd_outb(ahd, SIMODE1, + ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST)); + + if (initiate_reset) + ahd_reset_current_bus(ahd); + + ahd_clear_intstat(ahd); + + /* + * Clean up all the state information for the + * pending transactions on this bus. + */ + found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel, + CAM_LUN_WILDCARD, SCB_LIST_NULL, + ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); + + /* + * Cleanup anything left in the FIFOs. + */ + ahd_clear_fifo(ahd, 0); + ahd_clear_fifo(ahd, 1); + + /* + * Clear SCSI interrupt status + */ + ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); + + /* + * Reenable selections + */ + ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST); + scsiseq = ahd_inb(ahd, SCSISEQ_TEMPLATE); + ahd_outb(ahd, SCSISEQ1, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); + + max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7; +#ifdef AHD_TARGET_MODE + /* + * Send an immediate notify ccb to all target more peripheral + * drivers affected by this action. + */ + for (target = 0; target <= max_scsiid; target++) { + struct ahd_tmode_tstate* tstate; + u_int lun; + + tstate = ahd->enabled_targets[target]; + if (tstate == NULL) + continue; + for (lun = 0; lun < AHD_NUM_LUNS; lun++) { + struct ahd_tmode_lstate* lstate; + + lstate = tstate->enabled_luns[lun]; + if (lstate == NULL) + continue; + + ahd_queue_lstate_event(ahd, lstate, CAM_TARGET_WILDCARD, + EVENT_TYPE_BUS_RESET, /*arg*/0); + ahd_send_lstate_events(ahd, lstate); + } + } +#endif + /* + * Revert to async/narrow transfers until we renegotiate. + */ + for (target = 0; target <= max_scsiid; target++) { + + if (ahd->enabled_targets[target] == NULL) + continue; + for (initiator = 0; initiator <= max_scsiid; initiator++) { + struct ahd_devinfo devinfo; + + ahd_compile_devinfo(&devinfo, target, initiator, + CAM_LUN_WILDCARD, + 'A', ROLE_UNKNOWN); + ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_CUR, /*paused*/TRUE); + ahd_set_syncrate(ahd, &devinfo, /*period*/0, + /*offset*/0, /*ppr_options*/0, + AHD_TRANS_CUR, /*paused*/TRUE); + } + } + + /* Notify the XPT that a bus reset occurred */ + ahd_send_async(ahd, caminfo.channel, CAM_TARGET_WILDCARD, + CAM_LUN_WILDCARD, AC_BUS_RESET); + + ahd_restart(ahd); + + return (found); +} + +/**************************** Statistics Processing ***************************/ +static void +ahd_stat_timer(struct timer_list *t) +{ + struct ahd_softc *ahd = from_timer(ahd, t, stat_timer); + u_long s; + int enint_coal; + + ahd_lock(ahd, &s); + + enint_coal = ahd->hs_mailbox & ENINT_COALESCE; + if (ahd->cmdcmplt_total > ahd->int_coalescing_threshold) + enint_coal |= ENINT_COALESCE; + else if (ahd->cmdcmplt_total < ahd->int_coalescing_stop_threshold) + enint_coal &= ~ENINT_COALESCE; + + if (enint_coal != (ahd->hs_mailbox & ENINT_COALESCE)) { + ahd_enable_coalescing(ahd, enint_coal); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0) + printk("%s: Interrupt coalescing " + "now %sabled. Cmds %d\n", + ahd_name(ahd), + (enint_coal & ENINT_COALESCE) ? 
"en" : "dis", + ahd->cmdcmplt_total); +#endif + } + + ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1); + ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]; + ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0; + ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US); + ahd_unlock(ahd, &s); +} + +/****************************** Status Processing *****************************/ + +static void +ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb) +{ + struct hardware_scb *hscb; + int paused; + + /* + * The sequencer freezes its select-out queue + * anytime a SCSI status error occurs. We must + * handle the error and increment our qfreeze count + * to allow the sequencer to continue. We don't + * bother clearing critical sections here since all + * operations are on data structures that the sequencer + * is not touching once the queue is frozen. + */ + hscb = scb->hscb; + + if (ahd_is_paused(ahd)) { + paused = 1; + } else { + paused = 0; + ahd_pause(ahd); + } + + /* Freeze the queue until the client sees the error. */ + ahd_freeze_devq(ahd, scb); + ahd_freeze_scb(scb); + ahd->qfreeze_cnt++; + ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); + + if (paused == 0) + ahd_unpause(ahd); + + /* Don't want to clobber the original sense code */ + if ((scb->flags & SCB_SENSE) != 0) { + /* + * Clear the SCB_SENSE Flag and perform + * a normal command completion. + */ + scb->flags &= ~SCB_SENSE; + ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); + ahd_done(ahd, scb); + return; + } + ahd_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); + ahd_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status); + switch (hscb->shared_data.istatus.scsi_status) { + case STATUS_PKT_SENSE: + { + struct scsi_status_iu_header *siu; + + ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD); + siu = (struct scsi_status_iu_header *)scb->sense_data; + ahd_set_scsi_status(scb, siu->status); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_SENSE) != 0) { + ahd_print_path(ahd, scb); + printk("SCB 0x%x Received PKT Status of 0x%x\n", + SCB_GET_TAG(scb), siu->status); + printk("\tflags = 0x%x, sense len = 0x%x, " + "pktfail = 0x%x\n", + siu->flags, scsi_4btoul(siu->sense_length), + scsi_4btoul(siu->pkt_failures_length)); + } +#endif + if ((siu->flags & SIU_RSPVALID) != 0) { + ahd_print_path(ahd, scb); + if (scsi_4btoul(siu->pkt_failures_length) < 4) { + printk("Unable to parse pkt_failures\n"); + } else { + + switch (SIU_PKTFAIL_CODE(siu)) { + case SIU_PFC_NONE: + printk("No packet failure found\n"); + break; + case SIU_PFC_CIU_FIELDS_INVALID: + printk("Invalid Command IU Field\n"); + break; + case SIU_PFC_TMF_NOT_SUPPORTED: + printk("TMF not supported\n"); + break; + case SIU_PFC_TMF_FAILED: + printk("TMF failed\n"); + break; + case SIU_PFC_INVALID_TYPE_CODE: + printk("Invalid L_Q Type code\n"); + break; + case SIU_PFC_ILLEGAL_REQUEST: + printk("Illegal request\n"); + break; + default: + break; + } + } + if (siu->status == SAM_STAT_GOOD) + ahd_set_transaction_status(scb, + CAM_REQ_CMP_ERR); + } + if ((siu->flags & SIU_SNSVALID) != 0) { + scb->flags |= SCB_PKT_SENSE; +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_SENSE) != 0) + printk("Sense data available\n"); +#endif + } + ahd_done(ahd, scb); + break; + } + case SAM_STAT_COMMAND_TERMINATED: + case SAM_STAT_CHECK_CONDITION: + { + struct ahd_devinfo devinfo; + struct ahd_dma_seg *sg; + struct scsi_sense *sc; + struct ahd_initiator_tinfo *targ_info; + struct ahd_tmode_tstate *tstate; + struct ahd_transinfo *tinfo; +#ifdef AHD_DEBUG + if 
(ahd_debug & AHD_SHOW_SENSE) { + ahd_print_path(ahd, scb); + printk("SCB %d: requests Check Status\n", + SCB_GET_TAG(scb)); + } +#endif + + if (ahd_perform_autosense(scb) == 0) + break; + + ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb), + SCB_GET_TARGET(ahd, scb), + SCB_GET_LUN(scb), + SCB_GET_CHANNEL(ahd, scb), + ROLE_INITIATOR); + targ_info = ahd_fetch_transinfo(ahd, + devinfo.channel, + devinfo.our_scsiid, + devinfo.target, + &tstate); + tinfo = &targ_info->curr; + sg = scb->sg_list; + sc = (struct scsi_sense *)hscb->shared_data.idata.cdb; + /* + * Save off the residual if there is one. + */ + ahd_update_residual(ahd, scb); +#ifdef AHD_DEBUG + if (ahd_debug & AHD_SHOW_SENSE) { + ahd_print_path(ahd, scb); + printk("Sending Sense\n"); + } +#endif + scb->sg_count = 0; + sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb), + ahd_get_sense_bufsize(ahd, scb), + /*last*/TRUE); + sc->opcode = REQUEST_SENSE; + sc->byte2 = 0; + if (tinfo->protocol_version <= SCSI_REV_2 + && SCB_GET_LUN(scb) < 8) + sc->byte2 = SCB_GET_LUN(scb) << 5; + sc->unused[0] = 0; + sc->unused[1] = 0; + sc->length = ahd_get_sense_bufsize(ahd, scb); + sc->control = 0; + + /* + * We can't allow the target to disconnect. + * This will be an untagged transaction and + * having the target disconnect will make this + * transaction indestinguishable from outstanding + * tagged transactions. + */ + hscb->control = 0; + + /* + * This request sense could be because the + * the device lost power or in some other + * way has lost our transfer negotiations. + * Renegotiate if appropriate. Unit attention + * errors will be reported before any data + * phases occur. + */ + if (ahd_get_residual(scb) == ahd_get_transfer_length(scb)) { + ahd_update_neg_request(ahd, &devinfo, + tstate, targ_info, + AHD_NEG_IF_NON_ASYNC); + } + if (tstate->auto_negotiate & devinfo.target_mask) { + hscb->control |= MK_MESSAGE; + scb->flags &= + ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET); + scb->flags |= SCB_AUTO_NEGOTIATE; + } + hscb->cdb_len = sizeof(*sc); + ahd_setup_data_scb(ahd, scb); + scb->flags |= SCB_SENSE; + ahd_queue_scb(ahd, scb); + break; + } + case SAM_STAT_GOOD: + printk("%s: Interrupted for status of 0???\n", + ahd_name(ahd)); + fallthrough; + default: + ahd_done(ahd, scb); + break; + } +} + +static void +ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb) +{ + if (scb->hscb->shared_data.istatus.scsi_status != 0) { + ahd_handle_scsi_status(ahd, scb); + } else { + ahd_calc_residual(ahd, scb); + ahd_done(ahd, scb); + } +} + +/* + * Calculate the residual for a just completed SCB. + */ +static void +ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb) +{ + struct hardware_scb *hscb; + struct initiator_status *spkt; + uint32_t sgptr; + uint32_t resid_sgptr; + uint32_t resid; + + /* + * 5 cases. + * 1) No residual. + * SG_STATUS_VALID clear in sgptr. + * 2) Transferless command + * 3) Never performed any transfers. + * sgptr has SG_FULL_RESID set. + * 4) No residual but target did not + * save data pointers after the + * last transfer, so sgptr was + * never updated. + * 5) We have a partial residual. + * Use residual_sgptr to determine + * where we are. 
+ */ + + hscb = scb->hscb; + sgptr = ahd_le32toh(hscb->sgptr); + if ((sgptr & SG_STATUS_VALID) == 0) + /* Case 1 */ + return; + sgptr &= ~SG_STATUS_VALID; + + if ((sgptr & SG_LIST_NULL) != 0) + /* Case 2 */ + return; + + /* + * Residual fields are the same in both + * target and initiator status packets, + * so we can always use the initiator fields + * regardless of the role for this SCB. + */ + spkt = &hscb->shared_data.istatus; + resid_sgptr = ahd_le32toh(spkt->residual_sgptr); + if ((sgptr & SG_FULL_RESID) != 0) { + /* Case 3 */ + resid = ahd_get_transfer_length(scb); + } else if ((resid_sgptr & SG_LIST_NULL) != 0) { + /* Case 4 */ + return; + } else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) { + ahd_print_path(ahd, scb); + printk("data overrun detected Tag == 0x%x.\n", + SCB_GET_TAG(scb)); + ahd_freeze_devq(ahd, scb); + ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); + ahd_freeze_scb(scb); + return; + } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { + panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); + /* NOTREACHED */ + } else { + struct ahd_dma_seg *sg; + + /* + * Remainder of the SG where the transfer + * stopped. + */ + resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK; + sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK); + + /* The residual sg_ptr always points to the next sg */ + sg--; + + /* + * Add up the contents of all residual + * SG segments that are after the SG where + * the transfer stopped. + */ + while ((ahd_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) { + sg++; + resid += ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; + } + } + if ((scb->flags & SCB_SENSE) == 0) + ahd_set_residual(scb, resid); + else + ahd_set_sense_residual(scb, resid); + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MISC) != 0) { + ahd_print_path(ahd, scb); + printk("Handled %sResidual of %d bytes\n", + (scb->flags & SCB_SENSE) ? "Sense " : "", resid); + } +#endif +} + +/******************************* Target Mode **********************************/ +#ifdef AHD_TARGET_MODE +/* + * Add a target mode event to this lun's queue + */ +static void +ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, + u_int initiator_id, u_int event_type, u_int event_arg) +{ + struct ahd_tmode_event *event; + int pending; + + xpt_freeze_devq(lstate->path, /*count*/1); + if (lstate->event_w_idx >= lstate->event_r_idx) + pending = lstate->event_w_idx - lstate->event_r_idx; + else + pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1 + - (lstate->event_r_idx - lstate->event_w_idx); + + if (event_type == EVENT_TYPE_BUS_RESET + || event_type == TARGET_RESET) { + /* + * Any earlier events are irrelevant, so reset our buffer. + * This has the effect of allowing us to deal with reset + * floods (an external device holding down the reset line) + * without losing the event that is really interesting. 
+ */ + lstate->event_r_idx = 0; + lstate->event_w_idx = 0; + xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); + } + + if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) { + xpt_print_path(lstate->path); + printk("immediate event %x:%x lost\n", + lstate->event_buffer[lstate->event_r_idx].event_type, + lstate->event_buffer[lstate->event_r_idx].event_arg); + lstate->event_r_idx++; + if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) + lstate->event_r_idx = 0; + xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); + } + + event = &lstate->event_buffer[lstate->event_w_idx]; + event->initiator_id = initiator_id; + event->event_type = event_type; + event->event_arg = event_arg; + lstate->event_w_idx++; + if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE) + lstate->event_w_idx = 0; +} + +/* + * Send any target mode events queued up waiting + * for immediate notify resources. + */ +void +ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate) +{ + struct ccb_hdr *ccbh; + struct ccb_immed_notify *inot; + + while (lstate->event_r_idx != lstate->event_w_idx + && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { + struct ahd_tmode_event *event; + + event = &lstate->event_buffer[lstate->event_r_idx]; + SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); + inot = (struct ccb_immed_notify *)ccbh; + switch (event->event_type) { + case EVENT_TYPE_BUS_RESET: + ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; + break; + default: + ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; + inot->message_args[0] = event->event_type; + inot->message_args[1] = event->event_arg; + break; + } + inot->initiator_id = event->initiator_id; + inot->sense_len = 0; + xpt_done((union ccb *)inot); + lstate->event_r_idx++; + if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) + lstate->event_r_idx = 0; + } +} +#endif + +/******************** Sequencer Program Patching/Download *********************/ + +#ifdef AHD_DUMP_SEQ +void +ahd_dumpseq(struct ahd_softc* ahd) +{ + int i; + int max_prog; + + max_prog = 2048; + + ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); + ahd_outw(ahd, PRGMCNT, 0); + for (i = 0; i < max_prog; i++) { + uint8_t ins_bytes[4]; + + ahd_insb(ahd, SEQRAM, ins_bytes, 4); + printk("0x%08x\n", ins_bytes[0] << 24 + | ins_bytes[1] << 16 + | ins_bytes[2] << 8 + | ins_bytes[3]); + } +} +#endif + +static void +ahd_loadseq(struct ahd_softc *ahd) +{ + struct cs cs_table[NUM_CRITICAL_SECTIONS]; + u_int begin_set[NUM_CRITICAL_SECTIONS]; + u_int end_set[NUM_CRITICAL_SECTIONS]; + const struct patch *cur_patch; + u_int cs_count; + u_int cur_cs; + u_int i; + int downloaded; + u_int skip_addr; + u_int sg_prefetch_cnt; + u_int sg_prefetch_cnt_limit; + u_int sg_prefetch_align; + u_int sg_size; + u_int cacheline_mask; + uint8_t download_consts[DOWNLOAD_CONST_COUNT]; + + if (bootverbose) + printk("%s: Downloading Sequencer Program...", + ahd_name(ahd)); + +#if DOWNLOAD_CONST_COUNT != 8 +#error "Download Const Mismatch" +#endif + /* + * Start out with 0 critical sections + * that apply to this firmware load. + */ + cs_count = 0; + cur_cs = 0; + memset(begin_set, 0, sizeof(begin_set)); + memset(end_set, 0, sizeof(end_set)); + + /* + * Setup downloadable constant table. + * + * The computation for the S/G prefetch variables is + * a bit complicated. We would like to always fetch + * in terms of cachelined sized increments. 
However, + * if the cacheline is not an even multiple of the + * SG element size or is larger than our SG RAM, using + * just the cache size might leave us with only a portion + * of an SG element at the tail of a prefetch. If the + * cacheline is larger than our S/G prefetch buffer less + * the size of an SG element, we may round down to a cacheline + * that doesn't contain any or all of the S/G of interest + * within the bounds of our S/G ram. Provide variables to + * the sequencer that will allow it to handle these edge + * cases. + */ + /* Start by aligning to the nearest cacheline. */ + sg_prefetch_align = ahd->pci_cachesize; + if (sg_prefetch_align == 0) + sg_prefetch_align = 8; + /* Round down to the nearest power of 2. */ + while (powerof2(sg_prefetch_align) == 0) + sg_prefetch_align--; + + cacheline_mask = sg_prefetch_align - 1; + + /* + * If the cacheline boundary is greater than half our prefetch RAM + * we risk not being able to fetch even a single complete S/G + * segment if we align to that boundary. + */ + if (sg_prefetch_align > CCSGADDR_MAX/2) + sg_prefetch_align = CCSGADDR_MAX/2; + /* Start by fetching a single cacheline. */ + sg_prefetch_cnt = sg_prefetch_align; + /* + * Increment the prefetch count by cachelines until + * at least one S/G element will fit. + */ + sg_size = sizeof(struct ahd_dma_seg); + if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) + sg_size = sizeof(struct ahd_dma64_seg); + while (sg_prefetch_cnt < sg_size) + sg_prefetch_cnt += sg_prefetch_align; + /* + * If the cacheline is not an even multiple of + * the S/G size, we may only get a partial S/G when + * we align. Add a cacheline if this is the case. + */ + if ((sg_prefetch_align % sg_size) != 0 + && (sg_prefetch_cnt < CCSGADDR_MAX)) + sg_prefetch_cnt += sg_prefetch_align; + /* + * Lastly, compute a value that the sequencer can use + * to determine if the remainder of the CCSGRAM buffer + * has a full S/G element in it. + */ + sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1); + download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt; + download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit; + download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1); + download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1); + download_consts[SG_SIZEOF] = sg_size; + download_consts[PKT_OVERRUN_BUFOFFSET] = + (ahd->overrun_buf - (uint8_t *)ahd->qoutfifo) / 256; + download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_1BYTE_LUN; + download_consts[CACHELINE_MASK] = cacheline_mask; + cur_patch = patches; + downloaded = 0; + skip_addr = 0; + ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); + ahd_outw(ahd, PRGMCNT, 0); + + for (i = 0; i < sizeof(seqprog)/4; i++) { + if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) { + /* + * Don't download this instruction as it + * is in a patch that was removed. + */ + continue; + } + /* + * Move through the CS table until we find a CS + * that might apply to this instruction. 
+ */ + for (; cur_cs < NUM_CRITICAL_SECTIONS; cur_cs++) { + if (critical_sections[cur_cs].end <= i) { + if (begin_set[cs_count] == TRUE + && end_set[cs_count] == FALSE) { + cs_table[cs_count].end = downloaded; + end_set[cs_count] = TRUE; + cs_count++; + } + continue; + } + if (critical_sections[cur_cs].begin <= i + && begin_set[cs_count] == FALSE) { + cs_table[cs_count].begin = downloaded; + begin_set[cs_count] = TRUE; + } + break; + } + ahd_download_instr(ahd, i, download_consts); + downloaded++; + } + + ahd->num_critical_sections = cs_count; + if (cs_count != 0) { + + cs_count *= sizeof(struct cs); + ahd->critical_sections = kmemdup(cs_table, cs_count, GFP_ATOMIC); + if (ahd->critical_sections == NULL) + panic("ahd_loadseq: Could not malloc"); + } + ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE); + + if (bootverbose) { + printk(" %d instructions downloaded\n", downloaded); + printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n", + ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags); + } +} + +static int +ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch, + u_int start_instr, u_int *skip_addr) +{ + const struct patch *cur_patch; + const struct patch *last_patch; + u_int num_patches; + + num_patches = ARRAY_SIZE(patches); + last_patch = &patches[num_patches]; + cur_patch = *start_patch; + + while (cur_patch < last_patch && start_instr == cur_patch->begin) { + + if (cur_patch->patch_func(ahd) == 0) { + + /* Start rejecting code */ + *skip_addr = start_instr + cur_patch->skip_instr; + cur_patch += cur_patch->skip_patch; + } else { + /* Accepted this patch. Advance to the next + * one and wait for our intruction pointer to + * hit this point. + */ + cur_patch++; + } + } + + *start_patch = cur_patch; + if (start_instr < *skip_addr) + /* Still skipping */ + return (0); + + return (1); +} + +static u_int +ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address) +{ + const struct patch *cur_patch; + int address_offset; + u_int skip_addr; + u_int i; + + address_offset = 0; + cur_patch = patches; + skip_addr = 0; + + for (i = 0; i < address;) { + + ahd_check_patch(ahd, &cur_patch, i, &skip_addr); + + if (skip_addr > i) { + int end_addr; + + end_addr = min(address, skip_addr); + address_offset += end_addr - i; + i = skip_addr; + } else { + i++; + } + } + return (address - address_offset); +} + +static void +ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts) +{ + union ins_formats instr; + struct ins_format1 *fmt1_ins; + struct ins_format3 *fmt3_ins; + u_int opcode; + + /* + * The firmware is always compiled into a little endian format. 
+ */ + instr.integer = ahd_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); + + fmt1_ins = &instr.format1; + fmt3_ins = NULL; + + /* Pull the opcode */ + opcode = instr.format1.opcode; + switch (opcode) { + case AIC_OP_JMP: + case AIC_OP_JC: + case AIC_OP_JNC: + case AIC_OP_CALL: + case AIC_OP_JNE: + case AIC_OP_JNZ: + case AIC_OP_JE: + case AIC_OP_JZ: + { + fmt3_ins = &instr.format3; + fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address); + } + fallthrough; + case AIC_OP_OR: + case AIC_OP_AND: + case AIC_OP_XOR: + case AIC_OP_ADD: + case AIC_OP_ADC: + case AIC_OP_BMOV: + if (fmt1_ins->parity != 0) { + fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; + } + fmt1_ins->parity = 0; + fallthrough; + case AIC_OP_ROL: + { + int i, count; + + /* Calculate odd parity for the instruction */ + for (i = 0, count = 0; i < 31; i++) { + uint32_t mask; + + mask = 0x01 << i; + if ((instr.integer & mask) != 0) + count++; + } + if ((count & 0x01) == 0) + instr.format1.parity = 1; + + /* The sequencer is a little endian cpu */ + instr.integer = ahd_htole32(instr.integer); + ahd_outsb(ahd, SEQRAM, instr.bytes, 4); + break; + } + default: + panic("Unknown opcode encountered in seq program"); + break; + } +} + +static int +ahd_probe_stack_size(struct ahd_softc *ahd) +{ + int last_probe; + + last_probe = 0; + while (1) { + int i; + + /* + * We avoid using 0 as a pattern to avoid + * confusion if the stack implementation + * "back-fills" with zeros when "poping' + * entries. + */ + for (i = 1; i <= last_probe+1; i++) { + ahd_outb(ahd, STACK, i & 0xFF); + ahd_outb(ahd, STACK, (i >> 8) & 0xFF); + } + + /* Verify */ + for (i = last_probe+1; i > 0; i--) { + u_int stack_entry; + + stack_entry = ahd_inb(ahd, STACK) + |(ahd_inb(ahd, STACK) << 8); + if (stack_entry != i) + goto sized; + } + last_probe++; + } +sized: + return (last_probe); +} + +int +ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries, + const char *name, u_int address, u_int value, + u_int *cur_column, u_int wrap_point) +{ + int printed; + u_int printed_mask; + + if (cur_column != NULL && *cur_column >= wrap_point) { + printk("\n"); + *cur_column = 0; + } + printed = printk("%s[0x%x]", name, value); + if (table == NULL) { + printed += printk(" "); + *cur_column += printed; + return (printed); + } + printed_mask = 0; + while (printed_mask != 0xFF) { + int entry; + + for (entry = 0; entry < num_entries; entry++) { + if (((value & table[entry].mask) + != table[entry].value) + || ((printed_mask & table[entry].mask) + == table[entry].mask)) + continue; + + printed += printk("%s%s", + printed_mask == 0 ? 
":(" : "|", + table[entry].name); + printed_mask |= table[entry].mask; + + break; + } + if (entry >= num_entries) + break; + } + if (printed_mask != 0) + printed += printk(") "); + else + printed += printk(" "); + if (cur_column != NULL) + *cur_column += printed; + return (printed); +} + +void +ahd_dump_card_state(struct ahd_softc *ahd) +{ + struct scb *scb; + ahd_mode_state saved_modes; + u_int dffstat; + int paused; + u_int scb_index; + u_int saved_scb_index; + u_int cur_col; + int i; + + if (ahd_is_paused(ahd)) { + paused = 1; + } else { + paused = 0; + ahd_pause(ahd); + } + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" + "%s: Dumping Card State at program address 0x%x Mode 0x%x\n", + ahd_name(ahd), + ahd_inw(ahd, CURADDR), + ahd_build_mode_state(ahd, ahd->saved_src_mode, + ahd->saved_dst_mode)); + if (paused) + printk("Card was paused\n"); + + if (ahd_check_cmdcmpltqueues(ahd)) + printk("Completions are pending\n"); + + /* + * Mode independent registers. + */ + cur_col = 0; + ahd_intstat_print(ahd_inb(ahd, INTSTAT), &cur_col, 50); + ahd_seloid_print(ahd_inb(ahd, SELOID), &cur_col, 50); + ahd_selid_print(ahd_inb(ahd, SELID), &cur_col, 50); + ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50); + ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50); + ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50); + ahd_saved_mode_print(ahd_inb(ahd, SAVED_MODE), &cur_col, 50); + ahd_dffstat_print(ahd_inb(ahd, DFFSTAT), &cur_col, 50); + ahd_scsisigi_print(ahd_inb(ahd, SCSISIGI), &cur_col, 50); + ahd_scsiphase_print(ahd_inb(ahd, SCSIPHASE), &cur_col, 50); + ahd_scsibus_print(ahd_inb(ahd, SCSIBUS), &cur_col, 50); + ahd_lastphase_print(ahd_inb(ahd, LASTPHASE), &cur_col, 50); + ahd_scsiseq0_print(ahd_inb(ahd, SCSISEQ0), &cur_col, 50); + ahd_scsiseq1_print(ahd_inb(ahd, SCSISEQ1), &cur_col, 50); + ahd_seqctl0_print(ahd_inb(ahd, SEQCTL0), &cur_col, 50); + ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50); + ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50); + ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50); + ahd_qfreeze_count_print(ahd_inw(ahd, QFREEZE_COUNT), &cur_col, 50); + ahd_kernel_qfreeze_count_print(ahd_inw(ahd, KERNEL_QFREEZE_COUNT), + &cur_col, 50); + ahd_mk_message_scb_print(ahd_inw(ahd, MK_MESSAGE_SCB), &cur_col, 50); + ahd_mk_message_scsiid_print(ahd_inb(ahd, MK_MESSAGE_SCSIID), + &cur_col, 50); + ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50); + ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50); + ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50); + ahd_sstat3_print(ahd_inb(ahd, SSTAT3), &cur_col, 50); + ahd_perrdiag_print(ahd_inb(ahd, PERRDIAG), &cur_col, 50); + ahd_simode1_print(ahd_inb(ahd, SIMODE1), &cur_col, 50); + ahd_lqistat0_print(ahd_inb(ahd, LQISTAT0), &cur_col, 50); + ahd_lqistat1_print(ahd_inb(ahd, LQISTAT1), &cur_col, 50); + ahd_lqistat2_print(ahd_inb(ahd, LQISTAT2), &cur_col, 50); + ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50); + ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50); + ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50); + printk("\n"); + printk("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 0x%x " + "CURRSCB 0x%x NEXTSCB 0x%x\n", + ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING), + ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB), + ahd_inw(ahd, NEXTSCB)); + cur_col = 0; + /* QINFIFO */ + ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS, + CAM_LUN_WILDCARD, 
SCB_LIST_NULL, + ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT); + saved_scb_index = ahd_get_scbptr(ahd); + printk("Pending list:"); + i = 0; + LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { + if (i++ > AHD_SCB_MAX) + break; + cur_col = printk("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb), + ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT)); + ahd_set_scbptr(ahd, SCB_GET_TAG(scb)); + ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL), + &cur_col, 60); + ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID), + &cur_col, 60); + } + printk("\nTotal %d\n", i); + + printk("Kernel Free SCB list: "); + i = 0; + TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { + struct scb *list_scb; + + list_scb = scb; + do { + printk("%d ", SCB_GET_TAG(list_scb)); + list_scb = LIST_NEXT(list_scb, collision_links); + } while (list_scb && i++ < AHD_SCB_MAX); + } + + LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { + if (i++ > AHD_SCB_MAX) + break; + printk("%d ", SCB_GET_TAG(scb)); + } + printk("\n"); + + printk("Sequencer Complete DMA-inprog list: "); + scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD); + i = 0; + while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { + ahd_set_scbptr(ahd, scb_index); + printk("%d ", scb_index); + scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); + } + printk("\n"); + + printk("Sequencer Complete list: "); + scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD); + i = 0; + while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { + ahd_set_scbptr(ahd, scb_index); + printk("%d ", scb_index); + scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); + } + printk("\n"); + + printk("Sequencer DMA-Up and Complete list: "); + scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); + i = 0; + while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { + ahd_set_scbptr(ahd, scb_index); + printk("%d ", scb_index); + scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); + } + printk("\n"); + printk("Sequencer On QFreeze and Complete list: "); + scb_index = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD); + i = 0; + while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { + ahd_set_scbptr(ahd, scb_index); + printk("%d ", scb_index); + scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); + } + printk("\n"); + ahd_set_scbptr(ahd, saved_scb_index); + dffstat = ahd_inb(ahd, DFFSTAT); + for (i = 0; i < 2; i++) { +#ifdef AHD_DEBUG + struct scb *fifo_scb; +#endif + u_int fifo_scbptr; + + ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); + fifo_scbptr = ahd_get_scbptr(ahd); + printk("\n\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n", + ahd_name(ahd), i, + (dffstat & (FIFO0FREE << i)) ? 
"Free" : "Active", + ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr); + cur_col = 0; + ahd_seqimode_print(ahd_inb(ahd, SEQIMODE), &cur_col, 50); + ahd_seqintsrc_print(ahd_inb(ahd, SEQINTSRC), &cur_col, 50); + ahd_dfcntrl_print(ahd_inb(ahd, DFCNTRL), &cur_col, 50); + ahd_dfstatus_print(ahd_inb(ahd, DFSTATUS), &cur_col, 50); + ahd_sg_cache_shadow_print(ahd_inb(ahd, SG_CACHE_SHADOW), + &cur_col, 50); + ahd_sg_state_print(ahd_inb(ahd, SG_STATE), &cur_col, 50); + ahd_dffsxfrctl_print(ahd_inb(ahd, DFFSXFRCTL), &cur_col, 50); + ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50); + ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50); + if (cur_col > 50) { + printk("\n"); + cur_col = 0; + } + cur_col += printk("SHADDR = 0x%x%x, SHCNT = 0x%x ", + ahd_inl(ahd, SHADDR+4), + ahd_inl(ahd, SHADDR), + (ahd_inb(ahd, SHCNT) + | (ahd_inb(ahd, SHCNT + 1) << 8) + | (ahd_inb(ahd, SHCNT + 2) << 16))); + if (cur_col > 50) { + printk("\n"); + cur_col = 0; + } + cur_col += printk("HADDR = 0x%x%x, HCNT = 0x%x ", + ahd_inl(ahd, HADDR+4), + ahd_inl(ahd, HADDR), + (ahd_inb(ahd, HCNT) + | (ahd_inb(ahd, HCNT + 1) << 8) + | (ahd_inb(ahd, HCNT + 2) << 16))); + ahd_ccsgctl_print(ahd_inb(ahd, CCSGCTL), &cur_col, 50); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_SG) != 0) { + fifo_scb = ahd_lookup_scb(ahd, fifo_scbptr); + if (fifo_scb != NULL) + ahd_dump_sglist(fifo_scb); + } +#endif + } + printk("\nLQIN: "); + for (i = 0; i < 20; i++) + printk("0x%x ", ahd_inb(ahd, LQIN + i)); + printk("\n"); + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + printk("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n", + ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE), + ahd_inb(ahd, OPTIONMODE)); + printk("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n", + ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT), + ahd_inb(ahd, MAXCMDCNT)); + printk("%s: SAVED_SCSIID = 0x%x SAVED_LUN = 0x%x\n", + ahd_name(ahd), ahd_inb(ahd, SAVED_SCSIID), + ahd_inb(ahd, SAVED_LUN)); + ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50); + printk("\n"); + ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); + cur_col = 0; + ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50); + printk("\n"); + ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); + printk("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n", + ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX), + ahd_inw(ahd, DINDEX)); + printk("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n", + ahd_name(ahd), ahd_get_scbptr(ahd), + ahd_inw_scbram(ahd, SCB_NEXT), + ahd_inw_scbram(ahd, SCB_NEXT2)); + printk("CDB %x %x %x %x %x %x\n", + ahd_inb_scbram(ahd, SCB_CDB_STORE), + ahd_inb_scbram(ahd, SCB_CDB_STORE+1), + ahd_inb_scbram(ahd, SCB_CDB_STORE+2), + ahd_inb_scbram(ahd, SCB_CDB_STORE+3), + ahd_inb_scbram(ahd, SCB_CDB_STORE+4), + ahd_inb_scbram(ahd, SCB_CDB_STORE+5)); + printk("STACK:"); + for (i = 0; i < ahd->stack_size; i++) { + ahd->saved_stack[i] = + ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8); + printk(" 0x%x", ahd->saved_stack[i]); + } + for (i = ahd->stack_size-1; i >= 0; i--) { + ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF); + ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF); + } + printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); + ahd_restore_modes(ahd, saved_modes); + if (paused == 0) + ahd_unpause(ahd); +} + +#if 0 +void +ahd_dump_scbs(struct ahd_softc *ahd) +{ + ahd_mode_state saved_modes; + u_int saved_scb_index; + int i; + + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + 
saved_scb_index = ahd_get_scbptr(ahd); + for (i = 0; i < AHD_SCB_MAX; i++) { + ahd_set_scbptr(ahd, i); + printk("%3d", i); + printk("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n", + ahd_inb_scbram(ahd, SCB_CONTROL), + ahd_inb_scbram(ahd, SCB_SCSIID), + ahd_inw_scbram(ahd, SCB_NEXT), + ahd_inw_scbram(ahd, SCB_NEXT2), + ahd_inl_scbram(ahd, SCB_SGPTR), + ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR)); + } + printk("\n"); + ahd_set_scbptr(ahd, saved_scb_index); + ahd_restore_modes(ahd, saved_modes); +} +#endif /* 0 */ + +/**************************** Flexport Logic **********************************/ +/* + * Read count 16bit words from 16bit word address start_addr from the + * SEEPROM attached to the controller, into buf, using the controller's + * SEEPROM reading state machine. Optionally treat the data as a byte + * stream in terms of byte order. + */ +int +ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf, + u_int start_addr, u_int count, int bytestream) +{ + u_int cur_addr; + u_int end_addr; + int error; + + /* + * If we never make it through the loop even once, + * we were passed invalid arguments. + */ + error = EINVAL; + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + end_addr = start_addr + count; + for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) { + + ahd_outb(ahd, SEEADR, cur_addr); + ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART); + + error = ahd_wait_seeprom(ahd); + if (error) + break; + if (bytestream != 0) { + uint8_t *bytestream_ptr; + + bytestream_ptr = (uint8_t *)buf; + *bytestream_ptr++ = ahd_inb(ahd, SEEDAT); + *bytestream_ptr = ahd_inb(ahd, SEEDAT+1); + } else { + /* + * ahd_inw() already handles machine byte order. + */ + *buf = ahd_inw(ahd, SEEDAT); + } + buf++; + } + return (error); +} + +/* + * Write count 16bit words from buf, into SEEPROM attache to the + * controller starting at 16bit word address start_addr, using the + * controller's SEEPROM writing state machine. + */ +int +ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf, + u_int start_addr, u_int count) +{ + u_int cur_addr; + u_int end_addr; + int error; + int retval; + + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + error = ENOENT; + + /* Place the chip into write-enable mode */ + ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR); + ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART); + error = ahd_wait_seeprom(ahd); + if (error) + return (error); + + /* + * Write the data. If we don't get through the loop at + * least once, the arguments were invalid. + */ + retval = EINVAL; + end_addr = start_addr + count; + for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) { + ahd_outw(ahd, SEEDAT, *buf++); + ahd_outb(ahd, SEEADR, cur_addr); + ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART); + + retval = ahd_wait_seeprom(ahd); + if (retval) + break; + } + + /* + * Disable writes. + */ + ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR); + ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART); + error = ahd_wait_seeprom(ahd); + if (error) + return (error); + return (retval); +} + +/* + * Wait ~100us for the serial eeprom to satisfy our request. + */ +static int +ahd_wait_seeprom(struct ahd_softc *ahd) +{ + int cnt; + + cnt = 5000; + while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt) + ahd_delay(5); + + if (cnt == 0) + return (ETIMEDOUT); + return (0); +} + +/* + * Validate the two checksums in the per_channel + * vital product data struct. 
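+ * Each checksum byte is the two's complement of the byte sum over its
+ * region, i.e. valid data satisfies (sum + stored_checksum) & 0xFF == 0.
+ * For example, if the bytes sum to 0x1234, the stored checksum must be
+ * (-0x1234) & 0xFF == 0xCC. A region that sums to zero is treated as
+ * unprogrammed and rejected.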
+ */ +static int +ahd_verify_vpd_cksum(struct vpd_config *vpd) +{ + int i; + int maxaddr; + uint32_t checksum; + uint8_t *vpdarray; + + vpdarray = (uint8_t *)vpd; + maxaddr = offsetof(struct vpd_config, vpd_checksum); + checksum = 0; + for (i = offsetof(struct vpd_config, resource_type); i < maxaddr; i++) + checksum = checksum + vpdarray[i]; + if (checksum == 0 + || (-checksum & 0xFF) != vpd->vpd_checksum) + return (0); + + checksum = 0; + maxaddr = offsetof(struct vpd_config, checksum); + for (i = offsetof(struct vpd_config, default_target_flags); + i < maxaddr; i++) + checksum = checksum + vpdarray[i]; + if (checksum == 0 + || (-checksum & 0xFF) != vpd->checksum) + return (0); + return (1); +} + +int +ahd_verify_cksum(struct seeprom_config *sc) +{ + int i; + int maxaddr; + uint32_t checksum; + uint16_t *scarray; + + maxaddr = (sizeof(*sc)/2) - 1; + checksum = 0; + scarray = (uint16_t *)sc; + + for (i = 0; i < maxaddr; i++) + checksum = checksum + scarray[i]; + if (checksum == 0 + || (checksum & 0xFFFF) != sc->checksum) { + return (0); + } else { + return (1); + } +} + +int +ahd_acquire_seeprom(struct ahd_softc *ahd) +{ + /* + * We should be able to determine the SEEPROM type + * from the flexport logic, but unfortunately not + * all implementations have this logic and there is + * no programatic method for determining if the logic + * is present. + */ + return (1); +#if 0 + uint8_t seetype; + int error; + + error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype); + if (error != 0 + || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE)) + return (0); + return (1); +#endif +} + +void +ahd_release_seeprom(struct ahd_softc *ahd) +{ + /* Currently a no-op */ +} + +/* + * Wait at most 2 seconds for flexport arbitration to succeed. + */ +static int +ahd_wait_flexport(struct ahd_softc *ahd) +{ + int cnt; + + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + cnt = 1000000 * 2 / 5; + while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt) + ahd_delay(5); + + if (cnt == 0) + return (ETIMEDOUT); + return (0); +} + +int +ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value) +{ + int error; + + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + if (addr > 7) + panic("ahd_write_flexport: address out of range"); + ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); + error = ahd_wait_flexport(ahd); + if (error != 0) + return (error); + ahd_outb(ahd, BRDDAT, value); + ahd_flush_device_writes(ahd); + ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3)); + ahd_flush_device_writes(ahd); + ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); + ahd_flush_device_writes(ahd); + ahd_outb(ahd, BRDCTL, 0); + ahd_flush_device_writes(ahd); + return (0); +} + +int +ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value) +{ + int error; + + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + if (addr > 7) + panic("ahd_read_flexport: address out of range"); + ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3)); + error = ahd_wait_flexport(ahd); + if (error != 0) + return (error); + *value = ahd_inb(ahd, BRDDAT); + ahd_outb(ahd, BRDCTL, 0); + ahd_flush_device_writes(ahd); + return (0); +} + +/************************* Target Mode ****************************************/ +#ifdef AHD_TARGET_MODE +cam_status +ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb, + struct ahd_tmode_tstate **tstate, + struct ahd_tmode_lstate **lstate, + int notfound_failure) +{ + + if ((ahd->features & AHD_TARGETMODE) == 0) + return (CAM_REQ_INVALID); + + /* + * 
Handle the 'black hole' device that sucks up + * requests to unattached luns on enabled targets. + */ + if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD + && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { + *tstate = NULL; + *lstate = ahd->black_hole; + } else { + u_int max_id; + + max_id = (ahd->features & AHD_WIDE) ? 16 : 8; + if (ccb->ccb_h.target_id >= max_id) + return (CAM_TID_INVALID); + + if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS) + return (CAM_LUN_INVALID); + + *tstate = ahd->enabled_targets[ccb->ccb_h.target_id]; + *lstate = NULL; + if (*tstate != NULL) + *lstate = + (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; + } + + if (notfound_failure != 0 && *lstate == NULL) + return (CAM_PATH_INVALID); + + return (CAM_REQ_CMP); +} + +void +ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb) +{ +#if NOT_YET + struct ahd_tmode_tstate *tstate; + struct ahd_tmode_lstate *lstate; + struct ccb_en_lun *cel; + cam_status status; + u_int target; + u_int lun; + u_int target_mask; + u_long s; + char channel; + + status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate, + /*notfound_failure*/FALSE); + + if (status != CAM_REQ_CMP) { + ccb->ccb_h.status = status; + return; + } + + if ((ahd->features & AHD_MULTIROLE) != 0) { + u_int our_id; + + our_id = ahd->our_id; + if (ccb->ccb_h.target_id != our_id) { + if ((ahd->features & AHD_MULTI_TID) != 0 + && (ahd->flags & AHD_INITIATORROLE) != 0) { + /* + * Only allow additional targets if + * the initiator role is disabled. + * The hardware cannot handle a re-select-in + * on the initiator id during a re-select-out + * on a different target id. + */ + status = CAM_TID_INVALID; + } else if ((ahd->flags & AHD_INITIATORROLE) != 0 + || ahd->enabled_luns > 0) { + /* + * Only allow our target id to change + * if the initiator role is not configured + * and there are no enabled luns which + * are attached to the currently registered + * scsi id. + */ + status = CAM_TID_INVALID; + } + } + } + + if (status != CAM_REQ_CMP) { + ccb->ccb_h.status = status; + return; + } + + /* + * We now have an id that is valid. + * If we aren't in target mode, switch modes. + */ + if ((ahd->flags & AHD_TARGETROLE) == 0 + && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { + u_long s; + + printk("Configuring Target Mode\n"); + ahd_lock(ahd, &s); + if (LIST_FIRST(&ahd->pending_scbs) != NULL) { + ccb->ccb_h.status = CAM_BUSY; + ahd_unlock(ahd, &s); + return; + } + ahd->flags |= AHD_TARGETROLE; + if ((ahd->features & AHD_MULTIROLE) == 0) + ahd->flags &= ~AHD_INITIATORROLE; + ahd_pause(ahd); + ahd_loadseq(ahd); + ahd_restart(ahd); + ahd_unlock(ahd, &s); + } + cel = &ccb->cel; + target = ccb->ccb_h.target_id; + lun = ccb->ccb_h.target_lun; + channel = SIM_CHANNEL(ahd, sim); + target_mask = 0x01 << target; + if (channel == 'B') + target_mask <<= 8; + + if (cel->enable != 0) { + u_int scsiseq1; + + /* Are we already enabled?? */ + if (lstate != NULL) { + xpt_print_path(ccb->ccb_h.path); + printk("Lun already enabled\n"); + ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; + return; + } + + if (cel->grp6_len != 0 + || cel->grp7_len != 0) { + /* + * Don't (yet?) support vendor + * specific commands. + */ + ccb->ccb_h.status = CAM_REQ_INVALID; + printk("Non-zero Group Codes\n"); + return; + } + + /* + * Seems to be okay. + * Setup our data structures. 
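+ * If needed, a tstate is allocated for the target (skipped for the
+ * wildcard "black hole" case) and an lstate for the lun; the lstate
+ * also gets its own CAM path and empty ATIO/INOT queues.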
+ */ + if (target != CAM_TARGET_WILDCARD && tstate == NULL) { + tstate = ahd_alloc_tstate(ahd, target, channel); + if (tstate == NULL) { + xpt_print_path(ccb->ccb_h.path); + printk("Couldn't allocate tstate\n"); + ccb->ccb_h.status = CAM_RESRC_UNAVAIL; + return; + } + } + lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC); + if (lstate == NULL) { + xpt_print_path(ccb->ccb_h.path); + printk("Couldn't allocate lstate\n"); + ccb->ccb_h.status = CAM_RESRC_UNAVAIL; + return; + } + status = xpt_create_path(&lstate->path, /*periph*/NULL, + xpt_path_path_id(ccb->ccb_h.path), + xpt_path_target_id(ccb->ccb_h.path), + xpt_path_lun_id(ccb->ccb_h.path)); + if (status != CAM_REQ_CMP) { + kfree(lstate); + xpt_print_path(ccb->ccb_h.path); + printk("Couldn't allocate path\n"); + ccb->ccb_h.status = CAM_RESRC_UNAVAIL; + return; + } + SLIST_INIT(&lstate->accept_tios); + SLIST_INIT(&lstate->immed_notifies); + ahd_lock(ahd, &s); + ahd_pause(ahd); + if (target != CAM_TARGET_WILDCARD) { + tstate->enabled_luns[lun] = lstate; + ahd->enabled_luns++; + + if ((ahd->features & AHD_MULTI_TID) != 0) { + u_int targid_mask; + + targid_mask = ahd_inw(ahd, TARGID); + targid_mask |= target_mask; + ahd_outw(ahd, TARGID, targid_mask); + ahd_update_scsiid(ahd, targid_mask); + } else { + u_int our_id; + char channel; + + channel = SIM_CHANNEL(ahd, sim); + our_id = SIM_SCSI_ID(ahd, sim); + + /* + * This can only happen if selections + * are not enabled + */ + if (target != our_id) { + u_int sblkctl; + char cur_channel; + int swap; + + sblkctl = ahd_inb(ahd, SBLKCTL); + cur_channel = (sblkctl & SELBUSB) + ? 'B' : 'A'; + if ((ahd->features & AHD_TWIN) == 0) + cur_channel = 'A'; + swap = cur_channel != channel; + ahd->our_id = target; + + if (swap) + ahd_outb(ahd, SBLKCTL, + sblkctl ^ SELBUSB); + + ahd_outb(ahd, SCSIID, target); + + if (swap) + ahd_outb(ahd, SBLKCTL, sblkctl); + } + } + } else + ahd->black_hole = lstate; + /* Allow select-in operations */ + if (ahd->black_hole != NULL && ahd->enabled_luns > 0) { + scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); + scsiseq1 |= ENSELI; + ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1); + scsiseq1 = ahd_inb(ahd, SCSISEQ1); + scsiseq1 |= ENSELI; + ahd_outb(ahd, SCSISEQ1, scsiseq1); + } + ahd_unpause(ahd); + ahd_unlock(ahd, &s); + ccb->ccb_h.status = CAM_REQ_CMP; + xpt_print_path(ccb->ccb_h.path); + printk("Lun now enabled for target mode\n"); + } else { + struct scb *scb; + int i, empty; + + if (lstate == NULL) { + ccb->ccb_h.status = CAM_LUN_INVALID; + return; + } + + ahd_lock(ahd, &s); + + ccb->ccb_h.status = CAM_REQ_CMP; + LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { + struct ccb_hdr *ccbh; + + ccbh = &scb->io_ctx->ccb_h; + if (ccbh->func_code == XPT_CONT_TARGET_IO + && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ + printk("CTIO pending\n"); + ccb->ccb_h.status = CAM_REQ_INVALID; + ahd_unlock(ahd, &s); + return; + } + } + + if (SLIST_FIRST(&lstate->accept_tios) != NULL) { + printk("ATIOs pending\n"); + ccb->ccb_h.status = CAM_REQ_INVALID; + } + + if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { + printk("INOTs pending\n"); + ccb->ccb_h.status = CAM_REQ_INVALID; + } + + if (ccb->ccb_h.status != CAM_REQ_CMP) { + ahd_unlock(ahd, &s); + return; + } + + xpt_print_path(ccb->ccb_h.path); + printk("Target mode disabled\n"); + xpt_free_path(lstate->path); + kfree(lstate); + + ahd_pause(ahd); + /* Can we clean up the target too? 
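+ * The tstate is only freed once its last enabled lun is gone.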
*/ + if (target != CAM_TARGET_WILDCARD) { + tstate->enabled_luns[lun] = NULL; + ahd->enabled_luns--; + for (empty = 1, i = 0; i < 8; i++) + if (tstate->enabled_luns[i] != NULL) { + empty = 0; + break; + } + + if (empty) { + ahd_free_tstate(ahd, target, channel, + /*force*/FALSE); + if (ahd->features & AHD_MULTI_TID) { + u_int targid_mask; + + targid_mask = ahd_inw(ahd, TARGID); + targid_mask &= ~target_mask; + ahd_outw(ahd, TARGID, targid_mask); + ahd_update_scsiid(ahd, targid_mask); + } + } + } else { + + ahd->black_hole = NULL; + + /* + * We can't allow selections without + * our black hole device. + */ + empty = TRUE; + } + if (ahd->enabled_luns == 0) { + /* Disallow select-in */ + u_int scsiseq1; + + scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); + scsiseq1 &= ~ENSELI; + ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1); + scsiseq1 = ahd_inb(ahd, SCSISEQ1); + scsiseq1 &= ~ENSELI; + ahd_outb(ahd, SCSISEQ1, scsiseq1); + + if ((ahd->features & AHD_MULTIROLE) == 0) { + printk("Configuring Initiator Mode\n"); + ahd->flags &= ~AHD_TARGETROLE; + ahd->flags |= AHD_INITIATORROLE; + ahd_pause(ahd); + ahd_loadseq(ahd); + ahd_restart(ahd); + /* + * Unpaused. The extra unpause + * that follows is harmless. + */ + } + } + ahd_unpause(ahd); + ahd_unlock(ahd, &s); + } +#endif +} + +static void +ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask) +{ +#if NOT_YET + u_int scsiid_mask; + u_int scsiid; + + if ((ahd->features & AHD_MULTI_TID) == 0) + panic("ahd_update_scsiid called on non-multitid unit\n"); + + /* + * Since we will rely on the TARGID mask + * for selection enables, ensure that OID + * in SCSIID is not set to some other ID + * that we don't want to allow selections on. + */ + if ((ahd->features & AHD_ULTRA2) != 0) + scsiid = ahd_inb(ahd, SCSIID_ULTRA2); + else + scsiid = ahd_inb(ahd, SCSIID); + scsiid_mask = 0x1 << (scsiid & OID); + if ((targid_mask & scsiid_mask) == 0) { + u_int our_id; + + /* ffs counts from 1 */ + our_id = ffs(targid_mask); + if (our_id == 0) + our_id = ahd->our_id; + else + our_id--; + scsiid &= TID; + scsiid |= our_id; + } + if ((ahd->features & AHD_ULTRA2) != 0) + ahd_outb(ahd, SCSIID_ULTRA2, scsiid); + else + ahd_outb(ahd, SCSIID, scsiid); +#endif +} + +static void +ahd_run_tqinfifo(struct ahd_softc *ahd, int paused) +{ + struct target_cmd *cmd; + + ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD); + while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) { + + /* + * Only advance through the queue if we + * have the resources to process the command. + */ + if (ahd_handle_target_cmd(ahd, cmd) != 0) + break; + + cmd->cmd_valid = 0; + ahd_dmamap_sync(ahd, ahd->shared_data_dmat, + ahd->shared_data_map.dmamap, + ahd_targetcmd_offset(ahd, ahd->tqinfifonext), + sizeof(struct target_cmd), + BUS_DMASYNC_PREREAD); + ahd->tqinfifonext++; + + /* + * Lazily update our position in the target mode incoming + * command queue as seen by the sequencer. 
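+ * HS_MAILBOX is refreshed only when tqinfifonext has just crossed a
+ * HOST_TQINPOS boundary, so the common case costs a mask test rather
+ * than a register write per incoming command.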
+ */ + if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { + u_int hs_mailbox; + + hs_mailbox = ahd_inb(ahd, HS_MAILBOX); + hs_mailbox &= ~HOST_TQINPOS; + hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS; + ahd_outb(ahd, HS_MAILBOX, hs_mailbox); + } + } +} + +static int +ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd) +{ + struct ahd_tmode_tstate *tstate; + struct ahd_tmode_lstate *lstate; + struct ccb_accept_tio *atio; + uint8_t *byte; + int initiator; + int target; + int lun; + + initiator = SCSIID_TARGET(ahd, cmd->scsiid); + target = SCSIID_OUR_ID(cmd->scsiid); + lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); + + byte = cmd->bytes; + tstate = ahd->enabled_targets[target]; + lstate = NULL; + if (tstate != NULL) + lstate = tstate->enabled_luns[lun]; + + /* + * Commands for disabled luns go to the black hole driver. + */ + if (lstate == NULL) + lstate = ahd->black_hole; + + atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); + if (atio == NULL) { + ahd->flags |= AHD_TQINFIFO_BLOCKED; + /* + * Wait for more ATIOs from the peripheral driver for this lun. + */ + return (1); + } else + ahd->flags &= ~AHD_TQINFIFO_BLOCKED; +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_TQIN) != 0) + printk("Incoming command from %d for %d:%d%s\n", + initiator, target, lun, + lstate == ahd->black_hole ? "(Black Holed)" : ""); +#endif + SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); + + if (lstate == ahd->black_hole) { + /* Fill in the wildcards */ + atio->ccb_h.target_id = target; + atio->ccb_h.target_lun = lun; + } + + /* + * Package it up and send it off to + * whomever has this lun enabled. + */ + atio->sense_len = 0; + atio->init_id = initiator; + if (byte[0] != 0xFF) { + /* Tag was included */ + atio->tag_action = *byte++; + atio->tag_id = *byte++; + atio->ccb_h.flags = CAM_TAG_ACTION_VALID; + } else { + atio->ccb_h.flags = 0; + } + byte++; + + /* Okay. Now determine the cdb size based on the command code */ + switch (*byte >> CMD_GROUP_CODE_SHIFT) { + case 0: + atio->cdb_len = 6; + break; + case 1: + case 2: + atio->cdb_len = 10; + break; + case 4: + atio->cdb_len = 16; + break; + case 5: + atio->cdb_len = 12; + break; + case 3: + default: + /* Only copy the opcode. */ + atio->cdb_len = 1; + printk("Reserved or VU command code type encountered\n"); + break; + } + + memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); + + atio->ccb_h.status |= CAM_CDB_RECVD; + + if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { + /* + * We weren't allowed to disconnect. + * We're hanging on the bus until a + * continue target I/O comes in response + * to this accept tio. + */ +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_TQIN) != 0) + printk("Received Immediate Command %d:%d:%d - %p\n", + initiator, target, lun, ahd->pending_device); +#endif + ahd->pending_device = lstate; + ahd_freeze_ccb((union ccb *)atio); + atio->ccb_h.flags |= CAM_DIS_DISCONNECT; + } + xpt_done((union ccb*)atio); + return (0); +} + +#endif diff --git a/drivers/scsi/aic7xxx/aic79xx_inline.h b/drivers/scsi/aic7xxx/aic79xx_inline.h new file mode 100644 index 000000000..09335a3c8 --- /dev/null +++ b/drivers/scsi/aic7xxx/aic79xx_inline.h @@ -0,0 +1,172 @@ +/* + * Inline routines shareable across OS platforms. + * + * Copyright (c) 1994-2001 Justin T. Gibbs. + * Copyright (c) 2000-2003 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aic79xx_inline.h#59 $ + * + * $FreeBSD$ + */ + +#ifndef _AIC79XX_INLINE_H_ +#define _AIC79XX_INLINE_H_ + +/******************************** Debugging ***********************************/ +static inline char *ahd_name(struct ahd_softc *ahd); + +static inline char *ahd_name(struct ahd_softc *ahd) +{ + return (ahd->name); +} + +/************************ Sequencer Execution Control *************************/ +static inline void ahd_known_modes(struct ahd_softc *ahd, + ahd_mode src, ahd_mode dst); +static inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd, + ahd_mode src, + ahd_mode dst); +static inline void ahd_extract_mode_state(struct ahd_softc *ahd, + ahd_mode_state state, + ahd_mode *src, ahd_mode *dst); + +void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, + ahd_mode dst); +ahd_mode_state ahd_save_modes(struct ahd_softc *ahd); +void ahd_restore_modes(struct ahd_softc *ahd, + ahd_mode_state state); +int ahd_is_paused(struct ahd_softc *ahd); +void ahd_pause(struct ahd_softc *ahd); +void ahd_unpause(struct ahd_softc *ahd); + +static inline void +ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) +{ + ahd->src_mode = src; + ahd->dst_mode = dst; + ahd->saved_src_mode = src; + ahd->saved_dst_mode = dst; +} + +static inline ahd_mode_state +ahd_build_mode_state(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) +{ + return ((src << SRC_MODE_SHIFT) | (dst << DST_MODE_SHIFT)); +} + +static inline void +ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state, + ahd_mode *src, ahd_mode *dst) +{ + *src = (state & SRC_MODE) >> SRC_MODE_SHIFT; + *dst = (state & DST_MODE) >> DST_MODE_SHIFT; +} + +/*********************** Scatter Gather List Handling *************************/ +void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb, + void *sgptr, dma_addr_t addr, + bus_size_t len, int last); + 
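+/*
+ * ahd_sg_setup() fills in a single S/G element for the given scb and
+ * returns a pointer to the next free element, flagging the entry as
+ * the final one in the list when 'last' is non-zero.
+ */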
+/************************** Memory mapping routines ***************************/ +static inline size_t ahd_sg_size(struct ahd_softc *ahd); + +void ahd_sync_sglist(struct ahd_softc *ahd, + struct scb *scb, int op); + +static inline size_t ahd_sg_size(struct ahd_softc *ahd) +{ + if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) + return (sizeof(struct ahd_dma64_seg)); + return (sizeof(struct ahd_dma_seg)); +} + +/*********************** Miscellaneous Support Functions ***********************/ +struct ahd_initiator_tinfo * + ahd_fetch_transinfo(struct ahd_softc *ahd, + char channel, u_int our_id, + u_int remote_id, + struct ahd_tmode_tstate **tstate); +uint16_t + ahd_inw(struct ahd_softc *ahd, u_int port); +void ahd_outw(struct ahd_softc *ahd, u_int port, + u_int value); +uint32_t + ahd_inl(struct ahd_softc *ahd, u_int port); +void ahd_outl(struct ahd_softc *ahd, u_int port, + uint32_t value); +uint64_t + ahd_inq(struct ahd_softc *ahd, u_int port); +void ahd_outq(struct ahd_softc *ahd, u_int port, + uint64_t value); +u_int ahd_get_scbptr(struct ahd_softc *ahd); +void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr); +u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset); +u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset); +struct scb * + ahd_lookup_scb(struct ahd_softc *ahd, u_int tag); +void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb); + +static inline uint8_t *ahd_get_sense_buf(struct ahd_softc *ahd, + struct scb *scb); +static inline uint32_t ahd_get_sense_bufaddr(struct ahd_softc *ahd, + struct scb *scb); + +#if 0 /* unused */ + +#define AHD_COPY_COL_IDX(dst, src) \ +do { \ + dst->hscb->scsiid = src->hscb->scsiid; \ + dst->hscb->lun = src->hscb->lun; \ +} while (0) + +#endif + +static inline uint8_t * +ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb) +{ + return (scb->sense_data); +} + +static inline uint32_t +ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb) +{ + return (scb->sense_busaddr); +} + +/************************** Interrupt Processing ******************************/ +int ahd_intr(struct ahd_softc *ahd); + +#endif /* _AIC79XX_INLINE_H_ */ diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c new file mode 100644 index 000000000..f2f3405cd --- /dev/null +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c @@ -0,0 +1,2851 @@ +/* + * Adaptec AIC79xx device driver for Linux. + * + * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.c#171 $ + * + * -------------------------------------------------------------------------- + * Copyright (c) 1994-2000 Justin T. Gibbs. + * Copyright (c) 1997-1999 Doug Ledford + * Copyright (c) 2000-2003 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include "aic79xx_osm.h"
+#include "aic79xx_inline.h"
+#include <scsi/scsi_tcq.h>
+
+static struct scsi_transport_template *ahd_linux_transport_template = NULL;
+
+#include <linux/init.h> /* __setup */
+#include <linux/mm.h> /* For fetching system memory size */
+#include <linux/blkdev.h> /* For block_size() */
+#include <linux/delay.h> /* For ssleep/msleep */
+#include <linux/device.h>
+#include <linux/slab.h>
+
+/*
+ * Bucket size for counting good commands in between bad ones.
+ */
+#define AHD_LINUX_ERR_THRESH 1000
+
+/*
+ * Set this to the delay in seconds after SCSI bus reset.
+ * Note, we honor this only for the initial bus reset.
+ * The scsi error recovery code performs its own bus settle
+ * delay handling for error recovery actions.
+ */
+#ifdef CONFIG_AIC79XX_RESET_DELAY_MS
+#define AIC79XX_RESET_DELAY CONFIG_AIC79XX_RESET_DELAY_MS
+#else
+#define AIC79XX_RESET_DELAY 5000
+#endif
+
+/*
+ * To change the default number of tagged transactions allowed per-device,
+ * add a line to the lilo.conf file like:
+ * append="aic79xx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
+ * which will result in the first four devices on the first two
+ * controllers being set to a tagged queue depth of 32.
+ *
+ * The tag_commands is an array of 16 to allow for wide and twin adapters.
+ * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15
+ * for channel 1.
+ */
+typedef struct {
+ uint16_t tag_commands[16]; /* Allow for wide/twin adapters. */
+} adapter_tag_info_t;
+
+/*
+ * Modify this as you see fit for your system.
+ *
+ * 0 tagged queuing disabled
+ * 1 <= n <= 253 n == max tags ever dispatched.
+ *
+ * The driver will throttle the number of commands dispatched to a
+ * device if it returns queue full. For devices with a fixed maximum
+ * queue depth, the driver will eventually determine this depth and
+ * lock it in (a console message is printed to indicate that a lock
+ * has occurred). On some devices, queue full is returned for a temporary
+ * resource shortage. These devices will return queue full at varying
+ * depths. The driver will throttle back when the queue fulls occur and
+ * attempt to slowly increase the depth over time as the device recovers
+ * from the resource shortage.
+ *
+ * In this example, the first line will disable tagged queueing for all
+ * the devices on the first probed aic79xx adapter.
+ *
+ * The second line enables tagged queueing with 4 commands/LUN for IDs
+ * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
+ * driver to attempt to use up to 64 tags for ID 1.
+ *
+ * The third line is the same as the first line.
+ * + * The fourth line disables tagged queueing for devices 0 and 3. It + * enables tagged queueing for the other IDs, with 16 commands/LUN + * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for + * IDs 2, 5-7, and 9-15. + */ + +/* + * NOTE: The below structure is for reference only, the actual structure + * to modify in order to change things is just below this comment block. +adapter_tag_info_t aic79xx_tag_info[] = +{ + {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + {{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}}, + {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + {{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}} +}; +*/ + +#ifdef CONFIG_AIC79XX_CMDS_PER_DEVICE +#define AIC79XX_CMDS_PER_DEVICE CONFIG_AIC79XX_CMDS_PER_DEVICE +#else +#define AIC79XX_CMDS_PER_DEVICE AHD_MAX_QUEUE +#endif + +#define AIC79XX_CONFIGED_TAG_COMMANDS { \ + AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \ + AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \ + AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \ + AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \ + AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \ + AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \ + AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \ + AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE \ +} + +/* + * By default, use the number of commands specified by + * the users kernel configuration. + */ +static adapter_tag_info_t aic79xx_tag_info[] = +{ + {AIC79XX_CONFIGED_TAG_COMMANDS}, + {AIC79XX_CONFIGED_TAG_COMMANDS}, + {AIC79XX_CONFIGED_TAG_COMMANDS}, + {AIC79XX_CONFIGED_TAG_COMMANDS}, + {AIC79XX_CONFIGED_TAG_COMMANDS}, + {AIC79XX_CONFIGED_TAG_COMMANDS}, + {AIC79XX_CONFIGED_TAG_COMMANDS}, + {AIC79XX_CONFIGED_TAG_COMMANDS}, + {AIC79XX_CONFIGED_TAG_COMMANDS}, + {AIC79XX_CONFIGED_TAG_COMMANDS}, + {AIC79XX_CONFIGED_TAG_COMMANDS}, + {AIC79XX_CONFIGED_TAG_COMMANDS}, + {AIC79XX_CONFIGED_TAG_COMMANDS}, + {AIC79XX_CONFIGED_TAG_COMMANDS}, + {AIC79XX_CONFIGED_TAG_COMMANDS}, + {AIC79XX_CONFIGED_TAG_COMMANDS} +}; + +/* + * The I/O cell on the chip is very configurable in respect to its analog + * characteristics. Set the defaults here; they can be overriden with + * the proper insmod parameters. + */ +struct ahd_linux_iocell_opts +{ + uint8_t precomp; + uint8_t slewrate; + uint8_t amplitude; +}; +#define AIC79XX_DEFAULT_PRECOMP 0xFF +#define AIC79XX_DEFAULT_SLEWRATE 0xFF +#define AIC79XX_DEFAULT_AMPLITUDE 0xFF +#define AIC79XX_DEFAULT_IOOPTS \ +{ \ + AIC79XX_DEFAULT_PRECOMP, \ + AIC79XX_DEFAULT_SLEWRATE, \ + AIC79XX_DEFAULT_AMPLITUDE \ +} +#define AIC79XX_PRECOMP_INDEX 0 +#define AIC79XX_SLEWRATE_INDEX 1 +#define AIC79XX_AMPLITUDE_INDEX 2 +static struct ahd_linux_iocell_opts aic79xx_iocell_info[] __ro_after_init = +{ + AIC79XX_DEFAULT_IOOPTS, + AIC79XX_DEFAULT_IOOPTS, + AIC79XX_DEFAULT_IOOPTS, + AIC79XX_DEFAULT_IOOPTS, + AIC79XX_DEFAULT_IOOPTS, + AIC79XX_DEFAULT_IOOPTS, + AIC79XX_DEFAULT_IOOPTS, + AIC79XX_DEFAULT_IOOPTS, + AIC79XX_DEFAULT_IOOPTS, + AIC79XX_DEFAULT_IOOPTS, + AIC79XX_DEFAULT_IOOPTS, + AIC79XX_DEFAULT_IOOPTS, + AIC79XX_DEFAULT_IOOPTS, + AIC79XX_DEFAULT_IOOPTS, + AIC79XX_DEFAULT_IOOPTS, + AIC79XX_DEFAULT_IOOPTS +}; + +/* + * There should be a specific return value for this in scsi.h, but + * it seems that most drivers ignore it. + */ +#define DID_UNDERFLOW DID_ERROR + +void +ahd_print_path(struct ahd_softc *ahd, struct scb *scb) +{ + printk("(scsi%d:%c:%d:%d): ", + ahd->platform_data->host->host_no, + scb != NULL ? SCB_GET_CHANNEL(ahd, scb) : 'X', + scb != NULL ? 
SCB_GET_TARGET(ahd, scb) : -1, + scb != NULL ? SCB_GET_LUN(scb) : -1); +} + +/* + * XXX - these options apply unilaterally to _all_ adapters + * cards in the system. This should be fixed. Exceptions to this + * rule are noted in the comments. + */ + +/* + * Skip the scsi bus reset. Non 0 make us skip the reset at startup. This + * has no effect on any later resets that might occur due to things like + * SCSI bus timeouts. + */ +static uint32_t aic79xx_no_reset; + +/* + * Should we force EXTENDED translation on a controller. + * 0 == Use whatever is in the SEEPROM or default to off + * 1 == Use whatever is in the SEEPROM or default to on + */ +static uint32_t aic79xx_extended; + +/* + * PCI bus parity checking of the Adaptec controllers. This is somewhat + * dubious at best. To my knowledge, this option has never actually + * solved a PCI parity problem, but on certain machines with broken PCI + * chipset configurations, it can generate tons of false error messages. + * It's included in the driver for completeness. + * 0 = Shut off PCI parity check + * non-0 = Enable PCI parity check + * + * NOTE: you can't actually pass -1 on the lilo prompt. So, to set this + * variable to -1 you would actually want to simply pass the variable + * name without a number. That will invert the 0 which will result in + * -1. + */ +static uint32_t aic79xx_pci_parity = ~0; + +/* + * There are lots of broken chipsets in the world. Some of them will + * violate the PCI spec when we issue byte sized memory writes to our + * controller. I/O mapped register access, if allowed by the given + * platform, will work in almost all cases. + */ +uint32_t aic79xx_allow_memio = ~0; + +/* + * So that we can set how long each device is given as a selection timeout. + * The table of values goes like this: + * 0 - 256ms + * 1 - 128ms + * 2 - 64ms + * 3 - 32ms + * We default to 256ms because some older devices need a longer time + * to respond to initial selection. + */ +static uint32_t aic79xx_seltime; + +/* + * Certain devices do not perform any aging on commands. Should the + * device be saturated by commands in one portion of the disk, it is + * possible for transactions on far away sectors to never be serviced. + * To handle these devices, we can periodically send an ordered tag to + * force all outstanding transactions to be serviced prior to a new + * transaction. + */ +static uint32_t aic79xx_periodic_otag; + +/* Some storage boxes are using an LSI chip which has a bug making it + * impossible to use aic79xx Rev B chip in 320 speeds. The following + * storage boxes have been reported to be buggy: + * EonStor 3U 16-Bay: U16U-G3A3 + * EonStor 2U 12-Bay: U12U-G3A3 + * SentinelRAID: 2500F R5 / R6 + * SentinelRAID: 2500F R1 + * SentinelRAID: 2500F/1500F + * SentinelRAID: 150F + * + * To get around this LSI bug, you can set your board to 160 mode + * or you can enable the SLOWCRC bit. + */ +uint32_t aic79xx_slowcrc; + +/* + * Module information and settable options. 
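+ * The aic79xx option string below is handed to aic79xx_setup(), both
+ * when passed as a module parameter and when given on the kernel
+ * command line via the aic79xx= boot option.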
+ */ +static char *aic79xx = NULL; + +MODULE_AUTHOR("Maintainer: Hannes Reinecke "); +MODULE_DESCRIPTION("Adaptec AIC790X U320 SCSI Host Bus Adapter driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(AIC79XX_DRIVER_VERSION); +module_param(aic79xx, charp, 0444); +MODULE_PARM_DESC(aic79xx, +"period-delimited options string:\n" +" verbose Enable verbose/diagnostic logging\n" +" allow_memio Allow device registers to be memory mapped\n" +" debug Bitmask of debug values to enable\n" +" no_reset Suppress initial bus resets\n" +" extended Enable extended geometry on all controllers\n" +" periodic_otag Send an ordered tagged transaction\n" +" periodically to prevent tag starvation.\n" +" This may be required by some older disk\n" +" or drives/RAID arrays.\n" +" tag_info: Set per-target tag depth\n" +" global_tag_depth: Global tag depth for all targets on all buses\n" +" slewrate:Set the signal slew rate (0-15).\n" +" precomp: Set the signal precompensation (0-7).\n" +" amplitude: Set the signal amplitude (0-7).\n" +" seltime: Selection Timeout:\n" +" (0/256ms,1/128ms,2/64ms,3/32ms)\n" +" slowcrc Turn on the SLOWCRC bit (Rev B only)\n" +"\n" +" Sample modprobe configuration file:\n" +" # Enable verbose logging\n" +" # Set tag depth on Controller 2/Target 2 to 10 tags\n" +" # Shorten the selection timeout to 128ms\n" +"\n" +" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n" +); + +static void ahd_linux_handle_scsi_status(struct ahd_softc *, + struct scsi_device *, + struct scb *); +static void ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, + struct scsi_cmnd *cmd); +static int ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd); +static void ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd); +static u_int ahd_linux_user_tagdepth(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo); +static void ahd_linux_device_queue_depth(struct scsi_device *); +static int ahd_linux_run_command(struct ahd_softc*, + struct ahd_linux_device *, + struct scsi_cmnd *); +static void ahd_linux_setup_tag_info_global(char *p); +static int aic79xx_setup(char *c); +static void ahd_freeze_simq(struct ahd_softc *ahd); +static void ahd_release_simq(struct ahd_softc *ahd); + +static int ahd_linux_unit; + + +/************************** OS Utility Wrappers *******************************/ +void ahd_delay(long); +void +ahd_delay(long usec) +{ + /* + * udelay on Linux can have problems for + * multi-millisecond waits. Wait at most + * 1024us per call. 
+ */ + while (usec > 0) { + udelay(usec % 1024); + usec -= 1024; + } +} + + +/***************************** Low Level I/O **********************************/ +uint8_t ahd_inb(struct ahd_softc * ahd, long port); +void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val); +void ahd_outw_atomic(struct ahd_softc * ahd, + long port, uint16_t val); +void ahd_outsb(struct ahd_softc * ahd, long port, + uint8_t *, int count); +void ahd_insb(struct ahd_softc * ahd, long port, + uint8_t *, int count); + +uint8_t +ahd_inb(struct ahd_softc * ahd, long port) +{ + uint8_t x; + + if (ahd->tags[0] == BUS_SPACE_MEMIO) { + x = readb(ahd->bshs[0].maddr + port); + } else { + x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF)); + } + mb(); + return (x); +} + +#if 0 /* unused */ +static uint16_t +ahd_inw_atomic(struct ahd_softc * ahd, long port) +{ + uint8_t x; + + if (ahd->tags[0] == BUS_SPACE_MEMIO) { + x = readw(ahd->bshs[0].maddr + port); + } else { + x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF)); + } + mb(); + return (x); +} +#endif + +void +ahd_outb(struct ahd_softc * ahd, long port, uint8_t val) +{ + if (ahd->tags[0] == BUS_SPACE_MEMIO) { + writeb(val, ahd->bshs[0].maddr + port); + } else { + outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF)); + } + mb(); +} + +void +ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val) +{ + if (ahd->tags[0] == BUS_SPACE_MEMIO) { + writew(val, ahd->bshs[0].maddr + port); + } else { + outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF)); + } + mb(); +} + +void +ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count) +{ + int i; + + /* + * There is probably a more efficient way to do this on Linux + * but we don't use this for anything speed critical and this + * should work. + */ + for (i = 0; i < count; i++) + ahd_outb(ahd, port, *array++); +} + +void +ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count) +{ + int i; + + /* + * There is probably a more efficient way to do this on Linux + * but we don't use this for anything speed critical and this + * should work. 
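+ * (insb() would batch the I/O-port case, but going through ahd_inb()
+ * keeps the memory-mapped and I/O-port paths identical.)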
+ */ + for (i = 0; i < count; i++) + *array++ = ahd_inb(ahd, port); +} + +/******************************* PCI Routines *********************************/ +uint32_t +ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width) +{ + switch (width) { + case 1: + { + uint8_t retval; + + pci_read_config_byte(pci, reg, &retval); + return (retval); + } + case 2: + { + uint16_t retval; + pci_read_config_word(pci, reg, &retval); + return (retval); + } + case 4: + { + uint32_t retval; + pci_read_config_dword(pci, reg, &retval); + return (retval); + } + default: + panic("ahd_pci_read_config: Read size too big"); + /* NOTREACHED */ + return (0); + } +} + +void +ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width) +{ + switch (width) { + case 1: + pci_write_config_byte(pci, reg, value); + break; + case 2: + pci_write_config_word(pci, reg, value); + break; + case 4: + pci_write_config_dword(pci, reg, value); + break; + default: + panic("ahd_pci_write_config: Write size too big"); + /* NOTREACHED */ + } +} + +/****************************** Inlines ***************************************/ +static void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*); + +static void +ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb) +{ + struct scsi_cmnd *cmd; + + cmd = scb->io_ctx; + ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE); + scsi_dma_unmap(cmd); +} + +/******************************** Macros **************************************/ +#define BUILD_SCSIID(ahd, cmd) \ + (((scmd_id(cmd) << TID_SHIFT) & TID) | (ahd)->our_id) + +/* + * Return a string describing the driver. + */ +static const char * +ahd_linux_info(struct Scsi_Host *host) +{ + static char buffer[512]; + char ahd_info[256]; + char *bp; + struct ahd_softc *ahd; + + bp = &buffer[0]; + ahd = *(struct ahd_softc **)host->hostdata; + memset(bp, 0, sizeof(buffer)); + strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev " AIC79XX_DRIVER_VERSION "\n" + " <"); + strcat(bp, ahd->description); + strcat(bp, ">\n" + " "); + ahd_controller_info(ahd, ahd_info); + strcat(bp, ahd_info); + + return (bp); +} + +/* + * Queue an SCB to the controller. 
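+ * ahd_linux_queue_lck() is called with the Scsi_Host lock held by the
+ * wrapper that DEF_SCSI_QCMD() generates just below.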
+ */ +static int ahd_linux_queue_lck(struct scsi_cmnd *cmd) +{ + struct ahd_softc *ahd; + struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device); + int rtn = SCSI_MLQUEUE_HOST_BUSY; + + ahd = *(struct ahd_softc **)cmd->device->host->hostdata; + + cmd->result = CAM_REQ_INPROG << 16; + rtn = ahd_linux_run_command(ahd, dev, cmd); + + return rtn; +} + +static DEF_SCSI_QCMD(ahd_linux_queue) + +static struct scsi_target ** +ahd_linux_target_in_softc(struct scsi_target *starget) +{ + struct ahd_softc *ahd = + *((struct ahd_softc **)dev_to_shost(&starget->dev)->hostdata); + unsigned int target_offset; + + target_offset = starget->id; + if (starget->channel != 0) + target_offset += 8; + + return &ahd->platform_data->starget[target_offset]; +} + +static int +ahd_linux_target_alloc(struct scsi_target *starget) +{ + struct ahd_softc *ahd = + *((struct ahd_softc **)dev_to_shost(&starget->dev)->hostdata); + struct seeprom_config *sc = ahd->seep_config; + unsigned long flags; + struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget); + struct ahd_devinfo devinfo; + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + char channel = starget->channel + 'A'; + + ahd_lock(ahd, &flags); + + BUG_ON(*ahd_targp != NULL); + + *ahd_targp = starget; + + if (sc) { + int flags = sc->device_flags[starget->id]; + + tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, + starget->id, &tstate); + + if ((flags & CFPACKETIZED) == 0) { + /* don't negotiate packetized (IU) transfers */ + spi_max_iu(starget) = 0; + } else { + if ((ahd->features & AHD_RTI) == 0) + spi_rti(starget) = 0; + } + + if ((flags & CFQAS) == 0) + spi_max_qas(starget) = 0; + + /* Transinfo values have been set to BIOS settings */ + spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0; + spi_min_period(starget) = tinfo->user.period; + spi_max_offset(starget) = tinfo->user.offset; + } + + tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id, + starget->id, &tstate); + ahd_compile_devinfo(&devinfo, ahd->our_id, starget->id, + CAM_LUN_WILDCARD, channel, + ROLE_INITIATOR); + ahd_set_syncrate(ahd, &devinfo, 0, 0, 0, + AHD_TRANS_GOAL, /*paused*/FALSE); + ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_GOAL, /*paused*/FALSE); + ahd_unlock(ahd, &flags); + + return 0; +} + +static void +ahd_linux_target_destroy(struct scsi_target *starget) +{ + struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget); + + *ahd_targp = NULL; +} + +static int +ahd_linux_slave_alloc(struct scsi_device *sdev) +{ + struct ahd_softc *ahd = + *((struct ahd_softc **)sdev->host->hostdata); + struct ahd_linux_device *dev; + + if (bootverbose) + printk("%s: Slave Alloc %d\n", ahd_name(ahd), sdev->id); + + dev = scsi_transport_device_data(sdev); + memset(dev, 0, sizeof(*dev)); + + /* + * We start out life using untagged + * transactions of which we allow one. + */ + dev->openings = 1; + + /* + * Set maxtags to 0. This will be changed if we + * later determine that we are dealing with + * a tagged queuing capable device. + */ + dev->maxtags = 0; + + return (0); +} + +static int +ahd_linux_slave_configure(struct scsi_device *sdev) +{ + if (bootverbose) + sdev_printk(KERN_INFO, sdev, "Slave Configure\n"); + + ahd_linux_device_queue_depth(sdev); + + /* Initial Domain Validation */ + if (!spi_initial_dv(sdev->sdev_target)) + spi_dv_device(sdev); + + return 0; +} + +#if defined(__i386__) +/* + * Return the disk geometry for the given SCSI device. 
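+ * If scsi_partsize() cannot derive a geometry from the partition
+ * table, a 64 head / 32 sector translation is assumed; if that yields
+ * 1024 or more cylinders and extended translation is enabled, a
+ * 255 head / 63 sector translation is used instead.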
+ */ +static int +ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int geom[]) +{ + int heads; + int sectors; + int cylinders; + int extended; + struct ahd_softc *ahd; + + ahd = *((struct ahd_softc **)sdev->host->hostdata); + + if (scsi_partsize(bdev, capacity, geom)) + return 0; + + heads = 64; + sectors = 32; + cylinders = aic_sector_div(capacity, heads, sectors); + + if (aic79xx_extended != 0) + extended = 1; + else + extended = (ahd->flags & AHD_EXTENDED_TRANS_A) != 0; + if (extended && cylinders >= 1024) { + heads = 255; + sectors = 63; + cylinders = aic_sector_div(capacity, heads, sectors); + } + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + return (0); +} +#endif + +/* + * Abort the current SCSI command(s). + */ +static int +ahd_linux_abort(struct scsi_cmnd *cmd) +{ + return ahd_linux_queue_abort_cmd(cmd); +} + +/* + * Attempt to send a target reset message to the device that timed out. + */ +static int +ahd_linux_dev_reset(struct scsi_cmnd *cmd) +{ + struct ahd_softc *ahd; + struct ahd_linux_device *dev; + struct scb *reset_scb; + u_int cdb_byte; + int retval = SUCCESS; + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + unsigned long flags; + DECLARE_COMPLETION_ONSTACK(done); + + reset_scb = NULL; + + ahd = *(struct ahd_softc **)cmd->device->host->hostdata; + + scmd_printk(KERN_INFO, cmd, + "Attempting to queue a TARGET RESET message:"); + + printk("CDB:"); + for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++) + printk(" 0x%x", cmd->cmnd[cdb_byte]); + printk("\n"); + + /* + * Determine if we currently own this command. + */ + dev = scsi_transport_device_data(cmd->device); + + if (dev == NULL) { + /* + * No target device for this command exists, + * so we must not still own the command. + */ + scmd_printk(KERN_INFO, cmd, "Is not an active device\n"); + return SUCCESS; + } + + /* + * Generate us a new SCB + */ + reset_scb = ahd_get_scb(ahd, AHD_NEVER_COL_IDX); + if (!reset_scb) { + scmd_printk(KERN_INFO, cmd, "No SCB available\n"); + return FAILED; + } + + tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, + cmd->device->id, &tstate); + reset_scb->io_ctx = cmd; + reset_scb->platform_data->dev = dev; + reset_scb->sg_count = 0; + ahd_set_residual(reset_scb, 0); + ahd_set_sense_residual(reset_scb, 0); + reset_scb->platform_data->xfer_len = 0; + reset_scb->hscb->control = 0; + reset_scb->hscb->scsiid = BUILD_SCSIID(ahd,cmd); + reset_scb->hscb->lun = cmd->device->lun; + reset_scb->hscb->cdb_len = 0; + reset_scb->hscb->task_management = SIU_TASKMGMT_LUN_RESET; + reset_scb->flags |= SCB_DEVICE_RESET|SCB_RECOVERY_SCB|SCB_ACTIVE; + if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { + reset_scb->flags |= SCB_PACKETIZED; + } else { + reset_scb->hscb->control |= MK_MESSAGE; + } + dev->openings--; + dev->active++; + dev->commands_issued++; + + ahd_lock(ahd, &flags); + + LIST_INSERT_HEAD(&ahd->pending_scbs, reset_scb, pending_links); + ahd_queue_scb(ahd, reset_scb); + + ahd->platform_data->eh_done = &done; + ahd_unlock(ahd, &flags); + + printk("%s: Device reset code sleeping\n", ahd_name(ahd)); + if (!wait_for_completion_timeout(&done, 5 * HZ)) { + ahd_lock(ahd, &flags); + ahd->platform_data->eh_done = NULL; + ahd_unlock(ahd, &flags); + printk("%s: Device reset timer expired (active %d)\n", + ahd_name(ahd), dev->active); + retval = FAILED; + } + printk("%s: Device reset returning 0x%x\n", ahd_name(ahd), retval); + + return (retval); +} + +/* + * Reset the SCSI bus. 
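ahd_linux_biosparam() above falls back to the classic 64-head/32-sector translation and, when extended translation is allowed and the resulting cylinder count reaches 1024, switches to 255/63. A standalone sketch of that arithmetic is below; cylinders_for() plays the role of aic_sector_div(), i.e. capacity divided by heads times sectors, and the sample capacity is only an example.

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as aic_sector_div(): cylinders for a given translation. */
    static uint32_t cylinders_for(uint64_t capacity, int heads, int sectors)
    {
        return (uint32_t)(capacity / ((uint64_t)heads * sectors));
    }

    /* Mirror of the biosparam fallback: 64/32, then 255/63 when extended. */
    static void bios_geometry(uint64_t capacity, int extended, int geom[3])
    {
        int heads = 64, sectors = 32;
        uint32_t cylinders = cylinders_for(capacity, heads, sectors);

        if (extended && cylinders >= 1024) {
            heads = 255;
            sectors = 63;
            cylinders = cylinders_for(capacity, heads, sectors);
        }
        geom[0] = heads;
        geom[1] = sectors;
        geom[2] = (int)cylinders;
    }

    int main(void)
    {
        int geom[3];

        bios_geometry(143374000ULL, 1, geom);   /* example: ~73GB of 512-byte sectors */
        printf("H/S/C = %d/%d/%d\n", geom[0], geom[1], geom[2]);
        return 0;
    }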
+ */ +static int +ahd_linux_bus_reset(struct scsi_cmnd *cmd) +{ + struct ahd_softc *ahd; + int found; + unsigned long flags; + + ahd = *(struct ahd_softc **)cmd->device->host->hostdata; +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) + printk("%s: Bus reset called for cmd %p\n", + ahd_name(ahd), cmd); +#endif + ahd_lock(ahd, &flags); + + found = ahd_reset_channel(ahd, scmd_channel(cmd) + 'A', + /*initiate reset*/TRUE); + ahd_unlock(ahd, &flags); + + if (bootverbose) + printk("%s: SCSI bus reset delivered. " + "%d SCBs aborted.\n", ahd_name(ahd), found); + + return (SUCCESS); +} + +struct scsi_host_template aic79xx_driver_template = { + .module = THIS_MODULE, + .name = "aic79xx", + .proc_name = "aic79xx", + .show_info = ahd_linux_show_info, + .write_info = ahd_proc_write_seeprom, + .info = ahd_linux_info, + .queuecommand = ahd_linux_queue, + .eh_abort_handler = ahd_linux_abort, + .eh_device_reset_handler = ahd_linux_dev_reset, + .eh_bus_reset_handler = ahd_linux_bus_reset, +#if defined(__i386__) + .bios_param = ahd_linux_biosparam, +#endif + .can_queue = AHD_MAX_QUEUE, + .this_id = -1, + .max_sectors = 8192, + .cmd_per_lun = 2, + .slave_alloc = ahd_linux_slave_alloc, + .slave_configure = ahd_linux_slave_configure, + .target_alloc = ahd_linux_target_alloc, + .target_destroy = ahd_linux_target_destroy, +}; + +/******************************** Bus DMA *************************************/ +int +ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent, + bus_size_t alignment, bus_size_t boundary, + dma_addr_t lowaddr, dma_addr_t highaddr, + bus_dma_filter_t *filter, void *filterarg, + bus_size_t maxsize, int nsegments, + bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag) +{ + bus_dma_tag_t dmat; + + dmat = kmalloc(sizeof(*dmat), GFP_ATOMIC); + if (dmat == NULL) + return (ENOMEM); + + /* + * Linux is very simplistic about DMA memory. For now don't + * maintain all specification information. Once Linux supplies + * better facilities for doing these operations, or the + * needs of this particular driver change, we might need to do + * more here. + */ + dmat->alignment = alignment; + dmat->boundary = boundary; + dmat->maxsize = maxsize; + *ret_tag = dmat; + return (0); +} + +void +ahd_dma_tag_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat) +{ + kfree(dmat); +} + +int +ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr, + int flags, bus_dmamap_t *mapp) +{ + *vaddr = dma_alloc_coherent(&ahd->dev_softc->dev, dmat->maxsize, mapp, + GFP_ATOMIC); + if (*vaddr == NULL) + return (ENOMEM); + return(0); +} + +void +ahd_dmamem_free(struct ahd_softc *ahd, bus_dma_tag_t dmat, + void* vaddr, bus_dmamap_t map) +{ + dma_free_coherent(&ahd->dev_softc->dev, dmat->maxsize, vaddr, map); +} + +int +ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map, + void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb, + void *cb_arg, int flags) +{ + /* + * Assume for now that this will only be used during + * initialization and not for per-transaction buffer mapping. 
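The bus-DMA shims above preserve the BSD-style interface expected by the shared core while delegating to dma_alloc_coherent(): the "map" is simply the bus address returned by the allocator, and ahd_dmamap_load() reports the whole allocation to the callback as a single segment. The standalone mock below shows that call pattern; every name in it is illustrative, not the driver's, and the "bus address" is faked from the pointer value.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef uint64_t bus_addr_t;

    struct dma_seg { bus_addr_t addr; size_t len; };
    typedef void seg_cb(void *arg, struct dma_seg *segs, int nseg, int error);

    /* Mock "coherent" allocation: a virtual buffer plus a fake bus address. */
    static void *mock_alloc_coherent(size_t size, bus_addr_t *busaddr)
    {
        void *vaddr = malloc(size);

        *busaddr = (bus_addr_t)(uintptr_t)vaddr;    /* stand-in for the DMA address */
        return vaddr;
    }

    /* Mirror of ahd_dmamap_load(): hand the whole allocation to the callback
     * as one segment, exactly as the shim does for shared controller data. */
    static void mock_map_load(bus_addr_t busaddr, size_t size, seg_cb *cb, void *arg)
    {
        struct dma_seg seg = { .addr = busaddr, .len = size };

        cb(arg, &seg, 1, 0);
    }

    static void print_seg(void *arg, struct dma_seg *segs, int nseg, int error)
    {
        (void)arg; (void)error;
        printf("%d segment(s), first at 0x%llx, %zu bytes\n",
               nseg, (unsigned long long)segs[0].addr, segs[0].len);
    }

    int main(void)
    {
        bus_addr_t busaddr;
        void *shared = mock_alloc_coherent(4096, &busaddr);

        mock_map_load(busaddr, 4096, print_seg, NULL);
        free(shared);
        return 0;
    }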
+ */ + bus_dma_segment_t stack_sg; + + stack_sg.ds_addr = map; + stack_sg.ds_len = dmat->maxsize; + cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0); + return (0); +} + +void +ahd_dmamap_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map) +{ +} + +int +ahd_dmamap_unload(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map) +{ + /* Nothing to do */ + return (0); +} + +/********************* Platform Dependent Functions ***************************/ +static void +ahd_linux_setup_iocell_info(u_long index, int instance, int targ, int32_t value) +{ + + if ((instance >= 0) + && (instance < ARRAY_SIZE(aic79xx_iocell_info))) { + uint8_t *iocell_info; + + iocell_info = (uint8_t*)&aic79xx_iocell_info[instance]; + iocell_info[index] = value & 0xFFFF; + if (bootverbose) + printk("iocell[%d:%ld] = %d\n", instance, index, value); + } +} + +static void +ahd_linux_setup_tag_info_global(char *p) +{ + int tags, i, j; + + tags = simple_strtoul(p + 1, NULL, 0) & 0xff; + printk("Setting Global Tags= %d\n", tags); + + for (i = 0; i < ARRAY_SIZE(aic79xx_tag_info); i++) { + for (j = 0; j < AHD_NUM_TARGETS; j++) { + aic79xx_tag_info[i].tag_commands[j] = tags; + } + } +} + +static void +ahd_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value) +{ + + if ((instance >= 0) && (targ >= 0) + && (instance < ARRAY_SIZE(aic79xx_tag_info)) + && (targ < AHD_NUM_TARGETS)) { + aic79xx_tag_info[instance].tag_commands[targ] = value & 0x1FF; + if (bootverbose) + printk("tag_info[%d:%d] = %d\n", instance, targ, value); + } +} + +static char * +ahd_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth, + void (*callback)(u_long, int, int, int32_t), + u_long callback_arg) +{ + char *tok_end; + char *tok_end2; + int i; + int instance; + int targ; + int done; + char tok_list[] = {'.', ',', '{', '}', '\0'}; + + /* All options use a ':' name/arg separator */ + if (*opt_arg != ':') + return (opt_arg); + opt_arg++; + instance = -1; + targ = -1; + done = FALSE; + /* + * Restore separator that may be in + * the middle of our option argument. + */ + tok_end = strchr(opt_arg, '\0'); + if (tok_end < end) + *tok_end = ','; + while (!done) { + switch (*opt_arg) { + case '{': + if (instance == -1) { + instance = 0; + } else { + if (depth > 1) { + if (targ == -1) + targ = 0; + } else { + printk("Malformed Option %s\n", + opt_name); + done = TRUE; + } + } + opt_arg++; + break; + case '}': + if (targ != -1) + targ = -1; + else if (instance != -1) + instance = -1; + opt_arg++; + break; + case ',': + case '.': + if (instance == -1) + done = TRUE; + else if (targ >= 0) + targ++; + else if (instance >= 0) + instance++; + opt_arg++; + break; + case '\0': + done = TRUE; + break; + default: + tok_end = end; + for (i = 0; tok_list[i]; i++) { + tok_end2 = strchr(opt_arg, tok_list[i]); + if ((tok_end2) && (tok_end2 < tok_end)) + tok_end = tok_end2; + } + callback(callback_arg, instance, targ, + simple_strtol(opt_arg, NULL, 0)); + opt_arg = tok_end; + break; + } + } + return (opt_arg); +} + +/* + * Handle Linux boot parameters. This routine allows for assigning a value + * to a parameter with a ':' between the parameter and the value. + * ie. 
aic79xx=stpwlev:1,extended + */ +static int +aic79xx_setup(char *s) +{ + int i, n; + char *p; + char *end; + + static const struct { + const char *name; + uint32_t *flag; + } options[] = { + { "extended", &aic79xx_extended }, + { "no_reset", &aic79xx_no_reset }, + { "verbose", &aic79xx_verbose }, + { "allow_memio", &aic79xx_allow_memio}, +#ifdef AHD_DEBUG + { "debug", &ahd_debug }, +#endif + { "periodic_otag", &aic79xx_periodic_otag }, + { "pci_parity", &aic79xx_pci_parity }, + { "seltime", &aic79xx_seltime }, + { "tag_info", NULL }, + { "global_tag_depth", NULL}, + { "slewrate", NULL }, + { "precomp", NULL }, + { "amplitude", NULL }, + { "slowcrc", &aic79xx_slowcrc }, + }; + + end = strchr(s, '\0'); + + /* + * XXX ia64 gcc isn't smart enough to know that ARRAY_SIZE + * will never be 0 in this case. + */ + n = 0; + + while ((p = strsep(&s, ",.")) != NULL) { + if (*p == '\0') + continue; + for (i = 0; i < ARRAY_SIZE(options); i++) { + + n = strlen(options[i].name); + if (strncmp(options[i].name, p, n) == 0) + break; + } + if (i == ARRAY_SIZE(options)) + continue; + + if (strncmp(p, "global_tag_depth", n) == 0) { + ahd_linux_setup_tag_info_global(p + n); + } else if (strncmp(p, "tag_info", n) == 0) { + s = ahd_parse_brace_option("tag_info", p + n, end, + 2, ahd_linux_setup_tag_info, 0); + } else if (strncmp(p, "slewrate", n) == 0) { + s = ahd_parse_brace_option("slewrate", + p + n, end, 1, ahd_linux_setup_iocell_info, + AIC79XX_SLEWRATE_INDEX); + } else if (strncmp(p, "precomp", n) == 0) { + s = ahd_parse_brace_option("precomp", + p + n, end, 1, ahd_linux_setup_iocell_info, + AIC79XX_PRECOMP_INDEX); + } else if (strncmp(p, "amplitude", n) == 0) { + s = ahd_parse_brace_option("amplitude", + p + n, end, 1, ahd_linux_setup_iocell_info, + AIC79XX_AMPLITUDE_INDEX); + } else if (p[n] == ':') { + *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0); + } else if (!strncmp(p, "verbose", n)) { + *(options[i].flag) = 1; + } else { + *(options[i].flag) ^= 0xFFFFFFFF; + } + } + return 1; +} + +__setup("aic79xx=", aic79xx_setup); + +uint32_t aic79xx_verbose; + +int +ahd_linux_register_host(struct ahd_softc *ahd, struct scsi_host_template *template) +{ + char buf[80]; + struct Scsi_Host *host; + char *new_name; + u_long s; + int retval; + + template->name = ahd->description; + host = scsi_host_alloc(template, sizeof(struct ahd_softc *)); + if (host == NULL) + return (ENOMEM); + + *((struct ahd_softc **)host->hostdata) = ahd; + ahd->platform_data->host = host; + host->can_queue = AHD_MAX_QUEUE; + host->cmd_per_lun = 2; + host->sg_tablesize = AHD_NSEG; + host->this_id = ahd->our_id; + host->irq = ahd->platform_data->irq; + host->max_id = (ahd->features & AHD_WIDE) ? 
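aic79xx_setup() above walks a comma/period-separated option string against the table of recognised names; entries whose flag pointer is NULL take the brace syntax handled by ahd_parse_brace_option(). The commented lines below are illustrative command-line forms implied by that table and parser; the numeric values are only examples, not recommendations.

    /*
     * aic79xx=verbose                   boolean option, sets the flag to 1
     * aic79xx=no_reset                  skip the initial SCSI bus reset
     * aic79xx=seltime:1                 "name:value" form, selection timeout index 0-3
     * aic79xx=global_tag_depth:16       same tag depth for every target
     * aic79xx=tag_info:{{16,16,16,16}}  per-controller, per-target depths
     *                                   (outer braces = controller instance,
     *                                    inner braces = targets on that instance)
     * aic79xx=slewrate:{9},precomp:{7}  per-controller IO-cell tuning, one value
     *                                   per controller instance inside the braces
     */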
16 : 8; + host->max_lun = AHD_NUM_LUNS; + host->max_channel = 0; + host->sg_tablesize = AHD_NSEG; + ahd_lock(ahd, &s); + ahd_set_unit(ahd, ahd_linux_unit++); + ahd_unlock(ahd, &s); + sprintf(buf, "scsi%d", host->host_no); + new_name = kmalloc(strlen(buf) + 1, GFP_ATOMIC); + if (new_name != NULL) { + strcpy(new_name, buf); + ahd_set_name(ahd, new_name); + } + host->unique_id = ahd->unit; + ahd_linux_initialize_scsi_bus(ahd); + ahd_intr_enable(ahd, TRUE); + + host->transportt = ahd_linux_transport_template; + + retval = scsi_add_host(host, &ahd->dev_softc->dev); + if (retval) { + printk(KERN_WARNING "aic79xx: scsi_add_host failed\n"); + scsi_host_put(host); + return retval; + } + + scsi_scan_host(host); + return 0; +} + +/* + * Place the SCSI bus into a known state by either resetting it, + * or forcing transfer negotiations on the next command to any + * target. + */ +static void +ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd) +{ + u_int target_id; + u_int numtarg; + unsigned long s; + + target_id = 0; + numtarg = 0; + + if (aic79xx_no_reset != 0) + ahd->flags &= ~AHD_RESET_BUS_A; + + if ((ahd->flags & AHD_RESET_BUS_A) != 0) + ahd_reset_channel(ahd, 'A', /*initiate_reset*/TRUE); + else + numtarg = (ahd->features & AHD_WIDE) ? 16 : 8; + + ahd_lock(ahd, &s); + + /* + * Force negotiation to async for all targets that + * will not see an initial bus reset. + */ + for (; target_id < numtarg; target_id++) { + struct ahd_devinfo devinfo; + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + + tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, + target_id, &tstate); + ahd_compile_devinfo(&devinfo, ahd->our_id, target_id, + CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR); + ahd_update_neg_request(ahd, &devinfo, tstate, + tinfo, AHD_NEG_ALWAYS); + } + ahd_unlock(ahd, &s); + /* Give the bus some time to recover */ + if ((ahd->flags & AHD_RESET_BUS_A) != 0) { + ahd_freeze_simq(ahd); + msleep(AIC79XX_RESET_DELAY); + ahd_release_simq(ahd); + } +} + +int +ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg) +{ + ahd->platform_data = + kzalloc(sizeof(struct ahd_platform_data), GFP_ATOMIC); + if (ahd->platform_data == NULL) + return (ENOMEM); + ahd->platform_data->irq = AHD_LINUX_NOIRQ; + ahd_lockinit(ahd); + ahd->seltime = (aic79xx_seltime & 0x3) << 4; + return (0); +} + +void +ahd_platform_free(struct ahd_softc *ahd) +{ + struct scsi_target *starget; + int i; + + if (ahd->platform_data != NULL) { + /* destroy all of the device and target objects */ + for (i = 0; i < AHD_NUM_TARGETS; i++) { + starget = ahd->platform_data->starget[i]; + if (starget != NULL) { + ahd->platform_data->starget[i] = NULL; + } + } + + if (ahd->platform_data->irq != AHD_LINUX_NOIRQ) + free_irq(ahd->platform_data->irq, ahd); + if (ahd->tags[0] == BUS_SPACE_PIO + && ahd->bshs[0].ioport != 0) + release_region(ahd->bshs[0].ioport, 256); + if (ahd->tags[1] == BUS_SPACE_PIO + && ahd->bshs[1].ioport != 0) + release_region(ahd->bshs[1].ioport, 256); + if (ahd->tags[0] == BUS_SPACE_MEMIO + && ahd->bshs[0].maddr != NULL) { + iounmap(ahd->bshs[0].maddr); + release_mem_region(ahd->platform_data->mem_busaddr, + 0x1000); + } + if (ahd->platform_data->host) + scsi_host_put(ahd->platform_data->host); + + kfree(ahd->platform_data); + } +} + +void +ahd_platform_init(struct ahd_softc *ahd) +{ + /* + * Lookup and commit any modified IO Cell options. 
+ */ + if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) { + const struct ahd_linux_iocell_opts *iocell_opts; + + iocell_opts = &aic79xx_iocell_info[ahd->unit]; + if (iocell_opts->precomp != AIC79XX_DEFAULT_PRECOMP) + AHD_SET_PRECOMP(ahd, iocell_opts->precomp); + if (iocell_opts->slewrate != AIC79XX_DEFAULT_SLEWRATE) + AHD_SET_SLEWRATE(ahd, iocell_opts->slewrate); + if (iocell_opts->amplitude != AIC79XX_DEFAULT_AMPLITUDE) + AHD_SET_AMPLITUDE(ahd, iocell_opts->amplitude); + } + +} + +void +ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb) +{ + ahd_platform_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), + SCB_GET_CHANNEL(ahd, scb), + SCB_GET_LUN(scb), SCB_LIST_NULL, + ROLE_UNKNOWN, CAM_REQUEUE_REQ); +} + +void +ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev, + struct ahd_devinfo *devinfo, ahd_queue_alg alg) +{ + struct ahd_linux_device *dev; + int was_queuing; + int now_queuing; + + if (sdev == NULL) + return; + + dev = scsi_transport_device_data(sdev); + + if (dev == NULL) + return; + was_queuing = dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED); + switch (alg) { + default: + case AHD_QUEUE_NONE: + now_queuing = 0; + break; + case AHD_QUEUE_BASIC: + now_queuing = AHD_DEV_Q_BASIC; + break; + case AHD_QUEUE_TAGGED: + now_queuing = AHD_DEV_Q_TAGGED; + break; + } + if ((dev->flags & AHD_DEV_FREEZE_TIL_EMPTY) == 0 + && (was_queuing != now_queuing) + && (dev->active != 0)) { + dev->flags |= AHD_DEV_FREEZE_TIL_EMPTY; + dev->qfrozen++; + } + + dev->flags &= ~(AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED|AHD_DEV_PERIODIC_OTAG); + if (now_queuing) { + u_int usertags; + + usertags = ahd_linux_user_tagdepth(ahd, devinfo); + if (!was_queuing) { + /* + * Start out aggressively and allow our + * dynamic queue depth algorithm to take + * care of the rest. + */ + dev->maxtags = usertags; + dev->openings = dev->maxtags - dev->active; + } + if (dev->maxtags == 0) { + /* + * Queueing is disabled by the user. + */ + dev->openings = 1; + } else if (alg == AHD_QUEUE_TAGGED) { + dev->flags |= AHD_DEV_Q_TAGGED; + if (aic79xx_periodic_otag != 0) + dev->flags |= AHD_DEV_PERIODIC_OTAG; + } else + dev->flags |= AHD_DEV_Q_BASIC; + } else { + /* We can only have one opening. */ + dev->maxtags = 0; + dev->openings = 1 - dev->active; + } + + switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) { + case AHD_DEV_Q_BASIC: + case AHD_DEV_Q_TAGGED: + scsi_change_queue_depth(sdev, + dev->openings + dev->active); + break; + default: + /* + * We allow the OS to queue 2 untagged transactions to + * us at any time even though we can only execute them + * serially on the controller/device. This should + * remove some latency. + */ + scsi_change_queue_depth(sdev, 1); + break; + } +} + +int +ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, char channel, + int lun, u_int tag, role_t role, uint32_t status) +{ + return 0; +} + +static u_int +ahd_linux_user_tagdepth(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) +{ + static int warned_user; + u_int tags; + + tags = 0; + if ((ahd->user_discenable & devinfo->target_mask) != 0) { + if (ahd->unit >= ARRAY_SIZE(aic79xx_tag_info)) { + + if (warned_user == 0) { + printk(KERN_WARNING +"aic79xx: WARNING: Insufficient tag_info instances\n" +"aic79xx: for installed controllers. 
Using defaults\n" +"aic79xx: Please update the aic79xx_tag_info array in\n" +"aic79xx: the aic79xx_osm.c source file.\n"); + warned_user++; + } + tags = AHD_MAX_QUEUE; + } else { + adapter_tag_info_t *tag_info; + + tag_info = &aic79xx_tag_info[ahd->unit]; + tags = tag_info->tag_commands[devinfo->target_offset]; + if (tags > AHD_MAX_QUEUE) + tags = AHD_MAX_QUEUE; + } + } + return (tags); +} + +/* + * Determines the queue depth for a given device. + */ +static void +ahd_linux_device_queue_depth(struct scsi_device *sdev) +{ + struct ahd_devinfo devinfo; + u_int tags; + struct ahd_softc *ahd = *((struct ahd_softc **)sdev->host->hostdata); + + ahd_compile_devinfo(&devinfo, + ahd->our_id, + sdev->sdev_target->id, sdev->lun, + sdev->sdev_target->channel == 0 ? 'A' : 'B', + ROLE_INITIATOR); + tags = ahd_linux_user_tagdepth(ahd, &devinfo); + if (tags != 0 && sdev->tagged_supported != 0) { + + ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_TAGGED); + ahd_send_async(ahd, devinfo.channel, devinfo.target, + devinfo.lun, AC_TRANSFER_NEG); + ahd_print_devinfo(ahd, &devinfo); + printk("Tagged Queuing enabled. Depth %d\n", tags); + } else { + ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_NONE); + ahd_send_async(ahd, devinfo.channel, devinfo.target, + devinfo.lun, AC_TRANSFER_NEG); + } +} + +static int +ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev, + struct scsi_cmnd *cmd) +{ + struct scb *scb; + struct hardware_scb *hscb; + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + u_int col_idx; + uint16_t mask; + unsigned long flags; + int nseg; + + nseg = scsi_dma_map(cmd); + if (nseg < 0) + return SCSI_MLQUEUE_HOST_BUSY; + + ahd_lock(ahd, &flags); + + /* + * Get an scb to use. + */ + tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, + cmd->device->id, &tstate); + if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) == 0 + || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { + col_idx = AHD_NEVER_COL_IDX; + } else { + col_idx = AHD_BUILD_COL_IDX(cmd->device->id, + cmd->device->lun); + } + if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) { + ahd->flags |= AHD_RESOURCE_SHORTAGE; + ahd_unlock(ahd, &flags); + scsi_dma_unmap(cmd); + return SCSI_MLQUEUE_HOST_BUSY; + } + + scb->io_ctx = cmd; + scb->platform_data->dev = dev; + hscb = scb->hscb; + cmd->host_scribble = (char *)scb; + + /* + * Fill out basics of the HSCB. 
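ahd_linux_user_tagdepth() and ahd_platform_set_tags() above together decide how many commands a device may have outstanding: the user tag depth is looked up per controller and target (clamped to AHD_MAX_QUEUE), and on a queuing-mode change the opening count is recomputed, collapsing to a single opening when tags are off. A simplified standalone sketch of that bookkeeping (flags and the freeze-until-empty handling are omitted):

    #include <stdio.h>

    struct dev_state {
        int active;     /* commands currently outstanding */
        int maxtags;    /* current tag ceiling, 0 = untagged */
        int openings;   /* additional commands we will accept */
    };

    /* Simplified mirror of the openings math in ahd_platform_set_tags(). */
    static void set_tagged(struct dev_state *d, int usertags)
    {
        if (usertags != 0) {
            d->maxtags = usertags;
            d->openings = d->maxtags - d->active;
        } else {
            d->maxtags = 0;
            d->openings = 1;        /* queuing disabled by the user */
        }
    }

    static void set_untagged(struct dev_state *d)
    {
        d->maxtags = 0;
        d->openings = 1 - d->active;    /* only one opening when untagged */
    }

    int main(void)
    {
        struct dev_state d = { .active = 4 };

        set_tagged(&d, 32);
        printf("tagged:   maxtags=%d openings=%d\n", d.maxtags, d.openings);
        set_untagged(&d);
        printf("untagged: maxtags=%d openings=%d\n", d.maxtags, d.openings);
        return 0;
    }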
+ */ + hscb->control = 0; + hscb->scsiid = BUILD_SCSIID(ahd, cmd); + hscb->lun = cmd->device->lun; + scb->hscb->task_management = 0; + mask = SCB_GET_TARGET_MASK(ahd, scb); + + if ((ahd->user_discenable & mask) != 0) + hscb->control |= DISCENB; + + if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) + scb->flags |= SCB_PACKETIZED; + + if ((tstate->auto_negotiate & mask) != 0) { + scb->flags |= SCB_AUTO_NEGOTIATE; + scb->hscb->control |= MK_MESSAGE; + } + + if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) { + if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH + && (dev->flags & AHD_DEV_Q_TAGGED) != 0) { + hscb->control |= ORDERED_QUEUE_TAG; + dev->commands_since_idle_or_otag = 0; + } else { + hscb->control |= SIMPLE_QUEUE_TAG; + } + } + + hscb->cdb_len = cmd->cmd_len; + memcpy(hscb->shared_data.idata.cdb, cmd->cmnd, hscb->cdb_len); + + scb->platform_data->xfer_len = 0; + ahd_set_residual(scb, 0); + ahd_set_sense_residual(scb, 0); + scb->sg_count = 0; + + if (nseg > 0) { + void *sg = scb->sg_list; + struct scatterlist *cur_seg; + int i; + + scb->platform_data->xfer_len = 0; + + scsi_for_each_sg(cmd, cur_seg, nseg, i) { + dma_addr_t addr; + bus_size_t len; + + addr = sg_dma_address(cur_seg); + len = sg_dma_len(cur_seg); + scb->platform_data->xfer_len += len; + sg = ahd_sg_setup(ahd, scb, sg, addr, len, + i == (nseg - 1)); + } + } + + LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links); + dev->openings--; + dev->active++; + dev->commands_issued++; + + if ((dev->flags & AHD_DEV_PERIODIC_OTAG) != 0) + dev->commands_since_idle_or_otag++; + scb->flags |= SCB_ACTIVE; + ahd_queue_scb(ahd, scb); + + ahd_unlock(ahd, &flags); + + return 0; +} + +/* + * SCSI controller interrupt handler. + */ +irqreturn_t +ahd_linux_isr(int irq, void *dev_id) +{ + struct ahd_softc *ahd; + u_long flags; + int ours; + + ahd = (struct ahd_softc *) dev_id; + ahd_lock(ahd, &flags); + ours = ahd_intr(ahd); + ahd_unlock(ahd, &flags); + return IRQ_RETVAL(ours); +} + +void +ahd_send_async(struct ahd_softc *ahd, char channel, + u_int target, u_int lun, ac_code code) +{ + switch (code) { + case AC_TRANSFER_NEG: + { + struct scsi_target *starget; + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + unsigned int target_ppr_options; + + BUG_ON(target == CAM_TARGET_WILDCARD); + + tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id, + target, &tstate); + + /* + * Don't bother reporting results while + * negotiations are still pending. + */ + if (tinfo->curr.period != tinfo->goal.period + || tinfo->curr.width != tinfo->goal.width + || tinfo->curr.offset != tinfo->goal.offset + || tinfo->curr.ppr_options != tinfo->goal.ppr_options) + if (bootverbose == 0) + break; + + /* + * Don't bother reporting results that + * are identical to those last reported. + */ + starget = ahd->platform_data->starget[target]; + if (starget == NULL) + break; + + target_ppr_options = + (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0) + + (spi_qas(starget) ? MSG_EXT_PPR_QAS_REQ : 0) + + (spi_iu(starget) ? MSG_EXT_PPR_IU_REQ : 0) + + (spi_rd_strm(starget) ? MSG_EXT_PPR_RD_STRM : 0) + + (spi_pcomp_en(starget) ? MSG_EXT_PPR_PCOMP_EN : 0) + + (spi_rti(starget) ? MSG_EXT_PPR_RTI : 0) + + (spi_wr_flow(starget) ? MSG_EXT_PPR_WR_FLOW : 0) + + (spi_hold_mcs(starget) ? 
MSG_EXT_PPR_HOLD_MCS : 0); + + if (tinfo->curr.period == spi_period(starget) + && tinfo->curr.width == spi_width(starget) + && tinfo->curr.offset == spi_offset(starget) + && tinfo->curr.ppr_options == target_ppr_options) + if (bootverbose == 0) + break; + + spi_period(starget) = tinfo->curr.period; + spi_width(starget) = tinfo->curr.width; + spi_offset(starget) = tinfo->curr.offset; + spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0; + spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0; + spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0; + spi_rd_strm(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RD_STRM ? 1 : 0; + spi_pcomp_en(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_PCOMP_EN ? 1 : 0; + spi_rti(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RTI ? 1 : 0; + spi_wr_flow(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_WR_FLOW ? 1 : 0; + spi_hold_mcs(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_HOLD_MCS ? 1 : 0; + spi_display_xfer_agreement(starget); + break; + } + case AC_SENT_BDR: + { + WARN_ON(lun != CAM_LUN_WILDCARD); + scsi_report_device_reset(ahd->platform_data->host, + channel - 'A', target); + break; + } + case AC_BUS_RESET: + if (ahd->platform_data->host != NULL) { + scsi_report_bus_reset(ahd->platform_data->host, + channel - 'A'); + } + break; + default: + panic("ahd_send_async: Unexpected async event"); + } +} + +/* + * Calls the higher level scsi done function and frees the scb. + */ +void +ahd_done(struct ahd_softc *ahd, struct scb *scb) +{ + struct scsi_cmnd *cmd; + struct ahd_linux_device *dev; + + if ((scb->flags & SCB_ACTIVE) == 0) { + printk("SCB %d done'd twice\n", SCB_GET_TAG(scb)); + ahd_dump_card_state(ahd); + panic("Stopping for safety"); + } + LIST_REMOVE(scb, pending_links); + cmd = scb->io_ctx; + dev = scb->platform_data->dev; + dev->active--; + dev->openings++; + if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) { + cmd->result &= ~(CAM_DEV_QFRZN << 16); + dev->qfrozen--; + } + ahd_linux_unmap_scb(ahd, scb); + + /* + * Guard against stale sense data. + * The Linux mid-layer assumes that sense + * was retrieved anytime the first byte of + * the sense buffer looks "sane". + */ + cmd->sense_buffer[0] = 0; + if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) { +#ifdef AHD_REPORT_UNDERFLOWS + uint32_t amount_xferred; + + amount_xferred = + ahd_get_transfer_length(scb) - ahd_get_residual(scb); +#endif + if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) { +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MISC) != 0) { + ahd_print_path(ahd, scb); + printk("Set CAM_UNCOR_PARITY\n"); + } +#endif + ahd_set_transaction_status(scb, CAM_UNCOR_PARITY); +#ifdef AHD_REPORT_UNDERFLOWS + /* + * This code is disabled by default as some + * clients of the SCSI system do not properly + * initialize the underflow parameter. This + * results in spurious termination of commands + * that complete as expected (e.g. underflow is + * allowed as command can return variable amounts + * of data. + */ + } else if (amount_xferred < scb->io_ctx->underflow) { + u_int i; + + ahd_print_path(ahd, scb); + printk("CDB:"); + for (i = 0; i < scb->io_ctx->cmd_len; i++) + printk(" 0x%x", scb->io_ctx->cmnd[i]); + printk("\n"); + ahd_print_path(ahd, scb); + printk("Saw underflow (%ld of %ld bytes). 
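ahd_send_async() above folds the transport layer's spi_* attributes into a single PPR options byte so the negotiated settings can be compared against what was last reported before touching the sysfs view. The sketch below mirrors that fold with illustrative bit values; PPR_DT and friends are placeholders for the driver's MSG_EXT_PPR_* constants, whose real values are not reproduced here.

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder bit assignments; the driver uses MSG_EXT_PPR_* from its headers. */
    #define PPR_IU       0x01
    #define PPR_DT       0x02
    #define PPR_QAS      0x04
    #define PPR_RD_STRM  0x08
    #define PPR_WR_FLOW  0x10
    #define PPR_RTI      0x20

    struct spi_attrs { int dt, iu, qas, rd_strm, wr_flow, rti; };

    /* Mirror of the target_ppr_options fold in ahd_send_async(). */
    static uint8_t fold_ppr(const struct spi_attrs *a)
    {
        return (a->dt ? PPR_DT : 0) |
               (a->iu ? PPR_IU : 0) |
               (a->qas ? PPR_QAS : 0) |
               (a->rd_strm ? PPR_RD_STRM : 0) |
               (a->wr_flow ? PPR_WR_FLOW : 0) |
               (a->rti ? PPR_RTI : 0);
    }

    int main(void)
    {
        struct spi_attrs shown = { .dt = 1, .iu = 1 };
        uint8_t negotiated = PPR_DT | PPR_IU;

        if (fold_ppr(&shown) == negotiated)
            printf("transport view already matches, nothing to report\n");
        return 0;
    }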
" + "Treated as error\n", + ahd_get_residual(scb), + ahd_get_transfer_length(scb)); + ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); +#endif + } else { + ahd_set_transaction_status(scb, CAM_REQ_CMP); + } + } else if (ahd_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) { + ahd_linux_handle_scsi_status(ahd, cmd->device, scb); + } + + if (dev->openings == 1 + && ahd_get_transaction_status(scb) == CAM_REQ_CMP + && ahd_get_scsi_status(scb) != SAM_STAT_TASK_SET_FULL) + dev->tag_success_count++; + /* + * Some devices deal with temporary internal resource + * shortages by returning queue full. When the queue + * full occurrs, we throttle back. Slowly try to get + * back to our previous queue depth. + */ + if ((dev->openings + dev->active) < dev->maxtags + && dev->tag_success_count > AHD_TAG_SUCCESS_INTERVAL) { + dev->tag_success_count = 0; + dev->openings++; + } + + if (dev->active == 0) + dev->commands_since_idle_or_otag = 0; + + if ((scb->flags & SCB_RECOVERY_SCB) != 0) { + printk("Recovery SCB completes\n"); + if (ahd_get_transaction_status(scb) == CAM_BDR_SENT + || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED) + ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT); + + if (ahd->platform_data->eh_done) + complete(ahd->platform_data->eh_done); + } + + ahd_free_scb(ahd, scb); + ahd_linux_queue_cmd_complete(ahd, cmd); +} + +static void +ahd_linux_handle_scsi_status(struct ahd_softc *ahd, + struct scsi_device *sdev, struct scb *scb) +{ + struct ahd_devinfo devinfo; + struct ahd_linux_device *dev = scsi_transport_device_data(sdev); + + ahd_compile_devinfo(&devinfo, + ahd->our_id, + sdev->sdev_target->id, sdev->lun, + sdev->sdev_target->channel == 0 ? 'A' : 'B', + ROLE_INITIATOR); + + /* + * We don't currently trust the mid-layer to + * properly deal with queue full or busy. So, + * when one occurs, we tell the mid-layer to + * unconditionally requeue the command to us + * so that we can retry it ourselves. We also + * implement our own throttling mechanism so + * we don't clobber the device with too many + * commands. + */ + switch (ahd_get_scsi_status(scb)) { + default: + break; + case SAM_STAT_CHECK_CONDITION: + case SAM_STAT_COMMAND_TERMINATED: + { + struct scsi_cmnd *cmd; + + /* + * Copy sense information to the OS's cmd + * structure if it is available. + */ + cmd = scb->io_ctx; + if ((scb->flags & (SCB_SENSE|SCB_PKT_SENSE)) != 0) { + struct scsi_status_iu_header *siu; + u_int sense_size; + u_int sense_offset; + + if (scb->flags & SCB_SENSE) { + sense_size = min(sizeof(struct scsi_sense_data) + - ahd_get_sense_residual(scb), + (u_long)SCSI_SENSE_BUFFERSIZE); + sense_offset = 0; + } else { + /* + * Copy only the sense data into the provided + * buffer. 
+ */ + siu = (struct scsi_status_iu_header *) + scb->sense_data; + sense_size = min_t(size_t, + scsi_4btoul(siu->sense_length), + SCSI_SENSE_BUFFERSIZE); + sense_offset = SIU_SENSE_OFFSET(siu); + } + + memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + memcpy(cmd->sense_buffer, + ahd_get_sense_buf(ahd, scb) + + sense_offset, sense_size); + set_status_byte(cmd, SAM_STAT_CHECK_CONDITION); + +#ifdef AHD_DEBUG + if (ahd_debug & AHD_SHOW_SENSE) { + int i; + + printk("Copied %d bytes of sense data at %d:", + sense_size, sense_offset); + for (i = 0; i < sense_size; i++) { + if ((i & 0xF) == 0) + printk("\n"); + printk("0x%x ", cmd->sense_buffer[i]); + } + printk("\n"); + } +#endif + } + break; + } + case SAM_STAT_TASK_SET_FULL: + /* + * By the time the core driver has returned this + * command, all other commands that were queued + * to us but not the device have been returned. + * This ensures that dev->active is equal to + * the number of commands actually queued to + * the device. + */ + dev->tag_success_count = 0; + if (dev->active != 0) { + /* + * Drop our opening count to the number + * of commands currently outstanding. + */ + dev->openings = 0; +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_QFULL) != 0) { + ahd_print_path(ahd, scb); + printk("Dropping tag count to %d\n", + dev->active); + } +#endif + if (dev->active == dev->tags_on_last_queuefull) { + + dev->last_queuefull_same_count++; + /* + * If we repeatedly see a queue full + * at the same queue depth, this + * device has a fixed number of tag + * slots. Lock in this tag depth + * so we stop seeing queue fulls from + * this device. + */ + if (dev->last_queuefull_same_count + == AHD_LOCK_TAGS_COUNT) { + dev->maxtags = dev->active; + ahd_print_path(ahd, scb); + printk("Locking max tag count at %d\n", + dev->active); + } + } else { + dev->tags_on_last_queuefull = dev->active; + dev->last_queuefull_same_count = 0; + } + ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); + ahd_set_scsi_status(scb, SAM_STAT_GOOD); + ahd_platform_set_tags(ahd, sdev, &devinfo, + (dev->flags & AHD_DEV_Q_BASIC) + ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED); + break; + } + /* + * Drop down to a single opening, and treat this + * as if the target returned BUSY SCSI status. + */ + dev->openings = 1; + ahd_platform_set_tags(ahd, sdev, &devinfo, + (dev->flags & AHD_DEV_Q_BASIC) + ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED); + ahd_set_scsi_status(scb, SAM_STAT_BUSY); + } +} + +static void +ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd) +{ + int status; + int new_status = DID_OK; + int do_fallback = 0; + int scsi_status; + struct scsi_sense_data *sense; + + /* + * Map CAM error codes into Linux Error codes. We + * avoid the conversion so that the DV code has the + * full error information available when making + * state change decisions. 
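The SAM_STAT_TASK_SET_FULL branch above is the throttle-back half of the scheme: openings drop to zero, and if the device keeps reporting queue full at the same outstanding command count, that count is locked in as maxtags once it repeats AHD_LOCK_TAGS_COUNT times. A standalone sketch of the heuristic (simplified; requeueing the command and resetting tag_success_count are omitted):

    #include <stdio.h>

    #define LOCK_TAGS_COUNT 50          /* driver's AHD_LOCK_TAGS_COUNT */

    struct dev_state {
        int active, openings, maxtags;
        int tags_on_last_queuefull;
        int last_queuefull_same_count;
    };

    /* Mirror of the queue-full handling in ahd_linux_handle_scsi_status(). */
    static void on_queue_full(struct dev_state *d)
    {
        if (d->active == 0)
            return;                     /* nothing outstanding: driver treats it like BUSY */

        d->openings = 0;                /* stop sending until something completes */
        if (d->active == d->tags_on_last_queuefull) {
            if (++d->last_queuefull_same_count == LOCK_TAGS_COUNT) {
                d->maxtags = d->active; /* device has a fixed tag pool */
                printf("locking max tag count at %d\n", d->active);
            }
        } else {
            d->tags_on_last_queuefull = d->active;
            d->last_queuefull_same_count = 0;
        }
    }

    int main(void)
    {
        struct dev_state d = { .active = 12, .openings = 4, .maxtags = 32 };
        int i;

        for (i = 0; i < LOCK_TAGS_COUNT; i++)
            on_queue_full(&d);
        printf("maxtags=%d openings=%d\n", d.maxtags, d.openings);
        return 0;
    }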
+ */ + + status = ahd_cmd_get_transaction_status(cmd); + switch (status) { + case CAM_REQ_INPROG: + case CAM_REQ_CMP: + new_status = DID_OK; + break; + case CAM_AUTOSENSE_FAIL: + new_status = DID_ERROR; + fallthrough; + case CAM_SCSI_STATUS_ERROR: + scsi_status = ahd_cmd_get_scsi_status(cmd); + + switch(scsi_status) { + case SAM_STAT_COMMAND_TERMINATED: + case SAM_STAT_CHECK_CONDITION: + sense = (struct scsi_sense_data *) + cmd->sense_buffer; + if (sense->extra_len >= 5 && + (sense->add_sense_code == 0x47 + || sense->add_sense_code == 0x48)) + do_fallback = 1; + break; + default: + break; + } + break; + case CAM_REQ_ABORTED: + new_status = DID_ABORT; + break; + case CAM_BUSY: + new_status = DID_BUS_BUSY; + break; + case CAM_REQ_INVALID: + case CAM_PATH_INVALID: + new_status = DID_BAD_TARGET; + break; + case CAM_SEL_TIMEOUT: + new_status = DID_NO_CONNECT; + break; + case CAM_SCSI_BUS_RESET: + case CAM_BDR_SENT: + new_status = DID_RESET; + break; + case CAM_UNCOR_PARITY: + new_status = DID_PARITY; + do_fallback = 1; + break; + case CAM_CMD_TIMEOUT: + new_status = DID_TIME_OUT; + do_fallback = 1; + break; + case CAM_REQ_CMP_ERR: + case CAM_UNEXP_BUSFREE: + case CAM_DATA_RUN_ERR: + new_status = DID_ERROR; + do_fallback = 1; + break; + case CAM_UA_ABORT: + case CAM_NO_HBA: + case CAM_SEQUENCE_FAIL: + case CAM_CCB_LEN_ERR: + case CAM_PROVIDE_FAIL: + case CAM_REQ_TERMIO: + case CAM_UNREC_HBA_ERROR: + case CAM_REQ_TOO_BIG: + new_status = DID_ERROR; + break; + case CAM_REQUEUE_REQ: + new_status = DID_REQUEUE; + break; + default: + /* We should never get here */ + new_status = DID_ERROR; + break; + } + + if (do_fallback) { + printk("%s: device overrun (status %x) on %d:%d:%d\n", + ahd_name(ahd), status, cmd->device->channel, + cmd->device->id, (u8)cmd->device->lun); + } + + ahd_cmd_set_transaction_status(cmd, new_status); + + scsi_done(cmd); +} + +static void +ahd_freeze_simq(struct ahd_softc *ahd) +{ + scsi_block_requests(ahd->platform_data->host); +} + +static void +ahd_release_simq(struct ahd_softc *ahd) +{ + scsi_unblock_requests(ahd->platform_data->host); +} + +static int +ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd) +{ + struct ahd_softc *ahd; + struct ahd_linux_device *dev; + struct scb *pending_scb; + u_int saved_scbptr; + u_int active_scbptr; + u_int last_phase; + u_int cdb_byte; + int retval = SUCCESS; + int was_paused; + int paused; + int wait; + int disconnected; + ahd_mode_state saved_modes; + unsigned long flags; + + pending_scb = NULL; + paused = FALSE; + wait = FALSE; + ahd = *(struct ahd_softc **)cmd->device->host->hostdata; + + scmd_printk(KERN_INFO, cmd, + "Attempting to queue an ABORT message:"); + + printk("CDB:"); + for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++) + printk(" 0x%x", cmd->cmnd[cdb_byte]); + printk("\n"); + + ahd_lock(ahd, &flags); + + /* + * First determine if we currently own this command. + * Start by searching the device queue. If not found + * there, check the pending_scb list. If not found + * at all, and the system wanted us to just abort the + * command, return success. + */ + dev = scsi_transport_device_data(cmd->device); + + if (dev == NULL) { + /* + * No target device for this command exists, + * so we must not still own the command. + */ + scmd_printk(KERN_INFO, cmd, "Is not an active device\n"); + goto done; + } + + /* + * See if we can find a matching cmd in the pending list. 
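ahd_linux_queue_cmd_complete() above translates the CAM status kept in the SCB into the host-byte codes the Linux mid-layer understands only at completion time, so the recovery and domain-validation paths keep the richer CAM value for as long as possible. A table-driven sketch of a few of those translations is below; the enum values are illustrative stand-ins for the CAM_* and DID_* constants, not their real numeric values.

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-ins; the driver uses the real CAM_* and DID_* constants. */
    enum cam_status { CAM_REQ_CMP, CAM_SEL_TIMEOUT, CAM_SCSI_BUS_RESET,
                      CAM_UNCOR_PARITY, CAM_CMD_TIMEOUT, CAM_REQUEUE_REQ };
    enum did_status { DID_OK, DID_NO_CONNECT, DID_RESET, DID_PARITY,
                      DID_TIME_OUT, DID_REQUEUE, DID_ERROR };

    static const struct { enum cam_status cam; enum did_status did; } cam_to_did[] = {
        { CAM_REQ_CMP,        DID_OK },
        { CAM_SEL_TIMEOUT,    DID_NO_CONNECT },
        { CAM_SCSI_BUS_RESET, DID_RESET },
        { CAM_UNCOR_PARITY,   DID_PARITY },
        { CAM_CMD_TIMEOUT,    DID_TIME_OUT },
        { CAM_REQUEUE_REQ,    DID_REQUEUE },
    };

    static enum did_status map_status(enum cam_status cam)
    {
        size_t i;

        for (i = 0; i < sizeof(cam_to_did) / sizeof(cam_to_did[0]); i++)
            if (cam_to_did[i].cam == cam)
                return cam_to_did[i].did;
        return DID_ERROR;               /* anything unrecognised is a generic error */
    }

    int main(void)
    {
        printf("CAM_SEL_TIMEOUT -> %d\n", map_status(CAM_SEL_TIMEOUT));
        return 0;
    }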
+ */ + LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { + if (pending_scb->io_ctx == cmd) + break; + } + + if (pending_scb == NULL) { + scmd_printk(KERN_INFO, cmd, "Command not found\n"); + goto done; + } + + if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) { + /* + * We can't queue two recovery actions using the same SCB + */ + retval = FAILED; + goto done; + } + + /* + * Ensure that the card doesn't do anything + * behind our back. Also make sure that we + * didn't "just" miss an interrupt that would + * affect this cmd. + */ + was_paused = ahd_is_paused(ahd); + ahd_pause_and_flushwork(ahd); + paused = TRUE; + + if ((pending_scb->flags & SCB_ACTIVE) == 0) { + scmd_printk(KERN_INFO, cmd, "Command already completed\n"); + goto done; + } + + printk("%s: At time of recovery, card was %spaused\n", + ahd_name(ahd), was_paused ? "" : "not "); + ahd_dump_card_state(ahd); + + disconnected = TRUE; + if (ahd_search_qinfifo(ahd, cmd->device->id, + cmd->device->channel + 'A', + cmd->device->lun, + pending_scb->hscb->tag, + ROLE_INITIATOR, CAM_REQ_ABORTED, + SEARCH_COMPLETE) > 0) { + printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n", + ahd_name(ahd), cmd->device->channel, + cmd->device->id, (u8)cmd->device->lun); + goto done; + } + + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + last_phase = ahd_inb(ahd, LASTPHASE); + saved_scbptr = ahd_get_scbptr(ahd); + active_scbptr = saved_scbptr; + if (disconnected && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) { + struct scb *bus_scb; + + bus_scb = ahd_lookup_scb(ahd, active_scbptr); + if (bus_scb == pending_scb) + disconnected = FALSE; + } + + /* + * At this point, pending_scb is the scb associated with the + * passed in command. That command is currently active on the + * bus or is in the disconnected state. + */ + ahd_inb(ahd, SAVED_SCSIID); + if (last_phase != P_BUSFREE + && SCB_GET_TAG(pending_scb) == active_scbptr) { + + /* + * We're active on the bus, so assert ATN + * and hope that the target responds. + */ + pending_scb = ahd_lookup_scb(ahd, active_scbptr); + pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT; + ahd_outb(ahd, MSG_OUT, HOST_MSG); + ahd_outb(ahd, SCSISIGO, last_phase|ATNO); + scmd_printk(KERN_INFO, cmd, "Device is active, asserting ATN\n"); + wait = TRUE; + } else if (disconnected) { + + /* + * Actually re-queue this SCB in an attempt + * to select the device before it reconnects. + */ + pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT; + ahd_set_scbptr(ahd, SCB_GET_TAG(pending_scb)); + pending_scb->hscb->cdb_len = 0; + pending_scb->hscb->task_attribute = 0; + pending_scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK; + + if ((pending_scb->flags & SCB_PACKETIZED) != 0) { + /* + * Mark the SCB has having an outstanding + * task management function. Should the command + * complete normally before the task management + * function can be sent, the host will be notified + * to abort our requeued SCB. + */ + ahd_outb(ahd, SCB_TASK_MANAGEMENT, + pending_scb->hscb->task_management); + } else { + /* + * If non-packetized, set the MK_MESSAGE control + * bit indicating that we desire to send a message. + * We also set the disconnected flag since there is + * no guarantee that our SCB control byte matches + * the version on the card. We don't want the + * sequencer to abort the command thinking an + * unsolicited reselection occurred. + */ + pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED; + + /* + * The sequencer will never re-reference the + * in-core SCB. 
To make sure we are notified + * during reselection, set the MK_MESSAGE flag in + * the card's copy of the SCB. + */ + ahd_outb(ahd, SCB_CONTROL, + ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE); + } + + /* + * Clear out any entries in the QINFIFO first + * so we are the next SCB for this target + * to run. + */ + ahd_search_qinfifo(ahd, cmd->device->id, + cmd->device->channel + 'A', cmd->device->lun, + SCB_LIST_NULL, ROLE_INITIATOR, + CAM_REQUEUE_REQ, SEARCH_COMPLETE); + ahd_qinfifo_requeue_tail(ahd, pending_scb); + ahd_set_scbptr(ahd, saved_scbptr); + ahd_print_path(ahd, pending_scb); + printk("Device is disconnected, re-queuing SCB\n"); + wait = TRUE; + } else { + scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n"); + retval = FAILED; + } + + + ahd_restore_modes(ahd, saved_modes); +done: + if (paused) + ahd_unpause(ahd); + if (wait) { + DECLARE_COMPLETION_ONSTACK(done); + + ahd->platform_data->eh_done = &done; + ahd_unlock(ahd, &flags); + + printk("%s: Recovery code sleeping\n", ahd_name(ahd)); + if (!wait_for_completion_timeout(&done, 5 * HZ)) { + ahd_lock(ahd, &flags); + ahd->platform_data->eh_done = NULL; + ahd_unlock(ahd, &flags); + printk("%s: Timer Expired (active %d)\n", + ahd_name(ahd), dev->active); + retval = FAILED; + } + printk("Recovery code awake\n"); + } else + ahd_unlock(ahd, &flags); + + if (retval != SUCCESS) + printk("%s: Command abort returning 0x%x\n", + ahd_name(ahd), retval); + + return retval; +} + +static void ahd_linux_set_width(struct scsi_target *starget, int width) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); + struct ahd_devinfo devinfo; + unsigned long flags; + + ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + ahd_lock(ahd, &flags); + ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE); + ahd_unlock(ahd, &flags); +} + +static void ahd_linux_set_period(struct scsi_target *starget, int period) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); + struct ahd_tmode_tstate *tstate; + struct ahd_initiator_tinfo *tinfo + = ahd_fetch_transinfo(ahd, + starget->channel + 'A', + shost->this_id, starget->id, &tstate); + struct ahd_devinfo devinfo; + unsigned int ppr_options = tinfo->goal.ppr_options; + unsigned int dt; + unsigned long flags; + unsigned long offset = tinfo->goal.offset; + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_DV) != 0) + printk("%s: set period to %d\n", ahd_name(ahd), period); +#endif + if (offset == 0) + offset = MAX_OFFSET; + + if (period < 8) + period = 8; + if (period < 10) { + if (spi_max_width(starget)) { + ppr_options |= MSG_EXT_PPR_DT_REQ; + if (period == 8) + ppr_options |= MSG_EXT_PPR_IU_REQ; + } else + period = 10; + } + + dt = ppr_options & MSG_EXT_PPR_DT_REQ; + + ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + + /* all PPR requests apart from QAS require wide transfers */ + if (ppr_options & ~MSG_EXT_PPR_QAS_REQ) { + if (spi_width(starget) == 0) + ppr_options &= MSG_EXT_PPR_QAS_REQ; + } + + ahd_find_syncrate(ahd, &period, &ppr_options, + dt ? 
AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); + + ahd_lock(ahd, &flags); + ahd_set_syncrate(ahd, &devinfo, period, offset, + ppr_options, AHD_TRANS_GOAL, FALSE); + ahd_unlock(ahd, &flags); +} + +static void ahd_linux_set_offset(struct scsi_target *starget, int offset) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); + struct ahd_tmode_tstate *tstate; + struct ahd_initiator_tinfo *tinfo + = ahd_fetch_transinfo(ahd, + starget->channel + 'A', + shost->this_id, starget->id, &tstate); + struct ahd_devinfo devinfo; + unsigned int ppr_options = 0; + unsigned int period = 0; + unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ; + unsigned long flags; + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_DV) != 0) + printk("%s: set offset to %d\n", ahd_name(ahd), offset); +#endif + + ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + if (offset != 0) { + period = tinfo->goal.period; + ppr_options = tinfo->goal.ppr_options; + ahd_find_syncrate(ahd, &period, &ppr_options, + dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); + } + + ahd_lock(ahd, &flags); + ahd_set_syncrate(ahd, &devinfo, period, offset, ppr_options, + AHD_TRANS_GOAL, FALSE); + ahd_unlock(ahd, &flags); +} + +static void ahd_linux_set_dt(struct scsi_target *starget, int dt) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); + struct ahd_tmode_tstate *tstate; + struct ahd_initiator_tinfo *tinfo + = ahd_fetch_transinfo(ahd, + starget->channel + 'A', + shost->this_id, starget->id, &tstate); + struct ahd_devinfo devinfo; + unsigned int ppr_options = tinfo->goal.ppr_options + & ~MSG_EXT_PPR_DT_REQ; + unsigned int period = tinfo->goal.period; + unsigned int width = tinfo->goal.width; + unsigned long flags; + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_DV) != 0) + printk("%s: %s DT\n", ahd_name(ahd), + dt ? "enabling" : "disabling"); +#endif + if (dt && spi_max_width(starget)) { + ppr_options |= MSG_EXT_PPR_DT_REQ; + if (!width) + ahd_linux_set_width(starget, 1); + } else { + if (period <= 9) + period = 10; /* If resetting DT, period must be >= 25ns */ + /* IU is invalid without DT set */ + ppr_options &= ~MSG_EXT_PPR_IU_REQ; + } + ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + ahd_find_syncrate(ahd, &period, &ppr_options, + dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); + + ahd_lock(ahd, &flags); + ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, + ppr_options, AHD_TRANS_GOAL, FALSE); + ahd_unlock(ahd, &flags); +} + +static void ahd_linux_set_qas(struct scsi_target *starget, int qas) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); + struct ahd_tmode_tstate *tstate; + struct ahd_initiator_tinfo *tinfo + = ahd_fetch_transinfo(ahd, + starget->channel + 'A', + shost->this_id, starget->id, &tstate); + struct ahd_devinfo devinfo; + unsigned int ppr_options = tinfo->goal.ppr_options + & ~MSG_EXT_PPR_QAS_REQ; + unsigned int period = tinfo->goal.period; + unsigned int dt; + unsigned long flags; + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_DV) != 0) + printk("%s: %s QAS\n", ahd_name(ahd), + qas ? 
"enabling" : "disabling"); +#endif + + if (qas) { + ppr_options |= MSG_EXT_PPR_QAS_REQ; + } + + dt = ppr_options & MSG_EXT_PPR_DT_REQ; + + ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + ahd_find_syncrate(ahd, &period, &ppr_options, + dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); + + ahd_lock(ahd, &flags); + ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, + ppr_options, AHD_TRANS_GOAL, FALSE); + ahd_unlock(ahd, &flags); +} + +static void ahd_linux_set_iu(struct scsi_target *starget, int iu) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); + struct ahd_tmode_tstate *tstate; + struct ahd_initiator_tinfo *tinfo + = ahd_fetch_transinfo(ahd, + starget->channel + 'A', + shost->this_id, starget->id, &tstate); + struct ahd_devinfo devinfo; + unsigned int ppr_options = tinfo->goal.ppr_options + & ~MSG_EXT_PPR_IU_REQ; + unsigned int period = tinfo->goal.period; + unsigned int dt; + unsigned long flags; + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_DV) != 0) + printk("%s: %s IU\n", ahd_name(ahd), + iu ? "enabling" : "disabling"); +#endif + + if (iu && spi_max_width(starget)) { + ppr_options |= MSG_EXT_PPR_IU_REQ; + ppr_options |= MSG_EXT_PPR_DT_REQ; /* IU requires DT */ + } + + dt = ppr_options & MSG_EXT_PPR_DT_REQ; + + ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + ahd_find_syncrate(ahd, &period, &ppr_options, + dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); + + ahd_lock(ahd, &flags); + ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, + ppr_options, AHD_TRANS_GOAL, FALSE); + ahd_unlock(ahd, &flags); +} + +static void ahd_linux_set_rd_strm(struct scsi_target *starget, int rdstrm) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); + struct ahd_tmode_tstate *tstate; + struct ahd_initiator_tinfo *tinfo + = ahd_fetch_transinfo(ahd, + starget->channel + 'A', + shost->this_id, starget->id, &tstate); + struct ahd_devinfo devinfo; + unsigned int ppr_options = tinfo->goal.ppr_options + & ~MSG_EXT_PPR_RD_STRM; + unsigned int period = tinfo->goal.period; + unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ; + unsigned long flags; + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_DV) != 0) + printk("%s: %s Read Streaming\n", ahd_name(ahd), + rdstrm ? "enabling" : "disabling"); +#endif + + if (rdstrm && spi_max_width(starget)) + ppr_options |= MSG_EXT_PPR_RD_STRM; + + ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + ahd_find_syncrate(ahd, &period, &ppr_options, + dt ? 
AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); + + ahd_lock(ahd, &flags); + ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, + ppr_options, AHD_TRANS_GOAL, FALSE); + ahd_unlock(ahd, &flags); +} + +static void ahd_linux_set_wr_flow(struct scsi_target *starget, int wrflow) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); + struct ahd_tmode_tstate *tstate; + struct ahd_initiator_tinfo *tinfo + = ahd_fetch_transinfo(ahd, + starget->channel + 'A', + shost->this_id, starget->id, &tstate); + struct ahd_devinfo devinfo; + unsigned int ppr_options = tinfo->goal.ppr_options + & ~MSG_EXT_PPR_WR_FLOW; + unsigned int period = tinfo->goal.period; + unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ; + unsigned long flags; + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_DV) != 0) + printk("%s: %s Write Flow Control\n", ahd_name(ahd), + wrflow ? "enabling" : "disabling"); +#endif + + if (wrflow && spi_max_width(starget)) + ppr_options |= MSG_EXT_PPR_WR_FLOW; + + ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + ahd_find_syncrate(ahd, &period, &ppr_options, + dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); + + ahd_lock(ahd, &flags); + ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, + ppr_options, AHD_TRANS_GOAL, FALSE); + ahd_unlock(ahd, &flags); +} + +static void ahd_linux_set_rti(struct scsi_target *starget, int rti) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); + struct ahd_tmode_tstate *tstate; + struct ahd_initiator_tinfo *tinfo + = ahd_fetch_transinfo(ahd, + starget->channel + 'A', + shost->this_id, starget->id, &tstate); + struct ahd_devinfo devinfo; + unsigned int ppr_options = tinfo->goal.ppr_options + & ~MSG_EXT_PPR_RTI; + unsigned int period = tinfo->goal.period; + unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ; + unsigned long flags; + + if ((ahd->features & AHD_RTI) == 0) { +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_DV) != 0) + printk("%s: RTI not available\n", ahd_name(ahd)); +#endif + return; + } + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_DV) != 0) + printk("%s: %s RTI\n", ahd_name(ahd), + rti ? "enabling" : "disabling"); +#endif + + if (rti && spi_max_width(starget)) + ppr_options |= MSG_EXT_PPR_RTI; + + ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + ahd_find_syncrate(ahd, &period, &ppr_options, + dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); + + ahd_lock(ahd, &flags); + ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, + ppr_options, AHD_TRANS_GOAL, FALSE); + ahd_unlock(ahd, &flags); +} + +static void ahd_linux_set_pcomp_en(struct scsi_target *starget, int pcomp) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); + struct ahd_tmode_tstate *tstate; + struct ahd_initiator_tinfo *tinfo + = ahd_fetch_transinfo(ahd, + starget->channel + 'A', + shost->this_id, starget->id, &tstate); + struct ahd_devinfo devinfo; + unsigned int ppr_options = tinfo->goal.ppr_options + & ~MSG_EXT_PPR_PCOMP_EN; + unsigned int period = tinfo->goal.period; + unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ; + unsigned long flags; + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_DV) != 0) + printk("%s: %s Precompensation\n", ahd_name(ahd), + pcomp ? 
"Enable" : "Disable"); +#endif + + if (pcomp && spi_max_width(starget)) { + uint8_t precomp; + + if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) { + const struct ahd_linux_iocell_opts *iocell_opts; + + iocell_opts = &aic79xx_iocell_info[ahd->unit]; + precomp = iocell_opts->precomp; + } else { + precomp = AIC79XX_DEFAULT_PRECOMP; + } + ppr_options |= MSG_EXT_PPR_PCOMP_EN; + AHD_SET_PRECOMP(ahd, precomp); + } else { + AHD_SET_PRECOMP(ahd, 0); + } + + ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + ahd_find_syncrate(ahd, &period, &ppr_options, + dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); + + ahd_lock(ahd, &flags); + ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, + ppr_options, AHD_TRANS_GOAL, FALSE); + ahd_unlock(ahd, &flags); +} + +static void ahd_linux_set_hold_mcs(struct scsi_target *starget, int hold) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); + struct ahd_tmode_tstate *tstate; + struct ahd_initiator_tinfo *tinfo + = ahd_fetch_transinfo(ahd, + starget->channel + 'A', + shost->this_id, starget->id, &tstate); + struct ahd_devinfo devinfo; + unsigned int ppr_options = tinfo->goal.ppr_options + & ~MSG_EXT_PPR_HOLD_MCS; + unsigned int period = tinfo->goal.period; + unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ; + unsigned long flags; + + if (hold && spi_max_width(starget)) + ppr_options |= MSG_EXT_PPR_HOLD_MCS; + + ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + ahd_find_syncrate(ahd, &period, &ppr_options, + dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); + + ahd_lock(ahd, &flags); + ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, + ppr_options, AHD_TRANS_GOAL, FALSE); + ahd_unlock(ahd, &flags); +} + +static void ahd_linux_get_signalling(struct Scsi_Host *shost) +{ + struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata; + unsigned long flags; + u8 mode; + + ahd_lock(ahd, &flags); + ahd_pause(ahd); + mode = ahd_inb(ahd, SBLKCTL); + ahd_unpause(ahd); + ahd_unlock(ahd, &flags); + + if (mode & ENAB40) + spi_signalling(shost) = SPI_SIGNAL_LVD; + else if (mode & ENAB20) + spi_signalling(shost) = SPI_SIGNAL_SE; + else + spi_signalling(shost) = SPI_SIGNAL_UNKNOWN; +} + +static struct spi_function_template ahd_linux_transport_functions = { + .set_offset = ahd_linux_set_offset, + .show_offset = 1, + .set_period = ahd_linux_set_period, + .show_period = 1, + .set_width = ahd_linux_set_width, + .show_width = 1, + .set_dt = ahd_linux_set_dt, + .show_dt = 1, + .set_iu = ahd_linux_set_iu, + .show_iu = 1, + .set_qas = ahd_linux_set_qas, + .show_qas = 1, + .set_rd_strm = ahd_linux_set_rd_strm, + .show_rd_strm = 1, + .set_wr_flow = ahd_linux_set_wr_flow, + .show_wr_flow = 1, + .set_rti = ahd_linux_set_rti, + .show_rti = 1, + .set_pcomp_en = ahd_linux_set_pcomp_en, + .show_pcomp_en = 1, + .set_hold_mcs = ahd_linux_set_hold_mcs, + .show_hold_mcs = 1, + .get_signalling = ahd_linux_get_signalling, +}; + +static int __init +ahd_linux_init(void) +{ + int error = 0; + + /* + * If we've been passed any parameters, process them now. 
+ */ + if (aic79xx) + aic79xx_setup(aic79xx); + + ahd_linux_transport_template = + spi_attach_transport(&ahd_linux_transport_functions); + if (!ahd_linux_transport_template) + return -ENODEV; + + scsi_transport_reserve_device(ahd_linux_transport_template, + sizeof(struct ahd_linux_device)); + + error = ahd_linux_pci_init(); + if (error) + spi_release_transport(ahd_linux_transport_template); + return error; +} + +static void __exit +ahd_linux_exit(void) +{ + ahd_linux_pci_exit(); + spi_release_transport(ahd_linux_transport_template); +} + +module_init(ahd_linux_init); +module_exit(ahd_linux_exit); diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h new file mode 100644 index 000000000..793fe1999 --- /dev/null +++ b/drivers/scsi/aic7xxx/aic79xx_osm.h @@ -0,0 +1,658 @@ +/* + * Adaptec AIC79xx device driver for Linux. + * + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.h#166 $ + * + */ +#ifndef _AIC79XX_LINUX_H_ +#define _AIC79XX_LINUX_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* Core SCSI definitions */ +#define AIC_LIB_PREFIX ahd + +#include "cam.h" +#include "queue.h" +#include "scsi_message.h" +#include "scsi_iu.h" +#include "aiclib.h" + +/*********************************** Debugging ********************************/ +#ifdef CONFIG_AIC79XX_DEBUG_ENABLE +#ifdef CONFIG_AIC79XX_DEBUG_MASK +#define AHD_DEBUG 1 +#define AHD_DEBUG_OPTS CONFIG_AIC79XX_DEBUG_MASK +#else +/* + * Compile in debugging code, but do not enable any printfs. 
+ */ +#define AHD_DEBUG 1 +#define AHD_DEBUG_OPTS 0 +#endif +/* No debugging code. */ +#endif + +/********************************** Misc Macros *******************************/ +#define powerof2(x) ((((x)-1)&(x))==0) + +/************************* Forward Declarations *******************************/ +struct ahd_softc; +typedef struct pci_dev *ahd_dev_softc_t; +typedef struct scsi_cmnd *ahd_io_ctx_t; + +/******************************* Byte Order ***********************************/ +#define ahd_htobe16(x) cpu_to_be16(x) +#define ahd_htobe32(x) cpu_to_be32(x) +#define ahd_htobe64(x) cpu_to_be64(x) +#define ahd_htole16(x) cpu_to_le16(x) +#define ahd_htole32(x) cpu_to_le32(x) +#define ahd_htole64(x) cpu_to_le64(x) + +#define ahd_be16toh(x) be16_to_cpu(x) +#define ahd_be32toh(x) be32_to_cpu(x) +#define ahd_be64toh(x) be64_to_cpu(x) +#define ahd_le16toh(x) le16_to_cpu(x) +#define ahd_le32toh(x) le32_to_cpu(x) +#define ahd_le64toh(x) le64_to_cpu(x) + +/************************* Configuration Data *********************************/ +extern uint32_t aic79xx_allow_memio; +extern struct scsi_host_template aic79xx_driver_template; + +/***************************** Bus Space/DMA **********************************/ + +typedef uint32_t bus_size_t; + +typedef enum { + BUS_SPACE_MEMIO, + BUS_SPACE_PIO +} bus_space_tag_t; + +typedef union { + u_long ioport; + volatile uint8_t __iomem *maddr; +} bus_space_handle_t; + +typedef struct bus_dma_segment +{ + dma_addr_t ds_addr; + bus_size_t ds_len; +} bus_dma_segment_t; + +struct ahd_linux_dma_tag +{ + bus_size_t alignment; + bus_size_t boundary; + bus_size_t maxsize; +}; +typedef struct ahd_linux_dma_tag* bus_dma_tag_t; + +typedef dma_addr_t bus_dmamap_t; + +typedef int bus_dma_filter_t(void*, dma_addr_t); +typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int); + +#define BUS_DMA_WAITOK 0x0 +#define BUS_DMA_NOWAIT 0x1 +#define BUS_DMA_ALLOCNOW 0x2 +#define BUS_DMA_LOAD_SEGS 0x4 /* + * Argument is an S/G list not + * a single buffer. + */ + +#define BUS_SPACE_MAXADDR 0xFFFFFFFF +#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF +#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFF + +int ahd_dma_tag_create(struct ahd_softc *, bus_dma_tag_t /*parent*/, + bus_size_t /*alignment*/, bus_size_t /*boundary*/, + dma_addr_t /*lowaddr*/, dma_addr_t /*highaddr*/, + bus_dma_filter_t*/*filter*/, void */*filterarg*/, + bus_size_t /*maxsize*/, int /*nsegments*/, + bus_size_t /*maxsegsz*/, int /*flags*/, + bus_dma_tag_t */*dma_tagp*/); + +void ahd_dma_tag_destroy(struct ahd_softc *, bus_dma_tag_t /*tag*/); + +int ahd_dmamem_alloc(struct ahd_softc *, bus_dma_tag_t /*dmat*/, + void** /*vaddr*/, int /*flags*/, + bus_dmamap_t* /*mapp*/); + +void ahd_dmamem_free(struct ahd_softc *, bus_dma_tag_t /*dmat*/, + void* /*vaddr*/, bus_dmamap_t /*map*/); + +void ahd_dmamap_destroy(struct ahd_softc *, bus_dma_tag_t /*tag*/, + bus_dmamap_t /*map*/); + +int ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t /*dmat*/, + bus_dmamap_t /*map*/, void * /*buf*/, + bus_size_t /*buflen*/, bus_dmamap_callback_t *, + void */*callback_arg*/, int /*flags*/); + +int ahd_dmamap_unload(struct ahd_softc *, bus_dma_tag_t, bus_dmamap_t); + +/* + * Operations performed by ahd_dmamap_sync(). 
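ahd_dmamap_load() above reports the resulting segment list through the bus_dmamap_callback_t hook. The driver's own callback for coherent allocations is essentially a single-segment recorder; here is a sketch of that shape, with the kernel types reduced to fixed-width integers so the example stands alone:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;    /* stand-in for the kernel type */
typedef uint32_t bus_size_t;

typedef struct bus_dma_segment {
        dma_addr_t ds_addr;
        bus_size_t ds_len;
} bus_dma_segment_t;

/* Callback with the bus_dmamap_callback_t signature: record the bus address
 * of the first (and only) segment in the caller-supplied slot. */
static void dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        dma_addr_t *baddr = arg;

        if (error == 0 && nseg == 1)
                *baddr = segs[0].ds_addr;
}

int main(void)
{
        bus_dma_segment_t seg = { .ds_addr = 0x1000, .ds_len = 4096 };
        dma_addr_t busaddr = 0;

        dmamap_cb(&busaddr, &seg, 1, 0);
        printf("bus address 0x%llx\n", (unsigned long long)busaddr);
        return 0;
}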
+ */ +#define BUS_DMASYNC_PREREAD 0x01 /* pre-read synchronization */ +#define BUS_DMASYNC_POSTREAD 0x02 /* post-read synchronization */ +#define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */ +#define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */ + +/* + * XXX + * ahd_dmamap_sync is only used on buffers allocated with + * the dma_alloc_coherent() API. Although I'm not sure how + * this works on architectures with a write buffer, Linux does + * not have an API to sync "coherent" memory. Perhaps we need + * to do an mb()? + */ +#define ahd_dmamap_sync(ahd, dma_tag, dmamap, offset, len, op) + +/********************************** Includes **********************************/ +#ifdef CONFIG_AIC79XX_REG_PRETTY_PRINT +#define AIC_DEBUG_REGISTERS 1 +#else +#define AIC_DEBUG_REGISTERS 0 +#endif +#include "aic79xx.h" + +/***************************** SMP support ************************************/ +#include + +#define AIC79XX_DRIVER_VERSION "3.0" + +/*************************** Device Data Structures ***************************/ +/* + * A per probed device structure used to deal with some error recovery + * scenarios that the Linux mid-layer code just doesn't know how to + * handle. The structure allocated for a device only becomes persistent + * after a successfully completed inquiry command to the target when + * that inquiry data indicates a lun is present. + */ + +typedef enum { + AHD_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */ + AHD_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */ + AHD_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */ + AHD_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */ +} ahd_linux_dev_flags; + +struct ahd_linux_device { + TAILQ_ENTRY(ahd_linux_device) links; + + /* + * The number of transactions currently + * queued to the device. + */ + int active; + + /* + * The currently allowed number of + * transactions that can be queued to + * the device. Must be signed for + * conversion from tagged to untagged + * mode where the device may have more + * than one outstanding active transaction. + */ + int openings; + + /* + * A positive count indicates that this + * device's queue is halted. + */ + u_int qfrozen; + + /* + * Cumulative command counter. + */ + u_long commands_issued; + + /* + * The number of tagged transactions when + * running at our current opening level + * that have been successfully received by + * this device since the last QUEUE FULL. + */ + u_int tag_success_count; +#define AHD_TAG_SUCCESS_INTERVAL 50 + + ahd_linux_dev_flags flags; + + /* + * Per device timer. + */ + struct timer_list timer; + + /* + * The high limit for the tags variable. + */ + u_int maxtags; + + /* + * The computed number of tags outstanding + * at the time of the last QUEUE FULL event. + */ + u_int tags_on_last_queuefull; + + /* + * How many times we have seen a queue full + * with the same number of tags. This is used + * to stop our adaptive queue depth algorithm + * on devices with a fixed number of tags. + */ + u_int last_queuefull_same_count; +#define AHD_LOCK_TAGS_COUNT 50 + + /* + * How many transactions have been queued + * without the device going idle. We use + * this statistic to determine when to issue + * an ordered tag to prevent transaction + * starvation. This statistic is only updated + * if the AHD_DEV_PERIODIC_OTAG flag is set + * on this device. 
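The counter described above feeds the anti-starvation ordered tag. One plausible policy built on AHD_OTAG_THRESH is sketched below; the actual decision is made in the core driver, so treat this as an illustration of the idea rather than the exact rule:

#include <stdio.h>

#define AHD_OTAG_THRESH 500

/* Per-device counter as described above: reset when the device goes idle or
 * an ordered tag is sent, incremented for every command queued meanwhile. */
struct dev_stats {
        unsigned int commands_since_idle_or_otag;
};

/* Return 1 when the next command should carry an ordered tag. */
static int want_ordered_tag(struct dev_stats *d)
{
        if (++d->commands_since_idle_or_otag >= AHD_OTAG_THRESH) {
                d->commands_since_idle_or_otag = 0;
                return 1;
        }
        return 0;
}

int main(void)
{
        struct dev_stats d = { 0 };
        unsigned int i, ordered = 0;

        for (i = 0; i < 1500; i++)
                ordered += want_ordered_tag(&d);
        printf("ordered tags issued: %u of 1500 commands\n", ordered); /* 3 */
        return 0;
}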
+ */ + u_int commands_since_idle_or_otag; +#define AHD_OTAG_THRESH 500 +}; + +/********************* Definitions Required by the Core ***********************/ +/* + * Number of SG segments we require. So long as the S/G segments for + * a particular transaction are allocated in a physically contiguous + * manner and are allocated below 4GB, the number of S/G segments is + * unrestricted. + */ +#define AHD_NSEG 128 + +/* + * Per-SCB OSM storage. + */ +struct scb_platform_data { + struct ahd_linux_device *dev; + dma_addr_t buf_busaddr; + uint32_t xfer_len; + uint32_t sense_resid; /* Auto-Sense residual */ +}; + +/* + * Define a structure used for each host adapter. All members are + * aligned on a boundary >= the size of the member to honor the + * alignment restrictions of the various platforms supported by + * this driver. + */ +struct ahd_platform_data { + /* + * Fields accessed from interrupt context. + */ + struct scsi_target *starget[AHD_NUM_TARGETS]; + + spinlock_t spin_lock; + struct completion *eh_done; + struct Scsi_Host *host; /* pointer to scsi host */ +#define AHD_LINUX_NOIRQ ((uint32_t)~0) + uint32_t irq; /* IRQ for this adapter */ + uint32_t bios_address; + resource_size_t mem_busaddr; /* Mem Base Addr */ +}; + +void ahd_delay(long); + +/***************************** Low Level I/O **********************************/ +uint8_t ahd_inb(struct ahd_softc * ahd, long port); +void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val); +void ahd_outw_atomic(struct ahd_softc * ahd, + long port, uint16_t val); +void ahd_outsb(struct ahd_softc * ahd, long port, + uint8_t *, int count); +void ahd_insb(struct ahd_softc * ahd, long port, + uint8_t *, int count); + +/**************************** Initialization **********************************/ +int ahd_linux_register_host(struct ahd_softc *, + struct scsi_host_template *); + +/******************************** Locking *************************************/ +static inline void +ahd_lockinit(struct ahd_softc *ahd) +{ + spin_lock_init(&ahd->platform_data->spin_lock); +} + +static inline void +ahd_lock(struct ahd_softc *ahd, unsigned long *flags) +{ + spin_lock_irqsave(&ahd->platform_data->spin_lock, *flags); +} + +static inline void +ahd_unlock(struct ahd_softc *ahd, unsigned long *flags) +{ + spin_unlock_irqrestore(&ahd->platform_data->spin_lock, *flags); +} + +/******************************* PCI Definitions ******************************/ +/* + * PCIM_xxx: mask to locate subfield in register + * PCIR_xxx: config register offset + * PCIC_xxx: device class + * PCIS_xxx: device subclass + * PCIP_xxx: device programming interface + * PCIV_xxx: PCI vendor ID (only required to fixup ancient devices) + * PCID_xxx: device ID + */ +#define PCIR_DEVVENDOR 0x00 +#define PCIR_VENDOR 0x00 +#define PCIR_DEVICE 0x02 +#define PCIR_COMMAND 0x04 +#define PCIM_CMD_PORTEN 0x0001 +#define PCIM_CMD_MEMEN 0x0002 +#define PCIM_CMD_BUSMASTEREN 0x0004 +#define PCIM_CMD_MWRICEN 0x0010 +#define PCIM_CMD_PERRESPEN 0x0040 +#define PCIM_CMD_SERRESPEN 0x0100 +#define PCIR_STATUS 0x06 +#define PCIR_REVID 0x08 +#define PCIR_PROGIF 0x09 +#define PCIR_SUBCLASS 0x0a +#define PCIR_CLASS 0x0b +#define PCIR_CACHELNSZ 0x0c +#define PCIR_LATTIMER 0x0d +#define PCIR_HEADERTYPE 0x0e +#define PCIM_MFDEV 0x80 +#define PCIR_BIST 0x0f +#define PCIR_CAP_PTR 0x34 + +/* config registers for header type 0 devices */ +#define PCIR_MAPS 0x10 + +/****************************** PCI-X definitions *****************************/ +#define PCIXR_COMMAND 0x96 +#define PCIXR_DEVADDR 0x98 +#define 
PCIXM_DEVADDR_FNUM 0x0003 /* Function Number */ +#define PCIXM_DEVADDR_DNUM 0x00F8 /* Device Number */ +#define PCIXM_DEVADDR_BNUM 0xFF00 /* Bus Number */ +#define PCIXR_STATUS 0x9A +#define PCIXM_STATUS_64BIT 0x0001 /* Active 64bit connection to device. */ +#define PCIXM_STATUS_133CAP 0x0002 /* Device is 133MHz capable */ +#define PCIXM_STATUS_SCDISC 0x0004 /* Split Completion Discarded */ +#define PCIXM_STATUS_UNEXPSC 0x0008 /* Unexpected Split Completion */ +#define PCIXM_STATUS_CMPLEXDEV 0x0010 /* Device Complexity (set == bridge) */ +#define PCIXM_STATUS_MAXMRDBC 0x0060 /* Maximum Burst Read Count */ +#define PCIXM_STATUS_MAXSPLITS 0x0380 /* Maximum Split Transactions */ +#define PCIXM_STATUS_MAXCRDS 0x1C00 /* Maximum Cumulative Read Size */ +#define PCIXM_STATUS_RCVDSCEM 0x2000 /* Received a Split Comp w/Error msg */ + +typedef enum +{ + AHD_POWER_STATE_D0, + AHD_POWER_STATE_D1, + AHD_POWER_STATE_D2, + AHD_POWER_STATE_D3 +} ahd_power_state; + +void ahd_power_state_change(struct ahd_softc *ahd, + ahd_power_state new_state); + +/******************************* PCI Routines *********************************/ +int ahd_linux_pci_init(void); +void ahd_linux_pci_exit(void); +int ahd_pci_map_registers(struct ahd_softc *ahd); +int ahd_pci_map_int(struct ahd_softc *ahd); + +uint32_t ahd_pci_read_config(ahd_dev_softc_t pci, + int reg, int width); +void ahd_pci_write_config(ahd_dev_softc_t pci, + int reg, uint32_t value, + int width); + +static inline int ahd_get_pci_function(ahd_dev_softc_t); +static inline int +ahd_get_pci_function(ahd_dev_softc_t pci) +{ + return (PCI_FUNC(pci->devfn)); +} + +static inline int ahd_get_pci_slot(ahd_dev_softc_t); +static inline int +ahd_get_pci_slot(ahd_dev_softc_t pci) +{ + return (PCI_SLOT(pci->devfn)); +} + +static inline int ahd_get_pci_bus(ahd_dev_softc_t); +static inline int +ahd_get_pci_bus(ahd_dev_softc_t pci) +{ + return (pci->bus->number); +} + +static inline void ahd_flush_device_writes(struct ahd_softc *); +static inline void +ahd_flush_device_writes(struct ahd_softc *ahd) +{ + /* XXX Is this sufficient for all architectures??? 
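The PCIXR_STATUS masks defined above are plain mask-and-shift subfields. A small sketch of pulling two of them out of a raw status word; the status value used here is invented for the example, not read from hardware:

#include <stdio.h>
#include <stdint.h>

#define PCIXM_STATUS_MAXMRDBC   0x0060  /* Maximum Burst Read Count */
#define PCIXM_STATUS_MAXSPLITS  0x0380  /* Maximum Split Transactions */

/* Extract a subfield: mask, then shift down to the mask's lowest set bit. */
static unsigned int pcix_field(uint16_t status, uint16_t mask)
{
        unsigned int shift = 0;

        while (((mask >> shift) & 1) == 0)
                shift++;
        return (status & mask) >> shift;
}

int main(void)
{
        uint16_t status = 0x0123;       /* example value */

        printf("max burst read count field: %u\n",
               pcix_field(status, PCIXM_STATUS_MAXMRDBC));
        printf("max split transactions field: %u\n",
               pcix_field(status, PCIXM_STATUS_MAXSPLITS));
        return 0;
}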
*/ + ahd_inb(ahd, INTSTAT); +} + +/**************************** Proc FS Support *********************************/ +int ahd_proc_write_seeprom(struct Scsi_Host *, char *, int); +int ahd_linux_show_info(struct seq_file *,struct Scsi_Host *); + +/*********************** Transaction Access Wrappers **************************/ + +static inline +void ahd_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status) +{ + cmd->result &= ~(CAM_STATUS_MASK << 16); + cmd->result |= status << 16; +} + +static inline +void ahd_set_transaction_status(struct scb *scb, uint32_t status) +{ + ahd_cmd_set_transaction_status(scb->io_ctx,status); +} + +static inline +void ahd_cmd_set_scsi_status(struct scsi_cmnd *cmd, uint32_t status) +{ + cmd->result &= ~0xFFFF; + cmd->result |= status; +} + +static inline +void ahd_set_scsi_status(struct scb *scb, uint32_t status) +{ + ahd_cmd_set_scsi_status(scb->io_ctx, status); +} + +static inline +uint32_t ahd_cmd_get_transaction_status(struct scsi_cmnd *cmd) +{ + return ((cmd->result >> 16) & CAM_STATUS_MASK); +} + +static inline +uint32_t ahd_get_transaction_status(struct scb *scb) +{ + return (ahd_cmd_get_transaction_status(scb->io_ctx)); +} + +static inline +uint32_t ahd_cmd_get_scsi_status(struct scsi_cmnd *cmd) +{ + return (cmd->result & 0xFFFF); +} + +static inline +uint32_t ahd_get_scsi_status(struct scb *scb) +{ + return (ahd_cmd_get_scsi_status(scb->io_ctx)); +} + +static inline +void ahd_set_transaction_tag(struct scb *scb, int enabled, u_int type) +{ + /* + * Nothing to do for linux as the incoming transaction + * has no concept of tag/non tagged, etc. + */ +} + +static inline +u_long ahd_get_transfer_length(struct scb *scb) +{ + return (scb->platform_data->xfer_len); +} + +static inline +int ahd_get_transfer_dir(struct scb *scb) +{ + return (scb->io_ctx->sc_data_direction); +} + +static inline +void ahd_set_residual(struct scb *scb, u_long resid) +{ + scsi_set_resid(scb->io_ctx, resid); +} + +static inline +void ahd_set_sense_residual(struct scb *scb, u_long resid) +{ + scb->platform_data->sense_resid = resid; +} + +static inline +u_long ahd_get_residual(struct scb *scb) +{ + return scsi_get_resid(scb->io_ctx); +} + +static inline +u_long ahd_get_sense_residual(struct scb *scb) +{ + return (scb->platform_data->sense_resid); +} + +static inline +int ahd_perform_autosense(struct scb *scb) +{ + /* + * We always perform autosense in Linux. + * On other platforms this is set on a + * per-transaction basis. 
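The wrappers above keep two things in one 32-bit result word: the SCSI status in the low 16 bits and the CAM transaction status in the bits above it. A compact sketch of that packing and unpacking; the mask width used here is an assumption for the illustration, the driver takes the real CAM_STATUS_MASK from its cam.h:

#include <stdio.h>
#include <stdint.h>

#define CAM_STATUS_MASK 0x3F    /* assumed field width for this sketch */

static void set_transaction_status(uint32_t *result, uint32_t status)
{
        *result &= ~((uint32_t)CAM_STATUS_MASK << 16);
        *result |= status << 16;
}

static void set_scsi_status(uint32_t *result, uint32_t status)
{
        *result &= ~0xFFFFu;
        *result |= status;
}

int main(void)
{
        uint32_t result = 0;

        set_scsi_status(&result, 0x02);         /* CHECK CONDITION */
        set_transaction_status(&result, 0x04);  /* some CAM completion code */
        printf("result=0x%08x scsi=0x%02x cam=0x%02x\n",
               result, result & 0xFFFF, (result >> 16) & CAM_STATUS_MASK);
        return 0;
}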
+ */ + return (1); +} + +static inline uint32_t +ahd_get_sense_bufsize(struct ahd_softc *ahd, struct scb *scb) +{ + return (sizeof(struct scsi_sense_data)); +} + +static inline void +ahd_notify_xfer_settings_change(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo) +{ + /* Nothing to do here for linux */ +} + +static inline void +ahd_platform_scb_free(struct ahd_softc *ahd, struct scb *scb) +{ + ahd->flags &= ~AHD_RESOURCE_SHORTAGE; +} + +int ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg); +void ahd_platform_free(struct ahd_softc *ahd); +void ahd_platform_init(struct ahd_softc *ahd); +void ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb); + +static inline void +ahd_freeze_scb(struct scb *scb) +{ + if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) { + scb->io_ctx->result |= CAM_DEV_QFRZN << 16; + scb->platform_data->dev->qfrozen++; + } +} + +void ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev, + struct ahd_devinfo *devinfo, ahd_queue_alg); +int ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, + char channel, int lun, u_int tag, + role_t role, uint32_t status); +irqreturn_t + ahd_linux_isr(int irq, void *dev_id); +void ahd_done(struct ahd_softc*, struct scb*); +void ahd_send_async(struct ahd_softc *, char channel, + u_int target, u_int lun, ac_code); +void ahd_print_path(struct ahd_softc *, struct scb *); + +#ifdef CONFIG_PCI +#define AHD_PCI_CONFIG 1 +#else +#define AHD_PCI_CONFIG 0 +#endif +#define bootverbose aic79xx_verbose +extern uint32_t aic79xx_verbose; + +#endif /* _AIC79XX_LINUX_H_ */ diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c new file mode 100644 index 000000000..b92e2e3c3 --- /dev/null +++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c @@ -0,0 +1,378 @@ +/* + * Linux driver attachment glue for PCI based U320 controllers. + * + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm_pci.c#25 $ + */ + +#include "aic79xx_osm.h" +#include "aic79xx_inline.h" +#include "aic79xx_pci.h" + +/* Define the macro locally since it's different for different class of chips. + */ +#define ID(x) \ + ID2C(x), \ + ID2C(IDIROC(x)) + +static const struct pci_device_id ahd_linux_pci_id_table[] = { + /* aic7901 based controllers */ + ID(ID_AHA_29320A), + ID(ID_AHA_29320ALP), + ID(ID_AHA_29320LPE), + /* aic7902 based controllers */ + ID(ID_AHA_29320), + ID(ID_AHA_29320B), + ID(ID_AHA_29320LP), + ID(ID_AHA_39320), + ID(ID_AHA_39320_B), + ID(ID_AHA_39320A), + ID(ID_AHA_39320D), + ID(ID_AHA_39320D_HP), + ID(ID_AHA_39320D_B), + ID(ID_AHA_39320D_B_HP), + /* Generic chip probes for devices we don't know exactly. */ + ID16(ID_AIC7901 & ID_9005_GENERIC_MASK), + ID(ID_AIC7901A & ID_DEV_VENDOR_MASK), + ID16(ID_AIC7902 & ID_9005_GENERIC_MASK), + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, ahd_linux_pci_id_table); + +static int __maybe_unused +ahd_linux_pci_dev_suspend(struct device *dev) +{ + struct ahd_softc *ahd = dev_get_drvdata(dev); + int rc; + + if ((rc = ahd_suspend(ahd))) + return rc; + + ahd_pci_suspend(ahd); + + return rc; +} + +static int __maybe_unused +ahd_linux_pci_dev_resume(struct device *dev) +{ + struct ahd_softc *ahd = dev_get_drvdata(dev); + + ahd_pci_resume(ahd); + + ahd_resume(ahd); + + return 0; +} + +static void +ahd_linux_pci_dev_remove(struct pci_dev *pdev) +{ + struct ahd_softc *ahd = pci_get_drvdata(pdev); + u_long s; + + if (ahd->platform_data && ahd->platform_data->host) + scsi_remove_host(ahd->platform_data->host); + + ahd_lock(ahd, &s); + ahd_intr_enable(ahd, FALSE); + ahd_unlock(ahd, &s); + ahd_free(ahd); +} + +static void +ahd_linux_pci_inherit_flags(struct ahd_softc *ahd) +{ + struct pci_dev *pdev = ahd->dev_softc, *master_pdev; + unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); + + master_pdev = pci_get_slot(pdev->bus, master_devfn); + if (master_pdev) { + struct ahd_softc *master = pci_get_drvdata(master_pdev); + if (master) { + ahd->flags &= ~AHD_BIOS_ENABLED; + ahd->flags |= master->flags & AHD_BIOS_ENABLED; + } else + printk(KERN_ERR "aic79xx: no multichannel peer found!\n"); + pci_dev_put(master_pdev); + } +} + +static int +ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + char buf[80]; + struct ahd_softc *ahd; + ahd_dev_softc_t pci; + const struct ahd_pci_identity *entry; + char *name; + int error; + struct device *dev = &pdev->dev; + + pci = pdev; + entry = ahd_find_pci_device(pci); + if (entry == NULL) + return (-ENODEV); + + /* + * Allocate a softc for this card and + * set it up for attachment by our + * common detect routine. 
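ahd_linux_pci_inherit_flags() above locates function 0 of the same slot by rebuilding a devfn value with PCI_DEVFN(PCI_SLOT(pdev->devfn), 0). The encoding behind those macros is a 5-bit slot and a 3-bit function packed into one byte; a standalone sketch with local macros mirroring that layout:

#include <stdio.h>

/* Same bit layout as the Linux PCI_DEVFN/PCI_SLOT/PCI_FUNC macros:
 * devfn = slot[4:0] << 3 | func[2:0]. */
#define DEVFN(slot, func)       ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define SLOT(devfn)             (((devfn) >> 3) & 0x1f)
#define FUNC(devfn)             ((devfn) & 0x07)

int main(void)
{
        unsigned int devfn = DEVFN(4, 1);               /* slot 4, function 1 */
        unsigned int master = DEVFN(SLOT(devfn), 0);    /* same slot, function 0 */

        printf("devfn=0x%02x slot=%u func=%u master_devfn=0x%02x\n",
               devfn, SLOT(devfn), FUNC(devfn), master);
        return 0;
}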
+ */ + sprintf(buf, "ahd_pci:%d:%d:%d", + ahd_get_pci_bus(pci), + ahd_get_pci_slot(pci), + ahd_get_pci_function(pci)); + name = kstrdup(buf, GFP_ATOMIC); + if (name == NULL) + return (-ENOMEM); + ahd = ahd_alloc(NULL, name); + if (ahd == NULL) + return (-ENOMEM); + if (pci_enable_device(pdev)) { + ahd_free(ahd); + return (-ENODEV); + } + pci_set_master(pdev); + + if (sizeof(dma_addr_t) > 4) { + const u64 required_mask = dma_get_required_mask(dev); + + if (required_mask > DMA_BIT_MASK(39) && + dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) + ahd->flags |= AHD_64BIT_ADDRESSING; + else if (required_mask > DMA_BIT_MASK(32) && + dma_set_mask(dev, DMA_BIT_MASK(39)) == 0) + ahd->flags |= AHD_39BIT_ADDRESSING; + else + dma_set_mask(dev, DMA_BIT_MASK(32)); + } else { + dma_set_mask(dev, DMA_BIT_MASK(32)); + } + ahd->dev_softc = pci; + error = ahd_pci_config(ahd, entry); + if (error != 0) { + ahd_free(ahd); + return (-error); + } + + /* + * Second Function PCI devices need to inherit some + * * settings from function 0. + */ + if ((ahd->features & AHD_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0) + ahd_linux_pci_inherit_flags(ahd); + + pci_set_drvdata(pdev, ahd); + + ahd_linux_register_host(ahd, &aic79xx_driver_template); + return (0); +} + +static SIMPLE_DEV_PM_OPS(ahd_linux_pci_dev_pm_ops, + ahd_linux_pci_dev_suspend, + ahd_linux_pci_dev_resume); + +static struct pci_driver aic79xx_pci_driver = { + .name = "aic79xx", + .probe = ahd_linux_pci_dev_probe, + .driver.pm = &ahd_linux_pci_dev_pm_ops, + .remove = ahd_linux_pci_dev_remove, + .id_table = ahd_linux_pci_id_table +}; + +int +ahd_linux_pci_init(void) +{ + return pci_register_driver(&aic79xx_pci_driver); +} + +void +ahd_linux_pci_exit(void) +{ + pci_unregister_driver(&aic79xx_pci_driver); +} + +static int +ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, resource_size_t *base, + resource_size_t *base2) +{ + *base = pci_resource_start(ahd->dev_softc, 0); + /* + * This is really the 3rd bar and should be at index 2, + * but the Linux PCI code doesn't know how to "count" 64bit + * bars. + */ + *base2 = pci_resource_start(ahd->dev_softc, 3); + if (*base == 0 || *base2 == 0) + return (ENOMEM); + if (!request_region(*base, 256, "aic79xx")) + return (ENOMEM); + if (!request_region(*base2, 256, "aic79xx")) { + release_region(*base, 256); + return (ENOMEM); + } + return (0); +} + +static int +ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd, + resource_size_t *bus_addr, + uint8_t __iomem **maddr) +{ + resource_size_t start; + resource_size_t base_page; + u_long base_offset; + int error = 0; + + if (aic79xx_allow_memio == 0) + return (ENOMEM); + + if ((ahd->bugs & AHD_PCIX_MMAPIO_BUG) != 0) + return (ENOMEM); + + start = pci_resource_start(ahd->dev_softc, 1); + base_page = start & PAGE_MASK; + base_offset = start - base_page; + if (start != 0) { + *bus_addr = start; + if (!request_mem_region(start, 0x1000, "aic79xx")) + error = ENOMEM; + if (!error) { + *maddr = ioremap(base_page, base_offset + 512); + if (*maddr == NULL) { + error = ENOMEM; + release_mem_region(start, 0x1000); + } else + *maddr += base_offset; + } + } else + error = ENOMEM; + return (error); +} + +int +ahd_pci_map_registers(struct ahd_softc *ahd) +{ + uint32_t command; + resource_size_t base; + uint8_t __iomem *maddr; + int error; + + /* + * If its allowed, we prefer memory mapped access. 
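The addressing setup in the probe path above is a ladder keyed on the platform's required DMA mask: prefer 64-bit addressing, fall back to the chip's 39-bit mode, and finally to 32-bit. The same decision with plain integers, ignoring the dma_set_mask() success checks for brevity:

#include <stdio.h>
#include <stdint.h>

#define BIT_MASK(n)     (((n) == 64) ? ~0ull : ((1ull << (n)) - 1))

/* Pick the widest addressing mode the platform actually needs,
 * the way the probe routine does. */
static const char *pick_dma_mode(uint64_t required_mask)
{
        if (required_mask > BIT_MASK(39))
                return "64-bit addressing";
        if (required_mask > BIT_MASK(32))
                return "39-bit addressing";
        return "32-bit addressing";
}

int main(void)
{
        printf("4GB of RAM:   %s\n", pick_dma_mode(BIT_MASK(32)));
        printf("128GB of RAM: %s\n", pick_dma_mode(BIT_MASK(37)));
        printf("full 64-bit:  %s\n", pick_dma_mode(BIT_MASK(64)));
        return 0;
}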
+ */ + command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, 4); + command &= ~(PCIM_CMD_PORTEN|PCIM_CMD_MEMEN); + base = 0; + maddr = NULL; + error = ahd_linux_pci_reserve_mem_region(ahd, &base, &maddr); + if (error == 0) { + ahd->platform_data->mem_busaddr = base; + ahd->tags[0] = BUS_SPACE_MEMIO; + ahd->bshs[0].maddr = maddr; + ahd->tags[1] = BUS_SPACE_MEMIO; + ahd->bshs[1].maddr = maddr + 0x100; + ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, + command | PCIM_CMD_MEMEN, 4); + + if (ahd_pci_test_register_access(ahd) != 0) { + + printk("aic79xx: PCI Device %d:%d:%d " + "failed memory mapped test. Using PIO.\n", + ahd_get_pci_bus(ahd->dev_softc), + ahd_get_pci_slot(ahd->dev_softc), + ahd_get_pci_function(ahd->dev_softc)); + iounmap(maddr); + release_mem_region(ahd->platform_data->mem_busaddr, + 0x1000); + ahd->bshs[0].maddr = NULL; + maddr = NULL; + } else + command |= PCIM_CMD_MEMEN; + } else if (bootverbose) { + printk("aic79xx: PCI%d:%d:%d MEM region 0x%llx " + "unavailable. Cannot memory map device.\n", + ahd_get_pci_bus(ahd->dev_softc), + ahd_get_pci_slot(ahd->dev_softc), + ahd_get_pci_function(ahd->dev_softc), + (unsigned long long)base); + } + + if (maddr == NULL) { + resource_size_t base2; + + error = ahd_linux_pci_reserve_io_regions(ahd, &base, &base2); + if (error == 0) { + ahd->tags[0] = BUS_SPACE_PIO; + ahd->tags[1] = BUS_SPACE_PIO; + ahd->bshs[0].ioport = (u_long)base; + ahd->bshs[1].ioport = (u_long)base2; + command |= PCIM_CMD_PORTEN; + } else { + printk("aic79xx: PCI%d:%d:%d IO regions 0x%llx and " + "0x%llx unavailable. Cannot map device.\n", + ahd_get_pci_bus(ahd->dev_softc), + ahd_get_pci_slot(ahd->dev_softc), + ahd_get_pci_function(ahd->dev_softc), + (unsigned long long)base, + (unsigned long long)base2); + } + } + ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, 4); + return (error); +} + +int +ahd_pci_map_int(struct ahd_softc *ahd) +{ + int error; + + error = request_irq(ahd->dev_softc->irq, ahd_linux_isr, + IRQF_SHARED, "aic79xx", ahd); + if (!error) + ahd->platform_data->irq = ahd->dev_softc->irq; + + return (-error); +} + +void +ahd_power_state_change(struct ahd_softc *ahd, ahd_power_state new_state) +{ + pci_set_power_state(ahd->dev_softc, new_state); +} diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c new file mode 100644 index 000000000..5fad41b1a --- /dev/null +++ b/drivers/scsi/aic7xxx/aic79xx_pci.c @@ -0,0 +1,1004 @@ +/* + * Product specific probe and attach routines for: + * aic7901 and aic7902 SCSI controllers + * + * Copyright (c) 1994-2001 Justin T. Gibbs. + * Copyright (c) 2000-2002 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#92 $ + */ + +#include "aic79xx_osm.h" +#include "aic79xx_inline.h" +#include "aic79xx_pci.h" + +static inline uint64_t +ahd_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor) +{ + uint64_t id; + + id = subvendor + | (subdevice << 16) + | ((uint64_t)vendor << 32) + | ((uint64_t)device << 48); + + return (id); +} + +#define ID_AIC7902_PCI_REV_A4 0x3 +#define ID_AIC7902_PCI_REV_B0 0x10 +#define SUBID_HP 0x0E11 + +#define DEVID_9005_HOSTRAID(id) ((id) & 0x80) + +#define DEVID_9005_TYPE(id) ((id) & 0xF) +#define DEVID_9005_TYPE_HBA 0x0 /* Standard Card */ +#define DEVID_9005_TYPE_HBA_2EXT 0x1 /* 2 External Ports */ +#define DEVID_9005_TYPE_IROC 0x8 /* Raid(0,1,10) Card */ +#define DEVID_9005_TYPE_MB 0xF /* On Motherboard */ + +#define DEVID_9005_MFUNC(id) ((id) & 0x10) + +#define DEVID_9005_PACKETIZED(id) ((id) & 0x8000) + +#define SUBID_9005_TYPE(id) ((id) & 0xF) +#define SUBID_9005_TYPE_HBA 0x0 /* Standard Card */ +#define SUBID_9005_TYPE_MB 0xF /* On Motherboard */ + +#define SUBID_9005_AUTOTERM(id) (((id) & 0x10) == 0) + +#define SUBID_9005_LEGACYCONN_FUNC(id) ((id) & 0x20) + +#define SUBID_9005_SEEPTYPE(id) (((id) & 0x0C0) >> 6) +#define SUBID_9005_SEEPTYPE_NONE 0x0 +#define SUBID_9005_SEEPTYPE_4K 0x1 + +static ahd_device_setup_t ahd_aic7901_setup; +static ahd_device_setup_t ahd_aic7901A_setup; +static ahd_device_setup_t ahd_aic7902_setup; +static ahd_device_setup_t ahd_aic790X_setup; + +static const struct ahd_pci_identity ahd_pci_ident_table[] = +{ + /* aic7901 based controllers */ + { + ID_AHA_29320A, + ID_ALL_MASK, + "Adaptec 29320A Ultra320 SCSI adapter", + ahd_aic7901_setup + }, + { + ID_AHA_29320ALP, + ID_ALL_MASK, + "Adaptec 29320ALP PCIx Ultra320 SCSI adapter", + ahd_aic7901_setup + }, + { + ID_AHA_29320LPE, + ID_ALL_MASK, + "Adaptec 29320LPE PCIe Ultra320 SCSI adapter", + ahd_aic7901_setup + }, + /* aic7901A based controllers */ + { + ID_AHA_29320LP, + ID_ALL_MASK, + "Adaptec 29320LP Ultra320 SCSI adapter", + ahd_aic7901A_setup + }, + /* aic7902 based controllers */ + { + ID_AHA_29320, + ID_ALL_MASK, + "Adaptec 29320 Ultra320 SCSI adapter", + ahd_aic7902_setup + }, + { + ID_AHA_29320B, + ID_ALL_MASK, + "Adaptec 29320B Ultra320 SCSI adapter", + ahd_aic7902_setup + }, + { + ID_AHA_39320, + ID_ALL_MASK, + "Adaptec 39320 Ultra320 SCSI adapter", + ahd_aic7902_setup + }, + { + ID_AHA_39320_B, + ID_ALL_MASK, + "Adaptec 39320 Ultra320 SCSI adapter", + ahd_aic7902_setup + }, + { + ID_AHA_39320_B_DELL, + ID_ALL_MASK, + "Adaptec (Dell OEM) 39320 Ultra320 SCSI adapter", + 
ahd_aic7902_setup + }, + { + ID_AHA_39320A, + ID_ALL_MASK, + "Adaptec 39320A Ultra320 SCSI adapter", + ahd_aic7902_setup + }, + { + ID_AHA_39320D, + ID_ALL_MASK, + "Adaptec 39320D Ultra320 SCSI adapter", + ahd_aic7902_setup + }, + { + ID_AHA_39320D_HP, + ID_ALL_MASK, + "Adaptec (HP OEM) 39320D Ultra320 SCSI adapter", + ahd_aic7902_setup + }, + { + ID_AHA_39320D_B, + ID_ALL_MASK, + "Adaptec 39320D Ultra320 SCSI adapter", + ahd_aic7902_setup + }, + { + ID_AHA_39320D_B_HP, + ID_ALL_MASK, + "Adaptec (HP OEM) 39320D Ultra320 SCSI adapter", + ahd_aic7902_setup + }, + /* Generic chip probes for devices we don't know 'exactly' */ + { + ID_AIC7901 & ID_9005_GENERIC_MASK, + ID_9005_GENERIC_MASK, + "Adaptec AIC7901 Ultra320 SCSI adapter", + ahd_aic7901_setup + }, + { + ID_AIC7901A & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec AIC7901A Ultra320 SCSI adapter", + ahd_aic7901A_setup + }, + { + ID_AIC7902 & ID_9005_GENERIC_MASK, + ID_9005_GENERIC_MASK, + "Adaptec AIC7902 Ultra320 SCSI adapter", + ahd_aic7902_setup + } +}; + +static const u_int ahd_num_pci_devs = ARRAY_SIZE(ahd_pci_ident_table); + +#define DEVCONFIG 0x40 +#define PCIXINITPAT 0x0000E000ul +#define PCIXINIT_PCI33_66 0x0000E000ul +#define PCIXINIT_PCIX50_66 0x0000C000ul +#define PCIXINIT_PCIX66_100 0x0000A000ul +#define PCIXINIT_PCIX100_133 0x00008000ul +#define PCI_BUS_MODES_INDEX(devconfig) \ + (((devconfig) & PCIXINITPAT) >> 13) +static const char *pci_bus_modes[] = +{ + "PCI bus mode unknown", + "PCI bus mode unknown", + "PCI bus mode unknown", + "PCI bus mode unknown", + "PCI-X 101-133MHz", + "PCI-X 67-100MHz", + "PCI-X 50-66MHz", + "PCI 33 or 66MHz" +}; + +#define TESTMODE 0x00000800ul +#define IRDY_RST 0x00000200ul +#define FRAME_RST 0x00000100ul +#define PCI64BIT 0x00000080ul +#define MRDCEN 0x00000040ul +#define ENDIANSEL 0x00000020ul +#define MIXQWENDIANEN 0x00000008ul +#define DACEN 0x00000004ul +#define STPWLEVEL 0x00000002ul +#define QWENDIANSEL 0x00000001ul + +#define DEVCONFIG1 0x44 +#define PREQDIS 0x01 + +#define CSIZE_LATTIME 0x0c +#define CACHESIZE 0x000000fful +#define LATTIME 0x0000ff00ul + +static int ahd_check_extport(struct ahd_softc *ahd); +static void ahd_configure_termination(struct ahd_softc *ahd, + u_int adapter_control); +static void ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat); +static void ahd_pci_intr(struct ahd_softc *ahd); + +const struct ahd_pci_identity * +ahd_find_pci_device(ahd_dev_softc_t pci) +{ + uint64_t full_id; + uint16_t device; + uint16_t vendor; + uint16_t subdevice; + uint16_t subvendor; + const struct ahd_pci_identity *entry; + u_int i; + + vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); + device = ahd_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2); + subvendor = ahd_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2); + subdevice = ahd_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2); + full_id = ahd_compose_id(device, + vendor, + subdevice, + subvendor); + + /* + * Controllers, mask out the IROC/HostRAID bit + */ + + full_id &= ID_ALL_IROC_MASK; + + for (i = 0; i < ahd_num_pci_devs; i++) { + entry = &ahd_pci_ident_table[i]; + if (entry->full_id == (full_id & entry->id_mask)) { + /* Honor exclusion entries. */ + if (entry->name == NULL) + return (NULL); + return (entry); + } + } + return (NULL); +} + +int +ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry) +{ + u_int command; + uint32_t devconfig; + uint16_t subvendor; + int error; + + ahd->description = entry->name; + /* + * Record if this is an HP board. 
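ahd_find_pci_device() matches on a 64-bit key built by ahd_compose_id() from the four PCI identifiers. A worked, standalone version showing how one table entry's value comes together; the 29320A numbers used below are the ones visible in aic79xx_pci.h later in this patch:

#include <stdio.h>
#include <stdint.h>

/* Same packing as ahd_compose_id():
 * subvendor | subdevice<<16 | vendor<<32 | device<<48. */
static uint64_t compose_id(unsigned int device, unsigned int vendor,
                           unsigned int subdevice, unsigned int subvendor)
{
        return (uint64_t)subvendor
             | ((uint64_t)subdevice << 16)
             | ((uint64_t)vendor << 32)
             | ((uint64_t)device << 48);
}

int main(void)
{
        /* Adaptec 29320A: device 0x8000, vendor 0x9005,
         * subdevice 0x0060, subvendor 0x9005. */
        uint64_t id = compose_id(0x8000, 0x9005, 0x0060, 0x9005);

        printf("0x%016llx\n", (unsigned long long)id); /* 0x8000900500609005 */
        return 0;
}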
+ */ + subvendor = ahd_pci_read_config(ahd->dev_softc, + PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2); + if (subvendor == SUBID_HP) + ahd->flags |= AHD_HP_BOARD; + + error = entry->setup(ahd); + if (error != 0) + return (error); + + devconfig = ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); + if ((devconfig & PCIXINITPAT) == PCIXINIT_PCI33_66) { + ahd->chip |= AHD_PCI; + /* Disable PCIX workarounds when running in PCI mode. */ + ahd->bugs &= ~AHD_PCIX_BUG_MASK; + } else { + ahd->chip |= AHD_PCIX; + } + ahd->bus_description = pci_bus_modes[PCI_BUS_MODES_INDEX(devconfig)]; + + ahd_power_state_change(ahd, AHD_POWER_STATE_D0); + + error = ahd_pci_map_registers(ahd); + if (error != 0) + return (error); + + /* + * If we need to support high memory, enable dual + * address cycles. This bit must be set to enable + * high address bit generation even if we are on a + * 64bit bus (PCI64BIT set in devconfig). + */ + if ((ahd->flags & (AHD_39BIT_ADDRESSING|AHD_64BIT_ADDRESSING)) != 0) { + if (bootverbose) + printk("%s: Enabling 39Bit Addressing\n", + ahd_name(ahd)); + devconfig = ahd_pci_read_config(ahd->dev_softc, + DEVCONFIG, /*bytes*/4); + devconfig |= DACEN; + ahd_pci_write_config(ahd->dev_softc, DEVCONFIG, + devconfig, /*bytes*/4); + } + + /* Ensure busmastering is enabled */ + command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); + command |= PCIM_CMD_BUSMASTEREN; + ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, /*bytes*/2); + + error = ahd_softc_init(ahd); + if (error != 0) + return (error); + + ahd->bus_intr = ahd_pci_intr; + + error = ahd_reset(ahd, /*reinit*/FALSE); + if (error != 0) + return (ENXIO); + + ahd->pci_cachesize = + ahd_pci_read_config(ahd->dev_softc, CSIZE_LATTIME, + /*bytes*/1) & CACHESIZE; + ahd->pci_cachesize *= 4; + + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + /* See if we have a SEEPROM and perform auto-term */ + error = ahd_check_extport(ahd); + if (error != 0) + return (error); + + /* Core initialization */ + error = ahd_init(ahd); + if (error != 0) + return (error); + ahd->init_level++; + + /* + * Allow interrupts now that we are completely setup. + */ + return ahd_pci_map_int(ahd); +} + +void __maybe_unused +ahd_pci_suspend(struct ahd_softc *ahd) +{ + /* + * Save chip register configuration data for chip resets + * that occur during runtime and resume events. + */ + ahd->suspend_state.pci_state.devconfig = + ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); + ahd->suspend_state.pci_state.command = + ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/1); + ahd->suspend_state.pci_state.csize_lattime = + ahd_pci_read_config(ahd->dev_softc, CSIZE_LATTIME, /*bytes*/1); + +} + +void __maybe_unused +ahd_pci_resume(struct ahd_softc *ahd) +{ + ahd_pci_write_config(ahd->dev_softc, DEVCONFIG, + ahd->suspend_state.pci_state.devconfig, /*bytes*/4); + ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, + ahd->suspend_state.pci_state.command, /*bytes*/1); + ahd_pci_write_config(ahd->dev_softc, CSIZE_LATTIME, + ahd->suspend_state.pci_state.csize_lattime, /*bytes*/1); +} + +/* + * Perform some simple tests that should catch situations where + * our registers are invalidly mapped. + */ +int +ahd_pci_test_register_access(struct ahd_softc *ahd) +{ + uint32_t cmd; + u_int targpcistat; + u_int pci_status1; + int error; + uint8_t hcntrl; + + error = EIO; + + /* + * Enable PCI error interrupt status, but suppress NMIs + * generated by SERR raised due to target aborts. 
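The bus_description lookup above turns the three PCIXINITPAT bits of DEVCONFIG into a table index: PCI_BUS_MODES_INDEX() is simply (devconfig & 0xE000) >> 13. A quick standalone check against two of the encodings defined earlier in this file:

#include <stdio.h>

#define PCIXINITPAT             0x0000E000ul
#define PCIXINIT_PCI33_66       0x0000E000ul
#define PCIXINIT_PCIX100_133    0x00008000ul
#define PCI_BUS_MODES_INDEX(devconfig)  (((devconfig) & PCIXINITPAT) >> 13)

static const char *pci_bus_modes[] = {
        "PCI bus mode unknown", "PCI bus mode unknown",
        "PCI bus mode unknown", "PCI bus mode unknown",
        "PCI-X 101-133MHz", "PCI-X 67-100MHz",
        "PCI-X 50-66MHz", "PCI 33 or 66MHz"
};

int main(void)
{
        /* 0xE000 >> 13 == 7 and 0x8000 >> 13 == 4, so the two patterns land
         * on the last and the fifth table entries respectively. */
        printf("%s\n", pci_bus_modes[PCI_BUS_MODES_INDEX(PCIXINIT_PCI33_66)]);
        printf("%s\n", pci_bus_modes[PCI_BUS_MODES_INDEX(PCIXINIT_PCIX100_133)]);
        return 0;
}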
+ */ + cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); + ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, + cmd & ~PCIM_CMD_SERRESPEN, /*bytes*/2); + + /* + * First a simple test to see if any + * registers can be read. Reading + * HCNTRL has no side effects and has + * at least one bit that is guaranteed to + * be zero so it is a good register to + * use for this test. + */ + hcntrl = ahd_inb(ahd, HCNTRL); + if (hcntrl == 0xFF) + goto fail; + + /* + * Next create a situation where write combining + * or read prefetching could be initiated by the + * CPU or host bridge. Our device does not support + * either, so look for data corruption and/or flaged + * PCI errors. First pause without causing another + * chip reset. + */ + hcntrl &= ~CHIPRST; + ahd_outb(ahd, HCNTRL, hcntrl|PAUSE); + while (ahd_is_paused(ahd) == 0) + ; + + /* Clear any PCI errors that occurred before our driver attached. */ + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + targpcistat = ahd_inb(ahd, TARGPCISTAT); + ahd_outb(ahd, TARGPCISTAT, targpcistat); + pci_status1 = ahd_pci_read_config(ahd->dev_softc, + PCIR_STATUS + 1, /*bytes*/1); + ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, + pci_status1, /*bytes*/1); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + ahd_outb(ahd, CLRINT, CLRPCIINT); + + ahd_outb(ahd, SEQCTL0, PERRORDIS); + ahd_outl(ahd, SRAM_BASE, 0x5aa555aa); + if (ahd_inl(ahd, SRAM_BASE) != 0x5aa555aa) + goto fail; + + if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) { + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + targpcistat = ahd_inb(ahd, TARGPCISTAT); + if ((targpcistat & STA) != 0) + goto fail; + } + + error = 0; + +fail: + if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) { + + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + targpcistat = ahd_inb(ahd, TARGPCISTAT); + + /* Silently clear any latched errors. */ + ahd_outb(ahd, TARGPCISTAT, targpcistat); + pci_status1 = ahd_pci_read_config(ahd->dev_softc, + PCIR_STATUS + 1, /*bytes*/1); + ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, + pci_status1, /*bytes*/1); + ahd_outb(ahd, CLRINT, CLRPCIINT); + } + ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS); + ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2); + return (error); +} + +/* + * Check the external port logic for a serial eeprom + * and termination/cable detection contrls. + */ +static int +ahd_check_extport(struct ahd_softc *ahd) +{ + struct vpd_config vpd; + struct seeprom_config *sc; + u_int adapter_control; + int have_seeprom; + int error; + + sc = ahd->seep_config; + have_seeprom = ahd_acquire_seeprom(ahd); + if (have_seeprom) { + u_int start_addr; + + /* + * Fetch VPD for this function and parse it. + */ + if (bootverbose) + printk("%s: Reading VPD from SEEPROM...", + ahd_name(ahd)); + + /* Address is always in units of 16bit words */ + start_addr = ((2 * sizeof(*sc)) + + (sizeof(vpd) * (ahd->channel - 'A'))) / 2; + + error = ahd_read_seeprom(ahd, (uint16_t *)&vpd, + start_addr, sizeof(vpd)/2, + /*bytestream*/TRUE); + if (error == 0) + error = ahd_parse_vpddata(ahd, &vpd); + if (bootverbose) + printk("%s: VPD parsing %s\n", + ahd_name(ahd), + error == 0 ? 
"successful" : "failed"); + + if (bootverbose) + printk("%s: Reading SEEPROM...", ahd_name(ahd)); + + /* Address is always in units of 16bit words */ + start_addr = (sizeof(*sc) / 2) * (ahd->channel - 'A'); + + error = ahd_read_seeprom(ahd, (uint16_t *)sc, + start_addr, sizeof(*sc)/2, + /*bytestream*/FALSE); + + if (error != 0) { + printk("Unable to read SEEPROM\n"); + have_seeprom = 0; + } else { + have_seeprom = ahd_verify_cksum(sc); + + if (bootverbose) { + if (have_seeprom == 0) + printk ("checksum error\n"); + else + printk ("done.\n"); + } + } + ahd_release_seeprom(ahd); + } + + if (!have_seeprom) { + u_int nvram_scb; + + /* + * Pull scratch ram settings and treat them as + * if they are the contents of an seeprom if + * the 'ADPT', 'BIOS', or 'ASPI' signature is found + * in SCB 0xFF. We manually compose the data as 16bit + * values to avoid endian issues. + */ + ahd_set_scbptr(ahd, 0xFF); + nvram_scb = ahd_inb_scbram(ahd, SCB_BASE + NVRAM_SCB_OFFSET); + if (nvram_scb != 0xFF + && ((ahd_inb_scbram(ahd, SCB_BASE + 0) == 'A' + && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'D' + && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'P' + && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'T') + || (ahd_inb_scbram(ahd, SCB_BASE + 0) == 'B' + && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'I' + && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'O' + && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'S') + || (ahd_inb_scbram(ahd, SCB_BASE + 0) == 'A' + && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'S' + && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'P' + && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'I'))) { + uint16_t *sc_data; + int i; + + ahd_set_scbptr(ahd, nvram_scb); + sc_data = (uint16_t *)sc; + for (i = 0; i < 64; i += 2) + *sc_data++ = ahd_inw_scbram(ahd, SCB_BASE+i); + have_seeprom = ahd_verify_cksum(sc); + if (have_seeprom) + ahd->flags |= AHD_SCB_CONFIG_USED; + } + } + +#ifdef AHD_DEBUG + if (have_seeprom != 0 + && (ahd_debug & AHD_DUMP_SEEPROM) != 0) { + uint16_t *sc_data; + int i; + + printk("%s: Seeprom Contents:", ahd_name(ahd)); + sc_data = (uint16_t *)sc; + for (i = 0; i < (sizeof(*sc)); i += 2) + printk("\n\t0x%.4x", sc_data[i]); + printk("\n"); + } +#endif + + if (!have_seeprom) { + if (bootverbose) + printk("%s: No SEEPROM available.\n", ahd_name(ahd)); + ahd->flags |= AHD_USEDEFAULTS; + error = ahd_default_config(ahd); + adapter_control = CFAUTOTERM|CFSEAUTOTERM; + kfree(ahd->seep_config); + ahd->seep_config = NULL; + } else { + error = ahd_parse_cfgdata(ahd, sc); + adapter_control = sc->adapter_control; + } + if (error != 0) + return (error); + + ahd_configure_termination(ahd, adapter_control); + + return (0); +} + +static void +ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control) +{ + int error; + u_int sxfrctl1; + uint8_t termctl; + uint32_t devconfig; + + devconfig = ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); + devconfig &= ~STPWLEVEL; + if ((ahd->flags & AHD_STPWLEVEL_A) != 0) + devconfig |= STPWLEVEL; + if (bootverbose) + printk("%s: STPWLEVEL is %s\n", + ahd_name(ahd), (devconfig & STPWLEVEL) ? "on" : "off"); + ahd_pci_write_config(ahd->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); + + /* Make sure current sensing is off. */ + if ((ahd->flags & AHD_CURRENT_SENSING) != 0) { + (void)ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0); + } + + /* + * Read to sense. Write to set. 
+ */ + error = ahd_read_flexport(ahd, FLXADDR_TERMCTL, &termctl); + if ((adapter_control & CFAUTOTERM) == 0) { + if (bootverbose) + printk("%s: Manual Primary Termination\n", + ahd_name(ahd)); + termctl &= ~(FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH); + if ((adapter_control & CFSTERM) != 0) + termctl |= FLX_TERMCTL_ENPRILOW; + if ((adapter_control & CFWSTERM) != 0) + termctl |= FLX_TERMCTL_ENPRIHIGH; + } else if (error != 0) { + printk("%s: Primary Auto-Term Sensing failed! " + "Using Defaults.\n", ahd_name(ahd)); + termctl = FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH; + } + + if ((adapter_control & CFSEAUTOTERM) == 0) { + if (bootverbose) + printk("%s: Manual Secondary Termination\n", + ahd_name(ahd)); + termctl &= ~(FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH); + if ((adapter_control & CFSELOWTERM) != 0) + termctl |= FLX_TERMCTL_ENSECLOW; + if ((adapter_control & CFSEHIGHTERM) != 0) + termctl |= FLX_TERMCTL_ENSECHIGH; + } else if (error != 0) { + printk("%s: Secondary Auto-Term Sensing failed! " + "Using Defaults.\n", ahd_name(ahd)); + termctl |= FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH; + } + + /* + * Now set the termination based on what we found. + */ + sxfrctl1 = ahd_inb(ahd, SXFRCTL1) & ~STPWEN; + ahd->flags &= ~AHD_TERM_ENB_A; + if ((termctl & FLX_TERMCTL_ENPRILOW) != 0) { + ahd->flags |= AHD_TERM_ENB_A; + sxfrctl1 |= STPWEN; + } + /* Must set the latch once in order to be effective. */ + ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN); + ahd_outb(ahd, SXFRCTL1, sxfrctl1); + + error = ahd_write_flexport(ahd, FLXADDR_TERMCTL, termctl); + if (error != 0) { + printk("%s: Unable to set termination settings!\n", + ahd_name(ahd)); + } else if (bootverbose) { + printk("%s: Primary High byte termination %sabled\n", + ahd_name(ahd), + (termctl & FLX_TERMCTL_ENPRIHIGH) ? "En" : "Dis"); + + printk("%s: Primary Low byte termination %sabled\n", + ahd_name(ahd), + (termctl & FLX_TERMCTL_ENPRILOW) ? "En" : "Dis"); + + printk("%s: Secondary High byte termination %sabled\n", + ahd_name(ahd), + (termctl & FLX_TERMCTL_ENSECHIGH) ? "En" : "Dis"); + + printk("%s: Secondary Low byte termination %sabled\n", + ahd_name(ahd), + (termctl & FLX_TERMCTL_ENSECLOW) ? 
"En" : "Dis"); + } + return; +} + +#define DPE 0x80 +#define SSE 0x40 +#define RMA 0x20 +#define RTA 0x10 +#define STA 0x08 +#define DPR 0x01 + +static const char *split_status_source[] = +{ + "DFF0", + "DFF1", + "OVLY", + "CMC", +}; + +static const char *pci_status_source[] = +{ + "DFF0", + "DFF1", + "SG", + "CMC", + "OVLY", + "NONE", + "MSI", + "TARG" +}; + +static const char *split_status_strings[] = +{ + "%s: Received split response in %s.\n", + "%s: Received split completion error message in %s\n", + "%s: Receive overrun in %s\n", + "%s: Count not complete in %s\n", + "%s: Split completion data bucket in %s\n", + "%s: Split completion address error in %s\n", + "%s: Split completion byte count error in %s\n", + "%s: Signaled Target-abort to early terminate a split in %s\n" +}; + +static const char *pci_status_strings[] = +{ + "%s: Data Parity Error has been reported via PERR# in %s\n", + "%s: Target initial wait state error in %s\n", + "%s: Split completion read data parity error in %s\n", + "%s: Split completion address attribute parity error in %s\n", + "%s: Received a Target Abort in %s\n", + "%s: Received a Master Abort in %s\n", + "%s: Signal System Error Detected in %s\n", + "%s: Address or Write Phase Parity Error Detected in %s.\n" +}; + +static void +ahd_pci_intr(struct ahd_softc *ahd) +{ + uint8_t pci_status[8]; + ahd_mode_state saved_modes; + u_int pci_status1; + u_int intstat; + u_int i; + u_int reg; + + intstat = ahd_inb(ahd, INTSTAT); + + if ((intstat & SPLTINT) != 0) + ahd_pci_split_intr(ahd, intstat); + + if ((intstat & PCIINT) == 0) + return; + + printk("%s: PCI error Interrupt\n", ahd_name(ahd)); + saved_modes = ahd_save_modes(ahd); + ahd_dump_card_state(ahd); + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + for (i = 0, reg = DF0PCISTAT; i < 8; i++, reg++) { + + if (i == 5) + continue; + pci_status[i] = ahd_inb(ahd, reg); + /* Clear latched errors. So our interrupt deasserts. */ + ahd_outb(ahd, reg, pci_status[i]); + } + + for (i = 0; i < 8; i++) { + u_int bit; + + if (i == 5) + continue; + + for (bit = 0; bit < 8; bit++) { + + if ((pci_status[i] & (0x1 << bit)) != 0) { + const char *s; + + s = pci_status_strings[bit]; + if (i == 7/*TARG*/ && bit == 3) + s = "%s: Signaled Target Abort\n"; + printk(s, ahd_name(ahd), pci_status_source[i]); + } + } + } + pci_status1 = ahd_pci_read_config(ahd->dev_softc, + PCIR_STATUS + 1, /*bytes*/1); + ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, + pci_status1, /*bytes*/1); + ahd_restore_modes(ahd, saved_modes); + ahd_outb(ahd, CLRINT, CLRPCIINT); + ahd_unpause(ahd); +} + +static void +ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat) +{ + uint8_t split_status[4]; + uint8_t split_status1[4]; + uint8_t sg_split_status[2]; + uint8_t sg_split_status1[2]; + ahd_mode_state saved_modes; + u_int i; + uint16_t pcix_status; + + /* + * Check for splits in all modes. Modes 0 and 1 + * additionally have SG engine splits to look at. + */ + pcix_status = ahd_pci_read_config(ahd->dev_softc, PCIXR_STATUS, + /*bytes*/2); + printk("%s: PCI Split Interrupt - PCI-X status = 0x%x\n", + ahd_name(ahd), pcix_status); + saved_modes = ahd_save_modes(ahd); + for (i = 0; i < 4; i++) { + ahd_set_modes(ahd, i, i); + + split_status[i] = ahd_inb(ahd, DCHSPLTSTAT0); + split_status1[i] = ahd_inb(ahd, DCHSPLTSTAT1); + /* Clear latched errors. So our interrupt deasserts. 
*/ + ahd_outb(ahd, DCHSPLTSTAT0, split_status[i]); + ahd_outb(ahd, DCHSPLTSTAT1, split_status1[i]); + if (i > 1) + continue; + sg_split_status[i] = ahd_inb(ahd, SGSPLTSTAT0); + sg_split_status1[i] = ahd_inb(ahd, SGSPLTSTAT1); + /* Clear latched errors. So our interrupt deasserts. */ + ahd_outb(ahd, SGSPLTSTAT0, sg_split_status[i]); + ahd_outb(ahd, SGSPLTSTAT1, sg_split_status1[i]); + } + + for (i = 0; i < 4; i++) { + u_int bit; + + for (bit = 0; bit < 8; bit++) { + + if ((split_status[i] & (0x1 << bit)) != 0) + printk(split_status_strings[bit], ahd_name(ahd), + split_status_source[i]); + + if (i > 1) + continue; + + if ((sg_split_status[i] & (0x1 << bit)) != 0) + printk(split_status_strings[bit], ahd_name(ahd), "SG"); + } + } + /* + * Clear PCI-X status bits. + */ + ahd_pci_write_config(ahd->dev_softc, PCIXR_STATUS, + pcix_status, /*bytes*/2); + ahd_outb(ahd, CLRINT, CLRSPLTINT); + ahd_restore_modes(ahd, saved_modes); +} + +static int +ahd_aic7901_setup(struct ahd_softc *ahd) +{ + + ahd->chip = AHD_AIC7901; + ahd->features = AHD_AIC7901_FE; + return (ahd_aic790X_setup(ahd)); +} + +static int +ahd_aic7901A_setup(struct ahd_softc *ahd) +{ + + ahd->chip = AHD_AIC7901A; + ahd->features = AHD_AIC7901A_FE; + return (ahd_aic790X_setup(ahd)); +} + +static int +ahd_aic7902_setup(struct ahd_softc *ahd) +{ + ahd->chip = AHD_AIC7902; + ahd->features = AHD_AIC7902_FE; + return (ahd_aic790X_setup(ahd)); +} + +static int +ahd_aic790X_setup(struct ahd_softc *ahd) +{ + ahd_dev_softc_t pci; + u_int rev; + + pci = ahd->dev_softc; + rev = ahd_pci_read_config(pci, PCIR_REVID, /*bytes*/1); + if (rev < ID_AIC7902_PCI_REV_A4) { + printk("%s: Unable to attach to unsupported chip revision %d\n", + ahd_name(ahd), rev); + ahd_pci_write_config(pci, PCIR_COMMAND, 0, /*bytes*/2); + return (ENXIO); + } + ahd->channel = ahd_get_pci_function(pci) + 'A'; + if (rev < ID_AIC7902_PCI_REV_B0) { + /* + * Enable A series workarounds. + */ + ahd->bugs |= AHD_SENT_SCB_UPDATE_BUG|AHD_ABORT_LQI_BUG + | AHD_PKT_BITBUCKET_BUG|AHD_LONG_SETIMO_BUG + | AHD_NLQICRC_DELAYED_BUG|AHD_SCSIRST_BUG + | AHD_LQO_ATNO_BUG|AHD_AUTOFLUSH_BUG + | AHD_CLRLQO_AUTOCLR_BUG|AHD_PCIX_MMAPIO_BUG + | AHD_PCIX_CHIPRST_BUG|AHD_PCIX_SCBRAM_RD_BUG + | AHD_PKTIZED_STATUS_BUG|AHD_PKT_LUN_BUG + | AHD_MDFF_WSCBPTR_BUG|AHD_REG_SLOW_SETTLE_BUG + | AHD_SET_MODE_BUG|AHD_BUSFREEREV_BUG + | AHD_NONPACKFIFO_BUG|AHD_PACED_NEGTABLE_BUG + | AHD_FAINT_LED_BUG; + + /* + * IO Cell parameter setup. + */ + AHD_SET_PRECOMP(ahd, AHD_PRECOMP_CUTBACK_29); + + if ((ahd->flags & AHD_HP_BOARD) == 0) + AHD_SET_SLEWRATE(ahd, AHD_SLEWRATE_DEF_REVA); + } else { + /* This is revision B and newer. */ + extern uint32_t aic79xx_slowcrc; + u_int devconfig1; + + ahd->features |= AHD_RTI|AHD_NEW_IOCELL_OPTS + | AHD_NEW_DFCNTRL_OPTS|AHD_FAST_CDB_DELIVERY + | AHD_BUSFREEREV_BUG; + ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_EARLY_REQ_BUG; + + /* If the user requested that the SLOWCRC bit to be set. */ + if (aic79xx_slowcrc) + ahd->features |= AHD_AIC79XXB_SLOWCRC; + + /* + * Some issues have been resolved in the 7901B. + */ + if ((ahd->features & AHD_MULTI_FUNC) != 0) + ahd->bugs |= AHD_INTCOLLISION_BUG|AHD_ABORT_LQI_BUG; + + /* + * IO Cell parameter setup. + */ + AHD_SET_PRECOMP(ahd, AHD_PRECOMP_CUTBACK_29); + AHD_SET_SLEWRATE(ahd, AHD_SLEWRATE_DEF_REVB); + AHD_SET_AMPLITUDE(ahd, AHD_AMPLITUDE_DEF); + + /* + * Set the PREQDIS bit for H2B which disables some workaround + * that doesn't work on regular PCI busses. + * XXX - Find out exactly what this does from the hardware + * folks! 
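ahd_aic790X_setup() above is essentially a three-way gate on the PCI revision ID: pre-A4 parts are refused, A-series parts get the long workaround list, and Rev B or newer parts get the newer feature bits. A stripped-down sketch of just that control flow, using the two revision constants defined earlier in this file:

#include <stdio.h>

#define ID_AIC7902_PCI_REV_A4   0x3
#define ID_AIC7902_PCI_REV_B0   0x10

/* Classify a revision the way the setup routine does. */
static const char *classify_rev(unsigned int rev)
{
        if (rev < ID_AIC7902_PCI_REV_A4)
                return "unsupported (attach refused)";
        if (rev < ID_AIC7902_PCI_REV_B0)
                return "Rev A: enable A-series workarounds";
        return "Rev B or newer: enable RTI/new IO cell options";
}

int main(void)
{
        unsigned int revs[] = { 0x2, 0x4, 0x10 };
        unsigned int i;

        for (i = 0; i < sizeof(revs) / sizeof(revs[0]); i++)
                printf("rev 0x%02x -> %s\n", revs[i], classify_rev(revs[i]));
        return 0;
}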
+ */ + devconfig1 = ahd_pci_read_config(pci, DEVCONFIG1, /*bytes*/1); + ahd_pci_write_config(pci, DEVCONFIG1, + devconfig1|PREQDIS, /*bytes*/1); + devconfig1 = ahd_pci_read_config(pci, DEVCONFIG1, /*bytes*/1); + } + + return (0); +} diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.h b/drivers/scsi/aic7xxx/aic79xx_pci.h new file mode 100644 index 000000000..16b7c70a6 --- /dev/null +++ b/drivers/scsi/aic7xxx/aic79xx_pci.h @@ -0,0 +1,72 @@ +/* + * Adaptec AIC79xx device driver for Linux. + * + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + * $Id$ + * + */ +#ifndef _AIC79XX_PCI_H_ +#define _AIC79XX_PCI_H_ + +#define ID_ALL_MASK 0xFFFFFFFFFFFFFFFFull +#define ID_ALL_IROC_MASK 0xFF7FFFFFFFFFFFFFull +#define ID_DEV_VENDOR_MASK 0xFFFFFFFF00000000ull +#define ID_9005_GENERIC_MASK 0xFFF0FFFF00000000ull +#define ID_9005_GENERIC_IROC_MASK 0xFF70FFFF00000000ull + +#define ID_AIC7901 0x800F9005FFFF9005ull +#define ID_AHA_29320A 0x8000900500609005ull +#define ID_AHA_29320ALP 0x8017900500449005ull +#define ID_AHA_29320LPE 0x8017900500459005ull + +#define ID_AIC7901A 0x801E9005FFFF9005ull +#define ID_AHA_29320LP 0x8014900500449005ull + +#define ID_AIC7902 0x801F9005FFFF9005ull +#define ID_AIC7902_B 0x801D9005FFFF9005ull +#define ID_AHA_39320 0x8010900500409005ull +#define ID_AHA_29320 0x8012900500429005ull +#define ID_AHA_29320B 0x8013900500439005ull +#define ID_AHA_39320_B 0x8015900500409005ull +#define ID_AHA_39320_B_DELL 0x8015900501681028ull +#define ID_AHA_39320A 0x8016900500409005ull +#define ID_AHA_39320D 0x8011900500419005ull +#define ID_AHA_39320D_B 0x801C900500419005ull +#define ID_AHA_39320D_HP 0x8011900500AC0E11ull +#define ID_AHA_39320D_B_HP 0x801C900500AC0E11ull + +#endif /* _AIC79XX_PCI_H_ */ diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c new file mode 100644 index 000000000..746d0ca2a --- /dev/null +++ b/drivers/scsi/aic7xxx/aic79xx_proc.c @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * String handling code courtesy of Gerard Roudier's + * sym driver. 
+ * + * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_proc.c#19 $ + */ +#include "aic79xx_osm.h" +#include "aic79xx_inline.h" + +static void ahd_dump_target_state(struct ahd_softc *ahd, + struct seq_file *m, + u_int our_id, char channel, + u_int target_id); +static void ahd_dump_device_state(struct seq_file *m, + struct scsi_device *sdev); + +/* + * Table of syncrates that don't follow the "divisible by 4" + * rule. This table will be expanded in future SCSI specs. + */ +static const struct { + u_int period_factor; + u_int period; /* in 100ths of ns */ +} scsi_syncrates[] = { + { 0x08, 625 }, /* FAST-160 */ + { 0x09, 1250 }, /* FAST-80 */ + { 0x0a, 2500 }, /* FAST-40 40MHz */ + { 0x0b, 3030 }, /* FAST-40 33MHz */ + { 0x0c, 5000 } /* FAST-20 */ +}; + +/* + * Return the frequency in kHz corresponding to the given + * sync period factor. + */ +static u_int +ahd_calc_syncsrate(u_int period_factor) +{ + int i; + + /* See if the period is in the "exception" table */ + for (i = 0; i < ARRAY_SIZE(scsi_syncrates); i++) { + + if (period_factor == scsi_syncrates[i].period_factor) { + /* Period in kHz */ + return (100000000 / scsi_syncrates[i].period); + } + } + + /* + * Wasn't in the table, so use the standard + * 4 times conversion. + */ + return (10000000 / (period_factor * 4 * 10)); +} + +static void +ahd_format_transinfo(struct seq_file *m, struct ahd_transinfo *tinfo) +{ + u_int speed; + u_int freq; + u_int mb; + + if (tinfo->period == AHD_PERIOD_UNKNOWN) { + seq_puts(m, "Renegotiation Pending\n"); + return; + } + speed = 3300; + freq = 0; + if (tinfo->offset != 0) { + freq = ahd_calc_syncsrate(tinfo->period); + speed = freq; + } + speed *= (0x01 << tinfo->width); + mb = speed / 1000; + if (mb > 0) + seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000); + else + seq_printf(m, "%dKB/s transfers", speed); + + if (freq != 0) { + int printed_options; + + printed_options = 0; + seq_printf(m, " (%d.%03dMHz", freq / 1000, freq % 1000); + if ((tinfo->ppr_options & MSG_EXT_PPR_RD_STRM) != 0) { + seq_puts(m, " RDSTRM"); + printed_options++; + } + if ((tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { + seq_puts(m, printed_options ? "|DT" : " DT"); + printed_options++; + } + if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { + seq_puts(m, printed_options ? "|IU" : " IU"); + printed_options++; + } + if ((tinfo->ppr_options & MSG_EXT_PPR_RTI) != 0) { + seq_puts(m, printed_options ? "|RTI" : " RTI"); + printed_options++; + } + if ((tinfo->ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) { + seq_puts(m, printed_options ? 
"|QAS" : " QAS"); + printed_options++; + } + } + + if (tinfo->width > 0) { + if (freq != 0) { + seq_puts(m, ", "); + } else { + seq_puts(m, " ("); + } + seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width)); + } else if (freq != 0) { + seq_putc(m, ')'); + } + seq_putc(m, '\n'); +} + +static void +ahd_dump_target_state(struct ahd_softc *ahd, struct seq_file *m, + u_int our_id, char channel, u_int target_id) +{ + struct scsi_target *starget; + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + int lun; + + tinfo = ahd_fetch_transinfo(ahd, channel, our_id, + target_id, &tstate); + seq_printf(m, "Target %d Negotiation Settings\n", target_id); + seq_puts(m, "\tUser: "); + ahd_format_transinfo(m, &tinfo->user); + starget = ahd->platform_data->starget[target_id]; + if (starget == NULL) + return; + + seq_puts(m, "\tGoal: "); + ahd_format_transinfo(m, &tinfo->goal); + seq_puts(m, "\tCurr: "); + ahd_format_transinfo(m, &tinfo->curr); + + for (lun = 0; lun < AHD_NUM_LUNS; lun++) { + struct scsi_device *dev; + + dev = scsi_device_lookup_by_target(starget, lun); + + if (dev == NULL) + continue; + + ahd_dump_device_state(m, dev); + } +} + +static void +ahd_dump_device_state(struct seq_file *m, struct scsi_device *sdev) +{ + struct ahd_linux_device *dev = scsi_transport_device_data(sdev); + + seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n", + sdev->sdev_target->channel + 'A', + sdev->sdev_target->id, (u8)sdev->lun); + + seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued); + seq_printf(m, "\t\tCommands Active %d\n", dev->active); + seq_printf(m, "\t\tCommand Openings %d\n", dev->openings); + seq_printf(m, "\t\tMax Tagged Openings %d\n", dev->maxtags); + seq_printf(m, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen); +} + +int +ahd_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length) +{ + struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata; + ahd_mode_state saved_modes; + int have_seeprom; + u_long s; + int paused; + int written; + + /* Default to failure. */ + written = -EINVAL; + ahd_lock(ahd, &s); + paused = ahd_is_paused(ahd); + if (!paused) + ahd_pause(ahd); + + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + if (length != sizeof(struct seeprom_config)) { + printk("ahd_proc_write_seeprom: incorrect buffer size\n"); + goto done; + } + + have_seeprom = ahd_verify_cksum((struct seeprom_config*)buffer); + if (have_seeprom == 0) { + printk("ahd_proc_write_seeprom: cksum verification failed\n"); + goto done; + } + + have_seeprom = ahd_acquire_seeprom(ahd); + if (!have_seeprom) { + printk("ahd_proc_write_seeprom: No Serial EEPROM\n"); + goto done; + } else { + u_int start_addr; + + if (ahd->seep_config == NULL) { + ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), + GFP_ATOMIC); + if (ahd->seep_config == NULL) { + printk("aic79xx: Unable to allocate serial " + "eeprom buffer. Write failing\n"); + goto done; + } + } + printk("aic79xx: Writing Serial EEPROM\n"); + start_addr = 32 * (ahd->channel - 'A'); + ahd_write_seeprom(ahd, (u_int16_t *)buffer, start_addr, + sizeof(struct seeprom_config)/2); + ahd_read_seeprom(ahd, (uint16_t *)ahd->seep_config, + start_addr, sizeof(struct seeprom_config)/2, + /*ByteStream*/FALSE); + ahd_release_seeprom(ahd); + written = length; + } + +done: + ahd_restore_modes(ahd, saved_modes); + if (!paused) + ahd_unpause(ahd); + ahd_unlock(ahd, &s); + return (written); +} +/* + * Return information to handle /proc support for the driver. 
+ */ +int +ahd_linux_show_info(struct seq_file *m, struct Scsi_Host *shost) +{ + struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata; + char ahd_info[256]; + u_int max_targ; + u_int i; + + seq_printf(m, "Adaptec AIC79xx driver version: %s\n", + AIC79XX_DRIVER_VERSION); + seq_printf(m, "%s\n", ahd->description); + ahd_controller_info(ahd, ahd_info); + seq_printf(m, "%s\n", ahd_info); + seq_printf(m, "Allocated SCBs: %d, SG List Length: %d\n\n", + ahd->scb_data.numscbs, AHD_NSEG); + + max_targ = 16; + + if (ahd->seep_config == NULL) + seq_puts(m, "No Serial EEPROM\n"); + else { + seq_puts(m, "Serial EEPROM:\n"); + for (i = 0; i < sizeof(*ahd->seep_config)/2; i++) { + if (((i % 8) == 0) && (i != 0)) { + seq_putc(m, '\n'); + } + seq_printf(m, "0x%.4x ", + ((uint16_t*)ahd->seep_config)[i]); + } + seq_putc(m, '\n'); + } + seq_putc(m, '\n'); + + if ((ahd->features & AHD_WIDE) == 0) + max_targ = 8; + + for (i = 0; i < max_targ; i++) { + + ahd_dump_target_state(ahd, m, ahd->our_id, 'A', + /*target_id*/i); + } + return 0; +} diff --git a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped new file mode 100644 index 000000000..ddcd5a770 --- /dev/null +++ b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped @@ -0,0 +1,1810 @@ +/* + * DO NOT EDIT - This file is automatically generated + * from the following source files: + * + * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $ + * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $ + */ +typedef int (ahd_reg_print_t)(u_int, u_int *, u_int); +typedef struct ahd_reg_parse_entry { + char *name; + uint8_t value; + uint8_t mask; +} ahd_reg_parse_entry_t; + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_intstat_print; +#else +#define ahd_intstat_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "INTSTAT", 0x01, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_hs_mailbox_print; +#else +#define ahd_hs_mailbox_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "HS_MAILBOX", 0x0b, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_seqintstat_print; +#else +#define ahd_seqintstat_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SEQINTSTAT", 0x0c, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_intctl_print; +#else +#define ahd_intctl_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "INTCTL", 0x18, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_dfcntrl_print; +#else +#define ahd_dfcntrl_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "DFCNTRL", 0x19, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_dfstatus_print; +#else +#define ahd_dfstatus_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "DFSTATUS", 0x1a, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_sg_cache_shadow_print; +#else +#define ahd_sg_cache_shadow_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SG_CACHE_SHADOW", 0x1b, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_scsiseq0_print; +#else +#define ahd_scsiseq0_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SCSISEQ0", 0x3a, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_scsiseq1_print; +#else +#define ahd_scsiseq1_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SCSISEQ1", 0x3b, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS 
+ahd_reg_print_t ahd_dffstat_print; +#else +#define ahd_dffstat_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "DFFSTAT", 0x3f, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_scsisigi_print; +#else +#define ahd_scsisigi_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SCSISIGI", 0x41, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_scsiphase_print; +#else +#define ahd_scsiphase_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SCSIPHASE", 0x42, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_scsibus_print; +#else +#define ahd_scsibus_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SCSIBUS", 0x46, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_selid_print; +#else +#define ahd_selid_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SELID", 0x49, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_simode0_print; +#else +#define ahd_simode0_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SIMODE0", 0x4b, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_sstat0_print; +#else +#define ahd_sstat0_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SSTAT0", 0x4b, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_sstat1_print; +#else +#define ahd_sstat1_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SSTAT1", 0x4c, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_sstat2_print; +#else +#define ahd_sstat2_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_perrdiag_print; +#else +#define ahd_perrdiag_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "PERRDIAG", 0x4e, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_soffcnt_print; +#else +#define ahd_soffcnt_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SOFFCNT", 0x4f, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_lqistat0_print; +#else +#define ahd_lqistat0_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "LQISTAT0", 0x50, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_lqistat1_print; +#else +#define ahd_lqistat1_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "LQISTAT1", 0x51, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_lqistat2_print; +#else +#define ahd_lqistat2_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "LQISTAT2", 0x52, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_sstat3_print; +#else +#define ahd_sstat3_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SSTAT3", 0x53, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_lqostat0_print; +#else +#define ahd_lqostat0_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "LQOSTAT0", 0x54, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_lqostat1_print; +#else +#define ahd_lqostat1_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "LQOSTAT1", 0x55, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_lqostat2_print; +#else +#define 
ahd_lqostat2_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "LQOSTAT2", 0x56, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_simode1_print; +#else +#define ahd_simode1_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SIMODE1", 0x57, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_dffsxfrctl_print; +#else +#define ahd_dffsxfrctl_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "DFFSXFRCTL", 0x5a, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_seqintsrc_print; +#else +#define ahd_seqintsrc_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SEQINTSRC", 0x5b, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_seqimode_print; +#else +#define ahd_seqimode_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SEQIMODE", 0x5c, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_mdffstat_print; +#else +#define ahd_mdffstat_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_seloid_print; +#else +#define ahd_seloid_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SELOID", 0x6b, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_sg_state_print; +#else +#define ahd_sg_state_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SG_STATE", 0xa6, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_ccscbctl_print; +#else +#define ahd_ccscbctl_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "CCSCBCTL", 0xad, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_ccsgctl_print; +#else +#define ahd_ccsgctl_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "CCSGCTL", 0xad, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_seqctl0_print; +#else +#define ahd_seqctl0_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SEQCTL0", 0xd6, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_seqintctl_print; +#else +#define ahd_seqintctl_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SEQINTCTL", 0xd9, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_sram_base_print; +#else +#define ahd_sram_base_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_qfreeze_count_print; +#else +#define ahd_qfreeze_count_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "QFREEZE_COUNT", 0x132, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_kernel_qfreeze_count_print; +#else +#define ahd_kernel_qfreeze_count_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "KERNEL_QFREEZE_COUNT", 0x134, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_saved_mode_print; +#else +#define ahd_saved_mode_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SAVED_MODE", 0x136, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_seq_flags_print; +#else +#define ahd_seq_flags_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SEQ_FLAGS", 0x139, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t 
ahd_lastphase_print; +#else +#define ahd_lastphase_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "LASTPHASE", 0x13c, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_seq_flags2_print; +#else +#define ahd_seq_flags2_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SEQ_FLAGS2", 0x14d, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_mk_message_scb_print; +#else +#define ahd_mk_message_scb_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "MK_MESSAGE_SCB", 0x160, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_mk_message_scsiid_print; +#else +#define ahd_mk_message_scsiid_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "MK_MESSAGE_SCSIID", 0x162, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_scb_base_print; +#else +#define ahd_scb_base_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_scb_control_print; +#else +#define ahd_scb_control_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SCB_CONTROL", 0x192, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahd_reg_print_t ahd_scb_scsiid_print; +#else +#define ahd_scb_scsiid_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SCB_SCSIID", 0x193, regvalue, cur_col, wrap) +#endif + + +#define MODE_PTR 0x00 +#define DST_MODE 0x70 +#define SRC_MODE 0x07 + +#define INTSTAT 0x01 +#define INT_PEND 0xff +#define HWERRINT 0x80 +#define BRKADRINT 0x40 +#define SWTMINT 0x20 +#define PCIINT 0x10 +#define SCSIINT 0x08 +#define SEQINT 0x04 +#define CMDCMPLT 0x02 +#define SPLTINT 0x01 + +#define SEQINTCODE 0x02 +#define BAD_SCB_STATUS 0x1a +#define SAW_HWERR 0x19 +#define TRACEPOINT3 0x18 +#define TRACEPOINT2 0x17 +#define TRACEPOINT1 0x16 +#define TRACEPOINT0 0x15 +#define TASKMGMT_CMD_CMPLT_OKAY 0x14 +#define TASKMGMT_FUNC_COMPLETE 0x13 +#define ENTERING_NONPACK 0x12 +#define CFG4OVERRUN 0x11 +#define STATUS_OVERRUN 0x10 +#define CFG4ISTAT_INTR 0x0f +#define INVALID_SEQINT 0x0e +#define ILLEGAL_PHASE 0x0d +#define DUMP_CARD_STATE 0x0c +#define MISSED_BUSFREE 0x0b +#define MKMSG_FAILED 0x0a +#define DATA_OVERRUN 0x09 +#define BAD_STATUS 0x08 +#define HOST_MSG_LOOP 0x07 +#define PDATA_REINIT 0x06 +#define IGN_WIDE_RES 0x05 +#define NO_MATCH 0x04 +#define PROTO_VIOLATION 0x03 +#define SEND_REJECT 0x02 +#define BAD_PHASE 0x01 +#define NO_SEQINT 0x00 + +#define CLRINT 0x03 +#define CLRHWERRINT 0x80 +#define CLRBRKADRINT 0x40 +#define CLRSWTMINT 0x20 +#define CLRPCIINT 0x10 +#define CLRSCSIINT 0x08 +#define CLRSEQINT 0x04 +#define CLRCMDINT 0x02 +#define CLRSPLTINT 0x01 + +#define CLRERR 0x04 +#define CLRCIOPARERR 0x80 +#define CLRCIOACCESFAIL 0x40 +#define CLRMPARERR 0x20 +#define CLRDPARERR 0x10 +#define CLRSQPARERR 0x08 +#define CLRILLOPCODE 0x04 +#define CLRDSCTMOUT 0x02 + +#define ERROR 0x04 +#define CIOPARERR 0x80 +#define CIOACCESFAIL 0x40 +#define MPARERR 0x20 +#define DPARERR 0x10 +#define SQPARERR 0x08 +#define ILLOPCODE 0x04 +#define DSCTMOUT 0x02 + +#define HCNTRL 0x05 +#define SEQ_RESET 0x80 +#define POWRDN 0x40 +#define SWINT 0x10 +#define SWTIMER_START_B 0x08 +#define PAUSE 0x04 +#define INTEN 0x02 +#define CHIPRST 0x01 +#define CHIPRSTACK 0x01 + +#define HNSCB_QOFF 0x06 + +#define HESCB_QOFF 0x08 + +#define HS_MAILBOX 0x0b +#define HOST_TQINPOS 0x80 +#define ENINT_COALESCE 0x40 + +#define SEQINTSTAT 0x0c +#define 
SEQ_SWTMRTO 0x10 +#define SEQ_SEQINT 0x08 +#define SEQ_SCSIINT 0x04 +#define SEQ_PCIINT 0x02 +#define SEQ_SPLTINT 0x01 + +#define CLRSEQINTSTAT 0x0c +#define CLRSEQ_SWTMRTO 0x10 +#define CLRSEQ_SEQINT 0x08 +#define CLRSEQ_SCSIINT 0x04 +#define CLRSEQ_PCIINT 0x02 +#define CLRSEQ_SPLTINT 0x01 + +#define SWTIMER 0x0e + +#define SNSCB_QOFF 0x10 + +#define SESCB_QOFF 0x12 + +#define SDSCB_QOFF 0x14 + +#define QOFF_CTLSTA 0x16 +#define EMPTY_SCB_AVAIL 0x80 +#define NEW_SCB_AVAIL 0x40 +#define SDSCB_ROLLOVR 0x20 +#define HS_MAILBOX_ACT 0x10 +#define SCB_QSIZE 0x0f +#define SCB_QSIZE_16384 0x0c +#define SCB_QSIZE_8192 0x0b +#define SCB_QSIZE_4096 0x0a +#define SCB_QSIZE_2048 0x09 +#define SCB_QSIZE_1024 0x08 +#define SCB_QSIZE_512 0x07 +#define SCB_QSIZE_256 0x06 +#define SCB_QSIZE_128 0x05 +#define SCB_QSIZE_64 0x04 +#define SCB_QSIZE_32 0x03 +#define SCB_QSIZE_16 0x02 +#define SCB_QSIZE_8 0x01 +#define SCB_QSIZE_4 0x00 + +#define INTCTL 0x18 +#define SWTMINTMASK 0x80 +#define SWTMINTEN 0x40 +#define SWTIMER_START 0x20 +#define AUTOCLRCMDINT 0x10 +#define PCIINTEN 0x08 +#define SCSIINTEN 0x04 +#define SEQINTEN 0x02 +#define SPLTINTEN 0x01 + +#define DFCNTRL 0x19 +#define SCSIENWRDIS 0x40 +#define SCSIENACK 0x20 +#define DIRECTIONACK 0x04 +#define FIFOFLUSHACK 0x02 +#define DIRECTIONEN 0x01 + +#define DSCOMMAND0 0x19 +#define CACHETHEN 0x80 +#define DPARCKEN 0x40 +#define MPARCKEN 0x20 +#define EXTREQLCK 0x10 +#define DISABLE_TWATE 0x02 +#define CIOPARCKEN 0x01 + +#define DFSTATUS 0x1a +#define PRELOAD_AVAIL 0x80 +#define PKT_PRELOAD_AVAIL 0x40 +#define MREQPEND 0x10 +#define HDONE 0x08 +#define DFTHRESH 0x04 +#define FIFOFULL 0x02 +#define FIFOEMP 0x01 + +#define ARBCTL 0x1b +#define RESET_HARB 0x80 +#define RETRY_SWEN 0x08 +#define USE_TIME 0x07 + +#define SG_CACHE_SHADOW 0x1b +#define ODD_SEG 0x04 +#define LAST_SEG 0x02 +#define LAST_SEG_DONE 0x01 + +#define SG_CACHE_PRE 0x1b + +#define TYPEPTR 0x20 + +#define LQIN 0x20 + +#define TAGPTR 0x21 + +#define LUNPTR 0x22 + +#define DATALENPTR 0x23 + +#define STATLENPTR 0x24 + +#define CMDLENPTR 0x25 + +#define ATTRPTR 0x26 + +#define FLAGPTR 0x27 + +#define CMDPTR 0x28 + +#define QNEXTPTR 0x29 + +#define IDPTR 0x2a + +#define ABRTBYTEPTR 0x2b + +#define ABRTBITPTR 0x2c + +#define MAXCMDBYTES 0x2d + +#define MAXCMD2RCV 0x2e + +#define SHORTTHRESH 0x2f + +#define LUNLEN 0x30 +#define TLUNLEN 0xf0 +#define ILUNLEN 0x0f + +#define CDBLIMIT 0x31 + +#define MAXCMD 0x32 + +#define MAXCMDCNT 0x33 + +#define LQRSVD01 0x34 + +#define LQRSVD16 0x35 + +#define LQRSVD17 0x36 + +#define CMDRSVD0 0x37 + +#define LQCTL0 0x38 +#define LQITARGCLT 0xc0 +#define LQIINITGCLT 0x30 +#define LQ0TARGCLT 0x0c +#define LQ0INITGCLT 0x03 + +#define LQCTL1 0x38 +#define PCI2PCI 0x04 +#define SINGLECMD 0x02 +#define ABORTPENDING 0x01 + +#define LQCTL2 0x39 +#define LQIRETRY 0x80 +#define LQICONTINUE 0x40 +#define LQITOIDLE 0x20 +#define LQIPAUSE 0x10 +#define LQORETRY 0x08 +#define LQOCONTINUE 0x04 +#define LQOTOIDLE 0x02 +#define LQOPAUSE 0x01 + +#define SCSBIST0 0x39 +#define GSBISTERR 0x40 +#define GSBISTDONE 0x20 +#define GSBISTRUN 0x10 +#define OSBISTERR 0x04 +#define OSBISTDONE 0x02 +#define OSBISTRUN 0x01 + +#define SCSISEQ0 0x3a +#define TEMODEO 0x80 +#define ENSELO 0x40 +#define ENARBO 0x20 +#define FORCEBUSFREE 0x10 +#define SCSIRSTO 0x01 + +#define SCSBIST1 0x3a +#define NTBISTERR 0x04 +#define NTBISTDONE 0x02 +#define NTBISTRUN 0x01 + +#define SCSISEQ1 0x3b + +#define BUSINITID 0x3c + +#define SXFRCTL0 0x3c +#define DFON 0x80 +#define DFPEXP 0x40 +#define BIOSCANCELEN 
0x10 +#define SPIOEN 0x08 + +#define DLCOUNT 0x3c + +#define SXFRCTL1 0x3d +#define BITBUCKET 0x80 +#define ENSACHK 0x40 +#define ENSPCHK 0x20 +#define STIMESEL 0x18 +#define ENSTIMER 0x04 +#define ACTNEGEN 0x02 +#define STPWEN 0x01 + +#define BUSTARGID 0x3e + +#define SXFRCTL2 0x3e +#define AUTORSTDIS 0x10 +#define CMDDMAEN 0x08 +#define ASU 0x07 + +#define DFFSTAT 0x3f +#define CURRFIFO 0x03 +#define FIFO1FREE 0x20 +#define FIFO0FREE 0x10 +#define CURRFIFO_NONE 0x03 +#define CURRFIFO_1 0x01 +#define CURRFIFO_0 0x00 + +#define MULTARGID 0x40 + +#define SCSISIGO 0x40 +#define CDO 0x80 +#define IOO 0x40 +#define MSGO 0x20 +#define ATNO 0x10 +#define SELO 0x08 +#define BSYO 0x04 +#define REQO 0x02 +#define ACKO 0x01 + +#define SCSISIGI 0x41 +#define ATNI 0x10 +#define SELI 0x08 +#define BSYI 0x04 +#define REQI 0x02 +#define ACKI 0x01 + +#define SCSIPHASE 0x42 +#define STATUS_PHASE 0x20 +#define COMMAND_PHASE 0x10 +#define MSG_IN_PHASE 0x08 +#define MSG_OUT_PHASE 0x04 +#define DATA_PHASE_MASK 0x03 +#define DATA_IN_PHASE 0x02 +#define DATA_OUT_PHASE 0x01 + +#define SCSIDAT0_IMG 0x43 + +#define SCSIDAT 0x44 + +#define SCSIBUS 0x46 + +#define TARGIDIN 0x48 +#define CLKOUT 0x80 +#define TARGID 0x0f + +#define SELID 0x49 +#define SELID_MASK 0xf0 +#define ONEBIT 0x08 + +#define OPTIONMODE 0x4a +#define OPTIONMODE_DEFAULTS 0x02 +#define BIOSCANCTL 0x80 +#define AUTOACKEN 0x40 +#define BIASCANCTL 0x20 +#define BUSFREEREV 0x10 +#define ENDGFORMCHK 0x04 +#define AUTO_MSGOUT_DE 0x02 + +#define SBLKCTL 0x4a +#define DIAGLEDEN 0x80 +#define DIAGLEDON 0x40 +#define ENAB40 0x08 +#define ENAB20 0x04 +#define SELWIDE 0x02 + +#define SIMODE0 0x4b +#define ENSELDO 0x40 +#define ENSELDI 0x20 +#define ENSELINGO 0x10 +#define ENIOERR 0x08 +#define ENOVERRUN 0x04 +#define ENSPIORDY 0x02 +#define ENARBDO 0x01 + +#define SSTAT0 0x4b +#define TARGET 0x80 +#define SELDO 0x40 +#define SELDI 0x20 +#define SELINGO 0x10 +#define IOERR 0x08 +#define OVERRUN 0x04 +#define SPIORDY 0x02 +#define ARBDO 0x01 + +#define CLRSINT0 0x4b +#define CLRSELDO 0x40 +#define CLRSELDI 0x20 +#define CLRSELINGO 0x10 +#define CLRIOERR 0x08 +#define CLROVERRUN 0x04 +#define CLRSPIORDY 0x02 +#define CLRARBDO 0x01 + +#define SSTAT1 0x4c +#define SELTO 0x80 +#define ATNTARG 0x40 +#define SCSIRSTI 0x20 +#define PHASEMIS 0x10 +#define BUSFREE 0x08 +#define SCSIPERR 0x04 +#define STRB2FAST 0x02 +#define REQINIT 0x01 + +#define CLRSINT1 0x4c +#define CLRSELTIMEO 0x80 +#define CLRATNO 0x40 +#define CLRSCSIRSTI 0x20 +#define CLRBUSFREE 0x08 +#define CLRSCSIPERR 0x04 +#define CLRSTRB2FAST 0x02 +#define CLRREQINIT 0x01 + +#define SIMODE2 0x4d +#define ENWIDE_RES 0x04 +#define ENSDONE 0x02 +#define ENDMADONE 0x01 + +#define SSTAT2 0x4d +#define BUSFREETIME 0xc0 +#define NONPACKREQ 0x20 +#define EXP_ACTIVE 0x10 +#define BSYX 0x08 +#define WIDE_RES 0x04 +#define SDONE 0x02 +#define DMADONE 0x01 +#define BUSFREE_DFF1 0xc0 +#define BUSFREE_DFF0 0x80 +#define BUSFREE_LQO 0x40 + +#define CLRSINT2 0x4d +#define CLRNONPACKREQ 0x20 +#define CLRWIDE_RES 0x04 +#define CLRSDONE 0x02 +#define CLRDMADONE 0x01 + +#define PERRDIAG 0x4e +#define HIZERO 0x80 +#define HIPERR 0x40 +#define PREVPHASE 0x20 +#define PARITYERR 0x10 +#define AIPERR 0x08 +#define CRCERR 0x04 +#define DGFORMERR 0x02 +#define DTERR 0x01 + +#define LQISTATE 0x4e + +#define LQOSTATE 0x4f + +#define SOFFCNT 0x4f + +#define LQISTAT0 0x50 +#define LQIATNQAS 0x20 +#define LQICRCT1 0x10 +#define LQICRCT2 0x08 +#define LQIBADLQT 0x04 +#define LQIATNLQ 0x02 +#define LQIATNCMD 0x01 + +#define LQIMODE0 0x50 
+#define ENLQIATNQASK 0x20 +#define ENLQICRCT1 0x10 +#define ENLQICRCT2 0x08 +#define ENLQIBADLQT 0x04 +#define ENLQIATNLQ 0x02 +#define ENLQIATNCMD 0x01 + +#define CLRLQIINT0 0x50 +#define CLRLQIATNQAS 0x20 +#define CLRLQICRCT1 0x10 +#define CLRLQICRCT2 0x08 +#define CLRLQIBADLQT 0x04 +#define CLRLQIATNLQ 0x02 +#define CLRLQIATNCMD 0x01 + +#define LQIMODE1 0x51 +#define ENLQIPHASE_LQ 0x80 +#define ENLQIPHASE_NLQ 0x40 +#define ENLIQABORT 0x20 +#define ENLQICRCI_LQ 0x10 +#define ENLQICRCI_NLQ 0x08 +#define ENLQIBADLQI 0x04 +#define ENLQIOVERI_LQ 0x02 +#define ENLQIOVERI_NLQ 0x01 + +#define LQISTAT1 0x51 +#define LQIPHASE_LQ 0x80 +#define LQIPHASE_NLQ 0x40 +#define LQIABORT 0x20 +#define LQICRCI_LQ 0x10 +#define LQICRCI_NLQ 0x08 +#define LQIBADLQI 0x04 +#define LQIOVERI_LQ 0x02 +#define LQIOVERI_NLQ 0x01 + +#define CLRLQIINT1 0x51 +#define CLRLQIPHASE_LQ 0x80 +#define CLRLQIPHASE_NLQ 0x40 +#define CLRLIQABORT 0x20 +#define CLRLQICRCI_LQ 0x10 +#define CLRLQICRCI_NLQ 0x08 +#define CLRLQIBADLQI 0x04 +#define CLRLQIOVERI_LQ 0x02 +#define CLRLQIOVERI_NLQ 0x01 + +#define LQISTAT2 0x52 +#define PACKETIZED 0x80 +#define LQIPHASE_OUTPKT 0x40 +#define LQIWORKONLQ 0x20 +#define LQIWAITFIFO 0x10 +#define LQISTOPPKT 0x08 +#define LQISTOPLQ 0x04 +#define LQISTOPCMD 0x02 +#define LQIGSAVAIL 0x01 + +#define SIMODE3 0x53 +#define ENNTRAMPERR 0x02 +#define ENOSRAMPERR 0x01 + +#define SSTAT3 0x53 +#define NTRAMPERR 0x02 +#define OSRAMPERR 0x01 + +#define CLRSINT3 0x53 +#define CLRNTRAMPERR 0x02 +#define CLROSRAMPERR 0x01 + +#define CLRLQOINT0 0x54 +#define CLRLQOTARGSCBPERR 0x10 +#define CLRLQOSTOPT2 0x08 +#define CLRLQOATNLQ 0x04 +#define CLRLQOATNPKT 0x02 +#define CLRLQOTCRC 0x01 + +#define LQOSTAT0 0x54 +#define LQOTARGSCBPERR 0x10 +#define LQOSTOPT2 0x08 +#define LQOATNLQ 0x04 +#define LQOATNPKT 0x02 +#define LQOTCRC 0x01 + +#define LQOMODE0 0x54 +#define ENLQOTARGSCBPERR 0x10 +#define ENLQOSTOPT2 0x08 +#define ENLQOATNLQ 0x04 +#define ENLQOATNPKT 0x02 +#define ENLQOTCRC 0x01 + +#define LQOMODE1 0x55 +#define ENLQOINITSCBPERR 0x10 +#define ENLQOSTOPI2 0x08 +#define ENLQOBADQAS 0x04 +#define ENLQOBUSFREE 0x02 +#define ENLQOPHACHGINPKT 0x01 + +#define CLRLQOINT1 0x55 +#define CLRLQOINITSCBPERR 0x10 +#define CLRLQOSTOPI2 0x08 +#define CLRLQOBADQAS 0x04 +#define CLRLQOBUSFREE 0x02 +#define CLRLQOPHACHGINPKT 0x01 + +#define LQOSTAT1 0x55 +#define LQOINITSCBPERR 0x10 +#define LQOSTOPI2 0x08 +#define LQOBADQAS 0x04 +#define LQOBUSFREE 0x02 +#define LQOPHACHGINPKT 0x01 + +#define LQOSTAT2 0x56 +#define LQOPKT 0xe0 +#define LQOWAITFIFO 0x10 +#define LQOPHACHGOUTPKT 0x02 +#define LQOSTOP0 0x01 + +#define OS_SPACE_CNT 0x56 + +#define SIMODE1 0x57 +#define ENSELTIMO 0x80 +#define ENATNTARG 0x40 +#define ENSCSIRST 0x20 +#define ENPHASEMIS 0x10 +#define ENBUSFREE 0x08 +#define ENSCSIPERR 0x04 +#define ENSTRB2FAST 0x02 +#define ENREQINIT 0x01 + +#define GSFIFO 0x58 + +#define DFFSXFRCTL 0x5a +#define DFFBITBUCKET 0x08 +#define CLRSHCNT 0x04 +#define CLRCHN 0x02 +#define RSTCHN 0x01 + +#define LQOSCSCTL 0x5a +#define LQOH2A_VERSION 0x80 +#define LQOBUSETDLY 0x40 +#define LQONOHOLDLACK 0x02 +#define LQONOCHKOVER 0x01 + +#define NEXTSCB 0x5a + +#define CLRSEQINTSRC 0x5b +#define CLRCTXTDONE 0x40 +#define CLRSAVEPTRS 0x20 +#define CLRCFG4DATA 0x10 +#define CLRCFG4ISTAT 0x08 +#define CLRCFG4TSTAT 0x04 +#define CLRCFG4ICMD 0x02 +#define CLRCFG4TCMD 0x01 + +#define SEQINTSRC 0x5b +#define CTXTDONE 0x40 +#define SAVEPTRS 0x20 +#define CFG4DATA 0x10 +#define CFG4ISTAT 0x08 +#define CFG4TSTAT 0x04 +#define CFG4ICMD 0x02 +#define 
CFG4TCMD 0x01 + +#define SEQIMODE 0x5c +#define ENCTXTDONE 0x40 +#define ENSAVEPTRS 0x20 +#define ENCFG4DATA 0x10 +#define ENCFG4ISTAT 0x08 +#define ENCFG4TSTAT 0x04 +#define ENCFG4ICMD 0x02 +#define ENCFG4TCMD 0x01 + +#define CURRSCB 0x5c + +#define CRCCONTROL 0x5d +#define CRCVALCHKEN 0x40 + +#define MDFFSTAT 0x5d +#define SHCNTNEGATIVE 0x40 +#define SHCNTMINUS1 0x20 +#define LASTSDONE 0x10 +#define SHVALID 0x08 +#define DLZERO 0x04 +#define DATAINFIFO 0x02 +#define FIFOFREE 0x01 + +#define DFFTAG 0x5e + +#define SCSITEST 0x5e +#define CNTRTEST 0x08 +#define SEL_TXPLL_DEBUG 0x04 + +#define LASTSCB 0x5e + +#define IOPDNCTL 0x5f +#define DISABLE_OE 0x80 +#define PDN_IDIST 0x04 +#define PDN_DIFFSENSE 0x01 + +#define DGRPCRCI 0x60 + +#define NEGOADDR 0x60 + +#define SHADDR 0x60 + +#define NEGPERIOD 0x61 + +#define NEGOFFSET 0x62 + +#define PACKCRCI 0x62 + +#define NEGPPROPTS 0x63 +#define PPROPT_PACE 0x08 +#define PPROPT_QAS 0x04 +#define PPROPT_DT 0x02 +#define PPROPT_IUT 0x01 + +#define NEGCONOPTS 0x64 +#define ENSNAPSHOT 0x40 +#define RTI_WRTDIS 0x20 +#define RTI_OVRDTRN 0x10 +#define ENSLOWCRC 0x08 +#define ENAUTOATNI 0x04 +#define ENAUTOATNO 0x02 +#define WIDEXFER 0x01 + +#define ANNEXCOL 0x65 + +#define ANNEXDAT 0x66 + +#define SCSCHKN 0x66 +#define BIDICHKDIS 0x80 +#define STSELSKIDDIS 0x40 +#define CURRFIFODEF 0x20 +#define WIDERESEN 0x10 +#define SDONEMSKDIS 0x08 +#define DFFACTCLR 0x04 +#define SHVALIDSTDIS 0x02 +#define LSTSGCLRDIS 0x01 + +#define IOWNID 0x67 + +#define PLL960CTL0 0x68 + +#define SHCNT 0x68 + +#define PLL960CTL1 0x69 + +#define TOWNID 0x69 + +#define PLL960CNT0 0x6a + +#define XSIG 0x6a + +#define SELOID 0x6b + +#define FAIRNESS 0x6c + +#define PLL400CTL0 0x6c +#define PLL_VCOSEL 0x80 +#define PLL_PWDN 0x40 +#define PLL_NS 0x30 +#define PLL_ENLUD 0x08 +#define PLL_ENLPF 0x04 +#define PLL_DLPF 0x02 +#define PLL_ENFBM 0x01 + +#define PLL400CTL1 0x6d +#define PLL_CNTEN 0x80 +#define PLL_CNTCLR 0x40 +#define PLL_RST 0x01 + +#define UNFAIRNESS 0x6e + +#define PLL400CNT0 0x6e + +#define HADDR 0x70 + +#define HODMAADR 0x70 + +#define PLLDELAY 0x70 +#define SPLIT_DROP_REQ 0x80 + +#define HCNT 0x78 + +#define HODMACNT 0x78 + +#define HODMAEN 0x7a + +#define SGHADDR 0x7c + +#define SCBHADDR 0x7c + +#define SGHCNT 0x84 + +#define SCBHCNT 0x84 + +#define DFF_THRSH 0x88 +#define WR_DFTHRSH 0x70 +#define RD_DFTHRSH 0x07 +#define WR_DFTHRSH_MAX 0x70 +#define WR_DFTHRSH_90 0x60 +#define WR_DFTHRSH_85 0x50 +#define WR_DFTHRSH_75 0x40 +#define WR_DFTHRSH_63 0x30 +#define WR_DFTHRSH_50 0x20 +#define WR_DFTHRSH_25 0x10 +#define RD_DFTHRSH_MAX 0x07 +#define RD_DFTHRSH_90 0x06 +#define RD_DFTHRSH_85 0x05 +#define RD_DFTHRSH_75 0x04 +#define RD_DFTHRSH_63 0x03 +#define RD_DFTHRSH_50 0x02 +#define RD_DFTHRSH_25 0x01 +#define RD_DFTHRSH_MIN 0x00 +#define WR_DFTHRSH_MIN 0x00 + +#define ROMADDR 0x8a + +#define ROMCNTRL 0x8d +#define ROMOP 0xe0 +#define ROMSPD 0x18 +#define REPEAT 0x02 +#define RDY 0x01 + +#define ROMDATA 0x8e + +#define CMCRXMSG0 0x90 + +#define OVLYRXMSG0 0x90 + +#define DCHRXMSG0 0x90 + +#define ROENABLE 0x90 +#define MSIROEN 0x20 +#define OVLYROEN 0x10 +#define CMCROEN 0x08 +#define SGROEN 0x04 +#define DCH1ROEN 0x02 +#define DCH0ROEN 0x01 + +#define OVLYRXMSG1 0x91 + +#define CMCRXMSG1 0x91 + +#define DCHRXMSG1 0x91 + +#define NSENABLE 0x91 +#define MSINSEN 0x20 +#define OVLYNSEN 0x10 +#define CMCNSEN 0x08 +#define SGNSEN 0x04 +#define DCH1NSEN 0x02 +#define DCH0NSEN 0x01 + +#define DCHRXMSG2 0x92 + +#define CMCRXMSG2 0x92 + +#define OST 0x92 + +#define OVLYRXMSG2 0x92 
+ +#define DCHRXMSG3 0x93 + +#define OVLYRXMSG3 0x93 + +#define CMCRXMSG3 0x93 + +#define PCIXCTL 0x93 +#define SERRPULSE 0x80 +#define UNEXPSCIEN 0x20 +#define SPLTSMADIS 0x10 +#define SPLTSTADIS 0x08 +#define SRSPDPEEN 0x04 +#define TSCSERREN 0x02 +#define CMPABCDIS 0x01 + +#define CMCSEQBCNT 0x94 + +#define OVLYSEQBCNT 0x94 + +#define DCHSEQBCNT 0x94 + +#define DCHSPLTSTAT0 0x96 + +#define OVLYSPLTSTAT0 0x96 + +#define CMCSPLTSTAT0 0x96 + +#define OVLYSPLTSTAT1 0x97 + +#define DCHSPLTSTAT1 0x97 + +#define CMCSPLTSTAT1 0x97 + +#define SGRXMSG0 0x98 +#define CDNUM 0xf8 +#define CFNUM 0x07 + +#define SLVSPLTOUTADR0 0x98 +#define LOWER_ADDR 0x7f + +#define SGRXMSG1 0x99 +#define CBNUM 0xff + +#define SLVSPLTOUTADR1 0x99 +#define REQ_DNUM 0xf8 +#define REQ_FNUM 0x07 + +#define SGRXMSG2 0x9a +#define MINDEX 0xff + +#define SLVSPLTOUTADR2 0x9a +#define REQ_BNUM 0xff + +#define SGRXMSG3 0x9b +#define MCLASS 0x0f + +#define SLVSPLTOUTADR3 0x9b +#define TAG_NUM 0x1f +#define RLXORD 0x10 + +#define SLVSPLTOUTATTR0 0x9c +#define LOWER_BCNT 0xff + +#define SGSEQBCNT 0x9c + +#define SLVSPLTOUTATTR1 0x9d +#define CMPLT_DNUM 0xf8 +#define CMPLT_FNUM 0x07 + +#define SGSPLTSTAT0 0x9e +#define STAETERM 0x80 +#define SCBCERR 0x40 +#define SCADERR 0x20 +#define SCDATBUCKET 0x10 +#define CNTNOTCMPLT 0x08 +#define RXOVRUN 0x04 +#define RXSCEMSG 0x02 +#define RXSPLTRSP 0x01 + +#define SLVSPLTOUTATTR2 0x9e +#define CMPLT_BNUM 0xff + +#define SGSPLTSTAT1 0x9f +#define RXDATABUCKET 0x01 + +#define SFUNCT 0x9f +#define TEST_GROUP 0xf0 +#define TEST_NUM 0x0f + +#define DF0PCISTAT 0xa0 + +#define REG0 0xa0 + +#define DF1PCISTAT 0xa1 + +#define SGPCISTAT 0xa2 + +#define REG1 0xa2 + +#define CMCPCISTAT 0xa3 + +#define OVLYPCISTAT 0xa4 +#define SCAAPERR 0x08 +#define RDPERR 0x04 + +#define REG_ISR 0xa4 + +#define SG_STATE 0xa6 +#define FETCH_INPROG 0x04 +#define LOADING_NEEDED 0x02 +#define SEGS_AVAIL 0x01 + +#define MSIPCISTAT 0xa6 +#define RMA 0x20 +#define RTA 0x10 +#define CLRPENDMSI 0x08 +#define DPR 0x01 + +#define DATA_COUNT_ODD 0xa7 + +#define TARGPCISTAT 0xa7 +#define DPE 0x80 +#define SSE 0x40 +#define STA 0x08 +#define TWATERR 0x02 + +#define SCBPTR 0xa8 + +#define CCSCBACNT 0xab + +#define SCBAUTOPTR 0xab +#define AUSCBPTR_EN 0x80 +#define SCBPTR_ADDR 0x38 +#define SCBPTR_OFF 0x07 + +#define CCSGADDR 0xac + +#define CCSCBADDR 0xac + +#define CCSCBADR_BK 0xac + +#define CMC_RAMBIST 0xad +#define SG_ELEMENT_SIZE 0x80 +#define SCBRAMBIST_FAIL 0x40 +#define SG_BIST_FAIL 0x20 +#define SG_BIST_EN 0x10 +#define CMC_BUFFER_BIST_FAIL 0x02 +#define CMC_BUFFER_BIST_EN 0x01 + +#define CCSCBCTL 0xad +#define CCSCBDONE 0x80 +#define ARRDONE 0x40 +#define CCARREN 0x10 +#define CCSCBEN 0x08 +#define CCSCBDIR 0x04 +#define CCSCBRESET 0x01 + +#define CCSGCTL 0xad +#define CCSGEN 0x0c +#define CCSGDONE 0x80 +#define SG_CACHE_AVAIL 0x10 +#define CCSGENACK 0x08 +#define SG_FETCH_REQ 0x02 +#define CCSGRESET 0x01 + +#define CCSGRAM 0xb0 + +#define FLEXADR 0xb0 + +#define CCSCBRAM 0xb0 + +#define FLEXCNT 0xb3 + +#define FLEXDMASTAT 0xb5 +#define FLEXDMAERR 0x02 +#define FLEXDMADONE 0x01 + +#define FLEXDATA 0xb6 + +#define BRDDAT 0xb8 + +#define BRDCTL 0xb9 +#define FLXARBACK 0x80 +#define FLXARBREQ 0x40 +#define BRDADDR 0x38 +#define BRDEN 0x04 +#define BRDRW 0x02 +#define BRDSTB 0x01 + +#define SEEADR 0xba + +#define SEEDAT 0xbc + +#define SEECTL 0xbe +#define SEEOP_EWDS 0x40 +#define SEEOP_WALL 0x40 +#define SEEOP_EWEN 0x40 +#define SEEOPCODE 0x70 +#define SEERST 0x02 +#define SEESTART 0x01 +#define SEEOP_ERASE 0x70 +#define 
SEEOP_READ 0x60 +#define SEEOP_WRITE 0x50 +#define SEEOP_ERAL 0x40 + +#define SEESTAT 0xbe +#define INIT_DONE 0x80 +#define LDALTID_L 0x08 +#define SEEARBACK 0x04 +#define SEEBUSY 0x02 + +#define SCBCNT 0xbf + +#define DSPFLTRCTL 0xc0 +#define FLTRDISABLE 0x20 +#define EDGESENSE 0x10 +#define DSPFCNTSEL 0x0f + +#define DFWADDR 0xc0 + +#define DSPDATACTL 0xc1 +#define BYPASSENAB 0x80 +#define DESQDIS 0x10 +#define RCVROFFSTDIS 0x04 +#define XMITOFFSTDIS 0x02 + +#define DSPREQCTL 0xc2 +#define MANREQCTL 0xc0 +#define MANREQDLY 0x3f + +#define DFRADDR 0xc2 + +#define DSPACKCTL 0xc3 +#define MANACKCTL 0xc0 +#define MANACKDLY 0x3f + +#define DFDAT 0xc4 + +#define DSPSELECT 0xc4 +#define AUTOINCEN 0x80 +#define DSPSEL 0x1f + +#define WRTBIASCTL 0xc5 +#define AUTOXBCDIS 0x80 +#define XMITMANVAL 0x3f + +#define RCVRBIOSCTL 0xc6 +#define AUTORBCDIS 0x80 +#define RCVRMANVAL 0x3f + +#define WRTBIASCALC 0xc7 + +#define DFPTRS 0xc8 + +#define RCVRBIASCALC 0xc8 + +#define DFBKPTR 0xc9 + +#define SKEWCALC 0xc9 + +#define DFDBCTL 0xcb +#define DFF_CIO_WR_RDY 0x20 +#define DFF_CIO_RD_RDY 0x10 +#define DFF_DIR_ERR 0x08 +#define DFF_RAMBIST_FAIL 0x04 +#define DFF_RAMBIST_DONE 0x02 +#define DFF_RAMBIST_EN 0x01 + +#define DFSCNT 0xcc + +#define DFBCNT 0xce + +#define OVLYADDR 0xd4 + +#define SEQCTL0 0xd6 +#define PERRORDIS 0x80 +#define PAUSEDIS 0x40 +#define FAILDIS 0x20 +#define FASTMODE 0x10 +#define BRKADRINTEN 0x08 +#define STEP 0x04 +#define SEQRESET 0x02 +#define LOADRAM 0x01 + +#define SEQCTL1 0xd7 +#define OVRLAY_DATA_CHK 0x08 +#define RAMBIST_DONE 0x04 +#define RAMBIST_FAIL 0x02 +#define RAMBIST_EN 0x01 + +#define FLAGS 0xd8 +#define ZERO 0x02 +#define CARRY 0x01 + +#define SEQINTCTL 0xd9 +#define INTVEC1DSL 0x80 +#define INT1_CONTEXT 0x20 +#define SCS_SEQ_INT1M1 0x10 +#define SCS_SEQ_INT1M0 0x08 +#define INTMASK2 0x04 +#define INTMASK1 0x02 +#define IRET 0x01 + +#define SEQRAM 0xda + +#define PRGMCNT 0xde + +#define ACCUM 0xe0 + +#define SINDEX 0xe2 + +#define DINDEX 0xe4 + +#define BRKADDR0 0xe6 + +#define BRKADDR1 0xe6 +#define BRKDIS 0x80 + +#define ALLONES 0xe8 + +#define ALLZEROS 0xea + +#define NONE 0xea + +#define SINDIR 0xec + +#define DINDIR 0xed + +#define FUNCTION1 0xf0 + +#define STACK 0xf2 + +#define INTVEC1_ADDR 0xf4 + +#define CURADDR 0xf4 + +#define LASTADDR 0xf6 + +#define INTVEC2_ADDR 0xf6 + +#define LONGJMP_ADDR 0xf8 + +#define ACCUM_SAVE 0xfa + +#define AHD_PCI_CONFIG_BASE 0x100 + +#define SRAM_BASE 0x100 + +#define WAITING_SCB_TAILS 0x100 + +#define WAITING_TID_HEAD 0x120 + +#define WAITING_TID_TAIL 0x122 + +#define NEXT_QUEUED_SCB_ADDR 0x124 + +#define COMPLETE_SCB_HEAD 0x128 + +#define COMPLETE_SCB_DMAINPROG_HEAD 0x12a + +#define COMPLETE_DMA_SCB_HEAD 0x12c + +#define COMPLETE_DMA_SCB_TAIL 0x12e + +#define COMPLETE_ON_QFREEZE_HEAD 0x130 + +#define QFREEZE_COUNT 0x132 + +#define KERNEL_QFREEZE_COUNT 0x134 + +#define SAVED_MODE 0x136 + +#define MSG_OUT 0x137 + +#define DMAPARAMS 0x138 +#define PRELOADEN 0x80 +#define WIDEODD 0x40 +#define SCSIEN 0x20 +#define SDMAENACK 0x10 +#define SDMAEN 0x10 +#define HDMAEN 0x08 +#define HDMAENACK 0x08 +#define DIRECTION 0x04 +#define FIFOFLUSH 0x02 +#define FIFORESET 0x01 + +#define SEQ_FLAGS 0x139 +#define NOT_IDENTIFIED 0x80 +#define NO_CDB_SENT 0x40 +#define TARGET_CMD_IS_TAGGED 0x40 +#define DPHASE 0x20 +#define TARG_CMD_PENDING 0x10 +#define CMDPHASE_PENDING 0x08 +#define DPHASE_PENDING 0x04 +#define SPHASE_PENDING 0x02 +#define NO_DISCONNECT 0x01 + +#define SAVED_SCSIID 0x13a + +#define SAVED_LUN 0x13b + +#define LASTPHASE 0x13c 
+#define PHASE_MASK 0xe0 +#define CDI 0x80 +#define IOI 0x40 +#define MSGI 0x20 +#define P_BUSFREE 0x01 +#define P_MESGIN 0xe0 +#define P_STATUS 0xc0 +#define P_MESGOUT 0xa0 +#define P_COMMAND 0x80 +#define P_DATAIN_DT 0x60 +#define P_DATAIN 0x40 +#define P_DATAOUT_DT 0x20 +#define P_DATAOUT 0x00 + +#define QOUTFIFO_ENTRY_VALID_TAG 0x13d + +#define KERNEL_TQINPOS 0x13e + +#define TQINPOS 0x13f + +#define SHARED_DATA_ADDR 0x140 + +#define QOUTFIFO_NEXT_ADDR 0x144 + +#define ARG_1 0x148 +#define RETURN_1 0x148 +#define SEND_MSG 0x80 +#define SEND_SENSE 0x40 +#define SEND_REJ 0x20 +#define MSGOUT_PHASEMIS 0x10 +#define EXIT_MSG_LOOP 0x08 +#define CONT_MSG_LOOP_WRITE 0x04 +#define CONT_MSG_LOOP_READ 0x03 +#define CONT_MSG_LOOP_TARG 0x02 + +#define ARG_2 0x149 +#define RETURN_2 0x149 + +#define LAST_MSG 0x14a + +#define SCSISEQ_TEMPLATE 0x14b +#define MANUALCTL 0x40 +#define ENSELI 0x20 +#define ENRSELI 0x10 +#define MANUALP 0x0c +#define ENAUTOATNP 0x02 +#define ALTSTIM 0x01 + +#define INITIATOR_TAG 0x14c + +#define SEQ_FLAGS2 0x14d +#define SELECTOUT_QFROZEN 0x04 +#define TARGET_MSG_PENDING 0x02 +#define PENDING_MK_MESSAGE 0x01 + +#define ALLOCFIFO_SCBPTR 0x14e + +#define INT_COALESCING_TIMER 0x150 + +#define INT_COALESCING_MAXCMDS 0x152 + +#define INT_COALESCING_MINCMDS 0x153 + +#define CMDS_PENDING 0x154 + +#define INT_COALESCING_CMDCOUNT 0x156 + +#define LOCAL_HS_MAILBOX 0x157 + +#define CMDSIZE_TABLE 0x158 + +#define MK_MESSAGE_SCB 0x160 + +#define MK_MESSAGE_SCSIID 0x162 + +#define SCB_RESIDUAL_DATACNT 0x180 +#define SCB_CDB_STORE 0x180 +#define SCB_HOST_CDB_PTR 0x180 + +#define SCB_BASE 0x180 + +#define SCB_RESIDUAL_SGPTR 0x184 +#define SG_ADDR_MASK 0xf8 +#define SG_OVERRUN_RESID 0x02 + +#define SCB_SCSI_STATUS 0x188 +#define SCB_HOST_CDB_LEN 0x188 + +#define SCB_TARGET_PHASES 0x189 + +#define SCB_TARGET_DATA_DIR 0x18a + +#define SCB_TARGET_ITAG 0x18b + +#define SCB_SENSE_BUSADDR 0x18c +#define SCB_NEXT_COMPLETE 0x18c + +#define SCB_TAG 0x190 +#define SCB_FIFO_USE_COUNT 0x190 + +#define SCB_CONTROL 0x192 +#define TARGET_SCB 0x80 +#define DISCENB 0x40 +#define TAG_ENB 0x20 +#define MK_MESSAGE 0x10 +#define STATUS_RCVD 0x08 +#define DISCONNECTED 0x04 +#define SCB_TAG_TYPE 0x03 + +#define SCB_SCSIID 0x193 +#define TID 0xf0 +#define OID 0x0f + +#define SCB_LUN 0x194 +#define LID 0xff + +#define SCB_TASK_ATTRIBUTE 0x195 +#define SCB_XFERLEN_ODD 0x01 + +#define SCB_CDB_LEN 0x196 +#define SCB_CDB_LEN_PTR 0x80 + +#define SCB_TASK_MANAGEMENT 0x197 + +#define SCB_DATAPTR 0x198 + +#define SCB_DATACNT 0x1a0 +#define SG_LAST_SEG 0x80 +#define SG_HIGH_ADDR_BITS 0x7f + +#define SCB_SGPTR 0x1a4 +#define SG_STATUS_VALID 0x04 +#define SG_FULL_RESID 0x02 +#define SG_LIST_NULL 0x01 + +#define SCB_BUSADDR 0x1a8 + +#define SCB_NEXT 0x1ac +#define SCB_NEXT_SCB_BUSADDR 0x1ac + +#define SCB_NEXT2 0x1ae + +#define SCB_SPARE 0x1b0 +#define SCB_PKT_LUN 0x1b0 + +#define SCB_DISCONNECTED_LISTS 0x1b8 + + +#define STIMESEL_SHIFT 0x03 +#define STIMESEL_MIN 0x18 +#define INVALID_ADDR 0x80 +#define CMD_GROUP_CODE_SHIFT 0x05 +#define AHD_PRECOMP_MASK 0x07 +#define TARGET_DATA_IN 0x01 +#define SEEOP_EWEN_ADDR 0xc0 +#define NUMDSPS 0x14 +#define DST_MODE_SHIFT 0x04 +#define CCSCBADDR_MAX 0x80 +#define AHD_ANNEXCOL_PER_DEV0 0x04 +#define TARGET_CMD_CMPLT 0xfe +#define SEEOP_WRAL_ADDR 0x40 +#define BUS_8_BIT 0x00 +#define AHD_TIMER_MAX_US 0x18ffe7 +#define AHD_TIMER_MAX_TICKS 0xffff +#define AHD_SENSE_BUFSIZE 0x100 +#define AHD_PRECOMP_SHIFT 0x00 +#define AHD_PRECOMP_CUTBACK_37 0x07 +#define AHD_ANNEXCOL_PRECOMP_SLEW 0x04 
+#define AHD_AMPLITUDE_DEF 0x07 +#define WRTBIASCTL_HP_DEFAULT 0x00 +#define TID_SHIFT 0x04 +#define STATUS_QUEUE_FULL 0x28 +#define STATUS_BUSY 0x08 +#define SEEOP_EWDS_ADDR 0x00 +#define SCB_TRANSFER_SIZE_FULL_LUN 0x38 +#define MK_MESSAGE_BIT_OFFSET 0x04 +#define MAX_OFFSET_PACED 0xfe +#define MAX_OFFSET_NON_PACED 0x7f +#define LUNLEN_SINGLE_LEVEL_LUN 0x0f +#define CCSGADDR_MAX 0x80 +#define B_CURRFIFO_0 0x02 +#define BUS_32_BIT 0x02 +#define AHD_TIMER_US_PER_TICK 0x19 +#define AHD_SLEWRATE_SHIFT 0x03 +#define AHD_SLEWRATE_MASK 0x78 +#define AHD_SLEWRATE_DEF_REVA 0x08 +#define AHD_PRECOMP_CUTBACK_29 0x06 +#define AHD_NUM_PER_DEV_ANNEXCOLS 0x04 +#define AHD_ANNEXCOL_AMPLITUDE 0x06 +#define AHD_AMPLITUDE_SHIFT 0x00 +#define AHD_AMPLITUDE_MASK 0x07 +#define STIMESEL_BUG_ADJ 0x08 +#define STATUS_PKT_SENSE 0xff +#define SRC_MODE_SHIFT 0x00 +#define SEEOP_ERAL_ADDR 0x80 +#define NVRAM_SCB_OFFSET 0x2c +#define MAX_OFFSET_PACED_BUG 0x7f +#define CCSGRAM_MAXSEGS 0x10 +#define AHD_SLEWRATE_DEF_REVB 0x08 +#define AHD_PRECOMP_CUTBACK_17 0x04 +#define SCB_TRANSFER_SIZE_1BYTE_LUN 0x30 +#define PKT_OVERRUN_BUFSIZE 0x200 +#define MAX_OFFSET 0xfe +#define HOST_MSG 0xff +#define BUS_16_BIT 0x01 + + +/* Downloaded Constant Definitions */ +#define SG_SIZEOF 0x04 +#define SG_PREFETCH_ALIGN_MASK 0x02 +#define SG_PREFETCH_CNT_LIMIT 0x01 +#define CACHELINE_MASK 0x07 +#define SCB_TRANSFER_SIZE 0x06 +#define PKT_OVERRUN_BUFOFFSET 0x05 +#define SG_PREFETCH_ADDR_MASK 0x03 +#define SG_PREFETCH_CNT 0x00 +#define DOWNLOAD_CONST_COUNT 0x08 + + +/* Exported Labels */ +#define LABEL_timer_isr 0x28b +#define LABEL_seq_isr 0x28f diff --git a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped new file mode 100644 index 000000000..2e0c58905 --- /dev/null +++ b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped @@ -0,0 +1,745 @@ +/* + * DO NOT EDIT - This file is automatically generated + * from the following source files: + * + * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $ + * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $ + */ + +#include "aic79xx_osm.h" + +static const ahd_reg_parse_entry_t INTSTAT_parse_table[] = { + { "SPLTINT", 0x01, 0x01 }, + { "CMDCMPLT", 0x02, 0x02 }, + { "SEQINT", 0x04, 0x04 }, + { "SCSIINT", 0x08, 0x08 }, + { "PCIINT", 0x10, 0x10 }, + { "SWTMINT", 0x20, 0x20 }, + { "BRKADRINT", 0x40, 0x40 }, + { "HWERRINT", 0x80, 0x80 }, + { "INT_PEND", 0xff, 0xff } +}; + +int +ahd_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(INTSTAT_parse_table, 9, "INTSTAT", + 0x01, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = { + { "ENINT_COALESCE", 0x40, 0x40 }, + { "HOST_TQINPOS", 0x80, 0x80 } +}; + +int +ahd_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(HS_MAILBOX_parse_table, 2, "HS_MAILBOX", + 0x0b, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SEQINTSTAT_parse_table[] = { + { "SEQ_SPLTINT", 0x01, 0x01 }, + { "SEQ_PCIINT", 0x02, 0x02 }, + { "SEQ_SCSIINT", 0x04, 0x04 }, + { "SEQ_SEQINT", 0x08, 0x08 }, + { "SEQ_SWTMRTO", 0x10, 0x10 } +}; + +int +ahd_seqintstat_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SEQINTSTAT_parse_table, 5, "SEQINTSTAT", + 0x0c, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t INTCTL_parse_table[] = { + { "SPLTINTEN", 0x01, 0x01 }, + { "SEQINTEN", 0x02, 0x02 }, + { "SCSIINTEN", 0x04, 0x04 }, + { "PCIINTEN", 0x08, 0x08 }, + { 
"AUTOCLRCMDINT", 0x10, 0x10 }, + { "SWTIMER_START", 0x20, 0x20 }, + { "SWTMINTEN", 0x40, 0x40 }, + { "SWTMINTMASK", 0x80, 0x80 } +}; + +int +ahd_intctl_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(INTCTL_parse_table, 8, "INTCTL", + 0x18, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t DFCNTRL_parse_table[] = { + { "DIRECTIONEN", 0x01, 0x01 }, + { "FIFOFLUSH", 0x02, 0x02 }, + { "FIFOFLUSHACK", 0x02, 0x02 }, + { "DIRECTION", 0x04, 0x04 }, + { "DIRECTIONACK", 0x04, 0x04 }, + { "HDMAEN", 0x08, 0x08 }, + { "HDMAENACK", 0x08, 0x08 }, + { "SCSIEN", 0x20, 0x20 }, + { "SCSIENACK", 0x20, 0x20 }, + { "SCSIENWRDIS", 0x40, 0x40 }, + { "PRELOADEN", 0x80, 0x80 } +}; + +int +ahd_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(DFCNTRL_parse_table, 11, "DFCNTRL", + 0x19, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t DFSTATUS_parse_table[] = { + { "FIFOEMP", 0x01, 0x01 }, + { "FIFOFULL", 0x02, 0x02 }, + { "DFTHRESH", 0x04, 0x04 }, + { "HDONE", 0x08, 0x08 }, + { "MREQPEND", 0x10, 0x10 }, + { "PKT_PRELOAD_AVAIL", 0x40, 0x40 }, + { "PRELOAD_AVAIL", 0x80, 0x80 } +}; + +int +ahd_dfstatus_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(DFSTATUS_parse_table, 7, "DFSTATUS", + 0x1a, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = { + { "LAST_SEG_DONE", 0x01, 0x01 }, + { "LAST_SEG", 0x02, 0x02 }, + { "ODD_SEG", 0x04, 0x04 }, + { "SG_ADDR_MASK", 0xf8, 0xf8 } +}; + +int +ahd_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SG_CACHE_SHADOW_parse_table, 4, "SG_CACHE_SHADOW", + 0x1b, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = { + { "SCSIRSTO", 0x01, 0x01 }, + { "FORCEBUSFREE", 0x10, 0x10 }, + { "ENARBO", 0x20, 0x20 }, + { "ENSELO", 0x40, 0x40 }, + { "TEMODEO", 0x80, 0x80 } +}; + +int +ahd_scsiseq0_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SCSISEQ0_parse_table, 5, "SCSISEQ0", + 0x3a, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SCSISEQ1_parse_table[] = { + { "ALTSTIM", 0x01, 0x01 }, + { "ENAUTOATNP", 0x02, 0x02 }, + { "MANUALP", 0x0c, 0x0c }, + { "ENRSELI", 0x10, 0x10 }, + { "ENSELI", 0x20, 0x20 }, + { "MANUALCTL", 0x40, 0x40 } +}; + +int +ahd_scsiseq1_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SCSISEQ1_parse_table, 6, "SCSISEQ1", + 0x3b, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t DFFSTAT_parse_table[] = { + { "CURRFIFO_0", 0x00, 0x03 }, + { "CURRFIFO_1", 0x01, 0x03 }, + { "CURRFIFO_NONE", 0x03, 0x03 }, + { "FIFO0FREE", 0x10, 0x10 }, + { "FIFO1FREE", 0x20, 0x20 }, + { "CURRFIFO", 0x03, 0x03 } +}; + +int +ahd_dffstat_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(DFFSTAT_parse_table, 6, "DFFSTAT", + 0x3f, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SCSISIGI_parse_table[] = { + { "P_DATAOUT", 0x00, 0xe0 }, + { "P_DATAOUT_DT", 0x20, 0xe0 }, + { "P_DATAIN", 0x40, 0xe0 }, + { "P_DATAIN_DT", 0x60, 0xe0 }, + { "P_COMMAND", 0x80, 0xe0 }, + { "P_MESGOUT", 0xa0, 0xe0 }, + { "P_STATUS", 0xc0, 0xe0 }, + { "P_MESGIN", 0xe0, 0xe0 }, + { "ACKI", 0x01, 0x01 }, + { "REQI", 0x02, 0x02 }, + { "BSYI", 0x04, 0x04 }, + { "SELI", 0x08, 0x08 }, + { "ATNI", 0x10, 0x10 }, + { "MSGI", 0x20, 0x20 }, + { "IOI", 0x40, 0x40 }, + { "CDI", 0x80, 0x80 }, + { "PHASE_MASK", 
0xe0, 0xe0 } +}; + +int +ahd_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SCSISIGI_parse_table, 17, "SCSISIGI", + 0x41, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SCSIPHASE_parse_table[] = { + { "DATA_OUT_PHASE", 0x01, 0x03 }, + { "DATA_IN_PHASE", 0x02, 0x03 }, + { "DATA_PHASE_MASK", 0x03, 0x03 }, + { "MSG_OUT_PHASE", 0x04, 0x04 }, + { "MSG_IN_PHASE", 0x08, 0x08 }, + { "COMMAND_PHASE", 0x10, 0x10 }, + { "STATUS_PHASE", 0x20, 0x20 } +}; + +int +ahd_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SCSIPHASE_parse_table, 7, "SCSIPHASE", + 0x42, regvalue, cur_col, wrap)); +} + +int +ahd_scsibus_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(NULL, 0, "SCSIBUS", + 0x46, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SELID_parse_table[] = { + { "ONEBIT", 0x08, 0x08 }, + { "SELID_MASK", 0xf0, 0xf0 } +}; + +int +ahd_selid_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SELID_parse_table, 2, "SELID", + 0x49, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SIMODE0_parse_table[] = { + { "ENARBDO", 0x01, 0x01 }, + { "ENSPIORDY", 0x02, 0x02 }, + { "ENOVERRUN", 0x04, 0x04 }, + { "ENIOERR", 0x08, 0x08 }, + { "ENSELINGO", 0x10, 0x10 }, + { "ENSELDI", 0x20, 0x20 }, + { "ENSELDO", 0x40, 0x40 } +}; + +int +ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SIMODE0_parse_table, 7, "SIMODE0", + 0x4b, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = { + { "ARBDO", 0x01, 0x01 }, + { "SPIORDY", 0x02, 0x02 }, + { "OVERRUN", 0x04, 0x04 }, + { "IOERR", 0x08, 0x08 }, + { "SELINGO", 0x10, 0x10 }, + { "SELDI", 0x20, 0x20 }, + { "SELDO", 0x40, 0x40 }, + { "TARGET", 0x80, 0x80 } +}; + +int +ahd_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SSTAT0_parse_table, 8, "SSTAT0", + 0x4b, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SSTAT1_parse_table[] = { + { "REQINIT", 0x01, 0x01 }, + { "STRB2FAST", 0x02, 0x02 }, + { "SCSIPERR", 0x04, 0x04 }, + { "BUSFREE", 0x08, 0x08 }, + { "PHASEMIS", 0x10, 0x10 }, + { "SCSIRSTI", 0x20, 0x20 }, + { "ATNTARG", 0x40, 0x40 }, + { "SELTO", 0x80, 0x80 } +}; + +int +ahd_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SSTAT1_parse_table, 8, "SSTAT1", + 0x4c, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SSTAT2_parse_table[] = { + { "BUSFREE_LQO", 0x40, 0xc0 }, + { "BUSFREE_DFF0", 0x80, 0xc0 }, + { "BUSFREE_DFF1", 0xc0, 0xc0 }, + { "DMADONE", 0x01, 0x01 }, + { "SDONE", 0x02, 0x02 }, + { "WIDE_RES", 0x04, 0x04 }, + { "BSYX", 0x08, 0x08 }, + { "EXP_ACTIVE", 0x10, 0x10 }, + { "NONPACKREQ", 0x20, 0x20 }, + { "BUSFREETIME", 0xc0, 0xc0 } +}; + +int +ahd_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SSTAT2_parse_table, 10, "SSTAT2", + 0x4d, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t PERRDIAG_parse_table[] = { + { "DTERR", 0x01, 0x01 }, + { "DGFORMERR", 0x02, 0x02 }, + { "CRCERR", 0x04, 0x04 }, + { "AIPERR", 0x08, 0x08 }, + { "PARITYERR", 0x10, 0x10 }, + { "PREVPHASE", 0x20, 0x20 }, + { "HIPERR", 0x40, 0x40 }, + { "HIZERO", 0x80, 0x80 } +}; + +int +ahd_perrdiag_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(PERRDIAG_parse_table, 8, "PERRDIAG", + 0x4e, regvalue, cur_col, wrap)); 
+} + +int +ahd_soffcnt_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(NULL, 0, "SOFFCNT", + 0x4f, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t LQISTAT0_parse_table[] = { + { "LQIATNCMD", 0x01, 0x01 }, + { "LQIATNLQ", 0x02, 0x02 }, + { "LQIBADLQT", 0x04, 0x04 }, + { "LQICRCT2", 0x08, 0x08 }, + { "LQICRCT1", 0x10, 0x10 }, + { "LQIATNQAS", 0x20, 0x20 } +}; + +int +ahd_lqistat0_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(LQISTAT0_parse_table, 6, "LQISTAT0", + 0x50, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t LQISTAT1_parse_table[] = { + { "LQIOVERI_NLQ", 0x01, 0x01 }, + { "LQIOVERI_LQ", 0x02, 0x02 }, + { "LQIBADLQI", 0x04, 0x04 }, + { "LQICRCI_NLQ", 0x08, 0x08 }, + { "LQICRCI_LQ", 0x10, 0x10 }, + { "LQIABORT", 0x20, 0x20 }, + { "LQIPHASE_NLQ", 0x40, 0x40 }, + { "LQIPHASE_LQ", 0x80, 0x80 } +}; + +int +ahd_lqistat1_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(LQISTAT1_parse_table, 8, "LQISTAT1", + 0x51, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t LQISTAT2_parse_table[] = { + { "LQIGSAVAIL", 0x01, 0x01 }, + { "LQISTOPCMD", 0x02, 0x02 }, + { "LQISTOPLQ", 0x04, 0x04 }, + { "LQISTOPPKT", 0x08, 0x08 }, + { "LQIWAITFIFO", 0x10, 0x10 }, + { "LQIWORKONLQ", 0x20, 0x20 }, + { "LQIPHASE_OUTPKT", 0x40, 0x40 }, + { "PACKETIZED", 0x80, 0x80 } +}; + +int +ahd_lqistat2_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(LQISTAT2_parse_table, 8, "LQISTAT2", + 0x52, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SSTAT3_parse_table[] = { + { "OSRAMPERR", 0x01, 0x01 }, + { "NTRAMPERR", 0x02, 0x02 } +}; + +int +ahd_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SSTAT3_parse_table, 2, "SSTAT3", + 0x53, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = { + { "LQOTCRC", 0x01, 0x01 }, + { "LQOATNPKT", 0x02, 0x02 }, + { "LQOATNLQ", 0x04, 0x04 }, + { "LQOSTOPT2", 0x08, 0x08 }, + { "LQOTARGSCBPERR", 0x10, 0x10 } +}; + +int +ahd_lqostat0_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(LQOSTAT0_parse_table, 5, "LQOSTAT0", + 0x54, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = { + { "LQOPHACHGINPKT", 0x01, 0x01 }, + { "LQOBUSFREE", 0x02, 0x02 }, + { "LQOBADQAS", 0x04, 0x04 }, + { "LQOSTOPI2", 0x08, 0x08 }, + { "LQOINITSCBPERR", 0x10, 0x10 } +}; + +int +ahd_lqostat1_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(LQOSTAT1_parse_table, 5, "LQOSTAT1", + 0x55, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = { + { "LQOSTOP0", 0x01, 0x01 }, + { "LQOPHACHGOUTPKT", 0x02, 0x02 }, + { "LQOWAITFIFO", 0x10, 0x10 }, + { "LQOPKT", 0xe0, 0xe0 } +}; + +int +ahd_lqostat2_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(LQOSTAT2_parse_table, 4, "LQOSTAT2", + 0x56, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SIMODE1_parse_table[] = { + { "ENREQINIT", 0x01, 0x01 }, + { "ENSTRB2FAST", 0x02, 0x02 }, + { "ENSCSIPERR", 0x04, 0x04 }, + { "ENBUSFREE", 0x08, 0x08 }, + { "ENPHASEMIS", 0x10, 0x10 }, + { "ENSCSIRST", 0x20, 0x20 }, + { "ENATNTARG", 0x40, 0x40 }, + { "ENSELTIMO", 0x80, 0x80 } +}; + +int +ahd_simode1_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SIMODE1_parse_table, 8, "SIMODE1", + 
0x57, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = { + { "RSTCHN", 0x01, 0x01 }, + { "CLRCHN", 0x02, 0x02 }, + { "CLRSHCNT", 0x04, 0x04 }, + { "DFFBITBUCKET", 0x08, 0x08 } +}; + +int +ahd_dffsxfrctl_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(DFFSXFRCTL_parse_table, 4, "DFFSXFRCTL", + 0x5a, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = { + { "CFG4TCMD", 0x01, 0x01 }, + { "CFG4ICMD", 0x02, 0x02 }, + { "CFG4TSTAT", 0x04, 0x04 }, + { "CFG4ISTAT", 0x08, 0x08 }, + { "CFG4DATA", 0x10, 0x10 }, + { "SAVEPTRS", 0x20, 0x20 }, + { "CTXTDONE", 0x40, 0x40 } +}; + +int +ahd_seqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SEQINTSRC_parse_table, 7, "SEQINTSRC", + 0x5b, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SEQIMODE_parse_table[] = { + { "ENCFG4TCMD", 0x01, 0x01 }, + { "ENCFG4ICMD", 0x02, 0x02 }, + { "ENCFG4TSTAT", 0x04, 0x04 }, + { "ENCFG4ISTAT", 0x08, 0x08 }, + { "ENCFG4DATA", 0x10, 0x10 }, + { "ENSAVEPTRS", 0x20, 0x20 }, + { "ENCTXTDONE", 0x40, 0x40 } +}; + +int +ahd_seqimode_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SEQIMODE_parse_table, 7, "SEQIMODE", + 0x5c, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = { + { "FIFOFREE", 0x01, 0x01 }, + { "DATAINFIFO", 0x02, 0x02 }, + { "DLZERO", 0x04, 0x04 }, + { "SHVALID", 0x08, 0x08 }, + { "LASTSDONE", 0x10, 0x10 }, + { "SHCNTMINUS1", 0x20, 0x20 }, + { "SHCNTNEGATIVE", 0x40, 0x40 } +}; + +int +ahd_mdffstat_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(MDFFSTAT_parse_table, 7, "MDFFSTAT", + 0x5d, regvalue, cur_col, wrap)); +} + +int +ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(NULL, 0, "SELOID", + 0x6b, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SG_STATE_parse_table[] = { + { "SEGS_AVAIL", 0x01, 0x01 }, + { "LOADING_NEEDED", 0x02, 0x02 }, + { "FETCH_INPROG", 0x04, 0x04 } +}; + +int +ahd_sg_state_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SG_STATE_parse_table, 3, "SG_STATE", + 0xa6, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = { + { "CCSCBRESET", 0x01, 0x01 }, + { "CCSCBDIR", 0x04, 0x04 }, + { "CCSCBEN", 0x08, 0x08 }, + { "CCARREN", 0x10, 0x10 }, + { "ARRDONE", 0x40, 0x40 }, + { "CCSCBDONE", 0x80, 0x80 } +}; + +int +ahd_ccscbctl_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(CCSCBCTL_parse_table, 6, "CCSCBCTL", + 0xad, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t CCSGCTL_parse_table[] = { + { "CCSGRESET", 0x01, 0x01 }, + { "SG_FETCH_REQ", 0x02, 0x02 }, + { "CCSGENACK", 0x08, 0x08 }, + { "SG_CACHE_AVAIL", 0x10, 0x10 }, + { "CCSGDONE", 0x80, 0x80 }, + { "CCSGEN", 0x0c, 0x0c } +}; + +int +ahd_ccsgctl_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(CCSGCTL_parse_table, 6, "CCSGCTL", + 0xad, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SEQCTL0_parse_table[] = { + { "LOADRAM", 0x01, 0x01 }, + { "SEQRESET", 0x02, 0x02 }, + { "STEP", 0x04, 0x04 }, + { "BRKADRINTEN", 0x08, 0x08 }, + { "FASTMODE", 0x10, 0x10 }, + { "FAILDIS", 0x20, 0x20 }, + { "PAUSEDIS", 0x40, 0x40 }, + { "PERRORDIS", 0x80, 0x80 } +}; + +int +ahd_seqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + 
return (ahd_print_register(SEQCTL0_parse_table, 8, "SEQCTL0", + 0xd6, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = { + { "IRET", 0x01, 0x01 }, + { "INTMASK1", 0x02, 0x02 }, + { "INTMASK2", 0x04, 0x04 }, + { "SCS_SEQ_INT1M0", 0x08, 0x08 }, + { "SCS_SEQ_INT1M1", 0x10, 0x10 }, + { "INT1_CONTEXT", 0x20, 0x20 }, + { "INTVEC1DSL", 0x80, 0x80 } +}; + +int +ahd_seqintctl_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SEQINTCTL_parse_table, 7, "SEQINTCTL", + 0xd9, regvalue, cur_col, wrap)); +} + +int +ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(NULL, 0, "SRAM_BASE", + 0x100, regvalue, cur_col, wrap)); +} + +int +ahd_qfreeze_count_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(NULL, 0, "QFREEZE_COUNT", + 0x132, regvalue, cur_col, wrap)); +} + +int +ahd_kernel_qfreeze_count_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(NULL, 0, "KERNEL_QFREEZE_COUNT", + 0x134, regvalue, cur_col, wrap)); +} + +int +ahd_saved_mode_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(NULL, 0, "SAVED_MODE", + 0x136, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = { + { "NO_DISCONNECT", 0x01, 0x01 }, + { "SPHASE_PENDING", 0x02, 0x02 }, + { "DPHASE_PENDING", 0x04, 0x04 }, + { "CMDPHASE_PENDING", 0x08, 0x08 }, + { "TARG_CMD_PENDING", 0x10, 0x10 }, + { "DPHASE", 0x20, 0x20 }, + { "NO_CDB_SENT", 0x40, 0x40 }, + { "TARGET_CMD_IS_TAGGED",0x40, 0x40 }, + { "NOT_IDENTIFIED", 0x80, 0x80 } +}; + +int +ahd_seq_flags_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SEQ_FLAGS_parse_table, 9, "SEQ_FLAGS", + 0x139, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t LASTPHASE_parse_table[] = { + { "P_DATAOUT", 0x00, 0xe0 }, + { "P_DATAOUT_DT", 0x20, 0xe0 }, + { "P_DATAIN", 0x40, 0xe0 }, + { "P_DATAIN_DT", 0x60, 0xe0 }, + { "P_COMMAND", 0x80, 0xe0 }, + { "P_MESGOUT", 0xa0, 0xe0 }, + { "P_STATUS", 0xc0, 0xe0 }, + { "P_MESGIN", 0xe0, 0xe0 }, + { "P_BUSFREE", 0x01, 0x01 }, + { "MSGI", 0x20, 0x20 }, + { "IOI", 0x40, 0x40 }, + { "CDI", 0x80, 0x80 }, + { "PHASE_MASK", 0xe0, 0xe0 } +}; + +int +ahd_lastphase_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(LASTPHASE_parse_table, 13, "LASTPHASE", + 0x13c, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = { + { "PENDING_MK_MESSAGE", 0x01, 0x01 }, + { "TARGET_MSG_PENDING", 0x02, 0x02 }, + { "SELECTOUT_QFROZEN", 0x04, 0x04 } +}; + +int +ahd_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SEQ_FLAGS2_parse_table, 3, "SEQ_FLAGS2", + 0x14d, regvalue, cur_col, wrap)); +} + +int +ahd_mk_message_scb_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(NULL, 0, "MK_MESSAGE_SCB", + 0x160, regvalue, cur_col, wrap)); +} + +int +ahd_mk_message_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(NULL, 0, "MK_MESSAGE_SCSIID", + 0x162, regvalue, cur_col, wrap)); +} + +int +ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(NULL, 0, "SCB_BASE", + 0x180, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = { + { "SCB_TAG_TYPE", 0x03, 0x03 }, + { "DISCONNECTED", 0x04, 0x04 }, + { "STATUS_RCVD", 0x08, 0x08 }, + { 
"MK_MESSAGE", 0x10, 0x10 }, + { "TAG_ENB", 0x20, 0x20 }, + { "DISCENB", 0x40, 0x40 }, + { "TARGET_SCB", 0x80, 0x80 } +}; + +int +ahd_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SCB_CONTROL_parse_table, 7, "SCB_CONTROL", + 0x192, regvalue, cur_col, wrap)); +} + +static const ahd_reg_parse_entry_t SCB_SCSIID_parse_table[] = { + { "OID", 0x0f, 0x0f }, + { "TID", 0xf0, 0xf0 } +}; + +int +ahd_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SCB_SCSIID_parse_table, 2, "SCB_SCSIID", + 0x193, regvalue, cur_col, wrap)); +} + diff --git a/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped b/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped new file mode 100644 index 000000000..fd64a950e --- /dev/null +++ b/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped @@ -0,0 +1,1189 @@ +/* + * DO NOT EDIT - This file is automatically generated + * from the following source files: + * + * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $ + * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $ + */ +static const uint8_t seqprog[] = { + 0xff, 0x02, 0x06, 0x78, + 0x00, 0xea, 0x6e, 0x59, + 0x01, 0xea, 0x04, 0x30, + 0xff, 0x04, 0x0c, 0x78, + 0x19, 0xea, 0x6e, 0x59, + 0x19, 0xea, 0x04, 0x00, + 0x33, 0xea, 0x68, 0x59, + 0x33, 0xea, 0x00, 0x00, + 0x60, 0x3a, 0x3a, 0x68, + 0x04, 0x4d, 0x35, 0x78, + 0x01, 0x34, 0xc1, 0x31, + 0x00, 0x32, 0x21, 0x60, + 0x01, 0x35, 0xc1, 0x31, + 0x00, 0x33, 0x21, 0x60, + 0xfb, 0x4d, 0x9b, 0x0a, + 0x00, 0xe2, 0x34, 0x40, + 0x50, 0x4b, 0x3a, 0x68, + 0xff, 0x31, 0x3b, 0x70, + 0x02, 0x30, 0x51, 0x31, + 0xff, 0x8d, 0x2d, 0x70, + 0x02, 0x8c, 0x51, 0x31, + 0xff, 0x8d, 0x29, 0x60, + 0x02, 0x28, 0x19, 0x33, + 0x02, 0x30, 0x51, 0x32, + 0xff, 0xea, 0x62, 0x02, + 0x00, 0xe2, 0x3a, 0x40, + 0xff, 0x21, 0x3b, 0x70, + 0x40, 0x4b, 0xb4, 0x69, + 0x00, 0xe2, 0x72, 0x59, + 0x40, 0x4b, 0xb4, 0x69, + 0x20, 0x4b, 0xa0, 0x69, + 0xfc, 0x42, 0x44, 0x78, + 0x10, 0x40, 0x44, 0x78, + 0x00, 0xe2, 0x10, 0x5e, + 0x20, 0x4d, 0x48, 0x78, + 0x00, 0xe2, 0x10, 0x5e, + 0x30, 0x3f, 0xc0, 0x09, + 0x30, 0xe0, 0x50, 0x60, + 0x7f, 0x4a, 0x94, 0x08, + 0x00, 0xe2, 0x52, 0x40, + 0xc0, 0x4a, 0x94, 0x00, + 0x00, 0xe2, 0x5e, 0x58, + 0x00, 0xe2, 0x76, 0x58, + 0x00, 0xe2, 0x86, 0x58, + 0x00, 0xe2, 0x06, 0x40, + 0x33, 0xea, 0x68, 0x59, + 0x33, 0xea, 0x00, 0x00, + 0x01, 0x52, 0x84, 0x78, + 0x02, 0x58, 0x50, 0x31, + 0xff, 0xea, 0x10, 0x0b, + 0xff, 0x97, 0x6f, 0x78, + 0x50, 0x4b, 0x6a, 0x68, + 0xbf, 0x3a, 0x74, 0x08, + 0x14, 0xea, 0x6e, 0x59, + 0x14, 0xea, 0x04, 0x00, + 0x08, 0x92, 0x25, 0x03, + 0xff, 0x90, 0x5f, 0x68, + 0x00, 0xe2, 0x8a, 0x5b, + 0x00, 0xe2, 0x5e, 0x40, + 0x00, 0xea, 0x68, 0x59, + 0x01, 0xea, 0x00, 0x30, + 0x80, 0xf9, 0x7e, 0x68, + 0x00, 0xe2, 0x66, 0x59, + 0x11, 0xea, 0x68, 0x59, + 0x11, 0xea, 0x00, 0x00, + 0x80, 0xf9, 0x66, 0x79, + 0xff, 0xea, 0xd4, 0x0d, + 0x22, 0xea, 0x68, 0x59, + 0x22, 0xea, 0x00, 0x00, + 0x10, 0x16, 0x90, 0x78, + 0x10, 0x16, 0x2c, 0x00, + 0x01, 0x0b, 0xae, 0x32, + 0x18, 0xad, 0x1c, 0x79, + 0x04, 0xad, 0xdc, 0x68, + 0x80, 0xad, 0x84, 0x78, + 0x10, 0xad, 0xaa, 0x78, + 0xe7, 0xad, 0x5a, 0x09, + 0x02, 0x8c, 0x59, 0x32, + 0xff, 0x8d, 0xa1, 0x60, + 0xff, 0xea, 0x5e, 0x02, + 0xff, 0x88, 0xa7, 0x78, + 0x02, 0x30, 0x19, 0x33, + 0x02, 0xa8, 0x60, 0x36, + 0x02, 0x28, 0x19, 0x33, + 0x02, 0xa8, 0x50, 0x36, + 0xe7, 0xad, 0x5a, 0x09, + 0x00, 0xe2, 0xb8, 0x58, + 0xff, 0xea, 0x56, 0x02, + 0x04, 0x7c, 0x88, 0x32, + 0x20, 0x16, 0x84, 0x78, + 0x04, 0x40, 0x89, 0x32, + 0x80, 0x3d, 0x7b, 0x16, + 0xff, 0x2d, 0xc7, 0x60, + 0xff, 0x29, 0xc7, 0x60, + 0x40, 0x57, 0xd7, 
0x78, + 0xff, 0x55, 0xc7, 0x68, + 0xff, 0x53, 0xc1, 0x19, + 0x00, 0x54, 0xd5, 0x19, + 0x00, 0xe2, 0xd6, 0x50, + 0x01, 0x52, 0xc1, 0x31, + 0x00, 0x56, 0xd5, 0x19, + 0x00, 0xe2, 0xd6, 0x48, + 0x80, 0x18, 0x84, 0x78, + 0x02, 0x50, 0x1d, 0x30, + 0x10, 0xea, 0x18, 0x00, + 0x60, 0x18, 0x30, 0x00, + 0x7f, 0x18, 0x30, 0x0c, + 0x02, 0xea, 0x02, 0x00, + 0xff, 0xea, 0xac, 0x0a, + 0x80, 0x18, 0x30, 0x04, + 0x40, 0xad, 0x84, 0x78, + 0xe7, 0xad, 0x5a, 0x09, + 0xff, 0xea, 0xc0, 0x09, + 0x01, 0x54, 0xa9, 0x1a, + 0x00, 0x55, 0xab, 0x22, + 0x01, 0x94, 0x6d, 0x33, + 0xff, 0xea, 0x20, 0x0b, + 0x04, 0xac, 0x49, 0x32, + 0xff, 0xea, 0x5a, 0x03, + 0xff, 0xea, 0x5e, 0x03, + 0x01, 0x10, 0xd4, 0x31, + 0x02, 0xa8, 0x40, 0x31, + 0x01, 0x92, 0xc1, 0x31, + 0x3d, 0x93, 0xc5, 0x29, + 0xfe, 0xe2, 0xc4, 0x09, + 0x01, 0xea, 0xc6, 0x01, + 0x02, 0xe2, 0xc8, 0x31, + 0x02, 0xec, 0x50, 0x31, + 0x02, 0xa0, 0xda, 0x31, + 0xff, 0xa9, 0x10, 0x71, + 0x10, 0xe0, 0x0e, 0x79, + 0x10, 0x92, 0x0f, 0x79, + 0x01, 0x4d, 0x9b, 0x02, + 0x02, 0xa0, 0xc0, 0x32, + 0x01, 0x93, 0xc5, 0x36, + 0x02, 0xa0, 0x58, 0x37, + 0xff, 0x21, 0x19, 0x71, + 0x02, 0x22, 0x51, 0x31, + 0x02, 0xa0, 0x5c, 0x33, + 0x02, 0xa0, 0x44, 0x36, + 0x02, 0xa0, 0x40, 0x32, + 0x02, 0xa0, 0x44, 0x36, + 0x05, 0x4d, 0x21, 0x69, + 0x40, 0x16, 0x52, 0x69, + 0xff, 0x2d, 0x57, 0x61, + 0xff, 0x29, 0x85, 0x70, + 0x02, 0x28, 0x55, 0x32, + 0x01, 0xea, 0x5a, 0x01, + 0x04, 0x44, 0xf9, 0x30, + 0x01, 0x44, 0xc1, 0x31, + 0x02, 0x28, 0x51, 0x31, + 0x02, 0xa8, 0x60, 0x31, + 0x01, 0xa4, 0x61, 0x31, + 0x01, 0x3d, 0x61, 0x31, + 0x01, 0x14, 0xd4, 0x31, + 0x01, 0x56, 0xad, 0x1a, + 0xff, 0x54, 0xa9, 0x1a, + 0xff, 0x55, 0xab, 0x22, + 0xff, 0x8d, 0x4b, 0x71, + 0x80, 0xac, 0x4a, 0x71, + 0x20, 0x16, 0x4a, 0x69, + 0x00, 0xac, 0xc4, 0x19, + 0x07, 0xe2, 0x4a, 0xf9, + 0x02, 0x8c, 0x51, 0x31, + 0x00, 0xe2, 0x2e, 0x41, + 0x01, 0xac, 0x08, 0x31, + 0x09, 0xea, 0x5a, 0x01, + 0x02, 0x8c, 0x51, 0x32, + 0xff, 0xea, 0x1a, 0x07, + 0x04, 0x24, 0xf9, 0x30, + 0x1d, 0xea, 0x5c, 0x41, + 0x02, 0x2c, 0x51, 0x31, + 0x04, 0xa8, 0xf9, 0x30, + 0x19, 0xea, 0x5c, 0x41, + 0x06, 0xea, 0x08, 0x81, + 0x01, 0xe2, 0x5a, 0x35, + 0x02, 0xf2, 0xf0, 0x31, + 0xff, 0xea, 0xd4, 0x0d, + 0x02, 0xf2, 0xf0, 0x31, + 0x02, 0xf8, 0xe4, 0x35, + 0x80, 0xea, 0xb2, 0x01, + 0x01, 0xe2, 0x00, 0x30, + 0xff, 0xea, 0xb2, 0x0d, + 0x01, 0xe2, 0x04, 0x30, + 0x01, 0xea, 0x04, 0x34, + 0x02, 0x20, 0xbd, 0x30, + 0x02, 0x20, 0xb9, 0x30, + 0x02, 0x20, 0x51, 0x31, + 0x4c, 0x93, 0xd7, 0x28, + 0x10, 0x92, 0x81, 0x79, + 0x01, 0x6b, 0xc0, 0x30, + 0x02, 0x64, 0xc8, 0x00, + 0x40, 0x3a, 0x74, 0x04, + 0x00, 0xe2, 0x76, 0x58, + 0x33, 0xea, 0x68, 0x59, + 0x33, 0xea, 0x00, 0x00, + 0x30, 0x3f, 0xc0, 0x09, + 0x30, 0xe0, 0x82, 0x61, + 0x20, 0x3f, 0x98, 0x69, + 0x10, 0x3f, 0x82, 0x79, + 0x02, 0xea, 0x7e, 0x00, + 0x00, 0xea, 0x68, 0x59, + 0x01, 0xea, 0x00, 0x30, + 0x02, 0x4e, 0x51, 0x35, + 0x01, 0xea, 0x7e, 0x00, + 0x11, 0xea, 0x68, 0x59, + 0x11, 0xea, 0x00, 0x00, + 0x02, 0x4e, 0x51, 0x35, + 0xc0, 0x4a, 0x94, 0x00, + 0x04, 0x41, 0xa6, 0x79, + 0x08, 0xea, 0x98, 0x00, + 0x08, 0x57, 0xae, 0x00, + 0x08, 0x3c, 0x78, 0x00, + 0xf0, 0x49, 0x74, 0x0a, + 0x0f, 0x67, 0xc0, 0x09, + 0x00, 0x3a, 0x75, 0x02, + 0x20, 0xea, 0x96, 0x00, + 0x00, 0xe2, 0x28, 0x42, + 0xc0, 0x4a, 0x94, 0x00, + 0x40, 0x3a, 0xd2, 0x69, + 0x02, 0x55, 0x06, 0x68, + 0x02, 0x56, 0xd2, 0x69, + 0xff, 0x5b, 0xd2, 0x61, + 0x02, 0x20, 0x51, 0x31, + 0x80, 0xea, 0xb2, 0x01, + 0x44, 0xea, 0x00, 0x00, + 0x01, 0x33, 0xc0, 0x31, + 0x33, 0xea, 0x00, 0x00, + 0xff, 0xea, 0xb2, 0x09, + 0xff, 0xe0, 0xc0, 0x19, + 0xff, 0xe0, 0xd4, 0x79, + 0x02, 
0xac, 0x51, 0x31, + 0x00, 0xe2, 0xca, 0x41, + 0x02, 0x5e, 0x50, 0x31, + 0x02, 0xa8, 0xb8, 0x30, + 0x02, 0x5c, 0x50, 0x31, + 0xff, 0xad, 0xe5, 0x71, + 0x02, 0xac, 0x41, 0x31, + 0x02, 0x22, 0x51, 0x31, + 0x02, 0xa0, 0x5c, 0x33, + 0x02, 0xa0, 0x44, 0x32, + 0x00, 0xe2, 0xf8, 0x41, + 0x01, 0x4d, 0xf1, 0x79, + 0x01, 0x62, 0xc1, 0x31, + 0x00, 0x93, 0xf1, 0x61, + 0xfe, 0x4d, 0x9b, 0x0a, + 0x02, 0x60, 0x41, 0x31, + 0x00, 0xe2, 0xdc, 0x41, + 0x3d, 0x93, 0xc9, 0x29, + 0x01, 0xe4, 0xc8, 0x01, + 0x01, 0xea, 0xca, 0x01, + 0xff, 0xea, 0xda, 0x01, + 0x02, 0x20, 0x51, 0x31, + 0x02, 0xae, 0x41, 0x32, + 0xff, 0x21, 0x01, 0x62, + 0xff, 0xea, 0x46, 0x02, + 0x02, 0x5c, 0x50, 0x31, + 0x40, 0xea, 0x96, 0x00, + 0x02, 0x56, 0x20, 0x6e, + 0x01, 0x55, 0x20, 0x6e, + 0x10, 0x92, 0x0d, 0x7a, + 0x10, 0x40, 0x16, 0x6a, + 0x01, 0x56, 0x16, 0x7a, + 0xff, 0x97, 0x07, 0x78, + 0x13, 0xea, 0x6e, 0x59, + 0x13, 0xea, 0x04, 0x00, + 0x00, 0xe2, 0x06, 0x40, + 0xbf, 0x3a, 0x74, 0x08, + 0x04, 0x41, 0x1c, 0x7a, + 0x08, 0xea, 0x98, 0x00, + 0x08, 0x57, 0xae, 0x00, + 0x01, 0x93, 0x75, 0x32, + 0x01, 0x94, 0x77, 0x32, + 0x40, 0xea, 0x72, 0x02, + 0x08, 0x3c, 0x78, 0x00, + 0x80, 0xea, 0x6e, 0x02, + 0x00, 0xe2, 0xf6, 0x5b, + 0x01, 0x3c, 0xc1, 0x31, + 0x9f, 0xe0, 0x98, 0x7c, + 0x80, 0xe0, 0x3c, 0x72, + 0xa0, 0xe0, 0x78, 0x72, + 0xc0, 0xe0, 0x6e, 0x72, + 0xe0, 0xe0, 0xa8, 0x72, + 0x01, 0xea, 0x6e, 0x59, + 0x01, 0xea, 0x04, 0x00, + 0x00, 0xe2, 0x28, 0x42, + 0x80, 0x39, 0x43, 0x7a, + 0x03, 0xea, 0x6e, 0x59, + 0x03, 0xea, 0x04, 0x00, + 0xee, 0x00, 0x4a, 0x6a, + 0x05, 0xea, 0xb4, 0x00, + 0x33, 0xea, 0x68, 0x59, + 0x33, 0xea, 0x00, 0x00, + 0x02, 0xa8, 0x9c, 0x32, + 0x00, 0xe2, 0x88, 0x59, + 0xef, 0x96, 0xd5, 0x19, + 0x00, 0xe2, 0x5a, 0x52, + 0x09, 0x80, 0xe1, 0x30, + 0x02, 0xea, 0x36, 0x00, + 0xa8, 0xea, 0x32, 0x00, + 0x00, 0xe2, 0x60, 0x42, + 0x01, 0x96, 0xd1, 0x30, + 0x10, 0x80, 0x89, 0x31, + 0x20, 0xea, 0x32, 0x00, + 0xbf, 0x39, 0x73, 0x0a, + 0x10, 0x4c, 0x6a, 0x6a, + 0x20, 0x19, 0x62, 0x6a, + 0x20, 0x19, 0x66, 0x6a, + 0x02, 0x4d, 0x28, 0x6a, + 0x40, 0x39, 0x73, 0x02, + 0x00, 0xe2, 0x28, 0x42, + 0x80, 0x39, 0xe9, 0x6a, + 0x01, 0x44, 0x10, 0x33, + 0x08, 0x92, 0x25, 0x03, + 0x00, 0xe2, 0x28, 0x42, + 0x10, 0xea, 0x80, 0x00, + 0x01, 0x37, 0xc5, 0x31, + 0x80, 0xe2, 0x94, 0x62, + 0x10, 0x92, 0xb9, 0x6a, + 0xc0, 0x94, 0xc5, 0x01, + 0x40, 0x92, 0x85, 0x6a, + 0xbf, 0xe2, 0xc4, 0x09, + 0x20, 0x92, 0x99, 0x7a, + 0x01, 0xe2, 0x88, 0x30, + 0x00, 0xe2, 0xf6, 0x5b, + 0xa0, 0x3c, 0xa1, 0x62, + 0x23, 0x92, 0x89, 0x08, + 0x00, 0xe2, 0xf6, 0x5b, + 0xa0, 0x3c, 0xa1, 0x62, + 0x00, 0xa8, 0x98, 0x42, + 0xff, 0xe2, 0x98, 0x62, + 0x00, 0xe2, 0xb8, 0x42, + 0x40, 0xea, 0x98, 0x00, + 0x01, 0xe2, 0x88, 0x30, + 0x00, 0xe2, 0xf6, 0x5b, + 0xa0, 0x3c, 0x77, 0x72, + 0x40, 0xea, 0x98, 0x00, + 0x01, 0x37, 0x95, 0x32, + 0x08, 0xea, 0x6e, 0x02, + 0x00, 0xe2, 0x28, 0x42, + 0xe0, 0xea, 0x12, 0x5c, + 0x80, 0xe0, 0xf4, 0x6a, + 0x04, 0xe0, 0xa6, 0x73, + 0x02, 0xe0, 0xd8, 0x73, + 0x00, 0xea, 0x52, 0x73, + 0x03, 0xe0, 0xe8, 0x73, + 0x23, 0xe0, 0xca, 0x72, + 0x08, 0xe0, 0xf0, 0x72, + 0x00, 0xe2, 0xf6, 0x5b, + 0x07, 0xea, 0x6e, 0x59, + 0x07, 0xea, 0x04, 0x00, + 0x08, 0x48, 0x29, 0x72, + 0x04, 0x48, 0xc7, 0x62, + 0x01, 0x49, 0x89, 0x30, + 0x00, 0xe2, 0xb8, 0x42, + 0x01, 0x44, 0xd4, 0x31, + 0x00, 0xe2, 0xb8, 0x42, + 0x01, 0x00, 0x6c, 0x32, + 0x33, 0xea, 0x68, 0x59, + 0x33, 0xea, 0x00, 0x00, + 0x4c, 0x3a, 0xc1, 0x28, + 0x01, 0x64, 0xc0, 0x31, + 0x00, 0x36, 0x69, 0x59, + 0x01, 0x36, 0x01, 0x30, + 0x01, 0xe0, 0xee, 0x7a, + 0xa0, 0xea, 0x08, 0x5c, + 0x01, 0xa0, 0xee, 0x62, + 0x01, 0x84, 0xe3, 0x7a, 
+ 0x01, 0x95, 0xf1, 0x6a, + 0x05, 0xea, 0x6e, 0x59, + 0x05, 0xea, 0x04, 0x00, + 0x00, 0xe2, 0xf0, 0x42, + 0x03, 0xea, 0x6e, 0x59, + 0x03, 0xea, 0x04, 0x00, + 0x00, 0xe2, 0xf0, 0x42, + 0x07, 0xea, 0x1a, 0x5c, + 0x01, 0x44, 0xd4, 0x31, + 0x00, 0xe2, 0x28, 0x42, + 0x3f, 0xe0, 0x76, 0x0a, + 0xc0, 0x3a, 0xc1, 0x09, + 0x00, 0x3b, 0x51, 0x01, + 0xff, 0xea, 0x52, 0x09, + 0x30, 0x3a, 0xc5, 0x09, + 0x3d, 0xe2, 0xc4, 0x29, + 0xb8, 0xe2, 0xc4, 0x19, + 0x01, 0xea, 0xc6, 0x01, + 0x02, 0xe2, 0xc8, 0x31, + 0x02, 0xec, 0x40, 0x31, + 0xff, 0xa1, 0x10, 0x73, + 0x02, 0xe8, 0xda, 0x31, + 0x02, 0xa0, 0x50, 0x31, + 0x00, 0xe2, 0x32, 0x43, + 0x80, 0x39, 0x73, 0x02, + 0x01, 0x44, 0xd4, 0x31, + 0x00, 0xe2, 0xf6, 0x5b, + 0x01, 0x39, 0x73, 0x02, + 0xe0, 0x3c, 0x4d, 0x63, + 0x02, 0x39, 0x73, 0x02, + 0x20, 0x46, 0x46, 0x63, + 0xff, 0xea, 0x52, 0x09, + 0xa8, 0xea, 0x08, 0x5c, + 0x04, 0x92, 0x2d, 0x7b, + 0x01, 0x3a, 0xc1, 0x31, + 0x00, 0x93, 0x2d, 0x63, + 0x01, 0x3b, 0xc1, 0x31, + 0x00, 0x94, 0x37, 0x73, + 0x01, 0xa9, 0x52, 0x11, + 0xff, 0xa9, 0x22, 0x6b, + 0x00, 0xe2, 0x46, 0x43, + 0x10, 0x39, 0x73, 0x02, + 0x04, 0x92, 0x47, 0x7b, + 0xfb, 0x92, 0x25, 0x0b, + 0xff, 0xea, 0x72, 0x0a, + 0x01, 0xa4, 0x41, 0x6b, + 0x02, 0xa8, 0x9c, 0x32, + 0x00, 0xe2, 0x88, 0x59, + 0x10, 0x92, 0xf1, 0x7a, + 0xff, 0xea, 0x1a, 0x5c, + 0x00, 0xe2, 0xf0, 0x42, + 0x04, 0xea, 0x6e, 0x59, + 0x04, 0xea, 0x04, 0x00, + 0x00, 0xe2, 0xf0, 0x42, + 0x04, 0xea, 0x6e, 0x59, + 0x04, 0xea, 0x04, 0x00, + 0x00, 0xe2, 0x28, 0x42, + 0x08, 0x92, 0xe9, 0x7a, + 0xc0, 0x39, 0x5d, 0x7b, + 0x80, 0x39, 0xe9, 0x6a, + 0xff, 0x88, 0x5d, 0x6b, + 0x40, 0x39, 0xe9, 0x6a, + 0x10, 0x92, 0x63, 0x7b, + 0x0a, 0xea, 0x6e, 0x59, + 0x0a, 0xea, 0x04, 0x00, + 0x00, 0xe2, 0x82, 0x5b, + 0x00, 0xe2, 0xc2, 0x43, + 0x50, 0x4b, 0x6a, 0x6b, + 0xbf, 0x3a, 0x74, 0x08, + 0x01, 0xe0, 0xf4, 0x31, + 0xff, 0xea, 0xc0, 0x09, + 0x01, 0x32, 0x65, 0x1a, + 0x00, 0x33, 0x67, 0x22, + 0x04, 0x4d, 0x9b, 0x02, + 0x01, 0xfa, 0xc0, 0x35, + 0x02, 0xa8, 0x90, 0x32, + 0x02, 0xea, 0xb4, 0x00, + 0x33, 0xea, 0x68, 0x59, + 0x33, 0xea, 0x00, 0x00, + 0x02, 0x48, 0x51, 0x31, + 0xff, 0x90, 0x85, 0x68, + 0xff, 0x88, 0x8f, 0x6b, + 0x01, 0xa4, 0x8b, 0x6b, + 0x02, 0xa4, 0x93, 0x6b, + 0x01, 0x84, 0x93, 0x7b, + 0x02, 0x28, 0x19, 0x33, + 0x02, 0xa8, 0x50, 0x36, + 0xff, 0x88, 0x93, 0x73, + 0x00, 0xe2, 0x66, 0x5b, + 0x02, 0xa8, 0x20, 0x33, + 0x04, 0xa4, 0x49, 0x03, + 0xff, 0xea, 0x1a, 0x03, + 0xff, 0x2d, 0x9f, 0x63, + 0x02, 0xa8, 0x58, 0x32, + 0x02, 0xa8, 0x5c, 0x36, + 0x02, 0xa8, 0x40, 0x31, + 0x02, 0x2e, 0x51, 0x31, + 0x02, 0xa0, 0x18, 0x33, + 0x02, 0xa0, 0x5c, 0x36, + 0xc0, 0x39, 0xe9, 0x6a, + 0x04, 0x92, 0x25, 0x03, + 0x20, 0x92, 0xc3, 0x6b, + 0x02, 0xa8, 0x40, 0x31, + 0xc0, 0x3a, 0xc1, 0x09, + 0x00, 0x3b, 0x51, 0x01, + 0xff, 0xea, 0x52, 0x09, + 0x30, 0x3a, 0xc5, 0x09, + 0x3d, 0xe2, 0xc4, 0x29, + 0xb8, 0xe2, 0xc4, 0x19, + 0x01, 0xea, 0xc6, 0x01, + 0x02, 0xe2, 0xc8, 0x31, + 0x02, 0xa0, 0xda, 0x31, + 0x02, 0xa0, 0x50, 0x31, + 0xf7, 0x57, 0xae, 0x08, + 0x08, 0xea, 0x98, 0x00, + 0x01, 0x44, 0xd4, 0x31, + 0xee, 0x00, 0xcc, 0x6b, + 0x02, 0xea, 0xb4, 0x00, + 0xc0, 0xea, 0x72, 0x02, + 0x09, 0x4c, 0xce, 0x7b, + 0x01, 0xea, 0x78, 0x02, + 0x08, 0x4c, 0x06, 0x68, + 0x0b, 0xea, 0x6e, 0x59, + 0x0b, 0xea, 0x04, 0x00, + 0x01, 0x44, 0xd4, 0x31, + 0x20, 0x39, 0x29, 0x7a, + 0x00, 0xe2, 0xe0, 0x5b, + 0x00, 0xe2, 0x28, 0x42, + 0x01, 0x84, 0xe5, 0x7b, + 0x01, 0xa4, 0x49, 0x07, + 0x08, 0x60, 0x30, 0x33, + 0x08, 0x80, 0x41, 0x37, + 0xdf, 0x39, 0x73, 0x0a, + 0xee, 0x00, 0xf2, 0x6b, + 0x05, 0xea, 0xb4, 0x00, + 0x33, 0xea, 0x68, 0x59, + 0x33, 0xea, 
0x00, 0x00, + 0x00, 0xe2, 0x88, 0x59, + 0x00, 0xe2, 0xf0, 0x42, + 0xff, 0x42, 0x02, 0x6c, + 0x01, 0x41, 0xf6, 0x6b, + 0x02, 0x41, 0xf6, 0x7b, + 0xff, 0x42, 0x02, 0x6c, + 0x01, 0x41, 0xf6, 0x6b, + 0x02, 0x41, 0xf6, 0x7b, + 0xff, 0x42, 0x02, 0x7c, + 0x04, 0x4c, 0xf6, 0x6b, + 0xe0, 0x41, 0x78, 0x0e, + 0x01, 0x44, 0xd4, 0x31, + 0xff, 0x42, 0x0a, 0x7c, + 0x04, 0x4c, 0x0a, 0x6c, + 0xe0, 0x41, 0x78, 0x0a, + 0xe0, 0x3c, 0x29, 0x62, + 0xff, 0xea, 0xca, 0x09, + 0x01, 0xe2, 0xc8, 0x31, + 0x01, 0x46, 0xda, 0x35, + 0x01, 0x44, 0xd4, 0x35, + 0x10, 0xea, 0x80, 0x00, + 0x01, 0xe2, 0x6e, 0x36, + 0x04, 0xa6, 0x22, 0x7c, + 0xff, 0xea, 0x5a, 0x09, + 0xff, 0xea, 0x4c, 0x0d, + 0x01, 0xa6, 0x4e, 0x6c, + 0x10, 0xad, 0x84, 0x78, + 0x80, 0xad, 0x46, 0x6c, + 0x08, 0xad, 0x84, 0x68, + 0x20, 0x19, 0x3a, 0x7c, + 0x80, 0xea, 0xb2, 0x01, + 0x11, 0x00, 0x00, 0x10, + 0x02, 0xa6, 0x36, 0x7c, + 0xff, 0xea, 0xb2, 0x0d, + 0x11, 0x00, 0x00, 0x10, + 0xff, 0xea, 0xb2, 0x09, + 0x04, 0x84, 0xf9, 0x30, + 0x00, 0xea, 0x08, 0x81, + 0xff, 0xea, 0xd4, 0x09, + 0x02, 0x84, 0xf9, 0x88, + 0x0d, 0xea, 0x5a, 0x01, + 0x04, 0xa6, 0x4c, 0x05, + 0x04, 0xa6, 0x84, 0x78, + 0xff, 0xea, 0x5a, 0x09, + 0x03, 0x84, 0x59, 0x89, + 0x03, 0xea, 0x4c, 0x01, + 0x80, 0x1a, 0x84, 0x78, + 0x08, 0x19, 0x84, 0x78, + 0x08, 0xb0, 0xe0, 0x30, + 0x04, 0xb0, 0xe0, 0x30, + 0x03, 0xb0, 0xf0, 0x30, + 0x01, 0xb0, 0x06, 0x33, + 0x7f, 0x83, 0xe9, 0x08, + 0x04, 0xac, 0x58, 0x19, + 0xff, 0xea, 0xc0, 0x09, + 0x04, 0x84, 0x09, 0x9b, + 0x00, 0x85, 0x0b, 0x23, + 0x00, 0x86, 0x0d, 0x23, + 0x00, 0x87, 0x0f, 0x23, + 0x01, 0x84, 0xc5, 0x31, + 0x80, 0x83, 0x71, 0x7c, + 0x02, 0xe2, 0xc4, 0x01, + 0xff, 0xea, 0x4c, 0x09, + 0x01, 0xe2, 0x36, 0x30, + 0xc8, 0x19, 0x32, 0x00, + 0x88, 0x19, 0x32, 0x00, + 0x01, 0xac, 0xd4, 0x99, + 0x00, 0xe2, 0x84, 0x50, + 0xfe, 0xa6, 0x4c, 0x0d, + 0x0b, 0x98, 0xe1, 0x30, + 0xfd, 0xa4, 0x49, 0x09, + 0x80, 0xa3, 0x85, 0x7c, + 0x02, 0xa4, 0x48, 0x01, + 0x01, 0xa4, 0x36, 0x30, + 0xa8, 0xea, 0x32, 0x00, + 0xfd, 0xa4, 0x49, 0x0b, + 0x05, 0xa3, 0x07, 0x33, + 0x80, 0x83, 0x91, 0x6c, + 0x02, 0xea, 0x4c, 0x05, + 0xff, 0xea, 0x4c, 0x0d, + 0x00, 0xe2, 0x60, 0x59, + 0x02, 0xa6, 0x24, 0x6c, + 0x80, 0xf9, 0xf2, 0x05, + 0xc0, 0x39, 0x9f, 0x7c, + 0x03, 0xea, 0x6e, 0x59, + 0x03, 0xea, 0x04, 0x00, + 0x20, 0x39, 0xc3, 0x7c, + 0x01, 0x84, 0xa9, 0x6c, + 0x06, 0xea, 0x6e, 0x59, + 0x06, 0xea, 0x04, 0x00, + 0x00, 0xe2, 0xc6, 0x44, + 0x01, 0x00, 0x6c, 0x32, + 0xee, 0x00, 0xb2, 0x6c, + 0x05, 0xea, 0xb4, 0x00, + 0x33, 0xea, 0x68, 0x59, + 0x33, 0xea, 0x00, 0x00, + 0x80, 0x3d, 0x7a, 0x00, + 0xfc, 0x42, 0xb4, 0x7c, + 0x7f, 0x3d, 0x7a, 0x08, + 0x00, 0x36, 0x69, 0x59, + 0x01, 0x36, 0x01, 0x30, + 0x09, 0xea, 0x6e, 0x59, + 0x09, 0xea, 0x04, 0x00, + 0x00, 0xe2, 0x28, 0x42, + 0x01, 0xa4, 0xa9, 0x6c, + 0x00, 0xe2, 0x7c, 0x5c, + 0x20, 0x39, 0x73, 0x02, + 0x01, 0x00, 0x6c, 0x32, + 0x02, 0xa6, 0xce, 0x7c, + 0x00, 0xe2, 0x92, 0x5c, + 0x00, 0xe2, 0x76, 0x58, + 0x00, 0xe2, 0x86, 0x58, + 0x00, 0xe2, 0x5a, 0x58, + 0x00, 0x36, 0x69, 0x59, + 0x01, 0x36, 0x01, 0x30, + 0x20, 0x19, 0xce, 0x6c, + 0x00, 0xe2, 0xfe, 0x5c, + 0x04, 0x19, 0xe8, 0x6c, + 0x02, 0x19, 0x32, 0x00, + 0x01, 0x84, 0xe9, 0x7c, + 0x01, 0x1b, 0xe2, 0x7c, + 0x01, 0x1a, 0xe8, 0x6c, + 0x00, 0xe2, 0x98, 0x44, + 0x80, 0x4b, 0xee, 0x6c, + 0x01, 0x4c, 0xea, 0x7c, + 0x03, 0x42, 0x98, 0x6c, + 0x00, 0xe2, 0x1e, 0x5c, + 0x80, 0xf9, 0xf2, 0x01, + 0x04, 0x39, 0x29, 0x7a, + 0x00, 0xe2, 0x28, 0x42, + 0x08, 0x5d, 0x06, 0x6d, + 0x00, 0xe2, 0x76, 0x58, + 0x00, 0x36, 0x69, 0x59, + 0x01, 0x36, 0x01, 0x30, + 0x02, 0x1b, 0xf6, 0x7c, + 0x08, 0x5d, 0x04, 0x7d, + 
0x03, 0x68, 0x00, 0x37, + 0x01, 0x84, 0x09, 0x07, + 0x80, 0x1b, 0x10, 0x7d, + 0x80, 0x84, 0x11, 0x6d, + 0xff, 0x85, 0x0b, 0x1b, + 0xff, 0x86, 0x0d, 0x23, + 0xff, 0x87, 0x0f, 0x23, + 0xf8, 0x1b, 0x08, 0x0b, + 0xff, 0xea, 0x06, 0x0b, + 0x03, 0x68, 0x00, 0x37, + 0x00, 0xe2, 0xd6, 0x58, + 0x10, 0xea, 0x18, 0x00, + 0xf9, 0xd9, 0xb2, 0x0d, + 0x01, 0xd9, 0xb2, 0x05, + 0x01, 0x52, 0x48, 0x31, + 0x20, 0xa4, 0x3a, 0x7d, + 0x20, 0x5b, 0x3a, 0x7d, + 0x80, 0xf9, 0x48, 0x7d, + 0x02, 0xea, 0xb4, 0x00, + 0x11, 0x00, 0x00, 0x10, + 0x04, 0x19, 0x54, 0x7d, + 0xdf, 0x19, 0x32, 0x08, + 0x60, 0x5b, 0x54, 0x6d, + 0x01, 0x4c, 0x2e, 0x7d, + 0x20, 0x19, 0x32, 0x00, + 0x01, 0xd9, 0xb2, 0x05, + 0x02, 0xea, 0xb4, 0x00, + 0x01, 0xd9, 0xb2, 0x05, + 0x10, 0x5b, 0x4c, 0x6d, + 0x08, 0x5b, 0x56, 0x6d, + 0x20, 0x5b, 0x46, 0x6d, + 0x02, 0x5b, 0x76, 0x6d, + 0x0e, 0xea, 0x6e, 0x59, + 0x0e, 0xea, 0x04, 0x00, + 0x80, 0xf9, 0x36, 0x6d, + 0xdf, 0x5c, 0xb8, 0x08, + 0x01, 0xd9, 0xb2, 0x05, + 0x01, 0xa4, 0x37, 0x6e, + 0x00, 0xe2, 0x7c, 0x5c, + 0x00, 0xe2, 0x80, 0x5d, + 0x01, 0x90, 0x21, 0x1b, + 0x01, 0xd9, 0xb2, 0x05, + 0x00, 0xe2, 0x66, 0x5b, + 0xf3, 0x96, 0xd5, 0x19, + 0x00, 0xe2, 0x64, 0x55, + 0x80, 0x96, 0x65, 0x6d, + 0x0f, 0xea, 0x6e, 0x59, + 0x0f, 0xea, 0x04, 0x00, + 0x00, 0xe2, 0x6c, 0x45, + 0x04, 0x8c, 0xe1, 0x30, + 0x01, 0xea, 0xf2, 0x00, + 0x02, 0xea, 0x36, 0x00, + 0xa8, 0xea, 0x32, 0x00, + 0xff, 0x97, 0x73, 0x7d, + 0x14, 0xea, 0x6e, 0x59, + 0x14, 0xea, 0x04, 0x00, + 0x00, 0xe2, 0xe2, 0x5d, + 0x01, 0xd9, 0xb2, 0x05, + 0x09, 0x80, 0xe1, 0x30, + 0x02, 0xea, 0x36, 0x00, + 0xa8, 0xea, 0x32, 0x00, + 0x00, 0xe2, 0xda, 0x5d, + 0x01, 0xd9, 0xb2, 0x05, + 0x02, 0xa6, 0x90, 0x7d, + 0x00, 0xe2, 0x60, 0x59, + 0x20, 0x5b, 0x9e, 0x6d, + 0xfc, 0x42, 0x8a, 0x7d, + 0x10, 0x40, 0x8c, 0x6d, + 0x20, 0x4d, 0x8e, 0x7d, + 0x08, 0x5d, 0x9e, 0x6d, + 0x02, 0xa6, 0x24, 0x6c, + 0x00, 0xe2, 0x60, 0x59, + 0x20, 0x5b, 0x9e, 0x6d, + 0x01, 0x1b, 0xbe, 0x6d, + 0xfc, 0x42, 0x9a, 0x7d, + 0x10, 0x40, 0x9c, 0x6d, + 0x20, 0x4d, 0x84, 0x78, + 0x08, 0x5d, 0x84, 0x78, + 0x02, 0x19, 0x32, 0x00, + 0x01, 0x5b, 0x40, 0x31, + 0x00, 0xe2, 0xfe, 0x5c, + 0x00, 0xe2, 0xe0, 0x5b, + 0x20, 0xea, 0xb6, 0x00, + 0x00, 0xe2, 0x1e, 0x5c, + 0x20, 0x5c, 0xb8, 0x00, + 0x04, 0x19, 0xb4, 0x6d, + 0x01, 0x1a, 0xb4, 0x6d, + 0x00, 0xe2, 0x60, 0x59, + 0x01, 0x1a, 0x84, 0x78, + 0x80, 0xf9, 0xf2, 0x01, + 0x20, 0xa0, 0x18, 0x7e, + 0xff, 0x90, 0x21, 0x1b, + 0x08, 0x92, 0x77, 0x6b, + 0x02, 0xea, 0xb4, 0x04, + 0x01, 0xa4, 0x49, 0x03, + 0x40, 0x5b, 0xce, 0x6d, + 0x00, 0xe2, 0x60, 0x59, + 0x40, 0x5b, 0xce, 0x6d, + 0x04, 0x5d, 0x38, 0x7e, + 0x01, 0x1a, 0x38, 0x7e, + 0x20, 0x4d, 0x84, 0x78, + 0x40, 0x5b, 0x18, 0x7e, + 0x04, 0x5d, 0x38, 0x7e, + 0x01, 0x1a, 0x38, 0x7e, + 0x80, 0xf9, 0xf2, 0x01, + 0xff, 0x90, 0x21, 0x1b, + 0x08, 0x92, 0x77, 0x6b, + 0x02, 0xea, 0xb4, 0x04, + 0x00, 0xe2, 0x60, 0x59, + 0x01, 0x1b, 0x84, 0x78, + 0x80, 0xf9, 0xf2, 0x01, + 0x02, 0xea, 0xb4, 0x04, + 0x00, 0xe2, 0x60, 0x59, + 0x01, 0x1b, 0xf6, 0x6d, + 0x40, 0x5b, 0x04, 0x7e, + 0x01, 0x1b, 0xf6, 0x6d, + 0x02, 0x19, 0x32, 0x00, + 0x01, 0x1a, 0x84, 0x78, + 0x80, 0xf9, 0xf2, 0x01, + 0xff, 0xea, 0x10, 0x03, + 0x08, 0x92, 0x25, 0x03, + 0x00, 0xe2, 0x76, 0x43, + 0x01, 0x1a, 0x00, 0x7e, + 0x40, 0x5b, 0xfc, 0x7d, + 0x01, 0x1a, 0xea, 0x6d, + 0xfc, 0x42, 0x84, 0x78, + 0x01, 0x1a, 0x04, 0x6e, + 0x10, 0xea, 0x6e, 0x59, + 0x10, 0xea, 0x04, 0x00, + 0xfc, 0x42, 0x84, 0x78, + 0x10, 0x40, 0x0a, 0x6e, + 0x20, 0x4d, 0x84, 0x78, + 0x40, 0x5b, 0xea, 0x6d, + 0x01, 0x1a, 0x84, 0x78, + 0x01, 0x90, 0x21, 0x1b, + 0x30, 0x3f, 0xc0, 0x09, + 0x30, 0xe0, 0x84, 
0x60, + 0x40, 0x4b, 0x84, 0x68, + 0xff, 0xea, 0x52, 0x01, + 0xee, 0x00, 0x20, 0x6e, + 0x80, 0xf9, 0xf2, 0x01, + 0xff, 0x90, 0x21, 0x1b, + 0x02, 0xea, 0xb4, 0x00, + 0x20, 0xea, 0x9a, 0x00, + 0x04, 0x41, 0x26, 0x7e, + 0x08, 0xea, 0x98, 0x00, + 0x08, 0x57, 0xae, 0x00, + 0xf3, 0x42, 0x30, 0x6e, + 0x12, 0xea, 0x6e, 0x59, + 0x12, 0xea, 0x04, 0x00, + 0x00, 0xe2, 0x28, 0x42, + 0x0d, 0xea, 0x6e, 0x59, + 0x0d, 0xea, 0x04, 0x00, + 0x00, 0xe2, 0x28, 0x42, + 0x01, 0x90, 0x21, 0x1b, + 0x11, 0xea, 0x6e, 0x59, + 0x11, 0xea, 0x04, 0x00, + 0x00, 0xe2, 0x66, 0x5b, + 0x08, 0x5a, 0xb4, 0x00, + 0x00, 0xe2, 0x5e, 0x5e, + 0xa8, 0xea, 0x32, 0x00, + 0x00, 0xe2, 0x60, 0x59, + 0x80, 0x1a, 0x4c, 0x7e, + 0x00, 0xe2, 0x5e, 0x5e, + 0x80, 0x19, 0x32, 0x00, + 0x40, 0x5b, 0x52, 0x6e, + 0x08, 0x5a, 0x52, 0x7e, + 0x20, 0x4d, 0x84, 0x78, + 0x02, 0x84, 0x09, 0x03, + 0x40, 0x5b, 0x18, 0x7e, + 0xff, 0x90, 0x21, 0x1b, + 0x80, 0xf9, 0xf2, 0x01, + 0x08, 0x92, 0x77, 0x6b, + 0x02, 0xea, 0xb4, 0x04, + 0x01, 0x40, 0xe1, 0x30, + 0x05, 0x41, 0xe3, 0x98, + 0x01, 0xe0, 0xf4, 0x31, + 0xff, 0xea, 0xc0, 0x09, + 0x00, 0x42, 0xe5, 0x20, + 0x00, 0x43, 0xe7, 0x20, + 0x01, 0xfa, 0xc0, 0x31, + 0x04, 0xea, 0xe8, 0x30, + 0xff, 0xea, 0xf0, 0x08, + 0x02, 0xea, 0xf2, 0x00, + 0xff, 0xea, 0xf4, 0x0c +}; + +typedef int ahd_patch_func_t (struct ahd_softc *ahd); +static ahd_patch_func_t ahd_patch23_func; + +static int +ahd_patch23_func(struct ahd_softc *ahd) +{ + return ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0); +} + +static ahd_patch_func_t ahd_patch22_func; + +static int +ahd_patch22_func(struct ahd_softc *ahd) +{ + return ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) == 0); +} + +static ahd_patch_func_t ahd_patch21_func; + +static int +ahd_patch21_func(struct ahd_softc *ahd) +{ + return ((ahd->flags & AHD_INITIATORROLE) != 0); +} + +static ahd_patch_func_t ahd_patch20_func; + +static int +ahd_patch20_func(struct ahd_softc *ahd) +{ + return ((ahd->flags & AHD_TARGETROLE) != 0); +} + +static ahd_patch_func_t ahd_patch19_func; + +static int +ahd_patch19_func(struct ahd_softc *ahd) +{ + return ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0); +} + +static ahd_patch_func_t ahd_patch18_func; + +static int +ahd_patch18_func(struct ahd_softc *ahd) +{ + return ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0); +} + +static ahd_patch_func_t ahd_patch17_func; + +static int +ahd_patch17_func(struct ahd_softc *ahd) +{ + return ((ahd->flags & AHD_39BIT_ADDRESSING) != 0); +} + +static ahd_patch_func_t ahd_patch16_func; + +static int +ahd_patch16_func(struct ahd_softc *ahd) +{ + return ((ahd->flags & AHD_64BIT_ADDRESSING) != 0); +} + +static ahd_patch_func_t ahd_patch15_func; + +static int +ahd_patch15_func(struct ahd_softc *ahd) +{ + return ((ahd->features & AHD_NEW_DFCNTRL_OPTS) == 0); +} + +static ahd_patch_func_t ahd_patch14_func; + +static int +ahd_patch14_func(struct ahd_softc *ahd) +{ + return ((ahd->bugs & AHD_REG_SLOW_SETTLE_BUG) != 0); +} + +static ahd_patch_func_t ahd_patch13_func; + +static int +ahd_patch13_func(struct ahd_softc *ahd) +{ + return ((ahd->features & AHD_RTI) == 0); +} + +static ahd_patch_func_t ahd_patch12_func; + +static int +ahd_patch12_func(struct ahd_softc *ahd) +{ + return ((ahd->bugs & AHD_EARLY_REQ_BUG) != 0); +} + +static ahd_patch_func_t ahd_patch11_func; + +static int +ahd_patch11_func(struct ahd_softc *ahd) +{ + return ((ahd->bugs & AHD_BUSFREEREV_BUG) == 0); +} + +static ahd_patch_func_t ahd_patch10_func; + +static int +ahd_patch10_func(struct ahd_softc *ahd) +{ + return ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0); +} + +static ahd_patch_func_t 
ahd_patch9_func; + +static int +ahd_patch9_func(struct ahd_softc *ahd) +{ + return ((ahd->features & AHD_FAST_CDB_DELIVERY) != 0); +} + +static ahd_patch_func_t ahd_patch8_func; + +static int +ahd_patch8_func(struct ahd_softc *ahd) +{ + return ((ahd->bugs & AHD_LQO_ATNO_BUG) != 0); +} + +static ahd_patch_func_t ahd_patch7_func; + +static int +ahd_patch7_func(struct ahd_softc *ahd) +{ + return ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0); +} + +static ahd_patch_func_t ahd_patch6_func; + +static int +ahd_patch6_func(struct ahd_softc *ahd) +{ + return ((ahd->bugs & AHD_NONPACKFIFO_BUG) != 0); +} + +static ahd_patch_func_t ahd_patch5_func; + +static int +ahd_patch5_func(struct ahd_softc *ahd) +{ + return ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0); +} + +static ahd_patch_func_t ahd_patch4_func; + +static int +ahd_patch4_func(struct ahd_softc *ahd) +{ + return ((ahd->bugs & AHD_PKT_LUN_BUG) != 0); +} + +static ahd_patch_func_t ahd_patch3_func; + +static int +ahd_patch3_func(struct ahd_softc *ahd) +{ + return ((ahd->bugs & AHD_FAINT_LED_BUG) != 0); +} + +static ahd_patch_func_t ahd_patch2_func; + +static int +ahd_patch2_func(struct ahd_softc *ahd) +{ + return ((ahd->bugs & AHD_SET_MODE_BUG) != 0); +} + +static ahd_patch_func_t ahd_patch1_func; + +static int +ahd_patch1_func(struct ahd_softc *ahd) +{ + return ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0); +} + +static ahd_patch_func_t ahd_patch0_func; + +static int +ahd_patch0_func(struct ahd_softc *ahd) +{ + return (0); +} + +static const struct patch { + ahd_patch_func_t *patch_func; + uint32_t begin :10, + skip_instr :10, + skip_patch :12; +} patches[] = { + { ahd_patch1_func, 0, 3, 3 }, + { ahd_patch1_func, 1, 1, 2 }, + { ahd_patch0_func, 2, 1, 1 }, + { ahd_patch1_func, 3, 3, 3 }, + { ahd_patch1_func, 4, 1, 2 }, + { ahd_patch0_func, 5, 1, 1 }, + { ahd_patch2_func, 6, 1, 2 }, + { ahd_patch0_func, 7, 1, 1 }, + { ahd_patch3_func, 36, 5, 1 }, + { ahd_patch2_func, 45, 1, 2 }, + { ahd_patch0_func, 46, 1, 1 }, + { ahd_patch1_func, 53, 1, 2 }, + { ahd_patch0_func, 54, 1, 1 }, + { ahd_patch2_func, 59, 1, 2 }, + { ahd_patch0_func, 60, 1, 1 }, + { ahd_patch2_func, 63, 1, 2 }, + { ahd_patch0_func, 64, 1, 1 }, + { ahd_patch2_func, 67, 1, 2 }, + { ahd_patch0_func, 68, 1, 1 }, + { ahd_patch4_func, 115, 1, 1 }, + { ahd_patch2_func, 180, 3, 1 }, + { ahd_patch1_func, 183, 2, 1 }, + { ahd_patch5_func, 185, 1, 1 }, + { ahd_patch2_func, 194, 1, 2 }, + { ahd_patch0_func, 195, 1, 1 }, + { ahd_patch6_func, 196, 2, 2 }, + { ahd_patch0_func, 198, 6, 3 }, + { ahd_patch2_func, 201, 1, 2 }, + { ahd_patch0_func, 202, 1, 1 }, + { ahd_patch2_func, 205, 1, 2 }, + { ahd_patch0_func, 206, 1, 1 }, + { ahd_patch3_func, 208, 1, 1 }, + { ahd_patch7_func, 209, 3, 1 }, + { ahd_patch3_func, 218, 1, 1 }, + { ahd_patch5_func, 219, 16, 2 }, + { ahd_patch0_func, 235, 1, 1 }, + { ahd_patch8_func, 260, 2, 1 }, + { ahd_patch1_func, 264, 1, 2 }, + { ahd_patch0_func, 265, 1, 1 }, + { ahd_patch7_func, 268, 3, 1 }, + { ahd_patch1_func, 283, 1, 2 }, + { ahd_patch0_func, 284, 1, 1 }, + { ahd_patch1_func, 287, 1, 2 }, + { ahd_patch0_func, 288, 1, 1 }, + { ahd_patch2_func, 291, 1, 2 }, + { ahd_patch0_func, 292, 1, 1 }, + { ahd_patch9_func, 305, 2, 2 }, + { ahd_patch0_func, 307, 1, 1 }, + { ahd_patch1_func, 349, 1, 2 }, + { ahd_patch0_func, 350, 1, 1 }, + { ahd_patch2_func, 358, 1, 2 }, + { ahd_patch0_func, 359, 1, 1 }, + { ahd_patch2_func, 362, 1, 2 }, + { ahd_patch0_func, 363, 1, 1 }, + { ahd_patch1_func, 369, 1, 2 }, + { ahd_patch0_func, 370, 1, 1 }, + { ahd_patch1_func, 372, 1, 2 }, + { ahd_patch0_func, 
373, 1, 1 }, + { ahd_patch10_func, 392, 1, 1 }, + { ahd_patch10_func, 395, 1, 1 }, + { ahd_patch10_func, 397, 1, 1 }, + { ahd_patch10_func, 409, 1, 1 }, + { ahd_patch1_func, 419, 1, 2 }, + { ahd_patch0_func, 420, 1, 1 }, + { ahd_patch1_func, 422, 1, 2 }, + { ahd_patch0_func, 423, 1, 1 }, + { ahd_patch1_func, 431, 1, 2 }, + { ahd_patch0_func, 432, 1, 1 }, + { ahd_patch2_func, 445, 1, 2 }, + { ahd_patch0_func, 446, 1, 1 }, + { ahd_patch11_func, 482, 1, 1 }, + { ahd_patch1_func, 490, 1, 2 }, + { ahd_patch0_func, 491, 1, 1 }, + { ahd_patch2_func, 503, 1, 2 }, + { ahd_patch0_func, 504, 1, 1 }, + { ahd_patch12_func, 507, 6, 2 }, + { ahd_patch0_func, 513, 1, 1 }, + { ahd_patch13_func, 534, 7, 1 }, + { ahd_patch14_func, 543, 1, 1 }, + { ahd_patch15_func, 552, 1, 1 }, + { ahd_patch16_func, 553, 1, 2 }, + { ahd_patch0_func, 554, 1, 1 }, + { ahd_patch17_func, 557, 1, 1 }, + { ahd_patch16_func, 558, 1, 1 }, + { ahd_patch18_func, 569, 1, 2 }, + { ahd_patch0_func, 570, 1, 1 }, + { ahd_patch1_func, 589, 1, 2 }, + { ahd_patch0_func, 590, 1, 1 }, + { ahd_patch1_func, 593, 1, 2 }, + { ahd_patch0_func, 594, 1, 1 }, + { ahd_patch2_func, 599, 1, 2 }, + { ahd_patch0_func, 600, 1, 1 }, + { ahd_patch2_func, 604, 1, 2 }, + { ahd_patch0_func, 605, 1, 1 }, + { ahd_patch1_func, 606, 1, 2 }, + { ahd_patch0_func, 607, 1, 1 }, + { ahd_patch2_func, 618, 1, 2 }, + { ahd_patch0_func, 619, 1, 1 }, + { ahd_patch19_func, 623, 1, 1 }, + { ahd_patch20_func, 628, 1, 1 }, + { ahd_patch21_func, 629, 2, 1 }, + { ahd_patch20_func, 633, 1, 2 }, + { ahd_patch0_func, 634, 1, 1 }, + { ahd_patch2_func, 637, 1, 2 }, + { ahd_patch0_func, 638, 1, 1 }, + { ahd_patch2_func, 653, 1, 2 }, + { ahd_patch0_func, 654, 1, 1 }, + { ahd_patch13_func, 655, 14, 1 }, + { ahd_patch1_func, 673, 1, 2 }, + { ahd_patch0_func, 674, 1, 1 }, + { ahd_patch13_func, 675, 1, 1 }, + { ahd_patch1_func, 687, 1, 2 }, + { ahd_patch0_func, 688, 1, 1 }, + { ahd_patch1_func, 695, 1, 2 }, + { ahd_patch0_func, 696, 1, 1 }, + { ahd_patch19_func, 719, 1, 1 }, + { ahd_patch19_func, 757, 1, 1 }, + { ahd_patch1_func, 768, 1, 2 }, + { ahd_patch0_func, 769, 1, 1 }, + { ahd_patch7_func, 785, 3, 1 }, + { ahd_patch1_func, 789, 1, 2 }, + { ahd_patch0_func, 790, 1, 1 }, + { ahd_patch1_func, 792, 1, 2 }, + { ahd_patch0_func, 793, 1, 1 }, + { ahd_patch1_func, 796, 1, 2 }, + { ahd_patch0_func, 797, 1, 1 }, + { ahd_patch22_func, 799, 1, 2 }, + { ahd_patch0_func, 800, 2, 1 }, + { ahd_patch23_func, 803, 4, 2 }, + { ahd_patch0_func, 807, 1, 1 }, + { ahd_patch23_func, 815, 11, 1 } +}; + +static const struct cs { + uint16_t begin; + uint16_t end; +} critical_sections[] = { + { 17, 30 }, + { 47, 58 }, + { 61, 63 }, + { 65, 66 }, + { 72, 92 }, + { 110, 142 }, + { 143, 180 }, + { 185, 193 }, + { 218, 274 }, + { 435, 443 }, + { 453, 455 }, + { 458, 467 }, + { 719, 749 }, + { 759, 763 } +}; + +#define NUM_CRITICAL_SECTIONS ARRAY_SIZE(critical_sections) diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h new file mode 100644 index 000000000..9bc755a0a --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7xxx.h @@ -0,0 +1,1273 @@ +/* + * Core definitions and data structures shareable across OS platforms. + * + * Copyright (c) 1994-2001 Justin T. Gibbs. + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.h#85 $ + * + * $FreeBSD$ + */ + +#ifndef _AIC7XXX_H_ +#define _AIC7XXX_H_ + +/* Register Definitions */ +#include "aic7xxx_reg.h" + +/************************* Forward Declarations *******************************/ +struct ahc_platform_data; +struct scb_platform_data; +struct seeprom_descriptor; + +/****************************** Useful Macros *********************************/ +#ifndef TRUE +#define TRUE 1 +#endif +#ifndef FALSE +#define FALSE 0 +#endif + +#define ALL_CHANNELS '\0' +#define ALL_TARGETS_MASK 0xFFFF +#define INITIATOR_WILDCARD (~0) + +#define SCSIID_TARGET(ahc, scsiid) \ + (((scsiid) & ((((ahc)->features & AHC_TWIN) != 0) ? TWIN_TID : TID)) \ + >> TID_SHIFT) +#define SCSIID_OUR_ID(scsiid) \ + ((scsiid) & OID) +#define SCSIID_CHANNEL(ahc, scsiid) \ + ((((ahc)->features & AHC_TWIN) != 0) \ + ? ((((scsiid) & TWIN_CHNLB) != 0) ? 'B' : 'A') \ + : 'A') +#define SCB_IS_SCSIBUS_B(ahc, scb) \ + (SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid) == 'B') +#define SCB_GET_OUR_ID(scb) \ + SCSIID_OUR_ID((scb)->hscb->scsiid) +#define SCB_GET_TARGET(ahc, scb) \ + SCSIID_TARGET((ahc), (scb)->hscb->scsiid) +#define SCB_GET_CHANNEL(ahc, scb) \ + SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid) +#define SCB_GET_LUN(scb) \ + ((scb)->hscb->lun & LID) +#define SCB_GET_TARGET_OFFSET(ahc, scb) \ + (SCB_GET_TARGET(ahc, scb) + (SCB_IS_SCSIBUS_B(ahc, scb) ? 
8 : 0)) +#define SCB_GET_TARGET_MASK(ahc, scb) \ + (0x01 << (SCB_GET_TARGET_OFFSET(ahc, scb))) +#ifdef AHC_DEBUG +#define SCB_IS_SILENT(scb) \ + ((ahc_debug & AHC_SHOW_MASKED_ERRORS) == 0 \ + && (((scb)->flags & SCB_SILENT) != 0)) +#else +#define SCB_IS_SILENT(scb) \ + (((scb)->flags & SCB_SILENT) != 0) +#endif +#define TCL_TARGET_OFFSET(tcl) \ + ((((tcl) >> 4) & TID) >> 4) +#define TCL_LUN(tcl) \ + (tcl & (AHC_NUM_LUNS - 1)) +#define BUILD_TCL(scsiid, lun) \ + ((lun) | (((scsiid) & TID) << 4)) + +#ifndef AHC_TARGET_MODE +#undef AHC_TMODE_ENABLE +#define AHC_TMODE_ENABLE 0 +#endif + +/**************************** Driver Constants ********************************/ +/* + * The maximum number of supported targets. + */ +#define AHC_NUM_TARGETS 16 + +/* + * The maximum number of supported luns. + * The identify message only supports 64 luns in SPI3. + * You can have 2^64 luns when information unit transfers are enabled, + * but it is doubtful this driver will ever support IUTs. + */ +#define AHC_NUM_LUNS 64 + +/* + * The maximum transfer per S/G segment. + */ +#define AHC_MAXTRANSFER_SIZE 0x00ffffff /* limited by 24bit counter */ + +/* + * The maximum amount of SCB storage in hardware on a controller. + * This value represents an upper bound. Controllers vary in the number + * they actually support. + */ +#define AHC_SCB_MAX 255 + +/* + * The maximum number of concurrent transactions supported per driver instance. + * Sequencer Control Blocks (SCBs) store per-transaction information. Although + * the space for SCBs on the host adapter varies by model, the driver will + * page the SCBs between host and controller memory as needed. We are limited + * to 253 because: + * 1) The 8bit nature of the RISC engine holds us to an 8bit value. + * 2) We reserve one value, 255, to represent the invalid element. + * 3) Our input queue scheme requires one SCB to always be reserved + * in advance of queuing any SCBs. This takes us down to 254. + * 4) To handle our output queue correctly on machines that only + * support 32bit stores, we must clear the array 4 bytes at a + * time. To avoid colliding with a DMA write from the sequencer, + * we must be sure that 4 slots are empty when we write to clear + * the queue. This reduces us to 253 SCBs: 1 that just completed + * and the known three additional empty slots in the queue that + * precede it. + */ +#define AHC_MAX_QUEUE 253 + +/* + * The maximum amount of SCB storage we allocate in host memory. This + * number should reflect the 1 additional SCB we require to handle our + * qinfifo mechanism. + */ +#define AHC_SCB_MAX_ALLOC (AHC_MAX_QUEUE+1) + +/* + * Ring Buffer of incoming target commands. + * We allocate 256 to simplify the logic in the sequencer + * by using the natural wrap point of an 8bit counter. + */ +#define AHC_TMODE_CMDS 256 + +/* Reset line assertion time in us */ +#define AHC_BUSRESET_DELAY 25 + +/******************* Chip Characteristics/Operating Settings *****************/ +/* + * Chip Type + * The chip order is from least sophisticated to most sophisticated. 
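+ *
+ * Editor's aside, for illustration only (not part of the upstream header):
+ * the chip identifier and the bus type share this one enum, so callers are
+ * expected to separate the two with the AHC_CHIPID_MASK and AHC_BUS_MASK
+ * values defined below, along the lines of
+ *
+ *	if ((chip & AHC_BUS_MASK) == AHC_PCI)
+ *		id = chip & AHC_CHIPID_MASK;
+ *
+ * where "chip" and "id" are hypothetical locals, not fields named by
+ * this header.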
+ */ +typedef enum { + AHC_NONE = 0x0000, + AHC_CHIPID_MASK = 0x00FF, + AHC_AIC7770 = 0x0001, + AHC_AIC7850 = 0x0002, + AHC_AIC7855 = 0x0003, + AHC_AIC7859 = 0x0004, + AHC_AIC7860 = 0x0005, + AHC_AIC7870 = 0x0006, + AHC_AIC7880 = 0x0007, + AHC_AIC7895 = 0x0008, + AHC_AIC7895C = 0x0009, + AHC_AIC7890 = 0x000a, + AHC_AIC7896 = 0x000b, + AHC_AIC7892 = 0x000c, + AHC_AIC7899 = 0x000d, + AHC_VL = 0x0100, /* Bus type VL */ + AHC_EISA = 0x0200, /* Bus type EISA */ + AHC_PCI = 0x0400, /* Bus type PCI */ + AHC_BUS_MASK = 0x0F00 +} ahc_chip; + +/* + * Features available in each chip type. + */ +typedef enum { + AHC_FENONE = 0x00000, + AHC_ULTRA = 0x00001, /* Supports 20MHz Transfers */ + AHC_ULTRA2 = 0x00002, /* Supports 40MHz Transfers */ + AHC_WIDE = 0x00004, /* Wide Channel */ + AHC_TWIN = 0x00008, /* Twin Channel */ + AHC_MORE_SRAM = 0x00010, /* 80 bytes instead of 64 */ + AHC_CMD_CHAN = 0x00020, /* Has a Command DMA Channel */ + AHC_QUEUE_REGS = 0x00040, /* Has Queue management registers */ + AHC_SG_PRELOAD = 0x00080, /* Can perform auto-SG preload */ + AHC_SPIOCAP = 0x00100, /* Has a Serial Port I/O Cap Register */ + AHC_MULTI_TID = 0x00200, /* Has bitmask of TIDs for select-in */ + AHC_HS_MAILBOX = 0x00400, /* Has HS_MAILBOX register */ + AHC_DT = 0x00800, /* Double Transition transfers */ + AHC_NEW_TERMCTL = 0x01000, /* Newer termination scheme */ + AHC_MULTI_FUNC = 0x02000, /* Multi-Function Twin Channel Device */ + AHC_LARGE_SCBS = 0x04000, /* 64byte SCBs */ + AHC_AUTORATE = 0x08000, /* Automatic update of SCSIRATE/OFFSET*/ + AHC_AUTOPAUSE = 0x10000, /* Automatic pause on register access */ + AHC_TARGETMODE = 0x20000, /* Has tested target mode support */ + AHC_MULTIROLE = 0x40000, /* Space for two roles at a time */ + AHC_REMOVABLE = 0x80000, /* Hot-Swap supported */ + AHC_HVD = 0x100000, /* HVD rather than SE */ + AHC_AIC7770_FE = AHC_FENONE, + /* + * The real 7850 does not support Ultra modes, but there are + * several cards that use the generic 7850 PCI ID even though + * they are using an Ultra capable chip (7859/7860). We start + * out with the AHC_ULTRA feature set and then check the DEVSTATUS + * register to determine if the capability is really present. + */ + AHC_AIC7850_FE = AHC_SPIOCAP|AHC_AUTOPAUSE|AHC_TARGETMODE|AHC_ULTRA, + AHC_AIC7860_FE = AHC_AIC7850_FE, + AHC_AIC7870_FE = AHC_TARGETMODE|AHC_AUTOPAUSE, + AHC_AIC7880_FE = AHC_AIC7870_FE|AHC_ULTRA, + /* + * Although we have space for both the initiator and + * target roles on ULTRA2 chips, we currently disable + * the initiator role to allow multi-scsi-id target mode + * configurations. We can only respond on the same SCSI + * ID as our initiator role if we allow initiator operation. + * At some point, we should add a configuration knob to + * allow both roles to be loaded. + */ + AHC_AIC7890_FE = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA2 + |AHC_QUEUE_REGS|AHC_SG_PRELOAD|AHC_MULTI_TID + |AHC_HS_MAILBOX|AHC_NEW_TERMCTL|AHC_LARGE_SCBS + |AHC_TARGETMODE, + AHC_AIC7892_FE = AHC_AIC7890_FE|AHC_DT|AHC_AUTORATE|AHC_AUTOPAUSE, + AHC_AIC7895_FE = AHC_AIC7880_FE|AHC_MORE_SRAM|AHC_AUTOPAUSE + |AHC_CMD_CHAN|AHC_MULTI_FUNC|AHC_LARGE_SCBS, + AHC_AIC7895C_FE = AHC_AIC7895_FE|AHC_MULTI_TID, + AHC_AIC7896_FE = AHC_AIC7890_FE|AHC_MULTI_FUNC, + AHC_AIC7899_FE = AHC_AIC7892_FE|AHC_MULTI_FUNC +} ahc_feature; + +/* + * Bugs in the silicon that we work around in software. + */ +typedef enum { + AHC_BUGNONE = 0x00, + /* + * On all chips prior to the U2 product line, + * the WIDEODD S/G segment feature does not + * work during scsi->HostBus transfers. 
+ */ + AHC_TMODE_WIDEODD_BUG = 0x01, + /* + * On the aic7890/91 Rev 0 chips, the autoflush + * feature does not work. A manual flush of + * the DMA FIFO is required. + */ + AHC_AUTOFLUSH_BUG = 0x02, + /* + * On many chips, cacheline streaming does not work. + */ + AHC_CACHETHEN_BUG = 0x04, + /* + * On the aic7896/97 chips, cacheline + * streaming must be enabled. + */ + AHC_CACHETHEN_DIS_BUG = 0x08, + /* + * PCI 2.1 Retry failure on non-empty data fifo. + */ + AHC_PCI_2_1_RETRY_BUG = 0x10, + /* + * Controller does not handle cacheline residuals + * properly on S/G segments if PCI MWI instructions + * are allowed. + */ + AHC_PCI_MWI_BUG = 0x20, + /* + * An SCB upload using the SCB channel's + * auto array entry copy feature may + * corrupt data. This appears to only + * occur on 66MHz systems. + */ + AHC_SCBCHAN_UPLOAD_BUG = 0x40 +} ahc_bug; + +/* + * Configuration specific settings. + * The driver determines these settings by probing the + * chip/controller's configuration. + */ +typedef enum { + AHC_FNONE = 0x000, + AHC_PRIMARY_CHANNEL = 0x003, /* + * The channel that should + * be probed first. + */ + AHC_USEDEFAULTS = 0x004, /* + * For cards without an seeprom + * or a BIOS to initialize the chip's + * SRAM, we use the default target + * settings. + */ + AHC_SEQUENCER_DEBUG = 0x008, + AHC_SHARED_SRAM = 0x010, + AHC_LARGE_SEEPROM = 0x020, /* Uses C56_66 not C46 */ + AHC_RESET_BUS_A = 0x040, + AHC_RESET_BUS_B = 0x080, + AHC_EXTENDED_TRANS_A = 0x100, + AHC_EXTENDED_TRANS_B = 0x200, + AHC_TERM_ENB_A = 0x400, + AHC_TERM_ENB_B = 0x800, + AHC_INITIATORROLE = 0x1000, /* + * Allow initiator operations on + * this controller. + */ + AHC_TARGETROLE = 0x2000, /* + * Allow target operations on this + * controller. + */ + AHC_NEWEEPROM_FMT = 0x4000, + AHC_TQINFIFO_BLOCKED = 0x10000, /* Blocked waiting for ATIOs */ + AHC_INT50_SPEEDFLEX = 0x20000, /* + * Internal 50pin connector + * sits behind an aic3860 + */ + AHC_SCB_BTT = 0x40000, /* + * The busy targets table is + * stored in SCB space rather + * than SRAM. + */ + AHC_BIOS_ENABLED = 0x80000, + AHC_ALL_INTERRUPTS = 0x100000, + AHC_PAGESCBS = 0x400000, /* Enable SCB paging */ + AHC_EDGE_INTERRUPT = 0x800000, /* Device uses edge triggered ints */ + AHC_39BIT_ADDRESSING = 0x1000000, /* Use 39 bit addressing scheme. */ + AHC_LSCBS_ENABLED = 0x2000000, /* 64Byte SCBs enabled */ + AHC_SCB_CONFIG_USED = 0x4000000, /* No SEEPROM but SCB2 had info. */ + AHC_NO_BIOS_INIT = 0x8000000, /* No BIOS left over settings. */ + AHC_DISABLE_PCI_PERR = 0x10000000, + AHC_HAS_TERM_LOGIC = 0x20000000 +} ahc_flag; + +/************************* Hardware SCB Definition ***************************/ + +/* + * The driver keeps up to MAX_SCB scb structures per card in memory. The SCB + * consists of a "hardware SCB" mirroring the fields available on the card + * and additional information the kernel stores for each transaction. + * + * To minimize space utilization, a portion of the hardware scb stores + * different data during different portions of a SCSI transaction. + * As initialized by the host driver for the initiator role, this area + * contains the SCSI cdb (or a pointer to the cdb) to be executed. After + * the cdb has been presented to the target, this area serves to store + * residual transfer information and the SCSI status byte. + * For the target role, the contents of this area do not change, but + * still serve a different purpose than for the initiator role. See + * struct target_data for details. 
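+ *
+ * A minimal sketch (editor's illustration, not upstream code) of how the
+ * host side might read the shared area back after an abnormal completion,
+ * assuming a hypothetical pointer "scb" to a completed kernel SCB:
+ *
+ *	struct hardware_scb *hscb = scb->hscb;
+ *	uint8_t  scsi_status = hscb->shared_data.status.scsi_status;
+ *	uint32_t resid       = hscb->shared_data.status.residual_datacnt;
+ *
+ * In the target role the same bytes are instead interpreted through the
+ * target_data member of the union.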
+ */
+
+/*
+ * Status information embedded in the shared portion of
+ * an SCB after passing the cdb to the target. The kernel
+ * driver will only read this data for transactions that
+ * complete abnormally (non-zero status byte).
+ */
+struct status_pkt {
+ uint32_t residual_datacnt; /* Residual in the current S/G seg */
+ uint32_t residual_sg_ptr; /* The next S/G for this transfer */
+ uint8_t scsi_status; /* Standard SCSI status byte */
+};
+
+/*
+ * Target mode version of the shared data SCB segment.
+ */
+struct target_data {
+ uint32_t residual_datacnt; /* Residual in the current S/G seg */
+ uint32_t residual_sg_ptr; /* The next S/G for this transfer */
+ uint8_t scsi_status; /* SCSI status to give to initiator */
+ uint8_t target_phases; /* Bitmap of phases to execute */
+ uint8_t data_phase; /* Data-In or Data-Out */
+ uint8_t initiator_tag; /* Initiator's transaction tag */
+};
+
+struct hardware_scb {
+/*0*/ union {
+ /*
+ * If the cdb is 12 bytes or less, we embed it directly
+ * in the SCB. For longer cdbs, we embed the address
+ * of the cdb payload as seen by the chip and a DMA
+ * is used to pull it in.
+ */
+ uint8_t cdb[12];
+ uint32_t cdb_ptr;
+ struct status_pkt status;
+ struct target_data tdata;
+ } shared_data;
+/*
+ * A word about residuals.
+ * The scb is presented to the sequencer with the dataptr and datacnt
+ * fields initialized to the contents of the first S/G element to
+ * transfer. The sgptr field is initialized to the bus address for
+ * the S/G element that follows the first in the in core S/G array
+ * or'ed with the SG_FULL_RESID flag. Sgptr may point to an invalid
+ * S/G entry for this transfer (single S/G element transfer with the
+ * first element's address and length preloaded in the dataptr/datacnt
+ * fields). If no transfer is to occur, sgptr is set to SG_LIST_NULL.
+ * The SG_FULL_RESID flag ensures that the residual will be correctly
+ * noted even if no data transfers occur. Once the data phase is entered,
+ * the residual sgptr and datacnt are loaded from the sgptr and the
+ * datacnt fields. After each S/G element's dataptr and length are
+ * loaded into the hardware, the residual sgptr is advanced. After
+ * each S/G element is expired, its datacnt field is checked to see
+ * if the LAST_SEG flag is set. If so, SG_LIST_NULL is set in the
+ * residual sg ptr and the transfer is considered complete. If the
+ * sequencer determines that there is a residual in the transfer, it
+ * will set the SG_RESID_VALID flag in sgptr and dma the scb back into
+ * host memory. To summarize:
+ *
+ * Sequencer:
+ * o A residual has occurred if SG_FULL_RESID is set in sgptr,
+ * or residual_sgptr does not have SG_LIST_NULL set.
+ *
+ * o We are transferring the last segment if residual_datacnt has
+ * the SG_LAST_SEG flag set.
+ *
+ * Host:
+ * o A residual has occurred if a completed scb has the
+ * SG_RESID_VALID flag set.
+ *
+ * o residual_sgptr and sgptr refer to the "next" sg entry
+ * and so may point beyond the last valid sg entry for the
+ * transfer.
+ */
+/*12*/ uint32_t dataptr;
+/*16*/ uint32_t datacnt; /*
+ * Byte 3 (numbered from 0) of
+ * the datacnt is really the
+ * 4th byte in that data address.
+ */
+/*20*/ uint32_t sgptr;
+#define SG_PTR_MASK 0xFFFFFFF8
+/*24*/ uint8_t control; /* See SCB_CONTROL in aic7xxx.reg for details */
+/*25*/ uint8_t scsiid; /* what to load in the SCSIID register */
+/*26*/ uint8_t lun;
+/*27*/ uint8_t tag; /*
+ * Index into our kernel SCB array.
+ * Also used as the tag for tagged I/O
+ */
+/*28*/ uint8_t cdb_len;
+/*29*/ uint8_t scsirate; /* Value for SCSIRATE register */
+/*30*/ uint8_t scsioffset; /* Value for SCSIOFFSET register */
+/*31*/ uint8_t next; /*
+ * Used for threading SCBs in the
+ * "Waiting for Selection" and
+ * "Disconnected SCB" lists down
+ * in the sequencer.
+ */
+/*32*/ uint8_t cdb32[32]; /*
+ * CDB storage for cdbs of size
+ * 13->32. We store them here
+ * because hardware scbs are
+ * allocated from DMA safe
+ * memory so we are guaranteed
+ * the controller can access
+ * this data.
+ */
+};
+
+/************************ Kernel SCB Definitions ******************************/
+/*
+ * Some fields of the SCB are OS dependent. Here we collect the
+ * definitions for elements that all OS platforms need to include
+ * in their SCB definition.
+ */
+
+/*
+ * Definition of a scatter/gather element as transferred to the controller.
+ * The aic7xxx chips only support a 24bit length. We use the top byte of
+ * the length to store additional address bits and a flag to indicate
+ * that a given segment terminates the transfer. This gives us an
+ * addressable range of 512GB on machines with 64bit PCI or with chips
+ * that can support dual address cycles on 32bit PCI busses.
+ */
+struct ahc_dma_seg {
+ uint32_t addr;
+ uint32_t len;
+#define AHC_DMA_LAST_SEG 0x80000000
+#define AHC_SG_HIGH_ADDR_MASK 0x7F000000
+#define AHC_SG_LEN_MASK 0x00FFFFFF
+};
+
+struct sg_map_node {
+ bus_dmamap_t sg_dmamap;
+ dma_addr_t sg_physaddr;
+ struct ahc_dma_seg* sg_vaddr;
+ SLIST_ENTRY(sg_map_node) links;
+};
+
+/*
+ * The current state of this SCB.
+ */
+typedef enum {
+ SCB_FREE = 0x0000,
+ SCB_OTHERTCL_TIMEOUT = 0x0002,/*
+ * Another device was active
+ * during the first timeout for
+ * this SCB so we gave ourselves
+ * an additional timeout period
+ * in case it was hogging the
+ * bus.
+ */
+ SCB_DEVICE_RESET = 0x0004,
+ SCB_SENSE = 0x0008,
+ SCB_CDB32_PTR = 0x0010,
+ SCB_RECOVERY_SCB = 0x0020,
+ SCB_AUTO_NEGOTIATE = 0x0040,/* Negotiate to achieve goal. */
+ SCB_NEGOTIATE = 0x0080,/* Negotiation forced for command. */
+ SCB_ABORT = 0x0100,
+ SCB_UNTAGGEDQ = 0x0200,
+ SCB_ACTIVE = 0x0400,
+ SCB_TARGET_IMMEDIATE = 0x0800,
+ SCB_TRANSMISSION_ERROR = 0x1000,/*
+ * We detected a parity or CRC
+ * error that has affected the
+ * payload of the command. This
+ * flag is checked when normal
+ * status is returned to catch
+ * the case of a target not
+ * responding to our attempt
+ * to report the error.
+ */
+ SCB_TARGET_SCB = 0x2000,
+ SCB_SILENT = 0x4000 /*
+ * Be quiet about transmission type
+ * errors. They are expected and we
+ * don't want to upset the user. This
+ * flag is typically used during DV.
+ */
+} scb_flag;
+
+struct scb {
+ struct hardware_scb *hscb;
+ union {
+ SLIST_ENTRY(scb) sle;
+ TAILQ_ENTRY(scb) tqe;
+ } links;
+ LIST_ENTRY(scb) pending_links;
+ ahc_io_ctx_t io_ctx;
+ struct ahc_softc *ahc_softc;
+ scb_flag flags;
+ struct scb_platform_data *platform_data;
+ struct sg_map_node *sg_map;
+ struct ahc_dma_seg *sg_list;
+ dma_addr_t sg_list_phys;
+ u_int sg_count;/* How full ahc_dma_seg is */
+};
+
+struct scb_data {
+ SLIST_HEAD(, scb) free_scbs; /*
+ * Pool of SCBs ready to be assigned
+ * commands to execute.
+ */
+ struct scb *scbindex[256]; /*
+ * Mapping from tag to SCB.
+ * As tag identifiers are an
+ * 8bit value, we provide space
+ * for all possible tag values.
+ * Any lookups to entries at or
+ * above AHC_SCB_MAX_ALLOC will
+ * always fail.
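+ * (Editor's note, illustrative only: a tag lookup is essentially
+ * "scb = scb_data->scbindex[tag]", with a NULL entry meaning the
+ * tag is not currently allocated.)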
+ */ + struct hardware_scb *hscbs; /* Array of hardware SCBs */ + struct scb *scbarray; /* Array of kernel SCBs */ + struct scsi_sense_data *sense; /* Per SCB sense data */ + + /* + * "Bus" addresses of our data structures. + */ + bus_dma_tag_t hscb_dmat; /* dmat for our hardware SCB array */ + bus_dmamap_t hscb_dmamap; + dma_addr_t hscb_busaddr; + bus_dma_tag_t sense_dmat; + bus_dmamap_t sense_dmamap; + dma_addr_t sense_busaddr; + bus_dma_tag_t sg_dmat; /* dmat for our sg segments */ + SLIST_HEAD(, sg_map_node) sg_maps; + uint8_t numscbs; + uint8_t maxhscbs; /* Number of SCBs on the card */ + uint8_t init_level; /* + * How far we've initialized + * this structure. + */ +}; + +/************************ Target Mode Definitions *****************************/ + +/* + * Connection descriptor for select-in requests in target mode. + */ +struct target_cmd { + uint8_t scsiid; /* Our ID and the initiator's ID */ + uint8_t identify; /* Identify message */ + uint8_t bytes[22]; /* + * Bytes contains any additional message + * bytes terminated by 0xFF. The remainder + * is the cdb to execute. + */ + uint8_t cmd_valid; /* + * When a command is complete, the firmware + * will set cmd_valid to all bits set. + * After the host has seen the command, + * the bits are cleared. This allows us + * to just peek at host memory to determine + * if more work is complete. cmd_valid is on + * an 8 byte boundary to simplify setting + * it on aic7880 hardware which only has + * limited direct access to the DMA FIFO. + */ + uint8_t pad[7]; +}; + +/* + * Number of events we can buffer up if we run out + * of immediate notify ccbs. + */ +#define AHC_TMODE_EVENT_BUFFER_SIZE 8 +struct ahc_tmode_event { + uint8_t initiator_id; + uint8_t event_type; /* MSG type or EVENT_TYPE_BUS_RESET */ +#define EVENT_TYPE_BUS_RESET 0xFF + uint8_t event_arg; +}; + +/* + * Per enabled lun target mode state. + * As this state is directly influenced by the host OS'es target mode + * environment, we let the OS module define it. Forward declare the + * structure here so we can store arrays of them, etc. in OS neutral + * data structures. + */ +#ifdef AHC_TARGET_MODE +struct ahc_tmode_lstate { + struct cam_path *path; + struct ccb_hdr_slist accept_tios; + struct ccb_hdr_slist immed_notifies; + struct ahc_tmode_event event_buffer[AHC_TMODE_EVENT_BUFFER_SIZE]; + uint8_t event_r_idx; + uint8_t event_w_idx; +}; +#else +struct ahc_tmode_lstate; +#endif + +/******************** Transfer Negotiation Datastructures *********************/ +#define AHC_TRANS_CUR 0x01 /* Modify current neogtiation status */ +#define AHC_TRANS_ACTIVE 0x03 /* Assume this target is on the bus */ +#define AHC_TRANS_GOAL 0x04 /* Modify negotiation goal */ +#define AHC_TRANS_USER 0x08 /* Modify user negotiation settings */ + +#define AHC_WIDTH_UNKNOWN 0xFF +#define AHC_PERIOD_UNKNOWN 0xFF +#define AHC_OFFSET_UNKNOWN 0xFF +#define AHC_PPR_OPTS_UNKNOWN 0xFF + +/* + * Transfer Negotiation Information. + */ +struct ahc_transinfo { + uint8_t protocol_version; /* SCSI Revision level */ + uint8_t transport_version; /* SPI Revision level */ + uint8_t width; /* Bus width */ + uint8_t period; /* Sync rate factor */ + uint8_t offset; /* Sync offset */ + uint8_t ppr_options; /* Parallel Protocol Request options */ +}; + +/* + * Per-initiator current, goal and user transfer negotiation information. 
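+ *
+ * The question most often asked of this record is whether the goal
+ * and current parameters still agree (see AHC_NEG_TO_GOAL below).
+ * A hedged sketch of that comparison; the helper name is an
+ * assumption and the driver's own test also weighs the negotiation
+ * type:
+ *
+ *	static int
+ *	transinfo_differs(const struct ahc_transinfo *a,
+ *			  const struct ahc_transinfo *b)
+ *	{
+ *		return a->width != b->width
+ *		    || a->period != b->period
+ *		    || a->offset != b->offset
+ *		    || a->ppr_options != b->ppr_options;
+ *	}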
*/ +struct ahc_initiator_tinfo { + uint8_t scsirate; /* Computed value for SCSIRATE reg */ + struct ahc_transinfo curr; + struct ahc_transinfo goal; + struct ahc_transinfo user; +}; + +/* + * Per enabled target ID state. + * Pointers to lun target state as well as sync/wide negotiation information + * for each initiator<->target mapping. For the initiator role we pretend + * that we are the target and the targets are the initiators since the + * negotiation is the same regardless of role. + */ +struct ahc_tmode_tstate { + struct ahc_tmode_lstate* enabled_luns[AHC_NUM_LUNS]; + struct ahc_initiator_tinfo transinfo[AHC_NUM_TARGETS]; + + /* + * Per initiator state bitmasks. + */ + uint16_t auto_negotiate;/* Auto Negotiation Required */ + uint16_t ultraenb; /* Using ultra sync rate */ + uint16_t discenable; /* Disconnection allowed */ + uint16_t tagenable; /* Tagged Queuing allowed */ +}; + +/* + * Data structure for our table of allowed synchronous transfer rates. + */ +struct ahc_syncrate { + u_int sxfr_u2; /* Value of the SXFR parameter for Ultra2+ Chips */ + u_int sxfr; /* Value of the SXFR parameter for <= Ultra Chips */ +#define ULTRA_SXFR 0x100 /* Rate Requires Ultra Mode set */ +#define ST_SXFR 0x010 /* Rate Single Transition Only */ +#define DT_SXFR 0x040 /* Rate Double Transition Only */ + uint8_t period; /* Period to send to SCSI target */ + const char *rate; +}; + +/* Safe and valid period for async negotiations. */ +#define AHC_ASYNC_XFER_PERIOD 0x45 +#define AHC_ULTRA2_XFER_PERIOD 0x0a + +/* + * Indexes into our table of syncronous transfer rates. + */ +#define AHC_SYNCRATE_DT 0 +#define AHC_SYNCRATE_ULTRA2 1 +#define AHC_SYNCRATE_ULTRA 3 +#define AHC_SYNCRATE_FAST 6 +#define AHC_SYNCRATE_MAX AHC_SYNCRATE_DT +#define AHC_SYNCRATE_MIN 13 + +/***************************** Lookup Tables **********************************/ +/* + * Phase -> name and message out response + * to parity errors in each phase table. 
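+ *
+ * One way to consume such a table (an illustrative sketch: the
+ * helper name and the NULL fallback are assumptions, the real
+ * table and its policy live in the core driver):
+ *
+ *	static const struct ahc_phase_table_entry *
+ *	find_phase(const struct ahc_phase_table_entry *tbl, u_int n,
+ *		   uint8_t phase)
+ *	{
+ *		u_int i;
+ *
+ *		for (i = 0; i < n; i++)
+ *			if (tbl[i].phase == phase)
+ *				return &tbl[i];
+ *		return NULL;
+ *	}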
+ */ +struct ahc_phase_table_entry { + uint8_t phase; + uint8_t mesg_out; /* Message response to parity errors */ + char *phasemsg; +}; + +/************************** Serial EEPROM Format ******************************/ + +struct seeprom_config { +/* + * Per SCSI ID Configuration Flags + */ + uint16_t device_flags[16]; /* words 0-15 */ +#define CFXFER 0x0007 /* synchronous transfer rate */ +#define CFSYNCH 0x0008 /* enable synchronous transfer */ +#define CFDISC 0x0010 /* enable disconnection */ +#define CFWIDEB 0x0020 /* wide bus device */ +#define CFSYNCHISULTRA 0x0040 /* CFSYNCH is an ultra offset (2940AU)*/ +#define CFSYNCSINGLE 0x0080 /* Single-Transition signalling */ +#define CFSTART 0x0100 /* send start unit SCSI command */ +#define CFINCBIOS 0x0200 /* include in BIOS scan */ +#define CFRNFOUND 0x0400 /* report even if not found */ +#define CFMULTILUNDEV 0x0800 /* Probe multiple luns in BIOS scan */ +#define CFWBCACHEENB 0x4000 /* Enable W-Behind Cache on disks */ +#define CFWBCACHENOP 0xc000 /* Don't touch W-Behind Cache */ + +/* + * BIOS Control Bits + */ + uint16_t bios_control; /* word 16 */ +#define CFSUPREM 0x0001 /* support all removeable drives */ +#define CFSUPREMB 0x0002 /* support removeable boot drives */ +#define CFBIOSEN 0x0004 /* BIOS enabled */ +#define CFBIOS_BUSSCAN 0x0008 /* Have the BIOS Scan the Bus */ +#define CFSM2DRV 0x0010 /* support more than two drives */ +#define CFSTPWLEVEL 0x0010 /* Termination level control */ +#define CF284XEXTEND 0x0020 /* extended translation (284x cards) */ +#define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */ +#define CFTERM_MENU 0x0040 /* BIOS displays termination menu */ +#define CFEXTEND 0x0080 /* extended translation enabled */ +#define CFSCAMEN 0x0100 /* SCAM enable */ +#define CFMSG_LEVEL 0x0600 /* BIOS Message Level */ +#define CFMSG_VERBOSE 0x0000 +#define CFMSG_SILENT 0x0200 +#define CFMSG_DIAG 0x0400 +#define CFBOOTCD 0x0800 /* Support Bootable CD-ROM */ +/* UNUSED 0xff00 */ + +/* + * Host Adapter Control Bits + */ + uint16_t adapter_control; /* word 17 */ +#define CFAUTOTERM 0x0001 /* Perform Auto termination */ +#define CFULTRAEN 0x0002 /* Ultra SCSI speed enable */ +#define CF284XSELTO 0x0003 /* Selection timeout (284x cards) */ +#define CF284XFIFO 0x000C /* FIFO Threshold (284x cards) */ +#define CFSTERM 0x0004 /* SCSI low byte termination */ +#define CFWSTERM 0x0008 /* SCSI high byte termination */ +#define CFSPARITY 0x0010 /* SCSI parity */ +#define CF284XSTERM 0x0020 /* SCSI low byte term (284x cards) */ +#define CFMULTILUN 0x0020 +#define CFRESETB 0x0040 /* reset SCSI bus at boot */ +#define CFCLUSTERENB 0x0080 /* Cluster Enable */ +#define CFBOOTCHAN 0x0300 /* probe this channel first */ +#define CFBOOTCHANSHIFT 8 +#define CFSEAUTOTERM 0x0400 /* Ultra2 Perform secondary Auto Term*/ +#define CFSELOWTERM 0x0800 /* Ultra2 secondary low term */ +#define CFSEHIGHTERM 0x1000 /* Ultra2 secondary high term */ +#define CFENABLEDV 0x4000 /* Perform Domain Validation*/ + +/* + * Bus Release Time, Host Adapter ID + */ + uint16_t brtime_id; /* word 18 */ +#define CFSCSIID 0x000f /* host adapter SCSI ID */ +/* UNUSED 0x00f0 */ +#define CFBRTIME 0xff00 /* bus release time */ + +/* + * Maximum targets + */ + uint16_t max_targets; /* word 19 */ +#define CFMAXTARG 0x00ff /* maximum targets */ +#define CFBOOTLUN 0x0f00 /* Lun to boot from */ +#define CFBOOTID 0xf000 /* Target to boot from */ + uint16_t res_1[10]; /* words 20-29 */ + uint16_t signature; /* Signature == 0x250 */ +#define CFSIGNATURE 0x250 +#define 
CFSIGNATURE2 0x300 + uint16_t checksum; /* word 31 */ +}; + +/**************************** Message Buffer *********************************/ +typedef enum { + MSG_TYPE_NONE = 0x00, + MSG_TYPE_INITIATOR_MSGOUT = 0x01, + MSG_TYPE_INITIATOR_MSGIN = 0x02, + MSG_TYPE_TARGET_MSGOUT = 0x03, + MSG_TYPE_TARGET_MSGIN = 0x04 +} ahc_msg_type; + +typedef enum { + MSGLOOP_IN_PROG, + MSGLOOP_MSGCOMPLETE, + MSGLOOP_TERMINATED +} msg_loop_stat; + +/*********************** Software Configuration Structure *********************/ +TAILQ_HEAD(scb_tailq, scb); + +struct ahc_aic7770_softc { + /* + * Saved register state used for chip_init(). + */ + uint8_t busspd; + uint8_t bustime; +}; + +struct ahc_pci_softc { + /* + * Saved register state used for chip_init(). + */ + uint32_t devconfig; + uint16_t targcrccnt; + uint8_t command; + uint8_t csize_lattime; + uint8_t optionmode; + uint8_t crccontrol1; + uint8_t dscommand0; + uint8_t dspcistatus; + uint8_t scbbaddr; + uint8_t dff_thrsh; +}; + +union ahc_bus_softc { + struct ahc_aic7770_softc aic7770_softc; + struct ahc_pci_softc pci_softc; +}; + +typedef void (*ahc_bus_intr_t)(struct ahc_softc *); +typedef int (*ahc_bus_chip_init_t)(struct ahc_softc *); +typedef void ahc_callback_t (void *); + +struct ahc_softc { + bus_space_tag_t tag; + bus_space_handle_t bsh; + struct scb_data *scb_data; + + struct scb *next_queued_scb; + + /* + * SCBs that have been sent to the controller + */ + BSD_LIST_HEAD(, scb) pending_scbs; + + /* + * Counting lock for deferring the release of additional + * untagged transactions from the untagged_queues. When + * the lock is decremented to 0, all queues in the + * untagged_queues array are run. + */ + u_int untagged_queue_lock; + + /* + * Per-target queue of untagged-transactions. The + * transaction at the head of the queue is the + * currently pending untagged transaction for the + * target. The driver only allows a single untagged + * transaction per target. + */ + struct scb_tailq untagged_queues[AHC_NUM_TARGETS]; + + /* + * Bus attachment specific data. + */ + union ahc_bus_softc bus_softc; + + /* + * Platform specific data. + */ + struct ahc_platform_data *platform_data; + + /* + * Platform specific device information. + */ + ahc_dev_softc_t dev_softc; + struct device *dev; + + /* + * Bus specific device information. + */ + ahc_bus_intr_t bus_intr; + + /* + * Bus specific initialization required + * after a chip reset. + */ + ahc_bus_chip_init_t bus_chip_init; + + /* + * Target mode related state kept on a per enabled lun basis. + * Targets that are not enabled will have null entries. + * As an initiator, we keep one target entry for our initiator + * ID to store our sync/wide transfer settings. + */ + struct ahc_tmode_tstate *enabled_targets[AHC_NUM_TARGETS]; + + /* + * The black hole device responsible for handling requests for + * disabled luns on enabled targets. + */ + struct ahc_tmode_lstate *black_hole; + + /* + * Device instance currently on the bus awaiting a continue TIO + * for a command that was not given the disconnect priveledge. 
+ */ + struct ahc_tmode_lstate *pending_device; + + /* + * Card characteristics + */ + ahc_chip chip; + ahc_feature features; + ahc_bug bugs; + ahc_flag flags; + struct seeprom_config *seep_config; + + /* Values to store in the SEQCTL register for pause and unpause */ + uint8_t unpause; + uint8_t pause; + + /* Command Queues */ + uint8_t qoutfifonext; + uint8_t qinfifonext; + uint8_t *qoutfifo; + uint8_t *qinfifo; + + /* Critical Section Data */ + struct cs *critical_sections; + u_int num_critical_sections; + + /* Channel Names ('A', 'B', etc.) */ + char channel; + char channel_b; + + /* Initiator Bus ID */ + uint8_t our_id; + uint8_t our_id_b; + + /* + * PCI error detection. + */ + int unsolicited_ints; + + /* + * Target incoming command FIFO. + */ + struct target_cmd *targetcmds; + uint8_t tqinfifonext; + + /* + * Cached copy of the sequencer control register. + */ + uint8_t seqctl; + + /* + * Incoming and outgoing message handling. + */ + uint8_t send_msg_perror; + ahc_msg_type msg_type; + uint8_t msgout_buf[12];/* Message we are sending */ + uint8_t msgin_buf[12];/* Message we are receiving */ + u_int msgout_len; /* Length of message to send */ + u_int msgout_index; /* Current index in msgout */ + u_int msgin_index; /* Current index in msgin */ + + /* + * Mapping information for data structures shared + * between the sequencer and kernel. + */ + bus_dma_tag_t parent_dmat; + bus_dma_tag_t shared_data_dmat; + bus_dmamap_t shared_data_dmamap; + dma_addr_t shared_data_busaddr; + + /* + * Bus address of the one byte buffer used to + * work-around a DMA bug for chips <= aic7880 + * in target mode. + */ + dma_addr_t dma_bug_buf; + + /* Number of enabled target mode device on this card */ + u_int enabled_luns; + + /* Initialization level of this data structure */ + u_int init_level; + + /* PCI cacheline size. */ + u_int pci_cachesize; + + /* + * Count of parity errors we have seen as a target. + * We auto-disable parity error checking after seeing + * AHC_PCI_TARGET_PERR_THRESH number of errors. + */ + u_int pci_target_perr_count; +#define AHC_PCI_TARGET_PERR_THRESH 10 + + /* Maximum number of sequencer instructions supported. */ + u_int instruction_ram_size; + + /* Per-Unit descriptive information */ + const char *description; + char *name; + int unit; + + /* Selection Timer settings */ + int seltime; + int seltime_b; + + uint16_t user_discenable;/* Disconnection allowed */ + uint16_t user_tagenable;/* Tagged Queuing allowed */ +}; + +/************************ Active Device Information ***************************/ +typedef enum { + ROLE_UNKNOWN, + ROLE_INITIATOR, + ROLE_TARGET +} role_t; + +struct ahc_devinfo { + int our_scsiid; + int target_offset; + uint16_t target_mask; + u_int target; + u_int lun; + char channel; + role_t role; /* + * Only guaranteed to be correct if not + * in the busfree state. 
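+ *
+ * The record as a whole is normally filled in through
+ * ahc_compile_devinfo(), declared further down.  A usage sketch
+ * with placeholder values (our SCSI ID 7, target 3, lun 0 on
+ * channel 'A' are examples, not defaults):
+ *
+ *	struct ahc_devinfo di;
+ *
+ *	ahc_compile_devinfo(&di, 7, 3, 0, 'A', ROLE_INITIATOR);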
+ */ +}; + +/****************************** PCI Structures ********************************/ +typedef int (ahc_device_setup_t)(struct ahc_softc *); + +struct ahc_pci_identity { + uint64_t full_id; + uint64_t id_mask; + const char *name; + ahc_device_setup_t *setup; +}; + +/***************************** VL/EISA Declarations ***************************/ +struct aic7770_identity { + uint32_t full_id; + uint32_t id_mask; + const char *name; + ahc_device_setup_t *setup; +}; +extern struct aic7770_identity aic7770_ident_table[]; +extern const int ahc_num_aic7770_devs; + +#define AHC_EISA_SLOT_OFFSET 0xc00 +#define AHC_EISA_IOSIZE 0x100 + +/*************************** Function Declarations ****************************/ +/******************************************************************************/ + +/***************************** PCI Front End *********************************/ +const struct ahc_pci_identity *ahc_find_pci_device(ahc_dev_softc_t); +int ahc_pci_config(struct ahc_softc *, + const struct ahc_pci_identity *); +int ahc_pci_test_register_access(struct ahc_softc *); +void __maybe_unused ahc_pci_resume(struct ahc_softc *ahc); + +/*************************** EISA/VL Front End ********************************/ +struct aic7770_identity *aic7770_find_device(uint32_t); +int aic7770_config(struct ahc_softc *ahc, + struct aic7770_identity *, + u_int port); + +/************************** SCB and SCB queue management **********************/ +int ahc_probe_scbs(struct ahc_softc *); +void ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, + struct scb *scb); +int ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, + int target, char channel, int lun, + u_int tag, role_t role); + +/****************************** Initialization ********************************/ +struct ahc_softc *ahc_alloc(void *platform_arg, char *name); +int ahc_softc_init(struct ahc_softc *); +void ahc_controller_info(struct ahc_softc *ahc, char *buf); +int ahc_chip_init(struct ahc_softc *ahc); +int ahc_init(struct ahc_softc *ahc); +void ahc_intr_enable(struct ahc_softc *ahc, int enable); +void ahc_pause_and_flushwork(struct ahc_softc *ahc); +int __maybe_unused ahc_suspend(struct ahc_softc *ahc); +int __maybe_unused ahc_resume(struct ahc_softc *ahc); +void ahc_set_unit(struct ahc_softc *, int); +void ahc_set_name(struct ahc_softc *, char *); +void ahc_free(struct ahc_softc *ahc); +int ahc_reset(struct ahc_softc *ahc, int reinit); + +/***************************** Error Recovery *********************************/ +typedef enum { + SEARCH_COMPLETE, + SEARCH_COUNT, + SEARCH_REMOVE +} ahc_search_action; +int ahc_search_qinfifo(struct ahc_softc *ahc, int target, + char channel, int lun, u_int tag, + role_t role, uint32_t status, + ahc_search_action action); +int ahc_search_untagged_queues(struct ahc_softc *ahc, + ahc_io_ctx_t ctx, + int target, char channel, + int lun, uint32_t status, + ahc_search_action action); +int ahc_search_disc_list(struct ahc_softc *ahc, int target, + char channel, int lun, u_int tag, + int stop_on_first, int remove, + int save_state); +int ahc_reset_channel(struct ahc_softc *ahc, char channel, + int initiate_reset); + +/*************************** Utility Functions ********************************/ +void ahc_compile_devinfo(struct ahc_devinfo *devinfo, + u_int our_id, u_int target, + u_int lun, char channel, + role_t role); +/************************** Transfer Negotiation ******************************/ +const struct ahc_syncrate* ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, + u_int *ppr_options, 
u_int maxsync); +u_int ahc_find_period(struct ahc_softc *ahc, + u_int scsirate, u_int maxsync); +/* + * Negotiation types. These are used to qualify if we should renegotiate + * even if our goal and current transport parameters are identical. + */ +typedef enum { + AHC_NEG_TO_GOAL, /* Renegotiate only if goal and curr differ. */ + AHC_NEG_IF_NON_ASYNC, /* Renegotiate so long as goal is non-async. */ + AHC_NEG_ALWAYS /* Renegotiat even if goal is async. */ +} ahc_neg_type; +int ahc_update_neg_request(struct ahc_softc*, + struct ahc_devinfo*, + struct ahc_tmode_tstate*, + struct ahc_initiator_tinfo*, + ahc_neg_type); +void ahc_set_width(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo, + u_int width, u_int type, int paused); +void ahc_set_syncrate(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo, + const struct ahc_syncrate *syncrate, + u_int period, u_int offset, + u_int ppr_options, + u_int type, int paused); +typedef enum { + AHC_QUEUE_NONE, + AHC_QUEUE_BASIC, + AHC_QUEUE_TAGGED +} ahc_queue_alg; + +/**************************** Target Mode *************************************/ +#ifdef AHC_TARGET_MODE +void ahc_send_lstate_events(struct ahc_softc *, + struct ahc_tmode_lstate *); +void ahc_handle_en_lun(struct ahc_softc *ahc, + struct cam_sim *sim, union ccb *ccb); +cam_status ahc_find_tmode_devs(struct ahc_softc *ahc, + struct cam_sim *sim, union ccb *ccb, + struct ahc_tmode_tstate **tstate, + struct ahc_tmode_lstate **lstate, + int notfound_failure); +#ifndef AHC_TMODE_ENABLE +#define AHC_TMODE_ENABLE 0 +#endif +#endif +/******************************* Debug ***************************************/ +#ifdef AHC_DEBUG +extern uint32_t ahc_debug; +#define AHC_SHOW_MISC 0x0001 +#define AHC_SHOW_SENSE 0x0002 +#define AHC_DUMP_SEEPROM 0x0004 +#define AHC_SHOW_TERMCTL 0x0008 +#define AHC_SHOW_MEMORY 0x0010 +#define AHC_SHOW_MESSAGES 0x0020 +#define AHC_SHOW_DV 0x0040 +#define AHC_SHOW_SELTO 0x0080 +#define AHC_SHOW_QFULL 0x0200 +#define AHC_SHOW_QUEUE 0x0400 +#define AHC_SHOW_TQIN 0x0800 +#define AHC_SHOW_MASKED_ERRORS 0x1000 +#define AHC_DEBUG_SEQUENCER 0x2000 +#endif +void ahc_print_devinfo(struct ahc_softc *ahc, + struct ahc_devinfo *dev); +void ahc_dump_card_state(struct ahc_softc *ahc); +int ahc_print_register(const ahc_reg_parse_entry_t *table, + u_int num_entries, + const char *name, + u_int address, + u_int value, + u_int *cur_column, + u_int wrap_point); +/******************************* SEEPROM *************************************/ +int ahc_acquire_seeprom(struct ahc_softc *ahc, + struct seeprom_descriptor *sd); +void ahc_release_seeprom(struct seeprom_descriptor *sd); +#endif /* _AIC7XXX_H_ */ diff --git a/drivers/scsi/aic7xxx/aic7xxx.reg b/drivers/scsi/aic7xxx/aic7xxx.reg new file mode 100644 index 000000000..00fde2243 --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7xxx.reg @@ -0,0 +1,1761 @@ +/* + * Aic7xxx register and scratch ram definitions. + * + * Copyright (c) 1994-2001 Justin T. Gibbs. + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $FreeBSD$ + */ +VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $" + +/* + * This file is processed by the aic7xxx_asm utility for use in assembling + * firmware for the aic7xxx family of SCSI host adapters as well as to generate + * a C header file for use in the kernel portion of the Aic7xxx driver. + * + * All page numbers refer to the Adaptec AIC-7770 Data Book available from + * Adaptec's Technical Documents Department 1-800-934-2766 + */ + +/* + * Registers marked "dont_generate_debug_code" are not (yet) referenced + * from the driver code, and this keyword inhibit generation + * of debug code for them. + * + * REG_PRETTY_PRINT config will complain if dont_generate_debug_code + * is added to the register which is referenced in the driver. + * Unreferenced register with no dont_generate_debug_code will result + * in dead code. No warning is issued. + */ + +/* + * SCSI Sequence Control (p. 3-11). + * Each bit, when set starts a specific SCSI sequence on the bus + */ +register SCSISEQ { + address 0x000 + access_mode RW + field TEMODE 0x80 + field ENSELO 0x40 + field ENSELI 0x20 + field ENRSELI 0x10 + field ENAUTOATNO 0x08 + field ENAUTOATNI 0x04 + field ENAUTOATNP 0x02 + field SCSIRSTO 0x01 +} + +/* + * SCSI Transfer Control 0 Register (pp. 3-13). + * Controls the SCSI module data path. + */ +register SXFRCTL0 { + address 0x001 + access_mode RW + field DFON 0x80 + field DFPEXP 0x40 + field FAST20 0x20 + field CLRSTCNT 0x10 + field SPIOEN 0x08 + field SCAMEN 0x04 + field CLRCHN 0x02 +} + +/* + * SCSI Transfer Control 1 Register (pp. 3-14,15). + * Controls the SCSI module data path. + */ +register SXFRCTL1 { + address 0x002 + access_mode RW + field BITBUCKET 0x80 + field SWRAPEN 0x40 + field ENSPCHK 0x20 + mask STIMESEL 0x18 + field ENSTIMER 0x04 + field ACTNEGEN 0x02 + field STPWEN 0x01 /* Powered Termination */ + dont_generate_debug_code +} + +/* + * SCSI Control Signal Read Register (p. 3-15). 
+ * Reads the actual state of the SCSI bus pins + */ +register SCSISIGI { + address 0x003 + access_mode RO + field CDI 0x80 + field IOI 0x40 + field MSGI 0x20 + field ATNI 0x10 + field SELI 0x08 + field BSYI 0x04 + field REQI 0x02 + field ACKI 0x01 +/* + * Possible phases in SCSISIGI + */ + mask PHASE_MASK CDI|IOI|MSGI + mask P_DATAOUT 0x00 + mask P_DATAIN IOI + mask P_DATAOUT_DT P_DATAOUT|MSGI + mask P_DATAIN_DT P_DATAIN|MSGI + mask P_COMMAND CDI + mask P_MESGOUT CDI|MSGI + mask P_STATUS CDI|IOI + mask P_MESGIN CDI|IOI|MSGI +} + +/* + * SCSI Control Signal Write Register (p. 3-16). + * Writing to this register modifies the control signals on the bus. Only + * those signals that are allowed in the current mode (Initiator/Target) are + * asserted. + */ +register SCSISIGO { + address 0x003 + access_mode WO + field CDO 0x80 + field IOO 0x40 + field MSGO 0x20 + field ATNO 0x10 + field SELO 0x08 + field BSYO 0x04 + field REQO 0x02 + field ACKO 0x01 +/* + * Possible phases to write into SCSISIG0 + */ + mask PHASE_MASK CDI|IOI|MSGI + mask P_DATAOUT 0x00 + mask P_DATAIN IOI + mask P_COMMAND CDI + mask P_MESGOUT CDI|MSGI + mask P_STATUS CDI|IOI + mask P_MESGIN CDI|IOI|MSGI + dont_generate_debug_code +} + +/* + * SCSI Rate Control (p. 3-17). + * Contents of this register determine the Synchronous SCSI data transfer + * rate and the maximum synchronous Req/Ack offset. An offset of 0 in the + * SOFS (3:0) bits disables synchronous data transfers. Any offset value + * greater than 0 enables synchronous transfers. + */ +register SCSIRATE { + address 0x004 + access_mode RW + field WIDEXFER 0x80 /* Wide transfer control */ + field ENABLE_CRC 0x40 /* CRC for D-Phases */ + field SINGLE_EDGE 0x10 /* Disable DT Transfers */ + mask SXFR 0x70 /* Sync transfer rate */ + mask SXFR_ULTRA2 0x0f /* Sync transfer rate */ + mask SOFS 0x0f /* Sync offset */ +} + +/* + * SCSI ID (p. 3-18). + * Contains the ID of the board and the current target on the + * selected channel. + */ +register SCSIID { + address 0x005 + access_mode RW + mask TID 0xf0 /* Target ID mask */ + mask TWIN_TID 0x70 + field TWIN_CHNLB 0x80 + mask OID 0x0f /* Our ID mask */ + /* + * SCSI Maximum Offset (p. 4-61 aic7890/91 Data Book) + * The aic7890/91 allow an offset of up to 127 transfers in both wide + * and narrow mode. + */ + alias SCSIOFFSET + mask SOFS_ULTRA2 0x7f /* Sync offset U2 chips */ + dont_generate_debug_code +} + +/* + * SCSI Latched Data (p. 3-19). + * Read/Write latches used to transfer data on the SCSI bus during + * Automatic or Manual PIO mode. SCSIDATH can be used for the + * upper byte of a 16bit wide asynchronouse data phase transfer. + */ +register SCSIDATL { + address 0x006 + access_mode RW + dont_generate_debug_code +} + +register SCSIDATH { + address 0x007 + access_mode RW +} + +/* + * SCSI Transfer Count (pp. 3-19,20) + * These registers count down the number of bytes transferred + * across the SCSI bus. The counter is decremented only once + * the data has been safely transferred. 
SDONE in SSTAT0 is + * set when STCNT goes to 0 + */ +register STCNT { + address 0x008 + size 3 + access_mode RW + dont_generate_debug_code +} + +/* ALT_MODE registers (Ultra2 and Ultra160 chips) */ +register SXFRCTL2 { + address 0x013 + access_mode RW + field AUTORSTDIS 0x10 + field CMDDMAEN 0x08 + mask ASYNC_SETUP 0x07 +} + +/* ALT_MODE register on Ultra160 chips */ +register OPTIONMODE { + address 0x008 + access_mode RW + count 2 + field AUTORATEEN 0x80 + field AUTOACKEN 0x40 + field ATNMGMNTEN 0x20 + field BUSFREEREV 0x10 + field EXPPHASEDIS 0x08 + field SCSIDATL_IMGEN 0x04 + field AUTO_MSGOUT_DE 0x02 + field DIS_MSGIN_DUALEDGE 0x01 + mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE|DIS_MSGIN_DUALEDGE + dont_generate_debug_code +} + +/* ALT_MODE register on Ultra160 chips */ +register TARGCRCCNT { + address 0x00a + size 2 + access_mode RW + count 2 + dont_generate_debug_code +} + +/* + * Clear SCSI Interrupt 0 (p. 3-20) + * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0. + */ +register CLRSINT0 { + address 0x00b + access_mode WO + field CLRSELDO 0x40 + field CLRSELDI 0x20 + field CLRSELINGO 0x10 + field CLRSWRAP 0x08 + field CLRIOERR 0x08 /* Ultra2 Only */ + field CLRSPIORDY 0x02 + dont_generate_debug_code +} + +/* + * SCSI Status 0 (p. 3-21) + * Contains one set of SCSI Interrupt codes + * These are most likely of interest to the sequencer + */ +register SSTAT0 { + address 0x00b + access_mode RO + field TARGET 0x80 /* Board acting as target */ + field SELDO 0x40 /* Selection Done */ + field SELDI 0x20 /* Board has been selected */ + field SELINGO 0x10 /* Selection In Progress */ + field SWRAP 0x08 /* 24bit counter wrap */ + field IOERR 0x08 /* LVD Tranceiver mode changed */ + field SDONE 0x04 /* STCNT = 0x000000 */ + field SPIORDY 0x02 /* SCSI PIO Ready */ + field DMADONE 0x01 /* DMA transfer completed */ +} + +/* + * Clear SCSI Interrupt 1 (p. 3-23) + * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT1. + */ +register CLRSINT1 { + address 0x00c + access_mode WO + field CLRSELTIMEO 0x80 + field CLRATNO 0x40 + field CLRSCSIRSTI 0x20 + field CLRBUSFREE 0x08 + field CLRSCSIPERR 0x04 + field CLRPHASECHG 0x02 + field CLRREQINIT 0x01 + dont_generate_debug_code +} + +/* + * SCSI Status 1 (p. 3-24) + */ +register SSTAT1 { + address 0x00c + access_mode RO + field SELTO 0x80 + field ATNTARG 0x40 + field SCSIRSTI 0x20 + field PHASEMIS 0x10 + field BUSFREE 0x08 + field SCSIPERR 0x04 + field PHASECHG 0x02 + field REQINIT 0x01 +} + +/* + * SCSI Status 2 (pp. 3-25,26) + */ +register SSTAT2 { + address 0x00d + access_mode RO + field OVERRUN 0x80 + field SHVALID 0x40 /* Shadow Layer non-zero */ + field EXP_ACTIVE 0x10 /* SCSI Expander Active */ + field CRCVALERR 0x08 /* CRC doesn't match (U3 only) */ + field CRCENDERR 0x04 /* No terminal CRC packet (U3 only) */ + field CRCREQERR 0x02 /* Illegal CRC packet req (U3 only) */ + field DUAL_EDGE_ERR 0x01 /* Incorrect data phase (U3 only) */ + mask SFCNT 0x1f +} + +/* + * SCSI Status 3 (p. 3-26) + */ +register SSTAT3 { + address 0x00e + access_mode RO + count 2 + mask SCSICNT 0xf0 + mask OFFCNT 0x0f + mask U2OFFCNT 0x7f +} + +/* + * SCSI ID for the aic7890/91 chips + */ +register SCSIID_ULTRA2 { + address 0x00f + access_mode RW + mask TID 0xf0 /* Target ID mask */ + mask OID 0x0f /* Our ID mask */ + dont_generate_debug_code +} + +/* + * SCSI Interrupt Mode 1 (p. 3-28) + * Setting any bit will enable the corresponding function + * in SIMODE0 to interrupt via the IRQ pin. 
+ */ +register SIMODE0 { + address 0x010 + access_mode RW + count 2 + field ENSELDO 0x40 + field ENSELDI 0x20 + field ENSELINGO 0x10 + field ENSWRAP 0x08 + field ENIOERR 0x08 /* LVD Tranceiver mode changes */ + field ENSDONE 0x04 + field ENSPIORDY 0x02 + field ENDMADONE 0x01 +} + +/* + * SCSI Interrupt Mode 1 (pp. 3-28,29) + * Setting any bit will enable the corresponding function + * in SIMODE1 to interrupt via the IRQ pin. + */ +register SIMODE1 { + address 0x011 + access_mode RW + field ENSELTIMO 0x80 + field ENATNTARG 0x40 + field ENSCSIRST 0x20 + field ENPHASEMIS 0x10 + field ENBUSFREE 0x08 + field ENSCSIPERR 0x04 + field ENPHASECHG 0x02 + field ENREQINIT 0x01 +} + +/* + * SCSI Data Bus (High) (p. 3-29) + * This register reads data on the SCSI Data bus directly. + */ +register SCSIBUSL { + address 0x012 + access_mode RW +} + +register SCSIBUSH { + address 0x013 + access_mode RW +} + +/* + * SCSI/Host Address (p. 3-30) + * These registers hold the host address for the byte about to be + * transferred on the SCSI bus. They are counted up in the same + * manner as STCNT is counted down. SHADDR should always be used + * to determine the address of the last byte transferred since HADDR + * can be skewed by write ahead. + */ +register SHADDR { + address 0x014 + size 4 + access_mode RO + dont_generate_debug_code +} + +/* + * Selection Timeout Timer (p. 3-30) + */ +register SELTIMER { + address 0x018 + access_mode RW + count 1 + field STAGE6 0x20 + field STAGE5 0x10 + field STAGE4 0x08 + field STAGE3 0x04 + field STAGE2 0x02 + field STAGE1 0x01 + alias TARGIDIN + dont_generate_debug_code +} + +/* + * Selection/Reselection ID (p. 3-31) + * Upper four bits are the device id. The ONEBIT is set when the re/selecting + * device did not set its own ID. + */ +register SELID { + address 0x019 + access_mode RW + mask SELID_MASK 0xf0 + field ONEBIT 0x08 + dont_generate_debug_code +} + +register SCAMCTL { + address 0x01a + access_mode RW + field ENSCAMSELO 0x80 + field CLRSCAMSELID 0x40 + field ALTSTIM 0x20 + field DFLTTID 0x10 + mask SCAMLVL 0x03 +} + +/* + * Target Mode Selecting in ID bitmask (aic7890/91/96/97) + */ +register TARGID { + address 0x01b + size 2 + access_mode RW + count 14 + dont_generate_debug_code +} + +/* + * Serial Port I/O Cabability register (p. 4-95 aic7860 Data Book) + * Indicates if external logic has been attached to the chip to + * perform the tasks of accessing a serial eeprom, testing termination + * strength, and performing cable detection. On the aic7860, most of + * these features are handled on chip, but on the aic7855 an attached + * aic3800 does the grunt work. + */ +register SPIOCAP { + address 0x01b + access_mode RW + count 10 + field SOFT1 0x80 + field SOFT0 0x40 + field SOFTCMDEN 0x20 + field EXT_BRDCTL 0x10 /* External Board control */ + field SEEPROM 0x08 /* External serial eeprom logic */ + field EEPROM 0x04 /* Writable external BIOS ROM */ + field ROM 0x02 /* Logic for accessing external ROM */ + field SSPIOCPS 0x01 /* Termination and cable detection */ + dont_generate_debug_code +} + +register BRDCTL { + address 0x01d + count 11 + field BRDDAT7 0x80 + field BRDDAT6 0x40 + field BRDDAT5 0x20 + field BRDSTB 0x10 + field BRDCS 0x08 + field BRDRW 0x04 + field BRDCTL1 0x02 + field BRDCTL0 0x01 + /* 7890 Definitions */ + field BRDDAT4 0x10 + field BRDDAT3 0x08 + field BRDDAT2 0x04 + field BRDRW_ULTRA2 0x02 + field BRDSTB_ULTRA2 0x01 + dont_generate_debug_code +} + +/* + * Serial EEPROM Control (p. 
4-92 in 7870 Databook) + * Controls the reading and writing of an external serial 1-bit + * EEPROM Device. In order to access the serial EEPROM, you must + * first set the SEEMS bit that generates a request to the memory + * port for access to the serial EEPROM device. When the memory + * port is not busy servicing another request, it reconfigures + * to allow access to the serial EEPROM. When this happens, SEERDY + * gets set high to verify that the memory port access has been + * granted. + * + * After successful arbitration for the memory port, the SEECS bit of + * the SEECTL register is connected to the chip select. The SEECK, + * SEEDO, and SEEDI are connected to the clock, data out, and data in + * lines respectively. The SEERDY bit of SEECTL is useful in that it + * gives us an 800 nsec timer. After a write to the SEECTL register, + * the SEERDY goes high 800 nsec later. The one exception to this is + * when we first request access to the memory port. The SEERDY goes + * high to signify that access has been granted and, for this case, has + * no implied timing. + * + * See 93cx6.c for detailed information on the protocol necessary to + * read the serial EEPROM. + */ +register SEECTL { + address 0x01e + count 11 + field EXTARBACK 0x80 + field EXTARBREQ 0x40 + field SEEMS 0x20 + field SEERDY 0x10 + field SEECS 0x08 + field SEECK 0x04 + field SEEDO 0x02 + field SEEDI 0x01 + dont_generate_debug_code +} +/* + * SCSI Block Control (p. 3-32) + * Controls Bus type and channel selection. In a twin channel configuration + * addresses 0x00-0x1e are gated to the appropriate channel based on this + * register. SELWIDE allows for the coexistence of 8bit and 16bit devices + * on a wide bus. + */ +register SBLKCTL { + address 0x01f + access_mode RW + field DIAGLEDEN 0x80 /* Aic78X0 only */ + field DIAGLEDON 0x40 /* Aic78X0 only */ + field AUTOFLUSHDIS 0x20 + field SELBUSB 0x08 + field ENAB40 0x08 /* LVD transceiver active */ + field ENAB20 0x04 /* SE/HVD transceiver active */ + field SELWIDE 0x02 + field XCVR 0x01 /* External transceiver active */ +} + +/* + * Sequencer Control (p. 3-33) + * Error detection mode and speed configuration + */ +register SEQCTL { + address 0x060 + access_mode RW + count 15 + field PERRORDIS 0x80 + field PAUSEDIS 0x40 + field FAILDIS 0x20 + field FASTMODE 0x10 + field BRKADRINTEN 0x08 + field STEP 0x04 + field SEQRESET 0x02 + field LOADRAM 0x01 +} + +/* + * Sequencer RAM Data (p. 3-34) + * Single byte window into the Scratch Ram area starting at the address + * specified by SEQADDR0 and SEQADDR1. To write a full word, simply write + * four bytes in succession. The SEQADDRs will increment after the most + * significant byte is written + */ +register SEQRAM { + address 0x061 + access_mode RW + count 2 + dont_generate_debug_code +} + +/* + * Sequencer Address Registers (p. 
3-35) + * Only the first bit of SEQADDR1 holds addressing information + */ +register SEQADDR0 { + address 0x062 + access_mode RW + dont_generate_debug_code +} + +register SEQADDR1 { + address 0x063 + access_mode RW + count 8 + mask SEQADDR1_MASK 0x01 + dont_generate_debug_code +} + +/* + * Accumulator + * We cheat by passing arguments in the Accumulator up to the kernel driver + */ +register ACCUM { + address 0x064 + access_mode RW + accumulator + dont_generate_debug_code +} + +register SINDEX { + address 0x065 + access_mode RW + sindex + dont_generate_debug_code +} + +register DINDEX { + address 0x066 + access_mode RW + dont_generate_debug_code +} + +register ALLONES { + address 0x069 + access_mode RO + allones + dont_generate_debug_code +} + +register ALLZEROS { + address 0x06a + access_mode RO + allzeros + dont_generate_debug_code +} + +register NONE { + address 0x06a + access_mode WO + none + dont_generate_debug_code +} + +register FLAGS { + address 0x06b + access_mode RO + count 18 + field ZERO 0x02 + field CARRY 0x01 + dont_generate_debug_code +} + +register SINDIR { + address 0x06c + access_mode RO + dont_generate_debug_code +} + +register DINDIR { + address 0x06d + access_mode WO + dont_generate_debug_code +} + +register FUNCTION1 { + address 0x06e + access_mode RW +} + +register STACK { + address 0x06f + access_mode RO + count 5 + dont_generate_debug_code +} + +const STACK_SIZE 4 + +/* + * Board Control (p. 3-43) + */ +register BCTL { + address 0x084 + access_mode RW + field ACE 0x08 + field ENABLE 0x01 +} + +/* + * On the aic78X0 chips, Board Control is replaced by the DSCommand + * register (p. 4-64) + */ +register DSCOMMAND0 { + address 0x084 + access_mode RW + count 7 + field CACHETHEN 0x80 /* Cache Threshold enable */ + field DPARCKEN 0x40 /* Data Parity Check Enable */ + field MPARCKEN 0x20 /* Memory Parity Check Enable */ + field EXTREQLCK 0x10 /* External Request Lock */ + /* aic7890/91/96/97 only */ + field INTSCBRAMSEL 0x08 /* Internal SCB RAM Select */ + field RAMPS 0x04 /* External SCB RAM Present */ + field USCBSIZE32 0x02 /* Use 32byte SCB Page Size */ + field CIOPARCKEN 0x01 /* Internal bus parity error enable */ + dont_generate_debug_code +} + +register DSCOMMAND1 { + address 0x085 + access_mode RW + mask DSLATT 0xfc /* PCI latency timer (non-ultra2) */ + field HADDLDSEL1 0x02 /* Host Address Load Select Bits */ + field HADDLDSEL0 0x01 + dont_generate_debug_code +} + +/* + * Bus On/Off Time (p. 3-44) aic7770 only + */ +register BUSTIME { + address 0x085 + access_mode RW + count 2 + mask BOFF 0xf0 + mask BON 0x0f + dont_generate_debug_code +} + +/* + * Bus Speed (p. 3-45) aic7770 only + */ +register BUSSPD { + address 0x086 + access_mode RW + count 2 + mask DFTHRSH 0xc0 + mask STBOFF 0x38 + mask STBON 0x07 + mask DFTHRSH_100 0xc0 + mask DFTHRSH_75 0x80 + dont_generate_debug_code +} + +/* aic7850/55/60/70/80/95 only */ +register DSPCISTATUS { + address 0x086 + count 4 + mask DFTHRSH_100 0xc0 + dont_generate_debug_code +} + +/* aic7890/91/96/97 only */ +register HS_MAILBOX { + address 0x086 + mask HOST_MAILBOX 0xF0 + mask SEQ_MAILBOX 0x0F + mask HOST_TQINPOS 0x80 /* Boundary at either 0 or 128 */ + dont_generate_debug_code +} + +const HOST_MAILBOX_SHIFT 4 +const SEQ_MAILBOX_SHIFT 0 + +/* + * Host Control (p. 3-47) R/W + * Overall host control of the device. 
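+ *
+ * PAUSE and INTEN are the bits the host touches most often.  A
+ * hedged C-side sketch of pausing the sequencer, spinning until the
+ * chip acknowledges the request (the ahc_outb()/ahc_inb() accessors
+ * and the saved ahc->pause value come from the driver proper; the
+ * bare busy-wait is only illustrative):
+ *
+ *	ahc_outb(ahc, HCNTRL, ahc->pause);
+ *	while ((ahc_inb(ahc, HCNTRL) & PAUSE) == 0)
+ *		;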
+ */ +register HCNTRL { + address 0x087 + access_mode RW + count 14 + field POWRDN 0x40 + field SWINT 0x10 + field IRQMS 0x08 + field PAUSE 0x04 + field INTEN 0x02 + field CHIPRST 0x01 + field CHIPRSTACK 0x01 + dont_generate_debug_code +} + +/* + * Host Address (p. 3-48) + * This register contains the address of the byte about + * to be transferred across the host bus. + */ +register HADDR { + address 0x088 + size 4 + access_mode RW + dont_generate_debug_code +} + +register HCNT { + address 0x08c + size 3 + access_mode RW + dont_generate_debug_code +} + +/* + * SCB Pointer (p. 3-49) + * Gate one of the SCBs into the SCBARRAY window. + */ +register SCBPTR { + address 0x090 + access_mode RW + dont_generate_debug_code +} + +/* + * Interrupt Status (p. 3-50) + * Status for system interrupts + */ +register INTSTAT { + address 0x091 + access_mode RW + field BRKADRINT 0x08 + field SCSIINT 0x04 + field CMDCMPLT 0x02 + field SEQINT 0x01 + mask BAD_PHASE SEQINT /* unknown scsi bus phase */ + mask SEND_REJECT 0x10|SEQINT /* sending a message reject */ + mask PROTO_VIOLATION 0x20|SEQINT /* SCSI protocol violation */ + mask NO_MATCH 0x30|SEQINT /* no cmd match for reconnect */ + mask IGN_WIDE_RES 0x40|SEQINT /* Complex IGN Wide Res Msg */ + mask PDATA_REINIT 0x50|SEQINT /* + * Returned to data phase + * that requires data + * transfer pointers to be + * recalculated from the + * transfer residual. + */ + mask HOST_MSG_LOOP 0x60|SEQINT /* + * The bus is ready for the + * host to perform another + * message transaction. This + * mechanism is used for things + * like sync/wide negotiation + * that require a kernel based + * message state engine. + */ + mask BAD_STATUS 0x70|SEQINT /* Bad status from target */ + mask PERR_DETECTED 0x80|SEQINT /* + * Either the phase_lock + * or inb_next routine has + * noticed a parity error. + */ + mask DATA_OVERRUN 0x90|SEQINT /* + * Target attempted to write + * beyond the bounds of its + * command. + */ + mask MKMSG_FAILED 0xa0|SEQINT /* + * Target completed command + * without honoring our ATN + * request to issue a message. + */ + mask MISSED_BUSFREE 0xb0|SEQINT /* + * The sequencer never saw + * the bus go free after + * either a command complete + * or disconnect message. + */ + mask SCB_MISMATCH 0xc0|SEQINT /* + * Downloaded SCB's tag does + * not match the entry we + * intended to download. + */ + mask NO_FREE_SCB 0xd0|SEQINT /* + * get_free_or_disc_scb failed. + */ + mask OUT_OF_RANGE 0xe0|SEQINT + + mask SEQINT_MASK 0xf0|SEQINT /* SEQINT Status Codes */ + mask INT_PEND (BRKADRINT|SEQINT|SCSIINT|CMDCMPLT) + dont_generate_debug_code +} + +/* + * Hard Error (p. 3-53) + * Reporting of catastrophic errors. You usually cannot recover from + * these without a full board reset. + */ +register ERROR { + address 0x092 + access_mode RO + count 26 + field CIOPARERR 0x80 /* Ultra2 only */ + field PCIERRSTAT 0x40 /* PCI only */ + field MPARERR 0x20 /* PCI only */ + field DPARERR 0x10 /* PCI only */ + field SQPARERR 0x08 + field ILLOPCODE 0x04 + field ILLSADDR 0x02 + field ILLHADDR 0x01 +} + +/* + * Clear Interrupt Status (p. 
3-52) + */ +register CLRINT { + address 0x092 + access_mode WO + count 24 + field CLRPARERR 0x10 /* PCI only */ + field CLRBRKADRINT 0x08 + field CLRSCSIINT 0x04 + field CLRCMDINT 0x02 + field CLRSEQINT 0x01 + dont_generate_debug_code +} + +register DFCNTRL { + address 0x093 + access_mode RW + field PRELOADEN 0x80 /* aic7890 only */ + field WIDEODD 0x40 + field SCSIEN 0x20 + field SDMAEN 0x10 + field SDMAENACK 0x10 + field HDMAEN 0x08 + field HDMAENACK 0x08 + field DIRECTION 0x04 + field FIFOFLUSH 0x02 + field FIFORESET 0x01 +} + +register DFSTATUS { + address 0x094 + access_mode RO + field PRELOAD_AVAIL 0x80 + field DFCACHETH 0x40 + field FIFOQWDEMP 0x20 + field MREQPEND 0x10 + field HDONE 0x08 + field DFTHRESH 0x04 + field FIFOFULL 0x02 + field FIFOEMP 0x01 +} + +register DFWADDR { + address 0x95 + access_mode RW + dont_generate_debug_code +} + +register DFRADDR { + address 0x97 + access_mode RW +} + +register DFDAT { + address 0x099 + access_mode RW + dont_generate_debug_code +} + +/* + * SCB Auto Increment (p. 3-59) + * Byte offset into the SCB Array and an optional bit to allow auto + * incrementing of the address during download and upload operations + */ +register SCBCNT { + address 0x09a + access_mode RW + count 1 + field SCBAUTO 0x80 + mask SCBCNT_MASK 0x1f + dont_generate_debug_code +} + +/* + * Queue In FIFO (p. 3-60) + * Input queue for queued SCBs (commands that the seqencer has yet to start) + */ +register QINFIFO { + address 0x09b + access_mode RW + count 12 + dont_generate_debug_code +} + +/* + * Queue In Count (p. 3-60) + * Number of queued SCBs + */ +register QINCNT { + address 0x09c + access_mode RO +} + +/* + * Queue Out FIFO (p. 3-61) + * Queue of SCBs that have completed and await the host + */ +register QOUTFIFO { + address 0x09d + access_mode WO + count 7 + dont_generate_debug_code +} + +register CRCCONTROL1 { + address 0x09d + access_mode RW + count 3 + field CRCONSEEN 0x80 + field CRCVALCHKEN 0x40 + field CRCENDCHKEN 0x20 + field CRCREQCHKEN 0x10 + field TARGCRCENDEN 0x08 + field TARGCRCCNTEN 0x04 + dont_generate_debug_code +} + + +/* + * Queue Out Count (p. 3-61) + * Number of queued SCBs in the Out FIFO + */ +register QOUTCNT { + address 0x09e + access_mode RO +} + +register SCSIPHASE { + address 0x09e + access_mode RO + field STATUS_PHASE 0x20 + field COMMAND_PHASE 0x10 + field MSG_IN_PHASE 0x08 + field MSG_OUT_PHASE 0x04 + field DATA_IN_PHASE 0x02 + field DATA_OUT_PHASE 0x01 + mask DATA_PHASE_MASK 0x03 +} + +/* + * Special Function + */ +register SFUNCT { + address 0x09f + access_mode RW + count 4 + field ALT_MODE 0x80 + dont_generate_debug_code +} + +/* + * SCB Definition (p. 5-4) + */ +scb { + address 0x0a0 + size 64 + + SCB_CDB_PTR { + size 4 + alias SCB_RESIDUAL_DATACNT + alias SCB_CDB_STORE + dont_generate_debug_code + } + SCB_RESIDUAL_SGPTR { + size 4 + dont_generate_debug_code + } + SCB_SCSI_STATUS { + size 1 + dont_generate_debug_code + } + SCB_TARGET_PHASES { + size 1 + dont_generate_debug_code + } + SCB_TARGET_DATA_DIR { + size 1 + dont_generate_debug_code + } + SCB_TARGET_ITAG { + size 1 + dont_generate_debug_code + } + SCB_DATAPTR { + size 4 + dont_generate_debug_code + } + SCB_DATACNT { + /* + * The last byte is really the high address bits for + * the data address. 
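+	 *
+	 * In other words, a host-side S/G element using the
+	 * ahc_dma_seg encoding from aic7xxx.h could be packed as in
+	 * the sketch below (illustrative only; the helper name is
+	 * made up, byte-order conversion before handing the list to
+	 * the chip is ignored, and addr is assumed to fit in the
+	 * 39-bit range):
+	 *
+	 *	static void
+	 *	fill_sg(struct ahc_dma_seg *sg, uint64_t addr,
+	 *		uint32_t len, int last)
+	 *	{
+	 *		sg->addr = (uint32_t)addr;
+	 *		sg->len = (len & AHC_SG_LEN_MASK)
+	 *			| (((uint32_t)(addr >> 32) << 24)
+	 *			   & AHC_SG_HIGH_ADDR_MASK)
+	 *			| (last ? AHC_DMA_LAST_SEG : 0);
+	 *	}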
+ */ + size 4 + field SG_LAST_SEG 0x80 /* In the fourth byte */ + mask SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */ + dont_generate_debug_code + } + SCB_SGPTR { + size 4 + field SG_RESID_VALID 0x04 /* In the first byte */ + field SG_FULL_RESID 0x02 /* In the first byte */ + field SG_LIST_NULL 0x01 /* In the first byte */ + dont_generate_debug_code + } + SCB_CONTROL { + size 1 + field TARGET_SCB 0x80 + field STATUS_RCVD 0x80 + field DISCENB 0x40 + field TAG_ENB 0x20 + field MK_MESSAGE 0x10 + field ULTRAENB 0x08 + field DISCONNECTED 0x04 + mask SCB_TAG_TYPE 0x03 + } + SCB_SCSIID { + size 1 + field TWIN_CHNLB 0x80 + mask TWIN_TID 0x70 + mask TID 0xf0 + mask OID 0x0f + } + SCB_LUN { + field SCB_XFERLEN_ODD 0x80 + mask LID 0x3f + size 1 + } + SCB_TAG { + size 1 + } + SCB_CDB_LEN { + size 1 + dont_generate_debug_code + } + SCB_SCSIRATE { + size 1 + dont_generate_debug_code + } + SCB_SCSIOFFSET { + size 1 + count 1 + dont_generate_debug_code + } + SCB_NEXT { + size 1 + dont_generate_debug_code + } + SCB_64_SPARE { + size 16 + } + SCB_64_BTT { + size 16 + dont_generate_debug_code + } +} + +const SCB_UPLOAD_SIZE 32 +const SCB_DOWNLOAD_SIZE 32 +const SCB_DOWNLOAD_SIZE_64 48 + +const SG_SIZEOF 0x08 /* sizeof(struct ahc_dma) */ + +/* --------------------- AHA-2840-only definitions -------------------- */ + +register SEECTL_2840 { + address 0x0c0 + access_mode RW + count 2 + field CS_2840 0x04 + field CK_2840 0x02 + field DO_2840 0x01 + dont_generate_debug_code +} + +register STATUS_2840 { + address 0x0c1 + access_mode RW + count 4 + field EEPROM_TF 0x80 + mask BIOS_SEL 0x60 + mask ADSEL 0x1e + field DI_2840 0x01 + dont_generate_debug_code +} + +/* --------------------- AIC-7870-only definitions -------------------- */ + +register CCHADDR { + address 0x0E0 + size 8 + dont_generate_debug_code +} + +register CCHCNT { + address 0x0E8 + dont_generate_debug_code +} + +register CCSGRAM { + address 0x0E9 + dont_generate_debug_code +} + +register CCSGADDR { + address 0x0EA + dont_generate_debug_code +} + +register CCSGCTL { + address 0x0EB + field CCSGDONE 0x80 + field CCSGEN 0x08 + field SG_FETCH_NEEDED 0x02 /* Bit used for software state */ + field CCSGRESET 0x01 + dont_generate_debug_code +} + +register CCSCBCNT { + address 0xEF + count 1 + dont_generate_debug_code +} + +register CCSCBCTL { + address 0x0EE + field CCSCBDONE 0x80 + field ARRDONE 0x40 /* SCB Array prefetch done */ + field CCARREN 0x10 + field CCSCBEN 0x08 + field CCSCBDIR 0x04 + field CCSCBRESET 0x01 + dont_generate_debug_code +} + +register CCSCBADDR { + address 0x0ED + dont_generate_debug_code +} + +register CCSCBRAM { + address 0xEC + dont_generate_debug_code +} + +/* + * SCB bank address (7895/7896/97 only) + */ +register SCBBADDR { + address 0x0F0 + access_mode RW + count 3 + dont_generate_debug_code +} + +register CCSCBPTR { + address 0x0F1 + dont_generate_debug_code +} + +register HNSCB_QOFF { + address 0x0F4 + count 4 + dont_generate_debug_code +} + +register SNSCB_QOFF { + address 0x0F6 + dont_generate_debug_code +} + +register SDSCB_QOFF { + address 0x0F8 + dont_generate_debug_code +} + +register QOFF_CTLSTA { + address 0x0FA + field SCB_AVAIL 0x40 + field SNSCB_ROLLOVER 0x20 + field SDSCB_ROLLOVER 0x10 + mask SCB_QSIZE 0x07 + mask SCB_QSIZE_256 0x06 + dont_generate_debug_code +} + +register DFF_THRSH { + address 0x0FB + mask WR_DFTHRSH 0x70 + mask RD_DFTHRSH 0x07 + mask RD_DFTHRSH_MIN 0x00 + mask RD_DFTHRSH_25 0x01 + mask RD_DFTHRSH_50 0x02 + mask RD_DFTHRSH_63 0x03 + mask RD_DFTHRSH_75 0x04 + mask RD_DFTHRSH_85 0x05 + mask 
RD_DFTHRSH_90 0x06 + mask RD_DFTHRSH_MAX 0x07 + mask WR_DFTHRSH_MIN 0x00 + mask WR_DFTHRSH_25 0x10 + mask WR_DFTHRSH_50 0x20 + mask WR_DFTHRSH_63 0x30 + mask WR_DFTHRSH_75 0x40 + mask WR_DFTHRSH_85 0x50 + mask WR_DFTHRSH_90 0x60 + mask WR_DFTHRSH_MAX 0x70 + count 4 + dont_generate_debug_code +} + +register SG_CACHE_PRE { + access_mode WO + address 0x0fc + mask SG_ADDR_MASK 0xf8 + field LAST_SEG 0x02 + field LAST_SEG_DONE 0x01 + dont_generate_debug_code +} + +register SG_CACHE_SHADOW { + access_mode RO + address 0x0fc + mask SG_ADDR_MASK 0xf8 + field LAST_SEG 0x02 + field LAST_SEG_DONE 0x01 + dont_generate_debug_code +} +/* ---------------------- Scratch RAM Offsets ------------------------- */ +/* These offsets are either to values that are initialized by the board's + * BIOS or are specified by the sequencer code. + * + * The host adapter card (at least the BIOS) uses 20-2f for SCSI + * device information, 32-33 and 5a-5f as well. As it turns out, the + * BIOS trashes 20-2f, writing the synchronous negotiation results + * on top of the BIOS values, so we re-use those for our per-target + * scratchspace (actually a value that can be copied directly into + * SCSIRATE). The kernel driver will enable synchronous negotiation + * for all targets that have a value other than 0 in the lower four + * bits of the target scratch space. This should work regardless of + * whether the bios has been installed. + */ + +scratch_ram { + address 0x020 + size 58 + + /* + * 1 byte per target starting at this address for configuration values + */ + BUSY_TARGETS { + alias TARG_SCSIRATE + size 16 + dont_generate_debug_code + } + /* + * Bit vector of targets that have ULTRA enabled as set by + * the BIOS. The Sequencer relies on a per-SCB field to + * control whether to enable Ultra transfers or not. During + * initialization, we read this field and reuse it for 2 + * entries in the busy target table. + */ + ULTRA_ENB { + alias CMDSIZE_TABLE + size 2 + count 2 + dont_generate_debug_code + } + /* + * Bit vector of targets that have disconnection disabled as set by + * the BIOS. The Sequencer relies in a per-SCB field to control the + * disconnect priveldge. During initialization, we read this field + * and reuse it for 2 entries in the busy target table. + */ + DISC_DSB { + size 2 + count 6 + dont_generate_debug_code + } + CMDSIZE_TABLE_TAIL { + size 4 + } + /* + * Partial transfer past cacheline end to be + * transferred using an extra S/G. + */ + MWI_RESIDUAL { + size 1 + dont_generate_debug_code + } + /* + * SCBID of the next SCB to be started by the controller. + */ + NEXT_QUEUED_SCB { + size 1 + dont_generate_debug_code + } + /* + * Single byte buffer used to designate the type or message + * to send to a target. 
+ */ + MSG_OUT { + size 1 + dont_generate_debug_code + } + /* Parameters for DMA Logic */ + DMAPARAMS { + size 1 + count 12 + field PRELOADEN 0x80 + field WIDEODD 0x40 + field SCSIEN 0x20 + field SDMAEN 0x10 + field SDMAENACK 0x10 + field HDMAEN 0x08 + field HDMAENACK 0x08 + field DIRECTION 0x04 /* Set indicates PCI->SCSI */ + field FIFOFLUSH 0x02 + field FIFORESET 0x01 + dont_generate_debug_code + } + SEQ_FLAGS { + size 1 + field NOT_IDENTIFIED 0x80 + field NO_CDB_SENT 0x40 + field TARGET_CMD_IS_TAGGED 0x40 + field DPHASE 0x20 + /* Target flags */ + field TARG_CMD_PENDING 0x10 + field CMDPHASE_PENDING 0x08 + field DPHASE_PENDING 0x04 + field SPHASE_PENDING 0x02 + field NO_DISCONNECT 0x01 + } + /* + * Temporary storage for the + * target/channel/lun of a + * reconnecting target + */ + SAVED_SCSIID { + size 1 + dont_generate_debug_code + } + SAVED_LUN { + size 1 + dont_generate_debug_code + } + /* + * The last bus phase as seen by the sequencer. + */ + LASTPHASE { + size 1 + field CDI 0x80 + field IOI 0x40 + field MSGI 0x20 + mask PHASE_MASK CDI|IOI|MSGI + mask P_DATAOUT 0x00 + mask P_DATAIN IOI + mask P_COMMAND CDI + mask P_MESGOUT CDI|MSGI + mask P_STATUS CDI|IOI + mask P_MESGIN CDI|IOI|MSGI + mask P_BUSFREE 0x01 + } + /* + * head of list of SCBs awaiting + * selection + */ + WAITING_SCBH { + size 1 + dont_generate_debug_code + } + /* + * head of list of SCBs that are + * disconnected. Used for SCB + * paging. + */ + DISCONNECTED_SCBH { + size 1 + dont_generate_debug_code + } + /* + * head of list of SCBs that are + * not in use. Used for SCB paging. + */ + FREE_SCBH { + size 1 + dont_generate_debug_code + } + /* + * head of list of SCBs that have + * completed but have not been + * put into the qoutfifo. + */ + COMPLETE_SCBH { + size 1 + } + /* + * Address of the hardware scb array in the host. + */ + HSCB_ADDR { + size 4 + dont_generate_debug_code + } + /* + * Base address of our shared data with the kernel driver in host + * memory. This includes the qoutfifo and target mode + * incoming command queue. + */ + SHARED_DATA_ADDR { + size 4 + dont_generate_debug_code + } + KERNEL_QINPOS { + size 1 + dont_generate_debug_code + } + QINPOS { + size 1 + dont_generate_debug_code + } + QOUTPOS { + size 1 + dont_generate_debug_code + } + /* + * Kernel and sequencer offsets into the queue of + * incoming target mode command descriptors. The + * queue is full when the KERNEL_TQINPOS == TQINPOS. + */ + KERNEL_TQINPOS { + size 1 + dont_generate_debug_code + } + TQINPOS { + size 1 + dont_generate_debug_code + } + ARG_1 { + size 1 + count 1 + mask SEND_MSG 0x80 + mask SEND_SENSE 0x40 + mask SEND_REJ 0x20 + mask MSGOUT_PHASEMIS 0x10 + mask EXIT_MSG_LOOP 0x08 + mask CONT_MSG_LOOP 0x04 + mask CONT_TARG_SESSION 0x02 + alias RETURN_1 + dont_generate_debug_code + } + ARG_2 { + size 1 + alias RETURN_2 + dont_generate_debug_code + } + + /* + * Snapshot of MSG_OUT taken after each message is sent. + */ + LAST_MSG { + size 1 + alias TARG_IMMEDIATE_SCB + dont_generate_debug_code + } + + /* + * Sequences the kernel driver has okayed for us. This allows + * the driver to do things like prevent initiator or target + * operations. + */ + SCSISEQ_TEMPLATE { + size 1 + field ENSELO 0x40 + field ENSELI 0x20 + field ENRSELI 0x10 + field ENAUTOATNO 0x08 + field ENAUTOATNI 0x04 + field ENAUTOATNP 0x02 + dont_generate_debug_code + } +} + +scratch_ram { + address 0x056 + size 4 + /* + * These scratch ram locations are initialized by the 274X BIOS. + * We reuse them after capturing the BIOS settings during + * initialization. 
+ */ + + /* + * The initiator specified tag for this target mode transaction. + */ + HA_274_BIOSGLOBAL { + size 1 + field HA_274_EXTENDED_TRANS 0x01 + alias INITIATOR_TAG + count 1 + dont_generate_debug_code + } + + SEQ_FLAGS2 { + size 1 + field SCB_DMA 0x01 + field TARGET_MSG_PENDING 0x02 + dont_generate_debug_code + } +} + +scratch_ram { + address 0x05a + size 6 + /* + * These are reserved registers in the card's scratch ram on the 2742. + * The EISA configuration chip is mapped here. On Rev E. of the + * aic7770, the sequencer can use this area for scratch, but the + * host cannot directly access these registers. On later chips, this + * area can be read and written by both the host and the sequencer. + * Even on later chips, many of these locations are initialized by + * the BIOS. + */ + SCSICONF { + size 1 + count 12 + field TERM_ENB 0x80 + field RESET_SCSI 0x40 + field ENSPCHK 0x20 + mask HSCSIID 0x07 /* our SCSI ID */ + mask HWSCSIID 0x0f /* our SCSI ID if Wide Bus */ + dont_generate_debug_code + } + INTDEF { + address 0x05c + size 1 + count 1 + field EDGE_TRIG 0x80 + mask VECTOR 0x0f + dont_generate_debug_code + } + HOSTCONF { + address 0x05d + size 1 + count 1 + dont_generate_debug_code + } + HA_274_BIOSCTRL { + address 0x05f + size 1 + count 1 + mask BIOSMODE 0x30 + mask BIOSDISABLED 0x30 + field CHANNEL_B_PRIMARY 0x08 + dont_generate_debug_code + } +} + +scratch_ram { + address 0x070 + size 16 + + /* + * Per target SCSI offset values for Ultra2 controllers. + */ + TARG_OFFSET { + size 16 + count 1 + dont_generate_debug_code + } +} + +const TID_SHIFT 4 +const SCB_LIST_NULL 0xff +const TARGET_CMD_CMPLT 0xfe + +const CCSGADDR_MAX 0x80 +const CCSGRAM_MAXSEGS 16 + +/* WDTR Message values */ +const BUS_8_BIT 0x00 +const BUS_16_BIT 0x01 +const BUS_32_BIT 0x02 + +/* Offset maximums */ +const MAX_OFFSET_8BIT 0x0f +const MAX_OFFSET_16BIT 0x08 +const MAX_OFFSET_ULTRA2 0x7f +const MAX_OFFSET 0x7f +const HOST_MSG 0xff + +/* Target mode command processing constants */ +const CMD_GROUP_CODE_SHIFT 0x05 + +const STATUS_BUSY 0x08 +const STATUS_QUEUE_FULL 0x28 +const TARGET_DATA_IN 1 + +/* + * Downloaded (kernel inserted) constants + */ +/* Offsets into the SCBID array where different data is stored */ +const QOUTFIFO_OFFSET download +const QINFIFO_OFFSET download +const CACHESIZE_MASK download +const INVERTED_CACHESIZE_MASK download +const SG_PREFETCH_CNT download +const SG_PREFETCH_ALIGN_MASK download +const SG_PREFETCH_ADDR_MASK download diff --git a/drivers/scsi/aic7xxx/aic7xxx.seq b/drivers/scsi/aic7xxx/aic7xxx.seq new file mode 100644 index 000000000..e60041e8f --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7xxx.seq @@ -0,0 +1,2399 @@ +/* + * Adaptec 274x/284x/294x device driver firmware for Linux and FreeBSD. + * + * Copyright (c) 1994-2001 Justin T. Gibbs. + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $FreeBSD$ + */ + +VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $" +PATCH_ARG_LIST = "struct ahc_softc *ahc" +PREFIX = "ahc_" + +#include "aic7xxx.reg" +#include "scsi_message.h" + +/* + * A few words on the waiting SCB list: + * After starting the selection hardware, we check for reconnecting targets + * as well as for our selection to complete just in case the reselection wins + * bus arbitration. The problem with this is that we must keep track of the + * SCB that we've already pulled from the QINFIFO and started the selection + * on just in case the reselection wins so that we can retry the selection at + * a later time. This problem cannot be resolved by holding a single entry + * in scratch ram since a reconnecting target can request sense and this will + * create yet another SCB waiting for selection. The solution used here is to + * use byte 27 of the SCB as a pseudo-next pointer and to thread a list + * of SCBs that are awaiting selection. Since 0-0xfe are valid SCB indexes, + * SCB_LIST_NULL is 0xff which is out of range. An entry is also added to + * this list every time a request sense occurs or after completing a non-tagged + * command for which a second SCB has been queued. The sequencer will + * automatically consume the entries. + */ + +bus_free_sel: + /* + * Turn off the selection hardware. We need to reset the + * selection request in order to perform a new selection. + */ + and SCSISEQ, TEMODE|ENSELI|ENRSELI|ENAUTOATNP; + and SIMODE1, ~ENBUSFREE; +poll_for_work: + call clear_target_state; + and SXFRCTL0, ~SPIOEN; + if ((ahc->features & AHC_ULTRA2) != 0) { + clr SCSIBUSL; + } + test SCSISEQ, ENSELO jnz poll_for_selection; + if ((ahc->features & AHC_TWIN) != 0) { + xor SBLKCTL,SELBUSB; /* Toggle to the other bus */ + test SCSISEQ, ENSELO jnz poll_for_selection; + } + cmp WAITING_SCBH,SCB_LIST_NULL jne start_waiting; +poll_for_work_loop: + if ((ahc->features & AHC_TWIN) != 0) { + xor SBLKCTL,SELBUSB; /* Toggle to the other bus */ + } + test SSTAT0, SELDO|SELDI jnz selection; +test_queue: + /* Has the driver posted any work for us? 
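The "waiting SCB list" described above is an intrusive singly linked list threaded through a one-byte next field in each SCB, with the out-of-range index 0xff (SCB_LIST_NULL) as the null terminator; the disconnected and free lists use the same scheme. A small C sketch of the idea, assuming a plain array of software SCBs (struct and function names are illustrative, not driver symbols); the queueing and selection code below manipulates WAITING_SCBH exactly like this:

#include <stdint.h>

#define SCB_LIST_NULL 0xff                  /* out-of-range index used as "null" */

struct soft_scb {                           /* stand-in for one card SCB slot */
    uint8_t tag;
    uint8_t next;                           /* the "byte 27" pseudo-next pointer */
};

static struct soft_scb scbs[255];           /* valid indexes: 0..0xfe */
static uint8_t waiting_scbh = SCB_LIST_NULL;    /* list head kept in scratch RAM */

/* Thread an SCB onto the head of the waiting list. */
static void waiting_push(uint8_t idx)
{
    scbs[idx].next = waiting_scbh;
    waiting_scbh = idx;
}

/* Pop the head when starting a selection. */
static uint8_t waiting_pop(void)
{
    uint8_t idx = waiting_scbh;

    if (idx != SCB_LIST_NULL)
        waiting_scbh = scbs[idx].next;
    return idx;                             /* SCB_LIST_NULL if the list was empty */
}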
*/ +BEGIN_CRITICAL; + if ((ahc->features & AHC_QUEUE_REGS) != 0) { + test QOFF_CTLSTA, SCB_AVAIL jz poll_for_work_loop; + } else { + mov A, QINPOS; + cmp KERNEL_QINPOS, A je poll_for_work_loop; + } + mov ARG_1, NEXT_QUEUED_SCB; + + /* + * We have at least one queued SCB now and we don't have any + * SCBs in the list of SCBs awaiting selection. Allocate a + * card SCB for the host's SCB and get to work on it. + */ + if ((ahc->flags & AHC_PAGESCBS) != 0) { + mov ALLZEROS call get_free_or_disc_scb; + } else { + /* In the non-paging case, the SCBID == hardware SCB index */ + mov SCBPTR, ARG_1; + } + or SEQ_FLAGS2, SCB_DMA; +END_CRITICAL; +dma_queued_scb: + /* + * DMA the SCB from host ram into the current SCB location. + */ + mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; + mov ARG_1 call dma_scb; + /* + * Check one last time to see if this SCB was canceled + * before we completed the DMA operation. If it was, + * the QINFIFO next pointer will not match our saved + * value. + */ + mov A, ARG_1; +BEGIN_CRITICAL; + cmp NEXT_QUEUED_SCB, A jne abort_qinscb; + if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { + cmp SCB_TAG, A je . + 2; + mvi SCB_MISMATCH call set_seqint; + } + mov NEXT_QUEUED_SCB, SCB_NEXT; + mov SCB_NEXT,WAITING_SCBH; + mov WAITING_SCBH, SCBPTR; + if ((ahc->features & AHC_QUEUE_REGS) != 0) { + mov NONE, SNSCB_QOFF; + } else { + inc QINPOS; + } + and SEQ_FLAGS2, ~SCB_DMA; +END_CRITICAL; +start_waiting: + /* + * Start the first entry on the waiting SCB list. + */ + mov SCBPTR, WAITING_SCBH; + call start_selection; + +poll_for_selection: + /* + * Twin channel devices cannot handle things like SELTO + * interrupts on the "background" channel. So, while + * selecting, keep polling the current channel until + * either a selection or reselection occurs. + */ + test SSTAT0, SELDO|SELDI jz poll_for_selection; + +selection: + /* + * We aren't expecting a bus free, so interrupt + * the kernel driver if it happens. + */ + mvi CLRSINT1,CLRBUSFREE; + if ((ahc->features & AHC_DT) == 0) { + or SIMODE1, ENBUSFREE; + } + + /* + * Guard against a bus free after (re)selection + * but prior to enabling the busfree interrupt. SELDI + * and SELDO will be cleared in that case. + */ + test SSTAT0, SELDI|SELDO jz bus_free_sel; + test SSTAT0,SELDO jnz select_out; +select_in: + if ((ahc->flags & AHC_TARGETROLE) != 0) { + if ((ahc->flags & AHC_INITIATORROLE) != 0) { + test SSTAT0, TARGET jz initiator_reselect; + } + mvi CLRSINT0, CLRSELDI; + + /* + * We've just been selected. Assert BSY and + * setup the phase for receiving messages + * from the target. + */ + mvi SCSISIGO, P_MESGOUT|BSYO; + + /* + * Setup the DMA for sending the identify and + * command information. + */ + mvi SEQ_FLAGS, CMDPHASE_PENDING; + + mov A, TQINPOS; + if ((ahc->features & AHC_CMD_CHAN) != 0) { + mvi DINDEX, CCHADDR; + mvi SHARED_DATA_ADDR call set_32byte_addr; + mvi CCSCBCTL, CCSCBRESET; + } else { + mvi DINDEX, HADDR; + mvi SHARED_DATA_ADDR call set_32byte_addr; + mvi DFCNTRL, FIFORESET; + } + + /* Initiator that selected us */ + and SAVED_SCSIID, SELID_MASK, SELID; + /* The Target ID we were selected at */ + if ((ahc->features & AHC_MULTI_TID) != 0) { + and A, OID, TARGIDIN; + } else if ((ahc->features & AHC_ULTRA2) != 0) { + and A, OID, SCSIID_ULTRA2; + } else { + and A, OID, SCSIID; + } + or SAVED_SCSIID, A; + if ((ahc->features & AHC_TWIN) != 0) { + test SBLKCTL, SELBUSB jz . 
+ 2; + or SAVED_SCSIID, TWIN_CHNLB; + } + if ((ahc->features & AHC_CMD_CHAN) != 0) { + mov CCSCBRAM, SAVED_SCSIID; + } else { + mov DFDAT, SAVED_SCSIID; + } + + /* + * If ATN isn't asserted, the target isn't interested + * in talking to us. Go directly to bus free. + * XXX SCSI-1 may require us to assume lun 0 if + * ATN is false. + */ + test SCSISIGI, ATNI jz target_busfree; + + /* + * Watch ATN closely now as we pull in messages from the + * initiator. We follow the guidlines from section 6.5 + * of the SCSI-2 spec for what messages are allowed when. + */ + call target_inb; + + /* + * Our first message must be one of IDENTIFY, ABORT, or + * BUS_DEVICE_RESET. + */ + test DINDEX, MSG_IDENTIFYFLAG jz host_target_message_loop; + /* Store for host */ + if ((ahc->features & AHC_CMD_CHAN) != 0) { + mov CCSCBRAM, DINDEX; + } else { + mov DFDAT, DINDEX; + } + and SAVED_LUN, MSG_IDENTIFY_LUNMASK, DINDEX; + + /* Remember for disconnection decision */ + test DINDEX, MSG_IDENTIFY_DISCFLAG jnz . + 2; + /* XXX Honor per target settings too */ + or SEQ_FLAGS, NO_DISCONNECT; + + test SCSISIGI, ATNI jz ident_messages_done; + call target_inb; + /* + * If this is a tagged request, the tagged message must + * immediately follow the identify. We test for a valid + * tag message by seeing if it is >= MSG_SIMPLE_Q_TAG and + * < MSG_IGN_WIDE_RESIDUE. + */ + add A, -MSG_SIMPLE_Q_TAG, DINDEX; + jnc ident_messages_done_msg_pending; + add A, -MSG_IGN_WIDE_RESIDUE, DINDEX; + jc ident_messages_done_msg_pending; + + /* Store for host */ + if ((ahc->features & AHC_CMD_CHAN) != 0) { + mov CCSCBRAM, DINDEX; + } else { + mov DFDAT, DINDEX; + } + + /* + * If the initiator doesn't feel like providing a tag number, + * we've got a failed selection and must transition to bus + * free. + */ + test SCSISIGI, ATNI jz target_busfree; + + /* + * Store the tag for the host. + */ + call target_inb; + if ((ahc->features & AHC_CMD_CHAN) != 0) { + mov CCSCBRAM, DINDEX; + } else { + mov DFDAT, DINDEX; + } + mov INITIATOR_TAG, DINDEX; + or SEQ_FLAGS, TARGET_CMD_IS_TAGGED; + +ident_messages_done: + /* Terminate the ident list */ + if ((ahc->features & AHC_CMD_CHAN) != 0) { + mvi CCSCBRAM, SCB_LIST_NULL; + } else { + mvi DFDAT, SCB_LIST_NULL; + } + or SEQ_FLAGS, TARG_CMD_PENDING; + test SEQ_FLAGS2, TARGET_MSG_PENDING + jnz target_mesgout_pending; + test SCSISIGI, ATNI jnz target_mesgout_continue; + jmp target_ITloop; + + +ident_messages_done_msg_pending: + or SEQ_FLAGS2, TARGET_MSG_PENDING; + jmp ident_messages_done; + + /* + * Pushed message loop to allow the kernel to + * run it's own target mode message state engine. + */ +host_target_message_loop: + mvi HOST_MSG_LOOP call set_seqint; + cmp RETURN_1, EXIT_MSG_LOOP je target_ITloop; + test SSTAT0, SPIORDY jz .; + jmp host_target_message_loop; + } + +if ((ahc->flags & AHC_INITIATORROLE) != 0) { +/* + * Reselection has been initiated by a target. Make a note that we've been + * reselected, but haven't seen an IDENTIFY message from the target yet. + */ +initiator_reselect: + /* XXX test for and handle ONE BIT condition */ + or SXFRCTL0, SPIOEN|CLRSTCNT|CLRCHN; + and SAVED_SCSIID, SELID_MASK, SELID; + if ((ahc->features & AHC_ULTRA2) != 0) { + and A, OID, SCSIID_ULTRA2; + } else { + and A, OID, SCSIID; + } + or SAVED_SCSIID, A; + if ((ahc->features & AHC_TWIN) != 0) { + test SBLKCTL, SELBUSB jz . 
+ 2; + or SAVED_SCSIID, TWIN_CHNLB; + } + mvi CLRSINT0, CLRSELDI; + jmp ITloop; +} + +abort_qinscb: + call add_scb_to_free_list; + jmp poll_for_work_loop; + +start_selection: + /* + * If bus reset interrupts have been disabled (from a previous + * reset), re-enable them now. Resets are only of interest + * when we have outstanding transactions, so we can safely + * defer re-enabling the interrupt until, as an initiator, + * we start sending out transactions again. + */ + test SIMODE1, ENSCSIRST jnz . + 3; + mvi CLRSINT1, CLRSCSIRSTI; + or SIMODE1, ENSCSIRST; + if ((ahc->features & AHC_TWIN) != 0) { + and SINDEX,~SELBUSB,SBLKCTL;/* Clear channel select bit */ + test SCB_SCSIID, TWIN_CHNLB jz . + 2; + or SINDEX, SELBUSB; + mov SBLKCTL,SINDEX; /* select channel */ + } +initialize_scsiid: + if ((ahc->features & AHC_ULTRA2) != 0) { + mov SCSIID_ULTRA2, SCB_SCSIID; + } else if ((ahc->features & AHC_TWIN) != 0) { + and SCSIID, TWIN_TID|OID, SCB_SCSIID; + } else { + mov SCSIID, SCB_SCSIID; + } + if ((ahc->flags & AHC_TARGETROLE) != 0) { + mov SINDEX, SCSISEQ_TEMPLATE; + test SCB_CONTROL, TARGET_SCB jz . + 2; + or SINDEX, TEMODE; + mov SCSISEQ, SINDEX ret; + } else { + mov SCSISEQ, SCSISEQ_TEMPLATE ret; + } + +/* + * Initialize transfer settings with SCB provided settings. + */ +set_transfer_settings: + if ((ahc->features & AHC_ULTRA) != 0) { + test SCB_CONTROL, ULTRAENB jz . + 2; + or SXFRCTL0, FAST20; + } + /* + * Initialize SCSIRATE with the appropriate value for this target. + */ + if ((ahc->features & AHC_ULTRA2) != 0) { + bmov SCSIRATE, SCB_SCSIRATE, 2 ret; + } else { + mov SCSIRATE, SCB_SCSIRATE ret; + } + +if ((ahc->flags & AHC_TARGETROLE) != 0) { +/* + * We carefully toggle SPIOEN to allow us to return the + * message byte we receive so it can be checked prior to + * driving REQ on the bus for the next byte. + */ +target_inb: + /* + * Drive REQ on the bus by enabling SCSI PIO. + */ + or SXFRCTL0, SPIOEN; + /* Wait for the byte */ + test SSTAT0, SPIORDY jz .; + /* Prevent our read from triggering another REQ */ + and SXFRCTL0, ~SPIOEN; + /* Save latched contents */ + mov DINDEX, SCSIDATL ret; +} + +/* + * After the selection, remove this SCB from the "waiting SCB" + * list. This is achieved by simply moving our "next" pointer into + * WAITING_SCBH. Our next pointer will be set to null the next time this + * SCB is used, so don't bother with it now. + */ +select_out: + /* Turn off the selection hardware */ + and SCSISEQ, TEMODE|ENSELI|ENRSELI|ENAUTOATNP, SCSISEQ; + mov SCBPTR, WAITING_SCBH; + mov WAITING_SCBH,SCB_NEXT; + mov SAVED_SCSIID, SCB_SCSIID; + and SAVED_LUN, LID, SCB_LUN; + call set_transfer_settings; + if ((ahc->flags & AHC_TARGETROLE) != 0) { + test SSTAT0, TARGET jz initiator_select; + + or SXFRCTL0, CLRSTCNT|CLRCHN; + + /* + * Put tag in connonical location since not + * all connections have an SCB. + */ + mov INITIATOR_TAG, SCB_TARGET_ITAG; + + /* + * We've just re-selected an initiator. + * Assert BSY and setup the phase for + * sending our identify messages. + */ + mvi P_MESGIN|BSYO call change_phase; + mvi CLRSINT0, CLRSELDO; + + /* + * Start out with a simple identify message. + */ + or SAVED_LUN, MSG_IDENTIFYFLAG call target_outb; + + /* + * If we are the result of a tagged command, send + * a simple Q tag and the tag id. + */ + test SCB_CONTROL, TAG_ENB jz . + 3; + mvi MSG_SIMPLE_Q_TAG call target_outb; + mov SCB_TARGET_ITAG call target_outb; +target_synccmd: + /* + * Now determine what phases the host wants us + * to go through. 
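The tag snooping in the target-mode identify handling above validates the byte following IDENTIFY with an add/branch-on-carry pair, because the sequencer has no unsigned compare. A hedged C equivalent; the message codes used here are the conventional SCSI-2 values and are an assumption, not taken from this patch:

#include <stdint.h>
#include <stdbool.h>

#define MSG_SIMPLE_Q_TAG     0x20           /* conventional SCSI-2 message codes */
#define MSG_IGN_WIDE_RESIDUE 0x23

/*
 * The sequencer adds the two's complement of each bound and branches on
 * carry:
 *
 *     add A, -MSG_SIMPLE_Q_TAG, msg       carry set  <=>  msg >= 0x20
 *     add A, -MSG_IGN_WIDE_RESIDUE, msg   carry set  <=>  msg >= 0x23
 *
 * Together (jnc / jc) that is the range test below; 0x20-0x22 are the
 * three queue tag messages (simple, head of queue, ordered).
 */
static bool is_queue_tag_msg(uint8_t msg)
{
    return msg >= MSG_SIMPLE_Q_TAG && msg < MSG_IGN_WIDE_RESIDUE;
}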
+ */ + mov SEQ_FLAGS, SCB_TARGET_PHASES; + + test SCB_CONTROL, MK_MESSAGE jz target_ITloop; + mvi P_MESGIN|BSYO call change_phase; + jmp host_target_message_loop; +target_ITloop: + /* + * Start honoring ATN signals now that + * we properly identified ourselves. + */ + test SCSISIGI, ATNI jnz target_mesgout; + test SEQ_FLAGS, CMDPHASE_PENDING jnz target_cmdphase; + test SEQ_FLAGS, DPHASE_PENDING jnz target_dphase; + test SEQ_FLAGS, SPHASE_PENDING jnz target_sphase; + + /* + * No more work to do. Either disconnect or not depending + * on the state of NO_DISCONNECT. + */ + test SEQ_FLAGS, NO_DISCONNECT jz target_disconnect; + mvi TARG_IMMEDIATE_SCB, SCB_LIST_NULL; + call complete_target_cmd; + if ((ahc->flags & AHC_PAGESCBS) != 0) { + mov ALLZEROS call get_free_or_disc_scb; + } + cmp TARG_IMMEDIATE_SCB, SCB_LIST_NULL je .; + mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; + mov TARG_IMMEDIATE_SCB call dma_scb; + call set_transfer_settings; + or SXFRCTL0, CLRSTCNT|CLRCHN; + jmp target_synccmd; + +target_mesgout: + mvi SCSISIGO, P_MESGOUT|BSYO; +target_mesgout_continue: + call target_inb; +target_mesgout_pending: + and SEQ_FLAGS2, ~TARGET_MSG_PENDING; + /* Local Processing goes here... */ + jmp host_target_message_loop; + +target_disconnect: + mvi P_MESGIN|BSYO call change_phase; + test SEQ_FLAGS, DPHASE jz . + 2; + mvi MSG_SAVEDATAPOINTER call target_outb; + mvi MSG_DISCONNECT call target_outb; + +target_busfree_wait: + /* Wait for preceding I/O session to complete. */ + test SCSISIGI, ACKI jnz .; +target_busfree: + and SIMODE1, ~ENBUSFREE; + if ((ahc->features & AHC_ULTRA2) != 0) { + clr SCSIBUSL; + } + clr SCSISIGO; + mvi LASTPHASE, P_BUSFREE; + call complete_target_cmd; + jmp poll_for_work; + +target_cmdphase: + /* + * The target has dropped ATN (doesn't want to abort or BDR) + * and we believe this selection to be valid. If the ring + * buffer for new commands is full, return busy or queue full. + */ + if ((ahc->features & AHC_HS_MAILBOX) != 0) { + and A, HOST_TQINPOS, HS_MAILBOX; + } else { + mov A, KERNEL_TQINPOS; + } + cmp TQINPOS, A jne tqinfifo_has_space; + mvi P_STATUS|BSYO call change_phase; + test SEQ_FLAGS, TARGET_CMD_IS_TAGGED jz . + 3; + mvi STATUS_QUEUE_FULL call target_outb; + jmp target_busfree_wait; + mvi STATUS_BUSY call target_outb; + jmp target_busfree_wait; +tqinfifo_has_space: + mvi P_COMMAND|BSYO call change_phase; + call target_inb; + mov A, DINDEX; + /* Store for host */ + if ((ahc->features & AHC_CMD_CHAN) != 0) { + mov CCSCBRAM, A; + } else { + mov DFDAT, A; + } + + /* + * Determine the number of bytes to read + * based on the command group code via table lookup. + * We reuse the first 8 bytes of the TARG_SCSIRATE + * BIOS array for this table. Count is one less than + * the total for the command since we've already fetched + * the first byte. + */ + shr A, CMD_GROUP_CODE_SHIFT; + add SINDEX, CMDSIZE_TABLE, A; + mov A, SINDIR; + + test A, 0xFF jz command_phase_done; + or SXFRCTL0, SPIOEN; +command_loop: + test SSTAT0, SPIORDY jz .; + cmp A, 1 jne . + 2; + and SXFRCTL0, ~SPIOEN; /* Last Byte */ + if ((ahc->features & AHC_CMD_CHAN) != 0) { + mov CCSCBRAM, SCSIDATL; + } else { + mov DFDAT, SCSIDATL; + } + dec A; + test A, 0xFF jnz command_loop; + +command_phase_done: + and SEQ_FLAGS, ~CMDPHASE_PENDING; + jmp target_ITloop; + +target_dphase: + /* + * Data phases on the bus are from the + * perspective of the initiator. The dma + * code looks at LASTPHASE to determine the + * data direction of the DMA. Toggle it for + * target transfers. 
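target_cmdphase above sizes the incoming CDB by indexing a per-group-code table with the opcode's top three bits (CMD_GROUP_CODE_SHIFT), storing one less than the full length because the opcode byte has already been read. The table contents are downloaded by the kernel driver; the sizes in this illustrative C sketch are the conventional SCSI group sizes and are an assumption, not the driver's actual table:

#include <stdint.h>

#define CMD_GROUP_CODE_SHIFT 5

/*
 * Conventional SCSI CDB sizes by group code (top three bits of the opcode).
 * Groups 3, 6 and 7 are reserved/vendor specific and are left at 0 here.
 */
static const uint8_t scsi_cdb_len[8] = { 6, 10, 10, 0, 16, 12, 0, 0 };

static uint8_t cdb_bytes_remaining(uint8_t opcode)
{
    uint8_t len = scsi_cdb_len[opcode >> CMD_GROUP_CODE_SHIFT];

    return len ? len - 1 : 0;               /* opcode byte already consumed */
}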
+ */ + xor LASTPHASE, IOI, SCB_TARGET_DATA_DIR; + or SCB_TARGET_DATA_DIR, BSYO call change_phase; + jmp p_data; + +target_sphase: + mvi P_STATUS|BSYO call change_phase; + mvi LASTPHASE, P_STATUS; + mov SCB_SCSI_STATUS call target_outb; + /* XXX Watch for ATN or parity errors??? */ + mvi SCSISIGO, P_MESGIN|BSYO; + /* MSG_CMDCMPLT is 0, but we can't do an immediate of 0 */ + mov ALLZEROS call target_outb; + jmp target_busfree_wait; + +complete_target_cmd: + test SEQ_FLAGS, TARG_CMD_PENDING jnz . + 2; + mov SCB_TAG jmp complete_post; + if ((ahc->features & AHC_CMD_CHAN) != 0) { + /* Set the valid byte */ + mvi CCSCBADDR, 24; + mov CCSCBRAM, ALLONES; + mvi CCHCNT, 28; + or CCSCBCTL, CCSCBEN|CCSCBRESET; + test CCSCBCTL, CCSCBDONE jz .; + clr CCSCBCTL; + } else { + /* Set the valid byte */ + or DFCNTRL, FIFORESET; + mvi DFWADDR, 3; /* Third 64bit word or byte 24 */ + mov DFDAT, ALLONES; + mvi 28 call set_hcnt; + or DFCNTRL, HDMAEN|FIFOFLUSH; + call dma_finish; + } + inc TQINPOS; + mvi INTSTAT,CMDCMPLT ret; + } + +if ((ahc->flags & AHC_INITIATORROLE) != 0) { +initiator_select: + or SXFRCTL0, SPIOEN|CLRSTCNT|CLRCHN; + /* + * As soon as we get a successful selection, the target + * should go into the message out phase since we have ATN + * asserted. + */ + mvi MSG_OUT, MSG_IDENTIFYFLAG; + mvi SEQ_FLAGS, NO_CDB_SENT; + mvi CLRSINT0, CLRSELDO; + + /* + * Main loop for information transfer phases. Wait for the + * target to assert REQ before checking MSG, C/D and I/O for + * the bus phase. + */ +mesgin_phasemis: +ITloop: + call phase_lock; + + mov A, LASTPHASE; + + test A, ~P_DATAIN jz p_data; + cmp A,P_COMMAND je p_command; + cmp A,P_MESGOUT je p_mesgout; + cmp A,P_STATUS je p_status; + cmp A,P_MESGIN je p_mesgin; + + mvi BAD_PHASE call set_seqint; + jmp ITloop; /* Try reading the bus again. */ + +await_busfree: + and SIMODE1, ~ENBUSFREE; + mov NONE, SCSIDATL; /* Ack the last byte */ + if ((ahc->features & AHC_ULTRA2) != 0) { + clr SCSIBUSL; /* Prevent bit leakage durint SELTO */ + } + and SXFRCTL0, ~SPIOEN; + mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT; + test SSTAT1,REQINIT|BUSFREE jz .; + test SSTAT1, BUSFREE jnz poll_for_work; + mvi MISSED_BUSFREE call set_seqint; +} + +clear_target_state: + /* + * We assume that the kernel driver may reset us + * at any time, even in the middle of a DMA, so + * clear DFCNTRL too. + */ + clr DFCNTRL; + or SXFRCTL0, CLRSTCNT|CLRCHN; + + /* + * We don't know the target we will connect to, + * so default to narrow transfers to avoid + * parity problems. + */ + if ((ahc->features & AHC_ULTRA2) != 0) { + bmov SCSIRATE, ALLZEROS, 2; + } else { + clr SCSIRATE; + if ((ahc->features & AHC_ULTRA) != 0) { + and SXFRCTL0, ~(FAST20); + } + } + mvi LASTPHASE, P_BUSFREE; + /* clear target specific flags */ + mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT ret; + +sg_advance: + clr A; /* add sizeof(struct scatter) */ + add SCB_RESIDUAL_SGPTR[0],SG_SIZEOF; + adc SCB_RESIDUAL_SGPTR[1],A; + adc SCB_RESIDUAL_SGPTR[2],A; + adc SCB_RESIDUAL_SGPTR[3],A ret; + +if ((ahc->features & AHC_CMD_CHAN) != 0) { +disable_ccsgen: + test CCSGCTL, CCSGEN jz return; + test CCSGCTL, CCSGDONE jz .; +disable_ccsgen_fetch_done: + clr CCSGCTL; + test CCSGCTL, CCSGEN jnz .; + ret; +idle_loop: + /* + * Do we need any more segments for this transfer? + */ + test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jnz return; + + /* Did we just finish fetching segs? */ + cmp CCSGCTL, CCSGEN|CCSGDONE je idle_sgfetch_complete; + + /* Are we actively fetching segments? 
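One detail of the ITloop dispatch earlier: because P_DATAOUT is 0x00 and P_DATAIN is the lone IOI bit, a single "test A, ~P_DATAIN jz p_data" catches both data phases. The same test in C, purely as an illustration:

#include <stdint.h>
#include <stdbool.h>

#define IOI       0x40
#define P_DATAIN  IOI                       /* DATA IN  = I/O asserted only */
#define P_DATAOUT 0x00                      /* DATA OUT = no phase bits set */

/*
 * The two data phases are the only phase codes with no bits outside IOI,
 * so clearing IOI and testing for zero matches both in one instruction.
 */
static bool is_data_phase(uint8_t lastphase)
{
    return (lastphase & (0xffu & ~P_DATAIN)) == 0;
}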
*/ + test CCSGCTL, CCSGEN jnz return; + + /* + * Do we have any prefetch left??? + */ + cmp CCSGADDR, SG_PREFETCH_CNT jne idle_sg_avail; + + /* + * Need to fetch segments, but we can only do that + * if the command channel is completely idle. Make + * sure we don't have an SCB prefetch going on. + */ + test CCSCBCTL, CCSCBEN jnz return; + + /* + * We fetch a "cacheline aligned" and sized amount of data + * so we don't end up referencing a non-existent page. + * Cacheline aligned is in quotes because the kernel will + * set the prefetch amount to a reasonable level if the + * cacheline size is unknown. + */ + mvi CCHCNT, SG_PREFETCH_CNT; + and CCHADDR[0], SG_PREFETCH_ALIGN_MASK, SCB_RESIDUAL_SGPTR; + bmov CCHADDR[1], SCB_RESIDUAL_SGPTR[1], 3; + mvi CCSGCTL, CCSGEN|CCSGRESET ret; +idle_sgfetch_complete: + call disable_ccsgen_fetch_done; + and CCSGADDR, SG_PREFETCH_ADDR_MASK, SCB_RESIDUAL_SGPTR; +idle_sg_avail: + if ((ahc->features & AHC_ULTRA2) != 0) { + /* Does the hardware have space for another SG entry? */ + test DFSTATUS, PRELOAD_AVAIL jz return; + bmov HADDR, CCSGRAM, 7; + bmov SCB_RESIDUAL_DATACNT[3], CCSGRAM, 1; + if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { + mov SCB_RESIDUAL_DATACNT[3] call set_hhaddr; + } + call sg_advance; + mov SINDEX, SCB_RESIDUAL_SGPTR[0]; + test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 2; + or SINDEX, LAST_SEG; + mov SG_CACHE_PRE, SINDEX; + /* Load the segment */ + or DFCNTRL, PRELOADEN; + } + ret; +} + +if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { +/* + * Calculate the trailing portion of this S/G segment that cannot + * be transferred using memory write and invalidate PCI transactions. + * XXX Can we optimize this for PCI writes only??? + */ +calc_mwi_residual: + /* + * If the ending address is on a cacheline boundary, + * there is no need for an extra segment. + */ + mov A, HCNT[0]; + add A, A, HADDR[0]; + and A, CACHESIZE_MASK; + test A, 0xFF jz return; + + /* + * If the transfer is less than a cachline, + * there is no need for an extra segment. + */ + test HCNT[1], 0xFF jnz calc_mwi_residual_final; + test HCNT[2], 0xFF jnz calc_mwi_residual_final; + add NONE, INVERTED_CACHESIZE_MASK, HCNT[0]; + jnc return; + +calc_mwi_residual_final: + mov MWI_RESIDUAL, A; + not A; + inc A; + add HCNT[0], A; + adc HCNT[1], -1; + adc HCNT[2], -1 ret; +} + +p_data: + test SEQ_FLAGS,NOT_IDENTIFIED|NO_CDB_SENT jz p_data_allowed; + mvi PROTO_VIOLATION call set_seqint; +p_data_allowed: + if ((ahc->features & AHC_ULTRA2) != 0) { + mvi DMAPARAMS, PRELOADEN|SCSIEN|HDMAEN; + } else { + mvi DMAPARAMS, WIDEODD|SCSIEN|SDMAEN|HDMAEN|FIFORESET; + } + test LASTPHASE, IOI jnz . + 2; + or DMAPARAMS, DIRECTION; + if ((ahc->features & AHC_CMD_CHAN) != 0) { + /* We don't have any valid S/G elements */ + mvi CCSGADDR, SG_PREFETCH_CNT; + } + test SEQ_FLAGS, DPHASE jz data_phase_initialize; + + /* + * If we re-enter the data phase after going through another + * phase, our transfer location has almost certainly been + * corrupted by the interveining, non-data, transfers. Ask + * the host driver to fix us up based on the transfer residual. + */ + mvi PDATA_REINIT call set_seqint; + jmp data_phase_loop; + +data_phase_initialize: + /* We have seen a data phase for the first time */ + or SEQ_FLAGS, DPHASE; + + /* + * Initialize the DMA address and counter from the SCB. + * Also set SCB_RESIDUAL_SGPTR, including the LAST_SEG + * flag in the highest byte of the data count. We cannot + * modify the saved values in the SCB until we see a save + * data pointers message. 
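calc_mwi_residual above splits off the tail of a segment that does not end on a PCI cacheline boundary, so the bulk of the segment can be transferred with Memory Write and Invalidate; transfers shorter than one cacheline are left alone. A C sketch of that arithmetic, assuming cacheline_size is the power-of-two value behind the downloaded CACHESIZE_MASK (function and parameter names are illustrative):

#include <stdint.h>

static void split_mwi_tail(uint32_t haddr, uint32_t *hcnt,
                           uint32_t *mwi_residual, uint32_t cacheline_size)
{
    uint32_t tail = (haddr + *hcnt) & (cacheline_size - 1);

    *mwi_residual = 0;
    if (tail == 0 || *hcnt < cacheline_size)
        return;                             /* aligned end, or too short to bother */

    *mwi_residual = tail;                   /* reloaded later as its own chunk */
    *hcnt -= tail;                          /* main part now ends on a boundary */
}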
+ */ + if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { + /* The lowest address byte must be loaded last. */ + mov SCB_DATACNT[3] call set_hhaddr; + } + if ((ahc->features & AHC_CMD_CHAN) != 0) { + bmov HADDR, SCB_DATAPTR, 7; + bmov SCB_RESIDUAL_DATACNT[3], SCB_DATACNT[3], 5; + } else { + mvi DINDEX, HADDR; + mvi SCB_DATAPTR call bcopy_7; + mvi DINDEX, SCB_RESIDUAL_DATACNT + 3; + mvi SCB_DATACNT + 3 call bcopy_5; + } + if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { + call calc_mwi_residual; + } + and SCB_RESIDUAL_SGPTR[0], ~SG_FULL_RESID; + + if ((ahc->features & AHC_ULTRA2) == 0) { + if ((ahc->features & AHC_CMD_CHAN) != 0) { + bmov STCNT, HCNT, 3; + } else { + call set_stcnt_from_hcnt; + } + } + +data_phase_loop: + /* Guard against overruns */ + test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz data_phase_inbounds; + + /* + * Turn on `Bit Bucket' mode, wait until the target takes + * us to another phase, and then notify the host. + */ + and DMAPARAMS, DIRECTION; + mov DFCNTRL, DMAPARAMS; + or SXFRCTL1,BITBUCKET; + if ((ahc->features & AHC_DT) == 0) { + test SSTAT1,PHASEMIS jz .; + } else { + test SCSIPHASE, DATA_PHASE_MASK jnz .; + } + and SXFRCTL1, ~BITBUCKET; + mvi DATA_OVERRUN call set_seqint; + jmp ITloop; + +data_phase_inbounds: + if ((ahc->features & AHC_ULTRA2) != 0) { + mov SINDEX, SCB_RESIDUAL_SGPTR[0]; + test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 2; + or SINDEX, LAST_SEG; + mov SG_CACHE_PRE, SINDEX; + mov DFCNTRL, DMAPARAMS; +ultra2_dma_loop: + call idle_loop; + /* + * The transfer is complete if either the last segment + * completes or the target changes phase. + */ + test SG_CACHE_SHADOW, LAST_SEG_DONE jnz ultra2_dmafinish; + if ((ahc->features & AHC_DT) == 0) { + if ((ahc->flags & AHC_TARGETROLE) != 0) { + /* + * As a target, we control the phases, + * so ignore PHASEMIS. + */ + test SSTAT0, TARGET jnz ultra2_dma_loop; + } + if ((ahc->flags & AHC_INITIATORROLE) != 0) { + test SSTAT1,PHASEMIS jz ultra2_dma_loop; + } + } else { + test DFCNTRL, SCSIEN jnz ultra2_dma_loop; + } + +ultra2_dmafinish: + /* + * The transfer has terminated either due to a phase + * change, and/or the completion of the last segment. + * We have two goals here. Do as much other work + * as possible while the data fifo drains on a read + * and respond as quickly as possible to the standard + * messages (save data pointers/disconnect and command + * complete) that usually follow a data phase. + */ + if ((ahc->bugs & AHC_AUTOFLUSH_BUG) != 0) { + /* + * On chips with broken auto-flush, start + * the flushing process now. We'll poke + * the chip from time to time to keep the + * flush process going as we complete the + * data phase. + */ + or DFCNTRL, FIFOFLUSH; + } + /* + * We assume that, even though data may still be + * transferring to the host, that the SCSI side of + * the DMA engine is now in a static state. This + * allows us to update our notion of where we are + * in this transfer. + * + * If, by chance, we stopped before being able + * to fetch additional segments for this transfer, + * yet the last S/G was completely exhausted, + * call our idle loop until it is able to load + * another segment. This will allow us to immediately + * pickup on the next segment on the next data phase. + * + * If we happened to stop on the last segment, then + * our residual information is still correct from + * the idle loop and there is no need to perform + * any fixups. 
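The S/G prefetch in idle_loop earlier always fetches SG_PREFETCH_CNT bytes starting at an aligned host address, then indexes into the fetched block to reach the current element. The following C sketch shows the address math as inferred from how the two downloaded masks are applied (the align mask clears the offset bits, the addr mask keeps them); the struct and helper are hypothetical:

#include <stdint.h>

/*
 * Assumes prefetch_cnt (SG_PREFETCH_CNT) is a power of two.  The firmware
 * only masks the low address byte, which is equivalent for any power-of-two
 * block size up to 256.
 */
struct sg_prefetch {
    uint32_t host_fetch_addr;               /* programmed into CCHADDR  */
    uint8_t  ccsgram_index;                 /* programmed into CCSGADDR */
};

static struct sg_prefetch sg_prefetch_setup(uint32_t sgptr, uint8_t prefetch_cnt)
{
    struct sg_prefetch p;

    p.host_fetch_addr = sgptr & ~(uint32_t)(prefetch_cnt - 1);    /* aligned base   */
    p.ccsgram_index   = (uint8_t)(sgptr & (uint32_t)(prefetch_cnt - 1)); /* offset  */
    return p;
}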
+ */ +ultra2_ensure_sg: + test SG_CACHE_SHADOW, LAST_SEG jz ultra2_shvalid; + /* Record if we've consumed all S/G entries */ + test SSTAT2, SHVALID jnz residuals_correct; + or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL; + jmp residuals_correct; + +ultra2_shvalid: + test SSTAT2, SHVALID jnz sgptr_fixup; + call idle_loop; + jmp ultra2_ensure_sg; + +sgptr_fixup: + /* + * Fixup the residual next S/G pointer. The S/G preload + * feature of the chip allows us to load two elements + * in addition to the currently active element. We + * store the bottom byte of the next S/G pointer in + * the SG_CACEPTR register so we can restore the + * correct value when the DMA completes. If the next + * sg ptr value has advanced to the point where higher + * bytes in the address have been affected, fix them + * too. + */ + test SG_CACHE_SHADOW, 0x80 jz sgptr_fixup_done; + test SCB_RESIDUAL_SGPTR[0], 0x80 jnz sgptr_fixup_done; + add SCB_RESIDUAL_SGPTR[1], -1; + adc SCB_RESIDUAL_SGPTR[2], -1; + adc SCB_RESIDUAL_SGPTR[3], -1; +sgptr_fixup_done: + and SCB_RESIDUAL_SGPTR[0], SG_ADDR_MASK, SG_CACHE_SHADOW; + /* We are not the last seg */ + and SCB_RESIDUAL_DATACNT[3], ~SG_LAST_SEG; +residuals_correct: + /* + * Go ahead and shut down the DMA engine now. + * In the future, we'll want to handle end of + * transfer messages prior to doing this, but this + * requires similar restructuring for pre-ULTRA2 + * controllers. + */ + test DMAPARAMS, DIRECTION jnz ultra2_fifoempty; +ultra2_fifoflush: + if ((ahc->features & AHC_DT) == 0) { + if ((ahc->bugs & AHC_AUTOFLUSH_BUG) != 0) { + /* + * On Rev A of the aic7890, the autoflush + * feature doesn't function correctly. + * Perform an explicit manual flush. During + * a manual flush, the FIFOEMP bit becomes + * true every time the PCI FIFO empties + * regardless of the state of the SCSI FIFO. + * It can take up to 4 clock cycles for the + * SCSI FIFO to get data into the PCI FIFO + * and for FIFOEMP to de-assert. Here we + * guard against this condition by making + * sure the FIFOEMP bit stays on for 5 full + * clock cycles. + */ + or DFCNTRL, FIFOFLUSH; + test DFSTATUS, FIFOEMP jz ultra2_fifoflush; + test DFSTATUS, FIFOEMP jz ultra2_fifoflush; + test DFSTATUS, FIFOEMP jz ultra2_fifoflush; + test DFSTATUS, FIFOEMP jz ultra2_fifoflush; + } + test DFSTATUS, FIFOEMP jz ultra2_fifoflush; + } else { + /* + * We enable the auto-ack feature on DT capable + * controllers. This means that the controller may + * have already transferred some overrun bytes into + * the data FIFO and acked them on the bus. The only + * way to detect this situation is to wait for + * LAST_SEG_DONE to come true on a completed transfer + * and then test to see if the data FIFO is non-empty. + */ + test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL + jz ultra2_wait_fifoemp; + test SG_CACHE_SHADOW, LAST_SEG_DONE jz .; + /* + * FIFOEMP can lag LAST_SEG_DONE. Wait a few + * clocks before calling this an overrun. + */ + test DFSTATUS, FIFOEMP jnz ultra2_fifoempty; + test DFSTATUS, FIFOEMP jnz ultra2_fifoempty; + test DFSTATUS, FIFOEMP jnz ultra2_fifoempty; + /* Overrun */ + jmp data_phase_loop; +ultra2_wait_fifoemp: + test DFSTATUS, FIFOEMP jz .; + } +ultra2_fifoempty: + /* Don't clobber an inprogress host data transfer */ + test DFSTATUS, MREQPEND jnz ultra2_fifoempty; +ultra2_dmahalt: + and DFCNTRL, ~(SCSIEN|HDMAEN); + test DFCNTRL, SCSIEN|HDMAEN jnz .; + if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { + /* + * Keep HHADDR cleared for future, 32bit addressed + * only, DMA operations. 
+ * + * Due to bayonette style S/G handling, our residual + * data must be "fixed up" once the transfer is halted. + * Here we fixup the HSHADDR stored in the high byte + * of the residual data cnt. By postponing the fixup, + * we can batch the clearing of HADDR with the fixup. + * If we halted on the last segment, the residual is + * already correct. If we are not on the last + * segment, copy the high address directly from HSHADDR. + * We don't need to worry about maintaining the + * SG_LAST_SEG flag as it will always be false in the + * case where an update is required. + */ + or DSCOMMAND1, HADDLDSEL0; + test SG_CACHE_SHADOW, LAST_SEG jnz . + 2; + mov SCB_RESIDUAL_DATACNT[3], SHADDR; + clr HADDR; + and DSCOMMAND1, ~HADDLDSEL0; + } + } else { + /* If we are the last SG block, tell the hardware. */ + if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 + && ahc->pci_cachesize != 0) { + test MWI_RESIDUAL, 0xFF jnz dma_mid_sg; + } + test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz dma_mid_sg; + if ((ahc->flags & AHC_TARGETROLE) != 0) { + test SSTAT0, TARGET jz dma_last_sg; + if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0) { + test DMAPARAMS, DIRECTION jz dma_mid_sg; + } + } +dma_last_sg: + and DMAPARAMS, ~WIDEODD; +dma_mid_sg: + /* Start DMA data transfer. */ + mov DFCNTRL, DMAPARAMS; +dma_loop: + if ((ahc->features & AHC_CMD_CHAN) != 0) { + call idle_loop; + } + test SSTAT0,DMADONE jnz dma_dmadone; + test SSTAT1,PHASEMIS jz dma_loop; /* ie. underrun */ +dma_phasemis: + /* + * We will be "done" DMAing when the transfer count goes to + * zero, or the target changes the phase (in light of this, + * it makes sense that the DMA circuitry doesn't ACK when + * PHASEMIS is active). If we are doing a SCSI->Host transfer, + * the data FIFO should be flushed auto-magically on STCNT=0 + * or a phase change, so just wait for FIFO empty status. + */ +dma_checkfifo: + test DFCNTRL,DIRECTION jnz dma_fifoempty; +dma_fifoflush: + test DFSTATUS,FIFOEMP jz dma_fifoflush; +dma_fifoempty: + /* Don't clobber an inprogress host data transfer */ + test DFSTATUS, MREQPEND jnz dma_fifoempty; + + /* + * Now shut off the DMA and make sure that the DMA + * hardware has actually stopped. Touching the DMA + * counters, etc. while a DMA is active will result + * in an ILLSADDR exception. + */ +dma_dmadone: + and DFCNTRL, ~(SCSIEN|SDMAEN|HDMAEN); +dma_halt: + /* + * Some revisions of the aic78XX have a problem where, if the + * data fifo is full, but the PCI input latch is not empty, + * HDMAEN cannot be cleared. The fix used here is to drain + * the prefetched but unused data from the data fifo until + * there is space for the input latch to drain. + */ + if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0) { + mov NONE, DFDAT; + } + test DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz dma_halt; + + /* See if we have completed this last segment */ + test STCNT[0], 0xff jnz data_phase_finish; + test STCNT[1], 0xff jnz data_phase_finish; + test STCNT[2], 0xff jnz data_phase_finish; + + /* + * Advance the scatter-gather pointers if needed + */ + if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 + && ahc->pci_cachesize != 0) { + test MWI_RESIDUAL, 0xFF jz no_mwi_resid; + /* + * Reload HADDR from SHADDR and setup the + * count to be the size of our residual. 
+ */ + if ((ahc->features & AHC_CMD_CHAN) != 0) { + bmov HADDR, SHADDR, 4; + mov HCNT, MWI_RESIDUAL; + bmov HCNT[1], ALLZEROS, 2; + } else { + mvi DINDEX, HADDR; + mvi SHADDR call bcopy_4; + mov MWI_RESIDUAL call set_hcnt; + } + clr MWI_RESIDUAL; + jmp sg_load_done; +no_mwi_resid: + } + test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz sg_load; + or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL; + jmp data_phase_finish; +sg_load: + /* + * Load the next SG element's data address and length + * into the DMA engine. If we don't have hardware + * to perform a prefetch, we'll have to fetch the + * segment from host memory first. + */ + if ((ahc->features & AHC_CMD_CHAN) != 0) { + /* Wait for the idle loop to complete */ + test CCSGCTL, CCSGEN jz . + 3; + call idle_loop; + test CCSGCTL, CCSGEN jnz . - 1; + bmov HADDR, CCSGRAM, 7; + /* + * Workaround for flaky external SCB RAM + * on certain aic7895 setups. It seems + * unable to handle direct transfers from + * S/G ram to certain SCB locations. + */ + mov SINDEX, CCSGRAM; + mov SCB_RESIDUAL_DATACNT[3], SINDEX; + } else { + if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { + mov ALLZEROS call set_hhaddr; + } + mvi DINDEX, HADDR; + mvi SCB_RESIDUAL_SGPTR call bcopy_4; + + mvi SG_SIZEOF call set_hcnt; + + or DFCNTRL, HDMAEN|DIRECTION|FIFORESET; + + call dma_finish; + + mvi DINDEX, HADDR; + call dfdat_in_7; + mov SCB_RESIDUAL_DATACNT[3], DFDAT; + } + + if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { + mov SCB_RESIDUAL_DATACNT[3] call set_hhaddr; + + /* + * The lowest address byte must be loaded + * last as it triggers the computation of + * some items in the PCI block. The ULTRA2 + * chips do this on PRELOAD. + */ + mov HADDR, HADDR; + } + if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 + && ahc->pci_cachesize != 0) { + call calc_mwi_residual; + } + + /* Point to the new next sg in memory */ + call sg_advance; + +sg_load_done: + if ((ahc->features & AHC_CMD_CHAN) != 0) { + bmov STCNT, HCNT, 3; + } else { + call set_stcnt_from_hcnt; + } + + if ((ahc->flags & AHC_TARGETROLE) != 0) { + test SSTAT0, TARGET jnz data_phase_loop; + } + } +data_phase_finish: + /* + * If the target has left us in data phase, loop through + * the dma code again. In the case of ULTRA2 adapters, + * we should only loop if there is a data overrun. For + * all other adapters, we'll loop after each S/G element + * is loaded as well as if there is an overrun. + */ + if ((ahc->flags & AHC_TARGETROLE) != 0) { + test SSTAT0, TARGET jnz data_phase_done; + } + if ((ahc->flags & AHC_INITIATORROLE) != 0) { + test SSTAT1, REQINIT jz .; + if ((ahc->features & AHC_DT) == 0) { + test SSTAT1,PHASEMIS jz data_phase_loop; + } else { + test SCSIPHASE, DATA_PHASE_MASK jnz data_phase_loop; + } + } + +data_phase_done: + /* + * After a DMA finishes, save the SG and STCNT residuals back into + * the SCB. We use STCNT instead of HCNT, since it's a reflection + * of how many bytes were transferred on the SCSI (as opposed to the + * host) bus. + */ + if ((ahc->features & AHC_CMD_CHAN) != 0) { + /* Kill off any pending prefetch */ + call disable_ccsgen; + } + + if ((ahc->features & AHC_ULTRA2) == 0) { + /* + * Clear the high address byte so that all other DMA + * operations, which use 32bit addressing, can assume + * HHADDR is 0. + */ + if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { + mov ALLZEROS call set_hhaddr; + } + } + + /* + * Update our residual information before the information is + * lost by some other type of SCSI I/O (e.g. PIO). If we have + * transferred all data, no update is needed. 
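The residual update that follows adds any outstanding MWI tail back into the 24-bit STCNT value: STCNT counts bytes that were not moved on the SCSI bus, and a tail split off by the MWI workaround and not yet reloaded was never handed to the DMA engine, so it counts as untransferred too. A one-line C sketch of the computation (names illustrative):

#include <stdint.h>

static uint32_t scsi_residual(uint32_t stcnt, uint32_t mwi_residual)
{
    return (stcnt + mwi_residual) & 0x00ffffff;   /* -> SCB_RESIDUAL_DATACNT[0..2] */
}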
+ * + */ + test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jnz residual_update_done; + if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 + && ahc->pci_cachesize != 0) { + if ((ahc->features & AHC_CMD_CHAN) != 0) { + test MWI_RESIDUAL, 0xFF jz bmov_resid; + } + mov A, MWI_RESIDUAL; + add SCB_RESIDUAL_DATACNT[0], A, STCNT[0]; + clr A; + adc SCB_RESIDUAL_DATACNT[1], A, STCNT[1]; + adc SCB_RESIDUAL_DATACNT[2], A, STCNT[2]; + clr MWI_RESIDUAL; + if ((ahc->features & AHC_CMD_CHAN) != 0) { + jmp . + 2; +bmov_resid: + bmov SCB_RESIDUAL_DATACNT, STCNT, 3; + } + } else if ((ahc->features & AHC_CMD_CHAN) != 0) { + bmov SCB_RESIDUAL_DATACNT, STCNT, 3; + } else { + mov SCB_RESIDUAL_DATACNT[0], STCNT[0]; + mov SCB_RESIDUAL_DATACNT[1], STCNT[1]; + mov SCB_RESIDUAL_DATACNT[2], STCNT[2]; + } +residual_update_done: + /* + * Since we've been through a data phase, the SCB_RESID* fields + * are now initialized. Clear the full residual flag. + */ + and SCB_SGPTR[0], ~SG_FULL_RESID; + + if ((ahc->features & AHC_ULTRA2) != 0) { + /* Clear the channel in case we return to data phase later */ + or SXFRCTL0, CLRSTCNT|CLRCHN; + or SXFRCTL0, CLRSTCNT|CLRCHN; + } + + if ((ahc->flags & AHC_TARGETROLE) != 0) { + test SEQ_FLAGS, DPHASE_PENDING jz ITloop; + and SEQ_FLAGS, ~DPHASE_PENDING; + /* + * For data-in phases, wait for any pending acks from the + * initiator before changing phase. We only need to + * send Ignore Wide Residue messages for data-in phases. + */ + test DFCNTRL, DIRECTION jz target_ITloop; + test SSTAT1, REQINIT jnz .; + test SCB_LUN, SCB_XFERLEN_ODD jz target_ITloop; + test SCSIRATE, WIDEXFER jz target_ITloop; + /* + * Issue an Ignore Wide Residue Message. + */ + mvi P_MESGIN|BSYO call change_phase; + mvi MSG_IGN_WIDE_RESIDUE call target_outb; + mvi 1 call target_outb; + jmp target_ITloop; + } else { + jmp ITloop; + } + +if ((ahc->flags & AHC_INITIATORROLE) != 0) { +/* + * Command phase. Set up the DMA registers and let 'er rip. + */ +p_command: + test SEQ_FLAGS, NOT_IDENTIFIED jz p_command_okay; + mvi PROTO_VIOLATION call set_seqint; +p_command_okay: + + if ((ahc->features & AHC_ULTRA2) != 0) { + bmov HCNT[0], SCB_CDB_LEN, 1; + bmov HCNT[1], ALLZEROS, 2; + mvi SG_CACHE_PRE, LAST_SEG; + } else if ((ahc->features & AHC_CMD_CHAN) != 0) { + bmov STCNT[0], SCB_CDB_LEN, 1; + bmov STCNT[1], ALLZEROS, 2; + } else { + mov STCNT[0], SCB_CDB_LEN; + clr STCNT[1]; + clr STCNT[2]; + } + add NONE, -13, SCB_CDB_LEN; + mvi SCB_CDB_STORE jnc p_command_embedded; +p_command_from_host: + if ((ahc->features & AHC_ULTRA2) != 0) { + bmov HADDR[0], SCB_CDB_PTR, 4; + mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN|DIRECTION); + } else { + if ((ahc->features & AHC_CMD_CHAN) != 0) { + bmov HADDR[0], SCB_CDB_PTR, 4; + bmov HCNT, STCNT, 3; + } else { + mvi DINDEX, HADDR; + mvi SCB_CDB_PTR call bcopy_4; + mov SCB_CDB_LEN call set_hcnt; + } + mvi DFCNTRL, (SCSIEN|SDMAEN|HDMAEN|DIRECTION|FIFORESET); + } + jmp p_command_xfer; +p_command_embedded: + /* + * The data fifo seems to require 4 byte aligned + * transfers from the sequencer. Force this to + * be the case by clearing HADDR[0] even though + * we aren't going to touch host memory. + */ + clr HADDR[0]; + if ((ahc->features & AHC_ULTRA2) != 0) { + mvi DFCNTRL, (PRELOADEN|SCSIEN|DIRECTION); + bmov DFDAT, SCB_CDB_STORE, 12; + } else if ((ahc->features & AHC_CMD_CHAN) != 0) { + if ((ahc->flags & AHC_SCB_BTT) != 0) { + /* + * On the 7895 the data FIFO will + * get corrupted if you try to dump + * data from external SCB memory into + * the FIFO while it is enabled. 
So, + * fill the fifo and then enable SCSI + * transfers. + */ + mvi DFCNTRL, (DIRECTION|FIFORESET); + } else { + mvi DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFORESET); + } + bmov DFDAT, SCB_CDB_STORE, 12; + if ((ahc->flags & AHC_SCB_BTT) != 0) { + mvi DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFOFLUSH); + } else { + or DFCNTRL, FIFOFLUSH; + } + } else { + mvi DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFORESET); + call copy_to_fifo_6; + call copy_to_fifo_6; + or DFCNTRL, FIFOFLUSH; + } +p_command_xfer: + and SEQ_FLAGS, ~NO_CDB_SENT; + if ((ahc->features & AHC_DT) == 0) { + test SSTAT0, SDONE jnz . + 2; + test SSTAT1, PHASEMIS jz . - 1; + /* + * Wait for our ACK to go-away on it's own + * instead of being killed by SCSIEN getting cleared. + */ + test SCSISIGI, ACKI jnz .; + } else { + test DFCNTRL, SCSIEN jnz .; + } + test SSTAT0, SDONE jnz p_command_successful; + /* + * Don't allow a data phase if the command + * was not fully transferred. + */ + or SEQ_FLAGS, NO_CDB_SENT; +p_command_successful: + and DFCNTRL, ~(SCSIEN|SDMAEN|HDMAEN); + test DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz .; + jmp ITloop; + +/* + * Status phase. Wait for the data byte to appear, then read it + * and store it into the SCB. + */ +p_status: + test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation; +p_status_okay: + mov SCB_SCSI_STATUS, SCSIDATL; + or SCB_CONTROL, STATUS_RCVD; + jmp ITloop; + +/* + * Message out phase. If MSG_OUT is MSG_IDENTIFYFLAG, build a full + * indentify message sequence and send it to the target. The host may + * override this behavior by setting the MK_MESSAGE bit in the SCB + * control byte. This will cause us to interrupt the host and allow + * it to handle the message phase completely on its own. If the bit + * associated with this target is set, we will also interrupt the host, + * thereby allowing it to send a message on the next selection regardless + * of the transaction being sent. + * + * If MSG_OUT is == HOST_MSG, also interrupt the host and take a message. + * This is done to allow the host to send messages outside of an identify + * sequence while protecting the seqencer from testing the MK_MESSAGE bit + * on an SCB that might not be for the current nexus. (For example, a + * BDR message in response to a bad reselection would leave us pointed to + * an SCB that doesn't have anything to do with the current target). + * + * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag, + * bus device reset). + * + * When there are no messages to send, MSG_OUT should be set to MSG_NOOP, + * in case the target decides to put us in this phase for some strange + * reason. + */ +p_mesgout_retry: + /* Turn on ATN for the retry */ + if ((ahc->features & AHC_DT) == 0) { + or SCSISIGO, ATNO, LASTPHASE; + } else { + mvi SCSISIGO, ATNO; + } +p_mesgout: + mov SINDEX, MSG_OUT; + cmp SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host; + test SCB_CONTROL,MK_MESSAGE jnz host_message_loop; +p_mesgout_identify: + or SINDEX, MSG_IDENTIFYFLAG|DISCENB, SAVED_LUN; + test SCB_CONTROL, DISCENB jnz . + 2; + and SINDEX, ~DISCENB; +/* + * Send a tag message if TAG_ENB is set in the SCB control block. + * Use SCB_TAG (the position in the kernel's SCB array) as the tag value. 
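Around this point the initiator builds its MESSAGE OUT sequence: an IDENTIFY byte carrying the lun and, when disconnection is permitted, the disconnect-privilege bit, optionally followed by a two-byte queue tag message whose value is the SCB's index in the kernel's SCB array. A C sketch using the conventional SCSI message encodings (an assumption; the helper is hypothetical, not a driver function):

#include <stdint.h>
#include <stdbool.h>

#define MSG_IDENTIFYFLAG 0x80               /* IDENTIFY message flag bit       */
#define DISCENB          0x40               /* "disconnect privilege" bit      */
#define MSG_SIMPLE_Q_TAG 0x20               /* conventional SIMPLE QUEUE TAG   */

/*
 * tag_type 0/1/2 selects simple, head-of-queue or ordered tag, mirroring
 * how the firmware overlays the SCB_TAG_TYPE bits onto the tag code.
 * Returns the number of message bytes produced.
 */
static int build_mesgout(uint8_t *msg, uint8_t lun, bool allow_disc,
                         bool tagged, uint8_t tag_type, uint8_t scb_tag)
{
    int n = 0;

    msg[n++] = MSG_IDENTIFYFLAG | (allow_disc ? DISCENB : 0) | (lun & 0x07);
    if (tagged) {
        msg[n++] = MSG_SIMPLE_Q_TAG | tag_type;
        msg[n++] = scb_tag;                 /* kernel SCB array index */
    }
    return n;
}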
+ */ +p_mesgout_tag: + test SCB_CONTROL,TAG_ENB jz p_mesgout_onebyte; + mov SCSIDATL, SINDEX; /* Send the identify message */ + call phase_lock; + cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; + and SCSIDATL,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL; + call phase_lock; + cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; + mov SCB_TAG jmp p_mesgout_onebyte; +/* + * Interrupt the driver, and allow it to handle this message + * phase and any required retries. + */ +p_mesgout_from_host: + cmp SINDEX, HOST_MSG jne p_mesgout_onebyte; + jmp host_message_loop; + +p_mesgout_onebyte: + mvi CLRSINT1, CLRATNO; + mov SCSIDATL, SINDEX; + +/* + * If the next bus phase after ATN drops is message out, it means + * that the target is requesting that the last message(s) be resent. + */ + call phase_lock; + cmp LASTPHASE, P_MESGOUT je p_mesgout_retry; + +p_mesgout_done: + mvi CLRSINT1,CLRATNO; /* Be sure to turn ATNO off */ + mov LAST_MSG, MSG_OUT; + mvi MSG_OUT, MSG_NOOP; /* No message left */ + jmp ITloop; + +/* + * Message in phase. Bytes are read using Automatic PIO mode. + */ +p_mesgin: + mvi ACCUM call inb_first; /* read the 1st message byte */ + + test A,MSG_IDENTIFYFLAG jnz mesgin_identify; + cmp A,MSG_DISCONNECT je mesgin_disconnect; + cmp A,MSG_SAVEDATAPOINTER je mesgin_sdptrs; + cmp ALLZEROS,A je mesgin_complete; + cmp A,MSG_RESTOREPOINTERS je mesgin_rdptrs; + cmp A,MSG_IGN_WIDE_RESIDUE je mesgin_ign_wide_residue; + cmp A,MSG_NOOP je mesgin_done; + +/* + * Pushed message loop to allow the kernel to + * run it's own message state engine. To avoid an + * extra nop instruction after signaling the kernel, + * we perform the phase_lock before checking to see + * if we should exit the loop and skip the phase_lock + * in the ITloop. Performing back to back phase_locks + * shouldn't hurt, but why do it twice... + */ +host_message_loop: + mvi HOST_MSG_LOOP call set_seqint; + call phase_lock; + cmp RETURN_1, EXIT_MSG_LOOP je ITloop + 1; + jmp host_message_loop; + +mesgin_ign_wide_residue: +if ((ahc->features & AHC_WIDE) != 0) { + test SCSIRATE, WIDEXFER jz mesgin_reject; + /* Pull the residue byte */ + mvi ARG_1 call inb_next; + cmp ARG_1, 0x01 jne mesgin_reject; + test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz . + 2; + test SCB_LUN, SCB_XFERLEN_ODD jnz mesgin_done; + mvi IGN_WIDE_RES call set_seqint; + jmp mesgin_done; +} + +mesgin_proto_violation: + mvi PROTO_VIOLATION call set_seqint; + jmp mesgin_done; +mesgin_reject: + mvi MSG_MESSAGE_REJECT call mk_mesg; +mesgin_done: + mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ + jmp ITloop; + +/* + * We received a "command complete" message. Put the SCB_TAG into the QOUTFIFO, + * and trigger a completion interrupt. Before doing so, check to see if there + * is a residual or the status byte is something other than STATUS_GOOD (0). + * In either of these conditions, we upload the SCB back to the host so it can + * process this information. In the case of a non zero status byte, we + * additionally interrupt the kernel driver synchronously, allowing it to + * decide if sense should be retrieved. If the kernel driver wishes to request + * sense, it will fill the kernel SCB with a request sense command, requeue + * it to the QINFIFO and tell us not to post to the QOUTFIFO by setting + * RETURN_1 to SEND_SENSE. + */ +mesgin_complete: + + /* + * If ATN is raised, we still want to give the target a message. + * Perhaps there was a parity error on this last message byte. + * Either way, the target should take us to message out phase + * and then attempt to complete the command again. 
We should use a + * critical section here to guard against a timeout triggering + * for this command and setting ATN while we are still processing + * the completion. + test SCSISIGI, ATNI jnz mesgin_done; + */ + + /* + * If we are identified and have successfully sent the CDB, + * any status will do. Optimize this fast path. + */ + test SCB_CONTROL, STATUS_RCVD jz mesgin_proto_violation; + test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz complete_accepted; + + /* + * If the target never sent an identify message but instead went + * to mesgin to give an invalid message, let the host abort us. + */ + test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation; + + /* + * If we recevied good status but never successfully sent the + * cdb, abort the command. + */ + test SCB_SCSI_STATUS,0xff jnz complete_accepted; + test SEQ_FLAGS, NO_CDB_SENT jnz mesgin_proto_violation; + +complete_accepted: + /* + * See if we attempted to deliver a message but the target ingnored us. + */ + test SCB_CONTROL, MK_MESSAGE jz . + 2; + mvi MKMSG_FAILED call set_seqint; + + /* + * Check for residuals + */ + test SCB_SGPTR, SG_LIST_NULL jnz check_status;/* No xfer */ + test SCB_SGPTR, SG_FULL_RESID jnz upload_scb;/* Never xfered */ + test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jz upload_scb; +check_status: + test SCB_SCSI_STATUS,0xff jz complete; /* Good Status? */ +upload_scb: + or SCB_SGPTR, SG_RESID_VALID; + mvi DMAPARAMS, FIFORESET; + mov SCB_TAG call dma_scb; + test SCB_SCSI_STATUS, 0xff jz complete; /* Just a residual? */ + mvi BAD_STATUS call set_seqint; /* let driver know */ + cmp RETURN_1, SEND_SENSE jne complete; + call add_scb_to_free_list; + jmp await_busfree; +complete: + mov SCB_TAG call complete_post; + jmp await_busfree; +} + +complete_post: + /* Post the SCBID in SINDEX and issue an interrupt */ + call add_scb_to_free_list; + mov ARG_1, SINDEX; + if ((ahc->features & AHC_QUEUE_REGS) != 0) { + mov A, SDSCB_QOFF; + } else { + mov A, QOUTPOS; + } + mvi QOUTFIFO_OFFSET call post_byte_setup; + mov ARG_1 call post_byte; + if ((ahc->features & AHC_QUEUE_REGS) == 0) { + inc QOUTPOS; + } + mvi INTSTAT,CMDCMPLT ret; + +if ((ahc->flags & AHC_INITIATORROLE) != 0) { +/* + * Is it a disconnect message? Set a flag in the SCB to remind us + * and await the bus going free. If this is an untagged transaction + * store the SCB id for it in our untagged target table for lookup on + * a reselection. + */ +mesgin_disconnect: + /* + * If ATN is raised, we still want to give the target a message. + * Perhaps there was a parity error on this last message byte + * or we want to abort this command. Either way, the target + * should take us to message out phase and then attempt to + * disconnect again. + * XXX - Wait for more testing. + test SCSISIGI, ATNI jnz mesgin_done; + */ + test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT + jnz mesgin_proto_violation; + or SCB_CONTROL,DISCONNECTED; + if ((ahc->flags & AHC_PAGESCBS) != 0) { + call add_scb_to_disc_list; + } + test SCB_CONTROL, TAG_ENB jnz await_busfree; + mov ARG_1, SCB_TAG; + and SAVED_LUN, LID, SCB_LUN; + mov SCB_SCSIID call set_busy_target; + jmp await_busfree; + +/* + * Save data pointers message: + * Copying RAM values back to SCB, for Save Data Pointers message, but + * only if we've actually been into a data phase to change them. This + * protects against bogus data in scratch ram and the residual counts + * since they are only initialized when we go into data_in or data_out. + * Ack the message as soon as possible. 
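mesgin_disconnect above records an untagged transaction in the busy-target table so that mesgin_identify, further below, can recover the SCB on reselection; an entry of SCB_LIST_NULL means any reconnection for that target must carry a queue tag. A simplified C sketch of the non-BTT layout, which keeps sixteen entries indexed by the target ID in the upper nibble of the SCSIID byte (the SCB_64_BTT variant adds a per-lun dimension; names are illustrative):

#include <stdint.h>

#define SCB_LIST_NULL 0xff

static uint8_t busy_targets[16];            /* BUSY_TARGETS scratch-ram array */

static void busy_targets_init(void)
{
    for (int i = 0; i < 16; i++)
        busy_targets[i] = SCB_LIST_NULL;    /* no untagged command outstanding */
}

/* Record an untagged, disconnecting command (mesgin_disconnect path). */
static void remember_untagged(uint8_t scb_scsiid, uint8_t scb_tag)
{
    busy_targets[scb_scsiid >> 4] = scb_tag;
}

/* Look up on reselection (mesgin_identify); SCB_LIST_NULL => expect a tag. */
static uint8_t lookup_untagged(uint8_t saved_scsiid)
{
    return busy_targets[saved_scsiid >> 4];
}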
For chips without S/G pipelining, + * we can only ack the message after SHADDR has been saved. On these + * chips, SHADDR increments with every bus transaction, even PIO. + */ +mesgin_sdptrs: + if ((ahc->features & AHC_ULTRA2) != 0) { + mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ + test SEQ_FLAGS, DPHASE jz ITloop; + } else { + test SEQ_FLAGS, DPHASE jz mesgin_done; + } + + /* + * If we are asked to save our position at the end of the + * transfer, just mark us at the end rather than perform a + * full save. + */ + test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz mesgin_sdptrs_full; + or SCB_SGPTR, SG_LIST_NULL; + if ((ahc->features & AHC_ULTRA2) != 0) { + jmp ITloop; + } else { + jmp mesgin_done; + } + +mesgin_sdptrs_full: + + /* + * The SCB_SGPTR becomes the next one we'll download, + * and the SCB_DATAPTR becomes the current SHADDR. + * Use the residual number since STCNT is corrupted by + * any message transfer. + */ + if ((ahc->features & AHC_CMD_CHAN) != 0) { + bmov SCB_DATAPTR, SHADDR, 4; + if ((ahc->features & AHC_ULTRA2) == 0) { + mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ + } + bmov SCB_DATACNT, SCB_RESIDUAL_DATACNT, 8; + } else { + mvi DINDEX, SCB_DATAPTR; + mvi SHADDR call bcopy_4; + mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ + mvi SCB_RESIDUAL_DATACNT call bcopy_8; + } + jmp ITloop; + +/* + * Restore pointers message? Data pointers are recopied from the + * SCB anytime we enter a data phase for the first time, so all + * we need to do is clear the DPHASE flag and let the data phase + * code do the rest. We also reset/reallocate the FIFO to make + * sure we have a clean start for the next data or command phase. + */ +mesgin_rdptrs: + and SEQ_FLAGS, ~DPHASE; /* + * We'll reload them + * the next time through + * the dataphase. + */ + or SXFRCTL0, CLRSTCNT|CLRCHN; + jmp mesgin_done; + +/* + * Index into our Busy Target table. SINDEX and DINDEX are modified + * upon return. SCBPTR may be modified by this action. + */ +set_busy_target: + shr DINDEX, 4, SINDEX; + if ((ahc->flags & AHC_SCB_BTT) != 0) { + mov SCBPTR, SAVED_LUN; + add DINDEX, SCB_64_BTT; + } else { + add DINDEX, BUSY_TARGETS; + } + mov DINDIR, ARG_1 ret; + +/* + * Identify message? For a reconnecting target, this tells us the lun + * that the reconnection is for - find the correct SCB and switch to it, + * clearing the "disconnected" bit so we don't "find" it by accident later. + */ +mesgin_identify: + /* + * Determine whether a target is using tagged or non-tagged + * transactions by first looking at the transaction stored in + * the busy target array. If there is no untagged transaction + * for this target or the transaction is for a different lun, then + * this must be a tagged transaction. + */ + shr SINDEX, 4, SAVED_SCSIID; + and SAVED_LUN, MSG_IDENTIFY_LUNMASK, A; + if ((ahc->flags & AHC_SCB_BTT) != 0) { + add SINDEX, SCB_64_BTT; + mov SCBPTR, SAVED_LUN; + if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { + add NONE, -SCB_64_BTT, SINDEX; + jc . + 2; + mvi INTSTAT, OUT_OF_RANGE; + nop; + add NONE, -(SCB_64_BTT + 16), SINDEX; + jnc . + 2; + mvi INTSTAT, OUT_OF_RANGE; + nop; + } + } else { + add SINDEX, BUSY_TARGETS; + if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { + add NONE, -BUSY_TARGETS, SINDEX; + jc . + 2; + mvi INTSTAT, OUT_OF_RANGE; + nop; + add NONE, -(BUSY_TARGETS + 16), SINDEX; + jnc . 
+ 2; + mvi INTSTAT, OUT_OF_RANGE; + nop; + } + } + mov ARG_1, SINDIR; + cmp ARG_1, SCB_LIST_NULL je snoop_tag; + if ((ahc->flags & AHC_PAGESCBS) != 0) { + mov ARG_1 call findSCB; + } else { + mov SCBPTR, ARG_1; + } + if ((ahc->flags & AHC_SCB_BTT) != 0) { + jmp setup_SCB_id_lun_okay; + } else { + /* + * We only allow one untagged command per-target + * at a time. So, if the lun doesn't match, look + * for a tag message. + */ + and A, LID, SCB_LUN; + cmp SAVED_LUN, A je setup_SCB_id_lun_okay; + if ((ahc->flags & AHC_PAGESCBS) != 0) { + /* + * findSCB removes the SCB from the + * disconnected list, so we must replace + * it there should this SCB be for another + * lun. + */ + call cleanup_scb; + } + } + +/* + * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message. + * If we get one, we use the tag returned to find the proper + * SCB. With SCB paging, we must search for non-tagged + * transactions since the SCB may exist in any slot. If we're not + * using SCB paging, we can use the tag as the direct index to the + * SCB. + */ +snoop_tag: + if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { + or SEQ_FLAGS, 0x80; + } + mov NONE,SCSIDATL; /* ACK Identify MSG */ + call phase_lock; + if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { + or SEQ_FLAGS, 0x1; + } + cmp LASTPHASE, P_MESGIN jne not_found; + if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { + or SEQ_FLAGS, 0x2; + } + cmp SCSIBUSL,MSG_SIMPLE_Q_TAG jne not_found; +get_tag: + if ((ahc->flags & AHC_PAGESCBS) != 0) { + mvi ARG_1 call inb_next; /* tag value */ + mov ARG_1 call findSCB; + } else { + mvi ARG_1 call inb_next; /* tag value */ + mov SCBPTR, ARG_1; + } + +/* + * Ensure that the SCB the tag points to is for + * an SCB transaction to the reconnecting target. + */ +setup_SCB: + if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { + or SEQ_FLAGS, 0x4; + } + mov A, SCB_SCSIID; + cmp SAVED_SCSIID, A jne not_found_cleanup_scb; + if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { + or SEQ_FLAGS, 0x8; + } +setup_SCB_id_okay: + and A, LID, SCB_LUN; + cmp SAVED_LUN, A jne not_found_cleanup_scb; +setup_SCB_id_lun_okay: + if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { + or SEQ_FLAGS, 0x10; + } + test SCB_CONTROL,DISCONNECTED jz not_found_cleanup_scb; + and SCB_CONTROL,~DISCONNECTED; + test SCB_CONTROL, TAG_ENB jnz setup_SCB_tagged; + if ((ahc->flags & AHC_SCB_BTT) != 0) { + mov A, SCBPTR; + } + mvi ARG_1, SCB_LIST_NULL; + mov SAVED_SCSIID call set_busy_target; + if ((ahc->flags & AHC_SCB_BTT) != 0) { + mov SCBPTR, A; + } +setup_SCB_tagged: + clr SEQ_FLAGS; /* make note of IDENTIFY */ + call set_transfer_settings; + /* See if the host wants to send a message upon reconnection */ + test SCB_CONTROL, MK_MESSAGE jz mesgin_done; + mvi HOST_MSG call mk_mesg; + jmp mesgin_done; + +not_found_cleanup_scb: + if ((ahc->flags & AHC_PAGESCBS) != 0) { + call cleanup_scb; + } +not_found: + mvi NO_MATCH call set_seqint; + jmp mesgin_done; + +mk_mesg: + if ((ahc->features & AHC_DT) == 0) { + or SCSISIGO, ATNO, LASTPHASE; + } else { + mvi SCSISIGO, ATNO; + } + mov MSG_OUT,SINDEX ret; + +/* + * Functions to read data in Automatic PIO mode. + * + * According to Adaptec's documentation, an ACK is not sent on input from + * the target until SCSIDATL is read from. So we wait until SCSIDATL is + * latched (the usual way), then read the data byte directly off the bus + * using SCSIBUSL. When we have pulled the ATN line, or we just want to + * acknowledge the byte, then we do a dummy read from SCISDATL. 
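+ * (That is, SCSIDATL; it is the dummy read of the data latch, not the
+ * SCSIBUSL read, that causes the chip to drive ACK.)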
The SCSI + * spec guarantees that the target will hold the data byte on the bus until + * we send our ACK. + * + * The assumption here is that these are called in a particular sequence, + * and that REQ is already set when inb_first is called. inb_{first,next} + * use the same calling convention as inb. + */ +inb_next_wait_perr: + mvi PERR_DETECTED call set_seqint; + jmp inb_next_wait; +inb_next: + mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ +inb_next_wait: + /* + * If there is a parity error, wait for the kernel to + * see the interrupt and prepare our message response + * before continuing. + */ + test SSTAT1, REQINIT jz inb_next_wait; + test SSTAT1, SCSIPERR jnz inb_next_wait_perr; +inb_next_check_phase: + and LASTPHASE, PHASE_MASK, SCSISIGI; + cmp LASTPHASE, P_MESGIN jne mesgin_phasemis; +inb_first: + mov DINDEX,SINDEX; + mov DINDIR,SCSIBUSL ret; /*read byte directly from bus*/ +inb_last: + mov NONE,SCSIDATL ret; /*dummy read from latch to ACK*/ +} + +if ((ahc->flags & AHC_TARGETROLE) != 0) { +/* + * Change to a new phase. If we are changing the state of the I/O signal, + * from out to in, wait an additional data release delay before continuing. + */ +change_phase: + /* Wait for preceding I/O session to complete. */ + test SCSISIGI, ACKI jnz .; + + /* Change the phase */ + and DINDEX, IOI, SCSISIGI; + mov SCSISIGO, SINDEX; + and A, IOI, SINDEX; + + /* + * If the data direction has changed, from + * out (initiator driving) to in (target driving), + * we must wait at least a data release delay plus + * the normal bus settle delay. [SCSI III SPI 10.11.0] + */ + cmp DINDEX, A je change_phase_wait; + test SINDEX, IOI jz change_phase_wait; + call change_phase_wait; +change_phase_wait: + nop; + nop; + nop; + nop ret; + +/* + * Send a byte to an initiator in Automatic PIO mode. + */ +target_outb: + or SXFRCTL0, SPIOEN; + test SSTAT0, SPIORDY jz .; + mov SCSIDATL, SINDEX; + test SSTAT0, SPIORDY jz .; + and SXFRCTL0, ~SPIOEN ret; +} + +/* + * Locate a disconnected SCB by SCBID. Upon return, SCBPTR and SINDEX will + * be set to the position of the SCB. If the SCB cannot be found locally, + * it will be paged in from host memory. RETURN_2 stores the address of the + * preceding SCB in the disconnected list which can be used to speed up + * removal of the found SCB from the disconnected list. + */ +if ((ahc->flags & AHC_PAGESCBS) != 0) { +BEGIN_CRITICAL; +findSCB: + mov A, SINDEX; /* Tag passed in SINDEX */ + cmp DISCONNECTED_SCBH, SCB_LIST_NULL je findSCB_notFound; + mov SCBPTR, DISCONNECTED_SCBH; /* Initialize SCBPTR */ + mvi ARG_2, SCB_LIST_NULL; /* Head of list */ + jmp findSCB_loop; +findSCB_next: + cmp SCB_NEXT, SCB_LIST_NULL je findSCB_notFound; + mov ARG_2, SCBPTR; + mov SCBPTR,SCB_NEXT; +findSCB_loop: + cmp SCB_TAG, A jne findSCB_next; +rem_scb_from_disc_list: + cmp ARG_2, SCB_LIST_NULL je rHead; + mov DINDEX, SCB_NEXT; + mov SINDEX, SCBPTR; + mov SCBPTR, ARG_2; + mov SCB_NEXT, DINDEX; + mov SCBPTR, SINDEX ret; +rHead: + mov DISCONNECTED_SCBH,SCB_NEXT ret; +END_CRITICAL; +findSCB_notFound: + /* + * We didn't find it. Page in the SCB. + */ + mov ARG_1, A; /* Save tag */ + mov ALLZEROS call get_free_or_disc_scb; + mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; + mov ARG_1 jmp dma_scb; +} + +/* + * Prepare the hardware to post a byte to host memory given an + * index of (A + (256 * SINDEX)) and a base address of SHARED_DATA_ADDR. 
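+ *
+ * Illustrative example (not part of the original comment): complete_post
+ * above calls this with A = QOUTPOS (or SDSCB_QOFF) and
+ * SINDEX = QOUTFIFO_OFFSET, so the completed SCB tag is posted to
+ * SHARED_DATA_ADDR + (256 * QOUTFIFO_OFFSET) + QOUTPOS, the next free
+ * slot of the in-core QOUTFIFO.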
+ */ +post_byte_setup: + mov ARG_2, SINDEX; + if ((ahc->features & AHC_CMD_CHAN) != 0) { + mvi DINDEX, CCHADDR; + mvi SHARED_DATA_ADDR call set_1byte_addr; + mvi CCHCNT, 1; + mvi CCSCBCTL, CCSCBRESET ret; + } else { + mvi DINDEX, HADDR; + mvi SHARED_DATA_ADDR call set_1byte_addr; + mvi 1 call set_hcnt; + mvi DFCNTRL, FIFORESET ret; + } + +post_byte: + if ((ahc->features & AHC_CMD_CHAN) != 0) { + bmov CCSCBRAM, SINDEX, 1; + or CCSCBCTL, CCSCBEN|CCSCBRESET; + test CCSCBCTL, CCSCBDONE jz .; + clr CCSCBCTL ret; + } else { + mov DFDAT, SINDEX; + or DFCNTRL, HDMAEN|FIFOFLUSH; + jmp dma_finish; + } + +phase_lock_perr: + mvi PERR_DETECTED call set_seqint; +phase_lock: + /* + * If there is a parity error, wait for the kernel to + * see the interrupt and prepare our message response + * before continuing. + */ + test SSTAT1, REQINIT jz phase_lock; + test SSTAT1, SCSIPERR jnz phase_lock_perr; +phase_lock_latch_phase: + if ((ahc->features & AHC_DT) == 0) { + and SCSISIGO, PHASE_MASK, SCSISIGI; + } + and LASTPHASE, PHASE_MASK, SCSISIGI ret; + +if ((ahc->features & AHC_CMD_CHAN) == 0) { +set_hcnt: + mov HCNT[0], SINDEX; +clear_hcnt: + clr HCNT[1]; + clr HCNT[2] ret; + +set_stcnt_from_hcnt: + mov STCNT[0], HCNT[0]; + mov STCNT[1], HCNT[1]; + mov STCNT[2], HCNT[2] ret; + +bcopy_8: + mov DINDIR, SINDIR; +bcopy_7: + mov DINDIR, SINDIR; + mov DINDIR, SINDIR; +bcopy_5: + mov DINDIR, SINDIR; +bcopy_4: + mov DINDIR, SINDIR; +bcopy_3: + mov DINDIR, SINDIR; + mov DINDIR, SINDIR; + mov DINDIR, SINDIR ret; +} + +if ((ahc->flags & AHC_TARGETROLE) != 0) { +/* + * Setup addr assuming that A is an index into + * an array of 32byte objects, SINDEX contains + * the base address of that array, and DINDEX + * contains the base address of the location + * to store the indexed address. + */ +set_32byte_addr: + shr ARG_2, 3, A; + shl A, 5; + jmp set_1byte_addr; +} + +/* + * Setup addr assuming that A is an index into + * an array of 64byte objects, SINDEX contains + * the base address of that array, and DINDEX + * contains the base address of the location + * to store the indexed address. + */ +set_64byte_addr: + shr ARG_2, 2, A; + shl A, 6; + +/* + * Setup addr assuming that A + (ARG_2 * 256) is an + * index into an array of 1byte objects, SINDEX contains + * the base address of that array, and DINDEX contains + * the base address of the location to store the computed + * address. + */ +set_1byte_addr: + add DINDIR, A, SINDIR; + mov A, ARG_2; + adc DINDIR, A, SINDIR; + clr A; + adc DINDIR, A, SINDIR; + adc DINDIR, A, SINDIR ret; + +/* + * Either post or fetch an SCB from host memory based on the + * DIRECTION bit in DMAPARAMS. The host SCB index is in SINDEX. 
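+ *
+ * Illustrative note: set_64byte_addr above makes the host address
+ * HSCB_ADDR + (64 * SINDEX), since hardware SCBs are stored as a
+ * contiguous array of 64-byte entries in host memory.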
+ */ +dma_scb: + mov A, SINDEX; + if ((ahc->features & AHC_CMD_CHAN) != 0) { + mvi DINDEX, CCHADDR; + mvi HSCB_ADDR call set_64byte_addr; + mov CCSCBPTR, SCBPTR; + test DMAPARAMS, DIRECTION jz dma_scb_tohost; + if ((ahc->flags & AHC_SCB_BTT) != 0) { + mvi CCHCNT, SCB_DOWNLOAD_SIZE_64; + } else { + mvi CCHCNT, SCB_DOWNLOAD_SIZE; + } + mvi CCSCBCTL, CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET; + cmp CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN|CCSCBDIR jne .; + jmp dma_scb_finish; +dma_scb_tohost: + mvi CCHCNT, SCB_UPLOAD_SIZE; + if ((ahc->features & AHC_ULTRA2) == 0) { + mvi CCSCBCTL, CCSCBRESET; + bmov CCSCBRAM, SCB_BASE, SCB_UPLOAD_SIZE; + or CCSCBCTL, CCSCBEN|CCSCBRESET; + test CCSCBCTL, CCSCBDONE jz .; + } else if ((ahc->bugs & AHC_SCBCHAN_UPLOAD_BUG) != 0) { + mvi CCSCBCTL, CCARREN|CCSCBRESET; + cmp CCSCBCTL, ARRDONE|CCARREN jne .; + mvi CCHCNT, SCB_UPLOAD_SIZE; + mvi CCSCBCTL, CCSCBEN|CCSCBRESET; + cmp CCSCBCTL, CCSCBDONE|CCSCBEN jne .; + } else { + mvi CCSCBCTL, CCARREN|CCSCBEN|CCSCBRESET; + cmp CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN jne .; + } +dma_scb_finish: + clr CCSCBCTL; + test CCSCBCTL, CCARREN|CCSCBEN jnz .; + ret; + } else { + mvi DINDEX, HADDR; + mvi HSCB_ADDR call set_64byte_addr; + mvi SCB_DOWNLOAD_SIZE call set_hcnt; + mov DFCNTRL, DMAPARAMS; + test DMAPARAMS, DIRECTION jnz dma_scb_fromhost; + /* Fill it with the SCB data */ +copy_scb_tofifo: + mvi SINDEX, SCB_BASE; + add A, SCB_DOWNLOAD_SIZE, SINDEX; +copy_scb_tofifo_loop: + call copy_to_fifo_8; + cmp SINDEX, A jne copy_scb_tofifo_loop; + or DFCNTRL, HDMAEN|FIFOFLUSH; + jmp dma_finish; +dma_scb_fromhost: + mvi DINDEX, SCB_BASE; + if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0) { + /* + * The PCI module will only issue a PCI + * retry if the data FIFO is empty. If the + * host disconnects in the middle of a + * transfer, we must empty the fifo of all + * available data to force the chip to + * continue the transfer. This does not + * happen for SCSI transfers as the SCSI module + * will drain the FIFO as data are made available. + * When the hang occurs, we know that a multiple + * of 8 bytes is in the FIFO because the PCI + * module has an 8 byte input latch that only + * dumps to the FIFO when HCNT == 0 or the + * latch is full. + */ + clr A; + /* Wait for at least 8 bytes of data to arrive. */ +dma_scb_hang_fifo: + test DFSTATUS, FIFOQWDEMP jnz dma_scb_hang_fifo; +dma_scb_hang_wait: + test DFSTATUS, MREQPEND jnz dma_scb_hang_wait; + test DFSTATUS, HDONE jnz dma_scb_hang_dma_done; + test DFSTATUS, HDONE jnz dma_scb_hang_dma_done; + test DFSTATUS, HDONE jnz dma_scb_hang_dma_done; + /* + * The PCI module no longer intends to perform + * a PCI transaction. Drain the fifo. + */ +dma_scb_hang_dma_drain_fifo: + not A, HCNT; + add A, SCB_DOWNLOAD_SIZE+SCB_BASE+1; + and A, ~0x7; + mov DINDIR,DFDAT; + cmp DINDEX, A jne . - 1; + cmp DINDEX, SCB_DOWNLOAD_SIZE+SCB_BASE + je dma_finish_nowait; + /* Restore A as the lines left to transfer. 
*/ + add A, -SCB_BASE, DINDEX; + shr A, 3; + jmp dma_scb_hang_fifo; +dma_scb_hang_dma_done: + and DFCNTRL, ~HDMAEN; + test DFCNTRL, HDMAEN jnz .; + add SEQADDR0, A; + } else { + call dma_finish; + } + call dfdat_in_8; + call dfdat_in_8; + call dfdat_in_8; +dfdat_in_8: + mov DINDIR,DFDAT; +dfdat_in_7: + mov DINDIR,DFDAT; + mov DINDIR,DFDAT; + mov DINDIR,DFDAT; + mov DINDIR,DFDAT; + mov DINDIR,DFDAT; +dfdat_in_2: + mov DINDIR,DFDAT; + mov DINDIR,DFDAT ret; + } + +copy_to_fifo_8: + mov DFDAT,SINDIR; + mov DFDAT,SINDIR; +copy_to_fifo_6: + mov DFDAT,SINDIR; +copy_to_fifo_5: + mov DFDAT,SINDIR; +copy_to_fifo_4: + mov DFDAT,SINDIR; + mov DFDAT,SINDIR; + mov DFDAT,SINDIR; + mov DFDAT,SINDIR ret; + +/* + * Wait for DMA from host memory to data FIFO to complete, then disable + * DMA and wait for it to acknowledge that it's off. + */ +dma_finish: + test DFSTATUS,HDONE jz dma_finish; +dma_finish_nowait: + /* Turn off DMA */ + and DFCNTRL, ~HDMAEN; + test DFCNTRL, HDMAEN jnz .; + ret; + +/* + * Restore an SCB that failed to match an incoming reselection + * to the correct/safe state. If the SCB is for a disconnected + * transaction, it must be returned to the disconnected list. + * If it is not in the disconnected state, it must be free. + */ +cleanup_scb: + if ((ahc->flags & AHC_PAGESCBS) != 0) { + test SCB_CONTROL,DISCONNECTED jnz add_scb_to_disc_list; + } +add_scb_to_free_list: + if ((ahc->flags & AHC_PAGESCBS) != 0) { +BEGIN_CRITICAL; + mov SCB_NEXT, FREE_SCBH; + mvi SCB_TAG, SCB_LIST_NULL; + mov FREE_SCBH, SCBPTR ret; +END_CRITICAL; + } else { + mvi SCB_TAG, SCB_LIST_NULL ret; + } + +if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { +set_hhaddr: + or DSCOMMAND1, HADDLDSEL0; + and HADDR, SG_HIGH_ADDR_BITS, SINDEX; + and DSCOMMAND1, ~HADDLDSEL0 ret; +} + +if ((ahc->flags & AHC_PAGESCBS) != 0) { +get_free_or_disc_scb: +BEGIN_CRITICAL; + cmp FREE_SCBH, SCB_LIST_NULL jne dequeue_free_scb; + cmp DISCONNECTED_SCBH, SCB_LIST_NULL jne dequeue_disc_scb; +return_error: + mvi NO_FREE_SCB call set_seqint; + mvi SINDEX, SCB_LIST_NULL ret; +dequeue_disc_scb: + mov SCBPTR, DISCONNECTED_SCBH; + mov DISCONNECTED_SCBH, SCB_NEXT; +END_CRITICAL; + mvi DMAPARAMS, FIFORESET; + mov SCB_TAG jmp dma_scb; +BEGIN_CRITICAL; +dequeue_free_scb: + mov SCBPTR, FREE_SCBH; + mov FREE_SCBH, SCB_NEXT ret; +END_CRITICAL; + +add_scb_to_disc_list: +/* + * Link this SCB into the DISCONNECTED list. This list holds the + * candidates for paging out an SCB if one is needed for a new command. + * Modifying the disconnected list is a critical(pause dissabled) section. + */ +BEGIN_CRITICAL; + mov SCB_NEXT, DISCONNECTED_SCBH; + mov DISCONNECTED_SCBH, SCBPTR ret; +END_CRITICAL; +} +set_seqint: + mov INTSTAT, SINDEX; + nop; +return: + ret; diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c new file mode 100644 index 000000000..11ddffbcc --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c @@ -0,0 +1,318 @@ +/* + * Interface for the 93C66/56/46/26/06 serial eeprom parts. + * + * Copyright (c) 1995, 1996 Daniel M. Eischen + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL"). + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_93cx6.c#19 $ + */ + +/* + * The instruction set of the 93C66/56/46/26/06 chips are as follows: + * + * Start OP * + * Function Bit Code Address** Data Description + * ------------------------------------------------------------------- + * READ 1 10 A5 - A0 Reads data stored in memory, + * starting at specified address + * EWEN 1 00 11XXXX Write enable must precede + * all programming modes + * ERASE 1 11 A5 - A0 Erase register A5A4A3A2A1A0 + * WRITE 1 01 A5 - A0 D15 - D0 Writes register + * ERAL 1 00 10XXXX Erase all registers + * WRAL 1 00 01XXXX D15 - D0 Writes to all registers + * EWDS 1 00 00XXXX Disables all programming + * instructions + * *Note: A value of X for address is a don't care condition. + * **Note: There are 8 address bits for the 93C56/66 chips unlike + * the 93C46/26/06 chips which have 6 address bits. + * + * The 93C46 has a four wire interface: clock, chip select, data in, and + * data out. In order to perform one of the above functions, you need + * to enable the chip select for a clock period (typically a minimum of + * 1 usec, with the clock high and low a minimum of 750 and 250 nsec + * respectively). While the chip select remains high, you can clock in + * the instructions (above) starting with the start bit, followed by the + * OP code, Address, and Data (if needed). For the READ instruction, the + * requested 16-bit register contents is read from the data out line but + * is preceded by an initial zero (leading 0, followed by 16-bits, MSB + * first). The clock cycling from low to high initiates the next data + * bit to be sent from the chip. + */ + +#include "aic7xxx_osm.h" +#include "aic7xxx_inline.h" +#include "aic7xxx_93cx6.h" + +/* + * Right now, we only have to read the SEEPROM. But we make it easier to + * add other 93Cx6 functions. 
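+ *
+ * For reference (illustrative): seeprom_read below encodes the start bit
+ * plus the READ opcode (1, 10) from the table above; the 6- or 8-bit
+ * address and the 16 data bits are handled bit-by-bit in
+ * ahc_read_seeprom() and ahc_write_seeprom() below.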
+ */ +struct seeprom_cmd { + uint8_t len; + uint8_t bits[11]; +}; + +/* Short opcodes for the c46 */ +static const struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}}; +static const struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}}; + +/* Long opcodes for the C56/C66 */ +static const struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}}; +static const struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}}; + +/* Common opcodes */ +static const struct seeprom_cmd seeprom_write = {3, {1, 0, 1}}; +static const struct seeprom_cmd seeprom_read = {3, {1, 1, 0}}; + +/* + * Wait for the SEERDY to go high; about 800 ns. + */ +#define CLOCK_PULSE(sd, rdy) \ + while ((SEEPROM_STATUS_INB(sd) & rdy) == 0) { \ + ; /* Do nothing */ \ + } \ + (void)SEEPROM_INB(sd); /* Clear clock */ + +/* + * Send a START condition and the given command + */ +static void +send_seeprom_cmd(struct seeprom_descriptor *sd, const struct seeprom_cmd *cmd) +{ + uint8_t temp; + int i = 0; + + /* Send chip select for one clock cycle. */ + temp = sd->sd_MS ^ sd->sd_CS; + SEEPROM_OUTB(sd, temp ^ sd->sd_CK); + CLOCK_PULSE(sd, sd->sd_RDY); + + for (i = 0; i < cmd->len; i++) { + if (cmd->bits[i] != 0) + temp ^= sd->sd_DO; + SEEPROM_OUTB(sd, temp); + CLOCK_PULSE(sd, sd->sd_RDY); + SEEPROM_OUTB(sd, temp ^ sd->sd_CK); + CLOCK_PULSE(sd, sd->sd_RDY); + if (cmd->bits[i] != 0) + temp ^= sd->sd_DO; + } +} + +/* + * Clear CS put the chip in the reset state, where it can wait for new commands. + */ +static void +reset_seeprom(struct seeprom_descriptor *sd) +{ + uint8_t temp; + + temp = sd->sd_MS; + SEEPROM_OUTB(sd, temp); + CLOCK_PULSE(sd, sd->sd_RDY); + SEEPROM_OUTB(sd, temp ^ sd->sd_CK); + CLOCK_PULSE(sd, sd->sd_RDY); + SEEPROM_OUTB(sd, temp); + CLOCK_PULSE(sd, sd->sd_RDY); +} + +/* + * Read the serial EEPROM and returns 1 if successful and 0 if + * not successful. + */ +int +ahc_read_seeprom(struct seeprom_descriptor *sd, uint16_t *buf, + u_int start_addr, u_int count) +{ + int i = 0; + u_int k = 0; + uint16_t v; + uint8_t temp; + + /* + * Read the requested registers of the seeprom. The loop + * will range from 0 to count-1. + */ + for (k = start_addr; k < count + start_addr; k++) { + /* + * Now we're ready to send the read command followed by the + * address of the 16-bit register we want to read. + */ + send_seeprom_cmd(sd, &seeprom_read); + + /* Send the 6 or 8 bit address (MSB first, LSB last). */ + temp = sd->sd_MS ^ sd->sd_CS; + for (i = (sd->sd_chip - 1); i >= 0; i--) { + if ((k & (1 << i)) != 0) + temp ^= sd->sd_DO; + SEEPROM_OUTB(sd, temp); + CLOCK_PULSE(sd, sd->sd_RDY); + SEEPROM_OUTB(sd, temp ^ sd->sd_CK); + CLOCK_PULSE(sd, sd->sd_RDY); + if ((k & (1 << i)) != 0) + temp ^= sd->sd_DO; + } + + /* + * Now read the 16 bit register. An initial 0 precedes the + * register contents which begins with bit 15 (MSB) and ends + * with bit 0 (LSB). The initial 0 will be shifted off the + * top of our word as we let the loop run from 0 to 16. + */ + v = 0; + for (i = 16; i >= 0; i--) { + SEEPROM_OUTB(sd, temp); + CLOCK_PULSE(sd, sd->sd_RDY); + v <<= 1; + if (SEEPROM_DATA_INB(sd) & sd->sd_DI) + v |= 1; + SEEPROM_OUTB(sd, temp ^ sd->sd_CK); + CLOCK_PULSE(sd, sd->sd_RDY); + } + + buf[k - start_addr] = v; + + /* Reset the chip select for the next command cycle. 
*/ + reset_seeprom(sd); + } +#ifdef AHC_DUMP_EEPROM + printk("\nSerial EEPROM:\n\t"); + for (k = 0; k < count; k = k + 1) { + if (((k % 8) == 0) && (k != 0)) { + printk(KERN_CONT "\n\t"); + } + printk(KERN_CONT " 0x%x", buf[k]); + } + printk(KERN_CONT "\n"); +#endif + return (1); +} + +/* + * Write the serial EEPROM and return 1 if successful and 0 if + * not successful. + */ +int +ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf, + u_int start_addr, u_int count) +{ + const struct seeprom_cmd *ewen, *ewds; + uint16_t v; + uint8_t temp; + int i, k; + + /* Place the chip into write-enable mode */ + if (sd->sd_chip == C46) { + ewen = &seeprom_ewen; + ewds = &seeprom_ewds; + } else if (sd->sd_chip == C56_66) { + ewen = &seeprom_long_ewen; + ewds = &seeprom_long_ewds; + } else { + printk("ahc_write_seeprom: unsupported seeprom type %d\n", + sd->sd_chip); + return (0); + } + + send_seeprom_cmd(sd, ewen); + reset_seeprom(sd); + + /* Write all requested data out to the seeprom. */ + temp = sd->sd_MS ^ sd->sd_CS; + for (k = start_addr; k < count + start_addr; k++) { + /* Send the write command */ + send_seeprom_cmd(sd, &seeprom_write); + + /* Send the 6 or 8 bit address (MSB first). */ + for (i = (sd->sd_chip - 1); i >= 0; i--) { + if ((k & (1 << i)) != 0) + temp ^= sd->sd_DO; + SEEPROM_OUTB(sd, temp); + CLOCK_PULSE(sd, sd->sd_RDY); + SEEPROM_OUTB(sd, temp ^ sd->sd_CK); + CLOCK_PULSE(sd, sd->sd_RDY); + if ((k & (1 << i)) != 0) + temp ^= sd->sd_DO; + } + + /* Write the 16 bit value, MSB first */ + v = buf[k - start_addr]; + for (i = 15; i >= 0; i--) { + if ((v & (1 << i)) != 0) + temp ^= sd->sd_DO; + SEEPROM_OUTB(sd, temp); + CLOCK_PULSE(sd, sd->sd_RDY); + SEEPROM_OUTB(sd, temp ^ sd->sd_CK); + CLOCK_PULSE(sd, sd->sd_RDY); + if ((v & (1 << i)) != 0) + temp ^= sd->sd_DO; + } + + /* Wait for the chip to complete the write */ + temp = sd->sd_MS; + SEEPROM_OUTB(sd, temp); + CLOCK_PULSE(sd, sd->sd_RDY); + temp = sd->sd_MS ^ sd->sd_CS; + do { + SEEPROM_OUTB(sd, temp); + CLOCK_PULSE(sd, sd->sd_RDY); + SEEPROM_OUTB(sd, temp ^ sd->sd_CK); + CLOCK_PULSE(sd, sd->sd_RDY); + } while ((SEEPROM_DATA_INB(sd) & sd->sd_DI) == 0); + + reset_seeprom(sd); + } + + /* Put the chip back into write-protect mode */ + send_seeprom_cmd(sd, ewds); + reset_seeprom(sd); + + return (1); +} + +int +ahc_verify_cksum(struct seeprom_config *sc) +{ + int i; + int maxaddr; + uint32_t checksum; + uint16_t *scarray; + + maxaddr = (sizeof(*sc)/2) - 1; + checksum = 0; + scarray = (uint16_t *)sc; + + for (i = 0; i < maxaddr; i++) + checksum = checksum + scarray[i]; + if (checksum == 0 + || (checksum & 0xFFFF) != sc->checksum) { + return (0); + } else { + return(1); + } +} diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.h b/drivers/scsi/aic7xxx/aic7xxx_93cx6.h new file mode 100644 index 000000000..859c43ccd --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.h @@ -0,0 +1,102 @@ +/* + * Interface to the 93C46/56 serial EEPROM that is used to store BIOS + * settings for the aic7xxx based adaptec SCSI controllers. It can + * also be used for 93C26 and 93C06 serial EEPROMS. + * + * Copyright (c) 1994, 1995, 2000 Justin T. Gibbs. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_93cx6.h#12 $ + * + * $FreeBSD$ + */ +#ifndef _AIC7XXX_93CX6_H_ +#define _AIC7XXX_93CX6_H_ + +typedef enum { + C46 = 6, + C56_66 = 8 +} seeprom_chip_t; + +struct seeprom_descriptor { + struct ahc_softc *sd_ahc; + u_int sd_control_offset; + u_int sd_status_offset; + u_int sd_dataout_offset; + seeprom_chip_t sd_chip; + uint16_t sd_MS; + uint16_t sd_RDY; + uint16_t sd_CS; + uint16_t sd_CK; + uint16_t sd_DO; + uint16_t sd_DI; +}; + +/* + * This function will read count 16-bit words from the serial EEPROM and + * return their value in buf. The port address of the aic7xxx serial EEPROM + * control register is passed in as offset. The following parameters are + * also passed in: + * + * CS - Chip select + * CK - Clock + * DO - Data out + * DI - Data in + * RDY - SEEPROM ready + * MS - Memory port mode select + * + * A failed read attempt returns 0, and a successful read returns 1. + */ + +#define SEEPROM_INB(sd) \ + ahc_inb(sd->sd_ahc, sd->sd_control_offset) +#define SEEPROM_OUTB(sd, value) \ +do { \ + ahc_outb(sd->sd_ahc, sd->sd_control_offset, value); \ + ahc_flush_device_writes(sd->sd_ahc); \ +} while(0) + +#define SEEPROM_STATUS_INB(sd) \ + ahc_inb(sd->sd_ahc, sd->sd_status_offset) +#define SEEPROM_DATA_INB(sd) \ + ahc_inb(sd->sd_ahc, sd->sd_dataout_offset) + +int ahc_read_seeprom(struct seeprom_descriptor *sd, uint16_t *buf, + u_int start_addr, u_int count); +int ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf, + u_int start_addr, u_int count); +int ahc_verify_cksum(struct seeprom_config *sc); + +#endif /* _AIC7XXX_93CX6_H_ */ diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c new file mode 100644 index 000000000..a396f048a --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7xxx_core.c @@ -0,0 +1,7901 @@ +/* + * Core routines and tables shareable across OS platforms. + * + * Copyright (c) 1994-2002 Justin T. Gibbs. + * Copyright (c) 2000-2002 Adaptec Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $ + */ + +#include "aic7xxx_osm.h" +#include "aic7xxx_inline.h" +#include "aicasm/aicasm_insformat.h" + +/***************************** Lookup Tables **********************************/ +static const char *const ahc_chip_names[] = { + "NONE", + "aic7770", + "aic7850", + "aic7855", + "aic7859", + "aic7860", + "aic7870", + "aic7880", + "aic7895", + "aic7895C", + "aic7890/91", + "aic7896/97", + "aic7892", + "aic7899" +}; + +/* + * Hardware error codes. + */ +struct ahc_hard_error_entry { + uint8_t errno; + const char *errmesg; +}; + +static const struct ahc_hard_error_entry ahc_hard_errors[] = { + { ILLHADDR, "Illegal Host Access" }, + { ILLSADDR, "Illegal Sequencer Address referenced" }, + { ILLOPCODE, "Illegal Opcode in sequencer program" }, + { SQPARERR, "Sequencer Parity Error" }, + { DPARERR, "Data-path Parity Error" }, + { MPARERR, "Scratch or SCB Memory Parity Error" }, + { PCIERRSTAT, "PCI Error detected" }, + { CIOPARERR, "CIOBUS Parity Error" }, +}; +static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors); + +static const struct ahc_phase_table_entry ahc_phase_table[] = +{ + { P_DATAOUT, NOP, "in Data-out phase" }, + { P_DATAIN, INITIATOR_ERROR, "in Data-in phase" }, + { P_DATAOUT_DT, NOP, "in DT Data-out phase" }, + { P_DATAIN_DT, INITIATOR_ERROR, "in DT Data-in phase" }, + { P_COMMAND, NOP, "in Command phase" }, + { P_MESGOUT, NOP, "in Message-out phase" }, + { P_STATUS, INITIATOR_ERROR, "in Status phase" }, + { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, + { P_BUSFREE, NOP, "while idle" }, + { 0, NOP, "in unknown phase" } +}; + +/* + * In most cases we only wish to itterate over real phases, so + * exclude the last element from the count. 
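+ * (The trailing { 0, NOP, "in unknown phase" } entry above is excluded
+ * from num_phases; it presumably only serves as a catch-all when a lookup
+ * does not match a real bus phase.)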
+ */ +static const u_int num_phases = ARRAY_SIZE(ahc_phase_table) - 1; + +/* + * Valid SCSIRATE values. (p. 3-17) + * Provides a mapping of tranfer periods in ns to the proper value to + * stick in the scsixfer reg. + */ +static const struct ahc_syncrate ahc_syncrates[] = +{ + /* ultra2 fast/ultra period rate */ + { 0x42, 0x000, 9, "80.0" }, + { 0x03, 0x000, 10, "40.0" }, + { 0x04, 0x000, 11, "33.0" }, + { 0x05, 0x100, 12, "20.0" }, + { 0x06, 0x110, 15, "16.0" }, + { 0x07, 0x120, 18, "13.4" }, + { 0x08, 0x000, 25, "10.0" }, + { 0x19, 0x010, 31, "8.0" }, + { 0x1a, 0x020, 37, "6.67" }, + { 0x1b, 0x030, 43, "5.7" }, + { 0x1c, 0x040, 50, "5.0" }, + { 0x00, 0x050, 56, "4.4" }, + { 0x00, 0x060, 62, "4.0" }, + { 0x00, 0x070, 68, "3.6" }, + { 0x00, 0x000, 0, NULL } +}; + +/* Our Sequencer Program */ +#include "aic7xxx_seq.h" + +/**************************** Function Declarations ***************************/ +static void ahc_force_renegotiation(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo); +static struct ahc_tmode_tstate* + ahc_alloc_tstate(struct ahc_softc *ahc, + u_int scsi_id, char channel); +#ifdef AHC_TARGET_MODE +static void ahc_free_tstate(struct ahc_softc *ahc, + u_int scsi_id, char channel, int force); +#endif +static const struct ahc_syncrate* + ahc_devlimited_syncrate(struct ahc_softc *ahc, + struct ahc_initiator_tinfo *, + u_int *period, + u_int *ppr_options, + role_t role); +static void ahc_update_pending_scbs(struct ahc_softc *ahc); +static void ahc_fetch_devinfo(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo); +static void ahc_scb_devinfo(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo, + struct scb *scb); +static void ahc_assert_atn(struct ahc_softc *ahc); +static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo, + struct scb *scb); +static void ahc_build_transfer_msg(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo); +static void ahc_construct_sdtr(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo, + u_int period, u_int offset); +static void ahc_construct_wdtr(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo, + u_int bus_width); +static void ahc_construct_ppr(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo, + u_int period, u_int offset, + u_int bus_width, u_int ppr_options); +static void ahc_clear_msg_state(struct ahc_softc *ahc); +static void ahc_handle_proto_violation(struct ahc_softc *ahc); +static void ahc_handle_message_phase(struct ahc_softc *ahc); +typedef enum { + AHCMSG_1B, + AHCMSG_2B, + AHCMSG_EXT +} ahc_msgtype; +static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, + u_int msgval, int full); +static int ahc_parse_msg(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo); +static int ahc_handle_msg_reject(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo); +static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo); +static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc); +static void ahc_handle_devreset(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo, + cam_status status, char *message, + int verbose_level); +#ifdef AHC_TARGET_MODE +static void ahc_setup_target_msgin(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo, + struct scb *scb); +#endif + +static bus_dmamap_callback_t ahc_dmamap_cb; +static void ahc_build_free_scb_list(struct ahc_softc *ahc); +static int ahc_init_scbdata(struct ahc_softc *ahc); +static void ahc_fini_scbdata(struct ahc_softc *ahc); +static void ahc_qinfifo_requeue(struct ahc_softc *ahc, + struct scb *prev_scb, + 
struct scb *scb); +static int ahc_qinfifo_count(struct ahc_softc *ahc); +static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, + u_int prev, u_int scbptr); +static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); +static u_int ahc_rem_wscb(struct ahc_softc *ahc, + u_int scbpos, u_int prev); +static void ahc_reset_current_bus(struct ahc_softc *ahc); +#ifdef AHC_DUMP_SEQ +static void ahc_dumpseq(struct ahc_softc *ahc); +#endif +static int ahc_loadseq(struct ahc_softc *ahc); +static int ahc_check_patch(struct ahc_softc *ahc, + const struct patch **start_patch, + u_int start_instr, u_int *skip_addr); +static void ahc_download_instr(struct ahc_softc *ahc, + u_int instrptr, uint8_t *dconsts); +#ifdef AHC_TARGET_MODE +static void ahc_queue_lstate_event(struct ahc_softc *ahc, + struct ahc_tmode_lstate *lstate, + u_int initiator_id, + u_int event_type, + u_int event_arg); +static void ahc_update_scsiid(struct ahc_softc *ahc, + u_int targid_mask); +static int ahc_handle_target_cmd(struct ahc_softc *ahc, + struct target_cmd *cmd); +#endif + +static u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl); +static void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl); +static void ahc_busy_tcl(struct ahc_softc *ahc, + u_int tcl, u_int busyid); + +/************************** SCB and SCB queue management **********************/ +static void ahc_run_untagged_queues(struct ahc_softc *ahc); +static void ahc_run_untagged_queue(struct ahc_softc *ahc, + struct scb_tailq *queue); + +/****************************** Initialization ********************************/ +static void ahc_alloc_scbs(struct ahc_softc *ahc); +static void ahc_shutdown(void *arg); + +/*************************** Interrupt Services *******************************/ +static void ahc_clear_intstat(struct ahc_softc *ahc); +static void ahc_run_qoutfifo(struct ahc_softc *ahc); +#ifdef AHC_TARGET_MODE +static void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused); +#endif +static void ahc_handle_brkadrint(struct ahc_softc *ahc); +static void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat); +static void ahc_handle_scsiint(struct ahc_softc *ahc, + u_int intstat); +static void ahc_clear_critical_section(struct ahc_softc *ahc); + +/***************************** Error Recovery *********************************/ +static void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb); +static int ahc_abort_scbs(struct ahc_softc *ahc, int target, + char channel, int lun, u_int tag, + role_t role, uint32_t status); +static void ahc_calc_residual(struct ahc_softc *ahc, + struct scb *scb); + +/*********************** Untagged Transaction Routines ************************/ +static inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc); +static inline void ahc_release_untagged_queues(struct ahc_softc *ahc); + +/* + * Block our completion routine from starting the next untagged + * transaction for this target or target lun. + */ +static inline void +ahc_freeze_untagged_queues(struct ahc_softc *ahc) +{ + if ((ahc->flags & AHC_SCB_BTT) == 0) + ahc->untagged_queue_lock++; +} + +/* + * Allow the next untagged transaction for this target or target lun + * to be executed. We use a counting semaphore to allow the lock + * to be acquired recursively. Once the count drops to zero, the + * transaction queues will be run. 
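+ *
+ * Illustrative example: two nested freeze/release pairs move
+ * untagged_queue_lock 0 -> 1 -> 2 -> 1 -> 0, and only the final release,
+ * when the count returns to zero, calls ahc_run_untagged_queues().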
+ */ +static inline void +ahc_release_untagged_queues(struct ahc_softc *ahc) +{ + if ((ahc->flags & AHC_SCB_BTT) == 0) { + ahc->untagged_queue_lock--; + if (ahc->untagged_queue_lock == 0) + ahc_run_untagged_queues(ahc); + } +} + +/************************* Sequencer Execution Control ************************/ +/* + * Work around any chip bugs related to halting sequencer execution. + * On Ultra2 controllers, we must clear the CIOBUS stretch signal by + * reading a register that will set this signal and deassert it. + * Without this workaround, if the chip is paused, by an interrupt or + * manual pause while accessing scb ram, accesses to certain registers + * will hang the system (infinite pci retries). + */ +static void +ahc_pause_bug_fix(struct ahc_softc *ahc) +{ + if ((ahc->features & AHC_ULTRA2) != 0) + (void)ahc_inb(ahc, CCSCBCTL); +} + +/* + * Determine whether the sequencer has halted code execution. + * Returns non-zero status if the sequencer is stopped. + */ +int +ahc_is_paused(struct ahc_softc *ahc) +{ + return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0); +} + +/* + * Request that the sequencer stop and wait, indefinitely, for it + * to stop. The sequencer will only acknowledge that it is paused + * once it has reached an instruction boundary and PAUSEDIS is + * cleared in the SEQCTL register. The sequencer may use PAUSEDIS + * for critical sections. + */ +void +ahc_pause(struct ahc_softc *ahc) +{ + ahc_outb(ahc, HCNTRL, ahc->pause); + + /* + * Since the sequencer can disable pausing in a critical section, we + * must loop until it actually stops. + */ + while (ahc_is_paused(ahc) == 0) + ; + + ahc_pause_bug_fix(ahc); +} + +/* + * Allow the sequencer to continue program execution. + * We check here to ensure that no additional interrupt + * sources that would cause the sequencer to halt have been + * asserted. If, for example, a SCSI bus reset is detected + * while we are fielding a different, pausing, interrupt type, + * we don't want to release the sequencer before going back + * into our interrupt handler and dealing with this new + * condition. 
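+ *
+ * In short (illustrative): callers bracket register access with
+ * ahc_pause()/ahc_unpause(), and the INTSTAT check below keeps the
+ * sequencer halted whenever SCSIINT, SEQINT or BRKADRINT is already
+ * pending.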
+ */ +void +ahc_unpause(struct ahc_softc *ahc) +{ + if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0) + ahc_outb(ahc, HCNTRL, ahc->unpause); +} + +/************************** Memory mapping routines ***************************/ +static struct ahc_dma_seg * +ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr) +{ + int sg_index; + + sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg); + /* sg_list_phys points to entry 1, not 0 */ + sg_index++; + + return (&scb->sg_list[sg_index]); +} + +static uint32_t +ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg) +{ + int sg_index; + + /* sg_list_phys points to entry 1, not 0 */ + sg_index = sg - &scb->sg_list[1]; + + return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list))); +} + +static uint32_t +ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index) +{ + return (ahc->scb_data->hscb_busaddr + + (sizeof(struct hardware_scb) * index)); +} + +static void +ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op) +{ + ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat, + ahc->scb_data->hscb_dmamap, + /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb), + /*len*/sizeof(*scb->hscb), op); +} + +void +ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op) +{ + if (scb->sg_count == 0) + return; + + ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap, + /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr) + * sizeof(struct ahc_dma_seg), + /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op); +} + +#ifdef AHC_TARGET_MODE +static uint32_t +ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index) +{ + return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo); +} +#endif + +/*********************** Miscellaneous Support Functions ***********************/ +/* + * Determine whether the sequencer reported a residual + * for this SCB/transaction. + */ +static void +ahc_update_residual(struct ahc_softc *ahc, struct scb *scb) +{ + uint32_t sgptr; + + sgptr = ahc_le32toh(scb->hscb->sgptr); + if ((sgptr & SG_RESID_VALID) != 0) + ahc_calc_residual(ahc, scb); +} + +/* + * Return pointers to the transfer negotiation information + * for the specified our_id/remote_id pair. + */ +struct ahc_initiator_tinfo * +ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id, + u_int remote_id, struct ahc_tmode_tstate **tstate) +{ + /* + * Transfer data structures are stored from the perspective + * of the target role. Since the parameters for a connection + * in the initiator role to a given target are the same as + * when the roles are reversed, we pretend we are the target. 
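+ * (Illustrative: the entry returned is
+ * enabled_targets[our_id]->transinfo[remote_id], with channel B IDs
+ * offset by 8 in the enabled_targets array.)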
+ */ + if (channel == 'B') + our_id += 8; + *tstate = ahc->enabled_targets[our_id]; + return (&(*tstate)->transinfo[remote_id]); +} + +uint16_t +ahc_inw(struct ahc_softc *ahc, u_int port) +{ + uint16_t r = ahc_inb(ahc, port+1) << 8; + return r | ahc_inb(ahc, port); +} + +void +ahc_outw(struct ahc_softc *ahc, u_int port, u_int value) +{ + ahc_outb(ahc, port, value & 0xFF); + ahc_outb(ahc, port+1, (value >> 8) & 0xFF); +} + +uint32_t +ahc_inl(struct ahc_softc *ahc, u_int port) +{ + return ((ahc_inb(ahc, port)) + | (ahc_inb(ahc, port+1) << 8) + | (ahc_inb(ahc, port+2) << 16) + | (ahc_inb(ahc, port+3) << 24)); +} + +void +ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value) +{ + ahc_outb(ahc, port, (value) & 0xFF); + ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF); + ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF); + ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF); +} + +uint64_t +ahc_inq(struct ahc_softc *ahc, u_int port) +{ + return ((ahc_inb(ahc, port)) + | (ahc_inb(ahc, port+1) << 8) + | (ahc_inb(ahc, port+2) << 16) + | (((uint64_t)ahc_inb(ahc, port+3)) << 24) + | (((uint64_t)ahc_inb(ahc, port+4)) << 32) + | (((uint64_t)ahc_inb(ahc, port+5)) << 40) + | (((uint64_t)ahc_inb(ahc, port+6)) << 48) + | (((uint64_t)ahc_inb(ahc, port+7)) << 56)); +} + +void +ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value) +{ + ahc_outb(ahc, port, value & 0xFF); + ahc_outb(ahc, port+1, (value >> 8) & 0xFF); + ahc_outb(ahc, port+2, (value >> 16) & 0xFF); + ahc_outb(ahc, port+3, (value >> 24) & 0xFF); + ahc_outb(ahc, port+4, (value >> 32) & 0xFF); + ahc_outb(ahc, port+5, (value >> 40) & 0xFF); + ahc_outb(ahc, port+6, (value >> 48) & 0xFF); + ahc_outb(ahc, port+7, (value >> 56) & 0xFF); +} + +/* + * Get a free scb. If there are none, see if we can allocate a new SCB. + */ +struct scb * +ahc_get_scb(struct ahc_softc *ahc) +{ + struct scb *scb; + + if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) { + ahc_alloc_scbs(ahc); + scb = SLIST_FIRST(&ahc->scb_data->free_scbs); + if (scb == NULL) + return (NULL); + } + SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle); + return (scb); +} + +/* + * Return an SCB resource to the free list. + */ +void +ahc_free_scb(struct ahc_softc *ahc, struct scb *scb) +{ + struct hardware_scb *hscb; + + hscb = scb->hscb; + /* Clean up for the next user */ + ahc->scb_data->scbindex[hscb->tag] = NULL; + scb->flags = SCB_FREE; + hscb->control = 0; + + SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle); + + /* Notify the OSM that a resource is now available. */ + ahc_platform_scb_free(ahc, scb); +} + +struct scb * +ahc_lookup_scb(struct ahc_softc *ahc, u_int tag) +{ + struct scb* scb; + + scb = ahc->scb_data->scbindex[tag]; + if (scb != NULL) + ahc_sync_scb(ahc, scb, + BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); + return (scb); +} + +static void +ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb) +{ + struct hardware_scb *q_hscb; + u_int saved_tag; + + /* + * Our queuing method is a bit tricky. The card + * knows in advance which HSCB to download, and we + * can't disappoint it. To achieve this, the next + * SCB to download is saved off in ahc->next_queued_scb. + * When we are called to queue "an arbitrary scb", + * we copy the contents of the incoming HSCB to the one + * the sequencer knows about, swap HSCB pointers and + * finally assign the SCB to the tag indexed location + * in the scb_array. This makes sure that we can still + * locate the correct SCB by SCB_TAG. 
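+ *
+ * Illustrative example: if the sequencer expects to download hardware
+ * SCB 5 next and we queue an SCB whose hscb carries tag 9, the command is
+ * copied into HSCB 5 (which keeps tag 5 and gets next = 9), the two hscb
+ * pointers are swapped, and scbindex[5] is pointed at this scb so lookups
+ * by SCB_TAG still succeed; the tag 9 hscb becomes the new
+ * next_queued_scb buffer.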
+ */ + q_hscb = ahc->next_queued_scb->hscb; + saved_tag = q_hscb->tag; + memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb)); + if ((scb->flags & SCB_CDB32_PTR) != 0) { + q_hscb->shared_data.cdb_ptr = + ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag) + + offsetof(struct hardware_scb, cdb32)); + } + q_hscb->tag = saved_tag; + q_hscb->next = scb->hscb->tag; + + /* Now swap HSCB pointers. */ + ahc->next_queued_scb->hscb = scb->hscb; + scb->hscb = q_hscb; + + /* Now define the mapping from tag to SCB in the scbindex */ + ahc->scb_data->scbindex[scb->hscb->tag] = scb; +} + +/* + * Tell the sequencer about a new transaction to execute. + */ +void +ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb) +{ + ahc_swap_with_next_hscb(ahc, scb); + + if (scb->hscb->tag == SCB_LIST_NULL + || scb->hscb->next == SCB_LIST_NULL) + panic("Attempt to queue invalid SCB tag %x:%x\n", + scb->hscb->tag, scb->hscb->next); + + /* + * Setup data "oddness". + */ + scb->hscb->lun &= LID; + if (ahc_get_transfer_length(scb) & 0x1) + scb->hscb->lun |= SCB_XFERLEN_ODD; + + /* + * Keep a history of SCBs we've downloaded in the qinfifo. + */ + ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; + + /* + * Make sure our data is consistent from the + * perspective of the adapter. + */ + ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); + + /* Tell the adapter about the newly queued SCB */ + if ((ahc->features & AHC_QUEUE_REGS) != 0) { + ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); + } else { + if ((ahc->features & AHC_AUTOPAUSE) == 0) + ahc_pause(ahc); + ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); + if ((ahc->features & AHC_AUTOPAUSE) == 0) + ahc_unpause(ahc); + } +} + +struct scsi_sense_data * +ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb) +{ + int offset; + + offset = scb - ahc->scb_data->scbarray; + return (&ahc->scb_data->sense[offset]); +} + +static uint32_t +ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb) +{ + int offset; + + offset = scb - ahc->scb_data->scbarray; + return (ahc->scb_data->sense_busaddr + + (offset * sizeof(struct scsi_sense_data))); +} + +/************************** Interrupt Processing ******************************/ +static void +ahc_sync_qoutfifo(struct ahc_softc *ahc, int op) +{ + ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, + /*offset*/0, /*len*/256, op); +} + +static void +ahc_sync_tqinfifo(struct ahc_softc *ahc, int op) +{ +#ifdef AHC_TARGET_MODE + if ((ahc->flags & AHC_TARGETROLE) != 0) { + ahc_dmamap_sync(ahc, ahc->shared_data_dmat, + ahc->shared_data_dmamap, + ahc_targetcmd_offset(ahc, 0), + sizeof(struct target_cmd) * AHC_TMODE_CMDS, + op); + } +#endif +} + +/* + * See if the firmware has posted any completed commands + * into our in-core command complete fifos. 
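+ * Checking the in-core fifos first lets ahc_intr() below infer CMDCMPLT
+ * without a costly PCI read of INTSTAT in the common case.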
+ */ +#define AHC_RUN_QOUTFIFO 0x1 +#define AHC_RUN_TQINFIFO 0x2 +static u_int +ahc_check_cmdcmpltqueues(struct ahc_softc *ahc) +{ + u_int retval; + + retval = 0; + ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, + /*offset*/ahc->qoutfifonext, /*len*/1, + BUS_DMASYNC_POSTREAD); + if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) + retval |= AHC_RUN_QOUTFIFO; +#ifdef AHC_TARGET_MODE + if ((ahc->flags & AHC_TARGETROLE) != 0 + && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) { + ahc_dmamap_sync(ahc, ahc->shared_data_dmat, + ahc->shared_data_dmamap, + ahc_targetcmd_offset(ahc, ahc->tqinfifofnext), + /*len*/sizeof(struct target_cmd), + BUS_DMASYNC_POSTREAD); + if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0) + retval |= AHC_RUN_TQINFIFO; + } +#endif + return (retval); +} + +/* + * Catch an interrupt from the adapter + */ +int +ahc_intr(struct ahc_softc *ahc) +{ + u_int intstat; + + if ((ahc->pause & INTEN) == 0) { + /* + * Our interrupt is not enabled on the chip + * and may be disabled for re-entrancy reasons, + * so just return. This is likely just a shared + * interrupt. + */ + return (0); + } + /* + * Instead of directly reading the interrupt status register, + * infer the cause of the interrupt by checking our in-core + * completion queues. This avoids a costly PCI bus read in + * most cases. + */ + if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0 + && (ahc_check_cmdcmpltqueues(ahc) != 0)) + intstat = CMDCMPLT; + else { + intstat = ahc_inb(ahc, INTSTAT); + } + + if ((intstat & INT_PEND) == 0) { +#if AHC_PCI_CONFIG > 0 + if (ahc->unsolicited_ints > 500) { + ahc->unsolicited_ints = 0; + if ((ahc->chip & AHC_PCI) != 0 + && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0) + ahc->bus_intr(ahc); + } +#endif + ahc->unsolicited_ints++; + return (0); + } + ahc->unsolicited_ints = 0; + + if (intstat & CMDCMPLT) { + ahc_outb(ahc, CLRINT, CLRCMDINT); + + /* + * Ensure that the chip sees that we've cleared + * this interrupt before we walk the output fifo. + * Otherwise, we may, due to posted bus writes, + * clear the interrupt after we finish the scan, + * and after the sequencer has added new entries + * and asserted the interrupt again. + */ + ahc_flush_device_writes(ahc); + ahc_run_qoutfifo(ahc); +#ifdef AHC_TARGET_MODE + if ((ahc->flags & AHC_TARGETROLE) != 0) + ahc_run_tqinfifo(ahc, /*paused*/FALSE); +#endif + } + + /* + * Handle statuses that may invalidate our cached + * copy of INTSTAT separately. + */ + if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) { + /* Hot eject. Do nothing */ + } else if (intstat & BRKADRINT) { + ahc_handle_brkadrint(ahc); + } else if ((intstat & (SEQINT|SCSIINT)) != 0) { + + ahc_pause_bug_fix(ahc); + + if ((intstat & SEQINT) != 0) + ahc_handle_seqint(ahc, intstat); + + if ((intstat & SCSIINT) != 0) + ahc_handle_scsiint(ahc, intstat); + } + return (1); +} + +/************************* Sequencer Execution Control ************************/ +/* + * Restart the sequencer program from address zero + */ +static void +ahc_restart(struct ahc_softc *ahc) +{ + uint8_t sblkctl; + + ahc_pause(ahc); + + /* No more pending messages. */ + ahc_clear_msg_state(ahc); + + ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */ + ahc_outb(ahc, MSG_OUT, NOP); /* No message to send */ + ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); + ahc_outb(ahc, LASTPHASE, P_BUSFREE); + ahc_outb(ahc, SAVED_SCSIID, 0xFF); + ahc_outb(ahc, SAVED_LUN, 0xFF); + + /* + * Ensure that the sequencer's idea of TQINPOS + * matches our own. 
The sequencer increments TQINPOS + * only after it sees a DMA complete and a reset could + * occur before the increment leaving the kernel to believe + * the command arrived but the sequencer to not. + */ + ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); + + /* Always allow reselection */ + ahc_outb(ahc, SCSISEQ, + ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); + if ((ahc->features & AHC_CMD_CHAN) != 0) { + /* Ensure that no DMA operations are in progress */ + ahc_outb(ahc, CCSCBCNT, 0); + ahc_outb(ahc, CCSGCTL, 0); + ahc_outb(ahc, CCSCBCTL, 0); + } + /* + * If we were in the process of DMA'ing SCB data into + * an SCB, replace that SCB on the free list. This prevents + * an SCB leak. + */ + if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) { + ahc_add_curscb_to_free_list(ahc); + ahc_outb(ahc, SEQ_FLAGS2, + ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA); + } + + /* + * Clear any pending sequencer interrupt. It is no + * longer relevant since we're resetting the Program + * Counter. + */ + ahc_outb(ahc, CLRINT, CLRSEQINT); + + ahc_outb(ahc, MWI_RESIDUAL, 0); + ahc_outb(ahc, SEQCTL, ahc->seqctl); + ahc_outb(ahc, SEQADDR0, 0); + ahc_outb(ahc, SEQADDR1, 0); + + /* + * Take the LED out of diagnostic mode on PM resume, too + */ + sblkctl = ahc_inb(ahc, SBLKCTL); + ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON))); + + ahc_unpause(ahc); +} + +/************************* Input/Output Queues ********************************/ +static void +ahc_run_qoutfifo(struct ahc_softc *ahc) +{ + struct scb *scb; + u_int scb_index; + + ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); + while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) { + + scb_index = ahc->qoutfifo[ahc->qoutfifonext]; + if ((ahc->qoutfifonext & 0x03) == 0x03) { + u_int modnext; + + /* + * Clear 32bits of QOUTFIFO at a time + * so that we don't clobber an incoming + * byte DMA to the array on architectures + * that only support 32bit load and store + * operations. + */ + modnext = ahc->qoutfifonext & ~0x3; + *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL; + ahc_dmamap_sync(ahc, ahc->shared_data_dmat, + ahc->shared_data_dmamap, + /*offset*/modnext, /*len*/4, + BUS_DMASYNC_PREREAD); + } + ahc->qoutfifonext++; + + scb = ahc_lookup_scb(ahc, scb_index); + if (scb == NULL) { + printk("%s: WARNING no command for scb %d " + "(cmdcmplt)\nQOUTPOS = %d\n", + ahc_name(ahc), scb_index, + (ahc->qoutfifonext - 1) & 0xFF); + continue; + } + + /* + * Save off the residual + * if there is one. 
+ */ + ahc_update_residual(ahc, scb); + ahc_done(ahc, scb); + } +} + +static void +ahc_run_untagged_queues(struct ahc_softc *ahc) +{ + int i; + + for (i = 0; i < 16; i++) + ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); +} + +static void +ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) +{ + struct scb *scb; + + if (ahc->untagged_queue_lock != 0) + return; + + if ((scb = TAILQ_FIRST(queue)) != NULL + && (scb->flags & SCB_ACTIVE) == 0) { + scb->flags |= SCB_ACTIVE; + ahc_queue_scb(ahc, scb); + } +} + +/************************* Interrupt Handling *********************************/ +static void +ahc_handle_brkadrint(struct ahc_softc *ahc) +{ + /* + * We upset the sequencer :-( + * Lookup the error message + */ + int i; + int error; + + error = ahc_inb(ahc, ERROR); + for (i = 0; error != 1 && i < num_errors; i++) + error >>= 1; + printk("%s: brkadrint, %s at seqaddr = 0x%x\n", + ahc_name(ahc), ahc_hard_errors[i].errmesg, + ahc_inb(ahc, SEQADDR0) | + (ahc_inb(ahc, SEQADDR1) << 8)); + + ahc_dump_card_state(ahc); + + /* Tell everyone that this HBA is no longer available */ + ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, + CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, + CAM_NO_HBA); + + /* Disable all interrupt sources by resetting the controller */ + ahc_shutdown(ahc); +} + +static void +ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) +{ + struct scb *scb; + struct ahc_devinfo devinfo; + + ahc_fetch_devinfo(ahc, &devinfo); + + /* + * Clear the upper byte that holds SEQINT status + * codes and clear the SEQINT bit. We will unpause + * the sequencer, if appropriate, after servicing + * the request. + */ + ahc_outb(ahc, CLRINT, CLRSEQINT); + switch (intstat & SEQINT_MASK) { + case BAD_STATUS: + { + u_int scb_index; + struct hardware_scb *hscb; + + /* + * Set the default return value to 0 (don't + * send sense). The sense code will change + * this if needed. + */ + ahc_outb(ahc, RETURN_1, 0); + + /* + * The sequencer will notify us when a command + * has an error that would be of interest to + * the kernel. This allows us to leave the sequencer + * running in the common case of command completes + * without error. The sequencer will already have + * dma'd the SCB back up to us, so we can reference + * the in kernel copy directly. + */ + scb_index = ahc_inb(ahc, SCB_TAG); + scb = ahc_lookup_scb(ahc, scb_index); + if (scb == NULL) { + ahc_print_devinfo(ahc, &devinfo); + printk("ahc_intr - referenced scb " + "not valid during seqint 0x%x scb(%d)\n", + intstat, scb_index); + ahc_dump_card_state(ahc); + panic("for safety"); + goto unpause; + } + + hscb = scb->hscb; + + /* Don't want to clobber the original sense code */ + if ((scb->flags & SCB_SENSE) != 0) { + /* + * Clear the SCB_SENSE Flag and have + * the sequencer do a normal command + * complete. + */ + scb->flags &= ~SCB_SENSE; + ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); + break; + } + ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); + /* Freeze the queue until the client sees the error. 
*/ + ahc_freeze_devq(ahc, scb); + ahc_freeze_scb(scb); + ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status); + switch (hscb->shared_data.status.scsi_status) { + case SAM_STAT_GOOD: + printk("%s: Interrupted for status of 0???\n", + ahc_name(ahc)); + break; + case SAM_STAT_COMMAND_TERMINATED: + case SAM_STAT_CHECK_CONDITION: + { + struct ahc_dma_seg *sg; + struct scsi_sense *sc; + struct ahc_initiator_tinfo *targ_info; + struct ahc_tmode_tstate *tstate; + struct ahc_transinfo *tinfo; +#ifdef AHC_DEBUG + if (ahc_debug & AHC_SHOW_SENSE) { + ahc_print_path(ahc, scb); + printk("SCB %d: requests Check Status\n", + scb->hscb->tag); + } +#endif + + if (ahc_perform_autosense(scb) == 0) + break; + + targ_info = ahc_fetch_transinfo(ahc, + devinfo.channel, + devinfo.our_scsiid, + devinfo.target, + &tstate); + tinfo = &targ_info->curr; + sg = scb->sg_list; + sc = (struct scsi_sense *)(&hscb->shared_data.cdb); + /* + * Save off the residual if there is one. + */ + ahc_update_residual(ahc, scb); +#ifdef AHC_DEBUG + if (ahc_debug & AHC_SHOW_SENSE) { + ahc_print_path(ahc, scb); + printk("Sending Sense\n"); + } +#endif + sg->addr = ahc_get_sense_bufaddr(ahc, scb); + sg->len = ahc_get_sense_bufsize(ahc, scb); + sg->len |= AHC_DMA_LAST_SEG; + + /* Fixup byte order */ + sg->addr = ahc_htole32(sg->addr); + sg->len = ahc_htole32(sg->len); + + sc->opcode = REQUEST_SENSE; + sc->byte2 = 0; + if (tinfo->protocol_version <= SCSI_REV_2 + && SCB_GET_LUN(scb) < 8) + sc->byte2 = SCB_GET_LUN(scb) << 5; + sc->unused[0] = 0; + sc->unused[1] = 0; + sc->length = sg->len; + sc->control = 0; + + /* + * We can't allow the target to disconnect. + * This will be an untagged transaction and + * having the target disconnect will make this + * transaction indestinguishable from outstanding + * tagged transactions. + */ + hscb->control = 0; + + /* + * This request sense could be because the + * the device lost power or in some other + * way has lost our transfer negotiations. + * Renegotiate if appropriate. Unit attention + * errors will be reported before any data + * phases occur. + */ + if (ahc_get_residual(scb) + == ahc_get_transfer_length(scb)) { + ahc_update_neg_request(ahc, &devinfo, + tstate, targ_info, + AHC_NEG_IF_NON_ASYNC); + } + if (tstate->auto_negotiate & devinfo.target_mask) { + hscb->control |= MK_MESSAGE; + scb->flags &= ~SCB_NEGOTIATE; + scb->flags |= SCB_AUTO_NEGOTIATE; + } + hscb->cdb_len = sizeof(*sc); + hscb->dataptr = sg->addr; + hscb->datacnt = sg->len; + hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID; + hscb->sgptr = ahc_htole32(hscb->sgptr); + scb->sg_count = 1; + scb->flags |= SCB_SENSE; + ahc_qinfifo_requeue_tail(ahc, scb); + ahc_outb(ahc, RETURN_1, SEND_SENSE); + /* + * Ensure we have enough time to actually + * retrieve the sense. 
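+			 * The 5 * 1000000 argument below is in microseconds,
+			 * giving the recovery REQUEST SENSE a fresh five
+			 * second window.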
+ */ + ahc_scb_timer_reset(scb, 5 * 1000000); + break; + } + default: + break; + } + break; + } + case NO_MATCH: + { + /* Ensure we don't leave the selection hardware on */ + ahc_outb(ahc, SCSISEQ, + ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); + + printk("%s:%c:%d: no active SCB for reconnecting " + "target - issuing BUS DEVICE RESET\n", + ahc_name(ahc), devinfo.channel, devinfo.target); + printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " + "ARG_1 == 0x%x ACCUM = 0x%x\n", + ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), + ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); + printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " + "SINDEX == 0x%x\n", + ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), + ahc_index_busy_tcl(ahc, + BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), + ahc_inb(ahc, SAVED_LUN))), + ahc_inb(ahc, SINDEX)); + printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " + "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", + ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), + ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), + ahc_inb(ahc, SCB_CONTROL)); + printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", + ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); + printk("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0)); + printk("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL)); + ahc_dump_card_state(ahc); + ahc->msgout_buf[0] = TARGET_RESET; + ahc->msgout_len = 1; + ahc->msgout_index = 0; + ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; + ahc_outb(ahc, MSG_OUT, HOST_MSG); + ahc_assert_atn(ahc); + break; + } + case SEND_REJECT: + { + u_int rejbyte = ahc_inb(ahc, ACCUM); + printk("%s:%c:%d: Warning - unknown message received from " + "target (0x%x). Rejecting\n", + ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); + break; + } + case PROTO_VIOLATION: + { + ahc_handle_proto_violation(ahc); + break; + } + case IGN_WIDE_RES: + ahc_handle_ign_wide_residue(ahc, &devinfo); + break; + case PDATA_REINIT: + ahc_reinitialize_dataptrs(ahc); + break; + case BAD_PHASE: + { + u_int lastphase; + + lastphase = ahc_inb(ahc, LASTPHASE); + printk("%s:%c:%d: unknown scsi bus phase %x, " + "lastphase = 0x%x. Attempting to continue\n", + ahc_name(ahc), devinfo.channel, devinfo.target, + lastphase, ahc_inb(ahc, SCSISIGI)); + break; + } + case MISSED_BUSFREE: + { + u_int lastphase; + + lastphase = ahc_inb(ahc, LASTPHASE); + printk("%s:%c:%d: Missed busfree. " + "Lastphase = 0x%x, Curphase = 0x%x\n", + ahc_name(ahc), devinfo.channel, devinfo.target, + lastphase, ahc_inb(ahc, SCSISIGI)); + ahc_restart(ahc); + return; + } + case HOST_MSG_LOOP: + { + /* + * The sequencer has encountered a message phase + * that requires host assistance for completion. + * While handling the message phase(s), we will be + * notified by the sequencer after each byte is + * transferred so we can track bus phase changes. + * + * If this is the first time we've seen a HOST_MSG_LOOP + * interrupt, initialize the state of the host message + * loop. + */ + if (ahc->msg_type == MSG_TYPE_NONE) { + struct scb *scb; + u_int scb_index; + u_int bus_phase; + + bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; + if (bus_phase != P_MESGIN + && bus_phase != P_MESGOUT) { + printk("ahc_intr: HOST_MSG_LOOP bad " + "phase 0x%x\n", + bus_phase); + /* + * Probably transitioned to bus free before + * we got here. Just punt the message. 
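+				 * Clearing the latched interrupt status and
+				 * restarting the sequencer abandons message
+				 * handling for this connection.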
+ */ + ahc_clear_intstat(ahc); + ahc_restart(ahc); + return; + } + + scb_index = ahc_inb(ahc, SCB_TAG); + scb = ahc_lookup_scb(ahc, scb_index); + if (devinfo.role == ROLE_INITIATOR) { + if (bus_phase == P_MESGOUT) { + if (scb == NULL) + panic("HOST_MSG_LOOP with " + "invalid SCB %x\n", + scb_index); + + ahc_setup_initiator_msgout(ahc, + &devinfo, + scb); + } else { + ahc->msg_type = + MSG_TYPE_INITIATOR_MSGIN; + ahc->msgin_index = 0; + } + } +#ifdef AHC_TARGET_MODE + else { + if (bus_phase == P_MESGOUT) { + ahc->msg_type = + MSG_TYPE_TARGET_MSGOUT; + ahc->msgin_index = 0; + } else + ahc_setup_target_msgin(ahc, + &devinfo, + scb); + } +#endif + } + + ahc_handle_message_phase(ahc); + break; + } + case PERR_DETECTED: + { + /* + * If we've cleared the parity error interrupt + * but the sequencer still believes that SCSIPERR + * is true, it must be that the parity error is + * for the currently presented byte on the bus, + * and we are not in a phase (data-in) where we will + * eventually ack this byte. Ack the byte and + * throw it away in the hope that the target will + * take us to message out to deliver the appropriate + * error message. + */ + if ((intstat & SCSIINT) == 0 + && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) { + + if ((ahc->features & AHC_DT) == 0) { + u_int curphase; + + /* + * The hardware will only let you ack bytes + * if the expected phase in SCSISIGO matches + * the current phase. Make sure this is + * currently the case. + */ + curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; + ahc_outb(ahc, LASTPHASE, curphase); + ahc_outb(ahc, SCSISIGO, curphase); + } + if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) { + int wait; + + /* + * In a data phase. Faster to bitbucket + * the data than to individually ack each + * byte. This is also the only strategy + * that will work with AUTOACK enabled. + */ + ahc_outb(ahc, SXFRCTL1, + ahc_inb(ahc, SXFRCTL1) | BITBUCKET); + wait = 5000; + while (--wait != 0) { + if ((ahc_inb(ahc, SCSISIGI) + & (CDI|MSGI)) != 0) + break; + ahc_delay(100); + } + ahc_outb(ahc, SXFRCTL1, + ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); + if (wait == 0) { + struct scb *scb; + u_int scb_index; + + ahc_print_devinfo(ahc, &devinfo); + printk("Unable to clear parity error. " + "Resetting bus.\n"); + scb_index = ahc_inb(ahc, SCB_TAG); + scb = ahc_lookup_scb(ahc, scb_index); + if (scb != NULL) + ahc_set_transaction_status(scb, + CAM_UNCOR_PARITY); + ahc_reset_channel(ahc, devinfo.channel, + /*init reset*/TRUE); + } + } else { + ahc_inb(ahc, SCSIDATL); + } + } + break; + } + case DATA_OVERRUN: + { + /* + * When the sequencer detects an overrun, it + * places the controller in "BITBUCKET" mode + * and allows the target to complete its transfer. + * Unfortunately, none of the counters get updated + * when the controller is in this mode, so we have + * no way of knowing how large the overrun was. + */ + u_int scbindex = ahc_inb(ahc, SCB_TAG); + u_int lastphase = ahc_inb(ahc, LASTPHASE); + u_int i; + + scb = ahc_lookup_scb(ahc, scbindex); + for (i = 0; i < num_phases; i++) { + if (lastphase == ahc_phase_table[i].phase) + break; + } + ahc_print_path(ahc, scb); + printk("data overrun detected %s." + " Tag == 0x%x.\n", + ahc_phase_table[i].phasemsg, + scb->hscb->tag); + ahc_print_path(ahc, scb); + printk("%s seen Data Phase. Length = %ld. NumSGs = %d.\n", + ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? 
"Have" : "Haven't", + ahc_get_transfer_length(scb), scb->sg_count); + if (scb->sg_count > 0) { + for (i = 0; i < scb->sg_count; i++) { + + printk("sg[%d] - Addr 0x%x%x : Length %d\n", + i, + (ahc_le32toh(scb->sg_list[i].len) >> 24 + & SG_HIGH_ADDR_BITS), + ahc_le32toh(scb->sg_list[i].addr), + ahc_le32toh(scb->sg_list[i].len) + & AHC_SG_LEN_MASK); + } + } + /* + * Set this and it will take effect when the + * target does a command complete. + */ + ahc_freeze_devq(ahc, scb); + if ((scb->flags & SCB_SENSE) == 0) { + ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR); + } else { + scb->flags &= ~SCB_SENSE; + ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); + } + ahc_freeze_scb(scb); + + if ((ahc->features & AHC_ULTRA2) != 0) { + /* + * Clear the channel in case we return + * to data phase later. + */ + ahc_outb(ahc, SXFRCTL0, + ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); + ahc_outb(ahc, SXFRCTL0, + ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); + } + if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { + u_int dscommand1; + + /* Ensure HHADDR is 0 for future DMA operations. */ + dscommand1 = ahc_inb(ahc, DSCOMMAND1); + ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); + ahc_outb(ahc, HADDR, 0); + ahc_outb(ahc, DSCOMMAND1, dscommand1); + } + break; + } + case MKMSG_FAILED: + { + u_int scbindex; + + printk("%s:%c:%d:%d: Attempt to issue message failed\n", + ahc_name(ahc), devinfo.channel, devinfo.target, + devinfo.lun); + scbindex = ahc_inb(ahc, SCB_TAG); + scb = ahc_lookup_scb(ahc, scbindex); + if (scb != NULL + && (scb->flags & SCB_RECOVERY_SCB) != 0) + /* + * Ensure that we didn't put a second instance of this + * SCB into the QINFIFO. + */ + ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), + SCB_GET_CHANNEL(ahc, scb), + SCB_GET_LUN(scb), scb->hscb->tag, + ROLE_INITIATOR, /*status*/0, + SEARCH_REMOVE); + break; + } + case NO_FREE_SCB: + { + printk("%s: No free or disconnected SCBs\n", ahc_name(ahc)); + ahc_dump_card_state(ahc); + panic("for safety"); + break; + } + case SCB_MISMATCH: + { + u_int scbptr; + + scbptr = ahc_inb(ahc, SCBPTR); + printk("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n", + scbptr, ahc_inb(ahc, ARG_1), + ahc->scb_data->hscbs[scbptr].tag); + ahc_dump_card_state(ahc); + panic("for safety"); + break; + } + case OUT_OF_RANGE: + { + printk("%s: BTT calculation out of range\n", ahc_name(ahc)); + printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " + "ARG_1 == 0x%x ACCUM = 0x%x\n", + ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), + ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); + printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " + "SINDEX == 0x%x\n, A == 0x%x\n", + ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), + ahc_index_busy_tcl(ahc, + BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), + ahc_inb(ahc, SAVED_LUN))), + ahc_inb(ahc, SINDEX), + ahc_inb(ahc, ACCUM)); + printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " + "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", + ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), + ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), + ahc_inb(ahc, SCB_CONTROL)); + printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", + ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); + ahc_dump_card_state(ahc); + panic("for safety"); + break; + } + default: + printk("ahc_intr: seqint, " + "intstat == 0x%x, scsisigi = 0x%x\n", + intstat, ahc_inb(ahc, SCSISIGI)); + break; + } +unpause: + /* + * The sequencer is paused immediately on + * a SEQINT, so we should restart it when + * we're done. 
+ */ + ahc_unpause(ahc); +} + +static void +ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) +{ + u_int scb_index; + u_int status0; + u_int status; + struct scb *scb; + char cur_channel; + char intr_channel; + + if ((ahc->features & AHC_TWIN) != 0 + && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) + cur_channel = 'B'; + else + cur_channel = 'A'; + intr_channel = cur_channel; + + if ((ahc->features & AHC_ULTRA2) != 0) + status0 = ahc_inb(ahc, SSTAT0) & IOERR; + else + status0 = 0; + status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); + if (status == 0 && status0 == 0) { + if ((ahc->features & AHC_TWIN) != 0) { + /* Try the other channel */ + ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); + status = ahc_inb(ahc, SSTAT1) + & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); + intr_channel = (cur_channel == 'A') ? 'B' : 'A'; + } + if (status == 0) { + printk("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); + ahc_outb(ahc, CLRINT, CLRSCSIINT); + ahc_unpause(ahc); + return; + } + } + + /* Make sure the sequencer is in a safe location. */ + ahc_clear_critical_section(ahc); + + scb_index = ahc_inb(ahc, SCB_TAG); + scb = ahc_lookup_scb(ahc, scb_index); + if (scb != NULL + && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) + scb = NULL; + + if ((ahc->features & AHC_ULTRA2) != 0 + && (status0 & IOERR) != 0) { + int now_lvd; + + now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; + printk("%s: Transceiver State Has Changed to %s mode\n", + ahc_name(ahc), now_lvd ? "LVD" : "SE"); + ahc_outb(ahc, CLRSINT0, CLRIOERR); + /* + * When transitioning to SE mode, the reset line + * glitches, triggering an arbitration bug in some + * Ultra2 controllers. This bug is cleared when we + * assert the reset line. Since a reset glitch has + * already occurred with this transition and a + * transceiver state change is handled just like + * a bus reset anyway, asserting the reset line + * ourselves is safe. + */ + ahc_reset_channel(ahc, intr_channel, + /*Initiate Reset*/now_lvd == 0); + } else if ((status & SCSIRSTI) != 0) { + printk("%s: Someone reset channel %c\n", + ahc_name(ahc), intr_channel); + if (intr_channel != cur_channel) + ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); + ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE); + } else if ((status & SCSIPERR) != 0) { + /* + * Determine the bus phase and queue an appropriate message. + * SCSIPERR is latched true as soon as a parity error + * occurs. If the sequencer acked the transfer that + * caused the parity error and the currently presented + * transfer on the bus has correct parity, SCSIPERR will + * be cleared by CLRSCSIPERR. Use this to determine if + * we should look at the last phase the sequencer recorded, + * or the current phase presented on the bus. + */ + struct ahc_devinfo devinfo; + u_int mesg_out; + u_int curphase; + u_int errorphase; + u_int lastphase; + u_int scsirate; + u_int i; + u_int sstat2; + int silent; + + lastphase = ahc_inb(ahc, LASTPHASE); + curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; + sstat2 = ahc_inb(ahc, SSTAT2); + ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); + /* + * For all phases save DATA, the sequencer won't + * automatically ack a byte that has a parity error + * in it. So the only way that the current phase + * could be 'data-in' is if the parity error is for + * an already acked byte in the data phase. During + * synchronous data-in transfers, we may actually + * ack bytes before latching the current phase in + * LASTPHASE, leading to the discrepancy between + * curphase and lastphase. 
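+		 * The test below therefore trusts the current phase when
+		 * SCSIPERR is still latched or a data-in phase is on the
+		 * bus, and falls back to LASTPHASE otherwise.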
+ */ + if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 + || curphase == P_DATAIN || curphase == P_DATAIN_DT) + errorphase = curphase; + else + errorphase = lastphase; + + for (i = 0; i < num_phases; i++) { + if (errorphase == ahc_phase_table[i].phase) + break; + } + mesg_out = ahc_phase_table[i].mesg_out; + silent = FALSE; + if (scb != NULL) { + if (SCB_IS_SILENT(scb)) + silent = TRUE; + else + ahc_print_path(ahc, scb); + scb->flags |= SCB_TRANSMISSION_ERROR; + } else + printk("%s:%c:%d: ", ahc_name(ahc), intr_channel, + SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); + scsirate = ahc_inb(ahc, SCSIRATE); + if (silent == FALSE) { + printk("parity error detected %s. " + "SEQADDR(0x%x) SCSIRATE(0x%x)\n", + ahc_phase_table[i].phasemsg, + ahc_inw(ahc, SEQADDR0), + scsirate); + if ((ahc->features & AHC_DT) != 0) { + if ((sstat2 & CRCVALERR) != 0) + printk("\tCRC Value Mismatch\n"); + if ((sstat2 & CRCENDERR) != 0) + printk("\tNo terminal CRC packet " + "received\n"); + if ((sstat2 & CRCREQERR) != 0) + printk("\tIllegal CRC packet " + "request\n"); + if ((sstat2 & DUAL_EDGE_ERR) != 0) + printk("\tUnexpected %sDT Data Phase\n", + (scsirate & SINGLE_EDGE) + ? "" : "non-"); + } + } + + if ((ahc->features & AHC_DT) != 0 + && (sstat2 & DUAL_EDGE_ERR) != 0) { + /* + * This error applies regardless of + * data direction, so ignore the value + * in the phase table. + */ + mesg_out = INITIATOR_ERROR; + } + + /* + * We've set the hardware to assert ATN if we + * get a parity error on "in" phases, so all we + * need to do is stuff the message buffer with + * the appropriate message. "In" phases have set + * mesg_out to something other than MSG_NOP. + */ + if (mesg_out != NOP) { + if (ahc->msg_type != MSG_TYPE_NONE) + ahc->send_msg_perror = TRUE; + else + ahc_outb(ahc, MSG_OUT, mesg_out); + } + /* + * Force a renegotiation with this target just in + * case we are out of sync for some external reason + * unknown (or unreported) by the target. + */ + ahc_fetch_devinfo(ahc, &devinfo); + ahc_force_renegotiation(ahc, &devinfo); + + ahc_outb(ahc, CLRINT, CLRSCSIINT); + ahc_unpause(ahc); + } else if ((status & SELTO) != 0) { + u_int scbptr; + + /* Stop the selection */ + ahc_outb(ahc, SCSISEQ, 0); + + /* No more pending messages */ + ahc_clear_msg_state(ahc); + + /* Clear interrupt state */ + ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); + ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); + + /* + * Although the driver does not care about the + * 'Selection in Progress' status bit, the busy + * LED does. SELINGO is only cleared by a successful + * selection, so we must manually clear it to insure + * the LED turns off just incase no future successful + * selections occur (e.g. no devices on the bus). + */ + ahc_outb(ahc, CLRSINT0, CLRSELINGO); + + scbptr = ahc_inb(ahc, WAITING_SCBH); + ahc_outb(ahc, SCBPTR, scbptr); + scb_index = ahc_inb(ahc, SCB_TAG); + + scb = ahc_lookup_scb(ahc, scb_index); + if (scb == NULL) { + printk("%s: ahc_intr - referenced scb not " + "valid during SELTO scb(%d, %d)\n", + ahc_name(ahc), scbptr, scb_index); + ahc_dump_card_state(ahc); + } else { + struct ahc_devinfo devinfo; +#ifdef AHC_DEBUG + if ((ahc_debug & AHC_SHOW_SELTO) != 0) { + ahc_print_path(ahc, scb); + printk("Saw Selection Timeout for SCB 0x%x\n", + scb_index); + } +#endif + ahc_scb_devinfo(ahc, &devinfo, scb); + ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT); + ahc_freeze_devq(ahc, scb); + + /* + * Cancel any pending transactions on the device + * now that it seems to be missing. 
This will + * also revert us to async/narrow transfers until + * we can renegotiate with the device. + */ + ahc_handle_devreset(ahc, &devinfo, + CAM_SEL_TIMEOUT, + "Selection Timeout", + /*verbose_level*/1); + } + ahc_outb(ahc, CLRINT, CLRSCSIINT); + ahc_restart(ahc); + } else if ((status & BUSFREE) != 0 + && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { + struct ahc_devinfo devinfo; + u_int lastphase; + u_int saved_scsiid; + u_int saved_lun; + u_int target; + u_int initiator_role_id; + char channel; + int printerror; + + /* + * Clear our selection hardware as soon as possible. + * We may have an entry in the waiting Q for this target, + * that is affected by this busfree and we don't want to + * go about selecting the target while we handle the event. + */ + ahc_outb(ahc, SCSISEQ, + ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); + + /* + * Disable busfree interrupts and clear the busfree + * interrupt status. We do this here so that several + * bus transactions occur prior to clearing the SCSIINT + * latch. It can take a bit for the clearing to take effect. + */ + ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); + ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); + + /* + * Look at what phase we were last in. + * If its message out, chances are pretty good + * that the busfree was in response to one of + * our abort requests. + */ + lastphase = ahc_inb(ahc, LASTPHASE); + saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); + saved_lun = ahc_inb(ahc, SAVED_LUN); + target = SCSIID_TARGET(ahc, saved_scsiid); + initiator_role_id = SCSIID_OUR_ID(saved_scsiid); + channel = SCSIID_CHANNEL(ahc, saved_scsiid); + ahc_compile_devinfo(&devinfo, initiator_role_id, + target, saved_lun, channel, ROLE_INITIATOR); + printerror = 1; + + if (lastphase == P_MESGOUT) { + u_int tag; + + tag = SCB_LIST_NULL; + if (ahc_sent_msg(ahc, AHCMSG_1B, ABORT_TASK, TRUE) + || ahc_sent_msg(ahc, AHCMSG_1B, ABORT_TASK_SET, TRUE)) { + if (ahc->msgout_buf[ahc->msgout_index - 1] + == ABORT_TASK) + tag = scb->hscb->tag; + ahc_print_path(ahc, scb); + printk("SCB %d - Abort%s Completed.\n", + scb->hscb->tag, tag == SCB_LIST_NULL ? + "" : " Tag"); + ahc_abort_scbs(ahc, target, channel, + saved_lun, tag, + ROLE_INITIATOR, + CAM_REQ_ABORTED); + printerror = 0; + } else if (ahc_sent_msg(ahc, AHCMSG_1B, + TARGET_RESET, TRUE)) { + ahc_compile_devinfo(&devinfo, + initiator_role_id, + target, + CAM_LUN_WILDCARD, + channel, + ROLE_INITIATOR); + ahc_handle_devreset(ahc, &devinfo, + CAM_BDR_SENT, + "Bus Device Reset", + /*verbose_level*/0); + printerror = 0; + } else if (ahc_sent_msg(ahc, AHCMSG_EXT, + EXTENDED_PPR, FALSE)) { + struct ahc_initiator_tinfo *tinfo; + struct ahc_tmode_tstate *tstate; + + /* + * PPR Rejected. Try non-ppr negotiation + * and retry command. + */ + tinfo = ahc_fetch_transinfo(ahc, + devinfo.channel, + devinfo.our_scsiid, + devinfo.target, + &tstate); + tinfo->curr.transport_version = 2; + tinfo->goal.transport_version = 2; + tinfo->goal.ppr_options = 0; + ahc_qinfifo_requeue_tail(ahc, scb); + printerror = 0; + } else if (ahc_sent_msg(ahc, AHCMSG_EXT, + EXTENDED_WDTR, FALSE)) { + /* + * Negotiation Rejected. Go-narrow and + * retry command. + */ + ahc_set_width(ahc, &devinfo, + MSG_EXT_WDTR_BUS_8_BIT, + AHC_TRANS_CUR|AHC_TRANS_GOAL, + /*paused*/TRUE); + ahc_qinfifo_requeue_tail(ahc, scb); + printerror = 0; + } else if (ahc_sent_msg(ahc, AHCMSG_EXT, + EXTENDED_SDTR, FALSE)) { + /* + * Negotiation Rejected. Go-async and + * retry command. 
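+				 * A NULL syncrate with zero period and offset
+				 * forces both the current and goal settings
+				 * back to asynchronous before the command is
+				 * requeued.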
+ */ + ahc_set_syncrate(ahc, &devinfo, + /*syncrate*/NULL, + /*period*/0, /*offset*/0, + /*ppr_options*/0, + AHC_TRANS_CUR|AHC_TRANS_GOAL, + /*paused*/TRUE); + ahc_qinfifo_requeue_tail(ahc, scb); + printerror = 0; + } + } + if (printerror != 0) { + u_int i; + + if (scb != NULL) { + u_int tag; + + if ((scb->hscb->control & TAG_ENB) != 0) + tag = scb->hscb->tag; + else + tag = SCB_LIST_NULL; + ahc_print_path(ahc, scb); + ahc_abort_scbs(ahc, target, channel, + SCB_GET_LUN(scb), tag, + ROLE_INITIATOR, + CAM_UNEXP_BUSFREE); + } else { + /* + * We had not fully identified this connection, + * so we cannot abort anything. + */ + printk("%s: ", ahc_name(ahc)); + } + for (i = 0; i < num_phases; i++) { + if (lastphase == ahc_phase_table[i].phase) + break; + } + if (lastphase != P_BUSFREE) { + /* + * Renegotiate with this device at the + * next opportunity just in case this busfree + * is due to a negotiation mismatch with the + * device. + */ + ahc_force_renegotiation(ahc, &devinfo); + } + printk("Unexpected busfree %s\n" + "SEQADDR == 0x%x\n", + ahc_phase_table[i].phasemsg, + ahc_inb(ahc, SEQADDR0) + | (ahc_inb(ahc, SEQADDR1) << 8)); + } + ahc_outb(ahc, CLRINT, CLRSCSIINT); + ahc_restart(ahc); + } else { + printk("%s: Missing case in ahc_handle_scsiint. status = %x\n", + ahc_name(ahc), status); + ahc_outb(ahc, CLRINT, CLRSCSIINT); + } +} + +/* + * Force renegotiation to occur the next time we initiate + * a command to the current device. + */ +static void +ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) +{ + struct ahc_initiator_tinfo *targ_info; + struct ahc_tmode_tstate *tstate; + + targ_info = ahc_fetch_transinfo(ahc, + devinfo->channel, + devinfo->our_scsiid, + devinfo->target, + &tstate); + ahc_update_neg_request(ahc, devinfo, tstate, + targ_info, AHC_NEG_IF_NON_ASYNC); +} + +#define AHC_MAX_STEPS 2000 +static void +ahc_clear_critical_section(struct ahc_softc *ahc) +{ + int stepping; + int steps; + u_int simode0; + u_int simode1; + + if (ahc->num_critical_sections == 0) + return; + + stepping = FALSE; + steps = 0; + simode0 = 0; + simode1 = 0; + for (;;) { + struct cs *cs; + u_int seqaddr; + u_int i; + + seqaddr = ahc_inb(ahc, SEQADDR0) + | (ahc_inb(ahc, SEQADDR1) << 8); + + /* + * Seqaddr represents the next instruction to execute, + * so we are really executing the instruction just + * before it. + */ + if (seqaddr != 0) + seqaddr -= 1; + cs = ahc->critical_sections; + for (i = 0; i < ahc->num_critical_sections; i++, cs++) { + if (cs->begin < seqaddr && cs->end >= seqaddr) + break; + } + + if (i == ahc->num_critical_sections) + break; + + if (steps > AHC_MAX_STEPS) { + printk("%s: Infinite loop in critical section\n", + ahc_name(ahc)); + ahc_dump_card_state(ahc); + panic("critical section loop"); + } + + steps++; + if (stepping == FALSE) { + + /* + * Disable all interrupt sources so that the + * sequencer will not be stuck by a pausing + * interrupt condition while we attempt to + * leave a critical section. + */ + simode0 = ahc_inb(ahc, SIMODE0); + ahc_outb(ahc, SIMODE0, 0); + simode1 = ahc_inb(ahc, SIMODE1); + if ((ahc->features & AHC_DT) != 0) + /* + * On DT class controllers, we + * use the enhanced busfree logic. + * Unfortunately we cannot re-enable + * busfree detection within the + * current connection, so we must + * leave it on while single stepping. 
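+			 * Only the ENBUSFREE bit of the saved SIMODE1 value
+			 * is preserved while stepping; all other interrupt
+			 * sources are masked just as in the non-DT case.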
+ */ + ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE); + else + ahc_outb(ahc, SIMODE1, 0); + ahc_outb(ahc, CLRINT, CLRSCSIINT); + ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP); + stepping = TRUE; + } + if ((ahc->features & AHC_DT) != 0) { + ahc_outb(ahc, CLRSINT1, CLRBUSFREE); + ahc_outb(ahc, CLRINT, CLRSCSIINT); + } + ahc_outb(ahc, HCNTRL, ahc->unpause); + while (!ahc_is_paused(ahc)) + ahc_delay(200); + } + if (stepping) { + ahc_outb(ahc, SIMODE0, simode0); + ahc_outb(ahc, SIMODE1, simode1); + ahc_outb(ahc, SEQCTL, ahc->seqctl); + } +} + +/* + * Clear any pending interrupt status. + */ +static void +ahc_clear_intstat(struct ahc_softc *ahc) +{ + /* Clear any interrupt conditions this may have caused */ + ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI + |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG| + CLRREQINIT); + ahc_flush_device_writes(ahc); + ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO); + ahc_flush_device_writes(ahc); + ahc_outb(ahc, CLRINT, CLRSCSIINT); + ahc_flush_device_writes(ahc); +} + +/**************************** Debugging Routines ******************************/ +#ifdef AHC_DEBUG +uint32_t ahc_debug = AHC_DEBUG_OPTS; +#endif + +#if 0 /* unused */ +static void +ahc_print_scb(struct scb *scb) +{ + int i; + + struct hardware_scb *hscb = scb->hscb; + + printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", + (void *)scb, + hscb->control, + hscb->scsiid, + hscb->lun, + hscb->cdb_len); + printk("Shared Data: "); + for (i = 0; i < sizeof(hscb->shared_data.cdb); i++) + printk("%#02x", hscb->shared_data.cdb[i]); + printk(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n", + ahc_le32toh(hscb->dataptr), + ahc_le32toh(hscb->datacnt), + ahc_le32toh(hscb->sgptr), + hscb->tag); + if (scb->sg_count > 0) { + for (i = 0; i < scb->sg_count; i++) { + printk("sg[%d] - Addr 0x%x%x : Length %d\n", + i, + (ahc_le32toh(scb->sg_list[i].len) >> 24 + & SG_HIGH_ADDR_BITS), + ahc_le32toh(scb->sg_list[i].addr), + ahc_le32toh(scb->sg_list[i].len)); + } + } +} +#endif + +/************************* Transfer Negotiation *******************************/ +/* + * Allocate per target mode instance (ID we respond to as a target) + * transfer negotiation data structures. + */ +static struct ahc_tmode_tstate * +ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel) +{ + struct ahc_tmode_tstate *master_tstate; + struct ahc_tmode_tstate *tstate; + int i; + + master_tstate = ahc->enabled_targets[ahc->our_id]; + if (channel == 'B') { + scsi_id += 8; + master_tstate = ahc->enabled_targets[ahc->our_id_b + 8]; + } + if (ahc->enabled_targets[scsi_id] != NULL + && ahc->enabled_targets[scsi_id] != master_tstate) + panic("%s: ahc_alloc_tstate - Target already allocated", + ahc_name(ahc)); + tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC); + if (tstate == NULL) + return (NULL); + + /* + * If we have allocated a master tstate, copy user settings from + * the master tstate (taken from SRAM or the EEPROM) for this + * channel, but reset our current and goal settings to async/narrow + * until an initiator talks to us. 
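+	 * Clearing the per-target curr/goal transinfo below is what
+	 * yields the async/narrow starting point; the user settings
+	 * copied by memcpy() are left intact.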
+ */ + if (master_tstate != NULL) { + memcpy(tstate, master_tstate, sizeof(*tstate)); + memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); + tstate->ultraenb = 0; + for (i = 0; i < AHC_NUM_TARGETS; i++) { + memset(&tstate->transinfo[i].curr, 0, + sizeof(tstate->transinfo[i].curr)); + memset(&tstate->transinfo[i].goal, 0, + sizeof(tstate->transinfo[i].goal)); + } + } else + memset(tstate, 0, sizeof(*tstate)); + ahc->enabled_targets[scsi_id] = tstate; + return (tstate); +} + +#ifdef AHC_TARGET_MODE +/* + * Free per target mode instance (ID we respond to as a target) + * transfer negotiation data structures. + */ +static void +ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force) +{ + struct ahc_tmode_tstate *tstate; + + /* + * Don't clean up our "master" tstate. + * It has our default user settings. + */ + if (((channel == 'B' && scsi_id == ahc->our_id_b) + || (channel == 'A' && scsi_id == ahc->our_id)) + && force == FALSE) + return; + + if (channel == 'B') + scsi_id += 8; + tstate = ahc->enabled_targets[scsi_id]; + kfree(tstate); + ahc->enabled_targets[scsi_id] = NULL; +} +#endif + +/* + * Called when we have an active connection to a target on the bus, + * this function finds the nearest syncrate to the input period limited + * by the capabilities of the bus connectivity of and sync settings for + * the target. + */ +static const struct ahc_syncrate * +ahc_devlimited_syncrate(struct ahc_softc *ahc, + struct ahc_initiator_tinfo *tinfo, + u_int *period, u_int *ppr_options, role_t role) +{ + struct ahc_transinfo *transinfo; + u_int maxsync; + + if ((ahc->features & AHC_ULTRA2) != 0) { + if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0 + && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) { + maxsync = AHC_SYNCRATE_DT; + } else { + maxsync = AHC_SYNCRATE_ULTRA; + /* Can't do DT on an SE bus */ + *ppr_options &= ~MSG_EXT_PPR_DT_REQ; + } + } else if ((ahc->features & AHC_ULTRA) != 0) { + maxsync = AHC_SYNCRATE_ULTRA; + } else { + maxsync = AHC_SYNCRATE_FAST; + } + /* + * Never allow a value higher than our current goal + * period otherwise we may allow a target initiated + * negotiation to go above the limit as set by the + * user. In the case of an initiator initiated + * sync negotiation, we limit based on the user + * setting. This allows the system to still accept + * incoming negotiations even if target initiated + * negotiation is not performed. + */ + if (role == ROLE_TARGET) + transinfo = &tinfo->user; + else + transinfo = &tinfo->goal; + *ppr_options &= transinfo->ppr_options; + if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { + maxsync = max(maxsync, (u_int)AHC_SYNCRATE_ULTRA2); + *ppr_options &= ~MSG_EXT_PPR_DT_REQ; + } + if (transinfo->period == 0) { + *period = 0; + *ppr_options = 0; + return (NULL); + } + *period = max(*period, (u_int)transinfo->period); + return (ahc_find_syncrate(ahc, period, ppr_options, maxsync)); +} + +/* + * Look up the valid period to SCSIRATE conversion in our table. + * Return the period and offset that should be sent to the target + * if this was the beginning of an SDTR. 
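+ * A NULL return (with *period forced to 0) indicates that only
+ * asynchronous transfers can be offered.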
+ */ +const struct ahc_syncrate * +ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, + u_int *ppr_options, u_int maxsync) +{ + const struct ahc_syncrate *syncrate; + + if ((ahc->features & AHC_DT) == 0) + *ppr_options &= ~MSG_EXT_PPR_DT_REQ; + + /* Skip all DT only entries if DT is not available */ + if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 + && maxsync < AHC_SYNCRATE_ULTRA2) + maxsync = AHC_SYNCRATE_ULTRA2; + + /* Now set the maxsync based on the card capabilities + * DT is already done above */ + if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0 + && maxsync < AHC_SYNCRATE_ULTRA) + maxsync = AHC_SYNCRATE_ULTRA; + if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0 + && maxsync < AHC_SYNCRATE_FAST) + maxsync = AHC_SYNCRATE_FAST; + + for (syncrate = &ahc_syncrates[maxsync]; + syncrate->rate != NULL; + syncrate++) { + + /* + * The Ultra2 table doesn't go as low + * as for the Fast/Ultra cards. + */ + if ((ahc->features & AHC_ULTRA2) != 0 + && (syncrate->sxfr_u2 == 0)) + break; + + if (*period <= syncrate->period) { + /* + * When responding to a target that requests + * sync, the requested rate may fall between + * two rates that we can output, but still be + * a rate that we can receive. Because of this, + * we want to respond to the target with + * the same rate that it sent to us even + * if the period we use to send data to it + * is lower. Only lower the response period + * if we must. + */ + if (syncrate == &ahc_syncrates[maxsync]) + *period = syncrate->period; + + /* + * At some speeds, we only support + * ST transfers. + */ + if ((syncrate->sxfr_u2 & ST_SXFR) != 0) + *ppr_options &= ~MSG_EXT_PPR_DT_REQ; + break; + } + } + + if ((*period == 0) + || (syncrate->rate == NULL) + || ((ahc->features & AHC_ULTRA2) != 0 + && (syncrate->sxfr_u2 == 0))) { + /* Use asynchronous transfers. */ + *period = 0; + syncrate = NULL; + *ppr_options &= ~MSG_EXT_PPR_DT_REQ; + } + return (syncrate); +} + +/* + * Convert from an entry in our syncrate table to the SCSI equivalent + * sync "period" factor. + */ +u_int +ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) +{ + const struct ahc_syncrate *syncrate; + + if ((ahc->features & AHC_ULTRA2) != 0) + scsirate &= SXFR_ULTRA2; + else + scsirate &= SXFR; + + /* now set maxsync based on card capabilities */ + if ((ahc->features & AHC_DT) == 0 && maxsync < AHC_SYNCRATE_ULTRA2) + maxsync = AHC_SYNCRATE_ULTRA2; + if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0 + && maxsync < AHC_SYNCRATE_ULTRA) + maxsync = AHC_SYNCRATE_ULTRA; + if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0 + && maxsync < AHC_SYNCRATE_FAST) + maxsync = AHC_SYNCRATE_FAST; + + + syncrate = &ahc_syncrates[maxsync]; + while (syncrate->rate != NULL) { + + if ((ahc->features & AHC_ULTRA2) != 0) { + if (syncrate->sxfr_u2 == 0) + break; + else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2)) + return (syncrate->period); + } else if (scsirate == (syncrate->sxfr & SXFR)) { + return (syncrate->period); + } + syncrate++; + } + return (0); /* async */ +} + +/* + * Truncate the given synchronous offset to a value the + * current adapter type and syncrate are capable of. 
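+ * When a tinfo is supplied, the offset is additionally clamped to the
+ * user limit (target role) or the goal limit (initiator role) for the
+ * device in question.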
+ */ +static void +ahc_validate_offset(struct ahc_softc *ahc, + struct ahc_initiator_tinfo *tinfo, + const struct ahc_syncrate *syncrate, + u_int *offset, int wide, role_t role) +{ + u_int maxoffset; + + /* Limit offset to what we can do */ + if (syncrate == NULL) { + maxoffset = 0; + } else if ((ahc->features & AHC_ULTRA2) != 0) { + maxoffset = MAX_OFFSET_ULTRA2; + } else { + if (wide) + maxoffset = MAX_OFFSET_16BIT; + else + maxoffset = MAX_OFFSET_8BIT; + } + *offset = min(*offset, maxoffset); + if (tinfo != NULL) { + if (role == ROLE_TARGET) + *offset = min(*offset, (u_int)tinfo->user.offset); + else + *offset = min(*offset, (u_int)tinfo->goal.offset); + } +} + +/* + * Truncate the given transfer width parameter to a value the + * current adapter type is capable of. + */ +static void +ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, + u_int *bus_width, role_t role) +{ + switch (*bus_width) { + default: + if (ahc->features & AHC_WIDE) { + /* Respond Wide */ + *bus_width = MSG_EXT_WDTR_BUS_16_BIT; + break; + } + fallthrough; + case MSG_EXT_WDTR_BUS_8_BIT: + *bus_width = MSG_EXT_WDTR_BUS_8_BIT; + break; + } + if (tinfo != NULL) { + if (role == ROLE_TARGET) + *bus_width = min((u_int)tinfo->user.width, *bus_width); + else + *bus_width = min((u_int)tinfo->goal.width, *bus_width); + } +} + +/* + * Update the bitmask of targets for which the controller should + * negotiate with at the next convenient opportunity. This currently + * means the next time we send the initial identify messages for + * a new transaction. + */ +int +ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, + struct ahc_tmode_tstate *tstate, + struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type) +{ + u_int auto_negotiate_orig; + + auto_negotiate_orig = tstate->auto_negotiate; + if (neg_type == AHC_NEG_ALWAYS) { + /* + * Force our "current" settings to be + * unknown so that unless a bus reset + * occurs the need to renegotiate is + * recorded persistently. + */ + if ((ahc->features & AHC_WIDE) != 0) + tinfo->curr.width = AHC_WIDTH_UNKNOWN; + tinfo->curr.period = AHC_PERIOD_UNKNOWN; + tinfo->curr.offset = AHC_OFFSET_UNKNOWN; + } + if (tinfo->curr.period != tinfo->goal.period + || tinfo->curr.width != tinfo->goal.width + || tinfo->curr.offset != tinfo->goal.offset + || tinfo->curr.ppr_options != tinfo->goal.ppr_options + || (neg_type == AHC_NEG_IF_NON_ASYNC + && (tinfo->goal.offset != 0 + || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT + || tinfo->goal.ppr_options != 0))) + tstate->auto_negotiate |= devinfo->target_mask; + else + tstate->auto_negotiate &= ~devinfo->target_mask; + + return (auto_negotiate_orig != tstate->auto_negotiate); +} + +/* + * Update the user/goal/curr tables of synchronous negotiation + * parameters as well as, in the case of a current or active update, + * any data structures on the host controller. In the case of an + * active update, the specified target is currently talking to us on + * the bus, so the transfer parameter update must take effect + * immediately. 
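+ * The AHC_TRANS_USER/GOAL/CUR bits of the type argument select which
+ * tables are touched; an active update also writes SCSIRATE (and
+ * SCSIOFFSET on Ultra2 parts) on the spot.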
+ */ +void +ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, + const struct ahc_syncrate *syncrate, u_int period, + u_int offset, u_int ppr_options, u_int type, int paused) +{ + struct ahc_initiator_tinfo *tinfo; + struct ahc_tmode_tstate *tstate; + u_int old_period; + u_int old_offset; + u_int old_ppr; + int active; + int update_needed; + + active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; + update_needed = 0; + + if (syncrate == NULL) { + period = 0; + offset = 0; + } + + tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, + devinfo->target, &tstate); + + if ((type & AHC_TRANS_USER) != 0) { + tinfo->user.period = period; + tinfo->user.offset = offset; + tinfo->user.ppr_options = ppr_options; + } + + if ((type & AHC_TRANS_GOAL) != 0) { + tinfo->goal.period = period; + tinfo->goal.offset = offset; + tinfo->goal.ppr_options = ppr_options; + } + + old_period = tinfo->curr.period; + old_offset = tinfo->curr.offset; + old_ppr = tinfo->curr.ppr_options; + + if ((type & AHC_TRANS_CUR) != 0 + && (old_period != period + || old_offset != offset + || old_ppr != ppr_options)) { + u_int scsirate; + + update_needed++; + scsirate = tinfo->scsirate; + if ((ahc->features & AHC_ULTRA2) != 0) { + + scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC); + if (syncrate != NULL) { + scsirate |= syncrate->sxfr_u2; + if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) + scsirate |= ENABLE_CRC; + else + scsirate |= SINGLE_EDGE; + } + } else { + + scsirate &= ~(SXFR|SOFS); + /* + * Ensure Ultra mode is set properly for + * this target. + */ + tstate->ultraenb &= ~devinfo->target_mask; + if (syncrate != NULL) { + if (syncrate->sxfr & ULTRA_SXFR) { + tstate->ultraenb |= + devinfo->target_mask; + } + scsirate |= syncrate->sxfr & SXFR; + scsirate |= offset & SOFS; + } + if (active) { + u_int sxfrctl0; + + sxfrctl0 = ahc_inb(ahc, SXFRCTL0); + sxfrctl0 &= ~FAST20; + if (tstate->ultraenb & devinfo->target_mask) + sxfrctl0 |= FAST20; + ahc_outb(ahc, SXFRCTL0, sxfrctl0); + } + } + if (active) { + ahc_outb(ahc, SCSIRATE, scsirate); + if ((ahc->features & AHC_ULTRA2) != 0) + ahc_outb(ahc, SCSIOFFSET, offset); + } + + tinfo->scsirate = scsirate; + tinfo->curr.period = period; + tinfo->curr.offset = offset; + tinfo->curr.ppr_options = ppr_options; + + ahc_send_async(ahc, devinfo->channel, devinfo->target, + CAM_LUN_WILDCARD, AC_TRANSFER_NEG); + if (bootverbose) { + if (offset != 0) { + printk("%s: target %d synchronous at %sMHz%s, " + "offset = 0x%x\n", ahc_name(ahc), + devinfo->target, syncrate->rate, + (ppr_options & MSG_EXT_PPR_DT_REQ) + ? " DT" : "", offset); + } else { + printk("%s: target %d using " + "asynchronous transfers\n", + ahc_name(ahc), devinfo->target); + } + } + } + + update_needed += ahc_update_neg_request(ahc, devinfo, tstate, + tinfo, AHC_NEG_TO_GOAL); + + if (update_needed) + ahc_update_pending_scbs(ahc); +} + +/* + * Update the user/goal/curr tables of wide negotiation + * parameters as well as, in the case of a current or active update, + * any data structures on the host controller. In the case of an + * active update, the specified target is currently talking to us on + * the bus, so the transfer parameter update must take effect + * immediately. 
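+ * The same AHC_TRANS_* type bits used by ahc_set_syncrate() select
+ * which of the tables are updated here.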
+ */ +void +ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, + u_int width, u_int type, int paused) +{ + struct ahc_initiator_tinfo *tinfo; + struct ahc_tmode_tstate *tstate; + u_int oldwidth; + int active; + int update_needed; + + active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; + update_needed = 0; + tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, + devinfo->target, &tstate); + + if ((type & AHC_TRANS_USER) != 0) + tinfo->user.width = width; + + if ((type & AHC_TRANS_GOAL) != 0) + tinfo->goal.width = width; + + oldwidth = tinfo->curr.width; + if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) { + u_int scsirate; + + update_needed++; + scsirate = tinfo->scsirate; + scsirate &= ~WIDEXFER; + if (width == MSG_EXT_WDTR_BUS_16_BIT) + scsirate |= WIDEXFER; + + tinfo->scsirate = scsirate; + + if (active) + ahc_outb(ahc, SCSIRATE, scsirate); + + tinfo->curr.width = width; + + ahc_send_async(ahc, devinfo->channel, devinfo->target, + CAM_LUN_WILDCARD, AC_TRANSFER_NEG); + if (bootverbose) { + printk("%s: target %d using %dbit transfers\n", + ahc_name(ahc), devinfo->target, + 8 * (0x01 << width)); + } + } + + update_needed += ahc_update_neg_request(ahc, devinfo, tstate, + tinfo, AHC_NEG_TO_GOAL); + if (update_needed) + ahc_update_pending_scbs(ahc); +} + +/* + * Update the current state of tagged queuing for a given target. + */ +static void +ahc_set_tags(struct ahc_softc *ahc, struct scsi_cmnd *cmd, + struct ahc_devinfo *devinfo, ahc_queue_alg alg) +{ + struct scsi_device *sdev = cmd->device; + + ahc_platform_set_tags(ahc, sdev, devinfo, alg); + ahc_send_async(ahc, devinfo->channel, devinfo->target, + devinfo->lun, AC_TRANSFER_NEG); +} + +/* + * When the transfer settings for a connection change, update any + * in-transit SCBs to contain the new data so the hardware will + * be set correctly during future (re)selections. + */ +static void +ahc_update_pending_scbs(struct ahc_softc *ahc) +{ + struct scb *pending_scb; + int pending_scb_count; + int i; + int paused; + u_int saved_scbptr; + + /* + * Traverse the pending SCB list and ensure that all of the + * SCBs there have the proper settings. 
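+	 * Both the host resident hardware SCBs and, in the loop further
+	 * below, the copies down in the card's SCB array are brought in
+	 * line with the current negotiation state.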
+ */ + pending_scb_count = 0; + LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { + struct ahc_devinfo devinfo; + struct hardware_scb *pending_hscb; + struct ahc_initiator_tinfo *tinfo; + struct ahc_tmode_tstate *tstate; + + ahc_scb_devinfo(ahc, &devinfo, pending_scb); + tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, + devinfo.our_scsiid, + devinfo.target, &tstate); + pending_hscb = pending_scb->hscb; + pending_hscb->control &= ~ULTRAENB; + if ((tstate->ultraenb & devinfo.target_mask) != 0) + pending_hscb->control |= ULTRAENB; + pending_hscb->scsirate = tinfo->scsirate; + pending_hscb->scsioffset = tinfo->curr.offset; + if ((tstate->auto_negotiate & devinfo.target_mask) == 0 + && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { + pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; + pending_hscb->control &= ~MK_MESSAGE; + } + ahc_sync_scb(ahc, pending_scb, + BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); + pending_scb_count++; + } + + if (pending_scb_count == 0) + return; + + if (ahc_is_paused(ahc)) { + paused = 1; + } else { + paused = 0; + ahc_pause(ahc); + } + + saved_scbptr = ahc_inb(ahc, SCBPTR); + /* Ensure that the hscbs down on the card match the new information */ + for (i = 0; i < ahc->scb_data->maxhscbs; i++) { + struct hardware_scb *pending_hscb; + u_int control; + u_int scb_tag; + + ahc_outb(ahc, SCBPTR, i); + scb_tag = ahc_inb(ahc, SCB_TAG); + pending_scb = ahc_lookup_scb(ahc, scb_tag); + if (pending_scb == NULL) + continue; + + pending_hscb = pending_scb->hscb; + control = ahc_inb(ahc, SCB_CONTROL); + control &= ~(ULTRAENB|MK_MESSAGE); + control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE); + ahc_outb(ahc, SCB_CONTROL, control); + ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate); + ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset); + } + ahc_outb(ahc, SCBPTR, saved_scbptr); + + if (paused == 0) + ahc_unpause(ahc); +} + +/**************************** Pathing Information *****************************/ +static void +ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) +{ + u_int saved_scsiid; + role_t role; + int our_id; + + if (ahc_inb(ahc, SSTAT0) & TARGET) + role = ROLE_TARGET; + else + role = ROLE_INITIATOR; + + if (role == ROLE_TARGET + && (ahc->features & AHC_MULTI_TID) != 0 + && (ahc_inb(ahc, SEQ_FLAGS) + & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) { + /* We were selected, so pull our id from TARGIDIN */ + our_id = ahc_inb(ahc, TARGIDIN) & OID; + } else if ((ahc->features & AHC_ULTRA2) != 0) + our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; + else + our_id = ahc_inb(ahc, SCSIID) & OID; + + saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); + ahc_compile_devinfo(devinfo, + our_id, + SCSIID_TARGET(ahc, saved_scsiid), + ahc_inb(ahc, SAVED_LUN), + SCSIID_CHANNEL(ahc, saved_scsiid), + role); +} + +static const struct ahc_phase_table_entry* +ahc_lookup_phase_entry(int phase) +{ + const struct ahc_phase_table_entry *entry; + const struct ahc_phase_table_entry *last_entry; + + /* + * num_phases doesn't include the default entry which + * will be returned if the phase doesn't match. 
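+	 * If no entry matches, the loop below walks off the end and the
+	 * catch-all entry at ahc_phase_table[num_phases] is returned.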
+ */ + last_entry = &ahc_phase_table[num_phases]; + for (entry = ahc_phase_table; entry < last_entry; entry++) { + if (phase == entry->phase) + break; + } + return (entry); +} + +void +ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, + u_int lun, char channel, role_t role) +{ + devinfo->our_scsiid = our_id; + devinfo->target = target; + devinfo->lun = lun; + devinfo->target_offset = target; + devinfo->channel = channel; + devinfo->role = role; + if (channel == 'B') + devinfo->target_offset += 8; + devinfo->target_mask = (0x01 << devinfo->target_offset); +} + +void +ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) +{ + printk("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel, + devinfo->target, devinfo->lun); +} + +static void +ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, + struct scb *scb) +{ + role_t role; + int our_id; + + our_id = SCSIID_OUR_ID(scb->hscb->scsiid); + role = ROLE_INITIATOR; + if ((scb->flags & SCB_TARGET_SCB) != 0) + role = ROLE_TARGET; + ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb), + SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role); +} + + +/************************ Message Phase Processing ****************************/ +static void +ahc_assert_atn(struct ahc_softc *ahc) +{ + u_int scsisigo; + + scsisigo = ATNO; + if ((ahc->features & AHC_DT) == 0) + scsisigo |= ahc_inb(ahc, SCSISIGI); + ahc_outb(ahc, SCSISIGO, scsisigo); +} + +/* + * When an initiator transaction with the MK_MESSAGE flag either reconnects + * or enters the initial message out phase, we are interrupted. Fill our + * outgoing message buffer with the appropriate message and beging handing + * the message phase(s) manually. + */ +static void +ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, + struct scb *scb) +{ + /* + * To facilitate adding multiple messages together, + * each routine should increment the index and len + * variables instead of setting them explicitly. + */ + ahc->msgout_index = 0; + ahc->msgout_len = 0; + + if ((scb->flags & SCB_DEVICE_RESET) == 0 + && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) { + u_int identify_msg; + + identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); + if ((scb->hscb->control & DISCENB) != 0) + identify_msg |= MSG_IDENTIFY_DISCFLAG; + ahc->msgout_buf[ahc->msgout_index++] = identify_msg; + ahc->msgout_len++; + + if ((scb->hscb->control & TAG_ENB) != 0) { + ahc->msgout_buf[ahc->msgout_index++] = + scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); + ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag; + ahc->msgout_len += 2; + } + } + + if (scb->flags & SCB_DEVICE_RESET) { + ahc->msgout_buf[ahc->msgout_index++] = TARGET_RESET; + ahc->msgout_len++; + ahc_print_path(ahc, scb); + printk("Bus Device Reset Message Sent\n"); + /* + * Clear our selection hardware in advance of + * the busfree. We may have an entry in the waiting + * Q for this target, and we don't want to go about + * selecting while we handle the busfree and blow it + * away. + */ + ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); + } else if ((scb->flags & SCB_ABORT) != 0) { + if ((scb->hscb->control & TAG_ENB) != 0) + ahc->msgout_buf[ahc->msgout_index++] = ABORT_TASK; + else + ahc->msgout_buf[ahc->msgout_index++] = ABORT_TASK_SET; + ahc->msgout_len++; + ahc_print_path(ahc, scb); + printk("Abort%s Message Sent\n", + (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); + /* + * Clear our selection hardware in advance of + * the busfree. 
We may have an entry in the waiting + * Q for this target, and we don't want to go about + * selecting while we handle the busfree and blow it + * away. + */ + ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); + } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { + ahc_build_transfer_msg(ahc, devinfo); + } else { + printk("ahc_intr: AWAITING_MSG for an SCB that " + "does not have a waiting message\n"); + printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, + devinfo->target_mask); + panic("SCB = %d, SCB Control = %x, MSG_OUT = %x " + "SCB flags = %x", scb->hscb->tag, scb->hscb->control, + ahc_inb(ahc, MSG_OUT), scb->flags); + } + + /* + * Clear the MK_MESSAGE flag from the SCB so we aren't + * asked to send this message again. + */ + ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE); + scb->hscb->control &= ~MK_MESSAGE; + ahc->msgout_index = 0; + ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; +} + +/* + * Build an appropriate transfer negotiation message for the + * currently active target. + */ +static void +ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) +{ + /* + * We need to initiate transfer negotiations. + * If our current and goal settings are identical, + * we want to renegotiate due to a check condition. + */ + struct ahc_initiator_tinfo *tinfo; + struct ahc_tmode_tstate *tstate; + const struct ahc_syncrate *rate; + int dowide; + int dosync; + int doppr; + u_int period; + u_int ppr_options; + u_int offset; + + tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, + devinfo->target, &tstate); + /* + * Filter our period based on the current connection. + * If we can't perform DT transfers on this segment (not in LVD + * mode for instance), then our decision to issue a PPR message + * may change. + */ + period = tinfo->goal.period; + offset = tinfo->goal.offset; + ppr_options = tinfo->goal.ppr_options; + /* Target initiated PPR is not allowed in the SCSI spec */ + if (devinfo->role == ROLE_TARGET) + ppr_options = 0; + rate = ahc_devlimited_syncrate(ahc, tinfo, &period, + &ppr_options, devinfo->role); + dowide = tinfo->curr.width != tinfo->goal.width; + dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; + /* + * Only use PPR if we have options that need it, even if the device + * claims to support it. There might be an expander in the way + * that doesn't. + */ + doppr = ppr_options != 0; + + if (!dowide && !dosync && !doppr) { + dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; + dosync = tinfo->goal.offset != 0; + } + + if (!dowide && !dosync && !doppr) { + /* + * Force async with a WDTR message if we have a wide bus, + * or just issue an SDTR with a 0 offset. + */ + if ((ahc->features & AHC_WIDE) != 0) + dowide = 1; + else + dosync = 1; + + if (bootverbose) { + ahc_print_devinfo(ahc, devinfo); + printk("Ensuring async\n"); + } + } + + /* Target initiated PPR is not allowed in the SCSI spec */ + if (devinfo->role == ROLE_TARGET) + doppr = 0; + + /* + * Both the PPR message and SDTR message require the + * goal syncrate to be limited to what the target device + * is capable of handling (based on whether an LVD->SE + * expander is on the bus), so combine these two cases. + * Regardless, guarantee that if we are using WDTR and SDTR + * messages that WDTR comes first. + */ + if (doppr || (dosync && !dowide)) { + + offset = tinfo->goal.offset; + ahc_validate_offset(ahc, tinfo, rate, &offset, + doppr ? 
tinfo->goal.width + : tinfo->curr.width, + devinfo->role); + if (doppr) { + ahc_construct_ppr(ahc, devinfo, period, offset, + tinfo->goal.width, ppr_options); + } else { + ahc_construct_sdtr(ahc, devinfo, period, offset); + } + } else { + ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width); + } +} + +/* + * Build a synchronous negotiation message in our message + * buffer based on the input parameters. + */ +static void +ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, + u_int period, u_int offset) +{ + if (offset == 0) + period = AHC_ASYNC_XFER_PERIOD; + ahc->msgout_index += spi_populate_sync_msg( + ahc->msgout_buf + ahc->msgout_index, period, offset); + ahc->msgout_len += 5; + if (bootverbose) { + printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", + ahc_name(ahc), devinfo->channel, devinfo->target, + devinfo->lun, period, offset); + } +} + +/* + * Build a wide negotiation message in our message + * buffer based on the input parameters. + */ +static void +ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, + u_int bus_width) +{ + ahc->msgout_index += spi_populate_width_msg( + ahc->msgout_buf + ahc->msgout_index, bus_width); + ahc->msgout_len += 4; + if (bootverbose) { + printk("(%s:%c:%d:%d): Sending WDTR %x\n", + ahc_name(ahc), devinfo->channel, devinfo->target, + devinfo->lun, bus_width); + } +} + +/* + * Build a parallel protocol request message in our message + * buffer based on the input parameters. + */ +static void +ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, + u_int period, u_int offset, u_int bus_width, + u_int ppr_options) +{ + if (offset == 0) + period = AHC_ASYNC_XFER_PERIOD; + ahc->msgout_index += spi_populate_ppr_msg( + ahc->msgout_buf + ahc->msgout_index, period, offset, + bus_width, ppr_options); + ahc->msgout_len += 8; + if (bootverbose) { + printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " + "offset %x, ppr_options %x\n", ahc_name(ahc), + devinfo->channel, devinfo->target, devinfo->lun, + bus_width, period, offset, ppr_options); + } +} + +/* + * Clear any active message state. + */ +static void +ahc_clear_msg_state(struct ahc_softc *ahc) +{ + ahc->msgout_len = 0; + ahc->msgin_index = 0; + ahc->msg_type = MSG_TYPE_NONE; + if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) { + /* + * The target didn't care to respond to our + * message request, so clear ATN. + */ + ahc_outb(ahc, CLRSINT1, CLRATNO); + } + ahc_outb(ahc, MSG_OUT, NOP); + ahc_outb(ahc, SEQ_FLAGS2, + ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); +} + +static void +ahc_handle_proto_violation(struct ahc_softc *ahc) +{ + struct ahc_devinfo devinfo; + struct scb *scb; + u_int scbid; + u_int seq_flags; + u_int curphase; + u_int lastphase; + int found; + + ahc_fetch_devinfo(ahc, &devinfo); + scbid = ahc_inb(ahc, SCB_TAG); + scb = ahc_lookup_scb(ahc, scbid); + seq_flags = ahc_inb(ahc, SEQ_FLAGS); + curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; + lastphase = ahc_inb(ahc, LASTPHASE); + if ((seq_flags & NOT_IDENTIFIED) != 0) { + + /* + * The reconnecting target either did not send an + * identify message, or did, but we didn't find an SCB + * to match. + */ + ahc_print_devinfo(ahc, &devinfo); + printk("Target did not send an IDENTIFY message. " + "LASTPHASE = 0x%x.\n", lastphase); + scb = NULL; + } else if (scb == NULL) { + /* + * We don't seem to have an SCB active for this + * transaction. Print an error and reset the bus. 
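+		 * The goto below funnels this case into the same bus reset
+		 * path used when the target ignores our ATN.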
+ */ + ahc_print_devinfo(ahc, &devinfo); + printk("No SCB found during protocol violation\n"); + goto proto_violation_reset; + } else { + ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL); + if ((seq_flags & NO_CDB_SENT) != 0) { + ahc_print_path(ahc, scb); + printk("No or incomplete CDB sent to device.\n"); + } else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) { + /* + * The target never bothered to provide status to + * us prior to completing the command. Since we don't + * know the disposition of this command, we must attempt + * to abort it. Assert ATN and prepare to send an abort + * message. + */ + ahc_print_path(ahc, scb); + printk("Completed command without status.\n"); + } else { + ahc_print_path(ahc, scb); + printk("Unknown protocol violation.\n"); + ahc_dump_card_state(ahc); + } + } + if ((lastphase & ~P_DATAIN_DT) == 0 + || lastphase == P_COMMAND) { +proto_violation_reset: + /* + * Target either went directly to data/command + * phase or didn't respond to our ATN. + * The only safe thing to do is to blow + * it away with a bus reset. + */ + found = ahc_reset_channel(ahc, 'A', TRUE); + printk("%s: Issued Channel %c Bus Reset. " + "%d SCBs aborted\n", ahc_name(ahc), 'A', found); + } else { + /* + * Leave the selection hardware off in case + * this abort attempt will affect yet to + * be sent commands. + */ + ahc_outb(ahc, SCSISEQ, + ahc_inb(ahc, SCSISEQ) & ~ENSELO); + ahc_assert_atn(ahc); + ahc_outb(ahc, MSG_OUT, HOST_MSG); + if (scb == NULL) { + ahc_print_devinfo(ahc, &devinfo); + ahc->msgout_buf[0] = ABORT_TASK; + ahc->msgout_len = 1; + ahc->msgout_index = 0; + ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; + } else { + ahc_print_path(ahc, scb); + scb->flags |= SCB_ABORT; + } + printk("Protocol violation %s. Attempting to abort.\n", + ahc_lookup_phase_entry(curphase)->phasemsg); + } +} + +/* + * Manual message loop handler. + */ +static void +ahc_handle_message_phase(struct ahc_softc *ahc) +{ + struct ahc_devinfo devinfo; + u_int bus_phase; + int end_session; + + ahc_fetch_devinfo(ahc, &devinfo); + end_session = FALSE; + bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; + +reswitch: + switch (ahc->msg_type) { + case MSG_TYPE_INITIATOR_MSGOUT: + { + int lastbyte; + int phasemis; + int msgdone; + + if (ahc->msgout_len == 0) + panic("HOST_MSG_LOOP interrupt with no active message"); + +#ifdef AHC_DEBUG + if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { + ahc_print_devinfo(ahc, &devinfo); + printk("INITIATOR_MSG_OUT"); + } +#endif + phasemis = bus_phase != P_MESGOUT; + if (phasemis) { +#ifdef AHC_DEBUG + if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { + printk(" PHASEMIS %s\n", + ahc_lookup_phase_entry(bus_phase) + ->phasemsg); + } +#endif + if (bus_phase == P_MESGIN) { + /* + * Change gears and see if + * this messages is of interest to + * us or should be passed back to + * the sequencer. + */ + ahc_outb(ahc, CLRSINT1, CLRATNO); + ahc->send_msg_perror = FALSE; + ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; + ahc->msgin_index = 0; + goto reswitch; + } + end_session = TRUE; + break; + } + + if (ahc->send_msg_perror) { + ahc_outb(ahc, CLRSINT1, CLRATNO); + ahc_outb(ahc, CLRSINT1, CLRREQINIT); +#ifdef AHC_DEBUG + if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) + printk(" byte 0x%x\n", ahc->send_msg_perror); +#endif + ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); + break; + } + + msgdone = ahc->msgout_index == ahc->msgout_len; + if (msgdone) { + /* + * The target has requested a retry. + * Re-assert ATN, reset our message index to + * 0, and try again. 
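+ * msgout_index already equals msgout_len here, so the only way
+ * to make progress is to start over from byte 0 with ATN raised.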
+ */ + ahc->msgout_index = 0; + ahc_assert_atn(ahc); + } + + lastbyte = ahc->msgout_index == (ahc->msgout_len - 1); + if (lastbyte) { + /* Last byte is signified by dropping ATN */ + ahc_outb(ahc, CLRSINT1, CLRATNO); + } + + /* + * Clear our interrupt status and present + * the next byte on the bus. + */ + ahc_outb(ahc, CLRSINT1, CLRREQINIT); +#ifdef AHC_DEBUG + if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) + printk(" byte 0x%x\n", + ahc->msgout_buf[ahc->msgout_index]); +#endif + ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); + break; + } + case MSG_TYPE_INITIATOR_MSGIN: + { + int phasemis; + int message_done; + +#ifdef AHC_DEBUG + if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { + ahc_print_devinfo(ahc, &devinfo); + printk("INITIATOR_MSG_IN"); + } +#endif + phasemis = bus_phase != P_MESGIN; + if (phasemis) { +#ifdef AHC_DEBUG + if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { + printk(" PHASEMIS %s\n", + ahc_lookup_phase_entry(bus_phase) + ->phasemsg); + } +#endif + ahc->msgin_index = 0; + if (bus_phase == P_MESGOUT + && (ahc->send_msg_perror == TRUE + || (ahc->msgout_len != 0 + && ahc->msgout_index == 0))) { + ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; + goto reswitch; + } + end_session = TRUE; + break; + } + + /* Pull the byte in without acking it */ + ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL); +#ifdef AHC_DEBUG + if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) + printk(" byte 0x%x\n", + ahc->msgin_buf[ahc->msgin_index]); +#endif + + message_done = ahc_parse_msg(ahc, &devinfo); + + if (message_done) { + /* + * Clear our incoming message buffer in case there + * is another message following this one. + */ + ahc->msgin_index = 0; + + /* + * If this message illicited a response, + * assert ATN so the target takes us to the + * message out phase. + */ + if (ahc->msgout_len != 0) { +#ifdef AHC_DEBUG + if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { + ahc_print_devinfo(ahc, &devinfo); + printk("Asserting ATN for response\n"); + } +#endif + ahc_assert_atn(ahc); + } + } else + ahc->msgin_index++; + + if (message_done == MSGLOOP_TERMINATED) { + end_session = TRUE; + } else { + /* Ack the byte */ + ahc_outb(ahc, CLRSINT1, CLRREQINIT); + ahc_inb(ahc, SCSIDATL); + } + break; + } + case MSG_TYPE_TARGET_MSGIN: + { + int msgdone; + int msgout_request; + + if (ahc->msgout_len == 0) + panic("Target MSGIN with no active message"); + + /* + * If we interrupted a mesgout session, the initiator + * will not know this until our first REQ. So, we + * only honor mesgout requests after we've sent our + * first byte. + */ + if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0 + && ahc->msgout_index > 0) + msgout_request = TRUE; + else + msgout_request = FALSE; + + if (msgout_request) { + + /* + * Change gears and see if + * this messages is of interest to + * us or should be passed back to + * the sequencer. + */ + ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; + ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO); + ahc->msgin_index = 0; + /* Dummy read to REQ for first byte */ + ahc_inb(ahc, SCSIDATL); + ahc_outb(ahc, SXFRCTL0, + ahc_inb(ahc, SXFRCTL0) | SPIOEN); + break; + } + + msgdone = ahc->msgout_index == ahc->msgout_len; + if (msgdone) { + ahc_outb(ahc, SXFRCTL0, + ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); + end_session = TRUE; + break; + } + + /* + * Present the next byte on the bus. 
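+ * As with the dummy read used to REQ the first byte, accessing
+ * SCSIDATL while SPIOEN is set is what drives the REQ for this byte.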
+ */ + ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); + ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); + break; + } + case MSG_TYPE_TARGET_MSGOUT: + { + int lastbyte; + int msgdone; + + /* + * The initiator signals that this is + * the last byte by dropping ATN. + */ + lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0; + + /* + * Read the latched byte, but turn off SPIOEN first + * so that we don't inadvertently cause a REQ for the + * next byte. + */ + ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); + ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL); + msgdone = ahc_parse_msg(ahc, &devinfo); + if (msgdone == MSGLOOP_TERMINATED) { + /* + * The message is *really* done in that it caused + * us to go to bus free. The sequencer has already + * been reset at this point, so pull the ejection + * handle. + */ + return; + } + + ahc->msgin_index++; + + /* + * XXX Read spec about initiator dropping ATN too soon + * and use msgdone to detect it. + */ + if (msgdone == MSGLOOP_MSGCOMPLETE) { + ahc->msgin_index = 0; + + /* + * If this message illicited a response, transition + * to the Message in phase and send it. + */ + if (ahc->msgout_len != 0) { + ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO); + ahc_outb(ahc, SXFRCTL0, + ahc_inb(ahc, SXFRCTL0) | SPIOEN); + ahc->msg_type = MSG_TYPE_TARGET_MSGIN; + ahc->msgin_index = 0; + break; + } + } + + if (lastbyte) + end_session = TRUE; + else { + /* Ask for the next byte. */ + ahc_outb(ahc, SXFRCTL0, + ahc_inb(ahc, SXFRCTL0) | SPIOEN); + } + + break; + } + default: + panic("Unknown REQINIT message type"); + } + + if (end_session) { + ahc_clear_msg_state(ahc); + ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP); + } else + ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP); +} + +/* + * See if we sent a particular extended message to the target. + * If "full" is true, return true only if the target saw the full + * message. If "full" is false, return true if the target saw at + * least the first byte of the message. + */ +static int +ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full) +{ + int found; + u_int index; + + found = FALSE; + index = 0; + + while (index < ahc->msgout_len) { + if (ahc->msgout_buf[index] == EXTENDED_MESSAGE) { + u_int end_index; + + end_index = index + 1 + ahc->msgout_buf[index + 1]; + if (ahc->msgout_buf[index+2] == msgval + && type == AHCMSG_EXT) { + + if (full) { + if (ahc->msgout_index > end_index) + found = TRUE; + } else if (ahc->msgout_index > index) + found = TRUE; + } + index = end_index; + } else if (ahc->msgout_buf[index] >= SIMPLE_QUEUE_TAG + && ahc->msgout_buf[index] <= IGNORE_WIDE_RESIDUE) { + + /* Skip tag type and tag id or residue param*/ + index += 2; + } else { + /* Single byte message */ + if (type == AHCMSG_1B + && ahc->msgout_buf[index] == msgval + && ahc->msgout_index > index) + found = TRUE; + index++; + } + + if (found) + break; + } + return (found); +} + +/* + * Wait for a complete incoming message, parse it, and respond accordingly. + */ +static int +ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) +{ + struct ahc_initiator_tinfo *tinfo; + struct ahc_tmode_tstate *tstate; + int reject; + int done; + int response; + u_int targ_scsirate; + + done = MSGLOOP_IN_PROG; + response = FALSE; + reject = FALSE; + tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, + devinfo->target, &tstate); + targ_scsirate = tinfo->scsirate; + + /* + * Parse as much of the message as is available, + * rejecting it if we don't support it. 
When + * the entire message is available and has been + * handled, return MSGLOOP_MSGCOMPLETE, indicating + * that we have parsed an entire message. + * + * In the case of extended messages, we accept the length + * byte outright and perform more checking once we know the + * extended message type. + */ + switch (ahc->msgin_buf[0]) { + case DISCONNECT: + case SAVE_POINTERS: + case COMMAND_COMPLETE: + case RESTORE_POINTERS: + case IGNORE_WIDE_RESIDUE: + /* + * End our message loop as these are messages + * the sequencer handles on its own. + */ + done = MSGLOOP_TERMINATED; + break; + case MESSAGE_REJECT: + response = ahc_handle_msg_reject(ahc, devinfo); + fallthrough; + case NOP: + done = MSGLOOP_MSGCOMPLETE; + break; + case EXTENDED_MESSAGE: + { + /* Wait for enough of the message to begin validation */ + if (ahc->msgin_index < 2) + break; + switch (ahc->msgin_buf[2]) { + case EXTENDED_SDTR: + { + const struct ahc_syncrate *syncrate; + u_int period; + u_int ppr_options; + u_int offset; + u_int saved_offset; + + if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { + reject = TRUE; + break; + } + + /* + * Wait until we have both args before validating + * and acting on this message. + * + * Add one to MSG_EXT_SDTR_LEN to account for + * the extended message preamble. + */ + if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) + break; + + period = ahc->msgin_buf[3]; + ppr_options = 0; + saved_offset = offset = ahc->msgin_buf[4]; + syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, + &ppr_options, + devinfo->role); + ahc_validate_offset(ahc, tinfo, syncrate, &offset, + targ_scsirate & WIDEXFER, + devinfo->role); + if (bootverbose) { + printk("(%s:%c:%d:%d): Received " + "SDTR period %x, offset %x\n\t" + "Filtered to period %x, offset %x\n", + ahc_name(ahc), devinfo->channel, + devinfo->target, devinfo->lun, + ahc->msgin_buf[3], saved_offset, + period, offset); + } + ahc_set_syncrate(ahc, devinfo, + syncrate, period, + offset, ppr_options, + AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, + /*paused*/TRUE); + + /* + * See if we initiated Sync Negotiation + * and didn't have to fall down to async + * transfers. + */ + if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_SDTR, TRUE)) { + /* We started it */ + if (saved_offset != offset) { + /* Went too low - force async */ + reject = TRUE; + } + } else { + /* + * Send our own SDTR in reply + */ + if (bootverbose + && devinfo->role == ROLE_INITIATOR) { + printk("(%s:%c:%d:%d): Target " + "Initiated SDTR\n", + ahc_name(ahc), devinfo->channel, + devinfo->target, devinfo->lun); + } + ahc->msgout_index = 0; + ahc->msgout_len = 0; + ahc_construct_sdtr(ahc, devinfo, + period, offset); + ahc->msgout_index = 0; + response = TRUE; + } + done = MSGLOOP_MSGCOMPLETE; + break; + } + case EXTENDED_WDTR: + { + u_int bus_width; + u_int saved_width; + u_int sending_reply; + + sending_reply = FALSE; + if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { + reject = TRUE; + break; + } + + /* + * Wait until we have our arg before validating + * and acting on this message. + * + * Add one to MSG_EXT_WDTR_LEN to account for + * the extended message preamble. 
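+ * Once complete, bytes 0-2 hold the extended message preamble
+ * (0x01, length, EXTENDED_WDTR) and byte 3 carries the bus width
+ * exponent that is consumed below.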
+ */ + if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) + break; + + bus_width = ahc->msgin_buf[3]; + saved_width = bus_width; + ahc_validate_width(ahc, tinfo, &bus_width, + devinfo->role); + if (bootverbose) { + printk("(%s:%c:%d:%d): Received WDTR " + "%x filtered to %x\n", + ahc_name(ahc), devinfo->channel, + devinfo->target, devinfo->lun, + saved_width, bus_width); + } + + if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_WDTR, TRUE)) { + /* + * Don't send a WDTR back to the + * target, since we asked first. + * If the width went higher than our + * request, reject it. + */ + if (saved_width > bus_width) { + reject = TRUE; + printk("(%s:%c:%d:%d): requested %dBit " + "transfers. Rejecting...\n", + ahc_name(ahc), devinfo->channel, + devinfo->target, devinfo->lun, + 8 * (0x01 << bus_width)); + bus_width = 0; + } + } else { + /* + * Send our own WDTR in reply + */ + if (bootverbose + && devinfo->role == ROLE_INITIATOR) { + printk("(%s:%c:%d:%d): Target " + "Initiated WDTR\n", + ahc_name(ahc), devinfo->channel, + devinfo->target, devinfo->lun); + } + ahc->msgout_index = 0; + ahc->msgout_len = 0; + ahc_construct_wdtr(ahc, devinfo, bus_width); + ahc->msgout_index = 0; + response = TRUE; + sending_reply = TRUE; + } + /* + * After a wide message, we are async, but + * some devices don't seem to honor this portion + * of the spec. Force a renegotiation of the + * sync component of our transfer agreement even + * if our goal is async. By updating our width + * after forcing the negotiation, we avoid + * renegotiating for width. + */ + ahc_update_neg_request(ahc, devinfo, tstate, + tinfo, AHC_NEG_ALWAYS); + ahc_set_width(ahc, devinfo, bus_width, + AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, + /*paused*/TRUE); + if (sending_reply == FALSE && reject == FALSE) { + + /* + * We will always have an SDTR to send. + */ + ahc->msgout_index = 0; + ahc->msgout_len = 0; + ahc_build_transfer_msg(ahc, devinfo); + ahc->msgout_index = 0; + response = TRUE; + } + done = MSGLOOP_MSGCOMPLETE; + break; + } + case EXTENDED_PPR: + { + const struct ahc_syncrate *syncrate; + u_int period; + u_int offset; + u_int bus_width; + u_int ppr_options; + u_int saved_width; + u_int saved_offset; + u_int saved_ppr_options; + + if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) { + reject = TRUE; + break; + } + + /* + * Wait until we have all args before validating + * and acting on this message. + * + * Add one to MSG_EXT_PPR_LEN to account for + * the extended message preamble. + */ + if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1)) + break; + + period = ahc->msgin_buf[3]; + offset = ahc->msgin_buf[5]; + bus_width = ahc->msgin_buf[6]; + saved_width = bus_width; + ppr_options = ahc->msgin_buf[7]; + /* + * According to the spec, a DT only + * period factor with no DT option + * set implies async. + */ + if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 + && period == 9) + offset = 0; + saved_ppr_options = ppr_options; + saved_offset = offset; + + /* + * Mask out any options we don't support + * on any controller. Transfer options are + * only available if we are negotiating wide. 
+ */ + ppr_options &= MSG_EXT_PPR_DT_REQ; + if (bus_width == 0) + ppr_options = 0; + + ahc_validate_width(ahc, tinfo, &bus_width, + devinfo->role); + syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, + &ppr_options, + devinfo->role); + ahc_validate_offset(ahc, tinfo, syncrate, + &offset, bus_width, + devinfo->role); + + if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_PPR, TRUE)) { + /* + * If we are unable to do any of the + * requested options (we went too low), + * then we'll have to reject the message. + */ + if (saved_width > bus_width + || saved_offset != offset + || saved_ppr_options != ppr_options) { + reject = TRUE; + period = 0; + offset = 0; + bus_width = 0; + ppr_options = 0; + syncrate = NULL; + } + } else { + if (devinfo->role != ROLE_TARGET) + printk("(%s:%c:%d:%d): Target " + "Initiated PPR\n", + ahc_name(ahc), devinfo->channel, + devinfo->target, devinfo->lun); + else + printk("(%s:%c:%d:%d): Initiator " + "Initiated PPR\n", + ahc_name(ahc), devinfo->channel, + devinfo->target, devinfo->lun); + ahc->msgout_index = 0; + ahc->msgout_len = 0; + ahc_construct_ppr(ahc, devinfo, period, offset, + bus_width, ppr_options); + ahc->msgout_index = 0; + response = TRUE; + } + if (bootverbose) { + printk("(%s:%c:%d:%d): Received PPR width %x, " + "period %x, offset %x,options %x\n" + "\tFiltered to width %x, period %x, " + "offset %x, options %x\n", + ahc_name(ahc), devinfo->channel, + devinfo->target, devinfo->lun, + saved_width, ahc->msgin_buf[3], + saved_offset, saved_ppr_options, + bus_width, period, offset, ppr_options); + } + ahc_set_width(ahc, devinfo, bus_width, + AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, + /*paused*/TRUE); + ahc_set_syncrate(ahc, devinfo, + syncrate, period, + offset, ppr_options, + AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, + /*paused*/TRUE); + done = MSGLOOP_MSGCOMPLETE; + break; + } + default: + /* Unknown extended message. Reject it. */ + reject = TRUE; + break; + } + break; + } +#ifdef AHC_TARGET_MODE + case TARGET_RESET: + ahc_handle_devreset(ahc, devinfo, + CAM_BDR_SENT, + "Bus Device Reset Received", + /*verbose_level*/0); + ahc_restart(ahc); + done = MSGLOOP_TERMINATED; + break; + case ABORT_TASK: + case ABORT_TASK_SET: + case CLEAR_QUEUE_TASK_SET: + { + int tag; + + /* Target mode messages */ + if (devinfo->role != ROLE_TARGET) { + reject = TRUE; + break; + } + tag = SCB_LIST_NULL; + if (ahc->msgin_buf[0] == ABORT_TASK) + tag = ahc_inb(ahc, INITIATOR_TAG); + ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, + devinfo->lun, tag, ROLE_TARGET, + CAM_REQ_ABORTED); + + tstate = ahc->enabled_targets[devinfo->our_scsiid]; + if (tstate != NULL) { + struct ahc_tmode_lstate* lstate; + + lstate = tstate->enabled_luns[devinfo->lun]; + if (lstate != NULL) { + ahc_queue_lstate_event(ahc, lstate, + devinfo->our_scsiid, + ahc->msgin_buf[0], + /*arg*/tag); + ahc_send_lstate_events(ahc, lstate); + } + } + ahc_restart(ahc); + done = MSGLOOP_TERMINATED; + break; + } +#endif + case TERMINATE_IO_PROC: + default: + reject = TRUE; + break; + } + + if (reject) { + /* + * Setup to reject the message. + */ + ahc->msgout_index = 0; + ahc->msgout_len = 1; + ahc->msgout_buf[0] = MESSAGE_REJECT; + done = MSGLOOP_MSGCOMPLETE; + response = TRUE; + } + + if (done != MSGLOOP_IN_PROG && !response) + /* Clear the outgoing message buffer */ + ahc->msgout_len = 0; + + return (done); +} + +/* + * Process a message reject message. 
+ */ +static int +ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) +{ + /* + * What we care about here is if we had an + * outstanding SDTR or WDTR message for this + * target. If we did, this is a signal that + * the target is refusing negotiation. + */ + struct scb *scb; + struct ahc_initiator_tinfo *tinfo; + struct ahc_tmode_tstate *tstate; + u_int scb_index; + u_int last_msg; + int response = 0; + + scb_index = ahc_inb(ahc, SCB_TAG); + scb = ahc_lookup_scb(ahc, scb_index); + tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, + devinfo->our_scsiid, + devinfo->target, &tstate); + /* Might be necessary */ + last_msg = ahc_inb(ahc, LAST_MSG); + + if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_PPR, /*full*/FALSE)) { + /* + * Target does not support the PPR message. + * Attempt to negotiate SPI-2 style. + */ + if (bootverbose) { + printk("(%s:%c:%d:%d): PPR Rejected. " + "Trying WDTR/SDTR\n", + ahc_name(ahc), devinfo->channel, + devinfo->target, devinfo->lun); + } + tinfo->goal.ppr_options = 0; + tinfo->curr.transport_version = 2; + tinfo->goal.transport_version = 2; + ahc->msgout_index = 0; + ahc->msgout_len = 0; + ahc_build_transfer_msg(ahc, devinfo); + ahc->msgout_index = 0; + response = 1; + } else if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_WDTR, /*full*/FALSE)) { + + /* note 8bit xfers */ + printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using " + "8bit transfers\n", ahc_name(ahc), + devinfo->channel, devinfo->target, devinfo->lun); + ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, + /*paused*/TRUE); + /* + * No need to clear the sync rate. If the target + * did not accept the command, our syncrate is + * unaffected. If the target started the negotiation, + * but rejected our response, we already cleared the + * sync rate before sending our WDTR. + */ + if (tinfo->goal.offset != tinfo->curr.offset) { + + /* Start the sync negotiation */ + ahc->msgout_index = 0; + ahc->msgout_len = 0; + ahc_build_transfer_msg(ahc, devinfo); + ahc->msgout_index = 0; + response = 1; + } + } else if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_SDTR, /*full*/FALSE)) { + /* note asynch xfers and clear flag */ + ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, + /*offset*/0, /*ppr_options*/0, + AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, + /*paused*/TRUE); + printk("(%s:%c:%d:%d): refuses synchronous negotiation. " + "Using asynchronous transfers\n", + ahc_name(ahc), devinfo->channel, + devinfo->target, devinfo->lun); + } else if ((scb->hscb->control & SIMPLE_QUEUE_TAG) != 0) { + int tag_type; + int mask; + + tag_type = (scb->hscb->control & SIMPLE_QUEUE_TAG); + + if (tag_type == SIMPLE_QUEUE_TAG) { + printk("(%s:%c:%d:%d): refuses tagged commands. " + "Performing non-tagged I/O\n", ahc_name(ahc), + devinfo->channel, devinfo->target, devinfo->lun); + ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_NONE); + mask = ~0x23; + } else { + printk("(%s:%c:%d:%d): refuses %s tagged commands. " + "Performing simple queue tagged I/O only\n", + ahc_name(ahc), devinfo->channel, devinfo->target, + devinfo->lun, tag_type == ORDERED_QUEUE_TAG + ? "ordered" : "head of queue"); + ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_BASIC); + mask = ~0x03; + } + + /* + * Resend the identify for this CCB as the target + * may believe that the selection is invalid otherwise. 
+ */ + ahc_outb(ahc, SCB_CONTROL, + ahc_inb(ahc, SCB_CONTROL) & mask); + scb->hscb->control &= mask; + ahc_set_transaction_tag(scb, /*enabled*/FALSE, + /*type*/SIMPLE_QUEUE_TAG); + ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG); + ahc_assert_atn(ahc); + + /* + * This transaction is now at the head of + * the untagged queue for this target. + */ + if ((ahc->flags & AHC_SCB_BTT) == 0) { + struct scb_tailq *untagged_q; + + untagged_q = + &(ahc->untagged_queues[devinfo->target_offset]); + TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe); + scb->flags |= SCB_UNTAGGEDQ; + } + ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), + scb->hscb->tag); + + /* + * Requeue all tagged commands for this target + * currently in our possession so they can be + * converted to untagged commands. + */ + ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), + SCB_GET_CHANNEL(ahc, scb), + SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, + ROLE_INITIATOR, CAM_REQUEUE_REQ, + SEARCH_COMPLETE); + } else { + /* + * Otherwise, we ignore it. + */ + printk("%s:%c:%d: Message reject for %x -- ignored\n", + ahc_name(ahc), devinfo->channel, devinfo->target, + last_msg); + } + return (response); +} + +/* + * Process an ingnore wide residue message. + */ +static void +ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) +{ + u_int scb_index; + struct scb *scb; + + scb_index = ahc_inb(ahc, SCB_TAG); + scb = ahc_lookup_scb(ahc, scb_index); + /* + * XXX Actually check data direction in the sequencer? + * Perhaps add datadir to some spare bits in the hscb? + */ + if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0 + || ahc_get_transfer_dir(scb) != CAM_DIR_IN) { + /* + * Ignore the message if we haven't + * seen an appropriate data phase yet. + */ + } else { + /* + * If the residual occurred on the last + * transfer and the transfer request was + * expected to end on an odd count, do + * nothing. Otherwise, subtract a byte + * and update the residual count accordingly. + */ + uint32_t sgptr; + + sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR); + if ((sgptr & SG_LIST_NULL) != 0 + && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) { + /* + * If the residual occurred on the last + * transfer and the transfer request was + * expected to end on an odd count, do + * nothing. + */ + } else { + struct ahc_dma_seg *sg; + uint32_t data_cnt; + uint32_t data_addr; + uint32_t sglen; + + /* Pull in all of the sgptr */ + sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR); + data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT); + + if ((sgptr & SG_LIST_NULL) != 0) { + /* + * The residual data count is not updated + * for the command run to completion case. + * Explicitly zero the count. + */ + data_cnt &= ~AHC_SG_LEN_MASK; + } + + data_addr = ahc_inl(ahc, SHADDR); + + data_cnt += 1; + data_addr -= 1; + sgptr &= SG_PTR_MASK; + + sg = ahc_sg_bus_to_virt(scb, sgptr); + + /* + * The residual sg ptr points to the next S/G + * to load so we must go back one. + */ + sg--; + sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; + if (sg != scb->sg_list + && sglen < (data_cnt & AHC_SG_LEN_MASK)) { + + sg--; + sglen = ahc_le32toh(sg->len); + /* + * Preserve High Address and SG_LIST bits + * while setting the count to 1. + */ + data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK)); + data_addr = ahc_le32toh(sg->addr) + + (sglen & AHC_SG_LEN_MASK) - 1; + + /* + * Increment sg so it points to the + * "next" sg. 
+ */ + sg++; + sgptr = ahc_sg_virt_to_bus(scb, sg); + } + ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr); + ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt); + /* + * Toggle the "oddness" of the transfer length + * to handle this mid-transfer ignore wide + * residue. This ensures that the oddness is + * correct for subsequent data transfers. + */ + ahc_outb(ahc, SCB_LUN, + ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD); + } + } +} + + +/* + * Reinitialize the data pointers for the active transfer + * based on its current residual. + */ +static void +ahc_reinitialize_dataptrs(struct ahc_softc *ahc) +{ + struct scb *scb; + struct ahc_dma_seg *sg; + u_int scb_index; + uint32_t sgptr; + uint32_t resid; + uint32_t dataptr; + + scb_index = ahc_inb(ahc, SCB_TAG); + scb = ahc_lookup_scb(ahc, scb_index); + sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) + | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) + | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8) + | ahc_inb(ahc, SCB_RESIDUAL_SGPTR); + + sgptr &= SG_PTR_MASK; + sg = ahc_sg_bus_to_virt(scb, sgptr); + + /* The residual sg_ptr always points to the next sg */ + sg--; + + resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16) + | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8) + | ahc_inb(ahc, SCB_RESIDUAL_DATACNT); + + dataptr = ahc_le32toh(sg->addr) + + (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK) + - resid; + if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { + u_int dscommand1; + + dscommand1 = ahc_inb(ahc, DSCOMMAND1); + ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); + ahc_outb(ahc, HADDR, + (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS); + ahc_outb(ahc, DSCOMMAND1, dscommand1); + } + ahc_outb(ahc, HADDR + 3, dataptr >> 24); + ahc_outb(ahc, HADDR + 2, dataptr >> 16); + ahc_outb(ahc, HADDR + 1, dataptr >> 8); + ahc_outb(ahc, HADDR, dataptr); + ahc_outb(ahc, HCNT + 2, resid >> 16); + ahc_outb(ahc, HCNT + 1, resid >> 8); + ahc_outb(ahc, HCNT, resid); + if ((ahc->features & AHC_ULTRA2) == 0) { + ahc_outb(ahc, STCNT + 2, resid >> 16); + ahc_outb(ahc, STCNT + 1, resid >> 8); + ahc_outb(ahc, STCNT, resid); + } +} + +/* + * Handle the effects of issuing a bus device reset message. + */ +static void +ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, + cam_status status, char *message, int verbose_level) +{ +#ifdef AHC_TARGET_MODE + struct ahc_tmode_tstate* tstate; + u_int lun; +#endif + int found; + + found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, + CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role, + status); + +#ifdef AHC_TARGET_MODE + /* + * Send an immediate notify ccb to all target mord peripheral + * drivers affected by this action. + */ + tstate = ahc->enabled_targets[devinfo->our_scsiid]; + if (tstate != NULL) { + for (lun = 0; lun < AHC_NUM_LUNS; lun++) { + struct ahc_tmode_lstate* lstate; + + lstate = tstate->enabled_luns[lun]; + if (lstate == NULL) + continue; + + ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, + TARGET_RESET, /*arg*/0); + ahc_send_lstate_events(ahc, lstate); + } + } +#endif + + /* + * Go back to async/narrow transfers and renegotiate. + */ + ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHC_TRANS_CUR, /*paused*/TRUE); + ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, + /*period*/0, /*offset*/0, /*ppr_options*/0, + AHC_TRANS_CUR, /*paused*/TRUE); + + if (status != CAM_SEL_TIMEOUT) + ahc_send_async(ahc, devinfo->channel, devinfo->target, + CAM_LUN_WILDCARD, AC_SENT_BDR); + + if (message != NULL + && (verbose_level <= bootverbose)) + printk("%s: %s on %c:%d. 
%d SCBs aborted\n", ahc_name(ahc), + message, devinfo->channel, devinfo->target, found); +} + +#ifdef AHC_TARGET_MODE +static void +ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, + struct scb *scb) +{ + + /* + * To facilitate adding multiple messages together, + * each routine should increment the index and len + * variables instead of setting them explicitly. + */ + ahc->msgout_index = 0; + ahc->msgout_len = 0; + + if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) + ahc_build_transfer_msg(ahc, devinfo); + else + panic("ahc_intr: AWAITING target message with no message"); + + ahc->msgout_index = 0; + ahc->msg_type = MSG_TYPE_TARGET_MSGIN; +} +#endif +/**************************** Initialization **********************************/ +/* + * Allocate a controller structure for a new device + * and perform initial initializion. + */ +struct ahc_softc * +ahc_alloc(void *platform_arg, char *name) +{ + struct ahc_softc *ahc; + int i; + + ahc = kzalloc(sizeof(*ahc), GFP_ATOMIC); + if (!ahc) { + printk("aic7xxx: cannot malloc softc!\n"); + kfree(name); + return NULL; + } + + ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC); + if (ahc->seep_config == NULL) { + kfree(ahc); + kfree(name); + return (NULL); + } + LIST_INIT(&ahc->pending_scbs); + /* We don't know our unit number until the OSM sets it */ + ahc->name = name; + ahc->unit = -1; + ahc->description = NULL; + ahc->channel = 'A'; + ahc->channel_b = 'B'; + ahc->chip = AHC_NONE; + ahc->features = AHC_FENONE; + ahc->bugs = AHC_BUGNONE; + ahc->flags = AHC_FNONE; + /* + * Default to all error reporting enabled with the + * sequencer operating at its fastest speed. + * The bus attach code may modify this. + */ + ahc->seqctl = FASTMODE; + + for (i = 0; i < AHC_NUM_TARGETS; i++) + TAILQ_INIT(&ahc->untagged_queues[i]); + if (ahc_platform_alloc(ahc, platform_arg) != 0) { + ahc_free(ahc); + ahc = NULL; + } + return (ahc); +} + +int +ahc_softc_init(struct ahc_softc *ahc) +{ + + /* The IRQMS bit is only valid on VL and EISA chips */ + if ((ahc->chip & AHC_PCI) == 0) + ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS; + else + ahc->unpause = 0; + ahc->pause = ahc->unpause | PAUSE; + /* XXX The shared scb data stuff should be deprecated */ + if (ahc->scb_data == NULL) { + ahc->scb_data = kzalloc(sizeof(*ahc->scb_data), GFP_ATOMIC); + if (ahc->scb_data == NULL) + return (ENOMEM); + } + + return (0); +} + +void +ahc_set_unit(struct ahc_softc *ahc, int unit) +{ + ahc->unit = unit; +} + +void +ahc_set_name(struct ahc_softc *ahc, char *name) +{ + kfree(ahc->name); + ahc->name = name; +} + +void +ahc_free(struct ahc_softc *ahc) +{ + int i; + + switch (ahc->init_level) { + default: + case 5: + ahc_shutdown(ahc); + fallthrough; + case 4: + ahc_dmamap_unload(ahc, ahc->shared_data_dmat, + ahc->shared_data_dmamap); + fallthrough; + case 3: + ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo, + ahc->shared_data_dmamap); + ahc_dmamap_destroy(ahc, ahc->shared_data_dmat, + ahc->shared_data_dmamap); + fallthrough; + case 2: + ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat); + fallthrough; + case 1: + break; + case 0: + break; + } + + ahc_platform_free(ahc); + ahc_fini_scbdata(ahc); + for (i = 0; i < AHC_NUM_TARGETS; i++) { + struct ahc_tmode_tstate *tstate; + + tstate = ahc->enabled_targets[i]; + if (tstate != NULL) { +#ifdef AHC_TARGET_MODE + int j; + + for (j = 0; j < AHC_NUM_LUNS; j++) { + struct ahc_tmode_lstate *lstate; + + lstate = tstate->enabled_luns[j]; + if (lstate != NULL) { + xpt_free_path(lstate->path); + 
kfree(lstate); + } + } +#endif + kfree(tstate); + } + } +#ifdef AHC_TARGET_MODE + if (ahc->black_hole != NULL) { + xpt_free_path(ahc->black_hole->path); + kfree(ahc->black_hole); + } +#endif + kfree(ahc->name); + kfree(ahc->seep_config); + kfree(ahc); + return; +} + +static void +ahc_shutdown(void *arg) +{ + struct ahc_softc *ahc; + int i; + + ahc = (struct ahc_softc *)arg; + + /* This will reset most registers to 0, but not all */ + ahc_reset(ahc, /*reinit*/FALSE); + ahc_outb(ahc, SCSISEQ, 0); + ahc_outb(ahc, SXFRCTL0, 0); + ahc_outb(ahc, DSPCISTATUS, 0); + + for (i = TARG_SCSIRATE; i < SCSICONF; i++) + ahc_outb(ahc, i, 0); +} + +/* + * Reset the controller and record some information about it + * that is only available just after a reset. If "reinit" is + * non-zero, this reset occurred after initial configuration + * and the caller requests that the chip be fully reinitialized + * to a runable state. Chip interrupts are *not* enabled after + * a reinitialization. The caller must enable interrupts via + * ahc_intr_enable(). + */ +int +ahc_reset(struct ahc_softc *ahc, int reinit) +{ + u_int sblkctl; + u_int sxfrctl1_a, sxfrctl1_b; + int error; + int wait; + + /* + * Preserve the value of the SXFRCTL1 register for all channels. + * It contains settings that affect termination and we don't want + * to disturb the integrity of the bus. + */ + ahc_pause(ahc); + sxfrctl1_b = 0; + if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { + u_int sblkctl; + + /* + * Save channel B's settings in case this chip + * is setup for TWIN channel operation. + */ + sblkctl = ahc_inb(ahc, SBLKCTL); + ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); + sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); + ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); + } + sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); + + ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); + + /* + * Ensure that the reset has finished. We delay 1000us + * prior to reading the register to make sure the chip + * has sufficiently completed its reset to handle register + * accesses. + */ + wait = 1000; + do { + ahc_delay(1000); + } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); + + if (wait == 0) { + printk("%s: WARNING - Failed chip reset! " + "Trying to initialize anyway.\n", ahc_name(ahc)); + } + ahc_outb(ahc, HCNTRL, ahc->pause); + + /* Determine channel configuration */ + sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); + /* No Twin Channel PCI cards */ + if ((ahc->chip & AHC_PCI) != 0) + sblkctl &= ~SELBUSB; + switch (sblkctl) { + case 0: + /* Single Narrow Channel */ + break; + case 2: + /* Wide Channel */ + ahc->features |= AHC_WIDE; + break; + case 8: + /* Twin Channel */ + ahc->features |= AHC_TWIN; + break; + default: + printk(" Unsupported adapter type. Ignoring\n"); + return(-1); + } + + /* + * Reload sxfrctl1. + * + * We must always initialize STPWEN to 1 before we + * restore the saved values. STPWEN is initialized + * to a tri-state condition which can only be cleared + * by turning it on. + */ + if ((ahc->features & AHC_TWIN) != 0) { + u_int sblkctl; + + sblkctl = ahc_inb(ahc, SBLKCTL); + ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); + ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); + ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); + } + ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); + + error = 0; + if (reinit != 0) + /* + * If a recovery action has forced a chip reset, + * re-initialize the chip to our liking. 
+ */ + error = ahc->bus_chip_init(ahc); +#ifdef AHC_DUMP_SEQ + else + ahc_dumpseq(ahc); +#endif + + return (error); +} + +/* + * Determine the number of SCBs available on the controller + */ +int +ahc_probe_scbs(struct ahc_softc *ahc) { + int i; + + for (i = 0; i < AHC_SCB_MAX; i++) { + + ahc_outb(ahc, SCBPTR, i); + ahc_outb(ahc, SCB_BASE, i); + if (ahc_inb(ahc, SCB_BASE) != i) + break; + ahc_outb(ahc, SCBPTR, 0); + if (ahc_inb(ahc, SCB_BASE) != 0) + break; + } + return (i); +} + +static void +ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) +{ + dma_addr_t *baddr; + + baddr = (dma_addr_t *)arg; + *baddr = segs->ds_addr; +} + +static void +ahc_build_free_scb_list(struct ahc_softc *ahc) +{ + int scbsize; + int i; + + scbsize = 32; + if ((ahc->flags & AHC_LSCBS_ENABLED) != 0) + scbsize = 64; + + for (i = 0; i < ahc->scb_data->maxhscbs; i++) { + int j; + + ahc_outb(ahc, SCBPTR, i); + + /* + * Touch all SCB bytes to avoid parity errors + * should one of our debugging routines read + * an otherwise uninitiatlized byte. + */ + for (j = 0; j < scbsize; j++) + ahc_outb(ahc, SCB_BASE+j, 0xFF); + + /* Clear the control byte. */ + ahc_outb(ahc, SCB_CONTROL, 0); + + /* Set the next pointer */ + if ((ahc->flags & AHC_PAGESCBS) != 0) + ahc_outb(ahc, SCB_NEXT, i+1); + else + ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); + + /* Make the tag number, SCSIID, and lun invalid */ + ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); + ahc_outb(ahc, SCB_SCSIID, 0xFF); + ahc_outb(ahc, SCB_LUN, 0xFF); + } + + if ((ahc->flags & AHC_PAGESCBS) != 0) { + /* SCB 0 heads the free list. */ + ahc_outb(ahc, FREE_SCBH, 0); + } else { + /* No free list. */ + ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL); + } + + /* Make sure that the last SCB terminates the free list */ + ahc_outb(ahc, SCBPTR, i-1); + ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); +} + +static int +ahc_init_scbdata(struct ahc_softc *ahc) +{ + struct scb_data *scb_data; + + scb_data = ahc->scb_data; + SLIST_INIT(&scb_data->free_scbs); + SLIST_INIT(&scb_data->sg_maps); + + /* Allocate SCB resources */ + scb_data->scbarray = kcalloc(AHC_SCB_MAX_ALLOC, sizeof(struct scb), + GFP_ATOMIC); + if (scb_data->scbarray == NULL) + return (ENOMEM); + + /* Determine the number of hardware SCBs and initialize them */ + + scb_data->maxhscbs = ahc_probe_scbs(ahc); + if (ahc->scb_data->maxhscbs == 0) { + printk("%s: No SCB space found\n", ahc_name(ahc)); + return (ENXIO); + } + + /* + * Create our DMA tags. These tags define the kinds of device + * accessible memory allocations and memory mappings we will + * need to perform during normal operation. + * + * Unless we need to further restrict the allocation, we rely + * on the restrictions of the parent dmat, hence the common + * use of MAXADDR and MAXSIZE. 
+ */ + + /* DMA tag for our hardware scb structures */ + if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, + /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, + /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, + /*highaddr*/BUS_SPACE_MAXADDR, + /*filter*/NULL, /*filterarg*/NULL, + AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb), + /*nsegments*/1, + /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, + /*flags*/0, &scb_data->hscb_dmat) != 0) { + goto error_exit; + } + + scb_data->init_level++; + + /* Allocation for our hscbs */ + if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat, + (void **)&scb_data->hscbs, + BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) { + goto error_exit; + } + + scb_data->init_level++; + + /* And permanently map them */ + ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap, + scb_data->hscbs, + AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb), + ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0); + + scb_data->init_level++; + + /* DMA tag for our sense buffers */ + if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, + /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, + /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, + /*highaddr*/BUS_SPACE_MAXADDR, + /*filter*/NULL, /*filterarg*/NULL, + AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data), + /*nsegments*/1, + /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, + /*flags*/0, &scb_data->sense_dmat) != 0) { + goto error_exit; + } + + scb_data->init_level++; + + /* Allocate them */ + if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat, + (void **)&scb_data->sense, + BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) { + goto error_exit; + } + + scb_data->init_level++; + + /* And permanently map them */ + ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap, + scb_data->sense, + AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data), + ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0); + + scb_data->init_level++; + + /* DMA tag for our S/G structures. We allocate in page sized chunks */ + if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8, + /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, + /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, + /*highaddr*/BUS_SPACE_MAXADDR, + /*filter*/NULL, /*filterarg*/NULL, + PAGE_SIZE, /*nsegments*/1, + /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, + /*flags*/0, &scb_data->sg_dmat) != 0) { + goto error_exit; + } + + scb_data->init_level++; + + /* Perform initial CCB allocation */ + memset(scb_data->hscbs, 0, + AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb)); + ahc_alloc_scbs(ahc); + + if (scb_data->numscbs == 0) { + printk("%s: ahc_init_scbdata - " + "Unable to allocate initial scbs\n", + ahc_name(ahc)); + goto error_exit; + } + + /* + * Reserve the next queued SCB. 
+ */ + ahc->next_queued_scb = ahc_get_scb(ahc); + + /* + * Note that we were successful + */ + return (0); + +error_exit: + + return (ENOMEM); +} + +static void +ahc_fini_scbdata(struct ahc_softc *ahc) +{ + struct scb_data *scb_data; + + scb_data = ahc->scb_data; + if (scb_data == NULL) + return; + + switch (scb_data->init_level) { + default: + case 7: + { + struct sg_map_node *sg_map; + + while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) { + SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); + ahc_dmamap_unload(ahc, scb_data->sg_dmat, + sg_map->sg_dmamap); + ahc_dmamem_free(ahc, scb_data->sg_dmat, + sg_map->sg_vaddr, + sg_map->sg_dmamap); + kfree(sg_map); + } + ahc_dma_tag_destroy(ahc, scb_data->sg_dmat); + } + fallthrough; + case 6: + ahc_dmamap_unload(ahc, scb_data->sense_dmat, + scb_data->sense_dmamap); + fallthrough; + case 5: + ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense, + scb_data->sense_dmamap); + ahc_dmamap_destroy(ahc, scb_data->sense_dmat, + scb_data->sense_dmamap); + fallthrough; + case 4: + ahc_dma_tag_destroy(ahc, scb_data->sense_dmat); + fallthrough; + case 3: + ahc_dmamap_unload(ahc, scb_data->hscb_dmat, + scb_data->hscb_dmamap); + fallthrough; + case 2: + ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs, + scb_data->hscb_dmamap); + ahc_dmamap_destroy(ahc, scb_data->hscb_dmat, + scb_data->hscb_dmamap); + fallthrough; + case 1: + ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat); + break; + case 0: + break; + } + kfree(scb_data->scbarray); +} + +static void +ahc_alloc_scbs(struct ahc_softc *ahc) +{ + struct scb_data *scb_data; + struct scb *next_scb; + struct sg_map_node *sg_map; + dma_addr_t physaddr; + struct ahc_dma_seg *segs; + int newcount; + int i; + + scb_data = ahc->scb_data; + if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC) + /* Can't allocate any more */ + return; + + next_scb = &scb_data->scbarray[scb_data->numscbs]; + + sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC); + + if (sg_map == NULL) + return; + + /* Allocate S/G space for the next batch of SCBS */ + if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat, + (void **)&sg_map->sg_vaddr, + BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { + kfree(sg_map); + return; + } + + SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); + + ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap, + sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb, + &sg_map->sg_physaddr, /*flags*/0); + + segs = sg_map->sg_vaddr; + physaddr = sg_map->sg_physaddr; + + newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg))); + newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs)); + for (i = 0; i < newcount; i++) { + struct scb_platform_data *pdata; + + pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC); + if (pdata == NULL) + break; + next_scb->platform_data = pdata; + next_scb->sg_map = sg_map; + next_scb->sg_list = segs; + /* + * The sequencer always starts with the second entry. + * The first entry is embedded in the scb. 
+ */ + next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg); + next_scb->ahc_softc = ahc; + next_scb->flags = SCB_FREE; + next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; + next_scb->hscb->tag = ahc->scb_data->numscbs; + SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, + next_scb, links.sle); + segs += AHC_NSEG; + physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg)); + next_scb++; + ahc->scb_data->numscbs++; + } +} + +void +ahc_controller_info(struct ahc_softc *ahc, char *buf) +{ + int len; + + len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]); + buf += len; + if ((ahc->features & AHC_TWIN) != 0) + len = sprintf(buf, "Twin Channel, A SCSI Id=%d, " + "B SCSI Id=%d, primary %c, ", + ahc->our_id, ahc->our_id_b, + (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A'); + else { + const char *speed; + const char *type; + + speed = ""; + if ((ahc->features & AHC_ULTRA) != 0) { + speed = "Ultra "; + } else if ((ahc->features & AHC_DT) != 0) { + speed = "Ultra160 "; + } else if ((ahc->features & AHC_ULTRA2) != 0) { + speed = "Ultra2 "; + } + if ((ahc->features & AHC_WIDE) != 0) { + type = "Wide"; + } else { + type = "Single"; + } + len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ", + speed, type, ahc->channel, ahc->our_id); + } + buf += len; + + if ((ahc->flags & AHC_PAGESCBS) != 0) + sprintf(buf, "%d/%d SCBs", + ahc->scb_data->maxhscbs, AHC_MAX_QUEUE); + else + sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs); +} + +int +ahc_chip_init(struct ahc_softc *ahc) +{ + int term; + int error; + u_int i; + u_int scsi_conf; + u_int scsiseq_template; + uint32_t physaddr; + + ahc_outb(ahc, SEQ_FLAGS, 0); + ahc_outb(ahc, SEQ_FLAGS2, 0); + + /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ + if (ahc->features & AHC_TWIN) { + + /* + * Setup Channel B first. + */ + ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); + term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; + ahc_outb(ahc, SCSIID, ahc->our_id_b); + scsi_conf = ahc_inb(ahc, SCSICONF + 1); + ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) + |term|ahc->seltime_b|ENSTIMER|ACTNEGEN); + if ((ahc->features & AHC_ULTRA2) != 0) + ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); + ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); + ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); + + /* Select Channel A */ + ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); + } + term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0; + if ((ahc->features & AHC_ULTRA2) != 0) + ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); + else + ahc_outb(ahc, SCSIID, ahc->our_id); + scsi_conf = ahc_inb(ahc, SCSICONF); + ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) + |term|ahc->seltime + |ENSTIMER|ACTNEGEN); + if ((ahc->features & AHC_ULTRA2) != 0) + ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); + ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); + ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); + + /* There are no untagged SCBs active yet. */ + for (i = 0; i < 16; i++) { + ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0)); + if ((ahc->flags & AHC_SCB_BTT) != 0) { + int lun; + + /* + * The SCB based BTT allows an entry per + * target and lun pair. 
+ */ + for (lun = 1; lun < AHC_NUM_LUNS; lun++) + ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun)); + } + } + + /* All of our queues are empty */ + for (i = 0; i < 256; i++) + ahc->qoutfifo[i] = SCB_LIST_NULL; + ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD); + + for (i = 0; i < 256; i++) + ahc->qinfifo[i] = SCB_LIST_NULL; + + if ((ahc->features & AHC_MULTI_TID) != 0) { + ahc_outb(ahc, TARGID, 0); + ahc_outb(ahc, TARGID + 1, 0); + } + + /* + * Tell the sequencer where it can find our arrays in memory. + */ + physaddr = ahc->scb_data->hscb_busaddr; + ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); + ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); + ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); + ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); + + physaddr = ahc->shared_data_busaddr; + ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF); + ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF); + ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF); + ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF); + + /* + * Initialize the group code to command length table. + * This overrides the values in TARG_SCSIRATE, so only + * setup the table after we have processed that information. + */ + ahc_outb(ahc, CMDSIZE_TABLE, 5); + ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); + ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); + ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); + ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); + ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); + ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); + ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); + + if ((ahc->features & AHC_HS_MAILBOX) != 0) + ahc_outb(ahc, HS_MAILBOX, 0); + + /* Tell the sequencer of our initial queue positions */ + if ((ahc->features & AHC_TARGETMODE) != 0) { + ahc->tqinfifonext = 1; + ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); + ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); + } + ahc->qinfifonext = 0; + ahc->qoutfifonext = 0; + if ((ahc->features & AHC_QUEUE_REGS) != 0) { + ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); + ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); + ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext); + ahc_outb(ahc, SDSCB_QOFF, 0); + } else { + ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); + ahc_outb(ahc, QINPOS, ahc->qinfifonext); + ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext); + } + + /* We don't have any waiting selections */ + ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); + + /* Our disconnection list is empty too */ + ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); + + /* Message out buffer starts empty */ + ahc_outb(ahc, MSG_OUT, NOP); + + /* + * Setup the allowed SCSI Sequences based on operational mode. + * If we are a target, we'll enable select in operations once + * we've had a lun enabled. + */ + scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP; + if ((ahc->flags & AHC_INITIATORROLE) != 0) + scsiseq_template |= ENRSELI; + ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template); + + /* Initialize our list of free SCBs. */ + ahc_build_free_scb_list(ahc); + + /* + * Tell the sequencer which SCB will be the next one it receives. + */ + ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); + + /* + * Load the Sequencer program and Enable the adapter + * in "fast" mode. + */ + if (bootverbose) + printk("%s: Downloading Sequencer Program...", + ahc_name(ahc)); + + error = ahc_loadseq(ahc); + if (error != 0) + return (error); + + if ((ahc->features & AHC_ULTRA2) != 0) { + int wait; + + /* + * Wait for up to 500ms for our transceivers + * to settle. 
If the adapter does not have + * a cable attached, the transceivers may + * never settle, so don't complain if we + * fail here. + */ + for (wait = 5000; + (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; + wait--) + ahc_delay(100); + } + ahc_restart(ahc); + return (0); +} + +/* + * Start the board, ready for normal operation + */ +int +ahc_init(struct ahc_softc *ahc) +{ + int max_targ; + u_int i; + u_int scsi_conf; + u_int ultraenb; + u_int discenable; + u_int tagenable; + size_t driver_data_size; + +#ifdef AHC_DEBUG + if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0) + ahc->flags |= AHC_SEQUENCER_DEBUG; +#endif + +#ifdef AHC_PRINT_SRAM + printk("Scratch Ram:"); + for (i = 0x20; i < 0x5f; i++) { + if (((i % 8) == 0) && (i != 0)) { + printk ("\n "); + } + printk (" 0x%x", ahc_inb(ahc, i)); + } + if ((ahc->features & AHC_MORE_SRAM) != 0) { + for (i = 0x70; i < 0x7f; i++) { + if (((i % 8) == 0) && (i != 0)) { + printk ("\n "); + } + printk (" 0x%x", ahc_inb(ahc, i)); + } + } + printk ("\n"); + /* + * Reading uninitialized scratch ram may + * generate parity errors. + */ + ahc_outb(ahc, CLRINT, CLRPARERR); + ahc_outb(ahc, CLRINT, CLRBRKADRINT); +#endif + max_targ = 15; + + /* + * Assume we have a board at this stage and it has been reset. + */ + if ((ahc->flags & AHC_USEDEFAULTS) != 0) + ahc->our_id = ahc->our_id_b = 7; + + /* + * Default to allowing initiator operations. + */ + ahc->flags |= AHC_INITIATORROLE; + + /* + * Only allow target mode features if this unit has them enabled. + */ + if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) + ahc->features &= ~AHC_TARGETMODE; + + ahc->init_level++; + + /* + * DMA tag for our command fifos and other data in system memory + * the card's sequencer must be able to access. For initiator + * roles, we need to allocate space for the qinfifo and qoutfifo. + * The qinfifo and qoutfifo are composed of 256 1 byte elements. + * When providing for the target mode role, we must additionally + * provide space for the incoming target command fifo and an extra + * byte to deal with a dma bug in some chip versions. + */ + driver_data_size = 2 * 256 * sizeof(uint8_t); + if ((ahc->features & AHC_TARGETMODE) != 0) + driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) + + /*DMA WideOdd Bug Buffer*/1; + if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, + /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, + /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, + /*highaddr*/BUS_SPACE_MAXADDR, + /*filter*/NULL, /*filterarg*/NULL, + driver_data_size, + /*nsegments*/1, + /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, + /*flags*/0, &ahc->shared_data_dmat) != 0) { + return (ENOMEM); + } + + ahc->init_level++; + + /* Allocation of driver data */ + if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat, + (void **)&ahc->qoutfifo, + BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) { + return (ENOMEM); + } + + ahc->init_level++; + + /* And permanently map it in */ + ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, + ahc->qoutfifo, driver_data_size, ahc_dmamap_cb, + &ahc->shared_data_busaddr, /*flags*/0); + + if ((ahc->features & AHC_TARGETMODE) != 0) { + ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo; + ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS]; + ahc->dma_bug_buf = ahc->shared_data_busaddr + + driver_data_size - 1; + /* All target command blocks start out invalid. 
*/ + for (i = 0; i < AHC_TMODE_CMDS; i++) + ahc->targetcmds[i].cmd_valid = 0; + ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD); + ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; + } + ahc->qinfifo = &ahc->qoutfifo[256]; + + ahc->init_level++; + + /* Allocate SCB data now that buffer_dmat is initialized */ + if (ahc->scb_data->maxhscbs == 0) + if (ahc_init_scbdata(ahc) != 0) + return (ENOMEM); + + /* + * Allocate a tstate to house information for our + * initiator presence on the bus as well as the user + * data for any target mode initiator. + */ + if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { + printk("%s: unable to allocate ahc_tmode_tstate. " + "Failing attach\n", ahc_name(ahc)); + return (ENOMEM); + } + + if ((ahc->features & AHC_TWIN) != 0) { + if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { + printk("%s: unable to allocate ahc_tmode_tstate. " + "Failing attach\n", ahc_name(ahc)); + return (ENOMEM); + } + } + + if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) { + ahc->flags |= AHC_PAGESCBS; + } else { + ahc->flags &= ~AHC_PAGESCBS; + } + +#ifdef AHC_DEBUG + if (ahc_debug & AHC_SHOW_MISC) { + printk("%s: hardware scb %u bytes; kernel scb %u bytes; " + "ahc_dma %u bytes\n", + ahc_name(ahc), + (u_int)sizeof(struct hardware_scb), + (u_int)sizeof(struct scb), + (u_int)sizeof(struct ahc_dma_seg)); + } +#endif /* AHC_DEBUG */ + + /* + * Look at the information that board initialization or + * the board bios has left us. + */ + if (ahc->features & AHC_TWIN) { + scsi_conf = ahc_inb(ahc, SCSICONF + 1); + if ((scsi_conf & RESET_SCSI) != 0 + && (ahc->flags & AHC_INITIATORROLE) != 0) + ahc->flags |= AHC_RESET_BUS_B; + } + + scsi_conf = ahc_inb(ahc, SCSICONF); + if ((scsi_conf & RESET_SCSI) != 0 + && (ahc->flags & AHC_INITIATORROLE) != 0) + ahc->flags |= AHC_RESET_BUS_A; + + ultraenb = 0; + tagenable = ALL_TARGETS_MASK; + + /* Grab the disconnection disable table and invert it for our needs */ + if ((ahc->flags & AHC_USEDEFAULTS) != 0) { + printk("%s: Host Adapter Bios disabled. Using default SCSI " + "device parameters\n", ahc_name(ahc)); + ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| + AHC_TERM_ENB_A|AHC_TERM_ENB_B; + discenable = ALL_TARGETS_MASK; + if ((ahc->features & AHC_ULTRA) != 0) + ultraenb = ALL_TARGETS_MASK; + } else { + discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) + | ahc_inb(ahc, DISC_DSB)); + if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) + ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) + | ahc_inb(ahc, ULTRA_ENB); + } + + if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) + max_targ = 7; + + for (i = 0; i <= max_targ; i++) { + struct ahc_initiator_tinfo *tinfo; + struct ahc_tmode_tstate *tstate; + u_int our_id; + u_int target_id; + char channel; + + channel = 'A'; + our_id = ahc->our_id; + target_id = i; + if (i > 7 && (ahc->features & AHC_TWIN) != 0) { + channel = 'B'; + our_id = ahc->our_id_b; + target_id = i % 8; + } + tinfo = ahc_fetch_transinfo(ahc, channel, our_id, + target_id, &tstate); + /* Default to async narrow across the board */ + memset(tinfo, 0, sizeof(*tinfo)); + if (ahc->flags & AHC_USEDEFAULTS) { + if ((ahc->features & AHC_WIDE) != 0) + tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; + + /* + * These will be truncated when we determine the + * connection type we have with the target. + */ + tinfo->user.period = ahc_syncrates->period; + tinfo->user.offset = MAX_OFFSET; + } else { + u_int scsirate; + uint16_t mask; + + /* Take the settings leftover in scratch RAM. 
*/ + scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); + mask = (0x01 << i); + if ((ahc->features & AHC_ULTRA2) != 0) { + u_int offset; + u_int maxsync; + + if ((scsirate & SOFS) == 0x0F) { + /* + * Haven't negotiated yet, + * so the format is different. + */ + scsirate = (scsirate & SXFR) >> 4 + | (ultraenb & mask) + ? 0x08 : 0x0 + | (scsirate & WIDEXFER); + offset = MAX_OFFSET_ULTRA2; + } else + offset = ahc_inb(ahc, TARG_OFFSET + i); + if ((scsirate & ~WIDEXFER) == 0 && offset != 0) + /* Set to the lowest sync rate, 5MHz */ + scsirate |= 0x1c; + maxsync = AHC_SYNCRATE_ULTRA2; + if ((ahc->features & AHC_DT) != 0) + maxsync = AHC_SYNCRATE_DT; + tinfo->user.period = + ahc_find_period(ahc, scsirate, maxsync); + if (offset == 0) + tinfo->user.period = 0; + else + tinfo->user.offset = MAX_OFFSET; + if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ + && (ahc->features & AHC_DT) != 0) + tinfo->user.ppr_options = + MSG_EXT_PPR_DT_REQ; + } else if ((scsirate & SOFS) != 0) { + if ((scsirate & SXFR) == 0x40 + && (ultraenb & mask) != 0) { + /* Treat 10MHz as a non-ultra speed */ + scsirate &= ~SXFR; + ultraenb &= ~mask; + } + tinfo->user.period = + ahc_find_period(ahc, scsirate, + (ultraenb & mask) + ? AHC_SYNCRATE_ULTRA + : AHC_SYNCRATE_FAST); + if (tinfo->user.period != 0) + tinfo->user.offset = MAX_OFFSET; + } + if (tinfo->user.period == 0) + tinfo->user.offset = 0; + if ((scsirate & WIDEXFER) != 0 + && (ahc->features & AHC_WIDE) != 0) + tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; + tinfo->user.protocol_version = 4; + if ((ahc->features & AHC_DT) != 0) + tinfo->user.transport_version = 3; + else + tinfo->user.transport_version = 2; + tinfo->goal.protocol_version = 2; + tinfo->goal.transport_version = 2; + tinfo->curr.protocol_version = 2; + tinfo->curr.transport_version = 2; + } + tstate->ultraenb = 0; + } + ahc->user_discenable = discenable; + ahc->user_tagenable = tagenable; + + return (ahc->bus_chip_init(ahc)); +} + +void +ahc_intr_enable(struct ahc_softc *ahc, int enable) +{ + u_int hcntrl; + + hcntrl = ahc_inb(ahc, HCNTRL); + hcntrl &= ~INTEN; + ahc->pause &= ~INTEN; + ahc->unpause &= ~INTEN; + if (enable) { + hcntrl |= INTEN; + ahc->pause |= INTEN; + ahc->unpause |= INTEN; + } + ahc_outb(ahc, HCNTRL, hcntrl); +} + +/* + * Ensure that the card is paused in a location + * outside of all critical sections and that all + * pending work is completed prior to returning. + * This routine should only be called from outside + * an interrupt context. + */ +void +ahc_pause_and_flushwork(struct ahc_softc *ahc) +{ + int intstat; + int maxloops; + int paused; + + maxloops = 1000; + ahc->flags |= AHC_ALL_INTERRUPTS; + paused = FALSE; + do { + if (paused) { + ahc_unpause(ahc); + /* + * Give the sequencer some time to service + * any active selections. 
+ */ + ahc_delay(500); + } + ahc_intr(ahc); + ahc_pause(ahc); + paused = TRUE; + ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); + intstat = ahc_inb(ahc, INTSTAT); + if ((intstat & INT_PEND) == 0) { + ahc_clear_critical_section(ahc); + intstat = ahc_inb(ahc, INTSTAT); + } + } while (--maxloops + && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0) + && ((intstat & INT_PEND) != 0 + || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0)); + if (maxloops == 0) { + printk("Infinite interrupt loop, INTSTAT = %x", + ahc_inb(ahc, INTSTAT)); + } + ahc_platform_flushwork(ahc); + ahc->flags &= ~AHC_ALL_INTERRUPTS; +} + +int __maybe_unused +ahc_suspend(struct ahc_softc *ahc) +{ + + ahc_pause_and_flushwork(ahc); + + if (LIST_FIRST(&ahc->pending_scbs) != NULL) { + ahc_unpause(ahc); + return (EBUSY); + } + +#ifdef AHC_TARGET_MODE + /* + * XXX What about ATIOs that have not yet been serviced? + * Perhaps we should just refuse to be suspended if we + * are acting in a target role. + */ + if (ahc->pending_device != NULL) { + ahc_unpause(ahc); + return (EBUSY); + } +#endif + ahc_shutdown(ahc); + return (0); +} + +int __maybe_unused +ahc_resume(struct ahc_softc *ahc) +{ + + ahc_reset(ahc, /*reinit*/TRUE); + ahc_intr_enable(ahc, TRUE); + ahc_restart(ahc); + return (0); +} +/************************** Busy Target Table *********************************/ +/* + * Return the untagged transaction id for a given target/channel lun. + * Optionally, clear the entry. + */ +static u_int +ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl) +{ + u_int scbid; + u_int target_offset; + + if ((ahc->flags & AHC_SCB_BTT) != 0) { + u_int saved_scbptr; + + saved_scbptr = ahc_inb(ahc, SCBPTR); + ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); + scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl)); + ahc_outb(ahc, SCBPTR, saved_scbptr); + } else { + target_offset = TCL_TARGET_OFFSET(tcl); + scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset); + } + + return (scbid); +} + +static void +ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl) +{ + u_int target_offset; + + if ((ahc->flags & AHC_SCB_BTT) != 0) { + u_int saved_scbptr; + + saved_scbptr = ahc_inb(ahc, SCBPTR); + ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); + ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL); + ahc_outb(ahc, SCBPTR, saved_scbptr); + } else { + target_offset = TCL_TARGET_OFFSET(tcl); + ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL); + } +} + +static void +ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid) +{ + u_int target_offset; + + if ((ahc->flags & AHC_SCB_BTT) != 0) { + u_int saved_scbptr; + + saved_scbptr = ahc_inb(ahc, SCBPTR); + ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); + ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid); + ahc_outb(ahc, SCBPTR, saved_scbptr); + } else { + target_offset = TCL_TARGET_OFFSET(tcl); + ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid); + } +} + +/************************** SCB and SCB queue management **********************/ +int +ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target, + char channel, int lun, u_int tag, role_t role) +{ + int targ = SCB_GET_TARGET(ahc, scb); + char chan = SCB_GET_CHANNEL(ahc, scb); + int slun = SCB_GET_LUN(scb); + int match; + + match = ((chan == channel) || (channel == ALL_CHANNELS)); + if (match != 0) + match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); + if (match != 0) + match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); + if (match != 0) { +#ifdef AHC_TARGET_MODE + int group; + + group = 
XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); + if (role == ROLE_INITIATOR) { + match = (group != XPT_FC_GROUP_TMODE) + && ((tag == scb->hscb->tag) + || (tag == SCB_LIST_NULL)); + } else if (role == ROLE_TARGET) { + match = (group == XPT_FC_GROUP_TMODE) + && ((tag == scb->io_ctx->csio.tag_id) + || (tag == SCB_LIST_NULL)); + } +#else /* !AHC_TARGET_MODE */ + match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); +#endif /* AHC_TARGET_MODE */ + } + + return match; +} + +static void +ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb) +{ + int target; + char channel; + int lun; + + target = SCB_GET_TARGET(ahc, scb); + lun = SCB_GET_LUN(scb); + channel = SCB_GET_CHANNEL(ahc, scb); + + ahc_search_qinfifo(ahc, target, channel, lun, + /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, + CAM_REQUEUE_REQ, SEARCH_COMPLETE); + + ahc_platform_freeze_devq(ahc, scb); +} + +void +ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb) +{ + struct scb *prev_scb; + + prev_scb = NULL; + if (ahc_qinfifo_count(ahc) != 0) { + u_int prev_tag; + uint8_t prev_pos; + + prev_pos = ahc->qinfifonext - 1; + prev_tag = ahc->qinfifo[prev_pos]; + prev_scb = ahc_lookup_scb(ahc, prev_tag); + } + ahc_qinfifo_requeue(ahc, prev_scb, scb); + if ((ahc->features & AHC_QUEUE_REGS) != 0) { + ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); + } else { + ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); + } +} + +static void +ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, + struct scb *scb) +{ + if (prev_scb == NULL) { + ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); + } else { + prev_scb->hscb->next = scb->hscb->tag; + ahc_sync_scb(ahc, prev_scb, + BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); + } + ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; + scb->hscb->next = ahc->next_queued_scb->hscb->tag; + ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); +} + +static int +ahc_qinfifo_count(struct ahc_softc *ahc) +{ + uint8_t qinpos; + uint8_t diff; + + if ((ahc->features & AHC_QUEUE_REGS) != 0) { + qinpos = ahc_inb(ahc, SNSCB_QOFF); + ahc_outb(ahc, SNSCB_QOFF, qinpos); + } else + qinpos = ahc_inb(ahc, QINPOS); + diff = ahc->qinfifonext - qinpos; + return (diff); +} + +int +ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, + int lun, u_int tag, role_t role, uint32_t status, + ahc_search_action action) +{ + struct scb *scb; + struct scb *prev_scb; + uint8_t qinstart; + uint8_t qinpos; + uint8_t qintail; + uint8_t next; + uint8_t prev; + uint8_t curscbptr; + int found; + int have_qregs; + + qintail = ahc->qinfifonext; + have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0; + if (have_qregs) { + qinstart = ahc_inb(ahc, SNSCB_QOFF); + ahc_outb(ahc, SNSCB_QOFF, qinstart); + } else + qinstart = ahc_inb(ahc, QINPOS); + qinpos = qinstart; + found = 0; + prev_scb = NULL; + + if (action == SEARCH_COMPLETE) { + /* + * Don't attempt to run any queued untagged transactions + * until we are done with the abort process. + */ + ahc_freeze_untagged_queues(ahc); + } + + /* + * Start with an empty queue. Entries that are not chosen + * for removal will be re-added to the queue as we go. + */ + ahc->qinfifonext = qinpos; + ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); + + while (qinpos != qintail) { + scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]); + if (scb == NULL) { + printk("qinpos = %d, SCB index = %d\n", + qinpos, ahc->qinfifo[qinpos]); + panic("Loop 1\n"); + } + + if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) { + /* + * We found an scb that needs to be acted on. 
+ */ + found++; + switch (action) { + case SEARCH_COMPLETE: + { + cam_status ostat; + cam_status cstat; + + ostat = ahc_get_transaction_status(scb); + if (ostat == CAM_REQ_INPROG) + ahc_set_transaction_status(scb, status); + cstat = ahc_get_transaction_status(scb); + if (cstat != CAM_REQ_CMP) + ahc_freeze_scb(scb); + if ((scb->flags & SCB_ACTIVE) == 0) + printk("Inactive SCB in qinfifo\n"); + ahc_done(ahc, scb); + } + fallthrough; + case SEARCH_REMOVE: + break; + case SEARCH_COUNT: + ahc_qinfifo_requeue(ahc, prev_scb, scb); + prev_scb = scb; + break; + } + } else { + ahc_qinfifo_requeue(ahc, prev_scb, scb); + prev_scb = scb; + } + qinpos++; + } + + if ((ahc->features & AHC_QUEUE_REGS) != 0) { + ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); + } else { + ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); + } + + if (action != SEARCH_COUNT + && (found != 0) + && (qinstart != ahc->qinfifonext)) { + /* + * The sequencer may be in the process of dmaing + * down the SCB at the beginning of the queue. + * This could be problematic if either the first, + * or the second SCB is removed from the queue + * (the first SCB includes a pointer to the "next" + * SCB to dma). If we have removed any entries, swap + * the first element in the queue with the next HSCB + * so the sequencer will notice that NEXT_QUEUED_SCB + * has changed during its dma attempt and will retry + * the DMA. + */ + scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]); + + if (scb == NULL) { + printk("found = %d, qinstart = %d, qinfifionext = %d\n", + found, qinstart, ahc->qinfifonext); + panic("First/Second Qinfifo fixup\n"); + } + /* + * ahc_swap_with_next_hscb forces our next pointer to + * point to the reserved SCB for future commands. Save + * and restore our original next pointer to maintain + * queue integrity. + */ + next = scb->hscb->next; + ahc->scb_data->scbindex[scb->hscb->tag] = NULL; + ahc_swap_with_next_hscb(ahc, scb); + scb->hscb->next = next; + ahc->qinfifo[qinstart] = scb->hscb->tag; + + /* Tell the card about the new head of the qinfifo. */ + ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); + + /* Fixup the tail "next" pointer. */ + qintail = ahc->qinfifonext - 1; + scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]); + scb->hscb->next = ahc->next_queued_scb->hscb->tag; + } + + /* + * Search waiting for selection list. + */ + curscbptr = ahc_inb(ahc, SCBPTR); + next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */ + prev = SCB_LIST_NULL; + + while (next != SCB_LIST_NULL) { + uint8_t scb_index; + + ahc_outb(ahc, SCBPTR, next); + scb_index = ahc_inb(ahc, SCB_TAG); + if (scb_index >= ahc->scb_data->numscbs) { + printk("Waiting List inconsistency. " + "SCB index == %d, yet numscbs == %d.", + scb_index, ahc->scb_data->numscbs); + ahc_dump_card_state(ahc); + panic("for safety"); + } + scb = ahc_lookup_scb(ahc, scb_index); + if (scb == NULL) { + printk("scb_index = %d, next = %d\n", + scb_index, next); + panic("Waiting List traversal\n"); + } + if (ahc_match_scb(ahc, scb, target, channel, + lun, SCB_LIST_NULL, role)) { + /* + * We found an scb that needs to be acted on. 
+ */ + found++; + switch (action) { + case SEARCH_COMPLETE: + { + cam_status ostat; + cam_status cstat; + + ostat = ahc_get_transaction_status(scb); + if (ostat == CAM_REQ_INPROG) + ahc_set_transaction_status(scb, + status); + cstat = ahc_get_transaction_status(scb); + if (cstat != CAM_REQ_CMP) + ahc_freeze_scb(scb); + if ((scb->flags & SCB_ACTIVE) == 0) + printk("Inactive SCB in Waiting List\n"); + ahc_done(ahc, scb); + } + fallthrough; + case SEARCH_REMOVE: + next = ahc_rem_wscb(ahc, next, prev); + break; + case SEARCH_COUNT: + prev = next; + next = ahc_inb(ahc, SCB_NEXT); + break; + } + } else { + prev = next; + next = ahc_inb(ahc, SCB_NEXT); + } + } + ahc_outb(ahc, SCBPTR, curscbptr); + + found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target, + channel, lun, status, action); + + if (action == SEARCH_COMPLETE) + ahc_release_untagged_queues(ahc); + return (found); +} + +int +ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx, + int target, char channel, int lun, uint32_t status, + ahc_search_action action) +{ + struct scb *scb; + int maxtarget; + int found; + int i; + + if (action == SEARCH_COMPLETE) { + /* + * Don't attempt to run any queued untagged transactions + * until we are done with the abort process. + */ + ahc_freeze_untagged_queues(ahc); + } + + found = 0; + i = 0; + if ((ahc->flags & AHC_SCB_BTT) == 0) { + + maxtarget = 16; + if (target != CAM_TARGET_WILDCARD) { + + i = target; + if (channel == 'B') + i += 8; + maxtarget = i + 1; + } + } else { + maxtarget = 0; + } + + for (; i < maxtarget; i++) { + struct scb_tailq *untagged_q; + struct scb *next_scb; + + untagged_q = &(ahc->untagged_queues[i]); + next_scb = TAILQ_FIRST(untagged_q); + while (next_scb != NULL) { + + scb = next_scb; + next_scb = TAILQ_NEXT(scb, links.tqe); + + /* + * The head of the list may be the currently + * active untagged command for a device. + * We're only searching for commands that + * have not been started. A transaction + * marked active but still in the qinfifo + * is removed by the qinfifo scanning code + * above. + */ + if ((scb->flags & SCB_ACTIVE) != 0) + continue; + + if (ahc_match_scb(ahc, scb, target, channel, lun, + SCB_LIST_NULL, ROLE_INITIATOR) == 0 + || (ctx != NULL && ctx != scb->io_ctx)) + continue; + + /* + * We found an scb that needs to be acted on. 
+ */ + found++; + switch (action) { + case SEARCH_COMPLETE: + { + cam_status ostat; + cam_status cstat; + + ostat = ahc_get_transaction_status(scb); + if (ostat == CAM_REQ_INPROG) + ahc_set_transaction_status(scb, status); + cstat = ahc_get_transaction_status(scb); + if (cstat != CAM_REQ_CMP) + ahc_freeze_scb(scb); + if ((scb->flags & SCB_ACTIVE) == 0) + printk("Inactive SCB in untaggedQ\n"); + ahc_done(ahc, scb); + break; + } + case SEARCH_REMOVE: + scb->flags &= ~SCB_UNTAGGEDQ; + TAILQ_REMOVE(untagged_q, scb, links.tqe); + break; + case SEARCH_COUNT: + break; + } + } + } + + if (action == SEARCH_COMPLETE) + ahc_release_untagged_queues(ahc); + return (found); +} + +int +ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, + int lun, u_int tag, int stop_on_first, int remove, + int save_state) +{ + struct scb *scbp; + u_int next; + u_int prev; + u_int count; + u_int active_scb; + + count = 0; + next = ahc_inb(ahc, DISCONNECTED_SCBH); + prev = SCB_LIST_NULL; + + if (save_state) { + /* restore this when we're done */ + active_scb = ahc_inb(ahc, SCBPTR); + } else + /* Silence compiler */ + active_scb = SCB_LIST_NULL; + + while (next != SCB_LIST_NULL) { + u_int scb_index; + + ahc_outb(ahc, SCBPTR, next); + scb_index = ahc_inb(ahc, SCB_TAG); + if (scb_index >= ahc->scb_data->numscbs) { + printk("Disconnected List inconsistency. " + "SCB index == %d, yet numscbs == %d.", + scb_index, ahc->scb_data->numscbs); + ahc_dump_card_state(ahc); + panic("for safety"); + } + + if (next == prev) { + panic("Disconnected List Loop. " + "cur SCBPTR == %x, prev SCBPTR == %x.", + next, prev); + } + scbp = ahc_lookup_scb(ahc, scb_index); + if (ahc_match_scb(ahc, scbp, target, channel, lun, + tag, ROLE_INITIATOR)) { + count++; + if (remove) { + next = + ahc_rem_scb_from_disc_list(ahc, prev, next); + } else { + prev = next; + next = ahc_inb(ahc, SCB_NEXT); + } + if (stop_on_first) + break; + } else { + prev = next; + next = ahc_inb(ahc, SCB_NEXT); + } + } + if (save_state) + ahc_outb(ahc, SCBPTR, active_scb); + return (count); +} + +/* + * Remove an SCB from the on chip list of disconnected transactions. + * This is empty/unused if we are not performing SCB paging. + */ +static u_int +ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr) +{ + u_int next; + + ahc_outb(ahc, SCBPTR, scbptr); + next = ahc_inb(ahc, SCB_NEXT); + + ahc_outb(ahc, SCB_CONTROL, 0); + + ahc_add_curscb_to_free_list(ahc); + + if (prev != SCB_LIST_NULL) { + ahc_outb(ahc, SCBPTR, prev); + ahc_outb(ahc, SCB_NEXT, next); + } else + ahc_outb(ahc, DISCONNECTED_SCBH, next); + + return (next); +} + +/* + * Add the SCB as selected by SCBPTR onto the on chip list of + * free hardware SCBs. This list is empty/unused if we are not + * performing SCB paging. + */ +static void +ahc_add_curscb_to_free_list(struct ahc_softc *ahc) +{ + /* + * Invalidate the tag so that our abort + * routines don't think it's active. + */ + ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); + + if ((ahc->flags & AHC_PAGESCBS) != 0) { + ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH)); + ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR)); + } +} + +/* + * Manipulate the waiting for selection list and return the + * scb that follows the one that we remove. + */ +static u_int +ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) +{ + u_int curscb, next; + + /* + * Select the SCB we want to abort and + * pull the next pointer out of it. 
+ */ + curscb = ahc_inb(ahc, SCBPTR); + ahc_outb(ahc, SCBPTR, scbpos); + next = ahc_inb(ahc, SCB_NEXT); + + /* Clear the necessary fields */ + ahc_outb(ahc, SCB_CONTROL, 0); + + ahc_add_curscb_to_free_list(ahc); + + /* update the waiting list */ + if (prev == SCB_LIST_NULL) { + /* First in the list */ + ahc_outb(ahc, WAITING_SCBH, next); + + /* + * Ensure we aren't attempting to perform + * selection for this entry. + */ + ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); + } else { + /* + * Select the scb that pointed to us + * and update its next pointer. + */ + ahc_outb(ahc, SCBPTR, prev); + ahc_outb(ahc, SCB_NEXT, next); + } + + /* + * Point us back at the original scb position. + */ + ahc_outb(ahc, SCBPTR, curscb); + return next; +} + +/******************************** Error Handling ******************************/ +/* + * Abort all SCBs that match the given description (target/channel/lun/tag), + * setting their status to the passed in status if the status has not already + * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer + * is paused before it is called. + */ +static int +ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, + int lun, u_int tag, role_t role, uint32_t status) +{ + struct scb *scbp; + struct scb *scbp_next; + u_int active_scb; + int i, j; + int maxtarget; + int minlun; + int maxlun; + + int found; + + /* + * Don't attempt to run any queued untagged transactions + * until we are done with the abort process. + */ + ahc_freeze_untagged_queues(ahc); + + /* restore this when we're done */ + active_scb = ahc_inb(ahc, SCBPTR); + + found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL, + role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); + + /* + * Clean out the busy target table for any untagged commands. + */ + i = 0; + maxtarget = 16; + if (target != CAM_TARGET_WILDCARD) { + i = target; + if (channel == 'B') + i += 8; + maxtarget = i + 1; + } + + if (lun == CAM_LUN_WILDCARD) { + + /* + * Unless we are using an SCB based + * busy targets table, there is only + * one table entry for all luns of + * a target. + */ + minlun = 0; + maxlun = 1; + if ((ahc->flags & AHC_SCB_BTT) != 0) + maxlun = AHC_NUM_LUNS; + } else { + minlun = lun; + maxlun = lun + 1; + } + + if (role != ROLE_TARGET) { + for (;i < maxtarget; i++) { + for (j = minlun;j < maxlun; j++) { + u_int scbid; + u_int tcl; + + tcl = BUILD_TCL(i << 4, j); + scbid = ahc_index_busy_tcl(ahc, tcl); + scbp = ahc_lookup_scb(ahc, scbid); + if (scbp == NULL + || ahc_match_scb(ahc, scbp, target, channel, + lun, tag, role) == 0) + continue; + ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j)); + } + } + + /* + * Go through the disconnected list and remove any entries we + * have queued for completion, 0'ing their control byte too. + * We save the active SCB and restore it ourselves, so there + * is no reason for this search to restore it too. + */ + ahc_search_disc_list(ahc, target, channel, lun, tag, + /*stop_on_first*/FALSE, /*remove*/TRUE, + /*save_state*/FALSE); + } + + /* + * Go through the hardware SCB array looking for commands that + * were active but not on any list. In some cases, these remnants + * might not still have mappings in the scbindex array (e.g. unexpected + * bus free with the same scb queued for an abort). Don't hold this + * against them. 
+ */ + for (i = 0; i < ahc->scb_data->maxhscbs; i++) { + u_int scbid; + + ahc_outb(ahc, SCBPTR, i); + scbid = ahc_inb(ahc, SCB_TAG); + scbp = ahc_lookup_scb(ahc, scbid); + if ((scbp == NULL && scbid != SCB_LIST_NULL) + || (scbp != NULL + && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))) + ahc_add_curscb_to_free_list(ahc); + } + + /* + * Go through the pending CCB list and look for + * commands for this target that are still active. + * These are other tagged commands that were + * disconnected when the reset occurred. + */ + scbp_next = LIST_FIRST(&ahc->pending_scbs); + while (scbp_next != NULL) { + scbp = scbp_next; + scbp_next = LIST_NEXT(scbp, pending_links); + if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) { + cam_status ostat; + + ostat = ahc_get_transaction_status(scbp); + if (ostat == CAM_REQ_INPROG) + ahc_set_transaction_status(scbp, status); + if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP) + ahc_freeze_scb(scbp); + if ((scbp->flags & SCB_ACTIVE) == 0) + printk("Inactive SCB on pending list\n"); + ahc_done(ahc, scbp); + found++; + } + } + ahc_outb(ahc, SCBPTR, active_scb); + ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status); + ahc_release_untagged_queues(ahc); + return found; +} + +static void +ahc_reset_current_bus(struct ahc_softc *ahc) +{ + uint8_t scsiseq; + + ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST); + scsiseq = ahc_inb(ahc, SCSISEQ); + ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO); + ahc_flush_device_writes(ahc); + ahc_delay(AHC_BUSRESET_DELAY); + /* Turn off the bus reset */ + ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO); + + ahc_clear_intstat(ahc); + + /* Re-enable reset interrupts */ + ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST); +} + +int +ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset) +{ + struct ahc_devinfo devinfo; + u_int initiator, target, max_scsiid; + u_int sblkctl; + u_int scsiseq; + u_int simode1; + int found; + int restart_needed; + char cur_channel; + + ahc->pending_device = NULL; + + ahc_compile_devinfo(&devinfo, + CAM_TARGET_WILDCARD, + CAM_TARGET_WILDCARD, + CAM_LUN_WILDCARD, + channel, ROLE_UNKNOWN); + ahc_pause(ahc); + + /* Make sure the sequencer is in a safe location. */ + ahc_clear_critical_section(ahc); + + /* + * Run our command complete fifos to ensure that we perform + * completion processing on any commands that 'completed' + * before the reset occurred. + */ + ahc_run_qoutfifo(ahc); +#ifdef AHC_TARGET_MODE + /* + * XXX - In Twin mode, the tqinfifo may have commands + * for an unaffected channel in it. However, if + * we have run out of ATIO resources to drain that + * queue, we may not get them all out here. Further, + * the blocked transactions for the reset channel + * should just be killed off, irrespecitve of whether + * we are blocked on ATIO resources. Write a routine + * to compact the tqinfifo appropriately. + */ + if ((ahc->flags & AHC_TARGETROLE) != 0) { + ahc_run_tqinfifo(ahc, /*paused*/TRUE); + } +#endif + + /* + * Reset the bus if we are initiating this reset + */ + sblkctl = ahc_inb(ahc, SBLKCTL); + cur_channel = 'A'; + if ((ahc->features & AHC_TWIN) != 0 + && ((sblkctl & SELBUSB) != 0)) + cur_channel = 'B'; + scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); + if (cur_channel != channel) { + /* Case 1: Command for another bus is active + * Stealthily reset the other bus without + * upsetting the current bus. 
+ */ + ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); + simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); +#ifdef AHC_TARGET_MODE + /* + * Bus resets clear ENSELI, so we cannot + * defer re-enabling bus reset interrupts + * if we are in target mode. + */ + if ((ahc->flags & AHC_TARGETROLE) != 0) + simode1 |= ENSCSIRST; +#endif + ahc_outb(ahc, SIMODE1, simode1); + if (initiate_reset) + ahc_reset_current_bus(ahc); + ahc_clear_intstat(ahc); + ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); + ahc_outb(ahc, SBLKCTL, sblkctl); + restart_needed = FALSE; + } else { + /* Case 2: A command from this bus is active or we're idle */ + simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); +#ifdef AHC_TARGET_MODE + /* + * Bus resets clear ENSELI, so we cannot + * defer re-enabling bus reset interrupts + * if we are in target mode. + */ + if ((ahc->flags & AHC_TARGETROLE) != 0) + simode1 |= ENSCSIRST; +#endif + ahc_outb(ahc, SIMODE1, simode1); + if (initiate_reset) + ahc_reset_current_bus(ahc); + ahc_clear_intstat(ahc); + ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); + restart_needed = TRUE; + } + + /* + * Clean up all the state information for the + * pending transactions on this bus. + */ + found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel, + CAM_LUN_WILDCARD, SCB_LIST_NULL, + ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); + + max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7; + +#ifdef AHC_TARGET_MODE + /* + * Send an immediate notify ccb to all target more peripheral + * drivers affected by this action. + */ + for (target = 0; target <= max_scsiid; target++) { + struct ahc_tmode_tstate* tstate; + u_int lun; + + tstate = ahc->enabled_targets[target]; + if (tstate == NULL) + continue; + for (lun = 0; lun < AHC_NUM_LUNS; lun++) { + struct ahc_tmode_lstate* lstate; + + lstate = tstate->enabled_luns[lun]; + if (lstate == NULL) + continue; + + ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD, + EVENT_TYPE_BUS_RESET, /*arg*/0); + ahc_send_lstate_events(ahc, lstate); + } + } +#endif + /* Notify the XPT that a bus reset occurred */ + ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD, + CAM_LUN_WILDCARD, AC_BUS_RESET); + + /* + * Revert to async/narrow transfers until we renegotiate. + */ + for (target = 0; target <= max_scsiid; target++) { + + if (ahc->enabled_targets[target] == NULL) + continue; + for (initiator = 0; initiator <= max_scsiid; initiator++) { + struct ahc_devinfo devinfo; + + ahc_compile_devinfo(&devinfo, target, initiator, + CAM_LUN_WILDCARD, + channel, ROLE_UNKNOWN); + ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHC_TRANS_CUR, /*paused*/TRUE); + ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, + /*period*/0, /*offset*/0, + /*ppr_options*/0, AHC_TRANS_CUR, + /*paused*/TRUE); + } + } + + if (restart_needed) + ahc_restart(ahc); + else + ahc_unpause(ahc); + return found; +} + + +/***************************** Residual Processing ****************************/ +/* + * Calculate the residual for a just completed SCB. + */ +static void +ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb) +{ + struct hardware_scb *hscb; + struct status_pkt *spkt; + uint32_t sgptr; + uint32_t resid_sgptr; + uint32_t resid; + + /* + * 5 cases. + * 1) No residual. + * SG_RESID_VALID clear in sgptr. + * 2) Transferless command + * 3) Never performed any transfers. + * sgptr has SG_FULL_RESID set. + * 4) No residual but target did not + * save data pointers after the + * last transfer, so sgptr was + * never updated. + * 5) We have a partial residual. 
+ * Use residual_sgptr to determine + * where we are. + */ + + hscb = scb->hscb; + sgptr = ahc_le32toh(hscb->sgptr); + if ((sgptr & SG_RESID_VALID) == 0) + /* Case 1 */ + return; + sgptr &= ~SG_RESID_VALID; + + if ((sgptr & SG_LIST_NULL) != 0) + /* Case 2 */ + return; + + spkt = &hscb->shared_data.status; + resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr); + if ((sgptr & SG_FULL_RESID) != 0) { + /* Case 3 */ + resid = ahc_get_transfer_length(scb); + } else if ((resid_sgptr & SG_LIST_NULL) != 0) { + /* Case 4 */ + return; + } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { + panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); + } else { + struct ahc_dma_seg *sg; + + /* + * Remainder of the SG where the transfer + * stopped. + */ + resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK; + sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK); + + /* The residual sg_ptr always points to the next sg */ + sg--; + + /* + * Add up the contents of all residual + * SG segments that are after the SG where + * the transfer stopped. + */ + while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) { + sg++; + resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; + } + } + if ((scb->flags & SCB_SENSE) == 0) + ahc_set_residual(scb, resid); + else + ahc_set_sense_residual(scb, resid); + +#ifdef AHC_DEBUG + if ((ahc_debug & AHC_SHOW_MISC) != 0) { + ahc_print_path(ahc, scb); + printk("Handled %sResidual of %d bytes\n", + (scb->flags & SCB_SENSE) ? "Sense " : "", resid); + } +#endif +} + +/******************************* Target Mode **********************************/ +#ifdef AHC_TARGET_MODE +/* + * Add a target mode event to this lun's queue + */ +static void +ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, + u_int initiator_id, u_int event_type, u_int event_arg) +{ + struct ahc_tmode_event *event; + int pending; + + xpt_freeze_devq(lstate->path, /*count*/1); + if (lstate->event_w_idx >= lstate->event_r_idx) + pending = lstate->event_w_idx - lstate->event_r_idx; + else + pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1 + - (lstate->event_r_idx - lstate->event_w_idx); + + if (event_type == EVENT_TYPE_BUS_RESET + || event_type == TARGET_RESET) { + /* + * Any earlier events are irrelevant, so reset our buffer. + * This has the effect of allowing us to deal with reset + * floods (an external device holding down the reset line) + * without losing the event that is really interesting. + */ + lstate->event_r_idx = 0; + lstate->event_w_idx = 0; + xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); + } + + if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) { + xpt_print_path(lstate->path); + printk("immediate event %x:%x lost\n", + lstate->event_buffer[lstate->event_r_idx].event_type, + lstate->event_buffer[lstate->event_r_idx].event_arg); + lstate->event_r_idx++; + if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) + lstate->event_r_idx = 0; + xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); + } + + event = &lstate->event_buffer[lstate->event_w_idx]; + event->initiator_id = initiator_id; + event->event_type = event_type; + event->event_arg = event_arg; + lstate->event_w_idx++; + if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE) + lstate->event_w_idx = 0; +} + +/* + * Send any target mode events queued up waiting + * for immediate notify resources. 
+ */ +void +ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate) +{ + struct ccb_hdr *ccbh; + struct ccb_immed_notify *inot; + + while (lstate->event_r_idx != lstate->event_w_idx + && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { + struct ahc_tmode_event *event; + + event = &lstate->event_buffer[lstate->event_r_idx]; + SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); + inot = (struct ccb_immed_notify *)ccbh; + switch (event->event_type) { + case EVENT_TYPE_BUS_RESET: + ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; + break; + default: + ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; + inot->message_args[0] = event->event_type; + inot->message_args[1] = event->event_arg; + break; + } + inot->initiator_id = event->initiator_id; + inot->sense_len = 0; + xpt_done((union ccb *)inot); + lstate->event_r_idx++; + if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) + lstate->event_r_idx = 0; + } +} +#endif + +/******************** Sequencer Program Patching/Download *********************/ + +#ifdef AHC_DUMP_SEQ +void +ahc_dumpseq(struct ahc_softc* ahc) +{ + int i; + + ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); + ahc_outb(ahc, SEQADDR0, 0); + ahc_outb(ahc, SEQADDR1, 0); + for (i = 0; i < ahc->instruction_ram_size; i++) { + uint8_t ins_bytes[4]; + + ahc_insb(ahc, SEQRAM, ins_bytes, 4); + printk("0x%08x\n", ins_bytes[0] << 24 + | ins_bytes[1] << 16 + | ins_bytes[2] << 8 + | ins_bytes[3]); + } +} +#endif + +static int +ahc_loadseq(struct ahc_softc *ahc) +{ + struct cs cs_table[NUM_CRITICAL_SECTIONS]; + u_int begin_set[NUM_CRITICAL_SECTIONS]; + u_int end_set[NUM_CRITICAL_SECTIONS]; + const struct patch *cur_patch; + u_int cs_count; + u_int cur_cs; + u_int i; + u_int skip_addr; + u_int sg_prefetch_cnt; + int downloaded; + uint8_t download_consts[7]; + + /* + * Start out with 0 critical sections + * that apply to this firmware load. + */ + cs_count = 0; + cur_cs = 0; + memset(begin_set, 0, sizeof(begin_set)); + memset(end_set, 0, sizeof(end_set)); + + /* Setup downloadable constant table */ + download_consts[QOUTFIFO_OFFSET] = 0; + if (ahc->targetcmds != NULL) + download_consts[QOUTFIFO_OFFSET] += 32; + download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1; + download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1; + download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1); + sg_prefetch_cnt = ahc->pci_cachesize; + if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg))) + sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg); + download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt; + download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1); + download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1); + + cur_patch = patches; + downloaded = 0; + skip_addr = 0; + ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); + ahc_outb(ahc, SEQADDR0, 0); + ahc_outb(ahc, SEQADDR1, 0); + + for (i = 0; i < sizeof(seqprog)/4; i++) { + if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) { + /* + * Don't download this instruction as it + * is in a patch that was removed. + */ + continue; + } + + if (downloaded == ahc->instruction_ram_size) { + /* + * We're about to exceed the instruction + * storage capacity for this chip. Fail + * the load. + */ + printk("\n%s: Program too large for instruction memory " + "size of %d!\n", ahc_name(ahc), + ahc->instruction_ram_size); + return (ENOMEM); + } + + /* + * Move through the CS table until we find a CS + * that might apply to this instruction. 
+ */ + for (; cur_cs < NUM_CRITICAL_SECTIONS; cur_cs++) { + if (critical_sections[cur_cs].end <= i) { + if (begin_set[cs_count] == TRUE + && end_set[cs_count] == FALSE) { + cs_table[cs_count].end = downloaded; + end_set[cs_count] = TRUE; + cs_count++; + } + continue; + } + if (critical_sections[cur_cs].begin <= i + && begin_set[cs_count] == FALSE) { + cs_table[cs_count].begin = downloaded; + begin_set[cs_count] = TRUE; + } + break; + } + ahc_download_instr(ahc, i, download_consts); + downloaded++; + } + + ahc->num_critical_sections = cs_count; + if (cs_count != 0) { + + cs_count *= sizeof(struct cs); + ahc->critical_sections = kmemdup(cs_table, cs_count, GFP_ATOMIC); + if (ahc->critical_sections == NULL) + panic("ahc_loadseq: Could not malloc"); + } + ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE); + + if (bootverbose) { + printk(" %d instructions downloaded\n", downloaded); + printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n", + ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags); + } + return (0); +} + +static int +ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch, + u_int start_instr, u_int *skip_addr) +{ + const struct patch *cur_patch; + const struct patch *last_patch; + u_int num_patches; + + num_patches = ARRAY_SIZE(patches); + last_patch = &patches[num_patches]; + cur_patch = *start_patch; + + while (cur_patch < last_patch && start_instr == cur_patch->begin) { + + if (cur_patch->patch_func(ahc) == 0) { + + /* Start rejecting code */ + *skip_addr = start_instr + cur_patch->skip_instr; + cur_patch += cur_patch->skip_patch; + } else { + /* Accepted this patch. Advance to the next + * one and wait for our intruction pointer to + * hit this point. + */ + cur_patch++; + } + } + + *start_patch = cur_patch; + if (start_instr < *skip_addr) + /* Still skipping */ + return (0); + + return (1); +} + +static void +ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts) +{ + union ins_formats instr; + struct ins_format1 *fmt1_ins; + struct ins_format3 *fmt3_ins; + u_int opcode; + + /* + * The firmware is always compiled into a little endian format. + */ + instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); + + fmt1_ins = &instr.format1; + fmt3_ins = NULL; + + /* Pull the opcode */ + opcode = instr.format1.opcode; + switch (opcode) { + case AIC_OP_JMP: + case AIC_OP_JC: + case AIC_OP_JNC: + case AIC_OP_CALL: + case AIC_OP_JNE: + case AIC_OP_JNZ: + case AIC_OP_JE: + case AIC_OP_JZ: + { + const struct patch *cur_patch; + int address_offset; + u_int address; + u_int skip_addr; + u_int i; + + fmt3_ins = &instr.format3; + address_offset = 0; + address = fmt3_ins->address; + cur_patch = patches; + skip_addr = 0; + + for (i = 0; i < address;) { + + ahc_check_patch(ahc, &cur_patch, i, &skip_addr); + + if (skip_addr > i) { + int end_addr; + + end_addr = min(address, skip_addr); + address_offset += end_addr - i; + i = skip_addr; + } else { + i++; + } + } + address -= address_offset; + fmt3_ins->address = address; + } + fallthrough; + case AIC_OP_OR: + case AIC_OP_AND: + case AIC_OP_XOR: + case AIC_OP_ADD: + case AIC_OP_ADC: + case AIC_OP_BMOV: + if (fmt1_ins->parity != 0) { + fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; + } + fmt1_ins->parity = 0; + if ((ahc->features & AHC_CMD_CHAN) == 0 + && opcode == AIC_OP_BMOV) { + /* + * Block move was added at the same time + * as the command channel. Verify that + * this is only a move of a single element + * and convert the BMOV to a MOV + * (AND with an immediate of FF). 
+ */ + if (fmt1_ins->immediate != 1) + panic("%s: BMOV not supported\n", + ahc_name(ahc)); + fmt1_ins->opcode = AIC_OP_AND; + fmt1_ins->immediate = 0xff; + } + fallthrough; + case AIC_OP_ROL: + if ((ahc->features & AHC_ULTRA2) != 0) { + int i, count; + + /* Calculate odd parity for the instruction */ + for (i = 0, count = 0; i < 31; i++) { + uint32_t mask; + + mask = 0x01 << i; + if ((instr.integer & mask) != 0) + count++; + } + if ((count & 0x01) == 0) + instr.format1.parity = 1; + } else { + /* Compress the instruction for older sequencers */ + if (fmt3_ins != NULL) { + instr.integer = + fmt3_ins->immediate + | (fmt3_ins->source << 8) + | (fmt3_ins->address << 16) + | (fmt3_ins->opcode << 25); + } else { + instr.integer = + fmt1_ins->immediate + | (fmt1_ins->source << 8) + | (fmt1_ins->destination << 16) + | (fmt1_ins->ret << 24) + | (fmt1_ins->opcode << 25); + } + } + /* The sequencer is a little endian cpu */ + instr.integer = ahc_htole32(instr.integer); + ahc_outsb(ahc, SEQRAM, instr.bytes, 4); + break; + default: + panic("Unknown opcode encountered in seq program"); + break; + } +} + +int +ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries, + const char *name, u_int address, u_int value, + u_int *cur_column, u_int wrap_point) +{ + int printed; + u_int printed_mask; + + if (cur_column != NULL && *cur_column >= wrap_point) { + printk("\n"); + *cur_column = 0; + } + printed = printk("%s[0x%x]", name, value); + if (table == NULL) { + printed += printk(" "); + *cur_column += printed; + return (printed); + } + printed_mask = 0; + while (printed_mask != 0xFF) { + int entry; + + for (entry = 0; entry < num_entries; entry++) { + if (((value & table[entry].mask) + != table[entry].value) + || ((printed_mask & table[entry].mask) + == table[entry].mask)) + continue; + + printed += printk("%s%s", + printed_mask == 0 ? 
":(" : "|", + table[entry].name); + printed_mask |= table[entry].mask; + break; + } + if (entry >= num_entries) + break; + } + if (printed_mask != 0) + printed += printk(") "); + else + printed += printk(" "); + if (cur_column != NULL) + *cur_column += printed; + return (printed); +} + +void +ahc_dump_card_state(struct ahc_softc *ahc) +{ + struct scb *scb; + struct scb_tailq *untagged_q; + u_int cur_col; + int paused; + int target; + int maxtarget; + int i; + uint8_t last_phase; + uint8_t qinpos; + uint8_t qintail; + uint8_t qoutpos; + uint8_t scb_index; + uint8_t saved_scbptr; + + if (ahc_is_paused(ahc)) { + paused = 1; + } else { + paused = 0; + ahc_pause(ahc); + } + + saved_scbptr = ahc_inb(ahc, SCBPTR); + last_phase = ahc_inb(ahc, LASTPHASE); + printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" + "%s: Dumping Card State %s, at SEQADDR 0x%x\n", + ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg, + ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); + if (paused) + printk("Card was paused\n"); + printk("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n", + ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX), + ahc_inb(ahc, ARG_2)); + printk("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT), + ahc_inb(ahc, SCBPTR)); + cur_col = 0; + if ((ahc->features & AHC_DT) != 0) + ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50); + ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50); + ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50); + ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50); + ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50); + ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50); + ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50); + ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50); + ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50); + ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50); + ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50); + ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50); + ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50); + ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50); + ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50); + ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50); + ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50); + ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50); + ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50); + if (cur_col != 0) + printk("\n"); + printk("STACK:"); + for (i = 0; i < STACK_SIZE; i++) + printk(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8)); + printk("\nSCB count = %d\n", ahc->scb_data->numscbs); + printk("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag); + printk("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB)); + /* QINFIFO */ + printk("QINFIFO entries: "); + if ((ahc->features & AHC_QUEUE_REGS) != 0) { + qinpos = ahc_inb(ahc, SNSCB_QOFF); + ahc_outb(ahc, SNSCB_QOFF, qinpos); + } else + qinpos = ahc_inb(ahc, QINPOS); + qintail = ahc->qinfifonext; + while (qinpos != qintail) { + printk("%d ", ahc->qinfifo[qinpos]); + qinpos++; + } + printk("\n"); + + printk("Waiting Queue entries: "); + scb_index = ahc_inb(ahc, WAITING_SCBH); + i = 0; + while (scb_index != SCB_LIST_NULL && i++ < 256) { + ahc_outb(ahc, SCBPTR, scb_index); + printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); + scb_index = ahc_inb(ahc, SCB_NEXT); + } + printk("\n"); + + printk("Disconnected Queue entries: "); + scb_index = ahc_inb(ahc, DISCONNECTED_SCBH); + i = 0; + while (scb_index 
!= SCB_LIST_NULL && i++ < 256) { + ahc_outb(ahc, SCBPTR, scb_index); + printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); + scb_index = ahc_inb(ahc, SCB_NEXT); + } + printk("\n"); + + ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); + printk("QOUTFIFO entries: "); + qoutpos = ahc->qoutfifonext; + i = 0; + while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) { + printk("%d ", ahc->qoutfifo[qoutpos]); + qoutpos++; + } + printk("\n"); + + printk("Sequencer Free SCB List: "); + scb_index = ahc_inb(ahc, FREE_SCBH); + i = 0; + while (scb_index != SCB_LIST_NULL && i++ < 256) { + ahc_outb(ahc, SCBPTR, scb_index); + printk("%d ", scb_index); + scb_index = ahc_inb(ahc, SCB_NEXT); + } + printk("\n"); + + printk("Sequencer SCB Info: "); + for (i = 0; i < ahc->scb_data->maxhscbs; i++) { + ahc_outb(ahc, SCBPTR, i); + cur_col = printk("\n%3d ", i); + + ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); + ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60); + ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60); + ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); + } + printk("\n"); + + printk("Pending list: "); + i = 0; + LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { + if (i++ > 256) + break; + cur_col = printk("\n%3d ", scb->hscb->tag); + ahc_scb_control_print(scb->hscb->control, &cur_col, 60); + ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60); + ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60); + if ((ahc->flags & AHC_PAGESCBS) == 0) { + ahc_outb(ahc, SCBPTR, scb->hscb->tag); + printk("("); + ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), + &cur_col, 60); + ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); + printk(")"); + } + } + printk("\n"); + + printk("Kernel Free SCB list: "); + i = 0; + SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) { + if (i++ > 256) + break; + printk("%d ", scb->hscb->tag); + } + printk("\n"); + + maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7; + for (target = 0; target <= maxtarget; target++) { + untagged_q = &ahc->untagged_queues[target]; + if (TAILQ_FIRST(untagged_q) == NULL) + continue; + printk("Untagged Q(%d): ", target); + i = 0; + TAILQ_FOREACH(scb, untagged_q, links.tqe) { + if (i++ > 256) + break; + printk("%d ", scb->hscb->tag); + } + printk("\n"); + } + + printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); + ahc_outb(ahc, SCBPTR, saved_scbptr); + if (paused == 0) + ahc_unpause(ahc); +} + +/************************* Target Mode ****************************************/ +#ifdef AHC_TARGET_MODE +cam_status +ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, + struct ahc_tmode_tstate **tstate, + struct ahc_tmode_lstate **lstate, + int notfound_failure) +{ + + if ((ahc->features & AHC_TARGETMODE) == 0) + return (CAM_REQ_INVALID); + + /* + * Handle the 'black hole' device that sucks up + * requests to unattached luns on enabled targets. + */ + if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD + && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { + *tstate = NULL; + *lstate = ahc->black_hole; + } else { + u_int max_id; + + max_id = (ahc->features & AHC_WIDE) ? 
16 : 8; + if (ccb->ccb_h.target_id >= max_id) + return (CAM_TID_INVALID); + + if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS) + return (CAM_LUN_INVALID); + + *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; + *lstate = NULL; + if (*tstate != NULL) + *lstate = + (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; + } + + if (notfound_failure != 0 && *lstate == NULL) + return (CAM_PATH_INVALID); + + return (CAM_REQ_CMP); +} + +void +ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) +{ + struct ahc_tmode_tstate *tstate; + struct ahc_tmode_lstate *lstate; + struct ccb_en_lun *cel; + cam_status status; + u_long s; + u_int target; + u_int lun; + u_int target_mask; + u_int our_id; + int error; + char channel; + + status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, + /*notfound_failure*/FALSE); + + if (status != CAM_REQ_CMP) { + ccb->ccb_h.status = status; + return; + } + + if (cam_sim_bus(sim) == 0) + our_id = ahc->our_id; + else + our_id = ahc->our_id_b; + + if (ccb->ccb_h.target_id != our_id) { + /* + * our_id represents our initiator ID, or + * the ID of the first target to have an + * enabled lun in target mode. There are + * two cases that may preclude enabling a + * target id other than our_id. + * + * o our_id is for an active initiator role. + * Since the hardware does not support + * reselections to the initiator role at + * anything other than our_id, and our_id + * is used by the hardware to indicate the + * ID to use for both select-out and + * reselect-out operations, the only target + * ID we can support in this mode is our_id. + * + * o The MULTARGID feature is not available and + * a previous target mode ID has been enabled. + */ + if ((ahc->features & AHC_MULTIROLE) != 0) { + + if ((ahc->features & AHC_MULTI_TID) != 0 + && (ahc->flags & AHC_INITIATORROLE) != 0) { + /* + * Only allow additional targets if + * the initiator role is disabled. + * The hardware cannot handle a re-select-in + * on the initiator id during a re-select-out + * on a different target id. + */ + status = CAM_TID_INVALID; + } else if ((ahc->flags & AHC_INITIATORROLE) != 0 + || ahc->enabled_luns > 0) { + /* + * Only allow our target id to change + * if the initiator role is not configured + * and there are no enabled luns which + * are attached to the currently registered + * scsi id. + */ + status = CAM_TID_INVALID; + } + } else if ((ahc->features & AHC_MULTI_TID) == 0 + && ahc->enabled_luns > 0) { + + status = CAM_TID_INVALID; + } + } + + if (status != CAM_REQ_CMP) { + ccb->ccb_h.status = status; + return; + } + + /* + * We now have an id that is valid. + * If we aren't in target mode, switch modes. + */ + if ((ahc->flags & AHC_TARGETROLE) == 0 + && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { + u_long s; + ahc_flag saved_flags; + + printk("Configuring Target Mode\n"); + ahc_lock(ahc, &s); + if (LIST_FIRST(&ahc->pending_scbs) != NULL) { + ccb->ccb_h.status = CAM_BUSY; + ahc_unlock(ahc, &s); + return; + } + saved_flags = ahc->flags; + ahc->flags |= AHC_TARGETROLE; + if ((ahc->features & AHC_MULTIROLE) == 0) + ahc->flags &= ~AHC_INITIATORROLE; + ahc_pause(ahc); + error = ahc_loadseq(ahc); + if (error != 0) { + /* + * Restore original configuration and notify + * the caller that we cannot support target mode. + * Since the adapter started out in this + * configuration, the firmware load will succeed, + * so there is no point in checking ahc_loadseq's + * return value. 
+ */ + ahc->flags = saved_flags; + (void)ahc_loadseq(ahc); + ahc_restart(ahc); + ahc_unlock(ahc, &s); + ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; + return; + } + ahc_restart(ahc); + ahc_unlock(ahc, &s); + } + cel = &ccb->cel; + target = ccb->ccb_h.target_id; + lun = ccb->ccb_h.target_lun; + channel = SIM_CHANNEL(ahc, sim); + target_mask = 0x01 << target; + if (channel == 'B') + target_mask <<= 8; + + if (cel->enable != 0) { + u_int scsiseq; + + /* Are we already enabled?? */ + if (lstate != NULL) { + xpt_print_path(ccb->ccb_h.path); + printk("Lun already enabled\n"); + ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; + return; + } + + if (cel->grp6_len != 0 + || cel->grp7_len != 0) { + /* + * Don't (yet?) support vendor + * specific commands. + */ + ccb->ccb_h.status = CAM_REQ_INVALID; + printk("Non-zero Group Codes\n"); + return; + } + + /* + * Seems to be okay. + * Setup our data structures. + */ + if (target != CAM_TARGET_WILDCARD && tstate == NULL) { + tstate = ahc_alloc_tstate(ahc, target, channel); + if (tstate == NULL) { + xpt_print_path(ccb->ccb_h.path); + printk("Couldn't allocate tstate\n"); + ccb->ccb_h.status = CAM_RESRC_UNAVAIL; + return; + } + } + lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC); + if (lstate == NULL) { + xpt_print_path(ccb->ccb_h.path); + printk("Couldn't allocate lstate\n"); + ccb->ccb_h.status = CAM_RESRC_UNAVAIL; + return; + } + status = xpt_create_path(&lstate->path, /*periph*/NULL, + xpt_path_path_id(ccb->ccb_h.path), + xpt_path_target_id(ccb->ccb_h.path), + xpt_path_lun_id(ccb->ccb_h.path)); + if (status != CAM_REQ_CMP) { + kfree(lstate); + xpt_print_path(ccb->ccb_h.path); + printk("Couldn't allocate path\n"); + ccb->ccb_h.status = CAM_RESRC_UNAVAIL; + return; + } + SLIST_INIT(&lstate->accept_tios); + SLIST_INIT(&lstate->immed_notifies); + ahc_lock(ahc, &s); + ahc_pause(ahc); + if (target != CAM_TARGET_WILDCARD) { + tstate->enabled_luns[lun] = lstate; + ahc->enabled_luns++; + + if ((ahc->features & AHC_MULTI_TID) != 0) { + u_int targid_mask; + + targid_mask = ahc_inb(ahc, TARGID) + | (ahc_inb(ahc, TARGID + 1) << 8); + + targid_mask |= target_mask; + ahc_outb(ahc, TARGID, targid_mask); + ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); + ahc_update_scsiid(ahc, targid_mask); + } else { + u_int our_id; + char channel; + + channel = SIM_CHANNEL(ahc, sim); + our_id = SIM_SCSI_ID(ahc, sim); + + /* + * This can only happen if selections + * are not enabled + */ + if (target != our_id) { + u_int sblkctl; + char cur_channel; + int swap; + + sblkctl = ahc_inb(ahc, SBLKCTL); + cur_channel = (sblkctl & SELBUSB) + ? 
'B' : 'A'; + if ((ahc->features & AHC_TWIN) == 0) + cur_channel = 'A'; + swap = cur_channel != channel; + if (channel == 'A') + ahc->our_id = target; + else + ahc->our_id_b = target; + + if (swap) + ahc_outb(ahc, SBLKCTL, + sblkctl ^ SELBUSB); + + ahc_outb(ahc, SCSIID, target); + + if (swap) + ahc_outb(ahc, SBLKCTL, sblkctl); + } + } + } else + ahc->black_hole = lstate; + /* Allow select-in operations */ + if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { + scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); + scsiseq |= ENSELI; + ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); + scsiseq = ahc_inb(ahc, SCSISEQ); + scsiseq |= ENSELI; + ahc_outb(ahc, SCSISEQ, scsiseq); + } + ahc_unpause(ahc); + ahc_unlock(ahc, &s); + ccb->ccb_h.status = CAM_REQ_CMP; + xpt_print_path(ccb->ccb_h.path); + printk("Lun now enabled for target mode\n"); + } else { + struct scb *scb; + int i, empty; + + if (lstate == NULL) { + ccb->ccb_h.status = CAM_LUN_INVALID; + return; + } + + ahc_lock(ahc, &s); + + ccb->ccb_h.status = CAM_REQ_CMP; + LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { + struct ccb_hdr *ccbh; + + ccbh = &scb->io_ctx->ccb_h; + if (ccbh->func_code == XPT_CONT_TARGET_IO + && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ + printk("CTIO pending\n"); + ccb->ccb_h.status = CAM_REQ_INVALID; + ahc_unlock(ahc, &s); + return; + } + } + + if (SLIST_FIRST(&lstate->accept_tios) != NULL) { + printk("ATIOs pending\n"); + ccb->ccb_h.status = CAM_REQ_INVALID; + } + + if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { + printk("INOTs pending\n"); + ccb->ccb_h.status = CAM_REQ_INVALID; + } + + if (ccb->ccb_h.status != CAM_REQ_CMP) { + ahc_unlock(ahc, &s); + return; + } + + xpt_print_path(ccb->ccb_h.path); + printk("Target mode disabled\n"); + xpt_free_path(lstate->path); + kfree(lstate); + + ahc_pause(ahc); + /* Can we clean up the target too? */ + if (target != CAM_TARGET_WILDCARD) { + tstate->enabled_luns[lun] = NULL; + ahc->enabled_luns--; + for (empty = 1, i = 0; i < 8; i++) + if (tstate->enabled_luns[i] != NULL) { + empty = 0; + break; + } + + if (empty) { + ahc_free_tstate(ahc, target, channel, + /*force*/FALSE); + if (ahc->features & AHC_MULTI_TID) { + u_int targid_mask; + + targid_mask = ahc_inb(ahc, TARGID) + | (ahc_inb(ahc, TARGID + 1) + << 8); + + targid_mask &= ~target_mask; + ahc_outb(ahc, TARGID, targid_mask); + ahc_outb(ahc, TARGID+1, + (targid_mask >> 8)); + ahc_update_scsiid(ahc, targid_mask); + } + } + } else { + + ahc->black_hole = NULL; + + /* + * We can't allow selections without + * our black hole device. + */ + empty = TRUE; + } + if (ahc->enabled_luns == 0) { + /* Disallow select-in */ + u_int scsiseq; + + scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); + scsiseq &= ~ENSELI; + ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); + scsiseq = ahc_inb(ahc, SCSISEQ); + scsiseq &= ~ENSELI; + ahc_outb(ahc, SCSISEQ, scsiseq); + + if ((ahc->features & AHC_MULTIROLE) == 0) { + printk("Configuring Initiator Mode\n"); + ahc->flags &= ~AHC_TARGETROLE; + ahc->flags |= AHC_INITIATORROLE; + /* + * Returning to a configuration that + * fit previously will always succeed. + */ + (void)ahc_loadseq(ahc); + ahc_restart(ahc); + /* + * Unpaused. The extra unpause + * that follows is harmless. 
+ */ + } + } + ahc_unpause(ahc); + ahc_unlock(ahc, &s); + } +} + +static void +ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask) +{ + u_int scsiid_mask; + u_int scsiid; + + if ((ahc->features & AHC_MULTI_TID) == 0) + panic("ahc_update_scsiid called on non-multitid unit\n"); + + /* + * Since we will rely on the TARGID mask + * for selection enables, ensure that OID + * in SCSIID is not set to some other ID + * that we don't want to allow selections on. + */ + if ((ahc->features & AHC_ULTRA2) != 0) + scsiid = ahc_inb(ahc, SCSIID_ULTRA2); + else + scsiid = ahc_inb(ahc, SCSIID); + scsiid_mask = 0x1 << (scsiid & OID); + if ((targid_mask & scsiid_mask) == 0) { + u_int our_id; + + /* ffs counts from 1 */ + our_id = ffs(targid_mask); + if (our_id == 0) + our_id = ahc->our_id; + else + our_id--; + scsiid &= TID; + scsiid |= our_id; + } + if ((ahc->features & AHC_ULTRA2) != 0) + ahc_outb(ahc, SCSIID_ULTRA2, scsiid); + else + ahc_outb(ahc, SCSIID, scsiid); +} + +static void +ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) +{ + struct target_cmd *cmd; + + /* + * If the card supports auto-access pause, + * we can access the card directly regardless + * of whether it is paused or not. + */ + if ((ahc->features & AHC_AUTOPAUSE) != 0) + paused = TRUE; + + ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD); + while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) { + + /* + * Only advance through the queue if we + * have the resources to process the command. + */ + if (ahc_handle_target_cmd(ahc, cmd) != 0) + break; + + cmd->cmd_valid = 0; + ahc_dmamap_sync(ahc, ahc->shared_data_dmat, + ahc->shared_data_dmamap, + ahc_targetcmd_offset(ahc, ahc->tqinfifonext), + sizeof(struct target_cmd), + BUS_DMASYNC_PREREAD); + ahc->tqinfifonext++; + + /* + * Lazily update our position in the target mode incoming + * command queue as seen by the sequencer. + */ + if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { + if ((ahc->features & AHC_HS_MAILBOX) != 0) { + u_int hs_mailbox; + + hs_mailbox = ahc_inb(ahc, HS_MAILBOX); + hs_mailbox &= ~HOST_TQINPOS; + hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS; + ahc_outb(ahc, HS_MAILBOX, hs_mailbox); + } else { + if (!paused) + ahc_pause(ahc); + ahc_outb(ahc, KERNEL_TQINPOS, + ahc->tqinfifonext & HOST_TQINPOS); + if (!paused) + ahc_unpause(ahc); + } + } + } +} + +static int +ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) +{ + struct ahc_tmode_tstate *tstate; + struct ahc_tmode_lstate *lstate; + struct ccb_accept_tio *atio; + uint8_t *byte; + int initiator; + int target; + int lun; + + initiator = SCSIID_TARGET(ahc, cmd->scsiid); + target = SCSIID_OUR_ID(cmd->scsiid); + lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); + + byte = cmd->bytes; + tstate = ahc->enabled_targets[target]; + lstate = NULL; + if (tstate != NULL) + lstate = tstate->enabled_luns[lun]; + + /* + * Commands for disabled luns go to the black hole driver. + */ + if (lstate == NULL) + lstate = ahc->black_hole; + + atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); + if (atio == NULL) { + ahc->flags |= AHC_TQINFIFO_BLOCKED; + /* + * Wait for more ATIOs from the peripheral driver for this lun. + */ + if (bootverbose) + printk("%s: ATIOs exhausted\n", ahc_name(ahc)); + return (1); + } else + ahc->flags &= ~AHC_TQINFIFO_BLOCKED; +#if 0 + printk("Incoming command from %d for %d:%d%s\n", + initiator, target, lun, + lstate == ahc->black_hole ? 
"(Black Holed)" : ""); +#endif + SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); + + if (lstate == ahc->black_hole) { + /* Fill in the wildcards */ + atio->ccb_h.target_id = target; + atio->ccb_h.target_lun = lun; + } + + /* + * Package it up and send it off to + * whomever has this lun enabled. + */ + atio->sense_len = 0; + atio->init_id = initiator; + if (byte[0] != 0xFF) { + /* Tag was included */ + atio->tag_action = *byte++; + atio->tag_id = *byte++; + atio->ccb_h.flags = CAM_TAG_ACTION_VALID; + } else { + atio->ccb_h.flags = 0; + } + byte++; + + /* Okay. Now determine the cdb size based on the command code */ + switch (*byte >> CMD_GROUP_CODE_SHIFT) { + case 0: + atio->cdb_len = 6; + break; + case 1: + case 2: + atio->cdb_len = 10; + break; + case 4: + atio->cdb_len = 16; + break; + case 5: + atio->cdb_len = 12; + break; + case 3: + default: + /* Only copy the opcode. */ + atio->cdb_len = 1; + printk("Reserved or VU command code type encountered\n"); + break; + } + + memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); + + atio->ccb_h.status |= CAM_CDB_RECVD; + + if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { + /* + * We weren't allowed to disconnect. + * We're hanging on the bus until a + * continue target I/O comes in response + * to this accept tio. + */ +#if 0 + printk("Received Immediate Command %d:%d:%d - %p\n", + initiator, target, lun, ahc->pending_device); +#endif + ahc->pending_device = lstate; + ahc_freeze_ccb((union ccb *)atio); + atio->ccb_h.flags |= CAM_DIS_DISCONNECT; + } + xpt_done((union ccb*)atio); + return (0); +} + +#endif diff --git a/drivers/scsi/aic7xxx/aic7xxx_inline.h b/drivers/scsi/aic7xxx/aic7xxx_inline.h new file mode 100644 index 000000000..0b57b783e --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7xxx_inline.h @@ -0,0 +1,97 @@ +/* + * Inline routines shareable across OS platforms. + * + * Copyright (c) 1994-2001 Justin T. Gibbs. + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_inline.h#43 $
+ *
+ * $FreeBSD$
+ */

+#ifndef _AIC7XXX_INLINE_H_
+#define _AIC7XXX_INLINE_H_
+
+/************************* Sequencer Execution Control ************************/
+int ahc_is_paused(struct ahc_softc *ahc);
+void ahc_pause(struct ahc_softc *ahc);
+void ahc_unpause(struct ahc_softc *ahc);
+
+/************************** Memory mapping routines ***************************/
+void ahc_sync_sglist(struct ahc_softc *ahc,
+ struct scb *scb, int op);
+
+/******************************** Debugging ***********************************/
+static inline char *ahc_name(struct ahc_softc *ahc);
+
+static inline char *ahc_name(struct ahc_softc *ahc)
+{
+ return (ahc->name);
+}
+
+/*********************** Miscellaneous Support Functions ***********************/
+
+struct ahc_initiator_tinfo *
+ ahc_fetch_transinfo(struct ahc_softc *ahc,
+ char channel, u_int our_id,
+ u_int remote_id,
+ struct ahc_tmode_tstate **tstate);
+uint16_t
+ ahc_inw(struct ahc_softc *ahc, u_int port);
+void ahc_outw(struct ahc_softc *ahc, u_int port,
+ u_int value);
+uint32_t
+ ahc_inl(struct ahc_softc *ahc, u_int port);
+void ahc_outl(struct ahc_softc *ahc, u_int port,
+ uint32_t value);
+uint64_t
+ ahc_inq(struct ahc_softc *ahc, u_int port);
+void ahc_outq(struct ahc_softc *ahc, u_int port,
+ uint64_t value);
+struct scb*
+ ahc_get_scb(struct ahc_softc *ahc);
+void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
+struct scb *
+ ahc_lookup_scb(struct ahc_softc *ahc, u_int tag);
+void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
+struct scsi_sense_data *
+ ahc_get_sense_buf(struct ahc_softc *ahc,
+ struct scb *scb);
+
+/************************** Interrupt Processing ******************************/
+int ahc_intr(struct ahc_softc *ahc);
+
+#endif /* _AIC7XXX_INLINE_H_ */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
new file mode 100644
index 000000000..d3b108265
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -0,0 +1,2577 @@
+
+/*
+ * Adaptec AIC7xxx device driver for Linux.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.c#235 $
+ *
+ * Copyright (c) 1994 John Aycock
+ * The University of Calgary Department of Computer Science.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ * + * Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F + * driver (ultrastor.c), various Linux kernel source, the Adaptec EISA + * config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide, + * the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux, + * the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file + * (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual, + * the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the + * ANSI SCSI-2 specification (draft 10c), ... + * + * -------------------------------------------------------------------------- + * + * Modifications by Daniel M. Eischen (deischen@iworks.InterWorks.org): + * + * Substantially modified to include support for wide and twin bus + * adapters, DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes, + * SCB paging, and other rework of the code. + * + * -------------------------------------------------------------------------- + * Copyright (c) 1994-2000 Justin T. Gibbs. + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + *--------------------------------------------------------------------------- + * + * Thanks also go to (in alphabetical order) the following: + * + * Rory Bolt - Sequencer bug fixes + * Jay Estabrook - Initial DEC Alpha support + * Doug Ledford - Much needed abort/reset bug fixes + * Kai Makisara - DMAing of SCBs + * + * A Boot time option was also added for not resetting the scsi bus. + * + * Form: aic7xxx=extended + * aic7xxx=no_reset + * aic7xxx=verbose + * + * Daniel M. 
Eischen, deischen@iworks.InterWorks.org, 1/23/97 + * + * Id: aic7xxx.c,v 4.1 1997/06/12 08:23:42 deang Exp + */ + +/* + * Further driver modifications made by Doug Ledford + * + * Copyright (c) 1997-1999 Doug Ledford + * + * These changes are released under the same licensing terms as the FreeBSD + * driver written by Justin Gibbs. Please see his Copyright notice above + * for the exact terms and conditions covering my changes as well as the + * warranty statement. + * + * Modifications made to the aic7xxx.c,v 4.1 driver from Dan Eischen include + * but are not limited to: + * + * 1: Import of the latest FreeBSD sequencer code for this driver + * 2: Modification of kernel code to accommodate different sequencer semantics + * 3: Extensive changes throughout kernel portion of driver to improve + * abort/reset processing and error hanndling + * 4: Other work contributed by various people on the Internet + * 5: Changes to printk information and verbosity selection code + * 6: General reliability related changes, especially in IRQ management + * 7: Modifications to the default probe/attach order for supported cards + * 8: SMP friendliness has been improved + * + */ + +#include "aic7xxx_osm.h" +#include "aic7xxx_inline.h" +#include + +static struct scsi_transport_template *ahc_linux_transport_template = NULL; + +#include /* __setup */ +#include /* For fetching system memory size */ +#include /* For block_size() */ +#include /* For ssleep/msleep */ +#include + + +/* + * Set this to the delay in seconds after SCSI bus reset. + * Note, we honor this only for the initial bus reset. + * The scsi error recovery code performs its own bus settle + * delay handling for error recovery actions. + */ +#ifdef CONFIG_AIC7XXX_RESET_DELAY_MS +#define AIC7XXX_RESET_DELAY CONFIG_AIC7XXX_RESET_DELAY_MS +#else +#define AIC7XXX_RESET_DELAY 5000 +#endif + +/* + * To change the default number of tagged transactions allowed per-device, + * add a line to the lilo.conf file like: + * append="aic7xxx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}" + * which will result in the first four devices on the first two + * controllers being set to a tagged queue depth of 32. + * + * The tag_commands is an array of 16 to allow for wide and twin adapters. + * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15 + * for channel 1. + */ +typedef struct { + uint8_t tag_commands[16]; /* Allow for wide/twin adapters. */ +} adapter_tag_info_t; + +/* + * Modify this as you see fit for your system. + * + * 0 tagged queuing disabled + * 1 <= n <= 253 n == max tags ever dispatched. + * + * The driver will throttle the number of commands dispatched to a + * device if it returns queue full. For devices with a fixed maximum + * queue depth, the driver will eventually determine this depth and + * lock it in (a console message is printed to indicate that a lock + * has occurred). On some devices, queue full is returned for a temporary + * resource shortage. These devices will return queue full at varying + * depths. The driver will throttle back when the queue fulls occur and + * attempt to slowly increase the depth over time as the device recovers + * from the resource shortage. + * + * In this example, the first line will disable tagged queueing for all + * the devices on the first probed aic7xxx adapter. + * + * The second line enables tagged queueing with 4 commands/LUN for IDs + * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the + * driver to attempt to use up to 64 tags for ID 1. 
+ * + * The third line is the same as the first line. + * + * The fourth line disables tagged queueing for devices 0 and 3. It + * enables tagged queueing for the other IDs, with 16 commands/LUN + * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for + * IDs 2, 5-7, and 9-15. + */ + +/* + * NOTE: The below structure is for reference only, the actual structure + * to modify in order to change things is just below this comment block. +adapter_tag_info_t aic7xxx_tag_info[] = +{ + {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + {{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}}, + {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + {{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}} +}; +*/ + +#ifdef CONFIG_AIC7XXX_CMDS_PER_DEVICE +#define AIC7XXX_CMDS_PER_DEVICE CONFIG_AIC7XXX_CMDS_PER_DEVICE +#else +#define AIC7XXX_CMDS_PER_DEVICE AHC_MAX_QUEUE +#endif + +#define AIC7XXX_CONFIGED_TAG_COMMANDS { \ + AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ + AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ + AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ + AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ + AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ + AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ + AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ + AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE \ +} + +/* + * By default, use the number of commands specified by + * the users kernel configuration. + */ +static adapter_tag_info_t aic7xxx_tag_info[] = +{ + {AIC7XXX_CONFIGED_TAG_COMMANDS}, + {AIC7XXX_CONFIGED_TAG_COMMANDS}, + {AIC7XXX_CONFIGED_TAG_COMMANDS}, + {AIC7XXX_CONFIGED_TAG_COMMANDS}, + {AIC7XXX_CONFIGED_TAG_COMMANDS}, + {AIC7XXX_CONFIGED_TAG_COMMANDS}, + {AIC7XXX_CONFIGED_TAG_COMMANDS}, + {AIC7XXX_CONFIGED_TAG_COMMANDS}, + {AIC7XXX_CONFIGED_TAG_COMMANDS}, + {AIC7XXX_CONFIGED_TAG_COMMANDS}, + {AIC7XXX_CONFIGED_TAG_COMMANDS}, + {AIC7XXX_CONFIGED_TAG_COMMANDS}, + {AIC7XXX_CONFIGED_TAG_COMMANDS}, + {AIC7XXX_CONFIGED_TAG_COMMANDS}, + {AIC7XXX_CONFIGED_TAG_COMMANDS}, + {AIC7XXX_CONFIGED_TAG_COMMANDS} +}; + +/* + * There should be a specific return value for this in scsi.h, but + * it seems that most drivers ignore it. + */ +#define DID_UNDERFLOW DID_ERROR + +void +ahc_print_path(struct ahc_softc *ahc, struct scb *scb) +{ + printk("(scsi%d:%c:%d:%d): ", + ahc->platform_data->host->host_no, + scb != NULL ? SCB_GET_CHANNEL(ahc, scb) : 'X', + scb != NULL ? SCB_GET_TARGET(ahc, scb) : -1, + scb != NULL ? SCB_GET_LUN(scb) : -1); +} + +/* + * XXX - these options apply unilaterally to _all_ 274x/284x/294x + * cards in the system. This should be fixed. Exceptions to this + * rule are noted in the comments. + */ + +/* + * Skip the scsi bus reset. Non 0 make us skip the reset at startup. This + * has no effect on any later resets that might occur due to things like + * SCSI bus timeouts. + */ +static uint32_t aic7xxx_no_reset; + +/* + * Should we force EXTENDED translation on a controller. + * 0 == Use whatever is in the SEEPROM or default to off + * 1 == Use whatever is in the SEEPROM or default to on + */ +static uint32_t aic7xxx_extended; + +/* + * PCI bus parity checking of the Adaptec controllers. This is somewhat + * dubious at best. To my knowledge, this option has never actually + * solved a PCI parity problem, but on certain machines with broken PCI + * chipset configurations where stray PCI transactions with bad parity are + * the norm rather than the exception, the error messages can be overwhelming. 
+ * It's included in the driver for completeness. + * 0 = Shut off PCI parity check + * non-0 = reverse polarity pci parity checking + */ +static uint32_t aic7xxx_pci_parity = ~0; + +/* + * There are lots of broken chipsets in the world. Some of them will + * violate the PCI spec when we issue byte sized memory writes to our + * controller. I/O mapped register access, if allowed by the given + * platform, will work in almost all cases. + */ +uint32_t aic7xxx_allow_memio = ~0; + +/* + * So that we can set how long each device is given as a selection timeout. + * The table of values goes like this: + * 0 - 256ms + * 1 - 128ms + * 2 - 64ms + * 3 - 32ms + * We default to 256ms because some older devices need a longer time + * to respond to initial selection. + */ +static uint32_t aic7xxx_seltime; + +/* + * Certain devices do not perform any aging on commands. Should the + * device be saturated by commands in one portion of the disk, it is + * possible for transactions on far away sectors to never be serviced. + * To handle these devices, we can periodically send an ordered tag to + * force all outstanding transactions to be serviced prior to a new + * transaction. + */ +static uint32_t aic7xxx_periodic_otag; + +/* + * Module information and settable options. + */ +static char *aic7xxx = NULL; + +MODULE_AUTHOR("Maintainer: Hannes Reinecke "); +MODULE_DESCRIPTION("Adaptec AIC77XX/78XX SCSI Host Bus Adapter driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(AIC7XXX_DRIVER_VERSION); +module_param(aic7xxx, charp, 0444); +MODULE_PARM_DESC(aic7xxx, +"period-delimited options string:\n" +" verbose Enable verbose/diagnostic logging\n" +" allow_memio Allow device registers to be memory mapped\n" +" debug Bitmask of debug values to enable\n" +" no_probe Toggle EISA/VLB controller probing\n" +" probe_eisa_vl Toggle EISA/VLB controller probing\n" +" no_reset Suppress initial bus resets\n" +" extended Enable extended geometry on all controllers\n" +" periodic_otag Send an ordered tagged transaction\n" +" periodically to prevent tag starvation.\n" +" This may be required by some older disk\n" +" drives or RAID arrays.\n" +" tag_info: Set per-target tag depth\n" +" global_tag_depth: Global tag depth for every target\n" +" on every bus\n" +" seltime: Selection Timeout\n" +" (0/256ms,1/128ms,2/64ms,3/32ms)\n" +"\n" +" Sample modprobe configuration file:\n" +" # Toggle EISA/VLB probing\n" +" # Set tag depth on Controller 1/Target 1 to 10 tags\n" +" # Shorten the selection timeout to 128ms\n" +"\n" +" options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n" +); + +static void ahc_linux_handle_scsi_status(struct ahc_softc *, + struct scsi_device *, + struct scb *); +static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, + struct scsi_cmnd *cmd); +static void ahc_linux_freeze_simq(struct ahc_softc *ahc); +static void ahc_linux_release_simq(struct ahc_softc *ahc); +static int ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag); +static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc); +static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo); +static void ahc_linux_device_queue_depth(struct scsi_device *); +static int ahc_linux_run_command(struct ahc_softc*, + struct ahc_linux_device *, + struct scsi_cmnd *); +static void ahc_linux_setup_tag_info_global(char *p); +static int aic7xxx_setup(char *s); + +static int ahc_linux_unit; + + +/************************** OS Utility Wrappers *******************************/ +void 
+ahc_delay(long usec) +{ + /* + * udelay on Linux can have problems for + * multi-millisecond waits. Wait at most + * 1024us per call. + */ + while (usec > 0) { + udelay(usec % 1024); + usec -= 1024; + } +} + +/***************************** Low Level I/O **********************************/ +uint8_t +ahc_inb(struct ahc_softc * ahc, long port) +{ + uint8_t x; + + if (ahc->tag == BUS_SPACE_MEMIO) { + x = readb(ahc->bsh.maddr + port); + } else { + x = inb(ahc->bsh.ioport + port); + } + mb(); + return (x); +} + +void +ahc_outb(struct ahc_softc * ahc, long port, uint8_t val) +{ + if (ahc->tag == BUS_SPACE_MEMIO) { + writeb(val, ahc->bsh.maddr + port); + } else { + outb(val, ahc->bsh.ioport + port); + } + mb(); +} + +void +ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count) +{ + int i; + + /* + * There is probably a more efficient way to do this on Linux + * but we don't use this for anything speed critical and this + * should work. + */ + for (i = 0; i < count; i++) + ahc_outb(ahc, port, *array++); +} + +void +ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count) +{ + int i; + + /* + * There is probably a more efficient way to do this on Linux + * but we don't use this for anything speed critical and this + * should work. + */ + for (i = 0; i < count; i++) + *array++ = ahc_inb(ahc, port); +} + +/********************************* Inlines ************************************/ +static void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*); + +static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, + struct ahc_dma_seg *sg, + dma_addr_t addr, bus_size_t len); + +static void +ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb) +{ + struct scsi_cmnd *cmd; + + cmd = scb->io_ctx; + ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE); + + scsi_dma_unmap(cmd); +} + +static int +ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, + struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len) +{ + int consumed; + + if ((scb->sg_count + 1) > AHC_NSEG) + panic("Too few segs for dma mapping. " + "Increase AHC_NSEG\n"); + + consumed = 1; + sg->addr = ahc_htole32(addr & 0xFFFFFFFF); + scb->platform_data->xfer_len += len; + + if (sizeof(dma_addr_t) > 4 + && (ahc->flags & AHC_39BIT_ADDRESSING) != 0) + len |= (addr >> 8) & AHC_SG_HIGH_ADDR_MASK; + + sg->len = ahc_htole32(len); + return (consumed); +} + +/* + * Return a string describing the driver. + */ +static const char * +ahc_linux_info(struct Scsi_Host *host) +{ + static char buffer[512]; + char ahc_info[256]; + char *bp; + struct ahc_softc *ahc; + + bp = &buffer[0]; + ahc = *(struct ahc_softc **)host->hostdata; + memset(bp, 0, sizeof(buffer)); + strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev " AIC7XXX_DRIVER_VERSION "\n" + " <"); + strcat(bp, ahc->description); + strcat(bp, ">\n" + " "); + ahc_controller_info(ahc, ahc_info); + strcat(bp, ahc_info); + strcat(bp, "\n"); + + return (bp); +} + +/* + * Queue an SCB to the controller. 
+ */ +static int ahc_linux_queue_lck(struct scsi_cmnd *cmd) +{ + struct ahc_softc *ahc; + struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device); + int rtn = SCSI_MLQUEUE_HOST_BUSY; + unsigned long flags; + + ahc = *(struct ahc_softc **)cmd->device->host->hostdata; + + ahc_lock(ahc, &flags); + if (ahc->platform_data->qfrozen == 0) { + cmd->result = CAM_REQ_INPROG << 16; + rtn = ahc_linux_run_command(ahc, dev, cmd); + } + ahc_unlock(ahc, &flags); + + return rtn; +} + +static DEF_SCSI_QCMD(ahc_linux_queue) + +static inline struct scsi_target ** +ahc_linux_target_in_softc(struct scsi_target *starget) +{ + struct ahc_softc *ahc = + *((struct ahc_softc **)dev_to_shost(&starget->dev)->hostdata); + unsigned int target_offset; + + target_offset = starget->id; + if (starget->channel != 0) + target_offset += 8; + + return &ahc->platform_data->starget[target_offset]; +} + +static int +ahc_linux_target_alloc(struct scsi_target *starget) +{ + struct ahc_softc *ahc = + *((struct ahc_softc **)dev_to_shost(&starget->dev)->hostdata); + struct seeprom_config *sc = ahc->seep_config; + unsigned long flags; + struct scsi_target **ahc_targp = ahc_linux_target_in_softc(starget); + unsigned short scsirate; + struct ahc_devinfo devinfo; + char channel = starget->channel + 'A'; + unsigned int our_id = ahc->our_id; + unsigned int target_offset; + + target_offset = starget->id; + if (starget->channel != 0) + target_offset += 8; + + if (starget->channel) + our_id = ahc->our_id_b; + + ahc_lock(ahc, &flags); + + BUG_ON(*ahc_targp != NULL); + + *ahc_targp = starget; + + if (sc) { + int maxsync = AHC_SYNCRATE_DT; + int ultra = 0; + int flags = sc->device_flags[target_offset]; + + if (ahc->flags & AHC_NEWEEPROM_FMT) { + if (flags & CFSYNCHISULTRA) + ultra = 1; + } else if (flags & CFULTRAEN) + ultra = 1; + /* AIC nutcase; 10MHz appears as ultra = 1, CFXFER = 0x04 + * change it to ultra=0, CFXFER = 0 */ + if(ultra && (flags & CFXFER) == 0x04) { + ultra = 0; + flags &= ~CFXFER; + } + + if ((ahc->features & AHC_ULTRA2) != 0) { + scsirate = (flags & CFXFER) | (ultra ? 0x8 : 0); + } else { + scsirate = (flags & CFXFER) << 4; + maxsync = ultra ? AHC_SYNCRATE_ULTRA : + AHC_SYNCRATE_FAST; + } + spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0; + if (!(flags & CFSYNCH)) + spi_max_offset(starget) = 0; + spi_min_period(starget) = + ahc_find_period(ahc, scsirate, maxsync); + } + ahc_compile_devinfo(&devinfo, our_id, starget->id, + CAM_LUN_WILDCARD, channel, + ROLE_INITIATOR); + ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0, + AHC_TRANS_GOAL, /*paused*/FALSE); + ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHC_TRANS_GOAL, /*paused*/FALSE); + ahc_unlock(ahc, &flags); + + return 0; +} + +static void +ahc_linux_target_destroy(struct scsi_target *starget) +{ + struct scsi_target **ahc_targp = ahc_linux_target_in_softc(starget); + + *ahc_targp = NULL; +} + +static int +ahc_linux_slave_alloc(struct scsi_device *sdev) +{ + struct ahc_softc *ahc = + *((struct ahc_softc **)sdev->host->hostdata); + struct scsi_target *starget = sdev->sdev_target; + struct ahc_linux_device *dev; + + if (bootverbose) + printk("%s: Slave Alloc %d\n", ahc_name(ahc), sdev->id); + + dev = scsi_transport_device_data(sdev); + memset(dev, 0, sizeof(*dev)); + + /* + * We start out life using untagged + * transactions of which we allow one. + */ + dev->openings = 1; + + /* + * Set maxtags to 0. This will be changed if we + * later determine that we are dealing with + * a tagged queuing capable device. 
+ */ + dev->maxtags = 0; + + spi_period(starget) = 0; + + return 0; +} + +static int +ahc_linux_slave_configure(struct scsi_device *sdev) +{ + if (bootverbose) + sdev_printk(KERN_INFO, sdev, "Slave Configure\n"); + + ahc_linux_device_queue_depth(sdev); + + /* Initial Domain Validation */ + if (!spi_initial_dv(sdev->sdev_target)) + spi_dv_device(sdev); + + return 0; +} + +#if defined(__i386__) +/* + * Return the disk geometry for the given SCSI device. + */ +static int +ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int geom[]) +{ + int heads; + int sectors; + int cylinders; + int extended; + struct ahc_softc *ahc; + u_int channel; + + ahc = *((struct ahc_softc **)sdev->host->hostdata); + channel = sdev_channel(sdev); + + if (scsi_partsize(bdev, capacity, geom)) + return 0; + + heads = 64; + sectors = 32; + cylinders = aic_sector_div(capacity, heads, sectors); + + if (aic7xxx_extended != 0) + extended = 1; + else if (channel == 0) + extended = (ahc->flags & AHC_EXTENDED_TRANS_A) != 0; + else + extended = (ahc->flags & AHC_EXTENDED_TRANS_B) != 0; + if (extended && cylinders >= 1024) { + heads = 255; + sectors = 63; + cylinders = aic_sector_div(capacity, heads, sectors); + } + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + return (0); +} +#endif + +/* + * Abort the current SCSI command(s). + */ +static int +ahc_linux_abort(struct scsi_cmnd *cmd) +{ + int error; + + error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT); + if (error != SUCCESS) + printk("aic7xxx_abort returns 0x%x\n", error); + return (error); +} + +/* + * Attempt to send a target reset message to the device that timed out. + */ +static int +ahc_linux_dev_reset(struct scsi_cmnd *cmd) +{ + int error; + + error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET); + if (error != SUCCESS) + printk("aic7xxx_dev_reset returns 0x%x\n", error); + return (error); +} + +/* + * Reset the SCSI bus. + */ +static int +ahc_linux_bus_reset(struct scsi_cmnd *cmd) +{ + struct ahc_softc *ahc; + int found; + unsigned long flags; + + ahc = *(struct ahc_softc **)cmd->device->host->hostdata; + + ahc_lock(ahc, &flags); + found = ahc_reset_channel(ahc, scmd_channel(cmd) + 'A', + /*initiate reset*/TRUE); + ahc_unlock(ahc, &flags); + + if (bootverbose) + printk("%s: SCSI bus reset delivered. " + "%d SCBs aborted.\n", ahc_name(ahc), found); + + return SUCCESS; +} + +struct scsi_host_template aic7xxx_driver_template = { + .module = THIS_MODULE, + .name = "aic7xxx", + .proc_name = "aic7xxx", + .show_info = ahc_linux_show_info, + .write_info = ahc_proc_write_seeprom, + .info = ahc_linux_info, + .queuecommand = ahc_linux_queue, + .eh_abort_handler = ahc_linux_abort, + .eh_device_reset_handler = ahc_linux_dev_reset, + .eh_bus_reset_handler = ahc_linux_bus_reset, +#if defined(__i386__) + .bios_param = ahc_linux_biosparam, +#endif + .can_queue = AHC_MAX_QUEUE, + .this_id = -1, + .max_sectors = 8192, + .cmd_per_lun = 2, + .slave_alloc = ahc_linux_slave_alloc, + .slave_configure = ahc_linux_slave_configure, + .target_alloc = ahc_linux_target_alloc, + .target_destroy = ahc_linux_target_destroy, +}; + +/**************************** Tasklet Handler *********************************/ + +/******************************** Macros **************************************/ +#define BUILD_SCSIID(ahc, cmd) \ + ((((cmd)->device->id << TID_SHIFT) & TID) \ + | (((cmd)->device->channel == 0) ? (ahc)->our_id : (ahc)->our_id_b) \ + | (((cmd)->device->channel == 0) ? 
0 : TWIN_CHNLB)) + +/******************************** Bus DMA *************************************/ +int +ahc_dma_tag_create(struct ahc_softc *ahc, bus_dma_tag_t parent, + bus_size_t alignment, bus_size_t boundary, + dma_addr_t lowaddr, dma_addr_t highaddr, + bus_dma_filter_t *filter, void *filterarg, + bus_size_t maxsize, int nsegments, + bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag) +{ + bus_dma_tag_t dmat; + + dmat = kmalloc(sizeof(*dmat), GFP_ATOMIC); + if (dmat == NULL) + return (ENOMEM); + + /* + * Linux is very simplistic about DMA memory. For now don't + * maintain all specification information. Once Linux supplies + * better facilities for doing these operations, or the + * needs of this particular driver change, we might need to do + * more here. + */ + dmat->alignment = alignment; + dmat->boundary = boundary; + dmat->maxsize = maxsize; + *ret_tag = dmat; + return (0); +} + +void +ahc_dma_tag_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat) +{ + kfree(dmat); +} + +int +ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr, + int flags, bus_dmamap_t *mapp) +{ + /* XXX: check if we really need the GFP_ATOMIC and unwind this mess! */ + *vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC); + if (*vaddr == NULL) + return ENOMEM; + return 0; +} + +void +ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat, + void* vaddr, bus_dmamap_t map) +{ + dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map); +} + +int +ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map, + void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb, + void *cb_arg, int flags) +{ + /* + * Assume for now that this will only be used during + * initialization and not for per-transaction buffer mapping. + */ + bus_dma_segment_t stack_sg; + + stack_sg.ds_addr = map; + stack_sg.ds_len = dmat->maxsize; + cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0); + return (0); +} + +void +ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map) +{ +} + +int +ahc_dmamap_unload(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map) +{ + /* Nothing to do */ + return (0); +} + +static void +ahc_linux_setup_tag_info_global(char *p) +{ + int tags, i, j; + + tags = simple_strtoul(p + 1, NULL, 0) & 0xff; + printk("Setting Global Tags= %d\n", tags); + + for (i = 0; i < ARRAY_SIZE(aic7xxx_tag_info); i++) { + for (j = 0; j < AHC_NUM_TARGETS; j++) { + aic7xxx_tag_info[i].tag_commands[j] = tags; + } + } +} + +static void +ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value) +{ + + if ((instance >= 0) && (targ >= 0) + && (instance < ARRAY_SIZE(aic7xxx_tag_info)) + && (targ < AHC_NUM_TARGETS)) { + aic7xxx_tag_info[instance].tag_commands[targ] = value & 0xff; + if (bootverbose) + printk("tag_info[%d:%d] = %d\n", instance, targ, value); + } +} + +static char * +ahc_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth, + void (*callback)(u_long, int, int, int32_t), + u_long callback_arg) +{ + char *tok_end; + char *tok_end2; + int i; + int instance; + int targ; + int done; + char tok_list[] = {'.', ',', '{', '}', '\0'}; + + /* All options use a ':' name/arg separator */ + if (*opt_arg != ':') + return (opt_arg); + opt_arg++; + instance = -1; + targ = -1; + done = FALSE; + /* + * Restore separator that may be in + * the middle of our option argument. 
+ */ + tok_end = strchr(opt_arg, '\0'); + if (tok_end < end) + *tok_end = ','; + while (!done) { + switch (*opt_arg) { + case '{': + if (instance == -1) { + instance = 0; + } else { + if (depth > 1) { + if (targ == -1) + targ = 0; + } else { + printk("Malformed Option %s\n", + opt_name); + done = TRUE; + } + } + opt_arg++; + break; + case '}': + if (targ != -1) + targ = -1; + else if (instance != -1) + instance = -1; + opt_arg++; + break; + case ',': + case '.': + if (instance == -1) + done = TRUE; + else if (targ >= 0) + targ++; + else if (instance >= 0) + instance++; + opt_arg++; + break; + case '\0': + done = TRUE; + break; + default: + tok_end = end; + for (i = 0; tok_list[i]; i++) { + tok_end2 = strchr(opt_arg, tok_list[i]); + if ((tok_end2) && (tok_end2 < tok_end)) + tok_end = tok_end2; + } + callback(callback_arg, instance, targ, + simple_strtol(opt_arg, NULL, 0)); + opt_arg = tok_end; + break; + } + } + return (opt_arg); +} + +/* + * Handle Linux boot parameters. This routine allows for assigning a value + * to a parameter with a ':' between the parameter and the value. + * ie. aic7xxx=stpwlev:1,extended + */ +static int +aic7xxx_setup(char *s) +{ + int i, n; + char *p; + char *end; + + static const struct { + const char *name; + uint32_t *flag; + } options[] = { + { "extended", &aic7xxx_extended }, + { "no_reset", &aic7xxx_no_reset }, + { "verbose", &aic7xxx_verbose }, + { "allow_memio", &aic7xxx_allow_memio}, +#ifdef AHC_DEBUG + { "debug", &ahc_debug }, +#endif + { "periodic_otag", &aic7xxx_periodic_otag }, + { "pci_parity", &aic7xxx_pci_parity }, + { "seltime", &aic7xxx_seltime }, + { "tag_info", NULL }, + { "global_tag_depth", NULL }, + { "dv", NULL } + }; + + end = strchr(s, '\0'); + + /* + * XXX ia64 gcc isn't smart enough to know that ARRAY_SIZE + * will never be 0 in this case. + */ + n = 0; + + while ((p = strsep(&s, ",.")) != NULL) { + if (*p == '\0') + continue; + for (i = 0; i < ARRAY_SIZE(options); i++) { + + n = strlen(options[i].name); + if (strncmp(options[i].name, p, n) == 0) + break; + } + if (i == ARRAY_SIZE(options)) + continue; + + if (strncmp(p, "global_tag_depth", n) == 0) { + ahc_linux_setup_tag_info_global(p + n); + } else if (strncmp(p, "tag_info", n) == 0) { + s = ahc_parse_brace_option("tag_info", p + n, end, + 2, ahc_linux_setup_tag_info, 0); + } else if (p[n] == ':') { + *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0); + } else if (strncmp(p, "verbose", n) == 0) { + *(options[i].flag) = 1; + } else { + *(options[i].flag) ^= 0xFFFFFFFF; + } + } + return 1; +} + +__setup("aic7xxx=", aic7xxx_setup); + +uint32_t aic7xxx_verbose; + +int +ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *template) +{ + char buf[80]; + struct Scsi_Host *host; + char *new_name; + u_long s; + int retval; + + template->name = ahc->description; + host = scsi_host_alloc(template, sizeof(struct ahc_softc *)); + if (host == NULL) + return (ENOMEM); + + *((struct ahc_softc **)host->hostdata) = ahc; + ahc->platform_data->host = host; + host->can_queue = AHC_MAX_QUEUE; + host->cmd_per_lun = 2; + /* XXX No way to communicate the ID for multiple channels */ + host->this_id = ahc->our_id; + host->irq = ahc->platform_data->irq; + host->max_id = (ahc->features & AHC_WIDE) ? 16 : 8; + host->max_lun = AHC_NUM_LUNS; + host->max_channel = (ahc->features & AHC_TWIN) ? 
1 : 0; + host->sg_tablesize = AHC_NSEG; + ahc_lock(ahc, &s); + ahc_set_unit(ahc, ahc_linux_unit++); + ahc_unlock(ahc, &s); + sprintf(buf, "scsi%d", host->host_no); + new_name = kmalloc(strlen(buf) + 1, GFP_ATOMIC); + if (new_name != NULL) { + strcpy(new_name, buf); + ahc_set_name(ahc, new_name); + } + host->unique_id = ahc->unit; + ahc_linux_initialize_scsi_bus(ahc); + ahc_intr_enable(ahc, TRUE); + + host->transportt = ahc_linux_transport_template; + + retval = scsi_add_host(host, ahc->dev); + if (retval) { + printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n"); + scsi_host_put(host); + return retval; + } + + scsi_scan_host(host); + return 0; +} + +/* + * Place the SCSI bus into a known state by either resetting it, + * or forcing transfer negotiations on the next command to any + * target. + */ +static void +ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc) +{ + int i; + int numtarg; + unsigned long s; + + i = 0; + numtarg = 0; + + ahc_lock(ahc, &s); + + if (aic7xxx_no_reset != 0) + ahc->flags &= ~(AHC_RESET_BUS_A|AHC_RESET_BUS_B); + + if ((ahc->flags & AHC_RESET_BUS_A) != 0) + ahc_reset_channel(ahc, 'A', /*initiate_reset*/TRUE); + else + numtarg = (ahc->features & AHC_WIDE) ? 16 : 8; + + if ((ahc->features & AHC_TWIN) != 0) { + + if ((ahc->flags & AHC_RESET_BUS_B) != 0) { + ahc_reset_channel(ahc, 'B', /*initiate_reset*/TRUE); + } else { + if (numtarg == 0) + i = 8; + numtarg += 8; + } + } + + /* + * Force negotiation to async for all targets that + * will not see an initial bus reset. + */ + for (; i < numtarg; i++) { + struct ahc_devinfo devinfo; + struct ahc_initiator_tinfo *tinfo; + struct ahc_tmode_tstate *tstate; + u_int our_id; + u_int target_id; + char channel; + + channel = 'A'; + our_id = ahc->our_id; + target_id = i; + if (i > 7 && (ahc->features & AHC_TWIN) != 0) { + channel = 'B'; + our_id = ahc->our_id_b; + target_id = i % 8; + } + tinfo = ahc_fetch_transinfo(ahc, channel, our_id, + target_id, &tstate); + ahc_compile_devinfo(&devinfo, our_id, target_id, + CAM_LUN_WILDCARD, channel, ROLE_INITIATOR); + ahc_update_neg_request(ahc, &devinfo, tstate, + tinfo, AHC_NEG_ALWAYS); + } + ahc_unlock(ahc, &s); + /* Give the bus some time to recover */ + if ((ahc->flags & (AHC_RESET_BUS_A|AHC_RESET_BUS_B)) != 0) { + ahc_linux_freeze_simq(ahc); + msleep(AIC7XXX_RESET_DELAY); + ahc_linux_release_simq(ahc); + } +} + +int +ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg) +{ + + ahc->platform_data = + kzalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC); + if (ahc->platform_data == NULL) + return (ENOMEM); + ahc->platform_data->irq = AHC_LINUX_NOIRQ; + ahc_lockinit(ahc); + ahc->seltime = (aic7xxx_seltime & 0x3) << 4; + ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4; + if (aic7xxx_pci_parity == 0) + ahc->flags |= AHC_DISABLE_PCI_PERR; + + return (0); +} + +void +ahc_platform_free(struct ahc_softc *ahc) +{ + struct scsi_target *starget; + int i; + + if (ahc->platform_data != NULL) { + /* destroy all of the device and target objects */ + for (i = 0; i < AHC_NUM_TARGETS; i++) { + starget = ahc->platform_data->starget[i]; + if (starget != NULL) { + ahc->platform_data->starget[i] = NULL; + } + } + + if (ahc->platform_data->irq != AHC_LINUX_NOIRQ) + free_irq(ahc->platform_data->irq, ahc); + if (ahc->tag == BUS_SPACE_PIO + && ahc->bsh.ioport != 0) + release_region(ahc->bsh.ioport, 256); + if (ahc->tag == BUS_SPACE_MEMIO + && ahc->bsh.maddr != NULL) { + iounmap(ahc->bsh.maddr); + release_mem_region(ahc->platform_data->mem_busaddr, + 0x1000); + } + + if (ahc->platform_data->host) + 
scsi_host_put(ahc->platform_data->host); + + kfree(ahc->platform_data); + } +} + +void +ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb) +{ + ahc_platform_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb), + SCB_GET_CHANNEL(ahc, scb), + SCB_GET_LUN(scb), SCB_LIST_NULL, + ROLE_UNKNOWN, CAM_REQUEUE_REQ); +} + +void +ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev, + struct ahc_devinfo *devinfo, ahc_queue_alg alg) +{ + struct ahc_linux_device *dev; + int was_queuing; + int now_queuing; + + if (sdev == NULL) + return; + dev = scsi_transport_device_data(sdev); + + was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED); + switch (alg) { + default: + case AHC_QUEUE_NONE: + now_queuing = 0; + break; + case AHC_QUEUE_BASIC: + now_queuing = AHC_DEV_Q_BASIC; + break; + case AHC_QUEUE_TAGGED: + now_queuing = AHC_DEV_Q_TAGGED; + break; + } + if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) == 0 + && (was_queuing != now_queuing) + && (dev->active != 0)) { + dev->flags |= AHC_DEV_FREEZE_TIL_EMPTY; + dev->qfrozen++; + } + + dev->flags &= ~(AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED|AHC_DEV_PERIODIC_OTAG); + if (now_queuing) { + u_int usertags; + + usertags = ahc_linux_user_tagdepth(ahc, devinfo); + if (!was_queuing) { + /* + * Start out aggressively and allow our + * dynamic queue depth algorithm to take + * care of the rest. + */ + dev->maxtags = usertags; + dev->openings = dev->maxtags - dev->active; + } + if (dev->maxtags == 0) { + /* + * Queueing is disabled by the user. + */ + dev->openings = 1; + } else if (alg == AHC_QUEUE_TAGGED) { + dev->flags |= AHC_DEV_Q_TAGGED; + if (aic7xxx_periodic_otag != 0) + dev->flags |= AHC_DEV_PERIODIC_OTAG; + } else + dev->flags |= AHC_DEV_Q_BASIC; + } else { + /* We can only have one opening. */ + dev->maxtags = 0; + dev->openings = 1 - dev->active; + } + switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) { + case AHC_DEV_Q_BASIC: + case AHC_DEV_Q_TAGGED: + scsi_change_queue_depth(sdev, + dev->openings + dev->active); + break; + default: + /* + * We allow the OS to queue 2 untagged transactions to + * us at any time even though we can only execute them + * serially on the controller/device. This should + * remove some latency. + */ + scsi_change_queue_depth(sdev, 2); + break; + } +} + +int +ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel, + int lun, u_int tag, role_t role, uint32_t status) +{ + return 0; +} + +static u_int +ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) +{ + static int warned_user; + u_int tags; + + tags = 0; + if ((ahc->user_discenable & devinfo->target_mask) != 0) { + if (ahc->unit >= ARRAY_SIZE(aic7xxx_tag_info)) { + if (warned_user == 0) { + + printk(KERN_WARNING +"aic7xxx: WARNING: Insufficient tag_info instances\n" +"aic7xxx: for installed controllers. Using defaults\n" +"aic7xxx: Please update the aic7xxx_tag_info array in\n" +"aic7xxx: the aic7xxx_osm..c source file.\n"); + warned_user++; + } + tags = AHC_MAX_QUEUE; + } else { + adapter_tag_info_t *tag_info; + + tag_info = &aic7xxx_tag_info[ahc->unit]; + tags = tag_info->tag_commands[devinfo->target_offset]; + if (tags > AHC_MAX_QUEUE) + tags = AHC_MAX_QUEUE; + } + } + return (tags); +} + +/* + * Determines the queue depth for a given device. + */ +static void +ahc_linux_device_queue_depth(struct scsi_device *sdev) +{ + struct ahc_devinfo devinfo; + u_int tags; + struct ahc_softc *ahc = *((struct ahc_softc **)sdev->host->hostdata); + + ahc_compile_devinfo(&devinfo, + sdev->sdev_target->channel == 0 + ? 
ahc->our_id : ahc->our_id_b, + sdev->sdev_target->id, sdev->lun, + sdev->sdev_target->channel == 0 ? 'A' : 'B', + ROLE_INITIATOR); + tags = ahc_linux_user_tagdepth(ahc, &devinfo); + if (tags != 0 && sdev->tagged_supported != 0) { + + ahc_platform_set_tags(ahc, sdev, &devinfo, AHC_QUEUE_TAGGED); + ahc_send_async(ahc, devinfo.channel, devinfo.target, + devinfo.lun, AC_TRANSFER_NEG); + ahc_print_devinfo(ahc, &devinfo); + printk("Tagged Queuing enabled. Depth %d\n", tags); + } else { + ahc_platform_set_tags(ahc, sdev, &devinfo, AHC_QUEUE_NONE); + ahc_send_async(ahc, devinfo.channel, devinfo.target, + devinfo.lun, AC_TRANSFER_NEG); + } +} + +static int +ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev, + struct scsi_cmnd *cmd) +{ + struct scb *scb; + struct hardware_scb *hscb; + struct ahc_initiator_tinfo *tinfo; + struct ahc_tmode_tstate *tstate; + uint16_t mask; + struct scb_tailq *untagged_q = NULL; + int nseg; + + /* + * Schedule us to run later. The only reason we are not + * running is because the whole controller Q is frozen. + */ + if (ahc->platform_data->qfrozen != 0) + return SCSI_MLQUEUE_HOST_BUSY; + + /* + * We only allow one untagged transaction + * per target in the initiator role unless + * we are storing a full busy target *lun* + * table in SCB space. + */ + if (!(cmd->flags & SCMD_TAGGED) + && (ahc->features & AHC_SCB_BTT) == 0) { + int target_offset; + + target_offset = cmd->device->id + cmd->device->channel * 8; + untagged_q = &(ahc->untagged_queues[target_offset]); + if (!TAILQ_EMPTY(untagged_q)) + /* if we're already executing an untagged command + * we're busy to another */ + return SCSI_MLQUEUE_DEVICE_BUSY; + } + + nseg = scsi_dma_map(cmd); + if (nseg < 0) + return SCSI_MLQUEUE_HOST_BUSY; + + /* + * Get an scb to use. + */ + scb = ahc_get_scb(ahc); + if (!scb) { + scsi_dma_unmap(cmd); + return SCSI_MLQUEUE_HOST_BUSY; + } + + scb->io_ctx = cmd; + scb->platform_data->dev = dev; + hscb = scb->hscb; + cmd->host_scribble = (char *)scb; + + /* + * Fill out basics of the HSCB. + */ + hscb->control = 0; + hscb->scsiid = BUILD_SCSIID(ahc, cmd); + hscb->lun = cmd->device->lun; + mask = SCB_GET_TARGET_MASK(ahc, scb); + tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb), + SCB_GET_OUR_ID(scb), + SCB_GET_TARGET(ahc, scb), &tstate); + hscb->scsirate = tinfo->scsirate; + hscb->scsioffset = tinfo->curr.offset; + if ((tstate->ultraenb & mask) != 0) + hscb->control |= ULTRAENB; + + if ((ahc->user_discenable & mask) != 0) + hscb->control |= DISCENB; + + if ((tstate->auto_negotiate & mask) != 0) { + scb->flags |= SCB_AUTO_NEGOTIATE; + scb->hscb->control |= MK_MESSAGE; + } + + if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) { + if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH + && (dev->flags & AHC_DEV_Q_TAGGED) != 0) { + hscb->control |= ORDERED_QUEUE_TAG; + dev->commands_since_idle_or_otag = 0; + } else { + hscb->control |= SIMPLE_QUEUE_TAG; + } + } + + hscb->cdb_len = cmd->cmd_len; + if (hscb->cdb_len <= 12) { + memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len); + } else { + memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len); + scb->flags |= SCB_CDB32_PTR; + } + + scb->platform_data->xfer_len = 0; + ahc_set_residual(scb, 0); + ahc_set_sense_residual(scb, 0); + scb->sg_count = 0; + + if (nseg > 0) { + struct ahc_dma_seg *sg; + struct scatterlist *cur_seg; + int i; + + /* Copy the segments into the SG list. */ + sg = scb->sg_list; + /* + * The sg_count may be larger than nseg if + * a transfer crosses a 32bit page. 
+ */ + scsi_for_each_sg(cmd, cur_seg, nseg, i) { + dma_addr_t addr; + bus_size_t len; + int consumed; + + addr = sg_dma_address(cur_seg); + len = sg_dma_len(cur_seg); + consumed = ahc_linux_map_seg(ahc, scb, + sg, addr, len); + sg += consumed; + scb->sg_count += consumed; + } + sg--; + sg->len |= ahc_htole32(AHC_DMA_LAST_SEG); + + /* + * Reset the sg list pointer. + */ + scb->hscb->sgptr = + ahc_htole32(scb->sg_list_phys | SG_FULL_RESID); + + /* + * Copy the first SG into the "current" + * data pointer area. + */ + scb->hscb->dataptr = scb->sg_list->addr; + scb->hscb->datacnt = scb->sg_list->len; + } else { + scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL); + scb->hscb->dataptr = 0; + scb->hscb->datacnt = 0; + scb->sg_count = 0; + } + + LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links); + dev->openings--; + dev->active++; + dev->commands_issued++; + if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0) + dev->commands_since_idle_or_otag++; + + scb->flags |= SCB_ACTIVE; + if (untagged_q) { + TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe); + scb->flags |= SCB_UNTAGGEDQ; + } + ahc_queue_scb(ahc, scb); + return 0; +} + +/* + * SCSI controller interrupt handler. + */ +irqreturn_t +ahc_linux_isr(int irq, void *dev_id) +{ + struct ahc_softc *ahc; + u_long flags; + int ours; + + ahc = (struct ahc_softc *) dev_id; + ahc_lock(ahc, &flags); + ours = ahc_intr(ahc); + ahc_unlock(ahc, &flags); + return IRQ_RETVAL(ours); +} + +void +ahc_platform_flushwork(struct ahc_softc *ahc) +{ + +} + +void +ahc_send_async(struct ahc_softc *ahc, char channel, + u_int target, u_int lun, ac_code code) +{ + switch (code) { + case AC_TRANSFER_NEG: + { + struct scsi_target *starget; + struct ahc_initiator_tinfo *tinfo; + struct ahc_tmode_tstate *tstate; + int target_offset; + unsigned int target_ppr_options; + + BUG_ON(target == CAM_TARGET_WILDCARD); + + tinfo = ahc_fetch_transinfo(ahc, channel, + channel == 'A' ? ahc->our_id + : ahc->our_id_b, + target, &tstate); + + /* + * Don't bother reporting results while + * negotiations are still pending. + */ + if (tinfo->curr.period != tinfo->goal.period + || tinfo->curr.width != tinfo->goal.width + || tinfo->curr.offset != tinfo->goal.offset + || tinfo->curr.ppr_options != tinfo->goal.ppr_options) + if (bootverbose == 0) + break; + + /* + * Don't bother reporting results that + * are identical to those last reported. + */ + target_offset = target; + if (channel == 'B') + target_offset += 8; + starget = ahc->platform_data->starget[target_offset]; + if (starget == NULL) + break; + + target_ppr_options = + (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0) + + (spi_qas(starget) ? MSG_EXT_PPR_QAS_REQ : 0) + + (spi_iu(starget) ? MSG_EXT_PPR_IU_REQ : 0); + + if (tinfo->curr.period == spi_period(starget) + && tinfo->curr.width == spi_width(starget) + && tinfo->curr.offset == spi_offset(starget) + && tinfo->curr.ppr_options == target_ppr_options) + if (bootverbose == 0) + break; + + spi_period(starget) = tinfo->curr.period; + spi_width(starget) = tinfo->curr.width; + spi_offset(starget) = tinfo->curr.offset; + spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0; + spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0; + spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 
1 : 0; + spi_display_xfer_agreement(starget); + break; + } + case AC_SENT_BDR: + { + WARN_ON(lun != CAM_LUN_WILDCARD); + scsi_report_device_reset(ahc->platform_data->host, + channel - 'A', target); + break; + } + case AC_BUS_RESET: + if (ahc->platform_data->host != NULL) { + scsi_report_bus_reset(ahc->platform_data->host, + channel - 'A'); + } + break; + default: + panic("ahc_send_async: Unexpected async event"); + } +} + +/* + * Calls the higher level scsi done function and frees the scb. + */ +void +ahc_done(struct ahc_softc *ahc, struct scb *scb) +{ + struct scsi_cmnd *cmd; + struct ahc_linux_device *dev; + + LIST_REMOVE(scb, pending_links); + if ((scb->flags & SCB_UNTAGGEDQ) != 0) { + struct scb_tailq *untagged_q; + int target_offset; + + target_offset = SCB_GET_TARGET_OFFSET(ahc, scb); + untagged_q = &(ahc->untagged_queues[target_offset]); + TAILQ_REMOVE(untagged_q, scb, links.tqe); + BUG_ON(!TAILQ_EMPTY(untagged_q)); + } else if ((scb->flags & SCB_ACTIVE) == 0) { + /* + * Transactions aborted from the untagged queue may + * not have been dispatched to the controller, so + * only check the SCB_ACTIVE flag for tagged transactions. + */ + printk("SCB %d done'd twice\n", scb->hscb->tag); + ahc_dump_card_state(ahc); + panic("Stopping for safety"); + } + cmd = scb->io_ctx; + dev = scb->platform_data->dev; + dev->active--; + dev->openings++; + if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) { + cmd->result &= ~(CAM_DEV_QFRZN << 16); + dev->qfrozen--; + } + ahc_linux_unmap_scb(ahc, scb); + + /* + * Guard against stale sense data. + * The Linux mid-layer assumes that sense + * was retrieved anytime the first byte of + * the sense buffer looks "sane". + */ + cmd->sense_buffer[0] = 0; + if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) { +#ifdef AHC_REPORT_UNDERFLOWS + uint32_t amount_xferred; + + amount_xferred = + ahc_get_transfer_length(scb) - ahc_get_residual(scb); +#endif + if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) { +#ifdef AHC_DEBUG + if ((ahc_debug & AHC_SHOW_MISC) != 0) { + ahc_print_path(ahc, scb); + printk("Set CAM_UNCOR_PARITY\n"); + } +#endif + ahc_set_transaction_status(scb, CAM_UNCOR_PARITY); +#ifdef AHC_REPORT_UNDERFLOWS + /* + * This code is disabled by default as some + * clients of the SCSI system do not properly + * initialize the underflow parameter. This + * results in spurious termination of commands + * that complete as expected (e.g. underflow is + * allowed as command can return variable amounts + * of data. + */ + } else if (amount_xferred < scb->io_ctx->underflow) { + u_int i; + + ahc_print_path(ahc, scb); + printk("CDB:"); + for (i = 0; i < scb->io_ctx->cmd_len; i++) + printk(" 0x%x", scb->io_ctx->cmnd[i]); + printk("\n"); + ahc_print_path(ahc, scb); + printk("Saw underflow (%ld of %ld bytes). " + "Treated as error\n", + ahc_get_residual(scb), + ahc_get_transfer_length(scb)); + ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR); +#endif + } else { + ahc_set_transaction_status(scb, CAM_REQ_CMP); + } + } else if (ahc_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) { + ahc_linux_handle_scsi_status(ahc, cmd->device, scb); + } + + if (dev->openings == 1 + && ahc_get_transaction_status(scb) == CAM_REQ_CMP + && ahc_get_scsi_status(scb) != SAM_STAT_TASK_SET_FULL) + dev->tag_success_count++; + /* + * Some devices deal with temporary internal resource + * shortages by returning queue full. When the queue + * full occurrs, we throttle back. Slowly try to get + * back to our previous queue depth. 
+ */ + if ((dev->openings + dev->active) < dev->maxtags + && dev->tag_success_count > AHC_TAG_SUCCESS_INTERVAL) { + dev->tag_success_count = 0; + dev->openings++; + } + + if (dev->active == 0) + dev->commands_since_idle_or_otag = 0; + + if ((scb->flags & SCB_RECOVERY_SCB) != 0) { + printk("Recovery SCB completes\n"); + if (ahc_get_transaction_status(scb) == CAM_BDR_SENT + || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED) + ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT); + + if (ahc->platform_data->eh_done) + complete(ahc->platform_data->eh_done); + } + + ahc_free_scb(ahc, scb); + ahc_linux_queue_cmd_complete(ahc, cmd); +} + +static void +ahc_linux_handle_scsi_status(struct ahc_softc *ahc, + struct scsi_device *sdev, struct scb *scb) +{ + struct ahc_devinfo devinfo; + struct ahc_linux_device *dev = scsi_transport_device_data(sdev); + + ahc_compile_devinfo(&devinfo, + ahc->our_id, + sdev->sdev_target->id, sdev->lun, + sdev->sdev_target->channel == 0 ? 'A' : 'B', + ROLE_INITIATOR); + + /* + * We don't currently trust the mid-layer to + * properly deal with queue full or busy. So, + * when one occurs, we tell the mid-layer to + * unconditionally requeue the command to us + * so that we can retry it ourselves. We also + * implement our own throttling mechanism so + * we don't clobber the device with too many + * commands. + */ + switch (ahc_get_scsi_status(scb)) { + default: + break; + case SAM_STAT_CHECK_CONDITION: + case SAM_STAT_COMMAND_TERMINATED: + { + struct scsi_cmnd *cmd; + + /* + * Copy sense information to the OS's cmd + * structure if it is available. + */ + cmd = scb->io_ctx; + if (scb->flags & SCB_SENSE) { + u_int sense_size; + + sense_size = min(sizeof(struct scsi_sense_data) + - ahc_get_sense_residual(scb), + (u_long)SCSI_SENSE_BUFFERSIZE); + memcpy(cmd->sense_buffer, + ahc_get_sense_buf(ahc, scb), sense_size); + if (sense_size < SCSI_SENSE_BUFFERSIZE) + memset(&cmd->sense_buffer[sense_size], 0, + SCSI_SENSE_BUFFERSIZE - sense_size); +#ifdef AHC_DEBUG + if (ahc_debug & AHC_SHOW_SENSE) { + int i; + + printk("Copied %d bytes of sense data:", + sense_size); + for (i = 0; i < sense_size; i++) { + if ((i & 0xF) == 0) + printk("\n"); + printk("0x%x ", cmd->sense_buffer[i]); + } + printk("\n"); + } +#endif + } + break; + } + case SAM_STAT_TASK_SET_FULL: + { + /* + * By the time the core driver has returned this + * command, all other commands that were queued + * to us but not the device have been returned. + * This ensures that dev->active is equal to + * the number of commands actually queued to + * the device. + */ + dev->tag_success_count = 0; + if (dev->active != 0) { + /* + * Drop our opening count to the number + * of commands currently outstanding. + */ + dev->openings = 0; +/* + ahc_print_path(ahc, scb); + printk("Dropping tag count to %d\n", dev->active); + */ + if (dev->active == dev->tags_on_last_queuefull) { + + dev->last_queuefull_same_count++; + /* + * If we repeatedly see a queue full + * at the same queue depth, this + * device has a fixed number of tag + * slots. Lock in this tag depth + * so we stop seeing queue fulls from + * this device. 
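+	 * After AHC_LOCK_TAGS_COUNT identical queue fulls
+	 * in a row, maxtags is clamped to the current
+	 * active count and the adaptive ramp-up in
+	 * ahc_done() can no longer exceed it.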
+ */ + if (dev->last_queuefull_same_count + == AHC_LOCK_TAGS_COUNT) { + dev->maxtags = dev->active; + ahc_print_path(ahc, scb); + printk("Locking max tag count at %d\n", + dev->active); + } + } else { + dev->tags_on_last_queuefull = dev->active; + dev->last_queuefull_same_count = 0; + } + ahc_set_transaction_status(scb, CAM_REQUEUE_REQ); + ahc_set_scsi_status(scb, SAM_STAT_GOOD); + ahc_platform_set_tags(ahc, sdev, &devinfo, + (dev->flags & AHC_DEV_Q_BASIC) + ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED); + break; + } + /* + * Drop down to a single opening, and treat this + * as if the target returned BUSY SCSI status. + */ + dev->openings = 1; + ahc_set_scsi_status(scb, SAM_STAT_BUSY); + ahc_platform_set_tags(ahc, sdev, &devinfo, + (dev->flags & AHC_DEV_Q_BASIC) + ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED); + break; + } + } +} + +static void +ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, struct scsi_cmnd *cmd) +{ + /* + * Map CAM error codes into Linux Error codes. We + * avoid the conversion so that the DV code has the + * full error information available when making + * state change decisions. + */ + { + u_int new_status; + + switch (ahc_cmd_get_transaction_status(cmd)) { + case CAM_REQ_INPROG: + case CAM_REQ_CMP: + case CAM_SCSI_STATUS_ERROR: + new_status = DID_OK; + break; + case CAM_REQ_ABORTED: + new_status = DID_ABORT; + break; + case CAM_BUSY: + new_status = DID_BUS_BUSY; + break; + case CAM_REQ_INVALID: + case CAM_PATH_INVALID: + new_status = DID_BAD_TARGET; + break; + case CAM_SEL_TIMEOUT: + new_status = DID_NO_CONNECT; + break; + case CAM_SCSI_BUS_RESET: + case CAM_BDR_SENT: + new_status = DID_RESET; + break; + case CAM_UNCOR_PARITY: + new_status = DID_PARITY; + break; + case CAM_CMD_TIMEOUT: + new_status = DID_TIME_OUT; + break; + case CAM_UA_ABORT: + case CAM_REQ_CMP_ERR: + case CAM_AUTOSENSE_FAIL: + case CAM_NO_HBA: + case CAM_DATA_RUN_ERR: + case CAM_UNEXP_BUSFREE: + case CAM_SEQUENCE_FAIL: + case CAM_CCB_LEN_ERR: + case CAM_PROVIDE_FAIL: + case CAM_REQ_TERMIO: + case CAM_UNREC_HBA_ERROR: + case CAM_REQ_TOO_BIG: + new_status = DID_ERROR; + break; + case CAM_REQUEUE_REQ: + new_status = DID_REQUEUE; + break; + default: + /* We should never get here */ + new_status = DID_ERROR; + break; + } + + ahc_cmd_set_transaction_status(cmd, new_status); + } + + scsi_done(cmd); +} + +static void +ahc_linux_freeze_simq(struct ahc_softc *ahc) +{ + unsigned long s; + + ahc_lock(ahc, &s); + ahc->platform_data->qfrozen++; + if (ahc->platform_data->qfrozen == 1) { + scsi_block_requests(ahc->platform_data->host); + + /* XXX What about Twin channels? */ + ahc_platform_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, + CAM_LUN_WILDCARD, SCB_LIST_NULL, + ROLE_INITIATOR, CAM_REQUEUE_REQ); + } + ahc_unlock(ahc, &s); +} + +static void +ahc_linux_release_simq(struct ahc_softc *ahc) +{ + u_long s; + int unblock_reqs; + + unblock_reqs = 0; + ahc_lock(ahc, &s); + if (ahc->platform_data->qfrozen > 0) + ahc->platform_data->qfrozen--; + if (ahc->platform_data->qfrozen == 0) + unblock_reqs = 1; + ahc_unlock(ahc, &s); + /* + * There is still a race here. The mid-layer + * should keep its own freeze count and use + * a bottom half handler to run the queues + * so we can unblock with our own lock held. 
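+	 * Until then we simply accept the small window
+	 * between dropping ahc_lock and the call to
+	 * scsi_unblock_requests() below.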
+ */ + if (unblock_reqs) + scsi_unblock_requests(ahc->platform_data->host); +} + +static int +ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag) +{ + struct ahc_softc *ahc; + struct ahc_linux_device *dev; + struct scb *pending_scb; + u_int saved_scbptr; + u_int active_scb_index; + u_int last_phase; + u_int saved_scsiid; + u_int cdb_byte; + int retval; + int was_paused; + int paused; + int wait; + int disconnected; + unsigned long flags; + + pending_scb = NULL; + paused = FALSE; + wait = FALSE; + ahc = *(struct ahc_softc **)cmd->device->host->hostdata; + + scmd_printk(KERN_INFO, cmd, "Attempting to queue a%s message\n", + flag == SCB_ABORT ? "n ABORT" : " TARGET RESET"); + + printk("CDB:"); + for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++) + printk(" 0x%x", cmd->cmnd[cdb_byte]); + printk("\n"); + + ahc_lock(ahc, &flags); + + /* + * First determine if we currently own this command. + * Start by searching the device queue. If not found + * there, check the pending_scb list. If not found + * at all, and the system wanted us to just abort the + * command, return success. + */ + dev = scsi_transport_device_data(cmd->device); + + if (dev == NULL) { + /* + * No target device for this command exists, + * so we must not still own the command. + */ + printk("%s:%d:%d:%d: Is not an active device\n", + ahc_name(ahc), cmd->device->channel, cmd->device->id, + (u8)cmd->device->lun); + retval = SUCCESS; + goto no_cmd; + } + + if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0 + && ahc_search_untagged_queues(ahc, cmd, cmd->device->id, + cmd->device->channel + 'A', + (u8)cmd->device->lun, + CAM_REQ_ABORTED, SEARCH_COMPLETE) != 0) { + printk("%s:%d:%d:%d: Command found on untagged queue\n", + ahc_name(ahc), cmd->device->channel, cmd->device->id, + (u8)cmd->device->lun); + retval = SUCCESS; + goto done; + } + + /* + * See if we can find a matching cmd in the pending list. + */ + LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { + if (pending_scb->io_ctx == cmd) + break; + } + + if (pending_scb == NULL && flag == SCB_DEVICE_RESET) { + + /* Any SCB for this device will do for a target reset */ + LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { + if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd), + scmd_channel(cmd) + 'A', + CAM_LUN_WILDCARD, + SCB_LIST_NULL, ROLE_INITIATOR)) + break; + } + } + + if (pending_scb == NULL) { + scmd_printk(KERN_INFO, cmd, "Command not found\n"); + goto no_cmd; + } + + if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) { + /* + * We can't queue two recovery actions using the same SCB + */ + retval = FAILED; + goto done; + } + + /* + * Ensure that the card doesn't do anything + * behind our back and that we didn't "just" miss + * an interrupt that would affect this cmd. + */ + was_paused = ahc_is_paused(ahc); + ahc_pause_and_flushwork(ahc); + paused = TRUE; + + if ((pending_scb->flags & SCB_ACTIVE) == 0) { + scmd_printk(KERN_INFO, cmd, "Command already completed\n"); + goto no_cmd; + } + + printk("%s: At time of recovery, card was %spaused\n", + ahc_name(ahc), was_paused ? 
"" : "not "); + ahc_dump_card_state(ahc); + + disconnected = TRUE; + if (flag == SCB_ABORT) { + if (ahc_search_qinfifo(ahc, cmd->device->id, + cmd->device->channel + 'A', + cmd->device->lun, + pending_scb->hscb->tag, + ROLE_INITIATOR, CAM_REQ_ABORTED, + SEARCH_COMPLETE) > 0) { + printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n", + ahc_name(ahc), cmd->device->channel, + cmd->device->id, (u8)cmd->device->lun); + retval = SUCCESS; + goto done; + } + } else if (ahc_search_qinfifo(ahc, cmd->device->id, + cmd->device->channel + 'A', + cmd->device->lun, + pending_scb->hscb->tag, + ROLE_INITIATOR, /*status*/0, + SEARCH_COUNT) > 0) { + disconnected = FALSE; + } + + if (disconnected && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) { + struct scb *bus_scb; + + bus_scb = ahc_lookup_scb(ahc, ahc_inb(ahc, SCB_TAG)); + if (bus_scb == pending_scb) + disconnected = FALSE; + else if (flag != SCB_ABORT + && ahc_inb(ahc, SAVED_SCSIID) == pending_scb->hscb->scsiid + && ahc_inb(ahc, SAVED_LUN) == SCB_GET_LUN(pending_scb)) + disconnected = FALSE; + } + + /* + * At this point, pending_scb is the scb associated with the + * passed in command. That command is currently active on the + * bus, is in the disconnected state, or we're hoping to find + * a command for the same target active on the bus to abuse to + * send a BDR. Queue the appropriate message based on which of + * these states we are in. + */ + last_phase = ahc_inb(ahc, LASTPHASE); + saved_scbptr = ahc_inb(ahc, SCBPTR); + active_scb_index = ahc_inb(ahc, SCB_TAG); + saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); + if (last_phase != P_BUSFREE + && (pending_scb->hscb->tag == active_scb_index + || (flag == SCB_DEVICE_RESET + && SCSIID_TARGET(ahc, saved_scsiid) == scmd_id(cmd)))) { + + /* + * We're active on the bus, so assert ATN + * and hope that the target responds. + */ + pending_scb = ahc_lookup_scb(ahc, active_scb_index); + pending_scb->flags |= SCB_RECOVERY_SCB|flag; + ahc_outb(ahc, MSG_OUT, HOST_MSG); + ahc_outb(ahc, SCSISIGO, last_phase|ATNO); + scmd_printk(KERN_INFO, cmd, "Device is active, asserting ATN\n"); + wait = TRUE; + } else if (disconnected) { + + /* + * Actually re-queue this SCB in an attempt + * to select the device before it reconnects. + * In either case (selection or reselection), + * we will now issue the approprate message + * to the timed-out device. + * + * Set the MK_MESSAGE control bit indicating + * that we desire to send a message. We + * also set the disconnected flag since + * in the paging case there is no guarantee + * that our SCB control byte matches the + * version on the card. We don't want the + * sequencer to abort the command thinking + * an unsolicited reselection occurred. + */ + pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED; + pending_scb->flags |= SCB_RECOVERY_SCB|flag; + + /* + * Remove any cached copy of this SCB in the + * disconnected list in preparation for the + * queuing of our abort SCB. We use the + * same element in the SCB, SCB_NEXT, for + * both the qinfifo and the disconnected list. + */ + ahc_search_disc_list(ahc, cmd->device->id, + cmd->device->channel + 'A', + cmd->device->lun, pending_scb->hscb->tag, + /*stop_on_first*/TRUE, + /*remove*/TRUE, + /*save_state*/FALSE); + + /* + * In the non-paging case, the sequencer will + * never re-reference the in-core SCB. + * To make sure we are notified during + * reselection, set the MK_MESSAGE flag in + * the card's copy of the SCB. 
+ */ + if ((ahc->flags & AHC_PAGESCBS) == 0) { + ahc_outb(ahc, SCBPTR, pending_scb->hscb->tag); + ahc_outb(ahc, SCB_CONTROL, + ahc_inb(ahc, SCB_CONTROL)|MK_MESSAGE); + } + + /* + * Clear out any entries in the QINFIFO first + * so we are the next SCB for this target + * to run. + */ + ahc_search_qinfifo(ahc, cmd->device->id, + cmd->device->channel + 'A', + cmd->device->lun, SCB_LIST_NULL, + ROLE_INITIATOR, CAM_REQUEUE_REQ, + SEARCH_COMPLETE); + ahc_qinfifo_requeue_tail(ahc, pending_scb); + ahc_outb(ahc, SCBPTR, saved_scbptr); + ahc_print_path(ahc, pending_scb); + printk("Device is disconnected, re-queuing SCB\n"); + wait = TRUE; + } else { + scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n"); + retval = FAILED; + goto done; + } + +no_cmd: + /* + * Our assumption is that if we don't have the command, no + * recovery action was required, so we return success. Again, + * the semantics of the mid-layer recovery engine are not + * well defined, so this may change in time. + */ + retval = SUCCESS; +done: + if (paused) + ahc_unpause(ahc); + if (wait) { + DECLARE_COMPLETION_ONSTACK(done); + + ahc->platform_data->eh_done = &done; + ahc_unlock(ahc, &flags); + + printk("Recovery code sleeping\n"); + if (!wait_for_completion_timeout(&done, 5 * HZ)) { + ahc_lock(ahc, &flags); + ahc->platform_data->eh_done = NULL; + ahc_unlock(ahc, &flags); + + printk("Timer Expired\n"); + retval = FAILED; + } + printk("Recovery code awake\n"); + } else + ahc_unlock(ahc, &flags); + return (retval); +} + +static void ahc_linux_set_width(struct scsi_target *starget, int width) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); + struct ahc_devinfo devinfo; + unsigned long flags; + + ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + ahc_lock(ahc, &flags); + ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE); + ahc_unlock(ahc, &flags); +} + +static void ahc_linux_set_period(struct scsi_target *starget, int period) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); + struct ahc_tmode_tstate *tstate; + struct ahc_initiator_tinfo *tinfo + = ahc_fetch_transinfo(ahc, + starget->channel + 'A', + shost->this_id, starget->id, &tstate); + struct ahc_devinfo devinfo; + unsigned int ppr_options = tinfo->goal.ppr_options; + unsigned long flags; + unsigned long offset = tinfo->goal.offset; + const struct ahc_syncrate *syncrate; + + if (offset == 0) + offset = MAX_OFFSET; + + if (period < 9) + period = 9; /* 12.5ns is our minimum */ + if (period == 9) { + if (spi_max_width(starget)) + ppr_options |= MSG_EXT_PPR_DT_REQ; + else + /* need wide for DT and need DT for 12.5 ns */ + period = 10; + } + + ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + + /* all PPR requests apart from QAS require wide transfers */ + if (ppr_options & ~MSG_EXT_PPR_QAS_REQ) { + if (spi_width(starget) == 0) + ppr_options &= MSG_EXT_PPR_QAS_REQ; + } + + syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, + AHC_SYNCRATE_DT); + ahc_lock(ahc, &flags); + ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset, + ppr_options, AHC_TRANS_GOAL, FALSE); + ahc_unlock(ahc, &flags); +} + +static void ahc_linux_set_offset(struct scsi_target *starget, int offset) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahc_softc *ahc = *((struct ahc_softc 
**)shost->hostdata); + struct ahc_tmode_tstate *tstate; + struct ahc_initiator_tinfo *tinfo + = ahc_fetch_transinfo(ahc, + starget->channel + 'A', + shost->this_id, starget->id, &tstate); + struct ahc_devinfo devinfo; + unsigned int ppr_options = 0; + unsigned int period = 0; + unsigned long flags; + const struct ahc_syncrate *syncrate = NULL; + + ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + if (offset != 0) { + syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, + AHC_SYNCRATE_DT); + period = tinfo->goal.period; + ppr_options = tinfo->goal.ppr_options; + } + ahc_lock(ahc, &flags); + ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset, + ppr_options, AHC_TRANS_GOAL, FALSE); + ahc_unlock(ahc, &flags); +} + +static void ahc_linux_set_dt(struct scsi_target *starget, int dt) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); + struct ahc_tmode_tstate *tstate; + struct ahc_initiator_tinfo *tinfo + = ahc_fetch_transinfo(ahc, + starget->channel + 'A', + shost->this_id, starget->id, &tstate); + struct ahc_devinfo devinfo; + unsigned int ppr_options = tinfo->goal.ppr_options + & ~MSG_EXT_PPR_DT_REQ; + unsigned int period = tinfo->goal.period; + unsigned int width = tinfo->goal.width; + unsigned long flags; + const struct ahc_syncrate *syncrate; + + if (dt && spi_max_width(starget)) { + ppr_options |= MSG_EXT_PPR_DT_REQ; + if (!width) + ahc_linux_set_width(starget, 1); + } else if (period == 9) + period = 10; /* if resetting DT, period must be >= 25ns */ + + ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, + AHC_SYNCRATE_DT); + ahc_lock(ahc, &flags); + ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset, + ppr_options, AHC_TRANS_GOAL, FALSE); + ahc_unlock(ahc, &flags); +} + +#if 0 +/* FIXME: This code claims to support IU and QAS. However, the actual + * sequencer code and aic7xxx_core have no support for these parameters and + * will get into a bad state if they're negotiated. 
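+ * Enabling them would let the SPI transport class
+ * request options the chip cannot honor.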
Do not enable this + * unless you know what you're doing */ +static void ahc_linux_set_qas(struct scsi_target *starget, int qas) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); + struct ahc_tmode_tstate *tstate; + struct ahc_initiator_tinfo *tinfo + = ahc_fetch_transinfo(ahc, + starget->channel + 'A', + shost->this_id, starget->id, &tstate); + struct ahc_devinfo devinfo; + unsigned int ppr_options = tinfo->goal.ppr_options + & ~MSG_EXT_PPR_QAS_REQ; + unsigned int period = tinfo->goal.period; + unsigned long flags; + struct ahc_syncrate *syncrate; + + if (qas) + ppr_options |= MSG_EXT_PPR_QAS_REQ; + + ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, + AHC_SYNCRATE_DT); + ahc_lock(ahc, &flags); + ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset, + ppr_options, AHC_TRANS_GOAL, FALSE); + ahc_unlock(ahc, &flags); +} + +static void ahc_linux_set_iu(struct scsi_target *starget, int iu) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); + struct ahc_tmode_tstate *tstate; + struct ahc_initiator_tinfo *tinfo + = ahc_fetch_transinfo(ahc, + starget->channel + 'A', + shost->this_id, starget->id, &tstate); + struct ahc_devinfo devinfo; + unsigned int ppr_options = tinfo->goal.ppr_options + & ~MSG_EXT_PPR_IU_REQ; + unsigned int period = tinfo->goal.period; + unsigned long flags; + struct ahc_syncrate *syncrate; + + if (iu) + ppr_options |= MSG_EXT_PPR_IU_REQ; + + ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, + starget->channel + 'A', ROLE_INITIATOR); + syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, + AHC_SYNCRATE_DT); + ahc_lock(ahc, &flags); + ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset, + ppr_options, AHC_TRANS_GOAL, FALSE); + ahc_unlock(ahc, &flags); +} +#endif + +static void ahc_linux_get_signalling(struct Scsi_Host *shost) +{ + struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata; + unsigned long flags; + u8 mode; + + if (!(ahc->features & AHC_ULTRA2)) { + /* non-LVD chipset, may not have SBLKCTL reg */ + spi_signalling(shost) = + ahc->features & AHC_HVD ? + SPI_SIGNAL_HVD : + SPI_SIGNAL_SE; + return; + } + + ahc_lock(ahc, &flags); + ahc_pause(ahc); + mode = ahc_inb(ahc, SBLKCTL); + ahc_unpause(ahc); + ahc_unlock(ahc, &flags); + + if (mode & ENAB40) + spi_signalling(shost) = SPI_SIGNAL_LVD; + else if (mode & ENAB20) + spi_signalling(shost) = SPI_SIGNAL_SE; + else + spi_signalling(shost) = SPI_SIGNAL_UNKNOWN; +} + +static struct spi_function_template ahc_linux_transport_functions = { + .set_offset = ahc_linux_set_offset, + .show_offset = 1, + .set_period = ahc_linux_set_period, + .show_period = 1, + .set_width = ahc_linux_set_width, + .show_width = 1, + .set_dt = ahc_linux_set_dt, + .show_dt = 1, +#if 0 + .set_iu = ahc_linux_set_iu, + .show_iu = 1, + .set_qas = ahc_linux_set_qas, + .show_qas = 1, +#endif + .get_signalling = ahc_linux_get_signalling, +}; + + + +static int __init +ahc_linux_init(void) +{ + /* + * If we've been passed any parameters, process them now. 
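+	 * aic7xxx holds the module/boot option string;
+	 * aic7xxx_setup() parses it before the PCI and
+	 * EISA front ends probe any adapters.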
+ */ + if (aic7xxx) + aic7xxx_setup(aic7xxx); + + ahc_linux_transport_template = + spi_attach_transport(&ahc_linux_transport_functions); + if (!ahc_linux_transport_template) + return -ENODEV; + + scsi_transport_reserve_device(ahc_linux_transport_template, + sizeof(struct ahc_linux_device)); + + ahc_linux_pci_init(); + ahc_linux_eisa_init(); + return 0; +} + +static void +ahc_linux_exit(void) +{ + ahc_linux_pci_exit(); + ahc_linux_eisa_exit(); + spi_release_transport(ahc_linux_transport_template); +} + +module_init(ahc_linux_init); +module_exit(ahc_linux_exit); diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h new file mode 100644 index 000000000..51d9f4de0 --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h @@ -0,0 +1,674 @@ +/* + * Adaptec AIC7xxx device driver for Linux. + * + * Copyright (c) 1994 John Aycock + * The University of Calgary Department of Computer Science. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Copyright (c) 2000-2003 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.h#151 $ + * + */ +#ifndef _AIC7XXX_LINUX_H_ +#define _AIC7XXX_LINUX_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* Core SCSI definitions */ +#define AIC_LIB_PREFIX ahc + +#include "cam.h" +#include "queue.h" +#include "scsi_message.h" +#include "aiclib.h" + +/*********************************** Debugging ********************************/ +#ifdef CONFIG_AIC7XXX_DEBUG_ENABLE +#ifdef CONFIG_AIC7XXX_DEBUG_MASK +#define AHC_DEBUG 1 +#define AHC_DEBUG_OPTS CONFIG_AIC7XXX_DEBUG_MASK +#else +/* + * Compile in debugging code, but do not enable any printfs. + */ +#define AHC_DEBUG 1 +#endif +/* No debugging code. */ +#endif + +/************************* Forward Declarations *******************************/ +struct ahc_softc; +typedef struct pci_dev *ahc_dev_softc_t; +typedef struct scsi_cmnd *ahc_io_ctx_t; + +/******************************* Byte Order ***********************************/ +#define ahc_htobe16(x) cpu_to_be16(x) +#define ahc_htobe32(x) cpu_to_be32(x) +#define ahc_htobe64(x) cpu_to_be64(x) +#define ahc_htole16(x) cpu_to_le16(x) +#define ahc_htole32(x) cpu_to_le32(x) +#define ahc_htole64(x) cpu_to_le64(x) + +#define ahc_be16toh(x) be16_to_cpu(x) +#define ahc_be32toh(x) be32_to_cpu(x) +#define ahc_be64toh(x) be64_to_cpu(x) +#define ahc_le16toh(x) le16_to_cpu(x) +#define ahc_le32toh(x) le32_to_cpu(x) +#define ahc_le64toh(x) le64_to_cpu(x) + +/************************* Configuration Data *********************************/ +extern u_int aic7xxx_no_probe; +extern u_int aic7xxx_allow_memio; +extern struct scsi_host_template aic7xxx_driver_template; + +/***************************** Bus Space/DMA **********************************/ + +typedef uint32_t bus_size_t; + +typedef enum { + BUS_SPACE_MEMIO, + BUS_SPACE_PIO +} bus_space_tag_t; + +typedef union { + u_long ioport; + volatile uint8_t __iomem *maddr; +} bus_space_handle_t; + +typedef struct bus_dma_segment +{ + dma_addr_t ds_addr; + bus_size_t ds_len; +} bus_dma_segment_t; + +struct ahc_linux_dma_tag +{ + bus_size_t alignment; + bus_size_t boundary; + bus_size_t maxsize; +}; +typedef struct ahc_linux_dma_tag* bus_dma_tag_t; + +typedef dma_addr_t bus_dmamap_t; + +typedef int bus_dma_filter_t(void*, dma_addr_t); +typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int); + +#define BUS_DMA_WAITOK 0x0 +#define BUS_DMA_NOWAIT 0x1 +#define BUS_DMA_ALLOCNOW 0x2 +#define BUS_DMA_LOAD_SEGS 0x4 /* + * Argument is an S/G list not + * a single buffer. 
+ */ + +#define BUS_SPACE_MAXADDR 0xFFFFFFFF +#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF +#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFF + +int ahc_dma_tag_create(struct ahc_softc *, bus_dma_tag_t /*parent*/, + bus_size_t /*alignment*/, bus_size_t /*boundary*/, + dma_addr_t /*lowaddr*/, dma_addr_t /*highaddr*/, + bus_dma_filter_t*/*filter*/, void */*filterarg*/, + bus_size_t /*maxsize*/, int /*nsegments*/, + bus_size_t /*maxsegsz*/, int /*flags*/, + bus_dma_tag_t */*dma_tagp*/); + +void ahc_dma_tag_destroy(struct ahc_softc *, bus_dma_tag_t /*tag*/); + +int ahc_dmamem_alloc(struct ahc_softc *, bus_dma_tag_t /*dmat*/, + void** /*vaddr*/, int /*flags*/, + bus_dmamap_t* /*mapp*/); + +void ahc_dmamem_free(struct ahc_softc *, bus_dma_tag_t /*dmat*/, + void* /*vaddr*/, bus_dmamap_t /*map*/); + +void ahc_dmamap_destroy(struct ahc_softc *, bus_dma_tag_t /*tag*/, + bus_dmamap_t /*map*/); + +int ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t /*dmat*/, + bus_dmamap_t /*map*/, void * /*buf*/, + bus_size_t /*buflen*/, bus_dmamap_callback_t *, + void */*callback_arg*/, int /*flags*/); + +int ahc_dmamap_unload(struct ahc_softc *, bus_dma_tag_t, bus_dmamap_t); + +/* + * Operations performed by ahc_dmamap_sync(). + */ +#define BUS_DMASYNC_PREREAD 0x01 /* pre-read synchronization */ +#define BUS_DMASYNC_POSTREAD 0x02 /* post-read synchronization */ +#define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */ +#define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */ + +/* + * XXX + * ahc_dmamap_sync is only used on buffers allocated with + * the dma_alloc_coherent() API. Although I'm not sure how + * this works on architectures with a write buffer, Linux does + * not have an API to sync "coherent" memory. Perhaps we need + * to do an mb()? + */ +#define ahc_dmamap_sync(ahc, dma_tag, dmamap, offset, len, op) + +/********************************** Includes **********************************/ +#ifdef CONFIG_AIC7XXX_REG_PRETTY_PRINT +#define AIC_DEBUG_REGISTERS 1 +#else +#define AIC_DEBUG_REGISTERS 0 +#endif +#include "aic7xxx.h" + +/***************************** Timer Facilities *******************************/ +static inline void +ahc_scb_timer_reset(struct scb *scb, u_int usec) +{ +} + +/***************************** SMP support ************************************/ +#include + +#define AIC7XXX_DRIVER_VERSION "7.0" + +/*************************** Device Data Structures ***************************/ +/* + * A per probed device structure used to deal with some error recovery + * scenarios that the Linux mid-layer code just doesn't know how to + * handle. The structure allocated for a device only becomes persistent + * after a successfully completed inquiry command to the target when + * that inquiry data indicates a lun is present. + */ +typedef enum { + AHC_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */ + AHC_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */ + AHC_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */ + AHC_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */ +} ahc_linux_dev_flags; + +struct ahc_linux_device { + /* + * The number of transactions currently + * queued to the device. + */ + int active; + + /* + * The currently allowed number of + * transactions that can be queued to + * the device. Must be signed for + * conversion from tagged to untagged + * mode where the device may have more + * than one outstanding active transaction. + */ + int openings; + + /* + * A positive count indicates that this + * device's queue is halted. 
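+	 * It is bumped by ahc_freeze_scb() and dropped
+	 * again in ahc_done() when the frozen command
+	 * completes.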
+ */ + u_int qfrozen; + + /* + * Cumulative command counter. + */ + u_long commands_issued; + + /* + * The number of tagged transactions when + * running at our current opening level + * that have been successfully received by + * this device since the last QUEUE FULL. + */ + u_int tag_success_count; +#define AHC_TAG_SUCCESS_INTERVAL 50 + + ahc_linux_dev_flags flags; + + /* + * The high limit for the tags variable. + */ + u_int maxtags; + + /* + * The computed number of tags outstanding + * at the time of the last QUEUE FULL event. + */ + u_int tags_on_last_queuefull; + + /* + * How many times we have seen a queue full + * with the same number of tags. This is used + * to stop our adaptive queue depth algorithm + * on devices with a fixed number of tags. + */ + u_int last_queuefull_same_count; +#define AHC_LOCK_TAGS_COUNT 50 + + /* + * How many transactions have been queued + * without the device going idle. We use + * this statistic to determine when to issue + * an ordered tag to prevent transaction + * starvation. This statistic is only updated + * if the AHC_DEV_PERIODIC_OTAG flag is set + * on this device. + */ + u_int commands_since_idle_or_otag; +#define AHC_OTAG_THRESH 500 +}; + +/********************* Definitions Required by the Core ***********************/ +/* + * Number of SG segments we require. So long as the S/G segments for + * a particular transaction are allocated in a physically contiguous + * manner and are allocated below 4GB, the number of S/G segments is + * unrestricted. + */ +#define AHC_NSEG 128 + +/* + * Per-SCB OSM storage. + */ +struct scb_platform_data { + struct ahc_linux_device *dev; + dma_addr_t buf_busaddr; + uint32_t xfer_len; + uint32_t sense_resid; /* Auto-Sense residual */ +}; + +/* + * Define a structure used for each host adapter. All members are + * aligned on a boundary >= the size of the member to honor the + * alignment restrictions of the various platforms supported by + * this driver. + */ +struct ahc_platform_data { + /* + * Fields accessed from interrupt context. 
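+	 * spin_lock serializes the interrupt handler
+	 * against the queuecommand and error recovery
+	 * paths via ahc_lock()/ahc_unlock().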
+ */ + struct scsi_target *starget[AHC_NUM_TARGETS]; + + spinlock_t spin_lock; + u_int qfrozen; + struct completion *eh_done; + struct Scsi_Host *host; /* pointer to scsi host */ +#define AHC_LINUX_NOIRQ ((uint32_t)~0) + uint32_t irq; /* IRQ for this adapter */ + uint32_t bios_address; + resource_size_t mem_busaddr; /* Mem Base Addr */ +}; + +void ahc_delay(long); + + +/***************************** Low Level I/O **********************************/ +uint8_t ahc_inb(struct ahc_softc * ahc, long port); +void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val); +void ahc_outsb(struct ahc_softc * ahc, long port, + uint8_t *, int count); +void ahc_insb(struct ahc_softc * ahc, long port, + uint8_t *, int count); + +/**************************** Initialization **********************************/ +int ahc_linux_register_host(struct ahc_softc *, + struct scsi_host_template *); + +/******************************** Locking *************************************/ +/* Lock protecting internal data structures */ + +static inline void +ahc_lockinit(struct ahc_softc *ahc) +{ + spin_lock_init(&ahc->platform_data->spin_lock); +} + +static inline void +ahc_lock(struct ahc_softc *ahc, unsigned long *flags) +{ + spin_lock_irqsave(&ahc->platform_data->spin_lock, *flags); +} + +static inline void +ahc_unlock(struct ahc_softc *ahc, unsigned long *flags) +{ + spin_unlock_irqrestore(&ahc->platform_data->spin_lock, *flags); +} + +/******************************* PCI Definitions ******************************/ +/* + * PCIM_xxx: mask to locate subfield in register + * PCIR_xxx: config register offset + * PCIC_xxx: device class + * PCIS_xxx: device subclass + * PCIP_xxx: device programming interface + * PCIV_xxx: PCI vendor ID (only required to fixup ancient devices) + * PCID_xxx: device ID + */ +#define PCIR_DEVVENDOR 0x00 +#define PCIR_VENDOR 0x00 +#define PCIR_DEVICE 0x02 +#define PCIR_COMMAND 0x04 +#define PCIM_CMD_PORTEN 0x0001 +#define PCIM_CMD_MEMEN 0x0002 +#define PCIM_CMD_BUSMASTEREN 0x0004 +#define PCIM_CMD_MWRICEN 0x0010 +#define PCIM_CMD_PERRESPEN 0x0040 +#define PCIM_CMD_SERRESPEN 0x0100 +#define PCIR_STATUS 0x06 +#define PCIR_REVID 0x08 +#define PCIR_PROGIF 0x09 +#define PCIR_SUBCLASS 0x0a +#define PCIR_CLASS 0x0b +#define PCIR_CACHELNSZ 0x0c +#define PCIR_LATTIMER 0x0d +#define PCIR_HEADERTYPE 0x0e +#define PCIM_MFDEV 0x80 +#define PCIR_BIST 0x0f +#define PCIR_CAP_PTR 0x34 + +/* config registers for header type 0 devices */ +#define PCIR_MAPS 0x10 + +typedef enum +{ + AHC_POWER_STATE_D0, + AHC_POWER_STATE_D1, + AHC_POWER_STATE_D2, + AHC_POWER_STATE_D3 +} ahc_power_state; + +/**************************** VL/EISA Routines ********************************/ +#ifdef CONFIG_EISA +int ahc_linux_eisa_init(void); +void ahc_linux_eisa_exit(void); +int aic7770_map_registers(struct ahc_softc *ahc, + u_int port); +int aic7770_map_int(struct ahc_softc *ahc, u_int irq); +#else +static inline int ahc_linux_eisa_init(void) { + return -ENODEV; +} +static inline void ahc_linux_eisa_exit(void) { +} +#endif + +/******************************* PCI Routines *********************************/ +#ifdef CONFIG_PCI +int ahc_linux_pci_init(void); +void ahc_linux_pci_exit(void); +int ahc_pci_map_registers(struct ahc_softc *ahc); +int ahc_pci_map_int(struct ahc_softc *ahc); + +uint32_t ahc_pci_read_config(ahc_dev_softc_t pci, + int reg, int width); + +void ahc_pci_write_config(ahc_dev_softc_t pci, + int reg, uint32_t value, + int width); + +static inline int ahc_get_pci_function(ahc_dev_softc_t); +static inline int 
+ahc_get_pci_function(ahc_dev_softc_t pci) +{ + return (PCI_FUNC(pci->devfn)); +} + +static inline int ahc_get_pci_slot(ahc_dev_softc_t); +static inline int +ahc_get_pci_slot(ahc_dev_softc_t pci) +{ + return (PCI_SLOT(pci->devfn)); +} + +static inline int ahc_get_pci_bus(ahc_dev_softc_t); +static inline int +ahc_get_pci_bus(ahc_dev_softc_t pci) +{ + return (pci->bus->number); +} +#else +static inline int ahc_linux_pci_init(void) { + return 0; +} +static inline void ahc_linux_pci_exit(void) { +} +#endif + +static inline void ahc_flush_device_writes(struct ahc_softc *); +static inline void +ahc_flush_device_writes(struct ahc_softc *ahc) +{ + /* XXX Is this sufficient for all architectures??? */ + ahc_inb(ahc, INTSTAT); +} + +/**************************** Proc FS Support *********************************/ +int ahc_proc_write_seeprom(struct Scsi_Host *, char *, int); +int ahc_linux_show_info(struct seq_file *, struct Scsi_Host *); + +/*************************** Domain Validation ********************************/ +/*********************** Transaction Access Wrappers *************************/ + +static inline +void ahc_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status) +{ + cmd->result &= ~(CAM_STATUS_MASK << 16); + cmd->result |= status << 16; +} + +static inline +void ahc_set_transaction_status(struct scb *scb, uint32_t status) +{ + ahc_cmd_set_transaction_status(scb->io_ctx,status); +} + +static inline +void ahc_cmd_set_scsi_status(struct scsi_cmnd *cmd, uint32_t status) +{ + cmd->result &= ~0xFFFF; + cmd->result |= status; +} + +static inline +void ahc_set_scsi_status(struct scb *scb, uint32_t status) +{ + ahc_cmd_set_scsi_status(scb->io_ctx, status); +} + +static inline +uint32_t ahc_cmd_get_transaction_status(struct scsi_cmnd *cmd) +{ + return ((cmd->result >> 16) & CAM_STATUS_MASK); +} + +static inline +uint32_t ahc_get_transaction_status(struct scb *scb) +{ + return (ahc_cmd_get_transaction_status(scb->io_ctx)); +} + +static inline +uint32_t ahc_cmd_get_scsi_status(struct scsi_cmnd *cmd) +{ + return (cmd->result & 0xFFFF); +} + +static inline +uint32_t ahc_get_scsi_status(struct scb *scb) +{ + return (ahc_cmd_get_scsi_status(scb->io_ctx)); +} + +static inline +void ahc_set_transaction_tag(struct scb *scb, int enabled, u_int type) +{ + /* + * Nothing to do for linux as the incoming transaction + * has no concept of tag/non tagged, etc. + */ +} + +static inline +u_long ahc_get_transfer_length(struct scb *scb) +{ + return (scb->platform_data->xfer_len); +} + +static inline +int ahc_get_transfer_dir(struct scb *scb) +{ + return (scb->io_ctx->sc_data_direction); +} + +static inline +void ahc_set_residual(struct scb *scb, u_long resid) +{ + scsi_set_resid(scb->io_ctx, resid); +} + +static inline +void ahc_set_sense_residual(struct scb *scb, u_long resid) +{ + scb->platform_data->sense_resid = resid; +} + +static inline +u_long ahc_get_residual(struct scb *scb) +{ + return scsi_get_resid(scb->io_ctx); +} + +static inline +u_long ahc_get_sense_residual(struct scb *scb) +{ + return (scb->platform_data->sense_resid); +} + +static inline +int ahc_perform_autosense(struct scb *scb) +{ + /* + * We always perform autosense in Linux. + * On other platforms this is set on a + * per-transaction basis. 
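+	 * Hence the unconditional return of 1 below.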
+ */ + return (1); +} + +static inline uint32_t +ahc_get_sense_bufsize(struct ahc_softc *ahc, struct scb *scb) +{ + return (sizeof(struct scsi_sense_data)); +} + +static inline void +ahc_notify_xfer_settings_change(struct ahc_softc *ahc, + struct ahc_devinfo *devinfo) +{ + /* Nothing to do here for linux */ +} + +static inline void +ahc_platform_scb_free(struct ahc_softc *ahc, struct scb *scb) +{ +} + +int ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg); +void ahc_platform_free(struct ahc_softc *ahc); +void ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb); + +static inline void +ahc_freeze_scb(struct scb *scb) +{ + if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) { + scb->io_ctx->result |= CAM_DEV_QFRZN << 16; + scb->platform_data->dev->qfrozen++; + } +} + +void ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev, + struct ahc_devinfo *devinfo, ahc_queue_alg); +int ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, + char channel, int lun, u_int tag, + role_t role, uint32_t status); +irqreturn_t + ahc_linux_isr(int irq, void *dev_id); +void ahc_platform_flushwork(struct ahc_softc *ahc); +void ahc_done(struct ahc_softc*, struct scb*); +void ahc_send_async(struct ahc_softc *, char channel, + u_int target, u_int lun, ac_code); +void ahc_print_path(struct ahc_softc *, struct scb *); + +#ifdef CONFIG_PCI +#define AHC_PCI_CONFIG 1 +#else +#define AHC_PCI_CONFIG 0 +#endif +#define bootverbose aic7xxx_verbose +extern u_int aic7xxx_verbose; +#endif /* _AIC7XXX_LINUX_H_ */ diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c new file mode 100644 index 000000000..a07e94fac --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c @@ -0,0 +1,447 @@ +/* + * Linux driver attachment glue for PCI based controllers. + * + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c#47 $ + */ + +#include "aic7xxx_osm.h" +#include "aic7xxx_pci.h" + +/* Define the macro locally since it's different for different class of chips. +*/ +#define ID(x) ID_C(x, PCI_CLASS_STORAGE_SCSI) + +static const struct pci_device_id ahc_linux_pci_id_table[] = { + /* aic7850 based controllers */ + ID(ID_AHA_2902_04_10_15_20C_30C), + /* aic7860 based controllers */ + ID(ID_AHA_2930CU), + ID(ID_AHA_1480A & ID_DEV_VENDOR_MASK), + ID(ID_AHA_2940AU_0 & ID_DEV_VENDOR_MASK), + ID(ID_AHA_2940AU_CN & ID_DEV_VENDOR_MASK), + ID(ID_AHA_2930C_VAR & ID_DEV_VENDOR_MASK), + /* aic7870 based controllers */ + ID(ID_AHA_2940), + ID(ID_AHA_3940), + ID(ID_AHA_398X), + ID(ID_AHA_2944), + ID(ID_AHA_3944), + ID(ID_AHA_4944), + /* aic7880 based controllers */ + ID(ID_AHA_2940U & ID_DEV_VENDOR_MASK), + ID(ID_AHA_3940U & ID_DEV_VENDOR_MASK), + ID(ID_AHA_2944U & ID_DEV_VENDOR_MASK), + ID(ID_AHA_3944U & ID_DEV_VENDOR_MASK), + ID(ID_AHA_398XU & ID_DEV_VENDOR_MASK), + ID(ID_AHA_4944U & ID_DEV_VENDOR_MASK), + ID(ID_AHA_2930U & ID_DEV_VENDOR_MASK), + ID(ID_AHA_2940U_PRO & ID_DEV_VENDOR_MASK), + ID(ID_AHA_2940U_CN & ID_DEV_VENDOR_MASK), + /* aic7890 based controllers */ + ID(ID_AHA_2930U2), + ID(ID_AHA_2940U2B), + ID(ID_AHA_2940U2_OEM), + ID(ID_AHA_2940U2), + ID(ID_AHA_2950U2B), + ID16(ID_AIC7890_ARO & ID_AIC7895_ARO_MASK), + ID(ID_AAA_131U2), + /* aic7890 based controllers */ + ID(ID_AHA_29160), + ID(ID_AHA_29160_CPQ), + ID(ID_AHA_29160N), + ID(ID_AHA_29160C), + ID(ID_AHA_29160B), + ID(ID_AHA_19160B), + ID(ID_AIC7892_ARO), + /* aic7892 based controllers */ + ID(ID_AHA_2940U_DUAL), + ID(ID_AHA_3940AU), + ID(ID_AHA_3944AU), + ID(ID_AIC7895_ARO), + ID(ID_AHA_3950U2B_0), + ID(ID_AHA_3950U2B_1), + ID(ID_AHA_3950U2D_0), + ID(ID_AHA_3950U2D_1), + ID(ID_AIC7896_ARO), + /* aic7899 based controllers */ + ID(ID_AHA_3960D), + ID(ID_AHA_3960D_CPQ), + ID(ID_AIC7899_ARO), + /* Generic chip probes for devices we don't know exactly. 
*/ + ID(ID_AIC7850 & ID_DEV_VENDOR_MASK), + ID(ID_AIC7855 & ID_DEV_VENDOR_MASK), + ID(ID_AIC7859 & ID_DEV_VENDOR_MASK), + ID(ID_AIC7860 & ID_DEV_VENDOR_MASK), + ID(ID_AIC7870 & ID_DEV_VENDOR_MASK), + ID(ID_AIC7880 & ID_DEV_VENDOR_MASK), + ID16(ID_AIC7890 & ID_9005_GENERIC_MASK), + ID16(ID_AIC7892 & ID_9005_GENERIC_MASK), + ID(ID_AIC7895 & ID_DEV_VENDOR_MASK), + ID16(ID_AIC7896 & ID_9005_GENERIC_MASK), + ID16(ID_AIC7899 & ID_9005_GENERIC_MASK), + ID(ID_AIC7810 & ID_DEV_VENDOR_MASK), + ID(ID_AIC7815 & ID_DEV_VENDOR_MASK), + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, ahc_linux_pci_id_table); + +static int __maybe_unused +ahc_linux_pci_dev_suspend(struct device *dev) +{ + struct ahc_softc *ahc = dev_get_drvdata(dev); + + return ahc_suspend(ahc); +} + +static int __maybe_unused +ahc_linux_pci_dev_resume(struct device *dev) +{ + struct ahc_softc *ahc = dev_get_drvdata(dev); + + ahc_pci_resume(ahc); + + return (ahc_resume(ahc)); +} + +static void +ahc_linux_pci_dev_remove(struct pci_dev *pdev) +{ + struct ahc_softc *ahc = pci_get_drvdata(pdev); + u_long s; + + if (ahc->platform_data && ahc->platform_data->host) + scsi_remove_host(ahc->platform_data->host); + + ahc_lock(ahc, &s); + ahc_intr_enable(ahc, FALSE); + ahc_unlock(ahc, &s); + ahc_free(ahc); +} + +static void +ahc_linux_pci_inherit_flags(struct ahc_softc *ahc) +{ + struct pci_dev *pdev = ahc->dev_softc, *master_pdev; + unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); + + master_pdev = pci_get_slot(pdev->bus, master_devfn); + if (master_pdev) { + struct ahc_softc *master = pci_get_drvdata(master_pdev); + if (master) { + ahc->flags &= ~AHC_BIOS_ENABLED; + ahc->flags |= master->flags & AHC_BIOS_ENABLED; + + ahc->flags &= ~AHC_PRIMARY_CHANNEL; + ahc->flags |= master->flags & AHC_PRIMARY_CHANNEL; + } else + printk(KERN_ERR "aic7xxx: no multichannel peer found!\n"); + pci_dev_put(master_pdev); + } +} + +static int +ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + char buf[80]; + const uint64_t mask_39bit = 0x7FFFFFFFFFULL; + struct ahc_softc *ahc; + ahc_dev_softc_t pci; + const struct ahc_pci_identity *entry; + char *name; + int error; + struct device *dev = &pdev->dev; + + pci = pdev; + entry = ahc_find_pci_device(pci); + if (entry == NULL) + return (-ENODEV); + + /* + * Allocate a softc for this card and + * set it up for attachment by our + * common detect routine. + */ + sprintf(buf, "ahc_pci:%d:%d:%d", + ahc_get_pci_bus(pci), + ahc_get_pci_slot(pci), + ahc_get_pci_function(pci)); + name = kstrdup(buf, GFP_ATOMIC); + if (name == NULL) + return (-ENOMEM); + ahc = ahc_alloc(NULL, name); + if (ahc == NULL) + return (-ENOMEM); + if (pci_enable_device(pdev)) { + ahc_free(ahc); + return (-ENODEV); + } + pci_set_master(pdev); + + if (sizeof(dma_addr_t) > 4 + && ahc->features & AHC_LARGE_SCBS + && dma_set_mask(dev, mask_39bit) == 0 + && dma_get_required_mask(dev) > DMA_BIT_MASK(32)) { + ahc->flags |= AHC_39BIT_ADDRESSING; + } else { + if (dma_set_mask(dev, DMA_BIT_MASK(32))) { + ahc_free(ahc); + printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n"); + return (-ENODEV); + } + } + ahc->dev_softc = pci; + ahc->dev = &pci->dev; + error = ahc_pci_config(ahc, entry); + if (error != 0) { + ahc_free(ahc); + return (-error); + } + + /* + * Second Function PCI devices need to inherit some + * settings from function 0. 
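+	 * ahc_linux_pci_inherit_flags() copies the
+	 * AHC_BIOS_ENABLED and AHC_PRIMARY_CHANNEL state
+	 * from the function 0 softc.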
+ */ + if ((ahc->features & AHC_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0) + ahc_linux_pci_inherit_flags(ahc); + + pci_set_drvdata(pdev, ahc); + ahc_linux_register_host(ahc, &aic7xxx_driver_template); + return (0); +} + +/******************************* PCI Routines *********************************/ +uint32_t +ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width) +{ + switch (width) { + case 1: + { + uint8_t retval; + + pci_read_config_byte(pci, reg, &retval); + return (retval); + } + case 2: + { + uint16_t retval; + pci_read_config_word(pci, reg, &retval); + return (retval); + } + case 4: + { + uint32_t retval; + pci_read_config_dword(pci, reg, &retval); + return (retval); + } + default: + panic("ahc_pci_read_config: Read size too big"); + /* NOTREACHED */ + return (0); + } +} + +void +ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width) +{ + switch (width) { + case 1: + pci_write_config_byte(pci, reg, value); + break; + case 2: + pci_write_config_word(pci, reg, value); + break; + case 4: + pci_write_config_dword(pci, reg, value); + break; + default: + panic("ahc_pci_write_config: Write size too big"); + /* NOTREACHED */ + } +} + +static SIMPLE_DEV_PM_OPS(ahc_linux_pci_dev_pm_ops, + ahc_linux_pci_dev_suspend, + ahc_linux_pci_dev_resume); + +static struct pci_driver aic7xxx_pci_driver = { + .name = "aic7xxx", + .probe = ahc_linux_pci_dev_probe, + .driver.pm = &ahc_linux_pci_dev_pm_ops, + .remove = ahc_linux_pci_dev_remove, + .id_table = ahc_linux_pci_id_table +}; + +int +ahc_linux_pci_init(void) +{ + return pci_register_driver(&aic7xxx_pci_driver); +} + +void +ahc_linux_pci_exit(void) +{ + pci_unregister_driver(&aic7xxx_pci_driver); +} + +static int +ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, resource_size_t *base) +{ + if (aic7xxx_allow_memio == 0) + return (ENOMEM); + + *base = pci_resource_start(ahc->dev_softc, 0); + if (*base == 0) + return (ENOMEM); + if (!request_region(*base, 256, "aic7xxx")) + return (ENOMEM); + return (0); +} + +static int +ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc, + resource_size_t *bus_addr, + uint8_t __iomem **maddr) +{ + resource_size_t start; + int error; + + error = 0; + start = pci_resource_start(ahc->dev_softc, 1); + if (start != 0) { + *bus_addr = start; + if (!request_mem_region(start, 0x1000, "aic7xxx")) + error = ENOMEM; + if (error == 0) { + *maddr = ioremap(start, 256); + if (*maddr == NULL) { + error = ENOMEM; + release_mem_region(start, 0x1000); + } + } + } else + error = ENOMEM; + return (error); +} + +int +ahc_pci_map_registers(struct ahc_softc *ahc) +{ + uint32_t command; + resource_size_t base; + uint8_t __iomem *maddr; + int error; + + /* + * If its allowed, we prefer memory mapped access. + */ + command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, 4); + command &= ~(PCIM_CMD_PORTEN|PCIM_CMD_MEMEN); + base = 0; + maddr = NULL; + error = ahc_linux_pci_reserve_mem_region(ahc, &base, &maddr); + if (error == 0) { + ahc->platform_data->mem_busaddr = base; + ahc->tag = BUS_SPACE_MEMIO; + ahc->bsh.maddr = maddr; + ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, + command | PCIM_CMD_MEMEN, 4); + + /* + * Do a quick test to see if memory mapped + * I/O is functioning correctly. + */ + if (ahc_pci_test_register_access(ahc) != 0) { + + printk("aic7xxx: PCI Device %d:%d:%d " + "failed memory mapped test. 
Using PIO.\n", + ahc_get_pci_bus(ahc->dev_softc), + ahc_get_pci_slot(ahc->dev_softc), + ahc_get_pci_function(ahc->dev_softc)); + iounmap(maddr); + release_mem_region(ahc->platform_data->mem_busaddr, + 0x1000); + ahc->bsh.maddr = NULL; + maddr = NULL; + } else + command |= PCIM_CMD_MEMEN; + } else { + printk("aic7xxx: PCI%d:%d:%d MEM region 0x%llx " + "unavailable. Cannot memory map device.\n", + ahc_get_pci_bus(ahc->dev_softc), + ahc_get_pci_slot(ahc->dev_softc), + ahc_get_pci_function(ahc->dev_softc), + (unsigned long long)base); + } + + /* + * We always prefer memory mapped access. + */ + if (maddr == NULL) { + + error = ahc_linux_pci_reserve_io_region(ahc, &base); + if (error == 0) { + ahc->tag = BUS_SPACE_PIO; + ahc->bsh.ioport = (u_long)base; + command |= PCIM_CMD_PORTEN; + } else { + printk("aic7xxx: PCI%d:%d:%d IO region 0x%llx[0..255] " + "unavailable. Cannot map device.\n", + ahc_get_pci_bus(ahc->dev_softc), + ahc_get_pci_slot(ahc->dev_softc), + ahc_get_pci_function(ahc->dev_softc), + (unsigned long long)base); + } + } + ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, 4); + return (error); +} + +int +ahc_pci_map_int(struct ahc_softc *ahc) +{ + int error; + + error = request_irq(ahc->dev_softc->irq, ahc_linux_isr, + IRQF_SHARED, "aic7xxx", ahc); + if (error == 0) + ahc->platform_data->irq = ahc->dev_softc->irq; + + return (-error); +} + diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c new file mode 100644 index 000000000..2d4c85426 --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c @@ -0,0 +1,2460 @@ +/* + * Product specific probe and attach routines for: + * 3940, 2940, aic7895, aic7890, aic7880, + * aic7870, aic7860 and aic7850 SCSI controllers + * + * Copyright (c) 1994-2001 Justin T. Gibbs. + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#79 $ + */ + +#include "aic7xxx_osm.h" +#include "aic7xxx_inline.h" +#include "aic7xxx_93cx6.h" +#include "aic7xxx_pci.h" + +static inline uint64_t +ahc_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor) +{ + uint64_t id; + + id = subvendor + | (subdevice << 16) + | ((uint64_t)vendor << 32) + | ((uint64_t)device << 48); + + return (id); +} + +#define AHC_PCI_IOADDR PCIR_MAPS /* I/O Address */ +#define AHC_PCI_MEMADDR (PCIR_MAPS + 4) /* Mem I/O Address */ + +#define DEVID_9005_TYPE(id) ((id) & 0xF) +#define DEVID_9005_TYPE_HBA 0x0 /* Standard Card */ +#define DEVID_9005_TYPE_AAA 0x3 /* RAID Card */ +#define DEVID_9005_TYPE_SISL 0x5 /* Container ROMB */ +#define DEVID_9005_TYPE_MB 0xF /* On Motherboard */ + +#define DEVID_9005_MAXRATE(id) (((id) & 0x30) >> 4) +#define DEVID_9005_MAXRATE_U160 0x0 +#define DEVID_9005_MAXRATE_ULTRA2 0x1 +#define DEVID_9005_MAXRATE_ULTRA 0x2 +#define DEVID_9005_MAXRATE_FAST 0x3 + +#define DEVID_9005_MFUNC(id) (((id) & 0x40) >> 6) + +#define DEVID_9005_CLASS(id) (((id) & 0xFF00) >> 8) +#define DEVID_9005_CLASS_SPI 0x0 /* Parallel SCSI */ + +#define SUBID_9005_TYPE(id) ((id) & 0xF) +#define SUBID_9005_TYPE_MB 0xF /* On Motherboard */ +#define SUBID_9005_TYPE_CARD 0x0 /* Standard Card */ +#define SUBID_9005_TYPE_LCCARD 0x1 /* Low Cost Card */ +#define SUBID_9005_TYPE_RAID 0x3 /* Combined with Raid */ + +#define SUBID_9005_TYPE_KNOWN(id) \ + ((((id) & 0xF) == SUBID_9005_TYPE_MB) \ + || (((id) & 0xF) == SUBID_9005_TYPE_CARD) \ + || (((id) & 0xF) == SUBID_9005_TYPE_LCCARD) \ + || (((id) & 0xF) == SUBID_9005_TYPE_RAID)) + +#define SUBID_9005_MAXRATE(id) (((id) & 0x30) >> 4) +#define SUBID_9005_MAXRATE_ULTRA2 0x0 +#define SUBID_9005_MAXRATE_ULTRA 0x1 +#define SUBID_9005_MAXRATE_U160 0x2 +#define SUBID_9005_MAXRATE_RESERVED 0x3 + +#define SUBID_9005_SEEPTYPE(id) \ + ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \ + ? ((id) & 0xC0) >> 6 \ + : ((id) & 0x300) >> 8) +#define SUBID_9005_SEEPTYPE_NONE 0x0 +#define SUBID_9005_SEEPTYPE_1K 0x1 +#define SUBID_9005_SEEPTYPE_2K_4K 0x2 +#define SUBID_9005_SEEPTYPE_RESERVED 0x3 +#define SUBID_9005_AUTOTERM(id) \ + ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \ + ? (((id) & 0x400) >> 10) == 0 \ + : (((id) & 0x40) >> 6) == 0) + +#define SUBID_9005_NUMCHAN(id) \ + ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \ + ? ((id) & 0x300) >> 8 \ + : ((id) & 0xC00) >> 10) + +#define SUBID_9005_LEGACYCONN(id) \ + ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \ + ? 0 \ + : ((id) & 0x80) >> 7) + +#define SUBID_9005_MFUNCENB(id) \ + ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \ + ? ((id) & 0x800) >> 11 \ + : ((id) & 0x1000) >> 12) +/* + * Informational only. Should use chip register to be + * certain, but may be use in identification strings. 
+ */ +#define SUBID_9005_CARD_SCSIWIDTH_MASK 0x2000 +#define SUBID_9005_CARD_PCIWIDTH_MASK 0x4000 +#define SUBID_9005_CARD_SEDIFF_MASK 0x8000 + +static ahc_device_setup_t ahc_aic785X_setup; +static ahc_device_setup_t ahc_aic7860_setup; +static ahc_device_setup_t ahc_apa1480_setup; +static ahc_device_setup_t ahc_aic7870_setup; +static ahc_device_setup_t ahc_aic7870h_setup; +static ahc_device_setup_t ahc_aha394X_setup; +static ahc_device_setup_t ahc_aha394Xh_setup; +static ahc_device_setup_t ahc_aha494X_setup; +static ahc_device_setup_t ahc_aha494Xh_setup; +static ahc_device_setup_t ahc_aha398X_setup; +static ahc_device_setup_t ahc_aic7880_setup; +static ahc_device_setup_t ahc_aic7880h_setup; +static ahc_device_setup_t ahc_aha2940Pro_setup; +static ahc_device_setup_t ahc_aha394XU_setup; +static ahc_device_setup_t ahc_aha394XUh_setup; +static ahc_device_setup_t ahc_aha398XU_setup; +static ahc_device_setup_t ahc_aic7890_setup; +static ahc_device_setup_t ahc_aic7892_setup; +static ahc_device_setup_t ahc_aic7895_setup; +static ahc_device_setup_t ahc_aic7895h_setup; +static ahc_device_setup_t ahc_aic7896_setup; +static ahc_device_setup_t ahc_aic7899_setup; +static ahc_device_setup_t ahc_aha29160C_setup; +static ahc_device_setup_t ahc_raid_setup; +static ahc_device_setup_t ahc_aha394XX_setup; +static ahc_device_setup_t ahc_aha494XX_setup; +static ahc_device_setup_t ahc_aha398XX_setup; + +static const struct ahc_pci_identity ahc_pci_ident_table[] = { + /* aic7850 based controllers */ + { + ID_AHA_2902_04_10_15_20C_30C, + ID_ALL_MASK, + "Adaptec 2902/04/10/15/20C/30C SCSI adapter", + ahc_aic785X_setup + }, + /* aic7860 based controllers */ + { + ID_AHA_2930CU, + ID_ALL_MASK, + "Adaptec 2930CU SCSI adapter", + ahc_aic7860_setup + }, + { + ID_AHA_1480A & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec 1480A Ultra SCSI adapter", + ahc_apa1480_setup + }, + { + ID_AHA_2940AU_0 & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec 2940A Ultra SCSI adapter", + ahc_aic7860_setup + }, + { + ID_AHA_2940AU_CN & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec 2940A/CN Ultra SCSI adapter", + ahc_aic7860_setup + }, + { + ID_AHA_2930C_VAR & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec 2930C Ultra SCSI adapter (VAR)", + ahc_aic7860_setup + }, + /* aic7870 based controllers */ + { + ID_AHA_2940, + ID_ALL_MASK, + "Adaptec 2940 SCSI adapter", + ahc_aic7870_setup + }, + { + ID_AHA_3940, + ID_ALL_MASK, + "Adaptec 3940 SCSI adapter", + ahc_aha394X_setup + }, + { + ID_AHA_398X, + ID_ALL_MASK, + "Adaptec 398X SCSI RAID adapter", + ahc_aha398X_setup + }, + { + ID_AHA_2944, + ID_ALL_MASK, + "Adaptec 2944 SCSI adapter", + ahc_aic7870h_setup + }, + { + ID_AHA_3944, + ID_ALL_MASK, + "Adaptec 3944 SCSI adapter", + ahc_aha394Xh_setup + }, + { + ID_AHA_4944, + ID_ALL_MASK, + "Adaptec 4944 SCSI adapter", + ahc_aha494Xh_setup + }, + /* aic7880 based controllers */ + { + ID_AHA_2940U & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec 2940 Ultra SCSI adapter", + ahc_aic7880_setup + }, + { + ID_AHA_3940U & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec 3940 Ultra SCSI adapter", + ahc_aha394XU_setup + }, + { + ID_AHA_2944U & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec 2944 Ultra SCSI adapter", + ahc_aic7880h_setup + }, + { + ID_AHA_3944U & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec 3944 Ultra SCSI adapter", + ahc_aha394XUh_setup + }, + { + ID_AHA_398XU & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec 398X Ultra SCSI RAID adapter", + ahc_aha398XU_setup + }, + { + /* + * XXX Don't know 
the slot numbers + * so we can't identify channels + */ + ID_AHA_4944U & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec 4944 Ultra SCSI adapter", + ahc_aic7880h_setup + }, + { + ID_AHA_2930U & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec 2930 Ultra SCSI adapter", + ahc_aic7880_setup + }, + { + ID_AHA_2940U_PRO & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec 2940 Pro Ultra SCSI adapter", + ahc_aha2940Pro_setup + }, + { + ID_AHA_2940U_CN & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec 2940/CN Ultra SCSI adapter", + ahc_aic7880_setup + }, + /* Ignore all SISL (AAC on MB) based controllers. */ + { + ID_9005_SISL_ID, + ID_9005_SISL_MASK, + NULL, + NULL + }, + /* aic7890 based controllers */ + { + ID_AHA_2930U2, + ID_ALL_MASK, + "Adaptec 2930 Ultra2 SCSI adapter", + ahc_aic7890_setup + }, + { + ID_AHA_2940U2B, + ID_ALL_MASK, + "Adaptec 2940B Ultra2 SCSI adapter", + ahc_aic7890_setup + }, + { + ID_AHA_2940U2_OEM, + ID_ALL_MASK, + "Adaptec 2940 Ultra2 SCSI adapter (OEM)", + ahc_aic7890_setup + }, + { + ID_AHA_2940U2, + ID_ALL_MASK, + "Adaptec 2940 Ultra2 SCSI adapter", + ahc_aic7890_setup + }, + { + ID_AHA_2950U2B, + ID_ALL_MASK, + "Adaptec 2950 Ultra2 SCSI adapter", + ahc_aic7890_setup + }, + { + ID_AIC7890_ARO, + ID_ALL_MASK, + "Adaptec aic7890/91 Ultra2 SCSI adapter (ARO)", + ahc_aic7890_setup + }, + { + ID_AAA_131U2, + ID_ALL_MASK, + "Adaptec AAA-131 Ultra2 RAID adapter", + ahc_aic7890_setup + }, + /* aic7892 based controllers */ + { + ID_AHA_29160, + ID_ALL_MASK, + "Adaptec 29160 Ultra160 SCSI adapter", + ahc_aic7892_setup + }, + { + ID_AHA_29160_CPQ, + ID_ALL_MASK, + "Adaptec (Compaq OEM) 29160 Ultra160 SCSI adapter", + ahc_aic7892_setup + }, + { + ID_AHA_29160N, + ID_ALL_MASK, + "Adaptec 29160N Ultra160 SCSI adapter", + ahc_aic7892_setup + }, + { + ID_AHA_29160C, + ID_ALL_MASK, + "Adaptec 29160C Ultra160 SCSI adapter", + ahc_aha29160C_setup + }, + { + ID_AHA_29160B, + ID_ALL_MASK, + "Adaptec 29160B Ultra160 SCSI adapter", + ahc_aic7892_setup + }, + { + ID_AHA_19160B, + ID_ALL_MASK, + "Adaptec 19160B Ultra160 SCSI adapter", + ahc_aic7892_setup + }, + { + ID_AIC7892_ARO, + ID_ALL_MASK, + "Adaptec aic7892 Ultra160 SCSI adapter (ARO)", + ahc_aic7892_setup + }, + { + ID_AHA_2915_30LP, + ID_ALL_MASK, + "Adaptec 2915/30LP Ultra160 SCSI adapter", + ahc_aic7892_setup + }, + /* aic7895 based controllers */ + { + ID_AHA_2940U_DUAL, + ID_ALL_MASK, + "Adaptec 2940/DUAL Ultra SCSI adapter", + ahc_aic7895_setup + }, + { + ID_AHA_3940AU, + ID_ALL_MASK, + "Adaptec 3940A Ultra SCSI adapter", + ahc_aic7895_setup + }, + { + ID_AHA_3944AU, + ID_ALL_MASK, + "Adaptec 3944A Ultra SCSI adapter", + ahc_aic7895h_setup + }, + { + ID_AIC7895_ARO, + ID_AIC7895_ARO_MASK, + "Adaptec aic7895 Ultra SCSI adapter (ARO)", + ahc_aic7895_setup + }, + /* aic7896/97 based controllers */ + { + ID_AHA_3950U2B_0, + ID_ALL_MASK, + "Adaptec 3950B Ultra2 SCSI adapter", + ahc_aic7896_setup + }, + { + ID_AHA_3950U2B_1, + ID_ALL_MASK, + "Adaptec 3950B Ultra2 SCSI adapter", + ahc_aic7896_setup + }, + { + ID_AHA_3950U2D_0, + ID_ALL_MASK, + "Adaptec 3950D Ultra2 SCSI adapter", + ahc_aic7896_setup + }, + { + ID_AHA_3950U2D_1, + ID_ALL_MASK, + "Adaptec 3950D Ultra2 SCSI adapter", + ahc_aic7896_setup + }, + { + ID_AIC7896_ARO, + ID_ALL_MASK, + "Adaptec aic7896/97 Ultra2 SCSI adapter (ARO)", + ahc_aic7896_setup + }, + /* aic7899 based controllers */ + { + ID_AHA_3960D, + ID_ALL_MASK, + "Adaptec 3960D Ultra160 SCSI adapter", + ahc_aic7899_setup + }, + { + ID_AHA_3960D_CPQ, + ID_ALL_MASK, + "Adaptec (Compaq OEM) 3960D 
Ultra160 SCSI adapter", + ahc_aic7899_setup + }, + { + ID_AIC7899_ARO, + ID_ALL_MASK, + "Adaptec aic7899 Ultra160 SCSI adapter (ARO)", + ahc_aic7899_setup + }, + /* Generic chip probes for devices we don't know 'exactly' */ + { + ID_AIC7850 & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec aic7850 SCSI adapter", + ahc_aic785X_setup + }, + { + ID_AIC7855 & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec aic7855 SCSI adapter", + ahc_aic785X_setup + }, + { + ID_AIC7859 & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec aic7859 SCSI adapter", + ahc_aic7860_setup + }, + { + ID_AIC7860 & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec aic7860 Ultra SCSI adapter", + ahc_aic7860_setup + }, + { + ID_AIC7870 & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec aic7870 SCSI adapter", + ahc_aic7870_setup + }, + { + ID_AIC7880 & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec aic7880 Ultra SCSI adapter", + ahc_aic7880_setup + }, + { + ID_AIC7890 & ID_9005_GENERIC_MASK, + ID_9005_GENERIC_MASK, + "Adaptec aic7890/91 Ultra2 SCSI adapter", + ahc_aic7890_setup + }, + { + ID_AIC7892 & ID_9005_GENERIC_MASK, + ID_9005_GENERIC_MASK, + "Adaptec aic7892 Ultra160 SCSI adapter", + ahc_aic7892_setup + }, + { + ID_AIC7895 & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec aic7895 Ultra SCSI adapter", + ahc_aic7895_setup + }, + { + ID_AIC7896 & ID_9005_GENERIC_MASK, + ID_9005_GENERIC_MASK, + "Adaptec aic7896/97 Ultra2 SCSI adapter", + ahc_aic7896_setup + }, + { + ID_AIC7899 & ID_9005_GENERIC_MASK, + ID_9005_GENERIC_MASK, + "Adaptec aic7899 Ultra160 SCSI adapter", + ahc_aic7899_setup + }, + { + ID_AIC7810 & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec aic7810 RAID memory controller", + ahc_raid_setup + }, + { + ID_AIC7815 & ID_DEV_VENDOR_MASK, + ID_DEV_VENDOR_MASK, + "Adaptec aic7815 RAID memory controller", + ahc_raid_setup + } +}; + +static const u_int ahc_num_pci_devs = ARRAY_SIZE(ahc_pci_ident_table); + +#define AHC_394X_SLOT_CHANNEL_A 4 +#define AHC_394X_SLOT_CHANNEL_B 5 + +#define AHC_398X_SLOT_CHANNEL_A 4 +#define AHC_398X_SLOT_CHANNEL_B 8 +#define AHC_398X_SLOT_CHANNEL_C 12 + +#define AHC_494X_SLOT_CHANNEL_A 4 +#define AHC_494X_SLOT_CHANNEL_B 5 +#define AHC_494X_SLOT_CHANNEL_C 6 +#define AHC_494X_SLOT_CHANNEL_D 7 + +#define DEVCONFIG 0x40 +#define PCIERRGENDIS 0x80000000ul +#define SCBSIZE32 0x00010000ul /* aic789X only */ +#define REXTVALID 0x00001000ul /* ultra cards only */ +#define MPORTMODE 0x00000400ul /* aic7870+ only */ +#define RAMPSM 0x00000200ul /* aic7870+ only */ +#define VOLSENSE 0x00000100ul +#define PCI64BIT 0x00000080ul /* 64Bit PCI bus (Ultra2 Only)*/ +#define SCBRAMSEL 0x00000080ul +#define MRDCEN 0x00000040ul +#define EXTSCBTIME 0x00000020ul /* aic7870 only */ +#define EXTSCBPEN 0x00000010ul /* aic7870 only */ +#define BERREN 0x00000008ul +#define DACEN 0x00000004ul +#define STPWLEVEL 0x00000002ul +#define DIFACTNEGEN 0x00000001ul /* aic7870 only */ + +#define CSIZE_LATTIME 0x0c +#define CACHESIZE 0x0000003ful /* only 5 bits */ +#define LATTIME 0x0000ff00ul + +/* PCI STATUS definitions */ +#define DPE 0x80 +#define SSE 0x40 +#define RMA 0x20 +#define RTA 0x10 +#define STA 0x08 +#define DPR 0x01 + +static int ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor, + uint16_t subdevice, uint16_t subvendor); +static int ahc_ext_scbram_present(struct ahc_softc *ahc); +static void ahc_scbram_config(struct ahc_softc *ahc, int enable, + int pcheck, int fast, int large); +static void ahc_probe_ext_scbram(struct ahc_softc *ahc); +static void 
check_extport(struct ahc_softc *ahc, u_int *sxfrctl1); +static void ahc_parse_pci_eeprom(struct ahc_softc *ahc, + struct seeprom_config *sc); +static void configure_termination(struct ahc_softc *ahc, + struct seeprom_descriptor *sd, + u_int adapter_control, + u_int *sxfrctl1); + +static void ahc_new_term_detect(struct ahc_softc *ahc, + int *enableSEC_low, + int *enableSEC_high, + int *enablePRI_low, + int *enablePRI_high, + int *eeprom_present); +static void aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present, + int *internal68_present, + int *externalcable_present, + int *eeprom_present); +static void aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present, + int *externalcable_present, + int *eeprom_present); +static void write_brdctl(struct ahc_softc *ahc, uint8_t value); +static uint8_t read_brdctl(struct ahc_softc *ahc); +static void ahc_pci_intr(struct ahc_softc *ahc); +static int ahc_pci_chip_init(struct ahc_softc *ahc); + +static int +ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor, + uint16_t subdevice, uint16_t subvendor) +{ + int result; + + /* Default to invalid. */ + result = 0; + if (vendor == 0x9005 + && subvendor == 0x9005 + && subdevice != device + && SUBID_9005_TYPE_KNOWN(subdevice) != 0) { + + switch (SUBID_9005_TYPE(subdevice)) { + case SUBID_9005_TYPE_MB: + break; + case SUBID_9005_TYPE_CARD: + case SUBID_9005_TYPE_LCCARD: + /* + * Currently only trust Adaptec cards to + * get the sub device info correct. + */ + if (DEVID_9005_TYPE(device) == DEVID_9005_TYPE_HBA) + result = 1; + break; + case SUBID_9005_TYPE_RAID: + break; + default: + break; + } + } + return (result); +} + +const struct ahc_pci_identity * +ahc_find_pci_device(ahc_dev_softc_t pci) +{ + uint64_t full_id; + uint16_t device; + uint16_t vendor; + uint16_t subdevice; + uint16_t subvendor; + const struct ahc_pci_identity *entry; + u_int i; + + vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); + device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2); + subvendor = ahc_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2); + subdevice = ahc_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2); + full_id = ahc_compose_id(device, vendor, subdevice, subvendor); + + /* + * If the second function is not hooked up, ignore it. + * Unfortunately, not all MB vendors implement the + * subdevice ID as per the Adaptec spec, so do our best + * to sanity check it prior to accepting the subdevice + * ID as valid. + */ + if (ahc_get_pci_function(pci) > 0 + && ahc_9005_subdevinfo_valid(device, vendor, subdevice, subvendor) + && SUBID_9005_MFUNCENB(subdevice) == 0) + return (NULL); + + for (i = 0; i < ahc_num_pci_devs; i++) { + entry = &ahc_pci_ident_table[i]; + if (entry->full_id == (full_id & entry->id_mask)) { + /* Honor exclusion entries. */ + if (entry->name == NULL) + return (NULL); + return (entry); + } + } + return (NULL); +} + +int +ahc_pci_config(struct ahc_softc *ahc, const struct ahc_pci_identity *entry) +{ + u_int command; + u_int our_id; + u_int sxfrctl1; + u_int scsiseq; + u_int dscommand0; + uint32_t devconfig; + int error; + uint8_t sblkctl; + + our_id = 0; + error = entry->setup(ahc); + if (error != 0) + return (error); + ahc->chip |= AHC_PCI; + ahc->description = entry->name; + + pci_set_power_state(ahc->dev_softc, AHC_POWER_STATE_D0); + + error = ahc_pci_map_registers(ahc); + if (error != 0) + return (error); + + /* + * Before we continue probing the card, ensure that + * its interrupts are *disabled*. 
We don't want + * a misstep to hang the machine in an interrupt + * storm. + */ + ahc_intr_enable(ahc, FALSE); + + devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); + + /* + * If we need to support high memory, enable dual + * address cycles. This bit must be set to enable + * high address bit generation even if we are on a + * 64bit bus (PCI64BIT set in devconfig). + */ + if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { + + if (bootverbose) + printk("%s: Enabling 39Bit Addressing\n", + ahc_name(ahc)); + devconfig |= DACEN; + } + + /* Ensure that pci error generation, a test feature, is disabled. */ + devconfig |= PCIERRGENDIS; + + ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); + + /* Ensure busmastering is enabled */ + command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2); + command |= PCIM_CMD_BUSMASTEREN; + + ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, /*bytes*/2); + + /* On all PCI adapters, we allow SCB paging */ + ahc->flags |= AHC_PAGESCBS; + + error = ahc_softc_init(ahc); + if (error != 0) + return (error); + + /* + * Disable PCI parity error checking. Users typically + * do this to work around broken PCI chipsets that get + * the parity timing wrong and thus generate lots of spurious + * errors. The chip only allows us to disable *all* parity + * error reporting when doing this, so CIO bus, scb ram, and + * scratch ram parity errors will be ignored too. + */ + if ((ahc->flags & AHC_DISABLE_PCI_PERR) != 0) + ahc->seqctl |= FAILDIS; + + ahc->bus_intr = ahc_pci_intr; + ahc->bus_chip_init = ahc_pci_chip_init; + + /* Remember how the card was setup in case there is no SEEPROM */ + if ((ahc_inb(ahc, HCNTRL) & POWRDN) == 0) { + ahc_pause(ahc); + if ((ahc->features & AHC_ULTRA2) != 0) + our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; + else + our_id = ahc_inb(ahc, SCSIID) & OID; + sxfrctl1 = ahc_inb(ahc, SXFRCTL1) & STPWEN; + scsiseq = ahc_inb(ahc, SCSISEQ); + } else { + sxfrctl1 = STPWEN; + our_id = 7; + scsiseq = 0; + } + + error = ahc_reset(ahc, /*reinit*/FALSE); + if (error != 0) + return (ENXIO); + + if ((ahc->features & AHC_DT) != 0) { + u_int sfunct; + + /* Perform ALT-Mode Setup */ + sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; + ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); + ahc_outb(ahc, OPTIONMODE, + OPTIONMODE_DEFAULTS|AUTOACKEN|BUSFREEREV|EXPPHASEDIS); + ahc_outb(ahc, SFUNCT, sfunct); + + /* Normal mode setup */ + ahc_outb(ahc, CRCCONTROL1, CRCVALCHKEN|CRCENDCHKEN|CRCREQCHKEN + |TARGCRCENDEN); + } + + dscommand0 = ahc_inb(ahc, DSCOMMAND0); + dscommand0 |= MPARCKEN|CACHETHEN; + if ((ahc->features & AHC_ULTRA2) != 0) { + + /* + * DPARCKEN doesn't work correctly on + * some MBs so don't use it. + */ + dscommand0 &= ~DPARCKEN; + } + + /* + * Handle chips that must have cache line + * streaming (dis/en)abled. + */ + if ((ahc->bugs & AHC_CACHETHEN_DIS_BUG) != 0) + dscommand0 |= CACHETHEN; + + if ((ahc->bugs & AHC_CACHETHEN_BUG) != 0) + dscommand0 &= ~CACHETHEN; + + ahc_outb(ahc, DSCOMMAND0, dscommand0); + + ahc->pci_cachesize = + ahc_pci_read_config(ahc->dev_softc, CSIZE_LATTIME, + /*bytes*/1) & CACHESIZE; + ahc->pci_cachesize *= 4; + + if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0 + && ahc->pci_cachesize == 4) { + + ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, + 0, /*bytes*/1); + ahc->pci_cachesize = 0; + } + + /* + * We cannot perform ULTRA speeds without the presence + * of the external precision resistor. 
+ */ + if ((ahc->features & AHC_ULTRA) != 0) { + uint32_t devconfig; + + devconfig = ahc_pci_read_config(ahc->dev_softc, + DEVCONFIG, /*bytes*/4); + if ((devconfig & REXTVALID) == 0) + ahc->features &= ~AHC_ULTRA; + } + + /* See if we have a SEEPROM and perform auto-term */ + check_extport(ahc, &sxfrctl1); + + /* + * Take the LED out of diagnostic mode + */ + sblkctl = ahc_inb(ahc, SBLKCTL); + ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON))); + + if ((ahc->features & AHC_ULTRA2) != 0) { + ahc_outb(ahc, DFF_THRSH, RD_DFTHRSH_MAX|WR_DFTHRSH_MAX); + } else { + ahc_outb(ahc, DSPCISTATUS, DFTHRSH_100); + } + + if (ahc->flags & AHC_USEDEFAULTS) { + /* + * PCI Adapter default setup + * Should only be used if the adapter does not have + * a SEEPROM. + */ + /* See if someone else set us up already */ + if ((ahc->flags & AHC_NO_BIOS_INIT) == 0 + && scsiseq != 0) { + printk("%s: Using left over BIOS settings\n", + ahc_name(ahc)); + ahc->flags &= ~AHC_USEDEFAULTS; + ahc->flags |= AHC_BIOS_ENABLED; + } else { + /* + * Assume only one connector and always turn + * on termination. + */ + our_id = 0x07; + sxfrctl1 = STPWEN; + } + ahc_outb(ahc, SCSICONF, our_id|ENSPCHK|RESET_SCSI); + + ahc->our_id = our_id; + } + + /* + * Take a look to see if we have external SRAM. + * We currently do not attempt to use SRAM that is + * shared among multiple controllers. + */ + ahc_probe_ext_scbram(ahc); + + /* + * Record our termination setting for the + * generic initialization routine. + */ + if ((sxfrctl1 & STPWEN) != 0) + ahc->flags |= AHC_TERM_ENB_A; + + /* + * Save chip register configuration data for chip resets + * that occur during runtime and resume events. + */ + ahc->bus_softc.pci_softc.devconfig = + ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); + ahc->bus_softc.pci_softc.command = + ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1); + ahc->bus_softc.pci_softc.csize_lattime = + ahc_pci_read_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1); + ahc->bus_softc.pci_softc.dscommand0 = ahc_inb(ahc, DSCOMMAND0); + ahc->bus_softc.pci_softc.dspcistatus = ahc_inb(ahc, DSPCISTATUS); + if ((ahc->features & AHC_DT) != 0) { + u_int sfunct; + + sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; + ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); + ahc->bus_softc.pci_softc.optionmode = ahc_inb(ahc, OPTIONMODE); + ahc->bus_softc.pci_softc.targcrccnt = ahc_inw(ahc, TARGCRCCNT); + ahc_outb(ahc, SFUNCT, sfunct); + ahc->bus_softc.pci_softc.crccontrol1 = + ahc_inb(ahc, CRCCONTROL1); + } + if ((ahc->features & AHC_MULTI_FUNC) != 0) + ahc->bus_softc.pci_softc.scbbaddr = ahc_inb(ahc, SCBBADDR); + + if ((ahc->features & AHC_ULTRA2) != 0) + ahc->bus_softc.pci_softc.dff_thrsh = ahc_inb(ahc, DFF_THRSH); + + /* Core initialization */ + error = ahc_init(ahc); + if (error != 0) + return (error); + ahc->init_level++; + + /* + * Allow interrupts now that we are completely setup. + */ + return ahc_pci_map_int(ahc); +} + +/* + * Test for the presence of external sram in an + * "unshared" configuration. + */ +static int +ahc_ext_scbram_present(struct ahc_softc *ahc) +{ + u_int chip; + int ramps; + int single_user; + uint32_t devconfig; + + chip = ahc->chip & AHC_CHIPID_MASK; + devconfig = ahc_pci_read_config(ahc->dev_softc, + DEVCONFIG, /*bytes*/4); + single_user = (devconfig & MPORTMODE) != 0; + + if ((ahc->features & AHC_ULTRA2) != 0) + ramps = (ahc_inb(ahc, DSCOMMAND0) & RAMPS) != 0; + else if (chip == AHC_AIC7895 || chip == AHC_AIC7895C) + /* + * External SCBRAM arbitration is flakey + * on these chips. 
Unfortunately this means + * we don't use the extra SCB ram space on the + * 3940AUW. + */ + ramps = 0; + else if (chip >= AHC_AIC7870) + ramps = (devconfig & RAMPSM) != 0; + else + ramps = 0; + + if (ramps && single_user) + return (1); + return (0); +} + +/* + * Enable external scbram. + */ +static void +ahc_scbram_config(struct ahc_softc *ahc, int enable, int pcheck, + int fast, int large) +{ + uint32_t devconfig; + + if (ahc->features & AHC_MULTI_FUNC) { + /* + * Set the SCB Base addr (highest address bit) + * depending on which channel we are. + */ + ahc_outb(ahc, SCBBADDR, ahc_get_pci_function(ahc->dev_softc)); + } + + ahc->flags &= ~AHC_LSCBS_ENABLED; + if (large) + ahc->flags |= AHC_LSCBS_ENABLED; + devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); + if ((ahc->features & AHC_ULTRA2) != 0) { + u_int dscommand0; + + dscommand0 = ahc_inb(ahc, DSCOMMAND0); + if (enable) + dscommand0 &= ~INTSCBRAMSEL; + else + dscommand0 |= INTSCBRAMSEL; + if (large) + dscommand0 &= ~USCBSIZE32; + else + dscommand0 |= USCBSIZE32; + ahc_outb(ahc, DSCOMMAND0, dscommand0); + } else { + if (fast) + devconfig &= ~EXTSCBTIME; + else + devconfig |= EXTSCBTIME; + if (enable) + devconfig &= ~SCBRAMSEL; + else + devconfig |= SCBRAMSEL; + if (large) + devconfig &= ~SCBSIZE32; + else + devconfig |= SCBSIZE32; + } + if (pcheck) + devconfig |= EXTSCBPEN; + else + devconfig &= ~EXTSCBPEN; + + ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); +} + +/* + * Take a look to see if we have external SRAM. + * We currently do not attempt to use SRAM that is + * shared among multiple controllers. + */ +static void +ahc_probe_ext_scbram(struct ahc_softc *ahc) +{ + int num_scbs; + int test_num_scbs; + int enable; + int pcheck; + int fast; + int large; + + enable = FALSE; + pcheck = FALSE; + fast = FALSE; + large = FALSE; + num_scbs = 0; + + if (ahc_ext_scbram_present(ahc) == 0) + goto done; + + /* + * Probe for the best parameters to use. + */ + ahc_scbram_config(ahc, /*enable*/TRUE, pcheck, fast, large); + num_scbs = ahc_probe_scbs(ahc); + if (num_scbs == 0) { + /* The SRAM wasn't really present. */ + goto done; + } + enable = TRUE; + + /* + * Clear any outstanding parity error + * and ensure that parity error reporting + * is enabled. + */ + ahc_outb(ahc, SEQCTL, 0); + ahc_outb(ahc, CLRINT, CLRPARERR); + ahc_outb(ahc, CLRINT, CLRBRKADRINT); + + /* Now see if we can do parity */ + ahc_scbram_config(ahc, enable, /*pcheck*/TRUE, fast, large); + num_scbs = ahc_probe_scbs(ahc); + if ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0 + || (ahc_inb(ahc, ERROR) & MPARERR) == 0) + pcheck = TRUE; + + /* Clear any resulting parity error */ + ahc_outb(ahc, CLRINT, CLRPARERR); + ahc_outb(ahc, CLRINT, CLRBRKADRINT); + + /* Now see if we can do fast timing */ + ahc_scbram_config(ahc, enable, pcheck, /*fast*/TRUE, large); + test_num_scbs = ahc_probe_scbs(ahc); + if (test_num_scbs == num_scbs + && ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0 + || (ahc_inb(ahc, ERROR) & MPARERR) == 0)) + fast = TRUE; + + /* + * See if we can use large SCBs and still maintain + * the same overall count of SCBs. + */ + if ((ahc->features & AHC_LARGE_SCBS) != 0) { + ahc_scbram_config(ahc, enable, pcheck, fast, /*large*/TRUE); + test_num_scbs = ahc_probe_scbs(ahc); + if (test_num_scbs >= num_scbs) { + large = TRUE; + num_scbs = test_num_scbs; + if (num_scbs >= 64) { + /* + * We have enough space to move the + * "busy targets table" into SCB space + * and make it qualify all the way to the + * lun level. 
+ */ + ahc->flags |= AHC_SCB_BTT; + } + } + } +done: + /* + * Disable parity error reporting until we + * can load instruction ram. + */ + ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS); + /* Clear any latched parity error */ + ahc_outb(ahc, CLRINT, CLRPARERR); + ahc_outb(ahc, CLRINT, CLRBRKADRINT); + if (bootverbose && enable) { + printk("%s: External SRAM, %s access%s, %dbytes/SCB\n", + ahc_name(ahc), fast ? "fast" : "slow", + pcheck ? ", parity checking enabled" : "", + large ? 64 : 32); + } + ahc_scbram_config(ahc, enable, pcheck, fast, large); +} + +/* + * Perform some simple tests that should catch situations where + * our registers are invalidly mapped. + */ +int +ahc_pci_test_register_access(struct ahc_softc *ahc) +{ + int error; + u_int status1; + uint32_t cmd; + uint8_t hcntrl; + + error = EIO; + + /* + * Enable PCI error interrupt status, but suppress NMIs + * generated by SERR raised due to target aborts. + */ + cmd = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2); + ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, + cmd & ~PCIM_CMD_SERRESPEN, /*bytes*/2); + + /* + * First a simple test to see if any + * registers can be read. Reading + * HCNTRL has no side effects and has + * at least one bit that is guaranteed to + * be zero so it is a good register to + * use for this test. + */ + hcntrl = ahc_inb(ahc, HCNTRL); + + if (hcntrl == 0xFF) + goto fail; + + if ((hcntrl & CHIPRST) != 0) { + /* + * The chip has not been initialized since + * PCI/EISA/VLB bus reset. Don't trust + * "left over BIOS data". + */ + ahc->flags |= AHC_NO_BIOS_INIT; + } + + /* + * Next create a situation where write combining + * or read prefetching could be initiated by the + * CPU or host bridge. Our device does not support + * either, so look for data corruption and/or flagged + * PCI errors. First pause without causing another + * chip reset. + */ + hcntrl &= ~CHIPRST; + ahc_outb(ahc, HCNTRL, hcntrl|PAUSE); + while (ahc_is_paused(ahc) == 0) + ; + + /* Clear any PCI errors that occurred before our driver attached. */ + status1 = ahc_pci_read_config(ahc->dev_softc, + PCIR_STATUS + 1, /*bytes*/1); + ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1, + status1, /*bytes*/1); + ahc_outb(ahc, CLRINT, CLRPARERR); + + ahc_outb(ahc, SEQCTL, PERRORDIS); + ahc_outb(ahc, SCBPTR, 0); + ahc_outl(ahc, SCB_BASE, 0x5aa555aa); + if (ahc_inl(ahc, SCB_BASE) != 0x5aa555aa) + goto fail; + + status1 = ahc_pci_read_config(ahc->dev_softc, + PCIR_STATUS + 1, /*bytes*/1); + if ((status1 & STA) != 0) + goto fail; + + error = 0; + +fail: + /* Silently clear any latched errors. */ + status1 = ahc_pci_read_config(ahc->dev_softc, + PCIR_STATUS + 1, /*bytes*/1); + ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1, + status1, /*bytes*/1); + ahc_outb(ahc, CLRINT, CLRPARERR); + ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS); + ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2); + return (error); +} + +/* + * Check the external port logic for a serial eeprom + * and termination/cable detection contrls. + */ +static void +check_extport(struct ahc_softc *ahc, u_int *sxfrctl1) +{ + struct seeprom_descriptor sd; + struct seeprom_config *sc; + int have_seeprom; + int have_autoterm; + + sd.sd_ahc = ahc; + sd.sd_control_offset = SEECTL; + sd.sd_status_offset = SEECTL; + sd.sd_dataout_offset = SEECTL; + sc = ahc->seep_config; + + /* + * For some multi-channel devices, the c46 is simply too + * small to work. For the other controller types, we can + * get our information from either SEEPROM type. 
Set the + * type to start our probe with accordingly. + */ + if (ahc->flags & AHC_LARGE_SEEPROM) + sd.sd_chip = C56_66; + else + sd.sd_chip = C46; + + sd.sd_MS = SEEMS; + sd.sd_RDY = SEERDY; + sd.sd_CS = SEECS; + sd.sd_CK = SEECK; + sd.sd_DO = SEEDO; + sd.sd_DI = SEEDI; + + have_seeprom = ahc_acquire_seeprom(ahc, &sd); + if (have_seeprom) { + + if (bootverbose) + printk("%s: Reading SEEPROM...", ahc_name(ahc)); + + for (;;) { + u_int start_addr; + + start_addr = 32 * (ahc->channel - 'A'); + + have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc, + start_addr, + sizeof(*sc)/2); + + if (have_seeprom) + have_seeprom = ahc_verify_cksum(sc); + + if (have_seeprom != 0 || sd.sd_chip == C56_66) { + if (bootverbose) { + if (have_seeprom == 0) + printk ("checksum error\n"); + else + printk ("done.\n"); + } + break; + } + sd.sd_chip = C56_66; + } + ahc_release_seeprom(&sd); + + /* Remember the SEEPROM type for later */ + if (sd.sd_chip == C56_66) + ahc->flags |= AHC_LARGE_SEEPROM; + } + + if (!have_seeprom) { + /* + * Pull scratch ram settings and treat them as + * if they are the contents of an seeprom if + * the 'ADPT' signature is found in SCB2. + * We manually compose the data as 16bit values + * to avoid endian issues. + */ + ahc_outb(ahc, SCBPTR, 2); + if (ahc_inb(ahc, SCB_BASE) == 'A' + && ahc_inb(ahc, SCB_BASE + 1) == 'D' + && ahc_inb(ahc, SCB_BASE + 2) == 'P' + && ahc_inb(ahc, SCB_BASE + 3) == 'T') { + uint16_t *sc_data; + int i; + + sc_data = (uint16_t *)sc; + for (i = 0; i < 32; i++, sc_data++) { + int j; + + j = i * 2; + *sc_data = ahc_inb(ahc, SRAM_BASE + j) + | ahc_inb(ahc, SRAM_BASE + j + 1) << 8; + } + have_seeprom = ahc_verify_cksum(sc); + if (have_seeprom) + ahc->flags |= AHC_SCB_CONFIG_USED; + } + /* + * Clear any SCB parity errors in case this data and + * its associated parity was not initialized by the BIOS + */ + ahc_outb(ahc, CLRINT, CLRPARERR); + ahc_outb(ahc, CLRINT, CLRBRKADRINT); + } + + if (!have_seeprom) { + if (bootverbose) + printk("%s: No SEEPROM available.\n", ahc_name(ahc)); + ahc->flags |= AHC_USEDEFAULTS; + kfree(ahc->seep_config); + ahc->seep_config = NULL; + sc = NULL; + } else { + ahc_parse_pci_eeprom(ahc, sc); + } + + /* + * Cards that have the external logic necessary to talk to + * a SEEPROM, are almost certain to have the remaining logic + * necessary for auto-termination control. This assumption + * hasn't failed yet... + */ + have_autoterm = have_seeprom; + + /* + * Some low-cost chips have SEEPROM and auto-term control built + * in, instead of using a GAL. They can tell us directly + * if the termination logic is enabled. + */ + if ((ahc->features & AHC_SPIOCAP) != 0) { + if ((ahc_inb(ahc, SPIOCAP) & SSPIOCPS) == 0) + have_autoterm = FALSE; + } + + if (have_autoterm) { + ahc->flags |= AHC_HAS_TERM_LOGIC; + ahc_acquire_seeprom(ahc, &sd); + configure_termination(ahc, &sd, sc->adapter_control, sxfrctl1); + ahc_release_seeprom(&sd); + } else if (have_seeprom) { + *sxfrctl1 &= ~STPWEN; + if ((sc->adapter_control & CFSTERM) != 0) + *sxfrctl1 |= STPWEN; + if (bootverbose) + printk("%s: Low byte termination %sabled\n", + ahc_name(ahc), + (*sxfrctl1 & STPWEN) ? "en" : "dis"); + } +} + +static void +ahc_parse_pci_eeprom(struct ahc_softc *ahc, struct seeprom_config *sc) +{ + /* + * Put the data we've collected down into SRAM + * where ahc_init will find it. 
+ */ + int i; + int max_targ = sc->max_targets & CFMAXTARG; + u_int scsi_conf; + uint16_t discenable; + uint16_t ultraenb; + + discenable = 0; + ultraenb = 0; + if ((sc->adapter_control & CFULTRAEN) != 0) { + /* + * Determine if this adapter has a "newstyle" + * SEEPROM format. + */ + for (i = 0; i < max_targ; i++) { + if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0) { + ahc->flags |= AHC_NEWEEPROM_FMT; + break; + } + } + } + + for (i = 0; i < max_targ; i++) { + u_int scsirate; + uint16_t target_mask; + + target_mask = 0x01 << i; + if (sc->device_flags[i] & CFDISC) + discenable |= target_mask; + if ((ahc->flags & AHC_NEWEEPROM_FMT) != 0) { + if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0) + ultraenb |= target_mask; + } else if ((sc->adapter_control & CFULTRAEN) != 0) { + ultraenb |= target_mask; + } + if ((sc->device_flags[i] & CFXFER) == 0x04 + && (ultraenb & target_mask) != 0) { + /* Treat 10MHz as a non-ultra speed */ + sc->device_flags[i] &= ~CFXFER; + ultraenb &= ~target_mask; + } + if ((ahc->features & AHC_ULTRA2) != 0) { + u_int offset; + + if (sc->device_flags[i] & CFSYNCH) + offset = MAX_OFFSET_ULTRA2; + else + offset = 0; + ahc_outb(ahc, TARG_OFFSET + i, offset); + + /* + * The ultra enable bits contain the + * high bit of the ultra2 sync rate + * field. + */ + scsirate = (sc->device_flags[i] & CFXFER) + | ((ultraenb & target_mask) ? 0x8 : 0x0); + if (sc->device_flags[i] & CFWIDEB) + scsirate |= WIDEXFER; + } else { + scsirate = (sc->device_flags[i] & CFXFER) << 4; + if (sc->device_flags[i] & CFSYNCH) + scsirate |= SOFS; + if (sc->device_flags[i] & CFWIDEB) + scsirate |= WIDEXFER; + } + ahc_outb(ahc, TARG_SCSIRATE + i, scsirate); + } + ahc->our_id = sc->brtime_id & CFSCSIID; + + scsi_conf = (ahc->our_id & 0x7); + if (sc->adapter_control & CFSPARITY) + scsi_conf |= ENSPCHK; + if (sc->adapter_control & CFRESETB) + scsi_conf |= RESET_SCSI; + + ahc->flags |= (sc->adapter_control & CFBOOTCHAN) >> CFBOOTCHANSHIFT; + + if (sc->bios_control & CFEXTEND) + ahc->flags |= AHC_EXTENDED_TRANS_A; + + if (sc->bios_control & CFBIOSEN) + ahc->flags |= AHC_BIOS_ENABLED; + if (ahc->features & AHC_ULTRA + && (ahc->flags & AHC_NEWEEPROM_FMT) == 0) { + /* Should we enable Ultra mode? */ + if (!(sc->adapter_control & CFULTRAEN)) + /* Treat us as a non-ultra card */ + ultraenb = 0; + } + + if (sc->signature == CFSIGNATURE + || sc->signature == CFSIGNATURE2) { + uint32_t devconfig; + + /* Honor the STPWLEVEL settings */ + devconfig = ahc_pci_read_config(ahc->dev_softc, + DEVCONFIG, /*bytes*/4); + devconfig &= ~STPWLEVEL; + if ((sc->bios_control & CFSTPWLEVEL) != 0) + devconfig |= STPWLEVEL; + ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, + devconfig, /*bytes*/4); + } + /* Set SCSICONF info */ + ahc_outb(ahc, SCSICONF, scsi_conf); + ahc_outb(ahc, DISC_DSB, ~(discenable & 0xff)); + ahc_outb(ahc, DISC_DSB + 1, ~((discenable >> 8) & 0xff)); + ahc_outb(ahc, ULTRA_ENB, ultraenb & 0xff); + ahc_outb(ahc, ULTRA_ENB + 1, (ultraenb >> 8) & 0xff); +} + +static void +configure_termination(struct ahc_softc *ahc, + struct seeprom_descriptor *sd, + u_int adapter_control, + u_int *sxfrctl1) +{ + uint8_t brddat; + + brddat = 0; + + /* + * Update the settings in sxfrctl1 to match the + * termination settings + */ + *sxfrctl1 = 0; + + /* + * SEECS must be on for the GALS to latch + * the data properly. Be sure to leave MS + * on or we will release the seeprom. 
+ */ + SEEPROM_OUTB(sd, sd->sd_MS | sd->sd_CS); + if ((adapter_control & CFAUTOTERM) != 0 + || (ahc->features & AHC_NEW_TERMCTL) != 0) { + int internal50_present; + int internal68_present; + int externalcable_present; + int eeprom_present; + int enableSEC_low; + int enableSEC_high; + int enablePRI_low; + int enablePRI_high; + int sum; + + enableSEC_low = 0; + enableSEC_high = 0; + enablePRI_low = 0; + enablePRI_high = 0; + if ((ahc->features & AHC_NEW_TERMCTL) != 0) { + ahc_new_term_detect(ahc, &enableSEC_low, + &enableSEC_high, + &enablePRI_low, + &enablePRI_high, + &eeprom_present); + if ((adapter_control & CFSEAUTOTERM) == 0) { + if (bootverbose) + printk("%s: Manual SE Termination\n", + ahc_name(ahc)); + enableSEC_low = (adapter_control & CFSELOWTERM); + enableSEC_high = + (adapter_control & CFSEHIGHTERM); + } + if ((adapter_control & CFAUTOTERM) == 0) { + if (bootverbose) + printk("%s: Manual LVD Termination\n", + ahc_name(ahc)); + enablePRI_low = (adapter_control & CFSTERM); + enablePRI_high = (adapter_control & CFWSTERM); + } + /* Make the table calculations below happy */ + internal50_present = 0; + internal68_present = 1; + externalcable_present = 1; + } else if ((ahc->features & AHC_SPIOCAP) != 0) { + aic785X_cable_detect(ahc, &internal50_present, + &externalcable_present, + &eeprom_present); + /* Can never support a wide connector. */ + internal68_present = 0; + } else { + aic787X_cable_detect(ahc, &internal50_present, + &internal68_present, + &externalcable_present, + &eeprom_present); + } + + if ((ahc->features & AHC_WIDE) == 0) + internal68_present = 0; + + if (bootverbose + && (ahc->features & AHC_ULTRA2) == 0) { + printk("%s: internal 50 cable %s present", + ahc_name(ahc), + internal50_present ? "is":"not"); + + if ((ahc->features & AHC_WIDE) != 0) + printk(", internal 68 cable %s present", + internal68_present ? "is":"not"); + printk("\n%s: external cable %s present\n", + ahc_name(ahc), + externalcable_present ? "is":"not"); + } + if (bootverbose) + printk("%s: BIOS eeprom %s present\n", + ahc_name(ahc), eeprom_present ? "is" : "not"); + + if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) { + /* + * The 50 pin connector is a separate bus, + * so force it to always be terminated. + * In the future, perform current sensing + * to determine if we are in the middle of + * a properly terminated bus. + */ + internal50_present = 0; + } + + /* + * Now set the termination based on what + * we found. + * Flash Enable = BRDDAT7 + * Secondary High Term Enable = BRDDAT6 + * Secondary Low Term Enable = BRDDAT5 (7890) + * Primary High Term Enable = BRDDAT4 (7890) + */ + if ((ahc->features & AHC_ULTRA2) == 0 + && (internal50_present != 0) + && (internal68_present != 0) + && (externalcable_present != 0)) { + printk("%s: Illegal cable configuration!!. " + "Only two connectors on the " + "adapter may be used at a " + "time!\n", ahc_name(ahc)); + + /* + * Pretend there are no cables in the hope + * that having all of the termination on + * gives us a more stable bus. + */ + internal50_present = 0; + internal68_present = 0; + externalcable_present = 0; + } + + if ((ahc->features & AHC_WIDE) != 0 + && ((externalcable_present == 0) + || (internal68_present == 0) + || (enableSEC_high != 0))) { + brddat |= BRDDAT6; + if (bootverbose) { + if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) + printk("%s: 68 pin termination " + "Enabled\n", ahc_name(ahc)); + else + printk("%s: %sHigh byte termination " + "Enabled\n", ahc_name(ahc), + enableSEC_high ? 
"Secondary " + : ""); + } + } + + sum = internal50_present + internal68_present + + externalcable_present; + if (sum < 2 || (enableSEC_low != 0)) { + if ((ahc->features & AHC_ULTRA2) != 0) + brddat |= BRDDAT5; + else + *sxfrctl1 |= STPWEN; + if (bootverbose) { + if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) + printk("%s: 50 pin termination " + "Enabled\n", ahc_name(ahc)); + else + printk("%s: %sLow byte termination " + "Enabled\n", ahc_name(ahc), + enableSEC_low ? "Secondary " + : ""); + } + } + + if (enablePRI_low != 0) { + *sxfrctl1 |= STPWEN; + if (bootverbose) + printk("%s: Primary Low Byte termination " + "Enabled\n", ahc_name(ahc)); + } + + /* + * Setup STPWEN before setting up the rest of + * the termination per the tech note on the U160 cards. + */ + ahc_outb(ahc, SXFRCTL1, *sxfrctl1); + + if (enablePRI_high != 0) { + brddat |= BRDDAT4; + if (bootverbose) + printk("%s: Primary High Byte " + "termination Enabled\n", + ahc_name(ahc)); + } + + write_brdctl(ahc, brddat); + + } else { + if ((adapter_control & CFSTERM) != 0) { + *sxfrctl1 |= STPWEN; + + if (bootverbose) + printk("%s: %sLow byte termination Enabled\n", + ahc_name(ahc), + (ahc->features & AHC_ULTRA2) ? "Primary " + : ""); + } + + if ((adapter_control & CFWSTERM) != 0 + && (ahc->features & AHC_WIDE) != 0) { + brddat |= BRDDAT6; + if (bootverbose) + printk("%s: %sHigh byte termination Enabled\n", + ahc_name(ahc), + (ahc->features & AHC_ULTRA2) + ? "Secondary " : ""); + } + + /* + * Setup STPWEN before setting up the rest of + * the termination per the tech note on the U160 cards. + */ + ahc_outb(ahc, SXFRCTL1, *sxfrctl1); + + if ((ahc->features & AHC_WIDE) != 0) + write_brdctl(ahc, brddat); + } + SEEPROM_OUTB(sd, sd->sd_MS); /* Clear CS */ +} + +static void +ahc_new_term_detect(struct ahc_softc *ahc, int *enableSEC_low, + int *enableSEC_high, int *enablePRI_low, + int *enablePRI_high, int *eeprom_present) +{ + uint8_t brdctl; + + /* + * BRDDAT7 = Eeprom + * BRDDAT6 = Enable Secondary High Byte termination + * BRDDAT5 = Enable Secondary Low Byte termination + * BRDDAT4 = Enable Primary high byte termination + * BRDDAT3 = Enable Primary low byte termination + */ + brdctl = read_brdctl(ahc); + *eeprom_present = brdctl & BRDDAT7; + *enableSEC_high = (brdctl & BRDDAT6); + *enableSEC_low = (brdctl & BRDDAT5); + *enablePRI_high = (brdctl & BRDDAT4); + *enablePRI_low = (brdctl & BRDDAT3); +} + +static void +aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present, + int *internal68_present, int *externalcable_present, + int *eeprom_present) +{ + uint8_t brdctl; + + /* + * First read the status of our cables. + * Set the rom bank to 0 since the + * bank setting serves as a multiplexor + * for the cable detection logic. + * BRDDAT5 controls the bank switch. + */ + write_brdctl(ahc, 0); + + /* + * Now read the state of the internal + * connectors. BRDDAT6 is INT50 and + * BRDDAT7 is INT68. + */ + brdctl = read_brdctl(ahc); + *internal50_present = (brdctl & BRDDAT6) ? 0 : 1; + *internal68_present = (brdctl & BRDDAT7) ? 0 : 1; + + /* + * Set the rom bank to 1 and determine + * the other signals. + */ + write_brdctl(ahc, BRDDAT5); + + /* + * Now read the state of the external + * connectors. BRDDAT6 is EXT68 and + * BRDDAT7 is EPROMPS. + */ + brdctl = read_brdctl(ahc); + *externalcable_present = (brdctl & BRDDAT6) ? 0 : 1; + *eeprom_present = (brdctl & BRDDAT7) ? 
1 : 0; +} + +static void +aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present, + int *externalcable_present, int *eeprom_present) +{ + uint8_t brdctl; + uint8_t spiocap; + + spiocap = ahc_inb(ahc, SPIOCAP); + spiocap &= ~SOFTCMDEN; + spiocap |= EXT_BRDCTL; + ahc_outb(ahc, SPIOCAP, spiocap); + ahc_outb(ahc, BRDCTL, BRDRW|BRDCS); + ahc_flush_device_writes(ahc); + ahc_delay(500); + ahc_outb(ahc, BRDCTL, 0); + ahc_flush_device_writes(ahc); + ahc_delay(500); + brdctl = ahc_inb(ahc, BRDCTL); + *internal50_present = (brdctl & BRDDAT5) ? 0 : 1; + *externalcable_present = (brdctl & BRDDAT6) ? 0 : 1; + *eeprom_present = (ahc_inb(ahc, SPIOCAP) & EEPROM) ? 1 : 0; +} + +int +ahc_acquire_seeprom(struct ahc_softc *ahc, struct seeprom_descriptor *sd) +{ + int wait; + + if ((ahc->features & AHC_SPIOCAP) != 0 + && (ahc_inb(ahc, SPIOCAP) & SEEPROM) == 0) + return (0); + + /* + * Request access of the memory port. When access is + * granted, SEERDY will go high. We use a 1 second + * timeout which should be near 1 second more than + * is needed. Reason: after the chip reset, there + * should be no contention. + */ + SEEPROM_OUTB(sd, sd->sd_MS); + wait = 1000; /* 1 second timeout in msec */ + while (--wait && ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0)) { + ahc_delay(1000); /* delay 1 msec */ + } + if ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0) { + SEEPROM_OUTB(sd, 0); + return (0); + } + return(1); +} + +void +ahc_release_seeprom(struct seeprom_descriptor *sd) +{ + /* Release access to the memory port and the serial EEPROM. */ + SEEPROM_OUTB(sd, 0); +} + +static void +write_brdctl(struct ahc_softc *ahc, uint8_t value) +{ + uint8_t brdctl; + + if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) { + brdctl = BRDSTB; + if (ahc->channel == 'B') + brdctl |= BRDCS; + } else if ((ahc->features & AHC_ULTRA2) != 0) { + brdctl = 0; + } else { + brdctl = BRDSTB|BRDCS; + } + ahc_outb(ahc, BRDCTL, brdctl); + ahc_flush_device_writes(ahc); + brdctl |= value; + ahc_outb(ahc, BRDCTL, brdctl); + ahc_flush_device_writes(ahc); + if ((ahc->features & AHC_ULTRA2) != 0) + brdctl |= BRDSTB_ULTRA2; + else + brdctl &= ~BRDSTB; + ahc_outb(ahc, BRDCTL, brdctl); + ahc_flush_device_writes(ahc); + if ((ahc->features & AHC_ULTRA2) != 0) + brdctl = 0; + else + brdctl &= ~BRDCS; + ahc_outb(ahc, BRDCTL, brdctl); +} + +static uint8_t +read_brdctl(struct ahc_softc *ahc) +{ + uint8_t brdctl; + uint8_t value; + + if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) { + brdctl = BRDRW; + if (ahc->channel == 'B') + brdctl |= BRDCS; + } else if ((ahc->features & AHC_ULTRA2) != 0) { + brdctl = BRDRW_ULTRA2; + } else { + brdctl = BRDRW|BRDCS; + } + ahc_outb(ahc, BRDCTL, brdctl); + ahc_flush_device_writes(ahc); + value = ahc_inb(ahc, BRDCTL); + ahc_outb(ahc, BRDCTL, 0); + return (value); +} + +static void +ahc_pci_intr(struct ahc_softc *ahc) +{ + u_int error; + u_int status1; + + error = ahc_inb(ahc, ERROR); + if ((error & PCIERRSTAT) == 0) + return; + + status1 = ahc_pci_read_config(ahc->dev_softc, + PCIR_STATUS + 1, /*bytes*/1); + + printk("%s: PCI error Interrupt at seqaddr = 0x%x\n", + ahc_name(ahc), + ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); + + if (status1 & DPE) { + ahc->pci_target_perr_count++; + printk("%s: Data Parity Error Detected during address " + "or write data phase\n", ahc_name(ahc)); + } + if (status1 & SSE) { + printk("%s: Signal System Error Detected\n", ahc_name(ahc)); + } + if (status1 & RMA) { + printk("%s: Received a Master Abort\n", ahc_name(ahc)); + } + if (status1 & RTA) { + printk("%s: Received a 
Target Abort\n", ahc_name(ahc)); + } + if (status1 & STA) { + printk("%s: Signaled a Target Abort\n", ahc_name(ahc)); + } + if (status1 & DPR) { + printk("%s: Data Parity Error has been reported via PERR#\n", + ahc_name(ahc)); + } + + /* Clear latched errors. */ + ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1, + status1, /*bytes*/1); + + if ((status1 & (DPE|SSE|RMA|RTA|STA|DPR)) == 0) { + printk("%s: Latched PCIERR interrupt with " + "no status bits set\n", ahc_name(ahc)); + } else { + ahc_outb(ahc, CLRINT, CLRPARERR); + } + + if (ahc->pci_target_perr_count > AHC_PCI_TARGET_PERR_THRESH) { + printk( +"%s: WARNING WARNING WARNING WARNING\n" +"%s: Too many PCI parity errors observed as a target.\n" +"%s: Some device on this bus is generating bad parity.\n" +"%s: This is an error *observed by*, not *generated by*, this controller.\n" +"%s: PCI parity error checking has been disabled.\n" +"%s: WARNING WARNING WARNING WARNING\n", + ahc_name(ahc), ahc_name(ahc), ahc_name(ahc), + ahc_name(ahc), ahc_name(ahc), ahc_name(ahc)); + ahc->seqctl |= FAILDIS; + ahc_outb(ahc, SEQCTL, ahc->seqctl); + } + ahc_unpause(ahc); +} + +static int +ahc_pci_chip_init(struct ahc_softc *ahc) +{ + ahc_outb(ahc, DSCOMMAND0, ahc->bus_softc.pci_softc.dscommand0); + ahc_outb(ahc, DSPCISTATUS, ahc->bus_softc.pci_softc.dspcistatus); + if ((ahc->features & AHC_DT) != 0) { + u_int sfunct; + + sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; + ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); + ahc_outb(ahc, OPTIONMODE, ahc->bus_softc.pci_softc.optionmode); + ahc_outw(ahc, TARGCRCCNT, ahc->bus_softc.pci_softc.targcrccnt); + ahc_outb(ahc, SFUNCT, sfunct); + ahc_outb(ahc, CRCCONTROL1, + ahc->bus_softc.pci_softc.crccontrol1); + } + if ((ahc->features & AHC_MULTI_FUNC) != 0) + ahc_outb(ahc, SCBBADDR, ahc->bus_softc.pci_softc.scbbaddr); + + if ((ahc->features & AHC_ULTRA2) != 0) + ahc_outb(ahc, DFF_THRSH, ahc->bus_softc.pci_softc.dff_thrsh); + + return (ahc_chip_init(ahc)); +} + +void __maybe_unused +ahc_pci_resume(struct ahc_softc *ahc) +{ + /* + * We assume that the OS has restored our register + * mappings, etc. Just update the config space registers + * that the OS doesn't know about and rely on our chip + * reset handler to handle the rest. 
+ */ + ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, + ahc->bus_softc.pci_softc.devconfig, /*bytes*/4); + ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, + ahc->bus_softc.pci_softc.command, /*bytes*/1); + ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, + ahc->bus_softc.pci_softc.csize_lattime, /*bytes*/1); + if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) { + struct seeprom_descriptor sd; + u_int sxfrctl1; + + sd.sd_ahc = ahc; + sd.sd_control_offset = SEECTL; + sd.sd_status_offset = SEECTL; + sd.sd_dataout_offset = SEECTL; + + ahc_acquire_seeprom(ahc, &sd); + configure_termination(ahc, &sd, + ahc->seep_config->adapter_control, + &sxfrctl1); + ahc_release_seeprom(&sd); + } +} + +static int +ahc_aic785X_setup(struct ahc_softc *ahc) +{ + ahc_dev_softc_t pci; + uint8_t rev; + + pci = ahc->dev_softc; + ahc->channel = 'A'; + ahc->chip = AHC_AIC7850; + ahc->features = AHC_AIC7850_FE; + ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; + rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1); + if (rev >= 1) + ahc->bugs |= AHC_PCI_2_1_RETRY_BUG; + ahc->instruction_ram_size = 512; + return (0); +} + +static int +ahc_aic7860_setup(struct ahc_softc *ahc) +{ + ahc_dev_softc_t pci; + uint8_t rev; + + pci = ahc->dev_softc; + ahc->channel = 'A'; + ahc->chip = AHC_AIC7860; + ahc->features = AHC_AIC7860_FE; + ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; + rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1); + if (rev >= 1) + ahc->bugs |= AHC_PCI_2_1_RETRY_BUG; + ahc->instruction_ram_size = 512; + return (0); +} + +static int +ahc_apa1480_setup(struct ahc_softc *ahc) +{ + int error; + + error = ahc_aic7860_setup(ahc); + if (error != 0) + return (error); + ahc->features |= AHC_REMOVABLE; + return (0); +} + +static int +ahc_aic7870_setup(struct ahc_softc *ahc) +{ + + ahc->channel = 'A'; + ahc->chip = AHC_AIC7870; + ahc->features = AHC_AIC7870_FE; + ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; + ahc->instruction_ram_size = 512; + return (0); +} + +static int +ahc_aic7870h_setup(struct ahc_softc *ahc) +{ + int error = ahc_aic7870_setup(ahc); + + ahc->features |= AHC_HVD; + + return error; +} + +static int +ahc_aha394X_setup(struct ahc_softc *ahc) +{ + int error; + + error = ahc_aic7870_setup(ahc); + if (error == 0) + error = ahc_aha394XX_setup(ahc); + return (error); +} + +static int +ahc_aha394Xh_setup(struct ahc_softc *ahc) +{ + int error = ahc_aha394X_setup(ahc); + + ahc->features |= AHC_HVD; + + return error; +} + +static int +ahc_aha398X_setup(struct ahc_softc *ahc) +{ + int error; + + error = ahc_aic7870_setup(ahc); + if (error == 0) + error = ahc_aha398XX_setup(ahc); + return (error); +} + +static int +ahc_aha494X_setup(struct ahc_softc *ahc) +{ + int error; + + error = ahc_aic7870_setup(ahc); + if (error == 0) + error = ahc_aha494XX_setup(ahc); + return (error); +} + +static int +ahc_aha494Xh_setup(struct ahc_softc *ahc) +{ + int error = ahc_aha494X_setup(ahc); + + ahc->features |= AHC_HVD; + + return error; +} + +static int +ahc_aic7880_setup(struct ahc_softc *ahc) +{ + ahc_dev_softc_t pci; + uint8_t rev; + + pci = ahc->dev_softc; + ahc->channel = 'A'; + ahc->chip = AHC_AIC7880; + ahc->features = AHC_AIC7880_FE; + ahc->bugs |= AHC_TMODE_WIDEODD_BUG; + rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1); + if (rev >= 1) { + ahc->bugs |= AHC_PCI_2_1_RETRY_BUG; + } else { + ahc->bugs |= AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; + } + ahc->instruction_ram_size = 512; + return (0); +} + +static int +ahc_aic7880h_setup(struct ahc_softc 
*ahc) +{ + int error = ahc_aic7880_setup(ahc); + + ahc->features |= AHC_HVD; + + return error; +} + + +static int +ahc_aha2940Pro_setup(struct ahc_softc *ahc) +{ + + ahc->flags |= AHC_INT50_SPEEDFLEX; + return (ahc_aic7880_setup(ahc)); +} + +static int +ahc_aha394XU_setup(struct ahc_softc *ahc) +{ + int error; + + error = ahc_aic7880_setup(ahc); + if (error == 0) + error = ahc_aha394XX_setup(ahc); + return (error); +} + +static int +ahc_aha394XUh_setup(struct ahc_softc *ahc) +{ + int error = ahc_aha394XU_setup(ahc); + + ahc->features |= AHC_HVD; + + return error; +} + +static int +ahc_aha398XU_setup(struct ahc_softc *ahc) +{ + int error; + + error = ahc_aic7880_setup(ahc); + if (error == 0) + error = ahc_aha398XX_setup(ahc); + return (error); +} + +static int +ahc_aic7890_setup(struct ahc_softc *ahc) +{ + ahc_dev_softc_t pci; + uint8_t rev; + + pci = ahc->dev_softc; + ahc->channel = 'A'; + ahc->chip = AHC_AIC7890; + ahc->features = AHC_AIC7890_FE; + ahc->flags |= AHC_NEWEEPROM_FMT; + rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1); + if (rev == 0) + ahc->bugs |= AHC_AUTOFLUSH_BUG|AHC_CACHETHEN_BUG; + ahc->instruction_ram_size = 768; + return (0); +} + +static int +ahc_aic7892_setup(struct ahc_softc *ahc) +{ + + ahc->channel = 'A'; + ahc->chip = AHC_AIC7892; + ahc->features = AHC_AIC7892_FE; + ahc->flags |= AHC_NEWEEPROM_FMT; + ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG; + ahc->instruction_ram_size = 1024; + return (0); +} + +static int +ahc_aic7895_setup(struct ahc_softc *ahc) +{ + ahc_dev_softc_t pci; + uint8_t rev; + + pci = ahc->dev_softc; + ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A'; + /* + * The 'C' revision of the aic7895 has a few additional features. + */ + rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1); + if (rev >= 4) { + ahc->chip = AHC_AIC7895C; + ahc->features = AHC_AIC7895C_FE; + } else { + u_int command; + + ahc->chip = AHC_AIC7895; + ahc->features = AHC_AIC7895_FE; + + /* + * The BIOS disables the use of MWI transactions + * since it does not have the MWI bug work around + * we have. Disabling MWI reduces performance, so + * turn it on again. + */ + command = ahc_pci_read_config(pci, PCIR_COMMAND, /*bytes*/1); + command |= PCIM_CMD_MWRICEN; + ahc_pci_write_config(pci, PCIR_COMMAND, command, /*bytes*/1); + ahc->bugs |= AHC_PCI_MWI_BUG; + } + /* + * XXX Does CACHETHEN really not work??? What about PCI retry? + * on C level chips. Need to test, but for now, play it safe. + */ + ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_PCI_2_1_RETRY_BUG + | AHC_CACHETHEN_BUG; + +#if 0 + uint32_t devconfig; + + /* + * Cachesize must also be zero due to stray DAC + * problem when sitting behind some bridges. + */ + ahc_pci_write_config(pci, CSIZE_LATTIME, 0, /*bytes*/1); + devconfig = ahc_pci_read_config(pci, DEVCONFIG, /*bytes*/1); + devconfig |= MRDCEN; + ahc_pci_write_config(pci, DEVCONFIG, devconfig, /*bytes*/1); +#endif + ahc->flags |= AHC_NEWEEPROM_FMT; + ahc->instruction_ram_size = 512; + return (0); +} + +static int +ahc_aic7895h_setup(struct ahc_softc *ahc) +{ + int error = ahc_aic7895_setup(ahc); + + ahc->features |= AHC_HVD; + + return error; +} + +static int +ahc_aic7896_setup(struct ahc_softc *ahc) +{ + ahc_dev_softc_t pci; + + pci = ahc->dev_softc; + ahc->channel = ahc_get_pci_function(pci) == 1 ? 
'B' : 'A'; + ahc->chip = AHC_AIC7896; + ahc->features = AHC_AIC7896_FE; + ahc->flags |= AHC_NEWEEPROM_FMT; + ahc->bugs |= AHC_CACHETHEN_DIS_BUG; + ahc->instruction_ram_size = 768; + return (0); +} + +static int +ahc_aic7899_setup(struct ahc_softc *ahc) +{ + ahc_dev_softc_t pci; + + pci = ahc->dev_softc; + ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A'; + ahc->chip = AHC_AIC7899; + ahc->features = AHC_AIC7899_FE; + ahc->flags |= AHC_NEWEEPROM_FMT; + ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG; + ahc->instruction_ram_size = 1024; + return (0); +} + +static int +ahc_aha29160C_setup(struct ahc_softc *ahc) +{ + int error; + + error = ahc_aic7899_setup(ahc); + if (error != 0) + return (error); + ahc->features |= AHC_REMOVABLE; + return (0); +} + +static int +ahc_raid_setup(struct ahc_softc *ahc) +{ + printk("RAID functionality unsupported\n"); + return (ENXIO); +} + +static int +ahc_aha394XX_setup(struct ahc_softc *ahc) +{ + ahc_dev_softc_t pci; + + pci = ahc->dev_softc; + switch (ahc_get_pci_slot(pci)) { + case AHC_394X_SLOT_CHANNEL_A: + ahc->channel = 'A'; + break; + case AHC_394X_SLOT_CHANNEL_B: + ahc->channel = 'B'; + break; + default: + printk("adapter at unexpected slot %d\n" + "unable to map to a channel\n", + ahc_get_pci_slot(pci)); + ahc->channel = 'A'; + } + return (0); +} + +static int +ahc_aha398XX_setup(struct ahc_softc *ahc) +{ + ahc_dev_softc_t pci; + + pci = ahc->dev_softc; + switch (ahc_get_pci_slot(pci)) { + case AHC_398X_SLOT_CHANNEL_A: + ahc->channel = 'A'; + break; + case AHC_398X_SLOT_CHANNEL_B: + ahc->channel = 'B'; + break; + case AHC_398X_SLOT_CHANNEL_C: + ahc->channel = 'C'; + break; + default: + printk("adapter at unexpected slot %d\n" + "unable to map to a channel\n", + ahc_get_pci_slot(pci)); + ahc->channel = 'A'; + break; + } + ahc->flags |= AHC_LARGE_SEEPROM; + return (0); +} + +static int +ahc_aha494XX_setup(struct ahc_softc *ahc) +{ + ahc_dev_softc_t pci; + + pci = ahc->dev_softc; + switch (ahc_get_pci_slot(pci)) { + case AHC_494X_SLOT_CHANNEL_A: + ahc->channel = 'A'; + break; + case AHC_494X_SLOT_CHANNEL_B: + ahc->channel = 'B'; + break; + case AHC_494X_SLOT_CHANNEL_C: + ahc->channel = 'C'; + break; + case AHC_494X_SLOT_CHANNEL_D: + ahc->channel = 'D'; + break; + default: + printk("adapter at unexpected slot %d\n" + "unable to map to a channel\n", + ahc_get_pci_slot(pci)); + ahc->channel = 'A'; + } + ahc->flags |= AHC_LARGE_SEEPROM; + return (0); +} diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.h b/drivers/scsi/aic7xxx/aic7xxx_pci.h new file mode 100644 index 000000000..263f85da4 --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7xxx_pci.h @@ -0,0 +1,125 @@ +/* + * Adaptec AIC7xxx device driver for Linux. + * + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id$ + * + */ +#ifndef _AIC7XXX_PCI_H_ +#define _AIC7XXX_PCI_H_ + +#define ID_ALL_MASK 0xFFFFFFFFFFFFFFFFull +#define ID_DEV_VENDOR_MASK 0xFFFFFFFF00000000ull +#define ID_9005_GENERIC_MASK 0xFFF0FFFF00000000ull +#define ID_9005_SISL_MASK 0x000FFFFF00000000ull +#define ID_9005_SISL_ID 0x0005900500000000ull +#define ID_AIC7850 0x5078900400000000ull +#define ID_AHA_2902_04_10_15_20C_30C 0x5078900478509004ull +#define ID_AIC7855 0x5578900400000000ull +#define ID_AIC7859 0x3860900400000000ull +#define ID_AHA_2930CU 0x3860900438699004ull +#define ID_AIC7860 0x6078900400000000ull +#define ID_AIC7860C 0x6078900478609004ull +#define ID_AHA_1480A 0x6075900400000000ull +#define ID_AHA_2940AU_0 0x6178900400000000ull +#define ID_AHA_2940AU_1 0x6178900478619004ull +#define ID_AHA_2940AU_CN 0x2178900478219004ull +#define ID_AHA_2930C_VAR 0x6038900438689004ull + +#define ID_AIC7870 0x7078900400000000ull +#define ID_AHA_2940 0x7178900400000000ull +#define ID_AHA_3940 0x7278900400000000ull +#define ID_AHA_398X 0x7378900400000000ull +#define ID_AHA_2944 0x7478900400000000ull +#define ID_AHA_3944 0x7578900400000000ull +#define ID_AHA_4944 0x7678900400000000ull + +#define ID_AIC7880 0x8078900400000000ull +#define ID_AIC7880_B 0x8078900478809004ull +#define ID_AHA_2940U 0x8178900400000000ull +#define ID_AHA_3940U 0x8278900400000000ull +#define ID_AHA_2944U 0x8478900400000000ull +#define ID_AHA_3944U 0x8578900400000000ull +#define ID_AHA_398XU 0x8378900400000000ull +#define ID_AHA_4944U 0x8678900400000000ull +#define ID_AHA_2940UB 0x8178900478819004ull +#define ID_AHA_2930U 0x8878900478889004ull +#define ID_AHA_2940U_PRO 0x8778900478879004ull +#define ID_AHA_2940U_CN 0x0078900478009004ull + +#define ID_AIC7895 0x7895900478959004ull +#define ID_AIC7895_ARO 0x7890900478939004ull +#define ID_AIC7895_ARO_MASK 0xFFF0FFFFFFFFFFFFull +#define ID_AHA_2940U_DUAL 0x7895900478919004ull +#define ID_AHA_3940AU 0x7895900478929004ull +#define ID_AHA_3944AU 0x7895900478949004ull + +#define ID_AIC7890 0x001F9005000F9005ull +#define ID_AIC7890_ARO 0x00139005000F9005ull +#define ID_AAA_131U2 0x0013900500039005ull +#define ID_AHA_2930U2 0x0011900501819005ull +#define ID_AHA_2940U2B 0x00109005A1009005ull +#define ID_AHA_2940U2_OEM 0x0010900521809005ull +#define ID_AHA_2940U2 0x00109005A1809005ull +#define ID_AHA_2950U2B 0x00109005E1009005ull + +#define ID_AIC7892 
0x008F9005FFFF9005ull +#define ID_AIC7892_ARO 0x00839005FFFF9005ull +#define ID_AHA_29160 0x00809005E2A09005ull +#define ID_AHA_29160_CPQ 0x00809005E2A00E11ull +#define ID_AHA_29160N 0x0080900562A09005ull +#define ID_AHA_29160C 0x0080900562209005ull +#define ID_AHA_29160B 0x00809005E2209005ull +#define ID_AHA_19160B 0x0081900562A19005ull +#define ID_AHA_2915_30LP 0x0082900502109005ull + +#define ID_AIC7896 0x005F9005FFFF9005ull +#define ID_AIC7896_ARO 0x00539005FFFF9005ull +#define ID_AHA_3950U2B_0 0x00509005FFFF9005ull +#define ID_AHA_3950U2B_1 0x00509005F5009005ull +#define ID_AHA_3950U2D_0 0x00519005FFFF9005ull +#define ID_AHA_3950U2D_1 0x00519005B5009005ull + +#define ID_AIC7899 0x00CF9005FFFF9005ull +#define ID_AIC7899_ARO 0x00C39005FFFF9005ull +#define ID_AHA_3960D 0x00C09005F6209005ull +#define ID_AHA_3960D_CPQ 0x00C09005F6200E11ull + +#define ID_AIC7810 0x1078900400000000ull +#define ID_AIC7815 0x7815900400000000ull + +#endif /* _AIC7XXX_PCI_H_ */ diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c new file mode 100644 index 000000000..4bc9e2dfc --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * String handling code courtesy of Gerard Roudier's + * sym driver. 
+ * + * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_proc.c#29 $ + */ +#include "aic7xxx_osm.h" +#include "aic7xxx_inline.h" +#include "aic7xxx_93cx6.h" + +static void ahc_dump_target_state(struct ahc_softc *ahc, + struct seq_file *m, + u_int our_id, char channel, + u_int target_id, u_int target_offset); +static void ahc_dump_device_state(struct seq_file *m, + struct scsi_device *dev); + +/* + * Table of syncrates that don't follow the "divisible by 4" + * rule. This table will be expanded in future SCSI specs. + */ +static const struct { + u_int period_factor; + u_int period; /* in 100ths of ns */ +} scsi_syncrates[] = { + { 0x08, 625 }, /* FAST-160 */ + { 0x09, 1250 }, /* FAST-80 */ + { 0x0a, 2500 }, /* FAST-40 40MHz */ + { 0x0b, 3030 }, /* FAST-40 33MHz */ + { 0x0c, 5000 } /* FAST-20 */ +}; + +/* + * Return the frequency in kHz corresponding to the given + * sync period factor. + */ +static u_int +ahc_calc_syncsrate(u_int period_factor) +{ + int i; + + /* See if the period is in the "exception" table */ + for (i = 0; i < ARRAY_SIZE(scsi_syncrates); i++) { + + if (period_factor == scsi_syncrates[i].period_factor) { + /* Period in kHz */ + return (100000000 / scsi_syncrates[i].period); + } + } + + /* + * Wasn't in the table, so use the standard + * 4 times conversion. + */ + return (10000000 / (period_factor * 4 * 10)); +} + +static void +ahc_format_transinfo(struct seq_file *m, struct ahc_transinfo *tinfo) +{ + u_int speed; + u_int freq; + u_int mb; + + speed = 3300; + freq = 0; + if (tinfo->offset != 0) { + freq = ahc_calc_syncsrate(tinfo->period); + speed = freq; + } + speed *= (0x01 << tinfo->width); + mb = speed / 1000; + if (mb > 0) + seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000); + else + seq_printf(m, "%dKB/s transfers", speed); + + if (freq != 0) { + seq_printf(m, " (%d.%03dMHz%s, offset %d", + freq / 1000, freq % 1000, + (tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0 + ? 
" DT" : "", tinfo->offset); + } + + if (tinfo->width > 0) { + if (freq != 0) { + seq_puts(m, ", "); + } else { + seq_puts(m, " ("); + } + seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width)); + } else if (freq != 0) { + seq_putc(m, ')'); + } + seq_putc(m, '\n'); +} + +static void +ahc_dump_target_state(struct ahc_softc *ahc, struct seq_file *m, + u_int our_id, char channel, u_int target_id, + u_int target_offset) +{ + struct scsi_target *starget; + struct ahc_initiator_tinfo *tinfo; + struct ahc_tmode_tstate *tstate; + int lun; + + tinfo = ahc_fetch_transinfo(ahc, channel, our_id, + target_id, &tstate); + if ((ahc->features & AHC_TWIN) != 0) + seq_printf(m, "Channel %c ", channel); + seq_printf(m, "Target %d Negotiation Settings\n", target_id); + seq_puts(m, "\tUser: "); + ahc_format_transinfo(m, &tinfo->user); + starget = ahc->platform_data->starget[target_offset]; + if (!starget) + return; + + seq_puts(m, "\tGoal: "); + ahc_format_transinfo(m, &tinfo->goal); + seq_puts(m, "\tCurr: "); + ahc_format_transinfo(m, &tinfo->curr); + + for (lun = 0; lun < AHC_NUM_LUNS; lun++) { + struct scsi_device *sdev; + + sdev = scsi_device_lookup_by_target(starget, lun); + + if (sdev == NULL) + continue; + + ahc_dump_device_state(m, sdev); + } +} + +static void +ahc_dump_device_state(struct seq_file *m, struct scsi_device *sdev) +{ + struct ahc_linux_device *dev = scsi_transport_device_data(sdev); + + seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n", + sdev->sdev_target->channel + 'A', + sdev->sdev_target->id, (u8)sdev->lun); + + seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued); + seq_printf(m, "\t\tCommands Active %d\n", dev->active); + seq_printf(m, "\t\tCommand Openings %d\n", dev->openings); + seq_printf(m, "\t\tMax Tagged Openings %d\n", dev->maxtags); + seq_printf(m, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen); +} + +int +ahc_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length) +{ + struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata; + struct seeprom_descriptor sd; + int have_seeprom; + u_long s; + int paused; + int written; + + /* Default to failure. 
*/ + written = -EINVAL; + ahc_lock(ahc, &s); + paused = ahc_is_paused(ahc); + if (!paused) + ahc_pause(ahc); + + if (length != sizeof(struct seeprom_config)) { + printk("ahc_proc_write_seeprom: incorrect buffer size\n"); + goto done; + } + + have_seeprom = ahc_verify_cksum((struct seeprom_config*)buffer); + if (have_seeprom == 0) { + printk("ahc_proc_write_seeprom: cksum verification failed\n"); + goto done; + } + + sd.sd_ahc = ahc; +#if AHC_PCI_CONFIG > 0 + if ((ahc->chip & AHC_PCI) != 0) { + sd.sd_control_offset = SEECTL; + sd.sd_status_offset = SEECTL; + sd.sd_dataout_offset = SEECTL; + if (ahc->flags & AHC_LARGE_SEEPROM) + sd.sd_chip = C56_66; + else + sd.sd_chip = C46; + sd.sd_MS = SEEMS; + sd.sd_RDY = SEERDY; + sd.sd_CS = SEECS; + sd.sd_CK = SEECK; + sd.sd_DO = SEEDO; + sd.sd_DI = SEEDI; + have_seeprom = ahc_acquire_seeprom(ahc, &sd); + } else +#endif + if ((ahc->chip & AHC_VL) != 0) { + sd.sd_control_offset = SEECTL_2840; + sd.sd_status_offset = STATUS_2840; + sd.sd_dataout_offset = STATUS_2840; + sd.sd_chip = C46; + sd.sd_MS = 0; + sd.sd_RDY = EEPROM_TF; + sd.sd_CS = CS_2840; + sd.sd_CK = CK_2840; + sd.sd_DO = DO_2840; + sd.sd_DI = DI_2840; + have_seeprom = TRUE; + } else { + printk("ahc_proc_write_seeprom: unsupported adapter type\n"); + goto done; + } + + if (!have_seeprom) { + printk("ahc_proc_write_seeprom: No Serial EEPROM\n"); + goto done; + } else { + u_int start_addr; + + if (ahc->seep_config == NULL) { + ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), + GFP_ATOMIC); + if (ahc->seep_config == NULL) { + printk("aic7xxx: Unable to allocate serial " + "eeprom buffer. Write failing\n"); + goto done; + } + } + printk("aic7xxx: Writing Serial EEPROM\n"); + start_addr = 32 * (ahc->channel - 'A'); + ahc_write_seeprom(&sd, (u_int16_t *)buffer, start_addr, + sizeof(struct seeprom_config)/2); + ahc_read_seeprom(&sd, (uint16_t *)ahc->seep_config, + start_addr, sizeof(struct seeprom_config)/2); +#if AHC_PCI_CONFIG > 0 + if ((ahc->chip & AHC_VL) == 0) + ahc_release_seeprom(&sd); +#endif + written = length; + } + +done: + if (!paused) + ahc_unpause(ahc); + ahc_unlock(ahc, &s); + return (written); +} + +/* + * Return information to handle /proc support for the driver. 
+ */ +int +ahc_linux_show_info(struct seq_file *m, struct Scsi_Host *shost) +{ + struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata; + char ahc_info[256]; + u_int max_targ; + u_int i; + + seq_printf(m, "Adaptec AIC7xxx driver version: %s\n", + AIC7XXX_DRIVER_VERSION); + seq_printf(m, "%s\n", ahc->description); + ahc_controller_info(ahc, ahc_info); + seq_printf(m, "%s\n", ahc_info); + seq_printf(m, "Allocated SCBs: %d, SG List Length: %d\n\n", + ahc->scb_data->numscbs, AHC_NSEG); + + + if (ahc->seep_config == NULL) + seq_puts(m, "No Serial EEPROM\n"); + else { + seq_puts(m, "Serial EEPROM:\n"); + for (i = 0; i < sizeof(*ahc->seep_config)/2; i++) { + if (((i % 8) == 0) && (i != 0)) { + seq_putc(m, '\n'); + } + seq_printf(m, "0x%.4x ", + ((uint16_t*)ahc->seep_config)[i]); + } + seq_putc(m, '\n'); + } + seq_putc(m, '\n'); + + max_targ = 16; + if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) + max_targ = 8; + + for (i = 0; i < max_targ; i++) { + u_int our_id; + u_int target_id; + char channel; + + channel = 'A'; + our_id = ahc->our_id; + target_id = i; + if (i > 7 && (ahc->features & AHC_TWIN) != 0) { + channel = 'B'; + our_id = ahc->our_id_b; + target_id = i % 8; + } + + ahc_dump_target_state(ahc, m, our_id, + channel, target_id, i); + } + return 0; +} diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped new file mode 100644 index 000000000..473039df0 --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped @@ -0,0 +1,912 @@ +/* + * DO NOT EDIT - This file is automatically generated + * from the following source files: + * + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $ + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $ + */ +typedef int (ahc_reg_print_t)(u_int, u_int *, u_int); +typedef struct ahc_reg_parse_entry { + char *name; + uint8_t value; + uint8_t mask; +} ahc_reg_parse_entry_t; + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_scsiseq_print; +#else +#define ahc_scsiseq_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SCSISEQ", 0x00, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_sxfrctl0_print; +#else +#define ahc_sxfrctl0_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SXFRCTL0", 0x01, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_scsisigi_print; +#else +#define ahc_scsisigi_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SCSISIGI", 0x03, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_scsirate_print; +#else +#define ahc_scsirate_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SCSIRATE", 0x04, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_sstat0_print; +#else +#define ahc_sstat0_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SSTAT0", 0x0b, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_sstat1_print; +#else +#define ahc_sstat1_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SSTAT1", 0x0c, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_sstat2_print; +#else +#define ahc_sstat2_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SSTAT2", 0x0d, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_sstat3_print; +#else +#define ahc_sstat3_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SSTAT3", 0x0e, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t 
ahc_simode0_print; +#else +#define ahc_simode0_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SIMODE0", 0x10, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_simode1_print; +#else +#define ahc_simode1_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SIMODE1", 0x11, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_scsibusl_print; +#else +#define ahc_scsibusl_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SCSIBUSL", 0x12, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_sblkctl_print; +#else +#define ahc_sblkctl_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SBLKCTL", 0x1f, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_seq_flags_print; +#else +#define ahc_seq_flags_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SEQ_FLAGS", 0x3c, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_lastphase_print; +#else +#define ahc_lastphase_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "LASTPHASE", 0x3f, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_seqctl_print; +#else +#define ahc_seqctl_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SEQCTL", 0x60, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_sram_base_print; +#else +#define ahc_sram_base_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SRAM_BASE", 0x70, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_error_print; +#else +#define ahc_error_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "ERROR", 0x92, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_dfcntrl_print; +#else +#define ahc_dfcntrl_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "DFCNTRL", 0x93, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_dfstatus_print; +#else +#define ahc_dfstatus_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "DFSTATUS", 0x94, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_scsiphase_print; +#else +#define ahc_scsiphase_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SCSIPHASE", 0x9e, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_scb_base_print; +#else +#define ahc_scb_base_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SCB_BASE", 0xa0, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_scb_control_print; +#else +#define ahc_scb_control_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SCB_CONTROL", 0xb8, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_scb_scsiid_print; +#else +#define ahc_scb_scsiid_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SCB_SCSIID", 0xb9, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_scb_lun_print; +#else +#define ahc_scb_lun_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SCB_LUN", 0xba, regvalue, cur_col, wrap) +#endif + +#if AIC_DEBUG_REGISTERS +ahc_reg_print_t ahc_scb_tag_print; +#else +#define ahc_scb_tag_print(regvalue, cur_col, wrap) \ + ahc_print_register(NULL, 0, "SCB_TAG", 0xbb, regvalue, cur_col, wrap) +#endif + + +#define SCSISEQ 0x00 +#define TEMODE 0x80 +#define SCSIRSTO 0x01 + +#define 
SXFRCTL0 0x01 +#define DFON 0x80 +#define DFPEXP 0x40 +#define FAST20 0x20 +#define CLRSTCNT 0x10 +#define SPIOEN 0x08 +#define SCAMEN 0x04 +#define CLRCHN 0x02 + +#define SXFRCTL1 0x02 +#define STIMESEL 0x18 +#define BITBUCKET 0x80 +#define SWRAPEN 0x40 +#define ENSTIMER 0x04 +#define ACTNEGEN 0x02 +#define STPWEN 0x01 + +#define SCSISIGO 0x03 +#define CDO 0x80 +#define IOO 0x40 +#define MSGO 0x20 +#define ATNO 0x10 +#define SELO 0x08 +#define BSYO 0x04 +#define REQO 0x02 +#define ACKO 0x01 + +#define SCSISIGI 0x03 +#define P_DATAIN_DT 0x60 +#define P_DATAOUT_DT 0x20 +#define ATNI 0x10 +#define SELI 0x08 +#define BSYI 0x04 +#define REQI 0x02 +#define ACKI 0x01 + +#define SCSIRATE 0x04 +#define SXFR 0x70 +#define SOFS 0x0f +#define SXFR_ULTRA2 0x0f +#define WIDEXFER 0x80 +#define ENABLE_CRC 0x40 +#define SINGLE_EDGE 0x10 + +#define SCSIID 0x05 +#define SCSIOFFSET 0x05 +#define SOFS_ULTRA2 0x7f + +#define SCSIDATL 0x06 + +#define SCSIDATH 0x07 + +#define OPTIONMODE 0x08 +#define OPTIONMODE_DEFAULTS 0x03 +#define AUTORATEEN 0x80 +#define AUTOACKEN 0x40 +#define ATNMGMNTEN 0x20 +#define BUSFREEREV 0x10 +#define EXPPHASEDIS 0x08 +#define SCSIDATL_IMGEN 0x04 +#define AUTO_MSGOUT_DE 0x02 +#define DIS_MSGIN_DUALEDGE 0x01 + +#define STCNT 0x08 + +#define TARGCRCCNT 0x0a + +#define CLRSINT0 0x0b +#define CLRSELDO 0x40 +#define CLRSELDI 0x20 +#define CLRSELINGO 0x10 +#define CLRIOERR 0x08 +#define CLRSWRAP 0x08 +#define CLRSPIORDY 0x02 + +#define SSTAT0 0x0b +#define TARGET 0x80 +#define SELDO 0x40 +#define SELDI 0x20 +#define SELINGO 0x10 +#define SWRAP 0x08 +#define IOERR 0x08 +#define SDONE 0x04 +#define SPIORDY 0x02 +#define DMADONE 0x01 + +#define CLRSINT1 0x0c +#define CLRSELTIMEO 0x80 +#define CLRATNO 0x40 +#define CLRSCSIRSTI 0x20 +#define CLRBUSFREE 0x08 +#define CLRSCSIPERR 0x04 +#define CLRPHASECHG 0x02 +#define CLRREQINIT 0x01 + +#define SSTAT1 0x0c +#define SELTO 0x80 +#define ATNTARG 0x40 +#define SCSIRSTI 0x20 +#define PHASEMIS 0x10 +#define BUSFREE 0x08 +#define SCSIPERR 0x04 +#define PHASECHG 0x02 +#define REQINIT 0x01 + +#define SSTAT2 0x0d +#define SFCNT 0x1f +#define OVERRUN 0x80 +#define SHVALID 0x40 +#define EXP_ACTIVE 0x10 +#define CRCVALERR 0x08 +#define CRCENDERR 0x04 +#define CRCREQERR 0x02 +#define DUAL_EDGE_ERR 0x01 + +#define SSTAT3 0x0e +#define SCSICNT 0xf0 +#define U2OFFCNT 0x7f +#define OFFCNT 0x0f + +#define SCSIID_ULTRA2 0x0f + +#define SIMODE0 0x10 +#define ENSELDO 0x40 +#define ENSELDI 0x20 +#define ENSELINGO 0x10 +#define ENIOERR 0x08 +#define ENSWRAP 0x08 +#define ENSDONE 0x04 +#define ENSPIORDY 0x02 +#define ENDMADONE 0x01 + +#define SIMODE1 0x11 +#define ENSELTIMO 0x80 +#define ENATNTARG 0x40 +#define ENSCSIRST 0x20 +#define ENPHASEMIS 0x10 +#define ENBUSFREE 0x08 +#define ENSCSIPERR 0x04 +#define ENPHASECHG 0x02 +#define ENREQINIT 0x01 + +#define SCSIBUSL 0x12 + +#define SCSIBUSH 0x13 + +#define SXFRCTL2 0x13 +#define ASYNC_SETUP 0x07 +#define AUTORSTDIS 0x10 +#define CMDDMAEN 0x08 + +#define SHADDR 0x14 + +#define SELTIMER 0x18 +#define TARGIDIN 0x18 +#define STAGE6 0x20 +#define STAGE5 0x10 +#define STAGE4 0x08 +#define STAGE3 0x04 +#define STAGE2 0x02 +#define STAGE1 0x01 + +#define SELID 0x19 +#define SELID_MASK 0xf0 +#define ONEBIT 0x08 + +#define SCAMCTL 0x1a +#define SCAMLVL 0x03 +#define ENSCAMSELO 0x80 +#define CLRSCAMSELID 0x40 +#define ALTSTIM 0x20 +#define DFLTTID 0x10 + +#define SPIOCAP 0x1b +#define SOFT1 0x80 +#define SOFT0 0x40 +#define SOFTCMDEN 0x20 +#define EXT_BRDCTL 0x10 +#define SEEPROM 0x08 +#define EEPROM 0x04 +#define ROM 0x02 
+#define SSPIOCPS 0x01 + +#define TARGID 0x1b + +#define BRDCTL 0x1d +#define BRDDAT7 0x80 +#define BRDDAT6 0x40 +#define BRDDAT5 0x20 +#define BRDSTB 0x10 +#define BRDDAT4 0x10 +#define BRDDAT3 0x08 +#define BRDCS 0x08 +#define BRDDAT2 0x04 +#define BRDRW 0x04 +#define BRDRW_ULTRA2 0x02 +#define BRDCTL1 0x02 +#define BRDCTL0 0x01 +#define BRDSTB_ULTRA2 0x01 + +#define SEECTL 0x1e +#define EXTARBACK 0x80 +#define EXTARBREQ 0x40 +#define SEEMS 0x20 +#define SEERDY 0x10 +#define SEECS 0x08 +#define SEECK 0x04 +#define SEEDO 0x02 +#define SEEDI 0x01 + +#define SBLKCTL 0x1f +#define DIAGLEDEN 0x80 +#define DIAGLEDON 0x40 +#define AUTOFLUSHDIS 0x20 +#define SELBUSB 0x08 +#define ENAB40 0x08 +#define ENAB20 0x04 +#define SELWIDE 0x02 +#define XCVR 0x01 + +#define BUSY_TARGETS 0x20 +#define TARG_SCSIRATE 0x20 + +#define ULTRA_ENB 0x30 +#define CMDSIZE_TABLE 0x30 + +#define DISC_DSB 0x32 + +#define CMDSIZE_TABLE_TAIL 0x34 + +#define MWI_RESIDUAL 0x38 + +#define NEXT_QUEUED_SCB 0x39 + +#define MSG_OUT 0x3a + +#define DMAPARAMS 0x3b +#define PRELOADEN 0x80 +#define WIDEODD 0x40 +#define SCSIEN 0x20 +#define SDMAEN 0x10 +#define SDMAENACK 0x10 +#define HDMAEN 0x08 +#define HDMAENACK 0x08 +#define DIRECTION 0x04 +#define FIFOFLUSH 0x02 +#define FIFORESET 0x01 + +#define SEQ_FLAGS 0x3c +#define NOT_IDENTIFIED 0x80 +#define NO_CDB_SENT 0x40 +#define TARGET_CMD_IS_TAGGED 0x40 +#define DPHASE 0x20 +#define TARG_CMD_PENDING 0x10 +#define CMDPHASE_PENDING 0x08 +#define DPHASE_PENDING 0x04 +#define SPHASE_PENDING 0x02 +#define NO_DISCONNECT 0x01 + +#define SAVED_SCSIID 0x3d + +#define SAVED_LUN 0x3e + +#define LASTPHASE 0x3f +#define P_MESGIN 0xe0 +#define PHASE_MASK 0xe0 +#define P_STATUS 0xc0 +#define P_MESGOUT 0xa0 +#define P_COMMAND 0x80 +#define P_DATAIN 0x40 +#define P_BUSFREE 0x01 +#define P_DATAOUT 0x00 +#define CDI 0x80 +#define IOI 0x40 +#define MSGI 0x20 + +#define WAITING_SCBH 0x40 + +#define DISCONNECTED_SCBH 0x41 + +#define FREE_SCBH 0x42 + +#define COMPLETE_SCBH 0x43 + +#define HSCB_ADDR 0x44 + +#define SHARED_DATA_ADDR 0x48 + +#define KERNEL_QINPOS 0x4c + +#define QINPOS 0x4d + +#define QOUTPOS 0x4e + +#define KERNEL_TQINPOS 0x4f + +#define TQINPOS 0x50 + +#define ARG_1 0x51 +#define RETURN_1 0x51 +#define SEND_MSG 0x80 +#define SEND_SENSE 0x40 +#define SEND_REJ 0x20 +#define MSGOUT_PHASEMIS 0x10 +#define EXIT_MSG_LOOP 0x08 +#define CONT_MSG_LOOP 0x04 +#define CONT_TARG_SESSION 0x02 + +#define ARG_2 0x52 +#define RETURN_2 0x52 + +#define LAST_MSG 0x53 +#define TARG_IMMEDIATE_SCB 0x53 + +#define SCSISEQ_TEMPLATE 0x54 +#define ENSELO 0x40 +#define ENSELI 0x20 +#define ENRSELI 0x10 +#define ENAUTOATNO 0x08 +#define ENAUTOATNI 0x04 +#define ENAUTOATNP 0x02 + +#define HA_274_BIOSGLOBAL 0x56 +#define INITIATOR_TAG 0x56 +#define HA_274_EXTENDED_TRANS 0x01 + +#define SEQ_FLAGS2 0x57 +#define TARGET_MSG_PENDING 0x02 +#define SCB_DMA 0x01 + +#define SCSICONF 0x5a +#define HWSCSIID 0x0f +#define HSCSIID 0x07 +#define TERM_ENB 0x80 +#define RESET_SCSI 0x40 +#define ENSPCHK 0x20 + +#define INTDEF 0x5c +#define VECTOR 0x0f +#define EDGE_TRIG 0x80 + +#define HOSTCONF 0x5d + +#define HA_274_BIOSCTRL 0x5f +#define BIOSDISABLED 0x30 +#define BIOSMODE 0x30 +#define CHANNEL_B_PRIMARY 0x08 + +#define SEQCTL 0x60 +#define PERRORDIS 0x80 +#define PAUSEDIS 0x40 +#define FAILDIS 0x20 +#define FASTMODE 0x10 +#define BRKADRINTEN 0x08 +#define STEP 0x04 +#define SEQRESET 0x02 +#define LOADRAM 0x01 + +#define SEQRAM 0x61 + +#define SEQADDR0 0x62 + +#define SEQADDR1 0x63 +#define SEQADDR1_MASK 0x01 + +#define ACCUM 0x64 + 
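/*
 * Illustrative sketch only, not part of the generated header or of this
 * patch: a minimal helper showing how the LASTPHASE value defined above is
 * commonly decoded.  LASTPHASE records the CDI/IOI/MSGI signal combination,
 * so masking with PHASE_MASK recovers the SCSI bus phase, while P_BUSFREE
 * serves as an out-of-band "no phase" marker.  The helper name
 * ahc_phase_name() is hypothetical; only the macros defined above are used.
 */
static const char *
ahc_phase_name(unsigned int lastphase)		/* hypothetical helper */
{
	if (lastphase == P_BUSFREE)		/* pseudo-phase marker */
		return "Bus Free";

	switch (lastphase & PHASE_MASK) {	/* CDI|IOI|MSGI bits */
	case P_DATAOUT:
		return "Data-Out";
	case P_DATAIN:
		return "Data-In";
	case P_COMMAND:
		return "Command";
	case P_STATUS:
		return "Status";
	case P_MESGOUT:
		return "Message-Out";
	case P_MESGIN:
		return "Message-In";
	default:
		return "Unknown";
	}
}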
+#define SINDEX 0x65 + +#define DINDEX 0x66 + +#define ALLONES 0x69 + +#define ALLZEROS 0x6a + +#define NONE 0x6a + +#define FLAGS 0x6b +#define ZERO 0x02 +#define CARRY 0x01 + +#define SINDIR 0x6c + +#define DINDIR 0x6d + +#define FUNCTION1 0x6e + +#define STACK 0x6f + +#define TARG_OFFSET 0x70 + +#define SRAM_BASE 0x70 + +#define BCTL 0x84 +#define ACE 0x08 +#define ENABLE 0x01 + +#define DSCOMMAND0 0x84 +#define CACHETHEN 0x80 +#define DPARCKEN 0x40 +#define MPARCKEN 0x20 +#define EXTREQLCK 0x10 +#define INTSCBRAMSEL 0x08 +#define RAMPS 0x04 +#define USCBSIZE32 0x02 +#define CIOPARCKEN 0x01 + +#define BUSTIME 0x85 +#define BOFF 0xf0 +#define BON 0x0f + +#define DSCOMMAND1 0x85 +#define DSLATT 0xfc +#define HADDLDSEL1 0x02 +#define HADDLDSEL0 0x01 + +#define BUSSPD 0x86 +#define DFTHRSH 0xc0 +#define DFTHRSH_75 0x80 +#define STBOFF 0x38 +#define STBON 0x07 + +#define HS_MAILBOX 0x86 +#define HOST_MAILBOX 0xf0 +#define HOST_TQINPOS 0x80 +#define SEQ_MAILBOX 0x0f + +#define DSPCISTATUS 0x86 +#define DFTHRSH_100 0xc0 + +#define HCNTRL 0x87 +#define POWRDN 0x40 +#define SWINT 0x10 +#define IRQMS 0x08 +#define PAUSE 0x04 +#define INTEN 0x02 +#define CHIPRST 0x01 +#define CHIPRSTACK 0x01 + +#define HADDR 0x88 + +#define HCNT 0x8c + +#define SCBPTR 0x90 + +#define INTSTAT 0x91 +#define SEQINT_MASK 0xf1 +#define OUT_OF_RANGE 0xe1 +#define NO_FREE_SCB 0xd1 +#define SCB_MISMATCH 0xc1 +#define MISSED_BUSFREE 0xb1 +#define MKMSG_FAILED 0xa1 +#define DATA_OVERRUN 0x91 +#define PERR_DETECTED 0x81 +#define BAD_STATUS 0x71 +#define HOST_MSG_LOOP 0x61 +#define PDATA_REINIT 0x51 +#define IGN_WIDE_RES 0x41 +#define NO_MATCH 0x31 +#define PROTO_VIOLATION 0x21 +#define SEND_REJECT 0x11 +#define INT_PEND 0x0f +#define BAD_PHASE 0x01 +#define BRKADRINT 0x08 +#define SCSIINT 0x04 +#define CMDCMPLT 0x02 +#define SEQINT 0x01 + +#define CLRINT 0x92 +#define CLRPARERR 0x10 +#define CLRBRKADRINT 0x08 +#define CLRSCSIINT 0x04 +#define CLRCMDINT 0x02 +#define CLRSEQINT 0x01 + +#define ERROR 0x92 +#define CIOPARERR 0x80 +#define PCIERRSTAT 0x40 +#define MPARERR 0x20 +#define DPARERR 0x10 +#define SQPARERR 0x08 +#define ILLOPCODE 0x04 +#define ILLSADDR 0x02 +#define ILLHADDR 0x01 + +#define DFCNTRL 0x93 + +#define DFSTATUS 0x94 +#define PRELOAD_AVAIL 0x80 +#define DFCACHETH 0x40 +#define FIFOQWDEMP 0x20 +#define MREQPEND 0x10 +#define HDONE 0x08 +#define DFTHRESH 0x04 +#define FIFOFULL 0x02 +#define FIFOEMP 0x01 + +#define DFWADDR 0x95 + +#define DFRADDR 0x97 + +#define DFDAT 0x99 + +#define SCBCNT 0x9a +#define SCBCNT_MASK 0x1f +#define SCBAUTO 0x80 + +#define QINFIFO 0x9b + +#define QINCNT 0x9c + +#define QOUTFIFO 0x9d + +#define CRCCONTROL1 0x9d +#define CRCONSEEN 0x80 +#define CRCVALCHKEN 0x40 +#define CRCENDCHKEN 0x20 +#define CRCREQCHKEN 0x10 +#define TARGCRCENDEN 0x08 +#define TARGCRCCNTEN 0x04 + +#define QOUTCNT 0x9e + +#define SCSIPHASE 0x9e +#define DATA_PHASE_MASK 0x03 +#define STATUS_PHASE 0x20 +#define COMMAND_PHASE 0x10 +#define MSG_IN_PHASE 0x08 +#define MSG_OUT_PHASE 0x04 +#define DATA_IN_PHASE 0x02 +#define DATA_OUT_PHASE 0x01 + +#define SFUNCT 0x9f +#define ALT_MODE 0x80 + +#define SCB_BASE 0xa0 + +#define SCB_CDB_PTR 0xa0 +#define SCB_CDB_STORE 0xa0 +#define SCB_RESIDUAL_DATACNT 0xa0 + +#define SCB_RESIDUAL_SGPTR 0xa4 + +#define SCB_SCSI_STATUS 0xa8 + +#define SCB_TARGET_PHASES 0xa9 + +#define SCB_TARGET_DATA_DIR 0xaa + +#define SCB_TARGET_ITAG 0xab + +#define SCB_DATAPTR 0xac + +#define SCB_DATACNT 0xb0 +#define SG_HIGH_ADDR_BITS 0x7f +#define SG_LAST_SEG 0x80 + +#define SCB_SGPTR 0xb4 +#define 
SG_RESID_VALID 0x04 +#define SG_FULL_RESID 0x02 +#define SG_LIST_NULL 0x01 + +#define SCB_CONTROL 0xb8 +#define SCB_TAG_TYPE 0x03 +#define TARGET_SCB 0x80 +#define STATUS_RCVD 0x80 +#define DISCENB 0x40 +#define TAG_ENB 0x20 +#define MK_MESSAGE 0x10 +#define ULTRAENB 0x08 +#define DISCONNECTED 0x04 + +#define SCB_SCSIID 0xb9 +#define TID 0xf0 +#define TWIN_TID 0x70 +#define OID 0x0f +#define TWIN_CHNLB 0x80 + +#define SCB_LUN 0xba +#define LID 0x3f +#define SCB_XFERLEN_ODD 0x80 + +#define SCB_TAG 0xbb + +#define SCB_CDB_LEN 0xbc + +#define SCB_SCSIRATE 0xbd + +#define SCB_SCSIOFFSET 0xbe + +#define SCB_NEXT 0xbf + +#define SCB_64_SPARE 0xc0 + +#define SEECTL_2840 0xc0 +#define CS_2840 0x04 +#define CK_2840 0x02 +#define DO_2840 0x01 + +#define STATUS_2840 0xc1 +#define BIOS_SEL 0x60 +#define ADSEL 0x1e +#define EEPROM_TF 0x80 +#define DI_2840 0x01 + +#define SCB_64_BTT 0xd0 + +#define CCHADDR 0xe0 + +#define CCHCNT 0xe8 + +#define CCSGRAM 0xe9 + +#define CCSGADDR 0xea + +#define CCSGCTL 0xeb +#define CCSGDONE 0x80 +#define CCSGEN 0x08 +#define SG_FETCH_NEEDED 0x02 +#define CCSGRESET 0x01 + +#define CCSCBRAM 0xec + +#define CCSCBADDR 0xed + +#define CCSCBCTL 0xee +#define CCSCBDONE 0x80 +#define ARRDONE 0x40 +#define CCARREN 0x10 +#define CCSCBEN 0x08 +#define CCSCBDIR 0x04 +#define CCSCBRESET 0x01 + +#define CCSCBCNT 0xef + +#define SCBBADDR 0xf0 + +#define CCSCBPTR 0xf1 + +#define HNSCB_QOFF 0xf4 + +#define SNSCB_QOFF 0xf6 + +#define SDSCB_QOFF 0xf8 + +#define QOFF_CTLSTA 0xfa +#define SCB_QSIZE 0x07 +#define SCB_QSIZE_256 0x06 +#define SCB_AVAIL 0x40 +#define SNSCB_ROLLOVER 0x20 +#define SDSCB_ROLLOVER 0x10 + +#define DFF_THRSH 0xfb +#define WR_DFTHRSH 0x70 +#define WR_DFTHRSH_MAX 0x70 +#define WR_DFTHRSH_90 0x60 +#define WR_DFTHRSH_85 0x50 +#define WR_DFTHRSH_75 0x40 +#define WR_DFTHRSH_63 0x30 +#define WR_DFTHRSH_50 0x20 +#define WR_DFTHRSH_25 0x10 +#define RD_DFTHRSH 0x07 +#define RD_DFTHRSH_MAX 0x07 +#define RD_DFTHRSH_90 0x06 +#define RD_DFTHRSH_85 0x05 +#define RD_DFTHRSH_75 0x04 +#define RD_DFTHRSH_63 0x03 +#define RD_DFTHRSH_50 0x02 +#define RD_DFTHRSH_25 0x01 +#define RD_DFTHRSH_MIN 0x00 +#define WR_DFTHRSH_MIN 0x00 + +#define SG_CACHE_SHADOW 0xfc +#define SG_ADDR_MASK 0xf8 +#define LAST_SEG 0x02 +#define LAST_SEG_DONE 0x01 + +#define SG_CACHE_PRE 0xfc + + +#define TARGET_CMD_CMPLT 0xfe +#define MAX_OFFSET_ULTRA2 0x7f +#define MAX_OFFSET_16BIT 0x08 +#define BUS_8_BIT 0x00 +#define TID_SHIFT 0x04 +#define STATUS_QUEUE_FULL 0x28 +#define STATUS_BUSY 0x08 +#define SCB_DOWNLOAD_SIZE_64 0x30 +#define MAX_OFFSET_8BIT 0x0f +#define HOST_MAILBOX_SHIFT 0x04 +#define CCSGADDR_MAX 0x80 +#define BUS_32_BIT 0x02 +#define SG_SIZEOF 0x08 +#define SEQ_MAILBOX_SHIFT 0x00 +#define SCB_LIST_NULL 0xff +#define SCB_DOWNLOAD_SIZE 0x20 +#define CMD_GROUP_CODE_SHIFT 0x05 +#define CCSGRAM_MAXSEGS 0x10 +#define TARGET_DATA_IN 0x01 +#define STACK_SIZE 0x04 +#define SCB_UPLOAD_SIZE 0x20 +#define MAX_OFFSET 0x7f +#define HOST_MSG 0xff +#define BUS_16_BIT 0x01 + + +/* Downloaded Constant Definitions */ +#define INVERTED_CACHESIZE_MASK 0x03 +#define SG_PREFETCH_ALIGN_MASK 0x05 +#define SG_PREFETCH_ADDR_MASK 0x06 +#define QOUTFIFO_OFFSET 0x00 +#define SG_PREFETCH_CNT 0x04 +#define QINFIFO_OFFSET 0x01 +#define CACHESIZE_MASK 0x02 +#define DOWNLOAD_CONST_COUNT 0x07 + + +/* Exported Labels */ diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped new file mode 100644 index 000000000..9f9b88047 --- /dev/null +++ 
b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped @@ -0,0 +1,413 @@ +/* + * DO NOT EDIT - This file is automatically generated + * from the following source files: + * + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $ + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $ + */ + +#include "aic7xxx_osm.h" + +static const ahc_reg_parse_entry_t SCSISEQ_parse_table[] = { + { "SCSIRSTO", 0x01, 0x01 }, + { "ENAUTOATNP", 0x02, 0x02 }, + { "ENAUTOATNI", 0x04, 0x04 }, + { "ENAUTOATNO", 0x08, 0x08 }, + { "ENRSELI", 0x10, 0x10 }, + { "ENSELI", 0x20, 0x20 }, + { "ENSELO", 0x40, 0x40 }, + { "TEMODE", 0x80, 0x80 } +}; + +int +ahc_scsiseq_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SCSISEQ_parse_table, 8, "SCSISEQ", + 0x00, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SXFRCTL0_parse_table[] = { + { "CLRCHN", 0x02, 0x02 }, + { "SCAMEN", 0x04, 0x04 }, + { "SPIOEN", 0x08, 0x08 }, + { "CLRSTCNT", 0x10, 0x10 }, + { "FAST20", 0x20, 0x20 }, + { "DFPEXP", 0x40, 0x40 }, + { "DFON", 0x80, 0x80 } +}; + +int +ahc_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SXFRCTL0_parse_table, 7, "SXFRCTL0", + 0x01, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SCSISIGI_parse_table[] = { + { "ACKI", 0x01, 0x01 }, + { "REQI", 0x02, 0x02 }, + { "BSYI", 0x04, 0x04 }, + { "SELI", 0x08, 0x08 }, + { "ATNI", 0x10, 0x10 }, + { "MSGI", 0x20, 0x20 }, + { "IOI", 0x40, 0x40 }, + { "CDI", 0x80, 0x80 }, + { "P_DATAOUT", 0x00, 0x00 }, + { "P_DATAOUT_DT", 0x20, 0x20 }, + { "P_DATAIN", 0x40, 0x40 }, + { "P_DATAIN_DT", 0x60, 0x60 }, + { "P_COMMAND", 0x80, 0x80 }, + { "P_MESGOUT", 0xa0, 0xa0 }, + { "P_STATUS", 0xc0, 0xc0 }, + { "PHASE_MASK", 0xe0, 0xe0 }, + { "P_MESGIN", 0xe0, 0xe0 } +}; + +int +ahc_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SCSISIGI_parse_table, 17, "SCSISIGI", + 0x03, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SCSIRATE_parse_table[] = { + { "SINGLE_EDGE", 0x10, 0x10 }, + { "ENABLE_CRC", 0x40, 0x40 }, + { "WIDEXFER", 0x80, 0x80 }, + { "SXFR_ULTRA2", 0x0f, 0x0f }, + { "SOFS", 0x0f, 0x0f }, + { "SXFR", 0x70, 0x70 } +}; + +int +ahc_scsirate_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SCSIRATE_parse_table, 6, "SCSIRATE", + 0x04, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SSTAT0_parse_table[] = { + { "DMADONE", 0x01, 0x01 }, + { "SPIORDY", 0x02, 0x02 }, + { "SDONE", 0x04, 0x04 }, + { "SWRAP", 0x08, 0x08 }, + { "IOERR", 0x08, 0x08 }, + { "SELINGO", 0x10, 0x10 }, + { "SELDI", 0x20, 0x20 }, + { "SELDO", 0x40, 0x40 }, + { "TARGET", 0x80, 0x80 } +}; + +int +ahc_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SSTAT0_parse_table, 9, "SSTAT0", + 0x0b, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SSTAT1_parse_table[] = { + { "REQINIT", 0x01, 0x01 }, + { "PHASECHG", 0x02, 0x02 }, + { "SCSIPERR", 0x04, 0x04 }, + { "BUSFREE", 0x08, 0x08 }, + { "PHASEMIS", 0x10, 0x10 }, + { "SCSIRSTI", 0x20, 0x20 }, + { "ATNTARG", 0x40, 0x40 }, + { "SELTO", 0x80, 0x80 } +}; + +int +ahc_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SSTAT1_parse_table, 8, "SSTAT1", + 0x0c, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SSTAT2_parse_table[] = { + { "DUAL_EDGE_ERR", 0x01, 0x01 }, + { "CRCREQERR", 0x02, 0x02 }, + { "CRCENDERR", 0x04, 0x04 }, + { "CRCVALERR", 0x08, 0x08 }, + { "EXP_ACTIVE", 
0x10, 0x10 }, + { "SHVALID", 0x40, 0x40 }, + { "OVERRUN", 0x80, 0x80 }, + { "SFCNT", 0x1f, 0x1f } +}; + +int +ahc_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SSTAT2_parse_table, 8, "SSTAT2", + 0x0d, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SSTAT3_parse_table[] = { + { "OFFCNT", 0x0f, 0x0f }, + { "U2OFFCNT", 0x7f, 0x7f }, + { "SCSICNT", 0xf0, 0xf0 } +}; + +int +ahc_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SSTAT3_parse_table, 3, "SSTAT3", + 0x0e, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SIMODE0_parse_table[] = { + { "ENDMADONE", 0x01, 0x01 }, + { "ENSPIORDY", 0x02, 0x02 }, + { "ENSDONE", 0x04, 0x04 }, + { "ENSWRAP", 0x08, 0x08 }, + { "ENIOERR", 0x08, 0x08 }, + { "ENSELINGO", 0x10, 0x10 }, + { "ENSELDI", 0x20, 0x20 }, + { "ENSELDO", 0x40, 0x40 } +}; + +int +ahc_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SIMODE0_parse_table, 8, "SIMODE0", + 0x10, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SIMODE1_parse_table[] = { + { "ENREQINIT", 0x01, 0x01 }, + { "ENPHASECHG", 0x02, 0x02 }, + { "ENSCSIPERR", 0x04, 0x04 }, + { "ENBUSFREE", 0x08, 0x08 }, + { "ENPHASEMIS", 0x10, 0x10 }, + { "ENSCSIRST", 0x20, 0x20 }, + { "ENATNTARG", 0x40, 0x40 }, + { "ENSELTIMO", 0x80, 0x80 } +}; + +int +ahc_simode1_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SIMODE1_parse_table, 8, "SIMODE1", + 0x11, regvalue, cur_col, wrap)); +} + +int +ahc_scsibusl_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(NULL, 0, "SCSIBUSL", + 0x12, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SBLKCTL_parse_table[] = { + { "XCVR", 0x01, 0x01 }, + { "SELWIDE", 0x02, 0x02 }, + { "ENAB20", 0x04, 0x04 }, + { "SELBUSB", 0x08, 0x08 }, + { "ENAB40", 0x08, 0x08 }, + { "AUTOFLUSHDIS", 0x20, 0x20 }, + { "DIAGLEDON", 0x40, 0x40 }, + { "DIAGLEDEN", 0x80, 0x80 } +}; + +int +ahc_sblkctl_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SBLKCTL_parse_table, 8, "SBLKCTL", + 0x1f, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = { + { "NO_DISCONNECT", 0x01, 0x01 }, + { "SPHASE_PENDING", 0x02, 0x02 }, + { "DPHASE_PENDING", 0x04, 0x04 }, + { "CMDPHASE_PENDING", 0x08, 0x08 }, + { "TARG_CMD_PENDING", 0x10, 0x10 }, + { "DPHASE", 0x20, 0x20 }, + { "NO_CDB_SENT", 0x40, 0x40 }, + { "TARGET_CMD_IS_TAGGED",0x40, 0x40 }, + { "NOT_IDENTIFIED", 0x80, 0x80 } +}; + +int +ahc_seq_flags_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SEQ_FLAGS_parse_table, 9, "SEQ_FLAGS", + 0x3c, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t LASTPHASE_parse_table[] = { + { "MSGI", 0x20, 0x20 }, + { "IOI", 0x40, 0x40 }, + { "CDI", 0x80, 0x80 }, + { "P_DATAOUT", 0x00, 0x00 }, + { "P_BUSFREE", 0x01, 0x01 }, + { "P_DATAIN", 0x40, 0x40 }, + { "P_COMMAND", 0x80, 0x80 }, + { "P_MESGOUT", 0xa0, 0xa0 }, + { "P_STATUS", 0xc0, 0xc0 }, + { "PHASE_MASK", 0xe0, 0xe0 }, + { "P_MESGIN", 0xe0, 0xe0 } +}; + +int +ahc_lastphase_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(LASTPHASE_parse_table, 11, "LASTPHASE", + 0x3f, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SEQCTL_parse_table[] = { + { "LOADRAM", 0x01, 0x01 }, + { "SEQRESET", 0x02, 0x02 }, + { "STEP", 0x04, 0x04 }, + { "BRKADRINTEN", 0x08, 0x08 }, + { "FASTMODE", 
0x10, 0x10 }, + { "FAILDIS", 0x20, 0x20 }, + { "PAUSEDIS", 0x40, 0x40 }, + { "PERRORDIS", 0x80, 0x80 } +}; + +int +ahc_seqctl_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SEQCTL_parse_table, 8, "SEQCTL", + 0x60, regvalue, cur_col, wrap)); +} + +int +ahc_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(NULL, 0, "SRAM_BASE", + 0x70, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t ERROR_parse_table[] = { + { "ILLHADDR", 0x01, 0x01 }, + { "ILLSADDR", 0x02, 0x02 }, + { "ILLOPCODE", 0x04, 0x04 }, + { "SQPARERR", 0x08, 0x08 }, + { "DPARERR", 0x10, 0x10 }, + { "MPARERR", 0x20, 0x20 }, + { "PCIERRSTAT", 0x40, 0x40 }, + { "CIOPARERR", 0x80, 0x80 } +}; + +int +ahc_error_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(ERROR_parse_table, 8, "ERROR", + 0x92, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t DFCNTRL_parse_table[] = { + { "FIFORESET", 0x01, 0x01 }, + { "FIFOFLUSH", 0x02, 0x02 }, + { "DIRECTION", 0x04, 0x04 }, + { "HDMAEN", 0x08, 0x08 }, + { "HDMAENACK", 0x08, 0x08 }, + { "SDMAEN", 0x10, 0x10 }, + { "SDMAENACK", 0x10, 0x10 }, + { "SCSIEN", 0x20, 0x20 }, + { "WIDEODD", 0x40, 0x40 }, + { "PRELOADEN", 0x80, 0x80 } +}; + +int +ahc_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(DFCNTRL_parse_table, 10, "DFCNTRL", + 0x93, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t DFSTATUS_parse_table[] = { + { "FIFOEMP", 0x01, 0x01 }, + { "FIFOFULL", 0x02, 0x02 }, + { "DFTHRESH", 0x04, 0x04 }, + { "HDONE", 0x08, 0x08 }, + { "MREQPEND", 0x10, 0x10 }, + { "FIFOQWDEMP", 0x20, 0x20 }, + { "DFCACHETH", 0x40, 0x40 }, + { "PRELOAD_AVAIL", 0x80, 0x80 } +}; + +int +ahc_dfstatus_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(DFSTATUS_parse_table, 8, "DFSTATUS", + 0x94, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = { + { "DATA_OUT_PHASE", 0x01, 0x01 }, + { "DATA_IN_PHASE", 0x02, 0x02 }, + { "MSG_OUT_PHASE", 0x04, 0x04 }, + { "MSG_IN_PHASE", 0x08, 0x08 }, + { "COMMAND_PHASE", 0x10, 0x10 }, + { "STATUS_PHASE", 0x20, 0x20 }, + { "DATA_PHASE_MASK", 0x03, 0x03 } +}; + +int +ahc_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SCSIPHASE_parse_table, 7, "SCSIPHASE", + 0x9e, regvalue, cur_col, wrap)); +} + +int +ahc_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(NULL, 0, "SCB_BASE", + 0xa0, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = { + { "DISCONNECTED", 0x04, 0x04 }, + { "ULTRAENB", 0x08, 0x08 }, + { "MK_MESSAGE", 0x10, 0x10 }, + { "TAG_ENB", 0x20, 0x20 }, + { "DISCENB", 0x40, 0x40 }, + { "TARGET_SCB", 0x80, 0x80 }, + { "STATUS_RCVD", 0x80, 0x80 }, + { "SCB_TAG_TYPE", 0x03, 0x03 } +}; + +int +ahc_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SCB_CONTROL_parse_table, 8, "SCB_CONTROL", + 0xb8, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SCB_SCSIID_parse_table[] = { + { "TWIN_CHNLB", 0x80, 0x80 }, + { "OID", 0x0f, 0x0f }, + { "TWIN_TID", 0x70, 0x70 }, + { "TID", 0xf0, 0xf0 } +}; + +int +ahc_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SCB_SCSIID_parse_table, 4, "SCB_SCSIID", + 0xb9, regvalue, cur_col, wrap)); +} + +static const ahc_reg_parse_entry_t SCB_LUN_parse_table[] = { + { 
"SCB_XFERLEN_ODD", 0x80, 0x80 }, + { "LID", 0x3f, 0x3f } +}; + +int +ahc_scb_lun_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(SCB_LUN_parse_table, 2, "SCB_LUN", + 0xba, regvalue, cur_col, wrap)); +} + +int +ahc_scb_tag_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahc_print_register(NULL, 0, "SCB_TAG", + 0xbb, regvalue, cur_col, wrap)); +} + diff --git a/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped new file mode 100644 index 000000000..f37362bc8 --- /dev/null +++ b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped @@ -0,0 +1,1307 @@ +/* + * DO NOT EDIT - This file is automatically generated + * from the following source files: + * + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $ + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $ + */ +static const uint8_t seqprog[] = { + 0xb2, 0x00, 0x00, 0x08, + 0xf7, 0x11, 0x22, 0x08, + 0x00, 0x65, 0xee, 0x59, + 0xf7, 0x01, 0x02, 0x08, + 0xff, 0x6a, 0x24, 0x08, + 0x40, 0x00, 0x40, 0x68, + 0x08, 0x1f, 0x3e, 0x10, + 0x40, 0x00, 0x40, 0x68, + 0xff, 0x40, 0x3c, 0x60, + 0x08, 0x1f, 0x3e, 0x10, + 0x60, 0x0b, 0x42, 0x68, + 0x40, 0xfa, 0x12, 0x78, + 0x01, 0x4d, 0xc8, 0x30, + 0x00, 0x4c, 0x12, 0x70, + 0x01, 0x39, 0xa2, 0x30, + 0x00, 0x6a, 0xc2, 0x5e, + 0x01, 0x51, 0x20, 0x31, + 0x01, 0x57, 0xae, 0x00, + 0x0d, 0x6a, 0x76, 0x00, + 0x00, 0x51, 0x14, 0x5e, + 0x01, 0x51, 0xc8, 0x30, + 0x00, 0x39, 0xc8, 0x60, + 0x00, 0xbb, 0x30, 0x70, + 0xc1, 0x6a, 0xda, 0x5e, + 0x01, 0xbf, 0x72, 0x30, + 0x01, 0x40, 0x7e, 0x31, + 0x01, 0x90, 0x80, 0x30, + 0x01, 0xf6, 0xd4, 0x30, + 0x01, 0x4d, 0x9a, 0x18, + 0xfe, 0x57, 0xae, 0x08, + 0x01, 0x40, 0x20, 0x31, + 0x00, 0x65, 0xcc, 0x58, + 0x60, 0x0b, 0x40, 0x78, + 0x08, 0x6a, 0x18, 0x00, + 0x08, 0x11, 0x22, 0x00, + 0x60, 0x0b, 0x00, 0x78, + 0x40, 0x0b, 0xfa, 0x68, + 0x80, 0x0b, 0xb6, 0x78, + 0x20, 0x6a, 0x16, 0x00, + 0xa4, 0x6a, 0x06, 0x00, + 0x08, 0x6a, 0x78, 0x00, + 0x01, 0x50, 0xc8, 0x30, + 0xe0, 0x6a, 0xcc, 0x00, + 0x48, 0x6a, 0xfe, 0x5d, + 0x01, 0x6a, 0xdc, 0x01, + 0x88, 0x6a, 0xcc, 0x00, + 0x48, 0x6a, 0xfe, 0x5d, + 0x01, 0x6a, 0x26, 0x01, + 0xf0, 0x19, 0x7a, 0x08, + 0x0f, 0x18, 0xc8, 0x08, + 0x0f, 0x0f, 0xc8, 0x08, + 0x0f, 0x05, 0xc8, 0x08, + 0x00, 0x3d, 0x7a, 0x00, + 0x08, 0x1f, 0x6e, 0x78, + 0x80, 0x3d, 0x7a, 0x00, + 0x01, 0x3d, 0xd8, 0x31, + 0x01, 0x3d, 0x32, 0x31, + 0x10, 0x03, 0x4e, 0x79, + 0x00, 0x65, 0xf2, 0x58, + 0x80, 0x66, 0xae, 0x78, + 0x01, 0x66, 0xd8, 0x31, + 0x01, 0x66, 0x32, 0x31, + 0x3f, 0x66, 0x7c, 0x08, + 0x40, 0x66, 0x82, 0x68, + 0x01, 0x3c, 0x78, 0x00, + 0x10, 0x03, 0x9e, 0x78, + 0x00, 0x65, 0xf2, 0x58, + 0xe0, 0x66, 0xc8, 0x18, + 0x00, 0x65, 0xaa, 0x50, + 0xdd, 0x66, 0xc8, 0x18, + 0x00, 0x65, 0xaa, 0x48, + 0x01, 0x66, 0xd8, 0x31, + 0x01, 0x66, 0x32, 0x31, + 0x10, 0x03, 0x4e, 0x79, + 0x00, 0x65, 0xf2, 0x58, + 0x01, 0x66, 0xd8, 0x31, + 0x01, 0x66, 0x32, 0x31, + 0x01, 0x66, 0xac, 0x30, + 0x40, 0x3c, 0x78, 0x00, + 0xff, 0x6a, 0xd8, 0x01, + 0xff, 0x6a, 0x32, 0x01, + 0x10, 0x3c, 0x78, 0x00, + 0x02, 0x57, 0x40, 0x69, + 0x10, 0x03, 0x3e, 0x69, + 0x00, 0x65, 0x20, 0x41, + 0x02, 0x57, 0xae, 0x00, + 0x00, 0x65, 0x9e, 0x40, + 0x61, 0x6a, 0xda, 0x5e, + 0x08, 0x51, 0x20, 0x71, + 0x02, 0x0b, 0xb2, 0x78, + 0x00, 0x65, 0xae, 0x40, + 0x1a, 0x01, 0x02, 0x00, + 0xf0, 0x19, 0x7a, 0x08, + 0x0f, 0x0f, 0xc8, 0x08, + 0x0f, 0x05, 0xc8, 0x08, + 0x00, 0x3d, 0x7a, 0x00, + 0x08, 0x1f, 0xc4, 0x78, + 0x80, 0x3d, 0x7a, 0x00, + 0x20, 0x6a, 0x16, 0x00, + 0x00, 0x65, 0xcc, 0x41, + 0x00, 0x65, 0xb4, 0x5e, + 0x00, 0x65, 0x12, 0x40, + 0x20, 0x11, 0xd2, 0x68, + 
0x20, 0x6a, 0x18, 0x00, + 0x20, 0x11, 0x22, 0x00, + 0xf7, 0x1f, 0xca, 0x08, + 0x80, 0xb9, 0xd8, 0x78, + 0x08, 0x65, 0xca, 0x00, + 0x01, 0x65, 0x3e, 0x30, + 0x01, 0xb9, 0x1e, 0x30, + 0x7f, 0xb9, 0x0a, 0x08, + 0x01, 0xb9, 0x0a, 0x30, + 0x01, 0x54, 0xca, 0x30, + 0x80, 0xb8, 0xe6, 0x78, + 0x80, 0x65, 0xca, 0x00, + 0x01, 0x65, 0x00, 0x34, + 0x01, 0x54, 0x00, 0x34, + 0x08, 0xb8, 0xee, 0x78, + 0x20, 0x01, 0x02, 0x00, + 0x02, 0xbd, 0x08, 0x34, + 0x01, 0xbd, 0x08, 0x34, + 0x08, 0x01, 0x02, 0x00, + 0x02, 0x0b, 0xf4, 0x78, + 0xf7, 0x01, 0x02, 0x08, + 0x01, 0x06, 0xcc, 0x34, + 0xb2, 0x00, 0x00, 0x08, + 0x01, 0x40, 0x20, 0x31, + 0x01, 0xbf, 0x80, 0x30, + 0x01, 0xb9, 0x7a, 0x30, + 0x3f, 0xba, 0x7c, 0x08, + 0x00, 0x65, 0xea, 0x58, + 0x80, 0x0b, 0xc4, 0x79, + 0x12, 0x01, 0x02, 0x00, + 0x01, 0xab, 0xac, 0x30, + 0xe4, 0x6a, 0x70, 0x5d, + 0x40, 0x6a, 0x16, 0x00, + 0x80, 0x3e, 0x86, 0x5d, + 0x20, 0xb8, 0x18, 0x79, + 0x20, 0x6a, 0x86, 0x5d, + 0x00, 0xab, 0x86, 0x5d, + 0x01, 0xa9, 0x78, 0x30, + 0x10, 0xb8, 0x20, 0x79, + 0xe4, 0x6a, 0x70, 0x5d, + 0x00, 0x65, 0xae, 0x40, + 0x10, 0x03, 0x3c, 0x69, + 0x08, 0x3c, 0x5a, 0x69, + 0x04, 0x3c, 0x92, 0x69, + 0x02, 0x3c, 0x98, 0x69, + 0x01, 0x3c, 0x44, 0x79, + 0xff, 0x6a, 0xa6, 0x00, + 0x00, 0x65, 0xa4, 0x59, + 0x00, 0x6a, 0xc2, 0x5e, + 0xff, 0x53, 0x30, 0x71, + 0x0d, 0x6a, 0x76, 0x00, + 0x00, 0x53, 0x14, 0x5e, + 0x00, 0x65, 0xea, 0x58, + 0x12, 0x01, 0x02, 0x00, + 0x00, 0x65, 0x18, 0x41, + 0xa4, 0x6a, 0x06, 0x00, + 0x00, 0x65, 0xf2, 0x58, + 0xfd, 0x57, 0xae, 0x08, + 0x00, 0x65, 0xae, 0x40, + 0xe4, 0x6a, 0x70, 0x5d, + 0x20, 0x3c, 0x4a, 0x79, + 0x02, 0x6a, 0x86, 0x5d, + 0x04, 0x6a, 0x86, 0x5d, + 0x01, 0x03, 0x4c, 0x69, + 0xf7, 0x11, 0x22, 0x08, + 0xff, 0x6a, 0x24, 0x08, + 0xff, 0x6a, 0x06, 0x08, + 0x01, 0x6a, 0x7e, 0x00, + 0x00, 0x65, 0xa4, 0x59, + 0x00, 0x65, 0x04, 0x40, + 0x80, 0x86, 0xc8, 0x08, + 0x01, 0x4f, 0xc8, 0x30, + 0x00, 0x50, 0x6c, 0x61, + 0xc4, 0x6a, 0x70, 0x5d, + 0x40, 0x3c, 0x68, 0x79, + 0x28, 0x6a, 0x86, 0x5d, + 0x00, 0x65, 0x4c, 0x41, + 0x08, 0x6a, 0x86, 0x5d, + 0x00, 0x65, 0x4c, 0x41, + 0x84, 0x6a, 0x70, 0x5d, + 0x00, 0x65, 0xf2, 0x58, + 0x01, 0x66, 0xc8, 0x30, + 0x01, 0x64, 0xd8, 0x31, + 0x01, 0x64, 0x32, 0x31, + 0x5b, 0x64, 0xc8, 0x28, + 0x30, 0x64, 0xca, 0x18, + 0x01, 0x6c, 0xc8, 0x30, + 0xff, 0x64, 0x8e, 0x79, + 0x08, 0x01, 0x02, 0x00, + 0x02, 0x0b, 0x80, 0x79, + 0x01, 0x64, 0x86, 0x61, + 0xf7, 0x01, 0x02, 0x08, + 0x01, 0x06, 0xd8, 0x31, + 0x01, 0x06, 0x32, 0x31, + 0xff, 0x64, 0xc8, 0x18, + 0xff, 0x64, 0x80, 0x69, + 0xf7, 0x3c, 0x78, 0x08, + 0x00, 0x65, 0x20, 0x41, + 0x40, 0xaa, 0x7e, 0x10, + 0x04, 0xaa, 0x70, 0x5d, + 0x00, 0x65, 0x58, 0x42, + 0xc4, 0x6a, 0x70, 0x5d, + 0xc0, 0x6a, 0x7e, 0x00, + 0x00, 0xa8, 0x86, 0x5d, + 0xe4, 0x6a, 0x06, 0x00, + 0x00, 0x6a, 0x86, 0x5d, + 0x00, 0x65, 0x4c, 0x41, + 0x10, 0x3c, 0xa8, 0x69, + 0x00, 0xbb, 0x8c, 0x44, + 0x18, 0x6a, 0xda, 0x01, + 0x01, 0x69, 0xd8, 0x31, + 0x1c, 0x6a, 0xd0, 0x01, + 0x09, 0xee, 0xdc, 0x01, + 0x80, 0xee, 0xb0, 0x79, + 0xff, 0x6a, 0xdc, 0x09, + 0x01, 0x93, 0x26, 0x01, + 0x03, 0x6a, 0x2a, 0x01, + 0x01, 0x69, 0x32, 0x31, + 0x1c, 0x6a, 0xe2, 0x5d, + 0x0a, 0x93, 0x26, 0x01, + 0x00, 0x65, 0xaa, 0x5e, + 0x01, 0x50, 0xa0, 0x18, + 0x02, 0x6a, 0x22, 0x05, + 0x1a, 0x01, 0x02, 0x00, + 0x80, 0x6a, 0x74, 0x00, + 0x40, 0x6a, 0x78, 0x00, + 0x40, 0x6a, 0x16, 0x00, + 0x00, 0x65, 0xda, 0x5d, + 0x01, 0x3f, 0xc8, 0x30, + 0xbf, 0x64, 0x58, 0x7a, + 0x80, 0x64, 0xa0, 0x73, + 0xa0, 0x64, 0x02, 0x74, + 0xc0, 0x64, 0xf6, 0x73, + 0xe0, 0x64, 0x32, 0x74, + 0x01, 0x6a, 0xda, 0x5e, + 0x00, 0x65, 0xcc, 0x41, + 0xf7, 0x11, 0x22, 
0x08, + 0x01, 0x06, 0xd4, 0x30, + 0xff, 0x6a, 0x24, 0x08, + 0xf7, 0x01, 0x02, 0x08, + 0xc0, 0x6a, 0x78, 0x00, + 0x09, 0x0c, 0xe8, 0x79, + 0x08, 0x0c, 0x04, 0x68, + 0xb1, 0x6a, 0xda, 0x5e, + 0xff, 0x6a, 0x26, 0x09, + 0x12, 0x01, 0x02, 0x00, + 0x02, 0x6a, 0x08, 0x30, + 0xff, 0x6a, 0x08, 0x08, + 0xdf, 0x01, 0x02, 0x08, + 0x01, 0x6a, 0x7e, 0x00, + 0xc0, 0x6a, 0x78, 0x04, + 0xff, 0x6a, 0xc8, 0x08, + 0x08, 0xa4, 0x48, 0x19, + 0x00, 0xa5, 0x4a, 0x21, + 0x00, 0xa6, 0x4c, 0x21, + 0x00, 0xa7, 0x4e, 0x25, + 0x08, 0xeb, 0xde, 0x7e, + 0x80, 0xeb, 0x08, 0x7a, + 0xff, 0x6a, 0xd6, 0x09, + 0x08, 0xeb, 0x0c, 0x6a, + 0xff, 0x6a, 0xd4, 0x0c, + 0x80, 0xa3, 0xde, 0x6e, + 0x88, 0xeb, 0x22, 0x72, + 0x08, 0xeb, 0xde, 0x6e, + 0x04, 0xea, 0x26, 0xe2, + 0x08, 0xee, 0xde, 0x6e, + 0x04, 0x6a, 0xd0, 0x81, + 0x05, 0xa4, 0xc0, 0x89, + 0x03, 0xa5, 0xc2, 0x31, + 0x09, 0x6a, 0xd6, 0x05, + 0x00, 0x65, 0x0a, 0x5a, + 0x06, 0xa4, 0xd4, 0x89, + 0x80, 0x94, 0xde, 0x7e, + 0x07, 0xe9, 0x10, 0x31, + 0x01, 0xe9, 0x46, 0x31, + 0x00, 0xa3, 0xbc, 0x5e, + 0x00, 0x65, 0xfc, 0x59, + 0x01, 0xa4, 0xca, 0x30, + 0x80, 0xa3, 0x36, 0x7a, + 0x02, 0x65, 0xca, 0x00, + 0x01, 0x65, 0xf8, 0x31, + 0x80, 0x93, 0x26, 0x01, + 0xff, 0x6a, 0xd4, 0x0c, + 0x01, 0x8c, 0xc8, 0x30, + 0x00, 0x88, 0xc8, 0x18, + 0x02, 0x64, 0xc8, 0x88, + 0xff, 0x64, 0xde, 0x7e, + 0xff, 0x8d, 0x4c, 0x6a, + 0xff, 0x8e, 0x4c, 0x6a, + 0x03, 0x8c, 0xd4, 0x98, + 0x00, 0x65, 0xde, 0x56, + 0x01, 0x64, 0x70, 0x30, + 0xff, 0x64, 0xc8, 0x10, + 0x01, 0x64, 0xc8, 0x18, + 0x00, 0x8c, 0x18, 0x19, + 0xff, 0x8d, 0x1a, 0x21, + 0xff, 0x8e, 0x1c, 0x25, + 0xc0, 0x3c, 0x5c, 0x7a, + 0x21, 0x6a, 0xda, 0x5e, + 0xa8, 0x6a, 0x76, 0x00, + 0x79, 0x6a, 0x76, 0x00, + 0x40, 0x3f, 0x64, 0x6a, + 0x04, 0x3b, 0x76, 0x00, + 0x04, 0x6a, 0xd4, 0x81, + 0x20, 0x3c, 0x6c, 0x7a, + 0x51, 0x6a, 0xda, 0x5e, + 0x00, 0x65, 0x84, 0x42, + 0x20, 0x3c, 0x78, 0x00, + 0x00, 0xb3, 0xbc, 0x5e, + 0x07, 0xac, 0x10, 0x31, + 0x05, 0xb3, 0x46, 0x31, + 0x88, 0x6a, 0xcc, 0x00, + 0xac, 0x6a, 0xf0, 0x5d, + 0xa3, 0x6a, 0xcc, 0x00, + 0xb3, 0x6a, 0xf4, 0x5d, + 0x00, 0x65, 0x3c, 0x5a, + 0xfd, 0xa4, 0x48, 0x09, + 0x03, 0x8c, 0x10, 0x30, + 0x00, 0x65, 0xe8, 0x5d, + 0x01, 0xa4, 0x96, 0x7a, + 0x04, 0x3b, 0x76, 0x08, + 0x01, 0x3b, 0x26, 0x31, + 0x80, 0x02, 0x04, 0x00, + 0x10, 0x0c, 0x8c, 0x7a, + 0x03, 0x9e, 0x8e, 0x6a, + 0x7f, 0x02, 0x04, 0x08, + 0x91, 0x6a, 0xda, 0x5e, + 0x00, 0x65, 0xcc, 0x41, + 0x01, 0xa4, 0xca, 0x30, + 0x80, 0xa3, 0x9c, 0x7a, + 0x02, 0x65, 0xca, 0x00, + 0x01, 0x65, 0xf8, 0x31, + 0x01, 0x3b, 0x26, 0x31, + 0x00, 0x65, 0x10, 0x5a, + 0x01, 0xfc, 0xaa, 0x6a, + 0x80, 0x0b, 0xa0, 0x6a, + 0x10, 0x0c, 0xa0, 0x7a, + 0x20, 0x93, 0xa0, 0x6a, + 0x02, 0x93, 0x26, 0x01, + 0x02, 0xfc, 0xb4, 0x7a, + 0x40, 0x0d, 0xc8, 0x6a, + 0x01, 0xa4, 0x48, 0x01, + 0x00, 0x65, 0xc8, 0x42, + 0x40, 0x0d, 0xba, 0x6a, + 0x00, 0x65, 0x10, 0x5a, + 0x00, 0x65, 0xac, 0x42, + 0x80, 0xfc, 0xc4, 0x7a, + 0x80, 0xa4, 0xc4, 0x6a, + 0xff, 0xa5, 0x4a, 0x19, + 0xff, 0xa6, 0x4c, 0x21, + 0xff, 0xa7, 0x4e, 0x21, + 0xf8, 0xfc, 0x48, 0x09, + 0x7f, 0xa3, 0x46, 0x09, + 0x04, 0x3b, 0xe4, 0x6a, + 0x02, 0x93, 0x26, 0x01, + 0x01, 0x94, 0xca, 0x7a, + 0x01, 0x94, 0xca, 0x7a, + 0x01, 0x94, 0xca, 0x7a, + 0x01, 0x94, 0xca, 0x7a, + 0x01, 0x94, 0xca, 0x7a, + 0x01, 0xa4, 0xe2, 0x7a, + 0x01, 0xfc, 0xd8, 0x7a, + 0x01, 0x94, 0xe4, 0x6a, + 0x01, 0x94, 0xe4, 0x6a, + 0x01, 0x94, 0xe4, 0x6a, + 0x00, 0x65, 0x84, 0x42, + 0x01, 0x94, 0xe2, 0x7a, + 0x10, 0x94, 0xe4, 0x6a, + 0xd7, 0x93, 0x26, 0x09, + 0x28, 0x93, 0xe8, 0x6a, + 0x01, 0x85, 0x0a, 0x01, + 0x02, 0xfc, 0xf0, 0x6a, + 0x01, 0x14, 0x46, 0x31, + 0xff, 
0x6a, 0x10, 0x09, + 0xfe, 0x85, 0x0a, 0x09, + 0xff, 0x38, 0xfe, 0x6a, + 0x80, 0xa3, 0xfe, 0x7a, + 0x80, 0x0b, 0xfc, 0x7a, + 0x04, 0x3b, 0xfe, 0x7a, + 0xbf, 0x3b, 0x76, 0x08, + 0x01, 0x3b, 0x26, 0x31, + 0x00, 0x65, 0x10, 0x5a, + 0x01, 0x0b, 0x0c, 0x6b, + 0x10, 0x0c, 0x00, 0x7b, + 0x04, 0x93, 0x0a, 0x6b, + 0x01, 0x94, 0x08, 0x7b, + 0x10, 0x94, 0x0a, 0x6b, + 0xc7, 0x93, 0x26, 0x09, + 0x01, 0x99, 0xd4, 0x30, + 0x38, 0x93, 0x0e, 0x6b, + 0xff, 0x08, 0x5c, 0x6b, + 0xff, 0x09, 0x5c, 0x6b, + 0xff, 0x0a, 0x5c, 0x6b, + 0xff, 0x38, 0x2a, 0x7b, + 0x04, 0x14, 0x10, 0x31, + 0x01, 0x38, 0x18, 0x31, + 0x02, 0x6a, 0x1a, 0x31, + 0x88, 0x6a, 0xcc, 0x00, + 0x14, 0x6a, 0xf6, 0x5d, + 0x00, 0x38, 0xe2, 0x5d, + 0xff, 0x6a, 0x70, 0x08, + 0x00, 0x65, 0x56, 0x43, + 0x80, 0xa3, 0x30, 0x7b, + 0x01, 0xa4, 0x48, 0x01, + 0x00, 0x65, 0x5c, 0x43, + 0x08, 0xeb, 0x36, 0x7b, + 0x00, 0x65, 0x10, 0x5a, + 0x08, 0xeb, 0x32, 0x6b, + 0x07, 0xe9, 0x10, 0x31, + 0x01, 0xe9, 0xca, 0x30, + 0x01, 0x65, 0x46, 0x31, + 0x00, 0x6a, 0xbc, 0x5e, + 0x88, 0x6a, 0xcc, 0x00, + 0xa4, 0x6a, 0xf6, 0x5d, + 0x08, 0x6a, 0xe2, 0x5d, + 0x0d, 0x93, 0x26, 0x01, + 0x00, 0x65, 0xaa, 0x5e, + 0x88, 0x6a, 0xcc, 0x00, + 0x00, 0x65, 0x8c, 0x5e, + 0x01, 0x99, 0x46, 0x31, + 0x00, 0xa3, 0xbc, 0x5e, + 0x01, 0x88, 0x10, 0x31, + 0x00, 0x65, 0x3c, 0x5a, + 0x00, 0x65, 0xfc, 0x59, + 0x03, 0x8c, 0x10, 0x30, + 0x00, 0x65, 0xe8, 0x5d, + 0x80, 0x0b, 0x84, 0x6a, + 0x80, 0x0b, 0x64, 0x6b, + 0x01, 0x0c, 0x5e, 0x7b, + 0x10, 0x0c, 0x84, 0x7a, + 0x03, 0x9e, 0x84, 0x6a, + 0x00, 0x65, 0x06, 0x5a, + 0x00, 0x6a, 0xbc, 0x5e, + 0x01, 0xa4, 0x84, 0x6b, + 0xff, 0x38, 0x7a, 0x7b, + 0x01, 0x38, 0xc8, 0x30, + 0x00, 0x08, 0x40, 0x19, + 0xff, 0x6a, 0xc8, 0x08, + 0x00, 0x09, 0x42, 0x21, + 0x00, 0x0a, 0x44, 0x21, + 0xff, 0x6a, 0x70, 0x08, + 0x00, 0x65, 0x7c, 0x43, + 0x03, 0x08, 0x40, 0x31, + 0x03, 0x08, 0x40, 0x31, + 0x01, 0x08, 0x40, 0x31, + 0x01, 0x09, 0x42, 0x31, + 0x01, 0x0a, 0x44, 0x31, + 0xfd, 0xb4, 0x68, 0x09, + 0x12, 0x01, 0x02, 0x00, + 0x12, 0x01, 0x02, 0x00, + 0x04, 0x3c, 0xcc, 0x79, + 0xfb, 0x3c, 0x78, 0x08, + 0x04, 0x93, 0x20, 0x79, + 0x01, 0x0c, 0x90, 0x6b, + 0x80, 0xba, 0x20, 0x79, + 0x80, 0x04, 0x20, 0x79, + 0xe4, 0x6a, 0x70, 0x5d, + 0x23, 0x6a, 0x86, 0x5d, + 0x01, 0x6a, 0x86, 0x5d, + 0x00, 0x65, 0x20, 0x41, + 0x00, 0x65, 0xcc, 0x41, + 0x80, 0x3c, 0xa4, 0x7b, + 0x21, 0x6a, 0xda, 0x5e, + 0x01, 0xbc, 0x18, 0x31, + 0x02, 0x6a, 0x1a, 0x31, + 0x02, 0x6a, 0xf8, 0x01, + 0x01, 0xbc, 0x10, 0x30, + 0x02, 0x6a, 0x12, 0x30, + 0x01, 0xbc, 0x10, 0x30, + 0xff, 0x6a, 0x12, 0x08, + 0xff, 0x6a, 0x14, 0x08, + 0xf3, 0xbc, 0xd4, 0x18, + 0xa0, 0x6a, 0xca, 0x53, + 0x04, 0xa0, 0x10, 0x31, + 0xac, 0x6a, 0x26, 0x01, + 0x04, 0xa0, 0x10, 0x31, + 0x03, 0x08, 0x18, 0x31, + 0x88, 0x6a, 0xcc, 0x00, + 0xa0, 0x6a, 0xf6, 0x5d, + 0x00, 0xbc, 0xe2, 0x5d, + 0x3d, 0x6a, 0x26, 0x01, + 0x00, 0x65, 0xe2, 0x43, + 0xff, 0x6a, 0x10, 0x09, + 0xa4, 0x6a, 0x26, 0x01, + 0x0c, 0xa0, 0x32, 0x31, + 0x05, 0x6a, 0x26, 0x01, + 0x35, 0x6a, 0x26, 0x01, + 0x0c, 0xa0, 0x32, 0x31, + 0x36, 0x6a, 0x26, 0x01, + 0x02, 0x93, 0x26, 0x01, + 0x35, 0x6a, 0x26, 0x01, + 0x00, 0x65, 0x9e, 0x5e, + 0x00, 0x65, 0x9e, 0x5e, + 0x02, 0x93, 0x26, 0x01, + 0xbf, 0x3c, 0x78, 0x08, + 0x04, 0x0b, 0xe8, 0x6b, + 0x10, 0x0c, 0xe4, 0x7b, + 0x01, 0x03, 0xe8, 0x6b, + 0x20, 0x93, 0xea, 0x6b, + 0x04, 0x0b, 0xf0, 0x6b, + 0x40, 0x3c, 0x78, 0x00, + 0xc7, 0x93, 0x26, 0x09, + 0x38, 0x93, 0xf2, 0x6b, + 0x00, 0x65, 0xcc, 0x41, + 0x80, 0x3c, 0x58, 0x6c, + 0x01, 0x06, 0x50, 0x31, + 0x80, 0xb8, 0x70, 0x01, + 0x00, 0x65, 0xcc, 0x41, + 0x10, 0x3f, 0x06, 0x00, + 0x10, 0x6a, 0x06, 0x00, 
+ 0x01, 0x3a, 0xca, 0x30, + 0x80, 0x65, 0x1e, 0x64, + 0x10, 0xb8, 0x42, 0x6c, + 0xc0, 0x3e, 0xca, 0x00, + 0x40, 0xb8, 0x0e, 0x6c, + 0xbf, 0x65, 0xca, 0x08, + 0x20, 0xb8, 0x22, 0x7c, + 0x01, 0x65, 0x0c, 0x30, + 0x00, 0x65, 0xda, 0x5d, + 0xa0, 0x3f, 0x2a, 0x64, + 0x23, 0xb8, 0x0c, 0x08, + 0x00, 0x65, 0xda, 0x5d, + 0xa0, 0x3f, 0x2a, 0x64, + 0x00, 0xbb, 0x22, 0x44, + 0xff, 0x65, 0x22, 0x64, + 0x00, 0x65, 0x42, 0x44, + 0x40, 0x6a, 0x18, 0x00, + 0x01, 0x65, 0x0c, 0x30, + 0x00, 0x65, 0xda, 0x5d, + 0xa0, 0x3f, 0xfe, 0x73, + 0x40, 0x6a, 0x18, 0x00, + 0x01, 0x3a, 0xa6, 0x30, + 0x08, 0x6a, 0x74, 0x00, + 0x00, 0x65, 0xcc, 0x41, + 0x64, 0x6a, 0x6a, 0x5d, + 0x80, 0x64, 0xda, 0x6c, + 0x04, 0x64, 0x9c, 0x74, + 0x02, 0x64, 0xac, 0x74, + 0x00, 0x6a, 0x62, 0x74, + 0x03, 0x64, 0xca, 0x74, + 0x23, 0x64, 0x4a, 0x74, + 0x08, 0x64, 0x5e, 0x74, + 0x61, 0x6a, 0xda, 0x5e, + 0x00, 0x65, 0xda, 0x5d, + 0x08, 0x51, 0xce, 0x71, + 0x00, 0x65, 0x42, 0x44, + 0x80, 0x04, 0x5c, 0x7c, + 0x51, 0x6a, 0x60, 0x5d, + 0x01, 0x51, 0x5c, 0x64, + 0x01, 0xa4, 0x54, 0x7c, + 0x80, 0xba, 0x5e, 0x6c, + 0x41, 0x6a, 0xda, 0x5e, + 0x00, 0x65, 0x5e, 0x44, + 0x21, 0x6a, 0xda, 0x5e, + 0x00, 0x65, 0x5e, 0x44, + 0x07, 0x6a, 0x56, 0x5d, + 0x01, 0x06, 0xd4, 0x30, + 0x00, 0x65, 0xcc, 0x41, + 0x80, 0xb8, 0x58, 0x7c, + 0xc0, 0x3c, 0x6c, 0x7c, + 0x80, 0x3c, 0x58, 0x6c, + 0xff, 0xa8, 0x6c, 0x6c, + 0x40, 0x3c, 0x58, 0x6c, + 0x10, 0xb8, 0x70, 0x7c, + 0xa1, 0x6a, 0xda, 0x5e, + 0x01, 0xb4, 0x76, 0x6c, + 0x02, 0xb4, 0x78, 0x6c, + 0x01, 0xa4, 0x78, 0x7c, + 0xff, 0xa8, 0x88, 0x7c, + 0x04, 0xb4, 0x68, 0x01, + 0x01, 0x6a, 0x76, 0x00, + 0x00, 0xbb, 0x14, 0x5e, + 0xff, 0xa8, 0x88, 0x7c, + 0x71, 0x6a, 0xda, 0x5e, + 0x40, 0x51, 0x88, 0x64, + 0x00, 0x65, 0xb4, 0x5e, + 0x00, 0x65, 0xde, 0x41, + 0x00, 0xbb, 0x8c, 0x5c, + 0x00, 0x65, 0xde, 0x41, + 0x00, 0x65, 0xb4, 0x5e, + 0x01, 0x65, 0xa2, 0x30, + 0x01, 0xf8, 0xc8, 0x30, + 0x01, 0x4e, 0xc8, 0x30, + 0x00, 0x6a, 0xb8, 0xdd, + 0x00, 0x51, 0xca, 0x5d, + 0x01, 0x4e, 0x9c, 0x18, + 0x02, 0x6a, 0x22, 0x05, + 0xc0, 0x3c, 0x58, 0x6c, + 0x04, 0xb8, 0x70, 0x01, + 0x00, 0x65, 0xd6, 0x5e, + 0x20, 0xb8, 0xde, 0x69, + 0x01, 0xbb, 0xa2, 0x30, + 0x3f, 0xba, 0x7c, 0x08, + 0x00, 0xb9, 0xd0, 0x5c, + 0x00, 0x65, 0xde, 0x41, + 0x01, 0x06, 0xd4, 0x30, + 0x20, 0x3c, 0xcc, 0x79, + 0x20, 0x3c, 0x5e, 0x7c, + 0x01, 0xa4, 0xba, 0x7c, + 0x01, 0xb4, 0x68, 0x01, + 0x00, 0x65, 0xcc, 0x41, + 0x00, 0x65, 0x5e, 0x44, + 0x04, 0x14, 0x58, 0x31, + 0x01, 0x06, 0xd4, 0x30, + 0x08, 0xa0, 0x60, 0x31, + 0xac, 0x6a, 0xcc, 0x00, + 0x14, 0x6a, 0xf6, 0x5d, + 0x01, 0x06, 0xd4, 0x30, + 0xa0, 0x6a, 0xee, 0x5d, + 0x00, 0x65, 0xcc, 0x41, + 0xdf, 0x3c, 0x78, 0x08, + 0x12, 0x01, 0x02, 0x00, + 0x00, 0x65, 0x5e, 0x44, + 0x4c, 0x65, 0xcc, 0x28, + 0x01, 0x3e, 0x20, 0x31, + 0xd0, 0x66, 0xcc, 0x18, + 0x20, 0x66, 0xcc, 0x18, + 0x01, 0x51, 0xda, 0x34, + 0x4c, 0x3d, 0xca, 0x28, + 0x3f, 0x64, 0x7c, 0x08, + 0xd0, 0x65, 0xca, 0x18, + 0x01, 0x3e, 0x20, 0x31, + 0x30, 0x65, 0xd4, 0x18, + 0x00, 0x65, 0xe8, 0x4c, + 0xe1, 0x6a, 0x22, 0x01, + 0xff, 0x6a, 0xd4, 0x08, + 0x20, 0x65, 0xd4, 0x18, + 0x00, 0x65, 0xf0, 0x54, + 0xe1, 0x6a, 0x22, 0x01, + 0xff, 0x6a, 0xd4, 0x08, + 0x20, 0x65, 0xca, 0x18, + 0xe0, 0x65, 0xd4, 0x18, + 0x00, 0x65, 0xfa, 0x4c, + 0xe1, 0x6a, 0x22, 0x01, + 0xff, 0x6a, 0xd4, 0x08, + 0xd0, 0x65, 0xd4, 0x18, + 0x00, 0x65, 0x02, 0x55, + 0xe1, 0x6a, 0x22, 0x01, + 0xff, 0x6a, 0xd4, 0x08, + 0x01, 0x6c, 0xa2, 0x30, + 0xff, 0x51, 0x14, 0x75, + 0x00, 0x51, 0x90, 0x5d, + 0x01, 0x51, 0x20, 0x31, + 0x00, 0x65, 0x36, 0x45, + 0x3f, 0xba, 0xc8, 0x08, + 0x00, 0x3e, 0x36, 0x75, + 0x00, 0x65, 
0xb2, 0x5e, + 0x80, 0x3c, 0x78, 0x00, + 0x01, 0x06, 0xd4, 0x30, + 0x00, 0x65, 0xda, 0x5d, + 0x01, 0x3c, 0x78, 0x00, + 0xe0, 0x3f, 0x52, 0x65, + 0x02, 0x3c, 0x78, 0x00, + 0x20, 0x12, 0x52, 0x65, + 0x51, 0x6a, 0x60, 0x5d, + 0x00, 0x51, 0x90, 0x5d, + 0x51, 0x6a, 0x60, 0x5d, + 0x01, 0x51, 0x20, 0x31, + 0x04, 0x3c, 0x78, 0x00, + 0x01, 0xb9, 0xc8, 0x30, + 0x00, 0x3d, 0x50, 0x65, + 0x08, 0x3c, 0x78, 0x00, + 0x3f, 0xba, 0xc8, 0x08, + 0x00, 0x3e, 0x50, 0x65, + 0x10, 0x3c, 0x78, 0x00, + 0x04, 0xb8, 0x50, 0x7d, + 0xfb, 0xb8, 0x70, 0x09, + 0x20, 0xb8, 0x46, 0x6d, + 0x01, 0x90, 0xc8, 0x30, + 0xff, 0x6a, 0xa2, 0x00, + 0x00, 0x3d, 0xd0, 0x5c, + 0x01, 0x64, 0x20, 0x31, + 0xff, 0x6a, 0x78, 0x08, + 0x00, 0x65, 0xea, 0x58, + 0x10, 0xb8, 0x5e, 0x7c, + 0xff, 0x6a, 0x56, 0x5d, + 0x00, 0x65, 0x5e, 0x44, + 0x00, 0x65, 0xb2, 0x5e, + 0x31, 0x6a, 0xda, 0x5e, + 0x00, 0x65, 0x5e, 0x44, + 0x10, 0x3f, 0x06, 0x00, + 0x10, 0x6a, 0x06, 0x00, + 0x01, 0x65, 0x74, 0x34, + 0x81, 0x6a, 0xda, 0x5e, + 0x00, 0x65, 0x62, 0x45, + 0x01, 0x06, 0xd4, 0x30, + 0x01, 0x0c, 0x62, 0x7d, + 0x04, 0x0c, 0x5c, 0x6d, + 0xe0, 0x03, 0x7e, 0x08, + 0xe0, 0x3f, 0xcc, 0x61, + 0x01, 0x65, 0xcc, 0x30, + 0x01, 0x12, 0xda, 0x34, + 0x01, 0x06, 0xd4, 0x34, + 0x01, 0x03, 0x70, 0x6d, + 0x40, 0x03, 0xcc, 0x08, + 0x01, 0x65, 0x06, 0x30, + 0x40, 0x65, 0xc8, 0x08, + 0x00, 0x66, 0x7e, 0x75, + 0x40, 0x65, 0x7e, 0x7d, + 0x00, 0x65, 0x7e, 0x5d, + 0xff, 0x6a, 0xd4, 0x08, + 0xff, 0x6a, 0xd4, 0x08, + 0xff, 0x6a, 0xd4, 0x08, + 0xff, 0x6a, 0xd4, 0x0c, + 0x08, 0x01, 0x02, 0x00, + 0x02, 0x0b, 0x88, 0x7d, + 0x01, 0x65, 0x0c, 0x30, + 0x02, 0x0b, 0x8c, 0x7d, + 0xf7, 0x01, 0x02, 0x0c, + 0x01, 0x65, 0xc8, 0x30, + 0xff, 0x41, 0xb0, 0x75, + 0x01, 0x41, 0x20, 0x31, + 0xff, 0x6a, 0xa4, 0x00, + 0x00, 0x65, 0xa0, 0x45, + 0xff, 0xbf, 0xb0, 0x75, + 0x01, 0x90, 0xa4, 0x30, + 0x01, 0xbf, 0x20, 0x31, + 0x00, 0xbb, 0x9a, 0x65, + 0xff, 0x52, 0xae, 0x75, + 0x01, 0xbf, 0xcc, 0x30, + 0x01, 0x90, 0xca, 0x30, + 0x01, 0x52, 0x20, 0x31, + 0x01, 0x66, 0x7e, 0x31, + 0x01, 0x65, 0x20, 0x35, + 0x01, 0xbf, 0x82, 0x34, + 0x01, 0x64, 0xa2, 0x30, + 0x00, 0x6a, 0xc2, 0x5e, + 0x0d, 0x6a, 0x76, 0x00, + 0x00, 0x51, 0x14, 0x46, + 0x01, 0x65, 0xa4, 0x30, + 0xe0, 0x6a, 0xcc, 0x00, + 0x48, 0x6a, 0x08, 0x5e, + 0x01, 0x6a, 0xd0, 0x01, + 0x01, 0x6a, 0xdc, 0x05, + 0x88, 0x6a, 0xcc, 0x00, + 0x48, 0x6a, 0x08, 0x5e, + 0x01, 0x6a, 0xe2, 0x5d, + 0x01, 0x6a, 0x26, 0x05, + 0x01, 0x65, 0xd8, 0x31, + 0x09, 0xee, 0xdc, 0x01, + 0x80, 0xee, 0xce, 0x7d, + 0xff, 0x6a, 0xdc, 0x0d, + 0x01, 0x65, 0x32, 0x31, + 0x0a, 0x93, 0x26, 0x01, + 0x00, 0x65, 0xaa, 0x46, + 0x81, 0x6a, 0xda, 0x5e, + 0x01, 0x0c, 0xda, 0x7d, + 0x04, 0x0c, 0xd8, 0x6d, + 0xe0, 0x03, 0x06, 0x08, + 0xe0, 0x03, 0x7e, 0x0c, + 0x01, 0x65, 0x18, 0x31, + 0xff, 0x6a, 0x1a, 0x09, + 0xff, 0x6a, 0x1c, 0x0d, + 0x01, 0x8c, 0x10, 0x30, + 0x01, 0x8d, 0x12, 0x30, + 0x01, 0x8e, 0x14, 0x34, + 0x01, 0x6c, 0xda, 0x30, + 0x01, 0x6c, 0xda, 0x30, + 0x01, 0x6c, 0xda, 0x30, + 0x01, 0x6c, 0xda, 0x30, + 0x01, 0x6c, 0xda, 0x30, + 0x01, 0x6c, 0xda, 0x30, + 0x01, 0x6c, 0xda, 0x30, + 0x01, 0x6c, 0xda, 0x34, + 0x3d, 0x64, 0xa4, 0x28, + 0x55, 0x64, 0xc8, 0x28, + 0x00, 0x65, 0x08, 0x46, + 0x2e, 0x64, 0xa4, 0x28, + 0x66, 0x64, 0xc8, 0x28, + 0x00, 0x6c, 0xda, 0x18, + 0x01, 0x52, 0xc8, 0x30, + 0x00, 0x6c, 0xda, 0x20, + 0xff, 0x6a, 0xc8, 0x08, + 0x00, 0x6c, 0xda, 0x20, + 0x00, 0x6c, 0xda, 0x24, + 0x01, 0x65, 0xc8, 0x30, + 0xe0, 0x6a, 0xcc, 0x00, + 0x44, 0x6a, 0x04, 0x5e, + 0x01, 0x90, 0xe2, 0x31, + 0x04, 0x3b, 0x28, 0x7e, + 0x30, 0x6a, 0xd0, 0x01, + 0x20, 0x6a, 0xd0, 0x01, + 0x1d, 0x6a, 0xdc, 0x01, + 
0xdc, 0xee, 0x24, 0x66, + 0x00, 0x65, 0x40, 0x46, + 0x20, 0x6a, 0xd0, 0x01, + 0x01, 0x6a, 0xdc, 0x01, + 0x20, 0xa0, 0xd8, 0x31, + 0x09, 0xee, 0xdc, 0x01, + 0x80, 0xee, 0x30, 0x7e, + 0x11, 0x6a, 0xdc, 0x01, + 0x50, 0xee, 0x34, 0x66, + 0x20, 0x6a, 0xd0, 0x01, + 0x09, 0x6a, 0xdc, 0x01, + 0x88, 0xee, 0x3a, 0x66, + 0x19, 0x6a, 0xdc, 0x01, + 0xd8, 0xee, 0x3e, 0x66, + 0xff, 0x6a, 0xdc, 0x09, + 0x18, 0xee, 0x42, 0x6e, + 0xff, 0x6a, 0xd4, 0x0c, + 0x88, 0x6a, 0xcc, 0x00, + 0x44, 0x6a, 0x04, 0x5e, + 0x20, 0x6a, 0xe2, 0x5d, + 0x01, 0x3b, 0x26, 0x31, + 0x04, 0x3b, 0x5c, 0x6e, + 0xa0, 0x6a, 0xca, 0x00, + 0x20, 0x65, 0xc8, 0x18, + 0x00, 0x65, 0x9a, 0x5e, + 0x00, 0x65, 0x54, 0x66, + 0x0a, 0x93, 0x26, 0x01, + 0x00, 0x65, 0xaa, 0x46, + 0xa0, 0x6a, 0xcc, 0x00, + 0xff, 0x6a, 0xc8, 0x08, + 0x20, 0x94, 0x60, 0x6e, + 0x10, 0x94, 0x62, 0x6e, + 0x08, 0x94, 0x7c, 0x6e, + 0x08, 0x94, 0x7c, 0x6e, + 0x08, 0x94, 0x7c, 0x6e, + 0xff, 0x8c, 0xc8, 0x10, + 0xc1, 0x64, 0xc8, 0x18, + 0xf8, 0x64, 0xc8, 0x08, + 0x01, 0x99, 0xda, 0x30, + 0x00, 0x66, 0x70, 0x66, + 0xc0, 0x66, 0xac, 0x76, + 0x60, 0x66, 0xc8, 0x18, + 0x3d, 0x64, 0xc8, 0x28, + 0x00, 0x65, 0x60, 0x46, + 0xf7, 0x93, 0x26, 0x09, + 0x08, 0x93, 0x7e, 0x6e, + 0x00, 0x62, 0xc4, 0x18, + 0x00, 0x65, 0xaa, 0x5e, + 0x00, 0x65, 0x8a, 0x5e, + 0x00, 0x65, 0x8a, 0x5e, + 0x00, 0x65, 0x8a, 0x5e, + 0x01, 0x99, 0xda, 0x30, + 0x01, 0x99, 0xda, 0x30, + 0x01, 0x99, 0xda, 0x30, + 0x01, 0x99, 0xda, 0x30, + 0x01, 0x99, 0xda, 0x30, + 0x01, 0x99, 0xda, 0x30, + 0x01, 0x99, 0xda, 0x30, + 0x01, 0x99, 0xda, 0x34, + 0x01, 0x6c, 0x32, 0x31, + 0x01, 0x6c, 0x32, 0x31, + 0x01, 0x6c, 0x32, 0x31, + 0x01, 0x6c, 0x32, 0x31, + 0x01, 0x6c, 0x32, 0x31, + 0x01, 0x6c, 0x32, 0x31, + 0x01, 0x6c, 0x32, 0x31, + 0x01, 0x6c, 0x32, 0x35, + 0x08, 0x94, 0xaa, 0x7e, + 0xf7, 0x93, 0x26, 0x09, + 0x08, 0x93, 0xae, 0x6e, + 0xff, 0x6a, 0xd4, 0x0c, + 0x04, 0xb8, 0xd6, 0x6e, + 0x01, 0x42, 0x7e, 0x31, + 0xff, 0x6a, 0x76, 0x01, + 0x01, 0x90, 0x84, 0x34, + 0xff, 0x6a, 0x76, 0x05, + 0x01, 0x85, 0x0a, 0x01, + 0x7f, 0x65, 0x10, 0x09, + 0xfe, 0x85, 0x0a, 0x0d, + 0xff, 0x42, 0xd2, 0x66, + 0xff, 0x41, 0xca, 0x66, + 0xd1, 0x6a, 0xda, 0x5e, + 0xff, 0x6a, 0xca, 0x04, + 0x01, 0x41, 0x20, 0x31, + 0x01, 0xbf, 0x82, 0x30, + 0x01, 0x6a, 0x76, 0x00, + 0x00, 0xbb, 0x14, 0x46, + 0x01, 0x42, 0x20, 0x31, + 0x01, 0xbf, 0x84, 0x34, + 0x01, 0x41, 0x7e, 0x31, + 0x01, 0x90, 0x82, 0x34, + 0x01, 0x65, 0x22, 0x31, + 0xff, 0x6a, 0xd4, 0x08, + 0xff, 0x6a, 0xd4, 0x0c +}; + +typedef int ahc_patch_func_t (struct ahc_softc *ahc); +static ahc_patch_func_t ahc_patch23_func; + +static int +ahc_patch23_func(struct ahc_softc *ahc) +{ + return ((ahc->bugs & AHC_SCBCHAN_UPLOAD_BUG) != 0); +} + +static ahc_patch_func_t ahc_patch22_func; + +static int +ahc_patch22_func(struct ahc_softc *ahc) +{ + return ((ahc->features & AHC_CMD_CHAN) == 0); +} + +static ahc_patch_func_t ahc_patch21_func; + +static int +ahc_patch21_func(struct ahc_softc *ahc) +{ + return ((ahc->features & AHC_QUEUE_REGS) == 0); +} + +static ahc_patch_func_t ahc_patch20_func; + +static int +ahc_patch20_func(struct ahc_softc *ahc) +{ + return ((ahc->features & AHC_WIDE) != 0); +} + +static ahc_patch_func_t ahc_patch19_func; + +static int +ahc_patch19_func(struct ahc_softc *ahc) +{ + return ((ahc->flags & AHC_SCB_BTT) != 0); +} + +static ahc_patch_func_t ahc_patch18_func; + +static int +ahc_patch18_func(struct ahc_softc *ahc) +{ + return ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0); +} + +static ahc_patch_func_t ahc_patch17_func; + +static int +ahc_patch17_func(struct ahc_softc *ahc) +{ + return ((ahc->bugs & 
AHC_TMODE_WIDEODD_BUG) != 0); +} + +static ahc_patch_func_t ahc_patch16_func; + +static int +ahc_patch16_func(struct ahc_softc *ahc) +{ + return ((ahc->bugs & AHC_AUTOFLUSH_BUG) != 0); +} + +static ahc_patch_func_t ahc_patch15_func; + +static int +ahc_patch15_func(struct ahc_softc *ahc) +{ + return ((ahc->features & AHC_ULTRA2) == 0); +} + +static ahc_patch_func_t ahc_patch14_func; + +static int +ahc_patch14_func(struct ahc_softc *ahc) +{ + return ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0); +} + +static ahc_patch_func_t ahc_patch13_func; + +static int +ahc_patch13_func(struct ahc_softc *ahc) +{ + return ((ahc->flags & AHC_39BIT_ADDRESSING) != 0); +} + +static ahc_patch_func_t ahc_patch12_func; + +static int +ahc_patch12_func(struct ahc_softc *ahc) +{ + return ((ahc->features & AHC_HS_MAILBOX) != 0); +} + +static ahc_patch_func_t ahc_patch11_func; + +static int +ahc_patch11_func(struct ahc_softc *ahc) +{ + return ((ahc->features & AHC_ULTRA) != 0); +} + +static ahc_patch_func_t ahc_patch10_func; + +static int +ahc_patch10_func(struct ahc_softc *ahc) +{ + return ((ahc->features & AHC_MULTI_TID) != 0); +} + +static ahc_patch_func_t ahc_patch9_func; + +static int +ahc_patch9_func(struct ahc_softc *ahc) +{ + return ((ahc->features & AHC_CMD_CHAN) != 0); +} + +static ahc_patch_func_t ahc_patch8_func; + +static int +ahc_patch8_func(struct ahc_softc *ahc) +{ + return ((ahc->flags & AHC_INITIATORROLE) != 0); +} + +static ahc_patch_func_t ahc_patch7_func; + +static int +ahc_patch7_func(struct ahc_softc *ahc) +{ + return ((ahc->flags & AHC_TARGETROLE) != 0); +} + +static ahc_patch_func_t ahc_patch6_func; + +static int +ahc_patch6_func(struct ahc_softc *ahc) +{ + return ((ahc->features & AHC_DT) == 0); +} + +static ahc_patch_func_t ahc_patch5_func; + +static int +ahc_patch5_func(struct ahc_softc *ahc) +{ + return ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0); +} + +static ahc_patch_func_t ahc_patch4_func; + +static int +ahc_patch4_func(struct ahc_softc *ahc) +{ + return ((ahc->flags & AHC_PAGESCBS) != 0); +} + +static ahc_patch_func_t ahc_patch3_func; + +static int +ahc_patch3_func(struct ahc_softc *ahc) +{ + return ((ahc->features & AHC_QUEUE_REGS) != 0); +} + +static ahc_patch_func_t ahc_patch2_func; + +static int +ahc_patch2_func(struct ahc_softc *ahc) +{ + return ((ahc->features & AHC_TWIN) != 0); +} + +static ahc_patch_func_t ahc_patch1_func; + +static int +ahc_patch1_func(struct ahc_softc *ahc) +{ + return ((ahc->features & AHC_ULTRA2) != 0); +} + +static ahc_patch_func_t ahc_patch0_func; + +static int +ahc_patch0_func(struct ahc_softc *ahc) +{ + return (0); +} + +static const struct patch { + ahc_patch_func_t *patch_func; + uint32_t begin :10, + skip_instr :10, + skip_patch :12; +} patches[] = { + { ahc_patch1_func, 4, 1, 1 }, + { ahc_patch2_func, 6, 2, 1 }, + { ahc_patch2_func, 9, 1, 1 }, + { ahc_patch3_func, 11, 1, 2 }, + { ahc_patch0_func, 12, 2, 1 }, + { ahc_patch4_func, 15, 1, 2 }, + { ahc_patch0_func, 16, 1, 1 }, + { ahc_patch5_func, 22, 2, 1 }, + { ahc_patch3_func, 27, 1, 2 }, + { ahc_patch0_func, 28, 1, 1 }, + { ahc_patch6_func, 34, 1, 1 }, + { ahc_patch7_func, 37, 54, 19 }, + { ahc_patch8_func, 37, 1, 1 }, + { ahc_patch9_func, 42, 3, 2 }, + { ahc_patch0_func, 45, 3, 1 }, + { ahc_patch10_func, 49, 1, 2 }, + { ahc_patch0_func, 50, 2, 3 }, + { ahc_patch1_func, 50, 1, 2 }, + { ahc_patch0_func, 51, 1, 1 }, + { ahc_patch2_func, 53, 2, 1 }, + { ahc_patch9_func, 55, 1, 2 }, + { ahc_patch0_func, 56, 1, 1 }, + { ahc_patch9_func, 60, 1, 2 }, + { ahc_patch0_func, 61, 1, 1 }, + { 
ahc_patch9_func, 71, 1, 2 }, + { ahc_patch0_func, 72, 1, 1 }, + { ahc_patch9_func, 75, 1, 2 }, + { ahc_patch0_func, 76, 1, 1 }, + { ahc_patch9_func, 79, 1, 2 }, + { ahc_patch0_func, 80, 1, 1 }, + { ahc_patch8_func, 91, 9, 4 }, + { ahc_patch1_func, 93, 1, 2 }, + { ahc_patch0_func, 94, 1, 1 }, + { ahc_patch2_func, 96, 2, 1 }, + { ahc_patch2_func, 105, 4, 1 }, + { ahc_patch1_func, 109, 1, 2 }, + { ahc_patch0_func, 110, 2, 3 }, + { ahc_patch2_func, 110, 1, 2 }, + { ahc_patch0_func, 111, 1, 1 }, + { ahc_patch7_func, 112, 4, 2 }, + { ahc_patch0_func, 116, 1, 1 }, + { ahc_patch11_func, 117, 2, 1 }, + { ahc_patch1_func, 119, 1, 2 }, + { ahc_patch0_func, 120, 1, 1 }, + { ahc_patch7_func, 121, 4, 1 }, + { ahc_patch7_func, 131, 95, 11 }, + { ahc_patch4_func, 151, 1, 1 }, + { ahc_patch1_func, 168, 1, 1 }, + { ahc_patch12_func, 173, 1, 2 }, + { ahc_patch0_func, 174, 1, 1 }, + { ahc_patch9_func, 185, 1, 2 }, + { ahc_patch0_func, 186, 1, 1 }, + { ahc_patch9_func, 195, 1, 2 }, + { ahc_patch0_func, 196, 1, 1 }, + { ahc_patch9_func, 212, 6, 2 }, + { ahc_patch0_func, 218, 6, 1 }, + { ahc_patch8_func, 226, 21, 2 }, + { ahc_patch1_func, 241, 1, 1 }, + { ahc_patch1_func, 249, 1, 2 }, + { ahc_patch0_func, 250, 2, 2 }, + { ahc_patch11_func, 251, 1, 1 }, + { ahc_patch9_func, 259, 27, 3 }, + { ahc_patch1_func, 275, 10, 2 }, + { ahc_patch13_func, 278, 1, 1 }, + { ahc_patch14_func, 286, 14, 1 }, + { ahc_patch1_func, 302, 1, 2 }, + { ahc_patch0_func, 303, 1, 1 }, + { ahc_patch9_func, 306, 1, 1 }, + { ahc_patch13_func, 311, 1, 1 }, + { ahc_patch9_func, 312, 2, 2 }, + { ahc_patch0_func, 314, 4, 1 }, + { ahc_patch14_func, 318, 1, 1 }, + { ahc_patch15_func, 320, 2, 3 }, + { ahc_patch9_func, 320, 1, 2 }, + { ahc_patch0_func, 321, 1, 1 }, + { ahc_patch6_func, 326, 1, 2 }, + { ahc_patch0_func, 327, 1, 1 }, + { ahc_patch1_func, 331, 47, 11 }, + { ahc_patch6_func, 338, 2, 4 }, + { ahc_patch7_func, 338, 1, 1 }, + { ahc_patch8_func, 339, 1, 1 }, + { ahc_patch0_func, 340, 1, 1 }, + { ahc_patch16_func, 341, 1, 1 }, + { ahc_patch6_func, 357, 6, 3 }, + { ahc_patch16_func, 357, 5, 1 }, + { ahc_patch0_func, 363, 7, 1 }, + { ahc_patch13_func, 373, 5, 1 }, + { ahc_patch0_func, 378, 52, 17 }, + { ahc_patch14_func, 378, 1, 1 }, + { ahc_patch7_func, 380, 2, 2 }, + { ahc_patch17_func, 381, 1, 1 }, + { ahc_patch9_func, 384, 1, 1 }, + { ahc_patch18_func, 391, 1, 1 }, + { ahc_patch14_func, 396, 9, 3 }, + { ahc_patch9_func, 397, 3, 2 }, + { ahc_patch0_func, 400, 3, 1 }, + { ahc_patch9_func, 408, 6, 2 }, + { ahc_patch0_func, 414, 9, 2 }, + { ahc_patch13_func, 414, 1, 1 }, + { ahc_patch13_func, 423, 2, 1 }, + { ahc_patch14_func, 425, 1, 1 }, + { ahc_patch9_func, 427, 1, 2 }, + { ahc_patch0_func, 428, 1, 1 }, + { ahc_patch7_func, 429, 1, 1 }, + { ahc_patch7_func, 430, 1, 1 }, + { ahc_patch8_func, 431, 3, 3 }, + { ahc_patch6_func, 432, 1, 2 }, + { ahc_patch0_func, 433, 1, 1 }, + { ahc_patch9_func, 434, 1, 1 }, + { ahc_patch15_func, 435, 1, 2 }, + { ahc_patch13_func, 435, 1, 1 }, + { ahc_patch14_func, 437, 9, 4 }, + { ahc_patch9_func, 437, 1, 1 }, + { ahc_patch9_func, 444, 2, 1 }, + { ahc_patch0_func, 446, 4, 3 }, + { ahc_patch9_func, 446, 1, 2 }, + { ahc_patch0_func, 447, 3, 1 }, + { ahc_patch1_func, 451, 2, 1 }, + { ahc_patch7_func, 453, 10, 2 }, + { ahc_patch0_func, 463, 1, 1 }, + { ahc_patch8_func, 464, 118, 22 }, + { ahc_patch1_func, 466, 3, 2 }, + { ahc_patch0_func, 469, 5, 3 }, + { ahc_patch9_func, 469, 2, 2 }, + { ahc_patch0_func, 471, 3, 1 }, + { ahc_patch1_func, 476, 2, 2 }, + { ahc_patch0_func, 478, 6, 3 }, + { ahc_patch9_func, 478, 2, 2 
}, + { ahc_patch0_func, 480, 3, 1 }, + { ahc_patch1_func, 486, 2, 2 }, + { ahc_patch0_func, 488, 9, 7 }, + { ahc_patch9_func, 488, 5, 6 }, + { ahc_patch19_func, 488, 1, 2 }, + { ahc_patch0_func, 489, 1, 1 }, + { ahc_patch19_func, 491, 1, 2 }, + { ahc_patch0_func, 492, 1, 1 }, + { ahc_patch0_func, 493, 4, 1 }, + { ahc_patch6_func, 498, 3, 2 }, + { ahc_patch0_func, 501, 1, 1 }, + { ahc_patch6_func, 511, 1, 2 }, + { ahc_patch0_func, 512, 1, 1 }, + { ahc_patch20_func, 549, 7, 1 }, + { ahc_patch3_func, 584, 1, 2 }, + { ahc_patch0_func, 585, 1, 1 }, + { ahc_patch21_func, 588, 1, 1 }, + { ahc_patch8_func, 590, 106, 33 }, + { ahc_patch4_func, 592, 1, 1 }, + { ahc_patch1_func, 598, 2, 2 }, + { ahc_patch0_func, 600, 1, 1 }, + { ahc_patch1_func, 603, 1, 2 }, + { ahc_patch0_func, 604, 1, 1 }, + { ahc_patch9_func, 605, 3, 3 }, + { ahc_patch15_func, 606, 1, 1 }, + { ahc_patch0_func, 608, 4, 1 }, + { ahc_patch19_func, 617, 2, 2 }, + { ahc_patch0_func, 619, 1, 1 }, + { ahc_patch19_func, 623, 10, 3 }, + { ahc_patch5_func, 625, 8, 1 }, + { ahc_patch0_func, 633, 9, 2 }, + { ahc_patch5_func, 634, 8, 1 }, + { ahc_patch4_func, 644, 1, 2 }, + { ahc_patch0_func, 645, 1, 1 }, + { ahc_patch19_func, 646, 1, 2 }, + { ahc_patch0_func, 647, 3, 2 }, + { ahc_patch4_func, 649, 1, 1 }, + { ahc_patch5_func, 650, 1, 1 }, + { ahc_patch5_func, 653, 1, 1 }, + { ahc_patch5_func, 655, 1, 1 }, + { ahc_patch4_func, 657, 2, 2 }, + { ahc_patch0_func, 659, 2, 1 }, + { ahc_patch5_func, 661, 1, 1 }, + { ahc_patch5_func, 664, 1, 1 }, + { ahc_patch5_func, 667, 1, 1 }, + { ahc_patch19_func, 671, 1, 1 }, + { ahc_patch19_func, 674, 1, 1 }, + { ahc_patch4_func, 680, 1, 1 }, + { ahc_patch6_func, 683, 1, 2 }, + { ahc_patch0_func, 684, 1, 1 }, + { ahc_patch7_func, 696, 16, 1 }, + { ahc_patch4_func, 712, 20, 1 }, + { ahc_patch9_func, 733, 4, 2 }, + { ahc_patch0_func, 737, 4, 1 }, + { ahc_patch9_func, 741, 4, 2 }, + { ahc_patch0_func, 745, 3, 1 }, + { ahc_patch6_func, 751, 1, 1 }, + { ahc_patch22_func, 753, 14, 1 }, + { ahc_patch7_func, 767, 3, 1 }, + { ahc_patch9_func, 779, 24, 8 }, + { ahc_patch19_func, 783, 1, 2 }, + { ahc_patch0_func, 784, 1, 1 }, + { ahc_patch15_func, 789, 4, 2 }, + { ahc_patch0_func, 793, 7, 3 }, + { ahc_patch23_func, 793, 5, 2 }, + { ahc_patch0_func, 798, 2, 1 }, + { ahc_patch0_func, 803, 42, 3 }, + { ahc_patch18_func, 815, 18, 2 }, + { ahc_patch0_func, 833, 1, 1 }, + { ahc_patch4_func, 857, 1, 1 }, + { ahc_patch4_func, 858, 3, 2 }, + { ahc_patch0_func, 861, 1, 1 }, + { ahc_patch13_func, 862, 3, 1 }, + { ahc_patch4_func, 865, 12, 1 } +}; + +static const struct cs { + uint16_t begin; + uint16_t end; +} critical_sections[] = { + { 11, 18 }, + { 21, 30 }, + { 712, 728 }, + { 858, 861 }, + { 865, 871 }, + { 873, 875 }, + { 875, 877 } +}; + +#define NUM_CRITICAL_SECTIONS ARRAY_SIZE(critical_sections) diff --git a/drivers/scsi/aic7xxx/aicasm/Makefile b/drivers/scsi/aic7xxx/aicasm/Makefile new file mode 100644 index 000000000..a3f2357a3 --- /dev/null +++ b/drivers/scsi/aic7xxx/aicasm/Makefile @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: GPL-2.0 +PROG= aicasm + +OUTDIR ?= ./ + +.SUFFIXES= .l .y .c .h + +CSRCS= aicasm.c aicasm_symbol.c +YSRCS= aicasm_gram.y aicasm_macro_gram.y +LSRCS= aicasm_scan.l aicasm_macro_scan.l + +GENHDRS= $(addprefix ${OUTDIR}/,aicdb.h $(YSRCS:.y=.h)) +GENSRCS= $(addprefix ${OUTDIR}/,$(YSRCS:.y=.c) $(LSRCS:.l=.c)) + +SRCS= ${CSRCS} ${GENSRCS} +LIBS= -ldb +clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG) +# Override default kernel CFLAGS. This is a userland app. 
+AICASM_CFLAGS:= -I/usr/include -I. -I$(OUTDIR) +LEX= flex +YACC= bison +YFLAGS= -d + +NOMAN= noman + +ifneq ($(HOSTCC),) +AICASM_CC= $(HOSTCC) +else +AICASM_CC= $(CC) +endif + +ifdef DEBUG +CFLAGS+= -DDEBUG -g +YFLAGS+= -t -v +LFLAGS= -d +endif + +$(PROG): $(OUTDIR) ${GENHDRS} $(SRCS) + $(AICASM_CC) $(AICASM_CFLAGS) $(SRCS) -o $(OUTDIR)/$(PROG) $(LIBS) + +$(OUTDIR): + mkdir -p $(OUTDIR) + +$(OUTDIR)/aicdb.h: + @if [ -e "/usr/include/db4/db_185.h" ]; then \ + echo "#include <db4/db_185.h>" > $@; \ + elif [ -e "/usr/include/db3/db_185.h" ]; then \ + echo "#include <db3/db_185.h>" > $@; \ + elif [ -e "/usr/include/db2/db_185.h" ]; then \ + echo "#include <db2/db_185.h>" > $@; \ + elif [ -e "/usr/include/db1/db_185.h" ]; then \ + echo "#include <db1/db_185.h>" > $@; \ + elif [ -e "/usr/include/db/db_185.h" ]; then \ + echo "#include <db/db_185.h>" > $@; \ + elif [ -e "/usr/include/db_185.h" ]; then \ + echo "#include <db_185.h>" > $@; \ + else \ + echo "*** Install db development libraries"; \ + fi + +clean: + rm -f $(clean-files) + +$(OUTDIR)/aicasm_gram.c $(OUTDIR)/aicasm_gram.h: aicasm_gram.y + $(YACC) $(YFLAGS) -b $(<:.y=) $< -o $(OUTDIR)/$(<:.y=.c) + +$(OUTDIR)/aicasm_macro_gram.c $(OUTDIR)/aicasm_macro_gram.h: aicasm_macro_gram.y + $(YACC) $(YFLAGS) -b $(<:.y=) -p mm $< -o $(OUTDIR)/$(<:.y=.c) + +$(OUTDIR)/aicasm_scan.c: aicasm_scan.l + $(LEX) $(LFLAGS) -o $@ $< + +$(OUTDIR)/aicasm_macro_scan.c: aicasm_macro_scan.l + $(LEX) $(LFLAGS) -Pmm -o $@ $< diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c new file mode 100644 index 000000000..cd692a4c5 --- /dev/null +++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c @@ -0,0 +1,843 @@ +/* + * Aic7xxx SCSI host adapter firmware assembler + * + * Copyright (c) 1997, 1998, 2000, 2001 Justin T. Gibbs. + * Copyright (c) 2001, 2002 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm.c#23 $ + * + * $FreeBSD$ + */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#if linux +#include +#else +#include +#endif + +#include "aicasm.h" +#include "aicasm_symbol.h" +#include "aicasm_insformat.h" + +typedef struct patch { + STAILQ_ENTRY(patch) links; + int patch_func; + u_int begin; + u_int skip_instr; + u_int skip_patch; +} patch_t; + +STAILQ_HEAD(patch_list, patch) patches; + +static void usage(void); +static void back_patch(void); +static void output_code(void); +static void output_listing(char *ifilename); +static void dump_scope(scope_t *scope); +static void emit_patch(scope_t *scope, int patch); +static int check_patch(patch_t **start_patch, int start_instr, + int *skip_addr, int *func_vals); + +struct path_list search_path; +int includes_search_curdir; +char *appname; +char *stock_include_file; +FILE *ofile; +char *ofilename; +char *regfilename; +FILE *regfile; +char *listfilename; +FILE *listfile; +char *regdiagfilename; +FILE *regdiagfile; +int src_mode; +int dst_mode; + +static STAILQ_HEAD(,instruction) seq_program; +struct cs_tailq cs_tailq; +struct scope_list scope_stack; +symlist_t patch_functions; + +#if DEBUG +extern int yy_flex_debug; +extern int mm_flex_debug; +extern int yydebug; +extern int mmdebug; +#endif +extern FILE *yyin; +extern int yyparse(void); + +int main(int argc, char *argv[]); + +int +main(int argc, char *argv[]) +{ + extern char *optarg; + extern int optind; + int ch; + int retval; + char *inputfilename; + scope_t *sentinal; + + STAILQ_INIT(&patches); + SLIST_INIT(&search_path); + STAILQ_INIT(&seq_program); + TAILQ_INIT(&cs_tailq); + SLIST_INIT(&scope_stack); + + /* Set Sentinal scope node */ + sentinal = scope_alloc(); + sentinal->type = SCOPE_ROOT; + + includes_search_curdir = 1; + appname = *argv; + regfile = NULL; + listfile = NULL; +#if DEBUG + yy_flex_debug = 0; + mm_flex_debug = 0; + yydebug = 0; + mmdebug = 0; +#endif + while ((ch = getopt(argc, argv, "d:i:l:n:o:p:r:I:")) != -1) { + switch(ch) { + case 'd': +#if DEBUG + if (strcmp(optarg, "s") == 0) { + yy_flex_debug = 1; + mm_flex_debug = 1; + } else if (strcmp(optarg, "p") == 0) { + yydebug = 1; + mmdebug = 1; + } else { + fprintf(stderr, "%s: -d Requires either an " + "'s' or 'p' argument\n", appname); + usage(); + } +#else + stop("-d: Assembler not built with debugging " + "information", EX_SOFTWARE); +#endif + break; + case 'i': + stock_include_file = optarg; + break; + case 'l': + /* Create a program listing */ + if ((listfile = fopen(optarg, "w")) == NULL) { + perror(optarg); + stop(NULL, EX_CANTCREAT); + } + listfilename = optarg; + break; + case 'n': + /* Don't complain about the -nostdinc directrive */ + if (strcmp(optarg, "ostdinc")) { + fprintf(stderr, "%s: Unknown option -%c%s\n", + appname, ch, optarg); + usage(); + /* NOTREACHED */ + } + break; + case 'o': + if ((ofile = fopen(optarg, "w")) == NULL) { + perror(optarg); + stop(NULL, EX_CANTCREAT); + } + ofilename = 
optarg; + break; + case 'p': + /* Create Register Diagnostic "printing" Functions */ + if ((regdiagfile = fopen(optarg, "w")) == NULL) { + perror(optarg); + stop(NULL, EX_CANTCREAT); + } + regdiagfilename = optarg; + break; + case 'r': + if ((regfile = fopen(optarg, "w")) == NULL) { + perror(optarg); + stop(NULL, EX_CANTCREAT); + } + regfilename = optarg; + break; + case 'I': + { + path_entry_t include_dir; + + if (strcmp(optarg, "-") == 0) { + if (includes_search_curdir == 0) { + fprintf(stderr, "%s: Warning - '-I-' " + "specified multiple " + "times\n", appname); + } + includes_search_curdir = 0; + for (include_dir = SLIST_FIRST(&search_path); + include_dir != NULL; + include_dir = SLIST_NEXT(include_dir, + links)) + /* + * All entries before a '-I-' only + * apply to includes specified with + * quotes instead of "<>". + */ + include_dir->quoted_includes_only = 1; + } else { + include_dir = + (path_entry_t)malloc(sizeof(*include_dir)); + if (include_dir == NULL) { + perror(optarg); + stop(NULL, EX_OSERR); + } + include_dir->directory = strdup(optarg); + if (include_dir->directory == NULL) { + perror(optarg); + stop(NULL, EX_OSERR); + } + include_dir->quoted_includes_only = 0; + SLIST_INSERT_HEAD(&search_path, include_dir, + links); + } + break; + } + case '?': + default: + usage(); + /* NOTREACHED */ + } + } + argc -= optind; + argv += optind; + + if (argc != 1) { + fprintf(stderr, "%s: No input file specified\n", appname); + usage(); + /* NOTREACHED */ + } + + if (regdiagfile != NULL + && (regfile == NULL || stock_include_file == NULL)) { + fprintf(stderr, + "%s: The -p option requires the -r and -i options.\n", + appname); + usage(); + /* NOTREACHED */ + } + symtable_open(); + inputfilename = *argv; + include_file(*argv, SOURCE_FILE); + retval = yyparse(); + if (retval == 0) { + if (SLIST_FIRST(&scope_stack) == NULL + || SLIST_FIRST(&scope_stack)->type != SCOPE_ROOT) { + stop("Unterminated conditional expression", EX_DATAERR); + /* NOTREACHED */ + } + + /* Process outmost scope */ + process_scope(SLIST_FIRST(&scope_stack)); + /* + * Decend the tree of scopes and insert/emit + * patches as appropriate. We perform a depth first + * traversal, recursively handling each scope. 
+ */ + /* start at the root scope */ + dump_scope(SLIST_FIRST(&scope_stack)); + + /* Patch up forward jump addresses */ + back_patch(); + + if (ofile != NULL) + output_code(); + if (regfile != NULL) + symtable_dump(regfile, regdiagfile); + if (listfile != NULL) + output_listing(inputfilename); + } + + stop(NULL, 0); + /* NOTREACHED */ + return (0); +} + +static void +usage() +{ + + (void)fprintf(stderr, +"usage: %-16s [-nostdinc] [-I-] [-I directory] [-o output_file]\n" +" [-r register_output_file [-p register_diag_file -i includefile]]\n" +" [-l program_list_file]\n" +" input_file\n", appname); + exit(EX_USAGE); +} + +static void +back_patch() +{ + struct instruction *cur_instr; + + for (cur_instr = STAILQ_FIRST(&seq_program); + cur_instr != NULL; + cur_instr = STAILQ_NEXT(cur_instr, links)) { + if (cur_instr->patch_label != NULL) { + struct ins_format3 *f3_instr; + u_int address; + + if (cur_instr->patch_label->type != LABEL) { + char buf[255]; + + snprintf(buf, sizeof(buf), + "Undefined label %s", + cur_instr->patch_label->name); + stop(buf, EX_DATAERR); + /* NOTREACHED */ + } + f3_instr = &cur_instr->format.format3; + address = f3_instr->address; + address += cur_instr->patch_label->info.linfo->address; + f3_instr->address = address; + } + } +} + +static void +output_code() +{ + struct instruction *cur_instr; + patch_t *cur_patch; + critical_section_t *cs; + symbol_node_t *cur_node; + int instrcount; + + instrcount = 0; + fprintf(ofile, +"/*\n" +" * DO NOT EDIT - This file is automatically generated\n" +" * from the following source files:\n" +" *\n" +"%s */\n", versions); + + fprintf(ofile, "static const uint8_t seqprog[] = {\n"); + for (cur_instr = STAILQ_FIRST(&seq_program); + cur_instr != NULL; + cur_instr = STAILQ_NEXT(cur_instr, links)) { + + fprintf(ofile, "%s\t0x%02x, 0x%02x, 0x%02x, 0x%02x", + cur_instr == STAILQ_FIRST(&seq_program) ? "" : ",\n", +#ifdef __LITTLE_ENDIAN + cur_instr->format.bytes[0], + cur_instr->format.bytes[1], + cur_instr->format.bytes[2], + cur_instr->format.bytes[3]); +#else + cur_instr->format.bytes[3], + cur_instr->format.bytes[2], + cur_instr->format.bytes[1], + cur_instr->format.bytes[0]); +#endif + instrcount++; + } + fprintf(ofile, "\n};\n\n"); + + if (patch_arg_list == NULL) + stop("Patch argument list not defined", + EX_DATAERR); + + /* + * Output patch information. Patch functions first. + */ + fprintf(ofile, +"typedef int %spatch_func_t (%s);\n", prefix, patch_arg_list); + + for (cur_node = SLIST_FIRST(&patch_functions); + cur_node != NULL; + cur_node = SLIST_NEXT(cur_node,links)) { + fprintf(ofile, +"static %spatch_func_t %spatch%d_func;\n" +"\n" +"static int\n" +"%spatch%d_func(%s)\n" +"{\n" +" return (%s);\n" +"}\n\n", + prefix, + prefix, + cur_node->symbol->info.condinfo->func_num, + prefix, + cur_node->symbol->info.condinfo->func_num, + patch_arg_list, + cur_node->symbol->name); + } + + fprintf(ofile, +"static const struct patch {\n" +" %spatch_func_t *patch_func;\n" +" uint32_t begin :10,\n" +" skip_instr :10,\n" +" skip_patch :12;\n" +"} patches[] = {\n", prefix); + + for (cur_patch = STAILQ_FIRST(&patches); + cur_patch != NULL; + cur_patch = STAILQ_NEXT(cur_patch,links)) { + fprintf(ofile, "%s\t{ %spatch%d_func, %d, %d, %d }", + cur_patch == STAILQ_FIRST(&patches) ? 
"" : ",\n", + prefix, + cur_patch->patch_func, cur_patch->begin, + cur_patch->skip_instr, cur_patch->skip_patch); + } + + fprintf(ofile, "\n};\n\n"); + + fprintf(ofile, +"static const struct cs {\n" +" uint16_t begin;\n" +" uint16_t end;\n" +"} critical_sections[] = {\n"); + + for (cs = TAILQ_FIRST(&cs_tailq); + cs != NULL; + cs = TAILQ_NEXT(cs, links)) { + fprintf(ofile, "%s\t{ %d, %d }", + cs == TAILQ_FIRST(&cs_tailq) ? "" : ",\n", + cs->begin_addr, cs->end_addr); + } + + fprintf(ofile, "\n};\n\n"); + + fprintf(ofile, + "#define NUM_CRITICAL_SECTIONS ARRAY_SIZE(critical_sections)\n"); + + fprintf(stderr, "%s: %d instructions used\n", appname, instrcount); +} + +static void +dump_scope(scope_t *scope) +{ + scope_t *cur_scope; + + /* + * Emit the first patch for this scope + */ + emit_patch(scope, 0); + + /* + * Dump each scope within this one. + */ + cur_scope = TAILQ_FIRST(&scope->inner_scope); + + while (cur_scope != NULL) { + + dump_scope(cur_scope); + + cur_scope = TAILQ_NEXT(cur_scope, scope_links); + } + + /* + * Emit the second, closing, patch for this scope + */ + emit_patch(scope, 1); +} + +void +emit_patch(scope_t *scope, int patch) +{ + patch_info_t *pinfo; + patch_t *new_patch; + + pinfo = &scope->patches[patch]; + + if (pinfo->skip_instr == 0) + /* No-Op patch */ + return; + + new_patch = (patch_t *)malloc(sizeof(*new_patch)); + + if (new_patch == NULL) + stop("Could not malloc patch structure", EX_OSERR); + + memset(new_patch, 0, sizeof(*new_patch)); + + if (patch == 0) { + new_patch->patch_func = scope->func_num; + new_patch->begin = scope->begin_addr; + } else { + new_patch->patch_func = 0; + new_patch->begin = scope->end_addr; + } + new_patch->skip_instr = pinfo->skip_instr; + new_patch->skip_patch = pinfo->skip_patch; + STAILQ_INSERT_TAIL(&patches, new_patch, links); +} + +void +output_listing(char *ifilename) +{ + char buf[1024]; + FILE *ifile; + struct instruction *cur_instr; + patch_t *cur_patch; + symbol_node_t *cur_func; + int *func_values; + int instrcount; + int instrptr; + int line; + int func_count; + int skip_addr; + + instrcount = 0; + instrptr = 0; + line = 1; + skip_addr = 0; + if ((ifile = fopen(ifilename, "r")) == NULL) { + perror(ifilename); + stop(NULL, EX_DATAERR); + } + + /* + * Determine which options to apply to this listing. + */ + for (func_count = 0, cur_func = SLIST_FIRST(&patch_functions); + cur_func != NULL; + cur_func = SLIST_NEXT(cur_func, links)) + func_count++; + + func_values = NULL; + if (func_count != 0) { + func_values = (int *)malloc(func_count * sizeof(int)); + + if (func_values == NULL) + stop("Could not malloc", EX_OSERR); + + func_values[0] = 0; /* FALSE func */ + func_count--; + + /* + * Ask the user to fill in the return values for + * the rest of the functions. 
+ */ + + + for (cur_func = SLIST_FIRST(&patch_functions); + cur_func != NULL && SLIST_NEXT(cur_func, links) != NULL; + cur_func = SLIST_NEXT(cur_func, links), func_count--) { + int input; + + fprintf(stdout, "\n(%s)\n", cur_func->symbol->name); + fprintf(stdout, + "Enter the return value for " + "this expression[T/F]:"); + + while (1) { + + input = getchar(); + input = toupper(input); + + if (input == 'T') { + func_values[func_count] = 1; + break; + } else if (input == 'F') { + func_values[func_count] = 0; + break; + } + } + if (isatty(fileno(stdin)) == 0) + putchar(input); + } + fprintf(stdout, "\nThanks!\n"); + } + + /* Now output the listing */ + cur_patch = STAILQ_FIRST(&patches); + for (cur_instr = STAILQ_FIRST(&seq_program); + cur_instr != NULL; + cur_instr = STAILQ_NEXT(cur_instr, links), instrcount++) { + + if (check_patch(&cur_patch, instrcount, + &skip_addr, func_values) == 0) { + /* Don't count this instruction as it is in a patch + * that was removed. + */ + continue; + } + + while (line < cur_instr->srcline) { + fgets(buf, sizeof(buf), ifile); + fprintf(listfile, " \t%s", buf); + line++; + } + fprintf(listfile, "%04x %02x%02x%02x%02x", instrptr, +#ifdef __LITTLE_ENDIAN + cur_instr->format.bytes[0], + cur_instr->format.bytes[1], + cur_instr->format.bytes[2], + cur_instr->format.bytes[3]); +#else + cur_instr->format.bytes[3], + cur_instr->format.bytes[2], + cur_instr->format.bytes[1], + cur_instr->format.bytes[0]); +#endif + /* + * Macro expansions can cause several instructions + * to be output for a single source line. Only + * advance the line once in these cases. + */ + if (line == cur_instr->srcline) { + fgets(buf, sizeof(buf), ifile); + fprintf(listfile, "\t%s", buf); + line++; + } else { + fprintf(listfile, "\n"); + } + instrptr++; + } + /* Dump the remainder of the file */ + while(fgets(buf, sizeof(buf), ifile) != NULL) + fprintf(listfile, " %s", buf); + + fclose(ifile); +} + +static int +check_patch(patch_t **start_patch, int start_instr, + int *skip_addr, int *func_vals) +{ + patch_t *cur_patch; + + cur_patch = *start_patch; + + while (cur_patch != NULL && start_instr == cur_patch->begin) { + if (func_vals[cur_patch->patch_func] == 0) { + int skip; + + /* Start rejecting code */ + *skip_addr = start_instr + cur_patch->skip_instr; + for (skip = cur_patch->skip_patch; + skip > 0 && cur_patch != NULL; + skip--) + cur_patch = STAILQ_NEXT(cur_patch, links); + } else { + /* Accepted this patch. Advance to the next + * one and wait for our intruction pointer to + * hit this point. + */ + cur_patch = STAILQ_NEXT(cur_patch, links); + } + } + + *start_patch = cur_patch; + if (start_instr < *skip_addr) + /* Still skipping */ + return (0); + + return (1); +} + +/* + * Print out error information if appropriate, and clean up before + * terminating the program. 
+ */ +void +stop(const char *string, int err_code) +{ + if (string != NULL) { + fprintf(stderr, "%s: ", appname); + if (yyfilename != NULL) { + fprintf(stderr, "Stopped at file %s, line %d - ", + yyfilename, yylineno); + } + fprintf(stderr, "%s\n", string); + } + + if (ofile != NULL) { + fclose(ofile); + if (err_code != 0) { + fprintf(stderr, "%s: Removing %s due to error\n", + appname, ofilename); + unlink(ofilename); + } + } + + if (regfile != NULL) { + fclose(regfile); + if (err_code != 0) { + fprintf(stderr, "%s: Removing %s due to error\n", + appname, regfilename); + unlink(regfilename); + } + } + + if (listfile != NULL) { + fclose(listfile); + if (err_code != 0) { + fprintf(stderr, "%s: Removing %s due to error\n", + appname, listfilename); + unlink(listfilename); + } + } + + symlist_free(&patch_functions); + symtable_close(); + + exit(err_code); +} + +struct instruction * +seq_alloc() +{ + struct instruction *new_instr; + + new_instr = (struct instruction *)malloc(sizeof(struct instruction)); + if (new_instr == NULL) + stop("Unable to malloc instruction object", EX_SOFTWARE); + memset(new_instr, 0, sizeof(*new_instr)); + STAILQ_INSERT_TAIL(&seq_program, new_instr, links); + new_instr->srcline = yylineno; + return new_instr; +} + +critical_section_t * +cs_alloc() +{ + critical_section_t *new_cs; + + new_cs= (critical_section_t *)malloc(sizeof(critical_section_t)); + if (new_cs == NULL) + stop("Unable to malloc critical_section object", EX_SOFTWARE); + memset(new_cs, 0, sizeof(*new_cs)); + + TAILQ_INSERT_TAIL(&cs_tailq, new_cs, links); + return new_cs; +} + +scope_t * +scope_alloc() +{ + scope_t *new_scope; + + new_scope = (scope_t *)malloc(sizeof(scope_t)); + if (new_scope == NULL) + stop("Unable to malloc scope object", EX_SOFTWARE); + memset(new_scope, 0, sizeof(*new_scope)); + TAILQ_INIT(&new_scope->inner_scope); + + if (SLIST_FIRST(&scope_stack) != NULL) { + TAILQ_INSERT_TAIL(&SLIST_FIRST(&scope_stack)->inner_scope, + new_scope, scope_links); + } + /* This patch is now the current scope */ + SLIST_INSERT_HEAD(&scope_stack, new_scope, scope_stack_links); + return new_scope; +} + +void +process_scope(scope_t *scope) +{ + /* + * We are "leaving" this scope. We should now have + * enough information to process the lists of scopes + * we encapsulate. 
+ */ + scope_t *cur_scope; + u_int skip_patch_count; + u_int skip_instr_count; + + cur_scope = TAILQ_LAST(&scope->inner_scope, scope_tailq); + skip_patch_count = 0; + skip_instr_count = 0; + while (cur_scope != NULL) { + u_int patch0_patch_skip; + + patch0_patch_skip = 0; + switch (cur_scope->type) { + case SCOPE_IF: + case SCOPE_ELSE_IF: + if (skip_instr_count != 0) { + /* Create a tail patch */ + patch0_patch_skip++; + cur_scope->patches[1].skip_patch = + skip_patch_count + 1; + cur_scope->patches[1].skip_instr = + skip_instr_count; + } + + /* Count Head patch */ + patch0_patch_skip++; + + /* Count any patches contained in our inner scope */ + patch0_patch_skip += cur_scope->inner_scope_patches; + + cur_scope->patches[0].skip_patch = patch0_patch_skip; + cur_scope->patches[0].skip_instr = + cur_scope->end_addr - cur_scope->begin_addr; + + skip_instr_count += cur_scope->patches[0].skip_instr; + + skip_patch_count += patch0_patch_skip; + if (cur_scope->type == SCOPE_IF) { + scope->inner_scope_patches += skip_patch_count; + skip_patch_count = 0; + skip_instr_count = 0; + } + break; + case SCOPE_ELSE: + /* Count any patches contained in our innter scope */ + skip_patch_count += cur_scope->inner_scope_patches; + + skip_instr_count += cur_scope->end_addr + - cur_scope->begin_addr; + break; + case SCOPE_ROOT: + stop("Unexpected scope type encountered", EX_SOFTWARE); + /* NOTREACHED */ + } + + cur_scope = TAILQ_PREV(cur_scope, scope_tailq, scope_links); + } +} diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.h b/drivers/scsi/aic7xxx/aicasm/aicasm.h new file mode 100644 index 000000000..716a2aefc --- /dev/null +++ b/drivers/scsi/aic7xxx/aicasm/aicasm.h @@ -0,0 +1,91 @@ +/* + * Assembler for the sequencer program downloaded to Aic7xxx SCSI host adapters + * + * Copyright (c) 1997 Justin T. Gibbs. + * Copyright (c) 2001, 2002 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm.h#14 $ + * + * $FreeBSD$ + */ + +#include "../queue.h" + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef FALSE +#define FALSE 0 +#endif + +typedef struct path_entry { + char *directory; + int quoted_includes_only; + SLIST_ENTRY(path_entry) links; +} *path_entry_t; + +typedef enum { + QUOTED_INCLUDE, + BRACKETED_INCLUDE, + SOURCE_FILE +} include_type; + +SLIST_HEAD(path_list, path_entry); + +extern struct path_list search_path; +extern struct cs_tailq cs_tailq; +extern struct scope_list scope_stack; +extern struct symlist patch_functions; +extern int includes_search_curdir; /* False if we've seen -I- */ +extern char *appname; +extern char *stock_include_file; +extern int yylineno; +extern char *yyfilename; +extern char *prefix; +extern char *patch_arg_list; +extern char *versions; +extern int src_mode; +extern int dst_mode; +struct symbol; + +void stop(const char *errstring, int err_code); +void include_file(char *file_name, include_type type); +void expand_macro(struct symbol *macro_symbol); +struct instruction *seq_alloc(void); +struct critical_section *cs_alloc(void); +struct scope *scope_alloc(void); +void process_scope(struct scope *); diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y new file mode 100644 index 000000000..65182ad9c --- /dev/null +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y @@ -0,0 +1,1999 @@ +%{ +/* + * Parser for the Aic7xxx SCSI Host adapter sequencer assembler. + * + * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs. + * Copyright (c) 2001, 2002 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_gram.y#30 $ + * + * $FreeBSD$ + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include "../queue.h" + +#include "aicasm.h" +#include "aicasm_symbol.h" +#include "aicasm_insformat.h" + +char *yyfilename; +char stock_prefix[] = "aic_"; +char *prefix = stock_prefix; +char *patch_arg_list; +char *versions; +static char errbuf[255]; +static char regex_pattern[255]; +static symbol_t *cur_symbol; +static symbol_t *field_symbol; +static symbol_t *scb_or_sram_symbol; +static symtype cur_symtype; +static symbol_ref_t accumulator; +static symbol_ref_t mode_ptr; +static symbol_ref_t allones; +static symbol_ref_t allzeros; +static symbol_ref_t none; +static symbol_ref_t sindex; +static int instruction_ptr; +static int num_srams; +static int sram_or_scb_offset; +static int download_constant_count; +static int in_critical_section; +static u_int enum_increment; +static u_int enum_next_value; + +static void process_field(int field_type, symbol_t *sym, int mask); +static void initialize_symbol(symbol_t *symbol); +static void add_macro_arg(const char *argtext, int position); +static void add_macro_body(const char *bodytext); +static void process_register(symbol_t **p_symbol); +static void format_1_instr(int opcode, symbol_ref_t *dest, + expression_t *immed, symbol_ref_t *src, int ret); +static void format_2_instr(int opcode, symbol_ref_t *dest, + expression_t *places, symbol_ref_t *src, int ret); +static void format_3_instr(int opcode, symbol_ref_t *src, + expression_t *immed, symbol_ref_t *address); +static void test_readable_symbol(symbol_t *symbol); +static void test_writable_symbol(symbol_t *symbol); +static void type_check(symbol_ref_t *sym, expression_t *expression, int and_op); +static void make_expression(expression_t *immed, int value); +static void add_conditional(symbol_t *symbol); +static void add_version(const char *verstring); +static int is_download_const(expression_t *immed); +static int is_location_address(symbol_t *symbol); +void yyerror(const char *string); + +#define SRAM_SYMNAME "SRAM_BASE" +#define SCB_SYMNAME "SCB_BASE" +%} + +%union { + u_int value; + char *str; + symbol_t *sym; + symbol_ref_t sym_ref; + expression_t expression; +} + +%token T_REGISTER + +%token T_CONST + +%token T_EXPORT + +%token T_DOWNLOAD + +%token T_SCB + +%token T_SRAM + +%token T_ALIAS + +%token T_SIZE + +%token T_EXPR_LSHIFT + +%token T_EXPR_RSHIFT + +%token T_ADDRESS + +%token T_COUNT + +%token T_ACCESS_MODE + +%token T_DONT_GENERATE_DEBUG_CODE + +%token T_MODES + +%token T_DEFINE + +%token T_SET_SRC_MODE + +%token T_SET_DST_MODE + +%token T_MODE + +%token T_BEGIN_CS + +%token T_END_CS + +%token T_PAD_PAGE + +%token T_FIELD + +%token T_ENUM + +%token T_MASK + +%token T_NUMBER + +%token T_PATH T_STRING T_ARG T_MACROBODY + +%token T_CEXPR + +%token T_EOF T_INCLUDE T_VERSION T_PREFIX T_PATCH_ARG_LIST + +%token T_SHR T_SHL T_ROR T_ROL + +%token T_MVI T_MOV T_CLR T_BMOV + +%token T_JMP T_JC T_JNC T_JE T_JNE T_JNZ T_JZ T_CALL + 
+%token T_ADD T_ADC + +%token T_INC T_DEC + +%token T_STC T_CLC + +%token T_CMP T_NOT T_XOR + +%token T_TEST T_AND + +%token T_OR + +/* 16 bit extensions, not implemented + * %token T_OR16 T_AND16 T_XOR16 T_ADD16 + * %token T_ADC16 T_MVI16 T_TEST16 T_CMP16 T_CMPXCHG + */ +%token T_RET + +%token T_NOP + +%token T_ACCUM T_ALLONES T_ALLZEROS T_NONE T_SINDEX T_MODE_PTR + +%token T_A + +%token T_SYMBOL + +%token T_NL + +%token T_IF T_ELSE T_ELSE_IF T_ENDIF + +%type reg_symbol address destination source opt_source + +%type expression immediate immediate_or_a + +%type export ret f1_opcode f2_opcode jmp_jc_jnc_call jz_jnz je_jne + +%type mode_value mode_list macro_arglist + +%left '|' +%left '&' +%left T_EXPR_LSHIFT T_EXPR_RSHIFT +%left '+' '-' +%left '*' '/' +%right '~' +%nonassoc UMINUS +%% + +program: + include +| program include +| prefix +| program prefix +| patch_arg_list +| program patch_arg_list +| version +| program version +| register +| program register +| constant +| program constant +| macrodefn +| program macrodefn +| scratch_ram +| program scratch_ram +| scb +| program scb +| label +| program label +| set_src_mode +| program set_src_mode +| set_dst_mode +| program set_dst_mode +| critical_section_start +| program critical_section_start +| critical_section_end +| program critical_section_end +| conditional +| program conditional +| code +| program code +; + +include: + T_INCLUDE '<' T_PATH '>' + { + include_file($3, BRACKETED_INCLUDE); + } +| T_INCLUDE '"' T_PATH '"' + { + include_file($3, QUOTED_INCLUDE); + } +; + +prefix: + T_PREFIX '=' T_STRING + { + if (prefix != stock_prefix) + stop("Prefix multiply defined", + EX_DATAERR); + prefix = strdup($3); + if (prefix == NULL) + stop("Unable to record prefix", EX_SOFTWARE); + } +; + +patch_arg_list: + T_PATCH_ARG_LIST '=' T_STRING + { + if (patch_arg_list != NULL) + stop("Patch argument list multiply defined", + EX_DATAERR); + patch_arg_list = strdup($3); + if (patch_arg_list == NULL) + stop("Unable to record patch arg list", EX_SOFTWARE); + } +; + +version: + T_VERSION '=' T_STRING + { add_version($3); } +; + +register: + T_REGISTER { cur_symtype = REGISTER; } reg_definition +; + +reg_definition: + T_SYMBOL '{' + { + if ($1->type != UNINITIALIZED) { + stop("Register multiply defined", EX_DATAERR); + /* NOTREACHED */ + } + cur_symbol = $1; + cur_symbol->type = cur_symtype; + initialize_symbol(cur_symbol); + } + reg_attribute_list + '}' + { + /* + * Default to allowing everything in for registers + * with no bit or mask definitions. + */ + if (cur_symbol->info.rinfo->valid_bitmask == 0) + cur_symbol->info.rinfo->valid_bitmask = 0xFF; + + if (cur_symbol->info.rinfo->size == 0) + cur_symbol->info.rinfo->size = 1; + + /* + * This might be useful for registers too. 
+ */ + if (cur_symbol->type != REGISTER) { + if (cur_symbol->info.rinfo->address == 0) + cur_symbol->info.rinfo->address = + sram_or_scb_offset; + sram_or_scb_offset += + cur_symbol->info.rinfo->size; + } + cur_symbol = NULL; + } +; + +reg_attribute_list: + reg_attribute +| reg_attribute_list reg_attribute +; + +reg_attribute: + reg_address +| size +| count +| access_mode +| dont_generate_debug_code +| modes +| field_defn +| enum_defn +| mask_defn +| alias +| accumulator +| mode_pointer +| allones +| allzeros +| none +| sindex +; + +reg_address: + T_ADDRESS T_NUMBER + { + cur_symbol->info.rinfo->address = $2; + } +; + +size: + T_SIZE T_NUMBER + { + cur_symbol->info.rinfo->size = $2; + if (scb_or_sram_symbol != NULL) { + u_int max_addr; + u_int sym_max_addr; + + max_addr = scb_or_sram_symbol->info.rinfo->address + + scb_or_sram_symbol->info.rinfo->size; + sym_max_addr = cur_symbol->info.rinfo->address + + cur_symbol->info.rinfo->size; + + if (sym_max_addr > max_addr) + stop("SCB or SRAM space exhausted", EX_DATAERR); + } + } +; + +count: + T_COUNT T_NUMBER + { + cur_symbol->count += $2; + } +; + +access_mode: + T_ACCESS_MODE T_MODE + { + cur_symbol->info.rinfo->mode = $2; + } +; + +dont_generate_debug_code: + T_DONT_GENERATE_DEBUG_CODE + { + cur_symbol->dont_generate_debug_code = 1; + } +; + +modes: + T_MODES mode_list + { + cur_symbol->info.rinfo->modes = $2; + } +; + +mode_list: + mode_value + { + $$ = $1; + } +| mode_list ',' mode_value + { + $$ = $1 | $3; + } +; + +mode_value: + T_NUMBER + { + if ($1 > 4) { + stop("Valid register modes range between 0 and 4.", + EX_DATAERR); + /* NOTREACHED */ + } + + $$ = (0x1 << $1); + } +| T_SYMBOL + { + symbol_t *symbol; + + symbol = $1; + if (symbol->type != CONST) { + stop("Only \"const\" symbols allowed in " + "mode definitions.", EX_DATAERR); + /* NOTREACHED */ + } + if (symbol->info.cinfo->value > 4) { + stop("Valid register modes range between 0 and 4.", + EX_DATAERR); + /* NOTREACHED */ + } + $$ = (0x1 << symbol->info.cinfo->value); + } +; + +field_defn: + T_FIELD + { + field_symbol = NULL; + enum_next_value = 0; + enum_increment = 1; + } + '{' enum_entry_list '}' +| T_FIELD T_SYMBOL expression + { + process_field(FIELD, $2, $3.value); + field_symbol = $2; + enum_next_value = 0; + enum_increment = 0x01 << (ffs($3.value) - 1); + } + '{' enum_entry_list '}' +| T_FIELD T_SYMBOL expression + { + process_field(FIELD, $2, $3.value); + } +; + +enum_defn: + T_ENUM + { + field_symbol = NULL; + enum_next_value = 0; + enum_increment = 1; + } + '{' enum_entry_list '}' +| T_ENUM T_SYMBOL expression + { + process_field(ENUM, $2, $3.value); + field_symbol = $2; + enum_next_value = 0; + enum_increment = 0x01 << (ffs($3.value) - 1); + } + '{' enum_entry_list '}' +; + +enum_entry_list: + enum_entry +| enum_entry_list ',' enum_entry +; + +enum_entry: + T_SYMBOL + { + process_field(ENUM_ENTRY, $1, enum_next_value); + enum_next_value += enum_increment; + } +| T_SYMBOL expression + { + process_field(ENUM_ENTRY, $1, $2.value); + enum_next_value = $2.value + enum_increment; + } +; + +mask_defn: + T_MASK T_SYMBOL expression + { + process_field(MASK, $2, $3.value); + } +; + +alias: + T_ALIAS T_SYMBOL + { + if ($2->type != UNINITIALIZED) { + stop("Re-definition of register alias", + EX_DATAERR); + /* NOTREACHED */ + } + $2->type = ALIAS; + initialize_symbol($2); + $2->info.ainfo->parent = cur_symbol; + } +; + +accumulator: + T_ACCUM + { + if (accumulator.symbol != NULL) { + stop("Only one accumulator definition allowed", + EX_DATAERR); + /* NOTREACHED */ + } + 
accumulator.symbol = cur_symbol; + } +; + +mode_pointer: + T_MODE_PTR + { + if (mode_ptr.symbol != NULL) { + stop("Only one mode pointer definition allowed", + EX_DATAERR); + /* NOTREACHED */ + } + mode_ptr.symbol = cur_symbol; + } +; + +allones: + T_ALLONES + { + if (allones.symbol != NULL) { + stop("Only one definition of allones allowed", + EX_DATAERR); + /* NOTREACHED */ + } + allones.symbol = cur_symbol; + } +; + +allzeros: + T_ALLZEROS + { + if (allzeros.symbol != NULL) { + stop("Only one definition of allzeros allowed", + EX_DATAERR); + /* NOTREACHED */ + } + allzeros.symbol = cur_symbol; + } +; + +none: + T_NONE + { + if (none.symbol != NULL) { + stop("Only one definition of none allowed", + EX_DATAERR); + /* NOTREACHED */ + } + none.symbol = cur_symbol; + } +; + +sindex: + T_SINDEX + { + if (sindex.symbol != NULL) { + stop("Only one definition of sindex allowed", + EX_DATAERR); + /* NOTREACHED */ + } + sindex.symbol = cur_symbol; + } +; + +expression: + expression '|' expression + { + $$.value = $1.value | $3.value; + symlist_merge(&$$.referenced_syms, + &$1.referenced_syms, + &$3.referenced_syms); + } +| expression '&' expression + { + $$.value = $1.value & $3.value; + symlist_merge(&$$.referenced_syms, + &$1.referenced_syms, + &$3.referenced_syms); + } +| expression '+' expression + { + $$.value = $1.value + $3.value; + symlist_merge(&$$.referenced_syms, + &$1.referenced_syms, + &$3.referenced_syms); + } +| expression '-' expression + { + $$.value = $1.value - $3.value; + symlist_merge(&($$.referenced_syms), + &($1.referenced_syms), + &($3.referenced_syms)); + } +| expression '*' expression + { + $$.value = $1.value * $3.value; + symlist_merge(&($$.referenced_syms), + &($1.referenced_syms), + &($3.referenced_syms)); + } +| expression '/' expression + { + $$.value = $1.value / $3.value; + symlist_merge(&($$.referenced_syms), + &($1.referenced_syms), + &($3.referenced_syms)); + } +| expression T_EXPR_LSHIFT expression + { + $$.value = $1.value << $3.value; + symlist_merge(&$$.referenced_syms, + &$1.referenced_syms, + &$3.referenced_syms); + } +| expression T_EXPR_RSHIFT expression + { + $$.value = $1.value >> $3.value; + symlist_merge(&$$.referenced_syms, + &$1.referenced_syms, + &$3.referenced_syms); + } +| '(' expression ')' + { + $$ = $2; + } +| '~' expression + { + $$ = $2; + $$.value = (~$$.value) & 0xFF; + } +| '-' expression %prec UMINUS + { + $$ = $2; + $$.value = -$$.value; + } +| T_NUMBER + { + $$.value = $1; + SLIST_INIT(&$$.referenced_syms); + } +| T_SYMBOL + { + symbol_t *symbol; + + symbol = $1; + switch (symbol->type) { + case ALIAS: + symbol = $1->info.ainfo->parent; + case REGISTER: + case SCBLOC: + case SRAMLOC: + $$.value = symbol->info.rinfo->address; + break; + case MASK: + case FIELD: + case ENUM: + case ENUM_ENTRY: + $$.value = symbol->info.finfo->value; + break; + case DOWNLOAD_CONST: + case CONST: + $$.value = symbol->info.cinfo->value; + break; + case UNINITIALIZED: + default: + { + snprintf(errbuf, sizeof(errbuf), + "Undefined symbol %s referenced", + symbol->name); + stop(errbuf, EX_DATAERR); + /* NOTREACHED */ + break; + } + } + SLIST_INIT(&$$.referenced_syms); + symlist_add(&$$.referenced_syms, symbol, SYMLIST_INSERT_HEAD); + } +; + +constant: + T_CONST T_SYMBOL expression + { + if ($2->type != UNINITIALIZED) { + stop("Re-definition of symbol as a constant", + EX_DATAERR); + /* NOTREACHED */ + } + $2->type = CONST; + initialize_symbol($2); + $2->info.cinfo->value = $3.value; + } +| T_CONST T_SYMBOL T_DOWNLOAD + { + if ($1) { + stop("Invalid downloaded 
constant declaration", + EX_DATAERR); + /* NOTREACHED */ + } + if ($2->type != UNINITIALIZED) { + stop("Re-definition of symbol as a downloaded constant", + EX_DATAERR); + /* NOTREACHED */ + } + $2->type = DOWNLOAD_CONST; + initialize_symbol($2); + $2->info.cinfo->value = download_constant_count++; + } +; + +macrodefn_prologue: + T_DEFINE T_SYMBOL + { + if ($2->type != UNINITIALIZED) { + stop("Re-definition of symbol as a macro", + EX_DATAERR); + /* NOTREACHED */ + } + cur_symbol = $2; + cur_symbol->type = MACRO; + initialize_symbol(cur_symbol); + } +; + +macrodefn: + macrodefn_prologue T_MACROBODY + { + add_macro_body($2); + } +| macrodefn_prologue '(' macro_arglist ')' T_MACROBODY + { + add_macro_body($5); + cur_symbol->info.macroinfo->narg = $3; + } +; + +macro_arglist: + { + /* Macros can take no arguments */ + $$ = 0; + } +| T_ARG + { + $$ = 1; + add_macro_arg($1, 0); + } +| macro_arglist ',' T_ARG + { + if ($1 == 0) { + stop("Comma without preceding argument in arg list", + EX_DATAERR); + /* NOTREACHED */ + } + $$ = $1 + 1; + add_macro_arg($3, $1); + } +; + +scratch_ram: + T_SRAM '{' + { + snprintf(errbuf, sizeof(errbuf), "%s%d", SRAM_SYMNAME, + num_srams); + cur_symbol = symtable_get(SRAM_SYMNAME); + cur_symtype = SRAMLOC; + cur_symbol->type = SRAMLOC; + initialize_symbol(cur_symbol); + cur_symbol->count += 1; + } + reg_address + { + sram_or_scb_offset = cur_symbol->info.rinfo->address; + } + size + { + scb_or_sram_symbol = cur_symbol; + } + scb_or_sram_attributes + '}' + { + cur_symbol = NULL; + scb_or_sram_symbol = NULL; + } +; + +scb: + T_SCB '{' + { + cur_symbol = symtable_get(SCB_SYMNAME); + cur_symtype = SCBLOC; + if (cur_symbol->type != UNINITIALIZED) { + stop("Only one SRAM definition allowed", + EX_SOFTWARE); + /* NOTREACHED */ + } + cur_symbol->type = SCBLOC; + initialize_symbol(cur_symbol); + /* 64 bytes of SCB space */ + cur_symbol->info.rinfo->size = 64; + cur_symbol->count += 1; + } + reg_address + { + sram_or_scb_offset = cur_symbol->info.rinfo->address; + } + size + { + scb_or_sram_symbol = cur_symbol; + } + scb_or_sram_attributes + '}' + { + cur_symbol = NULL; + scb_or_sram_symbol = NULL; + } +; + +scb_or_sram_attributes: + /* NULL definition is okay */ +| modes +| scb_or_sram_reg_list +| modes scb_or_sram_reg_list +; + +scb_or_sram_reg_list: + reg_definition +| scb_or_sram_reg_list reg_definition +; + +reg_symbol: + T_SYMBOL + { + process_register(&$1); + $$.symbol = $1; + $$.offset = 0; + } +| T_SYMBOL '[' T_SYMBOL ']' + { + process_register(&$1); + if ($3->type != CONST) { + stop("register offset must be a constant", EX_DATAERR); + /* NOTREACHED */ + } + if (($3->info.cinfo->value + 1) > $1->info.rinfo->size) { + stop("Accessing offset beyond range of register", + EX_DATAERR); + /* NOTREACHED */ + } + $$.symbol = $1; + $$.offset = $3->info.cinfo->value; + } +| T_SYMBOL '[' T_NUMBER ']' + { + process_register(&$1); + if (($3 + 1) > $1->info.rinfo->size) { + stop("Accessing offset beyond range of register", + EX_DATAERR); + /* NOTREACHED */ + } + $$.symbol = $1; + $$.offset = $3; + } +| T_A + { + if (accumulator.symbol == NULL) { + stop("No accumulator has been defined", EX_DATAERR); + /* NOTREACHED */ + } + $$.symbol = accumulator.symbol; + $$.offset = 0; + } +; + +destination: + reg_symbol + { + test_writable_symbol($1.symbol); + $$ = $1; + } +; + +immediate: + expression + { $$ = $1; } +; + +immediate_or_a: + expression + { + if ($1.value == 0 && is_download_const(&$1) == 0) { + snprintf(errbuf, sizeof(errbuf), + "\nExpression evaluates to 0 and thus " + 
"references the accumulator.\n " + "If this is the desired effect, use 'A' " + "instead.\n"); + stop(errbuf, EX_DATAERR); + } + $$ = $1; + } +| T_A + { + SLIST_INIT(&$$.referenced_syms); + symlist_add(&$$.referenced_syms, accumulator.symbol, + SYMLIST_INSERT_HEAD); + $$.value = 0; + } +; + +source: + reg_symbol + { + test_readable_symbol($1.symbol); + $$ = $1; + } +; + +opt_source: + { + $$.symbol = NULL; + $$.offset = 0; + } +| ',' source + { $$ = $2; } +; + +ret: + { $$ = 0; } +| T_RET + { $$ = 1; } +; + +set_src_mode: + T_SET_SRC_MODE T_NUMBER ';' + { + src_mode = $2; + } +; + +set_dst_mode: + T_SET_DST_MODE T_NUMBER ';' + { + dst_mode = $2; + } +; + +critical_section_start: + T_BEGIN_CS ';' + { + critical_section_t *cs; + + if (in_critical_section != FALSE) { + stop("Critical Section within Critical Section", + EX_DATAERR); + /* NOTREACHED */ + } + cs = cs_alloc(); + cs->begin_addr = instruction_ptr; + in_critical_section = TRUE; + } +; + +critical_section_end: + T_END_CS ';' + { + critical_section_t *cs; + + if (in_critical_section == FALSE) { + stop("Unballanced 'end_cs'", EX_DATAERR); + /* NOTREACHED */ + } + cs = TAILQ_LAST(&cs_tailq, cs_tailq); + cs->end_addr = instruction_ptr; + in_critical_section = FALSE; + } +; + +export: + { $$ = 0; } +| T_EXPORT + { $$ = 1; } +; + +label: + export T_SYMBOL ':' + { + if ($2->type != UNINITIALIZED) { + stop("Program label multiply defined", EX_DATAERR); + /* NOTREACHED */ + } + $2->type = LABEL; + initialize_symbol($2); + $2->info.linfo->address = instruction_ptr; + $2->info.linfo->exported = $1; + } +; + +address: + T_SYMBOL + { + $$.symbol = $1; + $$.offset = 0; + } +| T_SYMBOL '+' T_NUMBER + { + $$.symbol = $1; + $$.offset = $3; + } +| T_SYMBOL '-' T_NUMBER + { + $$.symbol = $1; + $$.offset = -$3; + } +| '.' + { + $$.symbol = NULL; + $$.offset = 0; + } +| '.' '+' T_NUMBER + { + $$.symbol = NULL; + $$.offset = $3; + } +| '.' '-' T_NUMBER + { + $$.symbol = NULL; + $$.offset = -$3; + } +; + +conditional: + T_IF T_CEXPR '{' + { + scope_t *new_scope; + + add_conditional($2); + new_scope = scope_alloc(); + new_scope->type = SCOPE_IF; + new_scope->begin_addr = instruction_ptr; + new_scope->func_num = $2->info.condinfo->func_num; + } +| T_ELSE T_IF T_CEXPR '{' + { + scope_t *new_scope; + scope_t *scope_context; + scope_t *last_scope; + + /* + * Ensure that the previous scope is either an + * if or and else if. + */ + scope_context = SLIST_FIRST(&scope_stack); + last_scope = TAILQ_LAST(&scope_context->inner_scope, + scope_tailq); + if (last_scope == NULL + || last_scope->type == T_ELSE) { + + stop("'else if' without leading 'if'", EX_DATAERR); + /* NOTREACHED */ + } + add_conditional($3); + new_scope = scope_alloc(); + new_scope->type = SCOPE_ELSE_IF; + new_scope->begin_addr = instruction_ptr; + new_scope->func_num = $3->info.condinfo->func_num; + } +| T_ELSE '{' + { + scope_t *new_scope; + scope_t *scope_context; + scope_t *last_scope; + + /* + * Ensure that the previous scope is either an + * if or and else if. 
+ */ + scope_context = SLIST_FIRST(&scope_stack); + last_scope = TAILQ_LAST(&scope_context->inner_scope, + scope_tailq); + if (last_scope == NULL + || last_scope->type == SCOPE_ELSE) { + + stop("'else' without leading 'if'", EX_DATAERR); + /* NOTREACHED */ + } + new_scope = scope_alloc(); + new_scope->type = SCOPE_ELSE; + new_scope->begin_addr = instruction_ptr; + } +; + +conditional: + '}' + { + scope_t *scope_context; + + scope_context = SLIST_FIRST(&scope_stack); + if (scope_context->type == SCOPE_ROOT) { + stop("Unexpected '}' encountered", EX_DATAERR); + /* NOTREACHED */ + } + + scope_context->end_addr = instruction_ptr; + + /* Pop the scope */ + SLIST_REMOVE_HEAD(&scope_stack, scope_stack_links); + + process_scope(scope_context); + + if (SLIST_FIRST(&scope_stack) == NULL) { + stop("Unexpected '}' encountered", EX_DATAERR); + /* NOTREACHED */ + } + } +; + +f1_opcode: + T_AND { $$ = AIC_OP_AND; } +| T_XOR { $$ = AIC_OP_XOR; } +| T_ADD { $$ = AIC_OP_ADD; } +| T_ADC { $$ = AIC_OP_ADC; } +; + +code: + f1_opcode destination ',' immediate_or_a opt_source ret ';' + { + format_1_instr($1, &$2, &$4, &$5, $6); + } +; + +code: + T_OR reg_symbol ',' immediate_or_a opt_source ret ';' + { + format_1_instr(AIC_OP_OR, &$2, &$4, &$5, $6); + } +; + +code: + T_INC destination opt_source ret ';' + { + expression_t immed; + + make_expression(&immed, 1); + format_1_instr(AIC_OP_ADD, &$2, &immed, &$3, $4); + } +; + +code: + T_DEC destination opt_source ret ';' + { + expression_t immed; + + make_expression(&immed, -1); + format_1_instr(AIC_OP_ADD, &$2, &immed, &$3, $4); + } +; + +code: + T_CLC ret ';' + { + expression_t immed; + + make_expression(&immed, -1); + format_1_instr(AIC_OP_ADD, &none, &immed, &allzeros, $2); + } +| T_CLC T_MVI destination ',' immediate_or_a ret ';' + { + format_1_instr(AIC_OP_ADD, &$3, &$5, &allzeros, $6); + } +; + +code: + T_STC ret ';' + { + expression_t immed; + + make_expression(&immed, 1); + format_1_instr(AIC_OP_ADD, &none, &immed, &allones, $2); + } +| T_STC destination ret ';' + { + expression_t immed; + + make_expression(&immed, 1); + format_1_instr(AIC_OP_ADD, &$2, &immed, &allones, $3); + } +; + +code: + T_BMOV destination ',' source ',' immediate ret ';' + { + format_1_instr(AIC_OP_BMOV, &$2, &$6, &$4, $7); + } +; + +code: + T_MOV destination ',' source ret ';' + { + expression_t immed; + + make_expression(&immed, 1); + format_1_instr(AIC_OP_BMOV, &$2, &immed, &$4, $5); + } +; + +code: + T_MVI destination ',' immediate ret ';' + { + if ($4.value == 0 + && is_download_const(&$4) == 0) { + expression_t immed; + + /* + * Allow move immediates of 0 so that macros, + * that can't know the immediate's value and + * otherwise compensate, still work. 
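
A small sketch of the opcode choice the mvi rule here makes (simplified; the real action builds format-1 instructions through format_1_instr()): an immediate of zero that is not a download constant is copied from the allzeros register with AIC_OP_BMOV, while anything else is OR-ed with allzeros.

#include <stdio.h>

/* Mirrors only the decision in the T_MVI action above. */
static const char *mvi_lowering(int immed, int is_download_const)
{
	if (immed == 0 && !is_download_const)
		return "AIC_OP_BMOV (copy one byte from allzeros)";
	return "AIC_OP_OR   (immediate | allzeros)";
}

int main(void)
{
	printf("mvi REG, 0    -> %s\n", mvi_lowering(0, 0));
	printf("mvi REG, 0x55 -> %s\n", mvi_lowering(0x55, 0));
	return 0;
}
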
+ */ + make_expression(&immed, 1); + format_1_instr(AIC_OP_BMOV, &$2, &immed, &allzeros, $5); + } else { + format_1_instr(AIC_OP_OR, &$2, &$4, &allzeros, $5); + } + } +; + +code: + T_NOT destination opt_source ret ';' + { + expression_t immed; + + make_expression(&immed, 0xff); + format_1_instr(AIC_OP_XOR, &$2, &immed, &$3, $4); + } +; + +code: + T_CLR destination ret ';' + { + expression_t immed; + + make_expression(&immed, 0xff); + format_1_instr(AIC_OP_AND, &$2, &immed, &allzeros, $3); + } +; + +code: + T_NOP ret ';' + { + expression_t immed; + + make_expression(&immed, 0xff); + format_1_instr(AIC_OP_AND, &none, &immed, &allzeros, $2); + } +; + +code: + T_RET ';' + { + expression_t immed; + + make_expression(&immed, 0xff); + format_1_instr(AIC_OP_AND, &none, &immed, &allzeros, TRUE); + } +; + + /* + * This grammar differs from the one in the aic7xxx + * reference manual since the grammar listed there is + * ambiguous and causes a shift/reduce conflict. + * It also seems more logical as the "immediate" + * argument is listed as the second arg like the + * other formats. + */ + +f2_opcode: + T_SHL { $$ = AIC_OP_SHL; } +| T_SHR { $$ = AIC_OP_SHR; } +| T_ROL { $$ = AIC_OP_ROL; } +| T_ROR { $$ = AIC_OP_ROR; } +; + +/* + * 16bit opcodes, not used + * + *f4_opcode: + * T_OR16 { $$ = AIC_OP_OR16; } + *| T_AND16 { $$ = AIC_OP_AND16; } + *| T_XOR16 { $$ = AIC_OP_XOR16; } + *| T_ADD16 { $$ = AIC_OP_ADD16; } + *| T_ADC16 { $$ = AIC_OP_ADC16; } + *| T_MVI16 { $$ = AIC_OP_MVI16; } + *; + */ + +code: + f2_opcode destination ',' expression opt_source ret ';' + { + format_2_instr($1, &$2, &$4, &$5, $6); + } +; + +jmp_jc_jnc_call: + T_JMP { $$ = AIC_OP_JMP; } +| T_JC { $$ = AIC_OP_JC; } +| T_JNC { $$ = AIC_OP_JNC; } +| T_CALL { $$ = AIC_OP_CALL; } +; + +jz_jnz: + T_JZ { $$ = AIC_OP_JZ; } +| T_JNZ { $$ = AIC_OP_JNZ; } +; + +je_jne: + T_JE { $$ = AIC_OP_JE; } +| T_JNE { $$ = AIC_OP_JNE; } +; + +code: + jmp_jc_jnc_call address ';' + { + expression_t immed; + + make_expression(&immed, 0); + format_3_instr($1, &sindex, &immed, &$2); + } +; + +code: + T_OR reg_symbol ',' immediate jmp_jc_jnc_call address ';' + { + type_check(&$2, &$4, AIC_OP_OR); + format_3_instr($5, &$2, &$4, &$6); + } +; + +code: + T_TEST source ',' immediate_or_a jz_jnz address ';' + { + format_3_instr($5, &$2, &$4, &$6); + } +; + +code: + T_CMP source ',' immediate_or_a je_jne address ';' + { + format_3_instr($5, &$2, &$4, &$6); + } +; + +code: + T_MOV source jmp_jc_jnc_call address ';' + { + expression_t immed; + + make_expression(&immed, 0); + format_3_instr($3, &$2, &immed, &$4); + } +; + +code: + T_MVI immediate jmp_jc_jnc_call address ';' + { + format_3_instr($3, &allzeros, &$2, &$4); + } +; + +%% + +static void +process_field(int field_type, symbol_t *sym, int value) +{ + /* + * Add the current register to its + * symbol list, if it already exists, + * warn if we are setting it to a + * different value, or in the bit to + * the "allowed bits" of this register. 
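
A self-contained illustration of the "allowed bits" bookkeeping this comment describes (the field values are invented): process_field() below ORs every field and mask value into the owning register's valid_bitmask, and type_check() further down rejects immediates that set bits outside it, unless the opcode treats the immediate as a mask.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t valid_bitmask = 0;
	uint8_t immed = 0x48;

	valid_bitmask |= 0x80;	/* field FOO 0x80 (hypothetical) */
	valid_bitmask |= 0x40;	/* field BAR 0x40 (hypothetical) */
	valid_bitmask |= 0x07;	/* mask  BAZ 0x07 (hypothetical) */

	if (immed & (uint8_t)~valid_bitmask)
		printf("0x%02x sets undefined bit(s) 0x%02x\n",
		       immed, immed & (uint8_t)~valid_bitmask);
	else
		printf("0x%02x stays within 0x%02x\n", immed, valid_bitmask);
	return 0;
}
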
+ */ + if (sym->type == UNINITIALIZED) { + sym->type = field_type; + initialize_symbol(sym); + sym->info.finfo->value = value; + if (field_type != ENUM_ENTRY) { + if (field_type != MASK && value == 0) { + stop("Empty Field, or Enum", EX_DATAERR); + /* NOTREACHED */ + } + sym->info.finfo->value = value; + sym->info.finfo->mask = value; + } else if (field_symbol != NULL) { + sym->info.finfo->mask = field_symbol->info.finfo->value; + } else { + sym->info.finfo->mask = 0xFF; + } + } else if (sym->type != field_type) { + stop("Field definition mirrors a definition of the same " + " name, but a different type", EX_DATAERR); + /* NOTREACHED */ + } else if (value != sym->info.finfo->value) { + stop("Field redefined with a conflicting value", EX_DATAERR); + /* NOTREACHED */ + } + /* Fail if this symbol is already listed */ + if (symlist_search(&(sym->info.finfo->symrefs), + cur_symbol->name) != NULL) { + stop("Field defined multiple times for register", EX_DATAERR); + /* NOTREACHED */ + } + symlist_add(&(sym->info.finfo->symrefs), cur_symbol, + SYMLIST_INSERT_HEAD); + cur_symbol->info.rinfo->valid_bitmask |= sym->info.finfo->mask; + cur_symbol->info.rinfo->typecheck_masks = TRUE; + symlist_add(&(cur_symbol->info.rinfo->fields), sym, SYMLIST_SORT); +} + +static void +initialize_symbol(symbol_t *symbol) +{ + switch (symbol->type) { + case UNINITIALIZED: + stop("Call to initialize_symbol with type field unset", + EX_SOFTWARE); + /* NOTREACHED */ + break; + case REGISTER: + case SRAMLOC: + case SCBLOC: + symbol->info.rinfo = + (struct reg_info *)malloc(sizeof(struct reg_info)); + if (symbol->info.rinfo == NULL) { + stop("Can't create register info", EX_SOFTWARE); + /* NOTREACHED */ + } + memset(symbol->info.rinfo, 0, + sizeof(struct reg_info)); + SLIST_INIT(&(symbol->info.rinfo->fields)); + /* + * Default to allowing access in all register modes + * or to the mode specified by the SCB or SRAM space + * we are in. 
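
A reduced model (plain C, error strings omitted) of the pattern initialize_symbol() implements: a symbol is a tagged union whose per-type info structure is allocated once the type is known, and register symbols default to being accessible in every mode unless they sit inside an SCB/SRAM block.

#include <stdio.h>
#include <stdlib.h>

enum toy_type { TOY_UNINITIALIZED, TOY_REGISTER, TOY_CONST };

struct toy_symbol {
	enum toy_type type;
	union {
		struct { int address, size, modes; } *rinfo;
		struct { int value; } *cinfo;
	} info;
};

static void toy_initialize(struct toy_symbol *sym)
{
	switch (sym->type) {
	case TOY_REGISTER:
		sym->info.rinfo = calloc(1, sizeof(*sym->info.rinfo));
		if (sym->info.rinfo == NULL)
			exit(EXIT_FAILURE);
		sym->info.rinfo->modes = ~0;	/* all modes allowed */
		break;
	case TOY_CONST:
		sym->info.cinfo = calloc(1, sizeof(*sym->info.cinfo));
		if (sym->info.cinfo == NULL)
			exit(EXIT_FAILURE);
		break;
	default:
		exit(EXIT_FAILURE);	/* type must be set before init */
	}
}

int main(void)
{
	struct toy_symbol sym = { .type = TOY_REGISTER };

	toy_initialize(&sym);
	printf("default modes bitmask: 0x%x\n",
	       (unsigned int)sym.info.rinfo->modes);
	free(sym.info.rinfo);
	return 0;
}
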
+ */ + if (scb_or_sram_symbol != NULL) + symbol->info.rinfo->modes = + scb_or_sram_symbol->info.rinfo->modes; + else + symbol->info.rinfo->modes = ~0; + break; + case ALIAS: + symbol->info.ainfo = + (struct alias_info *)malloc(sizeof(struct alias_info)); + if (symbol->info.ainfo == NULL) { + stop("Can't create alias info", EX_SOFTWARE); + /* NOTREACHED */ + } + memset(symbol->info.ainfo, 0, + sizeof(struct alias_info)); + break; + case MASK: + case FIELD: + case ENUM: + case ENUM_ENTRY: + symbol->info.finfo = + (struct field_info *)malloc(sizeof(struct field_info)); + if (symbol->info.finfo == NULL) { + stop("Can't create field info", EX_SOFTWARE); + /* NOTREACHED */ + } + memset(symbol->info.finfo, 0, sizeof(struct field_info)); + SLIST_INIT(&(symbol->info.finfo->symrefs)); + break; + case CONST: + case DOWNLOAD_CONST: + symbol->info.cinfo = + (struct const_info *)malloc(sizeof(struct const_info)); + if (symbol->info.cinfo == NULL) { + stop("Can't create alias info", EX_SOFTWARE); + /* NOTREACHED */ + } + memset(symbol->info.cinfo, 0, + sizeof(struct const_info)); + break; + case LABEL: + symbol->info.linfo = + (struct label_info *)malloc(sizeof(struct label_info)); + if (symbol->info.linfo == NULL) { + stop("Can't create label info", EX_SOFTWARE); + /* NOTREACHED */ + } + memset(symbol->info.linfo, 0, + sizeof(struct label_info)); + break; + case CONDITIONAL: + symbol->info.condinfo = + (struct cond_info *)malloc(sizeof(struct cond_info)); + if (symbol->info.condinfo == NULL) { + stop("Can't create conditional info", EX_SOFTWARE); + /* NOTREACHED */ + } + memset(symbol->info.condinfo, 0, + sizeof(struct cond_info)); + break; + case MACRO: + symbol->info.macroinfo = + (struct macro_info *)malloc(sizeof(struct macro_info)); + if (symbol->info.macroinfo == NULL) { + stop("Can't create macro info", EX_SOFTWARE); + /* NOTREACHED */ + } + memset(symbol->info.macroinfo, 0, + sizeof(struct macro_info)); + STAILQ_INIT(&symbol->info.macroinfo->args); + break; + default: + stop("Call to initialize_symbol with invalid symbol type", + EX_SOFTWARE); + /* NOTREACHED */ + break; + } +} + +static void +add_macro_arg(const char *argtext, int argnum) +{ + struct macro_arg *marg; + int i; + int retval; + + if (cur_symbol == NULL || cur_symbol->type != MACRO) { + stop("Invalid current symbol for adding macro arg", + EX_SOFTWARE); + /* NOTREACHED */ + } + + marg = (struct macro_arg *)malloc(sizeof(*marg)); + if (marg == NULL) { + stop("Can't create macro_arg structure", EX_SOFTWARE); + /* NOTREACHED */ + } + marg->replacement_text = NULL; + retval = snprintf(regex_pattern, sizeof(regex_pattern), + "[^-/A-Za-z0-9_](%s)([^-/A-Za-z0-9_]|$)", + argtext); + if (retval >= sizeof(regex_pattern)) { + stop("Regex text buffer too small for arg", + EX_SOFTWARE); + /* NOTREACHED */ + } + retval = regcomp(&marg->arg_regex, regex_pattern, REG_EXTENDED); + if (retval != 0) { + stop("Regex compilation failed", EX_SOFTWARE); + /* NOTREACHED */ + } + STAILQ_INSERT_TAIL(&cur_symbol->info.macroinfo->args, marg, links); +} + +static void +add_macro_body(const char *bodytext) +{ + if (cur_symbol == NULL || cur_symbol->type != MACRO) { + stop("Invalid current symbol for adding macro arg", + EX_SOFTWARE); + /* NOTREACHED */ + } + cur_symbol->info.macroinfo->body = strdup(bodytext); + if (cur_symbol->info.macroinfo->body == NULL) { + stop("Can't duplicate macro body text", EX_SOFTWARE); + /* NOTREACHED */ + } +} + +static void +process_register(symbol_t **p_symbol) +{ + symbol_t *symbol = *p_symbol; + + if (symbol->type == 
UNINITIALIZED) { + snprintf(errbuf, sizeof(errbuf), "Undefined register %s", + symbol->name); + stop(errbuf, EX_DATAERR); + /* NOTREACHED */ + } else if (symbol->type == ALIAS) { + *p_symbol = symbol->info.ainfo->parent; + } else if ((symbol->type != REGISTER) + && (symbol->type != SCBLOC) + && (symbol->type != SRAMLOC)) { + snprintf(errbuf, sizeof(errbuf), + "Specified symbol %s is not a register", + symbol->name); + stop(errbuf, EX_DATAERR); + } +} + +static void +format_1_instr(int opcode, symbol_ref_t *dest, expression_t *immed, + symbol_ref_t *src, int ret) +{ + struct instruction *instr; + struct ins_format1 *f1_instr; + + if (src->symbol == NULL) + src = dest; + + /* Test register permissions */ + test_writable_symbol(dest->symbol); + test_readable_symbol(src->symbol); + + if (!is_location_address(dest->symbol)) { + /* Ensure that immediate makes sense for this destination */ + type_check(dest, immed, opcode); + } + + /* Allocate sequencer space for the instruction and fill it out */ + instr = seq_alloc(); + f1_instr = &instr->format.format1; + f1_instr->ret = ret ? 1 : 0; + f1_instr->opcode = opcode; + f1_instr->destination = dest->symbol->info.rinfo->address + + dest->offset; + f1_instr->source = src->symbol->info.rinfo->address + + src->offset; + f1_instr->immediate = immed->value; + + if (is_download_const(immed)) + f1_instr->parity = 1; + else if (dest->symbol == mode_ptr.symbol) { + u_int src_value; + u_int dst_value; + + /* + * Attempt to update mode information if + * we are operating on the mode register. + */ + if (src->symbol == allones.symbol) + src_value = 0xFF; + else if (src->symbol == allzeros.symbol) + src_value = 0; + else if (src->symbol == mode_ptr.symbol) + src_value = (dst_mode << 4) | src_mode; + else + goto cant_update; + + switch (opcode) { + case AIC_OP_AND: + dst_value = src_value & immed->value; + break; + case AIC_OP_XOR: + dst_value = src_value ^ immed->value; + break; + case AIC_OP_ADD: + dst_value = (src_value + immed->value) & 0xFF; + break; + case AIC_OP_OR: + dst_value = src_value | immed->value; + break; + case AIC_OP_BMOV: + dst_value = src_value; + break; + default: + goto cant_update; + } + src_mode = dst_value & 0xF; + dst_mode = (dst_value >> 4) & 0xF; + } + +cant_update: + symlist_free(&immed->referenced_syms); + instruction_ptr++; +} + +static void +format_2_instr(int opcode, symbol_ref_t *dest, expression_t *places, + symbol_ref_t *src, int ret) +{ + struct instruction *instr; + struct ins_format2 *f2_instr; + uint8_t shift_control; + + if (src->symbol == NULL) + src = dest; + + /* Test register permissions */ + test_writable_symbol(dest->symbol); + test_readable_symbol(src->symbol); + + /* Allocate sequencer space for the instruction and fill it out */ + instr = seq_alloc(); + f2_instr = &instr->format.format2; + f2_instr->ret = ret ? 
1 : 0; + f2_instr->opcode = AIC_OP_ROL; + f2_instr->destination = dest->symbol->info.rinfo->address + + dest->offset; + f2_instr->source = src->symbol->info.rinfo->address + + src->offset; + if (places->value > 8 || places->value <= 0) { + stop("illegal shift value", EX_DATAERR); + /* NOTREACHED */ + } + switch (opcode) { + case AIC_OP_SHL: + if (places->value == 8) + shift_control = 0xf0; + else + shift_control = (places->value << 4) | places->value; + break; + case AIC_OP_SHR: + if (places->value == 8) { + shift_control = 0xf8; + } else { + shift_control = (places->value << 4) + | (8 - places->value) + | 0x08; + } + break; + case AIC_OP_ROL: + shift_control = places->value & 0x7; + break; + case AIC_OP_ROR: + shift_control = (8 - places->value) | 0x08; + break; + default: + shift_control = 0; /* Quiet Compiler */ + stop("Invalid shift operation specified", EX_SOFTWARE); + /* NOTREACHED */ + break; + }; + f2_instr->shift_control = shift_control; + symlist_free(&places->referenced_syms); + instruction_ptr++; +} + +static void +format_3_instr(int opcode, symbol_ref_t *src, + expression_t *immed, symbol_ref_t *address) +{ + struct instruction *instr; + struct ins_format3 *f3_instr; + int addr; + + /* Test register permissions */ + test_readable_symbol(src->symbol); + + /* Allocate sequencer space for the instruction and fill it out */ + instr = seq_alloc(); + f3_instr = &instr->format.format3; + if (address->symbol == NULL) { + /* 'dot' reference. Use the current instruction pointer */ + addr = instruction_ptr + address->offset; + } else if (address->symbol->type == UNINITIALIZED) { + /* forward reference */ + addr = address->offset; + instr->patch_label = address->symbol; + } else + addr = address->symbol->info.linfo->address + address->offset; + f3_instr->opcode = opcode; + f3_instr->address = addr; + f3_instr->source = src->symbol->info.rinfo->address + + src->offset; + f3_instr->immediate = immed->value; + + if (is_download_const(immed)) + f3_instr->parity = 1; + + symlist_free(&immed->referenced_syms); + instruction_ptr++; +} + +static void +test_readable_symbol(symbol_t *symbol) +{ + if ((symbol->info.rinfo->modes & (0x1 << src_mode)) == 0) { + snprintf(errbuf, sizeof(errbuf), + "Register %s unavailable in source reg mode %d", + symbol->name, src_mode); + stop(errbuf, EX_DATAERR); + } + + if (symbol->info.rinfo->mode == WO) { + stop("Write Only register specified as source", + EX_DATAERR); + /* NOTREACHED */ + } +} + +static void +test_writable_symbol(symbol_t *symbol) +{ + if ((symbol->info.rinfo->modes & (0x1 << dst_mode)) == 0) { + snprintf(errbuf, sizeof(errbuf), + "Register %s unavailable in destination reg mode %d", + symbol->name, dst_mode); + stop(errbuf, EX_DATAERR); + } + + if (symbol->info.rinfo->mode == RO) { + stop("Read Only register specified as destination", + EX_DATAERR); + /* NOTREACHED */ + } +} + +static void +type_check(symbol_ref_t *sym, expression_t *expression, int opcode) +{ + symbol_t *symbol = sym->symbol; + symbol_node_t *node; + int and_op; + int8_t value, mask; + + and_op = FALSE; + /* + * Make sure that we aren't attempting to write something + * that hasn't been defined. If this is an and operation, + * this is a mask, so "undefined" bits are okay. 
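
Stepping back to format_2_instr() above: all four shift/rotate pseudo-ops are emitted with the AIC_OP_ROL opcode, and the actual operation is selected by the shift_control byte. A worked example of that encoding, with values taken straight from the switch statement above:

#include <stdio.h>
#include <stdint.h>

static uint8_t shift_control_shl(int places)
{
	return places == 8 ? 0xf0 : (uint8_t)((places << 4) | places);
}

static uint8_t shift_control_shr(int places)
{
	return places == 8 ? 0xf8
			   : (uint8_t)((places << 4) | (8 - places) | 0x08);
}

int main(void)
{
	printf("shl 1 -> shift_control 0x%02x\n", shift_control_shl(1)); /* 0x11 */
	printf("shl 4 -> shift_control 0x%02x\n", shift_control_shl(4)); /* 0x44 */
	printf("shr 2 -> shift_control 0x%02x\n", shift_control_shr(2)); /* 0x2e */
	return 0;
}
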
+ */ + if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ || + opcode == AIC_OP_JZ || opcode == AIC_OP_JNE || + opcode == AIC_OP_BMOV) + and_op = TRUE; + + /* + * Defaulting to 8 bit logic + */ + mask = (int8_t)~symbol->info.rinfo->valid_bitmask; + value = (int8_t)expression->value; + + if (and_op == FALSE && (mask & value) != 0 ) { + snprintf(errbuf, sizeof(errbuf), + "Invalid bit(s) 0x%x in immediate written to %s", + (mask & value), + symbol->name); + stop(errbuf, EX_DATAERR); + /* NOTREACHED */ + } + + /* + * Now make sure that all of the symbols referenced by the + * expression are defined for this register. + */ + if (symbol->info.rinfo->typecheck_masks != FALSE) { + for(node = expression->referenced_syms.slh_first; + node != NULL; + node = node->links.sle_next) { + if ((node->symbol->type == MASK + || node->symbol->type == FIELD + || node->symbol->type == ENUM + || node->symbol->type == ENUM_ENTRY) + && symlist_search(&node->symbol->info.finfo->symrefs, + symbol->name) == NULL) { + snprintf(errbuf, sizeof(errbuf), + "Invalid field or mask %s " + "for register %s", + node->symbol->name, symbol->name); + stop(errbuf, EX_DATAERR); + /* NOTREACHED */ + } + } + } +} + +static void +make_expression(expression_t *immed, int value) +{ + SLIST_INIT(&immed->referenced_syms); + immed->value = value & 0xff; +} + +static void +add_conditional(symbol_t *symbol) +{ + static int numfuncs; + + if (numfuncs == 0) { + /* add a special conditional, "0" */ + symbol_t *false_func; + + false_func = symtable_get("0"); + if (false_func->type != UNINITIALIZED) { + stop("Conditional expression '0' " + "conflicts with a symbol", EX_DATAERR); + /* NOTREACHED */ + } + false_func->type = CONDITIONAL; + initialize_symbol(false_func); + false_func->info.condinfo->func_num = numfuncs++; + symlist_add(&patch_functions, false_func, SYMLIST_INSERT_HEAD); + } + + /* This condition has occurred before */ + if (symbol->type == CONDITIONAL) + return; + + if (symbol->type != UNINITIALIZED) { + stop("Conditional expression conflicts with a symbol", + EX_DATAERR); + /* NOTREACHED */ + } + + symbol->type = CONDITIONAL; + initialize_symbol(symbol); + symbol->info.condinfo->func_num = numfuncs++; + symlist_add(&patch_functions, symbol, SYMLIST_INSERT_HEAD); +} + +static void +add_version(const char *verstring) +{ + const char prefix[] = " * "; + int newlen; + int oldlen; + + newlen = strlen(verstring) + strlen(prefix); + oldlen = 0; + if (versions != NULL) + oldlen = strlen(versions); + versions = realloc(versions, newlen + oldlen + 2); + if (versions == NULL) + stop("Can't allocate version string", EX_SOFTWARE); + strcpy(&versions[oldlen], prefix); + strcpy(&versions[oldlen + strlen(prefix)], verstring); + versions[newlen + oldlen] = '\n'; + versions[newlen + oldlen + 1] = '\0'; +} + +void +yyerror(const char *string) +{ + stop(string, EX_DATAERR); +} + +static int +is_download_const(expression_t *immed) +{ + if ((immed->referenced_syms.slh_first != NULL) + && (immed->referenced_syms.slh_first->symbol->type == DOWNLOAD_CONST)) + return (TRUE); + + return (FALSE); +} + +static int +is_location_address(symbol_t *sym) +{ + if (sym->type == SCBLOC || + sym->type == SRAMLOC) + return (TRUE); + return (FALSE); +} + diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h new file mode 100644 index 000000000..8373447bd --- /dev/null +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h @@ -0,0 +1,218 @@ +/* + * Instruction formats for the sequencer program downloaded to + * Aic7xxx SCSI 
host adapters + * + * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_insformat.h#12 $ + * + * $FreeBSD$ + */ + +#include + +/* 8bit ALU logic operations */ +struct ins_format1 { +#ifdef __LITTLE_ENDIAN + uint32_t immediate : 8, + source : 9, + destination : 9, + ret : 1, + opcode : 4, + parity : 1; +#else + uint32_t parity : 1, + opcode : 4, + ret : 1, + destination : 9, + source : 9, + immediate : 8; +#endif +}; + +/* 8bit ALU shift/rotate operations */ +struct ins_format2 { +#ifdef __LITTLE_ENDIAN + uint32_t shift_control : 8, + source : 9, + destination : 9, + ret : 1, + opcode : 4, + parity : 1; +#else + uint32_t parity : 1, + opcode : 4, + ret : 1, + destination : 9, + source : 9, + shift_control : 8; +#endif +}; + +/* 8bit branch control operations */ +struct ins_format3 { +#ifdef __LITTLE_ENDIAN + uint32_t immediate : 8, + source : 9, + address : 10, + opcode : 4, + parity : 1; +#else + uint32_t parity : 1, + opcode : 4, + address : 10, + source : 9, + immediate : 8; +#endif +}; + +/* 16bit ALU logic operations */ +struct ins_format4 { +#ifdef __LITTLE_ENDIAN + uint32_t opcode_ext : 8, + source : 9, + destination : 9, + ret : 1, + opcode : 4, + parity : 1; +#else + uint32_t parity : 1, + opcode : 4, + ret : 1, + destination : 9, + source : 9, + opcode_ext : 8; +#endif +}; + +/* 16bit branch control operations */ +struct ins_format5 { +#ifdef __LITTLE_ENDIAN + uint32_t opcode_ext : 8, + source : 9, + address : 10, + opcode : 4, + parity : 1; +#else + uint32_t parity : 1, + opcode : 4, + address : 10, + source : 9, + opcode_ext : 8; +#endif +}; + +/* Far branch operations */ +struct ins_format6 { +#ifdef __LITTLE_ENDIAN + uint32_t page : 3, + opcode_ext : 5, + source : 9, + address : 10, + opcode : 4, + parity : 1; +#else + uint32_t parity : 1, + opcode : 4, + address : 10, + source : 9, + opcode_ext : 5, + page : 3; +#endif +}; + +union ins_formats { + struct ins_format1 format1; + struct ins_format2 format2; + struct ins_format3 format3; + struct ins_format4 format4; + struct ins_format5 format5; + struct ins_format6 format6; + uint8_t bytes[4]; + uint32_t integer; +}; +struct instruction { + union ins_formats format; + u_int srcline; + struct symbol *patch_label; + STAILQ_ENTRY(instruction) links; +}; + +#define AIC_OP_OR 0x0 +#define AIC_OP_AND 0x1 +#define AIC_OP_XOR 0x2 +#define AIC_OP_ADD 0x3 +#define AIC_OP_ADC 0x4 +#define AIC_OP_ROL 0x5 +#define AIC_OP_BMOV 0x6 + +#define AIC_OP_MVI16 0x7 + +#define AIC_OP_JMP 0x8 +#define AIC_OP_JC 0x9 +#define AIC_OP_JNC 0xa +#define AIC_OP_CALL 0xb +#define AIC_OP_JNE 0xc +#define AIC_OP_JNZ 0xd +#define AIC_OP_JE 0xe +#define AIC_OP_JZ 0xf + +/* Pseudo Ops */ +#define AIC_OP_SHL 0x10 +#define AIC_OP_SHR 0x20 +#define AIC_OP_ROR 0x30 + +/* 16bit Ops. Low byte main opcode. High byte extended opcode. */ +#define AIC_OP_OR16 0x8005 +#define AIC_OP_AND16 0x8105 +#define AIC_OP_XOR16 0x8205 +#define AIC_OP_ADD16 0x8305 +#define AIC_OP_ADC16 0x8405 +#define AIC_OP_JNE16 0x8805 +#define AIC_OP_JNZ16 0x8905 +#define AIC_OP_JE16 0x8C05 +#define AIC_OP_JZ16 0x8B05 +#define AIC_OP_JMP16 0x9005 +#define AIC_OP_JC16 0x9105 +#define AIC_OP_JNC16 0x9205 +#define AIC_OP_CALL16 0x9305 + +/* Page extension is low three bits of second opcode byte. 
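
A worked example of the 32-bit encoding the ins_format1 bitfields above describe, packed by hand in the little-endian member order (immediate, source, destination, ret, opcode, parity, from bit 0 upward). The register address 0x65 is made up; the field widths follow the struct definition.

#include <stdio.h>
#include <stdint.h>

static uint32_t pack_format1(unsigned opcode, unsigned dest, unsigned src,
			     unsigned immed, unsigned ret, unsigned parity)
{
	return (immed  & 0xffu)
	     | (src    & 0x1ffu) << 8
	     | (dest   & 0x1ffu) << 17
	     | (ret    & 0x1u)   << 26
	     | (opcode & 0xfu)   << 27
	     | (parity & 0x1u)   << 31;
}

int main(void)
{
	/* "and SOME_REG, 0x01" with a hypothetical register address 0x65 */
	uint32_t word = pack_format1(0x1 /* AIC_OP_AND */, 0x65, 0x65,
				     0x01, 0, 0);

	printf("encoded instruction word: 0x%08x\n", word);
	return 0;
}
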
*/ +#define AIC_OP_JMPF 0xA005 +#define AIC_OP_CALLF 0xB005 +#define AIC_OP_JCF 0xC005 +#define AIC_OP_JNCF 0xD005 +#define AIC_OP_CMPXCHG 0xE005 diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y new file mode 100644 index 000000000..8c0479865 --- /dev/null +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y @@ -0,0 +1,161 @@ +%{ +/* + * Sub-parser for macro invocation in the Aic7xxx SCSI + * Host adapter sequencer assembler. + * + * Copyright (c) 2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_macro_gram.y#5 $ + * + * $FreeBSD$ + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include "../queue.h" + +#include "aicasm.h" +#include "aicasm_symbol.h" +#include "aicasm_insformat.h" + +static symbol_t *macro_symbol; + +static void add_macro_arg(const char *argtext, int position); +void mmerror(const char *string); + +%} + +%union { + int value; + char *str; + symbol_t *sym; +} + + +%token T_ARG + +%token T_SYMBOL + +%type macro_arglist + +%% + +macrocall: + T_SYMBOL '(' + { + macro_symbol = $1; + } + macro_arglist ')' + { + if (macro_symbol->info.macroinfo->narg != $4) { + printf("Narg == %d", macro_symbol->info.macroinfo->narg); + stop("Too few arguments for macro invocation", + EX_DATAERR); + /* NOTREACHED */ + } + macro_symbol = NULL; + YYACCEPT; + } +; + +macro_arglist: + { + /* Macros can take 0 arguments */ + $$ = 0; + } +| T_ARG + { + $$ = 1; + add_macro_arg($1, 1); + } +| macro_arglist ',' T_ARG + { + if ($1 == 0) { + stop("Comma without preceding argument in arg list", + EX_DATAERR); + /* NOTREACHED */ + } + $$ = $1 + 1; + add_macro_arg($3, $$); + } +; + +%% + +static void +add_macro_arg(const char *argtext, int argnum) +{ + struct macro_arg *marg; + int i; + + if (macro_symbol == NULL || macro_symbol->type != MACRO) { + stop("Invalid current symbol for adding macro arg", + EX_SOFTWARE); + /* NOTREACHED */ + } + /* + * Macro Invocation. Find the appropriate argument and fill + * in the replace ment text for this call. + */ + i = 0; + STAILQ_FOREACH(marg, ¯o_symbol->info.macroinfo->args, links) { + i++; + if (i == argnum) + break; + } + if (marg == NULL) { + stop("Too many arguments for macro invocation", EX_DATAERR); + /* NOTREACHED */ + } + marg->replacement_text = strdup(argtext); + if (marg->replacement_text == NULL) { + stop("Unable to replicate replacement text", EX_SOFTWARE); + /* NOTREACHED */ + } +} + +void +mmerror(const char *string) +{ + stop(string, EX_DATAERR); +} diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l new file mode 100644 index 000000000..98e9959c6 --- /dev/null +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l @@ -0,0 +1,153 @@ +%{ +/* + * Sub-Lexical Analyzer for macro invokation in + * the Aic7xxx SCSI Host adapter sequencer assembler. + * + * Copyright (c) 2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_macro_scan.l#8 $ + * + * $FreeBSD$ + */ + +#include + +#include +#include +#include +#include +#include +#include +#include "../queue.h" + +#include "aicasm.h" +#include "aicasm_symbol.h" +#include "aicasm_macro_gram.h" + +#define MAX_STR_CONST 4096 +static char string_buf[MAX_STR_CONST]; +static char *string_buf_ptr; +static int parren_count; +static char buf[255]; +int mmlineno; +%} + +WORD [A-Za-z_][-A-Za-z_0-9]* +SPACE [ \t]+ +MCARG [^(), \t]+ + +%x ARGLIST + +%% +\n { + ++mmlineno; + } +\r ; +{SPACE} ; +\( { + parren_count++; + if (parren_count == 1) { + string_buf_ptr = string_buf; + return ('('); + } + *string_buf_ptr++ = '('; + } +\) { + if (parren_count == 1) { + if (string_buf_ptr != string_buf) { + /* + * Return an argument and + * rescan this parren so we + * can return it as well. + */ + *string_buf_ptr = '\0'; + mmlval.str = string_buf; + string_buf_ptr = string_buf; + unput(')'); + return T_ARG; + } + BEGIN INITIAL; + return (')'); + } + parren_count--; + *string_buf_ptr++ = ')'; + } +{MCARG} { + char *yptr; + + yptr = mmtext; + while (*yptr) + *string_buf_ptr++ = *yptr++; + } +\, { + if (string_buf_ptr != string_buf) { + /* + * Return an argument and + * rescan this comma so we + * can return it as well. + */ + *string_buf_ptr = '\0'; + mmlval.str = string_buf; + string_buf_ptr = string_buf; + unput(','); + return T_ARG; + } + return ','; + } +{WORD}[(] { + /* May be a symbol or a macro invocation. */ + mmlval.sym = symtable_get(mmtext); + if (mmlval.sym->type != MACRO) { + stop("Expecting Macro Name", + EX_DATAERR); + } + unput('('); + parren_count = 0; + BEGIN ARGLIST; + return T_SYMBOL; + } +. { + snprintf(buf, sizeof(buf), "Invalid character " + "'%c'", mmtext[0]); + stop(buf, EX_DATAERR); + } +%% + +int +mmwrap() +{ + stop("EOF encountered in macro call", EX_DATAERR); +} diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l new file mode 100644 index 000000000..c78d4f68e --- /dev/null +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l @@ -0,0 +1,618 @@ +%{ +/* + * Lexical Analyzer for the Aic7xxx SCSI Host adapter sequencer assembler. + * + * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs. + * Copyright (c) 2001, 2002 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_scan.l#20 $ + * + * $FreeBSD$ + */ + +#include + +#include +#include +#include +#include +#include +#include +#include "../queue.h" + +#include "aicasm.h" +#include "aicasm_symbol.h" +#include "aicasm_gram.h" + +/* This is used for macro body capture too, so err on the large size. */ +#define MAX_STR_CONST 4096 +static char string_buf[MAX_STR_CONST]; +static char *string_buf_ptr; +static int parren_count; +static int quote_count; +static char buf[255]; +%} + +PATH ([/]*[-A-Za-z0-9_.])+ +WORD [A-Za-z_][-A-Za-z_0-9]* +SPACE [ \t]+ +MCARG [^(), \t]+ +MBODY ((\\[^\n])*[^\n\\]*)+ + +%x COMMENT +%x CEXPR +%x INCLUDE +%x STRING +%x MACRODEF +%x MACROARGLIST +%x MACROCALLARGS +%x MACROBODY + +%% +\n { ++yylineno; } +\r ; +"/*" { BEGIN COMMENT; /* Enter comment eating state */ } +"/*" { fprintf(stderr, "Warning! 
Comment within comment."); } +\n { ++yylineno; } +[^*/\n]* ; +"*"+[^*/\n]* ; +"/"+[^*/\n]* ; +"*"+"/" { BEGIN INITIAL; } +if[ \t]*\( { + string_buf_ptr = string_buf; + parren_count = 1; + BEGIN CEXPR; + return T_IF; + } +\( { *string_buf_ptr++ = '('; parren_count++; } +\) { + parren_count--; + if (parren_count == 0) { + /* All done */ + BEGIN INITIAL; + *string_buf_ptr = '\0'; + yylval.sym = symtable_get(string_buf); + return T_CEXPR; + } else { + *string_buf_ptr++ = ')'; + } + } +\n { ++yylineno; } +\r ; +[^()\n]+ { + char *yptr; + + yptr = yytext; + while (*yptr != '\0') { + /* Remove duplicate spaces */ + if (*yptr == '\t') + *yptr = ' '; + if (*yptr == ' ' + && string_buf_ptr != string_buf + && string_buf_ptr[-1] == ' ') + yptr++; + else + *string_buf_ptr++ = *yptr++; + } + } +else { return T_ELSE; } +VERSION { return T_VERSION; } +PREFIX { return T_PREFIX; } +PATCH_ARG_LIST { return T_PATCH_ARG_LIST; } +\" { + string_buf_ptr = string_buf; + BEGIN STRING; + } +[^"]+ { + char *yptr; + + yptr = yytext; + while (*yptr) + *string_buf_ptr++ = *yptr++; + } +\" { + /* All done */ + BEGIN INITIAL; + *string_buf_ptr = '\0'; + yylval.str = string_buf; + return T_STRING; + } +{SPACE} ; + + /* Register/SCB/SRAM definition keywords */ +export { return T_EXPORT; } +register { return T_REGISTER; } +const { yylval.value = FALSE; return T_CONST; } +download { return T_DOWNLOAD; } +address { return T_ADDRESS; } +count { return T_COUNT; } +access_mode { return T_ACCESS_MODE; } +dont_generate_debug_code { return T_DONT_GENERATE_DEBUG_CODE; } +modes { return T_MODES; } +RW|RO|WO { + if (strcmp(yytext, "RW") == 0) + yylval.value = RW; + else if (strcmp(yytext, "RO") == 0) + yylval.value = RO; + else + yylval.value = WO; + return T_MODE; + } +field { return T_FIELD; } +enum { return T_ENUM; } +mask { return T_MASK; } +alias { return T_ALIAS; } +size { return T_SIZE; } +scb { return T_SCB; } +scratch_ram { return T_SRAM; } +accumulator { return T_ACCUM; } +mode_pointer { return T_MODE_PTR; } +allones { return T_ALLONES; } +allzeros { return T_ALLZEROS; } +none { return T_NONE; } +sindex { return T_SINDEX; } +A { return T_A; } + + /* Instruction Formatting */ +PAD_PAGE { return T_PAD_PAGE; } +BEGIN_CRITICAL { return T_BEGIN_CS; } +END_CRITICAL { return T_END_CS; } +SET_SRC_MODE { return T_SET_SRC_MODE; } +SET_DST_MODE { return T_SET_DST_MODE; } + + /* Opcodes */ +shl { return T_SHL; } +shr { return T_SHR; } +ror { return T_ROR; } +rol { return T_ROL; } +mvi { return T_MVI; } +mov { return T_MOV; } +clr { return T_CLR; } +jmp { return T_JMP; } +jc { return T_JC; } +jnc { return T_JNC; } +je { return T_JE; } +jne { return T_JNE; } +jz { return T_JZ; } +jnz { return T_JNZ; } +call { return T_CALL; } +add { return T_ADD; } +adc { return T_ADC; } +bmov { return T_BMOV; } +inc { return T_INC; } +dec { return T_DEC; } +stc { return T_STC; } +clc { return T_CLC; } +cmp { return T_CMP; } +not { return T_NOT; } +xor { return T_XOR; } +test { return T_TEST;} +and { return T_AND; } +or { return T_OR; } +ret { return T_RET; } +nop { return T_NOP; } + + /* ARP2 16bit extensions */ + /* or16 { return T_OR16; } */ + /* and16 { return T_AND16; }*/ + /* xor16 { return T_XOR16; }*/ + /* add16 { return T_ADD16; }*/ + /* adc16 { return T_ADC16; }*/ + /* mvi16 { return T_MVI16; }*/ + /* test16 { return T_TEST16; }*/ + /* cmp16 { return T_CMP16; }*/ + /* cmpxchg { return T_CMPXCHG; }*/ + + /* Allowed Symbols */ +\<\< { return T_EXPR_LSHIFT; } +\>\> { return T_EXPR_RSHIFT; } +[-+,:()~|&."{};<>[\]/*!=] { return yytext[0]; } + + /* 
Number processing */ +0[0-7]* { + yylval.value = strtol(yytext, NULL, 8); + return T_NUMBER; + } + +0[xX][0-9a-fA-F]+ { + yylval.value = strtoul(yytext + 2, NULL, 16); + return T_NUMBER; + } + +[1-9][0-9]* { + yylval.value = strtol(yytext, NULL, 10); + return T_NUMBER; + } + /* Include Files */ +#include{SPACE} { + BEGIN INCLUDE; + quote_count = 0; + return T_INCLUDE; + } +[<] { return yytext[0]; } +[>] { BEGIN INITIAL; return yytext[0]; } +[\"] { + if (quote_count != 0) + BEGIN INITIAL; + quote_count++; + return yytext[0]; + } +{PATH} { + char *yptr; + + yptr = yytext; + string_buf_ptr = string_buf; + while (*yptr) + *string_buf_ptr++ = *yptr++; + yylval.str = string_buf; + *string_buf_ptr = '\0'; + return T_PATH; + } +. { stop("Invalid include line", EX_DATAERR); } +#define{SPACE} { + BEGIN MACRODEF; + return T_DEFINE; + } +{WORD}{SPACE} { + char *yptr; + + /* Strip space and return as a normal symbol */ + yptr = yytext; + while (*yptr != ' ' && *yptr != '\t') + yptr++; + *yptr = '\0'; + yylval.sym = symtable_get(yytext); + string_buf_ptr = string_buf; + BEGIN MACROBODY; + return T_SYMBOL; + } +{WORD}\( { + /* + * We store the symbol with its opening + * parren so we can differentiate macros + * that take args from macros with the + * same name that do not take args as + * is allowed in C. + */ + BEGIN MACROARGLIST; + yylval.sym = symtable_get(yytext); + unput('('); + return T_SYMBOL; + } +{WORD} { + yylval.str = yytext; + return T_ARG; + } +{SPACE} ; +[(,] { + return yytext[0]; + } +[)] { + string_buf_ptr = string_buf; + BEGIN MACROBODY; + return ')'; + } +. { + snprintf(buf, sizeof(buf), "Invalid character " + "'%c' in macro argument list", + yytext[0]); + stop(buf, EX_DATAERR); + } +{SPACE} ; +\( { + parren_count++; + if (parren_count == 1) + return ('('); + *string_buf_ptr++ = '('; + } +\) { + parren_count--; + if (parren_count == 0) { + BEGIN INITIAL; + return (')'); + } + *string_buf_ptr++ = ')'; + } +{MCARG} { + char *yptr; + + yptr = yytext; + while (*yptr) + *string_buf_ptr++ = *yptr++; + } +\, { + if (string_buf_ptr != string_buf) { + /* + * Return an argument and + * rescan this comma so we + * can return it as well. + */ + *string_buf_ptr = '\0'; + yylval.str = string_buf; + string_buf_ptr = string_buf; + unput(','); + return T_ARG; + } + return ','; + } +\\\n { + /* Eat escaped newlines. */ + ++yylineno; + } +\r ; +\n { + /* Macros end on the first unescaped newline. */ + BEGIN INITIAL; + *string_buf_ptr = '\0'; + yylval.str = string_buf; + ++yylineno; + return T_MACROBODY; + } +{MBODY} { + char *yptr; + char c; + + yptr = yytext; + while (c = *yptr++) { + /* + * Strip carriage returns. + */ + if (c == '\r') + continue; + *string_buf_ptr++ = c; + } + } +{WORD}\( { + char *yptr; + char *ycopy; + + /* May be a symbol or a macro invocation. 
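
The number rules above hand different bases to strtol/strtoul depending on the literal's prefix; a quick standalone check of that behaviour (the helper name is made up):

#include <stdio.h>
#include <stdlib.h>

static long parse_like_aicasm(const char *text)
{
	if (text[0] == '0' && (text[1] == 'x' || text[1] == 'X'))
		return (long)strtoul(text + 2, NULL, 16);
	if (text[0] == '0')
		return strtol(text, NULL, 8);
	return strtol(text, NULL, 10);
}

int main(void)
{
	printf("010  -> %ld\n", parse_like_aicasm("010"));	/* octal: 8 */
	printf("0x10 -> %ld\n", parse_like_aicasm("0x10"));	/* hex: 16 */
	printf("10   -> %ld\n", parse_like_aicasm("10"));	/* decimal: 10 */
	return 0;
}
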
*/ + yylval.sym = symtable_get(yytext); + if (yylval.sym->type == MACRO) { + YY_BUFFER_STATE old_state; + YY_BUFFER_STATE temp_state; + + ycopy = strdup(yytext); + yptr = ycopy + yyleng; + while (yptr > ycopy) + unput(*--yptr); + old_state = YY_CURRENT_BUFFER; + temp_state = + yy_create_buffer(stdin, + YY_BUF_SIZE); + yy_switch_to_buffer(temp_state); + mm_switch_to_buffer(old_state); + mmparse(); + mm_switch_to_buffer(temp_state); + yy_switch_to_buffer(old_state); + mm_delete_buffer(temp_state); + expand_macro(yylval.sym); + } else { + if (yylval.sym->type == UNINITIALIZED) { + /* Try without the '(' */ + symbol_delete(yylval.sym); + yytext[yyleng-1] = '\0'; + yylval.sym = + symtable_get(yytext); + } + unput('('); + return T_SYMBOL; + } + } +{WORD} { + yylval.sym = symtable_get(yytext); + if (yylval.sym->type == MACRO) { + expand_macro(yylval.sym); + } else { + return T_SYMBOL; + } + } +. { + snprintf(buf, sizeof(buf), "Invalid character " + "'%c'", yytext[0]); + stop(buf, EX_DATAERR); + } +%% + +typedef struct include { + YY_BUFFER_STATE buffer; + int lineno; + char *filename; + SLIST_ENTRY(include) links; +}include_t; + +SLIST_HEAD(, include) include_stack; + +void +include_file(char *file_name, include_type type) +{ + FILE *newfile; + include_t *include; + + newfile = NULL; + /* Try the current directory first */ + if (includes_search_curdir != 0 || type == SOURCE_FILE) + newfile = fopen(file_name, "r"); + + if (newfile == NULL && type != SOURCE_FILE) { + path_entry_t include_dir; + for (include_dir = search_path.slh_first; + include_dir != NULL; + include_dir = include_dir->links.sle_next) { + char fullname[PATH_MAX]; + + if ((include_dir->quoted_includes_only == TRUE) + && (type != QUOTED_INCLUDE)) + continue; + + snprintf(fullname, sizeof(fullname), + "%s/%s", include_dir->directory, file_name); + + if ((newfile = fopen(fullname, "r")) != NULL) + break; + } + } + + if (newfile == NULL) { + perror(file_name); + stop("Unable to open input file", EX_SOFTWARE); + /* NOTREACHED */ + } + + if (type != SOURCE_FILE) { + include = (include_t *)malloc(sizeof(include_t)); + if (include == NULL) { + stop("Unable to allocate include stack entry", + EX_SOFTWARE); + /* NOTREACHED */ + } + include->buffer = YY_CURRENT_BUFFER; + include->lineno = yylineno; + include->filename = yyfilename; + SLIST_INSERT_HEAD(&include_stack, include, links); + } + yy_switch_to_buffer(yy_create_buffer(newfile, YY_BUF_SIZE)); + yylineno = 1; + yyfilename = strdup(file_name); +} + +static void next_substitution(struct symbol *mac_symbol, const char *body_pos, + const char **next_match, + struct macro_arg **match_marg, regmatch_t *match); + +void +expand_macro(struct symbol *macro_symbol) +{ + struct macro_arg *marg; + struct macro_arg *match_marg; + const char *body_head; + const char *body_pos; + const char *next_match; + + /* + * Due to the nature of unput, we must work + * backwards through the macro body performing + * any expansions. + */ + body_head = macro_symbol->info.macroinfo->body; + body_pos = body_head + strlen(body_head); + while (body_pos > body_head) { + regmatch_t match; + + next_match = body_head; + match_marg = NULL; + next_substitution(macro_symbol, body_pos, &next_match, + &match_marg, &match); + + /* Put back everything up until the replacement. */ + while (body_pos > next_match) + unput(*--body_pos); + + /* Perform the replacement. 
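
The replacement step here relies on the per-argument regular expressions compiled in add_macro_arg() (in aicasm_gram.y above). A self-contained POSIX-regex demo of that pattern shape, showing that an argument name only matches when delimited by non-symbol characters; the body text and the argument name "len" are invented.

#include <stdio.h>
#include <regex.h>

int main(void)
{
	const char *body = " mvi length call foo(len) ";	/* hypothetical body */
	char pattern[256];
	regex_t re;
	regmatch_t match[2];

	snprintf(pattern, sizeof(pattern),
		 "[^-/A-Za-z0-9_](%s)([^-/A-Za-z0-9_]|$)", "len");
	if (regcomp(&re, pattern, REG_EXTENDED) != 0)
		return 1;
	/* "len" inside "length" is skipped; the parenthesised use matches. */
	if (regexec(&re, body, 2, match, 0) == 0)
		printf("arg matched at offsets %d..%d\n",
		       (int)match[1].rm_so, (int)match[1].rm_eo);
	regfree(&re);
	return 0;
}
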
*/ + if (match_marg != NULL) { + const char *strp; + + next_match = match_marg->replacement_text; + strp = next_match + strlen(next_match); + while (strp > next_match) + unput(*--strp); + + /* Skip past the unexpanded macro arg. */ + body_pos -= match.rm_eo - match.rm_so; + } + } + + /* Cleanup replacement text. */ + STAILQ_FOREACH(marg, &macro_symbol->info.macroinfo->args, links) { + free(marg->replacement_text); + } +} + +/* + * Find the next substitution in the macro working backwards from + * body_pos until the beginning of the macro buffer. next_match + * should be initialized to the beginning of the macro buffer prior + * to calling this routine. + */ +static void +next_substitution(struct symbol *mac_symbol, const char *body_pos, + const char **next_match, struct macro_arg **match_marg, + regmatch_t *match) +{ + regmatch_t matches[2]; + struct macro_arg *marg; + const char *search_pos; + int retval; + + do { + search_pos = *next_match; + + STAILQ_FOREACH(marg, &mac_symbol->info.macroinfo->args, links) { + + retval = regexec(&marg->arg_regex, search_pos, 2, + matches, 0); + if (retval == 0 + && (matches[1].rm_eo + search_pos) <= body_pos + && (matches[1].rm_eo + search_pos) > *next_match) { + *match = matches[1]; + *next_match = match->rm_eo + search_pos; + *match_marg = marg; + } + } + } while (search_pos != *next_match); +} + +int +yywrap() +{ + include_t *include; + + yy_delete_buffer(YY_CURRENT_BUFFER); + (void)fclose(yyin); + if (yyfilename != NULL) + free(yyfilename); + yyfilename = NULL; + include = include_stack.slh_first; + if (include != NULL) { + yy_switch_to_buffer(include->buffer); + yylineno = include->lineno; + yyfilename = include->filename; + SLIST_REMOVE_HEAD(&include_stack, links); + free(include); + return (0); + } + return (1); +} diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c new file mode 100644 index 000000000..2b44eb570 --- /dev/null +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c @@ -0,0 +1,690 @@ +/* + * Aic7xxx SCSI host adapter firmware assembler symbol table implementation + * + * Copyright (c) 1997 Justin T. Gibbs. + * Copyright (c) 2002 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_symbol.c#24 $ + * + * $FreeBSD$ + */ + +#include + +#include "aicdb.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "aicasm_symbol.h" +#include "aicasm.h" + +static DB *symtable; + +symbol_t * +symbol_create(char *name) +{ + symbol_t *new_symbol; + + new_symbol = (symbol_t *)malloc(sizeof(symbol_t)); + if (new_symbol == NULL) { + perror("Unable to create new symbol"); + exit(EX_SOFTWARE); + } + memset(new_symbol, 0, sizeof(*new_symbol)); + new_symbol->name = strdup(name); + if (new_symbol->name == NULL) + stop("Unable to strdup symbol name", EX_SOFTWARE); + new_symbol->type = UNINITIALIZED; + new_symbol->count = 1; + return (new_symbol); +} + +void +symbol_delete(symbol_t *symbol) +{ + if (symtable != NULL) { + DBT key; + + key.data = symbol->name; + key.size = strlen(symbol->name); + symtable->del(symtable, &key, /*flags*/0); + } + switch(symbol->type) { + case SCBLOC: + case SRAMLOC: + case REGISTER: + if (symbol->info.rinfo != NULL) + free(symbol->info.rinfo); + break; + case ALIAS: + if (symbol->info.ainfo != NULL) + free(symbol->info.ainfo); + break; + case MASK: + case FIELD: + case ENUM: + case ENUM_ENTRY: + if (symbol->info.finfo != NULL) { + symlist_free(&symbol->info.finfo->symrefs); + free(symbol->info.finfo); + } + break; + case DOWNLOAD_CONST: + case CONST: + if (symbol->info.cinfo != NULL) + free(symbol->info.cinfo); + break; + case LABEL: + if (symbol->info.linfo != NULL) + free(symbol->info.linfo); + break; + case UNINITIALIZED: + default: + break; + } + free(symbol->name); + free(symbol); +} + +void +symtable_open() +{ + symtable = dbopen(/*filename*/NULL, + O_CREAT | O_NONBLOCK | O_RDWR, /*mode*/0, DB_HASH, + /*openinfo*/NULL); + + if (symtable == NULL) { + perror("Symbol table creation failed"); + exit(EX_SOFTWARE); + /* NOTREACHED */ + } +} + +void +symtable_close() +{ + if (symtable != NULL) { + DBT key; + DBT data; + + while (symtable->seq(symtable, &key, &data, R_FIRST) == 0) { + symbol_t *stored_ptr; + + memcpy(&stored_ptr, data.data, sizeof(stored_ptr)); + symbol_delete(stored_ptr); + } + symtable->close(symtable); + } +} + +/* + * The semantics of get is to return an uninitialized symbol entry + * if a lookup fails. 
+ */ +symbol_t * +symtable_get(char *name) +{ + symbol_t *stored_ptr; + DBT key; + DBT data; + int retval; + + key.data = (void *)name; + key.size = strlen(name); + + if ((retval = symtable->get(symtable, &key, &data, /*flags*/0)) != 0) { + if (retval == -1) { + perror("Symbol table get operation failed"); + exit(EX_SOFTWARE); + /* NOTREACHED */ + } else if (retval == 1) { + /* Symbol wasn't found, so create a new one */ + symbol_t *new_symbol; + + new_symbol = symbol_create(name); + data.data = &new_symbol; + data.size = sizeof(new_symbol); + if (symtable->put(symtable, &key, &data, + /*flags*/0) !=0) { + perror("Symtable put failed"); + exit(EX_SOFTWARE); + } + return (new_symbol); + } else { + perror("Unexpected return value from db get routine"); + exit(EX_SOFTWARE); + /* NOTREACHED */ + } + } + memcpy(&stored_ptr, data.data, sizeof(stored_ptr)); + stored_ptr->count++; + data.data = &stored_ptr; + if (symtable->put(symtable, &key, &data, /*flags*/0) !=0) { + perror("Symtable put failed"); + exit(EX_SOFTWARE); + } + return (stored_ptr); +} + +symbol_node_t * +symlist_search(symlist_t *symlist, char *symname) +{ + symbol_node_t *curnode; + + curnode = SLIST_FIRST(symlist); + while(curnode != NULL) { + if (strcmp(symname, curnode->symbol->name) == 0) + break; + curnode = SLIST_NEXT(curnode, links); + } + return (curnode); +} + +void +symlist_add(symlist_t *symlist, symbol_t *symbol, int how) +{ + symbol_node_t *newnode; + + newnode = (symbol_node_t *)malloc(sizeof(symbol_node_t)); + if (newnode == NULL) { + stop("symlist_add: Unable to malloc symbol_node", EX_SOFTWARE); + /* NOTREACHED */ + } + newnode->symbol = symbol; + if (how == SYMLIST_SORT) { + symbol_node_t *curnode; + int field; + + field = FALSE; + switch(symbol->type) { + case REGISTER: + case SCBLOC: + case SRAMLOC: + break; + case FIELD: + case MASK: + case ENUM: + case ENUM_ENTRY: + field = TRUE; + break; + default: + stop("symlist_add: Invalid symbol type for sorting", + EX_SOFTWARE); + /* NOTREACHED */ + } + + curnode = SLIST_FIRST(symlist); + if (curnode == NULL + || (field + && (curnode->symbol->type > newnode->symbol->type + || (curnode->symbol->type == newnode->symbol->type + && (curnode->symbol->info.finfo->value > + newnode->symbol->info.finfo->value)))) + || (!field && (curnode->symbol->info.rinfo->address > + newnode->symbol->info.rinfo->address))) { + SLIST_INSERT_HEAD(symlist, newnode, links); + return; + } + + while (1) { + if (SLIST_NEXT(curnode, links) == NULL) { + SLIST_INSERT_AFTER(curnode, newnode, + links); + break; + } else { + symbol_t *cursymbol; + + cursymbol = SLIST_NEXT(curnode, links)->symbol; + if ((field + && (cursymbol->type > symbol->type + || (cursymbol->type == symbol->type + && (cursymbol->info.finfo->value > + symbol->info.finfo->value)))) + || (!field + && (cursymbol->info.rinfo->address > + symbol->info.rinfo->address))) { + SLIST_INSERT_AFTER(curnode, newnode, + links); + break; + } + } + curnode = SLIST_NEXT(curnode, links); + } + } else { + SLIST_INSERT_HEAD(symlist, newnode, links); + } +} + +void +symlist_free(symlist_t *symlist) +{ + symbol_node_t *node1, *node2; + + node1 = SLIST_FIRST(symlist); + while (node1 != NULL) { + node2 = SLIST_NEXT(node1, links); + free(node1); + node1 = node2; + } + SLIST_INIT(symlist); +} + +void +symlist_merge(symlist_t *symlist_dest, symlist_t *symlist_src1, + symlist_t *symlist_src2) +{ + symbol_node_t *node; + + *symlist_dest = *symlist_src1; + while((node = SLIST_FIRST(symlist_src2)) != NULL) { + SLIST_REMOVE_HEAD(symlist_src2, links); + 
SLIST_INSERT_HEAD(symlist_dest, node, links); + } + + /* These are now empty */ + SLIST_INIT(symlist_src1); + SLIST_INIT(symlist_src2); +} + +void +aic_print_file_prologue(FILE *ofile) +{ + + if (ofile == NULL) + return; + + fprintf(ofile, +"/*\n" +" * DO NOT EDIT - This file is automatically generated\n" +" * from the following source files:\n" +" *\n" +"%s */\n", + versions); +} + +void +aic_print_include(FILE *dfile, char *include_file) +{ + + if (dfile == NULL) + return; + fprintf(dfile, "\n#include \"%s\"\n\n", include_file); +} + +void +aic_print_reg_dump_types(FILE *ofile) +{ + if (ofile == NULL) + return; + + fprintf(ofile, +"typedef int (%sreg_print_t)(u_int, u_int *, u_int);\n" +"typedef struct %sreg_parse_entry {\n" +" char *name;\n" +" uint8_t value;\n" +" uint8_t mask;\n" +"} %sreg_parse_entry_t;\n" +"\n", + prefix, prefix, prefix); +} + +static void +aic_print_reg_dump_start(FILE *dfile, symbol_node_t *regnode) +{ + if (dfile == NULL) + return; + + fprintf(dfile, +"static const %sreg_parse_entry_t %s_parse_table[] = {\n", + prefix, + regnode->symbol->name); +} + +static void +aic_print_reg_dump_end(FILE *ofile, FILE *dfile, + symbol_node_t *regnode, u_int num_entries) +{ + char *lower_name; + char *letter; + + lower_name = strdup(regnode->symbol->name); + if (lower_name == NULL) + stop("Unable to strdup symbol name", EX_SOFTWARE); + + for (letter = lower_name; *letter != '\0'; letter++) + *letter = tolower(*letter); + + if (dfile != NULL) { + if (num_entries != 0) + fprintf(dfile, +"\n" +"};\n" +"\n"); + + fprintf(dfile, +"int\n" +"%s%s_print(u_int regvalue, u_int *cur_col, u_int wrap)\n" +"{\n" +" return (%sprint_register(%s%s, %d, \"%s\",\n" +" 0x%02x, regvalue, cur_col, wrap));\n" +"}\n" +"\n", + prefix, + lower_name, + prefix, + num_entries != 0 ? regnode->symbol->name : "NULL", + num_entries != 0 ? "_parse_table" : "", + num_entries, + regnode->symbol->name, + regnode->symbol->info.rinfo->address); + } + + fprintf(ofile, +"#if AIC_DEBUG_REGISTERS\n" +"%sreg_print_t %s%s_print;\n" +"#else\n" +"#define %s%s_print(regvalue, cur_col, wrap) \\\n" +" %sprint_register(NULL, 0, \"%s\", 0x%02x, regvalue, cur_col, wrap)\n" +"#endif\n" +"\n", + prefix, + prefix, + lower_name, + prefix, + lower_name, + prefix, + regnode->symbol->name, + regnode->symbol->info.rinfo->address); +} + +static void +aic_print_reg_dump_entry(FILE *dfile, symbol_node_t *curnode) +{ + int num_tabs; + + if (dfile == NULL) + return; + + fprintf(dfile, +" { \"%s\",", + curnode->symbol->name); + + num_tabs = 3 - (strlen(curnode->symbol->name) + 5) / 8; + + while (num_tabs-- > 0) + fputc('\t', dfile); + fprintf(dfile, "0x%02x, 0x%02x }", + curnode->symbol->info.finfo->value, + curnode->symbol->info.finfo->mask); +} + +void +symtable_dump(FILE *ofile, FILE *dfile) +{ + /* + * Sort the registers by address with a simple insertion sort. + * Put bitmasks next to the first register that defines them. + * Put constants at the end. 
+ */ + symlist_t registers; + symlist_t masks; + symlist_t constants; + symlist_t download_constants; + symlist_t aliases; + symlist_t exported_labels; + symbol_node_t *curnode; + symbol_node_t *regnode; + DBT key; + DBT data; + int flag; + int reg_count = 0, reg_used = 0; + u_int i; + + if (symtable == NULL) + return; + + SLIST_INIT(&registers); + SLIST_INIT(&masks); + SLIST_INIT(&constants); + SLIST_INIT(&download_constants); + SLIST_INIT(&aliases); + SLIST_INIT(&exported_labels); + flag = R_FIRST; + while (symtable->seq(symtable, &key, &data, flag) == 0) { + symbol_t *cursym; + + memcpy(&cursym, data.data, sizeof(cursym)); + switch(cursym->type) { + case REGISTER: + case SCBLOC: + case SRAMLOC: + symlist_add(&registers, cursym, SYMLIST_SORT); + break; + case MASK: + case FIELD: + case ENUM: + case ENUM_ENTRY: + symlist_add(&masks, cursym, SYMLIST_SORT); + break; + case CONST: + symlist_add(&constants, cursym, + SYMLIST_INSERT_HEAD); + break; + case DOWNLOAD_CONST: + symlist_add(&download_constants, cursym, + SYMLIST_INSERT_HEAD); + break; + case ALIAS: + symlist_add(&aliases, cursym, + SYMLIST_INSERT_HEAD); + break; + case LABEL: + if (cursym->info.linfo->exported == 0) + break; + symlist_add(&exported_labels, cursym, + SYMLIST_INSERT_HEAD); + break; + default: + break; + } + flag = R_NEXT; + } + + /* Register dianostic functions/declarations first. */ + aic_print_file_prologue(ofile); + aic_print_reg_dump_types(ofile); + aic_print_file_prologue(dfile); + aic_print_include(dfile, stock_include_file); + SLIST_FOREACH(curnode, &registers, links) { + + if (curnode->symbol->dont_generate_debug_code) + continue; + + switch(curnode->symbol->type) { + case REGISTER: + case SCBLOC: + case SRAMLOC: + { + symlist_t *fields; + symbol_node_t *fieldnode; + int num_entries; + + num_entries = 0; + reg_count++; + if (curnode->symbol->count == 1) + break; + fields = &curnode->symbol->info.rinfo->fields; + SLIST_FOREACH(fieldnode, fields, links) { + if (num_entries == 0) + aic_print_reg_dump_start(dfile, + curnode); + else if (dfile != NULL) + fputs(",\n", dfile); + num_entries++; + aic_print_reg_dump_entry(dfile, fieldnode); + } + aic_print_reg_dump_end(ofile, dfile, + curnode, num_entries); + reg_used++; + } + default: + break; + } + } + fprintf(stderr, "%s: %d of %d register definitions used\n", appname, + reg_used, reg_count); + + /* Fold in the masks and bits */ + while (SLIST_FIRST(&masks) != NULL) { + char *regname; + + curnode = SLIST_FIRST(&masks); + SLIST_REMOVE_HEAD(&masks, links); + + regnode = SLIST_FIRST(&curnode->symbol->info.finfo->symrefs); + regname = regnode->symbol->name; + regnode = symlist_search(&registers, regname); + SLIST_INSERT_AFTER(regnode, curnode, links); + } + + /* Add the aliases */ + while (SLIST_FIRST(&aliases) != NULL) { + char *regname; + + curnode = SLIST_FIRST(&aliases); + SLIST_REMOVE_HEAD(&aliases, links); + + regname = curnode->symbol->info.ainfo->parent->name; + regnode = symlist_search(&registers, regname); + SLIST_INSERT_AFTER(regnode, curnode, links); + } + + /* Output generated #defines.
*/ + while (SLIST_FIRST(&registers) != NULL) { + symbol_node_t *curnode; + u_int value; + char *tab_str; + char *tab_str2; + + curnode = SLIST_FIRST(&registers); + SLIST_REMOVE_HEAD(&registers, links); + switch(curnode->symbol->type) { + case REGISTER: + case SCBLOC: + case SRAMLOC: + fprintf(ofile, "\n"); + value = curnode->symbol->info.rinfo->address; + tab_str = "\t"; + tab_str2 = "\t\t"; + break; + case ALIAS: + { + symbol_t *parent; + + parent = curnode->symbol->info.ainfo->parent; + value = parent->info.rinfo->address; + tab_str = "\t"; + tab_str2 = "\t\t"; + break; + } + case MASK: + case FIELD: + case ENUM: + case ENUM_ENTRY: + value = curnode->symbol->info.finfo->value; + tab_str = "\t\t"; + tab_str2 = "\t"; + break; + default: + value = 0; /* Quiet compiler */ + tab_str = NULL; + tab_str2 = NULL; + stop("symtable_dump: Invalid symbol type " + "encountered", EX_SOFTWARE); + break; + } + fprintf(ofile, "#define%s%-16s%s0x%02x\n", + tab_str, curnode->symbol->name, tab_str2, + value); + free(curnode); + } + fprintf(ofile, "\n\n"); + + while (SLIST_FIRST(&constants) != NULL) { + symbol_node_t *curnode; + + curnode = SLIST_FIRST(&constants); + SLIST_REMOVE_HEAD(&constants, links); + fprintf(ofile, "#define\t%-8s\t0x%02x\n", + curnode->symbol->name, + curnode->symbol->info.cinfo->value); + free(curnode); + } + + fprintf(ofile, "\n\n/* Downloaded Constant Definitions */\n"); + + for (i = 0; SLIST_FIRST(&download_constants) != NULL; i++) { + symbol_node_t *curnode; + + curnode = SLIST_FIRST(&download_constants); + SLIST_REMOVE_HEAD(&download_constants, links); + fprintf(ofile, "#define\t%-8s\t0x%02x\n", + curnode->symbol->name, + curnode->symbol->info.cinfo->value); + free(curnode); + } + fprintf(ofile, "#define\tDOWNLOAD_CONST_COUNT\t0x%02x\n", i); + + fprintf(ofile, "\n\n/* Exported Labels */\n"); + + while (SLIST_FIRST(&exported_labels) != NULL) { + symbol_node_t *curnode; + + curnode = SLIST_FIRST(&exported_labels); + SLIST_REMOVE_HEAD(&exported_labels, links); + fprintf(ofile, "#define\tLABEL_%-8s\t0x%02x\n", + curnode->symbol->name, + curnode->symbol->info.linfo->address); + free(curnode); + } +} + diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h new file mode 100644 index 000000000..ed3bdd43c --- /dev/null +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h @@ -0,0 +1,205 @@ +/* + * Aic7xxx SCSI host adapter firmware assembler symbol table definitions + * + * Copyright (c) 1997 Justin T. Gibbs. + * Copyright (c) 2002 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission.
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_symbol.h#17 $ + * + * $FreeBSD$ + */ + +#include "../queue.h" + +typedef enum { + UNINITIALIZED, + REGISTER, + ALIAS, + SCBLOC, + SRAMLOC, + ENUM_ENTRY, + FIELD, + MASK, + ENUM, + CONST, + DOWNLOAD_CONST, + LABEL, + CONDITIONAL, + MACRO +} symtype; + +typedef enum { + RO = 0x01, + WO = 0x02, + RW = 0x03 +}amode_t; + +typedef SLIST_HEAD(symlist, symbol_node) symlist_t; + +struct reg_info { + u_int address; + int size; + amode_t mode; + symlist_t fields; + uint8_t valid_bitmask; + uint8_t modes; + int typecheck_masks; +}; + +struct field_info { + symlist_t symrefs; + uint8_t value; + uint8_t mask; +}; + +struct const_info { + u_int value; + int define; +}; + +struct alias_info { + struct symbol *parent; +}; + +struct label_info { + int address; + int exported; +}; + +struct cond_info { + int func_num; +}; + +struct macro_arg { + STAILQ_ENTRY(macro_arg) links; + regex_t arg_regex; + char *replacement_text; +}; +STAILQ_HEAD(macro_arg_list, macro_arg); + +struct macro_info { + struct macro_arg_list args; + int narg; + const char* body; +}; + +typedef struct expression_info { + symlist_t referenced_syms; + int value; +} expression_t; + +typedef struct symbol { + char *name; + symtype type; + int count; + union { + struct reg_info *rinfo; + struct field_info *finfo; + struct const_info *cinfo; + struct alias_info *ainfo; + struct label_info *linfo; + struct cond_info *condinfo; + struct macro_info *macroinfo; + } info; + int dont_generate_debug_code; +} symbol_t; + +typedef struct symbol_ref { + symbol_t *symbol; + int offset; +} symbol_ref_t; + +typedef struct symbol_node { + SLIST_ENTRY(symbol_node) links; + symbol_t *symbol; +} symbol_node_t; + +typedef struct critical_section { + TAILQ_ENTRY(critical_section) links; + int begin_addr; + int end_addr; +} critical_section_t; + +typedef enum { + SCOPE_ROOT, + SCOPE_IF, + SCOPE_ELSE_IF, + SCOPE_ELSE +} scope_type; + +typedef struct patch_info { + int skip_patch; + int skip_instr; +} patch_info_t; + +typedef struct scope { + SLIST_ENTRY(scope) scope_stack_links; + TAILQ_ENTRY(scope) scope_links; + TAILQ_HEAD(, scope) inner_scope; + scope_type type; + int inner_scope_patches; + int begin_addr; + int end_addr; + patch_info_t patches[2]; + int func_num; +} scope_t; + +TAILQ_HEAD(cs_tailq, critical_section); +SLIST_HEAD(scope_list, scope); +TAILQ_HEAD(scope_tailq, scope); + +void symbol_delete(symbol_t *symbol); + +void symtable_open(void); + +void symtable_close(void); + +symbol_t * + symtable_get(char *name); + +symbol_node_t * + 
symlist_search(symlist_t *symlist, char *symname); + +void + symlist_add(symlist_t *symlist, symbol_t *symbol, int how); +#define SYMLIST_INSERT_HEAD 0x00 +#define SYMLIST_SORT 0x01 + +void symlist_free(symlist_t *symlist); + +void symlist_merge(symlist_t *symlist_dest, symlist_t *symlist_src1, + symlist_t *symlist_src2); +void symtable_dump(FILE *ofile, FILE *dfile); diff --git a/drivers/scsi/aic7xxx/aiclib.h b/drivers/scsi/aic7xxx/aiclib.h new file mode 100644 index 000000000..ba08eb3c4 --- /dev/null +++ b/drivers/scsi/aic7xxx/aiclib.h @@ -0,0 +1,180 @@ +/* + * Largely written by Julian Elischer (julian@tfs.com) + * for TRW Financial Systems. + * + * TRW Financial Systems, in accordance with their agreement with Carnegie + * Mellon University, makes this software available to CMU to distribute + * or use in any manner that they see fit as long as this message is kept with + * the software. For this reason TFS also grants any other persons or + * organisations permission to use or modify this software. + * + * TFS supplies this software to be publicly redistributed + * on the understanding that TFS is not responsible for the correct + * functioning of this software in any circumstances. + * + * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992 + * + * $FreeBSD: src/sys/cam/scsi/scsi_all.h,v 1.21 2002/10/08 17:12:44 ken Exp $ + * + * Copyright (c) 2003 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + * $Id$ + */ + +#ifndef _AICLIB_H +#define _AICLIB_H + +struct scsi_sense +{ + uint8_t opcode; + uint8_t byte2; + uint8_t unused[2]; + uint8_t length; + uint8_t control; +}; + +#define SCSI_REV_0 0 +#define SCSI_REV_CCS 1 +#define SCSI_REV_2 2 +#define SCSI_REV_SPC 3 +#define SCSI_REV_SPC2 4 + +struct scsi_sense_data +{ + uint8_t error_code; +#define SSD_ERRCODE 0x7F +#define SSD_CURRENT_ERROR 0x70 +#define SSD_DEFERRED_ERROR 0x71 +#define SSD_ERRCODE_VALID 0x80 + uint8_t segment; + uint8_t flags; +#define SSD_KEY 0x0F +#define SSD_KEY_NO_SENSE 0x00 +#define SSD_KEY_RECOVERED_ERROR 0x01 +#define SSD_KEY_NOT_READY 0x02 +#define SSD_KEY_MEDIUM_ERROR 0x03 +#define SSD_KEY_HARDWARE_ERROR 0x04 +#define SSD_KEY_ILLEGAL_REQUEST 0x05 +#define SSD_KEY_UNIT_ATTENTION 0x06 +#define SSD_KEY_DATA_PROTECT 0x07 +#define SSD_KEY_BLANK_CHECK 0x08 +#define SSD_KEY_Vendor_Specific 0x09 +#define SSD_KEY_COPY_ABORTED 0x0a +#define SSD_KEY_ABORTED_COMMAND 0x0b +#define SSD_KEY_EQUAL 0x0c +#define SSD_KEY_VOLUME_OVERFLOW 0x0d +#define SSD_KEY_MISCOMPARE 0x0e +#define SSD_KEY_RESERVED 0x0f +#define SSD_ILI 0x20 +#define SSD_EOM 0x40 +#define SSD_FILEMARK 0x80 + uint8_t info[4]; + uint8_t extra_len; + uint8_t cmd_spec_info[4]; + uint8_t add_sense_code; + uint8_t add_sense_code_qual; + uint8_t fru; + uint8_t sense_key_spec[3]; +#define SSD_SCS_VALID 0x80 +#define SSD_FIELDPTR_CMD 0x40 +#define SSD_BITPTR_VALID 0x08 +#define SSD_BITPTR_VALUE 0x07 +#define SSD_MIN_SIZE 18 + uint8_t extra_bytes[14]; +#define SSD_FULL_SIZE sizeof(struct scsi_sense_data) +}; + +/************************* Large Disk Handling ********************************/ +static inline int +aic_sector_div(sector_t capacity, int heads, int sectors) +{ + /* ugly, ugly sector_div calling convention.. */ + sector_div(capacity, (heads * sectors)); + return (int)capacity; +} + +static inline uint32_t +scsi_4btoul(uint8_t *bytes) +{ + uint32_t rv; + + rv = (bytes[0] << 24) | + (bytes[1] << 16) | + (bytes[2] << 8) | + bytes[3]; + return (rv); +} + +/* Macros for generating the elements of the PCI ID tables. */ + +#define GETID(v, s) (unsigned)(((v) >> (s)) & 0xFFFF ?: PCI_ANY_ID) + +#define ID_C(x, c) \ +{ \ + GETID(x,32), GETID(x,48), GETID(x,0), GETID(x,16), \ + (c) << 8, 0xFFFF00, 0 \ +} + +#define ID2C(x) \ + ID_C(x, PCI_CLASS_STORAGE_SCSI), \ + ID_C(x, PCI_CLASS_STORAGE_RAID) + +#define IDIROC(x) ((x) | ~ID_ALL_IROC_MASK) + +/* Generate IDs for all 16 possibilites. + * The argument has already masked out + * the 4 least significant bits of the device id. + * (e.g., mask: ID_9005_GENERIC_MASK). + */ +#define ID16(x) \ + ID(x), \ + ID((x) | 0x0001000000000000ull), \ + ID((x) | 0x0002000000000000ull), \ + ID((x) | 0x0003000000000000ull), \ + ID((x) | 0x0004000000000000ull), \ + ID((x) | 0x0005000000000000ull), \ + ID((x) | 0x0006000000000000ull), \ + ID((x) | 0x0007000000000000ull), \ + ID((x) | 0x0008000000000000ull), \ + ID((x) | 0x0009000000000000ull), \ + ID((x) | 0x000A000000000000ull), \ + ID((x) | 0x000B000000000000ull), \ + ID((x) | 0x000C000000000000ull), \ + ID((x) | 0x000D000000000000ull), \ + ID((x) | 0x000E000000000000ull), \ + ID((x) | 0x000F000000000000ull) + +#endif /*_AICLIB_H */ diff --git a/drivers/scsi/aic7xxx/cam.h b/drivers/scsi/aic7xxx/cam.h new file mode 100644 index 000000000..687aef6ef --- /dev/null +++ b/drivers/scsi/aic7xxx/cam.h @@ -0,0 +1,111 @@ +/* + * Data structures and definitions for the CAM system. + * + * Copyright (c) 1997 Justin T. Gibbs. + * Copyright (c) 2000 Adaptec Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL"). + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/cam.h#15 $ + */ + +#ifndef _AIC7XXX_CAM_H +#define _AIC7XXX_CAM_H 1 + +#include + +#define CAM_BUS_WILDCARD ((u_int)~0) +#define CAM_TARGET_WILDCARD ((u_int)~0) +#define CAM_LUN_WILDCARD ((u_int)~0) + +/* CAM Status field values */ +typedef enum { + CAM_REQ_INPROG, /* CCB request is in progress */ + CAM_REQ_CMP, /* CCB request completed without error */ + CAM_REQ_ABORTED, /* CCB request aborted by the host */ + CAM_UA_ABORT, /* Unable to abort CCB request */ + CAM_REQ_CMP_ERR, /* CCB request completed with an error */ + CAM_BUSY, /* CAM subsystem is busy */ + CAM_REQ_INVALID, /* CCB request was invalid */ + CAM_PATH_INVALID, /* Supplied Path ID is invalid */ + CAM_SEL_TIMEOUT, /* Target Selection Timeout */ + CAM_CMD_TIMEOUT, /* Command timeout */ + CAM_SCSI_STATUS_ERROR, /* SCSI error, look at error code in CCB */ + CAM_SCSI_BUS_RESET, /* SCSI Bus Reset Sent/Received */ + CAM_UNCOR_PARITY, /* Uncorrectable parity error occurred */ + CAM_AUTOSENSE_FAIL, /* Autosense: request sense cmd fail */ + CAM_NO_HBA, /* No HBA Detected Error */ + CAM_DATA_RUN_ERR, /* Data Overrun error */ + CAM_UNEXP_BUSFREE, /* Unexpected Bus Free */ + CAM_SEQUENCE_FAIL, /* Protocol Violation */ + CAM_CCB_LEN_ERR, /* CCB length supplied is inadequate */ + CAM_PROVIDE_FAIL, /* Unable to provide requested capability */ + CAM_BDR_SENT, /* A SCSI BDR msg was sent to target */ + CAM_REQ_TERMIO, /* CCB request terminated by the host */ + CAM_UNREC_HBA_ERROR, /* Unrecoverable Host Bus Adapter Error */ + CAM_REQ_TOO_BIG, /* The request was too large for this host */ + CAM_UA_TERMIO, /* Unable to terminate I/O CCB request */ + CAM_MSG_REJECT_REC, /* Message Reject Received */ + CAM_DEV_NOT_THERE, /* SCSI Device Not Installed/there */ + CAM_RESRC_UNAVAIL, /* Resource Unavailable */ + /* + * This request should be requeued to preserve + * transaction ordering. This typically occurs + * when the SIM recognizes an error that should + * freeze the queue and must place additional + * requests for the target at the sim level + * back into the XPT queue. 
+ */ + CAM_REQUEUE_REQ, + CAM_DEV_QFRZN = 0x40, + + CAM_STATUS_MASK = 0x3F +} cam_status; + +/* + * Definitions for the asynchronous callback CCB fields. + */ +typedef enum { + AC_GETDEV_CHANGED = 0x800,/* Getdev info might have changed */ + AC_INQ_CHANGED = 0x400,/* Inquiry info might have changed */ + AC_TRANSFER_NEG = 0x200,/* New transfer settings in effect */ + AC_LOST_DEVICE = 0x100,/* A device went away */ + AC_FOUND_DEVICE = 0x080,/* A new device was found */ + AC_PATH_DEREGISTERED = 0x040,/* A path has de-registered */ + AC_PATH_REGISTERED = 0x020,/* A new path has been registered */ + AC_SENT_BDR = 0x010,/* A BDR message was sent to target */ + AC_SCSI_AEN = 0x008,/* A SCSI AEN has been received */ + AC_UNSOL_RESEL = 0x002,/* Unsolicited reselection occurred */ + AC_BUS_RESET = 0x001 /* A SCSI bus reset occurred */ +} ac_code; + +typedef enum { + CAM_DIR_IN = DMA_FROM_DEVICE, + CAM_DIR_OUT = DMA_TO_DEVICE, + CAM_DIR_NONE = DMA_NONE, +} ccb_flags; + +#endif /* _AIC7XXX_CAM_H */ diff --git a/drivers/scsi/aic7xxx/queue.h b/drivers/scsi/aic7xxx/queue.h new file mode 100644 index 000000000..ba602981f --- /dev/null +++ b/drivers/scsi/aic7xxx/queue.h @@ -0,0 +1,501 @@ +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)queue.h 8.5 (Berkeley) 8/20/94 + * $FreeBSD: src/sys/sys/queue.h,v 1.38 2000/05/26 02:06:56 jake Exp $ + */ + +#ifndef _SYS_QUEUE_H_ +#define _SYS_QUEUE_H_ + +/* + * This file defines five types of data structures: singly-linked lists, + * singly-linked tail queues, lists, tail queues, and circular queues. + * + * A singly-linked list is headed by a single forward pointer. The elements + * are singly linked for minimum space and pointer manipulation overhead at + * the expense of O(n) removal for arbitrary elements. New elements can be + * added to the list after an existing element or at the head of the list. + * Elements being removed from the head of the list should use the explicit + * macro for this purpose for optimum efficiency. A singly-linked list may + * only be traversed in the forward direction. 
Singly-linked lists are ideal + * for applications with large datasets and few or no removals or for + * implementing a LIFO queue. + * + * A singly-linked tail queue is headed by a pair of pointers, one to the + * head of the list and the other to the tail of the list. The elements are + * singly linked for minimum space and pointer manipulation overhead at the + * expense of O(n) removal for arbitrary elements. New elements can be added + * to the list after an existing element, at the head of the list, or at the + * end of the list. Elements being removed from the head of the tail queue + * should use the explicit macro for this purpose for optimum efficiency. + * A singly-linked tail queue may only be traversed in the forward direction. + * Singly-linked tail queues are ideal for applications with large datasets + * and few or no removals or for implementing a FIFO queue. + * + * A list is headed by a single forward pointer (or an array of forward + * pointers for a hash table header). The elements are doubly linked + * so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before + * or after an existing element or at the head of the list. A list + * may only be traversed in the forward direction. + * + * A tail queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or + * after an existing element, at the head of the list, or at the end of + * the list. A tail queue may be traversed in either direction. + * + * A circle queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or after + * an existing element, at the head of the list, or at the end of the list. + * A circle queue may be traversed in either direction, but has a more + * complex end of list detection. + * + * For details on the use of these macros, see the queue(3) manual page. + * + * + * SLIST LIST STAILQ TAILQ CIRCLEQ + * _HEAD + + + + + + * _HEAD_INITIALIZER + + + + + + * _ENTRY + + + + + + * _INIT + + + + + + * _EMPTY + + + + + + * _FIRST + + + + + + * _NEXT + + + + + + * _PREV - - - + + + * _LAST - - + + + + * _FOREACH + + + + + + * _FOREACH_REVERSE - - - + + + * _INSERT_HEAD + + + + + + * _INSERT_BEFORE - + - + + + * _INSERT_AFTER + + + + + + * _INSERT_TAIL - - + + + + * _REMOVE_HEAD + - + - - + * _REMOVE + + + + + + * + */ + +/* + * Singly-linked List declarations. + */ +#define SLIST_HEAD(name, type) \ +struct name { \ + struct type *slh_first; /* first element */ \ +} + +#define SLIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define SLIST_ENTRY(type) \ +struct { \ + struct type *sle_next; /* next element */ \ +} + +/* + * Singly-linked List functions. 
+ */ +#define SLIST_EMPTY(head) ((head)->slh_first == NULL) + +#define SLIST_FIRST(head) ((head)->slh_first) + +#define SLIST_FOREACH(var, head, field) \ + for ((var) = SLIST_FIRST((head)); \ + (var); \ + (var) = SLIST_NEXT((var), field)) + +#define SLIST_INIT(head) do { \ + SLIST_FIRST((head)) = NULL; \ +} while (0) + +#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ + SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \ + SLIST_NEXT((slistelm), field) = (elm); \ +} while (0) + +#define SLIST_INSERT_HEAD(head, elm, field) do { \ + SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ + SLIST_FIRST((head)) = (elm); \ +} while (0) + +#define SLIST_NEXT(elm, field) ((elm)->field.sle_next) + +#define SLIST_REMOVE(head, elm, type, field) do { \ + if (SLIST_FIRST((head)) == (elm)) { \ + SLIST_REMOVE_HEAD((head), field); \ + } \ + else { \ + struct type *curelm = SLIST_FIRST((head)); \ + while (SLIST_NEXT(curelm, field) != (elm)) \ + curelm = SLIST_NEXT(curelm, field); \ + SLIST_NEXT(curelm, field) = \ + SLIST_NEXT(SLIST_NEXT(curelm, field), field); \ + } \ +} while (0) + +#define SLIST_REMOVE_HEAD(head, field) do { \ + SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \ +} while (0) + +/* + * Singly-linked Tail queue declarations. + */ +#define STAILQ_HEAD(name, type) \ +struct name { \ + struct type *stqh_first;/* first element */ \ + struct type **stqh_last;/* addr of last next element */ \ +} + +#define STAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).stqh_first } + +#define STAILQ_ENTRY(type) \ +struct { \ + struct type *stqe_next; /* next element */ \ +} + +/* + * Singly-linked Tail queue functions. + */ +#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) + +#define STAILQ_FIRST(head) ((head)->stqh_first) + +#define STAILQ_FOREACH(var, head, field) \ + for((var) = STAILQ_FIRST((head)); \ + (var); \ + (var) = STAILQ_NEXT((var), field)) + +#define STAILQ_INIT(head) do { \ + STAILQ_FIRST((head)) = NULL; \ + (head)->stqh_last = &STAILQ_FIRST((head)); \ +} while (0) + +#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ + if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\ + (head)->stqh_last = &STAILQ_NEXT((elm), field); \ + STAILQ_NEXT((tqelm), field) = (elm); \ +} while (0) + +#define STAILQ_INSERT_HEAD(head, elm, field) do { \ + if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \ + (head)->stqh_last = &STAILQ_NEXT((elm), field); \ + STAILQ_FIRST((head)) = (elm); \ +} while (0) + +#define STAILQ_INSERT_TAIL(head, elm, field) do { \ + STAILQ_NEXT((elm), field) = NULL; \ + STAILQ_LAST((head)) = (elm); \ + (head)->stqh_last = &STAILQ_NEXT((elm), field); \ +} while (0) + +#define STAILQ_LAST(head) (*(head)->stqh_last) + +#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) + +#define STAILQ_REMOVE(head, elm, type, field) do { \ + if (STAILQ_FIRST((head)) == (elm)) { \ + STAILQ_REMOVE_HEAD(head, field); \ + } \ + else { \ + struct type *curelm = STAILQ_FIRST((head)); \ + while (STAILQ_NEXT(curelm, field) != (elm)) \ + curelm = STAILQ_NEXT(curelm, field); \ + if ((STAILQ_NEXT(curelm, field) = \ + STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\ + (head)->stqh_last = &STAILQ_NEXT((curelm), field);\ + } \ +} while (0) + +#define STAILQ_REMOVE_HEAD(head, field) do { \ + if ((STAILQ_FIRST((head)) = \ + STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \ + (head)->stqh_last = &STAILQ_FIRST((head)); \ +} while (0) + +#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \ + if ((STAILQ_FIRST((head)) = 
STAILQ_NEXT((elm), field)) == NULL) \ + (head)->stqh_last = &STAILQ_FIRST((head)); \ +} while (0) + +/* + * List declarations. + */ +#define BSD_LIST_HEAD(name, type) \ +struct name { \ + struct type *lh_first; /* first element */ \ +} + +#define LIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define LIST_ENTRY(type) \ +struct { \ + struct type *le_next; /* next element */ \ + struct type **le_prev; /* address of previous next element */ \ +} + +/* + * List functions. + */ + +#define LIST_EMPTY(head) ((head)->lh_first == NULL) + +#define LIST_FIRST(head) ((head)->lh_first) + +#define LIST_FOREACH(var, head, field) \ + for ((var) = LIST_FIRST((head)); \ + (var); \ + (var) = LIST_NEXT((var), field)) + +#define LIST_INIT(head) do { \ + LIST_FIRST((head)) = NULL; \ +} while (0) + +#define LIST_INSERT_AFTER(listelm, elm, field) do { \ + if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\ + LIST_NEXT((listelm), field)->field.le_prev = \ + &LIST_NEXT((elm), field); \ + LIST_NEXT((listelm), field) = (elm); \ + (elm)->field.le_prev = &LIST_NEXT((listelm), field); \ +} while (0) + +#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.le_prev = (listelm)->field.le_prev; \ + LIST_NEXT((elm), field) = (listelm); \ + *(listelm)->field.le_prev = (elm); \ + (listelm)->field.le_prev = &LIST_NEXT((elm), field); \ +} while (0) + +#define LIST_INSERT_HEAD(head, elm, field) do { \ + if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ + LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\ + LIST_FIRST((head)) = (elm); \ + (elm)->field.le_prev = &LIST_FIRST((head)); \ +} while (0) + +#define LIST_NEXT(elm, field) ((elm)->field.le_next) + +#define LIST_REMOVE(elm, field) do { \ + if (LIST_NEXT((elm), field) != NULL) \ + LIST_NEXT((elm), field)->field.le_prev = \ + (elm)->field.le_prev; \ + *(elm)->field.le_prev = LIST_NEXT((elm), field); \ +} while (0) + +/* + * Tail queue declarations. + */ +#define TAILQ_HEAD(name, type) \ +struct name { \ + struct type *tqh_first; /* first element */ \ + struct type **tqh_last; /* addr of last next element */ \ +} + +#define TAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).tqh_first } + +#define TAILQ_ENTRY(type) \ +struct { \ + struct type *tqe_next; /* next element */ \ + struct type **tqe_prev; /* address of previous next element */ \ +} + +/* + * Tail queue functions. 
+ */ +#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) + +#define TAILQ_FIRST(head) ((head)->tqh_first) + +#define TAILQ_FOREACH(var, head, field) \ + for ((var) = TAILQ_FIRST((head)); \ + (var); \ + (var) = TAILQ_NEXT((var), field)) + +#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ + for ((var) = TAILQ_LAST((head), headname); \ + (var); \ + (var) = TAILQ_PREV((var), headname, field)) + +#define TAILQ_INIT(head) do { \ + TAILQ_FIRST((head)) = NULL; \ + (head)->tqh_last = &TAILQ_FIRST((head)); \ +} while (0) + +#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\ + TAILQ_NEXT((elm), field)->field.tqe_prev = \ + &TAILQ_NEXT((elm), field); \ + else \ + (head)->tqh_last = &TAILQ_NEXT((elm), field); \ + TAILQ_NEXT((listelm), field) = (elm); \ + (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \ +} while (0) + +#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + TAILQ_NEXT((elm), field) = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \ +} while (0) + +#define TAILQ_INSERT_HEAD(head, elm, field) do { \ + if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ + TAILQ_FIRST((head))->field.tqe_prev = \ + &TAILQ_NEXT((elm), field); \ + else \ + (head)->tqh_last = &TAILQ_NEXT((elm), field); \ + TAILQ_FIRST((head)) = (elm); \ + (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ +} while (0) + +#define TAILQ_INSERT_TAIL(head, elm, field) do { \ + TAILQ_NEXT((elm), field) = NULL; \ + (elm)->field.tqe_prev = (head)->tqh_last; \ + *(head)->tqh_last = (elm); \ + (head)->tqh_last = &TAILQ_NEXT((elm), field); \ +} while (0) + +#define TAILQ_LAST(head, headname) \ + (*(((struct headname *)((head)->tqh_last))->tqh_last)) + +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) + +#define TAILQ_PREV(elm, headname, field) \ + (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) + +#define TAILQ_REMOVE(head, elm, field) do { \ + if ((TAILQ_NEXT((elm), field)) != NULL) \ + TAILQ_NEXT((elm), field)->field.tqe_prev = \ + (elm)->field.tqe_prev; \ + else \ + (head)->tqh_last = (elm)->field.tqe_prev; \ + *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \ +} while (0) + +/* + * Circular queue declarations. + */ +#define CIRCLEQ_HEAD(name, type) \ +struct name { \ + struct type *cqh_first; /* first element */ \ + struct type *cqh_last; /* last element */ \ +} + +#define CIRCLEQ_HEAD_INITIALIZER(head) \ + { (void *)&(head), (void *)&(head) } + +#define CIRCLEQ_ENTRY(type) \ +struct { \ + struct type *cqe_next; /* next element */ \ + struct type *cqe_prev; /* previous element */ \ +} + +/* + * Circular queue functions. 
+ */ +#define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head)) + +#define CIRCLEQ_FIRST(head) ((head)->cqh_first) + +#define CIRCLEQ_FOREACH(var, head, field) \ + for ((var) = CIRCLEQ_FIRST((head)); \ + (var) != (void *)(head); \ + (var) = CIRCLEQ_NEXT((var), field)) + +#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \ + for ((var) = CIRCLEQ_LAST((head)); \ + (var) != (void *)(head); \ + (var) = CIRCLEQ_PREV((var), field)) + +#define CIRCLEQ_INIT(head) do { \ + CIRCLEQ_FIRST((head)) = (void *)(head); \ + CIRCLEQ_LAST((head)) = (void *)(head); \ +} while (0) + +#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + CIRCLEQ_NEXT((elm), field) = CIRCLEQ_NEXT((listelm), field); \ + CIRCLEQ_PREV((elm), field) = (listelm); \ + if (CIRCLEQ_NEXT((listelm), field) == (void *)(head)) \ + CIRCLEQ_LAST((head)) = (elm); \ + else \ + CIRCLEQ_PREV(CIRCLEQ_NEXT((listelm), field), field) = (elm);\ + CIRCLEQ_NEXT((listelm), field) = (elm); \ +} while (0) + +#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ + CIRCLEQ_NEXT((elm), field) = (listelm); \ + CIRCLEQ_PREV((elm), field) = CIRCLEQ_PREV((listelm), field); \ + if (CIRCLEQ_PREV((listelm), field) == (void *)(head)) \ + CIRCLEQ_FIRST((head)) = (elm); \ + else \ + CIRCLEQ_NEXT(CIRCLEQ_PREV((listelm), field), field) = (elm);\ + CIRCLEQ_PREV((listelm), field) = (elm); \ +} while (0) + +#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ + CIRCLEQ_NEXT((elm), field) = CIRCLEQ_FIRST((head)); \ + CIRCLEQ_PREV((elm), field) = (void *)(head); \ + if (CIRCLEQ_LAST((head)) == (void *)(head)) \ + CIRCLEQ_LAST((head)) = (elm); \ + else \ + CIRCLEQ_PREV(CIRCLEQ_FIRST((head)), field) = (elm); \ + CIRCLEQ_FIRST((head)) = (elm); \ +} while (0) + +#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ + CIRCLEQ_NEXT((elm), field) = (void *)(head); \ + CIRCLEQ_PREV((elm), field) = CIRCLEQ_LAST((head)); \ + if (CIRCLEQ_FIRST((head)) == (void *)(head)) \ + CIRCLEQ_FIRST((head)) = (elm); \ + else \ + CIRCLEQ_NEXT(CIRCLEQ_LAST((head)), field) = (elm); \ + CIRCLEQ_LAST((head)) = (elm); \ +} while (0) + +#define CIRCLEQ_LAST(head) ((head)->cqh_last) + +#define CIRCLEQ_NEXT(elm,field) ((elm)->field.cqe_next) + +#define CIRCLEQ_PREV(elm,field) ((elm)->field.cqe_prev) + +#define CIRCLEQ_REMOVE(head, elm, field) do { \ + if (CIRCLEQ_NEXT((elm), field) == (void *)(head)) \ + CIRCLEQ_LAST((head)) = CIRCLEQ_PREV((elm), field); \ + else \ + CIRCLEQ_PREV(CIRCLEQ_NEXT((elm), field), field) = \ + CIRCLEQ_PREV((elm), field); \ + if (CIRCLEQ_PREV((elm), field) == (void *)(head)) \ + CIRCLEQ_FIRST((head)) = CIRCLEQ_NEXT((elm), field); \ + else \ + CIRCLEQ_NEXT(CIRCLEQ_PREV((elm), field), field) = \ + CIRCLEQ_NEXT((elm), field); \ +} while (0) + +#endif /* !_SYS_QUEUE_H_ */ diff --git a/drivers/scsi/aic7xxx/scsi_iu.h b/drivers/scsi/aic7xxx/scsi_iu.h new file mode 100644 index 000000000..0eafd3c17 --- /dev/null +++ b/drivers/scsi/aic7xxx/scsi_iu.h @@ -0,0 +1,39 @@ +/* + * This file is in the public domain. 
+ */ +#ifndef _SCSI_SCSI_IU_H +#define _SCSI_SCSI_IU_H 1 + +struct scsi_status_iu_header +{ + u_int8_t reserved[2]; + u_int8_t flags; +#define SIU_SNSVALID 0x2 +#define SIU_RSPVALID 0x1 + u_int8_t status; + u_int8_t sense_length[4]; + u_int8_t pkt_failures_length[4]; + u_int8_t pkt_failures[1]; +}; + +#define SIU_PKTFAIL_OFFSET(siu) 12 +#define SIU_PKTFAIL_CODE(siu) (scsi_4btoul((siu)->pkt_failures) & 0xFF) +#define SIU_PFC_NONE 0 +#define SIU_PFC_CIU_FIELDS_INVALID 2 +#define SIU_PFC_TMF_NOT_SUPPORTED 4 +#define SIU_PFC_TMF_FAILED 5 +#define SIU_PFC_INVALID_TYPE_CODE 6 +#define SIU_PFC_ILLEGAL_REQUEST 7 +#define SIU_SENSE_OFFSET(siu) \ + (12 + (((siu)->flags & SIU_RSPVALID) \ + ? scsi_4btoul((siu)->pkt_failures_length) \ + : 0)) + +#define SIU_TASKMGMT_NONE 0x00 +#define SIU_TASKMGMT_ABORT_TASK 0x01 +#define SIU_TASKMGMT_ABORT_TASK_SET 0x02 +#define SIU_TASKMGMT_CLEAR_TASK_SET 0x04 +#define SIU_TASKMGMT_LUN_RESET 0x08 +#define SIU_TASKMGMT_TARGET_RESET 0x20 +#define SIU_TASKMGMT_CLEAR_ACA 0x40 +#endif /*_SCSI_SCSI_IU_H*/ diff --git a/drivers/scsi/aic7xxx/scsi_message.h b/drivers/scsi/aic7xxx/scsi_message.h new file mode 100644 index 000000000..53343a6d8 --- /dev/null +++ b/drivers/scsi/aic7xxx/scsi_message.h @@ -0,0 +1,40 @@ +/* + * This file is in the public domain. + * $FreeBSD: src/sys/cam/scsi/scsi_message.h,v 1.2 2000/05/01 20:21:29 peter Exp $ + */ + +/* Messages (1 byte) */ /* I/T (M)andatory or (O)ptional */ +#define MSG_SAVEDATAPOINTER 0x02 /* O/O */ +#define MSG_RESTOREPOINTERS 0x03 /* O/O */ +#define MSG_DISCONNECT 0x04 /* O/O */ +#define MSG_MESSAGE_REJECT 0x07 /* M/M */ +#define MSG_NOOP 0x08 /* M/M */ + +/* Messages (2 byte) */ +#define MSG_SIMPLE_Q_TAG 0x20 /* O/O */ +#define MSG_IGN_WIDE_RESIDUE 0x23 /* O/O */ + +/* Identify message */ /* M/M */ +#define MSG_IDENTIFYFLAG 0x80 +#define MSG_IDENTIFY_DISCFLAG 0x40 +#define MSG_IDENTIFY(lun, disc) (((disc) ? 0xc0 : MSG_IDENTIFYFLAG) | (lun)) +#define MSG_ISIDENTIFY(m) ((m) & MSG_IDENTIFYFLAG) +#define MSG_IDENTIFY_LUNMASK 0x3F + +/* Extended messages (opcode and length) */ +#define MSG_EXT_SDTR_LEN 0x03 + +#define MSG_EXT_WDTR_LEN 0x02 +#define MSG_EXT_WDTR_BUS_8_BIT 0x00 +#define MSG_EXT_WDTR_BUS_16_BIT 0x01 +#define MSG_EXT_WDTR_BUS_32_BIT 0x02 /* Deprecated in SPI3 */ + +#define MSG_EXT_PPR_LEN 0x06 +#define MSG_EXT_PPR_PCOMP_EN 0x80 +#define MSG_EXT_PPR_RTI 0x40 +#define MSG_EXT_PPR_RD_STRM 0x20 +#define MSG_EXT_PPR_WR_FLOW 0x10 +#define MSG_EXT_PPR_HOLD_MCS 0x08 +#define MSG_EXT_PPR_QAS_REQ 0x04 +#define MSG_EXT_PPR_DT_REQ 0x02 +#define MSG_EXT_PPR_IU_REQ 0x01 diff --git a/drivers/scsi/aic94xx/Kconfig b/drivers/scsi/aic94xx/Kconfig new file mode 100644 index 000000000..aaa8dadc6 --- /dev/null +++ b/drivers/scsi/aic94xx/Kconfig @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Kernel configuration file for aic94xx SAS/SATA driver. +# +# Copyright (c) 2005 Adaptec, Inc. All rights reserved. +# Copyright (c) 2005 Luben Tuikov +# + +config SCSI_AIC94XX + tristate "Adaptec AIC94xx SAS/SATA support" + depends on PCI && HAS_IOPORT + select SCSI_SAS_LIBSAS + select FW_LOADER + help + This driver supports Adaptec's SAS/SATA 3Gb/s 64 bit PCI-X + AIC94xx chip based host adapters. + +config AIC94XX_DEBUG + bool "Compile in debug mode" + default y + depends on SCSI_AIC94XX + help + Compiles the aic94xx driver in debug mode. In debug mode, + the driver prints some messages to the console. 
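The block comment at the top of queue.h earlier in this patch explains the five list flavours (SLIST, STAILQ, LIST, TAILQ, CIRCLEQ) and their trade-offs but shows no usage. The following is a minimal, self-contained userspace sketch, not part of the patch itself, of the SLIST and STAILQ macros in action; the struct item type, its field names, and the assumption that "queue.h" resolves to the header added above are all invented for the illustration.

/*
 * Standalone sketch only: an SLIST used as a LIFO stack and a STAILQ
 * used as a FIFO, built from the BSD-style macros in queue.h above.
 */
#include <stdio.h>

#include "queue.h"	/* assumed to point at drivers/scsi/aic7xxx/queue.h */

struct item {
	int value;
	SLIST_ENTRY(item) slink;
	STAILQ_ENTRY(item) qlink;
};

SLIST_HEAD(item_stack, item);
STAILQ_HEAD(item_fifo, item);

int main(void)
{
	struct item_stack stack = SLIST_HEAD_INITIALIZER(stack);
	struct item_fifo fifo = STAILQ_HEAD_INITIALIZER(fifo);
	struct item nodes[3];
	struct item *it;
	int i;

	/* O(1) insert at the head; traversal then sees 2, 1, 0. */
	for (i = 0; i < 3; i++) {
		nodes[i].value = i;
		SLIST_INSERT_HEAD(&stack, &nodes[i], slink);
	}

	/* Pop with the explicit head-removal macro, append to the FIFO. */
	while (!SLIST_EMPTY(&stack)) {
		it = SLIST_FIRST(&stack);
		SLIST_REMOVE_HEAD(&stack, slink);
		STAILQ_INSERT_TAIL(&fifo, it, qlink);
	}

	/* The tail queue preserves the order of the appends: 2, 1, 0. */
	STAILQ_FOREACH(it, &fifo, qlink)
		printf("%d\n", it->value);

	return 0;
}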
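The SIU_SENSE_OFFSET macro in scsi_iu.h above encodes one layout rule: sense data starts 12 bytes into the status IU and is pushed further out by the packet-failure list only when SIU_RSPVALID is set in the flags byte. Below is a hedged, standalone sketch of that calculation; the struct mirrors the header's layout, the helper stands in for scsi_4btoul() from aiclib.h, and the IU contents are fabricated for the example.

/* Userspace illustration only; not driver code from this patch. */
#include <stdint.h>
#include <stdio.h>

struct scsi_status_iu_header {
	uint8_t reserved[2];
	uint8_t flags;			/* SIU_SNSVALID | SIU_RSPVALID */
	uint8_t status;
	uint8_t sense_length[4];	/* big-endian 4-byte fields */
	uint8_t pkt_failures_length[4];
	uint8_t pkt_failures[1];
};

#define SIU_SNSVALID 0x2
#define SIU_RSPVALID 0x1

/* Same conversion scsi_4btoul() performs in aiclib.h. */
static uint32_t get_4b(const uint8_t *b)
{
	return ((uint32_t)b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
}

int main(void)
{
	uint8_t raw[32] = { 0 };
	struct scsi_status_iu_header *siu = (struct scsi_status_iu_header *)raw;
	size_t sense_off;

	siu->flags = SIU_SNSVALID;	/* sense present, no response data */
	siu->status = 0x02;		/* CHECK CONDITION */
	siu->sense_length[3] = 18;	/* 18 bytes of sense follow */

	/*
	 * Offset 12 covers reserved + flags + status + sense_length +
	 * pkt_failures_length; the failure list is only counted when
	 * SIU_RSPVALID is set, exactly as SIU_SENSE_OFFSET() does.
	 */
	sense_off = 12 + ((siu->flags & SIU_RSPVALID) ?
			  get_4b(siu->pkt_failures_length) : 0);

	printf("status 0x%02x, %u sense bytes at offset %zu\n",
	       siu->status, get_4b(siu->sense_length), sense_off);
	return 0;
}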
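scsi_message.h above defines MSG_IDENTIFY() as a bit-building helper: bit 7 marks the byte as an IDENTIFY message, bit 6 grants disconnect privilege, and the low bits carry the LUN. A tiny standalone sketch follows, with the macro definitions copied from that header and an arbitrary LUN chosen for the example, showing how such a byte is built and decoded.

/* Illustrative only; the LUN value is arbitrary. */
#include <stdio.h>

#define MSG_IDENTIFYFLAG	0x80
#define MSG_IDENTIFY_DISCFLAG	0x40
#define MSG_IDENTIFY(lun, disc)	(((disc) ? 0xc0 : MSG_IDENTIFYFLAG) | (lun))
#define MSG_ISIDENTIFY(m)	((m) & MSG_IDENTIFYFLAG)
#define MSG_IDENTIFY_LUNMASK	0x3F

int main(void)
{
	unsigned char msg = MSG_IDENTIFY(5, /*disconnect allowed*/1);

	if (MSG_ISIDENTIFY(msg))
		printf("IDENTIFY: lun %u, disconnect %s\n",
		       msg & MSG_IDENTIFY_LUNMASK,
		       (msg & MSG_IDENTIFY_DISCFLAG) ? "allowed" : "not allowed");
	return 0;
}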
diff --git a/drivers/scsi/aic94xx/Makefile b/drivers/scsi/aic94xx/Makefile new file mode 100644 index 000000000..db9fbe3a8 --- /dev/null +++ b/drivers/scsi/aic94xx/Makefile @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for Adaptec aic94xx SAS/SATA driver. +# +# Copyright (C) 2005 Adaptec, Inc. All rights reserved. +# Copyright (C) 2005 Luben Tuikov +# + +ccflags-$(CONFIG_AIC94XX_DEBUG) := -DASD_DEBUG -DASD_ENTER_EXIT + +obj-$(CONFIG_SCSI_AIC94XX) += aic94xx.o +aic94xx-y += aic94xx_init.o \ + aic94xx_hwi.o \ + aic94xx_reg.o \ + aic94xx_sds.o \ + aic94xx_seq.o \ + aic94xx_dump.o \ + aic94xx_scb.o \ + aic94xx_dev.o \ + aic94xx_tmf.o \ + aic94xx_task.o diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h new file mode 100644 index 000000000..f595bc2ee --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Aic94xx SAS/SATA driver header file. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * $Id: //depot/aic94xx/aic94xx.h#31 $ + */ + +#ifndef _AIC94XX_H_ +#define _AIC94XX_H_ + +#include +#include +#include + +#define ASD_DRIVER_NAME "aic94xx" +#define ASD_DRIVER_DESCRIPTION "Adaptec aic94xx SAS/SATA driver" + +#define asd_printk(fmt, ...) printk(KERN_NOTICE ASD_DRIVER_NAME ": " fmt, ## __VA_ARGS__) + +#ifdef ASD_ENTER_EXIT +#define ENTER printk(KERN_NOTICE "%s: ENTER %s\n", ASD_DRIVER_NAME, \ + __func__) +#define EXIT printk(KERN_NOTICE "%s: --EXIT %s\n", ASD_DRIVER_NAME, \ + __func__) +#else +#define ENTER +#define EXIT +#endif + +#ifdef ASD_DEBUG +#define ASD_DPRINTK asd_printk +#else +#define ASD_DPRINTK(fmt, ...) no_printk(fmt, ##__VA_ARGS__) +#endif + +/* 2*ITNL timeout + 1 second */ +#define AIC94XX_SCB_TIMEOUT (5*HZ) + +extern struct kmem_cache *asd_dma_token_cache; +extern struct kmem_cache *asd_ascb_cache; + +struct asd_ha_struct; +struct asd_ascb; + +int asd_read_ocm(struct asd_ha_struct *asd_ha); +int asd_read_flash(struct asd_ha_struct *asd_ha); + +int asd_dev_found(struct domain_device *dev); +void asd_dev_gone(struct domain_device *dev); + +void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id); + +int asd_execute_task(struct sas_task *task, gfp_t gfp_flags); + +void asd_set_dmamode(struct domain_device *dev); + +/* ---------- TMFs ---------- */ +int asd_abort_task(struct sas_task *); +int asd_abort_task_set(struct domain_device *, u8 *lun); +int asd_clear_task_set(struct domain_device *, u8 *lun); +int asd_lu_reset(struct domain_device *, u8 *lun); +int asd_I_T_nexus_reset(struct domain_device *dev); +int asd_query_task(struct sas_task *); + +/* ---------- Adapter and Port management ---------- */ +int asd_clear_nexus_port(struct asd_sas_port *port); +int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha); + +/* ---------- Phy Management ---------- */ +int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg); + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c new file mode 100644 index 000000000..91d196f26 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_dev.c @@ -0,0 +1,344 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Aic94xx SAS/SATA DDB management + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. 
+ * Copyright (C) 2005 Luben Tuikov + * + * $Id: //depot/aic94xx/aic94xx_dev.c#21 $ + */ + +#include "aic94xx.h" +#include "aic94xx_hwi.h" +#include "aic94xx_reg.h" +#include "aic94xx_sas.h" + +#define FIND_FREE_DDB(_ha) find_first_zero_bit((_ha)->hw_prof.ddb_bitmap, \ + (_ha)->hw_prof.max_ddbs) +#define SET_DDB(_ddb, _ha) set_bit(_ddb, (_ha)->hw_prof.ddb_bitmap) +#define CLEAR_DDB(_ddb, _ha) clear_bit(_ddb, (_ha)->hw_prof.ddb_bitmap) + +static int asd_get_ddb(struct asd_ha_struct *asd_ha) +{ + int ddb, i; + + ddb = FIND_FREE_DDB(asd_ha); + if (ddb >= asd_ha->hw_prof.max_ddbs) { + ddb = -ENOMEM; + goto out; + } + SET_DDB(ddb, asd_ha); + + for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4) + asd_ddbsite_write_dword(asd_ha, ddb, i, 0); +out: + return ddb; +} + +#define INIT_CONN_TAG offsetof(struct asd_ddb_ssp_smp_target_port, init_conn_tag) +#define DEST_SAS_ADDR offsetof(struct asd_ddb_ssp_smp_target_port, dest_sas_addr) +#define SEND_QUEUE_HEAD offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_head) +#define DDB_TYPE offsetof(struct asd_ddb_ssp_smp_target_port, ddb_type) +#define CONN_MASK offsetof(struct asd_ddb_ssp_smp_target_port, conn_mask) +#define DDB_TARG_FLAGS offsetof(struct asd_ddb_ssp_smp_target_port, flags) +#define DDB_TARG_FLAGS2 offsetof(struct asd_ddb_stp_sata_target_port, flags2) +#define EXEC_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, exec_queue_tail) +#define SEND_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_tail) +#define SISTER_DDB offsetof(struct asd_ddb_ssp_smp_target_port, sister_ddb) +#define MAX_CCONN offsetof(struct asd_ddb_ssp_smp_target_port, max_concurrent_conn) +#define NUM_CTX offsetof(struct asd_ddb_ssp_smp_target_port, num_contexts) +#define ATA_CMD_SCBPTR offsetof(struct asd_ddb_stp_sata_target_port, ata_cmd_scbptr) +#define SATA_TAG_ALLOC_MASK offsetof(struct asd_ddb_stp_sata_target_port, sata_tag_alloc_mask) +#define NUM_SATA_TAGS offsetof(struct asd_ddb_stp_sata_target_port, num_sata_tags) +#define SATA_STATUS offsetof(struct asd_ddb_stp_sata_target_port, sata_status) +#define NCQ_DATA_SCB_PTR offsetof(struct asd_ddb_stp_sata_target_port, ncq_data_scb_ptr) +#define ITNL_TIMEOUT offsetof(struct asd_ddb_ssp_smp_target_port, itnl_timeout) + +static void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb) +{ + if (!ddb || ddb >= 0xFFFF) + return; + asd_ddbsite_write_byte(asd_ha, ddb, DDB_TYPE, DDB_TYPE_UNUSED); + CLEAR_DDB(ddb, asd_ha); +} + +static void asd_set_ddb_type(struct domain_device *dev) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + int ddb = (int) (unsigned long) dev->lldd_dev; + + if (dev->dev_type == SAS_SATA_PM_PORT) + asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT); + else if (dev->tproto) + asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET); + else + asd_ddbsite_write_byte(asd_ha,ddb,DDB_TYPE,DDB_TYPE_INITIATOR); +} + +static int asd_init_sata_tag_ddb(struct domain_device *dev) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + int ddb, i; + + ddb = asd_get_ddb(asd_ha); + if (ddb < 0) + return ddb; + + for (i = 0; i < sizeof(struct asd_ddb_sata_tag); i += 2) + asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF); + + asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev, + SISTER_DDB, ddb); + return 0; +} + +void asd_set_dmamode(struct domain_device *dev) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + struct ata_device *ata_dev = sas_to_ata_dev(dev); + int ddb = (int) (unsigned long) dev->lldd_dev; + u32 
qdepth = 0;
+
+	if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) {
+		if (ata_id_has_ncq(ata_dev->id))
+			qdepth = ata_id_queue_depth(ata_dev->id);
+		asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
+					(1ULL<<qdepth)-1);
+		asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth);
+	}
+
+	if (qdepth > 0)
+		if (asd_init_sata_tag_ddb(dev) != 0) {
+			unsigned long flags;
+
+			spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
+			ata_dev->flags |= ATA_DFLAG_NCQ_OFF;
+			spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
+		}
+}
+
+static int asd_init_sata(struct domain_device *dev)
+{
+	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+	int ddb = (int) (unsigned long) dev->lldd_dev;
+
+	asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
+	if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
+	    dev->dev_type == SAS_SATA_PM_PORT) {
+		struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
+			dev->frame_rcvd;
+		asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
+	}
+	asd_ddbsite_write_word(asd_ha, ddb, NCQ_DATA_SCB_PTR, 0xFFFF);
+
+	return 0;
+}
+
+static int asd_init_target_ddb(struct domain_device *dev)
+{
+	int ddb, i;
+	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+	u8 flags = 0;
+
+	ddb = asd_get_ddb(asd_ha);
+	if (ddb < 0)
+		return ddb;
+
+	dev->lldd_dev = (void *) (unsigned long) ddb;
+
+	asd_ddbsite_write_byte(asd_ha, ddb, 0, DDB_TP_CONN_TYPE);
+	asd_ddbsite_write_byte(asd_ha, ddb, 1, 0);
+	asd_ddbsite_write_word(asd_ha, ddb, INIT_CONN_TAG, 0xFFFF);
+	for (i = 0; i < SAS_ADDR_SIZE; i++)
+		asd_ddbsite_write_byte(asd_ha, ddb, DEST_SAS_ADDR+i,
+				       dev->sas_addr[i]);
+	asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_HEAD, 0xFFFF);
+	asd_set_ddb_type(dev);
+	asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask);
+	if (dev->port->oob_mode != SATA_OOB_MODE) {
+		flags |= OPEN_REQUIRED;
+		if ((dev->dev_type == SAS_SATA_DEV) ||
+		    (dev->tproto & SAS_PROTOCOL_STP)) {
+			struct smp_rps_resp *rps_resp = &dev->sata_dev.rps_resp;
+			if (rps_resp->frame_type == SMP_RESPONSE &&
+			    rps_resp->function == SMP_REPORT_PHY_SATA &&
+			    rps_resp->result == SMP_RESP_FUNC_ACC) {
+				if (rps_resp->rps.affil_valid)
+					flags |= STP_AFFIL_POL;
+				if (rps_resp->rps.affil_supp)
+					flags |= SUPPORTS_AFFIL;
+			}
+		} else {
+			flags |= CONCURRENT_CONN_SUPP;
+			if (!dev->parent && dev_is_expander(dev->dev_type))
+				asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
+						       4);
+			else
+				asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
+						       dev->pathways);
+			asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1);
+		}
+	}
+	if (dev->dev_type == SAS_SATA_PM)
+		flags |= SATA_MULTIPORT;
+	asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
+
+	flags = 0;
+	if (dev->tproto & SAS_PROTOCOL_STP)
+		flags |= STP_CL_POL_NO_TX;
+	asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS2, flags);
+
+	asd_ddbsite_write_word(asd_ha, ddb, EXEC_QUEUE_TAIL, 0xFFFF);
+	asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
+	asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
+
+	if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+		i = asd_init_sata(dev);
+		if (i < 0) {
+			asd_free_ddb(asd_ha, ddb);
+			return i;
+		}
+	}
+
+	if (dev->dev_type == SAS_END_DEVICE) {
+		struct sas_end_device *rdev = rphy_to_end_device(dev->rphy);
+		if (rdev->I_T_nexus_loss_timeout > 0)
+			asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
+					       min(rdev->I_T_nexus_loss_timeout,
+						   (u16)ITNL_TIMEOUT_CONST));
+		else
+			asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
+					       (u16)ITNL_TIMEOUT_CONST);
+	}
+	return 0;
+}
+
+static int asd_init_sata_pm_table_ddb(struct domain_device *dev)
+{
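+	/* The words cleared to 0xFFFF below form a per-port lookup table for
+	 * the SATA port multiplier: the table is linked to the PM's own DDB
+	 * through its SISTER_DDB field, and asd_init_sata_pm_port_ddb()
+	 * later records each PM port's DDB index in it.
+	 */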
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + int ddb, i; + + ddb = asd_get_ddb(asd_ha); + if (ddb < 0) + return ddb; + + for (i = 0; i < 32; i += 2) + asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF); + + asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev, + SISTER_DDB, ddb); + + return 0; +} + +#define PM_PORT_FLAGS offsetof(struct asd_ddb_sata_pm_port, pm_port_flags) +#define PARENT_DDB offsetof(struct asd_ddb_sata_pm_port, parent_ddb) + +/** + * asd_init_sata_pm_port_ddb -- SATA Port Multiplier Port + * @dev: pointer to domain device + * + * For SATA Port Multiplier Ports we need to allocate one SATA Port + * Multiplier Port DDB and depending on whether the target on it + * supports SATA II NCQ, one SATA Tag DDB. + */ +static int asd_init_sata_pm_port_ddb(struct domain_device *dev) +{ + int ddb, i, parent_ddb, pmtable_ddb; + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + u8 flags; + + ddb = asd_get_ddb(asd_ha); + if (ddb < 0) + return ddb; + + asd_set_ddb_type(dev); + flags = (dev->sata_dev.port_no << 4) | PM_PORT_SET; + asd_ddbsite_write_byte(asd_ha, ddb, PM_PORT_FLAGS, flags); + asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); + asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF); + asd_init_sata(dev); + + parent_ddb = (int) (unsigned long) dev->parent->lldd_dev; + asd_ddbsite_write_word(asd_ha, ddb, PARENT_DDB, parent_ddb); + pmtable_ddb = asd_ddbsite_read_word(asd_ha, parent_ddb, SISTER_DDB); + asd_ddbsite_write_word(asd_ha, pmtable_ddb, dev->sata_dev.port_no,ddb); + + if (asd_ddbsite_read_byte(asd_ha, ddb, NUM_SATA_TAGS) > 0) { + i = asd_init_sata_tag_ddb(dev); + if (i < 0) { + asd_free_ddb(asd_ha, ddb); + return i; + } + } + return 0; +} + +static int asd_init_initiator_ddb(struct domain_device *dev) +{ + return -ENODEV; +} + +/** + * asd_init_sata_pm_ddb -- SATA Port Multiplier + * @dev: pointer to domain device + * + * For STP and direct-attached SATA Port Multipliers we need + * one target port DDB entry and one SATA PM table DDB entry. 
+ */ +static int asd_init_sata_pm_ddb(struct domain_device *dev) +{ + int res = 0; + + res = asd_init_target_ddb(dev); + if (res) + goto out; + res = asd_init_sata_pm_table_ddb(dev); + if (res) + asd_free_ddb(dev->port->ha->lldd_ha, + (int) (unsigned long) dev->lldd_dev); +out: + return res; +} + +int asd_dev_found(struct domain_device *dev) +{ + unsigned long flags; + int res = 0; + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + + spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); + switch (dev->dev_type) { + case SAS_SATA_PM: + res = asd_init_sata_pm_ddb(dev); + break; + case SAS_SATA_PM_PORT: + res = asd_init_sata_pm_port_ddb(dev); + break; + default: + if (dev->tproto) + res = asd_init_target_ddb(dev); + else + res = asd_init_initiator_ddb(dev); + } + spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); + + return res; +} + +void asd_dev_gone(struct domain_device *dev) +{ + int ddb, sister_ddb; + unsigned long flags; + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + + spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); + ddb = (int) (unsigned long) dev->lldd_dev; + sister_ddb = asd_ddbsite_read_word(asd_ha, ddb, SISTER_DDB); + + if (sister_ddb != 0xFFFF) + asd_free_ddb(asd_ha, sister_ddb); + asd_free_ddb(asd_ha, ddb); + dev->lldd_dev = NULL; + spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); +} diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c new file mode 100644 index 000000000..552f1913e --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_dump.c @@ -0,0 +1,765 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Aic94xx SAS/SATA driver dump interface. + * + * Copyright (C) 2004 Adaptec, Inc. All rights reserved. + * Copyright (C) 2004 David Chaw + * Copyright (C) 2005 Luben Tuikov + * + * 2005/07/14/LT Complete overhaul of this file. Update pages, register + * locations, names, etc. Make use of macros. Print more information. + * Print all cseq and lseq mip and mdp. 
+ */ + +#include +#include "aic94xx.h" +#include "aic94xx_reg.h" +#include "aic94xx_reg_def.h" +#include "aic94xx_sas.h" + +#include "aic94xx_dump.h" + +#ifdef ASD_DEBUG + +#define MD(x) (1 << (x)) +#define MODE_COMMON (1 << 31) +#define MODE_0_7 (0xFF) + +static const struct lseq_cio_regs { + char *name; + u32 offs; + u8 width; + u32 mode; +} LSEQmCIOREGS[] = { + {"LmMnSCBPTR", 0x20, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) }, + {"LmMnDDBPTR", 0x22, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) }, + {"LmREQMBX", 0x30, 32, MODE_COMMON }, + {"LmRSPMBX", 0x34, 32, MODE_COMMON }, + {"LmMnINT", 0x38, 32, MODE_0_7 }, + {"LmMnINTEN", 0x3C, 32, MODE_0_7 }, + {"LmXMTPRIMD", 0x40, 32, MODE_COMMON }, + {"LmXMTPRIMCS", 0x44, 8, MODE_COMMON }, + {"LmCONSTAT", 0x45, 8, MODE_COMMON }, + {"LmMnDMAERRS", 0x46, 8, MD(0)|MD(1) }, + {"LmMnSGDMAERRS", 0x47, 8, MD(0)|MD(1) }, + {"LmMnEXPHDRP", 0x48, 8, MD(0) }, + {"LmMnSASAALIGN", 0x48, 8, MD(1) }, + {"LmMnMSKHDRP", 0x49, 8, MD(0) }, + {"LmMnSTPALIGN", 0x49, 8, MD(1) }, + {"LmMnRCVHDRP", 0x4A, 8, MD(0) }, + {"LmMnXMTHDRP", 0x4A, 8, MD(1) }, + {"LmALIGNMODE", 0x4B, 8, MD(1) }, + {"LmMnEXPRCVCNT", 0x4C, 32, MD(0) }, + {"LmMnXMTCNT", 0x4C, 32, MD(1) }, + {"LmMnCURRTAG", 0x54, 16, MD(0) }, + {"LmMnPREVTAG", 0x56, 16, MD(0) }, + {"LmMnACKOFS", 0x58, 8, MD(1) }, + {"LmMnXFRLVL", 0x59, 8, MD(0)|MD(1) }, + {"LmMnSGDMACTL", 0x5A, 8, MD(0)|MD(1) }, + {"LmMnSGDMASTAT", 0x5B, 8, MD(0)|MD(1) }, + {"LmMnDDMACTL", 0x5C, 8, MD(0)|MD(1) }, + {"LmMnDDMASTAT", 0x5D, 8, MD(0)|MD(1) }, + {"LmMnDDMAMODE", 0x5E, 16, MD(0)|MD(1) }, + {"LmMnPIPECTL", 0x61, 8, MD(0)|MD(1) }, + {"LmMnACTSCB", 0x62, 16, MD(0)|MD(1) }, + {"LmMnSGBHADR", 0x64, 8, MD(0)|MD(1) }, + {"LmMnSGBADR", 0x65, 8, MD(0)|MD(1) }, + {"LmMnSGDCNT", 0x66, 8, MD(0)|MD(1) }, + {"LmMnSGDMADR", 0x68, 32, MD(0)|MD(1) }, + {"LmMnSGDMADR", 0x6C, 32, MD(0)|MD(1) }, + {"LmMnXFRCNT", 0x70, 32, MD(0)|MD(1) }, + {"LmMnXMTCRC", 0x74, 32, MD(1) }, + {"LmCURRTAG", 0x74, 16, MD(0) }, + {"LmPREVTAG", 0x76, 16, MD(0) }, + {"LmMnDPSEL", 0x7B, 8, MD(0)|MD(1) }, + {"LmDPTHSTAT", 0x7C, 8, MODE_COMMON }, + {"LmMnHOLDLVL", 0x7D, 8, MD(0) }, + {"LmMnSATAFS", 0x7E, 8, MD(1) }, + {"LmMnCMPLTSTAT", 0x7F, 8, MD(0)|MD(1) }, + {"LmPRMSTAT0", 0x80, 32, MODE_COMMON }, + {"LmPRMSTAT1", 0x84, 32, MODE_COMMON }, + {"LmGPRMINT", 0x88, 8, MODE_COMMON }, + {"LmMnCURRSCB", 0x8A, 16, MD(0) }, + {"LmPRMICODE", 0x8C, 32, MODE_COMMON }, + {"LmMnRCVCNT", 0x90, 16, MD(0) }, + {"LmMnBUFSTAT", 0x92, 16, MD(0) }, + {"LmMnXMTHDRSIZE",0x92, 8, MD(1) }, + {"LmMnXMTSIZE", 0x93, 8, MD(1) }, + {"LmMnTGTXFRCNT", 0x94, 32, MD(0) }, + {"LmMnEXPROFS", 0x98, 32, MD(0) }, + {"LmMnXMTROFS", 0x98, 32, MD(1) }, + {"LmMnRCVROFS", 0x9C, 32, MD(0) }, + {"LmCONCTL", 0xA0, 16, MODE_COMMON }, + {"LmBITLTIMER", 0xA2, 16, MODE_COMMON }, + {"LmWWNLOW", 0xA8, 32, MODE_COMMON }, + {"LmWWNHIGH", 0xAC, 32, MODE_COMMON }, + {"LmMnFRMERR", 0xB0, 32, MD(0) }, + {"LmMnFRMERREN", 0xB4, 32, MD(0) }, + {"LmAWTIMER", 0xB8, 16, MODE_COMMON }, + {"LmAWTCTL", 0xBA, 8, MODE_COMMON }, + {"LmMnHDRCMPS", 0xC0, 32, MD(0) }, + {"LmMnXMTSTAT", 0xC4, 8, MD(1) }, + {"LmHWTSTATEN", 0xC5, 8, MODE_COMMON }, + {"LmMnRRDYRC", 0xC6, 8, MD(0) }, + {"LmMnRRDYTC", 0xC6, 8, MD(1) }, + {"LmHWTSTAT", 0xC7, 8, MODE_COMMON }, + {"LmMnDATABUFADR",0xC8, 16, MD(0)|MD(1) }, + {"LmDWSSTATUS", 0xCB, 8, MODE_COMMON }, + {"LmMnACTSTAT", 0xCE, 16, MD(0)|MD(1) }, + {"LmMnREQSCB", 0xD2, 16, MD(0)|MD(1) }, + {"LmXXXPRIM", 0xD4, 32, MODE_COMMON }, + {"LmRCVASTAT", 0xD9, 8, MODE_COMMON }, + {"LmINTDIS1", 0xDA, 8, MODE_COMMON }, + {"LmPSTORESEL", 0xDB, 8, 
MODE_COMMON }, + {"LmPSTORE", 0xDC, 32, MODE_COMMON }, + {"LmPRIMSTAT0EN", 0xE0, 32, MODE_COMMON }, + {"LmPRIMSTAT1EN", 0xE4, 32, MODE_COMMON }, + {"LmDONETCTL", 0xF2, 16, MODE_COMMON }, + {NULL, 0, 0, 0 } +}; +/* +static struct lseq_cio_regs LSEQmOOBREGS[] = { + {"OOB_BFLTR" ,0x100, 8, MD(5)}, + {"OOB_INIT_MIN" ,0x102,16, MD(5)}, + {"OOB_INIT_MAX" ,0x104,16, MD(5)}, + {"OOB_INIT_NEG" ,0x106,16, MD(5)}, + {"OOB_SAS_MIN" ,0x108,16, MD(5)}, + {"OOB_SAS_MAX" ,0x10A,16, MD(5)}, + {"OOB_SAS_NEG" ,0x10C,16, MD(5)}, + {"OOB_WAKE_MIN" ,0x10E,16, MD(5)}, + {"OOB_WAKE_MAX" ,0x110,16, MD(5)}, + {"OOB_WAKE_NEG" ,0x112,16, MD(5)}, + {"OOB_IDLE_MAX" ,0x114,16, MD(5)}, + {"OOB_BURST_MAX" ,0x116,16, MD(5)}, + {"OOB_XMIT_BURST" ,0x118, 8, MD(5)}, + {"OOB_SEND_PAIRS" ,0x119, 8, MD(5)}, + {"OOB_INIT_IDLE" ,0x11A, 8, MD(5)}, + {"OOB_INIT_NEGO" ,0x11C, 8, MD(5)}, + {"OOB_SAS_IDLE" ,0x11E, 8, MD(5)}, + {"OOB_SAS_NEGO" ,0x120, 8, MD(5)}, + {"OOB_WAKE_IDLE" ,0x122, 8, MD(5)}, + {"OOB_WAKE_NEGO" ,0x124, 8, MD(5)}, + {"OOB_DATA_KBITS" ,0x126, 8, MD(5)}, + {"OOB_BURST_DATA" ,0x128,32, MD(5)}, + {"OOB_ALIGN_0_DATA" ,0x12C,32, MD(5)}, + {"OOB_ALIGN_1_DATA" ,0x130,32, MD(5)}, + {"OOB_SYNC_DATA" ,0x134,32, MD(5)}, + {"OOB_D10_2_DATA" ,0x138,32, MD(5)}, + {"OOB_PHY_RST_CNT" ,0x13C,32, MD(5)}, + {"OOB_SIG_GEN" ,0x140, 8, MD(5)}, + {"OOB_XMIT" ,0x141, 8, MD(5)}, + {"FUNCTION_MAKS" ,0x142, 8, MD(5)}, + {"OOB_MODE" ,0x143, 8, MD(5)}, + {"CURRENT_STATUS" ,0x144, 8, MD(5)}, + {"SPEED_MASK" ,0x145, 8, MD(5)}, + {"PRIM_COUNT" ,0x146, 8, MD(5)}, + {"OOB_SIGNALS" ,0x148, 8, MD(5)}, + {"OOB_DATA_DET" ,0x149, 8, MD(5)}, + {"OOB_TIME_OUT" ,0x14C, 8, MD(5)}, + {"OOB_TIMER_ENABLE" ,0x14D, 8, MD(5)}, + {"OOB_STATUS" ,0x14E, 8, MD(5)}, + {"HOT_PLUG_DELAY" ,0x150, 8, MD(5)}, + {"RCD_DELAY" ,0x151, 8, MD(5)}, + {"COMSAS_TIMER" ,0x152, 8, MD(5)}, + {"SNTT_DELAY" ,0x153, 8, MD(5)}, + {"SPD_CHNG_DELAY" ,0x154, 8, MD(5)}, + {"SNLT_DELAY" ,0x155, 8, MD(5)}, + {"SNWT_DELAY" ,0x156, 8, MD(5)}, + {"ALIGN_DELAY" ,0x157, 8, MD(5)}, + {"INT_ENABLE_0" ,0x158, 8, MD(5)}, + {"INT_ENABLE_1" ,0x159, 8, MD(5)}, + {"INT_ENABLE_2" ,0x15A, 8, MD(5)}, + {"INT_ENABLE_3" ,0x15B, 8, MD(5)}, + {"OOB_TEST_REG" ,0x15C, 8, MD(5)}, + {"PHY_CONTROL_0" ,0x160, 8, MD(5)}, + {"PHY_CONTROL_1" ,0x161, 8, MD(5)}, + {"PHY_CONTROL_2" ,0x162, 8, MD(5)}, + {"PHY_CONTROL_3" ,0x163, 8, MD(5)}, + {"PHY_OOB_CAL_TX" ,0x164, 8, MD(5)}, + {"PHY_OOB_CAL_RX" ,0x165, 8, MD(5)}, + {"OOB_PHY_CAL_TX" ,0x166, 8, MD(5)}, + {"OOB_PHY_CAL_RX" ,0x167, 8, MD(5)}, + {"PHY_CONTROL_4" ,0x168, 8, MD(5)}, + {"PHY_TEST" ,0x169, 8, MD(5)}, + {"PHY_PWR_CTL" ,0x16A, 8, MD(5)}, + {"PHY_PWR_DELAY" ,0x16B, 8, MD(5)}, + {"OOB_SM_CON" ,0x16C, 8, MD(5)}, + {"ADDR_TRAP_1" ,0x16D, 8, MD(5)}, + {"ADDR_NEXT_1" ,0x16E, 8, MD(5)}, + {"NEXT_ST_1" ,0x16F, 8, MD(5)}, + {"OOB_SM_STATE" ,0x170, 8, MD(5)}, + {"ADDR_TRAP_2" ,0x171, 8, MD(5)}, + {"ADDR_NEXT_2" ,0x172, 8, MD(5)}, + {"NEXT_ST_2" ,0x173, 8, MD(5)}, + {NULL, 0, 0, 0 } +}; +*/ +#define STR_8BIT " %30s[0x%04x]:0x%02x\n" +#define STR_16BIT " %30s[0x%04x]:0x%04x\n" +#define STR_32BIT " %30s[0x%04x]:0x%08x\n" +#define STR_64BIT " %30s[0x%04x]:0x%llx\n" + +#define PRINT_REG_8bit(_ha, _n, _r) asd_printk(STR_8BIT, #_n, _n, \ + asd_read_reg_byte(_ha, _r)) +#define PRINT_REG_16bit(_ha, _n, _r) asd_printk(STR_16BIT, #_n, _n, \ + asd_read_reg_word(_ha, _r)) +#define PRINT_REG_32bit(_ha, _n, _r) asd_printk(STR_32BIT, #_n, _n, \ + asd_read_reg_dword(_ha, _r)) + +#define PRINT_CREG_8bit(_ha, _n) asd_printk(STR_8BIT, #_n, _n, \ + asd_read_reg_byte(_ha, C##_n)) +#define 
PRINT_CREG_16bit(_ha, _n) asd_printk(STR_16BIT, #_n, _n, \ + asd_read_reg_word(_ha, C##_n)) +#define PRINT_CREG_32bit(_ha, _n) asd_printk(STR_32BIT, #_n, _n, \ + asd_read_reg_dword(_ha, C##_n)) + +#define MSTR_8BIT " Mode:%02d %30s[0x%04x]:0x%02x\n" +#define MSTR_16BIT " Mode:%02d %30s[0x%04x]:0x%04x\n" +#define MSTR_32BIT " Mode:%02d %30s[0x%04x]:0x%08x\n" + +#define PRINT_MREG_8bit(_ha, _m, _n, _r) asd_printk(MSTR_8BIT, _m, #_n, _n, \ + asd_read_reg_byte(_ha, _r)) +#define PRINT_MREG_16bit(_ha, _m, _n, _r) asd_printk(MSTR_16BIT, _m, #_n, _n, \ + asd_read_reg_word(_ha, _r)) +#define PRINT_MREG_32bit(_ha, _m, _n, _r) asd_printk(MSTR_32BIT, _m, #_n, _n, \ + asd_read_reg_dword(_ha, _r)) + +/* can also be used for MD when the register is mode aware already */ +#define PRINT_MIS_byte(_ha, _n) asd_printk(STR_8BIT, #_n,CSEQ_##_n-CMAPPEDSCR,\ + asd_read_reg_byte(_ha, CSEQ_##_n)) +#define PRINT_MIS_word(_ha, _n) asd_printk(STR_16BIT,#_n,CSEQ_##_n-CMAPPEDSCR,\ + asd_read_reg_word(_ha, CSEQ_##_n)) +#define PRINT_MIS_dword(_ha, _n) \ + asd_printk(STR_32BIT,#_n,CSEQ_##_n-CMAPPEDSCR,\ + asd_read_reg_dword(_ha, CSEQ_##_n)) +#define PRINT_MIS_qword(_ha, _n) \ + asd_printk(STR_64BIT, #_n,CSEQ_##_n-CMAPPEDSCR, \ + (unsigned long long)(((u64)asd_read_reg_dword(_ha, CSEQ_##_n)) \ + | (((u64)asd_read_reg_dword(_ha, (CSEQ_##_n)+4))<<32))) + +#define CMDP_REG(_n, _m) (_m*(CSEQ_PAGE_SIZE*2)+CSEQ_##_n) +#define PRINT_CMDP_word(_ha, _n) \ +asd_printk("%20s 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", \ + #_n, \ + asd_read_reg_word(_ha, CMDP_REG(_n, 0)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 1)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 2)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 3)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 4)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 5)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 6)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 7))) + +#define PRINT_CMDP_byte(_ha, _n) \ +asd_printk("%20s 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", \ + #_n, \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 0)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 1)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 2)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 3)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 4)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 5)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 6)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 7))) + +static void asd_dump_cseq_state(struct asd_ha_struct *asd_ha) +{ + int mode; + + asd_printk("CSEQ STATE\n"); + + asd_printk("ARP2 REGISTERS\n"); + + PRINT_CREG_32bit(asd_ha, ARP2CTL); + PRINT_CREG_32bit(asd_ha, ARP2INT); + PRINT_CREG_32bit(asd_ha, ARP2INTEN); + PRINT_CREG_8bit(asd_ha, MODEPTR); + PRINT_CREG_8bit(asd_ha, ALTMODE); + PRINT_CREG_8bit(asd_ha, FLAG); + PRINT_CREG_8bit(asd_ha, ARP2INTCTL); + PRINT_CREG_16bit(asd_ha, STACK); + PRINT_CREG_16bit(asd_ha, PRGMCNT); + PRINT_CREG_16bit(asd_ha, ACCUM); + PRINT_CREG_16bit(asd_ha, SINDEX); + PRINT_CREG_16bit(asd_ha, DINDEX); + PRINT_CREG_8bit(asd_ha, SINDIR); + PRINT_CREG_8bit(asd_ha, DINDIR); + PRINT_CREG_8bit(asd_ha, JUMLDIR); + PRINT_CREG_8bit(asd_ha, ARP2HALTCODE); + PRINT_CREG_16bit(asd_ha, CURRADDR); + PRINT_CREG_16bit(asd_ha, LASTADDR); + PRINT_CREG_16bit(asd_ha, NXTLADDR); + + asd_printk("IOP REGISTERS\n"); + + PRINT_REG_32bit(asd_ha, BISTCTL1, CBISTCTL); + PRINT_CREG_32bit(asd_ha, MAPPEDSCR); + + asd_printk("CIO REGISTERS\n"); + + for (mode = 0; mode < 9; mode++) + PRINT_MREG_16bit(asd_ha, mode, MnSCBPTR, CMnSCBPTR(mode)); + PRINT_MREG_16bit(asd_ha, 15, MnSCBPTR, CMnSCBPTR(15)); + + for (mode = 0; mode 
< 9; mode++) + PRINT_MREG_16bit(asd_ha, mode, MnDDBPTR, CMnDDBPTR(mode)); + PRINT_MREG_16bit(asd_ha, 15, MnDDBPTR, CMnDDBPTR(15)); + + for (mode = 0; mode < 8; mode++) + PRINT_MREG_32bit(asd_ha, mode, MnREQMBX, CMnREQMBX(mode)); + for (mode = 0; mode < 8; mode++) + PRINT_MREG_32bit(asd_ha, mode, MnRSPMBX, CMnRSPMBX(mode)); + for (mode = 0; mode < 8; mode++) + PRINT_MREG_32bit(asd_ha, mode, MnINT, CMnINT(mode)); + for (mode = 0; mode < 8; mode++) + PRINT_MREG_32bit(asd_ha, mode, MnINTEN, CMnINTEN(mode)); + + PRINT_CREG_8bit(asd_ha, SCRATCHPAGE); + for (mode = 0; mode < 8; mode++) + PRINT_MREG_8bit(asd_ha, mode, MnSCRATCHPAGE, + CMnSCRATCHPAGE(mode)); + + PRINT_REG_32bit(asd_ha, CLINKCON, CLINKCON); + PRINT_REG_8bit(asd_ha, CCONMSK, CCONMSK); + PRINT_REG_8bit(asd_ha, CCONEXIST, CCONEXIST); + PRINT_REG_16bit(asd_ha, CCONMODE, CCONMODE); + PRINT_REG_32bit(asd_ha, CTIMERCALC, CTIMERCALC); + PRINT_REG_8bit(asd_ha, CINTDIS, CINTDIS); + + asd_printk("SCRATCH MEMORY\n"); + + asd_printk("MIP 4 >>>>>\n"); + PRINT_MIS_word(asd_ha, Q_EXE_HEAD); + PRINT_MIS_word(asd_ha, Q_EXE_TAIL); + PRINT_MIS_word(asd_ha, Q_DONE_HEAD); + PRINT_MIS_word(asd_ha, Q_DONE_TAIL); + PRINT_MIS_word(asd_ha, Q_SEND_HEAD); + PRINT_MIS_word(asd_ha, Q_SEND_TAIL); + PRINT_MIS_word(asd_ha, Q_DMA2CHIM_HEAD); + PRINT_MIS_word(asd_ha, Q_DMA2CHIM_TAIL); + PRINT_MIS_word(asd_ha, Q_COPY_HEAD); + PRINT_MIS_word(asd_ha, Q_COPY_TAIL); + PRINT_MIS_word(asd_ha, REG0); + PRINT_MIS_word(asd_ha, REG1); + PRINT_MIS_dword(asd_ha, REG2); + PRINT_MIS_byte(asd_ha, LINK_CTL_Q_MAP); + PRINT_MIS_byte(asd_ha, MAX_CSEQ_MODE); + PRINT_MIS_byte(asd_ha, FREE_LIST_HACK_COUNT); + + asd_printk("MIP 5 >>>>\n"); + PRINT_MIS_qword(asd_ha, EST_NEXUS_REQ_QUEUE); + PRINT_MIS_qword(asd_ha, EST_NEXUS_REQ_COUNT); + PRINT_MIS_word(asd_ha, Q_EST_NEXUS_HEAD); + PRINT_MIS_word(asd_ha, Q_EST_NEXUS_TAIL); + PRINT_MIS_word(asd_ha, NEED_EST_NEXUS_SCB); + PRINT_MIS_byte(asd_ha, EST_NEXUS_REQ_HEAD); + PRINT_MIS_byte(asd_ha, EST_NEXUS_REQ_TAIL); + PRINT_MIS_byte(asd_ha, EST_NEXUS_SCB_OFFSET); + + asd_printk("MIP 6 >>>>\n"); + PRINT_MIS_word(asd_ha, INT_ROUT_RET_ADDR0); + PRINT_MIS_word(asd_ha, INT_ROUT_RET_ADDR1); + PRINT_MIS_word(asd_ha, INT_ROUT_SCBPTR); + PRINT_MIS_byte(asd_ha, INT_ROUT_MODE); + PRINT_MIS_byte(asd_ha, ISR_SCRATCH_FLAGS); + PRINT_MIS_word(asd_ha, ISR_SAVE_SINDEX); + PRINT_MIS_word(asd_ha, ISR_SAVE_DINDEX); + PRINT_MIS_word(asd_ha, Q_MONIRTT_HEAD); + PRINT_MIS_word(asd_ha, Q_MONIRTT_TAIL); + PRINT_MIS_byte(asd_ha, FREE_SCB_MASK); + PRINT_MIS_word(asd_ha, BUILTIN_FREE_SCB_HEAD); + PRINT_MIS_word(asd_ha, BUILTIN_FREE_SCB_TAIL); + PRINT_MIS_word(asd_ha, EXTENDED_FREE_SCB_HEAD); + PRINT_MIS_word(asd_ha, EXTENDED_FREE_SCB_TAIL); + + asd_printk("MIP 7 >>>>\n"); + PRINT_MIS_qword(asd_ha, EMPTY_REQ_QUEUE); + PRINT_MIS_qword(asd_ha, EMPTY_REQ_COUNT); + PRINT_MIS_word(asd_ha, Q_EMPTY_HEAD); + PRINT_MIS_word(asd_ha, Q_EMPTY_TAIL); + PRINT_MIS_word(asd_ha, NEED_EMPTY_SCB); + PRINT_MIS_byte(asd_ha, EMPTY_REQ_HEAD); + PRINT_MIS_byte(asd_ha, EMPTY_REQ_TAIL); + PRINT_MIS_byte(asd_ha, EMPTY_SCB_OFFSET); + PRINT_MIS_word(asd_ha, PRIMITIVE_DATA); + PRINT_MIS_dword(asd_ha, TIMEOUT_CONST); + + asd_printk("MDP 0 >>>>\n"); + asd_printk("%-20s %6s %6s %6s %6s %6s %6s %6s %6s\n", + "Mode: ", "0", "1", "2", "3", "4", "5", "6", "7"); + PRINT_CMDP_word(asd_ha, LRM_SAVE_SINDEX); + PRINT_CMDP_word(asd_ha, LRM_SAVE_SCBPTR); + PRINT_CMDP_word(asd_ha, Q_LINK_HEAD); + PRINT_CMDP_word(asd_ha, Q_LINK_TAIL); + PRINT_CMDP_byte(asd_ha, LRM_SAVE_SCRPAGE); + + asd_printk("MDP 0 Mode 8 >>>>\n"); + 
PRINT_MIS_word(asd_ha, RET_ADDR); + PRINT_MIS_word(asd_ha, RET_SCBPTR); + PRINT_MIS_word(asd_ha, SAVE_SCBPTR); + PRINT_MIS_word(asd_ha, EMPTY_TRANS_CTX); + PRINT_MIS_word(asd_ha, RESP_LEN); + PRINT_MIS_word(asd_ha, TMF_SCBPTR); + PRINT_MIS_word(asd_ha, GLOBAL_PREV_SCB); + PRINT_MIS_word(asd_ha, GLOBAL_HEAD); + PRINT_MIS_word(asd_ha, CLEAR_LU_HEAD); + PRINT_MIS_byte(asd_ha, TMF_OPCODE); + PRINT_MIS_byte(asd_ha, SCRATCH_FLAGS); + PRINT_MIS_word(asd_ha, HSB_SITE); + PRINT_MIS_word(asd_ha, FIRST_INV_SCB_SITE); + PRINT_MIS_word(asd_ha, FIRST_INV_DDB_SITE); + + asd_printk("MDP 1 Mode 8 >>>>\n"); + PRINT_MIS_qword(asd_ha, LUN_TO_CLEAR); + PRINT_MIS_qword(asd_ha, LUN_TO_CHECK); + + asd_printk("MDP 2 Mode 8 >>>>\n"); + PRINT_MIS_qword(asd_ha, HQ_NEW_POINTER); + PRINT_MIS_qword(asd_ha, HQ_DONE_BASE); + PRINT_MIS_dword(asd_ha, HQ_DONE_POINTER); + PRINT_MIS_byte(asd_ha, HQ_DONE_PASS); +} + +#define PRINT_LREG_8bit(_h, _lseq, _n) \ + asd_printk(STR_8BIT, #_n, _n, asd_read_reg_byte(_h, Lm##_n(_lseq))) +#define PRINT_LREG_16bit(_h, _lseq, _n) \ + asd_printk(STR_16BIT, #_n, _n, asd_read_reg_word(_h, Lm##_n(_lseq))) +#define PRINT_LREG_32bit(_h, _lseq, _n) \ + asd_printk(STR_32BIT, #_n, _n, asd_read_reg_dword(_h, Lm##_n(_lseq))) + +#define PRINT_LMIP_byte(_h, _lseq, _n) \ + asd_printk(STR_8BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \ + asd_read_reg_byte(_h, LmSEQ_##_n(_lseq))) +#define PRINT_LMIP_word(_h, _lseq, _n) \ + asd_printk(STR_16BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \ + asd_read_reg_word(_h, LmSEQ_##_n(_lseq))) +#define PRINT_LMIP_dword(_h, _lseq, _n) \ + asd_printk(STR_32BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \ + asd_read_reg_dword(_h, LmSEQ_##_n(_lseq))) +#define PRINT_LMIP_qword(_h, _lseq, _n) \ + asd_printk(STR_64BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \ + (unsigned long long)(((unsigned long long) \ + asd_read_reg_dword(_h, LmSEQ_##_n(_lseq))) \ + | (((unsigned long long) \ + asd_read_reg_dword(_h, LmSEQ_##_n(_lseq)+4))<<32))) + +static void asd_print_lseq_cio_reg(struct asd_ha_struct *asd_ha, + u32 lseq_cio_addr, int i) +{ + switch (LSEQmCIOREGS[i].width) { + case 8: + asd_printk("%20s[0x%x]: 0x%02x\n", LSEQmCIOREGS[i].name, + LSEQmCIOREGS[i].offs, + asd_read_reg_byte(asd_ha, lseq_cio_addr + + LSEQmCIOREGS[i].offs)); + + break; + case 16: + asd_printk("%20s[0x%x]: 0x%04x\n", LSEQmCIOREGS[i].name, + LSEQmCIOREGS[i].offs, + asd_read_reg_word(asd_ha, lseq_cio_addr + + LSEQmCIOREGS[i].offs)); + + break; + case 32: + asd_printk("%20s[0x%x]: 0x%08x\n", LSEQmCIOREGS[i].name, + LSEQmCIOREGS[i].offs, + asd_read_reg_dword(asd_ha, lseq_cio_addr + + LSEQmCIOREGS[i].offs)); + break; + } +} + +static void asd_dump_lseq_state(struct asd_ha_struct *asd_ha, int lseq) +{ + u32 moffs; + int mode; + + asd_printk("LSEQ %d STATE\n", lseq); + + asd_printk("LSEQ%d: ARP2 REGISTERS\n", lseq); + PRINT_LREG_32bit(asd_ha, lseq, ARP2CTL); + PRINT_LREG_32bit(asd_ha, lseq, ARP2INT); + PRINT_LREG_32bit(asd_ha, lseq, ARP2INTEN); + PRINT_LREG_8bit(asd_ha, lseq, MODEPTR); + PRINT_LREG_8bit(asd_ha, lseq, ALTMODE); + PRINT_LREG_8bit(asd_ha, lseq, FLAG); + PRINT_LREG_8bit(asd_ha, lseq, ARP2INTCTL); + PRINT_LREG_16bit(asd_ha, lseq, STACK); + PRINT_LREG_16bit(asd_ha, lseq, PRGMCNT); + PRINT_LREG_16bit(asd_ha, lseq, ACCUM); + PRINT_LREG_16bit(asd_ha, lseq, SINDEX); + PRINT_LREG_16bit(asd_ha, lseq, DINDEX); + PRINT_LREG_8bit(asd_ha, lseq, SINDIR); + PRINT_LREG_8bit(asd_ha, lseq, DINDIR); + PRINT_LREG_8bit(asd_ha, lseq, JUMLDIR); + PRINT_LREG_8bit(asd_ha, lseq, ARP2HALTCODE); + PRINT_LREG_16bit(asd_ha, lseq, 
CURRADDR); + PRINT_LREG_16bit(asd_ha, lseq, LASTADDR); + PRINT_LREG_16bit(asd_ha, lseq, NXTLADDR); + + asd_printk("LSEQ%d: IOP REGISTERS\n", lseq); + + PRINT_LREG_32bit(asd_ha, lseq, MODECTL); + PRINT_LREG_32bit(asd_ha, lseq, DBGMODE); + PRINT_LREG_32bit(asd_ha, lseq, CONTROL); + PRINT_REG_32bit(asd_ha, BISTCTL0, LmBISTCTL0(lseq)); + PRINT_REG_32bit(asd_ha, BISTCTL1, LmBISTCTL1(lseq)); + + asd_printk("LSEQ%d: CIO REGISTERS\n", lseq); + asd_printk("Mode common:\n"); + + for (mode = 0; mode < 8; mode++) { + u32 lseq_cio_addr = LmSEQ_PHY_BASE(mode, lseq); + int i; + + for (i = 0; LSEQmCIOREGS[i].name; i++) + if (LSEQmCIOREGS[i].mode == MODE_COMMON) + asd_print_lseq_cio_reg(asd_ha,lseq_cio_addr,i); + } + + asd_printk("Mode unique:\n"); + for (mode = 0; mode < 8; mode++) { + u32 lseq_cio_addr = LmSEQ_PHY_BASE(mode, lseq); + int i; + + asd_printk("Mode %d\n", mode); + for (i = 0; LSEQmCIOREGS[i].name; i++) { + if (!(LSEQmCIOREGS[i].mode & (1 << mode))) + continue; + asd_print_lseq_cio_reg(asd_ha, lseq_cio_addr, i); + } + } + + asd_printk("SCRATCH MEMORY\n"); + + asd_printk("LSEQ%d MIP 0 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_HEAD); + PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_TAIL); + PRINT_LMIP_byte(asd_ha, lseq, LINK_NUMBER); + PRINT_LMIP_byte(asd_ha, lseq, SCRATCH_FLAGS); + PRINT_LMIP_dword(asd_ha, lseq, CONNECTION_STATE); + PRINT_LMIP_word(asd_ha, lseq, CONCTL); + PRINT_LMIP_byte(asd_ha, lseq, CONSTAT); + PRINT_LMIP_byte(asd_ha, lseq, CONNECTION_MODES); + PRINT_LMIP_word(asd_ha, lseq, REG1_ISR); + PRINT_LMIP_word(asd_ha, lseq, REG2_ISR); + PRINT_LMIP_word(asd_ha, lseq, REG3_ISR); + PRINT_LMIP_qword(asd_ha, lseq,REG0_ISR); + + asd_printk("LSEQ%d MIP 1 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR0); + PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR1); + PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR2); + PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR3); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE0); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE1); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE2); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE3); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_HEAD); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_TAIL); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_BUF_AVAIL); + PRINT_LMIP_dword(asd_ha, lseq, TIMEOUT_CONST); + PRINT_LMIP_word(asd_ha, lseq, ISR_SAVE_SINDEX); + PRINT_LMIP_word(asd_ha, lseq, ISR_SAVE_DINDEX); + + asd_printk("LSEQ%d MIP 2 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR0); + PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR1); + PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR2); + PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR3); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD0); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD1); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD2); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD3); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_HEAD); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_TAIL); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_BUFS_AVAIL); + + asd_printk("LSEQ%d MIP 3 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TMR_TOUT_CONST); + PRINT_LMIP_dword(asd_ha, lseq, SATA_INTERLOCK_TIMEOUT); + PRINT_LMIP_dword(asd_ha, lseq, SRST_ASSERT_TIMEOUT); + PRINT_LMIP_dword(asd_ha, lseq, RCV_FIS_TIMEOUT); + PRINT_LMIP_dword(asd_ha, lseq, ONE_MILLISEC_TIMEOUT); + PRINT_LMIP_dword(asd_ha, lseq, TEN_MS_COMINIT_TIMEOUT); + PRINT_LMIP_dword(asd_ha, lseq, SMP_RCV_TIMEOUT); + + for (mode = 0; mode < 3; mode++) { + asd_printk("LSEQ%d MDP 0 MODE %d >>>>\n", 
lseq, mode); + moffs = mode * LSEQ_MODE_SCRATCH_SIZE; + + asd_printk(STR_16BIT, "RET_ADDR", 0, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq) + + moffs)); + asd_printk(STR_16BIT, "REG0_MODE", 2, + asd_read_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq) + + moffs)); + asd_printk(STR_16BIT, "MODE_FLAGS", 4, + asd_read_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq) + + moffs)); + asd_printk(STR_16BIT, "RET_ADDR2", 0x6, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq) + + moffs)); + asd_printk(STR_16BIT, "RET_ADDR1", 0x8, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq) + + moffs)); + asd_printk(STR_8BIT, "OPCODE_TO_CSEQ", 0xB, + asd_read_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq) + + moffs)); + asd_printk(STR_16BIT, "DATA_TO_CSEQ", 0xC, + asd_read_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq) + + moffs)); + } + + asd_printk("LSEQ%d MDP 0 MODE 5 >>>>\n", lseq); + moffs = LSEQ_MODE5_PAGE0_OFFSET; + asd_printk(STR_16BIT, "RET_ADDR", 0, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq) + moffs)); + asd_printk(STR_16BIT, "REG0_MODE", 2, + asd_read_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq) + moffs)); + asd_printk(STR_16BIT, "MODE_FLAGS", 4, + asd_read_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq) + moffs)); + asd_printk(STR_16BIT, "RET_ADDR2", 0x6, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq) + moffs)); + asd_printk(STR_16BIT, "RET_ADDR1", 0x8, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq) + moffs)); + asd_printk(STR_8BIT, "OPCODE_TO_CSEQ", 0xB, + asd_read_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq) + moffs)); + asd_printk(STR_16BIT, "DATA_TO_CSEQ", 0xC, + asd_read_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq) + moffs)); + + asd_printk("LSEQ%d MDP 0 MODE 0 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, FIRST_INV_DDB_SITE); + PRINT_LMIP_word(asd_ha, lseq, EMPTY_TRANS_CTX); + PRINT_LMIP_word(asd_ha, lseq, RESP_LEN); + PRINT_LMIP_word(asd_ha, lseq, FIRST_INV_SCB_SITE); + PRINT_LMIP_dword(asd_ha, lseq, INTEN_SAVE); + PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_FRM_LEN); + PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_PROTOCOL); + PRINT_LMIP_byte(asd_ha, lseq, RESP_STATUS); + PRINT_LMIP_byte(asd_ha, lseq, LAST_LOADED_SGE); + PRINT_LMIP_byte(asd_ha, lseq, SAVE_SCBPTR); + + asd_printk("LSEQ%d MDP 0 MODE 1 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, Q_XMIT_HEAD); + PRINT_LMIP_word(asd_ha, lseq, M1_EMPTY_TRANS_CTX); + PRINT_LMIP_word(asd_ha, lseq, INI_CONN_TAG); + PRINT_LMIP_byte(asd_ha, lseq, FAILED_OPEN_STATUS); + PRINT_LMIP_byte(asd_ha, lseq, XMIT_REQUEST_TYPE); + PRINT_LMIP_byte(asd_ha, lseq, M1_RESP_STATUS); + PRINT_LMIP_byte(asd_ha, lseq, M1_LAST_LOADED_SGE); + PRINT_LMIP_word(asd_ha, lseq, M1_SAVE_SCBPTR); + + asd_printk("LSEQ%d MDP 0 MODE 2 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, PORT_COUNTER); + PRINT_LMIP_word(asd_ha, lseq, PM_TABLE_PTR); + PRINT_LMIP_word(asd_ha, lseq, SATA_INTERLOCK_TMR_SAVE); + PRINT_LMIP_word(asd_ha, lseq, IP_BITL); + PRINT_LMIP_word(asd_ha, lseq, COPY_SMP_CONN_TAG); + PRINT_LMIP_byte(asd_ha, lseq, P0M2_OFFS1AH); + + asd_printk("LSEQ%d MDP 0 MODE 4/5 >>>>\n", lseq); + PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_STATUS); + PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_MODE); + PRINT_LMIP_word(asd_ha, lseq, Q_LINK_HEAD); + PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_ERR); + PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_SIGNALS); + PRINT_LMIP_byte(asd_ha, lseq, SAS_RESET_MODE); + PRINT_LMIP_byte(asd_ha, lseq, LINK_RESET_RETRY_COUNT); + PRINT_LMIP_byte(asd_ha, lseq, NUM_LINK_RESET_RETRIES); + PRINT_LMIP_word(asd_ha, lseq, OOB_INT_ENABLES); + PRINT_LMIP_word(asd_ha, lseq, NOTIFY_TIMER_TIMEOUT); + PRINT_LMIP_word(asd_ha, 
lseq, NOTIFY_TIMER_DOWN_COUNT); + + asd_printk("LSEQ%d MDP 1 MODE 0 >>>>\n", lseq); + PRINT_LMIP_qword(asd_ha, lseq, SG_LIST_PTR_ADDR0); + PRINT_LMIP_qword(asd_ha, lseq, SG_LIST_PTR_ADDR1); + + asd_printk("LSEQ%d MDP 1 MODE 1 >>>>\n", lseq); + PRINT_LMIP_qword(asd_ha, lseq, M1_SG_LIST_PTR_ADDR0); + PRINT_LMIP_qword(asd_ha, lseq, M1_SG_LIST_PTR_ADDR1); + + asd_printk("LSEQ%d MDP 1 MODE 2 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, INVALID_DWORD_COUNT); + PRINT_LMIP_dword(asd_ha, lseq, DISPARITY_ERROR_COUNT); + PRINT_LMIP_dword(asd_ha, lseq, LOSS_OF_SYNC_COUNT); + + asd_printk("LSEQ%d MDP 1 MODE 4/5 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, FRAME_TYPE_MASK); + PRINT_LMIP_dword(asd_ha, lseq, HASHED_SRC_ADDR_MASK_PRINT); + PRINT_LMIP_byte(asd_ha, lseq, NUM_FILL_BYTES_MASK); + PRINT_LMIP_word(asd_ha, lseq, TAG_MASK); + PRINT_LMIP_word(asd_ha, lseq, TARGET_PORT_XFER_TAG); + PRINT_LMIP_dword(asd_ha, lseq, DATA_OFFSET); + + asd_printk("LSEQ%d MDP 2 MODE 0 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, SMP_RCV_TIMER_TERM_TS); + PRINT_LMIP_byte(asd_ha, lseq, DEVICE_BITS); + PRINT_LMIP_word(asd_ha, lseq, SDB_DDB); + PRINT_LMIP_word(asd_ha, lseq, SDB_NUM_TAGS); + PRINT_LMIP_word(asd_ha, lseq, SDB_CURR_TAG); + + asd_printk("LSEQ%d MDP 2 MODE 1 >>>>\n", lseq); + PRINT_LMIP_qword(asd_ha, lseq, TX_ID_ADDR_FRAME); + PRINT_LMIP_dword(asd_ha, lseq, OPEN_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, SRST_AS_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, LAST_LOADED_SG_EL); + + asd_printk("LSEQ%d MDP 2 MODE 2 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, CLOSE_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, BREAK_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, DWS_RESET_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, SATA_INTERLOCK_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, MCTL_TIMER_TERM_TS); + + asd_printk("LSEQ%d MDP 2 MODE 4/5 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, COMINIT_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, RCV_ID_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, RCV_FIS_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TIMER_TERM_TS); +} + +/** + * asd_dump_seq_state -- dump CSEQ and LSEQ states + * @asd_ha: pointer to host adapter structure + * @lseq_mask: mask of LSEQs of interest + */ +void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask) +{ + int lseq; + + asd_dump_cseq_state(asd_ha); + + if (lseq_mask != 0) + for_each_sequencer(lseq_mask, lseq_mask, lseq) + asd_dump_lseq_state(asd_ha, lseq); +} + +void asd_dump_frame_rcvd(struct asd_phy *phy, + struct done_list_struct *dl) +{ + unsigned long flags; + int i; + + switch ((dl->status_block[1] & 0x70) >> 3) { + case SAS_PROTOCOL_STP: + ASD_DPRINTK("STP proto device-to-host FIS:\n"); + break; + default: + case SAS_PROTOCOL_SSP: + ASD_DPRINTK("SAS proto IDENTIFY:\n"); + break; + } + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + for (i = 0; i < phy->sas_phy.frame_rcvd_size; i+=4) + ASD_DPRINTK("%02x: %02x %02x %02x %02x\n", + i, + phy->frame_rcvd[i], + phy->frame_rcvd[i+1], + phy->frame_rcvd[i+2], + phy->frame_rcvd[i+3]); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); +} + +#endif /* ASD_DEBUG */ diff --git a/drivers/scsi/aic94xx/aic94xx_dump.h b/drivers/scsi/aic94xx/aic94xx_dump.h new file mode 100644 index 000000000..d8faa5db1 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_dump.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Aic94xx SAS/SATA driver dump header file. + * + * Copyright (C) 2005 Adaptec, Inc. 
All rights reserved. + * Copyright (C) 2005 Luben Tuikov + */ + +#ifndef _AIC94XX_DUMP_H_ +#define _AIC94XX_DUMP_H_ + +#ifdef ASD_DEBUG + +void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask); +void asd_dump_frame_rcvd(struct asd_phy *phy, + struct done_list_struct *dl); +#else /* ASD_DEBUG */ + +static inline void asd_dump_seq_state(struct asd_ha_struct *asd_ha, + u8 lseq_mask) { } +static inline void asd_dump_frame_rcvd(struct asd_phy *phy, + struct done_list_struct *dl) { } +#endif /* ASD_DEBUG */ + +#endif /* _AIC94XX_DUMP_H_ */ diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c new file mode 100644 index 000000000..9dda296c0 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_hwi.c @@ -0,0 +1,1369 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Aic94xx SAS/SATA driver hardware interface. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + */ + +#include +#include +#include +#include +#include + +#include "aic94xx.h" +#include "aic94xx_reg.h" +#include "aic94xx_hwi.h" +#include "aic94xx_seq.h" +#include "aic94xx_dump.h" + +u32 MBAR0_SWB_SIZE; + +/* ---------- Initialization ---------- */ + +static int asd_get_user_sas_addr(struct asd_ha_struct *asd_ha) +{ + /* adapter came with a sas address */ + if (asd_ha->hw_prof.sas_addr[0]) + return 0; + + return sas_request_addr(asd_ha->sas_ha.shost, + asd_ha->hw_prof.sas_addr); +} + +static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha) +{ + int i; + + for (i = 0; i < ASD_MAX_PHYS; i++) { + if (asd_ha->hw_prof.phy_desc[i].sas_addr[0] == 0) + continue; + /* Set a phy's address only if it has none. + */ + ASD_DPRINTK("setting phy%d addr to %llx\n", i, + SAS_ADDR(asd_ha->hw_prof.sas_addr)); + memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr, + asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE); + } +} + +/* ---------- PHY initialization ---------- */ + +static void asd_init_phy_identify(struct asd_phy *phy) +{ + phy->identify_frame = phy->id_frm_tok->vaddr; + + memset(phy->identify_frame, 0, sizeof(*phy->identify_frame)); + + phy->identify_frame->dev_type = SAS_END_DEVICE; + if (phy->sas_phy.role & PHY_ROLE_INITIATOR) + phy->identify_frame->initiator_bits = phy->sas_phy.iproto; + if (phy->sas_phy.role & PHY_ROLE_TARGET) + phy->identify_frame->target_bits = phy->sas_phy.tproto; + memcpy(phy->identify_frame->sas_addr, phy->phy_desc->sas_addr, + SAS_ADDR_SIZE); + phy->identify_frame->phy_id = phy->sas_phy.id; +} + +static int asd_init_phy(struct asd_phy *phy) +{ + struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + sas_phy->enabled = 1; + sas_phy->iproto = SAS_PROTOCOL_ALL; + sas_phy->tproto = 0; + sas_phy->role = PHY_ROLE_INITIATOR; + sas_phy->oob_mode = OOB_NOT_CONNECTED; + sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; + + phy->id_frm_tok = asd_alloc_coherent(asd_ha, + sizeof(*phy->identify_frame), + GFP_KERNEL); + if (!phy->id_frm_tok) { + asd_printk("no mem for IDENTIFY for phy%d\n", sas_phy->id); + return -ENOMEM; + } else + asd_init_phy_identify(phy); + + memset(phy->frame_rcvd, 0, sizeof(phy->frame_rcvd)); + + return 0; +} + +static void asd_init_ports(struct asd_ha_struct *asd_ha) +{ + int i; + + spin_lock_init(&asd_ha->asd_ports_lock); + for (i = 0; i < ASD_MAX_PHYS; i++) { + struct asd_port *asd_port = &asd_ha->asd_ports[i]; + + memset(asd_port->sas_addr, 0, SAS_ADDR_SIZE); + memset(asd_port->attached_sas_addr, 0, SAS_ADDR_SIZE); + asd_port->phy_mask = 0; + asd_port->num_phys = 0; + } +} + 
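+/* Illustration (not part of the driver): asd_init_phys() below walks the
+ * hw_prof.enabled_phys bit mask with for_each_phy() so that only enabled
+ * phys are brought up.  Conceptually that is a plain bit scan, as in this
+ * hypothetical helper:
+ */
+static inline int example_next_enabled_phy(u8 phy_mask, int prev)
+{
+	int i;
+
+	for (i = prev + 1; i < ASD_MAX_PHYS; i++)
+		if (phy_mask & (1 << i))
+			return i;	/* next phy to initialize */
+	return -1;			/* no enabled phys left */
+}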
+static int asd_init_phys(struct asd_ha_struct *asd_ha) +{ + u8 i; + u8 phy_mask = asd_ha->hw_prof.enabled_phys; + + for (i = 0; i < ASD_MAX_PHYS; i++) { + struct asd_phy *phy = &asd_ha->phys[i]; + + phy->phy_desc = &asd_ha->hw_prof.phy_desc[i]; + phy->asd_port = NULL; + + phy->sas_phy.enabled = 0; + phy->sas_phy.id = i; + phy->sas_phy.sas_addr = &phy->phy_desc->sas_addr[0]; + phy->sas_phy.frame_rcvd = &phy->frame_rcvd[0]; + phy->sas_phy.ha = &asd_ha->sas_ha; + phy->sas_phy.lldd_phy = phy; + } + + /* Now enable and initialize only the enabled phys. */ + for_each_phy(phy_mask, phy_mask, i) { + int err = asd_init_phy(&asd_ha->phys[i]); + if (err) + return err; + } + + return 0; +} + +/* ---------- Sliding windows ---------- */ + +static int asd_init_sw(struct asd_ha_struct *asd_ha) +{ + struct pci_dev *pcidev = asd_ha->pcidev; + int err; + u32 v; + + /* Unlock MBARs */ + err = pci_read_config_dword(pcidev, PCI_CONF_MBAR_KEY, &v); + if (err) { + asd_printk("couldn't access conf. space of %s\n", + pci_name(pcidev)); + goto Err; + } + if (v) + err = pci_write_config_dword(pcidev, PCI_CONF_MBAR_KEY, v); + if (err) { + asd_printk("couldn't write to MBAR_KEY of %s\n", + pci_name(pcidev)); + goto Err; + } + + /* Set sliding windows A, B and C to point to proper internal + * memory regions. + */ + pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWA, REG_BASE_ADDR); + pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWB, + REG_BASE_ADDR_CSEQCIO); + pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWC, REG_BASE_ADDR_EXSI); + asd_ha->io_handle[0].swa_base = REG_BASE_ADDR; + asd_ha->io_handle[0].swb_base = REG_BASE_ADDR_CSEQCIO; + asd_ha->io_handle[0].swc_base = REG_BASE_ADDR_EXSI; + MBAR0_SWB_SIZE = asd_ha->io_handle[0].len - 0x80; + if (!asd_ha->iospace) { + /* MBAR1 will point to OCM (On Chip Memory) */ + pci_write_config_dword(pcidev, PCI_CONF_MBAR1, OCM_BASE_ADDR); + asd_ha->io_handle[1].swa_base = OCM_BASE_ADDR; + } + spin_lock_init(&asd_ha->iolock); +Err: + return err; +} + +/* ---------- SCB initialization ---------- */ + +/** + * asd_init_scbs - manually allocate the first SCB. + * @asd_ha: pointer to host adapter structure + * + * This allocates the very first SCB which would be sent to the + * sequencer for execution. Its bus address is written to + * CSEQ_Q_NEW_POINTER, mode page 2, mode 8. Since the bus address of + * the _next_ scb to be DMA-ed to the host adapter is read from the last + * SCB DMA-ed to the host adapter, we have to always stay one step + * ahead of the sequencer and keep one SCB already allocated. 
+ */ +static int asd_init_scbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int bitmap_bytes; + + /* allocate the index array and bitmap */ + asd_ha->seq.tc_index_bitmap_bits = asd_ha->hw_prof.max_scbs; + asd_ha->seq.tc_index_array = kcalloc(asd_ha->seq.tc_index_bitmap_bits, + sizeof(void *), + GFP_KERNEL); + if (!asd_ha->seq.tc_index_array) + return -ENOMEM; + + bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8; + bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long); + asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL); + if (!asd_ha->seq.tc_index_bitmap) { + kfree(asd_ha->seq.tc_index_array); + asd_ha->seq.tc_index_array = NULL; + return -ENOMEM; + } + + spin_lock_init(&seq->tc_index_lock); + + seq->next_scb.size = sizeof(struct scb); + seq->next_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, GFP_KERNEL, + &seq->next_scb.dma_handle); + if (!seq->next_scb.vaddr) { + kfree(asd_ha->seq.tc_index_bitmap); + kfree(asd_ha->seq.tc_index_array); + asd_ha->seq.tc_index_bitmap = NULL; + asd_ha->seq.tc_index_array = NULL; + return -ENOMEM; + } + + seq->pending = 0; + spin_lock_init(&seq->pend_q_lock); + INIT_LIST_HEAD(&seq->pend_q); + + return 0; +} + +static void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha) +{ + asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE; + asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE; + ASD_DPRINTK("max_scbs:%d, max_ddbs:%d\n", + asd_ha->hw_prof.max_scbs, + asd_ha->hw_prof.max_ddbs); +} + +/* ---------- Done List initialization ---------- */ + +static void asd_dl_tasklet_handler(unsigned long); + +static int asd_init_dl(struct asd_ha_struct *asd_ha) +{ + asd_ha->seq.actual_dl + = asd_alloc_coherent(asd_ha, + ASD_DL_SIZE * sizeof(struct done_list_struct), + GFP_KERNEL); + if (!asd_ha->seq.actual_dl) + return -ENOMEM; + asd_ha->seq.dl = asd_ha->seq.actual_dl->vaddr; + asd_ha->seq.dl_toggle = ASD_DEF_DL_TOGGLE; + asd_ha->seq.dl_next = 0; + tasklet_init(&asd_ha->seq.dl_tasklet, asd_dl_tasklet_handler, + (unsigned long) asd_ha); + + return 0; +} + +/* ---------- EDB and ESCB init ---------- */ + +static int asd_alloc_edbs(struct asd_ha_struct *asd_ha, gfp_t gfp_flags) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int i; + + seq->edb_arr = kmalloc_array(seq->num_edbs, sizeof(*seq->edb_arr), + gfp_flags); + if (!seq->edb_arr) + return -ENOMEM; + + for (i = 0; i < seq->num_edbs; i++) { + seq->edb_arr[i] = asd_alloc_coherent(asd_ha, ASD_EDB_SIZE, + gfp_flags); + if (!seq->edb_arr[i]) + goto Err_unroll; + memset(seq->edb_arr[i]->vaddr, 0, ASD_EDB_SIZE); + } + + ASD_DPRINTK("num_edbs:%d\n", seq->num_edbs); + + return 0; + +Err_unroll: + for (i-- ; i >= 0; i--) + asd_free_coherent(asd_ha, seq->edb_arr[i]); + kfree(seq->edb_arr); + seq->edb_arr = NULL; + + return -ENOMEM; +} + +static int asd_alloc_escbs(struct asd_ha_struct *asd_ha, + gfp_t gfp_flags) +{ + struct asd_seq_data *seq = &asd_ha->seq; + struct asd_ascb *escb; + int i, escbs; + + seq->escb_arr = kmalloc_array(seq->num_escbs, sizeof(*seq->escb_arr), + gfp_flags); + if (!seq->escb_arr) + return -ENOMEM; + + escbs = seq->num_escbs; + escb = asd_ascb_alloc_list(asd_ha, &escbs, gfp_flags); + if (!escb) { + asd_printk("couldn't allocate list of escbs\n"); + goto Err; + } + seq->num_escbs -= escbs; /* subtract what was not allocated */ + ASD_DPRINTK("num_escbs:%d\n", seq->num_escbs); + + for (i = 0; i < seq->num_escbs; i++, escb = list_entry(escb->list.next, + struct asd_ascb, + list)) { + seq->escb_arr[i] = escb; + 
escb->scb->header.opcode = EMPTY_SCB; + } + + return 0; +Err: + kfree(seq->escb_arr); + seq->escb_arr = NULL; + return -ENOMEM; + +} + +static void asd_assign_edbs2escbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int i, k, z = 0; + + for (i = 0; i < seq->num_escbs; i++) { + struct asd_ascb *ascb = seq->escb_arr[i]; + struct empty_scb *escb = &ascb->scb->escb; + + ascb->edb_index = z; + + escb->num_valid = ASD_EDBS_PER_SCB; + + for (k = 0; k < ASD_EDBS_PER_SCB; k++) { + struct sg_el *eb = &escb->eb[k]; + struct asd_dma_tok *edb = seq->edb_arr[z++]; + + memset(eb, 0, sizeof(*eb)); + eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle)); + eb->size = cpu_to_le32(((u32) edb->size)); + } + } +} + +/** + * asd_init_escbs -- allocate and initialize empty scbs + * @asd_ha: pointer to host adapter structure + * + * An empty SCB has sg_elements of ASD_EDBS_PER_SCB (7) buffers. + * They transport sense data, etc. + */ +static int asd_init_escbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int err = 0; + + /* Allocate two empty data buffers (edb) per sequencer. */ + int edbs = 2*(1+asd_ha->hw_prof.num_phys); + + seq->num_escbs = (edbs+ASD_EDBS_PER_SCB-1)/ASD_EDBS_PER_SCB; + seq->num_edbs = seq->num_escbs * ASD_EDBS_PER_SCB; + + err = asd_alloc_edbs(asd_ha, GFP_KERNEL); + if (err) { + asd_printk("couldn't allocate edbs\n"); + return err; + } + + err = asd_alloc_escbs(asd_ha, GFP_KERNEL); + if (err) { + asd_printk("couldn't allocate escbs\n"); + return err; + } + + asd_assign_edbs2escbs(asd_ha); + /* In order to insure that normal SCBs do not overfill sequencer + * memory and leave no space for escbs (halting condition), + * we increment pending here by the number of escbs. However, + * escbs are never pending. + */ + seq->pending = seq->num_escbs; + seq->can_queue = 1 + (asd_ha->hw_prof.max_scbs - seq->pending)/2; + + return 0; +} + +/* ---------- HW initialization ---------- */ + +/** + * asd_chip_hardrst -- hard reset the chip + * @asd_ha: pointer to host adapter structure + * + * This takes 16 cycles and is synchronous to CFCLK, which runs + * at 200 MHz, so this should take at most 80 nanoseconds. + */ +int asd_chip_hardrst(struct asd_ha_struct *asd_ha) +{ + int i; + int count = 100; + u32 reg; + + for (i = 0 ; i < 4 ; i++) { + asd_write_reg_dword(asd_ha, COMBIST, HARDRST); + } + + do { + udelay(1); + reg = asd_read_reg_dword(asd_ha, CHIMINT); + if (reg & HARDRSTDET) { + asd_write_reg_dword(asd_ha, CHIMINT, + HARDRSTDET|PORRSTDET); + return 0; + } + } while (--count > 0); + + return -ENODEV; +} + +/** + * asd_init_chip -- initialize the chip + * @asd_ha: pointer to host adapter structure + * + * Hard resets the chip, disables HA interrupts, downloads the sequnecer + * microcode and starts the sequencers. The caller has to explicitly + * enable HA interrupts with asd_enable_ints(asd_ha). 
+ */ +static int asd_init_chip(struct asd_ha_struct *asd_ha) +{ + int err; + + err = asd_chip_hardrst(asd_ha); + if (err) { + asd_printk("couldn't hard reset %s\n", + pci_name(asd_ha->pcidev)); + goto out; + } + + asd_disable_ints(asd_ha); + + err = asd_init_seqs(asd_ha); + if (err) { + asd_printk("couldn't init seqs for %s\n", + pci_name(asd_ha->pcidev)); + goto out; + } + + err = asd_start_seqs(asd_ha); + if (err) { + asd_printk("couldn't start seqs for %s\n", + pci_name(asd_ha->pcidev)); + goto out; + } +out: + return err; +} + +#define MAX_DEVS ((OCM_MAX_SIZE) / (ASD_DDB_SIZE)) + +static int max_devs = 0; +module_param_named(max_devs, max_devs, int, S_IRUGO); +MODULE_PARM_DESC(max_devs, "\n" + "\tMaximum number of SAS devices to support (not LUs).\n" + "\tDefault: 2176, Maximum: 65663.\n"); + +static int max_cmnds = 0; +module_param_named(max_cmnds, max_cmnds, int, S_IRUGO); +MODULE_PARM_DESC(max_cmnds, "\n" + "\tMaximum number of commands queuable.\n" + "\tDefault: 512, Maximum: 66047.\n"); + +static void asd_extend_devctx_ocm(struct asd_ha_struct *asd_ha) +{ + unsigned long dma_addr = OCM_BASE_ADDR; + u32 d; + + dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE; + asd_write_reg_addr(asd_ha, DEVCTXBASE, (dma_addr_t) dma_addr); + d = asd_read_reg_dword(asd_ha, CTXDOMAIN); + d |= 4; + asd_write_reg_dword(asd_ha, CTXDOMAIN, d); + asd_ha->hw_prof.max_ddbs += MAX_DEVS; +} + +static int asd_extend_devctx(struct asd_ha_struct *asd_ha) +{ + dma_addr_t dma_handle; + unsigned long dma_addr; + u32 d; + int size; + + asd_extend_devctx_ocm(asd_ha); + + asd_ha->hw_prof.ddb_ext = NULL; + if (max_devs <= asd_ha->hw_prof.max_ddbs || max_devs > 0xFFFF) { + max_devs = asd_ha->hw_prof.max_ddbs; + return 0; + } + + size = (max_devs - asd_ha->hw_prof.max_ddbs + 1) * ASD_DDB_SIZE; + + asd_ha->hw_prof.ddb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL); + if (!asd_ha->hw_prof.ddb_ext) { + asd_printk("couldn't allocate memory for %d devices\n", + max_devs); + max_devs = asd_ha->hw_prof.max_ddbs; + return -ENOMEM; + } + dma_handle = asd_ha->hw_prof.ddb_ext->dma_handle; + dma_addr = ALIGN((unsigned long) dma_handle, ASD_DDB_SIZE); + dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE; + dma_handle = (dma_addr_t) dma_addr; + asd_write_reg_addr(asd_ha, DEVCTXBASE, dma_handle); + d = asd_read_reg_dword(asd_ha, CTXDOMAIN); + d &= ~4; + asd_write_reg_dword(asd_ha, CTXDOMAIN, d); + + asd_ha->hw_prof.max_ddbs = max_devs; + + return 0; +} + +static int asd_extend_cmdctx(struct asd_ha_struct *asd_ha) +{ + dma_addr_t dma_handle; + unsigned long dma_addr; + u32 d; + int size; + + asd_ha->hw_prof.scb_ext = NULL; + if (max_cmnds <= asd_ha->hw_prof.max_scbs || max_cmnds > 0xFFFF) { + max_cmnds = asd_ha->hw_prof.max_scbs; + return 0; + } + + size = (max_cmnds - asd_ha->hw_prof.max_scbs + 1) * ASD_SCB_SIZE; + + asd_ha->hw_prof.scb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL); + if (!asd_ha->hw_prof.scb_ext) { + asd_printk("couldn't allocate memory for %d commands\n", + max_cmnds); + max_cmnds = asd_ha->hw_prof.max_scbs; + return -ENOMEM; + } + dma_handle = asd_ha->hw_prof.scb_ext->dma_handle; + dma_addr = ALIGN((unsigned long) dma_handle, ASD_SCB_SIZE); + dma_addr -= asd_ha->hw_prof.max_scbs * ASD_SCB_SIZE; + dma_handle = (dma_addr_t) dma_addr; + asd_write_reg_addr(asd_ha, CMDCTXBASE, dma_handle); + d = asd_read_reg_dword(asd_ha, CTXDOMAIN); + d &= ~1; + asd_write_reg_dword(asd_ha, CTXDOMAIN, d); + + asd_ha->hw_prof.max_scbs = max_cmnds; + + return 0; +} + +/** + * asd_init_ctxmem -- initialize context memory + * 
@asd_ha: pointer to host adapter structure + * + * This function sets the maximum number of SCBs and + * DDBs which can be used by the sequencer. This is normally + * 512 and 128 respectively. If support for more SCBs or more DDBs + * is required then CMDCTXBASE, DEVCTXBASE and CTXDOMAIN are + * initialized here to extend context memory to point to host memory, + * thus allowing unlimited support for SCBs and DDBs -- only limited + * by host memory. + */ +static int asd_init_ctxmem(struct asd_ha_struct *asd_ha) +{ + int bitmap_bytes; + + asd_get_max_scb_ddb(asd_ha); + asd_extend_devctx(asd_ha); + asd_extend_cmdctx(asd_ha); + + /* The kernel wants bitmaps to be unsigned long sized. */ + bitmap_bytes = (asd_ha->hw_prof.max_ddbs+7)/8; + bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long); + asd_ha->hw_prof.ddb_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL); + if (!asd_ha->hw_prof.ddb_bitmap) + return -ENOMEM; + spin_lock_init(&asd_ha->hw_prof.ddb_lock); + + return 0; +} + +int asd_init_hw(struct asd_ha_struct *asd_ha) +{ + int err; + u32 v; + + err = asd_init_sw(asd_ha); + if (err) + return err; + + err = pci_read_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, &v); + if (err) { + asd_printk("couldn't read PCIC_HSTPCIX_CNTRL of %s\n", + pci_name(asd_ha->pcidev)); + return err; + } + err = pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, + v | SC_TMR_DIS); + if (err) { + asd_printk("couldn't disable split completion timer of %s\n", + pci_name(asd_ha->pcidev)); + return err; + } + + err = asd_read_ocm(asd_ha); + if (err) { + asd_printk("couldn't read ocm(%d)\n", err); + /* While suspicios, it is not an error that we + * couldn't read the OCM. */ + } + + err = asd_read_flash(asd_ha); + if (err) { + asd_printk("couldn't read flash(%d)\n", err); + /* While suspicios, it is not an error that we + * couldn't read FLASH memory. + */ + } + + asd_init_ctxmem(asd_ha); + + if (asd_get_user_sas_addr(asd_ha)) { + asd_printk("No SAS Address provided for %s\n", + pci_name(asd_ha->pcidev)); + err = -ENODEV; + goto Out; + } + + asd_propagate_sas_addr(asd_ha); + + err = asd_init_phys(asd_ha); + if (err) { + asd_printk("couldn't initialize phys for %s\n", + pci_name(asd_ha->pcidev)); + goto Out; + } + + asd_init_ports(asd_ha); + + err = asd_init_scbs(asd_ha); + if (err) { + asd_printk("couldn't initialize scbs for %s\n", + pci_name(asd_ha->pcidev)); + goto Out; + } + + err = asd_init_dl(asd_ha); + if (err) { + asd_printk("couldn't initialize the done list:%d\n", + err); + goto Out; + } + + err = asd_init_escbs(asd_ha); + if (err) { + asd_printk("couldn't initialize escbs\n"); + goto Out; + } + + err = asd_init_chip(asd_ha); + if (err) { + asd_printk("couldn't init the chip\n"); + goto Out; + } +Out: + return err; +} + +/* ---------- Chip reset ---------- */ + +/** + * asd_chip_reset -- reset the host adapter, etc + * @asd_ha: pointer to host adapter structure of interest + * + * Called from the ISR. Hard reset the chip. Let everything + * timeout. This should be no different than hot-unplugging the + * host adapter. Once everything times out we'll init the chip with + * a call to asd_init_chip() and enable interrupts with asd_enable_ints(). + * XXX finish. 
+ */ +static void asd_chip_reset(struct asd_ha_struct *asd_ha) +{ + ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev)); + asd_chip_hardrst(asd_ha); +} + +/* ---------- Done List Routines ---------- */ + +static void asd_dl_tasklet_handler(unsigned long data) +{ + struct asd_ha_struct *asd_ha = (struct asd_ha_struct *) data; + struct asd_seq_data *seq = &asd_ha->seq; + unsigned long flags; + + while (1) { + struct done_list_struct *dl = &seq->dl[seq->dl_next]; + struct asd_ascb *ascb; + + if ((dl->toggle & DL_TOGGLE_MASK) != seq->dl_toggle) + break; + + /* find the aSCB */ + spin_lock_irqsave(&seq->tc_index_lock, flags); + ascb = asd_tc_index_find(seq, (int)le16_to_cpu(dl->index)); + spin_unlock_irqrestore(&seq->tc_index_lock, flags); + if (unlikely(!ascb)) { + ASD_DPRINTK("BUG:sequencer:dl:no ascb?!\n"); + goto next_1; + } else if (ascb->scb->header.opcode == EMPTY_SCB) { + goto out; + } else if (!ascb->uldd_timer && !del_timer(&ascb->timer)) { + goto next_1; + } + spin_lock_irqsave(&seq->pend_q_lock, flags); + list_del_init(&ascb->list); + seq->pending--; + spin_unlock_irqrestore(&seq->pend_q_lock, flags); + out: + ascb->tasklet_complete(ascb, dl); + + next_1: + seq->dl_next = (seq->dl_next + 1) & (ASD_DL_SIZE-1); + if (!seq->dl_next) + seq->dl_toggle ^= DL_TOGGLE_MASK; + } +} + +/* ---------- Interrupt Service Routines ---------- */ + +/** + * asd_process_donelist_isr -- schedule processing of done list entries + * @asd_ha: pointer to host adapter structure + */ +static void asd_process_donelist_isr(struct asd_ha_struct *asd_ha) +{ + tasklet_schedule(&asd_ha->seq.dl_tasklet); +} + +/** + * asd_com_sas_isr -- process device communication interrupt (COMINT) + * @asd_ha: pointer to host adapter structure + */ +static void asd_com_sas_isr(struct asd_ha_struct *asd_ha) +{ + u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT); + + /* clear COMSTAT int */ + asd_write_reg_dword(asd_ha, COMSTAT, 0xFFFFFFFF); + + if (comstat & CSBUFPERR) { + asd_printk("%s: command/status buffer dma parity error\n", + pci_name(asd_ha->pcidev)); + } else if (comstat & CSERR) { + int i; + u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR); + dmaerr &= 0xFF; + asd_printk("%s: command/status dma error, DMAERR: 0x%02x, " + "CSDMAADR: 0x%04x, CSDMAADR+4: 0x%04x\n", + pci_name(asd_ha->pcidev), + dmaerr, + asd_read_reg_dword(asd_ha, CSDMAADR), + asd_read_reg_dword(asd_ha, CSDMAADR+4)); + asd_printk("CSBUFFER:\n"); + for (i = 0; i < 8; i++) { + asd_printk("%08x %08x %08x %08x\n", + asd_read_reg_dword(asd_ha, CSBUFFER), + asd_read_reg_dword(asd_ha, CSBUFFER+4), + asd_read_reg_dword(asd_ha, CSBUFFER+8), + asd_read_reg_dword(asd_ha, CSBUFFER+12)); + } + asd_dump_seq_state(asd_ha, 0); + } else if (comstat & OVLYERR) { + u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR); + dmaerr = (dmaerr >> 8) & 0xFF; + asd_printk("%s: overlay dma error:0x%x\n", + pci_name(asd_ha->pcidev), + dmaerr); + } + asd_chip_reset(asd_ha); +} + +static void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus) +{ + static const char *halt_code[256] = { + "UNEXPECTED_INTERRUPT0", + "UNEXPECTED_INTERRUPT1", + "UNEXPECTED_INTERRUPT2", + "UNEXPECTED_INTERRUPT3", + "UNEXPECTED_INTERRUPT4", + "UNEXPECTED_INTERRUPT5", + "UNEXPECTED_INTERRUPT6", + "UNEXPECTED_INTERRUPT7", + "UNEXPECTED_INTERRUPT8", + "UNEXPECTED_INTERRUPT9", + "UNEXPECTED_INTERRUPT10", + [11 ... 
19] = "unknown[11,19]", + "NO_FREE_SCB_AVAILABLE", + "INVALID_SCB_OPCODE", + "INVALID_MBX_OPCODE", + "INVALID_ATA_STATE", + "ATA_QUEUE_FULL", + "ATA_TAG_TABLE_FAULT", + "ATA_TAG_MASK_FAULT", + "BAD_LINK_QUEUE_STATE", + "DMA2CHIM_QUEUE_ERROR", + "EMPTY_SCB_LIST_FULL", + "unknown[30]", + "IN_USE_SCB_ON_FREE_LIST", + "BAD_OPEN_WAIT_STATE", + "INVALID_STP_AFFILIATION", + "unknown[34]", + "EXEC_QUEUE_ERROR", + "TOO_MANY_EMPTIES_NEEDED", + "EMPTY_REQ_QUEUE_ERROR", + "Q_MONIRTT_MGMT_ERROR", + "TARGET_MODE_FLOW_ERROR", + "DEVICE_QUEUE_NOT_FOUND", + "START_IRTT_TIMER_ERROR", + "ABORT_TASK_ILLEGAL_REQ", + [43 ... 255] = "unknown[43,255]" + }; + + if (dchstatus & CSEQINT) { + u32 arp2int = asd_read_reg_dword(asd_ha, CARP2INT); + + if (arp2int & (ARP2WAITTO|ARP2ILLOPC|ARP2PERR|ARP2CIOPERR)) { + asd_printk("%s: CSEQ arp2int:0x%x\n", + pci_name(asd_ha->pcidev), + arp2int); + } else if (arp2int & ARP2HALTC) + asd_printk("%s: CSEQ halted: %s\n", + pci_name(asd_ha->pcidev), + halt_code[(arp2int>>16)&0xFF]); + else + asd_printk("%s: CARP2INT:0x%x\n", + pci_name(asd_ha->pcidev), + arp2int); + } + if (dchstatus & LSEQINT_MASK) { + int lseq; + u8 lseq_mask = dchstatus & LSEQINT_MASK; + + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + u32 arp2int = asd_read_reg_dword(asd_ha, + LmARP2INT(lseq)); + if (arp2int & (ARP2WAITTO | ARP2ILLOPC | ARP2PERR + | ARP2CIOPERR)) { + asd_printk("%s: LSEQ%d arp2int:0x%x\n", + pci_name(asd_ha->pcidev), + lseq, arp2int); + /* XXX we should only do lseq reset */ + } else if (arp2int & ARP2HALTC) + asd_printk("%s: LSEQ%d halted: %s\n", + pci_name(asd_ha->pcidev), + lseq,halt_code[(arp2int>>16)&0xFF]); + else + asd_printk("%s: LSEQ%d ARP2INT:0x%x\n", + pci_name(asd_ha->pcidev), lseq, + arp2int); + } + } + asd_chip_reset(asd_ha); +} + +/** + * asd_dch_sas_isr -- process device channel interrupt (DEVINT) + * @asd_ha: pointer to host adapter structure + */ +static void asd_dch_sas_isr(struct asd_ha_struct *asd_ha) +{ + u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS); + + if (dchstatus & CFIFTOERR) { + asd_printk("%s: CFIFTOERR\n", pci_name(asd_ha->pcidev)); + asd_chip_reset(asd_ha); + } else + asd_arp2_err(asd_ha, dchstatus); +} + +/** + * asd_rbi_exsi_isr -- process external system interface interrupt (INITERR) + * @asd_ha: pointer to host adapter structure + */ +static void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha) +{ + u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R); + + if (!(stat0r & ASIERR)) { + asd_printk("hmm, EXSI interrupted but no error?\n"); + return; + } + + if (stat0r & ASIFMTERR) { + asd_printk("ASI SEEPROM format error for %s\n", + pci_name(asd_ha->pcidev)); + } else if (stat0r & ASISEECHKERR) { + u32 stat1r = asd_read_reg_dword(asd_ha, ASISTAT1R); + asd_printk("ASI SEEPROM checksum 0x%x error for %s\n", + stat1r & CHECKSUM_MASK, + pci_name(asd_ha->pcidev)); + } else { + u32 statr = asd_read_reg_dword(asd_ha, ASIERRSTATR); + + if (!(statr & CPI2ASIMSTERR_MASK)) { + ASD_DPRINTK("hmm, ASIERR?\n"); + return; + } else { + u32 addr = asd_read_reg_dword(asd_ha, ASIERRADDR); + u32 data = asd_read_reg_dword(asd_ha, ASIERRDATAR); + + asd_printk("%s: CPI2 xfer err: addr: 0x%x, wdata: 0x%x, " + "count: 0x%x, byteen: 0x%x, targerr: 0x%x " + "master id: 0x%x, master err: 0x%x\n", + pci_name(asd_ha->pcidev), + addr, data, + (statr & CPI2ASIBYTECNT_MASK) >> 16, + (statr & CPI2ASIBYTEEN_MASK) >> 12, + (statr & CPI2ASITARGERR_MASK) >> 8, + (statr & CPI2ASITARGMID_MASK) >> 4, + (statr & CPI2ASIMSTERR_MASK)); + } + } + asd_chip_reset(asd_ha); +} + +/** + * 
asd_hst_pcix_isr -- process host interface interrupts + * @asd_ha: pointer to host adapter structure + * + * Asserted on PCIX errors: target abort, etc. + */ +static void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha) +{ + u16 status; + u32 pcix_status; + u32 ecc_status; + + pci_read_config_word(asd_ha->pcidev, PCI_STATUS, &status); + pci_read_config_dword(asd_ha->pcidev, PCIX_STATUS, &pcix_status); + pci_read_config_dword(asd_ha->pcidev, ECC_CTRL_STAT, &ecc_status); + + if (status & PCI_STATUS_DETECTED_PARITY) + asd_printk("parity error for %s\n", pci_name(asd_ha->pcidev)); + else if (status & PCI_STATUS_REC_MASTER_ABORT) + asd_printk("master abort for %s\n", pci_name(asd_ha->pcidev)); + else if (status & PCI_STATUS_REC_TARGET_ABORT) + asd_printk("target abort for %s\n", pci_name(asd_ha->pcidev)); + else if (status & PCI_STATUS_PARITY) + asd_printk("data parity for %s\n", pci_name(asd_ha->pcidev)); + else if (pcix_status & RCV_SCE) { + asd_printk("received split completion error for %s\n", + pci_name(asd_ha->pcidev)); + pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status); + /* XXX: Abort task? */ + return; + } else if (pcix_status & UNEXP_SC) { + asd_printk("unexpected split completion for %s\n", + pci_name(asd_ha->pcidev)); + pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status); + /* ignore */ + return; + } else if (pcix_status & SC_DISCARD) + asd_printk("split completion discarded for %s\n", + pci_name(asd_ha->pcidev)); + else if (ecc_status & UNCOR_ECCERR) + asd_printk("uncorrectable ECC error for %s\n", + pci_name(asd_ha->pcidev)); + asd_chip_reset(asd_ha); +} + +/** + * asd_hw_isr -- host adapter interrupt service routine + * @irq: ignored + * @dev_id: pointer to host adapter structure + * + * The ISR processes done list entries and level 3 error handling. 
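+ * Pending interrupt sources are acknowledged by writing the CHIMINT value
+ * just read back to the register (followed by a dummy read of CHIMINT)
+ * before the individual handlers are dispatched.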
+ */ +irqreturn_t asd_hw_isr(int irq, void *dev_id) +{ + struct asd_ha_struct *asd_ha = dev_id; + u32 chimint = asd_read_reg_dword(asd_ha, CHIMINT); + + if (!chimint) + return IRQ_NONE; + + asd_write_reg_dword(asd_ha, CHIMINT, chimint); + (void) asd_read_reg_dword(asd_ha, CHIMINT); + + if (chimint & DLAVAIL) + asd_process_donelist_isr(asd_ha); + if (chimint & COMINT) + asd_com_sas_isr(asd_ha); + if (chimint & DEVINT) + asd_dch_sas_isr(asd_ha); + if (chimint & INITERR) + asd_rbi_exsi_isr(asd_ha); + if (chimint & HOSTERR) + asd_hst_pcix_isr(asd_ha); + + return IRQ_HANDLED; +} + +/* ---------- SCB handling ---------- */ + +static struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha, + gfp_t gfp_flags) +{ + extern struct kmem_cache *asd_ascb_cache; + struct asd_seq_data *seq = &asd_ha->seq; + struct asd_ascb *ascb; + unsigned long flags; + + ascb = kmem_cache_zalloc(asd_ascb_cache, gfp_flags); + + if (ascb) { + ascb->dma_scb.size = sizeof(struct scb); + ascb->dma_scb.vaddr = dma_pool_zalloc(asd_ha->scb_pool, + gfp_flags, + &ascb->dma_scb.dma_handle); + if (!ascb->dma_scb.vaddr) { + kmem_cache_free(asd_ascb_cache, ascb); + return NULL; + } + asd_init_ascb(asd_ha, ascb); + + spin_lock_irqsave(&seq->tc_index_lock, flags); + ascb->tc_index = asd_tc_index_get(seq, ascb); + spin_unlock_irqrestore(&seq->tc_index_lock, flags); + if (ascb->tc_index == -1) + goto undo; + + ascb->scb->header.index = cpu_to_le16((u16)ascb->tc_index); + } + + return ascb; +undo: + dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr, + ascb->dma_scb.dma_handle); + kmem_cache_free(asd_ascb_cache, ascb); + ASD_DPRINTK("no index for ascb\n"); + return NULL; +} + +/** + * asd_ascb_alloc_list -- allocate a list of aSCBs + * @asd_ha: pointer to host adapter structure + * @num: pointer to integer number of aSCBs + * @gfp_flags: GFP_ flags. + * + * This is the only function which is used to allocate aSCBs. + * It can allocate one or many. If more than one, then they form + * a linked list in two ways: by their list field of the ascb struct + * and by the next_scb field of the scb_header. + * + * Returns NULL if no memory was available, else pointer to a list + * of ascbs. When this function returns, @num would be the number + * of SCBs which were not able to be allocated, 0 if all requested + * were able to be allocated. + */ +struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct + *asd_ha, int *num, + gfp_t gfp_flags) +{ + struct asd_ascb *first = NULL; + + for ( ; *num > 0; --*num) { + struct asd_ascb *ascb = asd_ascb_alloc(asd_ha, gfp_flags); + + if (!ascb) + break; + else if (!first) + first = ascb; + else { + struct asd_ascb *last = list_entry(first->list.prev, + struct asd_ascb, + list); + list_add_tail(&ascb->list, &first->list); + last->scb->header.next_scb = + cpu_to_le64(((u64)ascb->dma_scb.dma_handle)); + } + } + + return first; +} + +/** + * asd_swap_head_scb -- swap the head scb + * @asd_ha: pointer to host adapter structure + * @ascb: pointer to the head of an ascb list + * + * The sequencer knows the DMA address of the next SCB to be DMAed to + * the host adapter, from initialization or from the last list DMAed. + * seq->next_scb keeps the address of this SCB. The sequencer will + * DMA to the host adapter this list of SCBs. But the head (first + * element) of this list is not known to the sequencer. Here we swap + * the head of the list with the known SCB (memcpy()). 
+ * Only one memcpy() is required per list so it is in our interest + * to keep the list of SCB as long as possible so that the ratio + * of number of memcpy calls to the number of SCB DMA-ed is as small + * as possible. + * + * LOCKING: called with the pending list lock held. + */ +static void asd_swap_head_scb(struct asd_ha_struct *asd_ha, + struct asd_ascb *ascb) +{ + struct asd_seq_data *seq = &asd_ha->seq; + struct asd_ascb *last = list_entry(ascb->list.prev, + struct asd_ascb, + list); + struct asd_dma_tok t = ascb->dma_scb; + + memcpy(seq->next_scb.vaddr, ascb->scb, sizeof(*ascb->scb)); + ascb->dma_scb = seq->next_scb; + ascb->scb = ascb->dma_scb.vaddr; + seq->next_scb = t; + last->scb->header.next_scb = + cpu_to_le64(((u64)seq->next_scb.dma_handle)); +} + +/** + * asd_start_scb_timers -- (add and) start timers of SCBs + * @list: pointer to struct list_head of the scbs + * + * If an SCB in the @list has no timer function, assign the default + * one, then start the timer of the SCB. This function is + * intended to be called from asd_post_ascb_list(), just prior to + * posting the SCBs to the sequencer. + */ +static void asd_start_scb_timers(struct list_head *list) +{ + struct asd_ascb *ascb; + list_for_each_entry(ascb, list, list) { + if (!ascb->uldd_timer) { + ascb->timer.function = asd_ascb_timedout; + ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT; + add_timer(&ascb->timer); + } + } +} + +/** + * asd_post_ascb_list -- post a list of 1 or more aSCBs to the host adapter + * @asd_ha: pointer to a host adapter structure + * @ascb: pointer to the first aSCB in the list + * @num: number of aSCBs in the list (to be posted) + * + * See queueing comment in asd_post_escb_list(). + * + * Additional note on queuing: In order to minimize the ratio of memcpy() + * to the number of ascbs sent, we try to batch-send as many ascbs as possible + * in one go. + * Two cases are possible: + * A) can_queue >= num, + * B) can_queue < num. + * Case A: we can send the whole batch at once. Increment "pending" + * in the beginning of this function, when it is checked, in order to + * eliminate races when this function is called by multiple processes. + * Case B: should never happen. + */ +int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, + int num) +{ + unsigned long flags; + LIST_HEAD(list); + int can_queue; + + spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); + can_queue = asd_ha->hw_prof.max_scbs - asd_ha->seq.pending; + if (can_queue >= num) + asd_ha->seq.pending += num; + else + can_queue = 0; + + if (!can_queue) { + spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); + asd_printk("%s: scb queue full\n", pci_name(asd_ha->pcidev)); + return -SAS_QUEUE_FULL; + } + + asd_swap_head_scb(asd_ha, ascb); + + __list_add(&list, ascb->list.prev, &ascb->list); + + asd_start_scb_timers(&list); + + asd_ha->seq.scbpro += num; + list_splice_init(&list, asd_ha->seq.pend_q.prev); + asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro); + spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); + + return 0; +} + +/** + * asd_post_escb_list -- post a list of 1 or more empty scb + * @asd_ha: pointer to a host adapter structure + * @ascb: pointer to the first empty SCB in the list + * @num: number of aSCBs in the list (to be posted) + * + * This is essentially the same as asd_post_ascb_list, but we do not + * increment pending, add those to the pending list or get indexes. + * See asd_init_escbs() and asd_init_post_escbs(). 
+ *
+ * Since sending a list of ascbs is a superset of sending a single
+ * ascb, this function exists to generalize this. More specifically,
+ * when sending a list of those, we want to do only a _single_
+ * memcpy() at swap head, as opposed to for each ascb sent (in the
+ * case of sending them one by one). That is, we want to minimize the
+ * ratio of memcpy() operations to the number of ascbs sent. The same
+ * logic applies to asd_post_ascb_list().
+ */
+int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
+		       int num)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
+	asd_swap_head_scb(asd_ha, ascb);
+	asd_ha->seq.scbpro += num;
+	asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
+	spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
+
+	return 0;
+}
+
+/* ---------- LED ---------- */
+
+/**
+ * asd_turn_led -- turn on/off an LED
+ * @asd_ha: pointer to host adapter structure
+ * @phy_id: the PHY id whose LED we want to manipulate
+ * @op: 1 to turn on, 0 to turn off
+ */
+void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
+{
+	if (phy_id < ASD_MAX_PHYS) {
+		u32 v = asd_read_reg_dword(asd_ha, LmCONTROL(phy_id));
+		if (op)
+			v |= LEDPOL;
+		else
+			v &= ~LEDPOL;
+		asd_write_reg_dword(asd_ha, LmCONTROL(phy_id), v);
+	}
+}
+
+/**
+ * asd_control_led -- enable/disable an LED on the board
+ * @asd_ha: pointer to host adapter structure
+ * @phy_id: integer, the phy id
+ * @op: integer, 1 to enable, 0 to disable the LED
+ *
+ * First we output-enable the LED, then we set the source
+ * to be an external module.
+ */
+void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
+{
+	if (phy_id < ASD_MAX_PHYS) {
+		u32 v;
+
+		v = asd_read_reg_dword(asd_ha, GPIOOER);
+		if (op)
+			v |= (1 << phy_id);
+		else
+			v &= ~(1 << phy_id);
+		asd_write_reg_dword(asd_ha, GPIOOER, v);
+
+		v = asd_read_reg_dword(asd_ha, GPIOCNFGR);
+		if (op)
+			v |= (1 << phy_id);
+		else
+			v &= ~(1 << phy_id);
+		asd_write_reg_dword(asd_ha, GPIOCNFGR, v);
+	}
+}
+
+/* ---------- PHY enable ---------- */
+
+static int asd_enable_phy(struct asd_ha_struct *asd_ha, int phy_id)
+{
+	struct asd_phy *phy = &asd_ha->phys[phy_id];
+
+	asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, INT_ENABLE_2), 0);
+	asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, HOT_PLUG_DELAY),
+			   HOTPLUG_DELAY_TIMEOUT);
+
+	/* Get defaults from manuf. sector */
+	/* XXX we need defaults for those in case MS is broken.
*/ + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_0), + phy->phy_desc->phy_control_0); + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_1), + phy->phy_desc->phy_control_1); + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_2), + phy->phy_desc->phy_control_2); + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_3), + phy->phy_desc->phy_control_3); + + asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(phy_id), + ASD_COMINIT_TIMEOUT); + + asd_write_reg_addr(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(phy_id), + phy->id_frm_tok->dma_handle); + + asd_control_led(asd_ha, phy_id, 1); + + return 0; +} + +int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask) +{ + u8 phy_m; + u8 i; + int num = 0, k; + struct asd_ascb *ascb; + struct asd_ascb *ascb_list; + + if (!phy_mask) { + asd_printk("%s called with phy_mask of 0!?\n", __func__); + return 0; + } + + for_each_phy(phy_mask, phy_m, i) { + num++; + asd_enable_phy(asd_ha, i); + } + + k = num; + ascb_list = asd_ascb_alloc_list(asd_ha, &k, GFP_KERNEL); + if (!ascb_list) { + asd_printk("no memory for control phy ascb list\n"); + return -ENOMEM; + } + num -= k; + + ascb = ascb_list; + for_each_phy(phy_mask, phy_m, i) { + asd_build_control_phy(ascb, i, ENABLE_PHY); + ascb = list_entry(ascb->list.next, struct asd_ascb, list); + } + ASD_DPRINTK("posting %d control phy scbs\n", num); + k = asd_post_ascb_list(asd_ha, ascb_list, num); + if (k) + asd_ascb_free_list(ascb_list); + + return k; +} diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h new file mode 100644 index 000000000..930e192b1 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_hwi.h @@ -0,0 +1,379 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Aic94xx SAS/SATA driver hardware interface header file. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + */ + +#ifndef _AIC94XX_HWI_H_ +#define _AIC94XX_HWI_H_ + +#include +#include +#include + +#include + +#include "aic94xx.h" +#include "aic94xx_sas.h" + +/* Define ASD_MAX_PHYS to the maximum phys ever. Currently 8. 
*/ +#define ASD_MAX_PHYS 8 +#define ASD_PCBA_SN_SIZE 12 + +struct asd_ha_addrspace { + void __iomem *addr; + unsigned long start; /* pci resource start */ + unsigned long len; /* pci resource len */ + unsigned long flags; /* pci resource flags */ + + /* addresses internal to the host adapter */ + u32 swa_base; /* mmspace 1 (MBAR1) uses this only */ + u32 swb_base; + u32 swc_base; +}; + +struct bios_struct { + int present; + u8 maj; + u8 min; + u32 bld; +}; + +struct unit_element_struct { + u16 num; + u16 size; + void *area; +}; + +struct flash_struct { + u32 bar; + int present; + int wide; + u8 manuf; + u8 dev_id; + u8 sec_prot; + u8 method; + + u32 dir_offs; +}; + +struct asd_phy_desc { + /* From CTRL-A settings, then set to what is appropriate */ + u8 sas_addr[SAS_ADDR_SIZE]; + u8 max_sas_lrate; + u8 min_sas_lrate; + u8 max_sata_lrate; + u8 min_sata_lrate; + u8 flags; +#define ASD_CRC_DIS 1 +#define ASD_SATA_SPINUP_HOLD 2 + + u8 phy_control_0; /* mode 5 reg 0x160 */ + u8 phy_control_1; /* mode 5 reg 0x161 */ + u8 phy_control_2; /* mode 5 reg 0x162 */ + u8 phy_control_3; /* mode 5 reg 0x163 */ +}; + +struct asd_dma_tok { + void *vaddr; + dma_addr_t dma_handle; + size_t size; +}; + +struct hw_profile { + struct bios_struct bios; + struct unit_element_struct ue; + struct flash_struct flash; + + u8 sas_addr[SAS_ADDR_SIZE]; + char pcba_sn[ASD_PCBA_SN_SIZE+1]; + + u8 enabled_phys; /* mask of enabled phys */ + struct asd_phy_desc phy_desc[ASD_MAX_PHYS]; + u32 max_scbs; /* absolute sequencer scb queue size */ + struct asd_dma_tok *scb_ext; + u32 max_ddbs; + struct asd_dma_tok *ddb_ext; + + spinlock_t ddb_lock; + void *ddb_bitmap; + + int num_phys; /* ENABLEABLE */ + int max_phys; /* REPORTED + ENABLEABLE */ + + unsigned addr_range; /* max # of addrs; max # of possible ports */ + unsigned port_name_base; + unsigned dev_name_base; + unsigned sata_name_base; +}; + +struct asd_ascb { + struct list_head list; + struct asd_ha_struct *ha; + + struct scb *scb; /* equals dma_scb->vaddr */ + struct asd_dma_tok dma_scb; + struct asd_dma_tok *sg_arr; + + void (*tasklet_complete)(struct asd_ascb *, struct done_list_struct *); + u8 uldd_timer:1; + + /* internally generated command */ + struct timer_list timer; + struct completion *completion; + u8 tag_valid:1; + __be16 tag; /* error recovery only */ + + /* If this is an Empty SCB, index of first edb in seq->edb_arr. */ + int edb_index; + + /* Used by the timer timeout function. */ + int tc_index; + + void *uldd_task; +}; + +#define ASD_DL_SIZE_BITS 0x8 +#define ASD_DL_SIZE (1<<(2+ASD_DL_SIZE_BITS)) +#define ASD_DEF_DL_TOGGLE 0x01 + +struct asd_seq_data { + spinlock_t pend_q_lock; + u16 scbpro; + int pending; + struct list_head pend_q; + int can_queue; /* per adapter */ + struct asd_dma_tok next_scb; /* next scb to be delivered to CSEQ */ + + spinlock_t tc_index_lock; + void **tc_index_array; + void *tc_index_bitmap; + int tc_index_bitmap_bits; + + struct tasklet_struct dl_tasklet; + struct done_list_struct *dl; /* array of done list entries, equals */ + struct asd_dma_tok *actual_dl; /* actual_dl->vaddr */ + int dl_toggle; + int dl_next; + + int num_edbs; + struct asd_dma_tok **edb_arr; + int num_escbs; + struct asd_ascb **escb_arr; /* array of pointers to escbs */ +}; + +/* This is an internal port structure. These are used to get accurate + * phy_mask for updating DDB 0. + */ +struct asd_port { + u8 sas_addr[SAS_ADDR_SIZE]; + u8 attached_sas_addr[SAS_ADDR_SIZE]; + u32 phy_mask; + int num_phys; +}; + +/* This is the Host Adapter structure. 
It describes the hardware + * SAS adapter. + */ +struct asd_ha_struct { + struct pci_dev *pcidev; + const char *name; + + struct sas_ha_struct sas_ha; + + u8 revision_id; + + int iospace; + spinlock_t iolock; + struct asd_ha_addrspace io_handle[2]; + + struct hw_profile hw_prof; + + struct asd_phy phys[ASD_MAX_PHYS]; + spinlock_t asd_ports_lock; + struct asd_port asd_ports[ASD_MAX_PHYS]; + struct asd_sas_port ports[ASD_MAX_PHYS]; + + struct dma_pool *scb_pool; + + struct asd_seq_data seq; /* sequencer related */ + u32 bios_status; + const struct firmware *bios_image; +}; + +/* ---------- Common macros ---------- */ + +#define ASD_BUSADDR_LO(__dma_handle) ((u32)(__dma_handle)) +#define ASD_BUSADDR_HI(__dma_handle) (((sizeof(dma_addr_t))==8) \ + ? ((u32)((__dma_handle) >> 32)) \ + : ((u32)0)) + +#define dev_to_asd_ha(__dev) pci_get_drvdata(to_pci_dev(__dev)) +#define SCB_SITE_VALID(__site_no) (((__site_no) & 0xF0FF) != 0x00FF \ + && ((__site_no) & 0xF0FF) > 0x001F) +/* For each bit set in __lseq_mask, set __lseq to equal the bit + * position of the set bit and execute the statement following. + * __mc is the temporary mask, used as a mask "counter". + */ +#define for_each_sequencer(__lseq_mask, __mc, __lseq) \ + for ((__mc)=(__lseq_mask),(__lseq)=0;(__mc)!=0;(__lseq++),(__mc)>>=1)\ + if (((__mc) & 1)) +#define for_each_phy(__lseq_mask, __mc, __lseq) \ + for ((__mc)=(__lseq_mask),(__lseq)=0;(__mc)!=0;(__lseq++),(__mc)>>=1)\ + if (((__mc) & 1)) + +#define PHY_ENABLED(_HA, _I) ((_HA)->hw_prof.enabled_phys & (1<<(_I))) + +/* ---------- DMA allocs ---------- */ + +static inline struct asd_dma_tok *asd_dmatok_alloc(gfp_t flags) +{ + return kmem_cache_alloc(asd_dma_token_cache, flags); +} + +static inline void asd_dmatok_free(struct asd_dma_tok *token) +{ + kmem_cache_free(asd_dma_token_cache, token); +} + +static inline struct asd_dma_tok *asd_alloc_coherent(struct asd_ha_struct * + asd_ha, size_t size, + gfp_t flags) +{ + struct asd_dma_tok *token = asd_dmatok_alloc(flags); + if (token) { + token->size = size; + token->vaddr = dma_alloc_coherent(&asd_ha->pcidev->dev, + token->size, + &token->dma_handle, + flags); + if (!token->vaddr) { + asd_dmatok_free(token); + token = NULL; + } + } + return token; +} + +static inline void asd_free_coherent(struct asd_ha_struct *asd_ha, + struct asd_dma_tok *token) +{ + if (token) { + dma_free_coherent(&asd_ha->pcidev->dev, token->size, + token->vaddr, token->dma_handle); + asd_dmatok_free(token); + } +} + +static inline void asd_init_ascb(struct asd_ha_struct *asd_ha, + struct asd_ascb *ascb) +{ + INIT_LIST_HEAD(&ascb->list); + ascb->scb = ascb->dma_scb.vaddr; + ascb->ha = asd_ha; + timer_setup(&ascb->timer, NULL, 0); + ascb->tc_index = -1; +} + +/* Must be called with the tc_index_lock held! + */ +static inline void asd_tc_index_release(struct asd_seq_data *seq, int index) +{ + seq->tc_index_array[index] = NULL; + clear_bit(index, seq->tc_index_bitmap); +} + +/* Must be called with the tc_index_lock held! + */ +static inline int asd_tc_index_get(struct asd_seq_data *seq, void *ptr) +{ + int index; + + index = find_first_zero_bit(seq->tc_index_bitmap, + seq->tc_index_bitmap_bits); + if (index == seq->tc_index_bitmap_bits) + return -1; + + seq->tc_index_array[index] = ptr; + set_bit(index, seq->tc_index_bitmap); + + return index; +} + +/* Must be called with the tc_index_lock held! 
+ */ +static inline void *asd_tc_index_find(struct asd_seq_data *seq, int index) +{ + return seq->tc_index_array[index]; +} + +/** + * asd_ascb_free -- free a single aSCB after is has completed + * @ascb: pointer to the aSCB of interest + * + * This frees an aSCB after it has been executed/completed by + * the sequencer. + */ +static inline void asd_ascb_free(struct asd_ascb *ascb) +{ + if (ascb) { + struct asd_ha_struct *asd_ha = ascb->ha; + unsigned long flags; + + BUG_ON(!list_empty(&ascb->list)); + spin_lock_irqsave(&ascb->ha->seq.tc_index_lock, flags); + asd_tc_index_release(&ascb->ha->seq, ascb->tc_index); + spin_unlock_irqrestore(&ascb->ha->seq.tc_index_lock, flags); + dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr, + ascb->dma_scb.dma_handle); + kmem_cache_free(asd_ascb_cache, ascb); + } +} + +/** + * asd_ascb_list_free -- free a list of ascbs + * @ascb_list: a list of ascbs + * + * This function will free a list of ascbs allocated by asd_ascb_alloc_list. + * It is used when say the scb queueing function returned QUEUE_FULL, + * and we do not need the ascbs any more. + */ +static inline void asd_ascb_free_list(struct asd_ascb *ascb_list) +{ + LIST_HEAD(list); + struct list_head *n, *pos; + + __list_add(&list, ascb_list->list.prev, &ascb_list->list); + list_for_each_safe(pos, n, &list) { + list_del_init(pos); + asd_ascb_free(list_entry(pos, struct asd_ascb, list)); + } +} + +/* ---------- Function declarations ---------- */ + +int asd_init_hw(struct asd_ha_struct *asd_ha); +irqreturn_t asd_hw_isr(int irq, void *dev_id); + + +struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct + *asd_ha, int *num, + gfp_t gfp_mask); + +int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, + int num); +int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, + int num); + +int asd_init_post_escbs(struct asd_ha_struct *asd_ha); +void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc); +void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op); +void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op); +int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask); + +void asd_ascb_timedout(struct timer_list *t); +int asd_chip_hardrst(struct asd_ha_struct *asd_ha); + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c new file mode 100644 index 000000000..8a3340d8d --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -0,0 +1,1053 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Aic94xx SAS/SATA driver initialization. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. 
+ * Copyright (C) 2005 Luben Tuikov + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "aic94xx.h" +#include "aic94xx_reg.h" +#include "aic94xx_hwi.h" +#include "aic94xx_seq.h" +#include "aic94xx_sds.h" + +/* The format is "version.release.patchlevel" */ +#define ASD_DRIVER_VERSION "1.0.3" + +static int use_msi = 0; +module_param_named(use_msi, use_msi, int, S_IRUGO); +MODULE_PARM_DESC(use_msi, "\n" + "\tEnable(1) or disable(0) using PCI MSI.\n" + "\tDefault: 0"); + +static struct scsi_transport_template *aic94xx_transport_template; +static int asd_scan_finished(struct Scsi_Host *, unsigned long); +static void asd_scan_start(struct Scsi_Host *); + +static const struct scsi_host_template aic94xx_sht = { + .module = THIS_MODULE, + /* .name is initialized */ + .name = "aic94xx", + .queuecommand = sas_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, + .target_alloc = sas_target_alloc, + .slave_configure = sas_slave_configure, + .scan_finished = asd_scan_finished, + .scan_start = asd_scan_start, + .change_queue_depth = sas_change_queue_depth, + .bios_param = sas_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, + .slave_alloc = sas_slave_alloc, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif + .track_queue_depth = 1, +}; + +static int asd_map_memio(struct asd_ha_struct *asd_ha) +{ + int err, i; + struct asd_ha_addrspace *io_handle; + + asd_ha->iospace = 0; + for (i = 0; i < 3; i += 2) { + io_handle = &asd_ha->io_handle[i==0?0:1]; + io_handle->start = pci_resource_start(asd_ha->pcidev, i); + io_handle->len = pci_resource_len(asd_ha->pcidev, i); + io_handle->flags = pci_resource_flags(asd_ha->pcidev, i); + err = -ENODEV; + if (!io_handle->start || !io_handle->len) { + asd_printk("MBAR%d start or length for %s is 0.\n", + i==0?0:1, pci_name(asd_ha->pcidev)); + goto Err; + } + err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME); + if (err) { + asd_printk("couldn't reserve memory region for %s\n", + pci_name(asd_ha->pcidev)); + goto Err; + } + io_handle->addr = ioremap(io_handle->start, io_handle->len); + if (!io_handle->addr) { + asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1, + pci_name(asd_ha->pcidev)); + err = -ENOMEM; + goto Err_unreq; + } + } + + return 0; +Err_unreq: + pci_release_region(asd_ha->pcidev, i); +Err: + if (i > 0) { + io_handle = &asd_ha->io_handle[0]; + iounmap(io_handle->addr); + pci_release_region(asd_ha->pcidev, 0); + } + return err; +} + +static void asd_unmap_memio(struct asd_ha_struct *asd_ha) +{ + struct asd_ha_addrspace *io_handle; + + io_handle = &asd_ha->io_handle[1]; + iounmap(io_handle->addr); + pci_release_region(asd_ha->pcidev, 2); + + io_handle = &asd_ha->io_handle[0]; + iounmap(io_handle->addr); + pci_release_region(asd_ha->pcidev, 0); +} + +static int asd_map_ioport(struct asd_ha_struct *asd_ha) +{ + int i = PCI_IOBAR_OFFSET, err; + struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; + + asd_ha->iospace = 1; + io_handle->start = pci_resource_start(asd_ha->pcidev, i); + io_handle->len = pci_resource_len(asd_ha->pcidev, i); + io_handle->flags = pci_resource_flags(asd_ha->pcidev, i); + io_handle->addr = (void __iomem *) io_handle->start; + if (!io_handle->start || !io_handle->len) { + asd_printk("couldn't get IO ports for %s\n", + 
pci_name(asd_ha->pcidev)); + return -ENODEV; + } + err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME); + if (err) { + asd_printk("couldn't reserve io space for %s\n", + pci_name(asd_ha->pcidev)); + } + + return err; +} + +static void asd_unmap_ioport(struct asd_ha_struct *asd_ha) +{ + pci_release_region(asd_ha->pcidev, PCI_IOBAR_OFFSET); +} + +static int asd_map_ha(struct asd_ha_struct *asd_ha) +{ + int err; + u16 cmd_reg; + + err = pci_read_config_word(asd_ha->pcidev, PCI_COMMAND, &cmd_reg); + if (err) { + asd_printk("couldn't read command register of %s\n", + pci_name(asd_ha->pcidev)); + goto Err; + } + + err = -ENODEV; + if (cmd_reg & PCI_COMMAND_MEMORY) { + if ((err = asd_map_memio(asd_ha))) + goto Err; + } else if (cmd_reg & PCI_COMMAND_IO) { + if ((err = asd_map_ioport(asd_ha))) + goto Err; + asd_printk("%s ioport mapped -- upgrade your hardware\n", + pci_name(asd_ha->pcidev)); + } else { + asd_printk("no proper device access to %s\n", + pci_name(asd_ha->pcidev)); + goto Err; + } + + return 0; +Err: + return err; +} + +static void asd_unmap_ha(struct asd_ha_struct *asd_ha) +{ + if (asd_ha->iospace) + asd_unmap_ioport(asd_ha); + else + asd_unmap_memio(asd_ha); +} + +static const char *asd_dev_rev[30] = { + [0] = "A0", + [1] = "A1", + [8] = "B0", +}; + +static int asd_common_setup(struct asd_ha_struct *asd_ha) +{ + int err, i; + + asd_ha->revision_id = asd_ha->pcidev->revision; + + err = -ENODEV; + if (asd_ha->revision_id < AIC9410_DEV_REV_B0) { + asd_printk("%s is revision %s (%X), which is not supported\n", + pci_name(asd_ha->pcidev), + asd_dev_rev[asd_ha->revision_id], + asd_ha->revision_id); + goto Err; + } + /* Provide some sane default values. */ + asd_ha->hw_prof.max_scbs = 512; + asd_ha->hw_prof.max_ddbs = ASD_MAX_DDBS; + asd_ha->hw_prof.num_phys = ASD_MAX_PHYS; + /* All phys are enabled, by default. 
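+ * enabled_phys is a bit mask with one bit per phy, so the 0xFF below
+ * covers all ASD_MAX_PHYS (8) phys.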
*/ + asd_ha->hw_prof.enabled_phys = 0xFF; + for (i = 0; i < ASD_MAX_PHYS; i++) { + asd_ha->hw_prof.phy_desc[i].max_sas_lrate = + SAS_LINK_RATE_3_0_GBPS; + asd_ha->hw_prof.phy_desc[i].min_sas_lrate = + SAS_LINK_RATE_1_5_GBPS; + asd_ha->hw_prof.phy_desc[i].max_sata_lrate = + SAS_LINK_RATE_1_5_GBPS; + asd_ha->hw_prof.phy_desc[i].min_sata_lrate = + SAS_LINK_RATE_1_5_GBPS; + } + + return 0; +Err: + return err; +} + +static int asd_aic9410_setup(struct asd_ha_struct *asd_ha) +{ + int err = asd_common_setup(asd_ha); + + if (err) + return err; + + asd_ha->hw_prof.addr_range = 8; + asd_ha->hw_prof.port_name_base = 0; + asd_ha->hw_prof.dev_name_base = 8; + asd_ha->hw_prof.sata_name_base = 16; + + return 0; +} + +static int asd_aic9405_setup(struct asd_ha_struct *asd_ha) +{ + int err = asd_common_setup(asd_ha); + + if (err) + return err; + + asd_ha->hw_prof.addr_range = 4; + asd_ha->hw_prof.port_name_base = 0; + asd_ha->hw_prof.dev_name_base = 4; + asd_ha->hw_prof.sata_name_base = 8; + + return 0; +} + +static ssize_t asd_show_dev_rev(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); + return snprintf(buf, PAGE_SIZE, "%s\n", + asd_dev_rev[asd_ha->revision_id]); +} +static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL); + +static ssize_t asd_show_dev_bios_build(struct device *dev, + struct device_attribute *attr,char *buf) +{ + struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); + return snprintf(buf, PAGE_SIZE, "%d\n", asd_ha->hw_prof.bios.bld); +} +static DEVICE_ATTR(bios_build, S_IRUGO, asd_show_dev_bios_build, NULL); + +static ssize_t asd_show_dev_pcba_sn(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); + return snprintf(buf, PAGE_SIZE, "%s\n", asd_ha->hw_prof.pcba_sn); +} +static DEVICE_ATTR(pcba_sn, S_IRUGO, asd_show_dev_pcba_sn, NULL); + +#define FLASH_CMD_NONE 0x00 +#define FLASH_CMD_UPDATE 0x01 +#define FLASH_CMD_VERIFY 0x02 + +struct flash_command { + u8 command[8]; + int code; +}; + +static struct flash_command flash_command_table[] = +{ + {"verify", FLASH_CMD_VERIFY}, + {"update", FLASH_CMD_UPDATE}, + {"", FLASH_CMD_NONE} /* Last entry should be NULL. */ +}; + +struct error_bios { + char *reason; + int err_code; +}; + +static struct error_bios flash_error_table[] = +{ + {"Failed to open bios image file", FAIL_OPEN_BIOS_FILE}, + {"PCI ID mismatch", FAIL_CHECK_PCI_ID}, + {"Checksum mismatch", FAIL_CHECK_SUM}, + {"Unknown Error", FAIL_UNKNOWN}, + {"Failed to verify.", FAIL_VERIFY}, + {"Failed to reset flash chip.", FAIL_RESET_FLASH}, + {"Failed to find flash chip type.", FAIL_FIND_FLASH_ID}, + {"Failed to erash flash chip.", FAIL_ERASE_FLASH}, + {"Failed to program flash chip.", FAIL_WRITE_FLASH}, + {"Flash in progress", FLASH_IN_PROGRESS}, + {"Image file size Error", FAIL_FILE_SIZE}, + {"Input parameter error", FAIL_PARAMETERS}, + {"Out of memory", FAIL_OUT_MEMORY}, + {"OK", 0} /* Last entry err_code = 0. 
*/ +}; + +static ssize_t asd_store_update_bios(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); + char *cmd_ptr, *filename_ptr; + struct bios_file_header header, *hdr_ptr; + int res, i; + u32 csum = 0; + int flash_command = FLASH_CMD_NONE; + int err = 0; + + cmd_ptr = kcalloc(count, 2, GFP_KERNEL); + + if (!cmd_ptr) { + err = FAIL_OUT_MEMORY; + goto out; + } + + filename_ptr = cmd_ptr + count; + res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr); + if (res != 2) { + err = FAIL_PARAMETERS; + goto out1; + } + + for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) { + if (!memcmp(flash_command_table[i].command, + cmd_ptr, strlen(cmd_ptr))) { + flash_command = flash_command_table[i].code; + break; + } + } + if (flash_command == FLASH_CMD_NONE) { + err = FAIL_PARAMETERS; + goto out1; + } + + if (asd_ha->bios_status == FLASH_IN_PROGRESS) { + err = FLASH_IN_PROGRESS; + goto out1; + } + err = request_firmware(&asd_ha->bios_image, + filename_ptr, + &asd_ha->pcidev->dev); + if (err) { + asd_printk("Failed to load bios image file %s, error %d\n", + filename_ptr, err); + err = FAIL_OPEN_BIOS_FILE; + goto out1; + } + + hdr_ptr = (struct bios_file_header *)asd_ha->bios_image->data; + + if ((hdr_ptr->contrl_id.vendor != asd_ha->pcidev->vendor || + hdr_ptr->contrl_id.device != asd_ha->pcidev->device) && + (hdr_ptr->contrl_id.sub_vendor != asd_ha->pcidev->vendor || + hdr_ptr->contrl_id.sub_device != asd_ha->pcidev->device)) { + + ASD_DPRINTK("The PCI vendor or device id does not match\n"); + ASD_DPRINTK("vendor=%x dev=%x sub_vendor=%x sub_dev=%x" + " pci vendor=%x pci dev=%x\n", + hdr_ptr->contrl_id.vendor, + hdr_ptr->contrl_id.device, + hdr_ptr->contrl_id.sub_vendor, + hdr_ptr->contrl_id.sub_device, + asd_ha->pcidev->vendor, + asd_ha->pcidev->device); + err = FAIL_CHECK_PCI_ID; + goto out2; + } + + if (hdr_ptr->filelen != asd_ha->bios_image->size) { + err = FAIL_FILE_SIZE; + goto out2; + } + + /* calculate checksum */ + for (i = 0; i < hdr_ptr->filelen; i++) + csum += asd_ha->bios_image->data[i]; + + if ((csum & 0x0000ffff) != hdr_ptr->checksum) { + ASD_DPRINTK("BIOS file checksum mismatch\n"); + err = FAIL_CHECK_SUM; + goto out2; + } + if (flash_command == FLASH_CMD_UPDATE) { + asd_ha->bios_status = FLASH_IN_PROGRESS; + err = asd_write_flash_seg(asd_ha, + &asd_ha->bios_image->data[sizeof(*hdr_ptr)], + 0, hdr_ptr->filelen-sizeof(*hdr_ptr)); + if (!err) + err = asd_verify_flash_seg(asd_ha, + &asd_ha->bios_image->data[sizeof(*hdr_ptr)], + 0, hdr_ptr->filelen-sizeof(*hdr_ptr)); + } else { + asd_ha->bios_status = FLASH_IN_PROGRESS; + err = asd_verify_flash_seg(asd_ha, + &asd_ha->bios_image->data[sizeof(header)], + 0, hdr_ptr->filelen-sizeof(header)); + } + +out2: + release_firmware(asd_ha->bios_image); +out1: + kfree(cmd_ptr); +out: + asd_ha->bios_status = err; + + if (!err) + return count; + else + return -err; +} + +static ssize_t asd_show_update_bios(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i; + struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); + + for (i = 0; flash_error_table[i].err_code != 0; i++) { + if (flash_error_table[i].err_code == asd_ha->bios_status) + break; + } + if (asd_ha->bios_status != FLASH_IN_PROGRESS) + asd_ha->bios_status = FLASH_OK; + + return snprintf(buf, PAGE_SIZE, "status=%x %s\n", + flash_error_table[i].err_code, + flash_error_table[i].reason); +} + +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR, + asd_show_update_bios, asd_store_update_bios); 
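+/*
+ * Example use of the update_bios attribute from user space (the image file
+ * name is illustrative; it is fetched with request_firmware(), so it must
+ * be visible to the firmware loader, e.g. under /lib/firmware):
+ *
+ *   echo "verify aic94xx-bios.bin" > /sys/bus/pci/devices/<BDF>/update_bios
+ *   echo "update aic94xx-bios.bin" > /sys/bus/pci/devices/<BDF>/update_bios
+ *   cat /sys/bus/pci/devices/<BDF>/update_bios    # last status and reason
+ */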
+ +static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) +{ + int err; + + err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); + if (err) + return err; + + err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); + if (err) + goto err_rev; + + err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); + if (err) + goto err_biosb; + err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_update_bios); + if (err) + goto err_update_bios; + + return 0; + +err_update_bios: + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); +err_biosb: + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); +err_rev: + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); + return err; +} + +static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha) +{ + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios); +} + +/* The first entry, 0, is used for dynamic ids, the rest for devices + * we know about. + */ +static const struct asd_pcidev_struct { + const char * name; + int (*setup)(struct asd_ha_struct *asd_ha); +} asd_pcidev_data[] = { + /* Id 0 is used for dynamic ids. */ + { .name = "Adaptec AIC-94xx SAS/SATA Host Adapter", + .setup = asd_aic9410_setup + }, + { .name = "Adaptec AIC-9410W SAS/SATA Host Adapter", + .setup = asd_aic9410_setup + }, + { .name = "Adaptec AIC-9405W SAS/SATA Host Adapter", + .setup = asd_aic9405_setup + }, +}; + +static int asd_create_ha_caches(struct asd_ha_struct *asd_ha) +{ + asd_ha->scb_pool = dma_pool_create(ASD_DRIVER_NAME "_scb_pool", + &asd_ha->pcidev->dev, + sizeof(struct scb), + 8, 0); + if (!asd_ha->scb_pool) { + asd_printk("couldn't create scb pool\n"); + return -ENOMEM; + } + + return 0; +} + +/* + * asd_free_edbs -- free empty data buffers + * asd_ha: pointer to host adapter structure + */ +static void asd_free_edbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int i; + + for (i = 0; i < seq->num_edbs; i++) + asd_free_coherent(asd_ha, seq->edb_arr[i]); + kfree(seq->edb_arr); + seq->edb_arr = NULL; +} + +static void asd_free_escbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int i; + + for (i = 0; i < seq->num_escbs; i++) { + if (!list_empty(&seq->escb_arr[i]->list)) + list_del_init(&seq->escb_arr[i]->list); + + asd_ascb_free(seq->escb_arr[i]); + } + kfree(seq->escb_arr); + seq->escb_arr = NULL; +} + +static void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha) +{ + int i; + + if (asd_ha->hw_prof.ddb_ext) + asd_free_coherent(asd_ha, asd_ha->hw_prof.ddb_ext); + if (asd_ha->hw_prof.scb_ext) + asd_free_coherent(asd_ha, asd_ha->hw_prof.scb_ext); + + kfree(asd_ha->hw_prof.ddb_bitmap); + asd_ha->hw_prof.ddb_bitmap = NULL; + + for (i = 0; i < ASD_MAX_PHYS; i++) { + struct asd_phy *phy = &asd_ha->phys[i]; + + asd_free_coherent(asd_ha, phy->id_frm_tok); + } + if (asd_ha->seq.escb_arr) + asd_free_escbs(asd_ha); + if (asd_ha->seq.edb_arr) + asd_free_edbs(asd_ha); + if (asd_ha->hw_prof.ue.area) { + kfree(asd_ha->hw_prof.ue.area); + asd_ha->hw_prof.ue.area = NULL; + } + if (asd_ha->seq.tc_index_array) { + kfree(asd_ha->seq.tc_index_array); + kfree(asd_ha->seq.tc_index_bitmap); + asd_ha->seq.tc_index_array = NULL; + asd_ha->seq.tc_index_bitmap = NULL; + } + if (asd_ha->seq.actual_dl) { + asd_free_coherent(asd_ha, 
asd_ha->seq.actual_dl); + asd_ha->seq.actual_dl = NULL; + asd_ha->seq.dl = NULL; + } + if (asd_ha->seq.next_scb.vaddr) { + dma_pool_free(asd_ha->scb_pool, asd_ha->seq.next_scb.vaddr, + asd_ha->seq.next_scb.dma_handle); + asd_ha->seq.next_scb.vaddr = NULL; + } + dma_pool_destroy(asd_ha->scb_pool); + asd_ha->scb_pool = NULL; +} + +struct kmem_cache *asd_dma_token_cache; +struct kmem_cache *asd_ascb_cache; + +static int asd_create_global_caches(void) +{ + if (!asd_dma_token_cache) { + asd_dma_token_cache + = kmem_cache_create(ASD_DRIVER_NAME "_dma_token", + sizeof(struct asd_dma_tok), + 0, + SLAB_HWCACHE_ALIGN, + NULL); + if (!asd_dma_token_cache) { + asd_printk("couldn't create dma token cache\n"); + return -ENOMEM; + } + } + + if (!asd_ascb_cache) { + asd_ascb_cache = kmem_cache_create(ASD_DRIVER_NAME "_ascb", + sizeof(struct asd_ascb), + 0, + SLAB_HWCACHE_ALIGN, + NULL); + if (!asd_ascb_cache) { + asd_printk("couldn't create ascb cache\n"); + goto Err; + } + } + + return 0; +Err: + kmem_cache_destroy(asd_dma_token_cache); + asd_dma_token_cache = NULL; + return -ENOMEM; +} + +static void asd_destroy_global_caches(void) +{ + kmem_cache_destroy(asd_dma_token_cache); + asd_dma_token_cache = NULL; + + kmem_cache_destroy(asd_ascb_cache); + asd_ascb_cache = NULL; +} + +static int asd_register_sas_ha(struct asd_ha_struct *asd_ha) +{ + int i; + struct asd_sas_phy **sas_phys = + kcalloc(ASD_MAX_PHYS, sizeof(*sas_phys), GFP_KERNEL); + struct asd_sas_port **sas_ports = + kcalloc(ASD_MAX_PHYS, sizeof(*sas_ports), GFP_KERNEL); + + if (!sas_phys || !sas_ports) { + kfree(sas_phys); + kfree(sas_ports); + return -ENOMEM; + } + + asd_ha->sas_ha.sas_ha_name = (char *) asd_ha->name; + asd_ha->sas_ha.sas_addr = &asd_ha->hw_prof.sas_addr[0]; + + for (i = 0; i < ASD_MAX_PHYS; i++) { + sas_phys[i] = &asd_ha->phys[i].sas_phy; + sas_ports[i] = &asd_ha->ports[i]; + } + + asd_ha->sas_ha.sas_phy = sas_phys; + asd_ha->sas_ha.sas_port= sas_ports; + asd_ha->sas_ha.num_phys= ASD_MAX_PHYS; + + return sas_register_ha(&asd_ha->sas_ha); +} + +static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha) +{ + int err; + + err = sas_unregister_ha(&asd_ha->sas_ha); + + sas_remove_host(asd_ha->sas_ha.shost); + scsi_host_put(asd_ha->sas_ha.shost); + + kfree(asd_ha->sas_ha.sas_phy); + kfree(asd_ha->sas_ha.sas_port); + + return err; +} + +static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + const struct asd_pcidev_struct *asd_dev; + unsigned asd_id = (unsigned) id->driver_data; + struct asd_ha_struct *asd_ha; + struct Scsi_Host *shost; + int err; + + if (asd_id >= ARRAY_SIZE(asd_pcidev_data)) { + asd_printk("wrong driver_data in PCI table\n"); + return -ENODEV; + } + + if ((err = pci_enable_device(dev))) { + asd_printk("couldn't enable device %s\n", pci_name(dev)); + return err; + } + + pci_set_master(dev); + + err = -ENOMEM; + + shost = scsi_host_alloc(&aic94xx_sht, sizeof(void *)); + if (!shost) + goto Err; + + asd_dev = &asd_pcidev_data[asd_id]; + + asd_ha = kzalloc(sizeof(*asd_ha), GFP_KERNEL); + if (!asd_ha) { + asd_printk("out of memory\n"); + goto Err_put; + } + asd_ha->pcidev = dev; + asd_ha->sas_ha.dev = &asd_ha->pcidev->dev; + asd_ha->sas_ha.lldd_ha = asd_ha; + + asd_ha->bios_status = FLASH_OK; + asd_ha->name = asd_dev->name; + asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev)); + + SHOST_TO_SAS_HA(shost) = &asd_ha->sas_ha; + asd_ha->sas_ha.shost = shost; + shost->transportt = aic94xx_transport_template; + shost->max_id = ~0; + shost->max_lun = ~0; + shost->max_cmd_len = 16; + + 
err = scsi_add_host(shost, &dev->dev); + if (err) + goto Err_free; + + err = asd_dev->setup(asd_ha); + if (err) + goto Err_remove; + + err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)); + if (err) + err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)); + if (err) { + err = -ENODEV; + asd_printk("no suitable DMA mask for %s\n", pci_name(dev)); + goto Err_remove; + } + + pci_set_drvdata(dev, asd_ha); + + err = asd_map_ha(asd_ha); + if (err) + goto Err_remove; + + err = asd_create_ha_caches(asd_ha); + if (err) + goto Err_unmap; + + err = asd_init_hw(asd_ha); + if (err) + goto Err_free_cache; + + asd_printk("device %s: SAS addr %llx, PCBA SN %s, %d phys, %d enabled " + "phys, flash %s, BIOS %s%d\n", + pci_name(dev), SAS_ADDR(asd_ha->hw_prof.sas_addr), + asd_ha->hw_prof.pcba_sn, asd_ha->hw_prof.max_phys, + asd_ha->hw_prof.num_phys, + asd_ha->hw_prof.flash.present ? "present" : "not present", + asd_ha->hw_prof.bios.present ? "build " : "not present", + asd_ha->hw_prof.bios.bld); + + shost->can_queue = asd_ha->seq.can_queue; + + if (use_msi) + pci_enable_msi(asd_ha->pcidev); + + err = request_irq(asd_ha->pcidev->irq, asd_hw_isr, IRQF_SHARED, + ASD_DRIVER_NAME, asd_ha); + if (err) { + asd_printk("couldn't get irq %d for %s\n", + asd_ha->pcidev->irq, pci_name(asd_ha->pcidev)); + goto Err_irq; + } + asd_enable_ints(asd_ha); + + err = asd_init_post_escbs(asd_ha); + if (err) { + asd_printk("couldn't post escbs for %s\n", + pci_name(asd_ha->pcidev)); + goto Err_escbs; + } + ASD_DPRINTK("escbs posted\n"); + + err = asd_create_dev_attrs(asd_ha); + if (err) + goto Err_dev_attrs; + + err = asd_register_sas_ha(asd_ha); + if (err) + goto Err_reg_sas; + + scsi_scan_host(shost); + + return 0; + +Err_reg_sas: + asd_remove_dev_attrs(asd_ha); +Err_dev_attrs: +Err_escbs: + asd_disable_ints(asd_ha); + free_irq(dev->irq, asd_ha); +Err_irq: + if (use_msi) + pci_disable_msi(dev); + asd_chip_hardrst(asd_ha); +Err_free_cache: + asd_destroy_ha_caches(asd_ha); +Err_unmap: + asd_unmap_ha(asd_ha); +Err_remove: + scsi_remove_host(shost); +Err_free: + kfree(asd_ha); +Err_put: + scsi_host_put(shost); +Err: + pci_disable_device(dev); + return err; +} + +static void asd_free_queues(struct asd_ha_struct *asd_ha) +{ + unsigned long flags; + LIST_HEAD(pending); + struct list_head *n, *pos; + + spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); + asd_ha->seq.pending = 0; + list_splice_init(&asd_ha->seq.pend_q, &pending); + spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); + + if (!list_empty(&pending)) + ASD_DPRINTK("Uh-oh! Pending is not empty!\n"); + + list_for_each_safe(pos, n, &pending) { + struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list); + /* + * Delete unexpired ascb timers. This may happen if we issue + * a CONTROL PHY scb to an adapter and rmmod before the scb + * times out. Apparently we don't wait for the CONTROL PHY + * to complete, so it doesn't matter if we kill the timer. 
+ */ + del_timer_sync(&ascb->timer); + WARN_ON(ascb->scb->header.opcode != CONTROL_PHY); + + list_del_init(pos); + ASD_DPRINTK("freeing from pending\n"); + asd_ascb_free(ascb); + } +} + +static void asd_turn_off_leds(struct asd_ha_struct *asd_ha) +{ + u8 phy_mask = asd_ha->hw_prof.enabled_phys; + u8 i; + + for_each_phy(phy_mask, phy_mask, i) { + asd_turn_led(asd_ha, i, 0); + asd_control_led(asd_ha, i, 0); + } +} + +static void asd_pci_remove(struct pci_dev *dev) +{ + struct asd_ha_struct *asd_ha = pci_get_drvdata(dev); + + if (!asd_ha) + return; + + asd_unregister_sas_ha(asd_ha); + + asd_disable_ints(asd_ha); + + asd_remove_dev_attrs(asd_ha); + + /* XXX more here as needed */ + + free_irq(dev->irq, asd_ha); + if (use_msi) + pci_disable_msi(asd_ha->pcidev); + asd_turn_off_leds(asd_ha); + asd_chip_hardrst(asd_ha); + asd_free_queues(asd_ha); + asd_destroy_ha_caches(asd_ha); + asd_unmap_ha(asd_ha); + kfree(asd_ha); + pci_disable_device(dev); + return; +} + +static void asd_scan_start(struct Scsi_Host *shost) +{ + struct asd_ha_struct *asd_ha; + int err; + + asd_ha = SHOST_TO_SAS_HA(shost)->lldd_ha; + err = asd_enable_phys(asd_ha, asd_ha->hw_prof.enabled_phys); + if (err) + asd_printk("Couldn't enable phys, err:%d\n", err); +} + +static int asd_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + /* give the phy enabling interrupt event time to come in (1s + * is empirically about all it takes) */ + if (time < HZ) + return 0; + /* Wait for discovery to finish */ + sas_drain_work(SHOST_TO_SAS_HA(shost)); + return 1; +} + +static ssize_t version_show(struct device_driver *driver, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION); +} +static DRIVER_ATTR_RO(version); + +static int asd_create_driver_attrs(struct device_driver *driver) +{ + return driver_create_file(driver, &driver_attr_version); +} + +static void asd_remove_driver_attrs(struct device_driver *driver) +{ + driver_remove_file(driver, &driver_attr_version); +} + +static struct sas_domain_function_template aic94xx_transport_functions = { + .lldd_dev_found = asd_dev_found, + .lldd_dev_gone = asd_dev_gone, + + .lldd_execute_task = asd_execute_task, + + .lldd_abort_task = asd_abort_task, + .lldd_abort_task_set = asd_abort_task_set, + .lldd_clear_task_set = asd_clear_task_set, + .lldd_I_T_nexus_reset = asd_I_T_nexus_reset, + .lldd_lu_reset = asd_lu_reset, + .lldd_query_task = asd_query_task, + + .lldd_clear_nexus_port = asd_clear_nexus_port, + .lldd_clear_nexus_ha = asd_clear_nexus_ha, + + .lldd_control_phy = asd_control_phy, + + .lldd_ata_set_dmamode = asd_set_dmamode, +}; + +static const struct pci_device_id aic94xx_pci_table[] = { + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x41E),0, 0, 1}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x41F),0, 0, 1}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x430),0, 0, 2}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x432),0, 0, 2}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x43E),0, 0, 2}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x43F),0, 0, 2}, + {} +}; + +MODULE_DEVICE_TABLE(pci, aic94xx_pci_table); + +static struct pci_driver aic94xx_pci_driver = { + .name = ASD_DRIVER_NAME, + .id_table = aic94xx_pci_table, + .probe = asd_pci_probe, + .remove = asd_pci_remove, +}; + +static int __init aic94xx_init(void) +{ + int err; + + + asd_printk("%s version %s loaded\n", ASD_DRIVER_DESCRIPTION, + ASD_DRIVER_VERSION); + + err = 
asd_create_global_caches(); + if (err) + return err; + + aic94xx_transport_template = + sas_domain_attach_transport(&aic94xx_transport_functions); + if (!aic94xx_transport_template) { + err = -ENOMEM; + goto out_destroy_caches; + } + + err = pci_register_driver(&aic94xx_pci_driver); + if (err) + goto out_release_transport; + + err = asd_create_driver_attrs(&aic94xx_pci_driver.driver); + if (err) + goto out_unregister_pcidrv; + + return err; + + out_unregister_pcidrv: + pci_unregister_driver(&aic94xx_pci_driver); + out_release_transport: + sas_release_transport(aic94xx_transport_template); + out_destroy_caches: + asd_destroy_global_caches(); + + return err; +} + +static void __exit aic94xx_exit(void) +{ + asd_remove_driver_attrs(&aic94xx_pci_driver.driver); + pci_unregister_driver(&aic94xx_pci_driver); + sas_release_transport(aic94xx_transport_template); + asd_release_firmware(); + asd_destroy_global_caches(); + asd_printk("%s version %s unloaded\n", ASD_DRIVER_DESCRIPTION, + ASD_DRIVER_VERSION); +} + +module_init(aic94xx_init); +module_exit(aic94xx_exit); + +MODULE_AUTHOR("Luben Tuikov "); +MODULE_DESCRIPTION(ASD_DRIVER_DESCRIPTION); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(ASD_DRIVER_VERSION); diff --git a/drivers/scsi/aic94xx/aic94xx_reg.c b/drivers/scsi/aic94xx/aic94xx_reg.c new file mode 100644 index 000000000..392499e80 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_reg.c @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Aic94xx SAS/SATA driver register access. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + */ + +#include +#include "aic94xx_reg.h" +#include "aic94xx.h" + +/* Writing to device address space. + * Offset comes before value to remind that the operation of + * this function is *offs = val. + */ +static void asd_write_byte(struct asd_ha_struct *asd_ha, + unsigned long offs, u8 val) +{ + if (unlikely(asd_ha->iospace)) + outb(val, + (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF)); + else + writeb(val, asd_ha->io_handle[0].addr + offs); + wmb(); +} + +static void asd_write_word(struct asd_ha_struct *asd_ha, + unsigned long offs, u16 val) +{ + if (unlikely(asd_ha->iospace)) + outw(val, + (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF)); + else + writew(val, asd_ha->io_handle[0].addr + offs); + wmb(); +} + +static void asd_write_dword(struct asd_ha_struct *asd_ha, + unsigned long offs, u32 val) +{ + if (unlikely(asd_ha->iospace)) + outl(val, + (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF)); + else + writel(val, asd_ha->io_handle[0].addr + offs); + wmb(); +} + +/* Reading from device address space. 
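+ * Each read below is followed by a read memory barrier (rmb()), mirroring
+ * the wmb() that follows each write above.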
+ */ +static u8 asd_read_byte(struct asd_ha_struct *asd_ha, unsigned long offs) +{ + u8 val; + if (unlikely(asd_ha->iospace)) + val = inb((unsigned long) asd_ha->io_handle[0].addr + + (offs & 0xFF)); + else + val = readb(asd_ha->io_handle[0].addr + offs); + rmb(); + return val; +} + +static u16 asd_read_word(struct asd_ha_struct *asd_ha, + unsigned long offs) +{ + u16 val; + if (unlikely(asd_ha->iospace)) + val = inw((unsigned long)asd_ha->io_handle[0].addr + + (offs & 0xFF)); + else + val = readw(asd_ha->io_handle[0].addr + offs); + rmb(); + return val; +} + +static u32 asd_read_dword(struct asd_ha_struct *asd_ha, + unsigned long offs) +{ + u32 val; + if (unlikely(asd_ha->iospace)) + val = inl((unsigned long) asd_ha->io_handle[0].addr + + (offs & 0xFF)); + else + val = readl(asd_ha->io_handle[0].addr + offs); + rmb(); + return val; +} + +static inline u32 asd_mem_offs_swa(void) +{ + return 0; +} + +static inline u32 asd_mem_offs_swc(void) +{ + return asd_mem_offs_swa() + MBAR0_SWA_SIZE; +} + +static inline u32 asd_mem_offs_swb(void) +{ + return asd_mem_offs_swc() + MBAR0_SWC_SIZE + 0x20; +} + +/* We know that the register wanted is in the range + * of the sliding window. + */ +#define ASD_READ_SW(ww, type, ord) \ +static type asd_read_##ww##_##ord(struct asd_ha_struct *asd_ha, \ + u32 reg) \ +{ \ + struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \ + u32 map_offs = (reg - io_handle->ww##_base) + asd_mem_offs_##ww();\ + return asd_read_##ord(asd_ha, (unsigned long)map_offs); \ +} + +#define ASD_WRITE_SW(ww, type, ord) \ +static void asd_write_##ww##_##ord(struct asd_ha_struct *asd_ha, \ + u32 reg, type val) \ +{ \ + struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \ + u32 map_offs = (reg - io_handle->ww##_base) + asd_mem_offs_##ww();\ + asd_write_##ord(asd_ha, (unsigned long)map_offs, val); \ +} + +ASD_READ_SW(swa, u8, byte); +ASD_READ_SW(swa, u16, word); +ASD_READ_SW(swa, u32, dword); + +ASD_READ_SW(swb, u8, byte); +ASD_READ_SW(swb, u16, word); +ASD_READ_SW(swb, u32, dword); + +ASD_READ_SW(swc, u8, byte); +ASD_READ_SW(swc, u16, word); +ASD_READ_SW(swc, u32, dword); + +ASD_WRITE_SW(swa, u8, byte); +ASD_WRITE_SW(swa, u16, word); +ASD_WRITE_SW(swa, u32, dword); + +ASD_WRITE_SW(swb, u8, byte); +ASD_WRITE_SW(swb, u16, word); +ASD_WRITE_SW(swb, u32, dword); + +ASD_WRITE_SW(swc, u8, byte); +ASD_WRITE_SW(swc, u16, word); +ASD_WRITE_SW(swc, u32, dword); + +/* + * A word about sliding windows: + * MBAR0 is divided into sliding windows A, C and B, in that order. + * SWA starts at offset 0 of MBAR0, up to 0x57, with size 0x58 bytes. + * SWC starts at offset 0x58 of MBAR0, up to 0x60, with size 0x8 bytes. + * From 0x60 to 0x7F, we have a copy of PCI config space 0x60-0x7F. + * SWB starts at offset 0x80 of MBAR0 and extends to the end of MBAR0. + * See asd_init_sw() in aic94xx_hwi.c + * + * We map the most common registers we'd access of the internal 4GB + * host adapter memory space. If a register/internal memory location + * is wanted which is not mapped, we slide SWB, by paging it, + * see asd_move_swb() in aic94xx_reg.c. 
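+ *
+ * As a rough illustration (SWB's size is discovered at init time, see
+ * asd_init_sw() in aic94xx_hwi.c, so only the idea matters here): a
+ * call such as
+ *
+ *	asd_write_reg_dword(asd_ha, EXSICNFGR, val);
+ *
+ * targets 0xB8042800 (REG_BASE_ADDR_EXSI), which normally lies outside
+ * SWA and SWC; if the currently programmed SWB page does not cover it
+ * either, asd_write_reg_dword() calls asd_move_swb() to repoint SWB at
+ * the SWB-size-aligned base containing the register, then performs the
+ * access through the freshly positioned window.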
+ */ + +/** + * asd_move_swb -- move sliding window B + * @asd_ha: pointer to host adapter structure + * @reg: register desired to be within range of the new window + */ +static void asd_move_swb(struct asd_ha_struct *asd_ha, u32 reg) +{ + u32 base = reg & ~(MBAR0_SWB_SIZE-1); + pci_write_config_dword(asd_ha->pcidev, PCI_CONF_MBAR0_SWB, base); + asd_ha->io_handle[0].swb_base = base; +} + +static void __asd_write_reg_byte(struct asd_ha_struct *asd_ha, u32 reg, u8 val) +{ + struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; + BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); + if (io_handle->swa_base <= reg + && reg < io_handle->swa_base + MBAR0_SWA_SIZE) + asd_write_swa_byte (asd_ha, reg,val); + else if (io_handle->swb_base <= reg + && reg < io_handle->swb_base + MBAR0_SWB_SIZE) + asd_write_swb_byte (asd_ha, reg, val); + else if (io_handle->swc_base <= reg + && reg < io_handle->swc_base + MBAR0_SWC_SIZE) + asd_write_swc_byte (asd_ha, reg, val); + else { + /* Ok, we have to move SWB */ + asd_move_swb(asd_ha, reg); + asd_write_swb_byte (asd_ha, reg, val); + } +} + +#define ASD_WRITE_REG(type, ord) \ +void asd_write_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg, type val)\ +{ \ + struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; \ + unsigned long flags; \ + BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); \ + spin_lock_irqsave(&asd_ha->iolock, flags); \ + if (io_handle->swa_base <= reg \ + && reg < io_handle->swa_base + MBAR0_SWA_SIZE) \ + asd_write_swa_##ord (asd_ha, reg,val); \ + else if (io_handle->swb_base <= reg \ + && reg < io_handle->swb_base + MBAR0_SWB_SIZE) \ + asd_write_swb_##ord (asd_ha, reg, val); \ + else if (io_handle->swc_base <= reg \ + && reg < io_handle->swc_base + MBAR0_SWC_SIZE) \ + asd_write_swc_##ord (asd_ha, reg, val); \ + else { \ + /* Ok, we have to move SWB */ \ + asd_move_swb(asd_ha, reg); \ + asd_write_swb_##ord (asd_ha, reg, val); \ + } \ + spin_unlock_irqrestore(&asd_ha->iolock, flags); \ +} + +ASD_WRITE_REG(u8, byte); +ASD_WRITE_REG(u16,word); +ASD_WRITE_REG(u32,dword); + +static u8 __asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg) +{ + struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; + u8 val; + BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); + if (io_handle->swa_base <= reg + && reg < io_handle->swa_base + MBAR0_SWA_SIZE) + val = asd_read_swa_byte (asd_ha, reg); + else if (io_handle->swb_base <= reg + && reg < io_handle->swb_base + MBAR0_SWB_SIZE) + val = asd_read_swb_byte (asd_ha, reg); + else if (io_handle->swc_base <= reg + && reg < io_handle->swc_base + MBAR0_SWC_SIZE) + val = asd_read_swc_byte (asd_ha, reg); + else { + /* Ok, we have to move SWB */ + asd_move_swb(asd_ha, reg); + val = asd_read_swb_byte (asd_ha, reg); + } + return val; +} + +#define ASD_READ_REG(type, ord) \ +type asd_read_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg) \ +{ \ + struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; \ + type val; \ + unsigned long flags; \ + BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); \ + spin_lock_irqsave(&asd_ha->iolock, flags); \ + if (io_handle->swa_base <= reg \ + && reg < io_handle->swa_base + MBAR0_SWA_SIZE) \ + val = asd_read_swa_##ord (asd_ha, reg); \ + else if (io_handle->swb_base <= reg \ + && reg < io_handle->swb_base + MBAR0_SWB_SIZE) \ + val = asd_read_swb_##ord (asd_ha, reg); \ + else if (io_handle->swc_base <= reg \ + && reg < io_handle->swc_base + MBAR0_SWC_SIZE) \ + val = asd_read_swc_##ord (asd_ha, reg); \ + else { \ + /* Ok, we have to move SWB */ \ + asd_move_swb(asd_ha, reg); \ + val = 
asd_read_swb_##ord (asd_ha, reg); \ + } \ + spin_unlock_irqrestore(&asd_ha->iolock, flags); \ + return val; \ +} + +ASD_READ_REG(u8, byte); +ASD_READ_REG(u16,word); +ASD_READ_REG(u32,dword); + +/** + * asd_read_reg_string -- read a string of bytes from io space memory + * @asd_ha: pointer to host adapter structure + * @dst: pointer to a destination buffer where data will be written to + * @offs: start offset (register) to read from + * @count: number of bytes to read + */ +void asd_read_reg_string(struct asd_ha_struct *asd_ha, void *dst, + u32 offs, int count) +{ + u8 *p = dst; + unsigned long flags; + + spin_lock_irqsave(&asd_ha->iolock, flags); + for ( ; count > 0; count--, offs++, p++) + *p = __asd_read_reg_byte(asd_ha, offs); + spin_unlock_irqrestore(&asd_ha->iolock, flags); +} + +/** + * asd_write_reg_string -- write a string of bytes to io space memory + * @asd_ha: pointer to host adapter structure + * @src: pointer to source buffer where data will be read from + * @offs: start offset (register) to write to + * @count: number of bytes to write + */ +void asd_write_reg_string(struct asd_ha_struct *asd_ha, void *src, + u32 offs, int count) +{ + u8 *p = src; + unsigned long flags; + + spin_lock_irqsave(&asd_ha->iolock, flags); + for ( ; count > 0; count--, offs++, p++) + __asd_write_reg_byte(asd_ha, offs, *p); + spin_unlock_irqrestore(&asd_ha->iolock, flags); +} diff --git a/drivers/scsi/aic94xx/aic94xx_reg.h b/drivers/scsi/aic94xx/aic94xx_reg.h new file mode 100644 index 000000000..d1c0975f8 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_reg.h @@ -0,0 +1,284 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Aic94xx SAS/SATA driver hardware registers definitions. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + */ + +#ifndef _AIC94XX_REG_H_ +#define _AIC94XX_REG_H_ + +#include +#include "aic94xx_hwi.h" + +/* Values */ +#define AIC9410_DEV_REV_B0 0x8 + +/* MBAR0, SWA, SWB, SWC, internal memory space addresses */ +#define REG_BASE_ADDR 0xB8000000 +#define REG_BASE_ADDR_CSEQCIO 0xB8002000 +#define REG_BASE_ADDR_EXSI 0xB8042800 + +#define MBAR0_SWA_SIZE 0x58 +extern u32 MBAR0_SWB_SIZE; +#define MBAR0_SWC_SIZE 0x8 + +/* MBAR1, points to On Chip Memory */ +#define OCM_BASE_ADDR 0xA0000000 +#define OCM_MAX_SIZE 0x20000 + +/* Smallest address possible to reference */ +#define ALL_BASE_ADDR OCM_BASE_ADDR + +/* PCI configuration space registers */ +#define PCI_IOBAR_OFFSET 4 + +#define PCI_CONF_MBAR1 0x6C +#define PCI_CONF_MBAR0_SWA 0x70 +#define PCI_CONF_MBAR0_SWB 0x74 +#define PCI_CONF_MBAR0_SWC 0x78 +#define PCI_CONF_MBAR_KEY 0x7C +#define PCI_CONF_FLSH_BAR 0xB8 + +#include "aic94xx_reg_def.h" + +u8 asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg); +u16 asd_read_reg_word(struct asd_ha_struct *asd_ha, u32 reg); +u32 asd_read_reg_dword(struct asd_ha_struct *asd_ha, u32 reg); + +void asd_write_reg_byte(struct asd_ha_struct *asd_ha, u32 reg, u8 val); +void asd_write_reg_word(struct asd_ha_struct *asd_ha, u32 reg, u16 val); +void asd_write_reg_dword(struct asd_ha_struct *asd_ha, u32 reg, u32 val); + +void asd_read_reg_string(struct asd_ha_struct *asd_ha, void *dst, + u32 offs, int count); +void asd_write_reg_string(struct asd_ha_struct *asd_ha, void *src, + u32 offs, int count); + +#define ASD_READ_OCM(type, ord, S) \ +static inline type asd_read_ocm_##ord (struct asd_ha_struct *asd_ha, \ + u32 offs) \ +{ \ + struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[1]; \ + type val = read##S (io_handle->addr + (unsigned long) offs); 
\ + rmb(); \ + return val; \ +} + +ASD_READ_OCM(u8, byte, b); +ASD_READ_OCM(u16,word, w); +ASD_READ_OCM(u32,dword,l); + +#define ASD_WRITE_OCM(type, ord, S) \ +static inline void asd_write_ocm_##ord (struct asd_ha_struct *asd_ha, \ + u32 offs, type val) \ +{ \ + struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[1]; \ + write##S (val, io_handle->addr + (unsigned long) offs); \ + return; \ +} + +ASD_WRITE_OCM(u8, byte, b); +ASD_WRITE_OCM(u16,word, w); +ASD_WRITE_OCM(u32,dword,l); + +#define ASD_DDBSITE_READ(type, ord) \ +static inline type asd_ddbsite_read_##ord (struct asd_ha_struct *asd_ha, \ + u16 ddb_site_no, \ + u16 offs) \ +{ \ + asd_write_reg_word(asd_ha, ALTCIOADR, MnDDB_SITE + offs); \ + asd_write_reg_word(asd_ha, ADDBPTR, ddb_site_no); \ + return asd_read_reg_##ord (asd_ha, CTXACCESS); \ +} + +ASD_DDBSITE_READ(u32, dword); +ASD_DDBSITE_READ(u16, word); + +static inline u8 asd_ddbsite_read_byte(struct asd_ha_struct *asd_ha, + u16 ddb_site_no, + u16 offs) +{ + if (offs & 1) + return asd_ddbsite_read_word(asd_ha, ddb_site_no, + offs & ~1) >> 8; + else + return asd_ddbsite_read_word(asd_ha, ddb_site_no, + offs) & 0xFF; +} + + +#define ASD_DDBSITE_WRITE(type, ord) \ +static inline void asd_ddbsite_write_##ord (struct asd_ha_struct *asd_ha, \ + u16 ddb_site_no, \ + u16 offs, type val) \ +{ \ + asd_write_reg_word(asd_ha, ALTCIOADR, MnDDB_SITE + offs); \ + asd_write_reg_word(asd_ha, ADDBPTR, ddb_site_no); \ + asd_write_reg_##ord (asd_ha, CTXACCESS, val); \ +} + +ASD_DDBSITE_WRITE(u32, dword); +ASD_DDBSITE_WRITE(u16, word); + +static inline void asd_ddbsite_write_byte(struct asd_ha_struct *asd_ha, + u16 ddb_site_no, + u16 offs, u8 val) +{ + u16 base = offs & ~1; + u16 rval = asd_ddbsite_read_word(asd_ha, ddb_site_no, base); + if (offs & 1) + rval = (val << 8) | (rval & 0xFF); + else + rval = (rval & 0xFF00) | val; + asd_ddbsite_write_word(asd_ha, ddb_site_no, base, rval); +} + + +#define ASD_SCBSITE_READ(type, ord) \ +static inline type asd_scbsite_read_##ord (struct asd_ha_struct *asd_ha, \ + u16 scb_site_no, \ + u16 offs) \ +{ \ + asd_write_reg_word(asd_ha, ALTCIOADR, MnSCB_SITE + offs); \ + asd_write_reg_word(asd_ha, ASCBPTR, scb_site_no); \ + return asd_read_reg_##ord (asd_ha, CTXACCESS); \ +} + +ASD_SCBSITE_READ(u32, dword); +ASD_SCBSITE_READ(u16, word); + +static inline u8 asd_scbsite_read_byte(struct asd_ha_struct *asd_ha, + u16 scb_site_no, + u16 offs) +{ + if (offs & 1) + return asd_scbsite_read_word(asd_ha, scb_site_no, + offs & ~1) >> 8; + else + return asd_scbsite_read_word(asd_ha, scb_site_no, + offs) & 0xFF; +} + + +#define ASD_SCBSITE_WRITE(type, ord) \ +static inline void asd_scbsite_write_##ord (struct asd_ha_struct *asd_ha, \ + u16 scb_site_no, \ + u16 offs, type val) \ +{ \ + asd_write_reg_word(asd_ha, ALTCIOADR, MnSCB_SITE + offs); \ + asd_write_reg_word(asd_ha, ASCBPTR, scb_site_no); \ + asd_write_reg_##ord (asd_ha, CTXACCESS, val); \ +} + +ASD_SCBSITE_WRITE(u32, dword); +ASD_SCBSITE_WRITE(u16, word); + +static inline void asd_scbsite_write_byte(struct asd_ha_struct *asd_ha, + u16 scb_site_no, + u16 offs, u8 val) +{ + u16 base = offs & ~1; + u16 rval = asd_scbsite_read_word(asd_ha, scb_site_no, base); + if (offs & 1) + rval = (val << 8) | (rval & 0xFF); + else + rval = (rval & 0xFF00) | val; + asd_scbsite_write_word(asd_ha, scb_site_no, base, rval); +} + +/** + * asd_ddbsite_update_word -- atomically update a word in a ddb site + * @asd_ha: pointer to host adapter structure + * @ddb_site_no: the DDB site number + * @offs: the offset into the DDB + * @oldval: old 
value found in that offset + * @newval: the new value to replace it + * + * This function is used when the sequencers are running and we need to + * update a DDB site atomically without expensive pausing and upausing + * of the sequencers and accessing the DDB site through the CIO bus. + * + * Return 0 on success; -EFAULT on parity error; -EAGAIN if the old value + * is different than the current value at that offset. + */ +static inline int asd_ddbsite_update_word(struct asd_ha_struct *asd_ha, + u16 ddb_site_no, u16 offs, + u16 oldval, u16 newval) +{ + u8 done; + u16 oval = asd_ddbsite_read_word(asd_ha, ddb_site_no, offs); + if (oval != oldval) + return -EAGAIN; + asd_write_reg_word(asd_ha, AOLDDATA, oldval); + asd_write_reg_word(asd_ha, ANEWDATA, newval); + do { + done = asd_read_reg_byte(asd_ha, ATOMICSTATCTL); + } while (!(done & ATOMICDONE)); + if (done & ATOMICERR) + return -EFAULT; /* parity error */ + else if (done & ATOMICWIN) + return 0; /* success */ + else + return -EAGAIN; /* oldval different than current value */ +} + +static inline int asd_ddbsite_update_byte(struct asd_ha_struct *asd_ha, + u16 ddb_site_no, u16 offs, + u8 _oldval, u8 _newval) +{ + u16 base = offs & ~1; + u16 oval; + u16 nval = asd_ddbsite_read_word(asd_ha, ddb_site_no, base); + if (offs & 1) { + if ((nval >> 8) != _oldval) + return -EAGAIN; + nval = (_newval << 8) | (nval & 0xFF); + oval = (_oldval << 8) | (nval & 0xFF); + } else { + if ((nval & 0xFF) != _oldval) + return -EAGAIN; + nval = (nval & 0xFF00) | _newval; + oval = (nval & 0xFF00) | _oldval; + } + return asd_ddbsite_update_word(asd_ha, ddb_site_no, base, oval, nval); +} + +static inline void asd_write_reg_addr(struct asd_ha_struct *asd_ha, u32 reg, + dma_addr_t dma_handle) +{ + asd_write_reg_dword(asd_ha, reg, ASD_BUSADDR_LO(dma_handle)); + asd_write_reg_dword(asd_ha, reg+4, ASD_BUSADDR_HI(dma_handle)); +} + +static inline u32 asd_get_cmdctx_size(struct asd_ha_struct *asd_ha) +{ + /* DCHREVISION returns 0, possibly broken */ + u32 ctxmemsize = asd_read_reg_dword(asd_ha, LmMnINT(0,0)) & CTXMEMSIZE; + return ctxmemsize ? 65536 : 32768; +} + +static inline u32 asd_get_devctx_size(struct asd_ha_struct *asd_ha) +{ + u32 ctxmemsize = asd_read_reg_dword(asd_ha, LmMnINT(0,0)) & CTXMEMSIZE; + return ctxmemsize ? 8192 : 4096; +} + +static inline void asd_disable_ints(struct asd_ha_struct *asd_ha) +{ + asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN); +} + +static inline void asd_enable_ints(struct asd_ha_struct *asd_ha) +{ + /* Enable COM SAS interrupt on errors, COMSTAT */ + asd_write_reg_dword(asd_ha, COMSTATEN, + EN_CSBUFPERR | EN_CSERR | EN_OVLYERR); + /* Enable DCH SAS CFIFTOERR */ + asd_write_reg_dword(asd_ha, DCHSTATUS, EN_CFIFTOERR); + /* Enable Host Device interrupts */ + asd_write_reg_dword(asd_ha, CHIMINTEN, SET_CHIMINTEN); +} + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h new file mode 100644 index 000000000..b96cfc33b --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h @@ -0,0 +1,2381 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Aic94xx SAS/SATA driver hardware registers definitions. + * + * Copyright (C) 2004 Adaptec, Inc. All rights reserved. + * Copyright (C) 2004 David Chaw + * Copyright (C) 2005 Luben Tuikov + * + * Luben Tuikov: Some register value updates to make it work with the window + * agnostic register r/w functions. Some register corrections, sizes, + * etc. 
+ * + * $Id: //depot/aic94xx/aic94xx_reg_def.h#27 $ + */ + +#ifndef _ADP94XX_REG_DEF_H_ +#define _ADP94XX_REG_DEF_H_ + +/* + * Common definitions. + */ +#define CSEQ_MODE_PAGE_SIZE 0x200 /* CSEQ mode page size */ +#define LmSEQ_MODE_PAGE_SIZE 0x200 /* LmSEQ mode page size */ +#define LmSEQ_HOST_REG_SIZE 0x4000 /* LmSEQ Host Register size */ + +/********************* COM_SAS registers definition *************************/ + +/* The base is REG_BASE_ADDR, defined in aic94xx_reg.h. + */ + +/* + * CHIM Registers, Address Range : (0x00-0xFF) + */ +#define COMBIST (REG_BASE_ADDR + 0x00) + +/* bits 31:24 */ +#define L7BLKRST 0x80000000 +#define L6BLKRST 0x40000000 +#define L5BLKRST 0x20000000 +#define L4BLKRST 0x10000000 +#define L3BLKRST 0x08000000 +#define L2BLKRST 0x04000000 +#define L1BLKRST 0x02000000 +#define L0BLKRST 0x01000000 +#define LmBLKRST 0xFF000000 +#define LmBLKRST_COMBIST(phyid) (1 << (24 + phyid)) + +#define OCMBLKRST 0x00400000 +#define CTXMEMBLKRST 0x00200000 +#define CSEQBLKRST 0x00100000 +#define EXSIBLKRST 0x00040000 +#define DPIBLKRST 0x00020000 +#define DFIFBLKRST 0x00010000 +#define HARDRST 0x00000200 +#define COMBLKRST 0x00000100 +#define FRCDFPERR 0x00000080 +#define FRCCIOPERR 0x00000020 +#define FRCBISTERR 0x00000010 +#define COMBISTEN 0x00000004 +#define COMBISTDONE 0x00000002 /* ro */ +#define COMBISTFAIL 0x00000001 /* ro */ + +#define COMSTAT (REG_BASE_ADDR + 0x04) + +#define REQMBXREAD 0x00000040 +#define RSPMBXAVAIL 0x00000020 +#define CSBUFPERR 0x00000008 +#define OVLYERR 0x00000004 +#define CSERR 0x00000002 +#define OVLYDMADONE 0x00000001 + +#define COMSTAT_MASK (REQMBXREAD | RSPMBXAVAIL | \ + CSBUFPERR | OVLYERR | CSERR |\ + OVLYDMADONE) + +#define COMSTATEN (REG_BASE_ADDR + 0x08) + +#define EN_REQMBXREAD 0x00000040 +#define EN_RSPMBXAVAIL 0x00000020 +#define EN_CSBUFPERR 0x00000008 +#define EN_OVLYERR 0x00000004 +#define EN_CSERR 0x00000002 +#define EN_OVLYDONE 0x00000001 + +#define SCBPRO (REG_BASE_ADDR + 0x0C) + +#define SCBCONS_MASK 0xFFFF0000 +#define SCBPRO_MASK 0x0000FFFF + +#define CHIMREQMBX (REG_BASE_ADDR + 0x10) + +#define CHIMRSPMBX (REG_BASE_ADDR + 0x14) + +#define CHIMINT (REG_BASE_ADDR + 0x18) + +#define EXT_INT0 0x00000800 +#define EXT_INT1 0x00000400 +#define PORRSTDET 0x00000200 +#define HARDRSTDET 0x00000100 +#define DLAVAILQ 0x00000080 /* ro */ +#define HOSTERR 0x00000040 +#define INITERR 0x00000020 +#define DEVINT 0x00000010 +#define COMINT 0x00000008 +#define DEVTIMER2 0x00000004 +#define DEVTIMER1 0x00000002 +#define DLAVAIL 0x00000001 + +#define CHIMINT_MASK (HOSTERR | INITERR | DEVINT | COMINT |\ + DEVTIMER2 | DEVTIMER1 | DLAVAIL) + +#define DEVEXCEPT_MASK (HOSTERR | INITERR | DEVINT | COMINT) + +#define CHIMINTEN (REG_BASE_ADDR + 0x1C) + +#define RST_EN_EXT_INT1 0x01000000 +#define RST_EN_EXT_INT0 0x00800000 +#define RST_EN_HOSTERR 0x00400000 +#define RST_EN_INITERR 0x00200000 +#define RST_EN_DEVINT 0x00100000 +#define RST_EN_COMINT 0x00080000 +#define RST_EN_DEVTIMER2 0x00040000 +#define RST_EN_DEVTIMER1 0x00020000 +#define RST_EN_DLAVAIL 0x00010000 +#define SET_EN_EXT_INT1 0x00000100 +#define SET_EN_EXT_INT0 0x00000080 +#define SET_EN_HOSTERR 0x00000040 +#define SET_EN_INITERR 0x00000020 +#define SET_EN_DEVINT 0x00000010 +#define SET_EN_COMINT 0x00000008 +#define SET_EN_DEVTIMER2 0x00000004 +#define SET_EN_DEVTIMER1 0x00000002 +#define SET_EN_DLAVAIL 0x00000001 + +#define RST_CHIMINTEN (RST_EN_HOSTERR | RST_EN_INITERR | \ + RST_EN_DEVINT | RST_EN_COMINT | \ + RST_EN_DEVTIMER2 | RST_EN_DEVTIMER1 |\ + RST_EN_DLAVAIL) + +#define 
SET_CHIMINTEN (SET_EN_HOSTERR | SET_EN_INITERR |\ + SET_EN_DEVINT | SET_EN_COMINT |\ + SET_EN_DLAVAIL) + +#define OVLYDMACTL (REG_BASE_ADDR + 0x20) + +#define OVLYADR_MASK 0x07FF0000 +#define OVLYLSEQ_MASK 0x0000FF00 +#define OVLYCSEQ 0x00000080 +#define OVLYHALTERR 0x00000040 +#define PIOCMODE 0x00000020 +#define RESETOVLYDMA 0x00000008 /* wo */ +#define STARTOVLYDMA 0x00000004 +#define STOPOVLYDMA 0x00000002 /* wo */ +#define OVLYDMAACT 0x00000001 /* ro */ + +#define OVLYDMACNT (REG_BASE_ADDR + 0x24) + +#define OVLYDOMAIN1 0x20000000 /* ro */ +#define OVLYDOMAIN0 0x10000000 +#define OVLYBUFADR_MASK 0x007F0000 +#define OVLYDMACNT_MASK 0x00003FFF + +#define OVLYDMAADR (REG_BASE_ADDR + 0x28) + +#define DMAERR (REG_BASE_ADDR + 0x30) + +#define OVLYERRSTAT_MASK 0x0000FF00 /* ro */ +#define CSERRSTAT_MASK 0x000000FF /* ro */ + +#define SPIODATA (REG_BASE_ADDR + 0x34) + +/* 0x38 - 0x3C are reserved */ + +#define T1CNTRLR (REG_BASE_ADDR + 0x40) + +#define T1DONE 0x00010000 /* ro */ +#define TIMER64 0x00000400 +#define T1ENABLE 0x00000200 +#define T1RELOAD 0x00000100 +#define T1PRESCALER_MASK 0x00000003 + +#define T1CMPR (REG_BASE_ADDR + 0x44) + +#define T1CNTR (REG_BASE_ADDR + 0x48) + +#define T2CNTRLR (REG_BASE_ADDR + 0x4C) + +#define T2DONE 0x00010000 /* ro */ +#define T2ENABLE 0x00000200 +#define T2RELOAD 0x00000100 +#define T2PRESCALER_MASK 0x00000003 + +#define T2CMPR (REG_BASE_ADDR + 0x50) + +#define T2CNTR (REG_BASE_ADDR + 0x54) + +/* 0x58h - 0xFCh are reserved */ + +/* + * DCH_SAS Registers, Address Range : (0x800-0xFFF) + */ +#define CMDCTXBASE (REG_BASE_ADDR + 0x800) + +#define DEVCTXBASE (REG_BASE_ADDR + 0x808) + +#define CTXDOMAIN (REG_BASE_ADDR + 0x810) + +#define DEVCTXDOMAIN1 0x00000008 /* ro */ +#define DEVCTXDOMAIN0 0x00000004 +#define CMDCTXDOMAIN1 0x00000002 /* ro */ +#define CMDCTXDOMAIN0 0x00000001 + +#define DCHCTL (REG_BASE_ADDR + 0x814) + +#define OCMBISTREPAIR 0x00080000 +#define OCMBISTEN 0x00040000 +#define OCMBISTDN 0x00020000 /* ro */ +#define OCMBISTFAIL 0x00010000 /* ro */ +#define DDBBISTEN 0x00004000 +#define DDBBISTDN 0x00002000 /* ro */ +#define DDBBISTFAIL 0x00001000 /* ro */ +#define SCBBISTEN 0x00000400 +#define SCBBISTDN 0x00000200 /* ro */ +#define SCBBISTFAIL 0x00000100 /* ro */ + +#define MEMSEL_MASK 0x000000E0 +#define MEMSEL_CCM_LSEQ 0x00000000 +#define MEMSEL_CCM_IOP 0x00000020 +#define MEMSEL_CCM_SASCTL 0x00000040 +#define MEMSEL_DCM_LSEQ 0x00000060 +#define MEMSEL_DCM_IOP 0x00000080 +#define MEMSEL_OCM 0x000000A0 + +#define FRCERR 0x00000010 +#define AUTORLS 0x00000001 + +#define DCHREVISION (REG_BASE_ADDR + 0x818) + +#define DCHREVISION_MASK 0x000000FF + +#define DCHSTATUS (REG_BASE_ADDR + 0x81C) + +#define EN_CFIFTOERR 0x00020000 +#define CFIFTOERR 0x00000200 +#define CSEQINT 0x00000100 /* ro */ +#define LSEQ7INT 0x00000080 /* ro */ +#define LSEQ6INT 0x00000040 /* ro */ +#define LSEQ5INT 0x00000020 /* ro */ +#define LSEQ4INT 0x00000010 /* ro */ +#define LSEQ3INT 0x00000008 /* ro */ +#define LSEQ2INT 0x00000004 /* ro */ +#define LSEQ1INT 0x00000002 /* ro */ +#define LSEQ0INT 0x00000001 /* ro */ + +#define LSEQINT_MASK (LSEQ7INT | LSEQ6INT | LSEQ5INT |\ + LSEQ4INT | LSEQ3INT | LSEQ2INT |\ + LSEQ1INT | LSEQ0INT) + +#define DCHDFIFDEBUG (REG_BASE_ADDR + 0x820) +#define ENFAIRMST 0x00FF0000 +#define DISWRMST9 0x00000200 +#define DISWRMST8 0x00000100 +#define DISRDMST 0x000000FF + +#define ATOMICSTATCTL (REG_BASE_ADDR + 0x824) +/* 8 bit wide */ +#define AUTOINC 0x80 +#define ATOMICERR 0x04 +#define ATOMICWIN 0x02 +#define ATOMICDONE 0x01 + + +#define 
ALTCIOADR (REG_BASE_ADDR + 0x828) +/* 16 bit; bits 8:0 define CIO addr space of CSEQ */ + +#define ASCBPTR (REG_BASE_ADDR + 0x82C) +/* 16 bit wide */ + +#define ADDBPTR (REG_BASE_ADDR + 0x82E) +/* 16 bit wide */ + +#define ANEWDATA (REG_BASE_ADDR + 0x830) +/* 16 bit */ + +#define AOLDDATA (REG_BASE_ADDR + 0x834) +/* 16 bit */ + +#define CTXACCESS (REG_BASE_ADDR + 0x838) +/* 32 bit */ + +/* 0x83Ch - 0xFFCh are reserved */ + +/* + * ARP2 External Processor Registers, Address Range : (0x00-0x1F) + */ +#define ARP2CTL 0x00 + +#define FRCSCRPERR 0x00040000 +#define FRCARP2PERR 0x00020000 +#define FRCARP2ILLOPC 0x00010000 +#define ENWAITTO 0x00008000 +#define PERRORDIS 0x00004000 +#define FAILDIS 0x00002000 +#define CIOPERRDIS 0x00001000 +#define BREAKEN3 0x00000800 +#define BREAKEN2 0x00000400 +#define BREAKEN1 0x00000200 +#define BREAKEN0 0x00000100 +#define EPAUSE 0x00000008 +#define PAUSED 0x00000004 /* ro */ +#define STEP 0x00000002 +#define ARP2RESET 0x00000001 /* wo */ + +#define ARP2INT 0x04 + +#define HALTCODE_MASK 0x00FF0000 /* ro */ +#define ARP2WAITTO 0x00000100 +#define ARP2HALTC 0x00000080 +#define ARP2ILLOPC 0x00000040 +#define ARP2PERR 0x00000020 +#define ARP2CIOPERR 0x00000010 +#define ARP2BREAK3 0x00000008 +#define ARP2BREAK2 0x00000004 +#define ARP2BREAK1 0x00000002 +#define ARP2BREAK0 0x00000001 + +#define ARP2INTEN 0x08 + +#define EN_ARP2WAITTO 0x00000100 +#define EN_ARP2HALTC 0x00000080 +#define EN_ARP2ILLOPC 0x00000040 +#define EN_ARP2PERR 0x00000020 +#define EN_ARP2CIOPERR 0x00000010 +#define EN_ARP2BREAK3 0x00000008 +#define EN_ARP2BREAK2 0x00000004 +#define EN_ARP2BREAK1 0x00000002 +#define EN_ARP2BREAK0 0x00000001 + +#define ARP2BREAKADR01 0x0C + +#define BREAKADR1_MASK 0x0FFF0000 +#define BREAKADR0_MASK 0x00000FFF + +#define ARP2BREAKADR23 0x10 + +#define BREAKADR3_MASK 0x0FFF0000 +#define BREAKADR2_MASK 0x00000FFF + +/* 0x14h - 0x1Ch are reserved */ + +/* + * ARP2 Registers, Address Range : (0x00-0x1F) + * The definitions have the same address offset for CSEQ and LmSEQ + * CIO Bus Registers. + */ +#define MODEPTR 0x00 + +#define DSTMODE 0xF0 +#define SRCMODE 0x0F + +#define ALTMODE 0x01 + +#define ALTDMODE 0xF0 +#define ALTSMODE 0x0F + +#define ATOMICXCHG 0x02 + +#define FLAG 0x04 + +#define INTCODE_MASK 0xF0 +#define ALTMODEV2 0x04 +#define CARRY_INT 0x02 +#define CARRY 0x01 + +#define ARP2INTCTL 0x05 + +#define PAUSEDIS 0x80 +#define RSTINTCTL 0x40 +#define POPALTMODE 0x08 +#define ALTMODEV 0x04 +#define INTMASK 0x02 +#define IRET 0x01 + +#define STACK 0x06 + +#define FUNCTION1 0x07 + +#define PRGMCNT 0x08 + +#define ACCUM 0x0A + +#define SINDEX 0x0C + +#define DINDEX 0x0E + +#define ALLONES 0x10 + +#define ALLZEROS 0x11 + +#define SINDIR 0x12 + +#define DINDIR 0x13 + +#define JUMLDIR 0x14 + +#define ARP2HALTCODE 0x15 + +#define CURRADDR 0x16 + +#define LASTADDR 0x18 + +#define NXTLADDR 0x1A + +#define DBGPORTPTR 0x1C + +#define DBGPORT 0x1D + +/* + * CIO Registers. + * The definitions have the same address offset for CSEQ and LmSEQ + * CIO Bus Registers. 
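+ *
+ * For example, the MnSCRATCHPAGE offset (0x25) defined below is used by
+ * both sequencer types: the CSEQ address is built as
+ * CSEQm_CIO_REG(Mode, MnSCRATCHPAGE) (see CMnSCRATCHPAGE), while the
+ * per-link LmSEQ address is built as
+ * LmSEQ_PHY_REG(Mode, LinkNum, MnSCRATCHPAGE) (see LmMnSCRATCHPAGE).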
+ */ +#define MnSCBPTR 0x20 + +#define MnDDBPTR 0x22 + +#define SCRATCHPAGE 0x24 + +#define MnSCRATCHPAGE 0x25 + +#define SCRATCHPAGESV 0x26 + +#define MnSCRATCHPAGESV 0x27 + +#define MnDMAERRS 0x46 + +#define MnSGDMAERRS 0x47 + +#define MnSGBUF 0x53 + +#define MnSGDMASTAT 0x5b + +#define MnDDMACTL 0x5c /* RAZOR.rspec.fm rev 1.5 is wrong */ + +#define MnDDMASTAT 0x5d /* RAZOR.rspec.fm rev 1.5 is wrong */ + +#define MnDDMAMODE 0x5e /* RAZOR.rspec.fm rev 1.5 is wrong */ + +#define MnDMAENG 0x60 + +#define MnPIPECTL 0x61 + +#define MnSGBADR 0x65 + +#define MnSCB_SITE 0x100 + +#define MnDDB_SITE 0x180 + +/* + * The common definitions below have the same address offset for both + * CSEQ and LmSEQ. + */ +#define BISTCTL0 0x4C + +#define BISTCTL1 0x50 + +#define MAPPEDSCR 0x800 + +/* + * CSEQ Host Register, Address Range : (0x000-0xFFC) + */ +#define CSEQ_HOST_REG_BASE_ADR 0xB8001000 + +#define CARP2CTL (CSEQ_HOST_REG_BASE_ADR + ARP2CTL) + +#define CARP2INT (CSEQ_HOST_REG_BASE_ADR + ARP2INT) + +#define CARP2INTEN (CSEQ_HOST_REG_BASE_ADR + ARP2INTEN) + +#define CARP2BREAKADR01 (CSEQ_HOST_REG_BASE_ADR+ARP2BREAKADR01) + +#define CARP2BREAKADR23 (CSEQ_HOST_REG_BASE_ADR+ARP2BREAKADR23) + +#define CBISTCTL (CSEQ_HOST_REG_BASE_ADR + BISTCTL1) + +#define CSEQRAMBISTEN 0x00000040 +#define CSEQRAMBISTDN 0x00000020 /* ro */ +#define CSEQRAMBISTFAIL 0x00000010 /* ro */ +#define CSEQSCRBISTEN 0x00000004 +#define CSEQSCRBISTDN 0x00000002 /* ro */ +#define CSEQSCRBISTFAIL 0x00000001 /* ro */ + +#define CMAPPEDSCR (CSEQ_HOST_REG_BASE_ADR + MAPPEDSCR) + +/* + * CSEQ CIO Bus Registers, Address Range : (0x0000-0x1FFC) + * 16 modes, each mode is 512 bytes. + * Unless specified, the register should valid for all modes. + */ +#define CSEQ_CIO_REG_BASE_ADR REG_BASE_ADDR_CSEQCIO + +#define CSEQm_CIO_REG(Mode, Reg) \ + (CSEQ_CIO_REG_BASE_ADR + \ + ((u32) (Mode) * CSEQ_MODE_PAGE_SIZE) + (u32) (Reg)) + +#define CMODEPTR (CSEQ_CIO_REG_BASE_ADR + MODEPTR) + +#define CALTMODE (CSEQ_CIO_REG_BASE_ADR + ALTMODE) + +#define CATOMICXCHG (CSEQ_CIO_REG_BASE_ADR + ATOMICXCHG) + +#define CFLAG (CSEQ_CIO_REG_BASE_ADR + FLAG) + +#define CARP2INTCTL (CSEQ_CIO_REG_BASE_ADR + ARP2INTCTL) + +#define CSTACK (CSEQ_CIO_REG_BASE_ADR + STACK) + +#define CFUNCTION1 (CSEQ_CIO_REG_BASE_ADR + FUNCTION1) + +#define CPRGMCNT (CSEQ_CIO_REG_BASE_ADR + PRGMCNT) + +#define CACCUM (CSEQ_CIO_REG_BASE_ADR + ACCUM) + +#define CSINDEX (CSEQ_CIO_REG_BASE_ADR + SINDEX) + +#define CDINDEX (CSEQ_CIO_REG_BASE_ADR + DINDEX) + +#define CALLONES (CSEQ_CIO_REG_BASE_ADR + ALLONES) + +#define CALLZEROS (CSEQ_CIO_REG_BASE_ADR + ALLZEROS) + +#define CSINDIR (CSEQ_CIO_REG_BASE_ADR + SINDIR) + +#define CDINDIR (CSEQ_CIO_REG_BASE_ADR + DINDIR) + +#define CJUMLDIR (CSEQ_CIO_REG_BASE_ADR + JUMLDIR) + +#define CARP2HALTCODE (CSEQ_CIO_REG_BASE_ADR + ARP2HALTCODE) + +#define CCURRADDR (CSEQ_CIO_REG_BASE_ADR + CURRADDR) + +#define CLASTADDR (CSEQ_CIO_REG_BASE_ADR + LASTADDR) + +#define CNXTLADDR (CSEQ_CIO_REG_BASE_ADR + NXTLADDR) + +#define CDBGPORTPTR (CSEQ_CIO_REG_BASE_ADR + DBGPORTPTR) + +#define CDBGPORT (CSEQ_CIO_REG_BASE_ADR + DBGPORT) + +#define CSCRATCHPAGE (CSEQ_CIO_REG_BASE_ADR + SCRATCHPAGE) + +#define CMnSCBPTR(Mode) CSEQm_CIO_REG(Mode, MnSCBPTR) + +#define CMnDDBPTR(Mode) CSEQm_CIO_REG(Mode, MnDDBPTR) + +#define CMnSCRATCHPAGE(Mode) CSEQm_CIO_REG(Mode, MnSCRATCHPAGE) + +#define CLINKCON (CSEQ_CIO_REG_BASE_ADR + 0x28) + +#define CCIOAACESS (CSEQ_CIO_REG_BASE_ADR + 0x2C) + +/* mode 0-7 */ +#define MnREQMBX 0x30 +#define CMnREQMBX(Mode) CSEQm_CIO_REG(Mode, 0x30) + +/* 
mode 8 */ +#define CSEQCON CSEQm_CIO_REG(8, 0x30) + +/* mode 0-7 */ +#define MnRSPMBX 0x34 +#define CMnRSPMBX(Mode) CSEQm_CIO_REG(Mode, 0x34) + +/* mode 8 */ +#define CSEQCOMCTL CSEQm_CIO_REG(8, 0x34) + +/* mode 8 */ +#define CSEQCOMSTAT CSEQm_CIO_REG(8, 0x35) + +/* mode 8 */ +#define CSEQCOMINTEN CSEQm_CIO_REG(8, 0x36) + +/* mode 8 */ +#define CSEQCOMDMACTL CSEQm_CIO_REG(8, 0x37) + +#define CSHALTERR 0x10 +#define RESETCSDMA 0x08 /* wo */ +#define STARTCSDMA 0x04 +#define STOPCSDMA 0x02 /* wo */ +#define CSDMAACT 0x01 /* ro */ + +/* mode 0-7 */ +#define MnINT 0x38 +#define CMnINT(Mode) CSEQm_CIO_REG(Mode, 0x38) + +#define CMnREQMBXE 0x02 +#define CMnRSPMBXF 0x01 +#define CMnINT_MASK 0x00000003 + +/* mode 8 */ +#define CSEQREQMBX CSEQm_CIO_REG(8, 0x38) + +/* mode 0-7 */ +#define MnINTEN 0x3C +#define CMnINTEN(Mode) CSEQm_CIO_REG(Mode, 0x3C) + +#define EN_CMnRSPMBXF 0x01 + +/* mode 8 */ +#define CSEQRSPMBX CSEQm_CIO_REG(8, 0x3C) + +/* mode 8 */ +#define CSDMAADR CSEQm_CIO_REG(8, 0x40) + +/* mode 8 */ +#define CSDMACNT CSEQm_CIO_REG(8, 0x48) + +/* mode 8 */ +#define CSEQDLCTL CSEQm_CIO_REG(8, 0x4D) + +#define DONELISTEND 0x10 +#define DONELISTSIZE_MASK 0x0F +#define DONELISTSIZE_8ELEM 0x01 +#define DONELISTSIZE_16ELEM 0x02 +#define DONELISTSIZE_32ELEM 0x03 +#define DONELISTSIZE_64ELEM 0x04 +#define DONELISTSIZE_128ELEM 0x05 +#define DONELISTSIZE_256ELEM 0x06 +#define DONELISTSIZE_512ELEM 0x07 +#define DONELISTSIZE_1024ELEM 0x08 +#define DONELISTSIZE_2048ELEM 0x09 +#define DONELISTSIZE_4096ELEM 0x0A +#define DONELISTSIZE_8192ELEM 0x0B +#define DONELISTSIZE_16384ELEM 0x0C + +/* mode 8 */ +#define CSEQDLOFFS CSEQm_CIO_REG(8, 0x4E) + +/* mode 11 */ +#define CM11INTVEC0 CSEQm_CIO_REG(11, 0x50) + +/* mode 11 */ +#define CM11INTVEC1 CSEQm_CIO_REG(11, 0x52) + +/* mode 11 */ +#define CM11INTVEC2 CSEQm_CIO_REG(11, 0x54) + +#define CCONMSK (CSEQ_CIO_REG_BASE_ADR + 0x60) + +#define CCONEXIST (CSEQ_CIO_REG_BASE_ADR + 0x61) + +#define CCONMODE (CSEQ_CIO_REG_BASE_ADR + 0x62) + +#define CTIMERCALC (CSEQ_CIO_REG_BASE_ADR + 0x64) + +#define CINTDIS (CSEQ_CIO_REG_BASE_ADR + 0x68) + +/* mode 8, 32x32 bits, 128 bytes of mapped buffer */ +#define CSBUFFER CSEQm_CIO_REG(8, 0x80) + +#define CSCRATCH (CSEQ_CIO_REG_BASE_ADR + 0x1C0) + +/* mode 0-8 */ +#define CMnSCRATCH(Mode) CSEQm_CIO_REG(Mode, 0x1E0) + +/* + * CSEQ Mapped Instruction RAM Page, Address Range : (0x0000-0x1FFC) + */ +#define CSEQ_RAM_REG_BASE_ADR 0xB8004000 + +/* + * The common definitions below have the same address offset for all the Link + * sequencers. 
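+ *
+ * A small worked example using the surrounding defines: each link
+ * sequencer occupies LmSEQ_HOST_REG_SIZE (0x4000) bytes starting at
+ * LSEQ0_HOST_REG_BASE_ADR (0xB8020000), so LmCONTROL(2) resolves to
+ * 0xB8020000 + 2 * 0x4000 + CONTROL (0x48) = 0xB8028048, which indeed
+ * falls inside the LSEQ2 host register range at 0xB8028000.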
+ */ +#define MODECTL 0x40 + +#define DBGMODE 0x44 + +#define CONTROL 0x48 +#define LEDTIMER 0x00010000 +#define LEDTIMERS_10us 0x00000000 +#define LEDTIMERS_1ms 0x00000800 +#define LEDTIMERS_100ms 0x00001000 +#define LEDMODE_TXRX 0x00000000 +#define LEDMODE_CONNECTED 0x00000200 +#define LEDPOL 0x00000100 + +#define LSEQRAM 0x1000 + +/* + * LmSEQ Host Registers, Address Range : (0x0000-0x3FFC) + */ +#define LSEQ0_HOST_REG_BASE_ADR 0xB8020000 +#define LSEQ1_HOST_REG_BASE_ADR 0xB8024000 +#define LSEQ2_HOST_REG_BASE_ADR 0xB8028000 +#define LSEQ3_HOST_REG_BASE_ADR 0xB802C000 +#define LSEQ4_HOST_REG_BASE_ADR 0xB8030000 +#define LSEQ5_HOST_REG_BASE_ADR 0xB8034000 +#define LSEQ6_HOST_REG_BASE_ADR 0xB8038000 +#define LSEQ7_HOST_REG_BASE_ADR 0xB803C000 + +#define LmARP2CTL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + ARP2CTL) + +#define LmARP2INT(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + ARP2INT) + +#define LmARP2INTEN(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + ARP2INTEN) + +#define LmDBGMODE(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + DBGMODE) + +#define LmCONTROL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + CONTROL) + +#define LmARP2BREAKADR01(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + ARP2BREAKADR01) + +#define LmARP2BREAKADR23(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + ARP2BREAKADR23) + +#define LmMODECTL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + MODECTL) + +#define LmAUTODISCI 0x08000000 +#define LmDSBLBITLT 0x04000000 +#define LmDSBLANTT 0x02000000 +#define LmDSBLCRTT 0x01000000 +#define LmDSBLCONT 0x00000100 +#define LmPRIMODE 0x00000080 +#define LmDSBLHOLD 0x00000040 +#define LmDISACK 0x00000020 +#define LmBLIND48 0x00000010 +#define LmRCVMODE_MASK 0x0000000C +#define LmRCVMODE_PLD 0x00000000 +#define LmRCVMODE_HPC 0x00000004 + +#define LmDBGMODE(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + DBGMODE) + +#define LmFRCPERR 0x80000000 +#define LmMEMSEL_MASK 0x30000000 +#define LmFRCRBPERR 0x00000000 +#define LmFRCTBPERR 0x10000000 +#define LmFRCSGBPERR 0x20000000 +#define LmFRCARBPERR 0x30000000 +#define LmRCVIDW 0x00080000 +#define LmINVDWERR 0x00040000 +#define LmRCVDISP 0x00004000 +#define LmDISPERR 0x00002000 +#define LmDSBLDSCR 0x00000800 +#define LmDSBLSCR 0x00000400 +#define LmFRCNAK 0x00000200 +#define LmFRCROFS 0x00000100 +#define LmFRCCRC 0x00000080 +#define LmFRMTYPE_MASK 0x00000070 +#define LmSG_DATA 0x00000000 +#define LmSG_COMMAND 0x00000010 +#define LmSG_TASK 0x00000020 +#define LmSG_TGTXFER 0x00000030 +#define LmSG_RESPONSE 0x00000040 +#define LmSG_IDENADDR 0x00000050 +#define LmSG_OPENADDR 0x00000060 +#define LmDISCRCGEN 0x00000008 +#define LmDISCRCCHK 0x00000004 +#define LmSSXMTFRM 0x00000002 +#define LmSSRCVFRM 0x00000001 + +#define LmCONTROL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + CONTROL) + +#define LmSTEPXMTFRM 0x00000002 +#define LmSTEPRCVFRM 0x00000001 + +#define LmBISTCTL0(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + BISTCTL0) + +#define ARBBISTEN 0x40000000 +#define ARBBISTDN 0x20000000 /* ro */ +#define ARBBISTFAIL 0x10000000 /* ro */ +#define TBBISTEN 0x00000400 +#define TBBISTDN 0x00000200 /* ro */ +#define TBBISTFAIL 0x00000100 /* ro */ +#define RBBISTEN 0x00000040 +#define RBBISTDN 
0x00000020 /* ro */ +#define RBBISTFAIL 0x00000010 /* ro */ +#define SGBISTEN 0x00000004 +#define SGBISTDN 0x00000002 /* ro */ +#define SGBISTFAIL 0x00000001 /* ro */ + +#define LmBISTCTL1(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) +\ + BISTCTL1) + +#define LmRAMPAGE1 0x00000200 +#define LmRAMPAGE0 0x00000100 +#define LmIMEMBISTEN 0x00000040 +#define LmIMEMBISTDN 0x00000020 /* ro */ +#define LmIMEMBISTFAIL 0x00000010 /* ro */ +#define LmSCRBISTEN 0x00000004 +#define LmSCRBISTDN 0x00000002 /* ro */ +#define LmSCRBISTFAIL 0x00000001 /* ro */ +#define LmRAMPAGE (LmRAMPAGE1 + LmRAMPAGE0) +#define LmRAMPAGE_LSHIFT 0x8 + +#define LmSCRATCH(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum) * LmSEQ_HOST_REG_SIZE) +\ + MAPPEDSCR) + +#define LmSEQRAM(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum) * LmSEQ_HOST_REG_SIZE) +\ + LSEQRAM) + +/* + * LmSEQ CIO Bus Register, Address Range : (0x0000-0xFFC) + * 8 modes, each mode is 512 bytes. + * Unless specified, the register should valid for all modes. + */ +#define LmSEQ_CIOBUS_REG_BASE 0x2000 + +#define LmSEQ_PHY_BASE(Mode, LinkNum) \ + (LSEQ0_HOST_REG_BASE_ADR + \ + (LmSEQ_HOST_REG_SIZE * (u32) (LinkNum)) + \ + LmSEQ_CIOBUS_REG_BASE + \ + ((u32) (Mode) * LmSEQ_MODE_PAGE_SIZE)) + +#define LmSEQ_PHY_REG(Mode, LinkNum, Reg) \ + (LmSEQ_PHY_BASE(Mode, LinkNum) + (u32) (Reg)) + +#define LmMODEPTR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, MODEPTR) + +#define LmALTMODE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALTMODE) + +#define LmATOMICXCHG(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ATOMICXCHG) + +#define LmFLAG(LinkNum) LmSEQ_PHY_REG(0, LinkNum, FLAG) + +#define LmARP2INTCTL(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ARP2INTCTL) + +#define LmSTACK(LinkNum) LmSEQ_PHY_REG(0, LinkNum, STACK) + +#define LmFUNCTION1(LinkNum) LmSEQ_PHY_REG(0, LinkNum, FUNCTION1) + +#define LmPRGMCNT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, PRGMCNT) + +#define LmACCUM(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ACCUM) + +#define LmSINDEX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SINDEX) + +#define LmDINDEX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DINDEX) + +#define LmALLONES(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALLONES) + +#define LmALLZEROS(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALLZEROS) + +#define LmSINDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SINDIR) + +#define LmDINDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DINDIR) + +#define LmJUMLDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, JUMLDIR) + +#define LmARP2HALTCODE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ARP2HALTCODE) + +#define LmCURRADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, CURRADDR) + +#define LmLASTADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, LASTADDR) + +#define LmNXTLADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, NXTLADDR) + +#define LmDBGPORTPTR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DBGPORTPTR) + +#define LmDBGPORT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DBGPORT) + +#define LmSCRATCHPAGE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SCRATCHPAGE) + +#define LmMnSCRATCHPAGE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, \ + MnSCRATCHPAGE) + +#define LmTIMERCALC(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x28) + +#define LmREQMBX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x30) + +#define LmRSPMBX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x34) + +#define LmMnINT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x38) + +#define CTXMEMSIZE 0x80000000 /* ro */ +#define LmACKREQ 0x08000000 +#define LmNAKREQ 0x04000000 +#define LmMnXMTERR 0x02000000 +#define LmM5OOBSVC 0x01000000 +#define LmHWTINT 0x00800000 +#define LmMnCTXDONE 0x00100000 +#define LmM2REQMBXF 0x00080000 +#define LmM2RSPMBXE 0x00040000 +#define LmMnDMAERR 0x00020000 +#define 
LmRCVPRIM 0x00010000 +#define LmRCVERR 0x00008000 +#define LmADDRRCV 0x00004000 +#define LmMnHDRMISS 0x00002000 +#define LmMnWAITSCB 0x00001000 +#define LmMnRLSSCB 0x00000800 +#define LmMnSAVECTX 0x00000400 +#define LmMnFETCHSG 0x00000200 +#define LmMnLOADCTX 0x00000100 +#define LmMnCFGICL 0x00000080 +#define LmMnCFGSATA 0x00000040 +#define LmMnCFGEXPSATA 0x00000020 +#define LmMnCFGCMPLT 0x00000010 +#define LmMnCFGRBUF 0x00000008 +#define LmMnSAVETTR 0x00000004 +#define LmMnCFGRDAT 0x00000002 +#define LmMnCFGHDR 0x00000001 + +#define LmMnINTEN(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x3C) + +#define EN_LmACKREQ 0x08000000 +#define EN_LmNAKREQ 0x04000000 +#define EN_LmMnXMTERR 0x02000000 +#define EN_LmM5OOBSVC 0x01000000 +#define EN_LmHWTINT 0x00800000 +#define EN_LmMnCTXDONE 0x00100000 +#define EN_LmM2REQMBXF 0x00080000 +#define EN_LmM2RSPMBXE 0x00040000 +#define EN_LmMnDMAERR 0x00020000 +#define EN_LmRCVPRIM 0x00010000 +#define EN_LmRCVERR 0x00008000 +#define EN_LmADDRRCV 0x00004000 +#define EN_LmMnHDRMISS 0x00002000 +#define EN_LmMnWAITSCB 0x00001000 +#define EN_LmMnRLSSCB 0x00000800 +#define EN_LmMnSAVECTX 0x00000400 +#define EN_LmMnFETCHSG 0x00000200 +#define EN_LmMnLOADCTX 0x00000100 +#define EN_LmMnCFGICL 0x00000080 +#define EN_LmMnCFGSATA 0x00000040 +#define EN_LmMnCFGEXPSATA 0x00000020 +#define EN_LmMnCFGCMPLT 0x00000010 +#define EN_LmMnCFGRBUF 0x00000008 +#define EN_LmMnSAVETTR 0x00000004 +#define EN_LmMnCFGRDAT 0x00000002 +#define EN_LmMnCFGHDR 0x00000001 + +#define LmM0INTEN_MASK (EN_LmMnCFGCMPLT | EN_LmMnCFGRBUF | \ + EN_LmMnSAVETTR | EN_LmMnCFGRDAT | \ + EN_LmMnCFGHDR | EN_LmRCVERR | \ + EN_LmADDRRCV | EN_LmMnHDRMISS | \ + EN_LmMnRLSSCB | EN_LmMnSAVECTX | \ + EN_LmMnFETCHSG | EN_LmMnLOADCTX | \ + EN_LmHWTINT | EN_LmMnCTXDONE | \ + EN_LmRCVPRIM | EN_LmMnCFGSATA | \ + EN_LmMnCFGEXPSATA | EN_LmMnDMAERR) + +#define LmM1INTEN_MASK (EN_LmMnCFGCMPLT | EN_LmADDRRCV | \ + EN_LmMnRLSSCB | EN_LmMnSAVECTX | \ + EN_LmMnFETCHSG | EN_LmMnLOADCTX | \ + EN_LmMnXMTERR | EN_LmHWTINT | \ + EN_LmMnCTXDONE | EN_LmRCVPRIM | \ + EN_LmRCVERR | EN_LmMnDMAERR) + +#define LmM2INTEN_MASK (EN_LmADDRRCV | EN_LmHWTINT | \ + EN_LmM2REQMBXF | EN_LmRCVPRIM | \ + EN_LmRCVERR) + +#define LmM5INTEN_MASK (EN_LmADDRRCV | EN_LmM5OOBSVC | \ + EN_LmHWTINT | EN_LmRCVPRIM | \ + EN_LmRCVERR) + +#define LmXMTPRIMD(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x40) + +#define LmXMTPRIMCS(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x44) + +#define LmCONSTAT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x45) + +#define LmMnDMAERRS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x46) + +#define LmMnSGDMAERRS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x47) + +#define LmM0EXPHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x48) + +#define LmM1SASALIGN(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x48) +#define SAS_ALIGN_DEFAULT 0xFF + +#define LmM0MSKHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x49) + +#define LmM1STPALIGN(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x49) +#define STP_ALIGN_DEFAULT 0x1F + +#define LmM0RCVHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4A) + +#define LmM1XMTHDRP(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4A) + +#define LmM0ICLADR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4B) + +#define LmM1ALIGNMODE(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4B) + +#define LmDISALIGN 0x20 +#define LmROTSTPALIGN 0x10 +#define LmSTPALIGN 0x08 +#define LmROTNOTIFY 0x04 +#define LmDUALALIGN 0x02 +#define LmROTALIGN 0x01 + +#define LmM0EXPRCVNT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4C) + +#define LmM1XMTCNT(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4C) + +#define LmMnBUFSTAT(LinkNum, Mode) 
LmSEQ_PHY_REG(Mode, LinkNum, 0x4E) + +#define LmMnBUFPERR 0x01 + +/* mode 0-1 */ +#define LmMnXFRLVL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x59) + +#define LmMnXFRLVL_128 0x05 +#define LmMnXFRLVL_256 0x04 +#define LmMnXFRLVL_512 0x03 +#define LmMnXFRLVL_1024 0x02 +#define LmMnXFRLVL_1536 0x01 +#define LmMnXFRLVL_2048 0x00 + + /* mode 0-1 */ +#define LmMnSGDMACTL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5A) + +#define LmMnRESETSG 0x04 +#define LmMnSTOPSG 0x02 +#define LmMnSTARTSG 0x01 + +/* mode 0-1 */ +#define LmMnSGDMASTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5B) + +/* mode 0-1 */ +#define LmMnDDMACTL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5C) + +#define LmMnFLUSH 0x40 /* wo */ +#define LmMnRLSRTRY 0x20 /* wo */ +#define LmMnDISCARD 0x10 /* wo */ +#define LmMnRESETDAT 0x08 /* wo */ +#define LmMnSUSDAT 0x04 /* wo */ +#define LmMnSTOPDAT 0x02 /* wo */ +#define LmMnSTARTDAT 0x01 /* wo */ + +/* mode 0-1 */ +#define LmMnDDMASTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5D) + +#define LmMnDPEMPTY 0x80 +#define LmMnFLUSHING 0x40 +#define LmMnDDMAREQ 0x20 +#define LmMnHDMAREQ 0x10 +#define LmMnDATFREE 0x08 +#define LmMnDATSUS 0x04 +#define LmMnDATACT 0x02 +#define LmMnDATEN 0x01 + +/* mode 0-1 */ +#define LmMnDDMAMODE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5E) + +#define LmMnDMATYPE_NORMAL 0x0000 +#define LmMnDMATYPE_HOST_ONLY_TX 0x0001 +#define LmMnDMATYPE_DEVICE_ONLY_TX 0x0002 +#define LmMnDMATYPE_INVALID 0x0003 +#define LmMnDMATYPE_MASK 0x0003 + +#define LmMnDMAWRAP 0x0004 +#define LmMnBITBUCKET 0x0008 +#define LmMnDISHDR 0x0010 +#define LmMnSTPCRC 0x0020 +#define LmXTEST 0x0040 +#define LmMnDISCRC 0x0080 +#define LmMnENINTLK 0x0100 +#define LmMnADDRFRM 0x0400 +#define LmMnENXMTCRC 0x0800 + +/* mode 0-1 */ +#define LmMnXFRCNT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x70) + +/* mode 0-1 */ +#define LmMnDPSEL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7B) +#define LmMnDPSEL_MASK 0x07 +#define LmMnEOLPRE 0x40 +#define LmMnEOSPRE 0x80 + +/* Registers used in conjunction with LmMnDPSEL and LmMnDPACC registers */ +/* Receive Mode n = 0 */ +#define LmMnHRADDR 0x00 +#define LmMnHBYTECNT 0x01 +#define LmMnHREWIND 0x02 +#define LmMnDWADDR 0x03 +#define LmMnDSPACECNT 0x04 +#define LmMnDFRMSIZE 0x05 + +/* Registers used in conjunction with LmMnDPSEL and LmMnDPACC registers */ +/* Transmit Mode n = 1 */ +#define LmMnHWADDR 0x00 +#define LmMnHSPACECNT 0x01 +/* #define LmMnHREWIND 0x02 */ +#define LmMnDRADDR 0x03 +#define LmMnDBYTECNT 0x04 +/* #define LmMnDFRMSIZE 0x05 */ + +/* mode 0-1 */ +#define LmMnDPACC(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x78) +#define LmMnDPACC_MASK 0x00FFFFFF + +/* mode 0-1 */ +#define LmMnHOLDLVL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7D) + +#define LmPRMSTAT0(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x80) +#define LmPRMSTAT0BYTE0 0x80 +#define LmPRMSTAT0BYTE1 0x81 +#define LmPRMSTAT0BYTE2 0x82 +#define LmPRMSTAT0BYTE3 0x83 + +#define LmFRAMERCVD 0x80000000 +#define LmXFRRDYRCVD 0x40000000 +#define LmUNKNOWNP 0x20000000 +#define LmBREAK 0x10000000 +#define LmDONE 0x08000000 +#define LmOPENACPT 0x04000000 +#define LmOPENRJCT 0x02000000 +#define LmOPENRTRY 0x01000000 +#define LmCLOSERV1 0x00800000 +#define LmCLOSERV0 0x00400000 +#define LmCLOSENORM 0x00200000 +#define LmCLOSECLAF 0x00100000 +#define LmNOTIFYRV2 0x00080000 +#define LmNOTIFYRV1 0x00040000 +#define LmNOTIFYRV0 0x00020000 +#define LmNOTIFYSPIN 0x00010000 +#define LmBROADRV4 0x00008000 +#define LmBROADRV3 0x00004000 +#define LmBROADRV2 0x00002000 +#define LmBROADRV1 
0x00001000 +#define LmBROADSES 0x00000800 +#define LmBROADRVCH1 0x00000400 +#define LmBROADRVCH0 0x00000200 +#define LmBROADCH 0x00000100 +#define LmAIPRVWP 0x00000080 +#define LmAIPWP 0x00000040 +#define LmAIPWD 0x00000020 +#define LmAIPWC 0x00000010 +#define LmAIPRV2 0x00000008 +#define LmAIPRV1 0x00000004 +#define LmAIPRV0 0x00000002 +#define LmAIPNRML 0x00000001 + +#define LmBROADCAST_MASK (LmBROADCH | LmBROADRVCH0 | \ + LmBROADRVCH1) + +#define LmPRMSTAT1(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x84) +#define LmPRMSTAT1BYTE0 0x84 +#define LmPRMSTAT1BYTE1 0x85 +#define LmPRMSTAT1BYTE2 0x86 +#define LmPRMSTAT1BYTE3 0x87 + +#define LmFRMRCVDSTAT 0x80000000 +#define LmBREAK_DET 0x04000000 +#define LmCLOSE_DET 0x02000000 +#define LmDONE_DET 0x01000000 +#define LmXRDY 0x00040000 +#define LmSYNCSRST 0x00020000 +#define LmSYNC 0x00010000 +#define LmXHOLD 0x00008000 +#define LmRRDY 0x00004000 +#define LmHOLD 0x00002000 +#define LmROK 0x00001000 +#define LmRIP 0x00000800 +#define LmCRBLK 0x00000400 +#define LmACK 0x00000200 +#define LmNAK 0x00000100 +#define LmHARDRST 0x00000080 +#define LmERROR 0x00000040 +#define LmRERR 0x00000020 +#define LmPMREQP 0x00000010 +#define LmPMREQS 0x00000008 +#define LmPMACK 0x00000004 +#define LmPMNAK 0x00000002 +#define LmDMAT 0x00000001 + +/* mode 1 */ +#define LmMnSATAFS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7E) +#define LmMnXMTSIZE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x93) + +/* mode 0 */ +#define LmMnFRMERR(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xB0) + +#define LmACRCERR 0x00000800 +#define LmPHYOVRN 0x00000400 +#define LmOBOVRN 0x00000200 +#define LmMnZERODATA 0x00000100 +#define LmSATAINTLK 0x00000080 +#define LmMnCRCERR 0x00000020 +#define LmRRDYOVRN 0x00000010 +#define LmMISSSOAF 0x00000008 +#define LmMISSSOF 0x00000004 +#define LmMISSEOAF 0x00000002 +#define LmMISSEOF 0x00000001 + +#define LmFRMERREN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xB4) + +#define EN_LmACRCERR 0x00000800 +#define EN_LmPHYOVRN 0x00000400 +#define EN_LmOBOVRN 0x00000200 +#define EN_LmMnZERODATA 0x00000100 +#define EN_LmSATAINTLK 0x00000080 +#define EN_LmFRMBAD 0x00000040 +#define EN_LmMnCRCERR 0x00000020 +#define EN_LmRRDYOVRN 0x00000010 +#define EN_LmMISSSOAF 0x00000008 +#define EN_LmMISSSOF 0x00000004 +#define EN_LmMISSEOAF 0x00000002 +#define EN_LmMISSEOF 0x00000001 + +#define LmFRMERREN_MASK (EN_LmSATAINTLK | EN_LmMnCRCERR | \ + EN_LmRRDYOVRN | EN_LmMISSSOF | \ + EN_LmMISSEOAF | EN_LmMISSEOF | \ + EN_LmACRCERR | LmPHYOVRN | \ + EN_LmOBOVRN | EN_LmMnZERODATA) + +#define LmHWTSTATEN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xC5) + +#define EN_LmDONETO 0x80 +#define EN_LmINVDISP 0x40 +#define EN_LmINVDW 0x20 +#define EN_LmDWSEVENT 0x08 +#define EN_LmCRTTTO 0x04 +#define EN_LmANTTTO 0x02 +#define EN_LmBITLTTO 0x01 + +#define LmHWTSTATEN_MASK (EN_LmINVDISP | EN_LmINVDW | \ + EN_LmDWSEVENT | EN_LmCRTTTO | \ + EN_LmANTTTO | EN_LmDONETO | \ + EN_LmBITLTTO) + +#define LmHWTSTAT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xC7) + +#define LmDONETO 0x80 +#define LmINVDISP 0x40 +#define LmINVDW 0x20 +#define LmDWSEVENT 0x08 +#define LmCRTTTO 0x04 +#define LmANTTTO 0x02 +#define LmBITLTTO 0x01 + +#define LmMnDATABUFADR(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xC8) +#define LmDATABUFADR_MASK 0x0FFF + +#define LmMnDATABUF(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xCA) + +#define LmPRIMSTAT0EN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE0) + +#define EN_LmUNKNOWNP 0x20000000 +#define EN_LmBREAK 0x10000000 +#define EN_LmDONE 0x08000000 +#define EN_LmOPENACPT 0x04000000 +#define EN_LmOPENRJCT 
0x02000000 +#define EN_LmOPENRTRY 0x01000000 +#define EN_LmCLOSERV1 0x00800000 +#define EN_LmCLOSERV0 0x00400000 +#define EN_LmCLOSENORM 0x00200000 +#define EN_LmCLOSECLAF 0x00100000 +#define EN_LmNOTIFYRV2 0x00080000 +#define EN_LmNOTIFYRV1 0x00040000 +#define EN_LmNOTIFYRV0 0x00020000 +#define EN_LmNOTIFYSPIN 0x00010000 +#define EN_LmBROADRV4 0x00008000 +#define EN_LmBROADRV3 0x00004000 +#define EN_LmBROADRV2 0x00002000 +#define EN_LmBROADRV1 0x00001000 +#define EN_LmBROADRV0 0x00000800 +#define EN_LmBROADRVCH1 0x00000400 +#define EN_LmBROADRVCH0 0x00000200 +#define EN_LmBROADCH 0x00000100 +#define EN_LmAIPRVWP 0x00000080 +#define EN_LmAIPWP 0x00000040 +#define EN_LmAIPWD 0x00000020 +#define EN_LmAIPWC 0x00000010 +#define EN_LmAIPRV2 0x00000008 +#define EN_LmAIPRV1 0x00000004 +#define EN_LmAIPRV0 0x00000002 +#define EN_LmAIPNRML 0x00000001 + +#define LmPRIMSTAT0EN_MASK (EN_LmBREAK | \ + EN_LmDONE | EN_LmOPENACPT | \ + EN_LmOPENRJCT | EN_LmOPENRTRY | \ + EN_LmCLOSERV1 | EN_LmCLOSERV0 | \ + EN_LmCLOSENORM | EN_LmCLOSECLAF | \ + EN_LmBROADRV4 | EN_LmBROADRV3 | \ + EN_LmBROADRV2 | EN_LmBROADRV1 | \ + EN_LmBROADRV0 | EN_LmBROADRVCH1 | \ + EN_LmBROADRVCH0 | EN_LmBROADCH | \ + EN_LmAIPRVWP | EN_LmAIPWP | \ + EN_LmAIPWD | EN_LmAIPWC | \ + EN_LmAIPRV2 | EN_LmAIPRV1 | \ + EN_LmAIPRV0 | EN_LmAIPNRML) + +#define LmPRIMSTAT1EN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE4) + +#define EN_LmXRDY 0x00040000 +#define EN_LmSYNCSRST 0x00020000 +#define EN_LmSYNC 0x00010000 +#define EN_LmXHOLD 0x00008000 +#define EN_LmRRDY 0x00004000 +#define EN_LmHOLD 0x00002000 +#define EN_LmROK 0x00001000 +#define EN_LmRIP 0x00000800 +#define EN_LmCRBLK 0x00000400 +#define EN_LmACK 0x00000200 +#define EN_LmNAK 0x00000100 +#define EN_LmHARDRST 0x00000080 +#define EN_LmERROR 0x00000040 +#define EN_LmRERR 0x00000020 +#define EN_LmPMREQP 0x00000010 +#define EN_LmPMREQS 0x00000008 +#define EN_LmPMACK 0x00000004 +#define EN_LmPMNAK 0x00000002 +#define EN_LmDMAT 0x00000001 + +#define LmPRIMSTAT1EN_MASK (EN_LmHARDRST | \ + EN_LmSYNCSRST | \ + EN_LmPMREQP | EN_LmPMREQS | \ + EN_LmPMACK | EN_LmPMNAK) + +#define LmSMSTATE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE8) + +#define LmSMSTATEBRK(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xEC) + +#define LmSMDBGCTL(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xF0) + + +/* + * LmSEQ CIO Bus Mode 3 Register. + * Mode 3: Configuration and Setup, IOP Context SCB. + */ +#define LmM3SATATIMER(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x48) + +#define LmM3INTVEC0(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x90) + +#define LmM3INTVEC1(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x92) + +#define LmM3INTVEC2(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x94) + +#define LmM3INTVEC3(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x96) + +#define LmM3INTVEC4(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x98) + +#define LmM3INTVEC5(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9A) + +#define LmM3INTVEC6(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9C) + +#define LmM3INTVEC7(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9E) + +#define LmM3INTVEC8(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xA4) + +#define LmM3INTVEC9(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xA6) + +#define LmM3INTVEC10(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xB0) + +#define LmM3FRMGAP(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xB4) + +#define LmBITL_TIMER(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xA2) + +#define LmWWN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xA8) + + +/* + * LmSEQ CIO Bus Mode 5 Registers. + * Mode 5: Phy/OOB Control and Status. 
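+ *
+ * These registers are reached through the LmSEQ_OOB_REG(phy_id, reg)
+ * wrapper defined just below, which simply selects mode 5 of the given
+ * phy's CIO space. As a minimal sketch (register names as defined
+ * below), reading the per-phy OOB state could look like:
+ *
+ *	u8 oob = asd_read_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, OOB_STATUS));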
+ */ +#define LmSEQ_OOB_REG(phy_id, reg) LmSEQ_PHY_REG(5, (phy_id), (reg)) + +#define OOB_BFLTR 0x100 + +#define BFLTR_THR_MASK 0xF0 +#define BFLTR_TC_MASK 0x0F + +#define OOB_INIT_MIN 0x102 + +#define OOB_INIT_MAX 0x104 + +#define OOB_INIT_NEG 0x106 + +#define OOB_SAS_MIN 0x108 + +#define OOB_SAS_MAX 0x10A + +#define OOB_SAS_NEG 0x10C + +#define OOB_WAKE_MIN 0x10E + +#define OOB_WAKE_MAX 0x110 + +#define OOB_WAKE_NEG 0x112 + +#define OOB_IDLE_MAX 0x114 + +#define OOB_BURST_MAX 0x116 + +#define OOB_DATA_KBITS 0x126 + +#define OOB_ALIGN_0_DATA 0x12C + +#define OOB_ALIGN_1_DATA 0x130 + +#define D10_2_DATA_k 0x00 +#define SYNC_DATA_k 0x02 +#define ALIGN_1_DATA_k 0x04 +#define ALIGN_0_DATA_k 0x08 +#define BURST_DATA_k 0x10 + +#define OOB_PHY_RESET_COUNT 0x13C + +#define OOB_SIG_GEN 0x140 + +#define START_OOB 0x80 +#define START_DWS 0x40 +#define ALIGN_CNT3 0x30 +#define ALIGN_CNT2 0x20 +#define ALIGN_CNT1 0x10 +#define ALIGN_CNT4 0x00 +#define STOP_DWS 0x08 +#define SEND_COMSAS 0x04 +#define SEND_COMINIT 0x02 +#define SEND_COMWAKE 0x01 + +#define OOB_XMIT 0x141 + +#define TX_ENABLE 0x80 +#define XMIT_OOB_BURST 0x10 +#define XMIT_D10_2 0x08 +#define XMIT_SYNC 0x04 +#define XMIT_ALIGN_1 0x02 +#define XMIT_ALIGN_0 0x01 + +#define FUNCTION_MASK 0x142 + +#define SAS_MODE_DIS 0x80 +#define SATA_MODE_DIS 0x40 +#define SPINUP_HOLD_DIS 0x20 +#define HOT_PLUG_DIS 0x10 +#define SATA_PS_DIS 0x08 +#define FUNCTION_MASK_DEFAULT (SPINUP_HOLD_DIS | SATA_PS_DIS) + +#define OOB_MODE 0x143 + +#define SAS_MODE 0x80 +#define SATA_MODE 0x40 +#define SLOW_CLK 0x20 +#define FORCE_XMIT_15 0x08 +#define PHY_SPEED_60 0x04 +#define PHY_SPEED_30 0x02 +#define PHY_SPEED_15 0x01 + +#define CURRENT_STATUS 0x144 + +#define CURRENT_OOB_DONE 0x80 +#define CURRENT_LOSS_OF_SIGNAL 0x40 +#define CURRENT_SPINUP_HOLD 0x20 +#define CURRENT_HOT_PLUG_CNCT 0x10 +#define CURRENT_GTO_TIMEOUT 0x08 +#define CURRENT_OOB_TIMEOUT 0x04 +#define CURRENT_DEVICE_PRESENT 0x02 +#define CURRENT_OOB_ERROR 0x01 + +#define CURRENT_OOB1_ERROR (CURRENT_HOT_PLUG_CNCT | \ + CURRENT_GTO_TIMEOUT) + +#define CURRENT_OOB2_ERROR (CURRENT_HOT_PLUG_CNCT | \ + CURRENT_OOB_ERROR) + +#define DEVICE_ADDED_W_CNT (CURRENT_OOB_DONE | \ + CURRENT_HOT_PLUG_CNCT | \ + CURRENT_DEVICE_PRESENT) + +#define DEVICE_ADDED_WO_CNT (CURRENT_OOB_DONE | \ + CURRENT_DEVICE_PRESENT) + +#define DEVICE_REMOVED CURRENT_LOSS_OF_SIGNAL + +#define CURRENT_PHY_MASK (CURRENT_OOB_DONE | \ + CURRENT_LOSS_OF_SIGNAL | \ + CURRENT_SPINUP_HOLD | \ + CURRENT_HOT_PLUG_CNCT | \ + CURRENT_GTO_TIMEOUT | \ + CURRENT_DEVICE_PRESENT | \ + CURRENT_OOB_ERROR ) + +#define CURRENT_ERR_MASK (CURRENT_LOSS_OF_SIGNAL | \ + CURRENT_GTO_TIMEOUT | \ + CURRENT_OOB_TIMEOUT | \ + CURRENT_OOB_ERROR ) + +#define SPEED_MASK 0x145 + +#define SATA_SPEED_30_DIS 0x10 +#define SATA_SPEED_15_DIS 0x08 +#define SAS_SPEED_60_DIS 0x04 +#define SAS_SPEED_30_DIS 0x02 +#define SAS_SPEED_15_DIS 0x01 +#define SAS_SPEED_MASK_DEFAULT 0x00 + +#define OOB_TIMER_ENABLE 0x14D + +#define HOT_PLUG_EN 0x80 +#define RCD_EN 0x40 +#define COMTIMER_EN 0x20 +#define SNTT_EN 0x10 +#define SNLT_EN 0x04 +#define SNWT_EN 0x02 +#define ALIGN_EN 0x01 + +#define OOB_STATUS 0x14E + +#define OOB_DONE 0x80 +#define LOSS_OF_SIGNAL 0x40 /* ro */ +#define SPINUP_HOLD 0x20 +#define HOT_PLUG_CNCT 0x10 /* ro */ +#define GTO_TIMEOUT 0x08 /* ro */ +#define OOB_TIMEOUT 0x04 /* ro */ +#define DEVICE_PRESENT 0x02 /* ro */ +#define OOB_ERROR 0x01 /* ro */ + +#define OOB_STATUS_ERROR_MASK (LOSS_OF_SIGNAL | GTO_TIMEOUT | \ + OOB_TIMEOUT | OOB_ERROR) + +#define OOB_STATUS_CLEAR 
0x14F + +#define OOB_DONE_CLR 0x80 +#define LOSS_OF_SIGNAL_CLR 0x40 +#define SPINUP_HOLD_CLR 0x20 +#define HOT_PLUG_CNCT_CLR 0x10 +#define GTO_TIMEOUT_CLR 0x08 +#define OOB_TIMEOUT_CLR 0x04 +#define OOB_ERROR_CLR 0x01 + +#define HOT_PLUG_DELAY 0x150 +/* In 5 ms units. 20 = 100 ms. */ +#define HOTPLUG_DELAY_TIMEOUT 20 + + +#define INT_ENABLE_2 0x15A + +#define OOB_DONE_EN 0x80 +#define LOSS_OF_SIGNAL_EN 0x40 +#define SPINUP_HOLD_EN 0x20 +#define HOT_PLUG_CNCT_EN 0x10 +#define GTO_TIMEOUT_EN 0x08 +#define OOB_TIMEOUT_EN 0x04 +#define DEVICE_PRESENT_EN 0x02 +#define OOB_ERROR_EN 0x01 + +#define PHY_CONTROL_0 0x160 + +#define PHY_LOWPWREN_TX 0x80 +#define PHY_LOWPWREN_RX 0x40 +#define SPARE_REG_160_B5 0x20 +#define OFFSET_CANCEL_RX 0x10 + +/* bits 3:2 */ +#define PHY_RXCOMCENTER_60V 0x00 +#define PHY_RXCOMCENTER_70V 0x04 +#define PHY_RXCOMCENTER_80V 0x08 +#define PHY_RXCOMCENTER_90V 0x0C +#define PHY_RXCOMCENTER_MASK 0x0C + +#define PHY_RESET 0x02 +#define SAS_DEFAULT_SEL 0x01 + +#define PHY_CONTROL_1 0x161 + +/* bits 2:0 */ +#define SATA_PHY_DETLEVEL_50mv 0x00 +#define SATA_PHY_DETLEVEL_75mv 0x01 +#define SATA_PHY_DETLEVEL_100mv 0x02 +#define SATA_PHY_DETLEVEL_125mv 0x03 +#define SATA_PHY_DETLEVEL_150mv 0x04 +#define SATA_PHY_DETLEVEL_175mv 0x05 +#define SATA_PHY_DETLEVEL_200mv 0x06 +#define SATA_PHY_DETLEVEL_225mv 0x07 +#define SATA_PHY_DETLEVEL_MASK 0x07 + +/* bits 5:3 */ +#define SAS_PHY_DETLEVEL_50mv 0x00 +#define SAS_PHY_DETLEVEL_75mv 0x08 +#define SAS_PHY_DETLEVEL_100mv 0x10 +#define SAS_PHY_DETLEVEL_125mv 0x11 +#define SAS_PHY_DETLEVEL_150mv 0x20 +#define SAS_PHY_DETLEVEL_175mv 0x21 +#define SAS_PHY_DETLEVEL_200mv 0x30 +#define SAS_PHY_DETLEVEL_225mv 0x31 +#define SAS_PHY_DETLEVEL_MASK 0x38 + +#define PHY_CONTROL_2 0x162 + +/* bits 7:5 */ +#define SATA_PHY_DRV_400mv 0x00 +#define SATA_PHY_DRV_450mv 0x20 +#define SATA_PHY_DRV_500mv 0x40 +#define SATA_PHY_DRV_550mv 0x60 +#define SATA_PHY_DRV_600mv 0x80 +#define SATA_PHY_DRV_650mv 0xA0 +#define SATA_PHY_DRV_725mv 0xC0 +#define SATA_PHY_DRV_800mv 0xE0 +#define SATA_PHY_DRV_MASK 0xE0 + +/* bits 4:3 */ +#define SATA_PREEMP_0 0x00 +#define SATA_PREEMP_1 0x08 +#define SATA_PREEMP_2 0x10 +#define SATA_PREEMP_3 0x18 +#define SATA_PREEMP_MASK 0x18 + +#define SATA_CMSH1P5 0x04 + +/* bits 1:0 */ +#define SATA_SLEW_0 0x00 +#define SATA_SLEW_1 0x01 +#define SATA_SLEW_2 0x02 +#define SATA_SLEW_3 0x03 +#define SATA_SLEW_MASK 0x03 + +#define PHY_CONTROL_3 0x163 + +/* bits 7:5 */ +#define SAS_PHY_DRV_400mv 0x00 +#define SAS_PHY_DRV_450mv 0x20 +#define SAS_PHY_DRV_500mv 0x40 +#define SAS_PHY_DRV_550mv 0x60 +#define SAS_PHY_DRV_600mv 0x80 +#define SAS_PHY_DRV_650mv 0xA0 +#define SAS_PHY_DRV_725mv 0xC0 +#define SAS_PHY_DRV_800mv 0xE0 +#define SAS_PHY_DRV_MASK 0xE0 + +/* bits 4:3 */ +#define SAS_PREEMP_0 0x00 +#define SAS_PREEMP_1 0x08 +#define SAS_PREEMP_2 0x10 +#define SAS_PREEMP_3 0x18 +#define SAS_PREEMP_MASK 0x18 + +#define SAS_CMSH1P5 0x04 + +/* bits 1:0 */ +#define SAS_SLEW_0 0x00 +#define SAS_SLEW_1 0x01 +#define SAS_SLEW_2 0x02 +#define SAS_SLEW_3 0x03 +#define SAS_SLEW_MASK 0x03 + +#define PHY_CONTROL_4 0x168 + +#define PHY_DONE_CAL_TX 0x80 +#define PHY_DONE_CAL_RX 0x40 +#define RX_TERM_LOAD_DIS 0x20 +#define TX_TERM_LOAD_DIS 0x10 +#define AUTO_TERM_CAL_DIS 0x08 +#define PHY_SIGDET_FLTR_EN 0x04 +#define OSC_FREQ 0x02 +#define PHY_START_CAL 0x01 + +/* + * HST_PCIX2 Registers, Address Range: (0x00-0xFC) + */ +#define PCIX_REG_BASE_ADR 0xB8040000 + +#define PCIC_VENDOR_ID 0x00 + +#define PCIC_DEVICE_ID 0x02 + +#define PCIC_COMMAND 0x04 + +#define 
INT_DIS 0x0400 +#define FBB_EN 0x0200 /* ro */ +#define SERR_EN 0x0100 +#define STEP_EN 0x0080 /* ro */ +#define PERR_EN 0x0040 +#define VGA_EN 0x0020 /* ro */ +#define MWI_EN 0x0010 +#define SPC_EN 0x0008 +#define MST_EN 0x0004 +#define MEM_EN 0x0002 +#define IO_EN 0x0001 + +#define PCIC_STATUS 0x06 + +#define PERR_DET 0x8000 +#define SERR_GEN 0x4000 +#define MABT_DET 0x2000 +#define TABT_DET 0x1000 +#define TABT_GEN 0x0800 +#define DPERR_DET 0x0100 +#define CAP_LIST 0x0010 +#define INT_STAT 0x0008 + +#define PCIC_DEVREV_ID 0x08 + +#define PCIC_CLASS_CODE 0x09 + +#define PCIC_CACHELINE_SIZE 0x0C + +#define PCIC_MBAR0 0x10 + +#define PCIC_MBAR0_OFFSET 0 + +#define PCIC_MBAR1 0x18 + +#define PCIC_MBAR1_OFFSET 2 + +#define PCIC_IOBAR 0x20 + +#define PCIC_IOBAR_OFFSET 4 + +#define PCIC_SUBVENDOR_ID 0x2C + +#define PCIC_SUBSYTEM_ID 0x2E + +#define PCIX_STATUS 0x44 +#define RCV_SCE 0x20000000 +#define UNEXP_SC 0x00080000 +#define SC_DISCARD 0x00040000 + +#define ECC_CTRL_STAT 0x48 +#define UNCOR_ECCERR 0x00000008 + +#define PCIC_PM_CSR 0x5C + +#define PWR_STATE_D0 0 +#define PWR_STATE_D1 1 /* not supported */ +#define PWR_STATE_D2 2 /* not supported */ +#define PWR_STATE_D3 3 + +#define PCIC_BASE1 0x6C /* internal use only */ + +#define BASE1_RSVD 0xFFFFFFF8 + +#define PCIC_BASEA 0x70 /* internal use only */ + +#define BASEA_RSVD 0xFFFFFFC0 +#define BASEA_START 0 + +#define PCIC_BASEB 0x74 /* internal use only */ + +#define BASEB_RSVD 0xFFFFFF80 +#define BASEB_IOMAP_MASK 0x7F +#define BASEB_START 0x80 + +#define PCIC_BASEC 0x78 /* internal use only */ + +#define BASEC_RSVD 0xFFFFFFFC +#define BASEC_MASK 0x03 +#define BASEC_START 0x58 + +#define PCIC_MBAR_KEY 0x7C /* internal use only */ + +#define MBAR_KEY_MASK 0xFFFFFFFF + +#define PCIC_HSTPCIX_CNTRL 0xA0 + +#define REWIND_DIS 0x0800 +#define SC_TMR_DIS 0x04000000 + +#define PCIC_MBAR0_MASK 0xA8 +#define PCIC_MBAR0_SIZE_MASK 0x1FFFE000 +#define PCIC_MBAR0_SIZE_SHIFT 13 +#define PCIC_MBAR0_SIZE(val) \ + (((val) & PCIC_MBAR0_SIZE_MASK) >> PCIC_MBAR0_SIZE_SHIFT) + +#define PCIC_FLASH_MBAR 0xB8 + +#define PCIC_INTRPT_STAT 0xD4 + +#define PCIC_TP_CTRL 0xFC + +/* + * EXSI Registers, Address Range: (0x00-0xFC) + */ +#define EXSI_REG_BASE_ADR REG_BASE_ADDR_EXSI + +#define EXSICNFGR (EXSI_REG_BASE_ADR + 0x00) + +#define OCMINITIALIZED 0x80000000 +#define ASIEN 0x00400000 +#define HCMODE 0x00200000 +#define PCIDEF 0x00100000 +#define COMSTOCK 0x00080000 +#define SEEPROMEND 0x00040000 +#define MSTTIMEN 0x00020000 +#define XREGEX 0x00000200 +#define NVRAMW 0x00000100 +#define NVRAMEX 0x00000080 +#define SRAMW 0x00000040 +#define SRAMEX 0x00000020 +#define FLASHW 0x00000010 +#define FLASHEX 0x00000008 +#define SEEPROMCFG 0x00000004 +#define SEEPROMTYP 0x00000002 +#define SEEPROMEX 0x00000001 + + +#define EXSICNTRLR (EXSI_REG_BASE_ADR + 0x04) + +#define MODINT_EN 0x00000001 + + +#define PMSTATR (EXSI_REG_BASE_ADR + 0x10) + +#define FLASHRST 0x00000002 +#define FLASHRDY 0x00000001 + + +#define FLCNFGR (EXSI_REG_BASE_ADR + 0x14) + +#define FLWEH_MASK 0x30000000 +#define FLWESU_MASK 0x0C000000 +#define FLWEPW_MASK 0x03F00000 +#define FLOEH_MASK 0x000C0000 +#define FLOESU_MASK 0x00030000 +#define FLOEPW_MASK 0x0000FC00 +#define FLCSH_MASK 0x00000300 +#define FLCSSU_MASK 0x000000C0 +#define FLCSPW_MASK 0x0000003F + +#define SRCNFGR (EXSI_REG_BASE_ADR + 0x18) + +#define SRWEH_MASK 0x30000000 +#define SRWESU_MASK 0x0C000000 +#define SRWEPW_MASK 0x03F00000 + +#define SROEH_MASK 0x000C0000 +#define SROESU_MASK 0x00030000 +#define SROEPW_MASK 0x0000FC00 +#define 
SRCSH_MASK 0x00000300 +#define SRCSSU_MASK 0x000000C0 +#define SRCSPW_MASK 0x0000003F + +#define NVCNFGR (EXSI_REG_BASE_ADR + 0x1C) + +#define NVWEH_MASK 0x30000000 +#define NVWESU_MASK 0x0C000000 +#define NVWEPW_MASK 0x03F00000 +#define NVOEH_MASK 0x000C0000 +#define NVOESU_MASK 0x00030000 +#define NVOEPW_MASK 0x0000FC00 +#define NVCSH_MASK 0x00000300 +#define NVCSSU_MASK 0x000000C0 +#define NVCSPW_MASK 0x0000003F + +#define XRCNFGR (EXSI_REG_BASE_ADR + 0x20) + +#define XRWEH_MASK 0x30000000 +#define XRWESU_MASK 0x0C000000 +#define XRWEPW_MASK 0x03F00000 +#define XROEH_MASK 0x000C0000 +#define XROESU_MASK 0x00030000 +#define XROEPW_MASK 0x0000FC00 +#define XRCSH_MASK 0x00000300 +#define XRCSSU_MASK 0x000000C0 +#define XRCSPW_MASK 0x0000003F + +#define XREGADDR (EXSI_REG_BASE_ADR + 0x24) + +#define XRADDRINCEN 0x80000000 +#define XREGADD_MASK 0x007FFFFF + + +#define XREGDATAR (EXSI_REG_BASE_ADR + 0x28) + +#define XREGDATA_MASK 0x0000FFFF + +#define GPIOOER (EXSI_REG_BASE_ADR + 0x40) + +#define GPIOODENR (EXSI_REG_BASE_ADR + 0x44) + +#define GPIOINVR (EXSI_REG_BASE_ADR + 0x48) + +#define GPIODATAOR (EXSI_REG_BASE_ADR + 0x4C) + +#define GPIODATAIR (EXSI_REG_BASE_ADR + 0x50) + +#define GPIOCNFGR (EXSI_REG_BASE_ADR + 0x54) + +#define GPIO_EXTSRC 0x00000001 + +#define SCNTRLR (EXSI_REG_BASE_ADR + 0xA0) + +#define SXFERDONE 0x00000100 +#define SXFERCNT_MASK 0x000000E0 +#define SCMDTYP_MASK 0x0000001C +#define SXFERSTART 0x00000002 +#define SXFEREN 0x00000001 + +#define SRATER (EXSI_REG_BASE_ADR + 0xA4) + +#define SADDRR (EXSI_REG_BASE_ADR + 0xA8) + +#define SADDR_MASK 0x0000FFFF + +#define SDATAOR (EXSI_REG_BASE_ADR + 0xAC) + +#define SDATAOR0 (EXSI_REG_BASE_ADR + 0xAC) +#define SDATAOR1 (EXSI_REG_BASE_ADR + 0xAD) +#define SDATAOR2 (EXSI_REG_BASE_ADR + 0xAE) +#define SDATAOR3 (EXSI_REG_BASE_ADR + 0xAF) + +#define SDATAIR (EXSI_REG_BASE_ADR + 0xB0) + +#define SDATAIR0 (EXSI_REG_BASE_ADR + 0xB0) +#define SDATAIR1 (EXSI_REG_BASE_ADR + 0xB1) +#define SDATAIR2 (EXSI_REG_BASE_ADR + 0xB2) +#define SDATAIR3 (EXSI_REG_BASE_ADR + 0xB3) + +#define ASISTAT0R (EXSI_REG_BASE_ADR + 0xD0) +#define ASIFMTERR 0x00000400 +#define ASISEECHKERR 0x00000200 +#define ASIERR 0x00000100 + +#define ASISTAT1R (EXSI_REG_BASE_ADR + 0xD4) +#define CHECKSUM_MASK 0x0000FFFF + +#define ASIERRADDR (EXSI_REG_BASE_ADR + 0xD8) +#define ASIERRDATAR (EXSI_REG_BASE_ADR + 0xDC) +#define ASIERRSTATR (EXSI_REG_BASE_ADR + 0xE0) +#define CPI2ASIBYTECNT_MASK 0x00070000 +#define CPI2ASIBYTEEN_MASK 0x0000F000 +#define CPI2ASITARGERR_MASK 0x00000F00 +#define CPI2ASITARGMID_MASK 0x000000F0 +#define CPI2ASIMSTERR_MASK 0x0000000F + +/* + * XSRAM, External SRAM (DWord and any BE pattern accessible) + */ +#define XSRAM_REG_BASE_ADDR 0xB8100000 +#define XSRAM_SIZE 0x100000 + +/* + * NVRAM Registers, Address Range: (0x00000 - 0x3FFFF). + */ +#define NVRAM_REG_BASE_ADR 0xBF800000 +#define NVRAM_MAX_BASE_ADR 0x003FFFFF + +/* OCM base address */ +#define OCM_BASE_ADDR 0xA0000000 +#define OCM_MAX_SIZE 0x20000 + +/* + * Sequencers (Central and Link) Scratch RAM page definitions. + */ + +/* + * The Central Management Sequencer (CSEQ) Scratch Memory is a 1024 + * byte memory. It is dword accessible and has byte parity + * protection. The CSEQ accesses it in 32 byte windows, either as mode + * dependent or mode independent memory. Each mode has 96 bytes, + * (three 32 byte pages 0-2, not contiguous), leaving 128 bytes of + * Mode Independent memory (four 32 byte pages 3-7). 
Note that mode + * dependent scratch memory, Mode 8, page 0-3 overlaps mode + * independent scratch memory, pages 0-3. + * - 896 bytes of mode dependent scratch, 96 bytes per Modes 0-7, and + * 128 bytes in mode 8, + * - 259 bytes of mode independent scratch, common to modes 0-15. + * + * Sequencer scratch RAM is 1024 bytes. This scratch memory is + * divided into mode dependent and mode independent scratch with this + * memory further subdivided into pages of size 32 bytes. There are 5 + * pages (160 bytes) of mode independent scratch and 3 pages of + * dependent scratch memory for modes 0-7 (768 bytes). Mode 8 pages + * 0-2 dependent scratch overlap with pages 0-2 of mode independent + * scratch memory. + * + * The host accesses this scratch in a different manner from the + * central sequencer. The sequencer has to use CSEQ registers CSCRPAGE + * and CMnSCRPAGE to access the scratch memory. A flat mapping of the + * scratch memory is available for software convenience and to prevent + * corruption while the sequencer is running. This memory is mapped + * onto addresses 800h - BFFh, total of 400h bytes. + * + * These addresses are mapped as follows: + * + * 800h-83Fh Mode Dependent Scratch Mode 0 Pages 0-1 + * 840h-87Fh Mode Dependent Scratch Mode 1 Pages 0-1 + * 880h-8BFh Mode Dependent Scratch Mode 2 Pages 0-1 + * 8C0h-8FFh Mode Dependent Scratch Mode 3 Pages 0-1 + * 900h-93Fh Mode Dependent Scratch Mode 4 Pages 0-1 + * 940h-97Fh Mode Dependent Scratch Mode 5 Pages 0-1 + * 980h-9BFh Mode Dependent Scratch Mode 6 Pages 0-1 + * 9C0h-9FFh Mode Dependent Scratch Mode 7 Pages 0-1 + * A00h-A5Fh Mode Dependent Scratch Mode 8 Pages 0-2 + * Mode Independent Scratch Pages 0-2 + * A60h-A7Fh Mode Dependent Scratch Mode 8 Page 3 + * Mode Independent Scratch Page 3 + * A80h-AFFh Mode Independent Scratch Pages 4-7 + * B00h-B1Fh Mode Dependent Scratch Mode 0 Page 2 + * B20h-B3Fh Mode Dependent Scratch Mode 1 Page 2 + * B40h-B5Fh Mode Dependent Scratch Mode 2 Page 2 + * B60h-B7Fh Mode Dependent Scratch Mode 3 Page 2 + * B80h-B9Fh Mode Dependent Scratch Mode 4 Page 2 + * BA0h-BBFh Mode Dependent Scratch Mode 5 Page 2 + * BC0h-BDFh Mode Dependent Scratch Mode 6 Page 2 + * BE0h-BFFh Mode Dependent Scratch Mode 7 Page 2 + */ + +/* General macros */ +#define CSEQ_PAGE_SIZE 32 /* Scratch page size (in bytes) */ + +/* All macros start with offsets from base + 0x800 (CMAPPEDSCR). + * Mode dependent scratch page 0, mode 0. + * For modes 1-7 you have to do arithmetic. */ +#define CSEQ_LRM_SAVE_SINDEX (CMAPPEDSCR + 0x0000) +#define CSEQ_LRM_SAVE_SCBPTR (CMAPPEDSCR + 0x0002) +#define CSEQ_Q_LINK_HEAD (CMAPPEDSCR + 0x0004) +#define CSEQ_Q_LINK_TAIL (CMAPPEDSCR + 0x0006) +#define CSEQ_LRM_SAVE_SCRPAGE (CMAPPEDSCR + 0x0008) + +/* Mode dependent scratch page 0 mode 8 macros. */ +#define CSEQ_RET_ADDR (CMAPPEDSCR + 0x0200) +#define CSEQ_RET_SCBPTR (CMAPPEDSCR + 0x0202) +#define CSEQ_SAVE_SCBPTR (CMAPPEDSCR + 0x0204) +#define CSEQ_EMPTY_TRANS_CTX (CMAPPEDSCR + 0x0206) +#define CSEQ_RESP_LEN (CMAPPEDSCR + 0x0208) +#define CSEQ_TMF_SCBPTR (CMAPPEDSCR + 0x020A) +#define CSEQ_GLOBAL_PREV_SCB (CMAPPEDSCR + 0x020C) +#define CSEQ_GLOBAL_HEAD (CMAPPEDSCR + 0x020E) +#define CSEQ_CLEAR_LU_HEAD (CMAPPEDSCR + 0x0210) +#define CSEQ_TMF_OPCODE (CMAPPEDSCR + 0x0212) +#define CSEQ_SCRATCH_FLAGS (CMAPPEDSCR + 0x0213) +#define CSEQ_HSB_SITE (CMAPPEDSCR + 0x021A) +#define CSEQ_FIRST_INV_SCB_SITE (CMAPPEDSCR + 0x021C) +#define CSEQ_FIRST_INV_DDB_SITE (CMAPPEDSCR + 0x021E) + +/* Mode dependent scratch page 1 mode 8 macros. 
*/ +#define CSEQ_LUN_TO_CLEAR (CMAPPEDSCR + 0x0220) +#define CSEQ_LUN_TO_CHECK (CMAPPEDSCR + 0x0228) + +/* Mode dependent scratch page 2 mode 8 macros */ +#define CSEQ_HQ_NEW_POINTER (CMAPPEDSCR + 0x0240) +#define CSEQ_HQ_DONE_BASE (CMAPPEDSCR + 0x0248) +#define CSEQ_HQ_DONE_POINTER (CMAPPEDSCR + 0x0250) +#define CSEQ_HQ_DONE_PASS (CMAPPEDSCR + 0x0254) + +/* Mode independent scratch page 4 macros. */ +#define CSEQ_Q_EXE_HEAD (CMAPPEDSCR + 0x0280) +#define CSEQ_Q_EXE_TAIL (CMAPPEDSCR + 0x0282) +#define CSEQ_Q_DONE_HEAD (CMAPPEDSCR + 0x0284) +#define CSEQ_Q_DONE_TAIL (CMAPPEDSCR + 0x0286) +#define CSEQ_Q_SEND_HEAD (CMAPPEDSCR + 0x0288) +#define CSEQ_Q_SEND_TAIL (CMAPPEDSCR + 0x028A) +#define CSEQ_Q_DMA2CHIM_HEAD (CMAPPEDSCR + 0x028C) +#define CSEQ_Q_DMA2CHIM_TAIL (CMAPPEDSCR + 0x028E) +#define CSEQ_Q_COPY_HEAD (CMAPPEDSCR + 0x0290) +#define CSEQ_Q_COPY_TAIL (CMAPPEDSCR + 0x0292) +#define CSEQ_REG0 (CMAPPEDSCR + 0x0294) +#define CSEQ_REG1 (CMAPPEDSCR + 0x0296) +#define CSEQ_REG2 (CMAPPEDSCR + 0x0298) +#define CSEQ_LINK_CTL_Q_MAP (CMAPPEDSCR + 0x029C) +#define CSEQ_MAX_CSEQ_MODE (CMAPPEDSCR + 0x029D) +#define CSEQ_FREE_LIST_HACK_COUNT (CMAPPEDSCR + 0x029E) + +/* Mode independent scratch page 5 macros. */ +#define CSEQ_EST_NEXUS_REQ_QUEUE (CMAPPEDSCR + 0x02A0) +#define CSEQ_EST_NEXUS_REQ_COUNT (CMAPPEDSCR + 0x02A8) +#define CSEQ_Q_EST_NEXUS_HEAD (CMAPPEDSCR + 0x02B0) +#define CSEQ_Q_EST_NEXUS_TAIL (CMAPPEDSCR + 0x02B2) +#define CSEQ_NEED_EST_NEXUS_SCB (CMAPPEDSCR + 0x02B4) +#define CSEQ_EST_NEXUS_REQ_HEAD (CMAPPEDSCR + 0x02B6) +#define CSEQ_EST_NEXUS_REQ_TAIL (CMAPPEDSCR + 0x02B7) +#define CSEQ_EST_NEXUS_SCB_OFFSET (CMAPPEDSCR + 0x02B8) + +/* Mode independent scratch page 6 macros. */ +#define CSEQ_INT_ROUT_RET_ADDR0 (CMAPPEDSCR + 0x02C0) +#define CSEQ_INT_ROUT_RET_ADDR1 (CMAPPEDSCR + 0x02C2) +#define CSEQ_INT_ROUT_SCBPTR (CMAPPEDSCR + 0x02C4) +#define CSEQ_INT_ROUT_MODE (CMAPPEDSCR + 0x02C6) +#define CSEQ_ISR_SCRATCH_FLAGS (CMAPPEDSCR + 0x02C7) +#define CSEQ_ISR_SAVE_SINDEX (CMAPPEDSCR + 0x02C8) +#define CSEQ_ISR_SAVE_DINDEX (CMAPPEDSCR + 0x02CA) +#define CSEQ_Q_MONIRTT_HEAD (CMAPPEDSCR + 0x02D0) +#define CSEQ_Q_MONIRTT_TAIL (CMAPPEDSCR + 0x02D2) +#define CSEQ_FREE_SCB_MASK (CMAPPEDSCR + 0x02D5) +#define CSEQ_BUILTIN_FREE_SCB_HEAD (CMAPPEDSCR + 0x02D6) +#define CSEQ_BUILTIN_FREE_SCB_TAIL (CMAPPEDSCR + 0x02D8) +#define CSEQ_EXTENDED_FREE_SCB_HEAD (CMAPPEDSCR + 0x02DA) +#define CSEQ_EXTENDED_FREE_SCB_TAIL (CMAPPEDSCR + 0x02DC) + +/* Mode independent scratch page 7 macros. */ +#define CSEQ_EMPTY_REQ_QUEUE (CMAPPEDSCR + 0x02E0) +#define CSEQ_EMPTY_REQ_COUNT (CMAPPEDSCR + 0x02E8) +#define CSEQ_Q_EMPTY_HEAD (CMAPPEDSCR + 0x02F0) +#define CSEQ_Q_EMPTY_TAIL (CMAPPEDSCR + 0x02F2) +#define CSEQ_NEED_EMPTY_SCB (CMAPPEDSCR + 0x02F4) +#define CSEQ_EMPTY_REQ_HEAD (CMAPPEDSCR + 0x02F6) +#define CSEQ_EMPTY_REQ_TAIL (CMAPPEDSCR + 0x02F7) +#define CSEQ_EMPTY_SCB_OFFSET (CMAPPEDSCR + 0x02F8) +#define CSEQ_PRIMITIVE_DATA (CMAPPEDSCR + 0x02FA) +#define CSEQ_TIMEOUT_CONST (CMAPPEDSCR + 0x02FC) + +/*************************************************************************** +* Link m Sequencer scratch RAM is 512 bytes. +* This scratch memory is divided into mode dependent and mode +* independent scratch with this memory further subdivided into +* pages of size 32 bytes. There are 4 pages (128 bytes) of +* mode independent scratch and 4 pages of dependent scratch +* memory for modes 0-2 (384 bytes). +* +* The host accesses this scratch in a different manner from the +* link sequencer. 
The sequencer has to use LSEQ registers +* LmSCRPAGE and LmMnSCRPAGE to access the scratch memory. A flat +* mapping of the scratch memory is available for software +* convenience and to prevent corruption while the sequencer is +* running. This memory is mapped onto addresses 800h - 9FFh. +* +* These addresses are mapped as follows: +* +* 800h-85Fh Mode Dependent Scratch Mode 0 Pages 0-2 +* 860h-87Fh Mode Dependent Scratch Mode 0 Page 3 +* Mode Dependent Scratch Mode 5 Page 0 +* 880h-8DFh Mode Dependent Scratch Mode 1 Pages 0-2 +* 8E0h-8FFh Mode Dependent Scratch Mode 1 Page 3 +* Mode Dependent Scratch Mode 5 Page 1 +* 900h-95Fh Mode Dependent Scratch Mode 2 Pages 0-2 +* 960h-97Fh Mode Dependent Scratch Mode 2 Page 3 +* Mode Dependent Scratch Mode 5 Page 2 +* 980h-9DFh Mode Independent Scratch Pages 0-3 +* 9E0h-9FFh Mode Independent Scratch Page 3 +* Mode Dependent Scratch Mode 5 Page 3 +* +****************************************************************************/ +/* General macros */ +#define LSEQ_MODE_SCRATCH_SIZE 0x80 /* Size of scratch RAM per mode */ +#define LSEQ_PAGE_SIZE 0x20 /* Scratch page size (in bytes) */ +#define LSEQ_MODE5_PAGE0_OFFSET 0x60 + +/* Common mode dependent scratch page 0 macros for modes 0,1,2, and 5 */ +/* Indexed using LSEQ_MODE_SCRATCH_SIZE * mode, for modes 0,1,2. */ +#define LmSEQ_RET_ADDR(LinkNum) (LmSCRATCH(LinkNum) + 0x0000) +#define LmSEQ_REG0_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0002) +#define LmSEQ_MODE_FLAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x0004) + +/* Mode flag macros (byte 0) */ +#define SAS_SAVECTX_OCCURRED 0x80 +#define SAS_OOBSVC_OCCURRED 0x40 +#define SAS_OOB_DEVICE_PRESENT 0x20 +#define SAS_CFGHDR_OCCURRED 0x10 +#define SAS_RCV_INTS_ARE_DISABLED 0x08 +#define SAS_OOB_HOT_PLUG_CNCT 0x04 +#define SAS_AWAIT_OPEN_CONNECTION 0x02 +#define SAS_CFGCMPLT_OCCURRED 0x01 + +/* Mode flag macros (byte 1) */ +#define SAS_RLSSCB_OCCURRED 0x80 +#define SAS_FORCED_HEADER_MISS 0x40 + +#define LmSEQ_RET_ADDR2(LinkNum) (LmSCRATCH(LinkNum) + 0x0006) +#define LmSEQ_RET_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x0008) +#define LmSEQ_OPCODE_TO_CSEQ(LinkNum) (LmSCRATCH(LinkNum) + 0x000B) +#define LmSEQ_DATA_TO_CSEQ(LinkNum) (LmSCRATCH(LinkNum) + 0x000C) + +/* Mode dependent scratch page 0 macros for mode 0 (non-common) */ +/* Absolute offsets */ +#define LmSEQ_FIRST_INV_DDB_SITE(LinkNum) (LmSCRATCH(LinkNum) + 0x000E) +#define LmSEQ_EMPTY_TRANS_CTX(LinkNum) (LmSCRATCH(LinkNum) + 0x0010) +#define LmSEQ_RESP_LEN(LinkNum) (LmSCRATCH(LinkNum) + 0x0012) +#define LmSEQ_FIRST_INV_SCB_SITE(LinkNum) (LmSCRATCH(LinkNum) + 0x0014) +#define LmSEQ_INTEN_SAVE(LinkNum) (LmSCRATCH(LinkNum) + 0x0016) +#define LmSEQ_LINK_RST_FRM_LEN(LinkNum) (LmSCRATCH(LinkNum) + 0x001A) +#define LmSEQ_LINK_RST_PROTOCOL(LinkNum) (LmSCRATCH(LinkNum) + 0x001B) +#define LmSEQ_RESP_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x001C) +#define LmSEQ_LAST_LOADED_SGE(LinkNum) (LmSCRATCH(LinkNum) + 0x001D) +#define LmSEQ_SAVE_SCBPTR(LinkNum) (LmSCRATCH(LinkNum) + 0x001E) + +/* Mode dependent scratch page 0 macros for mode 1 (non-common) */ +/* Absolute offsets */ +#define LmSEQ_Q_XMIT_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x008E) +#define LmSEQ_M1_EMPTY_TRANS_CTX(LinkNum) (LmSCRATCH(LinkNum) + 0x0090) +#define LmSEQ_INI_CONN_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x0092) +#define LmSEQ_FAILED_OPEN_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x009A) +#define LmSEQ_XMIT_REQUEST_TYPE(LinkNum) (LmSCRATCH(LinkNum) + 0x009B) +#define LmSEQ_M1_RESP_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x009C) +#define LmSEQ_M1_LAST_LOADED_SGE(LinkNum) 
(LmSCRATCH(LinkNum) + 0x009D) +#define LmSEQ_M1_SAVE_SCBPTR(LinkNum) (LmSCRATCH(LinkNum) + 0x009E) + +/* Mode dependent scratch page 0 macros for mode 2 (non-common) */ +#define LmSEQ_PORT_COUNTER(LinkNum) (LmSCRATCH(LinkNum) + 0x010E) +#define LmSEQ_PM_TABLE_PTR(LinkNum) (LmSCRATCH(LinkNum) + 0x0110) +#define LmSEQ_SATA_INTERLOCK_TMR_SAVE(LinkNum) (LmSCRATCH(LinkNum) + 0x0112) +#define LmSEQ_IP_BITL(LinkNum) (LmSCRATCH(LinkNum) + 0x0114) +#define LmSEQ_COPY_SMP_CONN_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x0116) +#define LmSEQ_P0M2_OFFS1AH(LinkNum) (LmSCRATCH(LinkNum) + 0x011A) + +/* Mode dependent scratch page 0 macros for modes 4/5 (non-common) */ +/* Absolute offsets */ +#define LmSEQ_SAVED_OOB_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x006E) +#define LmSEQ_SAVED_OOB_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x006F) +#define LmSEQ_Q_LINK_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x0070) +#define LmSEQ_LINK_RST_ERR(LinkNum) (LmSCRATCH(LinkNum) + 0x0072) +#define LmSEQ_SAVED_OOB_SIGNALS(LinkNum) (LmSCRATCH(LinkNum) + 0x0073) +#define LmSEQ_SAS_RESET_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0074) +#define LmSEQ_LINK_RESET_RETRY_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0075) +#define LmSEQ_NUM_LINK_RESET_RETRIES(LinkNum) (LmSCRATCH(LinkNum) + 0x0076) +#define LmSEQ_OOB_INT_ENABLES(LinkNum) (LmSCRATCH(LinkNum) + 0x0078) +#define LmSEQ_NOTIFY_TIMER_DOWN_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x007A) +#define LmSEQ_NOTIFY_TIMER_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x007C) +#define LmSEQ_NOTIFY_TIMER_INITIAL_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x007E) + +/* Mode dependent scratch page 1, mode 0 and mode 1 */ +#define LmSEQ_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x0020) +#define LmSEQ_SG_LIST_PTR_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x0030) +#define LmSEQ_M1_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x00A0) +#define LmSEQ_M1_SG_LIST_PTR_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x00B0) + +/* Mode dependent scratch page 1 macros for mode 2 */ +/* Absolute offsets */ +#define LmSEQ_INVALID_DWORD_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0120) +#define LmSEQ_DISPARITY_ERROR_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0124) +#define LmSEQ_LOSS_OF_SYNC_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0128) + +/* Mode dependent scratch page 1 macros for mode 4/5 */ +#define LmSEQ_FRAME_TYPE_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E0) +#define LmSEQ_HASHED_DEST_ADDR_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E1) +#define LmSEQ_HASHED_SRC_ADDR_MASK_PRINT(LinkNum) (LmSCRATCH(LinkNum) + 0x00E4) +#define LmSEQ_HASHED_SRC_ADDR_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E5) +#define LmSEQ_NUM_FILL_BYTES_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00EB) +#define LmSEQ_TAG_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00F0) +#define LmSEQ_TARGET_PORT_XFER_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x00F2) +#define LmSEQ_DATA_OFFSET(LinkNum) (LmSCRATCH(LinkNum) + 0x00F4) + +/* Mode dependent scratch page 2 macros for mode 0 */ +/* Absolute offsets */ +#define LmSEQ_SMP_RCV_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0040) +#define LmSEQ_DEVICE_BITS(LinkNum) (LmSCRATCH(LinkNum) + 0x005B) +#define LmSEQ_SDB_DDB(LinkNum) (LmSCRATCH(LinkNum) + 0x005C) +#define LmSEQ_SDB_NUM_TAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x005E) +#define LmSEQ_SDB_CURR_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x005F) + +/* Mode dependent scratch page 2 macros for mode 1 */ +/* Absolute offsets */ +/* byte 0 bits 1-0 are domain select. 
*/ +#define LmSEQ_TX_ID_ADDR_FRAME(LinkNum) (LmSCRATCH(LinkNum) + 0x00C0) +#define LmSEQ_OPEN_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x00C8) +#define LmSEQ_SRST_AS_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x00CC) +#define LmSEQ_LAST_LOADED_SG_EL(LinkNum) (LmSCRATCH(LinkNum) + 0x00D4) + +/* Mode dependent scratch page 2 macros for mode 2 */ +/* Absolute offsets */ +#define LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0140) +#define LmSEQ_CLOSE_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0144) +#define LmSEQ_BREAK_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0148) +#define LmSEQ_DWS_RESET_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x014C) +#define LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(LinkNum) \ + (LmSCRATCH(LinkNum) + 0x0150) +#define LmSEQ_MCTL_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0154) + +/* Mode dependent scratch page 2 macros for mode 5 */ +#define LmSEQ_COMINIT_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0160) +#define LmSEQ_RCV_ID_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0164) +#define LmSEQ_RCV_FIS_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0168) +#define LmSEQ_DEV_PRES_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x016C) + +/* Mode dependent scratch page 3 macros for modes 0 and 1 */ +/* None defined */ + +/* Mode dependent scratch page 3 macros for modes 2 and 5 */ +/* None defined */ + +/* Mode Independent Scratch page 0 macros. */ +#define LmSEQ_Q_TGTXFR_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x0180) +#define LmSEQ_Q_TGTXFR_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x0182) +#define LmSEQ_LINK_NUMBER(LinkNum) (LmSCRATCH(LinkNum) + 0x0186) +#define LmSEQ_SCRATCH_FLAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x0187) +/* + * Currently only bit 0, SAS_DWSAQD, is used. + */ +#define SAS_DWSAQD 0x01 /* + * DWSSTATUS: DWSAQD + * bit las read in ISR. 
+ */ +#define LmSEQ_CONNECTION_STATE(LinkNum) (LmSCRATCH(LinkNum) + 0x0188) +/* Connection states (byte 0) */ +#define SAS_WE_OPENED_CS 0x01 +#define SAS_DEVICE_OPENED_CS 0x02 +#define SAS_WE_SENT_DONE_CS 0x04 +#define SAS_DEVICE_SENT_DONE_CS 0x08 +#define SAS_WE_SENT_CLOSE_CS 0x10 +#define SAS_DEVICE_SENT_CLOSE_CS 0x20 +#define SAS_WE_SENT_BREAK_CS 0x40 +#define SAS_DEVICE_SENT_BREAK_CS 0x80 +/* Connection states (byte 1) */ +#define SAS_OPN_TIMEOUT_OR_OPN_RJCT_CS 0x01 +#define SAS_AIP_RECEIVED_CS 0x02 +#define SAS_CREDIT_TIMEOUT_OCCURRED_CS 0x04 +#define SAS_ACKNAK_TIMEOUT_OCCURRED_CS 0x08 +#define SAS_SMPRSP_TIMEOUT_OCCURRED_CS 0x10 +#define SAS_DONE_TIMEOUT_OCCURRED_CS 0x20 +/* Connection states (byte 2) */ +#define SAS_SMP_RESPONSE_RECEIVED_CS 0x01 +#define SAS_INTLK_TIMEOUT_OCCURRED_CS 0x02 +#define SAS_DEVICE_SENT_DMAT_CS 0x04 +#define SAS_DEVICE_SENT_SYNCSRST_CS 0x08 +#define SAS_CLEARING_AFFILIATION_CS 0x20 +#define SAS_RXTASK_ACTIVE_CS 0x40 +#define SAS_TXTASK_ACTIVE_CS 0x80 +/* Connection states (byte 3) */ +#define SAS_PHY_LOSS_OF_SIGNAL_CS 0x01 +#define SAS_DWS_TIMER_EXPIRED_CS 0x02 +#define SAS_LINK_RESET_NOT_COMPLETE_CS 0x04 +#define SAS_PHY_DISABLED_CS 0x08 +#define SAS_LINK_CTL_TASK_ACTIVE_CS 0x10 +#define SAS_PHY_EVENT_TASK_ACTIVE_CS 0x20 +#define SAS_DEVICE_SENT_ID_FRAME_CS 0x40 +#define SAS_DEVICE_SENT_REG_FIS_CS 0x40 +#define SAS_DEVICE_SENT_HARD_RESET_CS 0x80 +#define SAS_PHY_IS_DOWN_FLAGS (SAS_PHY_LOSS_OF_SIGNAL_CS|\ + SAS_DWS_TIMER_EXPIRED_CS |\ + SAS_LINK_RESET_NOT_COMPLETE_CS|\ + SAS_PHY_DISABLED_CS) + +#define SAS_LINK_CTL_PHY_EVENT_FLAGS (SAS_LINK_CTL_TASK_ACTIVE_CS |\ + SAS_PHY_EVENT_TASK_ACTIVE_CS |\ + SAS_DEVICE_SENT_ID_FRAME_CS |\ + SAS_DEVICE_SENT_HARD_RESET_CS) + +#define LmSEQ_CONCTL(LinkNum) (LmSCRATCH(LinkNum) + 0x018C) +#define LmSEQ_CONSTAT(LinkNum) (LmSCRATCH(LinkNum) + 0x018E) +#define LmSEQ_CONNECTION_MODES(LinkNum) (LmSCRATCH(LinkNum) + 0x018F) +#define LmSEQ_REG1_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0192) +#define LmSEQ_REG2_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0194) +#define LmSEQ_REG3_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0196) +#define LmSEQ_REG0_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0198) + +/* Mode independent scratch page 1 macros. */ +#define LmSEQ_EST_NEXUS_SCBPTR0(LinkNum) (LmSCRATCH(LinkNum) + 0x01A0) +#define LmSEQ_EST_NEXUS_SCBPTR1(LinkNum) (LmSCRATCH(LinkNum) + 0x01A2) +#define LmSEQ_EST_NEXUS_SCBPTR2(LinkNum) (LmSCRATCH(LinkNum) + 0x01A4) +#define LmSEQ_EST_NEXUS_SCBPTR3(LinkNum) (LmSCRATCH(LinkNum) + 0x01A6) +#define LmSEQ_EST_NEXUS_SCB_OPCODE0(LinkNum) (LmSCRATCH(LinkNum) + 0x01A8) +#define LmSEQ_EST_NEXUS_SCB_OPCODE1(LinkNum) (LmSCRATCH(LinkNum) + 0x01A9) +#define LmSEQ_EST_NEXUS_SCB_OPCODE2(LinkNum) (LmSCRATCH(LinkNum) + 0x01AA) +#define LmSEQ_EST_NEXUS_SCB_OPCODE3(LinkNum) (LmSCRATCH(LinkNum) + 0x01AB) +#define LmSEQ_EST_NEXUS_SCB_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x01AC) +#define LmSEQ_EST_NEXUS_SCB_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01AD) +#define LmSEQ_EST_NEXUS_BUF_AVAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01AE) +#define LmSEQ_TIMEOUT_CONST(LinkNum) (LmSCRATCH(LinkNum) + 0x01B8) +#define LmSEQ_ISR_SAVE_SINDEX(LinkNum) (LmSCRATCH(LinkNum) + 0x01BC) +#define LmSEQ_ISR_SAVE_DINDEX(LinkNum) (LmSCRATCH(LinkNum) + 0x01BE) + +/* Mode independent scratch page 2 macros. 
*/ +#define LmSEQ_EMPTY_SCB_PTR0(LinkNum) (LmSCRATCH(LinkNum) + 0x01C0) +#define LmSEQ_EMPTY_SCB_PTR1(LinkNum) (LmSCRATCH(LinkNum) + 0x01C2) +#define LmSEQ_EMPTY_SCB_PTR2(LinkNum) (LmSCRATCH(LinkNum) + 0x01C4) +#define LmSEQ_EMPTY_SCB_PTR3(LinkNum) (LmSCRATCH(LinkNum) + 0x01C6) +#define LmSEQ_EMPTY_SCB_OPCD0(LinkNum) (LmSCRATCH(LinkNum) + 0x01C8) +#define LmSEQ_EMPTY_SCB_OPCD1(LinkNum) (LmSCRATCH(LinkNum) + 0x01C9) +#define LmSEQ_EMPTY_SCB_OPCD2(LinkNum) (LmSCRATCH(LinkNum) + 0x01CA) +#define LmSEQ_EMPTY_SCB_OPCD3(LinkNum) (LmSCRATCH(LinkNum) + 0x01CB) +#define LmSEQ_EMPTY_SCB_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x01CC) +#define LmSEQ_EMPTY_SCB_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01CD) +#define LmSEQ_EMPTY_BUFS_AVAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01CE) +#define LmSEQ_ATA_SCR_REGS(LinkNum) (LmSCRATCH(LinkNum) + 0x01D4) + +/* Mode independent scratch page 3 macros. */ +#define LmSEQ_DEV_PRES_TMR_TOUT_CONST(LinkNum) (LmSCRATCH(LinkNum) + 0x01E0) +#define LmSEQ_SATA_INTERLOCK_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01E4) +#define LmSEQ_STP_SHUTDOWN_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01E8) +#define LmSEQ_SRST_ASSERT_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01EC) +#define LmSEQ_RCV_FIS_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F0) +#define LmSEQ_ONE_MILLISEC_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F4) +#define LmSEQ_TEN_MS_COMINIT_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F8) +#define LmSEQ_SMP_RCV_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01FC) + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h new file mode 100644 index 000000000..3fe34cb96 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_sas.h @@ -0,0 +1,732 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Aic94xx SAS/SATA driver SAS definitions and hardware interface header file. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + */ + +#ifndef _AIC94XX_SAS_H_ +#define _AIC94XX_SAS_H_ + +#include + +/* ---------- DDBs ---------- */ +/* DDBs are device descriptor blocks which describe a device in the + * domain that this sequencer can maintain low-level connections for + * us. They are be 64 bytes. 
+ */ +#define ASD_MAX_DDBS 128 + +struct asd_ddb_ssp_smp_target_port { + u8 conn_type; /* byte 0 */ +#define DDB_TP_CONN_TYPE 0x81 /* Initiator port and addr frame type 0x01 */ + + u8 conn_rate; + __be16 init_conn_tag; + u8 dest_sas_addr[8]; /* bytes 4-11 */ + + __le16 send_queue_head; + u8 sq_suspended; + u8 ddb_type; /* DDB_TYPE_TARGET */ +#define DDB_TYPE_UNUSED 0xFF +#define DDB_TYPE_TARGET 0xFE +#define DDB_TYPE_INITIATOR 0xFD +#define DDB_TYPE_PM_PORT 0xFC + + __le16 _r_a; + __be16 awt_def; + + u8 compat_features; /* byte 20 */ + u8 pathway_blocked_count; + __be16 arb_wait_time; + __be32 more_compat_features; /* byte 24 */ + + u8 conn_mask; + u8 flags; /* concurrent conn:2,2 and open:0(1) */ +#define CONCURRENT_CONN_SUPP 0x04 +#define OPEN_REQUIRED 0x01 + + u16 _r_b; + __le16 exec_queue_tail; + __le16 send_queue_tail; + __le16 sister_ddb; + + __le16 _r_c; + + u8 max_concurrent_conn; + u8 num_concurrent_conn; + u8 num_contexts; + + u8 _r_d; + + __le16 active_task_count; + + u8 _r_e[9]; + + u8 itnl_reason; /* I_T nexus loss reason */ + + __le16 _r_f; + + __le16 itnl_timeout; +#define ITNL_TIMEOUT_CONST 0x7D0 /* 2 seconds */ + + __le32 itnl_timestamp; +} __attribute__ ((packed)); + +struct asd_ddb_stp_sata_target_port { + u8 conn_type; /* byte 0 */ + u8 conn_rate; + __be16 init_conn_tag; + u8 dest_sas_addr[8]; /* bytes 4-11 */ + + __le16 send_queue_head; + u8 sq_suspended; + u8 ddb_type; /* DDB_TYPE_TARGET */ + + __le16 _r_a; + + __be16 awt_def; + u8 compat_features; /* byte 20 */ + u8 pathway_blocked_count; + __be16 arb_wait_time; + __be32 more_compat_features; /* byte 24 */ + + u8 conn_mask; + u8 flags; /* concurrent conn:2,2 and open:0(1) */ +#define SATA_MULTIPORT 0x80 +#define SUPPORTS_AFFIL 0x40 +#define STP_AFFIL_POL 0x20 + + u8 _r_b; + u8 flags2; /* STP close policy:0 */ +#define STP_CL_POL_NO_TX 0x00 +#define STP_CL_POL_BTW_CMDS 0x01 + + __le16 exec_queue_tail; + __le16 send_queue_tail; + __le16 sister_ddb; + __le16 ata_cmd_scbptr; + __le32 sata_tag_alloc_mask; + __le16 active_task_count; + __le16 _r_c; + __le32 sata_sactive; + u8 num_sata_tags; + u8 sata_status; + u8 sata_ending_status; + u8 itnl_reason; /* I_T nexus loss reason */ + __le16 ncq_data_scb_ptr; + __le16 itnl_timeout; + __le32 itnl_timestamp; +} __attribute__ ((packed)); + +/* This struct asd_ddb_init_port, describes the device descriptor block + * of an initiator port (when the sequencer is operating in target mode). + * Bytes [0,11] and [20,27] are from the OPEN address frame. + * The sequencer allocates an initiator port DDB entry. 
+ */ +struct asd_ddb_init_port { + u8 conn_type; /* byte 0 */ + u8 conn_rate; + __be16 init_conn_tag; /* BE */ + u8 dest_sas_addr[8]; + __le16 send_queue_head; /* LE, byte 12 */ + u8 sq_suspended; + u8 ddb_type; /* DDB_TYPE_INITIATOR */ + __le16 _r_a; + __be16 awt_def; /* BE */ + u8 compat_features; + u8 pathway_blocked_count; + __be16 arb_wait_time; /* BE */ + __be32 more_compat_features; /* BE */ + u8 conn_mask; + u8 flags; /* == 5 */ + u16 _r_b; + __le16 exec_queue_tail; /* execution queue tail */ + __le16 send_queue_tail; + __le16 sister_ddb; + __le16 init_resp_timeout; /* initiator response timeout */ + __le32 _r_c; + __le16 active_tasks; /* active task count */ + __le16 init_list; /* initiator list link pointer */ + __le32 _r_d; + u8 max_conn_to[3]; /* from Conn-Disc mode page, in us, LE */ + u8 itnl_reason; /* I_T nexus loss reason */ + __le16 bus_inact_to; /* from Conn-Disc mode page, in 100 us, LE */ + __le16 itnl_to; /* from the Protocol Specific Port Ctrl MP */ + __le32 itnl_timestamp; +} __attribute__ ((packed)); + +/* This struct asd_ddb_sata_tag, describes a look-up table to be used + * by the sequencers. SATA II, IDENTIFY DEVICE data, word 76, bit 8: + * NCQ support. This table is used by the sequencers to find the + * corresponding SCB, given a SATA II tag value. + */ +struct asd_ddb_sata_tag { + __le16 scb_pointer[32]; +} __attribute__ ((packed)); + +/* This struct asd_ddb_sata_pm_table, describes a port number to + * connection handle look-up table. SATA targets attached to a port + * multiplier require a 4-bit port number value. There is one DDB + * entry of this type for each SATA port multiplier (sister DDB). + * Given a SATA PM port number, this table gives us the SATA PM Port + * DDB of the SATA port multiplier port (i.e. the SATA target + * discovered on the port). + */ +struct asd_ddb_sata_pm_table { + __le16 ddb_pointer[16]; + __le16 _r_a[16]; +} __attribute__ ((packed)); + +/* This struct asd_ddb_sata_pm_port, describes the SATA port multiplier + * port format DDB. + */ +struct asd_ddb_sata_pm_port { + u8 _r_a[15]; + u8 ddb_type; + u8 _r_b[13]; + u8 pm_port_flags; +#define PM_PORT_MASK 0xF0 +#define PM_PORT_SET 0x02 + u8 _r_c[6]; + __le16 sister_ddb; + __le16 ata_cmd_scbptr; + __le32 sata_tag_alloc_mask; + __le16 active_task_count; + __le16 parent_ddb; + __le32 sata_sactive; + u8 num_sata_tags; + u8 sata_status; + u8 sata_ending_status; + u8 _r_d[9]; +} __attribute__ ((packed)); + +/* This struct asd_ddb_seq_shared, describes a DDB shared by the + * central and link sequencers. port_map_by_links is indexed phy + * number [0,7]; each byte is a bit mask of all the phys that are in + * the same port as the indexed phy. + */ +struct asd_ddb_seq_shared { + __le16 q_free_ddb_head; + __le16 q_free_ddb_tail; + __le16 q_free_ddb_cnt; + __le16 q_used_ddb_head; + __le16 q_used_ddb_tail; + __le16 shared_mem_lock; + __le16 smp_conn_tag; + __le16 est_nexus_buf_cnt; + __le16 est_nexus_buf_thresh; + u32 _r_a; + u8 settable_max_contexts; + u8 _r_b[23]; + u8 conn_not_active; + u8 phy_is_up; + u8 _r_c[8]; + u8 port_map_by_links[8]; +} __attribute__ ((packed)); + +/* ---------- SG Element ---------- */ + +/* This struct sg_el, describes the hardware scatter gather buffer + * element. All entries are little endian. In an SCB, there are 2 of + * this, plus one more, called a link element of this indicating a + * sublist if needed. + * + * A link element has only the bus address set and the flags (DS) bit + * valid. The bus address points to the start of the sublist. 
+ * + * If a sublist is needed, then that sublist should also include the 2 + * sg_el embedded in the SCB, in which case next_sg_offset is 32, + * since sizeof(sg_el) = 16; EOS should be 1 and EOL 0 in this case. + */ +struct sg_el { + __le64 bus_addr; + __le32 size; + __le16 _r; + u8 next_sg_offs; + u8 flags; +#define ASD_SG_EL_DS_MASK 0x30 +#define ASD_SG_EL_DS_OCM 0x10 +#define ASD_SG_EL_DS_HM 0x00 +#define ASD_SG_EL_LIST_MASK 0xC0 +#define ASD_SG_EL_LIST_EOL 0x40 +#define ASD_SG_EL_LIST_EOS 0x80 +} __attribute__ ((packed)); + +/* ---------- SCBs ---------- */ + +/* An SCB (sequencer control block) is comprised of a common header + * and a task part, for a total of 128 bytes. All fields are in LE + * order, unless otherwise noted. + */ + +/* This struct scb_header, defines the SCB header format. + */ +struct scb_header { + __le64 next_scb; + __le16 index; /* transaction context */ + u8 opcode; +} __attribute__ ((packed)); + +/* SCB opcodes: Execution queue + */ +#define INITIATE_SSP_TASK 0x00 +#define INITIATE_LONG_SSP_TASK 0x01 +#define INITIATE_BIDIR_SSP_TASK 0x02 +#define SCB_ABORT_TASK 0x03 +#define INITIATE_SSP_TMF 0x04 +#define SSP_TARG_GET_DATA 0x05 +#define SSP_TARG_GET_DATA_GOOD 0x06 +#define SSP_TARG_SEND_RESP 0x07 +#define QUERY_SSP_TASK 0x08 +#define INITIATE_ATA_TASK 0x09 +#define INITIATE_ATAPI_TASK 0x0a +#define CONTROL_ATA_DEV 0x0b +#define INITIATE_SMP_TASK 0x0c +#define SMP_TARG_SEND_RESP 0x0f + +/* SCB opcodes: Send Queue + */ +#define SSP_TARG_SEND_DATA 0x40 +#define SSP_TARG_SEND_DATA_GOOD 0x41 + +/* SCB opcodes: Link Queue + */ +#define CONTROL_PHY 0x80 +#define SEND_PRIMITIVE 0x81 +#define INITIATE_LINK_ADM_TASK 0x82 + +/* SCB opcodes: other + */ +#define EMPTY_SCB 0xc0 +#define INITIATE_SEQ_ADM_TASK 0xc1 +#define EST_ICL_TARG_WINDOW 0xc2 +#define COPY_MEM 0xc3 +#define CLEAR_NEXUS 0xc4 +#define INITIATE_DDB_ADM_TASK 0xc6 +#define ESTABLISH_NEXUS_ESCB 0xd0 + +#define LUN_SIZE 8 + +#define EFB_MASK 0x80 +#define TASK_PRIO_MASK 0x78 +#define TASK_ATTR_MASK 0x07 +/* ---------- SCB tasks ---------- */ + +/* This is both ssp_task and long_ssp_task + */ +struct initiate_ssp_task { + u8 proto_conn_rate; /* proto:6,4, conn_rate:3,0 */ + __le32 total_xfer_len; + struct ssp_frame_hdr ssp_frame; + struct ssp_command_iu ssp_cmd; + __le16 sister_scb; /* 0xFFFF */ + __le16 conn_handle; /* index to DDB for the intended target */ + u8 data_dir; /* :1,0 */ +#define DATA_DIR_NONE 0x00 +#define DATA_DIR_IN 0x01 +#define DATA_DIR_OUT 0x02 +#define DATA_DIR_BYRECIPIENT 0x03 + + u8 _r_a; + u8 retry_count; + u8 _r_b[5]; + struct sg_el sg_element[3]; /* 2 real and 1 link */ +} __attribute__ ((packed)); + +/* This defines both ata_task and atapi_task. + * ata: C bit of FIS should be 1, + * atapi: C bit of FIS should be 1, and command register should be 0xA0, + * to indicate a packet command. 
+ */ +struct initiate_ata_task { + u8 proto_conn_rate; + __le32 total_xfer_len; + struct host_to_dev_fis fis; + __le32 data_offs; + u8 atapi_packet[16]; + u8 _r_a[12]; + __le16 sister_scb; + __le16 conn_handle; + u8 ata_flags; /* CSMI:6,6, DTM:4,4, QT:3,3, data dir:1,0 */ +#define CSMI_TASK 0x40 +#define DATA_XFER_MODE_DMA 0x10 +#define ATA_Q_TYPE_MASK 0x08 +#define ATA_Q_TYPE_UNTAGGED 0x00 +#define ATA_Q_TYPE_NCQ 0x08 + + u8 _r_b; + u8 retry_count; + u8 _r_c; + u8 flags; +#define STP_AFFIL_POLICY 0x20 +#define SET_AFFIL_POLICY 0x10 +#define RET_PARTIAL_SGLIST 0x02 + + u8 _r_d[3]; + struct sg_el sg_element[3]; +} __attribute__ ((packed)); + +struct initiate_smp_task { + u8 proto_conn_rate; + u8 _r_a[40]; + struct sg_el smp_req; + __le16 sister_scb; + __le16 conn_handle; + u8 _r_c[8]; + struct sg_el smp_resp; + u8 _r_d[32]; +} __attribute__ ((packed)); + +struct control_phy { + u8 phy_id; + u8 sub_func; +#define DISABLE_PHY 0x00 +#define ENABLE_PHY 0x01 +#define RELEASE_SPINUP_HOLD 0x02 +#define ENABLE_PHY_NO_SAS_OOB 0x03 +#define ENABLE_PHY_NO_SATA_OOB 0x04 +#define PHY_NO_OP 0x05 +#define EXECUTE_HARD_RESET 0x81 + + u8 func_mask; + u8 speed_mask; + u8 hot_plug_delay; + u8 port_type; + u8 flags; +#define DEV_PRES_TIMER_OVERRIDE_ENABLE 0x01 +#define DISABLE_PHY_IF_OOB_FAILS 0x02 + + __le32 timeout_override; + u8 link_reset_retries; + u8 _r_a[47]; + __le16 conn_handle; + u8 _r_b[56]; +} __attribute__ ((packed)); + +struct control_ata_dev { + u8 proto_conn_rate; + __le32 _r_a; + struct host_to_dev_fis fis; + u8 _r_b[32]; + __le16 sister_scb; + __le16 conn_handle; + u8 ata_flags; /* 0 */ + u8 _r_c[55]; +} __attribute__ ((packed)); + +struct empty_scb { + u8 num_valid; + __le32 _r_a; +#define ASD_EDBS_PER_SCB 7 +/* header+data+CRC+DMA suffix data */ +#define ASD_EDB_SIZE (24+1024+4+16) + struct sg_el eb[ASD_EDBS_PER_SCB]; +#define ELEMENT_NOT_VALID 0xC0 +} __attribute__ ((packed)); + +struct initiate_link_adm { + u8 phy_id; + u8 sub_func; +#define GET_LINK_ERROR_COUNT 0x00 +#define RESET_LINK_ERROR_COUNT 0x01 +#define ENABLE_NOTIFY_SPINUP_INTS 0x02 + + u8 _r_a[57]; + __le16 conn_handle; + u8 _r_b[56]; +} __attribute__ ((packed)); + +struct copy_memory { + u8 _r_a; + __le16 xfer_len; + __le16 _r_b; + __le64 src_busaddr; + u8 src_ds; /* See definition of sg_el */ + u8 _r_c[45]; + __le16 conn_handle; + __le64 _r_d; + __le64 dest_busaddr; + u8 dest_ds; /* See definition of sg_el */ + u8 _r_e[39]; +} __attribute__ ((packed)); + +struct abort_task { + u8 proto_conn_rate; + __le32 _r_a; + struct ssp_frame_hdr ssp_frame; + struct ssp_tmf_iu ssp_task; + __le16 sister_scb; + __le16 conn_handle; + u8 flags; /* ovrd_itnl_timer:3,3, suspend_data_trans:2,2 */ +#define SUSPEND_DATA_TRANS 0x04 + + u8 _r_b; + u8 retry_count; + u8 _r_c[5]; + __le16 index; /* Transaction context of task to be queried */ + __le16 itnl_to; + u8 _r_d[44]; +} __attribute__ ((packed)); + +struct clear_nexus { + u8 nexus; +#define NEXUS_ADAPTER 0x00 +#define NEXUS_PORT 0x01 +#define NEXUS_I_T 0x02 +#define NEXUS_I_T_L 0x03 +#define NEXUS_TAG 0x04 +#define NEXUS_TRANS_CX 0x05 +#define NEXUS_SATA_TAG 0x06 +#define NEXUS_T_L 0x07 +#define NEXUS_L 0x08 +#define NEXUS_T_TAG 0x09 + + __le32 _r_a; + u8 flags; +#define SUSPEND_TX 0x80 +#define RESUME_TX 0x40 +#define SEND_Q 0x04 +#define EXEC_Q 0x02 +#define NOTINQ 0x01 + + u8 _r_b[3]; + u8 conn_mask; + u8 _r_c[19]; + struct ssp_tmf_iu ssp_task; /* LUN and TAG */ + __le16 _r_d; + __le16 conn_handle; + __le64 _r_e; + __le16 index; /* Transaction context of task to be cleared */ + __le16 
context; /* Clear nexus context */ + u8 _r_f[44]; +} __attribute__ ((packed)); + +struct initiate_ssp_tmf { + u8 proto_conn_rate; + __le32 _r_a; + struct ssp_frame_hdr ssp_frame; + struct ssp_tmf_iu ssp_task; + __le16 sister_scb; + __le16 conn_handle; + u8 flags; /* itnl override and suspend data tx */ +#define OVERRIDE_ITNL_TIMER 8 + + u8 _r_b; + u8 retry_count; + u8 _r_c[5]; + __le16 index; /* Transaction context of task to be queried */ + __le16 itnl_to; + u8 _r_d[44]; +} __attribute__ ((packed)); + +/* Transmits an arbitrary primitive on the link. + * Used for NOTIFY and BROADCAST. + */ +struct send_prim { + u8 phy_id; + u8 wait_transmit; /* :0,0 */ + u8 xmit_flags; +#define XMTPSIZE_MASK 0xF0 +#define XMTPSIZE_SINGLE 0x10 +#define XMTPSIZE_REPEATED 0x20 +#define XMTPSIZE_CONT 0x20 +#define XMTPSIZE_TRIPLE 0x30 +#define XMTPSIZE_REDUNDANT 0x60 +#define XMTPSIZE_INF 0 + +#define XMTCONTEN 0x04 +#define XMTPFRM 0x02 /* Transmit at the next frame boundary */ +#define XMTPIMM 0x01 /* Transmit immediately */ + + __le16 _r_a; + u8 prim[4]; /* K, D0, D1, D2 */ + u8 _r_b[50]; + __le16 conn_handle; + u8 _r_c[56]; +} __attribute__ ((packed)); + +/* This describes both SSP Target Get Data and SSP Target Get Data And + * Send Good Response SCBs. Used when the sequencer is operating in + * target mode... + */ +struct ssp_targ_get_data { + u8 proto_conn_rate; + __le32 total_xfer_len; + struct ssp_frame_hdr ssp_frame; + struct xfer_rdy_iu xfer_rdy; + u8 lun[LUN_SIZE]; + __le64 _r_a; + __le16 sister_scb; + __le16 conn_handle; + u8 data_dir; /* 01b */ + u8 _r_b; + u8 retry_count; + u8 _r_c[5]; + struct sg_el sg_element[3]; +} __attribute__ ((packed)); + +/* ---------- The actual SCB struct ---------- */ + +struct scb { + struct scb_header header; + union { + struct initiate_ssp_task ssp_task; + struct initiate_ata_task ata_task; + struct initiate_smp_task smp_task; + struct control_phy control_phy; + struct control_ata_dev control_ata_dev; + struct empty_scb escb; + struct initiate_link_adm link_adm; + struct copy_memory cp_mem; + struct abort_task abort_task; + struct clear_nexus clear_nexus; + struct initiate_ssp_tmf ssp_tmf; + }; +} __attribute__ ((packed)); + +/* ---------- Done List ---------- */ +/* The done list entry opcode field is defined below. + * The mnemonic encoding and meaning is as follows: + * TC - Task Complete, status was received and acknowledged + * TF - Task Failed, indicates an error prior to receiving acknowledgment + * for the command: + * - no conn, + * - NACK or R_ERR received in response to this command, + * - credit blocked or not available, or in the case of SMP request, + * - no SMP response was received. + * In these four cases it is known that the target didn't receive the + * command. + * TI - Task Interrupted, error after the command was acknowledged. It is + * known that the command was received by the target. + * TU - Task Unacked, command was transmitted but neither ACK (R_OK) nor NAK + * (R_ERR) was received due to loss of signal, broken connection, loss of + * dword sync or other reason. The application client should send the + * appropriate task query. + * TA - Task Aborted, see TF. + * _RESP - The completion includes an empty buffer containing status. + * TO - Timeout. 
+ */ +#define TC_NO_ERROR 0x00 +#define TC_UNDERRUN 0x01 +#define TC_OVERRUN 0x02 +#define TF_OPEN_TO 0x03 +#define TF_OPEN_REJECT 0x04 +#define TI_BREAK 0x05 +#define TI_PROTO_ERR 0x06 +#define TC_SSP_RESP 0x07 +#define TI_PHY_DOWN 0x08 +#define TF_PHY_DOWN 0x09 +#define TC_LINK_ADM_RESP 0x0a +#define TC_CSMI 0x0b +#define TC_ATA_RESP 0x0c +#define TU_PHY_DOWN 0x0d +#define TU_BREAK 0x0e +#define TI_SATA_TO 0x0f +#define TI_NAK 0x10 +#define TC_CONTROL_PHY 0x11 +#define TF_BREAK 0x12 +#define TC_RESUME 0x13 +#define TI_ACK_NAK_TO 0x14 +#define TF_SMPRSP_TO 0x15 +#define TF_SMP_XMIT_RCV_ERR 0x16 +#define TC_PARTIAL_SG_LIST 0x17 +#define TU_ACK_NAK_TO 0x18 +#define TU_SATA_TO 0x19 +#define TF_NAK_RECV 0x1a +#define TA_I_T_NEXUS_LOSS 0x1b +#define TC_ATA_R_ERR_RECV 0x1c +#define TF_TMF_NO_CTX 0x1d +#define TA_ON_REQ 0x1e +#define TF_TMF_NO_TAG 0x1f +#define TF_TMF_TAG_FREE 0x20 +#define TF_TMF_TASK_DONE 0x21 +#define TF_TMF_NO_CONN_HANDLE 0x22 +#define TC_TASK_CLEARED 0x23 +#define TI_SYNCS_RECV 0x24 +#define TU_SYNCS_RECV 0x25 +#define TF_IRTT_TO 0x26 +#define TF_NO_SMP_CONN 0x27 +#define TF_IU_SHORT 0x28 +#define TF_DATA_OFFS_ERR 0x29 +#define TF_INV_CONN_HANDLE 0x2a +#define TF_REQUESTED_N_PENDING 0x2b + +/* 0xc1 - 0xc7: empty buffer received, + 0xd1 - 0xd7: establish nexus empty buffer received +*/ +/* This is the ESCB mask */ +#define ESCB_RECVD 0xC0 + + +/* This struct done_list_struct defines the done list entry. + * All fields are LE. + */ +struct done_list_struct { + __le16 index; /* aka transaction context */ + u8 opcode; + u8 status_block[4]; + u8 toggle; /* bit 0 */ +#define DL_TOGGLE_MASK 0x01 +} __attribute__ ((packed)); + +/* ---------- PHYS ---------- */ + +struct asd_phy { + struct asd_sas_phy sas_phy; + struct asd_phy_desc *phy_desc; /* hw profile */ + + struct sas_identify_frame *identify_frame; + struct asd_dma_tok *id_frm_tok; + struct asd_port *asd_port; + + u8 frame_rcvd[ASD_EDB_SIZE]; +}; + + +#define ASD_SCB_SIZE sizeof(struct scb) +#define ASD_DDB_SIZE sizeof(struct asd_ddb_ssp_smp_target_port) + +/* Define this to 0 if you do not want NOTIFY (ENABLE SPINIP) sent. + * Default: 0x10 (it's a mask) + */ +#define ASD_NOTIFY_ENABLE_SPINUP 0x10 + +/* If enabled, set this to the interval between transmission + * of NOTIFY (ENABLE SPINUP). In units of 200 us. + */ +#define ASD_NOTIFY_TIMEOUT 2500 + +/* Initial delay after OOB, before we transmit NOTIFY (ENABLE SPINUP). + * If 0, transmit immediately. In milliseconds. + */ +#define ASD_NOTIFY_DOWN_COUNT 0 + +/* Device present timer timeout constant, 10 ms. */ +#define ASD_DEV_PRESENT_TIMEOUT 0x2710 + +#define ASD_SATA_INTERLOCK_TIMEOUT 0 + +/* How long to wait before shutting down an STP connection, unless + * an STP target sent frame(s). 50 usec. + * IGNORED by the sequencer (i.e. value 0 always). + */ +#define ASD_STP_SHUTDOWN_TIMEOUT 0x0 + +/* ATA soft reset timer timeout. 5 usec. */ +#define ASD_SRST_ASSERT_TIMEOUT 0x05 + +/* 31 sec */ +#define ASD_RCV_FIS_TIMEOUT 0x01D905C0 + +#define ASD_ONE_MILLISEC_TIMEOUT 0x03e8 + +/* COMINIT timer */ +#define ASD_TEN_MILLISEC_TIMEOUT 0x2710 +#define ASD_COMINIT_TIMEOUT ASD_TEN_MILLISEC_TIMEOUT + +/* 1 sec */ +#define ASD_SMP_RCV_TIMEOUT 0x000F4240 + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c new file mode 100644 index 000000000..68214a58b --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_scb.c @@ -0,0 +1,928 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Aic94xx SAS/SATA driver SCB management. 
+ * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + */ + +#include +#include + +#include "aic94xx.h" +#include "aic94xx_reg.h" +#include "aic94xx_hwi.h" +#include "aic94xx_seq.h" + +#include "aic94xx_dump.h" + +/* ---------- EMPTY SCB ---------- */ + +#define DL_PHY_MASK 7 +#define BYTES_DMAED 0 +#define PRIMITIVE_RECVD 0x08 +#define PHY_EVENT 0x10 +#define LINK_RESET_ERROR 0x18 +#define TIMER_EVENT 0x20 +#define REQ_TASK_ABORT 0xF0 +#define REQ_DEVICE_RESET 0xF1 +#define SIGNAL_NCQ_ERROR 0xF2 +#define CLEAR_NCQ_ERROR 0xF3 + +#define PHY_EVENTS_STATUS (CURRENT_LOSS_OF_SIGNAL | CURRENT_OOB_DONE \ + | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \ + | CURRENT_OOB_ERROR) + +static void get_lrate_mode(struct asd_phy *phy, u8 oob_mode) +{ + struct sas_phy *sas_phy = phy->sas_phy.phy; + + switch (oob_mode & 7) { + case PHY_SPEED_60: + /* FIXME: sas transport class doesn't have this */ + phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS; + phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS; + break; + case PHY_SPEED_30: + phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS; + phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS; + break; + case PHY_SPEED_15: + phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS; + phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS; + break; + } + sas_phy->negotiated_linkrate = phy->sas_phy.linkrate; + sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; + sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; + sas_phy->maximum_linkrate = phy->phy_desc->max_sas_lrate; + sas_phy->minimum_linkrate = phy->phy_desc->min_sas_lrate; + + if (oob_mode & SAS_MODE) + phy->sas_phy.oob_mode = SAS_OOB_MODE; + else if (oob_mode & SATA_MODE) + phy->sas_phy.oob_mode = SATA_OOB_MODE; +} + +static void asd_phy_event_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + int phy_id = dl->status_block[0] & DL_PHY_MASK; + struct asd_phy *phy = &asd_ha->phys[phy_id]; + + u8 oob_status = dl->status_block[1] & PHY_EVENTS_STATUS; + u8 oob_mode = dl->status_block[2]; + + switch (oob_status) { + case CURRENT_LOSS_OF_SIGNAL: + /* directly attached device was removed */ + ASD_DPRINTK("phy%d: device unplugged\n", phy_id); + asd_turn_led(asd_ha, phy_id, 0); + sas_phy_disconnected(&phy->sas_phy); + sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL, + GFP_ATOMIC); + break; + case CURRENT_OOB_DONE: + /* hot plugged device */ + asd_turn_led(asd_ha, phy_id, 1); + get_lrate_mode(phy, oob_mode); + ASD_DPRINTK("phy%d device plugged: lrate:0x%x, proto:0x%x\n", + phy_id, phy->sas_phy.linkrate, phy->sas_phy.iproto); + sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC); + break; + case CURRENT_SPINUP_HOLD: + /* hot plug SATA, no COMWAKE sent */ + asd_turn_led(asd_ha, phy_id, 1); + sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD, + GFP_ATOMIC); + break; + case CURRENT_GTO_TIMEOUT: + case CURRENT_OOB_ERROR: + ASD_DPRINTK("phy%d error while OOB: oob status:0x%x\n", phy_id, + dl->status_block[1]); + asd_turn_led(asd_ha, phy_id, 0); + sas_phy_disconnected(&phy->sas_phy); + sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC); + break; + } +} + +/* If phys are enabled sparsely, this will do the right thing. 
*/ +static unsigned ord_phy(struct asd_ha_struct *asd_ha, struct asd_phy *phy) +{ + u8 enabled_mask = asd_ha->hw_prof.enabled_phys; + int i, k = 0; + + for_each_phy(enabled_mask, enabled_mask, i) { + if (&asd_ha->phys[i] == phy) + return k; + k++; + } + return 0; +} + +/** + * asd_get_attached_sas_addr -- extract/generate attached SAS address + * @phy: pointer to asd_phy + * @sas_addr: pointer to buffer where the SAS address is to be written + * + * This function extracts the SAS address from an IDENTIFY frame + * received. If OOB is SATA, then a SAS address is generated from the + * HA tables. + * + * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame + * buffer. + */ +static void asd_get_attached_sas_addr(struct asd_phy *phy, u8 *sas_addr) +{ + if (phy->sas_phy.frame_rcvd[0] == 0x34 + && phy->sas_phy.oob_mode == SATA_OOB_MODE) { + struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha; + /* FIS device-to-host */ + u64 addr = be64_to_cpu(*(__be64 *)phy->phy_desc->sas_addr); + + addr += asd_ha->hw_prof.sata_name_base + ord_phy(asd_ha, phy); + *(__be64 *)sas_addr = cpu_to_be64(addr); + } else { + struct sas_identify_frame *idframe = + (void *) phy->sas_phy.frame_rcvd; + memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE); + } +} + +static void asd_form_port(struct asd_ha_struct *asd_ha, struct asd_phy *phy) +{ + int i; + struct asd_port *free_port = NULL; + struct asd_port *port; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + unsigned long flags; + + spin_lock_irqsave(&asd_ha->asd_ports_lock, flags); + if (!phy->asd_port) { + for (i = 0; i < ASD_MAX_PHYS; i++) { + port = &asd_ha->asd_ports[i]; + + /* Check for wide port */ + if (port->num_phys > 0 && + memcmp(port->sas_addr, sas_phy->sas_addr, + SAS_ADDR_SIZE) == 0 && + memcmp(port->attached_sas_addr, + sas_phy->attached_sas_addr, + SAS_ADDR_SIZE) == 0) { + break; + } + + /* Find a free port */ + if (port->num_phys == 0 && free_port == NULL) { + free_port = port; + } + } + + /* Use a free port if this doesn't form a wide port */ + if (i >= ASD_MAX_PHYS) { + port = free_port; + BUG_ON(!port); + memcpy(port->sas_addr, sas_phy->sas_addr, + SAS_ADDR_SIZE); + memcpy(port->attached_sas_addr, + sas_phy->attached_sas_addr, + SAS_ADDR_SIZE); + } + port->num_phys++; + port->phy_mask |= (1U << sas_phy->id); + phy->asd_port = port; + } + ASD_DPRINTK("%s: updating phy_mask 0x%x for phy%d\n", + __func__, phy->asd_port->phy_mask, sas_phy->id); + asd_update_port_links(asd_ha, phy); + spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags); +} + +static void asd_deform_port(struct asd_ha_struct *asd_ha, struct asd_phy *phy) +{ + struct asd_port *port = phy->asd_port; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + unsigned long flags; + + spin_lock_irqsave(&asd_ha->asd_ports_lock, flags); + if (port) { + port->num_phys--; + port->phy_mask &= ~(1U << sas_phy->id); + phy->asd_port = NULL; + } + spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags); +} + +static void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl, + int edb_id, int phy_id) +{ + unsigned long flags; + int edb_el = edb_id + ascb->edb_index; + struct asd_dma_tok *edb = ascb->ha->seq.edb_arr[edb_el]; + struct asd_phy *phy = &ascb->ha->phys[phy_id]; + u16 size = ((dl->status_block[3] & 7) << 8) | dl->status_block[2]; + + size = min(size, (u16) sizeof(phy->frame_rcvd)); + + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->sas_phy.frame_rcvd, edb->vaddr, size); + phy->sas_phy.frame_rcvd_size = size; + 
asd_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + asd_dump_frame_rcvd(phy, dl); + asd_form_port(ascb->ha, phy); + sas_notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC); +} + +static void asd_link_reset_err_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl, + int phy_id) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct sas_ha_struct *sas_ha = &asd_ha->sas_ha; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + struct asd_phy *phy = &asd_ha->phys[phy_id]; + u8 lr_error = dl->status_block[1]; + u8 retries_left = dl->status_block[2]; + + switch (lr_error) { + case 0: + ASD_DPRINTK("phy%d: Receive ID timer expired\n", phy_id); + break; + case 1: + ASD_DPRINTK("phy%d: Loss of signal\n", phy_id); + break; + case 2: + ASD_DPRINTK("phy%d: Loss of dword sync\n", phy_id); + break; + case 3: + ASD_DPRINTK("phy%d: Receive FIS timeout\n", phy_id); + break; + default: + ASD_DPRINTK("phy%d: unknown link reset error code: 0x%x\n", + phy_id, lr_error); + break; + } + + asd_turn_led(asd_ha, phy_id, 0); + sas_phy_disconnected(sas_phy); + asd_deform_port(asd_ha, phy); + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, GFP_ATOMIC); + + if (retries_left == 0) { + int num = 1; + struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num, + GFP_ATOMIC); + if (!cp) { + asd_printk("%s: out of memory\n", __func__); + goto out; + } + ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n", + phy_id); + asd_build_control_phy(cp, phy_id, ENABLE_PHY); + if (asd_post_ascb_list(ascb->ha, cp, 1) != 0) + asd_ascb_free(cp); + } +out: + ; +} + +static void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl, + int phy_id) +{ + unsigned long flags; + struct sas_ha_struct *sas_ha = &ascb->ha->sas_ha; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + struct asd_ha_struct *asd_ha = ascb->ha; + struct asd_phy *phy = &asd_ha->phys[phy_id]; + u8 reg = dl->status_block[1]; + u32 cont = dl->status_block[2] << ((reg & 3)*8); + + reg &= ~3; + switch (reg) { + case LmPRMSTAT0BYTE0: + switch (cont) { + case LmBROADCH: + case LmBROADRVCH0: + case LmBROADRVCH1: + case LmBROADSES: + ASD_DPRINTK("phy%d: BROADCAST change received:%d\n", + phy_id, cont); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = ffs(cont); + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, + GFP_ATOMIC); + break; + + case LmUNKNOWNP: + ASD_DPRINTK("phy%d: unknown BREAK\n", phy_id); + break; + + default: + ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n", + phy_id, reg, cont); + break; + } + break; + case LmPRMSTAT1BYTE0: + switch (cont) { + case LmHARDRST: + ASD_DPRINTK("phy%d: HARD_RESET primitive rcvd\n", + phy_id); + /* The sequencer disables all phys on that port. + * We have to re-enable the phys ourselves. 
*/ + asd_deform_port(asd_ha, phy); + sas_notify_port_event(sas_phy, PORTE_HARD_RESET, + GFP_ATOMIC); + break; + + default: + ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n", + phy_id, reg, cont); + break; + } + break; + default: + ASD_DPRINTK("unknown primitive register:0x%x\n", + dl->status_block[1]); + break; + } +} + +/** + * asd_invalidate_edb -- invalidate an EDB and if necessary post the ESCB + * @ascb: pointer to Empty SCB + * @edb_id: index [0,6] to the empty data buffer which is to be invalidated + * + * After an EDB has been invalidated, if all EDBs in this ESCB have been + * invalidated, the ESCB is posted back to the sequencer. + * Context is tasklet/IRQ. + */ +void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id) +{ + struct asd_seq_data *seq = &ascb->ha->seq; + struct empty_scb *escb = &ascb->scb->escb; + struct sg_el *eb = &escb->eb[edb_id]; + struct asd_dma_tok *edb = seq->edb_arr[ascb->edb_index + edb_id]; + + memset(edb->vaddr, 0, ASD_EDB_SIZE); + eb->flags |= ELEMENT_NOT_VALID; + escb->num_valid--; + + if (escb->num_valid == 0) { + int i; + /* ASD_DPRINTK("reposting escb: vaddr: 0x%p, " + "dma_handle: 0x%08llx, next: 0x%08llx, " + "index:%d, opcode:0x%02x\n", + ascb->dma_scb.vaddr, + (u64)ascb->dma_scb.dma_handle, + le64_to_cpu(ascb->scb->header.next_scb), + le16_to_cpu(ascb->scb->header.index), + ascb->scb->header.opcode); + */ + escb->num_valid = ASD_EDBS_PER_SCB; + for (i = 0; i < ASD_EDBS_PER_SCB; i++) + escb->eb[i].flags = 0; + if (!list_empty(&ascb->list)) + list_del_init(&ascb->list); + i = asd_post_escb_list(ascb->ha, ascb, 1); + if (i) + asd_printk("couldn't post escb, err:%d\n", i); + } +} + +static void escb_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct sas_ha_struct *sas_ha = &asd_ha->sas_ha; + int edb = (dl->opcode & DL_PHY_MASK) - 1; /* [0xc1,0xc7] -> [0,6] */ + u8 sb_opcode = dl->status_block[0]; + int phy_id = sb_opcode & DL_PHY_MASK; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + struct asd_phy *phy = &asd_ha->phys[phy_id]; + + if (edb > 6 || edb < 0) { + ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n", + edb, dl->opcode); + ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n", + sb_opcode, phy_id); + ASD_DPRINTK("escb: vaddr: 0x%p, " + "dma_handle: 0x%llx, next: 0x%llx, " + "index:%d, opcode:0x%02x\n", + ascb->dma_scb.vaddr, + (unsigned long long)ascb->dma_scb.dma_handle, + (unsigned long long) + le64_to_cpu(ascb->scb->header.next_scb), + le16_to_cpu(ascb->scb->header.index), + ascb->scb->header.opcode); + } + + /* Catch these before we mask off the sb_opcode bits */ + switch (sb_opcode) { + case REQ_TASK_ABORT: { + struct asd_ascb *a, *b; + u16 tc_abort; + struct domain_device *failed_dev = NULL; + + ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n", + __func__, dl->status_block[3]); + + /* + * Find the task that caused the abort and abort it first. + * The sequencer won't put anything on the done list until + * that happens. 
+ */ + tc_abort = *((u16*)(&dl->status_block[1])); + tc_abort = le16_to_cpu(tc_abort); + + list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) { + struct sas_task *task = a->uldd_task; + + if (a->tc_index != tc_abort) + continue; + + if (task) { + failed_dev = task->dev; + sas_task_abort(task); + } else { + ASD_DPRINTK("R_T_A for non TASK scb 0x%x\n", + a->scb->header.opcode); + } + break; + } + + if (!failed_dev) { + ASD_DPRINTK("%s: Can't find task (tc=%d) to abort!\n", + __func__, tc_abort); + goto out; + } + + /* + * Now abort everything else for that device (hba?) so + * that the EH will wake up and do something. + */ + list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) { + struct sas_task *task = a->uldd_task; + + if (task && + task->dev == failed_dev && + a->tc_index != tc_abort) + sas_task_abort(task); + } + + goto out; + } + case REQ_DEVICE_RESET: { + struct asd_ascb *a; + u16 conn_handle; + unsigned long flags; + struct sas_task *last_dev_task = NULL; + + conn_handle = *((u16*)(&dl->status_block[1])); + conn_handle = le16_to_cpu(conn_handle); + + ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __func__, + dl->status_block[3]); + + /* Find the last pending task for the device... */ + list_for_each_entry(a, &asd_ha->seq.pend_q, list) { + u16 x; + struct domain_device *dev; + struct sas_task *task = a->uldd_task; + + if (!task) + continue; + dev = task->dev; + + x = (unsigned long)dev->lldd_dev; + if (x == conn_handle) + last_dev_task = task; + } + + if (!last_dev_task) { + ASD_DPRINTK("%s: Device reset for idle device %d?\n", + __func__, conn_handle); + goto out; + } + + /* ...and set the reset flag */ + spin_lock_irqsave(&last_dev_task->task_state_lock, flags); + last_dev_task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; + spin_unlock_irqrestore(&last_dev_task->task_state_lock, flags); + + /* Kill all pending tasks for the device */ + list_for_each_entry(a, &asd_ha->seq.pend_q, list) { + u16 x; + struct domain_device *dev; + struct sas_task *task = a->uldd_task; + + if (!task) + continue; + dev = task->dev; + + x = (unsigned long)dev->lldd_dev; + if (x == conn_handle) + sas_task_abort(task); + } + + goto out; + } + case SIGNAL_NCQ_ERROR: + ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __func__); + goto out; + case CLEAR_NCQ_ERROR: + ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __func__); + goto out; + } + + sb_opcode &= ~DL_PHY_MASK; + + switch (sb_opcode) { + case BYTES_DMAED: + ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __func__, phy_id); + asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id); + break; + case PRIMITIVE_RECVD: + ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __func__, + phy_id); + asd_primitive_rcvd_tasklet(ascb, dl, phy_id); + break; + case PHY_EVENT: + ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __func__, phy_id); + asd_phy_event_tasklet(ascb, dl); + break; + case LINK_RESET_ERROR: + ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __func__, + phy_id); + asd_link_reset_err_tasklet(ascb, dl, phy_id); + break; + case TIMER_EVENT: + ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n", + __func__, phy_id); + asd_turn_led(asd_ha, phy_id, 0); + /* the device is gone */ + sas_phy_disconnected(sas_phy); + asd_deform_port(asd_ha, phy); + sas_notify_port_event(sas_phy, PORTE_TIMER_EVENT, GFP_ATOMIC); + break; + default: + ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __func__, + phy_id, sb_opcode); + ASD_DPRINTK("edb is 0x%x! 
dl->opcode is 0x%x\n", + edb, dl->opcode); + ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n", + sb_opcode, phy_id); + ASD_DPRINTK("escb: vaddr: 0x%p, " + "dma_handle: 0x%llx, next: 0x%llx, " + "index:%d, opcode:0x%02x\n", + ascb->dma_scb.vaddr, + (unsigned long long)ascb->dma_scb.dma_handle, + (unsigned long long) + le64_to_cpu(ascb->scb->header.next_scb), + le16_to_cpu(ascb->scb->header.index), + ascb->scb->header.opcode); + + break; + } +out: + asd_invalidate_edb(ascb, edb); +} + +int asd_init_post_escbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int i; + + for (i = 0; i < seq->num_escbs; i++) + seq->escb_arr[i]->tasklet_complete = escb_tasklet_complete; + + ASD_DPRINTK("posting %d escbs\n", i); + return asd_post_escb_list(asd_ha, seq->escb_arr[0], seq->num_escbs); +} + +/* ---------- CONTROL PHY ---------- */ + +#define CONTROL_PHY_STATUS (CURRENT_DEVICE_PRESENT | CURRENT_OOB_DONE \ + | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \ + | CURRENT_OOB_ERROR) + +/** + * control_phy_tasklet_complete -- tasklet complete for CONTROL PHY ascb + * @ascb: pointer to an ascb + * @dl: pointer to the done list entry + * + * This function completes a CONTROL PHY scb and frees the ascb. + * A note on LEDs: + * - an LED blinks if there is IO though it, + * - if a device is connected to the LED, it is lit, + * - if no device is connected to the LED, is is dimmed (off). + */ +static void control_phy_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct scb *scb = ascb->scb; + struct control_phy *control_phy = &scb->control_phy; + u8 phy_id = control_phy->phy_id; + struct asd_phy *phy = &ascb->ha->phys[phy_id]; + + u8 status = dl->status_block[0]; + u8 oob_status = dl->status_block[1]; + u8 oob_mode = dl->status_block[2]; + /* u8 oob_signals= dl->status_block[3]; */ + + if (status != 0) { + ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n", + __func__, phy_id, status); + goto out; + } + + switch (control_phy->sub_func) { + case DISABLE_PHY: + asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id); + asd_turn_led(asd_ha, phy_id, 0); + asd_control_led(asd_ha, phy_id, 0); + ASD_DPRINTK("%s: disable phy%d\n", __func__, phy_id); + break; + + case ENABLE_PHY: + asd_control_led(asd_ha, phy_id, 1); + if (oob_status & CURRENT_OOB_DONE) { + asd_ha->hw_prof.enabled_phys |= (1 << phy_id); + get_lrate_mode(phy, oob_mode); + asd_turn_led(asd_ha, phy_id, 1); + ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n", + __func__, phy_id,phy->sas_phy.linkrate, + phy->sas_phy.iproto); + } else if (oob_status & CURRENT_SPINUP_HOLD) { + asd_ha->hw_prof.enabled_phys |= (1 << phy_id); + asd_turn_led(asd_ha, phy_id, 1); + ASD_DPRINTK("%s: phy%d, spinup hold\n", __func__, + phy_id); + } else if (oob_status & CURRENT_ERR_MASK) { + asd_turn_led(asd_ha, phy_id, 0); + ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n", + __func__, phy_id, oob_status); + } else if (oob_status & (CURRENT_HOT_PLUG_CNCT + | CURRENT_DEVICE_PRESENT)) { + asd_ha->hw_prof.enabled_phys |= (1 << phy_id); + asd_turn_led(asd_ha, phy_id, 1); + ASD_DPRINTK("%s: phy%d: hot plug or device present\n", + __func__, phy_id); + } else { + asd_ha->hw_prof.enabled_phys |= (1 << phy_id); + asd_turn_led(asd_ha, phy_id, 0); + ASD_DPRINTK("%s: phy%d: no device present: " + "oob_status:0x%x\n", + __func__, phy_id, oob_status); + } + break; + case RELEASE_SPINUP_HOLD: + case PHY_NO_OP: + case EXECUTE_HARD_RESET: + ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __func__, + phy_id, 
control_phy->sub_func); + /* XXX finish */ + break; + default: + ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __func__, + phy_id, control_phy->sub_func); + break; + } +out: + asd_ascb_free(ascb); +} + +static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd) +{ + /* disable all speeds, then enable defaults */ + *speed_mask = SAS_SPEED_60_DIS | SAS_SPEED_30_DIS | SAS_SPEED_15_DIS + | SATA_SPEED_30_DIS | SATA_SPEED_15_DIS; + + switch (pd->max_sas_lrate) { + case SAS_LINK_RATE_6_0_GBPS: + *speed_mask &= ~SAS_SPEED_60_DIS; + fallthrough; + default: + case SAS_LINK_RATE_3_0_GBPS: + *speed_mask &= ~SAS_SPEED_30_DIS; + fallthrough; + case SAS_LINK_RATE_1_5_GBPS: + *speed_mask &= ~SAS_SPEED_15_DIS; + } + + switch (pd->min_sas_lrate) { + case SAS_LINK_RATE_6_0_GBPS: + *speed_mask |= SAS_SPEED_30_DIS; + fallthrough; + case SAS_LINK_RATE_3_0_GBPS: + *speed_mask |= SAS_SPEED_15_DIS; + fallthrough; + default: + case SAS_LINK_RATE_1_5_GBPS: + /* nothing to do */ + ; + } + + switch (pd->max_sata_lrate) { + case SAS_LINK_RATE_3_0_GBPS: + *speed_mask &= ~SATA_SPEED_30_DIS; + fallthrough; + default: + case SAS_LINK_RATE_1_5_GBPS: + *speed_mask &= ~SATA_SPEED_15_DIS; + } + + switch (pd->min_sata_lrate) { + case SAS_LINK_RATE_3_0_GBPS: + *speed_mask |= SATA_SPEED_15_DIS; + fallthrough; + default: + case SAS_LINK_RATE_1_5_GBPS: + /* nothing to do */ + ; + } +} + +/** + * asd_build_control_phy -- build a CONTROL PHY SCB + * @ascb: pointer to an ascb + * @phy_id: phy id to control, integer + * @subfunc: subfunction, what to actually to do the phy + * + * This function builds a CONTROL PHY scb. No allocation of any kind + * is performed. @ascb is allocated with the list function. + * The caller can override the ascb->tasklet_complete to point + * to its own callback function. It must call asd_ascb_free() + * at its tasklet complete function. + * See the default implementation. 
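+ *
+ * A minimal sketch of the usual calling pattern, mirroring the use in
+ * asd_link_reset_err_tasklet() and asd_control_phy() in this file
+ * (asd_ha, phy_id and my_tasklet_complete() below are illustrative
+ * stand-ins for whatever the caller has at hand):
+ *
+ *	int num = 1;
+ *	struct asd_ascb *ascb = asd_ascb_alloc_list(asd_ha, &num, GFP_ATOMIC);
+ *
+ *	if (!ascb)
+ *		return -ENOMEM;
+ *	asd_build_control_phy(ascb, phy_id, ENABLE_PHY);
+ *	ascb->tasklet_complete = my_tasklet_complete;
+ *	if (asd_post_ascb_list(asd_ha, ascb, 1) != 0)
+ *		asd_ascb_free(ascb);
+ *
+ * where my_tasklet_complete() is the caller's own completion handler and,
+ * as noted above, must call asd_ascb_free() when it is done with the ascb.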
+ */ +void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc) +{ + struct asd_phy *phy = &ascb->ha->phys[phy_id]; + struct scb *scb = ascb->scb; + struct control_phy *control_phy = &scb->control_phy; + + scb->header.opcode = CONTROL_PHY; + control_phy->phy_id = (u8) phy_id; + control_phy->sub_func = subfunc; + + switch (subfunc) { + case EXECUTE_HARD_RESET: /* 0x81 */ + case ENABLE_PHY: /* 0x01 */ + /* decide hot plug delay */ + control_phy->hot_plug_delay = HOTPLUG_DELAY_TIMEOUT; + + /* decide speed mask */ + set_speed_mask(&control_phy->speed_mask, phy->phy_desc); + + /* initiator port settings are in the hi nibble */ + if (phy->sas_phy.role == PHY_ROLE_INITIATOR) + control_phy->port_type = SAS_PROTOCOL_ALL << 4; + else if (phy->sas_phy.role == PHY_ROLE_TARGET) + control_phy->port_type = SAS_PROTOCOL_ALL; + else + control_phy->port_type = + (SAS_PROTOCOL_ALL << 4) | SAS_PROTOCOL_ALL; + + /* link reset retries, this should be nominal */ + control_phy->link_reset_retries = 10; + fallthrough; + + case RELEASE_SPINUP_HOLD: /* 0x02 */ + /* decide the func_mask */ + control_phy->func_mask = FUNCTION_MASK_DEFAULT; + if (phy->phy_desc->flags & ASD_SATA_SPINUP_HOLD) + control_phy->func_mask &= ~SPINUP_HOLD_DIS; + else + control_phy->func_mask |= SPINUP_HOLD_DIS; + } + + control_phy->conn_handle = cpu_to_le16(0xFFFF); + + ascb->tasklet_complete = control_phy_tasklet_complete; +} + +/* ---------- INITIATE LINK ADM TASK ---------- */ + +#if 0 + +static void link_adm_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + u8 opcode = dl->opcode; + struct initiate_link_adm *link_adm = &ascb->scb->link_adm; + u8 phy_id = link_adm->phy_id; + + if (opcode != TC_NO_ERROR) { + asd_printk("phy%d: link adm task 0x%x completed with error " + "0x%x\n", phy_id, link_adm->sub_func, opcode); + } + ASD_DPRINTK("phy%d: link adm task 0x%x: 0x%x\n", + phy_id, link_adm->sub_func, opcode); + + asd_ascb_free(ascb); +} + +void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id, + u8 subfunc) +{ + struct scb *scb = ascb->scb; + struct initiate_link_adm *link_adm = &scb->link_adm; + + scb->header.opcode = INITIATE_LINK_ADM_TASK; + + link_adm->phy_id = phy_id; + link_adm->sub_func = subfunc; + link_adm->conn_handle = cpu_to_le16(0xFFFF); + + ascb->tasklet_complete = link_adm_tasklet_complete; +} + +#endif /* 0 */ + +/* ---------- SCB timer ---------- */ + +/** + * asd_ascb_timedout -- called when a pending SCB's timer has expired + * @t: Timer context used to fetch the SCB + * + * This is the default timeout function which does the most necessary. + * Upper layers can implement their own timeout function, say to free + * resources they have with this SCB, and then call this one at the + * end of their timeout function. To do this, one should initialize + * the ascb->timer.{function, expires} prior to calling the post + * function. The timer is started by the post function. + */ +void asd_ascb_timedout(struct timer_list *t) +{ + struct asd_ascb *ascb = from_timer(ascb, t, timer); + struct asd_seq_data *seq = &ascb->ha->seq; + unsigned long flags; + + ASD_DPRINTK("scb:0x%x timed out\n", ascb->scb->header.opcode); + + spin_lock_irqsave(&seq->pend_q_lock, flags); + seq->pending--; + list_del_init(&ascb->list); + spin_unlock_irqrestore(&seq->pend_q_lock, flags); + + asd_ascb_free(ascb); +} + +/* ---------- CONTROL PHY ---------- */ + +/* Given the spec value, return a driver value. 
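+ * For example, the libsas requests PHY_FUNC_LINK_RESET and
+ * PHY_FUNC_HARD_RESET are carried out with the ENABLE_PHY and
+ * EXECUTE_HARD_RESET CONTROL PHY sub-functions respectively.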
*/ +static const int phy_func_table[] = { + [PHY_FUNC_NOP] = PHY_NO_OP, + [PHY_FUNC_LINK_RESET] = ENABLE_PHY, + [PHY_FUNC_HARD_RESET] = EXECUTE_HARD_RESET, + [PHY_FUNC_DISABLE] = DISABLE_PHY, + [PHY_FUNC_RELEASE_SPINUP_HOLD] = RELEASE_SPINUP_HOLD, +}; + +int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg) +{ + struct asd_ha_struct *asd_ha = phy->ha->lldd_ha; + struct asd_phy_desc *pd = asd_ha->phys[phy->id].phy_desc; + struct asd_ascb *ascb; + struct sas_phy_linkrates *rates; + int res = 1; + + switch (func) { + case PHY_FUNC_CLEAR_ERROR_LOG: + case PHY_FUNC_GET_EVENTS: + return -ENOSYS; + case PHY_FUNC_SET_LINK_RATE: + rates = arg; + if (rates->minimum_linkrate) { + pd->min_sas_lrate = rates->minimum_linkrate; + pd->min_sata_lrate = rates->minimum_linkrate; + } + if (rates->maximum_linkrate) { + pd->max_sas_lrate = rates->maximum_linkrate; + pd->max_sata_lrate = rates->maximum_linkrate; + } + func = PHY_FUNC_LINK_RESET; + break; + default: + break; + } + + ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); + if (!ascb) + return -ENOMEM; + + asd_build_control_phy(ascb, phy->id, phy_func_table[func]); + res = asd_post_ascb_list(asd_ha, ascb , 1); + if (res) + asd_ascb_free(ascb); + + return res; +} diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c new file mode 100644 index 000000000..5def83c88 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_sds.c @@ -0,0 +1,1462 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Aic94xx SAS/SATA driver access to shared data structures and memory + * maps. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + */ + +#include +#include +#include + +#include "aic94xx.h" +#include "aic94xx_reg.h" +#include "aic94xx_sds.h" + +/* ---------- OCM stuff ---------- */ + +struct asd_ocm_dir_ent { + u8 type; + u8 offs[3]; + u8 _r1; + u8 size[3]; +} __attribute__ ((packed)); + +struct asd_ocm_dir { + char sig[2]; + u8 _r1[2]; + u8 major; /* 0 */ + u8 minor; /* 0 */ + u8 _r2; + u8 num_de; + struct asd_ocm_dir_ent entry[15]; +} __attribute__ ((packed)); + +#define OCM_DE_OCM_DIR 0x00 +#define OCM_DE_WIN_DRVR 0x01 +#define OCM_DE_BIOS_CHIM 0x02 +#define OCM_DE_RAID_ENGN 0x03 +#define OCM_DE_BIOS_INTL 0x04 +#define OCM_DE_BIOS_CHIM_OSM 0x05 +#define OCM_DE_BIOS_CHIM_DYNAMIC 0x06 +#define OCM_DE_ADDC2C_RES0 0x07 +#define OCM_DE_ADDC2C_RES1 0x08 +#define OCM_DE_ADDC2C_RES2 0x09 +#define OCM_DE_ADDC2C_RES3 0x0A + +#define OCM_INIT_DIR_ENTRIES 5 +/*************************************************************************** +* OCM directory default +***************************************************************************/ +static struct asd_ocm_dir OCMDirInit = +{ + .sig = {0x4D, 0x4F}, /* signature */ + .num_de = OCM_INIT_DIR_ENTRIES, /* no. 
of directory entries */ +}; + +/*************************************************************************** +* OCM directory Entries default +***************************************************************************/ +static struct asd_ocm_dir_ent OCMDirEntriesInit[OCM_INIT_DIR_ENTRIES] = +{ + { + .type = (OCM_DE_ADDC2C_RES0), /* Entry type */ + .offs = {128}, /* Offset */ + .size = {0, 4}, /* size */ + }, + { + .type = (OCM_DE_ADDC2C_RES1), /* Entry type */ + .offs = {128, 4}, /* Offset */ + .size = {0, 4}, /* size */ + }, + { + .type = (OCM_DE_ADDC2C_RES2), /* Entry type */ + .offs = {128, 8}, /* Offset */ + .size = {0, 4}, /* size */ + }, + { + .type = (OCM_DE_ADDC2C_RES3), /* Entry type */ + .offs = {128, 12}, /* Offset */ + .size = {0, 4}, /* size */ + }, + { + .type = (OCM_DE_WIN_DRVR), /* Entry type */ + .offs = {128, 16}, /* Offset */ + .size = {128, 235, 1}, /* size */ + }, +}; + +struct asd_bios_chim_struct { + char sig[4]; + u8 major; /* 1 */ + u8 minor; /* 0 */ + u8 bios_major; + u8 bios_minor; + __le32 bios_build; + u8 flags; + u8 pci_slot; + __le16 ue_num; + __le16 ue_size; + u8 _r[14]; + /* The unit element array is right here. + */ +} __attribute__ ((packed)); + +/** + * asd_read_ocm_seg - read an on chip memory (OCM) segment + * @asd_ha: pointer to the host adapter structure + * @buffer: where to write the read data + * @offs: offset into OCM where to read from + * @size: how many bytes to read + * + * Return the number of bytes not read. Return 0 on success. + */ +static int asd_read_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer, + u32 offs, int size) +{ + u8 *p = buffer; + if (unlikely(asd_ha->iospace)) + asd_read_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size); + else { + for ( ; size > 0; size--, offs++, p++) + *p = asd_read_ocm_byte(asd_ha, offs); + } + return size; +} + +static int asd_read_ocm_dir(struct asd_ha_struct *asd_ha, + struct asd_ocm_dir *dir, u32 offs) +{ + int err = asd_read_ocm_seg(asd_ha, dir, offs, sizeof(*dir)); + if (err) { + ASD_DPRINTK("couldn't read ocm segment\n"); + return err; + } + + if (dir->sig[0] != 'M' || dir->sig[1] != 'O') { + ASD_DPRINTK("no valid dir signature(%c%c) at start of OCM\n", + dir->sig[0], dir->sig[1]); + return -ENOENT; + } + if (dir->major != 0) { + asd_printk("unsupported major version of ocm dir:0x%x\n", + dir->major); + return -ENOENT; + } + dir->num_de &= 0xf; + return 0; +} + +/** + * asd_write_ocm_seg - write an on chip memory (OCM) segment + * @asd_ha: pointer to the host adapter structure + * @buffer: where to read the write data + * @offs: offset into OCM to write to + * @size: how many bytes to write + * + * Return the number of bytes not written. Return 0 on success. 
+ */ +static void asd_write_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer, + u32 offs, int size) +{ + u8 *p = buffer; + if (unlikely(asd_ha->iospace)) + asd_write_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size); + else { + for ( ; size > 0; size--, offs++, p++) + asd_write_ocm_byte(asd_ha, offs, *p); + } + return; +} + +#define THREE_TO_NUM(X) ((X)[0] | ((X)[1] << 8) | ((X)[2] << 16)) + +static int asd_find_dir_entry(struct asd_ocm_dir *dir, u8 type, + u32 *offs, u32 *size) +{ + int i; + struct asd_ocm_dir_ent *ent; + + for (i = 0; i < dir->num_de; i++) { + if (dir->entry[i].type == type) + break; + } + if (i >= dir->num_de) + return -ENOENT; + ent = &dir->entry[i]; + *offs = (u32) THREE_TO_NUM(ent->offs); + *size = (u32) THREE_TO_NUM(ent->size); + return 0; +} + +#define OCM_BIOS_CHIM_DE 2 +#define BC_BIOS_PRESENT 1 + +static int asd_get_bios_chim(struct asd_ha_struct *asd_ha, + struct asd_ocm_dir *dir) +{ + int err; + struct asd_bios_chim_struct *bc_struct; + u32 offs, size; + + err = asd_find_dir_entry(dir, OCM_BIOS_CHIM_DE, &offs, &size); + if (err) { + ASD_DPRINTK("couldn't find BIOS_CHIM dir ent\n"); + goto out; + } + err = -ENOMEM; + bc_struct = kmalloc(sizeof(*bc_struct), GFP_KERNEL); + if (!bc_struct) { + asd_printk("no memory for bios_chim struct\n"); + goto out; + } + err = asd_read_ocm_seg(asd_ha, (void *)bc_struct, offs, + sizeof(*bc_struct)); + if (err) { + ASD_DPRINTK("couldn't read ocm segment\n"); + goto out2; + } + if (strncmp(bc_struct->sig, "SOIB", 4) + && strncmp(bc_struct->sig, "IPSA", 4)) { + ASD_DPRINTK("BIOS_CHIM entry has no valid sig(%c%c%c%c)\n", + bc_struct->sig[0], bc_struct->sig[1], + bc_struct->sig[2], bc_struct->sig[3]); + err = -ENOENT; + goto out2; + } + if (bc_struct->major != 1) { + asd_printk("BIOS_CHIM unsupported major version:0x%x\n", + bc_struct->major); + err = -ENOENT; + goto out2; + } + if (bc_struct->flags & BC_BIOS_PRESENT) { + asd_ha->hw_prof.bios.present = 1; + asd_ha->hw_prof.bios.maj = bc_struct->bios_major; + asd_ha->hw_prof.bios.min = bc_struct->bios_minor; + asd_ha->hw_prof.bios.bld = le32_to_cpu(bc_struct->bios_build); + ASD_DPRINTK("BIOS present (%d,%d), %d\n", + asd_ha->hw_prof.bios.maj, + asd_ha->hw_prof.bios.min, + asd_ha->hw_prof.bios.bld); + } + asd_ha->hw_prof.ue.num = le16_to_cpu(bc_struct->ue_num); + asd_ha->hw_prof.ue.size= le16_to_cpu(bc_struct->ue_size); + ASD_DPRINTK("ue num:%d, ue size:%d\n", asd_ha->hw_prof.ue.num, + asd_ha->hw_prof.ue.size); + size = asd_ha->hw_prof.ue.num * asd_ha->hw_prof.ue.size; + if (size > 0) { + err = -ENOMEM; + asd_ha->hw_prof.ue.area = kmalloc(size, GFP_KERNEL); + if (!asd_ha->hw_prof.ue.area) + goto out2; + err = asd_read_ocm_seg(asd_ha, (void *)asd_ha->hw_prof.ue.area, + offs + sizeof(*bc_struct), size); + if (err) { + kfree(asd_ha->hw_prof.ue.area); + asd_ha->hw_prof.ue.area = NULL; + asd_ha->hw_prof.ue.num = 0; + asd_ha->hw_prof.ue.size = 0; + ASD_DPRINTK("couldn't read ue entries(%d)\n", err); + } + } +out2: + kfree(bc_struct); +out: + return err; +} + +static void +asd_hwi_initialize_ocm_dir (struct asd_ha_struct *asd_ha) +{ + int i; + + /* Zero OCM */ + for (i = 0; i < OCM_MAX_SIZE; i += 4) + asd_write_ocm_dword(asd_ha, i, 0); + + /* Write Dir */ + asd_write_ocm_seg(asd_ha, &OCMDirInit, 0, + sizeof(struct asd_ocm_dir)); + + /* Write Dir Entries */ + for (i = 0; i < OCM_INIT_DIR_ENTRIES; i++) + asd_write_ocm_seg(asd_ha, &OCMDirEntriesInit[i], + sizeof(struct asd_ocm_dir) + + (i * sizeof(struct asd_ocm_dir_ent)) + , sizeof(struct asd_ocm_dir_ent)); + +} + +static int 
+asd_hwi_check_ocm_access (struct asd_ha_struct *asd_ha) +{ + struct pci_dev *pcidev = asd_ha->pcidev; + u32 reg; + int err = 0; + u32 v; + + /* check if OCM has been initialized by BIOS */ + reg = asd_read_reg_dword(asd_ha, EXSICNFGR); + + if (!(reg & OCMINITIALIZED)) { + err = pci_read_config_dword(pcidev, PCIC_INTRPT_STAT, &v); + if (err) { + asd_printk("couldn't access PCIC_INTRPT_STAT of %s\n", + pci_name(pcidev)); + goto out; + } + + printk(KERN_INFO "OCM is not initialized by BIOS," + "reinitialize it and ignore it, current IntrptStatus" + "is 0x%x\n", v); + + if (v) + err = pci_write_config_dword(pcidev, + PCIC_INTRPT_STAT, v); + if (err) { + asd_printk("couldn't write PCIC_INTRPT_STAT of %s\n", + pci_name(pcidev)); + goto out; + } + + asd_hwi_initialize_ocm_dir(asd_ha); + + } +out: + return err; +} + +/** + * asd_read_ocm - read on chip memory (OCM) + * @asd_ha: pointer to the host adapter structure + */ +int asd_read_ocm(struct asd_ha_struct *asd_ha) +{ + int err; + struct asd_ocm_dir *dir; + + if (asd_hwi_check_ocm_access(asd_ha)) + return -1; + + dir = kmalloc(sizeof(*dir), GFP_KERNEL); + if (!dir) { + asd_printk("no memory for ocm dir\n"); + return -ENOMEM; + } + + err = asd_read_ocm_dir(asd_ha, dir, 0); + if (err) + goto out; + + err = asd_get_bios_chim(asd_ha, dir); +out: + kfree(dir); + return err; +} + +/* ---------- FLASH stuff ---------- */ + +#define FLASH_RESET 0xF0 + +#define ASD_FLASH_SIZE 0x200000 +#define FLASH_DIR_COOKIE "*** ADAPTEC FLASH DIRECTORY *** " +#define FLASH_NEXT_ENTRY_OFFS 0x2000 +#define FLASH_MAX_DIR_ENTRIES 32 + +#define FLASH_DE_TYPE_MASK 0x3FFFFFFF +#define FLASH_DE_MS 0x120 +#define FLASH_DE_CTRL_A_USER 0xE0 + +struct asd_flash_de { + __le32 type; + __le32 offs; + __le32 pad_size; + __le32 image_size; + __le32 chksum; + u8 _r[12]; + u8 version[32]; +} __attribute__ ((packed)); + +struct asd_flash_dir { + u8 cookie[32]; + __le32 rev; /* 2 */ + __le32 chksum; + __le32 chksum_antidote; + __le32 bld; + u8 bld_id[32]; /* build id data */ + u8 ver_data[32]; /* date and time of build */ + __le32 ae_mask; + __le32 v_mask; + __le32 oc_mask; + u8 _r[20]; + struct asd_flash_de dir_entry[FLASH_MAX_DIR_ENTRIES]; +} __attribute__ ((packed)); + +struct asd_manuf_sec { + char sig[2]; /* 'S', 'M' */ + u16 offs_next; + u8 maj; /* 0 */ + u8 min; /* 0 */ + u16 chksum; + u16 size; + u8 _r[6]; + u8 sas_addr[SAS_ADDR_SIZE]; + u8 pcba_sn[ASD_PCBA_SN_SIZE]; + /* Here start the other segments */ + u8 linked_list[]; +} __attribute__ ((packed)); + +struct asd_manuf_phy_desc { + u8 state; /* low 4 bits */ +#define MS_PHY_STATE_ENABLED 0 +#define MS_PHY_STATE_REPORTED 1 +#define MS_PHY_STATE_HIDDEN 2 + u8 phy_id; + u16 _r; + u8 phy_control_0; /* mode 5 reg 0x160 */ + u8 phy_control_1; /* mode 5 reg 0x161 */ + u8 phy_control_2; /* mode 5 reg 0x162 */ + u8 phy_control_3; /* mode 5 reg 0x163 */ +} __attribute__ ((packed)); + +struct asd_manuf_phy_param { + char sig[2]; /* 'P', 'M' */ + u16 next; + u8 maj; /* 0 */ + u8 min; /* 2 */ + u8 num_phy_desc; /* 8 */ + u8 phy_desc_size; /* 8 */ + u8 _r[3]; + u8 usage_model_id; + u32 _r2; + struct asd_manuf_phy_desc phy_desc[ASD_MAX_PHYS]; +} __attribute__ ((packed)); + +#if 0 +static const char *asd_sb_type[] = { + "unknown", + "SGPIO", + [2 ... 0x7F] = "unknown", + [0x80] = "ADPT_I2C", + [0x81 ... 0xFF] = "VENDOR_UNIQUExx" +}; +#endif + +struct asd_ms_sb_desc { + u8 type; + u8 node_desc_index; + u8 conn_desc_index; + u8 _recvd[]; +} __attribute__ ((packed)); + +#if 0 +static const char *asd_conn_type[] = { + [0 ... 
7] = "unknown", + "SFF8470", + "SFF8482", + "SFF8484", + [0x80] = "PCIX_DAUGHTER0", + [0x81] = "SAS_DAUGHTER0", + [0x82 ... 0xFF] = "VENDOR_UNIQUExx" +}; + +static const char *asd_conn_location[] = { + "unknown", + "internal", + "external", + "board_to_board", +}; +#endif + +struct asd_ms_conn_desc { + u8 type; + u8 location; + u8 num_sideband_desc; + u8 size_sideband_desc; + u32 _resvd; + u8 name[16]; + struct asd_ms_sb_desc sb_desc[]; +} __attribute__ ((packed)); + +struct asd_nd_phy_desc { + u8 vp_attch_type; + u8 attch_specific[]; +} __attribute__ ((packed)); + +#if 0 +static const char *asd_node_type[] = { + "IOP", + "IO_CONTROLLER", + "EXPANDER", + "PORT_MULTIPLIER", + "PORT_MULTIPLEXER", + "MULTI_DROP_I2C_BUS", +}; +#endif + +struct asd_ms_node_desc { + u8 type; + u8 num_phy_desc; + u8 size_phy_desc; + u8 _resvd; + u8 name[16]; + struct asd_nd_phy_desc phy_desc[]; +} __attribute__ ((packed)); + +struct asd_ms_conn_map { + char sig[2]; /* 'M', 'C' */ + __le16 next; + u8 maj; /* 0 */ + u8 min; /* 0 */ + __le16 cm_size; /* size of this struct */ + u8 num_conn; + u8 conn_size; + u8 num_nodes; + u8 usage_model_id; + u32 _resvd; + union { + DECLARE_FLEX_ARRAY(struct asd_ms_conn_desc, conn_desc); + DECLARE_FLEX_ARRAY(struct asd_ms_node_desc, node_desc); + }; +} __attribute__ ((packed)); + +struct asd_ctrla_phy_entry { + u8 sas_addr[SAS_ADDR_SIZE]; + u8 sas_link_rates; /* max in hi bits, min in low bits */ + u8 flags; + u8 sata_link_rates; + u8 _r[5]; +} __attribute__ ((packed)); + +struct asd_ctrla_phy_settings { + u8 id0; /* P'h'y */ + u8 _r; + u16 next; + u8 num_phys; /* number of PHYs in the PCI function */ + u8 _r2[3]; + struct asd_ctrla_phy_entry phy_ent[ASD_MAX_PHYS]; +} __attribute__ ((packed)); + +struct asd_ll_el { + u8 id0; + u8 id1; + __le16 next; + u8 something_here[]; +} __attribute__ ((packed)); + +static int asd_poll_flash(struct asd_ha_struct *asd_ha) +{ + int c; + u8 d; + + for (c = 5000; c > 0; c--) { + d = asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar); + d ^= asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar); + if (!d) + return 0; + udelay(5); + } + return -ENOENT; +} + +static int asd_reset_flash(struct asd_ha_struct *asd_ha) +{ + int err; + + err = asd_poll_flash(asd_ha); + if (err) + return err; + asd_write_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar, FLASH_RESET); + err = asd_poll_flash(asd_ha); + + return err; +} + +static int asd_read_flash_seg(struct asd_ha_struct *asd_ha, + void *buffer, u32 offs, int size) +{ + asd_read_reg_string(asd_ha, buffer, asd_ha->hw_prof.flash.bar+offs, + size); + return 0; +} + +/** + * asd_find_flash_dir - finds and reads the flash directory + * @asd_ha: pointer to the host adapter structure + * @flash_dir: pointer to flash directory structure + * + * If found, the flash directory segment will be copied to + * @flash_dir. Return 1 if found, 0 if not. 
+ */ +static int asd_find_flash_dir(struct asd_ha_struct *asd_ha, + struct asd_flash_dir *flash_dir) +{ + u32 v; + for (v = 0; v < ASD_FLASH_SIZE; v += FLASH_NEXT_ENTRY_OFFS) { + asd_read_flash_seg(asd_ha, flash_dir, v, + sizeof(FLASH_DIR_COOKIE)-1); + if (memcmp(flash_dir->cookie, FLASH_DIR_COOKIE, + sizeof(FLASH_DIR_COOKIE)-1) == 0) { + asd_ha->hw_prof.flash.dir_offs = v; + asd_read_flash_seg(asd_ha, flash_dir, v, + sizeof(*flash_dir)); + return 1; + } + } + return 0; +} + +static int asd_flash_getid(struct asd_ha_struct *asd_ha) +{ + int err = 0; + u32 reg; + + reg = asd_read_reg_dword(asd_ha, EXSICNFGR); + + if (pci_read_config_dword(asd_ha->pcidev, PCI_CONF_FLSH_BAR, + &asd_ha->hw_prof.flash.bar)) { + asd_printk("couldn't read PCI_CONF_FLSH_BAR of %s\n", + pci_name(asd_ha->pcidev)); + return -ENOENT; + } + asd_ha->hw_prof.flash.present = 1; + asd_ha->hw_prof.flash.wide = reg & FLASHW ? 1 : 0; + err = asd_reset_flash(asd_ha); + if (err) { + ASD_DPRINTK("couldn't reset flash(%d)\n", err); + return err; + } + return 0; +} + +static u16 asd_calc_flash_chksum(u16 *p, int size) +{ + u16 chksum = 0; + + while (size-- > 0) + chksum += *p++; + + return chksum; +} + + +static int asd_find_flash_de(struct asd_flash_dir *flash_dir, u32 entry_type, + u32 *offs, u32 *size) +{ + int i; + struct asd_flash_de *de; + + for (i = 0; i < FLASH_MAX_DIR_ENTRIES; i++) { + u32 type = le32_to_cpu(flash_dir->dir_entry[i].type); + + type &= FLASH_DE_TYPE_MASK; + if (type == entry_type) + break; + } + if (i >= FLASH_MAX_DIR_ENTRIES) + return -ENOENT; + de = &flash_dir->dir_entry[i]; + *offs = le32_to_cpu(de->offs); + *size = le32_to_cpu(de->pad_size); + return 0; +} + +static int asd_validate_ms(struct asd_manuf_sec *ms) +{ + if (ms->sig[0] != 'S' || ms->sig[1] != 'M') { + ASD_DPRINTK("manuf sec: no valid sig(%c%c)\n", + ms->sig[0], ms->sig[1]); + return -ENOENT; + } + if (ms->maj != 0) { + asd_printk("unsupported manuf. sector. major version:%x\n", + ms->maj); + return -ENOENT; + } + ms->offs_next = le16_to_cpu((__force __le16) ms->offs_next); + ms->chksum = le16_to_cpu((__force __le16) ms->chksum); + ms->size = le16_to_cpu((__force __le16) ms->size); + + if (asd_calc_flash_chksum((u16 *)ms, ms->size/2)) { + asd_printk("failed manuf sector checksum\n"); + } + + return 0; +} + +static int asd_ms_get_sas_addr(struct asd_ha_struct *asd_ha, + struct asd_manuf_sec *ms) +{ + memcpy(asd_ha->hw_prof.sas_addr, ms->sas_addr, SAS_ADDR_SIZE); + return 0; +} + +static int asd_ms_get_pcba_sn(struct asd_ha_struct *asd_ha, + struct asd_manuf_sec *ms) +{ + memcpy(asd_ha->hw_prof.pcba_sn, ms->pcba_sn, ASD_PCBA_SN_SIZE); + asd_ha->hw_prof.pcba_sn[ASD_PCBA_SN_SIZE] = '\0'; + return 0; +} + +/** + * asd_find_ll_by_id - find a linked list entry by its id + * @start: void pointer to the first element in the linked list + * @id0: the first byte of the id (offs 0) + * @id1: the second byte of the id (offs 1) + * + * @start has to be the _base_ element start, since the + * linked list entries's offset is from this pointer. + * Some linked list entries use only the first id, in which case + * you can pass 0xFF for the second. 
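+ *
+ * For example, elsewhere in this file the phy parameter segment is looked
+ * up with asd_find_ll_by_id(manuf_sec, 'P', 'M'), while the CTRL-A phy
+ * settings entry, which defines only its first id byte, is looked up with
+ * asd_find_ll_by_id(el, 'h', 0xFF).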
+ */ +static void *asd_find_ll_by_id(void * const start, const u8 id0, const u8 id1) +{ + struct asd_ll_el *el = start; + + do { + switch (id1) { + default: + if (el->id1 == id1) { + fallthrough; + case 0xFF: + if (el->id0 == id0) + return el; + } + } + el = start + le16_to_cpu(el->next); + } while (el != start); + + return NULL; +} + +/** + * asd_ms_get_phy_params - get phy parameters from the manufacturing sector + * @asd_ha: pointer to the host adapter structure + * @manuf_sec: pointer to the manufacturing sector + * + * The manufacturing sector contans also the linked list of sub-segments, + * since when it was read, its size was taken from the flash directory, + * not from the structure size. + * + * HIDDEN phys do not count in the total count. REPORTED phys cannot + * be enabled but are reported and counted towards the total. + * ENABLED phys are enabled by default and count towards the total. + * The absolute total phy number is ASD_MAX_PHYS. hw_prof->num_phys + * merely specifies the number of phys the host adapter decided to + * report. E.g., it is possible for phys 0, 1 and 2 to be HIDDEN, + * phys 3, 4 and 5 to be REPORTED and phys 6 and 7 to be ENABLED. + * In this case ASD_MAX_PHYS is 8, hw_prof->num_phys is 5, and only 2 + * are actually enabled (enabled by default, max number of phys + * enableable in this case). + */ +static int asd_ms_get_phy_params(struct asd_ha_struct *asd_ha, + struct asd_manuf_sec *manuf_sec) +{ + int i; + int en_phys = 0; + int rep_phys = 0; + struct asd_manuf_phy_param *phy_param; + struct asd_manuf_phy_param dflt_phy_param; + + phy_param = asd_find_ll_by_id(manuf_sec, 'P', 'M'); + if (!phy_param) { + ASD_DPRINTK("ms: no phy parameters found\n"); + ASD_DPRINTK("ms: Creating default phy parameters\n"); + dflt_phy_param.sig[0] = 'P'; + dflt_phy_param.sig[1] = 'M'; + dflt_phy_param.maj = 0; + dflt_phy_param.min = 2; + dflt_phy_param.num_phy_desc = 8; + dflt_phy_param.phy_desc_size = sizeof(struct asd_manuf_phy_desc); + for (i =0; i < ASD_MAX_PHYS; i++) { + dflt_phy_param.phy_desc[i].state = 0; + dflt_phy_param.phy_desc[i].phy_id = i; + dflt_phy_param.phy_desc[i].phy_control_0 = 0xf6; + dflt_phy_param.phy_desc[i].phy_control_1 = 0x10; + dflt_phy_param.phy_desc[i].phy_control_2 = 0x43; + dflt_phy_param.phy_desc[i].phy_control_3 = 0xeb; + } + + phy_param = &dflt_phy_param; + + } + + if (phy_param->maj != 0) { + asd_printk("unsupported manuf. 
phy param major version:0x%x\n", + phy_param->maj); + return -ENOENT; + } + + ASD_DPRINTK("ms: num_phy_desc: %d\n", phy_param->num_phy_desc); + asd_ha->hw_prof.enabled_phys = 0; + for (i = 0; i < phy_param->num_phy_desc; i++) { + struct asd_manuf_phy_desc *pd = &phy_param->phy_desc[i]; + switch (pd->state & 0xF) { + case MS_PHY_STATE_HIDDEN: + ASD_DPRINTK("ms: phy%d: HIDDEN\n", i); + continue; + case MS_PHY_STATE_REPORTED: + ASD_DPRINTK("ms: phy%d: REPORTED\n", i); + asd_ha->hw_prof.enabled_phys &= ~(1 << i); + rep_phys++; + continue; + case MS_PHY_STATE_ENABLED: + ASD_DPRINTK("ms: phy%d: ENABLED\n", i); + asd_ha->hw_prof.enabled_phys |= (1 << i); + en_phys++; + break; + } + asd_ha->hw_prof.phy_desc[i].phy_control_0 = pd->phy_control_0; + asd_ha->hw_prof.phy_desc[i].phy_control_1 = pd->phy_control_1; + asd_ha->hw_prof.phy_desc[i].phy_control_2 = pd->phy_control_2; + asd_ha->hw_prof.phy_desc[i].phy_control_3 = pd->phy_control_3; + } + asd_ha->hw_prof.max_phys = rep_phys + en_phys; + asd_ha->hw_prof.num_phys = en_phys; + ASD_DPRINTK("ms: max_phys:0x%x, num_phys:0x%x\n", + asd_ha->hw_prof.max_phys, asd_ha->hw_prof.num_phys); + ASD_DPRINTK("ms: enabled_phys:0x%x\n", asd_ha->hw_prof.enabled_phys); + return 0; +} + +static int asd_ms_get_connector_map(struct asd_ha_struct *asd_ha, + struct asd_manuf_sec *manuf_sec) +{ + struct asd_ms_conn_map *cm; + + cm = asd_find_ll_by_id(manuf_sec, 'M', 'C'); + if (!cm) { + ASD_DPRINTK("ms: no connector map found\n"); + return 0; + } + + if (cm->maj != 0) { + ASD_DPRINTK("ms: unsupported: connector map major version 0x%x" + "\n", cm->maj); + return -ENOENT; + } + + /* XXX */ + + return 0; +} + + +/** + * asd_process_ms - find and extract information from the manufacturing sector + * @asd_ha: pointer to the host adapter structure + * @flash_dir: pointer to the flash directory + */ +static int asd_process_ms(struct asd_ha_struct *asd_ha, + struct asd_flash_dir *flash_dir) +{ + int err; + struct asd_manuf_sec *manuf_sec; + u32 offs, size; + + err = asd_find_flash_de(flash_dir, FLASH_DE_MS, &offs, &size); + if (err) { + ASD_DPRINTK("Couldn't find the manuf. 
sector\n"); + goto out; + } + + if (size == 0) + goto out; + + err = -ENOMEM; + manuf_sec = kmalloc(size, GFP_KERNEL); + if (!manuf_sec) { + ASD_DPRINTK("no mem for manuf sector\n"); + goto out; + } + + err = asd_read_flash_seg(asd_ha, (void *)manuf_sec, offs, size); + if (err) { + ASD_DPRINTK("couldn't read manuf sector at 0x%x, size 0x%x\n", + offs, size); + goto out2; + } + + err = asd_validate_ms(manuf_sec); + if (err) { + ASD_DPRINTK("couldn't validate manuf sector\n"); + goto out2; + } + + err = asd_ms_get_sas_addr(asd_ha, manuf_sec); + if (err) { + ASD_DPRINTK("couldn't read the SAS_ADDR\n"); + goto out2; + } + ASD_DPRINTK("manuf sect SAS_ADDR %llx\n", + SAS_ADDR(asd_ha->hw_prof.sas_addr)); + + err = asd_ms_get_pcba_sn(asd_ha, manuf_sec); + if (err) { + ASD_DPRINTK("couldn't read the PCBA SN\n"); + goto out2; + } + ASD_DPRINTK("manuf sect PCBA SN %s\n", asd_ha->hw_prof.pcba_sn); + + err = asd_ms_get_phy_params(asd_ha, manuf_sec); + if (err) { + ASD_DPRINTK("ms: couldn't get phy parameters\n"); + goto out2; + } + + err = asd_ms_get_connector_map(asd_ha, manuf_sec); + if (err) { + ASD_DPRINTK("ms: couldn't get connector map\n"); + goto out2; + } + +out2: + kfree(manuf_sec); +out: + return err; +} + +static int asd_process_ctrla_phy_settings(struct asd_ha_struct *asd_ha, + struct asd_ctrla_phy_settings *ps) +{ + int i; + for (i = 0; i < ps->num_phys; i++) { + struct asd_ctrla_phy_entry *pe = &ps->phy_ent[i]; + + if (!PHY_ENABLED(asd_ha, i)) + continue; + if (*(u64 *)pe->sas_addr == 0) { + asd_ha->hw_prof.enabled_phys &= ~(1 << i); + continue; + } + /* This is the SAS address which should be sent in IDENTIFY. */ + memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr, pe->sas_addr, + SAS_ADDR_SIZE); + asd_ha->hw_prof.phy_desc[i].max_sas_lrate = + (pe->sas_link_rates & 0xF0) >> 4; + asd_ha->hw_prof.phy_desc[i].min_sas_lrate = + (pe->sas_link_rates & 0x0F); + asd_ha->hw_prof.phy_desc[i].max_sata_lrate = + (pe->sata_link_rates & 0xF0) >> 4; + asd_ha->hw_prof.phy_desc[i].min_sata_lrate = + (pe->sata_link_rates & 0x0F); + asd_ha->hw_prof.phy_desc[i].flags = pe->flags; + ASD_DPRINTK("ctrla: phy%d: sas_addr: %llx, sas rate:0x%x-0x%x," + " sata rate:0x%x-0x%x, flags:0x%x\n", + i, + SAS_ADDR(asd_ha->hw_prof.phy_desc[i].sas_addr), + asd_ha->hw_prof.phy_desc[i].max_sas_lrate, + asd_ha->hw_prof.phy_desc[i].min_sas_lrate, + asd_ha->hw_prof.phy_desc[i].max_sata_lrate, + asd_ha->hw_prof.phy_desc[i].min_sata_lrate, + asd_ha->hw_prof.phy_desc[i].flags); + } + + return 0; +} + +/** + * asd_process_ctrl_a_user - process CTRL-A user settings + * @asd_ha: pointer to the host adapter structure + * @flash_dir: pointer to the flash directory + */ +static int asd_process_ctrl_a_user(struct asd_ha_struct *asd_ha, + struct asd_flash_dir *flash_dir) +{ + int err, i; + u32 offs, size; + struct asd_ll_el *el = NULL; + struct asd_ctrla_phy_settings *ps; + struct asd_ctrla_phy_settings dflt_ps; + + err = asd_find_flash_de(flash_dir, FLASH_DE_CTRL_A_USER, &offs, &size); + if (err) { + ASD_DPRINTK("couldn't find CTRL-A user settings section\n"); + ASD_DPRINTK("Creating default CTRL-A user settings section\n"); + + dflt_ps.id0 = 'h'; + dflt_ps.num_phys = 8; + for (i =0; i < ASD_MAX_PHYS; i++) { + memcpy(dflt_ps.phy_ent[i].sas_addr, + asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE); + dflt_ps.phy_ent[i].sas_link_rates = 0x98; + dflt_ps.phy_ent[i].flags = 0x0; + dflt_ps.phy_ent[i].sata_link_rates = 0x0; + } + + size = sizeof(struct asd_ctrla_phy_settings); + ps = &dflt_ps; + goto out_process; + } + + if (size == 0) + goto out; + + err = 
-ENOMEM; + el = kmalloc(size, GFP_KERNEL); + if (!el) { + ASD_DPRINTK("no mem for ctrla user settings section\n"); + goto out; + } + + err = asd_read_flash_seg(asd_ha, (void *)el, offs, size); + if (err) { + ASD_DPRINTK("couldn't read ctrla phy settings section\n"); + goto out2; + } + + err = -ENOENT; + ps = asd_find_ll_by_id(el, 'h', 0xFF); + if (!ps) { + ASD_DPRINTK("couldn't find ctrla phy settings struct\n"); + goto out2; + } +out_process: + err = asd_process_ctrla_phy_settings(asd_ha, ps); + if (err) { + ASD_DPRINTK("couldn't process ctrla phy settings\n"); + goto out2; + } +out2: + kfree(el); +out: + return err; +} + +/** + * asd_read_flash - read flash memory + * @asd_ha: pointer to the host adapter structure + */ +int asd_read_flash(struct asd_ha_struct *asd_ha) +{ + int err; + struct asd_flash_dir *flash_dir; + + err = asd_flash_getid(asd_ha); + if (err) + return err; + + flash_dir = kmalloc(sizeof(*flash_dir), GFP_KERNEL); + if (!flash_dir) + return -ENOMEM; + + err = -ENOENT; + if (!asd_find_flash_dir(asd_ha, flash_dir)) { + ASD_DPRINTK("couldn't find flash directory\n"); + goto out; + } + + if (le32_to_cpu(flash_dir->rev) != 2) { + asd_printk("unsupported flash dir version:0x%x\n", + le32_to_cpu(flash_dir->rev)); + goto out; + } + + err = asd_process_ms(asd_ha, flash_dir); + if (err) { + ASD_DPRINTK("couldn't process manuf sector settings\n"); + goto out; + } + + err = asd_process_ctrl_a_user(asd_ha, flash_dir); + if (err) { + ASD_DPRINTK("couldn't process CTRL-A user settings\n"); + goto out; + } + +out: + kfree(flash_dir); + return err; +} + +/** + * asd_verify_flash_seg - verify data with flash memory + * @asd_ha: pointer to the host adapter structure + * @src: pointer to the source data to be verified + * @dest_offset: offset from flash memory + * @bytes_to_verify: total bytes to verify + */ +int asd_verify_flash_seg(struct asd_ha_struct *asd_ha, + const void *src, u32 dest_offset, u32 bytes_to_verify) +{ + const u8 *src_buf; + u8 flash_char; + int err; + u32 nv_offset, reg, i; + + reg = asd_ha->hw_prof.flash.bar; + src_buf = NULL; + + err = FLASH_OK; + nv_offset = dest_offset; + src_buf = (const u8 *)src; + for (i = 0; i < bytes_to_verify; i++) { + flash_char = asd_read_reg_byte(asd_ha, reg + nv_offset + i); + if (flash_char != src_buf[i]) { + err = FAIL_VERIFY; + break; + } + } + return err; +} + +/** + * asd_write_flash_seg - write data into flash memory + * @asd_ha: pointer to the host adapter structure + * @src: pointer to the source data to be written + * @dest_offset: offset from flash memory + * @bytes_to_write: total bytes to write + */ +int asd_write_flash_seg(struct asd_ha_struct *asd_ha, + const void *src, u32 dest_offset, u32 bytes_to_write) +{ + const u8 *src_buf; + u32 nv_offset, reg, i; + int err; + + reg = asd_ha->hw_prof.flash.bar; + src_buf = NULL; + + err = asd_check_flash_type(asd_ha); + if (err) { + ASD_DPRINTK("couldn't find the type of flash. err=%d\n", err); + return err; + } + + nv_offset = dest_offset; + err = asd_erase_nv_sector(asd_ha, nv_offset, bytes_to_write); + if (err) { + ASD_DPRINTK("Erase failed at offset:0x%x\n", + nv_offset); + return err; + } + + err = asd_reset_flash(asd_ha); + if (err) { + ASD_DPRINTK("couldn't reset flash. 
err=%d\n", err); + return err; + } + + src_buf = (const u8 *)src; + for (i = 0; i < bytes_to_write; i++) { + /* Setup program command sequence */ + switch (asd_ha->hw_prof.flash.method) { + case FLASH_METHOD_A: + { + asd_write_reg_byte(asd_ha, + (reg + 0xAAA), 0xAA); + asd_write_reg_byte(asd_ha, + (reg + 0x555), 0x55); + asd_write_reg_byte(asd_ha, + (reg + 0xAAA), 0xA0); + asd_write_reg_byte(asd_ha, + (reg + nv_offset + i), + (*(src_buf + i))); + break; + } + case FLASH_METHOD_B: + { + asd_write_reg_byte(asd_ha, + (reg + 0x555), 0xAA); + asd_write_reg_byte(asd_ha, + (reg + 0x2AA), 0x55); + asd_write_reg_byte(asd_ha, + (reg + 0x555), 0xA0); + asd_write_reg_byte(asd_ha, + (reg + nv_offset + i), + (*(src_buf + i))); + break; + } + default: + break; + } + if (asd_chk_write_status(asd_ha, + (nv_offset + i), 0) != 0) { + ASD_DPRINTK("aicx: Write failed at offset:0x%x\n", + reg + nv_offset + i); + return FAIL_WRITE_FLASH; + } + } + + err = asd_reset_flash(asd_ha); + if (err) { + ASD_DPRINTK("couldn't reset flash. err=%d\n", err); + return err; + } + return 0; +} + +int asd_chk_write_status(struct asd_ha_struct *asd_ha, + u32 sector_addr, u8 erase_flag) +{ + u32 reg; + u32 loop_cnt; + u8 nv_data1, nv_data2; + u8 toggle_bit1; + + /* + * Read from DQ2 requires sector address + * while it's dont care for DQ6 + */ + reg = asd_ha->hw_prof.flash.bar; + + for (loop_cnt = 0; loop_cnt < 50000; loop_cnt++) { + nv_data1 = asd_read_reg_byte(asd_ha, reg); + nv_data2 = asd_read_reg_byte(asd_ha, reg); + + toggle_bit1 = ((nv_data1 & FLASH_STATUS_BIT_MASK_DQ6) + ^ (nv_data2 & FLASH_STATUS_BIT_MASK_DQ6)); + + if (toggle_bit1 == 0) { + return 0; + } else { + if (nv_data2 & FLASH_STATUS_BIT_MASK_DQ5) { + nv_data1 = asd_read_reg_byte(asd_ha, + reg); + nv_data2 = asd_read_reg_byte(asd_ha, + reg); + toggle_bit1 = + ((nv_data1 & FLASH_STATUS_BIT_MASK_DQ6) + ^ (nv_data2 & FLASH_STATUS_BIT_MASK_DQ6)); + + if (toggle_bit1 == 0) + return 0; + } + } + + /* + * ERASE is a sector-by-sector operation and requires + * more time to finish while WRITE is byte-byte-byte + * operation and takes lesser time to finish. + * + * For some strange reason a reduced ERASE delay gives different + * behaviour across different spirit boards. Hence we set + * a optimum balance of 50mus for ERASE which works well + * across all boards. + */ + if (erase_flag) { + udelay(FLASH_STATUS_ERASE_DELAY_COUNT); + } else { + udelay(FLASH_STATUS_WRITE_DELAY_COUNT); + } + } + return -1; +} + +/** + * asd_erase_nv_sector - Erase the flash memory sectors. + * @asd_ha: pointer to the host adapter structure + * @flash_addr: pointer to offset from flash memory + * @size: total bytes to erase. + */ +int asd_erase_nv_sector(struct asd_ha_struct *asd_ha, u32 flash_addr, u32 size) +{ + u32 reg; + u32 sector_addr; + + reg = asd_ha->hw_prof.flash.bar; + + /* sector staring address */ + sector_addr = flash_addr & FLASH_SECTOR_SIZE_MASK; + + /* + * Erasing an flash sector needs to be done in six consecutive + * write cyles. 
+ */ + while (sector_addr < flash_addr+size) { + switch (asd_ha->hw_prof.flash.method) { + case FLASH_METHOD_A: + asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0xAA); + asd_write_reg_byte(asd_ha, (reg + 0x555), 0x55); + asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0x80); + asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0xAA); + asd_write_reg_byte(asd_ha, (reg + 0x555), 0x55); + asd_write_reg_byte(asd_ha, (reg + sector_addr), 0x30); + break; + case FLASH_METHOD_B: + asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA); + asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55); + asd_write_reg_byte(asd_ha, (reg + 0x555), 0x80); + asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA); + asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55); + asd_write_reg_byte(asd_ha, (reg + sector_addr), 0x30); + break; + default: + break; + } + + if (asd_chk_write_status(asd_ha, sector_addr, 1) != 0) + return FAIL_ERASE_FLASH; + + sector_addr += FLASH_SECTOR_SIZE; + } + + return 0; +} + +int asd_check_flash_type(struct asd_ha_struct *asd_ha) +{ + u8 manuf_id; + u8 dev_id; + u8 sec_prot; + u32 inc; + u32 reg; + int err; + + /* get Flash memory base address */ + reg = asd_ha->hw_prof.flash.bar; + + /* Determine flash info */ + err = asd_reset_flash(asd_ha); + if (err) { + ASD_DPRINTK("couldn't reset flash. err=%d\n", err); + return err; + } + + asd_ha->hw_prof.flash.method = FLASH_METHOD_UNKNOWN; + asd_ha->hw_prof.flash.manuf = FLASH_MANUF_ID_UNKNOWN; + asd_ha->hw_prof.flash.dev_id = FLASH_DEV_ID_UNKNOWN; + + /* Get flash info. This would most likely be AMD Am29LV family flash. + * First try the sequence for word mode. It is the same as for + * 008B (byte mode only), 160B (word mode) and 800D (word mode). + */ + inc = asd_ha->hw_prof.flash.wide ? 2 : 1; + asd_write_reg_byte(asd_ha, reg + 0xAAA, 0xAA); + asd_write_reg_byte(asd_ha, reg + 0x555, 0x55); + asd_write_reg_byte(asd_ha, reg + 0xAAA, 0x90); + manuf_id = asd_read_reg_byte(asd_ha, reg); + dev_id = asd_read_reg_byte(asd_ha, reg + inc); + sec_prot = asd_read_reg_byte(asd_ha, reg + inc + inc); + /* Get out of autoselect mode. */ + err = asd_reset_flash(asd_ha); + if (err) { + ASD_DPRINTK("couldn't reset flash. err=%d\n", err); + return err; + } + ASD_DPRINTK("Flash MethodA manuf_id(0x%x) dev_id(0x%x) " + "sec_prot(0x%x)\n", manuf_id, dev_id, sec_prot); + err = asd_reset_flash(asd_ha); + if (err != 0) + return err; + + switch (manuf_id) { + case FLASH_MANUF_ID_AMD: + switch (sec_prot) { + case FLASH_DEV_ID_AM29LV800DT: + case FLASH_DEV_ID_AM29LV640MT: + case FLASH_DEV_ID_AM29F800B: + asd_ha->hw_prof.flash.method = FLASH_METHOD_A; + break; + default: + break; + } + break; + case FLASH_MANUF_ID_ST: + switch (sec_prot) { + case FLASH_DEV_ID_STM29W800DT: + case FLASH_DEV_ID_STM29LV640: + asd_ha->hw_prof.flash.method = FLASH_METHOD_A; + break; + default: + break; + } + break; + case FLASH_MANUF_ID_FUJITSU: + switch (sec_prot) { + case FLASH_DEV_ID_MBM29LV800TE: + case FLASH_DEV_ID_MBM29DL800TA: + asd_ha->hw_prof.flash.method = FLASH_METHOD_A; + break; + } + break; + case FLASH_MANUF_ID_MACRONIX: + switch (sec_prot) { + case FLASH_DEV_ID_MX29LV800BT: + asd_ha->hw_prof.flash.method = FLASH_METHOD_A; + break; + } + break; + } + + if (asd_ha->hw_prof.flash.method == FLASH_METHOD_UNKNOWN) { + err = asd_reset_flash(asd_ha); + if (err) { + ASD_DPRINTK("couldn't reset flash. 
err=%d\n", err); + return err; + } + + /* Issue Unlock sequence for AM29LV008BT */ + asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA); + asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55); + asd_write_reg_byte(asd_ha, (reg + 0x555), 0x90); + manuf_id = asd_read_reg_byte(asd_ha, reg); + dev_id = asd_read_reg_byte(asd_ha, reg + inc); + sec_prot = asd_read_reg_byte(asd_ha, reg + inc + inc); + + ASD_DPRINTK("Flash MethodB manuf_id(0x%x) dev_id(0x%x) sec_prot" + "(0x%x)\n", manuf_id, dev_id, sec_prot); + + err = asd_reset_flash(asd_ha); + if (err != 0) { + ASD_DPRINTK("couldn't reset flash. err=%d\n", err); + return err; + } + + switch (manuf_id) { + case FLASH_MANUF_ID_AMD: + switch (dev_id) { + case FLASH_DEV_ID_AM29LV008BT: + asd_ha->hw_prof.flash.method = FLASH_METHOD_B; + break; + default: + break; + } + break; + case FLASH_MANUF_ID_ST: + switch (dev_id) { + case FLASH_DEV_ID_STM29008: + asd_ha->hw_prof.flash.method = FLASH_METHOD_B; + break; + default: + break; + } + break; + case FLASH_MANUF_ID_FUJITSU: + switch (dev_id) { + case FLASH_DEV_ID_MBM29LV008TA: + asd_ha->hw_prof.flash.method = FLASH_METHOD_B; + break; + } + break; + case FLASH_MANUF_ID_INTEL: + switch (dev_id) { + case FLASH_DEV_ID_I28LV00TAT: + asd_ha->hw_prof.flash.method = FLASH_METHOD_B; + break; + } + break; + case FLASH_MANUF_ID_MACRONIX: + switch (dev_id) { + case FLASH_DEV_ID_I28LV00TAT: + asd_ha->hw_prof.flash.method = FLASH_METHOD_B; + break; + } + break; + default: + return FAIL_FIND_FLASH_ID; + } + } + + if (asd_ha->hw_prof.flash.method == FLASH_METHOD_UNKNOWN) + return FAIL_FIND_FLASH_ID; + + asd_ha->hw_prof.flash.manuf = manuf_id; + asd_ha->hw_prof.flash.dev_id = dev_id; + asd_ha->hw_prof.flash.sec_prot = sec_prot; + return 0; +} diff --git a/drivers/scsi/aic94xx/aic94xx_sds.h b/drivers/scsi/aic94xx/aic94xx_sds.h new file mode 100644 index 000000000..80f3c4782 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_sds.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Aic94xx SAS/SATA driver hardware interface header file. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. 
+ * Copyright (C) 2005 Gilbert Wu + */ +#ifndef _AIC94XX_SDS_H_ +#define _AIC94XX_SDS_H_ + +enum { + FLASH_METHOD_UNKNOWN, + FLASH_METHOD_A, + FLASH_METHOD_B +}; + +#define FLASH_MANUF_ID_AMD 0x01 +#define FLASH_MANUF_ID_ST 0x20 +#define FLASH_MANUF_ID_FUJITSU 0x04 +#define FLASH_MANUF_ID_MACRONIX 0xC2 +#define FLASH_MANUF_ID_INTEL 0x89 +#define FLASH_MANUF_ID_UNKNOWN 0xFF + +#define FLASH_DEV_ID_AM29LV008BT 0x3E +#define FLASH_DEV_ID_AM29LV800DT 0xDA +#define FLASH_DEV_ID_STM29W800DT 0xD7 +#define FLASH_DEV_ID_STM29LV640 0xDE +#define FLASH_DEV_ID_STM29008 0xEA +#define FLASH_DEV_ID_MBM29LV800TE 0xDA +#define FLASH_DEV_ID_MBM29DL800TA 0x4A +#define FLASH_DEV_ID_MBM29LV008TA 0x3E +#define FLASH_DEV_ID_AM29LV640MT 0x7E +#define FLASH_DEV_ID_AM29F800B 0xD6 +#define FLASH_DEV_ID_MX29LV800BT 0xDA +#define FLASH_DEV_ID_MX29LV008CT 0xDA +#define FLASH_DEV_ID_I28LV00TAT 0x3E +#define FLASH_DEV_ID_UNKNOWN 0xFF + +/* status bit mask values */ +#define FLASH_STATUS_BIT_MASK_DQ6 0x40 +#define FLASH_STATUS_BIT_MASK_DQ5 0x20 +#define FLASH_STATUS_BIT_MASK_DQ2 0x04 + +/* minimum value in micro seconds needed for checking status */ +#define FLASH_STATUS_ERASE_DELAY_COUNT 50 +#define FLASH_STATUS_WRITE_DELAY_COUNT 25 + +#define FLASH_SECTOR_SIZE 0x010000 +#define FLASH_SECTOR_SIZE_MASK 0xffff0000 + +#define FLASH_OK 0x000000 +#define FAIL_OPEN_BIOS_FILE 0x000100 +#define FAIL_CHECK_PCI_ID 0x000200 +#define FAIL_CHECK_SUM 0x000300 +#define FAIL_UNKNOWN 0x000400 +#define FAIL_VERIFY 0x000500 +#define FAIL_RESET_FLASH 0x000600 +#define FAIL_FIND_FLASH_ID 0x000700 +#define FAIL_ERASE_FLASH 0x000800 +#define FAIL_WRITE_FLASH 0x000900 +#define FAIL_FILE_SIZE 0x000a00 +#define FAIL_PARAMETERS 0x000b00 +#define FAIL_OUT_MEMORY 0x000c00 +#define FLASH_IN_PROGRESS 0x001000 + +struct controller_id { + u32 vendor; /* PCI Vendor ID */ + u32 device; /* PCI Device ID */ + u32 sub_vendor; /* PCI Subvendor ID */ + u32 sub_device; /* PCI Subdevice ID */ +}; + +struct image_info { + u32 ImageId; /* Identifies the image */ + u32 ImageOffset; /* Offset the beginning of the file */ + u32 ImageLength; /* length of the image */ + u32 ImageChecksum; /* Image checksum */ + u32 ImageVersion; /* Version of the image, could be build number */ +}; + +struct bios_file_header { + u8 signature[32]; /* Signature/Cookie to identify the file */ + u32 checksum; /*Entire file checksum with this field zero */ + u32 antidote; /* Entire file checksum with this field 0xFFFFFFFF */ + struct controller_id contrl_id; /*PCI id to identify the controller */ + u32 filelen; /*Length of the entire file*/ + u32 chunk_num; /*The chunk/part number for multiple Image files */ + u32 total_chunks; /*Total number of chunks/parts in the image file */ + u32 num_images; /* Number of images in the file */ + u32 build_num; /* Build number of this image */ + struct image_info image_header; +}; + +int asd_verify_flash_seg(struct asd_ha_struct *asd_ha, + const void *src, u32 dest_offset, u32 bytes_to_verify); +int asd_write_flash_seg(struct asd_ha_struct *asd_ha, + const void *src, u32 dest_offset, u32 bytes_to_write); +int asd_chk_write_status(struct asd_ha_struct *asd_ha, + u32 sector_addr, u8 erase_flag); +int asd_check_flash_type(struct asd_ha_struct *asd_ha); +int asd_erase_nv_sector(struct asd_ha_struct *asd_ha, + u32 flash_addr, u32 size); +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c new file mode 100644 index 000000000..c0f685c86 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_seq.c @@ -0,0 +1,1401 @@ +// 
SPDX-License-Identifier: GPL-2.0-only +/* + * Aic94xx SAS/SATA driver sequencer interface. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * Parts of this code adapted from David Chaw's adp94xx_seq.c. + */ + +#include +#include +#include +#include +#include +#include "aic94xx_reg.h" +#include "aic94xx_hwi.h" + +#include "aic94xx_seq.h" +#include "aic94xx_dump.h" + +/* It takes no more than 0.05 us for an instruction + * to complete. So waiting for 1 us should be more than + * plenty. + */ +#define PAUSE_DELAY 1 +#define PAUSE_TRIES 1000 + +static const struct firmware *sequencer_fw; +static u16 cseq_vecs[CSEQ_NUM_VECS], lseq_vecs[LSEQ_NUM_VECS], mode2_task, + cseq_idle_loop, lseq_idle_loop; +static const u8 *cseq_code, *lseq_code; +static u32 cseq_code_size, lseq_code_size; + +static u16 first_scb_site_no = 0xFFFF; +static u16 last_scb_site_no; + +/* ---------- Pause/Unpause CSEQ/LSEQ ---------- */ + +/** + * asd_pause_cseq - pause the central sequencer + * @asd_ha: pointer to host adapter structure + * + * Return 0 on success, negative on failure. + */ +static int asd_pause_cseq(struct asd_ha_struct *asd_ha) +{ + int count = PAUSE_TRIES; + u32 arp2ctl; + + arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); + if (arp2ctl & PAUSED) + return 0; + + asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl | EPAUSE); + do { + arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); + if (arp2ctl & PAUSED) + return 0; + udelay(PAUSE_DELAY); + } while (--count > 0); + + ASD_DPRINTK("couldn't pause CSEQ\n"); + return -1; +} + +/** + * asd_unpause_cseq - unpause the central sequencer. + * @asd_ha: pointer to host adapter structure. + * + * Return 0 on success, negative on error. + */ +static int asd_unpause_cseq(struct asd_ha_struct *asd_ha) +{ + u32 arp2ctl; + int count = PAUSE_TRIES; + + arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); + if (!(arp2ctl & PAUSED)) + return 0; + + asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl & ~EPAUSE); + do { + arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); + if (!(arp2ctl & PAUSED)) + return 0; + udelay(PAUSE_DELAY); + } while (--count > 0); + + ASD_DPRINTK("couldn't unpause the CSEQ\n"); + return -1; +} + +/** + * asd_seq_pause_lseq - pause a link sequencer + * @asd_ha: pointer to a host adapter structure + * @lseq: link sequencer of interest + * + * Return 0 on success, negative on error. + */ +static int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq) +{ + u32 arp2ctl; + int count = PAUSE_TRIES; + + arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); + if (arp2ctl & PAUSED) + return 0; + + asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl | EPAUSE); + do { + arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); + if (arp2ctl & PAUSED) + return 0; + udelay(PAUSE_DELAY); + } while (--count > 0); + + ASD_DPRINTK("couldn't pause LSEQ %d\n", lseq); + return -1; +} + +/** + * asd_pause_lseq - pause the link sequencer(s) + * @asd_ha: pointer to host adapter structure + * @lseq_mask: mask of link sequencers of interest + * + * Return 0 on success, negative on failure. + */ +static int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask) +{ + int lseq; + int err = 0; + + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + err = asd_seq_pause_lseq(asd_ha, lseq); + if (err) + return err; + } + + return err; +} + +/** + * asd_seq_unpause_lseq - unpause a link sequencer + * @asd_ha: pointer to host adapter structure + * @lseq: link sequencer of interest + * + * Return 0 on success, negative on error. 
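The pause/unpause helpers in this file all follow the same pattern: flip a control bit, then poll the status register until the expected flag appears or a bounded number of tries runs out. Below is a minimal, self-contained sketch of that idiom in plain C; the register, the bit names and the delay stub are stand-ins for illustration, not the driver's real ARP2CTL interface.

#include <stdint.h>
#include <stdio.h>

#define PAUSED_BIT    0x00000001u  /* assumed status flag, not the real PAUSED mask */
#define EPAUSE_BIT    0x00000002u  /* assumed control bit, not the real EPAUSE mask */
#define POLL_TRIES    1000
#define POLL_DELAY_US 1

/* Stand-in for a memory-mapped 32-bit control/status register. */
static uint32_t fake_reg;

static uint32_t reg_read(void) { return fake_reg; }

static void reg_write(uint32_t v)
{
        fake_reg = v;
        /* Pretend the hardware acknowledges a pause request at once. */
        if (v & EPAUSE_BIT)
                fake_reg |= PAUSED_BIT;
}

static void delay_us(unsigned int us)
{
        (void)us;       /* a real implementation would busy-wait or sleep */
}

/* Set a control bit, then poll for a status bit with a bounded retry count. */
static int set_and_poll(uint32_t ctl_bit, uint32_t status_bit)
{
        int tries = POLL_TRIES;
        uint32_t v = reg_read();

        if (v & status_bit)             /* already in the requested state */
                return 0;

        reg_write(v | ctl_bit);
        do {
                v = reg_read();
                if (v & status_bit)
                        return 0;
                delay_us(POLL_DELAY_US);
        } while (--tries > 0);

        return -1;                      /* gave up: report a timeout */
}

int main(void)
{
        printf("pause request %s\n",
               set_and_poll(EPAUSE_BIT, PAUSED_BIT) ? "timed out" : "acknowledged");
        return 0;
}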
+ */ +static int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq) +{ + u32 arp2ctl; + int count = PAUSE_TRIES; + + arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); + if (!(arp2ctl & PAUSED)) + return 0; + + asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl & ~EPAUSE); + do { + arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); + if (!(arp2ctl & PAUSED)) + return 0; + udelay(PAUSE_DELAY); + } while (--count > 0); + + ASD_DPRINTK("couldn't unpause LSEQ %d\n", lseq); + return 0; +} + + +/* ---------- Downloading CSEQ/LSEQ microcode ---------- */ + +static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog, + u32 size) +{ + u32 addr = CSEQ_RAM_REG_BASE_ADR; + const u32 *prog = (u32 *) _prog; + u32 i; + + for (i = 0; i < size; i += 4, prog++, addr += 4) { + u32 val = asd_read_reg_dword(asd_ha, addr); + + if (le32_to_cpu(*prog) != val) { + asd_printk("%s: cseq verify failed at %u " + "read:0x%x, wanted:0x%x\n", + pci_name(asd_ha->pcidev), + i, val, le32_to_cpu(*prog)); + return -1; + } + } + ASD_DPRINTK("verified %d bytes, passed\n", size); + return 0; +} + +/** + * asd_verify_lseq - verify the microcode of a link sequencer + * @asd_ha: pointer to host adapter structure + * @_prog: pointer to the microcode + * @size: size of the microcode in bytes + * @lseq: link sequencer of interest + * + * The link sequencer code is accessed in 4 KB pages, which are selected + * by setting LmRAMPAGE (bits 8 and 9) of the LmBISTCTL1 register. + * The 10 KB LSEQm instruction code is mapped, page at a time, at + * LmSEQRAM address. + */ +static int asd_verify_lseq(struct asd_ha_struct *asd_ha, const u8 *_prog, + u32 size, int lseq) +{ +#define LSEQ_CODEPAGE_SIZE 4096 + int pages = (size + LSEQ_CODEPAGE_SIZE - 1) / LSEQ_CODEPAGE_SIZE; + u32 page; + const u32 *prog = (u32 *) _prog; + + for (page = 0; page < pages; page++) { + u32 i; + + asd_write_reg_dword(asd_ha, LmBISTCTL1(lseq), + page << LmRAMPAGE_LSHIFT); + for (i = 0; size > 0 && i < LSEQ_CODEPAGE_SIZE; + i += 4, prog++, size-=4) { + + u32 val = asd_read_reg_dword(asd_ha, LmSEQRAM(lseq)+i); + + if (le32_to_cpu(*prog) != val) { + asd_printk("%s: LSEQ%d verify failed " + "page:%d, offs:%d\n", + pci_name(asd_ha->pcidev), + lseq, page, i); + return -1; + } + } + } + ASD_DPRINTK("LSEQ%d verified %d bytes, passed\n", lseq, + (int)((u8 *)prog-_prog)); + return 0; +} + +/** + * asd_verify_seq -- verify CSEQ/LSEQ microcode + * @asd_ha: pointer to host adapter structure + * @prog: pointer to microcode + * @size: size of the microcode + * @lseq_mask: if 0, verify CSEQ microcode, else mask of LSEQs of interest + * + * Return 0 if microcode is correct, negative on mismatch. 
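The comment above describes reading the link sequencer RAM through a 4 KB window selected by a page register. The following standalone sketch shows only the page/offset arithmetic of such a paged verify; the RAM window is modeled as a plain array, the page-select write is a stub, and (unlike the driver) no little-endian conversion is performed.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define CODEPAGE_SIZE 4096u     /* size of the mapped window, as in the comment above */

/* Stand-in backing store: pretend this is the sequencer instruction RAM. */
static uint8_t seq_ram[10 * 1024];
static unsigned int current_page;

static void select_page(unsigned int page)      /* models the page-select register write */
{
        current_page = page;
}

static uint32_t window_read32(uint32_t offs)    /* models a read through the mapped window */
{
        uint32_t v;

        memcpy(&v, &seq_ram[current_page * CODEPAGE_SIZE + offs], 4);
        return v;
}

/* Verify an image against the paged window, one dword at a time. */
static int verify_paged(const uint8_t *img, uint32_t size)
{
        uint32_t pages = (size + CODEPAGE_SIZE - 1) / CODEPAGE_SIZE;
        uint32_t page, done = 0;

        for (page = 0; page < pages; page++) {
                uint32_t offs;

                select_page(page);
                for (offs = 0; done < size && offs < CODEPAGE_SIZE;
                     offs += 4, done += 4) {
                        uint32_t want;

                        memcpy(&want, img + done, 4);
                        if (window_read32(offs) != want)
                                return -1;      /* mismatch at (page, offs) */
                }
        }
        return 0;
}

int main(void)
{
        static uint8_t img[10 * 1024];

        memset(img, 0xA5, sizeof(img));
        memcpy(seq_ram, img, sizeof(img));      /* "download" the image first */
        printf("verify: %d\n", verify_paged(img, sizeof(img)));
        return 0;
}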
+ */ +static int asd_verify_seq(struct asd_ha_struct *asd_ha, const u8 *prog, + u32 size, u8 lseq_mask) +{ + if (lseq_mask == 0) + return asd_verify_cseq(asd_ha, prog, size); + else { + int lseq, err; + + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + err = asd_verify_lseq(asd_ha, prog, size, lseq); + if (err) + return err; + } + } + + return 0; +} +#define ASD_DMA_MODE_DOWNLOAD +#ifdef ASD_DMA_MODE_DOWNLOAD +/* This is the size of the CSEQ Mapped instruction page */ +#define MAX_DMA_OVLY_COUNT ((1U << 14)-1) +static int asd_download_seq(struct asd_ha_struct *asd_ha, + const u8 * const prog, u32 size, u8 lseq_mask) +{ + u32 comstaten; + u32 reg; + int page; + const int pages = (size + MAX_DMA_OVLY_COUNT - 1) / MAX_DMA_OVLY_COUNT; + struct asd_dma_tok *token; + int err = 0; + + if (size % 4) { + asd_printk("sequencer program not multiple of 4\n"); + return -1; + } + + asd_pause_cseq(asd_ha); + asd_pause_lseq(asd_ha, 0xFF); + + /* save, disable and clear interrupts */ + comstaten = asd_read_reg_dword(asd_ha, COMSTATEN); + asd_write_reg_dword(asd_ha, COMSTATEN, 0); + asd_write_reg_dword(asd_ha, COMSTAT, COMSTAT_MASK); + + asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN); + asd_write_reg_dword(asd_ha, CHIMINT, CHIMINT_MASK); + + token = asd_alloc_coherent(asd_ha, MAX_DMA_OVLY_COUNT, GFP_KERNEL); + if (!token) { + asd_printk("out of memory for dma SEQ download\n"); + err = -ENOMEM; + goto out; + } + ASD_DPRINTK("dma-ing %d bytes\n", size); + + for (page = 0; page < pages; page++) { + int i; + u32 left = min(size-page*MAX_DMA_OVLY_COUNT, + (u32)MAX_DMA_OVLY_COUNT); + + memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left); + asd_write_reg_addr(asd_ha, OVLYDMAADR, token->dma_handle); + asd_write_reg_dword(asd_ha, OVLYDMACNT, left); + reg = !page ? RESETOVLYDMA : 0; + reg |= (STARTOVLYDMA | OVLYHALTERR); + reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ); + /* Start DMA. */ + asd_write_reg_dword(asd_ha, OVLYDMACTL, reg); + + for (i = PAUSE_TRIES*100; i > 0; i--) { + u32 dmadone = asd_read_reg_dword(asd_ha, OVLYDMACTL); + if (!(dmadone & OVLYDMAACT)) + break; + udelay(PAUSE_DELAY); + } + } + + reg = asd_read_reg_dword(asd_ha, COMSTAT); + if (!(reg & OVLYDMADONE) || (reg & OVLYERR) + || (asd_read_reg_dword(asd_ha, CHIMINT) & DEVEXCEPT_MASK)){ + asd_printk("%s: error DMA-ing sequencer code\n", + pci_name(asd_ha->pcidev)); + err = -ENODEV; + } + + asd_free_coherent(asd_ha, token); + out: + asd_write_reg_dword(asd_ha, COMSTATEN, comstaten); + + return err ? : asd_verify_seq(asd_ha, prog, size, lseq_mask); +} +#else /* ASD_DMA_MODE_DOWNLOAD */ +static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 *_prog, + u32 size, u8 lseq_mask) +{ + int i; + u32 reg = 0; + const u32 *prog = (u32 *) _prog; + + if (size % 4) { + asd_printk("sequencer program not multiple of 4\n"); + return -1; + } + + asd_pause_cseq(asd_ha); + asd_pause_lseq(asd_ha, 0xFF); + + reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ); + reg |= PIOCMODE; + + asd_write_reg_dword(asd_ha, OVLYDMACNT, size); + asd_write_reg_dword(asd_ha, OVLYDMACTL, reg); + + ASD_DPRINTK("downloading %s sequencer%s in PIO mode...\n", + lseq_mask ? "LSEQ" : "CSEQ", lseq_mask ? 
"s" : ""); + + for (i = 0; i < size; i += 4, prog++) + asd_write_reg_dword(asd_ha, SPIODATA, *prog); + + reg = (reg & ~PIOCMODE) | OVLYHALTERR; + asd_write_reg_dword(asd_ha, OVLYDMACTL, reg); + + return asd_verify_seq(asd_ha, _prog, size, lseq_mask); +} +#endif /* ASD_DMA_MODE_DOWNLOAD */ + +/** + * asd_seq_download_seqs - download the sequencer microcode + * @asd_ha: pointer to host adapter structure + * + * Download the central and link sequencer microcode. + */ +static int asd_seq_download_seqs(struct asd_ha_struct *asd_ha) +{ + int err; + + if (!asd_ha->hw_prof.enabled_phys) { + asd_printk("%s: no enabled phys!\n", pci_name(asd_ha->pcidev)); + return -ENODEV; + } + + /* Download the CSEQ */ + ASD_DPRINTK("downloading CSEQ...\n"); + err = asd_download_seq(asd_ha, cseq_code, cseq_code_size, 0); + if (err) { + asd_printk("CSEQ download failed:%d\n", err); + return err; + } + + /* Download the Link Sequencers code. All of the Link Sequencers + * microcode can be downloaded at the same time. + */ + ASD_DPRINTK("downloading LSEQs...\n"); + err = asd_download_seq(asd_ha, lseq_code, lseq_code_size, + asd_ha->hw_prof.enabled_phys); + if (err) { + /* Try it one at a time */ + u8 lseq; + u8 lseq_mask = asd_ha->hw_prof.enabled_phys; + + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + err = asd_download_seq(asd_ha, lseq_code, + lseq_code_size, 1<> 8; + asd_write_reg_byte(asd_ha, CSEQ_FREE_SCB_MASK, (u8)cmdctx); + } + asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_HEAD, + first_scb_site_no); + asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_TAIL, + last_scb_site_no); + asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_TAIL, 0xFFFF); + + /* CSEQ Mode independent, page 7 setup. */ + asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE, 0); + asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE+4, 0); + asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT, 0); + asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT+4, 0); + asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_TAIL, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_NEED_EMPTY_SCB, 0); + asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_HEAD, 0); + asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_TAIL, 0); + asd_write_reg_byte(asd_ha, CSEQ_EMPTY_SCB_OFFSET, 0); + asd_write_reg_word(asd_ha, CSEQ_PRIMITIVE_DATA, 0); + asd_write_reg_dword(asd_ha, CSEQ_TIMEOUT_CONST, 0); +} + +/** + * asd_init_cseq_mdp - initialize CSEQ Mode dependent pages + * @asd_ha: pointer to host adapter structure + */ +static void asd_init_cseq_mdp(struct asd_ha_struct *asd_ha) +{ + int i; + int moffs; + + moffs = CSEQ_PAGE_SIZE * 2; + + /* CSEQ Mode dependent, modes 0-7, page 0 setup. */ + for (i = 0; i < 8; i++) { + asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SINDEX, 0); + asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCBPTR, 0); + asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_TAIL, 0xFFFF); + asd_write_reg_byte(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCRPAGE, 0); + } + + /* CSEQ Mode dependent, mode 0-7, page 1 and 2 shall be ignored. */ + + /* CSEQ Mode dependent, mode 8, page 0 setup. 
*/ + asd_write_reg_word(asd_ha, CSEQ_RET_ADDR, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_RET_SCBPTR, 0); + asd_write_reg_word(asd_ha, CSEQ_SAVE_SCBPTR, 0); + asd_write_reg_word(asd_ha, CSEQ_EMPTY_TRANS_CTX, 0); + asd_write_reg_word(asd_ha, CSEQ_RESP_LEN, 0); + asd_write_reg_word(asd_ha, CSEQ_TMF_SCBPTR, 0); + asd_write_reg_word(asd_ha, CSEQ_GLOBAL_PREV_SCB, 0); + asd_write_reg_word(asd_ha, CSEQ_GLOBAL_HEAD, 0); + asd_write_reg_word(asd_ha, CSEQ_CLEAR_LU_HEAD, 0); + asd_write_reg_byte(asd_ha, CSEQ_TMF_OPCODE, 0); + asd_write_reg_byte(asd_ha, CSEQ_SCRATCH_FLAGS, 0); + asd_write_reg_word(asd_ha, CSEQ_HSB_SITE, 0); + asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_SCB_SITE, + (u16)last_scb_site_no+1); + asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_DDB_SITE, + (u16)asd_ha->hw_prof.max_ddbs); + + /* CSEQ Mode dependent, mode 8, page 1 setup. */ + asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR, 0); + asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR + 4, 0); + asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK, 0); + asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK + 4, 0); + + /* CSEQ Mode dependent, mode 8, page 2 setup. */ + /* Tell the sequencer the bus address of the first SCB. */ + asd_write_reg_addr(asd_ha, CSEQ_HQ_NEW_POINTER, + asd_ha->seq.next_scb.dma_handle); + ASD_DPRINTK("First SCB dma_handle: 0x%llx\n", + (unsigned long long)asd_ha->seq.next_scb.dma_handle); + + /* Tell the sequencer the first Done List entry address. */ + asd_write_reg_addr(asd_ha, CSEQ_HQ_DONE_BASE, + asd_ha->seq.actual_dl->dma_handle); + + /* Initialize the Q_DONE_POINTER with the least significant + * 4 bytes of the first Done List address. */ + asd_write_reg_dword(asd_ha, CSEQ_HQ_DONE_POINTER, + ASD_BUSADDR_LO(asd_ha->seq.actual_dl->dma_handle)); + + asd_write_reg_byte(asd_ha, CSEQ_HQ_DONE_PASS, ASD_DEF_DL_TOGGLE); + + /* CSEQ Mode dependent, mode 8, page 3 shall be ignored. */ +} + +/** + * asd_init_cseq_scratch -- setup and init CSEQ + * @asd_ha: pointer to host adapter structure + * + * Setup and initialize Central sequencers. Initialize the mode + * independent and dependent scratch page to the default settings. + */ +static void asd_init_cseq_scratch(struct asd_ha_struct *asd_ha) +{ + asd_init_cseq_mip(asd_ha); + asd_init_cseq_mdp(asd_ha); +} + +/** + * asd_init_lseq_mip -- initialize LSEQ Mode independent pages 0-3 + * @asd_ha: pointer to host adapter structure + * @lseq: link sequencer + */ +static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq) +{ + int i; + + /* LSEQ Mode independent page 0 setup. */ + asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_HEAD(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_TAIL(lseq), 0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_LINK_NUMBER(lseq), lseq); + asd_write_reg_byte(asd_ha, LmSEQ_SCRATCH_FLAGS(lseq), + ASD_NOTIFY_ENABLE_SPINUP); + asd_write_reg_dword(asd_ha, LmSEQ_CONNECTION_STATE(lseq),0x08000000); + asd_write_reg_word(asd_ha, LmSEQ_CONCTL(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_CONSTAT(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_CONNECTION_MODES(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_REG1_ISR(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_REG2_ISR(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_REG3_ISR(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq)+4, 0); + + /* LSEQ Mode independent page 1 setup. 
*/ + asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR0(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR1(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR2(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR3(lseq), 0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE0(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE1(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE2(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE3(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_HEAD(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_TAIL(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_BUF_AVAIL(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_TIMEOUT_CONST(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_SINDEX(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_DINDEX(lseq), 0); + + /* LSEQ Mode Independent page 2 setup. */ + asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR0(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR1(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR2(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR3(lseq), 0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD0(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD1(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD2(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD3(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_HEAD(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_TAIL(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_BUFS_AVAIL(lseq), 0); + for (i = 0; i < 12; i += 4) + asd_write_reg_dword(asd_ha, LmSEQ_ATA_SCR_REGS(lseq) + i, 0); + + /* LSEQ Mode Independent page 3 setup. */ + + /* Device present timer timeout */ + asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TMR_TOUT_CONST(lseq), + ASD_DEV_PRESENT_TIMEOUT); + + /* SATA interlock timer disabled */ + asd_write_reg_dword(asd_ha, LmSEQ_SATA_INTERLOCK_TIMEOUT(lseq), + ASD_SATA_INTERLOCK_TIMEOUT); + + /* STP shutdown timer timeout constant, IGNORED by the sequencer, + * always 0. */ + asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMEOUT(lseq), + ASD_STP_SHUTDOWN_TIMEOUT); + + asd_write_reg_dword(asd_ha, LmSEQ_SRST_ASSERT_TIMEOUT(lseq), + ASD_SRST_ASSERT_TIMEOUT); + + asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMEOUT(lseq), + ASD_RCV_FIS_TIMEOUT); + + asd_write_reg_dword(asd_ha, LmSEQ_ONE_MILLISEC_TIMEOUT(lseq), + ASD_ONE_MILLISEC_TIMEOUT); + + /* COM_INIT timer */ + asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(lseq), + ASD_TEN_MILLISEC_TIMEOUT); + + asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMEOUT(lseq), + ASD_SMP_RCV_TIMEOUT); +} + +/** + * asd_init_lseq_mdp -- initialize LSEQ mode dependent pages. + * @asd_ha: pointer to host adapter structure + * @lseq: link sequencer + */ +static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq) +{ + int i; + u32 moffs; + u16 ret_addr[] = { + 0xFFFF, /* mode 0 */ + 0xFFFF, /* mode 1 */ + mode2_task, /* mode 2 */ + 0, + 0xFFFF, /* mode 4/5 */ + 0xFFFF, /* mode 4/5 */ + }; + + /* + * Mode 0,1,2 and 4/5 have common field on page 0 for the first + * 14 bytes. 
+ */ + for (i = 0; i < 3; i++) { + moffs = i * LSEQ_MODE_SCRATCH_SIZE; + asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+moffs, + ret_addr[i]); + asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+moffs, 0); + asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+moffs, 0); + asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+moffs,0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+moffs,0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+moffs,0); + asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+moffs,0); + } + /* + * Mode 5 page 0 overlaps the same scratch page with Mode 0 page 3. + */ + asd_write_reg_word(asd_ha, + LmSEQ_RET_ADDR(lseq)+LSEQ_MODE5_PAGE0_OFFSET, + ret_addr[5]); + asd_write_reg_word(asd_ha, + LmSEQ_REG0_MODE(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0); + asd_write_reg_word(asd_ha, + LmSEQ_MODE_FLAGS(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0); + asd_write_reg_word(asd_ha, + LmSEQ_RET_ADDR2(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF); + asd_write_reg_word(asd_ha, + LmSEQ_RET_ADDR1(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF); + asd_write_reg_byte(asd_ha, + LmSEQ_OPCODE_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0); + asd_write_reg_word(asd_ha, + LmSEQ_DATA_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0); + + /* LSEQ Mode dependent 0, page 0 setup. */ + asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_DDB_SITE(lseq), + (u16)asd_ha->hw_prof.max_ddbs); + asd_write_reg_word(asd_ha, LmSEQ_EMPTY_TRANS_CTX(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_RESP_LEN(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_SCB_SITE(lseq), + (u16)last_scb_site_no+1); + asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq), + (u16) ((LmM0INTEN_MASK & 0xFFFF0000) >> 16)); + asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq) + 2, + (u16) LmM0INTEN_MASK & 0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_FRM_LEN(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_PROTOCOL(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_RESP_STATUS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_LAST_LOADED_SGE(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_SAVE_SCBPTR(lseq), 0); + + /* LSEQ mode dependent, mode 1, page 0 setup. */ + asd_write_reg_word(asd_ha, LmSEQ_Q_XMIT_HEAD(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_M1_EMPTY_TRANS_CTX(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_INI_CONN_TAG(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_FAILED_OPEN_STATUS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_XMIT_REQUEST_TYPE(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_M1_RESP_STATUS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_M1_LAST_LOADED_SGE(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_M1_SAVE_SCBPTR(lseq), 0); + + /* LSEQ Mode dependent mode 2, page 0 setup */ + asd_write_reg_word(asd_ha, LmSEQ_PORT_COUNTER(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_PM_TABLE_PTR(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_SATA_INTERLOCK_TMR_SAVE(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_IP_BITL(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_COPY_SMP_CONN_TAG(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_P0M2_OFFS1AH(lseq), 0); + + /* LSEQ Mode dependent, mode 4/5, page 0 setup. 
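A side note on the scratch addressing used throughout asd_init_lseq_mdp(): each mode owns a block of pages, so a (mode, page) pair maps to a byte offset by simple multiplication, except for the shared page handled through LSEQ_MODE5_PAGE0_OFFSET above. The tiny sketch below shows that arithmetic with made-up sizes; the real constants live in the driver's register-map headers.

#include <stdio.h>

/* Illustration-only layout constants; not the driver's real values. */
#define PAGE_SIZE_BYTES     0x80u
#define PAGES_PER_MODE      4u
#define MODE_SCRATCH_BYTES  (PAGES_PER_MODE * PAGE_SIZE_BYTES)

/* Byte offset of (mode, page) inside one sequencer's scratch RAM,
 * assuming a simple mode-major layout. */
static unsigned int scratch_offset(unsigned int mode, unsigned int page)
{
        return mode * MODE_SCRATCH_BYTES + page * PAGE_SIZE_BYTES;
}

int main(void)
{
        /* e.g. the "mode 2, page 1" registers would start here: */
        printf("mode 2, page 1 -> 0x%x\n", scratch_offset(2, 1));
        /* The driver cannot use this formula for every page: mode 5's
         * page 0 shares storage with another page, so it is addressed
         * through a dedicated offset constant instead. */
        return 0;
}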
*/ + asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_STATUS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_MODE(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_Q_LINK_HEAD(lseq), 0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_ERR(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_SIGNALS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_SAS_RESET_MODE(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_LINK_RESET_RETRY_COUNT(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_NUM_LINK_RESET_RETRIES(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_OOB_INT_ENABLES(lseq), 0); + /* + * Set the desired interval between transmissions of the NOTIFY + * (ENABLE SPINUP) primitive. Must be initialized to val - 1. + */ + asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_TIMEOUT(lseq), + ASD_NOTIFY_TIMEOUT - 1); + /* No delay for the first NOTIFY to be sent to the attached target. */ + asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq), + ASD_NOTIFY_DOWN_COUNT); + asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_INITIAL_COUNT(lseq), + ASD_NOTIFY_DOWN_COUNT); + + /* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */ + for (i = 0; i < 2; i++) { + int j; + /* Start from Page 1 of Mode 0 and 1. */ + moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE; + /* All the fields of page 1 can be initialized to 0. */ + for (j = 0; j < LSEQ_PAGE_SIZE; j += 4) + asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0); + } + + /* LSEQ Mode dependent, mode 2, page 1 setup. */ + asd_write_reg_dword(asd_ha, LmSEQ_INVALID_DWORD_COUNT(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_DISPARITY_ERROR_COUNT(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_LOSS_OF_SYNC_COUNT(lseq), 0); + + /* LSEQ Mode dependent, mode 4/5, page 1. */ + for (i = 0; i < LSEQ_PAGE_SIZE; i+=4) + asd_write_reg_dword(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq)+i, 0); + asd_write_reg_byte(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq), 0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq), 0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+1,0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+2,0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq), 0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+1, 0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+2, 0xFF); + asd_write_reg_dword(asd_ha, LmSEQ_DATA_OFFSET(lseq), 0xFFFFFFFF); + + /* LSEQ Mode dependent, mode 0, page 2 setup. */ + asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMER_TERM_TS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_DEVICE_BITS(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_SDB_DDB(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_SDB_NUM_TAGS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_SDB_CURR_TAG(lseq), 0); + + /* LSEQ Mode Dependent 1, page 2 setup. */ + asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq)+4, 0); + asd_write_reg_dword(asd_ha, LmSEQ_OPEN_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_SRST_AS_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_LAST_LOADED_SG_EL(lseq), 0); + + /* LSEQ Mode Dependent 2, page 2 setup. */ + /* The LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS is IGNORED by the sequencer, + * i.e. always 0. 
*/ + asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(lseq),0); + asd_write_reg_dword(asd_ha, LmSEQ_CLOSE_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_BREAK_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_DWS_RESET_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha,LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(lseq),0); + asd_write_reg_dword(asd_ha, LmSEQ_MCTL_TIMER_TERM_TS(lseq), 0); + + /* LSEQ Mode Dependent 4/5, page 2 setup. */ + asd_write_reg_dword(asd_ha, LmSEQ_COMINIT_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_RCV_ID_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TIMER_TERM_TS(lseq), 0); +} + +/** + * asd_init_lseq_scratch -- setup and init link sequencers + * @asd_ha: pointer to host adapter struct + */ +static void asd_init_lseq_scratch(struct asd_ha_struct *asd_ha) +{ + u8 lseq; + u8 lseq_mask; + + lseq_mask = asd_ha->hw_prof.enabled_phys; + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + asd_init_lseq_mip(asd_ha, lseq); + asd_init_lseq_mdp(asd_ha, lseq); + } +} + +/** + * asd_init_scb_sites -- initialize sequencer SCB sites (memory). + * @asd_ha: pointer to host adapter structure + * + * This should be done before initializing common CSEQ and LSEQ + * scratch since those areas depend on some computed values here, + * last_scb_site_no, etc. + */ +static void asd_init_scb_sites(struct asd_ha_struct *asd_ha) +{ + u16 site_no; + u16 max_scbs = 0; + + for (site_no = asd_ha->hw_prof.max_scbs-1; + site_no != (u16) -1; + site_no--) { + u16 i; + + /* Initialize all fields in the SCB site to 0. */ + for (i = 0; i < ASD_SCB_SIZE; i += 4) + asd_scbsite_write_dword(asd_ha, site_no, i, 0); + + /* Initialize SCB Site Opcode field to invalid. */ + asd_scbsite_write_byte(asd_ha, site_no, + offsetof(struct scb_header, opcode), + 0xFF); + + /* Initialize SCB Site Flags field to mean a response + * frame has been received. This means inadvertent + * frames received to be dropped. */ + asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01); + + /* Workaround needed by SEQ to fix a SATA issue is to exclude + * certain SCB sites from the free list. */ + if (!SCB_SITE_VALID(site_no)) + continue; + + if (last_scb_site_no == 0) + last_scb_site_no = site_no; + + /* For every SCB site, we need to initialize the + * following fields: Q_NEXT, SCB_OPCODE, SCB_FLAGS, + * and SG Element Flag. */ + + /* Q_NEXT field of the last SCB is invalidated. */ + asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no); + + first_scb_site_no = site_no; + max_scbs++; + } + asd_ha->hw_prof.max_scbs = max_scbs; + ASD_DPRINTK("max_scbs:%d\n", asd_ha->hw_prof.max_scbs); + ASD_DPRINTK("first_scb_site_no:0x%x\n", first_scb_site_no); + ASD_DPRINTK("last_scb_site_no:0x%x\n", last_scb_site_no); +} + +/** + * asd_init_cseq_cio - initialize CSEQ CIO registers + * @asd_ha: pointer to host adapter structure + */ +static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha) +{ + int i; + + asd_write_reg_byte(asd_ha, CSEQCOMINTEN, 0); + asd_write_reg_byte(asd_ha, CSEQDLCTL, ASD_DL_SIZE_BITS); + asd_write_reg_byte(asd_ha, CSEQDLOFFS, 0); + asd_write_reg_byte(asd_ha, CSEQDLOFFS+1, 0); + asd_ha->seq.scbpro = 0; + asd_write_reg_dword(asd_ha, SCBPRO, 0); + asd_write_reg_dword(asd_ha, CSEQCON, 0); + + /* Initialize CSEQ Mode 11 Interrupt Vectors. + * The addresses are 16 bit wide and in dword units. + * The values of their macros are in byte units. + * Thus we have to divide by 4. 
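asd_init_scb_sites() above builds the sequencer's free list by walking the SCB sites from the highest index down, linking each usable site's Q_NEXT field to the previously visited site and remembering the resulting head and tail. Here is a compact stand-alone sketch of that threading over an ordinary array; the validity test is an arbitrary stand-in for the driver's SCB_SITE_VALID() workaround, and the link write is just an array store.

#include <stdint.h>
#include <stdio.h>

#define NUM_SITES 16u
#define INVALID   0xFFFFu

static uint16_t q_next[NUM_SITES];      /* models each site's Q_NEXT field */

/* Arbitrary stand-in for the "exclude some sites" workaround. */
static int site_usable(unsigned int site)
{
        return (site % 5) != 0;
}

int main(void)
{
        unsigned int first = INVALID, last = 0, usable = 0;
        unsigned int site;

        for (site = NUM_SITES; site-- > 0; ) {
                if (!site_usable(site))
                        continue;
                if (last == 0)
                        last = site;            /* highest usable site becomes the tail */
                q_next[site] = (uint16_t)first; /* link toward the previously seen site */
                first = site;                   /* new head of the free list */
                usable++;
        }

        /* The tail keeps q_next == INVALID, mirroring the "last SCB is
         * invalidated" rule in the function above. */
        printf("head=%u tail=%u usable=%u\n", first, last, usable);
        return 0;
}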
*/ + asd_write_reg_word(asd_ha, CM11INTVEC0, cseq_vecs[0]); + asd_write_reg_word(asd_ha, CM11INTVEC1, cseq_vecs[1]); + asd_write_reg_word(asd_ha, CM11INTVEC2, cseq_vecs[2]); + + /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */ + asd_write_reg_byte(asd_ha, CARP2INTEN, EN_ARP2HALTC); + + /* Initialize CSEQ Scratch Page to 0x04. */ + asd_write_reg_byte(asd_ha, CSCRATCHPAGE, 0x04); + + /* Initialize CSEQ Mode[0-8] Dependent registers. */ + /* Initialize Scratch Page to 0. */ + for (i = 0; i < 9; i++) + asd_write_reg_byte(asd_ha, CMnSCRATCHPAGE(i), 0); + + /* Reset the ARP2 Program Count. */ + asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop); + + for (i = 0; i < 8; i++) { + /* Initialize Mode n Link m Interrupt Enable. */ + asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF); + /* Initialize Mode n Request Mailbox. */ + asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0); + } +} + +/** + * asd_init_lseq_cio -- initialize LmSEQ CIO registers + * @asd_ha: pointer to host adapter structure + * @lseq: link sequencer + */ +static void asd_init_lseq_cio(struct asd_ha_struct *asd_ha, int lseq) +{ + u8 *sas_addr; + int i; + + /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */ + asd_write_reg_dword(asd_ha, LmARP2INTEN(lseq), EN_ARP2HALTC); + + asd_write_reg_byte(asd_ha, LmSCRATCHPAGE(lseq), 0); + + /* Initialize Mode 0,1, and 2 SCRATCHPAGE to 0. */ + for (i = 0; i < 3; i++) + asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, i), 0); + + /* Initialize Mode 5 SCRATCHPAGE to 0. */ + asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, 5), 0); + + asd_write_reg_dword(asd_ha, LmRSPMBX(lseq), 0); + /* Initialize Mode 0,1,2 and 5 Interrupt Enable and + * Interrupt registers. */ + asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 0), LmM0INTEN_MASK); + asd_write_reg_dword(asd_ha, LmMnINT(lseq, 0), 0xFFFFFFFF); + /* Mode 1 */ + asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 1), LmM1INTEN_MASK); + asd_write_reg_dword(asd_ha, LmMnINT(lseq, 1), 0xFFFFFFFF); + /* Mode 2 */ + asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 2), LmM2INTEN_MASK); + asd_write_reg_dword(asd_ha, LmMnINT(lseq, 2), 0xFFFFFFFF); + /* Mode 5 */ + asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 5), LmM5INTEN_MASK); + asd_write_reg_dword(asd_ha, LmMnINT(lseq, 5), 0xFFFFFFFF); + + /* Enable HW Timer status. */ + asd_write_reg_byte(asd_ha, LmHWTSTATEN(lseq), LmHWTSTATEN_MASK); + + /* Enable Primitive Status 0 and 1. */ + asd_write_reg_dword(asd_ha, LmPRIMSTAT0EN(lseq), LmPRIMSTAT0EN_MASK); + asd_write_reg_dword(asd_ha, LmPRIMSTAT1EN(lseq), LmPRIMSTAT1EN_MASK); + + /* Enable Frame Error. */ + asd_write_reg_dword(asd_ha, LmFRMERREN(lseq), LmFRMERREN_MASK); + asd_write_reg_byte(asd_ha, LmMnHOLDLVL(lseq, 0), 0x50); + + /* Initialize Mode 0 Transfer Level to 512. */ + asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 0), LmMnXFRLVL_512); + /* Initialize Mode 1 Transfer Level to 256. */ + asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 1), LmMnXFRLVL_256); + + /* Initialize Program Count. */ + asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop); + + /* Enable Blind SG Move. */ + asd_write_reg_dword(asd_ha, LmMODECTL(lseq), LmBLIND48); + asd_write_reg_word(asd_ha, LmM3SATATIMER(lseq), + ASD_SATA_INTERLOCK_TIMEOUT); + + (void) asd_read_reg_dword(asd_ha, LmREQMBX(lseq)); + + /* Clear Primitive Status 0 and 1. */ + asd_write_reg_dword(asd_ha, LmPRMSTAT0(lseq), 0xFFFFFFFF); + asd_write_reg_dword(asd_ha, LmPRMSTAT1(lseq), 0xFFFFFFFF); + + /* Clear HW Timer status. */ + asd_write_reg_byte(asd_ha, LmHWTSTAT(lseq), 0xFF); + + /* Clear DMA Errors for Mode 0 and 1. 
*/ + asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 0), 0xFF); + asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 1), 0xFF); + + /* Clear SG DMA Errors for Mode 0 and 1. */ + asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 0), 0xFF); + asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 1), 0xFF); + + /* Clear Mode 0 Buffer Parity Error. */ + asd_write_reg_byte(asd_ha, LmMnBUFSTAT(lseq, 0), LmMnBUFPERR); + + /* Clear Mode 0 Frame Error register. */ + asd_write_reg_dword(asd_ha, LmMnFRMERR(lseq, 0), 0xFFFFFFFF); + + /* Reset LSEQ external interrupt arbiter. */ + asd_write_reg_byte(asd_ha, LmARP2INTCTL(lseq), RSTINTCTL); + + /* Set the Phy SAS for the LmSEQ WWN. */ + sas_addr = asd_ha->phys[lseq].phy_desc->sas_addr; + for (i = 0; i < SAS_ADDR_SIZE; i++) + asd_write_reg_byte(asd_ha, LmWWN(lseq) + i, sas_addr[i]); + + /* Set the Transmit Size to 1024 bytes, 0 = 256 Dwords. */ + asd_write_reg_byte(asd_ha, LmMnXMTSIZE(lseq, 1), 0); + + /* Set the Bus Inactivity Time Limit Timer. */ + asd_write_reg_word(asd_ha, LmBITL_TIMER(lseq), 9); + + /* Enable SATA Port Multiplier. */ + asd_write_reg_byte(asd_ha, LmMnSATAFS(lseq, 1), 0x80); + + /* Initialize Interrupt Vector[0-10] address in Mode 3. + * See the comment on CSEQ_INT_* */ + asd_write_reg_word(asd_ha, LmM3INTVEC0(lseq), lseq_vecs[0]); + asd_write_reg_word(asd_ha, LmM3INTVEC1(lseq), lseq_vecs[1]); + asd_write_reg_word(asd_ha, LmM3INTVEC2(lseq), lseq_vecs[2]); + asd_write_reg_word(asd_ha, LmM3INTVEC3(lseq), lseq_vecs[3]); + asd_write_reg_word(asd_ha, LmM3INTVEC4(lseq), lseq_vecs[4]); + asd_write_reg_word(asd_ha, LmM3INTVEC5(lseq), lseq_vecs[5]); + asd_write_reg_word(asd_ha, LmM3INTVEC6(lseq), lseq_vecs[6]); + asd_write_reg_word(asd_ha, LmM3INTVEC7(lseq), lseq_vecs[7]); + asd_write_reg_word(asd_ha, LmM3INTVEC8(lseq), lseq_vecs[8]); + asd_write_reg_word(asd_ha, LmM3INTVEC9(lseq), lseq_vecs[9]); + asd_write_reg_word(asd_ha, LmM3INTVEC10(lseq), lseq_vecs[10]); + /* + * Program the Link LED control, applicable only for + * Chip Rev. B or later. + */ + asd_write_reg_dword(asd_ha, LmCONTROL(lseq), + (LEDTIMER | LEDMODE_TXRX | LEDTIMERS_100ms)); + + /* Set the Align Rate for SAS and STP mode. */ + asd_write_reg_byte(asd_ha, LmM1SASALIGN(lseq), SAS_ALIGN_DEFAULT); + asd_write_reg_byte(asd_ha, LmM1STPALIGN(lseq), STP_ALIGN_DEFAULT); +} + + +/** + * asd_post_init_cseq -- clear CSEQ Mode n Int. status and Response mailbox + * @asd_ha: pointer to host adapter struct + */ +static void asd_post_init_cseq(struct asd_ha_struct *asd_ha) +{ + int i; + + for (i = 0; i < 8; i++) + asd_write_reg_dword(asd_ha, CMnINT(i), 0xFFFFFFFF); + for (i = 0; i < 8; i++) + asd_read_reg_dword(asd_ha, CMnRSPMBX(i)); + /* Reset the external interrupt arbiter. */ + asd_write_reg_byte(asd_ha, CARP2INTCTL, RSTINTCTL); +} + +/** + * asd_init_ddb_0 -- initialize DDB 0 + * @asd_ha: pointer to host adapter structure + * + * Initialize DDB site 0 which is used internally by the sequencer. 
+ */ +static void asd_init_ddb_0(struct asd_ha_struct *asd_ha) +{ + int i; + + /* Zero out the DDB explicitly */ + for (i = 0; i < sizeof(struct asd_ddb_seq_shared); i+=4) + asd_ddbsite_write_dword(asd_ha, 0, i, 0); + + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, q_free_ddb_head), 0); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, q_free_ddb_tail), + asd_ha->hw_prof.max_ddbs-1); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, q_free_ddb_cnt), 0); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, q_used_ddb_head), 0xFFFF); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, q_used_ddb_tail), 0xFFFF); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, shared_mem_lock), 0); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, smp_conn_tag), 0); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, est_nexus_buf_cnt), 0); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, est_nexus_buf_thresh), + asd_ha->hw_prof.num_phys * 2); + asd_ddbsite_write_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, settable_max_contexts),0); + asd_ddbsite_write_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, conn_not_active), 0xFF); + asd_ddbsite_write_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, phy_is_up), 0x00); + /* DDB 0 is reserved */ + set_bit(0, asd_ha->hw_prof.ddb_bitmap); +} + +static void asd_seq_init_ddb_sites(struct asd_ha_struct *asd_ha) +{ + unsigned int i; + unsigned int ddb_site; + + for (ddb_site = 0 ; ddb_site < ASD_MAX_DDBS; ddb_site++) + for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4) + asd_ddbsite_write_dword(asd_ha, ddb_site, i, 0); +} + +/** + * asd_seq_setup_seqs -- setup and initialize central and link sequencers + * @asd_ha: pointer to host adapter structure + */ +static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha) +{ + int lseq; + u8 lseq_mask; + + /* Initialize DDB sites */ + asd_seq_init_ddb_sites(asd_ha); + + /* Initialize SCB sites. Done first to compute some values which + * the rest of the init code depends on. */ + asd_init_scb_sites(asd_ha); + + /* Initialize CSEQ Scratch RAM registers. */ + asd_init_cseq_scratch(asd_ha); + + /* Initialize LmSEQ Scratch RAM registers. */ + asd_init_lseq_scratch(asd_ha); + + /* Initialize CSEQ CIO registers. */ + asd_init_cseq_cio(asd_ha); + + asd_init_ddb_0(asd_ha); + + /* Initialize LmSEQ CIO registers. */ + lseq_mask = asd_ha->hw_prof.enabled_phys; + for_each_sequencer(lseq_mask, lseq_mask, lseq) + asd_init_lseq_cio(asd_ha, lseq); + asd_post_init_cseq(asd_ha); +} + + +/** + * asd_seq_start_cseq -- start the central sequencer, CSEQ + * @asd_ha: pointer to host adapter structure + */ +static int asd_seq_start_cseq(struct asd_ha_struct *asd_ha) +{ + /* Reset the ARP2 instruction to location zero. */ + asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop); + + /* Unpause the CSEQ */ + return asd_unpause_cseq(asd_ha); +} + +/** + * asd_seq_start_lseq -- start a link sequencer + * @asd_ha: pointer to host adapter structure + * @lseq: the link sequencer of interest + */ +static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq) +{ + /* Reset the ARP2 instruction to location zero. 
*/ + asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop); + + /* Unpause the LmSEQ */ + return asd_seq_unpause_lseq(asd_ha, lseq); +} + +int asd_release_firmware(void) +{ + release_firmware(sequencer_fw); + return 0; +} + +static int asd_request_firmware(struct asd_ha_struct *asd_ha) +{ + int err, i; + struct sequencer_file_header header; + const struct sequencer_file_header *hdr_ptr; + u32 csum = 0; + u16 *ptr_cseq_vecs, *ptr_lseq_vecs; + + if (sequencer_fw) + /* already loaded */ + return 0; + + err = request_firmware(&sequencer_fw, + SAS_RAZOR_SEQUENCER_FW_FILE, + &asd_ha->pcidev->dev); + if (err) + return err; + + hdr_ptr = (const struct sequencer_file_header *)sequencer_fw->data; + + header.csum = le32_to_cpu(hdr_ptr->csum); + header.major = le32_to_cpu(hdr_ptr->major); + header.minor = le32_to_cpu(hdr_ptr->minor); + header.cseq_table_offset = le32_to_cpu(hdr_ptr->cseq_table_offset); + header.cseq_table_size = le32_to_cpu(hdr_ptr->cseq_table_size); + header.lseq_table_offset = le32_to_cpu(hdr_ptr->lseq_table_offset); + header.lseq_table_size = le32_to_cpu(hdr_ptr->lseq_table_size); + header.cseq_code_offset = le32_to_cpu(hdr_ptr->cseq_code_offset); + header.cseq_code_size = le32_to_cpu(hdr_ptr->cseq_code_size); + header.lseq_code_offset = le32_to_cpu(hdr_ptr->lseq_code_offset); + header.lseq_code_size = le32_to_cpu(hdr_ptr->lseq_code_size); + header.mode2_task = le16_to_cpu(hdr_ptr->mode2_task); + header.cseq_idle_loop = le16_to_cpu(hdr_ptr->cseq_idle_loop); + header.lseq_idle_loop = le16_to_cpu(hdr_ptr->lseq_idle_loop); + + for (i = sizeof(header.csum); i < sequencer_fw->size; i++) + csum += sequencer_fw->data[i]; + + if (csum != header.csum) { + asd_printk("Firmware file checksum mismatch\n"); + return -EINVAL; + } + + if (header.cseq_table_size != CSEQ_NUM_VECS || + header.lseq_table_size != LSEQ_NUM_VECS) { + asd_printk("Firmware file table size mismatch\n"); + return -EINVAL; + } + + asd_printk("Found sequencer Firmware version %d.%d (%s)\n", + header.major, header.minor, hdr_ptr->version); + + if (header.major != SAS_RAZOR_SEQUENCER_FW_MAJOR) { + asd_printk("Firmware Major Version Mismatch;" + "driver requires version %d.X", + SAS_RAZOR_SEQUENCER_FW_MAJOR); + return -EINVAL; + } + + ptr_cseq_vecs = (u16 *)&sequencer_fw->data[header.cseq_table_offset]; + ptr_lseq_vecs = (u16 *)&sequencer_fw->data[header.lseq_table_offset]; + mode2_task = header.mode2_task; + cseq_idle_loop = header.cseq_idle_loop; + lseq_idle_loop = header.lseq_idle_loop; + + for (i = 0; i < CSEQ_NUM_VECS; i++) + cseq_vecs[i] = le16_to_cpu(ptr_cseq_vecs[i]); + + for (i = 0; i < LSEQ_NUM_VECS; i++) + lseq_vecs[i] = le16_to_cpu(ptr_lseq_vecs[i]); + + cseq_code = &sequencer_fw->data[header.cseq_code_offset]; + cseq_code_size = header.cseq_code_size; + lseq_code = &sequencer_fw->data[header.lseq_code_offset]; + lseq_code_size = header.lseq_code_size; + + return 0; +} + +int asd_init_seqs(struct asd_ha_struct *asd_ha) +{ + int err; + + err = asd_request_firmware(asd_ha); + + if (err) { + asd_printk("Failed to load sequencer firmware file %s, error %d\n", + SAS_RAZOR_SEQUENCER_FW_FILE, err); + return err; + } + + err = asd_seq_download_seqs(asd_ha); + if (err) { + asd_printk("couldn't download sequencers for %s\n", + pci_name(asd_ha->pcidev)); + return err; + } + + asd_seq_setup_seqs(asd_ha); + + return 0; +} + +int asd_start_seqs(struct asd_ha_struct *asd_ha) +{ + int err; + u8 lseq_mask; + int lseq; + + err = asd_seq_start_cseq(asd_ha); + if (err) { + asd_printk("couldn't start CSEQ for %s\n", + 
pci_name(asd_ha->pcidev)); + return err; + } + + lseq_mask = asd_ha->hw_prof.enabled_phys; + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + err = asd_seq_start_lseq(asd_ha, lseq); + if (err) { + asd_printk("couldn't start LSEQ %d for %s\n", lseq, + pci_name(asd_ha->pcidev)); + return err; + } + } + + return 0; +} + +/** + * asd_update_port_links -- update port_map_by_links and phy_is_up + * @asd_ha: pointer to host adapter structure + * @phy: pointer to the phy which has been added to a port + * + * 1) When a link reset has completed and we got BYTES DMAED with a + * valid frame we call this function for that phy, to indicate that + * the phy is up, i.e. we update the phy_is_up in DDB 0. The + * sequencer checks phy_is_up when pending SCBs are to be sent, and + * when an open address frame has been received. + * + * 2) When we know of ports, we call this function to update the map + * of phys participaing in that port, i.e. we update the + * port_map_by_links in DDB 0. When a HARD_RESET primitive has been + * received, the sequencer disables all phys in that port. + * port_map_by_links is also used as the conn_mask byte in the + * initiator/target port DDB. + */ +void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy) +{ + const u8 phy_mask = (u8) phy->asd_port->phy_mask; + u8 phy_is_up; + u8 mask; + int i, err; + unsigned long flags; + + spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); + for_each_phy(phy_mask, mask, i) + asd_ddbsite_write_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, + port_map_by_links)+i,phy_mask); + + for (i = 0; i < 12; i++) { + phy_is_up = asd_ddbsite_read_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, phy_is_up)); + err = asd_ddbsite_update_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, phy_is_up), + phy_is_up, + phy_is_up | phy_mask); + if (!err) + break; + else if (err == -EFAULT) { + asd_printk("phy_is_up: parity error in DDB 0\n"); + break; + } + } + spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); + + if (err) + asd_printk("couldn't update DDB 0:error:%d\n", err); +} + +MODULE_FIRMWARE(SAS_RAZOR_SEQUENCER_FW_FILE); diff --git a/drivers/scsi/aic94xx/aic94xx_seq.h b/drivers/scsi/aic94xx/aic94xx_seq.h new file mode 100644 index 000000000..5bf9b8ae6 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_seq.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Aic94xx SAS/SATA driver sequencer interface header file. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. 
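asd_request_firmware() above accepts a sequencer image only if a byte-wise sum of everything after the leading 4-byte csum field matches the little-endian value stored in that field (the same rule that struct sequencer_file_header documents below). A self-contained sketch of that check, using a made-up buffer for the demo:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Sum every byte after the 4-byte csum field and compare the result with
 * the little-endian value stored in those first four bytes. */
static int fw_checksum_ok(const uint8_t *data, size_t size)
{
        uint32_t stored, sum = 0;
        size_t i;

        if (size < 4)
                return 0;

        stored = (uint32_t)data[0] | ((uint32_t)data[1] << 8) |
                 ((uint32_t)data[2] << 16) | ((uint32_t)data[3] << 24);

        for (i = 4; i < size; i++)
                sum += data[i];

        return sum == stored;
}

int main(void)
{
        uint8_t blob[16] = { 0 };

        memset(blob + 4, 1, 12);        /* payload of twelve 0x01 bytes -> sum 12 */
        blob[0] = 12;                   /* little-endian csum field */

        printf("checksum %s\n", fw_checksum_ok(blob, sizeof(blob)) ? "ok" : "bad");
        return 0;
}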
+ * Copyright (C) 2005 Luben Tuikov + */ + +#ifndef _AIC94XX_SEQ_H_ +#define _AIC94XX_SEQ_H_ + +#define CSEQ_NUM_VECS 3 +#define LSEQ_NUM_VECS 11 + +#define SAS_RAZOR_SEQUENCER_FW_FILE "aic94xx-seq.fw" +#define SAS_RAZOR_SEQUENCER_FW_MAJOR 1 + +/* Note: All quantites in the sequencer file are little endian */ +struct sequencer_file_header { + /* Checksum of the entire contents of the sequencer excluding + * these four bytes */ + u32 csum; + /* numeric major version */ + u32 major; + /* numeric minor version */ + u32 minor; + /* version string printed by driver */ + char version[16]; + u32 cseq_table_offset; + u32 cseq_table_size; + u32 lseq_table_offset; + u32 lseq_table_size; + u32 cseq_code_offset; + u32 cseq_code_size; + u32 lseq_code_offset; + u32 lseq_code_size; + u16 mode2_task; + u16 cseq_idle_loop; + u16 lseq_idle_loop; +} __attribute__((packed)); + +#ifdef __KERNEL__ +int asd_init_seqs(struct asd_ha_struct *asd_ha); +int asd_start_seqs(struct asd_ha_struct *asd_ha); +int asd_release_firmware(void); + +void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy); +#endif + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c new file mode 100644 index 000000000..4bfd03724 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_task.c @@ -0,0 +1,612 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Aic94xx SAS/SATA Tasks + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + */ + +#include +#include "aic94xx.h" +#include "aic94xx_sas.h" +#include "aic94xx_hwi.h" + +static void asd_unbuild_ata_ascb(struct asd_ascb *a); +static void asd_unbuild_smp_ascb(struct asd_ascb *a); +static void asd_unbuild_ssp_ascb(struct asd_ascb *a); + +static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num) +{ + unsigned long flags; + + spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); + asd_ha->seq.can_queue += num; + spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); +} + +/* DMA_... to our direction translation. 
+ */ +static const u8 data_dir_flags[] = { + [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */ + [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */ + [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */ + [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */ +}; + +static int asd_map_scatterlist(struct sas_task *task, + struct sg_el *sg_arr, + gfp_t gfp_flags) +{ + struct asd_ascb *ascb = task->lldd_task; + struct asd_ha_struct *asd_ha = ascb->ha; + struct scatterlist *sc; + int num_sg, res; + + if (task->data_dir == DMA_NONE) + return 0; + + if (task->num_scatter == 0) { + void *p = task->scatter; + dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p, + task->total_xfer_len, + task->data_dir); + if (dma_mapping_error(&asd_ha->pcidev->dev, dma)) + return -ENOMEM; + + sg_arr[0].bus_addr = cpu_to_le64((u64)dma); + sg_arr[0].size = cpu_to_le32(task->total_xfer_len); + sg_arr[0].flags |= ASD_SG_EL_LIST_EOL; + return 0; + } + + /* STP tasks come from libata which has already mapped + * the SG list */ + if (sas_protocol_ata(task->task_proto)) + num_sg = task->num_scatter; + else + num_sg = dma_map_sg(&asd_ha->pcidev->dev, task->scatter, + task->num_scatter, task->data_dir); + if (num_sg == 0) + return -ENOMEM; + + if (num_sg > 3) { + int i; + + ascb->sg_arr = asd_alloc_coherent(asd_ha, + num_sg*sizeof(struct sg_el), + gfp_flags); + if (!ascb->sg_arr) { + res = -ENOMEM; + goto err_unmap; + } + for_each_sg(task->scatter, sc, num_sg, i) { + struct sg_el *sg = + &((struct sg_el *)ascb->sg_arr->vaddr)[i]; + sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc)); + sg->size = cpu_to_le32((u32)sg_dma_len(sc)); + if (i == num_sg-1) + sg->flags |= ASD_SG_EL_LIST_EOL; + } + + for_each_sg(task->scatter, sc, 2, i) { + sg_arr[i].bus_addr = + cpu_to_le64((u64)sg_dma_address(sc)); + sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc)); + } + sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr); + sg_arr[1].flags |= ASD_SG_EL_LIST_EOS; + + memset(&sg_arr[2], 0, sizeof(*sg_arr)); + sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle); + } else { + int i; + for_each_sg(task->scatter, sc, num_sg, i) { + sg_arr[i].bus_addr = + cpu_to_le64((u64)sg_dma_address(sc)); + sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc)); + } + sg_arr[i-1].flags |= ASD_SG_EL_LIST_EOL; + } + + return 0; +err_unmap: + if (sas_protocol_ata(task->task_proto)) + dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter, + task->num_scatter, task->data_dir); + return res; +} + +static void asd_unmap_scatterlist(struct asd_ascb *ascb) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct sas_task *task = ascb->uldd_task; + + if (task->data_dir == DMA_NONE) + return; + + if (task->num_scatter == 0) { + dma_addr_t dma = (dma_addr_t) + le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr); + dma_unmap_single(&ascb->ha->pcidev->dev, dma, + task->total_xfer_len, task->data_dir); + return; + } + + asd_free_coherent(asd_ha, ascb->sg_arr); + if (task->task_proto != SAS_PROTOCOL_STP) + dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter, + task->num_scatter, task->data_dir); +} + +/* ---------- Task complete tasklet ---------- */ + +static void asd_get_response_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct sas_task *task = ascb->uldd_task; + struct task_status_struct *ts = &task->task_status; + unsigned long flags; + struct tc_resp_sb_struct { + __le16 index_escb; + u8 len_lsb; + u8 flags; + } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block; + +/* int size = ((resp_sb->flags & 7) << 8) 
| resp_sb->len_lsb; */ + int edb_id = ((resp_sb->flags & 0x70) >> 4)-1; + struct asd_ascb *escb; + struct asd_dma_tok *edb; + void *r; + + spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags); + escb = asd_tc_index_find(&asd_ha->seq, + (int)le16_to_cpu(resp_sb->index_escb)); + spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags); + + if (!escb) { + ASD_DPRINTK("Uh-oh! No escb for this dl?!\n"); + return; + } + + ts->buf_valid_size = 0; + edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index]; + r = edb->vaddr; + if (task->task_proto == SAS_PROTOCOL_SSP) { + struct ssp_response_iu *iu = + r + 16 + sizeof(struct ssp_frame_hdr); + + ts->residual = le32_to_cpu(*(__le32 *)r); + + sas_ssp_task_response(&asd_ha->pcidev->dev, task, iu); + } else { + struct ata_task_resp *resp = (void *) &ts->buf[0]; + + ts->residual = le32_to_cpu(*(__le32 *)r); + + if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { + resp->frame_len = le16_to_cpu(*(__le16 *)(r+6)); + memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE); + ts->buf_valid_size = sizeof(*resp); + } + } + + asd_invalidate_edb(escb, edb_id); +} + +static void asd_task_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct sas_task *task = ascb->uldd_task; + struct task_status_struct *ts = &task->task_status; + unsigned long flags; + u8 opcode = dl->opcode; + + asd_can_dequeue(ascb->ha, 1); + +Again: + switch (opcode) { + case TC_NO_ERROR: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_GOOD; + break; + case TC_UNDERRUN: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = le32_to_cpu(*(__le32 *)dl->status_block); + break; + case TC_OVERRUN: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + break; + case TC_SSP_RESP: + case TC_ATA_RESP: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + asd_get_response_tasklet(ascb, dl); + break; + case TF_OPEN_REJECT: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + if (dl->status_block[1] & 2) + ts->open_rej_reason = 1 + dl->status_block[2]; + else if (dl->status_block[1] & 1) + ts->open_rej_reason = (dl->status_block[2] >> 4)+10; + else + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case TF_OPEN_TO: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_TO; + break; + case TF_PHY_DOWN: + case TU_PHY_DOWN: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case TI_PHY_DOWN: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PHY_DOWN; + break; + case TI_BREAK: + case TI_PROTO_ERR: + case TI_NAK: + case TI_ACK_NAK_TO: + case TF_SMP_XMIT_RCV_ERR: + case TC_ATA_R_ERR_RECV: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case TF_BREAK: + case TU_BREAK: + case TU_ACK_NAK_TO: + case TF_SMPRSP_TO: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case TF_NAK_RECV: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case TA_I_T_NEXUS_LOSS: + opcode = dl->status_block[0]; + goto Again; + case TF_INV_CONN_HANDLE: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEVICE_UNKNOWN; + break; + case TF_REQUESTED_N_PENDING: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PENDING; + break; + case TC_TASK_CLEARED: + case TA_ON_REQ: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + break; + + case TF_NO_SMP_CONN: + case TF_TMF_NO_CTX: + case TF_TMF_NO_TAG: + case TF_TMF_TAG_FREE: + case TF_TMF_TASK_DONE: + case TF_TMF_NO_CONN_HANDLE: + case TF_IRTT_TO: + case TF_IU_SHORT: + case 
TF_DATA_OFFS_ERR: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + + case TC_LINK_ADM_RESP: + case TC_CONTROL_PHY: + case TC_RESUME: + case TC_PARTIAL_SG_LIST: + default: + ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __func__, opcode); + break; + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + asd_unbuild_ata_ascb(ascb); + break; + case SAS_PROTOCOL_SMP: + asd_unbuild_smp_ascb(ascb); + break; + case SAS_PROTOCOL_SSP: + asd_unbuild_ssp_ascb(ascb); + break; + default: + break; + } + + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { + struct completion *completion = ascb->completion; + spin_unlock_irqrestore(&task->task_state_lock, flags); + ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + task, opcode, ts->resp, ts->stat); + if (completion) + complete(completion); + } else { + spin_unlock_irqrestore(&task->task_state_lock, flags); + task->lldd_task = NULL; + asd_ascb_free(ascb); + mb(); + task->task_done(task); + } +} + +/* ---------- ATA ---------- */ + +static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task, + gfp_t gfp_flags) +{ + struct domain_device *dev = task->dev; + struct scb *scb; + u8 flags; + int res = 0; + + scb = ascb->scb; + + if (unlikely(task->ata_task.device_control_reg_update)) + scb->header.opcode = CONTROL_ATA_DEV; + else if (dev->sata_dev.class == ATA_DEV_ATAPI) + scb->header.opcode = INITIATE_ATAPI_TASK; + else + scb->header.opcode = INITIATE_ATA_TASK; + + scb->ata_task.proto_conn_rate = (1 << 5); /* STP */ + if (dev->port->oob_mode == SAS_OOB_MODE) + scb->ata_task.proto_conn_rate |= dev->linkrate; + + scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len); + scb->ata_task.fis = task->ata_task.fis; + if (likely(!task->ata_task.device_control_reg_update)) + scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ + scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */ + if (dev->sata_dev.class == ATA_DEV_ATAPI) + memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet, + 16); + scb->ata_task.sister_scb = cpu_to_le16(0xFFFF); + scb->ata_task.conn_handle = cpu_to_le16( + (u16)(unsigned long)dev->lldd_dev); + + if (likely(!task->ata_task.device_control_reg_update)) { + flags = 0; + if (task->ata_task.dma_xfer) + flags |= DATA_XFER_MODE_DMA; + if (task->ata_task.use_ncq && + dev->sata_dev.class != ATA_DEV_ATAPI) + flags |= ATA_Q_TYPE_NCQ; + flags |= data_dir_flags[task->data_dir]; + scb->ata_task.ata_flags = flags; + + scb->ata_task.retry_count = 0; + + scb->ata_task.flags = 0; + } + ascb->tasklet_complete = asd_task_tasklet_complete; + + if (likely(!task->ata_task.device_control_reg_update)) + res = asd_map_scatterlist(task, scb->ata_task.sg_element, + gfp_flags); + + return res; +} + +static void asd_unbuild_ata_ascb(struct asd_ascb *a) +{ + asd_unmap_scatterlist(a); +} + +/* ---------- SMP ---------- */ + +static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task, + gfp_t gfp_flags) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct domain_device *dev = task->dev; + struct scb *scb; + + dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); + + scb = ascb->scb; + + scb->header.opcode = INITIATE_SMP_TASK; + 
+ scb->smp_task.proto_conn_rate = dev->linkrate; + + scb->smp_task.smp_req.bus_addr = + cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); + scb->smp_task.smp_req.size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); + + scb->smp_task.smp_resp.bus_addr = + cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp)); + scb->smp_task.smp_resp.size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); + + scb->smp_task.sister_scb = cpu_to_le16(0xFFFF); + scb->smp_task.conn_handle = cpu_to_le16((u16) + (unsigned long)dev->lldd_dev); + + ascb->tasklet_complete = asd_task_tasklet_complete; + + return 0; +} + +static void asd_unbuild_smp_ascb(struct asd_ascb *a) +{ + struct sas_task *task = a->uldd_task; + + BUG_ON(!task); + dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); +} + +/* ---------- SSP ---------- */ + +static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task, + gfp_t gfp_flags) +{ + struct domain_device *dev = task->dev; + struct scb *scb; + int res = 0; + + scb = ascb->scb; + + scb->header.opcode = INITIATE_SSP_TASK; + + scb->ssp_task.proto_conn_rate = (1 << 4); /* SSP */ + scb->ssp_task.proto_conn_rate |= dev->linkrate; + scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len); + scb->ssp_task.ssp_frame.frame_type = SSP_DATA; + memcpy(scb->ssp_task.ssp_frame.hashed_dest_addr, dev->hashed_sas_addr, + HASHED_SAS_ADDR_SIZE); + memcpy(scb->ssp_task.ssp_frame.hashed_src_addr, + dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF); + + memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8); + scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7); + memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cmd->cmnd, + task->ssp_task.cmd->cmd_len); + + scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF); + scb->ssp_task.conn_handle = cpu_to_le16( + (u16)(unsigned long)dev->lldd_dev); + scb->ssp_task.data_dir = data_dir_flags[task->data_dir]; + scb->ssp_task.retry_count = scb->ssp_task.retry_count; + + ascb->tasklet_complete = asd_task_tasklet_complete; + + res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags); + + return res; +} + +static void asd_unbuild_ssp_ascb(struct asd_ascb *a) +{ + asd_unmap_scatterlist(a); +} + +/* ---------- Execute Task ---------- */ + +static int asd_can_queue(struct asd_ha_struct *asd_ha, int num) +{ + int res = 0; + unsigned long flags; + + spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); + if ((asd_ha->seq.can_queue - num) < 0) + res = -SAS_QUEUE_FULL; + else + asd_ha->seq.can_queue -= num; + spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); + + return res; +} + +int asd_execute_task(struct sas_task *task, gfp_t gfp_flags) +{ + int res = 0; + LIST_HEAD(alist); + struct sas_task *t = task; + struct asd_ascb *ascb = NULL, *a; + struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; + + res = asd_can_queue(asd_ha, 1); + if (res) + return res; + + res = 1; + ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags); + if (res) { + res = -ENOMEM; + goto out_err; + } + + __list_add(&alist, ascb->list.prev, &ascb->list); + list_for_each_entry(a, &alist, list) { + a->uldd_task = t; + t->lldd_task = a; + break; + } + list_for_each_entry(a, &alist, list) { + t = a->uldd_task; + a->uldd_timer = 1; + if (t->task_proto & SAS_PROTOCOL_STP) + t->task_proto = SAS_PROTOCOL_STP; + switch (t->task_proto) { + case 
SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + res = asd_build_ata_ascb(a, t, gfp_flags); + break; + case SAS_PROTOCOL_SMP: + res = asd_build_smp_ascb(a, t, gfp_flags); + break; + case SAS_PROTOCOL_SSP: + res = asd_build_ssp_ascb(a, t, gfp_flags); + break; + default: + asd_printk("unknown sas_task proto: 0x%x\n", + t->task_proto); + res = -ENOMEM; + break; + } + if (res) + goto out_err_unmap; + } + list_del_init(&alist); + + res = asd_post_ascb_list(asd_ha, ascb, 1); + if (unlikely(res)) { + a = NULL; + __list_add(&alist, ascb->list.prev, &ascb->list); + goto out_err_unmap; + } + + return 0; +out_err_unmap: + { + struct asd_ascb *b = a; + list_for_each_entry(a, &alist, list) { + if (a == b) + break; + t = a->uldd_task; + switch (t->task_proto) { + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + asd_unbuild_ata_ascb(a); + break; + case SAS_PROTOCOL_SMP: + asd_unbuild_smp_ascb(a); + break; + case SAS_PROTOCOL_SSP: + asd_unbuild_ssp_ascb(a); + break; + default: + break; + } + t->lldd_task = NULL; + } + } + list_del_init(&alist); +out_err: + if (ascb) + asd_ascb_free_list(ascb); + asd_can_dequeue(asd_ha, 1); + return res; +} diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c new file mode 100644 index 000000000..27d32b8c2 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c @@ -0,0 +1,686 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Aic94xx Task Management Functions + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + */ + +#include +#include +#include "aic94xx.h" +#include "aic94xx_sas.h" +#include "aic94xx_hwi.h" + +/* ---------- Internal enqueue ---------- */ + +static int asd_enqueue_internal(struct asd_ascb *ascb, + void (*tasklet_complete)(struct asd_ascb *, + struct done_list_struct *), + void (*timed_out)(struct timer_list *t)) +{ + int res; + + ascb->tasklet_complete = tasklet_complete; + ascb->uldd_timer = 1; + + ascb->timer.function = timed_out; + ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT; + + add_timer(&ascb->timer); + + res = asd_post_ascb_list(ascb->ha, ascb, 1); + if (unlikely(res)) + del_timer(&ascb->timer); + return res; +} + +/* ---------- CLEAR NEXUS ---------- */ + +struct tasklet_completion_status { + int dl_opcode; + int tmf_state; + u8 tag_valid:1; + __be16 tag; +}; + +#define DECLARE_TCS(tcs) \ + struct tasklet_completion_status tcs = { \ + .dl_opcode = 0, \ + .tmf_state = 0, \ + .tag_valid = 0, \ + .tag = 0, \ + } + + +static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct tasklet_completion_status *tcs = ascb->uldd_task; + ASD_DPRINTK("%s: here\n", __func__); + if (!del_timer(&ascb->timer)) { + ASD_DPRINTK("%s: couldn't delete timer\n", __func__); + return; + } + ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode); + tcs->dl_opcode = dl->opcode; + complete(ascb->completion); + asd_ascb_free(ascb); +} + +static void asd_clear_nexus_timedout(struct timer_list *t) +{ + struct asd_ascb *ascb = from_timer(ascb, t, timer); + struct tasklet_completion_status *tcs = ascb->uldd_task; + + ASD_DPRINTK("%s: here\n", __func__); + tcs->dl_opcode = TMF_RESP_FUNC_FAILED; + complete(ascb->completion); +} + +#define CLEAR_NEXUS_PRE \ + struct asd_ascb *ascb; \ + struct scb *scb; \ + int res; \ + DECLARE_COMPLETION_ONSTACK(completion); \ + DECLARE_TCS(tcs); \ + \ + ASD_DPRINTK("%s: PRE\n", __func__); \ + res = 1; \ + ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \ + if (!ascb) \ + return -ENOMEM; \ + \ + 
ascb->completion = &completion; \ + ascb->uldd_task = &tcs; \ + scb = ascb->scb; \ + scb->header.opcode = CLEAR_NEXUS + +#define CLEAR_NEXUS_POST \ + ASD_DPRINTK("%s: POST\n", __func__); \ + res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \ + asd_clear_nexus_timedout); \ + if (res) \ + goto out_err; \ + ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \ + wait_for_completion(&completion); \ + res = tcs.dl_opcode; \ + if (res == TC_NO_ERROR) \ + res = TMF_RESP_FUNC_COMPLETE; \ + return res; \ +out_err: \ + asd_ascb_free(ascb); \ + return res + +int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha) +{ + struct asd_ha_struct *asd_ha = sas_ha->lldd_ha; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_ADAPTER; + CLEAR_NEXUS_POST; +} + +int asd_clear_nexus_port(struct asd_sas_port *port) +{ + struct asd_ha_struct *asd_ha = port->ha->lldd_ha; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_PORT; + scb->clear_nexus.conn_mask = port->phy_mask; + CLEAR_NEXUS_POST; +} + +enum clear_nexus_phase { + NEXUS_PHASE_PRE, + NEXUS_PHASE_POST, + NEXUS_PHASE_RESUME, +}; + +static int asd_clear_nexus_I_T(struct domain_device *dev, + enum clear_nexus_phase phase) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_I_T; + switch (phase) { + case NEXUS_PHASE_PRE: + scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX; + break; + case NEXUS_PHASE_POST: + scb->clear_nexus.flags = SEND_Q | NOTINQ; + break; + case NEXUS_PHASE_RESUME: + scb->clear_nexus.flags = RESUME_TX; + } + scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) + dev->lldd_dev); + CLEAR_NEXUS_POST; +} + +int asd_I_T_nexus_reset(struct domain_device *dev) +{ + int res, tmp_res, i; + struct sas_phy *phy = sas_get_local_phy(dev); + /* Standard mandates link reset for ATA (type 0) and + * hard reset for SSP (type 1) */ + int reset_type = (dev->dev_type == SAS_SATA_DEV || + (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; + + asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE); + /* send a hard reset */ + ASD_DPRINTK("sending %s reset to %s\n", + reset_type ? "hard" : "soft", dev_name(&phy->dev)); + res = sas_phy_reset(phy, reset_type); + if (res == TMF_RESP_FUNC_COMPLETE || res == -ENODEV) { + /* wait for the maximum settle time */ + msleep(500); + /* clear all outstanding commands (keep nexus suspended) */ + asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST); + } + for (i = 0 ; i < 3; i++) { + tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME); + if (tmp_res == TC_RESUME) + goto out; + msleep(500); + } + + /* This is a bit of a problem: the sequencer is still suspended + * and is refusing to resume. 
Hope it will resume on a bigger hammer + * or the disk is lost */ + dev_printk(KERN_ERR, &phy->dev, + "Failed to resume nexus after reset 0x%x\n", tmp_res); + + res = TMF_RESP_FUNC_FAILED; + out: + sas_put_local_phy(phy); + return res; +} + +static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_I_T_L; + scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; + memcpy(scb->clear_nexus.ssp_task.lun, lun, 8); + scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) + dev->lldd_dev); + CLEAR_NEXUS_POST; +} + +static int asd_clear_nexus_tag(struct sas_task *task) +{ + struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; + struct asd_ascb *tascb = task->lldd_task; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_TAG; + memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8); + scb->clear_nexus.ssp_task.tag = tascb->tag; + if (task->dev->tproto) + scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) + task->dev->lldd_dev); + CLEAR_NEXUS_POST; +} + +static int asd_clear_nexus_index(struct sas_task *task) +{ + struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; + struct asd_ascb *tascb = task->lldd_task; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_TRANS_CX; + if (task->dev->tproto) + scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) + task->dev->lldd_dev); + scb->clear_nexus.index = cpu_to_le16(tascb->tc_index); + CLEAR_NEXUS_POST; +} + +/* ---------- TMFs ---------- */ + +static void asd_tmf_timedout(struct timer_list *t) +{ + struct asd_ascb *ascb = from_timer(ascb, t, timer); + struct tasklet_completion_status *tcs = ascb->uldd_task; + + ASD_DPRINTK("tmf timed out\n"); + tcs->tmf_state = TMF_RESP_FUNC_FAILED; + complete(ascb->completion); +} + +static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + unsigned long flags; + struct tc_resp_sb_struct { + __le16 index_escb; + u8 len_lsb; + u8 flags; + } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block; + + int edb_id = ((resp_sb->flags & 0x70) >> 4)-1; + struct asd_ascb *escb; + struct asd_dma_tok *edb; + struct ssp_frame_hdr *fh; + struct ssp_response_iu *ru; + int res = TMF_RESP_FUNC_FAILED; + + ASD_DPRINTK("tmf resp tasklet\n"); + + spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags); + escb = asd_tc_index_find(&asd_ha->seq, + (int)le16_to_cpu(resp_sb->index_escb)); + spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags); + + if (!escb) { + ASD_DPRINTK("Uh-oh! 
No escb for this dl?!\n"); + return res; + } + + edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index]; + ascb->tag = *(__be16 *)(edb->vaddr+4); + fh = edb->vaddr + 16; + ru = edb->vaddr + 16 + sizeof(*fh); + res = ru->status; + if (ru->datapres == SAS_DATAPRES_RESPONSE_DATA) + res = ru->resp_data[3]; +#if 0 + ascb->tag = fh->tag; +#endif + ascb->tag_valid = 1; + + asd_invalidate_edb(escb, edb_id); + return res; +} + +static void asd_tmf_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct tasklet_completion_status *tcs; + + if (!del_timer(&ascb->timer)) + return; + + tcs = ascb->uldd_task; + ASD_DPRINTK("tmf tasklet complete\n"); + + tcs->dl_opcode = dl->opcode; + + if (dl->opcode == TC_SSP_RESP) { + tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl); + tcs->tag_valid = ascb->tag_valid; + tcs->tag = ascb->tag; + } + + complete(ascb->completion); + asd_ascb_free(ascb); +} + +static int asd_clear_nexus(struct sas_task *task) +{ + int res = TMF_RESP_FUNC_FAILED; + int leftover; + struct asd_ascb *tascb = task->lldd_task; + DECLARE_COMPLETION_ONSTACK(completion); + unsigned long flags; + + tascb->completion = &completion; + + ASD_DPRINTK("task not done, clearing nexus\n"); + if (tascb->tag_valid) + res = asd_clear_nexus_tag(task); + else + res = asd_clear_nexus_index(task); + leftover = wait_for_completion_timeout(&completion, + AIC94XX_SCB_TIMEOUT); + tascb->completion = NULL; + ASD_DPRINTK("came back from clear nexus\n"); + spin_lock_irqsave(&task->task_state_lock, flags); + if (leftover < 1) + res = TMF_RESP_FUNC_FAILED; + if (task->task_state_flags & SAS_TASK_STATE_DONE) + res = TMF_RESP_FUNC_COMPLETE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + return res; +} + +/** + * asd_abort_task -- ABORT TASK TMF + * @task: the task to be aborted + * + * Before calling ABORT TASK the task state flags should be ORed with + * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under + * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called. + * + * Implements the ABORT TASK TMF, I_T_L_Q nexus. + * Returns: SAS TMF responses (see sas_task.h), + * -ENOMEM, + * -SAS_QUEUE_FULL. + * + * When ABORT TASK returns, the caller of ABORT TASK checks first the + * task->task_state_flags, and then the return value of ABORT TASK. + * + * If the task has task state bit SAS_TASK_STATE_DONE set, then the + * task was completed successfully prior to it being aborted. The + * caller of ABORT TASK has responsibility to call task->task_done() + * xor free the task, depending on their framework. The return code + * is TMF_RESP_FUNC_FAILED in this case. + * + * Else the SAS_TASK_STATE_DONE bit is not set, + * If the return code is TMF_RESP_FUNC_COMPLETE, then + * the task was aborted successfully. The caller of + * ABORT TASK has responsibility to call task->task_done() + * to finish the task, xor free the task depending on their + * framework. + * else + * the ABORT TASK returned some kind of error. The task + * was _not_ cancelled. Nothing can be assumed. + * The caller of ABORT TASK may wish to retry. 
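 *
 * For illustration only, a hedged sketch of that calling convention; the
 * real callers live in libsas and the helper name below is hypothetical.
 * It checks SAS_TASK_STATE_DONE under task_state_lock first, and only then
 * interprets the ABORT TASK return code:
 *
 *	int example_abort_and_reap(struct sas_task *task)
 *	{
 *		unsigned long flags;
 *		int res = asd_abort_task(task);
 *
 *		spin_lock_irqsave(&task->task_state_lock, flags);
 *		if (task->task_state_flags & SAS_TASK_STATE_DONE) {
 *			spin_unlock_irqrestore(&task->task_state_lock, flags);
 *			task->task_done(task);
 *			return 0;
 *		}
 *		spin_unlock_irqrestore(&task->task_state_lock, flags);
 *
 *		if (res == TMF_RESP_FUNC_COMPLETE) {
 *			task->task_done(task);
 *			return 0;
 *		}
 *		return res;
 *	}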
+ */ +int asd_abort_task(struct sas_task *task) +{ + struct asd_ascb *tascb = task->lldd_task; + struct asd_ha_struct *asd_ha = tascb->ha; + int res = 1; + unsigned long flags; + struct asd_ascb *ascb = NULL; + struct scb *scb; + int leftover; + DECLARE_TCS(tcs); + DECLARE_COMPLETION_ONSTACK(completion); + DECLARE_COMPLETION_ONSTACK(tascb_completion); + + tascb->completion = &tascb_completion; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + res = TMF_RESP_FUNC_COMPLETE; + ASD_DPRINTK("%s: task 0x%p done\n", __func__, task); + goto out_done; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + + ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); + if (!ascb) + return -ENOMEM; + + ascb->uldd_task = &tcs; + ascb->completion = &completion; + scb = ascb->scb; + scb->header.opcode = SCB_ABORT_TASK; + + switch (task->task_proto) { + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + scb->abort_task.proto_conn_rate = (1 << 5); /* STP */ + break; + case SAS_PROTOCOL_SSP: + scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */ + scb->abort_task.proto_conn_rate |= task->dev->linkrate; + break; + case SAS_PROTOCOL_SMP: + break; + default: + break; + } + + if (task->task_proto == SAS_PROTOCOL_SSP) { + scb->abort_task.ssp_frame.frame_type = SSP_TASK; + memcpy(scb->abort_task.ssp_frame.hashed_dest_addr, + task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + memcpy(scb->abort_task.ssp_frame.hashed_src_addr, + task->dev->port->ha->hashed_sas_addr, + HASHED_SAS_ADDR_SIZE); + scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF); + + memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8); + scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK; + scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF); + } + + scb->abort_task.sister_scb = cpu_to_le16(0xFFFF); + scb->abort_task.conn_handle = cpu_to_le16( + (u16)(unsigned long)task->dev->lldd_dev); + scb->abort_task.retry_count = 1; + scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index); + scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST); + + res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, + asd_tmf_timedout); + if (res) + goto out_free; + wait_for_completion(&completion); + ASD_DPRINTK("tmf came back\n"); + + tascb->tag = tcs.tag; + tascb->tag_valid = tcs.tag_valid; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + res = TMF_RESP_FUNC_COMPLETE; + ASD_DPRINTK("%s: task 0x%p done\n", __func__, task); + goto out_done; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + + if (tcs.dl_opcode == TC_SSP_RESP) { + /* The task to be aborted has been sent to the device. + * We got a Response IU for the ABORT TASK TMF. */ + if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE) + res = asd_clear_nexus(task); + else + res = tcs.tmf_state; + } else if (tcs.dl_opcode == TC_NO_ERROR && + tcs.tmf_state == TMF_RESP_FUNC_FAILED) { + /* timeout */ + res = TMF_RESP_FUNC_FAILED; + } else { + /* In the following we assume that the managing layer + * will _never_ make a mistake, when issuing ABORT + * TASK. + */ + switch (tcs.dl_opcode) { + default: + res = asd_clear_nexus(task); + fallthrough; + case TC_NO_ERROR: + break; + /* The task hasn't been sent to the device xor + * we never got a (sane) Response IU for the + * ABORT TASK TMF. 
+ */ + case TF_NAK_RECV: + res = TMF_RESP_INVALID_FRAME; + break; + case TF_TMF_TASK_DONE: /* done but not reported yet */ + res = TMF_RESP_FUNC_FAILED; + leftover = + wait_for_completion_timeout(&tascb_completion, + AIC94XX_SCB_TIMEOUT); + spin_lock_irqsave(&task->task_state_lock, flags); + if (leftover < 1) + res = TMF_RESP_FUNC_FAILED; + if (task->task_state_flags & SAS_TASK_STATE_DONE) + res = TMF_RESP_FUNC_COMPLETE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + break; + case TF_TMF_NO_TAG: + case TF_TMF_TAG_FREE: /* the tag is in the free list */ + case TF_TMF_NO_CONN_HANDLE: /* no such device */ + res = TMF_RESP_FUNC_COMPLETE; + break; + case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */ + res = TMF_RESP_FUNC_ESUPP; + break; + } + } + out_done: + tascb->completion = NULL; + if (res == TMF_RESP_FUNC_COMPLETE) { + task->lldd_task = NULL; + mb(); + asd_ascb_free(tascb); + } + ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); + return res; + + out_free: + asd_ascb_free(ascb); + ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); + return res; +} + +/** + * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus + * @dev: pointer to struct domain_device of interest + * @lun: pointer to u8[8] which is the LUN + * @tmf: the TMF to be performed (see sas_task.h or the SAS spec) + * @index: the transaction context of the task to be queried if QT TMF + * + * This function is used to send ABORT TASK SET, CLEAR ACA, + * CLEAR TASK SET, LU RESET and QUERY TASK TMFs. + * + * No SCBs should be queued to the I_T_L nexus when this SCB is + * pending. + * + * Returns: TMF response code (see sas_task.h or the SAS spec) + */ +static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun, + int tmf, int index) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + struct asd_ascb *ascb; + int res = 1; + struct scb *scb; + DECLARE_COMPLETION_ONSTACK(completion); + DECLARE_TCS(tcs); + + if (!(dev->tproto & SAS_PROTOCOL_SSP)) + return TMF_RESP_FUNC_ESUPP; + + ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); + if (!ascb) + return -ENOMEM; + + ascb->completion = &completion; + ascb->uldd_task = &tcs; + scb = ascb->scb; + + if (tmf == TMF_QUERY_TASK) + scb->header.opcode = QUERY_SSP_TASK; + else + scb->header.opcode = INITIATE_SSP_TMF; + + scb->ssp_tmf.proto_conn_rate = (1 << 4); /* SSP */ + scb->ssp_tmf.proto_conn_rate |= dev->linkrate; + /* SSP frame header */ + scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK; + memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr, + dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr, + dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF); + /* SSP Task IU */ + memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8); + scb->ssp_tmf.ssp_task.tmf = tmf; + + scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF); + scb->ssp_tmf.conn_handle= cpu_to_le16((u16)(unsigned long) + dev->lldd_dev); + scb->ssp_tmf.retry_count = 1; + scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST); + if (tmf == TMF_QUERY_TASK) + scb->ssp_tmf.index = cpu_to_le16(index); + + res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, + asd_tmf_timedout); + if (res) + goto out_err; + wait_for_completion(&completion); + + switch (tcs.dl_opcode) { + case TC_NO_ERROR: + res = TMF_RESP_FUNC_COMPLETE; + break; + case TF_NAK_RECV: + res = TMF_RESP_INVALID_FRAME; + break; + case TF_TMF_TASK_DONE: + res = TMF_RESP_FUNC_FAILED; + break; + case TF_TMF_NO_TAG: + case TF_TMF_TAG_FREE: /* the tag 
is in the free list */ + case TF_TMF_NO_CONN_HANDLE: /* no such device */ + res = TMF_RESP_FUNC_COMPLETE; + break; + case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */ + res = TMF_RESP_FUNC_ESUPP; + break; + default: + /* Allow TMF response codes to propagate upwards */ + res = tcs.dl_opcode; + break; + } + return res; +out_err: + asd_ascb_free(ascb); + return res; +} + +int asd_abort_task_set(struct domain_device *dev, u8 *lun) +{ + int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0); + + if (res == TMF_RESP_FUNC_COMPLETE) + asd_clear_nexus_I_T_L(dev, lun); + return res; +} + +int asd_clear_task_set(struct domain_device *dev, u8 *lun) +{ + int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0); + + if (res == TMF_RESP_FUNC_COMPLETE) + asd_clear_nexus_I_T_L(dev, lun); + return res; +} + +int asd_lu_reset(struct domain_device *dev, u8 *lun) +{ + int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0); + + if (res == TMF_RESP_FUNC_COMPLETE) + asd_clear_nexus_I_T_L(dev, lun); + return res; +} + +/** + * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus + * @task: pointer to sas_task struct of interest + * + * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set, + * or TMF_RESP_FUNC_SUCC if the task is in the task set. + * + * Normally the management layer sets the task to aborted state, + * and then calls query task and then abort task. + */ +int asd_query_task(struct sas_task *task) +{ + struct asd_ascb *ascb = task->lldd_task; + int index; + + if (ascb) { + index = ascb->tc_index; + return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN, + TMF_QUERY_TASK, index); + } + return TMF_RESP_FUNC_COMPLETE; +} diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c new file mode 100644 index 000000000..fbb29dbb1 --- /dev/null +++ b/drivers/scsi/am53c974.c @@ -0,0 +1,542 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD am53c974 driver. + * Copyright (c) 2014 Hannes Reinecke, SUSE Linux GmbH + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "esp_scsi.h" + +#define DRV_MODULE_NAME "am53c974" +#define DRV_MODULE_VERSION "1.00" + +static bool am53c974_debug; +static bool am53c974_fenab = true; + +#define esp_dma_log(f, a...) 
\ + do { \ + if (am53c974_debug) \ + shost_printk(KERN_DEBUG, esp->host, f, ##a); \ + } while (0) + +#define ESP_DMA_CMD 0x10 +#define ESP_DMA_STC 0x11 +#define ESP_DMA_SPA 0x12 +#define ESP_DMA_WBC 0x13 +#define ESP_DMA_WAC 0x14 +#define ESP_DMA_STATUS 0x15 +#define ESP_DMA_SMDLA 0x16 +#define ESP_DMA_WMAC 0x17 + +#define ESP_DMA_CMD_IDLE 0x00 +#define ESP_DMA_CMD_BLAST 0x01 +#define ESP_DMA_CMD_ABORT 0x02 +#define ESP_DMA_CMD_START 0x03 +#define ESP_DMA_CMD_MASK 0x03 +#define ESP_DMA_CMD_DIAG 0x04 +#define ESP_DMA_CMD_MDL 0x10 +#define ESP_DMA_CMD_INTE_P 0x20 +#define ESP_DMA_CMD_INTE_D 0x40 +#define ESP_DMA_CMD_DIR 0x80 + +#define ESP_DMA_STAT_PWDN 0x01 +#define ESP_DMA_STAT_ERROR 0x02 +#define ESP_DMA_STAT_ABORT 0x04 +#define ESP_DMA_STAT_DONE 0x08 +#define ESP_DMA_STAT_SCSIINT 0x10 +#define ESP_DMA_STAT_BCMPLT 0x20 + +/* EEPROM is accessed with 16-bit values */ +#define DC390_EEPROM_READ 0x80 +#define DC390_EEPROM_LEN 0x40 + +/* + * DC390 EEPROM + * + * 8 * 4 bytes of per-device options + * followed by HBA specific options + */ + +/* Per-device options */ +#define DC390_EE_MODE1 0x00 +#define DC390_EE_SPEED 0x01 + +/* HBA-specific options */ +#define DC390_EE_ADAPT_SCSI_ID 0x40 +#define DC390_EE_MODE2 0x41 +#define DC390_EE_DELAY 0x42 +#define DC390_EE_TAG_CMD_NUM 0x43 + +#define DC390_EE_MODE1_PARITY_CHK 0x01 +#define DC390_EE_MODE1_SYNC_NEGO 0x02 +#define DC390_EE_MODE1_EN_DISC 0x04 +#define DC390_EE_MODE1_SEND_START 0x08 +#define DC390_EE_MODE1_TCQ 0x10 + +#define DC390_EE_MODE2_MORE_2DRV 0x01 +#define DC390_EE_MODE2_GREATER_1G 0x02 +#define DC390_EE_MODE2_RST_SCSI_BUS 0x04 +#define DC390_EE_MODE2_ACTIVE_NEGATION 0x08 +#define DC390_EE_MODE2_NO_SEEK 0x10 +#define DC390_EE_MODE2_LUN_CHECK 0x20 + +struct pci_esp_priv { + struct esp *esp; + u8 dma_status; +}; + +static void pci_esp_dma_drain(struct esp *esp); + +static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp) +{ + return dev_get_drvdata(esp->dev); +} + +static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg) +{ + iowrite8(val, esp->regs + (reg * 4UL)); +} + +static u8 pci_esp_read8(struct esp *esp, unsigned long reg) +{ + return ioread8(esp->regs + (reg * 4UL)); +} + +static void pci_esp_write32(struct esp *esp, u32 val, unsigned long reg) +{ + return iowrite32(val, esp->regs + (reg * 4UL)); +} + +static int pci_esp_irq_pending(struct esp *esp) +{ + struct pci_esp_priv *pep = pci_esp_get_priv(esp); + + pep->dma_status = pci_esp_read8(esp, ESP_DMA_STATUS); + esp_dma_log("dma intr dreg[%02x]\n", pep->dma_status); + + if (pep->dma_status & (ESP_DMA_STAT_ERROR | + ESP_DMA_STAT_ABORT | + ESP_DMA_STAT_DONE | + ESP_DMA_STAT_SCSIINT)) + return 1; + + return 0; +} + +static void pci_esp_reset_dma(struct esp *esp) +{ + /* Nothing to do ? */ +} + +static void pci_esp_dma_drain(struct esp *esp) +{ + u8 resid; + int lim = 1000; + + + if ((esp->sreg & ESP_STAT_PMASK) == ESP_DOP || + (esp->sreg & ESP_STAT_PMASK) == ESP_DIP) + /* Data-In or Data-Out, nothing to be done */ + return; + + while (--lim > 0) { + resid = pci_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES; + if (resid <= 1) + break; + cpu_relax(); + } + + /* + * When there is a residual BCMPLT will never be set + * (obviously). But we still have to issue the BLAST + * command, otherwise the data will not being transferred. + * But we'll never know when the BLAST operation is + * finished. So check for some time and give up eventually. 
+ */ + lim = 1000; + pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_BLAST, ESP_DMA_CMD); + while (pci_esp_read8(esp, ESP_DMA_STATUS) & ESP_DMA_STAT_BCMPLT) { + if (--lim == 0) + break; + cpu_relax(); + } + pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_IDLE, ESP_DMA_CMD); + esp_dma_log("DMA blast done (%d tries, %d bytes left)\n", lim, resid); + /* BLAST residual handling is currently untested */ + if (WARN_ON_ONCE(resid == 1)) { + struct esp_cmd_entry *ent = esp->active_cmd; + + ent->flags |= ESP_CMD_FLAG_RESIDUAL; + } +} + +static void pci_esp_dma_invalidate(struct esp *esp) +{ + struct pci_esp_priv *pep = pci_esp_get_priv(esp); + + esp_dma_log("invalidate DMA\n"); + + pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD); + pep->dma_status = 0; +} + +static int pci_esp_dma_error(struct esp *esp) +{ + struct pci_esp_priv *pep = pci_esp_get_priv(esp); + + if (pep->dma_status & ESP_DMA_STAT_ERROR) { + u8 dma_cmd = pci_esp_read8(esp, ESP_DMA_CMD); + + if ((dma_cmd & ESP_DMA_CMD_MASK) == ESP_DMA_CMD_START) + pci_esp_write8(esp, ESP_DMA_CMD_ABORT, ESP_DMA_CMD); + + return 1; + } + if (pep->dma_status & ESP_DMA_STAT_ABORT) { + pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD); + pep->dma_status = pci_esp_read8(esp, ESP_DMA_CMD); + return 1; + } + return 0; +} + +static void pci_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, + u32 dma_count, int write, u8 cmd) +{ + struct pci_esp_priv *pep = pci_esp_get_priv(esp); + u32 val = 0; + + BUG_ON(!(cmd & ESP_CMD_DMA)); + + pep->dma_status = 0; + + /* Set DMA engine to IDLE */ + if (write) + /* DMA write direction logic is inverted */ + val |= ESP_DMA_CMD_DIR; + pci_esp_write8(esp, ESP_DMA_CMD_IDLE | val, ESP_DMA_CMD); + + pci_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); + pci_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); + if (esp->config2 & ESP_CONFIG2_FENAB) + pci_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI); + + pci_esp_write32(esp, esp_count, ESP_DMA_STC); + pci_esp_write32(esp, addr, ESP_DMA_SPA); + + esp_dma_log("start dma addr[%x] count[%d:%d]\n", + addr, esp_count, dma_count); + + scsi_esp_cmd(esp, cmd); + /* Send DMA Start command */ + pci_esp_write8(esp, ESP_DMA_CMD_START | val, ESP_DMA_CMD); +} + +static u32 pci_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) +{ + int dma_limit = 16; + u32 base, end; + + /* + * If CONFIG2_FENAB is set we can + * handle up to 24 bit addresses + */ + if (esp->config2 & ESP_CONFIG2_FENAB) + dma_limit = 24; + + if (dma_len > (1U << dma_limit)) + dma_len = (1U << dma_limit); + + /* + * Prevent crossing a 24-bit address boundary. 
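 *
 * Worked example with illustrative numbers: dma_addr = 0x00FFFF00 and
 * dma_len = 0x400 give base = 0xFFFF00 and end = 0x1000300; end is then
 * clamped to 1 << 24, so only 0x100 bytes are programmed for this chunk
 * and the remainder of the buffer is handled by a later transfer.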
+ */ + base = dma_addr & ((1U << 24) - 1U); + end = base + dma_len; + if (end > (1U << 24)) + end = (1U <<24); + dma_len = end - base; + + return dma_len; +} + +static const struct esp_driver_ops pci_esp_ops = { + .esp_write8 = pci_esp_write8, + .esp_read8 = pci_esp_read8, + .irq_pending = pci_esp_irq_pending, + .reset_dma = pci_esp_reset_dma, + .dma_drain = pci_esp_dma_drain, + .dma_invalidate = pci_esp_dma_invalidate, + .send_dma_cmd = pci_esp_send_dma_cmd, + .dma_error = pci_esp_dma_error, + .dma_length_limit = pci_esp_dma_length_limit, +}; + +/* + * Read DC-390 eeprom + */ +static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd) +{ + u8 carry_flag = 1, j = 0x80, bval; + int i; + + for (i = 0; i < 9; i++) { + if (carry_flag) { + pci_write_config_byte(pdev, 0x80, 0x40); + bval = 0xc0; + } else + bval = 0x80; + + udelay(160); + pci_write_config_byte(pdev, 0x80, bval); + udelay(160); + pci_write_config_byte(pdev, 0x80, 0); + udelay(160); + + carry_flag = (cmd & j) ? 1 : 0; + j >>= 1; + } +} + +static u16 dc390_eeprom_get_data(struct pci_dev *pdev) +{ + int i; + u16 wval = 0; + u8 bval; + + for (i = 0; i < 16; i++) { + wval <<= 1; + + pci_write_config_byte(pdev, 0x80, 0x80); + udelay(160); + pci_write_config_byte(pdev, 0x80, 0x40); + udelay(160); + pci_read_config_byte(pdev, 0x00, &bval); + + if (bval == 0x22) + wval |= 1; + } + + return wval; +} + +static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr) +{ + u8 cmd = DC390_EEPROM_READ, i; + + for (i = 0; i < DC390_EEPROM_LEN; i++) { + pci_write_config_byte(pdev, 0xc0, 0); + udelay(160); + + dc390_eeprom_prepare_read(pdev, cmd++); + *ptr++ = dc390_eeprom_get_data(pdev); + + pci_write_config_byte(pdev, 0x80, 0); + pci_write_config_byte(pdev, 0x80, 0); + udelay(160); + } +} + +static void dc390_check_eeprom(struct esp *esp) +{ + struct pci_dev *pdev = to_pci_dev(esp->dev); + u8 EEbuf[128]; + u16 *ptr = (u16 *)EEbuf, wval = 0; + int i; + + dc390_read_eeprom(pdev, ptr); + + for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++) + wval += *ptr; + + /* no Tekram EEprom found */ + if (wval != 0x1234) { + dev_printk(KERN_INFO, &pdev->dev, + "No valid Tekram EEprom found\n"); + return; + } + esp->scsi_id = EEbuf[DC390_EE_ADAPT_SCSI_ID]; + esp->num_tags = 2 << EEbuf[DC390_EE_TAG_CMD_NUM]; + if (EEbuf[DC390_EE_MODE2] & DC390_EE_MODE2_ACTIVE_NEGATION) + esp->config4 |= ESP_CONFIG4_RADE | ESP_CONFIG4_RAE; +} + +static int pci_esp_probe_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + const struct scsi_host_template *hostt = &scsi_esp_template; + int err = -ENODEV; + struct Scsi_Host *shost; + struct esp *esp; + struct pci_esp_priv *pep; + + if (pci_enable_device(pdev)) { + dev_printk(KERN_INFO, &pdev->dev, "cannot enable device\n"); + return -ENODEV; + } + + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { + dev_printk(KERN_INFO, &pdev->dev, + "failed to set 32bit DMA mask\n"); + goto fail_disable_device; + } + + shost = scsi_host_alloc(hostt, sizeof(struct esp)); + if (!shost) { + dev_printk(KERN_INFO, &pdev->dev, + "failed to allocate scsi host\n"); + err = -ENOMEM; + goto fail_disable_device; + } + + pep = kzalloc(sizeof(struct pci_esp_priv), GFP_KERNEL); + if (!pep) { + dev_printk(KERN_INFO, &pdev->dev, + "failed to allocate esp_priv\n"); + err = -ENOMEM; + goto fail_host_alloc; + } + + esp = shost_priv(shost); + esp->host = shost; + esp->dev = &pdev->dev; + esp->ops = &pci_esp_ops; + /* + * The am53c974 HBA has a design flaw of generating + * spurious DMA completion interrupts when using + * DMA for command submission. 
+ */ + esp->flags |= ESP_FLAG_USE_FIFO; + /* + * Enable CONFIG2_FENAB to allow for large DMA transfers + */ + if (am53c974_fenab) + esp->config2 |= ESP_CONFIG2_FENAB; + + pep->esp = esp; + + if (pci_request_regions(pdev, DRV_MODULE_NAME)) { + dev_printk(KERN_ERR, &pdev->dev, + "pci memory selection failed\n"); + goto fail_priv_alloc; + } + + esp->regs = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); + if (!esp->regs) { + dev_printk(KERN_ERR, &pdev->dev, "pci I/O map failed\n"); + err = -EINVAL; + goto fail_release_regions; + } + esp->dma_regs = esp->regs; + + pci_set_master(pdev); + + esp->command_block = dma_alloc_coherent(&pdev->dev, 16, + &esp->command_block_dma, GFP_KERNEL); + if (!esp->command_block) { + dev_printk(KERN_ERR, &pdev->dev, + "failed to allocate command block\n"); + err = -ENOMEM; + goto fail_unmap_regs; + } + + pci_set_drvdata(pdev, pep); + + err = request_irq(pdev->irq, scsi_esp_intr, IRQF_SHARED, + DRV_MODULE_NAME, esp); + if (err < 0) { + dev_printk(KERN_ERR, &pdev->dev, "failed to register IRQ\n"); + goto fail_unmap_command_block; + } + + esp->scsi_id = 7; + dc390_check_eeprom(esp); + + shost->this_id = esp->scsi_id; + shost->max_id = 8; + shost->irq = pdev->irq; + shost->io_port = pci_resource_start(pdev, 0); + shost->n_io_port = pci_resource_len(pdev, 0); + shost->unique_id = shost->io_port; + esp->scsi_id_mask = (1 << esp->scsi_id); + /* Assume 40MHz clock */ + esp->cfreq = 40000000; + + err = scsi_esp_register(esp); + if (err) + goto fail_free_irq; + + return 0; + +fail_free_irq: + free_irq(pdev->irq, esp); +fail_unmap_command_block: + pci_set_drvdata(pdev, NULL); + dma_free_coherent(&pdev->dev, 16, esp->command_block, + esp->command_block_dma); +fail_unmap_regs: + pci_iounmap(pdev, esp->regs); +fail_release_regions: + pci_release_regions(pdev); +fail_priv_alloc: + kfree(pep); +fail_host_alloc: + scsi_host_put(shost); +fail_disable_device: + pci_disable_device(pdev); + + return err; +} + +static void pci_esp_remove_one(struct pci_dev *pdev) +{ + struct pci_esp_priv *pep = pci_get_drvdata(pdev); + struct esp *esp = pep->esp; + + scsi_esp_unregister(esp); + free_irq(pdev->irq, esp); + pci_set_drvdata(pdev, NULL); + dma_free_coherent(&pdev->dev, 16, esp->command_block, + esp->command_block_dma); + pci_iounmap(pdev, esp->regs); + pci_release_regions(pdev); + pci_disable_device(pdev); + kfree(pep); + + scsi_host_put(esp->host); +} + +static struct pci_device_id am53c974_pci_tbl[] = { + { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { } +}; +MODULE_DEVICE_TABLE(pci, am53c974_pci_tbl); + +static struct pci_driver am53c974_driver = { + .name = DRV_MODULE_NAME, + .id_table = am53c974_pci_tbl, + .probe = pci_esp_probe_one, + .remove = pci_esp_remove_one, +}; + +module_pci_driver(am53c974_driver); + +MODULE_DESCRIPTION("AM53C974 SCSI driver"); +MODULE_AUTHOR("Hannes Reinecke "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); +MODULE_ALIAS("tmscsim"); + +module_param(am53c974_debug, bool, 0644); +MODULE_PARM_DESC(am53c974_debug, "Enable debugging"); + +module_param(am53c974_fenab, bool, 0444); +MODULE_PARM_DESC(am53c974_fenab, "Enable 24-bit DMA transfer sizes"); diff --git a/drivers/scsi/arcmsr/Makefile b/drivers/scsi/arcmsr/Makefile new file mode 100644 index 000000000..9051f66ca --- /dev/null +++ b/drivers/scsi/arcmsr/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +# File: drivers/arcmsr/Makefile +# Makefile for the ARECA PCI-X PCI-EXPRESS SATA RAID controllers SCSI driver. 
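Referring back to dc390_check_eeprom() in am53c974.c above, a minimal sketch of its acceptance test (the example_* name is hypothetical; DC390_EEPROM_LEN is 0x40 words, as defined in that file):

#include <stdint.h>
#include <stdbool.h>

/* The 64 16-bit words read from the DC-390 EEPROM must sum, modulo 2^16,
 * to 0x1234; any other sum means no valid Tekram EEPROM was found and the
 * driver keeps its defaults (e.g. SCSI ID 7). */
static bool example_dc390_eeprom_valid(const uint16_t eeprom[64])
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < 64; i++)
		sum += eeprom[i];
	return sum == 0x1234;
}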
+ +arcmsr-objs := arcmsr_attr.o arcmsr_hba.o + +obj-$(CONFIG_SCSI_ARCMSR) := arcmsr.o diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h new file mode 100644 index 000000000..ed8d93198 --- /dev/null +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -0,0 +1,1047 @@ +/* +******************************************************************************* +** O.S : Linux +** FILE NAME : arcmsr.h +** BY : Nick Cheng +** Description: SCSI RAID Device Driver for +** ARECA RAID Host adapter +******************************************************************************* +** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved. +** +** Web site: www.areca.com.tw +** E-mail: support@areca.com.tw +** +** This program is free software; you can redistribute it and/or modify +** it under the terms of the GNU General Public License version 2 as +** published by the Free Software Foundation. +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +******************************************************************************* +** Redistribution and use in source and binary forms, with or without +** modification, are permitted provided that the following conditions +** are met: +** 1. Redistributions of source code must retain the above copyright +** notice, this list of conditions and the following disclaimer. +** 2. Redistributions in binary form must reproduce the above copyright +** notice, this list of conditions and the following disclaimer in the +** documentation and/or other materials provided with the distribution. +** 3. The name of the author may not be used to endorse or promote products +** derived from this software without specific prior written permission. +** +** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT +** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY +** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +**(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF +** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+******************************************************************************* +*/ +#include +struct device_attribute; +/*The limit of outstanding scsi command that firmware can handle*/ +#define ARCMSR_NAME "arcmsr" +#define ARCMSR_MAX_FREECCB_NUM 1024 +#define ARCMSR_MAX_OUTSTANDING_CMD 1024 +#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128 +#define ARCMSR_MIN_OUTSTANDING_CMD 32 +#define ARCMSR_DRIVER_VERSION "v1.50.00.13-20230206" +#define ARCMSR_SCSI_INITIATOR_ID 255 +#define ARCMSR_MAX_XFER_SECTORS 512 +#define ARCMSR_MAX_XFER_SECTORS_B 4096 +#define ARCMSR_MAX_XFER_SECTORS_C 304 +#define ARCMSR_MAX_TARGETID 17 +#define ARCMSR_MAX_TARGETLUN 8 +#define ARCMSR_MAX_CMD_PERLUN 128 +#define ARCMSR_DEFAULT_CMD_PERLUN 32 +#define ARCMSR_MIN_CMD_PERLUN 1 +#define ARCMSR_MAX_QBUFFER 4096 +#define ARCMSR_DEFAULT_SG_ENTRIES 38 +#define ARCMSR_MAX_HBB_POSTQUEUE 264 +#define ARCMSR_MAX_ARC1214_POSTQUEUE 256 +#define ARCMSR_MAX_ARC1214_DONEQUEUE 257 +#define ARCMSR_MAX_HBE_DONEQUEUE 512 +#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */ +#define ARCMSR_CDB_SG_PAGE_LENGTH 256 +#define ARCMST_NUM_MSIX_VECTORS 4 +#ifndef PCI_DEVICE_ID_ARECA_1880 +#define PCI_DEVICE_ID_ARECA_1880 0x1880 +#endif +#ifndef PCI_DEVICE_ID_ARECA_1214 +#define PCI_DEVICE_ID_ARECA_1214 0x1214 +#endif +#ifndef PCI_DEVICE_ID_ARECA_1203 +#define PCI_DEVICE_ID_ARECA_1203 0x1203 +#endif +#ifndef PCI_DEVICE_ID_ARECA_1884 +#define PCI_DEVICE_ID_ARECA_1884 0x1884 +#endif +#define PCI_DEVICE_ID_ARECA_1886 0x188A +#define ARCMSR_HOURS (1000 * 60 * 60 * 4) +#define ARCMSR_MINUTES (1000 * 60 * 60) +#define ARCMSR_DEFAULT_TIMEOUT 90 +/* +********************************************************************************** +** +********************************************************************************** +*/ +#define ARC_SUCCESS 0 +#define ARC_FAILURE 1 +/* +******************************************************************************* +** split 64bits dma addressing +******************************************************************************* +*/ +#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16) +#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff) +/* +******************************************************************************* +** MESSAGE CONTROL CODE +******************************************************************************* +*/ +struct CMD_MESSAGE +{ + uint32_t HeaderLength; + uint8_t Signature[8]; + uint32_t Timeout; + uint32_t ControlCode; + uint32_t ReturnCode; + uint32_t Length; +}; +/* +******************************************************************************* +** IOP Message Transfer Data for user space +******************************************************************************* +*/ +#define ARCMSR_API_DATA_BUFLEN 1032 +struct CMD_MESSAGE_FIELD +{ + struct CMD_MESSAGE cmdmessage; + uint8_t messagedatabuffer[ARCMSR_API_DATA_BUFLEN]; +}; +/* IOP message transfer */ +#define ARCMSR_MESSAGE_FAIL 0x0001 +/* DeviceType */ +#define ARECA_SATA_RAID 0x90000000 +/* FunctionCode */ +#define FUNCTION_READ_RQBUFFER 0x0801 +#define FUNCTION_WRITE_WQBUFFER 0x0802 +#define FUNCTION_CLEAR_RQBUFFER 0x0803 +#define FUNCTION_CLEAR_WQBUFFER 0x0804 +#define FUNCTION_CLEAR_ALLQBUFFER 0x0805 +#define FUNCTION_RETURN_CODE_3F 0x0806 +#define FUNCTION_SAY_HELLO 0x0807 +#define FUNCTION_SAY_GOODBYE 0x0808 +#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809 +#define FUNCTION_GET_FIRMWARE_STATUS 0x080A +#define FUNCTION_HARDWARE_RESET 0x080B +/* ARECA IO CONTROL CODE*/ +#define ARCMSR_MESSAGE_READ_RQBUFFER \ + ARECA_SATA_RAID | 
FUNCTION_READ_RQBUFFER +#define ARCMSR_MESSAGE_WRITE_WQBUFFER \ + ARECA_SATA_RAID | FUNCTION_WRITE_WQBUFFER +#define ARCMSR_MESSAGE_CLEAR_RQBUFFER \ + ARECA_SATA_RAID | FUNCTION_CLEAR_RQBUFFER +#define ARCMSR_MESSAGE_CLEAR_WQBUFFER \ + ARECA_SATA_RAID | FUNCTION_CLEAR_WQBUFFER +#define ARCMSR_MESSAGE_CLEAR_ALLQBUFFER \ + ARECA_SATA_RAID | FUNCTION_CLEAR_ALLQBUFFER +#define ARCMSR_MESSAGE_RETURN_CODE_3F \ + ARECA_SATA_RAID | FUNCTION_RETURN_CODE_3F +#define ARCMSR_MESSAGE_SAY_HELLO \ + ARECA_SATA_RAID | FUNCTION_SAY_HELLO +#define ARCMSR_MESSAGE_SAY_GOODBYE \ + ARECA_SATA_RAID | FUNCTION_SAY_GOODBYE +#define ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE \ + ARECA_SATA_RAID | FUNCTION_FLUSH_ADAPTER_CACHE +/* ARECA IOCTL ReturnCode */ +#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001 +#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006 +#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F +#define ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON 0x00000088 +/* +************************************************************* +** structure for holding DMA address data +************************************************************* +*/ +#define IS_DMA64 (sizeof(dma_addr_t) == 8) +#define IS_SG64_ADDR 0x01000000 /* bit24 */ +struct SG32ENTRY +{ + __le32 length; + __le32 address; +}__attribute__ ((packed)); +struct SG64ENTRY +{ + __le32 length; + __le32 address; + __le32 addresshigh; +}__attribute__ ((packed)); +/* +******************************************************************** +** Q Buffer of IOP Message Transfer +******************************************************************** +*/ +struct QBUFFER +{ + uint32_t data_len; + uint8_t data[124]; +}; +/* +******************************************************************************* +** FIRMWARE INFO for Intel IOP R 80331 processor (Type A) +******************************************************************************* +*/ +struct FIRMWARE_INFO +{ + uint32_t signature; /*0, 00-03*/ + uint32_t request_len; /*1, 04-07*/ + uint32_t numbers_queue; /*2, 08-11*/ + uint32_t sdram_size; /*3, 12-15*/ + uint32_t ide_channels; /*4, 16-19*/ + char vendor[40]; /*5, 20-59*/ + char model[8]; /*15, 60-67*/ + char firmware_ver[16]; /*17, 68-83*/ + char device_map[16]; /*21, 84-99*/ + uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/ + uint8_t cfgSerial[16]; /*26,104-119*/ + uint32_t cfgPicStatus; /*30,120-123*/ +}; +/* signature of set and get firmware config */ +#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060 +#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063 +/* message code of inbound message register */ +#define ARCMSR_INBOUND_MESG0_NOP 0x00000000 +#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001 +#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002 +#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003 +#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004 +#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005 +#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006 +#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007 +#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008 +/* doorbell interrupt generator */ +#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001 +#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002 +#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001 +#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002 +/* ccb areca cdb flag */ +#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000 +#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000 +#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000 +#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000 +#define 
ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001 +/* outbound firmware ok */ +#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000 +/* ARC-1680 Bus Reset*/ +#define ARCMSR_ARC1680_BUS_RESET 0x00000003 +/* ARC-1880 Bus Reset*/ +#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024 +#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080 + +/* +************************************************************************ +** SPEC. for Areca Type B adapter +************************************************************************ +*/ +/* ARECA HBB COMMAND for its FIRMWARE */ +/* window of "instruction flags" from driver to iop */ +#define ARCMSR_DRV2IOP_DOORBELL 0x00020400 +#define ARCMSR_DRV2IOP_DOORBELL_MASK 0x00020404 +/* window of "instruction flags" from iop to driver */ +#define ARCMSR_IOP2DRV_DOORBELL 0x00020408 +#define ARCMSR_IOP2DRV_DOORBELL_MASK 0x0002040C +/* window of "instruction flags" from iop to driver */ +#define ARCMSR_IOP2DRV_DOORBELL_1203 0x00021870 +#define ARCMSR_IOP2DRV_DOORBELL_MASK_1203 0x00021874 +/* window of "instruction flags" from driver to iop */ +#define ARCMSR_DRV2IOP_DOORBELL_1203 0x00021878 +#define ARCMSR_DRV2IOP_DOORBELL_MASK_1203 0x0002187C +/* ARECA FLAG LANGUAGE */ +/* ioctl transfer */ +#define ARCMSR_IOP2DRV_DATA_WRITE_OK 0x00000001 +/* ioctl transfer */ +#define ARCMSR_IOP2DRV_DATA_READ_OK 0x00000002 +#define ARCMSR_IOP2DRV_CDB_DONE 0x00000004 +#define ARCMSR_IOP2DRV_MESSAGE_CMD_DONE 0x00000008 + +#define ARCMSR_DOORBELL_HANDLE_INT 0x0000000F +#define ARCMSR_DOORBELL_INT_CLEAR_PATTERN 0xFF00FFF0 +#define ARCMSR_MESSAGE_INT_CLEAR_PATTERN 0xFF00FFF7 +/* (ARCMSR_INBOUND_MESG0_GET_CONFIG<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */ +#define ARCMSR_MESSAGE_GET_CONFIG 0x00010008 +/* (ARCMSR_INBOUND_MESG0_SET_CONFIG<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */ +#define ARCMSR_MESSAGE_SET_CONFIG 0x00020008 +/* (ARCMSR_INBOUND_MESG0_ABORT_CMD<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */ +#define ARCMSR_MESSAGE_ABORT_CMD 0x00030008 +/* (ARCMSR_INBOUND_MESG0_STOP_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */ +#define ARCMSR_MESSAGE_STOP_BGRB 0x00040008 +/* (ARCMSR_INBOUND_MESG0_FLUSH_CACHE<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */ +#define ARCMSR_MESSAGE_FLUSH_CACHE 0x00050008 +/* (ARCMSR_INBOUND_MESG0_START_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */ +#define ARCMSR_MESSAGE_START_BGRB 0x00060008 +#define ARCMSR_MESSAGE_SYNC_TIMER 0x00080008 +#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008 +#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008 +#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008 +/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */ +#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000 +/* ioctl transfer */ +#define ARCMSR_DRV2IOP_DATA_WRITE_OK 0x00000001 +/* ioctl transfer */ +#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002 +#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004 +#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008 +#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010 + +/* data tunnel buffer between user space program and its firmware */ +/* user space data to iop 128bytes */ +#define ARCMSR_MESSAGE_WBUFFER 0x0000fe00 +/* iop data to user space 128bytes */ +#define ARCMSR_MESSAGE_RBUFFER 0x0000ff00 +/* iop message_rwbuffer for message command */ +#define ARCMSR_MESSAGE_RWBUFFER 0x0000fa00 + +#define MEM_BASE0(x) (u32 __iomem *)((unsigned long)acb->mem_base0 + x) +#define MEM_BASE1(x) (u32 __iomem *)((unsigned long)acb->mem_base1 + x) +/* +************************************************************************ +** SPEC. 
for Areca HBC adapter +************************************************************************ +*/ +#define ARCMSR_HBC_ISR_THROTTLING_LEVEL 12 +#define ARCMSR_HBC_ISR_MAX_DONE_QUEUE 20 +/* Host Interrupt Mask */ +#define ARCMSR_HBCMU_UTILITY_A_ISR_MASK 0x00000001 /* When clear, the Utility_A interrupt routes to the host.*/ +#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK 0x00000004 /* When clear, the General Outbound Doorbell interrupt routes to the host.*/ +#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK 0x00000008 /* When clear, the Outbound Post List FIFO Not Empty interrupt routes to the host.*/ +#define ARCMSR_HBCMU_ALL_INTMASKENABLE 0x0000000D /* disable all ISR */ +/* Host Interrupt Status */ +#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001 + /* + ** Set when the Utility_A Interrupt bit is set in the Outbound Doorbell Register. + ** It clears by writing a 1 to the Utility_A bit in the Outbound Doorbell Clear Register or through automatic clearing (if enabled). + */ +#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004 + /* + ** Set if Outbound Doorbell register bits 30:1 have a non-zero + ** value. This bit clears only when Outbound Doorbell bits + ** 30:1 are ALL clear. Only a write to the Outbound Doorbell + ** Clear register clears bits in the Outbound Doorbell register. + */ +#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR 0x00000008 + /* + ** Set whenever the Outbound Post List Producer/Consumer + ** Register (FIFO) is not empty. It clears when the Outbound + ** Post List FIFO is empty. + */ +#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010 + /* + ** This bit indicates a SAS interrupt from a source external to + ** the PCIe core. This bit is not maskable. + */ + /* DoorBell*/ +#define ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK 0x00000002 +#define ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK 0x00000004 + /*inbound message 0 ready*/ +#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008 + /*more than 12 request completed in a time*/ +#define ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING 0x00000010 +#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK 0x00000002 + /*outbound DATA WRITE isr door bell clear*/ +#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002 +#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK 0x00000004 + /*outbound DATA READ isr door bell clear*/ +#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004 + /*outbound message 0 ready*/ +#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008 + /*outbound message cmd isr door bell clear*/ +#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR 0x00000008 + /*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/ +#define ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK 0x80000000 +/* +******************************************************************************* +** SPEC. 
for Areca Type D adapter +******************************************************************************* +*/ +#define ARCMSR_ARC1214_CHIP_ID 0x00004 +#define ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION 0x00008 +#define ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK 0x00034 +#define ARCMSR_ARC1214_SAMPLE_RESET 0x00100 +#define ARCMSR_ARC1214_RESET_REQUEST 0x00108 +#define ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS 0x00200 +#define ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE 0x0020C +#define ARCMSR_ARC1214_INBOUND_MESSAGE0 0x00400 +#define ARCMSR_ARC1214_INBOUND_MESSAGE1 0x00404 +#define ARCMSR_ARC1214_OUTBOUND_MESSAGE0 0x00420 +#define ARCMSR_ARC1214_OUTBOUND_MESSAGE1 0x00424 +#define ARCMSR_ARC1214_INBOUND_DOORBELL 0x00460 +#define ARCMSR_ARC1214_OUTBOUND_DOORBELL 0x00480 +#define ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE 0x00484 +#define ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW 0x01000 +#define ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH 0x01004 +#define ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER 0x01018 +#define ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW 0x01060 +#define ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH 0x01064 +#define ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER 0x0106C +#define ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER 0x01070 +#define ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE 0x01088 +#define ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE 0x0108C +#define ARCMSR_ARC1214_MESSAGE_WBUFFER 0x02000 +#define ARCMSR_ARC1214_MESSAGE_RBUFFER 0x02100 +#define ARCMSR_ARC1214_MESSAGE_RWBUFFER 0x02200 +/* Host Interrupt Mask */ +#define ARCMSR_ARC1214_ALL_INT_ENABLE 0x00001010 +#define ARCMSR_ARC1214_ALL_INT_DISABLE 0x00000000 +/* Host Interrupt Status */ +#define ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR 0x00001000 +#define ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR 0x00000010 +/* DoorBell*/ +#define ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY 0x00000001 +#define ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ 0x00000002 +/*inbound message 0 ready*/ +#define ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK 0x00000001 +/*outbound DATA WRITE isr door bell clear*/ +#define ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK 0x00000002 +/*outbound message 0 ready*/ +#define ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE 0x02000000 +/*outbound message cmd isr door bell clear*/ +/*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/ +#define ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK 0x80000000 +#define ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR 0x00000001 +/* +******************************************************************************* +** SPEC. for Areca Type E adapter +******************************************************************************* +*/ +#define ARCMSR_SIGNATURE_1884 0x188417D3 + +#define ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK 0x00000002 +#define ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK 0x00000004 +#define ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008 + +#define ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK 0x00000002 +#define ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK 0x00000004 +#define ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008 + +#define ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK 0x80000000 + +#define ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR 0x00000001 +#define ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR 0x00000008 +#define ARCMSR_HBEMU_ALL_INTMASKENABLE 0x00000009 + +/* ARC-1884 doorbell sync */ +#define ARCMSR_HBEMU_DOORBELL_SYNC 0x100 +#define ARCMSR_ARC188X_RESET_ADAPTER 0x00000004 +#define ARCMSR_ARC1884_DiagWrite_ENABLE 0x00000080 + +/* +******************************************************************************* +** SPEC. 
for Areca Type F adapter +******************************************************************************* +*/ +#define ARCMSR_SIGNATURE_1886 0x188617D3 +// Doorbell and interrupt definition are same as Type E adapter +/* ARC-1886 doorbell sync */ +#define ARCMSR_HBFMU_DOORBELL_SYNC 0x100 +//set host rw buffer physical address at inbound message 0, 1 (low,high) +#define ARCMSR_HBFMU_DOORBELL_SYNC1 0x300 +#define ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK 0x80000000 +#define ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE 0x20000000 + +/* +******************************************************************************* +** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504) +******************************************************************************* +*/ +struct ARCMSR_CDB +{ + uint8_t Bus; + uint8_t TargetID; + uint8_t LUN; + uint8_t Function; + uint8_t CdbLength; + uint8_t sgcount; + uint8_t Flags; +#define ARCMSR_CDB_FLAG_SGL_BSIZE 0x01 +#define ARCMSR_CDB_FLAG_BIOS 0x02 +#define ARCMSR_CDB_FLAG_WRITE 0x04 +#define ARCMSR_CDB_FLAG_SIMPLEQ 0x00 +#define ARCMSR_CDB_FLAG_HEADQ 0x08 +#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10 + + uint8_t msgPages; + uint32_t msgContext; + uint32_t DataLength; + uint8_t Cdb[16]; + uint8_t DeviceStatus; +#define ARCMSR_DEV_CHECK_CONDITION 0x02 +#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0 +#define ARCMSR_DEV_ABORTED 0xF1 +#define ARCMSR_DEV_INIT_FAIL 0xF2 + + uint8_t SenseData[15]; + union + { + struct SG32ENTRY sg32entry[1]; + struct SG64ENTRY sg64entry[1]; + } u; +}; +/* +******************************************************************************* +** Messaging Unit (MU) of the Intel R 80331 I/O processor(Type A) and Type B processor +******************************************************************************* +*/ +struct MessageUnit_A +{ + uint32_t resrved0[4]; /*0000 000F*/ + uint32_t inbound_msgaddr0; /*0010 0013*/ + uint32_t inbound_msgaddr1; /*0014 0017*/ + uint32_t outbound_msgaddr0; /*0018 001B*/ + uint32_t outbound_msgaddr1; /*001C 001F*/ + uint32_t inbound_doorbell; /*0020 0023*/ + uint32_t inbound_intstatus; /*0024 0027*/ + uint32_t inbound_intmask; /*0028 002B*/ + uint32_t outbound_doorbell; /*002C 002F*/ + uint32_t outbound_intstatus; /*0030 0033*/ + uint32_t outbound_intmask; /*0034 0037*/ + uint32_t reserved1[2]; /*0038 003F*/ + uint32_t inbound_queueport; /*0040 0043*/ + uint32_t outbound_queueport; /*0044 0047*/ + uint32_t reserved2[2]; /*0048 004F*/ + uint32_t reserved3[492]; /*0050 07FF 492*/ + uint32_t reserved4[128]; /*0800 09FF 128*/ + uint32_t message_rwbuffer[256]; /*0a00 0DFF 256*/ + uint32_t message_wbuffer[32]; /*0E00 0E7F 32*/ + uint32_t reserved5[32]; /*0E80 0EFF 32*/ + uint32_t message_rbuffer[32]; /*0F00 0F7F 32*/ + uint32_t reserved6[32]; /*0F80 0FFF 32*/ +}; + +struct MessageUnit_B +{ + uint32_t post_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; + uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; + uint32_t postq_index; + uint32_t doneq_index; + uint32_t __iomem *drv2iop_doorbell; + uint32_t __iomem *drv2iop_doorbell_mask; + uint32_t __iomem *iop2drv_doorbell; + uint32_t __iomem *iop2drv_doorbell_mask; + uint32_t __iomem *message_rwbuffer; + uint32_t __iomem *message_wbuffer; + uint32_t __iomem *message_rbuffer; +}; +/* +********************************************************************* +** LSI +********************************************************************* +*/ +struct MessageUnit_C{ + uint32_t message_unit_status; /*0000 0003*/ + uint32_t slave_error_attribute; /*0004 0007*/ + uint32_t slave_error_address; /*0008 000B*/ + uint32_t 
posted_outbound_doorbell; /*000C 000F*/ + uint32_t master_error_attribute; /*0010 0013*/ + uint32_t master_error_address_low; /*0014 0017*/ + uint32_t master_error_address_high; /*0018 001B*/ + uint32_t hcb_size; /*001C 001F*/ + uint32_t inbound_doorbell; /*0020 0023*/ + uint32_t diagnostic_rw_data; /*0024 0027*/ + uint32_t diagnostic_rw_address_low; /*0028 002B*/ + uint32_t diagnostic_rw_address_high; /*002C 002F*/ + uint32_t host_int_status; /*0030 0033*/ + uint32_t host_int_mask; /*0034 0037*/ + uint32_t dcr_data; /*0038 003B*/ + uint32_t dcr_address; /*003C 003F*/ + uint32_t inbound_queueport; /*0040 0043*/ + uint32_t outbound_queueport; /*0044 0047*/ + uint32_t hcb_pci_address_low; /*0048 004B*/ + uint32_t hcb_pci_address_high; /*004C 004F*/ + uint32_t iop_int_status; /*0050 0053*/ + uint32_t iop_int_mask; /*0054 0057*/ + uint32_t iop_inbound_queue_port; /*0058 005B*/ + uint32_t iop_outbound_queue_port; /*005C 005F*/ + uint32_t inbound_free_list_index; /*0060 0063*/ + uint32_t inbound_post_list_index; /*0064 0067*/ + uint32_t outbound_free_list_index; /*0068 006B*/ + uint32_t outbound_post_list_index; /*006C 006F*/ + uint32_t inbound_doorbell_clear; /*0070 0073*/ + uint32_t i2o_message_unit_control; /*0074 0077*/ + uint32_t last_used_message_source_address_low; /*0078 007B*/ + uint32_t last_used_message_source_address_high; /*007C 007F*/ + uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/ + uint32_t message_dest_address_index; /*0090 0093*/ + uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/ + uint32_t utility_A_int_counter_timer; /*0098 009B*/ + uint32_t outbound_doorbell; /*009C 009F*/ + uint32_t outbound_doorbell_clear; /*00A0 00A3*/ + uint32_t message_source_address_index; /*00A4 00A7*/ + uint32_t message_done_queue_index; /*00A8 00AB*/ + uint32_t reserved0; /*00AC 00AF*/ + uint32_t inbound_msgaddr0; /*00B0 00B3*/ + uint32_t inbound_msgaddr1; /*00B4 00B7*/ + uint32_t outbound_msgaddr0; /*00B8 00BB*/ + uint32_t outbound_msgaddr1; /*00BC 00BF*/ + uint32_t inbound_queueport_low; /*00C0 00C3*/ + uint32_t inbound_queueport_high; /*00C4 00C7*/ + uint32_t outbound_queueport_low; /*00C8 00CB*/ + uint32_t outbound_queueport_high; /*00CC 00CF*/ + uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/ + uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/ + uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/ + uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/ + uint32_t message_dest_queue_port_low; /*00E0 00E3*/ + uint32_t message_dest_queue_port_high; /*00E4 00E7*/ + uint32_t last_used_message_dest_address_low; /*00E8 00EB*/ + uint32_t last_used_message_dest_address_high; /*00EC 00EF*/ + uint32_t message_done_queue_base_address_low; /*00F0 00F3*/ + uint32_t message_done_queue_base_address_high; /*00F4 00F7*/ + uint32_t host_diagnostic; /*00F8 00FB*/ + uint32_t write_sequence; /*00FC 00FF*/ + uint32_t reserved1[34]; /*0100 0187*/ + uint32_t reserved2[1950]; /*0188 1FFF*/ + uint32_t message_wbuffer[32]; /*2000 207F*/ + uint32_t reserved3[32]; /*2080 20FF*/ + uint32_t message_rbuffer[32]; /*2100 217F*/ + uint32_t reserved4[32]; /*2180 21FF*/ + uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/ +}; +/* +********************************************************************* +** Messaging Unit (MU) of Type D processor +********************************************************************* +*/ +struct InBound_SRB { + uint32_t addressLow; /* pointer to SRB block */ + uint32_t addressHigh; + uint32_t length; /* in DWORDs */ + uint32_t reserved0; +}; + +struct OutBound_SRB { + uint32_t 
addressLow; /* pointer to SRB block */ + uint32_t addressHigh; +}; + +struct MessageUnit_D { + struct InBound_SRB post_qbuffer[ARCMSR_MAX_ARC1214_POSTQUEUE]; + volatile struct OutBound_SRB + done_qbuffer[ARCMSR_MAX_ARC1214_DONEQUEUE]; + u16 postq_index; + volatile u16 doneq_index; + u32 __iomem *chip_id; /* 0x00004 */ + u32 __iomem *cpu_mem_config; /* 0x00008 */ + u32 __iomem *i2o_host_interrupt_mask; /* 0x00034 */ + u32 __iomem *sample_at_reset; /* 0x00100 */ + u32 __iomem *reset_request; /* 0x00108 */ + u32 __iomem *host_int_status; /* 0x00200 */ + u32 __iomem *pcief0_int_enable; /* 0x0020C */ + u32 __iomem *inbound_msgaddr0; /* 0x00400 */ + u32 __iomem *inbound_msgaddr1; /* 0x00404 */ + u32 __iomem *outbound_msgaddr0; /* 0x00420 */ + u32 __iomem *outbound_msgaddr1; /* 0x00424 */ + u32 __iomem *inbound_doorbell; /* 0x00460 */ + u32 __iomem *outbound_doorbell; /* 0x00480 */ + u32 __iomem *outbound_doorbell_enable; /* 0x00484 */ + u32 __iomem *inboundlist_base_low; /* 0x01000 */ + u32 __iomem *inboundlist_base_high; /* 0x01004 */ + u32 __iomem *inboundlist_write_pointer; /* 0x01018 */ + u32 __iomem *outboundlist_base_low; /* 0x01060 */ + u32 __iomem *outboundlist_base_high; /* 0x01064 */ + u32 __iomem *outboundlist_copy_pointer; /* 0x0106C */ + u32 __iomem *outboundlist_read_pointer; /* 0x01070 0x01072 */ + u32 __iomem *outboundlist_interrupt_cause; /* 0x1088 */ + u32 __iomem *outboundlist_interrupt_enable; /* 0x108C */ + u32 __iomem *message_wbuffer; /* 0x2000 */ + u32 __iomem *message_rbuffer; /* 0x2100 */ + u32 __iomem *msgcode_rwbuffer; /* 0x2200 */ +}; +/* +********************************************************************* +** Messaging Unit (MU) of Type E processor(LSI) +********************************************************************* +*/ +struct MessageUnit_E{ + uint32_t iobound_doorbell; /*0000 0003*/ + uint32_t write_sequence_3xxx; /*0004 0007*/ + uint32_t host_diagnostic_3xxx; /*0008 000B*/ + uint32_t posted_outbound_doorbell; /*000C 000F*/ + uint32_t master_error_attribute; /*0010 0013*/ + uint32_t master_error_address_low; /*0014 0017*/ + uint32_t master_error_address_high; /*0018 001B*/ + uint32_t hcb_size; /*001C 001F*/ + uint32_t inbound_doorbell; /*0020 0023*/ + uint32_t diagnostic_rw_data; /*0024 0027*/ + uint32_t diagnostic_rw_address_low; /*0028 002B*/ + uint32_t diagnostic_rw_address_high; /*002C 002F*/ + uint32_t host_int_status; /*0030 0033*/ + uint32_t host_int_mask; /*0034 0037*/ + uint32_t dcr_data; /*0038 003B*/ + uint32_t dcr_address; /*003C 003F*/ + uint32_t inbound_queueport; /*0040 0043*/ + uint32_t outbound_queueport; /*0044 0047*/ + uint32_t hcb_pci_address_low; /*0048 004B*/ + uint32_t hcb_pci_address_high; /*004C 004F*/ + uint32_t iop_int_status; /*0050 0053*/ + uint32_t iop_int_mask; /*0054 0057*/ + uint32_t iop_inbound_queue_port; /*0058 005B*/ + uint32_t iop_outbound_queue_port; /*005C 005F*/ + uint32_t inbound_free_list_index; /*0060 0063*/ + uint32_t inbound_post_list_index; /*0064 0067*/ + uint32_t reply_post_producer_index; /*0068 006B*/ + uint32_t reply_post_consumer_index; /*006C 006F*/ + uint32_t inbound_doorbell_clear; /*0070 0073*/ + uint32_t i2o_message_unit_control; /*0074 0077*/ + uint32_t last_used_message_source_address_low; /*0078 007B*/ + uint32_t last_used_message_source_address_high; /*007C 007F*/ + uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/ + uint32_t message_dest_address_index; /*0090 0093*/ + uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/ + uint32_t utility_A_int_counter_timer; /*0098 009B*/ + 
uint32_t outbound_doorbell; /*009C 009F*/ + uint32_t outbound_doorbell_clear; /*00A0 00A3*/ + uint32_t message_source_address_index; /*00A4 00A7*/ + uint32_t message_done_queue_index; /*00A8 00AB*/ + uint32_t reserved0; /*00AC 00AF*/ + uint32_t inbound_msgaddr0; /*00B0 00B3*/ + uint32_t inbound_msgaddr1; /*00B4 00B7*/ + uint32_t outbound_msgaddr0; /*00B8 00BB*/ + uint32_t outbound_msgaddr1; /*00BC 00BF*/ + uint32_t inbound_queueport_low; /*00C0 00C3*/ + uint32_t inbound_queueport_high; /*00C4 00C7*/ + uint32_t outbound_queueport_low; /*00C8 00CB*/ + uint32_t outbound_queueport_high; /*00CC 00CF*/ + uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/ + uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/ + uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/ + uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/ + uint32_t message_dest_queue_port_low; /*00E0 00E3*/ + uint32_t message_dest_queue_port_high; /*00E4 00E7*/ + uint32_t last_used_message_dest_address_low; /*00E8 00EB*/ + uint32_t last_used_message_dest_address_high; /*00EC 00EF*/ + uint32_t message_done_queue_base_address_low; /*00F0 00F3*/ + uint32_t message_done_queue_base_address_high; /*00F4 00F7*/ + uint32_t host_diagnostic; /*00F8 00FB*/ + uint32_t write_sequence; /*00FC 00FF*/ + uint32_t reserved1[34]; /*0100 0187*/ + uint32_t reserved2[1950]; /*0188 1FFF*/ + uint32_t message_wbuffer[32]; /*2000 207F*/ + uint32_t reserved3[32]; /*2080 20FF*/ + uint32_t message_rbuffer[32]; /*2100 217F*/ + uint32_t reserved4[32]; /*2180 21FF*/ + uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/ +}; + +/* +********************************************************************* +** Messaging Unit (MU) of Type F processor(LSI) +********************************************************************* +*/ +struct MessageUnit_F { + uint32_t iobound_doorbell; /*0000 0003*/ + uint32_t write_sequence_3xxx; /*0004 0007*/ + uint32_t host_diagnostic_3xxx; /*0008 000B*/ + uint32_t posted_outbound_doorbell; /*000C 000F*/ + uint32_t master_error_attribute; /*0010 0013*/ + uint32_t master_error_address_low; /*0014 0017*/ + uint32_t master_error_address_high; /*0018 001B*/ + uint32_t hcb_size; /*001C 001F*/ + uint32_t inbound_doorbell; /*0020 0023*/ + uint32_t diagnostic_rw_data; /*0024 0027*/ + uint32_t diagnostic_rw_address_low; /*0028 002B*/ + uint32_t diagnostic_rw_address_high; /*002C 002F*/ + uint32_t host_int_status; /*0030 0033*/ + uint32_t host_int_mask; /*0034 0037*/ + uint32_t dcr_data; /*0038 003B*/ + uint32_t dcr_address; /*003C 003F*/ + uint32_t inbound_queueport; /*0040 0043*/ + uint32_t outbound_queueport; /*0044 0047*/ + uint32_t hcb_pci_address_low; /*0048 004B*/ + uint32_t hcb_pci_address_high; /*004C 004F*/ + uint32_t iop_int_status; /*0050 0053*/ + uint32_t iop_int_mask; /*0054 0057*/ + uint32_t iop_inbound_queue_port; /*0058 005B*/ + uint32_t iop_outbound_queue_port; /*005C 005F*/ + uint32_t inbound_free_list_index; /*0060 0063*/ + uint32_t inbound_post_list_index; /*0064 0067*/ + uint32_t reply_post_producer_index; /*0068 006B*/ + uint32_t reply_post_consumer_index; /*006C 006F*/ + uint32_t inbound_doorbell_clear; /*0070 0073*/ + uint32_t i2o_message_unit_control; /*0074 0077*/ + uint32_t last_used_message_source_address_low; /*0078 007B*/ + uint32_t last_used_message_source_address_high; /*007C 007F*/ + uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/ + uint32_t message_dest_address_index; /*0090 0093*/ + uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/ + uint32_t utility_A_int_counter_timer; /*0098 009B*/ + uint32_t 
outbound_doorbell; /*009C 009F*/ + uint32_t outbound_doorbell_clear; /*00A0 00A3*/ + uint32_t message_source_address_index; /*00A4 00A7*/ + uint32_t message_done_queue_index; /*00A8 00AB*/ + uint32_t reserved0; /*00AC 00AF*/ + uint32_t inbound_msgaddr0; /*00B0 00B3*/ + uint32_t inbound_msgaddr1; /*00B4 00B7*/ + uint32_t outbound_msgaddr0; /*00B8 00BB*/ + uint32_t outbound_msgaddr1; /*00BC 00BF*/ + uint32_t inbound_queueport_low; /*00C0 00C3*/ + uint32_t inbound_queueport_high; /*00C4 00C7*/ + uint32_t outbound_queueport_low; /*00C8 00CB*/ + uint32_t outbound_queueport_high; /*00CC 00CF*/ + uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/ + uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/ + uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/ + uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/ + uint32_t message_dest_queue_port_low; /*00E0 00E3*/ + uint32_t message_dest_queue_port_high; /*00E4 00E7*/ + uint32_t last_used_message_dest_address_low; /*00E8 00EB*/ + uint32_t last_used_message_dest_address_high; /*00EC 00EF*/ + uint32_t message_done_queue_base_address_low; /*00F0 00F3*/ + uint32_t message_done_queue_base_address_high; /*00F4 00F7*/ + uint32_t host_diagnostic; /*00F8 00FB*/ + uint32_t write_sequence; /*00FC 00FF*/ + uint32_t reserved1[46]; /*0100 01B7*/ + uint32_t reply_post_producer_index1; /*01B8 01BB*/ + uint32_t reply_post_consumer_index1; /*01BC 01BF*/ +}; + +#define MESG_RW_BUFFER_SIZE (256 * 3) + +typedef struct deliver_completeQ { + uint16_t cmdFlag; + uint16_t cmdSMID; + uint16_t cmdLMID; // reserved (0) + uint16_t cmdFlag2; // reserved (0) +} DeliverQ, CompletionQ, *pDeliver_Q, *pCompletion_Q; +/* +******************************************************************************* +** Adapter Control Block +******************************************************************************* +*/ +struct AdapterControlBlock +{ + uint32_t adapter_type; /* adapter A,B..... 
*/ +#define ACB_ADAPTER_TYPE_A 0x00000000 /* hba I IOP */ +#define ACB_ADAPTER_TYPE_B 0x00000001 /* hbb M IOP */ +#define ACB_ADAPTER_TYPE_C 0x00000002 /* hbc L IOP */ +#define ACB_ADAPTER_TYPE_D 0x00000003 /* hbd M IOP */ +#define ACB_ADAPTER_TYPE_E 0x00000004 /* hba L IOP */ +#define ACB_ADAPTER_TYPE_F 0x00000005 /* hba L IOP */ + u32 ioqueue_size; + struct pci_dev * pdev; + struct Scsi_Host * host; + unsigned long vir2phy_offset; + /* Offset is used in making arc cdb physical to virtual calculations */ + uint32_t outbound_int_enable; + uint32_t cdb_phyaddr_hi32; + uint32_t reg_mu_acc_handle0; + uint64_t cdb_phyadd_hipart; + spinlock_t eh_lock; + spinlock_t ccblist_lock; + spinlock_t postq_lock; + spinlock_t doneq_lock; + spinlock_t rqbuffer_lock; + spinlock_t wqbuffer_lock; + union { + struct MessageUnit_A __iomem *pmuA; + struct MessageUnit_B *pmuB; + struct MessageUnit_C __iomem *pmuC; + struct MessageUnit_D *pmuD; + struct MessageUnit_E __iomem *pmuE; + struct MessageUnit_F __iomem *pmuF; + }; + /* message unit ATU inbound base address0 */ + void __iomem *mem_base0; + void __iomem *mem_base1; + //0x000 - COMPORT_IN (Host sent to ROC) + uint32_t *message_wbuffer; + //0x100 - COMPORT_OUT (ROC sent to Host) + uint32_t *message_rbuffer; + uint32_t *msgcode_rwbuffer; //0x200 - BIOS_AREA + uint32_t acb_flags; + u16 dev_id; + uint8_t adapter_index; +#define ACB_F_SCSISTOPADAPTER 0x0001 +#define ACB_F_MSG_STOP_BGRB 0x0002 +/* stop RAID background rebuild */ +#define ACB_F_MSG_START_BGRB 0x0004 +/* stop RAID background rebuild */ +#define ACB_F_IOPDATA_OVERFLOW 0x0008 +/* iop message data rqbuffer overflow */ +#define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010 +/* message clear wqbuffer */ +#define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020 +/* message clear rqbuffer */ +#define ACB_F_MESSAGE_WQBUFFER_READED 0x0040 +#define ACB_F_BUS_RESET 0x0080 + +#define ACB_F_IOP_INITED 0x0100 +/* iop init */ +#define ACB_F_ABORT 0x0200 +#define ACB_F_FIRMWARE_TRAP 0x0400 +#define ACB_F_ADAPTER_REMOVED 0x0800 +#define ACB_F_MSG_GET_CONFIG 0x1000 + struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM]; + /* used for memory free */ + struct list_head ccb_free_list; + /* head of free ccb list */ + + atomic_t ccboutstandingcount; + /*The present outstanding command number that in the IOP that + waiting for being handled by FW*/ + + void * dma_coherent; + /* dma_coherent used for memory free */ + dma_addr_t dma_coherent_handle; + /* dma_coherent_handle used for memory free */ + dma_addr_t dma_coherent_handle2; + void *dma_coherent2; + unsigned int uncache_size; + uint8_t rqbuffer[ARCMSR_MAX_QBUFFER]; + /* data collection buffer for read from 80331 */ + int32_t rqbuf_getIndex; + /* first of read buffer */ + int32_t rqbuf_putIndex; + /* last of read buffer */ + uint8_t wqbuffer[ARCMSR_MAX_QBUFFER]; + /* data collection buffer for write to 80331 */ + int32_t wqbuf_getIndex; + /* first of write buffer */ + int32_t wqbuf_putIndex; + /* last of write buffer */ + uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN]; + /* id0 ..... 
id15, lun0...lun7 */ +#define ARECA_RAID_GONE 0x55 +#define ARECA_RAID_GOOD 0xaa + uint32_t num_resets; + uint32_t num_aborts; + uint32_t signature; + uint32_t firm_request_len; + uint32_t firm_numbers_queue; + uint32_t firm_sdram_size; + uint32_t firm_hd_channels; + uint32_t firm_cfg_version; + char firm_model[12]; + char firm_version[20]; + char device_map[20]; /*21,84-99*/ + struct work_struct arcmsr_do_message_isr_bh; + struct timer_list eternal_timer; + unsigned short fw_flag; +#define FW_NORMAL 0x0000 +#define FW_BOG 0x0001 +#define FW_DEADLOCK 0x0010 + uint32_t maxOutstanding; + int vector_count; + uint32_t maxFreeCCB; + struct timer_list refresh_timer; + uint32_t doneq_index; + uint32_t ccbsize; + uint32_t in_doorbell; + uint32_t out_doorbell; + uint32_t completionQ_entry; + pCompletion_Q pCompletionQ; + uint32_t completeQ_size; +};/* HW_DEVICE_EXTENSION */ +/* +******************************************************************************* +** Command Control Block +** this CCB length must be 32 bytes boundary +******************************************************************************* +*/ +struct CommandControlBlock{ + /*x32:sizeof struct_CCB=(64+60)byte, x64:sizeof struct_CCB=(64+60)byte*/ + struct list_head list; /*x32: 8byte, x64: 16byte*/ + struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */ + struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/ + unsigned long cdb_phyaddr; /*x32: 4byte, x64: 8byte*/ + uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/ + uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/ +#define CCB_FLAG_READ 0x0000 +#define CCB_FLAG_WRITE 0x0001 +#define CCB_FLAG_ERROR 0x0002 +#define CCB_FLAG_FLUSHCACHE 0x0004 +#define CCB_FLAG_MASTER_ABORTED 0x0008 + uint16_t startdone; /*x32:2byte,x32:2byte*/ +#define ARCMSR_CCB_DONE 0x0000 +#define ARCMSR_CCB_START 0x55AA +#define ARCMSR_CCB_ABORTED 0xAA55 +#define ARCMSR_CCB_ILLEGAL 0xFFFF + uint32_t smid; +#if BITS_PER_LONG == 64 + /* ======================512+64 bytes======================== */ + uint32_t reserved[3]; /*12 byte*/ +#else + /* ======================512+32 bytes======================== */ + uint32_t reserved[8]; /*32 byte*/ +#endif + /* ======================================================= */ + struct ARCMSR_CDB arcmsr_cdb; +}; +/* +******************************************************************************* +** ARECA SCSI sense data +******************************************************************************* +*/ +struct SENSE_DATA +{ + uint8_t ErrorCode:7; +#define SCSI_SENSE_CURRENT_ERRORS 0x70 +#define SCSI_SENSE_DEFERRED_ERRORS 0x71 + uint8_t Valid:1; + uint8_t SegmentNumber; + uint8_t SenseKey:4; + uint8_t Reserved:1; + uint8_t IncorrectLength:1; + uint8_t EndOfMedia:1; + uint8_t FileMark:1; + uint8_t Information[4]; + uint8_t AdditionalSenseLength; + uint8_t CommandSpecificInformation[4]; + uint8_t AdditionalSenseCode; + uint8_t AdditionalSenseCodeQualifier; + uint8_t FieldReplaceableUnitCode; + uint8_t SenseKeySpecific[3]; +}; +/* +******************************************************************************* +** Outbound Interrupt Status Register - OISR +******************************************************************************* +*/ +#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30 +#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10 +#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08 +#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04 +#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02 +#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01 +#define ARCMSR_MU_OUTBOUND_HANDLE_INT \ + 
(ARCMSR_MU_OUTBOUND_MESSAGE0_INT \ + |ARCMSR_MU_OUTBOUND_MESSAGE1_INT \ + |ARCMSR_MU_OUTBOUND_DOORBELL_INT \ + |ARCMSR_MU_OUTBOUND_POSTQUEUE_INT \ + |ARCMSR_MU_OUTBOUND_PCI_INT) +/* +******************************************************************************* +** Outbound Interrupt Mask Register - OIMR +******************************************************************************* +*/ +#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34 +#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10 +#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08 +#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04 +#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02 +#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01 +#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F + +extern void arcmsr_write_ioctldata2iop(struct AdapterControlBlock *); +extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *, + struct QBUFFER __iomem *); +extern void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *); +extern struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *); +extern const struct attribute_group *arcmsr_host_groups[]; +extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *); +void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb); diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c new file mode 100644 index 000000000..baeb5e795 --- /dev/null +++ b/drivers/scsi/arcmsr/arcmsr_attr.c @@ -0,0 +1,411 @@ +/* +******************************************************************************* +** O.S : Linux +** FILE NAME : arcmsr_attr.c +** BY : Nick Cheng +** Description: attributes exported to sysfs and device host +******************************************************************************* +** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved +** +** Web site: www.areca.com.tw +** E-mail: support@areca.com.tw +** +** This program is free software; you can redistribute it and/or modify +** it under the terms of the GNU General Public License version 2 as +** published by the Free Software Foundation. +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +******************************************************************************* +** Redistribution and use in source and binary forms, with or without +** modification, are permitted provided that the following conditions +** are met: +** 1. Redistributions of source code must retain the above copyright +** notice, this list of conditions and the following disclaimer. +** 2. Redistributions in binary form must reproduce the above copyright +** notice, this list of conditions and the following disclaimer in the +** documentation and/or other materials provided with the distribution. +** 3. The name of the author may not be used to endorse or promote products +** derived from this software without specific prior written permission. +** +** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT +** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY +** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF +** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +******************************************************************************* +** For history of changes, see Documentation/scsi/ChangeLog.arcmsr +** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst +******************************************************************************* +*/ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include "arcmsr.h" + +static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *bin, + char *buf, loff_t off, + size_t count) +{ + struct device *dev = container_of(kobj,struct device,kobj); + struct Scsi_Host *host = class_to_shost(dev); + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + uint8_t *ptmpQbuffer; + int32_t allxfer_len = 0; + unsigned long flags; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + /* do message unit read. */ + ptmpQbuffer = (uint8_t *)buf; + spin_lock_irqsave(&acb->rqbuffer_lock, flags); + if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) { + unsigned int tail = acb->rqbuf_getIndex; + unsigned int head = acb->rqbuf_putIndex; + unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER); + + allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER); + if (allxfer_len > ARCMSR_API_DATA_BUFLEN) + allxfer_len = ARCMSR_API_DATA_BUFLEN; + + if (allxfer_len <= cnt_to_end) + memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len); + else { + memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end); + memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end); + } + acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER; + } + if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { + struct QBUFFER __iomem *prbuffer; + acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; + prbuffer = arcmsr_get_iop_rqbuffer(acb); + if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) + acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; + } + spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); + return allxfer_len; +} + +static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp, + struct kobject *kobj, + struct bin_attribute *bin, + char *buf, loff_t off, + size_t count) +{ + struct device *dev = container_of(kobj,struct device,kobj); + struct Scsi_Host *host = class_to_shost(dev); + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + int32_t user_len, cnt2end; + uint8_t *pQbuffer, *ptmpuserbuffer; + unsigned long flags; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (count > ARCMSR_API_DATA_BUFLEN) + return -EINVAL; + /* do message unit write. 
*/ + ptmpuserbuffer = (uint8_t *)buf; + user_len = (int32_t)count; + spin_lock_irqsave(&acb->wqbuffer_lock, flags); + if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) { + arcmsr_write_ioctldata2iop(acb); + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); + return 0; /*need retry*/ + } else { + pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex]; + cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex; + if (user_len > cnt2end) { + memcpy(pQbuffer, ptmpuserbuffer, cnt2end); + ptmpuserbuffer += cnt2end; + user_len -= cnt2end; + acb->wqbuf_putIndex = 0; + pQbuffer = acb->wqbuffer; + } + memcpy(pQbuffer, ptmpuserbuffer, user_len); + acb->wqbuf_putIndex += user_len; + acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER; + if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { + acb->acb_flags &= + ~ACB_F_MESSAGE_WQBUFFER_CLEARED; + arcmsr_write_ioctldata2iop(acb); + } + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); + return count; + } +} + +static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp, + struct kobject *kobj, + struct bin_attribute *bin, + char *buf, loff_t off, + size_t count) +{ + struct device *dev = container_of(kobj,struct device,kobj); + struct Scsi_Host *host = class_to_shost(dev); + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + uint8_t *pQbuffer; + unsigned long flags; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + arcmsr_clear_iop2drv_rqueue_buffer(acb); + acb->acb_flags |= + (ACB_F_MESSAGE_WQBUFFER_CLEARED + | ACB_F_MESSAGE_RQBUFFER_CLEARED + | ACB_F_MESSAGE_WQBUFFER_READED); + spin_lock_irqsave(&acb->rqbuffer_lock, flags); + acb->rqbuf_getIndex = 0; + acb->rqbuf_putIndex = 0; + spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); + spin_lock_irqsave(&acb->wqbuffer_lock, flags); + acb->wqbuf_getIndex = 0; + acb->wqbuf_putIndex = 0; + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); + pQbuffer = acb->rqbuffer; + memset(pQbuffer, 0, sizeof (struct QBUFFER)); + pQbuffer = acb->wqbuffer; + memset(pQbuffer, 0, sizeof (struct QBUFFER)); + return 1; +} + +static const struct bin_attribute arcmsr_sysfs_message_read_attr = { + .attr = { + .name = "mu_read", + .mode = S_IRUSR , + }, + .size = ARCMSR_API_DATA_BUFLEN, + .read = arcmsr_sysfs_iop_message_read, +}; + +static const struct bin_attribute arcmsr_sysfs_message_write_attr = { + .attr = { + .name = "mu_write", + .mode = S_IWUSR, + }, + .size = ARCMSR_API_DATA_BUFLEN, + .write = arcmsr_sysfs_iop_message_write, +}; + +static const struct bin_attribute arcmsr_sysfs_message_clear_attr = { + .attr = { + .name = "mu_clear", + .mode = S_IWUSR, + }, + .size = 1, + .write = arcmsr_sysfs_iop_message_clear, +}; + +int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb) +{ + struct Scsi_Host *host = acb->host; + int error; + + error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr); + if (error) { + printk(KERN_ERR "arcmsr: alloc sysfs mu_read failed\n"); + goto error_bin_file_message_read; + } + error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr); + if (error) { + printk(KERN_ERR "arcmsr: alloc sysfs mu_write failed\n"); + goto error_bin_file_message_write; + } + error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_clear_attr); + if (error) { + printk(KERN_ERR "arcmsr: alloc sysfs mu_clear failed\n"); + goto error_bin_file_message_clear; + } + return 0; +error_bin_file_message_clear: + sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr); +error_bin_file_message_write: + 
sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr); +error_bin_file_message_read: + return error; +} + +void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb) +{ + struct Scsi_Host *host = acb->host; + + sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_clear_attr); + sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr); + sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr); +} + + +static ssize_t +arcmsr_attr_host_driver_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, + "%s\n", + ARCMSR_DRIVER_VERSION); +} + +static ssize_t +arcmsr_attr_host_driver_posted_cmd(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *) host->hostdata; + return snprintf(buf, PAGE_SIZE, + "%4d\n", + atomic_read(&acb->ccboutstandingcount)); +} + +static ssize_t +arcmsr_attr_host_driver_reset(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *) host->hostdata; + return snprintf(buf, PAGE_SIZE, + "%4d\n", + acb->num_resets); +} + +static ssize_t +arcmsr_attr_host_driver_abort(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *) host->hostdata; + return snprintf(buf, PAGE_SIZE, + "%4d\n", + acb->num_aborts); +} + +static ssize_t +arcmsr_attr_host_fw_model(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *) host->hostdata; + return snprintf(buf, PAGE_SIZE, + "%s\n", + acb->firm_model); +} + +static ssize_t +arcmsr_attr_host_fw_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *) host->hostdata; + + return snprintf(buf, PAGE_SIZE, + "%s\n", + acb->firm_version); +} + +static ssize_t +arcmsr_attr_host_fw_request_len(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *) host->hostdata; + + return snprintf(buf, PAGE_SIZE, + "%4d\n", + acb->firm_request_len); +} + +static ssize_t +arcmsr_attr_host_fw_numbers_queue(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *) host->hostdata; + + return snprintf(buf, PAGE_SIZE, + "%4d\n", + acb->firm_numbers_queue); +} + +static ssize_t +arcmsr_attr_host_fw_sdram_size(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *) host->hostdata; + + return snprintf(buf, PAGE_SIZE, + "%4d\n", + acb->firm_sdram_size); +} + +static ssize_t +arcmsr_attr_host_fw_hd_channels(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *) host->hostdata; + + return snprintf(buf, PAGE_SIZE, 
+ "%4d\n", + acb->firm_hd_channels); +} + +static DEVICE_ATTR(host_driver_version, S_IRUGO, arcmsr_attr_host_driver_version, NULL); +static DEVICE_ATTR(host_driver_posted_cmd, S_IRUGO, arcmsr_attr_host_driver_posted_cmd, NULL); +static DEVICE_ATTR(host_driver_reset, S_IRUGO, arcmsr_attr_host_driver_reset, NULL); +static DEVICE_ATTR(host_driver_abort, S_IRUGO, arcmsr_attr_host_driver_abort, NULL); +static DEVICE_ATTR(host_fw_model, S_IRUGO, arcmsr_attr_host_fw_model, NULL); +static DEVICE_ATTR(host_fw_version, S_IRUGO, arcmsr_attr_host_fw_version, NULL); +static DEVICE_ATTR(host_fw_request_len, S_IRUGO, arcmsr_attr_host_fw_request_len, NULL); +static DEVICE_ATTR(host_fw_numbers_queue, S_IRUGO, arcmsr_attr_host_fw_numbers_queue, NULL); +static DEVICE_ATTR(host_fw_sdram_size, S_IRUGO, arcmsr_attr_host_fw_sdram_size, NULL); +static DEVICE_ATTR(host_fw_hd_channels, S_IRUGO, arcmsr_attr_host_fw_hd_channels, NULL); + +static struct attribute *arcmsr_host_attrs[] = { + &dev_attr_host_driver_version.attr, + &dev_attr_host_driver_posted_cmd.attr, + &dev_attr_host_driver_reset.attr, + &dev_attr_host_driver_abort.attr, + &dev_attr_host_fw_model.attr, + &dev_attr_host_fw_version.attr, + &dev_attr_host_fw_request_len.attr, + &dev_attr_host_fw_numbers_queue.attr, + &dev_attr_host_fw_sdram_size.attr, + &dev_attr_host_fw_hd_channels.attr, + NULL, +}; + +static const struct attribute_group arcmsr_host_attr_group = { + .attrs = arcmsr_host_attrs, +}; + +const struct attribute_group *arcmsr_host_groups[] = { + &arcmsr_host_attr_group, + NULL +}; diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c new file mode 100644 index 000000000..a66221c3b --- /dev/null +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -0,0 +1,4723 @@ +/* +******************************************************************************* +** O.S : Linux +** FILE NAME : arcmsr_hba.c +** BY : Nick Cheng, C.L. Huang +** Description: SCSI RAID Device Driver for Areca RAID Controller +******************************************************************************* +** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved +** +** Web site: www.areca.com.tw +** E-mail: support@areca.com.tw +** +** This program is free software; you can redistribute it and/or modify +** it under the terms of the GNU General Public License version 2 as +** published by the Free Software Foundation. +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +******************************************************************************* +** Redistribution and use in source and binary forms, with or without +** modification, are permitted provided that the following conditions +** are met: +** 1. Redistributions of source code must retain the above copyright +** notice, this list of conditions and the following disclaimer. +** 2. Redistributions in binary form must reproduce the above copyright +** notice, this list of conditions and the following disclaimer in the +** documentation and/or other materials provided with the distribution. +** 3. The name of the author may not be used to endorse or promote products +** derived from this software without specific prior written permission. 
+** +** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT +** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY +** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF +** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +******************************************************************************* +** For history of changes, see Documentation/scsi/ChangeLog.arcmsr +** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst +******************************************************************************* +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "arcmsr.h" +MODULE_AUTHOR("Nick Cheng, C.L. Huang "); +MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(ARCMSR_DRIVER_VERSION); + +static int msix_enable = 1; +module_param(msix_enable, int, S_IRUGO); +MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)"); + +static int msi_enable = 1; +module_param(msi_enable, int, S_IRUGO); +MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)"); + +static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD; +module_param(host_can_queue, int, S_IRUGO); +MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128"); + +static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN; +module_param(cmd_per_lun, int, S_IRUGO); +MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32"); + +static int dma_mask_64 = 0; +module_param(dma_mask_64, int, S_IRUGO); +MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)"); + +static int set_date_time = 0; +module_param(set_date_time, int, S_IRUGO); +MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable"); + +static int cmd_timeout = ARCMSR_DEFAULT_TIMEOUT; +module_param(cmd_timeout, int, S_IRUGO); +MODULE_PARM_DESC(cmd_timeout, " scsi cmd timeout(0 ~ 120 sec.), default is 90"); + +#define ARCMSR_SLEEPTIME 10 +#define ARCMSR_RETRYCOUNT 12 + +static wait_queue_head_t wait_q; +static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, + struct scsi_cmnd *cmd); +static int arcmsr_iop_confirm(struct AdapterControlBlock *acb); +static int arcmsr_abort(struct scsi_cmnd *); +static int arcmsr_bus_reset(struct scsi_cmnd *); +static int arcmsr_bios_param(struct scsi_device *sdev, + struct block_device *bdev, sector_t capacity, int *info); +static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); +static int arcmsr_probe(struct pci_dev *pdev, + const struct pci_device_id *id); +static int __maybe_unused arcmsr_suspend(struct device *dev); +static int __maybe_unused arcmsr_resume(struct device *dev); +static void arcmsr_remove(struct 
pci_dev *pdev); +static void arcmsr_shutdown(struct pci_dev *pdev); +static void arcmsr_iop_init(struct AdapterControlBlock *acb); +static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb); +static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb); +static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, + u32 intmask_org); +static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); +static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb); +static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb); +static void arcmsr_request_device_map(struct timer_list *t); +static void arcmsr_message_isr_bh_fn(struct work_struct *work); +static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb); +static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); +static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB); +static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb); +static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb); +static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb); +static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb); +static void arcmsr_hardware_reset(struct AdapterControlBlock *acb); +static const char *arcmsr_info(struct Scsi_Host *); +static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); +static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *); +static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb); +static void arcmsr_set_iop_datetime(struct timer_list *); +static int arcmsr_slave_config(struct scsi_device *sdev); +static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth) +{ + if (queue_depth > ARCMSR_MAX_CMD_PERLUN) + queue_depth = ARCMSR_MAX_CMD_PERLUN; + return scsi_change_queue_depth(sdev, queue_depth); +} + +static const struct scsi_host_template arcmsr_scsi_host_template = { + .module = THIS_MODULE, + .proc_name = ARCMSR_NAME, + .name = "Areca SAS/SATA RAID driver", + .info = arcmsr_info, + .queuecommand = arcmsr_queue_command, + .eh_abort_handler = arcmsr_abort, + .eh_bus_reset_handler = arcmsr_bus_reset, + .bios_param = arcmsr_bios_param, + .slave_configure = arcmsr_slave_config, + .change_queue_depth = arcmsr_adjust_disk_queue_depth, + .can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD, + .this_id = ARCMSR_SCSI_INITIATOR_ID, + .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES, + .max_sectors = ARCMSR_MAX_XFER_SECTORS_C, + .cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN, + .shost_groups = arcmsr_host_groups, + .no_write_same = 1, +}; + +static struct pci_device_id arcmsr_device_id_table[] = { + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200), + .driver_data = ACB_ADAPTER_TYPE_B}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201), + .driver_data = ACB_ADAPTER_TYPE_B}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202), + .driver_data = ACB_ADAPTER_TYPE_B}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1203), + 
.driver_data = ACB_ADAPTER_TYPE_B}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214), + .driver_data = ACB_ADAPTER_TYPE_D}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681), + .driver_data = ACB_ADAPTER_TYPE_A}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880), + .driver_data = ACB_ADAPTER_TYPE_C}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884), + .driver_data = ACB_ADAPTER_TYPE_E}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886), + .driver_data = ACB_ADAPTER_TYPE_F}, + {0, 0}, /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table); + +static SIMPLE_DEV_PM_OPS(arcmsr_pm_ops, arcmsr_suspend, arcmsr_resume); + +static struct pci_driver arcmsr_pci_driver = { + .name = "arcmsr", + .id_table = arcmsr_device_id_table, + .probe = arcmsr_probe, + .remove = arcmsr_remove, + .driver.pm = &arcmsr_pm_ops, + .shutdown = arcmsr_shutdown, +}; +/* +**************************************************************************** +**************************************************************************** +*/ + +static void arcmsr_free_io_queue(struct AdapterControlBlock *acb) +{ + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_B: + case ACB_ADAPTER_TYPE_D: + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: + dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size, + acb->dma_coherent2, acb->dma_coherent_handle2); + break; + } +} + +static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb) +{ + struct pci_dev *pdev = acb->pdev; + switch (acb->adapter_type){ + case ACB_ADAPTER_TYPE_A:{ + acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0)); + if (!acb->pmuA) { + printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); + return false; + } + break; + } + case ACB_ADAPTER_TYPE_B:{ + void __iomem *mem_base0, *mem_base1; + mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + if (!mem_base0) { + printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); + return false; + } + mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2)); + if (!mem_base1) { + iounmap(mem_base0); + printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); + return false; + } + acb->mem_base0 = mem_base0; + acb->mem_base1 = mem_base1; + break; + } + case ACB_ADAPTER_TYPE_C:{ + acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); + if (!acb->pmuC) { + printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); + return false; + } + if (readl(&acb->pmuC->outbound_doorbell) & 
ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { + writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/ + return true; + } + break; + } + case ACB_ADAPTER_TYPE_D: { + void __iomem *mem_base0; + unsigned long addr, range; + + addr = (unsigned long)pci_resource_start(pdev, 0); + range = pci_resource_len(pdev, 0); + mem_base0 = ioremap(addr, range); + if (!mem_base0) { + pr_notice("arcmsr%d: memory mapping region fail\n", + acb->host->host_no); + return false; + } + acb->mem_base0 = mem_base0; + break; + } + case ACB_ADAPTER_TYPE_E: { + acb->pmuE = ioremap(pci_resource_start(pdev, 1), + pci_resource_len(pdev, 1)); + if (!acb->pmuE) { + pr_notice("arcmsr%d: memory mapping region fail \n", + acb->host->host_no); + return false; + } + writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/ + writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); /* synchronize doorbell to 0 */ + acb->in_doorbell = 0; + acb->out_doorbell = 0; + break; + } + case ACB_ADAPTER_TYPE_F: { + acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + if (!acb->pmuF) { + pr_notice("arcmsr%d: memory mapping region fail\n", + acb->host->host_no); + return false; + } + writel(0, &acb->pmuF->host_int_status); /* clear interrupt */ + writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell); + acb->in_doorbell = 0; + acb->out_doorbell = 0; + break; + } + } + return true; +} + +static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb) +{ + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: + iounmap(acb->pmuA); + break; + case ACB_ADAPTER_TYPE_B: + iounmap(acb->mem_base0); + iounmap(acb->mem_base1); + break; + case ACB_ADAPTER_TYPE_C: + iounmap(acb->pmuC); + break; + case ACB_ADAPTER_TYPE_D: + iounmap(acb->mem_base0); + break; + case ACB_ADAPTER_TYPE_E: + iounmap(acb->pmuE); + break; + case ACB_ADAPTER_TYPE_F: + iounmap(acb->pmuF); + break; + } +} + +static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) +{ + irqreturn_t handle_state; + struct AdapterControlBlock *acb = dev_id; + + handle_state = arcmsr_interrupt(acb); + return handle_state; +} + +static int arcmsr_bios_param(struct scsi_device *sdev, + struct block_device *bdev, sector_t capacity, int *geom) +{ + int heads, sectors, cylinders, total_capacity; + + if (scsi_partsize(bdev, capacity, geom)) + return 0; + + total_capacity = capacity; + heads = 64; + sectors = 32; + cylinders = total_capacity / (heads * sectors); + if (cylinders > 1024) { + heads = 255; + sectors = 63; + cylinders = total_capacity / (heads * sectors); + } + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + return 0; +} + +static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb) +{ + struct MessageUnit_A __iomem *reg = acb->pmuA; + int i; + + for (i = 0; i < 2000; i++) { + if (readl(®->outbound_intstatus) & + ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { + writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, + ®->outbound_intstatus); + return true; + } + msleep(10); + } /* max 20 seconds */ + + return false; +} + +static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb) +{ + struct MessageUnit_B *reg = acb->pmuB; + int i; + + for (i = 0; i < 2000; i++) { + if (readl(reg->iop2drv_doorbell) + & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { + writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, + reg->iop2drv_doorbell); + writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, + reg->drv2iop_doorbell); + return true; + } + msleep(10); + } /* max 20 seconds */ + + return false; +} + +static 
uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_C __iomem *phbcmu = pACB->pmuC; + int i; + + for (i = 0; i < 2000; i++) { + if (readl(&phbcmu->outbound_doorbell) + & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { + writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, + &phbcmu->outbound_doorbell_clear); /*clear interrupt*/ + return true; + } + msleep(10); + } /* max 20 seconds */ + + return false; +} + +static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_D *reg = pACB->pmuD; + int i; + + for (i = 0; i < 2000; i++) { + if (readl(reg->outbound_doorbell) + & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) { + writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, + reg->outbound_doorbell); + return true; + } + msleep(10); + } /* max 20 seconds */ + return false; +} + +static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB) +{ + int i; + uint32_t read_doorbell; + struct MessageUnit_E __iomem *phbcmu = pACB->pmuE; + + for (i = 0; i < 2000; i++) { + read_doorbell = readl(&phbcmu->iobound_doorbell); + if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) { + writel(0, &phbcmu->host_int_status); /*clear interrupt*/ + pACB->in_doorbell = read_doorbell; + return true; + } + msleep(10); + } /* max 20 seconds */ + return false; +} + +static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb) +{ + struct MessageUnit_A __iomem *reg = acb->pmuA; + int retry_count = 30; + writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0); + do { + if (arcmsr_hbaA_wait_msgint_ready(acb)) + break; + else { + retry_count--; + printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ + timeout, retry count down = %d \n", acb->host->host_no, retry_count); + } + } while (retry_count != 0); +} + +static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb) +{ + struct MessageUnit_B *reg = acb->pmuB; + int retry_count = 30; + writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell); + do { + if (arcmsr_hbaB_wait_msgint_ready(acb)) + break; + else { + retry_count--; + printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ + timeout,retry count down = %d \n", acb->host->host_no, retry_count); + } + } while (retry_count != 0); +} + +static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_C __iomem *reg = pACB->pmuC; + int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */ + writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0); + writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); + do { + if (arcmsr_hbaC_wait_msgint_ready(pACB)) { + break; + } else { + retry_count--; + printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ + timeout,retry count down = %d \n", pACB->host->host_no, retry_count); + } + } while (retry_count != 0); + return; +} + +static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB) +{ + int retry_count = 15; + struct MessageUnit_D *reg = pACB->pmuD; + + writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0); + do { + if (arcmsr_hbaD_wait_msgint_ready(pACB)) + break; + + retry_count--; + pr_notice("arcmsr%d: wait 'flush adapter " + "cache' timeout, retry count down = %d\n", + pACB->host->host_no, retry_count); + } while (retry_count != 0); +} + +static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB) +{ + int retry_count = 30; + struct MessageUnit_E __iomem *reg = pACB->pmuE; + + writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, 
®->inbound_msgaddr0); + pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(pACB->out_doorbell, ®->iobound_doorbell); + do { + if (arcmsr_hbaE_wait_msgint_ready(pACB)) + break; + retry_count--; + pr_notice("arcmsr%d: wait 'flush adapter " + "cache' timeout, retry count down = %d\n", + pACB->host->host_no, retry_count); + } while (retry_count != 0); +} + +static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) +{ + switch (acb->adapter_type) { + + case ACB_ADAPTER_TYPE_A: + arcmsr_hbaA_flush_cache(acb); + break; + case ACB_ADAPTER_TYPE_B: + arcmsr_hbaB_flush_cache(acb); + break; + case ACB_ADAPTER_TYPE_C: + arcmsr_hbaC_flush_cache(acb); + break; + case ACB_ADAPTER_TYPE_D: + arcmsr_hbaD_flush_cache(acb); + break; + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: + arcmsr_hbaE_flush_cache(acb); + break; + } +} + +static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb) +{ + struct MessageUnit_B *reg = acb->pmuB; + + if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) { + reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203); + reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203); + reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203); + reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203); + } else { + reg->drv2iop_doorbell= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL); + reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK); + reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL); + reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK); + } + reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER); + reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER); + reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER); +} + +static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb) +{ + struct MessageUnit_D *reg = acb->pmuD; + + reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID); + reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION); + reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK); + reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET); + reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST); + reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS); + reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE); + reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0); + reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1); + reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0); + reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1); + reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL); + reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL); + reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE); + reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW); + reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH); + reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER); + reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW); + reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH); + reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER); + reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER); + reg->outboundlist_interrupt_cause = 
MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE); + reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE); + reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER); + reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER); + reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER); +} + +static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock *acb) +{ + dma_addr_t host_buffer_dma; + struct MessageUnit_F __iomem *pmuF; + + memset(acb->dma_coherent2, 0xff, acb->completeQ_size); + acb->message_wbuffer = (uint32_t *)round_up((unsigned long)acb->dma_coherent2 + + acb->completeQ_size, 4); + acb->message_rbuffer = ((void *)acb->message_wbuffer) + 0x100; + acb->msgcode_rwbuffer = ((void *)acb->message_wbuffer) + 0x200; + memset((void *)acb->message_wbuffer, 0, MESG_RW_BUFFER_SIZE); + host_buffer_dma = round_up(acb->dma_coherent_handle2 + acb->completeQ_size, 4); + pmuF = acb->pmuF; + /* host buffer low address, bit0:1 all buffer active */ + writel(lower_32_bits(host_buffer_dma | 1), &pmuF->inbound_msgaddr0); + /* host buffer high address */ + writel(upper_32_bits(host_buffer_dma), &pmuF->inbound_msgaddr1); + /* set host buffer physical address */ + writel(ARCMSR_HBFMU_DOORBELL_SYNC1, &pmuF->iobound_doorbell); +} + +static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) +{ + bool rtn = true; + void *dma_coherent; + dma_addr_t dma_coherent_handle; + struct pci_dev *pdev = acb->pdev; + + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_B: { + acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32); + dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, + &dma_coherent_handle, GFP_KERNEL); + if (!dma_coherent) { + pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); + return false; + } + acb->dma_coherent_handle2 = dma_coherent_handle; + acb->dma_coherent2 = dma_coherent; + acb->pmuB = (struct MessageUnit_B *)dma_coherent; + arcmsr_hbaB_assign_regAddr(acb); + } + break; + case ACB_ADAPTER_TYPE_D: { + acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32); + dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, + &dma_coherent_handle, GFP_KERNEL); + if (!dma_coherent) { + pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); + return false; + } + acb->dma_coherent_handle2 = dma_coherent_handle; + acb->dma_coherent2 = dma_coherent; + acb->pmuD = (struct MessageUnit_D *)dma_coherent; + arcmsr_hbaD_assign_regAddr(acb); + } + break; + case ACB_ADAPTER_TYPE_E: { + uint32_t completeQ_size; + completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128; + acb->ioqueue_size = roundup(completeQ_size, 32); + dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, + &dma_coherent_handle, GFP_KERNEL); + if (!dma_coherent){ + pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); + return false; + } + acb->dma_coherent_handle2 = dma_coherent_handle; + acb->dma_coherent2 = dma_coherent; + acb->pCompletionQ = dma_coherent; + acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ); + acb->doneq_index = 0; + } + break; + case ACB_ADAPTER_TYPE_F: { + uint32_t QueueDepth; + uint32_t depthTbl[] = {256, 512, 1024, 128, 64, 32}; + + arcmsr_wait_firmware_ready(acb); + QueueDepth = depthTbl[readl(&acb->pmuF->outbound_msgaddr1) & 7]; + acb->completeQ_size = sizeof(struct deliver_completeQ) * QueueDepth + 128; + acb->ioqueue_size = roundup(acb->completeQ_size + MESG_RW_BUFFER_SIZE, 32); + dma_coherent = 
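+		/*
+		 * For type F the completion queue depth comes from the IOP itself:
+		 * once the firmware is ready, the low three bits of
+		 * outbound_msgaddr1 index depthTbl[] above.  The 128-byte slack and
+		 * the MESG_RW_BUFFER_SIZE message area are carved out of this same
+		 * coherent allocation and wired up by arcmsr_hbaF_assign_regAddr().
+		 */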
dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, + &dma_coherent_handle, GFP_KERNEL); + if (!dma_coherent) { + pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); + return false; + } + acb->dma_coherent_handle2 = dma_coherent_handle; + acb->dma_coherent2 = dma_coherent; + acb->pCompletionQ = dma_coherent; + acb->completionQ_entry = acb->completeQ_size / sizeof(struct deliver_completeQ); + acb->doneq_index = 0; + arcmsr_hbaF_assign_regAddr(acb); + } + break; + default: + break; + } + return rtn; +} + +static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) +{ + struct pci_dev *pdev = acb->pdev; + void *dma_coherent; + dma_addr_t dma_coherent_handle; + struct CommandControlBlock *ccb_tmp; + int i = 0, j = 0; + unsigned long cdb_phyaddr, next_ccb_phy; + unsigned long roundup_ccbsize; + unsigned long max_xfer_len; + unsigned long max_sg_entrys; + uint32_t firm_config_version, curr_phy_upper32; + + for (i = 0; i < ARCMSR_MAX_TARGETID; i++) + for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) + acb->devstate[i][j] = ARECA_RAID_GONE; + + max_xfer_len = ARCMSR_MAX_XFER_LEN; + max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES; + firm_config_version = acb->firm_cfg_version; + if((firm_config_version & 0xFF) >= 3){ + max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */ + max_sg_entrys = (max_xfer_len/4096); + } + acb->host->max_sectors = max_xfer_len/512; + acb->host->sg_tablesize = max_sg_entrys; + roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32); + acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB; + if (acb->adapter_type != ACB_ADAPTER_TYPE_F) + acb->uncache_size += acb->ioqueue_size; + dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL); + if(!dma_coherent){ + printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no); + return -ENOMEM; + } + acb->dma_coherent = dma_coherent; + acb->dma_coherent_handle = dma_coherent_handle; + memset(dma_coherent, 0, acb->uncache_size); + acb->ccbsize = roundup_ccbsize; + ccb_tmp = dma_coherent; + curr_phy_upper32 = upper_32_bits(dma_coherent_handle); + acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle; + for(i = 0; i < acb->maxFreeCCB; i++){ + cdb_phyaddr = (unsigned long)dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb); + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: + case ACB_ADAPTER_TYPE_B: + ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5; + break; + case ACB_ADAPTER_TYPE_C: + case ACB_ADAPTER_TYPE_D: + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: + ccb_tmp->cdb_phyaddr = cdb_phyaddr; + break; + } + acb->pccb_pool[i] = ccb_tmp; + ccb_tmp->acb = acb; + ccb_tmp->smid = (u32)i << 16; + INIT_LIST_HEAD(&ccb_tmp->list); + next_ccb_phy = dma_coherent_handle + roundup_ccbsize; + if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) { + acb->maxFreeCCB = i; + acb->host->can_queue = i; + break; + } + else + list_add_tail(&ccb_tmp->list, &acb->ccb_free_list); + ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize); + dma_coherent_handle = next_ccb_phy; + } + if (acb->adapter_type != ACB_ADAPTER_TYPE_F) { + acb->dma_coherent_handle2 = dma_coherent_handle; + acb->dma_coherent2 = ccb_tmp; + } + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_B: + acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2; + arcmsr_hbaB_assign_regAddr(acb); + break; + case 
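+	/*
+	 * Like type B just above, type D re-runs its register-address setup here
+	 * because for everything except type F the message unit is not kept in
+	 * its own allocation: arcmsr_probe() freed the buffer from
+	 * arcmsr_alloc_io_queue(), and dma_coherent2/dma_coherent_handle2 now
+	 * point at the ioqueue_size bytes reserved past the last CCB above
+	 * (type E likewise re-points its completion queue below).  Note also
+	 * that the CCB loop shrinks maxFreeCCB/can_queue if the pool would
+	 * otherwise cross a 4 GiB boundary, i.e. when upper_32_bits() of the
+	 * next CCB address changes.
+	 */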
ACB_ADAPTER_TYPE_D: + acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2; + arcmsr_hbaD_assign_regAddr(acb); + break; + case ACB_ADAPTER_TYPE_E: + acb->pCompletionQ = acb->dma_coherent2; + acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ); + acb->doneq_index = 0; + break; + } + return 0; +} + +static void arcmsr_message_isr_bh_fn(struct work_struct *work) +{ + struct AdapterControlBlock *acb = container_of(work, + struct AdapterControlBlock, arcmsr_do_message_isr_bh); + char *acb_dev_map = (char *)acb->device_map; + uint32_t __iomem *signature = NULL; + char __iomem *devicemap = NULL; + int target, lun; + struct scsi_device *psdev; + char diff, temp; + + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: { + struct MessageUnit_A __iomem *reg = acb->pmuA; + + signature = (uint32_t __iomem *)(®->message_rwbuffer[0]); + devicemap = (char __iomem *)(®->message_rwbuffer[21]); + break; + } + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; + + signature = (uint32_t __iomem *)(®->message_rwbuffer[0]); + devicemap = (char __iomem *)(®->message_rwbuffer[21]); + break; + } + case ACB_ADAPTER_TYPE_C: { + struct MessageUnit_C __iomem *reg = acb->pmuC; + + signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]); + devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]); + break; + } + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + + signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]); + devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]); + break; + } + case ACB_ADAPTER_TYPE_E: { + struct MessageUnit_E __iomem *reg = acb->pmuE; + + signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]); + devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]); + break; + } + case ACB_ADAPTER_TYPE_F: { + signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]); + devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]); + break; + } + } + if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG) + return; + for (target = 0; target < ARCMSR_MAX_TARGETID - 1; + target++) { + temp = readb(devicemap); + diff = (*acb_dev_map) ^ temp; + if (diff != 0) { + *acb_dev_map = temp; + for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; + lun++) { + if ((diff & 0x01) == 1 && + (temp & 0x01) == 1) { + scsi_add_device(acb->host, + 0, target, lun); + } else if ((diff & 0x01) == 1 + && (temp & 0x01) == 0) { + psdev = scsi_device_lookup(acb->host, + 0, target, lun); + if (psdev != NULL) { + scsi_remove_device(psdev); + scsi_device_put(psdev); + } + } + temp >>= 1; + diff >>= 1; + } + } + devicemap++; + acb_dev_map++; + } + acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG; +} + +static int +arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb) +{ + unsigned long flags; + int nvec, i; + + if (msix_enable == 0) + goto msi_int0; + nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS, + PCI_IRQ_MSIX); + if (nvec > 0) { + pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no); + flags = 0; + } else { +msi_int0: + if (msi_enable == 1) { + nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (nvec == 1) { + dev_info(&pdev->dev, "msi enabled\n"); + goto msi_int1; + } + } + nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY); + if (nvec < 1) + return FAILED; +msi_int1: + flags = IRQF_SHARED; + } + + acb->vector_count = nvec; + for (i = 0; i < nvec; i++) { + if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt, + flags, "arcmsr", acb)) { + pr_warn("arcmsr%d: request_irq =%d failed!\n", + acb->host->host_no, pci_irq_vector(pdev, i)); + 
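+			/*
+			 * A later vector failed: jump to out_free_irq, which
+			 * releases the vectors already requested and then frees
+			 * the MSI-X/MSI allocation.
+			 */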
goto out_free_irq; + } + } + + return SUCCESS; +out_free_irq: + while (--i >= 0) + free_irq(pci_irq_vector(pdev, i), acb); + pci_free_irq_vectors(pdev); + return FAILED; +} + +static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb) +{ + INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn); + pacb->fw_flag = FW_NORMAL; + timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0); + pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ); + add_timer(&pacb->eternal_timer); +} + +static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb) +{ + timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0); + pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000); + add_timer(&pacb->refresh_timer); +} + +static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb) +{ + struct pci_dev *pcidev = acb->pdev; + + if (IS_DMA64) { + if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) || + dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64))) + goto dma32; + if (acb->adapter_type <= ACB_ADAPTER_TYPE_B) + return 0; + if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) || + dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) { + printk("arcmsr: set DMA 64 mask failed\n"); + return -ENXIO; + } + } else { +dma32: + if (dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)) || + dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(32)) || + dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) { + printk("arcmsr: set DMA 32-bit mask failed\n"); + return -ENXIO; + } + } + return 0; +} + +static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct Scsi_Host *host; + struct AdapterControlBlock *acb; + uint8_t bus,dev_fun; + int error; + error = pci_enable_device(pdev); + if(error){ + return -ENODEV; + } + host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock)); + if(!host){ + goto pci_disable_dev; + } + init_waitqueue_head(&wait_q); + bus = pdev->bus->number; + dev_fun = pdev->devfn; + acb = (struct AdapterControlBlock *) host->hostdata; + memset(acb,0,sizeof(struct AdapterControlBlock)); + acb->pdev = pdev; + acb->adapter_type = id->driver_data; + if (arcmsr_set_dma_mask(acb)) + goto scsi_host_release; + acb->host = host; + host->max_lun = ARCMSR_MAX_TARGETLUN; + host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/ + host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/ + if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD)) + host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD; + host->can_queue = host_can_queue; /* max simultaneous cmds */ + if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN)) + cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN; + host->cmd_per_lun = cmd_per_lun; + host->this_id = ARCMSR_SCSI_INITIATOR_ID; + host->unique_id = (bus << 8) | dev_fun; + pci_set_drvdata(pdev, host); + pci_set_master(pdev); + error = pci_request_regions(pdev, "arcmsr"); + if(error){ + goto scsi_host_release; + } + spin_lock_init(&acb->eh_lock); + spin_lock_init(&acb->ccblist_lock); + spin_lock_init(&acb->postq_lock); + spin_lock_init(&acb->doneq_lock); + spin_lock_init(&acb->rqbuffer_lock); + spin_lock_init(&acb->wqbuffer_lock); + acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | + ACB_F_MESSAGE_RQBUFFER_CLEARED | + ACB_F_MESSAGE_WQBUFFER_READED); + acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; + INIT_LIST_HEAD(&acb->ccb_free_list); + error = arcmsr_remap_pciregion(acb); + if(!error){ + goto 
pci_release_regs; + } + error = arcmsr_alloc_io_queue(acb); + if (!error) + goto unmap_pci_region; + error = arcmsr_get_firmware_spec(acb); + if(!error){ + goto free_hbb_mu; + } + if (acb->adapter_type != ACB_ADAPTER_TYPE_F) + arcmsr_free_io_queue(acb); + error = arcmsr_alloc_ccb_pool(acb); + if(error){ + goto unmap_pci_region; + } + error = scsi_add_host(host, &pdev->dev); + if(error){ + goto free_ccb_pool; + } + if (arcmsr_request_irq(pdev, acb) == FAILED) + goto scsi_host_remove; + arcmsr_iop_init(acb); + arcmsr_init_get_devmap_timer(acb); + if (set_date_time) + arcmsr_init_set_datetime_timer(acb); + if(arcmsr_alloc_sysfs_attr(acb)) + goto out_free_sysfs; + scsi_scan_host(host); + return 0; +out_free_sysfs: + if (set_date_time) + del_timer_sync(&acb->refresh_timer); + del_timer_sync(&acb->eternal_timer); + flush_work(&acb->arcmsr_do_message_isr_bh); + arcmsr_stop_adapter_bgrb(acb); + arcmsr_flush_adapter_cache(acb); + arcmsr_free_irq(pdev, acb); +scsi_host_remove: + scsi_remove_host(host); +free_ccb_pool: + arcmsr_free_ccb_pool(acb); + goto unmap_pci_region; +free_hbb_mu: + arcmsr_free_io_queue(acb); +unmap_pci_region: + arcmsr_unmap_pciregion(acb); +pci_release_regs: + pci_release_regions(pdev); +scsi_host_release: + scsi_host_put(host); +pci_disable_dev: + pci_disable_device(pdev); + return -ENODEV; +} + +static void arcmsr_free_irq(struct pci_dev *pdev, + struct AdapterControlBlock *acb) +{ + int i; + + for (i = 0; i < acb->vector_count; i++) + free_irq(pci_irq_vector(pdev, i), acb); + pci_free_irq_vectors(pdev); +} + +static int __maybe_unused arcmsr_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct Scsi_Host *host = pci_get_drvdata(pdev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *)host->hostdata; + + arcmsr_disable_outbound_ints(acb); + arcmsr_free_irq(pdev, acb); + del_timer_sync(&acb->eternal_timer); + if (set_date_time) + del_timer_sync(&acb->refresh_timer); + flush_work(&acb->arcmsr_do_message_isr_bh); + arcmsr_stop_adapter_bgrb(acb); + arcmsr_flush_adapter_cache(acb); + return 0; +} + +static int __maybe_unused arcmsr_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct Scsi_Host *host = pci_get_drvdata(pdev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *)host->hostdata; + + if (arcmsr_set_dma_mask(acb)) + goto controller_unregister; + if (arcmsr_request_irq(pdev, acb) == FAILED) + goto controller_stop; + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; + uint32_t i; + for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { + reg->post_qbuffer[i] = 0; + reg->done_qbuffer[i] = 0; + } + reg->postq_index = 0; + reg->doneq_index = 0; + break; + } + case ACB_ADAPTER_TYPE_E: + writel(0, &acb->pmuE->host_int_status); + writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); + acb->in_doorbell = 0; + acb->out_doorbell = 0; + acb->doneq_index = 0; + break; + case ACB_ADAPTER_TYPE_F: + writel(0, &acb->pmuF->host_int_status); + writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell); + acb->in_doorbell = 0; + acb->out_doorbell = 0; + acb->doneq_index = 0; + arcmsr_hbaF_assign_regAddr(acb); + break; + } + arcmsr_iop_init(acb); + arcmsr_init_get_devmap_timer(acb); + if (set_date_time) + arcmsr_init_set_datetime_timer(acb); + return 0; +controller_stop: + arcmsr_stop_adapter_bgrb(acb); + arcmsr_flush_adapter_cache(acb); +controller_unregister: + scsi_remove_host(host); + arcmsr_free_ccb_pool(acb); + if (acb->adapter_type == 
ACB_ADAPTER_TYPE_F) + arcmsr_free_io_queue(acb); + arcmsr_unmap_pciregion(acb); + scsi_host_put(host); + return -ENODEV; +} + +static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb) +{ + struct MessageUnit_A __iomem *reg = acb->pmuA; + writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0); + if (!arcmsr_hbaA_wait_msgint_ready(acb)) { + printk(KERN_NOTICE + "arcmsr%d: wait 'abort all outstanding command' timeout\n" + , acb->host->host_no); + return false; + } + return true; +} + +static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb) +{ + struct MessageUnit_B *reg = acb->pmuB; + + writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell); + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { + printk(KERN_NOTICE + "arcmsr%d: wait 'abort all outstanding command' timeout\n" + , acb->host->host_no); + return false; + } + return true; +} +static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_C __iomem *reg = pACB->pmuC; + writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0); + writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); + if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { + printk(KERN_NOTICE + "arcmsr%d: wait 'abort all outstanding command' timeout\n" + , pACB->host->host_no); + return false; + } + return true; +} + +static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_D *reg = pACB->pmuD; + + writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0); + if (!arcmsr_hbaD_wait_msgint_ready(pACB)) { + pr_notice("arcmsr%d: wait 'abort all outstanding " + "command' timeout\n", pACB->host->host_no); + return false; + } + return true; +} + +static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_E __iomem *reg = pACB->pmuE; + + writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0); + pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(pACB->out_doorbell, ®->iobound_doorbell); + if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { + pr_notice("arcmsr%d: wait 'abort all outstanding " + "command' timeout\n", pACB->host->host_no); + return false; + } + return true; +} + +static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) +{ + uint8_t rtnval = 0; + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: + rtnval = arcmsr_hbaA_abort_allcmd(acb); + break; + case ACB_ADAPTER_TYPE_B: + rtnval = arcmsr_hbaB_abort_allcmd(acb); + break; + case ACB_ADAPTER_TYPE_C: + rtnval = arcmsr_hbaC_abort_allcmd(acb); + break; + case ACB_ADAPTER_TYPE_D: + rtnval = arcmsr_hbaD_abort_allcmd(acb); + break; + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: + rtnval = arcmsr_hbaE_abort_allcmd(acb); + break; + } + return rtnval; +} + +static void arcmsr_ccb_complete(struct CommandControlBlock *ccb) +{ + struct AdapterControlBlock *acb = ccb->acb; + struct scsi_cmnd *pcmd = ccb->pcmd; + unsigned long flags; + atomic_dec(&acb->ccboutstandingcount); + scsi_dma_unmap(ccb->pcmd); + ccb->startdone = ARCMSR_CCB_DONE; + spin_lock_irqsave(&acb->ccblist_lock, flags); + list_add_tail(&ccb->list, &acb->ccb_free_list); + spin_unlock_irqrestore(&acb->ccblist_lock, flags); + scsi_done(pcmd); +} + +static void arcmsr_report_sense_info(struct CommandControlBlock *ccb) +{ + struct scsi_cmnd *pcmd = ccb->pcmd; + + pcmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION; + if (pcmd->sense_buffer) { + struct SENSE_DATA *sensebuffer; + + memcpy_and_pad(pcmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE, + ccb->arcmsr_cdb.SenseData, + 
sizeof(ccb->arcmsr_cdb.SenseData), + 0); + + sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer; + sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS; + sensebuffer->Valid = 1; + } +} + +static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) +{ + u32 orig_mask = 0; + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A : { + struct MessageUnit_A __iomem *reg = acb->pmuA; + orig_mask = readl(®->outbound_intmask); + writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ + ®->outbound_intmask); + } + break; + case ACB_ADAPTER_TYPE_B : { + struct MessageUnit_B *reg = acb->pmuB; + orig_mask = readl(reg->iop2drv_doorbell_mask); + writel(0, reg->iop2drv_doorbell_mask); + } + break; + case ACB_ADAPTER_TYPE_C:{ + struct MessageUnit_C __iomem *reg = acb->pmuC; + /* disable all outbound interrupt */ + orig_mask = readl(®->host_int_mask); /* disable outbound message0 int */ + writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask); + } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + /* disable all outbound interrupt */ + writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable); + } + break; + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: { + struct MessageUnit_E __iomem *reg = acb->pmuE; + orig_mask = readl(®->host_int_mask); + writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, ®->host_int_mask); + readl(®->host_int_mask); /* Dummy readl to force pci flush */ + } + break; + } + return orig_mask; +} + +static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, + struct CommandControlBlock *ccb, bool error) +{ + uint8_t id, lun; + id = ccb->pcmd->device->id; + lun = ccb->pcmd->device->lun; + if (!error) { + if (acb->devstate[id][lun] == ARECA_RAID_GONE) + acb->devstate[id][lun] = ARECA_RAID_GOOD; + ccb->pcmd->result = DID_OK << 16; + arcmsr_ccb_complete(ccb); + }else{ + switch (ccb->arcmsr_cdb.DeviceStatus) { + case ARCMSR_DEV_SELECT_TIMEOUT: { + acb->devstate[id][lun] = ARECA_RAID_GONE; + ccb->pcmd->result = DID_NO_CONNECT << 16; + arcmsr_ccb_complete(ccb); + } + break; + + case ARCMSR_DEV_ABORTED: + + case ARCMSR_DEV_INIT_FAIL: { + acb->devstate[id][lun] = ARECA_RAID_GONE; + ccb->pcmd->result = DID_BAD_TARGET << 16; + arcmsr_ccb_complete(ccb); + } + break; + + case ARCMSR_DEV_CHECK_CONDITION: { + acb->devstate[id][lun] = ARECA_RAID_GOOD; + arcmsr_report_sense_info(ccb); + arcmsr_ccb_complete(ccb); + } + break; + + default: + printk(KERN_NOTICE + "arcmsr%d: scsi id = %d lun = %d isr get command error done, \ + but got unknown DeviceStatus = 0x%x \n" + , acb->host->host_no + , id + , lun + , ccb->arcmsr_cdb.DeviceStatus); + acb->devstate[id][lun] = ARECA_RAID_GONE; + ccb->pcmd->result = DID_NO_CONNECT << 16; + arcmsr_ccb_complete(ccb); + break; + } + } +} + +static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error) +{ + if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { + if (pCCB->startdone == ARCMSR_CCB_ABORTED) { + struct scsi_cmnd *abortcmd = pCCB->pcmd; + if (abortcmd) { + abortcmd->result |= DID_ABORT << 16; + arcmsr_ccb_complete(pCCB); + printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n", + acb->host->host_no, pCCB); + } + return; + } + printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \ + done acb = '0x%p'" + "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x" + " ccboutstandingcount = %d \n" + , acb->host->host_no + , acb + , pCCB + , pCCB->acb + , pCCB->startdone + 
, atomic_read(&acb->ccboutstandingcount)); + return; + } + arcmsr_report_ccb_state(acb, pCCB, error); +} + +static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) +{ + int i = 0; + uint32_t flag_ccb; + struct ARCMSR_CDB *pARCMSR_CDB; + bool error; + struct CommandControlBlock *pCCB; + unsigned long ccb_cdb_phy; + + switch (acb->adapter_type) { + + case ACB_ADAPTER_TYPE_A: { + struct MessageUnit_A __iomem *reg = acb->pmuA; + uint32_t outbound_intstatus; + outbound_intstatus = readl(®->outbound_intstatus) & + acb->outbound_int_enable; + /*clear and abort all outbound posted Q*/ + writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/ + while(((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF) + && (i++ < acb->maxOutstanding)) { + ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff; + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); + pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); + error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; + arcmsr_drain_donequeue(acb, pCCB, error); + } + } + break; + + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; + /*clear all outbound posted Q*/ + writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */ + for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { + flag_ccb = reg->done_qbuffer[i]; + if (flag_ccb != 0) { + reg->done_qbuffer[i] = 0; + ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff; + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); + pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); + error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; + arcmsr_drain_donequeue(acb, pCCB, error); + } + reg->post_qbuffer[i] = 0; + } + reg->doneq_index = 0; + reg->postq_index = 0; + } + break; + case ACB_ADAPTER_TYPE_C: { + struct MessageUnit_C __iomem *reg = acb->pmuC; + while ((readl(®->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) { + /*need to do*/ + flag_ccb = readl(®->outbound_queueport_low); + ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); + pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); + error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; + arcmsr_drain_donequeue(acb, pCCB, error); + } + } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *pmu = acb->pmuD; + uint32_t outbound_write_pointer; + uint32_t doneq_index, index_stripped, addressLow, residual, toggle; + unsigned long flags; + + residual = atomic_read(&acb->ccboutstandingcount); + for (i = 0; i < residual; i++) { + spin_lock_irqsave(&acb->doneq_lock, flags); + outbound_write_pointer = + pmu->done_qbuffer[0].addressLow + 1; + doneq_index = pmu->doneq_index; + if ((doneq_index & 0xFFF) != + (outbound_write_pointer & 0xFFF)) { + toggle = doneq_index & 0x4000; + index_stripped = (doneq_index & 0xFFF) + 1; + index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; + pmu->doneq_index = index_stripped ? 
(index_stripped | toggle) : + ((toggle ^ 0x4000) + 1); + doneq_index = pmu->doneq_index; + spin_unlock_irqrestore(&acb->doneq_lock, flags); + addressLow = pmu->done_qbuffer[doneq_index & + 0xFFF].addressLow; + ccb_cdb_phy = (addressLow & 0xFFFFFFF0); + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + pARCMSR_CDB = (struct ARCMSR_CDB *) + (acb->vir2phy_offset + ccb_cdb_phy); + pCCB = container_of(pARCMSR_CDB, + struct CommandControlBlock, arcmsr_cdb); + error = (addressLow & + ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? + true : false; + arcmsr_drain_donequeue(acb, pCCB, error); + writel(doneq_index, + pmu->outboundlist_read_pointer); + } else { + spin_unlock_irqrestore(&acb->doneq_lock, flags); + mdelay(10); + } + } + pmu->postq_index = 0; + pmu->doneq_index = 0x40FF; + } + break; + case ACB_ADAPTER_TYPE_E: + arcmsr_hbaE_postqueue_isr(acb); + break; + case ACB_ADAPTER_TYPE_F: + arcmsr_hbaF_postqueue_isr(acb); + break; + } +} + +static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb) +{ + char *acb_dev_map = (char *)acb->device_map; + int target, lun, i; + struct scsi_device *psdev; + struct CommandControlBlock *ccb; + char temp; + + for (i = 0; i < acb->maxFreeCCB; i++) { + ccb = acb->pccb_pool[i]; + if (ccb->startdone == ARCMSR_CCB_START) { + ccb->pcmd->result = DID_NO_CONNECT << 16; + scsi_dma_unmap(ccb->pcmd); + scsi_done(ccb->pcmd); + } + } + for (target = 0; target < ARCMSR_MAX_TARGETID; target++) { + temp = *acb_dev_map; + if (temp) { + for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { + if (temp & 1) { + psdev = scsi_device_lookup(acb->host, + 0, target, lun); + if (psdev != NULL) { + scsi_remove_device(psdev); + scsi_device_put(psdev); + } + } + temp >>= 1; + } + *acb_dev_map = 0; + } + acb_dev_map++; + } +} + +static void arcmsr_free_pcidev(struct AdapterControlBlock *acb) +{ + struct pci_dev *pdev; + struct Scsi_Host *host; + + host = acb->host; + arcmsr_free_sysfs_attr(acb); + scsi_remove_host(host); + flush_work(&acb->arcmsr_do_message_isr_bh); + del_timer_sync(&acb->eternal_timer); + if (set_date_time) + del_timer_sync(&acb->refresh_timer); + pdev = acb->pdev; + arcmsr_free_irq(pdev, acb); + arcmsr_free_ccb_pool(acb); + if (acb->adapter_type == ACB_ADAPTER_TYPE_F) + arcmsr_free_io_queue(acb); + arcmsr_unmap_pciregion(acb); + pci_release_regions(pdev); + scsi_host_put(host); + pci_disable_device(pdev); +} + +static void arcmsr_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *) host->hostdata; + int poll_count = 0; + uint16_t dev_id; + + pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id); + if (dev_id == 0xffff) { + acb->acb_flags &= ~ACB_F_IOP_INITED; + acb->acb_flags |= ACB_F_ADAPTER_REMOVED; + arcmsr_remove_scsi_devices(acb); + arcmsr_free_pcidev(acb); + return; + } + arcmsr_free_sysfs_attr(acb); + scsi_remove_host(host); + flush_work(&acb->arcmsr_do_message_isr_bh); + del_timer_sync(&acb->eternal_timer); + if (set_date_time) + del_timer_sync(&acb->refresh_timer); + arcmsr_disable_outbound_ints(acb); + arcmsr_stop_adapter_bgrb(acb); + arcmsr_flush_adapter_cache(acb); + acb->acb_flags |= ACB_F_SCSISTOPADAPTER; + acb->acb_flags &= ~ACB_F_IOP_INITED; + + for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){ + if (!atomic_read(&acb->ccboutstandingcount)) + break; + arcmsr_interrupt(acb);/* FIXME: need spinlock */ + msleep(25); + } + + if (atomic_read(&acb->ccboutstandingcount)) { + int i; + + arcmsr_abort_allcmd(acb); + 
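+		/*
+		 * The polling loop above gave outstanding commands a chance to
+		 * complete normally.  Anything still pending is now aborted on the
+		 * IOP, the post/done queues are drained, and every CCB still in
+		 * ARCMSR_CCB_START state is completed with DID_ABORT so the SCSI
+		 * layer gets its callbacks before the resources are torn down.
+		 */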
arcmsr_done4abort_postqueue(acb); + for (i = 0; i < acb->maxFreeCCB; i++) { + struct CommandControlBlock *ccb = acb->pccb_pool[i]; + if (ccb->startdone == ARCMSR_CCB_START) { + ccb->startdone = ARCMSR_CCB_ABORTED; + ccb->pcmd->result = DID_ABORT << 16; + arcmsr_ccb_complete(ccb); + } + } + } + arcmsr_free_irq(pdev, acb); + arcmsr_free_ccb_pool(acb); + if (acb->adapter_type == ACB_ADAPTER_TYPE_F) + arcmsr_free_io_queue(acb); + arcmsr_unmap_pciregion(acb); + pci_release_regions(pdev); + scsi_host_put(host); + pci_disable_device(pdev); +} + +static void arcmsr_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *)host->hostdata; + if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) + return; + del_timer_sync(&acb->eternal_timer); + if (set_date_time) + del_timer_sync(&acb->refresh_timer); + arcmsr_disable_outbound_ints(acb); + arcmsr_free_irq(pdev, acb); + flush_work(&acb->arcmsr_do_message_isr_bh); + arcmsr_stop_adapter_bgrb(acb); + arcmsr_flush_adapter_cache(acb); +} + +static int __init arcmsr_module_init(void) +{ + int error = 0; + error = pci_register_driver(&arcmsr_pci_driver); + return error; +} + +static void __exit arcmsr_module_exit(void) +{ + pci_unregister_driver(&arcmsr_pci_driver); +} +module_init(arcmsr_module_init); +module_exit(arcmsr_module_exit); + +static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, + u32 intmask_org) +{ + u32 mask; + switch (acb->adapter_type) { + + case ACB_ADAPTER_TYPE_A: { + struct MessageUnit_A __iomem *reg = acb->pmuA; + mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | + ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE| + ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE); + writel(mask, ®->outbound_intmask); + acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; + } + break; + + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; + mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | + ARCMSR_IOP2DRV_DATA_READ_OK | + ARCMSR_IOP2DRV_CDB_DONE | + ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); + writel(mask, reg->iop2drv_doorbell_mask); + acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; + } + break; + case ACB_ADAPTER_TYPE_C: { + struct MessageUnit_C __iomem *reg = acb->pmuC; + mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK); + writel(intmask_org & mask, ®->host_int_mask); + acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f; + } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + + mask = ARCMSR_ARC1214_ALL_INT_ENABLE; + writel(intmask_org | mask, reg->pcief0_int_enable); + break; + } + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: { + struct MessageUnit_E __iomem *reg = acb->pmuE; + + mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR); + writel(intmask_org & mask, ®->host_int_mask); + break; + } + } +} + +static int arcmsr_build_ccb(struct AdapterControlBlock *acb, + struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd) +{ + struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; + int8_t *psge = (int8_t *)&arcmsr_cdb->u; + __le32 address_lo, address_hi; + int arccdbsize = 0x30; + __le32 length = 0; + int i; + struct scatterlist *sg; + int nseg; + ccb->pcmd = pcmd; + memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB)); + arcmsr_cdb->TargetID = pcmd->device->id; + arcmsr_cdb->LUN = pcmd->device->lun; + arcmsr_cdb->Function = 1; + 
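+	/*
+	 * The scatter/gather loop below picks the entry format per segment: a
+	 * compact SG32ENTRY when the upper 32 bits of the segment DMA address
+	 * are zero, otherwise an SG64ENTRY with IS_SG64_ADDR or'ed into the
+	 * length.  arccdbsize starts at 0x30 (the fixed CDB header) and grows
+	 * with each entry; once it exceeds 256 bytes the
+	 * ARCMSR_CDB_FLAG_SGL_BSIZE flag is set, presumably so the IOP fetches
+	 * the larger CDB.
+	 */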
arcmsr_cdb->msgContext = 0; + memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len); + + nseg = scsi_dma_map(pcmd); + if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0)) + return FAILED; + scsi_for_each_sg(pcmd, sg, nseg, i) { + /* Get the physical address of the current data pointer */ + length = cpu_to_le32(sg_dma_len(sg)); + address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg))); + address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg))); + if (address_hi == 0) { + struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge; + + pdma_sg->address = address_lo; + pdma_sg->length = length; + psge += sizeof (struct SG32ENTRY); + arccdbsize += sizeof (struct SG32ENTRY); + } else { + struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge; + + pdma_sg->addresshigh = address_hi; + pdma_sg->address = address_lo; + pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR); + psge += sizeof (struct SG64ENTRY); + arccdbsize += sizeof (struct SG64ENTRY); + } + } + arcmsr_cdb->sgcount = (uint8_t)nseg; + arcmsr_cdb->DataLength = scsi_bufflen(pcmd); + arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0); + if ( arccdbsize > 256) + arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE; + if (pcmd->sc_data_direction == DMA_TO_DEVICE) + arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; + ccb->arc_cdb_size = arccdbsize; + return SUCCESS; +} + +static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb) +{ + uint32_t cdb_phyaddr = ccb->cdb_phyaddr; + struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; + atomic_inc(&acb->ccboutstandingcount); + ccb->startdone = ARCMSR_CCB_START; + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: { + struct MessageUnit_A __iomem *reg = acb->pmuA; + + if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) + writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE, + ®->inbound_queueport); + else + writel(cdb_phyaddr, ®->inbound_queueport); + break; + } + + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; + uint32_t ending_index, index = reg->postq_index; + + ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE); + reg->post_qbuffer[ending_index] = 0; + if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { + reg->post_qbuffer[index] = + cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE; + } else { + reg->post_qbuffer[index] = cdb_phyaddr; + } + index++; + index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */ + reg->postq_index = index; + writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell); + } + break; + case ACB_ADAPTER_TYPE_C: { + struct MessageUnit_C __iomem *phbcmu = acb->pmuC; + uint32_t ccb_post_stamp, arc_cdb_size; + + arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 
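+		/*
+		 * Types C, E and F post a single 32-bit "stamp" per command: the
+		 * CDB size is capped at 0x300 and folded into the low bits as
+		 * ((size - 1) >> 6), combined with the CDB physical address here
+		 * (type C) or with the command SMID (types E/F, F using its own
+		 * two-range size encoding below).
+		 */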
0x300 : ccb->arc_cdb_size; + ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1); + writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high); + writel(ccb_post_stamp, &phbcmu->inbound_queueport_low); + } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *pmu = acb->pmuD; + u16 index_stripped; + u16 postq_index, toggle; + unsigned long flags; + struct InBound_SRB *pinbound_srb; + + spin_lock_irqsave(&acb->postq_lock, flags); + postq_index = pmu->postq_index; + pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]); + pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr); + pinbound_srb->addressLow = cdb_phyaddr; + pinbound_srb->length = ccb->arc_cdb_size >> 2; + arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr); + toggle = postq_index & 0x4000; + index_stripped = postq_index + 1; + index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1); + pmu->postq_index = index_stripped ? (index_stripped | toggle) : + (toggle ^ 0x4000); + writel(postq_index, pmu->inboundlist_write_pointer); + spin_unlock_irqrestore(&acb->postq_lock, flags); + break; + } + case ACB_ADAPTER_TYPE_E: { + struct MessageUnit_E __iomem *pmu = acb->pmuE; + u32 ccb_post_stamp, arc_cdb_size; + + arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size; + ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6)); + writel(0, &pmu->inbound_queueport_high); + writel(ccb_post_stamp, &pmu->inbound_queueport_low); + break; + } + case ACB_ADAPTER_TYPE_F: { + struct MessageUnit_F __iomem *pmu = acb->pmuF; + u32 ccb_post_stamp, arc_cdb_size; + + if (ccb->arc_cdb_size <= 0x300) + arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1; + else { + arc_cdb_size = ((ccb->arc_cdb_size + 0xff) >> 8) + 2; + if (arc_cdb_size > 0xF) + arc_cdb_size = 0xF; + arc_cdb_size = (arc_cdb_size << 1) | 1; + } + ccb_post_stamp = (ccb->smid | arc_cdb_size); + writel(0, &pmu->inbound_queueport_high); + writel(ccb_post_stamp, &pmu->inbound_queueport_low); + break; + } + } +} + +static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb) +{ + struct MessageUnit_A __iomem *reg = acb->pmuA; + acb->acb_flags &= ~ACB_F_MSG_START_BGRB; + writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); + if (!arcmsr_hbaA_wait_msgint_ready(acb)) { + printk(KERN_NOTICE + "arcmsr%d: wait 'stop adapter background rebuild' timeout\n" + , acb->host->host_no); + } +} + +static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb) +{ + struct MessageUnit_B *reg = acb->pmuB; + acb->acb_flags &= ~ACB_F_MSG_START_BGRB; + writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell); + + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { + printk(KERN_NOTICE + "arcmsr%d: wait 'stop adapter background rebuild' timeout\n" + , acb->host->host_no); + } +} + +static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_C __iomem *reg = pACB->pmuC; + pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; + writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); + writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); + if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { + printk(KERN_NOTICE + "arcmsr%d: wait 'stop adapter background rebuild' timeout\n" + , pACB->host->host_no); + } + return; +} + +static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_D *reg = pACB->pmuD; + + pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; + writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0); + if (!arcmsr_hbaD_wait_msgint_ready(pACB)) + pr_notice("arcmsr%d: wait 'stop 
adapter background rebuild' " + "timeout\n", pACB->host->host_no); +} + +static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_E __iomem *reg = pACB->pmuE; + + pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; + writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); + pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(pACB->out_doorbell, ®->iobound_doorbell); + if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { + pr_notice("arcmsr%d: wait 'stop adapter background rebuild' " + "timeout\n", pACB->host->host_no); + } +} + +static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) +{ + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: + arcmsr_hbaA_stop_bgrb(acb); + break; + case ACB_ADAPTER_TYPE_B: + arcmsr_hbaB_stop_bgrb(acb); + break; + case ACB_ADAPTER_TYPE_C: + arcmsr_hbaC_stop_bgrb(acb); + break; + case ACB_ADAPTER_TYPE_D: + arcmsr_hbaD_stop_bgrb(acb); + break; + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: + arcmsr_hbaE_stop_bgrb(acb); + break; + } +} + +static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) +{ + dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle); +} + +static void arcmsr_iop_message_read(struct AdapterControlBlock *acb) +{ + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: { + struct MessageUnit_A __iomem *reg = acb->pmuA; + writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); + } + break; + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; + writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell); + } + break; + case ACB_ADAPTER_TYPE_C: { + struct MessageUnit_C __iomem *reg = acb->pmuC; + + writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell); + } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ, + reg->inbound_doorbell); + } + break; + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: { + struct MessageUnit_E __iomem *reg = acb->pmuE; + acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK; + writel(acb->out_doorbell, ®->iobound_doorbell); + } + break; + } +} + +static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) +{ + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: { + struct MessageUnit_A __iomem *reg = acb->pmuA; + /* + ** push inbound doorbell tell iop, driver data write ok + ** and wait reply on next hwinterrupt for next Qbuffer post + */ + writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, ®->inbound_doorbell); + } + break; + + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; + /* + ** push inbound doorbell tell iop, driver data write ok + ** and wait reply on next hwinterrupt for next Qbuffer post + */ + writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell); + } + break; + case ACB_ADAPTER_TYPE_C: { + struct MessageUnit_C __iomem *reg = acb->pmuC; + /* + ** push inbound doorbell tell iop, driver data write ok + ** and wait reply on next hwinterrupt for next Qbuffer post + */ + writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, ®->inbound_doorbell); + } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY, + reg->inbound_doorbell); + } + break; + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: { + struct MessageUnit_E __iomem *reg = acb->pmuE; + acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK; + writel(acb->out_doorbell, ®->iobound_doorbell); + } + break; + } +} + +struct QBUFFER __iomem 
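+/*
+ * arcmsr_get_iop_rqbuffer()/arcmsr_get_iop_wqbuffer() return the IOP side of
+ * the ioctl message buffers (struct QBUFFER).  For types A, C and E these
+ * live directly in the mapped register block; B and D go through the
+ * pointers set up in arcmsr_hbaB/D_assign_regAddr(); type F uses the
+ * host-memory buffers laid out by arcmsr_hbaF_assign_regAddr().
+ */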
*arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb) +{ + struct QBUFFER __iomem *qbuffer = NULL; + switch (acb->adapter_type) { + + case ACB_ADAPTER_TYPE_A: { + struct MessageUnit_A __iomem *reg = acb->pmuA; + qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer; + } + break; + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; + qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer; + } + break; + case ACB_ADAPTER_TYPE_C: { + struct MessageUnit_C __iomem *phbcmu = acb->pmuC; + qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer; + } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer; + } + break; + case ACB_ADAPTER_TYPE_E: { + struct MessageUnit_E __iomem *reg = acb->pmuE; + qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer; + } + break; + case ACB_ADAPTER_TYPE_F: { + qbuffer = (struct QBUFFER __iomem *)acb->message_rbuffer; + } + break; + } + return qbuffer; +} + +static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb) +{ + struct QBUFFER __iomem *pqbuffer = NULL; + switch (acb->adapter_type) { + + case ACB_ADAPTER_TYPE_A: { + struct MessageUnit_A __iomem *reg = acb->pmuA; + pqbuffer = (struct QBUFFER __iomem *) ®->message_wbuffer; + } + break; + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; + pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer; + } + break; + case ACB_ADAPTER_TYPE_C: { + struct MessageUnit_C __iomem *reg = acb->pmuC; + pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer; + } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer; + } + break; + case ACB_ADAPTER_TYPE_E: { + struct MessageUnit_E __iomem *reg = acb->pmuE; + pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer; + } + break; + case ACB_ADAPTER_TYPE_F: + pqbuffer = (struct QBUFFER __iomem *)acb->message_wbuffer; + break; + } + return pqbuffer; +} + +static uint32_t +arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb, + struct QBUFFER __iomem *prbuffer) +{ + uint8_t *pQbuffer; + uint8_t *buf1 = NULL; + uint32_t __iomem *iop_data; + uint32_t iop_len, data_len, *buf2 = NULL; + + iop_data = (uint32_t __iomem *)prbuffer->data; + iop_len = readl(&prbuffer->data_len); + if (iop_len > 0) { + buf1 = kmalloc(128, GFP_ATOMIC); + buf2 = (uint32_t *)buf1; + if (buf1 == NULL) + return 0; + data_len = iop_len; + while (data_len >= 4) { + *buf2++ = readl(iop_data); + iop_data++; + data_len -= 4; + } + if (data_len) + *buf2 = readl(iop_data); + buf2 = (uint32_t *)buf1; + } + while (iop_len > 0) { + pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex]; + *pQbuffer = *buf1; + acb->rqbuf_putIndex++; + /* if last, index number set it to 0 */ + acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER; + buf1++; + iop_len--; + } + kfree(buf2); + /* let IOP know data has been read */ + arcmsr_iop_message_read(acb); + return 1; +} + +uint32_t +arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, + struct QBUFFER __iomem *prbuffer) { + + uint8_t *pQbuffer; + uint8_t __iomem *iop_data; + uint32_t iop_len; + + if (acb->adapter_type > ACB_ADAPTER_TYPE_B) + return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer); + iop_data = (uint8_t __iomem *)prbuffer->data; + iop_len = readl(&prbuffer->data_len); + while (iop_len > 0) { + pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex]; + *pQbuffer = readb(iop_data); + acb->rqbuf_putIndex++; + acb->rqbuf_putIndex %= 
ARCMSR_MAX_QBUFFER; + iop_data++; + iop_len--; + } + arcmsr_iop_message_read(acb); + return 1; +} + +static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) +{ + unsigned long flags; + struct QBUFFER __iomem *prbuffer; + int32_t buf_empty_len; + + spin_lock_irqsave(&acb->rqbuffer_lock, flags); + prbuffer = arcmsr_get_iop_rqbuffer(acb); + if (acb->rqbuf_putIndex >= acb->rqbuf_getIndex) { + buf_empty_len = (ARCMSR_MAX_QBUFFER - 1) - + (acb->rqbuf_putIndex - acb->rqbuf_getIndex); + } else + buf_empty_len = acb->rqbuf_getIndex - acb->rqbuf_putIndex - 1; + if (buf_empty_len >= readl(&prbuffer->data_len)) { + if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) + acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; + } else + acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; + spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); +} + +static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb) +{ + uint8_t *pQbuffer; + struct QBUFFER __iomem *pwbuffer; + uint8_t *buf1 = NULL; + uint32_t __iomem *iop_data; + uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data; + + if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { + buf1 = kmalloc(128, GFP_ATOMIC); + buf2 = (uint32_t *)buf1; + if (buf1 == NULL) + return; + + acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); + pwbuffer = arcmsr_get_iop_wqbuffer(acb); + iop_data = (uint32_t __iomem *)pwbuffer->data; + while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex) + && (allxfer_len < 124)) { + pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex]; + *buf1 = *pQbuffer; + acb->wqbuf_getIndex++; + acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER; + buf1++; + allxfer_len++; + } + data_len = allxfer_len; + buf1 = (uint8_t *)buf2; + while (data_len >= 4) { + data = *buf2++; + writel(data, iop_data); + iop_data++; + data_len -= 4; + } + if (data_len) { + data = *buf2; + writel(data, iop_data); + } + writel(allxfer_len, &pwbuffer->data_len); + kfree(buf1); + arcmsr_iop_message_wrote(acb); + } +} + +void +arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb) +{ + uint8_t *pQbuffer; + struct QBUFFER __iomem *pwbuffer; + uint8_t __iomem *iop_data; + int32_t allxfer_len = 0; + + if (acb->adapter_type > ACB_ADAPTER_TYPE_B) { + arcmsr_write_ioctldata2iop_in_DWORD(acb); + return; + } + if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { + acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); + pwbuffer = arcmsr_get_iop_wqbuffer(acb); + iop_data = (uint8_t __iomem *)pwbuffer->data; + while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex) + && (allxfer_len < 124)) { + pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex]; + writeb(*pQbuffer, iop_data); + acb->wqbuf_getIndex++; + acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER; + iop_data++; + allxfer_len++; + } + writel(allxfer_len, &pwbuffer->data_len); + arcmsr_iop_message_wrote(acb); + } +} + +static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) +{ + unsigned long flags; + + spin_lock_irqsave(&acb->wqbuffer_lock, flags); + acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; + if (acb->wqbuf_getIndex != acb->wqbuf_putIndex) + arcmsr_write_ioctldata2iop(acb); + if (acb->wqbuf_getIndex == acb->wqbuf_putIndex) + acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); +} + +static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb) +{ + uint32_t outbound_doorbell; + struct MessageUnit_A __iomem *reg = acb->pmuA; + outbound_doorbell = readl(®->outbound_doorbell); + do { + writel(outbound_doorbell, ®->outbound_doorbell); + if 
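+		/*
+		 * DATA_WRITE_OK: the IOP has placed ioctl data in its rqbuffer for
+		 * the driver to pull into acb->rqbuffer; DATA_READ_OK: the IOP has
+		 * consumed the last wqbuffer, so more queued ioctl data can be
+		 * pushed.  The doorbell is acked above by writing the same bits
+		 * back, and the loop re-reads it so a doorbell rung while handling
+		 * this one is not lost.
+		 */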
(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) + arcmsr_iop2drv_data_wrote_handle(acb); + if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) + arcmsr_iop2drv_data_read_handle(acb); + outbound_doorbell = readl(®->outbound_doorbell); + } while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK + | ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)); +} +static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB) +{ + uint32_t outbound_doorbell; + struct MessageUnit_C __iomem *reg = pACB->pmuC; + /* + ******************************************************************* + ** Maybe here we need to check wrqbuffer_lock is lock or not + ** DOORBELL: din! don! + ** check if there are any mail need to pack from firmware + ******************************************************************* + */ + outbound_doorbell = readl(®->outbound_doorbell); + do { + writel(outbound_doorbell, ®->outbound_doorbell_clear); + readl(®->outbound_doorbell_clear); + if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) + arcmsr_iop2drv_data_wrote_handle(pACB); + if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) + arcmsr_iop2drv_data_read_handle(pACB); + if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) + arcmsr_hbaC_message_isr(pACB); + outbound_doorbell = readl(®->outbound_doorbell); + } while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK + | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK + | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)); +} + +static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB) +{ + uint32_t outbound_doorbell; + struct MessageUnit_D *pmu = pACB->pmuD; + + outbound_doorbell = readl(pmu->outbound_doorbell); + do { + writel(outbound_doorbell, pmu->outbound_doorbell); + if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) + arcmsr_hbaD_message_isr(pACB); + if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) + arcmsr_iop2drv_data_wrote_handle(pACB); + if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK) + arcmsr_iop2drv_data_read_handle(pACB); + outbound_doorbell = readl(pmu->outbound_doorbell); + } while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK + | ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK + | ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)); +} + +static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB) +{ + uint32_t outbound_doorbell, in_doorbell, tmp, i; + struct MessageUnit_E __iomem *reg = pACB->pmuE; + + if (pACB->adapter_type == ACB_ADAPTER_TYPE_F) { + for (i = 0; i < 5; i++) { + in_doorbell = readl(®->iobound_doorbell); + if (in_doorbell != 0) + break; + } + } else + in_doorbell = readl(®->iobound_doorbell); + outbound_doorbell = in_doorbell ^ pACB->in_doorbell; + do { + writel(0, ®->host_int_status); /* clear interrupt */ + if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) { + arcmsr_iop2drv_data_wrote_handle(pACB); + } + if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) { + arcmsr_iop2drv_data_read_handle(pACB); + } + if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) { + arcmsr_hbaE_message_isr(pACB); + } + tmp = in_doorbell; + in_doorbell = readl(®->iobound_doorbell); + outbound_doorbell = tmp ^ in_doorbell; + } while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK + | ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK + | ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE)); + pACB->in_doorbell = in_doorbell; +} + +static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb) +{ + uint32_t flag_ccb; + struct MessageUnit_A __iomem *reg = acb->pmuA; + struct 
ARCMSR_CDB *pARCMSR_CDB; + struct CommandControlBlock *pCCB; + bool error; + unsigned long cdb_phy_addr; + + while ((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF) { + cdb_phy_addr = (flag_ccb << 5) & 0xffffffff; + if (acb->cdb_phyadd_hipart) + cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart; + pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr); + pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); + error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; + arcmsr_drain_donequeue(acb, pCCB, error); + } +} +static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb) +{ + uint32_t index; + uint32_t flag_ccb; + struct MessageUnit_B *reg = acb->pmuB; + struct ARCMSR_CDB *pARCMSR_CDB; + struct CommandControlBlock *pCCB; + bool error; + unsigned long cdb_phy_addr; + + index = reg->doneq_index; + while ((flag_ccb = reg->done_qbuffer[index]) != 0) { + cdb_phy_addr = (flag_ccb << 5) & 0xffffffff; + if (acb->cdb_phyadd_hipart) + cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart; + pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr); + pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); + error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; + arcmsr_drain_donequeue(acb, pCCB, error); + reg->done_qbuffer[index] = 0; + index++; + index %= ARCMSR_MAX_HBB_POSTQUEUE; + reg->doneq_index = index; + } +} + +static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb) +{ + struct MessageUnit_C __iomem *phbcmu; + struct ARCMSR_CDB *arcmsr_cdb; + struct CommandControlBlock *ccb; + uint32_t flag_ccb, throttling = 0; + unsigned long ccb_cdb_phy; + int error; + + phbcmu = acb->pmuC; + /* areca cdb command done */ + /* Use correct offset and size for syncing */ + + while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) != + 0xFFFFFFFF) { + ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + + ccb_cdb_phy); + ccb = container_of(arcmsr_cdb, struct CommandControlBlock, + arcmsr_cdb); + error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) + ? true : false; + /* check if command done with no error */ + arcmsr_drain_donequeue(acb, ccb, error); + throttling++; + if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) { + writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, + &phbcmu->inbound_doorbell); + throttling = 0; + } + } +} + +static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb) +{ + u32 outbound_write_pointer, doneq_index, index_stripped, toggle; + uint32_t addressLow; + int error; + struct MessageUnit_D *pmu; + struct ARCMSR_CDB *arcmsr_cdb; + struct CommandControlBlock *ccb; + unsigned long flags, ccb_cdb_phy; + + spin_lock_irqsave(&acb->doneq_lock, flags); + pmu = acb->pmuD; + outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1; + doneq_index = pmu->doneq_index; + if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) { + do { + toggle = doneq_index & 0x4000; + index_stripped = (doneq_index & 0xFFF) + 1; + index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; + pmu->doneq_index = index_stripped ? 
(index_stripped | toggle) : + ((toggle ^ 0x4000) + 1); + doneq_index = pmu->doneq_index; + addressLow = pmu->done_qbuffer[doneq_index & + 0xFFF].addressLow; + ccb_cdb_phy = (addressLow & 0xFFFFFFF0); + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + + ccb_cdb_phy); + ccb = container_of(arcmsr_cdb, + struct CommandControlBlock, arcmsr_cdb); + error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) + ? true : false; + arcmsr_drain_donequeue(acb, ccb, error); + writel(doneq_index, pmu->outboundlist_read_pointer); + } while ((doneq_index & 0xFFF) != + (outbound_write_pointer & 0xFFF)); + } + writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR, + pmu->outboundlist_interrupt_cause); + readl(pmu->outboundlist_interrupt_cause); + spin_unlock_irqrestore(&acb->doneq_lock, flags); +} + +static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb) +{ + uint32_t doneq_index; + uint16_t cmdSMID; + int error; + struct MessageUnit_E __iomem *pmu; + struct CommandControlBlock *ccb; + unsigned long flags; + + spin_lock_irqsave(&acb->doneq_lock, flags); + doneq_index = acb->doneq_index; + pmu = acb->pmuE; + while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) { + cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID; + ccb = acb->pccb_pool[cmdSMID]; + error = (acb->pCompletionQ[doneq_index].cmdFlag + & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; + arcmsr_drain_donequeue(acb, ccb, error); + doneq_index++; + if (doneq_index >= acb->completionQ_entry) + doneq_index = 0; + } + acb->doneq_index = doneq_index; + writel(doneq_index, &pmu->reply_post_consumer_index); + spin_unlock_irqrestore(&acb->doneq_lock, flags); +} + +static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb) +{ + uint32_t doneq_index; + uint16_t cmdSMID; + int error; + struct MessageUnit_F __iomem *phbcmu; + struct CommandControlBlock *ccb; + unsigned long flags; + + spin_lock_irqsave(&acb->doneq_lock, flags); + doneq_index = acb->doneq_index; + phbcmu = acb->pmuF; + while (1) { + cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID; + if (cmdSMID == 0xffff) + break; + ccb = acb->pccb_pool[cmdSMID]; + error = (acb->pCompletionQ[doneq_index].cmdFlag & + ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; + arcmsr_drain_donequeue(acb, ccb, error); + acb->pCompletionQ[doneq_index].cmdSMID = 0xffff; + doneq_index++; + if (doneq_index >= acb->completionQ_entry) + doneq_index = 0; + } + acb->doneq_index = doneq_index; + writel(doneq_index, &phbcmu->reply_post_consumer_index); + spin_unlock_irqrestore(&acb->doneq_lock, flags); +} + +/* +********************************************************************************** +** Handle a message interrupt +** +** The only message interrupt we expect is in response to a query for the current adapter config. +** We want this in order to compare the drivemap so that we can detect newly-attached drives. 
+********************************************************************************** +*/ +static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb) +{ + struct MessageUnit_A __iomem *reg = acb->pmuA; + /*clear interrupt and message state*/ + writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus); + if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) + schedule_work(&acb->arcmsr_do_message_isr_bh); +} +static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb) +{ + struct MessageUnit_B *reg = acb->pmuB; + + /*clear interrupt and message state*/ + writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); + if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) + schedule_work(&acb->arcmsr_do_message_isr_bh); +} +/* +********************************************************************************** +** Handle a message interrupt +** +** The only message interrupt we expect is in response to a query for the +** current adapter config. +** We want this in order to compare the drivemap so that we can detect newly-attached drives. +********************************************************************************** +*/ +static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb) +{ + struct MessageUnit_C __iomem *reg = acb->pmuC; + /*clear interrupt and message state*/ + writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear); + if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) + schedule_work(&acb->arcmsr_do_message_isr_bh); +} + +static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb) +{ + struct MessageUnit_D *reg = acb->pmuD; + + writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell); + readl(reg->outbound_doorbell); + if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) + schedule_work(&acb->arcmsr_do_message_isr_bh); +} + +static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb) +{ + struct MessageUnit_E __iomem *reg = acb->pmuE; + + writel(0, &reg->host_int_status); + if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) + schedule_work(&acb->arcmsr_do_message_isr_bh); +} + +static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb) +{ + uint32_t outbound_intstatus; + struct MessageUnit_A __iomem *reg = acb->pmuA; + outbound_intstatus = readl(&reg->outbound_intstatus) & + acb->outbound_int_enable; + if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) + return IRQ_NONE; + do { + writel(outbound_intstatus, &reg->outbound_intstatus); + if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) + arcmsr_hbaA_doorbell_isr(acb); + if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) + arcmsr_hbaA_postqueue_isr(acb); + if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) + arcmsr_hbaA_message_isr(acb); + outbound_intstatus = readl(&reg->outbound_intstatus) & + acb->outbound_int_enable; + } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT + | ARCMSR_MU_OUTBOUND_POSTQUEUE_INT + | ARCMSR_MU_OUTBOUND_MESSAGE0_INT)); + return IRQ_HANDLED; +} + +static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb) +{ + uint32_t outbound_doorbell; + struct MessageUnit_B *reg = acb->pmuB; + outbound_doorbell = readl(reg->iop2drv_doorbell) & + acb->outbound_int_enable; + if (!outbound_doorbell) + return IRQ_NONE; + do { + writel(~outbound_doorbell, reg->iop2drv_doorbell); + writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell); + if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) + arcmsr_iop2drv_data_wrote_handle(acb); + if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) +
arcmsr_iop2drv_data_read_handle(acb); + if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) + arcmsr_hbaB_postqueue_isr(acb); + if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) + arcmsr_hbaB_message_isr(acb); + outbound_doorbell = readl(reg->iop2drv_doorbell) & + acb->outbound_int_enable; + } while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK + | ARCMSR_IOP2DRV_DATA_READ_OK + | ARCMSR_IOP2DRV_CDB_DONE + | ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)); + return IRQ_HANDLED; +} + +static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB) +{ + uint32_t host_interrupt_status; + struct MessageUnit_C __iomem *phbcmu = pACB->pmuC; + /* + ********************************************* + ** check outbound intstatus + ********************************************* + */ + host_interrupt_status = readl(&phbcmu->host_int_status) & + (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | + ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR); + if (!host_interrupt_status) + return IRQ_NONE; + do { + if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) + arcmsr_hbaC_doorbell_isr(pACB); + /* MU post queue interrupts*/ + if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) + arcmsr_hbaC_postqueue_isr(pACB); + host_interrupt_status = readl(&phbcmu->host_int_status); + } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | + ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)); + return IRQ_HANDLED; +} + +static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB) +{ + u32 host_interrupt_status; + struct MessageUnit_D *pmu = pACB->pmuD; + + host_interrupt_status = readl(pmu->host_int_status) & + (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR | + ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR); + if (!host_interrupt_status) + return IRQ_NONE; + do { + /* MU post queue interrupts*/ + if (host_interrupt_status & + ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR) + arcmsr_hbaD_postqueue_isr(pACB); + if (host_interrupt_status & + ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR) + arcmsr_hbaD_doorbell_isr(pACB); + host_interrupt_status = readl(pmu->host_int_status); + } while (host_interrupt_status & + (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR | + ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)); + return IRQ_HANDLED; +} + +static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB) +{ + uint32_t host_interrupt_status; + struct MessageUnit_E __iomem *pmu = pACB->pmuE; + + host_interrupt_status = readl(&pmu->host_int_status) & + (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR | + ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR); + if (!host_interrupt_status) + return IRQ_NONE; + do { + /* MU ioctl transfer doorbell interrupts*/ + if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) { + arcmsr_hbaE_doorbell_isr(pACB); + } + /* MU post queue interrupts*/ + if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) { + arcmsr_hbaE_postqueue_isr(pACB); + } + host_interrupt_status = readl(&pmu->host_int_status); + } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR | + ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)); + return IRQ_HANDLED; +} + +static irqreturn_t arcmsr_hbaF_handle_isr(struct AdapterControlBlock *pACB) +{ + uint32_t host_interrupt_status; + struct MessageUnit_F __iomem *phbcmu = pACB->pmuF; + + host_interrupt_status = readl(&phbcmu->host_int_status) & + (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR | + ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR); + if (!host_interrupt_status) + return IRQ_NONE; + do { + /* MU post queue interrupts*/ + if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) + arcmsr_hbaF_postqueue_isr(pACB); + + /* 
MU ioctl transfer doorbell interrupts*/ + if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) + arcmsr_hbaE_doorbell_isr(pACB); + + host_interrupt_status = readl(&phbcmu->host_int_status); + } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR | + ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)); + return IRQ_HANDLED; +} + +static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb) +{ + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: + return arcmsr_hbaA_handle_isr(acb); + case ACB_ADAPTER_TYPE_B: + return arcmsr_hbaB_handle_isr(acb); + case ACB_ADAPTER_TYPE_C: + return arcmsr_hbaC_handle_isr(acb); + case ACB_ADAPTER_TYPE_D: + return arcmsr_hbaD_handle_isr(acb); + case ACB_ADAPTER_TYPE_E: + return arcmsr_hbaE_handle_isr(acb); + case ACB_ADAPTER_TYPE_F: + return arcmsr_hbaF_handle_isr(acb); + default: + return IRQ_NONE; + } +} + +static void arcmsr_iop_parking(struct AdapterControlBlock *acb) +{ + if (acb) { + /* stop adapter background rebuild */ + if (acb->acb_flags & ACB_F_MSG_START_BGRB) { + uint32_t intmask_org; + acb->acb_flags &= ~ACB_F_MSG_START_BGRB; + intmask_org = arcmsr_disable_outbound_ints(acb); + arcmsr_stop_adapter_bgrb(acb); + arcmsr_flush_adapter_cache(acb); + arcmsr_enable_outbound_ints(acb, intmask_org); + } + } +} + + +void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb) +{ + uint32_t i; + + if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { + for (i = 0; i < 15; i++) { + if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { + acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; + acb->rqbuf_getIndex = 0; + acb->rqbuf_putIndex = 0; + arcmsr_iop_message_read(acb); + mdelay(30); + } else if (acb->rqbuf_getIndex != + acb->rqbuf_putIndex) { + acb->rqbuf_getIndex = 0; + acb->rqbuf_putIndex = 0; + mdelay(30); + } else + break; + } + } +} + +static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, + struct scsi_cmnd *cmd) +{ + char *buffer; + unsigned short use_sg; + int retvalue = 0, transfer_len = 0; + unsigned long flags; + struct CMD_MESSAGE_FIELD *pcmdmessagefld; + uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 | + (uint32_t)cmd->cmnd[6] << 16 | + (uint32_t)cmd->cmnd[7] << 8 | + (uint32_t)cmd->cmnd[8]; + struct scatterlist *sg; + + use_sg = scsi_sg_count(cmd); + sg = scsi_sglist(cmd); + buffer = kmap_atomic(sg_page(sg)) + sg->offset; + if (use_sg > 1) { + retvalue = ARCMSR_MESSAGE_FAIL; + goto message_out; + } + transfer_len += sg->length; + if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { + retvalue = ARCMSR_MESSAGE_FAIL; + pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__); + goto message_out; + } + pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer; + switch (controlcode) { + case ARCMSR_MESSAGE_READ_RQBUFFER: { + unsigned char *ver_addr; + uint8_t *ptmpQbuffer; + uint32_t allxfer_len = 0; + ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC); + if (!ver_addr) { + retvalue = ARCMSR_MESSAGE_FAIL; + pr_info("%s: memory not enough!\n", __func__); + goto message_out; + } + ptmpQbuffer = ver_addr; + spin_lock_irqsave(&acb->rqbuffer_lock, flags); + if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) { + unsigned int tail = acb->rqbuf_getIndex; + unsigned int head = acb->rqbuf_putIndex; + unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER); + + allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER); + if (allxfer_len > ARCMSR_API_DATA_BUFLEN) + allxfer_len = ARCMSR_API_DATA_BUFLEN; + + if (allxfer_len <= cnt_to_end) + memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len); + else { + 
memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end); + memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end); + } + acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER; + } + memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, + allxfer_len); + if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { + struct QBUFFER __iomem *prbuffer; + acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; + prbuffer = arcmsr_get_iop_rqbuffer(acb); + if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) + acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; + } + spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); + kfree(ver_addr); + pcmdmessagefld->cmdmessage.Length = allxfer_len; + if (acb->fw_flag == FW_DEADLOCK) + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_OK; + break; + } + case ARCMSR_MESSAGE_WRITE_WQBUFFER: { + unsigned char *ver_addr; + uint32_t user_len; + int32_t cnt2end; + uint8_t *pQbuffer, *ptmpuserbuffer; + + user_len = pcmdmessagefld->cmdmessage.Length; + if (user_len > ARCMSR_API_DATA_BUFLEN) { + retvalue = ARCMSR_MESSAGE_FAIL; + goto message_out; + } + + ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC); + if (!ver_addr) { + retvalue = ARCMSR_MESSAGE_FAIL; + goto message_out; + } + ptmpuserbuffer = ver_addr; + + memcpy(ptmpuserbuffer, + pcmdmessagefld->messagedatabuffer, user_len); + spin_lock_irqsave(&acb->wqbuffer_lock, flags); + if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) { + struct SENSE_DATA *sensebuffer = + (struct SENSE_DATA *)cmd->sense_buffer; + arcmsr_write_ioctldata2iop(acb); + /* has error report sensedata */ + sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS; + sensebuffer->SenseKey = ILLEGAL_REQUEST; + sensebuffer->AdditionalSenseLength = 0x0A; + sensebuffer->AdditionalSenseCode = 0x20; + sensebuffer->Valid = 1; + retvalue = ARCMSR_MESSAGE_FAIL; + } else { + pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex]; + cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex; + if (user_len > cnt2end) { + memcpy(pQbuffer, ptmpuserbuffer, cnt2end); + ptmpuserbuffer += cnt2end; + user_len -= cnt2end; + acb->wqbuf_putIndex = 0; + pQbuffer = acb->wqbuffer; + } + memcpy(pQbuffer, ptmpuserbuffer, user_len); + acb->wqbuf_putIndex += user_len; + acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER; + if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { + acb->acb_flags &= + ~ACB_F_MESSAGE_WQBUFFER_CLEARED; + arcmsr_write_ioctldata2iop(acb); + } + } + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); + kfree(ver_addr); + if (acb->fw_flag == FW_DEADLOCK) + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_OK; + break; + } + case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { + uint8_t *pQbuffer = acb->rqbuffer; + + arcmsr_clear_iop2drv_rqueue_buffer(acb); + spin_lock_irqsave(&acb->rqbuffer_lock, flags); + acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; + acb->rqbuf_getIndex = 0; + acb->rqbuf_putIndex = 0; + memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); + spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); + if (acb->fw_flag == FW_DEADLOCK) + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_OK; + break; + } + case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { + uint8_t *pQbuffer = acb->wqbuffer; + spin_lock_irqsave(&acb->wqbuffer_lock, flags); + acb->acb_flags |= 
(ACB_F_MESSAGE_WQBUFFER_CLEARED | + ACB_F_MESSAGE_WQBUFFER_READED); + acb->wqbuf_getIndex = 0; + acb->wqbuf_putIndex = 0; + memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); + if (acb->fw_flag == FW_DEADLOCK) + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_OK; + break; + } + case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { + uint8_t *pQbuffer; + arcmsr_clear_iop2drv_rqueue_buffer(acb); + spin_lock_irqsave(&acb->rqbuffer_lock, flags); + acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; + acb->rqbuf_getIndex = 0; + acb->rqbuf_putIndex = 0; + pQbuffer = acb->rqbuffer; + memset(pQbuffer, 0, sizeof(struct QBUFFER)); + spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); + spin_lock_irqsave(&acb->wqbuffer_lock, flags); + acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | + ACB_F_MESSAGE_WQBUFFER_READED); + acb->wqbuf_getIndex = 0; + acb->wqbuf_putIndex = 0; + pQbuffer = acb->wqbuffer; + memset(pQbuffer, 0, sizeof(struct QBUFFER)); + spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); + if (acb->fw_flag == FW_DEADLOCK) + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_OK; + break; + } + case ARCMSR_MESSAGE_RETURN_CODE_3F: { + if (acb->fw_flag == FW_DEADLOCK) + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_3F; + break; + } + case ARCMSR_MESSAGE_SAY_HELLO: { + int8_t *hello_string = "Hello! I am ARCMSR"; + if (acb->fw_flag == FW_DEADLOCK) + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_OK; + memcpy(pcmdmessagefld->messagedatabuffer, + hello_string, (int16_t)strlen(hello_string)); + break; + } + case ARCMSR_MESSAGE_SAY_GOODBYE: { + if (acb->fw_flag == FW_DEADLOCK) + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_OK; + arcmsr_iop_parking(acb); + break; + } + case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: { + if (acb->fw_flag == FW_DEADLOCK) + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + else + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_OK; + arcmsr_flush_adapter_cache(acb); + break; + } + default: + retvalue = ARCMSR_MESSAGE_FAIL; + pr_info("%s: unknown controlcode!\n", __func__); + } +message_out: + if (use_sg) { + struct scatterlist *sg = scsi_sglist(cmd); + kunmap_atomic(buffer - sg->offset); + } + return retvalue; +} + +static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb) +{ + struct list_head *head; + struct CommandControlBlock *ccb = NULL; + unsigned long flags; + + spin_lock_irqsave(&acb->ccblist_lock, flags); + head = &acb->ccb_free_list; + if (!list_empty(head)) { + ccb = list_entry(head->next, struct CommandControlBlock, list); + list_del_init(&ccb->list); + }else{ + spin_unlock_irqrestore(&acb->ccblist_lock, flags); + return NULL; + } + spin_unlock_irqrestore(&acb->ccblist_lock, flags); + return ccb; +} + +static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, + struct scsi_cmnd *cmd) +{ + switch (cmd->cmnd[0]) { + case INQUIRY: { + unsigned char inqdata[36]; + char *buffer; + struct scatterlist 
*sg; + + if (cmd->device->lun) { + cmd->result = (DID_TIME_OUT << 16); + scsi_done(cmd); + return; + } + inqdata[0] = TYPE_PROCESSOR; + /* Periph Qualifier & Periph Dev Type */ + inqdata[1] = 0; + /* rem media bit & Dev Type Modifier */ + inqdata[2] = 0; + /* ISO, ECMA, & ANSI versions */ + inqdata[4] = 31; + /* length of additional data */ + memcpy(&inqdata[8], "Areca ", 8); + /* Vendor Identification */ + memcpy(&inqdata[16], "RAID controller ", 16); + /* Product Identification */ + memcpy(&inqdata[32], "R001", 4); /* Product Revision */ + + sg = scsi_sglist(cmd); + buffer = kmap_atomic(sg_page(sg)) + sg->offset; + + memcpy(buffer, inqdata, sizeof(inqdata)); + sg = scsi_sglist(cmd); + kunmap_atomic(buffer - sg->offset); + + scsi_done(cmd); + } + break; + case WRITE_BUFFER: + case READ_BUFFER: { + if (arcmsr_iop_message_xfer(acb, cmd)) + cmd->result = (DID_ERROR << 16); + scsi_done(cmd); + } + break; + default: + scsi_done(cmd); + } +} + +static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + struct CommandControlBlock *ccb; + int target = cmd->device->id; + + if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) { + cmd->result = (DID_NO_CONNECT << 16); + scsi_done(cmd); + return 0; + } + cmd->host_scribble = NULL; + cmd->result = 0; + if (target == 16) { + /* virtual device for iop message transfer */ + arcmsr_handle_virtual_command(acb, cmd); + return 0; + } + ccb = arcmsr_get_freeccb(acb); + if (!ccb) + return SCSI_MLQUEUE_HOST_BUSY; + if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) { + cmd->result = (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT; + scsi_done(cmd); + return 0; + } + arcmsr_post_ccb(acb, ccb); + return 0; +} + +static DEF_SCSI_QCMD(arcmsr_queue_command) + +static int arcmsr_slave_config(struct scsi_device *sdev) +{ + unsigned int dev_timeout; + + dev_timeout = sdev->request_queue->rq_timeout; + if ((cmd_timeout > 0) && ((cmd_timeout * HZ) > dev_timeout)) + blk_queue_rq_timeout(sdev->request_queue, cmd_timeout * HZ); + return 0; +} + +static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer) +{ + int count; + uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model; + uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version; + uint32_t *acb_device_map = (uint32_t *)pACB->device_map; + uint32_t *firm_model = &rwbuffer[15]; + uint32_t *firm_version = &rwbuffer[17]; + uint32_t *device_map = &rwbuffer[21]; + + count = 2; + while (count) { + *acb_firm_model = readl(firm_model); + acb_firm_model++; + firm_model++; + count--; + } + count = 4; + while (count) { + *acb_firm_version = readl(firm_version); + acb_firm_version++; + firm_version++; + count--; + } + count = 4; + while (count) { + *acb_device_map = readl(device_map); + acb_device_map++; + device_map++; + count--; + } + pACB->signature = readl(&rwbuffer[0]); + pACB->firm_request_len = readl(&rwbuffer[1]); + pACB->firm_numbers_queue = readl(&rwbuffer[2]); + pACB->firm_sdram_size = readl(&rwbuffer[3]); + pACB->firm_hd_channels = readl(&rwbuffer[4]); + pACB->firm_cfg_version = readl(&rwbuffer[25]); + pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n", + pACB->host->host_no, + pACB->firm_model, + pACB->firm_version); +} + +static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb) +{ + struct MessageUnit_A __iomem *reg = acb->pmuA; + + arcmsr_wait_firmware_ready(acb); + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); + if 
(!arcmsr_hbaA_wait_msgint_ready(acb)) { + printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ + miscellaneous data' timeout \n", acb->host->host_no); + return false; + } + arcmsr_get_adapter_config(acb, reg->message_rwbuffer); + return true; +} +static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb) +{ + struct MessageUnit_B *reg = acb->pmuB; + + arcmsr_wait_firmware_ready(acb); + writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell); + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { + printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", acb->host->host_no); + return false; + } + writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell); + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { + printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ + miscellaneous data' timeout \n", acb->host->host_no); + return false; + } + arcmsr_get_adapter_config(acb, reg->message_rwbuffer); + return true; +} + +static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB) +{ + uint32_t intmask_org; + struct MessageUnit_C __iomem *reg = pACB->pmuC; + + /* disable all outbound interrupt */ + intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */ + writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask); + /* wait firmware ready */ + arcmsr_wait_firmware_ready(pACB); + /* post "get config" instruction */ + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); + writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); + /* wait message ready */ + if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { + printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ + miscellaneous data' timeout \n", pACB->host->host_no); + return false; + } + arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer); + return true; +} + +static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb) +{ + struct MessageUnit_D *reg = acb->pmuD; + + if (readl(acb->pmuD->outbound_doorbell) & + ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) { + writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, + acb->pmuD->outbound_doorbell);/*clear interrupt*/ + } + arcmsr_wait_firmware_ready(acb); + /* post "get config" instruction */ + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0); + /* wait message ready */ + if (!arcmsr_hbaD_wait_msgint_ready(acb)) { + pr_notice("arcmsr%d: wait get adapter firmware " + "miscellaneous data timeout\n", acb->host->host_no); + return false; + } + arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer); + return true; +} + +static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_E __iomem *reg = pACB->pmuE; + uint32_t intmask_org; + + /* disable all outbound interrupt */ + intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */ + writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask); + /* wait firmware ready */ + arcmsr_wait_firmware_ready(pACB); + mdelay(20); + /* post "get config" instruction */ + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); + + pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(pACB->out_doorbell, &reg->iobound_doorbell); + /* wait message ready */ + if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { + pr_notice("arcmsr%d: wait get adapter firmware " + "miscellaneous data timeout\n", pACB->host->host_no); + return false; + } + arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer); + return true; +} + +static bool arcmsr_hbaF_get_config(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_F __iomem *reg = pACB->pmuF; +
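The "get config" replies above are all parsed by arcmsr_get_adapter_config(), which copies the firmware model, version and device map out of the message buffer one 32-bit word at a time. Below is a minimal, self-contained user-space sketch of that dword-copy pattern only; the fake_rwbuffer name, its contents, and the main() harness are invented for illustration, and the real driver reads each word with readl() because the source is __iomem space.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* invented stand-in for the message_rwbuffer words holding the model string */
static const uint32_t fake_rwbuffer[32] = {
	[15] = 0x39303831,	/* "1809" packed little-endian (made-up bytes) */
	[16] = 0x20202058,	/* "X   " */
};

int main(void)
{
	char firm_model[12] = { 0 };
	const uint32_t *src = &fake_rwbuffer[15];
	int i;

	for (i = 0; i < 2; i++) {	/* the driver copies 2 dwords for the model */
		uint32_t w = src[i];	/* readl(src) in the real driver */
		memcpy(firm_model + 4 * i, &w, sizeof(w));
	}
	printf("model: %.8s\n", firm_model);
	return 0;
}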
uint32_t intmask_org; + + /* disable all outbound interrupt */ + intmask_org = readl(®->host_int_mask); /* disable outbound message0 int */ + writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, ®->host_int_mask); + /* wait firmware ready */ + arcmsr_wait_firmware_ready(pACB); + /* post "get config" instruction */ + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); + + pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(pACB->out_doorbell, ®->iobound_doorbell); + /* wait message ready */ + if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { + pr_notice("arcmsr%d: wait get adapter firmware miscellaneous data timeout\n", + pACB->host->host_no); + return false; + } + arcmsr_get_adapter_config(pACB, pACB->msgcode_rwbuffer); + return true; +} + +static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) +{ + bool rtn = false; + + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: + rtn = arcmsr_hbaA_get_config(acb); + break; + case ACB_ADAPTER_TYPE_B: + rtn = arcmsr_hbaB_get_config(acb); + break; + case ACB_ADAPTER_TYPE_C: + rtn = arcmsr_hbaC_get_config(acb); + break; + case ACB_ADAPTER_TYPE_D: + rtn = arcmsr_hbaD_get_config(acb); + break; + case ACB_ADAPTER_TYPE_E: + rtn = arcmsr_hbaE_get_config(acb); + break; + case ACB_ADAPTER_TYPE_F: + rtn = arcmsr_hbaF_get_config(acb); + break; + default: + break; + } + acb->maxOutstanding = acb->firm_numbers_queue - 1; + if (acb->host->can_queue >= acb->firm_numbers_queue) + acb->host->can_queue = acb->maxOutstanding; + else + acb->maxOutstanding = acb->host->can_queue; + acb->maxFreeCCB = acb->host->can_queue; + if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM) + acb->maxFreeCCB += 64; + return rtn; +} + +static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb, + struct CommandControlBlock *poll_ccb) +{ + struct MessageUnit_A __iomem *reg = acb->pmuA; + struct CommandControlBlock *ccb; + struct ARCMSR_CDB *arcmsr_cdb; + uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0; + int rtn; + bool error; + unsigned long ccb_cdb_phy; + +polling_hba_ccb_retry: + poll_count++; + outbound_intstatus = readl(®->outbound_intstatus) & acb->outbound_int_enable; + writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/ + while (1) { + if ((flag_ccb = readl(®->outbound_queueport)) == 0xFFFFFFFF) { + if (poll_ccb_done){ + rtn = SUCCESS; + break; + }else { + msleep(25); + if (poll_count > 100){ + rtn = FAILED; + break; + } + goto polling_hba_ccb_retry; + } + } + ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff; + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); + ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); + poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0; + if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { + if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) { + printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'" + " poll command abort successfully \n" + , acb->host->host_no + , ccb->pcmd->device->id + , (u32)ccb->pcmd->device->lun + , ccb); + ccb->pcmd->result = DID_ABORT << 16; + arcmsr_ccb_complete(ccb); + continue; + } + printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb" + " command done ccb = '0x%p'" + "ccboutstandingcount = %d \n" + , acb->host->host_no + , ccb + , atomic_read(&acb->ccboutstandingcount)); + continue; + } + error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? 
true : false; + arcmsr_report_ccb_state(acb, ccb, error); + } + return rtn; +} + +static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb, + struct CommandControlBlock *poll_ccb) +{ + struct MessageUnit_B *reg = acb->pmuB; + struct ARCMSR_CDB *arcmsr_cdb; + struct CommandControlBlock *ccb; + uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0; + int index, rtn; + bool error; + unsigned long ccb_cdb_phy; + +polling_hbb_ccb_retry: + poll_count++; + /* clear doorbell interrupt */ + writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); + while(1){ + index = reg->doneq_index; + flag_ccb = reg->done_qbuffer[index]; + if (flag_ccb == 0) { + if (poll_ccb_done){ + rtn = SUCCESS; + break; + }else { + msleep(25); + if (poll_count > 100){ + rtn = FAILED; + break; + } + goto polling_hbb_ccb_retry; + } + } + reg->done_qbuffer[index] = 0; + index++; + /*if last index number set it to 0 */ + index %= ARCMSR_MAX_HBB_POSTQUEUE; + reg->doneq_index = index; + /* check if command done with no error*/ + ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff; + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); + ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); + poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0; + if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { + if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) { + printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'" + " poll command abort successfully \n" + ,acb->host->host_no + ,ccb->pcmd->device->id + ,(u32)ccb->pcmd->device->lun + ,ccb); + ccb->pcmd->result = DID_ABORT << 16; + arcmsr_ccb_complete(ccb); + continue; + } + printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb" + " command done ccb = '0x%p'" + "ccboutstandingcount = %d \n" + , acb->host->host_no + , ccb + , atomic_read(&acb->ccboutstandingcount)); + continue; + } + error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; + arcmsr_report_ccb_state(acb, ccb, error); + } + return rtn; +} + +static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb, + struct CommandControlBlock *poll_ccb) +{ + struct MessageUnit_C __iomem *reg = acb->pmuC; + uint32_t flag_ccb; + struct ARCMSR_CDB *arcmsr_cdb; + bool error; + struct CommandControlBlock *pCCB; + uint32_t poll_ccb_done = 0, poll_count = 0; + int rtn; + unsigned long ccb_cdb_phy; + +polling_hbc_ccb_retry: + poll_count++; + while (1) { + if ((readl(®->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) { + if (poll_ccb_done) { + rtn = SUCCESS; + break; + } else { + msleep(25); + if (poll_count > 100) { + rtn = FAILED; + break; + } + goto polling_hbc_ccb_retry; + } + } + flag_ccb = readl(®->outbound_queueport_low); + ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); + pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); + poll_ccb_done |= (pCCB == poll_ccb) ? 
1 : 0; + /* check ifcommand done with no error*/ + if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { + if (pCCB->startdone == ARCMSR_CCB_ABORTED) { + printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'" + " poll command abort successfully \n" + , acb->host->host_no + , pCCB->pcmd->device->id + , (u32)pCCB->pcmd->device->lun + , pCCB); + pCCB->pcmd->result = DID_ABORT << 16; + arcmsr_ccb_complete(pCCB); + continue; + } + printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb" + " command done ccb = '0x%p'" + "ccboutstandingcount = %d \n" + , acb->host->host_no + , pCCB + , atomic_read(&acb->ccboutstandingcount)); + continue; + } + error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; + arcmsr_report_ccb_state(acb, pCCB, error); + } + return rtn; +} + +static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb, + struct CommandControlBlock *poll_ccb) +{ + bool error; + uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb; + int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle; + unsigned long flags, ccb_cdb_phy; + struct ARCMSR_CDB *arcmsr_cdb; + struct CommandControlBlock *pCCB; + struct MessageUnit_D *pmu = acb->pmuD; + +polling_hbaD_ccb_retry: + poll_count++; + while (1) { + spin_lock_irqsave(&acb->doneq_lock, flags); + outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1; + doneq_index = pmu->doneq_index; + if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) { + spin_unlock_irqrestore(&acb->doneq_lock, flags); + if (poll_ccb_done) { + rtn = SUCCESS; + break; + } else { + msleep(25); + if (poll_count > 40) { + rtn = FAILED; + break; + } + goto polling_hbaD_ccb_retry; + } + } + toggle = doneq_index & 0x4000; + index_stripped = (doneq_index & 0xFFF) + 1; + index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; + pmu->doneq_index = index_stripped ? (index_stripped | toggle) : + ((toggle ^ 0x4000) + 1); + doneq_index = pmu->doneq_index; + spin_unlock_irqrestore(&acb->doneq_lock, flags); + flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow; + ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + + ccb_cdb_phy); + pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, + arcmsr_cdb); + poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0; + if ((pCCB->acb != acb) || + (pCCB->startdone != ARCMSR_CCB_START)) { + if (pCCB->startdone == ARCMSR_CCB_ABORTED) { + pr_notice("arcmsr%d: scsi id = %d " + "lun = %d ccb = '0x%p' poll command " + "abort successfully\n" + , acb->host->host_no + , pCCB->pcmd->device->id + , (u32)pCCB->pcmd->device->lun + , pCCB); + pCCB->pcmd->result = DID_ABORT << 16; + arcmsr_ccb_complete(pCCB); + continue; + } + pr_notice("arcmsr%d: polling an illegal " + "ccb command done ccb = '0x%p' " + "ccboutstandingcount = %d\n" + , acb->host->host_no + , pCCB + , atomic_read(&acb->ccboutstandingcount)); + continue; + } + error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) + ? 
true : false; + arcmsr_report_ccb_state(acb, pCCB, error); + } + return rtn; +} + +static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb, + struct CommandControlBlock *poll_ccb) +{ + bool error; + uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index; + uint16_t cmdSMID; + unsigned long flags; + int rtn; + struct CommandControlBlock *pCCB; + struct MessageUnit_E __iomem *reg = acb->pmuE; + + polling_hbaC_ccb_retry: + poll_count++; + while (1) { + spin_lock_irqsave(&acb->doneq_lock, flags); + doneq_index = acb->doneq_index; + if ((readl(®->reply_post_producer_index) & 0xFFFF) == + doneq_index) { + spin_unlock_irqrestore(&acb->doneq_lock, flags); + if (poll_ccb_done) { + rtn = SUCCESS; + break; + } else { + msleep(25); + if (poll_count > 40) { + rtn = FAILED; + break; + } + goto polling_hbaC_ccb_retry; + } + } + cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID; + doneq_index++; + if (doneq_index >= acb->completionQ_entry) + doneq_index = 0; + acb->doneq_index = doneq_index; + spin_unlock_irqrestore(&acb->doneq_lock, flags); + pCCB = acb->pccb_pool[cmdSMID]; + poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0; + /* check if command done with no error*/ + if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { + if (pCCB->startdone == ARCMSR_CCB_ABORTED) { + pr_notice("arcmsr%d: scsi id = %d " + "lun = %d ccb = '0x%p' poll command " + "abort successfully\n" + , acb->host->host_no + , pCCB->pcmd->device->id + , (u32)pCCB->pcmd->device->lun + , pCCB); + pCCB->pcmd->result = DID_ABORT << 16; + arcmsr_ccb_complete(pCCB); + continue; + } + pr_notice("arcmsr%d: polling an illegal " + "ccb command done ccb = '0x%p' " + "ccboutstandingcount = %d\n" + , acb->host->host_no + , pCCB + , atomic_read(&acb->ccboutstandingcount)); + continue; + } + error = (acb->pCompletionQ[doneq_index].cmdFlag & + ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? 
true : false; + arcmsr_report_ccb_state(acb, pCCB, error); + } + writel(doneq_index, ®->reply_post_consumer_index); + return rtn; +} + +static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, + struct CommandControlBlock *poll_ccb) +{ + int rtn = 0; + switch (acb->adapter_type) { + + case ACB_ADAPTER_TYPE_A: + rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb); + break; + case ACB_ADAPTER_TYPE_B: + rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb); + break; + case ACB_ADAPTER_TYPE_C: + rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb); + break; + case ACB_ADAPTER_TYPE_D: + rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb); + break; + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: + rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb); + break; + } + return rtn; +} + +static void arcmsr_set_iop_datetime(struct timer_list *t) +{ + struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer); + unsigned int next_time; + struct tm tm; + + union { + struct { + uint16_t signature; + uint8_t year; + uint8_t month; + uint8_t date; + uint8_t hour; + uint8_t minute; + uint8_t second; + } a; + struct { + uint32_t msg_time[2]; + } b; + } datetime; + + time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm); + + datetime.a.signature = 0x55AA; + datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */ + datetime.a.month = tm.tm_mon; + datetime.a.date = tm.tm_mday; + datetime.a.hour = tm.tm_hour; + datetime.a.minute = tm.tm_min; + datetime.a.second = tm.tm_sec; + + switch (pacb->adapter_type) { + case ACB_ADAPTER_TYPE_A: { + struct MessageUnit_A __iomem *reg = pacb->pmuA; + writel(datetime.b.msg_time[0], ®->message_rwbuffer[0]); + writel(datetime.b.msg_time[1], ®->message_rwbuffer[1]); + writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0); + break; + } + case ACB_ADAPTER_TYPE_B: { + uint32_t __iomem *rwbuffer; + struct MessageUnit_B *reg = pacb->pmuB; + rwbuffer = reg->message_rwbuffer; + writel(datetime.b.msg_time[0], rwbuffer++); + writel(datetime.b.msg_time[1], rwbuffer++); + writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell); + break; + } + case ACB_ADAPTER_TYPE_C: { + struct MessageUnit_C __iomem *reg = pacb->pmuC; + writel(datetime.b.msg_time[0], ®->msgcode_rwbuffer[0]); + writel(datetime.b.msg_time[1], ®->msgcode_rwbuffer[1]); + writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0); + writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); + break; + } + case ACB_ADAPTER_TYPE_D: { + uint32_t __iomem *rwbuffer; + struct MessageUnit_D *reg = pacb->pmuD; + rwbuffer = reg->msgcode_rwbuffer; + writel(datetime.b.msg_time[0], rwbuffer++); + writel(datetime.b.msg_time[1], rwbuffer++); + writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0); + break; + } + case ACB_ADAPTER_TYPE_E: { + struct MessageUnit_E __iomem *reg = pacb->pmuE; + writel(datetime.b.msg_time[0], ®->msgcode_rwbuffer[0]); + writel(datetime.b.msg_time[1], ®->msgcode_rwbuffer[1]); + writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0); + pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(pacb->out_doorbell, ®->iobound_doorbell); + break; + } + case ACB_ADAPTER_TYPE_F: { + struct MessageUnit_F __iomem *reg = pacb->pmuF; + + pacb->msgcode_rwbuffer[0] = datetime.b.msg_time[0]; + pacb->msgcode_rwbuffer[1] = datetime.b.msg_time[1]; + writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0); + pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(pacb->out_doorbell, ®->iobound_doorbell); + break; + } + } + if 
(sys_tz.tz_minuteswest) + next_time = ARCMSR_HOURS; + else + next_time = ARCMSR_MINUTES; + mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time)); +} + +static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) +{ + uint32_t cdb_phyaddr, cdb_phyaddr_hi32; + dma_addr_t dma_coherent_handle; + + /* + ******************************************************************** + ** here we need to tell iop 331 our freeccb.HighPart + ** if freeccb.HighPart is not zero + ******************************************************************** + */ + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_B: + case ACB_ADAPTER_TYPE_D: + dma_coherent_handle = acb->dma_coherent_handle2; + break; + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: + dma_coherent_handle = acb->dma_coherent_handle + + offsetof(struct CommandControlBlock, arcmsr_cdb); + break; + default: + dma_coherent_handle = acb->dma_coherent_handle; + break; + } + cdb_phyaddr = lower_32_bits(dma_coherent_handle); + cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle); + acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32; + acb->cdb_phyadd_hipart = ((uint64_t)cdb_phyaddr_hi32) << 32; + /* + *********************************************************************** + ** if adapter type B, set window of "post command Q" + *********************************************************************** + */ + switch (acb->adapter_type) { + + case ACB_ADAPTER_TYPE_A: { + if (cdb_phyaddr_hi32 != 0) { + struct MessageUnit_A __iomem *reg = acb->pmuA; + writel(ARCMSR_SIGNATURE_SET_CONFIG, \ + ®->message_rwbuffer[0]); + writel(cdb_phyaddr_hi32, ®->message_rwbuffer[1]); + writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \ + ®->inbound_msgaddr0); + if (!arcmsr_hbaA_wait_msgint_ready(acb)) { + printk(KERN_NOTICE "arcmsr%d: ""set ccb high \ + part physical address timeout\n", + acb->host->host_no); + return 1; + } + } + } + break; + + case ACB_ADAPTER_TYPE_B: { + uint32_t __iomem *rwbuffer; + + struct MessageUnit_B *reg = acb->pmuB; + reg->postq_index = 0; + reg->doneq_index = 0; + writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell); + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { + printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n", \ + acb->host->host_no); + return 1; + } + rwbuffer = reg->message_rwbuffer; + /* driver "set config" signature */ + writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++); + /* normal should be zero */ + writel(cdb_phyaddr_hi32, rwbuffer++); + /* postQ size (256 + 8)*4 */ + writel(cdb_phyaddr, rwbuffer++); + /* doneQ size (256 + 8)*4 */ + writel(cdb_phyaddr + 1056, rwbuffer++); + /* ccb maxQ size must be --> [(256 + 8)*4]*/ + writel(1056, rwbuffer); + + writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell); + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { + printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \ + timeout \n",acb->host->host_no); + return 1; + } + writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell); + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { + pr_err("arcmsr%d: can't set driver mode.\n", + acb->host->host_no); + return 1; + } + } + break; + case ACB_ADAPTER_TYPE_C: { + struct MessageUnit_C __iomem *reg = acb->pmuC; + + printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n", + acb->adapter_index, cdb_phyaddr_hi32); + writel(ARCMSR_SIGNATURE_SET_CONFIG, ®->msgcode_rwbuffer[0]); + writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[1]); + writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0); + writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); + if (!arcmsr_hbaC_wait_msgint_ready(acb)) { 
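arcmsr_iop_confirm() above hands the controller the CCB pool's 64-bit DMA address as two 32-bit halves (lower_32_bits()/upper_32_bits()) and keeps the shifted high half in cdb_phyadd_hipart, so that completion entries, which only carry low address bits, can be turned back into full addresses. A minimal user-space sketch of that split-and-recombine arithmetic; the lo32/hi32 helpers and the address value are invented stand-ins for the kernel helpers.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t lo32(uint64_t v) { return (uint32_t)v; }
static uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t dma_handle = 0x0000000123450000ULL;	/* invented CCB pool address */
	uint32_t cdb_phyaddr = lo32(dma_handle);
	uint32_t cdb_phyaddr_hi32 = hi32(dma_handle);
	uint64_t hipart = (uint64_t)cdb_phyaddr_hi32 << 32;	/* what cdb_phyadd_hipart holds */
	uint32_t flag_ccb_low = cdb_phyaddr + 0x200;	/* pretend low bits from a completion */
	uint64_t full = hipart | flag_ccb_low;	/* rebuild the full CCB address */

	printf("low=0x%08x hi=0x%08x full=0x%016" PRIx64 "\n",
	       cdb_phyaddr, cdb_phyaddr_hi32, full);
	return 0;
}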
+ printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \ + timeout \n", acb->host->host_no); + return 1; + } + } + break; + case ACB_ADAPTER_TYPE_D: { + uint32_t __iomem *rwbuffer; + struct MessageUnit_D *reg = acb->pmuD; + reg->postq_index = 0; + reg->doneq_index = 0; + rwbuffer = reg->msgcode_rwbuffer; + writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++); + writel(cdb_phyaddr_hi32, rwbuffer++); + writel(cdb_phyaddr, rwbuffer++); + writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE * + sizeof(struct InBound_SRB)), rwbuffer++); + writel(0x100, rwbuffer); + writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0); + if (!arcmsr_hbaD_wait_msgint_ready(acb)) { + pr_notice("arcmsr%d: 'set command Q window' timeout\n", + acb->host->host_no); + return 1; + } + } + break; + case ACB_ADAPTER_TYPE_E: { + struct MessageUnit_E __iomem *reg = acb->pmuE; + writel(ARCMSR_SIGNATURE_SET_CONFIG, ®->msgcode_rwbuffer[0]); + writel(ARCMSR_SIGNATURE_1884, ®->msgcode_rwbuffer[1]); + writel(cdb_phyaddr, ®->msgcode_rwbuffer[2]); + writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[3]); + writel(acb->ccbsize, ®->msgcode_rwbuffer[4]); + writel(lower_32_bits(acb->dma_coherent_handle2), ®->msgcode_rwbuffer[5]); + writel(upper_32_bits(acb->dma_coherent_handle2), ®->msgcode_rwbuffer[6]); + writel(acb->ioqueue_size, ®->msgcode_rwbuffer[7]); + writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0); + acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(acb->out_doorbell, ®->iobound_doorbell); + if (!arcmsr_hbaE_wait_msgint_ready(acb)) { + pr_notice("arcmsr%d: 'set command Q window' timeout \n", + acb->host->host_no); + return 1; + } + } + break; + case ACB_ADAPTER_TYPE_F: { + struct MessageUnit_F __iomem *reg = acb->pmuF; + + acb->msgcode_rwbuffer[0] = ARCMSR_SIGNATURE_SET_CONFIG; + acb->msgcode_rwbuffer[1] = ARCMSR_SIGNATURE_1886; + acb->msgcode_rwbuffer[2] = cdb_phyaddr; + acb->msgcode_rwbuffer[3] = cdb_phyaddr_hi32; + acb->msgcode_rwbuffer[4] = acb->ccbsize; + acb->msgcode_rwbuffer[5] = lower_32_bits(acb->dma_coherent_handle2); + acb->msgcode_rwbuffer[6] = upper_32_bits(acb->dma_coherent_handle2); + acb->msgcode_rwbuffer[7] = acb->completeQ_size; + writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0); + acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(acb->out_doorbell, ®->iobound_doorbell); + if (!arcmsr_hbaE_wait_msgint_ready(acb)) { + pr_notice("arcmsr%d: 'set command Q window' timeout\n", + acb->host->host_no); + return 1; + } + } + break; + } + return 0; +} + +static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) +{ + uint32_t firmware_state = 0; + switch (acb->adapter_type) { + + case ACB_ADAPTER_TYPE_A: { + struct MessageUnit_A __iomem *reg = acb->pmuA; + do { + if (!(acb->acb_flags & ACB_F_IOP_INITED)) + msleep(20); + firmware_state = readl(®->outbound_msgaddr1); + } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0); + } + break; + + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; + do { + if (!(acb->acb_flags & ACB_F_IOP_INITED)) + msleep(20); + firmware_state = readl(reg->iop2drv_doorbell); + } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0); + writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell); + } + break; + case ACB_ADAPTER_TYPE_C: { + struct MessageUnit_C __iomem *reg = acb->pmuC; + do { + if (!(acb->acb_flags & ACB_F_IOP_INITED)) + msleep(20); + firmware_state = readl(®->outbound_msgaddr1); + } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0); + } + break; + case 
ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + do { + if (!(acb->acb_flags & ACB_F_IOP_INITED)) + msleep(20); + firmware_state = readl(reg->outbound_msgaddr1); + } while ((firmware_state & + ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0); + } + break; + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: { + struct MessageUnit_E __iomem *reg = acb->pmuE; + do { + if (!(acb->acb_flags & ACB_F_IOP_INITED)) + msleep(20); + firmware_state = readl(®->outbound_msgaddr1); + } while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0); + } + break; + } +} + +static void arcmsr_request_device_map(struct timer_list *t) +{ + struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer); + if (acb->acb_flags & (ACB_F_MSG_GET_CONFIG | ACB_F_BUS_RESET | ACB_F_ABORT)) { + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); + } else { + acb->fw_flag = FW_NORMAL; + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: { + struct MessageUnit_A __iomem *reg = acb->pmuA; + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); + break; + } + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; + writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell); + break; + } + case ACB_ADAPTER_TYPE_C: { + struct MessageUnit_C __iomem *reg = acb->pmuC; + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); + writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); + break; + } + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0); + break; + } + case ACB_ADAPTER_TYPE_E: { + struct MessageUnit_E __iomem *reg = acb->pmuE; + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); + acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(acb->out_doorbell, ®->iobound_doorbell); + break; + } + case ACB_ADAPTER_TYPE_F: { + struct MessageUnit_F __iomem *reg = acb->pmuF; + uint32_t outMsg1 = readl(®->outbound_msgaddr1); + + if (!(outMsg1 & ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK) || + (outMsg1 & ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE)) + goto nxt6s; + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); + acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(acb->out_doorbell, ®->iobound_doorbell); + break; + } + default: + return; + } + acb->acb_flags |= ACB_F_MSG_GET_CONFIG; +nxt6s: + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); + } +} + +static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb) +{ + struct MessageUnit_A __iomem *reg = acb->pmuA; + acb->acb_flags |= ACB_F_MSG_START_BGRB; + writel(ARCMSR_INBOUND_MESG0_START_BGRB, ®->inbound_msgaddr0); + if (!arcmsr_hbaA_wait_msgint_ready(acb)) { + printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ + rebuild' timeout \n", acb->host->host_no); + } +} + +static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb) +{ + struct MessageUnit_B *reg = acb->pmuB; + acb->acb_flags |= ACB_F_MSG_START_BGRB; + writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell); + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { + printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ + rebuild' timeout \n",acb->host->host_no); + } +} + +static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_C __iomem *phbcmu = pACB->pmuC; + pACB->acb_flags |= ACB_F_MSG_START_BGRB; + writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0); + writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell); + if 
(!arcmsr_hbaC_wait_msgint_ready(pACB)) { + printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ + rebuild' timeout \n", pACB->host->host_no); + } + return; +} + +static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_D *pmu = pACB->pmuD; + + pACB->acb_flags |= ACB_F_MSG_START_BGRB; + writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0); + if (!arcmsr_hbaD_wait_msgint_ready(pACB)) { + pr_notice("arcmsr%d: wait 'start adapter " + "background rebuild' timeout\n", pACB->host->host_no); + } +} + +static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_E __iomem *pmu = pACB->pmuE; + + pACB->acb_flags |= ACB_F_MSG_START_BGRB; + writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0); + pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(pACB->out_doorbell, &pmu->iobound_doorbell); + if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { + pr_notice("arcmsr%d: wait 'start adapter " + "background rebuild' timeout \n", pACB->host->host_no); + } +} + +static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) +{ + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: + arcmsr_hbaA_start_bgrb(acb); + break; + case ACB_ADAPTER_TYPE_B: + arcmsr_hbaB_start_bgrb(acb); + break; + case ACB_ADAPTER_TYPE_C: + arcmsr_hbaC_start_bgrb(acb); + break; + case ACB_ADAPTER_TYPE_D: + arcmsr_hbaD_start_bgrb(acb); + break; + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: + arcmsr_hbaE_start_bgrb(acb); + break; + } +} + +static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb) +{ + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: { + struct MessageUnit_A __iomem *reg = acb->pmuA; + uint32_t outbound_doorbell; + /* empty doorbell Qbuffer if door bell ringed */ + outbound_doorbell = readl(®->outbound_doorbell); + /*clear doorbell interrupt */ + writel(outbound_doorbell, ®->outbound_doorbell); + writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); + } + break; + + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; + uint32_t outbound_doorbell, i; + writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); + writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell); + /* let IOP know data has been read */ + for(i=0; i < 200; i++) { + msleep(20); + outbound_doorbell = readl(reg->iop2drv_doorbell); + if( outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { + writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); + writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell); + } else + break; + } + } + break; + case ACB_ADAPTER_TYPE_C: { + struct MessageUnit_C __iomem *reg = acb->pmuC; + uint32_t outbound_doorbell, i; + /* empty doorbell Qbuffer if door bell ringed */ + outbound_doorbell = readl(®->outbound_doorbell); + writel(outbound_doorbell, ®->outbound_doorbell_clear); + writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell); + for (i = 0; i < 200; i++) { + msleep(20); + outbound_doorbell = readl(®->outbound_doorbell); + if (outbound_doorbell & + ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) { + writel(outbound_doorbell, + ®->outbound_doorbell_clear); + writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, + ®->inbound_doorbell); + } else + break; + } + } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg = acb->pmuD; + uint32_t outbound_doorbell, i; + /* empty doorbell Qbuffer if door bell ringed */ + outbound_doorbell = readl(reg->outbound_doorbell); + writel(outbound_doorbell, reg->outbound_doorbell); + 
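The doorbell-drain loops in arcmsr_clear_doorbell_queue_buffer() all follow the same shape: read the outbound doorbell, acknowledge whatever was posted, and retry a bounded number of times until the "data write ok" bit stays clear. A minimal sketch of that bounded drain loop is below; FAKE_DATA_WRITE_OK, fake_read_doorbell() and fake_ack_doorbell() are invented stand-ins for the per-chip register bits and accessors, and the sleep between polls is omitted.

#include <stdint.h>
#include <stdio.h>

#define FAKE_DATA_WRITE_OK 0x00000002u	/* invented stand-in for an IOP2DRV status bit */

static uint32_t fake_read_doorbell(void)
{
	static int hits = 3;	/* pretend the IOP posts data three more times */
	return hits-- > 0 ? FAKE_DATA_WRITE_OK : 0;
}

static void fake_ack_doorbell(uint32_t v)
{
	(void)v;	/* the real hardware wants write-1-to-clear plus a DATA_READ ack */
}

int main(void)
{
	int i;

	for (i = 0; i < 200; i++) {	/* bounded retries, like the 200-pass loop above */
		uint32_t db = fake_read_doorbell();
		if (!(db & FAKE_DATA_WRITE_OK))
			break;		/* nothing left to acknowledge */
		fake_ack_doorbell(db);
	}
	puts("doorbell queue drained");
	return 0;
}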
writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ, + reg->inbound_doorbell); + for (i = 0; i < 200; i++) { + msleep(20); + outbound_doorbell = readl(reg->outbound_doorbell); + if (outbound_doorbell & + ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) { + writel(outbound_doorbell, + reg->outbound_doorbell); + writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ, + reg->inbound_doorbell); + } else + break; + } + } + break; + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: { + struct MessageUnit_E __iomem *reg = acb->pmuE; + uint32_t i, tmp; + + acb->in_doorbell = readl(®->iobound_doorbell); + writel(0, ®->host_int_status); /*clear interrupt*/ + acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK; + writel(acb->out_doorbell, ®->iobound_doorbell); + for(i=0; i < 200; i++) { + msleep(20); + tmp = acb->in_doorbell; + acb->in_doorbell = readl(®->iobound_doorbell); + if((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) { + writel(0, ®->host_int_status); /*clear interrupt*/ + acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK; + writel(acb->out_doorbell, ®->iobound_doorbell); + } else + break; + } + } + break; + } +} + +static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) +{ + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: + return; + case ACB_ADAPTER_TYPE_B: + { + struct MessageUnit_B *reg = acb->pmuB; + writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell); + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { + printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT"); + return; + } + } + break; + case ACB_ADAPTER_TYPE_C: + return; + } + return; +} + +static void arcmsr_hardware_reset(struct AdapterControlBlock *acb) +{ + uint8_t value[64]; + int i, count = 0; + struct MessageUnit_A __iomem *pmuA = acb->pmuA; + struct MessageUnit_C __iomem *pmuC = acb->pmuC; + struct MessageUnit_D *pmuD = acb->pmuD; + + /* backup pci config data */ + printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no); + for (i = 0; i < 64; i++) { + pci_read_config_byte(acb->pdev, i, &value[i]); + } + /* hardware reset signal */ + if (acb->dev_id == 0x1680) { + writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]); + } else if (acb->dev_id == 0x1880) { + do { + count++; + writel(0xF, &pmuC->write_sequence); + writel(0x4, &pmuC->write_sequence); + writel(0xB, &pmuC->write_sequence); + writel(0x2, &pmuC->write_sequence); + writel(0x7, &pmuC->write_sequence); + writel(0xD, &pmuC->write_sequence); + } while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5)); + writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic); + } else if (acb->dev_id == 0x1884) { + struct MessageUnit_E __iomem *pmuE = acb->pmuE; + do { + count++; + writel(0x4, &pmuE->write_sequence_3xxx); + writel(0xB, &pmuE->write_sequence_3xxx); + writel(0x2, &pmuE->write_sequence_3xxx); + writel(0x7, &pmuE->write_sequence_3xxx); + writel(0xD, &pmuE->write_sequence_3xxx); + mdelay(10); + } while (((readl(&pmuE->host_diagnostic_3xxx) & + ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5)); + writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx); + } else if (acb->dev_id == 0x1214) { + writel(0x20, pmuD->reset_request); + } else { + pci_write_config_byte(acb->pdev, 0x84, 0x20); + } + msleep(2000); + /* write back pci config data */ + for (i = 0; i < 64; i++) { + pci_write_config_byte(acb->pdev, i, value[i]); + } + msleep(1000); + return; +} + +static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb) +{ + bool rtn = true; + + switch(acb->adapter_type) { + 
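/* Each branch below samples an adapter-specific status register: types A and B
 * report the reset as still in progress while their firmware-OK bit is clear,
 * types C, E and F while the reset indication in the host diagnostic register
 * is still set, and type D while bit 7 of sample_at_reset has not yet come up. */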
case ACB_ADAPTER_TYPE_A:{ + struct MessageUnit_A __iomem *reg = acb->pmuA; + rtn = ((readl(®->outbound_msgaddr1) & + ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false; + } + break; + case ACB_ADAPTER_TYPE_B:{ + struct MessageUnit_B *reg = acb->pmuB; + rtn = ((readl(reg->iop2drv_doorbell) & + ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false; + } + break; + case ACB_ADAPTER_TYPE_C:{ + struct MessageUnit_C __iomem *reg = acb->pmuC; + rtn = (readl(®->host_diagnostic) & 0x04) ? true : false; + } + break; + case ACB_ADAPTER_TYPE_D:{ + struct MessageUnit_D *reg = acb->pmuD; + rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ? + true : false; + } + break; + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F:{ + struct MessageUnit_E __iomem *reg = acb->pmuE; + rtn = (readl(®->host_diagnostic_3xxx) & + ARCMSR_ARC188X_RESET_ADAPTER) ? true : false; + } + break; + } + return rtn; +} + +static void arcmsr_iop_init(struct AdapterControlBlock *acb) +{ + uint32_t intmask_org; + /* disable all outbound interrupt */ + intmask_org = arcmsr_disable_outbound_ints(acb); + arcmsr_wait_firmware_ready(acb); + arcmsr_iop_confirm(acb); + /*start background rebuild*/ + arcmsr_start_adapter_bgrb(acb); + /* empty doorbell Qbuffer if door bell ringed */ + arcmsr_clear_doorbell_queue_buffer(acb); + arcmsr_enable_eoi_mode(acb); + /* enable outbound Post Queue,outbound doorbell Interrupt */ + arcmsr_enable_outbound_ints(acb, intmask_org); + acb->acb_flags |= ACB_F_IOP_INITED; +} + +static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb) +{ + struct CommandControlBlock *ccb; + uint32_t intmask_org; + uint8_t rtnval = 0x00; + int i = 0; + unsigned long flags; + + if (atomic_read(&acb->ccboutstandingcount) != 0) { + /* disable all outbound interrupt */ + intmask_org = arcmsr_disable_outbound_ints(acb); + /* talk to iop 331 outstanding command aborted */ + rtnval = arcmsr_abort_allcmd(acb); + /* clear all outbound posted Q */ + arcmsr_done4abort_postqueue(acb); + for (i = 0; i < acb->maxFreeCCB; i++) { + ccb = acb->pccb_pool[i]; + if (ccb->startdone == ARCMSR_CCB_START) { + scsi_dma_unmap(ccb->pcmd); + ccb->startdone = ARCMSR_CCB_DONE; + ccb->ccb_flags = 0; + spin_lock_irqsave(&acb->ccblist_lock, flags); + list_add_tail(&ccb->list, &acb->ccb_free_list); + spin_unlock_irqrestore(&acb->ccblist_lock, flags); + } + } + atomic_set(&acb->ccboutstandingcount, 0); + /* enable all outbound interrupt */ + arcmsr_enable_outbound_ints(acb, intmask_org); + return rtnval; + } + return rtnval; +} + +static int arcmsr_bus_reset(struct scsi_cmnd *cmd) +{ + struct AdapterControlBlock *acb; + int retry_count = 0; + int rtn = FAILED; + acb = (struct AdapterControlBlock *) cmd->device->host->hostdata; + if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) + return SUCCESS; + pr_notice("arcmsr: executing bus reset eh.....num_resets = %d," + " num_aborts = %d \n", acb->num_resets, acb->num_aborts); + acb->num_resets++; + + if (acb->acb_flags & ACB_F_BUS_RESET) { + long timeout; + pr_notice("arcmsr: there is a bus reset eh proceeding...\n"); + timeout = wait_event_timeout(wait_q, (acb->acb_flags + & ACB_F_BUS_RESET) == 0, 220 * HZ); + if (timeout) + return SUCCESS; + } + acb->acb_flags |= ACB_F_BUS_RESET; + if (!arcmsr_iop_reset(acb)) { + arcmsr_hardware_reset(acb); + acb->acb_flags &= ~ACB_F_IOP_INITED; +wait_reset_done: + ssleep(ARCMSR_SLEEPTIME); + if (arcmsr_reset_in_progress(acb)) { + if (retry_count > ARCMSR_RETRYCOUNT) { + acb->fw_flag = FW_DEADLOCK; + pr_notice("arcmsr%d: waiting for hw bus reset" + " return, RETRY TERMINATED!!\n", + 
acb->host->host_no); + return FAILED; + } + retry_count++; + goto wait_reset_done; + } + arcmsr_iop_init(acb); + acb->fw_flag = FW_NORMAL; + mod_timer(&acb->eternal_timer, jiffies + + msecs_to_jiffies(6 * HZ)); + acb->acb_flags &= ~ACB_F_BUS_RESET; + rtn = SUCCESS; + pr_notice("arcmsr: scsi bus reset eh returns with success\n"); + } else { + acb->acb_flags &= ~ACB_F_BUS_RESET; + acb->fw_flag = FW_NORMAL; + mod_timer(&acb->eternal_timer, jiffies + + msecs_to_jiffies(6 * HZ)); + rtn = SUCCESS; + } + return rtn; +} + +static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb, + struct CommandControlBlock *ccb) +{ + int rtn; + rtn = arcmsr_polling_ccbdone(acb, ccb); + return rtn; +} + +static int arcmsr_abort(struct scsi_cmnd *cmd) +{ + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *)cmd->device->host->hostdata; + int i = 0; + int rtn = FAILED; + uint32_t intmask_org; + + if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) + return SUCCESS; + printk(KERN_NOTICE + "arcmsr%d: abort device command of scsi id = %d lun = %d\n", + acb->host->host_no, cmd->device->id, (u32)cmd->device->lun); + acb->acb_flags |= ACB_F_ABORT; + acb->num_aborts++; + /* + ************************************************ + ** the all interrupt service routine is locked + ** we need to handle it as soon as possible and exit + ************************************************ + */ + if (!atomic_read(&acb->ccboutstandingcount)) { + acb->acb_flags &= ~ACB_F_ABORT; + return rtn; + } + + intmask_org = arcmsr_disable_outbound_ints(acb); + for (i = 0; i < acb->maxFreeCCB; i++) { + struct CommandControlBlock *ccb = acb->pccb_pool[i]; + if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) { + ccb->startdone = ARCMSR_CCB_ABORTED; + rtn = arcmsr_abort_one_cmd(acb, ccb); + break; + } + } + acb->acb_flags &= ~ACB_F_ABORT; + arcmsr_enable_outbound_ints(acb, intmask_org); + return rtn; +} + +static const char *arcmsr_info(struct Scsi_Host *host) +{ + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *) host->hostdata; + static char buf[256]; + char *type; + int raid6 = 1; + switch (acb->pdev->device) { + case PCI_DEVICE_ID_ARECA_1110: + case PCI_DEVICE_ID_ARECA_1200: + case PCI_DEVICE_ID_ARECA_1202: + case PCI_DEVICE_ID_ARECA_1210: + raid6 = 0; + fallthrough; + case PCI_DEVICE_ID_ARECA_1120: + case PCI_DEVICE_ID_ARECA_1130: + case PCI_DEVICE_ID_ARECA_1160: + case PCI_DEVICE_ID_ARECA_1170: + case PCI_DEVICE_ID_ARECA_1201: + case PCI_DEVICE_ID_ARECA_1203: + case PCI_DEVICE_ID_ARECA_1220: + case PCI_DEVICE_ID_ARECA_1230: + case PCI_DEVICE_ID_ARECA_1260: + case PCI_DEVICE_ID_ARECA_1270: + case PCI_DEVICE_ID_ARECA_1280: + type = "SATA"; + break; + case PCI_DEVICE_ID_ARECA_1214: + case PCI_DEVICE_ID_ARECA_1380: + case PCI_DEVICE_ID_ARECA_1381: + case PCI_DEVICE_ID_ARECA_1680: + case PCI_DEVICE_ID_ARECA_1681: + case PCI_DEVICE_ID_ARECA_1880: + case PCI_DEVICE_ID_ARECA_1884: + type = "SAS/SATA"; + break; + case PCI_DEVICE_ID_ARECA_1886: + type = "NVMe/SAS/SATA"; + break; + default: + type = "unknown"; + raid6 = 0; + break; + } + sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n", + type, raid6 ? 
"(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION); + return buf; +} diff --git a/drivers/scsi/arm/Kconfig b/drivers/scsi/arm/Kconfig new file mode 100644 index 000000000..9f64133f9 --- /dev/null +++ b/drivers/scsi/arm/Kconfig @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# SCSI driver configuration for Acorn +# +config SCSI_ACORNSCSI_3 + tristate "Acorn SCSI card (aka30) support" + depends on ARCH_ACORN && SCSI + select SCSI_SPI_ATTRS + help + This enables support for the Acorn SCSI card (aka30). If you have an + Acorn system with one of these, say Y. If unsure, say N. + +config SCSI_ACORNSCSI_SYNC + bool "Support SCSI 2 Synchronous Transfers" + depends on SCSI_ACORNSCSI_3 + help + Say Y here to enable synchronous transfer negotiation with all + targets on the Acorn SCSI card. + + In general, this improves performance; however some SCSI devices + don't implement it properly, so the safe answer is N. + +config SCSI_ARXESCSI + tristate "ARXE SCSI support" + depends on ARCH_ACORN && SCSI + help + Around 1991, Arxe Systems Limited released a high density floppy + disc interface for the Acorn Archimedes range, to allow the use of + HD discs from the then new A5000 on earlier models. This interface + was either sold on its own or with an integral SCSI controller. + Technical details on this NCR53c94-based device are available at + + Say Y here to compile in support for the SCSI controller. + +config SCSI_CUMANA_2 + tristate "CumanaSCSI II support" + depends on ARCH_ACORN && SCSI + help + This enables support for the Cumana SCSI II card. If you have an + Acorn system with one of these, say Y. If unsure, say N. + +config SCSI_EESOXSCSI + tristate "EESOX support" + depends on ARCH_ACORN && SCSI + help + This enables support for the EESOX SCSI card. If you have an Acorn + system with one of these, say Y, otherwise say N. + +config SCSI_POWERTECSCSI + tristate "PowerTec support" + depends on ARCH_ACORN && SCSI + help + This enables support for the Powertec SCSI card on Acorn systems. If + you have one of these, say Y. If unsure, say N. + +comment "The following drivers are not fully supported" + depends on ARCH_ACORN + +config SCSI_CUMANA_1 + tristate "CumanaSCSI I support" + depends on ARCH_ACORN && SCSI + select SCSI_SPI_ATTRS + help + This enables support for the Cumana SCSI I card. If you have an + Acorn system with one of these, say Y. If unsure, say N. + +config SCSI_OAK1 + tristate "Oak SCSI support" + depends on ARCH_ACORN && SCSI + select SCSI_SPI_ATTRS + help + This enables support for the Oak SCSI card. If you have an Acorn + system with one of these, say Y. If unsure, say N. 
+ diff --git a/drivers/scsi/arm/Makefile b/drivers/scsi/arm/Makefile new file mode 100644 index 000000000..b576d9276 --- /dev/null +++ b/drivers/scsi/arm/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for drivers/scsi/arm +# + +acornscsi_mod-objs := acornscsi.o acornscsi-io.o + +obj-$(CONFIG_SCSI_ACORNSCSI_3) += acornscsi_mod.o queue.o msgqueue.o +obj-$(CONFIG_SCSI_ARXESCSI) += arxescsi.o fas216.o queue.o msgqueue.o +obj-$(CONFIG_SCSI_CUMANA_1) += cumana_1.o +obj-$(CONFIG_SCSI_CUMANA_2) += cumana_2.o fas216.o queue.o msgqueue.o +obj-$(CONFIG_SCSI_OAK1) += oak.o +obj-$(CONFIG_SCSI_POWERTECSCSI) += powertec.o fas216.o queue.o msgqueue.o +obj-$(CONFIG_SCSI_EESOXSCSI) += eesox.o fas216.o queue.o msgqueue.o diff --git a/drivers/scsi/arm/acornscsi-io.S b/drivers/scsi/arm/acornscsi-io.S new file mode 100644 index 000000000..fdd7237bb --- /dev/null +++ b/drivers/scsi/arm/acornscsi-io.S @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * linux/drivers/acorn/scsi/acornscsi-io.S: Acorn SCSI card IO + */ +#include + +#include +#include + +#if defined(__APCS_32__) +#define LOADREGS(t,r,l...) ldm##t r, l +#elif defined(__APCS_26__) +#define LOADREGS(t,r,l...) ldm##t r, l##^ +#endif + +@ Purpose: transfer a block of data from the acorn scsi card to memory +@ Proto : void acornscsi_in(unsigned int addr_start, char *buffer, int length) +@ Returns: nothing + + .align +ENTRY(__acornscsi_in) + stmfd sp!, {r4 - r7, lr} + bic r0, r0, #3 + mov lr, #0xff + orr lr, lr, #0xff00 +acornscsi_in16lp: + subs r2, r2, #16 + bmi acornscsi_in8 + ldmia r0!, {r3, r4, r5, r6} + and r3, r3, lr + orr r3, r3, r4, lsl #16 + and r4, r5, lr + orr r4, r4, r6, lsl #16 + ldmia r0!, {r5, r6, r7, ip} + and r5, r5, lr + orr r5, r5, r6, lsl #16 + and r6, r7, lr + orr r6, r6, ip, lsl #16 + stmia r1!, {r3 - r6} + bne acornscsi_in16lp + LOADREGS(fd, sp!, {r4 - r7, pc}) + +acornscsi_in8: adds r2, r2, #8 + bmi acornscsi_in4 + ldmia r0!, {r3, r4, r5, r6} + and r3, r3, lr + orr r3, r3, r4, lsl #16 + and r4, r5, lr + orr r4, r4, r6, lsl #16 + stmia r1!, {r3 - r4} + LOADREGS(eqfd, sp!, {r4 - r7, pc}) + sub r2, r2, #8 + +acornscsi_in4: adds r2, r2, #4 + bmi acornscsi_in2 + ldmia r0!, {r3, r4} + and r3, r3, lr + orr r3, r3, r4, lsl #16 + str r3, [r1], #4 + LOADREGS(eqfd, sp!, {r4 - r7, pc}) + sub r2, r2, #4 + +acornscsi_in2: adds r2, r2, #2 + ldr r3, [r0], #4 + and r3, r3, lr + strb r3, [r1], #1 + mov r3, r3, lsr #8 + strplb r3, [r1], #1 + LOADREGS(fd, sp!, {r4 - r7, pc}) + +@ Purpose: transfer a block of data from memory to the acorn scsi card +@ Proto : void acornscsi_in(unsigned int addr_start, char *buffer, int length) +@ Returns: nothing + +ENTRY(__acornscsi_out) + stmfd sp!, {r4 - r6, lr} + bic r0, r0, #3 +acornscsi_out16lp: + subs r2, r2, #16 + bmi acornscsi_out8 + ldmia r1!, {r4, r6, ip, lr} + mov r3, r4, lsl #16 + orr r3, r3, r3, lsr #16 + mov r4, r4, lsr #16 + orr r4, r4, r4, lsl #16 + mov r5, r6, lsl #16 + orr r5, r5, r5, lsr #16 + mov r6, r6, lsr #16 + orr r6, r6, r6, lsl #16 + stmia r0!, {r3, r4, r5, r6} + mov r3, ip, lsl #16 + orr r3, r3, r3, lsr #16 + mov r4, ip, lsr #16 + orr r4, r4, r4, lsl #16 + mov ip, lr, lsl #16 + orr ip, ip, ip, lsr #16 + mov lr, lr, lsr #16 + orr lr, lr, lr, lsl #16 + stmia r0!, {r3, r4, ip, lr} + bne acornscsi_out16lp + LOADREGS(fd, sp!, {r4 - r6, pc}) + +acornscsi_out8: adds r2, r2, #8 + bmi acornscsi_out4 + ldmia r1!, {r4, r6} + mov r3, r4, lsl #16 + orr r3, r3, r3, lsr #16 + mov r4, r4, lsr #16 + orr r4, r4, r4, lsl #16 + mov r5, r6, lsl #16 + orr r5, r5, r5, 
lsr #16 + mov r6, r6, lsr #16 + orr r6, r6, r6, lsl #16 + stmia r0!, {r3, r4, r5, r6} + LOADREGS(eqfd, sp!, {r4 - r6, pc}) + + sub r2, r2, #8 +acornscsi_out4: adds r2, r2, #4 + bmi acornscsi_out2 + ldr r4, [r1], #4 + mov r3, r4, lsl #16 + orr r3, r3, r3, lsr #16 + mov r4, r4, lsr #16 + orr r4, r4, r4, lsl #16 + stmia r0!, {r3, r4} + LOADREGS(eqfd, sp!, {r4 - r6, pc}) + + sub r2, r2, #4 +acornscsi_out2: adds r2, r2, #2 + ldr r3, [r1], #2 + strb r3, [r0], #1 + mov r3, r3, lsr #8 + strplb r3, [r0], #1 + LOADREGS(fd, sp!, {r4 - r6, pc}) + diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c new file mode 100644 index 000000000..0b046e4b3 --- /dev/null +++ b/drivers/scsi/arm/acornscsi.c @@ -0,0 +1,2921 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/drivers/acorn/scsi/acornscsi.c + * + * Acorn SCSI 3 driver + * By R.M.King. + * + * Abandoned using the Select and Transfer command since there were + * some nasty races between our software and the target devices that + * were not easy to solve, and the device errata had a lot of entries + * for this command, some of them quite nasty... + * + * Changelog: + * 26-Sep-1997 RMK Re-jigged to use the queue module. + * Re-coded state machine to be based on driver + * state not scsi state. Should be easier to debug. + * Added acornscsi_release to clean up properly. + * Updated proc/scsi reporting. + * 05-Oct-1997 RMK Implemented writing to SCSI devices. + * 06-Oct-1997 RMK Corrected small (non-serious) bug with the connect/ + * reconnect race condition causing a warning message. + * 12-Oct-1997 RMK Added catch for re-entering interrupt routine. + * 15-Oct-1997 RMK Improved handling of commands. + * 27-Jun-1998 RMK Changed asm/delay.h to linux/delay.h. + * 13-Dec-1998 RMK Better abort code and command handling. Extra state + * transitions added to allow dodgy devices to work. + */ +#define DEBUG_NO_WRITE 1 +#define DEBUG_QUEUES 2 +#define DEBUG_DMA 4 +#define DEBUG_ABORT 8 +#define DEBUG_DISCON 16 +#define DEBUG_CONNECT 32 +#define DEBUG_PHASES 64 +#define DEBUG_WRITE 128 +#define DEBUG_LINK 256 +#define DEBUG_MESSAGES 512 +#define DEBUG_RESET 1024 +#define DEBUG_ALL (DEBUG_RESET|DEBUG_MESSAGES|DEBUG_LINK|DEBUG_WRITE|\ + DEBUG_PHASES|DEBUG_CONNECT|DEBUG_DISCON|DEBUG_ABORT|\ + DEBUG_DMA|DEBUG_QUEUES) + +/* DRIVER CONFIGURATION + * + * SCSI-II Tagged queue support. + * + * I don't have any SCSI devices that support it, so it is totally untested + * (except to make sure that it doesn't interfere with any non-tagging + * devices). It is not fully implemented either - what happens when a + * tagging device reconnects??? + * + * You can tell if you have a device that supports tagged queueing my + * cating (eg) /proc/scsi/acornscsi/0 and see if the SCSI revision is reported + * as '2 TAG'. + */ + +/* + * SCSI-II Synchronous transfer support. + * + * Tried and tested... + * + * SDTR_SIZE - maximum number of un-acknowledged bytes (0 = off, 12 = max) + * SDTR_PERIOD - period of REQ signal (min=125, max=1020) + * DEFAULT_PERIOD - default REQ period. + */ +#define SDTR_SIZE 12 +#define SDTR_PERIOD 125 +#define DEFAULT_PERIOD 500 + +/* + * Debugging information + * + * DEBUG - bit mask from list above + * DEBUG_TARGET - is defined to the target number if you want to debug + * a specific target. [only recon/write/dma]. 
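The __acornscsi_in/__acornscsi_out routines above exist because the card's buffer RAM is 16 bits wide: each 32-bit word read from the card carries only 16 valid data bits, so the assembler masks every word and packs two of them into one 32-bit word of the destination buffer (and does the inverse on writes). A rough C equivalent of the bulk read path is sketched below, purely as an illustration; the function name is invented and the 8-, 4- and 2-byte tail cases handled by the real routine are omitted.

#include <linux/io.h>
#include <linux/types.h>

/*
 * Sketch of the fast path of __acornscsi_in: two card word reads, each
 * yielding 16 valid bits, are packed into one word of the buffer.
 */
static void acornscsi_in_sketch(const u32 __iomem *card, u32 *buf, int bytes)
{
        while (bytes >= 4) {
                u32 lo = readl(card++) & 0xffff;        /* first halfword of the pair */
                u32 hi = readl(card++) & 0xffff;        /* second halfword */

                *buf++ = lo | (hi << 16);               /* little-endian pack, as the asm does */
                bytes -= 4;
        }
        /* the real routine also handles 8-, 4- and 2-byte tails */
}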
+ */ +#define DEBUG (DEBUG_RESET|DEBUG_WRITE|DEBUG_NO_WRITE) +/* only allow writing to SCSI device 0 */ +#define NO_WRITE 0xFE +/*#define DEBUG_TARGET 2*/ +/* + * Select timeout time (in 10ms units) + * + * This is the timeout used between the start of selection and the WD33C93 + * chip deciding that the device isn't responding. + */ +#define TIMEOUT_TIME 10 +/* + * Define this if you want to have verbose explanation of SCSI + * status/messages. + */ +#undef CONFIG_ACORNSCSI_CONSTANTS +/* + * Define this if you want to use the on board DMAC [don't remove this option] + * If not set, then use PIO mode (not currently supported). + */ +#define USE_DMAC + +/* + * ==================================================================================== + */ + +#ifdef DEBUG_TARGET +#define DBG(cmd,xxx...) \ + if (cmd->device->id == DEBUG_TARGET) { \ + xxx; \ + } +#else +#define DBG(cmd,xxx...) xxx +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include "acornscsi.h" +#include "msgqueue.h" +#include "arm_scsi.h" + +#include + +#define VER_MAJOR 2 +#define VER_MINOR 0 +#define VER_PATCH 6 + +#ifdef USE_DMAC +/* + * DMAC setup parameters + */ +#define INIT_DEVCON0 (DEVCON0_RQL|DEVCON0_EXW|DEVCON0_CMP) +#define INIT_DEVCON1 (DEVCON1_BHLD) +#define DMAC_READ (MODECON_READ) +#define DMAC_WRITE (MODECON_WRITE) +#define INIT_SBICDMA (CTRL_DMABURST) + +#define scsi_xferred have_data_in + +/* + * Size of on-board DMA buffer + */ +#define DMAC_BUFFER_SIZE 65536 +#endif + +#define STATUS_BUFFER_TO_PRINT 24 + +unsigned int sdtr_period = SDTR_PERIOD; +unsigned int sdtr_size = SDTR_SIZE; + +static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp, + unsigned int result); +static int acornscsi_reconnect_finish(AS_Host *host); +static void acornscsi_dma_cleanup(AS_Host *host); +static void acornscsi_abortcmd(AS_Host *host); + +/* ==================================================================================== + * Miscellaneous + */ + +/* Offsets from MEMC base */ +#define SBIC_REGIDX 0x2000 +#define SBIC_REGVAL 0x2004 +#define DMAC_OFFSET 0x3000 + +/* Offsets from FAST IOC base */ +#define INT_REG 0x2000 +#define PAGE_REG 0x3000 + +static inline void sbic_arm_write(AS_Host *host, unsigned int reg, unsigned int value) +{ + writeb(reg, host->base + SBIC_REGIDX); + writeb(value, host->base + SBIC_REGVAL); +} + +static inline int sbic_arm_read(AS_Host *host, unsigned int reg) +{ + if(reg == SBIC_ASR) + return readl(host->base + SBIC_REGIDX) & 255; + writeb(reg, host->base + SBIC_REGIDX); + return readl(host->base + SBIC_REGVAL) & 255; +} + +#define sbic_arm_writenext(host, val) writeb((val), (host)->base + SBIC_REGVAL) +#define sbic_arm_readnext(host) readb((host)->base + SBIC_REGVAL) + +#ifdef USE_DMAC +#define dmac_read(host,reg) \ + readb((host)->base + DMAC_OFFSET + ((reg) << 2)) + +#define dmac_write(host,reg,value) \ + ({ writeb((value), (host)->base + DMAC_OFFSET + ((reg) << 2)); }) + +#define dmac_clearintr(host) writeb(0, (host)->fast + INT_REG) + +static inline unsigned int dmac_address(AS_Host *host) +{ + return dmac_read(host, DMAC_TXADRHI) << 16 | + dmac_read(host, DMAC_TXADRMD) << 8 | + dmac_read(host, DMAC_TXADRLO); +} + +static +void acornscsi_dumpdma(AS_Host *host, char *where) +{ + unsigned int mode, addr, len; + + mode = dmac_read(host, DMAC_MODECON); + addr = dmac_address(host); + len = 
dmac_read(host, DMAC_TXCNTHI) << 8 | + dmac_read(host, DMAC_TXCNTLO); + + printk("scsi%d: %s: DMAC %02x @%06x+%04x msk %02x, ", + host->host->host_no, where, + mode, addr, (len + 1) & 0xffff, + dmac_read(host, DMAC_MASKREG)); + + printk("DMA @%06x, ", host->dma.start_addr); + printk("BH @%p +%04x, ", host->scsi.SCp.ptr, + host->scsi.SCp.this_residual); + printk("DT @+%04x ST @+%04x", host->dma.transferred, + host->scsi.SCp.scsi_xferred); + printk("\n"); +} +#endif + +static +unsigned long acornscsi_sbic_xfcount(AS_Host *host) +{ + unsigned long length; + + length = sbic_arm_read(host, SBIC_TRANSCNTH) << 16; + length |= sbic_arm_readnext(host) << 8; + length |= sbic_arm_readnext(host); + + return length; +} + +static int +acornscsi_sbic_wait(AS_Host *host, int stat_mask, int stat, int timeout, char *msg) +{ + int asr; + + do { + asr = sbic_arm_read(host, SBIC_ASR); + + if ((asr & stat_mask) == stat) + return 0; + + udelay(1); + } while (--timeout); + + printk("scsi%d: timeout while %s\n", host->host->host_no, msg); + + return -1; +} + +static +int acornscsi_sbic_issuecmd(AS_Host *host, int command) +{ + if (acornscsi_sbic_wait(host, ASR_CIP, 0, 1000, "issuing command")) + return -1; + + sbic_arm_write(host, SBIC_CMND, command); + + return 0; +} + +static void +acornscsi_csdelay(unsigned int cs) +{ + unsigned long target_jiffies, flags; + + target_jiffies = jiffies + 1 + cs * HZ / 100; + + local_save_flags(flags); + local_irq_enable(); + + while (time_before(jiffies, target_jiffies)) barrier(); + + local_irq_restore(flags); +} + +static +void acornscsi_resetcard(AS_Host *host) +{ + unsigned int i, timeout; + + /* assert reset line */ + host->card.page_reg = 0x80; + writeb(host->card.page_reg, host->fast + PAGE_REG); + + /* wait 3 cs. SCSI standard says 25ms. 
*/ + acornscsi_csdelay(3); + + host->card.page_reg = 0; + writeb(host->card.page_reg, host->fast + PAGE_REG); + + /* + * Should get a reset from the card + */ + timeout = 1000; + do { + if (readb(host->fast + INT_REG) & 8) + break; + udelay(1); + } while (--timeout); + + if (timeout == 0) + printk("scsi%d: timeout while resetting card\n", + host->host->host_no); + + sbic_arm_read(host, SBIC_ASR); + sbic_arm_read(host, SBIC_SSR); + + /* setup sbic - WD33C93A */ + sbic_arm_write(host, SBIC_OWNID, OWNID_EAF | host->host->this_id); + sbic_arm_write(host, SBIC_CMND, CMND_RESET); + + /* + * Command should cause a reset interrupt + */ + timeout = 1000; + do { + if (readb(host->fast + INT_REG) & 8) + break; + udelay(1); + } while (--timeout); + + if (timeout == 0) + printk("scsi%d: timeout while resetting card\n", + host->host->host_no); + + sbic_arm_read(host, SBIC_ASR); + if (sbic_arm_read(host, SBIC_SSR) != 0x01) + printk(KERN_CRIT "scsi%d: WD33C93A didn't give enhanced reset interrupt\n", + host->host->host_no); + + sbic_arm_write(host, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI); + sbic_arm_write(host, SBIC_TIMEOUT, TIMEOUT_TIME); + sbic_arm_write(host, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA); + sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP); + + host->card.page_reg = 0x40; + writeb(host->card.page_reg, host->fast + PAGE_REG); + + /* setup dmac - uPC71071 */ + dmac_write(host, DMAC_INIT, 0); +#ifdef USE_DMAC + dmac_write(host, DMAC_INIT, INIT_8BIT); + dmac_write(host, DMAC_CHANNEL, CHANNEL_0); + dmac_write(host, DMAC_DEVCON0, INIT_DEVCON0); + dmac_write(host, DMAC_DEVCON1, INIT_DEVCON1); +#endif + + host->SCpnt = NULL; + host->scsi.phase = PHASE_IDLE; + host->scsi.disconnectable = 0; + + memset(host->busyluns, 0, sizeof(host->busyluns)); + + for (i = 0; i < 8; i++) { + host->device[i].sync_state = SYNC_NEGOCIATE; + host->device[i].disconnect_ok = 1; + } + + /* wait 25 cs. SCSI standard says 250ms. */ + acornscsi_csdelay(25); +} + +/*============================================================================================= + * Utility routines (eg. 
debug) + */ +#ifdef CONFIG_ACORNSCSI_CONSTANTS +static char *acornscsi_interrupttype[] = { + "rst", "suc", "p/a", "3", + "term", "5", "6", "7", + "serv", "9", "a", "b", + "c", "d", "e", "f" +}; + +static signed char acornscsi_map[] = { + 0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 2, -1, -1, -1, -1, 3, -1, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, -1, -1, -1, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 15, 16, 17, 18, 19, -1, -1, 20, 4, 5, 6, 7, 8, 9, 10, 11, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 21, 22, -1, -1, -1, 23, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 +}; + +static char *acornscsi_interruptcode[] = { + /* 0 */ + "reset - normal mode", /* 00 */ + "reset - advanced mode", /* 01 */ + + /* 2 */ + "sel", /* 11 */ + "sel+xfer", /* 16 */ + "data-out", /* 18 */ + "data-in", /* 19 */ + "cmd", /* 1A */ + "stat", /* 1B */ + "??-out", /* 1C */ + "??-in", /* 1D */ + "msg-out", /* 1E */ + "msg-in", /* 1F */ + + /* 12 */ + "/ACK asserted", /* 20 */ + "save-data-ptr", /* 21 */ + "{re}sel", /* 22 */ + + /* 15 */ + "inv cmd", /* 40 */ + "unexpected disconnect", /* 41 */ + "sel timeout", /* 42 */ + "P err", /* 43 */ + "P err+ATN", /* 44 */ + "bad status byte", /* 47 */ + + /* 21 */ + "resel, no id", /* 80 */ + "resel", /* 81 */ + "discon", /* 85 */ +}; + +static +void print_scsi_status(unsigned int ssr) +{ + if (acornscsi_map[ssr] != -1) + printk("%s:%s", + acornscsi_interrupttype[(ssr >> 4)], + acornscsi_interruptcode[acornscsi_map[ssr]]); + else + printk("%X:%X", ssr >> 4, ssr & 0x0f); +} +#endif + +static +void print_sbic_status(int asr, int ssr, int cmdphase) +{ +#ifdef CONFIG_ACORNSCSI_CONSTANTS + printk("sbic: %c%c%c%c%c%c ", + asr & ASR_INT ? 'I' : 'i', + asr & ASR_LCI ? 'L' : 'l', + asr & ASR_BSY ? 'B' : 'b', + asr & ASR_CIP ? 'C' : 'c', + asr & ASR_PE ? 'P' : 'p', + asr & ASR_DBR ? 'D' : 'd'); + printk("scsi: "); + print_scsi_status(ssr); + printk(" ph %02X\n", cmdphase); +#else + printk("sbic: %02X scsi: %X:%X ph: %02X\n", + asr, (ssr & 0xf0)>>4, ssr & 0x0f, cmdphase); +#endif +} + +static void +acornscsi_dumplogline(AS_Host *host, int target, int line) +{ + unsigned long prev; + signed int ptr; + + ptr = host->status_ptr[target] - STATUS_BUFFER_TO_PRINT; + if (ptr < 0) + ptr += STATUS_BUFFER_SIZE; + + printk("%c: %3s:", target == 8 ? 'H' : '0' + target, + line == 0 ? "ph" : line == 1 ? "ssr" : "int"); + + prev = host->status[target][ptr].when; + + for (; ptr != host->status_ptr[target]; ptr = (ptr + 1) & (STATUS_BUFFER_SIZE - 1)) { + unsigned long time_diff; + + if (!host->status[target][ptr].when) + continue; + + switch (line) { + case 0: + printk("%c%02X", host->status[target][ptr].irq ? 
'-' : ' ', + host->status[target][ptr].ph); + break; + + case 1: + printk(" %02X", host->status[target][ptr].ssr); + break; + + case 2: + time_diff = host->status[target][ptr].when - prev; + prev = host->status[target][ptr].when; + if (time_diff == 0) + printk("==^"); + else if (time_diff >= 100) + printk(" "); + else + printk(" %02ld", time_diff); + break; + } + } + + printk("\n"); +} + +static +void acornscsi_dumplog(AS_Host *host, int target) +{ + do { + acornscsi_dumplogline(host, target, 0); + acornscsi_dumplogline(host, target, 1); + acornscsi_dumplogline(host, target, 2); + + if (target == 8) + break; + + target = 8; + } while (1); +} + +static +char acornscsi_target(AS_Host *host) +{ + if (host->SCpnt) + return '0' + host->SCpnt->device->id; + return 'H'; +} + +/* + * Prototype: cmdtype_t acornscsi_cmdtype(int command) + * Purpose : differentiate READ from WRITE from other commands + * Params : command - command to interpret + * Returns : CMD_READ - command reads data, + * CMD_WRITE - command writes data, + * CMD_MISC - everything else + */ +static inline +cmdtype_t acornscsi_cmdtype(int command) +{ + switch (command) { + case WRITE_6: case WRITE_10: case WRITE_12: + return CMD_WRITE; + case READ_6: case READ_10: case READ_12: + return CMD_READ; + default: + return CMD_MISC; + } +} + +/* + * Prototype: int acornscsi_datadirection(int command) + * Purpose : differentiate between commands that have a DATA IN phase + * and a DATA OUT phase + * Params : command - command to interpret + * Returns : DATADIR_OUT - data out phase expected + * DATADIR_IN - data in phase expected + */ +static +datadir_t acornscsi_datadirection(int command) +{ + switch (command) { + case CHANGE_DEFINITION: case COMPARE: case COPY: + case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT: + case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER: + case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE: + case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW: + case WRITE_6: case WRITE_10: case WRITE_VERIFY: + case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME: + case SEARCH_HIGH_12: case SEARCH_EQUAL_12: case SEARCH_LOW_12: + case WRITE_12: case WRITE_VERIFY_12: case SET_WINDOW: + case MEDIUM_SCAN: case SEND_VOLUME_TAG: case 0xea: + return DATADIR_OUT; + default: + return DATADIR_IN; + } +} + +/* + * Purpose : provide values for synchronous transfers with 33C93. + * Copyright: Copyright (c) 1996 John Shifflett, GeoLog Consulting + * Modified by Russell King for 8MHz WD33C93A + */ +static struct sync_xfer_tbl { + unsigned int period_ns; + unsigned char reg_value; +} sync_xfer_table[] = { + { 1, 0x20 }, { 249, 0x20 }, { 374, 0x30 }, + { 499, 0x40 }, { 624, 0x50 }, { 749, 0x60 }, + { 874, 0x70 }, { 999, 0x00 }, { 0, 0 } +}; + +/* + * Prototype: int acornscsi_getperiod(unsigned char syncxfer) + * Purpose : period for the synchronous transfer setting + * Params : syncxfer SYNCXFER register value + * Returns : period in ns. 
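The sync_xfer_table above maps a REQ period in nanoseconds onto the value programmed into the WD33C93A SYNCHTRANSFER register, and round_period()/calc_sync_xfer() below pick the first slot whose period is not shorter than the request and OR in the offset, clamped to SDTR_SIZE. The following is a small self-contained illustration of that lookup using a private copy of the table; pick_sync_reg() is a made-up name and the edge handling is simplified relative to round_period().

/* Self-contained sketch of the period -> SYNCHTRANSFER lookup (names invented). */
struct sync_slot { unsigned int period_ns; unsigned char reg_value; };

static const struct sync_slot slots[] = {
        {   1, 0x20 }, { 249, 0x20 }, { 374, 0x30 }, { 499, 0x40 },
        { 624, 0x50 }, { 749, 0x60 }, { 874, 0x70 }, { 999, 0x00 },
};

static unsigned char pick_sync_reg(unsigned int period_ns, unsigned int offset)
{
        unsigned int i;

        if (offset > 12)                /* SDTR_SIZE: at most 12 unacknowledged bytes */
                offset = 12;
        for (i = 1; i < sizeof(slots) / sizeof(slots[0]); i++)
                if (period_ns <= slots[i].period_ns)
                        return slots[i].reg_value | offset;
        return slots[7].reg_value | offset;     /* slower than 999 ns: same value as the last slot */
}

/* e.g. pick_sync_reg(200, 16) == 0x2c (249 ns slot, offset clamped to 12),
 *      pick_sync_reg(500,  8) == 0x58 (624 ns slot). */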
+ */ +static +int acornscsi_getperiod(unsigned char syncxfer) +{ + int i; + + syncxfer &= 0xf0; + if (syncxfer == 0x10) + syncxfer = 0; + + for (i = 1; sync_xfer_table[i].period_ns; i++) + if (syncxfer == sync_xfer_table[i].reg_value) + return sync_xfer_table[i].period_ns; + return 0; +} + +/* + * Prototype: int round_period(unsigned int period) + * Purpose : return index into above table for a required REQ period + * Params : period - time (ns) for REQ + * Returns : table index + * Copyright: Copyright (c) 1996 John Shifflett, GeoLog Consulting + */ +static inline +int round_period(unsigned int period) +{ + int i; + + for (i = 1; sync_xfer_table[i].period_ns; i++) { + if ((period <= sync_xfer_table[i].period_ns) && + (period > sync_xfer_table[i - 1].period_ns)) + return i; + } + return 7; +} + +/* + * Prototype: unsigned char calc_sync_xfer(unsigned int period, unsigned int offset) + * Purpose : calculate value for 33c93s SYNC register + * Params : period - time (ns) for REQ + * offset - offset in bytes between REQ/ACK + * Returns : value for SYNC register + * Copyright: Copyright (c) 1996 John Shifflett, GeoLog Consulting + */ +static +unsigned char __maybe_unused calc_sync_xfer(unsigned int period, + unsigned int offset) +{ + return sync_xfer_table[round_period(period)].reg_value | + ((offset < SDTR_SIZE) ? offset : SDTR_SIZE); +} + +/* ==================================================================================== + * Command functions + */ +/* + * Function: acornscsi_kick(AS_Host *host) + * Purpose : kick next command to interface + * Params : host - host to send command to + * Returns : INTR_IDLE if idle, otherwise INTR_PROCESSING + * Notes : interrupts are always disabled! + */ +static +intr_ret_t acornscsi_kick(AS_Host *host) +{ + int from_queue = 0; + struct scsi_cmnd *SCpnt; + + /* first check to see if a command is waiting to be executed */ + SCpnt = host->origSCpnt; + host->origSCpnt = NULL; + + /* retrieve next command */ + if (!SCpnt) { + SCpnt = queue_remove_exclude(&host->queues.issue, host->busyluns); + if (!SCpnt) + return INTR_IDLE; + + from_queue = 1; + } + + if (host->scsi.disconnectable && host->SCpnt) { + queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); + host->scsi.disconnectable = 0; +#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) + DBG(host->SCpnt, printk("scsi%d.%c: moved command to disconnected queue\n", + host->host->host_no, acornscsi_target(host))); +#endif + host->SCpnt = NULL; + } + + /* + * If we have an interrupt pending, then we may have been reselected. + * In this case, we don't want to write to the registers + */ + if (!(sbic_arm_read(host, SBIC_ASR) & (ASR_INT|ASR_BSY|ASR_CIP))) { + sbic_arm_write(host, SBIC_DESTID, SCpnt->device->id); + sbic_arm_write(host, SBIC_CMND, CMND_SELWITHATN); + } + + /* + * claim host busy - all of these must happen atomically wrt + * our interrupt routine. Failure means command loss. 
+ */ + host->scsi.phase = PHASE_CONNECTING; + host->SCpnt = SCpnt; + host->scsi.SCp = *arm_scsi_pointer(SCpnt); + host->dma.xfer_setup = 0; + host->dma.xfer_required = 0; + host->dma.xfer_done = 0; + +#if (DEBUG & (DEBUG_ABORT|DEBUG_CONNECT)) + DBG(SCpnt,printk("scsi%d.%c: starting cmd %02X\n", + host->host->host_no, '0' + SCpnt->device->id, + SCpnt->cmnd[0])); +#endif + + if (from_queue) { + set_bit(SCpnt->device->id * 8 + + (u8)(SCpnt->device->lun & 0x07), host->busyluns); + + host->stats.removes += 1; + + switch (acornscsi_cmdtype(SCpnt->cmnd[0])) { + case CMD_WRITE: + host->stats.writes += 1; + break; + case CMD_READ: + host->stats.reads += 1; + break; + case CMD_MISC: + host->stats.miscs += 1; + break; + } + } + + return INTR_PROCESSING; +} + +/* + * Function: void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp, unsigned int result) + * Purpose : complete processing for command + * Params : host - interface that completed + * result - driver byte of result + */ +static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp, + unsigned int result) +{ + struct scsi_cmnd *SCpnt = *SCpntp; + + /* clean up */ + sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP); + + host->stats.fins += 1; + + if (SCpnt) { + *SCpntp = NULL; + + acornscsi_dma_cleanup(host); + + set_host_byte(SCpnt, result); + if (result == DID_OK) + scsi_msg_to_host_byte(SCpnt, host->scsi.SCp.Message); + set_status_byte(SCpnt, host->scsi.SCp.Status); + + /* + * In theory, this should not happen. In practice, it seems to. + * Only trigger an error if the device attempts to report all happy + * but with untransferred buffers... If we don't do something, then + * data loss will occur. Should we check SCpnt->underflow here? + * It doesn't appear to be set to something meaningful by the higher + * levels all the time. + */ + if (result == DID_OK) { + int xfer_warn = 0; + + if (SCpnt->underflow == 0) { + if (host->scsi.SCp.ptr && + acornscsi_cmdtype(SCpnt->cmnd[0]) != CMD_MISC) + xfer_warn = 1; + } else { + if (host->scsi.SCp.scsi_xferred < SCpnt->underflow || + host->scsi.SCp.scsi_xferred != host->dma.transferred) + xfer_warn = 1; + } + + /* ANSI standard says: (SCSI-2 Rev 10c Sect 5.6.6) + * Targets which break data transfers into multiple + * connections shall end each successful connection + * (except possibly the last) with a SAVE DATA + * POINTER - DISCONNECT message sequence. + * + * This makes it difficult to ensure that a transfer has + * completed. If we reach the end of a transfer during + * the command, then we can only have finished the transfer. + * therefore, if we seem to have some data remaining, this + * is not a problem. 
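Condensed, the decision described in the completion path above amounts to the predicate below: a DID_OK command is only flagged as a suspect data transfer when the scatter list was not exhausted and either no underflow value was supplied but a data command still has buffer left, or fewer bytes moved than the stated underflow, or the SBIC and DMAC byte counts disagree. This is only a restatement sketch; the helper and its flattened arguments are not part of the driver.

#include <linux/types.h>

/* Sketch: the "incomplete data transfer" test from acornscsi_done(), flattened. */
static bool acorn_xfer_suspect(bool data_command, bool buffer_remaining, bool sg_exhausted,
                               unsigned int underflow, unsigned int scsi_xferred,
                               unsigned int dma_transferred)
{
        if (sg_exhausted)                       /* ran off the end of the transfer: nothing left to move */
                return false;
        if (underflow == 0)                     /* no hint from the midlayer */
                return data_command && buffer_remaining;
        return scsi_xferred < underflow ||      /* short of what the command needed */
               scsi_xferred != dma_transferred; /* SBIC and DMAC disagree about how much moved */
}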
+ */ + if (host->dma.xfer_done) + xfer_warn = 0; + + if (xfer_warn) { + switch (get_status_byte(SCpnt)) { + case SAM_STAT_CHECK_CONDITION: + case SAM_STAT_COMMAND_TERMINATED: + case SAM_STAT_BUSY: + case SAM_STAT_TASK_SET_FULL: + case SAM_STAT_RESERVATION_CONFLICT: + break; + + default: + scmd_printk(KERN_ERR, SCpnt, + "incomplete data transfer detected: " + "result=%08X", SCpnt->result); + scsi_print_command(SCpnt); + acornscsi_dumpdma(host, "done"); + acornscsi_dumplog(host, SCpnt->device->id); + set_host_byte(SCpnt, DID_ERROR); + } + } + } + + clear_bit(SCpnt->device->id * 8 + + (u8)(SCpnt->device->lun & 0x7), host->busyluns); + + scsi_done(SCpnt); + } else + printk("scsi%d: null command in acornscsi_done", host->host->host_no); + + host->scsi.phase = PHASE_IDLE; +} + +/* ==================================================================================== + * DMA routines + */ +/* + * Purpose : update SCSI Data Pointer + * Notes : this will only be one SG entry or less + */ +static +void acornscsi_data_updateptr(AS_Host *host, struct scsi_pointer *SCp, unsigned int length) +{ + SCp->ptr += length; + SCp->this_residual -= length; + + if (SCp->this_residual == 0 && next_SCp(SCp) == 0) + host->dma.xfer_done = 1; +} + +/* + * Prototype: void acornscsi_data_read(AS_Host *host, char *ptr, + * unsigned int start_addr, unsigned int length) + * Purpose : read data from DMA RAM + * Params : host - host to transfer from + * ptr - DRAM address + * start_addr - host mem address + * length - number of bytes to transfer + * Notes : this will only be one SG entry or less + */ +static +void acornscsi_data_read(AS_Host *host, char *ptr, + unsigned int start_addr, unsigned int length) +{ + extern void __acornscsi_in(void __iomem *, char *buf, int len); + unsigned int page, offset, len = length; + + page = (start_addr >> 12); + offset = start_addr & ((1 << 12) - 1); + + writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG); + + while (len > 0) { + unsigned int this_len; + + if (len + offset > (1 << 12)) + this_len = (1 << 12) - offset; + else + this_len = len; + + __acornscsi_in(host->base + (offset << 1), ptr, this_len); + + offset += this_len; + ptr += this_len; + len -= this_len; + + if (offset == (1 << 12)) { + offset = 0; + page ++; + writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG); + } + } + writeb(host->card.page_reg, host->fast + PAGE_REG); +} + +/* + * Prototype: void acornscsi_data_write(AS_Host *host, char *ptr, + * unsigned int start_addr, unsigned int length) + * Purpose : write data to DMA RAM + * Params : host - host to transfer from + * ptr - DRAM address + * start_addr - host mem address + * length - number of bytes to transfer + * Notes : this will only be one SG entry or less + */ +static +void acornscsi_data_write(AS_Host *host, char *ptr, + unsigned int start_addr, unsigned int length) +{ + extern void __acornscsi_out(void __iomem *, char *buf, int len); + unsigned int page, offset, len = length; + + page = (start_addr >> 12); + offset = start_addr & ((1 << 12) - 1); + + writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG); + + while (len > 0) { + unsigned int this_len; + + if (len + offset > (1 << 12)) + this_len = (1 << 12) - offset; + else + this_len = len; + + __acornscsi_out(host->base + (offset << 1), ptr, this_len); + + offset += this_len; + ptr += this_len; + len -= this_len; + + if (offset == (1 << 12)) { + offset = 0; + page ++; + writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG); + } + } + 
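/* All data has now been pushed through the 4 KB window (the low bits written to
 * PAGE_REG select which 4 KB page of the board RAM the window shows, and buffer
 * byte offsets are doubled because the RAM is accessed 16 bits at a time);
 * restore the default page register value before returning. */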
writeb(host->card.page_reg, host->fast + PAGE_REG); +} + +/* ========================================================================================= + * On-board DMA routines + */ +#ifdef USE_DMAC +/* + * Prototype: void acornscsi_dmastop(AS_Host *host) + * Purpose : stop all DMA + * Params : host - host on which to stop DMA + * Notes : This is called when leaving DATA IN/OUT phase, + * or when interface is RESET + */ +static inline +void acornscsi_dma_stop(AS_Host *host) +{ + dmac_write(host, DMAC_MASKREG, MASK_ON); + dmac_clearintr(host); + +#if (DEBUG & DEBUG_DMA) + DBG(host->SCpnt, acornscsi_dumpdma(host, "stop")); +#endif +} + +/* + * Function: void acornscsi_dma_setup(AS_Host *host, dmadir_t direction) + * Purpose : setup DMA controller for data transfer + * Params : host - host to setup + * direction - data transfer direction + * Notes : This is called when entering DATA I/O phase, not + * while we're in a DATA I/O phase + */ +static +void acornscsi_dma_setup(AS_Host *host, dmadir_t direction) +{ + unsigned int address, length, mode; + + host->dma.direction = direction; + + dmac_write(host, DMAC_MASKREG, MASK_ON); + + if (direction == DMA_OUT) { +#if (DEBUG & DEBUG_NO_WRITE) + if (NO_WRITE & (1 << host->SCpnt->device->id)) { + printk(KERN_CRIT "scsi%d.%c: I can't handle DMA_OUT!\n", + host->host->host_no, acornscsi_target(host)); + return; + } +#endif + mode = DMAC_WRITE; + } else + mode = DMAC_READ; + + /* + * Allocate some buffer space, limited to half the buffer size + */ + length = min_t(unsigned int, host->scsi.SCp.this_residual, DMAC_BUFFER_SIZE / 2); + if (length) { + host->dma.start_addr = address = host->dma.free_addr; + host->dma.free_addr = (host->dma.free_addr + length) & + (DMAC_BUFFER_SIZE - 1); + + /* + * Transfer data to DMA memory + */ + if (direction == DMA_OUT) + acornscsi_data_write(host, host->scsi.SCp.ptr, host->dma.start_addr, + length); + + length -= 1; + dmac_write(host, DMAC_TXCNTLO, length); + dmac_write(host, DMAC_TXCNTHI, length >> 8); + dmac_write(host, DMAC_TXADRLO, address); + dmac_write(host, DMAC_TXADRMD, address >> 8); + dmac_write(host, DMAC_TXADRHI, 0); + dmac_write(host, DMAC_MODECON, mode); + dmac_write(host, DMAC_MASKREG, MASK_OFF); + +#if (DEBUG & DEBUG_DMA) + DBG(host->SCpnt, acornscsi_dumpdma(host, "strt")); +#endif + host->dma.xfer_setup = 1; + } +} + +/* + * Function: void acornscsi_dma_cleanup(AS_Host *host) + * Purpose : ensure that all DMA transfers are up-to-date & host->scsi.SCp is correct + * Params : host - host to finish + * Notes : This is called when a command is: + * terminating, RESTORE_POINTERS, SAVE_POINTERS, DISCONNECT + * : This must not return until all transfers are completed. + */ +static +void acornscsi_dma_cleanup(AS_Host *host) +{ + dmac_write(host, DMAC_MASKREG, MASK_ON); + dmac_clearintr(host); + + /* + * Check for a pending transfer + */ + if (host->dma.xfer_required) { + host->dma.xfer_required = 0; + if (host->dma.direction == DMA_IN) + acornscsi_data_read(host, host->dma.xfer_ptr, + host->dma.xfer_start, host->dma.xfer_length); + } + + /* + * Has a transfer been setup? + */ + if (host->dma.xfer_setup) { + unsigned int transferred; + + host->dma.xfer_setup = 0; + +#if (DEBUG & DEBUG_DMA) + DBG(host->SCpnt, acornscsi_dumpdma(host, "cupi")); +#endif + + /* + * Calculate number of bytes transferred from DMA. 
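 * dmac_address() reassembles the controller's 24-bit current address from the
 * TXADRHI/TXADRMD/TXADRLO registers, so subtracting dma.start_addr gives the
 * number of bytes the DMAC actually moved since this transfer was programmed;
 * for DMA_IN those bytes are then copied out of the board RAM and the SCSI
 * data pointer is advanced by the same amount.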
+ */ + transferred = dmac_address(host) - host->dma.start_addr; + host->dma.transferred += transferred; + + if (host->dma.direction == DMA_IN) + acornscsi_data_read(host, host->scsi.SCp.ptr, + host->dma.start_addr, transferred); + + /* + * Update SCSI pointers + */ + acornscsi_data_updateptr(host, &host->scsi.SCp, transferred); +#if (DEBUG & DEBUG_DMA) + DBG(host->SCpnt, acornscsi_dumpdma(host, "cupo")); +#endif + } +} + +/* + * Function: void acornscsi_dmacintr(AS_Host *host) + * Purpose : handle interrupts from DMAC device + * Params : host - host to process + * Notes : If reading, we schedule the read to main memory & + * allow the transfer to continue. + * : If writing, we fill the onboard DMA memory from main + * memory. + * : Called whenever DMAC finished it's current transfer. + */ +static +void acornscsi_dma_intr(AS_Host *host) +{ + unsigned int address, length, transferred; + +#if (DEBUG & DEBUG_DMA) + DBG(host->SCpnt, acornscsi_dumpdma(host, "inti")); +#endif + + dmac_write(host, DMAC_MASKREG, MASK_ON); + dmac_clearintr(host); + + /* + * Calculate amount transferred via DMA + */ + transferred = dmac_address(host) - host->dma.start_addr; + host->dma.transferred += transferred; + + /* + * Schedule DMA transfer off board + */ + if (host->dma.direction == DMA_IN) { + host->dma.xfer_start = host->dma.start_addr; + host->dma.xfer_length = transferred; + host->dma.xfer_ptr = host->scsi.SCp.ptr; + host->dma.xfer_required = 1; + } + + acornscsi_data_updateptr(host, &host->scsi.SCp, transferred); + + /* + * Allocate some buffer space, limited to half the on-board RAM size + */ + length = min_t(unsigned int, host->scsi.SCp.this_residual, DMAC_BUFFER_SIZE / 2); + if (length) { + host->dma.start_addr = address = host->dma.free_addr; + host->dma.free_addr = (host->dma.free_addr + length) & + (DMAC_BUFFER_SIZE - 1); + + /* + * Transfer data to DMA memory + */ + if (host->dma.direction == DMA_OUT) + acornscsi_data_write(host, host->scsi.SCp.ptr, host->dma.start_addr, + length); + + length -= 1; + dmac_write(host, DMAC_TXCNTLO, length); + dmac_write(host, DMAC_TXCNTHI, length >> 8); + dmac_write(host, DMAC_TXADRLO, address); + dmac_write(host, DMAC_TXADRMD, address >> 8); + dmac_write(host, DMAC_TXADRHI, 0); + dmac_write(host, DMAC_MASKREG, MASK_OFF); + +#if (DEBUG & DEBUG_DMA) + DBG(host->SCpnt, acornscsi_dumpdma(host, "into")); +#endif + } else { + host->dma.xfer_setup = 0; +#if 0 + /* + * If the interface still wants more, then this is an error. + * We give it another byte, but we also attempt to raise an + * attention condition. We continue giving one byte until + * the device recognises the attention. + */ + if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) { + acornscsi_abortcmd(host); + + dmac_write(host, DMAC_TXCNTLO, 0); + dmac_write(host, DMAC_TXCNTHI, 0); + dmac_write(host, DMAC_TXADRLO, 0); + dmac_write(host, DMAC_TXADRMD, 0); + dmac_write(host, DMAC_TXADRHI, 0); + dmac_write(host, DMAC_MASKREG, MASK_OFF); + } +#endif + } +} + +/* + * Function: void acornscsi_dma_xfer(AS_Host *host) + * Purpose : transfer data between AcornSCSI and memory + * Params : host - host to process + */ +static +void acornscsi_dma_xfer(AS_Host *host) +{ + host->dma.xfer_required = 0; + + if (host->dma.direction == DMA_IN) + acornscsi_data_read(host, host->dma.xfer_ptr, + host->dma.xfer_start, host->dma.xfer_length); +} + +/* + * Function: void acornscsi_dma_adjust(AS_Host *host) + * Purpose : adjust DMA pointers & count for bytes transferred to + * SBIC but not SCSI bus. 
+ * Params : host - host to adjust DMA count for + */ +static +void acornscsi_dma_adjust(AS_Host *host) +{ + if (host->dma.xfer_setup) { + signed long transferred; +#if (DEBUG & (DEBUG_DMA|DEBUG_WRITE)) + DBG(host->SCpnt, acornscsi_dumpdma(host, "adji")); +#endif + /* + * Calculate correct DMA address - DMA is ahead of SCSI bus while + * writing. + * host->scsi.SCp.scsi_xferred is the number of bytes + * actually transferred to/from the SCSI bus. + * host->dma.transferred is the number of bytes transferred + * over DMA since host->dma.start_addr was last set. + * + * real_dma_addr = host->dma.start_addr + host->scsi.SCp.scsi_xferred + * - host->dma.transferred + */ + transferred = host->scsi.SCp.scsi_xferred - host->dma.transferred; + if (transferred < 0) + printk("scsi%d.%c: Ack! DMA write correction %ld < 0!\n", + host->host->host_no, acornscsi_target(host), transferred); + else if (transferred == 0) + host->dma.xfer_setup = 0; + else { + transferred += host->dma.start_addr; + dmac_write(host, DMAC_TXADRLO, transferred); + dmac_write(host, DMAC_TXADRMD, transferred >> 8); + dmac_write(host, DMAC_TXADRHI, transferred >> 16); +#if (DEBUG & (DEBUG_DMA|DEBUG_WRITE)) + DBG(host->SCpnt, acornscsi_dumpdma(host, "adjo")); +#endif + } + } +} +#endif + +/* ========================================================================================= + * Data I/O + */ +static int +acornscsi_write_pio(AS_Host *host, char *bytes, int *ptr, int len, unsigned int max_timeout) +{ + unsigned int asr, timeout = max_timeout; + int my_ptr = *ptr; + + while (my_ptr < len) { + asr = sbic_arm_read(host, SBIC_ASR); + + if (asr & ASR_DBR) { + timeout = max_timeout; + + sbic_arm_write(host, SBIC_DATA, bytes[my_ptr++]); + } else if (asr & ASR_INT) + break; + else if (--timeout == 0) + break; + udelay(1); + } + + *ptr = my_ptr; + + return (timeout == 0) ? 
-1 : 0; +} + +/* + * Function: void acornscsi_sendcommand(AS_Host *host) + * Purpose : send a command to a target + * Params : host - host which is connected to target + */ +static void +acornscsi_sendcommand(AS_Host *host) +{ + struct scsi_cmnd *SCpnt = host->SCpnt; + + sbic_arm_write(host, SBIC_TRANSCNTH, 0); + sbic_arm_writenext(host, 0); + sbic_arm_writenext(host, SCpnt->cmd_len - host->scsi.SCp.sent_command); + + acornscsi_sbic_issuecmd(host, CMND_XFERINFO); + + if (acornscsi_write_pio(host, SCpnt->cmnd, + (int *)&host->scsi.SCp.sent_command, SCpnt->cmd_len, 1000000)) + printk("scsi%d: timeout while sending command\n", host->host->host_no); + + host->scsi.phase = PHASE_COMMAND; +} + +static +void acornscsi_sendmessage(AS_Host *host) +{ + unsigned int message_length = msgqueue_msglength(&host->scsi.msgs); + unsigned int msgnr; + struct message *msg; + +#if (DEBUG & DEBUG_MESSAGES) + printk("scsi%d.%c: sending message ", + host->host->host_no, acornscsi_target(host)); +#endif + + switch (message_length) { + case 0: + acornscsi_sbic_issuecmd(host, CMND_XFERINFO | CMND_SBT); + + acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "sending message 1"); + + sbic_arm_write(host, SBIC_DATA, NOP); + + host->scsi.last_message = NOP; +#if (DEBUG & DEBUG_MESSAGES) + printk("NOP"); +#endif + break; + + case 1: + acornscsi_sbic_issuecmd(host, CMND_XFERINFO | CMND_SBT); + msg = msgqueue_getmsg(&host->scsi.msgs, 0); + + acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "sending message 2"); + + sbic_arm_write(host, SBIC_DATA, msg->msg[0]); + + host->scsi.last_message = msg->msg[0]; +#if (DEBUG & DEBUG_MESSAGES) + spi_print_msg(msg->msg); +#endif + break; + + default: + /* + * ANSI standard says: (SCSI-2 Rev 10c Sect 5.6.14) + * 'When a target sends this (MESSAGE_REJECT) message, it + * shall change to MESSAGE IN phase and send this message + * prior to requesting additional message bytes from the + * initiator. This provides an interlock so that the + * initiator can determine which message byte is rejected. 
+ */ + sbic_arm_write(host, SBIC_TRANSCNTH, 0); + sbic_arm_writenext(host, 0); + sbic_arm_writenext(host, message_length); + acornscsi_sbic_issuecmd(host, CMND_XFERINFO); + + msgnr = 0; + while ((msg = msgqueue_getmsg(&host->scsi.msgs, msgnr++)) != NULL) { + unsigned int i; +#if (DEBUG & DEBUG_MESSAGES) + spi_print_msg(msg); +#endif + i = 0; + if (acornscsi_write_pio(host, msg->msg, &i, msg->length, 1000000)) + printk("scsi%d: timeout while sending message\n", host->host->host_no); + + host->scsi.last_message = msg->msg[0]; + if (msg->msg[0] == EXTENDED_MESSAGE) + host->scsi.last_message |= msg->msg[2] << 8; + + if (i != msg->length) + break; + } + break; + } +#if (DEBUG & DEBUG_MESSAGES) + printk("\n"); +#endif +} + +/* + * Function: void acornscsi_readstatusbyte(AS_Host *host) + * Purpose : Read status byte from connected target + * Params : host - host connected to target + */ +static +void acornscsi_readstatusbyte(AS_Host *host) +{ + acornscsi_sbic_issuecmd(host, CMND_XFERINFO|CMND_SBT); + acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "reading status byte"); + host->scsi.SCp.Status = sbic_arm_read(host, SBIC_DATA); +} + +/* + * Function: unsigned char acornscsi_readmessagebyte(AS_Host *host) + * Purpose : Read one message byte from connected target + * Params : host - host connected to target + */ +static +unsigned char acornscsi_readmessagebyte(AS_Host *host) +{ + unsigned char message; + + acornscsi_sbic_issuecmd(host, CMND_XFERINFO | CMND_SBT); + + acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "for message byte"); + + message = sbic_arm_read(host, SBIC_DATA); + + /* wait for MSGIN-XFER-PAUSED */ + acornscsi_sbic_wait(host, ASR_INT, ASR_INT, 1000, "for interrupt after message byte"); + + sbic_arm_read(host, SBIC_SSR); + + return message; +} + +/* + * Function: void acornscsi_message(AS_Host *host) + * Purpose : Read complete message from connected target & action message + * Params : host - host connected to target + */ +static +void acornscsi_message(AS_Host *host) +{ + struct scsi_pointer *scsi_pointer; + unsigned char message[16]; + unsigned int msgidx = 0, msglen = 1; + + do { + message[msgidx] = acornscsi_readmessagebyte(host); + + switch (msgidx) { + case 0: + if (message[0] == EXTENDED_MESSAGE || + (message[0] >= 0x20 && message[0] <= 0x2f)) + msglen = 2; + break; + + case 1: + if (message[0] == EXTENDED_MESSAGE) + msglen += message[msgidx]; + break; + } + msgidx += 1; + if (msgidx < msglen) { + acornscsi_sbic_issuecmd(host, CMND_NEGATEACK); + + /* wait for next msg-in */ + acornscsi_sbic_wait(host, ASR_INT, ASR_INT, 1000, "for interrupt after negate ack"); + sbic_arm_read(host, SBIC_SSR); + } + } while (msgidx < msglen); + +#if (DEBUG & DEBUG_MESSAGES) + printk("scsi%d.%c: message in: ", + host->host->host_no, acornscsi_target(host)); + spi_print_msg(message); + printk("\n"); +#endif + + if (host->scsi.phase == PHASE_RECONNECTED) { + /* + * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.17) + * 'Whenever a target reconnects to an initiator to continue + * a tagged I/O process, the SIMPLE QUEUE TAG message shall + * be sent immediately following the IDENTIFY message...' 
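The read loop in acornscsi_message() above works out how long a message is as it goes: one byte by default, two bytes for the 0x20-0x2f group, and for EXTENDED_MESSAGE the second byte gives the number of bytes that still follow. A stand-alone sketch of that length rule, assuming the first two bytes are already buffered (which the driver does not assume), might look like this; the helper name is invented.

#include <scsi/scsi.h>          /* EXTENDED_MESSAGE */

/* Sketch: total length in bytes of a SCSI message, from its first two bytes. */
static unsigned int scsi_msg_total_length(const unsigned char *msg)
{
        if (msg[0] == EXTENDED_MESSAGE)
                return 2 + msg[1];              /* opcode + length byte, then msg[1] more bytes */
        if (msg[0] >= 0x20 && msg[0] <= 0x2f)
                return 2;                       /* two-byte message group */
        return 1;                               /* everything else is a single byte */
}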
+ */ + if (message[0] == SIMPLE_QUEUE_TAG) + host->scsi.reconnected.tag = message[1]; + if (acornscsi_reconnect_finish(host)) + host->scsi.phase = PHASE_MSGIN; + } + + switch (message[0]) { + case ABORT_TASK_SET: + case ABORT_TASK: + case COMMAND_COMPLETE: + if (host->scsi.phase != PHASE_STATUSIN) { + printk(KERN_ERR "scsi%d.%c: command complete following non-status in phase?\n", + host->host->host_no, acornscsi_target(host)); + acornscsi_dumplog(host, host->SCpnt->device->id); + } + host->scsi.phase = PHASE_DONE; + host->scsi.SCp.Message = message[0]; + break; + + case SAVE_POINTERS: + /* + * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.20) + * 'The SAVE DATA POINTER message is sent from a target to + * direct the initiator to copy the active data pointer to + * the saved data pointer for the current I/O process. + */ + acornscsi_dma_cleanup(host); + scsi_pointer = arm_scsi_pointer(host->SCpnt); + *scsi_pointer = host->scsi.SCp; + scsi_pointer->sent_command = 0; + host->scsi.phase = PHASE_MSGIN; + break; + + case RESTORE_POINTERS: + /* + * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.19) + * 'The RESTORE POINTERS message is sent from a target to + * direct the initiator to copy the most recently saved + * command, data, and status pointers for the I/O process + * to the corresponding active pointers. The command and + * status pointers shall be restored to the beginning of + * the present command and status areas.' + */ + acornscsi_dma_cleanup(host); + host->scsi.SCp = *arm_scsi_pointer(host->SCpnt); + host->scsi.phase = PHASE_MSGIN; + break; + + case DISCONNECT: + /* + * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 6.4.2) + * 'On those occasions when an error or exception condition occurs + * and the target elects to repeat the information transfer, the + * target may repeat the transfer either issuing a RESTORE POINTERS + * message or by disconnecting without issuing a SAVE POINTERS + * message. When reconnection is completed, the most recent + * saved pointer values are restored.' + */ + acornscsi_dma_cleanup(host); + host->scsi.phase = PHASE_DISCONNECT; + break; + + case MESSAGE_REJECT: +#if 0 /* this isn't needed any more */ + /* + * If we were negociating sync transfer, we don't yet know if + * this REJECT is for the sync transfer or for the tagged queue/wide + * transfer. Re-initiate sync transfer negotiation now, and if + * we got a REJECT in response to SDTR, then it'll be set to DONE. + */ + if (host->device[host->SCpnt->device->id].sync_state == SYNC_SENT_REQUEST) + host->device[host->SCpnt->device->id].sync_state = SYNC_NEGOCIATE; +#endif + + /* + * If we have any messages waiting to go out, then assert ATN now + */ + if (msgqueue_msglength(&host->scsi.msgs)) + acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); + + switch (host->scsi.last_message) { + case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8): + /* + * Target can't handle synchronous transfers + */ + printk(KERN_NOTICE "scsi%d.%c: Using asynchronous transfer\n", + host->host->host_no, acornscsi_target(host)); + host->device[host->SCpnt->device->id].sync_xfer = SYNCHTRANSFER_2DBA; + host->device[host->SCpnt->device->id].sync_state = SYNC_ASYNCHRONOUS; + sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer); + break; + + default: + break; + } + break; + + case SIMPLE_QUEUE_TAG: + /* tag queue reconnect... message[1] = queue tag. Print something to indicate something happened! 
*/ + printk("scsi%d.%c: reconnect queue tag %02X\n", + host->host->host_no, acornscsi_target(host), + message[1]); + break; + + case EXTENDED_MESSAGE: + switch (message[2]) { +#ifdef CONFIG_SCSI_ACORNSCSI_SYNC + case EXTENDED_SDTR: + if (host->device[host->SCpnt->device->id].sync_state == SYNC_SENT_REQUEST) { + /* + * We requested synchronous transfers. This isn't quite right... + * We can only say if this succeeded if we proceed on to execute the + * command from this message. If we get a MESSAGE PARITY ERROR, + * and the target retries fail, then we fallback to asynchronous mode + */ + host->device[host->SCpnt->device->id].sync_state = SYNC_COMPLETED; + printk(KERN_NOTICE "scsi%d.%c: Using synchronous transfer, offset %d, %d ns\n", + host->host->host_no, acornscsi_target(host), + message[4], message[3] * 4); + host->device[host->SCpnt->device->id].sync_xfer = + calc_sync_xfer(message[3] * 4, message[4]); + } else { + unsigned char period, length; + /* + * Target requested synchronous transfers. The agreement is only + * to be in operation AFTER the target leaves message out phase. + */ + acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); + period = max_t(unsigned int, message[3], sdtr_period / 4); + length = min_t(unsigned int, message[4], sdtr_size); + msgqueue_addmsg(&host->scsi.msgs, 5, EXTENDED_MESSAGE, 3, + EXTENDED_SDTR, period, length); + host->device[host->SCpnt->device->id].sync_xfer = + calc_sync_xfer(period * 4, length); + } + sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer); + break; +#else + /* We do not accept synchronous transfers. Respond with a + * MESSAGE_REJECT. + */ +#endif + + case EXTENDED_WDTR: + /* The WD33C93A is only 8-bit. We respond with a MESSAGE_REJECT + * to a wide data transfer request. + */ + default: + acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); + msgqueue_flush(&host->scsi.msgs); + msgqueue_addmsg(&host->scsi.msgs, 1, MESSAGE_REJECT); + break; + } + break; + + default: /* reject message */ + printk(KERN_ERR "scsi%d.%c: unrecognised message %02X, rejecting\n", + host->host->host_no, acornscsi_target(host), + message[0]); + acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); + msgqueue_flush(&host->scsi.msgs); + msgqueue_addmsg(&host->scsi.msgs, 1, MESSAGE_REJECT); + host->scsi.phase = PHASE_MSGIN; + break; + } + acornscsi_sbic_issuecmd(host, CMND_NEGATEACK); +} + +/* + * Function: int acornscsi_buildmessages(AS_Host *host) + * Purpose : build the connection messages for a host + * Params : host - host to add messages to + */ +static +void acornscsi_buildmessages(AS_Host *host) +{ +#if 0 + /* does the device need resetting? 
*/ + if (cmd_reset) { + msgqueue_addmsg(&host->scsi.msgs, 1, BUS_DEVICE_RESET); + return; + } +#endif + + msgqueue_addmsg(&host->scsi.msgs, 1, + IDENTIFY(host->device[host->SCpnt->device->id].disconnect_ok, + host->SCpnt->device->lun)); + +#if 0 + /* does the device need the current command aborted */ + if (cmd_aborted) { + acornscsi_abortcmd(host); + return; + } +#endif + + +#ifdef CONFIG_SCSI_ACORNSCSI_SYNC + if (host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) { + host->device[host->SCpnt->device->id].sync_state = SYNC_SENT_REQUEST; + msgqueue_addmsg(&host->scsi.msgs, 5, + EXTENDED_MESSAGE, 3, EXTENDED_SDTR, + sdtr_period / 4, sdtr_size); + } +#endif +} + +/* + * Function: int acornscsi_starttransfer(AS_Host *host) + * Purpose : transfer data to/from connected target + * Params : host - host to which target is connected + * Returns : 0 if failure + */ +static +int acornscsi_starttransfer(AS_Host *host) +{ + int residual; + + if (!host->scsi.SCp.ptr /*&& host->scsi.SCp.this_residual*/) { + printk(KERN_ERR "scsi%d.%c: null buffer passed to acornscsi_starttransfer\n", + host->host->host_no, acornscsi_target(host)); + return 0; + } + + residual = scsi_bufflen(host->SCpnt) - host->scsi.SCp.scsi_xferred; + + sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer); + sbic_arm_writenext(host, residual >> 16); + sbic_arm_writenext(host, residual >> 8); + sbic_arm_writenext(host, residual); + acornscsi_sbic_issuecmd(host, CMND_XFERINFO); + return 1; +} + +/* ========================================================================================= + * Connection & Disconnection + */ +/* + * Function : acornscsi_reconnect(AS_Host *host) + * Purpose : reconnect a previously disconnected command + * Params : host - host specific data + * Remarks : SCSI spec says: + * 'The set of active pointers is restored from the set + * of saved pointers upon reconnection of the I/O process' + */ +static +int acornscsi_reconnect(AS_Host *host) +{ + unsigned int target, lun, ok = 0; + + target = sbic_arm_read(host, SBIC_SOURCEID); + + if (!(target & 8)) + printk(KERN_ERR "scsi%d: invalid source id after reselection " + "- device fault?\n", + host->host->host_no); + + target &= 7; + + if (host->SCpnt && !host->scsi.disconnectable) { + printk(KERN_ERR "scsi%d.%d: reconnected while command in " + "progress to target %d?\n", + host->host->host_no, target, host->SCpnt->device->id); + host->SCpnt = NULL; + } + + lun = sbic_arm_read(host, SBIC_DATA) & 7; + + host->scsi.reconnected.target = target; + host->scsi.reconnected.lun = lun; + host->scsi.reconnected.tag = 0; + + if (host->scsi.disconnectable && host->SCpnt && + host->SCpnt->device->id == target && host->SCpnt->device->lun == lun) + ok = 1; + + if (!ok && queue_probetgtlun(&host->queues.disconnected, target, lun)) + ok = 1; + + ADD_STATUS(target, 0x81, host->scsi.phase, 0); + + if (ok) { + host->scsi.phase = PHASE_RECONNECTED; + } else { + /* this doesn't seem to work */ + printk(KERN_ERR "scsi%d.%c: reselected with no command " + "to reconnect with\n", + host->host->host_no, '0' + target); + acornscsi_dumplog(host, target); + acornscsi_abortcmd(host); + if (host->SCpnt) { + queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); + host->SCpnt = NULL; + } + } + acornscsi_sbic_issuecmd(host, CMND_NEGATEACK); + return !ok; +} + +/* + * Function: int acornscsi_reconnect_finish(AS_Host *host) + * Purpose : finish reconnecting a command + * Params : host - host to complete + * Returns : 0 if failed + */ +static 
+int acornscsi_reconnect_finish(AS_Host *host) +{ + if (host->scsi.disconnectable && host->SCpnt) { + host->scsi.disconnectable = 0; + if (host->SCpnt->device->id == host->scsi.reconnected.target && + host->SCpnt->device->lun == host->scsi.reconnected.lun && + scsi_cmd_to_rq(host->SCpnt)->tag == host->scsi.reconnected.tag) { +#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) + DBG(host->SCpnt, printk("scsi%d.%c: reconnected", + host->host->host_no, acornscsi_target(host))); +#endif + } else { + queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); +#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) + DBG(host->SCpnt, printk("scsi%d.%c: had to move command " + "to disconnected queue\n", + host->host->host_no, acornscsi_target(host))); +#endif + host->SCpnt = NULL; + } + } + if (!host->SCpnt) { + host->SCpnt = queue_remove_tgtluntag(&host->queues.disconnected, + host->scsi.reconnected.target, + host->scsi.reconnected.lun, + host->scsi.reconnected.tag); +#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) + DBG(host->SCpnt, printk("scsi%d.%c: had to get command", + host->host->host_no, acornscsi_target(host))); +#endif + } + + if (!host->SCpnt) + acornscsi_abortcmd(host); + else { + /* + * Restore data pointer from SAVED pointers. + */ + host->scsi.SCp = *arm_scsi_pointer(host->SCpnt); +#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) + printk(", data pointers: [%p, %X]", + host->scsi.SCp.ptr, host->scsi.SCp.this_residual); +#endif + } +#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) + printk("\n"); +#endif + + host->dma.transferred = host->scsi.SCp.scsi_xferred; + + return host->SCpnt != NULL; +} + +/* + * Function: void acornscsi_disconnect_unexpected(AS_Host *host) + * Purpose : handle an unexpected disconnect + * Params : host - host on which disconnect occurred + */ +static +void acornscsi_disconnect_unexpected(AS_Host *host) +{ + printk(KERN_ERR "scsi%d.%c: unexpected disconnect\n", + host->host->host_no, acornscsi_target(host)); +#if (DEBUG & DEBUG_ABORT) + acornscsi_dumplog(host, 8); +#endif + + acornscsi_done(host, &host->SCpnt, DID_ERROR); +} + +/* + * Function: void acornscsi_abortcmd(AS_host *host, unsigned char tag) + * Purpose : abort a currently executing command + * Params : host - host with connected command to abort + */ +static +void acornscsi_abortcmd(AS_Host *host) +{ + host->scsi.phase = PHASE_ABORTED; + sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN); + + msgqueue_flush(&host->scsi.msgs); + msgqueue_addmsg(&host->scsi.msgs, 1, ABORT); +} + +/* ========================================================================================== + * Interrupt routines. 
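+ * acornscsi_intr() reads the card's interrupt status register and hands SBIC interrupts to acornscsi_sbicintr(), which dispatches on the SSR value and the current driver phase.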
+ */ +/* + * Function: int acornscsi_sbicintr(AS_Host *host) + * Purpose : handle interrupts from SCSI device + * Params : host - host to process + * Returns : INTR_PROCESS if expecting another SBIC interrupt + * INTR_IDLE if no interrupt + * INTR_NEXT_COMMAND if we have finished processing the command + */ +static +intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq) +{ + unsigned int asr, ssr; + + asr = sbic_arm_read(host, SBIC_ASR); + if (!(asr & ASR_INT)) + return INTR_IDLE; + + ssr = sbic_arm_read(host, SBIC_SSR); + +#if (DEBUG & DEBUG_PHASES) + print_sbic_status(asr, ssr, host->scsi.phase); +#endif + + ADD_STATUS(8, ssr, host->scsi.phase, in_irq); + + if (host->SCpnt && !host->scsi.disconnectable) + ADD_STATUS(host->SCpnt->device->id, ssr, host->scsi.phase, in_irq); + + switch (ssr) { + case 0x00: /* reset state - not advanced */ + printk(KERN_ERR "scsi%d: reset in standard mode but wanted advanced mode.\n", + host->host->host_no); + /* setup sbic - WD33C93A */ + sbic_arm_write(host, SBIC_OWNID, OWNID_EAF | host->host->this_id); + sbic_arm_write(host, SBIC_CMND, CMND_RESET); + return INTR_IDLE; + + case 0x01: /* reset state - advanced */ + sbic_arm_write(host, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI); + sbic_arm_write(host, SBIC_TIMEOUT, TIMEOUT_TIME); + sbic_arm_write(host, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA); + sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP); + msgqueue_flush(&host->scsi.msgs); + return INTR_IDLE; + + case 0x41: /* unexpected disconnect aborted command */ + acornscsi_disconnect_unexpected(host); + return INTR_NEXT_COMMAND; + } + + switch (host->scsi.phase) { + case PHASE_CONNECTING: /* STATE: command removed from issue queue */ + switch (ssr) { + case 0x11: /* -> PHASE_CONNECTED */ + /* BUS FREE -> SELECTION */ + host->scsi.phase = PHASE_CONNECTED; + msgqueue_flush(&host->scsi.msgs); + host->dma.transferred = host->scsi.SCp.scsi_xferred; + /* 33C93 gives next interrupt indicating bus phase */ + asr = sbic_arm_read(host, SBIC_ASR); + if (!(asr & ASR_INT)) + break; + ssr = sbic_arm_read(host, SBIC_SSR); + ADD_STATUS(8, ssr, host->scsi.phase, 1); + ADD_STATUS(host->SCpnt->device->id, ssr, host->scsi.phase, 1); + goto connected; + + case 0x42: /* select timed out */ + /* -> PHASE_IDLE */ + acornscsi_done(host, &host->SCpnt, DID_NO_CONNECT); + return INTR_NEXT_COMMAND; + + case 0x81: /* -> PHASE_RECONNECTED or PHASE_ABORTED */ + /* BUS FREE -> RESELECTION */ + host->origSCpnt = host->SCpnt; + host->SCpnt = NULL; + msgqueue_flush(&host->scsi.msgs); + acornscsi_reconnect(host); + break; + + default: + printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTING, SSR %02X?\n", + host->host->host_no, acornscsi_target(host), ssr); + acornscsi_dumplog(host, host->SCpnt ? 
host->SCpnt->device->id : 8); + acornscsi_abortcmd(host); + } + return INTR_PROCESSING; + + connected: + case PHASE_CONNECTED: /* STATE: device selected ok */ + switch (ssr) { +#ifdef NONSTANDARD + case 0x8a: /* -> PHASE_COMMAND, PHASE_COMMANDPAUSED */ + /* SELECTION -> COMMAND */ + acornscsi_sendcommand(host); + break; + + case 0x8b: /* -> PHASE_STATUS */ + /* SELECTION -> STATUS */ + acornscsi_readstatusbyte(host); + host->scsi.phase = PHASE_STATUSIN; + break; +#endif + + case 0x8e: /* -> PHASE_MSGOUT */ + /* SELECTION ->MESSAGE OUT */ + host->scsi.phase = PHASE_MSGOUT; + acornscsi_buildmessages(host); + acornscsi_sendmessage(host); + break; + + /* these should not happen */ + case 0x85: /* target disconnected */ + acornscsi_done(host, &host->SCpnt, DID_ERROR); + break; + + default: + printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTED, SSR %02X?\n", + host->host->host_no, acornscsi_target(host), ssr); + acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); + acornscsi_abortcmd(host); + } + return INTR_PROCESSING; + + case PHASE_MSGOUT: /* STATE: connected & sent IDENTIFY message */ + /* + * SCSI standard says that MESSAGE OUT phases can be followed by a + * DATA phase, STATUS phase, MESSAGE IN phase or COMMAND phase + */ + switch (ssr) { + case 0x8a: /* -> PHASE_COMMAND, PHASE_COMMANDPAUSED */ + case 0x1a: /* -> PHASE_COMMAND, PHASE_COMMANDPAUSED */ + /* MESSAGE OUT -> COMMAND */ + acornscsi_sendcommand(host); + break; + + case 0x8b: /* -> PHASE_STATUS */ + case 0x1b: /* -> PHASE_STATUS */ + /* MESSAGE OUT -> STATUS */ + acornscsi_readstatusbyte(host); + host->scsi.phase = PHASE_STATUSIN; + break; + + case 0x8e: /* -> PHASE_MSGOUT */ + /* MESSAGE_OUT(MESSAGE_IN) ->MESSAGE OUT */ + acornscsi_sendmessage(host); + break; + + case 0x4f: /* -> PHASE_MSGIN, PHASE_DISCONNECT */ + case 0x1f: /* -> PHASE_MSGIN, PHASE_DISCONNECT */ + /* MESSAGE OUT -> MESSAGE IN */ + acornscsi_message(host); + break; + + default: + printk(KERN_ERR "scsi%d.%c: PHASE_MSGOUT, SSR %02X?\n", + host->host->host_no, acornscsi_target(host), ssr); + acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); + } + return INTR_PROCESSING; + + case PHASE_COMMAND: /* STATE: connected & command sent */ + switch (ssr) { + case 0x18: /* -> PHASE_DATAOUT */ + /* COMMAND -> DATA OUT */ + if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len) + acornscsi_abortcmd(host); + acornscsi_dma_setup(host, DMA_OUT); + if (!acornscsi_starttransfer(host)) + acornscsi_abortcmd(host); + host->scsi.phase = PHASE_DATAOUT; + return INTR_IDLE; + + case 0x19: /* -> PHASE_DATAIN */ + /* COMMAND -> DATA IN */ + if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len) + acornscsi_abortcmd(host); + acornscsi_dma_setup(host, DMA_IN); + if (!acornscsi_starttransfer(host)) + acornscsi_abortcmd(host); + host->scsi.phase = PHASE_DATAIN; + return INTR_IDLE; + + case 0x1b: /* -> PHASE_STATUS */ + /* COMMAND -> STATUS */ + acornscsi_readstatusbyte(host); + host->scsi.phase = PHASE_STATUSIN; + break; + + case 0x1e: /* -> PHASE_MSGOUT */ + /* COMMAND -> MESSAGE OUT */ + acornscsi_sendmessage(host); + break; + + case 0x1f: /* -> PHASE_MSGIN, PHASE_DISCONNECT */ + /* COMMAND -> MESSAGE IN */ + acornscsi_message(host); + break; + + default: + printk(KERN_ERR "scsi%d.%c: PHASE_COMMAND, SSR %02X?\n", + host->host->host_no, acornscsi_target(host), ssr); + acornscsi_dumplog(host, host->SCpnt ? 
host->SCpnt->device->id : 8); + } + return INTR_PROCESSING; + + case PHASE_DISCONNECT: /* STATE: connected, received DISCONNECT msg */ + if (ssr == 0x85) { /* -> PHASE_IDLE */ + host->scsi.disconnectable = 1; + host->scsi.reconnected.tag = 0; + host->scsi.phase = PHASE_IDLE; + host->stats.disconnects += 1; + } else { + printk(KERN_ERR "scsi%d.%c: PHASE_DISCONNECT, SSR %02X instead of disconnect?\n", + host->host->host_no, acornscsi_target(host), ssr); + acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); + } + return INTR_NEXT_COMMAND; + + case PHASE_IDLE: /* STATE: disconnected */ + if (ssr == 0x81) /* -> PHASE_RECONNECTED or PHASE_ABORTED */ + acornscsi_reconnect(host); + else { + printk(KERN_ERR "scsi%d.%c: PHASE_IDLE, SSR %02X while idle?\n", + host->host->host_no, acornscsi_target(host), ssr); + acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); + } + return INTR_PROCESSING; + + case PHASE_RECONNECTED: /* STATE: device reconnected to initiator */ + /* + * Command reconnected - if MESGIN, get message - it may be + * the tag. If not, get command out of disconnected queue + */ + /* + * If we reconnected and we're not in MESSAGE IN phase after IDENTIFY, + * reconnect I_T_L command + */ + if (ssr != 0x8f && !acornscsi_reconnect_finish(host)) + return INTR_IDLE; + ADD_STATUS(host->SCpnt->device->id, ssr, host->scsi.phase, in_irq); + switch (ssr) { + case 0x88: /* data out phase */ + /* -> PHASE_DATAOUT */ + /* MESSAGE IN -> DATA OUT */ + acornscsi_dma_setup(host, DMA_OUT); + if (!acornscsi_starttransfer(host)) + acornscsi_abortcmd(host); + host->scsi.phase = PHASE_DATAOUT; + return INTR_IDLE; + + case 0x89: /* data in phase */ + /* -> PHASE_DATAIN */ + /* MESSAGE IN -> DATA IN */ + acornscsi_dma_setup(host, DMA_IN); + if (!acornscsi_starttransfer(host)) + acornscsi_abortcmd(host); + host->scsi.phase = PHASE_DATAIN; + return INTR_IDLE; + + case 0x8a: /* command out */ + /* MESSAGE IN -> COMMAND */ + acornscsi_sendcommand(host);/* -> PHASE_COMMAND, PHASE_COMMANDPAUSED */ + break; + + case 0x8b: /* status in */ + /* -> PHASE_STATUSIN */ + /* MESSAGE IN -> STATUS */ + acornscsi_readstatusbyte(host); + host->scsi.phase = PHASE_STATUSIN; + break; + + case 0x8e: /* message out */ + /* -> PHASE_MSGOUT */ + /* MESSAGE IN -> MESSAGE OUT */ + acornscsi_sendmessage(host); + break; + + case 0x8f: /* message in */ + acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */ + break; + + default: + printk(KERN_ERR "scsi%d.%c: PHASE_RECONNECTED, SSR %02X after reconnect?\n", + host->host->host_no, acornscsi_target(host), ssr); + acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); + } + return INTR_PROCESSING; + + case PHASE_DATAIN: /* STATE: transferred data in */ + /* + * This is simple - if we disconnect then the DMA address & count is + * correct. 
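+ * On leaving DATA IN for STATUS, MESSAGE OUT or MESSAGE IN below, scsi_xferred is recomputed from the SBIC transfer counter and the DMA is stopped; the DATA OUT phase does the same but also calls acornscsi_dma_adjust() since the DMA may run ahead of the chip.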
+ */ + switch (ssr) { + case 0x19: /* -> PHASE_DATAIN */ + case 0x89: /* -> PHASE_DATAIN */ + acornscsi_abortcmd(host); + return INTR_IDLE; + + case 0x1b: /* -> PHASE_STATUSIN */ + case 0x4b: /* -> PHASE_STATUSIN */ + case 0x8b: /* -> PHASE_STATUSIN */ + /* DATA IN -> STATUS */ + host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - + acornscsi_sbic_xfcount(host); + acornscsi_dma_stop(host); + acornscsi_readstatusbyte(host); + host->scsi.phase = PHASE_STATUSIN; + break; + + case 0x1e: /* -> PHASE_MSGOUT */ + case 0x4e: /* -> PHASE_MSGOUT */ + case 0x8e: /* -> PHASE_MSGOUT */ + /* DATA IN -> MESSAGE OUT */ + host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - + acornscsi_sbic_xfcount(host); + acornscsi_dma_stop(host); + acornscsi_sendmessage(host); + break; + + case 0x1f: /* message in */ + case 0x4f: /* message in */ + case 0x8f: /* message in */ + /* DATA IN -> MESSAGE IN */ + host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - + acornscsi_sbic_xfcount(host); + acornscsi_dma_stop(host); + acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */ + break; + + default: + printk(KERN_ERR "scsi%d.%c: PHASE_DATAIN, SSR %02X?\n", + host->host->host_no, acornscsi_target(host), ssr); + acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); + } + return INTR_PROCESSING; + + case PHASE_DATAOUT: /* STATE: transferred data out */ + /* + * This is more complicated - if we disconnect, the DMA could be 12 + * bytes ahead of us. We need to correct this. + */ + switch (ssr) { + case 0x18: /* -> PHASE_DATAOUT */ + case 0x88: /* -> PHASE_DATAOUT */ + acornscsi_abortcmd(host); + return INTR_IDLE; + + case 0x1b: /* -> PHASE_STATUSIN */ + case 0x4b: /* -> PHASE_STATUSIN */ + case 0x8b: /* -> PHASE_STATUSIN */ + /* DATA OUT -> STATUS */ + host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - + acornscsi_sbic_xfcount(host); + acornscsi_dma_stop(host); + acornscsi_dma_adjust(host); + acornscsi_readstatusbyte(host); + host->scsi.phase = PHASE_STATUSIN; + break; + + case 0x1e: /* -> PHASE_MSGOUT */ + case 0x4e: /* -> PHASE_MSGOUT */ + case 0x8e: /* -> PHASE_MSGOUT */ + /* DATA OUT -> MESSAGE OUT */ + host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - + acornscsi_sbic_xfcount(host); + acornscsi_dma_stop(host); + acornscsi_dma_adjust(host); + acornscsi_sendmessage(host); + break; + + case 0x1f: /* message in */ + case 0x4f: /* message in */ + case 0x8f: /* message in */ + /* DATA OUT -> MESSAGE IN */ + host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - + acornscsi_sbic_xfcount(host); + acornscsi_dma_stop(host); + acornscsi_dma_adjust(host); + acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */ + break; + + default: + printk(KERN_ERR "scsi%d.%c: PHASE_DATAOUT, SSR %02X?\n", + host->host->host_no, acornscsi_target(host), ssr); + acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); + } + return INTR_PROCESSING; + + case PHASE_STATUSIN: /* STATE: status in complete */ + switch (ssr) { + case 0x1f: /* -> PHASE_MSGIN, PHASE_DONE, PHASE_DISCONNECT */ + case 0x8f: /* -> PHASE_MSGIN, PHASE_DONE, PHASE_DISCONNECT */ + /* STATUS -> MESSAGE IN */ + acornscsi_message(host); + break; + + case 0x1e: /* -> PHASE_MSGOUT */ + case 0x8e: /* -> PHASE_MSGOUT */ + /* STATUS -> MESSAGE OUT */ + acornscsi_sendmessage(host); + break; + + default: + printk(KERN_ERR "scsi%d.%c: PHASE_STATUSIN, SSR %02X instead of MESSAGE_IN?\n", + host->host->host_no, acornscsi_target(host), ssr); + acornscsi_dumplog(host, host->SCpnt ? 
host->SCpnt->device->id : 8); + } + return INTR_PROCESSING; + + case PHASE_MSGIN: /* STATE: message in */ + switch (ssr) { + case 0x1e: /* -> PHASE_MSGOUT */ + case 0x4e: /* -> PHASE_MSGOUT */ + case 0x8e: /* -> PHASE_MSGOUT */ + /* MESSAGE IN -> MESSAGE OUT */ + acornscsi_sendmessage(host); + break; + + case 0x1f: /* -> PHASE_MSGIN, PHASE_DONE, PHASE_DISCONNECT */ + case 0x2f: + case 0x4f: + case 0x8f: + acornscsi_message(host); + break; + + case 0x85: + printk("scsi%d.%c: strange message in disconnection\n", + host->host->host_no, acornscsi_target(host)); + acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); + acornscsi_done(host, &host->SCpnt, DID_ERROR); + break; + + default: + printk(KERN_ERR "scsi%d.%c: PHASE_MSGIN, SSR %02X after message in?\n", + host->host->host_no, acornscsi_target(host), ssr); + acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); + } + return INTR_PROCESSING; + + case PHASE_DONE: /* STATE: received status & message */ + switch (ssr) { + case 0x85: /* -> PHASE_IDLE */ + acornscsi_done(host, &host->SCpnt, DID_OK); + return INTR_NEXT_COMMAND; + + case 0x1e: + case 0x8e: + acornscsi_sendmessage(host); + break; + + default: + printk(KERN_ERR "scsi%d.%c: PHASE_DONE, SSR %02X instead of disconnect?\n", + host->host->host_no, acornscsi_target(host), ssr); + acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); + } + return INTR_PROCESSING; + + case PHASE_ABORTED: + switch (ssr) { + case 0x85: + if (host->SCpnt) + acornscsi_done(host, &host->SCpnt, DID_ABORT); + else { + clear_bit(host->scsi.reconnected.target * 8 + host->scsi.reconnected.lun, + host->busyluns); + host->scsi.phase = PHASE_IDLE; + } + return INTR_NEXT_COMMAND; + + case 0x1e: + case 0x2e: + case 0x4e: + case 0x8e: + acornscsi_sendmessage(host); + break; + + default: + printk(KERN_ERR "scsi%d.%c: PHASE_ABORTED, SSR %02X?\n", + host->host->host_no, acornscsi_target(host), ssr); + acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); + } + return INTR_PROCESSING; + + default: + printk(KERN_ERR "scsi%d.%c: unknown driver phase %d\n", + host->host->host_no, acornscsi_target(host), ssr); + acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); + } + return INTR_PROCESSING; +} + +/* + * Prototype: void acornscsi_intr(int irq, void *dev_id) + * Purpose : handle interrupts from Acorn SCSI card + * Params : irq - interrupt number + * dev_id - device specific data (AS_Host structure) + */ +static irqreturn_t +acornscsi_intr(int irq, void *dev_id) +{ + AS_Host *host = (AS_Host *)dev_id; + intr_ret_t ret; + int iostatus; + int in_irq = 0; + + do { + ret = INTR_IDLE; + + iostatus = readb(host->fast + INT_REG); + + if (iostatus & 2) { + acornscsi_dma_intr(host); + iostatus = readb(host->fast + INT_REG); + } + + if (iostatus & 8) + ret = acornscsi_sbicintr(host, in_irq); + + /* + * If we have a transfer pending, start it. + * Only start it if the interface has already started transferring + * it's data + */ + if (host->dma.xfer_required) + acornscsi_dma_xfer(host); + + if (ret == INTR_NEXT_COMMAND) + ret = acornscsi_kick(host); + + in_irq = 1; + } while (ret != INTR_IDLE); + + return IRQ_HANDLED; +} + +/*============================================================================================= + * Interfaces between interrupt handler and rest of scsi code + */ + +/* + * Function : acornscsi_queuecmd(struct scsi_cmnd *cmd) + * Purpose : queues a SCSI command + * Params : cmd - SCSI command + * Returns : 0, or < 0 on error. 
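+ * Remarks : the command is added to the issue queue and the state machine is kicked immediately if the interface is idle.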
+ */ +static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt) +{ + struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt); + void (*done)(struct scsi_cmnd *) = scsi_done; + AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; + +#if (DEBUG & DEBUG_NO_WRITE) + if (acornscsi_cmdtype(SCpnt->cmnd[0]) == CMD_WRITE && (NO_WRITE & (1 << SCpnt->device->id))) { + printk(KERN_CRIT "scsi%d.%c: WRITE attempted with NO_WRITE flag set\n", + host->host->host_no, '0' + SCpnt->device->id); + set_host_byte(SCpnt, DID_NO_CONNECT); + done(SCpnt); + return 0; + } +#endif + + SCpnt->host_scribble = NULL; + SCpnt->result = 0; + scsi_pointer->phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]); + scsi_pointer->sent_command = 0; + scsi_pointer->scsi_xferred = 0; + + init_SCp(SCpnt); + + host->stats.queues += 1; + + { + unsigned long flags; + + if (!queue_add_cmd_ordered(&host->queues.issue, SCpnt)) { + set_host_byte(SCpnt, DID_ERROR); + done(SCpnt); + return 0; + } + local_irq_save(flags); + if (host->scsi.phase == PHASE_IDLE) + acornscsi_kick(host); + local_irq_restore(flags); + } + return 0; +} + +DEF_SCSI_QCMD(acornscsi_queuecmd) + +enum res_abort { res_not_running, res_success, res_success_clear, res_snooze }; + +/* + * Prototype: enum res acornscsi_do_abort(struct scsi_cmnd *SCpnt) + * Purpose : abort a command on this host + * Params : SCpnt - command to abort + * Returns : our abort status + */ +static enum res_abort acornscsi_do_abort(AS_Host *host, struct scsi_cmnd *SCpnt) +{ + enum res_abort res = res_not_running; + + if (queue_remove_cmd(&host->queues.issue, SCpnt)) { + /* + * The command was on the issue queue, and has not been + * issued yet. We can remove the command from the queue, + * and acknowledge the abort. Neither the devices nor the + * interface know about the command. + */ +//#if (DEBUG & DEBUG_ABORT) + printk("on issue queue "); +//#endif + res = res_success; + } else if (queue_remove_cmd(&host->queues.disconnected, SCpnt)) { + /* + * The command was on the disconnected queue. Simply + * acknowledge the abort condition, and when the target + * reconnects, we will give it an ABORT message. The + * target should then disconnect, and we will clear + * the busylun bit. + */ +//#if (DEBUG & DEBUG_ABORT) + printk("on disconnected queue "); +//#endif + res = res_success; + } else if (host->SCpnt == SCpnt) { + unsigned long flags; + +//#if (DEBUG & DEBUG_ABORT) + printk("executing "); +//#endif + + local_irq_save(flags); + switch (host->scsi.phase) { + /* + * If the interface is idle, and the command is 'disconnectable', + * then it is the same as on the disconnected queue. We simply + * remove all traces of the command. When the target reconnects, + * we will give it an ABORT message since the command could not + * be found. When the target finally disconnects, we will clear + * the busylun bit. + */ + case PHASE_IDLE: + if (host->scsi.disconnectable) { + host->scsi.disconnectable = 0; + host->SCpnt = NULL; + res = res_success; + } + break; + + /* + * If the command has connected and done nothing further, + * simply force a disconnect. We also need to clear the + * busylun bit. + */ + case PHASE_CONNECTED: + sbic_arm_write(host, SBIC_CMND, CMND_DISCONNECT); + host->SCpnt = NULL; + res = res_success_clear; + break; + + default: + acornscsi_abortcmd(host); + res = res_snooze; + } + local_irq_restore(flags); + } else if (host->origSCpnt == SCpnt) { + /* + * The command will be executed next, but a command + * is currently using the interface. 
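(The waiting command is held in host->origSCpnt.)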
This is similar to + * being on the issue queue, except the busylun bit has + * been set. + */ + host->origSCpnt = NULL; +//#if (DEBUG & DEBUG_ABORT) + printk("waiting for execution "); +//#endif + res = res_success_clear; + } else + printk("unknown "); + + return res; +} + +/* + * Prototype: int acornscsi_abort(struct scsi_cmnd *SCpnt) + * Purpose : abort a command on this host + * Params : SCpnt - command to abort + * Returns : one of SCSI_ABORT_ macros + */ +int acornscsi_abort(struct scsi_cmnd *SCpnt) +{ + AS_Host *host = (AS_Host *) SCpnt->device->host->hostdata; + int result; + + host->stats.aborts += 1; + +#if (DEBUG & DEBUG_ABORT) + { + int asr, ssr; + asr = sbic_arm_read(host, SBIC_ASR); + ssr = sbic_arm_read(host, SBIC_SSR); + + printk(KERN_WARNING "acornscsi_abort: "); + print_sbic_status(asr, ssr, host->scsi.phase); + acornscsi_dumplog(host, SCpnt->device->id); + } +#endif + + printk("scsi%d: ", host->host->host_no); + + switch (acornscsi_do_abort(host, SCpnt)) { + /* + * We managed to find the command and cleared it out. + * We do not expect the command to be executing on the + * target, but we have set the busylun bit. + */ + case res_success_clear: +//#if (DEBUG & DEBUG_ABORT) + printk("clear "); +//#endif + clear_bit(SCpnt->device->id * 8 + + (u8)(SCpnt->device->lun & 0x7), host->busyluns); + fallthrough; + + /* + * We found the command, and cleared it out. Either + * the command is still known to be executing on the + * target, or the busylun bit is not set. + */ + case res_success: +//#if (DEBUG & DEBUG_ABORT) + printk("success\n"); +//#endif + result = SUCCESS; + break; + + /* + * We did find the command, but unfortunately we couldn't + * unhook it from ourselves. Wait some more, and if it + * still doesn't complete, reset the interface. + */ + case res_snooze: +//#if (DEBUG & DEBUG_ABORT) + printk("snooze\n"); +//#endif + result = FAILED; + break; + + /* + * The command could not be found (either because it completed, + * or it got dropped. + */ + default: + case res_not_running: + acornscsi_dumplog(host, SCpnt->device->id); + result = FAILED; +//#if (DEBUG & DEBUG_ABORT) + printk("not running\n"); +//#endif + break; + } + + return result; +} + +/* + * Prototype: int acornscsi_reset(struct scsi_cmnd *SCpnt) + * Purpose : reset a command on this host/reset this host + * Params : SCpnt - command causing reset + * Returns : one of SCSI_RESET_ macros + */ +int acornscsi_host_reset(struct scsi_cmnd *SCpnt) +{ + AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; + struct scsi_cmnd *SCptr; + + host->stats.resets += 1; + +#if (DEBUG & DEBUG_RESET) + { + int asr, ssr, devidx; + + asr = sbic_arm_read(host, SBIC_ASR); + ssr = sbic_arm_read(host, SBIC_SSR); + + printk(KERN_WARNING "acornscsi_reset: "); + print_sbic_status(asr, ssr, host->scsi.phase); + for (devidx = 0; devidx < 9; devidx++) + acornscsi_dumplog(host, devidx); + } +#endif + + acornscsi_dma_stop(host); + + /* + * do hard reset. This resets all devices on this host, and so we + * must set the reset status on all commands. 
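+ * Commands still sitting on the disconnected queue are simply drained below.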
+ */ + acornscsi_resetcard(host); + + while ((SCptr = queue_remove(&host->queues.disconnected)) != NULL) + ; + + return SUCCESS; +} + +/*============================================================================================== + * initialisation & miscellaneous support + */ + +/* + * Function: char *acornscsi_info(struct Scsi_Host *host) + * Purpose : return a string describing this interface + * Params : host - host to give information on + * Returns : a constant string + */ +const +char *acornscsi_info(struct Scsi_Host *host) +{ + static char string[100], *p; + + p = string; + + p += sprintf(string, "%s at port %08lX irq %d v%d.%d.%d" +#ifdef CONFIG_SCSI_ACORNSCSI_SYNC + " SYNC" +#endif +#if (DEBUG & DEBUG_NO_WRITE) + " NOWRITE (" __stringify(NO_WRITE) ")" +#endif + , host->hostt->name, host->io_port, host->irq, + VER_MAJOR, VER_MINOR, VER_PATCH); + return string; +} + +static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance) +{ + int devidx; + struct scsi_device *scd; + AS_Host *host; + + host = (AS_Host *)instance->hostdata; + + seq_printf(m, "AcornSCSI driver v%d.%d.%d" +#ifdef CONFIG_SCSI_ACORNSCSI_SYNC + " SYNC" +#endif +#if (DEBUG & DEBUG_NO_WRITE) + " NOWRITE (" __stringify(NO_WRITE) ")" +#endif + "\n\n", VER_MAJOR, VER_MINOR, VER_PATCH); + + seq_printf(m, "SBIC: WD33C93A Address: %p IRQ : %d\n", + host->base + SBIC_REGIDX, host->scsi.irq); +#ifdef USE_DMAC + seq_printf(m, "DMAC: uPC71071 Address: %p IRQ : %d\n\n", + host->base + DMAC_OFFSET, host->scsi.irq); +#endif + + seq_printf(m, "Statistics:\n" + "Queued commands: %-10u Issued commands: %-10u\n" + "Done commands : %-10u Reads : %-10u\n" + "Writes : %-10u Others : %-10u\n" + "Disconnects : %-10u Aborts : %-10u\n" + "Resets : %-10u\n\nLast phases:", + host->stats.queues, host->stats.removes, + host->stats.fins, host->stats.reads, + host->stats.writes, host->stats.miscs, + host->stats.disconnects, host->stats.aborts, + host->stats.resets); + + for (devidx = 0; devidx < 9; devidx ++) { + unsigned int statptr, prev; + + seq_printf(m, "\n%c:", devidx == 8 ? 'H' : ('0' + devidx)); + statptr = host->status_ptr[devidx] - 10; + + if ((signed int)statptr < 0) + statptr += STATUS_BUFFER_SIZE; + + prev = host->status[devidx][statptr].when; + + for (; statptr != host->status_ptr[devidx]; statptr = (statptr + 1) & (STATUS_BUFFER_SIZE - 1)) { + if (host->status[devidx][statptr].when) { + seq_printf(m, "%c%02X:%02X+%2ld", + host->status[devidx][statptr].irq ? '-' : ' ', + host->status[devidx][statptr].ph, + host->status[devidx][statptr].ssr, + (host->status[devidx][statptr].when - prev) < 100 ? + (host->status[devidx][statptr].when - prev) : 99); + prev = host->status[devidx][statptr].when; + } + } + } + + seq_printf(m, "\nAttached devices:\n"); + + shost_for_each_device(scd, instance) { + seq_printf(m, "Device/Lun TaggedQ Sync\n"); + seq_printf(m, " %d/%llu ", scd->id, scd->lun); + if (scd->tagged_supported) + seq_printf(m, "%3sabled ", + scd->simple_tags ? 
"en" : "dis"); + else + seq_printf(m, "unsupported "); + + if (host->device[scd->id].sync_xfer & 15) + seq_printf(m, "offset %d, %d ns\n", + host->device[scd->id].sync_xfer & 15, + acornscsi_getperiod(host->device[scd->id].sync_xfer)); + else + seq_printf(m, "async\n"); + + } + return 0; +} + +static const struct scsi_host_template acornscsi_template = { + .module = THIS_MODULE, + .show_info = acornscsi_show_info, + .name = "AcornSCSI", + .info = acornscsi_info, + .queuecommand = acornscsi_queuecmd, + .eh_abort_handler = acornscsi_abort, + .eh_host_reset_handler = acornscsi_host_reset, + .can_queue = 16, + .this_id = 7, + .sg_tablesize = SG_ALL, + .cmd_per_lun = 2, + .dma_boundary = PAGE_SIZE - 1, + .proc_name = "acornscsi", + .cmd_size = sizeof(struct arm_cmd_priv), +}; + +static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id) +{ + struct Scsi_Host *host; + AS_Host *ashost; + int ret; + + ret = ecard_request_resources(ec); + if (ret) + goto out; + + host = scsi_host_alloc(&acornscsi_template, sizeof(AS_Host)); + if (!host) { + ret = -ENOMEM; + goto out_release; + } + + ashost = (AS_Host *)host->hostdata; + + ashost->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); + ashost->fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); + if (!ashost->base || !ashost->fast) { + ret = -ENOMEM; + goto out_put; + } + + host->irq = ec->irq; + ashost->host = host; + ashost->scsi.irq = host->irq; + + ec->irqaddr = ashost->fast + INT_REG; + ec->irqmask = 0x0a; + + ret = request_irq(host->irq, acornscsi_intr, 0, "acornscsi", ashost); + if (ret) { + printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n", + host->host_no, ashost->scsi.irq, ret); + goto out_put; + } + + memset(&ashost->stats, 0, sizeof (ashost->stats)); + queue_initialise(&ashost->queues.issue); + queue_initialise(&ashost->queues.disconnected); + msgqueue_initialise(&ashost->scsi.msgs); + + acornscsi_resetcard(ashost); + + ret = scsi_add_host(host, &ec->dev); + if (ret) + goto out_irq; + + scsi_scan_host(host); + goto out; + + out_irq: + free_irq(host->irq, ashost); + msgqueue_free(&ashost->scsi.msgs); + queue_free(&ashost->queues.disconnected); + queue_free(&ashost->queues.issue); + out_put: + ecardm_iounmap(ec, ashost->fast); + ecardm_iounmap(ec, ashost->base); + scsi_host_put(host); + out_release: + ecard_release_resources(ec); + out: + return ret; +} + +static void acornscsi_remove(struct expansion_card *ec) +{ + struct Scsi_Host *host = ecard_get_drvdata(ec); + AS_Host *ashost = (AS_Host *)host->hostdata; + + ecard_set_drvdata(ec, NULL); + scsi_remove_host(host); + + /* + * Put card into RESET state + */ + writeb(0x80, ashost->fast + PAGE_REG); + + free_irq(host->irq, ashost); + + msgqueue_free(&ashost->scsi.msgs); + queue_free(&ashost->queues.disconnected); + queue_free(&ashost->queues.issue); + ecardm_iounmap(ec, ashost->fast); + ecardm_iounmap(ec, ashost->base); + scsi_host_put(host); + ecard_release_resources(ec); +} + +static const struct ecard_id acornscsi_cids[] = { + { MANU_ACORN, PROD_ACORN_SCSI }, + { 0xffff, 0xffff }, +}; + +static struct ecard_driver acornscsi_driver = { + .probe = acornscsi_probe, + .remove = acornscsi_remove, + .id_table = acornscsi_cids, + .drv = { + .name = "acornscsi", + }, +}; + +static int __init acornscsi_init(void) +{ + return ecard_register_driver(&acornscsi_driver); +} + +static void __exit acornscsi_exit(void) +{ + ecard_remove_driver(&acornscsi_driver); +} + +module_init(acornscsi_init); +module_exit(acornscsi_exit); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("AcornSCSI 
driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/arm/acornscsi.h b/drivers/scsi/arm/acornscsi.h new file mode 100644 index 000000000..376c76bc2 --- /dev/null +++ b/drivers/scsi/arm/acornscsi.h @@ -0,0 +1,350 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * linux/drivers/acorn/scsi/acornscsi.h + * + * Copyright (C) 1997 Russell King + * + * Acorn SCSI driver + */ +#ifndef ACORNSCSI_H +#define ACORNSCSI_H + +/* SBIC registers */ +#define SBIC_OWNID 0 +#define OWNID_FS1 (1<<7) +#define OWNID_FS2 (1<<6) +#define OWNID_EHP (1<<4) +#define OWNID_EAF (1<<3) + +#define SBIC_CTRL 1 +#define CTRL_DMAMODE (1<<7) +#define CTRL_DMADBAMODE (1<<6) +#define CTRL_DMABURST (1<<5) +#define CTRL_DMAPOLLED 0 +#define CTRL_HHP (1<<4) +#define CTRL_EDI (1<<3) +#define CTRL_IDI (1<<2) +#define CTRL_HA (1<<1) +#define CTRL_HSP (1<<0) + +#define SBIC_TIMEOUT 2 +#define SBIC_TOTSECTS 3 +#define SBIC_TOTHEADS 4 +#define SBIC_TOTCYLH 5 +#define SBIC_TOTCYLL 6 +#define SBIC_LOGADDRH 7 +#define SBIC_LOGADDRM2 8 +#define SBIC_LOGADDRM1 9 +#define SBIC_LOGADDRL 10 +#define SBIC_SECTORNUM 11 +#define SBIC_HEADNUM 12 +#define SBIC_CYLH 13 +#define SBIC_CYLL 14 +#define SBIC_TARGETLUN 15 +#define TARGETLUN_TLV (1<<7) +#define TARGETLUN_DOK (1<<6) + +#define SBIC_CMNDPHASE 16 +#define SBIC_SYNCHTRANSFER 17 +#define SYNCHTRANSFER_OF0 0x00 +#define SYNCHTRANSFER_OF1 0x01 +#define SYNCHTRANSFER_OF2 0x02 +#define SYNCHTRANSFER_OF3 0x03 +#define SYNCHTRANSFER_OF4 0x04 +#define SYNCHTRANSFER_OF5 0x05 +#define SYNCHTRANSFER_OF6 0x06 +#define SYNCHTRANSFER_OF7 0x07 +#define SYNCHTRANSFER_OF8 0x08 +#define SYNCHTRANSFER_OF9 0x09 +#define SYNCHTRANSFER_OF10 0x0A +#define SYNCHTRANSFER_OF11 0x0B +#define SYNCHTRANSFER_OF12 0x0C +#define SYNCHTRANSFER_8DBA 0x00 +#define SYNCHTRANSFER_2DBA 0x20 +#define SYNCHTRANSFER_3DBA 0x30 +#define SYNCHTRANSFER_4DBA 0x40 +#define SYNCHTRANSFER_5DBA 0x50 +#define SYNCHTRANSFER_6DBA 0x60 +#define SYNCHTRANSFER_7DBA 0x70 + +#define SBIC_TRANSCNTH 18 +#define SBIC_TRANSCNTM 19 +#define SBIC_TRANSCNTL 20 +#define SBIC_DESTID 21 +#define DESTID_SCC (1<<7) +#define DESTID_DPD (1<<6) + +#define SBIC_SOURCEID 22 +#define SOURCEID_ER (1<<7) +#define SOURCEID_ES (1<<6) +#define SOURCEID_DSP (1<<5) +#define SOURCEID_SIV (1<<4) + +#define SBIC_SSR 23 +#define SBIC_CMND 24 +#define CMND_RESET 0x00 +#define CMND_ABORT 0x01 +#define CMND_ASSERTATN 0x02 +#define CMND_NEGATEACK 0x03 +#define CMND_DISCONNECT 0x04 +#define CMND_RESELECT 0x05 +#define CMND_SELWITHATN 0x06 +#define CMND_SELECT 0x07 +#define CMND_SELECTATNTRANSFER 0x08 +#define CMND_SELECTTRANSFER 0x09 +#define CMND_RESELECTRXDATA 0x0A +#define CMND_RESELECTTXDATA 0x0B +#define CMND_WAITFORSELRECV 0x0C +#define CMND_SENDSTATCMD 0x0D +#define CMND_SENDDISCONNECT 0x0E +#define CMND_SETIDI 0x0F +#define CMND_RECEIVECMD 0x10 +#define CMND_RECEIVEDTA 0x11 +#define CMND_RECEIVEMSG 0x12 +#define CMND_RECEIVEUSP 0x13 +#define CMND_SENDCMD 0x14 +#define CMND_SENDDATA 0x15 +#define CMND_SENDMSG 0x16 +#define CMND_SENDUSP 0x17 +#define CMND_TRANSLATEADDR 0x18 +#define CMND_XFERINFO 0x20 +#define CMND_SBT (1<<7) + +#define SBIC_DATA 25 +#define SBIC_ASR 26 +#define ASR_INT (1<<7) +#define ASR_LCI (1<<6) +#define ASR_BSY (1<<5) +#define ASR_CIP (1<<4) +#define ASR_PE (1<<1) +#define ASR_DBR (1<<0) + +/* DMAC registers */ +#define DMAC_INIT 0x00 +#define INIT_8BIT (1) + +#define DMAC_CHANNEL 0x80 +#define CHANNEL_0 0x00 +#define CHANNEL_1 0x01 +#define CHANNEL_2 0x02 +#define CHANNEL_3 0x03 + +#define DMAC_TXCNTLO 0x01 +#define DMAC_TXCNTHI 0x81 
+#define DMAC_TXADRLO 0x02 +#define DMAC_TXADRMD 0x82 +#define DMAC_TXADRHI 0x03 + +#define DMAC_DEVCON0 0x04 +#define DEVCON0_AKL (1<<7) +#define DEVCON0_RQL (1<<6) +#define DEVCON0_EXW (1<<5) +#define DEVCON0_ROT (1<<4) +#define DEVCON0_CMP (1<<3) +#define DEVCON0_DDMA (1<<2) +#define DEVCON0_AHLD (1<<1) +#define DEVCON0_MTM (1<<0) + +#define DMAC_DEVCON1 0x84 +#define DEVCON1_WEV (1<<1) +#define DEVCON1_BHLD (1<<0) + +#define DMAC_MODECON 0x05 +#define MODECON_WOED 0x01 +#define MODECON_VERIFY 0x00 +#define MODECON_READ 0x04 +#define MODECON_WRITE 0x08 +#define MODECON_AUTOINIT 0x10 +#define MODECON_ADDRDIR 0x20 +#define MODECON_DEMAND 0x00 +#define MODECON_SINGLE 0x40 +#define MODECON_BLOCK 0x80 +#define MODECON_CASCADE 0xC0 + +#define DMAC_STATUS 0x85 +#define STATUS_TC0 (1<<0) +#define STATUS_RQ0 (1<<4) + +#define DMAC_TEMPLO 0x06 +#define DMAC_TEMPHI 0x86 +#define DMAC_REQREG 0x07 +#define DMAC_MASKREG 0x87 +#define MASKREG_M0 0x01 +#define MASKREG_M1 0x02 +#define MASKREG_M2 0x04 +#define MASKREG_M3 0x08 + +/* miscellaneous internal variables */ + +#define MASK_ON (MASKREG_M3|MASKREG_M2|MASKREG_M1|MASKREG_M0) +#define MASK_OFF (MASKREG_M3|MASKREG_M2|MASKREG_M1) + +/* + * SCSI driver phases + */ +typedef enum { + PHASE_IDLE, /* we're not planning on doing anything */ + PHASE_CONNECTING, /* connecting to a target */ + PHASE_CONNECTED, /* connected to a target */ + PHASE_MSGOUT, /* message out to device */ + PHASE_RECONNECTED, /* reconnected */ + PHASE_COMMANDPAUSED, /* command partly sent */ + PHASE_COMMAND, /* command all sent */ + PHASE_DATAOUT, /* data out to device */ + PHASE_DATAIN, /* data in from device */ + PHASE_STATUSIN, /* status in from device */ + PHASE_MSGIN, /* message in from device */ + PHASE_DONE, /* finished */ + PHASE_ABORTED, /* aborted */ + PHASE_DISCONNECT, /* disconnecting */ +} phase_t; + +/* + * After interrupt, what to do now + */ +typedef enum { + INTR_IDLE, /* not expecting another IRQ */ + INTR_NEXT_COMMAND, /* start next command */ + INTR_PROCESSING, /* interrupt routine still processing */ +} intr_ret_t; + +/* + * DMA direction + */ +typedef enum { + DMA_OUT, /* DMA from memory to chip */ + DMA_IN /* DMA from chip to memory */ +} dmadir_t; + +/* + * Synchronous transfer state + */ +typedef enum { /* Synchronous transfer state */ + SYNC_ASYNCHRONOUS, /* don't negotiate synchronous transfers*/ + SYNC_NEGOCIATE, /* start negotiation */ + SYNC_SENT_REQUEST, /* sent SDTR message */ + SYNC_COMPLETED, /* received SDTR reply */ +} syncxfer_t; + +/* + * Command type + */ +typedef enum { /* command type */ + CMD_READ, /* READ_6, READ_10, READ_12 */ + CMD_WRITE, /* WRITE_6, WRITE_10, WRITE_12 */ + CMD_MISC, /* Others */ +} cmdtype_t; + +/* + * Data phase direction + */ +typedef enum { /* Data direction */ + DATADIR_IN, /* Data in phase expected */ + DATADIR_OUT /* Data out phase expected */ +} datadir_t; + +#include "queue.h" +#include "msgqueue.h" + +#define STATUS_BUFFER_SIZE 32 +/* + * This is used to dump the previous states of the SBIC + */ +struct status_entry { + unsigned long when; + unsigned char ssr; + unsigned char ph; + unsigned char irq; + unsigned char unused; +}; + +#define ADD_STATUS(_q,_ssr,_ph,_irq) \ +({ \ + host->status[(_q)][host->status_ptr[(_q)]].when = jiffies; \ + host->status[(_q)][host->status_ptr[(_q)]].ssr = (_ssr); \ + host->status[(_q)][host->status_ptr[(_q)]].ph = (_ph); \ + host->status[(_q)][host->status_ptr[(_q)]].irq = (_irq); \ + host->status_ptr[(_q)] = (host->status_ptr[(_q)] + 1) & (STATUS_BUFFER_SIZE - 1); \ +}) + +/* + * 
AcornSCSI host specific data + */ +typedef struct acornscsi_hostdata { + /* miscellaneous */ + struct Scsi_Host *host; /* host */ + struct scsi_cmnd *SCpnt; /* currently processing command */ + struct scsi_cmnd *origSCpnt; /* original connecting command */ + void __iomem *base; /* memc base address */ + void __iomem *fast; /* fast ioc base address */ + + /* driver information */ + struct { + unsigned int irq; /* interrupt */ + phase_t phase; /* current phase */ + + struct { + unsigned char target; /* reconnected target */ + unsigned char lun; /* reconnected lun */ + unsigned char tag; /* reconnected tag */ + } reconnected; + + struct scsi_pointer SCp; /* current commands data pointer */ + + MsgQueue_t msgs; + + unsigned short last_message; /* last message to be sent */ + unsigned char disconnectable:1; /* this command can be disconnected */ + } scsi; + + /* statistics information */ + struct { + unsigned int queues; + unsigned int removes; + unsigned int fins; + unsigned int reads; + unsigned int writes; + unsigned int miscs; + unsigned int disconnects; + unsigned int aborts; + unsigned int resets; + } stats; + + /* queue handling */ + struct { + Queue_t issue; /* issue queue */ + Queue_t disconnected; /* disconnected command queue */ + } queues; + + /* per-device info */ + struct { + unsigned char sync_xfer; /* synchronous transfer (SBIC value) */ + syncxfer_t sync_state; /* sync xfer negotiation state */ + unsigned char disconnect_ok:1; /* device can disconnect */ + } device[8]; + unsigned long busyluns[64 / sizeof(unsigned long)];/* array of bits indicating LUNs busy */ + + /* DMA info */ + struct { + unsigned int free_addr; /* next free address */ + unsigned int start_addr; /* start address of current transfer */ + dmadir_t direction; /* dma direction */ + unsigned int transferred; /* number of bytes transferred */ + unsigned int xfer_start; /* scheduled DMA transfer start */ + unsigned int xfer_length; /* scheduled DMA transfer length */ + char *xfer_ptr; /* pointer to area */ + unsigned char xfer_required:1; /* set if we need to transfer something */ + unsigned char xfer_setup:1; /* set if DMA is setup */ + unsigned char xfer_done:1; /* set if DMA reached end of BH list */ + } dma; + + /* card info */ + struct { + unsigned char page_reg; /* current setting of page reg */ + } card; + + unsigned char status_ptr[9]; + struct status_entry status[9][STATUS_BUFFER_SIZE]; +} AS_Host; + +#endif /* ACORNSCSI_H */ diff --git a/drivers/scsi/arm/arm_scsi.h b/drivers/scsi/arm/arm_scsi.h new file mode 100644 index 000000000..ea9fcd92c --- /dev/null +++ b/drivers/scsi/arm/arm_scsi.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2002 Russell King + * + * Commonly used functions by the ARM SCSI-II drivers. + */ + +#include + +#define BELT_AND_BRACES + +struct arm_cmd_priv { + struct scsi_pointer scsi_pointer; +}; + +static inline struct scsi_pointer *arm_scsi_pointer(struct scsi_cmnd *cmd) +{ + struct arm_cmd_priv *acmd = scsi_cmd_priv(cmd); + + return &acmd->scsi_pointer; +} + +/* + * The scatter-gather list handling. This contains all + * the yucky stuff that needs to be fixed properly. + */ + +/* + * copy_SCp_to_sg() Assumes contiguous allocation at @sg of at-most @max + * entries of uninitialized memory. 
SCp is from scsi-ml and has a valid + * (possibly chained) sg-list + */ +static inline int copy_SCp_to_sg(struct scatterlist *sg, struct scsi_pointer *SCp, int max) +{ + int bufs = SCp->buffers_residual; + + /* FIXME: It should be easy for drivers to loop on copy_SCp_to_sg(). + * and to remove this BUG_ON. Use min() in-its-place + */ + BUG_ON(bufs + 1 > max); + + sg_set_buf(sg, SCp->ptr, SCp->this_residual); + + if (bufs) { + struct scatterlist *src_sg; + unsigned i; + + for_each_sg(sg_next(SCp->buffer), src_sg, bufs, i) + *(++sg) = *src_sg; + sg_mark_end(sg); + } + + return bufs + 1; +} + +static inline int next_SCp(struct scsi_pointer *SCp) +{ + int ret = SCp->buffers_residual; + if (ret) { + SCp->buffer = sg_next(SCp->buffer); + SCp->buffers_residual--; + SCp->ptr = sg_virt(SCp->buffer); + SCp->this_residual = SCp->buffer->length; + } else { + SCp->ptr = NULL; + SCp->this_residual = 0; + } + return ret; +} + +static inline unsigned char get_next_SCp_byte(struct scsi_pointer *SCp) +{ + char c = *SCp->ptr; + + SCp->ptr += 1; + SCp->this_residual -= 1; + + return c; +} + +static inline void put_next_SCp_byte(struct scsi_pointer *SCp, unsigned char c) +{ + *SCp->ptr = c; + SCp->ptr += 1; + SCp->this_residual -= 1; +} + +static inline void init_SCp(struct scsi_cmnd *SCpnt) +{ + struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt); + + memset(scsi_pointer, 0, sizeof(struct scsi_pointer)); + + if (scsi_bufflen(SCpnt)) { + unsigned long len = 0; + + scsi_pointer->buffer = scsi_sglist(SCpnt); + scsi_pointer->buffers_residual = scsi_sg_count(SCpnt) - 1; + scsi_pointer->ptr = sg_virt(scsi_pointer->buffer); + scsi_pointer->this_residual = scsi_pointer->buffer->length; + scsi_pointer->phase = scsi_bufflen(SCpnt); + +#ifdef BELT_AND_BRACES + { /* + * Calculate correct buffer length. Some commands + * come in with the wrong scsi_bufflen. + */ + struct scatterlist *sg; + unsigned i, sg_count = scsi_sg_count(SCpnt); + + scsi_for_each_sg(SCpnt, sg, sg_count, i) + len += sg->length; + + if (scsi_bufflen(SCpnt) != len) { + printk(KERN_WARNING + "scsi%d.%c: bad request buffer " + "length %d, should be %ld\n", + SCpnt->device->host->host_no, + '0' + SCpnt->device->id, + scsi_bufflen(SCpnt), len); + /* + * FIXME: Totaly naive fixup. We should abort + * with error + */ + scsi_pointer->phase = + min_t(unsigned long, len, + scsi_bufflen(SCpnt)); + } + } +#endif + } else { + scsi_pointer->ptr = NULL; + scsi_pointer->this_residual = 0; + scsi_pointer->phase = 0; + } +} diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c new file mode 100644 index 000000000..925d0bd68 --- /dev/null +++ b/drivers/scsi/arm/arxescsi.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/drivers/scsi/arm/arxescsi.c + * + * Copyright (C) 1997-2000 Russell King, Stefan Hanske + * + * This driver is based on experimentation. Hence, it may have made + * assumptions about the particular card that I have available, and + * may not be reliable! + * + * Changelog: + * 30-08-1997 RMK 0.0.0 Created, READONLY version as cumana_2.c + * 22-01-1998 RMK 0.0.1 Updated to 2.1.80 + * 15-04-1998 RMK 0.0.1 Only do PIO if FAS216 will allow it. + * 11-06-1998 SH 0.0.2 Changed to support ARXE 16-bit SCSI card + * enabled writing + * 01-01-2000 SH 0.1.0 Added *real* pseudo dma writing + * (arxescsi_pseudo_dma_write) + * 02-04-2000 RMK 0.1.1 Updated for new error handling code. + * 22-10-2000 SH Updated for new registering scheme. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "fas216.h" + +struct arxescsi_info { + FAS216_Info info; + struct expansion_card *ec; + void __iomem *base; +}; + +#define DMADATA_OFFSET (0x200) + +#define DMASTAT_OFFSET (0x600) +#define DMASTAT_DRQ (1 << 0) + +#define CSTATUS_IRQ (1 << 0) + +#define VERSION "1.10 (23/01/2003 2.5.57)" + +/* + * Function: int arxescsi_dma_setup(host, SCpnt, direction, min_type) + * Purpose : initialises DMA/PIO + * Params : host - host + * SCpnt - command + * direction - DMA on to/off of card + * min_type - minimum DMA support that we must have for this transfer + * Returns : 0 if we should not set CMD_WITHDMA for transfer info command + */ +static fasdmatype_t +arxescsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, + fasdmadir_t direction, fasdmatype_t min_type) +{ + /* + * We don't do real DMA + */ + return fasdma_pseudo; +} + +static void arxescsi_pseudo_dma_write(unsigned char *addr, void __iomem *base) +{ + __asm__ __volatile__( + " stmdb sp!, {r0-r12}\n" + " mov r3, %0\n" + " mov r1, %1\n" + " add r2, r1, #512\n" + " mov r4, #256\n" + ".loop_1: ldmia r3!, {r6, r8, r10, r12}\n" + " mov r5, r6, lsl #16\n" + " mov r7, r8, lsl #16\n" + ".loop_2: ldrb r0, [r1, #1536]\n" + " tst r0, #1\n" + " beq .loop_2\n" + " stmia r2, {r5-r8}\n\t" + " mov r9, r10, lsl #16\n" + " mov r11, r12, lsl #16\n" + ".loop_3: ldrb r0, [r1, #1536]\n" + " tst r0, #1\n" + " beq .loop_3\n" + " stmia r2, {r9-r12}\n" + " subs r4, r4, #16\n" + " bne .loop_1\n" + " ldmia sp!, {r0-r12}\n" + : + : "r" (addr), "r" (base)); +} + +/* + * Function: int arxescsi_dma_pseudo(host, SCpnt, direction, transfer) + * Purpose : handles pseudo DMA + * Params : host - host + * SCpnt - command + * direction - DMA on to/off of card + * transfer - minimum number of bytes we expect to transfer + */ +static void +arxescsi_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp, + fasdmadir_t direction, int transfer) +{ + struct arxescsi_info *info = (struct arxescsi_info *)host->hostdata; + unsigned int length, error = 0; + void __iomem *base = info->info.scsi.io_base; + unsigned char *addr; + + length = SCp->this_residual; + addr = SCp->ptr; + + if (direction == DMA_OUT) { + unsigned int word; + while (length > 256) { + if (readb(base + 0x80) & STAT_INT) { + error = 1; + break; + } + arxescsi_pseudo_dma_write(addr, base); + addr += 256; + length -= 256; + } + + if (!error) + while (length > 0) { + if (readb(base + 0x80) & STAT_INT) + break; + + if (!(readb(base + DMASTAT_OFFSET) & DMASTAT_DRQ)) + continue; + + word = *addr | *(addr + 1) << 8; + + writew(word, base + DMADATA_OFFSET); + if (length > 1) { + addr += 2; + length -= 2; + } else { + addr += 1; + length -= 1; + } + } + } + else { + if (transfer && (transfer & 255)) { + while (length >= 256) { + if (readb(base + 0x80) & STAT_INT) { + error = 1; + break; + } + + if (!(readb(base + DMASTAT_OFFSET) & DMASTAT_DRQ)) + continue; + + readsw(base + DMADATA_OFFSET, addr, 256 >> 1); + addr += 256; + length -= 256; + } + } + + if (!(error)) + while (length > 0) { + unsigned long word; + + if (readb(base + 0x80) & STAT_INT) + break; + + if (!(readb(base + DMASTAT_OFFSET) & DMASTAT_DRQ)) + continue; + + word = readw(base + DMADATA_OFFSET); + *addr++ = word; + if (--length > 0) { + *addr++ = word >> 8; + length --; + } + } + } +} + +/* + * Function: int arxescsi_dma_stop(host, 
SCpnt) + * Purpose : stops DMA/PIO + * Params : host - host + * SCpnt - command + */ +static void arxescsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) +{ + /* + * no DMA to stop + */ +} + +/* + * Function: const char *arxescsi_info(struct Scsi_Host * host) + * Purpose : returns a descriptive string about this interface, + * Params : host - driver host structure to return info for. + * Returns : pointer to a static buffer containing null terminated string. + */ +static const char *arxescsi_info(struct Scsi_Host *host) +{ + struct arxescsi_info *info = (struct arxescsi_info *)host->hostdata; + static char string[150]; + + sprintf(string, "%s (%s) in slot %d v%s", + host->hostt->name, info->info.scsi.type, info->ec->slot_no, + VERSION); + + return string; +} + +static int +arxescsi_show_info(struct seq_file *m, struct Scsi_Host *host) +{ + struct arxescsi_info *info; + info = (struct arxescsi_info *)host->hostdata; + + seq_printf(m, "ARXE 16-bit SCSI driver v%s\n", VERSION); + fas216_print_host(&info->info, m); + fas216_print_stats(&info->info, m); + fas216_print_devices(&info->info, m); + return 0; +} + +static const struct scsi_host_template arxescsi_template = { + .show_info = arxescsi_show_info, + .name = "ARXE SCSI card", + .info = arxescsi_info, + .queuecommand = fas216_noqueue_command, + .eh_host_reset_handler = fas216_eh_host_reset, + .eh_bus_reset_handler = fas216_eh_bus_reset, + .eh_device_reset_handler = fas216_eh_device_reset, + .eh_abort_handler = fas216_eh_abort, + .cmd_size = sizeof(struct fas216_cmd_priv), + .can_queue = 0, + .this_id = 7, + .sg_tablesize = SG_ALL, + .dma_boundary = PAGE_SIZE - 1, + .proc_name = "arxescsi", +}; + +static int arxescsi_probe(struct expansion_card *ec, const struct ecard_id *id) +{ + struct Scsi_Host *host; + struct arxescsi_info *info; + void __iomem *base; + int ret; + + ret = ecard_request_resources(ec); + if (ret) + goto out; + + base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); + if (!base) { + ret = -ENOMEM; + goto out_region; + } + + host = scsi_host_alloc(&arxescsi_template, sizeof(struct arxescsi_info)); + if (!host) { + ret = -ENOMEM; + goto out_region; + } + + info = (struct arxescsi_info *)host->hostdata; + info->ec = ec; + info->base = base; + + info->info.scsi.io_base = base + 0x2000; + info->info.scsi.irq = 0; + info->info.scsi.dma = NO_DMA; + info->info.scsi.io_shift = 5; + info->info.ifcfg.clockrate = 24; /* MHz */ + info->info.ifcfg.select_timeout = 255; + info->info.ifcfg.asyncperiod = 200; /* ns */ + info->info.ifcfg.sync_max_depth = 0; + info->info.ifcfg.cntl3 = CNTL3_FASTSCSI | CNTL3_FASTCLK; + info->info.ifcfg.disconnect_ok = 0; + info->info.ifcfg.wide_max_size = 0; + info->info.ifcfg.capabilities = FASCAP_PSEUDODMA; + info->info.dma.setup = arxescsi_dma_setup; + info->info.dma.pseudo = arxescsi_dma_pseudo; + info->info.dma.stop = arxescsi_dma_stop; + + ec->irqaddr = base; + ec->irqmask = CSTATUS_IRQ; + + ret = fas216_init(host); + if (ret) + goto out_unregister; + + ret = fas216_add(host, &ec->dev); + if (ret == 0) + goto out; + + fas216_release(host); + out_unregister: + scsi_host_put(host); + out_region: + ecard_release_resources(ec); + out: + return ret; +} + +static void arxescsi_remove(struct expansion_card *ec) +{ + struct Scsi_Host *host = ecard_get_drvdata(ec); + + ecard_set_drvdata(ec, NULL); + fas216_remove(host); + + fas216_release(host); + scsi_host_put(host); + ecard_release_resources(ec); +} + +static const struct ecard_id arxescsi_cids[] = { + { MANU_ARXE, PROD_ARXE_SCSI }, + { 0xffff, 0xffff }, 
+}; + +static struct ecard_driver arxescsi_driver = { + .probe = arxescsi_probe, + .remove = arxescsi_remove, + .id_table = arxescsi_cids, + .drv = { + .name = "arxescsi", + }, +}; + +static int __init init_arxe_scsi_driver(void) +{ + return ecard_register_driver(&arxescsi_driver); +} + +static void __exit exit_arxe_scsi_driver(void) +{ + ecard_remove_driver(&arxescsi_driver); +} + +module_init(init_arxe_scsi_driver); +module_exit(exit_arxe_scsi_driver); + +MODULE_AUTHOR("Stefan Hanske"); +MODULE_DESCRIPTION("ARXESCSI driver for Acorn machines"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c new file mode 100644 index 000000000..d1a2a22ff --- /dev/null +++ b/drivers/scsi/arm/cumana_1.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic Generic NCR5380 driver + * + * Copyright 1995-2002, Russell King + */ +#include +#include +#include +#include + +#include +#include + +#include + +#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) +#define NCR5380_read(reg) cumanascsi_read(hostdata, reg) +#define NCR5380_write(reg, value) cumanascsi_write(hostdata, reg, value) + +#define NCR5380_dma_xfer_len cumanascsi_dma_xfer_len +#define NCR5380_dma_recv_setup cumanascsi_pread +#define NCR5380_dma_send_setup cumanascsi_pwrite +#define NCR5380_dma_residual NCR5380_dma_residual_none + +#define NCR5380_intr cumanascsi_intr +#define NCR5380_queue_command cumanascsi_queue_command +#define NCR5380_info cumanascsi_info + +#define NCR5380_implementation_fields \ + unsigned ctrl + +struct NCR5380_hostdata; +static u8 cumanascsi_read(struct NCR5380_hostdata *, unsigned int); +static void cumanascsi_write(struct NCR5380_hostdata *, unsigned int, u8); + +#include "../NCR5380.h" + +#define CTRL 0x16fc +#define STAT 0x2004 +#define L(v) (((v)<<16)|((v) & 0x0000ffff)) +#define H(v) (((v)>>16)|((v) & 0xffff0000)) + +static inline int cumanascsi_pwrite(struct NCR5380_hostdata *hostdata, + unsigned char *addr, int len) +{ + unsigned long *laddr; + u8 __iomem *base = hostdata->io; + u8 __iomem *dma = hostdata->pdma_io + 0x2000; + + if(!len) return 0; + + writeb(0x02, base + CTRL); + laddr = (unsigned long *)addr; + while(len >= 32) + { + unsigned int status; + unsigned long v; + status = readb(base + STAT); + if(status & 0x80) + goto end; + if(!(status & 0x40)) + continue; + v=*laddr++; writew(L(v), dma); writew(H(v), dma); + v=*laddr++; writew(L(v), dma); writew(H(v), dma); + v=*laddr++; writew(L(v), dma); writew(H(v), dma); + v=*laddr++; writew(L(v), dma); writew(H(v), dma); + v=*laddr++; writew(L(v), dma); writew(H(v), dma); + v=*laddr++; writew(L(v), dma); writew(H(v), dma); + v=*laddr++; writew(L(v), dma); writew(H(v), dma); + v=*laddr++; writew(L(v), dma); writew(H(v), dma); + len -= 32; + if(len == 0) + break; + } + + addr = (unsigned char *)laddr; + writeb(0x12, base + CTRL); + + while(len > 0) + { + unsigned int status; + status = readb(base + STAT); + if(status & 0x80) + goto end; + if(status & 0x40) + { + writeb(*addr++, dma); + if(--len == 0) + break; + } + + status = readb(base + STAT); + if(status & 0x80) + goto end; + if(status & 0x40) + { + writeb(*addr++, dma); + if(--len == 0) + break; + } + } +end: + writeb(hostdata->ctrl | 0x40, base + CTRL); + + if (len) + return -1; + return 0; +} + +static inline int cumanascsi_pread(struct NCR5380_hostdata *hostdata, + unsigned char *addr, int len) +{ + unsigned long *laddr; + u8 __iomem *base = hostdata->io; + u8 __iomem *dma = hostdata->pdma_io + 0x2000; + + if(!len) return 
0; + + writeb(0x00, base + CTRL); + laddr = (unsigned long *)addr; + while(len >= 32) + { + unsigned int status; + status = readb(base + STAT); + if(status & 0x80) + goto end; + if(!(status & 0x40)) + continue; + *laddr++ = readw(dma) | (readw(dma) << 16); + *laddr++ = readw(dma) | (readw(dma) << 16); + *laddr++ = readw(dma) | (readw(dma) << 16); + *laddr++ = readw(dma) | (readw(dma) << 16); + *laddr++ = readw(dma) | (readw(dma) << 16); + *laddr++ = readw(dma) | (readw(dma) << 16); + *laddr++ = readw(dma) | (readw(dma) << 16); + *laddr++ = readw(dma) | (readw(dma) << 16); + len -= 32; + if(len == 0) + break; + } + + addr = (unsigned char *)laddr; + writeb(0x10, base + CTRL); + + while(len > 0) + { + unsigned int status; + status = readb(base + STAT); + if(status & 0x80) + goto end; + if(status & 0x40) + { + *addr++ = readb(dma); + if(--len == 0) + break; + } + + status = readb(base + STAT); + if(status & 0x80) + goto end; + if(status & 0x40) + { + *addr++ = readb(dma); + if(--len == 0) + break; + } + } +end: + writeb(hostdata->ctrl | 0x40, base + CTRL); + + if (len) + return -1; + return 0; +} + +static int cumanascsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, + struct scsi_cmnd *cmd) +{ + return cmd->transfersize; +} + +static u8 cumanascsi_read(struct NCR5380_hostdata *hostdata, + unsigned int reg) +{ + u8 __iomem *base = hostdata->io; + u8 val; + + writeb(0, base + CTRL); + + val = readb(base + 0x2100 + (reg << 2)); + + hostdata->ctrl = 0x40; + writeb(0x40, base + CTRL); + + return val; +} + +static void cumanascsi_write(struct NCR5380_hostdata *hostdata, + unsigned int reg, u8 value) +{ + u8 __iomem *base = hostdata->io; + + writeb(0, base + CTRL); + + writeb(value, base + 0x2100 + (reg << 2)); + + hostdata->ctrl = 0x40; + writeb(0x40, base + CTRL); +} + +#include "../NCR5380.c" + +static const struct scsi_host_template cumanascsi_template = { + .module = THIS_MODULE, + .name = "Cumana 16-bit SCSI", + .info = cumanascsi_info, + .queuecommand = cumanascsi_queue_command, + .eh_abort_handler = NCR5380_abort, + .eh_host_reset_handler = NCR5380_host_reset, + .can_queue = 16, + .this_id = 7, + .sg_tablesize = SG_ALL, + .cmd_per_lun = 2, + .proc_name = "CumanaSCSI-1", + .cmd_size = sizeof(struct NCR5380_cmd), + .max_sectors = 128, + .dma_boundary = PAGE_SIZE - 1, +}; + +static int cumanascsi1_probe(struct expansion_card *ec, + const struct ecard_id *id) +{ + struct Scsi_Host *host; + int ret; + + ret = ecard_request_resources(ec); + if (ret) + goto out; + + host = scsi_host_alloc(&cumanascsi_template, sizeof(struct NCR5380_hostdata)); + if (!host) { + ret = -ENOMEM; + goto out_release; + } + + priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW), + ecard_resource_len(ec, ECARD_RES_IOCSLOW)); + priv(host)->pdma_io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC), + ecard_resource_len(ec, ECARD_RES_MEMC)); + if (!priv(host)->io || !priv(host)->pdma_io) { + ret = -ENOMEM; + goto out_unmap; + } + + host->irq = ec->irq; + + ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP); + if (ret) + goto out_unmap; + + NCR5380_maybe_reset_bus(host); + + priv(host)->ctrl = 0; + writeb(0, priv(host)->io + CTRL); + + ret = request_irq(host->irq, cumanascsi_intr, 0, + "CumanaSCSI-1", host); + if (ret) { + printk("scsi%d: IRQ%d not free: %d\n", + host->host_no, host->irq, ret); + goto out_exit; + } + + ret = scsi_add_host(host, &ec->dev); + if (ret) + goto out_free_irq; + + scsi_scan_host(host); + goto out; + + out_free_irq: + free_irq(host->irq, host); + out_exit: + 
NCR5380_exit(host);
+ out_unmap:
+ iounmap(priv(host)->io);
+ iounmap(priv(host)->pdma_io);
+ scsi_host_put(host);
+ out_release:
+ ecard_release_resources(ec);
+ out:
+ return ret;
+}
+
+static void cumanascsi1_remove(struct expansion_card *ec)
+{
+ struct Scsi_Host *host = ecard_get_drvdata(ec);
+ void __iomem *base = priv(host)->io;
+ void __iomem *dma = priv(host)->pdma_io;
+
+ ecard_set_drvdata(ec, NULL);
+
+ scsi_remove_host(host);
+ free_irq(host->irq, host);
+ NCR5380_exit(host);
+ scsi_host_put(host);
+ iounmap(base);
+ iounmap(dma);
+ ecard_release_resources(ec);
+}
+
+static const struct ecard_id cumanascsi1_cids[] = {
+ { MANU_CUMANA, PROD_CUMANA_SCSI_1 },
+ { 0xffff, 0xffff }
+};
+
+static struct ecard_driver cumanascsi1_driver = {
+ .probe = cumanascsi1_probe,
+ .remove = cumanascsi1_remove,
+ .id_table = cumanascsi1_cids,
+ .drv = {
+ .name = "cumanascsi1",
+ },
+};
+
+static int __init cumanascsi_init(void)
+{
+ return ecard_register_driver(&cumanascsi1_driver);
+}
+
+static void __exit cumanascsi_exit(void)
+{
+ ecard_remove_driver(&cumanascsi1_driver);
+}
+
+module_init(cumanascsi_init);
+module_exit(cumanascsi_exit);
+
+MODULE_DESCRIPTION("Cumana SCSI-1 driver for Acorn machines");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
new file mode 100644
index 000000000..c5d8f4313
--- /dev/null
+++ b/drivers/scsi/arm/cumana_2.c
@@ -0,0 +1,524 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/acorn/scsi/cumana_2.c
+ *
+ * Copyright (C) 1997-2005 Russell King
+ *
+ * Changelog:
+ * 30-08-1997 RMK 0.0.0 Created, READONLY version.
+ * 22-01-1998 RMK 0.0.1 Updated to 2.1.80.
+ * 15-04-1998 RMK 0.0.1 Only do PIO if FAS216 will allow it.
+ * 02-05-1998 RMK 0.0.2 Updated & added DMA support.
+ * 27-06-1998 RMK Changed asm/delay.h to linux/delay.h
+ * 18-08-1998 RMK 0.0.3 Fixed synchronous transfer depth.
+ * 02-04-2000 RMK 0.0.4 Updated for new error handling code.
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "fas216.h" +#include "arm_scsi.h" + +#include + +#define CUMANASCSI2_STATUS (0x0000) +#define STATUS_INT (1 << 0) +#define STATUS_DRQ (1 << 1) +#define STATUS_LATCHED (1 << 3) + +#define CUMANASCSI2_ALATCH (0x0014) +#define ALATCH_ENA_INT (3) +#define ALATCH_DIS_INT (2) +#define ALATCH_ENA_TERM (5) +#define ALATCH_DIS_TERM (4) +#define ALATCH_ENA_BIT32 (11) +#define ALATCH_DIS_BIT32 (10) +#define ALATCH_ENA_DMA (13) +#define ALATCH_DIS_DMA (12) +#define ALATCH_DMA_OUT (15) +#define ALATCH_DMA_IN (14) + +#define CUMANASCSI2_PSEUDODMA (0x0200) + +#define CUMANASCSI2_FAS216_OFFSET (0x0300) +#define CUMANASCSI2_FAS216_SHIFT 2 + +/* + * Version + */ +#define VERSION "1.00 (13/11/2002 2.5.47)" + +/* + * Use term=0,1,0,0,0 to turn terminators on/off + */ +static int term[MAX_ECARDS] = { 1, 1, 1, 1, 1, 1, 1, 1 }; + +#define NR_SG 256 + +struct cumanascsi2_info { + FAS216_Info info; + struct expansion_card *ec; + void __iomem *base; + unsigned int terms; /* Terminator state */ + struct scatterlist sg[NR_SG]; /* Scatter DMA list */ +}; + +#define CSTATUS_IRQ (1 << 0) +#define CSTATUS_DRQ (1 << 1) + +/* Prototype: void cumanascsi_2_irqenable(ec, irqnr) + * Purpose : Enable interrupts on Cumana SCSI 2 card + * Params : ec - expansion card structure + * : irqnr - interrupt number + */ +static void +cumanascsi_2_irqenable(struct expansion_card *ec, int irqnr) +{ + struct cumanascsi2_info *info = ec->irq_data; + writeb(ALATCH_ENA_INT, info->base + CUMANASCSI2_ALATCH); +} + +/* Prototype: void cumanascsi_2_irqdisable(ec, irqnr) + * Purpose : Disable interrupts on Cumana SCSI 2 card + * Params : ec - expansion card structure + * : irqnr - interrupt number + */ +static void +cumanascsi_2_irqdisable(struct expansion_card *ec, int irqnr) +{ + struct cumanascsi2_info *info = ec->irq_data; + writeb(ALATCH_DIS_INT, info->base + CUMANASCSI2_ALATCH); +} + +static const expansioncard_ops_t cumanascsi_2_ops = { + .irqenable = cumanascsi_2_irqenable, + .irqdisable = cumanascsi_2_irqdisable, +}; + +/* Prototype: void cumanascsi_2_terminator_ctl(host, on_off) + * Purpose : Turn the Cumana SCSI 2 terminators on or off + * Params : host - card to turn on/off + * : on_off - !0 to turn on, 0 to turn off + */ +static void +cumanascsi_2_terminator_ctl(struct Scsi_Host *host, int on_off) +{ + struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; + + if (on_off) { + info->terms = 1; + writeb(ALATCH_ENA_TERM, info->base + CUMANASCSI2_ALATCH); + } else { + info->terms = 0; + writeb(ALATCH_DIS_TERM, info->base + CUMANASCSI2_ALATCH); + } +} + +/* Prototype: void cumanascsi_2_intr(irq, *dev_id, *regs) + * Purpose : handle interrupts from Cumana SCSI 2 card + * Params : irq - interrupt number + * dev_id - user-defined (Scsi_Host structure) + */ +static irqreturn_t +cumanascsi_2_intr(int irq, void *dev_id) +{ + struct cumanascsi2_info *info = dev_id; + + return fas216_intr(&info->info); +} + +/* Prototype: fasdmatype_t cumanascsi_2_dma_setup(host, SCpnt, direction, min_type) + * Purpose : initialises DMA/PIO + * Params : host - host + * SCpnt - command + * direction - DMA on to/off of card + * min_type - minimum DMA support that we must have for this transfer + * Returns : type of transfer to be performed + */ +static fasdmatype_t +cumanascsi_2_dma_setup(struct Scsi_Host *host, struct scsi_pointer 
*SCp, + fasdmadir_t direction, fasdmatype_t min_type) +{ + struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; + struct device *dev = scsi_get_device(host); + int dmach = info->info.scsi.dma; + + writeb(ALATCH_DIS_DMA, info->base + CUMANASCSI2_ALATCH); + + if (dmach != NO_DMA && + (min_type == fasdma_real_all || SCp->this_residual >= 512)) { + int bufs, map_dir, dma_dir, alatch_dir; + + bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG); + + if (direction == DMA_OUT) { + map_dir = DMA_TO_DEVICE; + dma_dir = DMA_MODE_WRITE; + alatch_dir = ALATCH_DMA_OUT; + } else { + map_dir = DMA_FROM_DEVICE; + dma_dir = DMA_MODE_READ; + alatch_dir = ALATCH_DMA_IN; + } + + dma_map_sg(dev, info->sg, bufs, map_dir); + + disable_dma(dmach); + set_dma_sg(dmach, info->sg, bufs); + writeb(alatch_dir, info->base + CUMANASCSI2_ALATCH); + set_dma_mode(dmach, dma_dir); + enable_dma(dmach); + writeb(ALATCH_ENA_DMA, info->base + CUMANASCSI2_ALATCH); + writeb(ALATCH_DIS_BIT32, info->base + CUMANASCSI2_ALATCH); + return fasdma_real_all; + } + + /* + * If we're not doing DMA, + * we'll do pseudo DMA + */ + return fasdma_pio; +} + +/* + * Prototype: void cumanascsi_2_dma_pseudo(host, SCpnt, direction, transfer) + * Purpose : handles pseudo DMA + * Params : host - host + * SCpnt - command + * direction - DMA on to/off of card + * transfer - minimum number of bytes we expect to transfer + */ +static void +cumanascsi_2_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp, + fasdmadir_t direction, int transfer) +{ + struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; + unsigned int length; + unsigned char *addr; + + length = SCp->this_residual; + addr = SCp->ptr; + + if (direction == DMA_OUT) +#if 0 + while (length > 1) { + unsigned long word; + unsigned int status = readb(info->base + CUMANASCSI2_STATUS); + + if (status & STATUS_INT) + goto end; + + if (!(status & STATUS_DRQ)) + continue; + + word = *addr | *(addr + 1) << 8; + writew(word, info->base + CUMANASCSI2_PSEUDODMA); + addr += 2; + length -= 2; + } +#else + printk ("PSEUDO_OUT???\n"); +#endif + else { + if (transfer && (transfer & 255)) { + while (length >= 256) { + unsigned int status = readb(info->base + CUMANASCSI2_STATUS); + + if (status & STATUS_INT) + return; + + if (!(status & STATUS_DRQ)) + continue; + + readsw(info->base + CUMANASCSI2_PSEUDODMA, + addr, 256 >> 1); + addr += 256; + length -= 256; + } + } + + while (length > 0) { + unsigned long word; + unsigned int status = readb(info->base + CUMANASCSI2_STATUS); + + if (status & STATUS_INT) + return; + + if (!(status & STATUS_DRQ)) + continue; + + word = readw(info->base + CUMANASCSI2_PSEUDODMA); + *addr++ = word; + if (--length > 0) { + *addr++ = word >> 8; + length --; + } + } + } +} + +/* Prototype: int cumanascsi_2_dma_stop(host, SCpnt) + * Purpose : stops DMA/PIO + * Params : host - host + * SCpnt - command + */ +static void +cumanascsi_2_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) +{ + struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; + if (info->info.scsi.dma != NO_DMA) { + writeb(ALATCH_DIS_DMA, info->base + CUMANASCSI2_ALATCH); + disable_dma(info->info.scsi.dma); + } +} + +/* Prototype: const char *cumanascsi_2_info(struct Scsi_Host * host) + * Purpose : returns a descriptive string about this interface, + * Params : host - driver host structure to return info for. + * Returns : pointer to a static buffer containing null terminated string. 
+ */ +const char *cumanascsi_2_info(struct Scsi_Host *host) +{ + struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; + static char string[150]; + + sprintf(string, "%s (%s) in slot %d v%s terminators o%s", + host->hostt->name, info->info.scsi.type, info->ec->slot_no, + VERSION, info->terms ? "n" : "ff"); + + return string; +} + +/* Prototype: int cumanascsi_2_set_proc_info(struct Scsi_Host *host, char *buffer, int length) + * Purpose : Set a driver specific function + * Params : host - host to setup + * : buffer - buffer containing string describing operation + * : length - length of string + * Returns : -EINVAL, or 0 + */ +static int +cumanascsi_2_set_proc_info(struct Scsi_Host *host, char *buffer, int length) +{ + int ret = length; + + if (length >= 11 && strncmp(buffer, "CUMANASCSI2", 11) == 0) { + buffer += 11; + length -= 11; + + if (length >= 5 && strncmp(buffer, "term=", 5) == 0) { + if (buffer[5] == '1') + cumanascsi_2_terminator_ctl(host, 1); + else if (buffer[5] == '0') + cumanascsi_2_terminator_ctl(host, 0); + else + ret = -EINVAL; + } else { + ret = -EINVAL; + } + } else { + ret = -EINVAL; + } + + return ret; +} + +static int cumanascsi_2_show_info(struct seq_file *m, struct Scsi_Host *host) +{ + struct cumanascsi2_info *info; + info = (struct cumanascsi2_info *)host->hostdata; + + seq_printf(m, "Cumana SCSI II driver v%s\n", VERSION); + fas216_print_host(&info->info, m); + seq_printf(m, "Term : o%s\n", + info->terms ? "n" : "ff"); + + fas216_print_stats(&info->info, m); + fas216_print_devices(&info->info, m); + return 0; +} + +static const struct scsi_host_template cumanascsi2_template = { + .module = THIS_MODULE, + .show_info = cumanascsi_2_show_info, + .write_info = cumanascsi_2_set_proc_info, + .name = "Cumana SCSI II", + .info = cumanascsi_2_info, + .queuecommand = fas216_queue_command, + .eh_host_reset_handler = fas216_eh_host_reset, + .eh_bus_reset_handler = fas216_eh_bus_reset, + .eh_device_reset_handler = fas216_eh_device_reset, + .eh_abort_handler = fas216_eh_abort, + .cmd_size = sizeof(struct fas216_cmd_priv), + .can_queue = 1, + .this_id = 7, + .sg_tablesize = SG_MAX_SEGMENTS, + .dma_boundary = IOMD_DMA_BOUNDARY, + .proc_name = "cumanascsi2", +}; + +static int cumanascsi2_probe(struct expansion_card *ec, + const struct ecard_id *id) +{ + struct Scsi_Host *host; + struct cumanascsi2_info *info; + void __iomem *base; + int ret; + + ret = ecard_request_resources(ec); + if (ret) + goto out; + + base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); + if (!base) { + ret = -ENOMEM; + goto out_region; + } + + host = scsi_host_alloc(&cumanascsi2_template, + sizeof(struct cumanascsi2_info)); + if (!host) { + ret = -ENOMEM; + goto out_region; + } + + ecard_set_drvdata(ec, host); + + info = (struct cumanascsi2_info *)host->hostdata; + info->ec = ec; + info->base = base; + + cumanascsi_2_terminator_ctl(host, term[ec->slot_no]); + + info->info.scsi.io_base = base + CUMANASCSI2_FAS216_OFFSET; + info->info.scsi.io_shift = CUMANASCSI2_FAS216_SHIFT; + info->info.scsi.irq = ec->irq; + info->info.scsi.dma = ec->dma; + info->info.ifcfg.clockrate = 40; /* MHz */ + info->info.ifcfg.select_timeout = 255; + info->info.ifcfg.asyncperiod = 200; /* ns */ + info->info.ifcfg.sync_max_depth = 7; + info->info.ifcfg.cntl3 = CNTL3_BS8 | CNTL3_FASTSCSI | CNTL3_FASTCLK; + info->info.ifcfg.disconnect_ok = 1; + info->info.ifcfg.wide_max_size = 0; + info->info.ifcfg.capabilities = FASCAP_PSEUDODMA; + info->info.dma.setup = cumanascsi_2_dma_setup; + info->info.dma.pseudo = 
cumanascsi_2_dma_pseudo; + info->info.dma.stop = cumanascsi_2_dma_stop; + + ec->irqaddr = info->base + CUMANASCSI2_STATUS; + ec->irqmask = STATUS_INT; + + ecard_setirq(ec, &cumanascsi_2_ops, info); + + ret = fas216_init(host); + if (ret) + goto out_free; + + ret = request_irq(ec->irq, cumanascsi_2_intr, + 0, "cumanascsi2", info); + if (ret) { + printk("scsi%d: IRQ%d not free: %d\n", + host->host_no, ec->irq, ret); + goto out_release; + } + + if (info->info.scsi.dma != NO_DMA) { + if (request_dma(info->info.scsi.dma, "cumanascsi2")) { + printk("scsi%d: DMA%d not free, using PIO\n", + host->host_no, info->info.scsi.dma); + info->info.scsi.dma = NO_DMA; + } else { + set_dma_speed(info->info.scsi.dma, 180); + info->info.ifcfg.capabilities |= FASCAP_DMA; + } + } + + ret = fas216_add(host, &ec->dev); + if (ret == 0) + goto out; + + if (info->info.scsi.dma != NO_DMA) + free_dma(info->info.scsi.dma); + free_irq(ec->irq, info); + + out_release: + fas216_release(host); + + out_free: + scsi_host_put(host); + + out_region: + ecard_release_resources(ec); + + out: + return ret; +} + +static void cumanascsi2_remove(struct expansion_card *ec) +{ + struct Scsi_Host *host = ecard_get_drvdata(ec); + struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; + + ecard_set_drvdata(ec, NULL); + fas216_remove(host); + + if (info->info.scsi.dma != NO_DMA) + free_dma(info->info.scsi.dma); + free_irq(ec->irq, info); + + fas216_release(host); + scsi_host_put(host); + ecard_release_resources(ec); +} + +static const struct ecard_id cumanascsi2_cids[] = { + { MANU_CUMANA, PROD_CUMANA_SCSI_2 }, + { 0xffff, 0xffff }, +}; + +static struct ecard_driver cumanascsi2_driver = { + .probe = cumanascsi2_probe, + .remove = cumanascsi2_remove, + .id_table = cumanascsi2_cids, + .drv = { + .name = "cumanascsi2", + }, +}; + +static int __init cumanascsi2_init(void) +{ + return ecard_register_driver(&cumanascsi2_driver); +} + +static void __exit cumanascsi2_exit(void) +{ + ecard_remove_driver(&cumanascsi2_driver); +} + +module_init(cumanascsi2_init); +module_exit(cumanascsi2_exit); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("Cumana SCSI-2 driver for Acorn machines"); +module_param_array(term, int, NULL, 0); +MODULE_PARM_DESC(term, "SCSI bus termination"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c new file mode 100644 index 000000000..b3ec7635b --- /dev/null +++ b/drivers/scsi/arm/eesox.c @@ -0,0 +1,646 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/drivers/acorn/scsi/eesox.c + * + * Copyright (C) 1997-2005 Russell King + * + * This driver is based on experimentation. Hence, it may have made + * assumptions about the particular card that I have available, and + * may not be reliable! + * + * Changelog: + * 01-10-1997 RMK Created, READONLY version + * 15-02-1998 RMK READ/WRITE version + * added DMA support and hardware definitions + * 14-03-1998 RMK Updated DMA support + * Added terminator control + * 15-04-1998 RMK Only do PIO if FAS216 will allow it. + * 27-06-1998 RMK Changed asm/delay.h to linux/delay.h + * 02-04-2000 RMK 0.0.3 Fixed NO_IRQ/NO_DMA problem, updated for new + * error handling code. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "fas216.h" +#include "arm_scsi.h" + +#include + +#define EESOX_FAS216_OFFSET 0x3000 +#define EESOX_FAS216_SHIFT 5 + +#define EESOX_DMASTAT 0x2800 +#define EESOX_STAT_INTR 0x01 +#define EESOX_STAT_DMA 0x02 + +#define EESOX_CONTROL 0x2800 +#define EESOX_INTR_ENABLE 0x04 +#define EESOX_TERM_ENABLE 0x02 +#define EESOX_RESET 0x01 + +#define EESOX_DMADATA 0x3800 + +#define VERSION "1.10 (17/01/2003 2.5.59)" + +/* + * Use term=0,1,0,0,0 to turn terminators on/off + */ +static int term[MAX_ECARDS] = { 1, 1, 1, 1, 1, 1, 1, 1 }; + +#define NR_SG 256 + +struct eesoxscsi_info { + FAS216_Info info; + struct expansion_card *ec; + void __iomem *base; + void __iomem *ctl_port; + unsigned int control; + struct scatterlist sg[NR_SG]; /* Scatter DMA list */ +}; + +/* Prototype: void eesoxscsi_irqenable(ec, irqnr) + * Purpose : Enable interrupts on EESOX SCSI card + * Params : ec - expansion card structure + * : irqnr - interrupt number + */ +static void +eesoxscsi_irqenable(struct expansion_card *ec, int irqnr) +{ + struct eesoxscsi_info *info = (struct eesoxscsi_info *)ec->irq_data; + + info->control |= EESOX_INTR_ENABLE; + + writeb(info->control, info->ctl_port); +} + +/* Prototype: void eesoxscsi_irqdisable(ec, irqnr) + * Purpose : Disable interrupts on EESOX SCSI card + * Params : ec - expansion card structure + * : irqnr - interrupt number + */ +static void +eesoxscsi_irqdisable(struct expansion_card *ec, int irqnr) +{ + struct eesoxscsi_info *info = (struct eesoxscsi_info *)ec->irq_data; + + info->control &= ~EESOX_INTR_ENABLE; + + writeb(info->control, info->ctl_port); +} + +static const expansioncard_ops_t eesoxscsi_ops = { + .irqenable = eesoxscsi_irqenable, + .irqdisable = eesoxscsi_irqdisable, +}; + +/* Prototype: void eesoxscsi_terminator_ctl(*host, on_off) + * Purpose : Turn the EESOX SCSI terminators on or off + * Params : host - card to turn on/off + * : on_off - !0 to turn on, 0 to turn off + */ +static void +eesoxscsi_terminator_ctl(struct Scsi_Host *host, int on_off) +{ + struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; + unsigned long flags; + + spin_lock_irqsave(host->host_lock, flags); + if (on_off) + info->control |= EESOX_TERM_ENABLE; + else + info->control &= ~EESOX_TERM_ENABLE; + + writeb(info->control, info->ctl_port); + spin_unlock_irqrestore(host->host_lock, flags); +} + +/* Prototype: void eesoxscsi_intr(irq, *dev_id, *regs) + * Purpose : handle interrupts from EESOX SCSI card + * Params : irq - interrupt number + * dev_id - user-defined (Scsi_Host structure) + */ +static irqreturn_t +eesoxscsi_intr(int irq, void *dev_id) +{ + struct eesoxscsi_info *info = dev_id; + + return fas216_intr(&info->info); +} + +/* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type) + * Purpose : initialises DMA/PIO + * Params : host - host + * SCpnt - command + * direction - DMA on to/off of card + * min_type - minimum DMA support that we must have for this transfer + * Returns : type of transfer to be performed + */ +static fasdmatype_t +eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, + fasdmadir_t direction, fasdmatype_t min_type) +{ + struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; + struct device *dev = scsi_get_device(host); + int dmach = info->info.scsi.dma; + + if (dmach != NO_DMA && + 
(min_type == fasdma_real_all || SCp->this_residual >= 512)) { + int bufs, map_dir, dma_dir; + + bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG); + + if (direction == DMA_OUT) { + map_dir = DMA_TO_DEVICE; + dma_dir = DMA_MODE_WRITE; + } else { + map_dir = DMA_FROM_DEVICE; + dma_dir = DMA_MODE_READ; + } + + dma_map_sg(dev, info->sg, bufs, map_dir); + + disable_dma(dmach); + set_dma_sg(dmach, info->sg, bufs); + set_dma_mode(dmach, dma_dir); + enable_dma(dmach); + return fasdma_real_all; + } + /* + * We don't do DMA, we only do slow PIO + * + * Some day, we will do Pseudo DMA + */ + return fasdma_pseudo; +} + +static void eesoxscsi_buffer_in(void *buf, int length, void __iomem *base) +{ + const void __iomem *reg_fas = base + EESOX_FAS216_OFFSET; + const void __iomem *reg_dmastat = base + EESOX_DMASTAT; + const void __iomem *reg_dmadata = base + EESOX_DMADATA; + register const unsigned long mask = 0xffff; + + do { + unsigned int status; + + /* + * Interrupt request? + */ + status = readb(reg_fas + (REG_STAT << EESOX_FAS216_SHIFT)); + if (status & STAT_INT) + break; + + /* + * DMA request active? + */ + status = readb(reg_dmastat); + if (!(status & EESOX_STAT_DMA)) + continue; + + /* + * Get number of bytes in FIFO + */ + status = readb(reg_fas + (REG_CFIS << EESOX_FAS216_SHIFT)) & CFIS_CF; + if (status > 16) + status = 16; + if (status > length) + status = length; + + /* + * Align buffer. + */ + if (((u32)buf) & 2 && status >= 2) { + *(u16 *)buf = readl(reg_dmadata); + buf += 2; + status -= 2; + length -= 2; + } + + if (status >= 8) { + unsigned long l1, l2; + + l1 = readl(reg_dmadata) & mask; + l1 |= readl(reg_dmadata) << 16; + l2 = readl(reg_dmadata) & mask; + l2 |= readl(reg_dmadata) << 16; + *(u32 *)buf = l1; + buf += 4; + *(u32 *)buf = l2; + buf += 4; + length -= 8; + continue; + } + + if (status >= 4) { + unsigned long l1; + + l1 = readl(reg_dmadata) & mask; + l1 |= readl(reg_dmadata) << 16; + + *(u32 *)buf = l1; + buf += 4; + length -= 4; + continue; + } + + if (status >= 2) { + *(u16 *)buf = readl(reg_dmadata); + buf += 2; + length -= 2; + } + } while (length); +} + +static void eesoxscsi_buffer_out(void *buf, int length, void __iomem *base) +{ + const void __iomem *reg_fas = base + EESOX_FAS216_OFFSET; + const void __iomem *reg_dmastat = base + EESOX_DMASTAT; + void __iomem *reg_dmadata = base + EESOX_DMADATA; + + do { + unsigned int status; + + /* + * Interrupt request? + */ + status = readb(reg_fas + (REG_STAT << EESOX_FAS216_SHIFT)); + if (status & STAT_INT) + break; + + /* + * DMA request active? + */ + status = readb(reg_dmastat); + if (!(status & EESOX_STAT_DMA)) + continue; + + /* + * Get number of bytes in FIFO + */ + status = readb(reg_fas + (REG_CFIS << EESOX_FAS216_SHIFT)) & CFIS_CF; + if (status > 16) + status = 16; + status = 16 - status; + if (status > length) + status = length; + status &= ~1; + + /* + * Align buffer. 
+ */ + if (((u32)buf) & 2 && status >= 2) { + writel(*(u16 *)buf << 16, reg_dmadata); + buf += 2; + status -= 2; + length -= 2; + } + + if (status >= 8) { + unsigned long l1, l2; + + l1 = *(u32 *)buf; + buf += 4; + l2 = *(u32 *)buf; + buf += 4; + + writel(l1 << 16, reg_dmadata); + writel(l1, reg_dmadata); + writel(l2 << 16, reg_dmadata); + writel(l2, reg_dmadata); + length -= 8; + continue; + } + + if (status >= 4) { + unsigned long l1; + + l1 = *(u32 *)buf; + buf += 4; + + writel(l1 << 16, reg_dmadata); + writel(l1, reg_dmadata); + length -= 4; + continue; + } + + if (status >= 2) { + writel(*(u16 *)buf << 16, reg_dmadata); + buf += 2; + length -= 2; + } + } while (length); +} + +static void +eesoxscsi_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp, + fasdmadir_t dir, int transfer_size) +{ + struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; + if (dir == DMA_IN) { + eesoxscsi_buffer_in(SCp->ptr, SCp->this_residual, info->base); + } else { + eesoxscsi_buffer_out(SCp->ptr, SCp->this_residual, info->base); + } +} + +/* Prototype: int eesoxscsi_dma_stop(host, SCpnt) + * Purpose : stops DMA/PIO + * Params : host - host + * SCpnt - command + */ +static void +eesoxscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) +{ + struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; + if (info->info.scsi.dma != NO_DMA) + disable_dma(info->info.scsi.dma); +} + +/* Prototype: const char *eesoxscsi_info(struct Scsi_Host * host) + * Purpose : returns a descriptive string about this interface, + * Params : host - driver host structure to return info for. + * Returns : pointer to a static buffer containing null terminated string. + */ +const char *eesoxscsi_info(struct Scsi_Host *host) +{ + struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; + static char string[150]; + + sprintf(string, "%s (%s) in slot %d v%s terminators o%s", + host->hostt->name, info->info.scsi.type, info->ec->slot_no, + VERSION, info->control & EESOX_TERM_ENABLE ? "n" : "ff"); + + return string; +} + +/* Prototype: int eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) + * Purpose : Set a driver specific function + * Params : host - host to setup + * : buffer - buffer containing string describing operation + * : length - length of string + * Returns : -EINVAL, or 0 + */ +static int +eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) +{ + int ret = length; + + if (length >= 9 && strncmp(buffer, "EESOXSCSI", 9) == 0) { + buffer += 9; + length -= 9; + + if (length >= 5 && strncmp(buffer, "term=", 5) == 0) { + if (buffer[5] == '1') + eesoxscsi_terminator_ctl(host, 1); + else if (buffer[5] == '0') + eesoxscsi_terminator_ctl(host, 0); + else + ret = -EINVAL; + } else + ret = -EINVAL; + } else + ret = -EINVAL; + + return ret; +} + +static int eesoxscsi_show_info(struct seq_file *m, struct Scsi_Host *host) +{ + struct eesoxscsi_info *info; + + info = (struct eesoxscsi_info *)host->hostdata; + + seq_printf(m, "EESOX SCSI driver v%s\n", VERSION); + fas216_print_host(&info->info, m); + seq_printf(m, "Term : o%s\n", + info->control & EESOX_TERM_ENABLE ? 
"n" : "ff"); + + fas216_print_stats(&info->info, m); + fas216_print_devices(&info->info, m); + return 0; +} + +static ssize_t eesoxscsi_show_term(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct expansion_card *ec = ECARD_DEV(dev); + struct Scsi_Host *host = ecard_get_drvdata(ec); + struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; + + return sprintf(buf, "%d\n", info->control & EESOX_TERM_ENABLE ? 1 : 0); +} + +static ssize_t eesoxscsi_store_term(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) +{ + struct expansion_card *ec = ECARD_DEV(dev); + struct Scsi_Host *host = ecard_get_drvdata(ec); + struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; + unsigned long flags; + + if (len > 1) { + spin_lock_irqsave(host->host_lock, flags); + if (buf[0] != '0') { + info->control |= EESOX_TERM_ENABLE; + } else { + info->control &= ~EESOX_TERM_ENABLE; + } + writeb(info->control, info->ctl_port); + spin_unlock_irqrestore(host->host_lock, flags); + } + + return len; +} + +static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR, + eesoxscsi_show_term, eesoxscsi_store_term); + +static const struct scsi_host_template eesox_template = { + .module = THIS_MODULE, + .show_info = eesoxscsi_show_info, + .write_info = eesoxscsi_set_proc_info, + .name = "EESOX SCSI", + .info = eesoxscsi_info, + .queuecommand = fas216_queue_command, + .eh_host_reset_handler = fas216_eh_host_reset, + .eh_bus_reset_handler = fas216_eh_bus_reset, + .eh_device_reset_handler = fas216_eh_device_reset, + .eh_abort_handler = fas216_eh_abort, + .cmd_size = sizeof(struct fas216_cmd_priv), + .can_queue = 1, + .this_id = 7, + .sg_tablesize = SG_MAX_SEGMENTS, + .dma_boundary = IOMD_DMA_BOUNDARY, + .proc_name = "eesox", +}; + +static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id) +{ + struct Scsi_Host *host; + struct eesoxscsi_info *info; + void __iomem *base; + int ret; + + ret = ecard_request_resources(ec); + if (ret) + goto out; + + base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); + if (!base) { + ret = -ENOMEM; + goto out_region; + } + + host = scsi_host_alloc(&eesox_template, + sizeof(struct eesoxscsi_info)); + if (!host) { + ret = -ENOMEM; + goto out_region; + } + + ecard_set_drvdata(ec, host); + + info = (struct eesoxscsi_info *)host->hostdata; + info->ec = ec; + info->base = base; + info->ctl_port = base + EESOX_CONTROL; + info->control = term[ec->slot_no] ? 
EESOX_TERM_ENABLE : 0; + writeb(info->control, info->ctl_port); + + info->info.scsi.io_base = base + EESOX_FAS216_OFFSET; + info->info.scsi.io_shift = EESOX_FAS216_SHIFT; + info->info.scsi.irq = ec->irq; + info->info.scsi.dma = ec->dma; + info->info.ifcfg.clockrate = 40; /* MHz */ + info->info.ifcfg.select_timeout = 255; + info->info.ifcfg.asyncperiod = 200; /* ns */ + info->info.ifcfg.sync_max_depth = 7; + info->info.ifcfg.cntl3 = CNTL3_FASTSCSI | CNTL3_FASTCLK; + info->info.ifcfg.disconnect_ok = 1; + info->info.ifcfg.wide_max_size = 0; + info->info.ifcfg.capabilities = FASCAP_PSEUDODMA; + info->info.dma.setup = eesoxscsi_dma_setup; + info->info.dma.pseudo = eesoxscsi_dma_pseudo; + info->info.dma.stop = eesoxscsi_dma_stop; + + ec->irqaddr = base + EESOX_DMASTAT; + ec->irqmask = EESOX_STAT_INTR; + + ecard_setirq(ec, &eesoxscsi_ops, info); + + device_create_file(&ec->dev, &dev_attr_bus_term); + + ret = fas216_init(host); + if (ret) + goto out_free; + + ret = request_irq(ec->irq, eesoxscsi_intr, 0, "eesoxscsi", info); + if (ret) { + printk("scsi%d: IRQ%d not free: %d\n", + host->host_no, ec->irq, ret); + goto out_remove; + } + + if (info->info.scsi.dma != NO_DMA) { + if (request_dma(info->info.scsi.dma, "eesox")) { + printk("scsi%d: DMA%d not free, DMA disabled\n", + host->host_no, info->info.scsi.dma); + info->info.scsi.dma = NO_DMA; + } else { + set_dma_speed(info->info.scsi.dma, 180); + info->info.ifcfg.capabilities |= FASCAP_DMA; + info->info.ifcfg.cntl3 |= CNTL3_BS8; + } + } + + ret = fas216_add(host, &ec->dev); + if (ret == 0) + goto out; + + if (info->info.scsi.dma != NO_DMA) + free_dma(info->info.scsi.dma); + free_irq(ec->irq, info); + + out_remove: + fas216_remove(host); + + out_free: + device_remove_file(&ec->dev, &dev_attr_bus_term); + scsi_host_put(host); + + out_region: + ecard_release_resources(ec); + + out: + return ret; +} + +static void eesoxscsi_remove(struct expansion_card *ec) +{ + struct Scsi_Host *host = ecard_get_drvdata(ec); + struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; + + ecard_set_drvdata(ec, NULL); + fas216_remove(host); + + if (info->info.scsi.dma != NO_DMA) + free_dma(info->info.scsi.dma); + free_irq(ec->irq, info); + + device_remove_file(&ec->dev, &dev_attr_bus_term); + + fas216_release(host); + scsi_host_put(host); + ecard_release_resources(ec); +} + +static const struct ecard_id eesoxscsi_cids[] = { + { MANU_EESOX, PROD_EESOX_SCSI2 }, + { 0xffff, 0xffff }, +}; + +static struct ecard_driver eesoxscsi_driver = { + .probe = eesoxscsi_probe, + .remove = eesoxscsi_remove, + .id_table = eesoxscsi_cids, + .drv = { + .name = "eesoxscsi", + }, +}; + +static int __init eesox_init(void) +{ + return ecard_register_driver(&eesoxscsi_driver); +} + +static void __exit eesox_exit(void) +{ + ecard_remove_driver(&eesoxscsi_driver); +} + +module_init(eesox_init); +module_exit(eesox_exit); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("EESOX 'Fast' SCSI driver for Acorn machines"); +module_param_array(term, int, NULL, 0); +MODULE_PARM_DESC(term, "SCSI bus termination"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c new file mode 100644 index 000000000..4ce0b2d73 --- /dev/null +++ b/drivers/scsi/arm/fas216.c @@ -0,0 +1,3038 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/drivers/acorn/scsi/fas216.c + * + * Copyright (C) 1997-2003 Russell King + * + * Based on information in qlogicfas.c by Tom Zerucha, Michael Griffith, and + * other sources, including: + * the AMD Am53CF94 data sheet + * 
the AMD Am53C94 data sheet
+ *
+ * This is a generic driver. To use it, have a look at cumana_2.c. You
+ * should define your own structure that overlays FAS216_Info, eg:
+ * struct my_host_data {
+ * FAS216_Info info;
+ * ... my host specific data ...
+ * };
+ *
+ * Changelog:
+ * 30-08-1997 RMK Created
+ * 14-09-1997 RMK Started disconnect support
+ * 08-02-1998 RMK Corrected real DMA support
+ * 15-02-1998 RMK Started sync xfer support
+ * 06-04-1998 RMK Tightened conditions for printing incomplete
+ * transfers
+ * 02-05-1998 RMK Added extra checks in fas216_reset
+ * 24-05-1998 RMK Fixed synchronous transfers with period >= 200ns
+ * 27-06-1998 RMK Changed asm/delay.h to linux/delay.h
+ * 26-08-1998 RMK Improved message support wrt MESSAGE_REJECT
+ * 02-04-2000 RMK Converted to use the new error handling, and
+ * automatically request sense data upon check
+ * condition status from targets.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "fas216.h"
+#include "arm_scsi.h"
+
+/* NOTE: SCSI2 Synchronous transfers *require* DMA according to
+ * the data sheet. This restriction is crazy, especially when
+ * you only want to send 16 bytes! What were the guys who
+ * designed this chip on at that time? Did they read the SCSI2
+ * spec at all? The following sections are taken from the SCSI2
+ * standard (s2r10) concerning this:
+ *
+ * > IMPLEMENTORS NOTES:
+ * > (1) Re-negotiation at every selection is not recommended, since a
+ * > significant performance impact is likely.
+ *
+ * > The implied synchronous agreement shall remain in effect until a BUS DEVICE
+ * > RESET message is received, until a hard reset condition occurs, or until one
+ * > of the two SCSI devices elects to modify the agreement. The default data
+ * > transfer mode is asynchronous data transfer mode. The default data transfer
+ * > mode is entered at power on, after a BUS DEVICE RESET message, or after a hard
+ * > reset condition.
+ *
+ * In total, this means that once you have elected to use synchronous
+ * transfers, you must always use DMA.
+ * + * I was thinking that this was a good chip until I found this restriction ;( + */ +#define SCSI2_SYNC + +#undef DEBUG_CONNECT +#undef DEBUG_MESSAGES + +#undef CHECK_STRUCTURE + +#define LOG_CONNECT (1 << 0) +#define LOG_BUSSERVICE (1 << 1) +#define LOG_FUNCTIONDONE (1 << 2) +#define LOG_MESSAGES (1 << 3) +#define LOG_BUFFER (1 << 4) +#define LOG_ERROR (1 << 8) + +static int level_mask = LOG_ERROR; + +module_param(level_mask, int, 0644); + +#ifndef MODULE +static int __init fas216_log_setup(char *str) +{ + char *s; + + level_mask = 0; + + while ((s = strsep(&str, ",")) != NULL) { + switch (s[0]) { + case 'a': + if (strcmp(s, "all") == 0) + level_mask |= -1; + break; + case 'b': + if (strncmp(s, "bus", 3) == 0) + level_mask |= LOG_BUSSERVICE; + if (strncmp(s, "buf", 3) == 0) + level_mask |= LOG_BUFFER; + break; + case 'c': + level_mask |= LOG_CONNECT; + break; + case 'e': + level_mask |= LOG_ERROR; + break; + case 'm': + level_mask |= LOG_MESSAGES; + break; + case 'n': + if (strcmp(s, "none") == 0) + level_mask = 0; + break; + case 's': + level_mask |= LOG_FUNCTIONDONE; + break; + } + } + return 1; +} + +__setup("fas216_logging=", fas216_log_setup); +#endif + +static inline unsigned char fas216_readb(FAS216_Info *info, unsigned int reg) +{ + unsigned int off = reg << info->scsi.io_shift; + return readb(info->scsi.io_base + off); +} + +static inline void fas216_writeb(FAS216_Info *info, unsigned int reg, unsigned int val) +{ + unsigned int off = reg << info->scsi.io_shift; + writeb(val, info->scsi.io_base + off); +} + +static void fas216_dumpstate(FAS216_Info *info) +{ + unsigned char is, stat, inst; + + is = fas216_readb(info, REG_IS); + stat = fas216_readb(info, REG_STAT); + inst = fas216_readb(info, REG_INST); + + printk("FAS216: CTCL=%02X CTCM=%02X CMD=%02X STAT=%02X" + " INST=%02X IS=%02X CFIS=%02X", + fas216_readb(info, REG_CTCL), + fas216_readb(info, REG_CTCM), + fas216_readb(info, REG_CMD), stat, inst, is, + fas216_readb(info, REG_CFIS)); + printk(" CNTL1=%02X CNTL2=%02X CNTL3=%02X CTCH=%02X\n", + fas216_readb(info, REG_CNTL1), + fas216_readb(info, REG_CNTL2), + fas216_readb(info, REG_CNTL3), + fas216_readb(info, REG_CTCH)); +} + +static void print_SCp(struct scsi_pointer *SCp, const char *prefix, const char *suffix) +{ + printk("%sptr %p this_residual 0x%x buffer %p buffers_residual 0x%x%s", + prefix, SCp->ptr, SCp->this_residual, SCp->buffer, + SCp->buffers_residual, suffix); +} + +#ifdef CHECK_STRUCTURE +static void fas216_dumpinfo(FAS216_Info *info) +{ + static int used = 0; + int i; + + if (used++) + return; + + printk("FAS216_Info=\n"); + printk(" { magic_start=%lX host=%p SCpnt=%p origSCpnt=%p\n", + info->magic_start, info->host, info->SCpnt, + info->origSCpnt); + printk(" scsi={ io_shift=%X irq=%X cfg={ %X %X %X %X }\n", + info->scsi.io_shift, info->scsi.irq, + info->scsi.cfg[0], info->scsi.cfg[1], info->scsi.cfg[2], + info->scsi.cfg[3]); + printk(" type=%p phase=%X\n", + info->scsi.type, info->scsi.phase); + print_SCp(&info->scsi.SCp, " SCp={ ", " }\n"); + printk(" msgs async_stp=%X disconnectable=%d aborting=%d }\n", + info->scsi.async_stp, + info->scsi.disconnectable, info->scsi.aborting); + printk(" stats={ queues=%X removes=%X fins=%X reads=%X writes=%X miscs=%X\n" + " disconnects=%X aborts=%X bus_resets=%X host_resets=%X}\n", + info->stats.queues, info->stats.removes, info->stats.fins, + info->stats.reads, info->stats.writes, info->stats.miscs, + info->stats.disconnects, info->stats.aborts, info->stats.bus_resets, + info->stats.host_resets); + printk(" ifcfg={ 
clockrate=%X select_timeout=%X asyncperiod=%X sync_max_depth=%X }\n", + info->ifcfg.clockrate, info->ifcfg.select_timeout, + info->ifcfg.asyncperiod, info->ifcfg.sync_max_depth); + for (i = 0; i < 8; i++) { + printk(" busyluns[%d]=%08lx dev[%d]={ disconnect_ok=%d stp=%X sof=%X sync_state=%X }\n", + i, info->busyluns[i], i, + info->device[i].disconnect_ok, info->device[i].stp, + info->device[i].sof, info->device[i].sync_state); + } + printk(" dma={ transfer_type=%X setup=%p pseudo=%p stop=%p }\n", + info->dma.transfer_type, info->dma.setup, + info->dma.pseudo, info->dma.stop); + printk(" internal_done=%X magic_end=%lX }\n", + info->internal_done, info->magic_end); +} + +static void __fas216_checkmagic(FAS216_Info *info, const char *func) +{ + int corruption = 0; + if (info->magic_start != MAGIC) { + printk(KERN_CRIT "FAS216 Error: magic at start corrupted\n"); + corruption++; + } + if (info->magic_end != MAGIC) { + printk(KERN_CRIT "FAS216 Error: magic at end corrupted\n"); + corruption++; + } + if (corruption) { + fas216_dumpinfo(info); + panic("scsi memory space corrupted in %s", func); + } +} +#define fas216_checkmagic(info) __fas216_checkmagic((info), __func__) +#else +#define fas216_checkmagic(info) +#endif + +static const char *fas216_bus_phase(int stat) +{ + static const char *phases[] = { + "DATA OUT", "DATA IN", + "COMMAND", "STATUS", + "MISC OUT", "MISC IN", + "MESG OUT", "MESG IN" + }; + + return phases[stat & STAT_BUSMASK]; +} + +static const char *fas216_drv_phase(FAS216_Info *info) +{ + static const char *phases[] = { + [PHASE_IDLE] = "idle", + [PHASE_SELECTION] = "selection", + [PHASE_COMMAND] = "command", + [PHASE_DATAOUT] = "data out", + [PHASE_DATAIN] = "data in", + [PHASE_MSGIN] = "message in", + [PHASE_MSGIN_DISCONNECT]= "disconnect", + [PHASE_MSGOUT_EXPECT] = "expect message out", + [PHASE_MSGOUT] = "message out", + [PHASE_STATUS] = "status", + [PHASE_DONE] = "done", + }; + + if (info->scsi.phase < ARRAY_SIZE(phases) && + phases[info->scsi.phase]) + return phases[info->scsi.phase]; + return "???"; +} + +static char fas216_target(FAS216_Info *info) +{ + if (info->SCpnt) + return '0' + info->SCpnt->device->id; + else + return 'H'; +} + +static void +fas216_do_log(FAS216_Info *info, char target, char *fmt, va_list ap) +{ + static char buf[1024]; + + vsnprintf(buf, sizeof(buf), fmt, ap); + printk("scsi%d.%c: %s", info->host->host_no, target, buf); +} + +static void fas216_log_command(FAS216_Info *info, int level, + struct scsi_cmnd *SCpnt, char *fmt, ...) +{ + va_list args; + + if (level != 0 && !(level & level_mask)) + return; + + va_start(args, fmt); + fas216_do_log(info, '0' + SCpnt->device->id, fmt, args); + va_end(args); + + scsi_print_command(SCpnt); +} + +static void +fas216_log_target(FAS216_Info *info, int level, int target, char *fmt, ...) +{ + va_list args; + + if (level != 0 && !(level & level_mask)) + return; + + if (target < 0) + target = 'H'; + else + target += '0'; + + va_start(args, fmt); + fas216_do_log(info, target, fmt, args); + va_end(args); + + printk("\n"); +} + +static void fas216_log(FAS216_Info *info, int level, char *fmt, ...) 
+{ + va_list args; + + if (level != 0 && !(level & level_mask)) + return; + + va_start(args, fmt); + fas216_do_log(info, fas216_target(info), fmt, args); + va_end(args); + + printk("\n"); +} + +#define PH_SIZE 32 + +static struct { int stat, ssr, isr, ph; } ph_list[PH_SIZE]; +static int ph_ptr; + +static void add_debug_list(int stat, int ssr, int isr, int ph) +{ + ph_list[ph_ptr].stat = stat; + ph_list[ph_ptr].ssr = ssr; + ph_list[ph_ptr].isr = isr; + ph_list[ph_ptr].ph = ph; + + ph_ptr = (ph_ptr + 1) & (PH_SIZE-1); +} + +static struct { int command; void *from; } cmd_list[8]; +static int cmd_ptr; + +static void fas216_cmd(FAS216_Info *info, unsigned int command) +{ + cmd_list[cmd_ptr].command = command; + cmd_list[cmd_ptr].from = __builtin_return_address(0); + + cmd_ptr = (cmd_ptr + 1) & 7; + + fas216_writeb(info, REG_CMD, command); +} + +static void print_debug_list(void) +{ + int i; + + i = ph_ptr; + + printk(KERN_ERR "SCSI IRQ trail\n"); + do { + printk(" %02x:%02x:%02x:%1x", + ph_list[i].stat, ph_list[i].ssr, + ph_list[i].isr, ph_list[i].ph); + i = (i + 1) & (PH_SIZE - 1); + if (((i ^ ph_ptr) & 7) == 0) + printk("\n"); + } while (i != ph_ptr); + if ((i ^ ph_ptr) & 7) + printk("\n"); + + i = cmd_ptr; + printk(KERN_ERR "FAS216 commands: "); + do { + printk("%02x:%p ", cmd_list[i].command, cmd_list[i].from); + i = (i + 1) & 7; + } while (i != cmd_ptr); + printk("\n"); +} + +static void fas216_done(FAS216_Info *info, unsigned int result); + +/** + * fas216_get_last_msg - retrive last message from the list + * @info: interface to search + * @pos: current fifo position + * + * Retrieve a last message from the list, using position in fifo. + */ +static inline unsigned short +fas216_get_last_msg(FAS216_Info *info, int pos) +{ + unsigned short packed_msg = NOP; + struct message *msg; + int msgnr = 0; + + while ((msg = msgqueue_getmsg(&info->scsi.msgs, msgnr++)) != NULL) { + if (pos >= msg->fifo) + break; + } + + if (msg) { + if (msg->msg[0] == EXTENDED_MESSAGE) + packed_msg = EXTENDED_MESSAGE | msg->msg[2] << 8; + else + packed_msg = msg->msg[0]; + } + + fas216_log(info, LOG_MESSAGES, + "Message: %04x found at position %02x\n", packed_msg, pos); + + return packed_msg; +} + +/** + * fas216_syncperiod - calculate STP register value + * @info: state structure for interface connected to device + * @ns: period in ns (between subsequent bytes) + * + * Calculate value to be loaded into the STP register for a given period + * in ns. Returns a value suitable for REG_STP. + */ +static int fas216_syncperiod(FAS216_Info *info, int ns) +{ + int value = (info->ifcfg.clockrate * ns) / 1000; + + fas216_checkmagic(info); + + if (value < 4) + value = 4; + else if (value > 35) + value = 35; + + return value & 31; +} + +/** + * fas216_set_sync - setup FAS216 chip for specified transfer period. + * @info: state structure for interface connected to device + * @target: target + * + * Correctly setup FAS216 chip for specified transfer period. + * Notes : we need to switch the chip out of FASTSCSI mode if we have + * a transfer period >= 200ns - otherwise the chip will violate + * the SCSI timings. 
+ */ +static void fas216_set_sync(FAS216_Info *info, int target) +{ + unsigned int cntl3; + + fas216_writeb(info, REG_SOF, info->device[target].sof); + fas216_writeb(info, REG_STP, info->device[target].stp); + + cntl3 = info->scsi.cfg[2]; + if (info->device[target].period >= (200 / 4)) + cntl3 = cntl3 & ~CNTL3_FASTSCSI; + + fas216_writeb(info, REG_CNTL3, cntl3); +} + +/* Synchronous transfer support + * + * Note: The SCSI II r10 spec says (5.6.12): + * + * (2) Due to historical problems with early host adapters that could + * not accept an SDTR message, some targets may not initiate synchronous + * negotiation after a power cycle as required by this standard. Host + * adapters that support synchronous mode may avoid the ensuing failure + * modes when the target is independently power cycled by initiating a + * synchronous negotiation on each REQUEST SENSE and INQUIRY command. + * This approach increases the SCSI bus overhead and is not recommended + * for new implementations. The correct method is to respond to an + * SDTR message with a MESSAGE REJECT message if the either the + * initiator or target devices does not support synchronous transfers + * or does not want to negotiate for synchronous transfers at the time. + * Using the correct method assures compatibility with wide data + * transfers and future enhancements. + * + * We will always initiate a synchronous transfer negotiation request on + * every INQUIRY or REQUEST SENSE message, unless the target itself has + * at some point performed a synchronous transfer negotiation request, or + * we have synchronous transfers disabled for this device. + */ + +/** + * fas216_handlesync - Handle a synchronous transfer message + * @info: state structure for interface + * @msg: message from target + * + * Handle a synchronous transfer message from the target + */ +static void fas216_handlesync(FAS216_Info *info, char *msg) +{ + struct fas216_device *dev = &info->device[info->SCpnt->device->id]; + enum { sync, async, none, reject } res = none; + +#ifdef SCSI2_SYNC + switch (msg[0]) { + case MESSAGE_REJECT: + /* Synchronous transfer request failed. + * Note: SCSI II r10: + * + * SCSI devices that are capable of synchronous + * data transfers shall not respond to an SDTR + * message with a MESSAGE REJECT message. + * + * Hence, if we get this condition, we disable + * negotiation for this device. + */ + if (dev->sync_state == neg_inprogress) { + dev->sync_state = neg_invalid; + res = async; + } + break; + + case EXTENDED_MESSAGE: + switch (dev->sync_state) { + /* We don't accept synchronous transfer requests. + * Respond with a MESSAGE_REJECT to prevent a + * synchronous transfer agreement from being reached. + */ + case neg_invalid: + res = reject; + break; + + /* We were not negotiating a synchronous transfer, + * but the device sent us a negotiation request. + * Honour the request by sending back a SDTR + * message containing our capability, limited by + * the targets capability. + */ + default: + fas216_cmd(info, CMD_SETATN); + if (msg[4] > info->ifcfg.sync_max_depth) + msg[4] = info->ifcfg.sync_max_depth; + if (msg[3] < 1000 / info->ifcfg.clockrate) + msg[3] = 1000 / info->ifcfg.clockrate; + + msgqueue_flush(&info->scsi.msgs); + msgqueue_addmsg(&info->scsi.msgs, 5, + EXTENDED_MESSAGE, 3, EXTENDED_SDTR, + msg[3], msg[4]); + info->scsi.phase = PHASE_MSGOUT_EXPECT; + + /* This is wrong. 
The agreement is not in effect + * until this message is accepted by the device + */ + dev->sync_state = neg_targcomplete; + res = sync; + break; + + /* We initiated the synchronous transfer negotiation, + * and have successfully received a response from the + * target. The synchronous transfer agreement has been + * reached. Note: if the values returned are out of our + * bounds, we must reject the message. + */ + case neg_inprogress: + res = reject; + if (msg[4] <= info->ifcfg.sync_max_depth && + msg[3] >= 1000 / info->ifcfg.clockrate) { + dev->sync_state = neg_complete; + res = sync; + } + break; + } + } +#else + res = reject; +#endif + + switch (res) { + case sync: + dev->period = msg[3]; + dev->sof = msg[4]; + dev->stp = fas216_syncperiod(info, msg[3] * 4); + fas216_set_sync(info, info->SCpnt->device->id); + break; + + case reject: + fas216_cmd(info, CMD_SETATN); + msgqueue_flush(&info->scsi.msgs); + msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT); + info->scsi.phase = PHASE_MSGOUT_EXPECT; + fallthrough; + + case async: + dev->period = info->ifcfg.asyncperiod / 4; + dev->sof = 0; + dev->stp = info->scsi.async_stp; + fas216_set_sync(info, info->SCpnt->device->id); + break; + + case none: + break; + } +} + +/** + * fas216_updateptrs - update data pointers after transfer suspended/paused + * @info: interface's local pointer to update + * @bytes_transferred: number of bytes transferred + * + * Update data pointers after transfer suspended/paused + */ +static void fas216_updateptrs(FAS216_Info *info, int bytes_transferred) +{ + struct scsi_pointer *SCp = &info->scsi.SCp; + + fas216_checkmagic(info); + + BUG_ON(bytes_transferred < 0); + + SCp->phase -= bytes_transferred; + + while (bytes_transferred != 0) { + if (SCp->this_residual > bytes_transferred) + break; + /* + * We have used up this buffer. Move on to the + * next buffer. + */ + bytes_transferred -= SCp->this_residual; + if (!next_SCp(SCp) && bytes_transferred) { + printk(KERN_WARNING "scsi%d.%c: out of buffers\n", + info->host->host_no, '0' + info->SCpnt->device->id); + return; + } + } + + SCp->this_residual -= bytes_transferred; + if (SCp->this_residual) + SCp->ptr += bytes_transferred; + else + SCp->ptr = NULL; +} + +/** + * fas216_pio - transfer data off of/on to card using programmed IO + * @info: interface to transfer data to/from + * @direction: direction to transfer data (DMA_OUT/DMA_IN) + * + * Transfer data off of/on to card using programmed IO. + * Notes: this is incredibly slow. + */ +static void fas216_pio(FAS216_Info *info, fasdmadir_t direction) +{ + struct scsi_pointer *SCp = &info->scsi.SCp; + + fas216_checkmagic(info); + + if (direction == DMA_OUT) + fas216_writeb(info, REG_FF, get_next_SCp_byte(SCp)); + else + put_next_SCp_byte(SCp, fas216_readb(info, REG_FF)); + + if (SCp->this_residual == 0) + next_SCp(SCp); +} + +static void fas216_set_stc(FAS216_Info *info, unsigned int length) +{ + fas216_writeb(info, REG_STCL, length); + fas216_writeb(info, REG_STCM, length >> 8); + fas216_writeb(info, REG_STCH, length >> 16); +} + +static unsigned int fas216_get_ctc(FAS216_Info *info) +{ + return fas216_readb(info, REG_CTCL) + + (fas216_readb(info, REG_CTCM) << 8) + + (fas216_readb(info, REG_CTCH) << 16); +} + +/** + * fas216_cleanuptransfer - clean up after a transfer has completed. + * @info: interface to clean up + * + * Update the data pointers according to the number of bytes transferred + * on the SCSI bus. 
+ */ +static void fas216_cleanuptransfer(FAS216_Info *info) +{ + unsigned long total, residual, fifo; + fasdmatype_t dmatype = info->dma.transfer_type; + + info->dma.transfer_type = fasdma_none; + + /* + * PIO transfers do not need to be cleaned up. + */ + if (dmatype == fasdma_pio || dmatype == fasdma_none) + return; + + if (dmatype == fasdma_real_all) + total = info->scsi.SCp.phase; + else + total = info->scsi.SCp.this_residual; + + residual = fas216_get_ctc(info); + + fifo = fas216_readb(info, REG_CFIS) & CFIS_CF; + + fas216_log(info, LOG_BUFFER, "cleaning up from previous " + "transfer: length 0x%06x, residual 0x%x, fifo %d", + total, residual, fifo); + + /* + * If we were performing Data-Out, the transfer counter + * counts down each time a byte is transferred by the + * host to the FIFO. This means we must include the + * bytes left in the FIFO from the transfer counter. + */ + if (info->scsi.phase == PHASE_DATAOUT) + residual += fifo; + + fas216_updateptrs(info, total - residual); +} + +/** + * fas216_transfer - Perform a DMA/PIO transfer off of/on to card + * @info: interface from which device disconnected from + * + * Start a DMA/PIO transfer off of/on to card + */ +static void fas216_transfer(FAS216_Info *info) +{ + fasdmadir_t direction; + fasdmatype_t dmatype; + + fas216_log(info, LOG_BUFFER, + "starttransfer: buffer %p length 0x%06x reqlen 0x%06x", + info->scsi.SCp.ptr, info->scsi.SCp.this_residual, + info->scsi.SCp.phase); + + if (!info->scsi.SCp.ptr) { + fas216_log(info, LOG_ERROR, "null buffer passed to " + "fas216_starttransfer"); + print_SCp(&info->scsi.SCp, "SCp: ", "\n"); + print_SCp(arm_scsi_pointer(info->SCpnt), "Cmnd SCp: ", "\n"); + return; + } + + /* + * If we have a synchronous transfer agreement in effect, we must + * use DMA mode. If we are using asynchronous transfers, we may + * use DMA mode or PIO mode. + */ + if (info->device[info->SCpnt->device->id].sof) + dmatype = fasdma_real_all; + else + dmatype = fasdma_pio; + + if (info->scsi.phase == PHASE_DATAOUT) + direction = DMA_OUT; + else + direction = DMA_IN; + + if (info->dma.setup) + dmatype = info->dma.setup(info->host, &info->scsi.SCp, + direction, dmatype); + info->dma.transfer_type = dmatype; + + if (dmatype == fasdma_real_all) + fas216_set_stc(info, info->scsi.SCp.phase); + else + fas216_set_stc(info, info->scsi.SCp.this_residual); + + switch (dmatype) { + case fasdma_pio: + fas216_log(info, LOG_BUFFER, "PIO transfer"); + fas216_writeb(info, REG_SOF, 0); + fas216_writeb(info, REG_STP, info->scsi.async_stp); + fas216_cmd(info, CMD_TRANSFERINFO); + fas216_pio(info, direction); + break; + + case fasdma_pseudo: + fas216_log(info, LOG_BUFFER, "pseudo transfer"); + fas216_cmd(info, CMD_TRANSFERINFO | CMD_WITHDMA); + info->dma.pseudo(info->host, &info->scsi.SCp, + direction, info->SCpnt->transfersize); + break; + + case fasdma_real_block: + fas216_log(info, LOG_BUFFER, "block dma transfer"); + fas216_cmd(info, CMD_TRANSFERINFO | CMD_WITHDMA); + break; + + case fasdma_real_all: + fas216_log(info, LOG_BUFFER, "total dma transfer"); + fas216_cmd(info, CMD_TRANSFERINFO | CMD_WITHDMA); + break; + + default: + fas216_log(info, LOG_BUFFER | LOG_ERROR, + "invalid FAS216 DMA type"); + break; + } +} + +/** + * fas216_stoptransfer - Stop a DMA transfer onto / off of the card + * @info: interface from which device disconnected from + * + * Called when we switch away from DATA IN or DATA OUT phases. 
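+ * Any DMA still in progress is stopped and the data pointers brought
+ * up to date; after Data-In, bytes left over in the chip FIFO are read
+ * out by hand, whereas after Data-Out any unsent bytes are discarded
+ * with a FIFO flush.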
+ */ +static void fas216_stoptransfer(FAS216_Info *info) +{ + fas216_checkmagic(info); + + if (info->dma.transfer_type == fasdma_real_all || + info->dma.transfer_type == fasdma_real_block) + info->dma.stop(info->host, &info->scsi.SCp); + + fas216_cleanuptransfer(info); + + if (info->scsi.phase == PHASE_DATAIN) { + unsigned int fifo; + + /* + * If we were performing Data-In, then the FIFO counter + * contains the number of bytes not transferred via DMA + * from the on-board FIFO. Read them manually. + */ + fifo = fas216_readb(info, REG_CFIS) & CFIS_CF; + while (fifo && info->scsi.SCp.ptr) { + *info->scsi.SCp.ptr = fas216_readb(info, REG_FF); + fas216_updateptrs(info, 1); + fifo--; + } + } else { + /* + * After a Data-Out phase, there may be unsent + * bytes left in the FIFO. Flush them out. + */ + fas216_cmd(info, CMD_FLUSHFIFO); + } +} + +static void fas216_aborttransfer(FAS216_Info *info) +{ + fas216_checkmagic(info); + + if (info->dma.transfer_type == fasdma_real_all || + info->dma.transfer_type == fasdma_real_block) + info->dma.stop(info->host, &info->scsi.SCp); + + info->dma.transfer_type = fasdma_none; + fas216_cmd(info, CMD_FLUSHFIFO); +} + +static void fas216_kick(FAS216_Info *info); + +/** + * fas216_disconnected_intr - handle device disconnection + * @info: interface from which device disconnected from + * + * Handle device disconnection + */ +static void fas216_disconnect_intr(FAS216_Info *info) +{ + unsigned long flags; + + fas216_checkmagic(info); + + fas216_log(info, LOG_CONNECT, "disconnect phase=%02x", + info->scsi.phase); + + msgqueue_flush(&info->scsi.msgs); + + switch (info->scsi.phase) { + case PHASE_SELECTION: /* while selecting - no target */ + case PHASE_SELSTEPS: + fas216_done(info, DID_NO_CONNECT); + break; + + case PHASE_MSGIN_DISCONNECT: /* message in - disconnecting */ + info->scsi.disconnectable = 1; + info->scsi.phase = PHASE_IDLE; + info->stats.disconnects += 1; + spin_lock_irqsave(&info->host_lock, flags); + if (info->scsi.phase == PHASE_IDLE) + fas216_kick(info); + spin_unlock_irqrestore(&info->host_lock, flags); + break; + + case PHASE_DONE: /* at end of command - complete */ + fas216_done(info, DID_OK); + break; + + case PHASE_MSGOUT: /* message out - possible ABORT message */ + if (fas216_get_last_msg(info, info->scsi.msgin_fifo) == ABORT) { + info->scsi.aborting = 0; + fas216_done(info, DID_ABORT); + break; + } + fallthrough; + + default: /* huh? 
*/ + printk(KERN_ERR "scsi%d.%c: unexpected disconnect in phase %s\n", + info->host->host_no, fas216_target(info), fas216_drv_phase(info)); + print_debug_list(); + fas216_stoptransfer(info); + fas216_done(info, DID_ERROR); + break; + } +} + +/** + * fas216_reselected_intr - start reconnection of a device + * @info: interface which was reselected + * + * Start reconnection of a device + */ +static void +fas216_reselected_intr(FAS216_Info *info) +{ + unsigned int cfis, i; + unsigned char msg[4]; + unsigned char target, lun, tag; + + fas216_checkmagic(info); + + WARN_ON(info->scsi.phase == PHASE_SELECTION || + info->scsi.phase == PHASE_SELSTEPS); + + cfis = fas216_readb(info, REG_CFIS); + + fas216_log(info, LOG_CONNECT, "reconnect phase=%02x cfis=%02x", + info->scsi.phase, cfis); + + cfis &= CFIS_CF; + + if (cfis < 2 || cfis > 4) { + printk(KERN_ERR "scsi%d.H: incorrect number of bytes after reselect\n", + info->host->host_no); + goto bad_message; + } + + for (i = 0; i < cfis; i++) + msg[i] = fas216_readb(info, REG_FF); + + if (!(msg[0] & (1 << info->host->this_id)) || + !(msg[1] & 0x80)) + goto initiator_error; + + target = msg[0] & ~(1 << info->host->this_id); + target = ffs(target) - 1; + lun = msg[1] & 7; + tag = 0; + + if (cfis >= 3) { + if (msg[2] != SIMPLE_QUEUE_TAG) + goto initiator_error; + + tag = msg[3]; + } + + /* set up for synchronous transfers */ + fas216_writeb(info, REG_SDID, target); + fas216_set_sync(info, target); + msgqueue_flush(&info->scsi.msgs); + + fas216_log(info, LOG_CONNECT, "Reconnected: target %1x lun %1x tag %02x", + target, lun, tag); + + if (info->scsi.disconnectable && info->SCpnt) { + info->scsi.disconnectable = 0; + if (info->SCpnt->device->id == target && + info->SCpnt->device->lun == lun && + scsi_cmd_to_rq(info->SCpnt)->tag == tag) { + fas216_log(info, LOG_CONNECT, "reconnected previously executing command"); + } else { + queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt); + fas216_log(info, LOG_CONNECT, "had to move command to disconnected queue"); + info->SCpnt = NULL; + } + } + if (!info->SCpnt) { + info->SCpnt = queue_remove_tgtluntag(&info->queues.disconnected, + target, lun, tag); + fas216_log(info, LOG_CONNECT, "had to get command"); + } + + if (info->SCpnt) { + /* + * Restore data pointer from SAVED data pointer + */ + info->scsi.SCp = *arm_scsi_pointer(info->SCpnt); + + fas216_log(info, LOG_CONNECT, "data pointers: [%p, %X]", + info->scsi.SCp.ptr, info->scsi.SCp.this_residual); + info->scsi.phase = PHASE_MSGIN; + } else { + /* + * Our command structure not found - abort the + * command on the target. Since we have no + * record of this command, we can't send + * an INITIATOR DETECTED ERROR message. 
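+ * Instead, ATN is asserted and a plain ABORT is queued for the next
+ * MESSAGE OUT phase (the ABORT_TAG variant is compiled out below), so
+ * the target gives up the nexus it tried to re-establish.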
+ */ + fas216_cmd(info, CMD_SETATN); + +#if 0 + if (tag) + msgqueue_addmsg(&info->scsi.msgs, 2, ABORT_TAG, tag); + else +#endif + msgqueue_addmsg(&info->scsi.msgs, 1, ABORT); + info->scsi.phase = PHASE_MSGOUT_EXPECT; + info->scsi.aborting = 1; + } + + fas216_cmd(info, CMD_MSGACCEPTED); + return; + + initiator_error: + printk(KERN_ERR "scsi%d.H: error during reselection: bytes", + info->host->host_no); + for (i = 0; i < cfis; i++) + printk(" %02x", msg[i]); + printk("\n"); + bad_message: + fas216_cmd(info, CMD_SETATN); + msgqueue_flush(&info->scsi.msgs); + msgqueue_addmsg(&info->scsi.msgs, 1, INITIATOR_ERROR); + info->scsi.phase = PHASE_MSGOUT_EXPECT; + fas216_cmd(info, CMD_MSGACCEPTED); +} + +static void fas216_parse_message(FAS216_Info *info, unsigned char *message, int msglen) +{ + struct scsi_pointer *scsi_pointer; + int i; + + switch (message[0]) { + case COMMAND_COMPLETE: + if (msglen != 1) + goto unrecognised; + + printk(KERN_ERR "scsi%d.%c: command complete with no " + "status in MESSAGE_IN?\n", + info->host->host_no, fas216_target(info)); + break; + + case SAVE_POINTERS: + if (msglen != 1) + goto unrecognised; + + /* + * Save current data pointer to SAVED data pointer + * SCSI II standard says that we must not acknowledge + * this until we have really saved pointers. + * NOTE: we DO NOT save the command nor status pointers + * as required by the SCSI II standard. These always + * point to the start of their respective areas. + */ + scsi_pointer = arm_scsi_pointer(info->SCpnt); + *scsi_pointer = info->scsi.SCp; + scsi_pointer->sent_command = 0; + fas216_log(info, LOG_CONNECT | LOG_MESSAGES | LOG_BUFFER, + "save data pointers: [%p, %X]", + info->scsi.SCp.ptr, info->scsi.SCp.this_residual); + break; + + case RESTORE_POINTERS: + if (msglen != 1) + goto unrecognised; + + /* + * Restore current data pointer from SAVED data pointer + */ + info->scsi.SCp = *arm_scsi_pointer(info->SCpnt); + fas216_log(info, LOG_CONNECT | LOG_MESSAGES | LOG_BUFFER, + "restore data pointers: [%p, 0x%x]", + info->scsi.SCp.ptr, info->scsi.SCp.this_residual); + break; + + case DISCONNECT: + if (msglen != 1) + goto unrecognised; + + info->scsi.phase = PHASE_MSGIN_DISCONNECT; + break; + + case MESSAGE_REJECT: + if (msglen != 1) + goto unrecognised; + + switch (fas216_get_last_msg(info, info->scsi.msgin_fifo)) { + case EXTENDED_MESSAGE | EXTENDED_SDTR << 8: + fas216_handlesync(info, message); + break; + + default: + fas216_log(info, 0, "reject, last message 0x%04x", + fas216_get_last_msg(info, info->scsi.msgin_fifo)); + } + break; + + case NOP: + break; + + case EXTENDED_MESSAGE: + if (msglen < 3) + goto unrecognised; + + switch (message[2]) { + case EXTENDED_SDTR: /* Sync transfer negotiation request/reply */ + fas216_handlesync(info, message); + break; + + default: + goto unrecognised; + } + break; + + default: + goto unrecognised; + } + return; + +unrecognised: + fas216_log(info, 0, "unrecognised message, rejecting"); + printk("scsi%d.%c: message was", info->host->host_no, fas216_target(info)); + for (i = 0; i < msglen; i++) + printk("%s%02X", i & 31 ? " " : "\n ", message[i]); + printk("\n"); + + /* + * Something strange seems to be happening here - + * I can't use SETATN since the chip gives me an + * invalid command interrupt when I do. Weird. 
+ */ +fas216_cmd(info, CMD_NOP); +fas216_dumpstate(info); + fas216_cmd(info, CMD_SETATN); + msgqueue_flush(&info->scsi.msgs); + msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT); + info->scsi.phase = PHASE_MSGOUT_EXPECT; +fas216_dumpstate(info); +} + +static int fas216_wait_cmd(FAS216_Info *info, int cmd) +{ + int tout; + int stat; + + fas216_cmd(info, cmd); + + for (tout = 1000; tout; tout -= 1) { + stat = fas216_readb(info, REG_STAT); + if (stat & (STAT_INT|STAT_PARITYERROR)) + break; + udelay(1); + } + + return stat; +} + +static int fas216_get_msg_byte(FAS216_Info *info) +{ + unsigned int stat = fas216_wait_cmd(info, CMD_MSGACCEPTED); + + if ((stat & STAT_INT) == 0) + goto timedout; + + if ((stat & STAT_BUSMASK) != STAT_MESGIN) + goto unexpected_phase_change; + + fas216_readb(info, REG_INST); + + stat = fas216_wait_cmd(info, CMD_TRANSFERINFO); + + if ((stat & STAT_INT) == 0) + goto timedout; + + if (stat & STAT_PARITYERROR) + goto parity_error; + + if ((stat & STAT_BUSMASK) != STAT_MESGIN) + goto unexpected_phase_change; + + fas216_readb(info, REG_INST); + + return fas216_readb(info, REG_FF); + +timedout: + fas216_log(info, LOG_ERROR, "timed out waiting for message byte"); + return -1; + +unexpected_phase_change: + fas216_log(info, LOG_ERROR, "unexpected phase change: status = %02x", stat); + return -2; + +parity_error: + fas216_log(info, LOG_ERROR, "parity error during message in phase"); + return -3; +} + +/** + * fas216_message - handle a function done interrupt from FAS216 chip + * @info: interface which caused function done interrupt + * + * Handle a function done interrupt from FAS216 chip + */ +static void fas216_message(FAS216_Info *info) +{ + unsigned char *message = info->scsi.message; + unsigned int msglen = 1; + int msgbyte = 0; + + fas216_checkmagic(info); + + message[0] = fas216_readb(info, REG_FF); + + if (message[0] == EXTENDED_MESSAGE) { + msgbyte = fas216_get_msg_byte(info); + + if (msgbyte >= 0) { + message[1] = msgbyte; + + for (msglen = 2; msglen < message[1] + 2; msglen++) { + msgbyte = fas216_get_msg_byte(info); + + if (msgbyte >= 0) + message[msglen] = msgbyte; + else + break; + } + } + } + + if (msgbyte == -3) + goto parity_error; + +#ifdef DEBUG_MESSAGES + { + int i; + + printk("scsi%d.%c: message in: ", + info->host->host_no, fas216_target(info)); + for (i = 0; i < msglen; i++) + printk("%02X ", message[i]); + printk("\n"); + } +#endif + + fas216_parse_message(info, message, msglen); + fas216_cmd(info, CMD_MSGACCEPTED); + return; + +parity_error: + fas216_cmd(info, CMD_SETATN); + msgqueue_flush(&info->scsi.msgs); + msgqueue_addmsg(&info->scsi.msgs, 1, MSG_PARITY_ERROR); + info->scsi.phase = PHASE_MSGOUT_EXPECT; + fas216_cmd(info, CMD_MSGACCEPTED); + return; +} + +/** + * fas216_send_command - send command after all message bytes have been sent + * @info: interface which caused bus service + * + * Send a command to a target after all message bytes have been sent + */ +static void fas216_send_command(FAS216_Info *info) +{ + int i; + + fas216_checkmagic(info); + + fas216_cmd(info, CMD_NOP|CMD_WITHDMA); + fas216_cmd(info, CMD_FLUSHFIFO); + + /* load command */ + for (i = info->scsi.SCp.sent_command; i < info->SCpnt->cmd_len; i++) + fas216_writeb(info, REG_FF, info->SCpnt->cmnd[i]); + + fas216_cmd(info, CMD_TRANSFERINFO); + + info->scsi.phase = PHASE_COMMAND; +} + +/** + * fas216_send_messageout - handle bus service to send a message + * @info: interface which caused bus service + * + * Handle bus service to send a message. 
+ * Note: We do not allow the device to change the data direction! + */ +static void fas216_send_messageout(FAS216_Info *info, int start) +{ + unsigned int tot_msglen = msgqueue_msglength(&info->scsi.msgs); + + fas216_checkmagic(info); + + fas216_cmd(info, CMD_FLUSHFIFO); + + if (tot_msglen) { + struct message *msg; + int msgnr = 0; + + while ((msg = msgqueue_getmsg(&info->scsi.msgs, msgnr++)) != NULL) { + int i; + + for (i = start; i < msg->length; i++) + fas216_writeb(info, REG_FF, msg->msg[i]); + + msg->fifo = tot_msglen - (fas216_readb(info, REG_CFIS) & CFIS_CF); + start = 0; + } + } else + fas216_writeb(info, REG_FF, NOP); + + fas216_cmd(info, CMD_TRANSFERINFO); + + info->scsi.phase = PHASE_MSGOUT; +} + +/** + * fas216_busservice_intr - handle bus service interrupt from FAS216 chip + * @info: interface which caused bus service interrupt + * @stat: Status register contents + * @is: SCSI Status register contents + * + * Handle a bus service interrupt from FAS216 chip + */ +static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigned int is) +{ + fas216_checkmagic(info); + + fas216_log(info, LOG_BUSSERVICE, + "bus service: stat=%02x is=%02x phase=%02x", + stat, is, info->scsi.phase); + + switch (info->scsi.phase) { + case PHASE_SELECTION: + if ((is & IS_BITS) != IS_MSGBYTESENT) + goto bad_is; + break; + + case PHASE_SELSTEPS: + switch (is & IS_BITS) { + case IS_SELARB: + case IS_MSGBYTESENT: + goto bad_is; + + case IS_NOTCOMMAND: + case IS_EARLYPHASE: + if ((stat & STAT_BUSMASK) == STAT_MESGIN) + break; + goto bad_is; + + case IS_COMPLETE: + break; + } + break; + + default: + break; + } + + fas216_cmd(info, CMD_NOP); + +#define STATE(st,ph) ((ph) << 3 | (st)) + /* This table describes the legal SCSI state transitions, + * as described by the SCSI II spec. 
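+ *
+ * STATE() packs the chip's 3-bit bus phase (STAT_MSG/STAT_CD/STAT_IO)
+ * into bits 2..0 and our software phase into bits 3 and above, so each
+ * legal (bus phase, driver phase) pair below gets a unique case label;
+ * for instance (illustrative):
+ *
+ *	STATE(STAT_MESGIN, PHASE_MSGOUT) == (PHASE_MSGOUT << 3) | 0x7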
+ */ + switch (STATE(stat & STAT_BUSMASK, info->scsi.phase)) { + case STATE(STAT_DATAIN, PHASE_SELSTEPS):/* Sel w/ steps -> Data In */ + case STATE(STAT_DATAIN, PHASE_MSGOUT): /* Message Out -> Data In */ + case STATE(STAT_DATAIN, PHASE_COMMAND): /* Command -> Data In */ + case STATE(STAT_DATAIN, PHASE_MSGIN): /* Message In -> Data In */ + info->scsi.phase = PHASE_DATAIN; + fas216_transfer(info); + return; + + case STATE(STAT_DATAIN, PHASE_DATAIN): /* Data In -> Data In */ + case STATE(STAT_DATAOUT, PHASE_DATAOUT):/* Data Out -> Data Out */ + fas216_cleanuptransfer(info); + fas216_transfer(info); + return; + + case STATE(STAT_DATAOUT, PHASE_SELSTEPS):/* Sel w/ steps-> Data Out */ + case STATE(STAT_DATAOUT, PHASE_MSGOUT): /* Message Out -> Data Out */ + case STATE(STAT_DATAOUT, PHASE_COMMAND):/* Command -> Data Out */ + case STATE(STAT_DATAOUT, PHASE_MSGIN): /* Message In -> Data Out */ + fas216_cmd(info, CMD_FLUSHFIFO); + info->scsi.phase = PHASE_DATAOUT; + fas216_transfer(info); + return; + + case STATE(STAT_STATUS, PHASE_DATAOUT): /* Data Out -> Status */ + case STATE(STAT_STATUS, PHASE_DATAIN): /* Data In -> Status */ + fas216_stoptransfer(info); + fallthrough; + + case STATE(STAT_STATUS, PHASE_SELSTEPS):/* Sel w/ steps -> Status */ + case STATE(STAT_STATUS, PHASE_MSGOUT): /* Message Out -> Status */ + case STATE(STAT_STATUS, PHASE_COMMAND): /* Command -> Status */ + case STATE(STAT_STATUS, PHASE_MSGIN): /* Message In -> Status */ + fas216_cmd(info, CMD_INITCMDCOMPLETE); + info->scsi.phase = PHASE_STATUS; + return; + + case STATE(STAT_MESGIN, PHASE_DATAOUT): /* Data Out -> Message In */ + case STATE(STAT_MESGIN, PHASE_DATAIN): /* Data In -> Message In */ + fas216_stoptransfer(info); + fallthrough; + + case STATE(STAT_MESGIN, PHASE_COMMAND): /* Command -> Message In */ + case STATE(STAT_MESGIN, PHASE_SELSTEPS):/* Sel w/ steps -> Message In */ + case STATE(STAT_MESGIN, PHASE_MSGOUT): /* Message Out -> Message In */ + info->scsi.msgin_fifo = fas216_readb(info, REG_CFIS) & CFIS_CF; + fas216_cmd(info, CMD_FLUSHFIFO); + fas216_cmd(info, CMD_TRANSFERINFO); + info->scsi.phase = PHASE_MSGIN; + return; + + case STATE(STAT_MESGIN, PHASE_MSGIN): + info->scsi.msgin_fifo = fas216_readb(info, REG_CFIS) & CFIS_CF; + fas216_cmd(info, CMD_TRANSFERINFO); + return; + + case STATE(STAT_COMMAND, PHASE_MSGOUT): /* Message Out -> Command */ + case STATE(STAT_COMMAND, PHASE_MSGIN): /* Message In -> Command */ + fas216_send_command(info); + info->scsi.phase = PHASE_COMMAND; + return; + + + /* + * Selection -> Message Out + */ + case STATE(STAT_MESGOUT, PHASE_SELECTION): + fas216_send_messageout(info, 1); + return; + + /* + * Message Out -> Message Out + */ + case STATE(STAT_MESGOUT, PHASE_SELSTEPS): + case STATE(STAT_MESGOUT, PHASE_MSGOUT): + /* + * If we get another message out phase, this usually + * means some parity error occurred. Resend complete + * set of messages. If we have more than one byte to + * send, we need to assert ATN again. + */ + if (info->device[info->SCpnt->device->id].parity_check) { + /* + * We were testing... good, the device + * supports parity checking. + */ + info->device[info->SCpnt->device->id].parity_check = 0; + info->device[info->SCpnt->device->id].parity_enabled = 1; + fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0]); + } + + if (msgqueue_msglength(&info->scsi.msgs) > 1) + fas216_cmd(info, CMD_SETATN); + fallthrough; + + /* + * Any -> Message Out + */ + case STATE(STAT_MESGOUT, PHASE_MSGOUT_EXPECT): + fas216_send_messageout(info, 0); + return; + + /* Error recovery rules. 
+ * These either attempt to abort or retry the operation. + * TODO: we need more of these + */ + case STATE(STAT_COMMAND, PHASE_COMMAND):/* Command -> Command */ + /* error - we've sent out all the command bytes + * we have. + * NOTE: we need SAVE DATA POINTERS/RESTORE DATA POINTERS + * to include the command bytes sent for this to work + * correctly. + */ + printk(KERN_ERR "scsi%d.%c: " + "target trying to receive more command bytes\n", + info->host->host_no, fas216_target(info)); + fas216_cmd(info, CMD_SETATN); + fas216_set_stc(info, 15); + fas216_cmd(info, CMD_PADBYTES | CMD_WITHDMA); + msgqueue_flush(&info->scsi.msgs); + msgqueue_addmsg(&info->scsi.msgs, 1, INITIATOR_ERROR); + info->scsi.phase = PHASE_MSGOUT_EXPECT; + return; + } + + if (info->scsi.phase == PHASE_MSGIN_DISCONNECT) { + printk(KERN_ERR "scsi%d.%c: disconnect message received, but bus service %s?\n", + info->host->host_no, fas216_target(info), + fas216_bus_phase(stat)); + msgqueue_flush(&info->scsi.msgs); + fas216_cmd(info, CMD_SETATN); + msgqueue_addmsg(&info->scsi.msgs, 1, INITIATOR_ERROR); + info->scsi.phase = PHASE_MSGOUT_EXPECT; + info->scsi.aborting = 1; + fas216_cmd(info, CMD_TRANSFERINFO); + return; + } + printk(KERN_ERR "scsi%d.%c: bus phase %s after %s?\n", + info->host->host_no, fas216_target(info), + fas216_bus_phase(stat), + fas216_drv_phase(info)); + print_debug_list(); + return; + +bad_is: + fas216_log(info, 0, "bus service at step %d?", is & IS_BITS); + fas216_dumpstate(info); + print_debug_list(); + + fas216_done(info, DID_ERROR); +} + +/** + * fas216_funcdone_intr - handle a function done interrupt from FAS216 chip + * @info: interface which caused function done interrupt + * @stat: Status register contents + * @is: SCSI Status register contents + * + * Handle a function done interrupt from FAS216 chip + */ +static void fas216_funcdone_intr(FAS216_Info *info, unsigned int stat, unsigned int is) +{ + unsigned int fifo_len = fas216_readb(info, REG_CFIS) & CFIS_CF; + + fas216_checkmagic(info); + + fas216_log(info, LOG_FUNCTIONDONE, + "function done: stat=%02x is=%02x phase=%02x", + stat, is, info->scsi.phase); + + switch (info->scsi.phase) { + case PHASE_STATUS: /* status phase - read status and msg */ + if (fifo_len != 2) { + fas216_log(info, 0, "odd number of bytes in FIFO: %d", fifo_len); + } + /* + * Read status then message byte. + */ + info->scsi.SCp.Status = fas216_readb(info, REG_FF); + info->scsi.SCp.Message = fas216_readb(info, REG_FF); + info->scsi.phase = PHASE_DONE; + fas216_cmd(info, CMD_MSGACCEPTED); + break; + + case PHASE_IDLE: + case PHASE_SELECTION: + case PHASE_SELSTEPS: + break; + + case PHASE_MSGIN: /* message in phase */ + if ((stat & STAT_BUSMASK) == STAT_MESGIN) { + info->scsi.msgin_fifo = fifo_len; + fas216_message(info); + break; + } + fallthrough; + + default: + fas216_log(info, 0, "internal phase %s for function done?" + " What do I do with this?", + fas216_target(info), fas216_drv_phase(info)); + } +} + +static void fas216_bus_reset(FAS216_Info *info) +{ + neg_t sync_state; + int i; + + msgqueue_flush(&info->scsi.msgs); + + sync_state = neg_invalid; + +#ifdef SCSI2_SYNC + if (info->ifcfg.capabilities & (FASCAP_DMA|FASCAP_PSEUDODMA)) + sync_state = neg_wait; +#endif + + info->scsi.phase = PHASE_IDLE; + info->SCpnt = NULL; /* bug! 
*/ + memset(&info->scsi.SCp, 0, sizeof(info->scsi.SCp)); + + for (i = 0; i < 8; i++) { + info->device[i].disconnect_ok = info->ifcfg.disconnect_ok; + info->device[i].sync_state = sync_state; + info->device[i].period = info->ifcfg.asyncperiod / 4; + info->device[i].stp = info->scsi.async_stp; + info->device[i].sof = 0; + info->device[i].wide_xfer = 0; + } + + info->rst_bus_status = 1; + wake_up(&info->eh_wait); +} + +/** + * fas216_intr - handle interrupts to progress a command + * @info: interface to service + * + * Handle interrupts from the interface to progress a command + */ +irqreturn_t fas216_intr(FAS216_Info *info) +{ + unsigned char inst, is, stat; + int handled = IRQ_NONE; + + fas216_checkmagic(info); + + stat = fas216_readb(info, REG_STAT); + is = fas216_readb(info, REG_IS); + inst = fas216_readb(info, REG_INST); + + add_debug_list(stat, is, inst, info->scsi.phase); + + if (stat & STAT_INT) { + if (inst & INST_BUSRESET) { + fas216_log(info, 0, "bus reset detected"); + fas216_bus_reset(info); + scsi_report_bus_reset(info->host, 0); + } else if (inst & INST_ILLEGALCMD) { + fas216_log(info, LOG_ERROR, "illegal command given\n"); + fas216_dumpstate(info); + print_debug_list(); + } else if (inst & INST_DISCONNECT) + fas216_disconnect_intr(info); + else if (inst & INST_RESELECTED) /* reselected */ + fas216_reselected_intr(info); + else if (inst & INST_BUSSERVICE) /* bus service request */ + fas216_busservice_intr(info, stat, is); + else if (inst & INST_FUNCDONE) /* function done */ + fas216_funcdone_intr(info, stat, is); + else + fas216_log(info, 0, "unknown interrupt received:" + " phase %s inst %02X is %02X stat %02X", + fas216_drv_phase(info), inst, is, stat); + handled = IRQ_HANDLED; + } + return handled; +} + +static void __fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt) +{ + int tot_msglen; + + /* following what the ESP driver says */ + fas216_set_stc(info, 0); + fas216_cmd(info, CMD_NOP | CMD_WITHDMA); + + /* flush FIFO */ + fas216_cmd(info, CMD_FLUSHFIFO); + + /* load bus-id and timeout */ + fas216_writeb(info, REG_SDID, BUSID(SCpnt->device->id)); + fas216_writeb(info, REG_STIM, info->ifcfg.select_timeout); + + /* synchronous transfers */ + fas216_set_sync(info, SCpnt->device->id); + + tot_msglen = msgqueue_msglength(&info->scsi.msgs); + +#ifdef DEBUG_MESSAGES + { + struct message *msg; + int msgnr = 0, i; + + printk("scsi%d.%c: message out: ", + info->host->host_no, '0' + SCpnt->device->id); + while ((msg = msgqueue_getmsg(&info->scsi.msgs, msgnr++)) != NULL) { + printk("{ "); + for (i = 0; i < msg->length; i++) + printk("%02x ", msg->msg[i]); + printk("} "); + } + printk("\n"); + } +#endif + + if (tot_msglen == 1 || tot_msglen == 3) { + /* + * We have an easy message length to send... + */ + struct message *msg; + int msgnr = 0, i; + + info->scsi.phase = PHASE_SELSTEPS; + + /* load message bytes */ + while ((msg = msgqueue_getmsg(&info->scsi.msgs, msgnr++)) != NULL) { + for (i = 0; i < msg->length; i++) + fas216_writeb(info, REG_FF, msg->msg[i]); + msg->fifo = tot_msglen - (fas216_readb(info, REG_CFIS) & CFIS_CF); + } + + /* load command */ + for (i = 0; i < SCpnt->cmd_len; i++) + fas216_writeb(info, REG_FF, SCpnt->cmnd[i]); + + if (tot_msglen == 1) + fas216_cmd(info, CMD_SELECTATN); + else + fas216_cmd(info, CMD_SELECTATN3); + } else { + /* + * We have an unusual number of message bytes to send. + * Load first byte into fifo, and issue SELECT with ATN and + * stop steps. 
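+ * (SELECT WITH ATN transfers exactly one message byte and SELECT WITH
+ * ATN3 exactly three, which is why only those lengths take the path
+ * above; for any other length we stop after the first byte and the
+ * remainder goes out via fas216_send_messageout() when the MESSAGE OUT
+ * bus service interrupt arrives.)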
+ */ + struct message *msg = msgqueue_getmsg(&info->scsi.msgs, 0); + + fas216_writeb(info, REG_FF, msg->msg[0]); + msg->fifo = 1; + + fas216_cmd(info, CMD_SELECTATNSTOP); + } +} + +/* + * Decide whether we need to perform a parity test on this device. + * Can also be used to force parity error conditions during initial + * information transfer phase (message out) for test purposes. + */ +static int parity_test(FAS216_Info *info, int target) +{ +#if 0 + if (target == 3) { + info->device[target].parity_check = 0; + return 1; + } +#endif + return info->device[target].parity_check; +} + +static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt) +{ + int disconnect_ok; + + /* + * claim host busy + */ + info->scsi.phase = PHASE_SELECTION; + info->scsi.SCp = *arm_scsi_pointer(SCpnt); + info->SCpnt = SCpnt; + info->dma.transfer_type = fasdma_none; + + if (parity_test(info, SCpnt->device->id)) + fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0] | CNTL1_PTE); + else + fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0]); + + /* + * Don't allow request sense commands to disconnect. + */ + disconnect_ok = SCpnt->cmnd[0] != REQUEST_SENSE && + info->device[SCpnt->device->id].disconnect_ok; + + /* + * build outgoing message bytes + */ + msgqueue_flush(&info->scsi.msgs); + msgqueue_addmsg(&info->scsi.msgs, 1, IDENTIFY(disconnect_ok, SCpnt->device->lun)); + + /* + * add tag message if required + */ + if (SCpnt->device->simple_tags) + msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG, + scsi_cmd_to_rq(SCpnt)->tag); + + do { +#ifdef SCSI2_SYNC + if ((info->device[SCpnt->device->id].sync_state == neg_wait || + info->device[SCpnt->device->id].sync_state == neg_complete) && + (SCpnt->cmnd[0] == REQUEST_SENSE || + SCpnt->cmnd[0] == INQUIRY)) { + info->device[SCpnt->device->id].sync_state = neg_inprogress; + msgqueue_addmsg(&info->scsi.msgs, 5, + EXTENDED_MESSAGE, 3, EXTENDED_SDTR, + 1000 / info->ifcfg.clockrate, + info->ifcfg.sync_max_depth); + break; + } +#endif + } while (0); + + __fas216_start_command(info, SCpnt); +} + +static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt) +{ + set_bit(SCpnt->device->id * 8 + + (u8)(SCpnt->device->lun & 0x7), info->busyluns); + + info->stats.removes += 1; + switch (SCpnt->cmnd[0]) { + case WRITE_6: + case WRITE_10: + case WRITE_12: + info->stats.writes += 1; + break; + case READ_6: + case READ_10: + case READ_12: + info->stats.reads += 1; + break; + default: + info->stats.miscs += 1; + break; + } +} + +static void fas216_do_bus_device_reset(FAS216_Info *info, + struct scsi_cmnd *SCpnt) +{ + struct message *msg; + + /* + * claim host busy + */ + info->scsi.phase = PHASE_SELECTION; + info->scsi.SCp = *arm_scsi_pointer(SCpnt); + info->SCpnt = SCpnt; + info->dma.transfer_type = fasdma_none; + + fas216_log(info, LOG_ERROR, "sending bus device reset"); + + msgqueue_flush(&info->scsi.msgs); + msgqueue_addmsg(&info->scsi.msgs, 1, BUS_DEVICE_RESET); + + /* following what the ESP driver says */ + fas216_set_stc(info, 0); + fas216_cmd(info, CMD_NOP | CMD_WITHDMA); + + /* flush FIFO */ + fas216_cmd(info, CMD_FLUSHFIFO); + + /* load bus-id and timeout */ + fas216_writeb(info, REG_SDID, BUSID(SCpnt->device->id)); + fas216_writeb(info, REG_STIM, info->ifcfg.select_timeout); + + /* synchronous transfers */ + fas216_set_sync(info, SCpnt->device->id); + + msg = msgqueue_getmsg(&info->scsi.msgs, 0); + + fas216_writeb(info, REG_FF, BUS_DEVICE_RESET); + msg->fifo = 1; + + fas216_cmd(info, CMD_SELECTATNSTOP); +} + +/** + * fas216_kick - kick a command to 
the interface + * @info: our host interface to kick + * + * Kick a command to the interface, interface should be idle. + * Notes: Interrupts are always disabled! + */ +static void fas216_kick(FAS216_Info *info) +{ + struct scsi_cmnd *SCpnt = NULL; +#define TYPE_OTHER 0 +#define TYPE_RESET 1 +#define TYPE_QUEUE 2 + int where_from = TYPE_OTHER; + + fas216_checkmagic(info); + + /* + * Obtain the next command to process. + */ + do { + if (info->rstSCpnt) { + SCpnt = info->rstSCpnt; + /* don't remove it */ + where_from = TYPE_RESET; + break; + } + + if (info->reqSCpnt) { + SCpnt = info->reqSCpnt; + info->reqSCpnt = NULL; + break; + } + + if (info->origSCpnt) { + SCpnt = info->origSCpnt; + info->origSCpnt = NULL; + break; + } + + /* retrieve next command */ + if (!SCpnt) { + SCpnt = queue_remove_exclude(&info->queues.issue, + info->busyluns); + where_from = TYPE_QUEUE; + break; + } + } while (0); + + if (!SCpnt) { + /* + * no command pending, so enable reselection. + */ + fas216_cmd(info, CMD_ENABLESEL); + return; + } + + /* + * We're going to start a command, so disable reselection + */ + fas216_cmd(info, CMD_DISABLESEL); + + if (info->scsi.disconnectable && info->SCpnt) { + fas216_log(info, LOG_CONNECT, + "moved command for %d to disconnected queue", + info->SCpnt->device->id); + queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt); + info->scsi.disconnectable = 0; + info->SCpnt = NULL; + } + + fas216_log_command(info, LOG_CONNECT | LOG_MESSAGES, SCpnt, + "starting"); + + switch (where_from) { + case TYPE_QUEUE: + fas216_allocate_tag(info, SCpnt); + fallthrough; + case TYPE_OTHER: + fas216_start_command(info, SCpnt); + break; + case TYPE_RESET: + fas216_do_bus_device_reset(info, SCpnt); + break; + } + + fas216_log(info, LOG_CONNECT, "select: data pointers [%p, %X]", + info->scsi.SCp.ptr, info->scsi.SCp.this_residual); + + /* + * should now get either DISCONNECT or + * (FUNCTION DONE with BUS SERVICE) interrupt + */ +} + +/* + * Clean up from issuing a BUS DEVICE RESET message to a device. + */ +static void fas216_devicereset_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, + unsigned int result) +{ + fas216_log(info, LOG_ERROR, "fas216 device reset complete"); + + info->rstSCpnt = NULL; + info->rst_dev_status = 1; + wake_up(&info->eh_wait); +} + +/** + * fas216_rq_sns_done - Finish processing automatic request sense command + * @info: interface that completed + * @SCpnt: command that completed + * @result: driver byte of result + * + * Finish processing automatic request sense command + */ +static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, + unsigned int result) +{ + struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt); + + fas216_log_target(info, LOG_CONNECT, SCpnt->device->id, + "request sense complete, result=0x%04x%02x%02x", + result, scsi_pointer->Message, scsi_pointer->Status); + + if (result != DID_OK || scsi_pointer->Status != SAM_STAT_GOOD) + /* + * Something went wrong. Make sure that we don't + * have valid data in the sense buffer that could + * confuse the higher levels. + */ + memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); +//printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id); +//{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); } + /* + * Note that we don't set SCpnt->result, since that should + * reflect the status of the command that we were asked by + * the upper layers to process. This would have been set + * correctly by fas216_std_done. 
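+ * All that is left is to undo the scsi_eh_prep_cmnd() done when the
+ * sense request was built, restoring the original cdb and data buffer
+ * before the command is completed.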
+ */ + scsi_eh_restore_cmnd(SCpnt, &info->ses); + fas216_cmd_priv(SCpnt)->scsi_done(SCpnt); +} + +/** + * fas216_std_done - finish processing of standard command + * @info: interface that completed + * @SCpnt: command that completed + * @result: driver byte of result + * + * Finish processing of standard command + */ +static void +fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result) +{ + struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt); + + info->stats.fins += 1; + + set_host_byte(SCpnt, result); + if (result == DID_OK) + scsi_msg_to_host_byte(SCpnt, info->scsi.SCp.Message); + set_status_byte(SCpnt, info->scsi.SCp.Status); + + fas216_log_command(info, LOG_CONNECT, SCpnt, + "command complete, result=0x%08x", SCpnt->result); + + /* + * If the driver detected an error, we're all done. + */ + if (get_host_byte(SCpnt) != DID_OK) + goto done; + + /* + * If the command returned CHECK_CONDITION or COMMAND_TERMINATED + * status, request the sense information. + */ + if (get_status_byte(SCpnt) == SAM_STAT_CHECK_CONDITION || + get_status_byte(SCpnt) == SAM_STAT_COMMAND_TERMINATED) + goto request_sense; + + /* + * If the command did not complete with GOOD status, + * we are all done here. + */ + if (get_status_byte(SCpnt) != SAM_STAT_GOOD) + goto done; + + /* + * We have successfully completed a command. Make sure that + * we do not have any buffers left to transfer. The world + * is not perfect, and we seem to occasionally hit this. + * It can be indicative of a buggy driver, target or the upper + * levels of the SCSI code. + */ + if (info->scsi.SCp.ptr) { + switch (SCpnt->cmnd[0]) { + case INQUIRY: + case START_STOP: + case MODE_SENSE: + break; + + default: + scmd_printk(KERN_ERR, SCpnt, + "incomplete data transfer detected: res=%08X ptr=%p len=%X\n", + SCpnt->result, info->scsi.SCp.ptr, + info->scsi.SCp.this_residual); + scsi_print_command(SCpnt); + set_host_byte(SCpnt, DID_ERROR); + goto request_sense; + } + } + +done: + if (fas216_cmd_priv(SCpnt)->scsi_done) { + fas216_cmd_priv(SCpnt)->scsi_done(SCpnt); + return; + } + + panic("scsi%d.H: null scsi_done function in fas216_done", + info->host->host_no); + + +request_sense: + if (SCpnt->cmnd[0] == REQUEST_SENSE) + goto done; + + scsi_eh_prep_cmnd(SCpnt, &info->ses, NULL, 0, ~0); + fas216_log_target(info, LOG_CONNECT, SCpnt->device->id, + "requesting sense"); + init_SCp(SCpnt); + scsi_pointer->Message = 0; + scsi_pointer->Status = 0; + SCpnt->host_scribble = (void *)fas216_rq_sns_done; + + /* + * Place this command into the high priority "request + * sense" slot. This will be the very next command + * executed, unless a target connects to us. 
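+ * (fas216_kick() checks reqSCpnt before origSCpnt and the issue queue,
+ * so the sense command overtakes everything except a pending bus
+ * device reset.)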
+ */ + if (info->reqSCpnt) + printk(KERN_WARNING "scsi%d.%c: losing request command\n", + info->host->host_no, '0' + SCpnt->device->id); + info->reqSCpnt = SCpnt; +} + +/** + * fas216_done - complete processing for current command + * @info: interface that completed + * @result: driver byte of result + * + * Complete processing for current command + */ +static void fas216_done(FAS216_Info *info, unsigned int result) +{ + void (*fn)(FAS216_Info *, struct scsi_cmnd *, unsigned int); + struct scsi_cmnd *SCpnt; + unsigned long flags; + + fas216_checkmagic(info); + + if (!info->SCpnt) + goto no_command; + + SCpnt = info->SCpnt; + info->SCpnt = NULL; + info->scsi.phase = PHASE_IDLE; + + if (info->scsi.aborting) { + fas216_log(info, 0, "uncaught abort - returning DID_ABORT"); + result = DID_ABORT; + info->scsi.aborting = 0; + } + + /* + * Sanity check the completion - if we have zero bytes left + * to transfer, we should not have a valid pointer. + */ + if (info->scsi.SCp.ptr && info->scsi.SCp.this_residual == 0) { + scmd_printk(KERN_INFO, SCpnt, + "zero bytes left to transfer, but buffer pointer still valid: ptr=%p len=%08x\n", + info->scsi.SCp.ptr, info->scsi.SCp.this_residual); + info->scsi.SCp.ptr = NULL; + scsi_print_command(SCpnt); + } + + /* + * Clear down this command as completed. If we need to request + * the sense information, fas216_kick will re-assert the busy + * status. + */ + info->device[SCpnt->device->id].parity_check = 0; + clear_bit(SCpnt->device->id * 8 + + (u8)(SCpnt->device->lun & 0x7), info->busyluns); + + fn = (void (*)(FAS216_Info *, struct scsi_cmnd *, unsigned int))SCpnt->host_scribble; + fn(info, SCpnt, result); + + if (info->scsi.irq) { + spin_lock_irqsave(&info->host_lock, flags); + if (info->scsi.phase == PHASE_IDLE) + fas216_kick(info); + spin_unlock_irqrestore(&info->host_lock, flags); + } + return; + +no_command: + panic("scsi%d.H: null command in fas216_done", + info->host->host_no); +} + +/** + * fas216_queue_command_internal - queue a command for the adapter to process + * @SCpnt: Command to queue + * @done: done function to call once command is complete + * + * Queue a command for adapter to process. + * Returns: 0 on success, else error. + * Notes: io_request_lock is held, interrupts are disabled. + */ +static int fas216_queue_command_internal(struct scsi_cmnd *SCpnt, + void (*done)(struct scsi_cmnd *)) +{ + FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; + int result; + + fas216_checkmagic(info); + + fas216_log_command(info, LOG_CONNECT, SCpnt, + "received command (%p)", SCpnt); + + fas216_cmd_priv(SCpnt)->scsi_done = done; + SCpnt->host_scribble = (void *)fas216_std_done; + SCpnt->result = 0; + + init_SCp(SCpnt); + + info->stats.queues += 1; + + spin_lock(&info->host_lock); + + /* + * Add command into execute queue and let it complete under + * whatever scheme we're using. + */ + result = !queue_add_cmd_ordered(&info->queues.issue, SCpnt); + + /* + * If we successfully added the command, + * kick the interface to get it moving. + */ + if (result == 0 && info->scsi.phase == PHASE_IDLE) + fas216_kick(info); + spin_unlock(&info->host_lock); + + fas216_log_target(info, LOG_CONNECT, -1, "queue %s", + result ? 
"failure" : "success"); + + return result; +} + +static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt) +{ + return fas216_queue_command_internal(SCpnt, scsi_done); +} + +DEF_SCSI_QCMD(fas216_queue_command) + +/** + * fas216_internal_done - trigger restart of a waiting thread in fas216_noqueue_command + * @SCpnt: Command to wake + * + * Trigger restart of a waiting thread in fas216_command + */ +static void fas216_internal_done(struct scsi_cmnd *SCpnt) +{ + FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; + + fas216_checkmagic(info); + + info->internal_done = 1; +} + +/** + * fas216_noqueue_command - process a command for the adapter. + * @SCpnt: Command to queue + * + * Queue a command for adapter to process. + * Returns: scsi result code. + * Notes: io_request_lock is held, interrupts are disabled. + */ +static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt) +{ + FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; + + fas216_checkmagic(info); + + /* + * We should only be using this if we don't have an interrupt. + * Provide some "incentive" to use the queueing code. + */ + BUG_ON(info->scsi.irq); + + info->internal_done = 0; + fas216_queue_command_internal(SCpnt, fas216_internal_done); + + /* + * This wastes time, since we can't return until the command is + * complete. We can't sleep either since we may get re-entered! + * However, we must re-enable interrupts, or else we'll be + * waiting forever. + */ + spin_unlock_irq(info->host->host_lock); + + while (!info->internal_done) { + /* + * If we don't have an IRQ, then we must poll the card for + * it's interrupt, and use that to call this driver's + * interrupt routine. That way, we keep the command + * progressing. Maybe we can add some intelligence here + * and go to sleep if we know that the device is going + * to be some time (eg, disconnected). + */ + if (fas216_readb(info, REG_STAT) & STAT_INT) { + spin_lock_irq(info->host->host_lock); + fas216_intr(info); + spin_unlock_irq(info->host->host_lock); + } + } + + spin_lock_irq(info->host->host_lock); + + scsi_done(SCpnt); + + return 0; +} + +DEF_SCSI_QCMD(fas216_noqueue_command) + +/* + * Error handler timeout function. Indicate that we timed out, + * and wake up any error handler process so it can continue. + */ +static void fas216_eh_timer(struct timer_list *t) +{ + FAS216_Info *info = from_timer(info, t, eh_timer); + + fas216_log(info, LOG_ERROR, "error handling timed out\n"); + + del_timer(&info->eh_timer); + + if (info->rst_bus_status == 0) + info->rst_bus_status = -1; + if (info->rst_dev_status == 0) + info->rst_dev_status = -1; + + wake_up(&info->eh_wait); +} + +enum res_find { + res_failed, /* not found */ + res_success, /* command on issue queue */ + res_hw_abort /* command on disconnected dev */ +}; + +/** + * fas216_do_abort - decide how to abort a command + * @SCpnt: command to abort + * + * Decide how to abort a command. + * Returns: abort status + */ +static enum res_find fas216_find_command(FAS216_Info *info, + struct scsi_cmnd *SCpnt) +{ + enum res_find res = res_failed; + + if (queue_remove_cmd(&info->queues.issue, SCpnt)) { + /* + * The command was on the issue queue, and has not been + * issued yet. We can remove the command from the queue, + * and acknowledge the abort. Neither the device nor the + * interface know about the command. + */ + printk("on issue queue "); + + res = res_success; + } else if (queue_remove_cmd(&info->queues.disconnected, SCpnt)) { + /* + * The command was on the disconnected queue. 
We must + * reconnect with the device if possible, and send it + * an abort message. + */ + printk("on disconnected queue "); + + res = res_hw_abort; + } else if (info->SCpnt == SCpnt) { + printk("executing "); + + switch (info->scsi.phase) { + /* + * If the interface is idle, and the command is 'disconnectable', + * then it is the same as on the disconnected queue. + */ + case PHASE_IDLE: + if (info->scsi.disconnectable) { + info->scsi.disconnectable = 0; + info->SCpnt = NULL; + res = res_hw_abort; + } + break; + + default: + break; + } + } else if (info->origSCpnt == SCpnt) { + /* + * The command will be executed next, but a command + * is currently using the interface. This is similar to + * being on the issue queue, except the busylun bit has + * been set. + */ + info->origSCpnt = NULL; + clear_bit(SCpnt->device->id * 8 + + (u8)(SCpnt->device->lun & 0x7), info->busyluns); + printk("waiting for execution "); + res = res_success; + } else + printk("unknown "); + + return res; +} + +/** + * fas216_eh_abort - abort this command + * @SCpnt: command to abort + * + * Abort this command. + * Returns: FAILED if unable to abort + * Notes: io_request_lock is taken, and irqs are disabled + */ +int fas216_eh_abort(struct scsi_cmnd *SCpnt) +{ + FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; + int result = FAILED; + + fas216_checkmagic(info); + + info->stats.aborts += 1; + + scmd_printk(KERN_WARNING, SCpnt, "abort command\n"); + + print_debug_list(); + fas216_dumpstate(info); + + switch (fas216_find_command(info, SCpnt)) { + /* + * We found the command, and cleared it out. Either + * the command is still known to be executing on the + * target, or the busylun bit is not set. + */ + case res_success: + scmd_printk(KERN_WARNING, SCpnt, "abort %p success\n", SCpnt); + result = SUCCESS; + break; + + /* + * We need to reconnect to the target and send it an + * ABORT or ABORT_TAG message. We can only do this + * if the bus is free. + */ + case res_hw_abort: + + /* + * We are unable to abort the command for some reason. + */ + default: + case res_failed: + scmd_printk(KERN_WARNING, SCpnt, "abort %p failed\n", SCpnt); + break; + } + + return result; +} + +/** + * fas216_eh_device_reset - Reset the device associated with this command + * @SCpnt: command specifing device to reset + * + * Reset the device associated with this command. + * Returns: FAILED if unable to reset. + * Notes: We won't be re-entered, so we'll only have one device + * reset on the go at one time. + */ +int fas216_eh_device_reset(struct scsi_cmnd *SCpnt) +{ + FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; + unsigned long flags; + int i, res = FAILED, target = SCpnt->device->id; + + fas216_log(info, LOG_ERROR, "device reset for target %d", target); + + spin_lock_irqsave(&info->host_lock, flags); + + do { + /* + * If we are currently connected to a device, and + * it is the device we want to reset, there is + * nothing we can do here. Chances are it is stuck, + * and we need a bus reset. + */ + if (info->SCpnt && !info->scsi.disconnectable && + info->SCpnt->device->id == SCpnt->device->id) + break; + + /* + * We're going to be resetting this device. Remove + * all pending commands from the driver. By doing + * so, we guarantee that we won't touch the command + * structures except to process the reset request. 
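+ * Concretely: the target's commands are dropped from the issue and
+ * disconnected queues, its busylun bits are cleared, and this SCpnt is
+ * re-used to carry the BUS DEVICE RESET message; we then wait up to
+ * 30 seconds for fas216_devicereset_done() to report completion.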
+ */ + queue_remove_all_target(&info->queues.issue, target); + queue_remove_all_target(&info->queues.disconnected, target); + if (info->origSCpnt && info->origSCpnt->device->id == target) + info->origSCpnt = NULL; + if (info->reqSCpnt && info->reqSCpnt->device->id == target) + info->reqSCpnt = NULL; + for (i = 0; i < 8; i++) + clear_bit(target * 8 + i, info->busyluns); + + /* + * Hijack this SCSI command structure to send + * a bus device reset message to this device. + */ + SCpnt->host_scribble = (void *)fas216_devicereset_done; + + info->rst_dev_status = 0; + info->rstSCpnt = SCpnt; + + if (info->scsi.phase == PHASE_IDLE) + fas216_kick(info); + + mod_timer(&info->eh_timer, jiffies + 30 * HZ); + spin_unlock_irqrestore(&info->host_lock, flags); + + /* + * Wait up to 30 seconds for the reset to complete. + */ + wait_event(info->eh_wait, info->rst_dev_status); + + del_timer_sync(&info->eh_timer); + spin_lock_irqsave(&info->host_lock, flags); + info->rstSCpnt = NULL; + + if (info->rst_dev_status == 1) + res = SUCCESS; + } while (0); + + SCpnt->host_scribble = NULL; + spin_unlock_irqrestore(&info->host_lock, flags); + + fas216_log(info, LOG_ERROR, "device reset complete: %s\n", + res == SUCCESS ? "success" : "failed"); + + return res; +} + +/** + * fas216_eh_bus_reset - Reset the bus associated with the command + * @SCpnt: command specifing bus to reset + * + * Reset the bus associated with the command. + * Returns: FAILED if unable to reset. + * Notes: Further commands are blocked. + */ +int fas216_eh_bus_reset(struct scsi_cmnd *SCpnt) +{ + FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; + unsigned long flags; + struct scsi_device *SDpnt; + + fas216_checkmagic(info); + fas216_log(info, LOG_ERROR, "resetting bus"); + + info->stats.bus_resets += 1; + + spin_lock_irqsave(&info->host_lock, flags); + + /* + * Stop all activity on this interface. + */ + fas216_aborttransfer(info); + fas216_writeb(info, REG_CNTL3, info->scsi.cfg[2]); + + /* + * Clear any pending interrupts. + */ + while (fas216_readb(info, REG_STAT) & STAT_INT) + fas216_readb(info, REG_INST); + + info->rst_bus_status = 0; + + /* + * For each attached hard-reset device, clear out + * all command structures. Leave the running + * command in place. + */ + shost_for_each_device(SDpnt, info->host) { + int i; + + if (SDpnt->soft_reset) + continue; + + queue_remove_all_target(&info->queues.issue, SDpnt->id); + queue_remove_all_target(&info->queues.disconnected, SDpnt->id); + if (info->origSCpnt && info->origSCpnt->device->id == SDpnt->id) + info->origSCpnt = NULL; + if (info->reqSCpnt && info->reqSCpnt->device->id == SDpnt->id) + info->reqSCpnt = NULL; + info->SCpnt = NULL; + + for (i = 0; i < 8; i++) + clear_bit(SDpnt->id * 8 + i, info->busyluns); + } + + info->scsi.phase = PHASE_IDLE; + + /* + * Reset the SCSI bus. Device cleanup happens in + * the interrupt handler. + */ + fas216_cmd(info, CMD_RESETSCSI); + + mod_timer(&info->eh_timer, jiffies + HZ); + spin_unlock_irqrestore(&info->host_lock, flags); + + /* + * Wait one second for the interrupt. + */ + wait_event(info->eh_wait, info->rst_bus_status); + del_timer_sync(&info->eh_timer); + + fas216_log(info, LOG_ERROR, "bus reset complete: %s\n", + info->rst_bus_status == 1 ? "success" : "failed"); + + return info->rst_bus_status == 1 ? 
SUCCESS : FAILED; +} + +/** + * fas216_init_chip - Initialise FAS216 state after reset + * @info: state structure for interface + * + * Initialise FAS216 state after reset + */ +static void fas216_init_chip(FAS216_Info *info) +{ + unsigned int clock = ((info->ifcfg.clockrate - 1) / 5 + 1) & 7; + fas216_writeb(info, REG_CLKF, clock); + fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0]); + fas216_writeb(info, REG_CNTL2, info->scsi.cfg[1]); + fas216_writeb(info, REG_CNTL3, info->scsi.cfg[2]); + fas216_writeb(info, REG_STIM, info->ifcfg.select_timeout); + fas216_writeb(info, REG_SOF, 0); + fas216_writeb(info, REG_STP, info->scsi.async_stp); + fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0]); +} + +/** + * fas216_eh_host_reset - Reset the host associated with this command + * @SCpnt: command specifing host to reset + * + * Reset the host associated with this command. + * Returns: FAILED if unable to reset. + * Notes: io_request_lock is taken, and irqs are disabled + */ +int fas216_eh_host_reset(struct scsi_cmnd *SCpnt) +{ + FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; + + spin_lock_irq(info->host->host_lock); + + fas216_checkmagic(info); + + fas216_log(info, LOG_ERROR, "resetting host"); + + /* + * Reset the SCSI chip. + */ + fas216_cmd(info, CMD_RESETCHIP); + + /* + * Ugly ugly ugly! + * We need to release the host_lock and enable + * IRQs if we sleep, but we must relock and disable + * IRQs after the sleep. + */ + spin_unlock_irq(info->host->host_lock); + msleep(50 * 1000/100); + spin_lock_irq(info->host->host_lock); + + /* + * Release the SCSI reset. + */ + fas216_cmd(info, CMD_NOP); + + fas216_init_chip(info); + + spin_unlock_irq(info->host->host_lock); + return SUCCESS; +} + +#define TYPE_UNKNOWN 0 +#define TYPE_NCR53C90 1 +#define TYPE_NCR53C90A 2 +#define TYPE_NCR53C9x 3 +#define TYPE_Am53CF94 4 +#define TYPE_EmFAS216 5 +#define TYPE_QLFAS216 6 + +static char *chip_types[] = { + "unknown", + "NS NCR53C90", + "NS NCR53C90A", + "NS NCR53C9x", + "AMD Am53CF94", + "Emulex FAS216", + "QLogic FAS216" +}; + +static int fas216_detect_type(FAS216_Info *info) +{ + int family, rev; + + /* + * Reset the chip. + */ + fas216_writeb(info, REG_CMD, CMD_RESETCHIP); + udelay(50); + fas216_writeb(info, REG_CMD, CMD_NOP); + + /* + * Check to see if control reg 2 is present. + */ + fas216_writeb(info, REG_CNTL3, 0); + fas216_writeb(info, REG_CNTL2, CNTL2_S2FE); + + /* + * If we are unable to read back control reg 2 + * correctly, it is not present, and we have a + * NCR53C90. + */ + if ((fas216_readb(info, REG_CNTL2) & (~0xe0)) != CNTL2_S2FE) + return TYPE_NCR53C90; + + /* + * Now, check control register 3 + */ + fas216_writeb(info, REG_CNTL2, 0); + fas216_writeb(info, REG_CNTL3, 0); + fas216_writeb(info, REG_CNTL3, 5); + + /* + * If we are unable to read the register back + * correctly, we have a NCR53C90A + */ + if (fas216_readb(info, REG_CNTL3) != 5) + return TYPE_NCR53C90A; + + /* + * Now read the ID from the chip. 
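+ * The ID register holds a family code in its upper bits and a revision
+ * in bits 2..0; for example, family 0x02 revision 2 reads back as an
+ * Emulex FAS216, while family 0x01 revision 4 is an AMD Am53CF94.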
+ */ + fas216_writeb(info, REG_CNTL3, 0); + + fas216_writeb(info, REG_CNTL3, CNTL3_ADIDCHK); + fas216_writeb(info, REG_CNTL3, 0); + + fas216_writeb(info, REG_CMD, CMD_RESETCHIP); + udelay(50); + fas216_writeb(info, REG_CMD, CMD_WITHDMA | CMD_NOP); + + fas216_writeb(info, REG_CNTL2, CNTL2_ENF); + fas216_writeb(info, REG_CMD, CMD_RESETCHIP); + udelay(50); + fas216_writeb(info, REG_CMD, CMD_NOP); + + rev = fas216_readb(info, REG_ID); + family = rev >> 3; + rev &= 7; + + switch (family) { + case 0x01: + if (rev == 4) + return TYPE_Am53CF94; + break; + + case 0x02: + switch (rev) { + case 2: + return TYPE_EmFAS216; + case 3: + return TYPE_QLFAS216; + } + break; + + default: + break; + } + printk("family %x rev %x\n", family, rev); + return TYPE_NCR53C9x; +} + +/** + * fas216_reset_state - Initialise driver internal state + * @info: state to initialise + * + * Initialise driver internal state + */ +static void fas216_reset_state(FAS216_Info *info) +{ + int i; + + fas216_checkmagic(info); + + fas216_bus_reset(info); + + /* + * Clear out all stale info in our state structure + */ + memset(info->busyluns, 0, sizeof(info->busyluns)); + info->scsi.disconnectable = 0; + info->scsi.aborting = 0; + + for (i = 0; i < 8; i++) { + info->device[i].parity_enabled = 0; + info->device[i].parity_check = 1; + } + + /* + * Drain all commands on disconnected queue + */ + while (queue_remove(&info->queues.disconnected) != NULL); + + /* + * Remove executing commands. + */ + info->SCpnt = NULL; + info->reqSCpnt = NULL; + info->rstSCpnt = NULL; + info->origSCpnt = NULL; +} + +/** + * fas216_init - initialise FAS/NCR/AMD SCSI structures. + * @host: a driver-specific filled-out structure + * + * Initialise FAS/NCR/AMD SCSI structures. + * Returns: 0 on success + */ +int fas216_init(struct Scsi_Host *host) +{ + FAS216_Info *info = (FAS216_Info *)host->hostdata; + + info->magic_start = MAGIC; + info->magic_end = MAGIC; + info->host = host; + info->scsi.cfg[0] = host->this_id | CNTL1_PERE; + info->scsi.cfg[1] = CNTL2_ENF | CNTL2_S2FE; + info->scsi.cfg[2] = info->ifcfg.cntl3 | + CNTL3_ADIDCHK | CNTL3_QTAG | CNTL3_G2CB | CNTL3_LBTM; + info->scsi.async_stp = fas216_syncperiod(info, info->ifcfg.asyncperiod); + + info->rst_dev_status = -1; + info->rst_bus_status = -1; + init_waitqueue_head(&info->eh_wait); + timer_setup(&info->eh_timer, fas216_eh_timer, 0); + + spin_lock_init(&info->host_lock); + + memset(&info->stats, 0, sizeof(info->stats)); + + msgqueue_initialise(&info->scsi.msgs); + + if (!queue_initialise(&info->queues.issue)) + return -ENOMEM; + + if (!queue_initialise(&info->queues.disconnected)) { + queue_free(&info->queues.issue); + return -ENOMEM; + } + + return 0; +} + +/** + * fas216_add - initialise FAS/NCR/AMD SCSI ic. + * @host: a driver-specific filled-out structure + * @dev: parent device + * + * Initialise FAS/NCR/AMD SCSI ic. + * Returns: 0 on success + */ +int fas216_add(struct Scsi_Host *host, struct device *dev) +{ + FAS216_Info *info = (FAS216_Info *)host->hostdata; + int type, ret; + + if (info->ifcfg.clockrate <= 10 || info->ifcfg.clockrate > 40) { + printk(KERN_CRIT "fas216: invalid clock rate %u MHz\n", + info->ifcfg.clockrate); + return -EINVAL; + } + + fas216_reset_state(info); + type = fas216_detect_type(info); + info->scsi.type = chip_types[type]; + + udelay(300); + + /* + * Initialise the chip correctly. + */ + fas216_init_chip(info); + + /* + * Reset the SCSI bus. We don't want to see + * the resulting reset interrupt, so mask it + * out. 
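+ * CNTL1_DISR keeps the chip interrupt quiet while the reset pulse and
+ * the settle delay below run; the normal CNTL1 value is restored
+ * afterwards and REG_INST read once to discard anything that latched
+ * in the meantime.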
+ */ + fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0] | CNTL1_DISR); + fas216_writeb(info, REG_CMD, CMD_RESETSCSI); + + /* + * scsi standard says wait 250ms + */ + spin_unlock_irq(info->host->host_lock); + msleep(100*1000/100); + spin_lock_irq(info->host->host_lock); + + fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0]); + fas216_readb(info, REG_INST); + + fas216_checkmagic(info); + + ret = scsi_add_host(host, dev); + if (ret) + fas216_writeb(info, REG_CMD, CMD_RESETCHIP); + else + scsi_scan_host(host); + + return ret; +} + +void fas216_remove(struct Scsi_Host *host) +{ + FAS216_Info *info = (FAS216_Info *)host->hostdata; + + fas216_checkmagic(info); + scsi_remove_host(host); + + fas216_writeb(info, REG_CMD, CMD_RESETCHIP); + scsi_host_put(host); +} + +/** + * fas216_release - release all resources for FAS/NCR/AMD SCSI ic. + * @host: a driver-specific filled-out structure + * + * release all resources and put everything to bed for FAS/NCR/AMD SCSI ic. + */ +void fas216_release(struct Scsi_Host *host) +{ + FAS216_Info *info = (FAS216_Info *)host->hostdata; + + queue_free(&info->queues.disconnected); + queue_free(&info->queues.issue); +} + +void fas216_print_host(FAS216_Info *info, struct seq_file *m) +{ + seq_printf(m, + "\n" + "Chip : %s\n" + " Address: 0x%p\n" + " IRQ : %d\n" + " DMA : %d\n", + info->scsi.type, info->scsi.io_base, + info->scsi.irq, info->scsi.dma); +} + +void fas216_print_stats(FAS216_Info *info, struct seq_file *m) +{ + seq_printf(m, "\n" + "Command Statistics:\n" + " Queued : %u\n" + " Issued : %u\n" + " Completed : %u\n" + " Reads : %u\n" + " Writes : %u\n" + " Others : %u\n" + " Disconnects: %u\n" + " Aborts : %u\n" + " Bus resets : %u\n" + " Host resets: %u\n", + info->stats.queues, info->stats.removes, + info->stats.fins, info->stats.reads, + info->stats.writes, info->stats.miscs, + info->stats.disconnects, info->stats.aborts, + info->stats.bus_resets, info->stats.host_resets); +} + +void fas216_print_devices(FAS216_Info *info, struct seq_file *m) +{ + struct fas216_device *dev; + struct scsi_device *scd; + + seq_puts(m, "Device/Lun TaggedQ Parity Sync\n"); + + shost_for_each_device(scd, info->host) { + dev = &info->device[scd->id]; + seq_printf(m, " %d/%llu ", scd->id, scd->lun); + if (scd->tagged_supported) + seq_printf(m, "%3sabled ", + scd->simple_tags ? "en" : "dis"); + else + seq_puts(m, "unsupported "); + + seq_printf(m, "%3sabled ", dev->parity_enabled ? 
"en" : "dis"); + + if (dev->sof) + seq_printf(m, "offset %d, %d ns\n", + dev->sof, dev->period * 4); + else + seq_puts(m, "async\n"); + } +} + +EXPORT_SYMBOL(fas216_init); +EXPORT_SYMBOL(fas216_add); +EXPORT_SYMBOL(fas216_queue_command); +EXPORT_SYMBOL(fas216_noqueue_command); +EXPORT_SYMBOL(fas216_intr); +EXPORT_SYMBOL(fas216_remove); +EXPORT_SYMBOL(fas216_release); +EXPORT_SYMBOL(fas216_eh_abort); +EXPORT_SYMBOL(fas216_eh_device_reset); +EXPORT_SYMBOL(fas216_eh_bus_reset); +EXPORT_SYMBOL(fas216_eh_host_reset); +EXPORT_SYMBOL(fas216_print_host); +EXPORT_SYMBOL(fas216_print_stats); +EXPORT_SYMBOL(fas216_print_devices); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("Generic FAS216/NCR53C9x driver core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h new file mode 100644 index 000000000..08113277a --- /dev/null +++ b/drivers/scsi/arm/fas216.h @@ -0,0 +1,404 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * linux/drivers/acorn/scsi/fas216.h + * + * Copyright (C) 1997-2000 Russell King + * + * FAS216 generic driver + */ +#ifndef FAS216_H +#define FAS216_H + +#include + +#include "queue.h" +#include "msgqueue.h" + +/* FAS register definitions */ + +/* transfer count low */ +#define REG_CTCL (0) +#define REG_STCL (0) + +/* transfer count medium */ +#define REG_CTCM (1) +#define REG_STCM (1) + +/* fifo data */ +#define REG_FF (2) + +/* command */ +#define REG_CMD (3) +#define CMD_NOP 0x00 +#define CMD_FLUSHFIFO 0x01 +#define CMD_RESETCHIP 0x02 +#define CMD_RESETSCSI 0x03 + +#define CMD_TRANSFERINFO 0x10 +#define CMD_INITCMDCOMPLETE 0x11 +#define CMD_MSGACCEPTED 0x12 +#define CMD_PADBYTES 0x18 +#define CMD_SETATN 0x1a +#define CMD_RSETATN 0x1b + +#define CMD_SELECTWOATN 0x41 +#define CMD_SELECTATN 0x42 +#define CMD_SELECTATNSTOP 0x43 +#define CMD_ENABLESEL 0x44 +#define CMD_DISABLESEL 0x45 +#define CMD_SELECTATN3 0x46 +#define CMD_RESEL3 0x47 + +#define CMD_WITHDMA 0x80 + +/* status register (read) */ +#define REG_STAT (4) +#define STAT_IO (1 << 0) /* IO phase */ +#define STAT_CD (1 << 1) /* CD phase */ +#define STAT_MSG (1 << 2) /* MSG phase */ +#define STAT_TRANSFERDONE (1 << 3) /* Transfer completed */ +#define STAT_TRANSFERCNTZ (1 << 4) /* Transfer counter is zero */ +#define STAT_PARITYERROR (1 << 5) /* Parity error */ +#define STAT_REALBAD (1 << 6) /* Something bad */ +#define STAT_INT (1 << 7) /* Interrupt */ + +#define STAT_BUSMASK (STAT_MSG|STAT_CD|STAT_IO) +#define STAT_DATAOUT (0) /* Data out */ +#define STAT_DATAIN (STAT_IO) /* Data in */ +#define STAT_COMMAND (STAT_CD) /* Command out */ +#define STAT_STATUS (STAT_CD|STAT_IO) /* Status In */ +#define STAT_MESGOUT (STAT_MSG|STAT_CD) /* Message out */ +#define STAT_MESGIN (STAT_MSG|STAT_CD|STAT_IO) /* Message In */ + +/* bus ID for select / reselect */ +#define REG_SDID (4) +#define BUSID(target) ((target) & 7) + +/* Interrupt status register (read) */ +#define REG_INST (5) +#define INST_SELWOATN (1 << 0) /* Select w/o ATN */ +#define INST_SELATN (1 << 1) /* Select w/ATN */ +#define INST_RESELECTED (1 << 2) /* Reselected */ +#define INST_FUNCDONE (1 << 3) /* Function done */ +#define INST_BUSSERVICE (1 << 4) /* Bus service */ +#define INST_DISCONNECT (1 << 5) /* Disconnect */ +#define INST_ILLEGALCMD (1 << 6) /* Illegal command */ +#define INST_BUSRESET (1 << 7) /* SCSI Bus reset */ + +/* Timeout register (write) */ +#define REG_STIM (5) + +/* Sequence step register (read) */ +#define REG_IS (6) +#define IS_BITS 0x07 +#define IS_SELARB 0x00 /* Select & Arb ok */ +#define 
IS_MSGBYTESENT 0x01 /* One byte message sent*/ +#define IS_NOTCOMMAND 0x02 /* Not in command state */ +#define IS_EARLYPHASE 0x03 /* Early phase change */ +#define IS_COMPLETE 0x04 /* Command ok */ +#define IS_SOF 0x08 /* Sync off flag */ + +/* Transfer period step (write) */ +#define REG_STP (6) + +/* Synchronous Offset (write) */ +#define REG_SOF (7) + +/* Fifo state register (read) */ +#define REG_CFIS (7) +#define CFIS_CF 0x1f /* Num bytes in FIFO */ +#define CFIS_IS 0xe0 /* Step */ + +/* config register 1 */ +#define REG_CNTL1 (8) +#define CNTL1_CID (7 << 0) /* Chip ID */ +#define CNTL1_STE (1 << 3) /* Self test enable */ +#define CNTL1_PERE (1 << 4) /* Parity enable reporting en. */ +#define CNTL1_PTE (1 << 5) /* Parity test enable */ +#define CNTL1_DISR (1 << 6) /* Disable Irq on SCSI reset */ +#define CNTL1_ETM (1 << 7) /* Extended Timing Mode */ + +/* Clock conversion factor (read) */ +#define REG_CLKF (9) +#define CLKF_F37MHZ 0x00 /* 35.01 - 40 MHz */ +#define CLKF_F10MHZ 0x02 /* 10 MHz */ +#define CLKF_F12MHZ 0x03 /* 10.01 - 15 MHz */ +#define CLKF_F17MHZ 0x04 /* 15.01 - 20 MHz */ +#define CLKF_F22MHZ 0x05 /* 20.01 - 25 MHz */ +#define CLKF_F27MHZ 0x06 /* 25.01 - 30 MHz */ +#define CLKF_F32MHZ 0x07 /* 30.01 - 35 MHz */ + +/* Chip test register (write) */ +#define REG_FTM (10) +#define TEST_FTM 0x01 /* Force target mode */ +#define TEST_FIM 0x02 /* Force initiator mode */ +#define TEST_FHI 0x04 /* Force high impedance mode */ + +/* Configuration register 2 (read/write) */ +#define REG_CNTL2 (11) +#define CNTL2_PGDP (1 << 0) /* Pass Th/Generate Data Parity */ +#define CNTL2_PGRP (1 << 1) /* Pass Th/Generate Reg Parity */ +#define CNTL2_ACDPE (1 << 2) /* Abort on Cmd/Data Parity Err */ +#define CNTL2_S2FE (1 << 3) /* SCSI2 Features Enable */ +#define CNTL2_TSDR (1 << 4) /* Tristate DREQ */ +#define CNTL2_SBO (1 << 5) /* Select Byte Order */ +#define CNTL2_ENF (1 << 6) /* Enable features */ +#define CNTL2_DAE (1 << 7) /* Data Alignment Enable */ + +/* Configuration register 3 (read/write) */ +#define REG_CNTL3 (12) +#define CNTL3_BS8 (1 << 0) /* Burst size 8 */ +#define CNTL3_MDM (1 << 1) /* Modify DMA mode */ +#define CNTL3_LBTM (1 << 2) /* Last Byte Transfer mode */ +#define CNTL3_FASTCLK (1 << 3) /* Fast SCSI clocking */ +#define CNTL3_FASTSCSI (1 << 4) /* Fast SCSI */ +#define CNTL3_G2CB (1 << 5) /* Group2 SCSI support */ +#define CNTL3_QTAG (1 << 6) /* Enable 3 byte msgs */ +#define CNTL3_ADIDCHK (1 << 7) /* Additional ID check */ + +/* High transfer count (read/write) */ +#define REG_CTCH (14) +#define REG_STCH (14) + +/* ID register (read only) */ +#define REG_ID (14) + +/* Data alignment */ +#define REG_DAL (15) + +typedef enum { + PHASE_IDLE, /* we're not planning on doing anything */ + PHASE_SELECTION, /* selecting a device */ + PHASE_SELSTEPS, /* selection with command steps */ + PHASE_COMMAND, /* command sent */ + PHASE_MESSAGESENT, /* selected, and we're sending cmd */ + PHASE_DATAOUT, /* data out to device */ + PHASE_DATAIN, /* data in from device */ + PHASE_MSGIN, /* message in from device */ + PHASE_MSGIN_DISCONNECT, /* disconnecting from bus */ + PHASE_MSGOUT, /* after message out phase */ + PHASE_MSGOUT_EXPECT, /* expecting message out */ + PHASE_STATUS, /* status from device */ + PHASE_DONE /* Command complete */ +} phase_t; + +typedef enum { + DMA_OUT, /* DMA from memory to chip */ + DMA_IN /* DMA from chip to memory */ +} fasdmadir_t; + +typedef enum { + fasdma_none, /* No dma */ + fasdma_pio, /* PIO mode */ + fasdma_pseudo, /* Pseudo DMA */ + fasdma_real_block, 
/* Real DMA, on block by block basis */ + fasdma_real_all /* Real DMA, on request by request */ +} fasdmatype_t; + +typedef enum { + neg_wait, /* Negotiate with device */ + neg_inprogress, /* Negotiation sent */ + neg_complete, /* Negotiation complete */ + neg_targcomplete, /* Target completed negotiation */ + neg_invalid /* Negotiation not supported */ +} neg_t; + +#define MAGIC 0x441296bdUL +#define NR_MSGS 8 + +#define FASCAP_DMA (1 << 0) +#define FASCAP_PSEUDODMA (1 << 1) + +typedef struct { + unsigned long magic_start; + spinlock_t host_lock; + struct Scsi_Host *host; /* host */ + struct scsi_cmnd *SCpnt; /* currently processing command */ + struct scsi_cmnd *origSCpnt; /* original connecting command */ + struct scsi_cmnd *reqSCpnt; /* request sense command */ + struct scsi_cmnd *rstSCpnt; /* reset command */ + struct scsi_cmnd *pending_SCpnt[8]; /* per-device pending commands */ + int next_pending; /* next pending device */ + + /* + * Error recovery + */ + wait_queue_head_t eh_wait; + struct timer_list eh_timer; + unsigned int rst_dev_status; + unsigned int rst_bus_status; + + /* driver information */ + struct { + phase_t phase; /* current phase */ + void __iomem *io_base; /* iomem base of FAS216 */ + unsigned int io_shift; /* shift to adjust reg offsets by */ + unsigned char cfg[4]; /* configuration registers */ + const char *type; /* chip type */ + unsigned int irq; /* interrupt */ + int dma; /* dma channel */ + + struct scsi_pointer SCp; /* current commands data pointer */ + + MsgQueue_t msgs; /* message queue for connected device */ + + unsigned int async_stp; /* Async transfer STP value */ + unsigned char msgin_fifo; /* bytes in fifo at time of message in */ + unsigned char message[256]; /* last message received from device */ + + unsigned char disconnectable:1; /* this command can be disconnected */ + unsigned char aborting:1; /* aborting command */ + } scsi; + + /* statistics information */ + struct { + unsigned int queues; + unsigned int removes; + unsigned int fins; + unsigned int reads; + unsigned int writes; + unsigned int miscs; + unsigned int disconnects; + unsigned int aborts; + unsigned int bus_resets; + unsigned int host_resets; + } stats; + + /* configuration information */ + struct { + unsigned char clockrate; /* clock rate of FAS device (MHz) */ + unsigned char select_timeout; /* timeout (R5) */ + unsigned char sync_max_depth; /* Synchronous xfer max fifo depth */ + unsigned char wide_max_size; /* Maximum wide transfer size */ + unsigned char cntl3; /* Control Reg 3 */ + unsigned int asyncperiod; /* Async transfer period (ns) */ + unsigned int capabilities; /* driver capabilities */ + unsigned int disconnect_ok:1; /* Disconnects allowed? 
*/ + } ifcfg; + + /* queue handling */ + struct { + Queue_t issue; /* issue queue */ + Queue_t disconnected; /* disconnected command queue */ + } queues; + + /* per-device info */ + struct fas216_device { + unsigned char disconnect_ok:1; /* device can disconnect */ + unsigned char parity_enabled:1; /* parity checking enabled */ + unsigned char parity_check:1; /* need to check parity checking */ + unsigned char period; /* sync xfer period in (*4ns) */ + unsigned char stp; /* synchronous transfer period */ + unsigned char sof; /* synchronous offset register */ + unsigned char wide_xfer; /* currently negociated wide transfer */ + neg_t sync_state; /* synchronous transfer mode */ + neg_t wide_state; /* wide transfer mode */ + } device[8]; + unsigned long busyluns[64/sizeof(unsigned long)];/* array of bits indicating LUNs busy */ + + /* dma */ + struct { + fasdmatype_t transfer_type; /* current type of DMA transfer */ + fasdmatype_t (*setup) (struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t direction, fasdmatype_t min_dma); + void (*pseudo)(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t direction, int transfer); + void (*stop) (struct Scsi_Host *host, struct scsi_pointer *SCp); + } dma; + + /* miscellaneous */ + int internal_done; /* flag to indicate request done */ + struct scsi_eh_save ses; /* holds request sense restore info */ + unsigned long magic_end; +} FAS216_Info; + +/* driver-private data per SCSI command. */ +struct fas216_cmd_priv { + /* + * @scsi_pointer must be the first member. See also arm_scsi_pointer(). + */ + struct scsi_pointer scsi_pointer; + void (*scsi_done)(struct scsi_cmnd *cmd); +}; + +static inline struct fas216_cmd_priv *fas216_cmd_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +/* Function: int fas216_init (struct Scsi_Host *instance) + * Purpose : initialise FAS/NCR/AMD SCSI structures. + * Params : instance - a driver-specific filled-out structure + * Returns : 0 on success + */ +extern int fas216_init (struct Scsi_Host *instance); + +/* Function: int fas216_add (struct Scsi_Host *instance, struct device *dev) + * Purpose : initialise FAS/NCR/AMD SCSI ic. + * Params : instance - a driver-specific filled-out structure + * Returns : 0 on success + */ +extern int fas216_add (struct Scsi_Host *instance, struct device *dev); + +/* Function: int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt) + * Purpose : queue a command for adapter to process. + * Params : h - host adapter + * : SCpnt - Command to queue + * Returns : 0 - success, else error + */ +extern int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt); + +/* Function: int fas216_noqueue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt) + * Purpose : queue a command for adapter to process, and process it to completion. + * Params : h - host adapter + * : SCpnt - Command to queue + * Returns : 0 - success, else error + */ +extern int fas216_noqueue_command(struct Scsi_Host *, struct scsi_cmnd *); + +/* Function: irqreturn_t fas216_intr (FAS216_Info *info) + * Purpose : handle interrupts from the interface to progress a command + * Params : info - interface to service + */ +extern irqreturn_t fas216_intr (FAS216_Info *info); + +extern void fas216_remove (struct Scsi_Host *instance); + +/* Function: void fas216_release (struct Scsi_Host *instance) + * Purpose : release all resources and put everything to bed for FAS/NCR/AMD SCSI ic. 
+ * Params : instance - a driver-specific filled-out structure + * Returns : 0 on success + */ +extern void fas216_release (struct Scsi_Host *instance); + +extern void fas216_print_host(FAS216_Info *info, struct seq_file *m); +extern void fas216_print_stats(FAS216_Info *info, struct seq_file *m); +extern void fas216_print_devices(FAS216_Info *info, struct seq_file *m); + +/* Function: int fas216_eh_abort(struct scsi_cmnd *SCpnt) + * Purpose : abort this command + * Params : SCpnt - command to abort + * Returns : FAILED if unable to abort + */ +extern int fas216_eh_abort(struct scsi_cmnd *SCpnt); + +/* Function: int fas216_eh_device_reset(struct scsi_cmnd *SCpnt) + * Purpose : Reset the device associated with this command + * Params : SCpnt - command specifing device to reset + * Returns : FAILED if unable to reset + */ +extern int fas216_eh_device_reset(struct scsi_cmnd *SCpnt); + +/* Function: int fas216_eh_bus_reset(struct scsi_cmnd *SCpnt) + * Purpose : Reset the complete bus associated with this command + * Params : SCpnt - command specifing bus to reset + * Returns : FAILED if unable to reset + */ +extern int fas216_eh_bus_reset(struct scsi_cmnd *SCpnt); + +/* Function: int fas216_eh_host_reset(struct scsi_cmnd *SCpnt) + * Purpose : Reset the host associated with this command + * Params : SCpnt - command specifing host to reset + * Returns : FAILED if unable to reset + */ +extern int fas216_eh_host_reset(struct scsi_cmnd *SCpnt); + +#endif /* FAS216_H */ diff --git a/drivers/scsi/arm/msgqueue.c b/drivers/scsi/arm/msgqueue.c new file mode 100644 index 000000000..581158313 --- /dev/null +++ b/drivers/scsi/arm/msgqueue.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/drivers/acorn/scsi/msgqueue.c + * + * Copyright (C) 1997-1998 Russell King + * + * message queue handling + */ +#include +#include +#include +#include + +#include "msgqueue.h" + +/* + * Function: struct msgqueue_entry *mqe_alloc(MsgQueue_t *msgq) + * Purpose : Allocate a message queue entry + * Params : msgq - message queue to claim entry for + * Returns : message queue entry or NULL. 
+ */ +static struct msgqueue_entry *mqe_alloc(MsgQueue_t *msgq) +{ + struct msgqueue_entry *mq; + + if ((mq = msgq->free) != NULL) + msgq->free = mq->next; + + return mq; +} + +/* + * Function: void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq) + * Purpose : free a message queue entry + * Params : msgq - message queue to free entry from + * mq - message queue entry to free + */ +static void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq) +{ + if (mq) { + mq->next = msgq->free; + msgq->free = mq; + } +} + +/* + * Function: void msgqueue_initialise(MsgQueue_t *msgq) + * Purpose : initialise a message queue + * Params : msgq - queue to initialise + */ +void msgqueue_initialise(MsgQueue_t *msgq) +{ + int i; + + msgq->qe = NULL; + msgq->free = &msgq->entries[0]; + + for (i = 0; i < NR_MESSAGES; i++) + msgq->entries[i].next = &msgq->entries[i + 1]; + + msgq->entries[NR_MESSAGES - 1].next = NULL; +} + + +/* + * Function: void msgqueue_free(MsgQueue_t *msgq) + * Purpose : free a queue + * Params : msgq - queue to free + */ +void msgqueue_free(MsgQueue_t *msgq) +{ +} + +/* + * Function: int msgqueue_msglength(MsgQueue_t *msgq) + * Purpose : calculate the total length of all messages on the message queue + * Params : msgq - queue to examine + * Returns : number of bytes of messages in queue + */ +int msgqueue_msglength(MsgQueue_t *msgq) +{ + struct msgqueue_entry *mq = msgq->qe; + int length = 0; + + for (mq = msgq->qe; mq; mq = mq->next) + length += mq->msg.length; + + return length; +} + +/* + * Function: struct message *msgqueue_getmsg(MsgQueue_t *msgq, int msgno) + * Purpose : return a message + * Params : msgq - queue to obtain message from + * : msgno - message number + * Returns : pointer to message string, or NULL + */ +struct message *msgqueue_getmsg(MsgQueue_t *msgq, int msgno) +{ + struct msgqueue_entry *mq; + + for (mq = msgq->qe; mq && msgno; mq = mq->next, msgno--); + + return mq ? &mq->msg : NULL; +} + +/* + * Function: int msgqueue_addmsg(MsgQueue_t *msgq, int length, ...) + * Purpose : add a message onto a message queue + * Params : msgq - queue to add message on + * length - length of message + * ... - message bytes + * Returns : != 0 if successful + */ +int msgqueue_addmsg(MsgQueue_t *msgq, int length, ...) 
+{ + struct msgqueue_entry *mq = mqe_alloc(msgq); + va_list ap; + + if (mq) { + struct msgqueue_entry **mqp; + int i; + + va_start(ap, length); + for (i = 0; i < length; i++) + mq->msg.msg[i] = va_arg(ap, unsigned int); + va_end(ap); + + mq->msg.length = length; + mq->msg.fifo = 0; + mq->next = NULL; + + mqp = &msgq->qe; + while (*mqp) + mqp = &(*mqp)->next; + + *mqp = mq; + } + + return mq != NULL; +} + +/* + * Function: void msgqueue_flush(MsgQueue_t *msgq) + * Purpose : flush all messages from message queue + * Params : msgq - queue to flush + */ +void msgqueue_flush(MsgQueue_t *msgq) +{ + struct msgqueue_entry *mq, *mqnext; + + for (mq = msgq->qe; mq; mq = mqnext) { + mqnext = mq->next; + mqe_free(msgq, mq); + } + msgq->qe = NULL; +} + +EXPORT_SYMBOL(msgqueue_initialise); +EXPORT_SYMBOL(msgqueue_free); +EXPORT_SYMBOL(msgqueue_msglength); +EXPORT_SYMBOL(msgqueue_getmsg); +EXPORT_SYMBOL(msgqueue_addmsg); +EXPORT_SYMBOL(msgqueue_flush); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("SCSI message queue handling"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/arm/msgqueue.h b/drivers/scsi/arm/msgqueue.h new file mode 100644 index 000000000..4bcc400f5 --- /dev/null +++ b/drivers/scsi/arm/msgqueue.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * linux/drivers/acorn/scsi/msgqueue.h + * + * Copyright (C) 1997 Russell King + * + * message queue handling + */ +#ifndef MSGQUEUE_H +#define MSGQUEUE_H + +struct message { + char msg[8]; + int length; + int fifo; +}; + +struct msgqueue_entry { + struct message msg; + struct msgqueue_entry *next; +}; + +#define NR_MESSAGES 4 + +typedef struct { + struct msgqueue_entry *qe; + struct msgqueue_entry *free; + struct msgqueue_entry entries[NR_MESSAGES]; +} MsgQueue_t; + +/* + * Function: void msgqueue_initialise(MsgQueue_t *msgq) + * Purpose : initialise a message queue + * Params : msgq - queue to initialise + */ +extern void msgqueue_initialise(MsgQueue_t *msgq); + +/* + * Function: void msgqueue_free(MsgQueue_t *msgq) + * Purpose : free a queue + * Params : msgq - queue to free + */ +extern void msgqueue_free(MsgQueue_t *msgq); + +/* + * Function: int msgqueue_msglength(MsgQueue_t *msgq) + * Purpose : calculate the total length of all messages on the message queue + * Params : msgq - queue to examine + * Returns : number of bytes of messages in queue + */ +extern int msgqueue_msglength(MsgQueue_t *msgq); + +/* + * Function: struct message *msgqueue_getmsg(MsgQueue_t *msgq, int msgno) + * Purpose : return a message & its length + * Params : msgq - queue to obtain message from + * : msgno - message number + * Returns : pointer to message string, or NULL + */ +extern struct message *msgqueue_getmsg(MsgQueue_t *msgq, int msgno); + +/* + * Function: int msgqueue_addmsg(MsgQueue_t *msgq, int length, ...) + * Purpose : add a message onto a message queue + * Params : msgq - queue to add message on + * length - length of message + * ... 
- message bytes + * Returns : != 0 if successful + */ +extern int msgqueue_addmsg(MsgQueue_t *msgq, int length, ...); + +/* + * Function: void msgqueue_flush(MsgQueue_t *msgq) + * Purpose : flush all messages from message queue + * Params : msgq - queue to flush + */ +extern void msgqueue_flush(MsgQueue_t *msgq); + +#endif diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c new file mode 100644 index 000000000..d69245007 --- /dev/null +++ b/drivers/scsi/arm/oak.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Oak Generic NCR5380 driver + * + * Copyright 1995-2002, Russell King + */ + +#include +#include +#include +#include + +#include +#include + +#include + +#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) + +#define NCR5380_read(reg) readb(hostdata->io + ((reg) << 2)) +#define NCR5380_write(reg, value) writeb(value, hostdata->io + ((reg) << 2)) + +#define NCR5380_dma_xfer_len NCR5380_dma_xfer_none +#define NCR5380_dma_recv_setup oakscsi_pread +#define NCR5380_dma_send_setup oakscsi_pwrite +#define NCR5380_dma_residual NCR5380_dma_residual_none + +#define NCR5380_queue_command oakscsi_queue_command +#define NCR5380_info oakscsi_info + +#define NCR5380_implementation_fields /* none */ + +#include "../NCR5380.h" + +#undef START_DMA_INITIATOR_RECEIVE_REG +#define START_DMA_INITIATOR_RECEIVE_REG (128 + 7) + +#define STAT ((128 + 16) << 2) +#define DATA ((128 + 8) << 2) + +static inline int oakscsi_pwrite(struct NCR5380_hostdata *hostdata, + unsigned char *addr, int len) +{ + u8 __iomem *base = hostdata->io; + +printk("writing %p len %d\n",addr, len); + + while(1) + { + int status; + while (((status = readw(base + STAT)) & 0x100)==0); + } + return 0; +} + +static inline int oakscsi_pread(struct NCR5380_hostdata *hostdata, + unsigned char *addr, int len) +{ + u8 __iomem *base = hostdata->io; + +printk("reading %p len %d\n", addr, len); + while(len > 0) + { + unsigned int status, timeout; + unsigned long b; + + timeout = 0x01FFFFFF; + + while (((status = readw(base + STAT)) & 0x100)==0) + { + timeout--; + if(status & 0x200 || !timeout) + { + printk("status = %08X\n", status); + return -1; + } + } + + if(len >= 128) + { + readsw(base + DATA, addr, 128); + addr += 128; + len -= 128; + } + else + { + b = (unsigned long) readw(base + DATA); + *addr ++ = b; + len -= 1; + if(len) + *addr ++ = b>>8; + len -= 1; + } + } + return 0; +} + +#undef STAT +#undef DATA + +#include "../NCR5380.c" + +static const struct scsi_host_template oakscsi_template = { + .module = THIS_MODULE, + .name = "Oak 16-bit SCSI", + .info = oakscsi_info, + .queuecommand = oakscsi_queue_command, + .eh_abort_handler = NCR5380_abort, + .eh_host_reset_handler = NCR5380_host_reset, + .can_queue = 16, + .this_id = 7, + .sg_tablesize = SG_ALL, + .cmd_per_lun = 2, + .dma_boundary = PAGE_SIZE - 1, + .proc_name = "oakscsi", + .cmd_size = sizeof(struct NCR5380_cmd), + .max_sectors = 128, +}; + +static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id) +{ + struct Scsi_Host *host; + int ret; + + ret = ecard_request_resources(ec); + if (ret) + goto out; + + host = scsi_host_alloc(&oakscsi_template, sizeof(struct NCR5380_hostdata)); + if (!host) { + ret = -ENOMEM; + goto release; + } + + priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC), + ecard_resource_len(ec, ECARD_RES_MEMC)); + if (!priv(host)->io) { + ret = -ENOMEM; + goto unreg; + } + + host->irq = NO_IRQ; + + ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP); + if (ret) + goto out_unmap; + + 
NCR5380_maybe_reset_bus(host); + + ret = scsi_add_host(host, &ec->dev); + if (ret) + goto out_exit; + + scsi_scan_host(host); + goto out; + + out_exit: + NCR5380_exit(host); + out_unmap: + iounmap(priv(host)->io); + unreg: + scsi_host_put(host); + release: + ecard_release_resources(ec); + out: + return ret; +} + +static void oakscsi_remove(struct expansion_card *ec) +{ + struct Scsi_Host *host = ecard_get_drvdata(ec); + void __iomem *base = priv(host)->io; + + ecard_set_drvdata(ec, NULL); + scsi_remove_host(host); + + NCR5380_exit(host); + scsi_host_put(host); + iounmap(base); + ecard_release_resources(ec); +} + +static const struct ecard_id oakscsi_cids[] = { + { MANU_OAK, PROD_OAK_SCSI }, + { 0xffff, 0xffff } +}; + +static struct ecard_driver oakscsi_driver = { + .probe = oakscsi_probe, + .remove = oakscsi_remove, + .id_table = oakscsi_cids, + .drv = { + .name = "oakscsi", + }, +}; + +static int __init oakscsi_init(void) +{ + return ecard_register_driver(&oakscsi_driver); +} + +static void __exit oakscsi_exit(void) +{ + ecard_remove_driver(&oakscsi_driver); +} + +module_init(oakscsi_init); +module_exit(oakscsi_exit); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("Oak SCSI driver"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c new file mode 100644 index 000000000..3b5991427 --- /dev/null +++ b/drivers/scsi/arm/powertec.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/drivers/acorn/scsi/powertec.c + * + * Copyright (C) 1997-2005 Russell King + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "fas216.h" +#include "arm_scsi.h" + +#include + +#define POWERTEC_FAS216_OFFSET 0x3000 +#define POWERTEC_FAS216_SHIFT 6 + +#define POWERTEC_INTR_STATUS 0x2000 +#define POWERTEC_INTR_BIT 0x80 + +#define POWERTEC_RESET_CONTROL 0x1018 +#define POWERTEC_RESET_BIT 1 + +#define POWERTEC_TERM_CONTROL 0x2018 +#define POWERTEC_TERM_ENABLE 1 + +#define POWERTEC_INTR_CONTROL 0x101c +#define POWERTEC_INTR_ENABLE 1 +#define POWERTEC_INTR_DISABLE 0 + +#define VERSION "1.10 (19/01/2003 2.5.59)" + +/* + * Use term=0,1,0,0,0 to turn terminators on/off. + * One entry per slot. 
+ */ +static int term[MAX_ECARDS] = { 1, 1, 1, 1, 1, 1, 1, 1 }; + +#define NR_SG 256 + +struct powertec_info { + FAS216_Info info; + struct expansion_card *ec; + void __iomem *base; + unsigned int term_ctl; + struct scatterlist sg[NR_SG]; +}; + +/* Prototype: void powertecscsi_irqenable(ec, irqnr) + * Purpose : Enable interrupts on Powertec SCSI card + * Params : ec - expansion card structure + * : irqnr - interrupt number + */ +static void +powertecscsi_irqenable(struct expansion_card *ec, int irqnr) +{ + struct powertec_info *info = ec->irq_data; + writeb(POWERTEC_INTR_ENABLE, info->base + POWERTEC_INTR_CONTROL); +} + +/* Prototype: void powertecscsi_irqdisable(ec, irqnr) + * Purpose : Disable interrupts on Powertec SCSI card + * Params : ec - expansion card structure + * : irqnr - interrupt number + */ +static void +powertecscsi_irqdisable(struct expansion_card *ec, int irqnr) +{ + struct powertec_info *info = ec->irq_data; + writeb(POWERTEC_INTR_DISABLE, info->base + POWERTEC_INTR_CONTROL); +} + +static const expansioncard_ops_t powertecscsi_ops = { + .irqenable = powertecscsi_irqenable, + .irqdisable = powertecscsi_irqdisable, +}; + +/* Prototype: void powertecscsi_terminator_ctl(host, on_off) + * Purpose : Turn the Powertec SCSI terminators on or off + * Params : host - card to turn on/off + * : on_off - !0 to turn on, 0 to turn off + */ +static void +powertecscsi_terminator_ctl(struct Scsi_Host *host, int on_off) +{ + struct powertec_info *info = (struct powertec_info *)host->hostdata; + + info->term_ctl = on_off ? POWERTEC_TERM_ENABLE : 0; + writeb(info->term_ctl, info->base + POWERTEC_TERM_CONTROL); +} + +/* Prototype: void powertecscsi_intr(irq, *dev_id, *regs) + * Purpose : handle interrupts from Powertec SCSI card + * Params : irq - interrupt number + * dev_id - user-defined (Scsi_Host structure) + */ +static irqreturn_t powertecscsi_intr(int irq, void *dev_id) +{ + struct powertec_info *info = dev_id; + + return fas216_intr(&info->info); +} + +/* Prototype: fasdmatype_t powertecscsi_dma_setup(host, SCpnt, direction, min_type) + * Purpose : initialises DMA/PIO + * Params : host - host + * SCpnt - command + * direction - DMA on to/off of card + * min_type - minimum DMA support that we must have for this transfer + * Returns : type of transfer to be performed + */ +static fasdmatype_t +powertecscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, + fasdmadir_t direction, fasdmatype_t min_type) +{ + struct powertec_info *info = (struct powertec_info *)host->hostdata; + struct device *dev = scsi_get_device(host); + int dmach = info->info.scsi.dma; + + if (info->info.ifcfg.capabilities & FASCAP_DMA && + min_type == fasdma_real_all) { + int bufs, map_dir, dma_dir; + + bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG); + + if (direction == DMA_OUT) { + map_dir = DMA_TO_DEVICE; + dma_dir = DMA_MODE_WRITE; + } else { + map_dir = DMA_FROM_DEVICE; + dma_dir = DMA_MODE_READ; + } + + dma_map_sg(dev, info->sg, bufs, map_dir); + + disable_dma(dmach); + set_dma_sg(dmach, info->sg, bufs); + set_dma_mode(dmach, dma_dir); + enable_dma(dmach); + return fasdma_real_all; + } + + /* + * If we're not doing DMA, + * we'll do slow PIO + */ + return fasdma_pio; +} + +/* Prototype: int powertecscsi_dma_stop(host, SCpnt) + * Purpose : stops DMA/PIO + * Params : host - host + * SCpnt - command + */ +static void +powertecscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) +{ + struct powertec_info *info = (struct powertec_info *)host->hostdata; + if (info->info.scsi.dma != NO_DMA) + 
disable_dma(info->info.scsi.dma); +} + +/* Prototype: const char *powertecscsi_info(struct Scsi_Host * host) + * Purpose : returns a descriptive string about this interface, + * Params : host - driver host structure to return info for. + * Returns : pointer to a static buffer containing null terminated string. + */ +const char *powertecscsi_info(struct Scsi_Host *host) +{ + struct powertec_info *info = (struct powertec_info *)host->hostdata; + static char string[150]; + + sprintf(string, "%s (%s) in slot %d v%s terminators o%s", + host->hostt->name, info->info.scsi.type, info->ec->slot_no, + VERSION, info->term_ctl ? "n" : "ff"); + + return string; +} + +/* Prototype: int powertecscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) + * Purpose : Set a driver specific function + * Params : host - host to setup + * : buffer - buffer containing string describing operation + * : length - length of string + * Returns : -EINVAL, or 0 + */ +static int +powertecscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) +{ + int ret = length; + + if (length >= 12 && strncmp(buffer, "POWERTECSCSI", 12) == 0) { + buffer += 12; + length -= 12; + + if (length >= 5 && strncmp(buffer, "term=", 5) == 0) { + if (buffer[5] == '1') + powertecscsi_terminator_ctl(host, 1); + else if (buffer[5] == '0') + powertecscsi_terminator_ctl(host, 0); + else + ret = -EINVAL; + } else + ret = -EINVAL; + } else + ret = -EINVAL; + + return ret; +} + +/* Prototype: int powertecscsi_proc_info(char *buffer, char **start, off_t offset, + * int length, int host_no, int inout) + * Purpose : Return information about the driver to a user process accessing + * the /proc filesystem. + * Params : buffer - a buffer to write information to + * start - a pointer into this buffer set by this routine to the start + * of the required information. + * offset - offset into information that we have read up to. + * length - length of buffer + * inout - 0 for reading, 1 for writing. + * Returns : length of data written to buffer. + */ +static int powertecscsi_show_info(struct seq_file *m, struct Scsi_Host *host) +{ + struct powertec_info *info; + + info = (struct powertec_info *)host->hostdata; + + seq_printf(m, "PowerTec SCSI driver v%s\n", VERSION); + fas216_print_host(&info->info, m); + seq_printf(m, "Term : o%s\n", + info->term_ctl ? "n" : "ff"); + + fas216_print_stats(&info->info, m); + fas216_print_devices(&info->info, m); + return 0; +} + +static ssize_t powertecscsi_show_term(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct expansion_card *ec = ECARD_DEV(dev); + struct Scsi_Host *host = ecard_get_drvdata(ec); + struct powertec_info *info = (struct powertec_info *)host->hostdata; + + return sprintf(buf, "%d\n", info->term_ctl ? 
1 : 0); +} + +static ssize_t +powertecscsi_store_term(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) +{ + struct expansion_card *ec = ECARD_DEV(dev); + struct Scsi_Host *host = ecard_get_drvdata(ec); + + if (len > 1) + powertecscsi_terminator_ctl(host, buf[0] != '0'); + + return len; +} + +static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR, + powertecscsi_show_term, powertecscsi_store_term); + +static const struct scsi_host_template powertecscsi_template = { + .module = THIS_MODULE, + .show_info = powertecscsi_show_info, + .write_info = powertecscsi_set_proc_info, + .name = "PowerTec SCSI", + .info = powertecscsi_info, + .queuecommand = fas216_queue_command, + .eh_host_reset_handler = fas216_eh_host_reset, + .eh_bus_reset_handler = fas216_eh_bus_reset, + .eh_device_reset_handler = fas216_eh_device_reset, + .eh_abort_handler = fas216_eh_abort, + .cmd_size = sizeof(struct fas216_cmd_priv), + .can_queue = 8, + .this_id = 7, + .sg_tablesize = SG_MAX_SEGMENTS, + .dma_boundary = IOMD_DMA_BOUNDARY, + .cmd_per_lun = 2, + .proc_name = "powertec", +}; + +static int powertecscsi_probe(struct expansion_card *ec, + const struct ecard_id *id) +{ + struct Scsi_Host *host; + struct powertec_info *info; + void __iomem *base; + int ret; + + ret = ecard_request_resources(ec); + if (ret) + goto out; + + base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); + if (!base) { + ret = -ENOMEM; + goto out_region; + } + + host = scsi_host_alloc(&powertecscsi_template, + sizeof (struct powertec_info)); + if (!host) { + ret = -ENOMEM; + goto out_region; + } + + ecard_set_drvdata(ec, host); + + info = (struct powertec_info *)host->hostdata; + info->base = base; + powertecscsi_terminator_ctl(host, term[ec->slot_no]); + + info->ec = ec; + info->info.scsi.io_base = base + POWERTEC_FAS216_OFFSET; + info->info.scsi.io_shift = POWERTEC_FAS216_SHIFT; + info->info.scsi.irq = ec->irq; + info->info.scsi.dma = ec->dma; + info->info.ifcfg.clockrate = 40; /* MHz */ + info->info.ifcfg.select_timeout = 255; + info->info.ifcfg.asyncperiod = 200; /* ns */ + info->info.ifcfg.sync_max_depth = 7; + info->info.ifcfg.cntl3 = CNTL3_BS8 | CNTL3_FASTSCSI | CNTL3_FASTCLK; + info->info.ifcfg.disconnect_ok = 1; + info->info.ifcfg.wide_max_size = 0; + info->info.ifcfg.capabilities = 0; + info->info.dma.setup = powertecscsi_dma_setup; + info->info.dma.pseudo = NULL; + info->info.dma.stop = powertecscsi_dma_stop; + + ec->irqaddr = base + POWERTEC_INTR_STATUS; + ec->irqmask = POWERTEC_INTR_BIT; + + ecard_setirq(ec, &powertecscsi_ops, info); + + device_create_file(&ec->dev, &dev_attr_bus_term); + + ret = fas216_init(host); + if (ret) + goto out_free; + + ret = request_irq(ec->irq, powertecscsi_intr, + 0, "powertec", info); + if (ret) { + printk("scsi%d: IRQ%d not free: %d\n", + host->host_no, ec->irq, ret); + goto out_release; + } + + if (info->info.scsi.dma != NO_DMA) { + if (request_dma(info->info.scsi.dma, "powertec")) { + printk("scsi%d: DMA%d not free, using PIO\n", + host->host_no, info->info.scsi.dma); + info->info.scsi.dma = NO_DMA; + } else { + set_dma_speed(info->info.scsi.dma, 180); + info->info.ifcfg.capabilities |= FASCAP_DMA; + } + } + + ret = fas216_add(host, &ec->dev); + if (ret == 0) + goto out; + + if (info->info.scsi.dma != NO_DMA) + free_dma(info->info.scsi.dma); + free_irq(ec->irq, info); + + out_release: + fas216_release(host); + + out_free: + device_remove_file(&ec->dev, &dev_attr_bus_term); + scsi_host_put(host); + + out_region: + ecard_release_resources(ec); + + out: + return ret; +} + +static void 
powertecscsi_remove(struct expansion_card *ec) +{ + struct Scsi_Host *host = ecard_get_drvdata(ec); + struct powertec_info *info = (struct powertec_info *)host->hostdata; + + ecard_set_drvdata(ec, NULL); + fas216_remove(host); + + device_remove_file(&ec->dev, &dev_attr_bus_term); + + if (info->info.scsi.dma != NO_DMA) + free_dma(info->info.scsi.dma); + free_irq(ec->irq, info); + + fas216_release(host); + scsi_host_put(host); + ecard_release_resources(ec); +} + +static const struct ecard_id powertecscsi_cids[] = { + { MANU_ALSYSTEMS, PROD_ALSYS_SCSIATAPI }, + { 0xffff, 0xffff }, +}; + +static struct ecard_driver powertecscsi_driver = { + .probe = powertecscsi_probe, + .remove = powertecscsi_remove, + .id_table = powertecscsi_cids, + .drv = { + .name = "powertecscsi", + }, +}; + +static int __init powertecscsi_init(void) +{ + return ecard_register_driver(&powertecscsi_driver); +} + +static void __exit powertecscsi_exit(void) +{ + ecard_remove_driver(&powertecscsi_driver); +} + +module_init(powertecscsi_init); +module_exit(powertecscsi_exit); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("Powertec SCSI driver"); +module_param_array(term, int, NULL, 0); +MODULE_PARM_DESC(term, "SCSI bus termination"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/arm/queue.c b/drivers/scsi/arm/queue.c new file mode 100644 index 000000000..978df23ce --- /dev/null +++ b/drivers/scsi/arm/queue.c @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/drivers/acorn/scsi/queue.c: queue handling primitives + * + * Copyright (C) 1997-2000 Russell King + * + * Changelog: + * 15-Sep-1997 RMK Created. + * 11-Oct-1997 RMK Corrected problem with queue_remove_exclude + * not updating internal linked list properly + * (was causing commands to go missing). + * 30-Aug-2000 RMK Use Linux list handling and spinlocks + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define DEBUG + +typedef struct queue_entry { + struct list_head list; + struct scsi_cmnd *SCpnt; +#ifdef DEBUG + unsigned long magic; +#endif +} QE_t; + +#ifdef DEBUG +#define QUEUE_MAGIC_FREE 0xf7e1c9a3 +#define QUEUE_MAGIC_USED 0xf7e1cc33 + +#define SET_MAGIC(q,m) ((q)->magic = (m)) +#define BAD_MAGIC(q,m) ((q)->magic != (m)) +#else +#define SET_MAGIC(q,m) do { } while (0) +#define BAD_MAGIC(q,m) (0) +#endif + +#include "queue.h" + +#define NR_QE 32 + +/* + * Function: void queue_initialise (Queue_t *queue) + * Purpose : initialise a queue + * Params : queue - queue to initialise + */ +int queue_initialise (Queue_t *queue) +{ + unsigned int nqueues = NR_QE; + QE_t *q; + + spin_lock_init(&queue->queue_lock); + INIT_LIST_HEAD(&queue->head); + INIT_LIST_HEAD(&queue->free); + + /* + * If life was easier, then SCpnt would have a + * host-available list head, and we wouldn't + * need to keep free lists or allocate this + * memory. 
+ */ + queue->alloc = q = kmalloc_array(nqueues, sizeof(QE_t), GFP_KERNEL); + if (q) { + for (; nqueues; q++, nqueues--) { + SET_MAGIC(q, QUEUE_MAGIC_FREE); + q->SCpnt = NULL; + list_add(&q->list, &queue->free); + } + } + + return queue->alloc != NULL; +} + +/* + * Function: void queue_free (Queue_t *queue) + * Purpose : free a queue + * Params : queue - queue to free + */ +void queue_free (Queue_t *queue) +{ + if (!list_empty(&queue->head)) + printk(KERN_WARNING "freeing non-empty queue %p\n", queue); + kfree(queue->alloc); +} + + +/* + * Function: int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head) + * Purpose : Add a new command onto a queue, adding REQUEST_SENSE to head. + * Params : queue - destination queue + * SCpnt - command to add + * head - add command to head of queue + * Returns : 0 on error, !0 on success + */ +int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head) +{ + unsigned long flags; + struct list_head *l; + QE_t *q; + int ret = 0; + + spin_lock_irqsave(&queue->queue_lock, flags); + if (list_empty(&queue->free)) + goto empty; + + l = queue->free.next; + list_del(l); + + q = list_entry(l, QE_t, list); + BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_FREE)); + + SET_MAGIC(q, QUEUE_MAGIC_USED); + q->SCpnt = SCpnt; + + if (head) + list_add(l, &queue->head); + else + list_add_tail(l, &queue->head); + + ret = 1; +empty: + spin_unlock_irqrestore(&queue->queue_lock, flags); + return ret; +} + +static struct scsi_cmnd *__queue_remove(Queue_t *queue, struct list_head *ent) +{ + QE_t *q; + + /* + * Move the entry from the "used" list onto the "free" list + */ + list_del(ent); + q = list_entry(ent, QE_t, list); + BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_USED)); + + SET_MAGIC(q, QUEUE_MAGIC_FREE); + list_add(ent, &queue->free); + + return q->SCpnt; +} + +/* + * Function: struct scsi_cmnd *queue_remove_exclude (queue, exclude) + * Purpose : remove a SCSI command from a queue + * Params : queue - queue to remove command from + * exclude - bit array of target&lun which is busy + * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available + */ +struct scsi_cmnd *queue_remove_exclude(Queue_t *queue, unsigned long *exclude) +{ + unsigned long flags; + struct list_head *l; + struct scsi_cmnd *SCpnt = NULL; + + spin_lock_irqsave(&queue->queue_lock, flags); + list_for_each(l, &queue->head) { + QE_t *q = list_entry(l, QE_t, list); + if (!test_bit(q->SCpnt->device->id * 8 + + (u8)(q->SCpnt->device->lun & 0x7), exclude)) { + SCpnt = __queue_remove(queue, l); + break; + } + } + spin_unlock_irqrestore(&queue->queue_lock, flags); + + return SCpnt; +} + +/* + * Function: struct scsi_cmnd *queue_remove (queue) + * Purpose : removes first SCSI command from a queue + * Params : queue - queue to remove command from + * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available + */ +struct scsi_cmnd *queue_remove(Queue_t *queue) +{ + unsigned long flags; + struct scsi_cmnd *SCpnt = NULL; + + spin_lock_irqsave(&queue->queue_lock, flags); + if (!list_empty(&queue->head)) + SCpnt = __queue_remove(queue, queue->head.next); + spin_unlock_irqrestore(&queue->queue_lock, flags); + + return SCpnt; +} + +/* + * Function: struct scsi_cmnd *queue_remove_tgtluntag (queue, target, lun, tag) + * Purpose : remove a SCSI command from the queue for a specified target/lun/tag + * Params : queue - queue to remove command from + * target - target that we want + * lun - lun on device + * tag - tag on device + * Returns : struct scsi_cmnd if successful, or 
NULL if no command satisfies requirements + */ +struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, int lun, + int tag) +{ + unsigned long flags; + struct list_head *l; + struct scsi_cmnd *SCpnt = NULL; + + spin_lock_irqsave(&queue->queue_lock, flags); + list_for_each(l, &queue->head) { + QE_t *q = list_entry(l, QE_t, list); + if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun && + scsi_cmd_to_rq(q->SCpnt)->tag == tag) { + SCpnt = __queue_remove(queue, l); + break; + } + } + spin_unlock_irqrestore(&queue->queue_lock, flags); + + return SCpnt; +} + +/* + * Function: queue_remove_all_target(queue, target) + * Purpose : remove all SCSI commands from the queue for a specified target + * Params : queue - queue to remove command from + * target - target device id + * Returns : nothing + */ +void queue_remove_all_target(Queue_t *queue, int target) +{ + unsigned long flags; + struct list_head *l; + + spin_lock_irqsave(&queue->queue_lock, flags); + list_for_each(l, &queue->head) { + QE_t *q = list_entry(l, QE_t, list); + if (q->SCpnt->device->id == target) + __queue_remove(queue, l); + } + spin_unlock_irqrestore(&queue->queue_lock, flags); +} + +/* + * Function: int queue_probetgtlun (queue, target, lun) + * Purpose : check to see if we have a command in the queue for the specified + * target/lun. + * Params : queue - queue to look in + * target - target we want to probe + * lun - lun on target + * Returns : 0 if not found, != 0 if found + */ +int queue_probetgtlun (Queue_t *queue, int target, int lun) +{ + unsigned long flags; + struct list_head *l; + int found = 0; + + spin_lock_irqsave(&queue->queue_lock, flags); + list_for_each(l, &queue->head) { + QE_t *q = list_entry(l, QE_t, list); + if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun) { + found = 1; + break; + } + } + spin_unlock_irqrestore(&queue->queue_lock, flags); + + return found; +} + +/* + * Function: int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt) + * Purpose : remove a specific command from the queues + * Params : queue - queue to look in + * SCpnt - command to find + * Returns : 0 if not found + */ +int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt) +{ + unsigned long flags; + struct list_head *l; + int found = 0; + + spin_lock_irqsave(&queue->queue_lock, flags); + list_for_each(l, &queue->head) { + QE_t *q = list_entry(l, QE_t, list); + if (q->SCpnt == SCpnt) { + __queue_remove(queue, l); + found = 1; + break; + } + } + spin_unlock_irqrestore(&queue->queue_lock, flags); + + return found; +} + +EXPORT_SYMBOL(queue_initialise); +EXPORT_SYMBOL(queue_free); +EXPORT_SYMBOL(__queue_add); +EXPORT_SYMBOL(queue_remove); +EXPORT_SYMBOL(queue_remove_exclude); +EXPORT_SYMBOL(queue_remove_tgtluntag); +EXPORT_SYMBOL(queue_remove_cmd); +EXPORT_SYMBOL(queue_remove_all_target); +EXPORT_SYMBOL(queue_probetgtlun); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("SCSI command queueing"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/arm/queue.h b/drivers/scsi/arm/queue.h new file mode 100644 index 000000000..cb51379dc --- /dev/null +++ b/drivers/scsi/arm/queue.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * linux/drivers/acorn/scsi/queue.h: queue handling + * + * Copyright (C) 1997 Russell King + */ +#ifndef QUEUE_H +#define QUEUE_H + +typedef struct { + struct list_head head; + struct list_head free; + spinlock_t queue_lock; + void *alloc; /* start of allocated mem */ +} Queue_t; + +/* + * Function: void queue_initialise (Queue_t *queue) + * 
Purpose : initialise a queue + * Params : queue - queue to initialise + */ +extern int queue_initialise (Queue_t *queue); + +/* + * Function: void queue_free (Queue_t *queue) + * Purpose : free a queue + * Params : queue - queue to free + */ +extern void queue_free (Queue_t *queue); + +/* + * Function: struct scsi_cmnd *queue_remove (queue) + * Purpose : removes first SCSI command from a queue + * Params : queue - queue to remove command from + * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available + */ +extern struct scsi_cmnd *queue_remove (Queue_t *queue); + +/* + * Function: struct scsi_cmnd *queue_remove_exclude_ref (queue, exclude) + * Purpose : remove a SCSI command from a queue + * Params : queue - queue to remove command from + * exclude - array of busy LUNs + * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available + */ +extern struct scsi_cmnd *queue_remove_exclude(Queue_t *queue, + unsigned long *exclude); + +#define queue_add_cmd_ordered(queue,SCpnt) \ + __queue_add(queue,SCpnt,(SCpnt)->cmnd[0] == REQUEST_SENSE) +#define queue_add_cmd_tail(queue,SCpnt) \ + __queue_add(queue,SCpnt,0) +/* + * Function: int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head) + * Purpose : Add a new command onto a queue + * Params : queue - destination queue + * SCpnt - command to add + * head - add command to head of queue + * Returns : 0 on error, !0 on success + */ +extern int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head); + +/* + * Function: struct scsi_cmnd *queue_remove_tgtluntag (queue, target, lun, tag) + * Purpose : remove a SCSI command from the queue for a specified target/lun/tag + * Params : queue - queue to remove command from + * target - target that we want + * lun - lun on device + * tag - tag on device + * Returns : struct scsi_cmnd if successful, or NULL if no command satisfies requirements + */ +extern struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, + int lun, int tag); + +/* + * Function: queue_remove_all_target(queue, target) + * Purpose : remove all SCSI commands from the queue for a specified target + * Params : queue - queue to remove command from + * target - target device id + * Returns : nothing + */ +extern void queue_remove_all_target(Queue_t *queue, int target); + +/* + * Function: int queue_probetgtlun (queue, target, lun) + * Purpose : check to see if we have a command in the queue for the specified + * target/lun. + * Params : queue - queue to look in + * target - target we want to probe + * lun - lun on target + * Returns : 0 if not found, != 0 if found + */ +extern int queue_probetgtlun (Queue_t *queue, int target, int lun); + +/* + * Function: int queue_remove_cmd (Queue_t *queue, struct scsi_cmnd *SCpnt) + * Purpose : remove a specific command from the queues + * Params : queue - queue to look in + * SCpnt - command to find + * Returns : 0 if not found + */ +int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt); + +#endif /* QUEUE_H */ diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c new file mode 100644 index 000000000..d401cf271 --- /dev/null +++ b/drivers/scsi/atari_scsi.c @@ -0,0 +1,892 @@ +/* + * atari_scsi.c -- Device dependent functions for the Atari generic SCSI port + * + * Copyright 1994 Roman Hodek + * + * Loosely based on the work of Robert De Vries' team and added: + * - working real DMA + * - Falcon support (untested yet!) ++bjoern fixed and now it works + * - lots of extensions and bug fixes. 
+ * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + * + */ + +/* + * Notes for Falcon SCSI DMA + * + * The 5380 device is one of several that all share the DMA chip. Hence + * "locking" and "unlocking" access to this chip is required. + * + * Two possible schemes for ST DMA acquisition by atari_scsi are: + * 1) The lock is taken for each command separately (i.e. can_queue == 1). + * 2) The lock is taken when the first command arrives and released + * when the last command is finished (i.e. can_queue > 1). + * + * The first alternative limits SCSI bus utilization, since interleaving + * commands is not possible. The second gives better performance but is + * unfair to other drivers needing to use the ST DMA chip. In order to + * allow the IDE and floppy drivers equal access to the ST DMA chip + * the default is can_queue == 1. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#define DMA_MIN_SIZE 32 + +/* Definitions for the core NCR5380 driver. */ + +#define NCR5380_implementation_fields /* none */ + +static u8 (*atari_scsi_reg_read)(unsigned int); +static void (*atari_scsi_reg_write)(unsigned int, u8); + +#define NCR5380_read(reg) atari_scsi_reg_read(reg) +#define NCR5380_write(reg, value) atari_scsi_reg_write(reg, value) + +#define NCR5380_queue_command atari_scsi_queue_command +#define NCR5380_abort atari_scsi_abort +#define NCR5380_info atari_scsi_info + +#define NCR5380_dma_xfer_len atari_scsi_dma_xfer_len +#define NCR5380_dma_recv_setup atari_scsi_dma_recv_setup +#define NCR5380_dma_send_setup atari_scsi_dma_send_setup +#define NCR5380_dma_residual atari_scsi_dma_residual + +#define NCR5380_acquire_dma_irq(instance) falcon_get_lock(instance) +#define NCR5380_release_dma_irq(instance) falcon_release_lock() + +#include "NCR5380.h" + + +#define IS_A_TT() ATARIHW_PRESENT(TT_SCSI) + +#define SCSI_DMA_WRITE_P(elt,val) \ + do { \ + unsigned long v = val; \ + tt_scsi_dma.elt##_lo = v & 0xff; \ + v >>= 8; \ + tt_scsi_dma.elt##_lmd = v & 0xff; \ + v >>= 8; \ + tt_scsi_dma.elt##_hmd = v & 0xff; \ + v >>= 8; \ + tt_scsi_dma.elt##_hi = v & 0xff; \ + } while(0) + +#define SCSI_DMA_READ_P(elt) \ + (((((((unsigned long)tt_scsi_dma.elt##_hi << 8) | \ + (unsigned long)tt_scsi_dma.elt##_hmd) << 8) | \ + (unsigned long)tt_scsi_dma.elt##_lmd) << 8) | \ + (unsigned long)tt_scsi_dma.elt##_lo) + + +static inline void SCSI_DMA_SETADR(unsigned long adr) +{ + st_dma.dma_lo = (unsigned char)adr; + MFPDELAY(); + adr >>= 8; + st_dma.dma_md = (unsigned char)adr; + MFPDELAY(); + adr >>= 8; + st_dma.dma_hi = (unsigned char)adr; + MFPDELAY(); +} + +static inline unsigned long SCSI_DMA_GETADR(void) +{ + unsigned long adr; + adr = st_dma.dma_lo; + MFPDELAY(); + adr |= (st_dma.dma_md & 0xff) << 8; + MFPDELAY(); + adr |= (st_dma.dma_hi & 0xff) << 16; + MFPDELAY(); + return adr; +} + +static void atari_scsi_fetch_restbytes(void); + +static unsigned long atari_dma_residual, atari_dma_startaddr; +static short atari_dma_active; +/* pointer to the dribble buffer */ +static char *atari_dma_buffer; +/* precalculated physical address of the dribble buffer */ +static unsigned long atari_dma_phys_buffer; +/* != 0 tells the Falcon int handler to copy data from the dribble buffer */ +static char *atari_dma_orig_addr; +/* size of the dribble buffer; 4k seems enough, since the Falcon 
cannot use + * scatter-gather anyway, so most transfers are 1024 byte only. In the rare + * cases where requests to physical contiguous buffers have been merged, this + * request is <= 4k (one page). So I don't think we have to split transfers + * just due to this buffer size... + */ +#define STRAM_BUFFER_SIZE (4096) +/* mask for address bits that can't be used with the ST-DMA */ +static unsigned long atari_dma_stram_mask; +#define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0) + +static int setup_can_queue = -1; +module_param(setup_can_queue, int, 0); +static int setup_cmd_per_lun = -1; +module_param(setup_cmd_per_lun, int, 0); +static int setup_sg_tablesize = -1; +module_param(setup_sg_tablesize, int, 0); +static int setup_hostid = -1; +module_param(setup_hostid, int, 0); +static int setup_toshiba_delay = -1; +module_param(setup_toshiba_delay, int, 0); + + +static int scsi_dma_is_ignored_buserr(unsigned char dma_stat) +{ + int i; + unsigned long addr = SCSI_DMA_READ_P(dma_addr), end_addr; + + if (dma_stat & 0x01) { + + /* A bus error happens when DMA-ing from the last page of a + * physical memory chunk (DMA prefetch!), but that doesn't hurt. + * Check for this case: + */ + + for (i = 0; i < m68k_num_memory; ++i) { + end_addr = m68k_memory[i].addr + m68k_memory[i].size; + if (end_addr <= addr && addr <= end_addr + 4) + return 1; + } + } + return 0; +} + + +static irqreturn_t scsi_tt_intr(int irq, void *dev) +{ + struct Scsi_Host *instance = dev; + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int dma_stat; + + dma_stat = tt_scsi_dma.dma_ctrl; + + dsprintk(NDEBUG_INTR, instance, "NCR5380 interrupt, DMA status = %02x\n", + dma_stat & 0xff); + + /* Look if it was the DMA that has interrupted: First possibility + * is that a bus error occurred... + */ + if (dma_stat & 0x80) { + if (!scsi_dma_is_ignored_buserr(dma_stat)) { + printk(KERN_ERR "SCSI DMA caused bus error near 0x%08lx\n", + SCSI_DMA_READ_P(dma_addr)); + printk(KERN_CRIT "SCSI DMA bus error -- bad DMA programming!"); + } + } + + /* If the DMA is active but not finished, we have the case + * that some other 5380 interrupt occurred within the DMA transfer. + * This means we have residual bytes, if the desired end address + * is not yet reached. Maybe we have to fetch some bytes from the + * rest data register, too. The residual must be calculated from + * the address pointer, not the counter register, because only the + * addr reg counts bytes not yet written and pending in the rest + * data reg! + */ + if ((dma_stat & 0x02) && !(dma_stat & 0x40)) { + atari_dma_residual = hostdata->dma_len - + (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr); + + dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n", + atari_dma_residual); + + if ((signed int)atari_dma_residual < 0) + atari_dma_residual = 0; + if ((dma_stat & 1) == 0) { + /* + * After read operations, we maybe have to + * transport some rest bytes + */ + atari_scsi_fetch_restbytes(); + } else { + /* + * There seems to be a nasty bug in some SCSI-DMA/NCR + * combinations: If a target disconnects while a write + * operation is going on, the address register of the + * DMA may be a few bytes farer than it actually read. + * This is probably due to DMA prefetching and a delay + * between DMA and NCR. Experiments showed that the + * dma_addr is 9 bytes to high, but this could vary. + * The problem is, that the residual is thus calculated + * wrong and the next transfer will start behind where + * it should. 
So we round up the residual to the next + * multiple of a sector size, if it isn't already a + * multiple and the originally expected transfer size + * was. The latter condition is there to ensure that + * the correction is taken only for "real" data + * transfers and not for, e.g., the parameters of some + * other command. These shouldn't disconnect anyway. + */ + if (atari_dma_residual & 0x1ff) { + dprintk(NDEBUG_DMA, "SCSI DMA: DMA bug corrected, " + "difference %ld bytes\n", + 512 - (atari_dma_residual & 0x1ff)); + atari_dma_residual = (atari_dma_residual + 511) & ~0x1ff; + } + } + tt_scsi_dma.dma_ctrl = 0; + } + + /* If the DMA is finished, fetch the rest bytes and turn it off */ + if (dma_stat & 0x40) { + atari_dma_residual = 0; + if ((dma_stat & 1) == 0) + atari_scsi_fetch_restbytes(); + tt_scsi_dma.dma_ctrl = 0; + } + + NCR5380_intr(irq, dev); + + return IRQ_HANDLED; +} + + +static irqreturn_t scsi_falcon_intr(int irq, void *dev) +{ + struct Scsi_Host *instance = dev; + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int dma_stat; + + /* Turn off DMA and select sector counter register before + * accessing the status register (Atari recommendation!) + */ + st_dma.dma_mode_status = 0x90; + dma_stat = st_dma.dma_mode_status; + + /* Bit 0 indicates some error in the DMA process... don't know + * what happened exactly (no further docu). + */ + if (!(dma_stat & 0x01)) { + /* DMA error */ + printk(KERN_CRIT "SCSI DMA error near 0x%08lx!\n", SCSI_DMA_GETADR()); + } + + /* If the DMA was active, but now bit 1 is not clear, it is some + * other 5380 interrupt that finishes the DMA transfer. We have to + * calculate the number of residual bytes and give a warning if + * bytes are stuck in the ST-DMA fifo (there's no way to reach them!) + */ + if (atari_dma_active && (dma_stat & 0x02)) { + unsigned long transferred; + + transferred = SCSI_DMA_GETADR() - atari_dma_startaddr; + /* The ST-DMA address is incremented in 2-byte steps, but the + * data are written only in 16-byte chunks. If the number of + * transferred bytes is not divisible by 16, the remainder is + * lost somewhere in outer space. + */ + if (transferred & 15) + printk(KERN_ERR "SCSI DMA error: %ld bytes lost in " + "ST-DMA fifo\n", transferred & 15); + + atari_dma_residual = hostdata->dma_len - transferred; + dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n", + atari_dma_residual); + } else + atari_dma_residual = 0; + atari_dma_active = 0; + + if (atari_dma_orig_addr) { + /* If the dribble buffer was used on a read operation, copy the DMA-ed + * data to the original destination address. + */ + memcpy(atari_dma_orig_addr, phys_to_virt(atari_dma_startaddr), + hostdata->dma_len - atari_dma_residual); + atari_dma_orig_addr = NULL; + } + + NCR5380_intr(irq, dev); + + return IRQ_HANDLED; +} + + +static void atari_scsi_fetch_restbytes(void) +{ + int nr; + char *src, *dst; + unsigned long phys_dst; + + /* fetch rest bytes in the DMA register */ + phys_dst = SCSI_DMA_READ_P(dma_addr); + nr = phys_dst & 3; + if (nr) { + /* there are 'nr' bytes left for the last long address + before the DMA pointer */ + phys_dst ^= nr; + dprintk(NDEBUG_DMA, "SCSI DMA: there are %d rest bytes for phys addr 0x%08lx", + nr, phys_dst); + /* The content of the DMA pointer is a physical address! 
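+		 * For example, a pointer ending in ...2 means nr = 2: the two
+		 * bytes belonging to the long word at ...0 were read from the
+		 * bus but never written to memory; they are still sitting in
+		 * dma_restdata and are copied out by hand below.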
*/ + dst = phys_to_virt(phys_dst); + dprintk(NDEBUG_DMA, " = virt addr %p\n", dst); + for (src = (char *)&tt_scsi_dma.dma_restdata; nr != 0; --nr) + *dst++ = *src++; + } +} + + +/* This function releases the lock on the DMA chip if there is no + * connected command and the disconnected queue is empty. + */ + +static void falcon_release_lock(void) +{ + if (IS_A_TT()) + return; + + if (stdma_is_locked_by(scsi_falcon_intr)) + stdma_release(); +} + +/* This function manages the locking of the ST-DMA. + * If the DMA isn't locked already for SCSI, it tries to lock it by + * calling stdma_lock(). But if the DMA is locked by the SCSI code and + * there are other drivers waiting for the chip, we do not issue the + * command immediately but tell the SCSI mid-layer to defer. + */ + +static int falcon_get_lock(struct Scsi_Host *instance) +{ + if (IS_A_TT()) + return 1; + + if (stdma_is_locked_by(scsi_falcon_intr)) + return 1; + + /* stdma_lock() may sleep which means it can't be used here */ + return stdma_try_lock(scsi_falcon_intr, instance); +} + +#ifndef MODULE +static int __init atari_scsi_setup(char *str) +{ + /* Format of atascsi parameter is: + * atascsi=,,,, + * Defaults depend on TT or Falcon, determined at run time. + * Negative values mean don't change. + */ + int ints[8]; + + get_options(str, ARRAY_SIZE(ints), ints); + + if (ints[0] < 1) { + printk("atari_scsi_setup: no arguments!\n"); + return 0; + } + if (ints[0] >= 1) + setup_can_queue = ints[1]; + if (ints[0] >= 2) + setup_cmd_per_lun = ints[2]; + if (ints[0] >= 3) + setup_sg_tablesize = ints[3]; + if (ints[0] >= 4) + setup_hostid = ints[4]; + /* ints[5] (use_tagged_queuing) is ignored */ + /* ints[6] (use_pdma) is ignored */ + if (ints[0] >= 7) + setup_toshiba_delay = ints[7]; + + return 1; +} + +__setup("atascsi=", atari_scsi_setup); +#endif /* !MODULE */ + +static unsigned long atari_scsi_dma_setup(struct NCR5380_hostdata *hostdata, + void *data, unsigned long count, + int dir) +{ + unsigned long addr = virt_to_phys(data); + + dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, dir = %d\n", + hostdata->host->host_no, data, addr, count, dir); + + if (!IS_A_TT() && !STRAM_ADDR(addr)) { + /* If we have a non-DMAable address on a Falcon, use the dribble + * buffer; 'orig_addr' != 0 in the read case tells the interrupt + * handler to copy data from the dribble buffer to the originally + * wanted address. + */ + if (dir) + memcpy(atari_dma_buffer, data, count); + else + atari_dma_orig_addr = data; + addr = atari_dma_phys_buffer; + } + + atari_dma_startaddr = addr; /* Needed for calculating residual later. */ + + /* Cache cleanup stuff: On writes, push any dirty cache out before sending + * it to the peripheral. (Must be done before DMA setup, since at least + * the ST-DMA begins to fill internal buffers right after setup. For + * reads, invalidate any cache, may be altered after DMA without CPU + * knowledge. + * + * ++roman: For the Medusa, there's no need at all for that cache stuff, + * because the hardware does bus snooping (fine!). + */ + dma_cache_maintenance(addr, count, dir); + + if (IS_A_TT()) { + tt_scsi_dma.dma_ctrl = dir; + SCSI_DMA_WRITE_P(dma_addr, addr); + SCSI_DMA_WRITE_P(dma_cnt, count); + tt_scsi_dma.dma_ctrl = dir | 2; + } else { /* ! 
IS_A_TT */ + + /* set address */ + SCSI_DMA_SETADR(addr); + + /* toggle direction bit to clear FIFO and set DMA direction */ + dir <<= 8; + st_dma.dma_mode_status = 0x90 | dir; + st_dma.dma_mode_status = 0x90 | (dir ^ 0x100); + st_dma.dma_mode_status = 0x90 | dir; + udelay(40); + /* On writes, round up the transfer length to the next multiple of 512 + * (see also comment at atari_dma_xfer_len()). */ + st_dma.fdc_acces_seccount = (count + (dir ? 511 : 0)) >> 9; + udelay(40); + st_dma.dma_mode_status = 0x10 | dir; + udelay(40); + /* need not restore value of dir, only boolean value is tested */ + atari_dma_active = 1; + } + + return count; +} + +static inline int atari_scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata, + unsigned char *data, int count) +{ + return atari_scsi_dma_setup(hostdata, data, count, 0); +} + +static inline int atari_scsi_dma_send_setup(struct NCR5380_hostdata *hostdata, + unsigned char *data, int count) +{ + return atari_scsi_dma_setup(hostdata, data, count, 1); +} + +static int atari_scsi_dma_residual(struct NCR5380_hostdata *hostdata) +{ + return atari_dma_residual; +} + + +#define CMD_SURELY_BLOCK_MODE 0 +#define CMD_SURELY_BYTE_MODE 1 +#define CMD_MODE_UNKNOWN 2 + +static int falcon_classify_cmd(struct scsi_cmnd *cmd) +{ + unsigned char opcode = cmd->cmnd[0]; + + if (opcode == READ_DEFECT_DATA || opcode == READ_LONG || + opcode == READ_BUFFER) + return CMD_SURELY_BYTE_MODE; + else if (opcode == READ_6 || opcode == READ_10 || + opcode == 0xa8 /* READ_12 */ || opcode == READ_REVERSE || + opcode == RECOVER_BUFFERED_DATA) { + /* In case of a sequential-access target (tape), special care is + * needed here: The transfer is block-mode only if the 'fixed' bit is + * set! */ + if (cmd->device->type == TYPE_TAPE && !(cmd->cmnd[1] & 1)) + return CMD_SURELY_BYTE_MODE; + else + return CMD_SURELY_BLOCK_MODE; + } else + return CMD_MODE_UNKNOWN; +} + + +/* This function calculates the number of bytes that can be transferred via + * DMA. On the TT, this is arbitrary, but on the Falcon we have to use the + * ST-DMA chip. There are only multiples of 512 bytes possible and max. + * 255*512 bytes :-( This means also, that defining READ_OVERRUNS is not + * possible on the Falcon, since that would require to program the DMA for + * n*512 - atari_read_overrun bytes. But it seems that the Falcon doesn't have + * the overrun problem, so this question is academic :-) + */ + +static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, + struct scsi_cmnd *cmd) +{ + int wanted_len = NCR5380_to_ncmd(cmd)->this_residual; + int possible_len, limit; + + if (wanted_len < DMA_MIN_SIZE) + return 0; + + if (IS_A_TT()) + /* TT SCSI DMA can transfer arbitrary #bytes */ + return wanted_len; + + /* ST DMA chip is stupid -- only multiples of 512 bytes! (and max. + * 255*512 bytes, but this should be enough) + * + * ++roman: Aaargl! Another Falcon-SCSI problem... There are some commands + * that return a number of bytes which cannot be known beforehand. In this + * case, the given transfer length is an "allocation length". Now it + * can happen that this allocation length is a multiple of 512 bytes and + * the DMA is used. But if not n*512 bytes really arrive, some input data + * will be lost in the ST-DMA's FIFO :-( Thus, we have to distinguish + * between commands that do block transfers and those that do byte + * transfers. But this isn't easy... there are lots of vendor specific + * commands, and the user can issue any command via the + * SCSI_IOCTL_SEND_COMMAND. 
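+	 * (An INQUIRY with a 512 byte allocation length, say, usually
+	 * returns only 36 bytes, even though 512 looks like a perfectly
+	 * DMA-able length.)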
+ * + * The solution: We classify SCSI commands in 1) surely block-mode cmd.s, + * 2) surely byte-mode cmd.s and 3) cmd.s with unknown mode. In case 1) + * and 3), the thing to do is obvious: allow any number of blocks via DMA + * or none. In case 2), we apply some heuristic: Byte mode is assumed if + * the transfer (allocation) length is < 1024, hoping that no cmd. not + * explicitly known as byte mode have such big allocation lengths... + * BTW, all the discussion above applies only to reads. DMA writes are + * unproblematic anyways, since the targets aborts the transfer after + * receiving a sufficient number of bytes. + * + * Another point: If the transfer is from/to an non-ST-RAM address, we + * use the dribble buffer and thus can do only STRAM_BUFFER_SIZE bytes. + */ + + if (cmd->sc_data_direction == DMA_TO_DEVICE) { + /* Write operation can always use the DMA, but the transfer size must + * be rounded up to the next multiple of 512 (atari_dma_setup() does + * this). + */ + possible_len = wanted_len; + } else { + /* Read operations: if the wanted transfer length is not a multiple of + * 512, we cannot use DMA, since the ST-DMA cannot split transfers + * (no interrupt on DMA finished!) + */ + if (wanted_len & 0x1ff) + possible_len = 0; + else { + /* Now classify the command (see above) and decide whether it is + * allowed to do DMA at all */ + switch (falcon_classify_cmd(cmd)) { + case CMD_SURELY_BLOCK_MODE: + possible_len = wanted_len; + break; + case CMD_SURELY_BYTE_MODE: + possible_len = 0; /* DMA prohibited */ + break; + case CMD_MODE_UNKNOWN: + default: + /* For unknown commands assume block transfers if the transfer + * size/allocation length is >= 1024 */ + possible_len = (wanted_len < 1024) ? 0 : wanted_len; + break; + } + } + } + + /* Last step: apply the hard limit on DMA transfers */ + limit = (atari_dma_buffer && !STRAM_ADDR(virt_to_phys(NCR5380_to_ncmd(cmd)->ptr))) ? + STRAM_BUFFER_SIZE : 255*512; + if (possible_len > limit) + possible_len = limit; + + if (possible_len != wanted_len) + dprintk(NDEBUG_DMA, "DMA transfer now %d bytes instead of %d\n", + possible_len, wanted_len); + + return possible_len; +} + + +/* NCR5380 register access functions + * + * There are separate functions for TT and Falcon, because the access + * methods are quite different. The calling macros NCR5380_read and + * NCR5380_write call these functions via function pointers. 
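+ * On the TT the 5380 registers are directly memory mapped (every other
+ * byte of tt_scsi_regp).  On the Falcon they are reached indirectly
+ * through the ST-DMA chip: the register number (offset by 0x88) is first
+ * written to dma_wd.dma_mode_status and the data byte is then transferred
+ * via dma_wd.fdc_acces_seccount, with interrupts masked so the two-step
+ * access cannot be torn.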
+ */ + +static u8 atari_scsi_tt_reg_read(unsigned int reg) +{ + return tt_scsi_regp[reg * 2]; +} + +static void atari_scsi_tt_reg_write(unsigned int reg, u8 value) +{ + tt_scsi_regp[reg * 2] = value; +} + +static u8 atari_scsi_falcon_reg_read(unsigned int reg) +{ + unsigned long flags; + u8 result; + + reg += 0x88; + local_irq_save(flags); + dma_wd.dma_mode_status = (u_short)reg; + result = (u8)dma_wd.fdc_acces_seccount; + local_irq_restore(flags); + return result; +} + +static void atari_scsi_falcon_reg_write(unsigned int reg, u8 value) +{ + unsigned long flags; + + reg += 0x88; + local_irq_save(flags); + dma_wd.dma_mode_status = (u_short)reg; + dma_wd.fdc_acces_seccount = (u_short)value; + local_irq_restore(flags); +} + + +#include "NCR5380.c" + +static int atari_scsi_host_reset(struct scsi_cmnd *cmd) +{ + int rv; + unsigned long flags; + + local_irq_save(flags); + + /* Abort a maybe active DMA transfer */ + if (IS_A_TT()) { + tt_scsi_dma.dma_ctrl = 0; + } else { + if (stdma_is_locked_by(scsi_falcon_intr)) + st_dma.dma_mode_status = 0x90; + atari_dma_active = 0; + atari_dma_orig_addr = NULL; + } + + rv = NCR5380_host_reset(cmd); + + /* The 5380 raises its IRQ line while _RST is active but the ST DMA + * "lock" has been released so this interrupt may end up handled by + * floppy or IDE driver (if one of them holds the lock). The NCR5380 + * interrupt flag has been cleared already. + */ + + local_irq_restore(flags); + + return rv; +} + +#define DRV_MODULE_NAME "atari_scsi" +#define PFX DRV_MODULE_NAME ": " + +static struct scsi_host_template atari_scsi_template = { + .module = THIS_MODULE, + .proc_name = DRV_MODULE_NAME, + .name = "Atari native SCSI", + .info = atari_scsi_info, + .queuecommand = atari_scsi_queue_command, + .eh_abort_handler = atari_scsi_abort, + .eh_host_reset_handler = atari_scsi_host_reset, + .this_id = 7, + .cmd_per_lun = 2, + .dma_boundary = PAGE_SIZE - 1, + .cmd_size = sizeof(struct NCR5380_cmd), +}; + +static int __init atari_scsi_probe(struct platform_device *pdev) +{ + struct Scsi_Host *instance; + int error; + struct resource *irq; + int host_flags = 0; + + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq) + return -ENODEV; + + if (ATARIHW_PRESENT(TT_SCSI)) { + atari_scsi_reg_read = atari_scsi_tt_reg_read; + atari_scsi_reg_write = atari_scsi_tt_reg_write; + } else { + atari_scsi_reg_read = atari_scsi_falcon_reg_read; + atari_scsi_reg_write = atari_scsi_falcon_reg_write; + } + + if (ATARIHW_PRESENT(TT_SCSI)) { + atari_scsi_template.can_queue = 16; + atari_scsi_template.sg_tablesize = SG_ALL; + } else { + atari_scsi_template.can_queue = 1; + atari_scsi_template.sg_tablesize = 1; + } + + if (setup_can_queue > 0) + atari_scsi_template.can_queue = setup_can_queue; + + if (setup_cmd_per_lun > 0) + atari_scsi_template.cmd_per_lun = setup_cmd_per_lun; + + /* Don't increase sg_tablesize on Falcon! */ + if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0) + atari_scsi_template.sg_tablesize = setup_sg_tablesize; + + if (setup_hostid >= 0) { + atari_scsi_template.this_id = setup_hostid & 7; + } else if (IS_REACHABLE(CONFIG_NVRAM)) { + /* Test if a host id is set in the NVRam */ + if (ATARIHW_PRESENT(TT_CLK)) { + unsigned char b; + loff_t offset = 16; + ssize_t count = nvram_read(&b, 1, &offset); + + /* Arbitration enabled? 
(for TOS) + * If yes, use configured host ID + */ + if ((count == 1) && (b & 0x80)) + atari_scsi_template.this_id = b & 7; + } + } + + /* If running on a Falcon and if there's TT-Ram (i.e., more than one + * memory block, since there's always ST-Ram in a Falcon), then + * allocate a STRAM_BUFFER_SIZE byte dribble buffer for transfers + * from/to alternative Ram. + */ + if (ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(EXTD_DMA) && + m68k_realnum_memory > 1) { + atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI"); + if (!atari_dma_buffer) { + pr_err(PFX "can't allocate ST-RAM double buffer\n"); + return -ENOMEM; + } + atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer); + atari_dma_orig_addr = NULL; + } + + instance = scsi_host_alloc(&atari_scsi_template, + sizeof(struct NCR5380_hostdata)); + if (!instance) { + error = -ENOMEM; + goto fail_alloc; + } + + instance->irq = irq->start; + + host_flags |= IS_A_TT() ? 0 : FLAG_LATE_DMA_SETUP; + host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0; + + error = NCR5380_init(instance, host_flags); + if (error) + goto fail_init; + + if (IS_A_TT()) { + error = request_irq(instance->irq, scsi_tt_intr, 0, + "NCR5380", instance); + if (error) { + pr_err(PFX "request irq %d failed, aborting\n", + instance->irq); + goto fail_irq; + } + tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */ + + tt_scsi_dma.dma_ctrl = 0; + atari_dma_residual = 0; + + /* While the read overruns (described by Drew Eckhardt in + * NCR5380.c) never happened on TTs, they do in fact on the + * Medusa (This was the cause why SCSI didn't work right for + * so long there.) Since handling the overruns slows down + * a bit, I turned the #ifdef's into a runtime condition. + * + * In principle it should be sufficient to do max. 1 byte with + * PIO, but there is another problem on the Medusa with the DMA + * rest data register. So read_overruns is currently set + * to 4 to avoid having transfers that aren't a multiple of 4. + * If the rest data bug is fixed, this can be lowered to 1. + */ + if (MACH_IS_MEDUSA) { + struct NCR5380_hostdata *hostdata = + shost_priv(instance); + + hostdata->read_overruns = 4; + } + } else { + /* Nothing to do for the interrupt: the ST-DMA is initialized + * already. + */ + atari_dma_residual = 0; + atari_dma_active = 0; + atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 
0x00000000 + : 0xff000000); + } + + NCR5380_maybe_reset_bus(instance); + + error = scsi_add_host(instance, NULL); + if (error) + goto fail_host; + + platform_set_drvdata(pdev, instance); + + scsi_scan_host(instance); + return 0; + +fail_host: + if (IS_A_TT()) + free_irq(instance->irq, instance); +fail_irq: + NCR5380_exit(instance); +fail_init: + scsi_host_put(instance); +fail_alloc: + if (atari_dma_buffer) + atari_stram_free(atari_dma_buffer); + return error; +} + +static int __exit atari_scsi_remove(struct platform_device *pdev) +{ + struct Scsi_Host *instance = platform_get_drvdata(pdev); + + scsi_remove_host(instance); + if (IS_A_TT()) + free_irq(instance->irq, instance); + NCR5380_exit(instance); + scsi_host_put(instance); + if (atari_dma_buffer) + atari_stram_free(atari_dma_buffer); + return 0; +} + +static struct platform_driver atari_scsi_driver = { + .remove = __exit_p(atari_scsi_remove), + .driver = { + .name = DRV_MODULE_NAME, + }, +}; + +module_platform_driver_probe(atari_scsi_driver, atari_scsi_probe); + +MODULE_ALIAS("platform:" DRV_MODULE_NAME); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c new file mode 100644 index 000000000..2a748af26 --- /dev/null +++ b/drivers/scsi/atp870u.c @@ -0,0 +1,2380 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 1997 Wu Ching Chen + * 2.1.x update (C) 1998 Krzysztof G. Baranowski + * 2.5.x update (C) 2002 Red Hat + * 2.6.x update (C) 2004 Red Hat + * + * Marcelo Tosatti : SMP fixes + * + * Wu Ching Chen : NULL pointer fixes 2000/06/02 + * support atp876 chip + * enable 32 bit fifo transfer + * support cdrom & remove device run ultra speed + * fix disconnect bug 2000/12/21 + * support atp880 chip lvd u160 2001/05/15 + * fix prd table bug 2001/09/12 (7.1) + * + * atp885 support add by ACARD Hao Ping Lian 2005/01/05 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "atp870u.h" + +static const struct scsi_host_template atp870u_template; +static void send_s870(struct atp_unit *dev,unsigned char c); +static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip, + unsigned char lvdmode); + +static inline void atp_writeb_base(struct atp_unit *atp, u8 reg, u8 val) +{ + outb(val, atp->baseport + reg); +} + +static inline void atp_writew_base(struct atp_unit *atp, u8 reg, u16 val) +{ + outw(val, atp->baseport + reg); +} + +static inline void atp_writeb_io(struct atp_unit *atp, u8 channel, u8 reg, u8 val) +{ + outb(val, atp->ioport[channel] + reg); +} + +static inline void atp_writew_io(struct atp_unit *atp, u8 channel, u8 reg, u16 val) +{ + outw(val, atp->ioport[channel] + reg); +} + +static inline void atp_writeb_pci(struct atp_unit *atp, u8 channel, u8 reg, u8 val) +{ + outb(val, atp->pciport[channel] + reg); +} + +static inline void atp_writel_pci(struct atp_unit *atp, u8 channel, u8 reg, u32 val) +{ + outl(val, atp->pciport[channel] + reg); +} + +static inline u8 atp_readb_base(struct atp_unit *atp, u8 reg) +{ + return inb(atp->baseport + reg); +} + +static inline u16 atp_readw_base(struct atp_unit *atp, u8 reg) +{ + return inw(atp->baseport + reg); +} + +static inline u32 atp_readl_base(struct atp_unit *atp, u8 reg) +{ + return inl(atp->baseport + reg); +} + +static inline u8 atp_readb_io(struct atp_unit *atp, u8 channel, u8 reg) +{ + return inb(atp->ioport[channel] + reg); +} + +static inline u16 atp_readw_io(struct atp_unit 
*atp, u8 channel, u8 reg) +{ + return inw(atp->ioport[channel] + reg); +} + +static inline u8 atp_readb_pci(struct atp_unit *atp, u8 channel, u8 reg) +{ + return inb(atp->pciport[channel] + reg); +} + +static inline bool is880(struct atp_unit *atp) +{ + return atp->pdev->device == ATP880_DEVID1 || + atp->pdev->device == ATP880_DEVID2; +} + +static inline bool is885(struct atp_unit *atp) +{ + return atp->pdev->device == ATP885_DEVID; +} + +static irqreturn_t atp870u_intr_handle(int irq, void *dev_id) +{ + unsigned long flags; + unsigned short int id; + unsigned char i, j, c, target_id, lun,cmdp; + unsigned char *prd; + struct scsi_cmnd *workreq; + unsigned long adrcnt, k; +#ifdef ED_DBGP + unsigned long l; +#endif + struct Scsi_Host *host = dev_id; + struct atp_unit *dev = (struct atp_unit *)&host->hostdata; + + for (c = 0; c < 2; c++) { + j = atp_readb_io(dev, c, 0x1f); + if ((j & 0x80) != 0) + break; + dev->in_int[c] = 0; + } + if ((j & 0x80) == 0) + return IRQ_NONE; +#ifdef ED_DBGP + printk("atp870u_intr_handle enter\n"); +#endif + dev->in_int[c] = 1; + cmdp = atp_readb_io(dev, c, 0x10); + if (dev->working[c] != 0) { + if (is885(dev)) { + if ((atp_readb_io(dev, c, 0x16) & 0x80) == 0) + atp_writeb_io(dev, c, 0x16, + (atp_readb_io(dev, c, 0x16) | 0x80)); + } + if ((atp_readb_pci(dev, c, 0x00) & 0x08) != 0) + { + for (k=0; k < 1000; k++) { + if ((atp_readb_pci(dev, c, 2) & 0x08) == 0) + break; + if ((atp_readb_pci(dev, c, 2) & 0x01) == 0) + break; + } + } + atp_writeb_pci(dev, c, 0, 0x00); + + i = atp_readb_io(dev, c, 0x17); + + if (is885(dev)) + atp_writeb_pci(dev, c, 2, 0x06); + + target_id = atp_readb_io(dev, c, 0x15); + + /* + * Remap wide devices onto id numbers + */ + + if ((target_id & 0x40) != 0) { + target_id = (target_id & 0x07) | 0x08; + } else { + target_id &= 0x07; + } + + if ((j & 0x40) != 0) { + if (dev->last_cmd[c] == 0xff) { + dev->last_cmd[c] = target_id; + } + dev->last_cmd[c] |= 0x40; + } + if (is885(dev)) + dev->r1f[c][target_id] |= j; +#ifdef ED_DBGP + printk("atp870u_intr_handle status = %x\n",i); +#endif + if (i == 0x85) { + if ((dev->last_cmd[c] & 0xf0) != 0x40) { + dev->last_cmd[c] = 0xff; + } + if (is885(dev)) { + adrcnt = 0; + ((unsigned char *) &adrcnt)[2] = + atp_readb_io(dev, c, 0x12); + ((unsigned char *) &adrcnt)[1] = + atp_readb_io(dev, c, 0x13); + ((unsigned char *) &adrcnt)[0] = + atp_readb_io(dev, c, 0x14); + if (dev->id[c][target_id].last_len != adrcnt) { + k = dev->id[c][target_id].last_len; + k -= adrcnt; + dev->id[c][target_id].tran_len = k; + dev->id[c][target_id].last_len = adrcnt; + } +#ifdef ED_DBGP + printk("dev->id[c][target_id].last_len = %d " + "dev->id[c][target_id].tran_len = %d\n", + dev->id[c][target_id].last_len, + dev->id[c][target_id].tran_len); +#endif + } + + /* + * Flip wide + */ + if (dev->wide_id[c] != 0) { + atp_writeb_io(dev, c, 0x1b, 0x01); + while ((atp_readb_io(dev, c, 0x1b) & 0x01) != 0x01) + atp_writeb_io(dev, c, 0x1b, 0x01); + } + /* + * Issue more commands + */ + spin_lock_irqsave(dev->host->host_lock, flags); + if (((dev->quhd[c] != dev->quend[c]) || + (dev->last_cmd[c] != 0xff)) && + (dev->in_snd[c] == 0)) { +#ifdef ED_DBGP + printk("Call sent_s870\n"); +#endif + send_s870(dev,c); + } + spin_unlock_irqrestore(dev->host->host_lock, flags); + /* + * Done + */ + dev->in_int[c] = 0; +#ifdef ED_DBGP + printk("Status 0x85 return\n"); +#endif + return IRQ_HANDLED; + } + + if (i == 0x40) { + dev->last_cmd[c] |= 0x40; + dev->in_int[c] = 0; + return IRQ_HANDLED; + } + + if (i == 0x21) { + if ((dev->last_cmd[c] & 0xf0) != 0x40) { 
+ dev->last_cmd[c] = 0xff; + } + adrcnt = 0; + ((unsigned char *) &adrcnt)[2] = + atp_readb_io(dev, c, 0x12); + ((unsigned char *) &adrcnt)[1] = + atp_readb_io(dev, c, 0x13); + ((unsigned char *) &adrcnt)[0] = + atp_readb_io(dev, c, 0x14); + k = dev->id[c][target_id].last_len; + k -= adrcnt; + dev->id[c][target_id].tran_len = k; + dev->id[c][target_id].last_len = adrcnt; + atp_writeb_io(dev, c, 0x10, 0x41); + atp_writeb_io(dev, c, 0x18, 0x08); + dev->in_int[c] = 0; + return IRQ_HANDLED; + } + + if (is885(dev)) { + if ((i == 0x4c) || (i == 0x4d) || (i == 0x8c) || (i == 0x8d)) { + if ((i == 0x4c) || (i == 0x8c)) + i=0x48; + else + i=0x49; + } + } + if ((i == 0x80) || (i == 0x8f)) { +#ifdef ED_DBGP + printk(KERN_DEBUG "Device reselect\n"); +#endif + lun = 0; + if (cmdp == 0x44 || i == 0x80) + lun = atp_readb_io(dev, c, 0x1d) & 0x07; + else { + if ((dev->last_cmd[c] & 0xf0) != 0x40) { + dev->last_cmd[c] = 0xff; + } + if (cmdp == 0x41) { +#ifdef ED_DBGP + printk("cmdp = 0x41\n"); +#endif + adrcnt = 0; + ((unsigned char *) &adrcnt)[2] = + atp_readb_io(dev, c, 0x12); + ((unsigned char *) &adrcnt)[1] = + atp_readb_io(dev, c, 0x13); + ((unsigned char *) &adrcnt)[0] = + atp_readb_io(dev, c, 0x14); + k = dev->id[c][target_id].last_len; + k -= adrcnt; + dev->id[c][target_id].tran_len = k; + dev->id[c][target_id].last_len = adrcnt; + atp_writeb_io(dev, c, 0x18, 0x08); + dev->in_int[c] = 0; + return IRQ_HANDLED; + } else { +#ifdef ED_DBGP + printk("cmdp != 0x41\n"); +#endif + atp_writeb_io(dev, c, 0x10, 0x46); + dev->id[c][target_id].dirct = 0x00; + atp_writeb_io(dev, c, 0x12, 0x00); + atp_writeb_io(dev, c, 0x13, 0x00); + atp_writeb_io(dev, c, 0x14, 0x00); + atp_writeb_io(dev, c, 0x18, 0x08); + dev->in_int[c] = 0; + return IRQ_HANDLED; + } + } + if (dev->last_cmd[c] != 0xff) { + dev->last_cmd[c] |= 0x40; + } + if (is885(dev)) { + j = atp_readb_base(dev, 0x29) & 0xfe; + atp_writeb_base(dev, 0x29, j); + } else + atp_writeb_io(dev, c, 0x10, 0x45); + + target_id = atp_readb_io(dev, c, 0x16); + /* + * Remap wide identifiers + */ + if ((target_id & 0x10) != 0) { + target_id = (target_id & 0x07) | 0x08; + } else { + target_id &= 0x07; + } + if (is885(dev)) + atp_writeb_io(dev, c, 0x10, 0x45); + workreq = dev->id[c][target_id].curr_req; +#ifdef ED_DBGP + scmd_printk(KERN_DEBUG, workreq, "CDB"); + for (l = 0; l < workreq->cmd_len; l++) + printk(KERN_DEBUG " %x",workreq->cmnd[l]); + printk("\n"); +#endif + + atp_writeb_io(dev, c, 0x0f, lun); + atp_writeb_io(dev, c, 0x11, dev->id[c][target_id].devsp); + adrcnt = dev->id[c][target_id].tran_len; + k = dev->id[c][target_id].last_len; + + atp_writeb_io(dev, c, 0x12, ((unsigned char *) &k)[2]); + atp_writeb_io(dev, c, 0x13, ((unsigned char *) &k)[1]); + atp_writeb_io(dev, c, 0x14, ((unsigned char *) &k)[0]); +#ifdef ED_DBGP + printk("k %x, k[0] 0x%x k[1] 0x%x k[2] 0x%x\n", k, + atp_readb_io(dev, c, 0x14), + atp_readb_io(dev, c, 0x13), + atp_readb_io(dev, c, 0x12)); +#endif + /* Remap wide */ + j = target_id; + if (target_id > 7) { + j = (j & 0x07) | 0x40; + } + /* Add direction */ + j |= dev->id[c][target_id].dirct; + atp_writeb_io(dev, c, 0x15, j); + atp_writeb_io(dev, c, 0x16, 0x80); + + /* enable 32 bit fifo transfer */ + if (is885(dev)) { + i = atp_readb_pci(dev, c, 1) & 0xf3; + //j=workreq->cmnd[0]; + if ((workreq->cmnd[0] == READ_6) || + (workreq->cmnd[0] == READ_10) || + (workreq->cmnd[0] == WRITE_6) || + (workreq->cmnd[0] == WRITE_10)) { + i |= 0x0c; + } + atp_writeb_pci(dev, c, 1, i); + } else if (is880(dev)) { + if ((workreq->cmnd[0] == READ_6) || + 
(workreq->cmnd[0] == READ_10) || + (workreq->cmnd[0] == WRITE_6) || + (workreq->cmnd[0] == WRITE_10)) + atp_writeb_base(dev, 0x3b, + (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0); + else + atp_writeb_base(dev, 0x3b, + atp_readb_base(dev, 0x3b) & 0x3f); + } else { + if ((workreq->cmnd[0] == READ_6) || + (workreq->cmnd[0] == READ_10) || + (workreq->cmnd[0] == WRITE_6) || + (workreq->cmnd[0] == WRITE_10)) + atp_writeb_base(dev, 0x3a, + (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08); + else + atp_writeb_base(dev, 0x3a, + atp_readb_base(dev, 0x3a) & 0xf3); + } + j = 0; + id = 1; + id = id << target_id; + /* + * Is this a wide device + */ + if ((id & dev->wide_id[c]) != 0) { + j |= 0x01; + } + atp_writeb_io(dev, c, 0x1b, j); + while ((atp_readb_io(dev, c, 0x1b) & 0x01) != j) + atp_writeb_io(dev, c, 0x1b, j); + if (dev->id[c][target_id].last_len == 0) { + atp_writeb_io(dev, c, 0x18, 0x08); + dev->in_int[c] = 0; +#ifdef ED_DBGP + printk("dev->id[c][target_id].last_len = 0\n"); +#endif + return IRQ_HANDLED; + } +#ifdef ED_DBGP + printk("target_id = %d adrcnt = %d\n",target_id,adrcnt); +#endif + prd = dev->id[c][target_id].prd_pos; + while (adrcnt != 0) { + id = ((unsigned short int *)prd)[2]; + if (id == 0) { + k = 0x10000; + } else { + k = id; + } + if (k > adrcnt) { + ((unsigned short int *)prd)[2] = + (unsigned short int)(k - adrcnt); + ((unsigned long *)prd)[0] += adrcnt; + adrcnt = 0; + dev->id[c][target_id].prd_pos = prd; + } else { + adrcnt -= k; + dev->id[c][target_id].prdaddr += 0x08; + prd += 0x08; + if (adrcnt == 0) { + dev->id[c][target_id].prd_pos = prd; + } + } + } + atp_writel_pci(dev, c, 0x04, dev->id[c][target_id].prdaddr); +#ifdef ED_DBGP + printk("dev->id[%d][%d].prdaddr 0x%8x\n", + c, target_id, dev->id[c][target_id].prdaddr); +#endif + if (!is885(dev)) { + atp_writeb_pci(dev, c, 2, 0x06); + atp_writeb_pci(dev, c, 2, 0x00); + } + /* + * Check transfer direction + */ + if (dev->id[c][target_id].dirct != 0) { + atp_writeb_io(dev, c, 0x18, 0x08); + atp_writeb_pci(dev, c, 0, 0x01); + dev->in_int[c] = 0; +#ifdef ED_DBGP + printk("status 0x80 return dirct != 0\n"); +#endif + return IRQ_HANDLED; + } + atp_writeb_io(dev, c, 0x18, 0x08); + atp_writeb_pci(dev, c, 0, 0x09); + dev->in_int[c] = 0; +#ifdef ED_DBGP + printk("status 0x80 return dirct = 0\n"); +#endif + return IRQ_HANDLED; + } + + /* + * Current scsi request on this target + */ + + workreq = dev->id[c][target_id].curr_req; + + if (i == 0x42 || i == 0x16) { + if ((dev->last_cmd[c] & 0xf0) != 0x40) { + dev->last_cmd[c] = 0xff; + } + if (i == 0x16) { + workreq->result = atp_readb_io(dev, c, 0x0f); + if (((dev->r1f[c][target_id] & 0x10) != 0) && is885(dev)) { + printk(KERN_WARNING "AEC67162 CRC ERROR !\n"); + workreq->result = SAM_STAT_CHECK_CONDITION; + } + } else + workreq->result = SAM_STAT_CHECK_CONDITION; + + if (is885(dev)) { + j = atp_readb_base(dev, 0x29) | 0x01; + atp_writeb_base(dev, 0x29, j); + } + /* + * Complete the command + */ + scsi_dma_unmap(workreq); + + spin_lock_irqsave(dev->host->host_lock, flags); + scsi_done(workreq); +#ifdef ED_DBGP + printk("workreq->scsi_done\n"); +#endif + /* + * Clear it off the queue + */ + dev->id[c][target_id].curr_req = NULL; + dev->working[c]--; + spin_unlock_irqrestore(dev->host->host_lock, flags); + /* + * Take it back wide + */ + if (dev->wide_id[c] != 0) { + atp_writeb_io(dev, c, 0x1b, 0x01); + while ((atp_readb_io(dev, c, 0x1b) & 0x01) != 0x01) + atp_writeb_io(dev, c, 0x1b, 0x01); + } + /* + * If there is stuff to send and nothing going then send it + */ + 
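+			/*
+			 * Same restart logic as in the 0x85 (command complete)
+			 * path above: pick up either a held-back command
+			 * (last_cmd != 0xff) or the next ring entry
+			 * (quhd != quend), but only if no send is already in
+			 * progress on this channel.
+			 */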
spin_lock_irqsave(dev->host->host_lock, flags); + if (((dev->last_cmd[c] != 0xff) || + (dev->quhd[c] != dev->quend[c])) && + (dev->in_snd[c] == 0)) { +#ifdef ED_DBGP + printk("Call sent_s870(scsi_done)\n"); +#endif + send_s870(dev,c); + } + spin_unlock_irqrestore(dev->host->host_lock, flags); + dev->in_int[c] = 0; + return IRQ_HANDLED; + } + if ((dev->last_cmd[c] & 0xf0) != 0x40) { + dev->last_cmd[c] = 0xff; + } + if (i == 0x4f) { + i = 0x89; + } + i &= 0x0f; + if (i == 0x09) { + atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr); + atp_writeb_pci(dev, c, 2, 0x06); + atp_writeb_pci(dev, c, 2, 0x00); + atp_writeb_io(dev, c, 0x10, 0x41); + if (is885(dev)) { + k = dev->id[c][target_id].last_len; + atp_writeb_io(dev, c, 0x12, + ((unsigned char *) (&k))[2]); + atp_writeb_io(dev, c, 0x13, + ((unsigned char *) (&k))[1]); + atp_writeb_io(dev, c, 0x14, + ((unsigned char *) (&k))[0]); + dev->id[c][target_id].dirct = 0x00; + } else { + dev->id[c][target_id].dirct = 0x00; + } + atp_writeb_io(dev, c, 0x18, 0x08); + atp_writeb_pci(dev, c, 0, 0x09); + dev->in_int[c] = 0; + return IRQ_HANDLED; + } + if (i == 0x08) { + atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr); + atp_writeb_pci(dev, c, 2, 0x06); + atp_writeb_pci(dev, c, 2, 0x00); + atp_writeb_io(dev, c, 0x10, 0x41); + if (is885(dev)) { + k = dev->id[c][target_id].last_len; + atp_writeb_io(dev, c, 0x12, + ((unsigned char *) (&k))[2]); + atp_writeb_io(dev, c, 0x13, + ((unsigned char *) (&k))[1]); + atp_writeb_io(dev, c, 0x14, + ((unsigned char *) (&k))[0]); + } + atp_writeb_io(dev, c, 0x15, + atp_readb_io(dev, c, 0x15) | 0x20); + dev->id[c][target_id].dirct = 0x20; + atp_writeb_io(dev, c, 0x18, 0x08); + atp_writeb_pci(dev, c, 0, 0x01); + dev->in_int[c] = 0; + return IRQ_HANDLED; + } + if (i == 0x0a) + atp_writeb_io(dev, c, 0x10, 0x30); + else + atp_writeb_io(dev, c, 0x10, 0x46); + dev->id[c][target_id].dirct = 0x00; + atp_writeb_io(dev, c, 0x12, 0x00); + atp_writeb_io(dev, c, 0x13, 0x00); + atp_writeb_io(dev, c, 0x14, 0x00); + atp_writeb_io(dev, c, 0x18, 0x08); + } + dev->in_int[c] = 0; + + return IRQ_HANDLED; +} +/** + * atp870u_queuecommand_lck - Queue SCSI command + * @req_p: request block + * + * Queue a command to the ATP queue. Called with the host lock held. 
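+ *
+ * Commands are placed on a small per-channel ring (qcnt entries, indexed
+ * by quhd/quend).  Requests for channels above 1 or for target IDs not in
+ * the channel's active_id mask are completed immediately with
+ * DID_BAD_TARGET; a full ring is reported back as DID_BUS_BUSY.  If the
+ * channel is otherwise idle (register 0x1c reads zero and no send or
+ * interrupt is in progress) the command is pushed to the chip right away
+ * via send_s870().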
+ */ +static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p) +{ + void (*done)(struct scsi_cmnd *) = scsi_done; + unsigned char c; + unsigned int m; + struct atp_unit *dev; + struct Scsi_Host *host; + + c = scmd_channel(req_p); + req_p->sense_buffer[0]=0; + scsi_set_resid(req_p, 0); + if (scmd_channel(req_p) > 1) { + req_p->result = DID_BAD_TARGET << 16; + done(req_p); +#ifdef ED_DBGP + printk("atp870u_queuecommand : req_p->device->channel > 1\n"); +#endif + return 0; + } + + host = req_p->device->host; + dev = (struct atp_unit *)&host->hostdata; + + m = 1; + m = m << scmd_id(req_p); + + /* + * Fake a timeout for missing targets + */ + + if ((m & dev->active_id[c]) == 0) { + req_p->result = DID_BAD_TARGET << 16; + done(req_p); + return 0; + } + + /* + * Count new command + */ + dev->quend[c]++; + if (dev->quend[c] >= qcnt) { + dev->quend[c] = 0; + } + + /* + * Check queue state + */ + if (dev->quhd[c] == dev->quend[c]) { + if (dev->quend[c] == 0) { + dev->quend[c] = qcnt; + } +#ifdef ED_DBGP + printk("atp870u_queuecommand : dev->quhd[c] == dev->quend[c]\n"); +#endif + dev->quend[c]--; + req_p->result = DID_BUS_BUSY << 16; + done(req_p); + return 0; + } + dev->quereq[c][dev->quend[c]] = req_p; +#ifdef ED_DBGP + printk("dev->ioport[c] = %x atp_readb_io(dev, c, 0x1c) = %x " + "dev->in_int[%d] = %d dev->in_snd[%d] = %d\n", + dev->ioport[c], atp_readb_io(dev, c, 0x1c), c, + dev->in_int[c],c,dev->in_snd[c]); +#endif + if ((atp_readb_io(dev, c, 0x1c) == 0) && + (dev->in_int[c] == 0) && + (dev->in_snd[c] == 0)) { +#ifdef ED_DBGP + printk("Call sent_s870(atp870u_queuecommand)\n"); +#endif + send_s870(dev,c); + } +#ifdef ED_DBGP + printk("atp870u_queuecommand : exit\n"); +#endif + return 0; +} + +static DEF_SCSI_QCMD(atp870u_queuecommand) + +/* + * send_s870 - send a command to the controller + * + * On entry there is work queued to be done. We move some of that work to the + * controller itself. + * + * Caller holds the host lock. 
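+ *
+ * The routine picks either the command that was previously held back
+ * (last_cmd & 0x40) or the next entry from the quhd/quend ring, writes
+ * the CDB, LUN, sync/wide parameters and the 24-bit transfer length to
+ * the chip, builds the PRD (physical region descriptor) list from the
+ * scatter-gather mapping and finally starts the bus-master DMA in the
+ * required direction.  If the bus is not free the command is flagged
+ * with 0x40 in last_cmd and retried from a later interrupt.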
+ */ +static void send_s870(struct atp_unit *dev, unsigned char c) +{ + struct scsi_cmnd *workreq = NULL; + unsigned int i;//,k; + unsigned char j, target_id; + unsigned char *prd; + unsigned short int w; + unsigned long l, bttl = 0; + unsigned long sg_count; + + if (dev->in_snd[c] != 0) { +#ifdef ED_DBGP + printk("cmnd in_snd\n"); +#endif + return; + } +#ifdef ED_DBGP + printk("Sent_s870 enter\n"); +#endif + dev->in_snd[c] = 1; + if ((dev->last_cmd[c] != 0xff) && ((dev->last_cmd[c] & 0x40) != 0)) { + dev->last_cmd[c] &= 0x0f; + workreq = dev->id[c][dev->last_cmd[c]].curr_req; + if (!workreq) { + dev->last_cmd[c] = 0xff; + if (dev->quhd[c] == dev->quend[c]) { + dev->in_snd[c] = 0; + return; + } + } + } + if (!workreq) { + if ((dev->last_cmd[c] != 0xff) && (dev->working[c] != 0)) { + dev->in_snd[c] = 0; + return; + } + dev->working[c]++; + j = dev->quhd[c]; + dev->quhd[c]++; + if (dev->quhd[c] >= qcnt) + dev->quhd[c] = 0; + workreq = dev->quereq[c][dev->quhd[c]]; + if (dev->id[c][scmd_id(workreq)].curr_req != NULL) { + dev->quhd[c] = j; + dev->working[c]--; + dev->in_snd[c] = 0; + return; + } + dev->id[c][scmd_id(workreq)].curr_req = workreq; + dev->last_cmd[c] = scmd_id(workreq); + } + if ((atp_readb_io(dev, c, 0x1f) & 0xb0) != 0 || + atp_readb_io(dev, c, 0x1c) != 0) { +#ifdef ED_DBGP + printk("Abort to Send\n"); +#endif + dev->last_cmd[c] |= 0x40; + dev->in_snd[c] = 0; + return; + } +#ifdef ED_DBGP + printk("OK to Send\n"); + scmd_printk(KERN_DEBUG, workreq, "CDB"); + for(i=0;icmd_len;i++) { + printk(" %x",workreq->cmnd[i]); + } + printk("\n"); +#endif + l = scsi_bufflen(workreq); + + if (is885(dev)) { + j = atp_readb_base(dev, 0x29) & 0xfe; + atp_writeb_base(dev, 0x29, j); + dev->r1f[c][scmd_id(workreq)] = 0; + } + + if (workreq->cmnd[0] == READ_CAPACITY) { + if (l > 8) + l = 8; + } + if (workreq->cmnd[0] == TEST_UNIT_READY) { + l = 0; + } + + j = 0; + target_id = scmd_id(workreq); + + /* + * Wide ? 
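+	 * wide_id[c] is a per-channel bitmask of targets that negotiated
+	 * wide transfers; if this target's bit is set, register 0x1b is
+	 * driven with 0x01 for this command (and polled until the bit
+	 * sticks).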
+ */ + w = 1; + w = w << target_id; + if ((w & dev->wide_id[c]) != 0) { + j |= 0x01; + } + atp_writeb_io(dev, c, 0x1b, j); + while ((atp_readb_io(dev, c, 0x1b) & 0x01) != j) { + atp_writeb_pci(dev, c, 0x1b, j); +#ifdef ED_DBGP + printk("send_s870 while loop 1\n"); +#endif + } + /* + * Write the command + */ + + atp_writeb_io(dev, c, 0x00, workreq->cmd_len); + atp_writeb_io(dev, c, 0x01, 0x2c); + if (is885(dev)) + atp_writeb_io(dev, c, 0x02, 0x7f); + else + atp_writeb_io(dev, c, 0x02, 0xcf); + for (i = 0; i < workreq->cmd_len; i++) + atp_writeb_io(dev, c, 0x03 + i, workreq->cmnd[i]); + atp_writeb_io(dev, c, 0x0f, workreq->device->lun); + /* + * Write the target + */ + atp_writeb_io(dev, c, 0x11, dev->id[c][target_id].devsp); +#ifdef ED_DBGP + printk("dev->id[%d][%d].devsp = %2x\n",c,target_id, + dev->id[c][target_id].devsp); +#endif + + sg_count = scsi_dma_map(workreq); + /* + * Write transfer size + */ + atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&l))[2]); + atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&l))[1]); + atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&l))[0]); + j = target_id; + dev->id[c][j].last_len = l; + dev->id[c][j].tran_len = 0; +#ifdef ED_DBGP + printk("dev->id[%2d][%2d].last_len = %d\n",c,j,dev->id[c][j].last_len); +#endif + /* + * Flip the wide bits + */ + if ((j & 0x08) != 0) { + j = (j & 0x07) | 0x40; + } + /* + * Check transfer direction + */ + if (workreq->sc_data_direction == DMA_TO_DEVICE) + atp_writeb_io(dev, c, 0x15, j | 0x20); + else + atp_writeb_io(dev, c, 0x15, j); + atp_writeb_io(dev, c, 0x16, atp_readb_io(dev, c, 0x16) | 0x80); + atp_writeb_io(dev, c, 0x16, 0x80); + dev->id[c][target_id].dirct = 0; + if (l == 0) { + if (atp_readb_io(dev, c, 0x1c) == 0) { +#ifdef ED_DBGP + printk("change SCSI_CMD_REG 0x08\n"); +#endif + atp_writeb_io(dev, c, 0x18, 0x08); + } else + dev->last_cmd[c] |= 0x40; + dev->in_snd[c] = 0; + return; + } + prd = dev->id[c][target_id].prd_table; + dev->id[c][target_id].prd_pos = prd; + + /* + * Now write the request list. Either as scatter/gather or as + * a linear chain. + */ + + if (l) { + struct scatterlist *sgpnt; + i = 0; + scsi_for_each_sg(workreq, sgpnt, sg_count, j) { + bttl = sg_dma_address(sgpnt); + l=sg_dma_len(sgpnt); +#ifdef ED_DBGP + printk("1. bttl %x, l %x\n",bttl, l); +#endif + while (l > 0x10000) { + (((u16 *) (prd))[i + 3]) = 0x0000; + (((u16 *) (prd))[i + 2]) = 0x0000; + (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl); + l -= 0x10000; + bttl += 0x10000; + i += 0x04; + } + (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl); + (((u16 *) (prd))[i + 2]) = cpu_to_le16(l); + (((u16 *) (prd))[i + 3]) = 0; + i += 0x04; + } + (((u16 *) (prd))[i - 1]) = cpu_to_le16(0x8000); +#ifdef ED_DBGP + printk("prd %4x %4x %4x %4x\n", + (((unsigned short int *)prd)[0]), + (((unsigned short int *)prd)[1]), + (((unsigned short int *)prd)[2]), + (((unsigned short int *)prd)[3])); + printk("2. 
bttl %x, l %x\n",bttl, l); +#endif + } +#ifdef ED_DBGP + printk("send_s870: prdaddr_2 0x%8x target_id %d\n", + dev->id[c][target_id].prdaddr,target_id); +#endif + dev->id[c][target_id].prdaddr = dev->id[c][target_id].prd_bus; + atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr); + atp_writeb_pci(dev, c, 2, 0x06); + atp_writeb_pci(dev, c, 2, 0x00); + if (is885(dev)) { + j = atp_readb_pci(dev, c, 1) & 0xf3; + if ((workreq->cmnd[0] == READ_6) || + (workreq->cmnd[0] == READ_10) || + (workreq->cmnd[0] == WRITE_6) || + (workreq->cmnd[0] == WRITE_10)) { + j |= 0x0c; + } + atp_writeb_pci(dev, c, 1, j); + } else if (is880(dev)) { + if ((workreq->cmnd[0] == READ_6) || + (workreq->cmnd[0] == READ_10) || + (workreq->cmnd[0] == WRITE_6) || + (workreq->cmnd[0] == WRITE_10)) + atp_writeb_base(dev, 0x3b, + (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0); + else + atp_writeb_base(dev, 0x3b, + atp_readb_base(dev, 0x3b) & 0x3f); + } else { + if ((workreq->cmnd[0] == READ_6) || + (workreq->cmnd[0] == READ_10) || + (workreq->cmnd[0] == WRITE_6) || + (workreq->cmnd[0] == WRITE_10)) + atp_writeb_base(dev, 0x3a, + (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08); + else + atp_writeb_base(dev, 0x3a, + atp_readb_base(dev, 0x3a) & 0xf3); + } + + if(workreq->sc_data_direction == DMA_TO_DEVICE) { + dev->id[c][target_id].dirct = 0x20; + if (atp_readb_io(dev, c, 0x1c) == 0) { + atp_writeb_io(dev, c, 0x18, 0x08); + atp_writeb_pci(dev, c, 0, 0x01); +#ifdef ED_DBGP + printk( "start DMA(to target)\n"); +#endif + } else { + dev->last_cmd[c] |= 0x40; + } + dev->in_snd[c] = 0; + return; + } + if (atp_readb_io(dev, c, 0x1c) == 0) { + atp_writeb_io(dev, c, 0x18, 0x08); + atp_writeb_pci(dev, c, 0, 0x09); +#ifdef ED_DBGP + printk( "start DMA(to host)\n"); +#endif + } else { + dev->last_cmd[c] |= 0x40; + } + dev->in_snd[c] = 0; + return; + +} + +static unsigned char fun_scam(struct atp_unit *dev, unsigned short int *val) +{ + unsigned short int i, k; + unsigned char j; + + atp_writew_io(dev, 0, 0x1c, *val); + for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */ + k = atp_readw_io(dev, 0, 0x1c); + j = (unsigned char) (k >> 8); + if ((k & 0x8000) != 0) /* DB7 all release? */ + i = 0; + } + *val |= 0x4000; /* assert DB6 */ + atp_writew_io(dev, 0, 0x1c, *val); + *val &= 0xdfff; /* assert DB5 */ + atp_writew_io(dev, 0, 0x1c, *val); + for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */ + if ((atp_readw_io(dev, 0, 0x1c) & 0x2000) != 0) /* DB5 all release? */ + i = 0; + } + *val |= 0x8000; /* no DB4-0, assert DB7 */ + *val &= 0xe0ff; + atp_writew_io(dev, 0, 0x1c, *val); + *val &= 0xbfff; /* release DB6 */ + atp_writew_io(dev, 0, 0x1c, *val); + for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */ + if ((atp_readw_io(dev, 0, 0x1c) & 0x4000) != 0) /* DB6 all release? */ + i = 0; + } + + return j; +} + +static void tscam(struct Scsi_Host *host, bool wide_chip, u8 scam_on) +{ + + unsigned char i, j, k; + unsigned long n; + unsigned short int m, assignid_map, val; + unsigned char mbuf[33], quintet[2]; + struct atp_unit *dev = (struct atp_unit *)&host->hostdata; + static unsigned char g2q_tab[8] = { + 0x38, 0x31, 0x32, 0x2b, 0x34, 0x2d, 0x2e, 0x27 + }; + +/* I can't believe we need this before we've even done anything. Remove it + * and see if anyone bitches. 
+ for (i = 0; i < 0x10; i++) { + udelay(0xffff); + } + */ + + atp_writeb_io(dev, 0, 1, 0x08); + atp_writeb_io(dev, 0, 2, 0x7f); + atp_writeb_io(dev, 0, 0x11, 0x20); + + if ((scam_on & 0x40) == 0) { + return; + } + m = 1; + m <<= dev->host_id[0]; + j = 16; + if (!wide_chip) { + m |= 0xff00; + j = 8; + } + assignid_map = m; + atp_writeb_io(dev, 0, 0x02, 0x02); /* 2*2=4ms,3EH 2/32*3E=3.9ms */ + atp_writeb_io(dev, 0, 0x03, 0); + atp_writeb_io(dev, 0, 0x04, 0); + atp_writeb_io(dev, 0, 0x05, 0); + atp_writeb_io(dev, 0, 0x06, 0); + atp_writeb_io(dev, 0, 0x07, 0); + atp_writeb_io(dev, 0, 0x08, 0); + + for (i = 0; i < j; i++) { + m = 1; + m = m << i; + if ((m & assignid_map) != 0) { + continue; + } + atp_writeb_io(dev, 0, 0x0f, 0); + atp_writeb_io(dev, 0, 0x12, 0); + atp_writeb_io(dev, 0, 0x13, 0); + atp_writeb_io(dev, 0, 0x14, 0); + if (i > 7) { + k = (i & 0x07) | 0x40; + } else { + k = i; + } + atp_writeb_io(dev, 0, 0x15, k); + if (wide_chip) + atp_writeb_io(dev, 0, 0x1b, 0x01); + else + atp_writeb_io(dev, 0, 0x1b, 0x00); + do { + atp_writeb_io(dev, 0, 0x18, 0x09); + + while ((atp_readb_io(dev, 0, 0x1f) & 0x80) == 0x00) + cpu_relax(); + k = atp_readb_io(dev, 0, 0x17); + if ((k == 0x85) || (k == 0x42)) + break; + if (k != 0x16) + atp_writeb_io(dev, 0, 0x10, 0x41); + } while (k != 0x16); + if ((k == 0x85) || (k == 0x42)) + continue; + assignid_map |= m; + + } + atp_writeb_io(dev, 0, 0x02, 0x7f); + atp_writeb_io(dev, 0, 0x1b, 0x02); + + udelay(2); + + val = 0x0080; /* bsy */ + atp_writew_io(dev, 0, 0x1c, val); + val |= 0x0040; /* sel */ + atp_writew_io(dev, 0, 0x1c, val); + val |= 0x0004; /* msg */ + atp_writew_io(dev, 0, 0x1c, val); + udelay(2); /* 2 deskew delay(45ns*2=90ns) */ + val &= 0x007f; /* no bsy */ + atp_writew_io(dev, 0, 0x1c, val); + msleep(128); + val &= 0x00fb; /* after 1ms no msg */ + atp_writew_io(dev, 0, 0x1c, val); + while ((atp_readb_io(dev, 0, 0x1c) & 0x04) != 0) + ; + udelay(2); + udelay(100); + for (n = 0; n < 0x30000; n++) + if ((atp_readb_io(dev, 0, 0x1c) & 0x80) != 0) /* bsy ? */ + break; + if (n < 0x30000) + for (n = 0; n < 0x30000; n++) + if ((atp_readb_io(dev, 0, 0x1c) & 0x81) == 0x0081) { + udelay(2); + val |= 0x8003; /* io,cd,db7 */ + atp_writew_io(dev, 0, 0x1c, val); + udelay(2); + val &= 0x00bf; /* no sel */ + atp_writew_io(dev, 0, 0x1c, val); + udelay(2); + break; + } + while (1) { + /* + * The funny division into multiple delays is to accomodate + * arches like ARM where udelay() multiplies its argument by + * a large number to initialize a loop counter. To avoid + * overflow, the maximum supported udelay is 2000 microseconds. + * + * XXX it would be more polite to find a way to use msleep() + */ + mdelay(2); + udelay(48); + if ((atp_readb_io(dev, 0, 0x1c) & 0x80) == 0x00) { /* bsy ? 
*/ + atp_writew_io(dev, 0, 0x1c, 0); + atp_writeb_io(dev, 0, 0x1b, 0); + atp_writeb_io(dev, 0, 0x15, 0); + atp_writeb_io(dev, 0, 0x18, 0x09); + while ((atp_readb_io(dev, 0, 0x1f) & 0x80) == 0) + cpu_relax(); + atp_readb_io(dev, 0, 0x17); + return; + } + val &= 0x00ff; /* synchronization */ + val |= 0x3f00; + fun_scam(dev, &val); + udelay(2); + val &= 0x00ff; /* isolation */ + val |= 0x2000; + fun_scam(dev, &val); + udelay(2); + i = 8; + j = 0; + + while (1) { + if ((atp_readw_io(dev, 0, 0x1c) & 0x2000) == 0) + continue; + udelay(2); + val &= 0x00ff; /* get ID_STRING */ + val |= 0x2000; + k = fun_scam(dev, &val); + if ((k & 0x03) == 0) + break; + mbuf[j] <<= 0x01; + mbuf[j] &= 0xfe; + if ((k & 0x02) != 0) + mbuf[j] |= 0x01; + i--; + if (i > 0) + continue; + j++; + i = 8; + } + + /* isolation complete.. */ +/* mbuf[32]=0; + printk(" \n%x %x %x %s\n ",assignid_map,mbuf[0],mbuf[1],&mbuf[2]); */ + i = 15; + j = mbuf[0]; + if ((j & 0x20) != 0) { /* bit5=1:ID up to 7 */ + i = 7; + } + if ((j & 0x06) != 0) { /* IDvalid? */ + k = mbuf[1]; + while (1) { + m = 1; + m <<= k; + if ((m & assignid_map) == 0) + break; + if (k > 0) + k--; + else + break; + } + } + if ((m & assignid_map) != 0) { /* srch from max acceptable ID# */ + k = i; /* max acceptable ID# */ + while (1) { + m = 1; + m <<= k; + if ((m & assignid_map) == 0) + break; + if (k > 0) + k--; + else + break; + } + } + /* k=binID#, */ + assignid_map |= m; + if (k < 8) { + quintet[0] = 0x38; /* 1st dft ID<8 */ + } else { + quintet[0] = 0x31; /* 1st ID>=8 */ + } + k &= 0x07; + quintet[1] = g2q_tab[k]; + + val &= 0x00ff; /* AssignID 1stQuintet,AH=001xxxxx */ + m = quintet[0] << 8; + val |= m; + fun_scam(dev, &val); + val &= 0x00ff; /* AssignID 2ndQuintet,AH=001xxxxx */ + m = quintet[1] << 8; + val |= m; + fun_scam(dev, &val); + + } +} + +static void atp870u_free_tables(struct Scsi_Host *host) +{ + struct atp_unit *atp_dev = (struct atp_unit *)&host->hostdata; + int j, k; + for (j=0; j < 2; j++) { + for (k = 0; k < 16; k++) { + if (!atp_dev->id[j][k].prd_table) + continue; + dma_free_coherent(&atp_dev->pdev->dev, 1024, + atp_dev->id[j][k].prd_table, + atp_dev->id[j][k].prd_bus); + atp_dev->id[j][k].prd_table = NULL; + } + } +} + +static int atp870u_init_tables(struct Scsi_Host *host) +{ + struct atp_unit *atp_dev = (struct atp_unit *)&host->hostdata; + int c,k; + for(c=0;c < 2;c++) { + for(k=0;k<16;k++) { + atp_dev->id[c][k].prd_table = + dma_alloc_coherent(&atp_dev->pdev->dev, 1024, + &(atp_dev->id[c][k].prd_bus), + GFP_KERNEL); + if (!atp_dev->id[c][k].prd_table) { + printk("atp870u_init_tables fail\n"); + atp870u_free_tables(host); + return -ENOMEM; + } + atp_dev->id[c][k].prdaddr = atp_dev->id[c][k].prd_bus; + atp_dev->id[c][k].devsp=0x20; + atp_dev->id[c][k].devtype = 0x7f; + atp_dev->id[c][k].curr_req = NULL; + } + + atp_dev->active_id[c] = 0; + atp_dev->wide_id[c] = 0; + atp_dev->host_id[c] = 0x07; + atp_dev->quhd[c] = 0; + atp_dev->quend[c] = 0; + atp_dev->last_cmd[c] = 0xff; + atp_dev->in_snd[c] = 0; + atp_dev->in_int[c] = 0; + + for (k = 0; k < qcnt; k++) { + atp_dev->quereq[c][k] = NULL; + } + for (k = 0; k < 16; k++) { + atp_dev->id[c][k].curr_req = NULL; + atp_dev->sp[c][k] = 0x04; + } + } + return 0; +} + +static void atp_set_host_id(struct atp_unit *atp, u8 c, u8 host_id) +{ + atp_writeb_io(atp, c, 0, host_id | 0x08); + atp_writeb_io(atp, c, 0x18, 0); + while ((atp_readb_io(atp, c, 0x1f) & 0x80) == 0) + mdelay(1); + atp_readb_io(atp, c, 0x17); + atp_writeb_io(atp, c, 1, 8); + atp_writeb_io(atp, c, 2, 0x7f); + atp_writeb_io(atp, c, 
0x11, 0x20); +} + +static void atp870_init(struct Scsi_Host *shpnt) +{ + struct atp_unit *atpdev = shost_priv(shpnt); + struct pci_dev *pdev = atpdev->pdev; + unsigned char k, host_id; + u8 scam_on; + bool wide_chip = + (pdev->device == PCI_DEVICE_ID_ARTOP_AEC7610 && + pdev->revision == 4) || + (pdev->device == PCI_DEVICE_ID_ARTOP_AEC7612UW) || + (pdev->device == PCI_DEVICE_ID_ARTOP_AEC7612SUW); + + pci_read_config_byte(pdev, 0x49, &host_id); + + dev_info(&pdev->dev, "ACARD AEC-671X PCI Ultra/W SCSI-2/3 " + "Host Adapter: IO:%lx, IRQ:%d.\n", + shpnt->io_port, shpnt->irq); + + atpdev->ioport[0] = shpnt->io_port; + atpdev->pciport[0] = shpnt->io_port + 0x20; + host_id &= 0x07; + atpdev->host_id[0] = host_id; + scam_on = atp_readb_pci(atpdev, 0, 2); + atpdev->global_map[0] = atp_readb_base(atpdev, 0x2d); + atpdev->ultra_map[0] = atp_readw_base(atpdev, 0x2e); + + if (atpdev->ultra_map[0] == 0) { + scam_on = 0x00; + atpdev->global_map[0] = 0x20; + atpdev->ultra_map[0] = 0xffff; + } + + if (pdev->revision > 0x07) /* check if atp876 chip */ + atp_writeb_base(atpdev, 0x3e, 0x00); /* enable terminator */ + + k = (atp_readb_base(atpdev, 0x3a) & 0xf3) | 0x10; + atp_writeb_base(atpdev, 0x3a, k); + atp_writeb_base(atpdev, 0x3a, k & 0xdf); + msleep(32); + atp_writeb_base(atpdev, 0x3a, k); + msleep(32); + atp_set_host_id(atpdev, 0, host_id); + + tscam(shpnt, wide_chip, scam_on); + atp_writeb_base(atpdev, 0x3a, atp_readb_base(atpdev, 0x3a) | 0x10); + atp_is(atpdev, 0, wide_chip, 0); + atp_writeb_base(atpdev, 0x3a, atp_readb_base(atpdev, 0x3a) & 0xef); + atp_writeb_base(atpdev, 0x3b, atp_readb_base(atpdev, 0x3b) | 0x20); + shpnt->max_id = wide_chip ? 16 : 8; + shpnt->this_id = host_id; +} + +static void atp880_init(struct Scsi_Host *shpnt) +{ + struct atp_unit *atpdev = shost_priv(shpnt); + struct pci_dev *pdev = atpdev->pdev; + unsigned char k, m, host_id; + unsigned int n; + + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80); + + atpdev->ioport[0] = shpnt->io_port + 0x40; + atpdev->pciport[0] = shpnt->io_port + 0x28; + + host_id = atp_readb_base(atpdev, 0x39) >> 4; + + dev_info(&pdev->dev, "ACARD AEC-67160 PCI Ultra3 LVD " + "Host Adapter: IO:%lx, IRQ:%d.\n", + shpnt->io_port, shpnt->irq); + atpdev->host_id[0] = host_id; + + atpdev->global_map[0] = atp_readb_base(atpdev, 0x35); + atpdev->ultra_map[0] = atp_readw_base(atpdev, 0x3c); + + n = 0x3f09; + while (n < 0x4000) { + m = 0; + atp_writew_base(atpdev, 0x34, n); + n += 0x0002; + if (atp_readb_base(atpdev, 0x30) == 0xff) + break; + + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); + atp_writew_base(atpdev, 0x34, n); + n += 0x0002; + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); + atp_writew_base(atpdev, 0x34, n); + n += 0x0002; + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); + atp_writew_base(atpdev, 0x34, n); + n += 0x0002; + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); + n += 0x0018; + } + 
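+	/*
+	 * The loop above read the 16 per-target setup bytes through the
+	 * card's indirect data window (address written to 0x34, data read
+	 * back at 0x30-0x33).  Below they are turned into bitmaps: a value
+	 * greater than 1 marks the target as Ultra-capable, 0 marks it as
+	 * asynchronous-only.
+	 */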
atp_writew_base(atpdev, 0x34, 0); + atpdev->ultra_map[0] = 0; + atpdev->async[0] = 0; + for (k = 0; k < 16; k++) { + n = 1 << k; + if (atpdev->sp[0][k] > 1) + atpdev->ultra_map[0] |= n; + else + if (atpdev->sp[0][k] == 0) + atpdev->async[0] |= n; + } + atpdev->async[0] = ~(atpdev->async[0]); + atp_writeb_base(atpdev, 0x35, atpdev->global_map[0]); + + k = atp_readb_base(atpdev, 0x38) & 0x80; + atp_writeb_base(atpdev, 0x38, k); + atp_writeb_base(atpdev, 0x3b, 0x20); + msleep(32); + atp_writeb_base(atpdev, 0x3b, 0); + msleep(32); + atp_readb_io(atpdev, 0, 0x1b); + atp_readb_io(atpdev, 0, 0x17); + + atp_set_host_id(atpdev, 0, host_id); + + tscam(shpnt, true, atp_readb_base(atpdev, 0x22)); + atp_is(atpdev, 0, true, atp_readb_base(atpdev, 0x3f) & 0x40); + atp_writeb_base(atpdev, 0x38, 0xb0); + shpnt->max_id = 16; + shpnt->this_id = host_id; +} + +static void atp885_init(struct Scsi_Host *shpnt) +{ + struct atp_unit *atpdev = shost_priv(shpnt); + struct pci_dev *pdev = atpdev->pdev; + unsigned char k, m, c; + unsigned int n; + unsigned char setupdata[2][16]; + + dev_info(&pdev->dev, "ACARD AEC-67162 PCI Ultra3 LVD " + "Host Adapter: IO:%lx, IRQ:%d.\n", + shpnt->io_port, shpnt->irq); + + atpdev->ioport[0] = shpnt->io_port + 0x80; + atpdev->ioport[1] = shpnt->io_port + 0xc0; + atpdev->pciport[0] = shpnt->io_port + 0x40; + atpdev->pciport[1] = shpnt->io_port + 0x50; + + c = atp_readb_base(atpdev, 0x29); + atp_writeb_base(atpdev, 0x29, c | 0x04); + + n = 0x1f80; + while (n < 0x2000) { + atp_writew_base(atpdev, 0x3c, n); + if (atp_readl_base(atpdev, 0x38) == 0xffffffff) + break; + for (m = 0; m < 2; m++) { + atpdev->global_map[m] = 0; + for (k = 0; k < 4; k++) { + atp_writew_base(atpdev, 0x3c, n++); + ((u32 *)&setupdata[m][0])[k] = + atp_readl_base(atpdev, 0x38); + } + for (k = 0; k < 4; k++) { + atp_writew_base(atpdev, 0x3c, n++); + ((u32 *)&atpdev->sp[m][0])[k] = + atp_readl_base(atpdev, 0x38); + } + n += 8; + } + } + c = atp_readb_base(atpdev, 0x29); + atp_writeb_base(atpdev, 0x29, c & 0xfb); + for (c = 0; c < 2; c++) { + atpdev->ultra_map[c] = 0; + atpdev->async[c] = 0; + for (k = 0; k < 16; k++) { + n = 1 << k; + if (atpdev->sp[c][k] > 1) + atpdev->ultra_map[c] |= n; + else + if (atpdev->sp[c][k] == 0) + atpdev->async[c] |= n; + } + atpdev->async[c] = ~(atpdev->async[c]); + + if (atpdev->global_map[c] == 0) { + k = setupdata[c][1]; + if ((k & 0x40) != 0) + atpdev->global_map[c] |= 0x20; + k &= 0x07; + atpdev->global_map[c] |= k; + if ((setupdata[c][2] & 0x04) != 0) + atpdev->global_map[c] |= 0x08; + atpdev->host_id[c] = setupdata[c][0] & 0x07; + } + } + + k = atp_readb_base(atpdev, 0x28) & 0x8f; + k |= 0x10; + atp_writeb_base(atpdev, 0x28, k); + atp_writeb_pci(atpdev, 0, 1, 0x80); + atp_writeb_pci(atpdev, 1, 1, 0x80); + msleep(100); + atp_writeb_pci(atpdev, 0, 1, 0); + atp_writeb_pci(atpdev, 1, 1, 0); + msleep(1000); + atp_readb_io(atpdev, 0, 0x1b); + atp_readb_io(atpdev, 0, 0x17); + atp_readb_io(atpdev, 1, 0x1b); + atp_readb_io(atpdev, 1, 0x17); + + k = atpdev->host_id[0]; + if (k > 7) + k = (k & 0x07) | 0x40; + atp_set_host_id(atpdev, 0, k); + + k = atpdev->host_id[1]; + if (k > 7) + k = (k & 0x07) | 0x40; + atp_set_host_id(atpdev, 1, k); + + msleep(600); /* this delay used to be called tscam_885() */ + dev_info(&pdev->dev, "Scanning Channel A SCSI Device ...\n"); + atp_is(atpdev, 0, true, atp_readb_io(atpdev, 0, 0x1b) >> 7); + atp_writeb_io(atpdev, 0, 0x16, 0x80); + dev_info(&pdev->dev, "Scanning Channel B SCSI Device ...\n"); + atp_is(atpdev, 1, true, atp_readb_io(atpdev, 1, 0x1b) >> 7); + 
atp_writeb_io(atpdev, 1, 0x16, 0x80); + k = atp_readb_base(atpdev, 0x28) & 0xcf; + k |= 0xc0; + atp_writeb_base(atpdev, 0x28, k); + k = atp_readb_base(atpdev, 0x1f) | 0x80; + atp_writeb_base(atpdev, 0x1f, k); + k = atp_readb_base(atpdev, 0x29) | 0x01; + atp_writeb_base(atpdev, 0x29, k); + shpnt->max_id = 16; + shpnt->max_lun = (atpdev->global_map[0] & 0x07) + 1; + shpnt->max_channel = 1; + shpnt->this_id = atpdev->host_id[0]; +} + +/* return non-zero on detection */ +static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct Scsi_Host *shpnt = NULL; + struct atp_unit *atpdev; + int err; + + if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610 && pdev->revision < 2) { + dev_err(&pdev->dev, "ATP850S chips (AEC6710L/F cards) are not supported.\n"); + return -ENODEV; + } + + err = pci_enable_device(pdev); + if (err) + goto fail; + + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { + printk(KERN_ERR "atp870u: DMA mask required but not available.\n"); + err = -EIO; + goto disable_device; + } + + err = pci_request_regions(pdev, "atp870u"); + if (err) + goto disable_device; + pci_set_master(pdev); + + err = -ENOMEM; + shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit)); + if (!shpnt) + goto release_region; + + atpdev = shost_priv(shpnt); + + atpdev->host = shpnt; + atpdev->pdev = pdev; + pci_set_drvdata(pdev, atpdev); + + shpnt->io_port = pci_resource_start(pdev, 0); + shpnt->io_port &= 0xfffffff8; + shpnt->n_io_port = pci_resource_len(pdev, 0); + atpdev->baseport = shpnt->io_port; + shpnt->unique_id = shpnt->io_port; + shpnt->irq = pdev->irq; + + err = atp870u_init_tables(shpnt); + if (err) { + dev_err(&pdev->dev, "Unable to allocate tables for Acard controller\n"); + goto unregister; + } + + if (is880(atpdev)) + atp880_init(shpnt); + else if (is885(atpdev)) + atp885_init(shpnt); + else + atp870_init(shpnt); + + err = request_irq(shpnt->irq, atp870u_intr_handle, IRQF_SHARED, "atp870u", shpnt); + if (err) { + dev_err(&pdev->dev, "Unable to allocate IRQ %d.\n", shpnt->irq); + goto free_tables; + } + + err = scsi_add_host(shpnt, &pdev->dev); + if (err) + goto scsi_add_fail; + scsi_scan_host(shpnt); + + return 0; + +scsi_add_fail: + free_irq(shpnt->irq, shpnt); +free_tables: + atp870u_free_tables(shpnt); +unregister: + scsi_host_put(shpnt); +release_region: + pci_release_regions(pdev); +disable_device: + pci_disable_device(pdev); +fail: + return err; +} + +/* The abort command does not leave the device in a clean state where + it is available to be used again. Until this gets worked out, we will + leave it commented out. 
*/ + +static int atp870u_abort(struct scsi_cmnd * SCpnt) +{ + unsigned char j, k, c; + struct scsi_cmnd *workrequ; + struct atp_unit *dev; + struct Scsi_Host *host; + host = SCpnt->device->host; + + dev = (struct atp_unit *)&host->hostdata; + c = scmd_channel(SCpnt); + printk(" atp870u: abort Channel = %x \n", c); + printk("working=%x last_cmd=%x ", dev->working[c], dev->last_cmd[c]); + printk(" quhdu=%x quendu=%x ", dev->quhd[c], dev->quend[c]); + for (j = 0; j < 0x18; j++) { + printk(" r%2x=%2x", j, atp_readb_io(dev, c, j)); + } + printk(" r1c=%2x", atp_readb_io(dev, c, 0x1c)); + printk(" r1f=%2x in_snd=%2x ", atp_readb_io(dev, c, 0x1f), dev->in_snd[c]); + printk(" d00=%2x", atp_readb_pci(dev, c, 0x00)); + printk(" d02=%2x", atp_readb_pci(dev, c, 0x02)); + for(j=0;j<16;j++) { + if (dev->id[c][j].curr_req != NULL) { + workrequ = dev->id[c][j].curr_req; + printk("\n que cdb= "); + for (k=0; k < workrequ->cmd_len; k++) { + printk(" %2x ",workrequ->cmnd[k]); + } + printk(" last_lenu= %x ",(unsigned int)dev->id[c][j].last_len); + } + } + return SUCCESS; +} + +static const char *atp870u_info(struct Scsi_Host *notused) +{ + static char buffer[128]; + + strcpy(buffer, "ACARD AEC-6710/6712/67160 PCI Ultra/W/LVD SCSI-3 Adapter Driver V2.6+ac "); + + return buffer; +} + +static int atp870u_show_info(struct seq_file *m, struct Scsi_Host *HBAptr) +{ + seq_puts(m, "ACARD AEC-671X Driver Version: 2.6+ac\n\n" + "Adapter Configuration:\n"); + seq_printf(m, " Base IO: %#.4lx\n", HBAptr->io_port); + seq_printf(m, " IRQ: %d\n", HBAptr->irq); + return 0; +} + + +static int atp870u_biosparam(struct scsi_device *disk, struct block_device *dev, + sector_t capacity, int *ip) +{ + int heads, sectors, cylinders; + + heads = 64; + sectors = 32; + cylinders = (unsigned long)capacity / (heads * sectors); + if (cylinders > 1024) { + heads = 255; + sectors = 63; + cylinders = (unsigned long)capacity / (heads * sectors); + } + ip[0] = heads; + ip[1] = sectors; + ip[2] = cylinders; + + return 0; +} + +static void atp870u_remove (struct pci_dev *pdev) +{ + struct atp_unit *devext = pci_get_drvdata(pdev); + struct Scsi_Host *pshost = devext->host; + + scsi_remove_host(pshost); + free_irq(pshost->irq, pshost); + pci_release_regions(pdev); + pci_disable_device(pdev); + atp870u_free_tables(pshost); + scsi_host_put(pshost); +} +MODULE_LICENSE("GPL"); + +static const struct scsi_host_template atp870u_template = { + .module = THIS_MODULE, + .name = "atp870u" /* name */, + .proc_name = "atp870u", + .show_info = atp870u_show_info, + .info = atp870u_info /* info */, + .queuecommand = atp870u_queuecommand /* queuecommand */, + .eh_abort_handler = atp870u_abort /* abort */, + .bios_param = atp870u_biosparam /* biosparm */, + .can_queue = qcnt /* can_queue */, + .this_id = 7 /* SCSI ID */, + .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/, + .max_sectors = ATP870U_MAX_SECTORS, +}; + +static struct pci_device_id atp870u_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP885_DEVID) }, + { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID1) }, + { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID2) }, + { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7610) }, + { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612UW) }, + { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612U) }, + { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612S) }, + { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612D) }, + { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612SUW) }, + { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, 
PCI_DEVICE_ID_ARTOP_8060) }, + { 0, }, +}; + +MODULE_DEVICE_TABLE(pci, atp870u_id_table); + +static struct pci_driver atp870u_driver = { + .id_table = atp870u_id_table, + .name = "atp870u", + .probe = atp870u_probe, + .remove = atp870u_remove, +}; + +module_pci_driver(atp870u_driver); + +static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip, + unsigned char lvdmode) +{ + unsigned char i, j, k, rmb, n; + unsigned short int m; + static unsigned char mbuf[512]; + static unsigned char satn[9] = { 0, 0, 0, 0, 0, 0, 0, 6, 6 }; + static unsigned char inqd[9] = { 0x12, 0, 0, 0, 0x24, 0, 0, 0x24, 6 }; + static unsigned char synn[6] = { 0x80, 1, 3, 1, 0x19, 0x0e }; + unsigned char synu[6] = { 0x80, 1, 3, 1, 0x0a, 0x0e }; + static unsigned char synw[6] = { 0x80, 1, 3, 1, 0x19, 0x0e }; + static unsigned char synw_870[6] = { 0x80, 1, 3, 1, 0x0c, 0x07 }; + unsigned char synuw[6] = { 0x80, 1, 3, 1, 0x0a, 0x0e }; + static unsigned char wide[6] = { 0x80, 1, 2, 3, 1, 0 }; + static unsigned char u3[9] = { 0x80, 1, 6, 4, 0x09, 00, 0x0e, 0x01, 0x02 }; + + for (i = 0; i < 16; i++) { + if (!wide_chip && (i > 7)) + break; + m = 1; + m = m << i; + if ((m & dev->active_id[c]) != 0) { + continue; + } + if (i == dev->host_id[c]) { + printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_id[c]); + continue; + } + atp_writeb_io(dev, c, 0x1b, wide_chip ? 0x01 : 0x00); + atp_writeb_io(dev, c, 1, 0x08); + atp_writeb_io(dev, c, 2, 0x7f); + atp_writeb_io(dev, c, 3, satn[0]); + atp_writeb_io(dev, c, 4, satn[1]); + atp_writeb_io(dev, c, 5, satn[2]); + atp_writeb_io(dev, c, 6, satn[3]); + atp_writeb_io(dev, c, 7, satn[4]); + atp_writeb_io(dev, c, 8, satn[5]); + atp_writeb_io(dev, c, 0x0f, 0); + atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); + atp_writeb_io(dev, c, 0x12, 0); + atp_writeb_io(dev, c, 0x13, satn[6]); + atp_writeb_io(dev, c, 0x14, satn[7]); + j = i; + if ((j & 0x08) != 0) { + j = (j & 0x07) | 0x40; + } + atp_writeb_io(dev, c, 0x15, j); + atp_writeb_io(dev, c, 0x18, satn[8]); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) + cpu_relax(); + + if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) + continue; + + while (atp_readb_io(dev, c, 0x17) != 0x8e) + cpu_relax(); + + dev->active_id[c] |= m; + + atp_writeb_io(dev, c, 0x10, 0x30); + if (is885(dev) || is880(dev)) + atp_writeb_io(dev, c, 0x14, 0x00); + else /* result of is870() merge - is this a bug? 
*/ + atp_writeb_io(dev, c, 0x04, 0x00); + +phase_cmd: + atp_writeb_io(dev, c, 0x18, 0x08); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) + cpu_relax(); + + j = atp_readb_io(dev, c, 0x17); + if (j != 0x16) { + atp_writeb_io(dev, c, 0x10, 0x41); + goto phase_cmd; + } +sel_ok: + atp_writeb_io(dev, c, 3, inqd[0]); + atp_writeb_io(dev, c, 4, inqd[1]); + atp_writeb_io(dev, c, 5, inqd[2]); + atp_writeb_io(dev, c, 6, inqd[3]); + atp_writeb_io(dev, c, 7, inqd[4]); + atp_writeb_io(dev, c, 8, inqd[5]); + atp_writeb_io(dev, c, 0x0f, 0); + atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); + atp_writeb_io(dev, c, 0x12, 0); + atp_writeb_io(dev, c, 0x13, inqd[6]); + atp_writeb_io(dev, c, 0x14, inqd[7]); + atp_writeb_io(dev, c, 0x18, inqd[8]); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) + cpu_relax(); + + if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) + continue; + + while (atp_readb_io(dev, c, 0x17) != 0x8e) + cpu_relax(); + + if (wide_chip) + atp_writeb_io(dev, c, 0x1b, 0x00); + + atp_writeb_io(dev, c, 0x18, 0x08); + j = 0; +rd_inq_data: + k = atp_readb_io(dev, c, 0x1f); + if ((k & 0x01) != 0) { + mbuf[j++] = atp_readb_io(dev, c, 0x19); + goto rd_inq_data; + } + if ((k & 0x80) == 0) { + goto rd_inq_data; + } + j = atp_readb_io(dev, c, 0x17); + if (j == 0x16) { + goto inq_ok; + } + atp_writeb_io(dev, c, 0x10, 0x46); + atp_writeb_io(dev, c, 0x12, 0); + atp_writeb_io(dev, c, 0x13, 0); + atp_writeb_io(dev, c, 0x14, 0); + atp_writeb_io(dev, c, 0x18, 0x08); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) + cpu_relax(); + + if (atp_readb_io(dev, c, 0x17) != 0x16) + goto sel_ok; + +inq_ok: + mbuf[36] = 0; + printk(KERN_INFO " ID: %2d %s\n", i, &mbuf[8]); + dev->id[c][i].devtype = mbuf[0]; + rmb = mbuf[1]; + n = mbuf[7]; + if (!wide_chip) + goto not_wide; + if ((mbuf[7] & 0x60) == 0) { + goto not_wide; + } + if (is885(dev) || is880(dev)) { + if ((i < 8) && ((dev->global_map[c] & 0x20) == 0)) + goto not_wide; + } else { /* result of is870() merge - is this a bug? 
*/ + if ((dev->global_map[c] & 0x20) == 0) + goto not_wide; + } + if (lvdmode == 0) { + goto chg_wide; + } + if (dev->sp[c][i] != 0x04) // force u2 + { + goto chg_wide; + } + + atp_writeb_io(dev, c, 0x1b, 0x01); + atp_writeb_io(dev, c, 3, satn[0]); + atp_writeb_io(dev, c, 4, satn[1]); + atp_writeb_io(dev, c, 5, satn[2]); + atp_writeb_io(dev, c, 6, satn[3]); + atp_writeb_io(dev, c, 7, satn[4]); + atp_writeb_io(dev, c, 8, satn[5]); + atp_writeb_io(dev, c, 0x0f, 0); + atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); + atp_writeb_io(dev, c, 0x12, 0); + atp_writeb_io(dev, c, 0x13, satn[6]); + atp_writeb_io(dev, c, 0x14, satn[7]); + atp_writeb_io(dev, c, 0x18, satn[8]); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) + cpu_relax(); + + if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) + continue; + + while (atp_readb_io(dev, c, 0x17) != 0x8e) + cpu_relax(); + +try_u3: + j = 0; + atp_writeb_io(dev, c, 0x14, 0x09); + atp_writeb_io(dev, c, 0x18, 0x20); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { + if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) + atp_writeb_io(dev, c, 0x19, u3[j++]); + cpu_relax(); + } + + while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00) + cpu_relax(); + + j = atp_readb_io(dev, c, 0x17) & 0x0f; + if (j == 0x0f) { + goto u3p_in; + } + if (j == 0x0a) { + goto u3p_cmd; + } + if (j == 0x0e) { + goto try_u3; + } + continue; +u3p_out: + atp_writeb_io(dev, c, 0x18, 0x20); + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { + if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) + atp_writeb_io(dev, c, 0x19, 0); + cpu_relax(); + } + j = atp_readb_io(dev, c, 0x17) & 0x0f; + if (j == 0x0f) { + goto u3p_in; + } + if (j == 0x0a) { + goto u3p_cmd; + } + if (j == 0x0e) { + goto u3p_out; + } + continue; +u3p_in: + atp_writeb_io(dev, c, 0x14, 0x09); + atp_writeb_io(dev, c, 0x18, 0x20); + k = 0; +u3p_in1: + j = atp_readb_io(dev, c, 0x1f); + if ((j & 0x01) != 0) { + mbuf[k++] = atp_readb_io(dev, c, 0x19); + goto u3p_in1; + } + if ((j & 0x80) == 0x00) { + goto u3p_in1; + } + j = atp_readb_io(dev, c, 0x17) & 0x0f; + if (j == 0x0f) { + goto u3p_in; + } + if (j == 0x0a) { + goto u3p_cmd; + } + if (j == 0x0e) { + goto u3p_out; + } + continue; +u3p_cmd: + atp_writeb_io(dev, c, 0x10, 0x30); + atp_writeb_io(dev, c, 0x14, 0x00); + atp_writeb_io(dev, c, 0x18, 0x08); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00); + + j = atp_readb_io(dev, c, 0x17); + if (j != 0x16) { + if (j == 0x4e) { + goto u3p_out; + } + continue; + } + if (mbuf[0] != 0x01) { + goto chg_wide; + } + if (mbuf[1] != 0x06) { + goto chg_wide; + } + if (mbuf[2] != 0x04) { + goto chg_wide; + } + if (mbuf[3] == 0x09) { + m = 1; + m = m << i; + dev->wide_id[c] |= m; + dev->id[c][i].devsp = 0xce; +#ifdef ED_DBGP + printk("dev->id[%2d][%2d].devsp = %2x\n", + c, i, dev->id[c][i].devsp); +#endif + continue; + } +chg_wide: + atp_writeb_io(dev, c, 0x1b, 0x01); + atp_writeb_io(dev, c, 3, satn[0]); + atp_writeb_io(dev, c, 4, satn[1]); + atp_writeb_io(dev, c, 5, satn[2]); + atp_writeb_io(dev, c, 6, satn[3]); + atp_writeb_io(dev, c, 7, satn[4]); + atp_writeb_io(dev, c, 8, satn[5]); + atp_writeb_io(dev, c, 0x0f, 0); + atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); + atp_writeb_io(dev, c, 0x12, 0); + atp_writeb_io(dev, c, 0x13, satn[6]); + atp_writeb_io(dev, c, 0x14, satn[7]); + atp_writeb_io(dev, c, 0x18, satn[8]); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) + cpu_relax(); + + if (atp_readb_io(dev, c, 0x17) != 0x11 && + atp_readb_io(dev, c, 0x17) != 0x8e) + continue; + + while 
(atp_readb_io(dev, c, 0x17) != 0x8e) + cpu_relax(); + +try_wide: + j = 0; + atp_writeb_io(dev, c, 0x14, 0x05); + atp_writeb_io(dev, c, 0x18, 0x20); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { + if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) + atp_writeb_io(dev, c, 0x19, wide[j++]); + cpu_relax(); + } + + while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00) + cpu_relax(); + + j = atp_readb_io(dev, c, 0x17) & 0x0f; + if (j == 0x0f) { + goto widep_in; + } + if (j == 0x0a) { + goto widep_cmd; + } + if (j == 0x0e) { + goto try_wide; + } + continue; +widep_out: + atp_writeb_io(dev, c, 0x18, 0x20); + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { + if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) + atp_writeb_io(dev, c, 0x19, 0); + cpu_relax(); + } + j = atp_readb_io(dev, c, 0x17) & 0x0f; + if (j == 0x0f) { + goto widep_in; + } + if (j == 0x0a) { + goto widep_cmd; + } + if (j == 0x0e) { + goto widep_out; + } + continue; +widep_in: + atp_writeb_io(dev, c, 0x14, 0xff); + atp_writeb_io(dev, c, 0x18, 0x20); + k = 0; +widep_in1: + j = atp_readb_io(dev, c, 0x1f); + if ((j & 0x01) != 0) { + mbuf[k++] = atp_readb_io(dev, c, 0x19); + goto widep_in1; + } + if ((j & 0x80) == 0x00) { + goto widep_in1; + } + j = atp_readb_io(dev, c, 0x17) & 0x0f; + if (j == 0x0f) { + goto widep_in; + } + if (j == 0x0a) { + goto widep_cmd; + } + if (j == 0x0e) { + goto widep_out; + } + continue; +widep_cmd: + atp_writeb_io(dev, c, 0x10, 0x30); + atp_writeb_io(dev, c, 0x14, 0x00); + atp_writeb_io(dev, c, 0x18, 0x08); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) + cpu_relax(); + + j = atp_readb_io(dev, c, 0x17); + if (j != 0x16) { + if (j == 0x4e) { + goto widep_out; + } + continue; + } + if (mbuf[0] != 0x01) { + goto not_wide; + } + if (mbuf[1] != 0x02) { + goto not_wide; + } + if (mbuf[2] != 0x03) { + goto not_wide; + } + if (mbuf[3] != 0x01) { + goto not_wide; + } + m = 1; + m = m << i; + dev->wide_id[c] |= m; +not_wide: + if ((dev->id[c][i].devtype == 0x00) || + (dev->id[c][i].devtype == 0x07) || + ((dev->id[c][i].devtype == 0x05) && ((n & 0x10) != 0))) { + m = 1; + m = m << i; + if ((dev->async[c] & m) != 0) { + goto set_sync; + } + } + continue; +set_sync: + if ((!is885(dev) && !is880(dev)) || (dev->sp[c][i] == 0x02)) { + synu[4] = 0x0c; + synuw[4] = 0x0c; + } else { + if (dev->sp[c][i] >= 0x03) { + synu[4] = 0x0a; + synuw[4] = 0x0a; + } + } + j = 0; + if ((m & dev->wide_id[c]) != 0) { + j |= 0x01; + } + atp_writeb_io(dev, c, 0x1b, j); + atp_writeb_io(dev, c, 3, satn[0]); + atp_writeb_io(dev, c, 4, satn[1]); + atp_writeb_io(dev, c, 5, satn[2]); + atp_writeb_io(dev, c, 6, satn[3]); + atp_writeb_io(dev, c, 7, satn[4]); + atp_writeb_io(dev, c, 8, satn[5]); + atp_writeb_io(dev, c, 0x0f, 0); + atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); + atp_writeb_io(dev, c, 0x12, 0); + atp_writeb_io(dev, c, 0x13, satn[6]); + atp_writeb_io(dev, c, 0x14, satn[7]); + atp_writeb_io(dev, c, 0x18, satn[8]); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) + cpu_relax(); + + if (atp_readb_io(dev, c, 0x17) != 0x11 && + atp_readb_io(dev, c, 0x17) != 0x8e) + continue; + + while (atp_readb_io(dev, c, 0x17) != 0x8e) + cpu_relax(); + +try_sync: + j = 0; + atp_writeb_io(dev, c, 0x14, 0x06); + atp_writeb_io(dev, c, 0x18, 0x20); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { + if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) { + if ((m & dev->wide_id[c]) != 0) { + if (is885(dev) || is880(dev)) { + if ((m & dev->ultra_map[c]) != 0) { + atp_writeb_io(dev, c, 0x19, synuw[j++]); + } else { + atp_writeb_io(dev, c, 0x19, 
synw[j++]); + } + } else + atp_writeb_io(dev, c, 0x19, synw_870[j++]); + } else { + if ((m & dev->ultra_map[c]) != 0) { + atp_writeb_io(dev, c, 0x19, synu[j++]); + } else { + atp_writeb_io(dev, c, 0x19, synn[j++]); + } + } + } + } + + while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00) + cpu_relax(); + + j = atp_readb_io(dev, c, 0x17) & 0x0f; + if (j == 0x0f) { + goto phase_ins; + } + if (j == 0x0a) { + goto phase_cmds; + } + if (j == 0x0e) { + goto try_sync; + } + continue; +phase_outs: + atp_writeb_io(dev, c, 0x18, 0x20); + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) { + if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0x00) + atp_writeb_io(dev, c, 0x19, 0x00); + cpu_relax(); + } + j = atp_readb_io(dev, c, 0x17); + if (j == 0x85) { + goto tar_dcons; + } + j &= 0x0f; + if (j == 0x0f) { + goto phase_ins; + } + if (j == 0x0a) { + goto phase_cmds; + } + if (j == 0x0e) { + goto phase_outs; + } + continue; +phase_ins: + if (is885(dev) || is880(dev)) + atp_writeb_io(dev, c, 0x14, 0x06); + else + atp_writeb_io(dev, c, 0x14, 0xff); + atp_writeb_io(dev, c, 0x18, 0x20); + k = 0; +phase_ins1: + j = atp_readb_io(dev, c, 0x1f); + if ((j & 0x01) != 0x00) { + mbuf[k++] = atp_readb_io(dev, c, 0x19); + goto phase_ins1; + } + if ((j & 0x80) == 0x00) { + goto phase_ins1; + } + + while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00); + + j = atp_readb_io(dev, c, 0x17); + if (j == 0x85) { + goto tar_dcons; + } + j &= 0x0f; + if (j == 0x0f) { + goto phase_ins; + } + if (j == 0x0a) { + goto phase_cmds; + } + if (j == 0x0e) { + goto phase_outs; + } + continue; +phase_cmds: + atp_writeb_io(dev, c, 0x10, 0x30); +tar_dcons: + atp_writeb_io(dev, c, 0x14, 0x00); + atp_writeb_io(dev, c, 0x18, 0x08); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) + cpu_relax(); + + j = atp_readb_io(dev, c, 0x17); + if (j != 0x16) { + continue; + } + if (mbuf[0] != 0x01) { + continue; + } + if (mbuf[1] != 0x03) { + continue; + } + if (mbuf[4] == 0x00) { + continue; + } + if (mbuf[3] > 0x64) { + continue; + } + if (is885(dev) || is880(dev)) { + if (mbuf[4] > 0x0e) { + mbuf[4] = 0x0e; + } + } else { + if (mbuf[4] > 0x0c) { + mbuf[4] = 0x0c; + } + } + dev->id[c][i].devsp = mbuf[4]; + if (is885(dev) || is880(dev)) + if (mbuf[3] < 0x0c) { + j = 0xb0; + goto set_syn_ok; + } + if ((mbuf[3] < 0x0d) && (rmb == 0)) { + j = 0xa0; + goto set_syn_ok; + } + if (mbuf[3] < 0x1a) { + j = 0x20; + goto set_syn_ok; + } + if (mbuf[3] < 0x33) { + j = 0x40; + goto set_syn_ok; + } + if (mbuf[3] < 0x4c) { + j = 0x50; + goto set_syn_ok; + } + j = 0x60; +set_syn_ok: + dev->id[c][i].devsp = (dev->id[c][i].devsp & 0x0f) | j; +#ifdef ED_DBGP + printk("dev->id[%2d][%2d].devsp = %2x\n", + c,i,dev->id[c][i].devsp); +#endif + } +} diff --git a/drivers/scsi/atp870u.h b/drivers/scsi/atp870u.h new file mode 100644 index 000000000..31f6ab24b --- /dev/null +++ b/drivers/scsi/atp870u.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ATP870U_H +#define _ATP870U_H + +#include +#include + +/* I/O Port */ + +#define MAX_CDB 12 +#define MAX_SENSE 14 +#define qcnt 32 +#define ATP870U_SCATTER 128 + +#define MAX_ADAPTER 8 +#define MAX_SCSI_ID 16 +#define ATP870U_MAX_SECTORS 128 + +#define ATP885_DEVID 0x808A +#define ATP880_DEVID1 0x8080 +#define ATP880_DEVID2 0x8081 + +//#define ED_DBGP + +struct atp_unit +{ + unsigned long baseport; + unsigned long ioport[2]; + unsigned long pciport[2]; + unsigned char last_cmd[2]; + unsigned char in_snd[2]; + unsigned char in_int[2]; + unsigned char quhd[2]; + unsigned char quend[2]; + unsigned char global_map[2]; + 
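+	/*
+	 * The two-element arrays in this structure are indexed by SCSI
+	 * channel: the dual-channel ATP885 uses both entries, while the
+	 * single-channel chips only use index 0 (compare atp885_init()
+	 * with atp870_init()/atp880_init()).
+	 */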
unsigned char host_id[2]; + unsigned int working[2]; + unsigned short wide_id[2]; + unsigned short active_id[2]; + unsigned short ultra_map[2]; + unsigned short async[2]; + unsigned char sp[2][16]; + unsigned char r1f[2][16]; + struct scsi_cmnd *quereq[2][qcnt]; + struct atp_id + { + unsigned char dirct; + unsigned char devsp; + unsigned char devtype; + unsigned long tran_len; + unsigned long last_len; + unsigned char *prd_pos; + unsigned char *prd_table; /* Kernel address of PRD table */ + dma_addr_t prd_bus; /* Bus address of PRD */ + dma_addr_t prdaddr; /* Dynamically updated in driver */ + struct scsi_cmnd *curr_req; + } id[2][16]; + struct Scsi_Host *host; + struct pci_dev *pdev; + unsigned int unit; +}; + +#endif diff --git a/drivers/scsi/be2iscsi/Kconfig b/drivers/scsi/be2iscsi/Kconfig new file mode 100644 index 000000000..958c9b46e --- /dev/null +++ b/drivers/scsi/be2iscsi/Kconfig @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0-only +config BE2ISCSI + tristate "Emulex 10Gbps iSCSI - BladeEngine 2" + depends on PCI && SCSI && NET + select SCSI_ISCSI_ATTRS + select ISCSI_BOOT_SYSFS + select IRQ_POLL + + help + This driver implements the iSCSI functionality for Emulex + 10Gbps Storage adapter - BladeEngine 2. diff --git a/drivers/scsi/be2iscsi/Makefile b/drivers/scsi/be2iscsi/Makefile new file mode 100644 index 000000000..910885343 --- /dev/null +++ b/drivers/scsi/be2iscsi/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile to build the iSCSI driver for Emulex OneConnect. +# +# + +obj-$(CONFIG_BE2ISCSI) += be2iscsi.o + +be2iscsi-y := be_iscsi.o be_main.o be_mgmt.o be_cmds.o diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h new file mode 100644 index 000000000..4c58a0259 --- /dev/null +++ b/drivers/scsi/be2iscsi/be.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2017 Broadcom. All Rights Reserved. + * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. 
+ *
+ * Contact Information:
+ * linux-drivers@broadcom.com
+ */
+
+#ifndef BEISCSI_H
+#define BEISCSI_H
+
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include <linux/irq_poll.h>
+#define FW_VER_LEN 32
+#define MCC_Q_LEN 128
+#define MCC_CQ_LEN 256
+#define MAX_MCC_CMD 16
+/* BladeEngine Generation numbers */
+#define BE_GEN2 2
+#define BE_GEN3 3
+#define BE_GEN4 4
+struct be_dma_mem {
+	void *va;
+	dma_addr_t dma;
+	u32 size;
+};
+
+struct be_queue_info {
+	struct be_dma_mem dma_mem;
+	u16 len;
+	u16 entry_size;	/* Size of an element in the queue */
+	u16 id;
+	u16 tail, head;
+	bool created;
+	u16 used;	/* Number of valid elements in the queue */
+};
+
+static inline u32 MODULO(u16 val, u16 limit)
+{
+	WARN_ON(limit & (limit - 1));
+	return val & (limit - 1);
+}
+
+static inline void index_inc(u16 *index, u16 limit)
+{
+	*index = MODULO((*index + 1), limit);
+}
+
+static inline void *queue_head_node(struct be_queue_info *q)
+{
+	return q->dma_mem.va + q->head * q->entry_size;
+}
+
+static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num)
+{
+	return q->dma_mem.va + wrb_num * q->entry_size;
+}
+
+static inline void *queue_tail_node(struct be_queue_info *q)
+{
+	return q->dma_mem.va + q->tail * q->entry_size;
+}
+
+static inline void queue_head_inc(struct be_queue_info *q)
+{
+	index_inc(&q->head, q->len);
+}
+
+static inline void queue_tail_inc(struct be_queue_info *q)
+{
+	index_inc(&q->tail, q->len);
+}
+
+/* ISCSI */
+
+struct be_aic_obj {	/* Adaptive interrupt coalescing (AIC) info */
+	unsigned long jiffies;
+	u32 eq_prev;	/* Used to calculate eqe */
+	u32 prev_eqd;
+#define BEISCSI_EQ_DELAY_MIN 0
+#define BEISCSI_EQ_DELAY_DEF 32
+#define BEISCSI_EQ_DELAY_MAX 128
+};
+
+struct be_eq_obj {
+	u32 cq_count;
+	struct be_queue_info q;
+	struct beiscsi_hba *phba;
+	struct be_queue_info *cq;
+	struct work_struct mcc_work; /* Work Item */
+	struct irq_poll iopoll;
+};
+
+struct be_mcc_obj {
+	struct be_queue_info q;
+	struct be_queue_info cq;
+};
+
+struct beiscsi_mcc_tag_state {
+	unsigned long tag_state;
+#define MCC_TAG_STATE_RUNNING 0
+#define MCC_TAG_STATE_TIMEOUT 1
+#define MCC_TAG_STATE_ASYNC 2
+#define MCC_TAG_STATE_IGNORE 3
+	void (*cbfn)(struct beiscsi_hba *, unsigned int);
+	struct be_dma_mem tag_mem_state;
+};
+
+struct be_ctrl_info {
+	u8 __iomem *csr;
+	u8 __iomem *db;		/* Door Bell */
+	u8 __iomem *pcicfg;	/* PCI config space */
+	struct pci_dev *pdev;
+
+	/* Mbox used for cmd request/response */
+	struct mutex mbox_lock;	/* For serializing mbox cmds to BE card */
+	struct be_dma_mem mbox_mem;
+	/* Mbox mem is adjusted to align to 16 bytes.
The allocated addr + * is stored for freeing purpose */ + struct be_dma_mem mbox_mem_alloced; + + /* MCC Rings */ + struct be_mcc_obj mcc_obj; + spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ + + wait_queue_head_t mcc_wait[MAX_MCC_CMD + 1]; + unsigned int mcc_tag[MAX_MCC_CMD]; + unsigned int mcc_tag_status[MAX_MCC_CMD + 1]; + unsigned short mcc_alloc_index; + unsigned short mcc_free_index; + unsigned int mcc_tag_available; + + struct beiscsi_mcc_tag_state ptag_state[MAX_MCC_CMD + 1]; +}; + +#include "be_cmds.h" + +/* WRB index mask for MCC_Q_LEN queue entries */ +#define MCC_Q_WRB_IDX_MASK CQE_STATUS_WRB_MASK +#define MCC_Q_WRB_IDX_SHIFT CQE_STATUS_WRB_SHIFT +/* TAG is from 1...MAX_MCC_CMD, MASK includes MAX_MCC_CMD */ +#define MCC_Q_CMD_TAG_MASK ((MAX_MCC_CMD << 1) - 1) + +#define PAGE_SHIFT_4K 12 +#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) + +/* Returns number of pages spanned by the data starting at the given addr */ +#define PAGES_4K_SPANNED(_address, size) \ + ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \ + (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K)) + +/* Returns bit offset within a DWORD of a bitfield */ +#define AMAP_BIT_OFFSET(_struct, field) \ + (((size_t)&(((_struct *)0)->field))%32) + +/* Returns the bit mask of the field that is NOT shifted into location. */ +static inline u32 amap_mask(u32 bitsize) +{ + return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1); +} + +static inline void amap_set(void *ptr, u32 dw_offset, u32 mask, + u32 offset, u32 value) +{ + u32 *dw = (u32 *) ptr + dw_offset; + *dw &= ~(mask << offset); + *dw |= (mask & value) << offset; +} + +#define AMAP_SET_BITS(_struct, field, ptr, val) \ + amap_set(ptr, \ + offsetof(_struct, field)/32, \ + amap_mask(sizeof(((_struct *)0)->field)), \ + AMAP_BIT_OFFSET(_struct, field), \ + val) + +static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset) +{ + u32 *dw = ptr; + return mask & (*(dw + dw_offset) >> offset); +} + +#define AMAP_GET_BITS(_struct, field, ptr) \ + amap_get(ptr, \ + offsetof(_struct, field)/32, \ + amap_mask(sizeof(((_struct *)0)->field)), \ + AMAP_BIT_OFFSET(_struct, field)) + +#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len) +#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len) +static inline void swap_dws(void *wrb, int len) +{ +#ifdef __BIG_ENDIAN + u32 *dw = wrb; + WARN_ON(len % 4); + do { + *dw = cpu_to_le32(*dw); + dw++; + len -= 4; + } while (len); +#endif /* __BIG_ENDIAN */ +} +#endif /* BEISCSI_H */ diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c new file mode 100644 index 000000000..0b59b63bc --- /dev/null +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -0,0 +1,1864 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2017 Broadcom. All Rights Reserved. + * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. 
+ * + * Contact Information: + * linux-drivers@broadcom.com + */ + +#include + +#include "be_main.h" +#include "be.h" +#include "be_mgmt.h" + +/* UE Status Low CSR */ +static const char * const desc_ue_status_low[] = { + "CEV", + "CTX", + "DBUF", + "ERX", + "Host", + "MPU", + "NDMA", + "PTC ", + "RDMA ", + "RXF ", + "RXIPS ", + "RXULP0 ", + "RXULP1 ", + "RXULP2 ", + "TIM ", + "TPOST ", + "TPRE ", + "TXIPS ", + "TXULP0 ", + "TXULP1 ", + "UC ", + "WDMA ", + "TXULP2 ", + "HOST1 ", + "P0_OB_LINK ", + "P1_OB_LINK ", + "HOST_GPIO ", + "MBOX ", + "AXGMAC0", + "AXGMAC1", + "JTAG", + "MPU_INTPEND" +}; + +/* UE Status High CSR */ +static const char * const desc_ue_status_hi[] = { + "LPCMEMHOST", + "MGMT_MAC", + "PCS0ONLINE", + "MPU_IRAM", + "PCS1ONLINE", + "PCTL0", + "PCTL1", + "PMEM", + "RR", + "TXPB", + "RXPP", + "XAUI", + "TXP", + "ARM", + "IPC", + "HOST2", + "HOST3", + "HOST4", + "HOST5", + "HOST6", + "HOST7", + "HOST8", + "HOST9", + "NETC", + "Unknown", + "Unknown", + "Unknown", + "Unknown", + "Unknown", + "Unknown", + "Unknown", + "Unknown" +}; + +struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba, + unsigned int *ref_tag) +{ + struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; + struct be_mcc_wrb *wrb = NULL; + unsigned int tag; + + spin_lock(&phba->ctrl.mcc_lock); + if (mccq->used == mccq->len) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | + BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BC_%d : MCC queue full: WRB used %u tag avail %u\n", + mccq->used, phba->ctrl.mcc_tag_available); + goto alloc_failed; + } + + if (!phba->ctrl.mcc_tag_available) + goto alloc_failed; + + tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index]; + if (!tag) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | + BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n", + phba->ctrl.mcc_tag_available, + phba->ctrl.mcc_alloc_index); + goto alloc_failed; + } + + /* return this tag for further reference */ + *ref_tag = tag; + phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0; + phba->ctrl.mcc_tag_status[tag] = 0; + phba->ctrl.ptag_state[tag].tag_state = 0; + phba->ctrl.ptag_state[tag].cbfn = NULL; + phba->ctrl.mcc_tag_available--; + if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1)) + phba->ctrl.mcc_alloc_index = 0; + else + phba->ctrl.mcc_alloc_index++; + + wrb = queue_head_node(mccq); + memset(wrb, 0, sizeof(*wrb)); + wrb->tag0 = tag; + wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK; + queue_head_inc(mccq); + mccq->used++; + +alloc_failed: + spin_unlock(&phba->ctrl.mcc_lock); + return wrb; +} + +void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag) +{ + struct be_queue_info *mccq = &ctrl->mcc_obj.q; + + spin_lock(&ctrl->mcc_lock); + tag = tag & MCC_Q_CMD_TAG_MASK; + ctrl->mcc_tag[ctrl->mcc_free_index] = tag; + if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1)) + ctrl->mcc_free_index = 0; + else + ctrl->mcc_free_index++; + ctrl->mcc_tag_available++; + mccq->used--; + spin_unlock(&ctrl->mcc_lock); +} + +/* + * beiscsi_mcc_compl_status - Return the status of MCC completion + * @phba: Driver private structure + * @tag: Tag for the MBX Command + * @wrb: the WRB used for the MBX Command + * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd + * + * return + * Success: 0 + * Failure: Non-Zero + */ +int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba, + unsigned int tag, + struct be_mcc_wrb **wrb, + struct be_dma_mem *mbx_cmd_mem) +{ + struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; + uint16_t status = 0, addl_status = 0, wrb_num = 0; + 
struct be_cmd_resp_hdr *mbx_resp_hdr; + struct be_cmd_req_hdr *mbx_hdr; + struct be_mcc_wrb *temp_wrb; + uint32_t mcc_tag_status; + int rc = 0; + + mcc_tag_status = phba->ctrl.mcc_tag_status[tag]; + status = (mcc_tag_status & CQE_STATUS_MASK); + addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >> + CQE_STATUS_ADDL_SHIFT); + + if (mbx_cmd_mem) { + mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va; + } else { + wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >> + CQE_STATUS_WRB_SHIFT; + temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num); + mbx_hdr = embedded_payload(temp_wrb); + + if (wrb) + *wrb = temp_wrb; + } + + if (status || addl_status) { + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT | BEISCSI_LOG_EH | + BEISCSI_LOG_CONFIG, + "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n", + mbx_hdr->subsystem, mbx_hdr->opcode, + status, addl_status); + rc = -EIO; + if (status == MCC_STATUS_INSUFFICIENT_BUFFER) { + mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr; + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT | BEISCSI_LOG_EH | + BEISCSI_LOG_CONFIG, + "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n", + mbx_resp_hdr->response_length, + mbx_resp_hdr->actual_resp_len); + rc = -EAGAIN; + } + } + + return rc; +} + +/* + * beiscsi_mccq_compl_wait()- Process completion in MCC CQ + * @phba: Driver private structure + * @tag: Tag for the MBX Command + * @wrb: the WRB used for the MBX Command + * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd + * + * Waits for MBX completion with the passed TAG. + * + * return + * Success: 0 + * Failure: Non-Zero + **/ +int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba, + unsigned int tag, + struct be_mcc_wrb **wrb, + struct be_dma_mem *mbx_cmd_mem) +{ + int rc = 0; + + if (!tag || tag > MAX_MCC_CMD) { + __beiscsi_log(phba, KERN_ERR, + "BC_%d : invalid tag %u\n", tag); + return -EINVAL; + } + + if (beiscsi_hba_in_error(phba)) { + clear_bit(MCC_TAG_STATE_RUNNING, + &phba->ctrl.ptag_state[tag].tag_state); + return -EIO; + } + + /* wait for the mccq completion */ + rc = wait_event_interruptible_timeout(phba->ctrl.mcc_wait[tag], + phba->ctrl.mcc_tag_status[tag], + msecs_to_jiffies( + BEISCSI_HOST_MBX_TIMEOUT)); + /** + * Return EIO if port is being disabled. Associated DMA memory, if any, + * is freed by the caller. When port goes offline, MCCQ is cleaned up + * so does WRB. + */ + if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { + clear_bit(MCC_TAG_STATE_RUNNING, + &phba->ctrl.ptag_state[tag].tag_state); + return -EIO; + } + + /** + * If MBOX cmd timeout expired, tag and resource allocated + * for cmd is not freed until FW returns completion. + */ + if (rc <= 0) { + struct be_dma_mem *tag_mem; + + /** + * PCI/DMA memory allocated and posted in non-embedded mode + * will have mbx_cmd_mem != NULL. + * Save virtual and bus addresses for the command so that it + * can be freed later. 
+ **/ + tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state; + if (mbx_cmd_mem) { + tag_mem->size = mbx_cmd_mem->size; + tag_mem->va = mbx_cmd_mem->va; + tag_mem->dma = mbx_cmd_mem->dma; + } else + tag_mem->size = 0; + + /* first make tag_mem_state visible to all */ + wmb(); + set_bit(MCC_TAG_STATE_TIMEOUT, + &phba->ctrl.ptag_state[tag].tag_state); + + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_INIT | BEISCSI_LOG_EH | + BEISCSI_LOG_CONFIG, + "BC_%d : MBX Cmd Completion timed out\n"); + return -EBUSY; + } + + rc = __beiscsi_mcc_compl_status(phba, tag, wrb, mbx_cmd_mem); + + free_mcc_wrb(&phba->ctrl, tag); + return rc; +} + +/* + * beiscsi_process_mbox_compl()- Check the MBX completion status + * @ctrl: Function specific MBX data structure + * @compl: Completion status of MBX Command + * + * Check for the MBX completion status when BMBX method used + * + * return + * Success: Zero + * Failure: Non-Zero + **/ +static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl, + struct be_mcc_compl *compl) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + struct be_cmd_req_hdr *hdr = embedded_payload(wrb); + u16 compl_status, extd_status; + + /** + * To check if valid bit is set, check the entire word as we don't know + * the endianness of the data (old entry is host endian while a new + * entry is little endian) + */ + if (!compl->flags) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BC_%d : BMBX busy, no completion\n"); + return -EBUSY; + } + compl->flags = le32_to_cpu(compl->flags); + WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0); + + /** + * Just swap the status to host endian; + * mcc tag is opaquely copied from mcc_wrb. + */ + be_dws_le_to_cpu(compl, 4); + compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & + CQE_STATUS_COMPL_MASK; + extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & + CQE_STATUS_EXTD_MASK; + /* Need to reset the entire word that houses the valid bit */ + compl->flags = 0; + + if (compl_status == MCC_STATUS_SUCCESS) + return 0; + + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n", + hdr->subsystem, hdr->opcode, compl_status, extd_status); + return compl_status; +} + +static void beiscsi_process_async_link(struct beiscsi_hba *phba, + struct be_mcc_compl *compl) +{ + struct be_async_event_link_state *evt; + + evt = (struct be_async_event_link_state *)compl; + + phba->port_speed = evt->port_speed; + /** + * Check logical link status in ASYNC event. + * This has been newly introduced in SKH-R Firmware 10.0.338.45. 
+ **/ + if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) { + set_bit(BEISCSI_HBA_LINK_UP, &phba->state); + if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state)) + beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE); + __beiscsi_log(phba, KERN_ERR, + "BC_%d : Link Up on Port %d tag 0x%x\n", + evt->physical_port, evt->event_tag); + } else { + clear_bit(BEISCSI_HBA_LINK_UP, &phba->state); + __beiscsi_log(phba, KERN_ERR, + "BC_%d : Link Down on Port %d tag 0x%x\n", + evt->physical_port, evt->event_tag); + iscsi_host_for_each_session(phba->shost, + beiscsi_session_fail); + } +} + +static char *beiscsi_port_misconf_event_msg[] = { + "Physical Link is functional.", + "Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.", + "Optics of two types installed - Remove one optic or install matching pair of optics.", + "Incompatible optics - Replace with compatible optics for card to function.", + "Unqualified optics - Replace with Avago optics for Warranty and Technical Support.", + "Uncertified optics - Replace with Avago Certified optics to enable link operation." +}; + +static void beiscsi_process_async_sli(struct beiscsi_hba *phba, + struct be_mcc_compl *compl) +{ + struct be_async_event_sli *async_sli; + u8 evt_type, state, old_state, le; + char *sev = KERN_WARNING; + char *msg = NULL; + + evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT; + evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK; + + /* processing only MISCONFIGURED physical port event */ + if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED) + return; + + async_sli = (struct be_async_event_sli *)compl; + state = async_sli->event_data1 >> + (phba->fw_config.phys_port * 8) & 0xff; + le = async_sli->event_data2 >> + (phba->fw_config.phys_port * 8) & 0xff; + + old_state = phba->optic_state; + phba->optic_state = state; + + if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) { + /* fw is reporting a state we don't know, log and return */ + __beiscsi_log(phba, KERN_ERR, + "BC_%d : Port %c: Unrecognized optic state 0x%x\n", + phba->port_name, async_sli->event_data1); + return; + } + + if (ASYNC_SLI_LINK_EFFECT_VALID(le)) { + /* log link effect for unqualified-4, uncertified-5 optics */ + if (state > 3) + msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ? + " Link is non-operational." : + " Link is operational."; + /* 1 - info */ + if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1) + sev = KERN_INFO; + /* 2 - error */ + if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2) + sev = KERN_ERR; + } + + if (old_state != phba->optic_state) + __beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n", + phba->port_name, + beiscsi_port_misconf_event_msg[state], + !msg ? 
"" : msg); +} + +void beiscsi_process_async_event(struct beiscsi_hba *phba, + struct be_mcc_compl *compl) +{ + char *sev = KERN_INFO; + u8 evt_code; + + /* interpret flags as an async trailer */ + evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT; + evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK; + switch (evt_code) { + case ASYNC_EVENT_CODE_LINK_STATE: + beiscsi_process_async_link(phba, compl); + break; + case ASYNC_EVENT_CODE_ISCSI: + if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state)) + beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE); + sev = KERN_ERR; + break; + case ASYNC_EVENT_CODE_SLI: + beiscsi_process_async_sli(phba, compl); + break; + default: + /* event not registered */ + sev = KERN_ERR; + } + + beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n", + evt_code, compl->status, compl->flags); +} + +int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl, + struct be_mcc_compl *compl) +{ + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + u16 compl_status, extd_status; + struct be_dma_mem *tag_mem; + unsigned int tag, wrb_idx; + + be_dws_le_to_cpu(compl, 4); + tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK); + wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT; + + if (!test_bit(MCC_TAG_STATE_RUNNING, + &ctrl->ptag_state[tag].tag_state)) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX | + BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, + "BC_%d : MBX cmd completed but not posted\n"); + return 0; + } + + /* end MCC with this tag */ + clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state); + + if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) { + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT | + BEISCSI_LOG_CONFIG, + "BC_%d : MBX Completion for timeout Command from FW\n"); + /** + * Check for the size before freeing resource. + * Only for non-embedded cmd, PCI resource is allocated. 
+ **/ + tag_mem = &ctrl->ptag_state[tag].tag_mem_state; + if (tag_mem->size) { + dma_free_coherent(&ctrl->pdev->dev, tag_mem->size, + tag_mem->va, tag_mem->dma); + tag_mem->size = 0; + } + free_mcc_wrb(ctrl, tag); + return 0; + } + + compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & + CQE_STATUS_COMPL_MASK; + extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & + CQE_STATUS_EXTD_MASK; + /* The ctrl.mcc_tag_status[tag] is filled with + * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status, + * [7:0] = compl_status + */ + ctrl->mcc_tag_status[tag] = CQE_VALID_MASK; + ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT); + ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) & + CQE_STATUS_ADDL_MASK; + ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK); + + if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state)) { + if (ctrl->ptag_state[tag].cbfn) + ctrl->ptag_state[tag].cbfn(phba, tag); + else + __beiscsi_log(phba, KERN_ERR, + "BC_%d : MBX ASYNC command with no callback\n"); + free_mcc_wrb(ctrl, tag); + return 0; + } + + if (test_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state)) { + /* just check completion status and free wrb */ + __beiscsi_mcc_compl_status(phba, tag, NULL, NULL); + free_mcc_wrb(ctrl, tag); + return 0; + } + + wake_up_interruptible(&ctrl->mcc_wait[tag]); + return 0; +} + +void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag) +{ + struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; + u32 val = 0; + + set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state); + val |= mccq->id & DB_MCCQ_RING_ID_MASK; + val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; + /* make request available for DMA */ + wmb(); + iowrite32(val, phba->db_va + DB_MCCQ_OFFSET); +} + +/* + * be_mbox_db_ready_poll()- Check ready status + * @ctrl: Function specific MBX data structure + * + * Check for the ready status of FW to send BMBX + * commands to adapter. + * + * return + * Success: 0 + * Failure: Non-Zero + **/ +static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl) +{ + /* wait 30s for generic non-flash MBOX operation */ +#define BEISCSI_MBX_RDY_BIT_TIMEOUT 30000 + void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + unsigned long timeout; + u32 ready; + + /* + * This BMBX busy wait path is used during init only. + * For the commands executed during init, 5s should suffice. 
+ */ + timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT); + do { + if (beiscsi_hba_in_error(phba)) + return -EIO; + + ready = ioread32(db); + if (ready == 0xffffffff) + return -EIO; + + ready &= MPU_MAILBOX_DB_RDY_MASK; + if (ready) + return 0; + + if (time_after(jiffies, timeout)) + break; + /* 1ms sleep is enough in most cases */ + schedule_timeout_uninterruptible(msecs_to_jiffies(1)); + } while (!ready); + + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BC_%d : FW Timed Out\n"); + set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state); + return -EBUSY; +} + +/* + * be_mbox_notify: Notify adapter of new BMBX command + * @ctrl: Function specific MBX data structure + * + * Ring doorbell to inform adapter of a BMBX command + * to process + * + * return + * Success: 0 + * Failure: Non-Zero + **/ +static int be_mbox_notify(struct be_ctrl_info *ctrl) +{ + int status; + u32 val = 0; + void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; + struct be_dma_mem *mbox_mem = &ctrl->mbox_mem; + struct be_mcc_mailbox *mbox = mbox_mem->va; + + status = be_mbox_db_ready_poll(ctrl); + if (status) + return status; + + val &= ~MPU_MAILBOX_DB_RDY_MASK; + val |= MPU_MAILBOX_DB_HI_MASK; + val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; + iowrite32(val, db); + + status = be_mbox_db_ready_poll(ctrl); + if (status) + return status; + + val = 0; + val &= ~MPU_MAILBOX_DB_RDY_MASK; + val &= ~MPU_MAILBOX_DB_HI_MASK; + val |= (u32) (mbox_mem->dma >> 4) << 2; + iowrite32(val, db); + + status = be_mbox_db_ready_poll(ctrl); + if (status) + return status; + + /* RDY is set; small delay before CQE read. */ + udelay(1); + + status = beiscsi_process_mbox_compl(ctrl, &mbox->compl); + return status; +} + +void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, u32 payload_len, + bool embedded, u8 sge_cnt) +{ + if (embedded) + wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK; + else + wrb->emb_sgecnt_special |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) << + MCC_WRB_SGE_CNT_SHIFT; + wrb->payload_length = payload_len; + be_dws_cpu_to_le(wrb, 8); +} + +void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, + u8 subsystem, u8 opcode, u32 cmd_len) +{ + req_hdr->opcode = opcode; + req_hdr->subsystem = subsystem; + req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); + req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT; +} + +static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, + struct be_dma_mem *mem) +{ + int i, buf_pages; + u64 dma = (u64) mem->dma; + + buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); + for (i = 0; i < buf_pages; i++) { + pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); + pages[i].hi = cpu_to_le32(upper_32_bits(dma)); + dma += PAGE_SIZE_4K; + } +} + +static u32 eq_delay_to_mult(u32 usec_delay) +{ +#define MAX_INTR_RATE 651042 + const u32 round = 10; + u32 multiplier; + + if (usec_delay == 0) + multiplier = 0; + else { + u32 interrupt_rate = 1000000 / usec_delay; + if (interrupt_rate == 0) + multiplier = 1023; + else { + multiplier = (MAX_INTR_RATE - interrupt_rate) * round; + multiplier /= interrupt_rate; + multiplier = (multiplier + round / 2) / round; + multiplier = min(multiplier, (u32) 1023); + } + } + return multiplier; +} + +struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem) +{ + return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; +} + +int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, + struct be_queue_info *eq, int eq_delay) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_eq_create *req = 
embedded_payload(wrb); + struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); + struct be_dma_mem *q_mem = &eq->dma_mem; + int status; + + mutex_lock(&ctrl->mbox_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_EQ_CREATE, sizeof(*req)); + + req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); + + AMAP_SET_BITS(struct amap_eq_context, func, req->context, + PCI_FUNC(ctrl->pdev->devfn)); + AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); + AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); + AMAP_SET_BITS(struct amap_eq_context, count, req->context, + __ilog2_u32(eq->len / 256)); + AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context, + eq_delay_to_mult(eq_delay)); + be_dws_cpu_to_le(req->context, sizeof(req->context)); + + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_notify(ctrl); + if (!status) { + eq->id = le16_to_cpu(resp->eq_id); + eq->created = true; + } + mutex_unlock(&ctrl->mbox_lock); + return status; +} + +int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, + struct be_queue_info *cq, struct be_queue_info *eq, + bool sol_evts, bool no_delay, int coalesce_wm) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_cq_create *req = embedded_payload(wrb); + struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + struct be_dma_mem *q_mem = &cq->dma_mem; + void *ctxt = &req->context; + int status; + + mutex_lock(&ctrl->mbox_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_CQ_CREATE, sizeof(*req)); + + req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); + if (is_chip_be2_be3r(phba)) { + AMAP_SET_BITS(struct amap_cq_context, coalescwm, + ctxt, coalesce_wm); + AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); + AMAP_SET_BITS(struct amap_cq_context, count, ctxt, + __ilog2_u32(cq->len / 256)); + AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); + AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); + AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context, func, ctxt, + PCI_FUNC(ctrl->pdev->devfn)); + } else { + req->hdr.version = MBX_CMD_VER2; + req->page_size = 1; + AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, + ctxt, coalesce_wm); + AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, + ctxt, no_delay); + AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, + __ilog2_u32(cq->len / 256)); + AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); + AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1); + } + + be_dws_cpu_to_le(ctxt, sizeof(req->context)); + + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_notify(ctrl); + if (!status) { + cq->id = le16_to_cpu(resp->cq_id); + cq->created = true; + } else + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BC_%d : In be_cmd_cq_create, status=ox%08x\n", + status); + + mutex_unlock(&ctrl->mbox_lock); + + return status; +} + +static u32 
be_encoded_q_len(int q_len) +{ + u32 len_encoded = fls(q_len); /* log2(len) + 1 */ + if (len_encoded == 16) + len_encoded = 0; + return len_encoded; +} + +int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba, + struct be_queue_info *mccq, + struct be_queue_info *cq) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_mcc_create_ext *req; + struct be_dma_mem *q_mem = &mccq->dma_mem; + struct be_ctrl_info *ctrl; + void *ctxt; + int status; + + mutex_lock(&phba->ctrl.mbox_lock); + ctrl = &phba->ctrl; + wrb = wrb_from_mbox(&ctrl->mbox_mem); + memset(wrb, 0, sizeof(*wrb)); + req = embedded_payload(wrb); + ctxt = &req->context; + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req)); + + req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); + req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE; + req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI; + req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI; + + AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, + PCI_FUNC(phba->pcidev->devfn)); + AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt, + be_encoded_q_len(mccq->len)); + AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); + + be_dws_cpu_to_le(ctxt, sizeof(req->context)); + + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_notify(ctrl); + if (!status) { + struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); + mccq->id = le16_to_cpu(resp->id); + mccq->created = true; + } + mutex_unlock(&phba->ctrl.mbox_lock); + + return status; +} + +int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, + int queue_type) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_q_destroy *req = embedded_payload(wrb); + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + u8 subsys = 0, opcode = 0; + int status; + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BC_%d : In beiscsi_cmd_q_destroy " + "queue_type : %d\n", queue_type); + + mutex_lock(&ctrl->mbox_lock); + memset(wrb, 0, sizeof(*wrb)); + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + switch (queue_type) { + case QTYPE_EQ: + subsys = CMD_SUBSYSTEM_COMMON; + opcode = OPCODE_COMMON_EQ_DESTROY; + break; + case QTYPE_CQ: + subsys = CMD_SUBSYSTEM_COMMON; + opcode = OPCODE_COMMON_CQ_DESTROY; + break; + case QTYPE_MCCQ: + subsys = CMD_SUBSYSTEM_COMMON; + opcode = OPCODE_COMMON_MCC_DESTROY; + break; + case QTYPE_WRBQ: + subsys = CMD_SUBSYSTEM_ISCSI; + opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY; + break; + case QTYPE_DPDUQ: + subsys = CMD_SUBSYSTEM_ISCSI; + opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY; + break; + case QTYPE_SGL: + subsys = CMD_SUBSYSTEM_ISCSI; + opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES; + break; + default: + mutex_unlock(&ctrl->mbox_lock); + BUG(); + } + be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req)); + if (queue_type != QTYPE_SGL) + req->id = cpu_to_le16(q->id); + + status = be_mbox_notify(ctrl); + + mutex_unlock(&ctrl->mbox_lock); + return status; +} + +/** + * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter + * @ctrl: ptr to ctrl_info + * @cq: Completion Queue + * @dq: Default Queue + * @length: ring size + * @entry_size: size of each entry in DEFQ + * @is_header: Header or Data DEFQ + * @ulp_num: Bind to which ULP + * + * Create HDR/Data DEFQ for the passed ULP. 
Unsol PDU are posted + * on this queue by the FW + * + * return + * Success: 0 + * Failure: Non-Zero Value + * + **/ +int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, + struct be_queue_info *cq, + struct be_queue_info *dq, int length, + int entry_size, uint8_t is_header, + uint8_t ulp_num) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_defq_create_req *req = embedded_payload(wrb); + struct be_dma_mem *q_mem = &dq->dma_mem; + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + void *ctxt = &req->context; + int status; + + mutex_lock(&ctrl->mbox_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req)); + + req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); + if (phba->fw_config.dual_ulp_aware) { + req->ulp_num = ulp_num; + req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT); + req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT); + } + + if (is_chip_be2_be3r(phba)) { + AMAP_SET_BITS(struct amap_be_default_pdu_context, + rx_pdid, ctxt, 0); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + rx_pdid_valid, ctxt, 1); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn)); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + ring_size, ctxt, + be_encoded_q_len(length / + sizeof(struct phys_addr))); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + default_buffer_size, ctxt, entry_size); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + cq_id_recv, ctxt, cq->id); + } else { + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + rx_pdid, ctxt, 0); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + rx_pdid_valid, ctxt, 1); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + ring_size, ctxt, + be_encoded_q_len(length / + sizeof(struct phys_addr))); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + default_buffer_size, ctxt, entry_size); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + cq_id_recv, ctxt, cq->id); + } + + be_dws_cpu_to_le(ctxt, sizeof(req->context)); + + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_notify(ctrl); + if (!status) { + struct be_ring *defq_ring; + struct be_defq_create_resp *resp = embedded_payload(wrb); + + dq->id = le16_to_cpu(resp->id); + dq->created = true; + if (is_header) + defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num]; + else + defq_ring = &phba->phwi_ctrlr-> + default_pdu_data[ulp_num]; + + defq_ring->id = dq->id; + + if (!phba->fw_config.dual_ulp_aware) { + defq_ring->ulp_num = BEISCSI_ULP0; + defq_ring->doorbell_offset = DB_RXULP0_OFFSET; + } else { + defq_ring->ulp_num = resp->ulp_num; + defq_ring->doorbell_offset = resp->doorbell_offset; + } + } + mutex_unlock(&ctrl->mbox_lock); + + return status; +} + +/** + * be_cmd_wrbq_create()- Create WRBQ + * @ctrl: ptr to ctrl_info + * @q_mem: memory details for the queue + * @wrbq: queue info + * @pwrb_context: ptr to wrb_context + * @ulp_num: ULP on which the WRBQ is to be created + * + * Create WRBQ on the passed ULP_NUM. 
+ * + **/ +int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, + struct be_dma_mem *q_mem, + struct be_queue_info *wrbq, + struct hwi_wrb_context *pwrb_context, + uint8_t ulp_num) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_wrbq_create_req *req = embedded_payload(wrb); + struct be_wrbq_create_resp *resp = embedded_payload(wrb); + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + int status; + + mutex_lock(&ctrl->mbox_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req)); + req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); + + if (phba->fw_config.dual_ulp_aware) { + req->ulp_num = ulp_num; + req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT); + req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT); + } + + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_notify(ctrl); + if (!status) { + wrbq->id = le16_to_cpu(resp->cid); + wrbq->created = true; + + pwrb_context->cid = wrbq->id; + if (!phba->fw_config.dual_ulp_aware) { + pwrb_context->doorbell_offset = DB_TXULP0_OFFSET; + pwrb_context->ulp_num = BEISCSI_ULP0; + } else { + pwrb_context->ulp_num = resp->ulp_num; + pwrb_context->doorbell_offset = resp->doorbell_offset; + } + } + mutex_unlock(&ctrl->mbox_lock); + return status; +} + +int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl, + struct be_dma_mem *q_mem) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_post_template_pages_req *req = embedded_payload(wrb); + int status; + + mutex_lock(&ctrl->mbox_lock); + + memset(wrb, 0, sizeof(*wrb)); + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS, + sizeof(*req)); + + req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); + req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI; + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_notify(ctrl); + mutex_unlock(&ctrl->mbox_lock); + return status; +} + +int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_remove_template_pages_req *req = embedded_payload(wrb); + int status; + + mutex_lock(&ctrl->mbox_lock); + + memset(wrb, 0, sizeof(*wrb)); + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS, + sizeof(*req)); + + req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI; + + status = be_mbox_notify(ctrl); + mutex_unlock(&ctrl->mbox_lock); + return status; +} + +int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl, + struct be_dma_mem *q_mem, + u32 page_offset, u32 num_pages) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_post_sgl_pages_req *req = embedded_payload(wrb); + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + int status; + unsigned int curr_pages; + u32 temp_num_pages = num_pages; + + if (num_pages == 0xff) + num_pages = 1; + + mutex_lock(&ctrl->mbox_lock); + do { + memset(wrb, 0, sizeof(*wrb)); + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES, + sizeof(*req)); + curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req, + pages); + req->num_pages = min(num_pages, curr_pages); + req->page_offset = 
page_offset; + be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem); + q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE); + page_offset += req->num_pages; + num_pages -= req->num_pages; + + if (temp_num_pages == 0xff) + req->num_pages = temp_num_pages; + + status = be_mbox_notify(ctrl); + if (status) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BC_%d : FW CMD to map iscsi frags failed.\n"); + + goto error; + } + } while (num_pages > 0); +error: + mutex_unlock(&ctrl->mbox_lock); + if (status != 0) + beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); + return status; +} + +/** + * be_cmd_set_vlan()- Configure VLAN paramters on the adapter + * @phba: device priv structure instance + * @vlan_tag: TAG to be set + * + * Set the VLAN_TAG for the adapter or Disable VLAN on adapter + * + * returns + * TAG for the MBX Cmd + * **/ +int be_cmd_set_vlan(struct beiscsi_hba *phba, + uint16_t vlan_tag) +{ + unsigned int tag; + struct be_mcc_wrb *wrb; + struct be_cmd_set_vlan_req *req; + struct be_ctrl_info *ctrl = &phba->ctrl; + + if (mutex_lock_interruptible(&ctrl->mbox_lock)) + return 0; + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; + } + + req = embedded_payload(wrb); + be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_NTWK_SET_VLAN, + sizeof(*req)); + + req->interface_hndl = phba->interface_handle; + req->vlan_priority = vlan_tag; + + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); + + return tag; +} + +int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl, + struct beiscsi_hba *phba) +{ + struct be_dma_mem nonemb_cmd; + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_mgmt_controller_attributes *req; + struct be_sge *sge = nonembedded_sgl(wrb); + int status = 0; + + nonemb_cmd.va = dma_alloc_coherent(&ctrl->pdev->dev, + sizeof(struct be_mgmt_controller_attributes), + &nonemb_cmd.dma, GFP_KERNEL); + if (nonemb_cmd.va == NULL) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BG_%d : dma_alloc_coherent failed in %s\n", + __func__); + return -ENOMEM; + } + nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes); + req = nonemb_cmd.va; + memset(req, 0, sizeof(*req)); + mutex_lock(&ctrl->mbox_lock); + memset(wrb, 0, sizeof(*wrb)); + be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req)); + sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma)); + sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); + sge->len = cpu_to_le32(nonemb_cmd.size); + status = be_mbox_notify(ctrl); + if (!status) { + struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va; + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BG_%d : Firmware Version of CMD : %s\n" + "Firmware Version is : %s\n" + "Developer Build, not performing version check...\n", + resp->params.hba_attribs + .flashrom_version_string, + resp->params.hba_attribs. + firmware_version_string); + + phba->fw_config.iscsi_features = + resp->params.hba_attribs.iscsi_features; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : phba->fw_config.iscsi_features = %d\n", + phba->fw_config.iscsi_features); + memcpy(phba->fw_ver_str, resp->params.hba_attribs. 
+ firmware_version_string, BEISCSI_VER_STRLEN); + } else + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BG_%d : Failed in beiscsi_check_supported_fw\n"); + mutex_unlock(&ctrl->mbox_lock); + if (nonemb_cmd.va) + dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + + return status; +} + +/** + * beiscsi_get_fw_config()- Get the FW config for the function + * @ctrl: ptr to Ctrl Info + * @phba: ptr to the dev priv structure + * + * Get the FW config and resources available for the function. + * The resources are created based on the count received here. + * + * return + * Success: 0 + * Failure: Non-Zero Value + **/ +int beiscsi_get_fw_config(struct be_ctrl_info *ctrl, + struct beiscsi_hba *phba) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_fw_cfg *pfw_cfg = embedded_payload(wrb); + uint32_t cid_count, icd_count; + int status = -EINVAL; + uint8_t ulp_num = 0; + + mutex_lock(&ctrl->mbox_lock); + memset(wrb, 0, sizeof(*wrb)); + be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0); + + be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, + EMBED_MBX_MAX_PAYLOAD_SIZE); + + if (be_mbox_notify(ctrl)) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BG_%d : Failed in beiscsi_get_fw_config\n"); + goto fail_init; + } + + /* FW response formats depend on port id */ + phba->fw_config.phys_port = pfw_cfg->phys_port; + if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BG_%d : invalid physical port id %d\n", + phba->fw_config.phys_port); + goto fail_init; + } + + /* populate and check FW config against min and max values */ + if (!is_chip_be2_be3r(phba)) { + phba->fw_config.eqid_count = pfw_cfg->eqid_count; + phba->fw_config.cqid_count = pfw_cfg->cqid_count; + if (phba->fw_config.eqid_count == 0 || + phba->fw_config.eqid_count > 2048) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BG_%d : invalid EQ count %d\n", + phba->fw_config.eqid_count); + goto fail_init; + } + if (phba->fw_config.cqid_count == 0 || + phba->fw_config.cqid_count > 4096) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BG_%d : invalid CQ count %d\n", + phba->fw_config.cqid_count); + goto fail_init; + } + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BG_%d : EQ_Count : %d CQ_Count : %d\n", + phba->fw_config.eqid_count, + phba->fw_config.cqid_count); + } + + /** + * Check on which all ULP iSCSI Protocol is loaded. + * Set the Bit for those ULP. 
This set flag is used + * at all places in the code to check on which ULP + * iSCSi Protocol is loaded + **/ + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (pfw_cfg->ulp[ulp_num].ulp_mode & + BEISCSI_ULP_ISCSI_INI_MODE) { + set_bit(ulp_num, &phba->fw_config.ulp_supported); + + /* Get the CID, ICD and Chain count for each ULP */ + phba->fw_config.iscsi_cid_start[ulp_num] = + pfw_cfg->ulp[ulp_num].sq_base; + phba->fw_config.iscsi_cid_count[ulp_num] = + pfw_cfg->ulp[ulp_num].sq_count; + + phba->fw_config.iscsi_icd_start[ulp_num] = + pfw_cfg->ulp[ulp_num].icd_base; + phba->fw_config.iscsi_icd_count[ulp_num] = + pfw_cfg->ulp[ulp_num].icd_count; + + phba->fw_config.iscsi_chain_start[ulp_num] = + pfw_cfg->chain_icd[ulp_num].chain_base; + phba->fw_config.iscsi_chain_count[ulp_num] = + pfw_cfg->chain_icd[ulp_num].chain_count; + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BG_%d : Function loaded on ULP : %d\n" + "\tiscsi_cid_count : %d\n" + "\tiscsi_cid_start : %d\n" + "\t iscsi_icd_count : %d\n" + "\t iscsi_icd_start : %d\n", + ulp_num, + phba->fw_config. + iscsi_cid_count[ulp_num], + phba->fw_config. + iscsi_cid_start[ulp_num], + phba->fw_config. + iscsi_icd_count[ulp_num], + phba->fw_config. + iscsi_icd_start[ulp_num]); + } + } + + if (phba->fw_config.ulp_supported == 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n", + pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode, + pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode); + goto fail_init; + } + + /** + * ICD is shared among ULPs. Use icd_count of any one loaded ULP + **/ + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) + break; + icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; + if (icd_count == 0 || icd_count > 65536) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BG_%d: invalid ICD count %d\n", icd_count); + goto fail_init; + } + + cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) + + BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1); + if (cid_count == 0 || cid_count > 4096) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BG_%d: invalid CID count %d\n", cid_count); + goto fail_init; + } + + /** + * Check FW is dual ULP aware i.e. can handle either + * of the protocols. 
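[Editorial note, not part of the patch: the ulp_supported bitmap populated in the loop above is what the rest of the driver consults to decide which ULPs carry iSCSI. A minimal usage sketch of that test_bit() pattern; the function name is hypothetical, BEISCSI_ULP_COUNT and the fw_config fields are as used above.]

    /* Illustrative only: walk the ULPs on which the iSCSI protocol is loaded. */
    static void example_for_each_iscsi_ulp(struct beiscsi_hba *phba)
    {
            u8 ulp_num;

            for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
                    if (!test_bit(ulp_num, &phba->fw_config.ulp_supported))
                            continue;
                    /* per-ULP CID/ICD ranges and DEFQ ids are valid here */
            }
    }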
+ */ + phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode & + BEISCSI_FUNC_DUA_MODE); + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BG_%d : DUA Mode : 0x%x\n", + phba->fw_config.dual_ulp_aware); + + /* all set, continue using this FW config */ + status = 0; +fail_init: + mutex_unlock(&ctrl->mbox_lock); + return status; +} + +/** + * beiscsi_get_port_name()- Get port name for the function + * @ctrl: ptr to Ctrl Info + * @phba: ptr to the dev priv structure + * + * Get the alphanumeric character for port + * + **/ +int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba) +{ + int ret = 0; + struct be_mcc_wrb *wrb; + struct be_cmd_get_port_name *ioctl; + + mutex_lock(&ctrl->mbox_lock); + wrb = wrb_from_mbox(&ctrl->mbox_mem); + memset(wrb, 0, sizeof(*wrb)); + ioctl = embedded_payload(wrb); + + be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0); + be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_PORT_NAME, + EMBED_MBX_MAX_PAYLOAD_SIZE); + ret = be_mbox_notify(ctrl); + phba->port_name = 0; + if (!ret) { + phba->port_name = ioctl->p.resp.port_names >> + (phba->fw_config.phys_port * 8) & 0xff; + } else { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n", + ret, ioctl->h.resp_hdr.status); + } + + if (phba->port_name == 0) + phba->port_name = '?'; + + mutex_unlock(&ctrl->mbox_lock); + return ret; +} + +int beiscsi_set_host_data(struct beiscsi_hba *phba) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_cmd_set_host_data *ioctl; + struct be_mcc_wrb *wrb; + int ret = 0; + + if (is_chip_be2_be3r(phba)) + return ret; + + mutex_lock(&ctrl->mbox_lock); + wrb = wrb_from_mbox(&ctrl->mbox_mem); + memset(wrb, 0, sizeof(*wrb)); + ioctl = embedded_payload(wrb); + + be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0); + be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_HOST_DATA, + EMBED_MBX_MAX_PAYLOAD_SIZE); + ioctl->param.req.param_id = BE_CMD_SET_HOST_PARAM_ID; + ioctl->param.req.param_len = + snprintf((char *)ioctl->param.req.param_data, + sizeof(ioctl->param.req.param_data), + "Linux iSCSI v%s", BUILD_STR); + ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len + 1, 4); + if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION) + ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION; + ret = be_mbox_notify(ctrl); + if (!ret) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BG_%d : HBA set host driver version\n"); + } else { + /** + * Check "MCC_STATUS_INVALID_LENGTH" for SKH. + * Older FW versions return this error. 
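[Editorial note, not part of the patch: beiscsi_get_port_name() above unpacks one ASCII character per physical port from the 32-bit port_names word by shifting phys_port * 8 bits and masking a byte. A worked example with hypothetical values; the helper name is an assumption.]

    /* Illustrative only: extract this function's port character. */
    static inline char example_port_name(u32 port_names, u32 phys_port)
    {
            return (port_names >> (phys_port * 8)) & 0xff;
    }

    /* e.g. port_names = 0x34333231 and phys_port = 2 yields 0x33, i.e. '3' */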
+ */ + if (ret == MCC_STATUS_ILLEGAL_REQUEST || + ret == MCC_STATUS_INVALID_LENGTH) + __beiscsi_log(phba, KERN_INFO, + "BG_%d : HBA failed to set host driver version\n"); + } + + mutex_unlock(&ctrl->mbox_lock); + return ret; +} + +int beiscsi_set_uer_feature(struct beiscsi_hba *phba) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_cmd_set_features *ioctl; + struct be_mcc_wrb *wrb; + int ret = 0; + + mutex_lock(&ctrl->mbox_lock); + wrb = wrb_from_mbox(&ctrl->mbox_mem); + memset(wrb, 0, sizeof(*wrb)); + ioctl = embedded_payload(wrb); + + be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0); + be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_FEATURES, + EMBED_MBX_MAX_PAYLOAD_SIZE); + ioctl->feature = BE_CMD_SET_FEATURE_UER; + ioctl->param_len = sizeof(ioctl->param.req); + ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT; + ret = be_mbox_notify(ctrl); + if (!ret) { + phba->ue2rp = ioctl->param.resp.ue2rp; + set_bit(BEISCSI_HBA_UER_SUPP, &phba->state); + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BG_%d : HBA error recovery supported\n"); + } else { + /** + * Check "MCC_STATUS_INVALID_LENGTH" for SKH. + * Older FW versions return this error. + */ + if (ret == MCC_STATUS_ILLEGAL_REQUEST || + ret == MCC_STATUS_INVALID_LENGTH) + __beiscsi_log(phba, KERN_INFO, + "BG_%d : HBA error recovery not supported\n"); + } + + mutex_unlock(&ctrl->mbox_lock); + return ret; +} + +static u32 beiscsi_get_post_stage(struct beiscsi_hba *phba) +{ + u32 sem; + + if (is_chip_be2_be3r(phba)) + sem = ioread32(phba->csr_va + SLIPORT_SEMAPHORE_OFFSET_BEx); + else + pci_read_config_dword(phba->pcidev, + SLIPORT_SEMAPHORE_OFFSET_SH, &sem); + return sem; +} + +int beiscsi_check_fw_rdy(struct beiscsi_hba *phba) +{ + u32 loop, post, rdy = 0; + + loop = 1000; + while (loop--) { + post = beiscsi_get_post_stage(phba); + if (post & POST_ERROR_BIT) + break; + if ((post & POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY) { + rdy = 1; + break; + } + msleep(60); + } + + if (!rdy) { + __beiscsi_log(phba, KERN_ERR, + "BC_%d : FW not ready 0x%x\n", post); + } + + return rdy; +} + +int beiscsi_cmd_function_reset(struct beiscsi_hba *phba) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_post_sgl_pages_req *req; + int status; + + mutex_lock(&ctrl->mbox_lock); + + req = embedded_payload(wrb); + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_FUNCTION_RESET, sizeof(*req)); + status = be_mbox_notify(ctrl); + + mutex_unlock(&ctrl->mbox_lock); + return status; +} + +int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + u8 *endian_check; + int status; + + mutex_lock(&ctrl->mbox_lock); + memset(wrb, 0, sizeof(*wrb)); + + endian_check = (u8 *) wrb; + if (load) { + /* to start communicating */ + *endian_check++ = 0xFF; + *endian_check++ = 0x12; + *endian_check++ = 0x34; + *endian_check++ = 0xFF; + *endian_check++ = 0xFF; + *endian_check++ = 0x56; + *endian_check++ = 0x78; + *endian_check++ = 0xFF; + } else { + /* to stop communicating */ + *endian_check++ = 0xFF; + *endian_check++ = 0xAA; + *endian_check++ = 0xBB; + *endian_check++ = 0xFF; + *endian_check++ = 0xFF; + *endian_check++ = 0xCC; + *endian_check++ = 0xDD; + *endian_check = 0xFF; + } + be_dws_cpu_to_le(wrb, sizeof(*wrb)); + + status = be_mbox_notify(ctrl); + if (status) + beiscsi_log(phba, 
KERN_INFO, BEISCSI_LOG_INIT, + "BC_%d : special WRB message failed\n"); + mutex_unlock(&ctrl->mbox_lock); + return status; +} + +int beiscsi_init_sliport(struct beiscsi_hba *phba) +{ + int status; + + /* check POST stage before talking to FW */ + status = beiscsi_check_fw_rdy(phba); + if (!status) + return -EIO; + + /* clear all error states after checking FW rdy */ + phba->state &= ~BEISCSI_HBA_IN_ERR; + + /* check again UER support */ + phba->state &= ~BEISCSI_HBA_UER_SUPP; + + /* + * SLI COMMON_FUNCTION_RESET completion is indicated by BMBX RDY bit. + * It should clean up any stale info in FW for this fn. + */ + status = beiscsi_cmd_function_reset(phba); + if (status) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BC_%d : SLI Function Reset failed\n"); + return status; + } + + /* indicate driver is loading */ + return beiscsi_cmd_special_wrb(&phba->ctrl, 1); +} + +/** + * beiscsi_cmd_iscsi_cleanup()- Inform FW to cleanup EP data structures. + * @phba: pointer to dev priv structure + * @ulp: ULP number. + * + * return + * Success: 0 + * Failure: Non-Zero Value + **/ +int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct iscsi_cleanup_req_v1 *req_v1; + struct iscsi_cleanup_req *req; + u16 hdr_ring_id, data_ring_id; + struct be_mcc_wrb *wrb; + int status; + + mutex_lock(&ctrl->mbox_lock); + wrb = wrb_from_mbox(&ctrl->mbox_mem); + + hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp); + data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp); + if (is_chip_be2_be3r(phba)) { + req = embedded_payload(wrb); + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req)); + req->chute = (1 << ulp); + /* BE2/BE3 FW creates 8-bit ring id */ + req->hdr_ring_id = hdr_ring_id; + req->data_ring_id = data_ring_id; + } else { + req_v1 = embedded_payload(wrb); + be_wrb_hdr_prepare(wrb, sizeof(*req_v1), true, 0); + be_cmd_hdr_prepare(&req_v1->hdr, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_CLEANUP, + sizeof(*req_v1)); + req_v1->hdr.version = 1; + req_v1->chute = (1 << ulp); + req_v1->hdr_ring_id = cpu_to_le16(hdr_ring_id); + req_v1->data_ring_id = cpu_to_le16(data_ring_id); + } + + status = be_mbox_notify(ctrl); + if (status) + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, + "BG_%d : %s failed %d\n", __func__, ulp); + mutex_unlock(&ctrl->mbox_lock); + return status; +} + +/* + * beiscsi_detect_ue()- Detect Unrecoverable Error on adapter + * @phba: Driver priv structure + * + * Read registers linked to UE and check for the UE status + **/ +int beiscsi_detect_ue(struct beiscsi_hba *phba) +{ + uint32_t ue_mask_hi = 0, ue_mask_lo = 0; + uint32_t ue_hi = 0, ue_lo = 0; + uint8_t i = 0; + int ret = 0; + + pci_read_config_dword(phba->pcidev, + PCICFG_UE_STATUS_LOW, &ue_lo); + pci_read_config_dword(phba->pcidev, + PCICFG_UE_STATUS_MASK_LOW, + &ue_mask_lo); + pci_read_config_dword(phba->pcidev, + PCICFG_UE_STATUS_HIGH, + &ue_hi); + pci_read_config_dword(phba->pcidev, + PCICFG_UE_STATUS_MASK_HI, + &ue_mask_hi); + + ue_lo = (ue_lo & ~ue_mask_lo); + ue_hi = (ue_hi & ~ue_mask_hi); + + + if (ue_lo || ue_hi) { + set_bit(BEISCSI_HBA_IN_UE, &phba->state); + __beiscsi_log(phba, KERN_ERR, + "BC_%d : HBA error detected\n"); + ret = 1; + } + + if (ue_lo) { + for (i = 0; ue_lo; ue_lo >>= 1, i++) { + if (ue_lo & 1) + __beiscsi_log(phba, KERN_ERR, + "BC_%d : UE_LOW %s bit set\n", + desc_ue_status_low[i]); + } + } + + if (ue_hi) { + for (i = 0; ue_hi; ue_hi >>= 1, i++) { + if 
(ue_hi & 1) + __beiscsi_log(phba, KERN_ERR, + "BC_%d : UE_HIGH %s bit set\n", + desc_ue_status_hi[i]); + } + } + return ret; +} + +/* + * beiscsi_detect_tpe()- Detect Transient Parity Error on adapter + * @phba: Driver priv structure + * + * Read SLIPORT SEMAPHORE register to check for UER + * + **/ +int beiscsi_detect_tpe(struct beiscsi_hba *phba) +{ + u32 post, status; + int ret = 0; + + post = beiscsi_get_post_stage(phba); + status = post & POST_STAGE_MASK; + if ((status & POST_ERR_RECOVERY_CODE_MASK) == + POST_STAGE_RECOVERABLE_ERR) { + set_bit(BEISCSI_HBA_IN_TPE, &phba->state); + __beiscsi_log(phba, KERN_INFO, + "BC_%d : HBA error recoverable: 0x%x\n", post); + ret = 1; + } else { + __beiscsi_log(phba, KERN_INFO, + "BC_%d : HBA in UE: 0x%x\n", post); + } + + return ret; +} diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h new file mode 100644 index 000000000..5f9f0b18d --- /dev/null +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -0,0 +1,1461 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2017 Broadcom. All Rights Reserved. + * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. + * + * Contact Information: + * linux-drivers@broadcom.com + */ + +#ifndef BEISCSI_CMDS_H +#define BEISCSI_CMDS_H + +/** + * The driver sends configuration and managements command requests to the + * firmware in the BE. These requests are communicated to the processor + * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one + * WRB inside a MAILBOX. + * The commands are serviced by the ARM processor in the OneConnect's MPU. + */ +struct be_sge { + __le32 pa_lo; + __le32 pa_hi; + __le32 len; +}; + +struct be_mcc_wrb { + u32 emb_sgecnt_special; /* dword 0 */ + /* bits 0 - embedded */ + /* bits 1 - 2 reserved */ + /* bits 3 - 7 sge count */ + /* bits 8 - 23 reserved */ + /* bits 24 - 31 special */ +#define MCC_WRB_EMBEDDED_MASK 1 +#define MCC_WRB_SGE_CNT_SHIFT 3 +#define MCC_WRB_SGE_CNT_MASK 0x1F + u32 payload_length; /* dword 1 */ + u32 tag0; /* dword 2 */ + u32 tag1; /* dword 3 */ + u32 rsvd; /* dword 4 */ + union { +#define EMBED_MBX_MAX_PAYLOAD_SIZE 220 + u8 embedded_payload[236]; /* used by embedded cmds */ + struct be_sge sgl[19]; /* used by non-embedded cmds */ + } payload; +}; + +#define CQE_FLAGS_VALID_MASK (1 << 31) +#define CQE_FLAGS_ASYNC_MASK (1 << 30) +#define CQE_FLAGS_COMPLETED_MASK (1 << 28) +#define CQE_FLAGS_CONSUMED_MASK (1 << 27) + +/* Completion Status */ +#define MCC_STATUS_SUCCESS 0x0 +#define MCC_STATUS_FAILED 0x1 +#define MCC_STATUS_ILLEGAL_REQUEST 0x2 +#define MCC_STATUS_ILLEGAL_FIELD 0x3 +#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4 +#define MCC_STATUS_INVALID_LENGTH 0x74 + +#define CQE_STATUS_COMPL_MASK 0xFFFF +#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ +#define CQE_STATUS_EXTD_MASK 0xFFFF +#define CQE_STATUS_EXTD_SHIFT 16 /* bits 31 - 16 */ +#define CQE_STATUS_ADDL_MASK 0xFF00 +#define CQE_STATUS_ADDL_SHIFT 8 +#define CQE_STATUS_MASK 0xFF +#define CQE_STATUS_WRB_MASK 0xFF0000 +#define CQE_STATUS_WRB_SHIFT 16 + +#define BEISCSI_HOST_MBX_TIMEOUT (110 * 1000) +#define BEISCSI_FW_MBX_TIMEOUT 100 + +/* MBOX Command VER */ +#define MBX_CMD_VER1 0x01 +#define MBX_CMD_VER2 0x02 + +struct be_mcc_compl { + u32 status; /* dword 0 */ + u32 tag0; /* dword 1 */ + u32 tag1; /* dword 2 */ + u32 flags; /* dword 3 */ +}; + +/********* Mailbox door bell *************/ +/** + * Used for driver communication with the FW. + * The software must write this register twice to post any command. 
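[Editorial note, not part of the patch: a rough sketch of the two-phase MAILBOX doorbell post that the comment above describes. The address packing is deliberately simplified and the helper name is hypothetical; be_mbox_notify() in this file is the driver's actual implementation.]

    /* Illustrative only: post a bootstrap mailbox via the MPU doorbell. */
    static void example_post_mbox(void __iomem *db, u32 addr_hi, u32 addr_lo)
    {
            iowrite32(MPU_MAILBOX_DB_HI_MASK | addr_hi, db);
            /* poll MPU_MAILBOX_DB_RDY_MASK until the FW acknowledges */
            iowrite32(addr_lo, db);
            /* poll MPU_MAILBOX_DB_RDY_MASK again; completion is then in the mailbox */
    }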
First, + * it writes the register with hi=1 and the upper bits of the physical address + * for the MAILBOX structure. Software must poll the ready bit until this + * is acknowledged. Then, sotware writes the register with hi=0 with the lower + * bits in the address. It must poll the ready bit until the command is + * complete. Upon completion, the MAILBOX will contain a valid completion + * queue entry. + */ +#define MPU_MAILBOX_DB_OFFSET 0x160 +#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */ +#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */ + +/********** MPU semphore: used for SH & BE ******************/ +#define SLIPORT_SOFTRESET_OFFSET 0x5c /* CSR BAR offset */ +#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */ +#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */ +#define POST_STAGE_MASK 0x0000FFFF +#define POST_ERROR_BIT 0x80000000 +#define POST_ERR_RECOVERY_CODE_MASK 0xF000 + +/* Soft Reset register masks */ +#define SLIPORT_SOFTRESET_SR_MASK 0x00000080 /* SR bit */ + +/* MPU semphore POST stage values */ +#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */ +#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */ +#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */ +#define POST_STAGE_ARMFW_RDY 0xC000 /* FW is done with POST */ +#define POST_STAGE_RECOVERABLE_ERR 0xE000 /* Recoverable err detected */ + +/********** MCC door bell ************/ +#define DB_MCCQ_OFFSET 0x140 +#define DB_MCCQ_RING_ID_MASK 0xFFFF /* bits 0 - 15 */ +/* Number of entries posted */ +#define DB_MCCQ_NUM_POSTED_SHIFT 16 /* bits 16 - 29 */ + +/** + * When the async bit of mcc_compl is set, the last 4 bytes of + * mcc_compl is interpreted as follows: + */ +#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */ +#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF +#define ASYNC_EVENT_CODE_LINK_STATE 0x1 +#define ASYNC_EVENT_CODE_ISCSI 0x4 +#define ASYNC_EVENT_CODE_SLI 0x11 + +#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 /* bits 16 - 23 */ +#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF + +/* iSCSI events */ +#define ASYNC_EVENT_NEW_ISCSI_TGT_DISC 0x4 +#define ASYNC_EVENT_NEW_ISCSI_CONN 0x5 +#define ASYNC_EVENT_NEW_TCP_CONN 0x7 + +/* SLI events */ +#define ASYNC_SLI_EVENT_TYPE_MISCONFIGURED 0x9 +#define ASYNC_SLI_LINK_EFFECT_VALID(le) (le & 0x80) +#define ASYNC_SLI_LINK_EFFECT_SEV(le) ((le >> 1) & 0x03) +#define ASYNC_SLI_LINK_EFFECT_STATE(le) (le & 0x01) + +struct be_async_event_trailer { + u32 code; +}; + +enum { + ASYNC_EVENT_LINK_DOWN = 0x0, + ASYNC_EVENT_LINK_UP = 0x1, +}; + +/** + * When the event code of an async trailer is link-state, the mcc_compl + * must be interpreted as follows + */ +struct be_async_event_link_state { + u8 physical_port; + u8 port_link_status; +/** + * ASYNC_EVENT_LINK_DOWN 0x0 + * ASYNC_EVENT_LINK_UP 0x1 + * ASYNC_EVENT_LINK_LOGICAL_DOWN 0x2 + * ASYNC_EVENT_LINK_LOGICAL_UP 0x3 + */ +#define BE_ASYNC_LINK_UP_MASK 0x01 + u8 port_duplex; + u8 port_speed; +/* BE2ISCSI_LINK_SPEED_ZERO 0x00 - no link */ +#define BE2ISCSI_LINK_SPEED_10MBPS 0x01 +#define BE2ISCSI_LINK_SPEED_100MBPS 0x02 +#define BE2ISCSI_LINK_SPEED_1GBPS 0x03 +#define BE2ISCSI_LINK_SPEED_10GBPS 0x04 +#define BE2ISCSI_LINK_SPEED_25GBPS 0x06 +#define BE2ISCSI_LINK_SPEED_40GBPS 0x07 + u8 port_fault; + u8 event_reason; + u16 qos_link_speed; + u32 event_tag; + struct be_async_event_trailer trailer; +} __packed; + +/** + * When async-trailer is SLI event, mcc_compl is interpreted as + */ +struct be_async_event_sli { + u32 event_data1; + u32 event_data2; + u32 reserved; + u32 
trailer; +} __packed; + +struct be_mcc_mailbox { + struct be_mcc_wrb wrb; + struct be_mcc_compl compl; +}; + +/* Type of subsystems supported by FW */ +#define CMD_SUBSYSTEM_COMMON 0x1 +#define CMD_SUBSYSTEM_ISCSI 0x2 +#define CMD_SUBSYSTEM_ETH 0x3 +#define CMD_SUBSYSTEM_ISCSI_INI 0x6 +#define CMD_COMMON_TCP_UPLOAD 0x1 + +/** + * List of common opcodes subsystem CMD_SUBSYSTEM_COMMON + * These opcodes are unique for each subsystem defined above + */ +#define OPCODE_COMMON_CQ_CREATE 12 +#define OPCODE_COMMON_EQ_CREATE 13 +#define OPCODE_COMMON_MCC_CREATE 21 +#define OPCODE_COMMON_MCC_CREATE_EXT 90 +#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS 24 +#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS 25 +#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32 +#define OPCODE_COMMON_GET_FW_VERSION 35 +#define OPCODE_COMMON_MODIFY_EQ_DELAY 41 +#define OPCODE_COMMON_FIRMWARE_CONFIG 42 +#define OPCODE_COMMON_MCC_DESTROY 53 +#define OPCODE_COMMON_CQ_DESTROY 54 +#define OPCODE_COMMON_EQ_DESTROY 55 +#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58 +#define OPCODE_COMMON_FUNCTION_RESET 61 +#define OPCODE_COMMON_GET_PORT_NAME 77 +#define OPCODE_COMMON_SET_HOST_DATA 93 +#define OPCODE_COMMON_SET_FEATURES 191 + +/** + * LIST of opcodes that are common between Initiator and Target + * used by CMD_SUBSYSTEM_ISCSI + * These opcodes are unique for each subsystem defined above + */ +#define OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES 2 +#define OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES 3 +#define OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG 7 +#define OPCODE_COMMON_ISCSI_NTWK_SET_VLAN 14 +#define OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR 17 +#define OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR 18 +#define OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR 21 +#define OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY 22 +#define OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY 23 +#define OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID 24 +#define OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO 25 +#define OPCODE_COMMON_ISCSI_SET_FRAGNUM_BITS_FOR_SGL_CRA 61 +#define OPCODE_COMMON_ISCSI_DEFQ_CREATE 64 +#define OPCODE_COMMON_ISCSI_DEFQ_DESTROY 65 +#define OPCODE_COMMON_ISCSI_WRBQ_CREATE 66 +#define OPCODE_COMMON_ISCSI_WRBQ_DESTROY 67 + +struct be_cmd_req_hdr { + u8 opcode; /* dword 0 */ + u8 subsystem; /* dword 0 */ + u8 port_number; /* dword 0 */ + u8 domain; /* dword 0 */ + u32 timeout; /* dword 1 */ + u32 request_length; /* dword 2 */ + u8 version; /* dword 3 */ + u8 rsvd0[3]; /* dword 3 */ +}; + +struct be_cmd_resp_hdr { + u32 info; /* dword 0 */ + u32 status; /* dword 1 */ + u32 response_length; /* dword 2 */ + u32 actual_resp_len; /* dword 3 */ +}; + +struct phys_addr { + u32 lo; + u32 hi; +}; + +struct virt_addr { + u32 lo; + u32 hi; +}; +/************************** + * BE Command definitions * + **************************/ + +/** + * Pseudo amap definition in which each bit of the actual structure is defined + * as a byte - used to calculate offset/shift/mask of each field + */ +struct amap_eq_context { + u8 cidx[13]; /* dword 0 */ + u8 rsvd0[3]; /* dword 0 */ + u8 epidx[13]; /* dword 0 */ + u8 valid; /* dword 0 */ + u8 rsvd1; /* dword 0 */ + u8 size; /* dword 0 */ + u8 pidx[13]; /* dword 1 */ + u8 rsvd2[3]; /* dword 1 */ + u8 pd[10]; /* dword 1 */ + u8 count[3]; /* dword 1 */ + u8 solevent; /* dword 1 */ + u8 stalled; /* dword 1 */ + u8 armed; /* dword 1 */ + u8 rsvd3[4]; /* dword 2 */ + u8 func[8]; /* dword 2 */ + u8 rsvd4; /* dword 2 */ + u8 delaymult[10]; /* dword 2 */ + u8 rsvd5[2]; /* dword 2 */ + u8 phase[2]; /* dword 2 */ + u8 nodelay; /* 
dword 2 */ + u8 rsvd6[4]; /* dword 2 */ + u8 rsvd7[32]; /* dword 3 */ +} __packed; + +struct be_cmd_req_eq_create { + struct be_cmd_req_hdr hdr; /* dw[4] */ + u16 num_pages; /* sword */ + u16 rsvd0; /* sword */ + u8 context[sizeof(struct amap_eq_context) / 8]; /* dw[4] */ + struct phys_addr pages[8]; +} __packed; + +struct be_cmd_resp_eq_create { + struct be_cmd_resp_hdr resp_hdr; + u16 eq_id; /* sword */ + u16 rsvd0; /* sword */ +} __packed; + +struct be_set_eqd { + u32 eq_id; + u32 phase; + u32 delay_multiplier; +} __packed; + +struct mgmt_chap_format { + u32 flags; + u8 intr_chap_name[256]; + u8 intr_secret[16]; + u8 target_chap_name[256]; + u8 target_secret[16]; + u16 intr_chap_name_length; + u16 intr_secret_length; + u16 target_chap_name_length; + u16 target_secret_length; +} __packed; + +struct mgmt_auth_method_format { + u8 auth_method_type; + u8 padding[3]; + struct mgmt_chap_format chap; +} __packed; + +struct be_cmd_req_logout_fw_sess { + struct be_cmd_req_hdr hdr; /* dw[4] */ + uint32_t session_handle; +} __packed; + +struct be_cmd_resp_logout_fw_sess { + struct be_cmd_resp_hdr hdr; /* dw[4] */ + uint32_t session_status; +#define BE_SESS_STATUS_CLOSE 0x20 +} __packed; + +struct mgmt_conn_login_options { + u8 flags; + u8 header_digest; + u8 data_digest; + u8 rsvd0; + u32 max_recv_datasegment_len_ini; + u32 max_recv_datasegment_len_tgt; + u32 tcp_mss; + u32 tcp_window_size; + struct mgmt_auth_method_format auth_data; +} __packed; + +struct ip_addr_format { + u16 size_of_structure; + u8 reserved; + u8 ip_type; +#define BEISCSI_IP_TYPE_V4 0x1 +#define BEISCSI_IP_TYPE_STATIC_V4 0x3 +#define BEISCSI_IP_TYPE_DHCP_V4 0x5 +/* type v4 values < type v6 values */ +#define BEISCSI_IP_TYPE_V6 0x10 +#define BEISCSI_IP_TYPE_ROUTABLE_V6 0x30 +#define BEISCSI_IP_TYPE_LINK_LOCAL_V6 0x50 +#define BEISCSI_IP_TYPE_AUTO_V6 0x90 + u8 addr[16]; + u32 rsvd0; +} __packed; + +struct mgmt_conn_info { + u32 connection_handle; + u32 connection_status; + u16 src_port; + u16 dest_port; + u16 dest_port_redirected; + u16 cid; + u32 estimated_throughput; + struct ip_addr_format src_ipaddr; + struct ip_addr_format dest_ipaddr; + struct ip_addr_format dest_ipaddr_redirected; + struct mgmt_conn_login_options negotiated_login_options; +} __packed; + +struct mgmt_session_login_options { + u8 flags; + u8 error_recovery_level; + u16 rsvd0; + u32 first_burst_length; + u32 max_burst_length; + u16 max_connections; + u16 max_outstanding_r2t; + u16 default_time2wait; + u16 default_time2retain; +} __packed; + +struct mgmt_session_info { + u32 session_handle; + u32 status; + u8 isid[6]; + u16 tsih; + u32 session_flags; + u16 conn_count; + u16 pad; + u8 target_name[224]; + u8 initiator_iscsiname[224]; + struct mgmt_session_login_options negotiated_login_options; + struct mgmt_conn_info conn_list[1]; +} __packed; + +struct be_cmd_get_session_req { + struct be_cmd_req_hdr hdr; + u32 session_handle; +} __packed; + +struct be_cmd_get_session_resp { + struct be_cmd_resp_hdr hdr; + struct mgmt_session_info session_info; +} __packed; + +struct mac_addr { + u16 size_of_structure; + u8 addr[ETH_ALEN]; +} __packed; + +struct be_cmd_get_boot_target_req { + struct be_cmd_req_hdr hdr; +} __packed; + +struct be_cmd_get_boot_target_resp { + struct be_cmd_resp_hdr hdr; + u32 boot_session_count; + u32 boot_session_handle; +/** + * FW returns 0xffffffff if it couldn't establish connection with + * configured boot target. 
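[Editorial note, not part of the patch: in the "pseudo amap" layouts such as amap_eq_context above, each member is sized in bytes but stands for bits of the real hardware context, so offsetof()/sizeof() on the pseudo struct give the bit offset and bit width that the driver's AMAP_SET_BITS()/AMAP_GET_BITS() helpers need. The macros below are an illustrative sketch, not the driver's actual AMAP implementation.]

    /* Illustrative only: bit offset/width of a field in a pseudo amap struct. */
    #define EXAMPLE_AMAP_BIT_OFFSET(_struct, field) \
            ((u32)offsetof(struct _struct, field))
    #define EXAMPLE_AMAP_BIT_WIDTH(_struct, field) \
            ((u32)sizeof(((struct _struct *)0)->field))

    /* e.g. amap_eq_context.valid sits at bit 29 of dword 0 and is 1 bit wide */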
+ */ +#define BE_BOOT_INVALID_SHANDLE 0xffffffff +}; + +struct be_cmd_reopen_session_req { + struct be_cmd_req_hdr hdr; +#define BE_REOPEN_ALL_SESSIONS 0x00 +#define BE_REOPEN_BOOT_SESSIONS 0x01 +#define BE_REOPEN_A_SESSION 0x02 + u16 reopen_type; + u16 rsvd; + u32 session_handle; +} __packed; + +struct be_cmd_reopen_session_resp { + struct be_cmd_resp_hdr hdr; + u32 rsvd; + u32 session_handle; +} __packed; + + +struct be_cmd_mac_query_req { + struct be_cmd_req_hdr hdr; + u8 type; + u8 permanent; + u16 if_id; +} __packed; + +struct be_cmd_get_mac_resp { + struct be_cmd_resp_hdr hdr; + struct mac_addr mac; +}; + +struct be_ip_addr_subnet_format { + u16 size_of_structure; + u8 ip_type; + u8 ipv6_prefix_length; + u8 addr[16]; + u8 subnet_mask[16]; + u32 rsvd0; +} __packed; + +struct be_cmd_get_if_info_req { + struct be_cmd_req_hdr hdr; + u32 interface_hndl; + u32 ip_type; +} __packed; + +struct be_cmd_get_if_info_resp { + struct be_cmd_req_hdr hdr; + u32 interface_hndl; + u32 vlan_priority; + u32 ip_addr_count; + u32 dhcp_state; + struct be_ip_addr_subnet_format ip_addr; +} __packed; + +struct be_ip_addr_record { + u32 action; + u32 interface_hndl; + struct be_ip_addr_subnet_format ip_addr; + u32 status; +} __packed; + +struct be_ip_addr_record_params { + u32 record_entry_count; + struct be_ip_addr_record ip_record; +} __packed; + +struct be_cmd_set_ip_addr_req { + struct be_cmd_req_hdr hdr; + struct be_ip_addr_record_params ip_params; +} __packed; + + +struct be_cmd_set_dhcp_req { + struct be_cmd_req_hdr hdr; + u32 interface_hndl; + u32 ip_type; + u32 flags; + u32 retry_count; +} __packed; + +struct be_cmd_rel_dhcp_req { + struct be_cmd_req_hdr hdr; + u32 interface_hndl; + u32 ip_type; +} __packed; + +struct be_cmd_set_def_gateway_req { + struct be_cmd_req_hdr hdr; + u32 action; + struct ip_addr_format ip_addr; +} __packed; + +struct be_cmd_get_def_gateway_req { + struct be_cmd_req_hdr hdr; + u32 ip_type; +} __packed; + +struct be_cmd_get_def_gateway_resp { + struct be_cmd_req_hdr hdr; + struct ip_addr_format ip_addr; +} __packed; + +#define BEISCSI_VLAN_DISABLE 0xFFFF +struct be_cmd_set_vlan_req { + struct be_cmd_req_hdr hdr; + u32 interface_hndl; + u32 vlan_priority; +} __packed; +/******************** Create CQ ***************************/ +/** + * Pseudo amap definition in which each bit of the actual structure is defined + * as a byte - used to calculate offset/shift/mask of each field + */ +struct amap_cq_context { + u8 cidx[11]; /* dword 0 */ + u8 rsvd0; /* dword 0 */ + u8 coalescwm[2]; /* dword 0 */ + u8 nodelay; /* dword 0 */ + u8 epidx[11]; /* dword 0 */ + u8 rsvd1; /* dword 0 */ + u8 count[2]; /* dword 0 */ + u8 valid; /* dword 0 */ + u8 solevent; /* dword 0 */ + u8 eventable; /* dword 0 */ + u8 pidx[11]; /* dword 1 */ + u8 rsvd2; /* dword 1 */ + u8 pd[10]; /* dword 1 */ + u8 eqid[8]; /* dword 1 */ + u8 stalled; /* dword 1 */ + u8 armed; /* dword 1 */ + u8 rsvd3[4]; /* dword 2 */ + u8 func[8]; /* dword 2 */ + u8 rsvd4[20]; /* dword 2 */ + u8 rsvd5[32]; /* dword 3 */ +} __packed; + +struct amap_cq_context_v2 { + u8 rsvd0[12]; /* dword 0 */ + u8 coalescwm[2]; /* dword 0 */ + u8 nodelay; /* dword 0 */ + u8 rsvd1[12]; /* dword 0 */ + u8 count[2]; /* dword 0 */ + u8 valid; /* dword 0 */ + u8 rsvd2; /* dword 0 */ + u8 eventable; /* dword 0 */ + u8 eqid[16]; /* dword 1 */ + u8 rsvd3[15]; /* dword 1 */ + u8 armed; /* dword 1 */ + u8 cqecount[16];/* dword 2 */ + u8 rsvd4[16]; /* dword 2 */ + u8 rsvd5[32]; /* dword 3 */ +}; + +struct be_cmd_req_cq_create { + struct be_cmd_req_hdr hdr; + u16 
num_pages; + u8 page_size; + u8 rsvd0; + u8 context[sizeof(struct amap_cq_context) / 8]; + struct phys_addr pages[4]; +} __packed; + +struct be_cmd_resp_cq_create { + struct be_cmd_resp_hdr hdr; + u16 cq_id; + u16 rsvd0; +} __packed; + +/******************** Create MCCQ ***************************/ +/** + * Pseudo amap definition in which each bit of the actual structure is defined + * as a byte - used to calculate offset/shift/mask of each field + */ +struct amap_mcc_context { + u8 con_index[14]; + u8 rsvd0[2]; + u8 ring_size[4]; + u8 fetch_wrb; + u8 fetch_r2t; + u8 cq_id[10]; + u8 prod_index[14]; + u8 fid[8]; + u8 pdid[9]; + u8 valid; + u8 rsvd1[32]; + u8 rsvd2[32]; +} __packed; + +struct be_cmd_req_mcc_create_ext { + struct be_cmd_req_hdr hdr; + u16 num_pages; + u16 rsvd0; + u32 async_evt_bitmap; + u8 context[sizeof(struct amap_mcc_context) / 8]; + struct phys_addr pages[8]; +} __packed; + +struct be_cmd_resp_mcc_create { + struct be_cmd_resp_hdr hdr; + u16 id; + u16 rsvd0; +} __packed; + +/******************** Q Destroy ***************************/ +/* Type of Queue to be destroyed */ +enum { + QTYPE_EQ = 1, + QTYPE_CQ, + QTYPE_MCCQ, + QTYPE_WRBQ, + QTYPE_DPDUQ, + QTYPE_SGL +}; + +struct be_cmd_req_q_destroy { + struct be_cmd_req_hdr hdr; + u16 id; + u16 bypass_flush; /* valid only for rx q destroy */ +} __packed; + +struct macaddr { + u8 byte[ETH_ALEN]; +}; + +struct be_cmd_req_mcast_mac_config { + struct be_cmd_req_hdr hdr; + u16 num_mac; + u8 promiscuous; + u8 interface_id; + struct macaddr mac[32]; +} __packed; + +static inline void *embedded_payload(struct be_mcc_wrb *wrb) +{ + return wrb->payload.embedded_payload; +} + +static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) +{ + return &wrb->payload.sgl[0]; +} + +/******************** Modify EQ Delay *******************/ +struct be_cmd_req_modify_eq_delay { + struct be_cmd_req_hdr hdr; + __le32 num_eq; + struct { + __le32 eq_id; + __le32 phase; + __le32 delay_multiplier; + } delay[MAX_CPUS]; +} __packed; + +/******************** Get MAC ADDR *******************/ + +struct be_cmd_get_nic_conf_resp { + struct be_cmd_resp_hdr hdr; + u32 nic_port_count; + u32 speed; + u32 max_speed; + u32 link_state; + u32 max_frame_size; + u16 size_of_structure; + u8 mac_address[ETH_ALEN]; +} __packed; + +/******************** Get HBA NAME *******************/ + +struct be_cmd_hba_name { + struct be_cmd_req_hdr hdr; + u16 flags; + u16 rsvd0; + u8 initiator_name[ISCSI_NAME_LEN]; +#define BE_INI_ALIAS_LEN 32 + u8 initiator_alias[BE_INI_ALIAS_LEN]; +} __packed; + +/******************** COMMON SET HOST DATA *******************/ +#define BE_CMD_SET_HOST_PARAM_ID 0x2 +#define BE_CMD_MAX_DRV_VERSION 0x30 +struct be_sethost_req { + u32 param_id; + u32 param_len; + u32 param_data[32]; +}; + +struct be_sethost_resp { + u32 rsvd0; +}; + +struct be_cmd_set_host_data { + union { + struct be_cmd_req_hdr req_hdr; + struct be_cmd_resp_hdr resp_hdr; + } h; + union { + struct be_sethost_req req; + struct be_sethost_resp resp; + } param; +} __packed; + +/******************** COMMON SET Features *******************/ +#define BE_CMD_SET_FEATURE_UER 0x10 +#define BE_CMD_UER_SUPP_BIT 0x1 +struct be_uer_req { + u32 uer; + u32 rsvd; +}; + +struct be_uer_resp { + u32 uer; + u16 ue2rp; + u16 ue2sr; +}; + +struct be_cmd_set_features { + union { + struct be_cmd_req_hdr req_hdr; + struct be_cmd_resp_hdr resp_hdr; + } h; + u32 feature; + u32 param_len; + union { + struct be_uer_req req; + struct be_uer_resp resp; + u32 rsvd[2]; + } param; +} __packed; + +int 
beiscsi_cmd_function_reset(struct beiscsi_hba *phba); + +int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load); + +int beiscsi_check_fw_rdy(struct beiscsi_hba *phba); + +int beiscsi_init_sliport(struct beiscsi_hba *phba); + +int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num); + +int beiscsi_detect_ue(struct beiscsi_hba *phba); + +int beiscsi_detect_tpe(struct beiscsi_hba *phba); + +int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, + struct be_queue_info *eq, int eq_delay); + +int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, + struct be_queue_info *cq, struct be_queue_info *eq, + bool sol_evts, bool no_delay, + int num_cqe_dma_coalesce); + +int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, + int type); +int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba, + struct be_queue_info *mccq, + struct be_queue_info *cq); + +void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag); + +int beiscsi_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *, + int num); +int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba, + unsigned int tag, + struct be_mcc_wrb **wrb, + struct be_dma_mem *mbx_cmd_mem); +int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba, + unsigned int tag, + struct be_mcc_wrb **wrb, + struct be_dma_mem *mbx_cmd_mem); +struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); +void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag); +struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba, + unsigned int *ref_tag); +void beiscsi_process_async_event(struct beiscsi_hba *phba, + struct be_mcc_compl *compl); +int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl, + struct be_mcc_compl *compl); + +int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, + struct be_queue_info *cq, + struct be_queue_info *dq, int length, + int entry_size, uint8_t is_header, + uint8_t ulp_num); + +int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl, + struct be_dma_mem *q_mem); + +int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl); + +int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl, + struct be_dma_mem *q_mem, u32 page_offset, + u32 num_pages); + +int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, + struct be_queue_info *wrbq, + struct hwi_wrb_context *pwrb_context, + uint8_t ulp_num); + +/* Configuration Functions */ +int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag); + +int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl, + struct beiscsi_hba *phba); + +int beiscsi_get_fw_config(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba); + +int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba); + +int beiscsi_set_uer_feature(struct beiscsi_hba *phba); +int beiscsi_set_host_data(struct beiscsi_hba *phba); + +struct be_default_pdu_context { + u32 dw[4]; +} __packed; + +struct amap_be_default_pdu_context { + u8 dbuf_cindex[13]; /* dword 0 */ + u8 rsvd0[3]; /* dword 0 */ + u8 ring_size[4]; /* dword 0 */ + u8 ring_state[4]; /* dword 0 */ + u8 rsvd1[8]; /* dword 0 */ + u8 dbuf_pindex[13]; /* dword 1 */ + u8 rsvd2; /* dword 1 */ + u8 pci_func_id[8]; /* dword 1 */ + u8 rx_pdid[9]; /* dword 1 */ + u8 rx_pdid_valid; /* dword 1 */ + u8 default_buffer_size[16]; /* dword 2 */ + u8 cq_id_recv[10]; /* dword 2 */ + u8 rx_pdid_not_valid; /* dword 2 */ + u8 rsvd3[5]; /* dword 2 */ + u8 rsvd4[32]; /* dword 3 */ +} __packed; + +struct amap_default_pdu_context_ext { + u8 rsvd0[16]; /* dword 0 */ + u8 ring_size[4]; /* 
dword 0 */ + u8 rsvd1[12]; /* dword 0 */ + u8 rsvd2[22]; /* dword 1 */ + u8 rx_pdid[9]; /* dword 1 */ + u8 rx_pdid_valid; /* dword 1 */ + u8 default_buffer_size[16]; /* dword 2 */ + u8 cq_id_recv[16]; /* dword 2 */ + u8 rsvd3[32]; /* dword 3 */ +} __packed; + +struct be_defq_create_req { + struct be_cmd_req_hdr hdr; + u16 num_pages; + u8 ulp_num; +#define BEISCSI_DUAL_ULP_AWARE_BIT 0 /* Byte 3 - Bit 0 */ +#define BEISCSI_BIND_Q_TO_ULP_BIT 1 /* Byte 3 - Bit 1 */ + u8 dua_feature; + struct be_default_pdu_context context; + struct phys_addr pages[8]; +} __packed; + +struct be_defq_create_resp { + struct be_cmd_req_hdr hdr; + u16 id; + u8 rsvd0; + u8 ulp_num; + u32 doorbell_offset; + u16 register_set; + u16 doorbell_format; +} __packed; + +struct be_post_template_pages_req { + struct be_cmd_req_hdr hdr; + u16 num_pages; +#define BEISCSI_TEMPLATE_HDR_TYPE_ISCSI 0x1 + u16 type; + struct phys_addr scratch_pa; + struct virt_addr scratch_va; + struct virt_addr pages_va; + struct phys_addr pages[16]; +} __packed; + +struct be_remove_template_pages_req { + struct be_cmd_req_hdr hdr; + u16 type; + u16 rsvd0; +} __packed; + +struct be_post_sgl_pages_req { + struct be_cmd_req_hdr hdr; + u16 num_pages; + u16 page_offset; + u32 rsvd0; + struct phys_addr pages[26]; + u32 rsvd1; +} __packed; + +struct be_wrbq_create_req { + struct be_cmd_req_hdr hdr; + u16 num_pages; + u8 ulp_num; + u8 dua_feature; + struct phys_addr pages[8]; +} __packed; + +struct be_wrbq_create_resp { + struct be_cmd_resp_hdr resp_hdr; + u16 cid; + u8 rsvd0; + u8 ulp_num; + u32 doorbell_offset; + u16 register_set; + u16 doorbell_format; +} __packed; + +#define SOL_CID_MASK 0x0000FFC0 +#define SOL_CODE_MASK 0x0000003F +#define SOL_WRB_INDEX_MASK 0x00FF0000 +#define SOL_CMD_WND_MASK 0xFF000000 +#define SOL_RES_CNT_MASK 0x7FFFFFFF +#define SOL_EXP_CMD_SN_MASK 0xFFFFFFFF +#define SOL_HW_STS_MASK 0x000000FF +#define SOL_STS_MASK 0x0000FF00 +#define SOL_RESP_MASK 0x00FF0000 +#define SOL_FLAGS_MASK 0x7F000000 +#define SOL_S_MASK 0x80000000 + +struct sol_cqe { + u32 dw[4]; +}; + +struct amap_sol_cqe { + u8 hw_sts[8]; /* dword 0 */ + u8 i_sts[8]; /* dword 0 */ + u8 i_resp[8]; /* dword 0 */ + u8 i_flags[7]; /* dword 0 */ + u8 s; /* dword 0 */ + u8 i_exp_cmd_sn[32]; /* dword 1 */ + u8 code[6]; /* dword 2 */ + u8 cid[10]; /* dword 2 */ + u8 wrb_index[8]; /* dword 2 */ + u8 i_cmd_wnd[8]; /* dword 2 */ + u8 i_res_cnt[31]; /* dword 3 */ + u8 valid; /* dword 3 */ +} __packed; + +#define SOL_ICD_INDEX_MASK 0x0003FFC0 +struct amap_sol_cqe_ring { + u8 hw_sts[8]; /* dword 0 */ + u8 i_sts[8]; /* dword 0 */ + u8 i_resp[8]; /* dword 0 */ + u8 i_flags[7]; /* dword 0 */ + u8 s; /* dword 0 */ + u8 i_exp_cmd_sn[32]; /* dword 1 */ + u8 code[6]; /* dword 2 */ + u8 icd_index[12]; /* dword 2 */ + u8 rsvd[6]; /* dword 2 */ + u8 i_cmd_wnd[8]; /* dword 2 */ + u8 i_res_cnt[31]; /* dword 3 */ + u8 valid; /* dword 3 */ +} __packed; + +struct amap_sol_cqe_v2 { + u8 hw_sts[8]; /* dword 0 */ + u8 i_sts[8]; /* dword 0 */ + u8 wrb_index[16]; /* dword 0 */ + u8 i_exp_cmd_sn[32]; /* dword 1 */ + u8 code[6]; /* dword 2 */ + u8 cmd_cmpl; /* dword 2 */ + u8 rsvd0; /* dword 2 */ + u8 i_cmd_wnd[8]; /* dword 2 */ + u8 cid[13]; /* dword 2 */ + u8 u; /* dword 2 */ + u8 o; /* dword 2 */ + u8 s; /* dword 2 */ + u8 i_res_cnt[31]; /* dword 3 */ + u8 valid; /* dword 3 */ +} __packed; + +struct common_sol_cqe { + u32 exp_cmdsn; + u32 res_cnt; + u16 wrb_index; + u16 cid; + u8 hw_sts; + u8 cmd_wnd; + u8 res_flag; /* the s feild of structure */ + u8 i_resp; /* for skh if cmd_complete is set then 
i_sts is response */ + u8 i_flags; /* for skh or the u and o feilds */ + u8 i_sts; /* for skh if cmd_complete is not-set then i_sts is status */ +}; + +/*** iSCSI ack/driver message completions ***/ +struct amap_it_dmsg_cqe { + u8 ack_num[32]; /* DWORD 0 */ + u8 pdu_bytes_rcvd[32]; /* DWORD 1 */ + u8 code[6]; /* DWORD 2 */ + u8 cid[10]; /* DWORD 2 */ + u8 wrb_idx[8]; /* DWORD 2 */ + u8 rsvd0[8]; /* DWORD 2*/ + u8 rsvd1[31]; /* DWORD 3*/ + u8 valid; /* DWORD 3 */ +} __packed; + +struct amap_it_dmsg_cqe_v2 { + u8 ack_num[32]; /* DWORD 0 */ + u8 pdu_bytes_rcvd[32]; /* DWORD 1 */ + u8 code[6]; /* DWORD 2 */ + u8 rsvd0[10]; /* DWORD 2 */ + u8 wrb_idx[16]; /* DWORD 2 */ + u8 rsvd1[16]; /* DWORD 3 */ + u8 cid[13]; /* DWORD 3 */ + u8 rsvd2[2]; /* DWORD 3 */ + u8 valid; /* DWORD 3 */ +} __packed; + + +/** + * Post WRB Queue Doorbell Register used by the host Storage + * stack to notify the + * controller of a posted Work Request Block + */ +#define DB_WRB_POST_CID_MASK 0xFFFF /* bits 0 - 16 */ +#define DB_DEF_PDU_WRB_INDEX_MASK 0xFF /* bits 0 - 9 */ + +#define DB_DEF_PDU_WRB_INDEX_SHIFT 16 +#define DB_DEF_PDU_NUM_POSTED_SHIFT 24 + +struct fragnum_bits_for_sgl_cra_in { + struct be_cmd_req_hdr hdr; + u32 num_bits; +} __packed; + +struct iscsi_cleanup_req { + struct be_cmd_req_hdr hdr; + u16 chute; + u8 hdr_ring_id; + u8 data_ring_id; +} __packed; + +struct iscsi_cleanup_req_v1 { + struct be_cmd_req_hdr hdr; + u16 chute; + u16 rsvd1; + u16 hdr_ring_id; + u16 rsvd2; + u16 data_ring_id; + u16 rsvd3; +} __packed; + +struct eq_delay { + u32 eq_id; + u32 phase; + u32 delay_multiplier; +} __packed; + +struct be_eq_delay_params_in { + struct be_cmd_req_hdr hdr; + u32 num_eq; + struct eq_delay delay[8]; +} __packed; + +struct tcp_connect_and_offload_in { + struct be_cmd_req_hdr hdr; + struct ip_addr_format ip_address; + u16 tcp_port; + u16 cid; + u16 cq_id; + u16 defq_id; + struct phys_addr dataout_template_pa; + u16 hdr_ring_id; + u16 data_ring_id; + u8 do_offload; + u8 rsvd0[3]; +} __packed; + +struct tcp_connect_and_offload_in_v1 { + struct be_cmd_req_hdr hdr; + struct ip_addr_format ip_address; + u16 tcp_port; + u16 cid; + u16 cq_id; + u16 defq_id; + struct phys_addr dataout_template_pa; + u16 hdr_ring_id; + u16 data_ring_id; + u8 do_offload; + u8 ifd_state; + u8 rsvd0[2]; + u16 tcp_window_size; + u8 tcp_window_scale_count; + u8 rsvd1; + u32 tcp_mss:24; + u8 rsvd2; +} __packed; + +struct tcp_connect_and_offload_out { + struct be_cmd_resp_hdr hdr; + u32 connection_handle; + u16 cid; + u16 rsvd0; + +} __packed; + +#define DB_DEF_PDU_RING_ID_MASK 0x3FFF /* bits 0 - 13 */ +#define DB_DEF_PDU_CQPROC_MASK 0x3FFF /* bits 16 - 29 */ +#define DB_DEF_PDU_REARM_SHIFT 14 +#define DB_DEF_PDU_EVENT_SHIFT 15 +#define DB_DEF_PDU_CQPROC_SHIFT 16 + +struct be_invalidate_connection_params_in { + struct be_cmd_req_hdr hdr; + u32 session_handle; + u16 cid; + u16 unused; +#define BE_CLEANUP_TYPE_INVALIDATE 0x8001 +#define BE_CLEANUP_TYPE_ISSUE_TCP_RST 0x8002 + u16 cleanup_type; + u16 save_cfg; +} __packed; + +struct be_invalidate_connection_params_out { + u32 session_handle; + u16 cid; + u16 unused; +} __packed; + +union be_invalidate_connection_params { + struct be_invalidate_connection_params_in req; + struct be_invalidate_connection_params_out resp; +} __packed; + +struct be_tcp_upload_params_in { + struct be_cmd_req_hdr hdr; + u16 id; +#define BE_UPLOAD_TYPE_GRACEFUL 1 +/* abortive upload with reset */ +#define BE_UPLOAD_TYPE_ABORT_RESET 2 +/* abortive upload without reset */ +#define BE_UPLOAD_TYPE_ABORT 3 +/* abortive 
upload with reset, sequence number by driver */ +#define BE_UPLOAD_TYPE_ABORT_WITH_SEQ 4 + u16 upload_type; + u32 reset_seq; +} __packed; + +struct be_tcp_upload_params_out { + u32 dw[32]; +} __packed; + +union be_tcp_upload_params { + struct be_tcp_upload_params_in request; + struct be_tcp_upload_params_out response; +} __packed; + +struct be_ulp_fw_cfg { +#define BEISCSI_ULP_ISCSI_INI_MODE 0x10 + u32 ulp_mode; + u32 etx_base; + u32 etx_count; + u32 sq_base; + u32 sq_count; + u32 rq_base; + u32 rq_count; + u32 dq_base; + u32 dq_count; + u32 lro_base; + u32 lro_count; + u32 icd_base; + u32 icd_count; +}; + +struct be_ulp_chain_icd { + u32 chain_base; + u32 chain_count; +}; + +struct be_fw_cfg { + struct be_cmd_req_hdr hdr; + u32 be_config_number; + u32 asic_revision; + u32 phys_port; +#define BEISCSI_FUNC_ISCSI_INI_MODE 0x10 +#define BEISCSI_FUNC_DUA_MODE 0x800 + u32 function_mode; + struct be_ulp_fw_cfg ulp[2]; + u32 function_caps; + u32 cqid_base; + u32 cqid_count; + u32 eqid_base; + u32 eqid_count; + struct be_ulp_chain_icd chain_icd[2]; +} __packed; + +struct be_cmd_get_all_if_id_req { + struct be_cmd_req_hdr hdr; + u32 if_count; + u32 if_hndl_list[1]; +} __packed; + +struct be_cmd_get_port_name { + union { + struct be_cmd_req_hdr req_hdr; + struct be_cmd_resp_hdr resp_hdr; + } h; + union { + struct { + u32 reserved; + } req; + struct { + u32 port_names; + } resp; + } p; +} __packed; + +#define ISCSI_OPCODE_SCSI_DATA_OUT 5 +#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5 +#define OPCODE_COMMON_MODIFY_EQ_DELAY 41 +#define OPCODE_COMMON_ISCSI_CLEANUP 59 +#define OPCODE_COMMON_TCP_UPLOAD 56 +#define OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD 70 +#define OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS 1 +#define OPCODE_ISCSI_INI_CFG_GET_HBA_NAME 6 +#define OPCODE_ISCSI_INI_CFG_SET_HBA_NAME 7 +#define OPCODE_ISCSI_INI_SESSION_GET_A_SESSION 14 +#define OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET 24 +#define OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS 36 +#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41 +#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42 +#define OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET 52 +#define OPCODE_COMMON_WRITE_FLASH 96 +#define OPCODE_COMMON_READ_FLASH 97 + +#define CMD_ISCSI_COMMAND_INVALIDATE 1 + +#define INI_WR_CMD 1 /* Initiator write command */ +#define INI_TMF_CMD 2 /* Initiator TMF command */ +#define INI_NOPOUT_CMD 3 /* Initiator; Send a NOP-OUT */ +#define INI_RD_CMD 5 /* Initiator requesting to send + * a read command + */ +#define TGT_CTX_UPDT_CMD 7 /* Target context update */ +#define TGT_DM_CMD 11 /* Indicates that the bhs + * prepared by driver should not + * be touched. + */ + +/* Returns the number of items in the field array. */ +#define BE_NUMBER_OF_FIELD(_type_, _field_) \ + (sizeof_field(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\ + +/** + * Different types of iSCSI completions to host driver for both initiator + * and taget mode + * of operation. + */ +#define SOL_CMD_COMPLETE 1 /* Solicited command completed + * normally + */ +#define SOL_CMD_KILLED_DATA_DIGEST_ERR 2 /* Solicited command got + * invalidated internally due + * to Data Digest error + */ +#define CXN_KILLED_PDU_SIZE_EXCEEDS_DSL 3 /* Connection got invalidated + * internally + * due to a received PDU + * size > DSL + */ +#define CXN_KILLED_BURST_LEN_MISMATCH 4 /* Connection got invalidated + * internally due ti received + * PDU sequence size > + * FBL/MBL. 
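[Editorial note, not part of the patch: BE_NUMBER_OF_FIELD() above is an element count for an array member; be_cmd_iscsi_post_sgl_pages() earlier in this patch uses it to size each chunk of the SGL posting loop. A short worked example, illustrative only.]

    /*
     * Illustrative only:
     *   BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req, pages)
     *     = sizeof_field(struct be_post_sgl_pages_req, pages) / sizeof(struct phys_addr)
     *     = (26 * 8) / 8 = 26
     * so SGL pages are posted to the FW in chunks of at most 26 per mailbox command.
     */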
+ */ +#define CXN_KILLED_AHS_RCVD 5 /* Connection got invalidated + * internally due to a received + * PDU Hdr that has + * AHS */ +#define CXN_KILLED_HDR_DIGEST_ERR 6 /* Connection got invalidated + * internally due to Hdr Digest + * error + */ +#define CXN_KILLED_UNKNOWN_HDR 7 /* Connection got invalidated + * internally + * due to a bad opcode in the + * pdu hdr + */ +#define CXN_KILLED_STALE_ITT_TTT_RCVD 8 /* Connection got invalidated + * internally due to a received + * ITT/TTT that does not belong + * to this Connection + */ +#define CXN_KILLED_INVALID_ITT_TTT_RCVD 9 /* Connection got invalidated + * internally due to received + * ITT/TTT value > Max + * Supported ITTs/TTTs + */ +#define CXN_KILLED_RST_RCVD 10 /* Connection got invalidated + * internally due to an + * incoming TCP RST + */ +#define CXN_KILLED_TIMED_OUT 11 /* Connection got invalidated + * internally due to timeout on + * tcp segment 12 retransmit + * attempts failed + */ +#define CXN_KILLED_RST_SENT 12 /* Connection got invalidated + * internally due to TCP RST + * sent by the Tx side + */ +#define CXN_KILLED_FIN_RCVD 13 /* Connection got invalidated + * internally due to an + * incoming TCP FIN. + */ +#define CXN_KILLED_BAD_UNSOL_PDU_RCVD 14 /* Connection got invalidated + * internally due to bad + * unsolicited PDU Unsolicited + * PDUs are PDUs with + * ITT=0xffffffff + */ +#define CXN_KILLED_BAD_WRB_INDEX_ERROR 15 /* Connection got invalidated + * internally due to bad WRB + * index. + */ +#define CXN_KILLED_OVER_RUN_RESIDUAL 16 /* Command got invalidated + * internally due to received + * command has residual + * over run bytes. + */ +#define CXN_KILLED_UNDER_RUN_RESIDUAL 17 /* Command got invalidated + * internally due to received + * command has residual under + * run bytes. + */ +#define CMD_KILLED_INVALID_STATSN_RCVD 18 /* Command got invalidated + * internally due to a received + * PDU has an invalid StatusSN + */ +#define CMD_KILLED_INVALID_R2T_RCVD 19 /* Command got invalidated + * internally due to a received + * an R2T with some invalid + * fields in it + */ +#define CMD_CXN_KILLED_LUN_INVALID 20 /* Command got invalidated + * internally due to received + * PDU has an invalid LUN. + */ +#define CMD_CXN_KILLED_ICD_INVALID 21 /* Command got invalidated + * internally due to the + * corresponding ICD not in a + * valid state + */ +#define CMD_CXN_KILLED_ITT_INVALID 22 /* Command got invalidated due + * to received PDU has an + * invalid ITT. + */ +#define CMD_CXN_KILLED_SEQ_OUTOFORDER 23 /* Command got invalidated due + * to received sequence buffer + * offset is out of order. + */ +#define CMD_CXN_KILLED_INVALID_DATASN_RCVD 24 /* Command got invalidated + * internally due to a + * received PDU has an invalid + * DataSN + */ +#define CXN_INVALIDATE_NOTIFY 25 /* Connection invalidation + * completion notify. + */ +#define CXN_INVALIDATE_INDEX_NOTIFY 26 /* Connection invalidation + * completion + * with data PDU index. + */ +#define CMD_INVALIDATED_NOTIFY 27 /* Command invalidation + * completionnotifify. + */ +#define UNSOL_HDR_NOTIFY 28 /* Unsolicited header notify.*/ +#define UNSOL_DATA_NOTIFY 29 /* Unsolicited data notify.*/ +#define UNSOL_DATA_DIGEST_ERROR_NOTIFY 30 /* Unsolicited data digest + * error notify. + */ +#define DRIVERMSG_NOTIFY 31 /* TCP acknowledge based + * notification. + */ +#define CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN 32 /* Connection got invalidated + * internally due to command + * and data are not on same + * connection. 
+ */ +#define SOL_CMD_KILLED_DIF_ERR 33 /* Solicited command got + * invalidated internally due + * to DIF error + */ +#define CXN_KILLED_SYN_RCVD 34 /* Connection got invalidated + * internally due to incoming + * TCP SYN + */ +#define CXN_KILLED_IMM_DATA_RCVD 35 /* Connection got invalidated + * internally due to an + * incoming Unsolicited PDU + * that has immediate data on + * the cxn + */ + +void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, u32 payload_len, + bool embedded, u8 sge_cnt); + +void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, + u8 subsystem, u8 opcode, u32 cmd_len); +#endif /* !BEISCSI_CMDS_H */ diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c new file mode 100644 index 000000000..8d374ae86 --- /dev/null +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -0,0 +1,1415 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI + * Host Bus Adapters. Refer to the README file included with this package + * for driver version and adapter compatibility. + * + * Copyright (c) 2018 Broadcom. All Rights Reserved. + * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + * + * Contact Information: + * linux-drivers@broadcom.com + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "be_iscsi.h" + +extern struct iscsi_transport beiscsi_iscsi_transport; + +/** + * beiscsi_session_create - creates a new iscsi session + * @ep: pointer to iscsi ep + * @cmds_max: max commands supported + * @qdepth: max queue depth supported + * @initial_cmdsn: initial iscsi CMDSN + */ +struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep, + u16 cmds_max, + u16 qdepth, + u32 initial_cmdsn) +{ + struct Scsi_Host *shost; + struct beiscsi_endpoint *beiscsi_ep; + struct iscsi_cls_session *cls_session; + struct beiscsi_hba *phba; + struct iscsi_session *sess; + struct beiscsi_session *beiscsi_sess; + struct beiscsi_io_task *io_task; + + + if (!ep) { + pr_err("beiscsi_session_create: invalid ep\n"); + return NULL; + } + beiscsi_ep = ep->dd_data; + phba = beiscsi_ep->phba; + + if (!beiscsi_hba_is_online(phba)) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : HBA in error 0x%lx\n", phba->state); + return NULL; + } + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : In beiscsi_session_create\n"); + if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : Cannot handle %d cmds." + "Max cmds per session supported is %d. Using %d." + "\n", cmds_max, + beiscsi_ep->phba->params.wrbs_per_cxn, + beiscsi_ep->phba->params.wrbs_per_cxn); + + cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn; + } + + shost = phba->shost; + cls_session = iscsi_session_setup(&beiscsi_iscsi_transport, + shost, cmds_max, + sizeof(*beiscsi_sess), + sizeof(*io_task), + initial_cmdsn, ISCSI_MAX_TARGET); + if (!cls_session) + return NULL; + sess = cls_session->dd_data; + beiscsi_sess = sess->dd_data; + beiscsi_sess->bhs_pool = dma_pool_create("beiscsi_bhs_pool", + &phba->pcidev->dev, + sizeof(struct be_cmd_bhs), + 64, 0); + if (!beiscsi_sess->bhs_pool) + goto destroy_sess; + + return cls_session; +destroy_sess: + iscsi_session_teardown(cls_session); + return NULL; +} + +/** + * beiscsi_session_destroy - destroys iscsi session + * @cls_session: pointer to iscsi cls session + * + * Destroys iSCSI session instance and releases + * resources allocated for it. 
+ */ +void beiscsi_session_destroy(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *sess = cls_session->dd_data; + struct beiscsi_session *beiscsi_sess = sess->dd_data; + + printk(KERN_INFO "In beiscsi_session_destroy\n"); + dma_pool_destroy(beiscsi_sess->bhs_pool); + iscsi_session_teardown(cls_session); +} + +/** + * beiscsi_session_fail(): Closing session with appropriate error + * @cls_session: ptr to session + **/ +void beiscsi_session_fail(struct iscsi_cls_session *cls_session) +{ + iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); +} + + +/** + * beiscsi_conn_create - create an instance of iscsi connection + * @cls_session: ptr to iscsi_cls_session + * @cid: iscsi cid + */ +struct iscsi_cls_conn * +beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid) +{ + struct beiscsi_hba *phba; + struct Scsi_Host *shost; + struct iscsi_cls_conn *cls_conn; + struct beiscsi_conn *beiscsi_conn; + struct iscsi_conn *conn; + struct iscsi_session *sess; + struct beiscsi_session *beiscsi_sess; + + shost = iscsi_session_to_shost(cls_session); + phba = iscsi_host_priv(shost); + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : In beiscsi_conn_create ,cid" + "from iscsi layer=%d\n", cid); + + cls_conn = iscsi_conn_setup(cls_session, sizeof(*beiscsi_conn), cid); + if (!cls_conn) + return NULL; + + conn = cls_conn->dd_data; + beiscsi_conn = conn->dd_data; + beiscsi_conn->ep = NULL; + beiscsi_conn->phba = phba; + beiscsi_conn->conn = conn; + sess = cls_session->dd_data; + beiscsi_sess = sess->dd_data; + beiscsi_conn->beiscsi_sess = beiscsi_sess; + return cls_conn; +} + +/** + * beiscsi_conn_bind - Binds iscsi session/connection with TCP connection + * @cls_session: pointer to iscsi cls session + * @cls_conn: pointer to iscsi cls conn + * @transport_fd: EP handle(64 bit) + * @is_leading: indicate if this is the session leading connection (MCS) + * + * This function binds the TCP Conn with iSCSI Connection and Session. 
+ */ +int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, + u64 transport_fd, int is_leading) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct beiscsi_conn *beiscsi_conn = conn->dd_data; + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + struct hwi_controller *phwi_ctrlr = phba->phwi_ctrlr; + struct hwi_wrb_context *pwrb_context; + struct beiscsi_endpoint *beiscsi_ep; + struct iscsi_endpoint *ep; + uint16_t cri_index; + int rc = 0; + + ep = iscsi_lookup_endpoint(transport_fd); + if (!ep) + return -EINVAL; + + beiscsi_ep = ep->dd_data; + + if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { + rc = -EINVAL; + goto put_ep; + } + + if (beiscsi_ep->phba != phba) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n", + beiscsi_ep->phba, phba); + rc = -EEXIST; + goto put_ep; + } + cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid); + if (phba->conn_table[cri_index]) { + if (beiscsi_conn != phba->conn_table[cri_index] || + beiscsi_ep != phba->conn_table[cri_index]->ep) { + __beiscsi_log(phba, KERN_ERR, + "BS_%d : conn_table not empty at %u: cid %u conn %p:%p\n", + cri_index, + beiscsi_ep->ep_cid, + beiscsi_conn, + phba->conn_table[cri_index]); + rc = -EINVAL; + goto put_ep; + } + } + + beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid; + beiscsi_conn->ep = beiscsi_ep; + beiscsi_ep->conn = beiscsi_conn; + /** + * Each connection is associated with a WRBQ kept in wrb_context. + * Store doorbell offset for transmit path. + */ + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : cid %d phba->conn_table[%u]=%p\n", + beiscsi_ep->ep_cid, cri_index, beiscsi_conn); + phba->conn_table[cri_index] = beiscsi_conn; + +put_ep: + iscsi_put_endpoint(ep); + return rc; +} + +static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba) +{ + if (phba->ipv4_iface) + return 0; + + phba->ipv4_iface = iscsi_create_iface(phba->shost, + &beiscsi_iscsi_transport, + ISCSI_IFACE_TYPE_IPV4, + 0, 0); + if (!phba->ipv4_iface) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : Could not " + "create default IPv4 address.\n"); + return -ENODEV; + } + + return 0; +} + +static int beiscsi_iface_create_ipv6(struct beiscsi_hba *phba) +{ + if (phba->ipv6_iface) + return 0; + + phba->ipv6_iface = iscsi_create_iface(phba->shost, + &beiscsi_iscsi_transport, + ISCSI_IFACE_TYPE_IPV6, + 0, 0); + if (!phba->ipv6_iface) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : Could not " + "create default IPv6 address.\n"); + return -ENODEV; + } + + return 0; +} + +void beiscsi_iface_create_default(struct beiscsi_hba *phba) +{ + struct be_cmd_get_if_info_resp *if_info; + + if (!beiscsi_if_get_info(phba, BEISCSI_IP_TYPE_V4, &if_info)) { + beiscsi_iface_create_ipv4(phba); + kfree(if_info); + } + + if (!beiscsi_if_get_info(phba, BEISCSI_IP_TYPE_V6, &if_info)) { + beiscsi_iface_create_ipv6(phba); + kfree(if_info); + } +} + +void beiscsi_iface_destroy_default(struct beiscsi_hba *phba) +{ + if (phba->ipv6_iface) { + iscsi_destroy_iface(phba->ipv6_iface); + phba->ipv6_iface = NULL; + } + if (phba->ipv4_iface) { + iscsi_destroy_iface(phba->ipv4_iface); + phba->ipv4_iface = NULL; + } +} + +/** + * beiscsi_iface_config_vlan()- Set the VLAN TAG + * @shost: Scsi Host for the driver instance + * @iface_param: Interface paramters + * 
+ * Set the VLAN TAG for the adapter or disable + * the VLAN config + * + * returns + * Success: 0 + * Failure: Non-Zero Value + **/ +static int +beiscsi_iface_config_vlan(struct Scsi_Host *shost, + struct iscsi_iface_param_info *iface_param) +{ + struct beiscsi_hba *phba = iscsi_host_priv(shost); + int ret = -EPERM; + + switch (iface_param->param) { + case ISCSI_NET_PARAM_VLAN_ENABLED: + ret = 0; + if (iface_param->value[0] != ISCSI_VLAN_ENABLE) + ret = beiscsi_if_set_vlan(phba, BEISCSI_VLAN_DISABLE); + break; + case ISCSI_NET_PARAM_VLAN_TAG: + ret = beiscsi_if_set_vlan(phba, + *((uint16_t *)iface_param->value)); + break; + } + return ret; +} + + +static int +beiscsi_iface_config_ipv4(struct Scsi_Host *shost, + struct iscsi_iface_param_info *info, + void *data, uint32_t dt_len) +{ + struct beiscsi_hba *phba = iscsi_host_priv(shost); + u8 *ip = NULL, *subnet = NULL, *gw; + struct nlattr *nla; + int ret = -EPERM; + + /* Check the param */ + switch (info->param) { + case ISCSI_NET_PARAM_IFACE_ENABLE: + if (info->value[0] == ISCSI_IFACE_ENABLE) + ret = beiscsi_iface_create_ipv4(phba); + else { + iscsi_destroy_iface(phba->ipv4_iface); + phba->ipv4_iface = NULL; + } + break; + case ISCSI_NET_PARAM_IPV4_GW: + gw = info->value; + ret = beiscsi_if_set_gw(phba, BEISCSI_IP_TYPE_V4, gw); + break; + case ISCSI_NET_PARAM_IPV4_BOOTPROTO: + if (info->value[0] == ISCSI_BOOTPROTO_DHCP) + ret = beiscsi_if_en_dhcp(phba, BEISCSI_IP_TYPE_V4); + else if (info->value[0] == ISCSI_BOOTPROTO_STATIC) + /* release DHCP IP address */ + ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4, + NULL, NULL); + else + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : Invalid BOOTPROTO: %d\n", + info->value[0]); + break; + case ISCSI_NET_PARAM_IPV4_ADDR: + ip = info->value; + nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET); + if (nla) { + info = nla_data(nla); + subnet = info->value; + } + ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4, + ip, subnet); + break; + case ISCSI_NET_PARAM_IPV4_SUBNET: + /* + * OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR ioctl needs IP + * and subnet both. Find IP to be applied for this subnet. 
+ */ + subnet = info->value; + nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR); + if (nla) { + info = nla_data(nla); + ip = info->value; + } + ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4, + ip, subnet); + break; + } + + return ret; +} + +static int +beiscsi_iface_config_ipv6(struct Scsi_Host *shost, + struct iscsi_iface_param_info *iface_param, + void *data, uint32_t dt_len) +{ + struct beiscsi_hba *phba = iscsi_host_priv(shost); + int ret = -EPERM; + + switch (iface_param->param) { + case ISCSI_NET_PARAM_IFACE_ENABLE: + if (iface_param->value[0] == ISCSI_IFACE_ENABLE) + ret = beiscsi_iface_create_ipv6(phba); + else { + iscsi_destroy_iface(phba->ipv6_iface); + phba->ipv6_iface = NULL; + } + break; + case ISCSI_NET_PARAM_IPV6_ADDR: + ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V6, + iface_param->value, NULL); + break; + } + + return ret; +} + +int beiscsi_iface_set_param(struct Scsi_Host *shost, + void *data, uint32_t dt_len) +{ + struct iscsi_iface_param_info *iface_param = NULL; + struct beiscsi_hba *phba = iscsi_host_priv(shost); + struct nlattr *attrib; + uint32_t rm_len = dt_len; + int ret; + + if (!beiscsi_hba_is_online(phba)) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : HBA in error 0x%lx\n", phba->state); + return -EBUSY; + } + + /* update interface_handle */ + ret = beiscsi_if_get_handle(phba); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : Getting Interface Handle Failed\n"); + return ret; + } + + nla_for_each_attr(attrib, data, dt_len, rm_len) { + /* ignore nla_type as it is never used */ + if (nla_len(attrib) < sizeof(*iface_param)) + return -EINVAL; + + iface_param = nla_data(attrib); + + if (iface_param->param_type != ISCSI_NET_PARAM) + continue; + + /* + * BE2ISCSI only supports 1 interface + */ + if (iface_param->iface_num) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : Invalid iface_num %d." + "Only iface_num 0 is supported.\n", + iface_param->iface_num); + + return -EINVAL; + } + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : %s.0 set param %d", + (iface_param->iface_type == ISCSI_IFACE_TYPE_IPV4) ? + "ipv4" : "ipv6", iface_param->param); + + ret = -EPERM; + switch (iface_param->param) { + case ISCSI_NET_PARAM_VLAN_ENABLED: + case ISCSI_NET_PARAM_VLAN_TAG: + ret = beiscsi_iface_config_vlan(shost, iface_param); + break; + default: + switch (iface_param->iface_type) { + case ISCSI_IFACE_TYPE_IPV4: + ret = beiscsi_iface_config_ipv4(shost, + iface_param, + data, dt_len); + break; + case ISCSI_IFACE_TYPE_IPV6: + ret = beiscsi_iface_config_ipv6(shost, + iface_param, + data, dt_len); + break; + } + } + + if (ret == -EPERM) { + __beiscsi_log(phba, KERN_ERR, + "BS_%d : %s.0 set param %d not permitted", + (iface_param->iface_type == + ISCSI_IFACE_TYPE_IPV4) ? 
"ipv4" : "ipv6", + iface_param->param); + ret = 0; + } + if (ret) + break; + } + + return ret; +} + +static int __beiscsi_iface_get_param(struct beiscsi_hba *phba, + struct iscsi_iface *iface, + int param, char *buf) +{ + struct be_cmd_get_if_info_resp *if_info; + int len, ip_type = BEISCSI_IP_TYPE_V4; + + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) + ip_type = BEISCSI_IP_TYPE_V6; + + len = beiscsi_if_get_info(phba, ip_type, &if_info); + if (len) + return len; + + switch (param) { + case ISCSI_NET_PARAM_IPV4_ADDR: + len = sprintf(buf, "%pI4\n", if_info->ip_addr.addr); + break; + case ISCSI_NET_PARAM_IPV6_ADDR: + len = sprintf(buf, "%pI6\n", if_info->ip_addr.addr); + break; + case ISCSI_NET_PARAM_IPV4_BOOTPROTO: + if (!if_info->dhcp_state) + len = sprintf(buf, "static\n"); + else + len = sprintf(buf, "dhcp\n"); + break; + case ISCSI_NET_PARAM_IPV4_SUBNET: + len = sprintf(buf, "%pI4\n", if_info->ip_addr.subnet_mask); + break; + case ISCSI_NET_PARAM_VLAN_ENABLED: + len = sprintf(buf, "%s\n", + (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) ? + "disable" : "enable"); + break; + case ISCSI_NET_PARAM_VLAN_ID: + if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) + len = -EINVAL; + else + len = sprintf(buf, "%d\n", + (if_info->vlan_priority & + ISCSI_MAX_VLAN_ID)); + break; + case ISCSI_NET_PARAM_VLAN_PRIORITY: + if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) + len = -EINVAL; + else + len = sprintf(buf, "%d\n", + ((if_info->vlan_priority >> 13) & + ISCSI_MAX_VLAN_PRIORITY)); + break; + default: + WARN_ON(1); + } + + kfree(if_info); + return len; +} + +int beiscsi_iface_get_param(struct iscsi_iface *iface, + enum iscsi_param_type param_type, + int param, char *buf) +{ + struct Scsi_Host *shost = iscsi_iface_to_shost(iface); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + struct be_cmd_get_def_gateway_resp gateway; + int len = -EPERM; + + if (param_type != ISCSI_NET_PARAM) + return 0; + if (!beiscsi_hba_is_online(phba)) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : HBA in error 0x%lx\n", phba->state); + return -EBUSY; + } + + switch (param) { + case ISCSI_NET_PARAM_IPV4_ADDR: + case ISCSI_NET_PARAM_IPV4_SUBNET: + case ISCSI_NET_PARAM_IPV4_BOOTPROTO: + case ISCSI_NET_PARAM_IPV6_ADDR: + case ISCSI_NET_PARAM_VLAN_ENABLED: + case ISCSI_NET_PARAM_VLAN_ID: + case ISCSI_NET_PARAM_VLAN_PRIORITY: + len = __beiscsi_iface_get_param(phba, iface, param, buf); + break; + case ISCSI_NET_PARAM_IFACE_ENABLE: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + len = sprintf(buf, "%s\n", + phba->ipv4_iface ? "enable" : "disable"); + else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) + len = sprintf(buf, "%s\n", + phba->ipv6_iface ? 
"enable" : "disable"); + break; + case ISCSI_NET_PARAM_IPV4_GW: + memset(&gateway, 0, sizeof(gateway)); + len = beiscsi_if_get_gw(phba, BEISCSI_IP_TYPE_V4, &gateway); + if (!len) + len = sprintf(buf, "%pI4\n", &gateway.ip_addr.addr); + break; + } + + return len; +} + +/** + * beiscsi_ep_get_param - get the iscsi parameter + * @ep: pointer to iscsi ep + * @param: parameter type identifier + * @buf: buffer pointer + * + * returns iscsi parameter + */ +int beiscsi_ep_get_param(struct iscsi_endpoint *ep, + enum iscsi_param param, char *buf) +{ + struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; + int len; + + beiscsi_log(beiscsi_ep->phba, KERN_INFO, + BEISCSI_LOG_CONFIG, + "BS_%d : In beiscsi_ep_get_param," + " param= %d\n", param); + + switch (param) { + case ISCSI_PARAM_CONN_PORT: + len = sprintf(buf, "%hu\n", beiscsi_ep->dst_tcpport); + break; + case ISCSI_PARAM_CONN_ADDRESS: + if (beiscsi_ep->ip_type == BEISCSI_IP_TYPE_V4) + len = sprintf(buf, "%pI4\n", &beiscsi_ep->dst_addr); + else + len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr); + break; + default: + len = -EPERM; + } + return len; +} + +int beiscsi_set_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf, int buflen) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct beiscsi_hba *phba = NULL; + int ret; + + phba = ((struct beiscsi_conn *)conn->dd_data)->phba; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : In beiscsi_conn_set_param," + " param= %d\n", param); + + ret = iscsi_set_param(cls_conn, param, buf, buflen); + if (ret) + return ret; + /* + * If userspace tried to set the value to higher than we can + * support override here. + */ + switch (param) { + case ISCSI_PARAM_FIRST_BURST: + if (session->first_burst > 8192) + session->first_burst = 8192; + break; + case ISCSI_PARAM_MAX_RECV_DLENGTH: + if (conn->max_recv_dlength > 65536) + conn->max_recv_dlength = 65536; + break; + case ISCSI_PARAM_MAX_BURST: + if (session->max_burst > 262144) + session->max_burst = 262144; + break; + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + if (conn->max_xmit_dlength > 65536) + conn->max_xmit_dlength = 65536; + fallthrough; + default: + return 0; + } + + return 0; +} + +/** + * beiscsi_get_port_state - Get the Port State + * @shost : pointer to scsi_host structure + * + */ +static void beiscsi_get_port_state(struct Scsi_Host *shost) +{ + struct beiscsi_hba *phba = iscsi_host_priv(shost); + struct iscsi_cls_host *ihost = shost->shost_data; + + ihost->port_state = test_bit(BEISCSI_HBA_LINK_UP, &phba->state) ? 
+ ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN; +} + +/** + * beiscsi_get_port_speed - Get the Port Speed from Adapter + * @shost : pointer to scsi_host structure + * + */ +static void beiscsi_get_port_speed(struct Scsi_Host *shost) +{ + struct beiscsi_hba *phba = iscsi_host_priv(shost); + struct iscsi_cls_host *ihost = shost->shost_data; + + switch (phba->port_speed) { + case BE2ISCSI_LINK_SPEED_10MBPS: + ihost->port_speed = ISCSI_PORT_SPEED_10MBPS; + break; + case BE2ISCSI_LINK_SPEED_100MBPS: + ihost->port_speed = ISCSI_PORT_SPEED_100MBPS; + break; + case BE2ISCSI_LINK_SPEED_1GBPS: + ihost->port_speed = ISCSI_PORT_SPEED_1GBPS; + break; + case BE2ISCSI_LINK_SPEED_10GBPS: + ihost->port_speed = ISCSI_PORT_SPEED_10GBPS; + break; + case BE2ISCSI_LINK_SPEED_25GBPS: + ihost->port_speed = ISCSI_PORT_SPEED_25GBPS; + break; + case BE2ISCSI_LINK_SPEED_40GBPS: + ihost->port_speed = ISCSI_PORT_SPEED_40GBPS; + break; + default: + ihost->port_speed = ISCSI_PORT_SPEED_UNKNOWN; + } +} + +/** + * beiscsi_get_host_param - get the iscsi parameter + * @shost: pointer to scsi_host structure + * @param: parameter type identifier + * @buf: buffer pointer + * + */ +int beiscsi_get_host_param(struct Scsi_Host *shost, + enum iscsi_host_param param, char *buf) +{ + struct beiscsi_hba *phba = iscsi_host_priv(shost); + int status = 0; + + if (!beiscsi_hba_is_online(phba)) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : HBA in error 0x%lx\n", phba->state); + return 0; + } + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : In beiscsi_get_host_param, param = %d\n", param); + + switch (param) { + case ISCSI_HOST_PARAM_HWADDRESS: + status = beiscsi_get_macaddr(buf, phba); + if (status < 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : beiscsi_get_macaddr Failed\n"); + return 0; + } + break; + case ISCSI_HOST_PARAM_INITIATOR_NAME: + /* try fetching user configured name first */ + status = beiscsi_get_initiator_name(phba, buf, true); + if (status < 0) { + status = beiscsi_get_initiator_name(phba, buf, false); + if (status < 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : Retrieving Initiator Name Failed\n"); + status = 0; + } + } + break; + case ISCSI_HOST_PARAM_PORT_STATE: + beiscsi_get_port_state(shost); + status = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost)); + break; + case ISCSI_HOST_PARAM_PORT_SPEED: + beiscsi_get_port_speed(shost); + status = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost)); + break; + default: + return iscsi_host_get_param(shost, param, buf); + } + return status; +} + +int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba) +{ + struct be_cmd_get_nic_conf_resp resp; + int rc; + + if (phba->mac_addr_set) + return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); + + memset(&resp, 0, sizeof(resp)); + rc = mgmt_get_nic_conf(phba, &resp); + if (rc) + return rc; + + phba->mac_addr_set = true; + memcpy(phba->mac_address, resp.mac_address, ETH_ALEN); + return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); +} + +/** + * beiscsi_conn_get_stats - get the iscsi stats + * @cls_conn: pointer to iscsi cls conn + * @stats: pointer to iscsi_stats structure + * + * returns iscsi stats + */ +void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, + struct iscsi_stats *stats) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct beiscsi_hba *phba = NULL; + + phba = ((struct beiscsi_conn *)conn->dd_data)->phba; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : In beiscsi_conn_get_stats\n"); + + 
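
beiscsi_get_macaddr() above asks the firmware for the NIC configuration only on the first call and answers every later request from the copy cached in the host structure. Below is a minimal userspace sketch of that fetch-once-then-cache shape; struct fake_hba and fake_fetch_mac_from_fw() are stand-ins invented for the example, not driver symbols.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct fake_hba {
	bool mac_addr_set;
	unsigned char mac_address[ETH_ALEN];
};

/* Stand-in for the firmware query (mgmt_get_nic_conf() in the driver). */
static int fake_fetch_mac_from_fw(unsigned char *mac)
{
	const unsigned char fw_mac[ETH_ALEN] = { 0x00, 0x90, 0xfa, 0x12, 0x34, 0x56 };

	memcpy(mac, fw_mac, ETH_ALEN);
	return 0;
}

/* Same shape as beiscsi_get_macaddr(): query once, then serve from cache. */
static int get_macaddr(char *buf, size_t len, struct fake_hba *hba)
{
	if (!hba->mac_addr_set) {
		int rc = fake_fetch_mac_from_fw(hba->mac_address);

		if (rc)
			return rc;
		hba->mac_addr_set = true;
	}
	return snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x\n",
			hba->mac_address[0], hba->mac_address[1],
			hba->mac_address[2], hba->mac_address[3],
			hba->mac_address[4], hba->mac_address[5]);
}

int main(void)
{
	struct fake_hba hba = { 0 };
	char buf[32];

	get_macaddr(buf, sizeof(buf), &hba);	/* first call hits the "firmware" */
	get_macaddr(buf, sizeof(buf), &hba);	/* second call uses the cache */
	fputs(buf, stdout);
	return 0;
}
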
stats->txdata_octets = conn->txdata_octets; + stats->rxdata_octets = conn->rxdata_octets; + stats->dataout_pdus = conn->dataout_pdus_cnt; + stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; + stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; + stats->datain_pdus = conn->datain_pdus_cnt; + stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; + stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; + stats->r2t_pdus = conn->r2t_pdus_cnt; + stats->digest_err = 0; + stats->timeout_err = 0; + stats->custom_length = 1; + strcpy(stats->custom[0].desc, "eh_abort_cnt"); + stats->custom[0].value = conn->eh_abort_cnt; +} + +/** + * beiscsi_set_params_for_offld - get the parameters for offload + * @beiscsi_conn: pointer to beiscsi_conn + * @params: pointer to offload_params structure + */ +static void beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn, + struct beiscsi_offload_params *params) +{ + struct iscsi_conn *conn = beiscsi_conn->conn; + struct iscsi_session *session = conn->session; + + AMAP_SET_BITS(struct amap_beiscsi_offload_params, max_burst_length, + params, session->max_burst); + AMAP_SET_BITS(struct amap_beiscsi_offload_params, + max_send_data_segment_length, params, + conn->max_xmit_dlength); + AMAP_SET_BITS(struct amap_beiscsi_offload_params, first_burst_length, + params, session->first_burst); + AMAP_SET_BITS(struct amap_beiscsi_offload_params, erl, params, + session->erl); + AMAP_SET_BITS(struct amap_beiscsi_offload_params, dde, params, + conn->datadgst_en); + AMAP_SET_BITS(struct amap_beiscsi_offload_params, hde, params, + conn->hdrdgst_en); + AMAP_SET_BITS(struct amap_beiscsi_offload_params, ir2t, params, + session->initial_r2t_en); + AMAP_SET_BITS(struct amap_beiscsi_offload_params, imd, params, + session->imm_data_en); + AMAP_SET_BITS(struct amap_beiscsi_offload_params, + data_seq_inorder, params, + session->dataseq_inorder_en); + AMAP_SET_BITS(struct amap_beiscsi_offload_params, + pdu_seq_inorder, params, + session->pdu_inorder_en); + AMAP_SET_BITS(struct amap_beiscsi_offload_params, max_r2t, params, + session->max_r2t); + AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params, + (conn->exp_statsn - 1)); + AMAP_SET_BITS(struct amap_beiscsi_offload_params, + max_recv_data_segment_length, params, + conn->max_recv_dlength); + +} + +/** + * beiscsi_conn_start - offload of session to chip + * @cls_conn: pointer to beiscsi_conn + */ +int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct beiscsi_conn *beiscsi_conn = conn->dd_data; + struct beiscsi_endpoint *beiscsi_ep; + struct beiscsi_offload_params params; + struct beiscsi_hba *phba; + + phba = ((struct beiscsi_conn *)conn->dd_data)->phba; + + if (!beiscsi_hba_is_online(phba)) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : HBA in error 0x%lx\n", phba->state); + return -EBUSY; + } + beiscsi_log(beiscsi_conn->phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : In beiscsi_conn_start\n"); + + memset(¶ms, 0, sizeof(struct beiscsi_offload_params)); + beiscsi_ep = beiscsi_conn->ep; + if (!beiscsi_ep) + beiscsi_log(beiscsi_conn->phba, KERN_ERR, + BEISCSI_LOG_CONFIG, + "BS_%d : In beiscsi_conn_start , no beiscsi_ep\n"); + + beiscsi_conn->login_in_progress = 0; + beiscsi_set_params_for_offld(beiscsi_conn, ¶ms); + beiscsi_offload_connection(beiscsi_conn, ¶ms); + iscsi_conn_start(cls_conn); + return 0; +} + +/** + * beiscsi_get_cid - Allocate a cid + * @phba: The phba instance + */ +static int beiscsi_get_cid(struct beiscsi_hba *phba) +{ + uint16_t cid_avlbl_ulp0, 
cid_avlbl_ulp1; + unsigned short cid, cid_from_ulp; + struct ulp_cid_info *cid_info; + + /* Find the ULP which has more CID available */ + cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ? + BEISCSI_ULP0_AVLBL_CID(phba) : 0; + cid_avlbl_ulp1 = (phba->cid_array_info[BEISCSI_ULP1]) ? + BEISCSI_ULP1_AVLBL_CID(phba) : 0; + cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ? + BEISCSI_ULP0 : BEISCSI_ULP1; + /** + * If iSCSI protocol is loaded only on ULP 0, and when cid_avlbl_ulp + * is ZERO for both, ULP 1 is returned. + * Check if ULP is loaded before getting new CID. + */ + if (!test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) + return BE_INVALID_CID; + + cid_info = phba->cid_array_info[cid_from_ulp]; + cid = cid_info->cid_array[cid_info->cid_alloc]; + if (!cid_info->avlbl_cids || cid == BE_INVALID_CID) { + __beiscsi_log(phba, KERN_ERR, + "BS_%d : failed to get cid: available %u:%u\n", + cid_info->avlbl_cids, cid_info->cid_free); + return BE_INVALID_CID; + } + /* empty the slot */ + cid_info->cid_array[cid_info->cid_alloc++] = BE_INVALID_CID; + if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(phba, cid_from_ulp)) + cid_info->cid_alloc = 0; + cid_info->avlbl_cids--; + return cid; +} + +/** + * beiscsi_put_cid - Free the cid + * @phba: The phba for which the cid is being freed + * @cid: The cid to free + */ +static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid) +{ + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); + struct hwi_wrb_context *pwrb_context; + struct hwi_controller *phwi_ctrlr; + struct ulp_cid_info *cid_info; + uint16_t cid_post_ulp; + + phwi_ctrlr = phba->phwi_ctrlr; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + cid_post_ulp = pwrb_context->ulp_num; + + cid_info = phba->cid_array_info[cid_post_ulp]; + /* fill only in empty slot */ + if (cid_info->cid_array[cid_info->cid_free] != BE_INVALID_CID) { + __beiscsi_log(phba, KERN_ERR, + "BS_%d : failed to put cid %u: available %u:%u\n", + cid, cid_info->avlbl_cids, cid_info->cid_free); + return; + } + cid_info->cid_array[cid_info->cid_free++] = cid; + if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp)) + cid_info->cid_free = 0; + cid_info->avlbl_cids++; +} + +/** + * beiscsi_free_ep - free endpoint + * @beiscsi_ep: pointer to device endpoint struct + */ +static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep) +{ + struct beiscsi_hba *phba = beiscsi_ep->phba; + struct beiscsi_conn *beiscsi_conn; + + beiscsi_put_cid(phba, beiscsi_ep->ep_cid); + beiscsi_ep->phba = NULL; + /* clear this to track freeing in beiscsi_ep_disconnect */ + phba->ep_array[BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid)] = NULL; + + /** + * Check if any connection resource allocated by driver + * is to be freed.This case occurs when target redirection + * or connection retry is done. + **/ + if (!beiscsi_ep->conn) + return; + + beiscsi_conn = beiscsi_ep->conn; + /** + * Break ep->conn link here so that completions after + * this are ignored. 
+ */ + beiscsi_ep->conn = NULL; + if (beiscsi_conn->login_in_progress) { + beiscsi_free_mgmt_task_handles(beiscsi_conn, + beiscsi_conn->task); + beiscsi_conn->login_in_progress = 0; + } +} + +/** + * beiscsi_open_conn - Ask FW to open a TCP connection + * @ep: pointer to device endpoint struct + * @src_addr: The source IP address + * @dst_addr: The Destination IP address + * @non_blocking: blocking or non-blocking call + * + * Asks the FW to open a TCP connection + */ +static int beiscsi_open_conn(struct iscsi_endpoint *ep, + struct sockaddr *src_addr, + struct sockaddr *dst_addr, int non_blocking) +{ + struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; + struct beiscsi_hba *phba = beiscsi_ep->phba; + struct tcp_connect_and_offload_out *ptcpcnct_out; + struct be_dma_mem nonemb_cmd; + unsigned int tag, req_memsize; + int ret = -ENOMEM; + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : In beiscsi_open_conn\n"); + + beiscsi_ep->ep_cid = beiscsi_get_cid(phba); + if (beiscsi_ep->ep_cid == BE_INVALID_CID) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : No free cid available\n"); + return ret; + } + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : In beiscsi_open_conn, ep_cid=%d\n", + beiscsi_ep->ep_cid); + + phba->ep_array[BE_GET_CRI_FROM_CID + (beiscsi_ep->ep_cid)] = ep; + + beiscsi_ep->cid_vld = 0; + + if (is_chip_be2_be3r(phba)) + req_memsize = sizeof(struct tcp_connect_and_offload_in); + else + req_memsize = sizeof(struct tcp_connect_and_offload_in_v1); + + nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev, + req_memsize, + &nonemb_cmd.dma, GFP_KERNEL); + if (nonemb_cmd.va == NULL) { + + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : Failed to allocate memory for" + " mgmt_open_connection\n"); + + beiscsi_free_ep(beiscsi_ep); + return -ENOMEM; + } + nonemb_cmd.size = req_memsize; + memset(nonemb_cmd.va, 0, nonemb_cmd.size); + tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); + if (!tag) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : mgmt_open_connection Failed for cid=%d\n", + beiscsi_ep->ep_cid); + + dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + beiscsi_free_ep(beiscsi_ep); + return -EAGAIN; + } + + ret = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); + if (ret) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BS_%d : mgmt_open_connection Failed"); + + if (ret != -EBUSY) + dma_free_coherent(&phba->ctrl.pdev->dev, + nonemb_cmd.size, nonemb_cmd.va, + nonemb_cmd.dma); + + beiscsi_free_ep(beiscsi_ep); + return ret; + } + + ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va; + beiscsi_ep = ep->dd_data; + beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle; + beiscsi_ep->cid_vld = 1; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : mgmt_open_connection Success\n"); + + dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + return 0; +} + +/** + * beiscsi_ep_connect - Ask chip to create TCP Conn + * @shost: Pointer to scsi_host structure + * @dst_addr: The IP address of Target + * @non_blocking: blocking or non-blocking call + * + * This routines first asks chip to create a connection and then allocates an EP + */ +struct iscsi_endpoint * +beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, + int non_blocking) +{ + struct beiscsi_hba *phba; + struct beiscsi_endpoint *beiscsi_ep; + struct iscsi_endpoint *ep; + int ret; + + if 
(!shost) { + ret = -ENXIO; + pr_err("beiscsi_ep_connect shost is NULL\n"); + return ERR_PTR(ret); + } + + phba = iscsi_host_priv(shost); + if (!beiscsi_hba_is_online(phba)) { + ret = -EIO; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : HBA in error 0x%lx\n", phba->state); + return ERR_PTR(ret); + } + if (!test_bit(BEISCSI_HBA_LINK_UP, &phba->state)) { + ret = -EBUSY; + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BS_%d : The Adapter Port state is Down!!!\n"); + return ERR_PTR(ret); + } + + ep = iscsi_create_endpoint(sizeof(struct beiscsi_endpoint)); + if (!ep) { + ret = -ENOMEM; + return ERR_PTR(ret); + } + + beiscsi_ep = ep->dd_data; + beiscsi_ep->phba = phba; + beiscsi_ep->openiscsi_ep = ep; + ret = beiscsi_open_conn(ep, NULL, dst_addr, non_blocking); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : Failed in beiscsi_open_conn\n"); + goto free_ep; + } + + return ep; + +free_ep: + iscsi_destroy_endpoint(ep); + return ERR_PTR(ret); +} + +/** + * beiscsi_ep_poll - Poll to see if connection is established + * @ep: endpoint to be used + * @timeout_ms: timeout specified in millisecs + * + * Poll to see if TCP connection established + */ +int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) +{ + struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; + + beiscsi_log(beiscsi_ep->phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : In beiscsi_ep_poll\n"); + + if (beiscsi_ep->cid_vld == 1) + return 1; + else + return 0; +} + +/** + * beiscsi_flush_cq()- Flush the CQ created. + * @phba: ptr device priv structure. + * + * Before the connection resource are freed flush + * all the CQ enteries + **/ +static void beiscsi_flush_cq(struct beiscsi_hba *phba) +{ + uint16_t i; + struct be_eq_obj *pbe_eq; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + irq_poll_disable(&pbe_eq->iopoll); + beiscsi_process_cq(pbe_eq, BE2_MAX_NUM_CQ_PROC); + irq_poll_enable(&pbe_eq->iopoll); + } +} + +/** + * beiscsi_conn_close - Invalidate and upload connection + * @beiscsi_ep: pointer to device endpoint struct + * + * Returns 0 on success, -1 on failure. + */ +static int beiscsi_conn_close(struct beiscsi_endpoint *beiscsi_ep) +{ + struct beiscsi_hba *phba = beiscsi_ep->phba; + unsigned int tag, attempts; + int ret; + + /** + * Without successfully invalidating and uploading connection + * driver can't reuse the CID so attempt more than once. 
+ */ + attempts = 0; + while (attempts++ < 3) { + tag = beiscsi_invalidate_cxn(phba, beiscsi_ep); + if (tag) { + ret = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL); + if (!ret) + break; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : invalidate conn failed cid %d\n", + beiscsi_ep->ep_cid); + } + } + + /* wait for all completions to arrive, then process them */ + msleep(250); + /* flush CQ entries */ + beiscsi_flush_cq(phba); + + if (attempts > 3) + return -1; + + attempts = 0; + while (attempts++ < 3) { + tag = beiscsi_upload_cxn(phba, beiscsi_ep); + if (tag) { + ret = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL); + if (!ret) + break; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : upload conn failed cid %d\n", + beiscsi_ep->ep_cid); + } + } + if (attempts > 3) + return -1; + + return 0; +} + +/** + * beiscsi_ep_disconnect - Tears down the TCP connection + * @ep: endpoint to be used + * + * Tears down the TCP connection + */ +void beiscsi_ep_disconnect(struct iscsi_endpoint *ep) +{ + struct beiscsi_endpoint *beiscsi_ep; + struct beiscsi_hba *phba; + uint16_t cri_index; + + beiscsi_ep = ep->dd_data; + phba = beiscsi_ep->phba; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : In beiscsi_ep_disconnect for ep_cid = %u\n", + beiscsi_ep->ep_cid); + + cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid); + if (!phba->ep_array[cri_index]) { + __beiscsi_log(phba, KERN_ERR, + "BS_%d : ep_array at %u cid %u empty\n", + cri_index, + beiscsi_ep->ep_cid); + return; + } + + if (!beiscsi_hba_is_online(phba)) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : HBA in error 0x%lx\n", phba->state); + } else { + /** + * Make CID available even if close fails. + * If not freed, FW might fail open using the CID. + */ + if (beiscsi_conn_close(beiscsi_ep) < 0) + __beiscsi_log(phba, KERN_ERR, + "BS_%d : close conn failed cid %d\n", + beiscsi_ep->ep_cid); + } + + beiscsi_free_ep(beiscsi_ep); + if (!phba->conn_table[cri_index]) + __beiscsi_log(phba, KERN_ERR, + "BS_%d : conn_table empty at %u: cid %u\n", + cri_index, beiscsi_ep->ep_cid); + phba->conn_table[cri_index] = NULL; + iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep); +} + +umode_t beiscsi_attr_is_visible(int param_type, int param) +{ + switch (param_type) { + case ISCSI_NET_PARAM: + switch (param) { + case ISCSI_NET_PARAM_IFACE_ENABLE: + case ISCSI_NET_PARAM_IPV4_ADDR: + case ISCSI_NET_PARAM_IPV4_SUBNET: + case ISCSI_NET_PARAM_IPV4_BOOTPROTO: + case ISCSI_NET_PARAM_IPV4_GW: + case ISCSI_NET_PARAM_IPV6_ADDR: + case ISCSI_NET_PARAM_VLAN_ID: + case ISCSI_NET_PARAM_VLAN_PRIORITY: + case ISCSI_NET_PARAM_VLAN_ENABLED: + return S_IRUGO; + default: + return 0; + } + case ISCSI_HOST_PARAM: + switch (param) { + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_INITIATOR_NAME: + case ISCSI_HOST_PARAM_PORT_STATE: + case ISCSI_HOST_PARAM_PORT_SPEED: + return S_IRUGO; + default: + return 0; + } + case ISCSI_PARAM: + switch (param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + case ISCSI_PARAM_HDRDGST_EN: + case ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_PERSISTENT_ADDRESS: + case ISCSI_PARAM_PERSISTENT_PORT: + case ISCSI_PARAM_PING_TMO: + case ISCSI_PARAM_RECV_TMO: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_MAX_R2T: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_FIRST_BURST: + case ISCSI_PARAM_MAX_BURST: + case ISCSI_PARAM_PDU_INORDER_EN: + case 
ISCSI_PARAM_DATASEQ_INORDER_EN: + case ISCSI_PARAM_ERL: + case ISCSI_PARAM_TARGET_NAME: + case ISCSI_PARAM_TPGT: + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + case ISCSI_PARAM_USERNAME_IN: + case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_FAST_ABORT: + case ISCSI_PARAM_ABORT_TMO: + case ISCSI_PARAM_LU_RESET_TMO: + case ISCSI_PARAM_IFACE_NAME: + case ISCSI_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + } + + return 0; +} diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h new file mode 100644 index 000000000..8c88657ee --- /dev/null +++ b/drivers/scsi/be2iscsi/be_iscsi.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2017 Broadcom. All Rights Reserved. + * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. + * + * Contact Information: + * linux-drivers@broadcom.com + */ + +#ifndef _BE_ISCSI_ +#define _BE_ISCSI_ + +#include "be_main.h" +#include "be_mgmt.h" + +void beiscsi_iface_create_default(struct beiscsi_hba *phba); + +void beiscsi_iface_destroy_default(struct beiscsi_hba *phba); + +int beiscsi_iface_get_param(struct iscsi_iface *iface, + enum iscsi_param_type param_type, + int param, char *buf); + +int beiscsi_iface_set_param(struct Scsi_Host *shost, + void *data, uint32_t count); + +umode_t beiscsi_attr_is_visible(int param_type, int param); + +void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, + struct beiscsi_offload_params *params); + +void beiscsi_offload_iscsi(struct beiscsi_hba *phba, struct iscsi_conn *conn, + struct beiscsi_conn *beiscsi_conn, + unsigned int fw_handle); + +struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep, + uint16_t cmds_max, + uint16_t qdepth, + uint32_t initial_cmdsn); + +void beiscsi_session_destroy(struct iscsi_cls_session *cls_session); + +void beiscsi_session_fail(struct iscsi_cls_session *cls_session); + +struct iscsi_cls_conn *beiscsi_conn_create(struct iscsi_cls_session + *cls_session, uint32_t cid); + +int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, + uint64_t transport_fd, int is_leading); + +int beiscsi_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param, + char *buf); + +int beiscsi_get_host_param(struct Scsi_Host *shost, + enum iscsi_host_param param, char *buf); + +int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba); + +int beiscsi_set_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf, int buflen); + +int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn); + +struct iscsi_endpoint *beiscsi_ep_connect(struct Scsi_Host *shost, + struct sockaddr *dst_addr, + int non_blocking); + +int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms); + +void beiscsi_ep_disconnect(struct iscsi_endpoint *ep); + +void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, + struct iscsi_stats *stats); + +#endif diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c new file mode 100644 index 000000000..06acb5ff6 --- /dev/null +++ b/drivers/scsi/be2iscsi/be_main.c @@ -0,0 +1,5863 @@ +/* + * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI + * Host Bus Adapters. Refer to the README file included with this package + * for driver version and adapter compatibility. + * + * Copyright (c) 2018 Broadcom. All Rights Reserved. + * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful. ALL EXPRESS + * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY + * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, + * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH + * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. + * See the GNU General Public License for more details, a copy of which + * can be found in the file COPYING included with this package. + * + * Contact Information: + * linux-drivers@broadcom.com + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "be_main.h" +#include "be_iscsi.h" +#include "be_mgmt.h" +#include "be_cmds.h" + +static unsigned int be_iopoll_budget = 10; +static unsigned int be_max_phys_size = 64; +static unsigned int enable_msix = 1; + +MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); +MODULE_VERSION(BUILD_STR); +MODULE_AUTHOR("Emulex Corporation"); +MODULE_LICENSE("GPL"); +module_param(be_iopoll_budget, int, 0); +module_param(enable_msix, int, 0); +module_param(be_max_phys_size, uint, S_IRUGO); +MODULE_PARM_DESC(be_max_phys_size, + "Maximum Size (In Kilobytes) of physically contiguous " + "memory that can be allocated. Range is 16 - 128"); + +#define beiscsi_disp_param(_name)\ +static ssize_t \ +beiscsi_##_name##_disp(struct device *dev,\ + struct device_attribute *attrib, char *buf) \ +{ \ + struct Scsi_Host *shost = class_to_shost(dev);\ + struct beiscsi_hba *phba = iscsi_host_priv(shost); \ + return snprintf(buf, PAGE_SIZE, "%d\n",\ + phba->attr_##_name);\ +} + +#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\ +static int \ +beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\ +{\ + if (val >= _minval && val <= _maxval) {\ + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ + "BA_%d : beiscsi_"#_name" updated "\ + "from 0x%x ==> 0x%x\n",\ + phba->attr_##_name, val); \ + phba->attr_##_name = val;\ + return 0;\ + } \ + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \ + "BA_%d beiscsi_"#_name" attribute "\ + "cannot be updated to 0x%x, "\ + "range allowed is ["#_minval" - "#_maxval"]\n", val);\ + return -EINVAL;\ +} + +#define beiscsi_store_param(_name) \ +static ssize_t \ +beiscsi_##_name##_store(struct device *dev,\ + struct device_attribute *attr, const char *buf,\ + size_t count) \ +{ \ + struct Scsi_Host *shost = class_to_shost(dev);\ + struct beiscsi_hba *phba = iscsi_host_priv(shost);\ + uint32_t param_val = 0;\ + if (!isdigit(buf[0]))\ + return -EINVAL;\ + if (sscanf(buf, "%i", ¶m_val) != 1)\ + return -EINVAL;\ + if (beiscsi_##_name##_change(phba, param_val) == 0) \ + return strlen(buf);\ + else \ + return -EINVAL;\ +} + +#define beiscsi_init_param(_name, _minval, _maxval, _defval) \ +static int \ +beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \ +{ \ + if (val >= _minval && val <= _maxval) {\ + phba->attr_##_name = val;\ + return 0;\ + } \ + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ + "BA_%d beiscsi_"#_name" attribute " \ + "cannot be updated to 0x%x, "\ + "range allowed is ["#_minval" - "#_maxval"]\n", val);\ + phba->attr_##_name = _defval;\ + return -EINVAL;\ +} + +#define 
BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \ +static uint beiscsi_##_name = _defval;\ +module_param(beiscsi_##_name, uint, S_IRUGO);\ +MODULE_PARM_DESC(beiscsi_##_name, _descp);\ +beiscsi_disp_param(_name)\ +beiscsi_change_param(_name, _minval, _maxval, _defval)\ +beiscsi_store_param(_name)\ +beiscsi_init_param(_name, _minval, _maxval, _defval)\ +static DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\ + beiscsi_##_name##_disp, beiscsi_##_name##_store) + +/* + * When new log level added update MAX allowed value for log_enable + */ +BEISCSI_RW_ATTR(log_enable, 0x00, + 0xFF, 0x00, "Enable logging Bit Mask\n" + "\t\t\t\tInitialization Events : 0x01\n" + "\t\t\t\tMailbox Events : 0x02\n" + "\t\t\t\tMiscellaneous Events : 0x04\n" + "\t\t\t\tError Handling : 0x08\n" + "\t\t\t\tIO Path Events : 0x10\n" + "\t\t\t\tConfiguration Path : 0x20\n" + "\t\t\t\tiSCSI Protocol : 0x40\n"); + +static DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); +static DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); +static DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); +static DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL); +static DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO, + beiscsi_active_session_disp, NULL); +static DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO, + beiscsi_free_session_disp, NULL); + +static struct attribute *beiscsi_attrs[] = { + &dev_attr_beiscsi_log_enable.attr, + &dev_attr_beiscsi_drvr_ver.attr, + &dev_attr_beiscsi_adapter_family.attr, + &dev_attr_beiscsi_fw_ver.attr, + &dev_attr_beiscsi_active_session_count.attr, + &dev_attr_beiscsi_free_session_count.attr, + &dev_attr_beiscsi_phys_port.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(beiscsi); + +static char const *cqe_desc[] = { + "RESERVED_DESC", + "SOL_CMD_COMPLETE", + "SOL_CMD_KILLED_DATA_DIGEST_ERR", + "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL", + "CXN_KILLED_BURST_LEN_MISMATCH", + "CXN_KILLED_AHS_RCVD", + "CXN_KILLED_HDR_DIGEST_ERR", + "CXN_KILLED_UNKNOWN_HDR", + "CXN_KILLED_STALE_ITT_TTT_RCVD", + "CXN_KILLED_INVALID_ITT_TTT_RCVD", + "CXN_KILLED_RST_RCVD", + "CXN_KILLED_TIMED_OUT", + "CXN_KILLED_RST_SENT", + "CXN_KILLED_FIN_RCVD", + "CXN_KILLED_BAD_UNSOL_PDU_RCVD", + "CXN_KILLED_BAD_WRB_INDEX_ERROR", + "CXN_KILLED_OVER_RUN_RESIDUAL", + "CXN_KILLED_UNDER_RUN_RESIDUAL", + "CMD_KILLED_INVALID_STATSN_RCVD", + "CMD_KILLED_INVALID_R2T_RCVD", + "CMD_CXN_KILLED_LUN_INVALID", + "CMD_CXN_KILLED_ICD_INVALID", + "CMD_CXN_KILLED_ITT_INVALID", + "CMD_CXN_KILLED_SEQ_OUTOFORDER", + "CMD_CXN_KILLED_INVALID_DATASN_RCVD", + "CXN_INVALIDATE_NOTIFY", + "CXN_INVALIDATE_INDEX_NOTIFY", + "CMD_INVALIDATED_NOTIFY", + "UNSOL_HDR_NOTIFY", + "UNSOL_DATA_NOTIFY", + "UNSOL_DATA_DIGEST_ERROR_NOTIFY", + "DRIVERMSG_NOTIFY", + "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN", + "SOL_CMD_KILLED_DIF_ERR", + "CXN_KILLED_SYN_RCVD", + "CXN_KILLED_IMM_DATA_RCVD" +}; + +static int beiscsi_eh_abort(struct scsi_cmnd *sc) +{ + struct iscsi_task *abrt_task = iscsi_cmd(sc)->task; + struct iscsi_cls_session *cls_session; + struct beiscsi_io_task *abrt_io_task; + struct beiscsi_conn *beiscsi_conn; + struct iscsi_session *session; + struct invldt_cmd_tbl inv_tbl; + struct beiscsi_hba *phba; + struct iscsi_conn *conn; + int rc; + + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; + +completion_check: + /* check if we raced, task just got cleaned up under us */ + spin_lock_bh(&session->back_lock); + if (!abrt_task || !abrt_task->sc) { + 
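
The BEISCSI_RW_ATTR() block above uses token pasting to stamp out, per tunable, a display routine, a range-checked update routine, a sysfs store routine, and an init routine from a single macro invocation. The sketch below reproduces just that stamping pattern in plain userspace C; DEFINE_RW_PARAM() and its generated helpers are illustrative stand-ins, and only the log_enable name and its 0x00 - 0xFF range come from the driver.

#include <stdio.h>

/* Generate a getter plus a range-checked setter for one named tunable,
 * mirroring how beiscsi_disp_param()/beiscsi_change_param() are stamped out. */
#define DEFINE_RW_PARAM(_name, _min, _max, _def)			\
static unsigned int param_##_name = (_def);				\
static unsigned int _name##_show(void)					\
{									\
	return param_##_name;						\
}									\
static int _name##_store(unsigned int val)				\
{									\
	if (val >= (_min) && val <= (_max)) {				\
		param_##_name = val;					\
		return 0;						\
	}								\
	fprintf(stderr, #_name ": 0x%x outside [" #_min " - " #_max "]\n", val); \
	return -1;							\
}

/* One invocation defines param_log_enable, log_enable_show(), log_enable_store(). */
DEFINE_RW_PARAM(log_enable, 0x00, 0xFF, 0x00)

int main(void)
{
	log_enable_store(0x42);		/* accepted: inside the range */
	log_enable_store(0x1FF);	/* rejected: outside 0x00 - 0xFF */
	printf("log_enable = 0x%02x\n", log_enable_show());
	return 0;
}
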
spin_unlock_bh(&session->back_lock); + return SUCCESS; + } + /* get a task ref till FW processes the req for the ICD used */ + if (!iscsi_get_task(abrt_task)) { + spin_unlock(&session->back_lock); + /* We are just about to call iscsi_free_task so wait for it. */ + udelay(5); + goto completion_check; + } + + abrt_io_task = abrt_task->dd_data; + conn = abrt_task->conn; + beiscsi_conn = conn->dd_data; + phba = beiscsi_conn->phba; + /* mark WRB invalid which have been not processed by FW yet */ + if (is_chip_be2_be3r(phba)) { + AMAP_SET_BITS(struct amap_iscsi_wrb, invld, + abrt_io_task->pwrb_handle->pwrb, 1); + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld, + abrt_io_task->pwrb_handle->pwrb, 1); + } + inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid; + inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index; + spin_unlock_bh(&session->back_lock); + + rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1); + iscsi_put_task(abrt_task); + if (rc) { + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, + "BM_%d : sc %p invalidation failed %d\n", + sc, rc); + return FAILED; + } + + return iscsi_eh_abort(sc); +} + +static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) +{ + struct beiscsi_invldt_cmd_tbl { + struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ]; + struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ]; + } *inv_tbl; + struct iscsi_cls_session *cls_session; + struct beiscsi_conn *beiscsi_conn; + struct beiscsi_io_task *io_task; + struct iscsi_session *session; + struct beiscsi_hba *phba; + struct iscsi_conn *conn; + struct iscsi_task *task; + unsigned int i, nents; + int rc, more = 0; + + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; + + spin_lock_bh(&session->frwd_lock); + if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) { + spin_unlock_bh(&session->frwd_lock); + return FAILED; + } + + conn = session->leadconn; + beiscsi_conn = conn->dd_data; + phba = beiscsi_conn->phba; + + inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC); + if (!inv_tbl) { + spin_unlock_bh(&session->frwd_lock); + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, + "BM_%d : invldt_cmd_tbl alloc failed\n"); + return FAILED; + } + nents = 0; + /* take back_lock to prevent task from getting cleaned up under us */ + spin_lock(&session->back_lock); + for (i = 0; i < conn->session->cmds_max; i++) { + task = conn->session->cmds[i]; + if (!task->sc) + continue; + + if (sc->device->lun != task->sc->device->lun) + continue; + /** + * Can't fit in more cmds? Normally this won't happen b'coz + * BEISCSI_CMD_PER_LUN is same as BE_INVLDT_CMD_TBL_SZ. + */ + if (nents == BE_INVLDT_CMD_TBL_SZ) { + more = 1; + break; + } + + /* get a task ref till FW processes the req for the ICD used */ + if (!iscsi_get_task(task)) { + /* + * The task has completed in the driver and is + * completing in libiscsi. Just ignore it here. When we + * call iscsi_eh_device_reset, it will wait for us. 
+ */ + continue; + } + + io_task = task->dd_data; + /* mark WRB invalid which have been not processed by FW yet */ + if (is_chip_be2_be3r(phba)) { + AMAP_SET_BITS(struct amap_iscsi_wrb, invld, + io_task->pwrb_handle->pwrb, 1); + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld, + io_task->pwrb_handle->pwrb, 1); + } + + inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid; + inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index; + inv_tbl->task[nents] = task; + nents++; + } + spin_unlock(&session->back_lock); + spin_unlock_bh(&session->frwd_lock); + + rc = SUCCESS; + if (!nents) + goto end_reset; + + if (more) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, + "BM_%d : number of cmds exceeds size of invalidation table\n"); + rc = FAILED; + goto end_reset; + } + + if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) { + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, + "BM_%d : cid %u scmds invalidation failed\n", + beiscsi_conn->beiscsi_conn_cid); + rc = FAILED; + } + +end_reset: + for (i = 0; i < nents; i++) + iscsi_put_task(inv_tbl->task[i]); + kfree(inv_tbl); + + if (rc == SUCCESS) + rc = iscsi_eh_device_reset(sc); + return rc; +} + +/*------------------- PCI Driver operations and data ----------------- */ +static const struct pci_device_id beiscsi_pci_id_table[] = { + { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, + { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, + { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, + { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, + { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, + { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); + + +static const struct scsi_host_template beiscsi_sht = { + .module = THIS_MODULE, + .name = "Emulex 10Gbe open-iscsi Initiator Driver", + .proc_name = DRV_NAME, + .queuecommand = iscsi_queuecommand, + .change_queue_depth = scsi_change_queue_depth, + .target_alloc = iscsi_target_alloc, + .eh_timed_out = iscsi_eh_cmd_timed_out, + .eh_abort_handler = beiscsi_eh_abort, + .eh_device_reset_handler = beiscsi_eh_device_reset, + .eh_target_reset_handler = iscsi_eh_session_reset, + .shost_groups = beiscsi_groups, + .sg_tablesize = BEISCSI_SGLIST_ELEMENTS, + .can_queue = BE2_IO_DEPTH, + .this_id = -1, + .max_sectors = BEISCSI_MAX_SECTORS, + .max_segment_size = 65536, + .cmd_per_lun = BEISCSI_CMD_PER_LUN, + .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID, + .track_queue_depth = 1, + .cmd_size = sizeof(struct iscsi_cmd), +}; + +static struct scsi_transport_template *beiscsi_scsi_transport; + +static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) +{ + struct beiscsi_hba *phba; + struct Scsi_Host *shost; + + shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0); + if (!shost) { + dev_err(&pcidev->dev, + "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); + return NULL; + } + shost->max_id = BE2_MAX_SESSIONS - 1; + shost->max_channel = 0; + shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; + shost->max_lun = BEISCSI_NUM_MAX_LUN; + shost->transportt = beiscsi_scsi_transport; + phba = iscsi_host_priv(shost); + memset(phba, 0, sizeof(*phba)); + phba->shost = shost; + phba->pcidev = pci_dev_get(pcidev); + pci_set_drvdata(pcidev, phba); + phba->interface_handle = 0xFFFFFFFF; + + return phba; +} + +static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) +{ + if (phba->csr_va) { + iounmap(phba->csr_va); + phba->csr_va = NULL; + } + if (phba->db_va) { + iounmap(phba->db_va); + phba->db_va = NULL; + } + if (phba->pci_va) { + iounmap(phba->pci_va); + phba->pci_va = 
NULL; + } +} + +static int beiscsi_map_pci_bars(struct beiscsi_hba *phba, + struct pci_dev *pcidev) +{ + u8 __iomem *addr; + int pcicfg_reg; + + addr = ioremap(pci_resource_start(pcidev, 2), + pci_resource_len(pcidev, 2)); + if (addr == NULL) + return -ENOMEM; + phba->ctrl.csr = addr; + phba->csr_va = addr; + + addr = ioremap(pci_resource_start(pcidev, 4), 128 * 1024); + if (addr == NULL) + goto pci_map_err; + phba->ctrl.db = addr; + phba->db_va = addr; + + if (phba->generation == BE_GEN2) + pcicfg_reg = 1; + else + pcicfg_reg = 0; + + addr = ioremap(pci_resource_start(pcidev, pcicfg_reg), + pci_resource_len(pcidev, pcicfg_reg)); + + if (addr == NULL) + goto pci_map_err; + phba->ctrl.pcicfg = addr; + phba->pci_va = addr; + return 0; + +pci_map_err: + beiscsi_unmap_pci_function(phba); + return -ENOMEM; +} + +static int beiscsi_enable_pci(struct pci_dev *pcidev) +{ + int ret; + + ret = pci_enable_device(pcidev); + if (ret) { + dev_err(&pcidev->dev, + "beiscsi_enable_pci - enable device failed\n"); + return ret; + } + + ret = pci_request_regions(pcidev, DRV_NAME); + if (ret) { + dev_err(&pcidev->dev, + "beiscsi_enable_pci - request region failed\n"); + goto pci_dev_disable; + } + + pci_set_master(pcidev); + ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64)); + if (ret) { + ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); + goto pci_region_release; + } + } + return 0; + +pci_region_release: + pci_release_regions(pcidev); +pci_dev_disable: + pci_disable_device(pcidev); + + return ret; +} + +static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced; + struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem; + int status = 0; + + ctrl->pdev = pdev; + status = beiscsi_map_pci_bars(phba, pdev); + if (status) + return status; + mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; + mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev, + mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL); + if (!mbox_mem_alloc->va) { + beiscsi_unmap_pci_function(phba); + return -ENOMEM; + } + + mbox_mem_align->size = sizeof(struct be_mcc_mailbox); + mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); + mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); + memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); + mutex_init(&ctrl->mbox_lock); + spin_lock_init(&phba->ctrl.mcc_lock); + + return status; +} + +/** + * beiscsi_get_params()- Set the config paramters + * @phba: ptr device priv structure + **/ +static void beiscsi_get_params(struct beiscsi_hba *phba) +{ + uint32_t total_cid_count = 0; + uint32_t total_icd_count = 0; + uint8_t ulp_num = 0; + + total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) + + BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1); + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + uint32_t align_mask = 0; + uint32_t icd_post_per_page = 0; + uint32_t icd_count_unavailable = 0; + uint32_t icd_start = 0, icd_count = 0; + uint32_t icd_start_align = 0, icd_count_align = 0; + + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; + icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; + + /* Get ICD count that can be posted on each page */ + icd_post_per_page = (PAGE_SIZE / (BE2_SGE * + sizeof(struct iscsi_sge))); + align_mask = (icd_post_per_page - 1); + + /* Check if icd_start is 
aligned ICD per page posting */ + if (icd_start % icd_post_per_page) { + icd_start_align = ((icd_start + + icd_post_per_page) & + ~(align_mask)); + phba->fw_config. + iscsi_icd_start[ulp_num] = + icd_start_align; + } + + icd_count_align = (icd_count & ~align_mask); + + /* ICD discarded in the process of alignment */ + if (icd_start_align) + icd_count_unavailable = ((icd_start_align - + icd_start) + + (icd_count - + icd_count_align)); + + /* Updated ICD count available */ + phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count - + icd_count_unavailable); + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : Aligned ICD values\n" + "\t ICD Start : %d\n" + "\t ICD Count : %d\n" + "\t ICD Discarded : %d\n", + phba->fw_config. + iscsi_icd_start[ulp_num], + phba->fw_config. + iscsi_icd_count[ulp_num], + icd_count_unavailable); + break; + } + } + + total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; + phba->params.ios_per_ctrl = (total_icd_count - + (total_cid_count + + BE2_TMFS + BE2_NOPOUT_REQ)); + phba->params.cxns_per_ctrl = total_cid_count; + phba->params.icds_per_ctrl = total_icd_count; + phba->params.num_sge_per_io = BE2_SGE; + phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; + phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; + phba->params.num_eq_entries = 1024; + phba->params.num_cq_entries = 1024; + phba->params.wrbs_per_cxn = 256; +} + +static void hwi_ring_eq_db(struct beiscsi_hba *phba, + unsigned int id, unsigned int clr_interrupt, + unsigned int num_processed, + unsigned char rearm, unsigned char event) +{ + u32 val = 0; + + if (rearm) + val |= 1 << DB_EQ_REARM_SHIFT; + if (clr_interrupt) + val |= 1 << DB_EQ_CLR_SHIFT; + if (event) + val |= 1 << DB_EQ_EVNT_SHIFT; + + val |= num_processed << DB_EQ_NUM_POPPED_SHIFT; + /* Setting lower order EQ_ID Bits */ + val |= (id & DB_EQ_RING_ID_LOW_MASK); + + /* Setting Higher order EQ_ID Bits */ + val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) & + DB_EQ_RING_ID_HIGH_MASK) + << DB_EQ_HIGH_SET_SHIFT); + + iowrite32(val, phba->db_va + DB_EQ_OFFSET); +} + +/** + * be_isr_mcc - The isr routine of the driver. + * @irq: Not used + * @dev_id: Pointer to host adapter structure + */ +static irqreturn_t be_isr_mcc(int irq, void *dev_id) +{ + struct beiscsi_hba *phba; + struct be_eq_entry *eqe; + struct be_queue_info *eq; + struct be_queue_info *mcc; + unsigned int mcc_events; + struct be_eq_obj *pbe_eq; + + pbe_eq = dev_id; + eq = &pbe_eq->q; + phba = pbe_eq->phba; + mcc = &phba->ctrl.mcc_obj.cq; + eqe = queue_tail_node(eq); + + mcc_events = 0; + while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] + & EQE_VALID_MASK) { + if (((eqe->dw[offsetof(struct amap_eq_entry, + resource_id) / 32] & + EQE_RESID_MASK) >> 16) == mcc->id) { + mcc_events++; + } + AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); + queue_tail_inc(eq); + eqe = queue_tail_node(eq); + } + + if (mcc_events) { + queue_work(phba->wq, &pbe_eq->mcc_work); + hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1); + } + return IRQ_HANDLED; +} + +/** + * be_isr_msix - The isr routine of the driver. + * @irq: Not used + * @dev_id: Pointer to host adapter structure + */ +static irqreturn_t be_isr_msix(int irq, void *dev_id) +{ + struct beiscsi_hba *phba; + struct be_queue_info *eq; + struct be_eq_obj *pbe_eq; + + pbe_eq = dev_id; + eq = &pbe_eq->q; + + phba = pbe_eq->phba; + /* disable interrupt till iopoll completes */ + hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1); + irq_poll_sched(&pbe_eq->iopoll); + + return IRQ_HANDLED; +} + +/** + * be_isr - The isr routine of the driver. 
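+ * Registered when MSI-X is not enabled: reads CEV_ISR0 for this PCI
+ * function, walks EQ0, queues the MCC work item for MCC completions
+ * and schedules irq_poll for I/O completions.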
+ * @irq: Not used + * @dev_id: Pointer to host adapter structure + */ +static irqreturn_t be_isr(int irq, void *dev_id) +{ + struct beiscsi_hba *phba; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + struct be_eq_entry *eqe; + struct be_queue_info *eq; + struct be_queue_info *mcc; + unsigned int mcc_events, io_events; + struct be_ctrl_info *ctrl; + struct be_eq_obj *pbe_eq; + int isr, rearm; + + phba = dev_id; + ctrl = &phba->ctrl; + isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET + + (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE)); + if (!isr) + return IRQ_NONE; + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + pbe_eq = &phwi_context->be_eq[0]; + + eq = &phwi_context->be_eq[0].q; + mcc = &phba->ctrl.mcc_obj.cq; + eqe = queue_tail_node(eq); + + io_events = 0; + mcc_events = 0; + while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] + & EQE_VALID_MASK) { + if (((eqe->dw[offsetof(struct amap_eq_entry, + resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id) + mcc_events++; + else + io_events++; + AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); + queue_tail_inc(eq); + eqe = queue_tail_node(eq); + } + if (!io_events && !mcc_events) + return IRQ_NONE; + + /* no need to rearm if interrupt is only for IOs */ + rearm = 0; + if (mcc_events) { + queue_work(phba->wq, &pbe_eq->mcc_work); + /* rearm for MCCQ */ + rearm = 1; + } + if (io_events) + irq_poll_sched(&pbe_eq->iopoll); + hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1); + return IRQ_HANDLED; +} + +static void beiscsi_free_irqs(struct beiscsi_hba *phba) +{ + struct hwi_context_memory *phwi_context; + int i; + + if (!phba->pcidev->msix_enabled) { + if (phba->pcidev->irq) + free_irq(phba->pcidev->irq, phba); + return; + } + + phwi_context = phba->phwi_ctrlr->phwi_ctxt; + for (i = 0; i <= phba->num_cpus; i++) { + free_irq(pci_irq_vector(phba->pcidev, i), + &phwi_context->be_eq[i]); + kfree(phba->msi_name[i]); + } +} + +static int beiscsi_init_irqs(struct beiscsi_hba *phba) +{ + struct pci_dev *pcidev = phba->pcidev; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + int ret, i, j; + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + + if (pcidev->msix_enabled) { + for (i = 0; i < phba->num_cpus; i++) { + phba->msi_name[i] = kasprintf(GFP_KERNEL, + "beiscsi_%02x_%02x", + phba->shost->host_no, i); + if (!phba->msi_name[i]) { + ret = -ENOMEM; + goto free_msix_irqs; + } + + ret = request_irq(pci_irq_vector(pcidev, i), + be_isr_msix, 0, phba->msi_name[i], + &phwi_context->be_eq[i]); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : %s-Failed to register msix for i = %d\n", + __func__, i); + kfree(phba->msi_name[i]); + goto free_msix_irqs; + } + } + phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x", + phba->shost->host_no); + if (!phba->msi_name[i]) { + ret = -ENOMEM; + goto free_msix_irqs; + } + ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0, + phba->msi_name[i], &phwi_context->be_eq[i]); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : %s-Failed to register beiscsi_msix_mcc\n", + __func__); + kfree(phba->msi_name[i]); + goto free_msix_irqs; + } + + } else { + ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, + "beiscsi", phba); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : %s-Failed to register irq\n", + __func__); + return ret; + } + } + return 0; +free_msix_irqs: + for (j = i - 1; j >= 0; j--) { + 
free_irq(pci_irq_vector(pcidev, i), &phwi_context->be_eq[j]); + kfree(phba->msi_name[j]); + } + return ret; +} + +void hwi_ring_cq_db(struct beiscsi_hba *phba, + unsigned int id, unsigned int num_processed, + unsigned char rearm) +{ + u32 val = 0; + + if (rearm) + val |= 1 << DB_CQ_REARM_SHIFT; + + val |= num_processed << DB_CQ_NUM_POPPED_SHIFT; + + /* Setting lower order CQ_ID Bits */ + val |= (id & DB_CQ_RING_ID_LOW_MASK); + + /* Setting Higher order CQ_ID Bits */ + val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) & + DB_CQ_RING_ID_HIGH_MASK) + << DB_CQ_HIGH_SET_SHIFT); + + iowrite32(val, phba->db_va + DB_CQ_OFFSET); +} + +static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) +{ + struct sgl_handle *psgl_handle; + unsigned long flags; + + spin_lock_irqsave(&phba->io_sgl_lock, flags); + if (phba->io_sgl_hndl_avbl) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, + "BM_%d : In alloc_io_sgl_handle," + " io_sgl_alloc_index=%d\n", + phba->io_sgl_alloc_index); + + psgl_handle = phba->io_sgl_hndl_base[phba-> + io_sgl_alloc_index]; + phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL; + phba->io_sgl_hndl_avbl--; + if (phba->io_sgl_alloc_index == (phba->params. + ios_per_ctrl - 1)) + phba->io_sgl_alloc_index = 0; + else + phba->io_sgl_alloc_index++; + } else + psgl_handle = NULL; + spin_unlock_irqrestore(&phba->io_sgl_lock, flags); + return psgl_handle; +} + +static void +free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) +{ + unsigned long flags; + + spin_lock_irqsave(&phba->io_sgl_lock, flags); + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, + "BM_%d : In free_,io_sgl_free_index=%d\n", + phba->io_sgl_free_index); + + if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) { + /* + * this can happen if clean_task is called on a task that + * failed in xmit_task or alloc_pdu. 
+ */ + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, + "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n", + phba->io_sgl_free_index, + phba->io_sgl_hndl_base[phba->io_sgl_free_index]); + spin_unlock_irqrestore(&phba->io_sgl_lock, flags); + return; + } + phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle; + phba->io_sgl_hndl_avbl++; + if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1)) + phba->io_sgl_free_index = 0; + else + phba->io_sgl_free_index++; + spin_unlock_irqrestore(&phba->io_sgl_lock, flags); +} + +static inline struct wrb_handle * +beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context, + unsigned int wrbs_per_cxn) +{ + struct wrb_handle *pwrb_handle; + unsigned long flags; + + spin_lock_irqsave(&pwrb_context->wrb_lock, flags); + if (!pwrb_context->wrb_handles_available) { + spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); + return NULL; + } + pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index]; + pwrb_context->wrb_handles_available--; + if (pwrb_context->alloc_index == (wrbs_per_cxn - 1)) + pwrb_context->alloc_index = 0; + else + pwrb_context->alloc_index++; + spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); + + if (pwrb_handle) + memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb)); + + return pwrb_handle; +} + +/** + * alloc_wrb_handle - To allocate a wrb handle + * @phba: The hba pointer + * @cid: The cid to use for allocation + * @pcontext: ptr to ptr to wrb context + * + * This happens under session_lock until submission to chip + */ +struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid, + struct hwi_wrb_context **pcontext) +{ + struct hwi_wrb_context *pwrb_context; + struct hwi_controller *phwi_ctrlr; + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); + + phwi_ctrlr = phba->phwi_ctrlr; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + /* return the context address */ + *pcontext = pwrb_context; + return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn); +} + +static inline void +beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context, + struct wrb_handle *pwrb_handle, + unsigned int wrbs_per_cxn) +{ + unsigned long flags; + + spin_lock_irqsave(&pwrb_context->wrb_lock, flags); + pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; + pwrb_context->wrb_handles_available++; + if (pwrb_context->free_index == (wrbs_per_cxn - 1)) + pwrb_context->free_index = 0; + else + pwrb_context->free_index++; + pwrb_handle->pio_handle = NULL; + spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); +} + +/** + * free_wrb_handle - To free the wrb handle back to pool + * @phba: The hba pointer + * @pwrb_context: The context to free from + * @pwrb_handle: The wrb_handle to free + * + * This happens under session_lock until submission to chip + */ +static void +free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, + struct wrb_handle *pwrb_handle) +{ + beiscsi_put_wrb_handle(pwrb_context, + pwrb_handle, + phba->params.wrbs_per_cxn); + beiscsi_log(phba, KERN_INFO, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x " + "wrb_handles_available=%d\n", + pwrb_handle, pwrb_context->free_index, + pwrb_context->wrb_handles_available); +} + +static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) +{ + struct sgl_handle *psgl_handle; + unsigned long flags; + + spin_lock_irqsave(&phba->mgmt_sgl_lock, flags); + if (phba->eh_sgl_hndl_avbl) { + psgl_handle = 
phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index]; + phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n", + phba->eh_sgl_alloc_index, + phba->eh_sgl_alloc_index); + + phba->eh_sgl_hndl_avbl--; + if (phba->eh_sgl_alloc_index == + (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - + 1)) + phba->eh_sgl_alloc_index = 0; + else + phba->eh_sgl_alloc_index++; + } else + psgl_handle = NULL; + spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); + return psgl_handle; +} + +void +free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) +{ + unsigned long flags; + + spin_lock_irqsave(&phba->mgmt_sgl_lock, flags); + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BM_%d : In free_mgmt_sgl_handle," + "eh_sgl_free_index=%d\n", + phba->eh_sgl_free_index); + + if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { + /* + * this can happen if clean_task is called on a task that + * failed in xmit_task or alloc_pdu. + */ + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BM_%d : Double Free in eh SGL ," + "eh_sgl_free_index=%d\n", + phba->eh_sgl_free_index); + spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); + return; + } + phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle; + phba->eh_sgl_hndl_avbl++; + if (phba->eh_sgl_free_index == + (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1)) + phba->eh_sgl_free_index = 0; + else + phba->eh_sgl_free_index++; + spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); +} + +static void +be_complete_io(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task, + struct common_sol_cqe *csol_cqe) +{ + struct beiscsi_io_task *io_task = task->dd_data; + struct be_status_bhs *sts_bhs = + (struct be_status_bhs *)io_task->cmd_bhs; + struct iscsi_conn *conn = beiscsi_conn->conn; + unsigned char *sense; + u32 resid = 0, exp_cmdsn, max_cmdsn; + u8 rsp, status, flags; + + exp_cmdsn = csol_cqe->exp_cmdsn; + max_cmdsn = (csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); + rsp = csol_cqe->i_resp; + status = csol_cqe->i_sts; + flags = csol_cqe->i_flags; + resid = csol_cqe->res_cnt; + + if (!task->sc) { + if (io_task->scsi_cmnd) { + scsi_dma_unmap(io_task->scsi_cmnd); + io_task->scsi_cmnd = NULL; + } + + return; + } + task->sc->result = (DID_OK << 16) | status; + if (rsp != ISCSI_STATUS_CMD_COMPLETED) { + task->sc->result = DID_ERROR << 16; + goto unmap; + } + + /* bidi not initially supported */ + if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) { + if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW)) + task->sc->result = DID_ERROR << 16; + + if (flags & ISCSI_FLAG_CMD_UNDERFLOW) { + scsi_set_resid(task->sc, resid); + if (!status && (scsi_bufflen(task->sc) - resid < + task->sc->underflow)) + task->sc->result = DID_ERROR << 16; + } + } + + if (status == SAM_STAT_CHECK_CONDITION) { + u16 sense_len; + unsigned short *slen = (unsigned short *)sts_bhs->sense_info; + + sense = sts_bhs->sense_info + sizeof(unsigned short); + sense_len = be16_to_cpu(*slen); + memcpy(task->sc->sense_buffer, sense, + min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); + } + + if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) + conn->rxdata_octets += resid; +unmap: + if (io_task->scsi_cmnd) { + scsi_dma_unmap(io_task->scsi_cmnd); + io_task->scsi_cmnd = NULL; + } + iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn); +} + +static void +be_complete_logout(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task, + struct 
common_sol_cqe *csol_cqe) +{ + struct iscsi_logout_rsp *hdr; + struct beiscsi_io_task *io_task = task->dd_data; + struct iscsi_conn *conn = beiscsi_conn->conn; + + hdr = (struct iscsi_logout_rsp *)task->hdr; + hdr->opcode = ISCSI_OP_LOGOUT_RSP; + hdr->t2wait = 5; + hdr->t2retain = 0; + hdr->flags = csol_cqe->i_flags; + hdr->response = csol_cqe->i_resp; + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); + + hdr->dlength[0] = 0; + hdr->dlength[1] = 0; + hdr->dlength[2] = 0; + hdr->hlength = 0; + hdr->itt = io_task->libiscsi_itt; + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); +} + +static void +be_complete_tmf(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task, + struct common_sol_cqe *csol_cqe) +{ + struct iscsi_tm_rsp *hdr; + struct iscsi_conn *conn = beiscsi_conn->conn; + struct beiscsi_io_task *io_task = task->dd_data; + + hdr = (struct iscsi_tm_rsp *)task->hdr; + hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; + hdr->flags = csol_cqe->i_flags; + hdr->response = csol_cqe->i_resp; + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); + + hdr->itt = io_task->libiscsi_itt; + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); +} + +static void +hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, + struct beiscsi_hba *phba, struct sol_cqe *psol) +{ + struct hwi_wrb_context *pwrb_context; + uint16_t wrb_index, cid, cri_index; + struct hwi_controller *phwi_ctrlr; + struct wrb_handle *pwrb_handle; + struct iscsi_session *session; + struct iscsi_task *task; + + phwi_ctrlr = phba->phwi_ctrlr; + if (is_chip_be2_be3r(phba)) { + wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, + wrb_idx, psol); + cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, + cid, psol); + } else { + wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, + wrb_idx, psol); + cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, + cid, psol); + } + + cri_index = BE_GET_CRI_FROM_CID(cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; + session = beiscsi_conn->conn->session; + spin_lock_bh(&session->back_lock); + task = pwrb_handle->pio_handle; + if (task) + __iscsi_put_task(task); + spin_unlock_bh(&session->back_lock); +} + +static void +be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task, + struct common_sol_cqe *csol_cqe) +{ + struct iscsi_nopin *hdr; + struct iscsi_conn *conn = beiscsi_conn->conn; + struct beiscsi_io_task *io_task = task->dd_data; + + hdr = (struct iscsi_nopin *)task->hdr; + hdr->flags = csol_cqe->i_flags; + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); + + hdr->opcode = ISCSI_OP_NOOP_IN; + hdr->itt = io_task->libiscsi_itt; + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); +} + +static void adapter_get_sol_cqe(struct beiscsi_hba *phba, + struct sol_cqe *psol, + struct common_sol_cqe *csol_cqe) +{ + if (is_chip_be2_be3r(phba)) { + csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, + i_exp_cmd_sn, psol); + csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, + i_res_cnt, psol); + csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, + i_cmd_wnd, psol); + csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, + wrb_index, psol); + csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, + cid, psol); + csol_cqe->hw_sts = 
AMAP_GET_BITS(struct amap_sol_cqe, + hw_sts, psol); + csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, + i_resp, psol); + csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, + i_sts, psol); + csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, + i_flags, psol); + } else { + csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, + i_exp_cmd_sn, psol); + csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2, + i_res_cnt, psol); + csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2, + wrb_index, psol); + csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, + cid, psol); + csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, + hw_sts, psol); + csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2, + i_cmd_wnd, psol); + if (AMAP_GET_BITS(struct amap_sol_cqe_v2, + cmd_cmpl, psol)) + csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, + i_sts, psol); + else + csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2, + i_sts, psol); + if (AMAP_GET_BITS(struct amap_sol_cqe_v2, + u, psol)) + csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW; + + if (AMAP_GET_BITS(struct amap_sol_cqe_v2, + o, psol)) + csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; + } +} + + +static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, + struct beiscsi_hba *phba, struct sol_cqe *psol) +{ + struct iscsi_conn *conn = beiscsi_conn->conn; + struct iscsi_session *session = conn->session; + struct common_sol_cqe csol_cqe = {0}; + struct hwi_wrb_context *pwrb_context; + struct hwi_controller *phwi_ctrlr; + struct wrb_handle *pwrb_handle; + struct iscsi_task *task; + uint16_t cri_index = 0; + uint8_t type; + + phwi_ctrlr = phba->phwi_ctrlr; + + /* Copy the elements to a common structure */ + adapter_get_sol_cqe(phba, psol, &csol_cqe); + + cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + + pwrb_handle = pwrb_context->pwrb_handle_basestd[ + csol_cqe.wrb_index]; + + spin_lock_bh(&session->back_lock); + task = pwrb_handle->pio_handle; + if (!task) { + spin_unlock_bh(&session->back_lock); + return; + } + type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type; + + switch (type) { + case HWH_TYPE_IO: + case HWH_TYPE_IO_RD: + if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == + ISCSI_OP_NOOP_OUT) + be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); + else + be_complete_io(beiscsi_conn, task, &csol_cqe); + break; + + case HWH_TYPE_LOGOUT: + if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) + be_complete_logout(beiscsi_conn, task, &csol_cqe); + else + be_complete_tmf(beiscsi_conn, task, &csol_cqe); + break; + + case HWH_TYPE_LOGIN: + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, + "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in" + " %s- Solicited path\n", __func__); + break; + + case HWH_TYPE_NOP: + be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); + break; + + default: + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, + "BM_%d : In %s, unknown type = %d " + "wrb_index 0x%x CID 0x%x\n", __func__, type, + csol_cqe.wrb_index, + csol_cqe.cid); + break; + } + + spin_unlock_bh(&session->back_lock); +} + +/* + * ASYNC PDUs include + * a. Unsolicited NOP-In (target initiated NOP-In) + * b. ASYNC Messages + * c. Reject PDU + * d. Login response + * These headers arrive unprocessed by the EP firmware. + * iSCSI layer processes them. 
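+ * The adapter posts these on the default PDU header and data rings;
+ * the handlers below gather the buffers per connection (CRI) and pass
+ * the reassembled PDU to __iscsi_complete_pdu().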
+ */ +static unsigned int +beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn, + struct pdu_base *phdr, void *pdata, unsigned int dlen) +{ + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct iscsi_conn *conn = beiscsi_conn->conn; + struct beiscsi_io_task *io_task; + struct iscsi_hdr *login_hdr; + struct iscsi_task *task; + u8 code; + + code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr); + switch (code) { + case ISCSI_OP_NOOP_IN: + pdata = NULL; + dlen = 0; + break; + case ISCSI_OP_ASYNC_EVENT: + break; + case ISCSI_OP_REJECT: + WARN_ON(!pdata); + WARN_ON(!(dlen == 48)); + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, + "BM_%d : In ISCSI_OP_REJECT\n"); + break; + case ISCSI_OP_LOGIN_RSP: + case ISCSI_OP_TEXT_RSP: + task = conn->login_task; + io_task = task->dd_data; + login_hdr = (struct iscsi_hdr *)phdr; + login_hdr->itt = io_task->libiscsi_itt; + break; + default: + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : unrecognized async PDU opcode 0x%x\n", + code); + return 1; + } + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen); + return 0; +} + +static inline void +beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx, + struct hd_async_handle *pasync_handle) +{ + pasync_handle->is_final = 0; + pasync_handle->buffer_len = 0; + pasync_handle->in_use = 0; + list_del_init(&pasync_handle->link); +} + +static void +beiscsi_hdl_purge_handles(struct beiscsi_hba *phba, + struct hd_async_context *pasync_ctx, + u16 cri) +{ + struct hd_async_handle *pasync_handle, *tmp_handle; + struct list_head *plist; + + plist = &pasync_ctx->async_entry[cri].wq.list; + list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) + beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); + + INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list); + pasync_ctx->async_entry[cri].wq.hdr_len = 0; + pasync_ctx->async_entry[cri].wq.bytes_received = 0; + pasync_ctx->async_entry[cri].wq.bytes_needed = 0; +} + +static struct hd_async_handle * +beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn, + struct hd_async_context *pasync_ctx, + struct i_t_dpdu_cqe *pdpdu_cqe, + u8 *header) +{ + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct hd_async_handle *pasync_handle; + struct be_bus_address phys_addr; + u16 cid, code, ci, cri; + u8 final, error = 0; + u32 dpl; + + cid = beiscsi_conn->beiscsi_conn_cid; + cri = BE_GET_ASYNC_CRI_FROM_CID(cid); + /** + * This function is invoked to get the right async_handle structure + * from a given DEF PDU CQ entry. + * + * - index in CQ entry gives the vertical index + * - address in CQ entry is the offset where the DMA last ended + * - final - no more notifications for this PDU + */ + if (is_chip_be2_be3r(phba)) { + dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + dpl, pdpdu_cqe); + ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + index, pdpdu_cqe); + final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + final, pdpdu_cqe); + } else { + dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + dpl, pdpdu_cqe); + ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + index, pdpdu_cqe); + final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + final, pdpdu_cqe); + } + + /** + * DB addr Hi/Lo is same for BE and SKH. + * Subtract the dataplacementlength to get to the base. 
+ */ + phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + db_addr_lo, pdpdu_cqe); + phys_addr.u.a32.address_lo -= dpl; + phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + db_addr_hi, pdpdu_cqe); + + code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe); + switch (code) { + case UNSOL_HDR_NOTIFY: + pasync_handle = pasync_ctx->async_entry[ci].header; + *header = 1; + break; + case UNSOL_DATA_DIGEST_ERROR_NOTIFY: + error = 1; + fallthrough; + case UNSOL_DATA_NOTIFY: + pasync_handle = pasync_ctx->async_entry[ci].data; + break; + /* called only for above codes */ + default: + return NULL; + } + + if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address || + pasync_handle->index != ci) { + /* driver bug - if ci does not match async handle index */ + error = 1; + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, + "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n", + cid, pasync_handle->is_header ? 'H' : 'D', + pasync_handle->pa.u.a64.address, + pasync_handle->index, + phys_addr.u.a64.address, ci); + /* FW has stale address - attempt continuing by dropping */ + } + + /** + * DEF PDU header and data buffers with errors should be simply + * dropped as there are no consumers for it. + */ + if (error) { + beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); + return NULL; + } + + if (pasync_handle->in_use || !list_empty(&pasync_handle->link)) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, + "BM_%d : cid %d async PDU handle in use - code %d ci %d addr %llx\n", + cid, code, ci, phys_addr.u.a64.address); + beiscsi_hdl_purge_handles(phba, pasync_ctx, cri); + } + + list_del_init(&pasync_handle->link); + /** + * Each CID is associated with unique CRI. + * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totaly different. 
+ **/ + pasync_handle->cri = cri; + pasync_handle->is_final = final; + pasync_handle->buffer_len = dpl; + pasync_handle->in_use = 1; + + return pasync_handle; +} + +static unsigned int +beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn, + struct hd_async_context *pasync_ctx, + u16 cri) +{ + struct iscsi_session *session = beiscsi_conn->conn->session; + struct hd_async_handle *pasync_handle, *plast_handle; + struct beiscsi_hba *phba = beiscsi_conn->phba; + void *phdr = NULL, *pdata = NULL; + u32 dlen = 0, status = 0; + struct list_head *plist; + + plist = &pasync_ctx->async_entry[cri].wq.list; + plast_handle = NULL; + list_for_each_entry(pasync_handle, plist, link) { + plast_handle = pasync_handle; + /* get the header, the first entry */ + if (!phdr) { + phdr = pasync_handle->pbuffer; + continue; + } + /* use first buffer to collect all the data */ + if (!pdata) { + pdata = pasync_handle->pbuffer; + dlen = pasync_handle->buffer_len; + continue; + } + if (!pasync_handle->buffer_len || + (dlen + pasync_handle->buffer_len) > + pasync_ctx->async_data.buffer_size) + break; + memcpy(pdata + dlen, pasync_handle->pbuffer, + pasync_handle->buffer_len); + dlen += pasync_handle->buffer_len; + } + + if (!plast_handle->is_final) { + /* last handle should have final PDU notification from FW */ + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, + "BM_%d : cid %u %p fwd async PDU opcode %x with last handle missing - HL%u:DN%u:DR%u\n", + beiscsi_conn->beiscsi_conn_cid, plast_handle, + AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr), + pasync_ctx->async_entry[cri].wq.hdr_len, + pasync_ctx->async_entry[cri].wq.bytes_needed, + pasync_ctx->async_entry[cri].wq.bytes_received); + } + spin_lock_bh(&session->back_lock); + status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen); + spin_unlock_bh(&session->back_lock); + beiscsi_hdl_purge_handles(phba, pasync_ctx, cri); + return status; +} + +static unsigned int +beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn, + struct hd_async_context *pasync_ctx, + struct hd_async_handle *pasync_handle) +{ + unsigned int bytes_needed = 0, status = 0; + u16 cri = pasync_handle->cri; + struct cri_wait_queue *wq; + struct beiscsi_hba *phba; + struct pdu_base *ppdu; + char *err = ""; + + phba = beiscsi_conn->phba; + wq = &pasync_ctx->async_entry[cri].wq; + if (pasync_handle->is_header) { + /* check if PDU hdr is rcv'd when old hdr not completed */ + if (wq->hdr_len) { + err = "incomplete"; + goto drop_pdu; + } + ppdu = pasync_handle->pbuffer; + bytes_needed = AMAP_GET_BITS(struct amap_pdu_base, + data_len_hi, ppdu); + bytes_needed <<= 16; + bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base, + data_len_lo, ppdu)); + wq->hdr_len = pasync_handle->buffer_len; + wq->bytes_received = 0; + wq->bytes_needed = bytes_needed; + list_add_tail(&pasync_handle->link, &wq->list); + if (!bytes_needed) + status = beiscsi_hdl_fwd_pdu(beiscsi_conn, + pasync_ctx, cri); + } else { + /* check if data received has header and is needed */ + if (!wq->hdr_len || !wq->bytes_needed) { + err = "header less"; + goto drop_pdu; + } + wq->bytes_received += pasync_handle->buffer_len; + /* Something got overwritten? Better catch it here. 
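+ * If more data arrives than the header advertised, the PDU is dropped
+ * as "overflow" and the per-CRI wait queue is purged.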
*/ + if (wq->bytes_received > wq->bytes_needed) { + err = "overflow"; + goto drop_pdu; + } + list_add_tail(&pasync_handle->link, &wq->list); + if (wq->bytes_received == wq->bytes_needed) + status = beiscsi_hdl_fwd_pdu(beiscsi_conn, + pasync_ctx, cri); + } + return status; + +drop_pdu: + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, + "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n", + beiscsi_conn->beiscsi_conn_cid, err, + pasync_handle->is_header ? 'H' : 'D', + wq->hdr_len, wq->bytes_needed, + pasync_handle->buffer_len); + /* discard this handle */ + beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); + /* free all the other handles in cri_wait_queue */ + beiscsi_hdl_purge_handles(phba, pasync_ctx, cri); + /* try continuing */ + return status; +} + +static void +beiscsi_hdq_post_handles(struct beiscsi_hba *phba, + u8 header, u8 ulp_num, u16 nbuf) +{ + struct hd_async_handle *pasync_handle; + struct hd_async_context *pasync_ctx; + struct hwi_controller *phwi_ctrlr; + struct phys_addr *pasync_sge; + u32 ring_id, doorbell = 0; + u32 doorbell_offset; + u16 prod, pi; + + phwi_ctrlr = phba->phwi_ctrlr; + pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); + if (header) { + pasync_sge = pasync_ctx->async_header.ring_base; + pi = pasync_ctx->async_header.pi; + ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id; + doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num]. + doorbell_offset; + } else { + pasync_sge = pasync_ctx->async_data.ring_base; + pi = pasync_ctx->async_data.pi; + ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id; + doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num]. + doorbell_offset; + } + + for (prod = 0; prod < nbuf; prod++) { + if (header) + pasync_handle = pasync_ctx->async_entry[pi].header; + else + pasync_handle = pasync_ctx->async_entry[pi].data; + WARN_ON(pasync_handle->is_header != header); + WARN_ON(pasync_handle->index != pi); + /* setup the ring only once */ + if (nbuf == pasync_ctx->num_entries) { + /* note hi is lo */ + pasync_sge[pi].hi = pasync_handle->pa.u.a32.address_lo; + pasync_sge[pi].lo = pasync_handle->pa.u.a32.address_hi; + } + if (++pi == pasync_ctx->num_entries) + pi = 0; + } + + if (header) + pasync_ctx->async_header.pi = pi; + else + pasync_ctx->async_data.pi = pi; + + doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK; + doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT; + doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT; + doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT; + iowrite32(doorbell, phba->db_va + doorbell_offset); +} + +static void +beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn, + struct i_t_dpdu_cqe *pdpdu_cqe) +{ + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct hd_async_handle *pasync_handle = NULL; + struct hd_async_context *pasync_ctx; + struct hwi_controller *phwi_ctrlr; + u8 ulp_num, consumed, header = 0; + u16 cid_cri; + + phwi_ctrlr = phba->phwi_ctrlr; + cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid); + ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri); + pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); + pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx, + pdpdu_cqe, &header); + if (is_chip_be2_be3r(phba)) + consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + num_cons, pdpdu_cqe); + else + consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + num_cons, pdpdu_cqe); + if (pasync_handle) + beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle); + /* num_cons indicates number of 8 RQEs consumed */ + if (consumed) + 
beiscsi_hdq_post_handles(phba, header, ulp_num, 8 * consumed); +} + +void beiscsi_process_mcc_cq(struct beiscsi_hba *phba) +{ + struct be_queue_info *mcc_cq; + struct be_mcc_compl *mcc_compl; + unsigned int num_processed = 0; + + mcc_cq = &phba->ctrl.mcc_obj.cq; + mcc_compl = queue_tail_node(mcc_cq); + mcc_compl->flags = le32_to_cpu(mcc_compl->flags); + while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) { + if (beiscsi_hba_in_error(phba)) + return; + + if (num_processed >= 32) { + hwi_ring_cq_db(phba, mcc_cq->id, + num_processed, 0); + num_processed = 0; + } + if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) { + beiscsi_process_async_event(phba, mcc_compl); + } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) { + beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl); + } + + mcc_compl->flags = 0; + queue_tail_inc(mcc_cq); + mcc_compl = queue_tail_node(mcc_cq); + mcc_compl->flags = le32_to_cpu(mcc_compl->flags); + num_processed++; + } + + if (num_processed > 0) + hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1); +} + +static void beiscsi_mcc_work(struct work_struct *work) +{ + struct be_eq_obj *pbe_eq; + struct beiscsi_hba *phba; + + pbe_eq = container_of(work, struct be_eq_obj, mcc_work); + phba = pbe_eq->phba; + beiscsi_process_mcc_cq(phba); + /* rearm EQ for further interrupts */ + if (!beiscsi_hba_in_error(phba)) + hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); +} + +/** + * beiscsi_process_cq()- Process the Completion Queue + * @pbe_eq: Event Q on which the Completion has come + * @budget: Max number of events to processed + * + * return + * Number of Completion Entries processed. + **/ +unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget) +{ + struct be_queue_info *cq; + struct sol_cqe *sol; + unsigned int total = 0; + unsigned int num_processed = 0; + unsigned short code = 0, cid = 0; + uint16_t cri_index = 0; + struct beiscsi_conn *beiscsi_conn; + struct beiscsi_endpoint *beiscsi_ep; + struct iscsi_endpoint *ep; + struct beiscsi_hba *phba; + + cq = pbe_eq->cq; + sol = queue_tail_node(cq); + phba = pbe_eq->phba; + + while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & + CQE_VALID_MASK) { + if (beiscsi_hba_in_error(phba)) + return 0; + + be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); + + code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 32] & + CQE_CODE_MASK); + + /* Get the CID */ + if (is_chip_be2_be3r(phba)) { + cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); + } else { + if ((code == DRIVERMSG_NOTIFY) || + (code == UNSOL_HDR_NOTIFY) || + (code == UNSOL_DATA_NOTIFY)) + cid = AMAP_GET_BITS( + struct amap_i_t_dpdu_cqe_v2, + cid, sol); + else + cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, + cid, sol); + } + + cri_index = BE_GET_CRI_FROM_CID(cid); + ep = phba->ep_array[cri_index]; + + if (ep == NULL) { + /* connection has already been freed + * just move on to next one + */ + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : proc cqe of disconn ep: cid %d\n", + cid); + goto proc_next_cqe; + } + + beiscsi_ep = ep->dd_data; + beiscsi_conn = beiscsi_ep->conn; + + /* replenish cq */ + if (num_processed == 32) { + hwi_ring_cq_db(phba, cq->id, 32, 0); + num_processed = 0; + } + total++; + + switch (code) { + case SOL_CMD_COMPLETE: + hwi_complete_cmd(beiscsi_conn, phba, sol); + break; + case DRIVERMSG_NOTIFY: + beiscsi_log(phba, KERN_INFO, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : Received %s[%d] on CID : %d\n", + cqe_desc[code], code, cid); + + hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); + break; + case UNSOL_HDR_NOTIFY: + 
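+ /* unsolicited PDU header posted by FW on the default PDU header ring */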
beiscsi_log(phba, KERN_INFO, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : Received %s[%d] on CID : %d\n", + cqe_desc[code], code, cid); + + spin_lock_bh(&phba->async_pdu_lock); + beiscsi_hdq_process_compl(beiscsi_conn, + (struct i_t_dpdu_cqe *)sol); + spin_unlock_bh(&phba->async_pdu_lock); + break; + case UNSOL_DATA_NOTIFY: + beiscsi_log(phba, KERN_INFO, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, + "BM_%d : Received %s[%d] on CID : %d\n", + cqe_desc[code], code, cid); + + spin_lock_bh(&phba->async_pdu_lock); + beiscsi_hdq_process_compl(beiscsi_conn, + (struct i_t_dpdu_cqe *)sol); + spin_unlock_bh(&phba->async_pdu_lock); + break; + case CXN_INVALIDATE_INDEX_NOTIFY: + case CMD_INVALIDATED_NOTIFY: + case CXN_INVALIDATE_NOTIFY: + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : Ignoring %s[%d] on CID : %d\n", + cqe_desc[code], code, cid); + break; + case CXN_KILLED_HDR_DIGEST_ERR: + case SOL_CMD_KILLED_DATA_DIGEST_ERR: + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, + "BM_%d : Cmd Notification %s[%d] on CID : %d\n", + cqe_desc[code], code, cid); + break; + case CMD_KILLED_INVALID_STATSN_RCVD: + case CMD_KILLED_INVALID_R2T_RCVD: + case CMD_CXN_KILLED_LUN_INVALID: + case CMD_CXN_KILLED_ICD_INVALID: + case CMD_CXN_KILLED_ITT_INVALID: + case CMD_CXN_KILLED_SEQ_OUTOFORDER: + case CMD_CXN_KILLED_INVALID_DATASN_RCVD: + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, + "BM_%d : Cmd Notification %s[%d] on CID : %d\n", + cqe_desc[code], code, cid); + break; + case UNSOL_DATA_DIGEST_ERROR_NOTIFY: + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", + cqe_desc[code], code, cid); + spin_lock_bh(&phba->async_pdu_lock); + /* driver consumes the entry and drops the contents */ + beiscsi_hdq_process_compl(beiscsi_conn, + (struct i_t_dpdu_cqe *)sol); + spin_unlock_bh(&phba->async_pdu_lock); + break; + case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: + case CXN_KILLED_BURST_LEN_MISMATCH: + case CXN_KILLED_AHS_RCVD: + case CXN_KILLED_UNKNOWN_HDR: + case CXN_KILLED_STALE_ITT_TTT_RCVD: + case CXN_KILLED_INVALID_ITT_TTT_RCVD: + case CXN_KILLED_TIMED_OUT: + case CXN_KILLED_FIN_RCVD: + case CXN_KILLED_RST_SENT: + case CXN_KILLED_RST_RCVD: + case CXN_KILLED_BAD_UNSOL_PDU_RCVD: + case CXN_KILLED_BAD_WRB_INDEX_ERROR: + case CXN_KILLED_OVER_RUN_RESIDUAL: + case CXN_KILLED_UNDER_RUN_RESIDUAL: + case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : Event %s[%d] received on CID : %d\n", + cqe_desc[code], code, cid); + if (beiscsi_conn) + iscsi_conn_failure(beiscsi_conn->conn, + ISCSI_ERR_CONN_FAILED); + break; + default: + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : Invalid CQE Event Received Code : %d CID 0x%x...\n", + code, cid); + break; + } + +proc_next_cqe: + AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0); + queue_tail_inc(cq); + sol = queue_tail_node(cq); + num_processed++; + if (total == budget) + break; + } + + hwi_ring_cq_db(phba, cq->id, num_processed, 1); + return total; +} + +static int be_iopoll(struct irq_poll *iop, int budget) +{ + unsigned int ret, io_events; + struct beiscsi_hba *phba; + struct be_eq_obj *pbe_eq; + struct be_eq_entry *eqe = NULL; + struct be_queue_info *eq; + + pbe_eq = container_of(iop, struct be_eq_obj, iopoll); + phba = pbe_eq->phba; + if (beiscsi_hba_in_error(phba)) { + irq_poll_complete(iop); + return 0; + } + + io_events = 0; + eq = &pbe_eq->q; + eqe = 
queue_tail_node(eq); + while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] & + EQE_VALID_MASK) { + AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); + queue_tail_inc(eq); + eqe = queue_tail_node(eq); + io_events++; + } + hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1); + + ret = beiscsi_process_cq(pbe_eq, budget); + pbe_eq->cq_count += ret; + if (ret < budget) { + irq_poll_complete(iop); + beiscsi_log(phba, KERN_INFO, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, + "BM_%d : rearm pbe_eq->q.id =%d ret %d\n", + pbe_eq->q.id, ret); + if (!beiscsi_hba_in_error(phba)) + hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); + } + return ret; +} + +static void +hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg, + unsigned int num_sg, struct beiscsi_io_task *io_task) +{ + struct iscsi_sge *psgl; + unsigned int sg_len, index; + unsigned int sge_len = 0; + unsigned long long addr; + struct scatterlist *l_sg; + unsigned int offset; + + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb, + io_task->bhs_pa.u.a32.address_lo); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb, + io_task->bhs_pa.u.a32.address_hi); + + l_sg = sg; + for (index = 0; (index < num_sg) && (index < 2); index++, + sg = sg_next(sg)) { + if (index == 0) { + sg_len = sg_dma_len(sg); + addr = (u64) sg_dma_address(sg); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + sge0_addr_lo, pwrb, + lower_32_bits(addr)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + sge0_addr_hi, pwrb, + upper_32_bits(addr)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + sge0_len, pwrb, + sg_len); + sge_len = sg_len; + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset, + pwrb, sge_len); + sg_len = sg_dma_len(sg); + addr = (u64) sg_dma_address(sg); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + sge1_addr_lo, pwrb, + lower_32_bits(addr)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + sge1_addr_hi, pwrb, + upper_32_bits(addr)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + sge1_len, pwrb, + sg_len); + } + } + psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; + memset(psgl, 0, sizeof(*psgl) * BE2_SGE); + + AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); + + AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, + io_task->bhs_pa.u.a32.address_hi); + AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, + io_task->bhs_pa.u.a32.address_lo); + + if (num_sg == 1) { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, + 1); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, + 0); + } else if (num_sg == 2) { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, + 0); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, + 1); + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, + 0); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, + 0); + } + + sg = l_sg; + psgl++; + psgl++; + offset = 0; + for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { + sg_len = sg_dma_len(sg); + addr = (u64) sg_dma_address(sg); + AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, + lower_32_bits(addr)); + AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, + upper_32_bits(addr)); + AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); + AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); + AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); + offset += sg_len; + } + psgl--; + AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); +} + +static void +hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, + unsigned int num_sg, struct 
beiscsi_io_task *io_task) +{ + struct iscsi_sge *psgl; + unsigned int sg_len, index; + unsigned int sge_len = 0; + unsigned long long addr; + struct scatterlist *l_sg; + unsigned int offset; + + AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, + io_task->bhs_pa.u.a32.address_lo); + AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, + io_task->bhs_pa.u.a32.address_hi); + + l_sg = sg; + for (index = 0; (index < num_sg) && (index < 2); index++, + sg = sg_next(sg)) { + if (index == 0) { + sg_len = sg_dma_len(sg); + addr = (u64) sg_dma_address(sg); + AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, + ((u32)(addr & 0xFFFFFFFF))); + AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, + ((u32)(addr >> 32))); + AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, + sg_len); + sge_len = sg_len; + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, + pwrb, sge_len); + sg_len = sg_dma_len(sg); + addr = (u64) sg_dma_address(sg); + AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb, + ((u32)(addr & 0xFFFFFFFF))); + AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb, + ((u32)(addr >> 32))); + AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb, + sg_len); + } + } + psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; + memset(psgl, 0, sizeof(*psgl) * BE2_SGE); + + AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); + + AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, + io_task->bhs_pa.u.a32.address_hi); + AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, + io_task->bhs_pa.u.a32.address_lo); + + if (num_sg == 1) { + AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, + 1); + AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, + 0); + } else if (num_sg == 2) { + AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, + 0); + AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, + 1); + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, + 0); + AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, + 0); + } + sg = l_sg; + psgl++; + psgl++; + offset = 0; + for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { + sg_len = sg_dma_len(sg); + addr = (u64) sg_dma_address(sg); + AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, + (addr & 0xFFFFFFFF)); + AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, + (addr >> 32)); + AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); + AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); + AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); + offset += sg_len; + } + psgl--; + AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); +} + +/** + * hwi_write_buffer()- Populate the WRB with task info + * @pwrb: ptr to the WRB entry + * @task: iscsi task which is to be executed + **/ +static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) +{ + struct iscsi_sge *psgl; + struct beiscsi_io_task *io_task = task->dd_data; + struct beiscsi_conn *beiscsi_conn = io_task->conn; + struct beiscsi_hba *phba = beiscsi_conn->phba; + uint8_t dsp_value = 0; + + io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2; + AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, + io_task->bhs_pa.u.a32.address_lo); + AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, + io_task->bhs_pa.u.a32.address_hi); + + if (task->data) { + + /* Check for the data_count */ + dsp_value = (task->data_count) ? 
1 : 0; + + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, + pwrb, dsp_value); + else + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, + pwrb, dsp_value); + + /* Map addr only if there is data_count */ + if (dsp_value) { + io_task->mtask_addr = dma_map_single(&phba->pcidev->dev, + task->data, + task->data_count, + DMA_TO_DEVICE); + if (dma_mapping_error(&phba->pcidev->dev, + io_task->mtask_addr)) + return -ENOMEM; + io_task->mtask_data_count = task->data_count; + } else + io_task->mtask_addr = 0; + + AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, + lower_32_bits(io_task->mtask_addr)); + AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, + upper_32_bits(io_task->mtask_addr)); + AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, + task->data_count); + + AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1); + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); + io_task->mtask_addr = 0; + } + + psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; + + AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len); + + AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, + io_task->bhs_pa.u.a32.address_hi); + AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, + io_task->bhs_pa.u.a32.address_lo); + if (task->data) { + psgl++; + AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0); + AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0); + AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0); + AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0); + AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0); + AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); + + psgl++; + if (task->data) { + AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, + lower_32_bits(io_task->mtask_addr)); + AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, + upper_32_bits(io_task->mtask_addr)); + } + AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); + } + AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); + return 0; +} + +/** + * beiscsi_find_mem_req()- Find mem needed + * @phba: ptr to HBA struct + **/ +static void beiscsi_find_mem_req(struct beiscsi_hba *phba) +{ + uint8_t mem_descr_index, ulp_num; + unsigned int num_async_pdu_buf_pages; + unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; + unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; + + phba->params.hwi_ws_sz = sizeof(struct hwi_controller); + + phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * + BE_ISCSI_PDU_HEADER_SIZE; + phba->mem_req[HWI_MEM_ADDN_CONTEXT] = + sizeof(struct hwi_context_memory); + + + phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) + * (phba->params.wrbs_per_cxn) + * phba->params.cxns_per_ctrl; + wrb_sz_per_cxn = sizeof(struct wrb_handle) * + (phba->params.wrbs_per_cxn); + phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) * + phba->params.cxns_per_ctrl); + + phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) * + phba->params.icds_per_ctrl; + phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * + phba->params.num_sge_per_io * phba->params.icds_per_ctrl; + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + + num_async_pdu_buf_sgl_pages = + PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( + phba, ulp_num) * + sizeof(struct phys_addr)); + + num_async_pdu_buf_pages = + PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( + phba, ulp_num) * + phba->params.defpdu_hdr_sz); + + num_async_pdu_data_pages = + PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( + phba, ulp_num) * + 
phba->params.defpdu_data_sz); + + num_async_pdu_data_sgl_pages = + PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( + phba, ulp_num) * + sizeof(struct phys_addr)); + + mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + BEISCSI_GET_CID_COUNT(phba, ulp_num) * + BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE; + + mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + num_async_pdu_buf_pages * + PAGE_SIZE; + + mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + num_async_pdu_data_pages * + PAGE_SIZE; + + mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + num_async_pdu_buf_sgl_pages * + PAGE_SIZE; + + mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + num_async_pdu_data_sgl_pages * + PAGE_SIZE; + + mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * + sizeof(struct hd_async_handle); + + mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * + sizeof(struct hd_async_handle); + + mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + sizeof(struct hd_async_context) + + (BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * + sizeof(struct hd_async_entry)); + } + } +} + +static int beiscsi_alloc_mem(struct beiscsi_hba *phba) +{ + dma_addr_t bus_add; + struct hwi_controller *phwi_ctrlr; + struct be_mem_descriptor *mem_descr; + struct mem_array *mem_arr, *mem_arr_orig; + unsigned int i, j, alloc_size, curr_alloc_size; + + phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL); + if (!phba->phwi_ctrlr) + return -ENOMEM; + + /* Allocate memory for wrb_context */ + phwi_ctrlr = phba->phwi_ctrlr; + phwi_ctrlr->wrb_context = kcalloc(phba->params.cxns_per_ctrl, + sizeof(struct hwi_wrb_context), + GFP_KERNEL); + if (!phwi_ctrlr->wrb_context) { + kfree(phba->phwi_ctrlr); + return -ENOMEM; + } + + phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), + GFP_KERNEL); + if (!phba->init_mem) { + kfree(phwi_ctrlr->wrb_context); + kfree(phba->phwi_ctrlr); + return -ENOMEM; + } + + mem_arr_orig = kmalloc_array(BEISCSI_MAX_FRAGS_INIT, + sizeof(*mem_arr_orig), + GFP_KERNEL); + if (!mem_arr_orig) { + kfree(phba->init_mem); + kfree(phwi_ctrlr->wrb_context); + kfree(phba->phwi_ctrlr); + return -ENOMEM; + } + + mem_descr = phba->init_mem; + for (i = 0; i < SE_MEM_MAX; i++) { + if (!phba->mem_req[i]) { + mem_descr->mem_array = NULL; + mem_descr++; + continue; + } + + j = 0; + mem_arr = mem_arr_orig; + alloc_size = phba->mem_req[i]; + memset(mem_arr, 0, sizeof(struct mem_array) * + BEISCSI_MAX_FRAGS_INIT); + curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); + do { + mem_arr->virtual_address = + dma_alloc_coherent(&phba->pcidev->dev, + curr_alloc_size, &bus_add, GFP_KERNEL); + if (!mem_arr->virtual_address) { + if (curr_alloc_size <= BE_MIN_MEM_SIZE) + goto free_mem; + if (curr_alloc_size - + rounddown_pow_of_two(curr_alloc_size)) + curr_alloc_size = rounddown_pow_of_two + (curr_alloc_size); + else + curr_alloc_size = curr_alloc_size / 2; + } else { + mem_arr->bus_address.u. 
+ a64.address = (__u64) bus_add; + mem_arr->size = curr_alloc_size; + alloc_size -= curr_alloc_size; + curr_alloc_size = min(be_max_phys_size * + 1024, alloc_size); + j++; + mem_arr++; + } + } while (alloc_size); + mem_descr->num_elements = j; + mem_descr->size_in_bytes = phba->mem_req[i]; + mem_descr->mem_array = kmalloc_array(j, sizeof(*mem_arr), + GFP_KERNEL); + if (!mem_descr->mem_array) + goto free_mem; + + memcpy(mem_descr->mem_array, mem_arr_orig, + sizeof(struct mem_array) * j); + mem_descr++; + } + kfree(mem_arr_orig); + return 0; +free_mem: + mem_descr->num_elements = j; + while ((i) || (j)) { + for (j = mem_descr->num_elements; j > 0; j--) { + dma_free_coherent(&phba->pcidev->dev, + mem_descr->mem_array[j - 1].size, + mem_descr->mem_array[j - 1]. + virtual_address, + (unsigned long)mem_descr-> + mem_array[j - 1]. + bus_address.u.a64.address); + } + if (i) { + i--; + kfree(mem_descr->mem_array); + mem_descr--; + } + } + kfree(mem_arr_orig); + kfree(phba->init_mem); + kfree(phba->phwi_ctrlr->wrb_context); + kfree(phba->phwi_ctrlr); + return -ENOMEM; +} + +static int beiscsi_get_memory(struct beiscsi_hba *phba) +{ + beiscsi_find_mem_req(phba); + return beiscsi_alloc_mem(phba); +} + +static void iscsi_init_global_templates(struct beiscsi_hba *phba) +{ + struct pdu_data_out *pdata_out; + struct pdu_nop_out *pnop_out; + struct be_mem_descriptor *mem_descr; + + mem_descr = phba->init_mem; + mem_descr += ISCSI_MEM_GLOBAL_HEADER; + pdata_out = + (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address; + memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE); + + AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out, + IIOC_SCSI_DATA); + + pnop_out = + (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0]. + virtual_address + BE_ISCSI_PDU_HEADER_SIZE); + + memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE); + AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF); + AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1); + AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0); +} + +static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) +{ + struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; + struct hwi_context_memory *phwi_ctxt; + struct wrb_handle *pwrb_handle = NULL; + struct hwi_controller *phwi_ctrlr; + struct hwi_wrb_context *pwrb_context; + struct iscsi_wrb *pwrb = NULL; + unsigned int num_cxn_wrbh = 0; + unsigned int num_cxn_wrb = 0, j, idx = 0, index; + + mem_descr_wrbh = phba->init_mem; + mem_descr_wrbh += HWI_MEM_WRBH; + + mem_descr_wrb = phba->init_mem; + mem_descr_wrb += HWI_MEM_WRB; + phwi_ctrlr = phba->phwi_ctrlr; + + /* Allocate memory for WRBQ */ + phwi_ctxt = phwi_ctrlr->phwi_ctxt; + phwi_ctxt->be_wrbq = kcalloc(phba->params.cxns_per_ctrl, + sizeof(struct be_queue_info), + GFP_KERNEL); + if (!phwi_ctxt->be_wrbq) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : WRBQ Mem Alloc Failed\n"); + return -ENOMEM; + } + + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { + pwrb_context = &phwi_ctrlr->wrb_context[index]; + pwrb_context->pwrb_handle_base = + kcalloc(phba->params.wrbs_per_cxn, + sizeof(struct wrb_handle *), + GFP_KERNEL); + if (!pwrb_context->pwrb_handle_base) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Mem Alloc Failed. 
Failing to load\n"); + goto init_wrb_hndl_failed; + } + pwrb_context->pwrb_handle_basestd = + kcalloc(phba->params.wrbs_per_cxn, + sizeof(struct wrb_handle *), + GFP_KERNEL); + if (!pwrb_context->pwrb_handle_basestd) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Mem Alloc Failed. Failing to load\n"); + goto init_wrb_hndl_failed; + } + if (!num_cxn_wrbh) { + pwrb_handle = + mem_descr_wrbh->mem_array[idx].virtual_address; + num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) / + ((sizeof(struct wrb_handle)) * + phba->params.wrbs_per_cxn)); + idx++; + } + pwrb_context->alloc_index = 0; + pwrb_context->wrb_handles_available = 0; + pwrb_context->free_index = 0; + + if (num_cxn_wrbh) { + for (j = 0; j < phba->params.wrbs_per_cxn; j++) { + pwrb_context->pwrb_handle_base[j] = pwrb_handle; + pwrb_context->pwrb_handle_basestd[j] = + pwrb_handle; + pwrb_context->wrb_handles_available++; + pwrb_handle->wrb_index = j; + pwrb_handle++; + } + num_cxn_wrbh--; + } + spin_lock_init(&pwrb_context->wrb_lock); + } + idx = 0; + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { + pwrb_context = &phwi_ctrlr->wrb_context[index]; + if (!num_cxn_wrb) { + pwrb = mem_descr_wrb->mem_array[idx].virtual_address; + num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / + ((sizeof(struct iscsi_wrb) * + phba->params.wrbs_per_cxn)); + idx++; + } + + if (num_cxn_wrb) { + for (j = 0; j < phba->params.wrbs_per_cxn; j++) { + pwrb_handle = pwrb_context->pwrb_handle_base[j]; + pwrb_handle->pwrb = pwrb; + pwrb++; + } + num_cxn_wrb--; + } + } + return 0; +init_wrb_hndl_failed: + for (j = index; j > 0; j--) { + pwrb_context = &phwi_ctrlr->wrb_context[j]; + kfree(pwrb_context->pwrb_handle_base); + kfree(pwrb_context->pwrb_handle_basestd); + } + kfree(phwi_ctxt->be_wrbq); + return -ENOMEM; +} + +static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) +{ + uint8_t ulp_num; + struct hwi_controller *phwi_ctrlr; + struct hba_parameters *p = &phba->params; + struct hd_async_context *pasync_ctx; + struct hd_async_handle *pasync_header_h, *pasync_data_h; + unsigned int index, idx, num_per_mem, num_async_data; + struct be_mem_descriptor *mem_descr; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + /* get async_ctx for each ULP */ + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = + (struct hd_async_context *) + mem_descr->mem_array[0].virtual_address; + + pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; + memset(pasync_ctx, 0, sizeof(*pasync_ctx)); + + pasync_ctx->async_entry = + (struct hd_async_entry *) + ((long unsigned int)pasync_ctx + + sizeof(struct hd_async_context)); + + pasync_ctx->num_entries = BEISCSI_ASYNC_HDQ_SIZE(phba, + ulp_num); + /* setup header buffers */ + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (mem_descr->mem_array[0].virtual_address) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx" + " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n", + ulp_num, + mem_descr->mem_array[0]. 
+ virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + pasync_ctx->async_header.pi = 0; + pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz; + pasync_ctx->async_header.va_base = + mem_descr->mem_array[0].virtual_address; + + pasync_ctx->async_header.pa_base.u.a64.address = + mem_descr->mem_array[0]. + bus_address.u.a64.address; + + /* setup header buffer sgls */ + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (mem_descr->mem_array[0].virtual_address) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx" + " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n", + ulp_num, + mem_descr->mem_array[0]. + virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + pasync_ctx->async_header.ring_base = + mem_descr->mem_array[0].virtual_address; + + /* setup header buffer handles */ + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (mem_descr->mem_array[0].virtual_address) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx" + " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n", + ulp_num, + mem_descr->mem_array[0]. + virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + pasync_ctx->async_header.handle_base = + mem_descr->mem_array[0].virtual_address; + + /* setup data buffer sgls */ + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (mem_descr->mem_array[0].virtual_address) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx" + " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n", + ulp_num, + mem_descr->mem_array[0]. + virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + pasync_ctx->async_data.ring_base = + mem_descr->mem_array[0].virtual_address; + + /* setup data buffer handles */ + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (!mem_descr->mem_array[0].virtual_address) + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + pasync_ctx->async_data.handle_base = + mem_descr->mem_array[0].virtual_address; + + pasync_header_h = + (struct hd_async_handle *) + pasync_ctx->async_header.handle_base; + pasync_data_h = + (struct hd_async_handle *) + pasync_ctx->async_data.handle_base; + + /* setup data buffers */ + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (mem_descr->mem_array[0].virtual_address) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx" + " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n", + ulp_num, + mem_descr->mem_array[0]. 
+ virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + idx = 0; + pasync_ctx->async_data.pi = 0; + pasync_ctx->async_data.buffer_size = p->defpdu_data_sz; + pasync_ctx->async_data.va_base = + mem_descr->mem_array[idx].virtual_address; + pasync_ctx->async_data.pa_base.u.a64.address = + mem_descr->mem_array[idx]. + bus_address.u.a64.address; + + num_async_data = ((mem_descr->mem_array[idx].size) / + phba->params.defpdu_data_sz); + num_per_mem = 0; + + for (index = 0; index < BEISCSI_ASYNC_HDQ_SIZE + (phba, ulp_num); index++) { + pasync_header_h->cri = -1; + pasync_header_h->is_header = 1; + pasync_header_h->index = index; + INIT_LIST_HEAD(&pasync_header_h->link); + pasync_header_h->pbuffer = + (void *)((unsigned long) + (pasync_ctx-> + async_header.va_base) + + (p->defpdu_hdr_sz * index)); + + pasync_header_h->pa.u.a64.address = + pasync_ctx->async_header.pa_base.u.a64. + address + (p->defpdu_hdr_sz * index); + + pasync_ctx->async_entry[index].header = + pasync_header_h; + pasync_header_h++; + INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. + wq.list); + + pasync_data_h->cri = -1; + pasync_data_h->is_header = 0; + pasync_data_h->index = index; + INIT_LIST_HEAD(&pasync_data_h->link); + + if (!num_async_data) { + num_per_mem = 0; + idx++; + pasync_ctx->async_data.va_base = + mem_descr->mem_array[idx]. + virtual_address; + pasync_ctx->async_data.pa_base.u. + a64.address = + mem_descr->mem_array[idx]. + bus_address.u.a64.address; + num_async_data = + ((mem_descr->mem_array[idx]. + size) / + phba->params.defpdu_data_sz); + } + pasync_data_h->pbuffer = + (void *)((unsigned long) + (pasync_ctx->async_data.va_base) + + (p->defpdu_data_sz * num_per_mem)); + + pasync_data_h->pa.u.a64.address = + pasync_ctx->async_data.pa_base.u.a64. 
+ address + (p->defpdu_data_sz * + num_per_mem); + num_per_mem++; + num_async_data--; + + pasync_ctx->async_entry[index].data = + pasync_data_h; + pasync_data_h++; + } + } + } + + return 0; +} + +static int +be_sgl_create_contiguous(void *virtual_address, + u64 physical_address, u32 length, + struct be_dma_mem *sgl) +{ + WARN_ON(!virtual_address); + WARN_ON(!physical_address); + WARN_ON(!length); + WARN_ON(!sgl); + + sgl->va = virtual_address; + sgl->dma = (unsigned long)physical_address; + sgl->size = length; + + return 0; +} + +static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl) +{ + memset(sgl, 0, sizeof(*sgl)); +} + +static void +hwi_build_be_sgl_arr(struct beiscsi_hba *phba, + struct mem_array *pmem, struct be_dma_mem *sgl) +{ + if (sgl->va) + be_sgl_destroy_contiguous(sgl); + + be_sgl_create_contiguous(pmem->virtual_address, + pmem->bus_address.u.a64.address, + pmem->size, sgl); +} + +static void +hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba, + struct mem_array *pmem, struct be_dma_mem *sgl) +{ + if (sgl->va) + be_sgl_destroy_contiguous(sgl); + + be_sgl_create_contiguous((unsigned char *)pmem->virtual_address, + pmem->bus_address.u.a64.address, + pmem->size, sgl); +} + +static int be_fill_queue(struct be_queue_info *q, + u16 len, u16 entry_size, void *vaddress) +{ + struct be_dma_mem *mem = &q->dma_mem; + + memset(q, 0, sizeof(*q)); + q->len = len; + q->entry_size = entry_size; + mem->size = len * entry_size; + mem->va = vaddress; + if (!mem->va) + return -ENOMEM; + memset(mem->va, 0, mem->size); + return 0; +} + +static int beiscsi_create_eqs(struct beiscsi_hba *phba, + struct hwi_context_memory *phwi_context) +{ + int ret = -ENOMEM, eq_for_mcc; + unsigned int i, num_eq_pages; + struct be_queue_info *eq; + struct be_dma_mem *mem; + void *eq_vaddress; + dma_addr_t paddr; + + num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * + sizeof(struct be_eq_entry)); + + if (phba->pcidev->msix_enabled) + eq_for_mcc = 1; + else + eq_for_mcc = 0; + for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { + eq = &phwi_context->be_eq[i].q; + mem = &eq->dma_mem; + phwi_context->be_eq[i].phba = phba; + eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev, + num_eq_pages * PAGE_SIZE, + &paddr, GFP_KERNEL); + if (!eq_vaddress) { + ret = -ENOMEM; + goto create_eq_error; + } + + mem->va = eq_vaddress; + ret = be_fill_queue(eq, phba->params.num_eq_entries, + sizeof(struct be_eq_entry), eq_vaddress); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : be_fill_queue Failed for EQ\n"); + goto create_eq_error; + } + + mem->dma = paddr; + ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, + BEISCSI_EQ_DELAY_DEF); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : beiscsi_cmd_eq_create Failed for EQ\n"); + goto create_eq_error; + } + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : eqid = %d\n", + phwi_context->be_eq[i].q.id); + } + return 0; + +create_eq_error: + for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { + eq = &phwi_context->be_eq[i].q; + mem = &eq->dma_mem; + if (mem->va) + dma_free_coherent(&phba->pcidev->dev, num_eq_pages + * PAGE_SIZE, + mem->va, mem->dma); + } + return ret; +} + +static int beiscsi_create_cqs(struct beiscsi_hba *phba, + struct hwi_context_memory *phwi_context) +{ + unsigned int i, num_cq_pages; + struct be_queue_info *cq, *eq; + struct be_dma_mem *mem; + struct be_eq_obj *pbe_eq; + void *cq_vaddress; + int ret = -ENOMEM; + dma_addr_t paddr; + + num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * + 
sizeof(struct sol_cqe)); + + for (i = 0; i < phba->num_cpus; i++) { + cq = &phwi_context->be_cq[i]; + eq = &phwi_context->be_eq[i].q; + pbe_eq = &phwi_context->be_eq[i]; + pbe_eq->cq = cq; + pbe_eq->phba = phba; + mem = &cq->dma_mem; + cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev, + num_cq_pages * PAGE_SIZE, + &paddr, GFP_KERNEL); + if (!cq_vaddress) { + ret = -ENOMEM; + goto create_cq_error; + } + + ret = be_fill_queue(cq, phba->params.num_cq_entries, + sizeof(struct sol_cqe), cq_vaddress); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : be_fill_queue Failed for ISCSI CQ\n"); + goto create_cq_error; + } + + mem->dma = paddr; + ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, + false, 0); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : beiscsi_cmd_eq_create Failed for ISCSI CQ\n"); + goto create_cq_error; + } + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : iscsi cq_id is %d for eq_id %d\n" + "iSCSI CQ CREATED\n", cq->id, eq->id); + } + return 0; + +create_cq_error: + for (i = 0; i < phba->num_cpus; i++) { + cq = &phwi_context->be_cq[i]; + mem = &cq->dma_mem; + if (mem->va) + dma_free_coherent(&phba->pcidev->dev, num_cq_pages + * PAGE_SIZE, + mem->va, mem->dma); + } + return ret; +} + +static int +beiscsi_create_def_hdr(struct beiscsi_hba *phba, + struct hwi_context_memory *phwi_context, + struct hwi_controller *phwi_ctrlr, + unsigned int def_pdu_ring_sz, uint8_t ulp_num) +{ + unsigned int idx; + int ret; + struct be_queue_info *dq, *cq; + struct be_dma_mem *mem; + struct be_mem_descriptor *mem_descr; + void *dq_vaddress; + + idx = 0; + dq = &phwi_context->be_def_hdrq[ulp_num]; + cq = &phwi_context->be_cq[0]; + mem = &dq->dma_mem; + mem_descr = phba->init_mem; + mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + dq_vaddress = mem_descr->mem_array[idx].virtual_address; + ret = be_fill_queue(dq, mem_descr->mem_array[0].size / + sizeof(struct phys_addr), + sizeof(struct phys_addr), dq_vaddress); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n", + ulp_num); + + return ret; + } + mem->dma = (unsigned long)mem_descr->mem_array[idx]. 
+ bus_address.u.a64.address; + ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, + def_pdu_ring_sz, + phba->params.defpdu_hdr_sz, + BEISCSI_DEFQ_HDR, ulp_num); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n", + ulp_num); + + return ret; + } + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", + ulp_num, + phwi_context->be_def_hdrq[ulp_num].id); + return 0; +} + +static int +beiscsi_create_def_data(struct beiscsi_hba *phba, + struct hwi_context_memory *phwi_context, + struct hwi_controller *phwi_ctrlr, + unsigned int def_pdu_ring_sz, uint8_t ulp_num) +{ + unsigned int idx; + int ret; + struct be_queue_info *dataq, *cq; + struct be_dma_mem *mem; + struct be_mem_descriptor *mem_descr; + void *dq_vaddress; + + idx = 0; + dataq = &phwi_context->be_def_dataq[ulp_num]; + cq = &phwi_context->be_cq[0]; + mem = &dataq->dma_mem; + mem_descr = phba->init_mem; + mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + dq_vaddress = mem_descr->mem_array[idx].virtual_address; + ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / + sizeof(struct phys_addr), + sizeof(struct phys_addr), dq_vaddress); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : be_fill_queue Failed for DEF PDU " + "DATA on ULP : %d\n", + ulp_num); + + return ret; + } + mem->dma = (unsigned long)mem_descr->mem_array[idx]. + bus_address.u.a64.address; + ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, + def_pdu_ring_sz, + phba->params.defpdu_data_sz, + BEISCSI_DEFQ_DATA, ulp_num); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d be_cmd_create_default_pdu_queue" + " Failed for DEF PDU DATA on ULP : %d\n", + ulp_num); + return ret; + } + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : iscsi def data id on ULP : %d is %d\n", + ulp_num, + phwi_context->be_def_dataq[ulp_num].id); + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : DEFAULT PDU DATA RING CREATED on ULP : %d\n", + ulp_num); + return 0; +} + + +static int +beiscsi_post_template_hdr(struct beiscsi_hba *phba) +{ + struct be_mem_descriptor *mem_descr; + struct mem_array *pm_arr; + struct be_dma_mem sgl; + int status, ulp_num; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + pm_arr = mem_descr->mem_array; + + hwi_build_be_sgl_arr(phba, pm_arr, &sgl); + status = be_cmd_iscsi_post_template_hdr( + &phba->ctrl, &sgl); + + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Post Template HDR Failed for " + "ULP_%d\n", ulp_num); + return status; + } + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : Template HDR Pages Posted for " + "ULP_%d\n", ulp_num); + } + } + return 0; +} + +static int +beiscsi_post_pages(struct beiscsi_hba *phba) +{ + struct be_mem_descriptor *mem_descr; + struct mem_array *pm_arr; + unsigned int page_offset, i; + struct be_dma_mem sgl; + int status, ulp_num = 0; + + mem_descr = phba->init_mem; + mem_descr += HWI_MEM_SGE; + pm_arr = mem_descr->mem_array; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) + break; + + page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * + 
phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE; + for (i = 0; i < mem_descr->num_elements; i++) { + hwi_build_be_sgl_arr(phba, pm_arr, &sgl); + status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, + page_offset, + (pm_arr->size / PAGE_SIZE)); + page_offset += pm_arr->size / PAGE_SIZE; + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : post sgl failed.\n"); + return status; + } + pm_arr++; + } + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : POSTED PAGES\n"); + return 0; +} + +static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) +{ + struct be_dma_mem *mem = &q->dma_mem; + if (mem->va) { + dma_free_coherent(&phba->pcidev->dev, mem->size, + mem->va, mem->dma); + mem->va = NULL; + } +} + +static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, + u16 len, u16 entry_size) +{ + struct be_dma_mem *mem = &q->dma_mem; + + memset(q, 0, sizeof(*q)); + q->len = len; + q->entry_size = entry_size; + mem->size = len * entry_size; + mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma, + GFP_KERNEL); + if (!mem->va) + return -ENOMEM; + return 0; +} + +static int +beiscsi_create_wrb_rings(struct beiscsi_hba *phba, + struct hwi_context_memory *phwi_context, + struct hwi_controller *phwi_ctrlr) +{ + unsigned int num_wrb_rings; + u64 pa_addr_lo; + unsigned int idx, num, i, ulp_num; + struct mem_array *pwrb_arr; + void *wrb_vaddr; + struct be_dma_mem sgl; + struct be_mem_descriptor *mem_descr; + struct hwi_wrb_context *pwrb_context; + int status; + uint8_t ulp_count = 0, ulp_base_num = 0; + uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 }; + + idx = 0; + mem_descr = phba->init_mem; + mem_descr += HWI_MEM_WRB; + pwrb_arr = kmalloc_array(phba->params.cxns_per_ctrl, + sizeof(*pwrb_arr), + GFP_KERNEL); + if (!pwrb_arr) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Memory alloc failed in create wrb ring.\n"); + return -ENOMEM; + } + wrb_vaddr = mem_descr->mem_array[idx].virtual_address; + pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address; + num_wrb_rings = mem_descr->mem_array[idx].size / + (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb)); + + for (num = 0; num < phba->params.cxns_per_ctrl; num++) { + if (num_wrb_rings) { + pwrb_arr[num].virtual_address = wrb_vaddr; + pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; + pwrb_arr[num].size = phba->params.wrbs_per_cxn * + sizeof(struct iscsi_wrb); + wrb_vaddr += pwrb_arr[num].size; + pa_addr_lo += pwrb_arr[num].size; + num_wrb_rings--; + } else { + idx++; + wrb_vaddr = mem_descr->mem_array[idx].virtual_address; + pa_addr_lo = mem_descr->mem_array[idx]. 
+ bus_address.u.a64.address; + num_wrb_rings = mem_descr->mem_array[idx].size / + (phba->params.wrbs_per_cxn * + sizeof(struct iscsi_wrb)); + pwrb_arr[num].virtual_address = wrb_vaddr; + pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; + pwrb_arr[num].size = phba->params.wrbs_per_cxn * + sizeof(struct iscsi_wrb); + wrb_vaddr += pwrb_arr[num].size; + pa_addr_lo += pwrb_arr[num].size; + num_wrb_rings--; + } + } + + /* Get the ULP Count */ + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + ulp_count++; + ulp_base_num = ulp_num; + cid_count_ulp[ulp_num] = + BEISCSI_GET_CID_COUNT(phba, ulp_num); + } + + for (i = 0; i < phba->params.cxns_per_ctrl; i++) { + if (ulp_count > 1) { + ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; + + if (!cid_count_ulp[ulp_base_num]) + ulp_base_num = (ulp_base_num + 1) % + BEISCSI_ULP_COUNT; + + cid_count_ulp[ulp_base_num]--; + } + + + hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); + status = be_cmd_wrbq_create(&phba->ctrl, &sgl, + &phwi_context->be_wrbq[i], + &phwi_ctrlr->wrb_context[i], + ulp_base_num); + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : wrbq create failed."); + kfree(pwrb_arr); + return status; + } + pwrb_context = &phwi_ctrlr->wrb_context[i]; + BE_SET_CID_TO_CRI(i, pwrb_context->cid); + } + kfree(pwrb_arr); + return 0; +} + +static void free_wrb_handles(struct beiscsi_hba *phba) +{ + unsigned int index; + struct hwi_controller *phwi_ctrlr; + struct hwi_wrb_context *pwrb_context; + + phwi_ctrlr = phba->phwi_ctrlr; + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { + pwrb_context = &phwi_ctrlr->wrb_context[index]; + kfree(pwrb_context->pwrb_handle_base); + kfree(pwrb_context->pwrb_handle_basestd); + } +} + +static void be_mcc_queues_destroy(struct beiscsi_hba *phba) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_dma_mem *ptag_mem; + struct be_queue_info *q; + int i, tag; + + q = &phba->ctrl.mcc_obj.q; + for (i = 0; i < MAX_MCC_CMD; i++) { + tag = i + 1; + if (!test_bit(MCC_TAG_STATE_RUNNING, + &ctrl->ptag_state[tag].tag_state)) + continue; + + if (test_bit(MCC_TAG_STATE_TIMEOUT, + &ctrl->ptag_state[tag].tag_state)) { + ptag_mem = &ctrl->ptag_state[tag].tag_mem_state; + if (ptag_mem->size) { + dma_free_coherent(&ctrl->pdev->dev, + ptag_mem->size, + ptag_mem->va, + ptag_mem->dma); + ptag_mem->size = 0; + } + continue; + } + /** + * If MCC is still active and waiting then wake up the process. + * We are here only because port is going offline. The process + * sees that (BEISCSI_HBA_ONLINE is cleared) and EIO error is + * returned for the operation and allocated memory cleaned up. + */ + if (waitqueue_active(&ctrl->mcc_wait[tag])) { + ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED; + ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK; + wake_up_interruptible(&ctrl->mcc_wait[tag]); + /* + * Control tag info gets reinitialized in enable + * so wait for the process to clear running state. + */ + while (test_bit(MCC_TAG_STATE_RUNNING, + &ctrl->ptag_state[tag].tag_state)) + schedule_timeout_uninterruptible(HZ); + } + /** + * For MCC with tag_states MCC_TAG_STATE_ASYNC and + * MCC_TAG_STATE_IGNORE nothing needs to done. 
+ */ + } + if (q->created) { + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); + be_queue_free(phba, q); + } + + q = &phba->ctrl.mcc_obj.cq; + if (q->created) { + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); + be_queue_free(phba, q); + } +} + +static int be_mcc_queues_create(struct beiscsi_hba *phba, + struct hwi_context_memory *phwi_context) +{ + struct be_queue_info *q, *cq; + struct be_ctrl_info *ctrl = &phba->ctrl; + + /* Alloc MCC compl queue */ + cq = &phba->ctrl.mcc_obj.cq; + if (be_queue_alloc(phba, cq, MCC_CQ_LEN, + sizeof(struct be_mcc_compl))) + goto err; + /* Ask BE to create MCC compl queue; */ + if (phba->pcidev->msix_enabled) { + if (beiscsi_cmd_cq_create(ctrl, cq, + &phwi_context->be_eq[phba->num_cpus].q, + false, true, 0)) + goto mcc_cq_free; + } else { + if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, + false, true, 0)) + goto mcc_cq_free; + } + + /* Alloc MCC queue */ + q = &phba->ctrl.mcc_obj.q; + if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) + goto mcc_cq_destroy; + + /* Ask BE to create MCC queue */ + if (beiscsi_cmd_mccq_create(phba, q, cq)) + goto mcc_q_free; + + return 0; + +mcc_q_free: + be_queue_free(phba, q); +mcc_cq_destroy: + beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ); +mcc_cq_free: + be_queue_free(phba, cq); +err: + return -ENOMEM; +} + +static void be2iscsi_enable_msix(struct beiscsi_hba *phba) +{ + int nvec = 1; + + switch (phba->generation) { + case BE_GEN2: + case BE_GEN3: + nvec = BEISCSI_MAX_NUM_CPUS + 1; + break; + case BE_GEN4: + nvec = phba->fw_config.eqid_count; + break; + default: + nvec = 2; + break; + } + + /* if eqid_count == 1 fall back to INTX */ + if (enable_msix && nvec > 1) { + struct irq_affinity desc = { .post_vectors = 1 }; + + if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) { + phba->num_cpus = nvec - 1; + return; + } + } + + phba->num_cpus = 1; +} + +static void hwi_purge_eq(struct beiscsi_hba *phba) +{ + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + struct be_queue_info *eq; + struct be_eq_entry *eqe = NULL; + int i, eq_msix; + unsigned int num_processed; + + if (beiscsi_hba_in_error(phba)) + return; + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + if (phba->pcidev->msix_enabled) + eq_msix = 1; + else + eq_msix = 0; + + for (i = 0; i < (phba->num_cpus + eq_msix); i++) { + eq = &phwi_context->be_eq[i].q; + eqe = queue_tail_node(eq); + num_processed = 0; + while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] + & EQE_VALID_MASK) { + AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); + queue_tail_inc(eq); + eqe = queue_tail_node(eq); + num_processed++; + } + + if (num_processed) + hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1); + } +} + +static void hwi_cleanup_port(struct beiscsi_hba *phba) +{ + struct be_queue_info *q; + struct be_ctrl_info *ctrl = &phba->ctrl; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + int i, eq_for_mcc, ulp_num; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) + beiscsi_cmd_iscsi_cleanup(phba, ulp_num); + + /** + * Purge all EQ entries that may have been left out. This is to + * workaround a problem we've seen occasionally where driver gets an + * interrupt with EQ entry bit set after stopping the controller. 
+ */ + hwi_purge_eq(phba); + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + + be_cmd_iscsi_remove_template_hdr(ctrl); + + for (i = 0; i < phba->params.cxns_per_ctrl; i++) { + q = &phwi_context->be_wrbq[i]; + if (q->created) + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); + } + kfree(phwi_context->be_wrbq); + free_wrb_handles(phba); + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + + q = &phwi_context->be_def_hdrq[ulp_num]; + if (q->created) + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + + q = &phwi_context->be_def_dataq[ulp_num]; + if (q->created) + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + } + } + + beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); + + for (i = 0; i < (phba->num_cpus); i++) { + q = &phwi_context->be_cq[i]; + if (q->created) { + be_queue_free(phba, q); + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); + } + } + + be_mcc_queues_destroy(phba); + if (phba->pcidev->msix_enabled) + eq_for_mcc = 1; + else + eq_for_mcc = 0; + for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { + q = &phwi_context->be_eq[i].q; + if (q->created) { + be_queue_free(phba, q); + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); + } + } + /* this ensures complete FW cleanup */ + beiscsi_cmd_function_reset(phba); + /* last communication, indicate driver is unloading */ + beiscsi_cmd_special_wrb(&phba->ctrl, 0); +} + +static int hwi_init_port(struct beiscsi_hba *phba) +{ + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + unsigned int def_pdu_ring_sz; + struct be_ctrl_info *ctrl = &phba->ctrl; + int status, ulp_num; + u16 nbufs; + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + /* set port optic state to unknown */ + phba->optic_state = 0xff; + + status = beiscsi_create_eqs(phba, phwi_context); + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : EQ not created\n"); + goto error; + } + + status = be_mcc_queues_create(phba, phwi_context); + if (status != 0) + goto error; + + status = beiscsi_check_supported_fw(ctrl, phba); + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Unsupported fw version\n"); + goto error; + } + + status = beiscsi_create_cqs(phba, phwi_context); + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : CQ not created\n"); + goto error; + } + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + nbufs = phwi_context->pasync_ctx[ulp_num]->num_entries; + def_pdu_ring_sz = nbufs * sizeof(struct phys_addr); + + status = beiscsi_create_def_hdr(phba, phwi_context, + phwi_ctrlr, + def_pdu_ring_sz, + ulp_num); + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Default Header not created for ULP : %d\n", + ulp_num); + goto error; + } + + status = beiscsi_create_def_data(phba, phwi_context, + phwi_ctrlr, + def_pdu_ring_sz, + ulp_num); + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Default Data not created for ULP : %d\n", + ulp_num); + goto error; + } + /** + * Now that the default PDU rings have been created, + * let EP know about it. 
+ */ + beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR, + ulp_num, nbufs); + beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA, + ulp_num, nbufs); + } + } + + status = beiscsi_post_pages(phba); + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Post SGL Pages Failed\n"); + goto error; + } + + status = beiscsi_post_template_hdr(phba); + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Template HDR Posting for CXN Failed\n"); + } + + status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : WRB Rings not created\n"); + goto error; + } + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + uint16_t async_arr_idx = 0; + + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + uint16_t cri = 0; + struct hd_async_context *pasync_ctx; + + pasync_ctx = HWI_GET_ASYNC_PDU_CTX( + phwi_ctrlr, ulp_num); + for (cri = 0; cri < + phba->params.cxns_per_ctrl; cri++) { + if (ulp_num == BEISCSI_GET_ULP_FROM_CRI + (phwi_ctrlr, cri)) + pasync_ctx->cid_to_async_cri_map[ + phwi_ctrlr->wrb_context[cri].cid] = + async_arr_idx++; + } + } + } + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_port success\n"); + return 0; + +error: + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_port failed"); + hwi_cleanup_port(phba); + return status; +} + +static int hwi_init_controller(struct beiscsi_hba *phba) +{ + struct hwi_controller *phwi_ctrlr; + + phwi_ctrlr = phba->phwi_ctrlr; + if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) { + phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba-> + init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n", + phwi_ctrlr->phwi_ctxt); + } else { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : HWI_MEM_ADDN_CONTEXT is more " + "than one element.Failing to load\n"); + return -ENOMEM; + } + + iscsi_init_global_templates(phba); + if (beiscsi_init_wrb_handle(phba)) + return -ENOMEM; + + if (hwi_init_async_pdu_ctx(phba)) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx failed\n"); + return -ENOMEM; + } + + if (hwi_init_port(phba) != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_controller failed\n"); + + return -ENOMEM; + } + return 0; +} + +static void beiscsi_free_mem(struct beiscsi_hba *phba) +{ + struct be_mem_descriptor *mem_descr; + int i, j; + + mem_descr = phba->init_mem; + for (i = 0; i < SE_MEM_MAX; i++) { + for (j = mem_descr->num_elements; j > 0; j--) { + dma_free_coherent(&phba->pcidev->dev, + mem_descr->mem_array[j - 1].size, + mem_descr->mem_array[j - 1].virtual_address, + (unsigned long)mem_descr->mem_array[j - 1]. 
+ bus_address.u.a64.address); + } + + kfree(mem_descr->mem_array); + mem_descr++; + } + kfree(phba->init_mem); + kfree(phba->phwi_ctrlr->wrb_context); + kfree(phba->phwi_ctrlr); +} + +static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) +{ + struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; + struct sgl_handle *psgl_handle; + struct iscsi_sge *pfrag; + unsigned int arr_index, i, idx; + unsigned int ulp_icd_start, ulp_num = 0; + + phba->io_sgl_hndl_avbl = 0; + phba->eh_sgl_hndl_avbl = 0; + + mem_descr_sglh = phba->init_mem; + mem_descr_sglh += HWI_MEM_SGLH; + if (1 == mem_descr_sglh->num_elements) { + phba->io_sgl_hndl_base = kcalloc(phba->params.ios_per_ctrl, + sizeof(struct sgl_handle *), + GFP_KERNEL); + if (!phba->io_sgl_hndl_base) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Mem Alloc Failed. Failing to load\n"); + return -ENOMEM; + } + phba->eh_sgl_hndl_base = + kcalloc(phba->params.icds_per_ctrl - + phba->params.ios_per_ctrl, + sizeof(struct sgl_handle *), GFP_KERNEL); + if (!phba->eh_sgl_hndl_base) { + kfree(phba->io_sgl_hndl_base); + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Mem Alloc Failed. Failing to load\n"); + return -ENOMEM; + } + } else { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : HWI_MEM_SGLH is more than one element." + "Failing to load\n"); + return -ENOMEM; + } + + arr_index = 0; + idx = 0; + while (idx < mem_descr_sglh->num_elements) { + psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address; + + for (i = 0; i < (mem_descr_sglh->mem_array[idx].size / + sizeof(struct sgl_handle)); i++) { + if (arr_index < phba->params.ios_per_ctrl) { + phba->io_sgl_hndl_base[arr_index] = psgl_handle; + phba->io_sgl_hndl_avbl++; + arr_index++; + } else { + phba->eh_sgl_hndl_base[arr_index - + phba->params.ios_per_ctrl] = + psgl_handle; + arr_index++; + phba->eh_sgl_hndl_avbl++; + } + psgl_handle++; + } + idx++; + } + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : phba->io_sgl_hndl_avbl=%d " + "phba->eh_sgl_hndl_avbl=%d\n", + phba->io_sgl_hndl_avbl, + phba->eh_sgl_hndl_avbl); + + mem_descr_sg = phba->init_mem; + mem_descr_sg += HWI_MEM_SGE; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "\n BM_%d : mem_descr_sg->num_elements=%d\n", + mem_descr_sg->num_elements); + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) + break; + + ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; + + arr_index = 0; + idx = 0; + while (idx < mem_descr_sg->num_elements) { + pfrag = mem_descr_sg->mem_array[idx].virtual_address; + + for (i = 0; + i < (mem_descr_sg->mem_array[idx].size) / + (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io); + i++) { + if (arr_index < phba->params.ios_per_ctrl) + psgl_handle = phba->io_sgl_hndl_base[arr_index]; + else + psgl_handle = phba->eh_sgl_hndl_base[arr_index - + phba->params.ios_per_ctrl]; + psgl_handle->pfrag = pfrag; + AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); + AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); + pfrag += phba->params.num_sge_per_io; + psgl_handle->sgl_index = ulp_icd_start + arr_index++; + } + idx++; + } + phba->io_sgl_free_index = 0; + phba->io_sgl_alloc_index = 0; + phba->eh_sgl_free_index = 0; + phba->eh_sgl_alloc_index = 0; + return 0; +} + +static int hba_setup_cid_tbls(struct beiscsi_hba *phba) +{ + int ret; + uint16_t i, ulp_num; + struct ulp_cid_info *ptr_cid_info = NULL; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void 
*)&phba->fw_config.ulp_supported)) { + ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info), + GFP_KERNEL); + + if (!ptr_cid_info) { + ret = -ENOMEM; + goto free_memory; + } + + /* Allocate memory for CID array */ + ptr_cid_info->cid_array = + kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num), + sizeof(*ptr_cid_info->cid_array), + GFP_KERNEL); + if (!ptr_cid_info->cid_array) { + kfree(ptr_cid_info); + ptr_cid_info = NULL; + ret = -ENOMEM; + + goto free_memory; + } + ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT( + phba, ulp_num); + + /* Save the cid_info_array ptr */ + phba->cid_array_info[ulp_num] = ptr_cid_info; + } + } + phba->ep_array = kcalloc(phba->params.cxns_per_ctrl, + sizeof(struct iscsi_endpoint *), + GFP_KERNEL); + if (!phba->ep_array) { + ret = -ENOMEM; + + goto free_memory; + } + + phba->conn_table = kcalloc(phba->params.cxns_per_ctrl, + sizeof(struct beiscsi_conn *), + GFP_KERNEL); + if (!phba->conn_table) { + kfree(phba->ep_array); + phba->ep_array = NULL; + ret = -ENOMEM; + + goto free_memory; + } + + for (i = 0; i < phba->params.cxns_per_ctrl; i++) { + ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num; + + ptr_cid_info = phba->cid_array_info[ulp_num]; + ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = + phba->phwi_ctrlr->wrb_context[i].cid; + + } + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + ptr_cid_info = phba->cid_array_info[ulp_num]; + + ptr_cid_info->cid_alloc = 0; + ptr_cid_info->cid_free = 0; + } + } + return 0; + +free_memory: + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + ptr_cid_info = phba->cid_array_info[ulp_num]; + + if (ptr_cid_info) { + kfree(ptr_cid_info->cid_array); + kfree(ptr_cid_info); + phba->cid_array_info[ulp_num] = NULL; + } + } + } + + return ret; +} + +static void hwi_enable_intr(struct beiscsi_hba *phba) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + struct be_queue_info *eq; + u8 __iomem *addr; + u32 reg, i; + u32 enabled; + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + + addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); + reg = ioread32(addr); + + enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; + if (!enabled) { + reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : reg =x%08x addr=%p\n", reg, addr); + iowrite32(reg, addr); + } + + if (!phba->pcidev->msix_enabled) { + eq = &phwi_context->be_eq[0].q; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : eq->id=%d\n", eq->id); + + hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); + } else { + for (i = 0; i <= phba->num_cpus; i++) { + eq = &phwi_context->be_eq[i].q; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : eq->id=%d\n", eq->id); + hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); + } + } +} + +static void hwi_disable_intr(struct beiscsi_hba *phba) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + + u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; + u32 reg = ioread32(addr); + + u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; + if (enabled) { + reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; + iowrite32(reg, addr); + } else + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, + "BM_%d : In hwi_disable_intr, Already Disabled\n"); +} + +static int beiscsi_init_port(struct beiscsi_hba *phba) +{ + int 
ret; + + ret = hwi_init_controller(phba); + if (ret < 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : init controller failed\n"); + return ret; + } + ret = beiscsi_init_sgl_handle(phba); + if (ret < 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : init sgl handles failed\n"); + goto cleanup_port; + } + + ret = hba_setup_cid_tbls(phba); + if (ret < 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : setup CID table failed\n"); + kfree(phba->io_sgl_hndl_base); + kfree(phba->eh_sgl_hndl_base); + goto cleanup_port; + } + return ret; + +cleanup_port: + hwi_cleanup_port(phba); + return ret; +} + +static void beiscsi_cleanup_port(struct beiscsi_hba *phba) +{ + struct ulp_cid_info *ptr_cid_info = NULL; + int ulp_num; + + kfree(phba->io_sgl_hndl_base); + kfree(phba->eh_sgl_hndl_base); + kfree(phba->ep_array); + kfree(phba->conn_table); + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + ptr_cid_info = phba->cid_array_info[ulp_num]; + + if (ptr_cid_info) { + kfree(ptr_cid_info->cid_array); + kfree(ptr_cid_info); + phba->cid_array_info[ulp_num] = NULL; + } + } + } +} + +/** + * beiscsi_free_mgmt_task_handles()- Free driver CXN resources + * @beiscsi_conn: ptr to the conn to be cleaned up + * @task: ptr to iscsi_task resource to be freed. + * + * Free driver mgmt resources binded to CXN. + **/ +void +beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task) +{ + struct beiscsi_io_task *io_task; + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct hwi_wrb_context *pwrb_context; + struct hwi_controller *phwi_ctrlr; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); + + phwi_ctrlr = phba->phwi_ctrlr; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + + io_task = task->dd_data; + + if (io_task->pwrb_handle) { + free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); + io_task->pwrb_handle = NULL; + } + + if (io_task->psgl_handle) { + free_mgmt_sgl_handle(phba, io_task->psgl_handle); + io_task->psgl_handle = NULL; + } + + if (io_task->mtask_addr) { + dma_unmap_single(&phba->pcidev->dev, + io_task->mtask_addr, + io_task->mtask_data_count, + DMA_TO_DEVICE); + io_task->mtask_addr = 0; + } +} + +/** + * beiscsi_cleanup_task()- Free driver resources of the task + * @task: ptr to the iscsi task + * + **/ +static void beiscsi_cleanup_task(struct iscsi_task *task) +{ + struct beiscsi_io_task *io_task = task->dd_data; + struct iscsi_conn *conn = task->conn; + struct beiscsi_conn *beiscsi_conn = conn->dd_data; + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; + struct hwi_wrb_context *pwrb_context; + struct hwi_controller *phwi_ctrlr; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); + + phwi_ctrlr = phba->phwi_ctrlr; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + + if (io_task->cmd_bhs) { + dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, + io_task->bhs_pa.u.a64.address); + io_task->cmd_bhs = NULL; + task->hdr = NULL; + } + + if (task->sc) { + if (io_task->pwrb_handle) { + free_wrb_handle(phba, pwrb_context, + io_task->pwrb_handle); + io_task->pwrb_handle = NULL; + } + + if (io_task->psgl_handle) { + free_io_sgl_handle(phba, io_task->psgl_handle); + io_task->psgl_handle = NULL; + } + + if (io_task->scsi_cmnd) { + if (io_task->num_sg) + scsi_dma_unmap(io_task->scsi_cmnd); + io_task->scsi_cmnd = NULL; + } + } 
else { + if (!beiscsi_conn->login_in_progress) + beiscsi_free_mgmt_task_handles(beiscsi_conn, task); + } +} + +void +beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, + struct beiscsi_offload_params *params) +{ + struct wrb_handle *pwrb_handle; + struct hwi_wrb_context *pwrb_context = NULL; + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct iscsi_task *task = beiscsi_conn->task; + struct iscsi_session *session = task->conn->session; + u32 doorbell = 0; + + /* + * We can always use 0 here because it is reserved by libiscsi for + * login/startup related tasks. + */ + beiscsi_conn->login_in_progress = 0; + spin_lock_bh(&session->back_lock); + beiscsi_cleanup_task(task); + spin_unlock_bh(&session->back_lock); + + pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, + &pwrb_context); + + /* Check for the adapter family */ + if (is_chip_be2_be3r(phba)) + beiscsi_offload_cxn_v0(params, pwrb_handle, + phba->init_mem, + pwrb_context); + else + beiscsi_offload_cxn_v2(params, pwrb_handle, + pwrb_context); + + be_dws_le_to_cpu(pwrb_handle->pwrb, + sizeof(struct iscsi_target_context_update_wrb)); + + doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; + doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) + << DB_DEF_PDU_WRB_INDEX_SHIFT; + doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); + + /* + * There is no completion for CONTEXT_UPDATE. The completion of next + * WRB posted guarantees FW's processing and DMA'ing of it. + * Use beiscsi_put_wrb_handle to put it back in the pool which makes + * sure zero'ing or reuse of the WRB only after wrbs_per_cxn. + */ + beiscsi_put_wrb_handle(pwrb_context, pwrb_handle, + phba->params.wrbs_per_cxn); + beiscsi_log(phba, KERN_INFO, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n", + pwrb_handle, pwrb_context->free_index, + pwrb_context->wrb_handles_available); +} + +static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, + int *index, int *age) +{ + *index = (int)itt; + if (age) + *age = conn->session->age; +} + +/** + * beiscsi_alloc_pdu - allocates pdu and related resources + * @task: libiscsi task + * @opcode: opcode of pdu for task + * + * This is called with the session lock held. It will allocate + * the wrb and sgl if needed for the command. And it will prep + * the pdu's itt. beiscsi_parse_pdu will later translate + * the pdu itt to the libiscsi task itt. 
+ */ +static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) +{ + struct beiscsi_io_task *io_task = task->dd_data; + struct iscsi_conn *conn = task->conn; + struct beiscsi_conn *beiscsi_conn = conn->dd_data; + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct hwi_wrb_context *pwrb_context; + struct hwi_controller *phwi_ctrlr; + itt_t itt; + uint16_t cri_index = 0; + struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; + dma_addr_t paddr; + + io_task->cmd_bhs = dma_pool_alloc(beiscsi_sess->bhs_pool, + GFP_ATOMIC, &paddr); + if (!io_task->cmd_bhs) + return -ENOMEM; + io_task->bhs_pa.u.a64.address = paddr; + io_task->libiscsi_itt = (itt_t)task->itt; + io_task->conn = beiscsi_conn; + + task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; + task->hdr_max = sizeof(struct be_cmd_bhs); + io_task->psgl_handle = NULL; + io_task->pwrb_handle = NULL; + + if (task->sc) { + io_task->psgl_handle = alloc_io_sgl_handle(phba); + if (!io_task->psgl_handle) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : Alloc of IO_SGL_ICD Failed " + "for the CID : %d\n", + beiscsi_conn->beiscsi_conn_cid); + goto free_hndls; + } + io_task->pwrb_handle = alloc_wrb_handle(phba, + beiscsi_conn->beiscsi_conn_cid, + &io_task->pwrb_context); + if (!io_task->pwrb_handle) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : Alloc of WRB_HANDLE Failed " + "for the CID : %d\n", + beiscsi_conn->beiscsi_conn_cid); + goto free_io_hndls; + } + } else { + io_task->scsi_cmnd = NULL; + if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { + beiscsi_conn->task = task; + if (!beiscsi_conn->login_in_progress) { + io_task->psgl_handle = (struct sgl_handle *) + alloc_mgmt_sgl_handle(phba); + if (!io_task->psgl_handle) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | + BEISCSI_LOG_CONFIG, + "BM_%d : Alloc of MGMT_SGL_ICD Failed " + "for the CID : %d\n", + beiscsi_conn->beiscsi_conn_cid); + goto free_hndls; + } + + beiscsi_conn->login_in_progress = 1; + beiscsi_conn->plogin_sgl_handle = + io_task->psgl_handle; + io_task->pwrb_handle = + alloc_wrb_handle(phba, + beiscsi_conn->beiscsi_conn_cid, + &io_task->pwrb_context); + if (!io_task->pwrb_handle) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | + BEISCSI_LOG_CONFIG, + "BM_%d : Alloc of WRB_HANDLE Failed " + "for the CID : %d\n", + beiscsi_conn->beiscsi_conn_cid); + goto free_mgmt_hndls; + } + beiscsi_conn->plogin_wrb_handle = + io_task->pwrb_handle; + + } else { + io_task->psgl_handle = + beiscsi_conn->plogin_sgl_handle; + io_task->pwrb_handle = + beiscsi_conn->plogin_wrb_handle; + } + } else { + io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); + if (!io_task->psgl_handle) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | + BEISCSI_LOG_CONFIG, + "BM_%d : Alloc of MGMT_SGL_ICD Failed " + "for the CID : %d\n", + beiscsi_conn->beiscsi_conn_cid); + goto free_hndls; + } + io_task->pwrb_handle = + alloc_wrb_handle(phba, + beiscsi_conn->beiscsi_conn_cid, + &io_task->pwrb_context); + if (!io_task->pwrb_handle) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : Alloc of WRB_HANDLE Failed " + "for the CID : %d\n", + beiscsi_conn->beiscsi_conn_cid); + goto free_mgmt_hndls; + } + + } + } + itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> + wrb_index << 16) | (unsigned int) + (io_task->psgl_handle->sgl_index)); + io_task->pwrb_handle->pio_handle = task; + + io_task->cmd_bhs->iscsi_hdr.itt = itt; + return 0; + +free_io_hndls: + free_io_sgl_handle(phba, 
io_task->psgl_handle); + goto free_hndls; +free_mgmt_hndls: + free_mgmt_sgl_handle(phba, io_task->psgl_handle); + io_task->psgl_handle = NULL; +free_hndls: + phwi_ctrlr = phba->phwi_ctrlr; + cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + if (io_task->pwrb_handle) + free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); + io_task->pwrb_handle = NULL; + dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, + io_task->bhs_pa.u.a64.address); + io_task->cmd_bhs = NULL; + return -ENOMEM; +} +static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, + unsigned int num_sg, unsigned int xferlen, + unsigned int writedir) +{ + + struct beiscsi_io_task *io_task = task->dd_data; + struct iscsi_conn *conn = task->conn; + struct beiscsi_conn *beiscsi_conn = conn->dd_data; + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct iscsi_wrb *pwrb = NULL; + unsigned int doorbell = 0; + + pwrb = io_task->pwrb_handle->pwrb; + + io_task->bhs_len = sizeof(struct be_cmd_bhs); + + if (writedir) { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, + INI_WR_CMD); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1); + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, + INI_RD_CMD); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0); + } + + io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2, + type, pwrb); + + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb, + cpu_to_be16(*(unsigned short *) + &io_task->cmd_bhs->iscsi_hdr.lun)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, + io_task->pwrb_handle->wrb_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, + be32_to_cpu(task->cmdsn)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, + io_task->psgl_handle->sgl_index); + + hwi_write_sgl_v2(pwrb, sg, num_sg, io_task); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, + io_task->pwrb_handle->wrb_index); + if (io_task->pwrb_context->plast_wrb) + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, + io_task->pwrb_context->plast_wrb, + io_task->pwrb_handle->wrb_index); + io_task->pwrb_context->plast_wrb = pwrb; + + be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); + + doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; + doorbell |= (io_task->pwrb_handle->wrb_index & + DB_DEF_PDU_WRB_INDEX_MASK) << + DB_DEF_PDU_WRB_INDEX_SHIFT; + doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); + return 0; +} + +static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, + unsigned int num_sg, unsigned int xferlen, + unsigned int writedir) +{ + + struct beiscsi_io_task *io_task = task->dd_data; + struct iscsi_conn *conn = task->conn; + struct beiscsi_conn *beiscsi_conn = conn->dd_data; + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct iscsi_wrb *pwrb = NULL; + unsigned int doorbell = 0; + + pwrb = io_task->pwrb_handle->pwrb; + io_task->bhs_len = sizeof(struct be_cmd_bhs); + + if (writedir) { + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + INI_WR_CMD); + AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + INI_RD_CMD); + AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); + } + + io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb, + type, pwrb); + + AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, + 
cpu_to_be16(*(unsigned short *) + &io_task->cmd_bhs->iscsi_hdr.lun)); + AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); + AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, + io_task->pwrb_handle->wrb_index); + AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, + be32_to_cpu(task->cmdsn)); + AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, + io_task->psgl_handle->sgl_index); + + hwi_write_sgl(pwrb, sg, num_sg, io_task); + + AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, + io_task->pwrb_handle->wrb_index); + if (io_task->pwrb_context->plast_wrb) + AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, + io_task->pwrb_context->plast_wrb, + io_task->pwrb_handle->wrb_index); + io_task->pwrb_context->plast_wrb = pwrb; + + be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); + + doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; + doorbell |= (io_task->pwrb_handle->wrb_index & + DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; + doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; + + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); + return 0; +} + +static int beiscsi_mtask(struct iscsi_task *task) +{ + struct beiscsi_io_task *io_task = task->dd_data; + struct iscsi_conn *conn = task->conn; + struct beiscsi_conn *beiscsi_conn = conn->dd_data; + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct iscsi_wrb *pwrb = NULL; + unsigned int doorbell = 0; + unsigned int cid; + unsigned int pwrb_typeoffset = 0; + int ret = 0; + + cid = beiscsi_conn->beiscsi_conn_cid; + pwrb = io_task->pwrb_handle->pwrb; + + if (is_chip_be2_be3r(phba)) { + AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, + be32_to_cpu(task->cmdsn)); + AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, + io_task->pwrb_handle->wrb_index); + AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, + io_task->psgl_handle->sgl_index); + AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, + task->data_count); + AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, + io_task->pwrb_handle->wrb_index); + if (io_task->pwrb_context->plast_wrb) + AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, + io_task->pwrb_context->plast_wrb, + io_task->pwrb_handle->wrb_index); + io_task->pwrb_context->plast_wrb = pwrb; + + pwrb_typeoffset = BE_WRB_TYPE_OFFSET; + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, + be32_to_cpu(task->cmdsn)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, + io_task->pwrb_handle->wrb_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, + io_task->psgl_handle->sgl_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, + task->data_count); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, + io_task->pwrb_handle->wrb_index); + if (io_task->pwrb_context->plast_wrb) + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, + io_task->pwrb_context->plast_wrb, + io_task->pwrb_handle->wrb_index); + io_task->pwrb_context->plast_wrb = pwrb; + + pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; + } + + + switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { + case ISCSI_OP_LOGIN: + AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); + ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); + ret = hwi_write_buffer(pwrb, task); + break; + case ISCSI_OP_NOOP_OUT: + if (task->hdr->ttt != ISCSI_RESERVED_TAG) { + ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, + dmsg, pwrb, 1); + else + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + dmsg, 
pwrb, 1); + } else { + ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, + dmsg, pwrb, 0); + else + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + dmsg, pwrb, 0); + } + ret = hwi_write_buffer(pwrb, task); + break; + case ISCSI_OP_TEXT: + ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); + ret = hwi_write_buffer(pwrb, task); + break; + case ISCSI_OP_SCSI_TMFUNC: + ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset); + ret = hwi_write_buffer(pwrb, task); + break; + case ISCSI_OP_LOGOUT: + ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset); + ret = hwi_write_buffer(pwrb, task); + break; + + default: + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BM_%d : opcode =%d Not supported\n", + task->hdr->opcode & ISCSI_OPCODE_MASK); + + return -EINVAL; + } + + if (ret) + return ret; + + /* Set the task type */ + io_task->wrb_type = (is_chip_be2_be3r(phba)) ? + AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : + AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); + + doorbell |= cid & DB_WRB_POST_CID_MASK; + doorbell |= (io_task->pwrb_handle->wrb_index & + DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; + doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); + return 0; +} + +static int beiscsi_task_xmit(struct iscsi_task *task) +{ + struct beiscsi_io_task *io_task = task->dd_data; + struct scsi_cmnd *sc = task->sc; + struct beiscsi_hba *phba; + struct scatterlist *sg; + int num_sg; + unsigned int writedir = 0, xferlen = 0; + + phba = io_task->conn->phba; + /** + * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be + * operational if FW still gets heartbeat from EP FW. Is management + * path really needed to continue further? + */ + if (!beiscsi_hba_is_online(phba)) + return -EIO; + + if (!io_task->conn->login_in_progress) + task->hdr->exp_statsn = 0; + + if (!sc) + return beiscsi_mtask(task); + + io_task->scsi_cmnd = sc; + io_task->num_sg = 0; + num_sg = scsi_dma_map(sc); + if (num_sg < 0) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, + "BM_%d : scsi_dma_map Failed " + "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", + be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), + io_task->libiscsi_itt, scsi_bufflen(sc)); + + return num_sg; + } + /** + * For scsi cmd task, check num_sg before unmapping in cleanup_task. + * For management task, cleanup_task checks mtask_addr before unmapping. 
+ */ + io_task->num_sg = num_sg; + xferlen = scsi_bufflen(sc); + sg = scsi_sglist(sc); + if (sc->sc_data_direction == DMA_TO_DEVICE) + writedir = 1; + else + writedir = 0; + + return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); +} + +/** + * beiscsi_bsg_request - handle bsg request from ISCSI transport + * @job: job to handle + */ +static int beiscsi_bsg_request(struct bsg_job *job) +{ + struct Scsi_Host *shost; + struct beiscsi_hba *phba; + struct iscsi_bsg_request *bsg_req = job->request; + int rc = -EINVAL; + unsigned int tag; + struct be_dma_mem nonemb_cmd; + struct be_cmd_resp_hdr *resp; + struct iscsi_bsg_reply *bsg_reply = job->reply; + unsigned short status, extd_status; + + shost = iscsi_job_to_shost(job); + phba = iscsi_host_priv(shost); + + if (!beiscsi_hba_is_online(phba)) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BM_%d : HBA in error 0x%lx\n", phba->state); + return -ENXIO; + } + + switch (bsg_req->msgcode) { + case ISCSI_BSG_HST_VENDOR: + nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev, + job->request_payload.payload_len, + &nonemb_cmd.dma, GFP_KERNEL); + if (nonemb_cmd.va == NULL) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BM_%d : Failed to allocate memory for " + "beiscsi_bsg_request\n"); + return -ENOMEM; + } + tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, + &nonemb_cmd); + if (!tag) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BM_%d : MBX Tag Allocation Failed\n"); + + dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + return -EAGAIN; + } + + rc = wait_event_interruptible_timeout( + phba->ctrl.mcc_wait[tag], + phba->ctrl.mcc_tag_status[tag], + msecs_to_jiffies( + BEISCSI_HOST_MBX_TIMEOUT)); + + if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { + clear_bit(MCC_TAG_STATE_RUNNING, + &phba->ctrl.ptag_state[tag].tag_state); + dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + return -EIO; + } + extd_status = (phba->ctrl.mcc_tag_status[tag] & + CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT; + status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK; + free_mcc_wrb(&phba->ctrl, tag); + resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va; + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + nonemb_cmd.va, (resp->response_length + + sizeof(*resp))); + bsg_reply->reply_payload_rcv_len = resp->response_length; + bsg_reply->result = status; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + if (status || extd_status) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BM_%d : MBX Cmd Failed" + " status = %d extd_status = %d\n", + status, extd_status); + + return -EIO; + } else { + rc = 0; + } + break; + + default: + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BM_%d : Unsupported bsg command: 0x%x\n", + bsg_req->msgcode); + break; + } + + return rc; +} + +static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) +{ + /* Set the logging parameter */ + beiscsi_log_enable_init(phba, beiscsi_log_enable); +} + +void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle) +{ + if (phba->boot_struct.boot_kset) + return; + + /* skip if boot work is already in progress */ + if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state)) + return; + + phba->boot_struct.retry = 3; + phba->boot_struct.tag = 0; + phba->boot_struct.s_handle = s_handle; + phba->boot_struct.action = 
BEISCSI_BOOT_GET_SHANDLE; + schedule_work(&phba->boot_work); +} + +#define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3 +/* + * beiscsi_show_boot_tgt_info() + * Boot flag info for iscsi-utilities + * Bit 0 Block valid flag + * Bit 1 Firmware booting selected + */ +static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) +{ + struct beiscsi_hba *phba = data; + struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess; + struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0]; + char *str = buf; + int rc = -EPERM; + + switch (type) { + case ISCSI_BOOT_TGT_NAME: + rc = sprintf(buf, "%.*s\n", + (int)strlen(boot_sess->target_name), + (char *)&boot_sess->target_name); + break; + case ISCSI_BOOT_TGT_IP_ADDR: + if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4) + rc = sprintf(buf, "%pI4\n", + (char *)&boot_conn->dest_ipaddr.addr); + else + rc = sprintf(str, "%pI6\n", + (char *)&boot_conn->dest_ipaddr.addr); + break; + case ISCSI_BOOT_TGT_PORT: + rc = sprintf(str, "%d\n", boot_conn->dest_port); + break; + + case ISCSI_BOOT_TGT_CHAP_NAME: + rc = sprintf(str, "%.*s\n", + boot_conn->negotiated_login_options.auth_data.chap. + target_chap_name_length, + (char *)&boot_conn->negotiated_login_options. + auth_data.chap.target_chap_name); + break; + case ISCSI_BOOT_TGT_CHAP_SECRET: + rc = sprintf(str, "%.*s\n", + boot_conn->negotiated_login_options.auth_data.chap. + target_secret_length, + (char *)&boot_conn->negotiated_login_options. + auth_data.chap.target_secret); + break; + case ISCSI_BOOT_TGT_REV_CHAP_NAME: + rc = sprintf(str, "%.*s\n", + boot_conn->negotiated_login_options.auth_data.chap. + intr_chap_name_length, + (char *)&boot_conn->negotiated_login_options. + auth_data.chap.intr_chap_name); + break; + case ISCSI_BOOT_TGT_REV_CHAP_SECRET: + rc = sprintf(str, "%.*s\n", + boot_conn->negotiated_login_options.auth_data.chap. + intr_secret_length, + (char *)&boot_conn->negotiated_login_options. 
+ auth_data.chap.intr_secret); + break; + case ISCSI_BOOT_TGT_FLAGS: + rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); + break; + case ISCSI_BOOT_TGT_NIC_ASSOC: + rc = sprintf(str, "0\n"); + break; + } + return rc; +} + +static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf) +{ + struct beiscsi_hba *phba = data; + char *str = buf; + int rc = -EPERM; + + switch (type) { + case ISCSI_BOOT_INI_INITIATOR_NAME: + rc = sprintf(str, "%s\n", + phba->boot_struct.boot_sess.initiator_iscsiname); + break; + } + return rc; +} + +static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf) +{ + struct beiscsi_hba *phba = data; + char *str = buf; + int rc = -EPERM; + + switch (type) { + case ISCSI_BOOT_ETH_FLAGS: + rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); + break; + case ISCSI_BOOT_ETH_INDEX: + rc = sprintf(str, "0\n"); + break; + case ISCSI_BOOT_ETH_MAC: + rc = beiscsi_get_macaddr(str, phba); + break; + } + return rc; +} + +static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type) +{ + umode_t rc = 0; + + switch (type) { + case ISCSI_BOOT_TGT_NAME: + case ISCSI_BOOT_TGT_IP_ADDR: + case ISCSI_BOOT_TGT_PORT: + case ISCSI_BOOT_TGT_CHAP_NAME: + case ISCSI_BOOT_TGT_CHAP_SECRET: + case ISCSI_BOOT_TGT_REV_CHAP_NAME: + case ISCSI_BOOT_TGT_REV_CHAP_SECRET: + case ISCSI_BOOT_TGT_NIC_ASSOC: + case ISCSI_BOOT_TGT_FLAGS: + rc = S_IRUGO; + break; + } + return rc; +} + +static umode_t beiscsi_ini_get_attr_visibility(void *data, int type) +{ + umode_t rc = 0; + + switch (type) { + case ISCSI_BOOT_INI_INITIATOR_NAME: + rc = S_IRUGO; + break; + } + return rc; +} + +static umode_t beiscsi_eth_get_attr_visibility(void *data, int type) +{ + umode_t rc = 0; + + switch (type) { + case ISCSI_BOOT_ETH_FLAGS: + case ISCSI_BOOT_ETH_MAC: + case ISCSI_BOOT_ETH_INDEX: + rc = S_IRUGO; + break; + } + return rc; +} + +static void beiscsi_boot_kobj_release(void *data) +{ + struct beiscsi_hba *phba = data; + + scsi_host_put(phba->shost); +} + +static int beiscsi_boot_create_kset(struct beiscsi_hba *phba) +{ + struct boot_struct *bs = &phba->boot_struct; + struct iscsi_boot_kobj *boot_kobj; + + if (bs->boot_kset) { + __beiscsi_log(phba, KERN_ERR, + "BM_%d: boot_kset already created\n"); + return 0; + } + + bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); + if (!bs->boot_kset) { + __beiscsi_log(phba, KERN_ERR, + "BM_%d: boot_kset alloc failed\n"); + return -ENOMEM; + } + + /* get shost ref because the show function will refer phba */ + if (!scsi_host_get(phba->shost)) + goto free_kset; + + boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba, + beiscsi_show_boot_tgt_info, + beiscsi_tgt_get_attr_visibility, + beiscsi_boot_kobj_release); + if (!boot_kobj) + goto put_shost; + + if (!scsi_host_get(phba->shost)) + goto free_kset; + + boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba, + beiscsi_show_boot_ini_info, + beiscsi_ini_get_attr_visibility, + beiscsi_boot_kobj_release); + if (!boot_kobj) + goto put_shost; + + if (!scsi_host_get(phba->shost)) + goto free_kset; + + boot_kobj = iscsi_boot_create_ethernet(bs->boot_kset, 0, phba, + beiscsi_show_boot_eth_info, + beiscsi_eth_get_attr_visibility, + beiscsi_boot_kobj_release); + if (!boot_kobj) + goto put_shost; + + return 0; + +put_shost: + scsi_host_put(phba->shost); +free_kset: + iscsi_boot_destroy_kset(bs->boot_kset); + bs->boot_kset = NULL; + return -ENOMEM; +} + +static void beiscsi_boot_work(struct work_struct *work) +{ + struct beiscsi_hba *phba = + container_of(work, struct 
beiscsi_hba, boot_work); + struct boot_struct *bs = &phba->boot_struct; + unsigned int tag = 0; + + if (!beiscsi_hba_is_online(phba)) + return; + + beiscsi_log(phba, KERN_INFO, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BM_%d : %s action %d\n", + __func__, phba->boot_struct.action); + + switch (phba->boot_struct.action) { + case BEISCSI_BOOT_REOPEN_SESS: + tag = beiscsi_boot_reopen_sess(phba); + break; + case BEISCSI_BOOT_GET_SHANDLE: + tag = __beiscsi_boot_get_shandle(phba, 1); + break; + case BEISCSI_BOOT_GET_SINFO: + tag = beiscsi_boot_get_sinfo(phba); + break; + case BEISCSI_BOOT_LOGOUT_SESS: + tag = beiscsi_boot_logout_sess(phba); + break; + case BEISCSI_BOOT_CREATE_KSET: + beiscsi_boot_create_kset(phba); + /** + * updated boot_kset is made visible to all before + * ending the boot work. + */ + mb(); + clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); + return; + } + if (!tag) { + if (bs->retry--) + schedule_work(&phba->boot_work); + else + clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); + } +} + +static void beiscsi_eqd_update_work(struct work_struct *work) +{ + struct hwi_context_memory *phwi_context; + struct be_set_eqd set_eqd[MAX_CPUS]; + struct hwi_controller *phwi_ctrlr; + struct be_eq_obj *pbe_eq; + struct beiscsi_hba *phba; + unsigned int pps, delta; + struct be_aic_obj *aic; + int eqd, i, num = 0; + unsigned long now; + + phba = container_of(work, struct beiscsi_hba, eqd_update.work); + if (!beiscsi_hba_is_online(phba)) + return; + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + + for (i = 0; i <= phba->num_cpus; i++) { + aic = &phba->aic_obj[i]; + pbe_eq = &phwi_context->be_eq[i]; + now = jiffies; + if (!aic->jiffies || time_before(now, aic->jiffies) || + pbe_eq->cq_count < aic->eq_prev) { + aic->jiffies = now; + aic->eq_prev = pbe_eq->cq_count; + continue; + } + delta = jiffies_to_msecs(now - aic->jiffies); + pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); + eqd = (pps / 1500) << 2; + + if (eqd < 8) + eqd = 0; + eqd = min_t(u32, eqd, BEISCSI_EQ_DELAY_MAX); + eqd = max_t(u32, eqd, BEISCSI_EQ_DELAY_MIN); + + aic->jiffies = now; + aic->eq_prev = pbe_eq->cq_count; + + if (eqd != aic->prev_eqd) { + set_eqd[num].delay_multiplier = (eqd * 65)/100; + set_eqd[num].eq_id = pbe_eq->q.id; + aic->prev_eqd = eqd; + num++; + } + } + if (num) + /* completion of this is ignored */ + beiscsi_modify_eq_delay(phba, set_eqd, num); + + schedule_delayed_work(&phba->eqd_update, + msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); +} + +static void beiscsi_hw_tpe_check(struct timer_list *t) +{ + struct beiscsi_hba *phba = from_timer(phba, t, hw_check); + u32 wait; + + /* if not TPE, do nothing */ + if (!beiscsi_detect_tpe(phba)) + return; + + /* wait default 4000ms before recovering */ + wait = 4000; + if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL) + wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL; + queue_delayed_work(phba->wq, &phba->recover_port, + msecs_to_jiffies(wait)); +} + +static void beiscsi_hw_health_check(struct timer_list *t) +{ + struct beiscsi_hba *phba = from_timer(phba, t, hw_check); + + beiscsi_detect_ue(phba); + if (beiscsi_detect_ue(phba)) { + __beiscsi_log(phba, KERN_ERR, + "BM_%d : port in error: %lx\n", phba->state); + /* sessions are no longer valid, so first fail the sessions */ + queue_work(phba->wq, &phba->sess_work); + + /* detect UER supported */ + if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state)) + return; + /* modify this timer to check TPE */ + phba->hw_check.function = beiscsi_hw_tpe_check; + } + + mod_timer(&phba->hw_check, 
+ jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); +} + +/* + * beiscsi_enable_port()- Enables the disabled port. + * Only port resources freed in disable function are reallocated. + * This is called in HBA error handling path. + * + * @phba: Instance of driver private structure + * + **/ +static int beiscsi_enable_port(struct beiscsi_hba *phba) +{ + struct hwi_context_memory *phwi_context; + struct hwi_controller *phwi_ctrlr; + struct be_eq_obj *pbe_eq; + int ret, i; + + if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { + __beiscsi_log(phba, KERN_ERR, + "BM_%d : %s : port is online %lx\n", + __func__, phba->state); + return 0; + } + + ret = beiscsi_init_sliport(phba); + if (ret) + return ret; + + be2iscsi_enable_msix(phba); + + beiscsi_get_params(phba); + beiscsi_set_host_data(phba); + /* Re-enable UER. If different TPE occurs then it is recoverable. */ + beiscsi_set_uer_feature(phba); + + phba->shost->max_id = phba->params.cxns_per_ctrl - 1; + phba->shost->can_queue = phba->params.ios_per_ctrl; + ret = beiscsi_init_port(phba); + if (ret < 0) { + __beiscsi_log(phba, KERN_ERR, + "BM_%d : init port failed\n"); + goto disable_msix; + } + + for (i = 0; i < MAX_MCC_CMD; i++) { + init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); + phba->ctrl.mcc_tag[i] = i + 1; + phba->ctrl.mcc_tag_status[i + 1] = 0; + phba->ctrl.mcc_tag_available++; + } + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll); + } + + i = (phba->pcidev->msix_enabled) ? i : 0; + /* Work item for MCC handling */ + pbe_eq = &phwi_context->be_eq[i]; + INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work); + + ret = beiscsi_init_irqs(phba); + if (ret < 0) { + __beiscsi_log(phba, KERN_ERR, + "BM_%d : setup IRQs failed %d\n", ret); + goto cleanup_port; + } + hwi_enable_intr(phba); + /* port operational: clear all error bits */ + set_bit(BEISCSI_HBA_ONLINE, &phba->state); + __beiscsi_log(phba, KERN_INFO, + "BM_%d : port online: 0x%lx\n", phba->state); + + /* start hw_check timer and eqd_update work */ + schedule_delayed_work(&phba->eqd_update, + msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); + + /** + * Timer function gets modified for TPE detection. + * Always reinit to do health check first. + */ + phba->hw_check.function = beiscsi_hw_health_check; + mod_timer(&phba->hw_check, + jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); + return 0; + +cleanup_port: + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + irq_poll_disable(&pbe_eq->iopoll); + } + hwi_cleanup_port(phba); + +disable_msix: + pci_free_irq_vectors(phba->pcidev); + return ret; +} + +/* + * beiscsi_disable_port()- Disable port and cleanup driver resources. + * This is called in HBA error handling and driver removal. 
+ * @phba: Instance Priv structure + * @unload: indicate driver is unloading + * + * Free the OS and HW resources held by the driver + **/ +static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload) +{ + struct hwi_context_memory *phwi_context; + struct hwi_controller *phwi_ctrlr; + struct be_eq_obj *pbe_eq; + unsigned int i; + + if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state)) + return; + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + hwi_disable_intr(phba); + beiscsi_free_irqs(phba); + pci_free_irq_vectors(phba->pcidev); + + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + irq_poll_disable(&pbe_eq->iopoll); + } + cancel_delayed_work_sync(&phba->eqd_update); + cancel_work_sync(&phba->boot_work); + /* WQ might be running cancel queued mcc_work if we are not exiting */ + if (!unload && beiscsi_hba_in_error(phba)) { + pbe_eq = &phwi_context->be_eq[i]; + cancel_work_sync(&pbe_eq->mcc_work); + } + hwi_cleanup_port(phba); + beiscsi_cleanup_port(phba); +} + +static void beiscsi_sess_work(struct work_struct *work) +{ + struct beiscsi_hba *phba; + + phba = container_of(work, struct beiscsi_hba, sess_work); + /* + * This work gets scheduled only in case of HBA error. + * Old sessions are gone so need to be re-established. + * iscsi_session_failure needs process context hence this work. + */ + iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); +} + +static void beiscsi_recover_port(struct work_struct *work) +{ + struct beiscsi_hba *phba; + + phba = container_of(work, struct beiscsi_hba, recover_port.work); + beiscsi_disable_port(phba, 0); + beiscsi_enable_port(phba); +} + +static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct beiscsi_hba *phba = NULL; + + phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); + set_bit(BEISCSI_HBA_PCI_ERR, &phba->state); + + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : EEH error detected\n"); + + /* first stop UE detection when PCI error detected */ + del_timer_sync(&phba->hw_check); + cancel_delayed_work_sync(&phba->recover_port); + + /* sessions are no longer valid, so first fail the sessions */ + iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); + beiscsi_disable_port(phba, 0); + + if (state == pci_channel_io_perm_failure) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : EEH : State PERM Failure"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_disable_device(pdev); + + /* The error could cause the FW to trigger a flash debug dump. + * Resetting the card while flash dump is in progress + * can cause it not to recover; wait for it to finish. + * Wait only for first function as it is needed only once per + * adapter. 
+ **/ + if (pdev->devfn == 0) + ssleep(30); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev) +{ + struct beiscsi_hba *phba = NULL; + int status = 0; + + phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); + + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : EEH Reset\n"); + + status = pci_enable_device(pdev); + if (status) + return PCI_ERS_RESULT_DISCONNECT; + + pci_set_master(pdev); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + status = beiscsi_check_fw_rdy(phba); + if (status) { + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, + "BM_%d : EEH Reset Completed\n"); + } else { + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, + "BM_%d : EEH Reset Completion Failure\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_RECOVERED; +} + +static void beiscsi_eeh_resume(struct pci_dev *pdev) +{ + struct beiscsi_hba *phba; + int ret; + + phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); + pci_save_state(pdev); + + ret = beiscsi_enable_port(phba); + if (ret) + __beiscsi_log(phba, KERN_ERR, + "BM_%d : AER EEH resume failed\n"); +} + +static int beiscsi_dev_probe(struct pci_dev *pcidev, + const struct pci_device_id *id) +{ + struct hwi_context_memory *phwi_context; + struct hwi_controller *phwi_ctrlr; + struct beiscsi_hba *phba = NULL; + struct be_eq_obj *pbe_eq; + unsigned int s_handle; + char wq_name[20]; + int ret, i; + + ret = beiscsi_enable_pci(pcidev); + if (ret < 0) { + dev_err(&pcidev->dev, + "beiscsi_dev_probe - Failed to enable pci device\n"); + return ret; + } + + phba = beiscsi_hba_alloc(pcidev); + if (!phba) { + dev_err(&pcidev->dev, + "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n"); + ret = -ENOMEM; + goto disable_pci; + } + + pci_save_state(pcidev); + + /* Initialize Driver configuration Paramters */ + beiscsi_hba_attrs_init(phba); + + phba->mac_addr_set = false; + + switch (pcidev->device) { + case BE_DEVICE_ID1: + case OC_DEVICE_ID1: + case OC_DEVICE_ID2: + phba->generation = BE_GEN2; + phba->iotask_fn = beiscsi_iotask; + dev_warn(&pcidev->dev, + "Obsolete/Unsupported BE2 Adapter Family\n"); + break; + case BE_DEVICE_ID2: + case OC_DEVICE_ID3: + phba->generation = BE_GEN3; + phba->iotask_fn = beiscsi_iotask; + break; + case OC_SKH_ID1: + phba->generation = BE_GEN4; + phba->iotask_fn = beiscsi_iotask_v2; + break; + default: + phba->generation = 0; + } + + ret = be_ctrl_init(phba, pcidev); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : be_ctrl_init failed\n"); + goto free_hba; + } + + ret = beiscsi_init_sliport(phba); + if (ret) + goto free_hba; + + spin_lock_init(&phba->io_sgl_lock); + spin_lock_init(&phba->mgmt_sgl_lock); + spin_lock_init(&phba->async_pdu_lock); + ret = beiscsi_get_fw_config(&phba->ctrl, phba); + if (ret != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Error getting fw config\n"); + goto free_port; + } + beiscsi_get_port_name(&phba->ctrl, phba); + beiscsi_get_params(phba); + beiscsi_set_host_data(phba); + beiscsi_set_uer_feature(phba); + + be2iscsi_enable_msix(phba); + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : num_cpus = %d\n", + phba->num_cpus); + + phba->shost->max_id = phba->params.cxns_per_ctrl; + phba->shost->can_queue = phba->params.ios_per_ctrl; + ret = beiscsi_get_memory(phba); + if (ret < 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : alloc host mem failed\n"); + goto free_port; + } + + ret = beiscsi_init_port(phba); + if (ret < 0) { + beiscsi_log(phba, KERN_ERR, 
BEISCSI_LOG_INIT, + "BM_%d : init port failed\n"); + beiscsi_free_mem(phba); + goto free_port; + } + + for (i = 0; i < MAX_MCC_CMD; i++) { + init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); + phba->ctrl.mcc_tag[i] = i + 1; + phba->ctrl.mcc_tag_status[i + 1] = 0; + phba->ctrl.mcc_tag_available++; + memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, + sizeof(struct be_dma_mem)); + } + + phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; + + snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq", + phba->shost->host_no); + phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name); + if (!phba->wq) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : beiscsi_dev_probe-" + "Failed to allocate work queue\n"); + ret = -ENOMEM; + goto free_twq; + } + + INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work); + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll); + } + + i = (phba->pcidev->msix_enabled) ? i : 0; + /* Work item for MCC handling */ + pbe_eq = &phwi_context->be_eq[i]; + INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work); + + ret = beiscsi_init_irqs(phba); + if (ret < 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : beiscsi_dev_probe-" + "Failed to beiscsi_init_irqs\n"); + goto disable_iopoll; + } + hwi_enable_intr(phba); + + ret = iscsi_host_add(phba->shost, &phba->pcidev->dev); + if (ret) + goto free_irqs; + + /* set online bit after port is operational */ + set_bit(BEISCSI_HBA_ONLINE, &phba->state); + __beiscsi_log(phba, KERN_INFO, + "BM_%d : port online: 0x%lx\n", phba->state); + + INIT_WORK(&phba->boot_work, beiscsi_boot_work); + ret = beiscsi_boot_get_shandle(phba, &s_handle); + if (ret > 0) { + beiscsi_start_boot_work(phba, s_handle); + /** + * Set this bit after starting the work to let + * probe handle it first. + * ASYNC event can too schedule this work. + */ + set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state); + } + + beiscsi_iface_create_default(phba); + schedule_delayed_work(&phba->eqd_update, + msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); + + INIT_WORK(&phba->sess_work, beiscsi_sess_work); + INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port); + /** + * Start UE detection here. UE before this will cause stall in probe + * and eventually fail the probe. 
+ */ + timer_setup(&phba->hw_check, beiscsi_hw_health_check, 0); + mod_timer(&phba->hw_check, + jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n"); + return 0; + +free_irqs: + hwi_disable_intr(phba); + beiscsi_free_irqs(phba); +disable_iopoll: + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + irq_poll_disable(&pbe_eq->iopoll); + } + destroy_workqueue(phba->wq); +free_twq: + hwi_cleanup_port(phba); + beiscsi_cleanup_port(phba); + beiscsi_free_mem(phba); +free_port: + dma_free_coherent(&phba->pcidev->dev, + phba->ctrl.mbox_mem_alloced.size, + phba->ctrl.mbox_mem_alloced.va, + phba->ctrl.mbox_mem_alloced.dma); + beiscsi_unmap_pci_function(phba); +free_hba: + pci_disable_msix(phba->pcidev); + pci_dev_put(phba->pcidev); + iscsi_host_free(phba->shost); + pci_set_drvdata(pcidev, NULL); +disable_pci: + pci_release_regions(pcidev); + pci_disable_device(pcidev); + return ret; +} + +static void beiscsi_remove(struct pci_dev *pcidev) +{ + struct beiscsi_hba *phba = NULL; + + phba = pci_get_drvdata(pcidev); + if (!phba) { + dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n"); + return; + } + + /* first stop UE detection before unloading */ + del_timer_sync(&phba->hw_check); + cancel_delayed_work_sync(&phba->recover_port); + cancel_work_sync(&phba->sess_work); + + beiscsi_iface_destroy_default(phba); + iscsi_host_remove(phba->shost, false); + beiscsi_disable_port(phba, 1); + + /* after cancelling boot_work */ + iscsi_boot_destroy_kset(phba->boot_struct.boot_kset); + + /* free all resources */ + destroy_workqueue(phba->wq); + beiscsi_free_mem(phba); + + /* ctrl uninit */ + beiscsi_unmap_pci_function(phba); + dma_free_coherent(&phba->pcidev->dev, + phba->ctrl.mbox_mem_alloced.size, + phba->ctrl.mbox_mem_alloced.va, + phba->ctrl.mbox_mem_alloced.dma); + + pci_dev_put(phba->pcidev); + iscsi_host_free(phba->shost); + pci_set_drvdata(pcidev, NULL); + pci_release_regions(pcidev); + pci_disable_device(pcidev); +} + + +static struct pci_error_handlers beiscsi_eeh_handlers = { + .error_detected = beiscsi_eeh_err_detected, + .slot_reset = beiscsi_eeh_reset, + .resume = beiscsi_eeh_resume, +}; + +struct iscsi_transport beiscsi_iscsi_transport = { + .owner = THIS_MODULE, + .name = DRV_NAME, + .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO | + CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD, + .create_session = beiscsi_session_create, + .destroy_session = beiscsi_session_destroy, + .create_conn = beiscsi_conn_create, + .bind_conn = beiscsi_conn_bind, + .unbind_conn = iscsi_conn_unbind, + .destroy_conn = iscsi_conn_teardown, + .attr_is_visible = beiscsi_attr_is_visible, + .set_iface_param = beiscsi_iface_set_param, + .get_iface_param = beiscsi_iface_get_param, + .set_param = beiscsi_set_param, + .get_conn_param = iscsi_conn_get_param, + .get_session_param = iscsi_session_get_param, + .get_host_param = beiscsi_get_host_param, + .start_conn = beiscsi_conn_start, + .stop_conn = iscsi_conn_stop, + .send_pdu = iscsi_conn_send_pdu, + .xmit_task = beiscsi_task_xmit, + .cleanup_task = beiscsi_cleanup_task, + .alloc_pdu = beiscsi_alloc_pdu, + .parse_pdu_itt = beiscsi_parse_pdu, + .get_stats = beiscsi_conn_get_stats, + .get_ep_param = beiscsi_ep_get_param, + .ep_connect = beiscsi_ep_connect, + .ep_poll = beiscsi_ep_poll, + .ep_disconnect = beiscsi_ep_disconnect, + .session_recovery_timedout = iscsi_session_recovery_timedout, + .bsg_request = beiscsi_bsg_request, +}; + 
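+/*
+ * Summary comment (added for readability, not part of the upstream file):
+ * the pci_driver below ties beiscsi_dev_probe()/beiscsi_remove() and the
+ * EEH handlers to the supported device IDs. beiscsi_module_init() further
+ * down registers the iSCSI transport template first and only then this
+ * PCI driver; on failure, and in beiscsi_module_exit(), the teardown runs
+ * in the reverse order.
+ */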
+static struct pci_driver beiscsi_pci_driver = {
+	.name = DRV_NAME,
+	.probe = beiscsi_dev_probe,
+	.remove = beiscsi_remove,
+	.id_table = beiscsi_pci_id_table,
+	.err_handler = &beiscsi_eeh_handlers
+};
+
+static int __init beiscsi_module_init(void)
+{
+	int ret;
+
+	beiscsi_scsi_transport =
+		iscsi_register_transport(&beiscsi_iscsi_transport);
+	if (!beiscsi_scsi_transport) {
+		printk(KERN_ERR
+		       "beiscsi_module_init - Unable to register beiscsi transport.\n");
+		return -ENOMEM;
+	}
+	printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
+	       &beiscsi_iscsi_transport);
+
+	ret = pci_register_driver(&beiscsi_pci_driver);
+	if (ret) {
+		printk(KERN_ERR
+		       "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
+		goto unregister_iscsi_transport;
+	}
+	return 0;
+
+unregister_iscsi_transport:
+	iscsi_unregister_transport(&beiscsi_iscsi_transport);
+	return ret;
+}
+
+static void __exit beiscsi_module_exit(void)
+{
+	pci_unregister_driver(&beiscsi_pci_driver);
+	iscsi_unregister_transport(&beiscsi_iscsi_transport);
+}
+
+module_init(beiscsi_module_init);
+module_exit(beiscsi_module_exit);
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
new file mode 100644
index 000000000..71c95d144
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -0,0 +1,1029 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Contact Information:
+ * linux-drivers@broadcom.com
+ */
+
+#ifndef _BEISCSI_MAIN_
+#define _BEISCSI_MAIN_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define DRV_NAME	"be2iscsi"
+#define BUILD_STR	"11.4.0.1"
+#define BE_NAME		"Emulex OneConnect" \
+			"Open-iSCSI Driver version" BUILD_STR
+#define DRV_DESC	BE_NAME " " "Driver"
+
+#define BE_VENDOR_ID	0x19A2
+#define ELX_VENDOR_ID	0x10DF
+/* DEVICE ID's for BE2 */
+#define BE_DEVICE_ID1	0x212
+#define OC_DEVICE_ID1	0x702
+#define OC_DEVICE_ID2	0x703
+
+/* DEVICE ID's for BE3 */
+#define BE_DEVICE_ID2	0x222
+#define OC_DEVICE_ID3	0x712
+
+/* DEVICE ID for SKH */
+#define OC_SKH_ID1	0x722
+
+#define BE2_IO_DEPTH	1024
+#define BE2_MAX_SESSIONS	256
+#define BE2_TMFS	16
+#define BE2_NOPOUT_REQ	16
+#define BE2_SGE		32
+#define BE2_DEFPDU_HDR_SZ	64
+#define BE2_DEFPDU_DATA_SZ	8192
+#define BE2_MAX_NUM_CQ_PROC	512
+
+#define MAX_CPUS	64U
+#define BEISCSI_MAX_NUM_CPUS	7
+
+#define BEISCSI_VER_STRLEN	32
+
+#define BEISCSI_SGLIST_ELEMENTS	30
+
+/**
+ * BE_INVLDT_CMD_TBL_SZ is 128 which is total number commands that can
+ * be invalidated at a time, consider it before changing the value of
+ * BEISCSI_CMD_PER_LUN.
+ */ +#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ +#define BEISCSI_MAX_SECTORS 1024 /* scsi_host->max_sectors */ +#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */ + +#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ +#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */ +#define BEISCSI_MAX_FRAGS_INIT 192 + +#define BE_SENSE_INFO_SIZE 258 +#define BE_ISCSI_PDU_HEADER_SIZE 64 +#define BE_MIN_MEM_SIZE 16384 +#define MAX_CMD_SZ 65536 +#define IIOC_SCSI_DATA 0x05 /* Write Operation */ + +/** + * hardware needs the async PDU buffers to be posted in multiples of 8 + * So have atleast 8 of them by default + */ + +#define HWI_GET_ASYNC_PDU_CTX(phwi, ulp_num) \ + (phwi->phwi_ctxt->pasync_ctx[ulp_num]) + +/********* Memory BAR register ************/ +#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc +/** + * Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt + * Disable" may still globally block interrupts in addition to individual + * interrupt masks; a mechanism for the device driver to block all interrupts + * atomically without having to arbitrate for the PCI Interrupt Disable bit + * with the OS. + */ +#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */ + +/********* ISR0 Register offset **********/ +#define CEV_ISR0_OFFSET 0xC18 +#define CEV_ISR_SIZE 4 + +/** + * Macros for reading/writing a protection domain or CSR registers + * in BladeEngine. + */ + +#define DB_TXULP0_OFFSET 0x40 +#define DB_RXULP0_OFFSET 0xA0 +/********* Event Q door bell *************/ +#define DB_EQ_OFFSET DB_CQ_OFFSET +#define DB_EQ_RING_ID_LOW_MASK 0x1FF /* bits 0 - 8 */ +/* Clear the interrupt for this eq */ +#define DB_EQ_CLR_SHIFT (9) /* bit 9 */ +/* Must be 1 */ +#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */ +/* Higher Order EQ_ID bit */ +#define DB_EQ_RING_ID_HIGH_MASK 0x1F /* bits 11 - 15 */ +#define DB_EQ_HIGH_SET_SHIFT 11 +#define DB_EQ_HIGH_FEILD_SHIFT 9 +/* Number of event entries processed */ +#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ +/* Rearm bit */ +#define DB_EQ_REARM_SHIFT (29) /* bit 29 */ + +/********* Compl Q door bell *************/ +#define DB_CQ_OFFSET 0x120 +#define DB_CQ_RING_ID_LOW_MASK 0x3FF /* bits 0 - 9 */ +/* Higher Order CQ_ID bit */ +#define DB_CQ_RING_ID_HIGH_MASK 0x1F /* bits 11 - 15 */ +#define DB_CQ_HIGH_SET_SHIFT 11 +#define DB_CQ_HIGH_FEILD_SHIFT 10 + +/* Number of event entries processed */ +#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ +/* Rearm bit */ +#define DB_CQ_REARM_SHIFT (29) /* bit 29 */ + +#define GET_HWI_CONTROLLER_WS(pc) (pc->phwi_ctrlr) +#define HWI_GET_DEF_BUFQ_ID(pc, ulp_num) (((struct hwi_controller *)\ + (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data[ulp_num].id) +#define HWI_GET_DEF_HDRQ_ID(pc, ulp_num) (((struct hwi_controller *)\ + (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr[ulp_num].id) + +#define PAGES_REQUIRED(x) \ + ((x < PAGE_SIZE) ? 
1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE)) + +#define MEM_DESCR_OFFSET 8 +#define BEISCSI_DEFQ_HDR 1 +#define BEISCSI_DEFQ_DATA 0 +enum be_mem_enum { + HWI_MEM_ADDN_CONTEXT, + HWI_MEM_WRB, + HWI_MEM_WRBH, + HWI_MEM_SGLH, + HWI_MEM_SGE, + HWI_MEM_TEMPLATE_HDR_ULP0, + HWI_MEM_ASYNC_HEADER_BUF_ULP0, /* 6 */ + HWI_MEM_ASYNC_DATA_BUF_ULP0, + HWI_MEM_ASYNC_HEADER_RING_ULP0, + HWI_MEM_ASYNC_DATA_RING_ULP0, + HWI_MEM_ASYNC_HEADER_HANDLE_ULP0, + HWI_MEM_ASYNC_DATA_HANDLE_ULP0, /* 11 */ + HWI_MEM_ASYNC_PDU_CONTEXT_ULP0, + HWI_MEM_TEMPLATE_HDR_ULP1, + HWI_MEM_ASYNC_HEADER_BUF_ULP1, /* 14 */ + HWI_MEM_ASYNC_DATA_BUF_ULP1, + HWI_MEM_ASYNC_HEADER_RING_ULP1, + HWI_MEM_ASYNC_DATA_RING_ULP1, + HWI_MEM_ASYNC_HEADER_HANDLE_ULP1, + HWI_MEM_ASYNC_DATA_HANDLE_ULP1, /* 19 */ + HWI_MEM_ASYNC_PDU_CONTEXT_ULP1, + ISCSI_MEM_GLOBAL_HEADER, + SE_MEM_MAX +}; + +struct be_bus_address32 { + unsigned int address_lo; + unsigned int address_hi; +}; + +struct be_bus_address64 { + unsigned long long address; +}; + +struct be_bus_address { + union { + struct be_bus_address32 a32; + struct be_bus_address64 a64; + } u; +}; + +struct mem_array { + struct be_bus_address bus_address; /* Bus address of location */ + void *virtual_address; /* virtual address to the location */ + unsigned int size; /* Size required by memory block */ +}; + +struct be_mem_descriptor { + unsigned int size_in_bytes; /* Size required by memory block */ + unsigned int num_elements; + struct mem_array *mem_array; +}; + +struct sgl_handle { + unsigned int sgl_index; + unsigned int type; + unsigned int cid; + struct iscsi_task *task; + struct iscsi_sge *pfrag; +}; + +struct hba_parameters { + unsigned int ios_per_ctrl; + unsigned int cxns_per_ctrl; + unsigned int icds_per_ctrl; + unsigned int num_sge_per_io; + unsigned int defpdu_hdr_sz; + unsigned int defpdu_data_sz; + unsigned int num_cq_entries; + unsigned int num_eq_entries; + unsigned int wrbs_per_cxn; + unsigned int hwi_ws_sz; +}; + +#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \ + (phwi_ctrlr->wrb_context[cri].ulp_num) +struct hwi_wrb_context { + spinlock_t wrb_lock; + struct wrb_handle **pwrb_handle_base; + struct wrb_handle **pwrb_handle_basestd; + struct iscsi_wrb *plast_wrb; + unsigned short alloc_index; + unsigned short free_index; + unsigned short wrb_handles_available; + unsigned short cid; + uint8_t ulp_num; /* ULP to which CID binded */ + uint32_t doorbell_offset; +}; + +struct ulp_cid_info { + unsigned short *cid_array; + unsigned short avlbl_cids; + unsigned short cid_alloc; + unsigned short cid_free; +}; + +#include "be.h" +#define chip_be2(phba) (phba->generation == BE_GEN2) +#define chip_be3_r(phba) (phba->generation == BE_GEN3) +#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba))) + +#define BEISCSI_ULP0 0 +#define BEISCSI_ULP1 1 +#define BEISCSI_ULP_COUNT 2 +#define BEISCSI_ULP0_LOADED 0x01 +#define BEISCSI_ULP1_LOADED 0x02 + +#define BEISCSI_ULP_AVLBL_CID(phba, ulp_num) \ + (((struct ulp_cid_info *)phba->cid_array_info[ulp_num])->avlbl_cids) +#define BEISCSI_ULP0_AVLBL_CID(phba) \ + BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP0) +#define BEISCSI_ULP1_AVLBL_CID(phba) \ + BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP1) + +struct beiscsi_hba { + struct hba_parameters params; + struct hwi_controller *phwi_ctrlr; + unsigned int mem_req[SE_MEM_MAX]; + /* PCI BAR mapped addresses */ + u8 __iomem *csr_va; /* CSR */ + u8 __iomem *db_va; /* Door Bell */ + u8 __iomem *pci_va; /* PCI Config */ + /* PCI representation of our HBA */ + struct pci_dev *pcidev; + unsigned int num_cpus; + 
unsigned int nxt_cqid; + char *msi_name[MAX_CPUS]; + struct be_mem_descriptor *init_mem; + + unsigned short io_sgl_alloc_index; + unsigned short io_sgl_free_index; + unsigned short io_sgl_hndl_avbl; + struct sgl_handle **io_sgl_hndl_base; + + unsigned short eh_sgl_alloc_index; + unsigned short eh_sgl_free_index; + unsigned short eh_sgl_hndl_avbl; + struct sgl_handle **eh_sgl_hndl_base; + spinlock_t io_sgl_lock; + spinlock_t mgmt_sgl_lock; + spinlock_t async_pdu_lock; + struct list_head hba_queue; +#define BE_MAX_SESSION 2048 +#define BE_INVALID_CID 0xffff +#define BE_SET_CID_TO_CRI(cri_index, cid) \ + (phba->cid_to_cri_map[cid] = cri_index) +#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid]) + unsigned short cid_to_cri_map[BE_MAX_SESSION]; + struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT]; + struct iscsi_endpoint **ep_array; + struct beiscsi_conn **conn_table; + struct Scsi_Host *shost; + struct iscsi_iface *ipv4_iface; + struct iscsi_iface *ipv6_iface; + struct { + /** + * group together since they are used most frequently + * for cid to cri conversion + */ +#define BEISCSI_PHYS_PORT_MAX 4 + unsigned int phys_port; + /* valid values of phys_port id are 0, 1, 2, 3 */ + unsigned int eqid_count; + unsigned int cqid_count; + unsigned int iscsi_cid_start[BEISCSI_ULP_COUNT]; +#define BEISCSI_GET_CID_COUNT(phba, ulp_num) \ + (phba->fw_config.iscsi_cid_count[ulp_num]) + unsigned int iscsi_cid_count[BEISCSI_ULP_COUNT]; + unsigned int iscsi_icd_count[BEISCSI_ULP_COUNT]; + unsigned int iscsi_icd_start[BEISCSI_ULP_COUNT]; + unsigned int iscsi_chain_start[BEISCSI_ULP_COUNT]; + unsigned int iscsi_chain_count[BEISCSI_ULP_COUNT]; + + unsigned short iscsi_features; + uint16_t dual_ulp_aware; + unsigned long ulp_supported; + } fw_config; + + unsigned long state; +#define BEISCSI_HBA_ONLINE 0 +#define BEISCSI_HBA_LINK_UP 1 +#define BEISCSI_HBA_BOOT_FOUND 2 +#define BEISCSI_HBA_BOOT_WORK 3 +#define BEISCSI_HBA_UER_SUPP 4 +#define BEISCSI_HBA_PCI_ERR 5 +#define BEISCSI_HBA_FW_TIMEOUT 6 +#define BEISCSI_HBA_IN_UE 7 +#define BEISCSI_HBA_IN_TPE 8 + +/* error bits */ +#define BEISCSI_HBA_IN_ERR ((1 << BEISCSI_HBA_PCI_ERR) | \ + (1 << BEISCSI_HBA_FW_TIMEOUT) | \ + (1 << BEISCSI_HBA_IN_UE) | \ + (1 << BEISCSI_HBA_IN_TPE)) + + u8 optic_state; + struct delayed_work eqd_update; + /* update EQ delay timer every 1000ms */ +#define BEISCSI_EQD_UPDATE_INTERVAL 1000 + struct timer_list hw_check; + /* check for UE every 1000ms */ +#define BEISCSI_UE_DETECT_INTERVAL 1000 + u32 ue2rp; + struct delayed_work recover_port; + struct work_struct sess_work; + + bool mac_addr_set; + u8 mac_address[ETH_ALEN]; + u8 port_name; + u8 port_speed; + char fw_ver_str[BEISCSI_VER_STRLEN]; + struct workqueue_struct *wq; /* The actuak work queue */ + struct be_ctrl_info ctrl; + unsigned int generation; + unsigned int interface_handle; + + struct be_aic_obj aic_obj[MAX_CPUS]; + unsigned int attr_log_enable; + int (*iotask_fn)(struct iscsi_task *, + struct scatterlist *sg, + uint32_t num_sg, uint32_t xferlen, + uint32_t writedir); + struct boot_struct { + int retry; + unsigned int tag; + unsigned int s_handle; + struct be_dma_mem nonemb_cmd; + enum { + BEISCSI_BOOT_REOPEN_SESS = 1, + BEISCSI_BOOT_GET_SHANDLE, + BEISCSI_BOOT_GET_SINFO, + BEISCSI_BOOT_LOGOUT_SESS, + BEISCSI_BOOT_CREATE_KSET, + } action; + struct mgmt_session_info boot_sess; + struct iscsi_boot_kset *boot_kset; + } boot_struct; + struct work_struct boot_work; +}; + +#define beiscsi_hba_in_error(phba) ((phba)->state & BEISCSI_HBA_IN_ERR) +#define 
beiscsi_hba_is_online(phba) \ + (!beiscsi_hba_in_error((phba)) && \ + test_bit(BEISCSI_HBA_ONLINE, &phba->state)) + +struct beiscsi_session { + struct dma_pool *bhs_pool; +}; + +/** + * struct beiscsi_conn - iscsi connection structure + */ +struct beiscsi_conn { + struct iscsi_conn *conn; + struct beiscsi_hba *phba; + u32 exp_statsn; + u32 doorbell_offset; + u32 beiscsi_conn_cid; + struct beiscsi_endpoint *ep; + unsigned short login_in_progress; + struct wrb_handle *plogin_wrb_handle; + struct sgl_handle *plogin_sgl_handle; + struct beiscsi_session *beiscsi_sess; + struct iscsi_task *task; +}; + +/* This structure is used by the chip */ +struct pdu_data_out { + u32 dw[12]; +}; +/** + * Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field + */ +struct amap_pdu_data_out { + u8 opcode[6]; /* opcode */ + u8 rsvd0[2]; /* should be 0 */ + u8 rsvd1[7]; + u8 final_bit; /* F bit */ + u8 rsvd2[16]; + u8 ahs_length[8]; /* no AHS */ + u8 data_len_hi[8]; + u8 data_len_lo[16]; /* DataSegmentLength */ + u8 lun[64]; + u8 itt[32]; /* ITT; initiator task tag */ + u8 ttt[32]; /* TTT; valid for R2T or 0xffffffff */ + u8 rsvd3[32]; + u8 exp_stat_sn[32]; + u8 rsvd4[32]; + u8 data_sn[32]; + u8 buffer_offset[32]; + u8 rsvd5[32]; +}; + +struct be_cmd_bhs { + struct iscsi_scsi_req iscsi_hdr; + unsigned char pad1[16]; + struct pdu_data_out iscsi_data_pdu; + unsigned char pad2[BE_SENSE_INFO_SIZE - + sizeof(struct pdu_data_out)]; +}; + +struct beiscsi_io_task { + struct wrb_handle *pwrb_handle; + struct sgl_handle *psgl_handle; + struct beiscsi_conn *conn; + struct scsi_cmnd *scsi_cmnd; + int num_sg; + struct hwi_wrb_context *pwrb_context; + itt_t libiscsi_itt; + struct be_cmd_bhs *cmd_bhs; + struct be_bus_address bhs_pa; + unsigned short bhs_len; + dma_addr_t mtask_addr; + uint32_t mtask_data_count; + uint8_t wrb_type; +}; + +struct be_nonio_bhs { + struct iscsi_hdr iscsi_hdr; + unsigned char pad1[16]; + struct pdu_data_out iscsi_data_pdu; + unsigned char pad2[BE_SENSE_INFO_SIZE - + sizeof(struct pdu_data_out)]; +}; + +struct be_status_bhs { + struct iscsi_scsi_req iscsi_hdr; + unsigned char pad1[16]; + /** + * The plus 2 below is to hold the sense info length that gets + * DMA'ed by RxULP + */ + unsigned char sense_info[BE_SENSE_INFO_SIZE]; +}; + +struct iscsi_sge { + u32 dw[4]; +}; + +/** + * Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field + */ +struct amap_iscsi_sge { + u8 addr_hi[32]; + u8 addr_lo[32]; + u8 sge_offset[22]; /* DWORD 2 */ + u8 rsvd0[9]; /* DWORD 2 */ + u8 last_sge; /* DWORD 2 */ + u8 len[17]; /* DWORD 3 */ + u8 rsvd1[15]; /* DWORD 3 */ +}; + +struct beiscsi_offload_params { + u32 dw[6]; +}; + +#define OFFLD_PARAMS_ERL 0x00000003 +#define OFFLD_PARAMS_DDE 0x00000004 +#define OFFLD_PARAMS_HDE 0x00000008 +#define OFFLD_PARAMS_IR2T 0x00000010 +#define OFFLD_PARAMS_IMD 0x00000020 +#define OFFLD_PARAMS_DATA_SEQ_INORDER 0x00000040 +#define OFFLD_PARAMS_PDU_SEQ_INORDER 0x00000080 +#define OFFLD_PARAMS_MAX_R2T 0x00FFFF00 + +/** + * Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field + */ +struct amap_beiscsi_offload_params { + u8 max_burst_length[32]; + u8 max_send_data_segment_length[32]; + u8 first_burst_length[32]; + u8 erl[2]; + u8 dde[1]; + u8 hde[1]; + u8 ir2t[1]; + u8 imd[1]; + u8 data_seq_inorder[1]; + u8 pdu_seq_inorder[1]; + u8 
max_r2t[16]; + u8 pad[8]; + u8 exp_statsn[32]; + u8 max_recv_data_segment_length[32]; +}; + +struct hd_async_handle { + struct list_head link; + struct be_bus_address pa; + void *pbuffer; + u32 buffer_len; + u16 index; + u16 cri; + u8 is_header; + u8 is_final; + u8 in_use; +}; + +#define BEISCSI_ASYNC_HDQ_SIZE(phba, ulp) \ + (BEISCSI_GET_CID_COUNT((phba), (ulp)) * 2) + +/** + * This has list of async PDUs that are waiting to be processed. + * Buffers live in this list for a brief duration before they get + * processed and posted back to hardware. + * Note that we don't really need one cri_wait_queue per async_entry. + * We need one cri_wait_queue per CRI. Its easier to manage if this + * is tagged along with the async_entry. + */ +struct hd_async_entry { + struct cri_wait_queue { + unsigned short hdr_len; + unsigned int bytes_received; + unsigned int bytes_needed; + struct list_head list; + } wq; + /* handles posted to FW resides here */ + struct hd_async_handle *header; + struct hd_async_handle *data; +}; + +struct hd_async_buf_context { + struct be_bus_address pa_base; + void *va_base; + void *ring_base; + struct hd_async_handle *handle_base; + u32 buffer_size; + u16 pi; +}; + +/** + * hd_async_context is declared for each ULP supporting iSCSI function. + */ +struct hd_async_context { + struct hd_async_buf_context async_header; + struct hd_async_buf_context async_data; + u16 num_entries; + /** + * When unsol PDU is in, it needs to be chained till all the bytes are + * received and then processing is done. hd_async_entry is created + * based on the cid_count for each ULP. When unsol PDU comes in based + * on the conn_id it needs to be added to the correct async_entry wq. + * Below defined cid_to_async_cri_map is used to reterive the + * async_cri_map for a particular connection. + * + * This array is initialized after beiscsi_create_wrb_rings returns. + * + * - this method takes more memory space, fixed to 2K + * - any support for connections greater than this the array size needs + * to be incremented + */ +#define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid]) + unsigned short cid_to_async_cri_map[BE_MAX_SESSION]; + /** + * This is a variable size array. Don`t add anything after this field!! 
+ */ + struct hd_async_entry *async_entry; +}; + +struct i_t_dpdu_cqe { + u32 dw[4]; +} __packed; + +/** + * Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field + */ +struct amap_i_t_dpdu_cqe { + u8 db_addr_hi[32]; + u8 db_addr_lo[32]; + u8 code[6]; + u8 cid[10]; + u8 dpl[16]; + u8 index[16]; + u8 num_cons[10]; + u8 rsvd0[4]; + u8 final; + u8 valid; +} __packed; + +struct amap_i_t_dpdu_cqe_v2 { + u8 db_addr_hi[32]; /* DWORD 0 */ + u8 db_addr_lo[32]; /* DWORD 1 */ + u8 code[6]; /* DWORD 2 */ + u8 num_cons; /* DWORD 2*/ + u8 rsvd0[8]; /* DWORD 2 */ + u8 dpl[17]; /* DWORD 2 */ + u8 index[16]; /* DWORD 3 */ + u8 cid[13]; /* DWORD 3 */ + u8 rsvd1; /* DWORD 3 */ + u8 final; /* DWORD 3 */ + u8 valid; /* DWORD 3 */ +} __packed; + +#define CQE_VALID_MASK 0x80000000 +#define CQE_CODE_MASK 0x0000003F +#define CQE_CID_MASK 0x0000FFC0 + +#define EQE_VALID_MASK 0x00000001 +#define EQE_MAJORCODE_MASK 0x0000000E +#define EQE_RESID_MASK 0xFFFF0000 + +struct be_eq_entry { + u32 dw[1]; +} __packed; + +/** + * Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field + */ +struct amap_eq_entry { + u8 valid; /* DWORD 0 */ + u8 major_code[3]; /* DWORD 0 */ + u8 minor_code[12]; /* DWORD 0 */ + u8 resource_id[16]; /* DWORD 0 */ + +} __packed; + +struct cq_db { + u32 dw[1]; +} __packed; + +/** + * Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field + */ +struct amap_cq_db { + u8 qid[10]; + u8 event[1]; + u8 rsvd0[5]; + u8 num_popped[13]; + u8 rearm[1]; + u8 rsvd1[2]; +} __packed; + +void beiscsi_process_eq(struct beiscsi_hba *phba); + +struct iscsi_wrb { + u32 dw[16]; +} __packed; + +#define WRB_TYPE_MASK 0xF0000000 +#define SKH_WRB_TYPE_OFFSET 27 +#define BE_WRB_TYPE_OFFSET 28 + +#define ADAPTER_SET_WRB_TYPE(pwrb, wrb_type, type_offset) \ + (pwrb->dw[0] |= (wrb_type << type_offset)) + +/** + * Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field + */ +struct amap_iscsi_wrb { + u8 lun[14]; /* DWORD 0 */ + u8 lt; /* DWORD 0 */ + u8 invld; /* DWORD 0 */ + u8 wrb_idx[8]; /* DWORD 0 */ + u8 dsp; /* DWORD 0 */ + u8 dmsg; /* DWORD 0 */ + u8 undr_run; /* DWORD 0 */ + u8 over_run; /* DWORD 0 */ + u8 type[4]; /* DWORD 0 */ + u8 ptr2nextwrb[8]; /* DWORD 1 */ + u8 r2t_exp_dtl[24]; /* DWORD 1 */ + u8 sgl_icd_idx[12]; /* DWORD 2 */ + u8 rsvd0[20]; /* DWORD 2 */ + u8 exp_data_sn[32]; /* DWORD 3 */ + u8 iscsi_bhs_addr_hi[32]; /* DWORD 4 */ + u8 iscsi_bhs_addr_lo[32]; /* DWORD 5 */ + u8 cmdsn_itt[32]; /* DWORD 6 */ + u8 dif_ref_tag[32]; /* DWORD 7 */ + u8 sge0_addr_hi[32]; /* DWORD 8 */ + u8 sge0_addr_lo[32]; /* DWORD 9 */ + u8 sge0_offset[22]; /* DWORD 10 */ + u8 pbs; /* DWORD 10 */ + u8 dif_mode[2]; /* DWORD 10 */ + u8 rsvd1[6]; /* DWORD 10 */ + u8 sge0_last; /* DWORD 10 */ + u8 sge0_len[17]; /* DWORD 11 */ + u8 dif_meta_tag[14]; /* DWORD 11 */ + u8 sge0_in_ddr; /* DWORD 11 */ + u8 sge1_addr_hi[32]; /* DWORD 12 */ + u8 sge1_addr_lo[32]; /* DWORD 13 */ + u8 sge1_r2t_offset[22]; /* DWORD 14 */ + u8 rsvd2[9]; /* DWORD 14 */ + u8 sge1_last; /* DWORD 14 */ + u8 sge1_len[17]; /* DWORD 15 */ + u8 ref_sgl_icd_idx[12]; /* DWORD 15 */ + u8 rsvd3[2]; /* DWORD 15 */ + u8 sge1_in_ddr; /* DWORD 15 */ + +} __packed; + +struct amap_iscsi_wrb_v2 { + u8 r2t_exp_dtl[25]; /* DWORD 0 */ + u8 rsvd0[2]; /* 
DWORD 0*/ + u8 type[5]; /* DWORD 0 */ + u8 ptr2nextwrb[8]; /* DWORD 1 */ + u8 wrb_idx[8]; /* DWORD 1 */ + u8 lun[16]; /* DWORD 1 */ + u8 sgl_idx[16]; /* DWORD 2 */ + u8 ref_sgl_icd_idx[16]; /* DWORD 2 */ + u8 exp_data_sn[32]; /* DWORD 3 */ + u8 iscsi_bhs_addr_hi[32]; /* DWORD 4 */ + u8 iscsi_bhs_addr_lo[32]; /* DWORD 5 */ + u8 cq_id[16]; /* DWORD 6 */ + u8 rsvd1[16]; /* DWORD 6 */ + u8 cmdsn_itt[32]; /* DWORD 7 */ + u8 sge0_addr_hi[32]; /* DWORD 8 */ + u8 sge0_addr_lo[32]; /* DWORD 9 */ + u8 sge0_offset[24]; /* DWORD 10 */ + u8 rsvd2[7]; /* DWORD 10 */ + u8 sge0_last; /* DWORD 10 */ + u8 sge0_len[17]; /* DWORD 11 */ + u8 rsvd3[7]; /* DWORD 11 */ + u8 diff_enbl; /* DWORD 11 */ + u8 u_run; /* DWORD 11 */ + u8 o_run; /* DWORD 11 */ + u8 invld; /* DWORD 11 */ + u8 dsp; /* DWORD 11 */ + u8 dmsg; /* DWORD 11 */ + u8 rsvd4; /* DWORD 11 */ + u8 lt; /* DWORD 11 */ + u8 sge1_addr_hi[32]; /* DWORD 12 */ + u8 sge1_addr_lo[32]; /* DWORD 13 */ + u8 sge1_r2t_offset[24]; /* DWORD 14 */ + u8 rsvd5[7]; /* DWORD 14 */ + u8 sge1_last; /* DWORD 14 */ + u8 sge1_len[17]; /* DWORD 15 */ + u8 rsvd6[15]; /* DWORD 15 */ +} __packed; + + +struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid, + struct hwi_wrb_context **pcontext); +void +free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle); + +void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task); + +void hwi_ring_cq_db(struct beiscsi_hba *phba, + unsigned int id, unsigned int num_processed, + unsigned char rearm); + +unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget); +void beiscsi_process_mcc_cq(struct beiscsi_hba *phba); + +struct pdu_nop_out { + u32 dw[12]; +}; + +/** + * Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field + */ +struct amap_pdu_nop_out { + u8 opcode[6]; /* opcode 0x00 */ + u8 i_bit; /* I Bit */ + u8 x_bit; /* reserved; should be 0 */ + u8 fp_bit_filler1[7]; + u8 f_bit; /* always 1 */ + u8 reserved1[16]; + u8 ahs_length[8]; /* no AHS */ + u8 data_len_hi[8]; + u8 data_len_lo[16]; /* DataSegmentLength */ + u8 lun[64]; + u8 itt[32]; /* initiator id for ping or 0xffffffff */ + u8 ttt[32]; /* target id for ping or 0xffffffff */ + u8 cmd_sn[32]; + u8 exp_stat_sn[32]; + u8 reserved5[128]; +}; + +#define PDUBASE_OPCODE_MASK 0x0000003F +#define PDUBASE_DATALENHI_MASK 0x0000FF00 +#define PDUBASE_DATALENLO_MASK 0xFFFF0000 + +struct pdu_base { + u32 dw[16]; +} __packed; + +/** + * Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field + */ +struct amap_pdu_base { + u8 opcode[6]; + u8 i_bit; /* immediate bit */ + u8 x_bit; /* reserved, always 0 */ + u8 reserved1[24]; /* opcode-specific fields */ + u8 ahs_length[8]; /* length units is 4 byte words */ + u8 data_len_hi[8]; + u8 data_len_lo[16]; /* DatasegmentLength */ + u8 lun[64]; /* lun or opcode-specific fields */ + u8 itt[32]; /* initiator task tag */ + u8 reserved4[224]; +}; + +struct iscsi_target_context_update_wrb { + u32 dw[16]; +} __packed; + +/** + * Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field + */ +#define BE_TGT_CTX_UPDT_CMD 0x07 +struct amap_iscsi_target_context_update_wrb { + u8 lun[14]; /* DWORD 0 */ + u8 lt; /* DWORD 0 */ + u8 invld; /* DWORD 0 */ + u8 wrb_idx[8]; /* DWORD 0 */ + u8 dsp; /* DWORD 0 */ + u8 dmsg; /* 
DWORD 0 */ + u8 undr_run; /* DWORD 0 */ + u8 over_run; /* DWORD 0 */ + u8 type[4]; /* DWORD 0 */ + u8 ptr2nextwrb[8]; /* DWORD 1 */ + u8 max_burst_length[19]; /* DWORD 1 */ + u8 rsvd0[5]; /* DWORD 1 */ + u8 rsvd1[15]; /* DWORD 2 */ + u8 max_send_data_segment_length[17]; /* DWORD 2 */ + u8 first_burst_length[14]; /* DWORD 3 */ + u8 rsvd2[2]; /* DWORD 3 */ + u8 tx_wrbindex_drv_msg[8]; /* DWORD 3 */ + u8 rsvd3[5]; /* DWORD 3 */ + u8 session_state[3]; /* DWORD 3 */ + u8 rsvd4[16]; /* DWORD 4 */ + u8 tx_jumbo; /* DWORD 4 */ + u8 hde; /* DWORD 4 */ + u8 dde; /* DWORD 4 */ + u8 erl[2]; /* DWORD 4 */ + u8 domain_id[5]; /* DWORD 4 */ + u8 mode; /* DWORD 4 */ + u8 imd; /* DWORD 4 */ + u8 ir2t; /* DWORD 4 */ + u8 notpredblq[2]; /* DWORD 4 */ + u8 compltonack; /* DWORD 4 */ + u8 stat_sn[32]; /* DWORD 5 */ + u8 pad_buffer_addr_hi[32]; /* DWORD 6 */ + u8 pad_buffer_addr_lo[32]; /* DWORD 7 */ + u8 pad_addr_hi[32]; /* DWORD 8 */ + u8 pad_addr_lo[32]; /* DWORD 9 */ + u8 rsvd5[32]; /* DWORD 10 */ + u8 rsvd6[32]; /* DWORD 11 */ + u8 rsvd7[32]; /* DWORD 12 */ + u8 rsvd8[32]; /* DWORD 13 */ + u8 rsvd9[32]; /* DWORD 14 */ + u8 rsvd10[32]; /* DWORD 15 */ + +} __packed; + +#define BEISCSI_MAX_RECV_DATASEG_LEN (64 * 1024) +#define BEISCSI_MAX_CXNS 1 +struct amap_iscsi_target_context_update_wrb_v2 { + u8 max_burst_length[24]; /* DWORD 0 */ + u8 rsvd0[3]; /* DWORD 0 */ + u8 type[5]; /* DWORD 0 */ + u8 ptr2nextwrb[8]; /* DWORD 1 */ + u8 wrb_idx[8]; /* DWORD 1 */ + u8 rsvd1[16]; /* DWORD 1 */ + u8 max_send_data_segment_length[24]; /* DWORD 2 */ + u8 rsvd2[8]; /* DWORD 2 */ + u8 first_burst_length[24]; /* DWORD 3 */ + u8 rsvd3[8]; /* DOWRD 3 */ + u8 max_r2t[16]; /* DWORD 4 */ + u8 rsvd4; /* DWORD 4 */ + u8 hde; /* DWORD 4 */ + u8 dde; /* DWORD 4 */ + u8 erl[2]; /* DWORD 4 */ + u8 rsvd5[6]; /* DWORD 4 */ + u8 imd; /* DWORD 4 */ + u8 ir2t; /* DWORD 4 */ + u8 rsvd6[3]; /* DWORD 4 */ + u8 stat_sn[32]; /* DWORD 5 */ + u8 rsvd7[32]; /* DWORD 6 */ + u8 rsvd8[32]; /* DWORD 7 */ + u8 max_recv_dataseg_len[24]; /* DWORD 8 */ + u8 rsvd9[8]; /* DWORD 8 */ + u8 rsvd10[32]; /* DWORD 9 */ + u8 rsvd11[32]; /* DWORD 10 */ + u8 max_cxns[16]; /* DWORD 11 */ + u8 rsvd12[11]; /* DWORD 11*/ + u8 invld; /* DWORD 11 */ + u8 rsvd13;/* DWORD 11*/ + u8 dmsg; /* DWORD 11 */ + u8 data_seq_inorder; /* DWORD 11 */ + u8 pdu_seq_inorder; /* DWORD 11 */ + u8 rsvd14[32]; /*DWORD 12 */ + u8 rsvd15[32]; /* DWORD 13 */ + u8 rsvd16[32]; /* DWORD 14 */ + u8 rsvd17[32]; /* DWORD 15 */ +} __packed; + + +struct be_ring { + u32 pages; /* queue size in pages */ + u32 id; /* queue id assigned by beklib */ + u32 num; /* number of elements in queue */ + u32 cidx; /* consumer index */ + u32 pidx; /* producer index -- not used by most rings */ + u32 item_size; /* size in bytes of one object */ + u8 ulp_num; /* ULP to which CID binded */ + u16 register_set; + u16 doorbell_format; + u32 doorbell_offset; + + void *va; /* The virtual address of the ring. This + * should be last to allow 32 & 64 bit debugger + * extensions to work. 
+ */ +}; + +struct hwi_controller { + struct hwi_wrb_context *wrb_context; + struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT]; + struct be_ring default_pdu_data[BEISCSI_ULP_COUNT]; + struct hwi_context_memory *phwi_ctxt; +}; + +enum hwh_type_enum { + HWH_TYPE_IO = 1, + HWH_TYPE_LOGOUT = 2, + HWH_TYPE_TMF = 3, + HWH_TYPE_NOP = 4, + HWH_TYPE_IO_RD = 5, + HWH_TYPE_LOGIN = 11, + HWH_TYPE_INVALID = 0xFFFFFFFF +}; + +struct wrb_handle { + unsigned short wrb_index; + struct iscsi_task *pio_handle; + struct iscsi_wrb *pwrb; +}; + +struct hwi_context_memory { + struct be_eq_obj be_eq[MAX_CPUS]; + struct be_queue_info be_cq[MAX_CPUS - 1]; + + struct be_queue_info *be_wrbq; + /** + * Create array of ULP number for below entries as DEFQ + * will be created for both ULP if iSCSI Protocol is + * loaded on both ULP. + */ + struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT]; + struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT]; + struct hd_async_context *pasync_ctx[BEISCSI_ULP_COUNT]; +}; + +void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle); + +/* Logging related definitions */ +#define BEISCSI_LOG_INIT 0x0001 /* Initialization events */ +#define BEISCSI_LOG_MBOX 0x0002 /* Mailbox Events */ +#define BEISCSI_LOG_MISC 0x0004 /* Miscllaneous Events */ +#define BEISCSI_LOG_EH 0x0008 /* Error Handler */ +#define BEISCSI_LOG_IO 0x0010 /* IO Code Path */ +#define BEISCSI_LOG_CONFIG 0x0020 /* CONFIG Code Path */ +#define BEISCSI_LOG_ISCSI 0x0040 /* SCSI/iSCSI Protocol related Logs */ + +#define __beiscsi_log(phba, level, fmt, arg...) \ + shost_printk(level, phba->shost, fmt, __LINE__, ##arg) + +#define beiscsi_log(phba, level, mask, fmt, arg...) \ +do { \ + uint32_t log_value = phba->attr_log_enable; \ + if (((mask) & log_value) || (level[1] <= '3')) \ + __beiscsi_log(phba, level, fmt, ##arg); \ +} while (0); + +#endif diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c new file mode 100644 index 000000000..4e899ec14 --- /dev/null +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -0,0 +1,1559 @@ +/* + * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI + * Host Bus Adapters. Refer to the README file included with this package + * for driver version and adapter compatibility. + * + * Copyright (c) 2018 Broadcom. All Rights Reserved. + * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful. ALL EXPRESS + * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY + * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, + * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH + * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. + * See the GNU General Public License for more details, a copy of which + * can be found in the file COPYING included with this package. 
+ * + * Contact Information: + * linux-drivers@broadcom.com + * + */ + +#include +#include +#include +#include "be_mgmt.h" +#include "be_iscsi.h" +#include "be_main.h" + +unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, + struct beiscsi_hba *phba, + struct bsg_job *job, + struct be_dma_mem *nonemb_cmd) +{ + struct be_mcc_wrb *wrb; + struct be_sge *mcc_sge; + unsigned int tag = 0; + struct iscsi_bsg_request *bsg_req = job->request; + struct be_bsg_vendor_cmd *req = nonemb_cmd->va; + unsigned short region, sector_size, sector, offset; + + nonemb_cmd->size = job->request_payload.payload_len; + memset(nonemb_cmd->va, 0, nonemb_cmd->size); + region = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + sector_size = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; + sector = bsg_req->rqst_data.h_vendor.vendor_cmd[3]; + offset = bsg_req->rqst_data.h_vendor.vendor_cmd[4]; + req->region = region; + req->sector = sector; + req->offset = offset; + + if (mutex_lock_interruptible(&ctrl->mbox_lock)) + return 0; + switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) { + case BEISCSI_WRITE_FLASH: + offset = sector * sector_size + offset; + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_WRITE_FLASH, sizeof(*req)); + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + nonemb_cmd->va + offset, job->request_len); + break; + case BEISCSI_READ_FLASH: + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_READ_FLASH, sizeof(*req)); + break; + default: + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BG_%d : Unsupported cmd = 0x%x\n\n", + bsg_req->rqst_data.h_vendor.vendor_cmd[0]); + + mutex_unlock(&ctrl->mbox_lock); + return -EPERM; + } + + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; + } + + mcc_sge = nonembedded_sgl(wrb); + be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, + job->request_payload.sg_cnt); + mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); + mcc_sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); + mcc_sge->len = cpu_to_le32(nonemb_cmd->size); + + be_mcc_notify(phba, tag); + + mutex_unlock(&ctrl->mbox_lock); + return tag; +} + +/** + * mgmt_open_connection()- Establish a TCP CXN + * @phba: driver priv structure + * @dst_addr: Destination Address + * @beiscsi_ep: ptr to device endpoint struct + * @nonemb_cmd: ptr to memory allocated for command + * + * return + * Success: Tag number of the MBX Command issued + * Failure: Error code + **/ +int mgmt_open_connection(struct beiscsi_hba *phba, + struct sockaddr *dst_addr, + struct beiscsi_endpoint *beiscsi_ep, + struct be_dma_mem *nonemb_cmd) +{ + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr; + struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr; + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb; + struct tcp_connect_and_offload_in_v1 *req; + unsigned short def_hdr_id; + unsigned short def_data_id; + struct phys_addr template_address = { 0, 0 }; + struct phys_addr *ptemplate_address; + unsigned int tag = 0; + unsigned int i, ulp_num; + unsigned short cid = beiscsi_ep->ep_cid; + struct be_sge *sge; + + if (dst_addr->sa_family != PF_INET && dst_addr->sa_family != PF_INET6) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BG_%d : unknown addr family %d\n", + dst_addr->sa_family); + return 0; + } + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + + ulp_num = 
phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(cid)].ulp_num; + + def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba, ulp_num); + def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba, ulp_num); + + ptemplate_address = &template_address; + ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address); + if (mutex_lock_interruptible(&ctrl->mbox_lock)) + return 0; + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; + } + + sge = nonembedded_sgl(wrb); + req = nonemb_cmd->va; + memset(req, 0, sizeof(*req)); + + be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, + nonemb_cmd->size); + if (dst_addr->sa_family == PF_INET) { + __be32 s_addr = daddr_in->sin_addr.s_addr; + req->ip_address.ip_type = BEISCSI_IP_TYPE_V4; + req->ip_address.addr[0] = s_addr & 0x000000ff; + req->ip_address.addr[1] = (s_addr & 0x0000ff00) >> 8; + req->ip_address.addr[2] = (s_addr & 0x00ff0000) >> 16; + req->ip_address.addr[3] = (s_addr & 0xff000000) >> 24; + req->tcp_port = ntohs(daddr_in->sin_port); + beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr; + beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port); + beiscsi_ep->ip_type = BEISCSI_IP_TYPE_V4; + } else { + /* else its PF_INET6 family */ + req->ip_address.ip_type = BEISCSI_IP_TYPE_V6; + memcpy(&req->ip_address.addr, + &daddr_in6->sin6_addr.in6_u.u6_addr8, 16); + req->tcp_port = ntohs(daddr_in6->sin6_port); + beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port); + memcpy(&beiscsi_ep->dst6_addr, + &daddr_in6->sin6_addr.in6_u.u6_addr8, 16); + beiscsi_ep->ip_type = BEISCSI_IP_TYPE_V6; + } + req->cid = cid; + i = phba->nxt_cqid++; + if (phba->nxt_cqid == phba->num_cpus) + phba->nxt_cqid = 0; + req->cq_id = phwi_context->be_cq[i].id; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BG_%d : i=%d cq_id=%d\n", i, req->cq_id); + req->defq_id = def_hdr_id; + req->hdr_ring_id = def_hdr_id; + req->data_ring_id = def_data_id; + req->do_offload = 1; + req->dataout_template_pa.lo = ptemplate_address->lo; + req->dataout_template_pa.hi = ptemplate_address->hi; + sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); + sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); + sge->len = cpu_to_le32(nonemb_cmd->size); + + if (!is_chip_be2_be3r(phba)) { + req->hdr.version = MBX_CMD_VER1; + req->tcp_window_size = 0x8000; + req->tcp_window_scale_count = 2; + } + + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); + return tag; +} + +/** + * beiscsi_exec_nemb_cmd()- execute non-embedded MBX cmd + * @phba: driver priv structure + * @nonemb_cmd: DMA address of the MBX command to be issued + * @cbfn: callback func on MCC completion + * @resp_buf: buffer to copy the MBX cmd response + * @resp_buf_len: response length to be copied + * + **/ +static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba, + struct be_dma_mem *nonemb_cmd, + void (*cbfn)(struct beiscsi_hba *, + unsigned int), + void *resp_buf, u32 resp_buf_len) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb; + struct be_sge *sge; + unsigned int tag; + int rc = 0; + + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return -ENOMEM; + } + + sge = nonembedded_sgl(wrb); + be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1); + sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); + sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd->dma)); + sge->len = cpu_to_le32(nonemb_cmd->size); + + if (cbfn) 
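+	/*
+	 * Async path, as implemented just below: when a completion
+	 * callback is supplied, ownership of the non-embedded DMA buffer
+	 * moves to ptag_state[tag].tag_mem_state and the buffer is freed
+	 * from the MCC completion context rather than by the caller.
+	 */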
{ + struct be_dma_mem *tag_mem; + + set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state); + ctrl->ptag_state[tag].cbfn = cbfn; + tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state; + + /* store DMA mem to be freed in callback */ + tag_mem->size = nonemb_cmd->size; + tag_mem->va = nonemb_cmd->va; + tag_mem->dma = nonemb_cmd->dma; + } + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); + + /* with cbfn set, its async cmd, don't wait */ + if (cbfn) + return 0; + + rc = beiscsi_mccq_compl_wait(phba, tag, NULL, nonemb_cmd); + + /* copy the response, if any */ + if (resp_buf) + memcpy(resp_buf, nonemb_cmd->va, resp_buf_len); + return rc; +} + +static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba, + struct be_dma_mem *cmd, + u8 subsystem, u8 opcode, u32 size) +{ + cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma, + GFP_KERNEL); + if (!cmd->va) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BG_%d : Failed to allocate memory for if info\n"); + return -ENOMEM; + } + cmd->size = size; + be_cmd_hdr_prepare(cmd->va, subsystem, opcode, size); + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BG_%d : subsystem %u cmd %u size %u\n", + subsystem, opcode, size); + return 0; +} + +static void beiscsi_free_nemb_cmd(struct beiscsi_hba *phba, + struct be_dma_mem *cmd, int rc) +{ + /* + * If FW is busy the DMA buffer is saved with the tag. When the cmd + * completes this buffer is freed. + */ + if (rc == -EBUSY) + return; + + dma_free_coherent(&phba->ctrl.pdev->dev, cmd->size, cmd->va, cmd->dma); +} + +static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag) +{ + struct be_dma_mem *tag_mem; + + /* status is ignored */ + __beiscsi_mcc_compl_status(phba, tag, NULL, NULL); + tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state; + if (tag_mem->size) { + dma_free_coherent(&phba->pcidev->dev, tag_mem->size, + tag_mem->va, tag_mem->dma); + tag_mem->size = 0; + } +} + +int beiscsi_modify_eq_delay(struct beiscsi_hba *phba, + struct be_set_eqd *set_eqd, int num) +{ + struct be_cmd_req_modify_eq_delay *req; + struct be_dma_mem nonemb_cmd; + int i, rc; + + rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); + if (rc) + return rc; + + req = nonemb_cmd.va; + req->num_eq = cpu_to_le32(num); + for (i = 0; i < num; i++) { + req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id); + req->delay[i].phase = 0; + req->delay[i].delay_multiplier = + cpu_to_le32(set_eqd[i].delay_multiplier); + } + + rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, __beiscsi_eq_delay_compl, + NULL, 0); + if (rc) { + /* + * Only free on failure. Async cmds are handled like -EBUSY + * where it's handled for us. 
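+		 * (On -EBUSY, and for async commands, the DMA buffer saved
+		 * in tag_mem_state is released by the completion path; see
+		 * __beiscsi_eq_delay_compl() above.)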
+ */ + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); + } + return rc; +} + +/** + * beiscsi_get_initiator_name - read initiator name from flash + * @phba: device priv structure + * @name: buffer pointer + * @cfg: fetch user configured + * + */ +int beiscsi_get_initiator_name(struct beiscsi_hba *phba, char *name, bool cfg) +{ + struct be_dma_mem nonemb_cmd; + struct be_cmd_hba_name resp; + struct be_cmd_hba_name *req; + int rc; + + rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI_INI, + OPCODE_ISCSI_INI_CFG_GET_HBA_NAME, sizeof(resp)); + if (rc) + return rc; + + req = nonemb_cmd.va; + if (cfg) + req->hdr.version = 1; + rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, + &resp, sizeof(resp)); + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); + if (rc) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BS_%d : Initiator Name MBX Failed\n"); + return rc; + } + rc = sprintf(name, "%s\n", resp.initiator_name); + return rc; +} + +unsigned int beiscsi_if_get_handle(struct beiscsi_hba *phba) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb; + struct be_cmd_get_all_if_id_req *req; + struct be_cmd_get_all_if_id_req *pbe_allid; + unsigned int tag; + int status = 0; + + if (mutex_lock_interruptible(&ctrl->mbox_lock)) + return -EINTR; + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return -ENOMEM; + } + + req = embedded_payload(wrb); + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID, + sizeof(*req)); + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); + + status = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL); + if (status) { + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BG_%d : %s failed: %d\n", __func__, status); + return -EBUSY; + } + + pbe_allid = embedded_payload(wrb); + /* we now support only one interface per function */ + phba->interface_handle = pbe_allid->if_hndl_list[0]; + + return status; +} + +static inline bool beiscsi_if_zero_ip(u8 *ip, u32 ip_type) +{ + u32 len; + + len = (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN; + while (len && !ip[len - 1]) + len--; + return (len == 0); +} + +static int beiscsi_if_mod_gw(struct beiscsi_hba *phba, + u32 action, u32 ip_type, u8 *gw) +{ + struct be_cmd_set_def_gateway_req *req; + struct be_dma_mem nonemb_cmd; + int rt_val; + + rt_val = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY, + sizeof(*req)); + if (rt_val) + return rt_val; + + req = nonemb_cmd.va; + req->action = action; + req->ip_addr.ip_type = ip_type; + memcpy(req->ip_addr.addr, gw, + (ip_type < BEISCSI_IP_TYPE_V6) ? 
IP_V4_LEN : IP_V6_LEN); + rt_val = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0); + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rt_val); + return rt_val; +} + +int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw) +{ + struct be_cmd_get_def_gateway_resp gw_resp; + int rt_val; + + memset(&gw_resp, 0, sizeof(gw_resp)); + rt_val = beiscsi_if_get_gw(phba, ip_type, &gw_resp); + if (rt_val) { + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BG_%d : Failed to Get Gateway Addr\n"); + return rt_val; + } + + if (!beiscsi_if_zero_ip(gw_resp.ip_addr.addr, ip_type)) { + rt_val = beiscsi_if_mod_gw(phba, IP_ACTION_DEL, ip_type, + gw_resp.ip_addr.addr); + if (rt_val) { + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BG_%d : Failed to clear Gateway Addr Set\n"); + return rt_val; + } + } + + rt_val = beiscsi_if_mod_gw(phba, IP_ACTION_ADD, ip_type, gw); + if (rt_val) + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BG_%d : Failed to Set Gateway Addr\n"); + + return rt_val; +} + +int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type, + struct be_cmd_get_def_gateway_resp *resp) +{ + struct be_cmd_get_def_gateway_req *req; + struct be_dma_mem nonemb_cmd; + int rc; + + rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY, + sizeof(*resp)); + if (rc) + return rc; + + req = nonemb_cmd.va; + req->ip_type = ip_type; + + rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, resp, + sizeof(*resp)); + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); + return rc; +} + +static int +beiscsi_if_clr_ip(struct beiscsi_hba *phba, + struct be_cmd_get_if_info_resp *if_info) +{ + struct be_cmd_set_ip_addr_req *req; + struct be_dma_mem nonemb_cmd; + int rc; + + rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR, + sizeof(*req)); + if (rc) + return rc; + + req = nonemb_cmd.va; + req->ip_params.record_entry_count = 1; + req->ip_params.ip_record.action = IP_ACTION_DEL; + req->ip_params.ip_record.interface_hndl = + phba->interface_handle; + req->ip_params.ip_record.ip_addr.size_of_structure = + sizeof(struct be_ip_addr_subnet_format); + req->ip_params.ip_record.ip_addr.ip_type = if_info->ip_addr.ip_type; + memcpy(req->ip_params.ip_record.ip_addr.addr, + if_info->ip_addr.addr, + sizeof(if_info->ip_addr.addr)); + memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, + if_info->ip_addr.subnet_mask, + sizeof(if_info->ip_addr.subnet_mask)); + rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0); + if (rc < 0 || req->ip_params.ip_record.status) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BG_%d : failed to clear IP: rc %d status %d\n", + rc, req->ip_params.ip_record.status); + } + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); + return rc; +} + +static int +beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip, + u8 *subnet, u32 ip_type) +{ + struct be_cmd_set_ip_addr_req *req; + struct be_dma_mem nonemb_cmd; + uint32_t ip_len; + int rc; + + rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR, + sizeof(*req)); + if (rc) + return rc; + + req = nonemb_cmd.va; + req->ip_params.record_entry_count = 1; + req->ip_params.ip_record.action = IP_ACTION_ADD; + req->ip_params.ip_record.interface_hndl = + phba->interface_handle; + req->ip_params.ip_record.ip_addr.size_of_structure = + sizeof(struct be_ip_addr_subnet_format); + req->ip_params.ip_record.ip_addr.ip_type = ip_type; + ip_len = (ip_type < BEISCSI_IP_TYPE_V6) 
? IP_V4_LEN : IP_V6_LEN; + memcpy(req->ip_params.ip_record.ip_addr.addr, ip, ip_len); + if (subnet) + memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, + subnet, ip_len); + + rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0); + /** + * In some cases, host needs to look into individual record status + * even though FW reported success for that IOCTL. + */ + if (rc < 0 || req->ip_params.ip_record.status) { + __beiscsi_log(phba, KERN_ERR, + "BG_%d : failed to set IP: rc %d status %d\n", + rc, req->ip_params.ip_record.status); + if (req->ip_params.ip_record.status) + rc = -EINVAL; + } + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); + return rc; +} + +int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type, + u8 *ip, u8 *subnet) +{ + struct be_cmd_get_if_info_resp *if_info; + struct be_cmd_rel_dhcp_req *reldhcp; + struct be_dma_mem nonemb_cmd; + int rc; + + rc = beiscsi_if_get_info(phba, ip_type, &if_info); + if (rc) + return rc; + + if (if_info->dhcp_state) { + rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, + CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR, + sizeof(*reldhcp)); + if (rc) + goto exit; + + reldhcp = nonemb_cmd.va; + reldhcp->interface_hndl = phba->interface_handle; + reldhcp->ip_type = ip_type; + rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0); + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); + if (rc < 0) { + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BG_%d : failed to release existing DHCP: %d\n", + rc); + goto exit; + } + } + + /* first delete any IP set */ + if (!beiscsi_if_zero_ip(if_info->ip_addr.addr, ip_type)) { + rc = beiscsi_if_clr_ip(phba, if_info); + if (rc) + goto exit; + } + + /* if ip == NULL then this is called just to release DHCP IP */ + if (ip) + rc = beiscsi_if_set_ip(phba, ip, subnet, ip_type); +exit: + kfree(if_info); + return rc; +} + +int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type) +{ + struct be_cmd_get_def_gateway_resp gw_resp; + struct be_cmd_get_if_info_resp *if_info; + struct be_cmd_set_dhcp_req *dhcpreq; + struct be_dma_mem nonemb_cmd; + u8 *gw; + int rc; + + rc = beiscsi_if_get_info(phba, ip_type, &if_info); + if (rc) + return rc; + + if (if_info->dhcp_state) { + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BG_%d : DHCP Already Enabled\n"); + goto exit; + } + + /* first delete any IP set */ + if (!beiscsi_if_zero_ip(if_info->ip_addr.addr, ip_type)) { + rc = beiscsi_if_clr_ip(phba, if_info); + if (rc) + goto exit; + } + + /* delete gateway settings if mode change is to DHCP */ + memset(&gw_resp, 0, sizeof(gw_resp)); + /* use ip_type provided in if_info */ + rc = beiscsi_if_get_gw(phba, if_info->ip_addr.ip_type, &gw_resp); + if (rc) { + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BG_%d : Failed to Get Gateway Addr\n"); + goto exit; + } + gw = (u8 *)&gw_resp.ip_addr.addr; + if (!beiscsi_if_zero_ip(gw, if_info->ip_addr.ip_type)) { + rc = beiscsi_if_mod_gw(phba, IP_ACTION_DEL, + if_info->ip_addr.ip_type, gw); + if (rc) { + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BG_%d : Failed to clear Gateway Addr Set\n"); + goto exit; + } + } + + rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR, + sizeof(*dhcpreq)); + if (rc) + goto exit; + + dhcpreq = nonemb_cmd.va; + dhcpreq->flags = 1; /* 1 - blocking; 0 - non-blocking */ + dhcpreq->retry_count = 1; + dhcpreq->interface_hndl = phba->interface_handle; + dhcpreq->ip_type = ip_type; + rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, 
NULL, 0); + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); +exit: + kfree(if_info); + return rc; +} + +/** + * beiscsi_if_set_vlan()- Issue and wait for CMD completion + * @phba: device private structure instance + * @vlan_tag: VLAN tag + * + * Issue the MBX Cmd and wait for the completion of the + * command. + * + * returns + * Success: 0 + * Failure: Non-Xero Value + **/ +int beiscsi_if_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag) +{ + int rc; + unsigned int tag; + + tag = be_cmd_set_vlan(phba, vlan_tag); + if (!tag) { + beiscsi_log(phba, KERN_ERR, + (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX), + "BG_%d : VLAN Setting Failed\n"); + return -EBUSY; + } + + rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL); + if (rc) { + beiscsi_log(phba, KERN_ERR, + (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX), + "BS_%d : VLAN MBX Cmd Failed\n"); + return rc; + } + return rc; +} + + +int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type, + struct be_cmd_get_if_info_resp **if_info) +{ + struct be_cmd_get_if_info_req *req; + struct be_dma_mem nonemb_cmd; + uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp); + int rc; + + rc = beiscsi_if_get_handle(phba); + if (rc) + return rc; + + do { + rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, + CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO, + ioctl_size); + if (rc) + return rc; + + req = nonemb_cmd.va; + req->interface_hndl = phba->interface_handle; + req->ip_type = ip_type; + + /* Allocate memory for if_info */ + *if_info = kzalloc(ioctl_size, GFP_KERNEL); + if (!*if_info) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, + "BG_%d : Memory Allocation Failure\n"); + + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, + -ENOMEM); + return -ENOMEM; + } + + rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, *if_info, + ioctl_size); + + /* Check if the error is because of Insufficent_Buffer */ + if (rc == -EAGAIN) { + + /* Get the new memory size */ + ioctl_size = ((struct be_cmd_resp_hdr *) + nonemb_cmd.va)->actual_resp_len; + ioctl_size += sizeof(struct be_cmd_req_hdr); + + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); + /* Free the virtual memory */ + kfree(*if_info); + } else { + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); + break; + } + } while (true); + return rc; +} + +int mgmt_get_nic_conf(struct beiscsi_hba *phba, + struct be_cmd_get_nic_conf_resp *nic) +{ + struct be_dma_mem nonemb_cmd; + int rc; + + rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG, + sizeof(*nic)); + if (rc) + return rc; + + rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, nic, sizeof(*nic)); + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); + return rc; +} + +static void beiscsi_boot_process_compl(struct beiscsi_hba *phba, + unsigned int tag) +{ + struct be_cmd_get_boot_target_resp *boot_resp; + struct be_cmd_resp_logout_fw_sess *logo_resp; + struct be_cmd_get_session_resp *sess_resp; + struct be_mcc_wrb *wrb; + struct boot_struct *bs; + int boot_work, status; + + if (!test_bit(BEISCSI_HBA_BOOT_WORK, &phba->state)) { + __beiscsi_log(phba, KERN_ERR, + "BG_%d : %s no boot work %lx\n", + __func__, phba->state); + return; + } + + if (phba->boot_struct.tag != tag) { + __beiscsi_log(phba, KERN_ERR, + "BG_%d : %s tag mismatch %d:%d\n", + __func__, tag, phba->boot_struct.tag); + return; + } + bs = &phba->boot_struct; + boot_work = 1; + status = 0; + switch (bs->action) { + case BEISCSI_BOOT_REOPEN_SESS: + status = __beiscsi_mcc_compl_status(phba, tag, NULL, NULL); + if (!status) + bs->action 
= BEISCSI_BOOT_GET_SHANDLE; + else + bs->retry--; + break; + case BEISCSI_BOOT_GET_SHANDLE: + status = __beiscsi_mcc_compl_status(phba, tag, &wrb, NULL); + if (!status) { + boot_resp = embedded_payload(wrb); + bs->s_handle = boot_resp->boot_session_handle; + } + if (bs->s_handle == BE_BOOT_INVALID_SHANDLE) { + bs->action = BEISCSI_BOOT_REOPEN_SESS; + bs->retry--; + } else { + bs->action = BEISCSI_BOOT_GET_SINFO; + } + break; + case BEISCSI_BOOT_GET_SINFO: + status = __beiscsi_mcc_compl_status(phba, tag, NULL, + &bs->nonemb_cmd); + if (!status) { + sess_resp = bs->nonemb_cmd.va; + memcpy(&bs->boot_sess, &sess_resp->session_info, + sizeof(struct mgmt_session_info)); + bs->action = BEISCSI_BOOT_LOGOUT_SESS; + } else { + __beiscsi_log(phba, KERN_ERR, + "BG_%d : get boot session info error : 0x%x\n", + status); + boot_work = 0; + } + dma_free_coherent(&phba->ctrl.pdev->dev, bs->nonemb_cmd.size, + bs->nonemb_cmd.va, bs->nonemb_cmd.dma); + bs->nonemb_cmd.va = NULL; + break; + case BEISCSI_BOOT_LOGOUT_SESS: + status = __beiscsi_mcc_compl_status(phba, tag, &wrb, NULL); + if (!status) { + logo_resp = embedded_payload(wrb); + if (logo_resp->session_status != BE_SESS_STATUS_CLOSE) { + __beiscsi_log(phba, KERN_ERR, + "BG_%d : FW boot session logout error : 0x%x\n", + logo_resp->session_status); + } + } + /* continue to create boot_kset even if logout failed? */ + bs->action = BEISCSI_BOOT_CREATE_KSET; + break; + default: + break; + } + + /* clear the tag so no other completion matches this tag */ + bs->tag = 0; + if (!bs->retry) { + boot_work = 0; + __beiscsi_log(phba, KERN_ERR, + "BG_%d : failed to setup boot target: status %d action %d\n", + status, bs->action); + } + if (!boot_work) { + /* wait for next event to start boot_work */ + clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); + return; + } + schedule_work(&phba->boot_work); +} + +/** + * beiscsi_boot_logout_sess()- Logout from boot FW session + * @phba: Device priv structure instance + * + * return + * the TAG used for MBOX Command + * + */ +unsigned int beiscsi_boot_logout_sess(struct beiscsi_hba *phba) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb; + struct be_cmd_req_logout_fw_sess *req; + unsigned int tag; + + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; + } + + req = embedded_payload(wrb); + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, + OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET, + sizeof(struct be_cmd_req_logout_fw_sess)); + /* Use the session handle copied into boot_sess */ + req->session_handle = phba->boot_struct.boot_sess.session_handle; + + phba->boot_struct.tag = tag; + set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state); + ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl; + + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); + + return tag; +} +/** + * beiscsi_boot_reopen_sess()- Reopen boot session + * @phba: Device priv structure instance + * + * return + * the TAG used for MBOX Command + * + **/ +unsigned int beiscsi_boot_reopen_sess(struct beiscsi_hba *phba) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb; + struct be_cmd_reopen_session_req *req; + unsigned int tag; + + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; + } + + req = embedded_payload(wrb); + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, 
CMD_SUBSYSTEM_ISCSI_INI, + OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS, + sizeof(struct be_cmd_reopen_session_resp)); + req->reopen_type = BE_REOPEN_BOOT_SESSIONS; + req->session_handle = BE_BOOT_INVALID_SHANDLE; + + phba->boot_struct.tag = tag; + set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state); + ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl; + + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); + return tag; +} + + +/** + * beiscsi_boot_get_sinfo()- Get boot session info + * @phba: device priv structure instance + * + * Fetches the boot_struct.s_handle info from FW. + * return + * the TAG used for MBOX Command + * + **/ +unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_cmd_get_session_req *req; + struct be_dma_mem *nonemb_cmd; + struct be_mcc_wrb *wrb; + struct be_sge *sge; + unsigned int tag; + + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; + } + + nonemb_cmd = &phba->boot_struct.nonemb_cmd; + nonemb_cmd->size = sizeof(struct be_cmd_get_session_resp); + nonemb_cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev, + nonemb_cmd->size, + &nonemb_cmd->dma, + GFP_KERNEL); + if (!nonemb_cmd->va) { + mutex_unlock(&ctrl->mbox_lock); + return 0; + } + + req = nonemb_cmd->va; + memset(req, 0, sizeof(*req)); + sge = nonembedded_sgl(wrb); + be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, + OPCODE_ISCSI_INI_SESSION_GET_A_SESSION, + sizeof(struct be_cmd_get_session_resp)); + req->session_handle = phba->boot_struct.s_handle; + sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); + sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); + sge->len = cpu_to_le32(nonemb_cmd->size); + + phba->boot_struct.tag = tag; + set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state); + ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl; + + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); + return tag; +} + +unsigned int __beiscsi_boot_get_shandle(struct beiscsi_hba *phba, int async) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb; + struct be_cmd_get_boot_target_req *req; + unsigned int tag; + + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; + } + + req = embedded_payload(wrb); + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, + OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET, + sizeof(struct be_cmd_get_boot_target_resp)); + + if (async) { + phba->boot_struct.tag = tag; + set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state); + ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl; + } + + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); + return tag; +} + +/** + * beiscsi_boot_get_shandle()- Get boot session handle + * @phba: device priv structure instance + * @s_handle: session handle returned for boot session. 
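+ *
+ * Illustrative synchronous usage (a sketch based on the return values
+ * documented below, not lifted from the original source):
+ *
+ *	unsigned int s_handle;
+ *	int ret = beiscsi_boot_get_shandle(phba, &s_handle);
+ *
+ *	ret == 1      : FW is logged in to a boot target, s_handle is valid
+ *	ret == -ENXIO : no boot targets are configured
+ *	ret < 0       : MBX command failed, s_handle stays BE_BOOT_INVALID_SHANDLE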
+ * + * return + * Success: 1 + * Failure: negative + * + **/ +int beiscsi_boot_get_shandle(struct beiscsi_hba *phba, unsigned int *s_handle) +{ + struct be_cmd_get_boot_target_resp *boot_resp; + struct be_mcc_wrb *wrb; + unsigned int tag; + int rc; + + *s_handle = BE_BOOT_INVALID_SHANDLE; + /* get configured boot session count and handle */ + tag = __beiscsi_boot_get_shandle(phba, 0); + if (!tag) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, + "BG_%d : Getting Boot Target Info Failed\n"); + return -EAGAIN; + } + + rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL); + if (rc) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, + "BG_%d : MBX CMD get_boot_target Failed\n"); + return -EBUSY; + } + + boot_resp = embedded_payload(wrb); + /* check if there are any boot targets configured */ + if (!boot_resp->boot_session_count) { + __beiscsi_log(phba, KERN_INFO, + "BG_%d : No boot targets configured\n"); + return -ENXIO; + } + + /* only if FW has logged in to the boot target, s_handle is valid */ + *s_handle = boot_resp->boot_session_handle; + return 1; +} + +/** + * beiscsi_drvr_ver_disp()- Display the driver Name and Version + * @dev: ptr to device not used. + * @attr: device attribute, not used. + * @buf: contains formatted text driver name and version + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, BE_NAME "\n"); +} + +/** + * beiscsi_fw_ver_disp()- Display Firmware Version + * @dev: ptr to device not used. + * @attr: device attribute, not used. + * @buf: contains formatted text Firmware version + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str); +} + +/** + * beiscsi_active_session_disp()- Display Sessions Active + * @dev: ptr to device not used. + * @attr: device attribute, not used. + * @buf: contains formatted text Session Count + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_active_session_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + uint16_t avlbl_cids = 0, ulp_num, len = 0, total_cids = 0; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + avlbl_cids = BEISCSI_ULP_AVLBL_CID(phba, ulp_num); + total_cids = BEISCSI_GET_CID_COUNT(phba, ulp_num); + len += scnprintf(buf+len, PAGE_SIZE - len, + "ULP%d : %d\n", ulp_num, + (total_cids - avlbl_cids)); + } else + len += scnprintf(buf+len, PAGE_SIZE - len, + "ULP%d : %d\n", ulp_num, 0); + } + + return len; +} + +/** + * beiscsi_free_session_disp()- Display Avaliable Session + * @dev: ptr to device not used. + * @attr: device attribute, not used. 
+ * @buf: contains formatted text Session Count + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_free_session_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + uint16_t ulp_num, len = 0; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) + len += scnprintf(buf+len, PAGE_SIZE - len, + "ULP%d : %d\n", ulp_num, + BEISCSI_ULP_AVLBL_CID(phba, ulp_num)); + else + len += scnprintf(buf+len, PAGE_SIZE - len, + "ULP%d : %d\n", ulp_num, 0); + } + + return len; +} + +/** + * beiscsi_adap_family_disp()- Display adapter family. + * @dev: ptr to device to get priv structure + * @attr: device attribute, not used. + * @buf: contains formatted text driver name and version + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + uint16_t dev_id = 0; + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + + dev_id = phba->pcidev->device; + switch (dev_id) { + case BE_DEVICE_ID1: + case OC_DEVICE_ID1: + case OC_DEVICE_ID2: + return snprintf(buf, PAGE_SIZE, + "Obsolete/Unsupported BE2 Adapter Family\n"); + case BE_DEVICE_ID2: + case OC_DEVICE_ID3: + return snprintf(buf, PAGE_SIZE, "BE3-R Adapter Family\n"); + case OC_SKH_ID1: + return snprintf(buf, PAGE_SIZE, "Skyhawk-R Adapter Family\n"); + default: + return snprintf(buf, PAGE_SIZE, + "Unknown Adapter Family: 0x%x\n", dev_id); + } +} + +/** + * beiscsi_phys_port_disp()- Display Physical Port Identifier + * @dev: ptr to device not used. + * @attr: device attribute, not used. 
+ * @buf: contains formatted text port identifier + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + + return snprintf(buf, PAGE_SIZE, "Port Identifier : %u\n", + phba->fw_config.phys_port); +} + +void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params, + struct wrb_handle *pwrb_handle, + struct be_mem_descriptor *mem_descr, + struct hwi_wrb_context *pwrb_context) +{ + struct iscsi_wrb *pwrb = pwrb_handle->pwrb; + + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, + max_send_data_segment_length, pwrb, + params->dw[offsetof(struct amap_beiscsi_offload_params, + max_send_data_segment_length) / 32]); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb, + BE_TGT_CTX_UPDT_CMD); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, + first_burst_length, + pwrb, + params->dw[offsetof(struct amap_beiscsi_offload_params, + first_burst_length) / 32]); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb, + (params->dw[offsetof(struct amap_beiscsi_offload_params, + erl) / 32] & OFFLD_PARAMS_ERL)); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb, + (params->dw[offsetof(struct amap_beiscsi_offload_params, + dde) / 32] & OFFLD_PARAMS_DDE) >> 2); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb, + (params->dw[offsetof(struct amap_beiscsi_offload_params, + hde) / 32] & OFFLD_PARAMS_HDE) >> 3); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb, + (params->dw[offsetof(struct amap_beiscsi_offload_params, + ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb, + (params->dw[offsetof(struct amap_beiscsi_offload_params, + imd) / 32] & OFFLD_PARAMS_IMD) >> 5); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn, + pwrb, + (params->dw[offsetof(struct amap_beiscsi_offload_params, + exp_statsn) / 32] + 1)); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx, + pwrb, pwrb_handle->wrb_index); + + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, + max_burst_length, pwrb, params->dw[offsetof + (struct amap_beiscsi_offload_params, + max_burst_length) / 32]); + + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb, + pwrb, pwrb_handle->wrb_index); + if (pwrb_context->plast_wrb) + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, + ptr2nextwrb, + pwrb_context->plast_wrb, + pwrb_handle->wrb_index); + pwrb_context->plast_wrb = pwrb; + + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, + session_state, pwrb, 0); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack, + pwrb, 1); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq, + pwrb, 0); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb, + 0); + + mem_descr += ISCSI_MEM_GLOBAL_HEADER; + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, + pad_buffer_addr_hi, pwrb, + mem_descr->mem_array[0].bus_address.u.a32.address_hi); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, + pad_buffer_addr_lo, pwrb, + mem_descr->mem_array[0].bus_address.u.a32.address_lo); +} + +void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, + struct wrb_handle *pwrb_handle, + struct hwi_wrb_context *pwrb_context) +{ + struct iscsi_wrb 
*pwrb = pwrb_handle->pwrb; + + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, + max_burst_length, pwrb, params->dw[offsetof + (struct amap_beiscsi_offload_params, + max_burst_length) / 32]); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, + type, pwrb, + BE_TGT_CTX_UPDT_CMD); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, + ptr2nextwrb, + pwrb, pwrb_handle->wrb_index); + if (pwrb_context->plast_wrb) + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, + ptr2nextwrb, + pwrb_context->plast_wrb, + pwrb_handle->wrb_index); + pwrb_context->plast_wrb = pwrb; + + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, wrb_idx, + pwrb, pwrb_handle->wrb_index); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, + max_send_data_segment_length, pwrb, + params->dw[offsetof(struct amap_beiscsi_offload_params, + max_send_data_segment_length) / 32]); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, + first_burst_length, pwrb, + params->dw[offsetof(struct amap_beiscsi_offload_params, + first_burst_length) / 32]); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, + max_recv_dataseg_len, pwrb, + params->dw[offsetof(struct amap_beiscsi_offload_params, + max_recv_data_segment_length) / 32]); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, + max_cxns, pwrb, BEISCSI_MAX_CXNS); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb, + (params->dw[offsetof(struct amap_beiscsi_offload_params, + erl) / 32] & OFFLD_PARAMS_ERL)); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, dde, pwrb, + (params->dw[offsetof(struct amap_beiscsi_offload_params, + dde) / 32] & OFFLD_PARAMS_DDE) >> 2); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, hde, pwrb, + (params->dw[offsetof(struct amap_beiscsi_offload_params, + hde) / 32] & OFFLD_PARAMS_HDE) >> 3); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, + ir2t, pwrb, + (params->dw[offsetof(struct amap_beiscsi_offload_params, + ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, imd, pwrb, + (params->dw[offsetof(struct amap_beiscsi_offload_params, + imd) / 32] & OFFLD_PARAMS_IMD) >> 5); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, + data_seq_inorder, + pwrb, + (params->dw[offsetof(struct amap_beiscsi_offload_params, + data_seq_inorder) / 32] & + OFFLD_PARAMS_DATA_SEQ_INORDER) >> 6); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, + pdu_seq_inorder, + pwrb, + (params->dw[offsetof(struct amap_beiscsi_offload_params, + pdu_seq_inorder) / 32] & + OFFLD_PARAMS_PDU_SEQ_INORDER) >> 7); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_r2t, + pwrb, + (params->dw[offsetof(struct amap_beiscsi_offload_params, + max_r2t) / 32] & + OFFLD_PARAMS_MAX_R2T) >> 8); + AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, stat_sn, + pwrb, + (params->dw[offsetof(struct amap_beiscsi_offload_params, + exp_statsn) / 32] + 1)); +} + +unsigned int beiscsi_invalidate_cxn(struct beiscsi_hba *phba, + struct beiscsi_endpoint *beiscsi_ep) +{ + struct be_invalidate_connection_params_in *req; + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb; + unsigned int tag = 0; + + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; + } + + req = embedded_payload(wrb); + be_wrb_hdr_prepare(wrb, sizeof(union be_invalidate_connection_params), + 
true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, + OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION, + sizeof(*req)); + req->session_handle = beiscsi_ep->fw_handle; + req->cid = beiscsi_ep->ep_cid; + if (beiscsi_ep->conn) + req->cleanup_type = BE_CLEANUP_TYPE_INVALIDATE; + else + req->cleanup_type = BE_CLEANUP_TYPE_ISSUE_TCP_RST; + /** + * 0 - non-persistent targets + * 1 - save session info on flash + */ + req->save_cfg = 0; + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); + return tag; +} + +unsigned int beiscsi_upload_cxn(struct beiscsi_hba *phba, + struct beiscsi_endpoint *beiscsi_ep) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb; + struct be_tcp_upload_params_in *req; + unsigned int tag; + + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; + } + + req = embedded_payload(wrb); + be_wrb_hdr_prepare(wrb, sizeof(union be_tcp_upload_params), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD, + OPCODE_COMMON_TCP_UPLOAD, sizeof(*req)); + req->id = beiscsi_ep->ep_cid; + if (beiscsi_ep->conn) + req->upload_type = BE_UPLOAD_TYPE_GRACEFUL; + else + req->upload_type = BE_UPLOAD_TYPE_ABORT; + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); + return tag; +} + +int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba, + struct invldt_cmd_tbl *inv_tbl, + unsigned int nents) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct invldt_cmds_params_in *req; + struct be_dma_mem nonemb_cmd; + struct be_mcc_wrb *wrb; + unsigned int i, tag; + struct be_sge *sge; + int rc; + + if (!nents || nents > BE_INVLDT_CMD_TBL_SZ) + return -EINVAL; + + nonemb_cmd.size = sizeof(union be_invldt_cmds_params); + nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev, + nonemb_cmd.size, &nonemb_cmd.dma, + GFP_KERNEL); + if (!nonemb_cmd.va) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, + "BM_%d : invldt_cmds_params alloc failed\n"); + return -ENOMEM; + } + + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + return -ENOMEM; + } + + req = nonemb_cmd.va; + be_wrb_hdr_prepare(wrb, nonemb_cmd.size, false, 1); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS, + sizeof(*req)); + req->ref_handle = 0; + req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE; + for (i = 0; i < nents; i++) { + req->table[i].icd = inv_tbl[i].icd; + req->table[i].cid = inv_tbl[i].cid; + req->icd_count++; + } + sge = nonembedded_sgl(wrb); + sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma)); + sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd.dma)); + sge->len = cpu_to_le32(nonemb_cmd.size); + + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); + + rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); + if (rc != -EBUSY) + dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + return rc; +} diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h new file mode 100644 index 000000000..d10858828 --- /dev/null +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2017 Broadcom. All Rights Reserved. + * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. 
+ * + * Contact Information: + * linux-drivers@broadcom.com + */ + +#ifndef _BEISCSI_MGMT_ +#define _BEISCSI_MGMT_ + +#include +#include "be_iscsi.h" +#include "be_main.h" + +#define IP_ACTION_ADD 0x01 +#define IP_ACTION_DEL 0x02 + +#define IP_V6_LEN 16 +#define IP_V4_LEN 4 + +/* UE Status and Mask register */ +#define PCICFG_UE_STATUS_LOW 0xA0 +#define PCICFG_UE_STATUS_HIGH 0xA4 +#define PCICFG_UE_STATUS_MASK_LOW 0xA8 +#define PCICFG_UE_STATUS_MASK_HI 0xAC + +int mgmt_open_connection(struct beiscsi_hba *phba, + struct sockaddr *dst_addr, + struct beiscsi_endpoint *beiscsi_ep, + struct be_dma_mem *nonemb_cmd); + +unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, + struct beiscsi_hba *phba, + struct bsg_job *job, + struct be_dma_mem *nonemb_cmd); + +#define BE_INVLDT_CMD_TBL_SZ 128 +struct invldt_cmd_tbl { + unsigned short icd; + unsigned short cid; +} __packed; + +struct invldt_cmds_params_in { + struct be_cmd_req_hdr hdr; + unsigned int ref_handle; + unsigned int icd_count; + struct invldt_cmd_tbl table[BE_INVLDT_CMD_TBL_SZ]; + unsigned short cleanup_type; + unsigned short unused; +} __packed; + +struct invldt_cmds_params_out { + struct be_cmd_resp_hdr hdr; + unsigned int ref_handle; + unsigned int icd_count; + unsigned int icd_status[BE_INVLDT_CMD_TBL_SZ]; +} __packed; + +union be_invldt_cmds_params { + struct invldt_cmds_params_in request; + struct invldt_cmds_params_out response; +} __packed; + +struct mgmt_hba_attributes { + u8 flashrom_version_string[BEISCSI_VER_STRLEN]; + u8 manufacturer_name[BEISCSI_VER_STRLEN]; + u32 supported_modes; + u8 seeprom_version_lo; + u8 seeprom_version_hi; + u8 rsvd0[2]; + u32 fw_cmd_data_struct_version; + u32 ep_fw_data_struct_version; + u8 ncsi_version_string[12]; + u32 default_extended_timeout; + u8 controller_model_number[BEISCSI_VER_STRLEN]; + u8 controller_description[64]; + u8 controller_serial_number[BEISCSI_VER_STRLEN]; + u8 ip_version_string[BEISCSI_VER_STRLEN]; + u8 firmware_version_string[BEISCSI_VER_STRLEN]; + u8 bios_version_string[BEISCSI_VER_STRLEN]; + u8 redboot_version_string[BEISCSI_VER_STRLEN]; + u8 driver_version_string[BEISCSI_VER_STRLEN]; + u8 fw_on_flash_version_string[BEISCSI_VER_STRLEN]; + u32 functionalities_supported; + u16 max_cdblength; + u8 asic_revision; + u8 generational_guid[16]; + u8 hba_port_count; + u16 default_link_down_timeout; + u8 iscsi_ver_min_max; + u8 multifunction_device; + u8 cache_valid; + u8 hba_status; + u8 max_domains_supported; + u8 phy_port; + u32 firmware_post_status; + u32 hba_mtu[8]; + u8 iscsi_features; + u8 asic_generation; + u8 future_u8[2]; + u32 future_u32[3]; +} __packed; + +struct mgmt_controller_attributes { + struct mgmt_hba_attributes hba_attribs; + u16 pci_vendor_id; + u16 pci_device_id; + u16 pci_sub_vendor_id; + u16 pci_sub_system_id; + u8 pci_bus_number; + u8 pci_device_number; + u8 pci_function_number; + u8 interface_type; + u64 unique_identifier; + u8 netfilters; + u8 rsvd0[3]; + u32 future_u32[4]; +} __packed; + +struct be_mgmt_controller_attributes { + struct be_cmd_req_hdr hdr; + struct mgmt_controller_attributes params; +} __packed; + +struct be_mgmt_controller_attributes_resp { + struct be_cmd_resp_hdr hdr; + struct mgmt_controller_attributes params; +} __packed; + +struct be_bsg_vendor_cmd { + struct be_cmd_req_hdr hdr; + unsigned short region; + unsigned short offset; + unsigned short sector; +} __packed; + +/* configuration management */ + +#define GET_MGMT_CONTROLLER_WS(phba) (phba->pmgmt_ws) + +#define ISCSI_GET_PDU_TEMPLATE_ADDRESS(pc, pa) {\ + pa->lo = 
phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\ + bus_address.u.a32.address_lo; \ + pa->hi = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\ + bus_address.u.a32.address_hi; \ +} + +#define BEISCSI_WRITE_FLASH 0 +#define BEISCSI_READ_FLASH 1 + +struct beiscsi_endpoint { + struct beiscsi_hba *phba; + struct beiscsi_conn *conn; + struct iscsi_endpoint *openiscsi_ep; + unsigned short ip_type; + char dst6_addr[ISCSI_ADDRESS_BUF_LEN]; + unsigned long dst_addr; + unsigned short ep_cid; + unsigned int fw_handle; + u16 dst_tcpport; + u16 cid_vld; +}; + +int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba, + struct invldt_cmd_tbl *inv_tbl, + unsigned int nents); + +int beiscsi_get_initiator_name(struct beiscsi_hba *phba, char *name, bool cfg); + +int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type); + +int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type, + u8 *ip, u8 *subnet); + +int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw); + +int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type, + struct be_cmd_get_def_gateway_resp *resp); + +int mgmt_get_nic_conf(struct beiscsi_hba *phba, + struct be_cmd_get_nic_conf_resp *mac); + +int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type, + struct be_cmd_get_if_info_resp **if_info); + +unsigned int beiscsi_if_get_handle(struct beiscsi_hba *phba); + +int beiscsi_if_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag); + +unsigned int beiscsi_boot_logout_sess(struct beiscsi_hba *phba); + +unsigned int beiscsi_boot_reopen_sess(struct beiscsi_hba *phba); + +unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba); + +unsigned int __beiscsi_boot_get_shandle(struct beiscsi_hba *phba, int async); + +int beiscsi_boot_get_shandle(struct beiscsi_hba *phba, unsigned int *s_handle); + +ssize_t beiscsi_drvr_ver_disp(struct device *dev, + struct device_attribute *attr, char *buf); + +ssize_t beiscsi_fw_ver_disp(struct device *dev, + struct device_attribute *attr, char *buf); + +ssize_t beiscsi_active_session_disp(struct device *dev, + struct device_attribute *attr, char *buf); + +ssize_t beiscsi_adap_family_disp(struct device *dev, + struct device_attribute *attr, char *buf); + + +ssize_t beiscsi_free_session_disp(struct device *dev, + struct device_attribute *attr, char *buf); + +ssize_t beiscsi_phys_port_disp(struct device *dev, + struct device_attribute *attr, char *buf); + +void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params, + struct wrb_handle *pwrb_handle, + struct be_mem_descriptor *mem_descr, + struct hwi_wrb_context *pwrb_context); + +void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, + struct wrb_handle *pwrb_handle, + struct hwi_wrb_context *pwrb_context); + +unsigned int beiscsi_invalidate_cxn(struct beiscsi_hba *phba, + struct beiscsi_endpoint *beiscsi_ep); + +unsigned int beiscsi_upload_cxn(struct beiscsi_hba *phba, + struct beiscsi_endpoint *beiscsi_ep); + +int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, + struct be_set_eqd *, int num); + +int beiscsi_logout_fw_sess(struct beiscsi_hba *phba, + uint32_t fw_sess_handle); + +#endif diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile new file mode 100644 index 000000000..442fc3db8 --- /dev/null +++ b/drivers/scsi/bfa/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_SCSI_BFA_FC) := bfa.o + +bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o bfad_bsg.o +bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o +bfa-y += bfa_fcs.o bfa_fcs_lport.o 
bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o +bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h new file mode 100644 index 000000000..7bd2ba1ad --- /dev/null +++ b/drivers/scsi/bfa/bfa.h @@ -0,0 +1,440 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ +#ifndef __BFA_H__ +#define __BFA_H__ + +#include "bfad_drv.h" +#include "bfa_cs.h" +#include "bfa_plog.h" +#include "bfa_defs_svc.h" +#include "bfi.h" +#include "bfa_ioc.h" + +struct bfa_s; + +typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m); +typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status); + +/* + * Interrupt message handlers + */ +void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m); + +/* + * Request and response queue related defines + */ +#define BFA_REQQ_NELEMS_MIN (4) +#define BFA_RSPQ_NELEMS_MIN (4) + +#define bfa_reqq_pi(__bfa, __reqq) ((__bfa)->iocfc.req_cq_pi[__reqq]) +#define bfa_reqq_ci(__bfa, __reqq) \ + (*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva)) + +#define bfa_reqq_full(__bfa, __reqq) \ + (((bfa_reqq_pi(__bfa, __reqq) + 1) & \ + ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1)) == \ + bfa_reqq_ci(__bfa, __reqq)) + +#define bfa_reqq_next(__bfa, __reqq) \ + (bfa_reqq_full(__bfa, __reqq) ? NULL : \ + ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \ + + bfa_reqq_pi((__bfa), (__reqq))))) + +#define bfa_reqq_produce(__bfa, __reqq, __mh) do { \ + (__mh).mtag.h2i.qid = (__bfa)->iocfc.hw_qid[__reqq];\ + (__bfa)->iocfc.req_cq_pi[__reqq]++; \ + (__bfa)->iocfc.req_cq_pi[__reqq] &= \ + ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \ + writel((__bfa)->iocfc.req_cq_pi[__reqq], \ + (__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]); \ + } while (0) + +#define bfa_rspq_pi(__bfa, __rspq) \ + (*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva)) + +#define bfa_rspq_ci(__bfa, __rspq) ((__bfa)->iocfc.rsp_cq_ci[__rspq]) +#define bfa_rspq_elem(__bfa, __rspq, __ci) \ + (&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci]) + +#define CQ_INCR(__index, __size) do { \ + (__index)++; \ + (__index) &= ((__size) - 1); \ +} while (0) + +/* + * Circular queue usage assignments + */ +enum { + BFA_REQQ_IOC = 0, /* all low-priority IOC msgs */ + BFA_REQQ_FCXP = 0, /* all FCXP messages */ + BFA_REQQ_LPS = 0, /* all lport service msgs */ + BFA_REQQ_PORT = 0, /* all port messages */ + BFA_REQQ_FLASH = 0, /* for flash module */ + BFA_REQQ_DIAG = 0, /* for diag module */ + BFA_REQQ_RPORT = 0, /* all port messages */ + BFA_REQQ_SBOOT = 0, /* all san boot messages */ + BFA_REQQ_QOS_LO = 1, /* all low priority IO */ + BFA_REQQ_QOS_MD = 2, /* all medium priority IO */ + BFA_REQQ_QOS_HI = 3, /* all high priority IO */ +}; + +static inline void +bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), + void *cbarg) +{ + wqe->qresume = qresume; + wqe->cbarg = cbarg; +} + +#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq]) + +/* + * static inline void + * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe) + */ +#define bfa_reqq_wait(__bfa, __reqq, __wqe) do { \ + \ + struct list_head *waitq = bfa_reqq(__bfa, __reqq); \ + \ + WARN_ON(((__reqq) >= BFI_IOC_MAX_CQS)); \ + WARN_ON(!((__wqe)->qresume && (__wqe)->cbarg)); \ + \ + 
list_add_tail(&(__wqe)->qe, waitq); \ + } while (0) + +#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe) + +#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \ + (__hcb_qe)->cbfn = (__cbfn); \ + (__hcb_qe)->cbarg = (__cbarg); \ + (__hcb_qe)->pre_rmv = BFA_FALSE; \ + list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \ + } while (0) + +#define bfa_cb_dequeue(__hcb_qe) list_del(&(__hcb_qe)->qe) + +#define bfa_cb_queue_once(__bfa, __hcb_qe, __cbfn, __cbarg) do { \ + (__hcb_qe)->cbfn = (__cbfn); \ + (__hcb_qe)->cbarg = (__cbarg); \ + if (!(__hcb_qe)->once) { \ + list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \ + (__hcb_qe)->once = BFA_TRUE; \ + } \ + } while (0) + +#define bfa_cb_queue_status(__bfa, __hcb_qe, __status) do { \ + (__hcb_qe)->fw_status = (__status); \ + list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \ +} while (0) + +#define bfa_cb_queue_done(__hcb_qe) do { \ + (__hcb_qe)->once = BFA_FALSE; \ + } while (0) + + +/* + * PCI devices supported by the current BFA + */ +struct bfa_pciid_s { + u16 device_id; + u16 vendor_id; +}; + +extern char bfa_version[]; + +struct bfa_iocfc_regs_s { + void __iomem *intr_status; + void __iomem *intr_mask; + void __iomem *cpe_q_pi[BFI_IOC_MAX_CQS]; + void __iomem *cpe_q_ci[BFI_IOC_MAX_CQS]; + void __iomem *cpe_q_ctrl[BFI_IOC_MAX_CQS]; + void __iomem *rme_q_ci[BFI_IOC_MAX_CQS]; + void __iomem *rme_q_pi[BFI_IOC_MAX_CQS]; + void __iomem *rme_q_ctrl[BFI_IOC_MAX_CQS]; +}; + +/* + * MSIX vector handlers + */ +#define BFA_MSIX_MAX_VECTORS 22 +typedef void (*bfa_msix_handler_t)(struct bfa_s *bfa, int vec); +struct bfa_msix_s { + int nvecs; + bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS]; +}; + +/* + * Chip specific interfaces + */ +struct bfa_hwif_s { + void (*hw_reginit)(struct bfa_s *bfa); + void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq); + void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci); + void (*hw_msix_init)(struct bfa_s *bfa, int nvecs); + void (*hw_msix_ctrl_install)(struct bfa_s *bfa); + void (*hw_msix_queue_install)(struct bfa_s *bfa); + void (*hw_msix_uninstall)(struct bfa_s *bfa); + void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix); + void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap, + u32 *nvecs, u32 *maxvec); + void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start, + u32 *end); + int cpe_vec_q0; + int rme_vec_q0; +}; +typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status); + +struct bfa_faa_cbfn_s { + bfa_cb_iocfc_t faa_cbfn; + void *faa_cbarg; +}; + +#define BFA_FAA_ENABLED 1 +#define BFA_FAA_DISABLED 2 + +/* + * FAA attributes + */ +struct bfa_faa_attr_s { + wwn_t faa; + u8 faa_state; + u8 pwwn_source; + u8 rsvd[6]; +}; + +struct bfa_faa_args_s { + struct bfa_faa_attr_s *faa_attr; + struct bfa_faa_cbfn_s faa_cb; + u8 faa_state; + bfa_boolean_t busy; +}; + +struct bfa_iocfc_s { + bfa_fsm_t fsm; + struct bfa_s *bfa; + struct bfa_iocfc_cfg_s cfg; + u32 req_cq_pi[BFI_IOC_MAX_CQS]; + u32 rsp_cq_ci[BFI_IOC_MAX_CQS]; + u8 hw_qid[BFI_IOC_MAX_CQS]; + struct bfa_cb_qe_s init_hcb_qe; + struct bfa_cb_qe_s stop_hcb_qe; + struct bfa_cb_qe_s dis_hcb_qe; + struct bfa_cb_qe_s en_hcb_qe; + struct bfa_cb_qe_s stats_hcb_qe; + bfa_boolean_t submod_enabled; + bfa_boolean_t cb_reqd; /* Driver call back reqd */ + bfa_status_t op_status; /* Status of bfa iocfc op */ + + struct bfa_dma_s cfg_info; + struct bfi_iocfc_cfg_s *cfginfo; + struct bfa_dma_s cfgrsp_dma; + struct bfi_iocfc_cfgrsp_s *cfgrsp; + struct bfa_dma_s req_cq_ba[BFI_IOC_MAX_CQS]; + struct bfa_dma_s req_cq_shadow_ci[BFI_IOC_MAX_CQS]; 
+ struct bfa_dma_s rsp_cq_ba[BFI_IOC_MAX_CQS]; + struct bfa_dma_s rsp_cq_shadow_pi[BFI_IOC_MAX_CQS]; + struct bfa_iocfc_regs_s bfa_regs; /* BFA device registers */ + struct bfa_hwif_s hwif; + bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */ + void *updateq_cbarg; /* bios callback arg */ + u32 intr_mask; + struct bfa_faa_args_s faa_args; + struct bfa_mem_dma_s ioc_dma; + struct bfa_mem_dma_s iocfc_dma; + struct bfa_mem_dma_s reqq_dma[BFI_IOC_MAX_CQS]; + struct bfa_mem_dma_s rspq_dma[BFI_IOC_MAX_CQS]; + struct bfa_mem_kva_s kva_seg; +}; + +#define BFA_MEM_IOC_DMA(_bfa) (&((_bfa)->iocfc.ioc_dma)) +#define BFA_MEM_IOCFC_DMA(_bfa) (&((_bfa)->iocfc.iocfc_dma)) +#define BFA_MEM_REQQ_DMA(_bfa, _qno) (&((_bfa)->iocfc.reqq_dma[(_qno)])) +#define BFA_MEM_RSPQ_DMA(_bfa, _qno) (&((_bfa)->iocfc.rspq_dma[(_qno)])) +#define BFA_MEM_IOCFC_KVA(_bfa) (&((_bfa)->iocfc.kva_seg)) + +#define bfa_fn_lpu(__bfa) \ + bfi_fn_lpu(bfa_ioc_pcifn(&(__bfa)->ioc), bfa_ioc_portid(&(__bfa)->ioc)) +#define bfa_msix_init(__bfa, __nvecs) \ + ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs)) +#define bfa_msix_ctrl_install(__bfa) \ + ((__bfa)->iocfc.hwif.hw_msix_ctrl_install(__bfa)) +#define bfa_msix_queue_install(__bfa) \ + ((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa)) +#define bfa_msix_uninstall(__bfa) \ + ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa)) +#define bfa_isr_rspq_ack(__bfa, __queue, __ci) \ + ((__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue, __ci)) +#define bfa_isr_reqq_ack(__bfa, __queue) do { \ + if ((__bfa)->iocfc.hwif.hw_reqq_ack) \ + (__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue); \ +} while (0) +#define bfa_isr_mode_set(__bfa, __msix) do { \ + if ((__bfa)->iocfc.hwif.hw_isr_mode_set) \ + (__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix); \ +} while (0) +#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \ + ((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \ + __nvecs, __maxvec)) +#define bfa_msix_get_rme_range(__bfa, __start, __end) \ + ((__bfa)->iocfc.hwif.hw_msix_get_rme_range(__bfa, __start, __end)) +#define bfa_msix(__bfa, __vec) \ + ((__bfa)->msix.handler[__vec](__bfa, __vec)) + +/* + * FC specific IOC functions. 
+ */ +void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, + struct bfa_s *bfa); +void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, + struct bfa_iocfc_cfg_s *cfg, + struct bfa_pcidev_s *pcidev); +void bfa_iocfc_init(struct bfa_s *bfa); +void bfa_iocfc_start(struct bfa_s *bfa); +void bfa_iocfc_stop(struct bfa_s *bfa); +void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg); +void bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa); +bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa); +void bfa_iocfc_reset_queues(struct bfa_s *bfa); + +void bfa_msix_all(struct bfa_s *bfa, int vec); +void bfa_msix_reqq(struct bfa_s *bfa, int vec); +void bfa_msix_rspq(struct bfa_s *bfa, int vec); +void bfa_msix_lpu_err(struct bfa_s *bfa, int vec); + +void bfa_hwcb_reginit(struct bfa_s *bfa); +void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci); +void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs); +void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa); +void bfa_hwcb_msix_queue_install(struct bfa_s *bfa); +void bfa_hwcb_msix_uninstall(struct bfa_s *bfa); +void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix); +void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs, + u32 *maxvec); +void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, + u32 *end); +void bfa_hwct_reginit(struct bfa_s *bfa); +void bfa_hwct2_reginit(struct bfa_s *bfa); +void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq); +void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci); +void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci); +void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs); +void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa); +void bfa_hwct_msix_queue_install(struct bfa_s *bfa); +void bfa_hwct_msix_uninstall(struct bfa_s *bfa); +void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix); +void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs, + u32 *maxvec); +void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, + u32 *end); +void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns); +int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, + struct bfi_pbc_vport_s *pbc_vport); + + +/* + *---------------------------------------------------------------------- + * BFA public interfaces + *---------------------------------------------------------------------- + */ +#define bfa_stats(_mod, _stats) ((_mod)->stats._stats++) +#define bfa_ioc_get_stats(__bfa, __ioc_stats) \ + bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats) +#define bfa_ioc_clear_stats(__bfa) \ + bfa_ioc_clr_stats(&(__bfa)->ioc) +#define bfa_get_nports(__bfa) \ + bfa_ioc_get_nports(&(__bfa)->ioc) +#define bfa_get_adapter_manufacturer(__bfa, __manufacturer) \ + bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer) +#define bfa_get_adapter_model(__bfa, __model) \ + bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model) +#define bfa_get_adapter_serial_num(__bfa, __serial_num) \ + bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num) +#define bfa_get_adapter_fw_ver(__bfa, __fw_ver) \ + bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver) +#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver) \ + bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver) +#define bfa_get_pci_chip_rev(__bfa, __chip_rev) \ + bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev) +#define bfa_get_ioc_state(__bfa) \ + bfa_ioc_get_state(&(__bfa)->ioc) +#define bfa_get_type(__bfa) \ + bfa_ioc_get_type(&(__bfa)->ioc) +#define bfa_get_mac(__bfa) 
\ + bfa_ioc_get_mac(&(__bfa)->ioc) +#define bfa_get_mfg_mac(__bfa) \ + bfa_ioc_get_mfg_mac(&(__bfa)->ioc) +#define bfa_get_fw_clock_res(__bfa) \ + ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res) + +/* + * lun mask macros return NULL when min cfg is enabled and there is + * no memory allocated for lunmask. + */ +#define bfa_get_lun_mask(__bfa) \ + ((&(__bfa)->modules.dconf_mod)->min_cfg) ? NULL : \ + (&(BFA_DCONF_MOD(__bfa)->dconf->lun_mask)) + +#define bfa_get_lun_mask_list(_bfa) \ + ((&(_bfa)->modules.dconf_mod)->min_cfg) ? NULL : \ + (bfa_get_lun_mask(_bfa)->lun_list) + +#define bfa_get_lun_mask_status(_bfa) \ + (((&(_bfa)->modules.dconf_mod)->min_cfg) \ + ? BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status)) + +void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids); +void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg); +void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg); +void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, + struct bfa_s *bfa); +void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, + struct bfa_pcidev_s *pcidev); +void bfa_detach(struct bfa_s *bfa); +void bfa_cb_init(void *bfad, bfa_status_t status); +void bfa_cb_updateq(void *bfad, bfa_status_t status); + +bfa_boolean_t bfa_intx(struct bfa_s *bfa); +void bfa_isr_enable(struct bfa_s *bfa); +void bfa_isr_disable(struct bfa_s *bfa); + +void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q); +void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q); +void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q); + +typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status); +void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr); + + +bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa, + struct bfa_iocfc_intr_attr_s *attr); + +void bfa_iocfc_enable(struct bfa_s *bfa); +void bfa_iocfc_disable(struct bfa_s *bfa); +#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ + bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout) + +struct bfa_cb_pending_q_s { + struct bfa_cb_qe_s hcb_qe; + void *data; /* Driver buffer */ +}; + +/* Common macros to operate on pending stats/attr apis */ +#define bfa_pending_q_init(__qe, __cbfn, __cbarg, __data) do { \ + bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \ + (__qe)->hcb_qe.cbfn = (__cbfn); \ + (__qe)->hcb_qe.cbarg = (__cbarg); \ + (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \ + (__qe)->data = (__data); \ +} while (0) + +#endif /* __BFA_H__ */ diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c new file mode 100644 index 000000000..6846ca8f7 --- /dev/null +++ b/drivers/scsi/bfa/bfa_core.c @@ -0,0 +1,2008 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +#include "bfad_drv.h" +#include "bfa_modules.h" +#include "bfi_reg.h" + +BFA_TRC_FILE(HAL, CORE); + +/* + * Message handlers for various modules. 
+ */ +static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { + bfa_isr_unhandled, /* NONE */ + bfa_isr_unhandled, /* BFI_MC_IOC */ + bfa_fcdiag_intr, /* BFI_MC_DIAG */ + bfa_isr_unhandled, /* BFI_MC_FLASH */ + bfa_isr_unhandled, /* BFI_MC_CEE */ + bfa_fcport_isr, /* BFI_MC_FCPORT */ + bfa_isr_unhandled, /* BFI_MC_IOCFC */ + bfa_isr_unhandled, /* BFI_MC_LL */ + bfa_uf_isr, /* BFI_MC_UF */ + bfa_fcxp_isr, /* BFI_MC_FCXP */ + bfa_lps_isr, /* BFI_MC_LPS */ + bfa_rport_isr, /* BFI_MC_RPORT */ + bfa_itn_isr, /* BFI_MC_ITN */ + bfa_isr_unhandled, /* BFI_MC_IOIM_READ */ + bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */ + bfa_isr_unhandled, /* BFI_MC_IOIM_IO */ + bfa_ioim_isr, /* BFI_MC_IOIM */ + bfa_ioim_good_comp_isr, /* BFI_MC_IOIM_IOCOM */ + bfa_tskim_isr, /* BFI_MC_TSKIM */ + bfa_isr_unhandled, /* BFI_MC_SBOOT */ + bfa_isr_unhandled, /* BFI_MC_IPFC */ + bfa_isr_unhandled, /* BFI_MC_PORT */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ +}; +/* + * Message handlers for mailbox command classes + */ +static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { + NULL, + NULL, /* BFI_MC_IOC */ + NULL, /* BFI_MC_DIAG */ + NULL, /* BFI_MC_FLASH */ + NULL, /* BFI_MC_CEE */ + NULL, /* BFI_MC_PORT */ + bfa_iocfc_isr, /* BFI_MC_IOCFC */ + NULL, +}; + + + +void +__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data) +{ + int tail = trcm->tail; + struct bfa_trc_s *trc = &trcm->trc[tail]; + + if (trcm->stopped) + return; + + trc->fileno = (u16) fileno; + trc->line = (u16) line; + trc->data.u64 = data; + trc->timestamp = BFA_TRC_TS(trcm); + + trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); + if (trcm->tail == trcm->head) + trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); +} + +static void +bfa_com_port_attach(struct bfa_s *bfa) +{ + struct bfa_port_s *port = &bfa->modules.port; + struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa); + + bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod); + bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp); +} + +/* + * ablk module attach + */ +static void +bfa_com_ablk_attach(struct bfa_s *bfa) +{ + struct bfa_ablk_s *ablk = &bfa->modules.ablk; + struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa); + + bfa_ablk_attach(ablk, &bfa->ioc); + bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp); +} + +static void +bfa_com_cee_attach(struct bfa_s *bfa) +{ + struct bfa_cee_s *cee = &bfa->modules.cee; + struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa); + + cee->trcmod = bfa->trcmod; + bfa_cee_attach(cee, &bfa->ioc, bfa); + bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp); +} + +static void +bfa_com_sfp_attach(struct bfa_s *bfa) +{ + struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa); + struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa); + + bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod); + bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp); +} + +static void +bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) +{ + struct bfa_flash_s *flash = BFA_FLASH(bfa); + struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa); + + bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg); + bfa_flash_memclaim(flash, flash_dma->kva_curp, + flash_dma->dma_curp, mincfg); +} + +static void 
+bfa_com_diag_attach(struct bfa_s *bfa) +{ + struct bfa_diag_s *diag = BFA_DIAG_MOD(bfa); + struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa); + + bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod); + bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp); +} + +static void +bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) +{ + struct bfa_phy_s *phy = BFA_PHY(bfa); + struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa); + + bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg); + bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg); +} + +static void +bfa_com_fru_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) +{ + struct bfa_fru_s *fru = BFA_FRU(bfa); + struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa); + + bfa_fru_attach(fru, &bfa->ioc, bfa, bfa->trcmod, mincfg); + bfa_fru_memclaim(fru, fru_dma->kva_curp, fru_dma->dma_curp, mincfg); +} + +/* + * BFA IOC FC related definitions + */ + +/* + * IOC local definitions + */ +#define BFA_IOCFC_TOV 5000 /* msecs */ + +enum { + BFA_IOCFC_ACT_NONE = 0, + BFA_IOCFC_ACT_INIT = 1, + BFA_IOCFC_ACT_STOP = 2, + BFA_IOCFC_ACT_DISABLE = 3, + BFA_IOCFC_ACT_ENABLE = 4, +}; + +#define DEF_CFG_NUM_FABRICS 1 +#define DEF_CFG_NUM_LPORTS 256 +#define DEF_CFG_NUM_CQS 4 +#define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX) +#define DEF_CFG_NUM_TSKIM_REQS 128 +#define DEF_CFG_NUM_FCXP_REQS 64 +#define DEF_CFG_NUM_UF_BUFS 64 +#define DEF_CFG_NUM_RPORTS 1024 +#define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS) +#define DEF_CFG_NUM_TINS 256 + +#define DEF_CFG_NUM_SGPGS 2048 +#define DEF_CFG_NUM_REQQ_ELEMS 256 +#define DEF_CFG_NUM_RSPQ_ELEMS 64 +#define DEF_CFG_NUM_SBOOT_TGTS 16 +#define DEF_CFG_NUM_SBOOT_LUNS 16 + +/* + * IOCFC state machine definitions/declarations + */ +bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait, + struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, init_cfg_done, + struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, operational, + struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, dconf_write, + struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, init_failed, + struct bfa_iocfc_s, enum iocfc_event); + +/* + * forward declaration for IOC FC functions + */ +static void bfa_iocfc_start_submod(struct bfa_s *bfa); +static void bfa_iocfc_disable_submod(struct bfa_s *bfa); +static void bfa_iocfc_send_cfg(void *bfa_arg); +static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); +static void bfa_iocfc_disable_cbfn(void *bfa_arg); +static void bfa_iocfc_hbfail_cbfn(void *bfa_arg); +static void bfa_iocfc_reset_cbfn(void *bfa_arg); +static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; +static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete); +static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl); 
+static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl); +static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl); + +static void +bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc) +{ +} + +static void +bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_INIT: + case IOCFC_E_ENABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_ioc_enable(&iocfc->bfa->ioc); +} + +static void +bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_IOC_ENABLED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read); + break; + + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); + break; + + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_dconf_modinit(iocfc->bfa); +} + +static void +bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_DCONF_DONE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait); + break; + + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); + break; + + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_iocfc_send_cfg(iocfc->bfa); +} + +static void +bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_CFG_DONE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done); + break; + + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); + break; + + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc) +{ + iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe, + bfa_iocfc_init_cb, iocfc->bfa); +} + +static void +bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_START: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational); + break; + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); + break; + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_fcport_init(iocfc->bfa); + bfa_iocfc_start_submod(iocfc->bfa); +} + +static void +bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ 
+ bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); + break; + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_dconf_modexit(iocfc->bfa); +} + +static void +bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_DCONF_DONE: + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_ioc_disable(&iocfc->bfa->ioc); +} + +static void +bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_IOC_DISABLED: + bfa_isr_disable(iocfc->bfa); + bfa_iocfc_disable_submod(iocfc->bfa); + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped); + iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe, + bfa_iocfc_stop_cb, iocfc->bfa); + break; + + case IOCFC_E_IOC_ENABLED: + case IOCFC_E_DCONF_DONE: + case IOCFC_E_CFG_DONE: + break; + + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_ioc_enable(&iocfc->bfa->ioc); +} + +static void +bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_IOC_ENABLED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait); + break; + + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); + break; + + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); + + if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) + break; + + iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe, + bfa_iocfc_enable_cb, iocfc->bfa); + iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_iocfc_send_cfg(iocfc->bfa); +} + +static void +bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_CFG_DONE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational); + if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) + break; + + iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe, + bfa_iocfc_enable_cb, iocfc->bfa); + iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; + break; + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); + break; + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); + if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) + break; + + iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe, + bfa_iocfc_enable_cb, iocfc->bfa); + iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; + break; + default: + bfa_sm_fault(iocfc->bfa, event); + 
break; + } +} + +static void +bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_ioc_disable(&iocfc->bfa->ioc); +} + +static void +bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_IOC_DISABLED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled); + break; + case IOCFC_E_IOC_ENABLED: + case IOCFC_E_DCONF_DONE: + case IOCFC_E_CFG_DONE: + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_isr_disable(iocfc->bfa); + bfa_iocfc_disable_submod(iocfc->bfa); + iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe, + bfa_iocfc_disable_cb, iocfc->bfa); +} + +static void +bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); + break; + case IOCFC_E_ENABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_isr_disable(iocfc->bfa); + bfa_iocfc_disable_submod(iocfc->bfa); +} + +static void +bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); + break; + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + case IOCFC_E_IOC_ENABLED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait); + break; + case IOCFC_E_IOC_FAILED: + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_isr_disable(iocfc->bfa); + iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe, + bfa_iocfc_init_cb, iocfc->bfa); +} + +static void +bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); + break; + case IOCFC_E_DISABLE: + bfa_ioc_disable(&iocfc->bfa->ioc); + break; + case IOCFC_E_IOC_ENABLED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read); + break; + case IOCFC_E_IOC_DISABLED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped); + iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe, + bfa_iocfc_disable_cb, iocfc->bfa); + break; + case IOCFC_E_IOC_FAILED: + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +/* + * BFA Interrupt handling functions + */ +static void +bfa_reqq_resume(struct bfa_s *bfa, int qid) +{ + struct list_head *waitq, *qe, *qen; + struct bfa_reqq_wait_s *wqe; + + waitq = bfa_reqq(bfa, qid); + list_for_each_safe(qe, qen, waitq) { + /* + * Callback only as long as there is room in request queue + */ + if (bfa_reqq_full(bfa, qid)) + break; + + list_del(qe); + wqe = (struct bfa_reqq_wait_s *) qe; + wqe->qresume(wqe->cbarg); + } +} + +static bfa_boolean_t +bfa_isr_rspq(struct bfa_s *bfa, int qid) +{ + struct bfi_msg_s *m; + u32 pi, ci; + struct list_head *waitq; + bfa_boolean_t ret; + + ci = bfa_rspq_ci(bfa, qid); + pi = bfa_rspq_pi(bfa, qid); + + ret = (ci != pi); + + while (ci != pi) { + m = bfa_rspq_elem(bfa, qid, ci); + 
WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX); + + bfa_isrs[m->mhdr.msg_class] (bfa, m); + CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems); + } + + /* + * acknowledge RME completions and update CI + */ + bfa_isr_rspq_ack(bfa, qid, ci); + + /* + * Resume any pending requests in the corresponding reqq. + */ + waitq = bfa_reqq(bfa, qid); + if (!list_empty(waitq)) + bfa_reqq_resume(bfa, qid); + + return ret; +} + +static inline void +bfa_isr_reqq(struct bfa_s *bfa, int qid) +{ + struct list_head *waitq; + + bfa_isr_reqq_ack(bfa, qid); + + /* + * Resume any pending requests in the corresponding reqq. + */ + waitq = bfa_reqq(bfa, qid); + if (!list_empty(waitq)) + bfa_reqq_resume(bfa, qid); +} + +void +bfa_msix_all(struct bfa_s *bfa, int vec) +{ + u32 intr, qintr; + int queue; + + intr = readl(bfa->iocfc.bfa_regs.intr_status); + if (!intr) + return; + + /* + * RME completion queue interrupt + */ + qintr = intr & __HFN_INT_RME_MASK; + if (qintr && bfa->queue_process) { + for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) + bfa_isr_rspq(bfa, queue); + } + + intr &= ~qintr; + if (!intr) + return; + + /* + * CPE completion queue interrupt + */ + qintr = intr & __HFN_INT_CPE_MASK; + if (qintr && bfa->queue_process) { + for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) + bfa_isr_reqq(bfa, queue); + } + intr &= ~qintr; + if (!intr) + return; + + bfa_msix_lpu_err(bfa, intr); +} + +bfa_boolean_t +bfa_intx(struct bfa_s *bfa) +{ + u32 intr, qintr; + int queue; + bfa_boolean_t rspq_comp = BFA_FALSE; + + intr = readl(bfa->iocfc.bfa_regs.intr_status); + + qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK); + if (qintr) + writel(qintr, bfa->iocfc.bfa_regs.intr_status); + + /* + * Unconditional RME completion queue interrupt + */ + if (bfa->queue_process) { + for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) + if (bfa_isr_rspq(bfa, queue)) + rspq_comp = BFA_TRUE; + } + + if (!intr) + return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE; + + /* + * CPE completion queue interrupt + */ + qintr = intr & __HFN_INT_CPE_MASK; + if (qintr && bfa->queue_process) { + for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) + bfa_isr_reqq(bfa, queue); + } + intr &= ~qintr; + if (!intr) + return BFA_TRUE; + + if (bfa->intr_enabled) + bfa_msix_lpu_err(bfa, intr); + + return BFA_TRUE; +} + +void +bfa_isr_enable(struct bfa_s *bfa) +{ + u32 umsk; + int port_id = bfa_ioc_portid(&bfa->ioc); + + bfa_trc(bfa, bfa_ioc_pcifn(&bfa->ioc)); + bfa_trc(bfa, port_id); + + bfa_msix_ctrl_install(bfa); + + if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) { + umsk = __HFN_INT_ERR_MASK_CT2; + umsk |= port_id == 0 ? + __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2; + } else { + umsk = __HFN_INT_ERR_MASK; + umsk |= port_id == 0 ? 
__HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK; + } + + writel(umsk, bfa->iocfc.bfa_regs.intr_status); + writel(~umsk, bfa->iocfc.bfa_regs.intr_mask); + bfa->iocfc.intr_mask = ~umsk; + bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0); + + /* + * Set the flag indicating successful enabling of interrupts + */ + bfa->intr_enabled = BFA_TRUE; +} + +void +bfa_isr_disable(struct bfa_s *bfa) +{ + bfa->intr_enabled = BFA_FALSE; + bfa_isr_mode_set(bfa, BFA_FALSE); + writel(-1L, bfa->iocfc.bfa_regs.intr_mask); + bfa_msix_uninstall(bfa); +} + +void +bfa_msix_reqq(struct bfa_s *bfa, int vec) +{ + bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0); +} + +void +bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + bfa_trc(bfa, m->mhdr.msg_class); + bfa_trc(bfa, m->mhdr.msg_id); + bfa_trc(bfa, m->mhdr.mtag.i2htok); + WARN_ON(1); + bfa_trc_stop(bfa->trcmod); +} + +void +bfa_msix_rspq(struct bfa_s *bfa, int vec) +{ + bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0); +} + +void +bfa_msix_lpu_err(struct bfa_s *bfa, int vec) +{ + u32 intr, curr_value; + bfa_boolean_t lpu_isr, halt_isr, pss_isr; + + intr = readl(bfa->iocfc.bfa_regs.intr_status); + + if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) { + halt_isr = intr & __HFN_INT_CPQ_HALT_CT2; + pss_isr = intr & __HFN_INT_ERR_PSS_CT2; + lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 | + __HFN_INT_MBOX_LPU1_CT2); + intr &= __HFN_INT_ERR_MASK_CT2; + } else { + halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ? + (intr & __HFN_INT_LL_HALT) : 0; + pss_isr = intr & __HFN_INT_ERR_PSS; + lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1); + intr &= __HFN_INT_ERR_MASK; + } + + if (lpu_isr) + bfa_ioc_mbox_isr(&bfa->ioc); + + if (intr) { + if (halt_isr) { + /* + * If LL_HALT bit is set then FW Init Halt LL Port + * Register needs to be cleared as well so Interrupt + * Status Register will be cleared. + */ + curr_value = readl(bfa->ioc.ioc_regs.ll_halt); + curr_value &= ~__FW_INIT_HALT_P; + writel(curr_value, bfa->ioc.ioc_regs.ll_halt); + } + + if (pss_isr) { + /* + * ERR_PSS bit needs to be cleared as well in case + * interrups are shared so driver's interrupt handler is + * still called even though it is already masked out. 
+ */ + curr_value = readl( + bfa->ioc.ioc_regs.pss_err_status_reg); + writel(curr_value, + bfa->ioc.ioc_regs.pss_err_status_reg); + } + + writel(intr, bfa->iocfc.bfa_regs.intr_status); + bfa_ioc_error_isr(&bfa->ioc); + } +} + +/* + * BFA IOC FC related functions + */ + +/* + * BFA IOC private functions + */ + +/* + * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ + */ +static void +bfa_iocfc_send_cfg(void *bfa_arg) +{ + struct bfa_s *bfa = bfa_arg; + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_cfg_req_s cfg_req; + struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo; + struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg; + int i; + + WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS); + bfa_trc(bfa, cfg->fwcfg.num_cqs); + + bfa_iocfc_reset_queues(bfa); + + /* + * initialize IOC configuration info + */ + cfg_info->single_msix_vec = 0; + if (bfa->msix.nvecs == 1) + cfg_info->single_msix_vec = 1; + cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; + cfg_info->num_cqs = cfg->fwcfg.num_cqs; + cfg_info->num_ioim_reqs = cpu_to_be16(bfa_fcpim_get_throttle_cfg(bfa, + cfg->fwcfg.num_ioim_reqs)); + cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs); + + bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); + /* + * dma map REQ and RSP circular queues and shadow pointers + */ + for (i = 0; i < cfg->fwcfg.num_cqs; i++) { + bfa_dma_be_addr_set(cfg_info->req_cq_ba[i], + iocfc->req_cq_ba[i].pa); + bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i], + iocfc->req_cq_shadow_ci[i].pa); + cfg_info->req_cq_elems[i] = + cpu_to_be16(cfg->drvcfg.num_reqq_elems); + + bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i], + iocfc->rsp_cq_ba[i].pa); + bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i], + iocfc->rsp_cq_shadow_pi[i].pa); + cfg_info->rsp_cq_elems[i] = + cpu_to_be16(cfg->drvcfg.num_rspq_elems); + } + + /* + * Enable interrupt coalescing if it is driver init path + * and not ioc disable/enable path. + */ + if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait)) + cfg_info->intr_attr.coalesce = BFA_TRUE; + + /* + * dma map IOC configuration itself + */ + bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, + bfa_fn_lpu(bfa)); + bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa); + + bfa_ioc_mbox_send(&bfa->ioc, &cfg_req, + sizeof(struct bfi_iocfc_cfg_req_s)); +} + +static void +bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_pcidev_s *pcidev) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + + bfa->bfad = bfad; + iocfc->bfa = bfa; + iocfc->cfg = *cfg; + + /* + * Initialize chip specific handlers. 
+ */ + if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) { + iocfc->hwif.hw_reginit = bfa_hwct_reginit; + iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack; + iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack; + iocfc->hwif.hw_msix_init = bfa_hwct_msix_init; + iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install; + iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install; + iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall; + iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set; + iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs; + iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range; + iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT; + iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT; + } else { + iocfc->hwif.hw_reginit = bfa_hwcb_reginit; + iocfc->hwif.hw_reqq_ack = NULL; + iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; + iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; + iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install; + iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install; + iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall; + iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set; + iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs; + iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range; + iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB + + bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS; + iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB + + bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS; + } + + if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) { + iocfc->hwif.hw_reginit = bfa_hwct2_reginit; + iocfc->hwif.hw_isr_mode_set = NULL; + iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack; + } + + iocfc->hwif.hw_reginit(bfa); + bfa->msix.nvecs = 0; +} + +static void +bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg) +{ + u8 *dm_kva = NULL; + u64 dm_pa = 0; + int i, per_reqq_sz, per_rspq_sz; + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa); + struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa); + struct bfa_mem_dma_s *reqq_dma, *rspq_dma; + + /* First allocate dma memory for IOC */ + bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma), + bfa_mem_dma_phys(ioc_dma)); + + /* Claim DMA-able memory for the request/response queues */ + per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), + BFA_DMA_ALIGN_SZ); + per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), + BFA_DMA_ALIGN_SZ); + + for (i = 0; i < cfg->fwcfg.num_cqs; i++) { + reqq_dma = BFA_MEM_REQQ_DMA(bfa, i); + iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma); + iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma); + memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz); + + rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i); + iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma); + iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma); + memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz); + } + + /* Claim IOCFC dma memory - for shadow CI/PI */ + dm_kva = bfa_mem_dma_virt(iocfc_dma); + dm_pa = bfa_mem_dma_phys(iocfc_dma); + + for (i = 0; i < cfg->fwcfg.num_cqs; i++) { + iocfc->req_cq_shadow_ci[i].kva = dm_kva; + iocfc->req_cq_shadow_ci[i].pa = dm_pa; + dm_kva += BFA_CACHELINE_SZ; + dm_pa += BFA_CACHELINE_SZ; + + iocfc->rsp_cq_shadow_pi[i].kva = dm_kva; + iocfc->rsp_cq_shadow_pi[i].pa = dm_pa; + dm_kva += BFA_CACHELINE_SZ; + dm_pa += BFA_CACHELINE_SZ; + } + + /* Claim IOCFC dma memory - for the config info page */ + bfa->iocfc.cfg_info.kva = dm_kva; + bfa->iocfc.cfg_info.pa = dm_pa; + bfa->iocfc.cfginfo = (struct 
bfi_iocfc_cfg_s *) dm_kva; + dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); + dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); + + /* Claim IOCFC dma memory - for the config response */ + bfa->iocfc.cfgrsp_dma.kva = dm_kva; + bfa->iocfc.cfgrsp_dma.pa = dm_pa; + bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva; + dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), + BFA_CACHELINE_SZ); + dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), + BFA_CACHELINE_SZ); + + /* Claim IOCFC kva memory */ + bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc)); + bfa_mem_kva_curp(iocfc) += BFA_DBG_FWTRC_LEN; +} + +/* + * Start BFA submodules. + */ +static void +bfa_iocfc_start_submod(struct bfa_s *bfa) +{ + int i; + + bfa->queue_process = BFA_TRUE; + for (i = 0; i < BFI_IOC_MAX_CQS; i++) + bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i)); + + bfa_fcport_start(bfa); + bfa_uf_start(bfa); + /* + * bfa_init() with flash read is complete. now invalidate the stale + * content of lun mask like unit attention, rp tag and lp tag. + */ + bfa_ioim_lm_init(BFA_FCP_MOD(bfa)->bfa); + + bfa->iocfc.submod_enabled = BFA_TRUE; +} + +/* + * Disable BFA submodules. + */ +static void +bfa_iocfc_disable_submod(struct bfa_s *bfa) +{ + if (bfa->iocfc.submod_enabled == BFA_FALSE) + return; + + bfa_fcdiag_iocdisable(bfa); + bfa_fcport_iocdisable(bfa); + bfa_fcxp_iocdisable(bfa); + bfa_lps_iocdisable(bfa); + bfa_rport_iocdisable(bfa); + bfa_fcp_iocdisable(bfa); + bfa_dconf_iocdisable(bfa); + + bfa->iocfc.submod_enabled = BFA_FALSE; +} + +static void +bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete) +{ + struct bfa_s *bfa = bfa_arg; + + if (complete) + bfa_cb_init(bfa->bfad, bfa->iocfc.op_status); +} + +static void +bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl) +{ + struct bfa_s *bfa = bfa_arg; + struct bfad_s *bfad = bfa->bfad; + + if (compl) + complete(&bfad->comp); +} + +static void +bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl) +{ + struct bfa_s *bfa = bfa_arg; + struct bfad_s *bfad = bfa->bfad; + + if (compl) + complete(&bfad->enable_comp); +} + +static void +bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl) +{ + struct bfa_s *bfa = bfa_arg; + struct bfad_s *bfad = bfa->bfad; + + if (compl) + complete(&bfad->disable_comp); +} + +/* + * configure queue registers from firmware response + */ +static void +bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg) +{ + int i; + struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs; + void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); + + for (i = 0; i < BFI_IOC_MAX_CQS; i++) { + bfa->iocfc.hw_qid[i] = qreg->hw_qid[i]; + r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]); + r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]); + r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]); + r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]); + r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]); + r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]); + } +} + +static void +bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo; + + bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs); + bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs); + bfa_rport_res_recfg(bfa, fwcfg->num_rports); + bfa_fcp_res_recfg(bfa, cpu_to_be16(cfg_info->num_ioim_reqs), + fwcfg->num_ioim_reqs); + bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs); +} + +/* + * Update BFA configuration 
from firmware configuration. + */ +static void +bfa_iocfc_cfgrsp(struct bfa_s *bfa) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; + struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg; + + fwcfg->num_cqs = fwcfg->num_cqs; + fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs); + fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs); + fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs); + fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs); + fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs); + fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports); + + /* + * configure queue register offsets as learnt from firmware + */ + bfa_iocfc_qreg(bfa, &cfgrsp->qreg); + + /* + * Re-configure resources as learnt from Firmware + */ + bfa_iocfc_res_recfg(bfa, fwcfg); + + /* + * Install MSIX queue handlers + */ + bfa_msix_queue_install(bfa); + + if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) { + bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn; + bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn; + bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE); + } +} + +void +bfa_iocfc_reset_queues(struct bfa_s *bfa) +{ + int q; + + for (q = 0; q < BFI_IOC_MAX_CQS; q++) { + bfa_reqq_ci(bfa, q) = 0; + bfa_reqq_pi(bfa, q) = 0; + bfa_rspq_ci(bfa, q) = 0; + bfa_rspq_pi(bfa, q) = 0; + } +} + +/* + * Process FAA pwwn msg from fw. + */ +static void +bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; + + cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn; + cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn; + + bfa->ioc.attr->pwwn = msg->pwwn; + bfa->ioc.attr->nwwn = msg->nwwn; + bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE); +} + +/* Fabric Assigned Address specific functions */ + +/* + * Check whether IOC is ready before sending command down + */ +static bfa_status_t +bfa_faa_validate_request(struct bfa_s *bfa) +{ + enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa); + u32 card_type = bfa->ioc.attr->card_type; + + if (bfa_ioc_is_operational(&bfa->ioc)) { + if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type)) + return BFA_STATUS_FEATURE_NOT_SUPPORTED; + } else { + return BFA_STATUS_IOC_NON_OP; + } + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, + bfa_cb_iocfc_t cbfn, void *cbarg) +{ + struct bfi_faa_query_s faa_attr_req; + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + bfa_status_t status; + + status = bfa_faa_validate_request(bfa); + if (status != BFA_STATUS_OK) + return status; + + if (iocfc->faa_args.busy == BFA_TRUE) + return BFA_STATUS_DEVBUSY; + + iocfc->faa_args.faa_attr = attr; + iocfc->faa_args.faa_cb.faa_cbfn = cbfn; + iocfc->faa_args.faa_cb.faa_cbarg = cbarg; + + iocfc->faa_args.busy = BFA_TRUE; + memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s)); + bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC, + BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa)); + + bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req, + sizeof(struct bfi_faa_query_s)); + + return BFA_STATUS_OK; +} + +/* + * FAA query response + */ +static void +bfa_faa_query_reply(struct bfa_iocfc_s *iocfc, + bfi_faa_query_rsp_t *rsp) +{ + void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg; + + if (iocfc->faa_args.faa_attr) { + iocfc->faa_args.faa_attr->faa = rsp->faa; + iocfc->faa_args.faa_attr->faa_state = rsp->faa_status; + iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source; + } + + WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn); 
+ + iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK); + iocfc->faa_args.busy = BFA_FALSE; +} + +/* + * IOC enable request is complete + */ +static void +bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status) +{ + struct bfa_s *bfa = bfa_arg; + + if (status == BFA_STATUS_OK) + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED); + else + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED); +} + +/* + * IOC disable request is complete + */ +static void +bfa_iocfc_disable_cbfn(void *bfa_arg) +{ + struct bfa_s *bfa = bfa_arg; + + bfa->queue_process = BFA_FALSE; + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED); +} + +/* + * Notify sub-modules of hardware failure. + */ +static void +bfa_iocfc_hbfail_cbfn(void *bfa_arg) +{ + struct bfa_s *bfa = bfa_arg; + + bfa->queue_process = BFA_FALSE; + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED); +} + +/* + * Actions on chip-reset completion. + */ +static void +bfa_iocfc_reset_cbfn(void *bfa_arg) +{ + struct bfa_s *bfa = bfa_arg; + + bfa_iocfc_reset_queues(bfa); + bfa_isr_enable(bfa); +} + +/* + * Query IOC memory requirement information. + */ +void +bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, + struct bfa_s *bfa) +{ + int q, per_reqq_sz, per_rspq_sz; + struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa); + struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa); + struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa); + u32 dm_len = 0; + + /* dma memory setup for IOC */ + bfa_mem_dma_setup(meminfo, ioc_dma, + BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ)); + + /* dma memory setup for REQ/RSP queues */ + per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), + BFA_DMA_ALIGN_SZ); + per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), + BFA_DMA_ALIGN_SZ); + + for (q = 0; q < cfg->fwcfg.num_cqs; q++) { + bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q), + per_reqq_sz); + bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q), + per_rspq_sz); + } + + /* IOCFC dma memory - calculate Shadow CI/PI size */ + for (q = 0; q < cfg->fwcfg.num_cqs; q++) + dm_len += (2 * BFA_CACHELINE_SZ); + + /* IOCFC dma memory - calculate config info / rsp size */ + dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); + dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), + BFA_CACHELINE_SZ); + + /* dma memory setup for IOCFC */ + bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len); + + /* kva memory setup for IOCFC */ + bfa_mem_kva_setup(meminfo, iocfc_kva, BFA_DBG_FWTRC_LEN); +} + +/* + * Query IOC memory requirement information. 
+ */ +void +bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_pcidev_s *pcidev) +{ + int i; + struct bfa_ioc_s *ioc = &bfa->ioc; + + bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn; + bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn; + bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn; + bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn; + + ioc->trcmod = bfa->trcmod; + bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod); + + bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC); + bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs); + + bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); + bfa_iocfc_mem_claim(bfa, cfg); + INIT_LIST_HEAD(&bfa->timer_mod.timer_q); + + INIT_LIST_HEAD(&bfa->comp_q); + for (i = 0; i < BFI_IOC_MAX_CQS; i++) + INIT_LIST_HEAD(&bfa->reqq_waitq[i]); + + bfa->iocfc.cb_reqd = BFA_FALSE; + bfa->iocfc.op_status = BFA_STATUS_OK; + bfa->iocfc.submod_enabled = BFA_FALSE; + + bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped); +} + +/* + * Query IOC memory requirement information. + */ +void +bfa_iocfc_init(struct bfa_s *bfa) +{ + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT); +} + +/* + * IOC start called from bfa_start(). Called to start IOC operations + * at driver instantiation for this instance. + */ +void +bfa_iocfc_start(struct bfa_s *bfa) +{ + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START); +} + +/* + * IOC stop called from bfa_stop(). Called only when driver is unloaded + * for this instance. + */ +void +bfa_iocfc_stop(struct bfa_s *bfa) +{ + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP); +} + +void +bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m) +{ + struct bfa_s *bfa = bfaarg; + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + union bfi_iocfc_i2h_msg_u *msg; + + msg = (union bfi_iocfc_i2h_msg_u *) m; + bfa_trc(bfa, msg->mh.msg_id); + + switch (msg->mh.msg_id) { + case BFI_IOCFC_I2H_CFG_REPLY: + bfa_iocfc_cfgrsp(bfa); + break; + case BFI_IOCFC_I2H_UPDATEQ_RSP: + iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); + break; + case BFI_IOCFC_I2H_ADDR_MSG: + bfa_iocfc_process_faa_addr(bfa, + (struct bfi_faa_addr_msg_s *)msg); + break; + case BFI_IOCFC_I2H_FAA_QUERY_RSP: + bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg); + break; + default: + WARN_ON(1); + } +} + +void +bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + + attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce; + + attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ? + be16_to_cpu(iocfc->cfginfo->intr_attr.delay) : + be16_to_cpu(iocfc->cfgrsp->intr_attr.delay); + + attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ? 
+ be16_to_cpu(iocfc->cfginfo->intr_attr.latency) : + be16_to_cpu(iocfc->cfgrsp->intr_attr.latency); + + attr->config = iocfc->cfg; +} + +bfa_status_t +bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_set_intr_req_s *m; + + iocfc->cfginfo->intr_attr.coalesce = attr->coalesce; + iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay); + iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency); + + if (!bfa_iocfc_is_operational(bfa)) + return BFA_STATUS_OK; + + m = bfa_reqq_next(bfa, BFA_REQQ_IOC); + if (!m) + return BFA_STATUS_DEVBUSY; + + bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ, + bfa_fn_lpu(bfa)); + m->coalesce = iocfc->cfginfo->intr_attr.coalesce; + m->delay = iocfc->cfginfo->intr_attr.delay; + m->latency = iocfc->cfginfo->intr_attr.latency; + + bfa_trc(bfa, attr->delay); + bfa_trc(bfa, attr->latency); + + bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh); + return BFA_STATUS_OK; +} + +void +bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + + iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); + bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa); +} +/* + * Enable IOC after it is disabled. + */ +void +bfa_iocfc_enable(struct bfa_s *bfa) +{ + bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, + "IOC Enable"); + bfa->iocfc.cb_reqd = BFA_TRUE; + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE); +} + +void +bfa_iocfc_disable(struct bfa_s *bfa) +{ + bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, + "IOC Disable"); + + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE); +} + +bfa_boolean_t +bfa_iocfc_is_operational(struct bfa_s *bfa) +{ + return bfa_ioc_is_operational(&bfa->ioc) && + bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational); +} + +/* + * Return boot target port wwns -- read from boot information in flash. + */ +void +bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; + int i; + + if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) { + bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns); + *nwwns = cfgrsp->pbc_cfg.nbluns; + for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++) + wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn; + + return; + } + + *nwwns = cfgrsp->bootwwns.nwwns; + memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn)); +} + +int +bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; + + memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport)); + return cfgrsp->pbc_cfg.nvports; +} + + +/* + * Use this function query the memory requirement of the BFA library. + * This function needs to be called before bfa_attach() to get the + * memory required of the BFA layer for a given driver configuration. + * + * This call will fail, if the cap is out of range compared to pre-defined + * values within the BFA library + * + * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate + * its configuration in this structure. + * The default values for struct bfa_iocfc_cfg_s can be + * fetched using bfa_cfg_get_default() API. + * + * If cap's boundary check fails, the library will use + * the default bfa_cap_t values (and log a warning msg). + * + * @param[out] meminfo - pointer to bfa_meminfo_t. 
This content + * indicates the memory type (see bfa_mem_type_t) and + * amount of memory required. + * + * Driver should allocate the memory, populate the + * starting address for each block and provide the same + * structure as input parameter to bfa_attach() call. + * + * @param[in] bfa - pointer to the bfa structure, used while fetching the + * dma, kva memory information of the bfa sub-modules. + * + * @return void + * + * Special Considerations: @note + */ +void +bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, + struct bfa_s *bfa) +{ + struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa); + struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa); + struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa); + struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa); + struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa); + struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa); + struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa); + struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa); + + WARN_ON((cfg == NULL) || (meminfo == NULL)); + + memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s)); + + /* Initialize the DMA & KVA meminfo queues */ + INIT_LIST_HEAD(&meminfo->dma_info.qe); + INIT_LIST_HEAD(&meminfo->kva_info.qe); + + bfa_iocfc_meminfo(cfg, meminfo, bfa); + bfa_sgpg_meminfo(cfg, meminfo, bfa); + bfa_fcport_meminfo(cfg, meminfo, bfa); + bfa_fcxp_meminfo(cfg, meminfo, bfa); + bfa_lps_meminfo(cfg, meminfo, bfa); + bfa_uf_meminfo(cfg, meminfo, bfa); + bfa_rport_meminfo(cfg, meminfo, bfa); + bfa_fcp_meminfo(cfg, meminfo, bfa); + bfa_dconf_meminfo(cfg, meminfo, bfa); + + /* dma info setup */ + bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo()); + bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo()); + bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo()); + bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo()); + bfa_mem_dma_setup(meminfo, flash_dma, + bfa_flash_meminfo(cfg->drvcfg.min_cfg)); + bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo()); + bfa_mem_dma_setup(meminfo, phy_dma, + bfa_phy_meminfo(cfg->drvcfg.min_cfg)); + bfa_mem_dma_setup(meminfo, fru_dma, + bfa_fru_meminfo(cfg->drvcfg.min_cfg)); +} + +/* + * Use this function to do attach the driver instance with the BFA + * library. This function will not trigger any HW initialization + * process (which will be done in bfa_init() call) + * + * This call will fail, if the cap is out of range compared to + * pre-defined values within the BFA library + * + * @param[out] bfa Pointer to bfa_t. + * @param[in] bfad Opaque handle back to the driver's IOC structure + * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure + * that was used in bfa_cfg_get_meminfo(). + * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should + * use the bfa_cfg_get_meminfo() call to + * find the memory blocks required, allocate the + * required memory and provide the starting addresses. 
+ * @param[in] pcidev pointer to struct bfa_pcidev_s + * + * @return + * void + * + * Special Considerations: + * + * @note + * + */ +void +bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) +{ + struct bfa_mem_dma_s *dma_info, *dma_elem; + struct bfa_mem_kva_s *kva_info, *kva_elem; + struct list_head *dm_qe, *km_qe; + + bfa->fcs = BFA_FALSE; + + WARN_ON((cfg == NULL) || (meminfo == NULL)); + + /* Initialize memory pointers for iterative allocation */ + dma_info = &meminfo->dma_info; + dma_info->kva_curp = dma_info->kva; + dma_info->dma_curp = dma_info->dma; + + kva_info = &meminfo->kva_info; + kva_info->kva_curp = kva_info->kva; + + list_for_each(dm_qe, &dma_info->qe) { + dma_elem = (struct bfa_mem_dma_s *) dm_qe; + dma_elem->kva_curp = dma_elem->kva; + dma_elem->dma_curp = dma_elem->dma; + } + + list_for_each(km_qe, &kva_info->qe) { + kva_elem = (struct bfa_mem_kva_s *) km_qe; + kva_elem->kva_curp = kva_elem->kva; + } + + bfa_iocfc_attach(bfa, bfad, cfg, pcidev); + bfa_fcdiag_attach(bfa, bfad, cfg, pcidev); + bfa_sgpg_attach(bfa, bfad, cfg, pcidev); + bfa_fcport_attach(bfa, bfad, cfg, pcidev); + bfa_fcxp_attach(bfa, bfad, cfg, pcidev); + bfa_lps_attach(bfa, bfad, cfg, pcidev); + bfa_uf_attach(bfa, bfad, cfg, pcidev); + bfa_rport_attach(bfa, bfad, cfg, pcidev); + bfa_fcp_attach(bfa, bfad, cfg, pcidev); + bfa_dconf_attach(bfa, bfad, cfg); + bfa_com_port_attach(bfa); + bfa_com_ablk_attach(bfa); + bfa_com_cee_attach(bfa); + bfa_com_sfp_attach(bfa); + bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg); + bfa_com_diag_attach(bfa); + bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg); + bfa_com_fru_attach(bfa, cfg->drvcfg.min_cfg); +} + +/* + * Use this function to delete a BFA IOC. IOC should be stopped (by + * calling bfa_stop()) before this function call. + * + * @param[in] bfa - pointer to bfa_t. + * + * @return + * void + * + * Special Considerations: + * + * @note + */ +void +bfa_detach(struct bfa_s *bfa) +{ + bfa_ioc_detach(&bfa->ioc); +} + +void +bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q) +{ + INIT_LIST_HEAD(comp_q); + list_splice_tail_init(&bfa->comp_q, comp_q); +} + +void +bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) +{ + struct list_head *qe; + struct list_head *qen; + struct bfa_cb_qe_s *hcb_qe; + bfa_cb_cbfn_status_t cbfn; + + list_for_each_safe(qe, qen, comp_q) { + hcb_qe = (struct bfa_cb_qe_s *) qe; + if (hcb_qe->pre_rmv) { + /* qe is invalid after return, dequeue before cbfn() */ + list_del(qe); + cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn); + cbfn(hcb_qe->cbarg, hcb_qe->fw_status); + } else + hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE); + } +} + +void +bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) +{ + struct list_head *qe; + struct bfa_cb_qe_s *hcb_qe; + + while (!list_empty(comp_q)) { + bfa_q_deq(comp_q, &qe); + hcb_qe = (struct bfa_cb_qe_s *) qe; + WARN_ON(hcb_qe->pre_rmv); + hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE); + } +} + +/* + * Return the list of PCI vendor/device id lists supported by this + * BFA instance. 
+ */ +void +bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids) +{ + static struct bfa_pciid_s __pciids[] = { + {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P}, + {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P}, + {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT}, + {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, + }; + + *npciids = ARRAY_SIZE(__pciids); + *pciids = __pciids; +} + +/* + * Use this function query the default struct bfa_iocfc_cfg_s value (compiled + * into BFA layer). The OS driver can then turn back and overwrite entries that + * have been configured by the user. + * + * @param[in] cfg - pointer to bfa_ioc_cfg_t + * + * @return + * void + * + * Special Considerations: + * note + */ +void +bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) +{ + cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS; + cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS; + cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS; + cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS; + cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS; + cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS; + cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS; + cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS; + cfg->fwcfg.num_fwtio_reqs = 0; + + cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS; + cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS; + cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS; + cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS; + cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS; + cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF; + cfg->drvcfg.ioc_recover = BFA_FALSE; + cfg->drvcfg.delay_comp = BFA_FALSE; + +} + +void +bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg) +{ + bfa_cfg_get_default(cfg); + cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN; + cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN; + cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN; + cfg->fwcfg.num_uf_bufs = BFA_UF_MIN; + cfg->fwcfg.num_rports = BFA_RPORT_MIN; + cfg->fwcfg.num_fwtio_reqs = 0; + + cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; + cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; + cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; + cfg->drvcfg.min_cfg = BFA_TRUE; +} diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h new file mode 100644 index 000000000..6b606bf58 --- /dev/null +++ b/drivers/scsi/bfa/bfa_cs.h @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
+ */ + +/* + * bfa_cs.h BFA common services + */ + +#ifndef __BFA_CS_H__ +#define __BFA_CS_H__ + +#include "bfad_drv.h" + +/* + * BFA TRC + */ + +#ifndef BFA_TRC_MAX +#define BFA_TRC_MAX (4 * 1024) +#endif + +#define BFA_TRC_TS(_trcm) \ + ({ \ + struct timespec64 ts; \ + \ + ktime_get_ts64(&ts); \ + (ts.tv_sec*1000000+ts.tv_nsec / 1000); \ + }) + +#ifndef BFA_TRC_TS +#define BFA_TRC_TS(_trcm) ((_trcm)->ticks++) +#endif + +struct bfa_trc_s { +#ifdef __BIG_ENDIAN + u16 fileno; + u16 line; +#else + u16 line; + u16 fileno; +#endif + u32 timestamp; + union { + struct { + u32 rsvd; + u32 u32; + } u32; + u64 u64; + } data; +}; + +struct bfa_trc_mod_s { + u32 head; + u32 tail; + u32 ntrc; + u32 stopped; + u32 ticks; + u32 rsvd[3]; + struct bfa_trc_s trc[BFA_TRC_MAX]; +}; + +enum { + BFA_TRC_HAL = 1, /* BFA modules */ + BFA_TRC_FCS = 2, /* BFA FCS modules */ + BFA_TRC_LDRV = 3, /* Linux driver modules */ + BFA_TRC_CNA = 4, /* Common modules */ +}; +#define BFA_TRC_MOD_SH 10 +#define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH) + +/* + * Define a new tracing file (module). Module should match one defined above. + */ +#define BFA_TRC_FILE(__mod, __submod) \ + static int __trc_fileno = ((BFA_TRC_ ## __mod ## _ ## __submod) | \ + BFA_TRC_MOD(__mod)) + + +#define bfa_trc32(_trcp, _data) \ + __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data) +#define bfa_trc(_trcp, _data) \ + __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u64)_data) + +static inline void +bfa_trc_init(struct bfa_trc_mod_s *trcm) +{ + trcm->head = trcm->tail = trcm->stopped = 0; + trcm->ntrc = BFA_TRC_MAX; +} + +static inline void +bfa_trc_stop(struct bfa_trc_mod_s *trcm) +{ + trcm->stopped = 1; +} + +void +__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data); + +void +__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data); + +#define bfa_sm_fault(__mod, __event) do { \ + bfa_trc(__mod, (((u32)0xDEAD << 16) | __event)); \ + printk(KERN_ERR "Assertion failure: %s:%d: %d", \ + __FILE__, __LINE__, (__event)); \ +} while (0) + +/* BFA queue definitions */ +#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next)) +#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next) +#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev) + +/* + * bfa_q_qe_init - to initialize a queue element + */ +#define bfa_q_qe_init(_qe) { \ + bfa_q_next(_qe) = (struct list_head *) NULL; \ + bfa_q_prev(_qe) = (struct list_head *) NULL; \ +} + +/* + * bfa_q_deq - dequeue an element from head of the queue + */ +#define bfa_q_deq(_q, _qe) do { \ + if (!list_empty(_q)) { \ + (*((struct list_head **) (_qe))) = bfa_q_next(_q); \ + bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \ + (struct list_head *) (_q); \ + bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\ + } else { \ + *((struct list_head **) (_qe)) = (struct list_head *) NULL;\ + } \ +} while (0) + +/* + * bfa_q_deq_tail - dequeue an element from tail of the queue + */ +#define bfa_q_deq_tail(_q, _qe) { \ + if (!list_empty(_q)) { \ + *((struct list_head **) (_qe)) = bfa_q_prev(_q); \ + bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \ + (struct list_head *) (_q); \ + bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\ + } else { \ + *((struct list_head **) (_qe)) = (struct list_head *) NULL;\ + } \ +} + +static inline int +bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe) +{ + struct list_head *tqe; + + tqe = bfa_q_next(q); + while (tqe != q) { + if (tqe == qe) + return 1; + tqe = 
bfa_q_next(tqe); + if (tqe == NULL) + break; + } + return 0; +} + +#define bfa_q_is_on_q(_q, _qe) \ + bfa_q_is_on_q_func(_q, (struct list_head *)(_qe)) + +/* + * @ BFA state machine interfaces + */ + +typedef void (*bfa_sm_t)(void *sm, int event); + +/* + * oc - object class eg. bfa_ioc + * st - state, eg. reset + * otype - object type, eg. struct bfa_ioc_s + * etype - object type, eg. enum ioc_event + */ +#define bfa_sm_state_decl(oc, st, otype, etype) \ + static void oc ## _sm_ ## st(otype * fsm, etype event) + +#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state)) +#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event))) +#define bfa_sm_get_state(_sm) ((_sm)->sm) +#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) + +/* + * For converting from state machine function to state encoding. + */ +struct bfa_sm_table_s { + bfa_sm_t sm; /* state machine function */ + int state; /* state machine encoding */ + char *name; /* state name for display */ +}; +#define BFA_SM(_sm) ((bfa_sm_t)(_sm)) + +/* + * State machine with entry actions. + */ +typedef void (*bfa_fsm_t)(void *fsm, int event); + +/* + * oc - object class eg. bfa_ioc + * st - state, eg. reset + * otype - object type, eg. struct bfa_ioc_s + * etype - object type, eg. enum ioc_event + */ +#define bfa_fsm_state_decl(oc, st, otype, etype) \ + static void oc ## _sm_ ## st(otype * fsm, etype event); \ + static void oc ## _sm_ ## st ## _entry(otype * fsm) + +#define bfa_fsm_set_state(_fsm, _state) do { \ + (_fsm)->fsm = (bfa_fsm_t)(_state); \ + _state ## _entry(_fsm); \ +} while (0) + +#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event))) +#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm) +#define bfa_fsm_cmp_state(_fsm, _state) \ + ((_fsm)->fsm == (bfa_fsm_t)(_state)) + +static inline int +bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm) +{ + int i = 0; + + while (smt[i].sm && smt[i].sm != sm) + i++; + return smt[i].state; +} + +/* + * @ Generic wait counter. + */ + +typedef void (*bfa_wc_resume_t) (void *cbarg); + +struct bfa_wc_s { + bfa_wc_resume_t wc_resume; + void *wc_cbarg; + int wc_count; +}; + +static inline void +bfa_wc_up(struct bfa_wc_s *wc) +{ + wc->wc_count++; +} + +static inline void +bfa_wc_down(struct bfa_wc_s *wc) +{ + wc->wc_count--; + if (wc->wc_count == 0) + wc->wc_resume(wc->wc_cbarg); +} + +/* + * Initialize a waiting counter. 
+ */ +static inline void +bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) +{ + wc->wc_resume = wc_resume; + wc->wc_cbarg = wc_cbarg; + wc->wc_count = 0; + bfa_wc_up(wc); +} + +/* + * Wait for counter to reach zero + */ +static inline void +bfa_wc_wait(struct bfa_wc_s *wc) +{ + bfa_wc_down(wc); +} + +static inline void +wwn2str(char *wwn_str, u64 wwn) +{ + union { + u64 wwn; + u8 byte[8]; + } w; + + w.wwn = wwn; + sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0], + w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5], + w.byte[6], w.byte[7]); +} + +static inline void +fcid2str(char *fcid_str, u32 fcid) +{ + union { + u32 fcid; + u8 byte[4]; + } f; + + f.fcid = fcid; + sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]); +} + +#define bfa_swap_3b(_x) \ + ((((_x) & 0xff) << 16) | \ + ((_x) & 0x00ff00) | \ + (((_x) & 0xff0000) >> 16)) + +#ifndef __BIG_ENDIAN +#define bfa_hton3b(_x) bfa_swap_3b(_x) +#else +#define bfa_hton3b(_x) (_x) +#endif + +#define bfa_ntoh3b(_x) bfa_hton3b(_x) + +#endif /* __BFA_CS_H__ */ diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h new file mode 100644 index 000000000..6abd9f42a --- /dev/null +++ b/drivers/scsi/bfa/bfa_defs.h @@ -0,0 +1,1280 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +#ifndef __BFA_DEFS_H__ +#define __BFA_DEFS_H__ + +#include "bfa_fc.h" +#include "bfad_drv.h" + +#define BFA_MFG_SERIALNUM_SIZE 11 +#define STRSZ(_n) (((_n) + 4) & ~3) + +/* + * Manufacturing card type + */ +enum { + BFA_MFG_TYPE_CB_MAX = 825, /* Crossbow card type max */ + BFA_MFG_TYPE_FC8P2 = 825, /* 8G 2port FC card */ + BFA_MFG_TYPE_FC8P1 = 815, /* 8G 1port FC card */ + BFA_MFG_TYPE_FC4P2 = 425, /* 4G 2port FC card */ + BFA_MFG_TYPE_FC4P1 = 415, /* 4G 1port FC card */ + BFA_MFG_TYPE_CNA10P2 = 1020, /* 10G 2port CNA card */ + BFA_MFG_TYPE_CNA10P1 = 1010, /* 10G 1port CNA card */ + BFA_MFG_TYPE_JAYHAWK = 804, /* Jayhawk mezz card */ + BFA_MFG_TYPE_WANCHESE = 1007, /* Wanchese mezz card */ + BFA_MFG_TYPE_ASTRA = 807, /* Astra mezz card */ + BFA_MFG_TYPE_LIGHTNING_P0 = 902, /* Lightning mezz card - old */ + BFA_MFG_TYPE_LIGHTNING = 1741, /* Lightning mezz card */ + BFA_MFG_TYPE_PROWLER_F = 1560, /* Prowler FC only cards */ + BFA_MFG_TYPE_PROWLER_N = 1410, /* Prowler NIC only cards */ + BFA_MFG_TYPE_PROWLER_C = 1710, /* Prowler CNA only cards */ + BFA_MFG_TYPE_PROWLER_D = 1860, /* Prowler Dual cards */ + BFA_MFG_TYPE_CHINOOK = 1867, /* Chinook cards */ + BFA_MFG_TYPE_CHINOOK2 = 1869, /*!< Chinook2 cards */ + BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */ +}; + +#pragma pack(1) + +/* + * Check if Mezz card + */ +#define bfa_mfg_is_mezz(type) (( \ + (type) == BFA_MFG_TYPE_JAYHAWK || \ + (type) == BFA_MFG_TYPE_WANCHESE || \ + (type) == BFA_MFG_TYPE_ASTRA || \ + (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \ + (type) == BFA_MFG_TYPE_LIGHTNING || \ + (type) == BFA_MFG_TYPE_CHINOOK || \ + (type) == BFA_MFG_TYPE_CHINOOK2)) + +/* + * Check if the card having old wwn/mac handling + */ +#define bfa_mfg_is_old_wwn_mac_model(type) (( \ + (type) == BFA_MFG_TYPE_FC8P2 || \ + (type) == BFA_MFG_TYPE_FC8P1 || \ + (type) == BFA_MFG_TYPE_FC4P2 || \ + (type) == BFA_MFG_TYPE_FC4P1 || \ + (type) == BFA_MFG_TYPE_CNA10P2 || \ + (type) == BFA_MFG_TYPE_CNA10P1 || \ + (type) == 
BFA_MFG_TYPE_JAYHAWK || \ + (type) == BFA_MFG_TYPE_WANCHESE)) + +#define bfa_mfg_increment_wwn_mac(m, i) \ +do { \ + u32 t = ((u32)(m)[0] << 16) | ((u32)(m)[1] << 8) | \ + (u32)(m)[2]; \ + t += (i); \ + (m)[0] = (t >> 16) & 0xFF; \ + (m)[1] = (t >> 8) & 0xFF; \ + (m)[2] = t & 0xFF; \ +} while (0) + +/* + * VPD data length + */ +#define BFA_MFG_VPD_LEN 512 + +/* + * VPD vendor tag + */ +enum { + BFA_MFG_VPD_UNKNOWN = 0, /* vendor unknown */ + BFA_MFG_VPD_IBM = 1, /* vendor IBM */ + BFA_MFG_VPD_HP = 2, /* vendor HP */ + BFA_MFG_VPD_DELL = 3, /* vendor DELL */ + BFA_MFG_VPD_PCI_IBM = 0x08, /* PCI VPD IBM */ + BFA_MFG_VPD_PCI_HP = 0x10, /* PCI VPD HP */ + BFA_MFG_VPD_PCI_DELL = 0x20, /* PCI VPD DELL */ + BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */ +}; + +/* + * All numerical fields are in big-endian format. + */ +struct bfa_mfg_vpd_s { + u8 version; /* vpd data version */ + u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */ + u8 chksum; /* u8 checksum */ + u8 vendor; /* vendor */ + u8 len; /* vpd data length excluding header */ + u8 rsv; + u8 data[BFA_MFG_VPD_LEN]; /* vpd data */ +}; + +#pragma pack() + +/* + * Status return values + */ +enum bfa_status { + BFA_STATUS_OK = 0, /* Success */ + BFA_STATUS_FAILED = 1, /* Operation failed */ + BFA_STATUS_EINVAL = 2, /* Invalid params Check input + * parameters */ + BFA_STATUS_ENOMEM = 3, /* Out of resources */ + BFA_STATUS_ETIMER = 5, /* Timer expired - Retry, if persists, + * contact support */ + BFA_STATUS_EPROTOCOL = 6, /* Protocol error */ + BFA_STATUS_BADFLASH = 9, /* Flash is bad */ + BFA_STATUS_SFP_UNSUPP = 10, /* Unsupported SFP - Replace SFP */ + BFA_STATUS_UNKNOWN_VFID = 11, /* VF_ID not found */ + BFA_STATUS_DATACORRUPTED = 12, /* Diag returned data corrupted */ + BFA_STATUS_DEVBUSY = 13, /* Device busy - Retry operation */ + BFA_STATUS_HDMA_FAILED = 16, /* Host dma failed contact support */ + BFA_STATUS_FLASH_BAD_LEN = 17, /* Flash bad length */ + BFA_STATUS_UNKNOWN_LWWN = 18, /* LPORT PWWN not found */ + BFA_STATUS_UNKNOWN_RWWN = 19, /* RPORT PWWN not found */ + BFA_STATUS_VPORT_EXISTS = 21, /* VPORT already exists */ + BFA_STATUS_VPORT_MAX = 22, /* Reached max VPORT supported limit */ + BFA_STATUS_UNSUPP_SPEED = 23, /* Invalid Speed Check speed setting */ + BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */ + BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */ + BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */ + BFA_STATUS_UNKNOWN_VWWN = 30, /* VPORT PWWN not found */ + BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */ + BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */ + BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port */ + BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the rport */ + BFA_STATUS_IOC_FAILURE = 56, /* IOC failure - Retry, if persists + * contact support */ + BFA_STATUS_INVALID_WWN = 57, /* Invalid WWN */ + BFA_STATUS_ADAPTER_ENABLED = 60, /* Adapter is not disabled */ + BFA_STATUS_IOC_NON_OP = 61, /* IOC is not operational */ + BFA_STATUS_VERSION_FAIL = 70, /* Application/Driver version mismatch */ + BFA_STATUS_DIAG_BUSY = 71, /* diag busy */ + BFA_STATUS_BEACON_ON = 72, /* Port Beacon already on */ + BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */ + BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */ + BFA_STATUS_ERROR_TRL_ENABLED = 87, /* TRL is enabled */ + BFA_STATUS_ERROR_QOS_ENABLED = 88, /* QoS is enabled */ + BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device check or replace SFP */ + BFA_STATUS_MEMTEST_FAILED = 
90, /* Memory test failed contact support */ + BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */ + BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */ + BFA_STATUS_CMD_NOTSUPP_CNA = 146, /* Command not supported for CNA */ + BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot + * configuration */ + BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */ + BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */ + BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */ + BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on + * this adapter */ + BFA_STATUS_TRUNK_DISABLED = 165, /* Trunking is disabled on + * the adapter */ + BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */ + BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */ + BFA_STATUS_FEATURE_NOT_SUPPORTED = 192, /* Feature not supported */ + BFA_STATUS_ENTRY_EXISTS = 193, /* Entry already exists */ + BFA_STATUS_ENTRY_NOT_EXISTS = 194, /* Entry does not exist */ + BFA_STATUS_NO_CHANGE = 195, /* Feature already in that state */ + BFA_STATUS_FAA_ENABLED = 197, /* FAA is already enabled */ + BFA_STATUS_FAA_DISABLED = 198, /* FAA is already disabled */ + BFA_STATUS_FAA_ACQUIRED = 199, /* FAA is already acquired */ + BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */ + BFA_STATUS_BBCR_FC_ONLY = 201, /*!< BBCredit Recovery is supported for * + * FC mode only */ + BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */ + BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */ + BFA_STATUS_TOPOLOGY_LOOP = 230, /* Topology is set to Loop */ + BFA_STATUS_LOOP_UNSUPP_MEZZ = 231, /* Loop topology is not supported + * on mezz cards */ + BFA_STATUS_INVALID_BW = 233, /* Invalid bandwidth value */ + BFA_STATUS_QOS_BW_INVALID = 234, /* Invalid QOS bandwidth + * configuration */ + BFA_STATUS_DPORT_ENABLED = 235, /* D-port mode is already enabled */ + BFA_STATUS_DPORT_DISABLED = 236, /* D-port mode is already disabled */ + BFA_STATUS_CMD_NOTSUPP_MEZZ = 239, /* Cmd not supported for MEZZ card */ + BFA_STATUS_FRU_NOT_PRESENT = 240, /* fru module not present */ + BFA_STATUS_DPORT_NO_SFP = 243, /* SFP is not present.\n D-port will be + * enabled but it will be operational + * only after inserting a valid SFP. */ + BFA_STATUS_DPORT_ERR = 245, /* D-port mode is enabled */ + BFA_STATUS_DPORT_ENOSYS = 254, /* Switch has no D_Port functionality */ + BFA_STATUS_DPORT_CANT_PERF = 255, /* Switch port is not D_Port capable + * or D_Port is disabled */ + BFA_STATUS_DPORT_LOGICALERR = 256, /* Switch D_Port fail */ + BFA_STATUS_DPORT_SWBUSY = 257, /* Switch port busy */ + BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT = 258, /*!< BB credit recovery is + * supported at max port speed alone */ + BFA_STATUS_ERROR_BBCR_ENABLED = 259, /*!< BB credit recovery + * is enabled */ + BFA_STATUS_INVALID_BBSCN = 260, /*!< Invalid BBSCN value. + * Valid range is [1-15] */ + BFA_STATUS_DDPORT_ERR = 261, /* Dynamic D_Port mode is active.\n To + * exit dynamic mode, disable D_Port on + * the remote port */ + BFA_STATUS_DPORT_SFPWRAP_ERR = 262, /* Clear e/o_wrap fail, check or + * replace SFP */ + BFA_STATUS_BBCR_CFG_NO_CHANGE = 265, /*!< BBCR is operational. + * Disable BBCR and try this operation again. */ + BFA_STATUS_DPORT_SW_NOTREADY = 268, /* Remote port is not ready to + * start dport test. Check remote + * port status. */ + BFA_STATUS_DPORT_INV_SFP = 271, /* Invalid SFP for D-PORT mode. 
*/ + BFA_STATUS_DPORT_CMD_NOTSUPP = 273, /* Dport is not supported by + * remote port */ + BFA_STATUS_MAX_VAL /* Unknown error code */ +}; +#define bfa_status_t enum bfa_status + +enum bfa_eproto_status { + BFA_EPROTO_BAD_ACCEPT = 0, + BFA_EPROTO_UNKNOWN_RSP = 1 +}; +#define bfa_eproto_status_t enum bfa_eproto_status + +enum bfa_boolean { + BFA_FALSE = 0, + BFA_TRUE = 1 +}; +#define bfa_boolean_t enum bfa_boolean + +#define BFA_STRING_32 32 +#define BFA_VERSION_LEN 64 + +/* + * ---------------------- adapter definitions ------------ + */ + +/* + * BFA adapter level attributes. + */ +enum { + BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE), + /* + *!< adapter serial num length + */ + BFA_ADAPTER_MODEL_NAME_LEN = 16, /* model name length */ + BFA_ADAPTER_MODEL_DESCR_LEN = 128, /* model description length */ + BFA_ADAPTER_MFG_NAME_LEN = 8, /* manufacturer name length */ + BFA_ADAPTER_SYM_NAME_LEN = 64, /* adapter symbolic name length */ + BFA_ADAPTER_OS_TYPE_LEN = 64, /* adapter os type length */ + BFA_ADAPTER_UUID_LEN = 16, /* adapter uuid length */ +}; + +struct bfa_adapter_attr_s { + char manufacturer[BFA_ADAPTER_MFG_NAME_LEN]; + char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; + u32 card_type; + char model[BFA_ADAPTER_MODEL_NAME_LEN]; + char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; + wwn_t pwwn; + char node_symname[FC_SYMNAME_MAX]; + char hw_ver[BFA_VERSION_LEN]; + char fw_ver[BFA_VERSION_LEN]; + char optrom_ver[BFA_VERSION_LEN]; + char os_type[BFA_ADAPTER_OS_TYPE_LEN]; + struct bfa_mfg_vpd_s vpd; + struct mac_s mac; + + u8 nports; + u8 max_speed; + u8 prototype; + char asic_rev; + + u8 pcie_gen; + u8 pcie_lanes_orig; + u8 pcie_lanes; + u8 cna_capable; + + u8 is_mezz; + u8 trunk_capable; + u8 mfg_day; /* manufacturing day */ + u8 mfg_month; /* manufacturing month */ + u16 mfg_year; /* manufacturing year */ + u16 rsvd; + u8 uuid[BFA_ADAPTER_UUID_LEN]; +}; + +/* + * ---------------------- IOC definitions ------------ + */ + +enum { + BFA_IOC_DRIVER_LEN = 16, + BFA_IOC_CHIP_REV_LEN = 8, +}; + +/* + * Driver and firmware versions. 
+ */ +struct bfa_ioc_driver_attr_s { + char driver[BFA_IOC_DRIVER_LEN]; /* driver name */ + char driver_ver[BFA_VERSION_LEN]; /* driver version */ + char fw_ver[BFA_VERSION_LEN]; /* firmware version */ + char bios_ver[BFA_VERSION_LEN]; /* bios version */ + char efi_ver[BFA_VERSION_LEN]; /* EFI version */ + char ob_ver[BFA_VERSION_LEN]; /* openboot version */ +}; + +/* + * IOC PCI device attributes + */ +struct bfa_ioc_pci_attr_s { + u16 vendor_id; /* PCI vendor ID */ + u16 device_id; /* PCI device ID */ + u16 ssid; /* subsystem ID */ + u16 ssvid; /* subsystem vendor ID */ + u32 pcifn; /* PCI device function */ + u32 rsvd; /* padding */ + char chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */ +}; + +/* + * IOC states + */ +enum bfa_ioc_state { + BFA_IOC_UNINIT = 1, /* IOC is in uninit state */ + BFA_IOC_RESET = 2, /* IOC is in reset state */ + BFA_IOC_SEMWAIT = 3, /* Waiting for IOC h/w semaphore */ + BFA_IOC_HWINIT = 4, /* IOC h/w is being initialized */ + BFA_IOC_GETATTR = 5, /* IOC is being configured */ + BFA_IOC_OPERATIONAL = 6, /* IOC is operational */ + BFA_IOC_INITFAIL = 7, /* IOC hardware failure */ + BFA_IOC_FAIL = 8, /* IOC heart-beat failure */ + BFA_IOC_DISABLING = 9, /* IOC is being disabled */ + BFA_IOC_DISABLED = 10, /* IOC is disabled */ + BFA_IOC_FWMISMATCH = 11, /* IOC f/w different from drivers */ + BFA_IOC_ENABLING = 12, /* IOC is being enabled */ + BFA_IOC_HWFAIL = 13, /* PCI mapping doesn't exist */ + BFA_IOC_ACQ_ADDR = 14, /* Acquiring addr from fabric */ +}; + +/* + * IOC firmware stats + */ +struct bfa_fw_ioc_stats_s { + u32 enable_reqs; + u32 disable_reqs; + u32 get_attr_reqs; + u32 dbg_sync; + u32 dbg_dump; + u32 unknown_reqs; +}; + +/* + * IOC driver stats + */ +struct bfa_ioc_drv_stats_s { + u32 ioc_isrs; + u32 ioc_enables; + u32 ioc_disables; + u32 ioc_hbfails; + u32 ioc_boots; + u32 stats_tmos; + u32 hb_count; + u32 disable_reqs; + u32 enable_reqs; + u32 disable_replies; + u32 enable_replies; + u32 rsvd; +}; + +/* + * IOC statistics + */ +struct bfa_ioc_stats_s { + struct bfa_ioc_drv_stats_s drv_stats; /* driver IOC stats */ + struct bfa_fw_ioc_stats_s fw_stats; /* firmware IOC stats */ +}; + +enum bfa_ioc_type_e { + BFA_IOC_TYPE_FC = 1, + BFA_IOC_TYPE_FCoE = 2, + BFA_IOC_TYPE_LL = 3, +}; + +/* + * IOC attributes returned in queries + */ +struct bfa_ioc_attr_s { + enum bfa_ioc_type_e ioc_type; + enum bfa_ioc_state state; /* IOC state */ + struct bfa_adapter_attr_s adapter_attr; /* HBA attributes */ + struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */ + struct bfa_ioc_pci_attr_s pci_attr; + u8 port_id; /* port number */ + u8 port_mode; /* bfa_mode_s */ + u8 cap_bm; /* capability */ + u8 port_mode_cfg; /* bfa_mode_s */ + u8 def_fn; /* 1 if default fn */ + u8 rsvd[3]; /* 64bit align */ +}; + +/* + * AEN related definitions + */ +enum bfa_aen_category { + BFA_AEN_CAT_ADAPTER = 1, + BFA_AEN_CAT_PORT = 2, + BFA_AEN_CAT_LPORT = 3, + BFA_AEN_CAT_RPORT = 4, + BFA_AEN_CAT_ITNIM = 5, + BFA_AEN_CAT_AUDIT = 8, + BFA_AEN_CAT_IOC = 9, +}; + +/* BFA adapter level events */ +enum bfa_adapter_aen_event { + BFA_ADAPTER_AEN_ADD = 1, /* New Adapter found event */ + BFA_ADAPTER_AEN_REMOVE = 2, /* Adapter removed event */ +}; + +struct bfa_adapter_aen_data_s { + char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; + u32 nports; /* Number of NPorts */ + wwn_t pwwn; /* WWN of one of its physical port */ +}; + +/* BFA physical port Level events */ +enum bfa_port_aen_event { + BFA_PORT_AEN_ONLINE = 1, /* Physical Port online event */ + BFA_PORT_AEN_OFFLINE = 2, /* Physical Port 
offline event */ + BFA_PORT_AEN_RLIR = 3, /* RLIR event, not supported */ + BFA_PORT_AEN_SFP_INSERT = 4, /* SFP inserted event */ + BFA_PORT_AEN_SFP_REMOVE = 5, /* SFP removed event */ + BFA_PORT_AEN_SFP_POM = 6, /* SFP POM event */ + BFA_PORT_AEN_ENABLE = 7, /* Physical Port enable event */ + BFA_PORT_AEN_DISABLE = 8, /* Physical Port disable event */ + BFA_PORT_AEN_AUTH_ON = 9, /* Physical Port auth success event */ + BFA_PORT_AEN_AUTH_OFF = 10, /* Physical Port auth fail event */ + BFA_PORT_AEN_DISCONNECT = 11, /* Physical Port disconnect event */ + BFA_PORT_AEN_QOS_NEG = 12, /* Base Port QOS negotiation event */ + BFA_PORT_AEN_FABRIC_NAME_CHANGE = 13, /* Fabric Name/WWN change */ + BFA_PORT_AEN_SFP_ACCESS_ERROR = 14, /* SFP read error event */ + BFA_PORT_AEN_SFP_UNSUPPORT = 15, /* Unsupported SFP event */ +}; + +enum bfa_port_aen_sfp_pom { + BFA_PORT_AEN_SFP_POM_GREEN = 1, /* Normal */ + BFA_PORT_AEN_SFP_POM_AMBER = 2, /* Warning */ + BFA_PORT_AEN_SFP_POM_RED = 3, /* Critical */ + BFA_PORT_AEN_SFP_POM_MAX = BFA_PORT_AEN_SFP_POM_RED +}; + +struct bfa_port_aen_data_s { + wwn_t pwwn; /* WWN of the physical port */ + wwn_t fwwn; /* WWN of the fabric port */ + u32 phy_port_num; /* For SFP related events */ + u16 ioc_type; + u16 level; /* Only transitions will be informed */ + mac_t mac; /* MAC address of the ethernet port */ + u16 rsvd; +}; + +/* BFA AEN logical port events */ +enum bfa_lport_aen_event { + BFA_LPORT_AEN_NEW = 1, /* LPort created event */ + BFA_LPORT_AEN_DELETE = 2, /* LPort deleted event */ + BFA_LPORT_AEN_ONLINE = 3, /* LPort online event */ + BFA_LPORT_AEN_OFFLINE = 4, /* LPort offline event */ + BFA_LPORT_AEN_DISCONNECT = 5, /* LPort disconnect event */ + BFA_LPORT_AEN_NEW_PROP = 6, /* VPort created event */ + BFA_LPORT_AEN_DELETE_PROP = 7, /* VPort deleted event */ + BFA_LPORT_AEN_NEW_STANDARD = 8, /* VPort created event */ + BFA_LPORT_AEN_DELETE_STANDARD = 9, /* VPort deleted event */ + BFA_LPORT_AEN_NPIV_DUP_WWN = 10, /* VPort with duplicate WWN */ + BFA_LPORT_AEN_NPIV_FABRIC_MAX = 11, /* Max NPIV in fabric/fport */ + BFA_LPORT_AEN_NPIV_UNKNOWN = 12, /* Unknown NPIV Error code */ +}; + +struct bfa_lport_aen_data_s { + u16 vf_id; /* vf_id of this logical port */ + u16 roles; /* Logical port mode,IM/TM/IP etc */ + u32 rsvd; + wwn_t ppwwn; /* WWN of its physical port */ + wwn_t lpwwn; /* WWN of this logical port */ +}; + +/* BFA ITNIM events */ +enum bfa_itnim_aen_event { + BFA_ITNIM_AEN_ONLINE = 1, /* Target online */ + BFA_ITNIM_AEN_OFFLINE = 2, /* Target offline */ + BFA_ITNIM_AEN_DISCONNECT = 3, /* Target disconnected */ +}; + +struct bfa_itnim_aen_data_s { + u16 vf_id; /* vf_id of the IT nexus */ + u16 rsvd[3]; + wwn_t ppwwn; /* WWN of its physical port */ + wwn_t lpwwn; /* WWN of logical port */ + wwn_t rpwwn; /* WWN of remote(target) port */ +}; + +/* BFA audit events */ +enum bfa_audit_aen_event { + BFA_AUDIT_AEN_AUTH_ENABLE = 1, + BFA_AUDIT_AEN_AUTH_DISABLE = 2, + BFA_AUDIT_AEN_FLASH_ERASE = 3, + BFA_AUDIT_AEN_FLASH_UPDATE = 4, +}; + +struct bfa_audit_aen_data_s { + wwn_t pwwn; + int partition_inst; + int partition_type; +}; + +/* BFA IOC level events */ +enum bfa_ioc_aen_event { + BFA_IOC_AEN_HBGOOD = 1, /* Heart Beat restore event */ + BFA_IOC_AEN_HBFAIL = 2, /* Heart Beat failure event */ + BFA_IOC_AEN_ENABLE = 3, /* IOC enabled event */ + BFA_IOC_AEN_DISABLE = 4, /* IOC disabled event */ + BFA_IOC_AEN_FWMISMATCH = 5, /* IOC firmware mismatch */ + BFA_IOC_AEN_FWCFG_ERROR = 6, /* IOC firmware config error */ + BFA_IOC_AEN_INVALID_VENDOR = 7, + 
BFA_IOC_AEN_INVALID_NWWN = 8, /* Zero NWWN */ + BFA_IOC_AEN_INVALID_PWWN = 9 /* Zero PWWN */ +}; + +struct bfa_ioc_aen_data_s { + wwn_t pwwn; + u16 ioc_type; + mac_t mac; +}; + +/* + * ---------------------- mfg definitions ------------ + */ + +/* + * Checksum size + */ +#define BFA_MFG_CHKSUM_SIZE 16 + +#define BFA_MFG_PARTNUM_SIZE 14 +#define BFA_MFG_SUPPLIER_ID_SIZE 10 +#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20 +#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20 +#define BFA_MFG_SUPPLIER_REVISION_SIZE 4 +/* + * Initial capability definition + */ +#define BFA_MFG_IC_FC 0x01 +#define BFA_MFG_IC_ETH 0x02 + +/* + * Adapter capability mask definition + */ +#define BFA_CM_HBA 0x01 +#define BFA_CM_CNA 0x02 +#define BFA_CM_NIC 0x04 +#define BFA_CM_FC16G 0x08 +#define BFA_CM_SRIOV 0x10 +#define BFA_CM_MEZZ 0x20 + +#pragma pack(1) + +/* + * All numerical fields are in big-endian format. + */ +struct bfa_mfg_block_s { + u8 version; /*!< manufacturing block version */ + u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */ + u16 mfgsize; /*!< mfg block size */ + u16 u16_chksum; /*!< old u16 checksum */ + char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; + char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)]; + u8 mfg_day; /*!< manufacturing day */ + u8 mfg_month; /*!< manufacturing month */ + u16 mfg_year; /*!< manufacturing year */ + wwn_t mfg_wwn; /*!< wwn base for this adapter */ + u8 num_wwn; /*!< number of wwns assigned */ + u8 mfg_speeds; /*!< speeds allowed for this adapter */ + u8 rsv[2]; + char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)]; + char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)]; + char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)]; + char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)]; + mac_t mfg_mac; /*!< base mac address */ + u8 num_mac; /*!< number of mac addresses */ + u8 rsv2; + u32 card_type; /*!< card type */ + char cap_nic; /*!< capability nic */ + char cap_cna; /*!< capability cna */ + char cap_hba; /*!< capability hba */ + char cap_fc16g; /*!< capability fc 16g */ + char cap_sriov; /*!< capability sriov */ + char cap_mezz; /*!< capability mezz */ + u8 rsv3; + u8 mfg_nports; /*!< number of ports */ + char media[8]; /*!< xfi/xaui */ + char initial_mode[8]; /*!< initial mode: hba/cna/nic */ + u8 rsv4[84]; + u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */ +}; + +#pragma pack() + +/* + * ---------------------- pci definitions ------------ + */ + +/* + * PCI device and vendor ID information + */ +enum { + BFA_PCI_VENDOR_ID_BROCADE = 0x1657, + BFA_PCI_DEVICE_ID_FC_8G2P = 0x13, + BFA_PCI_DEVICE_ID_FC_8G1P = 0x17, + BFA_PCI_DEVICE_ID_CT = 0x14, + BFA_PCI_DEVICE_ID_CT_FC = 0x21, + BFA_PCI_DEVICE_ID_CT2 = 0x22, + BFA_PCI_DEVICE_ID_CT2_QUAD = 0x23, +}; + +#define bfa_asic_id_cb(__d) \ + ((__d) == BFA_PCI_DEVICE_ID_FC_8G2P || \ + (__d) == BFA_PCI_DEVICE_ID_FC_8G1P) +#define bfa_asic_id_ct(__d) \ + ((__d) == BFA_PCI_DEVICE_ID_CT || \ + (__d) == BFA_PCI_DEVICE_ID_CT_FC) +#define bfa_asic_id_ct2(__d) \ + ((__d) == BFA_PCI_DEVICE_ID_CT2 || \ + (__d) == BFA_PCI_DEVICE_ID_CT2_QUAD) +#define bfa_asic_id_ctc(__d) \ + (bfa_asic_id_ct(__d) || bfa_asic_id_ct2(__d)) + +/* + * PCI sub-system device and vendor ID information + */ +enum { + BFA_PCI_FCOE_SSDEVICE_ID = 0x14, + BFA_PCI_CT2_SSID_FCoE = 0x22, + BFA_PCI_CT2_SSID_ETH = 0x23, + BFA_PCI_CT2_SSID_FC = 0x24, +}; + +/* + * Maximum number of device address ranges mapped through different BAR(s) + */ +#define BFA_PCI_ACCESS_RANGES 1 + +/* + * Port speed settings. Each specific speed is a bit field. 
Use multiple + * bits to specify speeds to be selected for auto-negotiation. + */ +enum bfa_port_speed { + BFA_PORT_SPEED_UNKNOWN = 0, + BFA_PORT_SPEED_1GBPS = 1, + BFA_PORT_SPEED_2GBPS = 2, + BFA_PORT_SPEED_4GBPS = 4, + BFA_PORT_SPEED_8GBPS = 8, + BFA_PORT_SPEED_10GBPS = 10, + BFA_PORT_SPEED_16GBPS = 16, + BFA_PORT_SPEED_AUTO = 0xf, +}; +#define bfa_port_speed_t enum bfa_port_speed + +enum { + BFA_BOOT_BOOTLUN_MAX = 4, /* maximum boot lun per IOC */ + BFA_PREBOOT_BOOTLUN_MAX = 8, /* maximum preboot lun per IOC */ +}; + +#define BOOT_CFG_REV1 1 +#define BOOT_CFG_VLAN 1 + +/* + * Boot options setting. Boot options setting determines from where + * to get the boot lun information + */ +enum bfa_boot_bootopt { + BFA_BOOT_AUTO_DISCOVER = 0, /* Boot from blun provided by fabric */ + BFA_BOOT_STORED_BLUN = 1, /* Boot from bluns stored in flash */ + BFA_BOOT_FIRST_LUN = 2, /* Boot from first discovered blun */ + BFA_BOOT_PBC = 3, /* Boot from pbc configured blun */ +}; + +#pragma pack(1) +/* + * Boot lun information. + */ +struct bfa_boot_bootlun_s { + wwn_t pwwn; /* port wwn of target */ + struct scsi_lun lun; /* 64-bit lun */ +}; +#pragma pack() + +/* + * BOOT boot configuraton + */ +struct bfa_boot_cfg_s { + u8 version; + u8 rsvd1; + u16 chksum; + u8 enable; /* enable/disable SAN boot */ + u8 speed; /* boot speed settings */ + u8 topology; /* boot topology setting */ + u8 bootopt; /* bfa_boot_bootopt_t */ + u32 nbluns; /* number of boot luns */ + u32 rsvd2; + struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX]; + struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX]; +}; + +struct bfa_boot_pbc_s { + u8 enable; /* enable/disable SAN boot */ + u8 speed; /* boot speed settings */ + u8 topology; /* boot topology setting */ + u8 rsvd1; + u32 nbluns; /* number of boot luns */ + struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX]; +}; + +struct bfa_ethboot_cfg_s { + u8 version; + u8 rsvd1; + u16 chksum; + u8 enable; /* enable/disable Eth/PXE boot */ + u8 rsvd2; + u16 vlan; +}; + +/* + * ASIC block configuration related structures + */ +#define BFA_ABLK_MAX_PORTS 2 +#define BFA_ABLK_MAX_PFS 16 +#define BFA_ABLK_MAX 2 + +#pragma pack(1) +enum bfa_mode_s { + BFA_MODE_HBA = 1, + BFA_MODE_CNA = 2, + BFA_MODE_NIC = 3 +}; + +struct bfa_adapter_cfg_mode_s { + u16 max_pf; + u16 max_vf; + enum bfa_mode_s mode; +}; + +struct bfa_ablk_cfg_pf_s { + u16 pers; + u8 port_id; + u8 optrom; + u8 valid; + u8 sriov; + u8 max_vfs; + u8 rsvd[1]; + u16 num_qpairs; + u16 num_vectors; + u16 bw_min; + u16 bw_max; +}; + +struct bfa_ablk_cfg_port_s { + u8 mode; + u8 type; + u8 max_pfs; + u8 rsvd[5]; +}; + +struct bfa_ablk_cfg_inst_s { + u8 nports; + u8 max_pfs; + u8 rsvd[6]; + struct bfa_ablk_cfg_pf_s pf_cfg[BFA_ABLK_MAX_PFS]; + struct bfa_ablk_cfg_port_s port_cfg[BFA_ABLK_MAX_PORTS]; +}; + +struct bfa_ablk_cfg_s { + struct bfa_ablk_cfg_inst_s inst[BFA_ABLK_MAX]; +}; + + +/* + * SFP module specific + */ +#define SFP_DIAGMON_SIZE 10 /* num bytes of diag monitor data */ + +/* SFP state change notification event */ +#define BFA_SFP_SCN_REMOVED 0 +#define BFA_SFP_SCN_INSERTED 1 +#define BFA_SFP_SCN_POM 2 +#define BFA_SFP_SCN_FAILED 3 +#define BFA_SFP_SCN_UNSUPPORT 4 +#define BFA_SFP_SCN_VALID 5 + +enum bfa_defs_sfp_media_e { + BFA_SFP_MEDIA_UNKNOWN = 0x00, + BFA_SFP_MEDIA_CU = 0x01, + BFA_SFP_MEDIA_LW = 0x02, + BFA_SFP_MEDIA_SW = 0x03, + BFA_SFP_MEDIA_EL = 0x04, + BFA_SFP_MEDIA_UNSUPPORT = 0x05, +}; + +/* + * values for xmtr_tech above + */ +enum { + SFP_XMTR_TECH_CU = (1 << 0), /* copper FC-BaseT */ + SFP_XMTR_TECH_CP = 
(1 << 1), /* copper passive */ + SFP_XMTR_TECH_CA = (1 << 2), /* copper active */ + SFP_XMTR_TECH_LL = (1 << 3), /* longwave laser */ + SFP_XMTR_TECH_SL = (1 << 4), /* shortwave laser w/ OFC */ + SFP_XMTR_TECH_SN = (1 << 5), /* shortwave laser w/o OFC */ + SFP_XMTR_TECH_EL_INTRA = (1 << 6), /* elec intra-enclosure */ + SFP_XMTR_TECH_EL_INTER = (1 << 7), /* elec inter-enclosure */ + SFP_XMTR_TECH_LC = (1 << 8), /* longwave laser */ + SFP_XMTR_TECH_SA = (1 << 9) +}; + +/* + * Serial ID: Data Fields -- Address A0h + * Basic ID field total 64 bytes + */ +struct sfp_srlid_base_s { + u8 id; /* 00: Identifier */ + u8 extid; /* 01: Extended Identifier */ + u8 connector; /* 02: Connector */ + u8 xcvr[8]; /* 03-10: Transceiver */ + u8 encoding; /* 11: Encoding */ + u8 br_norm; /* 12: BR, Nominal */ + u8 rate_id; /* 13: Rate Identifier */ + u8 len_km; /* 14: Length single mode km */ + u8 len_100m; /* 15: Length single mode 100m */ + u8 len_om2; /* 16: Length om2 fiber 10m */ + u8 len_om1; /* 17: Length om1 fiber 10m */ + u8 len_cu; /* 18: Length copper 1m */ + u8 len_om3; /* 19: Length om3 fiber 10m */ + u8 vendor_name[16];/* 20-35 */ + u8 unalloc1; + u8 vendor_oui[3]; /* 37-39 */ + u8 vendor_pn[16]; /* 40-55 */ + u8 vendor_rev[4]; /* 56-59 */ + u8 wavelen[2]; /* 60-61 */ + u8 unalloc2; + u8 cc_base; /* 63: check code for base id field */ +}; + +/* + * Serial ID: Data Fields -- Address A0h + * Extended id field total 32 bytes + */ +struct sfp_srlid_ext_s { + u8 options[2]; + u8 br_max; + u8 br_min; + u8 vendor_sn[16]; + u8 date_code[8]; + u8 diag_mon_type; /* 92: Diagnostic Monitoring type */ + u8 en_options; + u8 sff_8472; + u8 cc_ext; +}; + +/* + * Diagnostic: Data Fields -- Address A2h + * Diagnostic and control/status base field total 96 bytes + */ +struct sfp_diag_base_s { + /* + * Alarm and warning Thresholds 40 bytes + */ + u8 temp_high_alarm[2]; /* 00-01 */ + u8 temp_low_alarm[2]; /* 02-03 */ + u8 temp_high_warning[2]; /* 04-05 */ + u8 temp_low_warning[2]; /* 06-07 */ + + u8 volt_high_alarm[2]; /* 08-09 */ + u8 volt_low_alarm[2]; /* 10-11 */ + u8 volt_high_warning[2]; /* 12-13 */ + u8 volt_low_warning[2]; /* 14-15 */ + + u8 bias_high_alarm[2]; /* 16-17 */ + u8 bias_low_alarm[2]; /* 18-19 */ + u8 bias_high_warning[2]; /* 20-21 */ + u8 bias_low_warning[2]; /* 22-23 */ + + u8 tx_pwr_high_alarm[2]; /* 24-25 */ + u8 tx_pwr_low_alarm[2]; /* 26-27 */ + u8 tx_pwr_high_warning[2]; /* 28-29 */ + u8 tx_pwr_low_warning[2]; /* 30-31 */ + + u8 rx_pwr_high_alarm[2]; /* 32-33 */ + u8 rx_pwr_low_alarm[2]; /* 34-35 */ + u8 rx_pwr_high_warning[2]; /* 36-37 */ + u8 rx_pwr_low_warning[2]; /* 38-39 */ + + u8 unallocate_1[16]; + + /* + * ext_cal_const[36] + */ + u8 rx_pwr[20]; + u8 tx_i[4]; + u8 tx_pwr[4]; + u8 temp[4]; + u8 volt[4]; + u8 unallocate_2[3]; + u8 cc_dmi; +}; + +/* + * Diagnostic: Data Fields -- Address A2h + * Diagnostic and control/status extended field total 24 bytes + */ +struct sfp_diag_ext_s { + u8 diag[SFP_DIAGMON_SIZE]; + u8 unalloc1[4]; + u8 status_ctl; + u8 rsvd; + u8 alarm_flags[2]; + u8 unalloc2[2]; + u8 warning_flags[2]; + u8 ext_status_ctl[2]; +}; + +/* + * Diagnostic: Data Fields -- Address A2h + * General Use Fields: User Writable Table - Features's Control Registers + * Total 32 bytes + */ +struct sfp_usr_eeprom_s { + u8 rsvd1[2]; /* 128-129 */ + u8 ewrap; /* 130 */ + u8 rsvd2[2]; /* */ + u8 owrap; /* 133 */ + u8 rsvd3[2]; /* */ + u8 prbs; /* 136: PRBS 7 generator */ + u8 rsvd4[2]; /* */ + u8 tx_eqz_16; /* 139: TX Equalizer (16xFC) */ + u8 tx_eqz_8; /* 140: TX Equalizer (8xFC) */ + u8 
rsvd5[2]; /* */ + u8 rx_emp_16; /* 143: RX Emphasis (16xFC) */ + u8 rx_emp_8; /* 144: RX Emphasis (8xFC) */ + u8 rsvd6[2]; /* */ + u8 tx_eye_adj; /* 147: TX eye Threshold Adjust */ + u8 rsvd7[3]; /* */ + u8 tx_eye_qctl; /* 151: TX eye Quality Control */ + u8 tx_eye_qres; /* 152: TX eye Quality Result */ + u8 rsvd8[2]; /* */ + u8 poh[3]; /* 155-157: Power On Hours */ + u8 rsvd9[2]; /* */ +}; + +struct sfp_mem_s { + struct sfp_srlid_base_s srlid_base; + struct sfp_srlid_ext_s srlid_ext; + struct sfp_diag_base_s diag_base; + struct sfp_diag_ext_s diag_ext; + struct sfp_usr_eeprom_s usr_eeprom; +}; + +/* + * transceiver codes (SFF-8472 Rev 10.2 Table 3.5) + */ +union sfp_xcvr_e10g_code_u { + u8 b; + struct { +#ifdef __BIG_ENDIAN + u8 e10g_unall:1; /* 10G Ethernet compliance */ + u8 e10g_lrm:1; + u8 e10g_lr:1; + u8 e10g_sr:1; + u8 ib_sx:1; /* Infiniband compliance */ + u8 ib_lx:1; + u8 ib_cu_a:1; + u8 ib_cu_p:1; +#else + u8 ib_cu_p:1; + u8 ib_cu_a:1; + u8 ib_lx:1; + u8 ib_sx:1; /* Infiniband compliance */ + u8 e10g_sr:1; + u8 e10g_lr:1; + u8 e10g_lrm:1; + u8 e10g_unall:1; /* 10G Ethernet compliance */ +#endif + } r; +}; + +union sfp_xcvr_so1_code_u { + u8 b; + struct { + u8 escon:2; /* ESCON compliance code */ + u8 oc192_reach:1; /* SONET compliance code */ + u8 so_reach:2; + u8 oc48_reach:3; + } r; +}; + +union sfp_xcvr_so2_code_u { + u8 b; + struct { + u8 reserved:1; + u8 oc12_reach:3; /* OC12 reach */ + u8 reserved1:1; + u8 oc3_reach:3; /* OC3 reach */ + } r; +}; + +union sfp_xcvr_eth_code_u { + u8 b; + struct { + u8 base_px:1; + u8 base_bx10:1; + u8 e100base_fx:1; + u8 e100base_lx:1; + u8 e1000base_t:1; + u8 e1000base_cx:1; + u8 e1000base_lx:1; + u8 e1000base_sx:1; + } r; +}; + +struct sfp_xcvr_fc1_code_s { + u8 link_len:5; /* FC link length */ + u8 xmtr_tech2:3; + u8 xmtr_tech1:7; /* FC transmitter technology */ + u8 reserved1:1; +}; + +union sfp_xcvr_fc2_code_u { + u8 b; + struct { + u8 tw_media:1; /* twin axial pair (tw) */ + u8 tp_media:1; /* shielded twisted pair (sp) */ + u8 mi_media:1; /* miniature coax (mi) */ + u8 tv_media:1; /* video coax (tv) */ + u8 m6_media:1; /* multimode, 62.5m (m6) */ + u8 m5_media:1; /* multimode, 50m (m5) */ + u8 reserved:1; + u8 sm_media:1; /* single mode (sm) */ + } r; +}; + +union sfp_xcvr_fc3_code_u { + u8 b; + struct { +#ifdef __BIG_ENDIAN + u8 rsv4:1; + u8 mb800:1; /* 800 Mbytes/sec */ + u8 mb1600:1; /* 1600 Mbytes/sec */ + u8 mb400:1; /* 400 Mbytes/sec */ + u8 rsv2:1; + u8 mb200:1; /* 200 Mbytes/sec */ + u8 rsv1:1; + u8 mb100:1; /* 100 Mbytes/sec */ +#else + u8 mb100:1; /* 100 Mbytes/sec */ + u8 rsv1:1; + u8 mb200:1; /* 200 Mbytes/sec */ + u8 rsv2:1; + u8 mb400:1; /* 400 Mbytes/sec */ + u8 mb1600:1; /* 1600 Mbytes/sec */ + u8 mb800:1; /* 800 Mbytes/sec */ + u8 rsv4:1; +#endif + } r; +}; + +struct sfp_xcvr_s { + union sfp_xcvr_e10g_code_u e10g; + union sfp_xcvr_so1_code_u so1; + union sfp_xcvr_so2_code_u so2; + union sfp_xcvr_eth_code_u eth; + struct sfp_xcvr_fc1_code_s fc1; + union sfp_xcvr_fc2_code_u fc2; + union sfp_xcvr_fc3_code_u fc3; +}; + +/* + * Flash module specific + */ +#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */ +#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */ + +enum bfa_flash_part_type { + BFA_FLASH_PART_OPTROM = 1, /* option rom partition */ + BFA_FLASH_PART_FWIMG = 2, /* firmware image partition */ + BFA_FLASH_PART_FWCFG = 3, /* firmware tuneable config */ + BFA_FLASH_PART_DRV = 4, /* IOC driver config */ + BFA_FLASH_PART_BOOT = 5, /* boot config */ + BFA_FLASH_PART_ASIC = 6, /* asic bootstrap 
configuration */ + BFA_FLASH_PART_MFG = 7, /* manufacturing block partition */ + BFA_FLASH_PART_OPTROM2 = 8, /* 2nd option rom partition */ + BFA_FLASH_PART_VPD = 9, /* vpd data of OEM info */ + BFA_FLASH_PART_PBC = 10, /* pre-boot config */ + BFA_FLASH_PART_BOOTOVL = 11, /* boot overlay partition */ + BFA_FLASH_PART_LOG = 12, /* firmware log partition */ + BFA_FLASH_PART_PXECFG = 13, /* pxe boot config partition */ + BFA_FLASH_PART_PXEOVL = 14, /* pxe boot overlay partition */ + BFA_FLASH_PART_PORTCFG = 15, /* port cfg partition */ + BFA_FLASH_PART_ASICBK = 16, /* asic backup partition */ +}; + +/* + * flash partition attributes + */ +struct bfa_flash_part_attr_s { + u32 part_type; /* partition type */ + u32 part_instance; /* partition instance */ + u32 part_off; /* partition offset */ + u32 part_size; /* partition size */ + u32 part_len; /* partition content length */ + u32 part_status; /* partition status */ + char rsv[BFA_FLASH_PART_ENTRY_SIZE - 24]; +}; + +/* + * flash attributes + */ +struct bfa_flash_attr_s { + u32 status; /* flash overall status */ + u32 npart; /* num of partitions */ + struct bfa_flash_part_attr_s part[BFA_FLASH_PART_MAX]; +}; + +/* + * DIAG module specific + */ +#define LB_PATTERN_DEFAULT 0xB5B5B5B5 +#define QTEST_CNT_DEFAULT 10 +#define QTEST_PAT_DEFAULT LB_PATTERN_DEFAULT +#define DPORT_ENABLE_LOOPCNT_DEFAULT (1024 * 1024) + +struct bfa_diag_memtest_s { + u8 algo; + u8 rsvd[7]; +}; + +struct bfa_diag_memtest_result { + u32 status; + u32 addr; + u32 exp; /* expect value read from reg */ + u32 act; /* actually value read */ + u32 err_status; /* error status reg */ + u32 err_status1; /* extra error info reg */ + u32 err_addr; /* error address reg */ + u8 algo; + u8 rsv[3]; +}; + +struct bfa_diag_loopback_result_s { + u32 numtxmfrm; /* no. of transmit frame */ + u32 numosffrm; /* no. of outstanding frame */ + u32 numrcvfrm; /* no. of received good frame */ + u32 badfrminf; /* mis-match info */ + u32 badfrmnum; /* mis-match fram number */ + u8 status; /* loopback test result */ + u8 rsvd[3]; +}; + +enum bfa_diag_dport_test_status { + DPORT_TEST_ST_IDLE = 0, /* the test has not started yet. */ + DPORT_TEST_ST_FINAL = 1, /* the test done successfully */ + DPORT_TEST_ST_SKIP = 2, /* the test skipped */ + DPORT_TEST_ST_FAIL = 3, /* the test failed */ + DPORT_TEST_ST_INPRG = 4, /* the testing is in progress */ + DPORT_TEST_ST_RESPONDER = 5, /* test triggered from remote port */ + DPORT_TEST_ST_STOPPED = 6, /* the test stopped by user. 
*/ + DPORT_TEST_ST_MAX +}; + +enum bfa_diag_dport_test_type { + DPORT_TEST_ELOOP = 0, + DPORT_TEST_OLOOP = 1, + DPORT_TEST_ROLOOP = 2, + DPORT_TEST_LINK = 3, + DPORT_TEST_MAX +}; + +enum bfa_diag_dport_test_opmode { + BFA_DPORT_OPMODE_AUTO = 0, + BFA_DPORT_OPMODE_MANU = 1, +}; + +struct bfa_diag_dport_subtest_result_s { + u8 status; /* bfa_diag_dport_test_status */ + u8 rsvd[7]; /* 64bit align */ + u64 start_time; /* timestamp */ +}; + +struct bfa_diag_dport_result_s { + wwn_t rp_pwwn; /* switch port wwn */ + wwn_t rp_nwwn; /* switch node wwn */ + u64 start_time; /* user/sw start time */ + u64 end_time; /* timestamp */ + u8 status; /* bfa_diag_dport_test_status */ + u8 mode; /* bfa_diag_dport_test_opmode */ + u8 rsvd; /* 64bit align */ + u8 speed; /* link speed for buf_reqd */ + u16 buffer_required; + u16 frmsz; /* frame size for buf_reqd */ + u32 lpcnt; /* Frame count */ + u32 pat; /* Pattern */ + u32 roundtrip_latency; /* in nano sec */ + u32 est_cable_distance; /* in meter */ + struct bfa_diag_dport_subtest_result_s subtest[DPORT_TEST_MAX]; +}; + +struct bfa_diag_ledtest_s { + u32 cmd; /* bfa_led_op_t */ + u32 color; /* bfa_led_color_t */ + u16 freq; /* no. of blinks every 10 secs */ + u8 led; /* bitmap of LEDs to be tested */ + u8 rsvd[5]; +}; + +struct bfa_diag_loopback_s { + u32 loopcnt; + u32 pattern; + u8 lb_mode; /* bfa_port_opmode_t */ + u8 speed; /* bfa_port_speed_t */ + u8 rsvd[2]; +}; + +/* + * PHY module specific + */ +enum bfa_phy_status_e { + BFA_PHY_STATUS_GOOD = 0, /* phy is good */ + BFA_PHY_STATUS_NOT_PRESENT = 1, /* phy does not exist */ + BFA_PHY_STATUS_BAD = 2, /* phy is bad */ +}; + +/* + * phy attributes for phy query + */ +struct bfa_phy_attr_s { + u32 status; /* phy present/absent status */ + u32 length; /* firmware length */ + u32 fw_ver; /* firmware version */ + u32 an_status; /* AN status */ + u32 pma_pmd_status; /* PMA/PMD link status */ + u32 pma_pmd_signal; /* PMA/PMD signal detect */ + u32 pcs_status; /* PCS link status */ +}; + +/* + * phy stats + */ +struct bfa_phy_stats_s { + u32 status; /* phy stats status */ + u32 link_breaks; /* Num of link breaks after linkup */ + u32 pma_pmd_fault; /* NPMA/PMD fault */ + u32 pcs_fault; /* PCS fault */ + u32 speed_neg; /* Num of speed negotiation */ + u32 tx_eq_training; /* Num of TX EQ training */ + u32 tx_eq_timeout; /* Num of TX EQ timeout */ + u32 crc_error; /* Num of CRC errors */ +}; + +#pragma pack() + +#endif /* __BFA_DEFS_H__ */ diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h new file mode 100644 index 000000000..5e3662042 --- /dev/null +++ b/drivers/scsi/bfa/bfa_defs_fcs.h @@ -0,0 +1,471 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
+ */ + +#ifndef __BFA_DEFS_FCS_H__ +#define __BFA_DEFS_FCS_H__ + +#include "bfa_fc.h" +#include "bfa_defs_svc.h" + +/* + * VF states + */ +enum bfa_vf_state { + BFA_VF_UNINIT = 0, /* fabric is not yet initialized */ + BFA_VF_LINK_DOWN = 1, /* link is down */ + BFA_VF_FLOGI = 2, /* flogi is in progress */ + BFA_VF_AUTH = 3, /* authentication in progress */ + BFA_VF_NOFABRIC = 4, /* fabric is not present */ + BFA_VF_ONLINE = 5, /* login to fabric is complete */ + BFA_VF_EVFP = 6, /* EVFP is in progress */ + BFA_VF_ISOLATED = 7, /* port isolated due to vf_id mismatch */ +}; + +/* + * VF statistics + */ +struct bfa_vf_stats_s { + u32 flogi_sent; /* Num FLOGIs sent */ + u32 flogi_rsp_err; /* FLOGI response errors */ + u32 flogi_acc_err; /* FLOGI accept errors */ + u32 flogi_accepts; /* FLOGI accepts received */ + u32 flogi_rejects; /* FLOGI rejects received */ + u32 flogi_unknown_rsp; /* Unknown responses for FLOGI */ + u32 flogi_alloc_wait; /* Allocation waits prior to sending FLOGI */ + u32 flogi_rcvd; /* FLOGIs received */ + u32 flogi_rejected; /* Incoming FLOGIs rejected */ + u32 fabric_onlines; /* Internal fabric online notification sent + * to other modules */ + u32 fabric_offlines; /* Internal fabric offline notification sent + * to other modules */ + u32 resvd; /* padding for 64 bit alignment */ +}; + +/* + * VF attributes returned in queries + */ +struct bfa_vf_attr_s { + enum bfa_vf_state state; /* VF state */ + u32 rsvd; + wwn_t fabric_name; /* fabric name */ +}; + +#define BFA_FCS_MAX_LPORTS 256 +#define BFA_FCS_FABRIC_IPADDR_SZ 16 + +/* + * symbolic names for base port/virtual port + */ +#define BFA_SYMNAME_MAXLEN 128 /* 128 bytes */ +struct bfa_lport_symname_s { + char symname[BFA_SYMNAME_MAXLEN]; +}; + +/* +* Roles of FCS port: + * - FCP IM and FCP TM roles cannot be enabled together for a FCS port + * - Create multiple ports if both IM and TM functions required. + * - Atleast one role must be specified. + */ +enum bfa_lport_role { + BFA_LPORT_ROLE_FCP_IM = 0x01, /* FCP initiator role */ + BFA_LPORT_ROLE_FCP_MAX = BFA_LPORT_ROLE_FCP_IM, +}; + +/* + * FCS port configuration. + */ +struct bfa_lport_cfg_s { + wwn_t pwwn; /* port wwn */ + wwn_t nwwn; /* node wwn */ + struct bfa_lport_symname_s sym_name; /* vm port symbolic name */ + struct bfa_lport_symname_s node_sym_name; /* Node symbolic name */ + enum bfa_lport_role roles; /* FCS port roles */ + u32 rsvd; + bfa_boolean_t preboot_vp; /* vport created from PBC */ + u8 tag[16]; /* opaque tag from application */ + u8 padding[4]; +}; + +/* + * FCS port states + */ +enum bfa_lport_state { + BFA_LPORT_UNINIT = 0, /* PORT is not yet initialized */ + BFA_LPORT_FDISC = 1, /* FDISC is in progress */ + BFA_LPORT_ONLINE = 2, /* login to fabric is complete */ + BFA_LPORT_OFFLINE = 3, /* No login to fabric */ +}; + +/* + * FCS port type. + */ +enum bfa_lport_type { + BFA_LPORT_TYPE_PHYSICAL = 0, + BFA_LPORT_TYPE_VIRTUAL, +}; + +/* + * FCS port offline reason. + */ +enum bfa_lport_offline_reason { + BFA_LPORT_OFFLINE_UNKNOWN = 0, + BFA_LPORT_OFFLINE_LINKDOWN, + BFA_LPORT_OFFLINE_FAB_UNSUPPORTED, /* NPIV not supported by the + * fabric */ + BFA_LPORT_OFFLINE_FAB_NORESOURCES, + BFA_LPORT_OFFLINE_FAB_LOGOUT, +}; + +/* + * FCS lport info. 
+ */ +struct bfa_lport_info_s { + u8 port_type; /* bfa_lport_type_t : physical or + * virtual */ + u8 port_state; /* one of bfa_lport_state values */ + u8 offline_reason; /* one of bfa_lport_offline_reason_t + * values */ + wwn_t port_wwn; + wwn_t node_wwn; + + /* + * following 4 feilds are valid for Physical Ports only + */ + u32 max_vports_supp; /* Max supported vports */ + u32 num_vports_inuse; /* Num of in use vports */ + u32 max_rports_supp; /* Max supported rports */ + u32 num_rports_inuse; /* Num of doscovered rports */ + +}; + +/* + * FCS port statistics + */ +struct bfa_lport_stats_s { + u32 ns_plogi_sent; + u32 ns_plogi_rsp_err; + u32 ns_plogi_acc_err; + u32 ns_plogi_accepts; + u32 ns_rejects; /* NS command rejects */ + u32 ns_plogi_unknown_rsp; + u32 ns_plogi_alloc_wait; + + u32 ns_retries; /* NS command retries */ + u32 ns_timeouts; /* NS command timeouts */ + + u32 ns_rspnid_sent; + u32 ns_rspnid_accepts; + u32 ns_rspnid_rsp_err; + u32 ns_rspnid_rejects; + u32 ns_rspnid_alloc_wait; + + u32 ns_rftid_sent; + u32 ns_rftid_accepts; + u32 ns_rftid_rsp_err; + u32 ns_rftid_rejects; + u32 ns_rftid_alloc_wait; + + u32 ns_rffid_sent; + u32 ns_rffid_accepts; + u32 ns_rffid_rsp_err; + u32 ns_rffid_rejects; + u32 ns_rffid_alloc_wait; + + u32 ns_gidft_sent; + u32 ns_gidft_accepts; + u32 ns_gidft_rsp_err; + u32 ns_gidft_rejects; + u32 ns_gidft_unknown_rsp; + u32 ns_gidft_alloc_wait; + + u32 ns_rnnid_sent; + u32 ns_rnnid_accepts; + u32 ns_rnnid_rsp_err; + u32 ns_rnnid_rejects; + u32 ns_rnnid_alloc_wait; + + u32 ns_rsnn_nn_sent; + u32 ns_rsnn_nn_accepts; + u32 ns_rsnn_nn_rsp_err; + u32 ns_rsnn_nn_rejects; + u32 ns_rsnn_nn_alloc_wait; + + /* + * Mgmt Server stats + */ + u32 ms_retries; /* MS command retries */ + u32 ms_timeouts; /* MS command timeouts */ + u32 ms_plogi_sent; + u32 ms_plogi_rsp_err; + u32 ms_plogi_acc_err; + u32 ms_plogi_accepts; + u32 ms_rejects; /* MS command rejects */ + u32 ms_plogi_unknown_rsp; + u32 ms_plogi_alloc_wait; + + u32 num_rscn; /* Num of RSCN received */ + u32 num_portid_rscn;/* Num portid format RSCN + * received */ + + u32 uf_recvs; /* Unsolicited recv frames */ + u32 uf_recv_drops; /* Dropped received frames */ + + u32 plogi_rcvd; /* Received plogi */ + u32 prli_rcvd; /* Received prli */ + u32 adisc_rcvd; /* Received adisc */ + u32 prlo_rcvd; /* Received prlo */ + u32 logo_rcvd; /* Received logo */ + u32 rpsc_rcvd; /* Received rpsc */ + u32 un_handled_els_rcvd; /* Received unhandled ELS */ + u32 rport_plogi_timeouts; /* Rport plogi retry timeout count */ + u32 rport_del_max_plogi_retry; /* Deleted rport + * (max retry of plogi) */ +}; + +/* + * BFA port attribute returned in queries + */ +struct bfa_lport_attr_s { + enum bfa_lport_state state; /* port state */ + u32 pid; /* port ID */ + struct bfa_lport_cfg_s port_cfg; /* port configuration */ + enum bfa_port_type port_type; /* current topology */ + u32 loopback; /* cable is externally looped back */ + wwn_t fabric_name; /* attached switch's nwwn */ + u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached + * fabric's ip addr */ + mac_t fpma_mac; /* Lport's FPMA Mac address */ + u16 authfail; /* auth failed state */ +}; + + +/* + * VPORT states + */ +enum bfa_vport_state { + BFA_FCS_VPORT_UNINIT = 0, + BFA_FCS_VPORT_CREATED = 1, + BFA_FCS_VPORT_OFFLINE = 1, + BFA_FCS_VPORT_FDISC_SEND = 2, + BFA_FCS_VPORT_FDISC = 3, + BFA_FCS_VPORT_FDISC_RETRY = 4, + BFA_FCS_VPORT_FDISC_RSP_WAIT = 5, + BFA_FCS_VPORT_ONLINE = 6, + BFA_FCS_VPORT_DELETING = 7, + BFA_FCS_VPORT_CLEANUP = 8, + BFA_FCS_VPORT_LOGO_SEND = 9, + 
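/* The FDISC_* states above track the NPIV fabric login (FDISC) exchange; LOGO_SEND/LOGO cover the logout path taken when a logged-in vport is being deleted. */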
BFA_FCS_VPORT_LOGO = 10, + BFA_FCS_VPORT_ERROR = 11, + BFA_FCS_VPORT_MAX_STATE, +}; + +/* + * vport statistics + */ +struct bfa_vport_stats_s { + struct bfa_lport_stats_s port_stats; /* base class (port) stats */ + /* + * TODO - remove + */ + + u32 fdisc_sent; /* num fdisc sent */ + u32 fdisc_accepts; /* fdisc accepts */ + u32 fdisc_retries; /* fdisc retries */ + u32 fdisc_timeouts; /* fdisc timeouts */ + u32 fdisc_rsp_err; /* fdisc response error */ + u32 fdisc_acc_bad; /* bad fdisc accepts */ + u32 fdisc_rejects; /* fdisc rejects */ + u32 fdisc_unknown_rsp; + /* + *!< fdisc rsp unknown error + */ + u32 fdisc_alloc_wait;/* fdisc req (fcxp)alloc wait */ + + u32 logo_alloc_wait;/* logo req (fcxp) alloc wait */ + u32 logo_sent; /* logo sent */ + u32 logo_accepts; /* logo accepts */ + u32 logo_rejects; /* logo rejects */ + u32 logo_rsp_err; /* logo rsp errors */ + u32 logo_unknown_rsp; + /* logo rsp unknown errors */ + + u32 fab_no_npiv; /* fabric does not support npiv */ + + u32 fab_offline; /* offline events from fab SM */ + u32 fab_online; /* online events from fab SM */ + u32 fab_cleanup; /* cleanup request from fab SM */ + u32 rsvd; +}; + +/* + * BFA vport attribute returned in queries + */ +struct bfa_vport_attr_s { + struct bfa_lport_attr_s port_attr; /* base class (port) attributes */ + enum bfa_vport_state vport_state; /* vport state */ + u32 rsvd; +}; + +/* + * FCS remote port states + */ +enum bfa_rport_state { + BFA_RPORT_UNINIT = 0, /* PORT is not yet initialized */ + BFA_RPORT_OFFLINE = 1, /* rport is offline */ + BFA_RPORT_PLOGI = 2, /* PLOGI to rport is in progress */ + BFA_RPORT_ONLINE = 3, /* login to rport is complete */ + BFA_RPORT_PLOGI_RETRY = 4, /* retrying login to rport */ + BFA_RPORT_NSQUERY = 5, /* nameserver query */ + BFA_RPORT_ADISC = 6, /* ADISC authentication */ + BFA_RPORT_LOGO = 7, /* logging out with rport */ + BFA_RPORT_LOGORCV = 8, /* handling LOGO from rport */ + BFA_RPORT_NSDISC = 9, /* re-discover rport */ +}; + +/* + * Rport Scsi Function : Initiator/Target. 
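 * Reported per remote port in bfa_rport_attr_s.scsi_function below.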
+ */ +enum bfa_rport_function { + BFA_RPORT_INITIATOR = 0x01, /* SCSI Initiator */ + BFA_RPORT_TARGET = 0x02, /* SCSI Target */ +}; + +/* + * port/node symbolic names for rport + */ +#define BFA_RPORT_SYMNAME_MAXLEN 255 +struct bfa_rport_symname_s { + char symname[BFA_RPORT_SYMNAME_MAXLEN]; +}; + +/* + * FCS remote port statistics + */ +struct bfa_rport_stats_s { + u32 offlines; /* remote port offline count */ + u32 onlines; /* remote port online count */ + u32 rscns; /* RSCN affecting rport */ + u32 plogis; /* plogis sent */ + u32 plogi_accs; /* plogi accepts */ + u32 plogi_timeouts; /* plogi timeouts */ + u32 plogi_rejects; /* rcvd plogi rejects */ + u32 plogi_failed; /* local failure */ + u32 plogi_rcvd; /* plogis rcvd */ + u32 prli_rcvd; /* inbound PRLIs */ + u32 adisc_rcvd; /* ADISCs received */ + u32 adisc_rejects; /* recvd ADISC rejects */ + u32 adisc_sent; /* ADISC requests sent */ + u32 adisc_accs; /* ADISC accepted by rport */ + u32 adisc_failed; /* ADISC failed (no response) */ + u32 adisc_rejected; /* ADISC rejected by us */ + u32 logos; /* logos sent */ + u32 logo_accs; /* LOGO accepts from rport */ + u32 logo_failed; /* LOGO failures */ + u32 logo_rejected; /* LOGO rejects from rport */ + u32 logo_rcvd; /* LOGO from remote port */ + + u32 rpsc_rcvd; /* RPSC received */ + u32 rpsc_rejects; /* recvd RPSC rejects */ + u32 rpsc_sent; /* RPSC requests sent */ + u32 rpsc_accs; /* RPSC accepted by rport */ + u32 rpsc_failed; /* RPSC failed (no response) */ + u32 rpsc_rejected; /* RPSC rejected by us */ + + u32 rjt_insuff_res; /* LS RJT with insuff resources */ + struct bfa_rport_hal_stats_s hal_stats; /* BFA rport stats */ +}; + +/* + * FCS remote port attributes returned in queries + */ +struct bfa_rport_attr_s { + wwn_t nwwn; /* node wwn */ + wwn_t pwwn; /* port wwn */ + enum fc_cos cos_supported; /* supported class of services */ + u32 pid; /* port ID */ + u32 df_sz; /* Max payload size */ + enum bfa_rport_state state; /* Rport State machine state */ + enum fc_cos fc_cos; /* FC classes of services */ + bfa_boolean_t cisc; /* CISC capable device */ + struct bfa_rport_symname_s symname; /* Symbolic Name */ + enum bfa_rport_function scsi_function; /* Initiator/Target */ + struct bfa_rport_qos_attr_s qos_attr; /* qos attributes */ + enum bfa_port_speed curr_speed; /* operating speed got from + * RPSC ELS. UNKNOWN, if RPSC + * is not supported */ + bfa_boolean_t trl_enforced; /* TRL enforced ? TRUE/FALSE */ + enum bfa_port_speed assigned_speed; /* Speed assigned by the user. 
+ * will be used if RPSC is not + * supported by the rport */ +}; + +struct bfa_rport_remote_link_stats_s { + u32 lfc; /* Link Failure Count */ + u32 lsyc; /* Loss of Synchronization Count */ + u32 lsic; /* Loss of Signal Count */ + u32 pspec; /* Primitive Sequence Protocol Error Count */ + u32 itwc; /* Invalid Transmission Word Count */ + u32 icc; /* Invalid CRC Count */ +}; + +struct bfa_rport_qualifier_s { + wwn_t pwwn; /* Port WWN */ + u32 pid; /* port ID */ + u32 rsvd; +}; + +#define BFA_MAX_IO_INDEX 7 +#define BFA_NO_IO_INDEX 9 + +/* + * FCS itnim states + */ +enum bfa_itnim_state { + BFA_ITNIM_OFFLINE = 0, /* offline */ + BFA_ITNIM_PRLI_SEND = 1, /* prli send */ + BFA_ITNIM_PRLI_SENT = 2, /* prli sent */ + BFA_ITNIM_PRLI_RETRY = 3, /* prli retry */ + BFA_ITNIM_HCB_ONLINE = 4, /* online callback */ + BFA_ITNIM_ONLINE = 5, /* online */ + BFA_ITNIM_HCB_OFFLINE = 6, /* offline callback */ + BFA_ITNIM_INITIATIOR = 7, /* initiator */ +}; + +/* + * FCS remote port statistics + */ +struct bfa_itnim_stats_s { + u32 onlines; /* num rport online */ + u32 offlines; /* num rport offline */ + u32 prli_sent; /* num prli sent out */ + u32 fcxp_alloc_wait;/* num fcxp alloc waits */ + u32 prli_rsp_err; /* num prli rsp errors */ + u32 prli_rsp_acc; /* num prli rsp accepts */ + u32 initiator; /* rport is an initiator */ + u32 prli_rsp_parse_err; /* prli rsp parsing errors */ + u32 prli_rsp_rjt; /* num prli rsp rejects */ + u32 timeout; /* num timeouts detected */ + u32 sler; /* num sler notification from BFA */ + u32 rsvd; /* padding for 64 bit alignment */ +}; + +/* + * FCS itnim attributes returned in queries + */ +struct bfa_itnim_attr_s { + enum bfa_itnim_state state; /* FCS itnim state */ + u8 retry; /* data retransmision support */ + u8 task_retry_id; /* task retry ident support */ + u8 rec_support; /* REC supported */ + u8 conf_comp; /* confirmed completion supp */ +}; + +#endif /* __BFA_DEFS_FCS_H__ */ diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h new file mode 100644 index 000000000..f2c49f0e5 --- /dev/null +++ b/drivers/scsi/bfa/bfa_defs_svc.h @@ -0,0 +1,1456 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +#ifndef __BFA_DEFS_SVC_H__ +#define __BFA_DEFS_SVC_H__ + +#include "bfa_defs.h" +#include "bfa_fc.h" +#include "bfi.h" + +#define BFA_IOCFC_INTR_DELAY 1125 +#define BFA_IOCFC_INTR_LATENCY 225 +#define BFA_IOCFCOE_INTR_DELAY 25 +#define BFA_IOCFCOE_INTR_LATENCY 5 + +/* + * Interrupt coalescing configuration. 
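 * When coalescing is enabled, the delay and latency fields (both in microseconds) control how long completions may be batched before an interrupt is raised.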
+ */ +#pragma pack(1) +struct bfa_iocfc_intr_attr_s { + u8 coalesce; /* enable/disable coalescing */ + u8 rsvd[3]; + __be16 latency; /* latency in microseconds */ + __be16 delay; /* delay in microseconds */ +}; + +/* + * IOC firmware configuraton + */ +struct bfa_iocfc_fwcfg_s { + u16 num_fabrics; /* number of fabrics */ + u16 num_lports; /* number of local lports */ + u16 num_rports; /* number of remote ports */ + u16 num_ioim_reqs; /* number of IO reqs */ + u16 num_tskim_reqs; /* task management requests */ + u16 num_fwtio_reqs; /* number of TM IO reqs in FW */ + u16 num_fcxp_reqs; /* unassisted FC exchanges */ + u16 num_uf_bufs; /* unsolicited recv buffers */ + u8 num_cqs; + u8 fw_tick_res; /* FW clock resolution in ms */ + u8 rsvd[6]; +}; +#pragma pack() + +struct bfa_iocfc_drvcfg_s { + u16 num_reqq_elems; /* number of req queue elements */ + u16 num_rspq_elems; /* number of rsp queue elements */ + u16 num_sgpgs; /* number of total SG pages */ + u16 num_sboot_tgts; /* number of SAN boot targets */ + u16 num_sboot_luns; /* number of SAN boot luns */ + u16 ioc_recover; /* IOC recovery mode */ + u16 min_cfg; /* minimum configuration */ + u16 path_tov; /* device path timeout */ + u16 num_tio_reqs; /* number of TM IO reqs */ + u8 port_mode; + u8 rsvd_a; + bfa_boolean_t delay_comp; /* delay completion of failed + * inflight IOs */ + u16 num_ttsk_reqs; /* TM task management requests */ + u32 rsvd; +}; + +/* + * IOC configuration + */ +struct bfa_iocfc_cfg_s { + struct bfa_iocfc_fwcfg_s fwcfg; /* firmware side config */ + struct bfa_iocfc_drvcfg_s drvcfg; /* driver side config */ +}; + +/* + * IOC firmware IO stats + */ +struct bfa_fw_ioim_stats_s { + u32 host_abort; /* IO aborted by host driver*/ + u32 host_cleanup; /* IO clean up by host driver */ + + u32 fw_io_timeout; /* IOs timedout */ + u32 fw_frm_parse; /* frame parsed by f/w */ + u32 fw_frm_data; /* fcp_data frame parsed by f/w */ + u32 fw_frm_rsp; /* fcp_rsp frame parsed by f/w */ + u32 fw_frm_xfer_rdy; /* xfer_rdy frame parsed by f/w */ + u32 fw_frm_bls_acc; /* BLS ACC frame parsed by f/w */ + u32 fw_frm_tgt_abort; /* target ABTS parsed by f/w */ + u32 fw_frm_unknown; /* unknown parsed by f/w */ + u32 fw_data_dma; /* f/w DMA'ed the data frame */ + u32 fw_frm_drop; /* f/w drop the frame */ + + u32 rec_timeout; /* FW rec timed out */ + u32 error_rec; /* FW sending rec on + * an error condition*/ + u32 wait_for_si; /* FW wait for SI */ + u32 rec_rsp_inval; /* REC rsp invalid */ + u32 rec_rsp_xchg_comp; /* REC rsp xchg complete */ + u32 rec_rsp_rd_si_ownd; /* REC rsp read si owned */ + + u32 seqr_io_abort; /* target does not know cmd so abort */ + u32 seqr_io_retry; /* SEQR failed so retry IO */ + + u32 itn_cisc_upd_rsp; /* ITN cisc updated on fcp_rsp */ + u32 itn_cisc_upd_data; /* ITN cisc updated on fcp_data */ + u32 itn_cisc_upd_xfer_rdy; /* ITN cisc updated on fcp_data */ + + u32 fcp_data_lost; /* fcp data lost */ + + u32 ro_set_in_xfer_rdy; /* Target set RO in Xfer_rdy frame */ + u32 xfer_rdy_ooo_err; /* Out of order Xfer_rdy received */ + u32 xfer_rdy_unknown_err; /* unknown error in xfer_rdy frame */ + + u32 io_abort_timeout; /* ABTS timedout */ + u32 sler_initiated; /* SLER initiated */ + + u32 unexp_fcp_rsp; /* fcp response in wrong state */ + + u32 fcp_rsp_under_run; /* fcp rsp IO underrun */ + u32 fcp_rsp_under_run_wr; /* fcp rsp IO underrun for write */ + u32 fcp_rsp_under_run_err; /* fcp rsp IO underrun error */ + u32 fcp_rsp_resid_inval; /* invalid residue */ + u32 fcp_rsp_over_run; /* fcp rsp IO overrun */ + u32 
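/* The fcp_rsp_* counters group FCP_RSP IU problems seen by the firmware: data under-runs and over-runs, invalid residuals, protocol errors and bad sense data. */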
fcp_rsp_over_run_err; /* fcp rsp IO overrun error */ + u32 fcp_rsp_proto_err; /* protocol error in fcp rsp */ + u32 fcp_rsp_sense_err; /* error in sense info in fcp rsp */ + u32 fcp_conf_req; /* FCP conf requested */ + + u32 tgt_aborted_io; /* target initiated abort */ + + u32 ioh_edtov_timeout_event;/* IOH edtov timer popped */ + u32 ioh_fcp_rsp_excp_event; /* IOH FCP_RSP exception */ + u32 ioh_fcp_conf_event; /* IOH FCP_CONF */ + u32 ioh_mult_frm_rsp_event; /* IOH multi_frame FCP_RSP */ + u32 ioh_hit_class2_event; /* IOH hit class2 */ + u32 ioh_miss_other_event; /* IOH miss other */ + u32 ioh_seq_cnt_err_event; /* IOH seq cnt error */ + u32 ioh_len_err_event; /* IOH len error - fcp_dl != + * bytes xfered */ + u32 ioh_seq_len_err_event; /* IOH seq len error */ + u32 ioh_data_oor_event; /* Data out of range */ + u32 ioh_ro_ooo_event; /* Relative offset out of range */ + u32 ioh_cpu_owned_event; /* IOH hit -iost owned by f/w */ + u32 ioh_unexp_frame_event; /* unexpected frame received + * count */ + u32 ioh_err_int; /* IOH error int during data-phase + * for scsi write */ +}; + +struct bfa_fw_tio_stats_s { + u32 tio_conf_proc; /* TIO CONF processed */ + u32 tio_conf_drop; /* TIO CONF dropped */ + u32 tio_cleanup_req; /* TIO cleanup requested */ + u32 tio_cleanup_comp; /* TIO cleanup completed */ + u32 tio_abort_rsp; /* TIO abort response */ + u32 tio_abort_rsp_comp; /* TIO abort rsp completed */ + u32 tio_abts_req; /* TIO ABTS requested */ + u32 tio_abts_ack; /* TIO ABTS ack-ed */ + u32 tio_abts_ack_nocomp;/* TIO ABTS ack-ed but not completed */ + u32 tio_abts_tmo; /* TIO ABTS timeout */ + u32 tio_snsdata_dma; /* TIO sense data DMA */ + u32 tio_rxwchan_wait; /* TIO waiting for RX wait channel */ + u32 tio_rxwchan_avail; /* TIO RX wait channel available */ + u32 tio_hit_bls; /* TIO IOH BLS event */ + u32 tio_uf_recv; /* TIO received UF */ + u32 tio_rd_invalid_sm; /* TIO read reqst in wrong state machine */ + u32 tio_wr_invalid_sm; /* TIO write reqst in wrong state machine */ + + u32 ds_rxwchan_wait; /* DS waiting for RX wait channel */ + u32 ds_rxwchan_avail; /* DS RX wait channel available */ + u32 ds_unaligned_rd; /* DS unaligned read */ + u32 ds_rdcomp_invalid_sm; /* DS read completed in wrong state + * machine */ + u32 ds_wrcomp_invalid_sm; /* DS write completed in wrong state + * machine */ + u32 ds_flush_req; /* DS flush requested */ + u32 ds_flush_comp; /* DS flush completed */ + u32 ds_xfrdy_exp; /* DS XFER_RDY expired */ + u32 ds_seq_cnt_err; /* DS seq cnt error */ + u32 ds_seq_len_err; /* DS seq len error */ + u32 ds_data_oor; /* DS data out of order */ + u32 ds_hit_bls; /* DS hit BLS */ + u32 ds_edtov_timer_exp; /* DS edtov expired */ + u32 ds_cpu_owned; /* DS cpu owned */ + u32 ds_hit_class2; /* DS hit class2 */ + u32 ds_length_err; /* DS length error */ + u32 ds_ro_ooo_err; /* DS relative offset out-of-order error */ + u32 ds_rectov_timer_exp;/* DS rectov expired */ + u32 ds_unexp_fr_err; /* DS unexp frame error */ +}; + +/* + * IOC firmware IO stats + */ +struct bfa_fw_io_stats_s { + struct bfa_fw_ioim_stats_s ioim_stats; + struct bfa_fw_tio_stats_s tio_stats; +}; + +/* + * IOC port firmware stats + */ + +struct bfa_fw_port_fpg_stats_s { + u32 intr_evt; + u32 intr; + u32 intr_excess; + u32 intr_cause0; + u32 intr_other; + u32 intr_other_ign; + u32 sig_lost; + u32 sig_regained; + u32 sync_lost; + u32 sync_to; + u32 sync_regained; + u32 div2_overflow; + u32 div2_underflow; + u32 efifo_overflow; + u32 efifo_underflow; + u32 idle_rx; + u32 lrr_rx; + u32 lr_rx; + u32 ols_rx; + u32 
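/* The *_rx fields count received FC ordered sets and primitives (IDLE, LRR, LR, OLS, NOS, LIP, ARB/ARB(F0), MRK); prim_unknown is the catch-all for unrecognized primitives. */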
nos_rx; + u32 lip_rx; + u32 arbf0_rx; + u32 arb_rx; + u32 mrk_rx; + u32 const_mrk_rx; + u32 prim_unknown; +}; + + +struct bfa_fw_port_lksm_stats_s { + u32 hwsm_success; /* hwsm state machine success */ + u32 hwsm_fails; /* hwsm fails */ + u32 hwsm_wdtov; /* hwsm timed out */ + u32 swsm_success; /* swsm success */ + u32 swsm_fails; /* swsm fails */ + u32 swsm_wdtov; /* swsm timed out */ + u32 busybufs; /* link init failed due to busybuf */ + u32 buf_waits; /* bufwait state entries */ + u32 link_fails; /* link failures */ + u32 psp_errors; /* primitive sequence protocol errors */ + u32 lr_unexp; /* No. of times LR rx-ed unexpectedly */ + u32 lrr_unexp; /* No. of times LRR rx-ed unexpectedly */ + u32 lr_tx; /* No. of times LR tx started */ + u32 lrr_tx; /* No. of times LRR tx started */ + u32 ols_tx; /* No. of times OLS tx started */ + u32 nos_tx; /* No. of times NOS tx started */ + u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */ + u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */ +}; + +struct bfa_fw_port_snsm_stats_s { + u32 hwsm_success; /* Successful hwsm terminations */ + u32 hwsm_fails; /* hwsm fail count */ + u32 hwsm_wdtov; /* hwsm timed out */ + u32 swsm_success; /* swsm success */ + u32 swsm_wdtov; /* swsm timed out */ + u32 error_resets; /* error resets initiated by upsm */ + u32 sync_lost; /* Sync loss count */ + u32 sig_lost; /* Signal loss count */ + u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */ + u32 adapt_success; /* SNSM adaptation success */ + u32 adapt_fails; /* SNSM adaptation failures */ + u32 adapt_ign_fails; /* SNSM adaptation failures ignored */ +}; + +struct bfa_fw_port_physm_stats_s { + u32 module_inserts; /* Module insert count */ + u32 module_xtracts; /* Module extracts count */ + u32 module_invalids; /* Invalid module inserted count */ + u32 module_read_ign; /* Module validation status ignored */ + u32 laser_faults; /* Laser fault count */ + u32 rsvd; +}; + +struct bfa_fw_fip_stats_s { + u32 vlan_req; /* vlan discovery requests */ + u32 vlan_notify; /* vlan notifications */ + u32 vlan_err; /* vlan response error */ + u32 vlan_timeouts; /* vlan disvoery timeouts */ + u32 vlan_invalids; /* invalid vlan in discovery advert. */ + u32 disc_req; /* Discovery solicit requests */ + u32 disc_rsp; /* Discovery solicit response */ + u32 disc_err; /* Discovery advt. parse errors */ + u32 disc_unsol; /* Discovery unsolicited */ + u32 disc_timeouts; /* Discovery timeouts */ + u32 disc_fcf_unavail; /* Discovery FCF Not Avail. 
*/ + u32 linksvc_unsupp; /* Unsupported link service req */ + u32 linksvc_err; /* Parse error in link service req */ + u32 logo_req; /* FIP logos received */ + u32 clrvlink_req; /* Clear virtual link req */ + u32 op_unsupp; /* Unsupported FIP operation */ + u32 untagged; /* Untagged frames (ignored) */ + u32 invalid_version; /* Invalid FIP version */ +}; + +struct bfa_fw_lps_stats_s { + u32 mac_invalids; /* Invalid mac assigned */ + u32 rsvd; +}; + +struct bfa_fw_fcoe_stats_s { + u32 cee_linkups; /* CEE link up count */ + u32 cee_linkdns; /* CEE link down count */ + u32 fip_linkups; /* FIP link up count */ + u32 fip_linkdns; /* FIP link up count */ + u32 fip_fails; /* FIP fail count */ + u32 mac_invalids; /* Invalid mac assigned */ +}; + +/* + * IOC firmware FCoE port stats + */ +struct bfa_fw_fcoe_port_stats_s { + struct bfa_fw_fcoe_stats_s fcoe_stats; + struct bfa_fw_fip_stats_s fip_stats; +}; + +/** + * @brief LPSM statistics + */ +struct bfa_fw_lpsm_stats_s { + u32 cls_rx; /* LPSM cls_rx */ + u32 cls_tx; /* LPSM cls_tx */ + u32 arbf0_rx; /* LPSM abrf0 rcvd */ + u32 arbf0_tx; /* LPSM abrf0 xmit */ + u32 init_rx; /* LPSM loop init start */ + u32 unexp_hwst; /* LPSM unknown hw state */ + u32 unexp_frame; /* LPSM unknown_frame */ + u32 unexp_prim; /* LPSM unexpected primitive */ + u32 prev_alpa_unavail; /* LPSM prev alpa unavailable */ + u32 alpa_unavail; /* LPSM alpa not available */ + u32 lip_rx; /* LPSM lip rcvd */ + u32 lip_f7f7_rx; /* LPSM lip f7f7 rcvd */ + u32 lip_f8_rx; /* LPSM lip f8 rcvd */ + u32 lip_f8f7_rx; /* LPSM lip f8f7 rcvd */ + u32 lip_other_rx; /* LPSM lip other rcvd */ + u32 lip_tx; /* LPSM lip xmit */ + u32 retry_tov; /* LPSM retry TOV */ + u32 lip_tov; /* LPSM LIP wait TOV */ + u32 idle_tov; /* LPSM idle wait TOV */ + u32 arbf0_tov; /* LPSM arbfo wait TOV */ + u32 stop_loop_tov; /* LPSM stop loop wait TOV */ + u32 lixa_tov; /* LPSM lisa wait TOV */ + u32 lixx_tov; /* LPSM lilp/lirp wait TOV */ + u32 cls_tov; /* LPSM cls wait TOV */ + u32 sler; /* LPSM SLER recvd */ + u32 failed; /* LPSM failed */ + u32 success; /* LPSM online */ +}; + +/* + * IOC firmware FC uport stats + */ +struct bfa_fw_fc_uport_stats_s { + struct bfa_fw_port_snsm_stats_s snsm_stats; + struct bfa_fw_port_lksm_stats_s lksm_stats; + struct bfa_fw_lpsm_stats_s lpsm_stats; +}; + +/* + * IOC firmware FC port stats + */ +union bfa_fw_fc_port_stats_s { + struct bfa_fw_fc_uport_stats_s fc_stats; + struct bfa_fw_fcoe_port_stats_s fcoe_stats; +}; + +/* + * IOC firmware port stats + */ +struct bfa_fw_port_stats_s { + struct bfa_fw_port_fpg_stats_s fpg_stats; + struct bfa_fw_port_physm_stats_s physm_stats; + union bfa_fw_fc_port_stats_s fc_port; +}; + +/* + * fcxchg module statistics + */ +struct bfa_fw_fcxchg_stats_s { + u32 ua_tag_inv; + u32 ua_state_inv; +}; + +/* + * Trunk statistics + */ +struct bfa_fw_trunk_stats_s { + u32 emt_recvd; /* Trunk EMT received */ + u32 emt_accepted; /* Trunk EMT Accepted */ + u32 emt_rejected; /* Trunk EMT rejected */ + u32 etp_recvd; /* Trunk ETP received */ + u32 etp_accepted; /* Trunk ETP Accepted */ + u32 etp_rejected; /* Trunk ETP rejected */ + u32 lr_recvd; /* Trunk LR received */ + u32 rsvd; /* padding for 64 bit alignment */ +}; + +struct bfa_fw_aport_stats_s { + u32 flogi_sent; /* Flogi sent */ + u32 flogi_acc_recvd; /* Flogi Acc received */ + u32 flogi_rjt_recvd; /* Flogi rejects received */ + u32 flogi_retries; /* Flogi retries */ + + u32 elp_recvd; /* ELP received */ + u32 elp_accepted; /* ELP Accepted */ + u32 elp_rejected; /* ELP rejected */ + u32 elp_dropped; 
/* ELP dropped */ + + u32 bbcr_lr_count; /*!< BBCR Link Resets */ + u32 frame_lost_intrs; /*!< BBCR Frame loss intrs */ + u32 rrdy_lost_intrs; /*!< BBCR Rrdy loss intrs */ + + u32 rsvd; +}; + +/* + * IOCFC firmware stats + */ +struct bfa_fw_iocfc_stats_s { + u32 cfg_reqs; /* cfg request */ + u32 updq_reqs; /* update queue request */ + u32 ic_reqs; /* interrupt coalesce reqs */ + u32 unknown_reqs; + u32 set_intr_reqs; /* set interrupt reqs */ +}; + +/* + * IOC attributes returned in queries + */ +struct bfa_iocfc_attr_s { + struct bfa_iocfc_cfg_s config; /* IOCFC config */ + struct bfa_iocfc_intr_attr_s intr_attr; /* interrupt attr */ +}; + +/* + * Eth_sndrcv mod stats + */ +struct bfa_fw_eth_sndrcv_stats_s { + u32 crc_err; + u32 rsvd; /* 64bit align */ +}; + +/* + * CT MAC mod stats + */ +struct bfa_fw_mac_mod_stats_s { + u32 mac_on; /* MAC got turned-on */ + u32 link_up; /* link-up */ + u32 signal_off; /* lost signal */ + u32 dfe_on; /* DFE on */ + u32 mac_reset; /* # of MAC reset to bring lnk up */ + u32 pcs_reset; /* # of PCS reset to bring lnk up */ + u32 loopback; /* MAC got into serdes loopback */ + u32 lb_mac_reset; + /* # of MAC reset to bring link up in loopback */ + u32 lb_pcs_reset; + /* # of PCS reset to bring link up in loopback */ + u32 rsvd; /* 64bit align */ +}; + +/* + * CT MOD stats + */ +struct bfa_fw_ct_mod_stats_s { + u32 rxa_rds_undrun; /* RxA RDS underrun */ + u32 rad_bpc_ovfl; /* RAD BPC overflow */ + u32 rad_rlb_bpc_ovfl; /* RAD RLB BPC overflow */ + u32 bpc_fcs_err; /* BPC FCS_ERR */ + u32 txa_tso_hdr; /* TxA TSO header too long */ + u32 rsvd; /* 64bit align */ +}; + +/* + * RDS mod stats + */ +struct bfa_fw_rds_stats_s { + u32 no_fid_drop_err; /* RDS no fid drop error */ + u32 rsvd; /* 64bit align */ +}; + +/* + * IOC firmware stats + */ +struct bfa_fw_stats_s { + struct bfa_fw_ioc_stats_s ioc_stats; + struct bfa_fw_iocfc_stats_s iocfc_stats; + struct bfa_fw_io_stats_s io_stats; + struct bfa_fw_port_stats_s port_stats; + struct bfa_fw_fcxchg_stats_s fcxchg_stats; + struct bfa_fw_lps_stats_s lps_stats; + struct bfa_fw_trunk_stats_s trunk_stats; + struct bfa_fw_aport_stats_s aport_stats; + struct bfa_fw_mac_mod_stats_s macmod_stats; + struct bfa_fw_ct_mod_stats_s ctmod_stats; + struct bfa_fw_eth_sndrcv_stats_s ethsndrcv_stats; + struct bfa_fw_rds_stats_s rds_stats; +}; + +#define BFA_IOCFC_PATHTOV_MAX 60 +#define BFA_IOCFC_QDEPTH_MAX 2000 + +/* + * QoS states + */ +enum bfa_qos_state { + BFA_QOS_DISABLED = 0, /* QoS is disabled */ + BFA_QOS_ONLINE = 1, /* QoS is online */ + BFA_QOS_OFFLINE = 2, /* QoS is offline */ +}; + +/* + * QoS Priority levels. 
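 * A priority is assigned per remote port (see bfa_rport_qos_attr_s); the bandwidth share of each level is given by enum bfa_qos_bw_alloc below.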
+ */ +enum bfa_qos_priority { + BFA_QOS_UNKNOWN = 0, + BFA_QOS_HIGH = 1, /* QoS Priority Level High */ + BFA_QOS_MED = 2, /* QoS Priority Level Medium */ + BFA_QOS_LOW = 3, /* QoS Priority Level Low */ +}; + +/* + * QoS bandwidth allocation for each priority level + */ +enum bfa_qos_bw_alloc { + BFA_QOS_BW_HIGH = 60, /* bandwidth allocation for High */ + BFA_QOS_BW_MED = 30, /* bandwidth allocation for Medium */ + BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */ +}; +#pragma pack(1) + +struct bfa_qos_bw_s { + u8 qos_bw_set; + u8 high; + u8 med; + u8 low; +}; + +/* + * QoS attribute returned in QoS Query + */ +struct bfa_qos_attr_s { + u8 state; /* QoS current state */ + u8 rsvd1[3]; + u32 total_bb_cr; /* Total BB Credits */ + struct bfa_qos_bw_s qos_bw; /* QOS bw cfg */ + struct bfa_qos_bw_s qos_bw_op; /* QOS bw operational */ +}; + +enum bfa_bbcr_state { + BFA_BBCR_DISABLED, /*!< BBCR is disable */ + BFA_BBCR_ONLINE, /*!< BBCR is online */ + BFA_BBCR_OFFLINE, /*!< BBCR is offline */ +}; + +enum bfa_bbcr_err_reason { + BFA_BBCR_ERR_REASON_NONE, /*!< Unknown */ + BFA_BBCR_ERR_REASON_SPEED_UNSUP, /*!< Port speed < max sup_speed */ + BFA_BBCR_ERR_REASON_PEER_UNSUP, /*!< BBCR is disable on peer port */ + BFA_BBCR_ERR_REASON_NON_BRCD_SW, /*!< Connected to non BRCD switch */ + BFA_BBCR_ERR_REASON_FLOGI_RJT, /*!< Login rejected by the switch */ +}; + +struct bfa_bbcr_attr_s { + u8 state; + u8 peer_bb_scn; + u8 reason; + u8 rsvd; +}; + +/* + * These fields should be displayed only from the CLI. + * There will be a separate BFAL API (get_qos_vc_attr ?) + * to retrieve this. + * + */ +#define BFA_QOS_MAX_VC 16 + +struct bfa_qos_vc_info_s { + u8 vc_credit; + u8 borrow_credit; + u8 priority; + u8 resvd; +}; + +struct bfa_qos_vc_attr_s { + u16 total_vc_count; /* Total VC Count */ + u16 shared_credit; + u32 elp_opmode_flags; + struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /* as many as + * total_vc_count */ +}; + +/* + * QoS statistics + */ +struct bfa_qos_stats_s { + u32 flogi_sent; /* QoS Flogi sent */ + u32 flogi_acc_recvd; /* QoS Flogi Acc received */ + u32 flogi_rjt_recvd; /* QoS Flogi rejects received */ + u32 flogi_retries; /* QoS Flogi retries */ + + u32 elp_recvd; /* QoS ELP received */ + u32 elp_accepted; /* QoS ELP Accepted */ + u32 elp_rejected; /* QoS ELP rejected */ + u32 elp_dropped; /* QoS ELP dropped */ + + u32 qos_rscn_recvd; /* QoS RSCN received */ + u32 rsvd; /* padding for 64 bit alignment */ +}; + +/* + * FCoE statistics + */ +struct bfa_fcoe_stats_s { + u64 secs_reset; /* Seconds since stats reset */ + u64 cee_linkups; /* CEE link up */ + u64 cee_linkdns; /* CEE link down */ + u64 fip_linkups; /* FIP link up */ + u64 fip_linkdns; /* FIP link down */ + u64 fip_fails; /* FIP failures */ + u64 mac_invalids; /* Invalid mac assignments */ + u64 vlan_req; /* Vlan requests */ + u64 vlan_notify; /* Vlan notifications */ + u64 vlan_err; /* Vlan notification errors */ + u64 vlan_timeouts; /* Vlan request timeouts */ + u64 vlan_invalids; /* Vlan invalids */ + u64 disc_req; /* Discovery requests */ + u64 disc_rsp; /* Discovery responses */ + u64 disc_err; /* Discovery error frames */ + u64 disc_unsol; /* Discovery unsolicited */ + u64 disc_timeouts; /* Discovery timeouts */ + u64 disc_fcf_unavail; /* Discovery FCF not avail */ + u64 linksvc_unsupp; /* FIP link service req unsupp */ + u64 linksvc_err; /* FIP link service req errors */ + u64 logo_req; /* FIP logos received */ + u64 clrvlink_req; /* Clear virtual link requests */ + u64 op_unsupp; /* FIP operation unsupp. 
*/ + u64 untagged; /* FIP untagged frames */ + u64 txf_ucast; /* Tx FCoE unicast frames */ + u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */ + u64 txf_ucast_octets; /* Tx FCoE unicast octets */ + u64 txf_mcast; /* Tx FCoE multicast frames */ + u64 txf_mcast_vlan; /* Tx FCoE multicast vlan frames */ + u64 txf_mcast_octets; /* Tx FCoE multicast octets */ + u64 txf_bcast; /* Tx FCoE broadcast frames */ + u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */ + u64 txf_bcast_octets; /* Tx FCoE broadcast octets */ + u64 txf_timeout; /* Tx timeouts */ + u64 txf_parity_errors; /* Transmit parity err */ + u64 txf_fid_parity_errors; /* Transmit FID parity err */ + u64 rxf_ucast_octets; /* Rx FCoE unicast octets */ + u64 rxf_ucast; /* Rx FCoE unicast frames */ + u64 rxf_ucast_vlan; /* Rx FCoE unicast vlan frames */ + u64 rxf_mcast_octets; /* Rx FCoE multicast octets */ + u64 rxf_mcast; /* Rx FCoE multicast frames */ + u64 rxf_mcast_vlan; /* Rx FCoE multicast vlan frames */ + u64 rxf_bcast_octets; /* Rx FCoE broadcast octets */ + u64 rxf_bcast; /* Rx FCoE broadcast frames */ + u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */ +}; + +/* + * QoS or FCoE stats (fcport stats excluding physical FC port stats) + */ +union bfa_fcport_stats_u { + struct bfa_qos_stats_s fcqos; + struct bfa_fcoe_stats_s fcoe; +}; +#pragma pack() + +struct bfa_fcpim_del_itn_stats_s { + u32 del_itn_iocomp_aborted; /* Aborted IO requests */ + u32 del_itn_iocomp_timedout; /* IO timeouts */ + u32 del_itn_iocom_sqer_needed; /* IO retry for SQ error recovery */ + u32 del_itn_iocom_res_free; /* Delayed freeing of IO resources */ + u32 del_itn_iocom_hostabrts; /* Host IO abort requests */ + u32 del_itn_total_ios; /* Total IO count */ + u32 del_io_iocdowns; /* IO cleaned-up due to IOC down */ + u32 del_tm_iocdowns; /* TM cleaned-up due to IOC down */ +}; + +struct bfa_itnim_iostats_s { + + u32 total_ios; /* Total IO Requests */ + u32 input_reqs; /* Data in-bound requests */ + u32 output_reqs; /* Data out-bound requests */ + u32 io_comps; /* Total IO Completions */ + u32 wr_throughput; /* Write data transferred in bytes */ + u32 rd_throughput; /* Read data transferred in bytes */ + + u32 iocomp_ok; /* Slowpath IO completions */ + u32 iocomp_underrun; /* IO underrun */ + u32 iocomp_overrun; /* IO overrun */ + u32 qwait; /* IO Request-Q wait */ + u32 qresumes; /* IO Request-Q wait done */ + u32 no_iotags; /* No free IO tag */ + u32 iocomp_timedout; /* IO timeouts */ + u32 iocom_nexus_abort; /* IO failure due to target offline */ + u32 iocom_proto_err; /* IO protocol errors */ + u32 iocom_dif_err; /* IO SBC-3 protection errors */ + + u32 iocom_sqer_needed; /* fcp-2 error recovery failed */ + u32 iocom_res_free; /* Delayed freeing of IO tag */ + + + u32 io_aborts; /* Host IO abort requests */ + u32 iocom_hostabrts; /* Host IO abort completions */ + u32 io_cleanups; /* IO clean-up requests */ + u32 path_tov_expired; /* IO path tov expired */ + u32 iocomp_aborted; /* IO abort completions */ + u32 io_iocdowns; /* IO cleaned-up due to IOC down */ + u32 iocom_utags; /* IO comp with unknown tags */ + + u32 io_tmaborts; /* Abort request due to TM command */ + u32 tm_io_comps; /* Abort completion due to TM command */ + + u32 creates; /* IT Nexus create requests */ + u32 fw_create; /* IT Nexus FW create requests */ + u32 create_comps; /* IT Nexus FW create completions */ + u32 onlines; /* IT Nexus onlines */ + u32 offlines; /* IT Nexus offlines */ + u32 fw_delete; /* IT Nexus FW delete requests */ + u32 delete_comps; /* IT Nexus FW 
delete completions */ + u32 deletes; /* IT Nexus delete requests */ + u32 sler_events; /* SLER events */ + u32 ioc_disabled; /* Num IOC disables */ + u32 cleanup_comps; /* IT Nexus cleanup completions */ + + u32 tm_cmnds; /* TM Requests */ + u32 tm_fw_rsps; /* TM Completions */ + u32 tm_success; /* TM initiated IO cleanup success */ + u32 tm_failures; /* TM initiated IO cleanup failure */ + u32 no_tskims; /* No free TM tag */ + u32 tm_qwait; /* TM Request-Q wait */ + u32 tm_qresumes; /* TM Request-Q wait done */ + + u32 tm_iocdowns; /* TM cleaned-up due to IOC down */ + u32 tm_cleanups; /* TM cleanup requests */ + u32 tm_cleanup_comps; /* TM cleanup completions */ + u32 rsvd[6]; +}; + +/* Modify char* port_stt[] in bfal_port.c if a new state was added */ +enum bfa_port_states { + BFA_PORT_ST_UNINIT = 1, + BFA_PORT_ST_ENABLING_QWAIT = 2, + BFA_PORT_ST_ENABLING = 3, + BFA_PORT_ST_LINKDOWN = 4, + BFA_PORT_ST_LINKUP = 5, + BFA_PORT_ST_DISABLING_QWAIT = 6, + BFA_PORT_ST_DISABLING = 7, + BFA_PORT_ST_DISABLED = 8, + BFA_PORT_ST_STOPPED = 9, + BFA_PORT_ST_IOCDOWN = 10, + BFA_PORT_ST_IOCDIS = 11, + BFA_PORT_ST_FWMISMATCH = 12, + BFA_PORT_ST_PREBOOT_DISABLED = 13, + BFA_PORT_ST_TOGGLING_QWAIT = 14, + BFA_PORT_ST_FAA_MISCONFIG = 15, + BFA_PORT_ST_DPORT = 16, + BFA_PORT_ST_DDPORT = 17, + BFA_PORT_ST_MAX_STATE, +}; + +/* + * Port operational type (in sync with SNIA port type). + */ +enum bfa_port_type { + BFA_PORT_TYPE_UNKNOWN = 1, /* port type is unknown */ + BFA_PORT_TYPE_NPORT = 5, /* P2P with switched fabric */ + BFA_PORT_TYPE_NLPORT = 6, /* public loop */ + BFA_PORT_TYPE_LPORT = 20, /* private loop */ + BFA_PORT_TYPE_P2P = 21, /* P2P with no switched fabric */ + BFA_PORT_TYPE_VPORT = 22, /* NPIV - virtual port */ +}; + +/* + * Port topology setting. A port's topology and fabric login status + * determine its operational type. + */ +enum bfa_port_topology { + BFA_PORT_TOPOLOGY_NONE = 0, /* No valid topology */ + BFA_PORT_TOPOLOGY_P2P_OLD_VER = 1, /* P2P def for older ver */ + BFA_PORT_TOPOLOGY_LOOP = 2, /* LOOP topology */ + BFA_PORT_TOPOLOGY_AUTO_OLD_VER = 3, /* auto def for older ver */ + BFA_PORT_TOPOLOGY_AUTO = 4, /* auto topology selection */ + BFA_PORT_TOPOLOGY_P2P = 5, /* P2P only */ +}; + +/* + * Physical port loopback types. 
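 * Used as the lb_mode of a diagnostic loopback request (bfa_diag_loopback_s); the BFA_PORT_OPMODE_LB_HARD() check below flags the INT/SLW/EXT modes, which loop frames back inside the ASIC/serdes rather than over a cable.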
+ */ +enum bfa_port_opmode { + BFA_PORT_OPMODE_NORMAL = 0x00, /* normal non-loopback mode */ + BFA_PORT_OPMODE_LB_INT = 0x01, /* internal loop back */ + BFA_PORT_OPMODE_LB_SLW = 0x02, /* serial link wrapback (serdes) */ + BFA_PORT_OPMODE_LB_EXT = 0x04, /* external loop back (serdes) */ + BFA_PORT_OPMODE_LB_CBL = 0x08, /* cabled loop back */ + BFA_PORT_OPMODE_LB_NLINT = 0x20, /* NL_Port internal loopback */ +}; + +#define BFA_PORT_OPMODE_LB_HARD(_mode) \ + ((_mode == BFA_PORT_OPMODE_LB_INT) || \ + (_mode == BFA_PORT_OPMODE_LB_SLW) || \ + (_mode == BFA_PORT_OPMODE_LB_EXT)) + +/* + * Port link state + */ +enum bfa_port_linkstate { + BFA_PORT_LINKUP = 1, /* Physical port/Trunk link up */ + BFA_PORT_LINKDOWN = 2, /* Physical port/Trunk link down */ +}; + +/* + * Port link state reason code + */ +enum bfa_port_linkstate_rsn { + BFA_PORT_LINKSTATE_RSN_NONE = 0, + BFA_PORT_LINKSTATE_RSN_DISABLED = 1, + BFA_PORT_LINKSTATE_RSN_RX_NOS = 2, + BFA_PORT_LINKSTATE_RSN_RX_OLS = 3, + BFA_PORT_LINKSTATE_RSN_RX_LIP = 4, + BFA_PORT_LINKSTATE_RSN_RX_LIPF7 = 5, + BFA_PORT_LINKSTATE_RSN_SFP_REMOVED = 6, + BFA_PORT_LINKSTATE_RSN_PORT_FAULT = 7, + BFA_PORT_LINKSTATE_RSN_RX_LOS = 8, + BFA_PORT_LINKSTATE_RSN_LOCAL_FAULT = 9, + BFA_PORT_LINKSTATE_RSN_REMOTE_FAULT = 10, + BFA_PORT_LINKSTATE_RSN_TIMEOUT = 11, + BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG = 12, + + + + /* CEE related reason codes/errors */ + CEE_LLDP_INFO_AGED_OUT = 20, + CEE_LLDP_SHUTDOWN_TLV_RCVD = 21, + CEE_PEER_NOT_ADVERTISE_DCBX = 22, + CEE_PEER_NOT_ADVERTISE_PG = 23, + CEE_PEER_NOT_ADVERTISE_PFC = 24, + CEE_PEER_NOT_ADVERTISE_FCOE = 25, + CEE_PG_NOT_COMPATIBLE = 26, + CEE_PFC_NOT_COMPATIBLE = 27, + CEE_FCOE_NOT_COMPATIBLE = 28, + CEE_BAD_PG_RCVD = 29, + CEE_BAD_BW_RCVD = 30, + CEE_BAD_PFC_RCVD = 31, + CEE_BAD_APP_PRI_RCVD = 32, + CEE_FCOE_PRI_PFC_OFF = 33, + CEE_DUP_CONTROL_TLV_RCVD = 34, + CEE_DUP_FEAT_TLV_RCVD = 35, + CEE_APPLY_NEW_CFG = 36, /* reason, not error */ + CEE_PROTOCOL_INIT = 37, /* reason, not error */ + CEE_PHY_LINK_DOWN = 38, + CEE_LLS_FCOE_ABSENT = 39, + CEE_LLS_FCOE_DOWN = 40, + CEE_ISCSI_NOT_COMPATIBLE = 41, + CEE_ISCSI_PRI_PFC_OFF = 42, + CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43 +}; + +#define MAX_LUN_MASK_CFG 16 + +/* + * Initially flash content may be fff. On making LUN mask enable and disable + * state change. when report lun command is being processed it goes from + * BFA_LUN_MASK_ACTIVE to BFA_LUN_MASK_FETCH and comes back to + * BFA_LUN_MASK_ACTIVE. 
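 * (The ACTIVE/FETCHED values below belong to enum bfa_ioim_lun_mask_state_s; whether LUN masking itself is enabled is tracked separately by enum bfa_lunmask_state_s.)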
+ */ +enum bfa_ioim_lun_mask_state_s { + BFA_IOIM_LUN_MASK_INACTIVE = 0, + BFA_IOIM_LUN_MASK_ACTIVE = 1, + BFA_IOIM_LUN_MASK_FETCHED = 2, +}; + +enum bfa_lunmask_state_s { + BFA_LUNMASK_DISABLED = 0x00, + BFA_LUNMASK_ENABLED = 0x01, + BFA_LUNMASK_MINCFG = 0x02, + BFA_LUNMASK_UNINITIALIZED = 0xff, +}; + +/** + * FEC states + */ +enum bfa_fec_state_s { + BFA_FEC_ONLINE = 1, /*!< FEC is online */ + BFA_FEC_OFFLINE = 2, /*!< FEC is offline */ + BFA_FEC_OFFLINE_NOT_16G = 3, /*!< FEC is offline (speed not 16Gig) */ +}; + +#pragma pack(1) +/* + * LUN mask configuration + */ +struct bfa_lun_mask_s { + wwn_t lp_wwn; + wwn_t rp_wwn; + struct scsi_lun lun; + u8 ua; + u8 rsvd[3]; + u16 rp_tag; + u8 lp_tag; + u8 state; +}; + +#define MAX_LUN_MASK_CFG 16 +struct bfa_lunmask_cfg_s { + u32 status; + u32 rsvd; + struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG]; +}; + +struct bfa_throttle_cfg_s { + u16 is_valid; + u16 value; + u32 rsvd; +}; + +struct bfa_defs_fcpim_throttle_s { + u16 max_value; + u16 cur_value; + u16 cfg_value; + u16 rsvd; +}; + +#define BFA_BB_SCN_DEF 3 +#define BFA_BB_SCN_MAX 0x0F + +/* + * Physical port configuration + */ +struct bfa_port_cfg_s { + u8 topology; /* bfa_port_topology */ + u8 speed; /* enum bfa_port_speed */ + u8 trunked; /* trunked or not */ + u8 qos_enabled; /* qos enabled or not */ + u8 cfg_hardalpa; /* is hard alpa configured */ + u8 hardalpa; /* configured hard alpa */ + __be16 maxfrsize; /* maximum frame size */ + u8 rx_bbcredit; /* receive buffer credits */ + u8 tx_bbcredit; /* transmit buffer credits */ + u8 ratelimit; /* ratelimit enabled or not */ + u8 trl_def_speed; /* ratelimit default speed */ + u8 bb_cr_enabled; /*!< Config state of BB_SCN */ + u8 bb_scn; /*!< BB_SCN value for FLOGI Exchg */ + u8 faa_state; /* FAA enabled/disabled */ + u8 rsvd1; + u16 path_tov; /* device path timeout */ + u16 q_depth; /* SCSI Queue depth */ + struct bfa_qos_bw_s qos_bw; /* QOS bandwidth */ +}; +#pragma pack() + +/* + * Port attribute values. + */ +struct bfa_port_attr_s { + /* + * Static fields + */ + wwn_t nwwn; /* node wwn */ + wwn_t pwwn; /* port wwn */ + wwn_t factorynwwn; /* factory node wwn */ + wwn_t factorypwwn; /* factory port wwn */ + enum fc_cos cos_supported; /* supported class of + * services */ + u32 rsvd; + struct fc_symname_s port_symname; /* port symbolic name */ + enum bfa_port_speed speed_supported; /* supported speeds */ + bfa_boolean_t pbind_enabled; + + /* + * Configured values + */ + struct bfa_port_cfg_s pport_cfg; /* pport cfg */ + + /* + * Dynamic field - info from BFA + */ + enum bfa_port_states port_state; /* current port state */ + enum bfa_port_speed speed; /* current speed */ + enum bfa_port_topology topology; /* current topology */ + bfa_boolean_t beacon; /* current beacon status */ + bfa_boolean_t link_e2e_beacon; /* link beacon is on */ + bfa_boolean_t bbsc_op_status; /* fc credit recovery oper + * state */ + enum bfa_fec_state_s fec_state; /*!< current FEC state */ + + /* + * Dynamic field - info from FCS + */ + u32 pid; /* port ID */ + enum bfa_port_type port_type; /* current topology */ + u32 loopback; /* external loopback */ + u32 authfail; /* auth fail state */ + + /* FCoE specific */ + u16 fcoe_vlan; + u8 rsvd1[2]; +}; + +/* + * Port FCP mappings. + */ +struct bfa_port_fcpmap_s { + char osdevname[256]; + u32 bus; + u32 target; + u32 oslun; + u32 fcid; + wwn_t nwwn; + wwn_t pwwn; + u64 fcplun; + char luid[256]; +}; + +/* + * Port RNID info. 
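 * Node identification data returned for the RNID (Request Node Identification Data) ELS.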
+ */ +struct bfa_port_rnid_s { + wwn_t wwn; + u32 unittype; + u32 portid; + u32 attached_nodes_num; + u16 ip_version; + u16 udp_port; + u8 ipaddr[16]; + u16 rsvd; + u16 topologydiscoveryflags; +}; + +#pragma pack(1) +struct bfa_fcport_fcf_s { + wwn_t name; /* FCF name */ + wwn_t fabric_name; /* Fabric Name */ + u8 fipenabled; /* FIP enabled or not */ + u8 fipfailed; /* FIP failed or not */ + u8 resv[2]; + u8 pri; /* FCF priority */ + u8 version; /* FIP version used */ + u8 available; /* Available for login */ + u8 fka_disabled; /* FKA is disabled */ + u8 maxsz_verified; /* FCoE max size verified */ + u8 fc_map[3]; /* FC map */ + __be16 vlan; /* FCoE vlan tag/priority */ + u32 fka_adv_per; /* FIP ka advert. period */ + mac_t mac; /* FCF mac */ +}; + +/* + * Trunk states for BCU/BFAL + */ +enum bfa_trunk_state { + BFA_TRUNK_DISABLED = 0, /* Trunk is not configured */ + BFA_TRUNK_ONLINE = 1, /* Trunk is online */ + BFA_TRUNK_OFFLINE = 2, /* Trunk is offline */ +}; + +/* + * VC attributes for trunked link + */ +struct bfa_trunk_vc_attr_s { + u32 bb_credit; + u32 elp_opmode_flags; + u32 req_credit; + u16 vc_credits[8]; +}; + +struct bfa_fcport_loop_info_s { + u8 myalpa; /* alpa claimed */ + u8 alpabm_val; /* alpa bitmap valid or not (1 or 0) */ + u8 resvd[6]; + struct fc_alpabm_s alpabm; /* alpa bitmap */ +}; + +/* + * Link state information + */ +struct bfa_port_link_s { + u8 linkstate; /* Link state bfa_port_linkstate */ + u8 linkstate_rsn; /* bfa_port_linkstate_rsn_t */ + u8 topology; /* P2P/LOOP bfa_port_topology */ + u8 speed; /* Link speed (1/2/4/8 G) */ + u32 linkstate_opt; /* Linkstate optional data (debug) */ + u8 trunked; /* Trunked or not (1 or 0) */ + u8 fec_state; /*!< State of FEC */ + u8 resvd[6]; + struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ + union { + struct bfa_fcport_loop_info_s loop_info; + struct bfa_bbcr_attr_s bbcr_attr; + union { + struct bfa_qos_vc_attr_s qos_vc_attr; + /* VC info from ELP */ + struct bfa_trunk_vc_attr_s trunk_vc_attr; + struct bfa_fcport_fcf_s fcf; + /* FCF information (for FCoE) */ + } vc_fcf; + } attr; +}; +#pragma pack() + +enum bfa_trunk_link_fctl { + BFA_TRUNK_LINK_FCTL_NORMAL, + BFA_TRUNK_LINK_FCTL_VC, + BFA_TRUNK_LINK_FCTL_VC_QOS, +}; + +enum bfa_trunk_link_state { + BFA_TRUNK_LINK_STATE_UP = 1, /* link part of trunk */ + BFA_TRUNK_LINK_STATE_DN_LINKDN = 2, /* physical link down */ + BFA_TRUNK_LINK_STATE_DN_GRP_MIS = 3, /* trunk group different */ + BFA_TRUNK_LINK_STATE_DN_SPD_MIS = 4, /* speed mismatch */ + BFA_TRUNK_LINK_STATE_DN_MODE_MIS = 5, /* remote port not trunked */ +}; + +#define BFA_TRUNK_MAX_PORTS 2 +struct bfa_trunk_link_attr_s { + wwn_t trunk_wwn; + enum bfa_trunk_link_fctl fctl; + enum bfa_trunk_link_state link_state; + enum bfa_port_speed speed; + u32 deskew; +}; + +struct bfa_trunk_attr_s { + enum bfa_trunk_state state; + enum bfa_port_speed speed; + u32 port_id; + u32 rsvd; + struct bfa_trunk_link_attr_s link_attr[BFA_TRUNK_MAX_PORTS]; +}; + +struct bfa_rport_hal_stats_s { + u32 sm_un_cr; /* uninit: create events */ + u32 sm_un_unexp; /* uninit: exception events */ + u32 sm_cr_on; /* created: online events */ + u32 sm_cr_del; /* created: delete events */ + u32 sm_cr_hwf; /* created: IOC down */ + u32 sm_cr_unexp; /* created: exception events */ + u32 sm_fwc_rsp; /* fw create: f/w responses */ + u32 sm_fwc_del; /* fw create: delete events */ + u32 sm_fwc_off; /* fw create: offline events */ + u32 sm_fwc_hwf; /* fw create: IOC down */ + u32 sm_fwc_unexp; /* fw create: exception events*/ + u32 sm_on_off; /* online: 
offline events */ + u32 sm_on_del; /* online: delete events */ + u32 sm_on_hwf; /* online: IOC down events */ + u32 sm_on_unexp; /* online: exception events */ + u32 sm_fwd_rsp; /* fw delete: fw responses */ + u32 sm_fwd_del; /* fw delete: delete events */ + u32 sm_fwd_hwf; /* fw delete: IOC down events */ + u32 sm_fwd_unexp; /* fw delete: exception events*/ + u32 sm_off_del; /* offline: delete events */ + u32 sm_off_on; /* offline: online events */ + u32 sm_off_hwf; /* offline: IOC down events */ + u32 sm_off_unexp; /* offline: exception events */ + u32 sm_del_fwrsp; /* delete: fw responses */ + u32 sm_del_hwf; /* delete: IOC down events */ + u32 sm_del_unexp; /* delete: exception events */ + u32 sm_delp_fwrsp; /* delete pend: fw responses */ + u32 sm_delp_hwf; /* delete pend: IOC downs */ + u32 sm_delp_unexp; /* delete pend: exceptions */ + u32 sm_offp_fwrsp; /* off-pending: fw responses */ + u32 sm_offp_del; /* off-pending: deletes */ + u32 sm_offp_hwf; /* off-pending: IOC downs */ + u32 sm_offp_unexp; /* off-pending: exceptions */ + u32 sm_iocd_off; /* IOC down: offline events */ + u32 sm_iocd_del; /* IOC down: delete events */ + u32 sm_iocd_on; /* IOC down: online events */ + u32 sm_iocd_unexp; /* IOC down: exceptions */ + u32 rsvd; +}; +#pragma pack(1) +/* + * Rport's QoS attributes + */ +struct bfa_rport_qos_attr_s { + u8 qos_priority; /* rport's QoS priority */ + u8 rsvd[3]; + u32 qos_flow_id; /* QoS flow Id */ +}; +#pragma pack() + +#define BFA_IOBUCKET_MAX 14 + +struct bfa_itnim_latency_s { + u32 min[BFA_IOBUCKET_MAX]; + u32 max[BFA_IOBUCKET_MAX]; + u32 count[BFA_IOBUCKET_MAX]; + u32 avg[BFA_IOBUCKET_MAX]; +}; + +struct bfa_itnim_ioprofile_s { + u32 clock_res_mul; + u32 clock_res_div; + u32 index; + u32 io_profile_start_time; /* IO profile start time */ + u32 iocomps[BFA_IOBUCKET_MAX]; /* IO completed */ + struct bfa_itnim_latency_s io_latency; +}; + +/* + * vHBA port attribute values. + */ +struct bfa_vhba_attr_s { + wwn_t nwwn; /* node wwn */ + wwn_t pwwn; /* port wwn */ + u32 pid; /* port ID */ + bfa_boolean_t io_profile; /* get it from fcpim mod */ + bfa_boolean_t plog_enabled; /* portlog is enabled */ + u16 path_tov; + u8 rsvd[2]; +}; + +/* + * FC physical port statistics. + */ +struct bfa_port_fc_stats_s { + u64 secs_reset; /* Seconds since stats is reset */ + u64 tx_frames; /* Tx frames */ + u64 tx_words; /* Tx words */ + u64 tx_lip; /* Tx LIP */ + u64 tx_lip_f7f7; /* Tx LIP_F7F7 */ + u64 tx_lip_f8f7; /* Tx LIP_F8F7 */ + u64 tx_arbf0; /* Tx ARB F0 */ + u64 tx_nos; /* Tx NOS */ + u64 tx_ols; /* Tx OLS */ + u64 tx_lr; /* Tx LR */ + u64 tx_lrr; /* Tx LRR */ + u64 rx_frames; /* Rx frames */ + u64 rx_words; /* Rx words */ + u64 lip_count; /* Rx LIP */ + u64 rx_lip_f7f7; /* Rx LIP_F7F7 */ + u64 rx_lip_f8f7; /* Rx LIP_F8F7 */ + u64 rx_arbf0; /* Rx ARB F0 */ + u64 nos_count; /* Rx NOS */ + u64 ols_count; /* Rx OLS */ + u64 lr_count; /* Rx LR */ + u64 lrr_count; /* Rx LRR */ + u64 invalid_crcs; /* Rx CRC err frames */ + u64 invalid_crc_gd_eof; /* Rx CRC err good EOF frames */ + u64 undersized_frm; /* Rx undersized frames */ + u64 oversized_frm; /* Rx oversized frames */ + u64 bad_eof_frm; /* Rx frames with bad EOF */ + u64 error_frames; /* Errored frames */ + u64 dropped_frames; /* Dropped frames */ + u64 link_failures; /* Link Failure (LF) count */ + u64 loss_of_syncs; /* Loss of sync count */ + u64 loss_of_signals; /* Loss of signal count */ + u64 primseq_errs; /* Primitive sequence protocol err. 
*/ + u64 bad_os_count; /* Invalid ordered sets */ + u64 err_enc_out; /* Encoding err nonframe_8b10b */ + u64 err_enc; /* Encoding err frame_8b10b */ + u64 bbcr_frames_lost; /*!< BBCR Frames Lost */ + u64 bbcr_rrdys_lost; /*!< BBCR RRDYs Lost */ + u64 bbcr_link_resets; /*!< BBCR Link Resets */ + u64 bbcr_frame_lost_intrs; /*!< BBCR Frame loss intrs */ + u64 bbcr_rrdy_lost_intrs; /*!< BBCR Rrdy loss intrs */ + u64 loop_timeouts; /* Loop timeouts */ +}; + +/* + * Eth Physical Port statistics. + */ +struct bfa_port_eth_stats_s { + u64 secs_reset; /* Seconds since stats is reset */ + u64 frame_64; /* Frames 64 bytes */ + u64 frame_65_127; /* Frames 65-127 bytes */ + u64 frame_128_255; /* Frames 128-255 bytes */ + u64 frame_256_511; /* Frames 256-511 bytes */ + u64 frame_512_1023; /* Frames 512-1023 bytes */ + u64 frame_1024_1518; /* Frames 1024-1518 bytes */ + u64 frame_1519_1522; /* Frames 1519-1522 bytes */ + u64 tx_bytes; /* Tx bytes */ + u64 tx_packets; /* Tx packets */ + u64 tx_mcast_packets; /* Tx multicast packets */ + u64 tx_bcast_packets; /* Tx broadcast packets */ + u64 tx_control_frame; /* Tx control frame */ + u64 tx_drop; /* Tx drops */ + u64 tx_jabber; /* Tx jabber */ + u64 tx_fcs_error; /* Tx FCS errors */ + u64 tx_fragments; /* Tx fragments */ + u64 rx_bytes; /* Rx bytes */ + u64 rx_packets; /* Rx packets */ + u64 rx_mcast_packets; /* Rx multicast packets */ + u64 rx_bcast_packets; /* Rx broadcast packets */ + u64 rx_control_frames; /* Rx control frames */ + u64 rx_unknown_opcode; /* Rx unknown opcode */ + u64 rx_drop; /* Rx drops */ + u64 rx_jabber; /* Rx jabber */ + u64 rx_fcs_error; /* Rx FCS errors */ + u64 rx_alignment_error; /* Rx alignment errors */ + u64 rx_frame_length_error; /* Rx frame len errors */ + u64 rx_code_error; /* Rx code errors */ + u64 rx_fragments; /* Rx fragments */ + u64 rx_pause; /* Rx pause */ + u64 rx_zero_pause; /* Rx zero pause */ + u64 tx_pause; /* Tx pause */ + u64 tx_zero_pause; /* Tx zero pause */ + u64 rx_fcoe_pause; /* Rx FCoE pause */ + u64 rx_fcoe_zero_pause; /* Rx FCoE zero pause */ + u64 tx_fcoe_pause; /* Tx FCoE pause */ + u64 tx_fcoe_zero_pause; /* Tx FCoE zero pause */ + u64 rx_iscsi_pause; /* Rx iSCSI pause */ + u64 rx_iscsi_zero_pause; /* Rx iSCSI zero pause */ + u64 tx_iscsi_pause; /* Tx iSCSI pause */ + u64 tx_iscsi_zero_pause; /* Tx iSCSI zero pause */ +}; + +/* + * Port statistics. 
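 * Union of the FC and Ethernet physical-port counter sets defined above.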
+ */ +union bfa_port_stats_u { + struct bfa_port_fc_stats_s fc; + struct bfa_port_eth_stats_s eth; +}; + +struct bfa_port_cfg_mode_s { + u16 max_pf; + u16 max_vf; + enum bfa_mode_s mode; +}; + +#pragma pack(1) + +#define BFA_CEE_LLDP_MAX_STRING_LEN (128) +#define BFA_CEE_DCBX_MAX_PRIORITY (8) +#define BFA_CEE_DCBX_MAX_PGID (8) + +struct bfa_cee_lldp_str_s { + u8 sub_type; + u8 len; + u8 rsvd[2]; + u8 value[BFA_CEE_LLDP_MAX_STRING_LEN]; +}; + +struct bfa_cee_lldp_cfg_s { + struct bfa_cee_lldp_str_s chassis_id; + struct bfa_cee_lldp_str_s port_id; + struct bfa_cee_lldp_str_s port_desc; + struct bfa_cee_lldp_str_s sys_name; + struct bfa_cee_lldp_str_s sys_desc; + struct bfa_cee_lldp_str_s mgmt_addr; + u16 time_to_live; + u16 enabled_system_cap; +}; + +/* CEE/DCBX parameters */ +struct bfa_cee_dcbx_cfg_s { + u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY]; + u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID]; + u8 pfc_primap; /* bitmap of priorties with PFC enabled */ + u8 fcoe_primap; /* bitmap of priorities used for FcoE traffic */ + u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */ + u8 dcbx_version; /* operating version:CEE or preCEE */ + u8 lls_fcoe; /* FCoE Logical Link Status */ + u8 lls_lan; /* LAN Logical Link Status */ + u8 rsvd[2]; +}; + +/* CEE Query */ +struct bfa_cee_attr_s { + u8 cee_status; + u8 error_reason; + struct bfa_cee_lldp_cfg_s lldp_remote; + struct bfa_cee_dcbx_cfg_s dcbx_remote; + mac_t src_mac; + u8 link_speed; + u8 nw_priority; + u8 filler[2]; +}; + +/* LLDP/DCBX/CEE Statistics */ +struct bfa_cee_stats_s { + u32 lldp_tx_frames; /* LLDP Tx Frames */ + u32 lldp_rx_frames; /* LLDP Rx Frames */ + u32 lldp_rx_frames_invalid; /* LLDP Rx Frames invalid */ + u32 lldp_rx_frames_new; /* LLDP Rx Frames new */ + u32 lldp_tlvs_unrecognized; /* LLDP Rx unrecog. 
TLVs */ + u32 lldp_rx_shutdown_tlvs; /* LLDP Rx shutdown TLVs */ + u32 lldp_info_aged_out; /* LLDP remote info aged */ + u32 dcbx_phylink_ups; /* DCBX phy link ups */ + u32 dcbx_phylink_downs; /* DCBX phy link downs */ + u32 dcbx_rx_tlvs; /* DCBX Rx TLVs */ + u32 dcbx_rx_tlvs_invalid; /* DCBX Rx TLVs invalid */ + u32 dcbx_control_tlv_error; /* DCBX control TLV errors */ + u32 dcbx_feature_tlv_error; /* DCBX feature TLV errors */ + u32 dcbx_cee_cfg_new; /* DCBX new CEE cfg rcvd */ + u32 cee_status_down; /* DCB status down */ + u32 cee_status_up; /* DCB status up */ + u32 cee_hw_cfg_changed; /* DCB hw cfg changed */ + u32 cee_rx_invalid_cfg; /* DCB invalid cfg */ +}; + +#pragma pack() + +/* + * AEN related definitions + */ +#define BFAD_NL_VENDOR_ID (((u64)0x01 << SCSI_NL_VID_TYPE_SHIFT) \ + | BFA_PCI_VENDOR_ID_BROCADE) + +/* BFA remote port events */ +enum bfa_rport_aen_event { + BFA_RPORT_AEN_ONLINE = 1, /* RPort online event */ + BFA_RPORT_AEN_OFFLINE = 2, /* RPort offline event */ + BFA_RPORT_AEN_DISCONNECT = 3, /* RPort disconnect event */ + BFA_RPORT_AEN_QOS_PRIO = 4, /* QOS priority change event */ + BFA_RPORT_AEN_QOS_FLOWID = 5, /* QOS flow Id change event */ +}; + +struct bfa_rport_aen_data_s { + u16 vf_id; /* vf_id of this logical port */ + u16 rsvd[3]; + wwn_t ppwwn; /* WWN of its physical port */ + wwn_t lpwwn; /* WWN of this logical port */ + wwn_t rpwwn; /* WWN of this remote port */ + union { + struct bfa_rport_qos_attr_s qos; + } priv; +}; + +union bfa_aen_data_u { + struct bfa_adapter_aen_data_s adapter; + struct bfa_port_aen_data_s port; + struct bfa_lport_aen_data_s lport; + struct bfa_rport_aen_data_s rport; + struct bfa_itnim_aen_data_s itnim; + struct bfa_audit_aen_data_s audit; + struct bfa_ioc_aen_data_s ioc; +}; + +#define BFA_AEN_MAX_ENTRY 512 + +struct bfa_aen_entry_s { + struct list_head qe; + enum bfa_aen_category aen_category; + int aen_type; + union bfa_aen_data_u aen_data; + u64 aen_tv_sec; + u64 aen_tv_usec; + u32 seq_num; + u32 bfad_num; +}; + +#endif /* __BFA_DEFS_SVC_H__ */ diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h new file mode 100644 index 000000000..1091aa428 --- /dev/null +++ b/drivers/scsi/bfa/bfa_fc.h @@ -0,0 +1,1606 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
+ */ + +#ifndef __BFA_FC_H__ +#define __BFA_FC_H__ + +#include "bfad_drv.h" + +typedef u64 wwn_t; + +#define WWN_NULL (0) +#define FC_SYMNAME_MAX 256 /* max name server symbolic name size */ +#define FC_ALPA_MAX 128 + +#pragma pack(1) + +#define MAC_ADDRLEN (6) +struct mac_s { u8 mac[MAC_ADDRLEN]; }; +#define mac_t struct mac_s + +/* + * generic SCSI cdb definition + */ +#define SCSI_MAX_CDBLEN 16 +struct scsi_cdb_s { + u8 scsi_cdb[SCSI_MAX_CDBLEN]; +}; + +#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */ + +/* + * Fibre Channel Header Structure (FCHS) definition + */ +struct fchs_s { +#ifdef __BIG_ENDIAN + u32 routing:4; /* routing bits */ + u32 cat_info:4; /* category info */ +#else + u32 cat_info:4; /* category info */ + u32 routing:4; /* routing bits */ +#endif + u32 d_id:24; /* destination identifier */ + + u32 cs_ctl:8; /* class specific control */ + u32 s_id:24; /* source identifier */ + + u32 type:8; /* data structure type */ + u32 f_ctl:24; /* initial frame control */ + + u8 seq_id; /* sequence identifier */ + u8 df_ctl; /* data field control */ + u16 seq_cnt; /* sequence count */ + + __be16 ox_id; /* originator exchange ID */ + u16 rx_id; /* responder exchange ID */ + + u32 ro; /* relative offset */ +}; + +/* + * routing bit definitions + */ +enum { + FC_RTG_FC4_DEV_DATA = 0x0, /* FC-4 Device Data */ + FC_RTG_EXT_LINK = 0x2, /* Extended Link Data */ + FC_RTG_FC4_LINK_DATA = 0x3, /* FC-4 Link Data */ + FC_RTG_VIDEO_DATA = 0x4, /* Video Data */ + FC_RTG_EXT_HDR = 0x5, /* VFT, IFR or Encapsuled */ + FC_RTG_BASIC_LINK = 0x8, /* Basic Link data */ + FC_RTG_LINK_CTRL = 0xC, /* Link Control */ +}; + +/* + * information category for extended link data and FC-4 Link Data + */ +enum { + FC_CAT_LD_REQUEST = 0x2, /* Request */ + FC_CAT_LD_REPLY = 0x3, /* Reply */ + FC_CAT_LD_DIAG = 0xF, /* for DIAG use only */ +}; + +/* + * information category for extended headers (VFT, IFR or encapsulation) + */ +enum { + FC_CAT_VFT_HDR = 0x0, /* Virtual fabric tagging header */ + FC_CAT_IFR_HDR = 0x1, /* Inter-Fabric routing header */ + FC_CAT_ENC_HDR = 0x2, /* Encapsulation header */ +}; + +/* + * information category for FC-4 device data + */ +enum { + FC_CAT_UNCATEG_INFO = 0x0, /* Uncategorized information */ + FC_CAT_SOLICIT_DATA = 0x1, /* Solicited Data */ + FC_CAT_UNSOLICIT_CTRL = 0x2, /* Unsolicited Control */ + FC_CAT_SOLICIT_CTRL = 0x3, /* Solicited Control */ + FC_CAT_UNSOLICIT_DATA = 0x4, /* Unsolicited Data */ + FC_CAT_DATA_DESC = 0x5, /* Data Descriptor */ + FC_CAT_UNSOLICIT_CMD = 0x6, /* Unsolicited Command */ + FC_CAT_CMD_STATUS = 0x7, /* Command Status */ +}; + +/* + * Type Field Definitions. FC-PH Section 18.5 pg. 165 + */ +enum { + FC_TYPE_BLS = 0x0, /* Basic Link Service */ + FC_TYPE_ELS = 0x1, /* Extended Link Service */ + FC_TYPE_IP = 0x5, /* IP */ + FC_TYPE_FCP = 0x8, /* SCSI-FCP */ + FC_TYPE_GPP = 0x9, /* SCSI_GPP */ + FC_TYPE_SERVICES = 0x20, /* Fibre Channel Services */ + FC_TYPE_FC_FSS = 0x22, /* Fabric Switch Services */ + FC_TYPE_FC_AL = 0x23, /* FC-AL */ + FC_TYPE_FC_SNMP = 0x24, /* FC-SNMP */ + FC_TYPE_FC_SPINFAB = 0xEE, /* SPINFAB */ + FC_TYPE_FC_DIAG = 0xEF, /* DIAG */ + FC_TYPE_MAX = 256, /* 256 FC-4 types */ +}; + +/* + * Frame Control Definitions. FC-PH Table-45. pg. 
168 + */ +enum { + FCTL_EC_ORIG = 0x000000, /* exchange originator */ + FCTL_EC_RESP = 0x800000, /* exchange responder */ + FCTL_SEQ_INI = 0x000000, /* sequence initiator */ + FCTL_SEQ_REC = 0x400000, /* sequence recipient */ + FCTL_FS_EXCH = 0x200000, /* first sequence of xchg */ + FCTL_LS_EXCH = 0x100000, /* last sequence of xchg */ + FCTL_END_SEQ = 0x080000, /* last frame of sequence */ + FCTL_SI_XFER = 0x010000, /* seq initiative transfer */ + FCTL_RO_PRESENT = 0x000008, /* relative offset present */ + FCTL_FILLBYTE_MASK = 0x000003 /* , fill byte mask */ +}; + +/* + * Fabric Well Known Addresses + */ +enum { + FC_MIN_WELL_KNOWN_ADDR = 0xFFFFF0, + FC_DOMAIN_CONTROLLER_MASK = 0xFFFC00, + FC_ALIAS_SERVER = 0xFFFFF8, + FC_MGMT_SERVER = 0xFFFFFA, + FC_TIME_SERVER = 0xFFFFFB, + FC_NAME_SERVER = 0xFFFFFC, + FC_FABRIC_CONTROLLER = 0xFFFFFD, + FC_FABRIC_PORT = 0xFFFFFE, + FC_BROADCAST_SERVER = 0xFFFFFF +}; + +/* + * domain/area/port defines + */ +#define FC_DOMAIN_MASK 0xFF0000 +#define FC_DOMAIN_SHIFT 16 +#define FC_AREA_MASK 0x00FF00 +#define FC_AREA_SHIFT 8 +#define FC_PORT_MASK 0x0000FF +#define FC_PORT_SHIFT 0 + +#define FC_GET_DOMAIN(p) (((p) & FC_DOMAIN_MASK) >> FC_DOMAIN_SHIFT) +#define FC_GET_AREA(p) (((p) & FC_AREA_MASK) >> FC_AREA_SHIFT) +#define FC_GET_PORT(p) (((p) & FC_PORT_MASK) >> FC_PORT_SHIFT) + +#define FC_DOMAIN_CTRLR(p) (FC_DOMAIN_CONTROLLER_MASK | (FC_GET_DOMAIN(p))) + +enum { + FC_RXID_ANY = 0xFFFFU, +}; + +/* + * generic ELS command + */ +struct fc_els_cmd_s { + u32 els_code:8; /* ELS Command Code */ + u32 reserved:24; +}; + +/* + * ELS Command Codes. FC-PH Table-75. pg. 223 + */ +enum { + FC_ELS_LS_RJT = 0x1, /* Link Service Reject. */ + FC_ELS_ACC = 0x02, /* Accept */ + FC_ELS_PLOGI = 0x03, /* N_Port Login. */ + FC_ELS_FLOGI = 0x04, /* F_Port Login. */ + FC_ELS_LOGO = 0x05, /* Logout. */ + FC_ELS_ABTX = 0x06, /* Abort Exchange */ + FC_ELS_RES = 0x08, /* Read Exchange status */ + FC_ELS_RSS = 0x09, /* Read sequence status block */ + FC_ELS_RSI = 0x0A, /* Request Sequence Initiative */ + FC_ELS_ESTC = 0x0C, /* Estimate Credit. */ + FC_ELS_RTV = 0x0E, /* Read Timeout Value. */ + FC_ELS_RLS = 0x0F, /* Read Link Status. */ + FC_ELS_ECHO = 0x10, /* Echo */ + FC_ELS_TEST = 0x11, /* Test */ + FC_ELS_RRQ = 0x12, /* Reinstate Recovery Qualifier. */ + FC_ELS_REC = 0x13, /* Add this for TAPE support in FCR */ + FC_ELS_PRLI = 0x20, /* Process Login */ + FC_ELS_PRLO = 0x21, /* Process Logout. */ + FC_ELS_SCN = 0x22, /* State Change Notification. */ + FC_ELS_TPRLO = 0x24, /* Third Party Process Logout. */ + FC_ELS_PDISC = 0x50, /* Discover N_Port Parameters. */ + FC_ELS_FDISC = 0x51, /* Discover F_Port Parameters. */ + FC_ELS_ADISC = 0x52, /* Discover Address. */ + FC_ELS_FARP_REQ = 0x54, /* FARP Request. */ + FC_ELS_FARP_REP = 0x55, /* FARP Reply. */ + FC_ELS_FAN = 0x60, /* Fabric Address Notification */ + FC_ELS_RSCN = 0x61, /* Reg State Change Notification */ + FC_ELS_SCR = 0x62, /* State Change Registration. */ + FC_ELS_RTIN = 0x77, /* Mangement server request */ + FC_ELS_RNID = 0x78, /* Mangement server request */ + FC_ELS_RLIR = 0x79, /* Registered Link Incident Record */ + + FC_ELS_RPSC = 0x7D, /* Report Port Speed Capabilities */ + FC_ELS_QSA = 0x7E, /* Query Security Attributes. Ref FC-SP */ + FC_ELS_E2E_LBEACON = 0x81, + /* End-to-End Link Beacon */ + FC_ELS_AUTH = 0x90, /* Authentication. Ref FC-SP */ + FC_ELS_RFCN = 0x97, /* Request Fabric Change Notification. 
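/*
 * Illustrative only -- not part of the patch. A worked example of the
 * domain/area/port accessors above, assuming a 24-bit FC address 0x6501EF;
 * the function name is hypothetical. fc_rpsc2_build() in bfa_fcbuild.c
 * derives the domain-controller address with FC_DOMAIN_CTRLR() in the same
 * way.
 */
static inline u32 fc_domain_ctrlr_example(void)
{
	u32 pid = 0x6501EF;	/* domain 0x65, area 0x01, port 0xEF */

	/*
	 * FC_GET_DOMAIN(pid) == 0x65, FC_GET_AREA(pid) == 0x01,
	 * FC_GET_PORT(pid) == 0xEF
	 */
	return FC_DOMAIN_CTRLR(pid);	/* 0xFFFC00 | 0x65 == 0xFFFC65 */
}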
Ref + *FC-SP */ +}; + +/* + * Version numbers for FC-PH standards, + * used in login to indicate what port + * supports. See FC-PH-X table 158. + */ +enum { + FC_PH_VER_4_3 = 0x09, + FC_PH_VER_PH_3 = 0x20, +}; + +/* + * PDU size defines + */ +enum { + FC_MIN_PDUSZ = 512, + FC_MAX_PDUSZ = 2112, +}; + +/* + * N_Port PLOGI Common Service Parameters. + * FC-PH-x. Figure-76. pg. 308. + */ +struct fc_plogi_csp_s { + u8 verhi; /* FC-PH high version */ + u8 verlo; /* FC-PH low version */ + __be16 bbcred; /* BB_Credit */ + +#ifdef __BIG_ENDIAN + u8 ciro:1, /* continuously increasing RO */ + rro:1, /* random relative offset */ + npiv_supp:1, /* NPIV supported */ + port_type:1, /* N_Port/F_port */ + altbbcred:1, /* alternate BB_Credit */ + resolution:1, /* ms/ns ED_TOV resolution */ + vvl_info:1, /* VVL Info included */ + reserved1:1; + + u8 hg_supp:1, + query_dbc:1, + security:1, + sync_cap:1, + r_t_tov:1, + dh_dup_supp:1, + cisc:1, /* continuously increasing seq count */ + payload:1; +#else + u8 reserved2:2, + resolution:1, /* ms/ns ED_TOV resolution */ + altbbcred:1, /* alternate BB_Credit */ + port_type:1, /* N_Port/F_port */ + npiv_supp:1, /* NPIV supported */ + rro:1, /* random relative offset */ + ciro:1; /* continuously increasing RO */ + + u8 payload:1, + cisc:1, /* continuously increasing seq count */ + dh_dup_supp:1, + r_t_tov:1, + sync_cap:1, + security:1, + query_dbc:1, + hg_supp:1; +#endif + __be16 rxsz; /* receive data_field size */ + __be16 conseq; + __be16 ro_bitmap; + __be32 e_d_tov; +}; + +/* + * N_Port PLOGI Class Specific Parameters. + * FC-PH-x. Figure 78. pg. 318. + */ +struct fc_plogi_clp_s { +#ifdef __BIG_ENDIAN + u32 class_valid:1; + u32 intermix:1; /* class intermix supported if set =1. + * valid only for class1. Reserved for + * class2 & class3 */ + u32 reserved1:2; + u32 sequential:1; + u32 reserved2:3; +#else + u32 reserved2:3; + u32 sequential:1; + u32 reserved1:2; + u32 intermix:1; /* class intermix supported if set =1. + * valid only for class1. 
Reserved for + * class2 & class3 */ + u32 class_valid:1; +#endif + u32 reserved3:24; + + u32 reserved4:16; + u32 rxsz:16; /* Receive data_field size */ + + u32 reserved5:8; + u32 conseq:8; + u32 e2e_credit:16; /* end to end credit */ + + u32 reserved7:8; + u32 ospx:8; + u32 reserved8:16; +}; + +/* ASCII value for each character in string "BRCD" */ +#define FLOGI_VVL_BRCD 0x42524344 + +/* + * PLOGI els command and reply payload + */ +struct fc_logi_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + struct fc_plogi_csp_s csp; /* common service params */ + wwn_t port_name; + wwn_t node_name; + struct fc_plogi_clp_s class1; /* class 1 service parameters */ + struct fc_plogi_clp_s class2; /* class 2 service parameters */ + struct fc_plogi_clp_s class3; /* class 3 service parameters */ + struct fc_plogi_clp_s class4; /* class 4 service parameters */ + u8 vvl[16]; /* vendor version level */ +}; + +/* + * LOGO els command payload + */ +struct fc_logo_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + u32 res1:8; + u32 nport_id:24; /* N_Port identifier of source */ + wwn_t orig_port_name; /* Port name of the LOGO originator */ +}; + +/* + * ADISC els command payload + */ +struct fc_adisc_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + u32 res1:8; + u32 orig_HA:24; /* originator hard address */ + wwn_t orig_port_name; /* originator port name */ + wwn_t orig_node_name; /* originator node name */ + u32 res2:8; + u32 nport_id:24; /* originator NPortID */ +}; + +/* + * Exchange status block + */ +struct fc_exch_status_blk_s { + u32 oxid:16; + u32 rxid:16; + u32 res1:8; + u32 orig_np:24; /* originator NPortID */ + u32 res2:8; + u32 resp_np:24; /* responder NPortID */ + u32 es_bits; + u32 res3; + /* + * un modified section of the fields + */ +}; + +/* + * RES els command payload + */ +struct fc_res_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + u32 res1:8; + u32 nport_id:24; /* N_Port identifier of source */ + u32 oxid:16; + u32 rxid:16; + u8 assoc_hdr[32]; +}; + +/* + * RES els accept payload + */ +struct fc_res_acc_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + struct fc_exch_status_blk_s fc_exch_blk; /* Exchange status block */ +}; + +/* + * REC els command payload + */ +struct fc_rec_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + u32 res1:8; + u32 nport_id:24; /* N_Port identifier of source */ + u32 oxid:16; + u32 rxid:16; +}; + +#define FC_REC_ESB_OWN_RSP 0x80000000 /* responder owns */ +#define FC_REC_ESB_SI 0x40000000 /* SI is owned */ +#define FC_REC_ESB_COMP 0x20000000 /* exchange is complete */ +#define FC_REC_ESB_ENDCOND_ABN 0x10000000 /* abnormal ending */ +#define FC_REC_ESB_RQACT 0x04000000 /* recovery qual active */ +#define FC_REC_ESB_ERRP_MSK 0x03000000 +#define FC_REC_ESB_OXID_INV 0x00800000 /* invalid OXID */ +#define FC_REC_ESB_RXID_INV 0x00400000 /* invalid RXID */ +#define FC_REC_ESB_PRIO_INUSE 0x00200000 + +/* + * REC els accept payload + */ +struct fc_rec_acc_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + u32 oxid:16; + u32 rxid:16; + u32 res1:8; + u32 orig_id:24; /* N_Port id of exchange originator */ + u32 res2:8; + u32 resp_id:24; /* N_Port id of exchange responder */ + u32 count; /* data transfer count */ + u32 e_stat; /* exchange status */ +}; + +/* + * RSI els payload + */ +struct fc_rsi_s { + struct fc_els_cmd_s els_cmd; + u32 res1:8; + u32 orig_sid:24; + u32 oxid:16; + u32 rxid:16; +}; + +/* + * structure for PRLI paramater pages, both request & response + * see FC-PH-X table 113 & 115 for 
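/*
 * Illustrative only -- not part of the patch. A sanity-check sketch of the
 * packed wire sizes implied by the definitions above: with #pragma pack(1)
 * in effect, fchs_s must be the 24-byte FC frame header and fc_logi_s the
 * 116-byte PLOGI/FLOGI payload (4-byte ELS code + 16-byte common service
 * parameters + two 8-byte WWNs + four 16-byte class parameter blocks +
 * 16 bytes of vendor version level). The function name is hypothetical;
 * BUILD_BUG_ON() is the usual kernel compile-time assertion.
 */
static inline void bfa_fc_check_wire_sizes(void)
{
	BUILD_BUG_ON(sizeof(struct fc_els_cmd_s) != 4);
	BUILD_BUG_ON(sizeof(struct fchs_s) != 24);
	BUILD_BUG_ON(sizeof(struct fc_logi_s) != 116);
}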
explanation also FCP table 8 + */ +struct fc_prli_params_s { + u32 reserved:16; +#ifdef __BIG_ENDIAN + u32 reserved1:5; + u32 rec_support:1; + u32 task_retry_id:1; + u32 retry:1; + + u32 confirm:1; + u32 doverlay:1; + u32 initiator:1; + u32 target:1; + u32 cdmix:1; + u32 drmix:1; + u32 rxrdisab:1; + u32 wxrdisab:1; +#else + u32 retry:1; + u32 task_retry_id:1; + u32 rec_support:1; + u32 reserved1:5; + + u32 wxrdisab:1; + u32 rxrdisab:1; + u32 drmix:1; + u32 cdmix:1; + u32 target:1; + u32 initiator:1; + u32 doverlay:1; + u32 confirm:1; +#endif +}; + +/* + * valid values for rspcode in PRLI ACC payload + */ +enum { + FC_PRLI_ACC_XQTD = 0x1, /* request executed */ + FC_PRLI_ACC_PREDEF_IMG = 0x5, /* predefined image - no prli needed */ +}; + +struct fc_prli_params_page_s { + u32 type:8; + u32 codext:8; +#ifdef __BIG_ENDIAN + u32 origprocasv:1; + u32 rsppav:1; + u32 imagepair:1; + u32 reserved1:1; + u32 rspcode:4; +#else + u32 rspcode:4; + u32 reserved1:1; + u32 imagepair:1; + u32 rsppav:1; + u32 origprocasv:1; +#endif + u32 reserved2:8; + + u32 origprocas; + u32 rspprocas; + struct fc_prli_params_s servparams; +}; + +/* + * PRLI request and accept payload, FC-PH-X tables 112 & 114 + */ +struct fc_prli_s { + u32 command:8; + u32 pglen:8; + u32 pagebytes:16; + struct fc_prli_params_page_s parampage; +}; + +/* + * PRLO logout params page + */ +struct fc_prlo_params_page_s { + u32 type:8; + u32 type_ext:8; +#ifdef __BIG_ENDIAN + u32 opa_valid:1; /* originator process associator valid */ + u32 rpa_valid:1; /* responder process associator valid */ + u32 res1:14; +#else + u32 res1:14; + u32 rpa_valid:1; /* responder process associator valid */ + u32 opa_valid:1; /* originator process associator valid */ +#endif + u32 orig_process_assc; + u32 resp_process_assc; + + u32 res2; +}; + +/* + * PRLO els command payload + */ +struct fc_prlo_s { + u32 command:8; + u32 page_len:8; + u32 payload_len:16; + struct fc_prlo_params_page_s prlo_params[1]; +}; + +/* + * PRLO Logout response parameter page + */ +struct fc_prlo_acc_params_page_s { + u32 type:8; + u32 type_ext:8; + +#ifdef __BIG_ENDIAN + u32 opa_valid:1; /* originator process associator valid */ + u32 rpa_valid:1; /* responder process associator valid */ + u32 res1:14; +#else + u32 res1:14; + u32 rpa_valid:1; /* responder process associator valid */ + u32 opa_valid:1; /* originator process associator valid */ +#endif + u32 orig_process_assc; + u32 resp_process_assc; + + u32 fc4type_csp; +}; + +/* + * PRLO els command ACC payload + */ +struct fc_prlo_acc_s { + u32 command:8; + u32 page_len:8; + u32 payload_len:16; + struct fc_prlo_acc_params_page_s prlo_acc_params[1]; +}; + +/* + * SCR els command payload + */ +enum { + FC_SCR_REG_FUNC_FABRIC_DETECTED = 0x01, + FC_SCR_REG_FUNC_N_PORT_DETECTED = 0x02, + FC_SCR_REG_FUNC_FULL = 0x03, + FC_SCR_REG_FUNC_CLEAR_REG = 0xFF, +}; + +/* SCR VU registrations */ +enum { + FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE = 0x01 +}; + +struct fc_scr_s { + u32 command:8; + u32 res:24; + u32 vu_reg_func:8; /* Vendor Unique Registrations */ + u32 res1:16; + u32 reg_func:8; +}; + +/* + * Information category for Basic link data + */ +enum { + FC_CAT_NOP = 0x0, + FC_CAT_ABTS = 0x1, + FC_CAT_RMC = 0x2, + FC_CAT_BA_ACC = 0x4, + FC_CAT_BA_RJT = 0x5, + FC_CAT_PRMT = 0x6, +}; + +/* + * LS_RJT els reply payload + */ +struct fc_ls_rjt_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + u32 res1:8; + u32 reason_code:8; /* Reason code for reject */ + u32 reason_code_expl:8; /* Reason code explanation */ + u32 vendor_unique:8; /* Vendor 
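/*
 * Illustrative only -- not part of the patch. A minimal sketch, assuming the
 * fc_prli_s layout above, of how the FCP service-parameter page of a PRLI
 * accept is read to decide whether the peer offers an FCP target;
 * fc_prli_rsp_parse() in bfa_fcbuild.c applies similar checks. The helper
 * name is hypothetical.
 */
static inline int bfa_prli_acc_offers_fcp_target(const struct fc_prli_s *prli)
{
	const struct fc_prli_params_page_s *page = &prli->parampage;

	if (page->type != FC_TYPE_FCP)
		return 0;
	if (page->rspcode != FC_PRLI_ACC_XQTD &&
	    page->rspcode != FC_PRLI_ACC_PREDEF_IMG)
		return 0;
	return page->servparams.target;
}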
specific */ +}; + +/* + * LS_RJT reason codes + */ +enum { + FC_LS_RJT_RSN_INV_CMD_CODE = 0x01, + FC_LS_RJT_RSN_LOGICAL_ERROR = 0x03, + FC_LS_RJT_RSN_LOGICAL_BUSY = 0x05, + FC_LS_RJT_RSN_PROTOCOL_ERROR = 0x07, + FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD = 0x09, + FC_LS_RJT_RSN_CMD_NOT_SUPP = 0x0B, +}; + +/* + * LS_RJT reason code explanation + */ +enum { + FC_LS_RJT_EXP_NO_ADDL_INFO = 0x00, + FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS = 0x01, + FC_LS_RJT_EXP_SPARMS_ERR_INI_CTL = 0x03, + FC_LS_RJT_EXP_SPARMS_ERR_REC_CTL = 0x05, + FC_LS_RJT_EXP_SPARMS_ERR_RXSZ = 0x07, + FC_LS_RJT_EXP_SPARMS_ERR_CONSEQ = 0x09, + FC_LS_RJT_EXP_SPARMS_ERR_CREDIT = 0x0B, + FC_LS_RJT_EXP_INV_PORT_NAME = 0x0D, + FC_LS_RJT_EXP_INV_NODE_FABRIC_NAME = 0x0E, + FC_LS_RJT_EXP_INV_CSP = 0x0F, + FC_LS_RJT_EXP_INV_ASSOC_HDR = 0x11, + FC_LS_RJT_EXP_ASSOC_HDR_REQD = 0x13, + FC_LS_RJT_EXP_INV_ORIG_S_ID = 0x15, + FC_LS_RJT_EXP_INV_OXID_RXID_COMB = 0x17, + FC_LS_RJT_EXP_CMD_ALREADY_IN_PROG = 0x19, + FC_LS_RJT_EXP_LOGIN_REQUIRED = 0x1E, + FC_LS_RJT_EXP_INVALID_NPORT_ID = 0x1F, + FC_LS_RJT_EXP_INSUFF_RES = 0x29, + FC_LS_RJT_EXP_CMD_NOT_SUPP = 0x2C, + FC_LS_RJT_EXP_INV_PAYLOAD_LEN = 0x2D, +}; + +/* + * RRQ els command payload + */ +struct fc_rrq_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + u32 res1:8; + u32 s_id:24; /* exchange originator S_ID */ + + u32 ox_id:16; /* originator exchange ID */ + u32 rx_id:16; /* responder exchange ID */ + + u32 res2[8]; /* optional association header */ +}; + +/* + * ABTS BA_ACC reply payload + */ +struct fc_ba_acc_s { + u32 seq_id_valid:8; /* set to 0x00 for Abort Exchange */ + u32 seq_id:8; /* invalid for Abort Exchange */ + u32 res2:16; + u32 ox_id:16; /* OX_ID from ABTS frame */ + u32 rx_id:16; /* RX_ID from ABTS frame */ + u32 low_seq_cnt:16; /* set to 0x0000 for Abort Exchange */ + u32 high_seq_cnt:16; /* set to 0xFFFF for Abort Exchange */ +}; + +/* + * ABTS BA_RJT reject payload + */ +struct fc_ba_rjt_s { + u32 res1:8; /* Reserved */ + u32 reason_code:8; /* reason code for reject */ + u32 reason_expl:8; /* reason code explanation */ + u32 vendor_unique:8; /* vendor unique reason code,set to 0 */ +}; + +/* + * TPRLO logout parameter page + */ +struct fc_tprlo_params_page_s { + u32 type:8; + u32 type_ext:8; + +#ifdef __BIG_ENDIAN + u32 opa_valid:1; + u32 rpa_valid:1; + u32 tpo_nport_valid:1; + u32 global_process_logout:1; + u32 res1:12; +#else + u32 res1:12; + u32 global_process_logout:1; + u32 tpo_nport_valid:1; + u32 rpa_valid:1; + u32 opa_valid:1; +#endif + + u32 orig_process_assc; + u32 resp_process_assc; + + u32 res2:8; + u32 tpo_nport_id; +}; + +/* + * TPRLO ELS command payload + */ +struct fc_tprlo_s { + u32 command:8; + u32 page_len:8; + u32 payload_len:16; + + struct fc_tprlo_params_page_s tprlo_params[1]; +}; + +enum fc_tprlo_type { + FC_GLOBAL_LOGO = 1, + FC_TPR_LOGO +}; + +/* + * TPRLO els command ACC payload + */ +struct fc_tprlo_acc_s { + u32 command:8; + u32 page_len:8; + u32 payload_len:16; + struct fc_prlo_acc_params_page_s tprlo_acc_params[1]; +}; + +/* + * RSCN els command req payload + */ +#define FC_RSCN_PGLEN 0x4 + +enum fc_rscn_format { + FC_RSCN_FORMAT_PORTID = 0x0, + FC_RSCN_FORMAT_AREA = 0x1, + FC_RSCN_FORMAT_DOMAIN = 0x2, + FC_RSCN_FORMAT_FABRIC = 0x3, +}; + +struct fc_rscn_event_s { + u32 format:2; + u32 qualifier:4; + u32 resvd:2; + u32 portid:24; +}; + +struct fc_rscn_pl_s { + u8 command; + u8 pagelen; + __be16 payldlen; + struct fc_rscn_event_s event[]; +}; + +/* + * ECHO els command req payload + */ +struct fc_echo_s { + struct fc_els_cmd_s els_cmd; +}; + +/* + * 
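/*
 * Illustrative only -- not part of the patch. A sketch of how an RSCN
 * payload is sized, assuming the fc_rscn_pl_s flexible-array layout above:
 * the fixed part (command, page length, payload length) is one 32-bit word
 * and each affected-port entry adds one 4-byte fc_rscn_event_s page.
 * fc_rscn_build() in bfa_fcbuild.c computes the single-event case the same
 * way. The helper name is hypothetical.
 */
static inline u16 bfa_rscn_payload_len(u16 num_events)
{
	return sizeof(u32) + num_events * sizeof(struct fc_rscn_event_s);
}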
RNID els command + */ +#define RNID_NODEID_DATA_FORMAT_COMMON 0x00 +#define RNID_NODEID_DATA_FORMAT_FCP3 0x08 +#define RNID_NODEID_DATA_FORMAT_DISCOVERY 0xDF + +#define RNID_ASSOCIATED_TYPE_UNKNOWN 0x00000001 +#define RNID_ASSOCIATED_TYPE_OTHER 0x00000002 +#define RNID_ASSOCIATED_TYPE_HUB 0x00000003 +#define RNID_ASSOCIATED_TYPE_SWITCH 0x00000004 +#define RNID_ASSOCIATED_TYPE_GATEWAY 0x00000005 +#define RNID_ASSOCIATED_TYPE_STORAGE_DEVICE 0x00000009 +#define RNID_ASSOCIATED_TYPE_HOST 0x0000000A +#define RNID_ASSOCIATED_TYPE_STORAGE_SUBSYSTEM 0x0000000B +#define RNID_ASSOCIATED_TYPE_STORAGE_ACCESS_DEVICE 0x0000000E +#define RNID_ASSOCIATED_TYPE_NAS_SERVER 0x00000011 +#define RNID_ASSOCIATED_TYPE_BRIDGE 0x00000002 +#define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE 0x00000003 +#define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE 0x000000FF + +/* + * RNID els command payload + */ +struct fc_rnid_cmd_s { + struct fc_els_cmd_s els_cmd; + u32 node_id_data_format:8; + u32 reserved:24; +}; + +/* + * RNID els response payload + */ + +struct fc_rnid_common_id_data_s { + wwn_t port_name; + wwn_t node_name; +}; + +struct fc_rnid_general_topology_data_s { + u32 vendor_unique[4]; + __be32 asso_type; + u32 phy_port_num; + __be32 num_attached_nodes; + u32 node_mgmt:8; + u32 ip_version:8; + u32 udp_tcp_port_num:16; + u32 ip_address[4]; + u32 reserved:16; + u32 vendor_specific:16; +}; + +struct fc_rnid_acc_s { + struct fc_els_cmd_s els_cmd; + u32 node_id_data_format:8; + u32 common_id_data_length:8; + u32 reserved:8; + u32 specific_id_data_length:8; + struct fc_rnid_common_id_data_s common_id_data; + struct fc_rnid_general_topology_data_s gen_topology_data; +}; + +#define RNID_ASSOCIATED_TYPE_UNKNOWN 0x00000001 +#define RNID_ASSOCIATED_TYPE_OTHER 0x00000002 +#define RNID_ASSOCIATED_TYPE_HUB 0x00000003 +#define RNID_ASSOCIATED_TYPE_SWITCH 0x00000004 +#define RNID_ASSOCIATED_TYPE_GATEWAY 0x00000005 +#define RNID_ASSOCIATED_TYPE_STORAGE_DEVICE 0x00000009 +#define RNID_ASSOCIATED_TYPE_HOST 0x0000000A +#define RNID_ASSOCIATED_TYPE_STORAGE_SUBSYSTEM 0x0000000B +#define RNID_ASSOCIATED_TYPE_STORAGE_ACCESS_DEVICE 0x0000000E +#define RNID_ASSOCIATED_TYPE_NAS_SERVER 0x00000011 +#define RNID_ASSOCIATED_TYPE_BRIDGE 0x00000002 +#define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE 0x00000003 +#define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE 0x000000FF + +enum fc_rpsc_speed_cap { + RPSC_SPEED_CAP_1G = 0x8000, + RPSC_SPEED_CAP_2G = 0x4000, + RPSC_SPEED_CAP_4G = 0x2000, + RPSC_SPEED_CAP_10G = 0x1000, + RPSC_SPEED_CAP_8G = 0x0800, + RPSC_SPEED_CAP_16G = 0x0400, + + RPSC_SPEED_CAP_UNKNOWN = 0x0001, +}; + +enum fc_rpsc_op_speed { + RPSC_OP_SPEED_1G = 0x8000, + RPSC_OP_SPEED_2G = 0x4000, + RPSC_OP_SPEED_4G = 0x2000, + RPSC_OP_SPEED_10G = 0x1000, + RPSC_OP_SPEED_8G = 0x0800, + RPSC_OP_SPEED_16G = 0x0400, + + RPSC_OP_SPEED_NOT_EST = 0x0001, /* speed not established */ +}; + +struct fc_rpsc_speed_info_s { + __be16 port_speed_cap; /* see enum fc_rpsc_speed_cap */ + __be16 port_op_speed; /* see enum fc_rpsc_op_speed */ +}; + +/* + * If RPSC request is sent to the Domain Controller, the request is for + * all the ports within that domain. 
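/*
 * Illustrative only -- not part of the patch. A small sketch, assuming the
 * fc_rpsc_speed_info_s layout and the fc_rpsc_op_speed bits above, that
 * turns the big-endian operating-speed word of an RPSC accept into Gb/s
 * (0 when the speed is not established or not recognized). The helper name
 * is hypothetical.
 */
static inline u32 bfa_rpsc_op_speed_gbps(const struct fc_rpsc_speed_info_s *si)
{
	switch (be16_to_cpu(si->port_op_speed)) {
	case RPSC_OP_SPEED_1G:	return 1;
	case RPSC_OP_SPEED_2G:	return 2;
	case RPSC_OP_SPEED_4G:	return 4;
	case RPSC_OP_SPEED_8G:	return 8;
	case RPSC_OP_SPEED_10G:	return 10;
	case RPSC_OP_SPEED_16G:	return 16;
	case RPSC_OP_SPEED_NOT_EST:
	default:		return 0;
	}
}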
+ */ +struct fc_rpsc_cmd_s { + struct fc_els_cmd_s els_cmd; +}; + +/* + * RPSC Acc + */ +struct fc_rpsc_acc_s { + u32 command:8; + u32 rsvd:8; + u32 num_entries:16; + + struct fc_rpsc_speed_info_s speed_info[1]; +}; + +/* + * If RPSC2 request is sent to the Domain Controller, + */ +#define FC_BRCD_TOKEN 0x42524344 + +struct fc_rpsc2_cmd_s { + struct fc_els_cmd_s els_cmd; + __be32 token; + u16 resvd; + __be16 num_pids; /* Number of pids in the request */ + struct { + u32 rsvd1:8; + u32 pid:24; /* port identifier */ + } pid_list[1]; +}; + +enum fc_rpsc2_port_type { + RPSC2_PORT_TYPE_UNKNOWN = 0, + RPSC2_PORT_TYPE_NPORT = 1, + RPSC2_PORT_TYPE_NLPORT = 2, + RPSC2_PORT_TYPE_NPIV_PORT = 0x5f, + RPSC2_PORT_TYPE_NPORT_TRUNK = 0x6f, +}; + +/* + * RPSC2 portInfo entry structure + */ +struct fc_rpsc2_port_info_s { + __be32 pid; /* PID */ + u16 resvd1; + __be16 index; /* port number / index */ + u8 resvd2; + u8 type; /* port type N/NL/... */ + __be16 speed; /* port Operating Speed */ +}; + +/* + * RPSC2 Accept payload + */ +struct fc_rpsc2_acc_s { + u8 els_cmd; + u8 resvd; + __be16 num_pids; /* Number of pids in the request */ + struct fc_rpsc2_port_info_s port_info[1]; /* port information */ +}; + +/* + * bit fields so that multiple classes can be specified + */ +enum fc_cos { + FC_CLASS_2 = 0x04, + FC_CLASS_3 = 0x08, + FC_CLASS_2_3 = 0x0C, +}; + +/* + * symbolic name + */ +struct fc_symname_s { + u8 symname[FC_SYMNAME_MAX]; +}; + +struct fc_alpabm_s { + u8 alpa_bm[FC_ALPA_MAX / 8]; +}; + +/* + * protocol default timeout values + */ +#define FC_ED_TOV 2 +#define FC_REC_TOV (FC_ED_TOV + 1) +#define FC_RA_TOV 10 +#define FC_ELS_TOV (2 * FC_RA_TOV) +#define FC_FCCT_TOV (3 * FC_RA_TOV) + +/* + * virtual fabric related defines + */ +#define FC_VF_ID_NULL 0 /* must not be used as VF_ID */ +#define FC_VF_ID_MIN 1 +#define FC_VF_ID_MAX 0xEFF +#define FC_VF_ID_CTL 0xFEF /* control VF_ID */ + +/* + * Virtual Fabric Tagging header format + * @caution This is defined only in BIG ENDIAN format. 
+ */ +struct fc_vft_s { + u32 r_ctl:8; + u32 ver:2; + u32 type:4; + u32 res_a:2; + u32 priority:3; + u32 vf_id:12; + u32 res_b:1; + u32 hopct:8; + u32 res_c:24; +}; + +/* + * FCP_CMND definitions + */ +#define FCP_CMND_CDB_LEN 16 +#define FCP_CMND_LUN_LEN 8 + +struct fcp_cmnd_s { + struct scsi_lun lun; /* 64-bit LU number */ + u8 crn; /* command reference number */ +#ifdef __BIG_ENDIAN + u8 resvd:1, + priority:4, /* FCP-3: SAM-3 priority */ + taskattr:3; /* scsi task attribute */ +#else + u8 taskattr:3, /* scsi task attribute */ + priority:4, /* FCP-3: SAM-3 priority */ + resvd:1; +#endif + u8 tm_flags; /* task management flags */ +#ifdef __BIG_ENDIAN + u8 addl_cdb_len:6, /* additional CDB length words */ + iodir:2; /* read/write FCP_DATA IUs */ +#else + u8 iodir:2, /* read/write FCP_DATA IUs */ + addl_cdb_len:6; /* additional CDB length */ +#endif + struct scsi_cdb_s cdb; + + __be32 fcp_dl; /* bytes to be transferred */ +}; + +#define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN) +#define fcp_cmnd_fcpdl(_cmnd) ((&(_cmnd)->fcp_dl)[(_cmnd)->addl_cdb_len]) + +/* + * struct fcp_cmnd_s .iodir field values + */ +enum fcp_iodir { + FCP_IODIR_NONE = 0, + FCP_IODIR_WRITE = 1, + FCP_IODIR_READ = 2, + FCP_IODIR_RW = 3, +}; + +/* + * Task management flags field - only one bit shall be set + */ +enum fcp_tm_cmnd { + FCP_TM_ABORT_TASK_SET = BIT(1), + FCP_TM_CLEAR_TASK_SET = BIT(2), + FCP_TM_LUN_RESET = BIT(4), + FCP_TM_TARGET_RESET = BIT(5), /* obsolete in FCP-3 */ + FCP_TM_CLEAR_ACA = BIT(6), +}; + +/* + * FCP_RSP residue flags + */ +enum fcp_residue { + FCP_NO_RESIDUE = 0, /* no residue */ + FCP_RESID_OVER = 1, /* more data left that was not sent */ + FCP_RESID_UNDER = 2, /* less data than requested */ +}; + +struct fcp_rspinfo_s { + u32 res0:24; + u32 rsp_code:8; /* response code (as above) */ + u32 res1; +}; + +struct fcp_resp_s { + u32 reserved[2]; /* 2 words reserved */ + u16 reserved2; +#ifdef __BIG_ENDIAN + u8 reserved3:3; + u8 fcp_conf_req:1; /* FCP_CONF is requested */ + u8 resid_flags:2; /* underflow/overflow */ + u8 sns_len_valid:1; /* sense len is valid */ + u8 rsp_len_valid:1; /* response len is valid */ +#else + u8 rsp_len_valid:1; /* response len is valid */ + u8 sns_len_valid:1; /* sense len is valid */ + u8 resid_flags:2; /* underflow/overflow */ + u8 fcp_conf_req:1; /* FCP_CONF is requested */ + u8 reserved3:3; +#endif + u8 scsi_status; /* one byte SCSI status */ + u32 residue; /* residual data bytes */ + u32 sns_len; /* length od sense info */ + u32 rsp_len; /* length of response info */ +}; + +#define fcp_snslen(__fcprsp) ((__fcprsp)->sns_len_valid ? \ + (__fcprsp)->sns_len : 0) +#define fcp_rsplen(__fcprsp) ((__fcprsp)->rsp_len_valid ? 
\ + (__fcprsp)->rsp_len : 0) +#define fcp_rspinfo(__fcprsp) ((struct fcp_rspinfo_s *)((__fcprsp) + 1)) +#define fcp_snsinfo(__fcprsp) (((u8 *)fcp_rspinfo(__fcprsp)) + \ + fcp_rsplen(__fcprsp)) +/* + * CT + */ +struct ct_hdr_s { + u32 rev_id:8; /* Revision of the CT */ + u32 in_id:24; /* Initiator Id */ + u32 gs_type:8; /* Generic service Type */ + u32 gs_sub_type:8; /* Generic service sub type */ + u32 options:8; /* options */ + u32 rsvrd:8; /* reserved */ + u32 cmd_rsp_code:16;/* ct command/response code */ + u32 max_res_size:16;/* maximum/residual size */ + u32 frag_id:8; /* fragment ID */ + u32 reason_code:8; /* reason code */ + u32 exp_code:8; /* explanation code */ + u32 vendor_unq:8; /* vendor unique */ +}; + +/* + * defines for the Revision + */ +enum { + CT_GS3_REVISION = 0x01, +}; + +/* + * defines for gs_type + */ +enum { + CT_GSTYPE_KEYSERVICE = 0xF7, + CT_GSTYPE_ALIASSERVICE = 0xF8, + CT_GSTYPE_MGMTSERVICE = 0xFA, + CT_GSTYPE_TIMESERVICE = 0xFB, + CT_GSTYPE_DIRSERVICE = 0xFC, +}; + +/* + * defines for gs_sub_type for gs type directory service + */ +enum { + CT_GSSUBTYPE_NAMESERVER = 0x02, +}; + +/* + * defines for gs_sub_type for gs type management service + */ +enum { + CT_GSSUBTYPE_CFGSERVER = 0x01, + CT_GSSUBTYPE_UNZONED_NS = 0x02, + CT_GSSUBTYPE_ZONESERVER = 0x03, + CT_GSSUBTYPE_LOCKSERVER = 0x04, + CT_GSSUBTYPE_HBA_MGMTSERVER = 0x10, /* for FDMI */ +}; + +/* + * defines for CT response code field + */ +enum { + CT_RSP_REJECT = 0x8001, + CT_RSP_ACCEPT = 0x8002, +}; + +/* + * definitions for CT reason code + */ +enum { + CT_RSN_INV_CMD = 0x01, + CT_RSN_INV_VER = 0x02, + CT_RSN_LOGIC_ERR = 0x03, + CT_RSN_INV_SIZE = 0x04, + CT_RSN_LOGICAL_BUSY = 0x05, + CT_RSN_PROTO_ERR = 0x07, + CT_RSN_UNABLE_TO_PERF = 0x09, + CT_RSN_NOT_SUPP = 0x0B, + CT_RSN_SERVER_NOT_AVBL = 0x0D, + CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E, + CT_RSN_VENDOR_SPECIFIC = 0xFF, + +}; + +/* + * definitions for explanations code for Name server + */ +enum { + CT_NS_EXP_NOADDITIONAL = 0x00, + CT_NS_EXP_ID_NOT_REG = 0x01, + CT_NS_EXP_PN_NOT_REG = 0x02, + CT_NS_EXP_NN_NOT_REG = 0x03, + CT_NS_EXP_CS_NOT_REG = 0x04, + CT_NS_EXP_IPN_NOT_REG = 0x05, + CT_NS_EXP_IPA_NOT_REG = 0x06, + CT_NS_EXP_FT_NOT_REG = 0x07, + CT_NS_EXP_SPN_NOT_REG = 0x08, + CT_NS_EXP_SNN_NOT_REG = 0x09, + CT_NS_EXP_PT_NOT_REG = 0x0A, + CT_NS_EXP_IPP_NOT_REG = 0x0B, + CT_NS_EXP_FPN_NOT_REG = 0x0C, + CT_NS_EXP_HA_NOT_REG = 0x0D, + CT_NS_EXP_FD_NOT_REG = 0x0E, + CT_NS_EXP_FF_NOT_REG = 0x0F, + CT_NS_EXP_ACCESSDENIED = 0x10, + CT_NS_EXP_UNACCEPTABLE_ID = 0x11, + CT_NS_EXP_DATABASEEMPTY = 0x12, + CT_NS_EXP_NOT_REG_IN_SCOPE = 0x13, + CT_NS_EXP_DOM_ID_NOT_PRESENT = 0x14, + CT_NS_EXP_PORT_NUM_NOT_PRESENT = 0x15, + CT_NS_EXP_NO_DEVICE_ATTACHED = 0x16 +}; + +/* + * definitions for the explanation code for all servers + */ +enum { + CT_EXP_AUTH_EXCEPTION = 0xF1, + CT_EXP_DB_FULL = 0xF2, + CT_EXP_DB_EMPTY = 0xF3, + CT_EXP_PROCESSING_REQ = 0xF4, + CT_EXP_UNABLE_TO_VERIFY_CONN = 0xF5, + CT_EXP_DEVICES_NOT_IN_CMN_ZONE = 0xF6 +}; + +/* + * Command codes for Name server + */ +enum { + GS_GID_PN = 0x0121, /* Get Id on port name */ + GS_GPN_ID = 0x0112, /* Get port name on ID */ + GS_GNN_ID = 0x0113, /* Get node name on ID */ + GS_GID_FT = 0x0171, /* Get Id on FC4 type */ + GS_GSPN_ID = 0x0118, /* Get symbolic PN on ID */ + GS_RFT_ID = 0x0217, /* Register fc4type on ID */ + GS_RSPN_ID = 0x0218, /* Register symbolic PN on ID */ + GS_RSNN_NN = 0x0239, /* Register symbolic NN on NN */ + GS_RPN_ID = 0x0212, /* Register port name */ + GS_RNN_ID = 0x0213, /* Register node 
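/*
 * Illustrative only -- not part of the patch. A sketch of the FCP_CMND and
 * FCP_RSP helpers defined above (just before the CT definitions), assuming
 * those layouts: fcp_cmnd_cdb_len() adds the additional CDB words to the
 * fixed 16-byte CDB, and sense data follows the response-info area, so it is
 * located via fcp_rsplen()/fcp_snsinfo(). The function names are
 * hypothetical.
 */
static inline u32 bfa_fcp_cmnd_iu_len(const struct fcp_cmnd_s *cmnd)
{
	/* e.g. addl_cdb_len == 4 gives a 32-byte CDB and a 48-byte IU */
	return sizeof(struct fcp_cmnd_s) - FCP_CMND_CDB_LEN +
		fcp_cmnd_cdb_len(cmnd);
}

static inline u8 *bfa_fcp_sense_data(struct fcp_resp_s *rsp, u32 *sns_len)
{
	*sns_len = fcp_snslen(rsp);	/* 0 when sns_len_valid is not set */
	return *sns_len ? fcp_snsinfo(rsp) : NULL;
}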
name */ + GS_RCS_ID = 0x0214, /* Register class of service */ + GS_RPT_ID = 0x021A, /* Register port type */ + GS_GA_NXT = 0x0100, /* Get all next */ + GS_RFF_ID = 0x021F, /* Register FC4 Feature */ +}; + +struct fcgs_id_req_s { + u32 rsvd:8; + u32 dap:24; /* port identifier */ +}; +#define fcgs_gpnid_req_t struct fcgs_id_req_s +#define fcgs_gnnid_req_t struct fcgs_id_req_s +#define fcgs_gspnid_req_t struct fcgs_id_req_s + +struct fcgs_gidpn_req_s { + wwn_t port_name; /* port wwn */ +}; + +struct fcgs_gidpn_resp_s { + u32 rsvd:8; + u32 dap:24; /* port identifier */ +}; + +/* + * RFT_ID + */ +struct fcgs_rftid_req_s { + u32 rsvd:8; + u32 dap:24; /* port identifier */ + __be32 fc4_type[8]; /* fc4 types */ +}; + +/* + * RFF_ID : Register FC4 features. + */ +#define FC_GS_FCP_FC4_FEATURE_INITIATOR 0x02 +#define FC_GS_FCP_FC4_FEATURE_TARGET 0x01 + +struct fcgs_rffid_req_s { + u32 rsvd:8; + u32 dap:24; /* port identifier */ + u32 rsvd1:16; + u32 fc4ftr_bits:8; /* fc4 feature bits */ + u32 fc4_type:8; /* corresponding FC4 Type */ +}; + +/* + * GID_FT Request + */ +struct fcgs_gidft_req_s { + u8 reserved; + u8 domain_id; /* domain, 0 - all fabric */ + u8 area_id; /* area, 0 - whole domain */ + u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */ +}; + +/* + * GID_FT Response + */ +struct fcgs_gidft_resp_s { + u8 last:1; /* last port identifier flag */ + u8 reserved:7; + u32 pid:24; /* port identifier */ +}; + +/* + * RSPN_ID + */ +struct fcgs_rspnid_req_s { + u32 rsvd:8; + u32 dap:24; /* port identifier */ + u8 spn_len; /* symbolic port name length */ + u8 spn[256]; /* symbolic port name */ +}; + +/* + * RSNN_NN + */ +struct fcgs_rsnn_nn_req_s { + wwn_t node_name; /* Node name */ + u8 snn_len; /* symbolic node name length */ + u8 snn[256]; /* symbolic node name */ +}; + +/* + * RPN_ID + */ +struct fcgs_rpnid_req_s { + u32 rsvd:8; + u32 port_id:24; + wwn_t port_name; +}; + +/* + * RNN_ID + */ +struct fcgs_rnnid_req_s { + u32 rsvd:8; + u32 port_id:24; + wwn_t node_name; +}; + +/* + * RCS_ID + */ +struct fcgs_rcsid_req_s { + u32 rsvd:8; + u32 port_id:24; + u32 cos; +}; + +/* + * RPT_ID + */ +struct fcgs_rptid_req_s { + u32 rsvd:8; + u32 port_id:24; + u32 port_type:8; + u32 rsvd1:24; +}; + +/* + * GA_NXT Request + */ +struct fcgs_ganxt_req_s { + u32 rsvd:8; + u32 port_id:24; +}; + +/* + * GA_NXT Response + */ +struct fcgs_ganxt_rsp_s { + u32 port_type:8; /* Port Type */ + u32 port_id:24; /* Port Identifier */ + wwn_t port_name; /* Port Name */ + u8 spn_len; /* Length of Symbolic Port Name */ + char spn[255]; /* Symbolic Port Name */ + wwn_t node_name; /* Node Name */ + u8 snn_len; /* Length of Symbolic Node Name */ + char snn[255]; /* Symbolic Node Name */ + u8 ipa[8]; /* Initial Process Associator */ + u8 ip[16]; /* IP Address */ + u32 cos; /* Class of Service */ + u32 fc4types[8]; /* FC-4 TYPEs */ + wwn_t fabric_port_name; /* Fabric Port Name */ + u32 rsvd:8; /* Reserved */ + u32 hard_addr:24; /* Hard Address */ +}; + +/* + * Command codes for Fabric Configuration Server + */ +enum { + GS_FC_GFN_CMD = 0x0114, /* GS FC Get Fabric Name */ + GS_FC_GMAL_CMD = 0x0116, /* GS FC GMAL */ + GS_FC_TRACE_CMD = 0x0400, /* GS FC Trace Route */ + GS_FC_PING_CMD = 0x0401, /* GS FC Ping */ +}; + +/* + * GMAL Command ( Get ( interconnect Element) Management Address List) + * To retrieve the IP Address of a Switch. 
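/*
 * Illustrative only -- not part of the patch. A sketch of how one FC-4 type
 * is set in the 8-word (256-bit) RFT_ID bitmap above, assuming the
 * fcgs_rftid_req_s layout; the helper name is hypothetical. For FC_TYPE_FCP
 * (0x08) this selects word 8 >> 5 == 0 and bit 8 % 32, i.e.
 * fc4_type[0] |= 0x00000100, which is exactly what fc_rftid_build() in
 * bfa_fcbuild.c registers by default.
 */
static inline void bfa_rftid_set_fc4_type(struct fcgs_rftid_req_s *rftid,
					  u8 fc4_type)
{
	u8 word = fc4_type >> 5;		/* 32 type bits per word */
	u32 bit = 1U << (fc4_type % 32);

	rftid->fc4_type[word] |= cpu_to_be32(bit);
}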
+ */ +#define CT_GMAL_RESP_PREFIX_TELNET "telnet://" +#define CT_GMAL_RESP_PREFIX_HTTP "http://" + +/* GMAL/GFN request */ +struct fcgs_req_s { + wwn_t wwn; /* PWWN/NWWN */ +}; + +#define fcgs_gmal_req_t struct fcgs_req_s +#define fcgs_gfn_req_t struct fcgs_req_s + +/* Accept Response to GMAL */ +struct fcgs_gmal_resp_s { + __be32 ms_len; /* Num of entries */ + u8 ms_ma[256]; +}; + +struct fcgs_gmal_entry_s { + u8 len; + u8 prefix[7]; /* like "http://" */ + u8 ip_addr[248]; +}; + +/* + * FDMI Command Codes + */ +#define FDMI_GRHL 0x0100 +#define FDMI_GHAT 0x0101 +#define FDMI_GRPL 0x0102 +#define FDMI_GPAT 0x0110 +#define FDMI_RHBA 0x0200 +#define FDMI_RHAT 0x0201 +#define FDMI_RPRT 0x0210 +#define FDMI_RPA 0x0211 +#define FDMI_DHBA 0x0300 +#define FDMI_DPRT 0x0310 + +/* + * FDMI reason codes + */ +#define FDMI_NO_ADDITIONAL_EXP 0x00 +#define FDMI_HBA_ALREADY_REG 0x10 +#define FDMI_HBA_ATTRIB_NOT_REG 0x11 +#define FDMI_HBA_ATTRIB_MULTIPLE 0x12 +#define FDMI_HBA_ATTRIB_LENGTH_INVALID 0x13 +#define FDMI_HBA_ATTRIB_NOT_PRESENT 0x14 +#define FDMI_PORT_ORIG_NOT_IN_LIST 0x15 +#define FDMI_PORT_HBA_NOT_IN_LIST 0x16 +#define FDMI_PORT_ATTRIB_NOT_REG 0x20 +#define FDMI_PORT_NOT_REG 0x21 +#define FDMI_PORT_ATTRIB_MULTIPLE 0x22 +#define FDMI_PORT_ATTRIB_LENGTH_INVALID 0x23 +#define FDMI_PORT_ALREADY_REGISTEREED 0x24 + +/* + * FDMI Transmission Speed Mask values + */ +#define FDMI_TRANS_SPEED_1G 0x00000001 +#define FDMI_TRANS_SPEED_2G 0x00000002 +#define FDMI_TRANS_SPEED_10G 0x00000004 +#define FDMI_TRANS_SPEED_4G 0x00000008 +#define FDMI_TRANS_SPEED_8G 0x00000010 +#define FDMI_TRANS_SPEED_16G 0x00000020 +#define FDMI_TRANS_SPEED_UNKNOWN 0x00008000 + +/* + * FDMI HBA attribute types + */ +enum fdmi_hba_attribute_type { + FDMI_HBA_ATTRIB_NODENAME = 1, /* 0x0001 */ + FDMI_HBA_ATTRIB_MANUFACTURER, /* 0x0002 */ + FDMI_HBA_ATTRIB_SERIALNUM, /* 0x0003 */ + FDMI_HBA_ATTRIB_MODEL, /* 0x0004 */ + FDMI_HBA_ATTRIB_MODEL_DESC, /* 0x0005 */ + FDMI_HBA_ATTRIB_HW_VERSION, /* 0x0006 */ + FDMI_HBA_ATTRIB_DRIVER_VERSION, /* 0x0007 */ + FDMI_HBA_ATTRIB_ROM_VERSION, /* 0x0008 */ + FDMI_HBA_ATTRIB_FW_VERSION, /* 0x0009 */ + FDMI_HBA_ATTRIB_OS_NAME, /* 0x000A */ + FDMI_HBA_ATTRIB_MAX_CT, /* 0x000B */ + FDMI_HBA_ATTRIB_NODE_SYM_NAME, /* 0x000C */ + FDMI_HBA_ATTRIB_VENDOR_INFO, /* 0x000D */ + FDMI_HBA_ATTRIB_NUM_PORTS, /* 0x000E */ + FDMI_HBA_ATTRIB_FABRIC_NAME, /* 0x000F */ + FDMI_HBA_ATTRIB_BIOS_VER, /* 0x0010 */ + FDMI_HBA_ATTRIB_VENDOR_ID = 0x00E0, + + FDMI_HBA_ATTRIB_MAX_TYPE +}; + +/* + * FDMI Port attribute types + */ +enum fdmi_port_attribute_type { + FDMI_PORT_ATTRIB_FC4_TYPES = 1, /* 0x0001 */ + FDMI_PORT_ATTRIB_SUPP_SPEED, /* 0x0002 */ + FDMI_PORT_ATTRIB_PORT_SPEED, /* 0x0003 */ + FDMI_PORT_ATTRIB_FRAME_SIZE, /* 0x0004 */ + FDMI_PORT_ATTRIB_DEV_NAME, /* 0x0005 */ + FDMI_PORT_ATTRIB_HOST_NAME, /* 0x0006 */ + FDMI_PORT_ATTRIB_NODE_NAME, /* 0x0007 */ + FDMI_PORT_ATTRIB_PORT_NAME, /* 0x0008 */ + FDMI_PORT_ATTRIB_PORT_SYM_NAME, /* 0x0009 */ + FDMI_PORT_ATTRIB_PORT_TYPE, /* 0x000A */ + FDMI_PORT_ATTRIB_SUPP_COS, /* 0x000B */ + FDMI_PORT_ATTRIB_PORT_FAB_NAME, /* 0x000C */ + FDMI_PORT_ATTRIB_PORT_FC4_TYPE, /* 0x000D */ + FDMI_PORT_ATTRIB_PORT_STATE = 0x101, /* 0x0101 */ + FDMI_PORT_ATTRIB_PORT_NUM_RPRT = 0x102, /* 0x0102 */ + + FDMI_PORT_ATTR_MAX_TYPE +}; + +/* + * FDMI attribute + */ +struct fdmi_attr_s { + __be16 type; + __be16 len; + u8 value[]; +}; + +/* + * HBA Attribute Block + */ +struct fdmi_hba_attr_s { + __be32 attr_count; /* # of attributes */ + struct fdmi_attr_s hba_attr; /* n attributes */ +}; + +/* + * 
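/*
 * Illustrative only -- not part of the patch. A sketch of walking a
 * variable-length FDMI attribute block using the fdmi_attr_s header above,
 * assuming each entry's big-endian len field counts the whole entry
 * including the 4-byte type/length header (the FDMI encoding). The callback
 * type and the function name are hypothetical.
 */
static inline void bfa_fdmi_walk_attrs(const u8 *buf, u32 buf_len,
			void (*cb)(u16 type, const u8 *val, u16 val_len))
{
	while (buf_len >= sizeof(struct fdmi_attr_s)) {
		const struct fdmi_attr_s *attr =
			(const struct fdmi_attr_s *)buf;
		u16 len = be16_to_cpu(attr->len);

		if (len < sizeof(struct fdmi_attr_s) || len > buf_len)
			break;	/* malformed entry, stop walking */

		cb(be16_to_cpu(attr->type), attr->value,
		   len - sizeof(struct fdmi_attr_s));
		buf += len;
		buf_len -= len;
	}
}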
Registered Port List + */ +struct fdmi_port_list_s { + __be32 num_ports; /* number Of Port Entries */ + wwn_t port_entry; /* one or more */ +}; + +/* + * Port Attribute Block + */ +struct fdmi_port_attr_s { + __be32 attr_count; /* # of attributes */ + struct fdmi_attr_s port_attr; /* n attributes */ +}; + +/* + * FDMI Register HBA Attributes + */ +struct fdmi_rhba_s { + wwn_t hba_id; /* HBA Identifier */ + struct fdmi_port_list_s port_list; /* Registered Port List */ + struct fdmi_hba_attr_s hba_attr_blk; /* HBA attribute block */ +}; + +/* + * FDMI Register Port + */ +struct fdmi_rprt_s { + wwn_t hba_id; /* HBA Identifier */ + wwn_t port_name; /* Port wwn */ + struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */ +}; + +/* + * FDMI Register Port Attributes + */ +struct fdmi_rpa_s { + wwn_t port_name; /* port wwn */ + struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */ +}; + +#pragma pack() + +#endif /* __BFA_FC_H__ */ diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c new file mode 100644 index 000000000..52303e8c7 --- /dev/null +++ b/drivers/scsi/bfa/bfa_fcbuild.c @@ -0,0 +1,1348 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ +/* + * fcbuild.c - FC link service frame building and parsing routines + */ + +#include "bfad_drv.h" +#include "bfa_fcbuild.h" + +/* + * static build functions + */ +static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, + __be16 ox_id); +static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, + __be16 ox_id); +static struct fchs_s fc_els_req_tmpl; +static struct fchs_s fc_els_rsp_tmpl; +static struct fchs_s fc_bls_req_tmpl; +static struct fchs_s fc_bls_rsp_tmpl; +static struct fc_ba_acc_s ba_acc_tmpl; +static struct fc_logi_s plogi_tmpl; +static struct fc_prli_s prli_tmpl; +static struct fc_rrq_s rrq_tmpl; +static struct fchs_s fcp_fchs_tmpl; + +void +fcbuild_init(void) +{ + /* + * fc_els_req_tmpl + */ + fc_els_req_tmpl.routing = FC_RTG_EXT_LINK; + fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST; + fc_els_req_tmpl.type = FC_TYPE_ELS; + fc_els_req_tmpl.f_ctl = + bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | + FCTL_SI_XFER); + fc_els_req_tmpl.rx_id = FC_RXID_ANY; + + /* + * fc_els_rsp_tmpl + */ + fc_els_rsp_tmpl.routing = FC_RTG_EXT_LINK; + fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY; + fc_els_rsp_tmpl.type = FC_TYPE_ELS; + fc_els_rsp_tmpl.f_ctl = + bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | + FCTL_END_SEQ | FCTL_SI_XFER); + fc_els_rsp_tmpl.rx_id = FC_RXID_ANY; + + /* + * fc_bls_req_tmpl + */ + fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK; + fc_bls_req_tmpl.type = FC_TYPE_BLS; + fc_bls_req_tmpl.f_ctl = bfa_hton3b(FCTL_END_SEQ | FCTL_SI_XFER); + fc_bls_req_tmpl.rx_id = FC_RXID_ANY; + + /* + * fc_bls_rsp_tmpl + */ + fc_bls_rsp_tmpl.routing = FC_RTG_BASIC_LINK; + fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC; + fc_bls_rsp_tmpl.type = FC_TYPE_BLS; + fc_bls_rsp_tmpl.f_ctl = + bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | + FCTL_END_SEQ | FCTL_SI_XFER); + fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY; + + /* + * ba_acc_tmpl + */ + ba_acc_tmpl.seq_id_valid = 0; + ba_acc_tmpl.low_seq_cnt = 0; + ba_acc_tmpl.high_seq_cnt = 0xFFFF; + + /* + * plogi_tmpl + */ + plogi_tmpl.csp.verhi = FC_PH_VER_PH_3; + plogi_tmpl.csp.verlo = FC_PH_VER_4_3; + plogi_tmpl.csp.ciro = 0x1; 
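	/*
	 * Illustrative only -- not part of the patch: a worked example of how
	 * the frame-control templates initialized above are packed.
	 * FCTL_SEQ_INI is 0x000000, FCTL_FS_EXCH 0x200000, FCTL_END_SEQ
	 * 0x080000 and FCTL_SI_XFER 0x010000, so fc_els_req_tmpl.f_ctl holds
	 * bfa_hton3b(0x290000), the 24-bit F_CTL word an ELS request carries
	 * on the wire. A compile-time check of that arithmetic could read:
	 */
	BUILD_BUG_ON((FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
		      FCTL_SI_XFER) != 0x290000);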
+ plogi_tmpl.csp.cisc = 0x0; + plogi_tmpl.csp.altbbcred = 0x0; + plogi_tmpl.csp.conseq = cpu_to_be16(0x00FF); + plogi_tmpl.csp.ro_bitmap = cpu_to_be16(0x0002); + plogi_tmpl.csp.e_d_tov = cpu_to_be32(2000); + + plogi_tmpl.class3.class_valid = 1; + plogi_tmpl.class3.sequential = 1; + plogi_tmpl.class3.conseq = 0xFF; + plogi_tmpl.class3.ospx = 1; + + /* + * prli_tmpl + */ + prli_tmpl.command = FC_ELS_PRLI; + prli_tmpl.pglen = 0x10; + prli_tmpl.pagebytes = cpu_to_be16(0x0014); + prli_tmpl.parampage.type = FC_TYPE_FCP; + prli_tmpl.parampage.imagepair = 1; + prli_tmpl.parampage.servparams.rxrdisab = 1; + + /* + * rrq_tmpl + */ + rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ; + + /* + * fcp_struct fchs_s mpl + */ + fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA; + fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD; + fcp_fchs_tmpl.type = FC_TYPE_FCP; + fcp_fchs_tmpl.f_ctl = + bfa_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); + fcp_fchs_tmpl.seq_id = 1; + fcp_fchs_tmpl.rx_id = FC_RXID_ANY; +} + +static void +fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id) +{ + memset(fchs, 0, sizeof(struct fchs_s)); + + fchs->routing = FC_RTG_FC4_DEV_DATA; + fchs->cat_info = FC_CAT_UNSOLICIT_CTRL; + fchs->type = FC_TYPE_SERVICES; + fchs->f_ctl = + bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | + FCTL_SI_XFER); + fchs->rx_id = FC_RXID_ANY; + fchs->d_id = (d_id); + fchs->s_id = (s_id); + fchs->ox_id = cpu_to_be16(ox_id); + + /* + * @todo no need to set ox_id for request + * no need to set rx_id for response + */ +} + +static void +fc_gsresp_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) +{ + memset(fchs, 0, sizeof(struct fchs_s)); + + fchs->routing = FC_RTG_FC4_DEV_DATA; + fchs->cat_info = FC_CAT_SOLICIT_CTRL; + fchs->type = FC_TYPE_SERVICES; + fchs->f_ctl = + bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | + FCTL_END_SEQ | FCTL_SI_XFER); + fchs->d_id = d_id; + fchs->s_id = s_id; + fchs->ox_id = ox_id; +} + +void +fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) +{ + memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s)); + fchs->d_id = (d_id); + fchs->s_id = (s_id); + fchs->ox_id = cpu_to_be16(ox_id); +} + +static void +fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) +{ + memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s)); + fchs->d_id = d_id; + fchs->s_id = s_id; + fchs->ox_id = ox_id; +} + +static void +fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) +{ + memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s)); + fchs->d_id = d_id; + fchs->s_id = s_id; + fchs->ox_id = ox_id; +} + +static u16 +fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, + __be16 ox_id, wwn_t port_name, wwn_t node_name, + u16 pdu_size, u16 bb_cr, u8 els_code) +{ + struct fc_logi_s *plogi = (struct fc_logi_s *) (pld); + + memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); + + /* For FC AL bb_cr is 0 and altbbcred is 1 */ + if (!bb_cr) + plogi->csp.altbbcred = 1; + + plogi->els_cmd.els_code = els_code; + if (els_code == FC_ELS_PLOGI) + fc_els_req_build(fchs, d_id, s_id, ox_id); + else + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size); + plogi->csp.bbcred = cpu_to_be16(bb_cr); + + memcpy(&plogi->port_name, &port_name, sizeof(wwn_t)); + memcpy(&plogi->node_name, &node_name, sizeof(wwn_t)); + + return sizeof(struct fc_logi_s); +} + +u16 +fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, + u16 ox_id, wwn_t port_name, wwn_t 
node_name, u16 pdu_size, + u8 set_npiv, u8 set_auth, u16 local_bb_credits) +{ + u32 d_id = bfa_hton3b(FC_FABRIC_PORT); + __be32 *vvl_info; + + memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); + + flogi->els_cmd.els_code = FC_ELS_FLOGI; + fc_els_req_build(fchs, d_id, s_id, ox_id); + + flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); + flogi->port_name = port_name; + flogi->node_name = node_name; + + /* + * Set the NPIV Capability Bit ( word 1, bit 31) of Common + * Service Parameters. + */ + flogi->csp.ciro = set_npiv; + + /* set AUTH capability */ + flogi->csp.security = set_auth; + + flogi->csp.bbcred = cpu_to_be16(local_bb_credits); + + /* Set brcd token in VVL */ + vvl_info = (u32 *)&flogi->vvl[0]; + + /* set the flag to indicate the presence of VVL */ + flogi->csp.npiv_supp = 1; /* @todo. field name is not correct */ + vvl_info[0] = cpu_to_be32(FLOGI_VVL_BRCD); + + return sizeof(struct fc_logi_s); +} + +u16 +fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, + __be16 ox_id, wwn_t port_name, wwn_t node_name, + u16 pdu_size, u16 local_bb_credits, u8 bb_scn) +{ + u32 d_id = 0; + u16 bbscn_rxsz = (bb_scn << 12) | pdu_size; + + memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + flogi->els_cmd.els_code = FC_ELS_ACC; + flogi->class3.rxsz = cpu_to_be16(pdu_size); + flogi->csp.rxsz = cpu_to_be16(bbscn_rxsz); /* bb_scn/rxsz */ + flogi->port_name = port_name; + flogi->node_name = node_name; + + flogi->csp.bbcred = cpu_to_be16(local_bb_credits); + + return sizeof(struct fc_logi_s); +} + +u16 +fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, + u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size) +{ + u32 d_id = bfa_hton3b(FC_FABRIC_PORT); + + memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); + + flogi->els_cmd.els_code = FC_ELS_FDISC; + fc_els_req_build(fchs, d_id, s_id, ox_id); + + flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); + flogi->port_name = port_name; + flogi->node_name = node_name; + + return sizeof(struct fc_logi_s); +} + +u16 +fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, + u16 ox_id, wwn_t port_name, wwn_t node_name, + u16 pdu_size, u16 bb_cr) +{ + return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name, + node_name, pdu_size, bb_cr, FC_ELS_PLOGI); +} + +u16 +fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, + u16 ox_id, wwn_t port_name, wwn_t node_name, + u16 pdu_size, u16 bb_cr) +{ + return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name, + node_name, pdu_size, bb_cr, FC_ELS_ACC); +} + +enum fc_parse_status +fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) +{ + struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); + struct fc_logi_s *plogi; + struct fc_ls_rjt_s *ls_rjt; + + switch (els_cmd->els_code) { + case FC_ELS_LS_RJT: + ls_rjt = (struct fc_ls_rjt_s *) (fchs + 1); + if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY) + return FC_PARSE_BUSY; + else + return FC_PARSE_FAILURE; + case FC_ELS_ACC: + plogi = (struct fc_logi_s *) (fchs + 1); + if (len < sizeof(struct fc_logi_s)) + return FC_PARSE_FAILURE; + + if (!wwn_is_equal(plogi->port_name, port_name)) + return FC_PARSE_FAILURE; + + if (!plogi->class3.class_valid) + return FC_PARSE_FAILURE; + + if (be16_to_cpu(plogi->class3.rxsz) < (FC_MIN_PDUSZ)) + return FC_PARSE_FAILURE; + + return FC_PARSE_OK; + default: + return FC_PARSE_FAILURE; + } +} + +enum fc_parse_status +fc_plogi_parse(struct fchs_s *fchs) 
+{ + struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1); + + if (plogi->class3.class_valid != 1) + return FC_PARSE_FAILURE; + + if ((be16_to_cpu(plogi->class3.rxsz) < FC_MIN_PDUSZ) + || (be16_to_cpu(plogi->class3.rxsz) > FC_MAX_PDUSZ) + || (plogi->class3.rxsz == 0)) + return FC_PARSE_FAILURE; + + return FC_PARSE_OK; +} + +u16 +fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, + u16 ox_id) +{ + struct fc_prli_s *prli = (struct fc_prli_s *) (pld); + + fc_els_req_build(fchs, d_id, s_id, ox_id); + memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); + + prli->command = FC_ELS_PRLI; + prli->parampage.servparams.initiator = 1; + prli->parampage.servparams.retry = 1; + prli->parampage.servparams.rec_support = 1; + prli->parampage.servparams.task_retry_id = 0; + prli->parampage.servparams.confirm = 1; + + return sizeof(struct fc_prli_s); +} + +u16 +fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, + __be16 ox_id, enum bfa_lport_role role) +{ + struct fc_prli_s *prli = (struct fc_prli_s *) (pld); + + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); + + prli->command = FC_ELS_ACC; + + prli->parampage.servparams.initiator = 1; + + prli->parampage.rspcode = FC_PRLI_ACC_XQTD; + + return sizeof(struct fc_prli_s); +} + +enum fc_parse_status +fc_prli_rsp_parse(struct fc_prli_s *prli, int len) +{ + if (len < sizeof(struct fc_prli_s)) + return FC_PARSE_FAILURE; + + if (prli->command != FC_ELS_ACC) + return FC_PARSE_FAILURE; + + if ((prli->parampage.rspcode != FC_PRLI_ACC_XQTD) + && (prli->parampage.rspcode != FC_PRLI_ACC_PREDEF_IMG)) + return FC_PARSE_FAILURE; + + if (prli->parampage.servparams.target != 1) + return FC_PARSE_FAILURE; + + return FC_PARSE_OK; +} + +enum fc_parse_status +fc_prli_parse(struct fc_prli_s *prli) +{ + if (prli->parampage.type != FC_TYPE_FCP) + return FC_PARSE_FAILURE; + + if (!prli->parampage.imagepair) + return FC_PARSE_FAILURE; + + if (!prli->parampage.servparams.initiator) + return FC_PARSE_FAILURE; + + return FC_PARSE_OK; +} + +u16 +fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id, + u16 ox_id, wwn_t port_name) +{ + fc_els_req_build(fchs, d_id, s_id, ox_id); + + memset(logo, '\0', sizeof(struct fc_logo_s)); + logo->els_cmd.els_code = FC_ELS_LOGO; + logo->nport_id = (s_id); + logo->orig_port_name = port_name; + + return sizeof(struct fc_logo_s); +} + +static u16 +fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, + u32 s_id, __be16 ox_id, wwn_t port_name, + wwn_t node_name, u8 els_code) +{ + memset(adisc, '\0', sizeof(struct fc_adisc_s)); + + adisc->els_cmd.els_code = els_code; + + if (els_code == FC_ELS_ADISC) + fc_els_req_build(fchs, d_id, s_id, ox_id); + else + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + adisc->orig_HA = 0; + adisc->orig_port_name = port_name; + adisc->orig_node_name = node_name; + adisc->nport_id = (s_id); + + return sizeof(struct fc_adisc_s); +} + +u16 +fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, + u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name) +{ + return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, + node_name, FC_ELS_ADISC); +} + +u16 +fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, + u32 s_id, __be16 ox_id, wwn_t port_name, + wwn_t node_name) +{ + return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, + node_name, FC_ELS_ACC); +} + +enum fc_parse_status +fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t 
port_name, + wwn_t node_name) +{ + + if (len < sizeof(struct fc_adisc_s)) + return FC_PARSE_FAILURE; + + if (adisc->els_cmd.els_code != FC_ELS_ACC) + return FC_PARSE_FAILURE; + + if (!wwn_is_equal(adisc->orig_port_name, port_name)) + return FC_PARSE_FAILURE; + + return FC_PARSE_OK; +} + +enum fc_parse_status +fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, wwn_t node_name, + wwn_t port_name) +{ + struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld; + + if (adisc->els_cmd.els_code != FC_ELS_ACC) + return FC_PARSE_FAILURE; + + if ((adisc->nport_id == (host_dap)) + && wwn_is_equal(adisc->orig_port_name, port_name) + && wwn_is_equal(adisc->orig_node_name, node_name)) + return FC_PARSE_OK; + + return FC_PARSE_FAILURE; +} + +enum fc_parse_status +fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name) +{ + struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); + + if (pdisc->class3.class_valid != 1) + return FC_PARSE_FAILURE; + + if ((be16_to_cpu(pdisc->class3.rxsz) < + (FC_MIN_PDUSZ - sizeof(struct fchs_s))) + || (pdisc->class3.rxsz == 0)) + return FC_PARSE_FAILURE; + + if (!wwn_is_equal(pdisc->port_name, port_name)) + return FC_PARSE_FAILURE; + + if (!wwn_is_equal(pdisc->node_name, node_name)) + return FC_PARSE_FAILURE; + + return FC_PARSE_OK; +} + +u16 +fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) +{ + memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s)); + fchs->cat_info = FC_CAT_ABTS; + fchs->d_id = (d_id); + fchs->s_id = (s_id); + fchs->ox_id = cpu_to_be16(ox_id); + + return sizeof(struct fchs_s); +} + +enum fc_parse_status +fc_abts_rsp_parse(struct fchs_s *fchs, int len) +{ + if ((fchs->cat_info == FC_CAT_BA_ACC) + || (fchs->cat_info == FC_CAT_BA_RJT)) + return FC_PARSE_OK; + + return FC_PARSE_FAILURE; +} + +u16 +fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id, + u16 ox_id, u16 rrq_oxid) +{ + fc_els_req_build(fchs, d_id, s_id, ox_id); + + /* + * build rrq payload + */ + memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s)); + rrq->s_id = (s_id); + rrq->ox_id = cpu_to_be16(rrq_oxid); + rrq->rx_id = FC_RXID_ANY; + + return sizeof(struct fc_rrq_s); +} + +u16 +fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, + __be16 ox_id) +{ + struct fc_els_cmd_s *acc = pld; + + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + memset(acc, 0, sizeof(struct fc_els_cmd_s)); + acc->els_code = FC_ELS_ACC; + + return sizeof(struct fc_els_cmd_s); +} + +u16 +fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id, + u32 s_id, __be16 ox_id, u8 reason_code, + u8 reason_code_expl) +{ + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s)); + + ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT; + ls_rjt->reason_code = reason_code; + ls_rjt->reason_code_expl = reason_code_expl; + ls_rjt->vendor_unique = 0x00; + + return sizeof(struct fc_ls_rjt_s); +} + +u16 +fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, + u32 s_id, __be16 ox_id, u16 rx_id) +{ + fc_bls_rsp_build(fchs, d_id, s_id, ox_id); + + memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s)); + + fchs->rx_id = rx_id; + + ba_acc->ox_id = fchs->ox_id; + ba_acc->rx_id = fchs->rx_id; + + return sizeof(struct fc_ba_acc_s); +} + +u16 +fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id, + u32 s_id, __be16 ox_id) +{ + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); + els_cmd->els_code = FC_ELS_ACC; + + return sizeof(struct fc_els_cmd_s); 
+} + +int +fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code) +{ + int num_pages = 0; + struct fc_prlo_s *prlo; + struct fc_tprlo_s *tprlo; + + if (els_code == FC_ELS_PRLO) { + prlo = (struct fc_prlo_s *) (fc_frame + 1); + num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16; + } else { + tprlo = (struct fc_tprlo_s *) (fc_frame + 1); + num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16; + } + return num_pages; +} + +u16 +fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, + u32 d_id, u32 s_id, __be16 ox_id, int num_pages) +{ + int page; + + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + memset(tprlo_acc, 0, (num_pages * 16) + 4); + tprlo_acc->command = FC_ELS_ACC; + + tprlo_acc->page_len = 0x10; + tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4); + + for (page = 0; page < num_pages; page++) { + tprlo_acc->tprlo_acc_params[page].opa_valid = 0; + tprlo_acc->tprlo_acc_params[page].rpa_valid = 0; + tprlo_acc->tprlo_acc_params[page].fc4type_csp = FC_TYPE_FCP; + tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0; + tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0; + } + return be16_to_cpu(tprlo_acc->payload_len); +} + +u16 +fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id, + u32 s_id, __be16 ox_id, int num_pages) +{ + int page; + + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + memset(prlo_acc, 0, (num_pages * 16) + 4); + prlo_acc->command = FC_ELS_ACC; + prlo_acc->page_len = 0x10; + prlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4); + + for (page = 0; page < num_pages; page++) { + prlo_acc->prlo_acc_params[page].opa_valid = 0; + prlo_acc->prlo_acc_params[page].rpa_valid = 0; + prlo_acc->prlo_acc_params[page].fc4type_csp = FC_TYPE_FCP; + prlo_acc->prlo_acc_params[page].orig_process_assc = 0; + prlo_acc->prlo_acc_params[page].resp_process_assc = 0; + } + + return be16_to_cpu(prlo_acc->payload_len); +} + +u16 +fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id, + u32 s_id, u16 ox_id, u32 data_format) +{ + fc_els_req_build(fchs, d_id, s_id, ox_id); + + memset(rnid, 0, sizeof(struct fc_rnid_cmd_s)); + + rnid->els_cmd.els_code = FC_ELS_RNID; + rnid->node_id_data_format = data_format; + + return sizeof(struct fc_rnid_cmd_s); +} + +u16 +fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id, + u32 s_id, __be16 ox_id, u32 data_format, + struct fc_rnid_common_id_data_s *common_id_data, + struct fc_rnid_general_topology_data_s *gen_topo_data) +{ + memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s)); + + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + rnid_acc->els_cmd.els_code = FC_ELS_ACC; + rnid_acc->node_id_data_format = data_format; + rnid_acc->common_id_data_length = + sizeof(struct fc_rnid_common_id_data_s); + rnid_acc->common_id_data = *common_id_data; + + if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) { + rnid_acc->specific_id_data_length = + sizeof(struct fc_rnid_general_topology_data_s); + rnid_acc->gen_topology_data = *gen_topo_data; + return sizeof(struct fc_rnid_acc_s); + } else { + return sizeof(struct fc_rnid_acc_s) - + sizeof(struct fc_rnid_general_topology_data_s); + } + +} + +u16 +fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id, + u32 s_id, u16 ox_id) +{ + fc_els_req_build(fchs, d_id, s_id, ox_id); + + memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); + + rpsc->els_cmd.els_code = FC_ELS_RPSC; + return sizeof(struct fc_rpsc_cmd_s); +} + +u16 +fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id, + u32 
s_id, u32 *pid_list, u16 npids) +{ + u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_hton3b(d_id)); + int i = 0; + + fc_els_req_build(fchs, bfa_hton3b(dctlr_id), s_id, 0); + + memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s)); + + rpsc2->els_cmd.els_code = FC_ELS_RPSC; + rpsc2->token = cpu_to_be32(FC_BRCD_TOKEN); + rpsc2->num_pids = cpu_to_be16(npids); + for (i = 0; i < npids; i++) + rpsc2->pid_list[i].pid = pid_list[i]; + + return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * (sizeof(u32))); +} + +u16 +fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, + u32 d_id, u32 s_id, __be16 ox_id, + struct fc_rpsc_speed_info_s *oper_speed) +{ + memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s)); + + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + rpsc_acc->command = FC_ELS_ACC; + rpsc_acc->num_entries = cpu_to_be16(1); + + rpsc_acc->speed_info[0].port_speed_cap = + cpu_to_be16(oper_speed->port_speed_cap); + + rpsc_acc->speed_info[0].port_op_speed = + cpu_to_be16(oper_speed->port_op_speed); + + return sizeof(struct fc_rpsc_acc_s); +} + +u16 +fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, + wwn_t port_name, wwn_t node_name, u16 pdu_size) +{ + struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); + + memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s)); + + pdisc->els_cmd.els_code = FC_ELS_PDISC; + fc_els_req_build(fchs, d_id, s_id, ox_id); + + pdisc->csp.rxsz = pdisc->class3.rxsz = cpu_to_be16(pdu_size); + pdisc->port_name = port_name; + pdisc->node_name = node_name; + + return sizeof(struct fc_logi_s); +} + +u16 +fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) +{ + struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); + + if (len < sizeof(struct fc_logi_s)) + return FC_PARSE_LEN_INVAL; + + if (pdisc->els_cmd.els_code != FC_ELS_ACC) + return FC_PARSE_ACC_INVAL; + + if (!wwn_is_equal(pdisc->port_name, port_name)) + return FC_PARSE_PWWN_NOT_EQUAL; + + if (!pdisc->class3.class_valid) + return FC_PARSE_NWWN_NOT_EQUAL; + + if (be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ)) + return FC_PARSE_RXSZ_INVAL; + + return FC_PARSE_OK; +} + +u16 +fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, + int num_pages) +{ + struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1); + int page; + + fc_els_req_build(fchs, d_id, s_id, ox_id); + memset(prlo, 0, (num_pages * 16) + 4); + prlo->command = FC_ELS_PRLO; + prlo->page_len = 0x10; + prlo->payload_len = cpu_to_be16((num_pages * 16) + 4); + + for (page = 0; page < num_pages; page++) { + prlo->prlo_params[page].type = FC_TYPE_FCP; + prlo->prlo_params[page].opa_valid = 0; + prlo->prlo_params[page].rpa_valid = 0; + prlo->prlo_params[page].orig_process_assc = 0; + prlo->prlo_params[page].resp_process_assc = 0; + } + + return be16_to_cpu(prlo->payload_len); +} + +u16 +fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, + int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id) +{ + struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1); + int page; + + fc_els_req_build(fchs, d_id, s_id, ox_id); + memset(tprlo, 0, (num_pages * 16) + 4); + tprlo->command = FC_ELS_TPRLO; + tprlo->page_len = 0x10; + tprlo->payload_len = cpu_to_be16((num_pages * 16) + 4); + + for (page = 0; page < num_pages; page++) { + tprlo->tprlo_params[page].type = FC_TYPE_FCP; + tprlo->tprlo_params[page].opa_valid = 0; + tprlo->tprlo_params[page].rpa_valid = 0; + tprlo->tprlo_params[page].orig_process_assc = 0; + tprlo->tprlo_params[page].resp_process_assc = 0; + if (tprlo_type == FC_GLOBAL_LOGO) { + 
tprlo->tprlo_params[page].global_process_logout = 1; + } else if (tprlo_type == FC_TPR_LOGO) { + tprlo->tprlo_params[page].tpo_nport_valid = 1; + tprlo->tprlo_params[page].tpo_nport_id = (tpr_id); + } + } + + return be16_to_cpu(tprlo->payload_len); +} + +u16 +fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id, + u32 reason_code, u32 reason_expl) +{ + struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1); + + fc_bls_rsp_build(fchs, d_id, s_id, ox_id); + + fchs->cat_info = FC_CAT_BA_RJT; + ba_rjt->reason_code = reason_code; + ba_rjt->reason_expl = reason_expl; + return sizeof(struct fc_ba_rjt_s); +} + +static void +fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) +{ + memset(cthdr, 0, sizeof(struct ct_hdr_s)); + cthdr->rev_id = CT_GS3_REVISION; + cthdr->gs_type = CT_GSTYPE_DIRSERVICE; + cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER; + cthdr->cmd_rsp_code = cpu_to_be16(cmd_code); +} + +static void +fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) +{ + memset(cthdr, 0, sizeof(struct ct_hdr_s)); + cthdr->rev_id = CT_GS3_REVISION; + cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; + cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER; + cthdr->cmd_rsp_code = cpu_to_be16(cmd_code); +} + +static void +fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code, + u8 sub_type) +{ + memset(cthdr, 0, sizeof(struct ct_hdr_s)); + cthdr->rev_id = CT_GS3_REVISION; + cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; + cthdr->gs_sub_type = sub_type; + cthdr->cmd_rsp_code = cpu_to_be16(cmd_code); +} + +u16 +fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, + wwn_t port_name) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1); + u32 d_id = bfa_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); + fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN); + + memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s)); + gidpn->port_name = port_name; + return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, + u32 port_id) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1); + u32 d_id = bfa_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); + fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID); + + memset(gpnid, 0, sizeof(fcgs_gpnid_req_t)); + gpnid->dap = port_id; + return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s); +} + +u16 +fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, + u32 port_id) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1); + u32 d_id = bfa_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); + fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID); + + memset(gnnid, 0, sizeof(fcgs_gnnid_req_t)); + gnnid->dap = port_id; + return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s); +} + +u16 +fc_ct_rsp_parse(struct ct_hdr_s *cthdr) +{ + if (be16_to_cpu(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) { + if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY) + return FC_PARSE_BUSY; + else + return FC_PARSE_FAILURE; + } + + return FC_PARSE_OK; +} + +u16 +fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr, + u32 d_id, u32 s_id, u16 ox_id, u8 reason_code, + u8 reason_code_expl) +{ + fc_gsresp_fchdr_build(fchs, d_id, s_id, ox_id); + + cthdr->cmd_rsp_code = cpu_to_be16(CT_RSP_REJECT); + cthdr->rev_id = 
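/*
 * For illustration, a hypothetical caller of the GS/CT builders above
 * (my_sid, ox_id, rport_pwwn and rsp are placeholder names, not part of
 * this driver): every builder lays down the FC header, the CT header and
 * the request body, and returns the payload length to transmit, e.g.
 *
 *     len = fc_gidpn_build(&fchs, pyld, my_sid, ox_id, rport_pwwn);
 *
 * and on completion the CT response header is checked with
 *
 *     if (fc_ct_rsp_parse((struct ct_hdr_s *)rsp) != FC_PARSE_OK)
 *             handle FC_PARSE_BUSY (logical busy) or FC_PARSE_FAILURE;
 *
 * This function (fc_gs_rjt_build) is the transmit-side counterpart,
 * filling in a CT reject with the given reason code and explanation.
 */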
CT_GS3_REVISION; + + cthdr->reason_code = reason_code; + cthdr->exp_code = reason_code_expl; + return sizeof(struct ct_hdr_s); +} + +u16 +fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, + u8 set_br_reg, u32 s_id, u16 ox_id) +{ + u32 d_id = bfa_hton3b(FC_FABRIC_CONTROLLER); + + fc_els_req_build(fchs, d_id, s_id, ox_id); + + memset(scr, 0, sizeof(struct fc_scr_s)); + scr->command = FC_ELS_SCR; + scr->reg_func = FC_SCR_REG_FUNC_FULL; + if (set_br_reg) + scr->vu_reg_func = FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE; + + return sizeof(struct fc_scr_s); +} + +u16 +fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, + u32 s_id, u16 ox_id) +{ + u32 d_id = bfa_hton3b(FC_FABRIC_CONTROLLER); + u16 payldlen; + + fc_els_req_build(fchs, d_id, s_id, ox_id); + rscn->command = FC_ELS_RSCN; + rscn->pagelen = sizeof(rscn->event[0]); + + payldlen = sizeof(u32) + rscn->pagelen; + rscn->payldlen = cpu_to_be16(payldlen); + + rscn->event[0].format = FC_RSCN_FORMAT_PORTID; + rscn->event[0].portid = s_id; + + return struct_size(rscn, event, 1); +} + +u16 +fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, + enum bfa_lport_role roles) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1); + u32 type_value, d_id = bfa_hton3b(FC_NAME_SERVER); + u8 index; + + fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); + fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); + + memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); + + rftid->dap = s_id; + + /* By default, FCP FC4 Type is registered */ + index = FC_TYPE_FCP >> 5; + type_value = 1 << (FC_TYPE_FCP % 32); + rftid->fc4_type[index] = cpu_to_be32(type_value); + + return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, + u8 *fc4_bitmap, u32 bitmap_size) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1); + u32 d_id = bfa_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); + fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); + + memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); + + rftid->dap = s_id; + memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, + (bitmap_size < 32 ? 
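/*
 * Worked example of the FC-4 type bitmap used by fc_rftid_build() above
 * and fc_get_fc4type_bitmask() further down (assuming FC_TYPE_FCP is
 * 0x08):
 *
 *     index      = 0x08 >> 5        = 0;
 *     type_value = 1 << (0x08 % 32) = 0x00000100;
 *     fc4_type[0] = cpu_to_be32(0x00000100);
 *
 * i.e. only word 0, bit 8 of the 32-byte (8 x u32) bitmap is set,
 * registering the SCSI-FCP type with the name server. The solicited
 * variant here simply copies a caller-supplied bitmap, capped at 32 bytes.
 */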
bitmap_size : 32)); + + return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, + u8 fc4_type, u8 fc4_ftrs) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1); + u32 d_id = bfa_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); + fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID); + + memset(rffid, 0, sizeof(struct fcgs_rffid_req_s)); + + rffid->dap = s_id; + rffid->fc4ftr_bits = fc4_ftrs; + rffid->fc4_type = fc4_type; + + return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, + u8 *name) +{ + + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rspnid_req_s *rspnid = + (struct fcgs_rspnid_req_s *)(cthdr + 1); + u32 d_id = bfa_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); + fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID); + + memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s)); + + rspnid->dap = s_id; + strscpy(rspnid->spn, name, sizeof(rspnid->spn)); + rspnid->spn_len = (u8) strlen(rspnid->spn); + + return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id, + wwn_t node_name, u8 *name) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rsnn_nn_req_s *rsnn_nn = + (struct fcgs_rsnn_nn_req_s *) (cthdr + 1); + u32 d_id = bfa_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_cthdr_build(cthdr, s_id, GS_RSNN_NN); + + memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s)); + + rsnn_nn->node_name = node_name; + strscpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn)); + rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn); + + return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type) +{ + + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1); + u32 d_id = bfa_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + + fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT); + + memset(gidft, 0, sizeof(struct fcgs_gidft_req_s)); + gidft->fc4_type = fc4_type; + gidft->domain_id = 0; + gidft->area_id = 0; + + return sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, + wwn_t port_name) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1); + u32 d_id = bfa_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID); + + memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s)); + rpnid->port_id = port_id; + rpnid->port_name = port_name; + + return sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, + wwn_t node_name) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1); + u32 d_id = bfa_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID); + + memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s)); + rnnid->port_id = port_id; + rnnid->node_name = node_name; + + return sizeof(struct fcgs_rnnid_req_s) + 
sizeof(struct ct_hdr_s); +} + +u16 +fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, + u32 cos) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rcsid_req_s *rcsid = + (struct fcgs_rcsid_req_s *) (cthdr + 1); + u32 d_id = bfa_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID); + + memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s)); + rcsid->port_id = port_id; + rcsid->cos = cos; + + return sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, + u8 port_type) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1); + u32 d_id = bfa_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID); + + memset(rptid, 0, sizeof(struct fcgs_rptid_req_s)); + rptid->port_id = port_id; + rptid->port_type = port_type; + + return sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1); + u32 d_id = bfa_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT); + + memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s)); + ganxt->port_id = port_id; + + return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s); +} + +/* + * Builds fc hdr and ct hdr for FDMI requests. + */ +u16 +fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u16 cmd_code) +{ + + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + u32 d_id = bfa_hton3b(FC_MGMT_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code); + + return sizeof(struct ct_hdr_s); +} + +/* + * Given a FC4 Type, this function returns a fc4 type bitmask + */ +void +fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask) +{ + u8 index; + __be32 *ptr = (__be32 *) bit_mask; + u32 type_value; + + /* + * @todo : Check for bitmask size + */ + + index = fc4_type >> 5; + type_value = 1 << (fc4_type % 32); + ptr[index] = cpu_to_be32(type_value); + +} + +/* + * GMAL Request + */ +u16 +fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1); + u32 d_id = bfa_hton3b(FC_MGMT_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD, + CT_GSSUBTYPE_CFGSERVER); + + memset(gmal, 0, sizeof(fcgs_gmal_req_t)); + gmal->wwn = wwn; + + return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t); +} + +/* + * GFN (Get Fabric Name) Request + */ +u16 +fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1); + u32 d_id = bfa_hton3b(FC_MGMT_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD, + CT_GSSUBTYPE_CFGSERVER); + + memset(gfn, 0, sizeof(fcgs_gfn_req_t)); + gfn->wwn = wwn; + + return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t); +} diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h new file mode 100644 index 000000000..49e0ee4a7 --- /dev/null +++ b/drivers/scsi/bfa/bfa_fcbuild.h @@ -0,0 
+1,312 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ +/* + * fcbuild.h - FC link service frame building and parsing routines + */ + +#ifndef __FCBUILD_H__ +#define __FCBUILD_H__ + +#include "bfad_drv.h" +#include "bfa_fc.h" +#include "bfa_defs_fcs.h" + +/* + * Utility Macros/functions + */ + +#define wwn_is_equal(_wwn1, _wwn2) \ + (memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0) + +#define fc_roundup(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1)) + +/* + * Given the fc response length, this routine will return + * the length of the actual payload bytes following the CT header. + * + * Assumes the input response length does not include the crc, eof, etc. + */ +static inline u32 +fc_get_ctresp_pyld_len(u32 resp_len) +{ + return resp_len - sizeof(struct ct_hdr_s); +} + +/* + * Convert bfa speed to rpsc speed value. + */ +static inline enum bfa_port_speed +fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed speed) +{ + switch (speed) { + + case RPSC_OP_SPEED_1G: + return BFA_PORT_SPEED_1GBPS; + + case RPSC_OP_SPEED_2G: + return BFA_PORT_SPEED_2GBPS; + + case RPSC_OP_SPEED_4G: + return BFA_PORT_SPEED_4GBPS; + + case RPSC_OP_SPEED_8G: + return BFA_PORT_SPEED_8GBPS; + + case RPSC_OP_SPEED_16G: + return BFA_PORT_SPEED_16GBPS; + + case RPSC_OP_SPEED_10G: + return BFA_PORT_SPEED_10GBPS; + + default: + return BFA_PORT_SPEED_UNKNOWN; + } +} + +/* + * Convert RPSC speed to bfa speed value. + */ +static inline enum fc_rpsc_op_speed +fc_bfa_speed_to_rpsc_operspeed(enum bfa_port_speed op_speed) +{ + switch (op_speed) { + + case BFA_PORT_SPEED_1GBPS: + return RPSC_OP_SPEED_1G; + + case BFA_PORT_SPEED_2GBPS: + return RPSC_OP_SPEED_2G; + + case BFA_PORT_SPEED_4GBPS: + return RPSC_OP_SPEED_4G; + + case BFA_PORT_SPEED_8GBPS: + return RPSC_OP_SPEED_8G; + + case BFA_PORT_SPEED_16GBPS: + return RPSC_OP_SPEED_16G; + + case BFA_PORT_SPEED_10GBPS: + return RPSC_OP_SPEED_10G; + + default: + return RPSC_OP_SPEED_NOT_EST; + } +} + +enum fc_parse_status { + FC_PARSE_OK = 0, + FC_PARSE_FAILURE = 1, + FC_PARSE_BUSY = 2, + FC_PARSE_LEN_INVAL, + FC_PARSE_ACC_INVAL, + FC_PARSE_PWWN_NOT_EQUAL, + FC_PARSE_NWWN_NOT_EQUAL, + FC_PARSE_RXSZ_INVAL, + FC_PARSE_NOT_FCP, + FC_PARSE_OPAFLAG_INVAL, + FC_PARSE_RPAFLAG_INVAL, + FC_PARSE_OPA_INVAL, + FC_PARSE_RPA_INVAL, + +}; + +struct fc_templates_s { + struct fchs_s fc_els_req; + struct fchs_s fc_bls_req; + struct fc_logi_s plogi; + struct fc_rrq_s rrq; +}; + +void fcbuild_init(void); + +u16 fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, + u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, + u16 pdu_size, u8 set_npiv, u8 set_auth, + u16 local_bb_credits); + +u16 fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi, u32 s_id, + u16 ox_id, wwn_t port_name, wwn_t node_name, + u16 pdu_size); + +u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, + u32 s_id, __be16 ox_id, + wwn_t port_name, wwn_t node_name, + u16 pdu_size, + u16 local_bb_credits, u8 bb_scn); + +u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, + u32 s_id, u16 ox_id, wwn_t port_name, + wwn_t node_name, u16 pdu_size, u16 bb_cr); + +enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs); + +u16 fc_abts_build(struct fchs_s *buf, u32 d_id, u32 s_id, + u16 ox_id); + +enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len); + +u16 
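/*
 * Usage note for the two inline speed converters above (values taken from
 * their switch statements): fc_rpsc_operspeed_to_bfa_speed(RPSC_OP_SPEED_8G)
 * yields BFA_PORT_SPEED_8GBPS and fc_bfa_speed_to_rpsc_operspeed() maps it
 * back; anything unrecognised falls through to BFA_PORT_SPEED_UNKNOWN or
 * RPSC_OP_SPEED_NOT_EST respectively.
 */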
fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id, + u32 s_id, u16 ox_id, u16 rrq_oxid); + +u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id, + u16 ox_id, u8 *name); +u16 fc_rsnn_nn_build(struct fchs_s *fchs, void *pld, u32 s_id, + wwn_t node_name, u8 *name); + +u16 fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id, + u16 ox_id, enum bfa_lport_role role); + +u16 fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, + u16 ox_id, u8 *fc4_bitmap, + u32 bitmap_size); + +u16 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u16 ox_id, u8 fc4_type, u8 fc4_ftrs); + +u16 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u16 ox_id, wwn_t port_name); + +u16 fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id, + u16 ox_id, u32 port_id); + +u16 fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr, + u32 d_id, u32 s_id, u16 ox_id, + u8 reason_code, u8 reason_code_expl); + +u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, + u8 set_br_reg, u32 s_id, u16 ox_id); + +u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, + u32 s_id, u16 ox_id, + wwn_t port_name, wwn_t node_name, + u16 pdu_size, u16 bb_cr); + +u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, + u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name, + wwn_t node_name); + +enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld, + u32 host_dap, wwn_t node_name, wwn_t port_name); + +enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, + wwn_t port_name, wwn_t node_name); + +u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, + u32 d_id, u32 s_id, __be16 ox_id, + wwn_t port_name, wwn_t node_name); +u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, + u32 d_id, u32 s_id, __be16 ox_id, + u8 reason_code, u8 reason_code_expl); +u16 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, + u32 d_id, u32 s_id, __be16 ox_id); +u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, + u32 s_id, u16 ox_id); + +enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len); + +u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, + u32 s_id, __be16 ox_id, + enum bfa_lport_role role); + +u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, + u32 d_id, u32 s_id, u16 ox_id, + u32 data_format); + +u16 fc_rnid_acc_build(struct fchs_s *fchs, + struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id, + __be16 ox_id, u32 data_format, + struct fc_rnid_common_id_data_s *common_id_data, + struct fc_rnid_general_topology_data_s *gen_topo_data); + +u16 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rps2c, + u32 d_id, u32 s_id, u32 *pid_list, u16 npids); +u16 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, + u32 d_id, u32 s_id, u16 ox_id); +u16 fc_rpsc_acc_build(struct fchs_s *fchs, + struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id, + __be16 ox_id, struct fc_rpsc_speed_info_s *oper_speed); +u16 fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id, + u8 fc4_type); + +u16 fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u32 port_id, wwn_t port_name); + +u16 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u32 port_id, wwn_t node_name); + +u16 fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u32 port_id, u32 cos); + +u16 fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u32 port_id, u8 port_type); + +u16 fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u32 port_id); + +u16 
fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, + u32 s_id, u16 ox_id, wwn_t port_name); + +u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, + u32 s_id, __be16 ox_id); + +u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u16 cmd_code); +u16 fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn); +u16 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn); + +void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask); + +void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, + __be16 ox_id); + +enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len, + wwn_t port_name); + +enum fc_parse_status fc_prli_parse(struct fc_prli_s *prli); + +enum fc_parse_status fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, + wwn_t port_name); + +u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, + u32 s_id, __be16 ox_id, u16 rx_id); + +int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code); + +u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, + u32 d_id, u32 s_id, __be16 ox_id, int num_pages); + +u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, + u32 d_id, u32 s_id, __be16 ox_id, int num_pages); + +u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, + u16 ox_id, wwn_t port_name, wwn_t node_name, + u16 pdu_size); + +u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name); + +u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, + u16 ox_id, int num_pages); + +u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, + u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type, + u32 tpr_id); + +u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, + __be16 ox_id, u32 reason_code, u32 reason_expl); + +u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, + u32 port_id); + +u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr); + +u16 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id, + u16 ox_id); +#endif diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c new file mode 100644 index 000000000..7ad222880 --- /dev/null +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -0,0 +1,3897 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
+ */ + +#include "bfad_drv.h" +#include "bfa_modules.h" + +BFA_TRC_FILE(HAL, FCPIM); + +/* + * BFA ITNIM Related definitions + */ +static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); + +#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \ + (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1)))) + +#define bfa_fcpim_additn(__itnim) \ + list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q) +#define bfa_fcpim_delitn(__itnim) do { \ + WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \ + bfa_itnim_update_del_itn_stats(__itnim); \ + list_del(&(__itnim)->qe); \ + WARN_ON(!list_empty(&(__itnim)->io_q)); \ + WARN_ON(!list_empty(&(__itnim)->io_cleanup_q)); \ + WARN_ON(!list_empty(&(__itnim)->pending_q)); \ +} while (0) + +#define bfa_itnim_online_cb(__itnim) do { \ + if ((__itnim)->bfa->fcs) \ + bfa_cb_itnim_online((__itnim)->ditn); \ + else { \ + bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ + __bfa_cb_itnim_online, (__itnim)); \ + } \ +} while (0) + +#define bfa_itnim_offline_cb(__itnim) do { \ + if ((__itnim)->bfa->fcs) \ + bfa_cb_itnim_offline((__itnim)->ditn); \ + else { \ + bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ + __bfa_cb_itnim_offline, (__itnim)); \ + } \ +} while (0) + +#define bfa_itnim_sler_cb(__itnim) do { \ + if ((__itnim)->bfa->fcs) \ + bfa_cb_itnim_sler((__itnim)->ditn); \ + else { \ + bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ + __bfa_cb_itnim_sler, (__itnim)); \ + } \ +} while (0) + +enum bfa_ioim_lm_ua_status { + BFA_IOIM_LM_UA_RESET = 0, + BFA_IOIM_LM_UA_SET = 1, +}; + +/* + * itnim state machine event + */ +enum bfa_itnim_event { + BFA_ITNIM_SM_CREATE = 1, /* itnim is created */ + BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */ + BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */ + BFA_ITNIM_SM_FWRSP = 4, /* firmware response */ + BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */ + BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */ + BFA_ITNIM_SM_SLER = 7, /* second level error recovery */ + BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */ + BFA_ITNIM_SM_QRESUME = 9, /* queue space available */ +}; + +/* + * BFA IOIM related definitions + */ +#define bfa_ioim_move_to_comp_q(__ioim) do { \ + list_del(&(__ioim)->qe); \ + list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \ +} while (0) + + +#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \ + if ((__fcpim)->profile_comp) \ + (__fcpim)->profile_comp(__ioim); \ +} while (0) + +#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \ + if ((__fcpim)->profile_start) \ + (__fcpim)->profile_start(__ioim); \ +} while (0) + +/* + * IO state machine events + */ +enum bfa_ioim_event { + BFA_IOIM_SM_START = 1, /* io start request from host */ + BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */ + BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */ + BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */ + BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */ + BFA_IOIM_SM_FREE = 6, /* io resource is freed */ + BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */ + BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */ + BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */ + BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */ + BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */ + BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */ + BFA_IOIM_SM_HCB = 13, /* bfa callback complete */ + BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */ + BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */ + 
BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */ + BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */ + BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */ +}; + + +/* + * BFA TSKIM related definitions + */ + +/* + * task management completion handling + */ +#define bfa_tskim_qcomp(__tskim, __cbfn) do { \ + bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\ + bfa_tskim_notify_comp(__tskim); \ +} while (0) + +#define bfa_tskim_notify_comp(__tskim) do { \ + if ((__tskim)->notify) \ + bfa_itnim_tskdone((__tskim)->itnim); \ +} while (0) + + +enum bfa_tskim_event { + BFA_TSKIM_SM_START = 1, /* TM command start */ + BFA_TSKIM_SM_DONE = 2, /* TM completion */ + BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */ + BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */ + BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */ + BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */ + BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */ + BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ + BFA_TSKIM_SM_UTAG = 10, /* TM completion unknown tag */ +}; + +/* + * forward declaration for BFA ITNIM functions + */ +static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim); +static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim); +static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim); +static void bfa_itnim_cleanp_comp(void *itnim_cbarg); +static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim); +static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete); +static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim); +static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim); +static void bfa_itnim_iotov(void *itnim_arg); +static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim); +static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim); +static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim); + +/* + * forward declaration of ITNIM state machine + */ +static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); + +/* + * 
forward declaration for BFA IOIM functions + */ +static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim); +static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim); +static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim); +static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim); +static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); +static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); + +/* + * forward declaration of BFA IO state machine + */ +static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +/* + * forward declaration for BFA TSKIM functions + */ +static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete); +static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim, + struct scsi_lun lun); +static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim); +static void bfa_tskim_cleanp_comp(void *tskim_cbarg); +static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim); +static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim); +static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim); +static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim); + +/* + * forward declaration of BFA TSKIM state machine + */ +static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event); +static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event); +static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event); +static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event); +static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event); +static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event); +static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event); +/* + * BFA FCP Initiator Mode module + */ + +/* + * Compute and return memory needed by FCP(im) module. 
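 *
 * For a rough sense of scale (the request counts here are hypothetical,
 * only the sizeof terms come from the code below): with 2048 IO requests
 * and 128 task-management requests the accumulation amounts to
 *
 *     *km_len += 2048 * (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
 *     *km_len += 128  * sizeof(struct bfa_tskim_s);
 *
 * on top of the per-rport itnim memory added by bfa_itnim_meminfo().
 * num_tskim_reqs is first rounded up to BFA_TSKIM_MIN if it is smaller.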
+ */ +static void +bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len) +{ + bfa_itnim_meminfo(cfg, km_len); + + /* + * IO memory + */ + *km_len += cfg->fwcfg.num_ioim_reqs * + (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s)); + + /* + * task management command memory + */ + if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN) + cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN; + *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s); +} + + +static void +bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad, + struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) +{ + struct bfa_fcpim_s *fcpim = &fcp->fcpim; + struct bfa_s *bfa = fcp->bfa; + + bfa_trc(bfa, cfg->drvcfg.path_tov); + bfa_trc(bfa, cfg->fwcfg.num_rports); + bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs); + bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs); + + fcpim->fcp = fcp; + fcpim->bfa = bfa; + fcpim->num_itnims = cfg->fwcfg.num_rports; + fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs; + fcpim->path_tov = cfg->drvcfg.path_tov; + fcpim->delay_comp = cfg->drvcfg.delay_comp; + fcpim->profile_comp = NULL; + fcpim->profile_start = NULL; + + bfa_itnim_attach(fcpim); + bfa_tskim_attach(fcpim); + bfa_ioim_attach(fcpim); +} + +void +bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp) +{ + struct bfa_fcpim_s *fcpim = &fcp->fcpim; + struct bfa_itnim_s *itnim; + struct list_head *qe, *qen; + + /* Enqueue unused ioim resources to free_q */ + list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q); + + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + bfa_itnim_iocdisable(itnim); + } +} + +void +bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + + fcpim->path_tov = path_tov * 1000; + if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX) + fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX; +} + +u16 +bfa_fcpim_path_tov_get(struct bfa_s *bfa) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + + return fcpim->path_tov / 1000; +} + +#define bfa_fcpim_add_iostats(__l, __r, __stats) \ + (__l->__stats += __r->__stats) + +void +bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats, + struct bfa_itnim_iostats_s *rstats) +{ + bfa_fcpim_add_iostats(lstats, rstats, total_ios); + bfa_fcpim_add_iostats(lstats, rstats, qresumes); + bfa_fcpim_add_iostats(lstats, rstats, no_iotags); + bfa_fcpim_add_iostats(lstats, rstats, io_aborts); + bfa_fcpim_add_iostats(lstats, rstats, no_tskims); + bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok); + bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun); + bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun); + bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted); + bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout); + bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort); + bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err); + bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err); + bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed); + bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free); + bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts); + bfa_fcpim_add_iostats(lstats, rstats, iocom_utags); + bfa_fcpim_add_iostats(lstats, rstats, io_cleanups); + bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts); + bfa_fcpim_add_iostats(lstats, rstats, onlines); + bfa_fcpim_add_iostats(lstats, rstats, offlines); + bfa_fcpim_add_iostats(lstats, rstats, creates); + bfa_fcpim_add_iostats(lstats, rstats, deletes); + bfa_fcpim_add_iostats(lstats, rstats, create_comps); + bfa_fcpim_add_iostats(lstats, rstats, delete_comps); 
+ bfa_fcpim_add_iostats(lstats, rstats, sler_events); + bfa_fcpim_add_iostats(lstats, rstats, fw_create); + bfa_fcpim_add_iostats(lstats, rstats, fw_delete); + bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled); + bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps); + bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds); + bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps); + bfa_fcpim_add_iostats(lstats, rstats, tm_success); + bfa_fcpim_add_iostats(lstats, rstats, tm_failures); + bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps); + bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes); + bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns); + bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups); + bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps); + bfa_fcpim_add_iostats(lstats, rstats, io_comps); + bfa_fcpim_add_iostats(lstats, rstats, input_reqs); + bfa_fcpim_add_iostats(lstats, rstats, output_reqs); + bfa_fcpim_add_iostats(lstats, rstats, rd_throughput); + bfa_fcpim_add_iostats(lstats, rstats, wr_throughput); +} + +bfa_status_t +bfa_fcpim_port_iostats(struct bfa_s *bfa, + struct bfa_itnim_iostats_s *stats, u8 lp_tag) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + struct list_head *qe, *qen; + struct bfa_itnim_s *itnim; + + /* accumulate IO stats from itnim */ + memset(stats, 0, sizeof(struct bfa_itnim_iostats_s)); + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + if (itnim->rport->rport_info.lp_tag != lp_tag) + continue; + bfa_fcpim_add_stats(stats, &(itnim->stats)); + } + return BFA_STATUS_OK; +} + +static void +bfa_ioim_profile_comp(struct bfa_ioim_s *ioim) +{ + struct bfa_itnim_latency_s *io_lat = + &(ioim->itnim->ioprofile.io_latency); + u32 val, idx; + + val = (u32)(jiffies - ioim->start_time); + idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio)); + bfa_itnim_ioprofile_update(ioim->itnim, idx); + + io_lat->count[idx]++; + io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val; + io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val; + io_lat->avg[idx] += val; +} + +static void +bfa_ioim_profile_start(struct bfa_ioim_s *ioim) +{ + ioim->start_time = jiffies; +} + +bfa_status_t +bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time) +{ + struct bfa_itnim_s *itnim; + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + struct list_head *qe, *qen; + + /* accumulate IO stats from itnim */ + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + bfa_itnim_clear_stats(itnim); + } + fcpim->io_profile = BFA_TRUE; + fcpim->io_profile_start_time = time; + fcpim->profile_comp = bfa_ioim_profile_comp; + fcpim->profile_start = bfa_ioim_profile_start; + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcpim_profile_off(struct bfa_s *bfa) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + fcpim->io_profile = BFA_FALSE; + fcpim->io_profile_start_time = 0; + fcpim->profile_comp = NULL; + fcpim->profile_start = NULL; + return BFA_STATUS_OK; +} + +u16 +bfa_fcpim_qdepth_get(struct bfa_s *bfa) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + + return fcpim->q_depth; +} + +/* + * BFA ITNIM module state machine functions + */ + +/* + * Beginning/unallocated state - no events expected. 
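 *
 * A minimal sketch of how this state machine is driven through the itnim
 * API at the bottom of this file (rport and ditn stand for whatever the
 * caller already holds; error handling omitted):
 *
 *     itnim = bfa_itnim_create(bfa, rport, ditn);   sends BFA_ITNIM_SM_CREATE: uninit -> created
 *     bfa_itnim_online(itnim, seq_rec);             sends BFA_ITNIM_SM_ONLINE: created -> fwcreate
 *                                                   (or fwcreate_qfull if the request queue is full)
 *     bfa_itnim_offline(itnim);                     sends BFA_ITNIM_SM_OFFLINE
 *     bfa_itnim_delete(itnim);                      sends BFA_ITNIM_SM_DELETE
 *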
+ */ +static void +bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_CREATE: + bfa_sm_set_state(itnim, bfa_itnim_sm_created); + itnim->is_online = BFA_FALSE; + bfa_fcpim_additn(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/* + * Beginning state, only online event expected. + */ +static void +bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_ONLINE: + if (bfa_itnim_send_fwcreate(itnim)) + bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); + else + bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + bfa_fcpim_delitn(itnim); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/* + * Waiting for itnim create response from firmware. + */ +static void +bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_FWRSP: + bfa_sm_set_state(itnim, bfa_itnim_sm_online); + itnim->is_online = BFA_TRUE; + bfa_itnim_iotov_online(itnim); + bfa_itnim_online_cb(itnim); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending); + break; + + case BFA_ITNIM_SM_OFFLINE: + if (bfa_itnim_send_fwdelete(itnim)) + bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); + else + bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +static void +bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_QRESUME: + bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); + bfa_itnim_send_fwcreate(itnim); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + bfa_reqq_wcancel(&itnim->reqq_wait); + bfa_fcpim_delitn(itnim); + break; + + case BFA_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_itnim_sm_offline); + bfa_reqq_wcancel(&itnim->reqq_wait); + bfa_itnim_offline_cb(itnim); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + bfa_reqq_wcancel(&itnim->reqq_wait); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/* + * Waiting for itnim create response from firmware, a delete is pending. + */ +static void +bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_FWRSP: + if (bfa_itnim_send_fwdelete(itnim)) + bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); + else + bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + bfa_fcpim_delitn(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/* + * Online state - normal parking state. 
+ */ +static void +bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline); + itnim->is_online = BFA_FALSE; + bfa_itnim_iotov_start(itnim); + bfa_itnim_cleanup(itnim); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); + itnim->is_online = BFA_FALSE; + bfa_itnim_cleanup(itnim); + break; + + case BFA_ITNIM_SM_SLER: + bfa_sm_set_state(itnim, bfa_itnim_sm_sler); + itnim->is_online = BFA_FALSE; + bfa_itnim_iotov_start(itnim); + bfa_itnim_sler_cb(itnim); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + itnim->is_online = BFA_FALSE; + bfa_itnim_iotov_start(itnim); + bfa_itnim_iocdisable_cleanup(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/* + * Second level error recovery need. + */ +static void +bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline); + bfa_itnim_cleanup(itnim); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); + bfa_itnim_cleanup(itnim); + bfa_itnim_iotov_delete(itnim); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + bfa_itnim_iocdisable_cleanup(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/* + * Going offline. Waiting for active IO cleanup. + */ +static void +bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_CLEANUP: + if (bfa_itnim_send_fwdelete(itnim)) + bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); + else + bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); + bfa_itnim_iotov_delete(itnim); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + bfa_itnim_iocdisable_cleanup(itnim); + bfa_itnim_offline_cb(itnim); + break; + + case BFA_ITNIM_SM_SLER: + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/* + * Deleting itnim. Waiting for active IO cleanup. + */ +static void +bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_CLEANUP: + if (bfa_itnim_send_fwdelete(itnim)) + bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); + else + bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + bfa_itnim_iocdisable_cleanup(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/* + * Rport offline. Fimrware itnim is being deleted - awaiting f/w response. 
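 * That is, the firmware delete request has already been sent and this state
 * waits for the BFI_ITN_I2H_DELETE_RSP completion (delivered as
 * BFA_ITNIM_SM_FWRSP) before running the offline callback and settling in
 * the offline state.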
+ */ +static void +bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_FWRSP: + bfa_sm_set_state(itnim, bfa_itnim_sm_offline); + bfa_itnim_offline_cb(itnim); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + bfa_itnim_offline_cb(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +static void +bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_QRESUME: + bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); + bfa_itnim_send_fwdelete(itnim); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + bfa_reqq_wcancel(&itnim->reqq_wait); + bfa_itnim_offline_cb(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/* + * Offline state. + */ +static void +bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + bfa_itnim_iotov_delete(itnim); + bfa_fcpim_delitn(itnim); + break; + + case BFA_ITNIM_SM_ONLINE: + if (bfa_itnim_send_fwcreate(itnim)) + bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); + else + bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +static void +bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + bfa_itnim_iotov_delete(itnim); + bfa_fcpim_delitn(itnim); + break; + + case BFA_ITNIM_SM_OFFLINE: + bfa_itnim_offline_cb(itnim); + break; + + case BFA_ITNIM_SM_ONLINE: + if (bfa_itnim_send_fwcreate(itnim)) + bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); + else + bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); + break; + + case BFA_ITNIM_SM_HWFAIL: + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/* + * Itnim is deleted, waiting for firmware response to delete. 
+ */ +static void +bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_FWRSP: + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + bfa_fcpim_delitn(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +static void +bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_QRESUME: + bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); + bfa_itnim_send_fwdelete(itnim); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + bfa_reqq_wcancel(&itnim->reqq_wait); + bfa_fcpim_delitn(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/* + * Initiate cleanup of all IOs on an IOC failure. + */ +static void +bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim) +{ + struct bfa_tskim_s *tskim; + struct bfa_ioim_s *ioim; + struct list_head *qe, *qen; + + list_for_each_safe(qe, qen, &itnim->tsk_q) { + tskim = (struct bfa_tskim_s *) qe; + bfa_tskim_iocdisable(tskim); + } + + list_for_each_safe(qe, qen, &itnim->io_q) { + ioim = (struct bfa_ioim_s *) qe; + bfa_ioim_iocdisable(ioim); + } + + /* + * For IO request in pending queue, we pretend an early timeout. + */ + list_for_each_safe(qe, qen, &itnim->pending_q) { + ioim = (struct bfa_ioim_s *) qe; + bfa_ioim_tov(ioim); + } + + list_for_each_safe(qe, qen, &itnim->io_cleanup_q) { + ioim = (struct bfa_ioim_s *) qe; + bfa_ioim_iocdisable(ioim); + } +} + +/* + * IO cleanup completion + */ +static void +bfa_itnim_cleanp_comp(void *itnim_cbarg) +{ + struct bfa_itnim_s *itnim = itnim_cbarg; + + bfa_stats(itnim, cleanup_comps); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP); +} + +/* + * Initiate cleanup of all IOs. + */ +static void +bfa_itnim_cleanup(struct bfa_itnim_s *itnim) +{ + struct bfa_ioim_s *ioim; + struct bfa_tskim_s *tskim; + struct list_head *qe, *qen; + + bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim); + + list_for_each_safe(qe, qen, &itnim->io_q) { + ioim = (struct bfa_ioim_s *) qe; + + /* + * Move IO to a cleanup queue from active queue so that a later + * TM will not pickup this IO. + */ + list_del(&ioim->qe); + list_add_tail(&ioim->qe, &itnim->io_cleanup_q); + + bfa_wc_up(&itnim->wc); + bfa_ioim_cleanup(ioim); + } + + list_for_each_safe(qe, qen, &itnim->tsk_q) { + tskim = (struct bfa_tskim_s *) qe; + bfa_wc_up(&itnim->wc); + bfa_tskim_cleanup(tskim); + } + + bfa_wc_wait(&itnim->wc); +} + +static void +__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_itnim_s *itnim = cbarg; + + if (complete) + bfa_cb_itnim_online(itnim->ditn); +} + +static void +__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_itnim_s *itnim = cbarg; + + if (complete) + bfa_cb_itnim_offline(itnim->ditn); +} + +static void +__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_itnim_s *itnim = cbarg; + + if (complete) + bfa_cb_itnim_sler(itnim->ditn); +} + +/* + * Call to resume any I/O requests waiting for room in request queue. 
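 *
 * This pairs with the queue-full handling in bfa_itnim_send_fwcreate() and
 * bfa_itnim_send_fwdelete() below: when bfa_reqq_next() returns NULL the
 * message is not sent and the itnim parks on its wait element,
 *
 *     bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
 *     return BFA_FALSE;        (caller moves to a *_qfull state)
 *
 * and once request-queue space frees up this callback fires and re-drives
 * the state machine with BFA_ITNIM_SM_QRESUME. The *_qfull states cancel
 * the wait with bfa_reqq_wcancel() if the itnim is torn down before space
 * becomes available.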
+ */ +static void +bfa_itnim_qresume(void *cbarg) +{ + struct bfa_itnim_s *itnim = cbarg; + + bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME); +} + +/* + * bfa_itnim_public + */ + +void +bfa_itnim_iodone(struct bfa_itnim_s *itnim) +{ + bfa_wc_down(&itnim->wc); +} + +void +bfa_itnim_tskdone(struct bfa_itnim_s *itnim) +{ + bfa_wc_down(&itnim->wc); +} + +void +bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len) +{ + /* + * ITN memory + */ + *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s); +} + +void +bfa_itnim_attach(struct bfa_fcpim_s *fcpim) +{ + struct bfa_s *bfa = fcpim->bfa; + struct bfa_fcp_mod_s *fcp = fcpim->fcp; + struct bfa_itnim_s *itnim; + int i, j; + + INIT_LIST_HEAD(&fcpim->itnim_q); + + itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp); + fcpim->itnim_arr = itnim; + + for (i = 0; i < fcpim->num_itnims; i++, itnim++) { + memset(itnim, 0, sizeof(struct bfa_itnim_s)); + itnim->bfa = bfa; + itnim->fcpim = fcpim; + itnim->reqq = BFA_REQQ_QOS_LO; + itnim->rport = BFA_RPORT_FROM_TAG(bfa, i); + itnim->iotov_active = BFA_FALSE; + bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim); + + INIT_LIST_HEAD(&itnim->io_q); + INIT_LIST_HEAD(&itnim->io_cleanup_q); + INIT_LIST_HEAD(&itnim->pending_q); + INIT_LIST_HEAD(&itnim->tsk_q); + INIT_LIST_HEAD(&itnim->delay_comp_q); + for (j = 0; j < BFA_IOBUCKET_MAX; j++) + itnim->ioprofile.io_latency.min[j] = ~0; + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + } + + bfa_mem_kva_curp(fcp) = (u8 *) itnim; +} + +void +bfa_itnim_iocdisable(struct bfa_itnim_s *itnim) +{ + bfa_stats(itnim, ioc_disabled); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL); +} + +static bfa_boolean_t +bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim) +{ + struct bfi_itn_create_req_s *m; + + itnim->msg_no++; + + /* + * check for room in queue to send request now + */ + m = bfa_reqq_next(itnim->bfa, itnim->reqq); + if (!m) { + bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ, + bfa_fn_lpu(itnim->bfa)); + m->fw_handle = itnim->rport->fw_handle; + m->class = FC_CLASS_3; + m->seq_rec = itnim->seq_rec; + m->msg_no = itnim->msg_no; + bfa_stats(itnim, fw_create); + + /* + * queue I/O message to firmware + */ + bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh); + return BFA_TRUE; +} + +static bfa_boolean_t +bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim) +{ + struct bfi_itn_delete_req_s *m; + + /* + * check for room in queue to send request now + */ + m = bfa_reqq_next(itnim->bfa, itnim->reqq); + if (!m) { + bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ, + bfa_fn_lpu(itnim->bfa)); + m->fw_handle = itnim->rport->fw_handle; + bfa_stats(itnim, fw_delete); + + /* + * queue I/O message to firmware + */ + bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh); + return BFA_TRUE; +} + +/* + * Cleanup all pending failed inflight requests. + */ +static void +bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov) +{ + struct bfa_ioim_s *ioim; + struct list_head *qe, *qen; + + list_for_each_safe(qe, qen, &itnim->delay_comp_q) { + ioim = (struct bfa_ioim_s *)qe; + bfa_ioim_delayed_comp(ioim, iotov); + } +} + +/* + * Start all pending IO requests. 
+ */ +static void +bfa_itnim_iotov_online(struct bfa_itnim_s *itnim) +{ + struct bfa_ioim_s *ioim; + + bfa_itnim_iotov_stop(itnim); + + /* + * Abort all inflight IO requests in the queue + */ + bfa_itnim_delayed_comp(itnim, BFA_FALSE); + + /* + * Start all pending IO requests. + */ + while (!list_empty(&itnim->pending_q)) { + bfa_q_deq(&itnim->pending_q, &ioim); + list_add_tail(&ioim->qe, &itnim->io_q); + bfa_ioim_start(ioim); + } +} + +/* + * Fail all pending IO requests + */ +static void +bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim) +{ + struct bfa_ioim_s *ioim; + + /* + * Fail all inflight IO requests in the queue + */ + bfa_itnim_delayed_comp(itnim, BFA_TRUE); + + /* + * Fail any pending IO requests. + */ + while (!list_empty(&itnim->pending_q)) { + bfa_q_deq(&itnim->pending_q, &ioim); + list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); + bfa_ioim_tov(ioim); + } +} + +/* + * IO TOV timer callback. Fail any pending IO requests. + */ +static void +bfa_itnim_iotov(void *itnim_arg) +{ + struct bfa_itnim_s *itnim = itnim_arg; + + itnim->iotov_active = BFA_FALSE; + + bfa_cb_itnim_tov_begin(itnim->ditn); + bfa_itnim_iotov_cleanup(itnim); + bfa_cb_itnim_tov(itnim->ditn); +} + +/* + * Start IO TOV timer for failing back pending IO requests in offline state. + */ +static void +bfa_itnim_iotov_start(struct bfa_itnim_s *itnim) +{ + if (itnim->fcpim->path_tov > 0) { + + itnim->iotov_active = BFA_TRUE; + WARN_ON(!bfa_itnim_hold_io(itnim)); + bfa_timer_start(itnim->bfa, &itnim->timer, + bfa_itnim_iotov, itnim, itnim->fcpim->path_tov); + } +} + +/* + * Stop IO TOV timer. + */ +static void +bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim) +{ + if (itnim->iotov_active) { + itnim->iotov_active = BFA_FALSE; + bfa_timer_stop(&itnim->timer); + } +} + +/* + * Stop IO TOV timer. + */ +static void +bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim) +{ + bfa_boolean_t pathtov_active = BFA_FALSE; + + if (itnim->iotov_active) + pathtov_active = BFA_TRUE; + + bfa_itnim_iotov_stop(itnim); + if (pathtov_active) + bfa_cb_itnim_tov_begin(itnim->ditn); + bfa_itnim_iotov_cleanup(itnim); + if (pathtov_active) + bfa_cb_itnim_tov(itnim->ditn); +} + +static void +bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa); + fcpim->del_itn_stats.del_itn_iocomp_aborted += + itnim->stats.iocomp_aborted; + fcpim->del_itn_stats.del_itn_iocomp_timedout += + itnim->stats.iocomp_timedout; + fcpim->del_itn_stats.del_itn_iocom_sqer_needed += + itnim->stats.iocom_sqer_needed; + fcpim->del_itn_stats.del_itn_iocom_res_free += + itnim->stats.iocom_res_free; + fcpim->del_itn_stats.del_itn_iocom_hostabrts += + itnim->stats.iocom_hostabrts; + fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios; + fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns; + fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns; +} + +/* + * bfa_itnim_public + */ + +/* + * Itnim interrupt processing. 
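 *
 * Each I2H message carries the bfa_handle (the rport tag used when the
 * itnim was created), which is mapped back to the itnim with
 *
 *     itnim = BFA_ITNIM_FROM_TAG(fcpim, msg.create_rsp->bfa_handle);
 *
 * i.e. fcpim->itnim_arr + (bfa_handle & (fcpim->num_itnims - 1)); note that
 * the mask wraps the tag into the array rather than bounds-checking it.
 * Create and delete responses both feed BFA_ITNIM_SM_FWRSP into the state
 * machine; SLER events feed BFA_ITNIM_SM_SLER.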
+ */ +void +bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + union bfi_itn_i2h_msg_u msg; + struct bfa_itnim_s *itnim; + + bfa_trc(bfa, m->mhdr.msg_id); + + msg.msg = m; + + switch (m->mhdr.msg_id) { + case BFI_ITN_I2H_CREATE_RSP: + itnim = BFA_ITNIM_FROM_TAG(fcpim, + msg.create_rsp->bfa_handle); + WARN_ON(msg.create_rsp->status != BFA_STATUS_OK); + bfa_stats(itnim, create_comps); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); + break; + + case BFI_ITN_I2H_DELETE_RSP: + itnim = BFA_ITNIM_FROM_TAG(fcpim, + msg.delete_rsp->bfa_handle); + WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK); + bfa_stats(itnim, delete_comps); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); + break; + + case BFI_ITN_I2H_SLER_EVENT: + itnim = BFA_ITNIM_FROM_TAG(fcpim, + msg.sler_event->bfa_handle); + bfa_stats(itnim, sler_events); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER); + break; + + default: + bfa_trc(bfa, m->mhdr.msg_id); + WARN_ON(1); + } +} + +/* + * bfa_itnim_api + */ + +struct bfa_itnim_s * +bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + struct bfa_itnim_s *itnim; + + bfa_itn_create(bfa, rport, bfa_itnim_isr); + + itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag); + WARN_ON(itnim->rport != rport); + + itnim->ditn = ditn; + + bfa_stats(itnim, creates); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE); + + return itnim; +} + +void +bfa_itnim_delete(struct bfa_itnim_s *itnim) +{ + bfa_stats(itnim, deletes); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE); +} + +void +bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec) +{ + itnim->seq_rec = seq_rec; + bfa_stats(itnim, onlines); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE); +} + +void +bfa_itnim_offline(struct bfa_itnim_s *itnim) +{ + bfa_stats(itnim, offlines); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE); +} + +/* + * Return true if itnim is considered offline for holding off IO request. + * IO is not held if itnim is being deleted. 
+ */ +bfa_boolean_t +bfa_itnim_hold_io(struct bfa_itnim_s *itnim) +{ + return itnim->fcpim->path_tov && itnim->iotov_active && + (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) || + bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) || + bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) || + bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) || + bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) || + bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)); +} + +#define bfa_io_lat_clock_res_div HZ +#define bfa_io_lat_clock_res_mul 1000 +bfa_status_t +bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim, + struct bfa_itnim_ioprofile_s *ioprofile) +{ + struct bfa_fcpim_s *fcpim; + + if (!itnim) + return BFA_STATUS_NO_FCPIM_NEXUS; + + fcpim = BFA_FCPIM(itnim->bfa); + + if (!fcpim->io_profile) + return BFA_STATUS_IOPROFILE_OFF; + + itnim->ioprofile.index = BFA_IOBUCKET_MAX; + /* unsigned 32-bit time_t overflow here in y2106 */ + itnim->ioprofile.io_profile_start_time = + bfa_io_profile_start_time(itnim->bfa); + itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul; + itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div; + *ioprofile = itnim->ioprofile; + + return BFA_STATUS_OK; +} + +void +bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) +{ + int j; + + if (!itnim) + return; + + memset(&itnim->stats, 0, sizeof(itnim->stats)); + memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile)); + for (j = 0; j < BFA_IOBUCKET_MAX; j++) + itnim->ioprofile.io_latency.min[j] = ~0; +} + +/* + * BFA IO module state machine functions + */ + +/* + * IO is not started (unallocated). + */ +static void +bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + switch (event) { + case BFA_IOIM_SM_START: + if (!bfa_itnim_is_online(ioim->itnim)) { + if (!bfa_itnim_hold_io(ioim->itnim)) { + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + list_del(&ioim->qe); + list_add_tail(&ioim->qe, + &ioim->fcpim->ioim_comp_q); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, + __bfa_cb_ioim_pathtov, ioim); + } else { + list_del(&ioim->qe); + list_add_tail(&ioim->qe, + &ioim->itnim->pending_q); + } + break; + } + + if (ioim->nsges > BFI_SGE_INLINE) { + if (!bfa_ioim_sgpg_alloc(ioim)) { + bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc); + return; + } + } + + if (!bfa_ioim_send_ioreq(ioim)) { + bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); + break; + } + + bfa_sm_set_state(ioim, bfa_ioim_sm_active); + break; + + case BFA_IOIM_SM_IOTOV: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, + __bfa_cb_ioim_pathtov, ioim); + break; + + case BFA_IOIM_SM_ABORT: + /* + * IO in pending queue can get abort requests. Complete abort + * requests immediately. + */ + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, + __bfa_cb_ioim_abort, ioim); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/* + * IO is waiting for SG pages. 
+ */ +static void +bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_SGALLOCED: + if (!bfa_ioim_send_ioreq(ioim)) { + bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); + break; + } + bfa_sm_set_state(ioim, bfa_ioim_sm_active); + break; + + case BFA_IOIM_SM_CLEANUP: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_ABORT: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/* + * IO is active. + */ +static void +bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + switch (event) { + case BFA_IOIM_SM_COMP_GOOD: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, + __bfa_cb_ioim_good_comp, ioim); + break; + + case BFA_IOIM_SM_COMP: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp, + ioim); + break; + + case BFA_IOIM_SM_DONE: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp, + ioim); + break; + + case BFA_IOIM_SM_ABORT: + ioim->iosp->abort_explicit = BFA_TRUE; + ioim->io_cbfn = __bfa_cb_ioim_abort; + + if (bfa_ioim_send_abort(ioim)) + bfa_sm_set_state(ioim, bfa_ioim_sm_abort); + else { + bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull); + bfa_stats(ioim->itnim, qwait); + bfa_reqq_wait(ioim->bfa, ioim->reqq, + &ioim->iosp->reqq_wait); + } + break; + + case BFA_IOIM_SM_CLEANUP: + ioim->iosp->abort_explicit = BFA_FALSE; + ioim->io_cbfn = __bfa_cb_ioim_failed; + + if (bfa_ioim_send_abort(ioim)) + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); + else { + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); + bfa_stats(ioim->itnim, qwait); + bfa_reqq_wait(ioim->bfa, ioim->reqq, + &ioim->iosp->reqq_wait); + } + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + break; + + case BFA_IOIM_SM_SQRETRY: + if (bfa_ioim_maxretry_reached(ioim)) { + /* max retry reached, free IO */ + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, + __bfa_cb_ioim_failed, ioim); + break; + } + /* waiting for IO tag resource free */ + bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/* + * IO is retried with new tag. + */ +static void +bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + switch (event) { + case BFA_IOIM_SM_FREE: + /* abts and rrq done. 
Now retry the IO with new tag */ + bfa_ioim_update_iotag(ioim); + if (!bfa_ioim_send_ioreq(ioim)) { + bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); + break; + } + bfa_sm_set_state(ioim, bfa_ioim_sm_active); + break; + + case BFA_IOIM_SM_CLEANUP: + ioim->iosp->abort_explicit = BFA_FALSE; + ioim->io_cbfn = __bfa_cb_ioim_failed; + + if (bfa_ioim_send_abort(ioim)) + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); + else { + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); + bfa_stats(ioim->itnim, qwait); + bfa_reqq_wait(ioim->bfa, ioim->reqq, + &ioim->iosp->reqq_wait); + } + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, + __bfa_cb_ioim_failed, ioim); + break; + + case BFA_IOIM_SM_ABORT: + /* in this state IO abort is done. + * Waiting for IO tag resource free. + */ + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/* + * IO is being aborted, waiting for completion from firmware. + */ +static void +bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_COMP_GOOD: + case BFA_IOIM_SM_COMP: + case BFA_IOIM_SM_DONE: + case BFA_IOIM_SM_FREE: + break; + + case BFA_IOIM_SM_ABORT_DONE: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + case BFA_IOIM_SM_ABORT_COMP: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + case BFA_IOIM_SM_COMP_UTAG: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + case BFA_IOIM_SM_CLEANUP: + WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE); + ioim->iosp->abort_explicit = BFA_FALSE; + + if (bfa_ioim_send_abort(ioim)) + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); + else { + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); + bfa_stats(ioim->itnim, qwait); + bfa_reqq_wait(ioim->bfa, ioim->reqq, + &ioim->iosp->reqq_wait); + } + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/* + * IO is being cleaned up (implicit abort), waiting for completion from + * firmware. 
+ */ +static void +bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_COMP_GOOD: + case BFA_IOIM_SM_COMP: + case BFA_IOIM_SM_DONE: + case BFA_IOIM_SM_FREE: + break; + + case BFA_IOIM_SM_ABORT: + /* + * IO is already being aborted implicitly + */ + ioim->io_cbfn = __bfa_cb_ioim_abort; + break; + + case BFA_IOIM_SM_ABORT_DONE: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_ABORT_COMP: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_COMP_UTAG: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + break; + + case BFA_IOIM_SM_CLEANUP: + /* + * IO can be in cleanup state already due to TM command. + * 2nd cleanup request comes from ITN offline event. + */ + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/* + * IO is waiting for room in request CQ + */ +static void +bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_QRESUME: + bfa_sm_set_state(ioim, bfa_ioim_sm_active); + bfa_ioim_send_ioreq(ioim); + break; + + case BFA_IOIM_SM_ABORT: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + case BFA_IOIM_SM_CLEANUP: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/* + * Active IO is being aborted, waiting for room in request CQ. 
+ */ +static void +bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_QRESUME: + bfa_sm_set_state(ioim, bfa_ioim_sm_abort); + bfa_ioim_send_abort(ioim); + break; + + case BFA_IOIM_SM_CLEANUP: + WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE); + ioim->iosp->abort_explicit = BFA_FALSE; + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); + break; + + case BFA_IOIM_SM_COMP_GOOD: + case BFA_IOIM_SM_COMP: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + case BFA_IOIM_SM_DONE: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/* + * Active IO is being cleaned up, waiting for room in request CQ. + */ +static void +bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_QRESUME: + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); + bfa_ioim_send_abort(ioim); + break; + + case BFA_IOIM_SM_ABORT: + /* + * IO is already being cleaned up implicitly + */ + ioim->io_cbfn = __bfa_cb_ioim_abort; + break; + + case BFA_IOIM_SM_COMP_GOOD: + case BFA_IOIM_SM_COMP: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_DONE: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/* + * IO bfa callback is pending. + */ +static void +bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + switch (event) { + case BFA_IOIM_SM_HCB: + bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); + bfa_ioim_free(ioim); + break; + + case BFA_IOIM_SM_CLEANUP: + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/* + * IO bfa callback is pending. IO resource cannot be freed. 
+ */ +static void +bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_HCB: + bfa_sm_set_state(ioim, bfa_ioim_sm_resfree); + list_del(&ioim->qe); + list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q); + break; + + case BFA_IOIM_SM_FREE: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + break; + + case BFA_IOIM_SM_CLEANUP: + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/* + * IO is completed, waiting resource free from firmware. + */ +static void +bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_FREE: + bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); + bfa_ioim_free(ioim); + break; + + case BFA_IOIM_SM_CLEANUP: + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/* + * This is called from bfa_fcpim_start after the bfa_init() with flash read + * is complete by driver. now invalidate the stale content of lun mask + * like unit attention, rp tag and lp tag. + */ +void +bfa_ioim_lm_init(struct bfa_s *bfa) +{ + struct bfa_lun_mask_s *lunm_list; + int i; + + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) + return; + + lunm_list = bfa_get_lun_mask_list(bfa); + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + lunm_list[i].ua = BFA_IOIM_LM_UA_RESET; + lunm_list[i].lp_tag = BFA_LP_TAG_INVALID; + lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID; + } +} + +static void +__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_ioim_s *ioim = cbarg; + + if (!complete) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); + return; + } + + bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio); +} + +static void +__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_ioim_s *ioim = cbarg; + struct bfi_ioim_rsp_s *m; + u8 *snsinfo = NULL; + u8 sns_len = 0; + s32 residue = 0; + + if (!complete) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); + return; + } + + m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg; + if (m->io_status == BFI_IOIM_STS_OK) { + /* + * setup sense information, if present + */ + if ((m->scsi_status == SAM_STAT_CHECK_CONDITION) && + m->sns_len) { + sns_len = m->sns_len; + snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp, + ioim->iotag); + } + + /* + * setup residue value correctly for normal completions + */ + if (m->resid_flags == FCP_RESID_UNDER) { + residue = be32_to_cpu(m->residue); + bfa_stats(ioim->itnim, iocomp_underrun); + } + if (m->resid_flags == FCP_RESID_OVER) { + residue = be32_to_cpu(m->residue); + residue = -residue; + bfa_stats(ioim->itnim, iocomp_overrun); + } + } + + bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status, + m->scsi_status, sns_len, snsinfo, residue); +} + +void +bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn, + u16 rp_tag, u8 lp_tag) +{ + struct bfa_lun_mask_s *lun_list; + u8 i; + + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) + return; + + lun_list = bfa_get_lun_mask_list(bfa); + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) { + if ((lun_list[i].lp_wwn == lp_wwn) && + (lun_list[i].rp_wwn == rp_wwn)) { + lun_list[i].rp_tag = rp_tag; + lun_list[i].lp_tag = lp_tag; + } + } + } +} + 
+/* + * set UA for all active luns in LM DB + */ +static void +bfa_ioim_lm_set_ua(struct bfa_s *bfa) +{ + struct bfa_lun_mask_s *lunm_list; + int i; + + lunm_list = bfa_get_lun_mask_list(bfa); + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE) + continue; + lunm_list[i].ua = BFA_IOIM_LM_UA_SET; + } +} + +bfa_status_t +bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update) +{ + struct bfa_lunmask_cfg_s *lun_mask; + + bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) + return BFA_STATUS_FAILED; + + if (bfa_get_lun_mask_status(bfa) == update) + return BFA_STATUS_NO_CHANGE; + + lun_mask = bfa_get_lun_mask(bfa); + lun_mask->status = update; + + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) + bfa_ioim_lm_set_ua(bfa); + + return bfa_dconf_update(bfa); +} + +bfa_status_t +bfa_fcpim_lunmask_clear(struct bfa_s *bfa) +{ + int i; + struct bfa_lun_mask_s *lunm_list; + + bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) + return BFA_STATUS_FAILED; + + lunm_list = bfa_get_lun_mask_list(bfa); + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) { + if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) + bfa_rport_unset_lunmask(bfa, + BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag)); + } + } + + memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG); + return bfa_dconf_update(bfa); +} + +bfa_status_t +bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf) +{ + struct bfa_lunmask_cfg_s *lun_mask; + + bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) + return BFA_STATUS_FAILED; + + lun_mask = bfa_get_lun_mask(bfa); + memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s)); + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn, + wwn_t rpwwn, struct scsi_lun lun) +{ + struct bfa_lun_mask_s *lunm_list; + struct bfa_rport_s *rp = NULL; + int i, free_index = MAX_LUN_MASK_CFG + 1; + struct bfa_fcs_lport_s *port = NULL; + struct bfa_fcs_rport_s *rp_fcs; + + bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) + return BFA_STATUS_FAILED; + + port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs, + vf_id, *pwwn); + if (port) { + *pwwn = port->port_cfg.pwwn; + rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn); + if (rp_fcs) + rp = rp_fcs->bfa_rport; + } + + lunm_list = bfa_get_lun_mask_list(bfa); + /* if entry exists */ + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE) + free_index = i; + if ((lunm_list[i].lp_wwn == *pwwn) && + (lunm_list[i].rp_wwn == rpwwn) && + (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) == + scsilun_to_int((struct scsi_lun *)&lun))) + return BFA_STATUS_ENTRY_EXISTS; + } + + if (free_index > MAX_LUN_MASK_CFG) + return BFA_STATUS_MAX_ENTRY_REACHED; + + if (rp) { + lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa, + rp->rport_info.local_pid); + lunm_list[free_index].rp_tag = rp->rport_tag; + } else { + lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID; + lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID; + } + + lunm_list[free_index].lp_wwn = *pwwn; + lunm_list[free_index].rp_wwn = rpwwn; + lunm_list[free_index].lun = lun; + lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE; + + /* set for all luns in this rp */ + for (i = 0; i < MAX_LUN_MASK_CFG; 
i++) { + if ((lunm_list[i].lp_wwn == *pwwn) && + (lunm_list[i].rp_wwn == rpwwn)) + lunm_list[i].ua = BFA_IOIM_LM_UA_SET; + } + + return bfa_dconf_update(bfa); +} + +bfa_status_t +bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn, + wwn_t rpwwn, struct scsi_lun lun) +{ + struct bfa_lun_mask_s *lunm_list; + struct bfa_fcs_lport_s *port = NULL; + int i; + + /* in min cfg lunm_list could be NULL but no commands should run. */ + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) + return BFA_STATUS_FAILED; + + bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); + bfa_trc(bfa, *pwwn); + bfa_trc(bfa, rpwwn); + bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun)); + + if (*pwwn == 0) { + port = bfa_fcs_lookup_port( + &((struct bfad_s *)bfa->bfad)->bfa_fcs, + vf_id, *pwwn); + if (port) + *pwwn = port->port_cfg.pwwn; + } + + lunm_list = bfa_get_lun_mask_list(bfa); + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if ((lunm_list[i].lp_wwn == *pwwn) && + (lunm_list[i].rp_wwn == rpwwn) && + (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) == + scsilun_to_int((struct scsi_lun *)&lun))) { + lunm_list[i].lp_wwn = 0; + lunm_list[i].rp_wwn = 0; + int_to_scsilun(0, &lunm_list[i].lun); + lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE; + if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) { + lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID; + lunm_list[i].lp_tag = BFA_LP_TAG_INVALID; + } + return bfa_dconf_update(bfa); + } + } + + /* set for all luns in this rp */ + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if ((lunm_list[i].lp_wwn == *pwwn) && + (lunm_list[i].rp_wwn == rpwwn)) + lunm_list[i].ua = BFA_IOIM_LM_UA_SET; + } + + return BFA_STATUS_ENTRY_NOT_EXISTS; +} + +static void +__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_ioim_s *ioim = cbarg; + + if (!complete) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); + return; + } + + bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED, + 0, 0, NULL, 0); +} + +static void +__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_ioim_s *ioim = cbarg; + + bfa_stats(ioim->itnim, path_tov_expired); + if (!complete) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); + return; + } + + bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV, + 0, 0, NULL, 0); +} + +static void +__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_ioim_s *ioim = cbarg; + + if (!complete) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); + return; + } + + bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio); +} + +static void +bfa_ioim_sgpg_alloced(void *cbarg) +{ + struct bfa_ioim_s *ioim = cbarg; + + ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); + list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q); + ioim->sgpg = bfa_q_first(&ioim->sgpg_q); + bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED); +} + +/* + * Send I/O request to firmware. 
+ */ +static bfa_boolean_t +bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) +{ + struct bfa_itnim_s *itnim = ioim->itnim; + struct bfi_ioim_req_s *m; + static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } }; + struct bfi_sge_s *sge, *sgpge; + u32 pgdlen = 0; + u32 fcp_dl; + u64 addr; + struct scatterlist *sg; + struct bfa_sgpg_s *sgpg; + struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; + u32 i, sge_id, pgcumsz; + enum dma_data_direction dmadir; + + /* + * check for room in queue to send request now + */ + m = bfa_reqq_next(ioim->bfa, ioim->reqq); + if (!m) { + bfa_stats(ioim->itnim, qwait); + bfa_reqq_wait(ioim->bfa, ioim->reqq, + &ioim->iosp->reqq_wait); + return BFA_FALSE; + } + + /* + * build i/o request message next + */ + m->io_tag = cpu_to_be16(ioim->iotag); + m->rport_hdl = ioim->itnim->rport->fw_handle; + m->io_timeout = 0; + + sge = &m->sges[0]; + sgpg = ioim->sgpg; + sge_id = 0; + sgpge = NULL; + pgcumsz = 0; + scsi_for_each_sg(cmnd, sg, ioim->nsges, i) { + if (i == 0) { + /* build inline IO SG element */ + addr = bfa_sgaddr_le(sg_dma_address(sg)); + sge->sga = *(union bfi_addr_u *) &addr; + pgdlen = sg_dma_len(sg); + sge->sg_len = pgdlen; + sge->flags = (ioim->nsges > BFI_SGE_INLINE) ? + BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST; + bfa_sge_to_be(sge); + sge++; + } else { + if (sge_id == 0) + sgpge = sgpg->sgpg->sges; + + addr = bfa_sgaddr_le(sg_dma_address(sg)); + sgpge->sga = *(union bfi_addr_u *) &addr; + sgpge->sg_len = sg_dma_len(sg); + pgcumsz += sgpge->sg_len; + + /* set flags */ + if (i < (ioim->nsges - 1) && + sge_id < (BFI_SGPG_DATA_SGES - 1)) + sgpge->flags = BFI_SGE_DATA; + else if (i < (ioim->nsges - 1)) + sgpge->flags = BFI_SGE_DATA_CPL; + else + sgpge->flags = BFI_SGE_DATA_LAST; + + bfa_sge_to_le(sgpge); + + sgpge++; + if (i == (ioim->nsges - 1)) { + sgpge->flags = BFI_SGE_PGDLEN; + sgpge->sga.a32.addr_lo = 0; + sgpge->sga.a32.addr_hi = 0; + sgpge->sg_len = pgcumsz; + bfa_sge_to_le(sgpge); + } else if (++sge_id == BFI_SGPG_DATA_SGES) { + sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg); + sgpge->flags = BFI_SGE_LINK; + sgpge->sga = sgpg->sgpg_pa; + sgpge->sg_len = pgcumsz; + bfa_sge_to_le(sgpge); + sge_id = 0; + pgcumsz = 0; + } + } + } + + if (ioim->nsges > BFI_SGE_INLINE) { + sge->sga = ioim->sgpg->sgpg_pa; + } else { + sge->sga.a32.addr_lo = 0; + sge->sga.a32.addr_hi = 0; + } + sge->sg_len = pgdlen; + sge->flags = BFI_SGE_PGDLEN; + bfa_sge_to_be(sge); + + /* + * set up I/O command parameters + */ + m->cmnd = cmnd_z0; + int_to_scsilun(cmnd->device->lun, &m->cmnd.lun); + dmadir = cmnd->sc_data_direction; + if (dmadir == DMA_TO_DEVICE) + m->cmnd.iodir = FCP_IODIR_WRITE; + else if (dmadir == DMA_FROM_DEVICE) + m->cmnd.iodir = FCP_IODIR_READ; + else + m->cmnd.iodir = FCP_IODIR_NONE; + + m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd; + fcp_dl = scsi_bufflen(cmnd); + m->cmnd.fcp_dl = cpu_to_be32(fcp_dl); + + /* + * set up I/O message header + */ + switch (m->cmnd.iodir) { + case FCP_IODIR_READ: + bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa)); + bfa_stats(itnim, input_reqs); + ioim->itnim->stats.rd_throughput += fcp_dl; + break; + case FCP_IODIR_WRITE: + bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa)); + bfa_stats(itnim, output_reqs); + ioim->itnim->stats.wr_throughput += fcp_dl; + break; + case FCP_IODIR_RW: + bfa_stats(itnim, input_reqs); + bfa_stats(itnim, output_reqs); + fallthrough; + default: + bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa)); + } + if (itnim->seq_rec || + (scsi_bufflen(cmnd) & (sizeof(u32) - 1))) + 
bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa)); + + /* + * queue I/O message to firmware + */ + bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh); + return BFA_TRUE; +} + +/* + * Setup any additional SG pages needed.Inline SG element is setup + * at queuing time. + */ +static bfa_boolean_t +bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim) +{ + u16 nsgpgs; + + WARN_ON(ioim->nsges <= BFI_SGE_INLINE); + + /* + * allocate SG pages needed + */ + nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); + if (!nsgpgs) + return BFA_TRUE; + + if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs) + != BFA_STATUS_OK) { + bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs); + return BFA_FALSE; + } + + ioim->nsgpgs = nsgpgs; + ioim->sgpg = bfa_q_first(&ioim->sgpg_q); + + return BFA_TRUE; +} + +/* + * Send I/O abort request to firmware. + */ +static bfa_boolean_t +bfa_ioim_send_abort(struct bfa_ioim_s *ioim) +{ + struct bfi_ioim_abort_req_s *m; + enum bfi_ioim_h2i msgop; + + /* + * check for room in queue to send request now + */ + m = bfa_reqq_next(ioim->bfa, ioim->reqq); + if (!m) + return BFA_FALSE; + + /* + * build i/o request message next + */ + if (ioim->iosp->abort_explicit) + msgop = BFI_IOIM_H2I_IOABORT_REQ; + else + msgop = BFI_IOIM_H2I_IOCLEANUP_REQ; + + bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa)); + m->io_tag = cpu_to_be16(ioim->iotag); + m->abort_tag = ++ioim->abort_tag; + + /* + * queue I/O message to firmware + */ + bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh); + return BFA_TRUE; +} + +/* + * Call to resume any I/O requests waiting for room in request queue. + */ +static void +bfa_ioim_qresume(void *cbarg) +{ + struct bfa_ioim_s *ioim = cbarg; + + bfa_stats(ioim->itnim, qresumes); + bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME); +} + + +static void +bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim) +{ + /* + * Move IO from itnim queue to fcpim global queue since itnim will be + * freed. + */ + list_del(&ioim->qe); + list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); + + if (!ioim->iosp->tskim) { + if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) { + bfa_cb_dequeue(&ioim->hcb_qe); + list_del(&ioim->qe); + list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q); + } + bfa_itnim_iodone(ioim->itnim); + } else + bfa_wc_down(&ioim->iosp->tskim->wc); +} + +static bfa_boolean_t +bfa_ioim_is_abortable(struct bfa_ioim_s *ioim) +{ + if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) && + (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) || + (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) || + (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) || + (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) || + (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) || + (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree))) + return BFA_FALSE; + + return BFA_TRUE; +} + +void +bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) +{ + /* + * If path tov timer expired, failback with PATHTOV status - these + * IO requests are not normally retried by IO stack. + * + * Otherwise device cameback online and fail it with normal failed + * status so that IO stack retries these failed IO requests. + */ + if (iotov) + ioim->io_cbfn = __bfa_cb_ioim_pathtov; + else { + ioim->io_cbfn = __bfa_cb_ioim_failed; + bfa_stats(ioim->itnim, iocom_nexus_abort); + } + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); + + /* + * Move IO to fcpim global queue since itnim will be + * freed. + */ + list_del(&ioim->qe); + list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); +} + + +/* + * Memory allocation and initialization. 
+ */ +void +bfa_ioim_attach(struct bfa_fcpim_s *fcpim) +{ + struct bfa_ioim_s *ioim; + struct bfa_fcp_mod_s *fcp = fcpim->fcp; + struct bfa_ioim_sp_s *iosp; + u16 i; + + /* + * claim memory first + */ + ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp); + fcpim->ioim_arr = ioim; + bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs); + + iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp); + fcpim->ioim_sp_arr = iosp; + bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs); + + /* + * Initialize ioim free queues + */ + INIT_LIST_HEAD(&fcpim->ioim_resfree_q); + INIT_LIST_HEAD(&fcpim->ioim_comp_q); + + for (i = 0; i < fcpim->fcp->num_ioim_reqs; + i++, ioim++, iosp++) { + /* + * initialize IOIM + */ + memset(ioim, 0, sizeof(struct bfa_ioim_s)); + ioim->iotag = i; + ioim->bfa = fcpim->bfa; + ioim->fcpim = fcpim; + ioim->iosp = iosp; + INIT_LIST_HEAD(&ioim->sgpg_q); + bfa_reqq_winit(&ioim->iosp->reqq_wait, + bfa_ioim_qresume, ioim); + bfa_sgpg_winit(&ioim->iosp->sgpg_wqe, + bfa_ioim_sgpg_alloced, ioim); + bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); + } +} + +void +bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; + struct bfa_ioim_s *ioim; + u16 iotag; + enum bfa_ioim_event evt = BFA_IOIM_SM_COMP; + + iotag = be16_to_cpu(rsp->io_tag); + + ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); + WARN_ON(ioim->iotag != iotag); + + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, rsp->io_status); + bfa_trc(ioim->bfa, rsp->reuse_io_tag); + + if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active)) + ioim->iosp->comp_rspmsg = *m; + + switch (rsp->io_status) { + case BFI_IOIM_STS_OK: + bfa_stats(ioim->itnim, iocomp_ok); + if (rsp->reuse_io_tag == 0) + evt = BFA_IOIM_SM_DONE; + else + evt = BFA_IOIM_SM_COMP; + break; + + case BFI_IOIM_STS_TIMEDOUT: + bfa_stats(ioim->itnim, iocomp_timedout); + fallthrough; + case BFI_IOIM_STS_ABORTED: + rsp->io_status = BFI_IOIM_STS_ABORTED; + bfa_stats(ioim->itnim, iocomp_aborted); + if (rsp->reuse_io_tag == 0) + evt = BFA_IOIM_SM_DONE; + else + evt = BFA_IOIM_SM_COMP; + break; + + case BFI_IOIM_STS_PROTO_ERR: + bfa_stats(ioim->itnim, iocom_proto_err); + WARN_ON(!rsp->reuse_io_tag); + evt = BFA_IOIM_SM_COMP; + break; + + case BFI_IOIM_STS_SQER_NEEDED: + bfa_stats(ioim->itnim, iocom_sqer_needed); + WARN_ON(rsp->reuse_io_tag != 0); + evt = BFA_IOIM_SM_SQRETRY; + break; + + case BFI_IOIM_STS_RES_FREE: + bfa_stats(ioim->itnim, iocom_res_free); + evt = BFA_IOIM_SM_FREE; + break; + + case BFI_IOIM_STS_HOST_ABORTED: + bfa_stats(ioim->itnim, iocom_hostabrts); + if (rsp->abort_tag != ioim->abort_tag) { + bfa_trc(ioim->bfa, rsp->abort_tag); + bfa_trc(ioim->bfa, ioim->abort_tag); + return; + } + + if (rsp->reuse_io_tag) + evt = BFA_IOIM_SM_ABORT_COMP; + else + evt = BFA_IOIM_SM_ABORT_DONE; + break; + + case BFI_IOIM_STS_UTAG: + bfa_stats(ioim->itnim, iocom_utags); + evt = BFA_IOIM_SM_COMP_UTAG; + break; + + default: + WARN_ON(1); + } + + bfa_sm_send_event(ioim, evt); +} + +void +bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; + struct bfa_ioim_s *ioim; + u16 iotag; + + iotag = be16_to_cpu(rsp->io_tag); + + ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); + WARN_ON(ioim->iotag != iotag); + + bfa_ioim_cb_profile_comp(fcpim, ioim); + + bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); +} + +/* + * Called by itnim to clean up IO while going offline. 
+ */ +void +bfa_ioim_cleanup(struct bfa_ioim_s *ioim) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_stats(ioim->itnim, io_cleanups); + + ioim->iosp->tskim = NULL; + bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); +} + +void +bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_stats(ioim->itnim, io_tmaborts); + + ioim->iosp->tskim = tskim; + bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); +} + +/* + * IOC failure handling. + */ +void +bfa_ioim_iocdisable(struct bfa_ioim_s *ioim) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_stats(ioim->itnim, io_iocdowns); + bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL); +} + +/* + * IO offline TOV popped. Fail the pending IO. + */ +void +bfa_ioim_tov(struct bfa_ioim_s *ioim) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV); +} + + +/* + * Allocate IOIM resource for initiator mode I/O request. + */ +struct bfa_ioim_s * +bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio, + struct bfa_itnim_s *itnim, u16 nsges) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + struct bfa_ioim_s *ioim; + struct bfa_iotag_s *iotag = NULL; + + /* + * alocate IOIM resource + */ + bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag); + if (!iotag) { + bfa_stats(itnim, no_iotags); + return NULL; + } + + ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag); + + ioim->dio = dio; + ioim->itnim = itnim; + ioim->nsges = nsges; + ioim->nsgpgs = 0; + + bfa_stats(itnim, total_ios); + fcpim->ios_active++; + + list_add_tail(&ioim->qe, &itnim->io_q); + + return ioim; +} + +void +bfa_ioim_free(struct bfa_ioim_s *ioim) +{ + struct bfa_fcpim_s *fcpim = ioim->fcpim; + struct bfa_iotag_s *iotag; + + if (ioim->nsgpgs > 0) + bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs); + + bfa_stats(ioim->itnim, io_comps); + fcpim->ios_active--; + + ioim->iotag &= BFA_IOIM_IOTAG_MASK; + + WARN_ON(!(ioim->iotag < + (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs))); + iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag); + + if (ioim->iotag < fcpim->fcp->num_ioim_reqs) + list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q); + else + list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q); + + list_del(&ioim->qe); +} + +void +bfa_ioim_start(struct bfa_ioim_s *ioim) +{ + bfa_ioim_cb_profile_start(ioim->fcpim, ioim); + + /* + * Obtain the queue over which this request has to be issued + */ + ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ? + BFA_FALSE : bfa_itnim_get_reqq(ioim); + + bfa_sm_send_event(ioim, BFA_IOIM_SM_START); +} + +/* + * Driver I/O abort request. + */ +bfa_status_t +bfa_ioim_abort(struct bfa_ioim_s *ioim) +{ + + bfa_trc(ioim->bfa, ioim->iotag); + + if (!bfa_ioim_is_abortable(ioim)) + return BFA_STATUS_FAILED; + + bfa_stats(ioim->itnim, io_aborts); + bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT); + + return BFA_STATUS_OK; +} + +/* + * BFA TSKIM state machine functions + */ + +/* + * Task management command beginning state. + */ +static void +bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) +{ + bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); + + switch (event) { + case BFA_TSKIM_SM_START: + bfa_sm_set_state(tskim, bfa_tskim_sm_active); + bfa_tskim_gather_ios(tskim); + + /* + * If device is offline, do not send TM on wire. Just cleanup + * any pending IO requests and complete TM request. 
+ */ + if (!bfa_itnim_is_online(tskim->itnim)) { + bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); + tskim->tsk_status = BFI_TSKIM_STS_OK; + bfa_tskim_cleanup_ios(tskim); + return; + } + + if (!bfa_tskim_send(tskim)) { + bfa_sm_set_state(tskim, bfa_tskim_sm_qfull); + bfa_stats(tskim->itnim, tm_qwait); + bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq, + &tskim->reqq_wait); + } + break; + + default: + bfa_sm_fault(tskim->bfa, event); + } +} + +/* + * TM command is active, awaiting completion from firmware to + * cleanup IO requests in TM scope. + */ +static void +bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) +{ + bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); + + switch (event) { + case BFA_TSKIM_SM_DONE: + bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); + bfa_tskim_cleanup_ios(tskim); + break; + + case BFA_TSKIM_SM_CLEANUP: + bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); + if (!bfa_tskim_send_abort(tskim)) { + bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull); + bfa_stats(tskim->itnim, tm_qwait); + bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq, + &tskim->reqq_wait); + } + break; + + case BFA_TSKIM_SM_HWFAIL: + bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); + bfa_tskim_iocdisable_ios(tskim); + bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); + break; + + default: + bfa_sm_fault(tskim->bfa, event); + } +} + +/* + * An active TM is being cleaned up since ITN is offline. Awaiting cleanup + * completion event from firmware. + */ +static void +bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) +{ + bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); + + switch (event) { + case BFA_TSKIM_SM_DONE: + /* + * Ignore and wait for ABORT completion from firmware. + */ + break; + + case BFA_TSKIM_SM_UTAG: + case BFA_TSKIM_SM_CLEANUP_DONE: + bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); + bfa_tskim_cleanup_ios(tskim); + break; + + case BFA_TSKIM_SM_HWFAIL: + bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); + bfa_tskim_iocdisable_ios(tskim); + bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); + break; + + default: + bfa_sm_fault(tskim->bfa, event); + } +} + +static void +bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) +{ + bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); + + switch (event) { + case BFA_TSKIM_SM_IOS_DONE: + bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); + bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done); + break; + + case BFA_TSKIM_SM_CLEANUP: + /* + * Ignore, TM command completed on wire. + * Notify TM conmpletion on IO cleanup completion. + */ + break; + + case BFA_TSKIM_SM_HWFAIL: + bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); + bfa_tskim_iocdisable_ios(tskim); + bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); + break; + + default: + bfa_sm_fault(tskim->bfa, event); + } +} + +/* + * Task management command is waiting for room in request CQ + */ +static void +bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) +{ + bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); + + switch (event) { + case BFA_TSKIM_SM_QRESUME: + bfa_sm_set_state(tskim, bfa_tskim_sm_active); + bfa_tskim_send(tskim); + break; + + case BFA_TSKIM_SM_CLEANUP: + /* + * No need to send TM on wire since ITN is offline. 
+ */ + bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); + bfa_reqq_wcancel(&tskim->reqq_wait); + bfa_tskim_cleanup_ios(tskim); + break; + + case BFA_TSKIM_SM_HWFAIL: + bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); + bfa_reqq_wcancel(&tskim->reqq_wait); + bfa_tskim_iocdisable_ios(tskim); + bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); + break; + + default: + bfa_sm_fault(tskim->bfa, event); + } +} + +/* + * Task management command is active, awaiting for room in request CQ + * to send clean up request. + */ +static void +bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event) +{ + bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); + + switch (event) { + case BFA_TSKIM_SM_DONE: + bfa_reqq_wcancel(&tskim->reqq_wait); + fallthrough; + case BFA_TSKIM_SM_QRESUME: + bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); + bfa_tskim_send_abort(tskim); + break; + + case BFA_TSKIM_SM_HWFAIL: + bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); + bfa_reqq_wcancel(&tskim->reqq_wait); + bfa_tskim_iocdisable_ios(tskim); + bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); + break; + + default: + bfa_sm_fault(tskim->bfa, event); + } +} + +/* + * BFA callback is pending + */ +static void +bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) +{ + bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); + + switch (event) { + case BFA_TSKIM_SM_HCB: + bfa_sm_set_state(tskim, bfa_tskim_sm_uninit); + bfa_tskim_free(tskim); + break; + + case BFA_TSKIM_SM_CLEANUP: + bfa_tskim_notify_comp(tskim); + break; + + case BFA_TSKIM_SM_HWFAIL: + break; + + default: + bfa_sm_fault(tskim->bfa, event); + } +} + +static void +__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_tskim_s *tskim = cbarg; + + if (!complete) { + bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB); + return; + } + + bfa_stats(tskim->itnim, tm_success); + bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status); +} + +static void +__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_tskim_s *tskim = cbarg; + + if (!complete) { + bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB); + return; + } + + bfa_stats(tskim->itnim, tm_failures); + bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, + BFI_TSKIM_STS_FAILED); +} + +static bfa_boolean_t +bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun) +{ + switch (tskim->tm_cmnd) { + case FCP_TM_TARGET_RESET: + return BFA_TRUE; + + case FCP_TM_ABORT_TASK_SET: + case FCP_TM_CLEAR_TASK_SET: + case FCP_TM_LUN_RESET: + case FCP_TM_CLEAR_ACA: + return !memcmp(&tskim->lun, &lun, sizeof(lun)); + + default: + WARN_ON(1); + } + + return BFA_FALSE; +} + +/* + * Gather affected IO requests and task management commands. + */ +static void +bfa_tskim_gather_ios(struct bfa_tskim_s *tskim) +{ + struct bfa_itnim_s *itnim = tskim->itnim; + struct bfa_ioim_s *ioim; + struct list_head *qe, *qen; + struct scsi_cmnd *cmnd; + struct scsi_lun scsilun; + + INIT_LIST_HEAD(&tskim->io_q); + + /* + * Gather any active IO requests first. + */ + list_for_each_safe(qe, qen, &itnim->io_q) { + ioim = (struct bfa_ioim_s *) qe; + cmnd = (struct scsi_cmnd *) ioim->dio; + int_to_scsilun(cmnd->device->lun, &scsilun); + if (bfa_tskim_match_scope(tskim, scsilun)) { + list_del(&ioim->qe); + list_add_tail(&ioim->qe, &tskim->io_q); + } + } + + /* + * Failback any pending IO requests immediately. 
+ */ + list_for_each_safe(qe, qen, &itnim->pending_q) { + ioim = (struct bfa_ioim_s *) qe; + cmnd = (struct scsi_cmnd *) ioim->dio; + int_to_scsilun(cmnd->device->lun, &scsilun); + if (bfa_tskim_match_scope(tskim, scsilun)) { + list_del(&ioim->qe); + list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); + bfa_ioim_tov(ioim); + } + } +} + +/* + * IO cleanup completion + */ +static void +bfa_tskim_cleanp_comp(void *tskim_cbarg) +{ + struct bfa_tskim_s *tskim = tskim_cbarg; + + bfa_stats(tskim->itnim, tm_io_comps); + bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE); +} + +/* + * Gather affected IO requests and task management commands. + */ +static void +bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim) +{ + struct bfa_ioim_s *ioim; + struct list_head *qe, *qen; + + bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim); + + list_for_each_safe(qe, qen, &tskim->io_q) { + ioim = (struct bfa_ioim_s *) qe; + bfa_wc_up(&tskim->wc); + bfa_ioim_cleanup_tm(ioim, tskim); + } + + bfa_wc_wait(&tskim->wc); +} + +/* + * Send task management request to firmware. + */ +static bfa_boolean_t +bfa_tskim_send(struct bfa_tskim_s *tskim) +{ + struct bfa_itnim_s *itnim = tskim->itnim; + struct bfi_tskim_req_s *m; + + /* + * check for room in queue to send request now + */ + m = bfa_reqq_next(tskim->bfa, itnim->reqq); + if (!m) + return BFA_FALSE; + + /* + * build i/o request message next + */ + bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ, + bfa_fn_lpu(tskim->bfa)); + + m->tsk_tag = cpu_to_be16(tskim->tsk_tag); + m->itn_fhdl = tskim->itnim->rport->fw_handle; + m->t_secs = tskim->tsecs; + m->lun = tskim->lun; + m->tm_flags = tskim->tm_cmnd; + + /* + * queue I/O message to firmware + */ + bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh); + return BFA_TRUE; +} + +/* + * Send abort request to cleanup an active TM to firmware. + */ +static bfa_boolean_t +bfa_tskim_send_abort(struct bfa_tskim_s *tskim) +{ + struct bfa_itnim_s *itnim = tskim->itnim; + struct bfi_tskim_abortreq_s *m; + + /* + * check for room in queue to send request now + */ + m = bfa_reqq_next(tskim->bfa, itnim->reqq); + if (!m) + return BFA_FALSE; + + /* + * build i/o request message next + */ + bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ, + bfa_fn_lpu(tskim->bfa)); + + m->tsk_tag = cpu_to_be16(tskim->tsk_tag); + + /* + * queue I/O message to firmware + */ + bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh); + return BFA_TRUE; +} + +/* + * Call to resume task management cmnd waiting for room in request queue. + */ +static void +bfa_tskim_qresume(void *cbarg) +{ + struct bfa_tskim_s *tskim = cbarg; + + bfa_stats(tskim->itnim, tm_qresumes); + bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME); +} + +/* + * Cleanup IOs associated with a task mangement command on IOC failures. + */ +static void +bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim) +{ + struct bfa_ioim_s *ioim; + struct list_head *qe, *qen; + + list_for_each_safe(qe, qen, &tskim->io_q) { + ioim = (struct bfa_ioim_s *) qe; + bfa_ioim_iocdisable(ioim); + } +} + +/* + * Notification on completions from related ioim. + */ +void +bfa_tskim_iodone(struct bfa_tskim_s *tskim) +{ + bfa_wc_down(&tskim->wc); +} + +/* + * Handle IOC h/w failure notification from itnim. + */ +void +bfa_tskim_iocdisable(struct bfa_tskim_s *tskim) +{ + tskim->notify = BFA_FALSE; + bfa_stats(tskim->itnim, tm_iocdowns); + bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL); +} + +/* + * Cleanup TM command and associated IOs as part of ITNIM offline. 
+ */ +void +bfa_tskim_cleanup(struct bfa_tskim_s *tskim) +{ + tskim->notify = BFA_TRUE; + bfa_stats(tskim->itnim, tm_cleanups); + bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP); +} + +/* + * Memory allocation and initialization. + */ +void +bfa_tskim_attach(struct bfa_fcpim_s *fcpim) +{ + struct bfa_tskim_s *tskim; + struct bfa_fcp_mod_s *fcp = fcpim->fcp; + u16 i; + + INIT_LIST_HEAD(&fcpim->tskim_free_q); + INIT_LIST_HEAD(&fcpim->tskim_unused_q); + + tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp); + fcpim->tskim_arr = tskim; + + for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) { + /* + * initialize TSKIM + */ + memset(tskim, 0, sizeof(struct bfa_tskim_s)); + tskim->tsk_tag = i; + tskim->bfa = fcpim->bfa; + tskim->fcpim = fcpim; + tskim->notify = BFA_FALSE; + bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume, + tskim); + bfa_sm_set_state(tskim, bfa_tskim_sm_uninit); + + list_add_tail(&tskim->qe, &fcpim->tskim_free_q); + } + + bfa_mem_kva_curp(fcp) = (u8 *) tskim; +} + +void +bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m; + struct bfa_tskim_s *tskim; + u16 tsk_tag = be16_to_cpu(rsp->tsk_tag); + + tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag); + WARN_ON(tskim->tsk_tag != tsk_tag); + + tskim->tsk_status = rsp->tsk_status; + + /* + * Firmware sends BFI_TSKIM_STS_ABORTED status for abort + * requests. All other statuses are for normal completions. + */ + if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) { + bfa_stats(tskim->itnim, tm_cleanup_comps); + bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE); + } else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) { + bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG); + } else { + bfa_stats(tskim->itnim, tm_fw_rsps); + bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE); + } +} + + +struct bfa_tskim_s * +bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + struct bfa_tskim_s *tskim; + + bfa_q_deq(&fcpim->tskim_free_q, &tskim); + + if (tskim) + tskim->dtsk = dtsk; + + return tskim; +} + +void +bfa_tskim_free(struct bfa_tskim_s *tskim) +{ + WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe)); + list_del(&tskim->qe); + list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q); +} + +/* + * Start a task management command. + * + * @param[in] tskim BFA task management command instance + * @param[in] itnim i-t nexus for the task management command + * @param[in] lun lun, if applicable + * @param[in] tm_cmnd Task management command code. + * @param[in] t_secs Timeout in seconds + * + * @return None. 
+ */ +void +bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, + struct scsi_lun lun, + enum fcp_tm_cmnd tm_cmnd, u8 tsecs) +{ + tskim->itnim = itnim; + tskim->lun = lun; + tskim->tm_cmnd = tm_cmnd; + tskim->tsecs = tsecs; + tskim->notify = BFA_FALSE; + bfa_stats(itnim, tm_cmnds); + + list_add_tail(&tskim->qe, &itnim->tsk_q); + bfa_sm_send_event(tskim, BFA_TSKIM_SM_START); +} + +void +bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + struct list_head *qe; + int i; + + for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) { + bfa_q_deq_tail(&fcpim->tskim_free_q, &qe); + list_add_tail(qe, &fcpim->tskim_unused_q); + } +} + +void +bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, + struct bfa_s *bfa) +{ + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); + struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa); + struct bfa_mem_dma_s *seg_ptr; + u16 nsegs, idx, per_seg_ios, num_io_req; + u32 km_len = 0; + + /* + * ZERO for num_ioim_reqs and num_fwtio_reqs is allowed config value. + * So if the values are non zero, adjust them appropriately. + */ + if (cfg->fwcfg.num_ioim_reqs && + cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN) + cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN; + else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX) + cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX; + + if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX) + cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX; + + num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs); + if (num_io_req > BFA_IO_MAX) { + if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) { + cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2; + cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2; + } else if (cfg->fwcfg.num_fwtio_reqs) + cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX; + else + cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX; + } + + bfa_fcpim_meminfo(cfg, &km_len); + + num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs); + km_len += num_io_req * sizeof(struct bfa_iotag_s); + km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s); + + /* dma memory */ + nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN); + per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN); + + bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) { + if (num_io_req >= per_seg_ios) { + num_io_req -= per_seg_ios; + bfa_mem_dma_setup(minfo, seg_ptr, + per_seg_ios * BFI_IOIM_SNSLEN); + } else + bfa_mem_dma_setup(minfo, seg_ptr, + num_io_req * BFI_IOIM_SNSLEN); + } + + /* kva memory */ + bfa_mem_kva_setup(minfo, fcp_kva, km_len); +} + +void +bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_pcidev_s *pcidev) +{ + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); + struct bfa_mem_dma_s *seg_ptr; + u16 idx, nsegs, num_io_req; + + fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs; + fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs; + fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs; + fcp->num_itns = cfg->fwcfg.num_rports; + fcp->bfa = bfa; + + /* + * Setup the pool of snsbase addr's, that is passed to fw as + * part of bfi_iocfc_cfg_s. 
+ */ + num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs); + nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN); + + bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) { + + if (!bfa_mem_dma_virt(seg_ptr)) + break; + + fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr); + fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr); + bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa); + } + + fcp->throttle_update_required = 1; + bfa_fcpim_attach(fcp, bfad, cfg, pcidev); + + bfa_iotag_attach(fcp); + + fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp); + bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr + + (fcp->num_itns * sizeof(struct bfa_itn_s)); + memset(fcp->itn_arr, 0, + (fcp->num_itns * sizeof(struct bfa_itn_s))); +} + +void +bfa_fcp_iocdisable(struct bfa_s *bfa) +{ + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); + + bfa_fcpim_iocdisable(fcp); +} + +void +bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw) +{ + struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa); + struct list_head *qe; + int i; + + /* Update io throttle value only once during driver load time */ + if (!mod->throttle_update_required) + return; + + for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) { + bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe); + list_add_tail(qe, &mod->iotag_unused_q); + } + + if (mod->num_ioim_reqs != num_ioim_fw) { + bfa_trc(bfa, mod->num_ioim_reqs); + bfa_trc(bfa, num_ioim_fw); + } + + mod->max_ioim_reqs = max_ioim_fw; + mod->num_ioim_reqs = num_ioim_fw; + mod->throttle_update_required = 0; +} + +void +bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport, + void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)) +{ + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); + struct bfa_itn_s *itn; + + itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag); + itn->isr = isr; +} + +/* + * Itn interrupt processing. + */ +void +bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); + union bfi_itn_i2h_msg_u msg; + struct bfa_itn_s *itn; + + msg.msg = m; + itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle); + + if (itn->isr) + itn->isr(bfa, m); + else + WARN_ON(1); +} + +void +bfa_iotag_attach(struct bfa_fcp_mod_s *fcp) +{ + struct bfa_iotag_s *iotag; + u16 num_io_req, i; + + iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp); + fcp->iotag_arr = iotag; + + INIT_LIST_HEAD(&fcp->iotag_ioim_free_q); + INIT_LIST_HEAD(&fcp->iotag_tio_free_q); + INIT_LIST_HEAD(&fcp->iotag_unused_q); + + num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs; + for (i = 0; i < num_io_req; i++, iotag++) { + memset(iotag, 0, sizeof(struct bfa_iotag_s)); + iotag->tag = i; + if (i < fcp->num_ioim_reqs) + list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q); + else + list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q); + } + + bfa_mem_kva_curp(fcp) = (u8 *) iotag; +} + + +/* + * To send config req, first try to use throttle value from flash + * If 0, then use driver parameter + * We need to use min(flash_val, drv_val) because + * memory allocation was done based on this cfg'd value + */ +u16 +bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param) +{ + u16 tmp; + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); + + /* + * If throttle value from flash is already in effect after driver is + * loaded then until next load, always return current value instead + * of actual flash value + */ + if (!fcp->throttle_update_required) + return (u16)fcp->num_ioim_reqs; + + tmp = bfa_dconf_read_data_valid(bfa) ? 
bfa_fcpim_read_throttle(bfa) : 0; + if (!tmp || (tmp > drv_cfg_param)) + tmp = drv_cfg_param; + + return tmp; +} + +bfa_status_t +bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value) +{ + if (!bfa_dconf_get_min_cfg(bfa)) { + BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value; + BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1; + return BFA_STATUS_OK; + } + + return BFA_STATUS_FAILED; +} + +u16 +bfa_fcpim_read_throttle(struct bfa_s *bfa) +{ + struct bfa_throttle_cfg_s *throttle_cfg = + &(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg); + + return ((!bfa_dconf_get_min_cfg(bfa)) ? + ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0); +} + +bfa_status_t +bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value) +{ + /* in min cfg no commands should run. */ + if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) || + (!bfa_dconf_read_data_valid(bfa))) + return BFA_STATUS_FAILED; + + bfa_fcpim_write_throttle(bfa, value); + + return bfa_dconf_update(bfa); +} + +bfa_status_t +bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + struct bfa_defs_fcpim_throttle_s throttle; + + if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) || + (!bfa_dconf_read_data_valid(bfa))) + return BFA_STATUS_FAILED; + + memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s)); + + throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs); + throttle.cfg_value = bfa_fcpim_read_throttle(bfa); + if (!throttle.cfg_value) + throttle.cfg_value = throttle.cur_value; + throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs); + memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s)); + + return BFA_STATUS_OK; +} diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h new file mode 100644 index 000000000..8bf094335 --- /dev/null +++ b/drivers/scsi/bfa/bfa_fcpim.h @@ -0,0 +1,422 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
+ */ + +#ifndef __BFA_FCPIM_H__ +#define __BFA_FCPIM_H__ + +#include "bfa.h" +#include "bfa_svc.h" +#include "bfi_ms.h" +#include "bfa_defs_svc.h" +#include "bfa_cs.h" + +/* FCP module related definitions */ +#define BFA_IO_MAX BFI_IO_MAX +#define BFA_FWTIO_MAX 2000 + +struct bfa_fcp_mod_s; +struct bfa_iotag_s { + struct list_head qe; /* queue element */ + u16 tag; /* FW IO tag */ +}; + +struct bfa_itn_s { + bfa_isr_func_t isr; +}; + +void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport, + void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)); +void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m); +void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp); +void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw); + +#define BFA_FCP_MOD(_hal) (&(_hal)->modules.fcp_mod) +#define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg)) +#define BFA_IOTAG_FROM_TAG(_fcp, _tag) \ + (&(_fcp)->iotag_arr[(_tag & BFA_IOIM_IOTAG_MASK)]) +#define BFA_ITN_FROM_TAG(_fcp, _tag) \ + ((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1))) +#define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \ + bfa_mem_get_dmabuf_kva(_fcp, (_tag & BFA_IOIM_IOTAG_MASK), \ + BFI_IOIM_SNSLEN) + + +#define BFA_ITNIM_MIN 32 +#define BFA_ITNIM_MAX 1024 + +#define BFA_IOIM_MIN 8 +#define BFA_IOIM_MAX 2000 + +#define BFA_TSKIM_MIN 4 +#define BFA_TSKIM_MAX 512 +#define BFA_FCPIM_PATHTOV_DEF (30 * 1000) /* in millisecs */ +#define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */ + + +#define bfa_itnim_ioprofile_update(__itnim, __index) \ + (__itnim->ioprofile.iocomps[__index]++) + +#define BFA_IOIM_RETRY_TAG_OFFSET 11 +#define BFA_IOIM_IOTAG_MASK 0x07ff /* 2K IOs */ +#define BFA_IOIM_RETRY_MAX 7 + +/* Buckets are are 512 bytes to 2MB */ +static inline u32 +bfa_ioim_get_index(u32 n) { + int pos = 0; + if (n >= (1UL)<<22) + return BFA_IOBUCKET_MAX - 1; + n >>= 8; + if (n >= (1UL)<<16) { + n >>= 16; + pos += 16; + } + if (n >= 1 << 8) { + n >>= 8; + pos += 8; + } + if (n >= 1 << 4) { + n >>= 4; + pos += 4; + } + if (n >= 1 << 2) { + n >>= 2; + pos += 2; + } + if (n >= 1 << 1) + pos += 1; + + return (n == 0) ? 
(0) : pos; +} + +/* + * forward declarations + */ +struct bfa_ioim_s; +struct bfa_tskim_s; +struct bfad_ioim_s; +struct bfad_tskim_s; + +typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim); + +struct bfa_fcpim_s { + struct bfa_s *bfa; + struct bfa_fcp_mod_s *fcp; + struct bfa_itnim_s *itnim_arr; + struct bfa_ioim_s *ioim_arr; + struct bfa_ioim_sp_s *ioim_sp_arr; + struct bfa_tskim_s *tskim_arr; + int num_itnims; + int num_tskim_reqs; + u32 path_tov; + u16 q_depth; + u8 reqq; /* Request queue to be used */ + struct list_head itnim_q; /* queue of active itnim */ + struct list_head ioim_resfree_q; /* IOs waiting for f/w */ + struct list_head ioim_comp_q; /* IO global comp Q */ + struct list_head tskim_free_q; + struct list_head tskim_unused_q; /* Unused tskim Q */ + u32 ios_active; /* current active IOs */ + u32 delay_comp; + struct bfa_fcpim_del_itn_stats_s del_itn_stats; + bfa_boolean_t ioredirect; + bfa_boolean_t io_profile; + time64_t io_profile_start_time; + bfa_fcpim_profile_t profile_comp; + bfa_fcpim_profile_t profile_start; +}; + +/* Max FCP dma segs required */ +#define BFA_FCP_DMA_SEGS BFI_IOIM_SNSBUF_SEGS + +struct bfa_fcp_mod_s { + struct bfa_s *bfa; + struct list_head iotag_ioim_free_q; /* free IO resources */ + struct list_head iotag_tio_free_q; /* free IO resources */ + struct list_head iotag_unused_q; /* unused IO resources*/ + struct bfa_iotag_s *iotag_arr; + struct bfa_itn_s *itn_arr; + int max_ioim_reqs; + int num_ioim_reqs; + int num_fwtio_reqs; + int num_itns; + struct bfa_dma_s snsbase[BFA_FCP_DMA_SEGS]; + struct bfa_fcpim_s fcpim; + struct bfa_mem_dma_s dma_seg[BFA_FCP_DMA_SEGS]; + struct bfa_mem_kva_s kva_seg; + int throttle_update_required; +}; + +/* + * BFA IO (initiator mode) + */ +struct bfa_ioim_s { + struct list_head qe; /* queue element */ + bfa_sm_t sm; /* BFA ioim state machine */ + struct bfa_s *bfa; /* BFA module */ + struct bfa_fcpim_s *fcpim; /* parent fcpim module */ + struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ + struct bfad_ioim_s *dio; /* driver IO handle */ + u16 iotag; /* FWI IO tag */ + u16 abort_tag; /* unique abort request tag */ + u16 nsges; /* number of SG elements */ + u16 nsgpgs; /* number of SG pages */ + struct bfa_sgpg_s *sgpg; /* first SG page */ + struct list_head sgpg_q; /* allocated SG pages */ + struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ + bfa_cb_cbfn_t io_cbfn; /* IO completion handler */ + struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */ + u8 reqq; /* Request queue for I/O */ + u8 mode; /* IO is passthrough or not */ + u64 start_time; /* IO's Profile start val */ +}; + +struct bfa_ioim_sp_s { + struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */ + struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */ + struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ + bfa_boolean_t abort_explicit; /* aborted by OS */ + struct bfa_tskim_s *tskim; /* Relevant TM cmd */ +}; + +/* + * BFA Task management command (initiator mode) + */ +struct bfa_tskim_s { + struct list_head qe; + bfa_sm_t sm; + struct bfa_s *bfa; /* BFA module */ + struct bfa_fcpim_s *fcpim; /* parent fcpim module */ + struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ + struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */ + bfa_boolean_t notify; /* notify itnim on TM comp */ + struct scsi_lun lun; /* lun if applicable */ + enum fcp_tm_cmnd tm_cmnd; /* task management command */ + u16 tsk_tag; /* FWI IO tag */ + u8 tsecs; /* timeout in seconds */ + struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ +
struct list_head io_q; /* queue of affected IOs */ + struct bfa_wc_s wc; /* waiting counter */ + struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ + enum bfi_tskim_status tsk_status; /* TM status */ +}; + +/* + * BFA i-t-n (initiator mode) + */ +struct bfa_itnim_s { + struct list_head qe; /* queue element */ + bfa_sm_t sm; /* i-t-n im BFA state machine */ + struct bfa_s *bfa; /* bfa instance */ + struct bfa_rport_s *rport; /* bfa rport */ + void *ditn; /* driver i-t-n structure */ + struct bfi_mhdr_s mhdr; /* pre-built mhdr */ + u8 msg_no; /* itnim/rport firmware handle */ + u8 reqq; /* CQ for requests */ + struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ + struct list_head pending_q; /* queue of pending IO requests */ + struct list_head io_q; /* queue of active IO requests */ + struct list_head io_cleanup_q; /* IO being cleaned up */ + struct list_head tsk_q; /* queue of active TM commands */ + struct list_head delay_comp_q; /* queue of failed inflight cmds */ + bfa_boolean_t seq_rec; /* SQER supported */ + bfa_boolean_t is_online; /* itnim is ONLINE for IO */ + bfa_boolean_t iotov_active; /* IO TOV timer is active */ + struct bfa_wc_s wc; /* waiting counter */ + struct bfa_timer_s timer; /* pending IO TOV */ + struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ + struct bfa_fcpim_s *fcpim; /* fcpim module */ + struct bfa_itnim_iostats_s stats; + struct bfa_itnim_ioprofile_s ioprofile; +}; + +#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online) +#define BFA_FCPIM(_hal) (&(_hal)->modules.fcp_mod.fcpim) +#define BFA_IOIM_TAG_2_ID(_iotag) ((_iotag) & BFA_IOIM_IOTAG_MASK) +#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \ + (&fcpim->ioim_arr[(_iotag & BFA_IOIM_IOTAG_MASK)]) +#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag) \ + (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)]) + +#define bfa_io_profile_start_time(_bfa) \ + ((_bfa)->modules.fcp_mod.fcpim.io_profile_start_time) +#define bfa_fcpim_get_io_profile(_bfa) \ + ((_bfa)->modules.fcp_mod.fcpim.io_profile) +#define bfa_ioim_update_iotag(__ioim) do { \ + uint16_t k = (__ioim)->iotag >> BFA_IOIM_RETRY_TAG_OFFSET; \ + k++; (__ioim)->iotag &= BFA_IOIM_IOTAG_MASK; \ + (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \ +} while (0) + +static inline bfa_boolean_t +bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim) +{ + uint16_t k = ioim->iotag >> BFA_IOIM_RETRY_TAG_OFFSET; + if (k < BFA_IOIM_RETRY_MAX) + return BFA_FALSE; + return BFA_TRUE; +} + +/* + * function prototypes + */ +void bfa_ioim_attach(struct bfa_fcpim_s *fcpim); +void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); +void bfa_ioim_good_comp_isr(struct bfa_s *bfa, + struct bfi_msg_s *msg); +void bfa_ioim_cleanup(struct bfa_ioim_s *ioim); +void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, + struct bfa_tskim_s *tskim); +void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim); +void bfa_ioim_tov(struct bfa_ioim_s *ioim); + +void bfa_tskim_attach(struct bfa_fcpim_s *fcpim); +void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); +void bfa_tskim_iodone(struct bfa_tskim_s *tskim); +void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim); +void bfa_tskim_cleanup(struct bfa_tskim_s *tskim); +void bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw); + +void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len); +void bfa_itnim_attach(struct bfa_fcpim_s *fcpim); +void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim); +void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); +void bfa_itnim_iodone(struct bfa_itnim_s *itnim); +void 
bfa_itnim_tskdone(struct bfa_itnim_s *itnim); +bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim); + +/* + * bfa fcpim module API functions + */ +void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov); +u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa); +u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa); +bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa, + struct bfa_itnim_iostats_s *stats, u8 lp_tag); +void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats, + struct bfa_itnim_iostats_s *itnim_stats); +bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time); +bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa); + +#define bfa_fcpim_ioredirect_enabled(__bfa) \ + (((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect) + +#define bfa_fcpim_get_next_reqq(__bfa, __qid) \ +{ \ + struct bfa_fcpim_s *__fcpim = BFA_FCPIM(__bfa); \ + __fcpim->reqq++; \ + __fcpim->reqq &= (BFI_IOC_MAX_CQS - 1); \ + *(__qid) = __fcpim->reqq; \ +} + +#define bfa_iocfc_map_msg_to_qid(__msg, __qid) \ + *(__qid) = (u8)((__msg) & (BFI_IOC_MAX_CQS - 1)); +/* + * bfa itnim API functions + */ +struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa, + struct bfa_rport_s *rport, void *itnim); +void bfa_itnim_delete(struct bfa_itnim_s *itnim); +void bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec); +void bfa_itnim_offline(struct bfa_itnim_s *itnim); +void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim); +bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim, + struct bfa_itnim_ioprofile_s *ioprofile); + +#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq) + +/* + * BFA completion callback for bfa_itnim_online(). + */ +void bfa_cb_itnim_online(void *itnim); + +/* + * BFA completion callback for bfa_itnim_offline(). + */ +void bfa_cb_itnim_offline(void *itnim); +void bfa_cb_itnim_tov_begin(void *itnim); +void bfa_cb_itnim_tov(void *itnim); + +/* + * BFA notification to FCS/driver for second level error recovery. + * At least one I/O request has timed out and target is unresponsive to + * repeated abort requests. Second level error recovery should be initiated + * by starting implicit logout and recovery procedures. + */ +void bfa_cb_itnim_sler(void *itnim); + +/* + * bfa ioim API functions + */ +struct bfa_ioim_s *bfa_ioim_alloc(struct bfa_s *bfa, + struct bfad_ioim_s *dio, + struct bfa_itnim_s *itnim, + u16 nsgles); + +void bfa_ioim_free(struct bfa_ioim_s *ioim); +void bfa_ioim_start(struct bfa_ioim_s *ioim); +bfa_status_t bfa_ioim_abort(struct bfa_ioim_s *ioim); +void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, + bfa_boolean_t iotov); +/* + * I/O completion notification. + * + * @param[in] dio driver IO structure + * @param[in] io_status IO completion status + * @param[in] scsi_status SCSI status returned by target + * @param[in] sns_len SCSI sense length, 0 if none + * @param[in] sns_info SCSI sense data, if any + * @param[in] residue Residual length + * + * @return None + */ +void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio, + enum bfi_ioim_status io_status, + u8 scsi_status, int sns_len, + u8 *sns_info, s32 residue); + +/* + * I/O good completion notification.
+ */ +void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio); + +/* + * I/O abort completion notification + */ +void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio); + +/* + * bfa tskim API functions + */ +struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa, + struct bfad_tskim_s *dtsk); +void bfa_tskim_free(struct bfa_tskim_s *tskim); +void bfa_tskim_start(struct bfa_tskim_s *tskim, + struct bfa_itnim_s *itnim, struct scsi_lun lun, + enum fcp_tm_cmnd tm, u8 t_secs); +void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, + enum bfi_tskim_status tsk_status); + +void bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, + wwn_t rp_wwn, u16 rp_tag, u8 lp_tag); +bfa_status_t bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 on_off); +bfa_status_t bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf); +bfa_status_t bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, + wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun); +bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, + wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun); +bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa); +u16 bfa_fcpim_read_throttle(struct bfa_s *bfa); +bfa_status_t bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value); +bfa_status_t bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value); +bfa_status_t bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf); +u16 bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param); + +#endif /* __BFA_FCPIM_H__ */ diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c new file mode 100644 index 000000000..5023c0ab4 --- /dev/null +++ b/drivers/scsi/bfa/bfa_fcs.c @@ -0,0 +1,1616 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +/* + * bfa_fcs.c BFA FCS main + */ + +#include "bfad_drv.h" +#include "bfad_im.h" +#include "bfa_fcs.h" +#include "bfa_fcbuild.h" + +BFA_TRC_FILE(FCS, FCS); + +/* + * fcs_api BFA FCS API + */ + +static void +bfa_fcs_exit_comp(void *fcs_cbarg) +{ + struct bfa_fcs_s *fcs = fcs_cbarg; + struct bfad_s *bfad = fcs->bfad; + + complete(&bfad->comp); +} + +/* + * fcs initialization, called once after bfa initialization is complete + */ +void +bfa_fcs_init(struct bfa_fcs_s *fcs) +{ + bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE); + bfa_trc(fcs, 0); +} + +/* + * fcs_api BFA FCS API + */ + +/* + * FCS update cfg - reset the pwwn/nwwn of fabric base logical port + * with values learned during bfa_init firmware GETATTR REQ. + */ +void +bfa_fcs_update_cfg(struct bfa_fcs_s *fcs) +{ + struct bfa_fcs_fabric_s *fabric = &fcs->fabric; + struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; + struct bfa_ioc_s *ioc = &fabric->fcs->bfa->ioc; + + port_cfg->nwwn = ioc->attr->nwwn; + port_cfg->pwwn = ioc->attr->pwwn; +} + +/* + * Stop FCS operations. 
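+ * The FCS waiting counter is armed with bfa_fcs_exit_comp(), so the
+ * caller's completion (bfad->comp) is signalled only after the fabric
+ * module stop below has finished.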
+ */ +void +bfa_fcs_stop(struct bfa_fcs_s *fcs) +{ + bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); + bfa_wc_up(&fcs->wc); + bfa_fcs_fabric_modstop(fcs); + bfa_wc_wait(&fcs->wc); +} + +/* + * fcs pbc vport initialization + */ +void +bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs) +{ + int i, npbc_vports; + struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS]; + + /* Initialize pbc vports */ + if (!fcs->min_cfg) { + npbc_vports = + bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports); + for (i = 0; i < npbc_vports; i++) + bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]); + } +} + +/* + * brief + * FCS driver details initialization. + * + * param[in] fcs FCS instance + * param[in] driver_info Driver Details + * + * return None + */ +void +bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, + struct bfa_fcs_driver_info_s *driver_info) +{ + + fcs->driver_info = *driver_info; + + bfa_fcs_fabric_psymb_init(&fcs->fabric); + bfa_fcs_fabric_nsymb_init(&fcs->fabric); +} + +/* + * brief + * FCS instance cleanup and exit. + * + * param[in] fcs FCS instance + * return None + */ +void +bfa_fcs_exit(struct bfa_fcs_s *fcs) +{ + bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); + bfa_wc_up(&fcs->wc); + bfa_trc(fcs, 0); + bfa_lps_delete(fcs->fabric.lps); + bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_DELETE); + bfa_wc_wait(&fcs->wc); +} + +/* + * Fabric module implementation. + */ + +#define BFA_FCS_FABRIC_RETRY_DELAY (2000) /* Milliseconds */ +#define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */ + +#define bfa_fcs_fabric_set_opertype(__fabric) do { \ + if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \ + == BFA_PORT_TOPOLOGY_P2P) { \ + if (fabric->fab_type == BFA_FCS_FABRIC_SWITCHED) \ + (__fabric)->oper_type = BFA_PORT_TYPE_NPORT; \ + else \ + (__fabric)->oper_type = BFA_PORT_TYPE_P2P; \ + } else \ + (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT; \ +} while (0) + +/* + * forward declarations + */ +static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric); +static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric); +static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric); +static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric); +static void bfa_fcs_fabric_delay(void *cbarg); +static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric); +static void bfa_fcs_fabric_delete_comp(void *cbarg); +static void bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric); +static void bfa_fcs_fabric_stop_comp(void *cbarg); +static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, + struct fchs_s *fchs, u16 len); +static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, + struct fchs_s *fchs, u16 len); +static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric); +static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg, + struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rspfchs); + +static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void 
bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +/* + * Beginning state before fabric creation. + */ +static void +bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_CREATE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); + bfa_fcs_fabric_init(fabric); + bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg); + break; + + case BFA_FCS_FABRIC_SM_LINK_UP: + case BFA_FCS_FABRIC_SM_LINK_DOWN: + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/* + * Beginning state before fabric creation. + */ +static void +bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + struct bfa_s *bfa = fabric->fcs->bfa; + + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_START: + if (!bfa_fcport_is_linkup(fabric->fcs->bfa)) { + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + break; + } + if (bfa_fcport_get_topology(bfa) == + BFA_PORT_TOPOLOGY_LOOP) { + fabric->fab_type = BFA_FCS_FABRIC_LOOP; + fabric->bport.pid = bfa_fcport_get_myalpa(bfa); + fabric->bport.pid = bfa_hton3b(fabric->bport.pid); + bfa_sm_set_state(fabric, + bfa_fcs_fabric_sm_online); + bfa_fcs_fabric_set_opertype(fabric); + bfa_fcs_lport_online(&fabric->bport); + } else { + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); + bfa_fcs_fabric_login(fabric); + } + break; + + case BFA_FCS_FABRIC_SM_LINK_UP: + case BFA_FCS_FABRIC_SM_LINK_DOWN: + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_fcs_fabric_delete(fabric); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/* + * Link is down, awaiting LINK UP event from port. This is also the + * first state at fabric creation. 
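+ * On a LINK_UP event a loop topology goes directly online using the
+ * ALPA-derived port id, while other topologies start FLOGI.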
+ */ +static void +bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + struct bfa_s *bfa = fabric->fcs->bfa; + + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_LINK_UP: + if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP) { + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); + bfa_fcs_fabric_login(fabric); + break; + } + fabric->fab_type = BFA_FCS_FABRIC_LOOP; + fabric->bport.pid = bfa_fcport_get_myalpa(bfa); + fabric->bport.pid = bfa_hton3b(fabric->bport.pid); + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); + bfa_fcs_fabric_set_opertype(fabric); + bfa_fcs_lport_online(&fabric->bport); + break; + + case BFA_FCS_FABRIC_SM_RETRY_OP: + case BFA_FCS_FABRIC_SM_LOOPBACK: + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_fcs_fabric_delete(fabric); + break; + + case BFA_FCS_FABRIC_SM_STOP: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); + bfa_fcs_fabric_stop(fabric); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/* + * FLOGI is in progress, awaiting FLOGI reply. + */ +static void +bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_CONT_OP: + + bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, + fabric->bb_credit); + fabric->fab_type = BFA_FCS_FABRIC_SWITCHED; + + if (fabric->auth_reqd && fabric->is_auth) { + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth); + bfa_trc(fabric->fcs, event); + } else { + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); + bfa_fcs_fabric_notify_online(fabric); + } + break; + + case BFA_FCS_FABRIC_SM_RETRY_OP: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry); + bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer, + bfa_fcs_fabric_delay, fabric, + BFA_FCS_FABRIC_RETRY_DELAY); + break; + + case BFA_FCS_FABRIC_SM_LOOPBACK: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback); + bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); + bfa_fcs_fabric_set_opertype(fabric); + break; + + case BFA_FCS_FABRIC_SM_NO_FABRIC: + fabric->fab_type = BFA_FCS_FABRIC_N2N; + bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, + fabric->bb_credit); + bfa_fcs_fabric_notify_online(fabric); + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric); + break; + + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); + bfa_fcs_fabric_delete(fabric); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + + +static void +bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_DELAYED: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); + bfa_fcs_fabric_login(fabric); + break; + + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + bfa_timer_stop(&fabric->delay_timer); + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_timer_stop(&fabric->delay_timer); + bfa_fcs_fabric_delete(fabric); + 
break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/* + * Authentication is in progress, awaiting authentication results. + */ +static void +bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_AUTH_FAILED: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); + bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); + break; + + case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); + bfa_fcs_fabric_notify_online(fabric); + break; + + case BFA_FCS_FABRIC_SM_PERF_EVFP: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp); + break; + + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_fcs_fabric_delete(fabric); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/* + * Authentication failed + */ +void +bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + bfa_fcs_fabric_notify_offline(fabric); + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_fcs_fabric_delete(fabric); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/* + * Port is in loopback mode. + */ +void +bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + bfa_fcs_fabric_notify_offline(fabric); + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_fcs_fabric_delete(fabric); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/* + * There is no attached fabric - private loop or NPort-to-NPort topology. + */ +static void +bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); + bfa_fcs_fabric_notify_offline(fabric); + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_fcs_fabric_delete(fabric); + break; + + case BFA_FCS_FABRIC_SM_NO_FABRIC: + bfa_trc(fabric->fcs, fabric->bb_credit); + bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, + fabric->bb_credit); + break; + + case BFA_FCS_FABRIC_SM_RETRY_OP: + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/* + * Fabric is online - normal operating state. 
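+ * On link down, loop topology only takes the base lport offline; other
+ * topologies also log the LPS out and send offline notifications to the
+ * base port and all vports.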
+ */ +void +bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + struct bfa_s *bfa = fabric->fcs->bfa; + + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) { + bfa_fcs_lport_offline(&fabric->bport); + } else { + bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); + bfa_fcs_fabric_notify_offline(fabric); + } + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_fcs_fabric_delete(fabric); + break; + + case BFA_FCS_FABRIC_SM_STOP: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_stopping); + bfa_fcs_fabric_stop(fabric); + break; + + case BFA_FCS_FABRIC_SM_AUTH_FAILED: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); + bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); + break; + + case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/* + * Exchanging virtual fabric parameters. + */ +static void +bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_CONT_OP: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done); + break; + + case BFA_FCS_FABRIC_SM_ISOLATE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/* + * EVFP exchange complete and VFT tagging is enabled. + */ +static void +bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); +} + +/* + * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F). + */ +static void +bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad; + char pwwn_ptr[BFA_STRING_32]; + + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + wwn2str(pwwn_ptr, fabric->bport.port_cfg.pwwn); + + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Port is isolated due to VF_ID mismatch. " + "PWWN: %s Port VF_ID: %04x switch port VF_ID: %04x.", + pwwn_ptr, fabric->fcs->port_vfid, + fabric->event_arg.swp_vfid); +} + +/* + * Fabric is being deleted, awaiting vport delete completions. + */ +static void +bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_DELCOMP: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); + bfa_wc_down(&fabric->fcs->wc); + break; + + case BFA_FCS_FABRIC_SM_LINK_UP: + break; + + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_fcs_fabric_notify_offline(fabric); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/* + * Fabric is being stopped, awaiting vport stop completions. 
+ */ +static void +bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + struct bfa_s *bfa = fabric->fcs->bfa; + + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_STOPCOMP: + if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) { + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); + } else { + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); + bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT); + } + break; + + case BFA_FCS_FABRIC_SM_LINK_UP: + break; + + case BFA_FCS_FABRIC_SM_LINK_DOWN: + if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); + else + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/* + * Fabric is being stopped, cleanup without FLOGO + */ +static void +bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_STOPCOMP: + case BFA_FCS_FABRIC_SM_LOGOCOMP: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); + bfa_wc_down(&(fabric->fcs)->wc); + break; + + case BFA_FCS_FABRIC_SM_LINK_DOWN: + /* + * Ignore - can get this event if we get notified about IOC down + * before the fabric completion callbk is done. + */ + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/* + * fcs_fabric_private fabric private functions + */ + +static void +bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; + + port_cfg->roles = BFA_LPORT_ROLE_FCP_IM; + port_cfg->nwwn = fabric->fcs->bfa->ioc.attr->nwwn; + port_cfg->pwwn = fabric->fcs->bfa->ioc.attr->pwwn; +} + +/* + * Port Symbolic Name Creation for base port. + */ +void +bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; + char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0}; + struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info; + + bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); + + /* Model name/number */ + strscpy(port_cfg->sym_name.symname, model, + BFA_SYMNAME_MAXLEN); + strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR, + BFA_SYMNAME_MAXLEN); + + /* Driver Version */ + strlcat(port_cfg->sym_name.symname, driver_info->version, + BFA_SYMNAME_MAXLEN); + strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR, + BFA_SYMNAME_MAXLEN); + + /* Host machine name */ + strlcat(port_cfg->sym_name.symname, + driver_info->host_machine_name, + BFA_SYMNAME_MAXLEN); + strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR, + BFA_SYMNAME_MAXLEN); + + /* + * Host OS Info : + * If OS Patch Info is not there, do not truncate any bytes from the + * OS name string and instead copy the entire OS info string (64 bytes). 
+ */ + if (driver_info->host_os_patch[0] == '\0') { + strlcat(port_cfg->sym_name.symname, + driver_info->host_os_name, + BFA_SYMNAME_MAXLEN); + strlcat(port_cfg->sym_name.symname, + BFA_FCS_PORT_SYMBNAME_SEPARATOR, + BFA_SYMNAME_MAXLEN); + } else { + strlcat(port_cfg->sym_name.symname, + driver_info->host_os_name, + BFA_SYMNAME_MAXLEN); + strlcat(port_cfg->sym_name.symname, + BFA_FCS_PORT_SYMBNAME_SEPARATOR, + BFA_SYMNAME_MAXLEN); + + /* Append host OS Patch Info */ + strlcat(port_cfg->sym_name.symname, + driver_info->host_os_patch, + BFA_SYMNAME_MAXLEN); + } + + /* null terminate */ + port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; +} + +/* + * Node Symbolic Name Creation for base port and all vports + */ +void +bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; + char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0}; + struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info; + + bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); + + /* Model name/number */ + strscpy(port_cfg->node_sym_name.symname, model, + BFA_SYMNAME_MAXLEN); + strlcat(port_cfg->node_sym_name.symname, + BFA_FCS_PORT_SYMBNAME_SEPARATOR, + BFA_SYMNAME_MAXLEN); + + /* Driver Version */ + strlcat(port_cfg->node_sym_name.symname, (char *)driver_info->version, + BFA_SYMNAME_MAXLEN); + strlcat(port_cfg->node_sym_name.symname, + BFA_FCS_PORT_SYMBNAME_SEPARATOR, + BFA_SYMNAME_MAXLEN); + + /* Host machine name */ + strlcat(port_cfg->node_sym_name.symname, + driver_info->host_machine_name, + BFA_SYMNAME_MAXLEN); + strlcat(port_cfg->node_sym_name.symname, + BFA_FCS_PORT_SYMBNAME_SEPARATOR, + BFA_SYMNAME_MAXLEN); + + /* null terminate */ + port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; +} + +/* + * bfa lps login completion callback + */ +void +bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status) +{ + struct bfa_fcs_fabric_s *fabric = uarg; + + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, status); + + switch (status) { + case BFA_STATUS_OK: + fabric->stats.flogi_accepts++; + break; + + case BFA_STATUS_INVALID_MAC: + /* Only for CNA */ + fabric->stats.flogi_acc_err++; + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); + + return; + + case BFA_STATUS_EPROTOCOL: + switch (fabric->lps->ext_status) { + case BFA_EPROTO_BAD_ACCEPT: + fabric->stats.flogi_acc_err++; + break; + + case BFA_EPROTO_UNKNOWN_RSP: + fabric->stats.flogi_unknown_rsp++; + break; + + default: + break; + } + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); + + return; + + case BFA_STATUS_FABRIC_RJT: + fabric->stats.flogi_rejects++; + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); + return; + + default: + fabric->stats.flogi_rsp_err++; + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); + return; + } + + fabric->bb_credit = fabric->lps->pr_bbcred; + bfa_trc(fabric->fcs, fabric->bb_credit); + + if (!(fabric->lps->brcd_switch)) + fabric->fabric_name = fabric->lps->pr_nwwn; + + /* + * Check port type. It should be 1 = F-port. 
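+ * A set fport bit means the FLOGI was accepted by an F-port (switched
+ * fabric); otherwise this is an N-port to N-port direct attach.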
+ */ + if (fabric->lps->fport) { + fabric->bport.pid = fabric->lps->lp_pid; + fabric->is_npiv = fabric->lps->npiv_en; + fabric->is_auth = fabric->lps->auth_req; + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP); + } else { + /* + * Nport-2-Nport direct attached + */ + fabric->bport.port_topo.pn2n.rem_port_wwn = + fabric->lps->pr_pwwn; + fabric->fab_type = BFA_FCS_FABRIC_N2N; + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); + } + + bfa_trc(fabric->fcs, fabric->bport.pid); + bfa_trc(fabric->fcs, fabric->is_npiv); + bfa_trc(fabric->fcs, fabric->is_auth); +} +/* + * Allocate and send FLOGI. + */ +static void +bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_s *bfa = fabric->fcs->bfa; + struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; + u8 alpa = 0; + + + bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa), + pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd); + + fabric->stats.flogi_sent++; +} + +static void +bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_fcs_vport_s *vport; + struct list_head *qe, *qen; + + bfa_trc(fabric->fcs, fabric->fabric_name); + + bfa_fcs_fabric_set_opertype(fabric); + fabric->stats.fabric_onlines++; + + /* + * notify online event to base and then virtual ports + */ + bfa_fcs_lport_online(&fabric->bport); + + list_for_each_safe(qe, qen, &fabric->vport_q) { + vport = (struct bfa_fcs_vport_s *) qe; + bfa_fcs_vport_online(vport); + } +} + +static void +bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_fcs_vport_s *vport; + struct list_head *qe, *qen; + + bfa_trc(fabric->fcs, fabric->fabric_name); + fabric->stats.fabric_offlines++; + + /* + * notify offline event first to vports and then base port. + */ + list_for_each_safe(qe, qen, &fabric->vport_q) { + vport = (struct bfa_fcs_vport_s *) qe; + bfa_fcs_vport_offline(vport); + } + + bfa_fcs_lport_offline(&fabric->bport); + + fabric->fabric_name = 0; + fabric->fabric_ip_addr[0] = 0; +} + +static void +bfa_fcs_fabric_delay(void *cbarg) +{ + struct bfa_fcs_fabric_s *fabric = cbarg; + + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED); +} + +/* + * Stop all vports and wait for vport stop completions. + */ +static void +bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_fcs_vport_s *vport; + struct list_head *qe, *qen; + + bfa_wc_init(&fabric->stop_wc, bfa_fcs_fabric_stop_comp, fabric); + + list_for_each_safe(qe, qen, &fabric->vport_q) { + vport = (struct bfa_fcs_vport_s *) qe; + bfa_wc_up(&fabric->stop_wc); + bfa_fcs_vport_fcs_stop(vport); + } + + bfa_wc_up(&fabric->stop_wc); + bfa_fcs_lport_stop(&fabric->bport); + bfa_wc_wait(&fabric->stop_wc); +} + +/* + * Delete all vports and wait for vport delete completions. 
+ */ +static void +bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_fcs_vport_s *vport; + struct list_head *qe, *qen; + + list_for_each_safe(qe, qen, &fabric->vport_q) { + vport = (struct bfa_fcs_vport_s *) qe; + bfa_fcs_vport_fcs_delete(vport); + } + + bfa_fcs_lport_delete(&fabric->bport); + bfa_wc_wait(&fabric->wc); +} + +static void +bfa_fcs_fabric_delete_comp(void *cbarg) +{ + struct bfa_fcs_fabric_s *fabric = cbarg; + + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP); +} + +static void +bfa_fcs_fabric_stop_comp(void *cbarg) +{ + struct bfa_fcs_fabric_s *fabric = cbarg; + + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOPCOMP); +} + +/* + * fcs_fabric_public fabric public functions + */ + +/* + * Fabric module stop -- stop FCS actions + */ +void +bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs) +{ + struct bfa_fcs_fabric_s *fabric; + + bfa_trc(fcs, 0); + fabric = &fcs->fabric; + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOP); +} + +/* + * Fabric module start -- kick starts FCS actions + */ +void +bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs) +{ + struct bfa_fcs_fabric_s *fabric; + + bfa_trc(fcs, 0); + fabric = &fcs->fabric; + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); +} + + +/* + * Link up notification from BFA physical port module. + */ +void +bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP); +} + +/* + * Link down notification from BFA physical port module. + */ +void +bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); +} + +/* + * A child vport is being created in the fabric. + * + * Call from vport module at vport creation. A list of base port and vports + * belonging to a fabric is maintained to propagate link events. + * + * param[in] fabric - Fabric instance. This can be a base fabric or vf. + * param[in] vport - Vport being created. + * + * @return None (always succeeds) + */ +void +bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, + struct bfa_fcs_vport_s *vport) +{ + /* + * - add vport to fabric's vport_q + */ + bfa_trc(fabric->fcs, fabric->vf_id); + + list_add_tail(&vport->qe, &fabric->vport_q); + fabric->num_vports++; + bfa_wc_up(&fabric->wc); +} + +/* + * A child vport is being deleted from fabric. + * + * Vport is being deleted. + */ +void +bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric, + struct bfa_fcs_vport_s *vport) +{ + list_del(&vport->qe); + fabric->num_vports--; + bfa_wc_down(&fabric->wc); +} + + +/* + * Lookup for a vport within a fabric given its pwwn + */ +struct bfa_fcs_vport_s * +bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn) +{ + struct bfa_fcs_vport_s *vport; + struct list_head *qe; + + list_for_each(qe, &fabric->vport_q) { + vport = (struct bfa_fcs_vport_s *) qe; + if (bfa_fcs_lport_get_pwwn(&vport->lport) == pwwn) + return vport; + } + + return NULL; +} + + +/* + * Get OUI of the attached switch. + * + * Note : Use of this function should be avoided as much as possible. + * This function should be used only if there is any requirement +* to check for FOS version below 6.3. + * To check if the attached fabric is a brocade fabric, use + * bfa_lps_is_brcd_fabric() which works for FOS versions 6.3 + * or above only. 
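+ * The OUI is extracted from bytes 3 and 4 of the fabric node WWN
+ * learned during FLOGI (lps->pr_nwwn).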
+ */ + +u16 +bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric) +{ + wwn_t fab_nwwn; + u8 *tmp; + u16 oui; + + fab_nwwn = fabric->lps->pr_nwwn; + + tmp = (u8 *)&fab_nwwn; + oui = (tmp[3] << 8) | tmp[4]; + + return oui; +} +/* + * Unsolicited frame receive handling. + */ +void +bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, + u16 len) +{ + u32 pid = fchs->d_id; + struct bfa_fcs_vport_s *vport; + struct list_head *qe; + struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); + struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd; + + bfa_trc(fabric->fcs, len); + bfa_trc(fabric->fcs, pid); + + /* + * Look for our own FLOGI frames being looped back. This means an + * external loopback cable is in place. Our own FLOGI frames are + * sometimes looped back when switch port gets temporarily bypassed. + */ + if ((pid == bfa_ntoh3b(FC_FABRIC_PORT)) && + (els_cmd->els_code == FC_ELS_FLOGI) && + (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) { + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK); + return; + } + + /* + * FLOGI/EVFP exchanges should be consumed by base fabric. + */ + if (fchs->d_id == bfa_hton3b(FC_FABRIC_PORT)) { + bfa_trc(fabric->fcs, pid); + bfa_fcs_fabric_process_uf(fabric, fchs, len); + return; + } + + if (fabric->bport.pid == pid) { + /* + * All authentication frames should be routed to auth + */ + bfa_trc(fabric->fcs, els_cmd->els_code); + if (els_cmd->els_code == FC_ELS_AUTH) { + bfa_trc(fabric->fcs, els_cmd->els_code); + return; + } + + bfa_trc(fabric->fcs, *(u8 *) ((u8 *) fchs)); + bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); + return; + } + + /* + * look for a matching local port ID + */ + list_for_each(qe, &fabric->vport_q) { + vport = (struct bfa_fcs_vport_s *) qe; + if (vport->lport.pid == pid) { + bfa_fcs_lport_uf_recv(&vport->lport, fchs, len); + return; + } + } + + if (!bfa_fcs_fabric_is_switched(fabric)) + bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); + + bfa_trc(fabric->fcs, fchs->type); +} + +/* + * Unsolicited frames to be processed by fabric. + */ +static void +bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, + u16 len) +{ + struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); + + bfa_trc(fabric->fcs, els_cmd->els_code); + + switch (els_cmd->els_code) { + case FC_ELS_FLOGI: + bfa_fcs_fabric_process_flogi(fabric, fchs, len); + break; + + default: + /* + * need to generate a LS_RJT + */ + break; + } +} + +/* + * Process incoming FLOGI + */ +static void +bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, + struct fchs_s *fchs, u16 len) +{ + struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1); + struct bfa_fcs_lport_s *bport = &fabric->bport; + + bfa_trc(fabric->fcs, fchs->s_id); + + fabric->stats.flogi_rcvd++; + /* + * Check port type. It should be 0 = n-port. 
+ */ + if (flogi->csp.port_type) { + /* + * @todo: may need to send a LS_RJT + */ + bfa_trc(fabric->fcs, flogi->port_name); + fabric->stats.flogi_rejected++; + return; + } + + fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred); + bport->port_topo.pn2n.rem_port_wwn = flogi->port_name; + bport->port_topo.pn2n.reply_oxid = fchs->ox_id; + + /* + * Send a Flogi Acc + */ + bfa_fcs_fabric_send_flogi_acc(fabric); + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); +} + +static void +bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; + struct bfa_fcs_lport_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n; + struct bfa_s *bfa = fabric->fcs->bfa; + struct bfa_fcxp_s *fcxp; + u16 reqlen; + struct fchs_s fchs; + + fcxp = bfa_fcs_fcxp_alloc(fabric->fcs, BFA_FALSE); + /* + * Do not expect this failure -- expect remote node to retry + */ + if (!fcxp) + return; + + reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_hton3b(FC_FABRIC_PORT), + n2n_port->reply_oxid, pcfg->pwwn, + pcfg->nwwn, + bfa_fcport_get_maxfrsize(bfa), + bfa_fcport_get_rx_bbcredit(bfa), 0); + + bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->bfa_tag, + BFA_FALSE, FC_CLASS_3, + reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric, + FC_MAX_PDUSZ, 0); +} + +/* + * Flogi Acc completion callback. + */ +static void +bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t status, u32 rsp_len, + u32 resid_len, struct fchs_s *rspfchs) +{ + struct bfa_fcs_fabric_s *fabric = cbarg; + + bfa_trc(fabric->fcs, status); +} + + +/* + * Send AEN notification + */ +static void +bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port, + enum bfa_port_aen_event event) +{ + struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad; + struct bfa_aen_entry_s *aen_entry; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + aen_entry->aen_data.port.pwwn = bfa_fcs_lport_get_pwwn(port); + aen_entry->aen_data.port.fwwn = bfa_fcs_lport_get_fabric_name(port); + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq, + BFA_AEN_CAT_PORT, event); +} + +/* + * + * @param[in] fabric - fabric + * @param[in] wwn_t - new fabric name + * + * @return - none + */ +void +bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, + wwn_t fabric_name) +{ + struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad; + char pwwn_ptr[BFA_STRING_32]; + char fwwn_ptr[BFA_STRING_32]; + + bfa_trc(fabric->fcs, fabric_name); + + if (fabric->fabric_name == 0) { + /* + * With BRCD switches, we don't get Fabric Name in FLOGI. + * Don't generate a fabric name change event in this case. + */ + fabric->fabric_name = fabric_name; + } else { + fabric->fabric_name = fabric_name; + wwn2str(pwwn_ptr, bfa_fcs_lport_get_pwwn(&fabric->bport)); + wwn2str(fwwn_ptr, + bfa_fcs_lport_get_fabric_name(&fabric->bport)); + BFA_LOG(KERN_WARNING, bfad, bfa_log_level, + "Base port WWN = %s Fabric WWN = %s\n", + pwwn_ptr, fwwn_ptr); + bfa_fcs_fabric_aen_post(&fabric->bport, + BFA_PORT_AEN_FABRIC_NAME_CHANGE); + } +} + +void +bfa_cb_lps_flogo_comp(void *bfad, void *uarg) +{ + struct bfa_fcs_fabric_s *fabric = uarg; + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOGOCOMP); +} + +/* + * Returns FCS vf structure for a given vf_id. 
+ * + * param[in] vf_id - VF_ID + * + * return + * If lookup succeeds, retuns fcs vf object, otherwise returns NULL + */ +bfa_fcs_vf_t * +bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id) +{ + bfa_trc(fcs, vf_id); + if (vf_id == FC_VF_ID_NULL) + return &fcs->fabric; + + return NULL; +} + +/* + * Return the list of local logical ports present in the given VF. + * + * @param[in] vf vf for which logical ports are returned + * @param[out] lpwwn returned logical port wwn list + * @param[in,out] nlports in:size of lpwwn list; + * out:total elements present, + * actual elements returned is limited by the size + */ +void +bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports) +{ + struct list_head *qe; + struct bfa_fcs_vport_s *vport; + int i = 0; + struct bfa_fcs_s *fcs; + + if (vf == NULL || lpwwn == NULL || *nlports == 0) + return; + + fcs = vf->fcs; + + bfa_trc(fcs, vf->vf_id); + bfa_trc(fcs, (uint32_t) *nlports); + + lpwwn[i++] = vf->bport.port_cfg.pwwn; + + list_for_each(qe, &vf->vport_q) { + if (i >= *nlports) + break; + + vport = (struct bfa_fcs_vport_s *) qe; + lpwwn[i++] = vport->lport.port_cfg.pwwn; + } + + bfa_trc(fcs, i); + *nlports = i; +} + +/* + * BFA FCS PPORT ( physical port) + */ +static void +bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event) +{ + struct bfa_fcs_s *fcs = cbarg; + + bfa_trc(fcs, event); + + switch (event) { + case BFA_PORT_LINKUP: + bfa_fcs_fabric_link_up(&fcs->fabric); + break; + + case BFA_PORT_LINKDOWN: + bfa_fcs_fabric_link_down(&fcs->fabric); + break; + + default: + WARN_ON(1); + } +} + +/* + * BFA FCS UF ( Unsolicited Frames) + */ + +/* + * BFA callback for unsolicited frame receive handler. + * + * @param[in] cbarg callback arg for receive handler + * @param[in] uf unsolicited frame descriptor + * + * @return None + */ +static void +bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf) +{ + struct bfa_fcs_s *fcs = (struct bfa_fcs_s *) cbarg; + struct fchs_s *fchs = bfa_uf_get_frmbuf(uf); + u16 len = bfa_uf_get_frmlen(uf); + struct fc_vft_s *vft; + struct bfa_fcs_fabric_s *fabric; + + /* + * check for VFT header + */ + if (fchs->routing == FC_RTG_EXT_HDR && + fchs->cat_info == FC_CAT_VFT_HDR) { + bfa_stats(fcs, uf.tagged); + vft = bfa_uf_get_frmbuf(uf); + if (fcs->port_vfid == vft->vf_id) + fabric = &fcs->fabric; + else + fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id); + + /* + * drop frame if vfid is unknown + */ + if (!fabric) { + WARN_ON(1); + bfa_stats(fcs, uf.vfid_unknown); + bfa_uf_free(uf); + return; + } + + /* + * skip vft header + */ + fchs = (struct fchs_s *) (vft + 1); + len -= sizeof(struct fc_vft_s); + + bfa_trc(fcs, vft->vf_id); + } else { + bfa_stats(fcs, uf.untagged); + fabric = &fcs->fabric; + } + + bfa_trc(fcs, ((u32 *) fchs)[0]); + bfa_trc(fcs, ((u32 *) fchs)[1]); + bfa_trc(fcs, ((u32 *) fchs)[2]); + bfa_trc(fcs, ((u32 *) fchs)[3]); + bfa_trc(fcs, ((u32 *) fchs)[4]); + bfa_trc(fcs, ((u32 *) fchs)[5]); + bfa_trc(fcs, len); + + bfa_fcs_fabric_uf_recv(fabric, fchs, len); + bfa_uf_free(uf); +} + +/* + * fcs attach -- called once to initialize data structures at driver attach time + */ +void +bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, + bfa_boolean_t min_cfg) +{ + struct bfa_fcs_fabric_s *fabric = &fcs->fabric; + + fcs->bfa = bfa; + fcs->bfad = bfad; + fcs->min_cfg = min_cfg; + fcs->num_rport_logins = 0; + + bfa->fcs = BFA_TRUE; + fcbuild_init(); + + bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs); + bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs); 
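+ /* BFA now delivers link state events and unsolicited frames to FCS
+ * through the two handlers registered above. */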
+ + memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); + + /* + * Initialize base fabric. + */ + fabric->fcs = fcs; + INIT_LIST_HEAD(&fabric->vport_q); + INIT_LIST_HEAD(&fabric->vf_q); + fabric->lps = bfa_lps_alloc(fcs->bfa); + WARN_ON(!fabric->lps); + + /* + * Initialize fabric delete completion handler. Fabric deletion is + * complete when the last vport delete is complete. + */ + bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric); + bfa_wc_up(&fabric->wc); /* For the base port */ + + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); + bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL); +} diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h new file mode 100644 index 000000000..c1baf5cd0 --- /dev/null +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -0,0 +1,869 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +#ifndef __BFA_FCS_H__ +#define __BFA_FCS_H__ + +#include "bfa_cs.h" +#include "bfa_defs.h" +#include "bfa_defs_fcs.h" +#include "bfa_modules.h" +#include "bfa_fc.h" + +#define BFA_FCS_OS_STR_LEN 64 + +/* + * lps_pvt BFA LPS private functions + */ + +enum bfa_lps_event { + BFA_LPS_SM_LOGIN = 1, /* login request from user */ + BFA_LPS_SM_LOGOUT = 2, /* logout request from user */ + BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */ + BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */ + BFA_LPS_SM_DELETE = 5, /* lps delete from user */ + BFA_LPS_SM_OFFLINE = 6, /* Link is offline */ + BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */ + BFA_LPS_SM_SET_N2N_PID = 8, /* Set assigned PID for n2n */ +}; + + +/* + * !!! Only append to the enums defined here to avoid any versioning + * !!! needed between trace utility and driver version + */ +enum { + BFA_TRC_FCS_FCS = 1, + BFA_TRC_FCS_PORT = 2, + BFA_TRC_FCS_RPORT = 3, + BFA_TRC_FCS_FCPIM = 4, +}; + + +struct bfa_fcs_s; + +#define __fcs_min_cfg(__fcs) ((__fcs)->min_cfg) + +#define BFA_FCS_BRCD_SWITCH_OUI 0x051e +#define N2N_LOCAL_PID 0x010000 +#define N2N_REMOTE_PID 0x020000 +#define BFA_FCS_RETRY_TIMEOUT 2000 +#define BFA_FCS_MAX_NS_RETRIES 5 +#define BFA_FCS_PID_IS_WKA(pid) ((bfa_ntoh3b(pid) > 0xFFF000) ? 
1 : 0) +#define BFA_FCS_MAX_RPORT_LOGINS 1024 + +struct bfa_fcs_lport_ns_s { + bfa_sm_t sm; /* state machine */ + struct bfa_timer_s timer; + struct bfa_fcs_lport_s *port; /* parent port */ + struct bfa_fcxp_s *fcxp; + struct bfa_fcxp_wqe_s fcxp_wqe; + u8 num_rnnid_retries; + u8 num_rsnn_nn_retries; +}; + + +struct bfa_fcs_lport_scn_s { + bfa_sm_t sm; /* state machine */ + struct bfa_timer_s timer; + struct bfa_fcs_lport_s *port; /* parent port */ + struct bfa_fcxp_s *fcxp; + struct bfa_fcxp_wqe_s fcxp_wqe; +}; + + +struct bfa_fcs_lport_fdmi_s { + bfa_sm_t sm; /* state machine */ + struct bfa_timer_s timer; + struct bfa_fcs_lport_ms_s *ms; /* parent ms */ + struct bfa_fcxp_s *fcxp; + struct bfa_fcxp_wqe_s fcxp_wqe; + u8 retry_cnt; /* retry count */ + u8 rsvd[3]; +}; + + +struct bfa_fcs_lport_ms_s { + bfa_sm_t sm; /* state machine */ + struct bfa_timer_s timer; + struct bfa_fcs_lport_s *port; /* parent port */ + struct bfa_fcxp_s *fcxp; + struct bfa_fcxp_wqe_s fcxp_wqe; + struct bfa_fcs_lport_fdmi_s fdmi; /* FDMI component of MS */ + u8 retry_cnt; /* retry count */ + u8 rsvd[3]; +}; + + +struct bfa_fcs_lport_fab_s { + struct bfa_fcs_lport_ns_s ns; /* NS component of port */ + struct bfa_fcs_lport_scn_s scn; /* scn component of port */ + struct bfa_fcs_lport_ms_s ms; /* MS component of port */ +}; + +#define MAX_ALPA_COUNT 127 + +struct bfa_fcs_lport_loop_s { + u8 num_alpa; /* Num of ALPA entries in the map */ + u8 alpabm_valid; /* alpa bitmap valid or not (1 or 0) */ + u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional Map */ + struct bfa_fcs_lport_s *port; /* parent port */ +}; + +struct bfa_fcs_lport_n2n_s { + u32 rsvd; + __be16 reply_oxid; /* ox_id from the req flogi to be + *used in flogi acc */ + wwn_t rem_port_wwn; /* Attached port's wwn */ +}; + + +union bfa_fcs_lport_topo_u { + struct bfa_fcs_lport_fab_s pfab; + struct bfa_fcs_lport_loop_s ploop; + struct bfa_fcs_lport_n2n_s pn2n; +}; + + +struct bfa_fcs_lport_s { + struct list_head qe; /* used by port/vport */ + bfa_sm_t sm; /* state machine */ + struct bfa_fcs_fabric_s *fabric; /* parent fabric */ + struct bfa_lport_cfg_s port_cfg; /* port configuration */ + struct bfa_timer_s link_timer; /* timer for link offline */ + u32 pid:24; /* FC address */ + u8 lp_tag; /* lport tag */ + u16 num_rports; /* Num of r-ports */ + struct list_head rport_q; /* queue of discovered r-ports */ + struct bfa_fcs_s *fcs; /* FCS instance */ + union bfa_fcs_lport_topo_u port_topo; /* fabric/loop/n2n details */ + struct bfad_port_s *bfad_port; /* driver peer instance */ + struct bfa_fcs_vport_s *vport; /* NULL for base ports */ + struct bfa_fcxp_s *fcxp; + struct bfa_fcxp_wqe_s fcxp_wqe; + struct bfa_lport_stats_s stats; + struct bfa_wc_s wc; /* waiting counter for events */ +}; +#define BFA_FCS_GET_HAL_FROM_PORT(port) (port->fcs->bfa) +#define BFA_FCS_GET_NS_FROM_PORT(port) (&port->port_topo.pfab.ns) +#define BFA_FCS_GET_SCN_FROM_PORT(port) (&port->port_topo.pfab.scn) +#define BFA_FCS_GET_MS_FROM_PORT(port) (&port->port_topo.pfab.ms) +#define BFA_FCS_GET_FDMI_FROM_PORT(port) (&port->port_topo.pfab.ms.fdmi) +#define BFA_FCS_VPORT_IS_INITIATOR_MODE(port) \ + (port->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM) + +/* + * forward declaration + */ +struct bfad_vf_s; + +enum bfa_fcs_fabric_type { + BFA_FCS_FABRIC_UNKNOWN = 0, + BFA_FCS_FABRIC_SWITCHED = 1, + BFA_FCS_FABRIC_N2N = 2, + BFA_FCS_FABRIC_LOOP = 3, +}; + + +struct bfa_fcs_fabric_s { + struct list_head qe; /* queue element */ + bfa_sm_t sm; /* state machine */ + struct bfa_fcs_s *fcs; /* FCS instance */ + 
struct bfa_fcs_lport_s bport; /* base logical port */ + enum bfa_fcs_fabric_type fab_type; /* fabric type */ + enum bfa_port_type oper_type; /* current link topology */ + u8 is_vf; /* is virtual fabric? */ + u8 is_npiv; /* is NPIV supported ? */ + u8 is_auth; /* is Security/Auth supported ? */ + u16 bb_credit; /* BB credit from fabric */ + u16 vf_id; /* virtual fabric ID */ + u16 num_vports; /* num vports */ + u16 rsvd; + struct list_head vport_q; /* queue of virtual ports */ + struct list_head vf_q; /* queue of virtual fabrics */ + struct bfad_vf_s *vf_drv; /* driver vf structure */ + struct bfa_timer_s link_timer; /* Link Failure timer. Vport */ + wwn_t fabric_name; /* attached fabric name */ + bfa_boolean_t auth_reqd; /* authentication required */ + struct bfa_timer_s delay_timer; /* delay timer */ + union { + u16 swp_vfid;/* switch port VF id */ + } event_arg; + struct bfa_wc_s wc; /* wait counter for delete */ + struct bfa_vf_stats_s stats; /* fabric/vf stats */ + struct bfa_lps_s *lps; /* lport login services */ + u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; + /* attached fabric's ip addr */ + struct bfa_wc_s stop_wc; /* wait counter for stop */ +}; + +#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv) +#define bfa_fcs_fabric_is_switched(__f) \ + ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED) + +/* + * The design calls for a single implementation of base fabric and vf. + */ +#define bfa_fcs_vf_t struct bfa_fcs_fabric_s + +struct bfa_vf_event_s { + u32 undefined; +}; + +/* + * @todo : need to move to a global config file. + * Maximum Rports supported per port (physical/logical). + */ +#define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */ + +#define bfa_fcs_lport_t struct bfa_fcs_lport_s + +/* + * Symbolic Name related defines + * Total bytes 255. + * Physical Port's symbolic name 128 bytes. + * For Vports, Vport's symbolic name is appended to the Physical port's + * Symbolic Name. + * + * Physical Port's symbolic name Format : (Total 128 bytes) + * Adapter Model number/name : 16 bytes + * Driver Version : 10 bytes + * Host Machine Name : 30 bytes + * Host OS Info : 44 bytes + * Host OS PATCH Info : 16 bytes + * ( remaining 12 bytes reserved to be used for separator) + */ +#define BFA_FCS_PORT_SYMBNAME_SEPARATOR " | " + +#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ 16 +#define BFA_FCS_PORT_SYMBNAME_VERSION_SZ 10 +#define BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ 30 +#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 44 +#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16 + +/* + * Get FC port ID for a logical port. + */ +#define bfa_fcs_lport_get_fcid(_lport) ((_lport)->pid) +#define bfa_fcs_lport_get_pwwn(_lport) ((_lport)->port_cfg.pwwn) +#define bfa_fcs_lport_get_nwwn(_lport) ((_lport)->port_cfg.nwwn) +#define bfa_fcs_lport_get_psym_name(_lport) ((_lport)->port_cfg.sym_name) +#define bfa_fcs_lport_get_nsym_name(_lport) ((_lport)->port_cfg.node_sym_name) +#define bfa_fcs_lport_is_initiator(_lport) \ + ((_lport)->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM) +#define bfa_fcs_lport_get_nrports(_lport) \ + ((_lport) ? 
(_lport)->num_rports : 0) + +static inline struct bfad_port_s * +bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port) +{ + return port->bfad_port; +} + +#define bfa_fcs_lport_get_opertype(_lport) ((_lport)->fabric->oper_type) +#define bfa_fcs_lport_get_fabric_name(_lport) ((_lport)->fabric->fabric_name) +#define bfa_fcs_lport_get_fabric_ipaddr(_lport) \ + ((_lport)->fabric->fabric_ip_addr) + +/* + * bfa fcs port public functions + */ + +bfa_boolean_t bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port); +struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs); +void bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port, + struct bfa_rport_qualifier_s rport[], int *nrports); +wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, + int index, int nrports, bfa_boolean_t bwwn); + +struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, + u16 vf_id, wwn_t lpwwn); + +void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname); +void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, + struct bfa_lport_info_s *port_info); +void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port, + struct bfa_lport_attr_s *port_attr); +void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port, + struct bfa_lport_stats_s *port_stats); +void bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port); +enum bfa_port_speed bfa_fcs_lport_get_rport_max_speed( + struct bfa_fcs_lport_s *port); + +/* MS FCS routines */ +void bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port); +void bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port); +void bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port); +void bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port); + +/* FDMI FCS routines */ +void bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms); +void bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms); +void bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms); +void bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, struct fchs_s *fchs, + u16 len); +void bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs, + u16 vf_id, struct bfa_fcs_vport_s *vport); +void bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport, + struct bfa_lport_cfg_s *port_cfg); +void bfa_fcs_lport_online(struct bfa_fcs_lport_s *port); +void bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port); +void bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port); +void bfa_fcs_lport_stop(struct bfa_fcs_lport_s *port); +struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pid( + struct bfa_fcs_lport_s *port, u32 pid); +struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_old_pid( + struct bfa_fcs_lport_s *port, u32 pid); +struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn( + struct bfa_fcs_lport_s *port, wwn_t pwwn); +struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn( + struct bfa_fcs_lport_s *port, wwn_t nwwn); +struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_qualifier( + struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 pid); +void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port, + struct bfa_fcs_rport_s *rport); +void bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port, + struct bfa_fcs_rport_s *rport); +void bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport); +void bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport); +void bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport); +void bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port); +void bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, + struct bfa_fcxp_s *fcxp_alloced); +void 
bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport); +void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport); +void bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *vport); +void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_frame, u32 len); +void bfa_fcs_lport_lip_scn_online(bfa_fcs_lport_t *port); + +struct bfa_fcs_vport_s { + struct list_head qe; /* queue elem */ + bfa_sm_t sm; /* state machine */ + bfa_fcs_lport_t lport; /* logical port */ + struct bfa_timer_s timer; + struct bfad_vport_s *vport_drv; /* Driver private */ + struct bfa_vport_stats_s vport_stats; /* vport statistics */ + struct bfa_lps_s *lps; /* Lport login service*/ + int fdisc_retries; +}; + +#define bfa_fcs_vport_get_port(vport) \ + ((struct bfa_fcs_lport_s *)(&vport->port)) + +/* + * bfa fcs vport public functions + */ +bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, + struct bfa_fcs_s *fcs, u16 vf_id, + struct bfa_lport_cfg_s *port_cfg, + struct bfad_vport_s *vport_drv); +bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, + struct bfa_fcs_s *fcs, u16 vf_id, + struct bfa_lport_cfg_s *port_cfg, + struct bfad_vport_s *vport_drv); +bfa_boolean_t bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport); +bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport); +bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport); +bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport); +void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, + struct bfa_vport_attr_s *vport_attr); +struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, + u16 vf_id, wwn_t vpwwn); +void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport); +void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport); +void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport); +void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport); +void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport); +void bfa_fcs_vport_fcs_stop(struct bfa_fcs_vport_s *vport); +void bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport); + +#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */ +#define BFA_FCS_RPORT_MAX_RETRIES (5) + +/* + * forward declarations + */ +struct bfad_rport_s; + +struct bfa_fcs_itnim_s; +struct bfa_fcs_tin_s; +struct bfa_fcs_iprp_s; + +/* Rport Features (RPF) */ +struct bfa_fcs_rpf_s { + bfa_sm_t sm; /* state machine */ + struct bfa_fcs_rport_s *rport; /* parent rport */ + struct bfa_timer_s timer; /* general purpose timer */ + struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */ + struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */ + int rpsc_retries; /* max RPSC retry attempts */ + enum bfa_port_speed rpsc_speed; + /* Current Speed from RPSC. O if RPSC fails */ + enum bfa_port_speed assigned_speed; + /* + * Speed assigned by the user. will be used if RPSC is + * not supported by the rport. 
+ */ +}; + +struct bfa_fcs_rport_s { + struct list_head qe; /* used by port/vport */ + struct bfa_fcs_lport_s *port; /* parent FCS port */ + struct bfa_fcs_s *fcs; /* fcs instance */ + struct bfad_rport_s *rp_drv; /* driver peer instance */ + u32 pid; /* port ID of rport */ + u32 old_pid; /* PID before rport goes offline */ + u16 maxfrsize; /* maximum frame size */ + __be16 reply_oxid; /* OX_ID of inbound requests */ + enum fc_cos fc_cos; /* FC classes of service supp */ + bfa_boolean_t cisc; /* CISC capable device */ + bfa_boolean_t prlo; /* processing prlo or LOGO */ + bfa_boolean_t plogi_pending; /* Rx Plogi Pending */ + wwn_t pwwn; /* port wwn of rport */ + wwn_t nwwn; /* node wwn of rport */ + struct bfa_rport_symname_s psym_name; /* port symbolic name */ + bfa_sm_t sm; /* state machine */ + struct bfa_timer_s timer; /* general purpose timer */ + struct bfa_fcs_itnim_s *itnim; /* ITN initiator mode role */ + struct bfa_fcs_tin_s *tin; /* ITN initiator mode role */ + struct bfa_fcs_iprp_s *iprp; /* IP/FC role */ + struct bfa_rport_s *bfa_rport; /* BFA Rport */ + struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */ + int plogi_retries; /* max plogi retry attempts */ + int ns_retries; /* max NS query retry attempts */ + struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */ + struct bfa_rport_stats_s stats; /* rport stats */ + enum bfa_rport_function scsi_function; /* Initiator/Target */ + struct bfa_fcs_rpf_s rpf; /* Rport features module */ + bfa_boolean_t scn_online; /* SCN online flag */ +}; + +static inline struct bfa_rport_s * +bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport) +{ + return rport->bfa_rport; +} + +/* + * bfa fcs rport API functions + */ +void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, + struct bfa_rport_attr_s *attr); +struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, + wwn_t rpwwn); +struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn( + struct bfa_fcs_lport_s *port, wwn_t rnwwn); +void bfa_fcs_rport_set_del_timeout(u8 rport_tmo); +void bfa_fcs_rport_set_max_logins(u32 max_logins); +void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, + struct fchs_s *fchs, u16 len); +void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport); + +struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, + u32 pid); +void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, + struct fc_logi_s *plogi_rsp); +void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs, + struct fc_logi_s *plogi); +void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, + struct fc_logi_s *plogi); +void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id); + +void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport); +void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport); +int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport); +struct bfa_fcs_rport_s *bfa_fcs_rport_create_by_wwn( + struct bfa_fcs_lport_s *port, wwn_t wwn); +void bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport); +void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport); +void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport); + +/* + * forward declarations + */ +struct bfad_itnim_s; + +struct bfa_fcs_itnim_s { + bfa_sm_t sm; /* state machine */ + struct bfa_fcs_rport_s *rport; /* parent remote rport */ + struct bfad_itnim_s *itnim_drv; /* driver peer instance */ + struct bfa_fcs_s *fcs; /* fcs instance */ + struct bfa_timer_s timer; /* timer functions */ + 
struct bfa_itnim_s *bfa_itnim; /* BFA itnim struct */ + u32 prli_retries; /* max prli retry attempts */ + bfa_boolean_t seq_rec; /* seq recovery support */ + bfa_boolean_t rec_support; /* REC supported */ + bfa_boolean_t conf_comp; /* FCP_CONF support */ + bfa_boolean_t task_retry_id; /* task retry id supp */ + struct bfa_fcxp_wqe_s fcxp_wqe; /* wait qelem for fcxp */ + struct bfa_fcxp_s *fcxp; /* FCXP in use */ + struct bfa_itnim_stats_s stats; /* itn statistics */ +}; +#define bfa_fcs_fcxp_alloc(__fcs, __req) \ + bfa_fcxp_req_rsp_alloc(NULL, (__fcs)->bfa, 0, 0, \ + NULL, NULL, NULL, NULL, __req) +#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, \ + __alloc_cbarg, __req) \ + bfa_fcxp_req_rsp_alloc_wait(__bfa, __wqe, __alloc_cbfn, \ + __alloc_cbarg, NULL, 0, 0, NULL, NULL, NULL, NULL, __req) + +static inline struct bfad_port_s * +bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->rport->port->bfad_port; +} + + +static inline struct bfa_fcs_lport_s * +bfa_fcs_itnim_get_port(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->rport->port; +} + + +static inline wwn_t +bfa_fcs_itnim_get_nwwn(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->rport->nwwn; +} + + +static inline wwn_t +bfa_fcs_itnim_get_pwwn(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->rport->pwwn; +} + + +static inline u32 +bfa_fcs_itnim_get_fcid(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->rport->pid; +} + + +static inline u32 +bfa_fcs_itnim_get_maxfrsize(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->rport->maxfrsize; +} + + +static inline enum fc_cos +bfa_fcs_itnim_get_cos(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->rport->fc_cos; +} + + +static inline struct bfad_itnim_s * +bfa_fcs_itnim_get_drvitn(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->itnim_drv; +} + + +static inline struct bfa_itnim_s * +bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->bfa_itnim; +} + +/* + * bfa fcs FCP Initiator mode API functions + */ +void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim, + struct bfa_itnim_attr_s *attr); +void bfa_fcs_itnim_get_stats(struct bfa_fcs_itnim_s *itnim, + struct bfa_itnim_stats_s *stats); +struct bfa_fcs_itnim_s *bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, + wwn_t rpwwn); +bfa_status_t bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, + struct bfa_itnim_attr_s *attr); +bfa_status_t bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, + struct bfa_itnim_stats_s *stats); +bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, + wwn_t rpwwn); +struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport); +void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim); +void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim); +void bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim); +bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim); +void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim); +void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, + struct fchs_s *fchs, u16 len); + +#define BFA_FCS_FDMI_SUPP_SPEEDS_4G (FDMI_TRANS_SPEED_1G | \ + FDMI_TRANS_SPEED_2G | \ + FDMI_TRANS_SPEED_4G) + +#define BFA_FCS_FDMI_SUPP_SPEEDS_8G (FDMI_TRANS_SPEED_1G | \ + FDMI_TRANS_SPEED_2G | \ + FDMI_TRANS_SPEED_4G | \ + FDMI_TRANS_SPEED_8G) + +#define BFA_FCS_FDMI_SUPP_SPEEDS_16G (FDMI_TRANS_SPEED_2G | \ + FDMI_TRANS_SPEED_4G | \ + FDMI_TRANS_SPEED_8G | \ + FDMI_TRANS_SPEED_16G) + +#define BFA_FCS_FDMI_SUPP_SPEEDS_10G FDMI_TRANS_SPEED_10G + 
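+/*
+ * Illustrative usage sketch (not part of the driver): resolve an I-T
+ * nexus by remote port WWN and read its negotiated parameters through
+ * the accessors declared earlier in this header. The lport pointer and
+ * rpwwn are assumed to have been obtained elsewhere (e.g. via
+ * bfa_fcs_get_base_port()).
+ *
+ *	struct bfa_fcs_itnim_s *itnim;
+ *	struct bfa_itnim_attr_s attr;
+ *
+ *	itnim = bfa_fcs_itnim_lookup(port, rpwwn);
+ *	if (itnim && bfa_fcs_itnim_get_online_state(itnim) == BFA_STATUS_OK) {
+ *		bfa_trc(port->fcs, bfa_fcs_itnim_get_fcid(itnim));
+ *		bfa_trc(port->fcs, bfa_fcs_itnim_get_maxfrsize(itnim));
+ *		(void) bfa_fcs_itnim_attr_get(port, rpwwn, &attr);
+ *	}
+ */
+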
+#define BFA_FCS_FDMI_VENDOR_INFO_LEN 8 +#define BFA_FCS_FDMI_FC4_TYPE_LEN 32 + +/* + * HBA Attribute Block : BFA internal representation. Note : Some variable + * sizes have been trimmed to suit BFA For Ex : Model will be "QLogic ". Based + * on this the size has been reduced to 16 bytes from the standard's 64 bytes. + */ +struct bfa_fcs_fdmi_hba_attr_s { + wwn_t node_name; + u8 manufacturer[64]; + u8 serial_num[64]; + u8 model[16]; + u8 model_desc[128]; + u8 hw_version[8]; + u8 driver_version[BFA_VERSION_LEN]; + u8 option_rom_ver[BFA_VERSION_LEN]; + u8 fw_version[BFA_VERSION_LEN]; + u8 os_name[256]; + __be32 max_ct_pyld; + struct bfa_lport_symname_s node_sym_name; + u8 vendor_info[BFA_FCS_FDMI_VENDOR_INFO_LEN]; + __be32 num_ports; + wwn_t fabric_name; + u8 bios_ver[BFA_VERSION_LEN]; +}; + +/* + * Port Attribute Block + */ +struct bfa_fcs_fdmi_port_attr_s { + u8 supp_fc4_types[BFA_FCS_FDMI_FC4_TYPE_LEN]; + __be32 supp_speed; /* supported speed */ + __be32 curr_speed; /* current Speed */ + __be32 max_frm_size; /* max frame size */ + u8 os_device_name[256]; /* OS device Name */ + u8 host_name[256]; /* host name */ + wwn_t port_name; + wwn_t node_name; + struct bfa_lport_symname_s port_sym_name; + __be32 port_type; + enum fc_cos scos; + wwn_t port_fabric_name; + u8 port_act_fc4_type[BFA_FCS_FDMI_FC4_TYPE_LEN]; + __be32 port_state; + __be32 num_ports; +}; + +struct bfa_fcs_stats_s { + struct { + u32 untagged; /* untagged receive frames */ + u32 tagged; /* tagged receive frames */ + u32 vfid_unknown; /* VF id is unknown */ + } uf; +}; + +struct bfa_fcs_driver_info_s { + u8 version[BFA_VERSION_LEN]; /* Driver Version */ + u8 host_machine_name[BFA_FCS_OS_STR_LEN]; + u8 host_os_name[BFA_FCS_OS_STR_LEN]; /* OS name and version */ + u8 host_os_patch[BFA_FCS_OS_STR_LEN]; /* patch or service pack */ + u8 os_device_name[BFA_FCS_OS_STR_LEN]; /* Driver Device Name */ +}; + +struct bfa_fcs_s { + struct bfa_s *bfa; /* corresponding BFA bfa instance */ + struct bfad_s *bfad; /* corresponding BDA driver instance */ + struct bfa_trc_mod_s *trcmod; /* tracing module */ + bfa_boolean_t vf_enabled; /* VF mode is enabled */ + bfa_boolean_t fdmi_enabled; /* FDMI is enabled */ + bfa_boolean_t min_cfg; /* min cfg enabled/disabled */ + u16 port_vfid; /* port default VF ID */ + struct bfa_fcs_driver_info_s driver_info; + struct bfa_fcs_fabric_s fabric; /* base fabric state machine */ + struct bfa_fcs_stats_s stats; /* FCS statistics */ + struct bfa_wc_s wc; /* waiting counter */ + int fcs_aen_seq; + u32 num_rport_logins; +}; + +/* + * fcs_fabric_sm fabric state machine functions + */ + +/* + * Fabric state machine events + */ +enum bfa_fcs_fabric_event { + BFA_FCS_FABRIC_SM_CREATE = 1, /* create from driver */ + BFA_FCS_FABRIC_SM_DELETE = 2, /* delete from driver */ + BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */ + BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */ + BFA_FCS_FABRIC_SM_CONT_OP = 5, /* flogi/auth continue op */ + BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* flogi/auth retry op */ + BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* from flogi/auth */ + BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* from flogi/auth */ + BFA_FCS_FABRIC_SM_ISOLATE = 9, /* from EVFP processing */ + BFA_FCS_FABRIC_SM_NO_TAGGING = 10, /* no VFT tagging from EVFP */ + BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */ + BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* auth failed */ + BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* auth successful */ + BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */ + BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* 
Received our own FLOGI */ + BFA_FCS_FABRIC_SM_START = 16, /* from driver */ + BFA_FCS_FABRIC_SM_STOP = 17, /* Stop from driver */ + BFA_FCS_FABRIC_SM_STOPCOMP = 18, /* Stop completion */ + BFA_FCS_FABRIC_SM_LOGOCOMP = 19, /* FLOGO completion */ +}; + +/* + * fcs_rport_sm FCS rport state machine events + */ + +enum rport_event { + RPSM_EVENT_PLOGI_SEND = 1, /* new rport; start with PLOGI */ + RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */ + RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */ + RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */ + RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */ + RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */ + RPSM_EVENT_DELETE = 7, /* RPORT delete request */ + RPSM_EVENT_FAB_SCN = 8, /* state change notification */ + RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */ + RPSM_EVENT_FAILED = 10, /* Request to rport failed. */ + RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */ + RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */ + RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */ + RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */ + RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */ + RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */ + RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */ + RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */ + RPSM_EVENT_SCN_OFFLINE = 19, /* loop scn offline */ + RPSM_EVENT_SCN_ONLINE = 20, /* loop scn online */ + RPSM_EVENT_FC4_FCS_ONLINE = 21, /* FC-4 FCS online complete */ +}; + +/* + * fcs_itnim_sm FCS itnim state machine events + */ +enum bfa_fcs_itnim_event { + BFA_FCS_ITNIM_SM_FCS_ONLINE = 1, /* rport online event */ + BFA_FCS_ITNIM_SM_OFFLINE = 2, /* rport offline */ + BFA_FCS_ITNIM_SM_FRMSENT = 3, /* prli frame is sent */ + BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */ + BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */ + BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */ + BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */ + BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */ + BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */ + BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */ + BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */ + BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */ + BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /* bfa rport online event */ +}; + +/* + * bfa fcs API functions + */ +void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, + struct bfad_s *bfad, + bfa_boolean_t min_cfg); +void bfa_fcs_init(struct bfa_fcs_s *fcs); +void bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs); +void bfa_fcs_update_cfg(struct bfa_fcs_s *fcs); +void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, + struct bfa_fcs_driver_info_s *driver_info); +void bfa_fcs_exit(struct bfa_fcs_s *fcs); +void bfa_fcs_stop(struct bfa_fcs_s *fcs); + +/* + * bfa fcs vf public functions + */ +bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id); +void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports); + +/* + * fabric protected interface functions + */ +void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs); +void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric); +void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric); +void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, + struct bfa_fcs_vport_s *vport); +void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric, + struct bfa_fcs_vport_s *vport); +struct 
bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup( + struct bfa_fcs_fabric_s *fabric, wwn_t pwwn); +void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs); +void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, + struct fchs_s *fchs, u16 len); +void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric); +void bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric); +void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, + wwn_t fabric_name); +u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric); +void bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs); +void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); + +/* + * BFA FCS callback interfaces + */ + +/* + * fcb Main fcs callbacks + */ + +struct bfad_port_s; +struct bfad_vf_s; +struct bfad_vport_s; +struct bfad_rport_s; + +/* + * lport callbacks + */ +struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad, + struct bfa_fcs_lport_s *port, + enum bfa_lport_role roles, + struct bfad_vf_s *vf_drv, + struct bfad_vport_s *vp_drv); + +/* + * vport callbacks + */ +void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s); + +/* + * rport callbacks + */ +bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad, + struct bfa_fcs_rport_s **rport, + struct bfad_rport_s **rport_drv); + +/* + * itnim callbacks + */ +int bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, + struct bfad_itnim_s **itnim_drv); +void bfa_fcb_itnim_free(struct bfad_s *bfad, + struct bfad_itnim_s *itnim_drv); +void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv); +void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv); + +#endif /* __BFA_FCS_H__ */ diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c new file mode 100644 index 000000000..c7de62bae --- /dev/null +++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c @@ -0,0 +1,833 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
+ */ + +/* + * fcpim.c - FCP initiator mode i-t nexus state machine + */ + +#include "bfad_drv.h" +#include "bfa_fcs.h" +#include "bfa_fcbuild.h" +#include "bfad_im.h" + +BFA_TRC_FILE(FCS, FCPIM); + +/* + * forward declarations + */ +static void bfa_fcs_itnim_timeout(void *arg); +static void bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim); +static void bfa_fcs_itnim_send_prli(void *itnim_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_itnim_prli_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs); +static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim, + enum bfa_itnim_aen_event event); + +static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); +static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); +static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); +static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); +static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); +static void bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); +static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); +static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); +static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); + +static struct bfa_sm_table_s itnim_sm_table[] = { + {BFA_SM(bfa_fcs_itnim_sm_offline), BFA_ITNIM_OFFLINE}, + {BFA_SM(bfa_fcs_itnim_sm_prli_send), BFA_ITNIM_PRLI_SEND}, + {BFA_SM(bfa_fcs_itnim_sm_prli), BFA_ITNIM_PRLI_SENT}, + {BFA_SM(bfa_fcs_itnim_sm_prli_retry), BFA_ITNIM_PRLI_RETRY}, + {BFA_SM(bfa_fcs_itnim_sm_hcb_online), BFA_ITNIM_HCB_ONLINE}, + {BFA_SM(bfa_fcs_itnim_sm_online), BFA_ITNIM_ONLINE}, + {BFA_SM(bfa_fcs_itnim_sm_hcb_offline), BFA_ITNIM_HCB_OFFLINE}, + {BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR}, +}; + +/* + * fcs_itnim_sm FCS itnim state machine + */ + +static void +bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_FCS_ONLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send); + itnim->prli_retries = 0; + bfa_fcs_itnim_send_prli(itnim, NULL); + break; + + case BFA_FCS_ITNIM_SM_OFFLINE: + bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); + break; + + case BFA_FCS_ITNIM_SM_INITIATOR: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } + +} + +static void +bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_FRMSENT: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli); + break; + + case BFA_FCS_ITNIM_SM_INITIATOR: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); + bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); + bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); + break; + + case BFA_FCS_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, 
bfa_fcs_itnim_sm_offline); + bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); + bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } +} + +static void +bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_RSP_OK: + if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR) + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); + else + bfa_sm_set_state(itnim, + bfa_fcs_itnim_sm_hal_rport_online); + + bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); + break; + + case BFA_FCS_ITNIM_SM_RSP_ERROR: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_retry); + bfa_timer_start(itnim->fcs->bfa, &itnim->timer, + bfa_fcs_itnim_timeout, itnim, + BFA_FCS_RETRY_TIMEOUT); + break; + + case BFA_FCS_ITNIM_SM_RSP_NOT_SUPP: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + break; + + case BFA_FCS_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcxp_discard(itnim->fcxp); + bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); + break; + + case BFA_FCS_ITNIM_SM_INITIATOR: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); + bfa_fcxp_discard(itnim->fcxp); + bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcxp_discard(itnim->fcxp); + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } +} + +static void +bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_HAL_ONLINE: + if (!itnim->bfa_itnim) + itnim->bfa_itnim = bfa_itnim_create(itnim->fcs->bfa, + itnim->rport->bfa_rport, itnim); + + if (itnim->bfa_itnim) { + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online); + bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec); + } else { + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_sm_send_event(itnim->rport, RPSM_EVENT_DELETE); + } + + break; + + case BFA_FCS_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } +} + +static void +bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_TIMEOUT: + if (itnim->prli_retries < BFA_FCS_RPORT_MAX_RETRIES) { + itnim->prli_retries++; + bfa_trc(itnim->fcs, itnim->prli_retries); + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send); + bfa_fcs_itnim_send_prli(itnim, NULL); + } else { + /* invoke target offline */ + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP); + } + break; + + + case BFA_FCS_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_timer_stop(&itnim->timer); + bfa_sm_send_event(itnim->rport, 
RPSM_EVENT_FC4_OFFLINE); + break; + + case BFA_FCS_ITNIM_SM_INITIATOR: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); + bfa_timer_stop(&itnim->timer); + bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_timer_stop(&itnim->timer); + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } +} + +static void +bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; + char rpwwn_buf[BFA_STRING_32]; + + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_HCB_ONLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online); + bfa_fcb_itnim_online(itnim->itnim_drv); + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port)); + wwn2str(rpwwn_buf, itnim->rport->pwwn); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Target (WWN = %s) is online for initiator (WWN = %s)\n", + rpwwn_buf, lpwwn_buf); + bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE); + break; + + case BFA_FCS_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline); + bfa_itnim_offline(itnim->bfa_itnim); + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } +} + +static void +bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; + char rpwwn_buf[BFA_STRING_32]; + + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline); + bfa_fcb_itnim_offline(itnim->itnim_drv); + bfa_itnim_offline(itnim->bfa_itnim); + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port)); + wwn2str(rpwwn_buf, itnim->rport->pwwn); + if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) { + BFA_LOG(KERN_ERR, bfad, bfa_log_level, + "Target (WWN = %s) connectivity lost for " + "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf); + bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT); + } else { + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Target (WWN = %s) offlined by initiator (WWN = %s)\n", + rpwwn_buf, lpwwn_buf); + bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE); + } + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } +} + +static void +bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_HCB_OFFLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } +} + +/* + * This state is set when a discovered rport is also in intiator mode. + * This ITN is marked as no_op and is not active and will not be truned into + * online state. 
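+ * Such remote ports remain parked in this state until they go offline
+ * or are deleted.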
+ */ +static void +bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); + break; + + /* + * fcs_online is expected here for well known initiator ports + */ + case BFA_FCS_ITNIM_SM_FCS_ONLINE: + bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); + break; + + case BFA_FCS_ITNIM_SM_RSP_ERROR: + case BFA_FCS_ITNIM_SM_INITIATOR: + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } +} + +static void +bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim, + enum bfa_itnim_aen_event event) +{ + struct bfa_fcs_rport_s *rport = itnim->rport; + struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad; + struct bfa_aen_entry_s *aen_entry; + + /* Don't post events for well known addresses */ + if (BFA_FCS_PID_IS_WKA(rport->pid)) + return; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + aen_entry->aen_data.itnim.vf_id = rport->port->fabric->vf_id; + aen_entry->aen_data.itnim.ppwwn = bfa_fcs_lport_get_pwwn( + bfa_fcs_get_base_port(itnim->fcs)); + aen_entry->aen_data.itnim.lpwwn = bfa_fcs_lport_get_pwwn(rport->port); + aen_entry->aen_data.itnim.rpwwn = rport->pwwn; + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq, + BFA_AEN_CAT_ITNIM, event); +} + +static void +bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_itnim_s *itnim = itnim_cbarg; + struct bfa_fcs_rport_s *rport = itnim->rport; + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + int len; + + bfa_trc(itnim->fcs, itnim->rport->pwwn); + + fcxp = fcxp_alloced ? fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + itnim->stats.fcxp_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe, + bfa_fcs_itnim_send_prli, itnim, BFA_TRUE); + return; + } + itnim->fcxp = fcxp; + + len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + itnim->rport->pid, bfa_fcs_lport_get_fcid(port), 0); + + bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag, + BFA_FALSE, FC_CLASS_3, len, &fchs, + bfa_fcs_itnim_prli_response, (void *)itnim, + FC_MAX_PDUSZ, FC_ELS_TOV); + + itnim->stats.prli_sent++; + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT); +} + +static void +bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg; + struct fc_els_cmd_s *els_cmd; + struct fc_prli_s *prli_resp; + struct fc_ls_rjt_s *ls_rjt; + struct fc_prli_params_s *sparams; + + bfa_trc(itnim->fcs, req_status); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + itnim->stats.prli_rsp_err++; + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR); + return; + } + + els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); + + if (els_cmd->els_code == FC_ELS_ACC) { + prli_resp = (struct fc_prli_s *) els_cmd; + + if (fc_prli_rsp_parse(prli_resp, rsp_len) != FC_PARSE_OK) { + bfa_trc(itnim->fcs, rsp_len); + /* + * Check if this r-port is also in Initiator mode. 
+ * If so, we need to set this ITN as a no-op. + */ + if (prli_resp->parampage.servparams.initiator) { + bfa_trc(itnim->fcs, prli_resp->parampage.type); + itnim->rport->scsi_function = + BFA_RPORT_INITIATOR; + itnim->stats.prli_rsp_acc++; + itnim->stats.initiator++; + bfa_sm_send_event(itnim, + BFA_FCS_ITNIM_SM_RSP_OK); + return; + } + + itnim->stats.prli_rsp_parse_err++; + return; + } + itnim->rport->scsi_function = BFA_RPORT_TARGET; + + sparams = &prli_resp->parampage.servparams; + itnim->seq_rec = sparams->retry; + itnim->rec_support = sparams->rec_support; + itnim->task_retry_id = sparams->task_retry_id; + itnim->conf_comp = sparams->confirm; + + itnim->stats.prli_rsp_acc++; + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK); + } else { + ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); + + bfa_trc(itnim->fcs, ls_rjt->reason_code); + bfa_trc(itnim->fcs, ls_rjt->reason_code_expl); + + itnim->stats.prli_rsp_rjt++; + if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) { + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_NOT_SUPP); + return; + } + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR); + } +} + +static void +bfa_fcs_itnim_timeout(void *arg) +{ + struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) arg; + + itnim->stats.timeout++; + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT); +} + +static void +bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim) +{ + if (itnim->bfa_itnim) { + bfa_itnim_delete(itnim->bfa_itnim); + itnim->bfa_itnim = NULL; + } + + bfa_fcb_itnim_free(itnim->fcs->bfad, itnim->itnim_drv); +} + + + +/* + * itnim_public FCS ITNIM public interfaces + */ + +/* + * Called by rport when a new rport is created. + * + * @param[in] rport - remote port. + */ +struct bfa_fcs_itnim_s * +bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct bfa_fcs_itnim_s *itnim; + struct bfad_itnim_s *itnim_drv; + int ret; + + /* + * call bfad to allocate the itnim + */ + ret = bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv); + if (ret) { + bfa_trc(port->fcs, rport->pwwn); + return NULL; + } + + /* + * Initialize itnim + */ + itnim->rport = rport; + itnim->fcs = rport->fcs; + itnim->itnim_drv = itnim_drv; + + itnim->bfa_itnim = NULL; + itnim->seq_rec = BFA_FALSE; + itnim->rec_support = BFA_FALSE; + itnim->conf_comp = BFA_FALSE; + itnim->task_retry_id = BFA_FALSE; + + /* + * Set State machine + */ + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + + return itnim; +} + +/* + * Called by rport to delete the instance of FCPIM. + * + * @param[in] rport - remote port. + */ +void +bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim) +{ + bfa_trc(itnim->fcs, itnim->rport->pid); + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE); +} + +/* + * Notification from rport that PLOGI is complete to initiate FC-4 session. + */ +void +bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim) +{ + itnim->stats.onlines++; + + if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid)) + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HAL_ONLINE); +} + +/* + * Called by rport to handle a remote device offline. + */ +void +bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim) +{ + itnim->stats.offlines++; + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE); +} + +/* + * Called by rport when remote port is known to be an initiator from + * PRLI received. 
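+ * The ITN is moved to the initiator (no-op) state.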
+ */ +void +bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim) +{ + bfa_trc(itnim->fcs, itnim->rport->pid); + itnim->stats.initiator++; + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); +} + +/* + * Called by rport to check if the itnim is online. + */ +bfa_status_t +bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim) +{ + bfa_trc(itnim->fcs, itnim->rport->pid); + switch (bfa_sm_to_state(itnim_sm_table, itnim->sm)) { + case BFA_ITNIM_ONLINE: + case BFA_ITNIM_INITIATIOR: + return BFA_STATUS_OK; + + default: + return BFA_STATUS_NO_FCPIM_NEXUS; + } +} + +/* + * BFA completion callback for bfa_itnim_online(). + */ +void +bfa_cb_itnim_online(void *cbarg) +{ + struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg; + + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE); +} + +/* + * BFA completion callback for bfa_itnim_offline(). + */ +void +bfa_cb_itnim_offline(void *cb_arg) +{ + struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; + + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE); +} + +/* + * Mark the beginning of PATH TOV handling. IO completion callbacks + * are still pending. + */ +void +bfa_cb_itnim_tov_begin(void *cb_arg) +{ + struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; + + bfa_trc(itnim->fcs, itnim->rport->pwwn); +} + +/* + * Mark the end of PATH TOV handling. All pending IOs are already cleaned up. + */ +void +bfa_cb_itnim_tov(void *cb_arg) +{ + struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; + struct bfad_itnim_s *itnim_drv = itnim->itnim_drv; + + bfa_trc(itnim->fcs, itnim->rport->pwwn); + itnim_drv->state = ITNIM_STATE_TIMEOUT; +} + +/* + * BFA notification to FCS/driver for second level error recovery. + * + * Atleast one I/O request has timedout and target is unresponsive to + * repeated abort requests. Second level error recovery should be initiated + * by starting implicit logout and recovery procedures. 
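+ * The handler below initiates this by sending an implicit logout
+ * (RPSM_EVENT_LOGO_IMP) to the owning rport's state machine.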
+ */ +void +bfa_cb_itnim_sler(void *cb_arg) +{ + struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; + + itnim->stats.sler++; + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP); +} + +struct bfa_fcs_itnim_s * +bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) +{ + struct bfa_fcs_rport_s *rport; + rport = bfa_fcs_rport_lookup(port, rpwwn); + + if (!rport) + return NULL; + + WARN_ON(rport->itnim == NULL); + return rport->itnim; +} + +bfa_status_t +bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, + struct bfa_itnim_attr_s *attr) +{ + struct bfa_fcs_itnim_s *itnim = NULL; + + itnim = bfa_fcs_itnim_lookup(port, rpwwn); + + if (itnim == NULL) + return BFA_STATUS_NO_FCPIM_NEXUS; + + attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm); + attr->retry = itnim->seq_rec; + attr->rec_support = itnim->rec_support; + attr->conf_comp = itnim->conf_comp; + attr->task_retry_id = itnim->task_retry_id; + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, + struct bfa_itnim_stats_s *stats) +{ + struct bfa_fcs_itnim_s *itnim = NULL; + + WARN_ON(port == NULL); + + itnim = bfa_fcs_itnim_lookup(port, rpwwn); + + if (itnim == NULL) + return BFA_STATUS_NO_FCPIM_NEXUS; + + memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s)); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn) +{ + struct bfa_fcs_itnim_s *itnim = NULL; + + WARN_ON(port == NULL); + + itnim = bfa_fcs_itnim_lookup(port, rpwwn); + + if (itnim == NULL) + return BFA_STATUS_NO_FCPIM_NEXUS; + + memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s)); + return BFA_STATUS_OK; +} + +void +bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, + struct fchs_s *fchs, u16 len) +{ + struct fc_els_cmd_s *els_cmd; + + bfa_trc(itnim->fcs, fchs->type); + + if (fchs->type != FC_TYPE_ELS) + return; + + els_cmd = (struct fc_els_cmd_s *) (fchs + 1); + + bfa_trc(itnim->fcs, els_cmd->els_code); + + switch (els_cmd->els_code) { + case FC_ELS_PRLO: + bfa_fcs_rport_prlo(itnim->rport, fchs->ox_id); + break; + + default: + WARN_ON(1); + } +} diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c new file mode 100644 index 000000000..008afd817 --- /dev/null +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -0,0 +1,6982 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +#include "bfad_drv.h" +#include "bfad_im.h" +#include "bfa_fcs.h" +#include "bfa_fcbuild.h" +#include "bfa_fc.h" + +BFA_TRC_FILE(FCS, PORT); + +/* + * ALPA to LIXA bitmap mapping + * + * ALPA 0x00 (Word 0, Bit 30) is invalid for N_Ports. Also Word 0 Bit 31 + * is for L_bit (login required) and is filled as ALPA 0x00 here. 
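+ *
+ * The table below is indexed by bitmap bit position (words 0..3, most
+ * significant bit first) and yields the corresponding ALPA; it holds
+ * 128 entries in total.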
+ */ +static const u8 loop_alpa_map[] = { + 0x00, 0x00, 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, /* Word 0 Bits 31..24 */ + 0x17, 0x18, 0x1B, 0x1D, 0x1E, 0x1F, 0x23, 0x25, /* Word 0 Bits 23..16 */ + 0x26, 0x27, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, /* Word 0 Bits 15..08 */ + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x39, 0x3A, /* Word 0 Bits 07..00 */ + + 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49, 0x4A, 0x4B, /* Word 1 Bits 31..24 */ + 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54, 0x55, /* Word 1 Bits 23..16 */ + 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67, /* Word 1 Bits 15..08 */ + 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, /* Word 1 Bits 07..00 */ + + 0x73, 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, /* Word 2 Bits 31..24 */ + 0x81, 0x82, 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, /* Word 2 Bits 23..16 */ + 0x9B, 0x9D, 0x9E, 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, /* Word 2 Bits 15..08 */ + 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xB1, 0xB2, /* Word 2 Bits 07..00 */ + + 0xB3, 0xB4, 0xB5, 0xB6, 0xB9, 0xBA, 0xBC, 0xC3, /* Word 3 Bits 31..24 */ + 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, /* Word 3 Bits 23..16 */ + 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD9, /* Word 3 Bits 15..08 */ + 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF, /* Word 3 Bits 07..00 */ +}; + +static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs, u8 reason_code, + u8 reason_code_expl); +static void bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs, struct fc_logi_s *plogi); +static void bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs, + struct fc_echo_s *echo, u16 len); +static void bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs, + struct fc_rnid_cmd_s *rnid, u16 len); +static void bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port, + struct fc_rnid_general_topology_data_s *gen_topo_data); + +static void bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port); + +static void bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port); + +static void bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port); + +static struct { + void (*init) (struct bfa_fcs_lport_s *port); + void (*online) (struct bfa_fcs_lport_s *port); + void (*offline) (struct bfa_fcs_lport_s *port); +} __port_action[] = { + [BFA_FCS_FABRIC_UNKNOWN] = { + .init = bfa_fcs_lport_unknown_init, + .online = bfa_fcs_lport_unknown_online, + .offline = bfa_fcs_lport_unknown_offline + }, + [BFA_FCS_FABRIC_SWITCHED] = { + .init = bfa_fcs_lport_fab_init, + .online = bfa_fcs_lport_fab_online, + .offline = bfa_fcs_lport_fab_offline + }, + [BFA_FCS_FABRIC_N2N] = { + .init = bfa_fcs_lport_n2n_init, + .online = bfa_fcs_lport_n2n_online, + .offline = 
bfa_fcs_lport_n2n_offline + }, + [BFA_FCS_FABRIC_LOOP] = { + .init = bfa_fcs_lport_loop_init, + .online = bfa_fcs_lport_loop_online, + .offline = bfa_fcs_lport_loop_offline + }, +}; + +/* + * fcs_port_sm FCS logical port state machine + */ + +enum bfa_fcs_lport_event { + BFA_FCS_PORT_SM_CREATE = 1, + BFA_FCS_PORT_SM_ONLINE = 2, + BFA_FCS_PORT_SM_OFFLINE = 3, + BFA_FCS_PORT_SM_DELETE = 4, + BFA_FCS_PORT_SM_DELRPORT = 5, + BFA_FCS_PORT_SM_STOP = 6, +}; + +static void bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event); +static void bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event); +static void bfa_fcs_lport_sm_online(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event); +static void bfa_fcs_lport_sm_offline(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event); +static void bfa_fcs_lport_sm_deleting(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event); +static void bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event); + +static void +bfa_fcs_lport_sm_uninit( + struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event) +{ + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case BFA_FCS_PORT_SM_CREATE: + bfa_sm_set_state(port, bfa_fcs_lport_sm_init); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event) +{ + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case BFA_FCS_PORT_SM_ONLINE: + bfa_sm_set_state(port, bfa_fcs_lport_sm_online); + bfa_fcs_lport_online_actions(port); + break; + + case BFA_FCS_PORT_SM_DELETE: + bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit); + bfa_fcs_lport_deleted(port); + break; + + case BFA_FCS_PORT_SM_STOP: + /* If vport - send completion call back */ + if (port->vport) + bfa_fcs_vport_stop_comp(port->vport); + else + bfa_wc_down(&(port->fabric->stop_wc)); + break; + + case BFA_FCS_PORT_SM_OFFLINE: + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_sm_online( + struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event) +{ + struct bfa_fcs_rport_s *rport; + struct list_head *qe, *qen; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case BFA_FCS_PORT_SM_OFFLINE: + bfa_sm_set_state(port, bfa_fcs_lport_sm_offline); + bfa_fcs_lport_offline_actions(port); + break; + + case BFA_FCS_PORT_SM_STOP: + __port_action[port->fabric->fab_type].offline(port); + + if (port->num_rports == 0) { + bfa_sm_set_state(port, bfa_fcs_lport_sm_init); + /* If vport - send completion call back */ + if (port->vport) + bfa_fcs_vport_stop_comp(port->vport); + else + bfa_wc_down(&(port->fabric->stop_wc)); + } else { + bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping); + list_for_each_safe(qe, qen, &port->rport_q) { + rport = (struct bfa_fcs_rport_s *) qe; + bfa_sm_send_event(rport, RPSM_EVENT_DELETE); + } + } + break; + + case BFA_FCS_PORT_SM_DELETE: + + __port_action[port->fabric->fab_type].offline(port); + + if (port->num_rports == 0) { + bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit); + bfa_fcs_lport_deleted(port); + } else { + bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting); + list_for_each_safe(qe, qen, &port->rport_q) { + rport = (struct bfa_fcs_rport_s *) qe; + bfa_sm_send_event(rport, RPSM_EVENT_DELETE); + } + } + break; + + case BFA_FCS_PORT_SM_DELRPORT: + 
break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_sm_offline( + struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event) +{ + struct bfa_fcs_rport_s *rport; + struct list_head *qe, *qen; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case BFA_FCS_PORT_SM_ONLINE: + bfa_sm_set_state(port, bfa_fcs_lport_sm_online); + bfa_fcs_lport_online_actions(port); + break; + + case BFA_FCS_PORT_SM_STOP: + if (port->num_rports == 0) { + bfa_sm_set_state(port, bfa_fcs_lport_sm_init); + /* If vport - send completion call back */ + if (port->vport) + bfa_fcs_vport_stop_comp(port->vport); + else + bfa_wc_down(&(port->fabric->stop_wc)); + } else { + bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping); + list_for_each_safe(qe, qen, &port->rport_q) { + rport = (struct bfa_fcs_rport_s *) qe; + bfa_sm_send_event(rport, RPSM_EVENT_DELETE); + } + } + break; + + case BFA_FCS_PORT_SM_DELETE: + if (port->num_rports == 0) { + bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit); + bfa_fcs_lport_deleted(port); + } else { + bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting); + list_for_each_safe(qe, qen, &port->rport_q) { + rport = (struct bfa_fcs_rport_s *) qe; + bfa_sm_send_event(rport, RPSM_EVENT_DELETE); + } + } + break; + + case BFA_FCS_PORT_SM_DELRPORT: + case BFA_FCS_PORT_SM_OFFLINE: + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event) +{ + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case BFA_FCS_PORT_SM_DELRPORT: + if (port->num_rports == 0) { + bfa_sm_set_state(port, bfa_fcs_lport_sm_init); + /* If vport - send completion call back */ + if (port->vport) + bfa_fcs_vport_stop_comp(port->vport); + else + bfa_wc_down(&(port->fabric->stop_wc)); + } + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_sm_deleting( + struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event) +{ + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case BFA_FCS_PORT_SM_DELRPORT: + if (port->num_rports == 0) { + bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit); + bfa_fcs_lport_deleted(port); + } + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +/* + * fcs_port_pvt + */ + +/* + * Send AEN notification + */ +static void +bfa_fcs_lport_aen_post(struct bfa_fcs_lport_s *port, + enum bfa_lport_aen_event event) +{ + struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad; + struct bfa_aen_entry_s *aen_entry; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + aen_entry->aen_data.lport.vf_id = port->fabric->vf_id; + aen_entry->aen_data.lport.roles = port->port_cfg.roles; + aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn( + bfa_fcs_get_base_port(port->fcs)); + aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port); + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq, + BFA_AEN_CAT_LPORT, event); +} + +/* + * Send a LS reject + */ +static void +bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, + u8 reason_code, u8 reason_code_expl) +{ + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + struct bfa_rport_s *bfa_rport = NULL; + int len; + + bfa_trc(port->fcs, rx_fchs->d_id); + bfa_trc(port->fcs, rx_fchs->s_id); + + fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); + if (!fcxp) 
+ return; + + len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, reason_code, reason_code_expl); + + bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, + BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); +} + +/* + * Send a FCCT Reject + */ +static void +bfa_fcs_lport_send_fcgs_rjt(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl) +{ + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + struct bfa_rport_s *bfa_rport = NULL; + int len; + struct ct_hdr_s *rx_cthdr = (struct ct_hdr_s *)(rx_fchs + 1); + struct ct_hdr_s *ct_hdr; + + bfa_trc(port->fcs, rx_fchs->d_id); + bfa_trc(port->fcs, rx_fchs->s_id); + + fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); + if (!fcxp) + return; + + ct_hdr = bfa_fcxp_get_reqbuf(fcxp); + ct_hdr->gs_type = rx_cthdr->gs_type; + ct_hdr->gs_sub_type = rx_cthdr->gs_sub_type; + + len = fc_gs_rjt_build(&fchs, ct_hdr, rx_fchs->s_id, + bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, reason_code, reason_code_expl); + + bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, + BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); +} + +/* + * Process incoming plogi from a remote port. + */ +static void +bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs, struct fc_logi_s *plogi) +{ + struct bfa_fcs_rport_s *rport; + + bfa_trc(port->fcs, rx_fchs->d_id); + bfa_trc(port->fcs, rx_fchs->s_id); + + /* + * If min cfg mode is enabled, drop any incoming PLOGIs + */ + if (__fcs_min_cfg(port->fcs)) { + bfa_trc(port->fcs, rx_fchs->s_id); + return; + } + + if (fc_plogi_parse(rx_fchs) != FC_PARSE_OK) { + bfa_trc(port->fcs, rx_fchs->s_id); + /* + * send a LS reject + */ + bfa_fcs_lport_send_ls_rjt(port, rx_fchs, + FC_LS_RJT_RSN_PROTOCOL_ERROR, + FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS); + return; + } + + /* + * Direct Attach P2P mode : verify address assigned by the r-port. + */ + if ((!bfa_fcs_fabric_is_switched(port->fabric)) && + (memcmp((void *)&bfa_fcs_lport_get_pwwn(port), + (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) { + if (BFA_FCS_PID_IS_WKA(rx_fchs->d_id)) { + /* Address assigned to us cannot be a WKA */ + bfa_fcs_lport_send_ls_rjt(port, rx_fchs, + FC_LS_RJT_RSN_PROTOCOL_ERROR, + FC_LS_RJT_EXP_INVALID_NPORT_ID); + return; + } + port->pid = rx_fchs->d_id; + bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id); + } + + /* + * First, check if we know the device by pwwn. + */ + rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name); + if (rport) { + /* + * Direct Attach P2P mode : handle address assigned by r-port. + */ + if ((!bfa_fcs_fabric_is_switched(port->fabric)) && + (memcmp((void *)&bfa_fcs_lport_get_pwwn(port), + (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) { + port->pid = rx_fchs->d_id; + bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id); + rport->pid = rx_fchs->s_id; + } + bfa_fcs_rport_plogi(rport, rx_fchs, plogi); + return; + } + + /* + * Next, lookup rport by PID. + */ + rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id); + if (!rport) { + /* + * Inbound PLOGI from a new device. + */ + bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); + return; + } + + /* + * Rport is known only by PID. + */ + if (rport->pwwn) { + /* + * This is a different device with the same pid. Old device + * disappeared. Send implicit LOGO to old device. 
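+ * The incoming PLOGI is then treated as one from a brand new
+ * device and a fresh rport is created for it.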
+ */ + WARN_ON(rport->pwwn == plogi->port_name); + bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); + + /* + * Inbound PLOGI from a new device (with old PID). + */ + bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); + return; + } + + /* + * PLOGI crossing each other. + */ + WARN_ON(rport->pwwn != WWN_NULL); + bfa_fcs_rport_plogi(rport, rx_fchs, plogi); +} + +/* + * Process incoming ECHO. + * Since it does not require a login, it is processed here. + */ +static void +bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, + struct fc_echo_s *echo, u16 rx_len) +{ + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + struct bfa_rport_s *bfa_rport = NULL; + int len, pyld_len; + + bfa_trc(port->fcs, rx_fchs->s_id); + bfa_trc(port->fcs, rx_fchs->d_id); + + fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); + if (!fcxp) + return; + + len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id); + + /* + * Copy the payload (if any) from the echo frame + */ + pyld_len = rx_len - sizeof(struct fchs_s); + bfa_trc(port->fcs, rx_len); + bfa_trc(port->fcs, pyld_len); + + if (pyld_len > len) + memcpy(((u8 *) bfa_fcxp_get_reqbuf(fcxp)) + + sizeof(struct fc_echo_s), (echo + 1), + (pyld_len - sizeof(struct fc_echo_s))); + + bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, + BFA_FALSE, FC_CLASS_3, pyld_len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); +} + +/* + * Process incoming RNID. + * Since it does not require a login, it is processed here. + */ +static void +bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, + struct fc_rnid_cmd_s *rnid, u16 rx_len) +{ + struct fc_rnid_common_id_data_s common_id_data; + struct fc_rnid_general_topology_data_s gen_topo_data; + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + struct bfa_rport_s *bfa_rport = NULL; + u16 len; + u32 data_format; + + bfa_trc(port->fcs, rx_fchs->s_id); + bfa_trc(port->fcs, rx_fchs->d_id); + bfa_trc(port->fcs, rx_len); + + fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); + if (!fcxp) + return; + + /* + * Check Node Indentification Data Format + * We only support General Topology Discovery Format. + * For any other requested Data Formats, we return Common Node Id Data + * only, as per FC-LS. + */ + bfa_trc(port->fcs, rnid->node_id_data_format); + if (rnid->node_id_data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) { + data_format = RNID_NODEID_DATA_FORMAT_DISCOVERY; + /* + * Get General topology data for this port + */ + bfa_fs_port_get_gen_topo_data(port, &gen_topo_data); + } else { + data_format = RNID_NODEID_DATA_FORMAT_COMMON; + } + + /* + * Copy the Node Id Info + */ + common_id_data.port_name = bfa_fcs_lport_get_pwwn(port); + common_id_data.node_name = bfa_fcs_lport_get_nwwn(port); + + len = fc_rnid_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, data_format, &common_id_data, + &gen_topo_data); + + bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, + BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); +} + +/* + * Fill out General Topolpgy Discovery Data for RNID ELS. 
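+ * The local port is reported as a host platform with a single
+ * attached node.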
+ */ +static void +bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port, + struct fc_rnid_general_topology_data_s *gen_topo_data) +{ + memset(gen_topo_data, 0, + sizeof(struct fc_rnid_general_topology_data_s)); + + gen_topo_data->asso_type = cpu_to_be32(RNID_ASSOCIATED_TYPE_HOST); + gen_topo_data->phy_port_num = 0; /* @todo */ + gen_topo_data->num_attached_nodes = cpu_to_be32(1); +} + +static void +bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port) +{ + struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; + + bfa_trc(port->fcs, port->fabric->oper_type); + + __port_action[port->fabric->fab_type].init(port); + __port_action[port->fabric->fab_type].online(port); + + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); + BFA_LOG(KERN_WARNING, bfad, bfa_log_level, + "Logical port online: WWN = %s Role = %s\n", + lpwwn_buf, "Initiator"); + bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE); + + bfad->bfad_flags |= BFAD_PORT_ONLINE; +} + +static void +bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port) +{ + struct list_head *qe, *qen; + struct bfa_fcs_rport_s *rport; + struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; + + bfa_trc(port->fcs, port->fabric->oper_type); + + __port_action[port->fabric->fab_type].offline(port); + + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); + if (bfa_sm_cmp_state(port->fabric, + bfa_fcs_fabric_sm_online) == BFA_TRUE) { + BFA_LOG(KERN_WARNING, bfad, bfa_log_level, + "Logical port lost fabric connectivity: WWN = %s Role = %s\n", + lpwwn_buf, "Initiator"); + bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT); + } else { + BFA_LOG(KERN_WARNING, bfad, bfa_log_level, + "Logical port taken offline: WWN = %s Role = %s\n", + lpwwn_buf, "Initiator"); + bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE); + } + + list_for_each_safe(qe, qen, &port->rport_q) { + rport = (struct bfa_fcs_rport_s *) qe; + bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); + } +} + +static void +bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port) +{ + WARN_ON(1); +} + +static void +bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port) +{ + WARN_ON(1); +} + +static void +bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port) +{ + WARN_ON(1); +} + +static void +bfa_fcs_lport_abts_acc(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs) +{ + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + int len; + + bfa_trc(port->fcs, rx_fchs->d_id); + bfa_trc(port->fcs, rx_fchs->s_id); + + fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); + if (!fcxp) + return; + + len = fc_ba_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, 0); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, + BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); +} +static void +bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port) +{ + struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; + + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Logical port deleted: WWN = %s Role = %s\n", + lpwwn_buf, "Initiator"); + bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE); + + /* Base port will be deleted by the OS driver */ + if (port->vport) + bfa_fcs_vport_delete_comp(port->vport); + else + bfa_wc_down(&port->fabric->wc); +} + + +/* + * Unsolicited frame receive handling. 
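+ *
+ * ELS commands that do not need a prior login (PLOGI, ECHO, RNID)
+ * and BLS ABTS frames are handled directly, and unhandled FC-GS
+ * requests get a CT reject. Anything else is forwarded to the
+ * rport matching the frame's S_ID; unsupported ELS commands with
+ * no matching rport are rejected with an LS_RJT.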
+ */ +void +bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, + struct fchs_s *fchs, u16 len) +{ + u32 pid = fchs->s_id; + struct bfa_fcs_rport_s *rport = NULL; + struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); + + bfa_stats(lport, uf_recvs); + bfa_trc(lport->fcs, fchs->type); + + if (!bfa_fcs_lport_is_online(lport)) { + /* + * In direct attach topology, it is possible to get a PLOGI + * before the lport is online due to port feature + * (QoS/Trunk/FEC/CR), so send a rjt + */ + if ((fchs->type == FC_TYPE_ELS) && + (els_cmd->els_code == FC_ELS_PLOGI)) { + bfa_fcs_lport_send_ls_rjt(lport, fchs, + FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD, + FC_LS_RJT_EXP_NO_ADDL_INFO); + bfa_stats(lport, plogi_rcvd); + } else + bfa_stats(lport, uf_recv_drops); + + return; + } + + /* + * First, handle ELSs that donot require a login. + */ + /* + * Handle PLOGI first + */ + if ((fchs->type == FC_TYPE_ELS) && + (els_cmd->els_code == FC_ELS_PLOGI)) { + bfa_fcs_lport_plogi(lport, fchs, (struct fc_logi_s *) els_cmd); + return; + } + + /* + * Handle ECHO separately. + */ + if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_ECHO)) { + bfa_fcs_lport_echo(lport, fchs, + (struct fc_echo_s *)els_cmd, len); + return; + } + + /* + * Handle RNID separately. + */ + if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_RNID)) { + bfa_fcs_lport_rnid(lport, fchs, + (struct fc_rnid_cmd_s *) els_cmd, len); + return; + } + + if (fchs->type == FC_TYPE_BLS) { + if ((fchs->routing == FC_RTG_BASIC_LINK) && + (fchs->cat_info == FC_CAT_ABTS)) + bfa_fcs_lport_abts_acc(lport, fchs); + return; + } + + if (fchs->type == FC_TYPE_SERVICES) { + /* + * Unhandled FC-GS frames. Send a FC-CT Reject + */ + bfa_fcs_lport_send_fcgs_rjt(lport, fchs, CT_RSN_NOT_SUPP, + CT_NS_EXP_NOADDITIONAL); + return; + } + + /* + * look for a matching remote port ID + */ + rport = bfa_fcs_lport_get_rport_by_pid(lport, pid); + if (rport) { + bfa_trc(rport->fcs, fchs->s_id); + bfa_trc(rport->fcs, fchs->d_id); + bfa_trc(rport->fcs, fchs->type); + + bfa_fcs_rport_uf_recv(rport, fchs, len); + return; + } + + /* + * Only handles ELS frames for now. + */ + if (fchs->type != FC_TYPE_ELS) { + bfa_trc(lport->fcs, fchs->s_id); + bfa_trc(lport->fcs, fchs->d_id); + /* ignore type FC_TYPE_FC_FSS */ + if (fchs->type != FC_TYPE_FC_FSS) + bfa_sm_fault(lport->fcs, fchs->type); + return; + } + + bfa_trc(lport->fcs, els_cmd->els_code); + if (els_cmd->els_code == FC_ELS_RSCN) { + bfa_fcs_lport_scn_process_rscn(lport, fchs, len); + return; + } + + if (els_cmd->els_code == FC_ELS_LOGO) { + /* + * @todo Handle LOGO frames received. + */ + return; + } + + if (els_cmd->els_code == FC_ELS_PRLI) { + /* + * @todo Handle PRLI frames received. + */ + return; + } + + /* + * Unhandled ELS frames. Send a LS_RJT. 
+ */ + bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP, + FC_LS_RJT_EXP_NO_ADDL_INFO); + +} + +/* + * PID based Lookup for a R-Port in the Port R-Port Queue + */ +struct bfa_fcs_rport_s * +bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid) +{ + struct bfa_fcs_rport_s *rport; + struct list_head *qe; + + list_for_each(qe, &port->rport_q) { + rport = (struct bfa_fcs_rport_s *) qe; + if (rport->pid == pid) + return rport; + } + + bfa_trc(port->fcs, pid); + return NULL; +} + +/* + * OLD_PID based Lookup for a R-Port in the Port R-Port Queue + */ +struct bfa_fcs_rport_s * +bfa_fcs_lport_get_rport_by_old_pid(struct bfa_fcs_lport_s *port, u32 pid) +{ + struct bfa_fcs_rport_s *rport; + struct list_head *qe; + + list_for_each(qe, &port->rport_q) { + rport = (struct bfa_fcs_rport_s *) qe; + if (rport->old_pid == pid) + return rport; + } + + bfa_trc(port->fcs, pid); + return NULL; +} + +/* + * PWWN based Lookup for a R-Port in the Port R-Port Queue + */ +struct bfa_fcs_rport_s * +bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn) +{ + struct bfa_fcs_rport_s *rport; + struct list_head *qe; + + list_for_each(qe, &port->rport_q) { + rport = (struct bfa_fcs_rport_s *) qe; + if (wwn_is_equal(rport->pwwn, pwwn)) + return rport; + } + + bfa_trc(port->fcs, pwwn); + return NULL; +} + +/* + * NWWN based Lookup for a R-Port in the Port R-Port Queue + */ +struct bfa_fcs_rport_s * +bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn) +{ + struct bfa_fcs_rport_s *rport; + struct list_head *qe; + + list_for_each(qe, &port->rport_q) { + rport = (struct bfa_fcs_rport_s *) qe; + if (wwn_is_equal(rport->nwwn, nwwn)) + return rport; + } + + bfa_trc(port->fcs, nwwn); + return NULL; +} + +/* + * PWWN & PID based Lookup for a R-Port in the Port R-Port Queue + */ +struct bfa_fcs_rport_s * +bfa_fcs_lport_get_rport_by_qualifier(struct bfa_fcs_lport_s *port, + wwn_t pwwn, u32 pid) +{ + struct bfa_fcs_rport_s *rport; + struct list_head *qe; + + list_for_each(qe, &port->rport_q) { + rport = (struct bfa_fcs_rport_s *) qe; + if (wwn_is_equal(rport->pwwn, pwwn) && rport->pid == pid) + return rport; + } + + bfa_trc(port->fcs, pwwn); + return NULL; +} + +/* + * Called by rport module when new rports are discovered. + */ +void +bfa_fcs_lport_add_rport( + struct bfa_fcs_lport_s *port, + struct bfa_fcs_rport_s *rport) +{ + list_add_tail(&rport->qe, &port->rport_q); + port->num_rports++; +} + +/* + * Called by rport module to when rports are deleted. + */ +void +bfa_fcs_lport_del_rport( + struct bfa_fcs_lport_s *port, + struct bfa_fcs_rport_s *rport) +{ + WARN_ON(!bfa_q_is_on_q(&port->rport_q, rport)); + list_del(&rport->qe); + port->num_rports--; + + bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT); +} + +/* + * Called by fabric for base port when fabric login is complete. + * Called by vport for virtual ports when FDISC is complete. + */ +void +bfa_fcs_lport_online(struct bfa_fcs_lport_s *port) +{ + bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE); +} + +/* + * Called by fabric for base port when fabric goes offline. + * Called by vport for virtual ports when virtual port becomes offline. + */ +void +bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port) +{ + bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE); +} + +/* + * Called by fabric for base port and by vport for virtual ports + * when target mode driver is unloaded. 
+ */ +void +bfa_fcs_lport_stop(struct bfa_fcs_lport_s *port) +{ + bfa_sm_send_event(port, BFA_FCS_PORT_SM_STOP); +} + +/* + * Called by fabric to delete base lport and associated resources. + * + * Called by vport to delete lport and associated resources. Should call + * bfa_fcs_vport_delete_comp() for vports on completion. + */ +void +bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port) +{ + bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE); +} + +/* + * Return TRUE if port is online, else return FALSE + */ +bfa_boolean_t +bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port) +{ + return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online); +} + +/* + * Attach time initialization of logical ports. + */ +void +bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs, + u16 vf_id, struct bfa_fcs_vport_s *vport) +{ + lport->fcs = fcs; + lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id); + lport->vport = vport; + lport->lp_tag = (vport) ? vport->lps->bfa_tag : + lport->fabric->lps->bfa_tag; + + INIT_LIST_HEAD(&lport->rport_q); + lport->num_rports = 0; +} + +/* + * Logical port initialization of base or virtual port. + * Called by fabric for base port or by vport for virtual ports. + */ + +void +bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport, + struct bfa_lport_cfg_s *port_cfg) +{ + struct bfa_fcs_vport_s *vport = lport->vport; + struct bfad_s *bfad = (struct bfad_s *)lport->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; + + lport->port_cfg = *port_cfg; + + lport->bfad_port = bfa_fcb_lport_new(lport->fcs->bfad, lport, + lport->port_cfg.roles, + lport->fabric->vf_drv, + vport ? vport->vport_drv : NULL); + + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(lport)); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "New logical port created: WWN = %s Role = %s\n", + lpwwn_buf, "Initiator"); + bfa_fcs_lport_aen_post(lport, BFA_LPORT_AEN_NEW); + + bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit); + bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); +} + +void +bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, + char *symname) +{ + strcpy(port->port_cfg.sym_name.symname, symname); + + if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online)) + bfa_fcs_lport_ns_util_send_rspn_id( + BFA_FCS_GET_NS_FROM_PORT(port), NULL); +} + +/* + * fcs_lport_api + */ + +void +bfa_fcs_lport_get_attr( + struct bfa_fcs_lport_s *port, + struct bfa_lport_attr_s *port_attr) +{ + if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online)) + port_attr->pid = port->pid; + else + port_attr->pid = 0; + + port_attr->port_cfg = port->port_cfg; + + if (port->fabric) { + port_attr->port_type = port->fabric->oper_type; + port_attr->loopback = bfa_sm_cmp_state(port->fabric, + bfa_fcs_fabric_sm_loopback); + port_attr->authfail = + bfa_sm_cmp_state(port->fabric, + bfa_fcs_fabric_sm_auth_failed); + port_attr->fabric_name = bfa_fcs_lport_get_fabric_name(port); + memcpy(port_attr->fabric_ip_addr, + bfa_fcs_lport_get_fabric_ipaddr(port), + BFA_FCS_FABRIC_IPADDR_SZ); + + if (port->vport != NULL) { + port_attr->port_type = BFA_PORT_TYPE_VPORT; + port_attr->fpma_mac = + port->vport->lps->lp_mac; + } else { + port_attr->fpma_mac = + port->fabric->lps->lp_mac; + } + } else { + port_attr->port_type = BFA_PORT_TYPE_UNKNOWN; + port_attr->state = BFA_LPORT_UNINIT; + } +} + +/* + * bfa_fcs_lport_fab port fab functions + */ + +/* + * Called by port to initialize fabric services of the base port. 
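+ * Sets up the name server (NS), state change notification (SCN)
+ * and management server (MS) modules for the port.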
+ */ +static void +bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port) +{ + bfa_fcs_lport_ns_init(port); + bfa_fcs_lport_scn_init(port); + bfa_fcs_lport_ms_init(port); +} + +/* + * Called by port to notify transition to online state. + */ +static void +bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port) +{ + bfa_fcs_lport_ns_online(port); + bfa_fcs_lport_fab_scn_online(port); +} + +/* + * Called by port to notify transition to offline state. + */ +static void +bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port) +{ + bfa_fcs_lport_ns_offline(port); + bfa_fcs_lport_scn_offline(port); + bfa_fcs_lport_ms_offline(port); +} + +/* + * bfa_fcs_lport_n2n functions + */ + +/* + * Called by fcs/port to initialize N2N topology. + */ +static void +bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port) +{ +} + +/* + * Called by fcs/port to notify transition to online state. + */ +static void +bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n; + struct bfa_lport_cfg_s *pcfg = &port->port_cfg; + struct bfa_fcs_rport_s *rport; + + bfa_trc(port->fcs, pcfg->pwwn); + + /* + * If our PWWN is > than that of the r-port, we have to initiate PLOGI + * and assign an Address. if not, we need to wait for its PLOGI. + * + * If our PWWN is < than that of the remote port, it will send a PLOGI + * with the PIDs assigned. The rport state machine take care of this + * incoming PLOGI. + */ + if (memcmp + ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn, + sizeof(wwn_t)) > 0) { + port->pid = N2N_LOCAL_PID; + bfa_lps_set_n2n_pid(port->fabric->lps, N2N_LOCAL_PID); + /* + * First, check if we know the device by pwwn. + */ + rport = bfa_fcs_lport_get_rport_by_pwwn(port, + n2n_port->rem_port_wwn); + if (rport) { + bfa_trc(port->fcs, rport->pid); + bfa_trc(port->fcs, rport->pwwn); + rport->pid = N2N_REMOTE_PID; + bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); + return; + } + + /* + * In n2n there can be only one rport. Delete the old one + * whose pid should be zero, because it is offline. + */ + if (port->num_rports > 0) { + rport = bfa_fcs_lport_get_rport_by_pid(port, 0); + WARN_ON(rport == NULL); + if (rport) { + bfa_trc(port->fcs, rport->pwwn); + bfa_sm_send_event(rport, RPSM_EVENT_DELETE); + } + } + bfa_fcs_rport_create(port, N2N_REMOTE_PID); + } +} + +/* + * Called by fcs/port to notify transition to offline state. + */ +static void +bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n; + + bfa_trc(port->fcs, port->pid); + port->pid = 0; + n2n_port->rem_port_wwn = 0; + n2n_port->reply_oxid = 0; +} + +static void +bfa_fcport_get_loop_attr(struct bfa_fcs_lport_s *port) +{ + int i = 0, j = 0, bit = 0, alpa_bit = 0; + u8 k = 0; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(port->fcs->bfa); + + port->port_topo.ploop.alpabm_valid = fcport->alpabm_valid; + port->pid = fcport->myalpa; + port->pid = bfa_hton3b(port->pid); + + for (i = 0; i < (FC_ALPA_MAX / 8); i++) { + for (j = 0, alpa_bit = 0; j < 8; j++, alpa_bit++) { + bfa_trc(port->fcs->bfa, fcport->alpabm.alpa_bm[i]); + bit = (fcport->alpabm.alpa_bm[i] & (1 << (7 - j))); + if (bit) { + port->port_topo.ploop.alpa_pos_map[k] = + loop_alpa_map[(i * 8) + alpa_bit]; + k++; + bfa_trc(port->fcs->bfa, k); + bfa_trc(port->fcs->bfa, + port->port_topo.ploop.alpa_pos_map[k]); + } + } + } + port->port_topo.ploop.num_alpa = k; +} + +/* + * Called by fcs/port to initialize Loop topology. 
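+ * Nothing to set up here; remote ports on the loop are created
+ * from the ALPA position map when the port comes online.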
+ */ +static void +bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port) +{ +} + +/* + * Called by fcs/port to notify transition to online state. + */ +static void +bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port) +{ + u8 num_alpa = 0, alpabm_valid = 0; + struct bfa_fcs_rport_s *rport; + u8 *alpa_map = NULL; + int i = 0; + u32 pid; + + bfa_fcport_get_loop_attr(port); + + num_alpa = port->port_topo.ploop.num_alpa; + alpabm_valid = port->port_topo.ploop.alpabm_valid; + alpa_map = port->port_topo.ploop.alpa_pos_map; + + bfa_trc(port->fcs->bfa, port->pid); + bfa_trc(port->fcs->bfa, num_alpa); + if (alpabm_valid == 1) { + for (i = 0; i < num_alpa; i++) { + bfa_trc(port->fcs->bfa, alpa_map[i]); + if (alpa_map[i] != bfa_hton3b(port->pid)) { + pid = alpa_map[i]; + bfa_trc(port->fcs->bfa, pid); + rport = bfa_fcs_lport_get_rport_by_pid(port, + bfa_hton3b(pid)); + if (!rport) + rport = bfa_fcs_rport_create(port, + bfa_hton3b(pid)); + } + } + } else { + for (i = 0; i < MAX_ALPA_COUNT; i++) { + if (alpa_map[i] != port->pid) { + pid = loop_alpa_map[i]; + bfa_trc(port->fcs->bfa, pid); + rport = bfa_fcs_lport_get_rport_by_pid(port, + bfa_hton3b(pid)); + if (!rport) + rport = bfa_fcs_rport_create(port, + bfa_hton3b(pid)); + } + } + } +} + +/* + * Called by fcs/port to notify transition to offline state. + */ +static void +bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port) +{ +} + +#define BFA_FCS_FDMI_CMD_MAX_RETRIES 2 + +/* + * forward declarations + */ +static void bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_fdmi_timeout(void *arg); +static int bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, + u8 *pyld); +static u16 bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, + u8 *pyld); +static u16 bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, + u8 *pyld); +static u16 bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s * + fdmi, u8 *pyld); +static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, + struct bfa_fcs_fdmi_hba_attr_s *hba_attr); +static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, + struct bfa_fcs_fdmi_port_attr_s *port_attr); +u32 bfa_fcs_fdmi_convert_speed(enum bfa_port_speed pport_speed); + +/* + * fcs_fdmi_sm FCS FDMI state machine + */ + +/* + * FDMI State Machine events + */ +enum port_fdmi_event { + FDMISM_EVENT_PORT_ONLINE = 1, + FDMISM_EVENT_PORT_OFFLINE = 2, + FDMISM_EVENT_RSP_OK = 4, + FDMISM_EVENT_RSP_ERROR = 5, + FDMISM_EVENT_TIMEOUT = 6, + FDMISM_EVENT_RHBA_SENT = 7, + FDMISM_EVENT_RPRT_SENT = 8, + FDMISM_EVENT_RPA_SENT = 9, +}; + +static void bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void 
bfa_fcs_lport_fdmi_sm_sending_rhba( + struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_rhba_retry( + struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_sending_rprt( + struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_rprt_retry( + struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_sending_rpa( + struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_rpa_retry( + struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_disabled( + struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +/* + * Start in offline state - awaiting MS to send start. + */ +static void +bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + fdmi->retry_cnt = 0; + + switch (event) { + case FDMISM_EVENT_PORT_ONLINE: + if (port->vport) { + /* + * For Vports, register a new port. + */ + bfa_sm_set_state(fdmi, + bfa_fcs_lport_fdmi_sm_sending_rprt); + bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL); + } else { + /* + * For a base port, we should first register the HBA + * attribute. The HBA attribute also contains the base + * port registration. 
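+ * Once the RHBA is accepted, the port attributes are registered
+ * separately with an RPA. Failed registrations are retried up to
+ * BFA_FCS_FDMI_CMD_MAX_RETRIES times before the state machine
+ * falls back to offline.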
+ */ + bfa_sm_set_state(fdmi, + bfa_fcs_lport_fdmi_sm_sending_rhba); + bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL); + } + break; + + case FDMISM_EVENT_PORT_OFFLINE: + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_fdmi_sm_sending_rhba(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_RHBA_SENT: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rhba); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port), + &fdmi->fcxp_wqe); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_RSP_ERROR: + /* + * if max retries have not been reached, start timer for a + * delayed retry + */ + if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) { + bfa_sm_set_state(fdmi, + bfa_fcs_lport_fdmi_sm_rhba_retry); + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port), + &fdmi->timer, + bfa_fcs_lport_fdmi_timeout, fdmi, + BFA_FCS_RETRY_TIMEOUT); + } else { + /* + * set state to offline + */ + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + } + break; + + case FDMISM_EVENT_RSP_OK: + /* + * Initiate Register Port Attributes + */ + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa); + fdmi->retry_cnt = 0; + bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_fcxp_discard(fdmi->fcxp); + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_fdmi_sm_rhba_retry(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. 
Re-send + */ + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rhba); + bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + bfa_timer_stop(&fdmi->timer); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +/* +* RPRT : Register Port + */ +static void +bfa_fcs_lport_fdmi_sm_sending_rprt(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_RPRT_SENT: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rprt); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port), + &fdmi->fcxp_wqe); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_RSP_ERROR: + /* + * if max retries have not been reached, start timer for a + * delayed retry + */ + if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) { + bfa_sm_set_state(fdmi, + bfa_fcs_lport_fdmi_sm_rprt_retry); + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port), + &fdmi->timer, + bfa_fcs_lport_fdmi_timeout, fdmi, + BFA_FCS_RETRY_TIMEOUT); + + } else { + /* + * set state to offline + */ + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + fdmi->retry_cnt = 0; + } + break; + + case FDMISM_EVENT_RSP_OK: + fdmi->retry_cnt = 0; + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_fcxp_discard(fdmi->fcxp); + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_fdmi_sm_rprt_retry(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. 
Re-send + */ + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rprt); + bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + bfa_timer_stop(&fdmi->timer); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +/* + * Register Port Attributes + */ +static void +bfa_fcs_lport_fdmi_sm_sending_rpa(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_RPA_SENT: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port), + &fdmi->fcxp_wqe); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_RSP_ERROR: + /* + * if max retries have not been reached, start timer for a + * delayed retry + */ + if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) { + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa_retry); + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port), + &fdmi->timer, + bfa_fcs_lport_fdmi_timeout, fdmi, + BFA_FCS_RETRY_TIMEOUT); + } else { + /* + * set state to offline + */ + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + fdmi->retry_cnt = 0; + } + break; + + case FDMISM_EVENT_RSP_OK: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online); + fdmi->retry_cnt = 0; + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_fcxp_discard(fdmi->fcxp); + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_fdmi_sm_rpa_retry(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. Re-send + */ + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa); + bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + bfa_timer_stop(&fdmi->timer); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} +/* + * FDMI is disabled state. + */ +static void +bfa_fcs_lport_fdmi_sm_disabled(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + /* No op State. It can only be enabled at Driver Init. */ +} + +/* +* RHBA : Register HBA Attributes. 
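+*
+* The request payload is a CT header followed by the HBA identifier
+* (PWWN), the registered port list and an HBA attribute block. Each
+* attribute in the block is a TLV:
+*
+*   type  (2 bytes)  FDMI_HBA_ATTRIB_* code
+*   len   (2 bytes)  total size of type, len and value
+*   value            attribute data, padded to a 4-byte multiple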
+ */ +static void +bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg; + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct fchs_s fchs; + int len, attr_len; + struct bfa_fcxp_s *fcxp; + u8 *pyld; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe, + bfa_fcs_lport_fdmi_send_rhba, fdmi, BFA_TRUE); + return; + } + fdmi->fcxp = fcxp; + + pyld = bfa_fcxp_get_reqbuf(fcxp); + memset(pyld, 0, FC_MAX_PDUSZ); + + len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), + FDMI_RHBA); + + attr_len = + bfa_fcs_lport_fdmi_build_rhba_pyld(fdmi, + (u8 *) ((struct ct_hdr_s *) pyld + + 1)); + if (attr_len < 0) + return; + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, (len + attr_len), &fchs, + bfa_fcs_lport_fdmi_rhba_response, (void *)fdmi, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT); +} + +static int +bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr; + struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld; + struct fdmi_attr_s *attr; + int len; + u8 *curr_ptr; + u16 templen, count; + + fcs_hba_attr = kzalloc(sizeof(*fcs_hba_attr), GFP_KERNEL); + if (!fcs_hba_attr) + return -ENOMEM; + + /* + * get hba attributes + */ + bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr); + + rhba->hba_id = bfa_fcs_lport_get_pwwn(port); + rhba->port_list.num_ports = cpu_to_be32(1); + rhba->port_list.port_entry = bfa_fcs_lport_get_pwwn(port); + + len = sizeof(rhba->hba_id) + sizeof(rhba->port_list); + + count = 0; + len += sizeof(rhba->hba_attr_blk.attr_count); + + /* + * fill out the invididual entries of the HBA attrib Block + */ + curr_ptr = (u8 *) &rhba->hba_attr_blk.hba_attr; + + /* + * Node Name + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME); + templen = sizeof(wwn_t); + memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), templen); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + /* + * Manufacturer + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER); + templen = (u16) strlen(fcs_hba_attr->manufacturer); + memcpy(attr->value, fcs_hba_attr->manufacturer, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + /* + * Serial Number + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM); + templen = (u16) strlen(fcs_hba_attr->serial_num); + memcpy(attr->value, fcs_hba_attr->serial_num, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + /* + * Model + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL); + templen = (u16) strlen(fcs_hba_attr->model); + memcpy(attr->value, fcs_hba_attr->model, templen); + templen = 
fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + /* + * Model Desc + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC); + templen = (u16) strlen(fcs_hba_attr->model_desc); + memcpy(attr->value, fcs_hba_attr->model_desc, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + /* + * H/W Version + */ + if (fcs_hba_attr->hw_version[0] != '\0') { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION); + templen = (u16) strlen(fcs_hba_attr->hw_version); + memcpy(attr->value, fcs_hba_attr->hw_version, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + } + + /* + * Driver Version + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION); + templen = (u16) strlen(fcs_hba_attr->driver_version); + memcpy(attr->value, fcs_hba_attr->driver_version, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + /* + * Option Rom Version + */ + if (fcs_hba_attr->option_rom_ver[0] != '\0') { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION); + templen = (u16) strlen(fcs_hba_attr->option_rom_ver); + memcpy(attr->value, fcs_hba_attr->option_rom_ver, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + } + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION); + templen = (u16) strlen(fcs_hba_attr->fw_version); + memcpy(attr->value, fcs_hba_attr->fw_version, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + /* + * OS Name + */ + if (fcs_hba_attr->os_name[0] != '\0') { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME); + templen = (u16) strlen(fcs_hba_attr->os_name); + memcpy(attr->value, fcs_hba_attr->os_name, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + } + + /* + * MAX_CT_PAYLOAD + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT); + templen = sizeof(fcs_hba_attr->max_ct_pyld); + memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + /* + * Send extended attributes ( FOS 7.1 support ) + */ + if 
(fdmi->retry_cnt == 0) { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODE_SYM_NAME); + templen = sizeof(fcs_hba_attr->node_sym_name); + memcpy(attr->value, &fcs_hba_attr->node_sym_name, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_VENDOR_ID); + templen = sizeof(fcs_hba_attr->vendor_info); + memcpy(attr->value, &fcs_hba_attr->vendor_info, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NUM_PORTS); + templen = sizeof(fcs_hba_attr->num_ports); + memcpy(attr->value, &fcs_hba_attr->num_ports, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FABRIC_NAME); + templen = sizeof(fcs_hba_attr->fabric_name); + memcpy(attr->value, &fcs_hba_attr->fabric_name, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_BIOS_VER); + templen = sizeof(fcs_hba_attr->bios_ver); + memcpy(attr->value, &fcs_hba_attr->bios_ver, templen); + templen = fc_roundup(attr->len, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + } + + /* + * Update size of payload + */ + len += ((sizeof(attr->type) + sizeof(attr->len)) * count); + + rhba->hba_attr_blk.attr_count = cpu_to_be32(count); + + kfree(fcs_hba_attr); + + return len; +} + +static void +bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = + (struct bfa_fcs_lport_fdmi_s *) cbarg; + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct ct_hdr_s *cthdr = NULL; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); + return; + } + + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); +} + +/* +* RPRT : Register Port + */ +static void +bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg; + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct fchs_s fchs; + u16 len, attr_len; + struct bfa_fcxp_s *fcxp; + u8 
*pyld; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe, + bfa_fcs_lport_fdmi_send_rprt, fdmi, BFA_TRUE); + return; + } + fdmi->fcxp = fcxp; + + pyld = bfa_fcxp_get_reqbuf(fcxp); + memset(pyld, 0, FC_MAX_PDUSZ); + + len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), + FDMI_RPRT); + + attr_len = + bfa_fcs_lport_fdmi_build_rprt_pyld(fdmi, + (u8 *) ((struct ct_hdr_s *) pyld + + 1)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len + attr_len, &fchs, + bfa_fcs_lport_fdmi_rprt_response, (void *)fdmi, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT); +} + +/* + * This routine builds Port Attribute Block that used in RPA, RPRT commands. + */ +static u16 +bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi, + u8 *pyld) +{ + struct bfa_fcs_fdmi_port_attr_s fcs_port_attr; + struct fdmi_port_attr_s *port_attrib = (struct fdmi_port_attr_s *) pyld; + struct fdmi_attr_s *attr; + u8 *curr_ptr; + u16 len; + u8 count = 0; + u16 templen; + + /* + * get port attributes + */ + bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr); + + len = sizeof(port_attrib->attr_count); + + /* + * fill out the invididual entries + */ + curr_ptr = (u8 *) &port_attrib->port_attr; + + /* + * FC4 Types + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES); + templen = sizeof(fcs_port_attr.supp_fc4_types); + memcpy(attr->value, fcs_port_attr.supp_fc4_types, templen); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = + cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + /* + * Supported Speed + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED); + templen = sizeof(fcs_port_attr.supp_speed); + memcpy(attr->value, &fcs_port_attr.supp_speed, templen); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = + cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + /* + * current Port Speed + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED); + templen = sizeof(fcs_port_attr.curr_speed); + memcpy(attr->value, &fcs_port_attr.curr_speed, templen); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + /* + * max frame size + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE); + templen = sizeof(fcs_port_attr.max_frm_size); + memcpy(attr->value, &fcs_port_attr.max_frm_size, templen); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + /* + * OS Device Name + */ + if (fcs_port_attr.os_device_name[0] != '\0') { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME); + templen = (u16) strlen(fcs_port_attr.os_device_name); + memcpy(attr->value, fcs_port_attr.os_device_name, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + 
sizeof(templen)); + } + /* + * Host Name + */ + if (fcs_port_attr.host_name[0] != '\0') { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME); + templen = (u16) strlen(fcs_port_attr.host_name); + memcpy(attr->value, fcs_port_attr.host_name, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + } + + if (fdmi->retry_cnt == 0) { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_NODE_NAME); + templen = sizeof(fcs_port_attr.node_name); + memcpy(attr->value, &fcs_port_attr.node_name, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_NAME); + templen = sizeof(fcs_port_attr.port_name); + memcpy(attr->value, &fcs_port_attr.port_name, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + if (fcs_port_attr.port_sym_name.symname[0] != '\0') { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = + cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SYM_NAME); + templen = sizeof(fcs_port_attr.port_sym_name); + memcpy(attr->value, + &fcs_port_attr.port_sym_name, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + + sizeof(attr->type) + sizeof(templen)); + } + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_TYPE); + templen = sizeof(fcs_port_attr.port_type); + memcpy(attr->value, &fcs_port_attr.port_type, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_COS); + templen = sizeof(fcs_port_attr.scos); + memcpy(attr->value, &fcs_port_attr.scos, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_FAB_NAME); + templen = sizeof(fcs_port_attr.port_fabric_name); + memcpy(attr->value, &fcs_port_attr.port_fabric_name, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_FC4_TYPE); + templen = sizeof(fcs_port_attr.port_act_fc4_type); + memcpy(attr->value, fcs_port_attr.port_act_fc4_type, + templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + 
sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_STATE); + templen = sizeof(fcs_port_attr.port_state); + memcpy(attr->value, &fcs_port_attr.port_state, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_NUM_RPRT); + templen = sizeof(fcs_port_attr.num_ports); + memcpy(attr->value, &fcs_port_attr.num_ports, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + } + + /* + * Update size of payload + */ + port_attrib->attr_count = cpu_to_be32(count); + len += ((sizeof(attr->type) + sizeof(attr->len)) * count); + return len; +} + +static u16 +bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct fdmi_rprt_s *rprt = (struct fdmi_rprt_s *) pyld; + u16 len; + + rprt->hba_id = bfa_fcs_lport_get_pwwn(bfa_fcs_get_base_port(port->fcs)); + rprt->port_name = bfa_fcs_lport_get_pwwn(port); + + len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi, + (u8 *) &rprt->port_attr_blk); + + len += sizeof(rprt->hba_id) + sizeof(rprt->port_name); + + return len; +} + +static void +bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = + (struct bfa_fcs_lport_fdmi_s *) cbarg; + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct ct_hdr_s *cthdr = NULL; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); + return; + } + + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); +} + +/* +* RPA : Register Port Attributes. + */ +static void +bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg; + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct fchs_s fchs; + u16 len, attr_len; + struct bfa_fcxp_s *fcxp; + u8 *pyld; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? 
fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe, + bfa_fcs_lport_fdmi_send_rpa, fdmi, BFA_TRUE); + return; + } + fdmi->fcxp = fcxp; + + pyld = bfa_fcxp_get_reqbuf(fcxp); + memset(pyld, 0, FC_MAX_PDUSZ); + + len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), + FDMI_RPA); + + attr_len = bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi, + (u8 *) ((struct ct_hdr_s *) pyld + 1)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len + attr_len, &fchs, + bfa_fcs_lport_fdmi_rpa_response, (void *)fdmi, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + bfa_sm_send_event(fdmi, FDMISM_EVENT_RPA_SENT); +} + +static u16 +bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct fdmi_rpa_s *rpa = (struct fdmi_rpa_s *) pyld; + u16 len; + + rpa->port_name = bfa_fcs_lport_get_pwwn(port); + + len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi, + (u8 *) &rpa->port_attr_blk); + + len += sizeof(rpa->port_name); + + return len; +} + +static void +bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = + (struct bfa_fcs_lport_fdmi_s *) cbarg; + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct ct_hdr_s *cthdr = NULL; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); + return; + } + + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); +} + +static void +bfa_fcs_lport_fdmi_timeout(void *arg) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = (struct bfa_fcs_lport_fdmi_s *) arg; + + bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT); +} + +static void +bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, + struct bfa_fcs_fdmi_hba_attr_s *hba_attr) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; + struct bfa_fcs_fdmi_port_attr_s fcs_port_attr; + + memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); + + bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc, + hba_attr->manufacturer); + bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc, + hba_attr->serial_num); + bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, + hba_attr->model); + bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, + hba_attr->model_desc); + bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc, + hba_attr->hw_version); + bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc, + hba_attr->option_rom_ver); + bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc, + hba_attr->fw_version); + + strscpy(hba_attr->driver_version, (char *)driver_info->version, + sizeof(hba_attr->driver_version)); + + strscpy(hba_attr->os_name, driver_info->host_os_name, + sizeof(hba_attr->os_name)); + + /* + * If there is a patch level, append it + * to the os name along with a separator + */ + if (driver_info->host_os_patch[0] != '\0') { + strlcat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, + sizeof(hba_attr->os_name)); 
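+ /* the separator is in place; now append the patch level string itself */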
+ strlcat(hba_attr->os_name, driver_info->host_os_patch, + sizeof(hba_attr->os_name)); + } + + /* Retrieve the max frame size from the port attr */ + bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr); + hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size; + + strscpy(hba_attr->node_sym_name.symname, + port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN); + strcpy(hba_attr->vendor_info, "QLogic"); + hba_attr->num_ports = + cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc)); + hba_attr->fabric_name = port->fabric->lps->pr_nwwn; + strscpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN); + +} + +static void +bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, + struct bfa_fcs_fdmi_port_attr_s *port_attr) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; + struct bfa_port_attr_s pport_attr; + struct bfa_lport_attr_s lport_attr; + + memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s)); + + /* + * get pport attributes from hal + */ + bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); + + /* + * get FC4 type Bitmask + */ + fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->supp_fc4_types); + + /* + * Supported Speeds + */ + switch (pport_attr.speed_supported) { + case BFA_PORT_SPEED_16GBPS: + port_attr->supp_speed = + cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_16G); + break; + + case BFA_PORT_SPEED_10GBPS: + port_attr->supp_speed = + cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_10G); + break; + + case BFA_PORT_SPEED_8GBPS: + port_attr->supp_speed = + cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_8G); + break; + + case BFA_PORT_SPEED_4GBPS: + port_attr->supp_speed = + cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_4G); + break; + + default: + bfa_sm_fault(port->fcs, pport_attr.speed_supported); + } + + /* + * Current Speed + */ + port_attr->curr_speed = cpu_to_be32( + bfa_fcs_fdmi_convert_speed(pport_attr.speed)); + + /* + * Max PDU Size. + */ + port_attr->max_frm_size = cpu_to_be32(pport_attr.pport_cfg.maxfrsize); + + /* + * OS device Name + */ + strscpy(port_attr->os_device_name, driver_info->os_device_name, + sizeof(port_attr->os_device_name)); + + /* + * Host name + */ + strscpy(port_attr->host_name, driver_info->host_machine_name, + sizeof(port_attr->host_name)); + + port_attr->node_name = bfa_fcs_lport_get_nwwn(port); + port_attr->port_name = bfa_fcs_lport_get_pwwn(port); + + strscpy(port_attr->port_sym_name.symname, + bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN); + bfa_fcs_lport_get_attr(port, &lport_attr); + port_attr->port_type = cpu_to_be32(lport_attr.port_type); + port_attr->scos = pport_attr.cos_supported; + port_attr->port_fabric_name = port->fabric->lps->pr_nwwn; + fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->port_act_fc4_type); + port_attr->port_state = cpu_to_be32(pport_attr.port_state); + port_attr->num_ports = cpu_to_be32(port->num_rports); +} + +/* + * Convert BFA speed to FDMI format. 
+ */ +u32 +bfa_fcs_fdmi_convert_speed(bfa_port_speed_t pport_speed) +{ + u32 ret; + + switch (pport_speed) { + case BFA_PORT_SPEED_1GBPS: + case BFA_PORT_SPEED_2GBPS: + ret = pport_speed; + break; + + case BFA_PORT_SPEED_4GBPS: + ret = FDMI_TRANS_SPEED_4G; + break; + + case BFA_PORT_SPEED_8GBPS: + ret = FDMI_TRANS_SPEED_8G; + break; + + case BFA_PORT_SPEED_10GBPS: + ret = FDMI_TRANS_SPEED_10G; + break; + + case BFA_PORT_SPEED_16GBPS: + ret = FDMI_TRANS_SPEED_16G; + break; + + default: + ret = FDMI_TRANS_SPEED_UNKNOWN; + } + return ret; +} + +void +bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi; + + fdmi->ms = ms; + if (ms->port->fcs->fdmi_enabled) + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + else + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_disabled); +} + +void +bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi; + + fdmi->ms = ms; + bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_OFFLINE); +} + +void +bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi; + + fdmi->ms = ms; + bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_ONLINE); +} + +#define BFA_FCS_MS_CMD_MAX_RETRIES 2 + +/* + * forward declarations + */ +static void bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ms_timeout(void *arg); +static void bfa_fcs_lport_ms_plogi_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); + +static void bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ms_gmal_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ms_gfn_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +/* + * fcs_ms_sm FCS MS state machine + */ + +/* + * MS State Machine events + */ +enum port_ms_event { + MSSM_EVENT_PORT_ONLINE = 1, + MSSM_EVENT_PORT_OFFLINE = 2, + MSSM_EVENT_RSP_OK = 3, + MSSM_EVENT_RSP_ERROR = 4, + MSSM_EVENT_TIMEOUT = 5, + MSSM_EVENT_FCXP_SENT = 6, + MSSM_EVENT_PORT_FABRIC_RSCN = 7 +}; + +static void bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void 
bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +/* + * Start in offline state - awaiting NS to send start. + */ +static void +bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_PORT_ONLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending); + bfa_fcs_lport_ms_send_plogi(ms, NULL); + break; + + case MSSM_EVENT_PORT_OFFLINE: + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_FCXP_SENT: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port), + &ms->fcxp_wqe); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_RSP_ERROR: + /* + * Start timer for a delayed retry + */ + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_retry); + ms->port->stats.ms_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), + &ms->timer, bfa_fcs_lport_ms_timeout, ms, + BFA_FCS_RETRY_TIMEOUT); + break; + + case MSSM_EVENT_RSP_OK: + /* + * since plogi is done, now invoke MS related sub-modules + */ + bfa_fcs_lport_fdmi_online(ms); + + /* + * if this is a Vport, go to online state. + */ + if (ms->port->vport) { + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online); + break; + } + + /* + * For a base port we need to get the + * switch's IP address. + */ + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending); + bfa_fcs_lport_ms_send_gmal(ms, NULL); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_fcxp_discard(ms->fcxp); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. 
Re-send + */ + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending); + bfa_fcs_lport_ms_send_plogi(ms, NULL); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_timer_stop(&ms->timer); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + break; + + case MSSM_EVENT_PORT_FABRIC_RSCN: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending); + ms->retry_cnt = 0; + bfa_fcs_lport_ms_send_gfn(ms, NULL); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_FCXP_SENT: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port), + &ms->fcxp_wqe); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_RSP_ERROR: + /* + * Start timer for a delayed retry + */ + if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) { + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_retry); + ms->port->stats.ms_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), + &ms->timer, bfa_fcs_lport_ms_timeout, ms, + BFA_FCS_RETRY_TIMEOUT); + } else { + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending); + bfa_fcs_lport_ms_send_gfn(ms, NULL); + ms->retry_cnt = 0; + } + break; + + case MSSM_EVENT_RSP_OK: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending); + bfa_fcs_lport_ms_send_gfn(ms, NULL); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_fcxp_discard(ms->fcxp); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. Re-send + */ + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending); + bfa_fcs_lport_ms_send_gmal(ms, NULL); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_timer_stop(&ms->timer); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} +/* + * ms_pvt MS local functions + */ + +static void +bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ms_s *ms = ms_cbarg; + bfa_fcs_lport_t *port = ms->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(port->fcs, port->pid); + + fcxp = fcxp_alloced ? 
fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, + bfa_fcs_lport_ms_send_gmal, ms, BFA_TRUE); + return; + } + ms->fcxp = fcxp; + + len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), + port->fabric->lps->pr_nwwn); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ms_gmal_response, (void *)ms, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); +} + +static void +bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg; + bfa_fcs_lport_t *port = ms->port; + struct ct_hdr_s *cthdr = NULL; + struct fcgs_gmal_resp_s *gmal_resp; + struct fcgs_gmal_entry_s *gmal_entry; + u32 num_entries; + u8 *rsp_str; + + bfa_trc(port->fcs, req_status); + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1); + + num_entries = be32_to_cpu(gmal_resp->ms_len); + if (num_entries == 0) { + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); + return; + } + /* + * The response could contain multiple Entries. + * Entries for SNMP interface, etc. + * We look for the entry with a telnet prefix. + * First "http://" entry refers to IP addr + */ + + gmal_entry = (struct fcgs_gmal_entry_s *)gmal_resp->ms_ma; + while (num_entries > 0) { + if (strncmp(gmal_entry->prefix, + CT_GMAL_RESP_PREFIX_HTTP, + sizeof(gmal_entry->prefix)) == 0) { + + /* + * if the IP address is terminating with a '/', + * remove it. + * Byte 0 consists of the length of the string. 
+ */ + rsp_str = &(gmal_entry->prefix[0]); + if (rsp_str[gmal_entry->len-1] == '/') + rsp_str[gmal_entry->len-1] = 0; + + /* copy IP Address to fabric */ + strscpy(bfa_fcs_lport_get_fabric_ipaddr(port), + gmal_entry->ip_addr, + BFA_FCS_FABRIC_IPADDR_SZ); + break; + } else { + --num_entries; + ++gmal_entry; + } + } + + bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK); + return; + } + + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); +} + +static void +bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_FCXP_SENT: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port), + &ms->fcxp_wqe); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_RSP_ERROR: + /* + * Start timer for a delayed retry + */ + if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) { + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_retry); + ms->port->stats.ms_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), + &ms->timer, bfa_fcs_lport_ms_timeout, ms, + BFA_FCS_RETRY_TIMEOUT); + } else { + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online); + ms->retry_cnt = 0; + } + break; + + case MSSM_EVENT_RSP_OK: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_fcxp_discard(ms->fcxp); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. Re-send + */ + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending); + bfa_fcs_lport_ms_send_gfn(ms, NULL); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_timer_stop(&ms->timer); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} +/* + * ms_pvt MS local functions + */ + +static void +bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ms_s *ms = ms_cbarg; + bfa_fcs_lport_t *port = ms->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(port->fcs, port->pid); + + fcxp = fcxp_alloced ? 
fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, + bfa_fcs_lport_ms_send_gfn, ms, BFA_TRUE); + return; + } + ms->fcxp = fcxp; + + len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), + port->fabric->lps->pr_nwwn); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ms_gfn_response, (void *)ms, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); +} + +static void +bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg; + bfa_fcs_lport_t *port = ms->port; + struct ct_hdr_s *cthdr = NULL; + wwn_t *gfn_resp; + + bfa_trc(port->fcs, req_status); + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + gfn_resp = (wwn_t *)(cthdr + 1); + /* check if it has actually changed */ + if ((memcmp((void *)&bfa_fcs_lport_get_fabric_name(port), + gfn_resp, sizeof(wwn_t)) != 0)) { + bfa_fcs_fabric_set_fabric_name(port->fabric, *gfn_resp); + } + bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK); + return; + } + + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); +} + +/* + * ms_pvt MS local functions + */ + +static void +bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ms_s *ms = ms_cbarg; + struct bfa_fcs_lport_s *port = ms->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(port->fcs, port->pid); + + fcxp = fcxp_alloced ? 
fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + port->stats.ms_plogi_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, + bfa_fcs_lport_ms_send_plogi, ms, BFA_TRUE); + return; + } + ms->fcxp = fcxp; + + len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_hton3b(FC_MGMT_SERVER), + bfa_fcs_lport_get_fcid(port), 0, + port->port_cfg.pwwn, port->port_cfg.nwwn, + bfa_fcport_get_maxfrsize(port->fcs->bfa), + bfa_fcport_get_rx_bbcredit(port->fcs->bfa)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ms_plogi_response, (void *)ms, + FC_MAX_PDUSZ, FC_ELS_TOV); + + port->stats.ms_plogi_sent++; + bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); +} + +static void +bfa_fcs_lport_ms_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg; + struct bfa_fcs_lport_s *port = ms->port; + struct fc_els_cmd_s *els_cmd; + struct fc_ls_rjt_s *ls_rjt; + + bfa_trc(port->fcs, req_status); + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + port->stats.ms_plogi_rsp_err++; + bfa_trc(port->fcs, req_status); + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); + return; + } + + els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); + + switch (els_cmd->els_code) { + + case FC_ELS_ACC: + if (rsp_len < sizeof(struct fc_logi_s)) { + bfa_trc(port->fcs, rsp_len); + port->stats.ms_plogi_acc_err++; + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); + break; + } + port->stats.ms_plogi_accepts++; + bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK); + break; + + case FC_ELS_LS_RJT: + ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); + + bfa_trc(port->fcs, ls_rjt->reason_code); + bfa_trc(port->fcs, ls_rjt->reason_code_expl); + + port->stats.ms_rejects++; + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); + break; + + default: + port->stats.ms_plogi_unknown_rsp++; + bfa_trc(port->fcs, els_cmd->els_code); + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); + } +} + +static void +bfa_fcs_lport_ms_timeout(void *arg) +{ + struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) arg; + + ms->port->stats.ms_timeouts++; + bfa_sm_send_event(ms, MSSM_EVENT_TIMEOUT); +} + + +void +bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); + + ms->port = port; + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + + /* + * Invoke init routines of sub modules. + */ + bfa_fcs_lport_fdmi_init(ms); +} + +void +bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); + + ms->port = port; + bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE); + bfa_fcs_lport_fdmi_offline(ms); +} + +void +bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); + + ms->port = port; + bfa_sm_send_event(ms, MSSM_EVENT_PORT_ONLINE); +} +void +bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); + + /* todo. 
Handle this only when in Online state */ + if (bfa_sm_cmp_state(ms, bfa_fcs_lport_ms_sm_online)) + bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN); +} + +/* + * @page ns_sm_info VPORT NS State Machine + * + * @section ns_sm_interactions VPORT NS State Machine Interactions + * + * @section ns_sm VPORT NS State Machine + * img ns_sm.jpg + */ + +/* + * forward declarations + */ +static void bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ns_timeout(void *arg); +static void bfa_fcs_lport_ns_plogi_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_ns_rft_id_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_ns_rff_id_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_ns_rnn_id_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_ns_process_gidft_pids( + struct bfa_fcs_lport_s *port, + u32 *pid_buf, u32 n_pids); + +static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port); +/* + * fcs_ns_sm FCS nameserver interface state machine + */ + +/* + * VPort NS State Machine events + */ +enum vport_ns_event { + NSSM_EVENT_PORT_ONLINE = 1, + NSSM_EVENT_PORT_OFFLINE = 2, + NSSM_EVENT_PLOGI_SENT = 3, + NSSM_EVENT_RSP_OK = 4, + NSSM_EVENT_RSP_ERROR = 5, + NSSM_EVENT_TIMEOUT = 6, + NSSM_EVENT_NS_QUERY = 7, + NSSM_EVENT_RSPNID_SENT = 8, + NSSM_EVENT_RFTID_SENT = 9, + NSSM_EVENT_RFFID_SENT = 10, + NSSM_EVENT_GIDFT_SENT = 11, + NSSM_EVENT_RNNID_SENT = 12, + NSSM_EVENT_RSNN_NN_SENT = 13, +}; + +static void bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void 
bfa_fcs_lport_ns_sm_sending_rspn_id( + struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_sending_rft_id( + struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_sending_rff_id( + struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_sending_gid_ft( + struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_sending_rnn_id( + struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_sending_rsnn_nn( + struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rsnn_nn_retry( + struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +/* + * Start in offline state - awaiting linkup + */ +static void +bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_PORT_ONLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending); + bfa_fcs_lport_ns_send_plogi(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_PLOGI_SENT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->fcxp_wqe); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RSP_ERROR: + /* + * Start timer for a delayed retry + */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_retry); + ns->port->stats.ns_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->timer, bfa_fcs_lport_ns_timeout, ns, + 
BFA_FCS_RETRY_TIMEOUT); + break; + + case NSSM_EVENT_RSP_OK: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id); + ns->num_rnnid_retries = 0; + bfa_fcs_lport_ns_send_rnn_id(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_discard(ns->fcxp); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. Re-send + */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending); + bfa_fcs_lport_ns_send_plogi(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_timer_stop(&ns->timer); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_sending_rnn_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RNNID_SENT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->fcxp_wqe); + break; + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RSP_OK: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn); + ns->num_rnnid_retries = 0; + ns->num_rsnn_nn_retries = 0; + bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL); + break; + + case NSSM_EVENT_RSP_ERROR: + if (ns->num_rnnid_retries < BFA_FCS_MAX_NS_RETRIES) { + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id_retry); + ns->port->stats.ns_retries++; + ns->num_rnnid_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->timer, bfa_fcs_lport_ns_timeout, ns, + BFA_FCS_RETRY_TIMEOUT); + } else { + bfa_sm_set_state(ns, + bfa_fcs_lport_ns_sm_sending_rspn_id); + bfa_fcs_lport_ns_send_rspn_id(ns, NULL); + } + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_fcxp_discard(ns->fcxp); + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_TIMEOUT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id); + bfa_fcs_lport_ns_send_rnn_id(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_timer_stop(&ns->timer); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_sending_rsnn_nn(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RSNN_NN_SENT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + 
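+ /* port went offline while waiting to send RSNN_NN: cancel the queued FCXP allocation request */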
bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->fcxp_wqe); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RSP_OK: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id); + ns->num_rsnn_nn_retries = 0; + bfa_fcs_lport_ns_send_rspn_id(ns, NULL); + break; + + case NSSM_EVENT_RSP_ERROR: + if (ns->num_rsnn_nn_retries < BFA_FCS_MAX_NS_RETRIES) { + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn_retry); + ns->port->stats.ns_retries++; + ns->num_rsnn_nn_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->timer, bfa_fcs_lport_ns_timeout, + ns, BFA_FCS_RETRY_TIMEOUT); + } else { + bfa_sm_set_state(ns, + bfa_fcs_lport_ns_sm_sending_rspn_id); + bfa_fcs_lport_ns_send_rspn_id(ns, NULL); + } + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_discard(ns->fcxp); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rsnn_nn_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_TIMEOUT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn); + bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_timer_stop(&ns->timer); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_sending_rspn_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RSPNID_SENT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->fcxp_wqe); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RSP_ERROR: + /* + * Start timer for a delayed retry + */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry); + ns->port->stats.ns_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->timer, bfa_fcs_lport_ns_timeout, ns, + BFA_FCS_RETRY_TIMEOUT); + break; + + case NSSM_EVENT_RSP_OK: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id); + bfa_fcs_lport_ns_send_rft_id(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_fcxp_discard(ns->fcxp); + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. 
Re-send + */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id); + bfa_fcs_lport_ns_send_rspn_id(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_timer_stop(&ns->timer); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_sending_rft_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RFTID_SENT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->fcxp_wqe); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RSP_OK: + /* Now move to register FC4 Features */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id); + bfa_fcs_lport_ns_send_rff_id(ns, NULL); + break; + + case NSSM_EVENT_RSP_ERROR: + /* + * Start timer for a delayed retry + */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id_retry); + ns->port->stats.ns_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->timer, bfa_fcs_lport_ns_timeout, ns, + BFA_FCS_RETRY_TIMEOUT); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_discard(ns->fcxp); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_TIMEOUT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id); + bfa_fcs_lport_ns_send_rft_id(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_timer_stop(&ns->timer); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_sending_rff_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RFFID_SENT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->fcxp_wqe); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RSP_OK: + + /* + * If min cfg mode is enabled, we donot initiate rport + * discovery with the fabric. Instead, we will retrieve the + * boot targets from HAL/FW. + */ + if (__fcs_min_cfg(ns->port->fcs)) { + bfa_fcs_lport_ns_boot_target_disc(ns->port); + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online); + return; + } + + /* + * If the port role is Initiator Mode issue NS query. + * If it is Target Mode, skip this and go to online. 
+ */ + if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) { + bfa_sm_set_state(ns, + bfa_fcs_lport_ns_sm_sending_gid_ft); + bfa_fcs_lport_ns_send_gid_ft(ns, NULL); + } + /* + * kick off mgmt srvr state machine + */ + bfa_fcs_lport_ms_online(ns->port); + break; + + case NSSM_EVENT_RSP_ERROR: + /* + * Start timer for a delayed retry + */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id_retry); + ns->port->stats.ns_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->timer, bfa_fcs_lport_ns_timeout, ns, + BFA_FCS_RETRY_TIMEOUT); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_discard(ns->fcxp); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_TIMEOUT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id); + bfa_fcs_lport_ns_send_rff_id(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_timer_stop(&ns->timer); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} +static void +bfa_fcs_lport_ns_sm_sending_gid_ft(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_GIDFT_SENT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->fcxp_wqe); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RSP_OK: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online); + break; + + case NSSM_EVENT_RSP_ERROR: + /* + * TBD: for certain reject codes, we don't need to retry + */ + /* + * Start timer for a delayed retry + */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft_retry); + ns->port->stats.ns_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->timer, bfa_fcs_lport_ns_timeout, ns, + BFA_FCS_RETRY_TIMEOUT); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_discard(ns->fcxp); + break; + + case NSSM_EVENT_NS_QUERY: + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_TIMEOUT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_gid_ft); + bfa_fcs_lport_ns_send_gid_ft(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_timer_stop(&ns->timer); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + 
break; + + case NSSM_EVENT_NS_QUERY: + /* + * If the port role is Initiator Mode issue NS query. + * If it is Target Mode, skip this and go to online. + */ + if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) { + bfa_sm_set_state(ns, + bfa_fcs_lport_ns_sm_sending_gid_ft); + bfa_fcs_lport_ns_send_gid_ft(ns, NULL); + } + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + + + +/* + * ns_pvt Nameserver local functions + */ + +static void +bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ns_s *ns = ns_cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(port->fcs, port->pid); + + fcxp = fcxp_alloced ? fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + port->stats.ns_plogi_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, + bfa_fcs_lport_ns_send_plogi, ns, BFA_TRUE); + return; + } + ns->fcxp = fcxp; + + len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_hton3b(FC_NAME_SERVER), + bfa_fcs_lport_get_fcid(port), 0, + port->port_cfg.pwwn, port->port_cfg.nwwn, + bfa_fcport_get_maxfrsize(port->fcs->bfa), + bfa_fcport_get_rx_bbcredit(port->fcs->bfa)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ns_plogi_response, (void *)ns, + FC_MAX_PDUSZ, FC_ELS_TOV); + port->stats.ns_plogi_sent++; + + bfa_sm_send_event(ns, NSSM_EVENT_PLOGI_SENT); +} + +static void +bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; + struct bfa_fcs_lport_s *port = ns->port; + /* struct fc_logi_s *plogi_resp; */ + struct fc_els_cmd_s *els_cmd; + struct fc_ls_rjt_s *ls_rjt; + + bfa_trc(port->fcs, req_status); + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + port->stats.ns_plogi_rsp_err++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + return; + } + + els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); + + switch (els_cmd->els_code) { + + case FC_ELS_ACC: + if (rsp_len < sizeof(struct fc_logi_s)) { + bfa_trc(port->fcs, rsp_len); + port->stats.ns_plogi_acc_err++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + break; + } + port->stats.ns_plogi_accepts++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + break; + + case FC_ELS_LS_RJT: + ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); + + bfa_trc(port->fcs, ls_rjt->reason_code); + bfa_trc(port->fcs, ls_rjt->reason_code_expl); + + port->stats.ns_rejects++; + + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + break; + + default: + port->stats.ns_plogi_unknown_rsp++; + bfa_trc(port->fcs, els_cmd->els_code); + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + } +} + +/* + * Register node name for port_id + */ +static void +bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ns_s *ns = ns_cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? 
fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + port->stats.ns_rnnid_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, + bfa_fcs_lport_ns_send_rnn_id, ns, BFA_TRUE); + return; + } + + ns->fcxp = fcxp; + + len = fc_rnnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), + bfa_fcs_lport_get_fcid(port), + bfa_fcs_lport_get_nwwn(port)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ns_rnn_id_response, (void *)ns, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + port->stats.ns_rnnid_sent++; + bfa_sm_send_event(ns, NSSM_EVENT_RNNID_SENT); +} + +static void +bfa_fcs_lport_ns_rnn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) + +{ + struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct ct_hdr_s *cthdr = NULL; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + port->stats.ns_rnnid_rsp_err++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + port->stats.ns_rnnid_accepts++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + return; + } + + port->stats.ns_rnnid_rejects++; + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); +} + +/* + * Register the symbolic node name for a given node name. + */ +static void +bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ns_s *ns = ns_cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + u8 *nsymbl; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? 
fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + port->stats.ns_rsnn_nn_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, + bfa_fcs_lport_ns_send_rsnn_nn, ns, BFA_TRUE); + return; + } + ns->fcxp = fcxp; + + nsymbl = (u8 *) &(bfa_fcs_lport_get_nsym_name( + bfa_fcs_get_base_port(port->fcs))); + + len = fc_rsnn_nn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), + bfa_fcs_lport_get_nwwn(port), nsymbl); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ns_rsnn_nn_response, (void *)ns, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + port->stats.ns_rsnn_nn_sent++; + + bfa_sm_send_event(ns, NSSM_EVENT_RSNN_NN_SENT); +} + +static void +bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct ct_hdr_s *cthdr = NULL; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + port->stats.ns_rsnn_nn_rsp_err++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + port->stats.ns_rsnn_nn_accepts++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + return; + } + + port->stats.ns_rsnn_nn_rejects++; + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); +} + +/* + * Register the symbolic port name. + */ +static void +bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ns_s *ns = ns_cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + u8 symbl[256]; + u8 *psymbl = &symbl[0]; + + memset(symbl, 0, sizeof(symbl)); + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + port->stats.ns_rspnid_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, + bfa_fcs_lport_ns_send_rspn_id, ns, BFA_TRUE); + return; + } + ns->fcxp = fcxp; + + /* + * for V-Port, form a Port Symbolic Name + */ + if (port->vport) { + /* + * For Vports, we append the vport's port symbolic name + * to that of the base port. 
+ */ + + strscpy(symbl, + (char *)&(bfa_fcs_lport_get_psym_name + (bfa_fcs_get_base_port(port->fcs))), + sizeof(symbl)); + + strlcat(symbl, (char *)&(bfa_fcs_lport_get_psym_name(port)), + sizeof(symbl)); + } else { + psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port)); + } + + len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), 0, psymbl); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ns_rspn_id_response, (void *)ns, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + port->stats.ns_rspnid_sent++; + + bfa_sm_send_event(ns, NSSM_EVENT_RSPNID_SENT); +} + +static void +bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct ct_hdr_s *cthdr = NULL; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + port->stats.ns_rspnid_rsp_err++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + port->stats.ns_rspnid_accepts++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + return; + } + + port->stats.ns_rspnid_rejects++; + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); +} + +/* + * Register FC4-Types + */ +static void +bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ns_s *ns = ns_cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? 
fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + port->stats.ns_rftid_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, + bfa_fcs_lport_ns_send_rft_id, ns, BFA_TRUE); + return; + } + ns->fcxp = fcxp; + + len = fc_rftid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), 0, port->port_cfg.roles); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ns_rft_id_response, (void *)ns, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + port->stats.ns_rftid_sent++; + bfa_sm_send_event(ns, NSSM_EVENT_RFTID_SENT); +} + +static void +bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct ct_hdr_s *cthdr = NULL; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + port->stats.ns_rftid_rsp_err++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + port->stats.ns_rftid_accepts++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + return; + } + + port->stats.ns_rftid_rejects++; + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); +} + +/* + * Register FC4-Features : Should be done after RFT_ID + */ +static void +bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ns_s *ns = ns_cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + u8 fc4_ftrs = 0; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? 
fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + port->stats.ns_rffid_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, + bfa_fcs_lport_ns_send_rff_id, ns, BFA_TRUE); + return; + } + ns->fcxp = fcxp; + + if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) + fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR; + + len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), 0, + FC_TYPE_FCP, fc4_ftrs); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ns_rff_id_response, (void *)ns, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + port->stats.ns_rffid_sent++; + bfa_sm_send_event(ns, NSSM_EVENT_RFFID_SENT); +} + +static void +bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct ct_hdr_s *cthdr = NULL; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + port->stats.ns_rffid_rsp_err++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + port->stats.ns_rffid_accepts++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + return; + } + + port->stats.ns_rffid_rejects++; + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + + if (cthdr->reason_code == CT_RSN_NOT_SUPP) { + /* if this command is not supported, we don't retry */ + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + } else + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); +} +/* + * Query Fabric for FC4-Types Devices. + * +* TBD : Need to use a local (FCS private) response buffer, since the response + * can be larger than 2K. + */ +static void +bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ns_s *ns = ns_cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(port->fcs, port->pid); + + fcxp = fcxp_alloced ? fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + port->stats.ns_gidft_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, + bfa_fcs_lport_ns_send_gid_ft, ns, BFA_TRUE); + return; + } + ns->fcxp = fcxp; + + /* + * This query is only initiated for FCP initiator mode. 
+ */ + len = fc_gid_ft_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + ns->port->pid, FC_TYPE_FCP); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ns_gid_ft_response, (void *)ns, + bfa_fcxp_get_maxrsp(port->fcs->bfa), FC_FCCT_TOV); + + port->stats.ns_gidft_sent++; + + bfa_sm_send_event(ns, NSSM_EVENT_GIDFT_SENT); +} + +static void +bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct ct_hdr_s *cthdr = NULL; + u32 n_pids; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + port->stats.ns_gidft_rsp_err++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + return; + } + + if (resid_len != 0) { + /* + * TBD : we will need to allocate a larger buffer & retry the + * command + */ + bfa_trc(port->fcs, rsp_len); + bfa_trc(port->fcs, resid_len); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); + + switch (cthdr->cmd_rsp_code) { + + case CT_RSP_ACCEPT: + + port->stats.ns_gidft_accepts++; + n_pids = (fc_get_ctresp_pyld_len(rsp_len) / sizeof(u32)); + bfa_trc(port->fcs, n_pids); + bfa_fcs_lport_ns_process_gidft_pids(port, + (u32 *) (cthdr + 1), + n_pids); + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + break; + + case CT_RSP_REJECT: + + /* + * Check the reason code & explanation. + * There may not have been any FC4 devices in the fabric + */ + port->stats.ns_gidft_rejects++; + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + + if ((cthdr->reason_code == CT_RSN_UNABLE_TO_PERF) + && (cthdr->exp_code == CT_NS_EXP_FT_NOT_REG)) { + + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + } else { + /* + * for all other errors, retry + */ + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + } + break; + + default: + port->stats.ns_gidft_unknown_rsp++; + bfa_trc(port->fcs, cthdr->cmd_rsp_code); + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + } +} + +/* + * This routine will be called by bfa_timer on timer timeouts. + * + * param[in] port - pointer to bfa_fcs_lport_t. 
+ * + * return + * void + * + * Special Considerations: + * + * note + */ +static void +bfa_fcs_lport_ns_timeout(void *arg) +{ + struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) arg; + + ns->port->stats.ns_timeouts++; + bfa_sm_send_event(ns, NSSM_EVENT_TIMEOUT); +} + +/* + * Process the PID list in GID_FT response + */ +static void +bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf, + u32 n_pids) +{ + struct fcgs_gidft_resp_s *gidft_entry; + struct bfa_fcs_rport_s *rport; + u32 ii; + struct bfa_fcs_fabric_s *fabric = port->fabric; + struct bfa_fcs_vport_s *vport; + struct list_head *qe; + u8 found = 0; + + for (ii = 0; ii < n_pids; ii++) { + gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii]; + + if (gidft_entry->pid == port->pid) + continue; + + /* + * Ignore PID if it is of base port + * (Avoid vports discovering base port as remote port) + */ + if (gidft_entry->pid == fabric->bport.pid) + continue; + + /* + * Ignore PID if it is of vport created on the same base port + * (Avoid vport discovering every other vport created on the + * same port as remote port) + */ + list_for_each(qe, &fabric->vport_q) { + vport = (struct bfa_fcs_vport_s *) qe; + if (vport->lport.pid == gidft_entry->pid) + found = 1; + } + + if (found) { + found = 0; + continue; + } + + /* + * Check if this rport already exists + */ + rport = bfa_fcs_lport_get_rport_by_pid(port, gidft_entry->pid); + if (rport == NULL) { + /* + * this is a new device. create rport + */ + rport = bfa_fcs_rport_create(port, gidft_entry->pid); + } else { + /* + * this rport already exists + */ + bfa_fcs_rport_scn(rport); + } + + bfa_trc(port->fcs, gidft_entry->pid); + + /* + * if the last entry bit is set, bail out. + */ + if (gidft_entry->last) + return; + } +} + +/* + * fcs_ns_public FCS nameserver public interfaces + */ + +/* + * Functions called by port/fab. + * These will send relevant Events to the ns state machine. + */ +void +bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); + + ns->port = port; + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); +} + +void +bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); + + ns->port = port; + bfa_sm_send_event(ns, NSSM_EVENT_PORT_OFFLINE); +} + +void +bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); + + ns->port = port; + bfa_sm_send_event(ns, NSSM_EVENT_PORT_ONLINE); +} + +void +bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); + + bfa_trc(port->fcs, port->pid); + if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_online)) + bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY); +} + +static void +bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port) +{ + + struct bfa_fcs_rport_s *rport; + u8 nwwns; + wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX]; + int ii; + + bfa_iocfc_get_bootwwns(port->fcs->bfa, &nwwns, wwns); + + for (ii = 0 ; ii < nwwns; ++ii) { + rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]); + WARN_ON(!rport); + } +} + +void +bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ns_s *ns = cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + u8 symbl[256]; + int len; + + /* Avoid sending RSPN in the following states. 
*/ + if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) || + bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) || + bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi) || + bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_retry) || + bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry)) + return; + + memset(symbl, 0, sizeof(symbl)); + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); + if (!fcxp) { + port->stats.ns_rspnid_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, + bfa_fcs_lport_ns_util_send_rspn_id, ns, BFA_FALSE); + return; + } + + ns->fcxp = fcxp; + + if (port->vport) { + /* + * For Vports, we append the vport's port symbolic name + * to that of the base port. + */ + strscpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name + (bfa_fcs_get_base_port(port->fcs))), + sizeof(symbl)); + + strlcat(symbl, + (char *)&(bfa_fcs_lport_get_psym_name(port)), + sizeof(symbl)); + } + + len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), 0, symbl); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); + + port->stats.ns_rspnid_sent++; +} + +/* + * FCS SCN + */ + +#define FC_QOS_RSCN_EVENT 0x0c +#define FC_FABRIC_NAME_RSCN_EVENT 0x0d + +/* + * forward declarations + */ +static void bfa_fcs_lport_scn_send_scr(void *scn_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_scn_scr_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs); +static void bfa_fcs_lport_scn_timeout(void *arg); + +/* + * fcs_scm_sm FCS SCN state machine + */ + +/* + * VPort SCN State Machine events + */ +enum port_scn_event { + SCNSM_EVENT_PORT_ONLINE = 1, + SCNSM_EVENT_PORT_OFFLINE = 2, + SCNSM_EVENT_RSP_OK = 3, + SCNSM_EVENT_RSP_ERROR = 4, + SCNSM_EVENT_TIMEOUT = 5, + SCNSM_EVENT_SCR_SENT = 6, +}; + +static void bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event); +static void bfa_fcs_lport_scn_sm_sending_scr( + struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event); +static void bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event); +static void bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event); +static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event); + +/* + * Starting state - awaiting link up. 
+ */ +static void +bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event) +{ + switch (event) { + case SCNSM_EVENT_PORT_ONLINE: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr); + bfa_fcs_lport_scn_send_scr(scn, NULL); + break; + + case SCNSM_EVENT_PORT_OFFLINE: + break; + + default: + bfa_sm_fault(scn->port->fcs, event); + } +} + +static void +bfa_fcs_lport_scn_sm_sending_scr(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event) +{ + switch (event) { + case SCNSM_EVENT_SCR_SENT: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr); + break; + + case SCNSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline); + bfa_fcxp_walloc_cancel(scn->port->fcs->bfa, &scn->fcxp_wqe); + break; + + default: + bfa_sm_fault(scn->port->fcs, event); + } +} + +static void +bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event) +{ + struct bfa_fcs_lport_s *port = scn->port; + + switch (event) { + case SCNSM_EVENT_RSP_OK: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_online); + break; + + case SCNSM_EVENT_RSP_ERROR: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr_retry); + bfa_timer_start(port->fcs->bfa, &scn->timer, + bfa_fcs_lport_scn_timeout, scn, + BFA_FCS_RETRY_TIMEOUT); + break; + + case SCNSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline); + bfa_fcxp_discard(scn->fcxp); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event) +{ + switch (event) { + case SCNSM_EVENT_TIMEOUT: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr); + bfa_fcs_lport_scn_send_scr(scn, NULL); + break; + + case SCNSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline); + bfa_timer_stop(&scn->timer); + break; + + default: + bfa_sm_fault(scn->port->fcs, event); + } +} + +static void +bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event) +{ + switch (event) { + case SCNSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline); + break; + + default: + bfa_sm_fault(scn->port->fcs, event); + } +} + + + +/* + * fcs_scn_private FCS SCN private functions + */ + +/* + * This routine will be called to send a SCR command. + */ +static void +bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_scn_s *scn = scn_cbarg; + struct bfa_fcs_lport_s *port = scn->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(port->fcs, port->pid); + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? 
fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe, + bfa_fcs_lport_scn_send_scr, scn, BFA_TRUE); + return; + } + scn->fcxp = fcxp; + + /* Handle VU registrations for Base port only */ + if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) { + len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + port->fabric->lps->brcd_switch, + port->pid, 0); + } else { + len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + BFA_FALSE, + port->pid, 0); + } + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_scn_scr_response, + (void *)scn, FC_MAX_PDUSZ, FC_ELS_TOV); + + bfa_sm_send_event(scn, SCNSM_EVENT_SCR_SENT); +} + +static void +bfa_fcs_lport_scn_scr_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) cbarg; + struct bfa_fcs_lport_s *port = scn->port; + struct fc_els_cmd_s *els_cmd; + struct fc_ls_rjt_s *ls_rjt; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR); + return; + } + + els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); + + switch (els_cmd->els_code) { + + case FC_ELS_ACC: + bfa_sm_send_event(scn, SCNSM_EVENT_RSP_OK); + break; + + case FC_ELS_LS_RJT: + + ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); + + bfa_trc(port->fcs, ls_rjt->reason_code); + bfa_trc(port->fcs, ls_rjt->reason_code_expl); + + bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR); + break; + + default: + bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR); + } +} + +/* + * Send a LS Accept + */ +static void +bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs) +{ + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + struct bfa_rport_s *bfa_rport = NULL; + int len; + + bfa_trc(port->fcs, rx_fchs->s_id); + + fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); + if (!fcxp) + return; + + len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id); + + bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, + BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); +} + +/* + * This routine will be called by bfa_timer on timer timeouts. + * + * param[in] vport - pointer to bfa_fcs_lport_t. 
+ * param[out] vport_status - pointer to return vport status in + * + * return + * void + * + * Special Considerations: + * + * note + */ +static void +bfa_fcs_lport_scn_timeout(void *arg) +{ + struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) arg; + + bfa_sm_send_event(scn, SCNSM_EVENT_TIMEOUT); +} + + + +/* + * fcs_scn_public FCS state change notification public interfaces + */ + +/* + * Functions called by port/fab + */ +void +bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); + + scn->port = port; + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline); +} + +void +bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); + + scn->port = port; + bfa_sm_send_event(scn, SCNSM_EVENT_PORT_OFFLINE); +} + +void +bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); + + scn->port = port; + bfa_sm_send_event(scn, SCNSM_EVENT_PORT_ONLINE); +} + +static void +bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid) +{ + struct bfa_fcs_rport_s *rport; + struct bfa_fcs_fabric_s *fabric = port->fabric; + struct bfa_fcs_vport_s *vport; + struct list_head *qe; + + bfa_trc(port->fcs, rpid); + + /* + * Ignore PID if it is of base port or of vports created on the + * same base port. It is to avoid vports discovering base port or + * other vports created on same base port as remote port + */ + if (rpid == fabric->bport.pid) + return; + + list_for_each(qe, &fabric->vport_q) { + vport = (struct bfa_fcs_vport_s *) qe; + if (vport->lport.pid == rpid) + return; + } + /* + * If this is an unknown device, then it just came online. + * Otherwise let rport handle the RSCN event. + */ + rport = bfa_fcs_lport_get_rport_by_pid(port, rpid); + if (!rport) + rport = bfa_fcs_lport_get_rport_by_old_pid(port, rpid); + + if (rport == NULL) { + /* + * If min cfg mode is enabled, we donot need to + * discover any new rports. 
+ */ + if (!__fcs_min_cfg(port->fcs)) + rport = bfa_fcs_rport_create(port, rpid); + } else + bfa_fcs_rport_scn(rport); +} + +/* + * rscn format based PID comparison + */ +#define __fc_pid_match(__c0, __c1, __fmt) \ + (((__fmt) == FC_RSCN_FORMAT_FABRIC) || \ + (((__fmt) == FC_RSCN_FORMAT_DOMAIN) && \ + ((__c0)[0] == (__c1)[0])) || \ + (((__fmt) == FC_RSCN_FORMAT_AREA) && \ + ((__c0)[0] == (__c1)[0]) && \ + ((__c0)[1] == (__c1)[1]))) + +static void +bfa_fcs_lport_scn_multiport_rscn(struct bfa_fcs_lport_s *port, + enum fc_rscn_format format, + u32 rscn_pid) +{ + struct bfa_fcs_rport_s *rport; + struct list_head *qe, *qe_next; + u8 *c0, *c1; + + bfa_trc(port->fcs, format); + bfa_trc(port->fcs, rscn_pid); + + c0 = (u8 *) &rscn_pid; + + list_for_each_safe(qe, qe_next, &port->rport_q) { + rport = (struct bfa_fcs_rport_s *) qe; + c1 = (u8 *) &rport->pid; + if (__fc_pid_match(c0, c1, format)) + bfa_fcs_rport_scn(rport); + } +} + + +void +bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port, + struct fchs_s *fchs, u32 len) +{ + struct fc_rscn_pl_s *rscn = (struct fc_rscn_pl_s *) (fchs + 1); + int num_entries; + u32 rscn_pid; + bfa_boolean_t nsquery = BFA_FALSE, found; + int i = 0, j; + + num_entries = + (be16_to_cpu(rscn->payldlen) - + sizeof(u32)) / sizeof(rscn->event[0]); + + bfa_trc(port->fcs, num_entries); + + port->stats.num_rscn++; + + bfa_fcs_lport_scn_send_ls_acc(port, fchs); + + for (i = 0; i < num_entries; i++) { + rscn_pid = rscn->event[i].portid; + + bfa_trc(port->fcs, rscn->event[i].format); + bfa_trc(port->fcs, rscn_pid); + + /* check for duplicate entries in the list */ + found = BFA_FALSE; + for (j = 0; j < i; j++) { + if (rscn->event[j].portid == rscn_pid) { + found = BFA_TRUE; + break; + } + } + + /* if found in down the list, pid has been already processed */ + if (found) { + bfa_trc(port->fcs, rscn_pid); + continue; + } + + switch (rscn->event[i].format) { + case FC_RSCN_FORMAT_PORTID: + if (rscn->event[i].qualifier == FC_QOS_RSCN_EVENT) { + /* + * Ignore this event. + * f/w would have processed it + */ + bfa_trc(port->fcs, rscn_pid); + } else { + port->stats.num_portid_rscn++; + bfa_fcs_lport_scn_portid_rscn(port, rscn_pid); + } + break; + + case FC_RSCN_FORMAT_FABRIC: + if (rscn->event[i].qualifier == + FC_FABRIC_NAME_RSCN_EVENT) { + bfa_fcs_lport_ms_fabric_rscn(port); + break; + } + fallthrough; + + case FC_RSCN_FORMAT_AREA: + case FC_RSCN_FORMAT_DOMAIN: + nsquery = BFA_TRUE; + bfa_fcs_lport_scn_multiport_rscn(port, + rscn->event[i].format, + rscn_pid); + break; + + + default: + WARN_ON(1); + nsquery = BFA_TRUE; + } + } + + /* + * If any of area, domain or fabric RSCN is received, do a fresh + * discovery to find new devices. 
+ */ + if (nsquery) + bfa_fcs_lport_ns_query(port); +} + +/* + * BFA FCS port + */ +/* + * fcs_port_api BFA FCS port API + */ +struct bfa_fcs_lport_s * +bfa_fcs_get_base_port(struct bfa_fcs_s *fcs) +{ + return &fcs->fabric.bport; +} + +wwn_t +bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index, + int nrports, bfa_boolean_t bwwn) +{ + struct list_head *qh, *qe; + struct bfa_fcs_rport_s *rport = NULL; + int i; + struct bfa_fcs_s *fcs; + + if (port == NULL || nrports == 0) + return (wwn_t) 0; + + fcs = port->fcs; + bfa_trc(fcs, (u32) nrports); + + i = 0; + qh = &port->rport_q; + qe = bfa_q_first(qh); + + while ((qe != qh) && (i < nrports)) { + rport = (struct bfa_fcs_rport_s *) qe; + if (bfa_ntoh3b(rport->pid) > 0xFFF000) { + qe = bfa_q_next(qe); + bfa_trc(fcs, (u32) rport->pwwn); + bfa_trc(fcs, rport->pid); + bfa_trc(fcs, i); + continue; + } + + if (bwwn) { + if (!memcmp(&wwn, &rport->pwwn, 8)) + break; + } else { + if (i == index) + break; + } + + i++; + qe = bfa_q_next(qe); + } + + bfa_trc(fcs, i); + if (rport) + return rport->pwwn; + else + return (wwn_t) 0; +} + +void +bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port, + struct bfa_rport_qualifier_s rports[], int *nrports) +{ + struct list_head *qh, *qe; + struct bfa_fcs_rport_s *rport = NULL; + int i; + struct bfa_fcs_s *fcs; + + if (port == NULL || rports == NULL || *nrports == 0) + return; + + fcs = port->fcs; + bfa_trc(fcs, (u32) *nrports); + + i = 0; + qh = &port->rport_q; + qe = bfa_q_first(qh); + + while ((qe != qh) && (i < *nrports)) { + rport = (struct bfa_fcs_rport_s *) qe; + if (bfa_ntoh3b(rport->pid) > 0xFFF000) { + qe = bfa_q_next(qe); + bfa_trc(fcs, (u32) rport->pwwn); + bfa_trc(fcs, rport->pid); + bfa_trc(fcs, i); + continue; + } + + if (!rport->pwwn && !rport->pid) { + qe = bfa_q_next(qe); + continue; + } + + rports[i].pwwn = rport->pwwn; + rports[i].pid = rport->pid; + + i++; + qe = bfa_q_next(qe); + } + + bfa_trc(fcs, i); + *nrports = i; +} + +/* + * Iterate's through all the rport's in the given port to + * determine the maximum operating speed. + * + * !!!! To be used in TRL Functionality only !!!! 
+ */ +bfa_port_speed_t +bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port) +{ + struct list_head *qh, *qe; + struct bfa_fcs_rport_s *rport = NULL; + struct bfa_fcs_s *fcs; + bfa_port_speed_t max_speed = 0; + struct bfa_port_attr_s port_attr; + bfa_port_speed_t port_speed, rport_speed; + bfa_boolean_t trl_enabled; + + if (port == NULL) + return 0; + + fcs = port->fcs; + trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa); + + /* Get Physical port's current speed */ + bfa_fcport_get_attr(port->fcs->bfa, &port_attr); + port_speed = port_attr.speed; + bfa_trc(fcs, port_speed); + + qh = &port->rport_q; + qe = bfa_q_first(qh); + + while (qe != qh) { + rport = (struct bfa_fcs_rport_s *) qe; + if ((bfa_ntoh3b(rport->pid) > 0xFFF000) || + (bfa_fcs_rport_get_state(rport) == BFA_RPORT_OFFLINE) || + (rport->scsi_function != BFA_RPORT_TARGET)) { + qe = bfa_q_next(qe); + continue; + } + + rport_speed = rport->rpf.rpsc_speed; + if ((trl_enabled) && (rport_speed == + BFA_PORT_SPEED_UNKNOWN)) { + /* Use default ratelim speed setting */ + rport_speed = + bfa_fcport_get_ratelim_speed(port->fcs->bfa); + } + + if (rport_speed > max_speed) + max_speed = rport_speed; + + qe = bfa_q_next(qe); + } + + if (max_speed > port_speed) + max_speed = port_speed; + + bfa_trc(fcs, max_speed); + return max_speed; +} + +struct bfa_fcs_lport_s * +bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn) +{ + struct bfa_fcs_vport_s *vport; + bfa_fcs_vf_t *vf; + + WARN_ON(fcs == NULL); + + vf = bfa_fcs_vf_lookup(fcs, vf_id); + if (vf == NULL) { + bfa_trc(fcs, vf_id); + return NULL; + } + + if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn)) + return &vf->bport; + + vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn); + if (vport) + return &vport->lport; + + return NULL; +} + +/* + * API corresponding to NPIV_VPORT_GETINFO. 
+ */ +void +bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, + struct bfa_lport_info_s *port_info) +{ + + bfa_trc(port->fcs, port->fabric->fabric_name); + + if (port->vport == NULL) { + /* + * This is a Physical port + */ + port_info->port_type = BFA_LPORT_TYPE_PHYSICAL; + + /* + * @todo : need to fix the state & reason + */ + port_info->port_state = 0; + port_info->offline_reason = 0; + + port_info->port_wwn = bfa_fcs_lport_get_pwwn(port); + port_info->node_wwn = bfa_fcs_lport_get_nwwn(port); + + port_info->max_vports_supp = + bfa_lps_get_max_vport(port->fcs->bfa); + port_info->num_vports_inuse = + port->fabric->num_vports; + port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP; + port_info->num_rports_inuse = port->num_rports; + } else { + /* + * This is a virtual port + */ + port_info->port_type = BFA_LPORT_TYPE_VIRTUAL; + + /* + * @todo : need to fix the state & reason + */ + port_info->port_state = 0; + port_info->offline_reason = 0; + + port_info->port_wwn = bfa_fcs_lport_get_pwwn(port); + port_info->node_wwn = bfa_fcs_lport_get_nwwn(port); + } +} + +void +bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port, + struct bfa_lport_stats_s *port_stats) +{ + *port_stats = fcs_port->stats; +} + +void +bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port) +{ + memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s)); +} + +/* + * Let new loop map create missing rports + */ +void +bfa_fcs_lport_lip_scn_online(struct bfa_fcs_lport_s *port) +{ + bfa_fcs_lport_loop_online(port); +} + +/* + * FCS virtual port state machine + */ + +#define __vport_fcs(__vp) ((__vp)->lport.fcs) +#define __vport_pwwn(__vp) ((__vp)->lport.port_cfg.pwwn) +#define __vport_nwwn(__vp) ((__vp)->lport.port_cfg.nwwn) +#define __vport_bfa(__vp) ((__vp)->lport.fcs->bfa) +#define __vport_fcid(__vp) ((__vp)->lport.pid) +#define __vport_fabric(__vp) ((__vp)->lport.fabric) +#define __vport_vfid(__vp) ((__vp)->lport.fabric->vf_id) + +#define BFA_FCS_VPORT_MAX_RETRIES 5 +/* + * Forward declarations + */ +static void bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport); +static void bfa_fcs_vport_timeout(void *vport_arg); +static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport); +static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport); + +/* + * fcs_vport_sm FCS virtual port state machine + */ + +/* + * VPort State Machine events + */ +enum bfa_fcs_vport_event { + BFA_FCS_VPORT_SM_CREATE = 1, /* vport create event */ + BFA_FCS_VPORT_SM_DELETE = 2, /* vport delete event */ + BFA_FCS_VPORT_SM_START = 3, /* vport start request */ + BFA_FCS_VPORT_SM_STOP = 4, /* stop: unsupported */ + BFA_FCS_VPORT_SM_ONLINE = 5, /* fabric online */ + BFA_FCS_VPORT_SM_OFFLINE = 6, /* fabric offline event */ + BFA_FCS_VPORT_SM_FRMSENT = 7, /* fdisc/logo sent events */ + BFA_FCS_VPORT_SM_RSP_OK = 8, /* good response */ + BFA_FCS_VPORT_SM_RSP_ERROR = 9, /* error/bad response */ + BFA_FCS_VPORT_SM_TIMEOUT = 10, /* delay timer event */ + BFA_FCS_VPORT_SM_DELCOMP = 11, /* lport delete completion */ + BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12, /* Dup wnn error*/ + BFA_FCS_VPORT_SM_RSP_FAILED = 13, /* non-retryable failure */ + BFA_FCS_VPORT_SM_STOPCOMP = 14, /* vport delete completion */ + BFA_FCS_VPORT_SM_FABRIC_MAX = 15, /* max vports on fabric */ +}; + +static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport, + enum 
bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); + +static struct bfa_sm_table_s vport_sm_table[] = { + {BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT}, + {BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED}, + {BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE}, + {BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC}, + {BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY}, + {BFA_SM(bfa_fcs_vport_sm_fdisc_rsp_wait), BFA_FCS_VPORT_FDISC_RSP_WAIT}, + {BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE}, + {BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING}, + {BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP}, + {BFA_SM(bfa_fcs_vport_sm_logo), BFA_FCS_VPORT_LOGO}, + {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR} +}; + +/* + * Beginning state. + */ +static void +bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_CREATE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_created); + bfa_fcs_fabric_addvport(__vport_fabric(vport), vport); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/* + * Created state - a start event is required to start up the state machine. + */ +static void +bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_START: + if (bfa_sm_cmp_state(__vport_fabric(vport), + bfa_fcs_fabric_sm_online) + && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) { + bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); + bfa_fcs_vport_do_fdisc(vport); + } else { + /* + * Fabric is offline or not NPIV capable, stay in + * offline state. + */ + vport->vport_stats.fab_no_npiv++; + bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); + } + break; + + case BFA_FCS_VPORT_SM_DELETE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + bfa_fcs_lport_delete(&vport->lport); + break; + + case BFA_FCS_VPORT_SM_ONLINE: + case BFA_FCS_VPORT_SM_OFFLINE: + /* + * Ignore ONLINE/OFFLINE events from fabric + * till vport is started. + */ + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/* + * Offline state - awaiting ONLINE event from fabric SM. 
+ */ +static void +bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_DELETE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + bfa_fcs_lport_delete(&vport->lport); + break; + + case BFA_FCS_VPORT_SM_ONLINE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); + vport->fdisc_retries = 0; + bfa_fcs_vport_do_fdisc(vport); + break; + + case BFA_FCS_VPORT_SM_STOP: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP); + break; + + case BFA_FCS_VPORT_SM_OFFLINE: + /* + * This can happen if the vport couldn't be initialized + * due to the fact that NPIV was not enabled on the switch. + * In that case we will put the vport in offline state. + * However, the link can go down and cause this event to + * be sent when we are already offline. Ignore it. + */ + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + + +/* + * FDISC is sent and awaiting reply from fabric. + */ +static void +bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_DELETE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_rsp_wait); + break; + + case BFA_FCS_VPORT_SM_OFFLINE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); + bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); + break; + + case BFA_FCS_VPORT_SM_RSP_OK: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_online); + bfa_fcs_lport_online(&vport->lport); + break; + + case BFA_FCS_VPORT_SM_RSP_ERROR: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_retry); + bfa_timer_start(__vport_bfa(vport), &vport->timer, + bfa_fcs_vport_timeout, vport, + BFA_FCS_RETRY_TIMEOUT); + break; + + case BFA_FCS_VPORT_SM_RSP_FAILED: + case BFA_FCS_VPORT_SM_FABRIC_MAX: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); + break; + + case BFA_FCS_VPORT_SM_RSP_DUP_WWN: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_error); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/* + * FDISC attempt failed - a timer is active to retry FDISC. + */ +static void +bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_DELETE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + bfa_timer_stop(&vport->timer); + bfa_fcs_lport_delete(&vport->lport); + break; + + case BFA_FCS_VPORT_SM_OFFLINE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); + bfa_timer_stop(&vport->timer); + break; + + case BFA_FCS_VPORT_SM_TIMEOUT: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); + vport->vport_stats.fdisc_retries++; + vport->fdisc_retries++; + bfa_fcs_vport_do_fdisc(vport); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/* + * FDISC is in progress and we got a vport delete request - + * this is a wait state while we wait for fdisc response and + * we will transition to the appropriate state - on rsp status. 
+ */ +static void +bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_RSP_OK: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting); + bfa_fcs_lport_delete(&vport->lport); + break; + + case BFA_FCS_VPORT_SM_DELETE: + break; + + case BFA_FCS_VPORT_SM_OFFLINE: + case BFA_FCS_VPORT_SM_RSP_ERROR: + case BFA_FCS_VPORT_SM_RSP_FAILED: + case BFA_FCS_VPORT_SM_FABRIC_MAX: + case BFA_FCS_VPORT_SM_RSP_DUP_WWN: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); + bfa_fcs_lport_delete(&vport->lport); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/* + * Vport is online (FDISC is complete). + */ +static void +bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_DELETE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting); + bfa_fcs_lport_delete(&vport->lport); + break; + + case BFA_FCS_VPORT_SM_STOP: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_stopping); + bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP); + break; + + case BFA_FCS_VPORT_SM_OFFLINE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); + bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); + bfa_fcs_lport_offline(&vport->lport); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/* + * Vport is being stopped - awaiting lport stop completion to send + * LOGO to fabric. + */ +static void +bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_STOPCOMP: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo_for_stop); + bfa_fcs_vport_do_logo(vport); + break; + + case BFA_FCS_VPORT_SM_OFFLINE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/* + * Vport is being deleted - awaiting lport delete completion to send + * LOGO to fabric. + */ +static void +bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_DELETE: + break; + + case BFA_FCS_VPORT_SM_DELCOMP: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo); + bfa_fcs_vport_do_logo(vport); + break; + + case BFA_FCS_VPORT_SM_OFFLINE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/* + * Error State. + * This state will be set when the Vport Creation fails due + * to errors like Dup WWN. In this state only operation allowed + * is a Vport Delete. + */ +static void +bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_DELETE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + bfa_fcs_lport_delete(&vport->lport); + break; + + default: + bfa_trc(__vport_fcs(vport), event); + } +} + +/* + * Lport cleanup is in progress since vport is being deleted. 
Fabric is + * offline, so no LOGO is needed to complete vport deletion. + */ +static void +bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_DELCOMP: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); + bfa_fcs_vport_free(vport); + break; + + case BFA_FCS_VPORT_SM_STOPCOMP: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_created); + break; + + case BFA_FCS_VPORT_SM_DELETE: + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/* + * LOGO is sent to fabric. Vport stop is in progress. Lport stop cleanup + * is done. + */ +static void +bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_OFFLINE: + bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); + fallthrough; + + case BFA_FCS_VPORT_SM_RSP_OK: + case BFA_FCS_VPORT_SM_RSP_ERROR: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_created); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/* + * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup + * is done. + */ +static void +bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_OFFLINE: + bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); + fallthrough; + + case BFA_FCS_VPORT_SM_RSP_OK: + case BFA_FCS_VPORT_SM_RSP_ERROR: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); + bfa_fcs_vport_free(vport); + break; + + case BFA_FCS_VPORT_SM_DELETE: + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + + + +/* + * fcs_vport_private FCS virtual port private functions + */ +/* + * Send AEN notification + */ +static void +bfa_fcs_vport_aen_post(struct bfa_fcs_lport_s *port, + enum bfa_lport_aen_event event) +{ + struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad; + struct bfa_aen_entry_s *aen_entry; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + aen_entry->aen_data.lport.vf_id = port->fabric->vf_id; + aen_entry->aen_data.lport.roles = port->port_cfg.roles; + aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn( + bfa_fcs_get_base_port(port->fcs)); + aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port); + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq, + BFA_AEN_CAT_LPORT, event); +} + +/* + * This routine will be called to send a FDISC command. + */ +static void +bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport) +{ + bfa_lps_fdisc(vport->lps, vport, + bfa_fcport_get_maxfrsize(__vport_bfa(vport)), + __vport_pwwn(vport), __vport_nwwn(vport)); + vport->vport_stats.fdisc_sent++; +} + +static void +bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport) +{ + u8 lsrjt_rsn = vport->lps->lsrjt_rsn; + u8 lsrjt_expl = vport->lps->lsrjt_expl; + + bfa_trc(__vport_fcs(vport), lsrjt_rsn); + bfa_trc(__vport_fcs(vport), lsrjt_expl); + + /* For certain reason codes, we don't want to retry. 
*/ + switch (vport->lps->lsrjt_expl) { + case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */ + case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */ + if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + else { + bfa_fcs_vport_aen_post(&vport->lport, + BFA_LPORT_AEN_NPIV_DUP_WWN); + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN); + } + break; + + case FC_LS_RJT_EXP_INSUFF_RES: + /* + * This means max logins per port/switch setting on the + * switch was exceeded. + */ + if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + else { + bfa_fcs_vport_aen_post(&vport->lport, + BFA_LPORT_AEN_NPIV_FABRIC_MAX); + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_FABRIC_MAX); + } + break; + + default: + if (vport->fdisc_retries == 0) + bfa_fcs_vport_aen_post(&vport->lport, + BFA_LPORT_AEN_NPIV_UNKNOWN); + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + } +} + +/* + * Called to send a logout to the fabric. Used when a V-Port is + * deleted/stopped. + */ +static void +bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + + vport->vport_stats.logo_sent++; + bfa_lps_fdisclogo(vport->lps); +} + + +/* + * This routine will be called by bfa_timer on timer timeouts. + * + * param[in] vport - pointer to bfa_fcs_vport_t. + * param[out] vport_status - pointer to return vport status in + * + * return + * void + * + * Special Considerations: + * + * note + */ +static void +bfa_fcs_vport_timeout(void *vport_arg) +{ + struct bfa_fcs_vport_s *vport = (struct bfa_fcs_vport_s *) vport_arg; + + vport->vport_stats.fdisc_timeouts++; + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_TIMEOUT); +} + +static void +bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport) +{ + struct bfad_vport_s *vport_drv = + (struct bfad_vport_s *)vport->vport_drv; + + bfa_fcs_fabric_delvport(__vport_fabric(vport), vport); + bfa_lps_delete(vport->lps); + + if (vport_drv->comp_del) { + complete(vport_drv->comp_del); + return; + } + + /* + * We queue the vport delete work to the IM work_q from here. + * The memory for the bfad_vport_s is freed from the FC function + * template vport_delete entry point. + */ + bfad_im_port_delete(vport_drv->drv_port.bfad, &vport_drv->drv_port); +} + +/* + * fcs_vport_public FCS virtual port public interfaces + */ + +/* + * Online notification from fabric SM. + */ +void +bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport) +{ + vport->vport_stats.fab_online++; + if (bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); + else + vport->vport_stats.fab_no_npiv++; +} + +/* + * Offline notification from fabric SM. + */ +void +bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport) +{ + vport->vport_stats.fab_offline++; + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); +} + +/* + * Cleanup notification from fabric SM on link timer expiry. + */ +void +bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport) +{ + vport->vport_stats.fab_cleanup++; +} + +/* + * Stop notification from fabric SM. To be invoked from within FCS. + */ +void +bfa_fcs_vport_fcs_stop(struct bfa_fcs_vport_s *vport) +{ + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP); +} + +/* + * delete notification from fabric SM. To be invoked from within FCS. 
+ */ +void +bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport) +{ + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE); +} + +/* + * Stop completion callback from associated lport + */ +void +bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport) +{ + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOPCOMP); +} + +/* + * Delete completion callback from associated lport + */ +void +bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport) +{ + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELCOMP); +} + + + +/* + * fcs_vport_api Virtual port API + */ + +/* + * Use this function to instantiate a new FCS vport object. This + * function will not trigger any HW initialization process (which will be + * done in vport_start() call) + * + * param[in] vport - pointer to bfa_fcs_vport_t. This space + * needs to be allocated by the driver. + * param[in] fcs - FCS instance + * param[in] vport_cfg - vport configuration + * param[in] vf_id - VF_ID if vport is created within a VF. + * FC_VF_ID_NULL to specify base fabric. + * param[in] vport_drv - Opaque handle back to the driver's vport + * structure + * + * retval BFA_STATUS_OK - on success. + * retval BFA_STATUS_FAILED - on failure. + */ +bfa_status_t +bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, + u16 vf_id, struct bfa_lport_cfg_s *vport_cfg, + struct bfad_vport_s *vport_drv) +{ + if (vport_cfg->pwwn == 0) + return BFA_STATUS_INVALID_WWN; + + if (bfa_fcs_lport_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn) + return BFA_STATUS_VPORT_WWN_BP; + + if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL) + return BFA_STATUS_VPORT_EXISTS; + + if (fcs->fabric.num_vports == + bfa_lps_get_max_vport(fcs->bfa)) + return BFA_STATUS_VPORT_MAX; + + vport->lps = bfa_lps_alloc(fcs->bfa); + if (!vport->lps) + return BFA_STATUS_VPORT_MAX; + + vport->vport_drv = vport_drv; + vport_cfg->preboot_vp = BFA_FALSE; + + bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); + bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport); + bfa_fcs_lport_init(&vport->lport, vport_cfg); + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE); + + return BFA_STATUS_OK; +} + +/* + * Use this function to instantiate a new FCS PBC vport object. This + * function will not trigger any HW initialization process (which will be + * done in vport_start() call) + * + * param[in] vport - pointer to bfa_fcs_vport_t. This space + * needs to be allocated by the driver. + * param[in] fcs - FCS instance + * param[in] vport_cfg - vport configuration + * param[in] vf_id - VF_ID if vport is created within a VF. + * FC_VF_ID_NULL to specify base fabric. + * param[in] vport_drv - Opaque handle back to the driver's vport + * structure + * + * retval BFA_STATUS_OK - on success. + * retval BFA_STATUS_FAILED - on failure. + */ +bfa_status_t +bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, + u16 vf_id, struct bfa_lport_cfg_s *vport_cfg, + struct bfad_vport_s *vport_drv) +{ + bfa_status_t rc; + + rc = bfa_fcs_vport_create(vport, fcs, vf_id, vport_cfg, vport_drv); + vport->lport.port_cfg.preboot_vp = BFA_TRUE; + + return rc; +} + +/* + * Use this function to find out if this is a pbc vport or not. + * + * @param[in] vport - pointer to bfa_fcs_vport_t. + * + * @returns None + */ +bfa_boolean_t +bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport) +{ + + if (vport && (vport->lport.port_cfg.preboot_vp == BFA_TRUE)) + return BFA_TRUE; + else + return BFA_FALSE; + +} + +/* + * Use this function to initialize the vport. 
+ * + * @param[in] vport - pointer to bfa_fcs_vport_t. + * + * @returns None + */ +bfa_status_t +bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport) +{ + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_START); + + return BFA_STATUS_OK; +} + +/* + * Use this function to quiesce the vport object. This function will return + * immediately; when the vport is actually stopped, the + * bfa_drv_vport_stop_cb() will be called. + * + * param[in] vport - pointer to bfa_fcs_vport_t. + * + * return None + */ +bfa_status_t +bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport) +{ + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP); + + return BFA_STATUS_OK; +} + +/* + * Use this function to delete a vport object. Fabric object should + * be stopped before this function call. + * + * !!!!!!! Do not invoke this from within FCS !!!!!!! + * + * param[in] vport - pointer to bfa_fcs_vport_t. + * + * return None + */ +bfa_status_t +bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport) +{ + + if (vport->lport.port_cfg.preboot_vp) + return BFA_STATUS_PBC; + + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE); + + return BFA_STATUS_OK; +} + +/* + * Use this function to get vport's current status info. + * + * param[in] vport pointer to bfa_fcs_vport_t. + * param[out] attr pointer to return vport attributes + * + * return None + */ +void +bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, + struct bfa_vport_attr_s *attr) +{ + if (vport == NULL || attr == NULL) + return; + + memset(attr, 0, sizeof(struct bfa_vport_attr_s)); + + bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr); + attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm); +} + + +/* + * Lookup a virtual port. Excludes base port from lookup. + */ +struct bfa_fcs_vport_s * +bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn) +{ + struct bfa_fcs_vport_s *vport; + struct bfa_fcs_fabric_s *fabric; + + bfa_trc(fcs, vf_id); + bfa_trc(fcs, vpwwn); + + fabric = bfa_fcs_vf_lookup(fcs, vf_id); + if (!fabric) { + bfa_trc(fcs, vf_id); + return NULL; + } + + vport = bfa_fcs_fabric_vport_lookup(fabric, vpwwn); + return vport; +} + +/* + * FDISC Response + */ +void +bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status) +{ + struct bfa_fcs_vport_s *vport = uarg; + + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), status); + + switch (status) { + case BFA_STATUS_OK: + /* + * Initialize the V-Port fields + */ + __vport_fcid(vport) = vport->lps->lp_pid; + vport->vport_stats.fdisc_accepts++; + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); + break; + + case BFA_STATUS_INVALID_MAC: + /* Only for CNA */ + vport->vport_stats.fdisc_acc_bad++; + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + + break; + + case BFA_STATUS_EPROTOCOL: + switch (vport->lps->ext_status) { + case BFA_EPROTO_BAD_ACCEPT: + vport->vport_stats.fdisc_acc_bad++; + break; + + case BFA_EPROTO_UNKNOWN_RSP: + vport->vport_stats.fdisc_unknown_rsp++; + break; + + default: + break; + } + + if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + else + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED); + + break; + + case BFA_STATUS_ETIMER: + vport->vport_stats.fdisc_timeouts++; + if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + else + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED); + break; + + case BFA_STATUS_FABRIC_RJT: + vport->vport_stats.fdisc_rejects++; + bfa_fcs_vport_fdisc_rejected(vport); + break; + + 
default: + vport->vport_stats.fdisc_rsp_err++; + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + } +} + +/* + * LOGO response + */ +void +bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg) +{ + struct bfa_fcs_vport_s *vport = uarg; + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); +} + +/* + * Received clear virtual link + */ +void +bfa_cb_lps_cvl_event(void *bfad, void *uarg) +{ + struct bfa_fcs_vport_s *vport = uarg; + + /* Send an Offline followed by an ONLINE */ + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); +} diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c new file mode 100644 index 000000000..c21aa37b8 --- /dev/null +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -0,0 +1,3449 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +/* + * rport.c Remote port implementation. + */ + +#include "bfad_drv.h" +#include "bfad_im.h" +#include "bfa_fcs.h" +#include "bfa_fcbuild.h" + +BFA_TRC_FILE(FCS, RPORT); + +static u32 +bfa_fcs_rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000; + /* In millisecs */ +/* + * bfa_fcs_rport_max_logins is max count of bfa_fcs_rports + * whereas DEF_CFG_NUM_RPORTS is max count of bfa_rports + */ +static u32 bfa_fcs_rport_max_logins = BFA_FCS_MAX_RPORT_LOGINS; + +/* + * forward declarations + */ +static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc( + struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid); +static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport); +static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport); +static void bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport); +static void bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport); +static void bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport); +static void bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport); +static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, + struct fc_logi_s *plogi); +static void bfa_fcs_rport_timeout(void *arg); +static void bfa_fcs_rport_send_plogi(void *rport_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_rport_send_plogiacc(void *rport_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_rport_plogi_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs); +static void bfa_fcs_rport_send_adisc(void *rport_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_rport_adisc_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs); +static void bfa_fcs_rport_send_nsdisc(void *rport_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_rport_gidpn_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs); +static void bfa_fcs_rport_gpnid_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs); +static void bfa_fcs_rport_send_logo(void *rport_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_rport_send_logo_acc(void *rport_cbarg); +static void 
bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport, + struct fchs_s *rx_fchs, u16 len); +static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, + struct fchs_s *rx_fchs, u8 reason_code, + u8 reason_code_expl); +static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, + struct fchs_s *rx_fchs, u16 len); +static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); +static void bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport); + +static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_adisc_online_sending( + struct bfa_fcs_rport_s *rport, enum rport_event event); +static void bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s + *rport, enum rport_event event); +static void bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport, + enum rport_event event); + +static struct bfa_sm_table_s rport_sm_table[] = { + {BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT}, + {BFA_SM(bfa_fcs_rport_sm_plogi_sending), BFA_RPORT_PLOGI}, + {BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE}, + 
{BFA_SM(bfa_fcs_rport_sm_plogi_retry), BFA_RPORT_PLOGI_RETRY}, + {BFA_SM(bfa_fcs_rport_sm_plogi), BFA_RPORT_PLOGI}, + {BFA_SM(bfa_fcs_rport_sm_fc4_fcs_online), BFA_RPORT_ONLINE}, + {BFA_SM(bfa_fcs_rport_sm_hal_online), BFA_RPORT_ONLINE}, + {BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE}, + {BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY}, + {BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY}, + {BFA_SM(bfa_fcs_rport_sm_adisc_online_sending), BFA_RPORT_ADISC}, + {BFA_SM(bfa_fcs_rport_sm_adisc_online), BFA_RPORT_ADISC}, + {BFA_SM(bfa_fcs_rport_sm_adisc_offline_sending), BFA_RPORT_ADISC}, + {BFA_SM(bfa_fcs_rport_sm_adisc_offline), BFA_RPORT_ADISC}, + {BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV}, + {BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO}, + {BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE}, + {BFA_SM(bfa_fcs_rport_sm_hcb_offline), BFA_RPORT_OFFLINE}, + {BFA_SM(bfa_fcs_rport_sm_hcb_logorcv), BFA_RPORT_LOGORCV}, + {BFA_SM(bfa_fcs_rport_sm_hcb_logosend), BFA_RPORT_LOGO}, + {BFA_SM(bfa_fcs_rport_sm_logo_sending), BFA_RPORT_LOGO}, + {BFA_SM(bfa_fcs_rport_sm_offline), BFA_RPORT_OFFLINE}, + {BFA_SM(bfa_fcs_rport_sm_nsdisc_sending), BFA_RPORT_NSDISC}, + {BFA_SM(bfa_fcs_rport_sm_nsdisc_retry), BFA_RPORT_NSDISC}, + {BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC}, +}; + +/* + * Beginning state. + */ +static void +bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_PLOGI_SEND: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); + rport->plogi_retries = 0; + bfa_fcs_rport_send_plogi(rport, NULL); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); + bfa_fcs_rport_hal_online(rport); + break; + + case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_ADDRESS_DISC: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * PLOGI is being sent. 
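+ * Awaiting the FCXP send completion before moving to the PLOGI response wait.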
+ */ +static void +bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FCXP_SENT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_SCN_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_FAB_SCN: + /* query the NS */ + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) != + BFA_PORT_TOPOLOGY_LOOP)); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + + case RPSM_EVENT_LOGO_IMP: + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * PLOGI is being sent. + */ +static void +bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FCXP_SENT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); + bfa_fcs_rport_fcs_online_action(rport); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + case RPSM_EVENT_PLOGI_COMP: + case RPSM_EVENT_FAB_SCN: + /* + * Ignore, SCN is possibly online notification. + */ + break; + + case RPSM_EVENT_SCN_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_ADDRESS_CHANGE: + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + + case RPSM_EVENT_LOGO_IMP: + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_HCB_OFFLINE: + /* + * Ignore BFA callback, on a PLOGI receive we call bfa offline. + */ + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * PLOGI is sent. 
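+ * A retry timer is running after a failed or rejected attempt; the PLOGI is
+ * resent when the timer expires.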
+ */ +static void +bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_TIMEOUT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); + bfa_fcs_rport_send_plogi(rport, NULL); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PRLO_RCVD: + case RPSM_EVENT_LOGO_RCVD: + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_SCN_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_stop(&rport->timer); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_FAB_SCN: + bfa_timer_stop(&rport->timer); + WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) != + BFA_PORT_TOPOLOGY_LOOP)); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + + case RPSM_EVENT_LOGO_IMP: + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_stop(&rport->timer); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_fcs_online_action(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * PLOGI is sent. 
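+ * Awaiting the PLOGI response (accept, reject or timeout) from the remote port.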
+ */ +static void +bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_ACCEPTED: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); + rport->plogi_retries = 0; + bfa_fcs_rport_fcs_online_action(rport); + break; + + case RPSM_EVENT_LOGO_RCVD: + bfa_fcs_rport_send_logo_acc(rport); + fallthrough; + case RPSM_EVENT_PRLO_RCVD: + if (rport->prlo == BFA_TRUE) + bfa_fcs_rport_send_prlo_acc(rport); + + bfa_fcxp_discard(rport->fcxp); + fallthrough; + case RPSM_EVENT_FAILED: + if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) { + rport->plogi_retries++; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + BFA_FCS_RETRY_TIMEOUT); + } else { + bfa_stats(rport->port, rport_del_max_plogi_retry); + rport->old_pid = rport->pid; + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + } + break; + + case RPSM_EVENT_SCN_ONLINE: + break; + + case RPSM_EVENT_SCN_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_discard(rport->fcxp); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_PLOGI_RETRY: + rport->plogi_retries = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + (FC_RA_TOV * 1000)); + break; + + case RPSM_EVENT_LOGO_IMP: + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_discard(rport->fcxp); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_FAB_SCN: + bfa_fcxp_discard(rport->fcxp); + WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) != + BFA_PORT_TOPOLOGY_LOOP)); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_fcs_online_action(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * PLOGI is done. 
Await bfa_fcs_itnim to ascertain the scsi function + */ +static void +bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FC4_FCS_ONLINE: + if (rport->scsi_function == BFA_RPORT_INITIATOR) { + if (!BFA_FCS_PID_IS_WKA(rport->pid)) + bfa_fcs_rpf_rport_online(rport); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_online); + break; + } + + if (!rport->bfa_rport) + rport->bfa_rport = + bfa_rport_create(rport->fcs->bfa, rport); + + if (rport->bfa_rport) { + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); + bfa_fcs_rport_hal_online(rport); + } else { + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + bfa_fcs_rport_fcs_offline_action(rport); + } + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + rport->plogi_pending = BFA_TRUE; + bfa_fcs_rport_fcs_offline_action(rport); + break; + + case RPSM_EVENT_PLOGI_COMP: + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_FAB_SCN: + case RPSM_EVENT_SCN_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcs_rport_fcs_offline_action(rport); + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); + bfa_fcs_rport_fcs_offline_action(rport); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + bfa_fcs_rport_fcs_offline_action(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + break; + } +} + +/* + * PLOGI is complete. Awaiting BFA rport online callback. FC-4s + * are offline. + */ +static void +bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_HCB_ONLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_online); + bfa_fcs_rport_hal_online_action(rport); + break; + + case RPSM_EVENT_PLOGI_COMP: + break; + + case RPSM_EVENT_PRLO_RCVD: + case RPSM_EVENT_LOGO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); + bfa_fcs_rport_fcs_offline_action(rport); + break; + + case RPSM_EVENT_FAB_SCN: + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_SCN_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcs_rport_fcs_offline_action(rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + rport->plogi_pending = BFA_TRUE; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcs_rport_fcs_offline_action(rport); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + bfa_fcs_rport_fcs_offline_action(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * Rport is ONLINE. FC-4s active. 
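+ * A fabric SCN triggers re-validation via an NS query on switched fabrics or
+ * an ADISC otherwise.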
+ */ +static void +bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FAB_SCN: + if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_nsquery_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + } else { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_adisc_online_sending); + bfa_fcs_rport_send_adisc(rport, NULL); + } + break; + + case RPSM_EVENT_PLOGI_RCVD: + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_SCN_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcs_rport_hal_offline_action(rport); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + bfa_fcs_rport_hal_offline_action(rport); + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); + bfa_fcs_rport_hal_offline_action(rport); + break; + + case RPSM_EVENT_SCN_ONLINE: + case RPSM_EVENT_PLOGI_COMP: + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * An SCN event is received in ONLINE state. NS query is being sent + * prior to ADISC authentication with rport. FC-4s are paused. + */ +static void +bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FCXP_SENT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsquery); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_hal_offline_action(rport); + break; + + case RPSM_EVENT_FAB_SCN: + /* + * ignore SCN, wait for response to query itself + */ + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_hal_offline_action(rport); + break; + + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_PLOGI_RCVD: + case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_hal_offline_action(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * An SCN event is received in ONLINE state. NS query is sent to rport. + * FC-4s are paused. 
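+ * A successful query leads to ADISC authentication; repeated failures take
+ * the rport offline.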
+ */ +static void +bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_ACCEPTED: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online_sending); + bfa_fcs_rport_send_adisc(rport, NULL); + break; + + case RPSM_EVENT_FAILED: + rport->ns_retries++; + if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_nsquery_sending); + bfa_fcs_rport_send_nsdisc(rport, NULL); + } else { + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcs_rport_hal_offline_action(rport); + } + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_hal_offline_action(rport); + break; + + case RPSM_EVENT_FAB_SCN: + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_hal_offline_action(rport); + break; + + case RPSM_EVENT_PLOGI_COMP: + case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_PLOGI_RCVD: + case RPSM_EVENT_LOGO_IMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_hal_offline_action(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * An SCN event is received in ONLINE state. ADISC is being sent for + * authenticating with rport. FC-4s are paused. + */ +static void +bfa_fcs_rport_sm_adisc_online_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FCXP_SENT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_hal_offline_action(rport); + break; + + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_ADDRESS_CHANGE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_hal_offline_action(rport); + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_hal_offline_action(rport); + break; + + case RPSM_EVENT_FAB_SCN: + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_hal_offline_action(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * An SCN event is received in ONLINE state. ADISC is to rport. + * FC-4s are paused. + */ +static void +bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_ACCEPTED: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_online); + break; + + case RPSM_EVENT_PLOGI_RCVD: + /* + * Too complex to cleanup FC-4 & rport and then acc to PLOGI. + * At least go offline when a PLOGI is received. 
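+ * The outstanding ADISC exchange is discarded and the offline path is taken.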
+ */ + bfa_fcxp_discard(rport->fcxp); + fallthrough; + + case RPSM_EVENT_FAILED: + case RPSM_EVENT_ADDRESS_CHANGE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcs_rport_hal_offline_action(rport); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_hal_offline_action(rport); + break; + + case RPSM_EVENT_FAB_SCN: + /* + * already processing RSCN + */ + break; + + case RPSM_EVENT_LOGO_IMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_hal_offline_action(rport); + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_hal_offline_action(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * ADISC is being sent for authenticating with rport + * Already did offline actions. + */ +static void +bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FCXP_SENT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_offline); + break; + + case RPSM_EVENT_DELETE: + case RPSM_EVENT_SCN_OFFLINE: + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, + &rport->fcxp_wqe); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * ADISC to rport + * Already did offline actions + */ +static void +bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_ACCEPTED: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); + bfa_fcs_rport_hal_online(rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_FAILED: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_DELETE: + case RPSM_EVENT_SCN_OFFLINE: + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_discard(rport->fcxp); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * Rport has sent LOGO. Awaiting FC-4 offline completion callback. 
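+ * FC-4 modules are taken offline before the HAL rport is shut down and the
+ * LOGO/PRLO accept is sent.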
+ */ +static void +bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FC4_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv); + bfa_fcs_rport_hal_offline(rport); + break; + + case RPSM_EVENT_DELETE: + if (rport->pid && (rport->prlo == BFA_TRUE)) + bfa_fcs_rport_send_prlo_acc(rport); + if (rport->pid && (rport->prlo == BFA_FALSE)) + bfa_fcs_rport_send_logo_acc(rport); + + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete); + break; + + case RPSM_EVENT_SCN_ONLINE: + case RPSM_EVENT_SCN_OFFLINE: + case RPSM_EVENT_HCB_ONLINE: + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + case RPSM_EVENT_ADDRESS_CHANGE: + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * LOGO needs to be sent to rport. Awaiting FC-4 offline completion + * callback. + */ +static void +bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FC4_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); + bfa_fcs_rport_hal_offline(rport); + break; + + case RPSM_EVENT_LOGO_RCVD: + bfa_fcs_rport_send_logo_acc(rport); + fallthrough; + case RPSM_EVENT_PRLO_RCVD: + if (rport->prlo == BFA_TRUE) + bfa_fcs_rport_send_prlo_acc(rport); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete); + break; + + case RPSM_EVENT_HCB_ONLINE: + case RPSM_EVENT_DELETE: + /* Rport is being deleted */ + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * Rport is going offline. Awaiting FC-4 offline completion callback. + */ +static void +bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FC4_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); + bfa_fcs_rport_hal_offline(rport); + break; + + case RPSM_EVENT_SCN_ONLINE: + break; + case RPSM_EVENT_LOGO_RCVD: + /* + * Rport is going offline. Just ack the logo + */ + bfa_fcs_rport_send_logo_acc(rport); + break; + + case RPSM_EVENT_PRLO_RCVD: + bfa_fcs_rport_send_prlo_acc(rport); + break; + + case RPSM_EVENT_SCN_OFFLINE: + case RPSM_EVENT_HCB_ONLINE: + case RPSM_EVENT_FAB_SCN: + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_ADDRESS_CHANGE: + /* + * rport is already going offline. + * SCN - ignore and wait till transitioning to offline state + */ + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * Rport is offline. FC-4s are offline. Awaiting BFA rport offline + * callback. 
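+ * Once the callback arrives the rport is either re-discovered (NS query,
+ * ADISC or PLOGI depending on topology) or parked offline awaiting deletion.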
+ */ +static void +bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_HCB_OFFLINE: + if (bfa_fcs_lport_is_online(rport->port) && + (rport->plogi_pending)) { + rport->plogi_pending = BFA_FALSE; + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + } + fallthrough; + + case RPSM_EVENT_ADDRESS_CHANGE: + if (!bfa_fcs_lport_is_online(rport->port)) { + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + } + if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + } else if (bfa_fcport_get_topology(rport->port->fcs->bfa) == + BFA_PORT_TOPOLOGY_LOOP) { + if (rport->scn_online) { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_adisc_offline_sending); + bfa_fcs_rport_send_adisc(rport, NULL); + } else { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_offline); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + } + } else { + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); + rport->plogi_retries = 0; + bfa_fcs_rport_send_plogi(rport, NULL); + } + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_SCN_ONLINE: + case RPSM_EVENT_SCN_OFFLINE: + case RPSM_EVENT_FAB_SCN: + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + case RPSM_EVENT_PLOGI_RCVD: + case RPSM_EVENT_LOGO_IMP: + /* + * Ignore, already offline. + */ + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * Rport is offline. FC-4s are offline. Awaiting BFA rport offline + * callback to send LOGO accept. + */ +static void +bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_HCB_OFFLINE: + case RPSM_EVENT_ADDRESS_CHANGE: + if (rport->pid && (rport->prlo == BFA_TRUE)) + bfa_fcs_rport_send_prlo_acc(rport); + if (rport->pid && (rport->prlo == BFA_FALSE)) + bfa_fcs_rport_send_logo_acc(rport); + /* + * If the lport is online and if the rport is not a well + * known address port, + * we try to re-discover the r-port. + */ + if (bfa_fcs_lport_is_online(rport->port) && + (!BFA_FCS_PID_IS_WKA(rport->pid))) { + if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + } else { + /* For N2N Direct Attach, try to re-login */ + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_plogi_sending); + rport->plogi_retries = 0; + bfa_fcs_rport_send_plogi(rport, NULL); + } + } else { + /* + * if it is not a well known address, reset the + * pid to 0. 
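+ * The rport then idles in the offline state until the delete timer fires.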
+ */ + if (!BFA_FCS_PID_IS_WKA(rport->pid)) + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + } + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending); + if (rport->pid && (rport->prlo == BFA_TRUE)) + bfa_fcs_rport_send_prlo_acc(rport); + if (rport->pid && (rport->prlo == BFA_FALSE)) + bfa_fcs_rport_send_logo_acc(rport); + break; + + case RPSM_EVENT_LOGO_IMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); + break; + + case RPSM_EVENT_SCN_ONLINE: + case RPSM_EVENT_SCN_OFFLINE: + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + /* + * Ignore - already processing a LOGO. + */ + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * Rport is being deleted. FC-4s are offline. + * Awaiting BFA rport offline + * callback to send LOGO. + */ +static void +bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_HCB_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_logo_sending); + bfa_fcs_rport_send_logo(rport, NULL); + break; + + case RPSM_EVENT_LOGO_RCVD: + bfa_fcs_rport_send_logo_acc(rport); + fallthrough; + case RPSM_EVENT_PRLO_RCVD: + if (rport->prlo == BFA_TRUE) + bfa_fcs_rport_send_prlo_acc(rport); + + bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending); + break; + + case RPSM_EVENT_SCN_ONLINE: + case RPSM_EVENT_SCN_OFFLINE: + case RPSM_EVENT_ADDRESS_CHANGE: + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * Rport is being deleted. FC-4s are offline. LOGO is being sent. + */ +static void +bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FCXP_SENT: + /* Once LOGO is sent, we donot wait for the response */ + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_SCN_ONLINE: + case RPSM_EVENT_SCN_OFFLINE: + case RPSM_EVENT_FAB_SCN: + case RPSM_EVENT_ADDRESS_CHANGE: + break; + + case RPSM_EVENT_LOGO_RCVD: + bfa_fcs_rport_send_logo_acc(rport); + fallthrough; + case RPSM_EVENT_PRLO_RCVD: + if (rport->prlo == BFA_TRUE) + bfa_fcs_rport_send_prlo_acc(rport); + + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_free(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * Rport is offline. FC-4s are offline. BFA rport is offline. + * Timer active to delete stale rport. 
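+ * The rport is freed on timeout unless it is re-discovered or a new PLOGI
+ * arrives first.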
+ */ +static void +bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_TIMEOUT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_FAB_SCN: + case RPSM_EVENT_ADDRESS_CHANGE: + bfa_timer_stop(&rport->timer); + WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) != + BFA_PORT_TOPOLOGY_LOOP)); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_SCN_OFFLINE: + break; + + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_fcs_online_action(rport); + break; + + case RPSM_EVENT_SCN_ONLINE: + bfa_timer_stop(&rport->timer); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); + bfa_fcs_rport_send_plogi(rport, NULL); + break; + + case RPSM_EVENT_PLOGI_SEND: + bfa_timer_stop(&rport->timer); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); + rport->plogi_retries = 0; + bfa_fcs_rport_send_plogi(rport, NULL); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * Rport address has changed. Nameserver discovery request is being sent. + */ +static void +bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FCXP_SENT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sent); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_FAB_SCN: + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + case RPSM_EVENT_PLOGI_SEND: + break; + + case RPSM_EVENT_ADDRESS_CHANGE: + rport->ns_retries = 0; /* reset the retry count */ + break; + + case RPSM_EVENT_LOGO_IMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_fcs_online_action(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * Nameserver discovery failed. Waiting for timeout to retry. 
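+ * The name server query is reissued when the retry timer expires.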
+ */ +static void +bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_TIMEOUT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + + case RPSM_EVENT_FAB_SCN: + case RPSM_EVENT_ADDRESS_CHANGE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + bfa_timer_stop(&rport->timer); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_LOGO_IMP: + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_stop(&rport->timer); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_LOGO_RCVD: + bfa_fcs_rport_send_logo_acc(rport); + break; + case RPSM_EVENT_PRLO_RCVD: + bfa_fcs_rport_send_prlo_acc(rport); + break; + + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_fcs_online_action(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * Rport address has changed. Nameserver discovery request is sent. + */ +static void +bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_ACCEPTED: + case RPSM_EVENT_ADDRESS_CHANGE: + if (rport->pid) { + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); + bfa_fcs_rport_send_plogi(rport, NULL); + } else { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + } + break; + + case RPSM_EVENT_FAILED: + rport->ns_retries++; + if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_nsdisc_sending); + bfa_fcs_rport_send_nsdisc(rport, NULL); + } else { + rport->old_pid = rport->pid; + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + } + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_LOGO_IMP: + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_discard(rport->fcxp); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + + case RPSM_EVENT_PRLO_RCVD: + bfa_fcs_rport_send_prlo_acc(rport); + break; + case RPSM_EVENT_FAB_SCN: + /* + * ignore, wait for NS query response + */ + break; + + case RPSM_EVENT_LOGO_RCVD: + /* + * Not logged-in yet. Accept LOGO. 
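+ * No state change is made; the name server response is still awaited.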
+ */ + bfa_fcs_rport_send_logo_acc(rport); + break; + + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_fcs_online_action(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * Rport needs to be deleted + * waiting for ITNIM clean up to finish + */ +static void +bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FC4_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending); + bfa_fcs_rport_hal_offline(rport); + break; + + case RPSM_EVENT_DELETE: + case RPSM_EVENT_PLOGI_RCVD: + /* Ignore these events */ + break; + + default: + bfa_sm_fault(rport->fcs, event); + break; + } +} + +/* + * RPort needs to be deleted + * waiting for BFA/FW to finish current processing + */ +static void +bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_HCB_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_DELETE: + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_PLOGI_RCVD: + /* Ignore these events */ + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * fcs_rport_private FCS RPORT provate functions + */ + +static void +bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_rport_s *rport = rport_cbarg; + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(rport->fcs, rport->pwwn); + + fcxp = fcxp_alloced ? fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, + bfa_fcs_rport_send_plogi, rport, BFA_TRUE); + return; + } + rport->fcxp = fcxp; + + len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, + bfa_fcs_lport_get_fcid(port), 0, + port->port_cfg.pwwn, port->port_cfg.nwwn, + bfa_fcport_get_maxfrsize(port->fcs->bfa), + bfa_fcport_get_rx_bbcredit(port->fcs->bfa)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response, + (void *)rport, FC_MAX_PDUSZ, FC_ELS_TOV); + + rport->stats.plogis++; + bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); +} + +static void +bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + struct fc_logi_s *plogi_rsp; + struct fc_ls_rjt_s *ls_rjt; + struct bfa_fcs_rport_s *twin; + struct list_head *qe; + + bfa_trc(rport->fcs, rport->pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(rport->fcs, req_status); + rport->stats.plogi_failed++; + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); + return; + } + + plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp); + + /* + * Check for failure first. 
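+ * An LS_RJT with reason 'unable to perform command' and explanation
+ * 'insufficient resources' triggers a PLOGI retry; other rejects fail the login.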
+ */ + if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) { + ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); + + bfa_trc(rport->fcs, ls_rjt->reason_code); + bfa_trc(rport->fcs, ls_rjt->reason_code_expl); + + if ((ls_rjt->reason_code == FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD) && + (ls_rjt->reason_code_expl == FC_LS_RJT_EXP_INSUFF_RES)) { + rport->stats.rjt_insuff_res++; + bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RETRY); + return; + } + + rport->stats.plogi_rejects++; + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); + return; + } + + /* + * PLOGI is complete. Make sure this device is not one of the known + * device with a new FC port address. + */ + list_for_each(qe, &rport->port->rport_q) { + twin = (struct bfa_fcs_rport_s *) qe; + if (twin == rport) + continue; + if (!rport->pwwn && (plogi_rsp->port_name == twin->pwwn)) { + bfa_trc(rport->fcs, twin->pid); + bfa_trc(rport->fcs, rport->pid); + + /* Update plogi stats in twin */ + twin->stats.plogis += rport->stats.plogis; + twin->stats.plogi_rejects += + rport->stats.plogi_rejects; + twin->stats.plogi_timeouts += + rport->stats.plogi_timeouts; + twin->stats.plogi_failed += + rport->stats.plogi_failed; + twin->stats.plogi_rcvd += rport->stats.plogi_rcvd; + twin->stats.plogi_accs++; + + bfa_sm_send_event(rport, RPSM_EVENT_DELETE); + + bfa_fcs_rport_update(twin, plogi_rsp); + twin->pid = rsp_fchs->s_id; + bfa_sm_send_event(twin, RPSM_EVENT_PLOGI_COMP); + return; + } + } + + /* + * Normal login path -- no evil twins. + */ + rport->stats.plogi_accs++; + bfa_fcs_rport_update(rport, plogi_rsp); + bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); +} + +static void +bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_rport_s *rport = rport_cbarg; + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->reply_oxid); + + fcxp = fcxp_alloced ? fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, + bfa_fcs_rport_send_plogiacc, rport, BFA_FALSE); + return; + } + rport->fcxp = fcxp; + + len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rport->pid, bfa_fcs_lport_get_fcid(port), + rport->reply_oxid, port->port_cfg.pwwn, + port->port_cfg.nwwn, + bfa_fcport_get_maxfrsize(port->fcs->bfa), + bfa_fcport_get_rx_bbcredit(port->fcs->bfa)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); + + bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); +} + +static void +bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_rport_s *rport = rport_cbarg; + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(rport->fcs, rport->pwwn); + + fcxp = fcxp_alloced ? 
fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, + bfa_fcs_rport_send_adisc, rport, BFA_TRUE); + return; + } + rport->fcxp = fcxp; + + len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, + bfa_fcs_lport_get_fcid(port), 0, + port->port_cfg.pwwn, port->port_cfg.nwwn); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, bfa_fcs_rport_adisc_response, + rport, FC_MAX_PDUSZ, FC_ELS_TOV); + + rport->stats.adisc_sent++; + bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); +} + +static void +bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + void *pld = bfa_fcxp_get_rspbuf(fcxp); + struct fc_ls_rjt_s *ls_rjt; + + if (req_status != BFA_STATUS_OK) { + bfa_trc(rport->fcs, req_status); + rport->stats.adisc_failed++; + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); + return; + } + + if (fc_adisc_rsp_parse((struct fc_adisc_s *)pld, rsp_len, rport->pwwn, + rport->nwwn) == FC_PARSE_OK) { + rport->stats.adisc_accs++; + bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); + return; + } + + rport->stats.adisc_rejects++; + ls_rjt = pld; + bfa_trc(rport->fcs, ls_rjt->els_cmd.els_code); + bfa_trc(rport->fcs, ls_rjt->reason_code); + bfa_trc(rport->fcs, ls_rjt->reason_code_expl); + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); +} + +static void +bfa_fcs_rport_send_nsdisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_rport_s *rport = rport_cbarg; + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + int len; + bfa_cb_fcxp_send_t cbfn; + + bfa_trc(rport->fcs, rport->pid); + + fcxp = fcxp_alloced ? fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, + bfa_fcs_rport_send_nsdisc, rport, BFA_TRUE); + return; + } + rport->fcxp = fcxp; + + if (rport->pwwn) { + len = fc_gidpn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), 0, rport->pwwn); + cbfn = bfa_fcs_rport_gidpn_response; + } else { + len = fc_gpnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), 0, rport->pid); + cbfn = bfa_fcs_rport_gpnid_response; + } + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, cbfn, + (void *)rport, FC_MAX_PDUSZ, FC_FCCT_TOV); + + bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); +} + +static void +bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + struct ct_hdr_s *cthdr; + struct fcgs_gidpn_resp_s *gidpn_rsp; + struct bfa_fcs_rport_s *twin; + struct list_head *qe; + + bfa_trc(rport->fcs, rport->pwwn); + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + /* Check if the pid is the same as before. */ + gidpn_rsp = (struct fcgs_gidpn_resp_s *) (cthdr + 1); + + if (gidpn_rsp->dap == rport->pid) { + /* Device is online */ + bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); + } else { + /* + * Device's PID has changed. We need to cleanup + * and re-login. 
If there is another device with + * the the newly discovered pid, send an scn notice + * so that its new pid can be discovered. + */ + list_for_each(qe, &rport->port->rport_q) { + twin = (struct bfa_fcs_rport_s *) qe; + if (twin == rport) + continue; + if (gidpn_rsp->dap == twin->pid) { + bfa_trc(rport->fcs, twin->pid); + bfa_trc(rport->fcs, rport->pid); + + twin->pid = 0; + bfa_sm_send_event(twin, + RPSM_EVENT_ADDRESS_CHANGE); + } + } + rport->pid = gidpn_rsp->dap; + bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_CHANGE); + } + return; + } + + /* + * Reject Response + */ + switch (cthdr->reason_code) { + case CT_RSN_LOGICAL_BUSY: + /* + * Need to retry + */ + bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT); + break; + + case CT_RSN_UNABLE_TO_PERF: + /* + * device doesn't exist : Start timer to cleanup this later. + */ + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); + break; + + default: + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); + break; + } +} + +static void +bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + struct ct_hdr_s *cthdr; + + bfa_trc(rport->fcs, rport->pwwn); + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); + return; + } + + /* + * Reject Response + */ + switch (cthdr->reason_code) { + case CT_RSN_LOGICAL_BUSY: + /* + * Need to retry + */ + bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT); + break; + + case CT_RSN_UNABLE_TO_PERF: + /* + * device doesn't exist : Start timer to cleanup this later. + */ + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); + break; + + default: + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); + break; + } +} + +/* + * Called to send a logout to the rport. + */ +static void +bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_rport_s *rport = rport_cbarg; + struct bfa_fcs_lport_s *port; + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + u16 len; + + bfa_trc(rport->fcs, rport->pid); + + port = rport->port; + + fcxp = fcxp_alloced ? fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, + bfa_fcs_rport_send_logo, rport, BFA_FALSE); + return; + } + rport->fcxp = fcxp; + + len = fc_logo_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, + bfa_fcs_lport_get_fcid(port), 0, + bfa_fcs_lport_get_pwwn(port)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, NULL, + rport, FC_MAX_PDUSZ, FC_ELS_TOV); + + rport->stats.logos++; + bfa_fcxp_discard(rport->fcxp); + bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); +} + +/* + * Send ACC for a LOGO received. 
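+ * The accept is sent best effort: no response callback is registered and an
+ * exchange allocation failure is silently dropped.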
+ */ +static void +bfa_fcs_rport_send_logo_acc(void *rport_cbarg) +{ + struct bfa_fcs_rport_s *rport = rport_cbarg; + struct bfa_fcs_lport_s *port; + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + u16 len; + + bfa_trc(rport->fcs, rport->pid); + + port = rport->port; + + fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); + if (!fcxp) + return; + + rport->stats.logo_rcvd++; + len = fc_logo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rport->pid, bfa_fcs_lport_get_fcid(port), + rport->reply_oxid); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); +} + +/* + * brief + * This routine will be called by bfa_timer on timer timeouts. + * + * param[in] rport - pointer to bfa_fcs_lport_ns_t. + * param[out] rport_status - pointer to return vport status in + * + * return + * void + * + * Special Considerations: + * + * note + */ +static void +bfa_fcs_rport_timeout(void *arg) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) arg; + + rport->stats.plogi_timeouts++; + bfa_stats(rport->port, rport_plogi_timeouts); + bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT); +} + +static void +bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport, + struct fchs_s *rx_fchs, u16 len) +{ + struct bfa_fcxp_s *fcxp; + struct fchs_s fchs; + struct bfa_fcs_lport_s *port = rport->port; + struct fc_prli_s *prli; + + bfa_trc(port->fcs, rx_fchs->s_id); + bfa_trc(port->fcs, rx_fchs->d_id); + + rport->stats.prli_rcvd++; + + /* + * We are in Initiator Mode + */ + prli = (struct fc_prli_s *) (rx_fchs + 1); + + if (prli->parampage.servparams.target) { + /* + * PRLI from a target ? + * Send the Acc. + * PRLI sent by us will be used to transition the IT nexus, + * once the response is received from the target. 
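+ * Here the peer is marked as a SCSI target; the accept is built below.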
+ */ + bfa_trc(port->fcs, rx_fchs->s_id); + rport->scsi_function = BFA_RPORT_TARGET; + } else { + bfa_trc(rport->fcs, prli->parampage.type); + rport->scsi_function = BFA_RPORT_INITIATOR; + bfa_fcs_itnim_is_initiator(rport->itnim); + } + + fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); + if (!fcxp) + return; + + len = fc_prli_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, port->port_cfg.roles); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); +} + +static void +bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport, + struct fchs_s *rx_fchs, u16 len) +{ + struct bfa_fcxp_s *fcxp; + struct fchs_s fchs; + struct bfa_fcs_lport_s *port = rport->port; + struct fc_rpsc_speed_info_s speeds; + struct bfa_port_attr_s pport_attr; + + bfa_trc(port->fcs, rx_fchs->s_id); + bfa_trc(port->fcs, rx_fchs->d_id); + + rport->stats.rpsc_rcvd++; + speeds.port_speed_cap = + RPSC_SPEED_CAP_1G | RPSC_SPEED_CAP_2G | RPSC_SPEED_CAP_4G | + RPSC_SPEED_CAP_8G; + + /* + * get curent speed from pport attributes from BFA + */ + bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); + + speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed); + + fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); + if (!fcxp) + return; + + len = fc_rpsc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, &speeds); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); +} + +static void +bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, + struct fchs_s *rx_fchs, u16 len) +{ + struct bfa_fcxp_s *fcxp; + struct fchs_s fchs; + struct bfa_fcs_lport_s *port = rport->port; + + bfa_trc(port->fcs, rx_fchs->s_id); + bfa_trc(port->fcs, rx_fchs->d_id); + + rport->stats.adisc_rcvd++; + + /* + * Accept if the itnim for this rport is online. + * Else reject the ADISC. 
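+ * The reject uses reason 'unable to perform command' with the 'login
+ * required' explanation.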
+ */ + if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) { + + fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); + if (!fcxp) + return; + + len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, port->port_cfg.pwwn, + port->port_cfg.nwwn); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, + BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); + } else { + rport->stats.adisc_rejected++; + bfa_fcs_rport_send_ls_rjt(rport, rx_fchs, + FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD, + FC_LS_RJT_EXP_LOGIN_REQUIRED); + } +} + +static void +bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct bfa_rport_info_s rport_info; + + rport_info.pid = rport->pid; + rport_info.local_pid = port->pid; + rport_info.lp_tag = port->lp_tag; + rport_info.vf_id = port->fabric->vf_id; + rport_info.vf_en = port->fabric->is_vf; + rport_info.fc_class = rport->fc_cos; + rport_info.cisc = rport->cisc; + rport_info.max_frmsz = rport->maxfrsize; + bfa_rport_online(rport->bfa_rport, &rport_info); +} + +static void +bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport) +{ + if (rport->bfa_rport) + bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); + else + bfa_cb_rport_offline(rport); +} + +static struct bfa_fcs_rport_s * +bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid) +{ + struct bfa_fcs_s *fcs = port->fcs; + struct bfa_fcs_rport_s *rport; + struct bfad_rport_s *rport_drv; + + /* + * allocate rport + */ + if (fcs->num_rport_logins >= bfa_fcs_rport_max_logins) { + bfa_trc(fcs, rpid); + return NULL; + } + + if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv) + != BFA_STATUS_OK) { + bfa_trc(fcs, rpid); + return NULL; + } + + /* + * Initialize r-port + */ + rport->port = port; + rport->fcs = fcs; + rport->rp_drv = rport_drv; + rport->pid = rpid; + rport->pwwn = pwwn; + rport->old_pid = 0; + + rport->bfa_rport = NULL; + + /* + * allocate FC-4s + */ + WARN_ON(!bfa_fcs_lport_is_initiator(port)); + + if (bfa_fcs_lport_is_initiator(port)) { + rport->itnim = bfa_fcs_itnim_create(rport); + if (!rport->itnim) { + bfa_trc(fcs, rpid); + kfree(rport_drv); + return NULL; + } + } + + bfa_fcs_lport_add_rport(port, rport); + fcs->num_rport_logins++; + + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + + /* Initialize the Rport Features(RPF) Sub Module */ + if (!BFA_FCS_PID_IS_WKA(rport->pid)) + bfa_fcs_rpf_init(rport); + + return rport; +} + + +static void +bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct bfa_fcs_s *fcs = port->fcs; + + /* + * - delete FC-4s + * - delete BFA rport + * - remove from queue of rports + */ + rport->plogi_pending = BFA_FALSE; + + if (bfa_fcs_lport_is_initiator(port)) { + bfa_fcs_itnim_delete(rport->itnim); + if (rport->pid != 0 && !BFA_FCS_PID_IS_WKA(rport->pid)) + bfa_fcs_rpf_rport_offline(rport); + } + + if (rport->bfa_rport) { + bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE); + rport->bfa_rport = NULL; + } + + bfa_fcs_lport_del_rport(port, rport); + fcs->num_rport_logins--; + kfree(rport->rp_drv); +} + +static void +bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport, + enum bfa_rport_aen_event event, + struct bfa_rport_aen_data_s *data) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; + struct bfa_aen_entry_s *aen_entry; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + 
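bfa_fcs_rport_alloc above refuses a new remote port once num_rport_logins reaches the configurable cap, and the counter is decremented again in bfa_fcs_rport_free. A minimal userspace sketch of that bookkeeping pattern, with assumed defaults and hypothetical names:

/* Illustrative sketch of the login-cap bookkeeping; not the driver's code. */
#include <stdio.h>
#include <stdlib.h>

static unsigned int max_logins = 1024;   /* analogous to bfa_fcs_rport_max_logins */
static unsigned int num_logins;

struct fake_rport { unsigned int pid; };

static struct fake_rport *rport_alloc(unsigned int pid)
{
    struct fake_rport *rp;

    if (num_logins >= max_logins)        /* refuse past the cap */
        return NULL;

    rp = malloc(sizeof(*rp));
    if (!rp)
        return NULL;

    rp->pid = pid;
    num_logins++;                        /* balanced in rport_free() */
    return rp;
}

static void rport_free(struct fake_rport *rp)
{
    if (!rp)
        return;
    num_logins--;
    free(rp);
}

int main(void)
{
    struct fake_rport *rp = rport_alloc(0x010203);

    printf("allocated=%d active=%u\n", rp != NULL, num_logins);
    rport_free(rp);
    return 0;
}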
return; + + if (event == BFA_RPORT_AEN_QOS_PRIO) + aen_entry->aen_data.rport.priv.qos = data->priv.qos; + else if (event == BFA_RPORT_AEN_QOS_FLOWID) + aen_entry->aen_data.rport.priv.qos = data->priv.qos; + + aen_entry->aen_data.rport.vf_id = rport->port->fabric->vf_id; + aen_entry->aen_data.rport.ppwwn = bfa_fcs_lport_get_pwwn( + bfa_fcs_get_base_port(rport->fcs)); + aen_entry->aen_data.rport.lpwwn = bfa_fcs_lport_get_pwwn(rport->port); + aen_entry->aen_data.rport.rpwwn = rport->pwwn; + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq, + BFA_AEN_CAT_RPORT, event); +} + +static void +bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport) +{ + if ((!rport->pid) || (!rport->pwwn)) { + bfa_trc(rport->fcs, rport->pid); + bfa_sm_fault(rport->fcs, rport->pid); + } + + bfa_sm_send_event(rport->itnim, BFA_FCS_ITNIM_SM_FCS_ONLINE); +} + +static void +bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; + char rpwwn_buf[BFA_STRING_32]; + + rport->stats.onlines++; + + if ((!rport->pid) || (!rport->pwwn)) { + bfa_trc(rport->fcs, rport->pid); + bfa_sm_fault(rport->fcs, rport->pid); + } + + if (bfa_fcs_lport_is_initiator(port)) { + bfa_fcs_itnim_brp_online(rport->itnim); + if (!BFA_FCS_PID_IS_WKA(rport->pid)) + bfa_fcs_rpf_rport_online(rport); + } + + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); + wwn2str(rpwwn_buf, rport->pwwn); + if (!BFA_FCS_PID_IS_WKA(rport->pid)) { + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Remote port (WWN = %s) online for logical port (WWN = %s)\n", + rpwwn_buf, lpwwn_buf); + bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL); + } +} + +static void +bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport) +{ + if (!BFA_FCS_PID_IS_WKA(rport->pid)) + bfa_fcs_rpf_rport_offline(rport); + + bfa_fcs_itnim_rport_offline(rport->itnim); +} + +static void +bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; + char rpwwn_buf[BFA_STRING_32]; + + if (!rport->bfa_rport) { + bfa_fcs_rport_fcs_offline_action(rport); + return; + } + + rport->stats.offlines++; + + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); + wwn2str(rpwwn_buf, rport->pwwn); + if (!BFA_FCS_PID_IS_WKA(rport->pid)) { + if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) { + BFA_LOG(KERN_ERR, bfad, bfa_log_level, + "Remote port (WWN = %s) connectivity lost for " + "logical port (WWN = %s)\n", + rpwwn_buf, lpwwn_buf); + bfa_fcs_rport_aen_post(rport, + BFA_RPORT_AEN_DISCONNECT, NULL); + } else { + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Remote port (WWN = %s) offlined by " + "logical port (WWN = %s)\n", + rpwwn_buf, lpwwn_buf); + bfa_fcs_rport_aen_post(rport, + BFA_RPORT_AEN_OFFLINE, NULL); + } + } + + if (bfa_fcs_lport_is_initiator(port)) { + bfa_fcs_itnim_rport_offline(rport->itnim); + if (!BFA_FCS_PID_IS_WKA(rport->pid)) + bfa_fcs_rpf_rport_offline(rport); + } +} + +/* + * Update rport parameters from PLOGI or PLOGI accept. 
+ */ +static void +bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi) +{ + bfa_fcs_lport_t *port = rport->port; + + /* + * - port name + * - node name + */ + rport->pwwn = plogi->port_name; + rport->nwwn = plogi->node_name; + + /* + * - class of service + */ + rport->fc_cos = 0; + if (plogi->class3.class_valid) + rport->fc_cos = FC_CLASS_3; + + if (plogi->class2.class_valid) + rport->fc_cos |= FC_CLASS_2; + + /* + * - CISC + * - MAX receive frame size + */ + rport->cisc = plogi->csp.cisc; + if (be16_to_cpu(plogi->class3.rxsz) < be16_to_cpu(plogi->csp.rxsz)) + rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz); + else + rport->maxfrsize = be16_to_cpu(plogi->csp.rxsz); + + bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred)); + bfa_trc(port->fcs, port->fabric->bb_credit); + /* + * Direct Attach P2P mode : + * This is to handle a bug (233476) in IBM targets in Direct Attach + * Mode. Basically, in FLOGI Accept the target would have + * erroneously set the BB Credit to the value used in the FLOGI + * sent by the HBA. It uses the correct value (its own BB credit) + * in PLOGI. + */ + if ((!bfa_fcs_fabric_is_switched(port->fabric)) && + (be16_to_cpu(plogi->csp.bbcred) < port->fabric->bb_credit)) { + + bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred)); + bfa_trc(port->fcs, port->fabric->bb_credit); + + port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred); + bfa_fcport_set_tx_bbcredit(port->fcs->bfa, + port->fabric->bb_credit); + } + +} + +/* + * Called to handle LOGO received from an existing remote port. + */ +static void +bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs) +{ + rport->reply_oxid = fchs->ox_id; + bfa_trc(rport->fcs, rport->reply_oxid); + + rport->prlo = BFA_FALSE; + rport->stats.logo_rcvd++; + bfa_sm_send_event(rport, RPSM_EVENT_LOGO_RCVD); +} + + + +/* + * fcs_rport_public FCS rport public interfaces + */ + +/* + * Called by bport/vport to create a remote port instance for a discovered + * remote device. + * + * @param[in] port - base port or vport + * @param[in] rpid - remote port ID + * + * @return None + */ +struct bfa_fcs_rport_s * +bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 rpid) +{ + struct bfa_fcs_rport_s *rport; + + bfa_trc(port->fcs, rpid); + rport = bfa_fcs_rport_alloc(port, WWN_NULL, rpid); + if (!rport) + return NULL; + + bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); + return rport; +} + +/* + * Called to create a rport for which only the wwn is known. + * + * @param[in] port - base port + * @param[in] rpwwn - remote port wwn + * + * @return None + */ +struct bfa_fcs_rport_s * +bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn) +{ + struct bfa_fcs_rport_s *rport; + bfa_trc(port->fcs, rpwwn); + rport = bfa_fcs_rport_alloc(port, rpwwn, 0); + if (!rport) + return NULL; + + bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); + return rport; +} +/* + * Called by bport in private loop topology to indicate that a + * rport has been discovered and plogi has been completed. + * + * @param[in] port - base port or vport + * @param[in] rpid - remote port ID + */ +void +bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs, + struct fc_logi_s *plogi) +{ + struct bfa_fcs_rport_s *rport; + + rport = bfa_fcs_rport_alloc(port, WWN_NULL, fchs->s_id); + if (!rport) + return; + + bfa_fcs_rport_update(rport, plogi); + + bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP); +} + +/* + * Called by bport/vport to handle PLOGI received from a new remote port. 
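bfa_fcs_rport_update above derives the class-of-service bitmap from the PLOGI class-validity flags and keeps the smaller of the common-service and class-3 receive sizes after byte-swapping the big-endian on-wire fields. A simplified sketch of that selection; the struct layout and the class bit values are illustrative, not the driver's fc_logi_s:

/* Illustrative sketch of the PLOGI parameter selection; fields simplified. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* ntohs()/htons() for the big-endian on-wire fields */

struct fake_plogi {
    uint16_t csp_rxsz_be;     /* common service params rx size, big-endian */
    uint16_t class3_rxsz_be;  /* class-3 rx size, big-endian */
    int class2_valid;
    int class3_valid;
};

int main(void)
{
    struct fake_plogi p = { htons(2112), htons(2048), 0, 1 };
    uint16_t csp = ntohs(p.csp_rxsz_be);
    uint16_t c3  = ntohs(p.class3_rxsz_be);
    unsigned int cos = 0;

    if (p.class3_valid)
        cos |= 0x04;          /* placeholder for FC_CLASS_3 */
    if (p.class2_valid)
        cos |= 0x02;          /* placeholder for FC_CLASS_2 */

    /* Use the smaller of the two advertised receive sizes. */
    uint16_t maxfrsize = (c3 < csp) ? c3 : csp;

    printf("cos=0x%x maxfrsize=%u\n", cos, (unsigned int)maxfrsize);
    return 0;
}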
+ * If an existing rport does a plogi, it will be handled separately. + */ +void +bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *fchs, + struct fc_logi_s *plogi) +{ + struct bfa_fcs_rport_s *rport; + + rport = bfa_fcs_rport_alloc(port, plogi->port_name, fchs->s_id); + if (!rport) + return; + + bfa_fcs_rport_update(rport, plogi); + + rport->reply_oxid = fchs->ox_id; + bfa_trc(rport->fcs, rport->reply_oxid); + + rport->stats.plogi_rcvd++; + bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); +} + +/* + * Called by bport/vport to handle PLOGI received from an existing + * remote port. + */ +void +bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, + struct fc_logi_s *plogi) +{ + /* + * @todo Handle P2P and initiator-initiator. + */ + + bfa_fcs_rport_update(rport, plogi); + + rport->reply_oxid = rx_fchs->ox_id; + bfa_trc(rport->fcs, rport->reply_oxid); + + rport->pid = rx_fchs->s_id; + bfa_trc(rport->fcs, rport->pid); + + rport->stats.plogi_rcvd++; + bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); +} + + +/* + * Called by bport/vport to notify SCN for the remote port + */ +void +bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport) +{ + rport->stats.rscns++; + bfa_sm_send_event(rport, RPSM_EVENT_FAB_SCN); +} + +/* + * brief + * This routine BFA callback for bfa_rport_online() call. + * + * param[in] cb_arg - rport struct. + * + * return + * void + * + * Special Considerations: + * + * note + */ +void +bfa_cb_rport_online(void *cbarg) +{ + + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + + bfa_trc(rport->fcs, rport->pwwn); + bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE); +} + +/* + * brief + * This routine BFA callback for bfa_rport_offline() call. + * + * param[in] rport - + * + * return + * void + * + * Special Considerations: + * + * note + */ +void +bfa_cb_rport_offline(void *cbarg) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + + bfa_trc(rport->fcs, rport->pwwn); + bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE); +} + +/* + * brief + * This routine is a static BFA callback when there is a QoS flow_id + * change notification + * + * param[in] rport - + * + * return + * void + * + * Special Considerations: + * + * note + */ +void +bfa_cb_rport_qos_scn_flowid(void *cbarg, + struct bfa_rport_qos_attr_s old_qos_attr, + struct bfa_rport_qos_attr_s new_qos_attr) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + struct bfa_rport_aen_data_s aen_data; + + bfa_trc(rport->fcs, rport->pwwn); + aen_data.priv.qos = new_qos_attr; + bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data); +} + +void +bfa_cb_rport_scn_online(struct bfa_s *bfa) +{ + struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs; + struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs); + struct bfa_fcs_rport_s *rp; + struct list_head *qe; + + list_for_each(qe, &port->rport_q) { + rp = (struct bfa_fcs_rport_s *) qe; + bfa_sm_send_event(rp, RPSM_EVENT_SCN_ONLINE); + rp->scn_online = BFA_TRUE; + } + + if (bfa_fcs_lport_is_online(port)) + bfa_fcs_lport_lip_scn_online(port); +} + +void +bfa_cb_rport_scn_no_dev(void *rport) +{ + struct bfa_fcs_rport_s *rp = rport; + + bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE); + rp->scn_online = BFA_FALSE; +} + +void +bfa_cb_rport_scn_offline(struct bfa_s *bfa) +{ + struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs; + struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs); + struct bfa_fcs_rport_s *rp; + struct list_head *qe; + + list_for_each(qe, 
&port->rport_q) { + rp = (struct bfa_fcs_rport_s *) qe; + bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE); + rp->scn_online = BFA_FALSE; + } +} + +/* + * brief + * This routine is a static BFA callback when there is a QoS priority + * change notification + * + * param[in] rport - + * + * return + * void + * + * Special Considerations: + * + * note + */ +void +bfa_cb_rport_qos_scn_prio(void *cbarg, + struct bfa_rport_qos_attr_s old_qos_attr, + struct bfa_rport_qos_attr_s new_qos_attr) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + struct bfa_rport_aen_data_s aen_data; + + bfa_trc(rport->fcs, rport->pwwn); + aen_data.priv.qos = new_qos_attr; + bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data); +} + +/* + * Called to process any unsolicted frames from this remote port + */ +void +bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, + struct fchs_s *fchs, u16 len) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct fc_els_cmd_s *els_cmd; + + bfa_trc(rport->fcs, fchs->s_id); + bfa_trc(rport->fcs, fchs->d_id); + bfa_trc(rport->fcs, fchs->type); + + if (fchs->type != FC_TYPE_ELS) + return; + + els_cmd = (struct fc_els_cmd_s *) (fchs + 1); + + bfa_trc(rport->fcs, els_cmd->els_code); + + switch (els_cmd->els_code) { + case FC_ELS_LOGO: + bfa_stats(port, plogi_rcvd); + bfa_fcs_rport_process_logo(rport, fchs); + break; + + case FC_ELS_ADISC: + bfa_stats(port, adisc_rcvd); + bfa_fcs_rport_process_adisc(rport, fchs, len); + break; + + case FC_ELS_PRLO: + bfa_stats(port, prlo_rcvd); + if (bfa_fcs_lport_is_initiator(port)) + bfa_fcs_fcpim_uf_recv(rport->itnim, fchs, len); + break; + + case FC_ELS_PRLI: + bfa_stats(port, prli_rcvd); + bfa_fcs_rport_process_prli(rport, fchs, len); + break; + + case FC_ELS_RPSC: + bfa_stats(port, rpsc_rcvd); + bfa_fcs_rport_process_rpsc(rport, fchs, len); + break; + + default: + bfa_stats(port, un_handled_els_rcvd); + bfa_fcs_rport_send_ls_rjt(rport, fchs, + FC_LS_RJT_RSN_CMD_NOT_SUPP, + FC_LS_RJT_EXP_NO_ADDL_INFO); + break; + } +} + +/* send best case acc to prlo */ +static void +bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + int len; + + bfa_trc(rport->fcs, rport->pid); + + fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); + if (!fcxp) + return; + len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rport->pid, bfa_fcs_lport_get_fcid(port), + rport->reply_oxid, 0); + + bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, + port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, + NULL, NULL, FC_MAX_PDUSZ, 0); +} + +/* + * Send a LS reject + */ +static void +bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, + u8 reason_code, u8 reason_code_expl) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + int len; + + bfa_trc(rport->fcs, rx_fchs->s_id); + + fcxp = bfa_fcs_fcxp_alloc(rport->fcs, BFA_FALSE); + if (!fcxp) + return; + + len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, reason_code, reason_code_expl); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, + BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); +} + +/* + * Return state of rport. 
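bfa_fcs_rport_uf_recv above dispatches unsolicited ELS frames on the command code and answers anything unrecognized with an LS_RJT (command not supported). A stripped-down sketch of that dispatch shape; the numeric command codes are shown for illustration only:

/* Illustrative dispatch sketch; command code values are for illustration. */
#include <stdio.h>
#include <stdint.h>

enum { ELS_LOGO = 0x05, ELS_PRLI = 0x20, ELS_PRLO = 0x21, ELS_ADISC = 0x52, ELS_RPSC = 0x7d };

static void handle_els(uint8_t els_code)
{
    switch (els_code) {
    case ELS_LOGO:  puts("process LOGO");  break;
    case ELS_ADISC: puts("process ADISC"); break;
    case ELS_PRLO:  puts("process PRLO");  break;
    case ELS_PRLI:  puts("process PRLI");  break;
    case ELS_RPSC:  puts("process RPSC");  break;
    default:
        /* Unknown command: send LS_RJT (command not supported). */
        puts("reject: command not supported");
        break;
    }
}

int main(void)
{
    handle_els(ELS_PRLI);
    handle_els(0x7f);
    return 0;
}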
+ */ +int +bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport) +{ + return bfa_sm_to_state(rport_sm_table, rport->sm); +} + + +/* + * brief + * Called by the Driver to set rport delete/ageout timeout + * + * param[in] rport timeout value in seconds. + * + * return None + */ +void +bfa_fcs_rport_set_del_timeout(u8 rport_tmo) +{ + /* convert to Millisecs */ + if (rport_tmo > 0) + bfa_fcs_rport_del_timeout = rport_tmo * 1000; +} +void +bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id) +{ + bfa_trc(rport->fcs, rport->pid); + + rport->prlo = BFA_TRUE; + rport->reply_oxid = ox_id; + bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD); +} + +/* + * Called by BFAD to set the max limit on number of bfa_fcs_rport allocation + * which limits number of concurrent logins to remote ports + */ +void +bfa_fcs_rport_set_max_logins(u32 max_logins) +{ + if (max_logins > 0) + bfa_fcs_rport_max_logins = max_logins; +} + +void +bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, + struct bfa_rport_attr_s *rport_attr) +{ + struct bfa_rport_qos_attr_s qos_attr; + struct bfa_fcs_lport_s *port = rport->port; + bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed; + struct bfa_port_attr_s port_attr; + + bfa_fcport_get_attr(rport->fcs->bfa, &port_attr); + + memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); + memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s)); + + rport_attr->pid = rport->pid; + rport_attr->pwwn = rport->pwwn; + rport_attr->nwwn = rport->nwwn; + rport_attr->cos_supported = rport->fc_cos; + rport_attr->df_sz = rport->maxfrsize; + rport_attr->state = bfa_fcs_rport_get_state(rport); + rport_attr->fc_cos = rport->fc_cos; + rport_attr->cisc = rport->cisc; + rport_attr->scsi_function = rport->scsi_function; + rport_attr->curr_speed = rport->rpf.rpsc_speed; + rport_attr->assigned_speed = rport->rpf.assigned_speed; + + if (rport->bfa_rport) { + qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority; + qos_attr.qos_flow_id = + cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id); + } + rport_attr->qos_attr = qos_attr; + + rport_attr->trl_enforced = BFA_FALSE; + if (bfa_fcport_is_ratelim(port->fcs->bfa) && + (rport->scsi_function == BFA_RPORT_TARGET)) { + if (rport_speed == BFA_PORT_SPEED_UNKNOWN) + rport_speed = + bfa_fcport_get_ratelim_speed(rport->fcs->bfa); + + if ((bfa_fcs_lport_get_rport_max_speed(port) != + BFA_PORT_SPEED_UNKNOWN) && (rport_speed < port_attr.speed)) + rport_attr->trl_enforced = BFA_TRUE; + } +} + +/* + * Remote port implementation. + */ + +/* + * fcs_rport_api FCS rport API. + */ + +struct bfa_fcs_rport_s * +bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) +{ + struct bfa_fcs_rport_s *rport; + + rport = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn); + if (rport == NULL) { + /* + * TBD Error handling + */ + } + + return rport; +} + +struct bfa_fcs_rport_s * +bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn) +{ + struct bfa_fcs_rport_s *rport; + + rport = bfa_fcs_lport_get_rport_by_nwwn(port, rnwwn); + if (rport == NULL) { + /* + * TBD Error handling + */ + } + + return rport; +} + +/* + * Remote port features (RPF) implementation. 
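The trl_enforced computation in bfa_fcs_rport_get_attr above flags rate-limit enforcement when rate limiting is enabled, the remote port is a SCSI target, and its speed (falling back to the configured limit when unknown) is below the local port speed. A simplified sketch of that decision, omitting the extra max-speed check the driver also performs; all names here are hypothetical:

/* Illustrative sketch of the rate-limit ("trl") enforcement decision. */
#include <stdio.h>

enum speed { SPD_UNKNOWN = 0, SPD_4G = 4, SPD_8G = 8, SPD_16G = 16 };

struct attr_in {
    int ratelim_enabled;       /* local port has rate limiting on */
    int rport_is_target;       /* remote port acts as a SCSI target */
    enum speed rport_speed;    /* from RPSC, may be unknown */
    enum speed ratelim_speed;  /* configured fallback speed */
    enum speed port_speed;     /* local port's current speed */
};

static int trl_enforced(struct attr_in a)
{
    enum speed rs = a.rport_speed;

    if (!a.ratelim_enabled || !a.rport_is_target)
        return 0;

    if (rs == SPD_UNKNOWN)     /* fall back to the configured limit */
        rs = a.ratelim_speed;

    return rs < a.port_speed;  /* slower target: enforce limiting */
}

int main(void)
{
    struct attr_in a = { 1, 1, SPD_4G, SPD_8G, SPD_16G };

    printf("trl_enforced=%d\n", trl_enforced(a));
    return 0;
}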
+ */ + +#define BFA_FCS_RPF_RETRIES (3) +#define BFA_FCS_RPF_RETRY_TIMEOUT (1000) /* 1 sec (In millisecs) */ + +static void bfa_fcs_rpf_send_rpsc2(void *rport_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_rpf_rpsc2_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); + +static void bfa_fcs_rpf_timeout(void *arg); + +/* + * fcs_rport_ftrs_sm FCS rport state machine events + */ + +enum rpf_event { + RPFSM_EVENT_RPORT_OFFLINE = 1, /* Rport offline */ + RPFSM_EVENT_RPORT_ONLINE = 2, /* Rport online */ + RPFSM_EVENT_FCXP_SENT = 3, /* Frame from has been sent */ + RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */ + RPFSM_EVENT_RPSC_COMP = 5, + RPFSM_EVENT_RPSC_FAIL = 6, + RPFSM_EVENT_RPSC_ERROR = 7, +}; + +static void bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, + enum rpf_event event); +static void bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, + enum rpf_event event); +static void bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, + enum rpf_event event); +static void bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, + enum rpf_event event); +static void bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, + enum rpf_event event); +static void bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, + enum rpf_event event); + +static void +bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) +{ + struct bfa_fcs_rport_s *rport = rpf->rport; + struct bfa_fcs_fabric_s *fabric = &rport->fcs->fabric; + + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPFSM_EVENT_RPORT_ONLINE: + /* Send RPSC2 to a Brocade fabric only. */ + if ((!BFA_FCS_PID_IS_WKA(rport->pid)) && + ((rport->port->fabric->lps->brcd_switch) || + (bfa_fcs_fabric_get_switch_oui(fabric) == + BFA_FCS_BRCD_SWITCH_OUI))) { + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); + rpf->rpsc_retries = 0; + bfa_fcs_rpf_send_rpsc2(rpf, NULL); + } + break; + + case RPFSM_EVENT_RPORT_OFFLINE: + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +static void +bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) +{ + struct bfa_fcs_rport_s *rport = rpf->rport; + + bfa_trc(rport->fcs, event); + + switch (event) { + case RPFSM_EVENT_FCXP_SENT: + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc); + break; + + case RPFSM_EVENT_RPORT_OFFLINE: + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe); + rpf->rpsc_retries = 0; + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +static void +bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) +{ + struct bfa_fcs_rport_s *rport = rpf->rport; + + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPFSM_EVENT_RPSC_COMP: + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); + /* Update speed info in f/w via BFA */ + if (rpf->rpsc_speed != BFA_PORT_SPEED_UNKNOWN) + bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed); + else if (rpf->assigned_speed != BFA_PORT_SPEED_UNKNOWN) + bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed); + break; + + case RPFSM_EVENT_RPSC_FAIL: + /* RPSC not supported by rport */ + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); + break; + + case RPFSM_EVENT_RPSC_ERROR: + /* need to retry...delayed a bit. 
*/ + if (rpf->rpsc_retries++ < BFA_FCS_RPF_RETRIES) { + bfa_timer_start(rport->fcs->bfa, &rpf->timer, + bfa_fcs_rpf_timeout, rpf, + BFA_FCS_RPF_RETRY_TIMEOUT); + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_retry); + } else { + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); + } + break; + + case RPFSM_EVENT_RPORT_OFFLINE: + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); + bfa_fcxp_discard(rpf->fcxp); + rpf->rpsc_retries = 0; + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +static void +bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) +{ + struct bfa_fcs_rport_s *rport = rpf->rport; + + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPFSM_EVENT_TIMEOUT: + /* re-send the RPSC */ + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); + bfa_fcs_rpf_send_rpsc2(rpf, NULL); + break; + + case RPFSM_EVENT_RPORT_OFFLINE: + bfa_timer_stop(&rpf->timer); + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); + rpf->rpsc_retries = 0; + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +static void +bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) +{ + struct bfa_fcs_rport_s *rport = rpf->rport; + + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPFSM_EVENT_RPORT_OFFLINE: + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); + rpf->rpsc_retries = 0; + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +static void +bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) +{ + struct bfa_fcs_rport_s *rport = rpf->rport; + + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPFSM_EVENT_RPORT_ONLINE: + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); + bfa_fcs_rpf_send_rpsc2(rpf, NULL); + break; + + case RPFSM_EVENT_RPORT_OFFLINE: + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} +/* + * Called when Rport is created. + */ +void +bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport) +{ + struct bfa_fcs_rpf_s *rpf = &rport->rpf; + + bfa_trc(rport->fcs, rport->pid); + rpf->rport = rport; + + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit); +} + +/* + * Called when Rport becomes online + */ +void +bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport) +{ + bfa_trc(rport->fcs, rport->pid); + + if (__fcs_min_cfg(rport->port->fcs)) + return; + + if (bfa_fcs_fabric_is_switched(rport->port->fabric)) + bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE); +} + +/* + * Called when Rport becomes offline + */ +void +bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport) +{ + bfa_trc(rport->fcs, rport->pid); + + if (__fcs_min_cfg(rport->port->fcs)) + return; + + rport->rpf.rpsc_speed = 0; + bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_OFFLINE); +} + +static void +bfa_fcs_rpf_timeout(void *arg) +{ + struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) arg; + struct bfa_fcs_rport_s *rport = rpf->rport; + + bfa_trc(rport->fcs, rport->pid); + bfa_sm_send_event(rpf, RPFSM_EVENT_TIMEOUT); +} + +static void +bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *)rpf_cbarg; + struct bfa_fcs_rport_s *rport = rpf->rport; + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(rport->fcs, rport->pwwn); + + fcxp = fcxp_alloced ? 
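On an RPSC error the rpf state machine above retries up to BFA_FCS_RPF_RETRIES times, waiting BFA_FCS_RPF_RETRY_TIMEOUT between attempts, and then proceeds without a speed. A userspace sketch of that bounded-retry shape, with usleep() standing in for the BFA timer and a made-up query function:

/* Illustrative bounded-retry sketch; usleep() stands in for the BFA timer. */
#include <stdio.h>
#include <unistd.h>

#define RPF_RETRIES        3
#define RPF_RETRY_DELAY_US (1000 * 1000)   /* 1 second */

/* Pretend query that fails the first two times. */
static int send_rpsc(int attempt)
{
    return attempt >= 2 ? 0 : -1;
}

int main(void)
{
    int retries = 0;

    while (send_rpsc(retries) != 0) {
        if (++retries >= RPF_RETRIES) {
            puts("giving up, continuing without RPSC speed");
            return 0;
        }
        usleep(RPF_RETRY_DELAY_US);        /* back off before retrying */
    }
    printf("RPSC succeeded after %d retries\n", retries);
    return 0;
}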
fcxp_alloced : + bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe, + bfa_fcs_rpf_send_rpsc2, rpf, BFA_TRUE); + return; + } + rpf->fcxp = fcxp; + + len = fc_rpsc2_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, + bfa_fcs_lport_get_fcid(port), &rport->pid, 1); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, bfa_fcs_rpf_rpsc2_response, + rpf, FC_MAX_PDUSZ, FC_ELS_TOV); + rport->stats.rpsc_sent++; + bfa_sm_send_event(rpf, RPFSM_EVENT_FCXP_SENT); + +} + +static void +bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) cbarg; + struct bfa_fcs_rport_s *rport = rpf->rport; + struct fc_ls_rjt_s *ls_rjt; + struct fc_rpsc2_acc_s *rpsc2_acc; + u16 num_ents; + + bfa_trc(rport->fcs, req_status); + + if (req_status != BFA_STATUS_OK) { + bfa_trc(rport->fcs, req_status); + if (req_status == BFA_STATUS_ETIMER) + rport->stats.rpsc_failed++; + bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); + return; + } + + rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp); + if (rpsc2_acc->els_cmd == FC_ELS_ACC) { + rport->stats.rpsc_accs++; + num_ents = be16_to_cpu(rpsc2_acc->num_pids); + bfa_trc(rport->fcs, num_ents); + if (num_ents > 0) { + WARN_ON(be32_to_cpu(rpsc2_acc->port_info[0].pid) != + bfa_ntoh3b(rport->pid)); + bfa_trc(rport->fcs, + be32_to_cpu(rpsc2_acc->port_info[0].pid)); + bfa_trc(rport->fcs, + be16_to_cpu(rpsc2_acc->port_info[0].speed)); + bfa_trc(rport->fcs, + be16_to_cpu(rpsc2_acc->port_info[0].index)); + bfa_trc(rport->fcs, + rpsc2_acc->port_info[0].type); + + if (rpsc2_acc->port_info[0].speed == 0) { + bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); + return; + } + + rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed( + be16_to_cpu(rpsc2_acc->port_info[0].speed)); + + bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP); + } + } else { + ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); + bfa_trc(rport->fcs, ls_rjt->reason_code); + bfa_trc(rport->fcs, ls_rjt->reason_code_expl); + rport->stats.rpsc_rejects++; + if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) + bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL); + else + bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); + } +} diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c new file mode 100644 index 000000000..6cc2f7290 --- /dev/null +++ b/drivers/scsi/bfa/bfa_hw_cb.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
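bfa_fcs_rpf_rpsc2_response above checks for an ELS accept, byte-swaps the entry count and the first port-info speed, and treats a zero speed as an error. A sketch of that parse with a deliberately simplified, hypothetical response layout:

/* Illustrative parse of a big-endian response; layout is simplified. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define ELS_ACC 0x02   /* ELS accept code, shown for illustration */

struct fake_rpsc2_acc {
    uint8_t  els_cmd;
    uint8_t  rsvd[3];
    uint16_t num_pids_be;   /* entry count, big-endian on the wire */
    uint16_t speed_be;      /* first entry's operating speed, big-endian */
};

static int parse_rpsc2(const struct fake_rpsc2_acc *acc)
{
    if (acc->els_cmd != ELS_ACC)
        return -1;                      /* LS_RJT path */

    if (ntohs(acc->num_pids_be) == 0)
        return -1;

    uint16_t speed = ntohs(acc->speed_be);
    if (speed == 0)
        return -1;                      /* zero speed is treated as an error */

    printf("operating speed code 0x%04x\n", speed);
    return 0;
}

int main(void)
{
    struct fake_rpsc2_acc acc = { ELS_ACC, {0}, htons(1), htons(0x1000) };

    return parse_rpsc2(&acc) ? 1 : 0;
}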
+ */ + +#include "bfad_drv.h" +#include "bfa_modules.h" +#include "bfi_reg.h" + +void +bfa_hwcb_reginit(struct bfa_s *bfa) +{ + struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; + void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); + int fn = bfa_ioc_pcifn(&bfa->ioc); + + if (fn == 0) { + bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); + bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK); + } else { + bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS); + bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK); + } +} + +static void +bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq) +{ + writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq), + bfa->iocfc.bfa_regs.intr_status); +} + +/* + * Actions to respond RME Interrupt for Crossbow ASIC: + * - Write 1 to Interrupt Status register + * INTX - done in bfa_intx() + * MSIX - done in bfa_hwcb_rspq_ack_msix() + * - Update CI (only if new CI) + */ +static void +bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci) +{ + writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq), + bfa->iocfc.bfa_regs.intr_status); + + if (bfa_rspq_ci(bfa, rspq) == ci) + return; + + bfa_rspq_ci(bfa, rspq) = ci; + writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); +} + +void +bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) +{ + if (bfa_rspq_ci(bfa, rspq) == ci) + return; + + bfa_rspq_ci(bfa, rspq) = ci; + writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); +} + +void +bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, + u32 *num_vecs, u32 *max_vec_bit) +{ +#define __HFN_NUMINTS 13 + if (bfa_ioc_pcifn(&bfa->ioc) == 0) { + *msix_vecs_bmap = (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | + __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 | + __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | + __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | + __HFN_INT_MBOX_LPU0); + *max_vec_bit = __HFN_INT_MBOX_LPU0; + } else { + *msix_vecs_bmap = (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | + __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 | + __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | + __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | + __HFN_INT_MBOX_LPU1); + *max_vec_bit = __HFN_INT_MBOX_LPU1; + } + + *msix_vecs_bmap |= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | + __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS); + *num_vecs = __HFN_NUMINTS; +} + +/* + * Dummy interrupt handler for handling spurious interrupts. + */ +static void +bfa_hwcb_msix_dummy(struct bfa_s *bfa, int vec) +{ +} + +/* + * No special setup required for crossbow -- vector assignments are implicit. 
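Both response-queue ack paths in bfa_hw_cb.c above skip the consumer-index write when the cached CI already equals the new value, saving an MMIO access. A tiny sketch of that write-only-on-change pattern, with a plain variable standing in for the register:

/* Illustrative "write only when the consumer index changed" sketch. */
#include <stdio.h>
#include <stdint.h>

static uint32_t cached_ci;     /* driver-side shadow of the CI */
static uint32_t hw_ci_reg;     /* stands in for the MMIO CI register */

static void rspq_ack(uint32_t ci)
{
    if (cached_ci == ci)       /* nothing new: skip the register write */
        return;

    cached_ci = ci;
    hw_ci_reg = ci;            /* writel(ci, rme_q_ci[rspq]) in the driver */
}

int main(void)
{
    rspq_ack(5);
    rspq_ack(5);               /* no-op the second time */
    rspq_ack(7);
    printf("shadow=%u reg=%u\n", cached_ci, hw_ci_reg);
    return 0;
}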
+ */ +void +bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs) +{ + WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS)); + + bfa->msix.nvecs = nvecs; + bfa_hwcb_msix_uninstall(bfa); +} + +void +bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa) +{ + int i; + + if (bfa->msix.nvecs == 0) + return; + + if (bfa->msix.nvecs == 1) { + for (i = BFI_MSIX_CPE_QMIN_CB; i < BFI_MSIX_CB_MAX; i++) + bfa->msix.handler[i] = bfa_msix_all; + return; + } + + for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++) + bfa->msix.handler[i] = bfa_msix_lpu_err; +} + +void +bfa_hwcb_msix_queue_install(struct bfa_s *bfa) +{ + int i; + + if (bfa->msix.nvecs == 0) + return; + + if (bfa->msix.nvecs == 1) { + for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++) + bfa->msix.handler[i] = bfa_msix_all; + return; + } + + for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++) + bfa->msix.handler[i] = bfa_msix_reqq; + + for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++) + bfa->msix.handler[i] = bfa_msix_rspq; +} + +void +bfa_hwcb_msix_uninstall(struct bfa_s *bfa) +{ + int i; + + for (i = 0; i < BFI_MSIX_CB_MAX; i++) + bfa->msix.handler[i] = bfa_hwcb_msix_dummy; +} + +/* + * No special enable/disable -- vector assignments are implicit. + */ +void +bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) +{ + if (msix) { + bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix; + bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix; + } else { + bfa->iocfc.hwif.hw_reqq_ack = NULL; + bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; + } +} + +void +bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end) +{ + *start = BFI_MSIX_RME_QMIN_CB; + *end = BFI_MSIX_RME_QMAX_CB; +} diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c new file mode 100644 index 000000000..4b1c0a568 --- /dev/null +++ b/drivers/scsi/bfa/bfa_hw_ct.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +#include "bfad_drv.h" +#include "bfa_modules.h" +#include "bfi_reg.h" + +BFA_TRC_FILE(HAL, IOCFC_CT); + +/* + * Dummy interrupt handler for handling spurious interrupt during chip-reinit. 
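bfa_hwcb_msix_queue_install above fills a table of function pointers: with a single MSI-X vector every queue shares one combined handler, otherwise request and response queues get dedicated handlers. A sketch of that table fill; the vector ranges and handler names are placeholders:

/* Illustrative handler-table fill; ranges and names are placeholders. */
#include <stdio.h>

#define NVEC_MAX 8
#define CPE_MIN  0
#define CPE_MAX  3
#define RME_MIN  4
#define RME_MAX  7

typedef void (*isr_t)(int vec);

static void isr_all(int vec)  { printf("all  %d\n", vec); }
static void isr_reqq(int vec) { printf("reqq %d\n", vec); }
static void isr_rspq(int vec) { printf("rspq %d\n", vec); }

static isr_t handler[NVEC_MAX];

static void queue_install(int nvecs)
{
    int i;

    if (nvecs == 1) {              /* one vector: everything funnels to one ISR */
        for (i = CPE_MIN; i <= RME_MAX; i++)
            handler[i] = isr_all;
        return;
    }

    for (i = CPE_MIN; i <= CPE_MAX; i++)   /* request queues */
        handler[i] = isr_reqq;
    for (i = RME_MIN; i <= RME_MAX; i++)   /* response queues */
        handler[i] = isr_rspq;
}

int main(void)
{
    queue_install(NVEC_MAX);
    handler[0](0);
    handler[5](5);
    return 0;
}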
+ */ +static void +bfa_hwct_msix_dummy(struct bfa_s *bfa, int vec) +{ +} + +void +bfa_hwct_reginit(struct bfa_s *bfa) +{ + struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; + void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); + int fn = bfa_ioc_pcifn(&bfa->ioc); + + if (fn == 0) { + bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); + bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK); + } else { + bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS); + bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK); + } +} + +void +bfa_hwct2_reginit(struct bfa_s *bfa) +{ + struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; + void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); + + bfa_regs->intr_status = (kva + CT2_HOSTFN_INT_STATUS); + bfa_regs->intr_mask = (kva + CT2_HOSTFN_INTR_MASK); +} + +void +bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq) +{ + u32 r32; + + r32 = readl(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); + writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); +} + +/* + * Actions to respond RME Interrupt for Catapult ASIC: + * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx()) + * - Acknowledge by writing to RME Queue Control register + * - Update CI + */ +void +bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) +{ + u32 r32; + + r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); + writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); + + bfa_rspq_ci(bfa, rspq) = ci; + writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); +} + +/* + * Actions to respond RME Interrupt for Catapult2 ASIC: + * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx()) + * - Update CI + */ +void +bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) +{ + bfa_rspq_ci(bfa, rspq) = ci; + writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); +} + +void +bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, + u32 *num_vecs, u32 *max_vec_bit) +{ + *msix_vecs_bmap = (1 << BFI_MSIX_CT_MAX) - 1; + *max_vec_bit = (1 << (BFI_MSIX_CT_MAX - 1)); + *num_vecs = BFI_MSIX_CT_MAX; +} + +/* + * Setup MSI-X vector for catapult + */ +void +bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs) +{ + WARN_ON((nvecs != 1) && (nvecs != BFI_MSIX_CT_MAX)); + bfa_trc(bfa, nvecs); + + bfa->msix.nvecs = nvecs; + bfa_hwct_msix_uninstall(bfa); +} + +void +bfa_hwct_msix_ctrl_install(struct bfa_s *bfa) +{ + if (bfa->msix.nvecs == 0) + return; + + if (bfa->msix.nvecs == 1) + bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_all; + else + bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err; +} + +void +bfa_hwct_msix_queue_install(struct bfa_s *bfa) +{ + int i; + + if (bfa->msix.nvecs == 0) + return; + + if (bfa->msix.nvecs == 1) { + for (i = BFI_MSIX_CPE_QMIN_CT; i < BFI_MSIX_CT_MAX; i++) + bfa->msix.handler[i] = bfa_msix_all; + return; + } + + for (i = BFI_MSIX_CPE_QMIN_CT; i <= BFI_MSIX_CPE_QMAX_CT; i++) + bfa->msix.handler[i] = bfa_msix_reqq; + + for (i = BFI_MSIX_RME_QMIN_CT; i <= BFI_MSIX_RME_QMAX_CT; i++) + bfa->msix.handler[i] = bfa_msix_rspq; +} + +void +bfa_hwct_msix_uninstall(struct bfa_s *bfa) +{ + int i; + + for (i = 0; i < BFI_MSIX_CT_MAX; i++) + bfa->msix.handler[i] = bfa_hwct_msix_dummy; +} + +/* + * Enable MSI-X vectors + */ +void +bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) +{ + bfa_trc(bfa, 0); + bfa_ioc_isr_mode_set(&bfa->ioc, msix); +} + +void +bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end) +{ + *start = BFI_MSIX_RME_QMIN_CT; + *end = BFI_MSIX_RME_QMAX_CT; +} diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c new file mode 100644 index 
000000000..e1ed1424f --- /dev/null +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -0,0 +1,7032 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +#include "bfad_drv.h" +#include "bfad_im.h" +#include "bfa_ioc.h" +#include "bfi_reg.h" +#include "bfa_defs.h" +#include "bfa_defs_svc.h" +#include "bfi.h" + +BFA_TRC_FILE(CNA, IOC); + +/* + * IOC local definitions + */ +#define BFA_IOC_TOV 3000 /* msecs */ +#define BFA_IOC_HWSEM_TOV 500 /* msecs */ +#define BFA_IOC_HB_TOV 500 /* msecs */ +#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV +#define BFA_IOC_POLL_TOV BFA_TIMER_FREQ + +#define bfa_ioc_timer_start(__ioc) \ + bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ + bfa_ioc_timeout, (__ioc), BFA_IOC_TOV) +#define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer) + +#define bfa_hb_timer_start(__ioc) \ + bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \ + bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV) +#define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer) + +#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) + +#define bfa_ioc_state_disabled(__sm) \ + (((__sm) == BFI_IOC_UNINIT) || \ + ((__sm) == BFI_IOC_INITING) || \ + ((__sm) == BFI_IOC_HWINIT) || \ + ((__sm) == BFI_IOC_DISABLED) || \ + ((__sm) == BFI_IOC_FAIL) || \ + ((__sm) == BFI_IOC_CFG_DISABLED)) + +/* + * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. + */ + +#define bfa_ioc_firmware_lock(__ioc) \ + ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) +#define bfa_ioc_firmware_unlock(__ioc) \ + ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) +#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) +#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) +#define bfa_ioc_notify_fail(__ioc) \ + ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) +#define bfa_ioc_sync_start(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_start(__ioc)) +#define bfa_ioc_sync_join(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) +#define bfa_ioc_sync_leave(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc)) +#define bfa_ioc_sync_ack(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc)) +#define bfa_ioc_sync_complete(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc)) +#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \ + ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate)) +#define bfa_ioc_get_cur_ioc_fwstate(__ioc) \ + ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc)) +#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \ + ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate)) +#define bfa_ioc_get_alt_ioc_fwstate(__ioc) \ + ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc)) + +#define bfa_ioc_mbox_cmd_pending(__ioc) \ + (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ + readl((__ioc)->ioc_regs.hfn_mbox_cmd)) + +bfa_boolean_t bfa_auto_recover = BFA_TRUE; + +/* + * forward declarations + */ +static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); +static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); +static void bfa_ioc_timeout(void *ioc); +static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc); +static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc); +static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc); +static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc); +static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); 
+static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); +static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc); +static void bfa_ioc_recover(struct bfa_ioc_s *ioc); +static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc , + enum bfa_ioc_event_e event); +static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); +static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); +static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc); +static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); +static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp( + struct bfi_ioc_image_hdr_s *base_fwhdr, + struct bfi_ioc_image_hdr_s *fwhdr_to_cmp); +static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp( + struct bfa_ioc_s *ioc, + struct bfi_ioc_image_hdr_s *base_fwhdr); + +/* + * IOC state machine definitions/declarations + */ +enum ioc_event { + IOC_E_RESET = 1, /* IOC reset request */ + IOC_E_ENABLE = 2, /* IOC enable request */ + IOC_E_DISABLE = 3, /* IOC disable request */ + IOC_E_DETACH = 4, /* driver detach cleanup */ + IOC_E_ENABLED = 5, /* f/w enabled */ + IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */ + IOC_E_DISABLED = 7, /* f/w disabled */ + IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */ + IOC_E_HBFAIL = 9, /* heartbeat failure */ + IOC_E_HWERROR = 10, /* hardware error interrupt */ + IOC_E_TIMEOUT = 11, /* timeout */ + IOC_E_HWFAILED = 12, /* PCI mapping failure notice */ +}; + +bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event); + +static struct bfa_sm_table_s ioc_sm_table[] = { + {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, + {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, + {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, + {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, + {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, + {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL}, + {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, + {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, + {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, + {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL}, +}; + +/* + * IOCPF state machine definitions/declarations + */ + +#define bfa_iocpf_timer_start(__ioc) \ + bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ + bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV) +#define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer) + +#define bfa_iocpf_poll_timer_start(__ioc) \ + bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ + bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV) + +#define bfa_sem_timer_start(__ioc) \ + bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \ + bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV) +#define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer) + +/* + * Forward declareations for iocpf state machine + */ +static void bfa_iocpf_timeout(void *ioc_arg); +static void bfa_iocpf_sem_timeout(void *ioc_arg); +static void bfa_iocpf_poll_timeout(void *ioc_arg); + +/* 
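ioc_sm_table above pairs each state-handler function with a public state enum so that bfa_sm_to_state can report the current state by pointer lookup. A minimal sketch of that idiom with hypothetical states:

/* Illustrative sketch of the state-function to enum lookup table. */
#include <stdio.h>

enum ioc_state { ST_UNINIT = 1, ST_RESET = 2, ST_OP = 3 };

typedef void (*state_fn)(void);

static void sm_uninit(void) { }
static void sm_reset(void)  { }
static void sm_op(void)     { }

struct sm_table { state_fn sm; enum ioc_state state; };

static const struct sm_table tbl[] = {
    { sm_uninit, ST_UNINIT },
    { sm_reset,  ST_RESET  },
    { sm_op,     ST_OP     },
};

/* Walk the table and return the enum matching the current handler. */
static enum ioc_state sm_to_state(state_fn cur)
{
    unsigned int i;

    for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
        if (tbl[i].sm == cur)
            return tbl[i].state;
    return ST_UNINIT;    /* fallback when the handler is not in the table */
}

int main(void)
{
    printf("state=%d\n", sm_to_state(sm_op));
    return 0;
}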
+ * IOCPF state machine events + */ +enum iocpf_event { + IOCPF_E_ENABLE = 1, /* IOCPF enable request */ + IOCPF_E_DISABLE = 2, /* IOCPF disable request */ + IOCPF_E_STOP = 3, /* stop on driver detach */ + IOCPF_E_FWREADY = 4, /* f/w initialization done */ + IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */ + IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */ + IOCPF_E_FAIL = 7, /* failure notice by ioc sm */ + IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */ + IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */ + IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */ + IOCPF_E_TIMEOUT = 11, /* f/w response timeout */ + IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */ +}; + +/* + * IOCPF states + */ +enum bfa_iocpf_state { + BFA_IOCPF_RESET = 1, /* IOC is in reset state */ + BFA_IOCPF_SEMWAIT = 2, /* Waiting for IOC h/w semaphore */ + BFA_IOCPF_HWINIT = 3, /* IOC h/w is being initialized */ + BFA_IOCPF_READY = 4, /* IOCPF is initialized */ + BFA_IOCPF_INITFAIL = 5, /* IOCPF failed */ + BFA_IOCPF_FAIL = 6, /* IOCPF failed */ + BFA_IOCPF_DISABLING = 7, /* IOCPF is being disabled */ + BFA_IOCPF_DISABLED = 8, /* IOCPF is disabled */ + BFA_IOCPF_FWMISMATCH = 9, /* IOC f/w different from drivers */ +}; + +bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s, + enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s, + enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event); + +static struct bfa_sm_table_s iocpf_sm_table[] = { + {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET}, + {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH}, + {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH}, + {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT}, + {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT}, + {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT}, + {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY}, + {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL}, + {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL}, + {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL}, + {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL}, + {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING}, + {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING}, + {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, +}; + +/* + * IOC State Machine + */ + +/* + * Beginning state. IOC uninit state. + */ + +static void +bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc) +{ +} + +/* + * IOC is in uninit state. 
+ */ +static void +bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_RESET: + bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); + break; + + default: + bfa_sm_fault(ioc, event); + } +} +/* + * Reset entry actions -- initialize state machine + */ +static void +bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc) +{ + bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); +} + +/* + * IOC is in reset state. + */ +static void +bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_ENABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); + break; + + case IOC_E_DISABLE: + bfa_ioc_disable_comp(ioc); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + + +static void +bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); +} + +/* + * Host IOC function is being enabled, awaiting response from firmware. + * Semaphore is acquired. + */ +static void +bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_ENABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); + break; + + case IOC_E_PFFAILED: + /* !!! fall through !!! */ + case IOC_E_HWERROR: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + if (event != IOC_E_PFFAILED) + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); + break; + + case IOC_E_HWFAILED: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); + break; + + case IOC_E_DISABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); + break; + + case IOC_E_ENABLE: + break; + + default: + bfa_sm_fault(ioc, event); + } +} + + +static void +bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc) +{ + bfa_ioc_timer_start(ioc); + bfa_ioc_send_getattr(ioc); +} + +/* + * IOC configuration in progress. Timer is active. 
+ */ +static void +bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_FWRSP_GETATTR: + bfa_ioc_timer_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_op); + break; + + case IOC_E_PFFAILED: + case IOC_E_HWERROR: + bfa_ioc_timer_stop(ioc); + fallthrough; + case IOC_E_TIMEOUT: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + if (event != IOC_E_PFFAILED) + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); + break; + + case IOC_E_DISABLE: + bfa_ioc_timer_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_ENABLE: + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +static void +bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) +{ + struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; + + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); + bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); + bfa_ioc_hb_monitor(ioc); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n"); + bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE); +} + +static void +bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_ENABLE: + break; + + case IOC_E_DISABLE: + bfa_hb_timer_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_PFFAILED: + case IOC_E_HWERROR: + bfa_hb_timer_stop(ioc); + fallthrough; + case IOC_E_HBFAIL: + if (ioc->iocpf.auto_recover) + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); + else + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + + bfa_ioc_fail_notify(ioc); + + if (event != IOC_E_PFFAILED) + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + + +static void +bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc) +{ + struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n"); + bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE); +} + +/* + * IOC is being disabled + */ +static void +bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_DISABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); + break; + + case IOC_E_HWERROR: + /* + * No state change. Will move to disabled state + * after iocpf sm completes failure processing and + * moves to disabled state. + */ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); + break; + + case IOC_E_HWFAILED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); + bfa_ioc_disable_comp(ioc); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +/* + * IOC disable completion entry. + */ +static void +bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc) +{ + bfa_ioc_disable_comp(ioc); +} + +static void +bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_ENABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); + break; + + case IOC_E_DISABLE: + ioc->cbfn->disable_cbfn(ioc->bfa); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + + +static void +bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc) +{ + bfa_trc(ioc, 0); +} + +/* + * Hardware initialization retry. 
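Each bfa_ioc_sm_* handler above is a switch over the incoming event, and several arms intentionally fall through into a shared failure path (for example PFFAILED/HWERROR into the heartbeat-failure handling of bfa_ioc_sm_op). A compact sketch of that shape, with hypothetical helpers:

/* Illustrative event-driven handler with an intentional fallthrough. */
#include <stdio.h>

enum ev { EV_ENABLE = 1, EV_PFFAILED, EV_HWERROR, EV_HBFAIL, EV_DISABLE };

static void stop_hb_timer(void)   { puts("stop heartbeat timer"); }
static void enter_fail(void)      { puts("-> fail state"); }
static void enter_disabling(void) { puts("-> disabling state"); }

static void sm_op(enum ev event)
{
    switch (event) {
    case EV_ENABLE:
        break;                      /* already enabled: ignore */
    case EV_PFFAILED:
    case EV_HWERROR:
        stop_hb_timer();
        /* fall through to the common failure handling */
    case EV_HBFAIL:
        enter_fail();
        break;
    case EV_DISABLE:
        stop_hb_timer();
        enter_disabling();
        break;
    default:
        puts("unexpected event");
        break;
    }
}

int main(void)
{
    sm_op(EV_HWERROR);
    sm_op(EV_DISABLE);
    return 0;
}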
+ */ +static void +bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_ENABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); + break; + + case IOC_E_PFFAILED: + case IOC_E_HWERROR: + /* + * Initialization retry failed. + */ + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + if (event != IOC_E_PFFAILED) + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); + break; + + case IOC_E_HWFAILED: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); + break; + + case IOC_E_ENABLE: + break; + + case IOC_E_DISABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + + +static void +bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc) +{ + bfa_trc(ioc, 0); +} + +/* + * IOC failure. + */ +static void +bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + + case IOC_E_ENABLE: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + break; + + case IOC_E_DISABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); + break; + + case IOC_E_HWERROR: + case IOC_E_HWFAILED: + /* + * HB failure / HW error notification, ignore. + */ + break; + default: + bfa_sm_fault(ioc, event); + } +} + +static void +bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc) +{ + bfa_trc(ioc, 0); +} + +static void +bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_ENABLE: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + break; + + case IOC_E_DISABLE: + ioc->cbfn->disable_cbfn(ioc->bfa); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + break; + + case IOC_E_HWERROR: + /* Ignore - already in hwfail state */ + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +/* + * IOCPF State Machine + */ + +/* + * Reset entry actions -- initialize state machine + */ +static void +bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf) +{ + iocpf->fw_mismatch_notified = BFA_FALSE; + iocpf->auto_recover = bfa_auto_recover; +} + +/* + * Beginning state. IOC is in reset state. + */ +static void +bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_ENABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); + break; + + case IOCPF_E_STOP: + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +/* + * Semaphore should be acquired for version check. + */ +static void +bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf) +{ + struct bfi_ioc_image_hdr_s fwhdr; + u32 r32, fwstate, pgnum, loff = 0; + int i; + + /* + * Spin on init semaphore to serialize. 
+ */ + r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg); + while (r32 & 0x1) { + udelay(20); + r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg); + } + + /* h/w sem init */ + fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc); + if (fwstate == BFI_IOC_UNINIT) { + writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); + goto sem_get; + } + + bfa_ioc_fwver_get(iocpf->ioc, &fwhdr); + + if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) { + writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); + goto sem_get; + } + + /* + * Clear fwver hdr + */ + pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff); + writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn); + + for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) { + bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0); + loff += sizeof(u32); + } + + bfa_trc(iocpf->ioc, fwstate); + bfa_trc(iocpf->ioc, swab32(fwhdr.exec)); + bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT); + bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT); + + /* + * Unlock the hw semaphore. Should be here only once per boot. + */ + bfa_ioc_ownership_reset(iocpf->ioc); + + /* + * unlock init semaphore. + */ + writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); + +sem_get: + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/* + * Awaiting h/w semaphore to continue with version check. + */ +static void +bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_SEMLOCKED: + if (bfa_ioc_firmware_lock(ioc)) { + if (bfa_ioc_sync_start(ioc)) { + bfa_ioc_sync_join(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); + } else { + bfa_ioc_firmware_unlock(ioc); + writel(1, ioc->ioc_regs.ioc_sem_reg); + bfa_sem_timer_start(ioc); + } + } else { + writel(1, ioc->ioc_regs.ioc_sem_reg); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch); + } + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_fsm_send_event(ioc, IOC_E_HWFAILED); + break; + + case IOCPF_E_DISABLE: + bfa_sem_timer_stop(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + bfa_fsm_send_event(ioc, IOC_E_DISABLED); + break; + + case IOCPF_E_STOP: + bfa_sem_timer_stop(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +/* + * Notify enable completion callback. + */ +static void +bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf) +{ + /* + * Call only the first time sm enters fwmismatch state. + */ + if (iocpf->fw_mismatch_notified == BFA_FALSE) + bfa_ioc_pf_fwmismatch(iocpf->ioc); + + iocpf->fw_mismatch_notified = BFA_TRUE; + bfa_iocpf_timer_start(iocpf->ioc); +} + +/* + * Awaiting firmware version match. + */ +static void +bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_TIMEOUT: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); + break; + + case IOCPF_E_DISABLE: + bfa_iocpf_timer_stop(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + bfa_fsm_send_event(ioc, IOC_E_DISABLED); + break; + + case IOCPF_E_STOP: + bfa_iocpf_timer_stop(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +/* + * Request for semaphore. + */ +static void +bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf) +{ + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/* + * Awaiting semaphore for h/w initialzation. 
+ */ +static void +bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_SEMLOCKED: + if (bfa_ioc_sync_complete(ioc)) { + bfa_ioc_sync_join(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); + } else { + writel(1, ioc->ioc_regs.ioc_sem_reg); + bfa_sem_timer_start(ioc); + } + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_fsm_send_event(ioc, IOC_E_HWFAILED); + break; + + case IOCPF_E_DISABLE: + bfa_sem_timer_stop(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +static void +bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf) +{ + iocpf->poll_time = 0; + bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE); +} + +/* + * Hardware is being initialized. Interrupts are enabled. + * Holding hardware semaphore lock. + */ +static void +bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_FWREADY: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling); + break; + + case IOCPF_E_TIMEOUT: + writel(1, ioc->ioc_regs.ioc_sem_reg); + bfa_fsm_send_event(ioc, IOC_E_PFFAILED); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); + break; + + case IOCPF_E_DISABLE: + bfa_iocpf_timer_stop(ioc); + bfa_ioc_sync_leave(ioc); + writel(1, ioc->ioc_regs.ioc_sem_reg); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +static void +bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf) +{ + bfa_iocpf_timer_start(iocpf->ioc); + /* + * Enable Interrupts before sending fw IOC ENABLE cmd. + */ + iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa); + bfa_ioc_send_enable(iocpf->ioc); +} + +/* + * Host IOC function is being enabled, awaiting response from firmware. + * Semaphore is acquired. 
+ */ +static void +bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_FWRSP_ENABLE: + bfa_iocpf_timer_stop(ioc); + writel(1, ioc->ioc_regs.ioc_sem_reg); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); + break; + + case IOCPF_E_INITFAIL: + bfa_iocpf_timer_stop(ioc); + fallthrough; + + case IOCPF_E_TIMEOUT: + writel(1, ioc->ioc_regs.ioc_sem_reg); + if (event == IOCPF_E_TIMEOUT) + bfa_fsm_send_event(ioc, IOC_E_PFFAILED); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); + break; + + case IOCPF_E_DISABLE: + bfa_iocpf_timer_stop(ioc); + writel(1, ioc->ioc_regs.ioc_sem_reg); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +static void +bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf) +{ + bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED); +} + +static void +bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_DISABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); + break; + + case IOCPF_E_GETATTRFAIL: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); + break; + + case IOCPF_E_FAIL: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +static void +bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf) +{ + bfa_iocpf_timer_start(iocpf->ioc); + bfa_ioc_send_disable(iocpf->ioc); +} + +/* + * IOC is being disabled + */ +static void +bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_FWRSP_DISABLE: + bfa_iocpf_timer_stop(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_FAIL: + bfa_iocpf_timer_stop(ioc); + fallthrough; + + case IOCPF_E_TIMEOUT: + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_FWRSP_ENABLE: + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +static void +bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf) +{ + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/* + * IOC hb ack request is being removed. + */ +static void +bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_SEMLOCKED: + bfa_ioc_sync_leave(ioc); + writel(1, ioc->ioc_regs.ioc_sem_reg); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_fsm_send_event(ioc, IOC_E_HWFAILED); + break; + + case IOCPF_E_FAIL: + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +/* + * IOC disable completion entry. 
+ */ +static void +bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf) +{ + bfa_ioc_mbox_flush(iocpf->ioc); + bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED); +} + +static void +bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_ENABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); + break; + + case IOCPF_E_STOP: + bfa_ioc_firmware_unlock(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +static void +bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf) +{ + bfa_ioc_debug_save_ftrc(iocpf->ioc); + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/* + * Hardware initialization failed. + */ +static void +bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_SEMLOCKED: + bfa_ioc_notify_fail(ioc); + bfa_ioc_sync_leave(ioc); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); + writel(1, ioc->ioc_regs.ioc_sem_reg); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_fsm_send_event(ioc, IOC_E_HWFAILED); + break; + + case IOCPF_E_DISABLE: + bfa_sem_timer_stop(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_STOP: + bfa_sem_timer_stop(ioc); + bfa_ioc_firmware_unlock(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + case IOCPF_E_FAIL: + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +static void +bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf) +{ + bfa_trc(iocpf->ioc, 0); +} + +/* + * Hardware initialization failed. + */ +static void +bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_DISABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + case IOCPF_E_STOP: + bfa_ioc_firmware_unlock(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +static void +bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf) +{ + /* + * Mark IOC as failed in hardware and stop firmware. + */ + bfa_ioc_lpu_stop(iocpf->ioc); + + /* + * Flush any queued up mailbox requests. 
+ */ + bfa_ioc_mbox_flush(iocpf->ioc); + + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +static void +bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_SEMLOCKED: + bfa_ioc_sync_ack(ioc); + bfa_ioc_notify_fail(ioc); + if (!iocpf->auto_recover) { + bfa_ioc_sync_leave(ioc); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); + writel(1, ioc->ioc_regs.ioc_sem_reg); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + } else { + if (bfa_ioc_sync_complete(ioc)) + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); + else { + writel(1, ioc->ioc_regs.ioc_sem_reg); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); + } + } + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_fsm_send_event(ioc, IOC_E_HWFAILED); + break; + + case IOCPF_E_DISABLE: + bfa_sem_timer_stop(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_FAIL: + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +static void +bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) +{ + bfa_trc(iocpf->ioc, 0); +} + +/* + * IOC is in failed state. + */ +static void +bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_DISABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + +/* + * BFA IOC private functions + */ + +/* + * Notify common modules registered for notification. + */ +static void +bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event) +{ + struct bfa_ioc_notify_s *notify; + struct list_head *qe; + + list_for_each(qe, &ioc->notify_q) { + notify = (struct bfa_ioc_notify_s *)qe; + notify->cbfn(notify->cbarg, event); + } +} + +static void +bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) +{ + ioc->cbfn->disable_cbfn(ioc->bfa); + bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED); +} + +bfa_boolean_t +bfa_ioc_sem_get(void __iomem *sem_reg) +{ + u32 r32; + int cnt = 0; +#define BFA_SEM_SPINCNT 3000 + + r32 = readl(sem_reg); + + while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) { + cnt++; + udelay(2); + r32 = readl(sem_reg); + } + + if (!(r32 & 1)) + return BFA_TRUE; + + return BFA_FALSE; +} + +static void +bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) +{ + u32 r32; + + /* + * First read to the semaphore register will return 0, subsequent reads + * will return 1. 
Semaphore is released by writing 1 to the register + */ + r32 = readl(ioc->ioc_regs.ioc_sem_reg); + if (r32 == ~0) { + WARN_ON(r32 == ~0); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR); + return; + } + if (!(r32 & 1)) { + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); + return; + } + + bfa_sem_timer_start(ioc); +} + +/* + * Initialize LPU local memory (aka secondary memory / SRAM) + */ +static void +bfa_ioc_lmem_init(struct bfa_ioc_s *ioc) +{ + u32 pss_ctl; + int i; +#define PSS_LMEM_INIT_TIME 10000 + + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + pss_ctl &= ~__PSS_LMEM_RESET; + pss_ctl |= __PSS_LMEM_INIT_EN; + + /* + * i2c workaround 12.5khz clock + */ + pss_ctl |= __PSS_I2C_CLK_DIV(3UL); + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); + + /* + * wait for memory initialization to be complete + */ + i = 0; + do { + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + i++; + } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME)); + + /* + * If memory initialization is not successful, IOC timeout will catch + * such failures. + */ + WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE)); + bfa_trc(ioc, pss_ctl); + + pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN); + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); +} + +static void +bfa_ioc_lpu_start(struct bfa_ioc_s *ioc) +{ + u32 pss_ctl; + + /* + * Take processor out of reset. + */ + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + pss_ctl &= ~__PSS_LPU0_RESET; + + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); +} + +static void +bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc) +{ + u32 pss_ctl; + + /* + * Put processors in reset. + */ + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET); + + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); +} + +/* + * Get driver and firmware versions. + */ +void +bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) +{ + u32 pgnum; + u32 loff = 0; + int i; + u32 *fwsig = (u32 *) fwhdr; + + pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32)); + i++) { + fwsig[i] = + bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); + loff += sizeof(u32); + } +} + +/* + * Returns TRUE if driver is willing to work with current smem f/w version. + */ +bfa_boolean_t +bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, + struct bfi_ioc_image_hdr_s *smem_fwhdr) +{ + struct bfi_ioc_image_hdr_s *drv_fwhdr; + enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp; + + drv_fwhdr = (struct bfi_ioc_image_hdr_s *) + bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); + + /* + * If smem is incompatible or old, driver should not work with it. + */ + drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr); + if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP || + drv_smem_cmp == BFI_IOC_IMG_VER_OLD) { + return BFA_FALSE; + } + + /* + * IF Flash has a better F/W than smem do not work with smem. + * If smem f/w == flash f/w, as smem f/w not old | incmp, work with it. + * If Flash is old or incomp work with smem iff smem f/w == drv f/w. + */ + smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr); + + if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) { + return BFA_FALSE; + } else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) { + return BFA_TRUE; + } else { + return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ? + BFA_TRUE : BFA_FALSE; + } +} + +/* + * Return true if current running version is valid. Firmware signature and + * execution context (driver/bios) must match. 
+ */ +static bfa_boolean_t +bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env) +{ + struct bfi_ioc_image_hdr_s fwhdr; + + bfa_ioc_fwver_get(ioc, &fwhdr); + + if (swab32(fwhdr.bootenv) != boot_env) { + bfa_trc(ioc, fwhdr.bootenv); + bfa_trc(ioc, boot_env); + return BFA_FALSE; + } + + return bfa_ioc_fwver_cmp(ioc, &fwhdr); +} + +static bfa_boolean_t +bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1, + struct bfi_ioc_image_hdr_s *fwhdr_2) +{ + int i; + + for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) + if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i]) + return BFA_FALSE; + + return BFA_TRUE; +} + +/* + * Returns TRUE if major minor and maintainence are same. + * If patch versions are same, check for MD5 Checksum to be same. + */ +static bfa_boolean_t +bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr, + struct bfi_ioc_image_hdr_s *fwhdr_to_cmp) +{ + if (drv_fwhdr->signature != fwhdr_to_cmp->signature) + return BFA_FALSE; + + if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major) + return BFA_FALSE; + + if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor) + return BFA_FALSE; + + if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint) + return BFA_FALSE; + + if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch && + drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase && + drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) { + return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp); + } + + return BFA_TRUE; +} + +static bfa_boolean_t +bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr) +{ + if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF) + return BFA_FALSE; + + return BFA_TRUE; +} + +static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr) +{ + if (fwhdr->fwver.phase == 0 && + fwhdr->fwver.build == 0) + return BFA_TRUE; + + return BFA_FALSE; +} + +/* + * Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better. + */ +static enum bfi_ioc_img_ver_cmp_e +bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr, + struct bfi_ioc_image_hdr_s *fwhdr_to_cmp) +{ + if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE) + return BFI_IOC_IMG_VER_INCOMP; + + if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch) + return BFI_IOC_IMG_VER_BETTER; + + else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch) + return BFI_IOC_IMG_VER_OLD; + + /* + * GA takes priority over internal builds of the same patch stream. + * At this point major minor maint and patch numbers are same. + */ + + if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) { + if (fwhdr_is_ga(fwhdr_to_cmp)) + return BFI_IOC_IMG_VER_SAME; + else + return BFI_IOC_IMG_VER_OLD; + } else { + if (fwhdr_is_ga(fwhdr_to_cmp)) + return BFI_IOC_IMG_VER_BETTER; + } + + if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase) + return BFI_IOC_IMG_VER_BETTER; + else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase) + return BFI_IOC_IMG_VER_OLD; + + if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build) + return BFI_IOC_IMG_VER_BETTER; + else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build) + return BFI_IOC_IMG_VER_OLD; + + /* + * All Version Numbers are equal. + * Md5 check to be done as a part of compatibility check. 
+ */ + return BFI_IOC_IMG_VER_SAME; +} + +#define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */ + +bfa_status_t +bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off, + u32 *fwimg) +{ + return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva, + BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)), + (char *)fwimg, BFI_FLASH_CHUNK_SZ); +} + +static enum bfi_ioc_img_ver_cmp_e +bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc, + struct bfi_ioc_image_hdr_s *base_fwhdr) +{ + struct bfi_ioc_image_hdr_s *flash_fwhdr; + bfa_status_t status; + u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS]; + + status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg); + if (status != BFA_STATUS_OK) + return BFI_IOC_IMG_VER_INCOMP; + + flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg; + if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE) + return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr); + else + return BFI_IOC_IMG_VER_INCOMP; +} + + +/* + * Invalidate fwver signature + */ +bfa_status_t +bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc) +{ + + u32 pgnum; + u32 loff = 0; + enum bfi_ioc_state ioc_fwstate; + + ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); + if (!bfa_ioc_state_disabled(ioc_fwstate)) + return BFA_STATUS_ADAPTER_ENABLED; + + pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN); + + return BFA_STATUS_OK; +} + +/* + * Conditionally flush any pending message from firmware at start. + */ +static void +bfa_ioc_msgflush(struct bfa_ioc_s *ioc) +{ + u32 r32; + + r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); + if (r32) + writel(1, ioc->ioc_regs.lpu_mbox_cmd); +} + +static void +bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) +{ + enum bfi_ioc_state ioc_fwstate; + bfa_boolean_t fwvalid; + u32 boot_type; + u32 boot_env; + + ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); + + if (force) + ioc_fwstate = BFI_IOC_UNINIT; + + bfa_trc(ioc, ioc_fwstate); + + boot_type = BFI_FWBOOT_TYPE_NORMAL; + boot_env = BFI_FWBOOT_ENV_OS; + + /* + * check if firmware is valid + */ + fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? + BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env); + + if (!fwvalid) { + if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK) + bfa_ioc_poll_fwinit(ioc); + return; + } + + /* + * If hardware initialization is in progress (initialized by other IOC), + * just wait for an initialization completion interrupt. + */ + if (ioc_fwstate == BFI_IOC_INITING) { + bfa_ioc_poll_fwinit(ioc); + return; + } + + /* + * If IOC function is disabled and firmware version is same, + * just re-enable IOC. + * + * If option rom, IOC must not be in operational state. With + * convergence, IOC will be in operational state when 2nd driver + * is loaded. + */ + if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) { + + /* + * When using MSI-X any pending firmware ready event should + * be flushed. Otherwise MSI-X interrupts are not delivered. + */ + bfa_ioc_msgflush(ioc); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); + return; + } + + /* + * Initialize the h/w for any other states. 
+ */ + if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK) + bfa_ioc_poll_fwinit(ioc); +} + +static void +bfa_ioc_timeout(void *ioc_arg) +{ + struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg; + + bfa_trc(ioc, 0); + bfa_fsm_send_event(ioc, IOC_E_TIMEOUT); +} + +void +bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len) +{ + u32 *msgp = (u32 *) ioc_msg; + u32 i; + + bfa_trc(ioc, msgp[0]); + bfa_trc(ioc, len); + + WARN_ON(len > BFI_IOC_MSGLEN_MAX); + + /* + * first write msg to mailbox registers + */ + for (i = 0; i < len / sizeof(u32); i++) + writel(cpu_to_le32(msgp[i]), + ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); + + for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++) + writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); + + /* + * write 1 to mailbox CMD to trigger LPU event + */ + writel(1, ioc->ioc_regs.hfn_mbox_cmd); + (void) readl(ioc->ioc_regs.hfn_mbox_cmd); +} + +static void +bfa_ioc_send_enable(struct bfa_ioc_s *ioc) +{ + struct bfi_ioc_ctrl_req_s enable_req; + + bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, + bfa_ioc_portid(ioc)); + enable_req.clscode = cpu_to_be16(ioc->clscode); + /* unsigned 32-bit time_t overflow in y2106 */ + enable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds()); + bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); +} + +static void +bfa_ioc_send_disable(struct bfa_ioc_s *ioc) +{ + struct bfi_ioc_ctrl_req_s disable_req; + + bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ, + bfa_ioc_portid(ioc)); + disable_req.clscode = cpu_to_be16(ioc->clscode); + /* unsigned 32-bit time_t overflow in y2106 */ + disable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds()); + bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s)); +} + +static void +bfa_ioc_send_getattr(struct bfa_ioc_s *ioc) +{ + struct bfi_ioc_getattr_req_s attr_req; + + bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ, + bfa_ioc_portid(ioc)); + bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa); + bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req)); +} + +static void +bfa_ioc_hb_check(void *cbarg) +{ + struct bfa_ioc_s *ioc = cbarg; + u32 hb_count; + + hb_count = readl(ioc->ioc_regs.heartbeat); + if (ioc->hb_count == hb_count) { + bfa_ioc_recover(ioc); + return; + } else { + ioc->hb_count = hb_count; + } + + bfa_ioc_mbox_poll(ioc); + bfa_hb_timer_start(ioc); +} + +static void +bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) +{ + ioc->hb_count = readl(ioc->ioc_regs.heartbeat); + bfa_hb_timer_start(ioc); +} + +/* + * Initiate a full firmware download. 
+ */ +static bfa_status_t +bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, + u32 boot_env) +{ + u32 *fwimg; + u32 pgnum; + u32 loff = 0; + u32 chunkno = 0; + u32 i; + u32 asicmode; + u32 fwimg_size; + u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS]; + bfa_status_t status; + + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_FLASH) { + fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32); + + status = bfa_ioc_flash_img_get_chnk(ioc, + BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf); + if (status != BFA_STATUS_OK) + return status; + + fwimg = fwimg_buf; + } else { + fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); + fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), + BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); + } + + bfa_trc(ioc, fwimg_size); + + + pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + for (i = 0; i < fwimg_size; i++) { + + if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { + chunkno = BFA_IOC_FLASH_CHUNK_NO(i); + + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_FLASH) { + status = bfa_ioc_flash_img_get_chnk(ioc, + BFA_IOC_FLASH_CHUNK_ADDR(chunkno), + fwimg_buf); + if (status != BFA_STATUS_OK) + return status; + + fwimg = fwimg_buf; + } else { + fwimg = bfa_cb_image_get_chunk( + bfa_ioc_asic_gen(ioc), + BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); + } + } + + /* + * write smem + */ + bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, + fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]); + + loff += sizeof(u32); + + /* + * handle page offset wrap around + */ + loff = PSS_SMEM_PGOFF(loff); + if (loff == 0) { + pgnum++; + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + } + } + + writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), + ioc->ioc_regs.host_page_num_fn); + + /* + * Set boot type, env and device mode at the end. + */ + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_FLASH) { + boot_type = BFI_FWBOOT_TYPE_NORMAL; + } + asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode, + ioc->port0_mode, ioc->port1_mode); + bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF, + swab32(asicmode)); + bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF, + swab32(boot_type)); + bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF, + swab32(boot_env)); + return BFA_STATUS_OK; +} + + +/* + * Update BFA configuration from firmware configuration. + */ +static void +bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc) +{ + struct bfi_ioc_attr_s *attr = ioc->attr; + + attr->adapter_prop = be32_to_cpu(attr->adapter_prop); + attr->card_type = be32_to_cpu(attr->card_type); + attr->maxfrsize = be16_to_cpu(attr->maxfrsize); + ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC); + attr->mfg_year = be16_to_cpu(attr->mfg_year); + + bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); +} + +/* + * Attach time initialization of mbox logic. + */ +static void +bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc) +{ + struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; + int mc; + + INIT_LIST_HEAD(&mod->cmd_q); + for (mc = 0; mc < BFI_MC_MAX; mc++) { + mod->mbhdlr[mc].cbfn = NULL; + mod->mbhdlr[mc].cbarg = ioc->bfa; + } +} + +/* + * Mbox poll timer -- restarts any pending mailbox requests. 
+ */ +static void +bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc) +{ + struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; + struct bfa_mbox_cmd_s *cmd; + u32 stat; + + /* + * If no command pending, do nothing + */ + if (list_empty(&mod->cmd_q)) + return; + + /* + * If previous command is not yet fetched by firmware, do nothing + */ + stat = readl(ioc->ioc_regs.hfn_mbox_cmd); + if (stat) + return; + + /* + * Enqueue command to firmware. + */ + bfa_q_deq(&mod->cmd_q, &cmd); + bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); +} + +/* + * Cleanup any pending requests. + */ +static void +bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc) +{ + struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; + struct bfa_mbox_cmd_s *cmd; + + while (!list_empty(&mod->cmd_q)) + bfa_q_deq(&mod->cmd_q, &cmd); +} + +/* + * Read data from SMEM to host through PCI memmap + * + * @param[in] ioc memory for IOC + * @param[in] tbuf app memory to store data from smem + * @param[in] soff smem offset + * @param[in] sz size of smem in bytes + */ +static bfa_status_t +bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz) +{ + u32 pgnum, loff; + __be32 r32; + int i, len; + u32 *buf = tbuf; + + pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); + loff = PSS_SMEM_PGOFF(soff); + bfa_trc(ioc, pgnum); + bfa_trc(ioc, loff); + bfa_trc(ioc, sz); + + /* + * Hold semaphore to serialize pll init and fwtrc. + */ + if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) { + bfa_trc(ioc, 0); + return BFA_STATUS_FAILED; + } + + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + len = sz/sizeof(u32); + bfa_trc(ioc, len); + for (i = 0; i < len; i++) { + r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); + buf[i] = swab32(r32); + loff += sizeof(u32); + + /* + * handle page offset wrap around + */ + loff = PSS_SMEM_PGOFF(loff); + if (loff == 0) { + pgnum++; + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + } + } + writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), + ioc->ioc_regs.host_page_num_fn); + /* + * release semaphore. + */ + readl(ioc->ioc_regs.ioc_init_sem_reg); + writel(1, ioc->ioc_regs.ioc_init_sem_reg); + + bfa_trc(ioc, pgnum); + return BFA_STATUS_OK; +} + +/* + * Clear SMEM data from host through PCI memmap + * + * @param[in] ioc memory for IOC + * @param[in] soff smem offset + * @param[in] sz size of smem in bytes + */ +static bfa_status_t +bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz) +{ + int i, len; + u32 pgnum, loff; + + pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); + loff = PSS_SMEM_PGOFF(soff); + bfa_trc(ioc, pgnum); + bfa_trc(ioc, loff); + bfa_trc(ioc, sz); + + /* + * Hold semaphore to serialize pll init and fwtrc. + */ + if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) { + bfa_trc(ioc, 0); + return BFA_STATUS_FAILED; + } + + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + len = sz/sizeof(u32); /* len in words */ + bfa_trc(ioc, len); + for (i = 0; i < len; i++) { + bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0); + loff += sizeof(u32); + + /* + * handle page offset wrap around + */ + loff = PSS_SMEM_PGOFF(loff); + if (loff == 0) { + pgnum++; + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + } + } + writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), + ioc->ioc_regs.host_page_num_fn); + + /* + * release semaphore. 
+ */ + readl(ioc->ioc_regs.ioc_init_sem_reg); + writel(1, ioc->ioc_regs.ioc_init_sem_reg); + bfa_trc(ioc, pgnum); + return BFA_STATUS_OK; +} + +static void +bfa_ioc_fail_notify(struct bfa_ioc_s *ioc) +{ + struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; + + /* + * Notify driver and common modules registered for notification. + */ + ioc->cbfn->hbfail_cbfn(ioc->bfa); + bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED); + + bfa_ioc_debug_save_ftrc(ioc); + + BFA_LOG(KERN_CRIT, bfad, bfa_log_level, + "Heart Beat of IOC has failed\n"); + bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL); + +} + +static void +bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc) +{ + struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; + /* + * Provide enable completion callback. + */ + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + BFA_LOG(KERN_WARNING, bfad, bfa_log_level, + "Running firmware version is incompatible " + "with the driver version\n"); + bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH); +} + +bfa_status_t +bfa_ioc_pll_init(struct bfa_ioc_s *ioc) +{ + + /* + * Hold semaphore so that nobody can access the chip during init. + */ + bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); + + bfa_ioc_pll_init_asic(ioc); + + ioc->pllinit = BFA_TRUE; + + /* + * Initialize LMEM + */ + bfa_ioc_lmem_init(ioc); + + /* + * release semaphore. + */ + readl(ioc->ioc_regs.ioc_init_sem_reg); + writel(1, ioc->ioc_regs.ioc_init_sem_reg); + + return BFA_STATUS_OK; +} + +/* + * Interface used by diag module to do firmware boot with memory test + * as the entry vector. + */ +bfa_status_t +bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) +{ + struct bfi_ioc_image_hdr_s *drv_fwhdr; + bfa_status_t status; + bfa_ioc_stats(ioc, ioc_boots); + + if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) + return BFA_STATUS_FAILED; + + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_NORMAL) { + + drv_fwhdr = (struct bfi_ioc_image_hdr_s *) + bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); + + /* + * Work with Flash iff flash f/w is better than driver f/w. + * Otherwise push drivers firmware. + */ + if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) == + BFI_IOC_IMG_VER_BETTER) + boot_type = BFI_FWBOOT_TYPE_FLASH; + } + + /* + * Initialize IOC state of all functions on a chip reset. + */ + if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) { + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST); + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST); + } else { + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING); + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING); + } + + bfa_ioc_msgflush(ioc); + status = bfa_ioc_download_fw(ioc, boot_type, boot_env); + if (status == BFA_STATUS_OK) + bfa_ioc_lpu_start(ioc); + else { + WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST); + bfa_iocpf_timeout(ioc); + } + return status; +} + +/* + * Enable/disable IOC failure auto recovery. 
+ */ +void +bfa_ioc_auto_recover(bfa_boolean_t auto_recover) +{ + bfa_auto_recover = auto_recover; +} + + + +bfa_boolean_t +bfa_ioc_is_operational(struct bfa_ioc_s *ioc) +{ + return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); +} + +bfa_boolean_t +bfa_ioc_is_initialized(struct bfa_ioc_s *ioc) +{ + u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc); + + return ((r32 != BFI_IOC_UNINIT) && + (r32 != BFI_IOC_INITING) && + (r32 != BFI_IOC_MEMTEST)); +} + +bfa_boolean_t +bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) +{ + __be32 *msgp = mbmsg; + u32 r32; + int i; + + r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); + if ((r32 & 1) == 0) + return BFA_FALSE; + + /* + * read the MBOX msg + */ + for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); + i++) { + r32 = readl(ioc->ioc_regs.lpu_mbox + + i * sizeof(u32)); + msgp[i] = cpu_to_be32(r32); + } + + /* + * turn off mailbox interrupt by clearing mailbox status + */ + writel(1, ioc->ioc_regs.lpu_mbox_cmd); + readl(ioc->ioc_regs.lpu_mbox_cmd); + + return BFA_TRUE; +} + +void +bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) +{ + union bfi_ioc_i2h_msg_u *msg; + struct bfa_iocpf_s *iocpf = &ioc->iocpf; + + msg = (union bfi_ioc_i2h_msg_u *) m; + + bfa_ioc_stats(ioc, ioc_isrs); + + switch (msg->mh.msg_id) { + case BFI_IOC_I2H_HBEAT: + break; + + case BFI_IOC_I2H_ENABLE_REPLY: + ioc->port_mode = ioc->port_mode_cfg = + (enum bfa_mode_s)msg->fw_event.port_mode; + ioc->ad_cap_bm = msg->fw_event.cap_bm; + bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); + break; + + case BFI_IOC_I2H_DISABLE_REPLY: + bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE); + break; + + case BFI_IOC_I2H_GETATTR_REPLY: + bfa_ioc_getattr_reply(ioc); + break; + + default: + bfa_trc(ioc, msg->mh.msg_id); + WARN_ON(1); + } +} + +/* + * IOC attach time initialization and setup. + * + * @param[in] ioc memory for IOC + * @param[in] bfa driver instance structure + */ +void +bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, + struct bfa_timer_mod_s *timer_mod) +{ + ioc->bfa = bfa; + ioc->cbfn = cbfn; + ioc->timer_mod = timer_mod; + ioc->fcmode = BFA_FALSE; + ioc->pllinit = BFA_FALSE; + ioc->dbg_fwsave_once = BFA_TRUE; + ioc->iocpf.ioc = ioc; + + bfa_ioc_mbox_attach(ioc); + INIT_LIST_HEAD(&ioc->notify_q); + + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_fsm_send_event(ioc, IOC_E_RESET); +} + +/* + * Driver detach time IOC cleanup. + */ +void +bfa_ioc_detach(struct bfa_ioc_s *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_DETACH); + INIT_LIST_HEAD(&ioc->notify_q); +} + +/* + * Setup IOC PCI properties. 
+ * + * @param[in] pcidev PCI device information for this IOC + */ +void +bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, + enum bfi_pcifn_class clscode) +{ + ioc->clscode = clscode; + ioc->pcidev = *pcidev; + + /* + * Initialize IOC and device personality + */ + ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC; + ioc->asic_mode = BFI_ASIC_MODE_FC; + + switch (pcidev->device_id) { + case BFA_PCI_DEVICE_ID_FC_8G1P: + case BFA_PCI_DEVICE_ID_FC_8G2P: + ioc->asic_gen = BFI_ASIC_GEN_CB; + ioc->fcmode = BFA_TRUE; + ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; + ioc->ad_cap_bm = BFA_CM_HBA; + break; + + case BFA_PCI_DEVICE_ID_CT: + ioc->asic_gen = BFI_ASIC_GEN_CT; + ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; + ioc->asic_mode = BFI_ASIC_MODE_ETH; + ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA; + ioc->ad_cap_bm = BFA_CM_CNA; + break; + + case BFA_PCI_DEVICE_ID_CT_FC: + ioc->asic_gen = BFI_ASIC_GEN_CT; + ioc->fcmode = BFA_TRUE; + ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; + ioc->ad_cap_bm = BFA_CM_HBA; + break; + + case BFA_PCI_DEVICE_ID_CT2: + case BFA_PCI_DEVICE_ID_CT2_QUAD: + ioc->asic_gen = BFI_ASIC_GEN_CT2; + if (clscode == BFI_PCIFN_CLASS_FC && + pcidev->ssid == BFA_PCI_CT2_SSID_FC) { + ioc->asic_mode = BFI_ASIC_MODE_FC16; + ioc->fcmode = BFA_TRUE; + ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; + ioc->ad_cap_bm = BFA_CM_HBA; + } else { + ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; + ioc->asic_mode = BFI_ASIC_MODE_ETH; + if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) { + ioc->port_mode = + ioc->port_mode_cfg = BFA_MODE_CNA; + ioc->ad_cap_bm = BFA_CM_CNA; + } else { + ioc->port_mode = + ioc->port_mode_cfg = BFA_MODE_NIC; + ioc->ad_cap_bm = BFA_CM_NIC; + } + } + break; + + default: + WARN_ON(1); + } + + /* + * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c + */ + if (ioc->asic_gen == BFI_ASIC_GEN_CB) + bfa_ioc_set_cb_hwif(ioc); + else if (ioc->asic_gen == BFI_ASIC_GEN_CT) + bfa_ioc_set_ct_hwif(ioc); + else { + WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2); + bfa_ioc_set_ct2_hwif(ioc); + bfa_ioc_ct2_poweron(ioc); + } + + bfa_ioc_map_port(ioc); + bfa_ioc_reg_init(ioc); +} + +/* + * Initialize IOC dma memory + * + * @param[in] dm_kva kernel virtual address of IOC dma memory + * @param[in] dm_pa physical address of IOC dma memory + */ +void +bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) +{ + /* + * dma memory for firmware attribute + */ + ioc->attr_dma.kva = dm_kva; + ioc->attr_dma.pa = dm_pa; + ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; +} + +void +bfa_ioc_enable(struct bfa_ioc_s *ioc) +{ + bfa_ioc_stats(ioc, ioc_enables); + ioc->dbg_fwsave_once = BFA_TRUE; + + bfa_fsm_send_event(ioc, IOC_E_ENABLE); +} + +void +bfa_ioc_disable(struct bfa_ioc_s *ioc) +{ + bfa_ioc_stats(ioc, ioc_disables); + bfa_fsm_send_event(ioc, IOC_E_DISABLE); +} + +void +bfa_ioc_suspend(struct bfa_ioc_s *ioc) +{ + ioc->dbg_fwsave_once = BFA_TRUE; + bfa_fsm_send_event(ioc, IOC_E_HWERROR); +} + +/* + * Initialize memory for saving firmware trace. Driver must initialize + * trace memory before call bfa_ioc_enable(). 
+ */ +void +bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) +{ + ioc->dbg_fwsave = dbg_fwsave; + ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN; +} + +/* + * Register mailbox message handler functions + * + * @param[in] ioc IOC instance + * @param[in] mcfuncs message class handler functions + */ +void +bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs) +{ + struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; + int mc; + + for (mc = 0; mc < BFI_MC_MAX; mc++) + mod->mbhdlr[mc].cbfn = mcfuncs[mc]; +} + +/* + * Register mailbox message handler function, to be called by common modules + */ +void +bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, + bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) +{ + struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; + + mod->mbhdlr[mc].cbfn = cbfn; + mod->mbhdlr[mc].cbarg = cbarg; +} + +/* + * Queue a mailbox command request to firmware. Waits if mailbox is busy. + * Responsibility of caller to serialize + * + * @param[in] ioc IOC instance + * @param[i] cmd Mailbox command + */ +void +bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd) +{ + struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; + u32 stat; + + /* + * If a previous command is pending, queue new command + */ + if (!list_empty(&mod->cmd_q)) { + list_add_tail(&cmd->qe, &mod->cmd_q); + return; + } + + /* + * If mailbox is busy, queue command for poll timer + */ + stat = readl(ioc->ioc_regs.hfn_mbox_cmd); + if (stat) { + list_add_tail(&cmd->qe, &mod->cmd_q); + return; + } + + /* + * mailbox is free -- queue command to firmware + */ + bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); +} + +/* + * Handle mailbox interrupts + */ +void +bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc) +{ + struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; + struct bfi_mbmsg_s m; + int mc; + + if (bfa_ioc_msgget(ioc, &m)) { + /* + * Treat IOC message class as special. + */ + mc = m.mh.msg_class; + if (mc == BFI_MC_IOC) { + bfa_ioc_isr(ioc, &m); + return; + } + + if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) + return; + + mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); + } + + bfa_ioc_lpu_read_stat(ioc); + + /* + * Try to send pending mailbox commands + */ + bfa_ioc_mbox_poll(ioc); +} + +void +bfa_ioc_error_isr(struct bfa_ioc_s *ioc) +{ + bfa_ioc_stats(ioc, ioc_hbfails); + ioc->stats.hb_count = ioc->hb_count; + bfa_fsm_send_event(ioc, IOC_E_HWERROR); +} + +/* + * return true if IOC is disabled + */ +bfa_boolean_t +bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) +{ + return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) || + bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); +} + +/* + * return true if IOC firmware is different. + */ +bfa_boolean_t +bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) +{ + return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) || + bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) || + bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch); +} + +/* + * Check if adapter is disabled -- both IOCs should be in a disabled + * state. + */ +bfa_boolean_t +bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) +{ + u32 ioc_state; + + if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) + return BFA_FALSE; + + ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc); + if (!bfa_ioc_state_disabled(ioc_state)) + return BFA_FALSE; + + if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) { + ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc); + if (!bfa_ioc_state_disabled(ioc_state)) + return BFA_FALSE; + } + + return BFA_TRUE; +} + +/* + * Reset IOC fwstate registers. 
+ */ +void +bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc) +{ + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT); + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT); +} + +#define BFA_MFG_NAME "QLogic" +void +bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, + struct bfa_adapter_attr_s *ad_attr) +{ + struct bfi_ioc_attr_s *ioc_attr; + + ioc_attr = ioc->attr; + + bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); + bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); + bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); + bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); + memcpy(&ad_attr->vpd, &ioc_attr->vpd, + sizeof(struct bfa_mfg_vpd_s)); + + ad_attr->nports = bfa_ioc_get_nports(ioc); + ad_attr->max_speed = bfa_ioc_speed_sup(ioc); + + bfa_ioc_get_adapter_model(ioc, ad_attr->model); + /* For now, model descr uses same model string */ + bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); + + ad_attr->card_type = ioc_attr->card_type; + ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type); + + if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) + ad_attr->prototype = 1; + else + ad_attr->prototype = 0; + + ad_attr->pwwn = ioc->attr->pwwn; + ad_attr->mac = bfa_ioc_get_mac(ioc); + + ad_attr->pcie_gen = ioc_attr->pcie_gen; + ad_attr->pcie_lanes = ioc_attr->pcie_lanes; + ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; + ad_attr->asic_rev = ioc_attr->asic_rev; + + bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); + + ad_attr->cna_capable = bfa_ioc_is_cna(ioc); + ad_attr->trunk_capable = (ad_attr->nports > 1) && + !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz; + ad_attr->mfg_day = ioc_attr->mfg_day; + ad_attr->mfg_month = ioc_attr->mfg_month; + ad_attr->mfg_year = ioc_attr->mfg_year; + memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN); +} + +enum bfa_ioc_type_e +bfa_ioc_get_type(struct bfa_ioc_s *ioc) +{ + if (ioc->clscode == BFI_PCIFN_CLASS_ETH) + return BFA_IOC_TYPE_LL; + + WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC); + + return (ioc->attr->port_mode == BFI_PORT_MODE_FC) + ? 
BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE; +} + +void +bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num) +{ + memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN); + memcpy((void *)serial_num, + (void *)ioc->attr->brcd_serialnum, + BFA_ADAPTER_SERIAL_NUM_LEN); +} + +void +bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver) +{ + memset((void *)fw_ver, 0, BFA_VERSION_LEN); + memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); +} + +void +bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev) +{ + WARN_ON(!chip_rev); + + memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN); + + chip_rev[0] = 'R'; + chip_rev[1] = 'e'; + chip_rev[2] = 'v'; + chip_rev[3] = '-'; + chip_rev[4] = ioc->attr->asic_rev; + chip_rev[5] = '\0'; +} + +void +bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver) +{ + memset((void *)optrom_ver, 0, BFA_VERSION_LEN); + memcpy(optrom_ver, ioc->attr->optrom_version, + BFA_VERSION_LEN); +} + +void +bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer) +{ + memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); + strscpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); +} + +void +bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) +{ + struct bfi_ioc_attr_s *ioc_attr; + u8 nports = bfa_ioc_get_nports(ioc); + + WARN_ON(!model); + memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); + + ioc_attr = ioc->attr; + + if (bfa_asic_id_ct2(ioc->pcidev.device_id) && + (!bfa_mfg_is_mezz(ioc_attr->card_type))) + snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s", + BFA_MFG_NAME, ioc_attr->card_type, nports, "p"); + else + snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", + BFA_MFG_NAME, ioc_attr->card_type); +} + +enum bfa_ioc_state +bfa_ioc_get_state(struct bfa_ioc_s *ioc) +{ + enum bfa_iocpf_state iocpf_st; + enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm); + + if (ioc_st == BFA_IOC_ENABLING || + ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) { + + iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); + + switch (iocpf_st) { + case BFA_IOCPF_SEMWAIT: + ioc_st = BFA_IOC_SEMWAIT; + break; + + case BFA_IOCPF_HWINIT: + ioc_st = BFA_IOC_HWINIT; + break; + + case BFA_IOCPF_FWMISMATCH: + ioc_st = BFA_IOC_FWMISMATCH; + break; + + case BFA_IOCPF_FAIL: + ioc_st = BFA_IOC_FAIL; + break; + + case BFA_IOCPF_INITFAIL: + ioc_st = BFA_IOC_INITFAIL; + break; + + default: + break; + } + } + + return ioc_st; +} + +void +bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) +{ + memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s)); + + ioc_attr->state = bfa_ioc_get_state(ioc); + ioc_attr->port_id = bfa_ioc_portid(ioc); + ioc_attr->port_mode = ioc->port_mode; + ioc_attr->port_mode_cfg = ioc->port_mode_cfg; + ioc_attr->cap_bm = ioc->ad_cap_bm; + + ioc_attr->ioc_type = bfa_ioc_get_type(ioc); + + bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); + + ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc); + ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc); + ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc)); + bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); +} + +mac_t +bfa_ioc_get_mac(struct bfa_ioc_s *ioc) +{ + /* + * Check the IOC type and return the appropriate MAC + */ + if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE) + return ioc->attr->fcoe_mac; + else + return ioc->attr->mac; +} + +mac_t +bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc) +{ + mac_t m; + + m = ioc->attr->mfg_mac; + if 
(bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type)) + m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc); + else + bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]), + bfa_ioc_pcifn(ioc)); + + return m; +} + +/* + * Send AEN notification + */ +void +bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event) +{ + struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; + struct bfa_aen_entry_s *aen_entry; + enum bfa_ioc_type_e ioc_type; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + ioc_type = bfa_ioc_get_type(ioc); + switch (ioc_type) { + case BFA_IOC_TYPE_FC: + aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn; + break; + case BFA_IOC_TYPE_FCoE: + aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn; + aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc); + break; + case BFA_IOC_TYPE_LL: + aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc); + break; + default: + WARN_ON(ioc_type != BFA_IOC_TYPE_FC); + break; + } + + /* Send the AEN notification */ + aen_entry->aen_data.ioc.ioc_type = ioc_type; + bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq, + BFA_AEN_CAT_IOC, event); +} + +/* + * Retrieve saved firmware trace from a prior IOC failure. + */ +bfa_status_t +bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) +{ + int tlen; + + if (ioc->dbg_fwsave_len == 0) + return BFA_STATUS_ENOFSAVE; + + tlen = *trclen; + if (tlen > ioc->dbg_fwsave_len) + tlen = ioc->dbg_fwsave_len; + + memcpy(trcdata, ioc->dbg_fwsave, tlen); + *trclen = tlen; + return BFA_STATUS_OK; +} + + +/* + * Retrieve saved firmware trace from a prior IOC failure. + */ +bfa_status_t +bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) +{ + u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc)); + int tlen; + bfa_status_t status; + + bfa_trc(ioc, *trclen); + + tlen = *trclen; + if (tlen > BFA_DBG_FWTRC_LEN) + tlen = BFA_DBG_FWTRC_LEN; + + status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen); + *trclen = tlen; + return status; +} + +static void +bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc) +{ + struct bfa_mbox_cmd_s cmd; + struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg; + + bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC, + bfa_ioc_portid(ioc)); + req->clscode = cpu_to_be16(ioc->clscode); + bfa_ioc_mbox_queue(ioc, &cmd); +} + +static void +bfa_ioc_fwsync(struct bfa_ioc_s *ioc) +{ + u32 fwsync_iter = 1000; + + bfa_ioc_send_fwsync(ioc); + + /* + * After sending a fw sync mbox command wait for it to + * take effect. We will not wait for a response because + * 1. fw_sync mbox cmd doesn't have a response. + * 2. Even if we implement that, interrupts might not + * be enabled when we call this function. + * So, just keep checking if any mbox cmd is pending, and + * after waiting for a reasonable amount of time, go ahead. + * It is possible that fw has crashed and the mbox command + * is never acknowledged. + */ + while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0) + fwsync_iter--; +} + +/* + * Dump firmware smem + */ +bfa_status_t +bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf, + u32 *offset, int *buflen) +{ + u32 loff; + int dlen; + bfa_status_t status; + u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc); + + if (*offset >= smem_len) { + *offset = *buflen = 0; + return BFA_STATUS_EINVAL; + } + + loff = *offset; + dlen = *buflen; + + /* + * First smem read, sync smem before proceeding + * No need to sync before reading every chunk. 
+ */ + if (loff == 0) + bfa_ioc_fwsync(ioc); + + if ((loff + dlen) >= smem_len) + dlen = smem_len - loff; + + status = bfa_ioc_smem_read(ioc, buf, loff, dlen); + + if (status != BFA_STATUS_OK) { + *offset = *buflen = 0; + return status; + } + + *offset += dlen; + + if (*offset >= smem_len) + *offset = 0; + + *buflen = dlen; + + return status; +} + +/* + * Firmware statistics + */ +bfa_status_t +bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats) +{ + u32 loff = BFI_IOC_FWSTATS_OFF + \ + BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc)); + int tlen; + bfa_status_t status; + + if (ioc->stats_busy) { + bfa_trc(ioc, ioc->stats_busy); + return BFA_STATUS_DEVBUSY; + } + ioc->stats_busy = BFA_TRUE; + + tlen = sizeof(struct bfa_fw_stats_s); + status = bfa_ioc_smem_read(ioc, stats, loff, tlen); + + ioc->stats_busy = BFA_FALSE; + return status; +} + +bfa_status_t +bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc) +{ + u32 loff = BFI_IOC_FWSTATS_OFF + \ + BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc)); + int tlen; + bfa_status_t status; + + if (ioc->stats_busy) { + bfa_trc(ioc, ioc->stats_busy); + return BFA_STATUS_DEVBUSY; + } + ioc->stats_busy = BFA_TRUE; + + tlen = sizeof(struct bfa_fw_stats_s); + status = bfa_ioc_smem_clr(ioc, loff, tlen); + + ioc->stats_busy = BFA_FALSE; + return status; +} + +/* + * Save firmware trace if configured. + */ +void +bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc) +{ + int tlen; + + if (ioc->dbg_fwsave_once) { + ioc->dbg_fwsave_once = BFA_FALSE; + if (ioc->dbg_fwsave_len) { + tlen = ioc->dbg_fwsave_len; + bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen); + } + } +} + +/* + * Firmware failure detected. Start recovery actions. + */ +static void +bfa_ioc_recover(struct bfa_ioc_s *ioc) +{ + bfa_ioc_stats(ioc, ioc_hbfails); + ioc->stats.hb_count = ioc->hb_count; + bfa_fsm_send_event(ioc, IOC_E_HBFAIL); +} + +/* + * BFA IOC PF private functions + */ +static void +bfa_iocpf_timeout(void *ioc_arg) +{ + struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg; + + bfa_trc(ioc, 0); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); +} + +static void +bfa_iocpf_sem_timeout(void *ioc_arg) +{ + struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg; + + bfa_ioc_hw_sem_get(ioc); +} + +static void +bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc) +{ + u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); + + bfa_trc(ioc, fwstate); + + if (fwstate == BFI_IOC_DISABLED) { + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); + return; + } + + if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV)) + bfa_iocpf_timeout(ioc); + else { + ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; + bfa_iocpf_poll_timer_start(ioc); + } +} + +static void +bfa_iocpf_poll_timeout(void *ioc_arg) +{ + struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg; + + bfa_ioc_poll_fwinit(ioc); +} + +/* + * bfa timer function + */ +void +bfa_timer_beat(struct bfa_timer_mod_s *mod) +{ + struct list_head *qh = &mod->timer_q; + struct list_head *qe, *qe_next; + struct bfa_timer_s *elem; + struct list_head timedout_q; + + INIT_LIST_HEAD(&timedout_q); + + qe = bfa_q_next(qh); + + while (qe != qh) { + qe_next = bfa_q_next(qe); + + elem = (struct bfa_timer_s *) qe; + if (elem->timeout <= BFA_TIMER_FREQ) { + elem->timeout = 0; + list_del(&elem->qe); + list_add_tail(&elem->qe, &timedout_q); + } else { + elem->timeout -= BFA_TIMER_FREQ; + } + + qe = qe_next; /* go to next elem */ + } + + /* + * Pop all the timeout entries + */ + while (!list_empty(&timedout_q)) { + bfa_q_deq(&timedout_q, &elem); + elem->timercb(elem->arg); + } +} + +/* + * Should be called with lock 
protection + */ +void +bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer, + void (*timercb) (void *), void *arg, unsigned int timeout) +{ + + WARN_ON(timercb == NULL); + WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer)); + + timer->timeout = timeout; + timer->timercb = timercb; + timer->arg = arg; + + list_add_tail(&timer->qe, &mod->timer_q); +} + +/* + * Should be called with lock protection + */ +void +bfa_timer_stop(struct bfa_timer_s *timer) +{ + WARN_ON(list_empty(&timer->qe)); + + list_del(&timer->qe); +} + +/* + * ASIC block related + */ +static void +bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg) +{ + struct bfa_ablk_cfg_inst_s *cfg_inst; + int i, j; + u16 be16; + + for (i = 0; i < BFA_ABLK_MAX; i++) { + cfg_inst = &cfg->inst[i]; + for (j = 0; j < BFA_ABLK_MAX_PFS; j++) { + be16 = cfg_inst->pf_cfg[j].pers; + cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16); + be16 = cfg_inst->pf_cfg[j].num_qpairs; + cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16); + be16 = cfg_inst->pf_cfg[j].num_vectors; + cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16); + be16 = cfg_inst->pf_cfg[j].bw_min; + cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16); + be16 = cfg_inst->pf_cfg[j].bw_max; + cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16); + } + } +} + +static void +bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg) +{ + struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg; + struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg; + bfa_ablk_cbfn_t cbfn; + + WARN_ON(msg->mh.msg_class != BFI_MC_ABLK); + bfa_trc(ablk->ioc, msg->mh.msg_id); + + switch (msg->mh.msg_id) { + case BFI_ABLK_I2H_QUERY: + if (rsp->status == BFA_STATUS_OK) { + memcpy(ablk->cfg, ablk->dma_addr.kva, + sizeof(struct bfa_ablk_cfg_s)); + bfa_ablk_config_swap(ablk->cfg); + ablk->cfg = NULL; + } + break; + + case BFI_ABLK_I2H_ADPT_CONFIG: + case BFI_ABLK_I2H_PORT_CONFIG: + /* update config port mode */ + ablk->ioc->port_mode_cfg = rsp->port_mode; + break; + + case BFI_ABLK_I2H_PF_DELETE: + case BFI_ABLK_I2H_PF_UPDATE: + case BFI_ABLK_I2H_OPTROM_ENABLE: + case BFI_ABLK_I2H_OPTROM_DISABLE: + /* No-op */ + break; + + case BFI_ABLK_I2H_PF_CREATE: + *(ablk->pcifn) = rsp->pcifn; + ablk->pcifn = NULL; + break; + + default: + WARN_ON(1); + } + + ablk->busy = BFA_FALSE; + if (ablk->cbfn) { + cbfn = ablk->cbfn; + ablk->cbfn = NULL; + cbfn(ablk->cbarg, rsp->status); + } +} + +static void +bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event) +{ + struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg; + + bfa_trc(ablk->ioc, event); + + switch (event) { + case BFA_IOC_E_ENABLED: + WARN_ON(ablk->busy != BFA_FALSE); + break; + + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + /* Fail any pending requests */ + ablk->pcifn = NULL; + if (ablk->busy) { + if (ablk->cbfn) + ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED); + ablk->cbfn = NULL; + ablk->busy = BFA_FALSE; + } + break; + + default: + WARN_ON(1); + break; + } +} + +u32 +bfa_ablk_meminfo(void) +{ + return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ); +} + +void +bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa) +{ + ablk->dma_addr.kva = dma_kva; + ablk->dma_addr.pa = dma_pa; +} + +void +bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc) +{ + ablk->ioc = ioc; + + bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk); + bfa_q_qe_init(&ablk->ioc_notify); + bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk); + list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q); +} + +bfa_status_t +bfa_ablk_query(struct 
bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg, + bfa_ablk_cbfn_t cbfn, void *cbarg) +{ + struct bfi_ablk_h2i_query_s *m; + + WARN_ON(!ablk_cfg); + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->cfg = ablk_cfg; + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY, + bfa_ioc_portid(ablk->ioc)); + bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa); + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, + u8 port, enum bfi_pcifn_class personality, + u16 bw_min, u16 bw_max, + bfa_ablk_cbfn_t cbfn, void *cbarg) +{ + struct bfi_ablk_h2i_pf_req_s *m; + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->pcifn = pcifn; + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE, + bfa_ioc_portid(ablk->ioc)); + m->pers = cpu_to_be16((u16)personality); + m->bw_min = cpu_to_be16(bw_min); + m->bw_max = cpu_to_be16(bw_max); + m->port = port; + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn, + bfa_ablk_cbfn_t cbfn, void *cbarg) +{ + struct bfi_ablk_h2i_pf_req_s *m; + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE, + bfa_ioc_portid(ablk->ioc)); + m->pcifn = (u8)pcifn; + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode, + int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg) +{ + struct bfi_ablk_h2i_cfg_req_s *m; + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG, + bfa_ioc_portid(ablk->ioc)); + m->mode = (u8)mode; + m->max_pf = (u8)max_pf; + m->max_vf = (u8)max_vf; + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode, + int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg) +{ + struct bfi_ablk_h2i_cfg_req_s *m; + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + 
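/* only one ASIC block mailbox request may be outstanding at a time */ +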
ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG, + bfa_ioc_portid(ablk->ioc)); + m->port = (u8)port; + m->mode = (u8)mode; + m->max_pf = (u8)max_pf; + m->max_vf = (u8)max_vf; + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min, + u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg) +{ + struct bfi_ablk_h2i_pf_req_s *m; + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE, + bfa_ioc_portid(ablk->ioc)); + m->pcifn = (u8)pcifn; + m->bw_min = cpu_to_be16(bw_min); + m->bw_max = cpu_to_be16(bw_max); + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg) +{ + struct bfi_ablk_h2i_optrom_s *m; + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE, + bfa_ioc_portid(ablk->ioc)); + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg) +{ + struct bfi_ablk_h2i_optrom_s *m; + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE, + bfa_ioc_portid(ablk->ioc)); + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +/* + * SFP module specific + */ + +/* forward declarations */ +static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp); +static void bfa_sfp_media_get(struct bfa_sfp_s *sfp); +static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, + enum bfa_port_speed portspeed); + +static void +bfa_cb_sfp_show(struct bfa_sfp_s *sfp) +{ + bfa_trc(sfp, sfp->lock); + if (sfp->cbfn) + sfp->cbfn(sfp->cbarg, sfp->status); + sfp->lock = 0; + sfp->cbfn = NULL; +} + +static void +bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp) +{ + bfa_trc(sfp, sfp->portspeed); + if (sfp->media) { + bfa_sfp_media_get(sfp); + if (sfp->state_query_cbfn) + sfp->state_query_cbfn(sfp->state_query_cbarg, + sfp->status); + sfp->media = NULL; + } + + if (sfp->portspeed) { + sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed); + if (sfp->state_query_cbfn) + sfp->state_query_cbfn(sfp->state_query_cbarg, + sfp->status); + sfp->portspeed = BFA_PORT_SPEED_UNKNOWN; + } + + sfp->state_query_lock = 0; + sfp->state_query_cbfn = NULL; +} + +/* + * IOC event handler. 
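+ *
+ * On IOC disable or failure, any in-flight SFP show or state-query
+ * request is completed here with BFA_STATUS_IOC_FAILURE so that the
+ * waiting caller is not left hanging.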
+ */ +static void +bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event) +{ + struct bfa_sfp_s *sfp = sfp_arg; + + bfa_trc(sfp, event); + bfa_trc(sfp, sfp->lock); + bfa_trc(sfp, sfp->state_query_lock); + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (sfp->lock) { + sfp->status = BFA_STATUS_IOC_FAILURE; + bfa_cb_sfp_show(sfp); + } + + if (sfp->state_query_lock) { + sfp->status = BFA_STATUS_IOC_FAILURE; + bfa_cb_sfp_state_query(sfp); + } + break; + + default: + break; + } +} + +/* + * SFP's State Change Notification post to AEN + */ +static void +bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp) +{ + struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad; + struct bfa_aen_entry_s *aen_entry; + enum bfa_port_aen_event aen_evt = 0; + + bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) | + ((u64)rsp->event)); + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc); + aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn; + aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc); + + switch (rsp->event) { + case BFA_SFP_SCN_INSERTED: + aen_evt = BFA_PORT_AEN_SFP_INSERT; + break; + case BFA_SFP_SCN_REMOVED: + aen_evt = BFA_PORT_AEN_SFP_REMOVE; + break; + case BFA_SFP_SCN_FAILED: + aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR; + break; + case BFA_SFP_SCN_UNSUPPORT: + aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT; + break; + case BFA_SFP_SCN_POM: + aen_evt = BFA_PORT_AEN_SFP_POM; + aen_entry->aen_data.port.level = rsp->pomlvl; + break; + default: + bfa_trc(sfp, rsp->event); + WARN_ON(1); + } + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq, + BFA_AEN_CAT_PORT, aen_evt); +} + +/* + * SFP get data send + */ +static void +bfa_sfp_getdata_send(struct bfa_sfp_s *sfp) +{ + struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; + + bfa_trc(sfp, req->memtype); + + /* build host command */ + bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW, + bfa_ioc_portid(sfp->ioc)); + + /* send mbox cmd */ + bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd); +} + +/* + * SFP is valid, read sfp data + */ +static void +bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype) +{ + struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; + + WARN_ON(sfp->lock != 0); + bfa_trc(sfp, sfp->state); + + sfp->lock = 1; + sfp->memtype = memtype; + req->memtype = memtype; + + /* Setup SG list */ + bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa); + + bfa_sfp_getdata_send(sfp); +} + +/* + * SFP scn handler + */ +static void +bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg) +{ + struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg; + + switch (rsp->event) { + case BFA_SFP_SCN_INSERTED: + sfp->state = BFA_SFP_STATE_INSERTED; + sfp->data_valid = 0; + bfa_sfp_scn_aen_post(sfp, rsp); + break; + case BFA_SFP_SCN_REMOVED: + sfp->state = BFA_SFP_STATE_REMOVED; + sfp->data_valid = 0; + bfa_sfp_scn_aen_post(sfp, rsp); + break; + case BFA_SFP_SCN_FAILED: + sfp->state = BFA_SFP_STATE_FAILED; + sfp->data_valid = 0; + bfa_sfp_scn_aen_post(sfp, rsp); + break; + case BFA_SFP_SCN_UNSUPPORT: + sfp->state = BFA_SFP_STATE_UNSUPPORT; + bfa_sfp_scn_aen_post(sfp, rsp); + if (!sfp->lock) + bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL); + break; + case BFA_SFP_SCN_POM: + bfa_sfp_scn_aen_post(sfp, rsp); + break; + case BFA_SFP_SCN_VALID: + sfp->state = BFA_SFP_STATE_VALID; + if (!sfp->lock) + bfa_sfp_getdata(sfp, 
BFI_SFP_MEM_ALL); + break; + default: + bfa_trc(sfp, rsp->event); + WARN_ON(1); + } +} + +/* + * SFP show complete + */ +static void +bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg) +{ + struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg; + + if (!sfp->lock) { + /* + * receiving response after ioc failure + */ + bfa_trc(sfp, sfp->lock); + return; + } + + bfa_trc(sfp, rsp->status); + if (rsp->status == BFA_STATUS_OK) { + sfp->data_valid = 1; + if (sfp->state == BFA_SFP_STATE_VALID) + sfp->status = BFA_STATUS_OK; + else if (sfp->state == BFA_SFP_STATE_UNSUPPORT) + sfp->status = BFA_STATUS_SFP_UNSUPP; + else + bfa_trc(sfp, sfp->state); + } else { + sfp->data_valid = 0; + sfp->status = rsp->status; + /* sfpshow shouldn't change sfp state */ + } + + bfa_trc(sfp, sfp->memtype); + if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) { + bfa_trc(sfp, sfp->data_valid); + if (sfp->data_valid) { + u32 size = sizeof(struct sfp_mem_s); + u8 *des = (u8 *)(sfp->sfpmem); + memcpy(des, sfp->dbuf_kva, size); + } + /* + * Queue completion callback. + */ + bfa_cb_sfp_show(sfp); + } else + sfp->lock = 0; + + bfa_trc(sfp, sfp->state_query_lock); + if (sfp->state_query_lock) { + sfp->state = rsp->state; + /* Complete callback */ + bfa_cb_sfp_state_query(sfp); + } +} + +/* + * SFP query fw sfp state + */ +static void +bfa_sfp_state_query(struct bfa_sfp_s *sfp) +{ + struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; + + /* Should not be doing query if not in _INIT state */ + WARN_ON(sfp->state != BFA_SFP_STATE_INIT); + WARN_ON(sfp->state_query_lock != 0); + bfa_trc(sfp, sfp->state); + + sfp->state_query_lock = 1; + req->memtype = 0; + + if (!sfp->lock) + bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL); +} + +static void +bfa_sfp_media_get(struct bfa_sfp_s *sfp) +{ + enum bfa_defs_sfp_media_e *media = sfp->media; + + *media = BFA_SFP_MEDIA_UNKNOWN; + + if (sfp->state == BFA_SFP_STATE_UNSUPPORT) + *media = BFA_SFP_MEDIA_UNSUPPORT; + else if (sfp->state == BFA_SFP_STATE_VALID) { + union sfp_xcvr_e10g_code_u e10g; + struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva; + u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 | + (sfpmem->srlid_base.xcvr[5] >> 1); + + e10g.b = sfpmem->srlid_base.xcvr[0]; + bfa_trc(sfp, e10g.b); + bfa_trc(sfp, xmtr_tech); + /* check fc transmitter tech */ + if ((xmtr_tech & SFP_XMTR_TECH_CU) || + (xmtr_tech & SFP_XMTR_TECH_CP) || + (xmtr_tech & SFP_XMTR_TECH_CA)) + *media = BFA_SFP_MEDIA_CU; + else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) || + (xmtr_tech & SFP_XMTR_TECH_EL_INTER)) + *media = BFA_SFP_MEDIA_EL; + else if ((xmtr_tech & SFP_XMTR_TECH_LL) || + (xmtr_tech & SFP_XMTR_TECH_LC)) + *media = BFA_SFP_MEDIA_LW; + else if ((xmtr_tech & SFP_XMTR_TECH_SL) || + (xmtr_tech & SFP_XMTR_TECH_SN) || + (xmtr_tech & SFP_XMTR_TECH_SA)) + *media = BFA_SFP_MEDIA_SW; + /* Check 10G Ethernet Compilance code */ + else if (e10g.r.e10g_sr) + *media = BFA_SFP_MEDIA_SW; + else if (e10g.r.e10g_lrm && e10g.r.e10g_lr) + *media = BFA_SFP_MEDIA_LW; + else if (e10g.r.e10g_unall) + *media = BFA_SFP_MEDIA_UNKNOWN; + else + bfa_trc(sfp, 0); + } else + bfa_trc(sfp, sfp->state); +} + +static bfa_status_t +bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed) +{ + struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva; + struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr; + union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3; + union sfp_xcvr_e10g_code_u e10g = xcvr->e10g; + + if (portspeed == BFA_PORT_SPEED_10GBPS) { + if (e10g.r.e10g_sr || 
e10g.r.e10g_lr) + return BFA_STATUS_OK; + else { + bfa_trc(sfp, e10g.b); + return BFA_STATUS_UNSUPP_SPEED; + } + } + if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) || + ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) || + ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) || + ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) || + ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100)) + return BFA_STATUS_OK; + else { + bfa_trc(sfp, portspeed); + bfa_trc(sfp, fc3.b); + bfa_trc(sfp, e10g.b); + return BFA_STATUS_UNSUPP_SPEED; + } +} + +/* + * SFP hmbox handler + */ +void +bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg) +{ + struct bfa_sfp_s *sfp = sfparg; + + switch (msg->mh.msg_id) { + case BFI_SFP_I2H_SHOW: + bfa_sfp_show_comp(sfp, msg); + break; + + case BFI_SFP_I2H_SCN: + bfa_sfp_scn(sfp, msg); + break; + + default: + bfa_trc(sfp, msg->mh.msg_id); + WARN_ON(1); + } +} + +/* + * Return DMA memory needed by sfp module. + */ +u32 +bfa_sfp_meminfo(void) +{ + return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ); +} + +/* + * Attach virtual and physical memory for SFP. + */ +void +bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev, + struct bfa_trc_mod_s *trcmod) +{ + sfp->dev = dev; + sfp->ioc = ioc; + sfp->trcmod = trcmod; + + sfp->cbfn = NULL; + sfp->cbarg = NULL; + sfp->sfpmem = NULL; + sfp->lock = 0; + sfp->data_valid = 0; + sfp->state = BFA_SFP_STATE_INIT; + sfp->state_query_lock = 0; + sfp->state_query_cbfn = NULL; + sfp->state_query_cbarg = NULL; + sfp->media = NULL; + sfp->portspeed = BFA_PORT_SPEED_UNKNOWN; + sfp->is_elb = BFA_FALSE; + + bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp); + bfa_q_qe_init(&sfp->ioc_notify); + bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp); + list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q); +} + +/* + * Claim Memory for SFP + */ +void +bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa) +{ + sfp->dbuf_kva = dm_kva; + sfp->dbuf_pa = dm_pa; + memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s)); + + dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ); + dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ); +} + +/* + * Show SFP eeprom content + * + * @param[in] sfp - bfa sfp module + * + * @param[out] sfpmem - sfp eeprom data + * + */ +bfa_status_t +bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem, + bfa_cb_sfp_t cbfn, void *cbarg) +{ + + if (!bfa_ioc_is_operational(sfp->ioc)) { + bfa_trc(sfp, 0); + return BFA_STATUS_IOC_NON_OP; + } + + if (sfp->lock) { + bfa_trc(sfp, 0); + return BFA_STATUS_DEVBUSY; + } + + sfp->cbfn = cbfn; + sfp->cbarg = cbarg; + sfp->sfpmem = sfpmem; + + bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT); + return BFA_STATUS_OK; +} + +/* + * Return SFP Media type + * + * @param[in] sfp - bfa sfp module + * + * @param[out] media - port speed from user + * + */ +bfa_status_t +bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media, + bfa_cb_sfp_t cbfn, void *cbarg) +{ + if (!bfa_ioc_is_operational(sfp->ioc)) { + bfa_trc(sfp, 0); + return BFA_STATUS_IOC_NON_OP; + } + + sfp->media = media; + if (sfp->state == BFA_SFP_STATE_INIT) { + if (sfp->state_query_lock) { + bfa_trc(sfp, 0); + return BFA_STATUS_DEVBUSY; + } else { + sfp->state_query_cbfn = cbfn; + sfp->state_query_cbarg = cbarg; + bfa_sfp_state_query(sfp); + return BFA_STATUS_SFP_NOT_READY; + } + } + + bfa_sfp_media_get(sfp); + return BFA_STATUS_OK; +} + +/* + * Check if user set port speed is allowed by the SFP + * + * @param[in] sfp - bfa sfp module + 
* @param[in] portspeed - port speed from user + * + */ +bfa_status_t +bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed, + bfa_cb_sfp_t cbfn, void *cbarg) +{ + WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN); + + if (!bfa_ioc_is_operational(sfp->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* For Mezz card, all speed is allowed */ + if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type)) + return BFA_STATUS_OK; + + /* Check SFP state */ + sfp->portspeed = portspeed; + if (sfp->state == BFA_SFP_STATE_INIT) { + if (sfp->state_query_lock) { + bfa_trc(sfp, 0); + return BFA_STATUS_DEVBUSY; + } else { + sfp->state_query_cbfn = cbfn; + sfp->state_query_cbarg = cbarg; + bfa_sfp_state_query(sfp); + return BFA_STATUS_SFP_NOT_READY; + } + } + + if (sfp->state == BFA_SFP_STATE_REMOVED || + sfp->state == BFA_SFP_STATE_FAILED) { + bfa_trc(sfp, sfp->state); + return BFA_STATUS_NO_SFP_DEV; + } + + if (sfp->state == BFA_SFP_STATE_INSERTED) { + bfa_trc(sfp, sfp->state); + return BFA_STATUS_DEVBUSY; /* sfp is reading data */ + } + + /* For eloopback, all speed is allowed */ + if (sfp->is_elb) + return BFA_STATUS_OK; + + return bfa_sfp_speed_valid(sfp, portspeed); +} + +/* + * Flash module specific + */ + +/* + * FLASH DMA buffer should be big enough to hold both MFG block and + * asic block(64k) at the same time and also should be 2k aligned to + * avoid write segement to cross sector boundary. + */ +#define BFA_FLASH_SEG_SZ 2048 +#define BFA_FLASH_DMA_BUF_SZ \ + BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ) + +static void +bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event, + int inst, int type) +{ + struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; + struct bfa_aen_entry_s *aen_entry; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn; + aen_entry->aen_data.audit.partition_inst = inst; + aen_entry->aen_data.audit.partition_type = type; + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq, + BFA_AEN_CAT_AUDIT, event); +} + +static void +bfa_flash_cb(struct bfa_flash_s *flash) +{ + flash->op_busy = 0; + if (flash->cbfn) + flash->cbfn(flash->cbarg, flash->status); +} + +static void +bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event) +{ + struct bfa_flash_s *flash = cbarg; + + bfa_trc(flash, event); + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (flash->op_busy) { + flash->status = BFA_STATUS_IOC_FAILURE; + flash->cbfn(flash->cbarg, flash->status); + flash->op_busy = 0; + } + break; + + default: + break; + } +} + +/* + * Send flash attribute query request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_flash_query_send(void *cbarg) +{ + struct bfa_flash_s *flash = cbarg; + struct bfi_flash_query_req_s *msg = + (struct bfi_flash_query_req_s *) flash->mb.msg; + + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s), + flash->dbuf_pa); + bfa_ioc_mbox_queue(flash->ioc, &flash->mb); +} + +/* + * Send flash write request. 
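+ *
+ * Writes larger than BFA_FLASH_DMA_BUF_SZ are split into multiple
+ * mailbox requests: flash->residue and flash->offset track progress,
+ * the 'last' flag tells the firmware when the final chunk has been
+ * queued, and follow-up chunks are sent from the write-response
+ * handler.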
+ * + * @param[in] cbarg - callback argument + */ +static void +bfa_flash_write_send(struct bfa_flash_s *flash) +{ + struct bfi_flash_write_req_s *msg = + (struct bfi_flash_write_req_s *) flash->mb.msg; + u32 len; + + msg->type = be32_to_cpu(flash->type); + msg->instance = flash->instance; + msg->offset = be32_to_cpu(flash->addr_off + flash->offset); + len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? + flash->residue : BFA_FLASH_DMA_BUF_SZ; + msg->length = be32_to_cpu(len); + + /* indicate if it's the last msg of the whole write operation */ + msg->last = (len == flash->residue) ? 1 : 0; + + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_alen_set(&msg->alen, len, flash->dbuf_pa); + memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len); + bfa_ioc_mbox_queue(flash->ioc, &flash->mb); + + flash->residue -= len; + flash->offset += len; +} + +/* + * Send flash read request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_flash_read_send(void *cbarg) +{ + struct bfa_flash_s *flash = cbarg; + struct bfi_flash_read_req_s *msg = + (struct bfi_flash_read_req_s *) flash->mb.msg; + u32 len; + + msg->type = be32_to_cpu(flash->type); + msg->instance = flash->instance; + msg->offset = be32_to_cpu(flash->addr_off + flash->offset); + len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? + flash->residue : BFA_FLASH_DMA_BUF_SZ; + msg->length = be32_to_cpu(len); + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_alen_set(&msg->alen, len, flash->dbuf_pa); + bfa_ioc_mbox_queue(flash->ioc, &flash->mb); +} + +/* + * Send flash erase request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_flash_erase_send(void *cbarg) +{ + struct bfa_flash_s *flash = cbarg; + struct bfi_flash_erase_req_s *msg = + (struct bfi_flash_erase_req_s *) flash->mb.msg; + + msg->type = be32_to_cpu(flash->type); + msg->instance = flash->instance; + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_ioc_mbox_queue(flash->ioc, &flash->mb); +} + +/* + * Process flash response messages upon receiving interrupts. 
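+ *
+ * Responses are dispatched on msg_id. Multi-chunk reads and writes
+ * are continued from here until flash->residue reaches zero, at
+ * which point the caller's callback runs via bfa_flash_cb().
+ * Unsolicited BFI_FLASH_I2H_EVENT messages are turned into IOC AENs;
+ * all other responses received while no operation is busy (e.g.
+ * after an IOC failure) are dropped.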
+ * + * @param[in] flasharg - flash structure + * @param[in] msg - message structure + */ +static void +bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg) +{ + struct bfa_flash_s *flash = flasharg; + u32 status; + + union { + struct bfi_flash_query_rsp_s *query; + struct bfi_flash_erase_rsp_s *erase; + struct bfi_flash_write_rsp_s *write; + struct bfi_flash_read_rsp_s *read; + struct bfi_flash_event_s *event; + struct bfi_mbmsg_s *msg; + } m; + + m.msg = msg; + bfa_trc(flash, msg->mh.msg_id); + + if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) { + /* receiving response after ioc failure */ + bfa_trc(flash, 0x9999); + return; + } + + switch (msg->mh.msg_id) { + case BFI_FLASH_I2H_QUERY_RSP: + status = be32_to_cpu(m.query->status); + bfa_trc(flash, status); + if (status == BFA_STATUS_OK) { + u32 i; + struct bfa_flash_attr_s *attr, *f; + + attr = (struct bfa_flash_attr_s *) flash->ubuf; + f = (struct bfa_flash_attr_s *) flash->dbuf_kva; + attr->status = be32_to_cpu(f->status); + attr->npart = be32_to_cpu(f->npart); + bfa_trc(flash, attr->status); + bfa_trc(flash, attr->npart); + for (i = 0; i < attr->npart; i++) { + attr->part[i].part_type = + be32_to_cpu(f->part[i].part_type); + attr->part[i].part_instance = + be32_to_cpu(f->part[i].part_instance); + attr->part[i].part_off = + be32_to_cpu(f->part[i].part_off); + attr->part[i].part_size = + be32_to_cpu(f->part[i].part_size); + attr->part[i].part_len = + be32_to_cpu(f->part[i].part_len); + attr->part[i].part_status = + be32_to_cpu(f->part[i].part_status); + } + } + flash->status = status; + bfa_flash_cb(flash); + break; + case BFI_FLASH_I2H_ERASE_RSP: + status = be32_to_cpu(m.erase->status); + bfa_trc(flash, status); + flash->status = status; + bfa_flash_cb(flash); + break; + case BFI_FLASH_I2H_WRITE_RSP: + status = be32_to_cpu(m.write->status); + bfa_trc(flash, status); + if (status != BFA_STATUS_OK || flash->residue == 0) { + flash->status = status; + bfa_flash_cb(flash); + } else { + bfa_trc(flash, flash->offset); + bfa_flash_write_send(flash); + } + break; + case BFI_FLASH_I2H_READ_RSP: + status = be32_to_cpu(m.read->status); + bfa_trc(flash, status); + if (status != BFA_STATUS_OK) { + flash->status = status; + bfa_flash_cb(flash); + } else { + u32 len = be32_to_cpu(m.read->length); + bfa_trc(flash, flash->offset); + bfa_trc(flash, len); + memcpy(flash->ubuf + flash->offset, + flash->dbuf_kva, len); + flash->residue -= len; + flash->offset += len; + if (flash->residue == 0) { + flash->status = status; + bfa_flash_cb(flash); + } else + bfa_flash_read_send(flash); + } + break; + case BFI_FLASH_I2H_BOOT_VER_RSP: + break; + case BFI_FLASH_I2H_EVENT: + status = be32_to_cpu(m.event->status); + bfa_trc(flash, status); + if (status == BFA_STATUS_BAD_FWCFG) + bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR); + else if (status == BFA_STATUS_INVALID_VENDOR) { + u32 param; + param = be32_to_cpu(m.event->param); + bfa_trc(flash, param); + bfa_ioc_aen_post(flash->ioc, + BFA_IOC_AEN_INVALID_VENDOR); + } + break; + + default: + WARN_ON(1); + } +} + +/* + * Flash memory info API. + * + * @param[in] mincfg - minimal cfg variable + */ +u32 +bfa_flash_meminfo(bfa_boolean_t mincfg) +{ + /* min driver doesn't need flash */ + if (mincfg) + return 0; + return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/* + * Flash attach API. 
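+ *
+ * When mincfg is set, the minimal driver claims no flash DMA buffer
+ * (bfa_flash_meminfo() reports zero); the mailbox handler and IOC
+ * notification registration below still take place.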
+ * + * @param[in] flash - flash structure + * @param[in] ioc - ioc structure + * @param[in] dev - device structure + * @param[in] trcmod - trace module + * @param[in] logmod - log module + */ +void +bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev, + struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg) +{ + flash->ioc = ioc; + flash->trcmod = trcmod; + flash->cbfn = NULL; + flash->cbarg = NULL; + flash->op_busy = 0; + + bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash); + bfa_q_qe_init(&flash->ioc_notify); + bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash); + list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); + + /* min driver doesn't need flash */ + if (mincfg) { + flash->dbuf_kva = NULL; + flash->dbuf_pa = 0; + } +} + +/* + * Claim memory for flash + * + * @param[in] flash - flash structure + * @param[in] dm_kva - pointer to virtual memory address + * @param[in] dm_pa - physical memory address + * @param[in] mincfg - minimal cfg variable + */ +void +bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa, + bfa_boolean_t mincfg) +{ + if (mincfg) + return; + + flash->dbuf_kva = dm_kva; + flash->dbuf_pa = dm_pa; + memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ); + dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); + dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/* + * Get flash attribute. + * + * @param[in] flash - flash structure + * @param[in] attr - flash attribute structure + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr, + bfa_cb_flash_t cbfn, void *cbarg) +{ + bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ); + + if (!bfa_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (flash->op_busy) { + bfa_trc(flash, flash->op_busy); + return BFA_STATUS_DEVBUSY; + } + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->ubuf = (u8 *) attr; + bfa_flash_query_send(flash); + + return BFA_STATUS_OK; +} + +/* + * Erase flash partition. + * + * @param[in] flash - flash structure + * @param[in] type - flash partition type + * @param[in] instance - flash partition instance + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type, + u8 instance, bfa_cb_flash_t cbfn, void *cbarg) +{ + bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ); + bfa_trc(flash, type); + bfa_trc(flash, instance); + + if (!bfa_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (flash->op_busy) { + bfa_trc(flash, flash->op_busy); + return BFA_STATUS_DEVBUSY; + } + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->type = type; + flash->instance = instance; + + bfa_flash_erase_send(flash); + bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE, + instance, type); + return BFA_STATUS_OK; +} + +/* + * Update flash partition. + * + * @param[in] flash - flash structure + * @param[in] type - flash partition type + * @param[in] instance - flash partition instance + * @param[in] buf - update data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to the partition starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. 
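+ *
+ * The update completes asynchronously: this routine queues the first
+ * write chunk and returns, and cbfn is invoked once the last chunk
+ * has been acknowledged. A minimal usage sketch follows; the
+ * completion handler and the partition type chosen here are purely
+ * illustrative and not part of this driver:
+ *
+ *	static void my_flash_done(void *cbarg, bfa_status_t status)
+ *	{
+ *		complete((struct completion *)cbarg);
+ *	}
+ *
+ *	init_completion(&comp);
+ *	if (bfa_flash_update_part(flash, BFA_FLASH_PART_FWIMG, 0, buf,
+ *			len, 0, my_flash_done, &comp) == BFA_STATUS_OK)
+ *		wait_for_completion(&comp);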
+ */ +bfa_status_t +bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type, + u8 instance, void *buf, u32 len, u32 offset, + bfa_cb_flash_t cbfn, void *cbarg) +{ + bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ); + bfa_trc(flash, type); + bfa_trc(flash, instance); + bfa_trc(flash, len); + bfa_trc(flash, offset); + + if (!bfa_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* + * 'len' must be in word (4-byte) boundary + * 'offset' must be in sector (16kb) boundary + */ + if (!len || (len & 0x03) || (offset & 0x00003FFF)) + return BFA_STATUS_FLASH_BAD_LEN; + + if (type == BFA_FLASH_PART_MFG) + return BFA_STATUS_EINVAL; + + if (flash->op_busy) { + bfa_trc(flash, flash->op_busy); + return BFA_STATUS_DEVBUSY; + } + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->type = type; + flash->instance = instance; + flash->residue = len; + flash->offset = 0; + flash->addr_off = offset; + flash->ubuf = buf; + + bfa_flash_write_send(flash); + return BFA_STATUS_OK; +} + +/* + * Read flash partition. + * + * @param[in] flash - flash structure + * @param[in] type - flash partition type + * @param[in] instance - flash partition instance + * @param[in] buf - read data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to the partition starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type, + u8 instance, void *buf, u32 len, u32 offset, + bfa_cb_flash_t cbfn, void *cbarg) +{ + bfa_trc(flash, BFI_FLASH_H2I_READ_REQ); + bfa_trc(flash, type); + bfa_trc(flash, instance); + bfa_trc(flash, len); + bfa_trc(flash, offset); + + if (!bfa_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* + * 'len' must be in word (4-byte) boundary + * 'offset' must be in sector (16kb) boundary + */ + if (!len || (len & 0x03) || (offset & 0x00003FFF)) + return BFA_STATUS_FLASH_BAD_LEN; + + if (flash->op_busy) { + bfa_trc(flash, flash->op_busy); + return BFA_STATUS_DEVBUSY; + } + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->type = type; + flash->instance = instance; + flash->residue = len; + flash->offset = 0; + flash->addr_off = offset; + flash->ubuf = buf; + bfa_flash_read_send(flash); + + return BFA_STATUS_OK; +} + +/* + * DIAG module specific + */ + +#define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */ +#define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */ + +/* IOC event handler */ +static void +bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event) +{ + struct bfa_diag_s *diag = diag_arg; + + bfa_trc(diag, event); + bfa_trc(diag, diag->block); + bfa_trc(diag, diag->fwping.lock); + bfa_trc(diag, diag->tsensor.lock); + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (diag->fwping.lock) { + diag->fwping.status = BFA_STATUS_IOC_FAILURE; + diag->fwping.cbfn(diag->fwping.cbarg, + diag->fwping.status); + diag->fwping.lock = 0; + } + + if (diag->tsensor.lock) { + diag->tsensor.status = BFA_STATUS_IOC_FAILURE; + diag->tsensor.cbfn(diag->tsensor.cbarg, + diag->tsensor.status); + diag->tsensor.lock = 0; + } + + if (diag->block) { + if (diag->timer_active) { + bfa_timer_stop(&diag->timer); + diag->timer_active = 0; + } + + diag->status = BFA_STATUS_IOC_FAILURE; + diag->cbfn(diag->cbarg, diag->status); + diag->block = 0; + } + break; + + default: + break; + } +} + +static void 
+bfa_diag_memtest_done(void *cbarg) +{ + struct bfa_diag_s *diag = cbarg; + struct bfa_ioc_s *ioc = diag->ioc; + struct bfa_diag_memtest_result *res = diag->result; + u32 loff = BFI_BOOT_MEMTEST_RES_ADDR; + u32 pgnum, i; + + pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) / + sizeof(u32)); i++) { + /* read test result from smem */ + *((u32 *) res + i) = + bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); + loff += sizeof(u32); + } + + /* Reset IOC fwstates to BFI_IOC_UNINIT */ + bfa_ioc_reset_fwstate(ioc); + + res->status = swab32(res->status); + bfa_trc(diag, res->status); + + if (res->status == BFI_BOOT_MEMTEST_RES_SIG) + diag->status = BFA_STATUS_OK; + else { + diag->status = BFA_STATUS_MEMTEST_FAILED; + res->addr = swab32(res->addr); + res->exp = swab32(res->exp); + res->act = swab32(res->act); + res->err_status = swab32(res->err_status); + res->err_status1 = swab32(res->err_status1); + res->err_addr = swab32(res->err_addr); + bfa_trc(diag, res->addr); + bfa_trc(diag, res->exp); + bfa_trc(diag, res->act); + bfa_trc(diag, res->err_status); + bfa_trc(diag, res->err_status1); + bfa_trc(diag, res->err_addr); + } + diag->timer_active = 0; + diag->cbfn(diag->cbarg, diag->status); + diag->block = 0; +} + +/* + * Firmware ping + */ + +/* + * Perform DMA test directly + */ +static void +diag_fwping_send(struct bfa_diag_s *diag) +{ + struct bfi_diag_fwping_req_s *fwping_req; + u32 i; + + bfa_trc(diag, diag->fwping.dbuf_pa); + + /* fill DMA area with pattern */ + for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) + *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data; + + /* Fill mbox msg */ + fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg; + + /* Setup SG list */ + bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ, + diag->fwping.dbuf_pa); + /* Set up dma count */ + fwping_req->count = cpu_to_be32(diag->fwping.count); + /* Set up data pattern */ + fwping_req->data = diag->fwping.data; + + /* build host command */ + bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING, + bfa_ioc_portid(diag->ioc)); + + /* send mbox cmd */ + bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd); +} + +static void +diag_fwping_comp(struct bfa_diag_s *diag, + struct bfi_diag_fwping_rsp_s *diag_rsp) +{ + u32 rsp_data = diag_rsp->data; + u8 rsp_dma_status = diag_rsp->dma_status; + + bfa_trc(diag, rsp_data); + bfa_trc(diag, rsp_dma_status); + + if (rsp_dma_status == BFA_STATUS_OK) { + u32 i, pat; + pat = (diag->fwping.count & 0x1) ? 
~(diag->fwping.data) : + diag->fwping.data; + /* Check mbox data */ + if (diag->fwping.data != rsp_data) { + bfa_trc(diag, rsp_data); + diag->fwping.result->dmastatus = + BFA_STATUS_DATACORRUPTED; + diag->fwping.status = BFA_STATUS_DATACORRUPTED; + diag->fwping.cbfn(diag->fwping.cbarg, + diag->fwping.status); + diag->fwping.lock = 0; + return; + } + /* Check dma pattern */ + for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) { + if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) { + bfa_trc(diag, i); + bfa_trc(diag, pat); + bfa_trc(diag, + *((u32 *)diag->fwping.dbuf_kva + i)); + diag->fwping.result->dmastatus = + BFA_STATUS_DATACORRUPTED; + diag->fwping.status = BFA_STATUS_DATACORRUPTED; + diag->fwping.cbfn(diag->fwping.cbarg, + diag->fwping.status); + diag->fwping.lock = 0; + return; + } + } + diag->fwping.result->dmastatus = BFA_STATUS_OK; + diag->fwping.status = BFA_STATUS_OK; + diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status); + diag->fwping.lock = 0; + } else { + diag->fwping.status = BFA_STATUS_HDMA_FAILED; + diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status); + diag->fwping.lock = 0; + } +} + +/* + * Temperature Sensor + */ + +static void +diag_tempsensor_send(struct bfa_diag_s *diag) +{ + struct bfi_diag_ts_req_s *msg; + + msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg; + bfa_trc(diag, msg->temp); + /* build host command */ + bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR, + bfa_ioc_portid(diag->ioc)); + /* send mbox cmd */ + bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd); +} + +static void +diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp) +{ + if (!diag->tsensor.lock) { + /* receiving response after ioc failure */ + bfa_trc(diag, diag->tsensor.lock); + return; + } + + /* + * ASIC junction tempsensor is a reg read operation + * it will always return OK + */ + diag->tsensor.temp->temp = be16_to_cpu(rsp->temp); + diag->tsensor.temp->ts_junc = rsp->ts_junc; + diag->tsensor.temp->ts_brd = rsp->ts_brd; + + if (rsp->ts_brd) { + /* tsensor.temp->status is brd_temp status */ + diag->tsensor.temp->status = rsp->status; + if (rsp->status == BFA_STATUS_OK) { + diag->tsensor.temp->brd_temp = + be16_to_cpu(rsp->brd_temp); + } else + diag->tsensor.temp->brd_temp = 0; + } + + bfa_trc(diag, rsp->status); + bfa_trc(diag, rsp->ts_junc); + bfa_trc(diag, rsp->temp); + bfa_trc(diag, rsp->ts_brd); + bfa_trc(diag, rsp->brd_temp); + + /* tsensor status is always good bcos we always have junction temp */ + diag->tsensor.status = BFA_STATUS_OK; + diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status); + diag->tsensor.lock = 0; +} + +/* + * LED Test command + */ +static void +diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest) +{ + struct bfi_diag_ledtest_req_s *msg; + + msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg; + /* build host command */ + bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST, + bfa_ioc_portid(diag->ioc)); + + /* + * convert the freq from N blinks per 10 sec to + * crossbow ontime value. 
We do it here because division is need + */ + if (ledtest->freq) + ledtest->freq = 500 / ledtest->freq; + + if (ledtest->freq == 0) + ledtest->freq = 1; + + bfa_trc(diag, ledtest->freq); + /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */ + msg->cmd = (u8) ledtest->cmd; + msg->color = (u8) ledtest->color; + msg->portid = bfa_ioc_portid(diag->ioc); + msg->led = ledtest->led; + msg->freq = cpu_to_be16(ledtest->freq); + + /* send mbox cmd */ + bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd); +} + +static void +diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg) +{ + bfa_trc(diag, diag->ledtest.lock); + diag->ledtest.lock = BFA_FALSE; + /* no bfa_cb_queue is needed because driver is not waiting */ +} + +/* + * Port beaconing + */ +static void +diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec) +{ + struct bfi_diag_portbeacon_req_s *msg; + + msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg; + /* build host command */ + bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON, + bfa_ioc_portid(diag->ioc)); + msg->beacon = beacon; + msg->period = cpu_to_be32(sec); + /* send mbox cmd */ + bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd); +} + +static void +diag_portbeacon_comp(struct bfa_diag_s *diag) +{ + bfa_trc(diag, diag->beacon.state); + diag->beacon.state = BFA_FALSE; + if (diag->cbfn_beacon) + diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e); +} + +/* + * Diag hmbox handler + */ +static void +bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg) +{ + struct bfa_diag_s *diag = diagarg; + + switch (msg->mh.msg_id) { + case BFI_DIAG_I2H_PORTBEACON: + diag_portbeacon_comp(diag); + break; + case BFI_DIAG_I2H_FWPING: + diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg); + break; + case BFI_DIAG_I2H_TEMPSENSOR: + diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg); + break; + case BFI_DIAG_I2H_LEDTEST: + diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg); + break; + default: + bfa_trc(diag, msg->mh.msg_id); + WARN_ON(1); + } +} + +/* + * Gen RAM Test + * + * @param[in] *diag - diag data struct + * @param[in] *memtest - mem test params input from upper layer, + * @param[in] pattern - mem test pattern + * @param[in] *result - mem test result + * @param[in] cbfn - mem test callback functioin + * @param[in] cbarg - callback functioin arg + * + * @param[out] + */ +bfa_status_t +bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest, + u32 pattern, struct bfa_diag_memtest_result *result, + bfa_cb_diag_t cbfn, void *cbarg) +{ + u32 memtest_tov; + + bfa_trc(diag, pattern); + + if (!bfa_ioc_adapter_is_disabled(diag->ioc)) + return BFA_STATUS_ADAPTER_ENABLED; + + /* check to see if there is another destructive diag cmd running */ + if (diag->block) { + bfa_trc(diag, diag->block); + return BFA_STATUS_DEVBUSY; + } else + diag->block = 1; + + diag->result = result; + diag->cbfn = cbfn; + diag->cbarg = cbarg; + + /* download memtest code and take LPU0 out of reset */ + bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS); + + memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ? 
+ CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV; + bfa_timer_begin(diag->ioc->timer_mod, &diag->timer, + bfa_diag_memtest_done, diag, memtest_tov); + diag->timer_active = 1; + return BFA_STATUS_OK; +} + +/* + * DIAG firmware ping command + * + * @param[in] *diag - diag data struct + * @param[in] cnt - dma loop count for testing PCIE + * @param[in] data - data pattern to pass in fw + * @param[in] *result - pt to bfa_diag_fwping_result_t data struct + * @param[in] cbfn - callback function + * @param[in] *cbarg - callback functioin arg + * + * @param[out] + */ +bfa_status_t +bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data, + struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn, + void *cbarg) +{ + bfa_trc(diag, cnt); + bfa_trc(diag, data); + + if (!bfa_ioc_is_operational(diag->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) && + ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH)) + return BFA_STATUS_CMD_NOTSUPP; + + /* check to see if there is another destructive diag cmd running */ + if (diag->block || diag->fwping.lock) { + bfa_trc(diag, diag->block); + bfa_trc(diag, diag->fwping.lock); + return BFA_STATUS_DEVBUSY; + } + + /* Initialization */ + diag->fwping.lock = 1; + diag->fwping.cbfn = cbfn; + diag->fwping.cbarg = cbarg; + diag->fwping.result = result; + diag->fwping.data = data; + diag->fwping.count = cnt; + + /* Init test results */ + diag->fwping.result->data = 0; + diag->fwping.result->status = BFA_STATUS_OK; + + /* kick off the first ping */ + diag_fwping_send(diag); + return BFA_STATUS_OK; +} + +/* + * Read Temperature Sensor + * + * @param[in] *diag - diag data struct + * @param[in] *result - pt to bfa_diag_temp_t data struct + * @param[in] cbfn - callback function + * @param[in] *cbarg - callback functioin arg + * + * @param[out] + */ +bfa_status_t +bfa_diag_tsensor_query(struct bfa_diag_s *diag, + struct bfa_diag_results_tempsensor_s *result, + bfa_cb_diag_t cbfn, void *cbarg) +{ + /* check to see if there is a destructive diag cmd running */ + if (diag->block || diag->tsensor.lock) { + bfa_trc(diag, diag->block); + bfa_trc(diag, diag->tsensor.lock); + return BFA_STATUS_DEVBUSY; + } + + if (!bfa_ioc_is_operational(diag->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* Init diag mod params */ + diag->tsensor.lock = 1; + diag->tsensor.temp = result; + diag->tsensor.cbfn = cbfn; + diag->tsensor.cbarg = cbarg; + diag->tsensor.status = BFA_STATUS_OK; + + /* Send msg to fw */ + diag_tempsensor_send(diag); + + return BFA_STATUS_OK; +} + +/* + * LED Test command + * + * @param[in] *diag - diag data struct + * @param[in] *ledtest - pt to ledtest data structure + * + * @param[out] + */ +bfa_status_t +bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest) +{ + bfa_trc(diag, ledtest->cmd); + + if (!bfa_ioc_is_operational(diag->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (diag->beacon.state) + return BFA_STATUS_BEACON_ON; + + if (diag->ledtest.lock) + return BFA_STATUS_LEDTEST_OP; + + /* Send msg to fw */ + diag->ledtest.lock = BFA_TRUE; + diag_ledtest_send(diag, ledtest); + + return BFA_STATUS_OK; +} + +/* + * Port beaconing command + * + * @param[in] *diag - diag data struct + * @param[in] beacon - port beaconing 1:ON 0:OFF + * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF + * @param[in] sec - beaconing duration in seconds + * + * @param[out] + */ +bfa_status_t +bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon, + bfa_boolean_t link_e2e_beacon, uint32_t sec) +{ + bfa_trc(diag, beacon); + 
bfa_trc(diag, link_e2e_beacon); + bfa_trc(diag, sec); + + if (!bfa_ioc_is_operational(diag->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (diag->ledtest.lock) + return BFA_STATUS_LEDTEST_OP; + + if (diag->beacon.state && beacon) /* beacon alread on */ + return BFA_STATUS_BEACON_ON; + + diag->beacon.state = beacon; + diag->beacon.link_e2e = link_e2e_beacon; + if (diag->cbfn_beacon) + diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon); + + /* Send msg to fw */ + diag_portbeacon_send(diag, beacon, sec); + + return BFA_STATUS_OK; +} + +/* + * Return DMA memory needed by diag module. + */ +u32 +bfa_diag_meminfo(void) +{ + return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/* + * Attach virtual and physical memory for Diag. + */ +void +bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev, + bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod) +{ + diag->dev = dev; + diag->ioc = ioc; + diag->trcmod = trcmod; + + diag->block = 0; + diag->cbfn = NULL; + diag->cbarg = NULL; + diag->result = NULL; + diag->cbfn_beacon = cbfn_beacon; + + bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag); + bfa_q_qe_init(&diag->ioc_notify); + bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag); + list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q); +} + +void +bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa) +{ + diag->fwping.dbuf_kva = dm_kva; + diag->fwping.dbuf_pa = dm_pa; + memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ); +} + +/* + * PHY module specific + */ +#define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */ +#define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */ + +static void +bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz) +{ + int i, m = sz >> 2; + + for (i = 0; i < m; i++) + obuf[i] = be32_to_cpu(ibuf[i]); +} + +static bfa_boolean_t +bfa_phy_present(struct bfa_phy_s *phy) +{ + return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING); +} + +static void +bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event) +{ + struct bfa_phy_s *phy = cbarg; + + bfa_trc(phy, event); + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (phy->op_busy) { + phy->status = BFA_STATUS_IOC_FAILURE; + phy->cbfn(phy->cbarg, phy->status); + phy->op_busy = 0; + } + break; + + default: + break; + } +} + +/* + * Send phy attribute query request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_phy_query_send(void *cbarg) +{ + struct bfa_phy_s *phy = cbarg; + struct bfi_phy_query_req_s *msg = + (struct bfi_phy_query_req_s *) phy->mb.msg; + + msg->instance = phy->instance; + bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ, + bfa_ioc_portid(phy->ioc)); + bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa); + bfa_ioc_mbox_queue(phy->ioc, &phy->mb); +} + +/* + * Send phy write request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_phy_write_send(void *cbarg) +{ + struct bfa_phy_s *phy = cbarg; + struct bfi_phy_write_req_s *msg = + (struct bfi_phy_write_req_s *) phy->mb.msg; + u32 len; + u16 *buf, *dbuf; + int i, sz; + + msg->instance = phy->instance; + msg->offset = cpu_to_be32(phy->addr_off + phy->offset); + len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ? + phy->residue : BFA_PHY_DMA_BUF_SZ; + msg->length = cpu_to_be32(len); + + /* indicate if it's the last msg of the whole write operation */ + msg->last = (len == phy->residue) ? 
1 : 0; + + bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ, + bfa_ioc_portid(phy->ioc)); + bfa_alen_set(&msg->alen, len, phy->dbuf_pa); + + buf = (u16 *) (phy->ubuf + phy->offset); + dbuf = (u16 *)phy->dbuf_kva; + sz = len >> 1; + for (i = 0; i < sz; i++) + buf[i] = cpu_to_be16(dbuf[i]); + + bfa_ioc_mbox_queue(phy->ioc, &phy->mb); + + phy->residue -= len; + phy->offset += len; +} + +/* + * Send phy read request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_phy_read_send(void *cbarg) +{ + struct bfa_phy_s *phy = cbarg; + struct bfi_phy_read_req_s *msg = + (struct bfi_phy_read_req_s *) phy->mb.msg; + u32 len; + + msg->instance = phy->instance; + msg->offset = cpu_to_be32(phy->addr_off + phy->offset); + len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ? + phy->residue : BFA_PHY_DMA_BUF_SZ; + msg->length = cpu_to_be32(len); + bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ, + bfa_ioc_portid(phy->ioc)); + bfa_alen_set(&msg->alen, len, phy->dbuf_pa); + bfa_ioc_mbox_queue(phy->ioc, &phy->mb); +} + +/* + * Send phy stats request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_phy_stats_send(void *cbarg) +{ + struct bfa_phy_s *phy = cbarg; + struct bfi_phy_stats_req_s *msg = + (struct bfi_phy_stats_req_s *) phy->mb.msg; + + msg->instance = phy->instance; + bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ, + bfa_ioc_portid(phy->ioc)); + bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa); + bfa_ioc_mbox_queue(phy->ioc, &phy->mb); +} + +/* + * Flash memory info API. + * + * @param[in] mincfg - minimal cfg variable + */ +u32 +bfa_phy_meminfo(bfa_boolean_t mincfg) +{ + /* min driver doesn't need phy */ + if (mincfg) + return 0; + + return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/* + * Flash attach API. + * + * @param[in] phy - phy structure + * @param[in] ioc - ioc structure + * @param[in] dev - device structure + * @param[in] trcmod - trace module + * @param[in] logmod - log module + */ +void +bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev, + struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg) +{ + phy->ioc = ioc; + phy->trcmod = trcmod; + phy->cbfn = NULL; + phy->cbarg = NULL; + phy->op_busy = 0; + + bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy); + bfa_q_qe_init(&phy->ioc_notify); + bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy); + list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q); + + /* min driver doesn't need phy */ + if (mincfg) { + phy->dbuf_kva = NULL; + phy->dbuf_pa = 0; + } +} + +/* + * Claim memory for phy + * + * @param[in] phy - phy structure + * @param[in] dm_kva - pointer to virtual memory address + * @param[in] dm_pa - physical memory address + * @param[in] mincfg - minimal cfg variable + */ +void +bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa, + bfa_boolean_t mincfg) +{ + if (mincfg) + return; + + phy->dbuf_kva = dm_kva; + phy->dbuf_pa = dm_pa; + memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ); + dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); + dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +bfa_boolean_t +bfa_phy_busy(struct bfa_ioc_s *ioc) +{ + void __iomem *rb; + + rb = bfa_ioc_bar0(ioc); + return readl(rb + BFA_PHY_LOCK_STATUS); +} + +/* + * Get phy attribute. + * + * @param[in] phy - phy structure + * @param[in] attr - phy attribute structure + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. 
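+ *
+ * In addition to the usual IOC checks, PHY access requires the PHY
+ * hardware semaphore to be free; bfa_phy_busy() above reads the
+ * BFA_PHY_LOCK_STATUS register for this. Completion is reported
+ * asynchronously through cbfn, as with the flash APIs above.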
+ */ +bfa_status_t +bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance, + struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg) +{ + bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ); + bfa_trc(phy, instance); + + if (!bfa_phy_present(phy)) + return BFA_STATUS_PHY_NOT_PRESENT; + + if (!bfa_ioc_is_operational(phy->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (phy->op_busy || bfa_phy_busy(phy->ioc)) { + bfa_trc(phy, phy->op_busy); + return BFA_STATUS_DEVBUSY; + } + + phy->op_busy = 1; + phy->cbfn = cbfn; + phy->cbarg = cbarg; + phy->instance = instance; + phy->ubuf = (uint8_t *) attr; + bfa_phy_query_send(phy); + + return BFA_STATUS_OK; +} + +/* + * Get phy stats. + * + * @param[in] phy - phy structure + * @param[in] instance - phy image instance + * @param[in] stats - pointer to phy stats + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance, + struct bfa_phy_stats_s *stats, + bfa_cb_phy_t cbfn, void *cbarg) +{ + bfa_trc(phy, BFI_PHY_H2I_STATS_REQ); + bfa_trc(phy, instance); + + if (!bfa_phy_present(phy)) + return BFA_STATUS_PHY_NOT_PRESENT; + + if (!bfa_ioc_is_operational(phy->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (phy->op_busy || bfa_phy_busy(phy->ioc)) { + bfa_trc(phy, phy->op_busy); + return BFA_STATUS_DEVBUSY; + } + + phy->op_busy = 1; + phy->cbfn = cbfn; + phy->cbarg = cbarg; + phy->instance = instance; + phy->ubuf = (u8 *) stats; + bfa_phy_stats_send(phy); + + return BFA_STATUS_OK; +} + +/* + * Update phy image. + * + * @param[in] phy - phy structure + * @param[in] instance - phy image instance + * @param[in] buf - update data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_phy_update(struct bfa_phy_s *phy, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_phy_t cbfn, void *cbarg) +{ + bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ); + bfa_trc(phy, instance); + bfa_trc(phy, len); + bfa_trc(phy, offset); + + if (!bfa_phy_present(phy)) + return BFA_STATUS_PHY_NOT_PRESENT; + + if (!bfa_ioc_is_operational(phy->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* 'len' must be in word (4-byte) boundary */ + if (!len || (len & 0x03)) + return BFA_STATUS_FAILED; + + if (phy->op_busy || bfa_phy_busy(phy->ioc)) { + bfa_trc(phy, phy->op_busy); + return BFA_STATUS_DEVBUSY; + } + + phy->op_busy = 1; + phy->cbfn = cbfn; + phy->cbarg = cbarg; + phy->instance = instance; + phy->residue = len; + phy->offset = 0; + phy->addr_off = offset; + phy->ubuf = buf; + + bfa_phy_write_send(phy); + return BFA_STATUS_OK; +} + +/* + * Read phy image. + * + * @param[in] phy - phy structure + * @param[in] instance - phy image instance + * @param[in] buf - read data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. 
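+ *
+ * Reads larger than BFA_PHY_DMA_BUF_SZ are split into multiple
+ * requests; each response is copied out 16 bits at a time with
+ * be16_to_cpu() in the read-response handler before the next chunk
+ * is requested.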
+ */ +bfa_status_t +bfa_phy_read(struct bfa_phy_s *phy, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_phy_t cbfn, void *cbarg) +{ + bfa_trc(phy, BFI_PHY_H2I_READ_REQ); + bfa_trc(phy, instance); + bfa_trc(phy, len); + bfa_trc(phy, offset); + + if (!bfa_phy_present(phy)) + return BFA_STATUS_PHY_NOT_PRESENT; + + if (!bfa_ioc_is_operational(phy->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* 'len' must be in word (4-byte) boundary */ + if (!len || (len & 0x03)) + return BFA_STATUS_FAILED; + + if (phy->op_busy || bfa_phy_busy(phy->ioc)) { + bfa_trc(phy, phy->op_busy); + return BFA_STATUS_DEVBUSY; + } + + phy->op_busy = 1; + phy->cbfn = cbfn; + phy->cbarg = cbarg; + phy->instance = instance; + phy->residue = len; + phy->offset = 0; + phy->addr_off = offset; + phy->ubuf = buf; + bfa_phy_read_send(phy); + + return BFA_STATUS_OK; +} + +/* + * Process phy response messages upon receiving interrupts. + * + * @param[in] phyarg - phy structure + * @param[in] msg - message structure + */ +void +bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg) +{ + struct bfa_phy_s *phy = phyarg; + u32 status; + + union { + struct bfi_phy_query_rsp_s *query; + struct bfi_phy_stats_rsp_s *stats; + struct bfi_phy_write_rsp_s *write; + struct bfi_phy_read_rsp_s *read; + struct bfi_mbmsg_s *msg; + } m; + + m.msg = msg; + bfa_trc(phy, msg->mh.msg_id); + + if (!phy->op_busy) { + /* receiving response after ioc failure */ + bfa_trc(phy, 0x9999); + return; + } + + switch (msg->mh.msg_id) { + case BFI_PHY_I2H_QUERY_RSP: + status = be32_to_cpu(m.query->status); + bfa_trc(phy, status); + + if (status == BFA_STATUS_OK) { + struct bfa_phy_attr_s *attr = + (struct bfa_phy_attr_s *) phy->ubuf; + bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva, + sizeof(struct bfa_phy_attr_s)); + bfa_trc(phy, attr->status); + bfa_trc(phy, attr->length); + } + + phy->status = status; + phy->op_busy = 0; + if (phy->cbfn) + phy->cbfn(phy->cbarg, phy->status); + break; + case BFI_PHY_I2H_STATS_RSP: + status = be32_to_cpu(m.stats->status); + bfa_trc(phy, status); + + if (status == BFA_STATUS_OK) { + struct bfa_phy_stats_s *stats = + (struct bfa_phy_stats_s *) phy->ubuf; + bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva, + sizeof(struct bfa_phy_stats_s)); + bfa_trc(phy, stats->status); + } + + phy->status = status; + phy->op_busy = 0; + if (phy->cbfn) + phy->cbfn(phy->cbarg, phy->status); + break; + case BFI_PHY_I2H_WRITE_RSP: + status = be32_to_cpu(m.write->status); + bfa_trc(phy, status); + + if (status != BFA_STATUS_OK || phy->residue == 0) { + phy->status = status; + phy->op_busy = 0; + if (phy->cbfn) + phy->cbfn(phy->cbarg, phy->status); + } else { + bfa_trc(phy, phy->offset); + bfa_phy_write_send(phy); + } + break; + case BFI_PHY_I2H_READ_RSP: + status = be32_to_cpu(m.read->status); + bfa_trc(phy, status); + + if (status != BFA_STATUS_OK) { + phy->status = status; + phy->op_busy = 0; + if (phy->cbfn) + phy->cbfn(phy->cbarg, phy->status); + } else { + u32 len = be32_to_cpu(m.read->length); + u16 *buf = (u16 *)(phy->ubuf + phy->offset); + u16 *dbuf = (u16 *)phy->dbuf_kva; + int i, sz = len >> 1; + + bfa_trc(phy, phy->offset); + bfa_trc(phy, len); + + for (i = 0; i < sz; i++) + buf[i] = be16_to_cpu(dbuf[i]); + + phy->residue -= len; + phy->offset += len; + + if (phy->residue == 0) { + phy->status = status; + phy->op_busy = 0; + if (phy->cbfn) + phy->cbfn(phy->cbarg, phy->status); + } else + bfa_phy_read_send(phy); + } + break; + default: + WARN_ON(1); + } +} + +/* + * DCONF state machine events + */ +enum bfa_dconf_event { + BFA_DCONF_SM_INIT 
= 1, /* dconf Init */ + BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */ + BFA_DCONF_SM_WR = 3, /* binding change, map */ + BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */ + BFA_DCONF_SM_EXIT = 5, /* exit dconf module */ + BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */ +}; + +/* forward declaration of DCONF state machine */ +static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event); +static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event); +static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event); +static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event); +static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event); +static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event); +static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event); + +static void bfa_dconf_cbfn(void *dconf, bfa_status_t status); +static void bfa_dconf_timer(void *cbarg); +static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf); +static void bfa_dconf_init_cb(void *arg, bfa_status_t status); + +/* + * Beginning state of dconf module. Waiting for an event to start. + */ +static void +bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) +{ + bfa_status_t bfa_status; + bfa_trc(dconf->bfa, event); + + switch (event) { + case BFA_DCONF_SM_INIT: + if (dconf->min_cfg) { + bfa_trc(dconf->bfa, dconf->min_cfg); + bfa_fsm_send_event(&dconf->bfa->iocfc, + IOCFC_E_DCONF_DONE); + return; + } + bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read); + bfa_timer_start(dconf->bfa, &dconf->timer, + bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV); + bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa), + BFA_FLASH_PART_DRV, dconf->instance, + dconf->dconf, + sizeof(struct bfa_dconf_s), 0, + bfa_dconf_init_cb, dconf->bfa); + if (bfa_status != BFA_STATUS_OK) { + bfa_timer_stop(&dconf->timer); + bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED); + bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); + return; + } + break; + case BFA_DCONF_SM_EXIT: + bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); + break; + case BFA_DCONF_SM_IOCDISABLE: + case BFA_DCONF_SM_WR: + case BFA_DCONF_SM_FLASH_COMP: + break; + default: + bfa_sm_fault(dconf->bfa, event); + } +} + +/* + * Read flash for dconf entries and make a call back to the driver once done. + */ +static void +bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event) +{ + bfa_trc(dconf->bfa, event); + + switch (event) { + case BFA_DCONF_SM_FLASH_COMP: + bfa_timer_stop(&dconf->timer); + bfa_sm_set_state(dconf, bfa_dconf_sm_ready); + break; + case BFA_DCONF_SM_TIMEOUT: + bfa_sm_set_state(dconf, bfa_dconf_sm_ready); + bfa_ioc_suspend(&dconf->bfa->ioc); + break; + case BFA_DCONF_SM_EXIT: + bfa_timer_stop(&dconf->timer); + bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); + bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); + break; + case BFA_DCONF_SM_IOCDISABLE: + bfa_timer_stop(&dconf->timer); + bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); + break; + default: + bfa_sm_fault(dconf->bfa, event); + } +} + +/* + * DCONF Module is in ready state. Has completed the initialization. 
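+ * A BFA_DCONF_SM_WR event arms the update timer and moves the module to
+ * the dirty state; BFA_DCONF_SM_EXIT returns it to uninit and notifies
+ * IOCFC with IOCFC_E_DCONF_DONE.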
+ */ +static void +bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) +{ + bfa_trc(dconf->bfa, event); + + switch (event) { + case BFA_DCONF_SM_WR: + bfa_timer_start(dconf->bfa, &dconf->timer, + bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); + bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); + break; + case BFA_DCONF_SM_EXIT: + bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); + bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); + break; + case BFA_DCONF_SM_INIT: + case BFA_DCONF_SM_IOCDISABLE: + break; + default: + bfa_sm_fault(dconf->bfa, event); + } +} + +/* + * entries are dirty, write back to the flash. + */ + +static void +bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) +{ + bfa_trc(dconf->bfa, event); + + switch (event) { + case BFA_DCONF_SM_TIMEOUT: + bfa_sm_set_state(dconf, bfa_dconf_sm_sync); + bfa_dconf_flash_write(dconf); + break; + case BFA_DCONF_SM_WR: + bfa_timer_stop(&dconf->timer); + bfa_timer_start(dconf->bfa, &dconf->timer, + bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); + break; + case BFA_DCONF_SM_EXIT: + bfa_timer_stop(&dconf->timer); + bfa_timer_start(dconf->bfa, &dconf->timer, + bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); + bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync); + bfa_dconf_flash_write(dconf); + break; + case BFA_DCONF_SM_FLASH_COMP: + break; + case BFA_DCONF_SM_IOCDISABLE: + bfa_timer_stop(&dconf->timer); + bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty); + break; + default: + bfa_sm_fault(dconf->bfa, event); + } +} + +/* + * Sync the dconf entries to the flash. + */ +static void +bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event) +{ + bfa_trc(dconf->bfa, event); + + switch (event) { + case BFA_DCONF_SM_IOCDISABLE: + case BFA_DCONF_SM_FLASH_COMP: + bfa_timer_stop(&dconf->timer); + fallthrough; + case BFA_DCONF_SM_TIMEOUT: + bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); + bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); + break; + default: + bfa_sm_fault(dconf->bfa, event); + } +} + +static void +bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) +{ + bfa_trc(dconf->bfa, event); + + switch (event) { + case BFA_DCONF_SM_FLASH_COMP: + bfa_sm_set_state(dconf, bfa_dconf_sm_ready); + break; + case BFA_DCONF_SM_WR: + bfa_timer_start(dconf->bfa, &dconf->timer, + bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); + bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); + break; + case BFA_DCONF_SM_EXIT: + bfa_timer_start(dconf->bfa, &dconf->timer, + bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); + bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync); + break; + case BFA_DCONF_SM_IOCDISABLE: + bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty); + break; + default: + bfa_sm_fault(dconf->bfa, event); + } +} + +static void +bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event) +{ + bfa_trc(dconf->bfa, event); + + switch (event) { + case BFA_DCONF_SM_INIT: + bfa_timer_start(dconf->bfa, &dconf->timer, + bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); + bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); + break; + case BFA_DCONF_SM_EXIT: + bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); + bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); + break; + case BFA_DCONF_SM_IOCDISABLE: + break; + default: + bfa_sm_fault(dconf->bfa, event); + } +} + +/* + * Compute and return memory needed by DRV_CFG module. 
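+ * In min_cfg mode only sizeof(struct bfa_dconf_hdr_s) is reserved from
+ * the KVA segment; otherwise the full struct bfa_dconf_s is claimed.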
+ */ +void +bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, + struct bfa_s *bfa) +{ + struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa); + + if (cfg->drvcfg.min_cfg) + bfa_mem_kva_setup(meminfo, dconf_kva, + sizeof(struct bfa_dconf_hdr_s)); + else + bfa_mem_kva_setup(meminfo, dconf_kva, + sizeof(struct bfa_dconf_s)); +} + +void +bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg) +{ + struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); + + dconf->bfad = bfad; + dconf->bfa = bfa; + dconf->instance = bfa->ioc.port_id; + bfa_trc(bfa, dconf->instance); + + dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf); + if (cfg->drvcfg.min_cfg) { + bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s); + dconf->min_cfg = BFA_TRUE; + } else { + dconf->min_cfg = BFA_FALSE; + bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s); + } + + bfa_dconf_read_data_valid(bfa) = BFA_FALSE; + bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); +} + +static void +bfa_dconf_init_cb(void *arg, bfa_status_t status) +{ + struct bfa_s *bfa = arg; + struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); + + if (status == BFA_STATUS_OK) { + bfa_dconf_read_data_valid(bfa) = BFA_TRUE; + if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE) + dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE; + if (dconf->dconf->hdr.version != BFI_DCONF_VERSION) + dconf->dconf->hdr.version = BFI_DCONF_VERSION; + } + bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP); + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE); +} + +void +bfa_dconf_modinit(struct bfa_s *bfa) +{ + struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); + bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT); +} + +static void bfa_dconf_timer(void *cbarg) +{ + struct bfa_dconf_mod_s *dconf = cbarg; + bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT); +} + +void +bfa_dconf_iocdisable(struct bfa_s *bfa) +{ + struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); + bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE); +} + +static bfa_status_t +bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf) +{ + bfa_status_t bfa_status; + bfa_trc(dconf->bfa, 0); + + bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa), + BFA_FLASH_PART_DRV, dconf->instance, + dconf->dconf, sizeof(struct bfa_dconf_s), 0, + bfa_dconf_cbfn, dconf); + if (bfa_status != BFA_STATUS_OK) + WARN_ON(bfa_status); + bfa_trc(dconf->bfa, bfa_status); + + return bfa_status; +} + +bfa_status_t +bfa_dconf_update(struct bfa_s *bfa) +{ + struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); + bfa_trc(dconf->bfa, 0); + if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty)) + return BFA_STATUS_FAILED; + + if (dconf->min_cfg) { + bfa_trc(dconf->bfa, dconf->min_cfg); + return BFA_STATUS_FAILED; + } + + bfa_sm_send_event(dconf, BFA_DCONF_SM_WR); + return BFA_STATUS_OK; +} + +static void +bfa_dconf_cbfn(void *arg, bfa_status_t status) +{ + struct bfa_dconf_mod_s *dconf = arg; + WARN_ON(status); + bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP); +} + +void +bfa_dconf_modexit(struct bfa_s *bfa) +{ + struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); + bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT); +} + +/* + * FRU specific functions + */ + +#define BFA_FRU_DMA_BUF_SZ 0x02000 /* 8k dma buffer */ +#define BFA_FRU_CHINOOK_MAX_SIZE 0x10000 +#define BFA_FRU_LIGHTNING_MAX_SIZE 0x200 + +static void +bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event) +{ + struct bfa_fru_s *fru = cbarg; + + bfa_trc(fru, event); + + switch (event) { + case BFA_IOC_E_DISABLED: + case 
BFA_IOC_E_FAILED: + if (fru->op_busy) { + fru->status = BFA_STATUS_IOC_FAILURE; + fru->cbfn(fru->cbarg, fru->status); + fru->op_busy = 0; + } + break; + + default: + break; + } +} + +/* + * Send fru write request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type) +{ + struct bfa_fru_s *fru = cbarg; + struct bfi_fru_write_req_s *msg = + (struct bfi_fru_write_req_s *) fru->mb.msg; + u32 len; + + msg->offset = cpu_to_be32(fru->addr_off + fru->offset); + len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ? + fru->residue : BFA_FRU_DMA_BUF_SZ; + msg->length = cpu_to_be32(len); + + /* + * indicate if it's the last msg of the whole write operation + */ + msg->last = (len == fru->residue) ? 1 : 0; + + msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0; + bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc)); + bfa_alen_set(&msg->alen, len, fru->dbuf_pa); + + memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len); + bfa_ioc_mbox_queue(fru->ioc, &fru->mb); + + fru->residue -= len; + fru->offset += len; +} + +/* + * Send fru read request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type) +{ + struct bfa_fru_s *fru = cbarg; + struct bfi_fru_read_req_s *msg = + (struct bfi_fru_read_req_s *) fru->mb.msg; + u32 len; + + msg->offset = cpu_to_be32(fru->addr_off + fru->offset); + len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ? + fru->residue : BFA_FRU_DMA_BUF_SZ; + msg->length = cpu_to_be32(len); + bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc)); + bfa_alen_set(&msg->alen, len, fru->dbuf_pa); + bfa_ioc_mbox_queue(fru->ioc, &fru->mb); +} + +/* + * Flash memory info API. + * + * @param[in] mincfg - minimal cfg variable + */ +u32 +bfa_fru_meminfo(bfa_boolean_t mincfg) +{ + /* min driver doesn't need fru */ + if (mincfg) + return 0; + + return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/* + * Flash attach API. + * + * @param[in] fru - fru structure + * @param[in] ioc - ioc structure + * @param[in] dev - device structure + * @param[in] trcmod - trace module + * @param[in] logmod - log module + */ +void +bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev, + struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg) +{ + fru->ioc = ioc; + fru->trcmod = trcmod; + fru->cbfn = NULL; + fru->cbarg = NULL; + fru->op_busy = 0; + + bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru); + bfa_q_qe_init(&fru->ioc_notify); + bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru); + list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q); + + /* min driver doesn't need fru */ + if (mincfg) { + fru->dbuf_kva = NULL; + fru->dbuf_pa = 0; + } +} + +/* + * Claim memory for fru + * + * @param[in] fru - fru structure + * @param[in] dm_kva - pointer to virtual memory address + * @param[in] dm_pa - frusical memory address + * @param[in] mincfg - minimal cfg variable + */ +void +bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa, + bfa_boolean_t mincfg) +{ + if (mincfg) + return; + + fru->dbuf_kva = dm_kva; + fru->dbuf_pa = dm_pa; + memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ); + dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); + dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/* + * Update fru vpd image. 
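+ * The image is written in BFA_FRU_DMA_BUF_SZ (8k) chunks; the 'last' and
+ * trfr_cmpl fields are only set in the request that carries the final
+ * chunk.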
+ * + * @param[in] fru - fru structure + * @param[in] buf - update data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl) +{ + bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ); + bfa_trc(fru, len); + bfa_trc(fru, offset); + + if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 && + fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2) + return BFA_STATUS_FRU_NOT_PRESENT; + + if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK) + return BFA_STATUS_CMD_NOTSUPP; + + if (!bfa_ioc_is_operational(fru->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (fru->op_busy) { + bfa_trc(fru, fru->op_busy); + return BFA_STATUS_DEVBUSY; + } + + fru->op_busy = 1; + + fru->cbfn = cbfn; + fru->cbarg = cbarg; + fru->residue = len; + fru->offset = 0; + fru->addr_off = offset; + fru->ubuf = buf; + fru->trfr_cmpl = trfr_cmpl; + + bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ); + + return BFA_STATUS_OK; +} + +/* + * Read fru vpd image. + * + * @param[in] fru - fru structure + * @param[in] buf - read data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg) +{ + bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ); + bfa_trc(fru, len); + bfa_trc(fru, offset); + + if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) + return BFA_STATUS_FRU_NOT_PRESENT; + + if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK && + fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2) + return BFA_STATUS_CMD_NOTSUPP; + + if (!bfa_ioc_is_operational(fru->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (fru->op_busy) { + bfa_trc(fru, fru->op_busy); + return BFA_STATUS_DEVBUSY; + } + + fru->op_busy = 1; + + fru->cbfn = cbfn; + fru->cbarg = cbarg; + fru->residue = len; + fru->offset = 0; + fru->addr_off = offset; + fru->ubuf = buf; + bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ); + + return BFA_STATUS_OK; +} + +/* + * Get maximum size fru vpd image. + * + * @param[in] fru - fru structure + * @param[out] size - maximum size of fru vpd data + * + * Return status. + */ +bfa_status_t +bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size) +{ + if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) + return BFA_STATUS_FRU_NOT_PRESENT; + + if (!bfa_ioc_is_operational(fru->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK || + fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2) + *max_size = BFA_FRU_CHINOOK_MAX_SIZE; + else + return BFA_STATUS_CMD_NOTSUPP; + return BFA_STATUS_OK; +} +/* + * tfru write. + * + * @param[in] fru - fru structure + * @param[in] buf - update data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. 
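+ * Note: unlike bfa_fruvpd_update(), no transfer-complete flag is taken
+ * here and the request is sent with the BFI_TFRU_H2I_WRITE_REQ opcode.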
+ */ +bfa_status_t +bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg) +{ + bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ); + bfa_trc(fru, len); + bfa_trc(fru, offset); + bfa_trc(fru, *((u8 *) buf)); + + if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) + return BFA_STATUS_FRU_NOT_PRESENT; + + if (!bfa_ioc_is_operational(fru->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (fru->op_busy) { + bfa_trc(fru, fru->op_busy); + return BFA_STATUS_DEVBUSY; + } + + fru->op_busy = 1; + + fru->cbfn = cbfn; + fru->cbarg = cbarg; + fru->residue = len; + fru->offset = 0; + fru->addr_off = offset; + fru->ubuf = buf; + + bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ); + + return BFA_STATUS_OK; +} + +/* + * tfru read. + * + * @param[in] fru - fru structure + * @param[in] buf - read data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg) +{ + bfa_trc(fru, BFI_TFRU_H2I_READ_REQ); + bfa_trc(fru, len); + bfa_trc(fru, offset); + + if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) + return BFA_STATUS_FRU_NOT_PRESENT; + + if (!bfa_ioc_is_operational(fru->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (fru->op_busy) { + bfa_trc(fru, fru->op_busy); + return BFA_STATUS_DEVBUSY; + } + + fru->op_busy = 1; + + fru->cbfn = cbfn; + fru->cbarg = cbarg; + fru->residue = len; + fru->offset = 0; + fru->addr_off = offset; + fru->ubuf = buf; + bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ); + + return BFA_STATUS_OK; +} + +/* + * Process fru response messages upon receiving interrupts. 
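+ * Both the VPD and TFRU write/read responses are handled here; chunked
+ * transfers are continued until 'residue' reaches zero, at which point
+ * the caller's cbfn is invoked with the final status.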
+ * + * @param[in] fruarg - fru structure + * @param[in] msg - message structure + */ +void +bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg) +{ + struct bfa_fru_s *fru = fruarg; + struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg; + u32 status; + + bfa_trc(fru, msg->mh.msg_id); + + if (!fru->op_busy) { + /* + * receiving response after ioc failure + */ + bfa_trc(fru, 0x9999); + return; + } + + switch (msg->mh.msg_id) { + case BFI_FRUVPD_I2H_WRITE_RSP: + case BFI_TFRU_I2H_WRITE_RSP: + status = be32_to_cpu(rsp->status); + bfa_trc(fru, status); + + if (status != BFA_STATUS_OK || fru->residue == 0) { + fru->status = status; + fru->op_busy = 0; + if (fru->cbfn) + fru->cbfn(fru->cbarg, fru->status); + } else { + bfa_trc(fru, fru->offset); + if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP) + bfa_fru_write_send(fru, + BFI_FRUVPD_H2I_WRITE_REQ); + else + bfa_fru_write_send(fru, + BFI_TFRU_H2I_WRITE_REQ); + } + break; + case BFI_FRUVPD_I2H_READ_RSP: + case BFI_TFRU_I2H_READ_RSP: + status = be32_to_cpu(rsp->status); + bfa_trc(fru, status); + + if (status != BFA_STATUS_OK) { + fru->status = status; + fru->op_busy = 0; + if (fru->cbfn) + fru->cbfn(fru->cbarg, fru->status); + } else { + u32 len = be32_to_cpu(rsp->length); + + bfa_trc(fru, fru->offset); + bfa_trc(fru, len); + + memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len); + fru->residue -= len; + fru->offset += len; + + if (fru->residue == 0) { + fru->status = status; + fru->op_busy = 0; + if (fru->cbfn) + fru->cbfn(fru->cbarg, fru->status); + } else { + if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP) + bfa_fru_read_send(fru, + BFI_FRUVPD_H2I_READ_REQ); + else + bfa_fru_read_send(fru, + BFI_TFRU_H2I_READ_REQ); + } + } + break; + default: + WARN_ON(1); + } +} + +/* + * register definitions + */ +#define FLI_CMD_REG 0x0001d000 +#define FLI_RDDATA_REG 0x0001d010 +#define FLI_ADDR_REG 0x0001d004 +#define FLI_DEV_STATUS_REG 0x0001d014 + +#define BFA_FLASH_FIFO_SIZE 128 /* fifo size */ +#define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */ +#define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */ +#define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */ + +enum bfa_flash_cmd { + BFA_FLASH_FAST_READ = 0x0b, /* fast read */ + BFA_FLASH_READ_STATUS = 0x05, /* read status */ +}; + +/* + * Hardware error definition + */ +enum bfa_flash_err { + BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */ + BFA_FLASH_UNINIT = -2, /*!< flash not initialized */ + BFA_FLASH_BAD = -3, /*!< flash bad */ + BFA_FLASH_BUSY = -4, /*!< flash busy */ + BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */ + BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */ + BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */ + BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */ + BFA_FLASH_ERR_LEN = -9, /*!< invalid length */ +}; + +/* + * Flash command register data structure + */ +union bfa_flash_cmd_reg_u { + struct { +#ifdef __BIG_ENDIAN + u32 act:1; + u32 rsv:1; + u32 write_cnt:9; + u32 read_cnt:9; + u32 addr_cnt:4; + u32 cmd:8; +#else + u32 cmd:8; + u32 addr_cnt:4; + u32 read_cnt:9; + u32 write_cnt:9; + u32 rsv:1; + u32 act:1; +#endif + } r; + u32 i; +}; + +/* + * Flash device status register data structure + */ +union bfa_flash_dev_status_reg_u { + struct { +#ifdef __BIG_ENDIAN + u32 rsv:21; + u32 fifo_cnt:6; + u32 busy:1; + u32 init_status:1; + u32 present:1; + u32 bad:1; + u32 good:1; +#else + u32 good:1; + u32 bad:1; + u32 present:1; + u32 init_status:1; + u32 busy:1; + u32 fifo_cnt:6; + u32 rsv:21; +#endif + } 
r; + u32 i; +}; + +/* + * Flash address register data structure + */ +union bfa_flash_addr_reg_u { + struct { +#ifdef __BIG_ENDIAN + u32 addr:24; + u32 dummy:8; +#else + u32 dummy:8; + u32 addr:24; +#endif + } r; + u32 i; +}; + +/* + * dg flash_raw_private Flash raw private functions + */ +static void +bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt, + u8 rd_cnt, u8 ad_cnt, u8 op) +{ + union bfa_flash_cmd_reg_u cmd; + + cmd.i = 0; + cmd.r.act = 1; + cmd.r.write_cnt = wr_cnt; + cmd.r.read_cnt = rd_cnt; + cmd.r.addr_cnt = ad_cnt; + cmd.r.cmd = op; + writel(cmd.i, (pci_bar + FLI_CMD_REG)); +} + +static void +bfa_flash_set_addr(void __iomem *pci_bar, u32 address) +{ + union bfa_flash_addr_reg_u addr; + + addr.r.addr = address & 0x00ffffff; + addr.r.dummy = 0; + writel(addr.i, (pci_bar + FLI_ADDR_REG)); +} + +static int +bfa_flash_cmd_act_check(void __iomem *pci_bar) +{ + union bfa_flash_cmd_reg_u cmd; + + cmd.i = readl(pci_bar + FLI_CMD_REG); + + if (cmd.r.act) + return BFA_FLASH_ERR_CMD_ACT; + + return 0; +} + +/* + * @brief + * Flush FLI data fifo. + * + * @param[in] pci_bar - pci bar address + * @param[in] dev_status - device status + * + * Return 0 on success, negative error number on error. + */ +static u32 +bfa_flash_fifo_flush(void __iomem *pci_bar) +{ + u32 i; + union bfa_flash_dev_status_reg_u dev_status; + + dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); + + if (!dev_status.r.fifo_cnt) + return 0; + + /* fifo counter in terms of words */ + for (i = 0; i < dev_status.r.fifo_cnt; i++) + readl(pci_bar + FLI_RDDATA_REG); + + /* + * Check the device status. It may take some time. + */ + for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { + dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); + if (!dev_status.r.fifo_cnt) + break; + } + + if (dev_status.r.fifo_cnt) + return BFA_FLASH_ERR_FIFO_CNT; + + return 0; +} + +/* + * @brief + * Read flash status. + * + * @param[in] pci_bar - pci bar address + * + * Return 0 on success, negative error number on error. +*/ +static u32 +bfa_flash_status_read(void __iomem *pci_bar) +{ + union bfa_flash_dev_status_reg_u dev_status; + int status; + u32 ret_status; + int i; + + status = bfa_flash_fifo_flush(pci_bar); + if (status < 0) + return status; + + bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS); + + for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { + status = bfa_flash_cmd_act_check(pci_bar); + if (!status) + break; + } + + if (status) + return status; + + dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); + if (!dev_status.r.fifo_cnt) + return BFA_FLASH_BUSY; + + ret_status = readl(pci_bar + FLI_RDDATA_REG); + ret_status >>= 24; + + status = bfa_flash_fifo_flush(pci_bar); + if (status < 0) + return status; + + return ret_status; +} + +/* + * @brief + * Start flash read operation. + * + * @param[in] pci_bar - pci bar address + * @param[in] offset - flash address offset + * @param[in] len - read data length + * @param[in] buf - read data buffer + * + * Return 0 on success, negative error number on error. 
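+ * Typical usage (see bfa_flash_raw_read() below): call
+ * bfa_flash_read_start(), poll bfa_flash_read_check() until it returns 0
+ * (bounded by BFA_FLASH_BLOCKING_OP_MAX), then drain the FIFO with
+ * bfa_flash_read_end().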
+ */ +static u32 +bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, + char *buf) +{ + int status; + + /* + * len must be mutiple of 4 and not exceeding fifo size + */ + if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) + return BFA_FLASH_ERR_LEN; + + /* + * check status + */ + status = bfa_flash_status_read(pci_bar); + if (status == BFA_FLASH_BUSY) + status = bfa_flash_status_read(pci_bar); + + if (status < 0) + return status; + + /* + * check if write-in-progress bit is cleared + */ + if (status & BFA_FLASH_WIP_MASK) + return BFA_FLASH_ERR_WIP; + + bfa_flash_set_addr(pci_bar, offset); + + bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ); + + return 0; +} + +/* + * @brief + * Check flash read operation. + * + * @param[in] pci_bar - pci bar address + * + * Return flash device status, 1 if busy, 0 if not. + */ +static u32 +bfa_flash_read_check(void __iomem *pci_bar) +{ + if (bfa_flash_cmd_act_check(pci_bar)) + return 1; + + return 0; +} + +/* + * @brief + * End flash read operation. + * + * @param[in] pci_bar - pci bar address + * @param[in] len - read data length + * @param[in] buf - read data buffer + * + */ +static void +bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf) +{ + + u32 i; + + /* + * read data fifo up to 32 words + */ + for (i = 0; i < len; i += 4) { + u32 w = readl(pci_bar + FLI_RDDATA_REG); + *((u32 *) (buf + i)) = swab32(w); + } + + bfa_flash_fifo_flush(pci_bar); +} + +/* + * @brief + * Perform flash raw read. + * + * @param[in] pci_bar - pci bar address + * @param[in] offset - flash partition address offset + * @param[in] buf - read data buffer + * @param[in] len - read data length + * + * Return status. + */ + + +#define FLASH_BLOCKING_OP_MAX 500 +#define FLASH_SEM_LOCK_REG 0x18820 + +static int +bfa_raw_sem_get(void __iomem *bar) +{ + int locked; + + locked = readl((bar + FLASH_SEM_LOCK_REG)); + return !locked; + +} + +static bfa_status_t +bfa_flash_sem_get(void __iomem *bar) +{ + u32 n = FLASH_BLOCKING_OP_MAX; + + while (!bfa_raw_sem_get(bar)) { + if (--n <= 0) + return BFA_STATUS_BADFLASH; + mdelay(10); + } + return BFA_STATUS_OK; +} + +static void +bfa_flash_sem_put(void __iomem *bar) +{ + writel(0, (bar + FLASH_SEM_LOCK_REG)); +} + +bfa_status_t +bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, + u32 len) +{ + u32 n; + int status; + u32 off, l, s, residue, fifo_sz; + + residue = len; + off = 0; + fifo_sz = BFA_FLASH_FIFO_SIZE; + status = bfa_flash_sem_get(pci_bar); + if (status != BFA_STATUS_OK) + return status; + + while (residue) { + s = offset + off; + n = s / fifo_sz; + l = (n + 1) * fifo_sz - s; + if (l > residue) + l = residue; + + status = bfa_flash_read_start(pci_bar, offset + off, l, + &buf[off]); + if (status < 0) { + bfa_flash_sem_put(pci_bar); + return BFA_STATUS_FAILED; + } + + n = BFA_FLASH_BLOCKING_OP_MAX; + while (bfa_flash_read_check(pci_bar)) { + if (--n <= 0) { + bfa_flash_sem_put(pci_bar); + return BFA_STATUS_FAILED; + } + } + + bfa_flash_read_end(pci_bar, l, &buf[off]); + + residue -= l; + off += l; + } + bfa_flash_sem_put(pci_bar); + + return BFA_STATUS_OK; +} diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h new file mode 100644 index 000000000..933a1c389 --- /dev/null +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -0,0 +1,1045 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. 
+ * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +#ifndef __BFA_IOC_H__ +#define __BFA_IOC_H__ + +#include "bfad_drv.h" +#include "bfa_cs.h" +#include "bfi.h" + +#define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS) +#define BFA_DBG_FWTRC_LEN \ + (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \ + (sizeof(struct bfa_trc_mod_s) - \ + BFA_TRC_MAX * sizeof(struct bfa_trc_s))) +/* + * BFA timer declarations + */ +typedef void (*bfa_timer_cbfn_t)(void *); + +/* + * BFA timer data structure + */ +struct bfa_timer_s { + struct list_head qe; + bfa_timer_cbfn_t timercb; + void *arg; + int timeout; /* in millisecs */ +}; + +/* + * Timer module structure + */ +struct bfa_timer_mod_s { + struct list_head timer_q; +}; + +#define BFA_TIMER_FREQ 200 /* specified in millisecs */ + +void bfa_timer_beat(struct bfa_timer_mod_s *mod); +void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer, + bfa_timer_cbfn_t timercb, void *arg, + unsigned int timeout); +void bfa_timer_stop(struct bfa_timer_s *timer); + +/* + * Generic Scatter Gather Element used by driver + */ +struct bfa_sge_s { + u32 sg_len; + void *sg_addr; +}; + +#define bfa_sge_word_swap(__sge) do { \ + ((u32 *)(__sge))[0] = swab32(((u32 *)(__sge))[0]); \ + ((u32 *)(__sge))[1] = swab32(((u32 *)(__sge))[1]); \ + ((u32 *)(__sge))[2] = swab32(((u32 *)(__sge))[2]); \ +} while (0) + +#define bfa_swap_words(_x) ( \ + ((u64)(_x) << 32) | ((u64)(_x) >> 32)) + +#ifdef __BIG_ENDIAN +#define bfa_sge_to_be(_x) +#define bfa_sge_to_le(_x) bfa_sge_word_swap(_x) +#define bfa_sgaddr_le(_x) bfa_swap_words(_x) +#else +#define bfa_sge_to_be(_x) bfa_sge_word_swap(_x) +#define bfa_sge_to_le(_x) +#define bfa_sgaddr_le(_x) (_x) +#endif + +/* + * BFA memory resources + */ +struct bfa_mem_dma_s { + struct list_head qe; /* Queue of DMA elements */ + u32 mem_len; /* Total Length in Bytes */ + u8 *kva; /* kernel virtual address */ + u64 dma; /* dma address if DMA memory */ + u8 *kva_curp; /* kva allocation cursor */ + u64 dma_curp; /* dma allocation cursor */ +}; +#define bfa_mem_dma_t struct bfa_mem_dma_s + +struct bfa_mem_kva_s { + struct list_head qe; /* Queue of KVA elements */ + u32 mem_len; /* Total Length in Bytes */ + u8 *kva; /* kernel virtual address */ + u8 *kva_curp; /* kva allocation cursor */ +}; +#define bfa_mem_kva_t struct bfa_mem_kva_s + +struct bfa_meminfo_s { + struct bfa_mem_dma_s dma_info; + struct bfa_mem_kva_s kva_info; +}; + +/* BFA memory segment setup helpers */ +static inline void bfa_mem_dma_setup(struct bfa_meminfo_s *meminfo, + struct bfa_mem_dma_s *dm_ptr, + size_t seg_sz) +{ + dm_ptr->mem_len = seg_sz; + if (seg_sz) + list_add_tail(&dm_ptr->qe, &meminfo->dma_info.qe); +} + +static inline void bfa_mem_kva_setup(struct bfa_meminfo_s *meminfo, + struct bfa_mem_kva_s *kva_ptr, + size_t seg_sz) +{ + kva_ptr->mem_len = seg_sz; + if (seg_sz) + list_add_tail(&kva_ptr->qe, &meminfo->kva_info.qe); +} + +/* BFA dma memory segments iterator */ +#define bfa_mem_dma_sptr(_mod, _i) (&(_mod)->dma_seg[(_i)]) +#define bfa_mem_dma_seg_iter(_mod, _sptr, _nr, _i) \ + for (_i = 0, _sptr = bfa_mem_dma_sptr(_mod, _i); _i < (_nr); \ + _i++, _sptr = bfa_mem_dma_sptr(_mod, _i)) + +#define bfa_mem_kva_curp(_mod) ((_mod)->kva_seg.kva_curp) +#define bfa_mem_dma_virt(_sptr) ((_sptr)->kva_curp) +#define bfa_mem_dma_phys(_sptr) ((_sptr)->dma_curp) +#define bfa_mem_dma_len(_sptr) ((_sptr)->mem_len) + +/* Get the corresponding dma buf kva for a req - from the tag */ +#define 
bfa_mem_get_dmabuf_kva(_mod, _tag, _rqsz) \ + (((u8 *)(_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].kva_curp) +\ + BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz)) + +/* Get the corresponding dma buf pa for a req - from the tag */ +#define bfa_mem_get_dmabuf_pa(_mod, _tag, _rqsz) \ + ((_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].dma_curp + \ + BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz)) + +/* + * PCI device information required by IOC + */ +struct bfa_pcidev_s { + int pci_slot; + u8 pci_func; + u16 device_id; + u16 ssid; + void __iomem *pci_bar_kva; +}; + +/* + * Structure used to remember the DMA-able memory block's KVA and Physical + * Address + */ +struct bfa_dma_s { + void *kva; /* ! Kernel virtual address */ + u64 pa; /* ! Physical address */ +}; + +#define BFA_DMA_ALIGN_SZ 256 +#define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1)) + +/* + * smem size for Crossbow and Catapult + */ +#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */ +#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */ + +#define bfa_dma_be_addr_set(dma_addr, pa) \ + __bfa_dma_be_addr_set(&dma_addr, (u64)pa) +static inline void +__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa) +{ + dma_addr->a32.addr_lo = cpu_to_be32(pa); + dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32); +} + +#define bfa_alen_set(__alen, __len, __pa) \ + __bfa_alen_set(__alen, __len, (u64)__pa) + +static inline void +__bfa_alen_set(struct bfi_alen_s *alen, u32 len, u64 pa) +{ + alen->al_len = cpu_to_be32(len); + bfa_dma_be_addr_set(alen->al_addr, pa); +} + +struct bfa_ioc_regs_s { + void __iomem *hfn_mbox_cmd; + void __iomem *hfn_mbox; + void __iomem *lpu_mbox_cmd; + void __iomem *lpu_mbox; + void __iomem *lpu_read_stat; + void __iomem *pss_ctl_reg; + void __iomem *pss_err_status_reg; + void __iomem *app_pll_fast_ctl_reg; + void __iomem *app_pll_slow_ctl_reg; + void __iomem *ioc_sem_reg; + void __iomem *ioc_usage_sem_reg; + void __iomem *ioc_init_sem_reg; + void __iomem *ioc_usage_reg; + void __iomem *host_page_num_fn; + void __iomem *heartbeat; + void __iomem *ioc_fwstate; + void __iomem *alt_ioc_fwstate; + void __iomem *ll_halt; + void __iomem *alt_ll_halt; + void __iomem *err_set; + void __iomem *ioc_fail_sync; + void __iomem *shirq_isr_next; + void __iomem *shirq_msk_next; + void __iomem *smem_page_start; + u32 smem_pg0; +}; + +#define bfa_mem_read(_raddr, _off) swab32(readl(((_raddr) + (_off)))) +#define bfa_mem_write(_raddr, _off, _val) \ + writel(swab32((_val)), ((_raddr) + (_off))) +/* + * IOC Mailbox structures + */ +struct bfa_mbox_cmd_s { + struct list_head qe; + u32 msg[BFI_IOC_MSGSZ]; +}; + +/* + * IOC mailbox module + */ +typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m); +struct bfa_ioc_mbox_mod_s { + struct list_head cmd_q; /* pending mbox queue */ + int nmclass; /* number of handlers */ + struct { + bfa_ioc_mbox_mcfunc_t cbfn; /* message handlers */ + void *cbarg; + } mbhdlr[BFI_MC_MAX]; +}; + +/* + * IOC callback function interfaces + */ +typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status); +typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa); +typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa); +typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa); +struct bfa_ioc_cbfn_s { + bfa_ioc_enable_cbfn_t enable_cbfn; + bfa_ioc_disable_cbfn_t disable_cbfn; + bfa_ioc_hbfail_cbfn_t hbfail_cbfn; + bfa_ioc_reset_cbfn_t reset_cbfn; +}; + +/* + * IOC event notification mechanism. 
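+ * Sub-modules (flash, phy, fru, diag) register for these transitions by
+ * queueing a bfa_ioc_notify_s on ioc->notify_q, e.g.:
+ *   bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
+ *   list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);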
+ */ +enum bfa_ioc_event_e { + BFA_IOC_E_ENABLED = 1, + BFA_IOC_E_DISABLED = 2, + BFA_IOC_E_FAILED = 3, +}; + +typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event_e); + +struct bfa_ioc_notify_s { + struct list_head qe; + bfa_ioc_notify_cbfn_t cbfn; + void *cbarg; +}; + +/* + * Initialize a IOC event notification structure + */ +#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \ + (__notify)->cbfn = (__cbfn); \ + (__notify)->cbarg = (__cbarg); \ +} while (0) + +struct bfa_iocpf_s { + bfa_fsm_t fsm; + struct bfa_ioc_s *ioc; + bfa_boolean_t fw_mismatch_notified; + bfa_boolean_t auto_recover; + u32 poll_time; +}; + +struct bfa_ioc_s { + bfa_fsm_t fsm; + struct bfa_s *bfa; + struct bfa_pcidev_s pcidev; + struct bfa_timer_mod_s *timer_mod; + struct bfa_timer_s ioc_timer; + struct bfa_timer_s sem_timer; + struct bfa_timer_s hb_timer; + u32 hb_count; + struct list_head notify_q; + void *dbg_fwsave; + int dbg_fwsave_len; + bfa_boolean_t dbg_fwsave_once; + enum bfi_pcifn_class clscode; + struct bfa_ioc_regs_s ioc_regs; + struct bfa_trc_mod_s *trcmod; + struct bfa_ioc_drv_stats_s stats; + bfa_boolean_t fcmode; + bfa_boolean_t pllinit; + bfa_boolean_t stats_busy; /* outstanding stats */ + u8 port_id; + struct bfa_dma_s attr_dma; + struct bfi_ioc_attr_s *attr; + struct bfa_ioc_cbfn_s *cbfn; + struct bfa_ioc_mbox_mod_s mbox_mod; + struct bfa_ioc_hwif_s *ioc_hwif; + struct bfa_iocpf_s iocpf; + enum bfi_asic_gen asic_gen; + enum bfi_asic_mode asic_mode; + enum bfi_port_mode port0_mode; + enum bfi_port_mode port1_mode; + enum bfa_mode_s port_mode; + u8 ad_cap_bm; /* adapter cap bit mask */ + u8 port_mode_cfg; /* config port mode */ + int ioc_aen_seq; +}; + +struct bfa_ioc_hwif_s { + bfa_status_t (*ioc_pll_init) (void __iomem *rb, enum bfi_asic_mode m); + bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc); + void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc); + void (*ioc_reg_init) (struct bfa_ioc_s *ioc); + void (*ioc_map_port) (struct bfa_ioc_s *ioc); + void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc, + bfa_boolean_t msix); + void (*ioc_notify_fail) (struct bfa_ioc_s *ioc); + void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc); + bfa_boolean_t (*ioc_sync_start) (struct bfa_ioc_s *ioc); + void (*ioc_sync_join) (struct bfa_ioc_s *ioc); + void (*ioc_sync_leave) (struct bfa_ioc_s *ioc); + void (*ioc_sync_ack) (struct bfa_ioc_s *ioc); + bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc); + bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc); + void (*ioc_set_fwstate) (struct bfa_ioc_s *ioc, + enum bfi_ioc_state fwstate); + enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc_s *ioc); + void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc, + enum bfi_ioc_state fwstate); + enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc); +}; + +/* + * Queue element to wait for room in request queue. FIFO order is + * maintained when fullfilling requests. + */ +struct bfa_reqq_wait_s { + struct list_head qe; + void (*qresume) (void *cbarg); + void *cbarg; +}; + +typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete); + +/* + * Generic BFA callback element. 
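+ * Queued on the completion path (see the hcb_qe members of the flash,
+ * phy and fru modules below); pre_rmv and fw_status only apply to stack
+ * based elements.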
+ */ +struct bfa_cb_qe_s { + struct list_head qe; + bfa_cb_cbfn_t cbfn; + bfa_boolean_t once; + bfa_boolean_t pre_rmv; /* set for stack based qe(s) */ + bfa_status_t fw_status; /* to access fw status in comp proc */ + void *cbarg; +}; + +/* + * IOCFC state machine definitions/declarations + */ +enum iocfc_event { + IOCFC_E_INIT = 1, /* IOCFC init request */ + IOCFC_E_START = 2, /* IOCFC mod start request */ + IOCFC_E_STOP = 3, /* IOCFC stop request */ + IOCFC_E_ENABLE = 4, /* IOCFC enable request */ + IOCFC_E_DISABLE = 5, /* IOCFC disable request */ + IOCFC_E_IOC_ENABLED = 6, /* IOC enabled message */ + IOCFC_E_IOC_DISABLED = 7, /* IOC disabled message */ + IOCFC_E_IOC_FAILED = 8, /* failure notice by IOC sm */ + IOCFC_E_DCONF_DONE = 9, /* dconf read/write done */ + IOCFC_E_CFG_DONE = 10, /* IOCFC config complete */ +}; + +/* + * ASIC block configurtion related + */ + +typedef void (*bfa_ablk_cbfn_t)(void *, enum bfa_status); + +struct bfa_ablk_s { + struct bfa_ioc_s *ioc; + struct bfa_ablk_cfg_s *cfg; + u16 *pcifn; + struct bfa_dma_s dma_addr; + bfa_boolean_t busy; + struct bfa_mbox_cmd_s mb; + bfa_ablk_cbfn_t cbfn; + void *cbarg; + struct bfa_ioc_notify_s ioc_notify; + struct bfa_mem_dma_s ablk_dma; +}; +#define BFA_MEM_ABLK_DMA(__bfa) (&((__bfa)->modules.ablk.ablk_dma)) + +/* + * SFP module specific + */ +typedef void (*bfa_cb_sfp_t) (void *cbarg, bfa_status_t status); + +struct bfa_sfp_s { + void *dev; + struct bfa_ioc_s *ioc; + struct bfa_trc_mod_s *trcmod; + struct sfp_mem_s *sfpmem; + bfa_cb_sfp_t cbfn; + void *cbarg; + enum bfi_sfp_mem_e memtype; /* mem access type */ + u32 status; + struct bfa_mbox_cmd_s mbcmd; + u8 *dbuf_kva; /* dma buf virtual address */ + u64 dbuf_pa; /* dma buf physical address */ + struct bfa_ioc_notify_s ioc_notify; + enum bfa_defs_sfp_media_e *media; + enum bfa_port_speed portspeed; + bfa_cb_sfp_t state_query_cbfn; + void *state_query_cbarg; + u8 lock; + u8 data_valid; /* data in dbuf is valid */ + u8 state; /* sfp state */ + u8 state_query_lock; + struct bfa_mem_dma_s sfp_dma; + u8 is_elb; /* eloopback */ +}; + +#define BFA_SFP_MOD(__bfa) (&(__bfa)->modules.sfp) +#define BFA_MEM_SFP_DMA(__bfa) (&(BFA_SFP_MOD(__bfa)->sfp_dma)) + +u32 bfa_sfp_meminfo(void); + +void bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, + void *dev, struct bfa_trc_mod_s *trcmod); + +void bfa_sfp_memclaim(struct bfa_sfp_s *diag, u8 *dm_kva, u64 dm_pa); +void bfa_sfp_intr(void *bfaarg, struct bfi_mbmsg_s *msg); + +bfa_status_t bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem, + bfa_cb_sfp_t cbfn, void *cbarg); + +bfa_status_t bfa_sfp_media(struct bfa_sfp_s *sfp, + enum bfa_defs_sfp_media_e *media, + bfa_cb_sfp_t cbfn, void *cbarg); + +bfa_status_t bfa_sfp_speed(struct bfa_sfp_s *sfp, + enum bfa_port_speed portspeed, + bfa_cb_sfp_t cbfn, void *cbarg); + +/* + * Flash module specific + */ +typedef void (*bfa_cb_flash_t) (void *cbarg, bfa_status_t status); + +struct bfa_flash_s { + struct bfa_ioc_s *ioc; /* back pointer to ioc */ + struct bfa_trc_mod_s *trcmod; + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 op_busy; /* operation busy flag */ + u32 residue; /* residual length */ + u32 offset; /* offset */ + bfa_status_t status; /* status */ + u8 *dbuf_kva; /* dma buf virtual address */ + u64 dbuf_pa; /* dma buf physical address */ + struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ + bfa_cb_flash_t cbfn; /* user callback function */ + void *cbarg; /* user callback arg */ + u8 *ubuf; /* user supplied 
buffer */ + struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */ + u32 addr_off; /* partition address offset */ + struct bfa_mbox_cmd_s mb; /* mailbox */ + struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */ + struct bfa_mem_dma_s flash_dma; +}; + +#define BFA_FLASH(__bfa) (&(__bfa)->modules.flash) +#define BFA_MEM_FLASH_DMA(__bfa) (&(BFA_FLASH(__bfa)->flash_dma)) + +bfa_status_t bfa_flash_get_attr(struct bfa_flash_s *flash, + struct bfa_flash_attr_s *attr, + bfa_cb_flash_t cbfn, void *cbarg); +bfa_status_t bfa_flash_erase_part(struct bfa_flash_s *flash, + enum bfa_flash_part_type type, u8 instance, + bfa_cb_flash_t cbfn, void *cbarg); +bfa_status_t bfa_flash_update_part(struct bfa_flash_s *flash, + enum bfa_flash_part_type type, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_flash_t cbfn, void *cbarg); +bfa_status_t bfa_flash_read_part(struct bfa_flash_s *flash, + enum bfa_flash_part_type type, u8 instance, void *buf, + u32 len, u32 offset, bfa_cb_flash_t cbfn, void *cbarg); +u32 bfa_flash_meminfo(bfa_boolean_t mincfg); +void bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, + void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg); +void bfa_flash_memclaim(struct bfa_flash_s *flash, + u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg); +bfa_status_t bfa_flash_raw_read(void __iomem *pci_bar_kva, + u32 offset, char *buf, u32 len); + +/* + * DIAG module specific + */ + +typedef void (*bfa_cb_diag_t) (void *cbarg, bfa_status_t status); +typedef void (*bfa_cb_diag_beacon_t) (void *dev, bfa_boolean_t beacon, + bfa_boolean_t link_e2e_beacon); + +/* + * Firmware ping test results + */ +struct bfa_diag_results_fwping { + u32 data; /* store the corrupted data */ + u32 status; + u32 dmastatus; + u8 rsvd[4]; +}; + +struct bfa_diag_qtest_result_s { + u32 status; + u16 count; /* successful queue test count */ + u8 queue; + u8 rsvd; /* 64-bit align */ +}; + +/* + * Firmware ping test results + */ +struct bfa_diag_fwping_s { + struct bfa_diag_results_fwping *result; + bfa_cb_diag_t cbfn; + void *cbarg; + u32 data; + u8 lock; + u8 rsv[3]; + u32 status; + u32 count; + struct bfa_mbox_cmd_s mbcmd; + u8 *dbuf_kva; /* dma buf virtual address */ + u64 dbuf_pa; /* dma buf physical address */ +}; + +/* + * Temperature sensor query results + */ +struct bfa_diag_results_tempsensor_s { + u32 status; + u16 temp; /* 10-bit A/D value */ + u16 brd_temp; /* 9-bit board temp */ + u8 ts_junc; /* show junction tempsensor */ + u8 ts_brd; /* show board tempsensor */ + u8 rsvd[6]; /* keep 8 bytes alignment */ +}; + +struct bfa_diag_tsensor_s { + bfa_cb_diag_t cbfn; + void *cbarg; + struct bfa_diag_results_tempsensor_s *temp; + u8 lock; + u8 rsv[3]; + u32 status; + struct bfa_mbox_cmd_s mbcmd; +}; + +struct bfa_diag_sfpshow_s { + struct sfp_mem_s *sfpmem; + bfa_cb_diag_t cbfn; + void *cbarg; + u8 lock; + u8 static_data; + u8 rsv[2]; + u32 status; + struct bfa_mbox_cmd_s mbcmd; + u8 *dbuf_kva; /* dma buf virtual address */ + u64 dbuf_pa; /* dma buf physical address */ +}; + +struct bfa_diag_led_s { + struct bfa_mbox_cmd_s mbcmd; + bfa_boolean_t lock; /* 1: ledtest is operating */ +}; + +struct bfa_diag_beacon_s { + struct bfa_mbox_cmd_s mbcmd; + bfa_boolean_t state; /* port beacon state */ + bfa_boolean_t link_e2e; /* link beacon state */ +}; + +struct bfa_diag_s { + void *dev; + struct bfa_ioc_s *ioc; + struct bfa_trc_mod_s *trcmod; + struct bfa_diag_fwping_s fwping; + struct bfa_diag_tsensor_s tsensor; + struct bfa_diag_sfpshow_s sfpshow; + struct bfa_diag_led_s ledtest; + struct 
bfa_diag_beacon_s beacon; + void *result; + struct bfa_timer_s timer; + bfa_cb_diag_beacon_t cbfn_beacon; + bfa_cb_diag_t cbfn; + void *cbarg; + u8 block; + u8 timer_active; + u8 rsvd[2]; + u32 status; + struct bfa_ioc_notify_s ioc_notify; + struct bfa_mem_dma_s diag_dma; +}; + +#define BFA_DIAG_MOD(__bfa) (&(__bfa)->modules.diag_mod) +#define BFA_MEM_DIAG_DMA(__bfa) (&(BFA_DIAG_MOD(__bfa)->diag_dma)) + +u32 bfa_diag_meminfo(void); +void bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa); +void bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev, + bfa_cb_diag_beacon_t cbfn_beacon, + struct bfa_trc_mod_s *trcmod); +bfa_status_t bfa_diag_reg_read(struct bfa_diag_s *diag, u32 offset, + u32 len, u32 *buf, u32 force); +bfa_status_t bfa_diag_reg_write(struct bfa_diag_s *diag, u32 offset, + u32 len, u32 value, u32 force); +bfa_status_t bfa_diag_tsensor_query(struct bfa_diag_s *diag, + struct bfa_diag_results_tempsensor_s *result, + bfa_cb_diag_t cbfn, void *cbarg); +bfa_status_t bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, + u32 pattern, struct bfa_diag_results_fwping *result, + bfa_cb_diag_t cbfn, void *cbarg); +bfa_status_t bfa_diag_sfpshow(struct bfa_diag_s *diag, + struct sfp_mem_s *sfpmem, u8 static_data, + bfa_cb_diag_t cbfn, void *cbarg); +bfa_status_t bfa_diag_memtest(struct bfa_diag_s *diag, + struct bfa_diag_memtest_s *memtest, u32 pattern, + struct bfa_diag_memtest_result *result, + bfa_cb_diag_t cbfn, void *cbarg); +bfa_status_t bfa_diag_ledtest(struct bfa_diag_s *diag, + struct bfa_diag_ledtest_s *ledtest); +bfa_status_t bfa_diag_beacon_port(struct bfa_diag_s *diag, + bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon, + u32 sec); + +/* + * PHY module specific + */ +typedef void (*bfa_cb_phy_t) (void *cbarg, bfa_status_t status); + +struct bfa_phy_s { + struct bfa_ioc_s *ioc; /* back pointer to ioc */ + struct bfa_trc_mod_s *trcmod; /* trace module */ + u8 instance; /* port instance */ + u8 op_busy; /* operation busy flag */ + u8 rsv[2]; + u32 residue; /* residual length */ + u32 offset; /* offset */ + bfa_status_t status; /* status */ + u8 *dbuf_kva; /* dma buf virtual address */ + u64 dbuf_pa; /* dma buf physical address */ + struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ + bfa_cb_phy_t cbfn; /* user callback function */ + void *cbarg; /* user callback arg */ + u8 *ubuf; /* user supplied buffer */ + struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */ + u32 addr_off; /* phy address offset */ + struct bfa_mbox_cmd_s mb; /* mailbox */ + struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */ + struct bfa_mem_dma_s phy_dma; +}; +#define BFA_PHY(__bfa) (&(__bfa)->modules.phy) +#define BFA_MEM_PHY_DMA(__bfa) (&(BFA_PHY(__bfa)->phy_dma)) + +bfa_boolean_t bfa_phy_busy(struct bfa_ioc_s *ioc); +bfa_status_t bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance, + struct bfa_phy_attr_s *attr, + bfa_cb_phy_t cbfn, void *cbarg); +bfa_status_t bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance, + struct bfa_phy_stats_s *stats, + bfa_cb_phy_t cbfn, void *cbarg); +bfa_status_t bfa_phy_update(struct bfa_phy_s *phy, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_phy_t cbfn, void *cbarg); +bfa_status_t bfa_phy_read(struct bfa_phy_s *phy, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_phy_t cbfn, void *cbarg); + +u32 bfa_phy_meminfo(bfa_boolean_t mincfg); +void bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, + void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg); +void 
bfa_phy_memclaim(struct bfa_phy_s *phy, + u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg); +void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg); + +/* + * FRU module specific + */ +typedef void (*bfa_cb_fru_t) (void *cbarg, bfa_status_t status); + +struct bfa_fru_s { + struct bfa_ioc_s *ioc; /* back pointer to ioc */ + struct bfa_trc_mod_s *trcmod; /* trace module */ + u8 op_busy; /* operation busy flag */ + u8 rsv[3]; + u32 residue; /* residual length */ + u32 offset; /* offset */ + bfa_status_t status; /* status */ + u8 *dbuf_kva; /* dma buf virtual address */ + u64 dbuf_pa; /* dma buf physical address */ + struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ + bfa_cb_fru_t cbfn; /* user callback function */ + void *cbarg; /* user callback arg */ + u8 *ubuf; /* user supplied buffer */ + struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */ + u32 addr_off; /* fru address offset */ + struct bfa_mbox_cmd_s mb; /* mailbox */ + struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */ + struct bfa_mem_dma_s fru_dma; + u8 trfr_cmpl; +}; + +#define BFA_FRU(__bfa) (&(__bfa)->modules.fru) +#define BFA_MEM_FRU_DMA(__bfa) (&(BFA_FRU(__bfa)->fru_dma)) + +bfa_status_t bfa_fruvpd_update(struct bfa_fru_s *fru, + void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl); +bfa_status_t bfa_fruvpd_read(struct bfa_fru_s *fru, + void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg); +bfa_status_t bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size); +bfa_status_t bfa_tfru_write(struct bfa_fru_s *fru, + void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg); +bfa_status_t bfa_tfru_read(struct bfa_fru_s *fru, + void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg); +u32 bfa_fru_meminfo(bfa_boolean_t mincfg); +void bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, + void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg); +void bfa_fru_memclaim(struct bfa_fru_s *fru, + u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg); +void bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg); + +/* + * Driver Config( dconf) specific + */ +#define BFI_DCONF_SIGNATURE 0xabcdabcd +#define BFI_DCONF_VERSION 1 + +#pragma pack(1) +struct bfa_dconf_hdr_s { + u32 signature; + u32 version; +}; + +struct bfa_dconf_s { + struct bfa_dconf_hdr_s hdr; + struct bfa_lunmask_cfg_s lun_mask; + struct bfa_throttle_cfg_s throttle_cfg; +}; +#pragma pack() + +struct bfa_dconf_mod_s { + bfa_sm_t sm; + u8 instance; + bfa_boolean_t read_data_valid; + bfa_boolean_t min_cfg; + struct bfa_timer_s timer; + struct bfa_s *bfa; + void *bfad; + void *trcmod; + struct bfa_dconf_s *dconf; + struct bfa_mem_kva_s kva_seg; +}; + +#define BFA_DCONF_MOD(__bfa) \ + (&(__bfa)->modules.dconf_mod) +#define BFA_MEM_DCONF_KVA(__bfa) (&(BFA_DCONF_MOD(__bfa)->kva_seg)) +#define bfa_dconf_read_data_valid(__bfa) \ + (BFA_DCONF_MOD(__bfa)->read_data_valid) +#define BFA_DCONF_UPDATE_TOV 5000 /* memtest timeout in msec */ +#define bfa_dconf_get_min_cfg(__bfa) \ + (BFA_DCONF_MOD(__bfa)->min_cfg) + +void bfa_dconf_modinit(struct bfa_s *bfa); +void bfa_dconf_modexit(struct bfa_s *bfa); +bfa_status_t bfa_dconf_update(struct bfa_s *bfa); + +/* + * IOC specfic macros + */ +#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) +#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id) +#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva) +#define bfa_ioc_portid(__ioc) ((__ioc)->port_id) +#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen) +#define bfa_ioc_is_cna(__ioc) \ + 
((bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_FCoE) || \ + (bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_LL)) +#define bfa_ioc_fetch_stats(__ioc, __stats) \ + (((__stats)->drv_stats) = (__ioc)->stats) +#define bfa_ioc_clr_stats(__ioc) \ + memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats)) +#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize) +#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) +#define bfa_ioc_speed_sup(__ioc) \ + ((bfa_ioc_is_cna(__ioc)) ? BFA_PORT_SPEED_10GBPS : \ + BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)) +#define bfa_ioc_get_nports(__ioc) \ + BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop) + +#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) +#define BFA_IOC_FWIMG_MINSZ (16 * 1024) +#define BFA_IOC_FW_SMEM_SIZE(__ioc) \ + ((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB) \ + ? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE) +#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) +#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) +#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) + +/* + * IOC mailbox interface + */ +void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd); +void bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, + bfa_ioc_mbox_mcfunc_t *mcfuncs); +void bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc); +void bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len); +bfa_boolean_t bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg); +void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, + bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg); + +/* + * IOC interfaces + */ + +#define bfa_ioc_pll_init_asic(__ioc) \ + ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \ + (__ioc)->asic_mode)) + +bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc); +bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode mode); +bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode); +bfa_status_t bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode); + +#define bfa_ioc_isr_mode_set(__ioc, __msix) do { \ + if ((__ioc)->ioc_hwif->ioc_isr_mode_set) \ + ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \ +} while (0) +#define bfa_ioc_ownership_reset(__ioc) \ + ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc)) +#define bfa_ioc_get_fcmode(__ioc) ((__ioc)->fcmode) +#define bfa_ioc_lpu_read_stat(__ioc) do { \ + if ((__ioc)->ioc_hwif->ioc_lpu_read_stat) \ + ((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \ +} while (0) + +void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc); +void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc); +void bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc); +void bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc); + +void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, + struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod); +void bfa_ioc_auto_recover(bfa_boolean_t auto_recover); +void bfa_ioc_detach(struct bfa_ioc_s *ioc); +void bfa_ioc_suspend(struct bfa_ioc_s *ioc); +void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, + enum bfi_pcifn_class clscode); +void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa); +void bfa_ioc_enable(struct bfa_ioc_s *ioc); +void bfa_ioc_disable(struct bfa_ioc_s *ioc); +bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc); + +bfa_status_t bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, + u32 boot_env); +void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg); +void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); +bfa_boolean_t 
bfa_ioc_is_operational(struct bfa_ioc_s *ioc); +bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc); +bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); +bfa_boolean_t bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc); +bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); +bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc); +void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc); +enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc); +void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num); +void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver); +void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver); +void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model); +void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, + char *manufacturer); +void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev); +enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc); + +void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr); +void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, + struct bfa_adapter_attr_s *ad_attr); +void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave); +bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, + int *trclen); +bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, + int *trclen); +bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf, + u32 *offset, int *buflen); +bfa_status_t bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc); +bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg); +void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, + struct bfi_ioc_image_hdr_s *fwhdr); +bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, + struct bfi_ioc_image_hdr_s *fwhdr); +void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event); +bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats); +bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc); +void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc); + +/* + * asic block configuration related APIs + */ +u32 bfa_ablk_meminfo(void); +void bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa); +void bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc); +bfa_status_t bfa_ablk_query(struct bfa_ablk_s *ablk, + struct bfa_ablk_cfg_s *ablk_cfg, + bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, + enum bfa_mode_s mode, int max_pf, int max_vf, + bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, + enum bfa_mode_s mode, int max_pf, int max_vf, + bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, + u8 port, enum bfi_pcifn_class personality, + u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn, + bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, + u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, + bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, + bfa_ablk_cbfn_t cbfn, void *cbarg); + +bfa_status_t bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off, + u32 *fwimg); +/* + * bfa mfg wwn API functions + */ +mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc); +mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s 
*ioc); + +/* + * F/W Image Size & Chunk + */ +extern u32 bfi_image_cb_size; +extern u32 bfi_image_ct_size; +extern u32 bfi_image_ct2_size; +extern u32 *bfi_image_cb; +extern u32 *bfi_image_ct; +extern u32 *bfi_image_ct2; + +static inline u32 * +bfi_image_cb_get_chunk(u32 off) +{ + return (u32 *)(bfi_image_cb + off); +} + +static inline u32 * +bfi_image_ct_get_chunk(u32 off) +{ + return (u32 *)(bfi_image_ct + off); +} + +static inline u32 * +bfi_image_ct2_get_chunk(u32 off) +{ + return (u32 *)(bfi_image_ct2 + off); +} + +static inline u32* +bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off) +{ + switch (asic_gen) { + case BFI_ASIC_GEN_CB: + return bfi_image_cb_get_chunk(off); + break; + case BFI_ASIC_GEN_CT: + return bfi_image_ct_get_chunk(off); + break; + case BFI_ASIC_GEN_CT2: + return bfi_image_ct2_get_chunk(off); + break; + default: + return NULL; + } +} + +static inline u32 +bfa_cb_image_get_size(enum bfi_asic_gen asic_gen) +{ + switch (asic_gen) { + case BFI_ASIC_GEN_CB: + return bfi_image_cb_size; + break; + case BFI_ASIC_GEN_CT: + return bfi_image_ct_size; + break; + case BFI_ASIC_GEN_CT2: + return bfi_image_ct2_size; + break; + default: + return 0; + } +} + +/* + * CNA TRCMOD declaration + */ +/* + * !!! Only append to the enums defined here to avoid any versioning + * !!! needed between trace utility and driver version + */ +enum { + BFA_TRC_CNA_PORT = 1, + BFA_TRC_CNA_IOC = 2, + BFA_TRC_CNA_IOC_CB = 3, + BFA_TRC_CNA_IOC_CT = 4, +}; + +#endif /* __BFA_IOC_H__ */ diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c new file mode 100644 index 000000000..2fc6215c2 --- /dev/null +++ b/drivers/scsi/bfa/bfa_ioc_cb.c @@ -0,0 +1,401 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +#include "bfad_drv.h" +#include "bfa_ioc.h" +#include "bfi_reg.h" +#include "bfa_defs.h" + +BFA_TRC_FILE(CNA, IOC_CB); + +#define bfa_ioc_cb_join_pos(__ioc) ((u32) (1 << BFA_IOC_CB_JOIN_SH)) + +/* + * forward declarations + */ +static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); +static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc); +static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc); +static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_set_cur_ioc_fwstate( + struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); +static enum bfi_ioc_state bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_set_alt_ioc_fwstate( + struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); +static enum bfi_ioc_state bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc); + +static struct bfa_ioc_hwif_s hwif_cb; + +/* + * Called from bfa_ioc_attach() to map asic specific calls. 
+ */ +void +bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc) +{ + hwif_cb.ioc_pll_init = bfa_ioc_cb_pll_init; + hwif_cb.ioc_firmware_lock = bfa_ioc_cb_firmware_lock; + hwif_cb.ioc_firmware_unlock = bfa_ioc_cb_firmware_unlock; + hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init; + hwif_cb.ioc_map_port = bfa_ioc_cb_map_port; + hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set; + hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail; + hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset; + hwif_cb.ioc_sync_start = bfa_ioc_cb_sync_start; + hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join; + hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave; + hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack; + hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete; + hwif_cb.ioc_set_fwstate = bfa_ioc_cb_set_cur_ioc_fwstate; + hwif_cb.ioc_get_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate; + hwif_cb.ioc_set_alt_fwstate = bfa_ioc_cb_set_alt_ioc_fwstate; + hwif_cb.ioc_get_alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate; + + ioc->ioc_hwif = &hwif_cb; +} + +/* + * Return true if firmware of current driver matches the running firmware. + */ +static bfa_boolean_t +bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc) +{ + enum bfi_ioc_state alt_fwstate, cur_fwstate; + struct bfi_ioc_image_hdr_s fwhdr; + + cur_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc); + bfa_trc(ioc, cur_fwstate); + alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc); + bfa_trc(ioc, alt_fwstate); + + /* + * Uninit implies this is the only driver as of now. + */ + if (cur_fwstate == BFI_IOC_UNINIT) + return BFA_TRUE; + /* + * Check if another driver with a different firmware is active + */ + bfa_ioc_fwver_get(ioc, &fwhdr); + if (!bfa_ioc_fwver_cmp(ioc, &fwhdr) && + alt_fwstate != BFI_IOC_DISABLED) { + bfa_trc(ioc, alt_fwstate); + return BFA_FALSE; + } + + return BFA_TRUE; +} + +static void +bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc) +{ +} + +/* + * Notify other functions on HB failure. 
+ */ +static void +bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc) +{ + writel(~0U, ioc->ioc_regs.err_set); + readl(ioc->ioc_regs.err_set); +} + +/* + * Host to LPU mailbox message addresses + */ +static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { + { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, + { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 } +}; + +/* + * Host <-> LPU mailbox command/status registers + */ +static struct { u32 hfn, lpu; } iocreg_mbcmd[] = { + + { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, + { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT } +}; + +static void +bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc) +{ + void __iomem *rb; + int pcifn = bfa_ioc_pcifn(ioc); + + rb = bfa_ioc_bar0(ioc); + + ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox; + ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox; + ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn; + + if (ioc->port_id == 0) { + ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; + } else { + ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); + ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); + ioc->ioc_regs.alt_ioc_fwstate = (rb + BFA_IOC0_STATE_REG); + } + + /* + * Host <-> LPU mailbox command/status registers + */ + ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd[pcifn].lpu; + + /* + * PSS control registers + */ + ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); + ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); + ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG); + ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG); + + /* + * IOC semaphore registers and serialization + */ + ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); + ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); + + /* + * sram memory access + */ + ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); + ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB; + + /* + * err set reg : for notification of hb failure + */ + ioc->ioc_regs.err_set = (rb + ERR_SET_REG); +} + +/* + * Initialize IOC to port mapping. + */ + +static void +bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc) +{ + /* + * For crossbow, port id is same as pci function. + */ + ioc->port_id = bfa_ioc_pcifn(ioc); + + bfa_trc(ioc, ioc->port_id); +} + +/* + * Set interrupt mode for a function: INTX or MSIX + */ +static void +bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) +{ +} + +/* + * Synchronized IOC failure processing routines + */ +static bfa_boolean_t +bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc) +{ + u32 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); + + /** + * Driver load time. If the join bit is set, + * it is due to an unclean exit by the driver for this + * PCI fn in the previous incarnation. Whoever comes here first + * should clean it up, no matter which PCI fn. + */ + if (ioc_fwstate & BFA_IOC_CB_JOIN_MASK) { + writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); + return BFA_TRUE; + } + + return bfa_ioc_cb_sync_complete(ioc); +} + +/* + * Cleanup hw semaphore and usecnt registers + */ +static void +bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc) +{ + + /* + * Read the hw sem reg to make sure that it is locked + * before we clear it. If it is not locked, writing 1 + * will lock it instead of clearing it. 
+ */ + readl(ioc->ioc_regs.ioc_sem_reg); + writel(1, ioc->ioc_regs.ioc_sem_reg); +} + +/* + * Synchronized IOC failure processing routines + */ +static void +bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); + u32 join_pos = bfa_ioc_cb_join_pos(ioc); + + writel((r32 | join_pos), ioc->ioc_regs.ioc_fwstate); +} + +static void +bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); + u32 join_pos = bfa_ioc_cb_join_pos(ioc); + + writel((r32 & ~join_pos), ioc->ioc_regs.ioc_fwstate); +} + +static void +bfa_ioc_cb_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc, + enum bfi_ioc_state fwstate) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); + + writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)), + ioc->ioc_regs.ioc_fwstate); +} + +static enum bfi_ioc_state +bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc) +{ + return (enum bfi_ioc_state)(readl(ioc->ioc_regs.ioc_fwstate) & + BFA_IOC_CB_FWSTATE_MASK); +} + +static void +bfa_ioc_cb_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc, + enum bfi_ioc_state fwstate) +{ + u32 r32 = readl(ioc->ioc_regs.alt_ioc_fwstate); + + writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)), + ioc->ioc_regs.alt_ioc_fwstate); +} + +static enum bfi_ioc_state +bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc) +{ + return (enum bfi_ioc_state)(readl(ioc->ioc_regs.alt_ioc_fwstate) & + BFA_IOC_CB_FWSTATE_MASK); +} + +static void +bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc) +{ + bfa_ioc_cb_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); +} + +static bfa_boolean_t +bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc) +{ + u32 fwstate, alt_fwstate; + fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc); + + /* + * At this point, this IOC is hoding the hw sem in the + * start path (fwcheck) OR in the disable/enable path + * OR to check if the other IOC has acknowledged failure. + * + * So, this IOC can be in UNINIT, INITING, DISABLED, FAIL + * or in MEMTEST states. In a normal scenario, this IOC + * can not be in OP state when this function is called. + * + * However, this IOC could still be in OP state when + * the OS driver is starting up, if the OptROM code has + * left it in that state. + * + * If we had marked this IOC's fwstate as BFI_IOC_FAIL + * in the failure case and now, if the fwstate is not + * BFI_IOC_FAIL it implies that the other PCI fn have + * reinitialized the ASIC or this IOC got disabled, so + * return TRUE. 
+ */ + if (fwstate == BFI_IOC_UNINIT || + fwstate == BFI_IOC_INITING || + fwstate == BFI_IOC_DISABLED || + fwstate == BFI_IOC_MEMTEST || + fwstate == BFI_IOC_OP) + return BFA_TRUE; + else { + alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc); + if (alt_fwstate == BFI_IOC_FAIL || + alt_fwstate == BFI_IOC_DISABLED || + alt_fwstate == BFI_IOC_UNINIT || + alt_fwstate == BFI_IOC_INITING || + alt_fwstate == BFI_IOC_MEMTEST) + return BFA_TRUE; + else + return BFA_FALSE; + } +} + +bfa_status_t +bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode) +{ + u32 pll_sclk, pll_fclk, join_bits; + + pll_sclk = __APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN | + __APP_PLL_SCLK_P0_1(3U) | + __APP_PLL_SCLK_JITLMT0_1(3U) | + __APP_PLL_SCLK_CNTLMT0_1(3U); + pll_fclk = __APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN | + __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) | + __APP_PLL_LCLK_JITLMT0_1(3U) | + __APP_PLL_LCLK_CNTLMT0_1(3U); + join_bits = readl(rb + BFA_IOC0_STATE_REG) & + BFA_IOC_CB_JOIN_MASK; + writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC0_STATE_REG)); + join_bits = readl(rb + BFA_IOC1_STATE_REG) & + BFA_IOC_CB_JOIN_MASK; + writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC1_STATE_REG)); + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); + writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG); + writel(__APP_PLL_SCLK_BYPASS | __APP_PLL_SCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_SCLK_CTL_REG); + writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG); + writel(__APP_PLL_LCLK_BYPASS | __APP_PLL_LCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_LCLK_CTL_REG); + udelay(2); + writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG); + writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG); + writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_LCLK_CTL_REG); + udelay(2000); + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); + writel(pll_sclk, (rb + APP_PLL_SCLK_CTL_REG)); + writel(pll_fclk, (rb + APP_PLL_LCLK_CTL_REG)); + + return BFA_STATUS_OK; +} diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c new file mode 100644 index 000000000..fb7482916 --- /dev/null +++ b/drivers/scsi/bfa/bfa_ioc_ct.c @@ -0,0 +1,990 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
+ */ + +#include "bfad_drv.h" +#include "bfa_ioc.h" +#include "bfi_reg.h" +#include "bfa_defs.h" + +BFA_TRC_FILE(CNA, IOC_CT); + +#define bfa_ioc_ct_sync_pos(__ioc) \ + ((uint32_t) (1 << bfa_ioc_pcifn(__ioc))) +#define BFA_IOC_SYNC_REQD_SH 16 +#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff) +#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000) +#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH) +#define bfa_ioc_ct_sync_reqd_pos(__ioc) \ + (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH) + +/* + * forward declarations + */ +static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc); +static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc); +static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc); +static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc); +static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc); +static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc); +static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc); +static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc); +static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc); +static void bfa_ioc_ct_set_cur_ioc_fwstate( + struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); +static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc); +static void bfa_ioc_ct_set_alt_ioc_fwstate( + struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); +static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc); + +static struct bfa_ioc_hwif_s hwif_ct; +static struct bfa_ioc_hwif_s hwif_ct2; + +/* + * Return true if firmware of current driver matches the running firmware. + */ +static bfa_boolean_t +bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) +{ + enum bfi_ioc_state ioc_fwstate; + u32 usecnt; + struct bfi_ioc_image_hdr_s fwhdr; + + bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + usecnt = readl(ioc->ioc_regs.ioc_usage_reg); + + /* + * If usage count is 0, always return TRUE. + */ + if (usecnt == 0) { + writel(1, ioc->ioc_regs.ioc_usage_reg); + readl(ioc->ioc_regs.ioc_usage_sem_reg); + writel(1, ioc->ioc_regs.ioc_usage_sem_reg); + writel(0, ioc->ioc_regs.ioc_fail_sync); + bfa_trc(ioc, usecnt); + return BFA_TRUE; + } + + ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); + bfa_trc(ioc, ioc_fwstate); + + /* + * Use count cannot be non-zero and chip in uninitialized state. + */ + WARN_ON(ioc_fwstate == BFI_IOC_UNINIT); + + /* + * Check if another driver with a different firmware is active + */ + bfa_ioc_fwver_get(ioc, &fwhdr); + if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) { + readl(ioc->ioc_regs.ioc_usage_sem_reg); + writel(1, ioc->ioc_regs.ioc_usage_sem_reg); + bfa_trc(ioc, usecnt); + return BFA_FALSE; + } + + /* + * Same firmware version. Increment the reference count. + */ + usecnt++; + writel(usecnt, ioc->ioc_regs.ioc_usage_reg); + readl(ioc->ioc_regs.ioc_usage_sem_reg); + writel(1, ioc->ioc_regs.ioc_usage_sem_reg); + bfa_trc(ioc, usecnt); + return BFA_TRUE; +} + +static void +bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc) +{ + u32 usecnt; + + /* + * decrement usage count + */ + bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + usecnt = readl(ioc->ioc_regs.ioc_usage_reg); + WARN_ON(usecnt <= 0); + + usecnt--; + writel(usecnt, ioc->ioc_regs.ioc_usage_reg); + bfa_trc(ioc, usecnt); + + readl(ioc->ioc_regs.ioc_usage_sem_reg); + writel(1, ioc->ioc_regs.ioc_usage_sem_reg); +} + +/* + * Notify other functions on HB failure. 
+ */ +static void +bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc) +{ + if (bfa_ioc_is_cna(ioc)) { + writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); + writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); + /* Wait for halt to take effect */ + readl(ioc->ioc_regs.ll_halt); + readl(ioc->ioc_regs.alt_ll_halt); + } else { + writel(~0U, ioc->ioc_regs.err_set); + readl(ioc->ioc_regs.err_set); + } +} + +/* + * Host to LPU mailbox message addresses + */ +static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = { + { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, + { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, + { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 }, + { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } +}; + +/* + * Host <-> LPU mailbox command/status registers - port 0 + */ +static struct { u32 hfn, lpu; } ct_p0reg[] = { + { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, + { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT }, + { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT }, + { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT } +}; + +/* + * Host <-> LPU mailbox command/status registers - port 1 + */ +static struct { u32 hfn, lpu; } ct_p1reg[] = { + { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT }, + { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }, + { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT }, + { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT } +}; + +static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; } + ct2_reg[] = { + { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, + CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT, + CT2_HOSTFN_LPU0_READ_STAT}, + { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, + CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT, + CT2_HOSTFN_LPU1_READ_STAT}, +}; + +static void +bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) +{ + void __iomem *rb; + int pcifn = bfa_ioc_pcifn(ioc); + + rb = bfa_ioc_bar0(ioc); + + ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; + ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; + ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; + + if (ioc->port_id == 0) { + ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; + ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; + } else { + ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); + ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); + ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; + ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; + } + + /* + * PSS control registers + */ + ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); + ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); + ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG); + ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG); + + /* + * IOC semaphore registers and serialization + */ + ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); + ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); + ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); + ioc->ioc_regs.ioc_usage_reg = (rb + 
BFA_FW_USE_COUNT); + ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC); + + /* + * sram memory access + */ + ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); + ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; + + /* + * err set reg : for notification of hb failure in fcmode + */ + ioc->ioc_regs.err_set = (rb + ERR_SET_REG); +} + +static void +bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc) +{ + void __iomem *rb; + int port = bfa_ioc_portid(ioc); + + rb = bfa_ioc_bar0(ioc); + + ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox; + ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox; + ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn; + ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu; + ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read; + + if (port == 0) { + ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; + } else { + ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG); + ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG); + ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; + } + + /* + * PSS control registers + */ + ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); + ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); + ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG); + ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG); + + /* + * IOC semaphore registers and serialization + */ + ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG); + ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG); + ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG); + ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT); + ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC); + + /* + * sram memory access + */ + ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); + ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; + + /* + * err set reg : for notification of hb failure in fcmode + */ + ioc->ioc_regs.err_set = (rb + ERR_SET_REG); +} + +/* + * Initialize IOC to port mapping. 
+ */ + +#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) +static void +bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32; + + /* + * For catapult, base port id on personality register and IOC type + */ + r32 = readl(rb + FNC_PERS_REG); + r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); + ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; + + bfa_trc(ioc, bfa_ioc_pcifn(ioc)); + bfa_trc(ioc, ioc->port_id); +} + +static void +bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32; + + r32 = readl(rb + CT2_HOSTFN_PERSONALITY0); + ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH); + + bfa_trc(ioc, bfa_ioc_pcifn(ioc)); + bfa_trc(ioc, ioc->port_id); +} + +/* + * Set interrupt mode for a function: INTX or MSIX + */ +static void +bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32, mode; + + r32 = readl(rb + FNC_PERS_REG); + bfa_trc(ioc, r32); + + mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & + __F0_INTX_STATUS; + + /* + * If already in desired mode, do not change anything + */ + if ((!msix && mode) || (msix && !mode)) + return; + + if (msix) + mode = __F0_INTX_STATUS_MSIX; + else + mode = __F0_INTX_STATUS_INTA; + + r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); + r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); + bfa_trc(ioc, r32); + + writel(r32, rb + FNC_PERS_REG); +} + +static bfa_boolean_t +bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc) +{ + u32 r32; + + r32 = readl(ioc->ioc_regs.lpu_read_stat); + if (r32) { + writel(1, ioc->ioc_regs.lpu_read_stat); + return BFA_TRUE; + } + + return BFA_FALSE; +} + +/* + * Cleanup hw semaphore and usecnt registers + */ +static void +bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) +{ + + bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + writel(0, ioc->ioc_regs.ioc_usage_reg); + readl(ioc->ioc_regs.ioc_usage_sem_reg); + writel(1, ioc->ioc_regs.ioc_usage_sem_reg); + + writel(0, ioc->ioc_regs.ioc_fail_sync); + /* + * Read the hw sem reg to make sure that it is locked + * before we clear it. If it is not locked, writing 1 + * will lock it instead of clearing it. + */ + readl(ioc->ioc_regs.ioc_sem_reg); + writel(1, ioc->ioc_regs.ioc_sem_reg); +} + +static bfa_boolean_t +bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc) +{ + uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); + uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); + + /* + * Driver load time. If the sync required bit for this PCI fn + * is set, it is due to an unclean exit by the driver for this + * PCI fn in the previous incarnation. Whoever comes here first + * should clean it up, no matter which PCI fn. 
+ */ + + if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) { + writel(0, ioc->ioc_regs.ioc_fail_sync); + writel(1, ioc->ioc_regs.ioc_usage_reg); + writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); + return BFA_TRUE; + } + + return bfa_ioc_ct_sync_complete(ioc); +} + +/* + * Synchronized IOC failure processing routines + */ +static void +bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc) +{ + uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); + uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc); + + writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync); +} + +static void +bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc) +{ + uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); + uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) | + bfa_ioc_ct_sync_pos(ioc); + + writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync); +} + +static void +bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc) +{ + uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); + + writel((r32 | bfa_ioc_ct_sync_pos(ioc)), + ioc->ioc_regs.ioc_fail_sync); +} + +static bfa_boolean_t +bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc) +{ + uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); + uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); + uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32); + uint32_t tmp_ackd; + + if (sync_ackd == 0) + return BFA_TRUE; + + /* + * The check below is to see whether any other PCI fn + * has reinitialized the ASIC (reset sync_ackd bits) + * and failed again while this IOC was waiting for hw + * semaphore (in bfa_iocpf_sm_semwait()). + */ + tmp_ackd = sync_ackd; + if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) && + !(sync_ackd & bfa_ioc_ct_sync_pos(ioc))) + sync_ackd |= bfa_ioc_ct_sync_pos(ioc); + + if (sync_reqd == sync_ackd) { + writel(bfa_ioc_ct_clear_sync_ackd(r32), + ioc->ioc_regs.ioc_fail_sync); + writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate); + return BFA_TRUE; + } + + /* + * If another PCI fn reinitialized and failed again while + * this IOC was waiting for hw sem, the sync_ackd bit for + * this IOC need to be set again to allow reinitialization. + */ + if (tmp_ackd != sync_ackd) + writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync); + + return BFA_FALSE; +} + +/* + * Called from bfa_ioc_attach() to map asic specific calls. + */ +static void +bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif) +{ + hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock; + hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock; + hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail; + hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset; + hwif->ioc_sync_start = bfa_ioc_ct_sync_start; + hwif->ioc_sync_join = bfa_ioc_ct_sync_join; + hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave; + hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack; + hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete; + hwif->ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate; + hwif->ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate; + hwif->ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate; + hwif->ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate; +} + +/* + * Called from bfa_ioc_attach() to map asic specific calls. 
+ */ +void +bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc) +{ + bfa_ioc_set_ctx_hwif(ioc, &hwif_ct); + + hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init; + hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; + hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; + hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; + ioc->ioc_hwif = &hwif_ct; +} + +/* + * Called from bfa_ioc_attach() to map asic specific calls. + */ +void +bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc) +{ + bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2); + + hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init; + hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init; + hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port; + hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat; + hwif_ct2.ioc_isr_mode_set = NULL; + ioc->ioc_hwif = &hwif_ct2; +} + +/* + * Workaround for MSI-X resource allocation for catapult-2 with no asic block + */ +#define HOSTFN_MSIX_DEFAULT 64 +#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138 +#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c +#define __MSIX_VT_NUMVT__MK 0x003ff800 +#define __MSIX_VT_NUMVT__SH 11 +#define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH) +#define __MSIX_VT_OFST_ 0x000007ff +void +bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32; + + r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT); + if (r32 & __MSIX_VT_NUMVT__MK) { + writel(r32 & __MSIX_VT_OFST_, + rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); + return; + } + + writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) | + HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), + rb + HOSTFN_MSIX_VT_OFST_NUMVT); + writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), + rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); +} + +bfa_status_t +bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode) +{ + u32 pll_sclk, pll_fclk, r32; + bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC); + + pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST | + __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) | + __APP_PLL_SCLK_JITLMT0_1(3U) | + __APP_PLL_SCLK_CNTLMT0_1(1U); + pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST | + __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) | + __APP_PLL_LCLK_JITLMT0_1(3U) | + __APP_PLL_LCLK_CNTLMT0_1(1U); + + if (fcmode) { + writel(0, (rb + OP_MODE)); + writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 | + __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG)); + } else { + writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE)); + writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG)); + } + writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); + writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); + writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_LCLK_CTL_REG); + writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET | + __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET | + __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG); + readl(rb + HOSTFN0_INT_MSK); + udelay(2000); + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); + writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG); + + if (!fcmode) { + 
writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0)); + writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1)); + } + r32 = readl((rb + PSS_CTL_REG)); + r32 &= ~__PSS_LMEM_RESET; + writel(r32, (rb + PSS_CTL_REG)); + udelay(1000); + if (!fcmode) { + writel(0, (rb + PMM_1T_RESET_REG_P0)); + writel(0, (rb + PMM_1T_RESET_REG_P1)); + } + + writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG)); + udelay(1000); + r32 = readl((rb + MBIST_STAT_REG)); + writel(0, (rb + MBIST_CTL_REG)); + return BFA_STATUS_OK; +} + +static void +bfa_ioc_ct2_sclk_init(void __iomem *rb) +{ + u32 r32; + + /* + * put s_clk PLL and PLL FSM in reset + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN); + r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS | + __APP_PLL_SCLK_LOGIC_SOFT_RESET); + writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * Ignore mode and program for the max clock (which is FC16) + * Firmware/NFC will do the PLL init appropiately + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2); + writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * while doing PLL init dont clock gate ethernet subsystem + */ + r32 = readl((rb + CT2_CHIP_MISC_PRG)); + writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG)); + + r32 = readl((rb + CT2_PCIE_MISC_REG)); + writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG)); + + /* + * set sclk value + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL | + __APP_PLL_SCLK_CLK_DIV2); + writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * poll for s_clk lock or delay 1ms + */ + udelay(1000); +} + +static void +bfa_ioc_ct2_lclk_init(void __iomem *rb) +{ + u32 r32; + + /* + * put l_clk PLL and PLL FSM in reset + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN); + r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS | + __APP_PLL_LCLK_LOGIC_SOFT_RESET); + writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* + * set LPU speed (set for FC16 which will work for other modes) + */ + r32 = readl((rb + CT2_CHIP_MISC_PRG)); + writel(r32, (rb + CT2_CHIP_MISC_PRG)); + + /* + * set LPU half speed (set for FC16 which will work for other modes) + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* + * set lclk for mode (set for FC16) + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED); + r32 |= 0x20c1731b; + writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* + * poll for s_clk lock or delay 1ms + */ + udelay(1000); +} + +static void +bfa_ioc_ct2_mem_init(void __iomem *rb) +{ + u32 r32; + + r32 = readl((rb + PSS_CTL_REG)); + r32 &= ~__PSS_LMEM_RESET; + writel(r32, (rb + PSS_CTL_REG)); + udelay(1000); + + writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG)); + udelay(1000); + writel(0, (rb + CT2_MBIST_CTL_REG)); +} + +static void +bfa_ioc_ct2_mac_reset(void __iomem *rb) +{ + /* put port0, port1 MAC & AHB in reset */ + writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), + rb + CT2_CSI_MAC_CONTROL_REG(0)); + writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), + rb + CT2_CSI_MAC_CONTROL_REG(1)); +} + +static void +bfa_ioc_ct2_enable_flash(void __iomem *rb) +{ + u32 r32; + + r32 = readl((rb + PSS_GPIO_OUT_REG)); + writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG)); + r32 = readl((rb + PSS_GPIO_OE_REG)); + writel(r32 | 1, (rb + 
PSS_GPIO_OE_REG)); +} + +#define CT2_NFC_MAX_DELAY 1000 +#define CT2_NFC_PAUSE_MAX_DELAY 4000 +#define CT2_NFC_VER_VALID 0x147 +#define CT2_NFC_STATE_RUNNING 0x20000001 +#define BFA_IOC_PLL_POLL 1000000 + +static bfa_boolean_t +bfa_ioc_ct2_nfc_halted(void __iomem *rb) +{ + u32 r32; + + r32 = readl(rb + CT2_NFC_CSR_SET_REG); + if (r32 & __NFC_CONTROLLER_HALTED) + return BFA_TRUE; + + return BFA_FALSE; +} + +static void +bfa_ioc_ct2_nfc_halt(void __iomem *rb) +{ + int i; + + writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG); + for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { + if (bfa_ioc_ct2_nfc_halted(rb)) + break; + udelay(1000); + } + WARN_ON(!bfa_ioc_ct2_nfc_halted(rb)); +} + +static void +bfa_ioc_ct2_nfc_resume(void __iomem *rb) +{ + u32 r32; + int i; + + writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG); + for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { + r32 = readl(rb + CT2_NFC_CSR_SET_REG); + if (!(r32 & __NFC_CONTROLLER_HALTED)) + return; + udelay(1000); + } + WARN_ON(1); +} + +static void +bfa_ioc_ct2_clk_reset(void __iomem *rb) +{ + u32 r32; + + bfa_ioc_ct2_sclk_init(rb); + bfa_ioc_ct2_lclk_init(rb); + + /* + * release soft reset on s_clk & l_clk + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, + (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, + (rb + CT2_APP_PLL_LCLK_CTL_REG)); + +} + +static void +bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb) +{ + u32 r32, i; + + r32 = readl((rb + PSS_CTL_REG)); + r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET); + writel(r32, (rb + PSS_CTL_REG)); + + writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG); + + for (i = 0; i < BFA_IOC_PLL_POLL; i++) { + r32 = readl(rb + CT2_NFC_FLASH_STS_REG); + + if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)) + break; + } + WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)); + + for (i = 0; i < BFA_IOC_PLL_POLL; i++) { + r32 = readl(rb + CT2_NFC_FLASH_STS_REG); + + if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)) + break; + } + WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)); + + r32 = readl(rb + CT2_CSI_FW_CTL_REG); + WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS)); +} + +static void +bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb) +{ + u32 r32; + int i; + + if (bfa_ioc_ct2_nfc_halted(rb)) + bfa_ioc_ct2_nfc_resume(rb); + for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) { + r32 = readl(rb + CT2_NFC_STS_REG); + if (r32 == CT2_NFC_STATE_RUNNING) + return; + udelay(1000); + } + + r32 = readl(rb + CT2_NFC_STS_REG); + WARN_ON(!(r32 == CT2_NFC_STATE_RUNNING)); +} + +bfa_status_t +bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) +{ + u32 wgn, r32, nfc_ver; + + wgn = readl(rb + CT2_WGN_STATUS); + + if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { + /* + * If flash is corrupted, enable flash explicitly + */ + bfa_ioc_ct2_clk_reset(rb); + bfa_ioc_ct2_enable_flash(rb); + + bfa_ioc_ct2_mac_reset(rb); + + bfa_ioc_ct2_clk_reset(rb); + bfa_ioc_ct2_enable_flash(rb); + + } else { + nfc_ver = readl(rb + CT2_RSC_GPR15_REG); + + if ((nfc_ver >= CT2_NFC_VER_VALID) && + (wgn == (__A2T_AHB_LOAD | __WGN_READY))) { + + bfa_ioc_ct2_wait_till_nfc_running(rb); + + bfa_ioc_ct2_nfc_clk_reset(rb); + } else { + bfa_ioc_ct2_nfc_halt(rb); + + bfa_ioc_ct2_clk_reset(rb); + bfa_ioc_ct2_mac_reset(rb); + bfa_ioc_ct2_clk_reset(rb); + + } + } + /* + * The very first PCIe DMA Read done by LPU fails with a fatal error, + * when Address Translation Cache (ATC) has 
been enabled by system BIOS. + * + * Workaround: + * Disable Invalidated Tag Match Enable capability by setting the bit 26 + * of CHIP_MISC_PRG to 0, by default it is set to 1. + */ + r32 = readl(rb + CT2_CHIP_MISC_PRG); + writel((r32 & 0xfbffffff), (rb + CT2_CHIP_MISC_PRG)); + + /* + * Mask the interrupts and clear any + * pending interrupts left by BIOS/EFI + */ + + writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); + writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); + + /* For first time initialization, no need to clear interrupts */ + r32 = readl(rb + HOST_SEM5_REG); + if (r32 & 0x1) { + r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); + if (r32 == 1) { + writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); + readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); + } + r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); + if (r32 == 1) { + writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); + readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); + } + } + + bfa_ioc_ct2_mem_init(rb); + + writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG)); + writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG)); + + return BFA_STATUS_OK; +} + +static void +bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc, + enum bfi_ioc_state fwstate) +{ + writel(fwstate, ioc->ioc_regs.ioc_fwstate); +} + +static enum bfi_ioc_state +bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc) +{ + return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate); +} + +static void +bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc, + enum bfi_ioc_state fwstate) +{ + writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate); +} + +static enum bfi_ioc_state +bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc) +{ + return (enum bfi_ioc_state) readl(ioc->ioc_regs.alt_ioc_fwstate); +} diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h new file mode 100644 index 000000000..578e7678b --- /dev/null +++ b/drivers/scsi/bfa/bfa_modules.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +/* + * bfa_modules.h BFA modules + */ + +#ifndef __BFA_MODULES_H__ +#define __BFA_MODULES_H__ + +#include "bfa_cs.h" +#include "bfa.h" +#include "bfa_svc.h" +#include "bfa_fcpim.h" +#include "bfa_port.h" + +struct bfa_modules_s { + struct bfa_fcdiag_s fcdiag; /* fcdiag module */ + struct bfa_fcport_s fcport; /* fc port module */ + struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */ + struct bfa_lps_mod_s lps_mod; /* fcxp module */ + struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */ + struct bfa_rport_mod_s rport_mod; /* remote port module */ + struct bfa_fcp_mod_s fcp_mod; /* FCP initiator module */ + struct bfa_sgpg_mod_s sgpg_mod; /* SG page module */ + struct bfa_port_s port; /* Physical port module */ + struct bfa_ablk_s ablk; /* ASIC block config module */ + struct bfa_cee_s cee; /* CEE Module */ + struct bfa_sfp_s sfp; /* SFP module */ + struct bfa_flash_s flash; /* flash module */ + struct bfa_diag_s diag_mod; /* diagnostics module */ + struct bfa_phy_s phy; /* phy module */ + struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */ + struct bfa_fru_s fru; /* fru module */ +}; + +/* + * !!! Only append to the enums defined here to avoid any versioning + * !!! 
needed between trace utility and driver version + */ +enum { + BFA_TRC_HAL_CORE = 1, + BFA_TRC_HAL_FCXP = 2, + BFA_TRC_HAL_FCPIM = 3, + BFA_TRC_HAL_IOCFC_CT = 4, + BFA_TRC_HAL_IOCFC_CB = 5, +}; + +#define BFA_CACHELINE_SZ (256) + +struct bfa_s { + void *bfad; /* BFA driver instance */ + struct bfa_plog_s *plog; /* portlog buffer */ + struct bfa_trc_mod_s *trcmod; /* driver tracing */ + struct bfa_ioc_s ioc; /* IOC module */ + struct bfa_iocfc_s iocfc; /* IOCFC module */ + struct bfa_timer_mod_s timer_mod; /* timer module */ + struct bfa_modules_s modules; /* BFA modules */ + struct list_head comp_q; /* pending completions */ + bfa_boolean_t queue_process; /* queue processing enabled */ + struct list_head reqq_waitq[BFI_IOC_MAX_CQS]; + bfa_boolean_t fcs; /* FCS is attached to BFA */ + struct bfa_msix_s msix; + int bfa_aen_seq; + bfa_boolean_t intr_enabled; /* Status of interrupts */ +}; + +extern bfa_boolean_t bfa_auto_recover; + +void bfa_dconf_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *); +void bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_dconf_iocdisable(struct bfa_s *); +void bfa_fcp_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_fcp_iocdisable(struct bfa_s *bfa); +void bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *); +void bfa_fcport_start(struct bfa_s *); +void bfa_fcport_iocdisable(struct bfa_s *); +void bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_fcport_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_fcxp_iocdisable(struct bfa_s *); +void bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_fcxp_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_fcdiag_iocdisable(struct bfa_s *); +void bfa_fcdiag_attach(struct bfa_s *bfa, void *, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_ioim_lm_init(struct bfa_s *); +void bfa_lps_iocdisable(struct bfa_s *bfa); +void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_lps_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_rport_iocdisable(struct bfa_s *bfa); +void bfa_rport_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_rport_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_sgpg_attach(struct bfa_s *, void *bfad, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_uf_iocdisable(struct bfa_s *); +void bfa_uf_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_uf_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_uf_start(struct bfa_s *); + +#endif /* __BFA_MODULES_H__ */ diff --git a/drivers/scsi/bfa/bfa_plog.h b/drivers/scsi/bfa/bfa_plog.h new file mode 100644 index 000000000..0ed67339e --- /dev/null +++ b/drivers/scsi/bfa/bfa_plog.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. 
+ * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ +#ifndef __BFA_PORTLOG_H__ +#define __BFA_PORTLOG_H__ + +#include "bfa_fc.h" +#include "bfa_defs.h" + +#define BFA_PL_NLOG_ENTS 256 +#define BFA_PL_LOG_REC_INCR(_x) ((_x)++, (_x) %= BFA_PL_NLOG_ENTS) + +#define BFA_PL_STRING_LOG_SZ 32 /* number of chars in string log */ +#define BFA_PL_INT_LOG_SZ 8 /* number of integers in the integer log */ + +enum bfa_plog_log_type { + BFA_PL_LOG_TYPE_INVALID = 0, + BFA_PL_LOG_TYPE_INT = 1, + BFA_PL_LOG_TYPE_STRING = 2, +}; + +/* + * the (fixed size) record format for each entry in the portlog + */ +struct bfa_plog_rec_s { + u64 tv; /* timestamp */ + u8 port; /* Source port that logged this entry */ + u8 mid; /* module id */ + u8 eid; /* indicates Rx, Tx, IOCTL, etc. bfa_plog_eid */ + u8 log_type; /* string/integer log, bfa_plog_log_type_t */ + u8 log_num_ints; + /* + * interpreted only if log_type is INT_LOG. indicates number of + * integers in the int_log[] (0-PL_INT_LOG_SZ). + */ + u8 rsvd; + u16 misc; /* can be used to indicate fc frame length */ + union { + char string_log[BFA_PL_STRING_LOG_SZ]; + u32 int_log[BFA_PL_INT_LOG_SZ]; + } log_entry; + +}; + +/* + * the following #defines will be used by the logging entities to indicate + * their module id. BFAL will convert the integer value to string format + * +* process to be used while changing the following #defines: + * - Always add new entries at the end + * - define corresponding string in BFAL + * - Do not remove any entry or rearrange the order. + */ +enum bfa_plog_mid { + BFA_PL_MID_INVALID = 0, + BFA_PL_MID_DEBUG = 1, + BFA_PL_MID_DRVR = 2, + BFA_PL_MID_HAL = 3, + BFA_PL_MID_HAL_FCXP = 4, + BFA_PL_MID_HAL_UF = 5, + BFA_PL_MID_FCS = 6, + BFA_PL_MID_LPS = 7, + BFA_PL_MID_MAX = 8 +}; + +#define BFA_PL_MID_STRLEN 8 +struct bfa_plog_mid_strings_s { + char m_str[BFA_PL_MID_STRLEN]; +}; + +/* + * the following #defines will be used by the logging entities to indicate + * their event type. BFAL will convert the integer value to string format + * +* process to be used while changing the following #defines: + * - Always add new entries at the end + * - define corresponding string in BFAL + * - Do not remove any entry or rearrange the order. 
+ */ +enum bfa_plog_eid { + BFA_PL_EID_INVALID = 0, + BFA_PL_EID_IOC_DISABLE = 1, + BFA_PL_EID_IOC_ENABLE = 2, + BFA_PL_EID_PORT_DISABLE = 3, + BFA_PL_EID_PORT_ENABLE = 4, + BFA_PL_EID_PORT_ST_CHANGE = 5, + BFA_PL_EID_TX = 6, + BFA_PL_EID_TX_ACK1 = 7, + BFA_PL_EID_TX_RJT = 8, + BFA_PL_EID_TX_BSY = 9, + BFA_PL_EID_RX = 10, + BFA_PL_EID_RX_ACK1 = 11, + BFA_PL_EID_RX_RJT = 12, + BFA_PL_EID_RX_BSY = 13, + BFA_PL_EID_CT_IN = 14, + BFA_PL_EID_CT_OUT = 15, + BFA_PL_EID_DRIVER_START = 16, + BFA_PL_EID_RSCN = 17, + BFA_PL_EID_DEBUG = 18, + BFA_PL_EID_MISC = 19, + BFA_PL_EID_FIP_FCF_DISC = 20, + BFA_PL_EID_FIP_FCF_CVL = 21, + BFA_PL_EID_LOGIN = 22, + BFA_PL_EID_LOGO = 23, + BFA_PL_EID_TRUNK_SCN = 24, + BFA_PL_EID_MAX +}; + +#define BFA_PL_ENAME_STRLEN 8 +struct bfa_plog_eid_strings_s { + char e_str[BFA_PL_ENAME_STRLEN]; +}; + +#define BFA_PL_SIG_LEN 8 +#define BFA_PL_SIG_STR "12pl123" + +/* + * per port circular log buffer + */ +struct bfa_plog_s { + char plog_sig[BFA_PL_SIG_LEN]; /* Start signature */ + u8 plog_enabled; + u8 rsvd[7]; + u32 ticks; + u16 head; + u16 tail; + struct bfa_plog_rec_s plog_recs[BFA_PL_NLOG_ENTS]; +}; + +void bfa_plog_init(struct bfa_plog_s *plog); +void bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, u16 misc, char *log_str); +void bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, u16 misc, + u32 *intarr, u32 num_ints); +void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr); +void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, u16 misc, + struct fchs_s *fchdr, u32 pld_w0); + +#endif /* __BFA_PORTLOG_H__ */ diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c new file mode 100644 index 000000000..cfe2c9c33 --- /dev/null +++ b/drivers/scsi/bfa/bfa_port.c @@ -0,0 +1,864 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
+ */ + +#include "bfad_drv.h" +#include "bfa_defs_svc.h" +#include "bfa_port.h" +#include "bfi.h" +#include "bfa_ioc.h" + + +BFA_TRC_FILE(CNA, PORT); + +static void +bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats) +{ + u32 *dip = (u32 *) stats; + __be32 t0, t1; + int i; + + for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32); + i += 2) { + t0 = dip[i]; + t1 = dip[i + 1]; +#ifdef __BIG_ENDIAN + dip[i] = be32_to_cpu(t0); + dip[i + 1] = be32_to_cpu(t1); +#else + dip[i] = be32_to_cpu(t1); + dip[i + 1] = be32_to_cpu(t0); +#endif + } +} + +/* + * bfa_port_enable_isr() + * + * + * @param[in] port - Pointer to the port module + * status - Return status from the f/w + * + * @return void + */ +static void +bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status) +{ + bfa_trc(port, status); + port->endis_pending = BFA_FALSE; + port->endis_cbfn(port->endis_cbarg, status); +} + +/* + * bfa_port_disable_isr() + * + * + * @param[in] port - Pointer to the port module + * status - Return status from the f/w + * + * @return void + */ +static void +bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status) +{ + bfa_trc(port, status); + port->endis_pending = BFA_FALSE; + port->endis_cbfn(port->endis_cbarg, status); +} + +/* + * bfa_port_get_stats_isr() + * + * + * @param[in] port - Pointer to the Port module + * status - Return status from the f/w + * + * @return void + */ +static void +bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status) +{ + port->stats_status = status; + port->stats_busy = BFA_FALSE; + + if (status == BFA_STATUS_OK) { + memcpy(port->stats, port->stats_dma.kva, + sizeof(union bfa_port_stats_u)); + bfa_port_stats_swap(port, port->stats); + + port->stats->fc.secs_reset = ktime_get_seconds() - port->stats_reset_time; + } + + if (port->stats_cbfn) { + port->stats_cbfn(port->stats_cbarg, status); + port->stats_cbfn = NULL; + } +} + +/* + * bfa_port_clear_stats_isr() + * + * + * @param[in] port - Pointer to the Port module + * status - Return status from the f/w + * + * @return void + */ +static void +bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status) +{ + port->stats_status = status; + port->stats_busy = BFA_FALSE; + + /* + * re-initialize time stamp for stats reset + */ + port->stats_reset_time = ktime_get_seconds(); + + if (port->stats_cbfn) { + port->stats_cbfn(port->stats_cbarg, status); + port->stats_cbfn = NULL; + } +} + +/* + * bfa_port_isr() + * + * + * @param[in] Pointer to the Port module data structure. + * + * @return void + */ +static void +bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m) +{ + struct bfa_port_s *port = (struct bfa_port_s *) cbarg; + union bfi_port_i2h_msg_u *i2hmsg; + + i2hmsg = (union bfi_port_i2h_msg_u *) m; + bfa_trc(port, m->mh.msg_id); + + switch (m->mh.msg_id) { + case BFI_PORT_I2H_ENABLE_RSP: + if (port->endis_pending == BFA_FALSE) + break; + bfa_port_enable_isr(port, i2hmsg->enable_rsp.status); + break; + + case BFI_PORT_I2H_DISABLE_RSP: + if (port->endis_pending == BFA_FALSE) + break; + bfa_port_disable_isr(port, i2hmsg->disable_rsp.status); + break; + + case BFI_PORT_I2H_GET_STATS_RSP: + /* Stats busy flag is still set? 
(may be cmd timed out) */ + if (port->stats_busy == BFA_FALSE) + break; + bfa_port_get_stats_isr(port, i2hmsg->getstats_rsp.status); + break; + + case BFI_PORT_I2H_CLEAR_STATS_RSP: + if (port->stats_busy == BFA_FALSE) + break; + bfa_port_clear_stats_isr(port, i2hmsg->clearstats_rsp.status); + break; + + default: + WARN_ON(1); + } +} + +/* + * bfa_port_meminfo() + * + * + * @param[in] void + * + * @return Size of DMA region + */ +u32 +bfa_port_meminfo(void) +{ + return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ); +} + +/* + * bfa_port_mem_claim() + * + * + * @param[in] port Port module pointer + * dma_kva Kernel Virtual Address of Port DMA Memory + * dma_pa Physical Address of Port DMA Memory + * + * @return void + */ +void +bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa) +{ + port->stats_dma.kva = dma_kva; + port->stats_dma.pa = dma_pa; +} + +/* + * bfa_port_enable() + * + * Send the Port enable request to the f/w + * + * @param[in] Pointer to the Port module data structure. + * + * @return Status + */ +bfa_status_t +bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, + void *cbarg) +{ + struct bfi_port_generic_req_s *m; + + /* If port is PBC disabled, return error */ + if (port->pbc_disabled) { + bfa_trc(port, BFA_STATUS_PBC); + return BFA_STATUS_PBC; + } + + if (bfa_ioc_is_disabled(port->ioc)) { + bfa_trc(port, BFA_STATUS_IOC_DISABLED); + return BFA_STATUS_IOC_DISABLED; + } + + if (!bfa_ioc_is_operational(port->ioc)) { + bfa_trc(port, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + /* if port is d-port enabled, return error */ + if (port->dport_enabled) { + bfa_trc(port, BFA_STATUS_DPORT_ERR); + return BFA_STATUS_DPORT_ERR; + } + + if (port->endis_pending) { + bfa_trc(port, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + m = (struct bfi_port_generic_req_s *) port->endis_mb.msg; + + port->msgtag++; + port->endis_cbfn = cbfn; + port->endis_cbarg = cbarg; + port->endis_pending = BFA_TRUE; + + bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_ENABLE_REQ, + bfa_ioc_portid(port->ioc)); + bfa_ioc_mbox_queue(port->ioc, &port->endis_mb); + + return BFA_STATUS_OK; +} + +/* + * bfa_port_disable() + * + * Send the Port disable request to the f/w + * + * @param[in] Pointer to the Port module data structure. 
+ * + * @return Status + */ +bfa_status_t +bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, + void *cbarg) +{ + struct bfi_port_generic_req_s *m; + + /* If port is PBC disabled, return error */ + if (port->pbc_disabled) { + bfa_trc(port, BFA_STATUS_PBC); + return BFA_STATUS_PBC; + } + + if (bfa_ioc_is_disabled(port->ioc)) { + bfa_trc(port, BFA_STATUS_IOC_DISABLED); + return BFA_STATUS_IOC_DISABLED; + } + + if (!bfa_ioc_is_operational(port->ioc)) { + bfa_trc(port, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + /* if port is d-port enabled, return error */ + if (port->dport_enabled) { + bfa_trc(port, BFA_STATUS_DPORT_ERR); + return BFA_STATUS_DPORT_ERR; + } + + if (port->endis_pending) { + bfa_trc(port, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + m = (struct bfi_port_generic_req_s *) port->endis_mb.msg; + + port->msgtag++; + port->endis_cbfn = cbfn; + port->endis_cbarg = cbarg; + port->endis_pending = BFA_TRUE; + + bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ, + bfa_ioc_portid(port->ioc)); + bfa_ioc_mbox_queue(port->ioc, &port->endis_mb); + + return BFA_STATUS_OK; +} + +/* + * bfa_port_get_stats() + * + * Send the request to the f/w to fetch Port statistics. + * + * @param[in] Pointer to the Port module data structure. + * + * @return Status + */ +bfa_status_t +bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats, + bfa_port_stats_cbfn_t cbfn, void *cbarg) +{ + struct bfi_port_get_stats_req_s *m; + + if (!bfa_ioc_is_operational(port->ioc)) { + bfa_trc(port, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (port->stats_busy) { + bfa_trc(port, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + m = (struct bfi_port_get_stats_req_s *) port->stats_mb.msg; + + port->stats = stats; + port->stats_cbfn = cbfn; + port->stats_cbarg = cbarg; + port->stats_busy = BFA_TRUE; + bfa_dma_be_addr_set(m->dma_addr, port->stats_dma.pa); + + bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_GET_STATS_REQ, + bfa_ioc_portid(port->ioc)); + bfa_ioc_mbox_queue(port->ioc, &port->stats_mb); + + return BFA_STATUS_OK; +} + +/* + * bfa_port_clear_stats() + * + * + * @param[in] Pointer to the Port module data structure. + * + * @return Status + */ +bfa_status_t +bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn, + void *cbarg) +{ + struct bfi_port_generic_req_s *m; + + if (!bfa_ioc_is_operational(port->ioc)) { + bfa_trc(port, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (port->stats_busy) { + bfa_trc(port, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + m = (struct bfi_port_generic_req_s *) port->stats_mb.msg; + + port->stats_cbfn = cbfn; + port->stats_cbarg = cbarg; + port->stats_busy = BFA_TRUE; + + bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_CLEAR_STATS_REQ, + bfa_ioc_portid(port->ioc)); + bfa_ioc_mbox_queue(port->ioc, &port->stats_mb); + + return BFA_STATUS_OK; +} + +/* + * bfa_port_notify() + * + * Port module IOC event handler + * + * @param[in] Pointer to the Port module data structure. 
+ * @param[in] IOC event structure + * + * @return void + */ +void +bfa_port_notify(void *arg, enum bfa_ioc_event_e event) +{ + struct bfa_port_s *port = (struct bfa_port_s *) arg; + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + /* Fail any pending get_stats/clear_stats requests */ + if (port->stats_busy) { + if (port->stats_cbfn) + port->stats_cbfn(port->stats_cbarg, + BFA_STATUS_FAILED); + port->stats_cbfn = NULL; + port->stats_busy = BFA_FALSE; + } + + /* Clear any enable/disable is pending */ + if (port->endis_pending) { + if (port->endis_cbfn) + port->endis_cbfn(port->endis_cbarg, + BFA_STATUS_FAILED); + port->endis_cbfn = NULL; + port->endis_pending = BFA_FALSE; + } + + /* clear D-port mode */ + if (port->dport_enabled) + bfa_port_set_dportenabled(port, BFA_FALSE); + break; + default: + break; + } +} + +/* + * bfa_port_attach() + * + * + * @param[in] port - Pointer to the Port module data structure + * ioc - Pointer to the ioc module data structure + * dev - Pointer to the device driver module data structure + * The device driver specific mbox ISR functions have + * this pointer as one of the parameters. + * trcmod - + * + * @return void + */ +void +bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, + void *dev, struct bfa_trc_mod_s *trcmod) +{ + WARN_ON(!port); + + port->dev = dev; + port->ioc = ioc; + port->trcmod = trcmod; + + port->stats_busy = BFA_FALSE; + port->endis_pending = BFA_FALSE; + port->stats_cbfn = NULL; + port->endis_cbfn = NULL; + port->pbc_disabled = BFA_FALSE; + port->dport_enabled = BFA_FALSE; + + bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port); + bfa_q_qe_init(&port->ioc_notify); + bfa_ioc_notify_init(&port->ioc_notify, bfa_port_notify, port); + list_add_tail(&port->ioc_notify.qe, &port->ioc->notify_q); + + /* + * initialize time stamp for stats reset + */ + port->stats_reset_time = ktime_get_seconds(); + + bfa_trc(port, 0); +} + +/* + * bfa_port_set_dportenabled(); + * + * Port module- set pbc disabled flag + * + * @param[in] port - Pointer to the Port module data structure + * + * @return void + */ +void +bfa_port_set_dportenabled(struct bfa_port_s *port, bfa_boolean_t enabled) +{ + port->dport_enabled = enabled; +} + +/* + * CEE module specific definitions + */ + +/* + * bfa_cee_get_attr_isr() + * + * @brief CEE ISR for get-attributes responses from f/w + * + * @param[in] cee - Pointer to the CEE module + * status - Return status from the f/w + * + * @return void + */ +static void +bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status) +{ + struct bfa_cee_lldp_cfg_s *lldp_cfg = &cee->attr->lldp_remote; + + cee->get_attr_status = status; + bfa_trc(cee, 0); + if (status == BFA_STATUS_OK) { + bfa_trc(cee, 0); + memcpy(cee->attr, cee->attr_dma.kva, + sizeof(struct bfa_cee_attr_s)); + lldp_cfg->time_to_live = be16_to_cpu(lldp_cfg->time_to_live); + lldp_cfg->enabled_system_cap = + be16_to_cpu(lldp_cfg->enabled_system_cap); + } + cee->get_attr_pending = BFA_FALSE; + if (cee->cbfn.get_attr_cbfn) { + bfa_trc(cee, 0); + cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status); + } +} + +/* + * bfa_cee_get_stats_isr() + * + * @brief CEE ISR for get-stats responses from f/w + * + * @param[in] cee - Pointer to the CEE module + * status - Return status from the f/w + * + * @return void + */ +static void +bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status) +{ + u32 *buffer; + int i; + + cee->get_stats_status = status; + bfa_trc(cee, 0); + if (status == BFA_STATUS_OK) { + bfa_trc(cee, 0); + 
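/* cee->stats is the caller's buffer; stats_dma.kva holds the f/w-written copy */ +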
memcpy(cee->stats, cee->stats_dma.kva, + sizeof(struct bfa_cee_stats_s)); + /* swap the cee stats */ + buffer = (u32 *)cee->stats; + for (i = 0; i < (sizeof(struct bfa_cee_stats_s) / + sizeof(u32)); i++) + buffer[i] = cpu_to_be32(buffer[i]); + } + cee->get_stats_pending = BFA_FALSE; + bfa_trc(cee, 0); + if (cee->cbfn.get_stats_cbfn) { + bfa_trc(cee, 0); + cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status); + } +} + +/* + * bfa_cee_reset_stats_isr() + * + * @brief CEE ISR for reset-stats responses from f/w + * + * @param[in] cee - Pointer to the CEE module + * status - Return status from the f/w + * + * @return void + */ +static void +bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status) +{ + cee->reset_stats_status = status; + cee->reset_stats_pending = BFA_FALSE; + if (cee->cbfn.reset_stats_cbfn) + cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status); +} + +/* + * bfa_cee_meminfo() + * + * @brief Returns the size of the DMA memory needed by CEE module + * + * @param[in] void + * + * @return Size of DMA region + */ +u32 +bfa_cee_meminfo(void) +{ + return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ) + + BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ); +} + +/* + * bfa_cee_mem_claim() + * + * @brief Initialized CEE DMA Memory + * + * @param[in] cee CEE module pointer + * dma_kva Kernel Virtual Address of CEE DMA Memory + * dma_pa Physical Address of CEE DMA Memory + * + * @return void + */ +void +bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa) +{ + cee->attr_dma.kva = dma_kva; + cee->attr_dma.pa = dma_pa; + cee->stats_dma.kva = dma_kva + BFA_ROUNDUP( + sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ); + cee->stats_dma.pa = dma_pa + BFA_ROUNDUP( + sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ); + cee->attr = (struct bfa_cee_attr_s *) dma_kva; + cee->stats = (struct bfa_cee_stats_s *) (dma_kva + BFA_ROUNDUP( + sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ)); +} + +/* + * bfa_cee_get_attr() + * + * @brief + * Send the request to the f/w to fetch CEE attributes. + * + * @param[in] Pointer to the CEE module data structure. + * + * @return Status + */ + +bfa_status_t +bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr, + bfa_cee_get_attr_cbfn_t cbfn, void *cbarg) +{ + struct bfi_cee_get_req_s *cmd; + + WARN_ON((cee == NULL) || (cee->ioc == NULL)); + bfa_trc(cee, 0); + if (!bfa_ioc_is_operational(cee->ioc)) { + bfa_trc(cee, 0); + return BFA_STATUS_IOC_FAILURE; + } + if (cee->get_attr_pending == BFA_TRUE) { + bfa_trc(cee, 0); + return BFA_STATUS_DEVBUSY; + } + cee->get_attr_pending = BFA_TRUE; + cmd = (struct bfi_cee_get_req_s *) cee->get_cfg_mb.msg; + cee->attr = attr; + cee->cbfn.get_attr_cbfn = cbfn; + cee->cbfn.get_attr_cbarg = cbarg; + bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ, + bfa_ioc_portid(cee->ioc)); + bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa); + bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb); + + return BFA_STATUS_OK; +} + +/* + * bfa_cee_get_stats() + * + * @brief + * Send the request to the f/w to fetch CEE statistics. + * + * @param[in] Pointer to the CEE module data structure. 
+ * + * @return Status + */ + +bfa_status_t +bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats, + bfa_cee_get_stats_cbfn_t cbfn, void *cbarg) +{ + struct bfi_cee_get_req_s *cmd; + + WARN_ON((cee == NULL) || (cee->ioc == NULL)); + + if (!bfa_ioc_is_operational(cee->ioc)) { + bfa_trc(cee, 0); + return BFA_STATUS_IOC_FAILURE; + } + if (cee->get_stats_pending == BFA_TRUE) { + bfa_trc(cee, 0); + return BFA_STATUS_DEVBUSY; + } + cee->get_stats_pending = BFA_TRUE; + cmd = (struct bfi_cee_get_req_s *) cee->get_stats_mb.msg; + cee->stats = stats; + cee->cbfn.get_stats_cbfn = cbfn; + cee->cbfn.get_stats_cbarg = cbarg; + bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ, + bfa_ioc_portid(cee->ioc)); + bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa); + bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb); + + return BFA_STATUS_OK; +} + +/* + * bfa_cee_reset_stats() + * + * @brief Clears CEE Stats in the f/w. + * + * @param[in] Pointer to the CEE module data structure. + * + * @return Status + */ + +bfa_status_t +bfa_cee_reset_stats(struct bfa_cee_s *cee, + bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg) +{ + struct bfi_cee_reset_stats_s *cmd; + + WARN_ON((cee == NULL) || (cee->ioc == NULL)); + if (!bfa_ioc_is_operational(cee->ioc)) { + bfa_trc(cee, 0); + return BFA_STATUS_IOC_FAILURE; + } + if (cee->reset_stats_pending == BFA_TRUE) { + bfa_trc(cee, 0); + return BFA_STATUS_DEVBUSY; + } + cee->reset_stats_pending = BFA_TRUE; + cmd = (struct bfi_cee_reset_stats_s *) cee->reset_stats_mb.msg; + cee->cbfn.reset_stats_cbfn = cbfn; + cee->cbfn.reset_stats_cbarg = cbarg; + bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS, + bfa_ioc_portid(cee->ioc)); + bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb); + + return BFA_STATUS_OK; +} + +/* + * bfa_cee_isrs() + * + * @brief Handles Mail-box interrupts for CEE module. + * + * @param[in] Pointer to the CEE module data structure. + * + * @return void + */ + +static void +bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m) +{ + union bfi_cee_i2h_msg_u *msg; + struct bfi_cee_get_rsp_s *get_rsp; + struct bfa_cee_s *cee = (struct bfa_cee_s *) cbarg; + msg = (union bfi_cee_i2h_msg_u *) m; + get_rsp = (struct bfi_cee_get_rsp_s *) m; + bfa_trc(cee, msg->mh.msg_id); + switch (msg->mh.msg_id) { + case BFI_CEE_I2H_GET_CFG_RSP: + bfa_trc(cee, get_rsp->cmd_status); + bfa_cee_get_attr_isr(cee, get_rsp->cmd_status); + break; + case BFI_CEE_I2H_GET_STATS_RSP: + bfa_cee_get_stats_isr(cee, get_rsp->cmd_status); + break; + case BFI_CEE_I2H_RESET_STATS_RSP: + bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status); + break; + default: + WARN_ON(1); + } +} + +/* + * bfa_cee_notify() + * + * @brief CEE module IOC event handler. + * + * @param[in] Pointer to the CEE module data structure. 
+ * @param[in] IOC event type + * + * @return void + */ + +static void +bfa_cee_notify(void *arg, enum bfa_ioc_event_e event) +{ + struct bfa_cee_s *cee = (struct bfa_cee_s *) arg; + + bfa_trc(cee, event); + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (cee->get_attr_pending == BFA_TRUE) { + cee->get_attr_status = BFA_STATUS_FAILED; + cee->get_attr_pending = BFA_FALSE; + if (cee->cbfn.get_attr_cbfn) { + cee->cbfn.get_attr_cbfn( + cee->cbfn.get_attr_cbarg, + BFA_STATUS_FAILED); + } + } + if (cee->get_stats_pending == BFA_TRUE) { + cee->get_stats_status = BFA_STATUS_FAILED; + cee->get_stats_pending = BFA_FALSE; + if (cee->cbfn.get_stats_cbfn) { + cee->cbfn.get_stats_cbfn( + cee->cbfn.get_stats_cbarg, + BFA_STATUS_FAILED); + } + } + if (cee->reset_stats_pending == BFA_TRUE) { + cee->reset_stats_status = BFA_STATUS_FAILED; + cee->reset_stats_pending = BFA_FALSE; + if (cee->cbfn.reset_stats_cbfn) { + cee->cbfn.reset_stats_cbfn( + cee->cbfn.reset_stats_cbarg, + BFA_STATUS_FAILED); + } + } + break; + + default: + break; + } +} + +/* + * bfa_cee_attach() + * + * @brief CEE module-attach API + * + * @param[in] cee - Pointer to the CEE module data structure + * ioc - Pointer to the ioc module data structure + * dev - Pointer to the device driver module data structure + * The device driver specific mbox ISR functions have + * this pointer as one of the parameters. + * + * @return void + */ +void +bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc, + void *dev) +{ + WARN_ON(cee == NULL); + cee->dev = dev; + cee->ioc = ioc; + + bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee); + bfa_q_qe_init(&cee->ioc_notify); + bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee); + list_add_tail(&cee->ioc_notify.qe, &cee->ioc->notify_q); +} diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h new file mode 100644 index 000000000..7e569d4b5 --- /dev/null +++ b/drivers/scsi/bfa/bfa_port.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
+ */ + +#ifndef __BFA_PORT_H__ +#define __BFA_PORT_H__ + +#include "bfa_defs_svc.h" +#include "bfa_ioc.h" +#include "bfa_cs.h" + +typedef void (*bfa_port_stats_cbfn_t) (void *dev, bfa_status_t status); +typedef void (*bfa_port_endis_cbfn_t) (void *dev, bfa_status_t status); + +struct bfa_port_s { + void *dev; + struct bfa_ioc_s *ioc; + struct bfa_trc_mod_s *trcmod; + u32 msgtag; + bfa_boolean_t stats_busy; + struct bfa_mbox_cmd_s stats_mb; + bfa_port_stats_cbfn_t stats_cbfn; + void *stats_cbarg; + bfa_status_t stats_status; + time64_t stats_reset_time; + union bfa_port_stats_u *stats; + struct bfa_dma_s stats_dma; + bfa_boolean_t endis_pending; + struct bfa_mbox_cmd_s endis_mb; + bfa_port_endis_cbfn_t endis_cbfn; + void *endis_cbarg; + bfa_status_t endis_status; + struct bfa_ioc_notify_s ioc_notify; + bfa_boolean_t pbc_disabled; + bfa_boolean_t dport_enabled; + struct bfa_mem_dma_s port_dma; +}; + +#define BFA_MEM_PORT_DMA(__bfa) (&((__bfa)->modules.port.port_dma)) + +void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, + void *dev, struct bfa_trc_mod_s *trcmod); +void bfa_port_notify(void *arg, enum bfa_ioc_event_e event); + +bfa_status_t bfa_port_get_stats(struct bfa_port_s *port, + union bfa_port_stats_u *stats, + bfa_port_stats_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_port_clear_stats(struct bfa_port_s *port, + bfa_port_stats_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_port_enable(struct bfa_port_s *port, + bfa_port_endis_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_port_disable(struct bfa_port_s *port, + bfa_port_endis_cbfn_t cbfn, void *cbarg); +u32 bfa_port_meminfo(void); +void bfa_port_mem_claim(struct bfa_port_s *port, + u8 *dma_kva, u64 dma_pa); +void bfa_port_set_dportenabled(struct bfa_port_s *port, + bfa_boolean_t enabled); + +/* + * CEE declaration + */ +typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, bfa_status_t status); +typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, bfa_status_t status); +typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, bfa_status_t status); + +struct bfa_cee_cbfn_s { + bfa_cee_get_attr_cbfn_t get_attr_cbfn; + void *get_attr_cbarg; + bfa_cee_get_stats_cbfn_t get_stats_cbfn; + void *get_stats_cbarg; + bfa_cee_reset_stats_cbfn_t reset_stats_cbfn; + void *reset_stats_cbarg; +}; + +struct bfa_cee_s { + void *dev; + bfa_boolean_t get_attr_pending; + bfa_boolean_t get_stats_pending; + bfa_boolean_t reset_stats_pending; + bfa_status_t get_attr_status; + bfa_status_t get_stats_status; + bfa_status_t reset_stats_status; + struct bfa_cee_cbfn_s cbfn; + struct bfa_ioc_notify_s ioc_notify; + struct bfa_trc_mod_s *trcmod; + struct bfa_cee_attr_s *attr; + struct bfa_cee_stats_s *stats; + struct bfa_dma_s attr_dma; + struct bfa_dma_s stats_dma; + struct bfa_ioc_s *ioc; + struct bfa_mbox_cmd_s get_cfg_mb; + struct bfa_mbox_cmd_s get_stats_mb; + struct bfa_mbox_cmd_s reset_stats_mb; + struct bfa_mem_dma_s cee_dma; +}; + +#define BFA_MEM_CEE_DMA(__bfa) (&((__bfa)->modules.cee.cee_dma)) + +u32 bfa_cee_meminfo(void); +void bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa); +void bfa_cee_attach(struct bfa_cee_s *cee, + struct bfa_ioc_s *ioc, void *dev); +bfa_status_t bfa_cee_get_attr(struct bfa_cee_s *cee, + struct bfa_cee_attr_s *attr, + bfa_cee_get_attr_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_cee_get_stats(struct bfa_cee_s *cee, + struct bfa_cee_stats_s *stats, + bfa_cee_get_stats_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_cee_reset_stats(struct bfa_cee_s *cee, + bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg); + 
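For reference, the port interface declared above is asynchronous: bfa_port_get_stats() only queues a mailbox command to the IOC and returns, and the result is delivered later through the bfa_port_stats_cbfn_t callback once the BFI_PORT_I2H_GET_STATS_RSP message arrives; BFA_STATUS_DEVBUSY is returned immediately if a previous stats request is still outstanding. Below is a minimal caller-side sketch, assuming the surrounding bfa driver headers; the bfad_stats_ctx_s structure, the function names, and the wait/poll handling are illustrative assumptions, not code from this driver.

#include "bfa_defs_svc.h"
#include "bfa_port.h"

/* Hypothetical per-request context owned by the caller. */
struct bfad_stats_ctx_s {
	union bfa_port_stats_u	stats;	/* filled in before the callback runs */
	bfa_status_t		status;	/* completion status reported by f/w  */
	bfa_boolean_t		done;	/* caller waits/polls on this flag    */
};

/* Completion callback, invoked from bfa_port_get_stats_isr(). */
static void
bfad_port_stats_done(void *cbarg, bfa_status_t status)
{
	struct bfad_stats_ctx_s *ctx = cbarg;

	ctx->status = status;
	ctx->done = BFA_TRUE;
}

/* Start a stats fetch; the caller later waits for ctx->done. */
static bfa_status_t
bfad_port_stats_start(struct bfa_port_s *port, struct bfad_stats_ctx_s *ctx)
{
	ctx->done = BFA_FALSE;

	/*
	 * Returns BFA_STATUS_DEVBUSY while a previous request is pending
	 * and an IOC error if the IOC is not operational; on success the
	 * byte-swapped statistics land in ctx->stats before the callback
	 * above runs.
	 */
	return bfa_port_get_stats(port, &ctx->stats,
				  bfad_port_stats_done, ctx);
}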
+#endif /* __BFA_PORT_H__ */ diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c new file mode 100644 index 000000000..c9745c0b4 --- /dev/null +++ b/drivers/scsi/bfa/bfa_svc.c @@ -0,0 +1,6897 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +#include "bfad_drv.h" +#include "bfad_im.h" +#include "bfa_plog.h" +#include "bfa_cs.h" +#include "bfa_modules.h" + +BFA_TRC_FILE(HAL, FCXP); + +/* + * LPS related definitions + */ +#define BFA_LPS_MIN_LPORTS (1) +#define BFA_LPS_MAX_LPORTS (256) + +/* + * Maximum Vports supported per physical port or vf. + */ +#define BFA_LPS_MAX_VPORTS_SUPP_CB 255 +#define BFA_LPS_MAX_VPORTS_SUPP_CT 190 + + +/* + * FC PORT related definitions + */ +/* + * The port is considered disabled if corresponding physical port or IOC are + * disabled explicitly + */ +#define BFA_PORT_IS_DISABLED(bfa) \ + ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \ + (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) + +/* + * BFA port state machine events + */ +enum bfa_fcport_sm_event { + BFA_FCPORT_SM_START = 1, /* start port state machine */ + BFA_FCPORT_SM_STOP = 2, /* stop port state machine */ + BFA_FCPORT_SM_ENABLE = 3, /* enable port */ + BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */ + BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */ + BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */ + BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */ + BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */ + BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ + BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */ + BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */ + BFA_FCPORT_SM_FAA_MISCONFIG = 12, /* FAA misconfiguratin */ + BFA_FCPORT_SM_DDPORTENABLE = 13, /* enable ddport */ + BFA_FCPORT_SM_DDPORTDISABLE = 14, /* disable ddport */ +}; + +/* + * BFA port link notification state machine events + */ + +enum bfa_fcport_ln_sm_event { + BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */ + BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */ + BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */ +}; + +/* + * RPORT related definitions + */ +#define bfa_rport_offline_cb(__rp) do { \ + if ((__rp)->bfa->fcs) \ + bfa_cb_rport_offline((__rp)->rport_drv); \ + else { \ + bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \ + __bfa_cb_rport_offline, (__rp)); \ + } \ +} while (0) + +#define bfa_rport_online_cb(__rp) do { \ + if ((__rp)->bfa->fcs) \ + bfa_cb_rport_online((__rp)->rport_drv); \ + else { \ + bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \ + __bfa_cb_rport_online, (__rp)); \ + } \ +} while (0) + +/* + * forward declarations FCXP related functions + */ +static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete); +static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp, + struct bfi_fcxp_send_rsp_s *fcxp_rsp); +static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, + struct bfa_fcxp_s *fcxp, struct fchs_s *fchs); +static void bfa_fcxp_qresume(void *cbarg); +static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, + struct bfi_fcxp_send_req_s *send_req); + +/* + * forward declarations for LPS functions + */ +static void bfa_lps_login_rsp(struct bfa_s *bfa, + struct bfi_lps_login_rsp_s *rsp); +static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count); +static void bfa_lps_logout_rsp(struct bfa_s *bfa, + struct 
bfi_lps_logout_rsp_s *rsp); +static void bfa_lps_reqq_resume(void *lps_arg); +static void bfa_lps_free(struct bfa_lps_s *lps); +static void bfa_lps_send_login(struct bfa_lps_s *lps); +static void bfa_lps_send_logout(struct bfa_lps_s *lps); +static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps); +static void bfa_lps_login_comp(struct bfa_lps_s *lps); +static void bfa_lps_logout_comp(struct bfa_lps_s *lps); +static void bfa_lps_cvl_event(struct bfa_lps_s *lps); + +/* + * forward declaration for LPS state machine + */ +static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); +static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event); +static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event + event); +static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event); +static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, + enum bfa_lps_event event); +static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event); +static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event + event); + +/* + * forward declaration for FC Port functions + */ +static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport); +static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport); +static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport); +static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport); +static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport); +static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete); +static void bfa_fcport_scn(struct bfa_fcport_s *fcport, + enum bfa_port_linkstate event, bfa_boolean_t trunk); +static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, + enum bfa_port_linkstate event); +static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete); +static void bfa_fcport_stats_get_timeout(void *cbarg); +static void bfa_fcport_stats_clr_timeout(void *cbarg); +static void bfa_trunk_iocdisable(struct bfa_s *bfa); + +/* + * forward declaration for FC PORT state machine + */ +static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void 
bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); + +static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); + +static struct bfa_sm_table_s hal_port_sm_table[] = { + {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT}, + {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT}, + {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING}, + {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN}, + {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP}, + {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT}, + {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT}, + {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING}, + {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED}, + {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED}, + {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN}, + {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN}, + {BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT}, + {BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT}, + {BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG}, +}; + + +/* + * forward declaration for RPORT related functions + */ +static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod); +static void bfa_rport_free(struct bfa_rport_s *rport); +static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp); +static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp); +static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp); +static void __bfa_cb_rport_online(void *cbarg, + bfa_boolean_t complete); +static void __bfa_cb_rport_offline(void *cbarg, + bfa_boolean_t complete); + +/* + * forward declaration for RPORT state machine + */ +static void bfa_rport_sm_uninit(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_created(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_online(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_offline(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_deleting(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s 
*rp, + enum bfa_rport_event event); + +/* + * PLOG related definitions + */ +static int +plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec) +{ + if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) && + (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING)) + return 1; + + if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) && + (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ)) + return 1; + + return 0; +} + +static void +bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec) +{ + u16 tail; + struct bfa_plog_rec_s *pl_recp; + + if (plog->plog_enabled == 0) + return; + + if (plkd_validate_logrec(pl_rec)) { + WARN_ON(1); + return; + } + + tail = plog->tail; + + pl_recp = &(plog->plog_recs[tail]); + + memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s)); + + pl_recp->tv = ktime_get_real_seconds(); + BFA_PL_LOG_REC_INCR(plog->tail); + + if (plog->head == plog->tail) + BFA_PL_LOG_REC_INCR(plog->head); +} + +void +bfa_plog_init(struct bfa_plog_s *plog) +{ + memset((char *)plog, 0, sizeof(struct bfa_plog_s)); + + memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN); + plog->head = plog->tail = 0; + plog->plog_enabled = 1; +} + +void +bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, + u16 misc, char *log_str) +{ + struct bfa_plog_rec_s lp; + + if (plog->plog_enabled) { + memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); + lp.mid = mid; + lp.eid = event; + lp.log_type = BFA_PL_LOG_TYPE_STRING; + lp.misc = misc; + strscpy(lp.log_entry.string_log, log_str, + BFA_PL_STRING_LOG_SZ); + lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0'; + bfa_plog_add(plog, &lp); + } +} + +void +bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, + u16 misc, u32 *intarr, u32 num_ints) +{ + struct bfa_plog_rec_s lp; + u32 i; + + if (num_ints > BFA_PL_INT_LOG_SZ) + num_ints = BFA_PL_INT_LOG_SZ; + + if (plog->plog_enabled) { + memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); + lp.mid = mid; + lp.eid = event; + lp.log_type = BFA_PL_LOG_TYPE_INT; + lp.misc = misc; + + for (i = 0; i < num_ints; i++) + lp.log_entry.int_log[i] = intarr[i]; + + lp.log_num_ints = (u8) num_ints; + + bfa_plog_add(plog, &lp); + } +} + +void +bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, + u16 misc, struct fchs_s *fchdr) +{ + u32 *tmp_int = (u32 *) fchdr; + u32 ints[BFA_PL_INT_LOG_SZ]; + + if (plog->plog_enabled) { + ints[0] = tmp_int[0]; + ints[1] = tmp_int[1]; + ints[2] = tmp_int[4]; + + bfa_plog_intarr(plog, mid, event, misc, ints, 3); + } +} + +void +bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr, + u32 pld_w0) +{ + u32 *tmp_int = (u32 *) fchdr; + u32 ints[BFA_PL_INT_LOG_SZ]; + + if (plog->plog_enabled) { + ints[0] = tmp_int[0]; + ints[1] = tmp_int[1]; + ints[2] = tmp_int[4]; + ints[3] = pld_w0; + + bfa_plog_intarr(plog, mid, event, misc, ints, 4); + } +} + + +/* + * fcxp_pvt BFA FCXP private functions + */ + +static void +claim_fcxps_mem(struct bfa_fcxp_mod_s *mod) +{ + u16 i; + struct bfa_fcxp_s *fcxp; + + fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod); + memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps); + + INIT_LIST_HEAD(&mod->fcxp_req_free_q); + INIT_LIST_HEAD(&mod->fcxp_rsp_free_q); + INIT_LIST_HEAD(&mod->fcxp_active_q); + INIT_LIST_HEAD(&mod->fcxp_req_unused_q); + INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q); + + mod->fcxp_list = fcxp; + + for (i = 0; i < mod->num_fcxps; i++) { + fcxp->fcxp_mod = mod; + fcxp->fcxp_tag = i; + + if (i < 
(mod->num_fcxps / 2)) { + list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q); + fcxp->req_rsp = BFA_TRUE; + } else { + list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q); + fcxp->req_rsp = BFA_FALSE; + } + + bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp); + fcxp->reqq_waiting = BFA_FALSE; + + fcxp = fcxp + 1; + } + + bfa_mem_kva_curp(mod) = (void *)fcxp; +} + +void +bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, + struct bfa_s *bfa) +{ + struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa); + struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa); + struct bfa_mem_dma_s *seg_ptr; + u16 nsegs, idx, per_seg_fcxp; + u16 num_fcxps = cfg->fwcfg.num_fcxp_reqs; + u32 per_fcxp_sz; + + if (num_fcxps == 0) + return; + + if (cfg->drvcfg.min_cfg) + per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ; + else + per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ; + + /* dma memory */ + nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz); + per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz); + + bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) { + if (num_fcxps >= per_seg_fcxp) { + num_fcxps -= per_seg_fcxp; + bfa_mem_dma_setup(minfo, seg_ptr, + per_seg_fcxp * per_fcxp_sz); + } else + bfa_mem_dma_setup(minfo, seg_ptr, + num_fcxps * per_fcxp_sz); + } + + /* kva memory */ + bfa_mem_kva_setup(minfo, fcxp_kva, + cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s)); +} + +void +bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_pcidev_s *pcidev) +{ + struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); + + mod->bfa = bfa; + mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs; + + /* + * Initialize FCXP request and response payload sizes. + */ + mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ; + if (!cfg->drvcfg.min_cfg) + mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ; + + INIT_LIST_HEAD(&mod->req_wait_q); + INIT_LIST_HEAD(&mod->rsp_wait_q); + + claim_fcxps_mem(mod); +} + +void +bfa_fcxp_iocdisable(struct bfa_s *bfa) +{ + struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); + struct bfa_fcxp_s *fcxp; + struct list_head *qe, *qen; + + /* Enqueue unused fcxp resources to free_q */ + list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q); + list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q); + + list_for_each_safe(qe, qen, &mod->fcxp_active_q) { + fcxp = (struct bfa_fcxp_s *) qe; + if (fcxp->caller == NULL) { + fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg, + BFA_STATUS_IOC_FAILURE, 0, 0, NULL); + bfa_fcxp_free(fcxp); + } else { + fcxp->rsp_status = BFA_STATUS_IOC_FAILURE; + bfa_cb_queue(bfa, &fcxp->hcb_qe, + __bfa_fcxp_send_cbfn, fcxp); + } + } +} + +static struct bfa_fcxp_s * +bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req) +{ + struct bfa_fcxp_s *fcxp; + + if (req) + bfa_q_deq(&fm->fcxp_req_free_q, &fcxp); + else + bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp); + + if (fcxp) + list_add_tail(&fcxp->qe, &fm->fcxp_active_q); + + return fcxp; +} + +static void +bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp, + struct bfa_s *bfa, + u8 *use_ibuf, + u32 *nr_sgles, + bfa_fcxp_get_sgaddr_t *r_sga_cbfn, + bfa_fcxp_get_sglen_t *r_sglen_cbfn, + struct list_head *r_sgpg_q, + int n_sgles, + bfa_fcxp_get_sgaddr_t sga_cbfn, + bfa_fcxp_get_sglen_t sglen_cbfn) +{ + + WARN_ON(bfa == NULL); + + bfa_trc(bfa, fcxp->fcxp_tag); + + if (n_sgles == 0) { + *use_ibuf = 1; + } else { + WARN_ON(*sga_cbfn == NULL); + WARN_ON(*sglen_cbfn == NULL); + + *use_ibuf = 0; + *r_sga_cbfn = sga_cbfn; + *r_sglen_cbfn = sglen_cbfn; + + *nr_sgles = n_sgles; + + /* + * 
alloc required sgpgs + */ + if (n_sgles > BFI_SGE_INLINE) + WARN_ON(1); + } + +} + +static void +bfa_fcxp_init(struct bfa_fcxp_s *fcxp, + void *caller, struct bfa_s *bfa, int nreq_sgles, + int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn, + bfa_fcxp_get_sglen_t req_sglen_cbfn, + bfa_fcxp_get_sgaddr_t rsp_sga_cbfn, + bfa_fcxp_get_sglen_t rsp_sglen_cbfn) +{ + + WARN_ON(bfa == NULL); + + bfa_trc(bfa, fcxp->fcxp_tag); + + fcxp->caller = caller; + + bfa_fcxp_init_reqrsp(fcxp, bfa, + &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn, + &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q, + nreq_sgles, req_sga_cbfn, req_sglen_cbfn); + + bfa_fcxp_init_reqrsp(fcxp, bfa, + &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn, + &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q, + nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn); + +} + +static void +bfa_fcxp_put(struct bfa_fcxp_s *fcxp) +{ + struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; + struct bfa_fcxp_wqe_s *wqe; + + if (fcxp->req_rsp) + bfa_q_deq(&mod->req_wait_q, &wqe); + else + bfa_q_deq(&mod->rsp_wait_q, &wqe); + + if (wqe) { + bfa_trc(mod->bfa, fcxp->fcxp_tag); + + bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles, + wqe->nrsp_sgles, wqe->req_sga_cbfn, + wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn, + wqe->rsp_sglen_cbfn); + + wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp); + return; + } + + WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp)); + list_del(&fcxp->qe); + + if (fcxp->req_rsp) + list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q); + else + list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q); +} + +static void +bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + /* discarded fcxp completion */ +} + +static void +__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_fcxp_s *fcxp = cbarg; + + if (complete) { + fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg, + fcxp->rsp_status, fcxp->rsp_len, + fcxp->residue_len, &fcxp->rsp_fchs); + } else { + bfa_fcxp_free(fcxp); + } +} + +static void +hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp) +{ + struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); + struct bfa_fcxp_s *fcxp; + u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag); + + bfa_trc(bfa, fcxp_tag); + + fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len); + + /* + * @todo f/w should not set residue to non-0 when everything + * is received. 
+ */ + if (fcxp_rsp->req_status == BFA_STATUS_OK) + fcxp_rsp->residue_len = 0; + else + fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len); + + fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag); + + WARN_ON(fcxp->send_cbfn == NULL); + + hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp); + + if (fcxp->send_cbfn != NULL) { + bfa_trc(mod->bfa, (NULL == fcxp->caller)); + if (fcxp->caller == NULL) { + fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg, + fcxp_rsp->req_status, fcxp_rsp->rsp_len, + fcxp_rsp->residue_len, &fcxp_rsp->fchs); + /* + * fcxp automatically freed on return from the callback + */ + bfa_fcxp_free(fcxp); + } else { + fcxp->rsp_status = fcxp_rsp->req_status; + fcxp->rsp_len = fcxp_rsp->rsp_len; + fcxp->residue_len = fcxp_rsp->residue_len; + fcxp->rsp_fchs = fcxp_rsp->fchs; + + bfa_cb_queue(bfa, &fcxp->hcb_qe, + __bfa_fcxp_send_cbfn, fcxp); + } + } else { + bfa_trc(bfa, (NULL == fcxp->send_cbfn)); + } +} + +static void +hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp, + struct fchs_s *fchs) +{ + /* + * TODO: TX ox_id + */ + if (reqlen > 0) { + if (fcxp->use_ireqbuf) { + u32 pld_w0 = + *((u32 *) BFA_FCXP_REQ_PLD(fcxp)); + + bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP, + BFA_PL_EID_TX, + reqlen + sizeof(struct fchs_s), fchs, + pld_w0); + } else { + bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, + BFA_PL_EID_TX, + reqlen + sizeof(struct fchs_s), + fchs); + } + } else { + bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX, + reqlen + sizeof(struct fchs_s), fchs); + } +} + +static void +hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp, + struct bfi_fcxp_send_rsp_s *fcxp_rsp) +{ + if (fcxp_rsp->rsp_len > 0) { + if (fcxp->use_irspbuf) { + u32 pld_w0 = + *((u32 *) BFA_FCXP_RSP_PLD(fcxp)); + + bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP, + BFA_PL_EID_RX, + (u16) fcxp_rsp->rsp_len, + &fcxp_rsp->fchs, pld_w0); + } else { + bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, + BFA_PL_EID_RX, + (u16) fcxp_rsp->rsp_len, + &fcxp_rsp->fchs); + } + } else { + bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX, + (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs); + } +} + +/* + * Handler to resume sending fcxp when space is available in the CPE queue. + */ +static void +bfa_fcxp_qresume(void *cbarg) +{ + struct bfa_fcxp_s *fcxp = cbarg; + struct bfa_s *bfa = fcxp->fcxp_mod->bfa; + struct bfi_fcxp_send_req_s *send_req; + + fcxp->reqq_waiting = BFA_FALSE; + send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); + bfa_fcxp_queue(fcxp, send_req); +} + +/* + * Queue fcxp send request to firmware. 
+ */ +static void +bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req) +{ + struct bfa_s *bfa = fcxp->fcxp_mod->bfa; + struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info; + struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info; + struct bfa_rport_s *rport = reqi->bfa_rport; + + bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ, + bfa_fn_lpu(bfa)); + + send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag); + if (rport) { + send_req->rport_fw_hndl = rport->fw_handle; + send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz); + if (send_req->max_frmsz == 0) + send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ); + } else { + send_req->rport_fw_hndl = 0; + send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ); + } + + send_req->vf_id = cpu_to_be16(reqi->vf_id); + send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag); + send_req->class = reqi->class; + send_req->rsp_timeout = rspi->rsp_timeout; + send_req->cts = reqi->cts; + send_req->fchs = reqi->fchs; + + send_req->req_len = cpu_to_be32(reqi->req_tot_len); + send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen); + + /* + * setup req sgles + */ + if (fcxp->use_ireqbuf == 1) { + bfa_alen_set(&send_req->req_alen, reqi->req_tot_len, + BFA_FCXP_REQ_PLD_PA(fcxp)); + } else { + if (fcxp->nreq_sgles > 0) { + WARN_ON(fcxp->nreq_sgles != 1); + bfa_alen_set(&send_req->req_alen, reqi->req_tot_len, + fcxp->req_sga_cbfn(fcxp->caller, 0)); + } else { + WARN_ON(reqi->req_tot_len != 0); + bfa_alen_set(&send_req->rsp_alen, 0, 0); + } + } + + /* + * setup rsp sgles + */ + if (fcxp->use_irspbuf == 1) { + WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ); + + bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen, + BFA_FCXP_RSP_PLD_PA(fcxp)); + } else { + if (fcxp->nrsp_sgles > 0) { + WARN_ON(fcxp->nrsp_sgles != 1); + bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen, + fcxp->rsp_sga_cbfn(fcxp->caller, 0)); + + } else { + WARN_ON(rspi->rsp_maxlen != 0); + bfa_alen_set(&send_req->rsp_alen, 0, 0); + } + } + + hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs); + + bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh); + + bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP)); + bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP)); +} + +/* + * Allocate an FCXP instance to send a response or to send a request + * that has a response. Request/response buffers are allocated by caller. + * + * @param[in] bfa BFA bfa instance + * @param[in] nreq_sgles Number of SG elements required for request + * buffer. 0, if fcxp internal buffers are used. + * Use bfa_fcxp_get_reqbuf() to get the + * internal req buffer. + * @param[in] req_sgles SG elements describing request buffer. Will be + * copied in by BFA and hence can be freed on + * return from this function. + * @param[in] get_req_sga function ptr to be called to get a request SG + * Address (given the sge index). + * @param[in] get_req_sglen function ptr to be called to get a request SG + * len (given the sge index). + * @param[in] get_rsp_sga function ptr to be called to get a response SG + * Address (given the sge index). + * @param[in] get_rsp_sglen function ptr to be called to get a response SG + * len (given the sge index). + * @param[in] req Allocated FCXP is used to send req or rsp? + * request - BFA_TRUE, response - BFA_FALSE + * + * @return FCXP instance. NULL on failure. 
+ */ +struct bfa_fcxp_s * +bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles, + int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn, + bfa_fcxp_get_sglen_t req_sglen_cbfn, + bfa_fcxp_get_sgaddr_t rsp_sga_cbfn, + bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req) +{ + struct bfa_fcxp_s *fcxp = NULL; + + WARN_ON(bfa == NULL); + + fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req); + if (fcxp == NULL) + return NULL; + + bfa_trc(bfa, fcxp->fcxp_tag); + + bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn, + req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn); + + return fcxp; +} + +/* + * Get the internal request buffer pointer + * + * @param[in] fcxp BFA fcxp pointer + * + * @return pointer to the internal request buffer + */ +void * +bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp) +{ + struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; + void *reqbuf; + + WARN_ON(fcxp->use_ireqbuf != 1); + reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag, + mod->req_pld_sz + mod->rsp_pld_sz); + return reqbuf; +} + +u32 +bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp) +{ + struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; + + return mod->req_pld_sz; +} + +/* + * Get the internal response buffer pointer + * + * @param[in] fcxp BFA fcxp pointer + * + * @return pointer to the internal request buffer + */ +void * +bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp) +{ + struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; + void *fcxp_buf; + + WARN_ON(fcxp->use_irspbuf != 1); + + fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag, + mod->req_pld_sz + mod->rsp_pld_sz); + + /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */ + return ((u8 *) fcxp_buf) + mod->req_pld_sz; +} + +/* + * Free the BFA FCXP + * + * @param[in] fcxp BFA fcxp pointer + * + * @return void + */ +void +bfa_fcxp_free(struct bfa_fcxp_s *fcxp) +{ + struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; + + WARN_ON(fcxp == NULL); + bfa_trc(mod->bfa, fcxp->fcxp_tag); + bfa_fcxp_put(fcxp); +} + +/* + * Send a FCXP request + * + * @param[in] fcxp BFA fcxp pointer + * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports + * @param[in] vf_id virtual Fabric ID + * @param[in] lp_tag lport tag + * @param[in] cts use Continuous sequence + * @param[in] cos fc Class of Service + * @param[in] reqlen request length, does not include FCHS length + * @param[in] fchs fc Header Pointer. The header content will be copied + * in by BFA. + * + * @param[in] cbfn call back function to be called on receiving + * the response + * @param[in] cbarg arg for cbfn + * @param[in] rsp_timeout + * response timeout + * + * @return bfa_status_t + */ +void +bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, + u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos, + u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn, + void *cbarg, u32 rsp_maxlen, u8 rsp_timeout) +{ + struct bfa_s *bfa = fcxp->fcxp_mod->bfa; + struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info; + struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info; + struct bfi_fcxp_send_req_s *send_req; + + bfa_trc(bfa, fcxp->fcxp_tag); + + /* + * setup request/response info + */ + reqi->bfa_rport = rport; + reqi->vf_id = vf_id; + reqi->lp_tag = lp_tag; + reqi->class = cos; + rspi->rsp_timeout = rsp_timeout; + reqi->cts = cts; + reqi->fchs = *fchs; + reqi->req_tot_len = reqlen; + rspi->rsp_maxlen = rsp_maxlen; + fcxp->send_cbfn = cbfn ? 
cbfn : bfa_fcxp_null_comp; + fcxp->send_cbarg = cbarg; + + /* + * If no room in CPE queue, wait for space in request queue + */ + send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); + if (!send_req) { + bfa_trc(bfa, fcxp->fcxp_tag); + fcxp->reqq_waiting = BFA_TRUE; + bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe); + return; + } + + bfa_fcxp_queue(fcxp, send_req); +} + +/* + * Abort a BFA FCXP + * + * @param[in] fcxp BFA fcxp pointer + * + * @return void + */ +bfa_status_t +bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) +{ + bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag); + WARN_ON(1); + return BFA_STATUS_OK; +} + +void +bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, + bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg, + void *caller, int nreq_sgles, + int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn, + bfa_fcxp_get_sglen_t req_sglen_cbfn, + bfa_fcxp_get_sgaddr_t rsp_sga_cbfn, + bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req) +{ + struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); + + if (req) + WARN_ON(!list_empty(&mod->fcxp_req_free_q)); + else + WARN_ON(!list_empty(&mod->fcxp_rsp_free_q)); + + wqe->alloc_cbfn = alloc_cbfn; + wqe->alloc_cbarg = alloc_cbarg; + wqe->caller = caller; + wqe->bfa = bfa; + wqe->nreq_sgles = nreq_sgles; + wqe->nrsp_sgles = nrsp_sgles; + wqe->req_sga_cbfn = req_sga_cbfn; + wqe->req_sglen_cbfn = req_sglen_cbfn; + wqe->rsp_sga_cbfn = rsp_sga_cbfn; + wqe->rsp_sglen_cbfn = rsp_sglen_cbfn; + + if (req) + list_add_tail(&wqe->qe, &mod->req_wait_q); + else + list_add_tail(&wqe->qe, &mod->rsp_wait_q); +} + +void +bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe) +{ + struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); + + WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) || + !bfa_q_is_on_q(&mod->rsp_wait_q, wqe)); + list_del(&wqe->qe); +} + +void +bfa_fcxp_discard(struct bfa_fcxp_s *fcxp) +{ + /* + * If waiting for room in request queue, cancel reqq wait + * and free fcxp. 
+ */ + if (fcxp->reqq_waiting) { + fcxp->reqq_waiting = BFA_FALSE; + bfa_reqq_wcancel(&fcxp->reqq_wqe); + bfa_fcxp_free(fcxp); + return; + } + + fcxp->send_cbfn = bfa_fcxp_null_comp; +} + +void +bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) +{ + switch (msg->mhdr.msg_id) { + case BFI_FCXP_I2H_SEND_RSP: + hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg); + break; + + default: + bfa_trc(bfa, msg->mhdr.msg_id); + WARN_ON(1); + } +} + +u32 +bfa_fcxp_get_maxrsp(struct bfa_s *bfa) +{ + struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); + + return mod->rsp_pld_sz; +} + +void +bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw) +{ + struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); + struct list_head *qe; + int i; + + for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) { + if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) { + bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe); + list_add_tail(qe, &mod->fcxp_req_unused_q); + } else { + bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe); + list_add_tail(qe, &mod->fcxp_rsp_unused_q); + } + } +} + +/* + * BFA LPS state machine functions + */ + +/* + * Init state -- no login + */ +static void +bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event) +{ + bfa_trc(lps->bfa, lps->bfa_tag); + bfa_trc(lps->bfa, event); + + switch (event) { + case BFA_LPS_SM_LOGIN: + if (bfa_reqq_full(lps->bfa, lps->reqq)) { + bfa_sm_set_state(lps, bfa_lps_sm_loginwait); + bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); + } else { + bfa_sm_set_state(lps, bfa_lps_sm_login); + bfa_lps_send_login(lps); + } + + if (lps->fdisc) + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, "FDISC Request"); + else + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, "FLOGI Request"); + break; + + case BFA_LPS_SM_LOGOUT: + bfa_lps_logout_comp(lps); + break; + + case BFA_LPS_SM_DELETE: + bfa_lps_free(lps); + break; + + case BFA_LPS_SM_RX_CVL: + case BFA_LPS_SM_OFFLINE: + break; + + case BFA_LPS_SM_FWRSP: + /* + * Could happen when fabric detects loopback and discards + * the lps request. Fw will eventually sent out the timeout + * Just ignore + */ + break; + case BFA_LPS_SM_SET_N2N_PID: + /* + * When topology is set to loop, bfa_lps_set_n2n_pid() sends + * this event. Ignore this event. 
+ */ + break; + + default: + bfa_sm_fault(lps->bfa, event); + } +} + +/* + * login is in progress -- awaiting response from firmware + */ +static void +bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) +{ + bfa_trc(lps->bfa, lps->bfa_tag); + bfa_trc(lps->bfa, event); + + switch (event) { + case BFA_LPS_SM_FWRSP: + if (lps->status == BFA_STATUS_OK) { + bfa_sm_set_state(lps, bfa_lps_sm_online); + if (lps->fdisc) + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, "FDISC Accept"); + else + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, "FLOGI Accept"); + /* If N2N, send the assigned PID to FW */ + bfa_trc(lps->bfa, lps->fport); + bfa_trc(lps->bfa, lps->lp_pid); + + if (!lps->fport && lps->lp_pid) + bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID); + } else { + bfa_sm_set_state(lps, bfa_lps_sm_init); + if (lps->fdisc) + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, + "FDISC Fail (RJT or timeout)"); + else + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, + "FLOGI Fail (RJT or timeout)"); + } + bfa_lps_login_comp(lps); + break; + + case BFA_LPS_SM_OFFLINE: + case BFA_LPS_SM_DELETE: + bfa_sm_set_state(lps, bfa_lps_sm_init); + break; + + case BFA_LPS_SM_SET_N2N_PID: + bfa_trc(lps->bfa, lps->fport); + bfa_trc(lps->bfa, lps->lp_pid); + break; + + default: + bfa_sm_fault(lps->bfa, event); + } +} + +/* + * login pending - awaiting space in request queue + */ +static void +bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event) +{ + bfa_trc(lps->bfa, lps->bfa_tag); + bfa_trc(lps->bfa, event); + + switch (event) { + case BFA_LPS_SM_RESUME: + bfa_sm_set_state(lps, bfa_lps_sm_login); + bfa_lps_send_login(lps); + break; + + case BFA_LPS_SM_OFFLINE: + case BFA_LPS_SM_DELETE: + bfa_sm_set_state(lps, bfa_lps_sm_init); + bfa_reqq_wcancel(&lps->wqe); + break; + + case BFA_LPS_SM_RX_CVL: + /* + * Login was not even sent out; so when getting out + * of this state, it will appear like a login retry + * after Clear virtual link + */ + break; + + default: + bfa_sm_fault(lps->bfa, event); + } +} + +/* + * login complete + */ +static void +bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event) +{ + bfa_trc(lps->bfa, lps->bfa_tag); + bfa_trc(lps->bfa, event); + + switch (event) { + case BFA_LPS_SM_LOGOUT: + if (bfa_reqq_full(lps->bfa, lps->reqq)) { + bfa_sm_set_state(lps, bfa_lps_sm_logowait); + bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); + } else { + bfa_sm_set_state(lps, bfa_lps_sm_logout); + bfa_lps_send_logout(lps); + } + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGO, 0, "Logout"); + break; + + case BFA_LPS_SM_RX_CVL: + bfa_sm_set_state(lps, bfa_lps_sm_init); + + /* Let the vport module know about this event */ + bfa_lps_cvl_event(lps); + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. 
Link Rx"); + break; + + case BFA_LPS_SM_SET_N2N_PID: + if (bfa_reqq_full(lps->bfa, lps->reqq)) { + bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait); + bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); + } else + bfa_lps_send_set_n2n_pid(lps); + break; + + case BFA_LPS_SM_OFFLINE: + case BFA_LPS_SM_DELETE: + bfa_sm_set_state(lps, bfa_lps_sm_init); + break; + + default: + bfa_sm_fault(lps->bfa, event); + } +} + +/* + * login complete + */ +static void +bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event) +{ + bfa_trc(lps->bfa, lps->bfa_tag); + bfa_trc(lps->bfa, event); + + switch (event) { + case BFA_LPS_SM_RESUME: + bfa_sm_set_state(lps, bfa_lps_sm_online); + bfa_lps_send_set_n2n_pid(lps); + break; + + case BFA_LPS_SM_LOGOUT: + bfa_sm_set_state(lps, bfa_lps_sm_logowait); + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGO, 0, "Logout"); + break; + + case BFA_LPS_SM_RX_CVL: + bfa_sm_set_state(lps, bfa_lps_sm_init); + bfa_reqq_wcancel(&lps->wqe); + + /* Let the vport module know about this event */ + bfa_lps_cvl_event(lps); + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx"); + break; + + case BFA_LPS_SM_OFFLINE: + case BFA_LPS_SM_DELETE: + bfa_sm_set_state(lps, bfa_lps_sm_init); + bfa_reqq_wcancel(&lps->wqe); + break; + + default: + bfa_sm_fault(lps->bfa, event); + } +} + +/* + * logout in progress - awaiting firmware response + */ +static void +bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event) +{ + bfa_trc(lps->bfa, lps->bfa_tag); + bfa_trc(lps->bfa, event); + + switch (event) { + case BFA_LPS_SM_FWRSP: + case BFA_LPS_SM_OFFLINE: + bfa_sm_set_state(lps, bfa_lps_sm_init); + bfa_lps_logout_comp(lps); + break; + + case BFA_LPS_SM_DELETE: + bfa_sm_set_state(lps, bfa_lps_sm_init); + break; + + default: + bfa_sm_fault(lps->bfa, event); + } +} + +/* + * logout pending -- awaiting space in request queue + */ +static void +bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event) +{ + bfa_trc(lps->bfa, lps->bfa_tag); + bfa_trc(lps->bfa, event); + + switch (event) { + case BFA_LPS_SM_RESUME: + bfa_sm_set_state(lps, bfa_lps_sm_logout); + bfa_lps_send_logout(lps); + break; + + case BFA_LPS_SM_OFFLINE: + case BFA_LPS_SM_DELETE: + bfa_sm_set_state(lps, bfa_lps_sm_init); + bfa_reqq_wcancel(&lps->wqe); + break; + + default: + bfa_sm_fault(lps->bfa, event); + } +} + + + +/* + * lps_pvt BFA LPS private functions + */ + +/* + * return memory requirement + */ +void +bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, + struct bfa_s *bfa) +{ + struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa); + + if (cfg->drvcfg.min_cfg) + bfa_mem_kva_setup(minfo, lps_kva, + sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS); + else + bfa_mem_kva_setup(minfo, lps_kva, + sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS); +} + +/* + * bfa module attach at initialization time + */ +void +bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_pcidev_s *pcidev) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct bfa_lps_s *lps; + int i; + + mod->num_lps = BFA_LPS_MAX_LPORTS; + if (cfg->drvcfg.min_cfg) + mod->num_lps = BFA_LPS_MIN_LPORTS; + else + mod->num_lps = BFA_LPS_MAX_LPORTS; + mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod); + + bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s); + + INIT_LIST_HEAD(&mod->lps_free_q); + INIT_LIST_HEAD(&mod->lps_active_q); + INIT_LIST_HEAD(&mod->lps_login_q); + + for (i = 0; i < 
mod->num_lps; i++, lps++) { + lps->bfa = bfa; + lps->bfa_tag = (u8) i; + lps->reqq = BFA_REQQ_LPS; + bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps); + list_add_tail(&lps->qe, &mod->lps_free_q); + } +} + +/* + * IOC in disabled state -- consider all lps offline + */ +void +bfa_lps_iocdisable(struct bfa_s *bfa) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct bfa_lps_s *lps; + struct list_head *qe, *qen; + + list_for_each_safe(qe, qen, &mod->lps_active_q) { + lps = (struct bfa_lps_s *) qe; + bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); + } + list_for_each_safe(qe, qen, &mod->lps_login_q) { + lps = (struct bfa_lps_s *) qe; + bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); + } + list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q); +} + +/* + * Firmware login response + */ +static void +bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct bfa_lps_s *lps; + + WARN_ON(rsp->bfa_tag >= mod->num_lps); + lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag); + + lps->status = rsp->status; + switch (rsp->status) { + case BFA_STATUS_OK: + lps->fw_tag = rsp->fw_tag; + lps->fport = rsp->f_port; + if (lps->fport) + lps->lp_pid = rsp->lp_pid; + lps->npiv_en = rsp->npiv_en; + lps->pr_bbcred = be16_to_cpu(rsp->bb_credit); + lps->pr_pwwn = rsp->port_name; + lps->pr_nwwn = rsp->node_name; + lps->auth_req = rsp->auth_req; + lps->lp_mac = rsp->lp_mac; + lps->brcd_switch = rsp->brcd_switch; + lps->fcf_mac = rsp->fcf_mac; + + break; + + case BFA_STATUS_FABRIC_RJT: + lps->lsrjt_rsn = rsp->lsrjt_rsn; + lps->lsrjt_expl = rsp->lsrjt_expl; + + break; + + case BFA_STATUS_EPROTOCOL: + lps->ext_status = rsp->ext_status; + + break; + + case BFA_STATUS_VPORT_MAX: + if (rsp->ext_status) + bfa_lps_no_res(lps, rsp->ext_status); + break; + + default: + /* Nothing to do with other status */ + break; + } + + list_del(&lps->qe); + list_add_tail(&lps->qe, &mod->lps_active_q); + bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); +} + +static void +bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count) +{ + struct bfa_s *bfa = first_lps->bfa; + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct list_head *qe, *qe_next; + struct bfa_lps_s *lps; + + bfa_trc(bfa, count); + + qe = bfa_q_next(first_lps); + + while (count && qe) { + qe_next = bfa_q_next(qe); + lps = (struct bfa_lps_s *)qe; + bfa_trc(bfa, lps->bfa_tag); + lps->status = first_lps->status; + list_del(&lps->qe); + list_add_tail(&lps->qe, &mod->lps_active_q); + bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); + qe = qe_next; + count--; + } +} + +/* + * Firmware logout response + */ +static void +bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct bfa_lps_s *lps; + + WARN_ON(rsp->bfa_tag >= mod->num_lps); + lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag); + + bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); +} + +/* + * Firmware received a Clear virtual link request (for FCoE) + */ +static void +bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct bfa_lps_s *lps; + + lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag); + + bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); +} + +/* + * Space is available in request queue, resume queueing request to firmware. 
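bfa_lps_sm_loginwait() and the attach code above park a request with bfa_reqq_wait() when the firmware request queue is full and resume it from bfa_lps_reqq_resume() once space opens up. Below is a small stand-alone model of that wait/resume idea; the names (reqq_post, wqe, and so on) are hypothetical and are not the driver's bfa_reqq API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical wait element: a callback to run once queue space frees up. */
struct wqe {
        void (*resume)(void *arg);
        void *arg;
        bool pending;
};

static bool reqq_full;          /* stand-in for a full firmware request queue */
static struct wqe *waiter;      /* single waiter, enough for the demo */

static bool reqq_post(const char *msg, struct wqe *wqe,
                      void (*resume)(void *arg), void *arg)
{
        if (reqq_full) {
                /* No room: park the request, like bfa_reqq_wait(). */
                wqe->resume = resume;
                wqe->arg = arg;
                wqe->pending = true;
                waiter = wqe;
                return false;
        }
        printf("posted: %s\n", msg);
        return true;
}

static void reqq_space_available(void)
{
        /* Mirrors the RESUME/QRESUME handling in the state machines. */
        reqq_full = false;
        if (waiter && waiter->pending) {
                waiter->pending = false;
                waiter->resume(waiter->arg);
        }
}

static void send_login(void *arg)
{
        printf("resumed: sending login for %s\n", (const char *)arg);
}

int main(void)
{
        struct wqe wqe = { 0 };

        reqq_full = true;
        if (!reqq_post("login", &wqe, send_login, "port0"))
                printf("queue full, waiting\n");
        reqq_space_available();
        return 0;
}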
+ */ +static void +bfa_lps_reqq_resume(void *lps_arg) +{ + struct bfa_lps_s *lps = lps_arg; + + bfa_sm_send_event(lps, BFA_LPS_SM_RESUME); +} + +/* + * lps is freed -- triggered by vport delete + */ +static void +bfa_lps_free(struct bfa_lps_s *lps) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa); + + lps->lp_pid = 0; + list_del(&lps->qe); + list_add_tail(&lps->qe, &mod->lps_free_q); +} + +/* + * send login request to firmware + */ +static void +bfa_lps_send_login(struct bfa_lps_s *lps) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa); + struct bfi_lps_login_req_s *m; + + m = bfa_reqq_next(lps->bfa, lps->reqq); + WARN_ON(!m); + + bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ, + bfa_fn_lpu(lps->bfa)); + + m->bfa_tag = lps->bfa_tag; + m->alpa = lps->alpa; + m->pdu_size = cpu_to_be16(lps->pdusz); + m->pwwn = lps->pwwn; + m->nwwn = lps->nwwn; + m->fdisc = lps->fdisc; + m->auth_en = lps->auth_en; + + bfa_reqq_produce(lps->bfa, lps->reqq, m->mh); + list_del(&lps->qe); + list_add_tail(&lps->qe, &mod->lps_login_q); +} + +/* + * send logout request to firmware + */ +static void +bfa_lps_send_logout(struct bfa_lps_s *lps) +{ + struct bfi_lps_logout_req_s *m; + + m = bfa_reqq_next(lps->bfa, lps->reqq); + WARN_ON(!m); + + bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ, + bfa_fn_lpu(lps->bfa)); + + m->fw_tag = lps->fw_tag; + m->port_name = lps->pwwn; + bfa_reqq_produce(lps->bfa, lps->reqq, m->mh); +} + +/* + * send n2n pid set request to firmware + */ +static void +bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps) +{ + struct bfi_lps_n2n_pid_req_s *m; + + m = bfa_reqq_next(lps->bfa, lps->reqq); + WARN_ON(!m); + + bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ, + bfa_fn_lpu(lps->bfa)); + + m->fw_tag = lps->fw_tag; + m->lp_pid = lps->lp_pid; + bfa_reqq_produce(lps->bfa, lps->reqq, m->mh); +} + +/* + * Indirect login completion handler for non-fcs + */ +static void +bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete) +{ + struct bfa_lps_s *lps = arg; + + if (!complete) + return; + + if (lps->fdisc) + bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status); + else + bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); +} + +/* + * Login completion handler -- direct call for fcs, queue for others + */ +static void +bfa_lps_login_comp(struct bfa_lps_s *lps) +{ + if (!lps->bfa->fcs) { + bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb, + lps); + return; + } + + if (lps->fdisc) + bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status); + else + bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); +} + +/* + * Indirect logout completion handler for non-fcs + */ +static void +bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete) +{ + struct bfa_lps_s *lps = arg; + + if (!complete) + return; + + if (lps->fdisc) + bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); + else + bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg); +} + +/* + * Logout completion handler -- direct call for fcs, queue for others + */ +static void +bfa_lps_logout_comp(struct bfa_lps_s *lps) +{ + if (!lps->bfa->fcs) { + bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb, + lps); + return; + } + if (lps->fdisc) + bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); +} + +/* + * Clear virtual link completion handler for non-fcs + */ +static void +bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete) +{ + struct bfa_lps_s *lps = arg; + + if (!complete) + return; + + /* Clear virtual link to base port will result in link down */ + if (lps->fdisc) + 
bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); +} + +/* + * Received Clear virtual link event --direct call for fcs, + * queue for others + */ +static void +bfa_lps_cvl_event(struct bfa_lps_s *lps) +{ + if (!lps->bfa->fcs) { + bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb, + lps); + return; + } + + /* Clear virtual link to base port will result in link down */ + if (lps->fdisc) + bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); +} + + + +/* + * lps_public BFA LPS public functions + */ + +u32 +bfa_lps_get_max_vport(struct bfa_s *bfa) +{ + if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) + return BFA_LPS_MAX_VPORTS_SUPP_CT; + else + return BFA_LPS_MAX_VPORTS_SUPP_CB; +} + +/* + * Allocate a lport srvice tag. + */ +struct bfa_lps_s * +bfa_lps_alloc(struct bfa_s *bfa) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct bfa_lps_s *lps = NULL; + + bfa_q_deq(&mod->lps_free_q, &lps); + + if (lps == NULL) + return NULL; + + list_add_tail(&lps->qe, &mod->lps_active_q); + + bfa_sm_set_state(lps, bfa_lps_sm_init); + return lps; +} + +/* + * Free lport service tag. This can be called anytime after an alloc. + * No need to wait for any pending login/logout completions. + */ +void +bfa_lps_delete(struct bfa_lps_s *lps) +{ + bfa_sm_send_event(lps, BFA_LPS_SM_DELETE); +} + +/* + * Initiate a lport login. + */ +void +bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz, + wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en) +{ + lps->uarg = uarg; + lps->alpa = alpa; + lps->pdusz = pdusz; + lps->pwwn = pwwn; + lps->nwwn = nwwn; + lps->fdisc = BFA_FALSE; + lps->auth_en = auth_en; + bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); +} + +/* + * Initiate a lport fdisc login. + */ +void +bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn, + wwn_t nwwn) +{ + lps->uarg = uarg; + lps->alpa = 0; + lps->pdusz = pdusz; + lps->pwwn = pwwn; + lps->nwwn = nwwn; + lps->fdisc = BFA_TRUE; + lps->auth_en = BFA_FALSE; + bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); +} + + +/* + * Initiate a lport FDSIC logout. + */ +void +bfa_lps_fdisclogo(struct bfa_lps_s *lps) +{ + bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); +} + +u8 +bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + + return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag; +} + +/* + * Return lport services tag given the pid + */ +u8 +bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct bfa_lps_s *lps; + int i; + + for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) { + if (lps->lp_pid == pid) + return lps->bfa_tag; + } + + /* Return base port tag anyway */ + return 0; +} + + +/* + * return port id assigned to the base lport + */ +u32 +bfa_lps_get_base_pid(struct bfa_s *bfa) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + + return BFA_LPS_FROM_TAG(mod, 0)->lp_pid; +} + +/* + * Set PID in case of n2n (which is assigned during PLOGI) + */ +void +bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid) +{ + bfa_trc(lps->bfa, lps->bfa_tag); + bfa_trc(lps->bfa, n2n_pid); + + lps->lp_pid = n2n_pid; + bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID); +} + +/* + * LPS firmware message class handler. 
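The completion helpers above (bfa_lps_login_comp(), bfa_lps_logout_comp(), bfa_lps_cvl_event()) call back immediately when FCS owns the completion and otherwise defer it through bfa_cb_queue(). A compact sketch of that direct-versus-deferred pattern, with hypothetical names standing in for the bfa callback queue:

#include <stdbool.h>
#include <stdio.h>

/* Queued completion element, standing in for the driver's callback queue. */
struct cb_qe {
        void (*cbfn)(void *cbarg, bool complete);
        void *cbarg;
};

static struct cb_qe *deferred;  /* one-deep "queue" is enough for the demo */

static void login_comp_cb(void *cbarg, bool complete)
{
        if (!complete)
                return;
        printf("login complete for %s\n", (const char *)cbarg);
}

/*
 * Complete a login either directly (caller already runs in the right
 * context) or by deferring the callback, the way bfa_lps_login_comp()
 * chooses between a direct call and bfa_cb_queue().
 */
static void login_comp(bool direct, void *uarg)
{
        static struct cb_qe qe;         /* reused; one deferral at a time */

        if (!direct) {
                qe.cbfn = login_comp_cb;
                qe.cbarg = uarg;
                deferred = &qe;
                return;
        }
        login_comp_cb(uarg, true);
}

static void run_deferred(void)
{
        if (deferred) {
                deferred->cbfn(deferred->cbarg, true);
                deferred = NULL;
        }
}

int main(void)
{
        login_comp(true, "direct-port");        /* immediate callback */
        login_comp(false, "queued-port");       /* deferred callback */
        run_deferred();
        return 0;
}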
+ */ +void +bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + union bfi_lps_i2h_msg_u msg; + + bfa_trc(bfa, m->mhdr.msg_id); + msg.msg = m; + + switch (m->mhdr.msg_id) { + case BFI_LPS_I2H_LOGIN_RSP: + bfa_lps_login_rsp(bfa, msg.login_rsp); + break; + + case BFI_LPS_I2H_LOGOUT_RSP: + bfa_lps_logout_rsp(bfa, msg.logout_rsp); + break; + + case BFI_LPS_I2H_CVL_EVENT: + bfa_lps_rx_cvl_event(bfa, msg.cvl_event); + break; + + default: + bfa_trc(bfa, m->mhdr.msg_id); + WARN_ON(1); + } +} + +static void +bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event) +{ + struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; + struct bfa_aen_entry_s *aen_entry; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa); + aen_entry->aen_data.port.pwwn = fcport->pwwn; + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq, + BFA_AEN_CAT_PORT, event); +} + +/* + * FC PORT state machine functions + */ +static void +bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_START: + /* + * Start event after IOC is configured and BFA is started. + */ + fcport->use_flash_cfg = BFA_TRUE; + + if (bfa_fcport_send_enable(fcport)) { + bfa_trc(fcport->bfa, BFA_TRUE); + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); + } else { + bfa_trc(fcport->bfa, BFA_FALSE); + bfa_sm_set_state(fcport, + bfa_fcport_sm_enabling_qwait); + } + break; + + case BFA_FCPORT_SM_ENABLE: + /* + * Port is persistently configured to be in enabled state. Do + * not change state. Port enabling is done when START event is + * received. + */ + break; + + case BFA_FCPORT_SM_DISABLE: + /* + * If a port is persistently configured to be disabled, the + * first event will a port disable request. + */ + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + char pwwn_buf[BFA_STRING_32]; + struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_QRESUME: + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); + bfa_fcport_send_enable(fcport); + break; + + case BFA_FCPORT_SM_STOP: + bfa_reqq_wcancel(&fcport->reqq_wait); + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_ENABLE: + /* + * Already enable is in progress. + */ + break; + + case BFA_FCPORT_SM_DISABLE: + /* + * Just send disable request to firmware when room becomes + * available in request queue. + */ + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); + bfa_reqq_wcancel(&fcport->reqq_wait); + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); + wwn2str(pwwn_buf, fcport->pwwn); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Base port disabled: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); + break; + + case BFA_FCPORT_SM_LINKUP: + case BFA_FCPORT_SM_LINKDOWN: + /* + * Possible to get link events when doing back-to-back + * enable/disables. 
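bfa_lps_isr() above overlays a union of message views on the incoming header and dispatches on mhdr.msg_id. The stand-alone sketch below shows the same overlay-and-switch shape; the message IDs and structures are invented for the example and are not the bfi definitions.

#include <stdio.h>

/* Invented message IDs and layouts; the real ones live in the bfi headers. */
enum i2h_msg_id { I2H_LOGIN_RSP = 1, I2H_LOGOUT_RSP = 2 };

struct msg_hdr    { enum i2h_msg_id msg_id; };
struct login_rsp  { struct msg_hdr mh; int status; };
struct logout_rsp { struct msg_hdr mh; };

union i2h_msg {
        struct msg_hdr    *mh;
        struct login_rsp  *login;
        struct logout_rsp *logout;
};

static void lps_isr(struct msg_hdr *m)
{
        union i2h_msg msg;

        msg.mh = m;             /* overlay the specific views on the header */
        switch (m->msg_id) {
        case I2H_LOGIN_RSP:
                printf("login rsp, status %d\n", msg.login->status);
                break;
        case I2H_LOGOUT_RSP:
                printf("logout rsp\n");
                break;
        default:
                printf("unexpected msg id %d\n", (int)m->msg_id);
        }
}

int main(void)
{
        struct login_rsp rsp = { .mh = { I2H_LOGIN_RSP }, .status = 0 };

        lps_isr(&rsp.mh);
        return 0;
}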
+ */ + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_reqq_wcancel(&fcport->reqq_wait); + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); + break; + + case BFA_FCPORT_SM_FAA_MISCONFIG: + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + char pwwn_buf[BFA_STRING_32]; + struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_FWRSP: + case BFA_FCPORT_SM_LINKDOWN: + bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); + break; + + case BFA_FCPORT_SM_LINKUP: + bfa_fcport_update_linkinfo(fcport); + bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); + + WARN_ON(!fcport->event_cbfn); + bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE); + break; + + case BFA_FCPORT_SM_ENABLE: + /* + * Already being enabled. + */ + break; + + case BFA_FCPORT_SM_DISABLE: + if (bfa_fcport_send_disable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_disabling_qwait); + + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); + wwn2str(pwwn_buf, fcport->pwwn); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Base port disabled: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); + break; + + case BFA_FCPORT_SM_FAA_MISCONFIG: + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; + char pwwn_buf[BFA_STRING_32]; + struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; + + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_LINKUP: + bfa_fcport_update_linkinfo(fcport); + bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); + WARN_ON(!fcport->event_cbfn); + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup"); + if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { + + bfa_trc(fcport->bfa, + pevent->link_state.attr.vc_fcf.fcf.fipenabled); + bfa_trc(fcport->bfa, + pevent->link_state.attr.vc_fcf.fcf.fipfailed); + + if (pevent->link_state.attr.vc_fcf.fcf.fipfailed) + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_FIP_FCF_DISC, 0, + "FIP FCF Discovery Failed"); + else + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_FIP_FCF_DISC, 0, + "FIP FCF Discovered"); + } + + bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE); + wwn2str(pwwn_buf, fcport->pwwn); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Base port online: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE); + + /* If QoS is enabled and it is not online, send AEN */ + if (fcport->cfg.qos_enabled && + fcport->qos_attr.state != BFA_QOS_ONLINE) + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG); + break; + + case BFA_FCPORT_SM_LINKDOWN: + /* + * Possible to get link down event. 
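The port state handlers in this area log events as "Base port ...: WWN = %s" after converting the port WWN to text with wwn2str(). A self-contained formatter in the same spirit, assuming the WWN is carried as a 64-bit value; the driver has its own wwn2str() helper, which is not reproduced here.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t wwn_t;         /* assumption: the WWN travels as 64 bits */

/* Format a WWN as "xx:xx:xx:xx:xx:xx:xx:xx", most significant byte first. */
static void wwn_to_str(char *buf, size_t len, wwn_t wwn)
{
        snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
                 (unsigned int)(wwn >> 56) & 0xff,
                 (unsigned int)(wwn >> 48) & 0xff,
                 (unsigned int)(wwn >> 40) & 0xff,
                 (unsigned int)(wwn >> 32) & 0xff,
                 (unsigned int)(wwn >> 24) & 0xff,
                 (unsigned int)(wwn >> 16) & 0xff,
                 (unsigned int)(wwn >> 8) & 0xff,
                 (unsigned int)wwn & 0xff);
}

int main(void)
{
        char buf[24];

        wwn_to_str(buf, sizeof(buf), 0x200000051e123456ULL);
        printf("Base port WWN = %s\n", buf);    /* 20:00:00:05:1e:12:34:56 */
        return 0;
}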
+ */ + break; + + case BFA_FCPORT_SM_ENABLE: + /* + * Already enabled. + */ + break; + + case BFA_FCPORT_SM_DISABLE: + if (bfa_fcport_send_disable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_disabling_qwait); + + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); + wwn2str(pwwn_buf, fcport->pwwn); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Base port disabled: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); + break; + + case BFA_FCPORT_SM_FAA_MISCONFIG: + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + char pwwn_buf[BFA_STRING_32]; + struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; + + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_ENABLE: + /* + * Already enabled. + */ + break; + + case BFA_FCPORT_SM_DISABLE: + if (bfa_fcport_send_disable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_disabling_qwait); + + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); + wwn2str(pwwn_buf, fcport->pwwn); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Base port offline: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Base port disabled: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); + break; + + case BFA_FCPORT_SM_LINKDOWN: + bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); + wwn2str(pwwn_buf, fcport->pwwn); + if (BFA_PORT_IS_DISABLED(fcport->bfa)) { + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Base port offline: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); + } else { + BFA_LOG(KERN_ERR, bfad, bfa_log_level, + "Base port (WWN = %s) " + "lost fabric connectivity\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + } + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + bfa_fcport_reset_linkinfo(fcport); + wwn2str(pwwn_buf, fcport->pwwn); + if (BFA_PORT_IS_DISABLED(fcport->bfa)) { + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Base port offline: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); + } else { + BFA_LOG(KERN_ERR, bfad, bfa_log_level, + "Base port (WWN = %s) " + "lost fabric connectivity\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + } + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); + wwn2str(pwwn_buf, fcport->pwwn); + if (BFA_PORT_IS_DISABLED(fcport->bfa)) { + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Base port offline: 
WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); + } else { + BFA_LOG(KERN_ERR, bfad, bfa_log_level, + "Base port (WWN = %s) " + "lost fabric connectivity\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + } + break; + + case BFA_FCPORT_SM_FAA_MISCONFIG: + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_QRESUME: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); + bfa_fcport_send_disable(fcport); + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + bfa_reqq_wcancel(&fcport->reqq_wait); + break; + + case BFA_FCPORT_SM_ENABLE: + bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait); + break; + + case BFA_FCPORT_SM_DISABLE: + /* + * Already being disabled. + */ + break; + + case BFA_FCPORT_SM_LINKUP: + case BFA_FCPORT_SM_LINKDOWN: + /* + * Possible to get link events when doing back-to-back + * enable/disables. + */ + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); + bfa_reqq_wcancel(&fcport->reqq_wait); + break; + + case BFA_FCPORT_SM_FAA_MISCONFIG: + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_QRESUME: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); + bfa_fcport_send_disable(fcport); + if (bfa_fcport_send_enable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_enabling_qwait); + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + bfa_reqq_wcancel(&fcport->reqq_wait); + break; + + case BFA_FCPORT_SM_ENABLE: + break; + + case BFA_FCPORT_SM_DISABLE: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); + break; + + case BFA_FCPORT_SM_LINKUP: + case BFA_FCPORT_SM_LINKDOWN: + /* + * Possible to get link events when doing back-to-back + * enable/disables. + */ + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); + bfa_reqq_wcancel(&fcport->reqq_wait); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + char pwwn_buf[BFA_STRING_32]; + struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_FWRSP: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); + break; + + case BFA_FCPORT_SM_DISABLE: + /* + * Already being disabled. 
+ */ + break; + + case BFA_FCPORT_SM_ENABLE: + if (bfa_fcport_send_enable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_enabling_qwait); + + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); + wwn2str(pwwn_buf, fcport->pwwn); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Base port enabled: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_LINKUP: + case BFA_FCPORT_SM_LINKDOWN: + /* + * Possible to get link events when doing back-to-back + * enable/disables. + */ + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + char pwwn_buf[BFA_STRING_32]; + struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_START: + /* + * Ignore start event for a port that is disabled. + */ + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_ENABLE: + if (bfa_fcport_send_enable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_enabling_qwait); + + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); + wwn2str(pwwn_buf, fcport->pwwn); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "Base port enabled: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); + break; + + case BFA_FCPORT_SM_DISABLE: + /* + * Already disabled. + */ + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); + break; + + case BFA_FCPORT_SM_DPORTENABLE: + bfa_sm_set_state(fcport, bfa_fcport_sm_dport); + break; + + case BFA_FCPORT_SM_DDPORTENABLE: + bfa_sm_set_state(fcport, bfa_fcport_sm_ddport); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_START: + if (bfa_fcport_send_enable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_enabling_qwait); + break; + + default: + /* + * Ignore all other events. + */ + ; + } +} + +/* + * Port is enabled. IOC is down/failed. + */ +static void +bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_START: + if (bfa_fcport_send_enable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_enabling_qwait); + break; + + default: + /* + * Ignore all events. + */ + ; + } +} + +/* + * Port is disabled. IOC is down/failed. + */ +static void +bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_START: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); + break; + + case BFA_FCPORT_SM_ENABLE: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); + break; + + default: + /* + * Ignore all events. 
+ */ + ; + } +} + +static void +bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_DPORTENABLE: + case BFA_FCPORT_SM_DISABLE: + case BFA_FCPORT_SM_ENABLE: + case BFA_FCPORT_SM_START: + /* + * Ignore event for a port that is dport + */ + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); + break; + + case BFA_FCPORT_SM_DPORTDISABLE: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_DISABLE: + case BFA_FCPORT_SM_DDPORTDISABLE: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); + break; + + case BFA_FCPORT_SM_DPORTENABLE: + case BFA_FCPORT_SM_DPORTDISABLE: + case BFA_FCPORT_SM_ENABLE: + case BFA_FCPORT_SM_START: + /* + * Ignore event for a port that is ddport + */ + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_DPORTENABLE: + case BFA_FCPORT_SM_ENABLE: + case BFA_FCPORT_SM_START: + /* + * Ignore event for a port as there is FAA misconfig + */ + break; + + case BFA_FCPORT_SM_DISABLE: + if (bfa_fcport_send_disable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); + else + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); + + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +/* + * Link state is down + */ +static void +bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) +{ + bfa_trc(ln->fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_LN_SM_LINKUP: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); + bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP); + break; + + default: + bfa_sm_fault(ln->fcport->bfa, event); + } +} + +/* + * Link state is waiting for down notification + */ +static void +bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) +{ + bfa_trc(ln->fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_LN_SM_LINKUP: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); + break; + + case BFA_FCPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); + break; + + default: + bfa_sm_fault(ln->fcport->bfa, event); + } +} + +/* + * Link state is waiting for down notification and there is a pending up + */ +static void +bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) +{ + 
bfa_trc(ln->fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); + break; + + case BFA_FCPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); + bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP); + break; + + default: + bfa_sm_fault(ln->fcport->bfa, event); + } +} + +/* + * Link state is up + */ +static void +bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) +{ + bfa_trc(ln->fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); + bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN); + break; + + default: + bfa_sm_fault(ln->fcport->bfa, event); + } +} + +/* + * Link state is waiting for up notification + */ +static void +bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) +{ + bfa_trc(ln->fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf); + break; + + case BFA_FCPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up); + break; + + default: + bfa_sm_fault(ln->fcport->bfa, event); + } +} + +/* + * Link state is waiting for up notification and there is a pending down + */ +static void +bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) +{ + bfa_trc(ln->fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_LN_SM_LINKUP: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf); + break; + + case BFA_FCPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); + bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN); + break; + + default: + bfa_sm_fault(ln->fcport->bfa, event); + } +} + +/* + * Link state is waiting for up notification and there are pending down and up + */ +static void +bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) +{ + bfa_trc(ln->fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf); + break; + + case BFA_FCPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); + bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN); + break; + + default: + bfa_sm_fault(ln->fcport->bfa, event); + } +} + +static void +__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_fcport_ln_s *ln = cbarg; + + if (complete) + ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event); + else + bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); +} + +/* + * Send SCN notification to upper layers. 
+ * trunk - false if caller is fcport to ignore fcport event in trunked mode + */ +static void +bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event, + bfa_boolean_t trunk) +{ + if (fcport->cfg.trunked && !trunk) + return; + + switch (event) { + case BFA_PORT_LINKUP: + bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP); + break; + case BFA_PORT_LINKDOWN: + bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN); + break; + default: + WARN_ON(1); + } +} + +static void +bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event) +{ + struct bfa_fcport_s *fcport = ln->fcport; + + if (fcport->bfa->fcs) { + fcport->event_cbfn(fcport->event_cbarg, event); + bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); + } else { + ln->ln_event = event; + bfa_cb_queue(fcport->bfa, &ln->ln_qe, + __bfa_cb_fcport_event, ln); + } +} + +#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \ + BFA_CACHELINE_SZ)) + +void +bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, + struct bfa_s *bfa) +{ + struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa); + + bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ); +} + +static void +bfa_fcport_qresume(void *cbarg) +{ + struct bfa_fcport_s *fcport = cbarg; + + bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME); +} + +static void +bfa_fcport_mem_claim(struct bfa_fcport_s *fcport) +{ + struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma; + + fcport->stats_kva = bfa_mem_dma_virt(fcport_dma); + fcport->stats_pa = bfa_mem_dma_phys(fcport_dma); + fcport->stats = (union bfa_fcport_stats_u *) + bfa_mem_dma_virt(fcport_dma); +} + +/* + * Memory initialization. + */ +void +bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_pcidev_s *pcidev) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + struct bfa_port_cfg_s *port_cfg = &fcport->cfg; + struct bfa_fcport_ln_s *ln = &fcport->ln; + + fcport->bfa = bfa; + ln->fcport = fcport; + + bfa_fcport_mem_claim(fcport); + + bfa_sm_set_state(fcport, bfa_fcport_sm_uninit); + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); + + /* + * initialize time stamp for stats reset + */ + fcport->stats_reset_time = ktime_get_seconds(); + fcport->stats_dma_ready = BFA_FALSE; + + /* + * initialize and set default configuration + */ + port_cfg->topology = BFA_PORT_TOPOLOGY_P2P; + port_cfg->speed = BFA_PORT_SPEED_AUTO; + port_cfg->trunked = BFA_FALSE; + port_cfg->maxfrsize = 0; + + port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS; + port_cfg->qos_bw.high = BFA_QOS_BW_HIGH; + port_cfg->qos_bw.med = BFA_QOS_BW_MED; + port_cfg->qos_bw.low = BFA_QOS_BW_LOW; + + fcport->fec_state = BFA_FEC_OFFLINE; + + INIT_LIST_HEAD(&fcport->stats_pending_q); + INIT_LIST_HEAD(&fcport->statsclr_pending_q); + + bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport); +} + +void +bfa_fcport_start(struct bfa_s *bfa) +{ + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START); +} + +/* + * Called when IOC failure is detected. 
+ */ +void +bfa_fcport_iocdisable(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL); + bfa_trunk_iocdisable(bfa); +} + +/* + * Update loop info in fcport for SCN online + */ +static void +bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport, + struct bfa_fcport_loop_info_s *loop_info) +{ + fcport->myalpa = loop_info->myalpa; + fcport->alpabm_valid = + loop_info->alpabm_val; + memcpy(fcport->alpabm.alpa_bm, + loop_info->alpabm.alpa_bm, + sizeof(struct fc_alpabm_s)); +} + +static void +bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) +{ + struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; + struct bfa_fcport_trunk_s *trunk = &fcport->trunk; + + fcport->speed = pevent->link_state.speed; + fcport->topology = pevent->link_state.topology; + + if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) { + bfa_fcport_update_loop_info(fcport, + &pevent->link_state.attr.loop_info); + return; + } + + /* QoS Details */ + fcport->qos_attr = pevent->link_state.qos_attr; + fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr; + + if (fcport->cfg.bb_cr_enabled) + fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr; + + fcport->fec_state = pevent->link_state.fec_state; + + /* + * update trunk state if applicable + */ + if (!fcport->cfg.trunked) + trunk->attr.state = BFA_TRUNK_DISABLED; + + /* update FCoE specific */ + fcport->fcoe_vlan = + be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan); + + bfa_trc(fcport->bfa, fcport->speed); + bfa_trc(fcport->bfa, fcport->topology); +} + +static void +bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport) +{ + fcport->speed = BFA_PORT_SPEED_UNKNOWN; + fcport->topology = BFA_PORT_TOPOLOGY_NONE; + fcport->fec_state = BFA_FEC_OFFLINE; +} + +/* + * Send port enable message to firmware. + */ +static bfa_boolean_t +bfa_fcport_send_enable(struct bfa_fcport_s *fcport) +{ + struct bfi_fcport_enable_req_s *m; + + /* + * Increment message tag before queue check, so that responses to old + * requests are discarded. + */ + fcport->msgtag++; + + /* + * check for room in queue to send request now + */ + m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); + if (!m) { + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->reqq_wait); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ, + bfa_fn_lpu(fcport->bfa)); + m->nwwn = fcport->nwwn; + m->pwwn = fcport->pwwn; + m->port_cfg = fcport->cfg; + m->msgtag = fcport->msgtag; + m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize); + m->use_flash_cfg = fcport->use_flash_cfg; + bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa); + bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); + bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); + + /* + * queue I/O message to firmware + */ + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh); + return BFA_TRUE; +} + +/* + * Send port disable message to firmware. + */ +static bfa_boolean_t +bfa_fcport_send_disable(struct bfa_fcport_s *fcport) +{ + struct bfi_fcport_req_s *m; + + /* + * Increment message tag before queue check, so that responses to old + * requests are discarded. 
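bfa_fcport_send_enable() above bumps fcport->msgtag before checking for queue space, and the ISR later only acts on an enable/disable response whose tag matches, so late replies to superseded requests are dropped. A minimal model of that tag check with hypothetical names:

#include <stdint.h>
#include <stdio.h>

struct port {
        uint8_t msgtag;         /* tag of the most recent request */
};

static uint8_t send_request(struct port *p, const char *what)
{
        uint8_t tag = ++p->msgtag;      /* older replies will not match */

        printf("sent %s, tag %u\n", what, (unsigned int)tag);
        return tag;
}

static void handle_response(struct port *p, uint8_t rsp_tag)
{
        if (rsp_tag != p->msgtag) {
                printf("tag %u is stale, ignored\n", (unsigned int)rsp_tag);
                return;
        }
        printf("tag %u matches, response processed\n", (unsigned int)rsp_tag);
}

int main(void)
{
        struct port port = { 0 };
        uint8_t old = send_request(&port, "enable");

        send_request(&port, "disable");         /* supersedes the enable */
        handle_response(&port, old);            /* late enable reply: dropped */
        handle_response(&port, port.msgtag);    /* current reply: processed */
        return 0;
}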
+ */ + fcport->msgtag++; + + /* + * check for room in queue to send request now + */ + m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); + if (!m) { + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->reqq_wait); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ, + bfa_fn_lpu(fcport->bfa)); + m->msgtag = fcport->msgtag; + + /* + * queue I/O message to firmware + */ + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh); + + return BFA_TRUE; +} + +static void +bfa_fcport_set_wwns(struct bfa_fcport_s *fcport) +{ + fcport->pwwn = fcport->bfa->ioc.attr->pwwn; + fcport->nwwn = fcport->bfa->ioc.attr->nwwn; + + bfa_trc(fcport->bfa, fcport->pwwn); + bfa_trc(fcport->bfa, fcport->nwwn); +} + +static void +bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d, + struct bfa_qos_stats_s *s) +{ + u32 *dip = (u32 *) d; + __be32 *sip = (__be32 *) s; + int i; + + /* Now swap the 32 bit fields */ + for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i) + dip[i] = be32_to_cpu(sip[i]); +} + +static void +bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d, + struct bfa_fcoe_stats_s *s) +{ + u32 *dip = (u32 *) d; + __be32 *sip = (__be32 *) s; + int i; + + for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32)); + i = i + 2) { +#ifdef __BIG_ENDIAN + dip[i] = be32_to_cpu(sip[i]); + dip[i + 1] = be32_to_cpu(sip[i + 1]); +#else + dip[i] = be32_to_cpu(sip[i + 1]); + dip[i + 1] = be32_to_cpu(sip[i]); +#endif + } +} + +static void +__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg; + struct bfa_cb_pending_q_s *cb; + struct list_head *qe, *qen; + union bfa_fcport_stats_u *ret; + + if (complete) { + time64_t time = ktime_get_seconds(); + + list_for_each_safe(qe, qen, &fcport->stats_pending_q) { + bfa_q_deq(&fcport->stats_pending_q, &qe); + cb = (struct bfa_cb_pending_q_s *)qe; + if (fcport->stats_status == BFA_STATUS_OK) { + ret = (union bfa_fcport_stats_u *)cb->data; + /* Swap FC QoS or FCoE stats */ + if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) + bfa_fcport_qos_stats_swap(&ret->fcqos, + &fcport->stats->fcqos); + else { + bfa_fcport_fcoe_stats_swap(&ret->fcoe, + &fcport->stats->fcoe); + ret->fcoe.secs_reset = + time - fcport->stats_reset_time; + } + } + bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe, + fcport->stats_status); + } + fcport->stats_status = BFA_STATUS_OK; + } else { + INIT_LIST_HEAD(&fcport->stats_pending_q); + fcport->stats_status = BFA_STATUS_OK; + } +} + +static void +bfa_fcport_stats_get_timeout(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + + bfa_trc(fcport->bfa, fcport->stats_qfull); + + if (fcport->stats_qfull) { + bfa_reqq_wcancel(&fcport->stats_reqq_wait); + fcport->stats_qfull = BFA_FALSE; + } + + fcport->stats_status = BFA_STATUS_ETIMER; + __bfa_cb_fcport_stats_get(fcport, BFA_TRUE); +} + +static void +bfa_fcport_send_stats_get(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + struct bfi_fcport_req_s *msg; + + msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); + + if (!msg) { + fcport->stats_qfull = BFA_TRUE; + bfa_reqq_winit(&fcport->stats_reqq_wait, + bfa_fcport_send_stats_get, fcport); + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->stats_reqq_wait); + return; + } + fcport->stats_qfull = BFA_FALSE; + + memset(msg, 0, sizeof(struct bfi_fcport_req_s)); + bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ, + bfa_fn_lpu(fcport->bfa)); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, 
msg->mh); +} + +static void +__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + struct bfa_cb_pending_q_s *cb; + struct list_head *qe, *qen; + + if (complete) { + /* + * re-initialize time stamp for stats reset + */ + fcport->stats_reset_time = ktime_get_seconds(); + list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) { + bfa_q_deq(&fcport->statsclr_pending_q, &qe); + cb = (struct bfa_cb_pending_q_s *)qe; + bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe, + fcport->stats_status); + } + fcport->stats_status = BFA_STATUS_OK; + } else { + INIT_LIST_HEAD(&fcport->statsclr_pending_q); + fcport->stats_status = BFA_STATUS_OK; + } +} + +static void +bfa_fcport_stats_clr_timeout(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + + bfa_trc(fcport->bfa, fcport->stats_qfull); + + if (fcport->stats_qfull) { + bfa_reqq_wcancel(&fcport->stats_reqq_wait); + fcport->stats_qfull = BFA_FALSE; + } + + fcport->stats_status = BFA_STATUS_ETIMER; + __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE); +} + +static void +bfa_fcport_send_stats_clear(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + struct bfi_fcport_req_s *msg; + + msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); + + if (!msg) { + fcport->stats_qfull = BFA_TRUE; + bfa_reqq_winit(&fcport->stats_reqq_wait, + bfa_fcport_send_stats_clear, fcport); + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->stats_reqq_wait); + return; + } + fcport->stats_qfull = BFA_FALSE; + + memset(msg, 0, sizeof(struct bfi_fcport_req_s)); + bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ, + bfa_fn_lpu(fcport->bfa)); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh); +} + +/* + * Handle trunk SCN event from firmware. 
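bfa_fcport_qos_stats_swap() and bfa_fcport_fcoe_stats_swap() above convert firmware statistics that arrive as big-endian 32-bit words; for the FCoE counters, which are 64-bit values split into word pairs, the two words also trade places on a little-endian host, which is what the separate __BIG_ENDIAN and little-endian branches achieve. A portable stand-alone illustration of both steps; be32_to_host() is a stand-in for be32_to_cpu(), not the kernel helper.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Portable be32 -> host conversion, a stand-in for be32_to_cpu(). */
static uint32_t be32_to_host(uint32_t be)
{
        const uint8_t *p = (const uint8_t *)&be;

        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/*
 * A 64-bit counter delivered as two big-endian 32-bit words, high word
 * first.  Byte-swapping each word and pairing them as high:low yields the
 * host value; storing that pair back into a u64 on a little-endian host
 * is what forces the extra word swap in the driver's little-endian branch.
 */
static uint64_t be64_pair_to_host(const uint32_t be[2])
{
        return ((uint64_t)be32_to_host(be[0]) << 32) | be32_to_host(be[1]);
}

int main(void)
{
        /* 0x0000000100000002 as firmware would DMA it (big-endian words) */
        uint8_t raw[8] = { 0, 0, 0, 1, 0, 0, 0, 2 };
        uint32_t wire[2];

        memcpy(wire, raw, sizeof(wire));
        printf("counter = %" PRIu64 "\n", be64_pair_to_host(wire));
        return 0;
}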
+ */ +static void +bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn) +{ + struct bfa_fcport_trunk_s *trunk = &fcport->trunk; + struct bfi_fcport_trunk_link_s *tlink; + struct bfa_trunk_link_attr_s *lattr; + enum bfa_trunk_state state_prev; + int i; + int link_bm = 0; + + bfa_trc(fcport->bfa, fcport->cfg.trunked); + WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE && + scn->trunk_state != BFA_TRUNK_OFFLINE); + + bfa_trc(fcport->bfa, trunk->attr.state); + bfa_trc(fcport->bfa, scn->trunk_state); + bfa_trc(fcport->bfa, scn->trunk_speed); + + /* + * Save off new state for trunk attribute query + */ + state_prev = trunk->attr.state; + if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED)) + trunk->attr.state = scn->trunk_state; + trunk->attr.speed = scn->trunk_speed; + for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) { + lattr = &trunk->attr.link_attr[i]; + tlink = &scn->tlink[i]; + + lattr->link_state = tlink->state; + lattr->trunk_wwn = tlink->trunk_wwn; + lattr->fctl = tlink->fctl; + lattr->speed = tlink->speed; + lattr->deskew = be32_to_cpu(tlink->deskew); + + if (tlink->state == BFA_TRUNK_LINK_STATE_UP) { + fcport->speed = tlink->speed; + fcport->topology = BFA_PORT_TOPOLOGY_P2P; + link_bm |= 1 << i; + } + + bfa_trc(fcport->bfa, lattr->link_state); + bfa_trc(fcport->bfa, lattr->trunk_wwn); + bfa_trc(fcport->bfa, lattr->fctl); + bfa_trc(fcport->bfa, lattr->speed); + bfa_trc(fcport->bfa, lattr->deskew); + } + + switch (link_bm) { + case 3: + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)"); + break; + case 2: + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)"); + break; + case 1: + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)"); + break; + default: + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_TRUNK_SCN, 0, "Trunk down"); + } + + /* + * Notify upper layers if trunk state changed. + */ + if ((state_prev != trunk->attr.state) || + (scn->trunk_state == BFA_TRUNK_OFFLINE)) { + bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ? + BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE); + } +} + +static void +bfa_trunk_iocdisable(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + int i = 0; + + /* + * In trunked mode, notify upper layers that link is down + */ + if (fcport->cfg.trunked) { + if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE) + bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE); + + fcport->trunk.attr.state = BFA_TRUNK_OFFLINE; + fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN; + for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) { + fcport->trunk.attr.link_attr[i].trunk_wwn = 0; + fcport->trunk.attr.link_attr[i].fctl = + BFA_TRUNK_LINK_FCTL_NORMAL; + fcport->trunk.attr.link_attr[i].link_state = + BFA_TRUNK_LINK_STATE_DN_LINKDN; + fcport->trunk.attr.link_attr[i].speed = + BFA_PORT_SPEED_UNKNOWN; + fcport->trunk.attr.link_attr[i].deskew = 0; + } + } +} + +/* + * Called to initialize port attributes + */ +void +bfa_fcport_init(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + /* + * Initialize port attributes from IOC hardware data. 
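bfa_trunk_scn() above folds the per-link up/down state into a small bitmap (link_bm |= 1 << i) and then switches on the bitmap to pick the "Trunk up(0,1)" style log string. The same decode in isolation, with the two-link assumption made explicit:

#include <stdio.h>

#define TRUNK_MAX_PORTS 2       /* the decode above assumes two trunk links */

static void log_trunk_state(const int link_up[TRUNK_MAX_PORTS])
{
        int link_bm = 0;
        int i;

        for (i = 0; i < TRUNK_MAX_PORTS; i++)
                if (link_up[i])
                        link_bm |= 1 << i;

        switch (link_bm) {
        case 3:
                printf("Trunk up(0,1)\n");
                break;
        case 2:
                printf("Trunk up(-,1)\n");
                break;
        case 1:
                printf("Trunk up(0,-)\n");
                break;
        default:
                printf("Trunk down\n");
        }
}

int main(void)
{
        int both_up[TRUNK_MAX_PORTS] = { 1, 1 };
        int only_first[TRUNK_MAX_PORTS] = { 1, 0 };

        log_trunk_state(both_up);       /* Trunk up(0,1) */
        log_trunk_state(only_first);    /* Trunk up(0,-) */
        return 0;
}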
+ */ + bfa_fcport_set_wwns(fcport); + if (fcport->cfg.maxfrsize == 0) + fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc); + fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); + fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); + + if (bfa_fcport_is_pbcdisabled(bfa)) + bfa->modules.port.pbc_disabled = BFA_TRUE; + + WARN_ON(!fcport->cfg.maxfrsize); + WARN_ON(!fcport->cfg.rx_bbcredit); + WARN_ON(!fcport->speed_sup); +} + +/* + * Firmware message handler. + */ +void +bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + union bfi_fcport_i2h_msg_u i2hmsg; + + i2hmsg.msg = msg; + fcport->event_arg.i2hmsg = i2hmsg; + + bfa_trc(bfa, msg->mhdr.msg_id); + bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm)); + + switch (msg->mhdr.msg_id) { + case BFI_FCPORT_I2H_ENABLE_RSP: + if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) { + + fcport->stats_dma_ready = BFA_TRUE; + if (fcport->use_flash_cfg) { + fcport->cfg = i2hmsg.penable_rsp->port_cfg; + fcport->cfg.maxfrsize = + cpu_to_be16(fcport->cfg.maxfrsize); + fcport->cfg.path_tov = + cpu_to_be16(fcport->cfg.path_tov); + fcport->cfg.q_depth = + cpu_to_be16(fcport->cfg.q_depth); + + if (fcport->cfg.trunked) + fcport->trunk.attr.state = + BFA_TRUNK_OFFLINE; + else + fcport->trunk.attr.state = + BFA_TRUNK_DISABLED; + fcport->qos_attr.qos_bw = + i2hmsg.penable_rsp->port_cfg.qos_bw; + fcport->use_flash_cfg = BFA_FALSE; + } + + if (fcport->cfg.qos_enabled) + fcport->qos_attr.state = BFA_QOS_OFFLINE; + else + fcport->qos_attr.state = BFA_QOS_DISABLED; + + fcport->qos_attr.qos_bw_op = + i2hmsg.penable_rsp->port_cfg.qos_bw; + + if (fcport->cfg.bb_cr_enabled) + fcport->bbcr_attr.state = BFA_BBCR_OFFLINE; + else + fcport->bbcr_attr.state = BFA_BBCR_DISABLED; + + bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); + } + break; + + case BFI_FCPORT_I2H_DISABLE_RSP: + if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) + bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); + break; + + case BFI_FCPORT_I2H_EVENT: + if (fcport->cfg.bb_cr_enabled) + fcport->bbcr_attr.state = BFA_BBCR_OFFLINE; + else + fcport->bbcr_attr.state = BFA_BBCR_DISABLED; + + if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP) + bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP); + else { + if (i2hmsg.event->link_state.linkstate_rsn == + BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG) + bfa_sm_send_event(fcport, + BFA_FCPORT_SM_FAA_MISCONFIG); + else + bfa_sm_send_event(fcport, + BFA_FCPORT_SM_LINKDOWN); + } + fcport->qos_attr.qos_bw_op = + i2hmsg.event->link_state.qos_attr.qos_bw_op; + break; + + case BFI_FCPORT_I2H_TRUNK_SCN: + bfa_trunk_scn(fcport, i2hmsg.trunk_scn); + break; + + case BFI_FCPORT_I2H_STATS_GET_RSP: + /* + * check for timer pop before processing the rsp + */ + if (list_empty(&fcport->stats_pending_q) || + (fcport->stats_status == BFA_STATUS_ETIMER)) + break; + + bfa_timer_stop(&fcport->timer); + fcport->stats_status = i2hmsg.pstatsget_rsp->status; + __bfa_cb_fcport_stats_get(fcport, BFA_TRUE); + break; + + case BFI_FCPORT_I2H_STATS_CLEAR_RSP: + /* + * check for timer pop before processing the rsp + */ + if (list_empty(&fcport->statsclr_pending_q) || + (fcport->stats_status == BFA_STATUS_ETIMER)) + break; + + bfa_timer_stop(&fcport->timer); + fcport->stats_status = BFA_STATUS_OK; + __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE); + break; + + case BFI_FCPORT_I2H_ENABLE_AEN: + bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE); + break; + + case BFI_FCPORT_I2H_DISABLE_AEN: + bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE); + 
break; + + default: + WARN_ON(1); + break; + } +} + +/* + * Registered callback for port events. + */ +void +bfa_fcport_event_register(struct bfa_s *bfa, + void (*cbfn) (void *cbarg, + enum bfa_port_linkstate event), + void *cbarg) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + fcport->event_cbfn = cbfn; + fcport->event_cbarg = cbarg; +} + +bfa_status_t +bfa_fcport_enable(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + if (bfa_fcport_is_pbcdisabled(bfa)) + return BFA_STATUS_PBC; + + if (bfa_ioc_is_disabled(&bfa->ioc)) + return BFA_STATUS_IOC_DISABLED; + + if (fcport->diag_busy) + return BFA_STATUS_DIAG_BUSY; + + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE); + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcport_disable(struct bfa_s *bfa) +{ + if (bfa_fcport_is_pbcdisabled(bfa)) + return BFA_STATUS_PBC; + + if (bfa_ioc_is_disabled(&bfa->ioc)) + return BFA_STATUS_IOC_DISABLED; + + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE); + return BFA_STATUS_OK; +} + +/* If PBC is disabled on port, return error */ +bfa_status_t +bfa_fcport_is_pbcdisabled(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; + + if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) { + bfa_trc(bfa, fcport->pwwn); + return BFA_STATUS_PBC; + } + return BFA_STATUS_OK; +} + +/* + * Configure port speed. + */ +bfa_status_t +bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, speed); + + if (fcport->cfg.trunked == BFA_TRUE) + return BFA_STATUS_TRUNK_ENABLED; + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && + (speed == BFA_PORT_SPEED_16GBPS)) + return BFA_STATUS_UNSUPP_SPEED; + if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) { + bfa_trc(bfa, fcport->speed_sup); + return BFA_STATUS_UNSUPP_SPEED; + } + + /* Port speed entered needs to be checked */ + if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) { + /* For CT2, 1G is not supported */ + if ((speed == BFA_PORT_SPEED_1GBPS) && + (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) + return BFA_STATUS_UNSUPP_SPEED; + + /* Already checked for Auto Speed and Max Speed supp */ + if (!(speed == BFA_PORT_SPEED_1GBPS || + speed == BFA_PORT_SPEED_2GBPS || + speed == BFA_PORT_SPEED_4GBPS || + speed == BFA_PORT_SPEED_8GBPS || + speed == BFA_PORT_SPEED_16GBPS || + speed == BFA_PORT_SPEED_AUTO)) + return BFA_STATUS_UNSUPP_SPEED; + } else { + if (speed != BFA_PORT_SPEED_10GBPS) + return BFA_STATUS_UNSUPP_SPEED; + } + + fcport->cfg.speed = speed; + + return BFA_STATUS_OK; +} + +/* + * Get current speed. + */ +enum bfa_port_speed +bfa_fcport_get_speed(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->speed; +} + +/* + * Configure port topology. 
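bfa_fcport_cfg_speed() above accepts auto-negotiation, rejects anything above the supported maximum, and otherwise insists on one of the discrete FC rates (the extra ASIC and topology restrictions are not repeated here). A reduced sketch of just that generic check, using a hypothetical encoding in which the enumerator value equals the rate in Gb/s:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical encoding: 0 means auto-negotiate, otherwise the rate in Gb/s. */
enum port_speed {
        SPEED_AUTO = 0, SPEED_1G = 1, SPEED_2G = 2,
        SPEED_4G = 4, SPEED_8G = 8, SPEED_16G = 16
};

static bool speed_cfg_valid(enum port_speed speed, enum port_speed speed_sup)
{
        if (speed == SPEED_AUTO)
                return true;
        if (speed > speed_sup)          /* above the adapter's maximum */
                return false;
        switch (speed) {
        case SPEED_1G: case SPEED_2G: case SPEED_4G:
        case SPEED_8G: case SPEED_16G:
                return true;            /* one of the discrete FC rates */
        default:
                return false;
        }
}

int main(void)
{
        printf("%d\n", speed_cfg_valid(SPEED_8G, SPEED_16G));  /* 1 */
        printf("%d\n", speed_cfg_valid(SPEED_16G, SPEED_8G));  /* 0 */
        printf("%d\n", speed_cfg_valid(SPEED_AUTO, SPEED_8G)); /* 1 */
        return 0;
}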
+ */ +bfa_status_t +bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, topology); + bfa_trc(bfa, fcport->cfg.topology); + + switch (topology) { + case BFA_PORT_TOPOLOGY_P2P: + break; + + case BFA_PORT_TOPOLOGY_LOOP: + if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) || + (fcport->qos_attr.state != BFA_QOS_DISABLED)) + return BFA_STATUS_ERROR_QOS_ENABLED; + if (fcport->cfg.ratelimit != BFA_FALSE) + return BFA_STATUS_ERROR_TRL_ENABLED; + if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) || + (fcport->trunk.attr.state != BFA_TRUNK_DISABLED)) + return BFA_STATUS_ERROR_TRUNK_ENABLED; + if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) || + (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS)) + return BFA_STATUS_UNSUPP_SPEED; + if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) + return BFA_STATUS_LOOP_UNSUPP_MEZZ; + if (bfa_fcport_is_dport(bfa) != BFA_FALSE) + return BFA_STATUS_DPORT_ERR; + if (bfa_fcport_is_ddport(bfa) != BFA_FALSE) + return BFA_STATUS_DPORT_ERR; + break; + + case BFA_PORT_TOPOLOGY_AUTO: + break; + + default: + return BFA_STATUS_EINVAL; + } + + fcport->cfg.topology = topology; + return BFA_STATUS_OK; +} + +/* + * Get current topology. + */ +enum bfa_port_topology +bfa_fcport_get_topology(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->topology; +} + +/* + * Get config topology. + */ +enum bfa_port_topology +bfa_fcport_get_cfg_topology(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->cfg.topology; +} + +bfa_status_t +bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, alpa); + bfa_trc(bfa, fcport->cfg.cfg_hardalpa); + bfa_trc(bfa, fcport->cfg.hardalpa); + + fcport->cfg.cfg_hardalpa = BFA_TRUE; + fcport->cfg.hardalpa = alpa; + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcport_clr_hardalpa(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, fcport->cfg.cfg_hardalpa); + bfa_trc(bfa, fcport->cfg.hardalpa); + + fcport->cfg.cfg_hardalpa = BFA_FALSE; + return BFA_STATUS_OK; +} + +bfa_boolean_t +bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + *alpa = fcport->cfg.hardalpa; + return fcport->cfg.cfg_hardalpa; +} + +u8 +bfa_fcport_get_myalpa(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->myalpa; +} + +bfa_status_t +bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, maxfrsize); + bfa_trc(bfa, fcport->cfg.maxfrsize); + + /* with in range */ + if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ)) + return BFA_STATUS_INVLD_DFSZ; + + /* power of 2, if not the max frame size of 2112 */ + if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1))) + return BFA_STATUS_INVLD_DFSZ; + + fcport->cfg.maxfrsize = maxfrsize; + return BFA_STATUS_OK; +} + +u16 +bfa_fcport_get_maxfrsize(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->cfg.maxfrsize; +} + +u8 +bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa) +{ + if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP) + return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit; + + else + return 0; +} + +void +bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) +{ + struct bfa_fcport_s *fcport = 
BFA_FCPORT_MOD(bfa); + + fcport->cfg.tx_bbcredit = (u8)tx_bbcredit; +} + +/* + * Get port attributes. + */ + +wwn_t +bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + if (node) + return fcport->nwwn; + else + return fcport->pwwn; +} + +void +bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + memset(attr, 0, sizeof(struct bfa_port_attr_s)); + + attr->nwwn = fcport->nwwn; + attr->pwwn = fcport->pwwn; + + attr->factorypwwn = bfa->ioc.attr->mfg_pwwn; + attr->factorynwwn = bfa->ioc.attr->mfg_nwwn; + + memcpy(&attr->pport_cfg, &fcport->cfg, + sizeof(struct bfa_port_cfg_s)); + /* speed attributes */ + attr->pport_cfg.speed = fcport->cfg.speed; + attr->speed_supported = fcport->speed_sup; + attr->speed = fcport->speed; + attr->cos_supported = FC_CLASS_3; + + /* topology attributes */ + attr->pport_cfg.topology = fcport->cfg.topology; + attr->topology = fcport->topology; + attr->pport_cfg.trunked = fcport->cfg.trunked; + + /* beacon attributes */ + attr->beacon = fcport->beacon; + attr->link_e2e_beacon = fcport->link_e2e_beacon; + + attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); + attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa); + attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm); + + attr->fec_state = fcport->fec_state; + + /* PBC Disabled State */ + if (bfa_fcport_is_pbcdisabled(bfa)) + attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED; + else { + if (bfa_ioc_is_disabled(&fcport->bfa->ioc)) + attr->port_state = BFA_PORT_ST_IOCDIS; + else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc)) + attr->port_state = BFA_PORT_ST_FWMISMATCH; + } + + /* FCoE vlan */ + attr->fcoe_vlan = fcport->fcoe_vlan; +} + +#define BFA_FCPORT_STATS_TOV 1000 + +/* + * Fetch port statistics (FCQoS or FCoE). + */ +bfa_status_t +bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + if (!bfa_iocfc_is_operational(bfa) || + !fcport->stats_dma_ready) + return BFA_STATUS_IOC_NON_OP; + + if (!list_empty(&fcport->statsclr_pending_q)) + return BFA_STATUS_DEVBUSY; + + if (list_empty(&fcport->stats_pending_q)) { + list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q); + bfa_fcport_send_stats_get(fcport); + bfa_timer_start(bfa, &fcport->timer, + bfa_fcport_stats_get_timeout, + fcport, BFA_FCPORT_STATS_TOV); + } else + list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q); + + return BFA_STATUS_OK; +} + +/* + * Reset port statistics (FCQoS or FCoE). + */ +bfa_status_t +bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + if (!bfa_iocfc_is_operational(bfa) || + !fcport->stats_dma_ready) + return BFA_STATUS_IOC_NON_OP; + + if (!list_empty(&fcport->stats_pending_q)) + return BFA_STATUS_DEVBUSY; + + if (list_empty(&fcport->statsclr_pending_q)) { + list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q); + bfa_fcport_send_stats_clear(fcport); + bfa_timer_start(bfa, &fcport->timer, + bfa_fcport_stats_clr_timeout, + fcport, BFA_FCPORT_STATS_TOV); + } else + list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q); + + return BFA_STATUS_OK; +} + +/* + * Fetch port attributes. 
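 *
 * The port state queries that follow are derived from the port state
 * machine (hal_port_sm_table) rather than from cached attributes.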
+ */ +bfa_boolean_t +bfa_fcport_is_disabled(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return bfa_sm_to_state(hal_port_sm_table, fcport->sm) == + BFA_PORT_ST_DISABLED; + +} + +bfa_boolean_t +bfa_fcport_is_dport(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) == + BFA_PORT_ST_DPORT); +} + +bfa_boolean_t +bfa_fcport_is_ddport(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) == + BFA_PORT_ST_DDPORT); +} + +bfa_status_t +bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa); + + bfa_trc(bfa, ioc_type); + + if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0)) + return BFA_STATUS_QOS_BW_INVALID; + + if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100) + return BFA_STATUS_QOS_BW_INVALID; + + if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) || + (qos_bw->low > qos_bw->high)) + return BFA_STATUS_QOS_BW_INVALID; + + if ((ioc_type == BFA_IOC_TYPE_FC) && + (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP)) + fcport->cfg.qos_bw = *qos_bw; + + return BFA_STATUS_OK; +} + +bfa_boolean_t +bfa_fcport_is_ratelim(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE; + +} + +/* + * Enable/Disable FAA feature in port config + */ +void +bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, state); + fcport->cfg.faa_state = state; +} + +/* + * Get default minimum ratelim speed + */ +enum bfa_port_speed +bfa_fcport_get_ratelim_speed(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, fcport->cfg.trl_def_speed); + return fcport->cfg.trl_def_speed; + +} + +void +bfa_fcport_beacon(void *dev, bfa_boolean_t beacon, + bfa_boolean_t link_e2e_beacon) +{ + struct bfa_s *bfa = dev; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, beacon); + bfa_trc(bfa, link_e2e_beacon); + bfa_trc(bfa, fcport->beacon); + bfa_trc(bfa, fcport->link_e2e_beacon); + + fcport->beacon = beacon; + fcport->link_e2e_beacon = link_e2e_beacon; +} + +bfa_boolean_t +bfa_fcport_is_linkup(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return (!fcport->cfg.trunked && + bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) || + (fcport->cfg.trunked && + fcport->trunk.attr.state == BFA_TRUNK_ONLINE); +} + +bfa_boolean_t +bfa_fcport_is_qos_enabled(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->cfg.qos_enabled; +} + +bfa_boolean_t +bfa_fcport_is_trunk_enabled(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->cfg.trunked; +} + +bfa_status_t +bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, on_off); + + if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC) + return BFA_STATUS_BBCR_FC_ONLY; + + if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) && + (bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK)) + return BFA_STATUS_CMD_NOTSUPP_MEZZ; + + if (on_off) { + if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) + return BFA_STATUS_TOPOLOGY_LOOP; + + if (fcport->cfg.qos_enabled) + return 
BFA_STATUS_ERROR_QOS_ENABLED; + + if (fcport->cfg.trunked) + return BFA_STATUS_TRUNK_ENABLED; + + if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) && + (fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc))) + return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT; + + if (bfa_ioc_speed_sup(&bfa->ioc) < BFA_PORT_SPEED_8GBPS) + return BFA_STATUS_FEATURE_NOT_SUPPORTED; + + if (fcport->cfg.bb_cr_enabled) { + if (bb_scn != fcport->cfg.bb_scn) + return BFA_STATUS_BBCR_CFG_NO_CHANGE; + else + return BFA_STATUS_NO_CHANGE; + } + + if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX)) + bb_scn = BFA_BB_SCN_DEF; + + fcport->cfg.bb_cr_enabled = on_off; + fcport->cfg.bb_scn = bb_scn; + } else { + if (!fcport->cfg.bb_cr_enabled) + return BFA_STATUS_NO_CHANGE; + + fcport->cfg.bb_cr_enabled = on_off; + fcport->cfg.bb_scn = 0; + } + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcport_get_bbcr_attr(struct bfa_s *bfa, + struct bfa_bbcr_attr_s *bbcr_attr) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC) + return BFA_STATUS_BBCR_FC_ONLY; + + if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) + return BFA_STATUS_TOPOLOGY_LOOP; + + *bbcr_attr = fcport->bbcr_attr; + + return BFA_STATUS_OK; +} + +void +bfa_fcport_dportenable(struct bfa_s *bfa) +{ + /* + * Assume caller check for port is in disable state + */ + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE); + bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE); +} + +void +bfa_fcport_dportdisable(struct bfa_s *bfa) +{ + /* + * Assume caller check for port is in disable state + */ + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE); + bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE); +} + +static void +bfa_fcport_ddportenable(struct bfa_s *bfa) +{ + /* + * Assume caller check for port is in disable state + */ + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE); +} + +static void +bfa_fcport_ddportdisable(struct bfa_s *bfa) +{ + /* + * Assume caller check for port is in disable state + */ + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE); +} + +/* + * Rport State machine functions + */ +/* + * Beginning state, only online event expected. + */ +static void +bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_CREATE: + bfa_stats(rp, sm_un_cr); + bfa_sm_set_state(rp, bfa_rport_sm_created); + break; + + default: + bfa_stats(rp, sm_un_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +static void +bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_ONLINE: + bfa_stats(rp, sm_cr_on); + if (bfa_rport_send_fwcreate(rp)) + bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); + else + bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_cr_del); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_rport_free(rp); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_cr_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + break; + + default: + bfa_stats(rp, sm_cr_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +/* + * Waiting for rport create response from firmware. 
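 *
 * Delete and offline requests that arrive while the response is still
 * outstanding are parked in the corresponding *_pending states.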
+ */ +static void +bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_FWRSP: + bfa_stats(rp, sm_fwc_rsp); + bfa_sm_set_state(rp, bfa_rport_sm_online); + bfa_rport_online_cb(rp); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_fwc_del); + bfa_sm_set_state(rp, bfa_rport_sm_delete_pending); + break; + + case BFA_RPORT_SM_OFFLINE: + bfa_stats(rp, sm_fwc_off); + bfa_sm_set_state(rp, bfa_rport_sm_offline_pending); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_fwc_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + break; + + default: + bfa_stats(rp, sm_fwc_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +/* + * Request queue is full, awaiting queue resume to send create request. + */ +static void +bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_QRESUME: + bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); + bfa_rport_send_fwcreate(rp); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_fwc_del); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_reqq_wcancel(&rp->reqq_wait); + bfa_rport_free(rp); + break; + + case BFA_RPORT_SM_OFFLINE: + bfa_stats(rp, sm_fwc_off); + bfa_sm_set_state(rp, bfa_rport_sm_offline); + bfa_reqq_wcancel(&rp->reqq_wait); + bfa_rport_offline_cb(rp); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_fwc_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + bfa_reqq_wcancel(&rp->reqq_wait); + break; + + default: + bfa_stats(rp, sm_fwc_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +/* + * Online state - normal parking state. + */ +static void +bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + struct bfi_rport_qos_scn_s *qos_scn; + + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_OFFLINE: + bfa_stats(rp, sm_on_off); + if (bfa_rport_send_fwdelete(rp)) + bfa_sm_set_state(rp, bfa_rport_sm_fwdelete); + else + bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_on_del); + if (bfa_rport_send_fwdelete(rp)) + bfa_sm_set_state(rp, bfa_rport_sm_deleting); + else + bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_on_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + break; + + case BFA_RPORT_SM_SET_SPEED: + bfa_rport_send_fwspeed(rp); + break; + + case BFA_RPORT_SM_QOS_SCN: + qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg; + rp->qos_attr = qos_scn->new_qos_attr; + bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id); + bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id); + bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority); + bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority); + + qos_scn->old_qos_attr.qos_flow_id = + be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id); + qos_scn->new_qos_attr.qos_flow_id = + be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id); + + if (qos_scn->old_qos_attr.qos_flow_id != + qos_scn->new_qos_attr.qos_flow_id) + bfa_cb_rport_qos_scn_flowid(rp->rport_drv, + qos_scn->old_qos_attr, + qos_scn->new_qos_attr); + if (qos_scn->old_qos_attr.qos_priority != + qos_scn->new_qos_attr.qos_priority) + bfa_cb_rport_qos_scn_prio(rp->rport_drv, + qos_scn->old_qos_attr, + qos_scn->new_qos_attr); + break; + + default: + bfa_stats(rp, sm_on_unexp); + 
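		/*
		 * Any other event is unexpected while online; count it
		 * and report it as a state machine fault.
		 */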
bfa_sm_fault(rp->bfa, event); + } +} + +/* + * Firmware rport is being deleted - awaiting f/w response. + */ +static void +bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_FWRSP: + bfa_stats(rp, sm_fwd_rsp); + bfa_sm_set_state(rp, bfa_rport_sm_offline); + bfa_rport_offline_cb(rp); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_fwd_del); + bfa_sm_set_state(rp, bfa_rport_sm_deleting); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_fwd_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + bfa_rport_offline_cb(rp); + break; + + default: + bfa_stats(rp, sm_fwd_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +static void +bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_QRESUME: + bfa_sm_set_state(rp, bfa_rport_sm_fwdelete); + bfa_rport_send_fwdelete(rp); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_fwd_del); + bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_fwd_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + bfa_reqq_wcancel(&rp->reqq_wait); + bfa_rport_offline_cb(rp); + break; + + default: + bfa_stats(rp, sm_fwd_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +/* + * Offline state. + */ +static void +bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_off_del); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_rport_free(rp); + break; + + case BFA_RPORT_SM_ONLINE: + bfa_stats(rp, sm_off_on); + if (bfa_rport_send_fwcreate(rp)) + bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); + else + bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_off_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + break; + + case BFA_RPORT_SM_OFFLINE: + bfa_rport_offline_cb(rp); + break; + + default: + bfa_stats(rp, sm_off_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +/* + * Rport is deleted, waiting for firmware response to delete. + */ +static void +bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_FWRSP: + bfa_stats(rp, sm_del_fwrsp); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_rport_free(rp); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_del_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_rport_free(rp); + break; + + default: + bfa_sm_fault(rp->bfa, event); + } +} + +static void +bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_QRESUME: + bfa_stats(rp, sm_del_fwrsp); + bfa_sm_set_state(rp, bfa_rport_sm_deleting); + bfa_rport_send_fwdelete(rp); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_del_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_reqq_wcancel(&rp->reqq_wait); + bfa_rport_free(rp); + break; + + default: + bfa_sm_fault(rp->bfa, event); + } +} + +/* + * Waiting for rport create response from firmware. A delete is pending. 
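 *
 * Once the create response arrives, the firmware delete is issued
 * immediately, or deferred until request queue space frees up.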
+ */ +static void +bfa_rport_sm_delete_pending(struct bfa_rport_s *rp, + enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_FWRSP: + bfa_stats(rp, sm_delp_fwrsp); + if (bfa_rport_send_fwdelete(rp)) + bfa_sm_set_state(rp, bfa_rport_sm_deleting); + else + bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_delp_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_rport_free(rp); + break; + + default: + bfa_stats(rp, sm_delp_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +/* + * Waiting for rport create response from firmware. Rport offline is pending. + */ +static void +bfa_rport_sm_offline_pending(struct bfa_rport_s *rp, + enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_FWRSP: + bfa_stats(rp, sm_offp_fwrsp); + if (bfa_rport_send_fwdelete(rp)) + bfa_sm_set_state(rp, bfa_rport_sm_fwdelete); + else + bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_offp_del); + bfa_sm_set_state(rp, bfa_rport_sm_delete_pending); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_offp_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + bfa_rport_offline_cb(rp); + break; + + default: + bfa_stats(rp, sm_offp_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +/* + * IOC h/w failed. + */ +static void +bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_OFFLINE: + bfa_stats(rp, sm_iocd_off); + bfa_rport_offline_cb(rp); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_iocd_del); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_rport_free(rp); + break; + + case BFA_RPORT_SM_ONLINE: + bfa_stats(rp, sm_iocd_on); + if (bfa_rport_send_fwcreate(rp)) + bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); + else + bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull); + break; + + case BFA_RPORT_SM_HWFAIL: + break; + + default: + bfa_stats(rp, sm_iocd_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + + + +/* + * bfa_rport_private BFA rport private functions + */ + +static void +__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_rport_s *rp = cbarg; + + if (complete) + bfa_cb_rport_online(rp->rport_drv); +} + +static void +__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_rport_s *rp = cbarg; + + if (complete) + bfa_cb_rport_offline(rp->rport_drv); +} + +static void +bfa_rport_qresume(void *cbarg) +{ + struct bfa_rport_s *rp = cbarg; + + bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME); +} + +void +bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, + struct bfa_s *bfa) +{ + struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa); + + if (cfg->fwcfg.num_rports < BFA_RPORT_MIN) + cfg->fwcfg.num_rports = BFA_RPORT_MIN; + + /* kva memory */ + bfa_mem_kva_setup(minfo, rport_kva, + cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s)); +} + +void +bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_pcidev_s *pcidev) +{ + struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa); + struct bfa_rport_s *rp; + u16 i; + + INIT_LIST_HEAD(&mod->rp_free_q); + INIT_LIST_HEAD(&mod->rp_active_q); + INIT_LIST_HEAD(&mod->rp_unused_q); + + rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod); + mod->rps_list = rp; + 
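	/*
	 * The rport array is carved out of the KVA block reserved in
	 * bfa_rport_meminfo(); every entry except tag 0 is parked on
	 * rp_free_q.  A minimal usage sketch of this pool through the
	 * API defined further below -- my_rport_drv and my_rport_info
	 * are hypothetical caller-side objects:
	 *
	 *	struct bfa_rport_s *rp;
	 *
	 *	rp = bfa_rport_create(bfa, my_rport_drv);
	 *	if (rp != NULL)
	 *		bfa_rport_online(rp, &my_rport_info);
	 */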
mod->num_rports = cfg->fwcfg.num_rports; + + WARN_ON(!mod->num_rports || + (mod->num_rports & (mod->num_rports - 1))); + + for (i = 0; i < mod->num_rports; i++, rp++) { + memset(rp, 0, sizeof(struct bfa_rport_s)); + rp->bfa = bfa; + rp->rport_tag = i; + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + + /* + * - is unused + */ + if (i) + list_add_tail(&rp->qe, &mod->rp_free_q); + + bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp); + } + + /* + * consume memory + */ + bfa_mem_kva_curp(mod) = (u8 *) rp; +} + +void +bfa_rport_iocdisable(struct bfa_s *bfa) +{ + struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa); + struct bfa_rport_s *rport; + struct list_head *qe, *qen; + + /* Enqueue unused rport resources to free_q */ + list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q); + + list_for_each_safe(qe, qen, &mod->rp_active_q) { + rport = (struct bfa_rport_s *) qe; + bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL); + } +} + +static struct bfa_rport_s * +bfa_rport_alloc(struct bfa_rport_mod_s *mod) +{ + struct bfa_rport_s *rport; + + bfa_q_deq(&mod->rp_free_q, &rport); + if (rport) + list_add_tail(&rport->qe, &mod->rp_active_q); + + return rport; +} + +static void +bfa_rport_free(struct bfa_rport_s *rport) +{ + struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa); + + WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport)); + list_del(&rport->qe); + list_add_tail(&rport->qe, &mod->rp_free_q); +} + +static bfa_boolean_t +bfa_rport_send_fwcreate(struct bfa_rport_s *rp) +{ + struct bfi_rport_create_req_s *m; + + /* + * check for room in queue to send request now + */ + m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); + if (!m) { + bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ, + bfa_fn_lpu(rp->bfa)); + m->bfa_handle = rp->rport_tag; + m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz); + m->pid = rp->rport_info.pid; + m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag); + m->local_pid = rp->rport_info.local_pid; + m->fc_class = rp->rport_info.fc_class; + m->vf_en = rp->rport_info.vf_en; + m->vf_id = rp->rport_info.vf_id; + m->cisc = rp->rport_info.cisc; + + /* + * queue I/O message to firmware + */ + bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh); + return BFA_TRUE; +} + +static bfa_boolean_t +bfa_rport_send_fwdelete(struct bfa_rport_s *rp) +{ + struct bfi_rport_delete_req_s *m; + + /* + * check for room in queue to send request now + */ + m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); + if (!m) { + bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ, + bfa_fn_lpu(rp->bfa)); + m->fw_handle = rp->fw_handle; + + /* + * queue I/O message to firmware + */ + bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh); + return BFA_TRUE; +} + +static bfa_boolean_t +bfa_rport_send_fwspeed(struct bfa_rport_s *rp) +{ + struct bfa_rport_speed_req_s *m; + + /* + * check for room in queue to send request now + */ + m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); + if (!m) { + bfa_trc(rp->bfa, rp->rport_info.speed); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ, + bfa_fn_lpu(rp->bfa)); + m->fw_handle = rp->fw_handle; + m->speed = (u8)rp->rport_info.speed; + + /* + * queue I/O message to firmware + */ + bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh); + return BFA_TRUE; +} + + + +/* + * bfa_rport_public + */ + +/* + * Rport interrupt processing. 
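 *
 * Create/delete responses and QoS notifications feed the owning
 * rport's state machine, while LIP state change notifications are
 * delivered directly through the bfa_cb_rport_scn_* callbacks.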
+ */ +void +bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + union bfi_rport_i2h_msg_u msg; + struct bfa_rport_s *rp; + + bfa_trc(bfa, m->mhdr.msg_id); + + msg.msg = m; + + switch (m->mhdr.msg_id) { + case BFI_RPORT_I2H_CREATE_RSP: + rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle); + rp->fw_handle = msg.create_rsp->fw_handle; + rp->qos_attr = msg.create_rsp->qos_attr; + bfa_rport_set_lunmask(bfa, rp); + WARN_ON(msg.create_rsp->status != BFA_STATUS_OK); + bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); + break; + + case BFI_RPORT_I2H_DELETE_RSP: + rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle); + WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK); + bfa_rport_unset_lunmask(bfa, rp); + bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); + break; + + case BFI_RPORT_I2H_QOS_SCN: + rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle); + rp->event_arg.fw_msg = msg.qos_scn_evt; + bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN); + break; + + case BFI_RPORT_I2H_LIP_SCN_ONLINE: + bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa), + &msg.lip_scn->loop_info); + bfa_cb_rport_scn_online(bfa); + break; + + case BFI_RPORT_I2H_LIP_SCN_OFFLINE: + bfa_cb_rport_scn_offline(bfa); + break; + + case BFI_RPORT_I2H_NO_DEV: + rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle); + bfa_cb_rport_scn_no_dev(rp->rport_drv); + break; + + default: + bfa_trc(bfa, m->mhdr.msg_id); + WARN_ON(1); + } +} + +void +bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw) +{ + struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa); + struct list_head *qe; + int i; + + for (i = 0; i < (mod->num_rports - num_rport_fw); i++) { + bfa_q_deq_tail(&mod->rp_free_q, &qe); + list_add_tail(qe, &mod->rp_unused_q); + } +} + +/* + * bfa_rport_api + */ + +struct bfa_rport_s * +bfa_rport_create(struct bfa_s *bfa, void *rport_drv) +{ + struct bfa_rport_s *rp; + + rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa)); + + if (rp == NULL) + return NULL; + + rp->bfa = bfa; + rp->rport_drv = rport_drv; + memset(&rp->stats, 0, sizeof(rp->stats)); + + WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit)); + bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE); + + return rp; +} + +void +bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info) +{ + WARN_ON(rport_info->max_frmsz == 0); + + /* + * Some JBODs are seen to be not setting PDU size correctly in PLOGI + * responses. Default to minimum size. 
+ */ + if (rport_info->max_frmsz == 0) { + bfa_trc(rport->bfa, rport->rport_tag); + rport_info->max_frmsz = FC_MIN_PDUSZ; + } + + rport->rport_info = *rport_info; + bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE); +} + +void +bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed) +{ + WARN_ON(speed == 0); + WARN_ON(speed == BFA_PORT_SPEED_AUTO); + + if (rport) { + rport->rport_info.speed = speed; + bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED); + } +} + +/* Set Rport LUN Mask */ +void +bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp) +{ + struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa); + wwn_t lp_wwn, rp_wwn; + u8 lp_tag = (u8)rp->rport_info.lp_tag; + + rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn; + lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn; + + BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask = + rp->lun_mask = BFA_TRUE; + bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag); +} + +/* Unset Rport LUN mask */ +void +bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp) +{ + struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa); + wwn_t lp_wwn, rp_wwn; + + rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn; + lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn; + + BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask = + rp->lun_mask = BFA_FALSE; + bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, + BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID); +} + +/* + * SGPG related functions + */ + +/* + * Compute and return memory needed by FCP(im) module. + */ +void +bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, + struct bfa_s *bfa) +{ + struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa); + struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa); + struct bfa_mem_dma_s *seg_ptr; + u16 nsegs, idx, per_seg_sgpg, num_sgpg; + u32 sgpg_sz = sizeof(struct bfi_sgpg_s); + + if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN) + cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; + else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX) + cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX; + + num_sgpg = cfg->drvcfg.num_sgpgs; + + nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz); + per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz); + + bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) { + if (num_sgpg >= per_seg_sgpg) { + num_sgpg -= per_seg_sgpg; + bfa_mem_dma_setup(minfo, seg_ptr, + per_seg_sgpg * sgpg_sz); + } else + bfa_mem_dma_setup(minfo, seg_ptr, + num_sgpg * sgpg_sz); + } + + /* kva memory */ + bfa_mem_kva_setup(minfo, sgpg_kva, + cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s)); +} + +void +bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_pcidev_s *pcidev) +{ + struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); + struct bfa_sgpg_s *hsgpg; + struct bfi_sgpg_s *sgpg; + u64 align_len; + struct bfa_mem_dma_s *seg_ptr; + u32 sgpg_sz = sizeof(struct bfi_sgpg_s); + u16 i, idx, nsegs, per_seg_sgpg, num_sgpg; + + union { + u64 pa; + union bfi_addr_u addr; + } sgpg_pa, sgpg_pa_tmp; + + INIT_LIST_HEAD(&mod->sgpg_q); + INIT_LIST_HEAD(&mod->sgpg_wait_q); + + bfa_trc(bfa, cfg->drvcfg.num_sgpgs); + + mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs; + + num_sgpg = cfg->drvcfg.num_sgpgs; + nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz); + + /* dma/kva mem claim */ + hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod); + + bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) { + + if (!bfa_mem_dma_virt(seg_ptr)) + break; + + align_len = 
BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) - + bfa_mem_dma_phys(seg_ptr); + + sgpg = (struct bfi_sgpg_s *) + (((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len); + sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len; + WARN_ON(sgpg_pa.pa & (sgpg_sz - 1)); + + per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz; + + for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) { + memset(hsgpg, 0, sizeof(*hsgpg)); + memset(sgpg, 0, sizeof(*sgpg)); + + hsgpg->sgpg = sgpg; + sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa); + hsgpg->sgpg_pa = sgpg_pa_tmp.addr; + list_add_tail(&hsgpg->qe, &mod->sgpg_q); + + sgpg++; + hsgpg++; + sgpg_pa.pa += sgpg_sz; + } + } + + bfa_mem_kva_curp(mod) = (u8 *) hsgpg; +} + +bfa_status_t +bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs) +{ + struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); + struct bfa_sgpg_s *hsgpg; + int i; + + if (mod->free_sgpgs < nsgpgs) + return BFA_STATUS_ENOMEM; + + for (i = 0; i < nsgpgs; i++) { + bfa_q_deq(&mod->sgpg_q, &hsgpg); + WARN_ON(!hsgpg); + list_add_tail(&hsgpg->qe, sgpg_q); + } + + mod->free_sgpgs -= nsgpgs; + return BFA_STATUS_OK; +} + +void +bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg) +{ + struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); + struct bfa_sgpg_wqe_s *wqe; + + mod->free_sgpgs += nsgpg; + WARN_ON(mod->free_sgpgs > mod->num_sgpgs); + + list_splice_tail_init(sgpg_q, &mod->sgpg_q); + + if (list_empty(&mod->sgpg_wait_q)) + return; + + /* + * satisfy as many waiting requests as possible + */ + do { + wqe = bfa_q_first(&mod->sgpg_wait_q); + if (mod->free_sgpgs < wqe->nsgpg) + nsgpg = mod->free_sgpgs; + else + nsgpg = wqe->nsgpg; + bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg); + wqe->nsgpg -= nsgpg; + if (wqe->nsgpg == 0) { + list_del(&wqe->qe); + wqe->cbfn(wqe->cbarg); + } + } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q)); +} + +void +bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg) +{ + struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); + + WARN_ON(nsgpg <= 0); + WARN_ON(nsgpg <= mod->free_sgpgs); + + wqe->nsgpg_total = wqe->nsgpg = nsgpg; + + /* + * allocate any left to this one first + */ + if (mod->free_sgpgs) { + /* + * no one else is waiting for SGPG + */ + WARN_ON(!list_empty(&mod->sgpg_wait_q)); + list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q); + wqe->nsgpg -= mod->free_sgpgs; + mod->free_sgpgs = 0; + } + + list_add_tail(&wqe->qe, &mod->sgpg_wait_q); +} + +void +bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe) +{ + struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); + + WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe)); + list_del(&wqe->qe); + + if (wqe->nsgpg_total != wqe->nsgpg) + bfa_sgpg_mfree(bfa, &wqe->sgpg_q, + wqe->nsgpg_total - wqe->nsgpg); +} + +void +bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg), + void *cbarg) +{ + INIT_LIST_HEAD(&wqe->sgpg_q); + wqe->cbfn = cbfn; + wqe->cbarg = cbarg; +} + +/* + * UF related functions + */ +/* + ***************************************************************************** + * Internal functions + ***************************************************************************** + */ +static void +__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_uf_s *uf = cbarg; + struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa); + + if (complete) + ufm->ufrecv(ufm->cbarg, uf); +} + +static void +claim_uf_post_msgs(struct bfa_uf_mod_s *ufm) +{ + struct bfi_uf_buf_post_s *uf_bp_msg; + u16 i; + u16 buf_len; + + ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) 
bfa_mem_kva_curp(ufm); + uf_bp_msg = ufm->uf_buf_posts; + + for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs; + i++, uf_bp_msg++) { + memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s)); + + uf_bp_msg->buf_tag = i; + buf_len = sizeof(struct bfa_uf_buf_s); + uf_bp_msg->buf_len = cpu_to_be16(buf_len); + bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST, + bfa_fn_lpu(ufm->bfa)); + bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i)); + } + + /* + * advance pointer beyond consumed memory + */ + bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg; +} + +static void +claim_ufs(struct bfa_uf_mod_s *ufm) +{ + u16 i; + struct bfa_uf_s *uf; + + /* + * Claim block of memory for UF list + */ + ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm); + + /* + * Initialize UFs and queue it in UF free queue + */ + for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) { + memset(uf, 0, sizeof(struct bfa_uf_s)); + uf->bfa = ufm->bfa; + uf->uf_tag = i; + uf->pb_len = BFA_PER_UF_DMA_SZ; + uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ); + uf->buf_pa = ufm_pbs_pa(ufm, i); + list_add_tail(&uf->qe, &ufm->uf_free_q); + } + + /* + * advance memory pointer + */ + bfa_mem_kva_curp(ufm) = (u8 *) uf; +} + +static void +uf_mem_claim(struct bfa_uf_mod_s *ufm) +{ + claim_ufs(ufm); + claim_uf_post_msgs(ufm); +} + +void +bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, + struct bfa_s *bfa) +{ + struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); + struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa); + u32 num_ufs = cfg->fwcfg.num_uf_bufs; + struct bfa_mem_dma_s *seg_ptr; + u16 nsegs, idx, per_seg_uf = 0; + + nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ); + per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ); + + bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) { + if (num_ufs >= per_seg_uf) { + num_ufs -= per_seg_uf; + bfa_mem_dma_setup(minfo, seg_ptr, + per_seg_uf * BFA_PER_UF_DMA_SZ); + } else + bfa_mem_dma_setup(minfo, seg_ptr, + num_ufs * BFA_PER_UF_DMA_SZ); + } + + /* kva memory */ + bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs * + (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s))); +} + +void +bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_pcidev_s *pcidev) +{ + struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); + + ufm->bfa = bfa; + ufm->num_ufs = cfg->fwcfg.num_uf_bufs; + INIT_LIST_HEAD(&ufm->uf_free_q); + INIT_LIST_HEAD(&ufm->uf_posted_q); + INIT_LIST_HEAD(&ufm->uf_unused_q); + + uf_mem_claim(ufm); +} + +static struct bfa_uf_s * +bfa_uf_get(struct bfa_uf_mod_s *uf_mod) +{ + struct bfa_uf_s *uf; + + bfa_q_deq(&uf_mod->uf_free_q, &uf); + return uf; +} + +static void +bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf) +{ + list_add_tail(&uf->qe, &uf_mod->uf_free_q); +} + +static bfa_status_t +bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf) +{ + struct bfi_uf_buf_post_s *uf_post_msg; + + uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP); + if (!uf_post_msg) + return BFA_STATUS_FAILED; + + memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag], + sizeof(struct bfi_uf_buf_post_s)); + bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh); + + bfa_trc(ufm->bfa, uf->uf_tag); + + list_add_tail(&uf->qe, &ufm->uf_posted_q); + return BFA_STATUS_OK; +} + +static void +bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod) +{ + struct bfa_uf_s *uf; + + while ((uf = bfa_uf_get(uf_mod)) != NULL) { + if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK) + break; + } +} + +static void +uf_recv(struct bfa_s *bfa, 
struct bfi_uf_frm_rcvd_s *m) +{ + struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); + u16 uf_tag = m->buf_tag; + struct bfa_uf_s *uf = &ufm->uf_list[uf_tag]; + struct bfa_uf_buf_s *uf_buf; + uint8_t *buf; + + uf_buf = (struct bfa_uf_buf_s *) + bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len); + buf = &uf_buf->d[0]; + + m->frm_len = be16_to_cpu(m->frm_len); + m->xfr_len = be16_to_cpu(m->xfr_len); + + list_del(&uf->qe); /* dequeue from posted queue */ + + uf->data_ptr = buf; + uf->data_len = m->xfr_len; + + WARN_ON(uf->data_len < sizeof(struct fchs_s)); + + if (uf->data_len == sizeof(struct fchs_s)) { + bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX, + uf->data_len, (struct fchs_s *)buf); + } else { + u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s))); + bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF, + BFA_PL_EID_RX, uf->data_len, + (struct fchs_s *)buf, pld_w0); + } + + if (bfa->fcs) + __bfa_cb_uf_recv(uf, BFA_TRUE); + else + bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf); +} + +void +bfa_uf_iocdisable(struct bfa_s *bfa) +{ + struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); + struct bfa_uf_s *uf; + struct list_head *qe, *qen; + + /* Enqueue unused uf resources to free_q */ + list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q); + + list_for_each_safe(qe, qen, &ufm->uf_posted_q) { + uf = (struct bfa_uf_s *) qe; + list_del(&uf->qe); + bfa_uf_put(ufm, uf); + } +} + +void +bfa_uf_start(struct bfa_s *bfa) +{ + bfa_uf_post_all(BFA_UF_MOD(bfa)); +} + +/* + * Register handler for all unsolicted receive frames. + * + * @param[in] bfa BFA instance + * @param[in] ufrecv receive handler function + * @param[in] cbarg receive handler arg + */ +void +bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg) +{ + struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); + + ufm->ufrecv = ufrecv; + ufm->cbarg = cbarg; +} + +/* + * Free an unsolicited frame back to BFA. 
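 * The buffer goes back on the free list and is reposted to the
 * firmware along with any other free buffers.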
+ * + * @param[in] uf unsolicited frame to be freed + * + * @return None + */ +void +bfa_uf_free(struct bfa_uf_s *uf) +{ + bfa_uf_put(BFA_UF_MOD(uf->bfa), uf); + bfa_uf_post_all(BFA_UF_MOD(uf->bfa)); +} + + + +/* + * uf_pub BFA uf module public functions + */ +void +bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) +{ + bfa_trc(bfa, msg->mhdr.msg_id); + + switch (msg->mhdr.msg_id) { + case BFI_UF_I2H_FRM_RCVD: + uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg); + break; + + default: + bfa_trc(bfa, msg->mhdr.msg_id); + WARN_ON(1); + } +} + +void +bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw) +{ + struct bfa_uf_mod_s *mod = BFA_UF_MOD(bfa); + struct list_head *qe; + int i; + + for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) { + bfa_q_deq_tail(&mod->uf_free_q, &qe); + list_add_tail(qe, &mod->uf_unused_q); + } +} + +/* + * Dport forward declaration + */ + +enum bfa_dport_test_state_e { + BFA_DPORT_ST_DISABLED = 0, /*!< dport is disabled */ + BFA_DPORT_ST_INP = 1, /*!< test in progress */ + BFA_DPORT_ST_COMP = 2, /*!< test complete successfully */ + BFA_DPORT_ST_NO_SFP = 3, /*!< sfp is not present */ + BFA_DPORT_ST_NOTSTART = 4, /*!< test not start dport is enabled */ +}; + +/* + * BFA DPORT state machine events + */ +enum bfa_dport_sm_event { + BFA_DPORT_SM_ENABLE = 1, /* dport enable event */ + BFA_DPORT_SM_DISABLE = 2, /* dport disable event */ + BFA_DPORT_SM_FWRSP = 3, /* fw enable/disable rsp */ + BFA_DPORT_SM_QRESUME = 4, /* CQ space available */ + BFA_DPORT_SM_HWFAIL = 5, /* IOC h/w failure */ + BFA_DPORT_SM_START = 6, /* re-start dport test */ + BFA_DPORT_SM_REQFAIL = 7, /* request failure */ + BFA_DPORT_SM_SCN = 8, /* state change notify frm fw */ +}; + +static void bfa_dport_sm_disabled(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_enabling(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_enabled(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_disabling(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_starting(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_qresume(void *cbarg); +static void bfa_dport_req_comp(struct bfa_dport_s *dport, + struct bfi_diag_dport_rsp_s *msg); +static void bfa_dport_scn(struct bfa_dport_s *dport, + struct bfi_diag_dport_scn_s *msg); + +/* + * BFA fcdiag module + */ +#define BFA_DIAG_QTEST_TOV 1000 /* msec */ + +/* + * Set port status to busy + */ +static void +bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa); + + if (fcdiag->lb.lock) + fcport->diag_busy = BFA_TRUE; + else + fcport->diag_busy = BFA_FALSE; +} + +void +bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_pcidev_s *pcidev) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + struct bfa_dport_s *dport = &fcdiag->dport; + + fcdiag->bfa = bfa; + fcdiag->trcmod = bfa->trcmod; + 
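	/*
	 * Loopback, queue-test and D-port state all live in this
	 * module.  A minimal sketch of starting the queue test defined
	 * further below -- my_qtest_done and my_qtest_result are
	 * hypothetical, and the callback shape follows how qtest.cbfn
	 * is invoked by the completion paths:
	 *
	 *	static struct bfa_diag_qtest_result_s my_qtest_result;
	 *
	 *	static void my_qtest_done(void *cbarg, bfa_status_t status)
	 *	{
	 *		...
	 *	}
	 *
	 *	bfa_fcdiag_queuetest(bfa, 0, BFI_IOC_MAX_CQS,
	 *			     &my_qtest_result, my_qtest_done, NULL);
	 */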
/* The common DIAG attach bfa_diag_attach() will do all memory claim */ + dport->bfa = bfa; + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport); + dport->cbfn = NULL; + dport->cbarg = NULL; + dport->test_state = BFA_DPORT_ST_DISABLED; + memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s)); +} + +void +bfa_fcdiag_iocdisable(struct bfa_s *bfa) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + struct bfa_dport_s *dport = &fcdiag->dport; + + bfa_trc(fcdiag, fcdiag->lb.lock); + if (fcdiag->lb.lock) { + fcdiag->lb.status = BFA_STATUS_IOC_FAILURE; + fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status); + fcdiag->lb.lock = 0; + bfa_fcdiag_set_busy_status(fcdiag); + } + + bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL); +} + +static void +bfa_fcdiag_queuetest_timeout(void *cbarg) +{ + struct bfa_fcdiag_s *fcdiag = cbarg; + struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result; + + bfa_trc(fcdiag, fcdiag->qtest.all); + bfa_trc(fcdiag, fcdiag->qtest.count); + + fcdiag->qtest.timer_active = 0; + + res->status = BFA_STATUS_ETIMER; + res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count; + if (fcdiag->qtest.all) + res->queue = fcdiag->qtest.all; + + bfa_trc(fcdiag, BFA_STATUS_ETIMER); + fcdiag->qtest.status = BFA_STATUS_ETIMER; + fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status); + fcdiag->qtest.lock = 0; +} + +static bfa_status_t +bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag) +{ + u32 i; + struct bfi_diag_qtest_req_s *req; + + req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue); + if (!req) + return BFA_STATUS_DEVBUSY; + + /* build host command */ + bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST, + bfa_fn_lpu(fcdiag->bfa)); + + for (i = 0; i < BFI_LMSG_PL_WSZ; i++) + req->data[i] = QTEST_PAT_DEFAULT; + + bfa_trc(fcdiag, fcdiag->qtest.queue); + /* ring door bell */ + bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh); + return BFA_STATUS_OK; +} + +static void +bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag, + bfi_diag_qtest_rsp_t *rsp) +{ + struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result; + bfa_status_t status = BFA_STATUS_OK; + int i; + + /* Check timer, should still be active */ + if (!fcdiag->qtest.timer_active) { + bfa_trc(fcdiag, fcdiag->qtest.timer_active); + return; + } + + /* update count */ + fcdiag->qtest.count--; + + /* Check result */ + for (i = 0; i < BFI_LMSG_PL_WSZ; i++) { + if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) { + res->status = BFA_STATUS_DATACORRUPTED; + break; + } + } + + if (res->status == BFA_STATUS_OK) { + if (fcdiag->qtest.count > 0) { + status = bfa_fcdiag_queuetest_send(fcdiag); + if (status == BFA_STATUS_OK) + return; + else + res->status = status; + } else if (fcdiag->qtest.all > 0 && + fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) { + fcdiag->qtest.count = QTEST_CNT_DEFAULT; + fcdiag->qtest.queue++; + status = bfa_fcdiag_queuetest_send(fcdiag); + if (status == BFA_STATUS_OK) + return; + else + res->status = status; + } + } + + /* Stop timer when we comp all queue */ + if (fcdiag->qtest.timer_active) { + bfa_timer_stop(&fcdiag->qtest.timer); + fcdiag->qtest.timer_active = 0; + } + res->queue = fcdiag->qtest.queue; + res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count; + bfa_trc(fcdiag, res->count); + bfa_trc(fcdiag, res->status); + fcdiag->qtest.status = res->status; + fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status); + fcdiag->qtest.lock = 0; +} + +static void +bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag, + struct 
bfi_diag_lb_rsp_s *rsp) +{ + struct bfa_diag_loopback_result_s *res = fcdiag->lb.result; + + res->numtxmfrm = be32_to_cpu(rsp->res.numtxmfrm); + res->numosffrm = be32_to_cpu(rsp->res.numosffrm); + res->numrcvfrm = be32_to_cpu(rsp->res.numrcvfrm); + res->badfrminf = be32_to_cpu(rsp->res.badfrminf); + res->badfrmnum = be32_to_cpu(rsp->res.badfrmnum); + res->status = rsp->res.status; + fcdiag->lb.status = rsp->res.status; + bfa_trc(fcdiag, fcdiag->lb.status); + fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status); + fcdiag->lb.lock = 0; + bfa_fcdiag_set_busy_status(fcdiag); +} + +static bfa_status_t +bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag, + struct bfa_diag_loopback_s *loopback) +{ + struct bfi_diag_lb_req_s *lb_req; + + lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG); + if (!lb_req) + return BFA_STATUS_DEVBUSY; + + /* build host command */ + bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK, + bfa_fn_lpu(fcdiag->bfa)); + + lb_req->lb_mode = loopback->lb_mode; + lb_req->speed = loopback->speed; + lb_req->loopcnt = loopback->loopcnt; + lb_req->pattern = loopback->pattern; + + /* ring door bell */ + bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh); + + bfa_trc(fcdiag, loopback->lb_mode); + bfa_trc(fcdiag, loopback->speed); + bfa_trc(fcdiag, loopback->loopcnt); + bfa_trc(fcdiag, loopback->pattern); + return BFA_STATUS_OK; +} + +/* + * cpe/rme intr handler + */ +void +bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + + switch (msg->mhdr.msg_id) { + case BFI_DIAG_I2H_LOOPBACK: + bfa_fcdiag_loopback_comp(fcdiag, + (struct bfi_diag_lb_rsp_s *) msg); + break; + case BFI_DIAG_I2H_QTEST: + bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg); + break; + case BFI_DIAG_I2H_DPORT: + bfa_dport_req_comp(&fcdiag->dport, + (struct bfi_diag_dport_rsp_s *)msg); + break; + case BFI_DIAG_I2H_DPORT_SCN: + bfa_dport_scn(&fcdiag->dport, + (struct bfi_diag_dport_scn_s *)msg); + break; + default: + bfa_trc(fcdiag, msg->mhdr.msg_id); + WARN_ON(1); + } +} + +/* + * Loopback test + * + * @param[in] *bfa - bfa data struct + * @param[in] opmode - port operation mode + * @param[in] speed - port speed + * @param[in] lpcnt - loop count + * @param[in] pat - pattern to build packet + * @param[in] *result - pt to bfa_diag_loopback_result_t data struct + * @param[in] cbfn - callback function + * @param[in] cbarg - callback functioin arg + * + * @param[out] + */ +bfa_status_t +bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode, + enum bfa_port_speed speed, u32 lpcnt, u32 pat, + struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn, + void *cbarg) +{ + struct bfa_diag_loopback_s loopback; + struct bfa_port_attr_s attr; + bfa_status_t status; + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + + if (!bfa_iocfc_is_operational(bfa)) + return BFA_STATUS_IOC_NON_OP; + + /* if port is PBC disabled, return error */ + if (bfa_fcport_is_pbcdisabled(bfa)) { + bfa_trc(fcdiag, BFA_STATUS_PBC); + return BFA_STATUS_PBC; + } + + if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) { + bfa_trc(fcdiag, opmode); + return BFA_STATUS_PORT_NOT_DISABLED; + } + + /* + * Check if input speed is supported by the port mode + */ + if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) { + if (!(speed == BFA_PORT_SPEED_1GBPS || + speed == BFA_PORT_SPEED_2GBPS || + speed == BFA_PORT_SPEED_4GBPS || + speed == BFA_PORT_SPEED_8GBPS || + speed == BFA_PORT_SPEED_16GBPS || + speed == BFA_PORT_SPEED_AUTO)) { + bfa_trc(fcdiag, speed); + return 
BFA_STATUS_UNSUPP_SPEED; + } + bfa_fcport_get_attr(bfa, &attr); + bfa_trc(fcdiag, attr.speed_supported); + if (speed > attr.speed_supported) + return BFA_STATUS_UNSUPP_SPEED; + } else { + if (speed != BFA_PORT_SPEED_10GBPS) { + bfa_trc(fcdiag, speed); + return BFA_STATUS_UNSUPP_SPEED; + } + } + + /* + * For CT2, 1G is not supported + */ + if ((speed == BFA_PORT_SPEED_1GBPS) && + (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) { + bfa_trc(fcdiag, speed); + return BFA_STATUS_UNSUPP_SPEED; + } + + /* For Mezz card, port speed entered needs to be checked */ + if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) { + if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) { + if (!(speed == BFA_PORT_SPEED_1GBPS || + speed == BFA_PORT_SPEED_2GBPS || + speed == BFA_PORT_SPEED_4GBPS || + speed == BFA_PORT_SPEED_8GBPS || + speed == BFA_PORT_SPEED_16GBPS || + speed == BFA_PORT_SPEED_AUTO)) + return BFA_STATUS_UNSUPP_SPEED; + } else { + if (speed != BFA_PORT_SPEED_10GBPS) + return BFA_STATUS_UNSUPP_SPEED; + } + } + /* check to see if fcport is dport */ + if (bfa_fcport_is_dport(bfa)) { + bfa_trc(fcdiag, fcdiag->lb.lock); + return BFA_STATUS_DPORT_ENABLED; + } + /* check to see if there is another destructive diag cmd running */ + if (fcdiag->lb.lock) { + bfa_trc(fcdiag, fcdiag->lb.lock); + return BFA_STATUS_DEVBUSY; + } + + fcdiag->lb.lock = 1; + loopback.lb_mode = opmode; + loopback.speed = speed; + loopback.loopcnt = lpcnt; + loopback.pattern = pat; + fcdiag->lb.result = result; + fcdiag->lb.cbfn = cbfn; + fcdiag->lb.cbarg = cbarg; + memset(result, 0, sizeof(struct bfa_diag_loopback_result_s)); + bfa_fcdiag_set_busy_status(fcdiag); + + /* Send msg to fw */ + status = bfa_fcdiag_loopback_send(fcdiag, &loopback); + return status; +} + +/* + * DIAG queue test command + * + * @param[in] *bfa - bfa data struct + * @param[in] force - 1: don't do ioc op checking + * @param[in] queue - queue no. 
to test + * @param[in] *result - pt to bfa_diag_qtest_result_t data struct + * @param[in] cbfn - callback function + * @param[in] *cbarg - callback functioin arg + * + * @param[out] + */ +bfa_status_t +bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue, + struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn, + void *cbarg) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + bfa_status_t status; + bfa_trc(fcdiag, force); + bfa_trc(fcdiag, queue); + + if (!force && !bfa_iocfc_is_operational(bfa)) + return BFA_STATUS_IOC_NON_OP; + + /* check to see if there is another destructive diag cmd running */ + if (fcdiag->qtest.lock) { + bfa_trc(fcdiag, fcdiag->qtest.lock); + return BFA_STATUS_DEVBUSY; + } + + /* Initialization */ + fcdiag->qtest.lock = 1; + fcdiag->qtest.cbfn = cbfn; + fcdiag->qtest.cbarg = cbarg; + fcdiag->qtest.result = result; + fcdiag->qtest.count = QTEST_CNT_DEFAULT; + + /* Init test results */ + fcdiag->qtest.result->status = BFA_STATUS_OK; + fcdiag->qtest.result->count = 0; + + /* send */ + if (queue < BFI_IOC_MAX_CQS) { + fcdiag->qtest.result->queue = (u8)queue; + fcdiag->qtest.queue = (u8)queue; + fcdiag->qtest.all = 0; + } else { + fcdiag->qtest.result->queue = 0; + fcdiag->qtest.queue = 0; + fcdiag->qtest.all = 1; + } + status = bfa_fcdiag_queuetest_send(fcdiag); + + /* Start a timer */ + if (status == BFA_STATUS_OK) { + bfa_timer_start(bfa, &fcdiag->qtest.timer, + bfa_fcdiag_queuetest_timeout, fcdiag, + BFA_DIAG_QTEST_TOV); + fcdiag->qtest.timer_active = 1; + } + return status; +} + +/* + * DIAG PLB is running + * + * @param[in] *bfa - bfa data struct + * + * @param[out] + */ +bfa_status_t +bfa_fcdiag_lb_is_running(struct bfa_s *bfa) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + return fcdiag->lb.lock ? 
BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK; +} + +/* + * D-port + */ +#define bfa_dport_result_start(__dport, __mode) do { \ + (__dport)->result.start_time = ktime_get_real_seconds(); \ + (__dport)->result.status = DPORT_TEST_ST_INPRG; \ + (__dport)->result.mode = (__mode); \ + (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \ + (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \ + (__dport)->result.lpcnt = (__dport)->lpcnt; \ +} while (0) + +static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport, + enum bfi_dport_req req); +static void +bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status) +{ + if (dport->cbfn != NULL) { + dport->cbfn(dport->cbarg, bfa_status); + dport->cbfn = NULL; + dport->cbarg = NULL; + } +} + +static void +bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_ENABLE: + bfa_fcport_dportenable(dport->bfa); + if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE)) + bfa_sm_set_state(dport, bfa_dport_sm_enabling); + else + bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait); + break; + + case BFA_DPORT_SM_DISABLE: + /* Already disabled */ + break; + + case BFA_DPORT_SM_HWFAIL: + /* ignore */ + break; + + case BFA_DPORT_SM_SCN: + if (dport->i2hmsg.scn.state == BFI_DPORT_SCN_DDPORT_ENABLE) { + bfa_fcport_ddportenable(dport->bfa); + dport->dynamic = BFA_TRUE; + dport->test_state = BFA_DPORT_ST_NOTSTART; + bfa_sm_set_state(dport, bfa_dport_sm_enabled); + } else { + bfa_trc(dport->bfa, dport->i2hmsg.scn.state); + WARN_ON(1); + } + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_QRESUME: + bfa_sm_set_state(dport, bfa_dport_sm_enabling); + bfa_dport_send_req(dport, BFI_DPORT_ENABLE); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_reqq_wcancel(&dport->reqq_wait); + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_FWRSP: + memset(&dport->result, 0, + sizeof(struct bfa_diag_dport_result_s)); + if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) { + dport->test_state = BFA_DPORT_ST_NO_SFP; + } else { + dport->test_state = BFA_DPORT_ST_INP; + bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO); + } + bfa_sm_set_state(dport, bfa_dport_sm_enabled); + break; + + case BFA_DPORT_SM_REQFAIL: + dport->test_state = BFA_DPORT_ST_DISABLED; + bfa_fcport_dportdisable(dport->bfa); + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_START: + if (bfa_dport_send_req(dport, BFI_DPORT_START)) + bfa_sm_set_state(dport, bfa_dport_sm_starting); + else + bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait); + break; + + case BFA_DPORT_SM_DISABLE: + bfa_fcport_dportdisable(dport->bfa); + if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE)) + 
bfa_sm_set_state(dport, bfa_dport_sm_disabling); + else + bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + break; + + case BFA_DPORT_SM_SCN: + switch (dport->i2hmsg.scn.state) { + case BFI_DPORT_SCN_TESTCOMP: + dport->test_state = BFA_DPORT_ST_COMP; + break; + + case BFI_DPORT_SCN_TESTSTART: + dport->test_state = BFA_DPORT_ST_INP; + break; + + case BFI_DPORT_SCN_TESTSKIP: + case BFI_DPORT_SCN_SUBTESTSTART: + /* no state change */ + break; + + case BFI_DPORT_SCN_SFP_REMOVED: + dport->test_state = BFA_DPORT_ST_NO_SFP; + break; + + case BFI_DPORT_SCN_DDPORT_DISABLE: + bfa_fcport_ddportdisable(dport->bfa); + + if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE)) + bfa_sm_set_state(dport, + bfa_dport_sm_dynamic_disabling); + else + bfa_sm_set_state(dport, + bfa_dport_sm_dynamic_disabling_qwait); + break; + + case BFI_DPORT_SCN_FCPORT_DISABLE: + bfa_fcport_ddportdisable(dport->bfa); + + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + dport->dynamic = BFA_FALSE; + break; + + default: + bfa_trc(dport->bfa, dport->i2hmsg.scn.state); + bfa_sm_fault(dport->bfa, event); + } + break; + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_QRESUME: + bfa_sm_set_state(dport, bfa_dport_sm_disabling); + bfa_dport_send_req(dport, BFI_DPORT_DISABLE); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_reqq_wcancel(&dport->reqq_wait); + bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); + break; + + case BFA_DPORT_SM_SCN: + /* ignore */ + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_FWRSP: + dport->test_state = BFA_DPORT_ST_DISABLED; + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); + break; + + case BFA_DPORT_SM_SCN: + /* no state change */ + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_QRESUME: + bfa_sm_set_state(dport, bfa_dport_sm_starting); + bfa_dport_send_req(dport, BFI_DPORT_START); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_reqq_wcancel(&dport->reqq_wait); + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_FWRSP: + memset(&dport->result, 0, + sizeof(struct bfa_diag_dport_result_s)); + if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) { + dport->test_state = BFA_DPORT_ST_NO_SFP; + } else { + dport->test_state = BFA_DPORT_ST_INP; + bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU); + } + fallthrough; + + case BFA_DPORT_SM_REQFAIL: + bfa_sm_set_state(dport, bfa_dport_sm_enabled); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_cb_fcdiag_dport(dport, 
BFA_STATUS_FAILED); + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_SCN: + switch (dport->i2hmsg.scn.state) { + case BFI_DPORT_SCN_DDPORT_DISABLED: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + dport->dynamic = BFA_FALSE; + bfa_fcport_enable(dport->bfa); + break; + + default: + bfa_trc(dport->bfa, dport->i2hmsg.scn.state); + bfa_sm_fault(dport->bfa, event); + + } + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_QRESUME: + bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling); + bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_reqq_wcancel(&dport->reqq_wait); + bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); + break; + + case BFA_DPORT_SM_SCN: + /* ignore */ + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static bfa_boolean_t +bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req) +{ + struct bfi_diag_dport_req_s *m; + + /* + * check for room in queue to send request now + */ + m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG); + if (!m) { + bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT, + bfa_fn_lpu(dport->bfa)); + m->req = req; + if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) { + m->lpcnt = cpu_to_be32(dport->lpcnt); + m->payload = cpu_to_be32(dport->payload); + } + + /* + * queue I/O message to firmware + */ + bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh); + + return BFA_TRUE; +} + +static void +bfa_dport_qresume(void *cbarg) +{ + struct bfa_dport_s *dport = cbarg; + + bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME); +} + +static void +bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg) +{ + msg->status = cpu_to_be32(msg->status); + dport->i2hmsg.rsp.status = msg->status; + dport->rp_pwwn = msg->pwwn; + dport->rp_nwwn = msg->nwwn; + + if ((msg->status == BFA_STATUS_OK) || + (msg->status == BFA_STATUS_DPORT_NO_SFP)) { + bfa_trc(dport->bfa, msg->status); + bfa_trc(dport->bfa, dport->rp_pwwn); + bfa_trc(dport->bfa, dport->rp_nwwn); + bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP); + + } else { + bfa_trc(dport->bfa, msg->status); + bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL); + } + bfa_cb_fcdiag_dport(dport, msg->status); +} + +static bfa_boolean_t +bfa_dport_is_sending_req(struct bfa_dport_s *dport) +{ + if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) || + bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) || + bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) || + bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) || + bfa_sm_cmp_state(dport, bfa_dport_sm_starting) || + bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) { + return BFA_TRUE; + } else { + return BFA_FALSE; + } +} + +static void +bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg) +{ + int i; + uint8_t subtesttype; + + bfa_trc(dport->bfa, msg->state); + dport->i2hmsg.scn.state = msg->state; + + switch 
(dport->i2hmsg.scn.state) { + case BFI_DPORT_SCN_TESTCOMP: + dport->result.end_time = ktime_get_real_seconds(); + bfa_trc(dport->bfa, dport->result.end_time); + + dport->result.status = msg->info.testcomp.status; + bfa_trc(dport->bfa, dport->result.status); + + dport->result.roundtrip_latency = + cpu_to_be32(msg->info.testcomp.latency); + dport->result.est_cable_distance = + cpu_to_be32(msg->info.testcomp.distance); + dport->result.buffer_required = + be16_to_cpu(msg->info.testcomp.numbuffer); + + dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz); + dport->result.speed = msg->info.testcomp.speed; + + bfa_trc(dport->bfa, dport->result.roundtrip_latency); + bfa_trc(dport->bfa, dport->result.est_cable_distance); + bfa_trc(dport->bfa, dport->result.buffer_required); + bfa_trc(dport->bfa, dport->result.frmsz); + bfa_trc(dport->bfa, dport->result.speed); + + for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) { + dport->result.subtest[i].status = + msg->info.testcomp.subtest_status[i]; + bfa_trc(dport->bfa, dport->result.subtest[i].status); + } + break; + + case BFI_DPORT_SCN_TESTSKIP: + case BFI_DPORT_SCN_DDPORT_ENABLE: + memset(&dport->result, 0, + sizeof(struct bfa_diag_dport_result_s)); + break; + + case BFI_DPORT_SCN_TESTSTART: + memset(&dport->result, 0, + sizeof(struct bfa_diag_dport_result_s)); + dport->rp_pwwn = msg->info.teststart.pwwn; + dport->rp_nwwn = msg->info.teststart.nwwn; + dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm); + bfa_dport_result_start(dport, msg->info.teststart.mode); + break; + + case BFI_DPORT_SCN_SUBTESTSTART: + subtesttype = msg->info.teststart.type; + dport->result.subtest[subtesttype].start_time = + ktime_get_real_seconds(); + dport->result.subtest[subtesttype].status = + DPORT_TEST_ST_INPRG; + + bfa_trc(dport->bfa, subtesttype); + bfa_trc(dport->bfa, + dport->result.subtest[subtesttype].start_time); + break; + + case BFI_DPORT_SCN_SFP_REMOVED: + case BFI_DPORT_SCN_DDPORT_DISABLED: + case BFI_DPORT_SCN_DDPORT_DISABLE: + case BFI_DPORT_SCN_FCPORT_DISABLE: + dport->result.status = DPORT_TEST_ST_IDLE; + break; + + default: + bfa_sm_fault(dport->bfa, msg->state); + } + + bfa_sm_send_event(dport, BFA_DPORT_SM_SCN); +} + +/* + * Dport enable + * + * @param[in] *bfa - bfa data struct + */ +bfa_status_t +bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat, + bfa_cb_diag_t cbfn, void *cbarg) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + struct bfa_dport_s *dport = &fcdiag->dport; + + /* + * Dport is not support in MEZZ card + */ + if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) { + bfa_trc(dport->bfa, BFA_STATUS_PBC); + return BFA_STATUS_CMD_NOTSUPP_MEZZ; + } + + /* + * Dport is supported in CT2 or above + */ + if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) { + bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id); + return BFA_STATUS_FEATURE_NOT_SUPPORTED; + } + + /* + * Check to see if IOC is down + */ + if (!bfa_iocfc_is_operational(bfa)) + return BFA_STATUS_IOC_NON_OP; + + /* if port is PBC disabled, return error */ + if (bfa_fcport_is_pbcdisabled(bfa)) { + bfa_trc(dport->bfa, BFA_STATUS_PBC); + return BFA_STATUS_PBC; + } + + /* + * Check if port mode is FC port + */ + if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) { + bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc)); + return BFA_STATUS_CMD_NOTSUPP_CNA; + } + + /* + * Check if port is in LOOP mode + */ + if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) || + (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) { + bfa_trc(dport->bfa, 0); + 
return BFA_STATUS_TOPOLOGY_LOOP; + } + + /* + * Check if port is TRUNK mode + */ + if (bfa_fcport_is_trunk_enabled(bfa)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_ERROR_TRUNK_ENABLED; + } + + /* + * Check if diag loopback is running + */ + if (bfa_fcdiag_lb_is_running(bfa)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_DIAG_BUSY; + } + + /* + * Check to see if port is disable or in dport state + */ + if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) && + (bfa_fcport_is_dport(bfa) == BFA_FALSE)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_PORT_NOT_DISABLED; + } + + /* + * Check if dport is in dynamic mode + */ + if (dport->dynamic) + return BFA_STATUS_DDPORT_ERR; + + /* + * Check if dport is busy + */ + if (bfa_dport_is_sending_req(dport)) + return BFA_STATUS_DEVBUSY; + + /* + * Check if dport is already enabled + */ + if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_DPORT_ENABLED; + } + + bfa_trc(dport->bfa, lpcnt); + bfa_trc(dport->bfa, pat); + dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT; + dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT; + dport->cbfn = cbfn; + dport->cbarg = cbarg; + + bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE); + return BFA_STATUS_OK; +} + +/* + * Dport disable + * + * @param[in] *bfa - bfa data struct + */ +bfa_status_t +bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + struct bfa_dport_s *dport = &fcdiag->dport; + + if (bfa_ioc_is_disabled(&bfa->ioc)) + return BFA_STATUS_IOC_DISABLED; + + /* if port is PBC disabled, return error */ + if (bfa_fcport_is_pbcdisabled(bfa)) { + bfa_trc(dport->bfa, BFA_STATUS_PBC); + return BFA_STATUS_PBC; + } + + /* + * Check if dport is in dynamic mode + */ + if (dport->dynamic) { + return BFA_STATUS_DDPORT_ERR; + } + + /* + * Check to see if port is disable or in dport state + */ + if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) && + (bfa_fcport_is_dport(bfa) == BFA_FALSE)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_PORT_NOT_DISABLED; + } + + /* + * Check if dport is busy + */ + if (bfa_dport_is_sending_req(dport)) + return BFA_STATUS_DEVBUSY; + + /* + * Check if dport is already disabled + */ + if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_DPORT_DISABLED; + } + + dport->cbfn = cbfn; + dport->cbarg = cbarg; + + bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE); + return BFA_STATUS_OK; +} + +/* + * Dport start -- restart dport test + * + * @param[in] *bfa - bfa data struct + */ +bfa_status_t +bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat, + bfa_cb_diag_t cbfn, void *cbarg) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + struct bfa_dport_s *dport = &fcdiag->dport; + + /* + * Check to see if IOC is down + */ + if (!bfa_iocfc_is_operational(bfa)) + return BFA_STATUS_IOC_NON_OP; + + /* + * Check if dport is in dynamic mode + */ + if (dport->dynamic) + return BFA_STATUS_DDPORT_ERR; + + /* + * Check if dport is busy + */ + if (bfa_dport_is_sending_req(dport)) + return BFA_STATUS_DEVBUSY; + + /* + * Check if dport is in enabled state. 
+ * Test can only be restart when previous test has completed + */ + if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_DPORT_DISABLED; + + } else { + if (dport->test_state == BFA_DPORT_ST_NO_SFP) + return BFA_STATUS_DPORT_INV_SFP; + + if (dport->test_state == BFA_DPORT_ST_INP) + return BFA_STATUS_DEVBUSY; + + WARN_ON(dport->test_state != BFA_DPORT_ST_COMP); + } + + bfa_trc(dport->bfa, lpcnt); + bfa_trc(dport->bfa, pat); + + dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT; + dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT; + + dport->cbfn = cbfn; + dport->cbarg = cbarg; + + bfa_sm_send_event(dport, BFA_DPORT_SM_START); + return BFA_STATUS_OK; +} + +/* + * Dport show -- return dport test result + * + * @param[in] *bfa - bfa data struct + */ +bfa_status_t +bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + struct bfa_dport_s *dport = &fcdiag->dport; + + /* + * Check to see if IOC is down + */ + if (!bfa_iocfc_is_operational(bfa)) + return BFA_STATUS_IOC_NON_OP; + + /* + * Check if dport is busy + */ + if (bfa_dport_is_sending_req(dport)) + return BFA_STATUS_DEVBUSY; + + /* + * Check if dport is in enabled state. + */ + if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_DPORT_DISABLED; + + } + + /* + * Check if there is SFP + */ + if (dport->test_state == BFA_DPORT_ST_NO_SFP) + return BFA_STATUS_DPORT_INV_SFP; + + memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s)); + + return BFA_STATUS_OK; +} diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h new file mode 100644 index 000000000..9c8310957 --- /dev/null +++ b/drivers/scsi/bfa/bfa_svc.h @@ -0,0 +1,756 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +#ifndef __BFA_SVC_H__ +#define __BFA_SVC_H__ + +#include "bfa_cs.h" +#include "bfi_ms.h" + + +/* + * Scatter-gather DMA related defines + */ +#define BFA_SGPG_MIN (16) +#define BFA_SGPG_MAX (8192) + +/* + * Alignment macro for SG page allocation + */ +#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \ + & ~(sizeof(struct bfi_sgpg_s) - 1)) + +struct bfa_sgpg_wqe_s { + struct list_head qe; /* queue sg page element */ + int nsgpg; /* pages to be allocated */ + int nsgpg_total; /* total pages required */ + void (*cbfn) (void *cbarg); /* callback function */ + void *cbarg; /* callback arg */ + struct list_head sgpg_q; /* queue of alloced sgpgs */ +}; + +struct bfa_sgpg_s { + struct list_head qe; /* queue sg page element */ + struct bfi_sgpg_s *sgpg; /* va of SG page */ + union bfi_addr_u sgpg_pa; /* pa of SG page */ +}; + +/* + * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of + * SG pages required. 
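+ * The macro rounds up: 2 * BFI_SGPG_DATA_SGES elements map to three
+ * pages, since the unconditional +1 leaves one spare page whenever
+ * _nsges is an exact multiple of BFI_SGPG_DATA_SGES.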
+ */ +#define BFA_SGPG_NPAGE(_nsges) (((_nsges) / BFI_SGPG_DATA_SGES) + 1) + +/* Max SGPG dma segs required */ +#define BFA_SGPG_DMA_SEGS \ + BFI_MEM_DMA_NSEGS(BFA_SGPG_MAX, (uint32_t)sizeof(struct bfi_sgpg_s)) + +struct bfa_sgpg_mod_s { + struct bfa_s *bfa; + int num_sgpgs; /* number of SG pages */ + int free_sgpgs; /* number of free SG pages */ + struct list_head sgpg_q; /* queue of free SG pages */ + struct list_head sgpg_wait_q; /* wait queue for SG pages */ + struct bfa_mem_dma_s dma_seg[BFA_SGPG_DMA_SEGS]; + struct bfa_mem_kva_s kva_seg; +}; +#define BFA_SGPG_MOD(__bfa) (&(__bfa)->modules.sgpg_mod) +#define BFA_MEM_SGPG_KVA(__bfa) (&(BFA_SGPG_MOD(__bfa)->kva_seg)) + +bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, + int nsgpgs); +void bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs); +void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, + void (*cbfn) (void *cbarg), void *cbarg); +void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs); +void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe); + + +/* + * FCXP related defines + */ +#define BFA_FCXP_MIN (1) +#define BFA_FCXP_MAX (256) +#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256) +#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256) + +/* Max FCXP dma segs required */ +#define BFA_FCXP_DMA_SEGS \ + BFI_MEM_DMA_NSEGS(BFA_FCXP_MAX, \ + (u32)BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ) + +struct bfa_fcxp_mod_s { + struct bfa_s *bfa; /* backpointer to BFA */ + struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */ + u16 num_fcxps; /* max num FCXP requests */ + struct list_head fcxp_req_free_q; /* free FCXPs used for sending req */ + struct list_head fcxp_rsp_free_q; /* free FCXPs used for sending req */ + struct list_head fcxp_active_q; /* active FCXPs */ + struct list_head req_wait_q; /* wait queue for free req_fcxp */ + struct list_head rsp_wait_q; /* wait queue for free rsp_fcxp */ + struct list_head fcxp_req_unused_q; /* unused req_fcxps */ + struct list_head fcxp_rsp_unused_q; /* unused rsp_fcxps */ + u32 req_pld_sz; + u32 rsp_pld_sz; + struct bfa_mem_dma_s dma_seg[BFA_FCXP_DMA_SEGS]; + struct bfa_mem_kva_s kva_seg; +}; + +#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod) +#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag]) +#define BFA_MEM_FCXP_KVA(__bfa) (&(BFA_FCXP_MOD(__bfa)->kva_seg)) + +typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp, + void *cb_arg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs); + +typedef u64 (*bfa_fcxp_get_sgaddr_t) (void *bfad_fcxp, int sgeid); +typedef u32 (*bfa_fcxp_get_sglen_t) (void *bfad_fcxp, int sgeid); +typedef void (*bfa_cb_fcxp_send_t) (void *bfad_fcxp, struct bfa_fcxp_s *fcxp, + void *cbarg, enum bfa_status req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs); +typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp); + + + +/* + * Information needed for a FCXP request + */ +struct bfa_fcxp_req_info_s { + struct bfa_rport_s *bfa_rport; + /* Pointer to the bfa rport that was + * returned from bfa_rport_create(). 
+ * This could be left NULL for WKA or + * for FCXP interactions before the + * rport nexus is established + */ + struct fchs_s fchs; /* request FC header structure */ + u8 cts; /* continuous sequence */ + u8 class; /* FC class for the request/response */ + u16 max_frmsz; /* max send frame size */ + u16 vf_id; /* vsan tag if applicable */ + u8 lp_tag; /* lport tag */ + u32 req_tot_len; /* request payload total length */ +}; + +struct bfa_fcxp_rsp_info_s { + struct fchs_s rsp_fchs; + /* Response frame's FC header will + * be sent back in this field */ + u8 rsp_timeout; + /* timeout in seconds, 0-no response */ + u8 rsvd2[3]; + u32 rsp_maxlen; /* max response length expected */ +}; + +struct bfa_fcxp_s { + struct list_head qe; /* fcxp queue element */ + bfa_sm_t sm; /* state machine */ + void *caller; /* driver or fcs */ + struct bfa_fcxp_mod_s *fcxp_mod; + /* back pointer to fcxp mod */ + u16 fcxp_tag; /* internal tag */ + struct bfa_fcxp_req_info_s req_info; + /* request info */ + struct bfa_fcxp_rsp_info_s rsp_info; + /* response info */ + u8 use_ireqbuf; /* use internal req buf */ + u8 use_irspbuf; /* use internal rsp buf */ + u32 nreq_sgles; /* num request SGLEs */ + u32 nrsp_sgles; /* num response SGLEs */ + struct list_head req_sgpg_q; /* SG pages for request buf */ + struct list_head req_sgpg_wqe; /* wait queue for req SG page */ + struct list_head rsp_sgpg_q; /* SG pages for response buf */ + struct list_head rsp_sgpg_wqe; /* wait queue for rsp SG page */ + + bfa_fcxp_get_sgaddr_t req_sga_cbfn; + /* SG elem addr user function */ + bfa_fcxp_get_sglen_t req_sglen_cbfn; + /* SG elem len user function */ + bfa_fcxp_get_sgaddr_t rsp_sga_cbfn; + /* SG elem addr user function */ + bfa_fcxp_get_sglen_t rsp_sglen_cbfn; + /* SG elem len user function */ + bfa_cb_fcxp_send_t send_cbfn; /* send completion callback */ + void *send_cbarg; /* callback arg */ + struct bfa_sge_s req_sge[BFA_FCXP_MAX_SGES]; + /* req SG elems */ + struct bfa_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; + /* rsp SG elems */ + u8 rsp_status; /* comp: rsp status */ + u32 rsp_len; /* comp: actual response len */ + u32 residue_len; /* comp: residual rsp length */ + struct fchs_s rsp_fchs; /* comp: response fchs */ + struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */ + struct bfa_reqq_wait_s reqq_wqe; + bfa_boolean_t reqq_waiting; + bfa_boolean_t req_rsp; /* Used to track req/rsp fcxp */ +}; + +struct bfa_fcxp_wqe_s { + struct list_head qe; + bfa_fcxp_alloc_cbfn_t alloc_cbfn; + void *alloc_cbarg; + void *caller; + struct bfa_s *bfa; + int nreq_sgles; + int nrsp_sgles; + bfa_fcxp_get_sgaddr_t req_sga_cbfn; + bfa_fcxp_get_sglen_t req_sglen_cbfn; + bfa_fcxp_get_sgaddr_t rsp_sga_cbfn; + bfa_fcxp_get_sglen_t rsp_sglen_cbfn; +}; + +#define BFA_FCXP_REQ_PLD(_fcxp) (bfa_fcxp_get_reqbuf(_fcxp)) +#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs)) +#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp)) + +#define BFA_FCXP_REQ_PLD_PA(_fcxp) \ + bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag, \ + (_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz) + +/* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */ +#define BFA_FCXP_RSP_PLD_PA(_fcxp) \ + (bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag, \ + (_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz) + \ + (_fcxp)->fcxp_mod->req_pld_sz) + +void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); + + +/* + * RPORT related defines + */ +enum bfa_rport_event { + BFA_RPORT_SM_CREATE = 1, /* rport create event */ + 
BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */ + BFA_RPORT_SM_ONLINE = 3, /* rport is online */ + BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */ + BFA_RPORT_SM_FWRSP = 5, /* firmware response */ + BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */ + BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */ + BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */ + BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */ +}; + +#define BFA_RPORT_MIN 4 + +struct bfa_rport_mod_s { + struct bfa_rport_s *rps_list; /* list of rports */ + struct list_head rp_free_q; /* free bfa_rports */ + struct list_head rp_active_q; /* free bfa_rports */ + struct list_head rp_unused_q; /* unused bfa rports */ + u16 num_rports; /* number of rports */ + struct bfa_mem_kva_s kva_seg; +}; + +#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod) +#define BFA_MEM_RPORT_KVA(__bfa) (&(BFA_RPORT_MOD(__bfa)->kva_seg)) + +/* + * Convert rport tag to RPORT + */ +#define BFA_RPORT_FROM_TAG(__bfa, _tag) \ + (BFA_RPORT_MOD(__bfa)->rps_list + \ + ((_tag) & (BFA_RPORT_MOD(__bfa)->num_rports - 1))) + +/* + * protected functions + */ +void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); +void bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw); + +/* + * BFA rport information. + */ +struct bfa_rport_info_s { + u16 max_frmsz; /* max rcv pdu size */ + u32 pid:24, /* remote port ID */ + lp_tag:8; /* tag */ + u32 local_pid:24, /* local port ID */ + cisc:8; /* CIRO supported */ + u8 fc_class; /* supported FC classes. enum fc_cos */ + u8 vf_en; /* virtual fabric enable */ + u16 vf_id; /* virtual fabric ID */ + enum bfa_port_speed speed; /* Rport's current speed */ +}; + +/* + * BFA rport data structure + */ +struct bfa_rport_s { + struct list_head qe; /* queue element */ + bfa_sm_t sm; /* state machine */ + struct bfa_s *bfa; /* backpointer to BFA */ + void *rport_drv; /* fcs/driver rport object */ + u16 fw_handle; /* firmware rport handle */ + u16 rport_tag; /* BFA rport tag */ + u8 lun_mask; /* LUN mask flag */ + struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */ + struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ + struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */ + struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */ + struct bfa_rport_qos_attr_s qos_attr; + union a { + bfa_status_t status; /* f/w status */ + void *fw_msg; /* QoS scn event */ + } event_arg; +}; +#define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class) + + +/* + * UF - unsolicited receive related defines + */ + +#define BFA_UF_MIN (4) +#define BFA_UF_MAX (256) + +struct bfa_uf_s { + struct list_head qe; /* queue element */ + struct bfa_s *bfa; /* bfa instance */ + u16 uf_tag; /* identifying tag fw msgs */ + u16 vf_id; + u16 src_rport_handle; + u16 rsvd; + u8 *data_ptr; + u16 data_len; /* actual receive length */ + u16 pb_len; /* posted buffer length */ + void *buf_kva; /* buffer virtual address */ + u64 buf_pa; /* buffer physical address */ + struct bfa_cb_qe_s hcb_qe; /* comp: BFA comp qelem */ + struct bfa_sge_s sges[BFI_SGE_INLINE_MAX]; +}; + +/* + * Callback prototype for unsolicited frame receive handler. 
+ * + * @param[in] cbarg callback arg for receive handler + * @param[in] uf unsolicited frame descriptor + * + * @return None + */ +typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf); + +#define BFA_UF_BUFSZ (2 * 1024 + 256) + +struct bfa_uf_buf_s { + u8 d[BFA_UF_BUFSZ]; +}; + +#define BFA_PER_UF_DMA_SZ \ + (u32)BFA_ROUNDUP(sizeof(struct bfa_uf_buf_s), BFA_DMA_ALIGN_SZ) + +/* Max UF dma segs required */ +#define BFA_UF_DMA_SEGS BFI_MEM_DMA_NSEGS(BFA_UF_MAX, BFA_PER_UF_DMA_SZ) + +struct bfa_uf_mod_s { + struct bfa_s *bfa; /* back pointer to BFA */ + struct bfa_uf_s *uf_list; /* array of UFs */ + u16 num_ufs; /* num unsolicited rx frames */ + struct list_head uf_free_q; /* free UFs */ + struct list_head uf_posted_q; /* UFs posted to IOC */ + struct list_head uf_unused_q; /* unused UF's */ + struct bfi_uf_buf_post_s *uf_buf_posts; + /* pre-built UF post msgs */ + bfa_cb_uf_recv_t ufrecv; /* uf recv handler function */ + void *cbarg; /* uf receive handler arg */ + struct bfa_mem_dma_s dma_seg[BFA_UF_DMA_SEGS]; + struct bfa_mem_kva_s kva_seg; +}; + +#define BFA_UF_MOD(__bfa) (&(__bfa)->modules.uf_mod) +#define BFA_MEM_UF_KVA(__bfa) (&(BFA_UF_MOD(__bfa)->kva_seg)) + +#define ufm_pbs_pa(_ufmod, _uftag) \ + bfa_mem_get_dmabuf_pa(_ufmod, _uftag, BFA_PER_UF_DMA_SZ) + +void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); +void bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw); + +/* + * LPS - bfa lport login/logout service interface + */ +struct bfa_lps_s { + struct list_head qe; /* queue element */ + struct bfa_s *bfa; /* parent bfa instance */ + bfa_sm_t sm; /* finite state machine */ + u8 bfa_tag; /* lport tag */ + u8 fw_tag; /* lport fw tag */ + u8 reqq; /* lport request queue */ + u8 alpa; /* ALPA for loop topologies */ + u32 lp_pid; /* lport port ID */ + bfa_boolean_t fdisc; /* snd FDISC instead of FLOGI */ + bfa_boolean_t auth_en; /* enable authentication */ + bfa_boolean_t auth_req; /* authentication required */ + bfa_boolean_t npiv_en; /* NPIV is allowed by peer */ + bfa_boolean_t fport; /* attached peer is F_PORT */ + bfa_boolean_t brcd_switch; /* attached peer is brcd sw */ + bfa_status_t status; /* login status */ + u16 pdusz; /* max receive PDU size */ + u16 pr_bbcred; /* BB_CREDIT from peer */ + u8 lsrjt_rsn; /* LSRJT reason */ + u8 lsrjt_expl; /* LSRJT explanation */ + u8 lun_mask; /* LUN mask flag */ + wwn_t pwwn; /* port wwn of lport */ + wwn_t nwwn; /* node wwn of lport */ + wwn_t pr_pwwn; /* port wwn of lport peer */ + wwn_t pr_nwwn; /* node wwn of lport peer */ + mac_t lp_mac; /* fpma/spma MAC for lport */ + mac_t fcf_mac; /* FCF MAC of lport */ + struct bfa_reqq_wait_s wqe; /* request wait queue element */ + void *uarg; /* user callback arg */ + struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */ + struct bfi_lps_login_rsp_s *loginrsp; + bfa_eproto_status_t ext_status; +}; + +struct bfa_lps_mod_s { + struct list_head lps_free_q; + struct list_head lps_active_q; + struct list_head lps_login_q; + struct bfa_lps_s *lps_arr; + int num_lps; + struct bfa_mem_kva_s kva_seg; +}; + +#define BFA_LPS_MOD(__bfa) (&(__bfa)->modules.lps_mod) +#define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag]) +#define BFA_MEM_LPS_KVA(__bfa) (&(BFA_LPS_MOD(__bfa)->kva_seg)) + +/* + * external functions + */ +void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); + + +/* + * FCPORT related defines + */ + +#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port)) + +/* + * Link notification data structure + */ +struct bfa_fcport_ln_s { + struct bfa_fcport_s *fcport; + 
bfa_sm_t sm; + struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */ + enum bfa_port_linkstate ln_event; /* ln event for callback */ +}; + +struct bfa_fcport_trunk_s { + struct bfa_trunk_attr_s attr; +}; + +/* + * BFA FC port data structure + */ +struct bfa_fcport_s { + struct bfa_s *bfa; /* parent BFA instance */ + bfa_sm_t sm; /* port state machine */ + wwn_t nwwn; /* node wwn of physical port */ + wwn_t pwwn; /* port wwn of physical oprt */ + enum bfa_port_speed speed_sup; + /* supported speeds */ + enum bfa_port_speed speed; /* current speed */ + enum bfa_port_topology topology; /* current topology */ + u8 rsvd[3]; + u8 myalpa; /* my ALPA in LOOP topology */ + u8 alpabm_valid; /* alpa bitmap valid or not */ + struct fc_alpabm_s alpabm; /* alpa bitmap */ + struct bfa_port_cfg_s cfg; /* current port configuration */ + bfa_boolean_t use_flash_cfg; /* get port cfg from flash */ + struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ + struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ + struct bfa_reqq_wait_s reqq_wait; + /* to wait for room in reqq */ + struct bfa_reqq_wait_s svcreq_wait; + /* to wait for room in reqq */ + struct bfa_reqq_wait_s stats_reqq_wait; + /* to wait for room in reqq (stats) */ + void *event_cbarg; + void (*event_cbfn) (void *cbarg, + enum bfa_port_linkstate event); + union { + union bfi_fcport_i2h_msg_u i2hmsg; + } event_arg; + void *bfad; /* BFA driver handle */ + struct bfa_fcport_ln_s ln; /* Link Notification */ + struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */ + struct bfa_timer_s timer; /* timer */ + u32 msgtag; /* fimrware msg tag for reply */ + u8 *stats_kva; + u64 stats_pa; + union bfa_fcport_stats_u *stats; + bfa_status_t stats_status; /* stats/statsclr status */ + struct list_head stats_pending_q; + struct list_head statsclr_pending_q; + bfa_boolean_t stats_qfull; + time64_t stats_reset_time; /* stats reset time stamp */ + bfa_boolean_t diag_busy; /* diag busy status */ + bfa_boolean_t beacon; /* port beacon status */ + bfa_boolean_t link_e2e_beacon; /* link beacon status */ + struct bfa_fcport_trunk_s trunk; + u16 fcoe_vlan; + struct bfa_mem_dma_s fcport_dma; + bfa_boolean_t stats_dma_ready; + struct bfa_bbcr_attr_s bbcr_attr; + enum bfa_fec_state_s fec_state; +}; + +#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport) +#define BFA_MEM_FCPORT_DMA(__bfa) (&(BFA_FCPORT_MOD(__bfa)->fcport_dma)) + +/* + * protected functions + */ +void bfa_fcport_init(struct bfa_s *bfa); +void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); + +/* + * bfa fcport API functions + */ +bfa_status_t bfa_fcport_enable(struct bfa_s *bfa); +bfa_status_t bfa_fcport_disable(struct bfa_s *bfa); +bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa, + enum bfa_port_speed speed); +enum bfa_port_speed bfa_fcport_get_speed(struct bfa_s *bfa); +bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa, + enum bfa_port_topology topo); +enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa); +enum bfa_port_topology bfa_fcport_get_cfg_topology(struct bfa_s *bfa); +bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); +bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); +u8 bfa_fcport_get_myalpa(struct bfa_s *bfa); +bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa); +bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize); +u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa); +u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa); +void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr); 
+wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node); +void bfa_fcport_event_register(struct bfa_s *bfa, + void (*event_cbfn) (void *cbarg, + enum bfa_port_linkstate event), void *event_cbarg); +bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); +bfa_boolean_t bfa_fcport_is_dport(struct bfa_s *bfa); +bfa_boolean_t bfa_fcport_is_ddport(struct bfa_s *bfa); +bfa_status_t bfa_fcport_set_qos_bw(struct bfa_s *bfa, + struct bfa_qos_bw_s *qos_bw); +enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); + +void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit); +bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa); +void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon, + bfa_boolean_t link_e2e_beacon); +bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa); +bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa, + struct bfa_cb_pending_q_s *cb); +bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, + struct bfa_cb_pending_q_s *cb); +bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa); +bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa); +void bfa_fcport_dportenable(struct bfa_s *bfa); +void bfa_fcport_dportdisable(struct bfa_s *bfa); +bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); +void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state); +bfa_status_t bfa_fcport_cfg_bbcr(struct bfa_s *bfa, + bfa_boolean_t on_off, u8 bb_scn); +bfa_status_t bfa_fcport_get_bbcr_attr(struct bfa_s *bfa, + struct bfa_bbcr_attr_s *bbcr_attr); + +/* + * bfa rport API functions + */ +struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv); +void bfa_rport_online(struct bfa_rport_s *rport, + struct bfa_rport_info_s *rport_info); +void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed); +void bfa_cb_rport_online(void *rport); +void bfa_cb_rport_offline(void *rport); +void bfa_cb_rport_qos_scn_flowid(void *rport, + struct bfa_rport_qos_attr_s old_qos_attr, + struct bfa_rport_qos_attr_s new_qos_attr); +void bfa_cb_rport_scn_online(struct bfa_s *bfa); +void bfa_cb_rport_scn_offline(struct bfa_s *bfa); +void bfa_cb_rport_scn_no_dev(void *rp); +void bfa_cb_rport_qos_scn_prio(void *rport, + struct bfa_rport_qos_attr_s old_qos_attr, + struct bfa_rport_qos_attr_s new_qos_attr); + +/* + * Rport LUN masking related + */ +#define BFA_RPORT_TAG_INVALID 0xffff +#define BFA_LP_TAG_INVALID 0xff +void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp); +void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp); + +/* + * bfa fcxp API functions + */ +struct bfa_fcxp_s *bfa_fcxp_req_rsp_alloc(void *bfad_fcxp, struct bfa_s *bfa, + int nreq_sgles, int nrsp_sgles, + bfa_fcxp_get_sgaddr_t get_req_sga, + bfa_fcxp_get_sglen_t get_req_sglen, + bfa_fcxp_get_sgaddr_t get_rsp_sga, + bfa_fcxp_get_sglen_t get_rsp_sglen, + bfa_boolean_t req); +void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, + bfa_fcxp_alloc_cbfn_t alloc_cbfn, + void *cbarg, void *bfad_fcxp, + int nreq_sgles, int nrsp_sgles, + bfa_fcxp_get_sgaddr_t get_req_sga, + bfa_fcxp_get_sglen_t get_req_sglen, + bfa_fcxp_get_sgaddr_t get_rsp_sga, + bfa_fcxp_get_sglen_t get_rsp_sglen, + bfa_boolean_t req); +void bfa_fcxp_walloc_cancel(struct bfa_s *bfa, + struct bfa_fcxp_wqe_s *wqe); +void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp); + +void *bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp); +void *bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp); + +void bfa_fcxp_free(struct bfa_fcxp_s *fcxp); + +void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, 
struct bfa_rport_s *rport, + u16 vf_id, u8 lp_tag, + bfa_boolean_t cts, enum fc_cos cos, + u32 reqlen, struct fchs_s *fchs, + bfa_cb_fcxp_send_t cbfn, + void *cbarg, + u32 rsp_maxlen, u8 rsp_timeout); +bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp); +u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp); +u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa); +void bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw); + +static inline void * +bfa_uf_get_frmbuf(struct bfa_uf_s *uf) +{ + return uf->data_ptr; +} + +static inline u16 +bfa_uf_get_frmlen(struct bfa_uf_s *uf) +{ + return uf->data_len; +} + +/* + * bfa uf API functions + */ +void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, + void *cbarg); +void bfa_uf_free(struct bfa_uf_s *uf); + +/* + * bfa lport service api + */ + +u32 bfa_lps_get_max_vport(struct bfa_s *bfa); +struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa); +void bfa_lps_delete(struct bfa_lps_s *lps); +void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, + u16 pdusz, wwn_t pwwn, wwn_t nwwn, + bfa_boolean_t auth_en); +void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, + wwn_t pwwn, wwn_t nwwn); +void bfa_lps_fdisclogo(struct bfa_lps_s *lps); +void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, u32 n2n_pid); +u8 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag); +u32 bfa_lps_get_base_pid(struct bfa_s *bfa); +u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid); +void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status); +void bfa_cb_lps_flogo_comp(void *bfad, void *uarg); +void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status); +void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg); +void bfa_cb_lps_cvl_event(void *bfad, void *uarg); + +/* FAA specific APIs */ +bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, + bfa_cb_iocfc_t cbfn, void *cbarg); + +/* + * FC DIAG data structure + */ +struct bfa_fcdiag_qtest_s { + struct bfa_diag_qtest_result_s *result; + bfa_cb_diag_t cbfn; + void *cbarg; + struct bfa_timer_s timer; + u32 status; + u32 count; + u8 lock; + u8 queue; + u8 all; + u8 timer_active; +}; + +struct bfa_fcdiag_lb_s { + bfa_cb_diag_t cbfn; + void *cbarg; + void *result; + bfa_boolean_t lock; + u32 status; +}; + +struct bfa_dport_s { + struct bfa_s *bfa; /* Back pointer to BFA */ + bfa_sm_t sm; /* finite state machine */ + struct bfa_reqq_wait_s reqq_wait; + bfa_cb_diag_t cbfn; + void *cbarg; + union bfi_diag_dport_msg_u i2hmsg; + u8 test_state; /* enum dport_test_state */ + u8 dynamic; /* boolean_t */ + u8 rsvd[2]; + u32 lpcnt; + u32 payload; /* user defined payload pattern */ + wwn_t rp_pwwn; + wwn_t rp_nwwn; + struct bfa_diag_dport_result_s result; +}; + +struct bfa_fcdiag_s { + struct bfa_s *bfa; /* Back pointer to BFA */ + struct bfa_trc_mod_s *trcmod; + struct bfa_fcdiag_lb_s lb; + struct bfa_fcdiag_qtest_s qtest; + struct bfa_dport_s dport; +}; + +#define BFA_FCDIAG_MOD(__bfa) (&(__bfa)->modules.fcdiag) + +void bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg); + +bfa_status_t bfa_fcdiag_loopback(struct bfa_s *bfa, + enum bfa_port_opmode opmode, + enum bfa_port_speed speed, u32 lpcnt, u32 pat, + struct bfa_diag_loopback_result_s *result, + bfa_cb_diag_t cbfn, void *cbarg); +bfa_status_t bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 ignore, + u32 queue, struct bfa_diag_qtest_result_s *result, + bfa_cb_diag_t cbfn, void *cbarg); +bfa_status_t bfa_fcdiag_lb_is_running(struct bfa_s *bfa); +bfa_status_t bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat, + 
bfa_cb_diag_t cbfn, void *cbarg); +bfa_status_t bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, + void *cbarg); +bfa_status_t bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat, + bfa_cb_diag_t cbfn, void *cbarg); +bfa_status_t bfa_dport_show(struct bfa_s *bfa, + struct bfa_diag_dport_result_s *result); + +#endif /* __BFA_SVC_H__ */ diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c new file mode 100644 index 000000000..62cb7a864 --- /dev/null +++ b/drivers/scsi/bfa/bfad.c @@ -0,0 +1,1797 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +/* + * bfad.c Linux driver PCI interface module. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bfad_drv.h" +#include "bfad_im.h" +#include "bfa_fcs.h" +#include "bfa_defs.h" +#include "bfa.h" + +BFA_TRC_FILE(LDRV, BFAD); +DEFINE_MUTEX(bfad_mutex); +LIST_HEAD(bfad_list); + +static int bfad_inst; +static int num_sgpgs_parm; +int supported_fc4s; +char *host_name, *os_name, *os_patch; +int num_rports, num_ios, num_tms; +int num_fcxps, num_ufbufs; +int reqq_size, rspq_size, num_sgpgs; +int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT; +int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH; +int bfa_io_max_sge = BFAD_IO_MAX_SGE; +int bfa_log_level = 3; /* WARNING log level */ +int ioc_auto_recover = BFA_TRUE; +int bfa_linkup_delay = -1; +int fdmi_enable = BFA_TRUE; +int pcie_max_read_reqsz; +int bfa_debugfs_enable = 1; +int msix_disable_cb = 0, msix_disable_ct = 0; +int max_xfer_size = BFAD_MAX_SECTORS >> 1; +static int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS; + +/* Firmware releated */ +u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size; +u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2; + +#define BFAD_FW_FILE_CB "cbfw-3.2.5.1.bin" +#define BFAD_FW_FILE_CT "ctfw-3.2.5.1.bin" +#define BFAD_FW_FILE_CT2 "ct2fw-3.2.5.1.bin" + +static u32 *bfad_load_fwimg(struct pci_dev *pdev); +static void bfad_free_fwimg(void); +static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, + u32 *bfi_image_size, char *fw_name); + +static const char *msix_name_ct[] = { + "ctrl", + "cpe0", "cpe1", "cpe2", "cpe3", + "rme0", "rme1", "rme2", "rme3" }; + +static const char *msix_name_cb[] = { + "cpe0", "cpe1", "cpe2", "cpe3", + "rme0", "rme1", "rme2", "rme3", + "eemc", "elpu0", "elpu1", "epss", "mlpu" }; + +MODULE_FIRMWARE(BFAD_FW_FILE_CB); +MODULE_FIRMWARE(BFAD_FW_FILE_CT); +MODULE_FIRMWARE(BFAD_FW_FILE_CT2); + +module_param(os_name, charp, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(os_name, "OS name of the hba host machine"); +module_param(os_patch, charp, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine"); +module_param(host_name, charp, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(host_name, "Hostname of the hba host machine"); +module_param(num_rports, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(num_rports, "Max number of rports supported per port " + "(physical/logical), default=1024"); +module_param(num_ios, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000"); +module_param(num_tms, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128"); +module_param(num_fcxps, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(num_fcxps, "Max 
number of fcxp requests, default=64"); +module_param(num_ufbufs, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame " + "buffers, default=64"); +module_param(reqq_size, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, " + "default=256"); +module_param(rspq_size, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, " + "default=64"); +module_param(num_sgpgs, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048"); +module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, " + "Range[>0]"); +module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]"); +module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255"); +module_param(bfa_log_level, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, " + "Range[Critical:1|Error:2|Warning:3|Info:4]"); +module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, " + "Range[off:0|on:1]"); +module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for " + "boot port. Otherwise 10 secs in RHEL4 & 0 for " + "[RHEL5, SLES10, ESX40] Range[>0]"); +module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts for QLogic-415/425/815/825 cards, default=0 Range[false:0|true:1]"); +module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts if possible for QLogic-1010/1020/804/1007/902/1741 cards, default=0, Range[false:0|true:1]"); +module_param(fdmi_enable, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, " + "Range[false:0|true:1]"); +module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 " + "(use system setting), Range[128|256|512|1024|2048|4096]"); +module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1," + " Range[false:0|true:1]"); +module_param(max_xfer_size, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(max_xfer_size, "default=32MB," + " Range[64k|128k|256k|512k|1024k|2048k]"); +module_param(max_rport_logins, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(max_rport_logins, "Max number of logins to initiator and target rports on a port (physical/logical), default=1024"); + +static void +bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event); +static void +bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event); +static void +bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event); +static void +bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event); +static void +bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event); +static void +bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event); +static void +bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event); + +/* + * Beginning state for the driver instance, awaiting the pci_probe event + */ +static void +bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event) +{ 
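+	/*
+	 * BFAD_E_CREATE: move to bfad_sm_created, spawn the bfad_worker
+	 * kthread and then send BFAD_E_INIT; a kthread_create() failure
+	 * is reported via BFAD_E_KTHREAD_CREATE_FAILED first.
+	 */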
+ bfa_trc(bfad, event); + + switch (event) { + case BFAD_E_CREATE: + bfa_sm_set_state(bfad, bfad_sm_created); + bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, + "%s", "bfad_worker"); + if (IS_ERR(bfad->bfad_tsk)) { + printk(KERN_INFO "bfad[%d]: Kernel thread " + "creation failed!\n", bfad->inst_no); + bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED); + } + bfa_sm_send_event(bfad, BFAD_E_INIT); + break; + + case BFAD_E_STOP: + /* Ignore stop; already in uninit */ + break; + + default: + bfa_sm_fault(bfad, event); + } +} + +/* + * Driver Instance is created, awaiting event INIT to initialize the bfad + */ +static void +bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event) +{ + unsigned long flags; + bfa_status_t ret; + + bfa_trc(bfad, event); + + switch (event) { + case BFAD_E_INIT: + bfa_sm_set_state(bfad, bfad_sm_initializing); + + init_completion(&bfad->comp); + + /* Enable Interrupt and wait bfa_init completion */ + if (bfad_setup_intr(bfad)) { + printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n", + bfad->inst_no); + bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); + break; + } + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_iocfc_init(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + /* Set up interrupt handler for each vectors */ + if ((bfad->bfad_flags & BFAD_MSIX_ON) && + bfad_install_msix_handler(bfad)) { + printk(KERN_WARNING "%s: install_msix failed, bfad%d\n", + __func__, bfad->inst_no); + } + + bfad_init_timer(bfad); + + wait_for_completion(&bfad->comp); + + if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { + bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS); + } else { + printk(KERN_WARNING + "bfa %s: bfa init failed\n", + bfad->pci_name); + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_fcs_init(&bfad->bfa_fcs); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + ret = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); + if (ret != BFA_STATUS_OK) { + init_completion(&bfad->comp); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfad->pport.flags |= BFAD_PORT_DELETE; + bfa_fcs_exit(&bfad->bfa_fcs); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + wait_for_completion(&bfad->comp); + + bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); + break; + } + bfad->bfad_flags |= BFAD_HAL_INIT_FAIL; + bfa_sm_send_event(bfad, BFAD_E_HAL_INIT_FAILED); + } + + break; + + case BFAD_E_KTHREAD_CREATE_FAILED: + bfa_sm_set_state(bfad, bfad_sm_uninit); + break; + + default: + bfa_sm_fault(bfad, event); + } +} + +static void +bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event) +{ + int retval; + unsigned long flags; + + bfa_trc(bfad, event); + + switch (event) { + case BFAD_E_INIT_SUCCESS: + kthread_stop(bfad->bfad_tsk); + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfad->bfad_tsk = NULL; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + retval = bfad_start_ops(bfad); + if (retval != BFA_STATUS_OK) { + bfa_sm_set_state(bfad, bfad_sm_failed); + break; + } + bfa_sm_set_state(bfad, bfad_sm_operational); + break; + + case BFAD_E_INIT_FAILED: + bfa_sm_set_state(bfad, bfad_sm_uninit); + kthread_stop(bfad->bfad_tsk); + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfad->bfad_tsk = NULL; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + break; + + case BFAD_E_HAL_INIT_FAILED: + bfa_sm_set_state(bfad, bfad_sm_failed); + break; + default: + bfa_sm_fault(bfad, event); + } +} + +static void +bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event) +{ + int retval; + + bfa_trc(bfad, event); + + switch (event) { + case BFAD_E_INIT_SUCCESS: 
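+		/* HAL init succeeded after an earlier failure: retry start-up */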
+ retval = bfad_start_ops(bfad); + if (retval != BFA_STATUS_OK) + break; + bfa_sm_set_state(bfad, bfad_sm_operational); + break; + + case BFAD_E_STOP: + bfa_sm_set_state(bfad, bfad_sm_fcs_exit); + bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP); + break; + + case BFAD_E_EXIT_COMP: + bfa_sm_set_state(bfad, bfad_sm_uninit); + bfad_remove_intr(bfad); + del_timer_sync(&bfad->hal_tmo); + break; + + default: + bfa_sm_fault(bfad, event); + } +} + +static void +bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event) +{ + bfa_trc(bfad, event); + + switch (event) { + case BFAD_E_STOP: + bfa_sm_set_state(bfad, bfad_sm_fcs_exit); + bfad_fcs_stop(bfad); + break; + + default: + bfa_sm_fault(bfad, event); + } +} + +static void +bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event) +{ + bfa_trc(bfad, event); + + switch (event) { + case BFAD_E_FCS_EXIT_COMP: + bfa_sm_set_state(bfad, bfad_sm_stopping); + bfad_stop(bfad); + break; + + default: + bfa_sm_fault(bfad, event); + } +} + +static void +bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event) +{ + bfa_trc(bfad, event); + + switch (event) { + case BFAD_E_EXIT_COMP: + bfa_sm_set_state(bfad, bfad_sm_uninit); + bfad_remove_intr(bfad); + del_timer_sync(&bfad->hal_tmo); + bfad_im_probe_undo(bfad); + bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; + bfad_uncfg_pport(bfad); + break; + + default: + bfa_sm_fault(bfad, event); + break; + } +} + +/* + * BFA callbacks + */ +void +bfad_hcb_comp(void *arg, bfa_status_t status) +{ + struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg; + + fcomp->status = status; + complete(&fcomp->comp); +} + +/* + * bfa_init callback + */ +void +bfa_cb_init(void *drv, bfa_status_t init_status) +{ + struct bfad_s *bfad = drv; + + if (init_status == BFA_STATUS_OK) { + bfad->bfad_flags |= BFAD_HAL_INIT_DONE; + + /* + * If BFAD_HAL_INIT_FAIL flag is set: + * Wake up the kernel thread to start + * the bfad operations after HAL init done + */ + if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) { + bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL; + wake_up_process(bfad->bfad_tsk); + } + } + + complete(&bfad->comp); +} + +/* + * BFA_FCS callbacks + */ +struct bfad_port_s * +bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port, + enum bfa_lport_role roles, struct bfad_vf_s *vf_drv, + struct bfad_vport_s *vp_drv) +{ + bfa_status_t rc; + struct bfad_port_s *port_drv; + + if (!vp_drv && !vf_drv) { + port_drv = &bfad->pport; + port_drv->pvb_type = BFAD_PORT_PHYS_BASE; + } else if (!vp_drv && vf_drv) { + port_drv = &vf_drv->base_port; + port_drv->pvb_type = BFAD_PORT_VF_BASE; + } else if (vp_drv && !vf_drv) { + port_drv = &vp_drv->drv_port; + port_drv->pvb_type = BFAD_PORT_PHYS_VPORT; + } else { + port_drv = &vp_drv->drv_port; + port_drv->pvb_type = BFAD_PORT_VF_VPORT; + } + + port_drv->fcs_port = port; + port_drv->roles = roles; + + if (roles & BFA_LPORT_ROLE_FCP_IM) { + rc = bfad_im_port_new(bfad, port_drv); + if (rc != BFA_STATUS_OK) { + bfad_im_port_delete(bfad, port_drv); + port_drv = NULL; + } + } + + return port_drv; +} + +/* + * FCS RPORT alloc callback, after successful PLOGI by FCS + */ +bfa_status_t +bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport, + struct bfad_rport_s **rport_drv) +{ + bfa_status_t rc = BFA_STATUS_OK; + + *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC); + if (*rport_drv == NULL) { + rc = BFA_STATUS_ENOMEM; + goto ext; + } + + *rport = &(*rport_drv)->fcs_rport; + +ext: + return rc; +} + +/* + * FCS PBC VPORT Create + */ +void 
+bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport) +{ + + struct bfa_lport_cfg_s port_cfg = {0}; + struct bfad_vport_s *vport; + int rc; + + vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC); + if (!vport) { + bfa_trc(bfad, 0); + return; + } + + vport->drv_port.bfad = bfad; + port_cfg.roles = BFA_LPORT_ROLE_FCP_IM; + port_cfg.pwwn = pbc_vport.vp_pwwn; + port_cfg.nwwn = pbc_vport.vp_nwwn; + port_cfg.preboot_vp = BFA_TRUE; + + rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0, + &port_cfg, vport); + + if (rc != BFA_STATUS_OK) { + bfa_trc(bfad, 0); + return; + } + + list_add_tail(&vport->list_entry, &bfad->pbc_vport_list); +} + +void +bfad_hal_mem_release(struct bfad_s *bfad) +{ + struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; + struct bfa_mem_dma_s *dma_info, *dma_elem; + struct bfa_mem_kva_s *kva_info, *kva_elem; + struct list_head *dm_qe, *km_qe; + + dma_info = &hal_meminfo->dma_info; + kva_info = &hal_meminfo->kva_info; + + /* Iterate through the KVA meminfo queue */ + list_for_each(km_qe, &kva_info->qe) { + kva_elem = (struct bfa_mem_kva_s *) km_qe; + vfree(kva_elem->kva); + } + + /* Iterate through the DMA meminfo queue */ + list_for_each(dm_qe, &dma_info->qe) { + dma_elem = (struct bfa_mem_dma_s *) dm_qe; + dma_free_coherent(&bfad->pcidev->dev, + dma_elem->mem_len, dma_elem->kva, + (dma_addr_t) dma_elem->dma); + } + + memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s)); +} + +void +bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg) +{ + if (num_rports > 0) + bfa_cfg->fwcfg.num_rports = num_rports; + if (num_ios > 0) + bfa_cfg->fwcfg.num_ioim_reqs = num_ios; + if (num_tms > 0) + bfa_cfg->fwcfg.num_tskim_reqs = num_tms; + if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX) + bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps; + if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX) + bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs; + if (reqq_size > 0) + bfa_cfg->drvcfg.num_reqq_elems = reqq_size; + if (rspq_size > 0) + bfa_cfg->drvcfg.num_rspq_elems = rspq_size; + if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX) + bfa_cfg->drvcfg.num_sgpgs = num_sgpgs; + + /* + * populate the hal values back to the driver for sysfs use. 
+ * otherwise, the default values will be shown as 0 in sysfs + */ + num_rports = bfa_cfg->fwcfg.num_rports; + num_ios = bfa_cfg->fwcfg.num_ioim_reqs; + num_tms = bfa_cfg->fwcfg.num_tskim_reqs; + num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs; + num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs; + reqq_size = bfa_cfg->drvcfg.num_reqq_elems; + rspq_size = bfa_cfg->drvcfg.num_rspq_elems; + num_sgpgs = bfa_cfg->drvcfg.num_sgpgs; +} + +bfa_status_t +bfad_hal_mem_alloc(struct bfad_s *bfad) +{ + struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; + struct bfa_mem_dma_s *dma_info, *dma_elem; + struct bfa_mem_kva_s *kva_info, *kva_elem; + struct list_head *dm_qe, *km_qe; + bfa_status_t rc = BFA_STATUS_OK; + dma_addr_t phys_addr; + + bfa_cfg_get_default(&bfad->ioc_cfg); + bfad_update_hal_cfg(&bfad->ioc_cfg); + bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs; + bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa); + + dma_info = &hal_meminfo->dma_info; + kva_info = &hal_meminfo->kva_info; + + /* Iterate through the KVA meminfo queue */ + list_for_each(km_qe, &kva_info->qe) { + kva_elem = (struct bfa_mem_kva_s *) km_qe; + kva_elem->kva = vzalloc(kva_elem->mem_len); + if (kva_elem->kva == NULL) { + bfad_hal_mem_release(bfad); + rc = BFA_STATUS_ENOMEM; + goto ext; + } + } + + /* Iterate through the DMA meminfo queue */ + list_for_each(dm_qe, &dma_info->qe) { + dma_elem = (struct bfa_mem_dma_s *) dm_qe; + dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev, + dma_elem->mem_len, + &phys_addr, GFP_KERNEL); + if (dma_elem->kva == NULL) { + bfad_hal_mem_release(bfad); + rc = BFA_STATUS_ENOMEM; + goto ext; + } + dma_elem->dma = phys_addr; + memset(dma_elem->kva, 0, dma_elem->mem_len); + } +ext: + return rc; +} + +/* + * Create a vport under a vf. + */ +bfa_status_t +bfad_vport_create(struct bfad_s *bfad, u16 vf_id, + struct bfa_lport_cfg_s *port_cfg, struct device *dev) +{ + struct bfad_vport_s *vport; + int rc = BFA_STATUS_OK; + unsigned long flags; + struct completion fcomp; + + vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); + if (!vport) { + rc = BFA_STATUS_ENOMEM; + goto ext; + } + + vport->drv_port.bfad = bfad; + spin_lock_irqsave(&bfad->bfad_lock, flags); + rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id, + port_cfg, vport); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (rc != BFA_STATUS_OK) + goto ext_free_vport; + + if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) { + rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port, + dev); + if (rc != BFA_STATUS_OK) + goto ext_free_fcs_vport; + } + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_fcs_vport_start(&vport->fcs_vport); + list_add_tail(&vport->list_entry, &bfad->vport_list); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return BFA_STATUS_OK; + +ext_free_fcs_vport: + spin_lock_irqsave(&bfad->bfad_lock, flags); + vport->comp_del = &fcomp; + init_completion(vport->comp_del); + bfa_fcs_vport_delete(&vport->fcs_vport); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + wait_for_completion(vport->comp_del); +ext_free_vport: + kfree(vport); +ext: + return rc; +} + +void +bfad_bfa_tmo(struct timer_list *t) +{ + struct bfad_s *bfad = from_timer(bfad, t, hal_tmo); + unsigned long flags; + struct list_head doneq; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + + bfa_timer_beat(&bfad->bfa.timer_mod); + + bfa_comp_deq(&bfad->bfa, &doneq); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (!list_empty(&doneq)) { + bfa_comp_process(&bfad->bfa, &doneq); + 
spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_comp_free(&bfad->bfa, &doneq); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + } + + mod_timer(&bfad->hal_tmo, + jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); +} + +void +bfad_init_timer(struct bfad_s *bfad) +{ + timer_setup(&bfad->hal_tmo, bfad_bfa_tmo, 0); + + mod_timer(&bfad->hal_tmo, + jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); +} + +int +bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) +{ + int rc = -ENODEV; + + if (pci_enable_device(pdev)) { + printk(KERN_ERR "pci_enable_device fail %p\n", pdev); + goto out; + } + + if (pci_request_regions(pdev, BFAD_DRIVER_NAME)) + goto out_disable_device; + + pci_set_master(pdev); + + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) { + rc = -ENODEV; + printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev); + goto out_release_region; + } + + bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); + bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2)); + + if (bfad->pci_bar0_kva == NULL) { + printk(KERN_ERR "Fail to map bar0\n"); + rc = -ENODEV; + goto out_release_region; + } + + bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn); + bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn); + bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva; + bfad->hal_pcidev.device_id = pdev->device; + bfad->hal_pcidev.ssid = pdev->subsystem_device; + bfad->pci_name = pci_name(pdev); + + bfad->pci_attr.vendor_id = pdev->vendor; + bfad->pci_attr.device_id = pdev->device; + bfad->pci_attr.ssid = pdev->subsystem_device; + bfad->pci_attr.ssvid = pdev->subsystem_vendor; + bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn); + + bfad->pcidev = pdev; + + /* Adjust PCIe Maximum Read Request Size */ + if (pci_is_pcie(pdev) && pcie_max_read_reqsz) { + if (pcie_max_read_reqsz >= 128 && + pcie_max_read_reqsz <= 4096 && + is_power_of_2(pcie_max_read_reqsz)) { + int max_rq = pcie_get_readrq(pdev); + printk(KERN_WARNING "BFA[%s]: " + "pcie_max_read_request_size is %d, " + "reset to %d\n", bfad->pci_name, max_rq, + pcie_max_read_reqsz); + pcie_set_readrq(pdev, pcie_max_read_reqsz); + } else { + printk(KERN_WARNING "BFA[%s]: invalid " + "pcie_max_read_request_size %d ignored\n", + bfad->pci_name, pcie_max_read_reqsz); + } + } + + pci_save_state(pdev); + + return 0; + +out_release_region: + pci_release_regions(pdev); +out_disable_device: + pci_disable_device(pdev); +out: + return rc; +} + +void +bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad) +{ + pci_iounmap(pdev, bfad->pci_bar0_kva); + pci_iounmap(pdev, bfad->pci_bar2_kva); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +bfa_status_t +bfad_drv_init(struct bfad_s *bfad) +{ + bfa_status_t rc; + unsigned long flags; + + bfad->cfg_data.rport_del_timeout = rport_del_timeout; + bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth; + bfad->cfg_data.io_max_sge = bfa_io_max_sge; + bfad->cfg_data.binding_method = FCP_PWWN_BINDING; + + rc = bfad_hal_mem_alloc(bfad); + if (rc != BFA_STATUS_OK) { + printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n", + bfad->inst_no); + printk(KERN_WARNING + "Not enough memory to attach all QLogic BR-series HBA ports. 
System may need more memory.\n"); + return BFA_STATUS_FAILED; + } + + bfad->bfa.trcmod = bfad->trcmod; + bfad->bfa.plog = &bfad->plog_buf; + bfa_plog_init(&bfad->plog_buf); + bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, + 0, "Driver Attach"); + + bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo, + &bfad->hal_pcidev); + + /* FCS INIT */ + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfad->bfa_fcs.trcmod = bfad->trcmod; + bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); + bfad->bfa_fcs.fdmi_enabled = fdmi_enable; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + bfad->bfad_flags |= BFAD_DRV_INIT_DONE; + + return BFA_STATUS_OK; +} + +void +bfad_drv_uninit(struct bfad_s *bfad) +{ + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + init_completion(&bfad->comp); + bfa_iocfc_stop(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + wait_for_completion(&bfad->comp); + + del_timer_sync(&bfad->hal_tmo); + bfa_isr_disable(&bfad->bfa); + bfa_detach(&bfad->bfa); + bfad_remove_intr(bfad); + bfad_hal_mem_release(bfad); + + bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE; +} + +void +bfad_drv_start(struct bfad_s *bfad) +{ + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_iocfc_start(&bfad->bfa); + bfa_fcs_pbc_vport_init(&bfad->bfa_fcs); + bfa_fcs_fabric_modstart(&bfad->bfa_fcs); + bfad->bfad_flags |= BFAD_HAL_START_DONE; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (bfad->im) + flush_workqueue(bfad->im->drv_workq); +} + +void +bfad_fcs_stop(struct bfad_s *bfad) +{ + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + init_completion(&bfad->comp); + bfad->pport.flags |= BFAD_PORT_DELETE; + bfa_fcs_exit(&bfad->bfa_fcs); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + wait_for_completion(&bfad->comp); + + bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP); +} + +void +bfad_stop(struct bfad_s *bfad) +{ + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + init_completion(&bfad->comp); + bfa_iocfc_stop(&bfad->bfa); + bfad->bfad_flags &= ~BFAD_HAL_START_DONE; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + wait_for_completion(&bfad->comp); + + bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP); +} + +bfa_status_t +bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role) +{ + int rc = BFA_STATUS_OK; + + /* Allocate scsi_host for the physical port */ + if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) && + (role & BFA_LPORT_ROLE_FCP_IM)) { + if (bfad->pport.im_port == NULL) { + rc = BFA_STATUS_FAILED; + goto out; + } + + rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port, + &bfad->pcidev->dev); + if (rc != BFA_STATUS_OK) + goto out; + + bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM; + } + + bfad->bfad_flags |= BFAD_CFG_PPORT_DONE; + +out: + return rc; +} + +void +bfad_uncfg_pport(struct bfad_s *bfad) +{ + if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) && + (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) { + bfad_im_scsi_host_free(bfad, bfad->pport.im_port); + bfad_im_port_clean(bfad->pport.im_port); + kfree(bfad->pport.im_port); + bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM; + } + + bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE; +} + +bfa_status_t +bfad_start_ops(struct bfad_s *bfad) { + + int retval; + unsigned long flags; + struct bfad_vport_s *vport, *vport_new; + struct bfa_fcs_driver_info_s driver_info; + + /* Limit min/max. 
xfer size to [64k-32MB] */ + if (max_xfer_size < BFAD_MIN_SECTORS >> 1) + max_xfer_size = BFAD_MIN_SECTORS >> 1; + if (max_xfer_size > BFAD_MAX_SECTORS >> 1) + max_xfer_size = BFAD_MAX_SECTORS >> 1; + + /* Fill the driver_info info to fcs*/ + memset(&driver_info, 0, sizeof(driver_info)); + strscpy(driver_info.version, BFAD_DRIVER_VERSION, + sizeof(driver_info.version)); + if (host_name) + strscpy(driver_info.host_machine_name, host_name, + sizeof(driver_info.host_machine_name)); + if (os_name) + strscpy(driver_info.host_os_name, os_name, + sizeof(driver_info.host_os_name)); + if (os_patch) + strscpy(driver_info.host_os_patch, os_patch, + sizeof(driver_info.host_os_patch)); + + strscpy(driver_info.os_device_name, bfad->pci_name, + sizeof(driver_info.os_device_name)); + + /* FCS driver info init */ + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); + + if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) + bfa_fcs_update_cfg(&bfad->bfa_fcs); + else + bfa_fcs_init(&bfad->bfa_fcs); + + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (!(bfad->bfad_flags & BFAD_CFG_PPORT_DONE)) { + retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); + if (retval != BFA_STATUS_OK) + return BFA_STATUS_FAILED; + } + + /* Setup fc host fixed attribute if the lk supports */ + bfad_fc_host_init(bfad->pport.im_port); + + /* BFAD level FC4 IM specific resource allocation */ + retval = bfad_im_probe(bfad); + if (retval != BFA_STATUS_OK) { + printk(KERN_WARNING "bfad_im_probe failed\n"); + if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) + bfa_sm_set_state(bfad, bfad_sm_failed); + return BFA_STATUS_FAILED; + } else + bfad->bfad_flags |= BFAD_FC4_PROBE_DONE; + + bfad_drv_start(bfad); + + /* Complete pbc vport create */ + list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list, + list_entry) { + struct fc_vport_identifiers vid; + struct fc_vport *fc_vport; + char pwwn_buf[BFA_STRING_32]; + + memset(&vid, 0, sizeof(vid)); + vid.roles = FC_PORT_ROLE_FCP_INITIATOR; + vid.vport_type = FC_PORTTYPE_NPIV; + vid.disable = false; + vid.node_name = wwn_to_u64((u8 *) + (&((vport->fcs_vport).lport.port_cfg.nwwn))); + vid.port_name = wwn_to_u64((u8 *) + (&((vport->fcs_vport).lport.port_cfg.pwwn))); + fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid); + if (!fc_vport) { + wwn2str(pwwn_buf, vid.port_name); + printk(KERN_WARNING "bfad%d: failed to create pbc vport" + " %s\n", bfad->inst_no, pwwn_buf); + } + list_del(&vport->list_entry); + kfree(vport); + } + + /* + * If bfa_linkup_delay is set to -1 default; try to retrive the + * value using the bfad_get_linkup_delay(); else use the + * passed in module param value as the bfa_linkup_delay. 
+ */ + if (bfa_linkup_delay < 0) { + bfa_linkup_delay = bfad_get_linkup_delay(bfad); + bfad_rport_online_wait(bfad); + bfa_linkup_delay = -1; + } else + bfad_rport_online_wait(bfad); + + BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n"); + + return BFA_STATUS_OK; +} + +int +bfad_worker(void *ptr) +{ + struct bfad_s *bfad = ptr; + unsigned long flags; + + if (kthread_should_stop()) + return 0; + + /* Send event BFAD_E_INIT_SUCCESS */ + bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfad->bfad_tsk = NULL; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +/* + * BFA driver interrupt functions + */ +irqreturn_t +bfad_intx(int irq, void *dev_id) +{ + struct bfad_s *bfad = dev_id; + struct list_head doneq; + unsigned long flags; + bfa_boolean_t rc; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + rc = bfa_intx(&bfad->bfa); + if (!rc) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return IRQ_NONE; + } + + bfa_comp_deq(&bfad->bfa, &doneq); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (!list_empty(&doneq)) { + bfa_comp_process(&bfad->bfa, &doneq); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_comp_free(&bfad->bfa, &doneq); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + } + + return IRQ_HANDLED; + +} + +static irqreturn_t +bfad_msix(int irq, void *dev_id) +{ + struct bfad_msix_s *vec = dev_id; + struct bfad_s *bfad = vec->bfad; + struct list_head doneq; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + + bfa_msix(&bfad->bfa, vec->msix.entry); + bfa_comp_deq(&bfad->bfa, &doneq); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (!list_empty(&doneq)) { + bfa_comp_process(&bfad->bfa, &doneq); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_comp_free(&bfad->bfa, &doneq); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + } + + return IRQ_HANDLED; +} + +/* + * Initialize the MSIX entry table. + */ +static void +bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries, + int mask, int max_bit) +{ + int i; + int match = 0x00000001; + + for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) { + if (mask & match) { + bfad->msix_tab[bfad->nvec].msix.entry = i; + bfad->msix_tab[bfad->nvec].bfad = bfad; + msix_entries[bfad->nvec].entry = i; + bfad->nvec++; + } + + match <<= 1; + } + +} + +int +bfad_install_msix_handler(struct bfad_s *bfad) +{ + int i, error = 0; + + for (i = 0; i < bfad->nvec; i++) { + sprintf(bfad->msix_tab[i].name, "bfa-%s-%s", + bfad->pci_name, + ((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ? + msix_name_cb[i] : msix_name_ct[i])); + + error = request_irq(bfad->msix_tab[i].msix.vector, + (irq_handler_t) bfad_msix, 0, + bfad->msix_tab[i].name, &bfad->msix_tab[i]); + bfa_trc(bfad, i); + bfa_trc(bfad, bfad->msix_tab[i].msix.vector); + if (error) { + int j; + + for (j = 0; j < i; j++) + free_irq(bfad->msix_tab[j].msix.vector, + &bfad->msix_tab[j]); + + bfad->bfad_flags &= ~BFAD_MSIX_ON; + pci_disable_msix(bfad->pcidev); + + return 1; + } + } + + return 0; +} + +/* + * Setup MSIX based interrupt. + */ +int +bfad_setup_intr(struct bfad_s *bfad) +{ + int error; + u32 mask = 0, i, num_bit = 0, max_bit = 0; + struct msix_entry msix_entries[MAX_MSIX_ENTRY]; + struct pci_dev *pdev = bfad->pcidev; + u16 reg; + + /* Call BFA to get the msix map for this PCI function. 
*/ + bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit); + + /* Set up the msix entry table */ + bfad_init_msix_entry(bfad, msix_entries, mask, max_bit); + + if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) || + (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) { + + error = pci_enable_msix_exact(bfad->pcidev, + msix_entries, bfad->nvec); + /* In CT1 & CT2, try to allocate just one vector */ + if (error == -ENOSPC && bfa_asic_id_ctc(pdev->device)) { + printk(KERN_WARNING "bfa %s: trying one msix " + "vector failed to allocate %d[%d]\n", + bfad->pci_name, bfad->nvec, error); + bfad->nvec = 1; + error = pci_enable_msix_exact(bfad->pcidev, + msix_entries, 1); + } + + if (error) { + printk(KERN_WARNING "bfad%d: " + "pci_enable_msix_exact failed (%d), " + "use line based.\n", + bfad->inst_no, error); + goto line_based; + } + + /* Disable INTX in MSI-X mode */ + pci_read_config_word(pdev, PCI_COMMAND, ®); + + if (!(reg & PCI_COMMAND_INTX_DISABLE)) + pci_write_config_word(pdev, PCI_COMMAND, + reg | PCI_COMMAND_INTX_DISABLE); + + /* Save the vectors */ + for (i = 0; i < bfad->nvec; i++) { + bfa_trc(bfad, msix_entries[i].vector); + bfad->msix_tab[i].msix.vector = msix_entries[i].vector; + } + + bfa_msix_init(&bfad->bfa, bfad->nvec); + + bfad->bfad_flags |= BFAD_MSIX_ON; + + return 0; + } + +line_based: + error = request_irq(bfad->pcidev->irq, (irq_handler_t)bfad_intx, + BFAD_IRQ_FLAGS, BFAD_DRIVER_NAME, bfad); + if (error) + return error; + + bfad->bfad_flags |= BFAD_INTX_ON; + + return 0; +} + +void +bfad_remove_intr(struct bfad_s *bfad) +{ + int i; + + if (bfad->bfad_flags & BFAD_MSIX_ON) { + for (i = 0; i < bfad->nvec; i++) + free_irq(bfad->msix_tab[i].msix.vector, + &bfad->msix_tab[i]); + + pci_disable_msix(bfad->pcidev); + bfad->bfad_flags &= ~BFAD_MSIX_ON; + } else if (bfad->bfad_flags & BFAD_INTX_ON) { + free_irq(bfad->pcidev->irq, bfad); + } +} + +/* + * PCI probe entry. 
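+ * Allocates the per-instance bfad_s structure, sets up tracing, AEN
+ * queues and PCI resources, then kicks off the driver state machine
+ * with BFAD_E_CREATE.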
+ */ +int +bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct bfad_s *bfad; + int error = -ENODEV, retval, i; + + /* For single port cards - only claim function 0 */ + if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) && + (PCI_FUNC(pdev->devfn) != 0)) + return -ENODEV; + + bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL); + if (!bfad) { + error = -ENOMEM; + goto out; + } + + bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL); + if (!bfad->trcmod) { + printk(KERN_WARNING "Error alloc trace buffer!\n"); + error = -ENOMEM; + goto out_alloc_trace_failure; + } + + /* TRACE INIT */ + bfa_trc_init(bfad->trcmod); + bfa_trc(bfad, bfad_inst); + + /* AEN INIT */ + INIT_LIST_HEAD(&bfad->free_aen_q); + INIT_LIST_HEAD(&bfad->active_aen_q); + for (i = 0; i < BFA_AEN_MAX_ENTRY; i++) + list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q); + + if (!(bfad_load_fwimg(pdev))) { + kfree(bfad->trcmod); + goto out_alloc_trace_failure; + } + + retval = bfad_pci_init(pdev, bfad); + if (retval) { + printk(KERN_WARNING "bfad_pci_init failure!\n"); + error = retval; + goto out_pci_init_failure; + } + + mutex_lock(&bfad_mutex); + bfad->inst_no = bfad_inst++; + list_add_tail(&bfad->list_entry, &bfad_list); + mutex_unlock(&bfad_mutex); + + /* Initializing the state machine: State set to uninit */ + bfa_sm_set_state(bfad, bfad_sm_uninit); + + spin_lock_init(&bfad->bfad_lock); + spin_lock_init(&bfad->bfad_aen_spinlock); + + pci_set_drvdata(pdev, bfad); + + bfad->ref_count = 0; + bfad->pport.bfad = bfad; + INIT_LIST_HEAD(&bfad->pbc_vport_list); + INIT_LIST_HEAD(&bfad->vport_list); + + /* Setup the debugfs node for this bfad */ + if (bfa_debugfs_enable) + bfad_debugfs_init(&bfad->pport); + + retval = bfad_drv_init(bfad); + if (retval != BFA_STATUS_OK) + goto out_drv_init_failure; + + bfa_sm_send_event(bfad, BFAD_E_CREATE); + + if (bfa_sm_cmp_state(bfad, bfad_sm_uninit)) + goto out_bfad_sm_failure; + + return 0; + +out_bfad_sm_failure: + bfad_hal_mem_release(bfad); +out_drv_init_failure: + /* Remove the debugfs node for this bfad */ + kfree(bfad->regdata); + bfad_debugfs_exit(&bfad->pport); + mutex_lock(&bfad_mutex); + bfad_inst--; + list_del(&bfad->list_entry); + mutex_unlock(&bfad_mutex); + bfad_pci_uninit(pdev, bfad); +out_pci_init_failure: + kfree(bfad->trcmod); +out_alloc_trace_failure: + kfree(bfad); +out: + return error; +} + +/* + * PCI remove entry. + */ +void +bfad_pci_remove(struct pci_dev *pdev) +{ + struct bfad_s *bfad = pci_get_drvdata(pdev); + unsigned long flags; + + bfa_trc(bfad, bfad->inst_no); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (bfad->bfad_tsk != NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + kthread_stop(bfad->bfad_tsk); + } else { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + } + + /* Send Event BFAD_E_STOP */ + bfa_sm_send_event(bfad, BFAD_E_STOP); + + /* Driver detach and dealloc mem */ + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_detach(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfad_hal_mem_release(bfad); + + /* Remove the debugfs node for this bfad */ + kfree(bfad->regdata); + bfad_debugfs_exit(&bfad->pport); + + /* Cleaning the BFAD instance */ + mutex_lock(&bfad_mutex); + bfad_inst--; + list_del(&bfad->list_entry); + mutex_unlock(&bfad_mutex); + bfad_pci_uninit(pdev, bfad); + + kfree(bfad->trcmod); + kfree(bfad); +} + +/* + * PCI Error Recovery entry, error detected. 
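+ * Suspends the IOC and, depending on the reported channel state, asks
+ * the PCI error-recovery core to recover, reset the slot, or
+ * disconnect the device.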
+ */ +static pci_ers_result_t +bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct bfad_s *bfad = pci_get_drvdata(pdev); + unsigned long flags; + pci_ers_result_t ret = PCI_ERS_RESULT_NONE; + + dev_printk(KERN_ERR, &pdev->dev, + "error detected state: %d - flags: 0x%x\n", + state, bfad->bfad_flags); + + switch (state) { + case pci_channel_io_normal: /* non-fatal error */ + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfad->bfad_flags &= ~BFAD_EEH_BUSY; + /* Suspend/fail all bfa operations */ + bfa_ioc_suspend(&bfad->bfa.ioc); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + del_timer_sync(&bfad->hal_tmo); + ret = PCI_ERS_RESULT_CAN_RECOVER; + break; + case pci_channel_io_frozen: /* fatal error */ + init_completion(&bfad->comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfad->bfad_flags |= BFAD_EEH_BUSY; + /* Suspend/fail all bfa operations */ + bfa_ioc_suspend(&bfad->bfa.ioc); + bfa_fcs_stop(&bfad->bfa_fcs); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + wait_for_completion(&bfad->comp); + + bfad_remove_intr(bfad); + del_timer_sync(&bfad->hal_tmo); + pci_disable_device(pdev); + ret = PCI_ERS_RESULT_NEED_RESET; + break; + case pci_channel_io_perm_failure: /* PCI Card is DEAD */ + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfad->bfad_flags |= BFAD_EEH_BUSY | + BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + /* If the error_detected handler is called with the reason + * pci_channel_io_perm_failure - it will subsequently call + * pci_remove() entry point to remove the pci device from the + * system - So defer the cleanup to pci_remove(); cleaning up + * here causes inconsistent state during pci_remove(). + */ + ret = PCI_ERS_RESULT_DISCONNECT; + break; + default: + WARN_ON(1); + } + + return ret; +} + +static int restart_bfa(struct bfad_s *bfad) +{ + unsigned long flags; + struct pci_dev *pdev = bfad->pcidev; + + bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, + &bfad->meminfo, &bfad->hal_pcidev); + + /* Enable Interrupt and wait bfa_init completion */ + if (bfad_setup_intr(bfad)) { + dev_printk(KERN_WARNING, &pdev->dev, + "%s: bfad_setup_intr failed\n", bfad->pci_name); + bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); + return -1; + } + + init_completion(&bfad->comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_iocfc_init(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + /* Set up interrupt handler for each vectors */ + if ((bfad->bfad_flags & BFAD_MSIX_ON) && + bfad_install_msix_handler(bfad)) + dev_printk(KERN_WARNING, &pdev->dev, + "%s: install_msix failed.\n", bfad->pci_name); + + bfad_init_timer(bfad); + wait_for_completion(&bfad->comp); + bfad_drv_start(bfad); + + return 0; +} + +/* + * PCI Error Recovery entry, re-initialize the chip. + */ +static pci_ers_result_t +bfad_pci_slot_reset(struct pci_dev *pdev) +{ + struct bfad_s *bfad = pci_get_drvdata(pdev); + u8 byte; + int rc; + + dev_printk(KERN_ERR, &pdev->dev, + "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags); + + if (pci_enable_device(pdev)) { + dev_printk(KERN_ERR, &pdev->dev, "Cannot re-enable " + "PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_restore_state(pdev); + + /* + * Read some byte (e.g. DMA max. payload size which can't + * be 0xff any time) to make sure - we did not hit another PCI error + * in the middle of recovery. If we did, then declare permanent failure. 
+ */ + pci_read_config_byte(pdev, 0x68, &byte); + if (byte == 0xff) { + dev_printk(KERN_ERR, &pdev->dev, + "slot_reset failed ... got another PCI error !\n"); + goto out_disable_device; + } + + pci_save_state(pdev); + pci_set_master(pdev); + + rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)); + if (rc) + goto out_disable_device; + + if (restart_bfa(bfad) == -1) + goto out_disable_device; + + dev_printk(KERN_WARNING, &pdev->dev, + "slot_reset completed flags: 0x%x!\n", bfad->bfad_flags); + + return PCI_ERS_RESULT_RECOVERED; + +out_disable_device: + pci_disable_device(pdev); + return PCI_ERS_RESULT_DISCONNECT; +} + +static pci_ers_result_t +bfad_pci_mmio_enabled(struct pci_dev *pdev) +{ + unsigned long flags; + struct bfad_s *bfad = pci_get_drvdata(pdev); + + dev_printk(KERN_INFO, &pdev->dev, "mmio_enabled\n"); + + /* Fetch FW diagnostic information */ + bfa_ioc_debug_save_ftrc(&bfad->bfa.ioc); + + /* Cancel all pending IOs */ + spin_lock_irqsave(&bfad->bfad_lock, flags); + init_completion(&bfad->comp); + bfa_fcs_stop(&bfad->bfa_fcs); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + wait_for_completion(&bfad->comp); + + bfad_remove_intr(bfad); + del_timer_sync(&bfad->hal_tmo); + pci_disable_device(pdev); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static void +bfad_pci_resume(struct pci_dev *pdev) +{ + unsigned long flags; + struct bfad_s *bfad = pci_get_drvdata(pdev); + + dev_printk(KERN_WARNING, &pdev->dev, "resume\n"); + + /* wait until the link is online */ + bfad_rport_online_wait(bfad); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfad->bfad_flags &= ~BFAD_EEH_BUSY; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); +} + +struct pci_device_id bfad_id_table[] = { + { + .vendor = BFA_PCI_VENDOR_ID_BROCADE, + .device = BFA_PCI_DEVICE_ID_FC_8G2P, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .vendor = BFA_PCI_VENDOR_ID_BROCADE, + .device = BFA_PCI_DEVICE_ID_FC_8G1P, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .vendor = BFA_PCI_VENDOR_ID_BROCADE, + .device = BFA_PCI_DEVICE_ID_CT, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_SERIAL_FIBER << 8), + .class_mask = ~0, + }, + { + .vendor = BFA_PCI_VENDOR_ID_BROCADE, + .device = BFA_PCI_DEVICE_ID_CT_FC, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_SERIAL_FIBER << 8), + .class_mask = ~0, + }, + { + .vendor = BFA_PCI_VENDOR_ID_BROCADE, + .device = BFA_PCI_DEVICE_ID_CT2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_SERIAL_FIBER << 8), + .class_mask = ~0, + }, + + { + .vendor = BFA_PCI_VENDOR_ID_BROCADE, + .device = BFA_PCI_DEVICE_ID_CT2_QUAD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_SERIAL_FIBER << 8), + .class_mask = ~0, + }, + {0, 0}, +}; + +MODULE_DEVICE_TABLE(pci, bfad_id_table); + +/* + * PCI error recovery handlers. + */ +static struct pci_error_handlers bfad_err_handler = { + .error_detected = bfad_pci_error_detected, + .slot_reset = bfad_pci_slot_reset, + .mmio_enabled = bfad_pci_mmio_enabled, + .resume = bfad_pci_resume, +}; + +static struct pci_driver bfad_pci_driver = { + .name = BFAD_DRIVER_NAME, + .id_table = bfad_id_table, + .probe = bfad_pci_probe, + .remove = bfad_pci_remove, + .err_handler = &bfad_err_handler, +}; + +/* + * Driver module init. 
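+ * Initializes the IM module and registers the PCI driver; the IM
+ * module is torn down again if PCI registration fails.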
+ */ +static int __init +bfad_init(void) +{ + int error = 0; + + pr_info("QLogic BR-series BFA FC/FCOE SCSI driver - version: %s\n", + BFAD_DRIVER_VERSION); + + if (num_sgpgs > 0) + num_sgpgs_parm = num_sgpgs; + + error = bfad_im_module_init(); + if (error) { + error = -ENOMEM; + printk(KERN_WARNING "bfad_im_module_init failure\n"); + goto ext; + } + + if (strcmp(FCPI_NAME, " fcpim") == 0) + supported_fc4s |= BFA_LPORT_ROLE_FCP_IM; + + bfa_auto_recover = ioc_auto_recover; + bfa_fcs_rport_set_del_timeout(rport_del_timeout); + bfa_fcs_rport_set_max_logins(max_rport_logins); + + error = pci_register_driver(&bfad_pci_driver); + if (error) { + printk(KERN_WARNING "pci_register_driver failure\n"); + goto ext; + } + + return 0; + +ext: + bfad_im_module_exit(); + return error; +} + +/* + * Driver module exit. + */ +static void __exit +bfad_exit(void) +{ + pci_unregister_driver(&bfad_pci_driver); + bfad_im_module_exit(); + bfad_free_fwimg(); +} + +/* Firmware handling */ +static void +bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, + u32 *bfi_image_size, char *fw_name) +{ + const struct firmware *fw; + + if (request_firmware(&fw, fw_name, &pdev->dev)) { + printk(KERN_ALERT "Can't locate firmware %s\n", fw_name); + *bfi_image = NULL; + goto out; + } + + *bfi_image = vmalloc(fw->size); + if (NULL == *bfi_image) { + printk(KERN_ALERT "Fail to allocate buffer for fw image " + "size=%x!\n", (u32) fw->size); + goto out; + } + + memcpy(*bfi_image, fw->data, fw->size); + *bfi_image_size = fw->size/sizeof(u32); +out: + release_firmware(fw); +} + +static u32 * +bfad_load_fwimg(struct pci_dev *pdev) +{ + if (bfa_asic_id_ct2(pdev->device)) { + if (bfi_image_ct2_size == 0) + bfad_read_firmware(pdev, &bfi_image_ct2, + &bfi_image_ct2_size, BFAD_FW_FILE_CT2); + return bfi_image_ct2; + } else if (bfa_asic_id_ct(pdev->device)) { + if (bfi_image_ct_size == 0) + bfad_read_firmware(pdev, &bfi_image_ct, + &bfi_image_ct_size, BFAD_FW_FILE_CT); + return bfi_image_ct; + } else if (bfa_asic_id_cb(pdev->device)) { + if (bfi_image_cb_size == 0) + bfad_read_firmware(pdev, &bfi_image_cb, + &bfi_image_cb_size, BFAD_FW_FILE_CB); + return bfi_image_cb; + } + + return NULL; +} + +static void +bfad_free_fwimg(void) +{ + if (bfi_image_ct2_size && bfi_image_ct2) + vfree(bfi_image_ct2); + if (bfi_image_ct_size && bfi_image_ct) + vfree(bfi_image_ct); + if (bfi_image_cb_size && bfi_image_cb) + vfree(bfi_image_cb); +} + +module_init(bfad_init); +module_exit(bfad_exit); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("QLogic BR-series Fibre Channel HBA Driver" BFAD_PROTO_NAME); +MODULE_AUTHOR("QLogic Corporation"); +MODULE_VERSION(BFAD_DRIVER_VERSION); diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c new file mode 100644 index 000000000..e96e4b6df --- /dev/null +++ b/drivers/scsi/bfa/bfad_attr.c @@ -0,0 +1,1007 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +/* + * bfa_attr.c Linux driver configuration interface module. + */ + +#include "bfad_drv.h" +#include "bfad_im.h" + +/* + * FC transport template entry, get SCSI target port ID. 
+ */ +static void +bfad_im_get_starget_port_id(struct scsi_target *starget) +{ + struct Scsi_Host *shost; + struct bfad_im_port_s *im_port; + struct bfad_s *bfad; + struct bfad_itnim_s *itnim = NULL; + u32 fc_id = -1; + unsigned long flags; + + shost = dev_to_shost(starget->dev.parent); + im_port = (struct bfad_im_port_s *) shost->hostdata[0]; + bfad = im_port->bfad; + spin_lock_irqsave(&bfad->bfad_lock, flags); + + itnim = bfad_get_itnim(im_port, starget->id); + if (itnim) + fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim); + + fc_starget_port_id(starget) = fc_id; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); +} + +/* + * FC transport template entry, get SCSI target nwwn. + */ +static void +bfad_im_get_starget_node_name(struct scsi_target *starget) +{ + struct Scsi_Host *shost; + struct bfad_im_port_s *im_port; + struct bfad_s *bfad; + struct bfad_itnim_s *itnim = NULL; + u64 node_name = 0; + unsigned long flags; + + shost = dev_to_shost(starget->dev.parent); + im_port = (struct bfad_im_port_s *) shost->hostdata[0]; + bfad = im_port->bfad; + spin_lock_irqsave(&bfad->bfad_lock, flags); + + itnim = bfad_get_itnim(im_port, starget->id); + if (itnim) + node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim); + + fc_starget_node_name(starget) = cpu_to_be64(node_name); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); +} + +/* + * FC transport template entry, get SCSI target pwwn. + */ +static void +bfad_im_get_starget_port_name(struct scsi_target *starget) +{ + struct Scsi_Host *shost; + struct bfad_im_port_s *im_port; + struct bfad_s *bfad; + struct bfad_itnim_s *itnim = NULL; + u64 port_name = 0; + unsigned long flags; + + shost = dev_to_shost(starget->dev.parent); + im_port = (struct bfad_im_port_s *) shost->hostdata[0]; + bfad = im_port->bfad; + spin_lock_irqsave(&bfad->bfad_lock, flags); + + itnim = bfad_get_itnim(im_port, starget->id); + if (itnim) + port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); + + fc_starget_port_name(starget) = cpu_to_be64(port_name); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); +} + +/* + * FC transport template entry, get SCSI host port ID. + */ +static void +bfad_im_get_host_port_id(struct Scsi_Host *shost) +{ + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_port_s *port = im_port->port; + + fc_host_port_id(shost) = + bfa_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port)); +} + +/* + * FC transport template entry, get SCSI host port type. + */ +static void +bfad_im_get_host_port_type(struct Scsi_Host *shost) +{ + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + struct bfa_lport_attr_s port_attr; + + bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); + + switch (port_attr.port_type) { + case BFA_PORT_TYPE_NPORT: + fc_host_port_type(shost) = FC_PORTTYPE_NPORT; + break; + case BFA_PORT_TYPE_NLPORT: + fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; + break; + case BFA_PORT_TYPE_P2P: + fc_host_port_type(shost) = FC_PORTTYPE_PTP; + break; + case BFA_PORT_TYPE_LPORT: + fc_host_port_type(shost) = FC_PORTTYPE_LPORT; + break; + default: + fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; + break; + } +} + +/* + * FC transport template entry, get SCSI host port state. 
+ */ +static void +bfad_im_get_host_port_state(struct Scsi_Host *shost) +{ + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + struct bfa_port_attr_s attr; + + bfa_fcport_get_attr(&bfad->bfa, &attr); + + switch (attr.port_state) { + case BFA_PORT_ST_LINKDOWN: + fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; + break; + case BFA_PORT_ST_LINKUP: + fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + break; + case BFA_PORT_ST_DISABLED: + case BFA_PORT_ST_STOPPED: + case BFA_PORT_ST_IOCDOWN: + case BFA_PORT_ST_IOCDIS: + fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; + break; + case BFA_PORT_ST_UNINIT: + case BFA_PORT_ST_ENABLING_QWAIT: + case BFA_PORT_ST_ENABLING: + case BFA_PORT_ST_DISABLING_QWAIT: + case BFA_PORT_ST_DISABLING: + default: + fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; + break; + } +} + +/* + * FC transport template entry, get SCSI host active fc4s. + */ +static void +bfad_im_get_host_active_fc4s(struct Scsi_Host *shost) +{ + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_port_s *port = im_port->port; + + memset(fc_host_active_fc4s(shost), 0, + sizeof(fc_host_active_fc4s(shost))); + + if (port->supported_fc4s & BFA_LPORT_ROLE_FCP_IM) + fc_host_active_fc4s(shost)[2] = 1; + + fc_host_active_fc4s(shost)[7] = 1; +} + +/* + * FC transport template entry, get SCSI host link speed. + */ +static void +bfad_im_get_host_speed(struct Scsi_Host *shost) +{ + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + struct bfa_port_attr_s attr; + + bfa_fcport_get_attr(&bfad->bfa, &attr); + switch (attr.speed) { + case BFA_PORT_SPEED_10GBPS: + fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + break; + case BFA_PORT_SPEED_16GBPS: + fc_host_speed(shost) = FC_PORTSPEED_16GBIT; + break; + case BFA_PORT_SPEED_8GBPS: + fc_host_speed(shost) = FC_PORTSPEED_8GBIT; + break; + case BFA_PORT_SPEED_4GBPS: + fc_host_speed(shost) = FC_PORTSPEED_4GBIT; + break; + case BFA_PORT_SPEED_2GBPS: + fc_host_speed(shost) = FC_PORTSPEED_2GBIT; + break; + case BFA_PORT_SPEED_1GBPS: + fc_host_speed(shost) = FC_PORTSPEED_1GBIT; + break; + default: + fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; + break; + } +} + +/* + * FC transport template entry, get SCSI host port type. + */ +static void +bfad_im_get_host_fabric_name(struct Scsi_Host *shost) +{ + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_port_s *port = im_port->port; + wwn_t fabric_nwwn = 0; + + fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port); + + fc_host_fabric_name(shost) = cpu_to_be64(fabric_nwwn); + +} + +/* + * FC transport template entry, get BFAD statistics. 
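+ * Collects the physical port statistics via a completion-based
+ * firmware call and copies the relevant counters into the
+ * fc_host_statistics structure returned to the FC transport.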
+ */ +static struct fc_host_statistics * +bfad_im_get_stats(struct Scsi_Host *shost) +{ + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + struct bfad_hal_comp fcomp; + union bfa_port_stats_u *fcstats; + struct fc_host_statistics *hstats; + bfa_status_t rc; + unsigned long flags; + + fcstats = kzalloc(sizeof(union bfa_port_stats_u), GFP_KERNEL); + if (fcstats == NULL) + return NULL; + + hstats = &bfad->link_stats; + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + memset(hstats, 0, sizeof(struct fc_host_statistics)); + rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa), + fcstats, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (rc != BFA_STATUS_OK) { + kfree(fcstats); + return NULL; + } + + wait_for_completion(&fcomp.comp); + + /* Fill the fc_host_statistics structure */ + hstats->seconds_since_last_reset = fcstats->fc.secs_reset; + hstats->tx_frames = fcstats->fc.tx_frames; + hstats->tx_words = fcstats->fc.tx_words; + hstats->rx_frames = fcstats->fc.rx_frames; + hstats->rx_words = fcstats->fc.rx_words; + hstats->lip_count = fcstats->fc.lip_count; + hstats->nos_count = fcstats->fc.nos_count; + hstats->error_frames = fcstats->fc.error_frames; + hstats->dumped_frames = fcstats->fc.dropped_frames; + hstats->link_failure_count = fcstats->fc.link_failures; + hstats->loss_of_sync_count = fcstats->fc.loss_of_syncs; + hstats->loss_of_signal_count = fcstats->fc.loss_of_signals; + hstats->prim_seq_protocol_err_count = fcstats->fc.primseq_errs; + hstats->invalid_crc_count = fcstats->fc.invalid_crcs; + + kfree(fcstats); + return hstats; +} + +/* + * FC transport template entry, reset BFAD statistics. + */ +static void +bfad_im_reset_stats(struct Scsi_Host *shost) +{ + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + struct bfad_hal_comp fcomp; + unsigned long flags; + bfa_status_t rc; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp, + &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (rc != BFA_STATUS_OK) + return; + + wait_for_completion(&fcomp.comp); + + return; +} + +/* + * FC transport template entry, set rport loss timeout. 
+ * Update dev_loss_tmo based on the value pushed down by the stack + * In case it is lesser than path_tov of driver, set it to path_tov + 1 + * to ensure that the driver times out before the application + */ +static void +bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) +{ + struct bfad_itnim_data_s *itnim_data = rport->dd_data; + struct bfad_itnim_s *itnim = itnim_data->itnim; + struct bfad_s *bfad = itnim->im->bfad; + uint16_t path_tov = bfa_fcpim_path_tov_get(&bfad->bfa); + + rport->dev_loss_tmo = timeout; + if (timeout < path_tov) + rport->dev_loss_tmo = path_tov + 1; +} + +static int +bfad_im_vport_create(struct fc_vport *fc_vport, bool disable) +{ + char *vname = fc_vport->symbolic_name; + struct Scsi_Host *shost = fc_vport->shost; + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + struct bfa_lport_cfg_s port_cfg; + struct bfad_vport_s *vp; + int status = 0, rc; + unsigned long flags; + + memset(&port_cfg, 0, sizeof(port_cfg)); + u64_to_wwn(fc_vport->node_name, (u8 *)&port_cfg.nwwn); + u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn); + if (strlen(vname) > 0) + strcpy((char *)&port_cfg.sym_name, vname); + port_cfg.roles = BFA_LPORT_ROLE_FCP_IM; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + list_for_each_entry(vp, &bfad->pbc_vport_list, list_entry) { + if (port_cfg.pwwn == + vp->fcs_vport.lport.port_cfg.pwwn) { + port_cfg.preboot_vp = + vp->fcs_vport.lport.port_cfg.preboot_vp; + break; + } + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev); + if (rc == BFA_STATUS_OK) { + struct bfad_vport_s *vport; + struct bfa_fcs_vport_s *fcs_vport; + struct Scsi_Host *vshost; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, + port_cfg.pwwn); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (fcs_vport == NULL) + return VPCERR_BAD_WWN; + + fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); + if (disable) { + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_fcs_vport_stop(fcs_vport); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); + } + + vport = fcs_vport->vport_drv; + vshost = vport->drv_port.im_port->shost; + fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn); + fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn); + fc_host_supported_classes(vshost) = FC_COS_CLASS3; + + memset(fc_host_supported_fc4s(vshost), 0, + sizeof(fc_host_supported_fc4s(vshost))); + + /* For FCP type 0x08 */ + if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM) + fc_host_supported_fc4s(vshost)[2] = 1; + + /* For fibre channel services type 0x20 */ + fc_host_supported_fc4s(vshost)[7] = 1; + + fc_host_supported_speeds(vshost) = + bfad_im_supported_speeds(&bfad->bfa); + fc_host_maxframe_size(vshost) = + bfa_fcport_get_maxfrsize(&bfad->bfa); + + fc_vport->dd_data = vport; + vport->drv_port.im_port->fc_vport = fc_vport; + } else if (rc == BFA_STATUS_INVALID_WWN) + return VPCERR_BAD_WWN; + else if (rc == BFA_STATUS_VPORT_EXISTS) + return VPCERR_BAD_WWN; + else if (rc == BFA_STATUS_VPORT_MAX) + return VPCERR_NO_FABRIC_SUPP; + else if (rc == BFA_STATUS_VPORT_WWN_BP) + return VPCERR_BAD_WWN; + else + return FC_VPORT_FAILED; + + return status; +} + +static int +bfad_im_issue_fc_host_lip(struct Scsi_Host *shost) +{ + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + struct 
bfad_hal_comp fcomp; + unsigned long flags; + uint32_t status; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + status = bfa_port_disable(&bfad->bfa.modules.port, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (status != BFA_STATUS_OK) + return -EIO; + + wait_for_completion(&fcomp.comp); + if (fcomp.status != BFA_STATUS_OK) + return -EIO; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + status = bfa_port_enable(&bfad->bfa.modules.port, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (status != BFA_STATUS_OK) + return -EIO; + + wait_for_completion(&fcomp.comp); + if (fcomp.status != BFA_STATUS_OK) + return -EIO; + + return 0; +} + +static int +bfad_im_vport_delete(struct fc_vport *fc_vport) +{ + struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data; + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) vport->drv_port.im_port; + struct bfad_s *bfad = im_port->bfad; + struct bfa_fcs_vport_s *fcs_vport; + struct Scsi_Host *vshost; + wwn_t pwwn; + int rc; + unsigned long flags; + struct completion fcomp; + + if (im_port->flags & BFAD_PORT_DELETE) { + bfad_scsi_host_free(bfad, im_port); + list_del(&vport->list_entry); + kfree(vport); + return 0; + } + + vshost = vport->drv_port.im_port->shost; + u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (fcs_vport == NULL) + return VPCERR_BAD_WWN; + + vport->drv_port.flags |= BFAD_PORT_DELETE; + + vport->comp_del = &fcomp; + init_completion(vport->comp_del); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + rc = bfa_fcs_vport_delete(&vport->fcs_vport); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (rc == BFA_STATUS_PBC) { + vport->drv_port.flags &= ~BFAD_PORT_DELETE; + vport->comp_del = NULL; + return -1; + } + + wait_for_completion(vport->comp_del); + + bfad_scsi_host_free(bfad, im_port); + list_del(&vport->list_entry); + kfree(vport); + + return 0; +} + +static int +bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable) +{ + struct bfad_vport_s *vport; + struct bfad_s *bfad; + struct bfa_fcs_vport_s *fcs_vport; + struct Scsi_Host *vshost; + wwn_t pwwn; + unsigned long flags; + + vport = (struct bfad_vport_s *)fc_vport->dd_data; + bfad = vport->drv_port.bfad; + vshost = vport->drv_port.im_port->shost; + u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (fcs_vport == NULL) + return VPCERR_BAD_WWN; + + if (disable) { + bfa_fcs_vport_stop(fcs_vport); + fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); + } else { + bfa_fcs_vport_start(fcs_vport); + fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); + } + + return 0; +} + +static void +bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport) +{ + struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data; + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *)vport->drv_port.im_port; + struct bfad_s *bfad = im_port->bfad; + struct Scsi_Host *vshost = vport->drv_port.im_port->shost; + char *sym_name = fc_vport->symbolic_name; + struct bfa_fcs_vport_s *fcs_vport; + wwn_t pwwn; + unsigned long flags; + + u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vport = 
bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (fcs_vport == NULL) + return; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (strlen(sym_name) > 0) + bfa_fcs_lport_set_symname(&fcs_vport->lport, sym_name); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); +} + +struct fc_function_template bfad_im_fc_function_template = { + + /* Target dynamic attributes */ + .get_starget_port_id = bfad_im_get_starget_port_id, + .show_starget_port_id = 1, + .get_starget_node_name = bfad_im_get_starget_node_name, + .show_starget_node_name = 1, + .get_starget_port_name = bfad_im_get_starget_port_name, + .show_starget_port_name = 1, + + /* Host dynamic attribute */ + .get_host_port_id = bfad_im_get_host_port_id, + .show_host_port_id = 1, + + /* Host fixed attributes */ + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_supported_speeds = 1, + .show_host_maxframe_size = 1, + + /* More host dynamic attributes */ + .show_host_port_type = 1, + .get_host_port_type = bfad_im_get_host_port_type, + .show_host_port_state = 1, + .get_host_port_state = bfad_im_get_host_port_state, + .show_host_active_fc4s = 1, + .get_host_active_fc4s = bfad_im_get_host_active_fc4s, + .show_host_speed = 1, + .get_host_speed = bfad_im_get_host_speed, + .show_host_fabric_name = 1, + .get_host_fabric_name = bfad_im_get_host_fabric_name, + + .show_host_symbolic_name = 1, + + /* Statistics */ + .get_fc_host_stats = bfad_im_get_stats, + .reset_fc_host_stats = bfad_im_reset_stats, + + /* Allocation length for host specific data */ + .dd_fcrport_size = sizeof(struct bfad_itnim_data_s *), + + /* Remote port fixed attributes */ + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + .show_rport_dev_loss_tmo = 1, + .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, + .issue_fc_host_lip = bfad_im_issue_fc_host_lip, + .vport_create = bfad_im_vport_create, + .vport_delete = bfad_im_vport_delete, + .vport_disable = bfad_im_vport_disable, + .set_vport_symbolic_name = bfad_im_vport_set_symbolic_name, + .bsg_request = bfad_im_bsg_request, + .bsg_timeout = bfad_im_bsg_timeout, +}; + +struct fc_function_template bfad_im_vport_fc_function_template = { + + /* Target dynamic attributes */ + .get_starget_port_id = bfad_im_get_starget_port_id, + .show_starget_port_id = 1, + .get_starget_node_name = bfad_im_get_starget_node_name, + .show_starget_node_name = 1, + .get_starget_port_name = bfad_im_get_starget_port_name, + .show_starget_port_name = 1, + + /* Host dynamic attribute */ + .get_host_port_id = bfad_im_get_host_port_id, + .show_host_port_id = 1, + + /* Host fixed attributes */ + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_supported_speeds = 1, + .show_host_maxframe_size = 1, + + /* More host dynamic attributes */ + .show_host_port_type = 1, + .get_host_port_type = bfad_im_get_host_port_type, + .show_host_port_state = 1, + .get_host_port_state = bfad_im_get_host_port_state, + .show_host_active_fc4s = 1, + .get_host_active_fc4s = bfad_im_get_host_active_fc4s, + .show_host_speed = 1, + .get_host_speed = bfad_im_get_host_speed, + .show_host_fabric_name = 1, + .get_host_fabric_name = bfad_im_get_host_fabric_name, + + .show_host_symbolic_name = 1, + + /* Statistics */ + .get_fc_host_stats = bfad_im_get_stats, + .reset_fc_host_stats = bfad_im_reset_stats, + + /* Allocation length for host specific data */ + 
.dd_fcrport_size = sizeof(struct bfad_itnim_data_s *), + + /* Remote port fixed attributes */ + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + .show_rport_dev_loss_tmo = 1, + .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, +}; + +/* + * Scsi_Host_attrs SCSI host attributes + */ +static ssize_t +bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; + + bfa_get_adapter_serial_num(&bfad->bfa, serial_num); + return sysfs_emit(buf, "%s\n", serial_num); +} + +static ssize_t +bfad_im_model_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + char model[BFA_ADAPTER_MODEL_NAME_LEN]; + + bfa_get_adapter_model(&bfad->bfa, model); + return sysfs_emit(buf, "%s\n", model); +} + +static ssize_t +bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + char model[BFA_ADAPTER_MODEL_NAME_LEN]; + char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; + int nports = 0; + + bfa_get_adapter_model(&bfad->bfa, model); + nports = bfa_get_nports(&bfad->bfa); + if (!strcmp(model, "QLogic-425")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 4Gbps PCIe dual port FC HBA"); + else if (!strcmp(model, "QLogic-825")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 8Gbps PCIe dual port FC HBA"); + else if (!strcmp(model, "QLogic-42B")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 4Gbps PCIe dual port FC HBA for HP"); + else if (!strcmp(model, "QLogic-82B")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 8Gbps PCIe dual port FC HBA for HP"); + else if (!strcmp(model, "QLogic-1010")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 10Gbps single port CNA"); + else if (!strcmp(model, "QLogic-1020")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 10Gbps dual port CNA"); + else if (!strcmp(model, "QLogic-1007")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 10Gbps CNA for IBM Blade Center"); + else if (!strcmp(model, "QLogic-415")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 4Gbps PCIe single port FC HBA"); + else if (!strcmp(model, "QLogic-815")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 8Gbps PCIe single port FC HBA"); + else if (!strcmp(model, "QLogic-41B")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 4Gbps PCIe single port FC HBA for HP"); + else if (!strcmp(model, "QLogic-81B")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 8Gbps PCIe single port FC HBA for HP"); + else if (!strcmp(model, "QLogic-804")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 8Gbps FC HBA for HP Bladesystem C-class"); + else if (!strcmp(model, "QLogic-1741")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 10Gbps CNA for Dell M-Series Blade 
Servers"); + else if (strstr(model, "QLogic-1860")) { + if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc)) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 10Gbps single port CNA"); + else if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 16Gbps PCIe single port FC HBA"); + else if (nports == 2 && bfa_ioc_is_cna(&bfad->bfa.ioc)) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 10Gbps dual port CNA"); + else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 16Gbps PCIe dual port FC HBA"); + } else if (!strcmp(model, "QLogic-1867")) { + if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 16Gbps PCIe single port FC HBA for IBM"); + else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "QLogic BR-series 16Gbps PCIe dual port FC HBA for IBM"); + } else + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Invalid Model"); + + return sysfs_emit(buf, "%s\n", model_descr); +} + +static ssize_t +bfad_im_node_name_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_port_s *port = im_port->port; + u64 nwwn; + + nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port); + return sysfs_emit(buf, "0x%llx\n", cpu_to_be64(nwwn)); +} + +static ssize_t +bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + struct bfa_lport_attr_s port_attr; + char symname[BFA_SYMNAME_MAXLEN]; + + bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); + strscpy(symname, port_attr.port_cfg.sym_name.symname, + BFA_SYMNAME_MAXLEN); + return sysfs_emit(buf, "%s\n", symname); +} + +static ssize_t +bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + char hw_ver[BFA_VERSION_LEN]; + + bfa_get_pci_chip_rev(&bfad->bfa, hw_ver); + return sysfs_emit(buf, "%s\n", hw_ver); +} + +static ssize_t +bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", BFAD_DRIVER_VERSION); +} + +static ssize_t +bfad_im_optionrom_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + char optrom_ver[BFA_VERSION_LEN]; + + bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver); + return sysfs_emit(buf, "%s\n", optrom_ver); +} + +static ssize_t +bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + char fw_ver[BFA_VERSION_LEN]; + + bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); + return 
sysfs_emit(buf, "%s\n", fw_ver); +} + +static ssize_t +bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + + return sysfs_emit(buf, "%d\n", + bfa_get_nports(&bfad->bfa)); +} + +static ssize_t +bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", BFAD_DRIVER_NAME); +} + +static ssize_t +bfad_im_num_of_discovered_ports_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_port_s *port = im_port->port; + struct bfad_s *bfad = im_port->bfad; + int nrports = 2048; + struct bfa_rport_qualifier_s *rports = NULL; + unsigned long flags; + + rports = kcalloc(nrports, sizeof(struct bfa_rport_qualifier_s), + GFP_ATOMIC); + if (rports == NULL) + return sysfs_emit(buf, "Failed\n"); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + kfree(rports); + + return sysfs_emit(buf, "%d\n", nrports); +} + +static DEVICE_ATTR(serial_number, S_IRUGO, + bfad_im_serial_num_show, NULL); +static DEVICE_ATTR(model, S_IRUGO, bfad_im_model_show, NULL); +static DEVICE_ATTR(model_description, S_IRUGO, + bfad_im_model_desc_show, NULL); +static DEVICE_ATTR(node_name, S_IRUGO, bfad_im_node_name_show, NULL); +static DEVICE_ATTR(symbolic_name, S_IRUGO, + bfad_im_symbolic_name_show, NULL); +static DEVICE_ATTR(hardware_version, S_IRUGO, + bfad_im_hw_version_show, NULL); +static DEVICE_ATTR(driver_version, S_IRUGO, + bfad_im_drv_version_show, NULL); +static DEVICE_ATTR(option_rom_version, S_IRUGO, + bfad_im_optionrom_version_show, NULL); +static DEVICE_ATTR(firmware_version, S_IRUGO, + bfad_im_fw_version_show, NULL); +static DEVICE_ATTR(number_of_ports, S_IRUGO, + bfad_im_num_of_ports_show, NULL); +static DEVICE_ATTR(driver_name, S_IRUGO, bfad_im_drv_name_show, NULL); +static DEVICE_ATTR(number_of_discovered_ports, S_IRUGO, + bfad_im_num_of_discovered_ports_show, NULL); + +static struct attribute *bfad_im_host_attrs[] = { + &dev_attr_serial_number.attr, + &dev_attr_model.attr, + &dev_attr_model_description.attr, + &dev_attr_node_name.attr, + &dev_attr_symbolic_name.attr, + &dev_attr_hardware_version.attr, + &dev_attr_driver_version.attr, + &dev_attr_option_rom_version.attr, + &dev_attr_firmware_version.attr, + &dev_attr_number_of_ports.attr, + &dev_attr_driver_name.attr, + &dev_attr_number_of_discovered_ports.attr, + NULL, +}; + +static const struct attribute_group bfad_im_host_attr_group = { + .attrs = bfad_im_host_attrs +}; + +const struct attribute_group *bfad_im_host_groups[] = { + &bfad_im_host_attr_group, + NULL +}; + +static struct attribute *bfad_im_vport_attrs[] = { + &dev_attr_serial_number.attr, + &dev_attr_model.attr, + &dev_attr_model_description.attr, + &dev_attr_node_name.attr, + &dev_attr_symbolic_name.attr, + &dev_attr_hardware_version.attr, + &dev_attr_driver_version.attr, + &dev_attr_option_rom_version.attr, + &dev_attr_firmware_version.attr, + &dev_attr_number_of_ports.attr, + &dev_attr_driver_name.attr, + &dev_attr_number_of_discovered_ports.attr, + NULL, +}; + +static const struct attribute_group bfad_im_vport_attr_group = { + .attrs = bfad_im_vport_attrs 
+}; + +const struct attribute_group *bfad_im_vport_groups[] = { + &bfad_im_vport_attr_group, + NULL +}; diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c new file mode 100644 index 000000000..d4ceca2d4 --- /dev/null +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -0,0 +1,3615 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +#include +#include "bfad_drv.h" +#include "bfad_im.h" +#include "bfad_bsg.h" + +BFA_TRC_FILE(LDRV, BSG); + +static int +bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + /* If IOC is not in disabled state - return */ + if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; + return 0; + } + + init_completion(&bfad->enable_comp); + bfa_iocfc_enable(&bfad->bfa); + iocmd->status = BFA_STATUS_OK; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + wait_for_completion(&bfad->enable_comp); + + return 0; +} + +static int +bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; + return 0; + } + + if (bfad->disable_active) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return -EBUSY; + } + + bfad->disable_active = BFA_TRUE; + init_completion(&bfad->disable_comp); + bfa_iocfc_disable(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + wait_for_completion(&bfad->disable_comp); + bfad->disable_active = BFA_FALSE; + iocmd->status = BFA_STATUS_OK; + + return 0; +} + +static int +bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd) +{ + int i; + struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd; + struct bfad_im_port_s *im_port; + struct bfa_port_attr_s pattr; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_fcport_get_attr(&bfad->bfa, &pattr); + iocmd->nwwn = pattr.nwwn; + iocmd->pwwn = pattr.pwwn; + iocmd->ioc_type = bfa_get_type(&bfad->bfa); + iocmd->mac = bfa_get_mac(&bfad->bfa); + iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa); + bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum); + iocmd->factorynwwn = pattr.factorynwwn; + iocmd->factorypwwn = pattr.factorypwwn; + iocmd->bfad_num = bfad->inst_no; + im_port = bfad->pport.im_port; + iocmd->host = im_port->shost->host_no; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + strcpy(iocmd->name, bfad->adapter_name); + strcpy(iocmd->port_name, bfad->port_name); + strcpy(iocmd->hwpath, bfad->pci_name); + + /* set adapter hw path */ + strcpy(iocmd->adapter_hwpath, bfad->pci_name); + for (i = 0; iocmd->adapter_hwpath[i] != ':' && i < BFA_STRING_32; i++) + ; + for (; iocmd->adapter_hwpath[++i] != ':' && i < BFA_STRING_32; ) + ; + iocmd->adapter_hwpath[i] = '\0'; + iocmd->status = BFA_STATUS_OK; + return 0; +} + +static int +bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + 
bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + /* fill in driver attr info */ + strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME); + strscpy(iocmd->ioc_attr.driver_attr.driver_ver, + BFAD_DRIVER_VERSION, BFA_VERSION_LEN); + strcpy(iocmd->ioc_attr.driver_attr.fw_ver, + iocmd->ioc_attr.adapter_attr.fw_ver); + strcpy(iocmd->ioc_attr.driver_attr.bios_ver, + iocmd->ioc_attr.adapter_attr.optrom_ver); + + /* copy chip rev info first otherwise it will be overwritten */ + memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev, + sizeof(bfad->pci_attr.chip_rev)); + memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr, + sizeof(struct bfa_ioc_pci_attr_s)); + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +static int +bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd; + + bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats); + iocmd->status = BFA_STATUS_OK; + return 0; +} + +static int +bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_ioc_fwstats_s *iocmd = + (struct bfa_bsg_ioc_fwstats_s *)cmd; + void *iocmd_bufptr; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_ioc_fwstats_s), + sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + goto out; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + goto out; + } +out: + bfa_trc(bfad, 0x6666); + return 0; +} + +static int +bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + + if (v_cmd == IOCMD_IOC_RESET_STATS) { + bfa_ioc_clear_stats(&bfad->bfa); + iocmd->status = BFA_STATUS_OK; + } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) { + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + } + + return 0; +} + +static int +bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +{ + struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd; + + if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME) + strcpy(bfad->adapter_name, iocmd->name); + else if (v_cmd == IOCMD_IOC_SET_PORT_NAME) + strcpy(bfad->port_name, iocmd->name); + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +static int +bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd; + + iocmd->status = BFA_STATUS_OK; + bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr); + + return 0; +} + +static int +bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +static int +bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + 
iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_port_enable(&bfad->bfa.modules.port, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + return 0; + } + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + return 0; +} + +static int +bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_port_disable(&bfad->bfa.modules.port, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + return 0; + } + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + return 0; +} + +static int +bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd; + struct bfa_lport_attr_s port_attr; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr); + bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE) + iocmd->attr.pid = port_attr.pid; + else + iocmd->attr.pid = 0; + + iocmd->attr.port_type = port_attr.port_type; + iocmd->attr.loopback = port_attr.loopback; + iocmd->attr.authfail = port_attr.authfail; + strscpy(iocmd->attr.port_symname.symname, + port_attr.port_cfg.sym_name.symname, + sizeof(iocmd->attr.port_symname.symname)); + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +static int +bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd; + struct bfad_hal_comp fcomp; + void *iocmd_bufptr; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_port_stats_s), + sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s); + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port, + iocmd_bufptr, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + goto out; + } + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, 
iocmd->status); + return 0; + } + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + return 0; +} + +static int +bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd) +{ + struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (v_cmd == IOCMD_PORT_CFG_TOPO) + cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param); + else if (v_cmd == IOCMD_PORT_CFG_SPEED) + cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param); + else if (v_cmd == IOCMD_PORT_CFG_ALPA) + cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param); + else if (v_cmd == IOCMD_PORT_CLR_ALPA) + cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_port_cfg_maxfrsize_s *iocmd = + (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd) +{ + struct bfa_bsg_bbcr_enable_s *iocmd = + (struct bfa_bsg_bbcr_enable_s *)pcmd; + unsigned long flags; + int rc; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (cmd == IOCMD_PORT_BBCR_ENABLE) + rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn); + else if (cmd == IOCMD_PORT_BBCR_DISABLE) + rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0); + else { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + iocmd->status = rc; + return 0; +} + +static int +bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd) +{ + struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = + bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + + +static int +bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_fcs_lport_s *fcs_port; + struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_fcs_lport_s *fcs_port; + struct bfa_bsg_lport_stats_s *iocmd = + (struct bfa_bsg_lport_stats_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void 
*cmd) +{ + struct bfa_fcs_lport_s *fcs_port; + struct bfa_bsg_reset_stats_s *iocmd = + (struct bfa_bsg_reset_stats_s *)cmd; + struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); + struct list_head *qe, *qen; + struct bfa_itnim_s *itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->vpwwn); + if (fcs_port == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + bfa_fcs_lport_clear_stats(fcs_port); + /* clear IO stats from all active itnims */ + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag) + continue; + bfa_itnim_clear_stats(itnim); + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_fcs_lport_s *fcs_port; + struct bfa_bsg_lport_iostats_s *iocmd = + (struct bfa_bsg_lport_iostats_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats, + fcs_port->lp_tag); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_lport_get_rports_s *iocmd = + (struct bfa_bsg_lport_get_rports_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + unsigned long flags; + void *iocmd_bufptr; + + if (iocmd->nrports == 0) + return -EINVAL; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_lport_get_rports_s), + sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports) + != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + + sizeof(struct bfa_bsg_lport_get_rports_s); + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, 0); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + bfa_fcs_lport_get_rport_quals(fcs_port, + (struct bfa_rport_qualifier_s *)iocmd_bufptr, + &iocmd->nrports); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_rport_s *fcs_rport; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + if (iocmd->pid) + fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port, + iocmd->rpwwn, iocmd->pid); + else + fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); + if (fcs_rport == NULL) { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + goto 
out; + } + + bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_rport_scsi_addr_s *iocmd = + (struct bfa_bsg_rport_scsi_addr_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_itnim_s *fcs_itnim; + struct bfad_itnim_s *drv_itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); + if (fcs_itnim == NULL) { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + goto out; + } + + drv_itnim = fcs_itnim->itnim_drv; + + if (drv_itnim && drv_itnim->im_port) + iocmd->host = drv_itnim->im_port->shost->host_no; + else { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + goto out; + } + + iocmd->target = drv_itnim->scsi_tgt_id; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + iocmd->bus = 0; + iocmd->lun = 0; + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_rport_stats_s *iocmd = + (struct bfa_bsg_rport_stats_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_rport_s *fcs_rport; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); + if (fcs_rport == NULL) { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + goto out; + } + + memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats, + sizeof(struct bfa_rport_stats_s)); + if (bfa_fcs_rport_get_halrport(fcs_rport)) { + memcpy((void *)&iocmd->stats.hal_stats, + (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats), + sizeof(struct bfa_rport_hal_stats_s)); + } + + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_rport_reset_stats_s *iocmd = + (struct bfa_bsg_rport_reset_stats_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_rport_s *fcs_rport; + struct bfa_rport_s *rport; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); + if (fcs_rport == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + goto out; + } + + memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s)); + rport = bfa_fcs_rport_get_halrport(fcs_rport); + if (rport) + memset(&rport->stats, 0, sizeof(rport->stats)); + 
spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_rport_set_speed_s *iocmd = + (struct bfa_bsg_rport_set_speed_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_rport_s *fcs_rport; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); + if (fcs_rport == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + goto out; + } + + fcs_rport->rpf.assigned_speed = iocmd->speed; + /* Set this speed in f/w only if the RPSC speed is not available */ + if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN) + if (fcs_rport->bfa_rport) + bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_fcs_vport_s *fcs_vport; + struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->vpwwn); + if (fcs_vport == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_VWWN; + goto out; + } + + bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_fcs_vport_s *fcs_vport; + struct bfa_bsg_vport_stats_s *iocmd = + (struct bfa_bsg_vport_stats_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->vpwwn); + if (fcs_vport == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_VWWN; + goto out; + } + + memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats, + sizeof(struct bfa_vport_stats_s)); + memcpy((void *)&iocmd->vport_stats.port_stats, + (void *)&fcs_vport->lport.stats, + sizeof(struct bfa_lport_stats_s)); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_fcs_vport_s *fcs_vport; + struct bfa_bsg_reset_stats_s *iocmd = + (struct bfa_bsg_reset_stats_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->vpwwn); + if (fcs_vport == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_VWWN; + goto out; + } + + memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); + memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s)); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_fabric_get_lports_s *iocmd = + (struct 
bfa_bsg_fabric_get_lports_s *)cmd; + bfa_fcs_vf_t *fcs_vf; + uint32_t nports = iocmd->nports; + unsigned long flags; + void *iocmd_bufptr; + + if (nports == 0) { + iocmd->status = BFA_STATUS_EINVAL; + goto out; + } + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_fabric_get_lports_s), + sizeof(wwn_t) * iocmd->nports) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + goto out; + } + + iocmd_bufptr = (char *)iocmd + + sizeof(struct bfa_bsg_fabric_get_lports_s); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); + if (fcs_vf == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_VFID; + goto out; + } + bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + iocmd->nports = nports; + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd) +{ + struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else { + if (cmd == IOCMD_RATELIM_ENABLE) + fcport->cfg.ratelimit = BFA_TRUE; + else if (cmd == IOCMD_RATELIM_DISABLE) + fcport->cfg.ratelimit = BFA_FALSE; + + if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN) + fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; + + iocmd->status = BFA_STATUS_OK; + } + + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd) +{ + struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + + /* Auto and speeds greater than the supported speed, are invalid */ + if ((iocmd->speed == BFA_PORT_SPEED_AUTO) || + (iocmd->speed > fcport->speed_sup)) { + iocmd->status = BFA_STATUS_UNSUPP_SPEED; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; + } + + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else { + fcport->cfg.trl_def_speed = iocmd->speed; + iocmd->status = BFA_STATUS_OK; + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; + return 0; +} + +static int +bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcpim_modstats_s *iocmd = + (struct bfa_bsg_fcpim_modstats_s *)cmd; + struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); + 
struct list_head *qe, *qen; + struct bfa_itnim_s *itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + /* accumulate IO stats from itnim */ + memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s)); + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats)); + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; + return 0; +} + +static int +bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcpim_modstatsclr_s *iocmd = + (struct bfa_bsg_fcpim_modstatsclr_s *)cmd; + struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); + struct list_head *qe, *qen; + struct bfa_itnim_s *itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + bfa_itnim_clear_stats(itnim); + } + memset(&fcpim->del_itn_stats, 0, + sizeof(struct bfa_fcpim_del_itn_stats_s)); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; + return 0; +} + +static int +bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcpim_del_itn_stats_s *iocmd = + (struct bfa_bsg_fcpim_del_itn_stats_s *)cmd; + struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats, + sizeof(struct bfa_fcpim_del_itn_stats_s)); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +static int +bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->lpwwn); + if (!fcs_port) + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + else + iocmd->status = bfa_fcs_itnim_attr_get(fcs_port, + iocmd->rpwwn, &iocmd->attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +static int +bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_itnim_iostats_s *iocmd = + (struct bfa_bsg_itnim_iostats_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_itnim_s *itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->lpwwn); + if (!fcs_port) { + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + bfa_trc(bfad, 0); + } else { + itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); + if (itnim == NULL) + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + else { + iocmd->status = BFA_STATUS_OK; + if (bfa_fcs_itnim_get_halitn(itnim)) + memcpy((void *)&iocmd->iostats, (void *) + &(bfa_fcs_itnim_get_halitn(itnim)->stats), + sizeof(struct bfa_itnim_iostats_s)); + } + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +static int +bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_rport_reset_stats_s *iocmd = + (struct bfa_bsg_rport_reset_stats_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_itnim_s *itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (!fcs_port) + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + else { + 
itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); + if (itnim == NULL) + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + else { + iocmd->status = BFA_STATUS_OK; + bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn); + bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim)); + } + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_itnim_itnstats_s *iocmd = + (struct bfa_bsg_itnim_itnstats_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_itnim_s *itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->lpwwn); + if (!fcs_port) { + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + bfa_trc(bfad, 0); + } else { + itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); + if (itnim == NULL) + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + else { + iocmd->status = BFA_STATUS_OK; + bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn, + &iocmd->itnstats); + } + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +static int +bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcport_enable(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcport_disable(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk, + &iocmd->pcifn_cfg, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk, + &iocmd->pcifn_id, iocmd->port, + iocmd->pcifn_class, iocmd->bw_min, + iocmd->bw_max, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk, + iocmd->pcifn_id, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; 
+out: + return 0; +} + +static int +bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk, + iocmd->pcifn_id, iocmd->bw_min, + iocmd->bw_max, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + bfa_trc(bfad, iocmd->status); +out: + return 0; +} + +static int +bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_adapter_cfg_mode_s *iocmd = + (struct bfa_bsg_adapter_cfg_mode_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk, + iocmd->cfg.mode, iocmd->cfg.max_pf, + iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_port_cfg_mode_s *iocmd = + (struct bfa_bsg_port_cfg_mode_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk, + iocmd->instance, iocmd->cfg.mode, + iocmd->cfg.max_pf, iocmd->cfg.max_vf, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (cmd == IOCMD_FLASH_ENABLE_OPTROM) + iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk, + bfad_hcb_comp, &fcomp); + else + iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + iocmd->status = BFA_STATUS_OK; + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len) +{ + struct bfa_bsg_cee_attr_s *iocmd = + (struct bfa_bsg_cee_attr_s *)cmd; + void *iocmd_bufptr; + struct bfad_hal_comp cee_comp; + unsigned long flags; + + if 
(bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_cee_attr_s), + sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s); + + cee_comp.status = 0; + init_completion(&cee_comp.comp); + mutex_lock(&bfad_mutex); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr, + bfad_hcb_comp, &cee_comp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + mutex_unlock(&bfad_mutex); + bfa_trc(bfad, 0x5555); + goto out; + } + wait_for_completion(&cee_comp.comp); + mutex_unlock(&bfad_mutex); +out: + return 0; +} + +static int +bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_cee_stats_s *iocmd = + (struct bfa_bsg_cee_stats_s *)cmd; + void *iocmd_bufptr; + struct bfad_hal_comp cee_comp; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_cee_stats_s), + sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s); + + cee_comp.status = 0; + init_completion(&cee_comp.comp); + mutex_lock(&bfad_mutex); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr, + bfad_hcb_comp, &cee_comp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + mutex_unlock(&bfad_mutex); + bfa_trc(bfad, 0x5555); + goto out; + } + wait_for_completion(&cee_comp.comp); + mutex_unlock(&bfad_mutex); +out: + return 0; +} + +static int +bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + bfa_trc(bfad, 0x5555); + return 0; +} + +static int +bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_SFP_NOT_READY) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_SFP_NOT_READY) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_flash_attr_s *iocmd = + (struct bfa_bsg_flash_attr_s *)cmd; + struct bfad_hal_comp fcomp; + 
unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type, + iocmd->instance, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd; + void *iocmd_bufptr; + struct bfad_hal_comp fcomp; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_flash_s), + iocmd->bufsz) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s); + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), + iocmd->type, iocmd->instance, iocmd_bufptr, + iocmd->bufsz, 0, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd; + struct bfad_hal_comp fcomp; + void *iocmd_bufptr; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_flash_s), + iocmd->bufsz) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s); + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type, + iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_get_temp_s *iocmd = + (struct bfa_bsg_diag_get_temp_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa), + &iocmd->result, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_memtest_s *iocmd = + (struct bfa_bsg_diag_memtest_s *)cmd; + 
struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa), + &iocmd->memtest, iocmd->pat, + &iocmd->result, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_loopback_s *iocmd = + (struct bfa_bsg_diag_loopback_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode, + iocmd->speed, iocmd->lpcnt, iocmd->pat, + &iocmd->result, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_fwping_s *iocmd = + (struct bfa_bsg_diag_fwping_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt, + iocmd->pattern, &iocmd->result, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_OK) + goto out; + bfa_trc(bfad, 0x77771); + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force, + iocmd->queue, &iocmd->result, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_sfp_show_s *iocmd = + (struct bfa_bsg_sfp_show_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + bfa_trc(bfad, iocmd->status); +out: + return 0; +} + +static int +bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa), + &iocmd->ledtest); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +static int +bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_beacon_s *iocmd = + (struct bfa_bsg_diag_beacon_s *)cmd; + 
unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa), + iocmd->beacon, iocmd->link_e2e_beacon, + iocmd->second); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +static int +bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_lb_stat_s *iocmd = + (struct bfa_bsg_diag_lb_stat_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + + return 0; +} + +static int +bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd) +{ + struct bfa_bsg_dport_enable_s *iocmd = + (struct bfa_bsg_dport_enable_s *)pcmd; + unsigned long flags; + struct bfad_hal_comp fcomp; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt, + iocmd->pat, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + bfa_trc(bfad, iocmd->status); + else { + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + } + return 0; +} + +static int +bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; + unsigned long flags; + struct bfad_hal_comp fcomp; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + bfa_trc(bfad, iocmd->status); + else { + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + } + return 0; +} + +static int +bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd) +{ + struct bfa_bsg_dport_enable_s *iocmd = + (struct bfa_bsg_dport_enable_s *)pcmd; + unsigned long flags; + struct bfad_hal_comp fcomp; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt, + iocmd->pat, bfad_hcb_comp, + &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + } else { + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + } + + return 0; +} + +static int +bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd) +{ + struct bfa_bsg_diag_dport_show_s *iocmd = + (struct bfa_bsg_diag_dport_show_s *)pcmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + + +static int +bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_phy_attr_s *iocmd = + (struct bfa_bsg_phy_attr_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance, + &iocmd->attr, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_phy_stats_s *iocmd = + (struct bfa_bsg_phy_stats_s *)cmd; + struct 
bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance, + &iocmd->stats, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len) +{ + struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd; + struct bfad_hal_comp fcomp; + void *iocmd_bufptr; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_phy_s), + iocmd->bufsz) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s); + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa), + iocmd->instance, iocmd_bufptr, iocmd->bufsz, + 0, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + if (iocmd->status != BFA_STATUS_OK) + goto out; +out: + return 0; +} + +static int +bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_vhba_attr_s *iocmd = + (struct bfa_bsg_vhba_attr_s *)cmd; + struct bfa_vhba_attr_s *attr = &iocmd->attr; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + attr->pwwn = bfad->bfa.ioc.attr->pwwn; + attr->nwwn = bfad->bfa.ioc.attr->nwwn; + attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled; + attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa); + attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa); + iocmd->status = BFA_STATUS_OK; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +static int +bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len) +{ + struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd; + void *iocmd_bufptr; + struct bfad_hal_comp fcomp; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_phy_s), + iocmd->bufsz) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s); + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa), + iocmd->instance, iocmd_bufptr, iocmd->bufsz, + 0, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd; + void *iocmd_bufptr; + + if (iocmd->bufsz < sizeof(struct bfa_plog_s)) { + bfa_trc(bfad, sizeof(struct bfa_plog_s)); + iocmd->status = BFA_STATUS_EINVAL; + goto out; + } + + iocmd->status = BFA_STATUS_OK; + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); + memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s)); +out: + return 0; +} + +#define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */ +static int +bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_debug_s *iocmd = (struct 
bfa_bsg_debug_s *)cmd; + void *iocmd_bufptr; + unsigned long flags; + u32 offset; + + if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s), + BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ || + !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) || + !IS_ALIGNED(iocmd->offset, sizeof(u32))) { + bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ); + iocmd->status = BFA_STATUS_EINVAL; + goto out; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); + spin_lock_irqsave(&bfad->bfad_lock, flags); + offset = iocmd->offset; + iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr, + &offset, &iocmd->bufsz); + iocmd->offset = offset; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); +out: + return 0; +} + +static int +bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + + if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) { + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR) + bfad->plog_buf.head = bfad->plog_buf.tail = 0; + else if (v_cmd == IOCMD_DEBUG_START_DTRC) + bfa_trc_init(bfad->trcmod); + else if (v_cmd == IOCMD_DEBUG_STOP_DTRC) + bfa_trc_stop(bfad->trcmod); + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +static int +bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd; + + if (iocmd->ctl == BFA_TRUE) + bfad->plog_buf.plog_enabled = 1; + else + bfad->plog_buf.plog_enabled = 0; + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +static int +bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +{ + struct bfa_bsg_fcpim_profile_s *iocmd = + (struct bfa_bsg_fcpim_profile_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (v_cmd == IOCMD_FCPIM_PROFILE_ON) + iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, ktime_get_real_seconds()); + else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF) + iocmd->status = bfa_fcpim_profile_off(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_itnim_ioprofile_s *iocmd = + (struct bfa_bsg_itnim_ioprofile_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_itnim_s *itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->lpwwn); + if (!fcs_port) + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + else { + itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); + if (itnim == NULL) + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + else + iocmd->status = bfa_itnim_get_ioprofile( + bfa_fcs_itnim_get_halitn(itnim), + &iocmd->ioprofile); + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +static int +bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcport_stats_s *iocmd = + (struct bfa_bsg_fcport_stats_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + struct bfa_cb_pending_q_s cb_qe; + + init_completion(&fcomp.comp); + bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, + &fcomp, &iocmd->stats); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcport_get_stats(&bfad->bfa, 
&cb_qe); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + goto out; + } + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + struct bfa_cb_pending_q_s cb_qe; + + init_completion(&fcomp.comp); + bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + goto out; + } + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), + BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id, + &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), + BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id, + &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd; + struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp; + struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled; + pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns; + pbcfg->speed = cfgrsp->pbc_cfg.port_speed; + memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun)); + iocmd->status = BFA_STATUS_OK; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), + BFA_FLASH_PART_PXECFG, + bfad->bfa.ioc.port_id, &iocmd->cfg, + sizeof(struct bfa_ethboot_cfg_s), 0, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd) +{ + 
struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), + BFA_FLASH_PART_PXECFG, + bfad->bfa.ioc.port_id, &iocmd->cfg, + sizeof(struct bfa_ethboot_cfg_s), 0, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + struct bfa_fcport_trunk_s *trunk = &fcport->trunk; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + + if (bfa_fcport_is_dport(&bfad->bfa)) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return BFA_STATUS_DPORT_ERR; + } + + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) || + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else { + if (v_cmd == IOCMD_TRUNK_ENABLE) { + trunk->attr.state = BFA_TRUNK_OFFLINE; + bfa_fcport_disable(&bfad->bfa); + fcport->cfg.trunked = BFA_TRUE; + } else if (v_cmd == IOCMD_TRUNK_DISABLE) { + trunk->attr.state = BFA_TRUNK_DISABLED; + bfa_fcport_disable(&bfad->bfa); + fcport->cfg.trunked = BFA_FALSE; + } + + if (!bfa_fcport_is_disabled(&bfad->bfa)) + bfa_fcport_enable(&bfad->bfa); + + iocmd->status = BFA_STATUS_OK; + } + + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + struct bfa_fcport_trunk_s *trunk = &fcport->trunk; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) || + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else { + memcpy((void *)&iocmd->attr, (void *)&trunk->attr, + sizeof(struct bfa_trunk_attr_s)); + iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa); + iocmd->status = BFA_STATUS_OK; + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) { + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else { + if (v_cmd == IOCMD_QOS_ENABLE) + fcport->cfg.qos_enabled = BFA_TRUE; + else if (v_cmd == IOCMD_QOS_DISABLE) { + fcport->cfg.qos_enabled = BFA_FALSE; + fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH; + fcport->cfg.qos_bw.med = BFA_QOS_BW_MED; + fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW; + } + } + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + unsigned long flags; + + 
spin_lock_irqsave(&bfad->bfad_lock, flags); + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else { + iocmd->attr.state = fcport->qos_attr.state; + iocmd->attr.total_bb_cr = + be32_to_cpu(fcport->qos_attr.total_bb_cr); + iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high; + iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med; + iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low; + iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op; + iocmd->status = BFA_STATUS_OK; + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_qos_vc_attr_s *iocmd = + (struct bfa_bsg_qos_vc_attr_s *)cmd; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr; + unsigned long flags; + u32 i = 0; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count); + iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit); + iocmd->attr.elp_opmode_flags = + be32_to_cpu(bfa_vc_attr->elp_opmode_flags); + + /* Individual VC info */ + while (i < iocmd->attr.total_vc_count) { + iocmd->attr.vc_info[i].vc_credit = + bfa_vc_attr->vc_info[i].vc_credit; + iocmd->attr.vc_info[i].borrow_credit = + bfa_vc_attr->vc_info[i].borrow_credit; + iocmd->attr.vc_info[i].priority = + bfa_vc_attr->vc_info[i].priority; + i++; + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +static int +bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcport_stats_s *iocmd = + (struct bfa_bsg_fcport_stats_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + struct bfa_cb_pending_q_s cb_qe; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + + init_completion(&fcomp.comp); + bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, + &fcomp, &iocmd->stats); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else + iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + goto out; + } + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int +bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + struct bfa_cb_pending_q_s cb_qe; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + + init_completion(&fcomp.comp); + bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, + &fcomp, NULL); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else + iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + goto out; + } + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +static int 
+bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_vf_stats_s *iocmd = + (struct bfa_bsg_vf_stats_s *)cmd; + struct bfa_fcs_fabric_s *fcs_vf; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); + if (fcs_vf == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_VFID; + goto out; + } + memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats, + sizeof(struct bfa_vf_stats_s)); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_vf_reset_stats_s *iocmd = + (struct bfa_bsg_vf_reset_stats_s *)cmd; + struct bfa_fcs_fabric_s *fcs_vf; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); + if (fcs_vf == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_VFID; + goto out; + } + memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s)); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +/* + * Set the SCSI device sdev_bflags - sdev_bflags are used by the + * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan + * + * Internally iterates over all the ITNIM's part of the im_port & sets the + * sdev_bflags for the scsi_device associated with LUN #0. + */ +static void bfad_reset_sdev_bflags(struct bfad_im_port_s *im_port, + int lunmask_cfg) +{ + const blist_flags_t scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN; + struct bfad_itnim_s *itnim; + struct scsi_device *sdev; + unsigned long flags; + + spin_lock_irqsave(im_port->shost->host_lock, flags); + list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) { + sdev = __scsi_device_lookup(im_port->shost, itnim->channel, + itnim->scsi_tgt_id, 0); + if (sdev) { + if (lunmask_cfg == BFA_TRUE) + sdev->sdev_bflags |= scan_flags; + else + sdev->sdev_bflags &= ~scan_flags; + } + } + spin_unlock_irqrestore(im_port->shost->host_lock, flags); +} + +/* Function to reset the LUN SCAN mode */ +static void +bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg) +{ + struct bfad_im_port_s *pport_im = bfad->pport.im_port; + struct bfad_vport_s *vport = NULL; + + /* Set the scsi device LUN SCAN flags for base port */ + bfad_reset_sdev_bflags(pport_im, lunmask_cfg); + + /* Set the scsi device LUN SCAN flags for the vports */ + list_for_each_entry(vport, &bfad->vport_list, list_entry) + bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg); +} + +static int +bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) { + iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE); + /* Set the LUN Scanning mode to be Sequential scan */ + if (iocmd->status == BFA_STATUS_OK) + bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE); + } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) { + iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE); + /* Set the LUN Scanning mode to default REPORT_LUNS scan */ + if (iocmd->status == BFA_STATUS_OK) + bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE); + } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR) + 
iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +static int +bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcpim_lunmask_query_s *iocmd = + (struct bfa_bsg_fcpim_lunmask_query_s *)cmd; + struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +static int +bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +{ + struct bfa_bsg_fcpim_lunmask_s *iocmd = + (struct bfa_bsg_fcpim_lunmask_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD) + iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id, + &iocmd->pwwn, iocmd->rpwwn, iocmd->lun); + else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE) + iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa, + iocmd->vf_id, &iocmd->pwwn, + iocmd->rpwwn, iocmd->lun); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +static int +bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcpim_throttle_s *iocmd = + (struct bfa_bsg_fcpim_throttle_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa, + (void *)&iocmd->throttle); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcpim_throttle_s *iocmd = + (struct bfa_bsg_fcpim_throttle_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa, + iocmd->throttle.cfg_value); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_tfru_s *iocmd = + (struct bfa_bsg_tfru_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa), + &iocmd->data, iocmd->len, iocmd->offset, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status == BFA_STATUS_OK) { + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + } + + return 0; +} + +static int +bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_tfru_s *iocmd = + (struct bfa_bsg_tfru_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa), + &iocmd->data, iocmd->len, iocmd->offset, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status == BFA_STATUS_OK) { + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + } + + return 0; +} + +static int +bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fruvpd_s *iocmd = + (struct bfa_bsg_fruvpd_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa), + &iocmd->data, iocmd->len, iocmd->offset, + bfad_hcb_comp, &fcomp); + 
spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status == BFA_STATUS_OK) { + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + } + + return 0; +} + +static int +bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fruvpd_s *iocmd = + (struct bfa_bsg_fruvpd_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa), + &iocmd->data, iocmd->len, iocmd->offset, + bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status == BFA_STATUS_OK) { + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + } + + return 0; +} + +static int +bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fruvpd_max_size_s *iocmd = + (struct bfa_bsg_fruvpd_max_size_s *)cmd; + unsigned long flags = 0; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa), + &iocmd->max_size); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, + unsigned int payload_len) +{ + int rc = -EINVAL; + + switch (cmd) { + case IOCMD_IOC_ENABLE: + rc = bfad_iocmd_ioc_enable(bfad, iocmd); + break; + case IOCMD_IOC_DISABLE: + rc = bfad_iocmd_ioc_disable(bfad, iocmd); + break; + case IOCMD_IOC_GET_INFO: + rc = bfad_iocmd_ioc_get_info(bfad, iocmd); + break; + case IOCMD_IOC_GET_ATTR: + rc = bfad_iocmd_ioc_get_attr(bfad, iocmd); + break; + case IOCMD_IOC_GET_STATS: + rc = bfad_iocmd_ioc_get_stats(bfad, iocmd); + break; + case IOCMD_IOC_GET_FWSTATS: + rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len); + break; + case IOCMD_IOC_RESET_STATS: + case IOCMD_IOC_RESET_FWSTATS: + rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd); + break; + case IOCMD_IOC_SET_ADAPTER_NAME: + case IOCMD_IOC_SET_PORT_NAME: + rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd); + break; + case IOCMD_IOCFC_GET_ATTR: + rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd); + break; + case IOCMD_IOCFC_SET_INTR: + rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd); + break; + case IOCMD_PORT_ENABLE: + rc = bfad_iocmd_port_enable(bfad, iocmd); + break; + case IOCMD_PORT_DISABLE: + rc = bfad_iocmd_port_disable(bfad, iocmd); + break; + case IOCMD_PORT_GET_ATTR: + rc = bfad_iocmd_port_get_attr(bfad, iocmd); + break; + case IOCMD_PORT_GET_STATS: + rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len); + break; + case IOCMD_PORT_RESET_STATS: + rc = bfad_iocmd_port_reset_stats(bfad, iocmd); + break; + case IOCMD_PORT_CFG_TOPO: + case IOCMD_PORT_CFG_SPEED: + case IOCMD_PORT_CFG_ALPA: + case IOCMD_PORT_CLR_ALPA: + rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd); + break; + case IOCMD_PORT_CFG_MAXFRSZ: + rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd); + break; + case IOCMD_PORT_BBCR_ENABLE: + case IOCMD_PORT_BBCR_DISABLE: + rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd); + break; + case IOCMD_PORT_BBCR_GET_ATTR: + rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd); + break; + case IOCMD_LPORT_GET_ATTR: + rc = bfad_iocmd_lport_get_attr(bfad, iocmd); + break; + case IOCMD_LPORT_GET_STATS: + rc = bfad_iocmd_lport_get_stats(bfad, iocmd); + break; + case IOCMD_LPORT_RESET_STATS: + rc = bfad_iocmd_lport_reset_stats(bfad, iocmd); + break; + case IOCMD_LPORT_GET_IOSTATS: + rc = bfad_iocmd_lport_get_iostats(bfad, iocmd); + break; + case IOCMD_LPORT_GET_RPORTS: + rc 
= bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len); + break; + case IOCMD_RPORT_GET_ATTR: + rc = bfad_iocmd_rport_get_attr(bfad, iocmd); + break; + case IOCMD_RPORT_GET_ADDR: + rc = bfad_iocmd_rport_get_addr(bfad, iocmd); + break; + case IOCMD_RPORT_GET_STATS: + rc = bfad_iocmd_rport_get_stats(bfad, iocmd); + break; + case IOCMD_RPORT_RESET_STATS: + rc = bfad_iocmd_rport_clr_stats(bfad, iocmd); + break; + case IOCMD_RPORT_SET_SPEED: + rc = bfad_iocmd_rport_set_speed(bfad, iocmd); + break; + case IOCMD_VPORT_GET_ATTR: + rc = bfad_iocmd_vport_get_attr(bfad, iocmd); + break; + case IOCMD_VPORT_GET_STATS: + rc = bfad_iocmd_vport_get_stats(bfad, iocmd); + break; + case IOCMD_VPORT_RESET_STATS: + rc = bfad_iocmd_vport_clr_stats(bfad, iocmd); + break; + case IOCMD_FABRIC_GET_LPORTS: + rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len); + break; + case IOCMD_RATELIM_ENABLE: + case IOCMD_RATELIM_DISABLE: + rc = bfad_iocmd_ratelim(bfad, cmd, iocmd); + break; + case IOCMD_RATELIM_DEF_SPEED: + rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd); + break; + case IOCMD_FCPIM_FAILOVER: + rc = bfad_iocmd_cfg_fcpim(bfad, iocmd); + break; + case IOCMD_FCPIM_MODSTATS: + rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd); + break; + case IOCMD_FCPIM_MODSTATSCLR: + rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd); + break; + case IOCMD_FCPIM_DEL_ITN_STATS: + rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd); + break; + case IOCMD_ITNIM_GET_ATTR: + rc = bfad_iocmd_itnim_get_attr(bfad, iocmd); + break; + case IOCMD_ITNIM_GET_IOSTATS: + rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd); + break; + case IOCMD_ITNIM_RESET_STATS: + rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd); + break; + case IOCMD_ITNIM_GET_ITNSTATS: + rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd); + break; + case IOCMD_FCPORT_ENABLE: + rc = bfad_iocmd_fcport_enable(bfad, iocmd); + break; + case IOCMD_FCPORT_DISABLE: + rc = bfad_iocmd_fcport_disable(bfad, iocmd); + break; + case IOCMD_IOC_PCIFN_CFG: + rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd); + break; + case IOCMD_IOC_FW_SIG_INV: + rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd); + break; + case IOCMD_PCIFN_CREATE: + rc = bfad_iocmd_pcifn_create(bfad, iocmd); + break; + case IOCMD_PCIFN_DELETE: + rc = bfad_iocmd_pcifn_delete(bfad, iocmd); + break; + case IOCMD_PCIFN_BW: + rc = bfad_iocmd_pcifn_bw(bfad, iocmd); + break; + case IOCMD_ADAPTER_CFG_MODE: + rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd); + break; + case IOCMD_PORT_CFG_MODE: + rc = bfad_iocmd_port_cfg_mode(bfad, iocmd); + break; + case IOCMD_FLASH_ENABLE_OPTROM: + case IOCMD_FLASH_DISABLE_OPTROM: + rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd); + break; + case IOCMD_FAA_QUERY: + rc = bfad_iocmd_faa_query(bfad, iocmd); + break; + case IOCMD_CEE_GET_ATTR: + rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len); + break; + case IOCMD_CEE_GET_STATS: + rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len); + break; + case IOCMD_CEE_RESET_STATS: + rc = bfad_iocmd_cee_reset_stats(bfad, iocmd); + break; + case IOCMD_SFP_MEDIA: + rc = bfad_iocmd_sfp_media(bfad, iocmd); + break; + case IOCMD_SFP_SPEED: + rc = bfad_iocmd_sfp_speed(bfad, iocmd); + break; + case IOCMD_FLASH_GET_ATTR: + rc = bfad_iocmd_flash_get_attr(bfad, iocmd); + break; + case IOCMD_FLASH_ERASE_PART: + rc = bfad_iocmd_flash_erase_part(bfad, iocmd); + break; + case IOCMD_FLASH_UPDATE_PART: + rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len); + break; + case IOCMD_FLASH_READ_PART: + rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len); + break; + case 
IOCMD_DIAG_TEMP: + rc = bfad_iocmd_diag_temp(bfad, iocmd); + break; + case IOCMD_DIAG_MEMTEST: + rc = bfad_iocmd_diag_memtest(bfad, iocmd); + break; + case IOCMD_DIAG_LOOPBACK: + rc = bfad_iocmd_diag_loopback(bfad, iocmd); + break; + case IOCMD_DIAG_FWPING: + rc = bfad_iocmd_diag_fwping(bfad, iocmd); + break; + case IOCMD_DIAG_QUEUETEST: + rc = bfad_iocmd_diag_queuetest(bfad, iocmd); + break; + case IOCMD_DIAG_SFP: + rc = bfad_iocmd_diag_sfp(bfad, iocmd); + break; + case IOCMD_DIAG_LED: + rc = bfad_iocmd_diag_led(bfad, iocmd); + break; + case IOCMD_DIAG_BEACON_LPORT: + rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd); + break; + case IOCMD_DIAG_LB_STAT: + rc = bfad_iocmd_diag_lb_stat(bfad, iocmd); + break; + case IOCMD_DIAG_DPORT_ENABLE: + rc = bfad_iocmd_diag_dport_enable(bfad, iocmd); + break; + case IOCMD_DIAG_DPORT_DISABLE: + rc = bfad_iocmd_diag_dport_disable(bfad, iocmd); + break; + case IOCMD_DIAG_DPORT_SHOW: + rc = bfad_iocmd_diag_dport_show(bfad, iocmd); + break; + case IOCMD_DIAG_DPORT_START: + rc = bfad_iocmd_diag_dport_start(bfad, iocmd); + break; + case IOCMD_PHY_GET_ATTR: + rc = bfad_iocmd_phy_get_attr(bfad, iocmd); + break; + case IOCMD_PHY_GET_STATS: + rc = bfad_iocmd_phy_get_stats(bfad, iocmd); + break; + case IOCMD_PHY_UPDATE_FW: + rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len); + break; + case IOCMD_PHY_READ_FW: + rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len); + break; + case IOCMD_VHBA_QUERY: + rc = bfad_iocmd_vhba_query(bfad, iocmd); + break; + case IOCMD_DEBUG_PORTLOG: + rc = bfad_iocmd_porglog_get(bfad, iocmd); + break; + case IOCMD_DEBUG_FW_CORE: + rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len); + break; + case IOCMD_DEBUG_FW_STATE_CLR: + case IOCMD_DEBUG_PORTLOG_CLR: + case IOCMD_DEBUG_START_DTRC: + case IOCMD_DEBUG_STOP_DTRC: + rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd); + break; + case IOCMD_DEBUG_PORTLOG_CTL: + rc = bfad_iocmd_porglog_ctl(bfad, iocmd); + break; + case IOCMD_FCPIM_PROFILE_ON: + case IOCMD_FCPIM_PROFILE_OFF: + rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd); + break; + case IOCMD_ITNIM_GET_IOPROFILE: + rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd); + break; + case IOCMD_FCPORT_GET_STATS: + rc = bfad_iocmd_fcport_get_stats(bfad, iocmd); + break; + case IOCMD_FCPORT_RESET_STATS: + rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd); + break; + case IOCMD_BOOT_CFG: + rc = bfad_iocmd_boot_cfg(bfad, iocmd); + break; + case IOCMD_BOOT_QUERY: + rc = bfad_iocmd_boot_query(bfad, iocmd); + break; + case IOCMD_PREBOOT_QUERY: + rc = bfad_iocmd_preboot_query(bfad, iocmd); + break; + case IOCMD_ETHBOOT_CFG: + rc = bfad_iocmd_ethboot_cfg(bfad, iocmd); + break; + case IOCMD_ETHBOOT_QUERY: + rc = bfad_iocmd_ethboot_query(bfad, iocmd); + break; + case IOCMD_TRUNK_ENABLE: + case IOCMD_TRUNK_DISABLE: + rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd); + break; + case IOCMD_TRUNK_GET_ATTR: + rc = bfad_iocmd_trunk_get_attr(bfad, iocmd); + break; + case IOCMD_QOS_ENABLE: + case IOCMD_QOS_DISABLE: + rc = bfad_iocmd_qos(bfad, iocmd, cmd); + break; + case IOCMD_QOS_GET_ATTR: + rc = bfad_iocmd_qos_get_attr(bfad, iocmd); + break; + case IOCMD_QOS_GET_VC_ATTR: + rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd); + break; + case IOCMD_QOS_GET_STATS: + rc = bfad_iocmd_qos_get_stats(bfad, iocmd); + break; + case IOCMD_QOS_RESET_STATS: + rc = bfad_iocmd_qos_reset_stats(bfad, iocmd); + break; + case IOCMD_QOS_SET_BW: + rc = bfad_iocmd_qos_set_bw(bfad, iocmd); + break; + case IOCMD_VF_GET_STATS: + rc = bfad_iocmd_vf_get_stats(bfad, iocmd); + break; + case 
IOCMD_VF_RESET_STATS: + rc = bfad_iocmd_vf_clr_stats(bfad, iocmd); + break; + case IOCMD_FCPIM_LUNMASK_ENABLE: + case IOCMD_FCPIM_LUNMASK_DISABLE: + case IOCMD_FCPIM_LUNMASK_CLEAR: + rc = bfad_iocmd_lunmask(bfad, iocmd, cmd); + break; + case IOCMD_FCPIM_LUNMASK_QUERY: + rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd); + break; + case IOCMD_FCPIM_LUNMASK_ADD: + case IOCMD_FCPIM_LUNMASK_DELETE: + rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd); + break; + case IOCMD_FCPIM_THROTTLE_QUERY: + rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd); + break; + case IOCMD_FCPIM_THROTTLE_SET: + rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd); + break; + /* TFRU */ + case IOCMD_TFRU_READ: + rc = bfad_iocmd_tfru_read(bfad, iocmd); + break; + case IOCMD_TFRU_WRITE: + rc = bfad_iocmd_tfru_write(bfad, iocmd); + break; + /* FRU */ + case IOCMD_FRUVPD_READ: + rc = bfad_iocmd_fruvpd_read(bfad, iocmd); + break; + case IOCMD_FRUVPD_UPDATE: + rc = bfad_iocmd_fruvpd_update(bfad, iocmd); + break; + case IOCMD_FRUVPD_GET_MAX_SIZE: + rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd); + break; + default: + rc = -EINVAL; + break; + } + return rc; +} + +static int +bfad_im_bsg_vendor_request(struct bsg_job *job) +{ + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; + struct Scsi_Host *shost = fc_bsg_to_shost(job); + struct bfad_im_port_s *im_port = bfad_get_im_port(shost); + struct bfad_s *bfad = im_port->bfad; + void *payload_kbuf; + int rc = -EINVAL; + + /* Allocate a temp buffer to hold the passed in user space command */ + payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); + if (!payload_kbuf) { + rc = -ENOMEM; + goto out; + } + + /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */ + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, payload_kbuf, + job->request_payload.payload_len); + + /* Invoke IOCMD handler - to handle all the vendor command requests */ + rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf, + job->request_payload.payload_len); + if (rc != BFA_STATUS_OK) + goto error; + + /* Copy the response data to the job->reply_payload sg_list */ + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + payload_kbuf, + job->reply_payload.payload_len); + + /* free the command buffer */ + kfree(payload_kbuf); + + /* Fill the BSG job reply data */ + job->reply_len = job->reply_payload.payload_len; + bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len; + bsg_reply->result = rc; + + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rc; +error: + /* free the command buffer */ + kfree(payload_kbuf); +out: + bsg_reply->result = rc; + job->reply_len = sizeof(uint32_t); + bsg_reply->reply_payload_rcv_len = 0; + return rc; +} + +/* FC passthru call backs */ +static u64 +bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid) +{ + struct bfad_fcxp *drv_fcxp = bfad_fcxp; + struct bfa_sge_s *sge; + u64 addr; + + sge = drv_fcxp->req_sge + sgeid; + addr = (u64)(size_t) sge->sg_addr; + return addr; +} + +static u32 +bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid) +{ + struct bfad_fcxp *drv_fcxp = bfad_fcxp; + struct bfa_sge_s *sge; + + sge = drv_fcxp->req_sge + sgeid; + return sge->sg_len; +} + +static u64 +bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid) +{ + struct bfad_fcxp *drv_fcxp = bfad_fcxp; + struct bfa_sge_s *sge; + u64 addr; + + sge = 
drv_fcxp->rsp_sge + sgeid; + addr = (u64)(size_t) sge->sg_addr; + return addr; +} + +static u32 +bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid) +{ + struct bfad_fcxp *drv_fcxp = bfad_fcxp; + struct bfa_sge_s *sge; + + sge = drv_fcxp->rsp_sge + sgeid; + return sge->sg_len; +} + +static void +bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfad_fcxp *drv_fcxp = bfad_fcxp; + + drv_fcxp->req_status = req_status; + drv_fcxp->rsp_len = rsp_len; + + /* bfa_fcxp will be automatically freed by BFA */ + drv_fcxp->bfa_fcxp = NULL; + complete(&drv_fcxp->comp); +} + +static struct bfad_buf_info * +bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf, + uint32_t payload_len, uint32_t *num_sgles) +{ + struct bfad_buf_info *buf_base, *buf_info; + struct bfa_sge_s *sg_table; + int sge_num = 1; + + buf_base = kcalloc(sizeof(struct bfad_buf_info) + + sizeof(struct bfa_sge_s), + sge_num, GFP_KERNEL); + if (!buf_base) + return NULL; + + sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) + + (sizeof(struct bfad_buf_info) * sge_num)); + + /* Allocate dma coherent memory */ + buf_info = buf_base; + buf_info->size = payload_len; + buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, + buf_info->size, &buf_info->phys, + GFP_KERNEL); + if (!buf_info->virt) + goto out_free_mem; + + /* copy the linear bsg buffer to buf_info */ + memcpy(buf_info->virt, payload_kbuf, buf_info->size); + + /* + * Setup SG table + */ + sg_table->sg_len = buf_info->size; + sg_table->sg_addr = (void *)(size_t) buf_info->phys; + + *num_sgles = sge_num; + + return buf_base; + +out_free_mem: + kfree(buf_base); + return NULL; +} + +static void +bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base, + uint32_t num_sgles) +{ + int i; + struct bfad_buf_info *buf_info = buf_base; + + if (buf_base) { + for (i = 0; i < num_sgles; buf_info++, i++) { + if (buf_info->virt != NULL) + dma_free_coherent(&bfad->pcidev->dev, + buf_info->size, buf_info->virt, + buf_info->phys); + } + kfree(buf_base); + } +} + +static int +bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp, + bfa_bsg_fcpt_t *bsg_fcpt) +{ + struct bfa_fcxp_s *hal_fcxp; + struct bfad_s *bfad = drv_fcxp->port->bfad; + unsigned long flags; + uint8_t lp_tag; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + + /* Allocate bfa_fcxp structure */ + hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa, + drv_fcxp->num_req_sgles, + drv_fcxp->num_rsp_sgles, + bfad_fcxp_get_req_sgaddr_cb, + bfad_fcxp_get_req_sglen_cb, + bfad_fcxp_get_rsp_sgaddr_cb, + bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE); + if (!hal_fcxp) { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return BFA_STATUS_ENOMEM; + } + + drv_fcxp->bfa_fcxp = hal_fcxp; + + lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id); + + bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag, + bsg_fcpt->cts, bsg_fcpt->cos, + job->request_payload.payload_len, + &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad, + job->reply_payload.payload_len, bsg_fcpt->tsecs); + + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return BFA_STATUS_OK; +} + +static int +bfad_im_bsg_els_ct_request(struct bsg_job *job) +{ + struct bfa_bsg_data *bsg_data; + struct Scsi_Host *shost = fc_bsg_to_shost(job); + struct bfad_im_port_s *im_port = bfad_get_im_port(shost); + struct bfad_s *bfad = im_port->bfad; + bfa_bsg_fcpt_t *bsg_fcpt; + struct bfad_fcxp *drv_fcxp; + struct 
bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_rport_s *fcs_rport; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + uint32_t command_type = bsg_request->msgcode; + unsigned long flags; + struct bfad_buf_info *rsp_buf_info; + void *req_kbuf = NULL, *rsp_kbuf = NULL; + int rc = -EINVAL; + + job->reply_len = sizeof(uint32_t); /* Atleast uint32_t reply_len */ + bsg_reply->reply_payload_rcv_len = 0; + + /* Get the payload passed in from userspace */ + bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) + + sizeof(struct fc_bsg_request)); + if (bsg_data == NULL) + goto out; + + /* + * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload + * buffer of size bsg_data->payload_len + */ + bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL); + if (!bsg_fcpt) { + rc = -ENOMEM; + goto out; + } + + if (copy_from_user((uint8_t *)bsg_fcpt, + (void *)(unsigned long)bsg_data->payload, + bsg_data->payload_len)) { + kfree(bsg_fcpt); + rc = -EIO; + goto out; + } + + drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL); + if (drv_fcxp == NULL) { + kfree(bsg_fcpt); + rc = -ENOMEM; + goto out; + } + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id, + bsg_fcpt->lpwwn); + if (fcs_port == NULL) { + bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + goto out_free_mem; + } + + /* Check if the port is online before sending FC Passthru cmd */ + if (!bfa_fcs_lport_is_online(fcs_port)) { + bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + goto out_free_mem; + } + + drv_fcxp->port = fcs_port->bfad_port; + + if (!drv_fcxp->port->bfad) + drv_fcxp->port->bfad = bfad; + + /* Fetch the bfa_rport - if nexus needed */ + if (command_type == FC_BSG_HST_ELS_NOLOGIN || + command_type == FC_BSG_HST_CT) { + /* BSG HST commands: no nexus needed */ + drv_fcxp->bfa_rport = NULL; + + } else if (command_type == FC_BSG_RPT_ELS || + command_type == FC_BSG_RPT_CT) { + /* BSG RPT commands: nexus needed */ + fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port, + bsg_fcpt->dpwwn); + if (fcs_rport == NULL) { + bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + goto out_free_mem; + } + + drv_fcxp->bfa_rport = fcs_rport->bfa_rport; + + } else { /* Unknown BSG msgcode; return -EINVAL */ + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + goto out_free_mem; + } + + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + /* allocate memory for req / rsp buffers */ + req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); + if (!req_kbuf) { + printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n", + bfad->pci_name); + rc = -ENOMEM; + goto out_free_mem; + } + + rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL); + if (!rsp_kbuf) { + printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n", + bfad->pci_name); + rc = -ENOMEM; + goto out_free_mem; + } + + /* map req sg - copy the sg_list passed in to the linear buffer */ + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, req_kbuf, + job->request_payload.payload_len); + + drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf, + job->request_payload.payload_len, + &drv_fcxp->num_req_sgles); + if (!drv_fcxp->reqbuf_info) { + printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n", + bfad->pci_name); + rc = -ENOMEM; + goto out_free_mem; + } + + drv_fcxp->req_sge = 
(struct bfa_sge_s *) + (((uint8_t *)drv_fcxp->reqbuf_info) + + (sizeof(struct bfad_buf_info) * + drv_fcxp->num_req_sgles)); + + /* map rsp sg */ + drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf, + job->reply_payload.payload_len, + &drv_fcxp->num_rsp_sgles); + if (!drv_fcxp->rspbuf_info) { + printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n", + bfad->pci_name); + rc = -ENOMEM; + goto out_free_mem; + } + + rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info; + drv_fcxp->rsp_sge = (struct bfa_sge_s *) + (((uint8_t *)drv_fcxp->rspbuf_info) + + (sizeof(struct bfad_buf_info) * + drv_fcxp->num_rsp_sgles)); + + /* fcxp send */ + init_completion(&drv_fcxp->comp); + rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt); + if (rc == BFA_STATUS_OK) { + wait_for_completion(&drv_fcxp->comp); + bsg_fcpt->status = drv_fcxp->req_status; + } else { + bsg_fcpt->status = rc; + goto out_free_mem; + } + + /* fill the job->reply data */ + if (drv_fcxp->req_status == BFA_STATUS_OK) { + job->reply_len = drv_fcxp->rsp_len; + bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len; + bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; + } else { + bsg_reply->reply_payload_rcv_len = + sizeof(struct fc_bsg_ctels_reply); + job->reply_len = sizeof(uint32_t); + bsg_reply->reply_data.ctels_reply.status = + FC_CTELS_STATUS_REJECT; + } + + /* Copy the response data to the reply_payload sg list */ + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + (uint8_t *)rsp_buf_info->virt, + job->reply_payload.payload_len); + +out_free_mem: + bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info, + drv_fcxp->num_rsp_sgles); + bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info, + drv_fcxp->num_req_sgles); + kfree(req_kbuf); + kfree(rsp_kbuf); + + /* Need a copy to user op */ + if (copy_to_user((void *)(unsigned long)bsg_data->payload, + (void *)bsg_fcpt, bsg_data->payload_len)) + rc = -EIO; + + kfree(bsg_fcpt); + kfree(drv_fcxp); +out: + bsg_reply->result = rc; + + if (rc == BFA_STATUS_OK) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return rc; +} + +int +bfad_im_bsg_request(struct bsg_job *job) +{ + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + uint32_t rc = BFA_STATUS_OK; + + switch (bsg_request->msgcode) { + case FC_BSG_HST_VENDOR: + /* Process BSG HST Vendor requests */ + rc = bfad_im_bsg_vendor_request(job); + break; + case FC_BSG_HST_ELS_NOLOGIN: + case FC_BSG_RPT_ELS: + case FC_BSG_HST_CT: + case FC_BSG_RPT_CT: + /* Process BSG ELS/CT commands */ + rc = bfad_im_bsg_els_ct_request(job); + break; + default: + bsg_reply->result = rc = -EINVAL; + bsg_reply->reply_payload_rcv_len = 0; + break; + } + + return rc; +} + +int +bfad_im_bsg_timeout(struct bsg_job *job) +{ + /* Don't complete the BSG job request - return -EAGAIN + * to reset bsg job timeout : for ELS/CT pass thru we + * already have timer to track the request. + */ + return -EAGAIN; +} diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h new file mode 100644 index 000000000..e525339df --- /dev/null +++ b/drivers/scsi/bfa/bfad_bsg.h @@ -0,0 +1,829 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
+ */ +#ifndef BFAD_BSG_H +#define BFAD_BSG_H + +#include "bfa_defs.h" +#include "bfa_defs_fcs.h" + +/* Definitions of vendor unique structures and command codes passed in + * using FC_BSG_HST_VENDOR message code. + */ +enum { + IOCMD_IOC_ENABLE = 0x1, + IOCMD_IOC_DISABLE, + IOCMD_IOC_GET_ATTR, + IOCMD_IOC_GET_INFO, + IOCMD_IOC_GET_STATS, + IOCMD_IOC_GET_FWSTATS, + IOCMD_IOC_RESET_STATS, + IOCMD_IOC_RESET_FWSTATS, + IOCMD_IOC_SET_ADAPTER_NAME, + IOCMD_IOC_SET_PORT_NAME, + IOCMD_IOC_FW_SIG_INV, + IOCMD_IOCFC_GET_ATTR, + IOCMD_IOCFC_SET_INTR, + IOCMD_PORT_ENABLE, + IOCMD_PORT_DISABLE, + IOCMD_PORT_GET_ATTR, + IOCMD_PORT_GET_STATS, + IOCMD_PORT_RESET_STATS, + IOCMD_PORT_CFG_TOPO, + IOCMD_PORT_CFG_SPEED, + IOCMD_PORT_CFG_ALPA, + IOCMD_PORT_CFG_MAXFRSZ, + IOCMD_PORT_CLR_ALPA, + IOCMD_PORT_BBCR_ENABLE, + IOCMD_PORT_BBCR_DISABLE, + IOCMD_PORT_BBCR_GET_ATTR, + IOCMD_LPORT_GET_ATTR, + IOCMD_LPORT_GET_RPORTS, + IOCMD_LPORT_GET_STATS, + IOCMD_LPORT_RESET_STATS, + IOCMD_LPORT_GET_IOSTATS, + IOCMD_RPORT_GET_ATTR, + IOCMD_RPORT_GET_ADDR, + IOCMD_RPORT_GET_STATS, + IOCMD_RPORT_RESET_STATS, + IOCMD_RPORT_SET_SPEED, + IOCMD_VPORT_GET_ATTR, + IOCMD_VPORT_GET_STATS, + IOCMD_VPORT_RESET_STATS, + IOCMD_FABRIC_GET_LPORTS, + IOCMD_RATELIM_ENABLE, + IOCMD_RATELIM_DISABLE, + IOCMD_RATELIM_DEF_SPEED, + IOCMD_FCPIM_FAILOVER, + IOCMD_FCPIM_MODSTATS, + IOCMD_FCPIM_MODSTATSCLR, + IOCMD_FCPIM_DEL_ITN_STATS, + IOCMD_ITNIM_GET_ATTR, + IOCMD_ITNIM_GET_IOSTATS, + IOCMD_ITNIM_RESET_STATS, + IOCMD_ITNIM_GET_ITNSTATS, + IOCMD_IOC_PCIFN_CFG, + IOCMD_FCPORT_ENABLE, + IOCMD_FCPORT_DISABLE, + IOCMD_PCIFN_CREATE, + IOCMD_PCIFN_DELETE, + IOCMD_PCIFN_BW, + IOCMD_ADAPTER_CFG_MODE, + IOCMD_PORT_CFG_MODE, + IOCMD_FLASH_ENABLE_OPTROM, + IOCMD_FLASH_DISABLE_OPTROM, + IOCMD_FAA_QUERY, + IOCMD_CEE_GET_ATTR, + IOCMD_CEE_GET_STATS, + IOCMD_CEE_RESET_STATS, + IOCMD_SFP_MEDIA, + IOCMD_SFP_SPEED, + IOCMD_FLASH_GET_ATTR, + IOCMD_FLASH_ERASE_PART, + IOCMD_FLASH_UPDATE_PART, + IOCMD_FLASH_READ_PART, + IOCMD_DIAG_TEMP, + IOCMD_DIAG_MEMTEST, + IOCMD_DIAG_LOOPBACK, + IOCMD_DIAG_FWPING, + IOCMD_DIAG_QUEUETEST, + IOCMD_DIAG_SFP, + IOCMD_DIAG_LED, + IOCMD_DIAG_BEACON_LPORT, + IOCMD_DIAG_LB_STAT, + IOCMD_PHY_GET_ATTR, + IOCMD_PHY_GET_STATS, + IOCMD_PHY_UPDATE_FW, + IOCMD_PHY_READ_FW, + IOCMD_VHBA_QUERY, + IOCMD_DEBUG_PORTLOG, + IOCMD_DEBUG_FW_CORE, + IOCMD_DEBUG_FW_STATE_CLR, + IOCMD_DEBUG_PORTLOG_CLR, + IOCMD_DEBUG_START_DTRC, + IOCMD_DEBUG_STOP_DTRC, + IOCMD_DEBUG_PORTLOG_CTL, + IOCMD_FCPIM_PROFILE_ON, + IOCMD_FCPIM_PROFILE_OFF, + IOCMD_ITNIM_GET_IOPROFILE, + IOCMD_FCPORT_GET_STATS, + IOCMD_FCPORT_RESET_STATS, + IOCMD_BOOT_CFG, + IOCMD_BOOT_QUERY, + IOCMD_PREBOOT_QUERY, + IOCMD_ETHBOOT_CFG, + IOCMD_ETHBOOT_QUERY, + IOCMD_TRUNK_ENABLE, + IOCMD_TRUNK_DISABLE, + IOCMD_TRUNK_GET_ATTR, + IOCMD_QOS_ENABLE, + IOCMD_QOS_DISABLE, + IOCMD_QOS_GET_ATTR, + IOCMD_QOS_GET_VC_ATTR, + IOCMD_QOS_GET_STATS, + IOCMD_QOS_RESET_STATS, + IOCMD_VF_GET_STATS, + IOCMD_VF_RESET_STATS, + IOCMD_FCPIM_LUNMASK_ENABLE, + IOCMD_FCPIM_LUNMASK_DISABLE, + IOCMD_FCPIM_LUNMASK_CLEAR, + IOCMD_FCPIM_LUNMASK_QUERY, + IOCMD_FCPIM_LUNMASK_ADD, + IOCMD_FCPIM_LUNMASK_DELETE, + IOCMD_DIAG_DPORT_ENABLE, + IOCMD_DIAG_DPORT_DISABLE, + IOCMD_QOS_SET_BW, + IOCMD_FCPIM_THROTTLE_QUERY, + IOCMD_FCPIM_THROTTLE_SET, + IOCMD_TFRU_READ, + IOCMD_TFRU_WRITE, + IOCMD_FRUVPD_READ, + IOCMD_FRUVPD_UPDATE, + IOCMD_FRUVPD_GET_MAX_SIZE, + IOCMD_DIAG_DPORT_SHOW, + IOCMD_DIAG_DPORT_START, +}; + +struct bfa_bsg_gen_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; +}; + +struct bfa_bsg_portlogctl_s { + bfa_status_t 
status; + u16 bfad_num; + u16 rsvd; + bfa_boolean_t ctl; + int inst_no; +}; + +struct bfa_bsg_fcpim_profile_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; +}; + +struct bfa_bsg_itnim_ioprofile_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t lpwwn; + wwn_t rpwwn; + struct bfa_itnim_ioprofile_s ioprofile; +}; + +struct bfa_bsg_fcport_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + union bfa_fcport_stats_u stats; +}; + +struct bfa_bsg_ioc_name_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + char name[BFA_ADAPTER_SYM_NAME_LEN]; +}; + +struct bfa_bsg_ioc_info_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + char serialnum[64]; + char hwpath[BFA_STRING_32]; + char adapter_hwpath[BFA_STRING_32]; + char guid[BFA_ADAPTER_SYM_NAME_LEN*2]; + char name[BFA_ADAPTER_SYM_NAME_LEN]; + char port_name[BFA_ADAPTER_SYM_NAME_LEN]; + char eth_name[BFA_ADAPTER_SYM_NAME_LEN]; + wwn_t pwwn; + wwn_t nwwn; + wwn_t factorypwwn; + wwn_t factorynwwn; + mac_t mac; + mac_t factory_mac; /* Factory mac address */ + mac_t current_mac; /* Currently assigned mac address */ + enum bfa_ioc_type_e ioc_type; + u16 pvid; /* Port vlan id */ + u16 rsvd1; + u32 host; + u32 bandwidth; /* For PF support */ + u32 rsvd2; +}; + +struct bfa_bsg_ioc_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_ioc_attr_s ioc_attr; +}; + +struct bfa_bsg_ioc_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_ioc_stats_s ioc_stats; +}; + +struct bfa_bsg_ioc_fwstats_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 buf_size; + u32 rsvd1; + u64 buf_ptr; +}; + +struct bfa_bsg_iocfc_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_iocfc_attr_s iocfc_attr; +}; + +struct bfa_bsg_iocfc_intr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_iocfc_intr_attr_s attr; +}; + +struct bfa_bsg_port_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_port_attr_s attr; +}; + +struct bfa_bsg_port_cfg_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 param; + u32 rsvd1; +}; + +struct bfa_bsg_port_cfg_maxfrsize_s { + bfa_status_t status; + u16 bfad_num; + u16 maxfrsize; +}; + +struct bfa_bsg_port_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 buf_size; + u32 rsvd1; + u64 buf_ptr; +}; + +struct bfa_bsg_lport_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + struct bfa_lport_attr_s port_attr; +}; + +struct bfa_bsg_lport_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + struct bfa_lport_stats_s port_stats; +}; + +struct bfa_bsg_lport_iostats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + struct bfa_itnim_iostats_s iostats; +}; + +struct bfa_bsg_lport_get_rports_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + u64 rbuf_ptr; + u32 nrports; + u32 rsvd; +}; + +struct bfa_bsg_rport_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + wwn_t rpwwn; + u32 pid; + u32 rsvd; + struct bfa_rport_attr_s attr; +}; + +struct bfa_bsg_rport_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + wwn_t rpwwn; + struct bfa_rport_stats_s stats; +}; + +struct bfa_bsg_rport_scsi_addr_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + wwn_t rpwwn; + u32 host; + u32 bus; + u32 target; + u32 lun; +}; + +struct bfa_bsg_rport_reset_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + wwn_t rpwwn; +}; + +struct 
bfa_bsg_rport_set_speed_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + enum bfa_port_speed speed; + u32 rsvd; + wwn_t pwwn; + wwn_t rpwwn; +}; + +struct bfa_bsg_vport_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t vpwwn; + struct bfa_vport_attr_s vport_attr; +}; + +struct bfa_bsg_vport_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t vpwwn; + struct bfa_vport_stats_s vport_stats; +}; + +struct bfa_bsg_reset_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t vpwwn; +}; + +struct bfa_bsg_fabric_get_lports_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + u64 buf_ptr; + u32 nports; + u32 rsvd; +}; + +struct bfa_bsg_trl_speed_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + enum bfa_port_speed speed; +}; + +struct bfa_bsg_fcpim_s { + bfa_status_t status; + u16 bfad_num; + u16 param; +}; + +struct bfa_bsg_fcpim_modstats_s { + bfa_status_t status; + u16 bfad_num; + struct bfa_itnim_iostats_s modstats; +}; + +struct bfa_bsg_fcpim_del_itn_stats_s { + bfa_status_t status; + u16 bfad_num; + struct bfa_fcpim_del_itn_stats_s modstats; +}; + +struct bfa_bsg_fcpim_modstatsclr_s { + bfa_status_t status; + u16 bfad_num; +}; + +struct bfa_bsg_itnim_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t lpwwn; + wwn_t rpwwn; + struct bfa_itnim_attr_s attr; +}; + +struct bfa_bsg_itnim_iostats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t lpwwn; + wwn_t rpwwn; + struct bfa_itnim_iostats_s iostats; +}; + +struct bfa_bsg_itnim_itnstats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t lpwwn; + wwn_t rpwwn; + struct bfa_itnim_stats_s itnstats; +}; + +struct bfa_bsg_pcifn_cfg_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_ablk_cfg_s pcifn_cfg; +}; + +struct bfa_bsg_pcifn_s { + bfa_status_t status; + u16 bfad_num; + u16 pcifn_id; + u16 bw_min; + u16 bw_max; + u8 port; + enum bfi_pcifn_class pcifn_class; + u8 rsvd[1]; +}; + +struct bfa_bsg_adapter_cfg_mode_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_adapter_cfg_mode_s cfg; +}; + +struct bfa_bsg_port_cfg_mode_s { + bfa_status_t status; + u16 bfad_num; + u16 instance; + struct bfa_port_cfg_mode_s cfg; +}; + +struct bfa_bsg_bbcr_enable_s { + bfa_status_t status; + u16 bfad_num; + u8 bb_scn; + u8 rsvd; +}; + +struct bfa_bsg_bbcr_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_bbcr_attr_s attr; +}; + +struct bfa_bsg_faa_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_faa_attr_s faa_attr; +}; + +struct bfa_bsg_cee_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 buf_size; + u32 rsvd1; + u64 buf_ptr; +}; + +struct bfa_bsg_cee_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 buf_size; + u32 rsvd1; + u64 buf_ptr; +}; + +struct bfa_bsg_sfp_media_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + enum bfa_defs_sfp_media_e media; +}; + +struct bfa_bsg_sfp_speed_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + enum bfa_port_speed speed; +}; + +struct bfa_bsg_flash_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_flash_attr_s attr; +}; + +struct bfa_bsg_flash_s { + bfa_status_t status; + u16 bfad_num; + u8 instance; + u8 rsvd; + enum bfa_flash_part_type type; + int bufsz; + u64 buf_ptr; +}; + +struct bfa_bsg_diag_get_temp_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_diag_results_tempsensor_s result; +}; + +struct bfa_bsg_diag_memtest_s { + bfa_status_t 
status; + u16 bfad_num; + u16 rsvd[3]; + u32 pat; + struct bfa_diag_memtest_result result; + struct bfa_diag_memtest_s memtest; +}; + +struct bfa_bsg_diag_loopback_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + enum bfa_port_opmode opmode; + enum bfa_port_speed speed; + u32 lpcnt; + u32 pat; + struct bfa_diag_loopback_result_s result; +}; + +struct bfa_bsg_diag_dport_show_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_diag_dport_result_s result; +}; + +struct bfa_bsg_dport_enable_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u16 lpcnt; + u16 pat; +}; + +struct bfa_bsg_diag_fwping_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 cnt; + u32 pattern; + struct bfa_diag_results_fwping result; +}; + +struct bfa_bsg_diag_qtest_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 force; + u32 queue; + struct bfa_diag_qtest_result_s result; +}; + +struct bfa_bsg_sfp_show_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct sfp_mem_s sfp; +}; + +struct bfa_bsg_diag_led_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_diag_ledtest_s ledtest; +}; + +struct bfa_bsg_diag_beacon_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + bfa_boolean_t beacon; + bfa_boolean_t link_e2e_beacon; + u32 second; +}; + +struct bfa_bsg_diag_lb_stat_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; +}; + +struct bfa_bsg_phy_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 instance; + struct bfa_phy_attr_s attr; +}; + +struct bfa_bsg_phy_s { + bfa_status_t status; + u16 bfad_num; + u16 instance; + u64 bufsz; + u64 buf_ptr; +}; + +struct bfa_bsg_debug_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 bufsz; + int inst_no; + u64 buf_ptr; + u64 offset; +}; + +struct bfa_bsg_phy_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 instance; + struct bfa_phy_stats_s stats; +}; + +struct bfa_bsg_vhba_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 pcifn_id; + struct bfa_vhba_attr_s attr; +}; + +struct bfa_bsg_boot_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_boot_cfg_s cfg; +}; + +struct bfa_bsg_preboot_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_boot_pbc_s cfg; +}; + +struct bfa_bsg_ethboot_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_ethboot_cfg_s cfg; +}; + +struct bfa_bsg_trunk_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_trunk_attr_s attr; +}; + +struct bfa_bsg_qos_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_qos_attr_s attr; +}; + +struct bfa_bsg_qos_vc_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_qos_vc_attr_s attr; +}; + +struct bfa_bsg_qos_bw_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_qos_bw_s qos_bw; +}; + +struct bfa_bsg_vf_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + struct bfa_vf_stats_s stats; +}; + +struct bfa_bsg_vf_reset_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; +}; + +struct bfa_bsg_fcpim_lunmask_query_s { + bfa_status_t status; + u16 bfad_num; + struct bfa_lunmask_cfg_s lun_mask; +}; + +struct bfa_bsg_fcpim_lunmask_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + wwn_t rpwwn; + struct scsi_lun lun; +}; + +struct bfa_bsg_fcpim_throttle_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + struct bfa_defs_fcpim_throttle_s throttle; +}; + +#define BFA_TFRU_DATA_SIZE 64 +#define BFA_MAX_FRUVPD_TRANSFER_SIZE 0x1000 + +struct bfa_bsg_tfru_s { + 
bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 offset; + u32 len; + u8 data[BFA_TFRU_DATA_SIZE]; +}; + +struct bfa_bsg_fruvpd_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd1; + u32 offset; + u32 len; + u8 data[BFA_MAX_FRUVPD_TRANSFER_SIZE]; + u8 trfr_cmpl; + u8 rsvd2[3]; +}; + +struct bfa_bsg_fruvpd_max_size_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 max_size; +}; + +struct bfa_bsg_fcpt_s { + bfa_status_t status; + u16 vf_id; + wwn_t lpwwn; + wwn_t dpwwn; + u32 tsecs; + int cts; + enum fc_cos cos; + struct fchs_s fchs; +}; +#define bfa_bsg_fcpt_t struct bfa_bsg_fcpt_s + +#pragma pack(1) +struct bfa_bsg_data { + int payload_len; + u64 payload; +}; +#pragma pack() + +#define bfad_chk_iocmd_sz(__payload_len, __hdrsz, __bufsz) \ + (((__payload_len) != ((__hdrsz) + (__bufsz))) ? \ + BFA_STATUS_FAILED : BFA_STATUS_OK) + +#endif /* BFAD_BSG_H */ diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c new file mode 100644 index 000000000..52db147d9 --- /dev/null +++ b/drivers/scsi/bfa/bfad_debugfs.c @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +#include +#include + +#include "bfad_drv.h" +#include "bfad_im.h" + +/* + * BFA debufs interface + * + * To access the interface, debugfs file system should be mounted + * if not already mounted using: + * mount -t debugfs none /sys/kernel/debug + * + * BFA Hierarchy: + * - bfa/pci_dev: + * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bfa + * + * Debugging service available per pci_dev: + * fwtrc: To collect current firmware trace. + * drvtrc: To collect current driver trace + * fwsave: To collect last saved fw trace as a result of firmware crash. + * regwr: To write one word to chip register + * regrd: To read one or more words from chip register. 
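+ * + * Input format for the register files (matching the sscanf() calls in + * the regrd/regwr write handlers below): a single hex pair per write, + * "offset:count" (count in 32-bit words) for regrd and "offset:value" + * for regwr; offsets are validated against the chip register window by + * bfad_reg_offset_check(). For example, with a placeholder PCI name: + * echo 100:4 > /sys/kernel/debug/bfa/pci_dev:<pci_name>/regrd + * cat /sys/kernel/debug/bfa/pci_dev:<pci_name>/regrd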
+ */ + +struct bfad_debug_info { + char *debug_buffer; + void *i_private; + int buffer_len; +}; + +static int +bfad_debugfs_open_drvtrc(struct inode *inode, struct file *file) +{ + struct bfad_port_s *port = inode->i_private; + struct bfad_s *bfad = port->bfad; + struct bfad_debug_info *debug; + + debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL); + if (!debug) + return -ENOMEM; + + debug->debug_buffer = (void *) bfad->trcmod; + debug->buffer_len = sizeof(struct bfa_trc_mod_s); + + file->private_data = debug; + + return 0; +} + +static int +bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file) +{ + struct bfad_port_s *port = inode->i_private; + struct bfad_s *bfad = port->bfad; + struct bfad_debug_info *fw_debug; + unsigned long flags; + int rc; + + fw_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL); + if (!fw_debug) + return -ENOMEM; + + fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s); + + fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len); + if (!fw_debug->debug_buffer) { + kfree(fw_debug); + printk(KERN_INFO "bfad[%d]: Failed to allocate fwtrc buffer\n", + bfad->inst_no); + return -ENOMEM; + } + + spin_lock_irqsave(&bfad->bfad_lock, flags); + rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc, + fw_debug->debug_buffer, + &fw_debug->buffer_len); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (rc != BFA_STATUS_OK) { + vfree(fw_debug->debug_buffer); + fw_debug->debug_buffer = NULL; + kfree(fw_debug); + printk(KERN_INFO "bfad[%d]: Failed to collect fwtrc\n", + bfad->inst_no); + return -ENOMEM; + } + + file->private_data = fw_debug; + + return 0; +} + +static int +bfad_debugfs_open_fwsave(struct inode *inode, struct file *file) +{ + struct bfad_port_s *port = inode->i_private; + struct bfad_s *bfad = port->bfad; + struct bfad_debug_info *fw_debug; + unsigned long flags; + int rc; + + fw_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL); + if (!fw_debug) + return -ENOMEM; + + fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s); + + fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len); + if (!fw_debug->debug_buffer) { + kfree(fw_debug); + printk(KERN_INFO "bfad[%d]: Failed to allocate fwsave buffer\n", + bfad->inst_no); + return -ENOMEM; + } + + spin_lock_irqsave(&bfad->bfad_lock, flags); + rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc, + fw_debug->debug_buffer, + &fw_debug->buffer_len); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (rc != BFA_STATUS_OK) { + vfree(fw_debug->debug_buffer); + fw_debug->debug_buffer = NULL; + kfree(fw_debug); + printk(KERN_INFO "bfad[%d]: Failed to collect fwsave\n", + bfad->inst_no); + return -ENOMEM; + } + + file->private_data = fw_debug; + + return 0; +} + +static int +bfad_debugfs_open_reg(struct inode *inode, struct file *file) +{ + struct bfad_debug_info *reg_debug; + + reg_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL); + if (!reg_debug) + return -ENOMEM; + + reg_debug->i_private = inode->i_private; + + file->private_data = reg_debug; + + return 0; +} + +/* Changes the current file position */ +static loff_t +bfad_debugfs_lseek(struct file *file, loff_t offset, int orig) +{ + struct bfad_debug_info *debug = file->private_data; + return fixed_size_llseek(file, offset, orig, + debug->buffer_len); +} + +static ssize_t +bfad_debugfs_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *pos) +{ + struct bfad_debug_info *debug = file->private_data; + + if (!debug || !debug->debug_buffer) + return 0; + + return simple_read_from_buffer(buf, nbytes, pos, + debug->debug_buffer, 
debug->buffer_len); +} + +#define BFA_REG_CT_ADDRSZ (0x40000) +#define BFA_REG_CB_ADDRSZ (0x20000) +#define BFA_REG_ADDRSZ(__ioc) \ + ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ? \ + BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)) +#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1) + +static bfa_status_t +bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len) +{ + u8 area; + + /* check [16:15] */ + area = (offset >> 15) & 0x7; + if (area == 0) { + /* PCIe core register */ + if ((offset + (len<<2)) > 0x8000) /* 8k dwords or 32KB */ + return BFA_STATUS_EINVAL; + } else if (area == 0x1) { + /* CB 32 KB memory page */ + if ((offset + (len<<2)) > 0x10000) /* 8k dwords or 32KB */ + return BFA_STATUS_EINVAL; + } else { + /* CB register space 64KB */ + if ((offset + (len<<2)) > BFA_REG_ADDRMSK(&bfa->ioc)) + return BFA_STATUS_EINVAL; + } + return BFA_STATUS_OK; +} + +static ssize_t +bfad_debugfs_read_regrd(struct file *file, char __user *buf, + size_t nbytes, loff_t *pos) +{ + struct bfad_debug_info *regrd_debug = file->private_data; + struct bfad_port_s *port = (struct bfad_port_s *)regrd_debug->i_private; + struct bfad_s *bfad = port->bfad; + ssize_t rc; + + if (!bfad->regdata) + return 0; + + rc = simple_read_from_buffer(buf, nbytes, pos, + bfad->regdata, bfad->reglen); + + if ((*pos + nbytes) >= bfad->reglen) { + kfree(bfad->regdata); + bfad->regdata = NULL; + bfad->reglen = 0; + } + + return rc; +} + +static ssize_t +bfad_debugfs_write_regrd(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct bfad_debug_info *regrd_debug = file->private_data; + struct bfad_port_s *port = (struct bfad_port_s *)regrd_debug->i_private; + struct bfad_s *bfad = port->bfad; + struct bfa_s *bfa = &bfad->bfa; + struct bfa_ioc_s *ioc = &bfa->ioc; + int addr, rc, i; + u32 len; + u32 *regbuf; + void __iomem *rb, *reg_addr; + unsigned long flags; + void *kern_buf; + + kern_buf = memdup_user(buf, nbytes); + if (IS_ERR(kern_buf)) + return PTR_ERR(kern_buf); + + rc = sscanf(kern_buf, "%x:%x", &addr, &len); + if (rc < 2 || len > (UINT_MAX >> 2)) { + printk(KERN_INFO + "bfad[%d]: %s failed to read user buf\n", + bfad->inst_no, __func__); + kfree(kern_buf); + return -EINVAL; + } + + kfree(kern_buf); + kfree(bfad->regdata); + bfad->regdata = NULL; + bfad->reglen = 0; + + bfad->regdata = kzalloc(len << 2, GFP_KERNEL); + if (!bfad->regdata) { + printk(KERN_INFO "bfad[%d]: Failed to allocate regrd buffer\n", + bfad->inst_no); + return -ENOMEM; + } + + bfad->reglen = len << 2; + rb = bfa_ioc_bar0(ioc); + addr &= BFA_REG_ADDRMSK(ioc); + + /* offset and len sanity check */ + rc = bfad_reg_offset_check(bfa, addr, len); + if (rc) { + printk(KERN_INFO "bfad[%d]: Failed reg offset check\n", + bfad->inst_no); + kfree(bfad->regdata); + bfad->regdata = NULL; + bfad->reglen = 0; + return -EINVAL; + } + + reg_addr = rb + addr; + regbuf = (u32 *)bfad->regdata; + spin_lock_irqsave(&bfad->bfad_lock, flags); + for (i = 0; i < len; i++) { + *regbuf = readl(reg_addr); + regbuf++; + reg_addr += sizeof(u32); + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return nbytes; +} + +static ssize_t +bfad_debugfs_write_regwr(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct bfad_debug_info *debug = file->private_data; + struct bfad_port_s *port = (struct bfad_port_s *)debug->i_private; + struct bfad_s *bfad = port->bfad; + struct bfa_s *bfa = &bfad->bfa; + struct bfa_ioc_s *ioc = &bfa->ioc; + int addr, val, rc; + void __iomem *reg_addr; + unsigned long flags; + void 
*kern_buf; + + kern_buf = memdup_user(buf, nbytes); + if (IS_ERR(kern_buf)) + return PTR_ERR(kern_buf); + + rc = sscanf(kern_buf, "%x:%x", &addr, &val); + if (rc < 2) { + printk(KERN_INFO + "bfad[%d]: %s failed to read user buf\n", + bfad->inst_no, __func__); + kfree(kern_buf); + return -EINVAL; + } + kfree(kern_buf); + + addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */ + + /* offset and len sanity check */ + rc = bfad_reg_offset_check(bfa, addr, 1); + if (rc) { + printk(KERN_INFO + "bfad[%d]: Failed reg offset check\n", + bfad->inst_no); + return -EINVAL; + } + + reg_addr = (bfa_ioc_bar0(ioc)) + addr; + spin_lock_irqsave(&bfad->bfad_lock, flags); + writel(val, reg_addr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return nbytes; +} + +static int +bfad_debugfs_release(struct inode *inode, struct file *file) +{ + struct bfad_debug_info *debug = file->private_data; + + if (!debug) + return 0; + + file->private_data = NULL; + kfree(debug); + return 0; +} + +static int +bfad_debugfs_release_fwtrc(struct inode *inode, struct file *file) +{ + struct bfad_debug_info *fw_debug = file->private_data; + + if (!fw_debug) + return 0; + + vfree(fw_debug->debug_buffer); + + file->private_data = NULL; + kfree(fw_debug); + return 0; +} + +static const struct file_operations bfad_debugfs_op_drvtrc = { + .owner = THIS_MODULE, + .open = bfad_debugfs_open_drvtrc, + .llseek = bfad_debugfs_lseek, + .read = bfad_debugfs_read, + .release = bfad_debugfs_release, +}; + +static const struct file_operations bfad_debugfs_op_fwtrc = { + .owner = THIS_MODULE, + .open = bfad_debugfs_open_fwtrc, + .llseek = bfad_debugfs_lseek, + .read = bfad_debugfs_read, + .release = bfad_debugfs_release_fwtrc, +}; + +static const struct file_operations bfad_debugfs_op_fwsave = { + .owner = THIS_MODULE, + .open = bfad_debugfs_open_fwsave, + .llseek = bfad_debugfs_lseek, + .read = bfad_debugfs_read, + .release = bfad_debugfs_release_fwtrc, +}; + +static const struct file_operations bfad_debugfs_op_regrd = { + .owner = THIS_MODULE, + .open = bfad_debugfs_open_reg, + .llseek = bfad_debugfs_lseek, + .read = bfad_debugfs_read_regrd, + .write = bfad_debugfs_write_regrd, + .release = bfad_debugfs_release, +}; + +static const struct file_operations bfad_debugfs_op_regwr = { + .owner = THIS_MODULE, + .open = bfad_debugfs_open_reg, + .llseek = bfad_debugfs_lseek, + .write = bfad_debugfs_write_regwr, + .release = bfad_debugfs_release, +}; + +struct bfad_debugfs_entry { + const char *name; + umode_t mode; + const struct file_operations *fops; +}; + +static const struct bfad_debugfs_entry bfad_debugfs_files[] = { + { "drvtrc", S_IFREG|S_IRUGO, &bfad_debugfs_op_drvtrc, }, + { "fwtrc", S_IFREG|S_IRUGO, &bfad_debugfs_op_fwtrc, }, + { "fwsave", S_IFREG|S_IRUGO, &bfad_debugfs_op_fwsave, }, + { "regrd", S_IFREG|S_IRUGO|S_IWUSR, &bfad_debugfs_op_regrd, }, + { "regwr", S_IFREG|S_IWUSR, &bfad_debugfs_op_regwr, }, +}; + +static struct dentry *bfa_debugfs_root; +static atomic_t bfa_debugfs_port_count; + +inline void +bfad_debugfs_init(struct bfad_port_s *port) +{ + struct bfad_s *bfad = port->bfad; + const struct bfad_debugfs_entry *file; + char name[64]; + int i; + + if (!bfa_debugfs_enable) + return; + + /* Setup the BFA debugfs root directory*/ + if (!bfa_debugfs_root) { + bfa_debugfs_root = debugfs_create_dir("bfa", NULL); + atomic_set(&bfa_debugfs_port_count, 0); + } + + /* Setup the pci_dev debugfs directory for the port */ + snprintf(name, sizeof(name), "pci_dev:%s", bfad->pci_name); + if (!port->port_debugfs_root) { + 
port->port_debugfs_root = + debugfs_create_dir(name, bfa_debugfs_root); + + atomic_inc(&bfa_debugfs_port_count); + + for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) { + file = &bfad_debugfs_files[i]; + bfad->bfad_dentry_files[i] = + debugfs_create_file(file->name, + file->mode, + port->port_debugfs_root, + port, + file->fops); + } + } + + return; +} + +inline void +bfad_debugfs_exit(struct bfad_port_s *port) +{ + struct bfad_s *bfad = port->bfad; + int i; + + for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) { + if (bfad->bfad_dentry_files[i]) { + debugfs_remove(bfad->bfad_dentry_files[i]); + bfad->bfad_dentry_files[i] = NULL; + } + } + + /* Remove the pci_dev debugfs directory for the port */ + if (port->port_debugfs_root) { + debugfs_remove(port->port_debugfs_root); + port->port_debugfs_root = NULL; + atomic_dec(&bfa_debugfs_port_count); + } + + /* Remove the BFA debugfs root directory */ + if (atomic_read(&bfa_debugfs_port_count) == 0) { + debugfs_remove(bfa_debugfs_root); + bfa_debugfs_root = NULL; + } +} diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h new file mode 100644 index 000000000..7682cfa34 --- /dev/null +++ b/drivers/scsi/bfa/bfad_drv.h @@ -0,0 +1,352 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +/* + * Contains base driver definitions. + */ + +/* + * bfa_drv.h Linux driver data structures. + */ + +#ifndef __BFAD_DRV_H__ +#define __BFAD_DRV_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bfa_modules.h" +#include "bfa_fcs.h" +#include "bfa_defs_fcs.h" + +#include "bfa_plog.h" +#include "bfa_cs.h" + +#define BFAD_DRIVER_NAME "bfa" +#ifdef BFA_DRIVER_VERSION +#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION +#else +#define BFAD_DRIVER_VERSION "3.2.25.1" +#endif + +#define BFAD_PROTO_NAME FCPI_NAME +#define BFAD_IRQ_FLAGS IRQF_SHARED + +#ifndef FC_PORTSPEED_8GBIT +#define FC_PORTSPEED_8GBIT 0x10 +#endif + +/* + * BFAD flags + */ +#define BFAD_MSIX_ON 0x00000001 +#define BFAD_HAL_INIT_DONE 0x00000002 +#define BFAD_DRV_INIT_DONE 0x00000004 +#define BFAD_CFG_PPORT_DONE 0x00000008 +#define BFAD_HAL_START_DONE 0x00000010 +#define BFAD_PORT_ONLINE 0x00000020 +#define BFAD_RPORT_ONLINE 0x00000040 +#define BFAD_FCS_INIT_DONE 0x00000080 +#define BFAD_HAL_INIT_FAIL 0x00000100 +#define BFAD_FC4_PROBE_DONE 0x00000200 +#define BFAD_PORT_DELETE 0x00000001 +#define BFAD_INTX_ON 0x00000400 +#define BFAD_EEH_BUSY 0x00000800 +#define BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE 0x00001000 +/* + * BFAD related definition + */ +#define SCSI_SCAN_DELAY HZ +#define BFAD_STOP_TIMEOUT 30 +#define BFAD_SUSPEND_TIMEOUT BFAD_STOP_TIMEOUT + +/* + * BFAD configuration parameter default values + */ +#define BFAD_LUN_QUEUE_DEPTH 32 +#define BFAD_IO_MAX_SGE SG_ALL +#define BFAD_MIN_SECTORS 128 /* 64k */ +#define BFAD_MAX_SECTORS 0xFFFF /* 32 MB */ + +#define bfad_isr_t irq_handler_t + +#define MAX_MSIX_ENTRY 22 + +struct bfad_msix_s { + struct bfad_s *bfad; + struct msix_entry msix; + char name[32]; +}; + +/* + * Only append to the enums defined here to avoid any versioning + * needed between trace utility and driver version + */ +enum { + BFA_TRC_LDRV_BFAD = 1, + BFA_TRC_LDRV_IM = 2, + BFA_TRC_LDRV_BSG = 
3, +}; + +enum bfad_port_pvb_type { + BFAD_PORT_PHYS_BASE = 0, + BFAD_PORT_PHYS_VPORT = 1, + BFAD_PORT_VF_BASE = 2, + BFAD_PORT_VF_VPORT = 3, +}; + +/* + * PORT data structure + */ +struct bfad_port_s { + struct list_head list_entry; + struct bfad_s *bfad; + struct bfa_fcs_lport_s *fcs_port; + u32 roles; + s32 flags; + u32 supported_fc4s; + enum bfad_port_pvb_type pvb_type; + struct bfad_im_port_s *im_port; /* IM specific data */ + /* port debugfs specific data */ + struct dentry *port_debugfs_root; +}; + +/* + * VPORT data structure + */ +struct bfad_vport_s { + struct bfad_port_s drv_port; + struct bfa_fcs_vport_s fcs_vport; + struct completion *comp_del; + struct list_head list_entry; +}; + +/* + * VF data structure + */ +struct bfad_vf_s { + bfa_fcs_vf_t fcs_vf; + struct bfad_port_s base_port; /* base port for vf */ + struct bfad_s *bfad; +}; + +struct bfad_cfg_param_s { + u32 rport_del_timeout; + u32 ioc_queue_depth; + u32 lun_queue_depth; + u32 io_max_sge; + u32 binding_method; +}; + +union bfad_tmp_buf { + /* From struct bfa_adapter_attr_s */ + char manufacturer[BFA_ADAPTER_MFG_NAME_LEN]; + char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; + char model[BFA_ADAPTER_MODEL_NAME_LEN]; + char fw_ver[BFA_VERSION_LEN]; + char optrom_ver[BFA_VERSION_LEN]; + + /* From struct bfa_ioc_pci_attr_s */ + u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */ + + wwn_t wwn[BFA_FCS_MAX_LPORTS]; +}; + +/* + * BFAD (PCI function) data structure + */ +struct bfad_s { + bfa_sm_t sm; /* state machine */ + struct list_head list_entry; + struct bfa_s bfa; + struct bfa_fcs_s bfa_fcs; + struct pci_dev *pcidev; + const char *pci_name; + struct bfa_pcidev_s hal_pcidev; + struct bfa_ioc_pci_attr_s pci_attr; + void __iomem *pci_bar0_kva; + void __iomem *pci_bar2_kva; + struct completion comp; + struct completion suspend; + struct completion enable_comp; + struct completion disable_comp; + bfa_boolean_t disable_active; + struct bfad_port_s pport; /* physical port of the BFAD */ + struct bfa_meminfo_s meminfo; + struct bfa_iocfc_cfg_s ioc_cfg; + u32 inst_no; /* BFAD instance number */ + u32 bfad_flags; + spinlock_t bfad_lock; + struct task_struct *bfad_tsk; + struct bfad_cfg_param_s cfg_data; + struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY]; + int nvec; + char adapter_name[BFA_ADAPTER_SYM_NAME_LEN]; + char port_name[BFA_ADAPTER_SYM_NAME_LEN]; + struct timer_list hal_tmo; + unsigned long hs_start; + struct bfad_im_s *im; /* IM specific data */ + struct bfa_trc_mod_s *trcmod; + struct bfa_plog_s plog_buf; + int ref_count; + union bfad_tmp_buf tmp_buf; + struct fc_host_statistics link_stats; + struct list_head pbc_vport_list; + /* debugfs specific data */ + char *regdata; + u32 reglen; + struct dentry *bfad_dentry_files[5]; + struct list_head free_aen_q; + struct list_head active_aen_q; + struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY]; + spinlock_t bfad_aen_spinlock; + struct list_head vport_list; +}; + +/* BFAD state machine events */ +enum bfad_sm_event { + BFAD_E_CREATE = 1, + BFAD_E_KTHREAD_CREATE_FAILED = 2, + BFAD_E_INIT = 3, + BFAD_E_INIT_SUCCESS = 4, + BFAD_E_HAL_INIT_FAILED = 5, + BFAD_E_INIT_FAILED = 6, + BFAD_E_FCS_EXIT_COMP = 7, + BFAD_E_EXIT_COMP = 8, + BFAD_E_STOP = 9 +}; + +/* + * RPORT data structure + */ +struct bfad_rport_s { + struct bfa_fcs_rport_s fcs_rport; +}; + +struct bfad_buf_info { + void *virt; + dma_addr_t phys; + u32 size; +}; + +struct bfad_fcxp { + struct bfad_port_s *port; + struct bfa_rport_s *bfa_rport; + bfa_status_t req_status; + u16 tag; + u16 rsp_len; + u16 rsp_maxlen; + u8 
use_ireqbuf; + u8 use_irspbuf; + u32 num_req_sgles; + u32 num_rsp_sgles; + struct fchs_s fchs; + void *reqbuf_info; + void *rspbuf_info; + struct bfa_sge_s *req_sge; + struct bfa_sge_s *rsp_sge; + fcxp_send_cb_t send_cbfn; + void *send_cbarg; + void *bfa_fcxp; + struct completion comp; +}; + +struct bfad_hal_comp { + bfa_status_t status; + struct completion comp; +}; + +#define BFA_LOG(level, bfad, mask, fmt, arg...) \ +do { \ + if (((mask) == 4) || (level[1] <= '4')) \ + dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg); \ +} while (0) + +bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id, + struct bfa_lport_cfg_s *port_cfg, + struct device *dev); +bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id, + struct bfa_lport_cfg_s *port_cfg); +bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role); +bfa_status_t bfad_drv_init(struct bfad_s *bfad); +bfa_status_t bfad_start_ops(struct bfad_s *bfad); +void bfad_drv_start(struct bfad_s *bfad); +void bfad_uncfg_pport(struct bfad_s *bfad); +void bfad_stop(struct bfad_s *bfad); +void bfad_fcs_stop(struct bfad_s *bfad); +void bfad_remove_intr(struct bfad_s *bfad); +void bfad_hal_mem_release(struct bfad_s *bfad); +void bfad_hcb_comp(void *arg, bfa_status_t status); + +int bfad_setup_intr(struct bfad_s *bfad); +void bfad_remove_intr(struct bfad_s *bfad); +void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg); +bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad); +void bfad_bfa_tmo(struct timer_list *t); +void bfad_init_timer(struct bfad_s *bfad); +int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad); +void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad); +void bfad_drv_uninit(struct bfad_s *bfad); +int bfad_worker(void *ptr); +void bfad_debugfs_init(struct bfad_port_s *port); +void bfad_debugfs_exit(struct bfad_port_s *port); + +void bfad_pci_remove(struct pci_dev *pdev); +int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid); +void bfad_rport_online_wait(struct bfad_s *bfad); +int bfad_get_linkup_delay(struct bfad_s *bfad); +int bfad_install_msix_handler(struct bfad_s *bfad); + +extern struct idr bfad_im_port_index; +extern struct pci_device_id bfad_id_table[]; +extern struct list_head bfad_list; +extern char *os_name; +extern char *os_patch; +extern char *host_name; +extern int num_rports; +extern int num_ios; +extern int num_tms; +extern int num_fcxps; +extern int num_ufbufs; +extern int reqq_size; +extern int rspq_size; +extern int num_sgpgs; +extern int rport_del_timeout; +extern int bfa_lun_queue_depth; +extern int bfa_io_max_sge; +extern int bfa_log_level; +extern int ioc_auto_recover; +extern int bfa_linkup_delay; +extern int msix_disable_cb; +extern int msix_disable_ct; +extern int fdmi_enable; +extern int supported_fc4s; +extern int pcie_max_read_reqsz; +extern int max_xfer_size; +extern int bfa_debugfs_enable; +extern struct mutex bfad_mutex; + +#endif /* __BFAD_DRV_H__ */ diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c new file mode 100644 index 000000000..a9d3d8562 --- /dev/null +++ b/drivers/scsi/bfa/bfad_im.c @@ -0,0 +1,1330 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +/* + * bfad_im.c Linux driver IM module. 
+ */ + +#include + +#include "bfad_drv.h" +#include "bfad_im.h" +#include "bfa_fcs.h" + +BFA_TRC_FILE(LDRV, IM); + +DEFINE_IDR(bfad_im_port_index); +struct scsi_transport_template *bfad_im_scsi_transport_template; +struct scsi_transport_template *bfad_im_scsi_vport_transport_template; +static void bfad_im_itnim_work_handler(struct work_struct *work); +static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd); +static int bfad_im_slave_alloc(struct scsi_device *sdev); +static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, + struct bfad_itnim_s *itnim); + +void +bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio, + enum bfi_ioim_status io_status, u8 scsi_status, + int sns_len, u8 *sns_info, s32 residue) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; + struct bfad_s *bfad = drv; + struct bfad_itnim_data_s *itnim_data; + struct bfad_itnim_s *itnim; + u8 host_status = DID_OK; + + switch (io_status) { + case BFI_IOIM_STS_OK: + bfa_trc(bfad, scsi_status); + scsi_set_resid(cmnd, 0); + + if (sns_len > 0) { + bfa_trc(bfad, sns_len); + if (sns_len > SCSI_SENSE_BUFFERSIZE) + sns_len = SCSI_SENSE_BUFFERSIZE; + memcpy(cmnd->sense_buffer, sns_info, sns_len); + } + + if (residue > 0) { + bfa_trc(bfad, residue); + scsi_set_resid(cmnd, residue); + if (!sns_len && (scsi_status == SAM_STAT_GOOD) && + (scsi_bufflen(cmnd) - residue) < + cmnd->underflow) { + bfa_trc(bfad, 0); + host_status = DID_ERROR; + } + } + cmnd->result = host_status << 16 | scsi_status; + + break; + + case BFI_IOIM_STS_TIMEDOUT: + cmnd->result = DID_TIME_OUT << 16; + break; + case BFI_IOIM_STS_PATHTOV: + cmnd->result = DID_TRANSPORT_DISRUPTED << 16; + break; + default: + cmnd->result = DID_ERROR << 16; + } + + /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ + if (cmnd->device->host != NULL) + scsi_dma_unmap(cmnd); + + cmnd->host_scribble = NULL; + bfa_trc(bfad, cmnd->result); + + itnim_data = cmnd->device->hostdata; + if (itnim_data) { + itnim = itnim_data->itnim; + if (!cmnd->result && itnim && + (bfa_lun_queue_depth > cmnd->device->queue_depth)) { + /* Queue depth adjustment for good status completion */ + bfad_ramp_up_qdepth(itnim, cmnd->device); + } else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) { + /* qfull handling */ + bfad_handle_qfull(itnim, cmnd->device); + } + } + + scsi_done(cmnd); +} + +void +bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; + struct bfad_itnim_data_s *itnim_data; + struct bfad_itnim_s *itnim; + + cmnd->result = DID_OK << 16 | SAM_STAT_GOOD; + + /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ + if (cmnd->device->host != NULL) + scsi_dma_unmap(cmnd); + + cmnd->host_scribble = NULL; + + /* Queue depth adjustment */ + if (bfa_lun_queue_depth > cmnd->device->queue_depth) { + itnim_data = cmnd->device->hostdata; + if (itnim_data) { + itnim = itnim_data->itnim; + if (itnim) + bfad_ramp_up_qdepth(itnim, cmnd->device); + } + } + + scsi_done(cmnd); +} + +void +bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; + struct bfad_s *bfad = drv; + + cmnd->result = DID_ERROR << 16; + + /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ + if (cmnd->device->host != NULL) + scsi_dma_unmap(cmnd); + + bfa_trc(bfad, cmnd->result); + cmnd->host_scribble = NULL; +} + +void +bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, + enum bfi_tskim_status tsk_status) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk; + 
wait_queue_head_t *wq; + + bfad_priv(cmnd)->status |= tsk_status << 1; + set_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status); + wq = bfad_priv(cmnd)->wq; + bfad_priv(cmnd)->wq = NULL; + + if (wq) + wake_up(wq); +} + +/* + * Scsi_Host_template SCSI host template + */ +/* + * Scsi_Host template entry, returns BFAD PCI info. + */ +static const char * +bfad_im_info(struct Scsi_Host *shost) +{ + static char bfa_buf[256]; + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + + memset(bfa_buf, 0, sizeof(bfa_buf)); + snprintf(bfa_buf, sizeof(bfa_buf), + "QLogic BR-series FC/FCOE Adapter, hwpath: %s driver: %s", + bfad->pci_name, BFAD_DRIVER_VERSION); + + return bfa_buf; +} + +/* + * Scsi_Host template entry, aborts the specified SCSI command. + * + * Returns: SUCCESS or FAILED. + */ +static int +bfad_im_abort_handler(struct scsi_cmnd *cmnd) +{ + struct Scsi_Host *shost = cmnd->device->host; + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + struct bfa_ioim_s *hal_io; + unsigned long flags; + u32 timeout; + int rc = FAILED; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + hal_io = (struct bfa_ioim_s *) cmnd->host_scribble; + if (!hal_io) { + /* IO has been completed, return success */ + rc = SUCCESS; + goto out; + } + if (hal_io->dio != (struct bfad_ioim_s *) cmnd) { + rc = FAILED; + goto out; + } + + bfa_trc(bfad, hal_io->iotag); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "scsi%d: abort cmnd %p iotag %x\n", + im_port->shost->host_no, cmnd, hal_io->iotag); + (void) bfa_ioim_abort(hal_io); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + /* Need to wait until the command get aborted */ + timeout = 10; + while ((struct bfa_ioim_s *) cmnd->host_scribble == hal_io) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(timeout); + if (timeout < 4 * HZ) + timeout *= 2; + } + + scsi_done(cmnd); + bfa_trc(bfad, hal_io->iotag); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "scsi%d: complete abort 0x%p iotag 0x%x\n", + im_port->shost->host_no, cmnd, hal_io->iotag); + return SUCCESS; +out: + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return rc; +} + +static bfa_status_t +bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd, + struct bfad_itnim_s *itnim) +{ + struct bfa_tskim_s *tskim; + struct bfa_itnim_s *bfa_itnim; + bfa_status_t rc = BFA_STATUS_OK; + struct scsi_lun scsilun; + + tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); + if (!tskim) { + BFA_LOG(KERN_ERR, bfad, bfa_log_level, + "target reset, fail to allocate tskim\n"); + rc = BFA_STATUS_FAILED; + goto out; + } + + /* + * Set host_scribble to NULL to avoid aborting a task command if + * happens. + */ + cmnd->host_scribble = NULL; + bfad_priv(cmnd)->status = 0; + bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim); + /* + * bfa_itnim can be NULL if the port gets disconnected and the bfa + * and fcs layers have cleaned up their nexus with the targets and + * the same has not been cleaned up by the shim + */ + if (bfa_itnim == NULL) { + bfa_tskim_free(tskim); + BFA_LOG(KERN_ERR, bfad, bfa_log_level, + "target reset, bfa_itnim is NULL\n"); + rc = BFA_STATUS_FAILED; + goto out; + } + + memset(&scsilun, 0, sizeof(scsilun)); + bfa_tskim_start(tskim, bfa_itnim, scsilun, + FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO); +out: + return rc; +} + +/* + * Scsi_Host template entry, resets a LUN and abort its all commands. + * + * Returns: SUCCESS or FAILED. 
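+ * + * Completion is signalled through the per-command private data: + * bfa_cb_tskim_done() stores the bfi_tskim_status shifted left by one + * and sets IO_DONE_BIT (bit 0) in bfad_priv(cmnd)->status, so this + * handler waits on IO_DONE_BIT and recovers the task status with + * "status >> 1".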
+ * + */ +static int +bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd) +{ + struct Scsi_Host *shost = cmnd->device->host; + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata; + struct bfad_s *bfad = im_port->bfad; + struct bfa_tskim_s *tskim; + struct bfad_itnim_s *itnim; + struct bfa_itnim_s *bfa_itnim; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + int rc = SUCCESS; + unsigned long flags; + enum bfi_tskim_status task_status; + struct scsi_lun scsilun; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + itnim = itnim_data->itnim; + if (!itnim) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + rc = FAILED; + goto out; + } + + tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); + if (!tskim) { + BFA_LOG(KERN_ERR, bfad, bfa_log_level, + "LUN reset, fail to allocate tskim"); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + rc = FAILED; + goto out; + } + + /* + * Set host_scribble to NULL to avoid aborting a task command + * if happens. + */ + cmnd->host_scribble = NULL; + bfad_priv(cmnd)->wq = &wq; + bfad_priv(cmnd)->status = 0; + bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim); + /* + * bfa_itnim can be NULL if the port gets disconnected and the bfa + * and fcs layers have cleaned up their nexus with the targets and + * the same has not been cleaned up by the shim + */ + if (bfa_itnim == NULL) { + bfa_tskim_free(tskim); + BFA_LOG(KERN_ERR, bfad, bfa_log_level, + "lun reset, bfa_itnim is NULL\n"); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + rc = FAILED; + goto out; + } + int_to_scsilun(cmnd->device->lun, &scsilun); + bfa_tskim_start(tskim, bfa_itnim, scsilun, + FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + wait_event(wq, test_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status)); + + task_status = bfad_priv(cmnd)->status >> 1; + if (task_status != BFI_TSKIM_STS_OK) { + BFA_LOG(KERN_ERR, bfad, bfa_log_level, + "LUN reset failure, status: %d\n", task_status); + rc = FAILED; + } + +out: + return rc; +} + +/* + * Scsi_Host template entry, resets the target and abort all commands. + */ +static int +bfad_im_reset_target_handler(struct scsi_cmnd *cmnd) +{ + struct Scsi_Host *shost = cmnd->device->host; + struct scsi_target *starget = scsi_target(cmnd->device); + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + struct bfad_itnim_s *itnim; + unsigned long flags; + u32 rc, rtn = FAILED; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + enum bfi_tskim_status task_status; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + itnim = bfad_get_itnim(im_port, starget->id); + if (itnim) { + bfad_priv(cmnd)->wq = &wq; + rc = bfad_im_target_reset_send(bfad, cmnd, itnim); + if (rc == BFA_STATUS_OK) { + /* wait target reset to complete */ + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + wait_event(wq, test_bit(IO_DONE_BIT, + &bfad_priv(cmnd)->status)); + spin_lock_irqsave(&bfad->bfad_lock, flags); + + task_status = bfad_priv(cmnd)->status >> 1; + if (task_status != BFI_TSKIM_STS_OK) + BFA_LOG(KERN_ERR, bfad, bfa_log_level, + "target reset failure," + " status: %d\n", task_status); + else + rtn = SUCCESS; + } + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return rtn; +} + +/* + * Scsi_Host template entry slave_destroy. 
+ */ +static void +bfad_im_slave_destroy(struct scsi_device *sdev) +{ + sdev->hostdata = NULL; + return; +} + +/* + * BFA FCS itnim callbacks + */ + +/* + * BFA FCS itnim alloc callback, after successful PRLI + * Context: Interrupt + */ +int +bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, + struct bfad_itnim_s **itnim_drv) +{ + *itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC); + if (*itnim_drv == NULL) + return -ENOMEM; + + (*itnim_drv)->im = bfad->im; + *itnim = &(*itnim_drv)->fcs_itnim; + (*itnim_drv)->state = ITNIM_STATE_NONE; + + /* + * Initiaze the itnim_work + */ + INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler); + bfad->bfad_flags |= BFAD_RPORT_ONLINE; + return 0; +} + +/* + * BFA FCS itnim free callback. + * Context: Interrupt. bfad_lock is held + */ +void +bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv) +{ + struct bfad_port_s *port; + wwn_t wwpn; + u32 fcid; + char wwpn_str[32], fcid_str[16]; + struct bfad_im_s *im = itnim_drv->im; + + /* online to free state transtion should not happen */ + WARN_ON(itnim_drv->state == ITNIM_STATE_ONLINE); + + itnim_drv->queue_work = 1; + /* offline request is not yet done, use the same request to free */ + if (itnim_drv->state == ITNIM_STATE_OFFLINE_PENDING) + itnim_drv->queue_work = 0; + + itnim_drv->state = ITNIM_STATE_FREE; + port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim); + itnim_drv->im_port = port->im_port; + wwpn = bfa_fcs_itnim_get_pwwn(&itnim_drv->fcs_itnim); + fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim); + wwn2str(wwpn_str, wwpn); + fcid2str(fcid_str, fcid); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "ITNIM FREE scsi%d: FCID: %s WWPN: %s\n", + port->im_port->shost->host_no, + fcid_str, wwpn_str); + + /* ITNIM processing */ + if (itnim_drv->queue_work) + queue_work(im->drv_workq, &itnim_drv->itnim_work); +} + +/* + * BFA FCS itnim online callback. + * Context: Interrupt. bfad_lock is held + */ +void +bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv) +{ + struct bfad_port_s *port; + struct bfad_im_s *im = itnim_drv->im; + + itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim); + port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim); + itnim_drv->state = ITNIM_STATE_ONLINE; + itnim_drv->queue_work = 1; + itnim_drv->im_port = port->im_port; + + /* ITNIM processing */ + if (itnim_drv->queue_work) + queue_work(im->drv_workq, &itnim_drv->itnim_work); +} + +/* + * BFA FCS itnim offline callback. + * Context: Interrupt. bfad_lock is held + */ +void +bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv) +{ + struct bfad_port_s *port; + struct bfad_s *bfad; + struct bfad_im_s *im = itnim_drv->im; + + port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim); + bfad = port->bfad; + if ((bfad->pport.flags & BFAD_PORT_DELETE) || + (port->flags & BFAD_PORT_DELETE)) { + itnim_drv->state = ITNIM_STATE_OFFLINE; + return; + } + itnim_drv->im_port = port->im_port; + itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING; + itnim_drv->queue_work = 1; + + /* ITNIM processing */ + if (itnim_drv->queue_work) + queue_work(im->drv_workq, &itnim_drv->itnim_work); +} + +/* + * Allocate a Scsi_Host for a port. 
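+ * + * The Scsi_Host private data holds only a struct bfad_im_port_pointer + * back-reference to the bfad_im_port_s (see bfad_get_im_port()), and + * the idr id allocated here is published as shost->unique_id.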
+ */ +int +bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, + struct device *dev) +{ + struct bfad_im_port_pointer *im_portp; + int error; + + mutex_lock(&bfad_mutex); + error = idr_alloc(&bfad_im_port_index, im_port, 0, 0, GFP_KERNEL); + if (error < 0) { + mutex_unlock(&bfad_mutex); + printk(KERN_WARNING "idr_alloc failure\n"); + goto out; + } + im_port->idr_id = error; + mutex_unlock(&bfad_mutex); + + im_port->shost = bfad_scsi_host_alloc(im_port, bfad); + if (!im_port->shost) { + error = 1; + goto out_free_idr; + } + + im_portp = shost_priv(im_port->shost); + im_portp->p = im_port; + im_port->shost->unique_id = im_port->idr_id; + im_port->shost->this_id = -1; + im_port->shost->max_id = MAX_FCP_TARGET; + im_port->shost->max_lun = MAX_FCP_LUN; + im_port->shost->max_cmd_len = 16; + im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth; + if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE) + im_port->shost->transportt = bfad_im_scsi_transport_template; + else + im_port->shost->transportt = + bfad_im_scsi_vport_transport_template; + + error = scsi_add_host_with_dma(im_port->shost, dev, &bfad->pcidev->dev); + if (error) { + printk(KERN_WARNING "scsi_add_host failure %d\n", error); + goto out_fc_rel; + } + + return 0; + +out_fc_rel: + scsi_host_put(im_port->shost); + im_port->shost = NULL; +out_free_idr: + mutex_lock(&bfad_mutex); + idr_remove(&bfad_im_port_index, im_port->idr_id); + mutex_unlock(&bfad_mutex); +out: + return error; +} + +void +bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) +{ + bfa_trc(bfad, bfad->inst_no); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n", + im_port->shost->host_no); + + fc_remove_host(im_port->shost); + + scsi_remove_host(im_port->shost); + scsi_host_put(im_port->shost); + + mutex_lock(&bfad_mutex); + idr_remove(&bfad_im_port_index, im_port->idr_id); + mutex_unlock(&bfad_mutex); +} + +static void +bfad_im_port_delete_handler(struct work_struct *work) +{ + struct bfad_im_port_s *im_port = + container_of(work, struct bfad_im_port_s, port_delete_work); + + if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) { + im_port->flags |= BFAD_PORT_DELETE; + fc_vport_terminate(im_port->fc_vport); + } +} + +bfa_status_t +bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port) +{ + int rc = BFA_STATUS_OK; + struct bfad_im_port_s *im_port; + + im_port = kzalloc(sizeof(struct bfad_im_port_s), GFP_ATOMIC); + if (im_port == NULL) { + rc = BFA_STATUS_ENOMEM; + goto ext; + } + port->im_port = im_port; + im_port->port = port; + im_port->bfad = bfad; + + INIT_WORK(&im_port->port_delete_work, bfad_im_port_delete_handler); + INIT_LIST_HEAD(&im_port->itnim_mapped_list); + INIT_LIST_HEAD(&im_port->binding_list); + +ext: + return rc; +} + +void +bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port) +{ + struct bfad_im_port_s *im_port = port->im_port; + + queue_work(bfad->im->drv_workq, + &im_port->port_delete_work); +} + +void +bfad_im_port_clean(struct bfad_im_port_s *im_port) +{ + struct bfad_fcp_binding *bp, *bp_new; + unsigned long flags; + struct bfad_s *bfad = im_port->bfad; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + list_for_each_entry_safe(bp, bp_new, &im_port->binding_list, + list_entry) { + list_del(&bp->list_entry); + kfree(bp); + } + + /* the itnim_mapped_list must be empty at this time */ + WARN_ON(!list_empty(&im_port->itnim_mapped_list)); + + spin_unlock_irqrestore(&bfad->bfad_lock, flags); +} + +static void bfad_aen_im_notify_handler(struct work_struct *work) +{ + struct 
bfad_im_s *im = + container_of(work, struct bfad_im_s, aen_im_notify_work); + struct bfa_aen_entry_s *aen_entry; + struct bfad_s *bfad = im->bfad; + struct Scsi_Host *shost = bfad->pport.im_port->shost; + void *event_data; + unsigned long flags; + + while (!list_empty(&bfad->active_aen_q)) { + spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags); + bfa_q_deq(&bfad->active_aen_q, &aen_entry); + spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags); + event_data = (char *)aen_entry + sizeof(struct list_head); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(struct bfa_aen_entry_s) - + sizeof(struct list_head), + (char *)event_data, BFAD_NL_VENDOR_ID); + spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags); + list_add_tail(&aen_entry->qe, &bfad->free_aen_q); + spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags); + } +} + +bfa_status_t +bfad_im_probe(struct bfad_s *bfad) +{ + struct bfad_im_s *im; + + im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL); + if (im == NULL) + return BFA_STATUS_ENOMEM; + + bfad->im = im; + im->bfad = bfad; + + if (bfad_thread_workq(bfad) != BFA_STATUS_OK) { + kfree(im); + return BFA_STATUS_FAILED; + } + + INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler); + return BFA_STATUS_OK; +} + +void +bfad_im_probe_undo(struct bfad_s *bfad) +{ + if (bfad->im) { + bfad_destroy_workq(bfad->im); + kfree(bfad->im); + bfad->im = NULL; + } +} + +struct Scsi_Host * +bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) +{ + struct scsi_host_template *sht; + + if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE) + sht = &bfad_im_scsi_host_template; + else + sht = &bfad_im_vport_template; + + if (max_xfer_size != BFAD_MAX_SECTORS >> 1) + sht->max_sectors = max_xfer_size << 1; + + sht->sg_tablesize = bfad->cfg_data.io_max_sge; + + return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer)); +} + +void +bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) +{ + if (!(im_port->flags & BFAD_PORT_DELETE)) + flush_workqueue(bfad->im->drv_workq); + bfad_im_scsi_host_free(im_port->bfad, im_port); + bfad_im_port_clean(im_port); + kfree(im_port); +} + +void +bfad_destroy_workq(struct bfad_im_s *im) +{ + if (im && im->drv_workq) { + destroy_workqueue(im->drv_workq); + im->drv_workq = NULL; + } +} + +bfa_status_t +bfad_thread_workq(struct bfad_s *bfad) +{ + struct bfad_im_s *im = bfad->im; + + bfa_trc(bfad, 0); + snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d", + bfad->inst_no); + im->drv_workq = create_singlethread_workqueue(im->drv_workq_name); + if (!im->drv_workq) + return BFA_STATUS_FAILED; + + return BFA_STATUS_OK; +} + +/* + * Scsi_Host template entry. + * + * Description: + * OS entry point to adjust the queue_depths on a per-device basis. + * Called once per device during the bus scan. + * Return non-zero if fails. 
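+ * + * The depth applied here is the driver-wide bfa_lun_queue_depth setting; + * completions can later ramp a device back up via bfad_ramp_up_qdepth() + * or back it off on SAM_STAT_TASK_SET_FULL via bfad_handle_qfull().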
+ */ +static int +bfad_im_slave_configure(struct scsi_device *sdev) +{ + scsi_change_queue_depth(sdev, bfa_lun_queue_depth); + return 0; +} + +struct scsi_host_template bfad_im_scsi_host_template = { + .module = THIS_MODULE, + .name = BFAD_DRIVER_NAME, + .info = bfad_im_info, + .queuecommand = bfad_im_queuecommand, + .cmd_size = sizeof(struct bfad_cmd_priv), + .eh_timed_out = fc_eh_timed_out, + .eh_abort_handler = bfad_im_abort_handler, + .eh_device_reset_handler = bfad_im_reset_lun_handler, + .eh_target_reset_handler = bfad_im_reset_target_handler, + + .slave_alloc = bfad_im_slave_alloc, + .slave_configure = bfad_im_slave_configure, + .slave_destroy = bfad_im_slave_destroy, + + .this_id = -1, + .sg_tablesize = BFAD_IO_MAX_SGE, + .cmd_per_lun = 3, + .shost_groups = bfad_im_host_groups, + .max_sectors = BFAD_MAX_SECTORS, + .vendor_id = BFA_PCI_VENDOR_ID_BROCADE, +}; + +struct scsi_host_template bfad_im_vport_template = { + .module = THIS_MODULE, + .name = BFAD_DRIVER_NAME, + .info = bfad_im_info, + .queuecommand = bfad_im_queuecommand, + .cmd_size = sizeof(struct bfad_cmd_priv), + .eh_timed_out = fc_eh_timed_out, + .eh_abort_handler = bfad_im_abort_handler, + .eh_device_reset_handler = bfad_im_reset_lun_handler, + .eh_target_reset_handler = bfad_im_reset_target_handler, + + .slave_alloc = bfad_im_slave_alloc, + .slave_configure = bfad_im_slave_configure, + .slave_destroy = bfad_im_slave_destroy, + + .this_id = -1, + .sg_tablesize = BFAD_IO_MAX_SGE, + .cmd_per_lun = 3, + .shost_groups = bfad_im_vport_groups, + .max_sectors = BFAD_MAX_SECTORS, +}; + +bfa_status_t +bfad_im_module_init(void) +{ + bfad_im_scsi_transport_template = + fc_attach_transport(&bfad_im_fc_function_template); + if (!bfad_im_scsi_transport_template) + return BFA_STATUS_ENOMEM; + + bfad_im_scsi_vport_transport_template = + fc_attach_transport(&bfad_im_vport_fc_function_template); + if (!bfad_im_scsi_vport_transport_template) { + fc_release_transport(bfad_im_scsi_transport_template); + return BFA_STATUS_ENOMEM; + } + + return BFA_STATUS_OK; +} + +void +bfad_im_module_exit(void) +{ + if (bfad_im_scsi_transport_template) + fc_release_transport(bfad_im_scsi_transport_template); + + if (bfad_im_scsi_vport_transport_template) + fc_release_transport(bfad_im_scsi_vport_transport_template); + + idr_destroy(&bfad_im_port_index); +} + +void +bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev) +{ + struct scsi_device *tmp_sdev; + + if (((jiffies - itnim->last_ramp_up_time) > + BFA_QUEUE_FULL_RAMP_UP_TIME * HZ) && + ((jiffies - itnim->last_queue_full_time) > + BFA_QUEUE_FULL_RAMP_UP_TIME * HZ)) { + shost_for_each_device(tmp_sdev, sdev->host) { + if (bfa_lun_queue_depth > tmp_sdev->queue_depth) { + if (tmp_sdev->id != sdev->id) + continue; + scsi_change_queue_depth(tmp_sdev, + tmp_sdev->queue_depth + 1); + + itnim->last_ramp_up_time = jiffies; + } + } + } +} + +void +bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev) +{ + struct scsi_device *tmp_sdev; + + itnim->last_queue_full_time = jiffies; + + shost_for_each_device(tmp_sdev, sdev->host) { + if (tmp_sdev->id != sdev->id) + continue; + scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1); + } +} + +struct bfad_itnim_s * +bfad_get_itnim(struct bfad_im_port_s *im_port, int id) +{ + struct bfad_itnim_s *itnim = NULL; + + /* Search the mapped list for this target ID */ + list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) { + if (id == itnim->scsi_tgt_id) + return itnim; + } + + return NULL; +} + +/* + * Function is invoked 
from the SCSI Host Template slave_alloc() entry point. + * Has the logic to query the LUN Mask database to check if this LUN needs to + * be made visible to the SCSI mid-layer or not. + * + * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack. + * Returns -ENXIO to notify SCSI mid-layer to not add this LUN to the OS stack. + */ +static int +bfad_im_check_if_make_lun_visible(struct scsi_device *sdev, + struct fc_rport *rport) +{ + struct bfad_itnim_data_s *itnim_data = + (struct bfad_itnim_data_s *) rport->dd_data; + struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa; + struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport; + struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa); + int i = 0, ret = -ENXIO; + + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE && + scsilun_to_int(&lun_list[i].lun) == sdev->lun && + lun_list[i].rp_tag == bfa_rport->rport_tag && + lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) { + ret = BFA_STATUS_OK; + break; + } + } + return ret; +} + +/* + * Scsi_Host template entry slave_alloc + */ +static int +bfad_im_slave_alloc(struct scsi_device *sdev) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct bfad_itnim_data_s *itnim_data; + struct bfa_s *bfa; + + if (!rport || fc_remote_port_chkready(rport)) + return -ENXIO; + + itnim_data = (struct bfad_itnim_data_s *) rport->dd_data; + bfa = itnim_data->itnim->bfa_itnim->bfa; + + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) { + /* + * We should not mask LUN 0 - since this will translate + * to no LUN / TARGET for SCSI ml resulting no scan. + */ + if (sdev->lun == 0) { + sdev->sdev_bflags |= BLIST_NOREPORTLUN | + BLIST_SPARSELUN; + goto done; + } + + /* + * Query LUN Mask configuration - to expose this LUN + * to the SCSI mid-layer or to mask it. 
+ */ + if (bfad_im_check_if_make_lun_visible(sdev, rport) != + BFA_STATUS_OK) + return -ENXIO; + } +done: + sdev->hostdata = rport->dd_data; + + return 0; +} + +u32 +bfad_im_supported_speeds(struct bfa_s *bfa) +{ + struct bfa_ioc_attr_s *ioc_attr; + u32 supported_speed = 0; + + ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL); + if (!ioc_attr) + return 0; + + bfa_ioc_get_attr(&bfa->ioc, ioc_attr); + if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_16GBPS) + supported_speed |= FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT | + FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT; + else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) { + if (ioc_attr->adapter_attr.is_mezz) { + supported_speed |= FC_PORTSPEED_8GBIT | + FC_PORTSPEED_4GBIT | + FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; + } else { + supported_speed |= FC_PORTSPEED_8GBIT | + FC_PORTSPEED_4GBIT | + FC_PORTSPEED_2GBIT; + } + } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) { + supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | + FC_PORTSPEED_1GBIT; + } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) { + supported_speed |= FC_PORTSPEED_10GBIT; + } + kfree(ioc_attr); + return supported_speed; +} + +void +bfad_fc_host_init(struct bfad_im_port_s *im_port) +{ + struct Scsi_Host *host = im_port->shost; + struct bfad_s *bfad = im_port->bfad; + struct bfad_port_s *port = im_port->port; + char symname[BFA_SYMNAME_MAXLEN]; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + + fc_host_node_name(host) = + cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port))); + fc_host_port_name(host) = + cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port))); + fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa); + + fc_host_supported_classes(host) = FC_COS_CLASS3; + + memset(fc_host_supported_fc4s(host), 0, + sizeof(fc_host_supported_fc4s(host))); + if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM) + /* For FCP type 0x08 */ + fc_host_supported_fc4s(host)[2] = 1; + /* For fibre channel services type 0x20 */ + fc_host_supported_fc4s(host)[7] = 1; + + strscpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname, + BFA_SYMNAME_MAXLEN); + sprintf(fc_host_symbolic_name(host), "%s", symname); + + fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa); + fc_host_maxframe_size(host) = fcport->cfg.maxfrsize; +} + +static void +bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim) +{ + struct fc_rport_identifiers rport_ids; + struct fc_rport *fc_rport; + struct bfad_itnim_data_s *itnim_data; + + rport_ids.node_name = + cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim)); + rport_ids.port_name = + cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); + rport_ids.port_id = + bfa_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim)); + rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; + + itnim->fc_rport = fc_rport = + fc_remote_port_add(im_port->shost, 0, &rport_ids); + + if (!fc_rport) + return; + + fc_rport->maxframe_size = + bfa_fcs_itnim_get_maxfrsize(&itnim->fcs_itnim); + fc_rport->supported_classes = bfa_fcs_itnim_get_cos(&itnim->fcs_itnim); + + itnim_data = fc_rport->dd_data; + itnim_data->itnim = itnim; + + rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; + + if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) + fc_remote_port_rolechg(fc_rport, rport_ids.roles); + + if ((fc_rport->scsi_target_id != -1) + && (fc_rport->scsi_target_id < MAX_FCP_TARGET)) + itnim->scsi_tgt_id = fc_rport->scsi_target_id; + + itnim->channel = fc_rport->channel; + + return; 
+} + +/* + * Work queue handler using FC transport service +* Context: kernel + */ +static void +bfad_im_itnim_work_handler(struct work_struct *work) +{ + struct bfad_itnim_s *itnim = container_of(work, struct bfad_itnim_s, + itnim_work); + struct bfad_im_s *im = itnim->im; + struct bfad_s *bfad = im->bfad; + struct bfad_im_port_s *im_port; + unsigned long flags; + struct fc_rport *fc_rport; + wwn_t wwpn; + u32 fcid; + char wwpn_str[32], fcid_str[16]; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + im_port = itnim->im_port; + bfa_trc(bfad, itnim->state); + switch (itnim->state) { + case ITNIM_STATE_ONLINE: + if (!itnim->fc_rport) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfad_im_fc_rport_add(im_port, itnim); + spin_lock_irqsave(&bfad->bfad_lock, flags); + wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); + fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim); + wwn2str(wwpn_str, wwpn); + fcid2str(fcid_str, fcid); + list_add_tail(&itnim->list_entry, + &im_port->itnim_mapped_list); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "ITNIM ONLINE Target: %d:0:%d " + "FCID: %s WWPN: %s\n", + im_port->shost->host_no, + itnim->scsi_tgt_id, + fcid_str, wwpn_str); + } else { + printk(KERN_WARNING + "%s: itnim %llx is already in online state\n", + __func__, + bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); + } + + break; + case ITNIM_STATE_OFFLINE_PENDING: + itnim->state = ITNIM_STATE_OFFLINE; + if (itnim->fc_rport) { + fc_rport = itnim->fc_rport; + ((struct bfad_itnim_data_s *) + fc_rport->dd_data)->itnim = NULL; + itnim->fc_rport = NULL; + if (!(im_port->port->flags & BFAD_PORT_DELETE)) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + fc_rport->dev_loss_tmo = + bfa_fcpim_path_tov_get(&bfad->bfa) + 1; + fc_remote_port_delete(fc_rport); + spin_lock_irqsave(&bfad->bfad_lock, flags); + } + wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); + fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim); + wwn2str(wwpn_str, wwpn); + fcid2str(fcid_str, fcid); + list_del(&itnim->list_entry); + BFA_LOG(KERN_INFO, bfad, bfa_log_level, + "ITNIM OFFLINE Target: %d:0:%d " + "FCID: %s WWPN: %s\n", + im_port->shost->host_no, + itnim->scsi_tgt_id, + fcid_str, wwpn_str); + } + break; + case ITNIM_STATE_FREE: + if (itnim->fc_rport) { + fc_rport = itnim->fc_rport; + ((struct bfad_itnim_data_s *) + fc_rport->dd_data)->itnim = NULL; + itnim->fc_rport = NULL; + if (!(im_port->port->flags & BFAD_PORT_DELETE)) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + fc_rport->dev_loss_tmo = + bfa_fcpim_path_tov_get(&bfad->bfa) + 1; + fc_remote_port_delete(fc_rport); + spin_lock_irqsave(&bfad->bfad_lock, flags); + } + list_del(&itnim->list_entry); + } + + kfree(itnim); + break; + default: + WARN_ON(1); + break; + } + + spin_unlock_irqrestore(&bfad->bfad_lock, flags); +} + +/* + * Scsi_Host template entry, queue a SCSI command to the BFAD. 
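+ * + * On success the allocated bfa_ioim_s is linked to the command through + * cmnd->host_scribble; bfad_im_abort_handler() later uses that pointer + * to find the outstanding I/O, and the completion callbacks clear it.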
+ */ +static int bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd) +{ + void (*done)(struct scsi_cmnd *) = scsi_done; + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) cmnd->device->host->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata; + struct bfad_itnim_s *itnim; + struct bfa_ioim_s *hal_io; + unsigned long flags; + int rc; + int sg_cnt = 0; + struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + + rc = fc_remote_port_chkready(rport); + if (rc) { + cmnd->result = rc; + done(cmnd); + return 0; + } + + if (bfad->bfad_flags & BFAD_EEH_BUSY) { + if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE) + cmnd->result = DID_NO_CONNECT << 16; + else + cmnd->result = DID_REQUEUE << 16; + done(cmnd); + return 0; + } + + sg_cnt = scsi_dma_map(cmnd); + if (sg_cnt < 0) + return SCSI_MLQUEUE_HOST_BUSY; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) { + printk(KERN_WARNING + "bfad%d, queuecommand %p %x failed, BFA stopped\n", + bfad->inst_no, cmnd, cmnd->cmnd[0]); + cmnd->result = DID_NO_CONNECT << 16; + goto out_fail_cmd; + } + + + itnim = itnim_data->itnim; + if (!itnim) { + cmnd->result = DID_IMM_RETRY << 16; + goto out_fail_cmd; + } + + hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd, + itnim->bfa_itnim, sg_cnt); + if (!hal_io) { + printk(KERN_WARNING "hal_io failure\n"); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + scsi_dma_unmap(cmnd); + return SCSI_MLQUEUE_HOST_BUSY; + } + + cmnd->host_scribble = (char *)hal_io; + bfa_ioim_start(hal_io); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; + +out_fail_cmd: + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + scsi_dma_unmap(cmnd); + if (done) + done(cmnd); + + return 0; +} + +static DEF_SCSI_QCMD(bfad_im_queuecommand) + +void +bfad_rport_online_wait(struct bfad_s *bfad) +{ + int i; + int rport_delay = 10; + + for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE) + && i < bfa_linkup_delay; i++) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ); + } + + if (bfad->bfad_flags & BFAD_PORT_ONLINE) { + rport_delay = rport_delay < bfa_linkup_delay ? + rport_delay : bfa_linkup_delay; + for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE) + && i < rport_delay; i++) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ); + } + + if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(rport_delay * HZ); + } + } +} + +int +bfad_get_linkup_delay(struct bfad_s *bfad) +{ + u8 nwwns = 0; + wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX]; + int linkup_delay; + + /* + * Querying for the boot target port wwns + * -- read from boot information in flash. + * If nwwns > 0 => boot over SAN and set linkup_delay = 30 + * else => local boot machine set linkup_delay = 0 + */ + + bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns); + + if (nwwns > 0) + /* If Boot over SAN set linkup_delay = 30sec */ + linkup_delay = 30; + else + /* If local boot; no linkup_delay */ + linkup_delay = 0; + + return linkup_delay; +} diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h new file mode 100644 index 000000000..4353feedf --- /dev/null +++ b/drivers/scsi/bfa/bfad_im.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. 
+ * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +#ifndef __BFAD_IM_H__ +#define __BFAD_IM_H__ + +#include "bfa_fcs.h" + +#define FCPI_NAME " fcpim" + +#ifndef KOBJ_NAME_LEN +#define KOBJ_NAME_LEN 20 +#endif + +bfa_status_t bfad_im_module_init(void); +void bfad_im_module_exit(void); +bfa_status_t bfad_im_probe(struct bfad_s *bfad); +void bfad_im_probe_undo(struct bfad_s *bfad); +bfa_status_t bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port); +void bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port); +void bfad_im_port_clean(struct bfad_im_port_s *im_port); +int bfad_im_scsi_host_alloc(struct bfad_s *bfad, + struct bfad_im_port_s *im_port, struct device *dev); +void bfad_im_scsi_host_free(struct bfad_s *bfad, + struct bfad_im_port_s *im_port); +u32 bfad_im_supported_speeds(struct bfa_s *bfa); + +#define MAX_FCP_TARGET 1024 +#define MAX_FCP_LUN 16384 +#define BFAD_TARGET_RESET_TMO 60 +#define BFAD_LUN_RESET_TMO 60 +#define BFA_QUEUE_FULL_RAMP_UP_TIME 120 + +/* + * itnim flags + */ +#define IO_DONE_BIT 0 + +/** + * struct bfad_cmd_priv - private data per SCSI command. + * @status: Lowest bit represents IO_DONE. The next seven bits hold a value of + * type enum bfi_tskim_status. + * @wq: Wait queue used to wait for completion of an operation. + */ +struct bfad_cmd_priv { + unsigned long status; + wait_queue_head_t *wq; +}; + +static inline struct bfad_cmd_priv *bfad_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +struct bfad_itnim_data_s { + struct bfad_itnim_s *itnim; +}; + +struct bfad_im_port_s { + struct bfad_s *bfad; + struct bfad_port_s *port; + struct work_struct port_delete_work; + int idr_id; + u16 cur_scsi_id; + u16 flags; + struct list_head binding_list; + struct Scsi_Host *shost; + struct list_head itnim_mapped_list; + struct fc_vport *fc_vport; +}; + +struct bfad_im_port_pointer { + struct bfad_im_port_s *p; +}; + +static inline struct bfad_im_port_s *bfad_get_im_port(struct Scsi_Host *host) +{ + struct bfad_im_port_pointer *im_portp = shost_priv(host); + return im_portp->p; +} + +enum bfad_itnim_state { + ITNIM_STATE_NONE, + ITNIM_STATE_ONLINE, + ITNIM_STATE_OFFLINE_PENDING, + ITNIM_STATE_OFFLINE, + ITNIM_STATE_TIMEOUT, + ITNIM_STATE_FREE, +}; + +/* + * Per itnim data structure + */ +struct bfad_itnim_s { + struct list_head list_entry; + struct bfa_fcs_itnim_s fcs_itnim; + struct work_struct itnim_work; + u32 flags; + enum bfad_itnim_state state; + struct bfad_im_s *im; + struct bfad_im_port_s *im_port; + struct bfad_rport_s *drv_rport; + struct fc_rport *fc_rport; + struct bfa_itnim_s *bfa_itnim; + u16 scsi_tgt_id; + u16 channel; + u16 queue_work; + unsigned long last_ramp_up_time; + unsigned long last_queue_full_time; +}; + +enum bfad_binding_type { + FCP_PWWN_BINDING = 0x1, + FCP_NWWN_BINDING = 0x2, + FCP_FCID_BINDING = 0x3, +}; + +struct bfad_fcp_binding { + struct list_head list_entry; + enum bfad_binding_type binding_type; + u16 scsi_target_id; + u32 fc_id; + wwn_t nwwn; + wwn_t pwwn; +}; + +struct bfad_im_s { + struct bfad_s *bfad; + struct workqueue_struct *drv_workq; + char drv_workq_name[KOBJ_NAME_LEN]; + struct work_struct aen_im_notify_work; +}; + +#define bfad_get_aen_entry(_drv, _entry) do { \ + unsigned long _flags; \ + spin_lock_irqsave(&(_drv)->bfad_aen_spinlock, _flags); \ + bfa_q_deq(&(_drv)->free_aen_q, &(_entry)); \ + if (_entry) \ + list_add_tail(&(_entry)->qe, &(_drv)->active_aen_q); \ + spin_unlock_irqrestore(&(_drv)->bfad_aen_spinlock, 
_flags); \ +} while (0) + +/* post fc_host vendor event */ +static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry, + struct bfad_s *drv, int cnt, + enum bfa_aen_category cat, + int evt) +{ + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + /* + * 'unsigned long aen_tv_sec' overflows in y2106 on 32-bit + * architectures, or in 2038 if user space interprets it + * as 'signed'. + */ + entry->aen_tv_sec = ts.tv_sec; + entry->aen_tv_usec = ts.tv_nsec / NSEC_PER_USEC; + entry->bfad_num = drv->inst_no; + entry->seq_num = cnt; + entry->aen_category = cat; + entry->aen_type = evt; + if (drv->bfad_flags & BFAD_FC4_PROBE_DONE) + queue_work(drv->im->drv_workq, &drv->im->aen_im_notify_work); +} + +struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, + struct bfad_s *); +bfa_status_t bfad_thread_workq(struct bfad_s *bfad); +void bfad_destroy_workq(struct bfad_im_s *im); +void bfad_fc_host_init(struct bfad_im_port_s *im_port); +void bfad_scsi_host_free(struct bfad_s *bfad, + struct bfad_im_port_s *im_port); +void bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, + struct scsi_device *sdev); +void bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev); +struct bfad_itnim_s *bfad_get_itnim(struct bfad_im_port_s *im_port, int id); + +extern struct scsi_host_template bfad_im_scsi_host_template; +extern struct scsi_host_template bfad_im_vport_template; +extern struct fc_function_template bfad_im_fc_function_template; +extern struct fc_function_template bfad_im_vport_fc_function_template; +extern struct scsi_transport_template *bfad_im_scsi_transport_template; +extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template; + +extern const struct attribute_group *bfad_im_host_groups[]; +extern const struct attribute_group *bfad_im_vport_groups[]; + +irqreturn_t bfad_intx(int irq, void *dev_id); + +int bfad_im_bsg_request(struct bsg_job *job); +int bfad_im_bsg_timeout(struct bsg_job *job); + +#endif diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h new file mode 100644 index 000000000..41e6b4dac --- /dev/null +++ b/drivers/scsi/bfa/bfi.h @@ -0,0 +1,1317 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
+ */ + +#ifndef __BFI_H__ +#define __BFI_H__ + +#include "bfa_defs.h" +#include "bfa_defs_svc.h" + +#pragma pack(1) + +/* Per dma segment max size */ +#define BFI_MEM_DMA_SEG_SZ (131072) + +/* Get number of dma segments required */ +#define BFI_MEM_DMA_NSEGS(_num_reqs, _req_sz) \ + ((u16)(((((_num_reqs) * (_req_sz)) + BFI_MEM_DMA_SEG_SZ - 1) & \ + ~(BFI_MEM_DMA_SEG_SZ - 1)) / BFI_MEM_DMA_SEG_SZ)) + +/* Get num dma reqs - that fit in a segment */ +#define BFI_MEM_NREQS_SEG(_rqsz) (BFI_MEM_DMA_SEG_SZ / (_rqsz)) + +/* Get segment num from tag */ +#define BFI_MEM_SEG_FROM_TAG(_tag, _rqsz) ((_tag) / BFI_MEM_NREQS_SEG(_rqsz)) + +/* Get dma req offset in a segment */ +#define BFI_MEM_SEG_REQ_OFFSET(_tag, _sz) \ + ((_tag) - (BFI_MEM_SEG_FROM_TAG(_tag, _sz) * BFI_MEM_NREQS_SEG(_sz))) + +/* + * BFI FW image type + */ +#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */ +#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) +#define BFI_FLASH_IMAGE_SZ 0x100000 + +/* + * Msg header common to all msgs + */ +struct bfi_mhdr_s { + u8 msg_class; /* @ref bfi_mclass_t */ + u8 msg_id; /* msg opcode with in the class */ + union { + struct { + u8 qid; + u8 fn_lpu; /* msg destination */ + } h2i; + u16 i2htok; /* token in msgs to host */ + } mtag; +}; + +#define bfi_fn_lpu(__fn, __lpu) ((__fn) << 1 | (__lpu)) +#define bfi_mhdr_2_fn(_mh) ((_mh)->mtag.h2i.fn_lpu >> 1) + +#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do { \ + (_mh).msg_class = (_mc); \ + (_mh).msg_id = (_op); \ + (_mh).mtag.h2i.fn_lpu = (_fn_lpu); \ +} while (0) + +#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \ + (_mh).msg_class = (_mc); \ + (_mh).msg_id = (_op); \ + (_mh).mtag.i2htok = (_i2htok); \ +} while (0) + +/* + * Message opcodes: 0-127 to firmware, 128-255 to host + */ +#define BFI_I2H_OPCODE_BASE 128 +#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE) + +/* + **************************************************************************** + * + * Scatter Gather Element and Page definition + * + **************************************************************************** + */ + +#define BFI_SGE_INLINE 1 +#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1) + +/* + * SG Flags + */ +enum { + BFI_SGE_DATA = 0, /* data address, not last */ + BFI_SGE_DATA_CPL = 1, /* data addr, last in current page */ + BFI_SGE_DATA_LAST = 3, /* data address, last */ + BFI_SGE_LINK = 2, /* link address */ + BFI_SGE_PGDLEN = 2, /* cumulative data length for page */ +}; + +/* + * DMA addresses + */ +union bfi_addr_u { + struct { + __be32 addr_lo; + __be32 addr_hi; + } a32; +}; + +/* + * Scatter Gather Element used for fast-path IO requests + */ +struct bfi_sge_s { +#ifdef __BIG_ENDIAN + u32 flags:2, + rsvd:2, + sg_len:28; +#else + u32 sg_len:28, + rsvd:2, + flags:2; +#endif + union bfi_addr_u sga; +}; + +/** + * Generic DMA addr-len pair. 
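+ * Describes a single host DMA buffer (address plus length) handed to the
+ * firmware; used by message payloads such as the flash, SFP, PHY and FRU
+ * requests defined later in this file.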
+ */ +struct bfi_alen_s { + union bfi_addr_u al_addr; /* DMA addr of buffer */ + u32 al_len; /* length of buffer */ +}; + +/* + * Scatter Gather Page + */ +#define BFI_SGPG_DATA_SGES 7 +#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1) +#define BFI_SGPG_RSVD_WD_LEN 8 +struct bfi_sgpg_s { + struct bfi_sge_s sges[BFI_SGPG_SGES_MAX]; + u32 rsvd[BFI_SGPG_RSVD_WD_LEN]; +}; + +/* FCP module definitions */ +#define BFI_IO_MAX (2000) +#define BFI_IOIM_SNSLEN (256) +#define BFI_IOIM_SNSBUF_SEGS \ + BFI_MEM_DMA_NSEGS(BFI_IO_MAX, BFI_IOIM_SNSLEN) + +/* + * Large Message structure - 128 Bytes size Msgs + */ +#define BFI_LMSG_SZ 128 +#define BFI_LMSG_PL_WSZ \ + ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr_s)) / 4) + +struct bfi_msg_s { + struct bfi_mhdr_s mhdr; + u32 pl[BFI_LMSG_PL_WSZ]; +}; + +/* + * Mailbox message structure + */ +#define BFI_MBMSG_SZ 7 +struct bfi_mbmsg_s { + struct bfi_mhdr_s mh; + u32 pl[BFI_MBMSG_SZ]; +}; + +/* + * Supported PCI function class codes (personality) + */ +enum bfi_pcifn_class { + BFI_PCIFN_CLASS_FC = 0x0c04, + BFI_PCIFN_CLASS_ETH = 0x0200, +}; + +/* + * Message Classes + */ +enum bfi_mclass { + BFI_MC_IOC = 1, /* IO Controller (IOC) */ + BFI_MC_DIAG = 2, /* Diagnostic Msgs */ + BFI_MC_FLASH = 3, /* Flash message class */ + BFI_MC_CEE = 4, /* CEE */ + BFI_MC_FCPORT = 5, /* FC port */ + BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */ + BFI_MC_ABLK = 7, /* ASIC block configuration */ + BFI_MC_UF = 8, /* Unsolicited frame receive */ + BFI_MC_FCXP = 9, /* FC Transport */ + BFI_MC_LPS = 10, /* lport fc login services */ + BFI_MC_RPORT = 11, /* Remote port */ + BFI_MC_ITN = 12, /* I-T nexus (Initiator mode) */ + BFI_MC_IOIM_READ = 13, /* read IO (Initiator mode) */ + BFI_MC_IOIM_WRITE = 14, /* write IO (Initiator mode) */ + BFI_MC_IOIM_IO = 15, /* IO (Initiator mode) */ + BFI_MC_IOIM = 16, /* IO (Initiator mode) */ + BFI_MC_IOIM_IOCOM = 17, /* good IO completion */ + BFI_MC_TSKIM = 18, /* Initiator Task management */ + BFI_MC_PORT = 21, /* Physical port */ + BFI_MC_SFP = 22, /* SFP module */ + BFI_MC_PHY = 25, /* External PHY message class */ + BFI_MC_FRU = 34, + BFI_MC_MAX = 35 +}; + +#define BFI_IOC_MAX_CQS 4 +#define BFI_IOC_MAX_CQS_ASIC 8 +#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */ + +/* + *---------------------------------------------------------------------- + * IOC + *---------------------------------------------------------------------- + */ + +/* + * Different asic generations + */ +enum bfi_asic_gen { + BFI_ASIC_GEN_CB = 1, /* crossbow 8G FC */ + BFI_ASIC_GEN_CT = 2, /* catapult 8G FC or 10G CNA */ + BFI_ASIC_GEN_CT2 = 3, /* catapult-2 16G FC or 10G CNA */ +}; + +enum bfi_asic_mode { + BFI_ASIC_MODE_FC = 1, /* FC upto 8G speed */ + BFI_ASIC_MODE_FC16 = 2, /* FC upto 16G speed */ + BFI_ASIC_MODE_ETH = 3, /* Ethernet ports */ + BFI_ASIC_MODE_COMBO = 4, /* FC 16G and Ethernet 10G port */ +}; + +enum bfi_ioc_h2i_msgs { + BFI_IOC_H2I_ENABLE_REQ = 1, + BFI_IOC_H2I_DISABLE_REQ = 2, + BFI_IOC_H2I_GETATTR_REQ = 3, + BFI_IOC_H2I_DBG_SYNC = 4, + BFI_IOC_H2I_DBG_DUMP = 5, +}; + +enum bfi_ioc_i2h_msgs { + BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1), + BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2), + BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3), + BFI_IOC_I2H_HBEAT = BFA_I2HM(4), + BFI_IOC_I2H_ACQ_ADDR_REPLY = BFA_I2HM(5), +}; + +/* + * BFI_IOC_H2I_GETATTR_REQ message + */ +struct bfi_ioc_getattr_req_s { + struct bfi_mhdr_s mh; + union bfi_addr_u attr_addr; +}; + +#define BFI_IOC_ATTR_UUID_SZ 16 +struct bfi_ioc_attr_s { + wwn_t mfg_pwwn; /* Mfg port wwn */ + wwn_t mfg_nwwn; /* Mfg node wwn */ + 
mac_t mfg_mac; /* Mfg mac */ + u8 port_mode; /* bfi_port_mode */ + u8 rsvd_a; + wwn_t pwwn; + wwn_t nwwn; + mac_t mac; /* PBC or Mfg mac */ + u16 rsvd_b; + mac_t fcoe_mac; + u16 rsvd_c; + char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; + u8 pcie_gen; + u8 pcie_lanes_orig; + u8 pcie_lanes; + u8 rx_bbcredit; /* receive buffer credits */ + u32 adapter_prop; /* adapter properties */ + u16 maxfrsize; /* max receive frame size */ + char asic_rev; + u8 rsvd_d; + char fw_version[BFA_VERSION_LEN]; + char optrom_version[BFA_VERSION_LEN]; + struct bfa_mfg_vpd_s vpd; + u32 card_type; /* card type */ + u8 mfg_day; /* manufacturing day */ + u8 mfg_month; /* manufacturing month */ + u16 mfg_year; /* manufacturing year */ + u8 uuid[BFI_IOC_ATTR_UUID_SZ]; /*!< chinook uuid */ +}; + +/* + * BFI_IOC_I2H_GETATTR_REPLY message + */ +struct bfi_ioc_getattr_reply_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u8 status; /* cfg reply status */ + u8 rsvd[3]; +}; + +/* + * Firmware memory page offsets + */ +#define BFI_IOC_SMEM_PG0_CB (0x40) +#define BFI_IOC_SMEM_PG0_CT (0x180) + +/* + * Firmware statistic offset + */ +#define BFI_IOC_FWSTATS_OFF (0x6B40) +#define BFI_IOC_FWSTATS_SZ (4096) + +/* + * Firmware trace offset + */ +#define BFI_IOC_TRC_OFF (0x4b00) +#define BFI_IOC_TRC_ENTS 256 + +#define BFI_IOC_FW_SIGNATURE (0xbfadbfad) +#define BFA_IOC_FW_INV_SIGN (0xdeaddead) +#define BFI_IOC_MD5SUM_SZ 4 + +struct bfi_ioc_fwver_s { +#ifdef __BIG_ENDIAN + uint8_t patch; + uint8_t maint; + uint8_t minor; + uint8_t major; + uint8_t rsvd[2]; + uint8_t build; + uint8_t phase; +#else + uint8_t major; + uint8_t minor; + uint8_t maint; + uint8_t patch; + uint8_t phase; + uint8_t build; + uint8_t rsvd[2]; +#endif +}; + +struct bfi_ioc_image_hdr_s { + u32 signature; /* constant signature */ + u8 asic_gen; /* asic generation */ + u8 asic_mode; + u8 port0_mode; /* device mode for port 0 */ + u8 port1_mode; /* device mode for port 1 */ + u32 exec; /* exec vector */ + u32 bootenv; /* firmware boot env */ + u32 rsvd_b[2]; + struct bfi_ioc_fwver_s fwver; + u32 md5sum[BFI_IOC_MD5SUM_SZ]; +}; + +enum bfi_ioc_img_ver_cmp_e { + BFI_IOC_IMG_VER_INCOMP, + BFI_IOC_IMG_VER_OLD, + BFI_IOC_IMG_VER_SAME, + BFI_IOC_IMG_VER_BETTER +}; + +#define BFI_FWBOOT_DEVMODE_OFF 4 +#define BFI_FWBOOT_TYPE_OFF 8 +#define BFI_FWBOOT_ENV_OFF 12 +#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \ + (((u32)(__asic_gen)) << 24 | \ + ((u32)(__asic_mode)) << 16 | \ + ((u32)(__p0_mode)) << 8 | \ + ((u32)(__p1_mode))) + +enum bfi_fwboot_type { + BFI_FWBOOT_TYPE_NORMAL = 0, + BFI_FWBOOT_TYPE_FLASH = 1, + BFI_FWBOOT_TYPE_MEMTEST = 2, +}; + +#define BFI_FWBOOT_TYPE_NORMAL 0 +#define BFI_FWBOOT_TYPE_MEMTEST 2 +#define BFI_FWBOOT_ENV_OS 0 + +enum bfi_port_mode { + BFI_PORT_MODE_FC = 1, + BFI_PORT_MODE_ETH = 2, +}; + +struct bfi_ioc_hbeat_s { + struct bfi_mhdr_s mh; /* common msg header */ + u32 hb_count; /* current heart beat count */ +}; + +/* + * IOC hardware/firmware state + */ +enum bfi_ioc_state { + BFI_IOC_UNINIT = 0, /* not initialized */ + BFI_IOC_INITING = 1, /* h/w is being initialized */ + BFI_IOC_HWINIT = 2, /* h/w is initialized */ + BFI_IOC_CFG = 3, /* IOC configuration in progress */ + BFI_IOC_OP = 4, /* IOC is operational */ + BFI_IOC_DISABLING = 5, /* IOC is being disabled */ + BFI_IOC_DISABLED = 6, /* IOC is disabled */ + BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */ + BFI_IOC_FAIL = 8, /* IOC heart-beat failure */ + BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */ +}; + +#define BFA_IOC_CB_JOIN_SH 16 
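+/*
+ * Illustrative example, not part of the upstream header: for a CT2 ASIC
+ * running 16G FC with both ports in FC mode, the boot device-mode word
+ * packs as
+ *
+ *	BFI_FWBOOT_DEVMODE(BFI_ASIC_GEN_CT2, BFI_ASIC_MODE_FC16,
+ *			   BFI_PORT_MODE_FC, BFI_PORT_MODE_FC)
+ *		= (3 << 24) | (2 << 16) | (1 << 8) | 1 = 0x03020101
+ */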
+#define BFA_IOC_CB_FWSTATE_MASK 0x0000ffff +#define BFA_IOC_CB_JOIN_MASK 0xffff0000 + +#define BFI_IOC_ENDIAN_SIG 0x12345678 + +enum { + BFI_ADAPTER_TYPE_FC = 0x01, /* FC adapters */ + BFI_ADAPTER_TYPE_MK = 0x0f0000, /* adapter type mask */ + BFI_ADAPTER_TYPE_SH = 16, /* adapter type shift */ + BFI_ADAPTER_NPORTS_MK = 0xff00, /* number of ports mask */ + BFI_ADAPTER_NPORTS_SH = 8, /* number of ports shift */ + BFI_ADAPTER_SPEED_MK = 0xff, /* adapter speed mask */ + BFI_ADAPTER_SPEED_SH = 0, /* adapter speed shift */ + BFI_ADAPTER_PROTO = 0x100000, /* prototype adapaters */ + BFI_ADAPTER_TTV = 0x200000, /* TTV debug capable */ + BFI_ADAPTER_UNSUPP = 0x400000, /* unknown adapter type */ +}; + +#define BFI_ADAPTER_GETP(__prop, __adap_prop) \ + (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \ + BFI_ADAPTER_ ## __prop ## _SH) +#define BFI_ADAPTER_SETP(__prop, __val) \ + ((__val) << BFI_ADAPTER_ ## __prop ## _SH) +#define BFI_ADAPTER_IS_PROTO(__adap_type) \ + ((__adap_type) & BFI_ADAPTER_PROTO) +#define BFI_ADAPTER_IS_TTV(__adap_type) \ + ((__adap_type) & BFI_ADAPTER_TTV) +#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \ + ((__adap_type) & BFI_ADAPTER_UNSUPP) +#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \ + ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \ + BFI_ADAPTER_UNSUPP)) + +/* + * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages + */ +struct bfi_ioc_ctrl_req_s { + struct bfi_mhdr_s mh; + u16 clscode; + u16 rsvd; + u32 tv_sec; +}; +#define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s; +#define bfi_ioc_disable_req_t struct bfi_ioc_ctrl_req_s; + +/* + * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages + */ +struct bfi_ioc_ctrl_reply_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u8 status; /* enable/disable status */ + u8 port_mode; /* bfa_mode_s */ + u8 cap_bm; /* capability bit mask */ + u8 rsvd; +}; +#define bfi_ioc_enable_reply_t struct bfi_ioc_ctrl_reply_s; +#define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s; + +#define BFI_IOC_MSGSZ 8 +/* + * H2I Messages + */ +union bfi_ioc_h2i_msg_u { + struct bfi_mhdr_s mh; + struct bfi_ioc_ctrl_req_s enable_req; + struct bfi_ioc_ctrl_req_s disable_req; + struct bfi_ioc_getattr_req_s getattr_req; + u32 mboxmsg[BFI_IOC_MSGSZ]; +}; + +/* + * I2H Messages + */ +union bfi_ioc_i2h_msg_u { + struct bfi_mhdr_s mh; + struct bfi_ioc_ctrl_reply_s fw_event; + u32 mboxmsg[BFI_IOC_MSGSZ]; +}; + + +/* + *---------------------------------------------------------------------- + * PBC + *---------------------------------------------------------------------- + */ + +#define BFI_PBC_MAX_BLUNS 8 +#define BFI_PBC_MAX_VPORTS 16 +#define BFI_PBC_PORT_DISABLED 2 + +/* + * PBC boot lun configuration + */ +struct bfi_pbc_blun_s { + wwn_t tgt_pwwn; + struct scsi_lun tgt_lun; +}; + +/* + * PBC virtual port configuration + */ +struct bfi_pbc_vport_s { + wwn_t vp_pwwn; + wwn_t vp_nwwn; +}; + +/* + * BFI pre-boot configuration information + */ +struct bfi_pbc_s { + u8 port_enabled; + u8 boot_enabled; + u8 nbluns; + u8 nvports; + u8 port_speed; + u8 rsvd_a; + u16 hss; + wwn_t pbc_pwwn; + wwn_t pbc_nwwn; + struct bfi_pbc_blun_s blun[BFI_PBC_MAX_BLUNS]; + struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS]; +}; + +/* + *---------------------------------------------------------------------- + * MSGQ + *---------------------------------------------------------------------- + */ +#define BFI_MSGQ_FULL(_q) (((_q->pi + 1) % _q->q_depth) == _q->ci) +#define BFI_MSGQ_EMPTY(_q) (_q->pi == _q->ci) +#define BFI_MSGQ_UPDATE_CI(_q) (_q->ci = 
(_q->ci + 1) % _q->q_depth) +#define BFI_MSGQ_UPDATE_PI(_q) (_q->pi = (_q->pi + 1) % _q->q_depth) + +/* q_depth must be power of 2 */ +#define BFI_MSGQ_FREE_CNT(_q) ((_q->ci - _q->pi - 1) & (_q->q_depth - 1)) + +enum bfi_msgq_h2i_msgs_e { + BFI_MSGQ_H2I_INIT_REQ = 1, + BFI_MSGQ_H2I_DOORBELL = 2, + BFI_MSGQ_H2I_SHUTDOWN = 3, +}; + +enum bfi_msgq_i2h_msgs_e { + BFI_MSGQ_I2H_INIT_RSP = 1, + BFI_MSGQ_I2H_DOORBELL = 2, +}; + + +/* Messages(commands/responsed/AENS will have the following header */ +struct bfi_msgq_mhdr_s { + u8 msg_class; + u8 msg_id; + u16 msg_token; + u16 num_entries; + u8 enet_id; + u8 rsvd[1]; +}; + +#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \ + (_mh).msg_class = (_mc); \ + (_mh).msg_id = (_mid); \ + (_mh).msg_token = (_tok); \ + (_mh).enet_id = (_enet_id); \ +} while (0) + +/* + * Mailbox for messaging interface + * +*/ +#define BFI_MSGQ_CMD_ENTRY_SIZE (64) /* TBD */ +#define BFI_MSGQ_RSP_ENTRY_SIZE (64) /* TBD */ +#define BFI_MSGQ_MSG_SIZE_MAX (2048) /* TBD */ + +struct bfi_msgq_s { + union bfi_addr_u addr; + u16 q_depth; /* Total num of entries in the queue */ + u8 rsvd[2]; +}; + +/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */ +struct bfi_msgq_cfg_req_s { + struct bfi_mhdr_s mh; + struct bfi_msgq_s cmdq; + struct bfi_msgq_s rspq; +}; + +/* BFI_ENET_MSGQ_CFG_RSP */ +struct bfi_msgq_cfg_rsp_s { + struct bfi_mhdr_s mh; + u8 cmd_status; + u8 rsvd[3]; +}; + + +/* BFI_MSGQ_H2I_DOORBELL */ +struct bfi_msgq_h2i_db_s { + struct bfi_mhdr_s mh; + u16 cmdq_pi; + u16 rspq_ci; +}; + +/* BFI_MSGQ_I2H_DOORBELL */ +struct bfi_msgq_i2h_db_s { + struct bfi_mhdr_s mh; + u16 rspq_pi; + u16 cmdq_ci; +}; + +#pragma pack() + +/* BFI port specific */ +#pragma pack(1) + +enum bfi_port_h2i { + BFI_PORT_H2I_ENABLE_REQ = (1), + BFI_PORT_H2I_DISABLE_REQ = (2), + BFI_PORT_H2I_GET_STATS_REQ = (3), + BFI_PORT_H2I_CLEAR_STATS_REQ = (4), +}; + +enum bfi_port_i2h { + BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1), + BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2), + BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3), + BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), +}; + +/* + * Generic REQ type + */ +struct bfi_port_generic_req_s { + struct bfi_mhdr_s mh; /* msg header */ + u32 msgtag; /* msgtag for reply */ + u32 rsvd; +}; + +/* + * Generic RSP type + */ +struct bfi_port_generic_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 status; /* port enable status */ + u8 rsvd[3]; + u32 msgtag; /* msgtag for reply */ +}; + +/* + * BFI_PORT_H2I_GET_STATS_REQ + */ +struct bfi_port_get_stats_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + union bfi_addr_u dma_addr; +}; + +union bfi_port_h2i_msg_u { + struct bfi_mhdr_s mh; + struct bfi_port_generic_req_s enable_req; + struct bfi_port_generic_req_s disable_req; + struct bfi_port_get_stats_req_s getstats_req; + struct bfi_port_generic_req_s clearstats_req; +}; + +union bfi_port_i2h_msg_u { + struct bfi_mhdr_s mh; + struct bfi_port_generic_rsp_s enable_rsp; + struct bfi_port_generic_rsp_s disable_rsp; + struct bfi_port_generic_rsp_s getstats_rsp; + struct bfi_port_generic_rsp_s clearstats_rsp; +}; + +/* + *---------------------------------------------------------------------- + * ABLK + *---------------------------------------------------------------------- + */ +enum bfi_ablk_h2i_msgs_e { + BFI_ABLK_H2I_QUERY = 1, + BFI_ABLK_H2I_ADPT_CONFIG = 2, + BFI_ABLK_H2I_PORT_CONFIG = 3, + BFI_ABLK_H2I_PF_CREATE = 4, + BFI_ABLK_H2I_PF_DELETE = 5, + BFI_ABLK_H2I_PF_UPDATE = 6, + BFI_ABLK_H2I_OPTROM_ENABLE = 7, + BFI_ABLK_H2I_OPTROM_DISABLE = 8, +}; + +enum 
bfi_ablk_i2h_msgs_e { + BFI_ABLK_I2H_QUERY = BFA_I2HM(BFI_ABLK_H2I_QUERY), + BFI_ABLK_I2H_ADPT_CONFIG = BFA_I2HM(BFI_ABLK_H2I_ADPT_CONFIG), + BFI_ABLK_I2H_PORT_CONFIG = BFA_I2HM(BFI_ABLK_H2I_PORT_CONFIG), + BFI_ABLK_I2H_PF_CREATE = BFA_I2HM(BFI_ABLK_H2I_PF_CREATE), + BFI_ABLK_I2H_PF_DELETE = BFA_I2HM(BFI_ABLK_H2I_PF_DELETE), + BFI_ABLK_I2H_PF_UPDATE = BFA_I2HM(BFI_ABLK_H2I_PF_UPDATE), + BFI_ABLK_I2H_OPTROM_ENABLE = BFA_I2HM(BFI_ABLK_H2I_OPTROM_ENABLE), + BFI_ABLK_I2H_OPTROM_DISABLE = BFA_I2HM(BFI_ABLK_H2I_OPTROM_DISABLE), +}; + +/* BFI_ABLK_H2I_QUERY */ +struct bfi_ablk_h2i_query_s { + struct bfi_mhdr_s mh; + union bfi_addr_u addr; +}; + +/* BFI_ABL_H2I_ADPT_CONFIG, BFI_ABLK_H2I_PORT_CONFIG */ +struct bfi_ablk_h2i_cfg_req_s { + struct bfi_mhdr_s mh; + u8 mode; + u8 port; + u8 max_pf; + u8 max_vf; +}; + +/* + * BFI_ABLK_H2I_PF_CREATE, BFI_ABLK_H2I_PF_DELETE, + */ +struct bfi_ablk_h2i_pf_req_s { + struct bfi_mhdr_s mh; + u8 pcifn; + u8 port; + u16 pers; + u16 bw_min; /* percent BW @ max speed */ + u16 bw_max; /* percent BW @ max speed */ +}; + +/* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */ +struct bfi_ablk_h2i_optrom_s { + struct bfi_mhdr_s mh; +}; + +/* + * BFI_ABLK_I2H_QUERY + * BFI_ABLK_I2H_PORT_CONFIG + * BFI_ABLK_I2H_PF_CREATE + * BFI_ABLK_I2H_PF_DELETE + * BFI_ABLK_I2H_PF_UPDATE + * BFI_ABLK_I2H_OPTROM_ENABLE + * BFI_ABLK_I2H_OPTROM_DISABLE + */ +struct bfi_ablk_i2h_rsp_s { + struct bfi_mhdr_s mh; + u8 status; + u8 pcifn; + u8 port_mode; +}; + + +/* + * CEE module specific messages + */ + +/* Mailbox commands from host to firmware */ +enum bfi_cee_h2i_msgs_e { + BFI_CEE_H2I_GET_CFG_REQ = 1, + BFI_CEE_H2I_RESET_STATS = 2, + BFI_CEE_H2I_GET_STATS_REQ = 3, +}; + +enum bfi_cee_i2h_msgs_e { + BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1), + BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2), + BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3), +}; + +/* + * H2I command structure for resetting the stats + */ +struct bfi_cee_reset_stats_s { + struct bfi_mhdr_s mh; +}; + +/* + * Get configuration command from host + */ +struct bfi_cee_get_req_s { + struct bfi_mhdr_s mh; + union bfi_addr_u dma_addr; +}; + +/* + * Reply message from firmware + */ +struct bfi_cee_get_rsp_s { + struct bfi_mhdr_s mh; + u8 cmd_status; + u8 rsvd[3]; +}; + +/* + * Reply message from firmware + */ +struct bfi_cee_stats_rsp_s { + struct bfi_mhdr_s mh; + u8 cmd_status; + u8 rsvd[3]; +}; + +/* Mailbox message structures from firmware to host */ +union bfi_cee_i2h_msg_u { + struct bfi_mhdr_s mh; + struct bfi_cee_get_rsp_s get_rsp; + struct bfi_cee_stats_rsp_s stats_rsp; +}; + +/* + * SFP related + */ + +enum bfi_sfp_h2i_e { + BFI_SFP_H2I_SHOW = 1, + BFI_SFP_H2I_SCN = 2, +}; + +enum bfi_sfp_i2h_e { + BFI_SFP_I2H_SHOW = BFA_I2HM(BFI_SFP_H2I_SHOW), + BFI_SFP_I2H_SCN = BFA_I2HM(BFI_SFP_H2I_SCN), +}; + +/* + * SFP state change notification + */ +struct bfi_sfp_scn_s { + struct bfi_mhdr_s mhr; /* host msg header */ + u8 event; + u8 sfpid; + u8 pomlvl; /* pom level: normal/warning/alarm */ + u8 is_elb; /* e-loopback */ +}; + +/* + * SFP state + */ +enum bfa_sfp_stat_e { + BFA_SFP_STATE_INIT = 0, /* SFP state is uninit */ + BFA_SFP_STATE_REMOVED = 1, /* SFP is removed */ + BFA_SFP_STATE_INSERTED = 2, /* SFP is inserted */ + BFA_SFP_STATE_VALID = 3, /* SFP is valid */ + BFA_SFP_STATE_UNSUPPORT = 4, /* SFP is unsupport */ + BFA_SFP_STATE_FAILED = 5, /* SFP i2c read fail */ +}; + +/* + * SFP memory access type + */ +enum bfi_sfp_mem_e { + BFI_SFP_MEM_ALL = 0x1, /* access all data field */ + BFI_SFP_MEM_DIAGEXT = 0x2, /* access diag ext data 
field only */ +}; + +struct bfi_sfp_req_s { + struct bfi_mhdr_s mh; + u8 memtype; + u8 rsvd[3]; + struct bfi_alen_s alen; +}; + +struct bfi_sfp_rsp_s { + struct bfi_mhdr_s mh; + u8 status; + u8 state; + u8 rsvd[2]; +}; + +/* + * FLASH module specific + */ +enum bfi_flash_h2i_msgs { + BFI_FLASH_H2I_QUERY_REQ = 1, + BFI_FLASH_H2I_ERASE_REQ = 2, + BFI_FLASH_H2I_WRITE_REQ = 3, + BFI_FLASH_H2I_READ_REQ = 4, + BFI_FLASH_H2I_BOOT_VER_REQ = 5, +}; + +enum bfi_flash_i2h_msgs { + BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1), + BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2), + BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3), + BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4), + BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5), + BFI_FLASH_I2H_EVENT = BFA_I2HM(127), +}; + +/* + * Flash query request + */ +struct bfi_flash_query_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + struct bfi_alen_s alen; +}; + +/* + * Flash erase request + */ +struct bfi_flash_erase_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; +}; + +/* + * Flash write request + */ +struct bfi_flash_write_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + struct bfi_alen_s alen; + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 last; + u8 rsv[2]; + u32 offset; + u32 length; +}; + +/* + * Flash read request + */ +struct bfi_flash_read_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 offset; + u32 length; + struct bfi_alen_s alen; +}; + +/* + * Flash query response + */ +struct bfi_flash_query_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 status; +}; + +/* + * Flash read response + */ +struct bfi_flash_read_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 status; + u32 length; +}; + +/* + * Flash write response + */ +struct bfi_flash_write_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 status; + u32 length; +}; + +/* + * Flash erase response + */ +struct bfi_flash_erase_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 status; +}; + +/* + * Flash event notification + */ +struct bfi_flash_event_s { + struct bfi_mhdr_s mh; /* Common msg header */ + bfa_status_t status; + u32 param; +}; + +/* + *---------------------------------------------------------------------- + * DIAG + *---------------------------------------------------------------------- + */ +enum bfi_diag_h2i { + BFI_DIAG_H2I_PORTBEACON = 1, + BFI_DIAG_H2I_LOOPBACK = 2, + BFI_DIAG_H2I_FWPING = 3, + BFI_DIAG_H2I_TEMPSENSOR = 4, + BFI_DIAG_H2I_LEDTEST = 5, + BFI_DIAG_H2I_QTEST = 6, + BFI_DIAG_H2I_DPORT = 7, +}; + +enum bfi_diag_i2h { + BFI_DIAG_I2H_PORTBEACON = BFA_I2HM(BFI_DIAG_H2I_PORTBEACON), + BFI_DIAG_I2H_LOOPBACK = BFA_I2HM(BFI_DIAG_H2I_LOOPBACK), + BFI_DIAG_I2H_FWPING = BFA_I2HM(BFI_DIAG_H2I_FWPING), + BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR), + BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST), + BFI_DIAG_I2H_QTEST = BFA_I2HM(BFI_DIAG_H2I_QTEST), + BFI_DIAG_I2H_DPORT = BFA_I2HM(BFI_DIAG_H2I_DPORT), + BFI_DIAG_I2H_DPORT_SCN = BFA_I2HM(8), +}; + +#define BFI_DIAG_MAX_SGES 2 +#define BFI_DIAG_DMA_BUF_SZ (2 * 1024) +#define BFI_BOOT_MEMTEST_RES_ADDR 0x900 
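+/*
+ * Illustrative sketch, not part of the upstream header: one way a flash
+ * partition read request (struct bfi_flash_read_req_s above) could be
+ * filled in before being posted to the IOC mailbox.  The function name,
+ * the fn/lpu value of 0 and the byte-order handling of the offset, length
+ * and alen fields are assumptions made for the example only; the real
+ * driver may convert these to the firmware's expected endianness.
+ */
+static inline void
+bfi_example_flash_read_fill(struct bfi_flash_read_req_s *req,
+			    u32 type, u8 instance, u32 off, u32 len,
+			    u64 buf_pa)
+{
+	bfi_h2i_set(req->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
+		    bfi_fn_lpu(0, 0));
+	req->type = type;		/* partition type */
+	req->instance = instance;	/* partition instance */
+	req->offset = off;		/* byte offset within the partition */
+	req->length = len;		/* bytes to DMA into the buffer */
+	req->alen.al_addr.a32.addr_lo = cpu_to_be32((u32)buf_pa);
+	req->alen.al_addr.a32.addr_hi = cpu_to_be32((u32)(buf_pa >> 32));
+	req->alen.al_len = len;		/* length of the DMA buffer */
+}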
+#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3 + +struct bfi_diag_lb_req_s { + struct bfi_mhdr_s mh; + u32 loopcnt; + u32 pattern; + u8 lb_mode; /*!< bfa_port_opmode_t */ + u8 speed; /*!< bfa_port_speed_t */ + u8 rsvd[2]; +}; + +struct bfi_diag_lb_rsp_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + struct bfa_diag_loopback_result_s res; /* 16 bytes */ +}; + +struct bfi_diag_fwping_req_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + struct bfi_alen_s alen; /* 12 bytes */ + u32 data; /* user input data pattern */ + u32 count; /* user input dma count */ + u8 qtag; /* track CPE vc */ + u8 rsv[3]; +}; + +struct bfi_diag_fwping_rsp_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + u32 data; /* user input data pattern */ + u8 qtag; /* track CPE vc */ + u8 dma_status; /* dma status */ + u8 rsv[2]; +}; + +/* + * Temperature Sensor + */ +struct bfi_diag_ts_req_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + u16 temp; /* 10-bit A/D value */ + u16 brd_temp; /* 9-bit board temp */ + u8 status; + u8 ts_junc; /* show junction tempsensor */ + u8 ts_brd; /* show board tempsensor */ + u8 rsv; +}; +#define bfi_diag_ts_rsp_t struct bfi_diag_ts_req_s + +struct bfi_diag_ledtest_req_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + u8 cmd; + u8 color; + u8 portid; + u8 led; /* bitmap of LEDs to be tested */ + u16 freq; /* no. of blinks every 10 secs */ + u8 rsv[2]; +}; + +/* notify host led operation is done */ +struct bfi_diag_ledtest_rsp_s { + struct bfi_mhdr_s mh; /* 4 bytes */ +}; + +struct bfi_diag_portbeacon_req_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + u32 period; /* beaconing period */ + u8 beacon; /* 1: beacon on */ + u8 rsvd[3]; +}; + +/* notify host the beacon is off */ +struct bfi_diag_portbeacon_rsp_s { + struct bfi_mhdr_s mh; /* 4 bytes */ +}; + +struct bfi_diag_qtest_req_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + u32 data[BFI_LMSG_PL_WSZ]; /* fill up tcm prefetch area */ +}; +#define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s + +/* + * D-port test + */ +enum bfi_dport_req { + BFI_DPORT_DISABLE = 0, /* disable dport request */ + BFI_DPORT_ENABLE = 1, /* enable dport request */ + BFI_DPORT_START = 2, /* start dport request */ + BFI_DPORT_SHOW = 3, /* show dport request */ + BFI_DPORT_DYN_DISABLE = 4, /* disable dynamic dport request */ +}; + +enum bfi_dport_scn { + BFI_DPORT_SCN_TESTSTART = 1, + BFI_DPORT_SCN_TESTCOMP = 2, + BFI_DPORT_SCN_SFP_REMOVED = 3, + BFI_DPORT_SCN_DDPORT_ENABLE = 4, + BFI_DPORT_SCN_DDPORT_DISABLE = 5, + BFI_DPORT_SCN_FCPORT_DISABLE = 6, + BFI_DPORT_SCN_SUBTESTSTART = 7, + BFI_DPORT_SCN_TESTSKIP = 8, + BFI_DPORT_SCN_DDPORT_DISABLED = 9, +}; + +struct bfi_diag_dport_req_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + u8 req; /* request 1: enable 0: disable */ + u8 rsvd[3]; + u32 lpcnt; + u32 payload; +}; + +struct bfi_diag_dport_rsp_s { + struct bfi_mhdr_s mh; /* header 4 bytes */ + bfa_status_t status; /* reply status */ + wwn_t pwwn; /* switch port wwn. 8 bytes */ + wwn_t nwwn; /* switch node wwn. 8 bytes */ +}; + +struct bfi_diag_dport_scn_teststart_s { + wwn_t pwwn; /* switch port wwn. 8 bytes */ + wwn_t nwwn; /* switch node wwn. 
8 bytes */ + u8 type; /* bfa_diag_dport_test_type_e */ + u8 mode; /* bfa_diag_dport_test_opmode */ + u8 rsvd[2]; + u32 numfrm; /* from switch uint in 1M */ +}; + +struct bfi_diag_dport_scn_testcomp_s { + u8 status; /* bfa_diag_dport_test_status_e */ + u8 speed; /* bfa_port_speed_t */ + u16 numbuffer; /* from switch */ + u8 subtest_status[DPORT_TEST_MAX]; /* 4 bytes */ + u32 latency; /* from switch */ + u32 distance; /* from swtich unit in meters */ + /* Buffers required to saturate the link */ + u16 frm_sz; /* from switch for buf_reqd */ + u8 rsvd[2]; +}; + +struct bfi_diag_dport_scn_s { /* max size == RDS_RMESZ */ + struct bfi_mhdr_s mh; /* header 4 bytes */ + u8 state; /* new state */ + u8 rsvd[3]; + union { + struct bfi_diag_dport_scn_teststart_s teststart; + struct bfi_diag_dport_scn_testcomp_s testcomp; + } info; +}; + +union bfi_diag_dport_msg_u { + struct bfi_diag_dport_req_s req; + struct bfi_diag_dport_rsp_s rsp; + struct bfi_diag_dport_scn_s scn; +}; + +/* + * PHY module specific + */ +enum bfi_phy_h2i_msgs_e { + BFI_PHY_H2I_QUERY_REQ = 1, + BFI_PHY_H2I_STATS_REQ = 2, + BFI_PHY_H2I_WRITE_REQ = 3, + BFI_PHY_H2I_READ_REQ = 4, +}; + +enum bfi_phy_i2h_msgs_e { + BFI_PHY_I2H_QUERY_RSP = BFA_I2HM(1), + BFI_PHY_I2H_STATS_RSP = BFA_I2HM(2), + BFI_PHY_I2H_WRITE_RSP = BFA_I2HM(3), + BFI_PHY_I2H_READ_RSP = BFA_I2HM(4), +}; + +/* + * External PHY query request + */ +struct bfi_phy_query_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u8 instance; + u8 rsv[3]; + struct bfi_alen_s alen; +}; + +/* + * External PHY stats request + */ +struct bfi_phy_stats_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u8 instance; + u8 rsv[3]; + struct bfi_alen_s alen; +}; + +/* + * External PHY write request + */ +struct bfi_phy_write_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u8 instance; + u8 last; + u8 rsv[2]; + u32 offset; + u32 length; + struct bfi_alen_s alen; +}; + +/* + * External PHY read request + */ +struct bfi_phy_read_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u8 instance; + u8 rsv[3]; + u32 offset; + u32 length; + struct bfi_alen_s alen; +}; + +/* + * External PHY query response + */ +struct bfi_phy_query_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 status; +}; + +/* + * External PHY stats response + */ +struct bfi_phy_stats_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 status; +}; + +/* + * External PHY read response + */ +struct bfi_phy_read_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 status; + u32 length; +}; + +/* + * External PHY write response + */ +struct bfi_phy_write_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 status; + u32 length; +}; + +enum bfi_fru_h2i_msgs { + BFI_FRUVPD_H2I_WRITE_REQ = 1, + BFI_FRUVPD_H2I_READ_REQ = 2, + BFI_TFRU_H2I_WRITE_REQ = 3, + BFI_TFRU_H2I_READ_REQ = 4, +}; + +enum bfi_fru_i2h_msgs { + BFI_FRUVPD_I2H_WRITE_RSP = BFA_I2HM(1), + BFI_FRUVPD_I2H_READ_RSP = BFA_I2HM(2), + BFI_TFRU_I2H_WRITE_RSP = BFA_I2HM(3), + BFI_TFRU_I2H_READ_RSP = BFA_I2HM(4), +}; + +/* + * FRU write request + */ +struct bfi_fru_write_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u8 last; + u8 rsv_1[3]; + u8 trfr_cmpl; + u8 rsv_2[3]; + u32 offset; + u32 length; + struct bfi_alen_s alen; +}; + +/* + * FRU read request + */ +struct bfi_fru_read_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 offset; + u32 length; + struct bfi_alen_s alen; +}; + +/* + * FRU response + */ +struct bfi_fru_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 
status; + u32 length; +}; +#pragma pack() + +#endif /* __BFI_H__ */ diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h new file mode 100644 index 000000000..b9dc0b9bb --- /dev/null +++ b/drivers/scsi/bfa/bfi_ms.h @@ -0,0 +1,871 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. + */ + +#ifndef __BFI_MS_H__ +#define __BFI_MS_H__ + +#include "bfi.h" +#include "bfa_fc.h" +#include "bfa_defs_svc.h" + +#pragma pack(1) + +enum bfi_iocfc_h2i_msgs { + BFI_IOCFC_H2I_CFG_REQ = 1, + BFI_IOCFC_H2I_SET_INTR_REQ = 2, + BFI_IOCFC_H2I_UPDATEQ_REQ = 3, + BFI_IOCFC_H2I_FAA_QUERY_REQ = 4, + BFI_IOCFC_H2I_ADDR_REQ = 5, +}; + +enum bfi_iocfc_i2h_msgs { + BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1), + BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3), + BFI_IOCFC_I2H_FAA_QUERY_RSP = BFA_I2HM(4), + BFI_IOCFC_I2H_ADDR_MSG = BFA_I2HM(5), +}; + +struct bfi_iocfc_cfg_s { + u8 num_cqs; /* Number of CQs to be used */ + u8 sense_buf_len; /* SCSI sense length */ + u16 rsvd_1; + u32 endian_sig; /* endian signature of host */ + u8 rsvd_2; + u8 single_msix_vec; + u8 rsvd[2]; + __be16 num_ioim_reqs; + __be16 num_fwtio_reqs; + + + /* + * Request and response circular queue base addresses, size and + * shadow index pointers. + */ + union bfi_addr_u req_cq_ba[BFI_IOC_MAX_CQS]; + union bfi_addr_u req_shadow_ci[BFI_IOC_MAX_CQS]; + __be16 req_cq_elems[BFI_IOC_MAX_CQS]; + union bfi_addr_u rsp_cq_ba[BFI_IOC_MAX_CQS]; + union bfi_addr_u rsp_shadow_pi[BFI_IOC_MAX_CQS]; + __be16 rsp_cq_elems[BFI_IOC_MAX_CQS]; + + union bfi_addr_u stats_addr; /* DMA-able address for stats */ + union bfi_addr_u cfgrsp_addr; /* config response dma address */ + union bfi_addr_u ioim_snsbase[BFI_IOIM_SNSBUF_SEGS]; + /* IO sense buf base addr segments */ + struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */ +}; + +/* + * Boot target wwn information for this port. This contains either the stored + * or discovered boot target port wwns for the port. 
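+ * It is returned to the host as part of the IOCFC configuration response
+ * (struct bfi_iocfc_cfgrsp_s).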
+ */ +struct bfi_iocfc_bootwwns { + wwn_t wwn[BFA_BOOT_BOOTLUN_MAX]; + u8 nwwns; + u8 rsvd[7]; +}; + +/** + * Queue configuration response from firmware + */ +struct bfi_iocfc_qreg_s { + u32 cpe_q_ci_off[BFI_IOC_MAX_CQS]; + u32 cpe_q_pi_off[BFI_IOC_MAX_CQS]; + u32 cpe_qctl_off[BFI_IOC_MAX_CQS]; + u32 rme_q_ci_off[BFI_IOC_MAX_CQS]; + u32 rme_q_pi_off[BFI_IOC_MAX_CQS]; + u32 rme_qctl_off[BFI_IOC_MAX_CQS]; + u8 hw_qid[BFI_IOC_MAX_CQS]; +}; + +struct bfi_iocfc_cfgrsp_s { + struct bfa_iocfc_fwcfg_s fwcfg; + struct bfa_iocfc_intr_attr_s intr_attr; + struct bfi_iocfc_bootwwns bootwwns; + struct bfi_pbc_s pbc_cfg; + struct bfi_iocfc_qreg_s qreg; +}; + +/* + * BFI_IOCFC_H2I_CFG_REQ message + */ +struct bfi_iocfc_cfg_req_s { + struct bfi_mhdr_s mh; + union bfi_addr_u ioc_cfg_dma_addr; +}; + + +/* + * BFI_IOCFC_I2H_CFG_REPLY message + */ +struct bfi_iocfc_cfg_reply_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u8 cfg_success; /* cfg reply status */ + u8 lpu_bm; /* LPUs assigned for this IOC */ + u8 rsvd[2]; +}; + + +/* + * BFI_IOCFC_H2I_SET_INTR_REQ message + */ +struct bfi_iocfc_set_intr_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 coalesce; /* enable intr coalescing */ + u8 rsvd[3]; + __be16 delay; /* delay timer 0..1125us */ + __be16 latency; /* latency timer 0..225us */ +}; + + +/* + * BFI_IOCFC_H2I_UPDATEQ_REQ message + */ +struct bfi_iocfc_updateq_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u32 reqq_ba; /* reqq base addr */ + u32 rspq_ba; /* rspq base addr */ + u32 reqq_sci; /* reqq shadow ci */ + u32 rspq_spi; /* rspq shadow pi */ +}; + + +/* + * BFI_IOCFC_I2H_UPDATEQ_RSP message + */ +struct bfi_iocfc_updateq_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 status; /* updateq status */ + u8 rsvd[3]; +}; + + +/* + * H2I Messages + */ +union bfi_iocfc_h2i_msg_u { + struct bfi_mhdr_s mh; + struct bfi_iocfc_cfg_req_s cfg_req; + struct bfi_iocfc_updateq_req_s updateq_req; + u32 mboxmsg[BFI_IOC_MSGSZ]; +}; + + +/* + * I2H Messages + */ +union bfi_iocfc_i2h_msg_u { + struct bfi_mhdr_s mh; + struct bfi_iocfc_cfg_reply_s cfg_reply; + struct bfi_iocfc_updateq_rsp_s updateq_rsp; + u32 mboxmsg[BFI_IOC_MSGSZ]; +}; + +/* + * BFI_IOCFC_H2I_FAA_ENABLE_REQ BFI_IOCFC_H2I_FAA_DISABLE_REQ message + */ +struct bfi_faa_en_dis_s { + struct bfi_mhdr_s mh; /* common msg header */ +}; + +struct bfi_faa_addr_msg_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 rsvd[4]; + wwn_t pwwn; /* Fabric acquired PWWN */ + wwn_t nwwn; /* Fabric acquired PWWN */ +}; + +/* + * BFI_IOCFC_H2I_FAA_QUERY_REQ message + */ +struct bfi_faa_query_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 faa_status; /* FAA status */ + u8 addr_source; /* PWWN source */ + u8 rsvd[2]; + wwn_t faa; /* Fabric acquired PWWN */ +}; + +/* + * BFI_IOCFC_I2H_FAA_ENABLE_RSP, BFI_IOCFC_I2H_FAA_DISABLE_RSP message + */ +struct bfi_faa_en_dis_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 status; /* updateq status */ + u8 rsvd[3]; +}; + +/* + * BFI_IOCFC_I2H_FAA_QUERY_RSP message + */ +#define bfi_faa_query_rsp_t struct bfi_faa_query_s + +enum bfi_fcport_h2i { + BFI_FCPORT_H2I_ENABLE_REQ = (1), + BFI_FCPORT_H2I_DISABLE_REQ = (2), + BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ = (3), + BFI_FCPORT_H2I_STATS_GET_REQ = (4), + BFI_FCPORT_H2I_STATS_CLEAR_REQ = (5), +}; + + +enum bfi_fcport_i2h { + BFI_FCPORT_I2H_ENABLE_RSP = BFA_I2HM(1), + BFI_FCPORT_I2H_DISABLE_RSP = BFA_I2HM(2), + BFI_FCPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(3), + BFI_FCPORT_I2H_STATS_GET_RSP = BFA_I2HM(4), + 
BFI_FCPORT_I2H_STATS_CLEAR_RSP = BFA_I2HM(5), + BFI_FCPORT_I2H_EVENT = BFA_I2HM(6), + BFI_FCPORT_I2H_TRUNK_SCN = BFA_I2HM(7), + BFI_FCPORT_I2H_ENABLE_AEN = BFA_I2HM(8), + BFI_FCPORT_I2H_DISABLE_AEN = BFA_I2HM(9), +}; + + +/* + * Generic REQ type + */ +struct bfi_fcport_req_s { + struct bfi_mhdr_s mh; /* msg header */ + u32 msgtag; /* msgtag for reply */ +}; + +/* + * Generic RSP type + */ +struct bfi_fcport_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 status; /* port enable status */ + u8 rsvd[3]; + struct bfa_port_cfg_s port_cfg;/* port configuration */ + u32 msgtag; /* msgtag for reply */ +}; + +/* + * BFI_FCPORT_H2I_ENABLE_REQ + */ +struct bfi_fcport_enable_req_s { + struct bfi_mhdr_s mh; /* msg header */ + u32 rsvd1; + wwn_t nwwn; /* node wwn of physical port */ + wwn_t pwwn; /* port wwn of physical port */ + struct bfa_port_cfg_s port_cfg; /* port configuration */ + union bfi_addr_u stats_dma_addr; /* DMA address for stats */ + u32 msgtag; /* msgtag for reply */ + u8 use_flash_cfg; /* get prot cfg from flash */ + u8 rsvd2[3]; +}; + +/* + * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ + */ +struct bfi_fcport_set_svc_params_req_s { + struct bfi_mhdr_s mh; /* msg header */ + __be16 tx_bbcredit; /* Tx credits */ + u8 rsvd[2]; +}; + +/* + * BFI_FCPORT_I2H_EVENT + */ +struct bfi_fcport_event_s { + struct bfi_mhdr_s mh; /* common msg header */ + struct bfa_port_link_s link_state; +}; + +/* + * BFI_FCPORT_I2H_TRUNK_SCN + */ +struct bfi_fcport_trunk_link_s { + wwn_t trunk_wwn; + u8 fctl; /* bfa_trunk_link_fctl_t */ + u8 state; /* bfa_trunk_link_state_t */ + u8 speed; /* bfa_port_speed_t */ + u8 rsvd; + __be32 deskew; +}; + +#define BFI_FCPORT_MAX_LINKS 2 +struct bfi_fcport_trunk_scn_s { + struct bfi_mhdr_s mh; + u8 trunk_state; /* bfa_trunk_state_t */ + u8 trunk_speed; /* bfa_port_speed_t */ + u8 rsvd_a[2]; + struct bfi_fcport_trunk_link_s tlink[BFI_FCPORT_MAX_LINKS]; +}; + +/* + * fcport H2I message + */ +union bfi_fcport_h2i_msg_u { + struct bfi_mhdr_s *mhdr; + struct bfi_fcport_enable_req_s *penable; + struct bfi_fcport_req_s *pdisable; + struct bfi_fcport_set_svc_params_req_s *psetsvcparams; + struct bfi_fcport_req_s *pstatsget; + struct bfi_fcport_req_s *pstatsclear; +}; + +/* + * fcport I2H message + */ +union bfi_fcport_i2h_msg_u { + struct bfi_msg_s *msg; + struct bfi_fcport_rsp_s *penable_rsp; + struct bfi_fcport_rsp_s *pdisable_rsp; + struct bfi_fcport_rsp_s *psetsvcparams_rsp; + struct bfi_fcport_rsp_s *pstatsget_rsp; + struct bfi_fcport_rsp_s *pstatsclear_rsp; + struct bfi_fcport_event_s *event; + struct bfi_fcport_trunk_scn_s *trunk_scn; +}; + +enum bfi_fcxp_h2i { + BFI_FCXP_H2I_SEND_REQ = 1, +}; + +enum bfi_fcxp_i2h { + BFI_FCXP_I2H_SEND_RSP = BFA_I2HM(1), +}; + +#define BFA_FCXP_MAX_SGES 2 + +/* + * FCXP send request structure + */ +struct bfi_fcxp_send_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + __be16 fcxp_tag; /* driver request tag */ + __be16 max_frmsz; /* max send frame size */ + __be16 vf_id; /* vsan tag if applicable */ + u16 rport_fw_hndl; /* FW Handle for the remote port */ + u8 class; /* FC class used for req/rsp */ + u8 rsp_timeout; /* timeout in secs, 0-no response */ + u8 cts; /* continue sequence */ + u8 lp_fwtag; /* lport tag */ + struct fchs_s fchs; /* request FC header structure */ + __be32 req_len; /* request payload length */ + __be32 rsp_maxlen; /* max response length expected */ + struct bfi_alen_s req_alen; /* request buffer */ + struct bfi_alen_s rsp_alen; /* response buffer */ +}; + +/* + * FCXP send response structure + */ +struct 
bfi_fcxp_send_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + __be16 fcxp_tag; /* send request tag */ + u8 req_status; /* request status */ + u8 rsvd; + __be32 rsp_len; /* actual response length */ + __be32 residue_len; /* residual response length */ + struct fchs_s fchs; /* response FC header structure */ +}; + +enum bfi_uf_h2i { + BFI_UF_H2I_BUF_POST = 1, +}; + +enum bfi_uf_i2h { + BFI_UF_I2H_FRM_RCVD = BFA_I2HM(1), +}; + +#define BFA_UF_MAX_SGES 2 + +struct bfi_uf_buf_post_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u16 buf_tag; /* buffer tag */ + __be16 buf_len; /* total buffer length */ + struct bfi_alen_s alen; /* buffer address/len pair */ +}; + +struct bfi_uf_frm_rcvd_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u16 buf_tag; /* buffer tag */ + u16 rsvd; + u16 frm_len; /* received frame length */ + u16 xfr_len; /* tranferred length */ +}; + +enum bfi_lps_h2i_msgs { + BFI_LPS_H2I_LOGIN_REQ = 1, + BFI_LPS_H2I_LOGOUT_REQ = 2, + BFI_LPS_H2I_N2N_PID_REQ = 3, +}; + +enum bfi_lps_i2h_msgs { + BFI_LPS_I2H_LOGIN_RSP = BFA_I2HM(1), + BFI_LPS_I2H_LOGOUT_RSP = BFA_I2HM(2), + BFI_LPS_I2H_CVL_EVENT = BFA_I2HM(3), +}; + +struct bfi_lps_login_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 bfa_tag; + u8 alpa; + __be16 pdu_size; + wwn_t pwwn; + wwn_t nwwn; + u8 fdisc; + u8 auth_en; + u8 lps_role; + u8 bb_scn; + u32 vvl_flag; +}; + +struct bfi_lps_login_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 fw_tag; + u8 status; + u8 lsrjt_rsn; + u8 lsrjt_expl; + wwn_t port_name; + wwn_t node_name; + __be16 bb_credit; + u8 f_port; + u8 npiv_en; + u32 lp_pid:24; + u32 auth_req:8; + mac_t lp_mac; + mac_t fcf_mac; + u8 ext_status; + u8 brcd_switch; /* attached peer is brcd switch */ + u8 bfa_tag; + u8 rsvd; +}; + +struct bfi_lps_logout_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 fw_tag; + u8 rsvd[3]; + wwn_t port_name; +}; + +struct bfi_lps_logout_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 bfa_tag; + u8 status; + u8 rsvd[2]; +}; + +struct bfi_lps_cvl_event_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 bfa_tag; + u8 rsvd[3]; +}; + +struct bfi_lps_n2n_pid_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 fw_tag; + u32 lp_pid:24; +}; + +union bfi_lps_h2i_msg_u { + struct bfi_mhdr_s *msg; + struct bfi_lps_login_req_s *login_req; + struct bfi_lps_logout_req_s *logout_req; + struct bfi_lps_n2n_pid_req_s *n2n_pid_req; +}; + +union bfi_lps_i2h_msg_u { + struct bfi_msg_s *msg; + struct bfi_lps_login_rsp_s *login_rsp; + struct bfi_lps_logout_rsp_s *logout_rsp; + struct bfi_lps_cvl_event_s *cvl_event; +}; + +enum bfi_rport_h2i_msgs { + BFI_RPORT_H2I_CREATE_REQ = 1, + BFI_RPORT_H2I_DELETE_REQ = 2, + BFI_RPORT_H2I_SET_SPEED_REQ = 3, +}; + +enum bfi_rport_i2h_msgs { + BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1), + BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2), + BFI_RPORT_I2H_QOS_SCN = BFA_I2HM(3), + BFI_RPORT_I2H_LIP_SCN_ONLINE = BFA_I2HM(4), + BFI_RPORT_I2H_LIP_SCN_OFFLINE = BFA_I2HM(5), + BFI_RPORT_I2H_NO_DEV = BFA_I2HM(6), +}; + +struct bfi_rport_create_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 bfa_handle; /* host rport handle */ + __be16 max_frmsz; /* max rcv pdu size */ + u32 pid:24, /* remote port ID */ + lp_fwtag:8; /* local port tag */ + u32 local_pid:24, /* local port ID */ + cisc:8; + u8 fc_class; /* supported FC classes */ + u8 vf_en; /* virtual fabric enable */ + u16 vf_id; /* virtual fabric ID */ +}; + +struct bfi_rport_create_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 
status; /* rport creation status */ + u8 rsvd[3]; + u16 bfa_handle; /* host rport handle */ + u16 fw_handle; /* firmware rport handle */ + struct bfa_rport_qos_attr_s qos_attr; /* QoS Attributes */ +}; + +struct bfa_rport_speed_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 fw_handle; /* firmware rport handle */ + u8 speed; /* rport's speed via RPSC */ + u8 rsvd; +}; + +struct bfi_rport_delete_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 fw_handle; /* firmware rport handle */ + u16 rsvd; +}; + +struct bfi_rport_delete_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 bfa_handle; /* host rport handle */ + u8 status; /* rport deletion status */ + u8 rsvd; +}; + +struct bfi_rport_qos_scn_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 bfa_handle; /* host rport handle */ + u16 rsvd; + struct bfa_rport_qos_attr_s old_qos_attr; /* Old QoS Attributes */ + struct bfa_rport_qos_attr_s new_qos_attr; /* New QoS Attributes */ +}; + +struct bfi_rport_lip_scn_s { + struct bfi_mhdr_s mh; /*!< common msg header */ + u16 bfa_handle; /*!< host rport handle */ + u8 status; /*!< scn online status */ + u8 rsvd; + struct bfa_fcport_loop_info_s loop_info; +}; + +union bfi_rport_h2i_msg_u { + struct bfi_msg_s *msg; + struct bfi_rport_create_req_s *create_req; + struct bfi_rport_delete_req_s *delete_req; + struct bfi_rport_speed_req_s *speed_req; +}; + +union bfi_rport_i2h_msg_u { + struct bfi_msg_s *msg; + struct bfi_rport_create_rsp_s *create_rsp; + struct bfi_rport_delete_rsp_s *delete_rsp; + struct bfi_rport_qos_scn_s *qos_scn_evt; + struct bfi_rport_lip_scn_s *lip_scn; +}; + +/* + * Initiator mode I-T nexus interface defines. + */ + +enum bfi_itn_h2i { + BFI_ITN_H2I_CREATE_REQ = 1, /* i-t nexus creation */ + BFI_ITN_H2I_DELETE_REQ = 2, /* i-t nexus deletion */ +}; + +enum bfi_itn_i2h { + BFI_ITN_I2H_CREATE_RSP = BFA_I2HM(1), + BFI_ITN_I2H_DELETE_RSP = BFA_I2HM(2), + BFI_ITN_I2H_SLER_EVENT = BFA_I2HM(3), +}; + +struct bfi_itn_create_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 fw_handle; /* f/w handle for itnim */ + u8 class; /* FC class for IO */ + u8 seq_rec; /* sequence recovery support */ + u8 msg_no; /* seq id of the msg */ + u8 role; +}; + +struct bfi_itn_create_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 bfa_handle; /* bfa handle for itnim */ + u8 status; /* fcp request status */ + u8 seq_id; /* seq id of the msg */ +}; + +struct bfi_itn_delete_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 fw_handle; /* f/w itnim handle */ + u8 seq_id; /* seq id of the msg */ + u8 rsvd; +}; + +struct bfi_itn_delete_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 bfa_handle; /* bfa handle for itnim */ + u8 status; /* fcp request status */ + u8 seq_id; /* seq id of the msg */ +}; + +struct bfi_itn_sler_event_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 bfa_handle; /* bfa handle for itnim */ + u16 rsvd; +}; + +union bfi_itn_h2i_msg_u { + struct bfi_itn_create_req_s *create_req; + struct bfi_itn_delete_req_s *delete_req; + struct bfi_msg_s *msg; +}; + +union bfi_itn_i2h_msg_u { + struct bfi_itn_create_rsp_s *create_rsp; + struct bfi_itn_delete_rsp_s *delete_rsp; + struct bfi_itn_sler_event_s *sler_event; + struct bfi_msg_s *msg; +}; + +/* + * Initiator mode IO interface defines. 
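+ * Covers the fast-path SCSI IO request (struct bfi_ioim_req_s), the host
+ * abort/cleanup requests and the corresponding IO completion and abort
+ * response messages.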
+ */ + +enum bfi_ioim_h2i { + BFI_IOIM_H2I_IOABORT_REQ = 1, /* IO abort request */ + BFI_IOIM_H2I_IOCLEANUP_REQ = 2, /* IO cleanup request */ +}; + +enum bfi_ioim_i2h { + BFI_IOIM_I2H_IO_RSP = BFA_I2HM(1), /* non-fp IO response */ + BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2), /* ABORT rsp */ +}; + +/* + * IO command DIF info + */ +struct bfi_ioim_dif_s { + u32 dif_info[4]; +}; + +/* + * FCP IO messages overview + * + * @note + * - Max CDB length supported is 64 bytes. + * - SCSI Linked commands and SCSI bi-directional Commands not + * supported. + * + */ +struct bfi_ioim_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + __be16 io_tag; /* I/O tag */ + u16 rport_hdl; /* itnim/rport firmware handle */ + struct fcp_cmnd_s cmnd; /* IO request info */ + + /* + * SG elements array within the IO request must be double word + * aligned. This alignment is required to optimize SGM setup for the IO. + */ + struct bfi_sge_s sges[BFI_SGE_INLINE_MAX]; + u8 io_timeout; + u8 dif_en; + u8 rsvd_a[2]; + struct bfi_ioim_dif_s dif; +}; + +/* + * This table shows various IO status codes from firmware and their + * meaning. Host driver can use these status codes to further process + * IO completions. + * + * BFI_IOIM_STS_OK : IO completed with error free SCSI & + * transport status. + * io-tag can be reused. + * + * BFA_IOIM_STS_SCSI_ERR : IO completed with scsi error. + * - io-tag can be reused. + * + * BFI_IOIM_STS_HOST_ABORTED : IO was aborted successfully due to + * host request. + * - io-tag cannot be reused yet. + * + * BFI_IOIM_STS_ABORTED : IO was aborted successfully + * internally by f/w. + * - io-tag cannot be reused yet. + * + * BFI_IOIM_STS_TIMEDOUT : IO timedout and ABTS/RRQ is happening + * in the firmware and + * - io-tag cannot be reused yet. + * + * BFI_IOIM_STS_SQER_NEEDED : Firmware could not recover the IO + * with sequence level error + * logic and hence host needs to retry + * this IO with a different IO tag + * - io-tag cannot be used yet. + * + * BFI_IOIM_STS_NEXUS_ABORT : Second Level Error Recovery from host + * is required because 2 consecutive ABTS + * timedout and host needs logout and + * re-login with the target + * - io-tag cannot be used yet. + * + * BFI_IOIM_STS_UNDERRUN : IO completed with SCSI status good, + * but the data tranferred is less than + * the fcp data length in the command. + * ex. SCSI INQUIRY where transferred + * data length and residue count in FCP + * response accounts for total fcp-dl + * - io-tag can be reused. + * + * BFI_IOIM_STS_OVERRUN : IO completed with SCSI status good, + * but the data transerred is more than + * fcp data length in the command. ex. + * TAPE IOs where blocks can of unequal + * lengths. + * - io-tag can be reused. + * + * BFI_IOIM_STS_RES_FREE : Firmware has completed using io-tag + * during abort process + * - io-tag can be reused. + * + * BFI_IOIM_STS_PROTO_ERR : Firmware detected a protocol error. + * ex target sent more data than + * requested, or there was data frame + * loss and other reasons + * - io-tag cannot be used yet. + * + * BFI_IOIM_STS_DIF_ERR : Firwmare detected DIF error. ex: DIF + * CRC err or Ref Tag err or App tag err. + * - io-tag can be reused. + * + * BFA_IOIM_STS_TSK_MGT_ABORT : IO was aborted because of Task + * Management command from the host + * - io-tag can be reused. + * + * BFI_IOIM_STS_UTAG : Firmware does not know about this + * io_tag. + * - io-tag can be reused. 
+ */ +enum bfi_ioim_status { + BFI_IOIM_STS_OK = 0, + BFI_IOIM_STS_HOST_ABORTED = 1, + BFI_IOIM_STS_ABORTED = 2, + BFI_IOIM_STS_TIMEDOUT = 3, + BFI_IOIM_STS_RES_FREE = 4, + BFI_IOIM_STS_SQER_NEEDED = 5, + BFI_IOIM_STS_PROTO_ERR = 6, + BFI_IOIM_STS_UTAG = 7, + BFI_IOIM_STS_PATHTOV = 8, +}; + +/* + * I/O response message + */ +struct bfi_ioim_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + __be16 io_tag; /* completed IO tag */ + u16 bfa_rport_hndl; /* releated rport handle */ + u8 io_status; /* IO completion status */ + u8 reuse_io_tag; /* IO tag can be reused */ + u16 abort_tag; /* host abort request tag */ + u8 scsi_status; /* scsi status from target */ + u8 sns_len; /* scsi sense length */ + u8 resid_flags; /* IO residue flags */ + u8 rsvd_a; + __be32 residue; /* IO residual length in bytes */ + u32 rsvd_b[3]; +}; + +struct bfi_ioim_abort_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + __be16 io_tag; /* I/O tag */ + u16 abort_tag; /* unique request tag */ +}; + +/* + * Initiator mode task management command interface defines. + */ + +enum bfi_tskim_h2i { + BFI_TSKIM_H2I_TM_REQ = 1, /* task-mgmt command */ + BFI_TSKIM_H2I_ABORT_REQ = 2, /* task-mgmt command */ +}; + +enum bfi_tskim_i2h { + BFI_TSKIM_I2H_TM_RSP = BFA_I2HM(1), +}; + +struct bfi_tskim_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + __be16 tsk_tag; /* task management tag */ + u16 itn_fhdl; /* itn firmware handle */ + struct scsi_lun lun; /* LU number */ + u8 tm_flags; /* see enum fcp_tm_cmnd */ + u8 t_secs; /* Timeout value in seconds */ + u8 rsvd[2]; +}; + +struct bfi_tskim_abortreq_s { + struct bfi_mhdr_s mh; /* Common msg header */ + __be16 tsk_tag; /* task management tag */ + u16 rsvd; +}; + +enum bfi_tskim_status { + /* + * Following are FCP-4 spec defined status codes, + * **DO NOT CHANGE THEM ** + */ + BFI_TSKIM_STS_OK = 0, + BFI_TSKIM_STS_NOT_SUPP = 4, + BFI_TSKIM_STS_FAILED = 5, + + /* + * Defined by BFA + */ + BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */ + BFI_TSKIM_STS_ABORTED = 11, /* Aborted on host request */ + BFI_TSKIM_STS_UTAG = 12, /* unknown tag for request */ +}; + +struct bfi_tskim_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + __be16 tsk_tag; /* task mgmt cmnd tag */ + u8 tsk_status; /* @ref bfi_tskim_status */ + u8 rsvd; +}; + +#pragma pack() + +/* + * Crossbow PCI MSI-X vector defines + */ +enum { + BFI_MSIX_CPE_QMIN_CB = 0, + BFI_MSIX_CPE_QMAX_CB = 7, + BFI_MSIX_RME_QMIN_CB = 8, + BFI_MSIX_RME_QMAX_CB = 15, + BFI_MSIX_CB_MAX = 22, +}; + +/* + * Catapult FC PCI MSI-X vector defines + */ +enum { + BFI_MSIX_LPU_ERR_CT = 0, + BFI_MSIX_CPE_QMIN_CT = 1, + BFI_MSIX_CPE_QMAX_CT = 4, + BFI_MSIX_RME_QMIN_CT = 5, + BFI_MSIX_RME_QMAX_CT = 8, + BFI_MSIX_CT_MAX = 9, +}; + +#endif /* __BFI_MS_H__ */ diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h new file mode 100644 index 000000000..0803b710a --- /dev/null +++ b/drivers/scsi/bfa/bfi_reg.h @@ -0,0 +1,452 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. + * All rights reserved + * www.qlogic.com + * + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
+ */ + +/* + * bfi_reg.h ASIC register defines for all QLogic BR-series adapter ASICs + */ + +#ifndef __BFI_REG_H__ +#define __BFI_REG_H__ + +#define HOSTFN0_INT_STATUS 0x00014000 /* cb/ct */ +#define HOSTFN1_INT_STATUS 0x00014100 /* cb/ct */ +#define HOSTFN2_INT_STATUS 0x00014300 /* ct */ +#define HOSTFN3_INT_STATUS 0x00014400 /* ct */ +#define HOSTFN0_INT_MSK 0x00014004 /* cb/ct */ +#define HOSTFN1_INT_MSK 0x00014104 /* cb/ct */ +#define HOSTFN2_INT_MSK 0x00014304 /* ct */ +#define HOSTFN3_INT_MSK 0x00014404 /* ct */ + +#define HOST_PAGE_NUM_FN0 0x00014008 /* cb/ct */ +#define HOST_PAGE_NUM_FN1 0x00014108 /* cb/ct */ +#define HOST_PAGE_NUM_FN2 0x00014308 /* ct */ +#define HOST_PAGE_NUM_FN3 0x00014408 /* ct */ + +#define APP_PLL_LCLK_CTL_REG 0x00014204 /* cb/ct */ +#define __P_LCLK_PLL_LOCK 0x80000000 +#define __APP_PLL_LCLK_SRAM_USE_100MHZ 0x00100000 +#define __APP_PLL_LCLK_RESET_TIMER_MK 0x000e0000 +#define __APP_PLL_LCLK_RESET_TIMER_SH 17 +#define __APP_PLL_LCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_LCLK_RESET_TIMER_SH) +#define __APP_PLL_LCLK_LOGIC_SOFT_RESET 0x00010000 +#define __APP_PLL_LCLK_CNTLMT0_1_MK 0x0000c000 +#define __APP_PLL_LCLK_CNTLMT0_1_SH 14 +#define __APP_PLL_LCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_LCLK_CNTLMT0_1_SH) +#define __APP_PLL_LCLK_JITLMT0_1_MK 0x00003000 +#define __APP_PLL_LCLK_JITLMT0_1_SH 12 +#define __APP_PLL_LCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_LCLK_JITLMT0_1_SH) +#define __APP_PLL_LCLK_HREF 0x00000800 +#define __APP_PLL_LCLK_HDIV 0x00000400 +#define __APP_PLL_LCLK_P0_1_MK 0x00000300 +#define __APP_PLL_LCLK_P0_1_SH 8 +#define __APP_PLL_LCLK_P0_1(_v) ((_v) << __APP_PLL_LCLK_P0_1_SH) +#define __APP_PLL_LCLK_Z0_2_MK 0x000000e0 +#define __APP_PLL_LCLK_Z0_2_SH 5 +#define __APP_PLL_LCLK_Z0_2(_v) ((_v) << __APP_PLL_LCLK_Z0_2_SH) +#define __APP_PLL_LCLK_RSEL200500 0x00000010 +#define __APP_PLL_LCLK_ENARST 0x00000008 +#define __APP_PLL_LCLK_BYPASS 0x00000004 +#define __APP_PLL_LCLK_LRESETN 0x00000002 +#define __APP_PLL_LCLK_ENABLE 0x00000001 +#define APP_PLL_SCLK_CTL_REG 0x00014208 /* cb/ct */ +#define __P_SCLK_PLL_LOCK 0x80000000 +#define __APP_PLL_SCLK_RESET_TIMER_MK 0x000e0000 +#define __APP_PLL_SCLK_RESET_TIMER_SH 17 +#define __APP_PLL_SCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_SCLK_RESET_TIMER_SH) +#define __APP_PLL_SCLK_LOGIC_SOFT_RESET 0x00010000 +#define __APP_PLL_SCLK_CNTLMT0_1_MK 0x0000c000 +#define __APP_PLL_SCLK_CNTLMT0_1_SH 14 +#define __APP_PLL_SCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_SCLK_CNTLMT0_1_SH) +#define __APP_PLL_SCLK_JITLMT0_1_MK 0x00003000 +#define __APP_PLL_SCLK_JITLMT0_1_SH 12 +#define __APP_PLL_SCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_SCLK_JITLMT0_1_SH) +#define __APP_PLL_SCLK_HREF 0x00000800 +#define __APP_PLL_SCLK_HDIV 0x00000400 +#define __APP_PLL_SCLK_P0_1_MK 0x00000300 +#define __APP_PLL_SCLK_P0_1_SH 8 +#define __APP_PLL_SCLK_P0_1(_v) ((_v) << __APP_PLL_SCLK_P0_1_SH) +#define __APP_PLL_SCLK_Z0_2_MK 0x000000e0 +#define __APP_PLL_SCLK_Z0_2_SH 5 +#define __APP_PLL_SCLK_Z0_2(_v) ((_v) << __APP_PLL_SCLK_Z0_2_SH) +#define __APP_PLL_SCLK_RSEL200500 0x00000010 +#define __APP_PLL_SCLK_ENARST 0x00000008 +#define __APP_PLL_SCLK_BYPASS 0x00000004 +#define __APP_PLL_SCLK_LRESETN 0x00000002 +#define __APP_PLL_SCLK_ENABLE 0x00000001 +#define __ENABLE_MAC_AHB_1 0x00800000 /* ct */ +#define __ENABLE_MAC_AHB_0 0x00400000 /* ct */ +#define __ENABLE_MAC_1 0x00200000 /* ct */ +#define __ENABLE_MAC_0 0x00100000 /* ct */ + +#define HOST_SEM0_REG 0x00014230 /* cb/ct */ +#define HOST_SEM1_REG 0x00014234 /* cb/ct */ +#define HOST_SEM2_REG 0x00014238 /* cb/ct */ +#define 
HOST_SEM3_REG 0x0001423c /* cb/ct */ +#define HOST_SEM4_REG 0x00014610 /* cb/ct */ +#define HOST_SEM5_REG 0x00014614 /* cb/ct */ +#define HOST_SEM6_REG 0x00014618 /* cb/ct */ +#define HOST_SEM7_REG 0x0001461c /* cb/ct */ +#define HOST_SEM0_INFO_REG 0x00014240 /* cb/ct */ +#define HOST_SEM1_INFO_REG 0x00014244 /* cb/ct */ +#define HOST_SEM2_INFO_REG 0x00014248 /* cb/ct */ +#define HOST_SEM3_INFO_REG 0x0001424c /* cb/ct */ +#define HOST_SEM4_INFO_REG 0x00014620 /* cb/ct */ +#define HOST_SEM5_INFO_REG 0x00014624 /* cb/ct */ +#define HOST_SEM6_INFO_REG 0x00014628 /* cb/ct */ +#define HOST_SEM7_INFO_REG 0x0001462c /* cb/ct */ + +#define HOSTFN0_LPU0_CMD_STAT 0x00019000 /* cb/ct */ +#define HOSTFN0_LPU1_CMD_STAT 0x00019004 /* cb/ct */ +#define HOSTFN1_LPU0_CMD_STAT 0x00019010 /* cb/ct */ +#define HOSTFN1_LPU1_CMD_STAT 0x00019014 /* cb/ct */ +#define HOSTFN2_LPU0_CMD_STAT 0x00019150 /* ct */ +#define HOSTFN2_LPU1_CMD_STAT 0x00019154 /* ct */ +#define HOSTFN3_LPU0_CMD_STAT 0x00019160 /* ct */ +#define HOSTFN3_LPU1_CMD_STAT 0x00019164 /* ct */ +#define LPU0_HOSTFN0_CMD_STAT 0x00019008 /* cb/ct */ +#define LPU1_HOSTFN0_CMD_STAT 0x0001900c /* cb/ct */ +#define LPU0_HOSTFN1_CMD_STAT 0x00019018 /* cb/ct */ +#define LPU1_HOSTFN1_CMD_STAT 0x0001901c /* cb/ct */ +#define LPU0_HOSTFN2_CMD_STAT 0x00019158 /* ct */ +#define LPU1_HOSTFN2_CMD_STAT 0x0001915c /* ct */ +#define LPU0_HOSTFN3_CMD_STAT 0x00019168 /* ct */ +#define LPU1_HOSTFN3_CMD_STAT 0x0001916c /* ct */ + +#define PSS_CTL_REG 0x00018800 /* cb/ct */ +#define __PSS_I2C_CLK_DIV_MK 0x007f0000 +#define __PSS_I2C_CLK_DIV_SH 16 +#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH) +#define __PSS_LMEM_INIT_DONE 0x00001000 +#define __PSS_LMEM_RESET 0x00000200 +#define __PSS_LMEM_INIT_EN 0x00000100 +#define __PSS_LPU1_RESET 0x00000002 +#define __PSS_LPU0_RESET 0x00000001 +#define PSS_ERR_STATUS_REG 0x00018810 /* cb/ct */ +#define ERR_SET_REG 0x00018818 /* cb/ct */ +#define PSS_GPIO_OUT_REG 0x000188c0 /* cb/ct */ +#define __PSS_GPIO_OUT_REG 0x00000fff +#define PSS_GPIO_OE_REG 0x000188c8 /* cb/ct */ +#define __PSS_GPIO_OE_REG 0x000000ff + +#define HOSTFN0_LPU_MBOX0_0 0x00019200 /* cb/ct */ +#define HOSTFN1_LPU_MBOX0_8 0x00019260 /* cb/ct */ +#define LPU_HOSTFN0_MBOX0_0 0x00019280 /* cb/ct */ +#define LPU_HOSTFN1_MBOX0_8 0x000192e0 /* cb/ct */ +#define HOSTFN2_LPU_MBOX0_0 0x00019400 /* ct */ +#define HOSTFN3_LPU_MBOX0_8 0x00019460 /* ct */ +#define LPU_HOSTFN2_MBOX0_0 0x00019480 /* ct */ +#define LPU_HOSTFN3_MBOX0_8 0x000194e0 /* ct */ + +#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c /* ct */ +#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c /* ct */ +#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c /* ct */ +#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c /* ct */ + +#define MBIST_CTL_REG 0x00014220 /* ct */ +#define __EDRAM_BISTR_START 0x00000004 +#define MBIST_STAT_REG 0x00014224 /* ct */ +#define ETH_MAC_SER_REG 0x00014288 /* ct */ +#define __APP_EMS_CKBUFAMPIN 0x00000020 +#define __APP_EMS_REFCLKSEL 0x00000010 +#define __APP_EMS_CMLCKSEL 0x00000008 +#define __APP_EMS_REFCKBUFEN2 0x00000004 +#define __APP_EMS_REFCKBUFEN1 0x00000002 +#define __APP_EMS_CHANNEL_SEL 0x00000001 +#define FNC_PERS_REG 0x00014604 /* ct */ +#define __F3_FUNCTION_ACTIVE 0x80000000 +#define __F3_FUNCTION_MODE 0x40000000 +#define __F3_PORT_MAP_MK 0x30000000 +#define __F3_PORT_MAP_SH 28 +#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH) +#define __F3_VM_MODE 0x08000000 +#define __F3_INTX_STATUS_MK 0x07000000 +#define __F3_INTX_STATUS_SH 24 +#define __F3_INTX_STATUS(_v) ((_v) << 
__F3_INTX_STATUS_SH) +#define __F2_FUNCTION_ACTIVE 0x00800000 +#define __F2_FUNCTION_MODE 0x00400000 +#define __F2_PORT_MAP_MK 0x00300000 +#define __F2_PORT_MAP_SH 20 +#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH) +#define __F2_VM_MODE 0x00080000 +#define __F2_INTX_STATUS_MK 0x00070000 +#define __F2_INTX_STATUS_SH 16 +#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH) +#define __F1_FUNCTION_ACTIVE 0x00008000 +#define __F1_FUNCTION_MODE 0x00004000 +#define __F1_PORT_MAP_MK 0x00003000 +#define __F1_PORT_MAP_SH 12 +#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH) +#define __F1_VM_MODE 0x00000800 +#define __F1_INTX_STATUS_MK 0x00000700 +#define __F1_INTX_STATUS_SH 8 +#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH) +#define __F0_FUNCTION_ACTIVE 0x00000080 +#define __F0_FUNCTION_MODE 0x00000040 +#define __F0_PORT_MAP_MK 0x00000030 +#define __F0_PORT_MAP_SH 4 +#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH) +#define __F0_VM_MODE 0x00000008 +#define __F0_INTX_STATUS 0x00000007 +enum { + __F0_INTX_STATUS_MSIX = 0x0, + __F0_INTX_STATUS_INTA = 0x1, + __F0_INTX_STATUS_INTB = 0x2, + __F0_INTX_STATUS_INTC = 0x3, + __F0_INTX_STATUS_INTD = 0x4, +}; + +#define OP_MODE 0x0001460c /* ct */ +#define __APP_ETH_CLK_LOWSPEED 0x00000004 +#define __GLOBAL_CORECLK_HALFSPEED 0x00000002 +#define __GLOBAL_FCOE_MODE 0x00000001 +#define FW_INIT_HALT_P0 0x000191ac /* ct */ +#define __FW_INIT_HALT_P 0x00000001 +#define FW_INIT_HALT_P1 0x000191bc /* ct */ +#define PMM_1T_RESET_REG_P0 0x0002381c /* ct */ +#define __PMM_1T_RESET_P 0x00000001 +#define PMM_1T_RESET_REG_P1 0x00023c1c /* ct */ + +/** + * Catapult-2 specific defines + */ +#define CT2_PCI_CPQ_BASE 0x00030000 +#define CT2_PCI_APP_BASE 0x00030100 +#define CT2_PCI_ETH_BASE 0x00030400 + +/* + * APP block registers + */ +#define CT2_HOSTFN_INT_STATUS (CT2_PCI_APP_BASE + 0x00) +#define CT2_HOSTFN_INTR_MASK (CT2_PCI_APP_BASE + 0x04) +#define CT2_HOSTFN_PERSONALITY0 (CT2_PCI_APP_BASE + 0x08) +#define __PME_STATUS_ 0x00200000 +#define __PF_VF_BAR_SIZE_MODE__MK 0x00180000 +#define __PF_VF_BAR_SIZE_MODE__SH 19 +#define __PF_VF_BAR_SIZE_MODE_(_v) ((_v) << __PF_VF_BAR_SIZE_MODE__SH) +#define __FC_LL_PORT_MAP__MK 0x00060000 +#define __FC_LL_PORT_MAP__SH 17 +#define __FC_LL_PORT_MAP_(_v) ((_v) << __FC_LL_PORT_MAP__SH) +#define __PF_VF_ACTIVE_ 0x00010000 +#define __PF_VF_CFG_RDY_ 0x00008000 +#define __PF_VF_ENABLE_ 0x00004000 +#define __PF_DRIVER_ACTIVE_ 0x00002000 +#define __PF_PME_SEND_ENABLE_ 0x00001000 +#define __PF_EXROM_OFFSET__MK 0x00000ff0 +#define __PF_EXROM_OFFSET__SH 4 +#define __PF_EXROM_OFFSET_(_v) ((_v) << __PF_EXROM_OFFSET__SH) +#define __FC_LL_MODE_ 0x00000008 +#define __PF_INTX_PIN_ 0x00000007 +#define CT2_HOSTFN_PERSONALITY1 (CT2_PCI_APP_BASE + 0x0C) +#define __PF_NUM_QUEUES1__MK 0xff000000 +#define __PF_NUM_QUEUES1__SH 24 +#define __PF_NUM_QUEUES1_(_v) ((_v) << __PF_NUM_QUEUES1__SH) +#define __PF_VF_QUE_OFFSET1__MK 0x00ff0000 +#define __PF_VF_QUE_OFFSET1__SH 16 +#define __PF_VF_QUE_OFFSET1_(_v) ((_v) << __PF_VF_QUE_OFFSET1__SH) +#define __PF_VF_NUM_QUEUES__MK 0x0000ff00 +#define __PF_VF_NUM_QUEUES__SH 8 +#define __PF_VF_NUM_QUEUES_(_v) ((_v) << __PF_VF_NUM_QUEUES__SH) +#define __PF_VF_QUE_OFFSET_ 0x000000ff +#define CT2_HOSTFN_PAGE_NUM (CT2_PCI_APP_BASE + 0x18) +#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38) + +/* + * Catapult-2 CPQ block registers + */ +#define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00) +#define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20) +#define CT2_LPU0_HOSTFN_MBOX0 
(CT2_PCI_CPQ_BASE + 0x40) +#define CT2_LPU1_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x60) +#define CT2_HOSTFN_LPU0_CMD_STAT (CT2_PCI_CPQ_BASE + 0x80) +#define CT2_HOSTFN_LPU1_CMD_STAT (CT2_PCI_CPQ_BASE + 0x84) +#define CT2_LPU0_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x88) +#define CT2_LPU1_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x8c) +#define CT2_HOSTFN_LPU0_READ_STAT (CT2_PCI_CPQ_BASE + 0x90) +#define CT2_HOSTFN_LPU1_READ_STAT (CT2_PCI_CPQ_BASE + 0x94) +#define CT2_LPU0_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x98) +#define CT2_LPU1_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x9C) +#define CT2_HOST_SEM0_REG 0x000148f0 +#define CT2_HOST_SEM1_REG 0x000148f4 +#define CT2_HOST_SEM2_REG 0x000148f8 +#define CT2_HOST_SEM3_REG 0x000148fc +#define CT2_HOST_SEM4_REG 0x00014900 +#define CT2_HOST_SEM5_REG 0x00014904 +#define CT2_HOST_SEM6_REG 0x00014908 +#define CT2_HOST_SEM7_REG 0x0001490c +#define CT2_HOST_SEM0_INFO_REG 0x000148b0 +#define CT2_HOST_SEM1_INFO_REG 0x000148b4 +#define CT2_HOST_SEM2_INFO_REG 0x000148b8 +#define CT2_HOST_SEM3_INFO_REG 0x000148bc +#define CT2_HOST_SEM4_INFO_REG 0x000148c0 +#define CT2_HOST_SEM5_INFO_REG 0x000148c4 +#define CT2_HOST_SEM6_INFO_REG 0x000148c8 +#define CT2_HOST_SEM7_INFO_REG 0x000148cc + +#define CT2_APP_PLL_LCLK_CTL_REG 0x00014808 +#define __APP_LPUCLK_HALFSPEED 0x40000000 +#define __APP_PLL_LCLK_LOAD 0x20000000 +#define __APP_PLL_LCLK_FBCNT_MK 0x1fe00000 +#define __APP_PLL_LCLK_FBCNT_SH 21 +#define __APP_PLL_LCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH) +enum { + __APP_PLL_LCLK_FBCNT_425_MHZ = 6, + __APP_PLL_LCLK_FBCNT_468_MHZ = 4, +}; +#define __APP_PLL_LCLK_EXTFB 0x00000800 +#define __APP_PLL_LCLK_ENOUTS 0x00000400 +#define __APP_PLL_LCLK_RATE 0x00000010 +#define CT2_APP_PLL_SCLK_CTL_REG 0x0001480c +#define __P_SCLK_PLL_LOCK 0x80000000 +#define __APP_PLL_SCLK_REFCLK_SEL 0x40000000 +#define __APP_PLL_SCLK_CLK_DIV2 0x20000000 +#define __APP_PLL_SCLK_LOAD 0x10000000 +#define __APP_PLL_SCLK_FBCNT_MK 0x0ff00000 +#define __APP_PLL_SCLK_FBCNT_SH 20 +#define __APP_PLL_SCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH) +enum { + __APP_PLL_SCLK_FBCNT_NORM = 6, + __APP_PLL_SCLK_FBCNT_10G_FC = 10, +}; +#define __APP_PLL_SCLK_EXTFB 0x00000800 +#define __APP_PLL_SCLK_ENOUTS 0x00000400 +#define __APP_PLL_SCLK_RATE 0x00000010 +#define CT2_PCIE_MISC_REG 0x00014804 +#define __ETH_CLK_ENABLE_PORT1 0x00000010 +#define CT2_CHIP_MISC_PRG 0x000148a4 +#define __ETH_CLK_ENABLE_PORT0 0x00004000 +#define __APP_LPU_SPEED 0x00000002 +#define CT2_MBIST_STAT_REG 0x00014818 +#define CT2_MBIST_CTL_REG 0x0001481c +#define CT2_PMM_1T_CONTROL_REG_P0 0x0002381c +#define __PMM_1T_PNDB_P 0x00000002 +#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c +#define CT2_WGN_STATUS 0x00014990 +#define __A2T_AHB_LOAD 0x00000800 +#define __WGN_READY 0x00000400 +#define __GLBL_PF_VF_CFG_RDY 0x00000200 +#define CT2_NFC_STS_REG 0x00027410 +#define CT2_NFC_CSR_CLR_REG 0x00027420 +#define CT2_NFC_CSR_SET_REG 0x00027424 +#define __HALT_NFC_CONTROLLER 0x00000002 +#define __NFC_CONTROLLER_HALTED 0x00001000 +#define CT2_RSC_GPR15_REG 0x0002765c +#define CT2_CSI_FW_CTL_REG 0x00027080 +#define CT2_CSI_FW_CTL_SET_REG 0x00027088 +#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000 + +#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0 +#define __CSI_MAC_RESET 0x00000010 +#define __CSI_MAC_AHB_RESET 0x00000008 +#define CT2_CSI_MAC1_CONTROL_REG 0x000270d4 +#define CT2_CSI_MAC_CONTROL_REG(__n) \ + (CT2_CSI_MAC0_CONTROL_REG + \ + (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG)) + +#define CT2_NFC_FLASH_STS_REG 0x00014834 +#define 
__FLASH_PLL_INIT_AND_RESET_IN_PROGRESS 0x00000020 +/* + * Name semaphore registers based on usage + */ +#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG +#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG +#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG +#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG +#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG +#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG + +/* + * CT2 semaphore register locations changed + */ +#define CT2_BFA_IOC0_HBEAT_REG CT2_HOST_SEM0_INFO_REG +#define CT2_BFA_IOC0_STATE_REG CT2_HOST_SEM1_INFO_REG +#define CT2_BFA_IOC1_HBEAT_REG CT2_HOST_SEM2_INFO_REG +#define CT2_BFA_IOC1_STATE_REG CT2_HOST_SEM3_INFO_REG +#define CT2_BFA_FW_USE_COUNT CT2_HOST_SEM4_INFO_REG +#define CT2_BFA_IOC_FAIL_SYNC CT2_HOST_SEM5_INFO_REG + +#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) +#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) + +/* + * And corresponding host interrupt status bit field defines + */ +#define __HFN_INT_CPE_Q0 0x00000001U +#define __HFN_INT_CPE_Q1 0x00000002U +#define __HFN_INT_CPE_Q2 0x00000004U +#define __HFN_INT_CPE_Q3 0x00000008U +#define __HFN_INT_CPE_Q4 0x00000010U +#define __HFN_INT_CPE_Q5 0x00000020U +#define __HFN_INT_CPE_Q6 0x00000040U +#define __HFN_INT_CPE_Q7 0x00000080U +#define __HFN_INT_RME_Q0 0x00000100U +#define __HFN_INT_RME_Q1 0x00000200U +#define __HFN_INT_RME_Q2 0x00000400U +#define __HFN_INT_RME_Q3 0x00000800U +#define __HFN_INT_RME_Q4 0x00001000U +#define __HFN_INT_RME_Q5 0x00002000U +#define __HFN_INT_RME_Q6 0x00004000U +#define __HFN_INT_RME_Q7 0x00008000U +#define __HFN_INT_ERR_EMC 0x00010000U +#define __HFN_INT_ERR_LPU0 0x00020000U +#define __HFN_INT_ERR_LPU1 0x00040000U +#define __HFN_INT_ERR_PSS 0x00080000U +#define __HFN_INT_MBOX_LPU0 0x00100000U +#define __HFN_INT_MBOX_LPU1 0x00200000U +#define __HFN_INT_MBOX1_LPU0 0x00400000U +#define __HFN_INT_MBOX1_LPU1 0x00800000U +#define __HFN_INT_LL_HALT 0x01000000U +#define __HFN_INT_CPE_MASK 0x000000ffU +#define __HFN_INT_RME_MASK 0x0000ff00U +#define __HFN_INT_ERR_MASK \ + (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | \ + __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT) +#define __HFN_INT_FN0_MASK \ + (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \ + __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \ + __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0) +#define __HFN_INT_FN1_MASK \ + (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \ + __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \ + __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1) + +/* + * Host interrupt status defines for catapult-2 + */ +#define __HFN_INT_MBOX_LPU0_CT2 0x00010000U +#define __HFN_INT_MBOX_LPU1_CT2 0x00020000U +#define __HFN_INT_ERR_PSS_CT2 0x00040000U +#define __HFN_INT_ERR_LPU0_CT2 0x00080000U +#define __HFN_INT_ERR_LPU1_CT2 0x00100000U +#define __HFN_INT_CPQ_HALT_CT2 0x00200000U +#define __HFN_INT_ERR_WGN_CT2 0x00400000U +#define __HFN_INT_ERR_LEHRX_CT2 0x00800000U +#define __HFN_INT_ERR_LEHTX_CT2 0x01000000U +#define __HFN_INT_ERR_MASK_CT2 \ + (__HFN_INT_ERR_PSS_CT2 | __HFN_INT_ERR_LPU0_CT2 | \ + __HFN_INT_ERR_LPU1_CT2 | __HFN_INT_CPQ_HALT_CT2 | \ + __HFN_INT_ERR_WGN_CT2 | __HFN_INT_ERR_LEHRX_CT2 | \ + __HFN_INT_ERR_LEHTX_CT2) +#define __HFN_INT_FN0_MASK_CT2 \ + (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \ + __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \ + __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0_CT2) +#define __HFN_INT_FN1_MASK_CT2 \ + (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | 
__HFN_INT_CPE_Q6 | \ + __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \ + __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1_CT2) + +/* + * asic memory map. + */ +#define PSS_SMEM_PAGE_START 0x8000 +#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15)) +#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff) + +#endif /* __BFI_REG_H__ */ diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h new file mode 100644 index 000000000..698f5ebaa --- /dev/null +++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h @@ -0,0 +1,1004 @@ +/* 57xx_hsi_bnx2fc.h: QLogic Linux FCoE offload driver. + * Handles operations such as session offload/upload etc, and manages + * session resources such as connection id and qp resources. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + */ + +#ifndef __57XX_FCOE_HSI_LINUX_LE__ +#define __57XX_FCOE_HSI_LINUX_LE__ + +/* + * common data for all protocols + */ +struct b577xx_doorbell_hdr { + u8 header; +#define B577XX_DOORBELL_HDR_RX (0x1<<0) +#define B577XX_DOORBELL_HDR_RX_SHIFT 0 +#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1) +#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1 +#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2) +#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2 +#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4) +#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4 +}; + +/* + * doorbell message sent to the chip + */ +struct b577xx_doorbell { +#if defined(__BIG_ENDIAN) + u16 zero_fill2; + u8 zero_fill1; + struct b577xx_doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct b577xx_doorbell_hdr header; + u8 zero_fill1; + u16 zero_fill2; +#endif +}; + + + +/* + * doorbell message sent to the chip + */ +struct b577xx_doorbell_set_prod { +#if defined(__BIG_ENDIAN) + u16 prod; + u8 zero_fill1; + struct b577xx_doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct b577xx_doorbell_hdr header; + u8 zero_fill1; + u16 prod; +#endif +}; + + +struct regpair { + __le32 lo; + __le32 hi; +}; + + +/* + * ABTS info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_abts_info { + __le16 aborted_task_id; + __le16 reserved0; + __le32 reserved1; +}; + + +/* + * Fixed size structure in order to plant it in Union structure + * $$KEEP_ENDIANNESS$$ + */ +struct fcoe_abts_rsp_union { + u8 r_ctl; + u8 rsrv[3]; + __le32 abts_rsp_payload[7]; +}; + + +/* + * 4 regs size $$KEEP_ENDIANNESS$$ + */ +struct fcoe_bd_ctx { + __le32 buf_addr_hi; + __le32 buf_addr_lo; + __le16 buf_len; + __le16 rsrv0; + __le16 flags; + __le16 rsrv1; +}; + + +/* + * FCoE cached sges context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_cached_sge_ctx { + struct regpair cur_buf_addr; + __le16 cur_buf_rem; + __le16 second_buf_rem; + struct regpair second_buf_addr; +}; + + +/* + * Cleanup info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_cleanup_info { + __le16 cleaned_task_id; + __le16 rolled_tx_seq_cnt; + __le32 rolled_tx_data_offset; +}; + + +/* + * Fcp RSP flags $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_rsp_flags { + u8 flags; +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0) +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0 +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1) +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2) +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 
2 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3) +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3 +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4) +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4 +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5) +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5 +}; + +/* + * Fcp RSP payload $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_rsp_payload { + struct regpair reserved0; + __le32 fcp_resid; + u8 scsi_status_code; + struct fcoe_fcp_rsp_flags fcp_flags; + __le16 retry_delay_timer; + __le32 fcp_rsp_len; + __le32 fcp_sns_len; +}; + +/* + * Fixed size structure in order to plant it in Union structure + * $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_rsp_union { + struct fcoe_fcp_rsp_payload payload; + struct regpair reserved0; +}; + +/* + * FC header $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fc_hdr { + u8 s_id[3]; + u8 cs_ctl; + u8 d_id[3]; + u8 r_ctl; + __le16 seq_cnt; + u8 df_ctl; + u8 seq_id; + u8 f_ctl[3]; + u8 type; + __le32 parameters; + __le16 rx_id; + __le16 ox_id; +}; + +/* + * FC header union $$KEEP_ENDIANNESS$$ + */ +struct fcoe_mp_rsp_union { + struct fcoe_fc_hdr fc_hdr; + __le32 mp_payload_len; + __le32 rsrv; +}; + +/* + * Completion information $$KEEP_ENDIANNESS$$ + */ +union fcoe_comp_flow_info { + struct fcoe_fcp_rsp_union fcp_rsp; + struct fcoe_abts_rsp_union abts_rsp; + struct fcoe_mp_rsp_union mp_rsp; + __le32 opaque[8]; +}; + + +/* + * External ABTS info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_abts_info { + __le32 rsrv0[6]; + struct fcoe_abts_info ctx; +}; + + +/* + * External cleanup info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_cleanup_info { + __le32 rsrv0[6]; + struct fcoe_cleanup_info ctx; +}; + + +/* + * Fcoe FW Tx sequence context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fw_tx_seq_ctx { + __le32 data_offset; + __le16 seq_cnt; + __le16 rsrv0; +}; + +/* + * Fcoe external FW Tx sequence context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_fw_tx_seq_ctx { + __le32 rsrv0[6]; + struct fcoe_fw_tx_seq_ctx ctx; +}; + + +/* + * FCoE multiple sges context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_mul_sges_ctx { + struct regpair cur_sge_addr; + __le16 cur_sge_off; + u8 cur_sge_idx; + u8 sgl_size; +}; + +/* + * FCoE external multiple sges context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_mul_sges_ctx { + struct fcoe_mul_sges_ctx mul_sgl; + struct regpair rsrv0; +}; + + +/* + * FCP CMD payload $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_cmd_payload { + __le32 opaque[8]; +}; + + + + + +/* + * Fcp xfr rdy payload $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_xfr_rdy_payload { + __le32 burst_len; + __le32 data_ro; +}; + + +/* + * FC frame $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fc_frame { + struct fcoe_fc_hdr fc_hdr; + __le32 reserved0[2]; +}; + + + + +/* + * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$ + */ +union fcoe_kcqe_params { + __le32 reserved0[4]; +}; + +/* + * FCoE KCQ CQE $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kcqe { + __le32 fcoe_conn_id; + __le32 completion_status; + __le32 fcoe_conn_context_id; + union fcoe_kcqe_params params; + __le16 qe_self_seq; + u8 op_code; + u8 flags; +#define FCOE_KCQE_RESERVED0 (0x7<<0) +#define FCOE_KCQE_RESERVED0_SHIFT 0 +#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3) +#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3 +#define FCOE_KCQE_LAYER_CODE (0x7<<4) +#define FCOE_KCQE_LAYER_CODE_SHIFT 4 +#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) +#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 +}; + + + +/* + * FCoE KWQE header $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_header { + u8 op_code; + u8 flags; +#define FCOE_KWQE_HEADER_RESERVED0 
(0xF<<0) +#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0 +#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4) +#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 +#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) +#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 +}; + +/* + * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_init1 { + __le16 num_tasks; + struct fcoe_kwqe_header hdr; + __le32 task_list_pbl_addr_lo; + __le32 task_list_pbl_addr_hi; + __le32 dummy_buffer_addr_lo; + __le32 dummy_buffer_addr_hi; + __le16 sq_num_wqes; + __le16 rq_num_wqes; + __le16 rq_buffer_log_size; + __le16 cq_num_wqes; + __le16 mtu; + u8 num_sessions_log; + u8 flags; +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0 +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4) +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 +#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) +#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 +}; + +/* + * FCoE firmware init request 2 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_init2 { + u8 hsi_major_version; + u8 hsi_minor_version; + struct fcoe_kwqe_header hdr; + __le32 hash_tbl_pbl_addr_lo; + __le32 hash_tbl_pbl_addr_hi; + __le32 t2_hash_tbl_addr_lo; + __le32 t2_hash_tbl_addr_hi; + __le32 t2_ptr_hash_tbl_addr_lo; + __le32 t2_ptr_hash_tbl_addr_hi; + __le32 free_list_count; +}; + +/* + * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_init3 { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 error_bit_map_lo; + __le32 error_bit_map_hi; + u8 perf_config; + u8 reserved21[3]; + __le32 reserved2[4]; +}; + +/* + * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload1 { + __le16 fcoe_conn_id; + struct fcoe_kwqe_header hdr; + __le32 sq_addr_lo; + __le32 sq_addr_hi; + __le32 rq_pbl_addr_lo; + __le32 rq_pbl_addr_hi; + __le32 rq_first_pbe_addr_lo; + __le32 rq_first_pbe_addr_hi; + __le16 rq_prod; + __le16 reserved0; +}; + +/* + * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload2 { + __le16 tx_max_fc_pay_len; + struct fcoe_kwqe_header hdr; + __le32 cq_addr_lo; + __le32 cq_addr_hi; + __le32 xferq_addr_lo; + __le32 xferq_addr_hi; + __le32 conn_db_addr_lo; + __le32 conn_db_addr_hi; + __le32 reserved1; +}; + +/* + * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload3 { + __le16 vlan_tag; +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) +#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 + struct fcoe_kwqe_header hdr; + u8 s_id[3]; + u8 tx_max_conc_seqs_c3; + u8 d_id[3]; + u8 flags; +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0 +#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1) +#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3 +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4) +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4 +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5) +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5 +#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6) +#define 
FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 + __le32 reserved; + __le32 confq_first_pbe_addr_lo; + __le32 confq_first_pbe_addr_hi; + __le16 tx_total_conc_seqs; + __le16 rx_max_fc_pay_len; + __le16 rx_total_conc_seqs; + u8 rx_max_conc_seqs_c3; + u8 rx_open_seqs_exch_c3; +}; + +/* + * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload4 { + u8 e_d_tov_timer_val; + u8 reserved2; + struct fcoe_kwqe_header hdr; + u8 src_mac_addr_lo[2]; + u8 src_mac_addr_mid[2]; + u8 src_mac_addr_hi[2]; + u8 dst_mac_addr_hi[2]; + u8 dst_mac_addr_lo[2]; + u8 dst_mac_addr_mid[2]; + __le32 lcq_addr_lo; + __le32 lcq_addr_hi; + __le32 confq_pbl_base_addr_lo; + __le32 confq_pbl_base_addr_hi; +}; + +/* + * FCoE connection enable request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_enable_disable { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + u8 src_mac_addr_lo[2]; + u8 src_mac_addr_mid[2]; + u8 src_mac_addr_hi[2]; + u16 vlan_tag; +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 + u8 dst_mac_addr_lo[2]; + u8 dst_mac_addr_mid[2]; + u8 dst_mac_addr_hi[2]; + __le16 reserved1; + u8 s_id[3]; + u8 vlan_flag; + u8 d_id[3]; + u8 reserved3; + __le32 context_id; + __le32 conn_id; + __le32 reserved4; +}; + +/* + * FCoE connection destroy request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_destroy { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 context_id; + __le32 conn_id; + __le32 reserved1[5]; +}; + +/* + * FCoe destroy request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_destroy { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 reserved1[7]; +}; + +/* + * FCoe statistics request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_stat { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 stat_params_addr_lo; + __le32 stat_params_addr_hi; + __le32 reserved1[5]; +}; + +/* + * FCoE KWQ WQE $$KEEP_ENDIANNESS$$ + */ +union fcoe_kwqe { + struct fcoe_kwqe_init1 init1; + struct fcoe_kwqe_init2 init2; + struct fcoe_kwqe_init3 init3; + struct fcoe_kwqe_conn_offload1 conn_offload1; + struct fcoe_kwqe_conn_offload2 conn_offload2; + struct fcoe_kwqe_conn_offload3 conn_offload3; + struct fcoe_kwqe_conn_offload4 conn_offload4; + struct fcoe_kwqe_conn_enable_disable conn_enable_disable; + struct fcoe_kwqe_conn_destroy conn_destroy; + struct fcoe_kwqe_destroy destroy; + struct fcoe_kwqe_stat statistics; +}; + + + + + + + + + + + + + + + + +/* + * TX SGL context $$KEEP_ENDIANNESS$$ + */ +union fcoe_sgl_union_ctx { + struct fcoe_cached_sge_ctx cached_sge; + struct fcoe_ext_mul_sges_ctx sgl; + __le32 opaque[5]; +}; + +/* + * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$ + */ +struct fcoe_read_flow_info { + union fcoe_sgl_union_ctx sgl_ctx; + __le32 rsrv0[3]; +}; + + +/* + * Fcoe stat context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_s_stat_ctx { + u8 flags; +#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0) +#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0 +#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1) +#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1 +#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2) +#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2 +#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3) +#define 
FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3 +#define FCOE_S_STAT_CTX_P_RJT (0x1<<4) +#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4 +#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5) +#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5 +#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6) +#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6 +}; + +/* + * Fcoe rx seq context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_rx_seq_ctx { + u8 seq_id; + struct fcoe_s_stat_ctx s_stat; + __le16 seq_cnt; + __le32 low_exp_ro; + __le32 high_exp_ro; +}; + + +/* + * Fcoe rx_wr union context $$KEEP_ENDIANNESS$$ + */ +union fcoe_rx_wr_union_ctx { + struct fcoe_read_flow_info read_info; + union fcoe_comp_flow_info comp_info; + __le32 opaque[8]; +}; + + + +/* + * FCoE SQ element $$KEEP_ENDIANNESS$$ + */ +struct fcoe_sqe { + __le16 wqe; +#define FCOE_SQE_TASK_ID (0x7FFF<<0) +#define FCOE_SQE_TASK_ID_SHIFT 0 +#define FCOE_SQE_TOGGLE_BIT (0x1<<15) +#define FCOE_SQE_TOGGLE_BIT_SHIFT 15 +}; + + + +/* + * 14 regs $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_tx_only { + union fcoe_sgl_union_ctx sgl_ctx; + __le32 rsrv0; +}; + +/* + * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$ + */ +union fcoe_tx_wr_rx_rd_union_ctx { + struct fcoe_fc_frame tx_frame; + struct fcoe_fcp_cmd_payload fcp_cmd; + struct fcoe_ext_cleanup_info cleanup; + struct fcoe_ext_abts_info abts; + struct fcoe_ext_fw_tx_seq_ctx tx_seq; + __le32 opaque[8]; +}; + +/* + * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_tx_wr_rx_rd_const { + u8 init_flags; +#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0 +#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3) +#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3 +#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4) +#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4 +#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5) +#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5 +#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7) +#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7 + u8 tx_flags; +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0 +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1 +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5) +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5 +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6 +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2 (0x1<<7) +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2_SHIFT 7 + __le16 rsrv3; + __le32 verify_tx_seq; +}; + +/* + * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_tx_wr_rx_rd { + union fcoe_tx_wr_rx_rd_union_ctx union_ctx; + struct fcoe_tce_tx_wr_rx_rd_const const_ctx; +}; + +/* + * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_wr_tx_rd_const { + __le32 data_2_trns; + __le32 init_flags; +#define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0) +#define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0 +#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24) +#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24 +}; + +/* + * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_wr_tx_rd_var { + __le16 rx_flags; +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0 +#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4) +#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4 +#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7) +#define 
FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8 +#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME (0x1<<12) +#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15 + __le16 rx_id; + struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy; +}; + +/* + * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_wr_tx_rd { + struct fcoe_tce_rx_wr_tx_rd_const const_ctx; + struct fcoe_tce_rx_wr_tx_rd_var var_ctx; +}; + +/* + * tce_rx_only $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_only { + struct fcoe_rx_seq_ctx rx_seq_ctx; + union fcoe_rx_wr_union_ctx union_ctx; +}; + +/* + * task_ctx_entry $$KEEP_ENDIANNESS$$ + */ +struct fcoe_task_ctx_entry { + struct fcoe_tce_tx_only txwr_only; + struct fcoe_tce_tx_wr_rx_rd txwr_rxrd; + struct fcoe_tce_rx_wr_tx_rd rxwr_txrd; + struct fcoe_tce_rx_only rxwr_only; +}; + + + + + + + + + + +/* + * FCoE XFRQ element $$KEEP_ENDIANNESS$$ + */ +struct fcoe_xfrqe { + __le16 wqe; +#define FCOE_XFRQE_TASK_ID (0x7FFF<<0) +#define FCOE_XFRQE_TASK_ID_SHIFT 0 +#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15) +#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15 +}; + + +/* + * fcoe rx doorbell message sent to the chip $$KEEP_ENDIANNESS$$ + */ +struct b577xx_fcoe_rx_doorbell { + struct b577xx_doorbell_hdr hdr; + u8 params; +#define B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM (0x1F<<0) +#define B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT 0 +#define B577XX_FCOE_RX_DOORBELL_OPCODE (0x7<<5) +#define B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT 5 + __le16 doorbell_cq_cons; +}; + + +/* + * FCoE CONFQ element $$KEEP_ENDIANNESS$$ + */ +struct fcoe_confqe { + __le16 ox_id; + __le16 rx_id; + __le32 param; +}; + + +/* + * FCoE connection data base + */ +struct fcoe_conn_db { +#if defined(__BIG_ENDIAN) + u16 rsrv0; + u16 rq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 rq_prod; + u16 rsrv0; +#endif + u32 rsrv1; + struct regpair cq_arm; +}; + + +/* + * FCoE CQ element $$KEEP_ENDIANNESS$$ + */ +struct fcoe_cqe { + __le16 wqe; +#define FCOE_CQE_CQE_INFO (0x3FFF<<0) +#define FCOE_CQE_CQE_INFO_SHIFT 0 +#define FCOE_CQE_CQE_TYPE (0x1<<14) +#define FCOE_CQE_CQE_TYPE_SHIFT 14 +#define FCOE_CQE_TOGGLE_BIT (0x1<<15) +#define FCOE_CQE_TOGGLE_BIT_SHIFT 15 +}; + + +/* + * FCoE error/warning reporting entry $$KEEP_ENDIANNESS$$ + */ +struct fcoe_partial_err_report_entry { + __le32 err_warn_bitmap_lo; + __le32 err_warn_bitmap_hi; + __le32 tx_buf_off; + __le32 rx_buf_off; +}; + +/* + * FCoE error/warning reporting entry $$KEEP_ENDIANNESS$$ + */ +struct fcoe_err_report_entry { + struct fcoe_partial_err_report_entry data; + struct fcoe_fc_hdr fc_hdr; +}; + + +/* + * FCoE hash table entry (32 bytes) $$KEEP_ENDIANNESS$$ + */ +struct fcoe_hash_table_entry { + u8 s_id_0; + u8 s_id_1; + u8 s_id_2; + u8 d_id_0; + u8 d_id_1; + u8 d_id_2; + __le16 dst_mac_addr_hi; + __le16 dst_mac_addr_mid; + __le16 dst_mac_addr_lo; + __le16 src_mac_addr_hi; + __le16 vlan_id; + __le16 src_mac_addr_lo; + __le16 src_mac_addr_mid; + u8 vlan_flag; + u8 reserved0; + __le16 reserved1; + __le32 reserved2; + __le32 field_id; +#define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0) +#define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0 +#define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24) +#define 
FCOE_HASH_TABLE_ENTRY_RESERVED3_SHIFT 24 +#define FCOE_HASH_TABLE_ENTRY_VALID (0x1<<31) +#define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31 +}; + + +/* + * FCoE LCQ element $$KEEP_ENDIANNESS$$ + */ +struct fcoe_lcqe { + __le32 wqe; +#define FCOE_LCQE_TASK_ID (0xFFFF<<0) +#define FCOE_LCQE_TASK_ID_SHIFT 0 +#define FCOE_LCQE_LCQE_TYPE (0xFF<<16) +#define FCOE_LCQE_LCQE_TYPE_SHIFT 16 +#define FCOE_LCQE_RESERVED (0xFF<<24) +#define FCOE_LCQE_RESERVED_SHIFT 24 +}; + + + +/* + * FCoE pending work request CQE $$KEEP_ENDIANNESS$$ + */ +struct fcoe_pend_wq_cqe { + __le16 wqe; +#define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0) +#define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0 +#define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14) +#define FCOE_PEND_WQ_CQE_CQE_TYPE_SHIFT 14 +#define FCOE_PEND_WQ_CQE_TOGGLE_BIT (0x1<<15) +#define FCOE_PEND_WQ_CQE_TOGGLE_BIT_SHIFT 15 +}; + + +/* + * FCoE RX statistics parameters section#0 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_rx_stat_params_section0 { + __le32 fcoe_rx_pkt_cnt; + __le32 fcoe_rx_byte_cnt; +}; + + +/* + * FCoE RX statistics parameters section#1 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_rx_stat_params_section1 { + __le32 fcoe_ver_cnt; + __le32 fcoe_rx_drop_pkt_cnt; +}; + + +/* + * FCoE RX statistics parameters section#2 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_rx_stat_params_section2 { + __le32 fc_crc_cnt; + __le32 eofa_del_cnt; + __le32 miss_frame_cnt; + __le32 seq_timeout_cnt; + __le32 drop_seq_cnt; + __le32 fcoe_rx_drop_pkt_cnt; + __le32 fcp_rx_pkt_cnt; + __le32 reserved0; +}; + + +/* + * FCoE TX statistics parameters $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tx_stat_params { + __le32 fcoe_tx_pkt_cnt; + __le32 fcoe_tx_byte_cnt; + __le32 fcp_tx_pkt_cnt; + __le32 reserved0; +}; + +/* + * FCoE statistics parameters $$KEEP_ENDIANNESS$$ + */ +struct fcoe_statistics_params { + struct fcoe_tx_stat_params tx_stat; + struct fcoe_rx_stat_params_section0 rx_stat0; + struct fcoe_rx_stat_params_section1 rx_stat1; + struct fcoe_rx_stat_params_section2 rx_stat2; +}; + + +/* + * FCoE t2 hash table entry (64 bytes) $$KEEP_ENDIANNESS$$ + */ +struct fcoe_t2_hash_table_entry { + struct fcoe_hash_table_entry data; + struct regpair next; + struct regpair reserved0[3]; +}; + + + +/* + * FCoE unsolicited CQE $$KEEP_ENDIANNESS$$ + */ +struct fcoe_unsolicited_cqe { + __le16 wqe; +#define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0) +#define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0 +#define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2) +#define FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT 2 +#define FCOE_UNSOLICITED_CQE_CQE_TYPE (0x1<<14) +#define FCOE_UNSOLICITED_CQE_CQE_TYPE_SHIFT 14 +#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT (0x1<<15) +#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15 +}; + +#endif /* __57XX_FCOE_HSI_LINUX_LE__ */ diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig new file mode 100644 index 000000000..ecdc0f0f4 --- /dev/null +++ b/drivers/scsi/bnx2fc/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only +config SCSI_BNX2X_FCOE + tristate "QLogic FCoE offload support" + depends on PCI + depends on (IPV6 || IPV6=n) + depends on LIBFC + depends on LIBFCOE + depends on MMU + select NETDEVICES + select ETHERNET + select NET_VENDOR_BROADCOM + select CNIC + help + This driver supports FCoE offload for the QLogic devices. 
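Throughout the 57xx HSI header above, every sub-field of a hardware word is described by an already-shifted mask (for example FCOE_CQE_CQE_INFO is 0x3FFF<<0) together with a matching *_SHIFT constant. Below is a minimal sketch of how a consumer is expected to use such pairs, taking struct fcoe_cqe and struct fcoe_sqe as examples. These are hypothetical helpers, assuming the defines from 57xx_hsi_bnx2fc.h are in scope; a real consumer would also convert the little-endian words with le16_to_cpu()/cpu_to_le16().

/* Extract: mask first, then shift the field down to bit 0. */
static inline unsigned int fcoe_cqe_info(unsigned int wqe)
{
	return (wqe & FCOE_CQE_CQE_INFO) >> FCOE_CQE_CQE_INFO_SHIFT;
}

static inline unsigned int fcoe_cqe_type(unsigned int wqe)
{
	return (wqe & FCOE_CQE_CQE_TYPE) >> FCOE_CQE_CQE_TYPE_SHIFT;
}

static inline unsigned int fcoe_cqe_toggle(unsigned int wqe)
{
	return (wqe & FCOE_CQE_TOGGLE_BIT) >> FCOE_CQE_TOGGLE_BIT_SHIFT;
}

/* Compose: shift the value into place, then clamp it with the mask. */
static inline unsigned int fcoe_sqe_build(unsigned int task_id,
					  unsigned int toggle)
{
	return ((task_id << FCOE_SQE_TASK_ID_SHIFT) & FCOE_SQE_TASK_ID) |
	       ((toggle << FCOE_SQE_TOGGLE_BIT_SHIFT) & FCOE_SQE_TOGGLE_BIT);
}

The same mask-then-shift pattern applies to every VALUE/SHIFT pair in these headers, including the doorbell, KCQE/KWQE and task-context flag fields.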
diff --git a/drivers/scsi/bnx2fc/Makefile b/drivers/scsi/bnx2fc/Makefile new file mode 100644 index 000000000..1d72e279a --- /dev/null +++ b/drivers/scsi/bnx2fc/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o + +bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o \ + bnx2fc_debug.o diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h new file mode 100644 index 000000000..046247420 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -0,0 +1,608 @@ +/* bnx2fc.h: QLogic Linux FCoE offload driver. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#ifndef _BNX2FC_H_ +#define _BNX2FC_H_ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "57xx_hsi_bnx2fc.h" +#include "../../net/ethernet/broadcom/cnic_if.h" +#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h" +#include "bnx2fc_constants.h" + +#define BNX2FC_NAME "bnx2fc" +#define BNX2FC_VERSION "2.12.13" + +#define PFX "bnx2fc: " + +#define BCM_CHIP_LEN 16 + +#define BNX2X_DOORBELL_PCI_BAR 2 + +#define BNX2FC_MAX_BD_LEN 0xffff +#define BNX2FC_BD_SPLIT_SZ 0xffff +#define BNX2FC_MAX_BDS_PER_CMD 255 +#define BNX2FC_FW_MAX_BDS_PER_CMD 255 + +#define BNX2FC_SQ_WQES_MAX 256 + +#define BNX2FC_SCSI_MAX_SQES ((3 * BNX2FC_SQ_WQES_MAX) / 8) +#define BNX2FC_TM_MAX_SQES ((BNX2FC_SQ_WQES_MAX) / 2) +#define BNX2FC_ELS_MAX_SQES (BNX2FC_TM_MAX_SQES - 1) + +#define BNX2FC_RQ_WQES_MAX 16 +#define BNX2FC_CQ_WQES_MAX (BNX2FC_SQ_WQES_MAX + BNX2FC_RQ_WQES_MAX) + +#define BNX2FC_NUM_MAX_SESS 1024 +#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS)) + +#define BNX2FC_MAX_NPIV 256 + +#define BNX2FC_MIN_PAYLOAD 256 +#define BNX2FC_MAX_PAYLOAD 2048 +#define BNX2FC_MFS \ + (BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header)) +#define BNX2FC_MINI_JUMBO_MTU 2500 + + +#define BNX2FC_RQ_BUF_SZ 256 +#define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ)) + +#define BNX2FC_SQ_WQE_SIZE (sizeof(struct fcoe_sqe)) +#define BNX2FC_CQ_WQE_SIZE (sizeof(struct fcoe_cqe)) +#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ) +#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe)) +#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) +#define BNX2X_DB_SHIFT 3 + +#define BNX2FC_TASK_SIZE 128 +#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) + +#define BNX2FC_MAX_ROWS_IN_HASH_TBL 8 +#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024) + +#define BNX2FC_MAX_SEQS 255 +#define BNX2FC_MAX_RETRY_CNT 3 +#define BNX2FC_MAX_RPORT_RETRY_CNT 255 + +#define BNX2FC_READ (1 << 1) +#define BNX2FC_WRITE (1 << 0) + +#define BNX2FC_MIN_XID 0 +#define FCOE_MAX_NUM_XIDS 0x2000 +#define FCOE_MAX_XID_OFFSET (FCOE_MAX_NUM_XIDS - 1) +#define FCOE_XIDS_PER_CPU_OFFSET ((512 * nr_cpu_ids) - 1) +#define BNX2FC_MAX_LUN 0xFFFF 
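For orientation, the per-session queue budget defined above works out as follows: of the 256 send-queue WQEs (BNX2FC_SQ_WQES_MAX), 3/8 are reserved for SCSI commands (96), half for task management (128) and one less than that for ELS frames (127), while the completion queue is sized for the SQ plus the 16 RQ WQEs (272). The short standalone check below recomputes those derived values; it uses local names and is only a sanity-check sketch, not part of the driver.

/* Recompute the derived limits from BNX2FC_SQ_WQES_MAX = 256, RQ = 16. */
enum { SQ_WQES = 256, RQ_WQES = 16 };
_Static_assert((3 * SQ_WQES) / 8 == 96,  "BNX2FC_SCSI_MAX_SQES");
_Static_assert(SQ_WQES / 2 == 128,       "BNX2FC_TM_MAX_SQES");
_Static_assert(SQ_WQES / 2 - 1 == 127,   "BNX2FC_ELS_MAX_SQES");
_Static_assert(SQ_WQES + RQ_WQES == 272, "BNX2FC_CQ_WQES_MAX");

Similarly, BNX2FC_MFS adds the 24-byte FC frame header to the 2048-byte maximum payload, giving 2072 bytes, which fits comfortably inside the 2500-byte mini-jumbo MTU used by the driver.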
+#define BNX2FC_MAX_FCP_TGT 256 +#define BNX2FC_MAX_CMD_LEN 16 + +#define BNX2FC_TM_TIMEOUT 60 /* secs */ +#define BNX2FC_IO_TIMEOUT 20000UL /* msecs */ + +#define BNX2FC_WAIT_CNT 1200 +#define BNX2FC_FW_TIMEOUT (3 * HZ) +#define PORT_MAX 2 + +/* FC FCP Status */ +#define FC_GOOD 0 + +#define BNX2FC_RNID_HBA 0x7 + +#define SRR_RETRY_COUNT 5 +#define REC_RETRY_COUNT 1 +#define BNX2FC_NUM_ERR_BITS 63 + +#define BNX2FC_RELOGIN_WAIT_TIME 200 +#define BNX2FC_RELOGIN_WAIT_CNT 10 + +#define BNX2FC_STATS(hba, stat, cnt) \ + do { \ + u32 val; \ + \ + val = fw_stats->stat.cnt; \ + if (hba->prev_stats.stat.cnt <= val) \ + val -= hba->prev_stats.stat.cnt; \ + else \ + val += (0xfffffff - hba->prev_stats.stat.cnt); \ + hba->bfw_stats.cnt += val; \ + } while (0) + +/* bnx2fc driver uses only one instance of fcoe_percpu_s */ +extern struct fcoe_percpu_s bnx2fc_global; + +extern struct workqueue_struct *bnx2fc_wq; + +struct bnx2fc_percpu_s { + struct task_struct *iothread; + struct list_head work_list; + spinlock_t fp_work_lock; +}; + +struct bnx2fc_fw_stats { + u64 fc_crc_cnt; + u64 fcoe_tx_pkt_cnt; + u64 fcoe_rx_pkt_cnt; + u64 fcoe_tx_byte_cnt; + u64 fcoe_rx_byte_cnt; +}; + +struct bnx2fc_hba { + struct list_head list; + struct cnic_dev *cnic; + struct pci_dev *pcidev; + struct net_device *phys_dev; + unsigned long reg_with_cnic; + #define BNX2FC_CNIC_REGISTERED 1 + struct bnx2fc_cmd_mgr *cmd_mgr; + spinlock_t hba_lock; + struct mutex hba_mutex; + struct mutex hba_stats_mutex; + unsigned long adapter_state; + #define ADAPTER_STATE_UP 0 + #define ADAPTER_STATE_GOING_DOWN 1 + #define ADAPTER_STATE_LINK_DOWN 2 + #define ADAPTER_STATE_READY 3 + unsigned long flags; + #define BNX2FC_FLAG_FW_INIT_DONE 0 + #define BNX2FC_FLAG_DESTROY_CMPL 1 + u32 next_conn_id; + + /* xid resources */ + u16 max_xid; + u32 max_tasks; + u32 max_outstanding_cmds; + u32 elstm_xids; + + struct fcoe_task_ctx_entry **task_ctx; + dma_addr_t *task_ctx_dma; + struct regpair *task_ctx_bd_tbl; + dma_addr_t task_ctx_bd_dma; + + int hash_tbl_segment_count; + void **hash_tbl_segments; + void *hash_tbl_pbl; + dma_addr_t hash_tbl_pbl_dma; + struct fcoe_t2_hash_table_entry *t2_hash_tbl; + dma_addr_t t2_hash_tbl_dma; + char *t2_hash_tbl_ptr; + dma_addr_t t2_hash_tbl_ptr_dma; + + char *dummy_buffer; + dma_addr_t dummy_buf_dma; + + /* Active list of offloaded sessions */ + struct bnx2fc_rport **tgt_ofld_list; + + /* statistics */ + struct bnx2fc_fw_stats bfw_stats; + struct fcoe_statistics_params prev_stats; + struct fcoe_statistics_params *stats_buffer; + dma_addr_t stats_buf_dma; + struct completion stat_req_done; + struct fcoe_capabilities fcoe_cap; + + /*destroy handling */ + struct timer_list destroy_timer; + wait_queue_head_t destroy_wait; + + /* linkdown handling */ + wait_queue_head_t shutdown_wait; + int wait_for_link_down; + int num_ofld_sess; + struct list_head vports; + + char chip_num[BCM_CHIP_LEN]; +}; + +struct bnx2fc_interface { + struct list_head list; + unsigned long if_flags; + #define BNX2FC_CTLR_INIT_DONE 0 + struct bnx2fc_hba *hba; + struct net_device *netdev; + struct packet_type fcoe_packet_type; + struct packet_type fip_packet_type; + struct workqueue_struct *timer_work_queue; + struct kref kref; + u8 vlan_enabled; + int vlan_id; + bool enabled; + u8 tm_timeout; +}; + +#define bnx2fc_from_ctlr(x) \ + ((struct bnx2fc_interface *)((x) + 1)) + +#define bnx2fc_to_ctlr(x) \ + ((struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1)) + +struct bnx2fc_lport { + struct list_head list; + struct fc_lport *lport; +}; + +struct 
bnx2fc_cmd_mgr { + struct bnx2fc_hba *hba; + u16 next_idx; + struct list_head *free_list; + spinlock_t *free_list_lock; + struct io_bdt **io_bdt_pool; + struct bnx2fc_cmd **cmds; +}; + +struct bnx2fc_rport { + struct fcoe_port *port; + struct fc_rport *rport; + struct fc_rport_priv *rdata; + void __iomem *ctx_base; +#define DPM_TRIGER_TYPE 0x40 + u32 io_timeout; + u32 fcoe_conn_id; + u32 context_id; + u32 sid; + int dev_type; + + unsigned long flags; +#define BNX2FC_FLAG_SESSION_READY 0x1 +#define BNX2FC_FLAG_OFFLOADED 0x2 +#define BNX2FC_FLAG_DISABLED 0x3 +#define BNX2FC_FLAG_DESTROYED 0x4 +#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5 +#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x6 +#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7 +#define BNX2FC_FLAG_DISABLE_FAILED 0x9 +#define BNX2FC_FLAG_ENABLED 0xa + + u8 src_addr[ETH_ALEN]; + u32 max_sqes; + u32 max_rqes; + u32 max_cqes; + atomic_t free_sqes; + + struct b577xx_doorbell_set_prod sq_db; + struct b577xx_fcoe_rx_doorbell rx_db; + + struct fcoe_sqe *sq; + dma_addr_t sq_dma; + u16 sq_prod_idx; + u8 sq_curr_toggle_bit; + u32 sq_mem_size; + + struct fcoe_cqe *cq; + dma_addr_t cq_dma; + u16 cq_cons_idx; + u8 cq_curr_toggle_bit; + u32 cq_mem_size; + + void *rq; + dma_addr_t rq_dma; + u32 rq_prod_idx; + u32 rq_cons_idx; + u32 rq_mem_size; + + void *rq_pbl; + dma_addr_t rq_pbl_dma; + u32 rq_pbl_size; + + struct fcoe_xfrqe *xferq; + dma_addr_t xferq_dma; + u32 xferq_mem_size; + + struct fcoe_confqe *confq; + dma_addr_t confq_dma; + u32 confq_mem_size; + + void *confq_pbl; + dma_addr_t confq_pbl_dma; + u32 confq_pbl_size; + + struct fcoe_conn_db *conn_db; + dma_addr_t conn_db_dma; + u32 conn_db_mem_size; + + struct fcoe_sqe *lcq; + dma_addr_t lcq_dma; + u32 lcq_mem_size; + + void *ofld_req[4]; + dma_addr_t ofld_req_dma[4]; + void *enbl_req; + dma_addr_t enbl_req_dma; + + spinlock_t tgt_lock; + spinlock_t cq_lock; + atomic_t num_active_ios; + u32 flush_in_prog; + unsigned long timestamp; + unsigned long retry_delay_timestamp; + struct list_head free_task_list; + struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1]; + struct list_head active_cmd_queue; + struct list_head els_queue; + struct list_head io_retire_queue; + struct list_head active_tm_queue; + + struct timer_list ofld_timer; + wait_queue_head_t ofld_wait; + + struct timer_list upld_timer; + wait_queue_head_t upld_wait; +}; + +struct bnx2fc_mp_req { + u8 tm_flags; + + u32 req_len; + void *req_buf; + dma_addr_t req_buf_dma; + struct fcoe_bd_ctx *mp_req_bd; + dma_addr_t mp_req_bd_dma; + struct fc_frame_header req_fc_hdr; + + u32 resp_len; + void *resp_buf; + dma_addr_t resp_buf_dma; + struct fcoe_bd_ctx *mp_resp_bd; + dma_addr_t mp_resp_bd_dma; + struct fc_frame_header resp_fc_hdr; +}; + +struct bnx2fc_els_cb_arg { + struct bnx2fc_cmd *aborted_io_req; + struct bnx2fc_cmd *io_req; + u16 l2_oxid; + u32 offset; + enum fc_rctl r_ctl; +}; + +/* bnx2fc command structure */ +struct bnx2fc_cmd { + struct list_head link; + u8 on_active_queue; + u8 on_tmf_queue; + u8 cmd_type; +#define BNX2FC_SCSI_CMD 1 +#define BNX2FC_TASK_MGMT_CMD 2 +#define BNX2FC_ABTS 3 +#define BNX2FC_ELS 4 +#define BNX2FC_CLEANUP 5 +#define BNX2FC_SEQ_CLEANUP 6 + u8 io_req_flags; + struct kref refcount; + struct fcoe_port *port; + struct bnx2fc_rport *tgt; + struct scsi_cmnd *sc_cmd; + struct bnx2fc_cmd_mgr *cmd_mgr; + struct bnx2fc_mp_req mp_req; + void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg); + struct bnx2fc_els_cb_arg *cb_arg; + struct delayed_work timeout_work; /* timer for ULP timeouts */ + struct completion abts_done; + struct completion 
cleanup_done; + int wait_for_abts_comp; + int wait_for_cleanup_comp; + u16 xid; + struct fcoe_err_report_entry err_entry; + struct fcoe_task_ctx_entry *task; + struct io_bdt *bd_tbl; + struct fcp_rsp *rsp; + size_t data_xfer_len; + unsigned long req_flags; +#define BNX2FC_FLAG_ISSUE_RRQ 0x1 +#define BNX2FC_FLAG_ISSUE_ABTS 0x2 +#define BNX2FC_FLAG_ABTS_DONE 0x3 +#define BNX2FC_FLAG_TM_COMPL 0x4 +#define BNX2FC_FLAG_TM_TIMEOUT 0x5 +#define BNX2FC_FLAG_IO_CLEANUP 0x6 +#define BNX2FC_FLAG_RETIRE_OXID 0x7 +#define BNX2FC_FLAG_EH_ABORT 0x8 +#define BNX2FC_FLAG_IO_COMPL 0x9 +#define BNX2FC_FLAG_ELS_DONE 0xa +#define BNX2FC_FLAG_ELS_TIMEOUT 0xb +#define BNX2FC_FLAG_CMD_LOST 0xc +#define BNX2FC_FLAG_SRR_SENT 0xd +#define BNX2FC_FLAG_ISSUE_CLEANUP_REQ 0xe + u8 rec_retry; + u8 srr_retry; + u32 srr_offset; + u8 srr_rctl; + u32 fcp_resid; + u32 fcp_rsp_len; + u32 fcp_sns_len; + u8 cdb_status; /* SCSI IO status */ + u8 fcp_status; /* FCP IO status */ + u8 fcp_rsp_code; + u8 scsi_comp_flags; +}; + +struct io_bdt { + struct bnx2fc_cmd *io_req; + struct fcoe_bd_ctx *bd_tbl; + dma_addr_t bd_tbl_dma; + u16 bd_valid; +}; + +struct bnx2fc_work { + struct list_head list; + struct bnx2fc_rport *tgt; + struct fcoe_task_ctx_entry *task; + unsigned char rq_data[BNX2FC_RQ_BUF_SZ]; + u16 wqe; + u8 num_rq; +}; +struct bnx2fc_unsol_els { + struct fc_lport *lport; + struct fc_frame *fp; + struct bnx2fc_hba *hba; + struct work_struct unsol_els_work; +}; + +struct bnx2fc_priv { + struct bnx2fc_cmd *io_req; +}; + +static inline struct bnx2fc_priv *bnx2fc_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt); +struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type); +void bnx2fc_cmd_release(struct kref *ref); +int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd); +int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba); +int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba); +int bnx2fc_send_session_ofld_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt); +int bnx2fc_send_session_enable_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt); +int bnx2fc_send_session_disable_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt); +int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt); +int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt); +void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[], + u32 num_cqe); +int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba); +void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba); +int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba); +void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba); +struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba); +void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr); +void bnx2fc_get_link_state(struct bnx2fc_hba *hba); +char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items); +void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items); +int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen); +int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req); +int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp); +int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp); +int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp); +int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req); +int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req); +void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, + unsigned int timer_msec); +int bnx2fc_init_mp_req(struct 
bnx2fc_cmd *io_req); +void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u16 orig_xid); +void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnup_req, + struct fcoe_task_ctx_entry *task, + struct bnx2fc_cmd *orig_io_req, + u32 offset); +void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task); +void bnx2fc_init_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task); +void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid); +void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt); +int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd); +int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd); +int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd); +void bnx2fc_rport_event_handler(struct fc_lport *lport, + struct fc_rport_priv *rport, + enum fc_rport_event event); +void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq, unsigned char *rq_data); +void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq, unsigned char *rq_data); +void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req, + struct fcp_cmnd *fcp_cmnd); + + + +void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt); +struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timeout); +void bnx2fc_arm_cq(struct bnx2fc_rport *tgt); +int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt); +void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe, + unsigned char *rq_data, u8 num_rq, + struct fcoe_task_ctx_entry *task); +struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, + u32 port_id); +void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, + unsigned char *buf, + u32 frame_len, u16 l2_oxid); +int bnx2fc_send_stat_req(struct bnx2fc_hba *hba); +int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_cmd *io_req); +int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req); +int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl); +void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req, + struct fcoe_task_ctx_entry *task, + u8 rx_state); +int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset, + enum fc_rctl r_ctl); + + +#include "bnx2fc_debug.h" + +#endif diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h new file mode 100644 index 000000000..9ed150307 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h @@ -0,0 +1,288 @@ +/* bnx2fc_constants.h: QLogic Linux FCoE offload driver. + * Handles operations such as session offload/upload etc, and manages + * session resources such as connection id and qp resources. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + */ + +#ifndef __BNX2FC_CONSTANTS_H_ +#define __BNX2FC_CONSTANTS_H_ + +/** + * This file defines HSI constants for the FCoE flows + */ + +/* Current FCoE HSI version number composed of two fields (16 bit) */ +/* Implies a change that breaks the previous HSI */ +#define FCOE_HSI_MAJOR_VERSION (2) +/* Implies a change that does not break the previous HSI */ +#define FCOE_HSI_MINOR_VERSION (1) + +/* KWQ/KCQ FCoE layer code */ +#define FCOE_KWQE_LAYER_CODE (7) + +/* KWQ (kernel work queue) request op codes */ +#define FCOE_KWQE_OPCODE_INIT1 (0) +#define FCOE_KWQE_OPCODE_INIT2 (1) +#define FCOE_KWQE_OPCODE_INIT3 (2) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6) +#define FCOE_KWQE_OPCODE_ENABLE_CONN (7) +#define FCOE_KWQE_OPCODE_DISABLE_CONN (8) +#define FCOE_KWQE_OPCODE_DESTROY_CONN (9) +#define FCOE_KWQE_OPCODE_DESTROY (10) +#define FCOE_KWQE_OPCODE_STAT (11) + +/* KCQ (kernel completion queue) response op codes */ +#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10) +#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11) +#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12) +#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15) +#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16) +#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17) +#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18) +#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20) +#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21) + +/* KCQ (kernel completion queue) completion status */ +#define FCOE_KCQE_COMPLETION_STATUS_SUCCESS (0x0) +#define FCOE_KCQE_COMPLETION_STATUS_ERROR (0x1) +#define FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x2) +#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3) +#define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4) +#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5) +#define FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION (0x6) +#define FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR (0x81) + +/* CQE type */ +#define FCOE_PENDING_CQE_TYPE 0 +#define FCOE_UNSOLIC_CQE_TYPE 1 + +/* Unsolicited CQE type */ +#define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0 +#define FCOE_ERROR_DETECTION_CQE_TYPE 1 +#define FCOE_WARNING_DETECTION_CQE_TYPE 2 + +/* E_D_TOV timer resolution in ms */ +#define FCOE_E_D_TOV_TIMER_RESOLUTION_MS (20) + +/* E_D_TOV timer resolution for SDM (4 microsecond units) */ +#define FCOE_E_D_TOV_SDM_TIMER_RESOLUTION \ + (FCOE_E_D_TOV_TIMER_RESOLUTION_MS * 1000 / 4) + +/* REC timer resolution in ms */ +#define FCOE_REC_TIMER_RESOLUTION_MS (20) + +/* REC timer resolution for SDM (4 microsecond units) */ +#define FCOE_REC_SDM_TIMER_RESOLUTION (FCOE_REC_TIMER_RESOLUTION_MS * 1000 / 4) + +/* E_D_TOV timer default wraparound value (2 sec) in 20 ms resolution */ +#define FCOE_E_D_TOV_DEFAULT_WRAPAROUND_VAL \ + (2000 / FCOE_E_D_TOV_TIMER_RESOLUTION_MS) + +/* REC_TOV timer default wraparound value (3 sec) in 20 ms resolution */ +#define FCOE_REC_TOV_DEFAULT_WRAPAROUND_VAL \ + (3000 / FCOE_REC_TIMER_RESOLUTION_MS) + +#define FCOE_NUM_OF_TIMER_TASKS (8 * 1024) + +#define FCOE_NUM_OF_CACHED_TASKS_TIMER (8) +
+/* Task context constants */ +/******** Remove FCP_CMD write tce sleep ***********************/ +/* In case timer services are required, the state shall be updated by Xstorm + * after it starts processing the task. In case no timer facilities are + * required, the driver initializes the state to this value + * +#define FCOE_TASK_TX_STATE_NORMAL 0 + * After the driver has initialized the task, in case timer services are required * +#define FCOE_TASK_TX_STATE_INIT 1 +******** Remove FCP_CMD write tce sleep ***********************/ +/* After the driver has initialized the task, in case timer services are required */ +#define FCOE_TASK_TX_STATE_INIT 0 +/* In case timer services are required, the state shall be updated by Xstorm + * after it starts processing the task. In case no timer facilities are + * required, the driver initializes the state to this value + */ +#define FCOE_TASK_TX_STATE_NORMAL 1 +/* Task is under abort procedure. Updated in order to stop processing of + * pending WQEs on this task + */ +#define FCOE_TASK_TX_STATE_ABORT 2 +/* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */ +#define FCOE_TASK_TX_STATE_ERROR 3 +/* For REC_TOV timer expiration indication received from Xstorm */ +#define FCOE_TASK_TX_STATE_WARNING 4 +/* For completed unsolicited task */ +#define FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED 5 +/* For exchange cleanup request task */ +#define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6 +/* For sequence cleanup request task */ +#define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7 +/* For completion of the ABTS task. */ +#define FCOE_TASK_TX_STATE_ABTS_TX 8 + +#define FCOE_TASK_RX_STATE_NORMAL 0 +#define FCOE_TASK_RX_STATE_COMPLETED 1 +/* Obsolete: Intermediate completion (middle path with local completion) */ +#define FCOE_TASK_RX_STATE_INTER_COMP 2 +/* For REC_TOV timer expiration indication received from Xstorm */ +#define FCOE_TASK_RX_STATE_WARNING 3 +/* For E_D_T_TOV timer expiration in Ustorm */ +#define FCOE_TASK_RX_STATE_ERROR 4 +/* FW only: First visit at rx-path, part of the abts round trip */ +#define FCOE_TASK_RX_STATE_ABTS_IN_PROCESS 5 +/* FW only: Second visit at rx-path, after ABTS frame transmitted */ +#define FCOE_TASK_RX_STATE_ABTS_TRANSMITTED 6 +/* Special completion indication in case the task was aborted. */ +#define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7 +/* FW only: First visit at rx-path, part of the cleanup round trip */ +#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_IN_PROCESS 8 +/* FW only: Special completion indication in case the task was cleaned. */ +#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 9 +/* Not in use: Special completion indication (in the task that requested the exchange + * cleanup) in case the cleaned task is not valid. + */ +#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 10 +/* Special completion indication (in the task that requested the sequence cleanup) in + * case the cleaned task was already returned to normal.
+ */ +#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 11 + + +#define FCOE_TASK_TYPE_WRITE 0 +#define FCOE_TASK_TYPE_READ 1 +#define FCOE_TASK_TYPE_MIDPATH 2 +#define FCOE_TASK_TYPE_UNSOLICITED 3 +#define FCOE_TASK_TYPE_ABTS 4 +#define FCOE_TASK_TYPE_EXCHANGE_CLEANUP 5 +#define FCOE_TASK_TYPE_SEQUENCE_CLEANUP 6 + +#define FCOE_TASK_DEV_TYPE_DISK 0 +#define FCOE_TASK_DEV_TYPE_TAPE 1 + +#define FCOE_TASK_CLASS_TYPE_3 0 +#define FCOE_TASK_CLASS_TYPE_2 1 + +/* FCoE/FC packet fields */ +#define FCOE_ETH_TYPE 0x8906 + +/* FCoE maximum elements in hash table */ +#define FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW 8 + +/* FCoE half of the elements in hash table */ +#define FCOE_HALF_ELEMENTS_IN_HASH_TABLE_ROW \ + (FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW / 2) + +/* FcoE number of cached T2 entries */ +#define T_FCOE_NUMBER_OF_CACHED_T2_ENTRIES (4) + +/* FCoE maximum elements in hash table */ +#define FCOE_HASH_TBL_CHUNK_SIZE 16384 + +/* Everest FCoE connection type */ +#define B577XX_FCOE_CONNECTION_TYPE 4 + +/* FCoE number of rows (in log). This number derives + * from the maximum connections supported which is 2048. + * TBA: Need a different constant for E2 + */ +#define FCOE_MAX_NUM_SESSIONS_LOG 11 + +#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 + +/* Error codes for Error Reporting in slow path flows */ +#define FCOE_SLOW_PATH_ERROR_CODE_TOO_MANY_FUNCS 0 +#define FCOE_SLOW_PATH_ERROR_CODE_NO_LICENSE 1 + +/* Error codes for Error Reporting in fast path flows + * XFER error codes + */ +#define FCOE_ERROR_CODE_XFER_OOO_RO 0 +#define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1 +#define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2 +#define FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS 3 +#define FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE 4 +#define FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE 5 +#define FCOE_ERROR_CODE_XFER_PEND_XFER_SET 6 +#define FCOE_ERROR_CODE_XFER_OPENED_SEQ 7 +#define FCOE_ERROR_CODE_XFER_FCTL 8 + +/* FCP RSP error codes */ +#define FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET 9 +#define FCOE_ERROR_CODE_FCP_RSP_UNDERFLOW 10 +#define FCOE_ERROR_CODE_FCP_RSP_OVERFLOW 11 +#define FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD 12 +#define FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD 13 +#define FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE 14 +#define FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET 15 +#define FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ 16 +#define FCOE_ERROR_CODE_FCP_RSP_FCTL 17 +#define FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET 18 +#define FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET 19 + +/* FCP DATA error codes */ +#define FCOE_ERROR_CODE_DATA_OOO_RO 20 +#define FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE 21 +#define FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS 22 +#define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23 +#define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24 +#define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25 +#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26 +#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27 +#define FCOE_ERROR_CODE_DATA_FCTL 28 + +/* Middle path error codes */ +#define FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE 29 +#define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30 +#define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31 +#define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32 +#define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33 +#define FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL 34 +#define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35 +#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36 + +/* ABTS error codes */ +#define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37 +#define 
FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38 +#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39 +#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40 +#define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41 + +/* Common error codes */ +#define FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD 42 +#define FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE 43 +#define FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH 44 +#define FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT 45 +#define FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH 46 +#define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47 +#define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48 +#define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49 +#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50 + +/* Unsolicited Rx error codes */ +#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51 +#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_BLS 52 +#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_ELS 53 +#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_BLS 54 +#define FCOE_ERROR_CODE_UNSOLICITED_R_CTL 55 + +#define FCOE_ERROR_CODE_RW_TASK_DDF_RCTL_INFO_FIELD 56 +#define FCOE_ERROR_CODE_RW_TASK_INVALID_RCTL 57 +#define FCOE_ERROR_CODE_RW_TASK_RCTL_GENERAL_MISMATCH 58 + +/* Timer error codes */ +#define FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION 60 +#define FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION 61 + + +#endif /* BNX2FC_CONSTANTS_H_ */ diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.c b/drivers/scsi/bnx2fc/bnx2fc_debug.c new file mode 100644 index 000000000..47ba3ba1e --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_debug.c @@ -0,0 +1,84 @@ +/* bnx2fc_debug.c: QLogic Linux FCoE offload driver. + * Handles operations such as session offload/upload etc, and manages + * session resources such as connection id and qp resources. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + */ + +#include "bnx2fc.h" + +void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + if (likely(!(bnx2fc_debug_level & LOG_IO))) + return; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (io_req && io_req->port && io_req->port->lport && + io_req->port->lport->host) + shost_printk(KERN_INFO, io_req->port->lport->host, + PFX "xid:0x%x %pV", + io_req->xid, &vaf); + else + pr_info("NULL %pV", &vaf); + + va_end(args); +} + +void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + if (likely(!(bnx2fc_debug_level & LOG_TGT))) + return; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (tgt && tgt->port && tgt->port->lport && tgt->port->lport->host && + tgt->rport) + shost_printk(KERN_INFO, tgt->port->lport->host, + PFX "port:%x %pV", + tgt->rport->port_id, &vaf); + else + pr_info("NULL %pV", &vaf); + + va_end(args); +} + +void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...) 
+{ + struct va_format vaf; + va_list args; + + if (likely(!(bnx2fc_debug_level & LOG_HBA))) + return; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (lport && lport->host) + shost_printk(KERN_INFO, lport->host, PFX "%pV", &vaf); + else + pr_info("NULL %pV", &vaf); + + va_end(args); +} diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h new file mode 100644 index 000000000..76717acee --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h @@ -0,0 +1,47 @@ +/* bnx2fc_debug.h: QLogic Linux FCoE offload driver. + * Handles operations such as session offload/upload etc, and manages + * session resources such as connection id and qp resources. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + */ + +#ifndef __BNX2FC_DEBUG__ +#define __BNX2FC_DEBUG__ + +/* Log level bit mask */ +#define LOG_IO 0x01 /* scsi cmd error, cleanup */ +#define LOG_TGT 0x02 /* Session setup, cleanup, etc' */ +#define LOG_HBA 0x04 /* lport events, link, mtu, etc' */ +#define LOG_ELS 0x08 /* ELS logs */ +#define LOG_MISC 0x10 /* fcoe L2 frame related logs*/ +#define LOG_ALL 0xff /* LOG all messages */ + +extern unsigned int bnx2fc_debug_level; + +#define BNX2FC_ELS_DBG(fmt, ...) \ +do { \ + if (unlikely(bnx2fc_debug_level & LOG_ELS)) \ + pr_info(fmt, ##__VA_ARGS__); \ +} while (0) + +#define BNX2FC_MISC_DBG(fmt, ...) \ +do { \ + if (unlikely(bnx2fc_debug_level & LOG_MISC)) \ + pr_info(fmt, ##__VA_ARGS__); \ +} while (0) + +__printf(2, 3) +void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...); +__printf(2, 3) +void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...); +__printf(2, 3) +void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...); + +#endif diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c new file mode 100644 index 000000000..754f2e82d --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_els.c @@ -0,0 +1,950 @@ +/* + * bnx2fc_els.c: QLogic Linux FCoE offload driver. + * This file contains helper routines that handle ELS requests + * and responses. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" + +static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg); +static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg); +static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, + void *data, u32 data_len, + void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg), + struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec); + +static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg) +{ + struct bnx2fc_cmd *orig_io_req; + struct bnx2fc_cmd *rrq_req; + int rc = 0; + + BUG_ON(!cb_arg); + rrq_req = cb_arg->io_req; + orig_io_req = cb_arg->aborted_io_req; + BUG_ON(!orig_io_req); + BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n", + orig_io_req->xid, rrq_req->xid); + + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); + + if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) { + /* + * els req is timed out. cleanup the IO with FW and + * drop the completion. Remove from active_cmd_queue. + */ + BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n", + rrq_req->xid); + + if (rrq_req->on_active_queue) { + list_del_init(&rrq_req->link); + rrq_req->on_active_queue = 0; + rc = bnx2fc_initiate_cleanup(rrq_req); + BUG_ON(rc); + } + } + kfree(cb_arg); +} +int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req) +{ + + struct fc_els_rrq rrq; + struct bnx2fc_rport *tgt = aborted_io_req->tgt; + struct fc_lport *lport = NULL; + struct bnx2fc_els_cb_arg *cb_arg = NULL; + u32 sid = 0; + u32 r_a_tov = 0; + unsigned long start = jiffies; + int rc; + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) + return -EINVAL; + + lport = tgt->rdata->local_port; + sid = tgt->sid; + r_a_tov = lport->r_a_tov; + + BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n", + aborted_io_req->xid); + memset(&rrq, 0, sizeof(rrq)); + + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n"); + rc = -ENOMEM; + goto rrq_err; + } + + cb_arg->aborted_io_req = aborted_io_req; + + rrq.rrq_cmd = ELS_RRQ; + hton24(rrq.rrq_s_id, sid); + rrq.rrq_ox_id = htons(aborted_io_req->xid); + rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id); + +retry_rrq: + rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq), + bnx2fc_rrq_compl, cb_arg, + r_a_tov); + if (rc == -ENOMEM) { + if (time_after(jiffies, start + (10 * HZ))) { + BNX2FC_ELS_DBG("rrq Failed\n"); + rc = FAILED; + goto rrq_err; + } + msleep(20); + goto retry_rrq; + } +rrq_err: + if (rc) { + BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n", + aborted_io_req->xid); + kfree(cb_arg); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + } + return rc; +} + +static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg) +{ + struct bnx2fc_cmd *els_req; + struct bnx2fc_rport *tgt; + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr; + unsigned char *buf; + void *resp_buf; + u32 resp_len, hdr_len; + u16 l2_oxid; + int frame_len; + int rc = 0; + + l2_oxid = cb_arg->l2_oxid; + BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid); + + els_req = cb_arg->io_req; + if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) { + /* + * els req is timed out. cleanup the IO with FW and + * drop the completion. 
libfc will handle the els timeout + */ + if (els_req->on_active_queue) { + list_del_init(&els_req->link); + els_req->on_active_queue = 0; + rc = bnx2fc_initiate_cleanup(els_req); + BUG_ON(rc); + } + goto free_arg; + } + + tgt = els_req->tgt; + mp_req = &(els_req->mp_req); + fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + resp_buf = mp_req->resp_buf; + + buf = kzalloc(PAGE_SIZE, GFP_ATOMIC); + if (!buf) { + printk(KERN_ERR PFX "Unable to alloc mp buf\n"); + goto free_arg; + } + hdr_len = sizeof(*fc_hdr); + if (hdr_len + resp_len > PAGE_SIZE) { + printk(KERN_ERR PFX "l2_els_compl: resp len is " + "beyond page size\n"); + goto free_buf; + } + memcpy(buf, fc_hdr, hdr_len); + memcpy(buf + hdr_len, resp_buf, resp_len); + frame_len = hdr_len + resp_len; + + bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid); + +free_buf: + kfree(buf); +free_arg: + kfree(cb_arg); +} + +int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp) +{ + struct fc_els_adisc *adisc; + struct fc_frame_header *fh; + struct bnx2fc_els_cb_arg *cb_arg; + struct fc_lport *lport = tgt->rdata->local_port; + u32 r_a_tov = lport->r_a_tov; + int rc; + + fh = fc_frame_header_get(fp); + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n"); + return -ENOMEM; + } + + cb_arg->l2_oxid = ntohs(fh->fh_ox_id); + + BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid); + adisc = fc_frame_payload_get(fp, sizeof(*adisc)); + /* adisc is initialized by libfc */ + rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc), + bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov); + if (rc) + kfree(cb_arg); + return rc; +} + +int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp) +{ + struct fc_els_logo *logo; + struct fc_frame_header *fh; + struct bnx2fc_els_cb_arg *cb_arg; + struct fc_lport *lport = tgt->rdata->local_port; + u32 r_a_tov = lport->r_a_tov; + int rc; + + fh = fc_frame_header_get(fp); + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n"); + return -ENOMEM; + } + + cb_arg->l2_oxid = ntohs(fh->fh_ox_id); + + BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid); + logo = fc_frame_payload_get(fp, sizeof(*logo)); + /* logo is initialized by libfc */ + rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo), + bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov); + if (rc) + kfree(cb_arg); + return rc; +} + +int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp) +{ + struct fc_els_rls *rls; + struct fc_frame_header *fh; + struct bnx2fc_els_cb_arg *cb_arg; + struct fc_lport *lport = tgt->rdata->local_port; + u32 r_a_tov = lport->r_a_tov; + int rc; + + fh = fc_frame_header_get(fp); + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n"); + return -ENOMEM; + } + + cb_arg->l2_oxid = ntohs(fh->fh_ox_id); + + rls = fc_frame_payload_get(fp, sizeof(*rls)); + /* rls is initialized by libfc */ + rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls), + bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov); + if (rc) + kfree(cb_arg); + return rc; +} + +static void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg) +{ + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr, *fh; + struct bnx2fc_cmd *srr_req; + struct bnx2fc_cmd *orig_io_req; + struct fc_frame *fp; + unsigned char *buf; + void *resp_buf; + u32 resp_len, 
hdr_len; + u8 opcode; + int rc = 0; + + orig_io_req = cb_arg->aborted_io_req; + srr_req = cb_arg->io_req; + if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) { + /* SRR timedout */ + BNX2FC_IO_DBG(srr_req, "srr timed out, abort " + "orig_io - 0x%x\n", + orig_io_req->xid); + rc = bnx2fc_initiate_abts(srr_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts " + "failed. issue cleanup\n"); + bnx2fc_initiate_cleanup(srr_req); + } + if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) || + test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) { + BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx", + orig_io_req->xid, orig_io_req->req_flags); + goto srr_compl_done; + } + orig_io_req->srr_retry++; + if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) { + struct bnx2fc_rport *tgt = orig_io_req->tgt; + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_send_srr(orig_io_req, + orig_io_req->srr_offset, + orig_io_req->srr_rctl); + spin_lock_bh(&tgt->tgt_lock); + if (!rc) + goto srr_compl_done; + } + + rc = bnx2fc_initiate_abts(orig_io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts " + "failed xid = 0x%x. issue cleanup\n", + orig_io_req->xid); + bnx2fc_initiate_cleanup(orig_io_req); + } + goto srr_compl_done; + } + if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) || + test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) { + BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx", + orig_io_req->xid, orig_io_req->req_flags); + goto srr_compl_done; + } + mp_req = &(srr_req->mp_req); + fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + resp_buf = mp_req->resp_buf; + + hdr_len = sizeof(*fc_hdr); + buf = kzalloc(PAGE_SIZE, GFP_ATOMIC); + if (!buf) { + printk(KERN_ERR PFX "srr buf: mem alloc failure\n"); + goto srr_compl_done; + } + memcpy(buf, fc_hdr, hdr_len); + memcpy(buf + hdr_len, resp_buf, resp_len); + + fp = fc_frame_alloc(NULL, resp_len); + if (!fp) { + printk(KERN_ERR PFX "fc_frame_alloc failure\n"); + goto free_buf; + } + + fh = (struct fc_frame_header *) fc_frame_header_get(fp); + /* Copy FC Frame header and payload into the frame */ + memcpy(fh, buf, hdr_len + resp_len); + + opcode = fc_frame_payload_op(fp); + switch (opcode) { + case ELS_LS_ACC: + BNX2FC_IO_DBG(srr_req, "SRR success\n"); + break; + case ELS_LS_RJT: + BNX2FC_IO_DBG(srr_req, "SRR rejected\n"); + rc = bnx2fc_initiate_abts(orig_io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts " + "failed xid = 0x%x. 
issue cleanup\n", + orig_io_req->xid); + bnx2fc_initiate_cleanup(orig_io_req); + } + break; + default: + BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n", + opcode); + break; + } + fc_frame_free(fp); +free_buf: + kfree(buf); +srr_compl_done: + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); +} + +static void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg) +{ + struct bnx2fc_cmd *orig_io_req, *new_io_req; + struct bnx2fc_cmd *rec_req; + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr, *fh; + struct fc_els_ls_rjt *rjt; + struct fc_els_rec_acc *acc; + struct bnx2fc_rport *tgt; + struct fcoe_err_report_entry *err_entry; + struct scsi_cmnd *sc_cmd; + enum fc_rctl r_ctl; + unsigned char *buf; + void *resp_buf; + struct fc_frame *fp; + u8 opcode; + u32 offset; + u32 e_stat; + u32 resp_len, hdr_len; + int rc = 0; + bool send_seq_clnp = false; + bool abort_io = false; + + BNX2FC_MISC_DBG("Entered rec_compl callback\n"); + rec_req = cb_arg->io_req; + orig_io_req = cb_arg->aborted_io_req; + BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid); + tgt = orig_io_req->tgt; + + /* Handle REC timeout case */ + if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) { + BNX2FC_IO_DBG(rec_req, "timed out, abort " + "orig_io - 0x%x\n", + orig_io_req->xid); + /* els req is timed out. send abts for els */ + rc = bnx2fc_initiate_abts(rec_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts " + "failed. issue cleanup\n"); + bnx2fc_initiate_cleanup(rec_req); + } + orig_io_req->rec_retry++; + /* REC timedout. send ABTS to the orig IO req */ + if (orig_io_req->rec_retry <= REC_RETRY_COUNT) { + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_send_rec(orig_io_req); + spin_lock_bh(&tgt->tgt_lock); + if (!rc) + goto rec_compl_done; + } + rc = bnx2fc_initiate_abts(orig_io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts " + "failed xid = 0x%x. 
issue cleanup\n", + orig_io_req->xid); + bnx2fc_initiate_cleanup(orig_io_req); + } + goto rec_compl_done; + } + + if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) { + BNX2FC_IO_DBG(rec_req, "completed" + "orig_io - 0x%x\n", + orig_io_req->xid); + goto rec_compl_done; + } + if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) { + BNX2FC_IO_DBG(rec_req, "abts in prog " + "orig_io - 0x%x\n", + orig_io_req->xid); + goto rec_compl_done; + } + + mp_req = &(rec_req->mp_req); + fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + acc = resp_buf = mp_req->resp_buf; + + hdr_len = sizeof(*fc_hdr); + + buf = kzalloc(PAGE_SIZE, GFP_ATOMIC); + if (!buf) { + printk(KERN_ERR PFX "rec buf: mem alloc failure\n"); + goto rec_compl_done; + } + memcpy(buf, fc_hdr, hdr_len); + memcpy(buf + hdr_len, resp_buf, resp_len); + + fp = fc_frame_alloc(NULL, resp_len); + if (!fp) { + printk(KERN_ERR PFX "fc_frame_alloc failure\n"); + goto free_buf; + } + + fh = (struct fc_frame_header *) fc_frame_header_get(fp); + /* Copy FC Frame header and payload into the frame */ + memcpy(fh, buf, hdr_len + resp_len); + + opcode = fc_frame_payload_op(fp); + if (opcode == ELS_LS_RJT) { + BNX2FC_IO_DBG(rec_req, "opcode is RJT\n"); + rjt = fc_frame_payload_get(fp, sizeof(*rjt)); + if ((rjt->er_reason == ELS_RJT_LOGIC || + rjt->er_reason == ELS_RJT_UNAB) && + rjt->er_explan == ELS_EXPL_OXID_RXID) { + BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n"); + new_io_req = bnx2fc_cmd_alloc(tgt); + if (!new_io_req) + goto abort_io; + new_io_req->sc_cmd = orig_io_req->sc_cmd; + /* cleanup orig_io_req that is with the FW */ + set_bit(BNX2FC_FLAG_CMD_LOST, + &orig_io_req->req_flags); + bnx2fc_initiate_cleanup(orig_io_req); + /* Post a new IO req with the same sc_cmd */ + BNX2FC_IO_DBG(rec_req, "Post IO request again\n"); + rc = bnx2fc_post_io_req(tgt, new_io_req); + if (!rc) + goto free_frame; + BNX2FC_IO_DBG(rec_req, "REC: io post err\n"); + } +abort_io: + rc = bnx2fc_initiate_abts(orig_io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts " + "failed. 
issue cleanup\n"); + bnx2fc_initiate_cleanup(orig_io_req); + } + } else if (opcode == ELS_LS_ACC) { + /* REVISIT: Check if the exchange is already aborted */ + offset = ntohl(acc->reca_fc4value); + e_stat = ntohl(acc->reca_e_stat); + if (e_stat & ESB_ST_SEQ_INIT) { + BNX2FC_IO_DBG(rec_req, "target has the seq init\n"); + goto free_frame; + } + BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n", + e_stat, offset); + /* Seq initiative is with us */ + err_entry = (struct fcoe_err_report_entry *) + &orig_io_req->err_entry; + sc_cmd = orig_io_req->sc_cmd; + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + /* SCSI WRITE command */ + if (offset == orig_io_req->data_xfer_len) { + BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n"); + /* FCP_RSP lost */ + r_ctl = FC_RCTL_DD_CMD_STATUS; + offset = 0; + } else { + /* start transmitting from offset */ + BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n"); + send_seq_clnp = true; + r_ctl = FC_RCTL_DD_DATA_DESC; + if (bnx2fc_initiate_seq_cleanup(orig_io_req, + offset, r_ctl)) + abort_io = true; + /* XFER_RDY */ + } + } else { + /* SCSI READ command */ + if (err_entry->data.rx_buf_off == + orig_io_req->data_xfer_len) { + /* FCP_RSP lost */ + BNX2FC_IO_DBG(rec_req, "READ - resp lost\n"); + r_ctl = FC_RCTL_DD_CMD_STATUS; + offset = 0; + } else { + /* request retransmission from this offset */ + send_seq_clnp = true; + offset = err_entry->data.rx_buf_off; + BNX2FC_IO_DBG(rec_req, "RD DATA lost\n"); + /* FCP_DATA lost */ + r_ctl = FC_RCTL_DD_SOL_DATA; + if (bnx2fc_initiate_seq_cleanup(orig_io_req, + offset, r_ctl)) + abort_io = true; + } + } + if (abort_io) { + rc = bnx2fc_initiate_abts(orig_io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts" + " failed. issue cleanup\n"); + bnx2fc_initiate_cleanup(orig_io_req); + } + } else if (!send_seq_clnp) { + BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n"); + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl); + spin_lock_bh(&tgt->tgt_lock); + + if (rc) { + BNX2FC_IO_DBG(rec_req, "Unable to send SRR" + " IO will abort\n"); + } + } + } +free_frame: + fc_frame_free(fp); +free_buf: + kfree(buf); +rec_compl_done: + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); + kfree(cb_arg); +} + +int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req) +{ + struct fc_els_rec rec; + struct bnx2fc_rport *tgt = orig_io_req->tgt; + struct fc_lport *lport = tgt->rdata->local_port; + struct bnx2fc_els_cb_arg *cb_arg = NULL; + u32 sid = tgt->sid; + u32 r_a_tov = lport->r_a_tov; + int rc; + + BNX2FC_IO_DBG(orig_io_req, "Sending REC\n"); + memset(&rec, 0, sizeof(rec)); + + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n"); + rc = -ENOMEM; + goto rec_err; + } + kref_get(&orig_io_req->refcount); + + cb_arg->aborted_io_req = orig_io_req; + + rec.rec_cmd = ELS_REC; + hton24(rec.rec_s_id, sid); + rec.rec_ox_id = htons(orig_io_req->xid); + rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id); + + rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec), + bnx2fc_rec_compl, cb_arg, + r_a_tov); + if (rc) { + BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n"); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + kfree(cb_arg); + } +rec_err: + return rc; +} + +int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl) +{ + struct fcp_srr srr; + struct bnx2fc_rport *tgt = orig_io_req->tgt; + struct 
fc_lport *lport = tgt->rdata->local_port; + struct bnx2fc_els_cb_arg *cb_arg = NULL; + u32 r_a_tov = lport->r_a_tov; + int rc; + + BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n"); + memset(&srr, 0, sizeof(srr)); + + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n"); + rc = -ENOMEM; + goto srr_err; + } + kref_get(&orig_io_req->refcount); + + cb_arg->aborted_io_req = orig_io_req; + + srr.srr_op = ELS_SRR; + srr.srr_ox_id = htons(orig_io_req->xid); + srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id); + srr.srr_rel_off = htonl(offset); + srr.srr_r_ctl = r_ctl; + orig_io_req->srr_offset = offset; + orig_io_req->srr_rctl = r_ctl; + + rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr), + bnx2fc_srr_compl, cb_arg, + r_a_tov); + if (rc) { + BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n"); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + kfree(cb_arg); + } else + set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags); + +srr_err: + return rc; +} + +static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, + void *data, u32 data_len, + void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg), + struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec) +{ + struct fcoe_port *port = tgt->port; + struct bnx2fc_interface *interface = port->priv; + struct fc_rport *rport = tgt->rport; + struct fc_lport *lport = port->lport; + struct bnx2fc_cmd *els_req; + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + int rc = 0; + int task_idx, index; + u32 did, sid; + u16 xid; + + rc = fc_remote_port_chkready(rport); + if (rc) { + printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op); + rc = -EINVAL; + goto els_err; + } + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op); + rc = -EINVAL; + goto els_err; + } + if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { + printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op); + rc = -EINVAL; + goto els_err; + } + els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS); + if (!els_req) { + rc = -ENOMEM; + goto els_err; + } + + els_req->sc_cmd = NULL; + els_req->port = port; + els_req->tgt = tgt; + els_req->cb_func = cb_func; + cb_arg->io_req = els_req; + els_req->cb_arg = cb_arg; + els_req->data_xfer_len = data_len; + + mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req); + rc = bnx2fc_init_mp_req(els_req); + if (rc == FAILED) { + printk(KERN_ERR PFX "ELS MP request init failed\n"); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&els_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + rc = -ENOMEM; + goto els_err; + } else { + /* rc SUCCESS */ + rc = 0; + } + + /* Set the data_xfer_len to the size of ELS payload */ + mp_req->req_len = data_len; + els_req->data_xfer_len = mp_req->req_len; + + /* Fill ELS Payload */ + if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) { + memcpy(mp_req->req_buf, data, data_len); + } else { + printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op); + els_req->cb_func = NULL; + els_req->cb_arg = NULL; + spin_lock_bh(&tgt->tgt_lock); + kref_put(&els_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + rc = -EINVAL; + } + + if (rc) + goto els_err; + + /* Fill FC header */ + fc_hdr = &(mp_req->req_fc_hdr); + + did = tgt->rport->port_id; + sid = tgt->sid; + + if (op == ELS_SRR) + 
__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid, + FC_TYPE_FCP, FC_FC_FIRST_SEQ | + FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + else + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid, + FC_TYPE_ELS, FC_FC_FIRST_SEQ | + FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + + /* Obtain exchange id */ + xid = els_req->xid; + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; + task = &(task_page[index]); + bnx2fc_init_mp_task(els_req, task); + + spin_lock_bh(&tgt->tgt_lock); + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { + printk(KERN_ERR PFX "initiate_els.. session not ready\n"); + els_req->cb_func = NULL; + els_req->cb_arg = NULL; + kref_put(&els_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return -EINVAL; + } + + if (timer_msec) + bnx2fc_cmd_timer_set(els_req, timer_msec); + bnx2fc_add_2_sq(tgt, xid); + + els_req->on_active_queue = 1; + list_add_tail(&els_req->link, &tgt->els_queue); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + spin_unlock_bh(&tgt->tgt_lock); + +els_err: + return rc; +} + +void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req, + struct fcoe_task_ctx_entry *task, u8 num_rq) +{ + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr; + u64 *hdr; + u64 *temp_hdr; + + BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x" + "cmd_type = %d\n", els_req->xid, els_req->cmd_type); + + if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE, + &els_req->req_flags)) { + BNX2FC_ELS_DBG("Timer context finished processing this " + "els - 0x%x\n", els_req->xid); + /* This IO doesn't receive cleanup completion */ + kref_put(&els_req->refcount, bnx2fc_cmd_release); + return; + } + + /* Cancel the timeout_work, as we received the response */ + if (cancel_delayed_work(&els_req->timeout_work)) + kref_put(&els_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + + if (els_req->on_active_queue) { + list_del_init(&els_req->link); + els_req->on_active_queue = 0; + } + + mp_req = &(els_req->mp_req); + fc_hdr = &(mp_req->resp_fc_hdr); + + hdr = (u64 *)fc_hdr; + temp_hdr = (u64 *) + &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr; + hdr[0] = cpu_to_be64(temp_hdr[0]); + hdr[1] = cpu_to_be64(temp_hdr[1]); + hdr[2] = cpu_to_be64(temp_hdr[2]); + + mp_req->resp_len = + task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len; + + /* Parse ELS response */ + if ((els_req->cb_func) && (els_req->cb_arg)) { + els_req->cb_func(els_req->cb_arg); + els_req->cb_arg = NULL; + } + + kref_put(&els_req->refcount, bnx2fc_cmd_release); +} + +#define BNX2FC_FCOE_MAC_METHOD_GRANGED_MAC 1 +#define BNX2FC_FCOE_MAC_METHOD_FCF_MAP 2 +#define BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC 3 +static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg) +{ + struct fcoe_ctlr *fip = arg; + struct fc_exch *exch = fc_seq_exch(seq); + struct fc_lport *lport = exch->lp; + + struct fc_frame_header *fh; + u8 *granted_mac; + u8 fcoe_mac[6]; + u8 fc_map[3]; + int method; + + if (IS_ERR(fp)) + goto done; + + fh = fc_frame_header_get(fp); + granted_mac = fr_cb(fp)->granted_mac; + + /* + * We set the source MAC for FCoE traffic based on the Granted MAC + * address from the switch. + * + * If granted_mac is non-zero, we use that. + * If the granted_mac is zeroed out, create the FCoE MAC based on + * the sel_fcf->fc_map and the d_id fo the FLOGI frame. 
+ * If sel_fcf->fc_map is 0, then we use the default FCF-MAC plus the + * d_id of the FLOGI frame. + */ + if (!is_zero_ether_addr(granted_mac)) { + ether_addr_copy(fcoe_mac, granted_mac); + method = BNX2FC_FCOE_MAC_METHOD_GRANGED_MAC; + } else if (fip->sel_fcf && fip->sel_fcf->fc_map != 0) { + hton24(fc_map, fip->sel_fcf->fc_map); + fcoe_mac[0] = fc_map[0]; + fcoe_mac[1] = fc_map[1]; + fcoe_mac[2] = fc_map[2]; + fcoe_mac[3] = fh->fh_d_id[0]; + fcoe_mac[4] = fh->fh_d_id[1]; + fcoe_mac[5] = fh->fh_d_id[2]; + method = BNX2FC_FCOE_MAC_METHOD_FCF_MAP; + } else { + fc_fcoe_set_mac(fcoe_mac, fh->fh_d_id); + method = BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC; + } + + BNX2FC_HBA_DBG(lport, "fcoe_mac=%pM method=%d\n", fcoe_mac, method); + fip->update_mac(lport, fcoe_mac); +done: + fc_lport_flogi_resp(seq, fp, lport); +} + +static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg) +{ + struct fcoe_ctlr *fip = arg; + struct fc_exch *exch = fc_seq_exch(seq); + struct fc_lport *lport = exch->lp; + static u8 zero_mac[ETH_ALEN] = { 0 }; + + if (!IS_ERR(fp)) + fip->update_mac(lport, zero_mac); + fc_lport_logo_resp(seq, fp, lport); +} + +struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timeout) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface); + struct fc_frame_header *fh = fc_frame_header_get(fp); + + switch (op) { + case ELS_FLOGI: + case ELS_FDISC: + return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp, + fip, timeout); + case ELS_LOGO: + /* only hook onto fabric logouts, not port logouts */ + if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) + break; + return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp, + fip, timeout); + } + return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); +} diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c new file mode 100644 index 000000000..451a58e0f --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -0,0 +1,2989 @@ +/* bnx2fc_fcoe.c: QLogic Linux FCoE offload driver. + * This file contains the code that interacts with libfc, libfcoe, + * cnic modules to create FCoE instances, send/receive non-offloaded + * FIP/FCoE packets, listen to link events etc. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" + +#include + +static struct list_head adapter_list; +static struct list_head if_list; +static u32 adapter_count; +static DEFINE_MUTEX(bnx2fc_dev_lock); +DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); + +#define DRV_MODULE_NAME "bnx2fc" +#define DRV_MODULE_VERSION BNX2FC_VERSION +#define DRV_MODULE_RELDATE "October 15, 2015" + + +static char version[] = + "QLogic FCoE Driver " DRV_MODULE_NAME \ + " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + + +MODULE_AUTHOR("Bhanu Prakash Gollapudi "); +MODULE_DESCRIPTION("QLogic FCoE Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +#define BNX2FC_MAX_QUEUE_DEPTH 256 +#define BNX2FC_MIN_QUEUE_DEPTH 32 +#define FCOE_WORD_TO_BYTE 4 + +static struct scsi_transport_template *bnx2fc_transport_template; +static struct scsi_transport_template *bnx2fc_vport_xport_template; + +struct workqueue_struct *bnx2fc_wq; + +/* bnx2fc structure needs only one instance of the fcoe_percpu_s structure. + * Here the io threads are per cpu but the l2 thread is just one + */ +struct fcoe_percpu_s bnx2fc_global; +static DEFINE_SPINLOCK(bnx2fc_global_lock); + +static struct cnic_ulp_ops bnx2fc_cnic_cb; +static struct libfc_function_template bnx2fc_libfc_fcn_templ; +static struct scsi_host_template bnx2fc_shost_template; +static struct fc_function_template bnx2fc_transport_function; +static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ; +static struct fc_function_template bnx2fc_vport_xport_function; +static int bnx2fc_create(struct net_device *netdev, enum fip_mode fip_mode); +static void __bnx2fc_destroy(struct bnx2fc_interface *interface); +static int bnx2fc_destroy(struct net_device *net_device); +static int bnx2fc_enable(struct net_device *netdev); +static int bnx2fc_disable(struct net_device *netdev); + +/* fcoe_syfs control interface handlers */ +static int bnx2fc_ctlr_alloc(struct net_device *netdev); +static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev); + +static void bnx2fc_recv_frame(struct sk_buff *skb); + +static void bnx2fc_start_disc(struct bnx2fc_interface *interface); +static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); +static int bnx2fc_lport_config(struct fc_lport *lport); +static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba); +static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba); +static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba); +static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); +static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba); +static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, + struct device *parent, int npiv); +static void bnx2fc_port_destroy(struct fcoe_port *port); + +static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); +static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device + *phys_dev); +static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface); +static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic); + +static int bnx2fc_fw_init(struct bnx2fc_hba *hba); +static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba); + +static void bnx2fc_port_shutdown(struct fc_lport *lport); +static void bnx2fc_stop(struct bnx2fc_interface *interface); +static int __init bnx2fc_mod_init(void); +static void __exit bnx2fc_mod_exit(void); + +unsigned int bnx2fc_debug_level; +module_param_named(debug_logging, bnx2fc_debug_level, 
int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug_logging, + "Option to enable extended logging,\n" + "\t\tDefault is 0 - no logging.\n" + "\t\t0x01 - SCSI cmd error, cleanup.\n" + "\t\t0x02 - Session setup, cleanup, etc.\n" + "\t\t0x04 - lport events, link, mtu, etc.\n" + "\t\t0x08 - ELS logs.\n" + "\t\t0x10 - fcoe L2 frame related logs.\n" + "\t\t0xff - LOG all messages."); + +static uint bnx2fc_devloss_tmo; +module_param_named(devloss_tmo, bnx2fc_devloss_tmo, uint, S_IRUGO); +MODULE_PARM_DESC(devloss_tmo, " Change devloss_tmo for the remote ports " + "attached via bnx2fc."); + +static uint bnx2fc_max_luns = BNX2FC_MAX_LUN; +module_param_named(max_luns, bnx2fc_max_luns, uint, S_IRUGO); +MODULE_PARM_DESC(max_luns, " Change the default max_lun per SCSI host. Default " + "0xffff."); + +static uint bnx2fc_queue_depth; +module_param_named(queue_depth, bnx2fc_queue_depth, uint, S_IRUGO); +MODULE_PARM_DESC(queue_depth, " Change the default queue depth of SCSI devices " + "attached via bnx2fc."); + +static uint bnx2fc_log_fka; +module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is " + "initiating a FIP keep alive when debug logging is enabled."); + +static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport) +{ + return ((struct bnx2fc_interface *) + ((struct fcoe_port *)lport_priv(lport))->priv)->netdev; +} + +static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev) +{ + struct fcoe_ctlr_device *ctlr_dev = + fcoe_fcf_dev_to_ctlr_dev(fcf_dev); + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr); + + fcf_dev->vlan_id = fcoe->vlan_id; +} + +static void bnx2fc_clean_rx_queue(struct fc_lport *lp) +{ + struct fcoe_percpu_s *bg; + struct fcoe_rcv_info *fr; + struct sk_buff_head *list; + struct sk_buff *skb, *next; + + bg = &bnx2fc_global; + spin_lock_bh(&bg->fcoe_rx_list.lock); + list = &bg->fcoe_rx_list; + skb_queue_walk_safe(list, skb, next) { + fr = fcoe_dev_from_skb(skb); + if (fr->fr_dev == lp) { + __skb_unlink(skb, list); + kfree_skb(skb); + } + } + spin_unlock_bh(&bg->fcoe_rx_list.lock); +} + +int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen) +{ + int rc; + spin_lock(&bnx2fc_global_lock); + rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global); + spin_unlock(&bnx2fc_global_lock); + + return rc; +} + +static void bnx2fc_abort_io(struct fc_lport *lport) +{ + /* + * This function is a no-op for bnx2fc, but we do + * not want to leave it as NULL either, as libfc + * can call the default function which is + * fc_fcp_abort_io.
+ */ +} + +static void bnx2fc_cleanup(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct bnx2fc_rport *tgt; + int i; + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + mutex_lock(&hba->hba_mutex); + spin_lock_bh(&hba->hba_lock); + for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { + tgt = hba->tgt_ofld_list[i]; + if (tgt) { + /* Cleanup IOs belonging to requested vport */ + if (tgt->port == port) { + spin_unlock_bh(&hba->hba_lock); + BNX2FC_TGT_DBG(tgt, "flush/cleanup\n"); + bnx2fc_flush_active_ios(tgt); + spin_lock_bh(&hba->hba_lock); + } + } + } + spin_unlock_bh(&hba->hba_lock); + mutex_unlock(&hba->hba_mutex); +} + +static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt, + struct fc_frame *fp) +{ + struct fc_rport_priv *rdata = tgt->rdata; + struct fc_frame_header *fh; + int rc = 0; + + fh = fc_frame_header_get(fp); + BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, " + "r_ctl = 0x%x\n", rdata->ids.port_id, + ntohs(fh->fh_ox_id), fh->fh_r_ctl); + if ((fh->fh_type == FC_TYPE_ELS) && + (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { + + switch (fc_frame_payload_op(fp)) { + case ELS_ADISC: + rc = bnx2fc_send_adisc(tgt, fp); + break; + case ELS_LOGO: + rc = bnx2fc_send_logo(tgt, fp); + break; + case ELS_RLS: + rc = bnx2fc_send_rls(tgt, fp); + break; + default: + break; + } + } else if ((fh->fh_type == FC_TYPE_BLS) && + (fh->fh_r_ctl == FC_RCTL_BA_ABTS)) + BNX2FC_TGT_DBG(tgt, "ABTS frame\n"); + else { + BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x " + "rctl 0x%x thru non-offload path\n", + fh->fh_type, fh->fh_r_ctl); + return -ENODEV; + } + if (rc) + return -ENOMEM; + else + return 0; +} + +/** + * bnx2fc_xmit - bnx2fc's FCoE frame transmit function + * + * @lport: the associated local port + * @fp: the fc_frame to be transmitted + */ +static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp) +{ + struct ethhdr *eh; + struct fcoe_crc_eof *cp; + struct sk_buff *skb; + struct fc_frame_header *fh; + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + struct bnx2fc_hba *hba; + struct fcoe_port *port; + struct fcoe_hdr *hp; + struct bnx2fc_rport *tgt; + u8 sof, eof; + u32 crc; + unsigned int hlen, tlen, elen; + int wlen, rc = 0; + + port = (struct fcoe_port *)lport_priv(lport); + interface = port->priv; + ctlr = bnx2fc_to_ctlr(interface); + hba = interface->hba; + + fh = fc_frame_header_get(fp); + + skb = fp_skb(fp); + if (!lport->link_up) { + BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n"); + kfree_skb(skb); + return 0; + } + + if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { + if (!ctlr->sel_fcf) { + BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n"); + kfree_skb(skb); + return -EINVAL; + } + if (fcoe_ctlr_els_send(ctlr, lport, skb)) + return 0; + } + + sof = fr_sof(fp); + eof = fr_eof(fp); + + /* + * Snoop the frame header to check if the frame is for + * an offloaded session + */ + /* + * tgt_ofld_list access is synchronized using + * both hba mutex and hba lock. Atleast hba mutex or + * hba lock needs to be held for read access. 
+ */ + + spin_lock_bh(&hba->hba_lock); + tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id)); + if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { + /* This frame is for offloaded session */ + BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session " + "port_id = 0x%x\n", ntoh24(fh->fh_d_id)); + spin_unlock_bh(&hba->hba_lock); + rc = bnx2fc_xmit_l2_frame(tgt, fp); + if (rc != -ENODEV) { + kfree_skb(skb); + return rc; + } + } else { + spin_unlock_bh(&hba->hba_lock); + } + + elen = sizeof(struct ethhdr); + hlen = sizeof(struct fcoe_hdr); + tlen = sizeof(struct fcoe_crc_eof); + wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; + + skb->ip_summed = CHECKSUM_NONE; + crc = fcoe_fc_crc(fp); + + /* copy port crc and eof to the skb buff */ + if (skb_is_nonlinear(skb)) { + skb_frag_t *frag; + if (bnx2fc_get_paged_crc_eof(skb, tlen)) { + kfree_skb(skb); + return -ENOMEM; + } + frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; + cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); + } else { + cp = skb_put(skb, tlen); + } + + memset(cp, 0, sizeof(*cp)); + cp->fcoe_eof = eof; + cp->fcoe_crc32 = cpu_to_le32(~crc); + if (skb_is_nonlinear(skb)) { + kunmap_atomic(cp); + cp = NULL; + } + + /* adjust skb network/transport offsets to match mac/fcoe/port */ + skb_push(skb, elen + hlen); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb->mac_len = elen; + skb->protocol = htons(ETH_P_FCOE); + skb->dev = interface->netdev; + + /* fill up mac and fcoe headers */ + eh = eth_hdr(skb); + eh->h_proto = htons(ETH_P_FCOE); + if (ctlr->map_dest) + fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); + else + /* insert GW address */ + memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN); + + if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN)) + memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN); + else + memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); + + hp = (struct fcoe_hdr *)(eh + 1); + memset(hp, 0, sizeof(*hp)); + if (FC_FCOE_VER) + FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); + hp->fcoe_sof = sof; + + /* fcoe lso, mss is in max_payload which is non-zero for FCP data */ + if (lport->seq_offload && fr_max_payload(fp)) { + skb_shinfo(skb)->gso_type = SKB_GSO_FCOE; + skb_shinfo(skb)->gso_size = fr_max_payload(fp); + } else { + skb_shinfo(skb)->gso_type = 0; + skb_shinfo(skb)->gso_size = 0; + } + + /*update tx stats */ + this_cpu_inc(lport->stats->TxFrames); + this_cpu_add(lport->stats->TxWords, wlen); + + /* send down to lld */ + fr_dev(fp) = lport; + if (port->fcoe_pending_queue.qlen) + fcoe_check_wait_queue(lport, skb); + else if (fcoe_start_io(skb)) + fcoe_check_wait_queue(lport, skb); + + return 0; +} + +/** + * bnx2fc_rcv - This is bnx2fc's receive function called by NET_RX_SOFTIRQ + * + * @skb: the receive socket buffer + * @dev: associated net device + * @ptype: context + * @olddev: last device + * + * This function receives the packet and builds FC frame and passes it up + */ +static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *olddev) +{ + struct fc_lport *lport; + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + struct fcoe_rcv_info *fr; + struct fcoe_percpu_s *bg; + + interface = container_of(ptype, struct bnx2fc_interface, + fcoe_packet_type); + ctlr = bnx2fc_to_ctlr(interface); + lport = ctlr->lp; + + if (unlikely(lport == NULL)) { + printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n"); + goto err; + } + + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return -1; + + if 
(unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { + printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n"); + goto err; + } + + /* + * Check for minimum frame length, and make sure required FCoE + * and FC headers are pulled into the linear data area. + */ + if (unlikely((skb->len < FCOE_MIN_FRAME) || + !pskb_may_pull(skb, FCOE_HEADER_LEN))) + goto err; + + skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); + + fr = fcoe_dev_from_skb(skb); + fr->fr_dev = lport; + + bg = &bnx2fc_global; + spin_lock(&bg->fcoe_rx_list.lock); + + __skb_queue_tail(&bg->fcoe_rx_list, skb); + if (bg->fcoe_rx_list.qlen == 1) + wake_up_process(bg->kthread); + + spin_unlock(&bg->fcoe_rx_list.lock); + + return 0; +err: + kfree_skb(skb); + return -1; +} + +static int bnx2fc_l2_rcv_thread(void *arg) +{ + struct fcoe_percpu_s *bg = arg; + struct sk_buff *skb; + + set_user_nice(current, MIN_NICE); + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + spin_lock_bh(&bg->fcoe_rx_list.lock); + while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) { + spin_unlock_bh(&bg->fcoe_rx_list.lock); + bnx2fc_recv_frame(skb); + spin_lock_bh(&bg->fcoe_rx_list.lock); + } + __set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_bh(&bg->fcoe_rx_list.lock); + } + __set_current_state(TASK_RUNNING); + return 0; +} + + +static void bnx2fc_recv_frame(struct sk_buff *skb) +{ + u64 crc_err; + u32 fr_len, fr_crc; + struct fc_lport *lport; + struct fcoe_rcv_info *fr; + struct fc_frame_header *fh; + struct fcoe_crc_eof crc_eof; + struct fc_frame *fp; + struct fc_lport *vn_port; + struct fcoe_port *port, *phys_port; + u8 *mac = NULL; + u8 *dest_mac = NULL; + struct fcoe_hdr *hp; + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + + fr = fcoe_dev_from_skb(skb); + lport = fr->fr_dev; + if (unlikely(lport == NULL)) { + printk(KERN_ERR PFX "Invalid lport struct\n"); + kfree_skb(skb); + return; + } + + if (skb_is_nonlinear(skb)) + skb_linearize(skb); + mac = eth_hdr(skb)->h_source; + dest_mac = eth_hdr(skb)->h_dest; + + /* Pull the header */ + hp = (struct fcoe_hdr *) skb_network_header(skb); + fh = (struct fc_frame_header *) skb_transport_header(skb); + skb_pull(skb, sizeof(struct fcoe_hdr)); + fr_len = skb->len - sizeof(struct fcoe_crc_eof); + + this_cpu_inc(lport->stats->RxFrames); + this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE); + + fp = (struct fc_frame *)skb; + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = hp->fcoe_sof; + if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { + kfree_skb(skb); + return; + } + fr_eof(fp) = crc_eof.fcoe_eof; + fr_crc(fp) = crc_eof.fcoe_crc32; + if (pskb_trim(skb, fr_len)) { + kfree_skb(skb); + return; + } + + phys_port = lport_priv(lport); + interface = phys_port->priv; + ctlr = bnx2fc_to_ctlr(interface); + + fh = fc_frame_header_get(fp); + + if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) { + BNX2FC_HBA_DBG(lport, "FC frame d_id mismatch with MAC %pM.\n", + dest_mac); + kfree_skb(skb); + return; + } + + vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); + if (vn_port) { + port = lport_priv(vn_port); + if (!ether_addr_equal(port->data_src_addr, dest_mac)) { + BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); + kfree_skb(skb); + return; + } + } + if (ctlr->state) { + if (!ether_addr_equal(mac, ctlr->dest_addr)) { + BNX2FC_HBA_DBG(lport, "Wrong source address: mac:%pM dest_addr:%pM.\n", + mac, ctlr->dest_addr); + kfree_skb(skb); + return; + } + } + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && + fh->fh_type == 
FC_TYPE_FCP) { + /* Drop FCP data. We dont this in L2 path */ + kfree_skb(skb); + return; + } + if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && + fh->fh_type == FC_TYPE_ELS) { + switch (fc_frame_payload_op(fp)) { + case ELS_LOGO: + if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { + /* drop non-FIP LOGO */ + kfree_skb(skb); + return; + } + break; + } + } + + if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { + /* Drop incoming ABTS */ + kfree_skb(skb); + return; + } + + /* + * If the destination ID from the frame header does not match what we + * have on record for lport and the search for a NPIV port came up + * empty then this is not addressed to our port so simply drop it. + */ + if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) { + BNX2FC_HBA_DBG(lport, "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n", + lport->port_id, ntoh24(fh->fh_d_id)); + kfree_skb(skb); + return; + } + + fr_crc = le32_to_cpu(fr_crc(fp)); + + if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) { + crc_err = this_cpu_inc_return(lport->stats->InvalidCRCCount); + if (crc_err < 5) + printk(KERN_WARNING PFX "dropping frame with " + "CRC error\n"); + kfree_skb(skb); + return; + } + fc_exch_recv(lport, fp); +} + +/** + * bnx2fc_percpu_io_thread - thread per cpu for ios + * + * @arg: ptr to bnx2fc_percpu_info structure + */ +static int bnx2fc_percpu_io_thread(void *arg) +{ + struct bnx2fc_percpu_s *p = arg; + struct bnx2fc_work *work, *tmp; + LIST_HEAD(work_list); + + set_user_nice(current, MIN_NICE); + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + spin_lock_bh(&p->fp_work_lock); + while (!list_empty(&p->work_list)) { + list_splice_init(&p->work_list, &work_list); + spin_unlock_bh(&p->fp_work_lock); + + list_for_each_entry_safe(work, tmp, &work_list, list) { + list_del_init(&work->list); + bnx2fc_process_cq_compl(work->tgt, work->wqe, + work->rq_data, + work->num_rq, + work->task); + kfree(work); + } + + spin_lock_bh(&p->fp_work_lock); + } + __set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_bh(&p->fp_work_lock); + } + __set_current_state(TASK_RUNNING); + + return 0; +} + +static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) +{ + struct fc_host_statistics *bnx2fc_stats; + struct fc_lport *lport = shost_priv(shost); + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct fcoe_statistics_params *fw_stats; + int rc = 0; + + fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer; + if (!fw_stats) + return NULL; + + mutex_lock(&hba->hba_stats_mutex); + + bnx2fc_stats = fc_get_host_stats(shost); + + init_completion(&hba->stat_req_done); + if (bnx2fc_send_stat_req(hba)) + goto unlock_stats_mutex; + rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ)); + if (!rc) { + BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); + goto unlock_stats_mutex; + } + BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt); + bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt; + BNX2FC_STATS(hba, tx_stat, fcoe_tx_pkt_cnt); + bnx2fc_stats->tx_frames += hba->bfw_stats.fcoe_tx_pkt_cnt; + BNX2FC_STATS(hba, tx_stat, fcoe_tx_byte_cnt); + bnx2fc_stats->tx_words += ((hba->bfw_stats.fcoe_tx_byte_cnt) / 4); + BNX2FC_STATS(hba, rx_stat0, fcoe_rx_pkt_cnt); + bnx2fc_stats->rx_frames += hba->bfw_stats.fcoe_rx_pkt_cnt; + BNX2FC_STATS(hba, rx_stat0, fcoe_rx_byte_cnt); + bnx2fc_stats->rx_words += ((hba->bfw_stats.fcoe_rx_byte_cnt) / 4); + + bnx2fc_stats->dumped_frames = 0; + 
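+	/*
+	 * The link-event counters below are not taken from the firmware
+	 * statistics buffer here, so they are simply cleared.
+	 */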
bnx2fc_stats->lip_count = 0; + bnx2fc_stats->nos_count = 0; + bnx2fc_stats->loss_of_sync_count = 0; + bnx2fc_stats->loss_of_signal_count = 0; + bnx2fc_stats->prim_seq_protocol_err_count = 0; + + memcpy(&hba->prev_stats, hba->stats_buffer, + sizeof(struct fcoe_statistics_params)); + +unlock_stats_mutex: + mutex_unlock(&hba->hba_stats_mutex); + return bnx2fc_stats; +} + +static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct Scsi_Host *shost = lport->host; + int rc = 0; + + shost->max_cmd_len = BNX2FC_MAX_CMD_LEN; + shost->max_lun = bnx2fc_max_luns; + shost->max_id = BNX2FC_MAX_FCP_TGT; + shost->max_channel = 0; + if (lport->vport) + shost->transportt = bnx2fc_vport_xport_template; + else + shost->transportt = bnx2fc_transport_template; + + /* Add the new host to SCSI-ml */ + rc = scsi_add_host(lport->host, dev); + if (rc) { + printk(KERN_ERR PFX "Error on scsi_add_host\n"); + return rc; + } + if (!lport->vport) + fc_host_max_npiv_vports(lport->host) = USHRT_MAX; + snprintf(fc_host_symbolic_name(lport->host), 256, + "%s (QLogic %s) v%s over %s", + BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION, + interface->netdev->name); + + return 0; +} + +static int bnx2fc_link_ok(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct net_device *dev = hba->phys_dev; + int rc = 0; + + if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) + clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); + else { + set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); + rc = -1; + } + return rc; +} + +/** + * bnx2fc_get_link_state - get network link state + * + * @hba: adapter instance pointer + * + * updates adapter structure flag based on netdev state + */ +void bnx2fc_get_link_state(struct bnx2fc_hba *hba) +{ + if (test_bit(__LINK_STATE_NOCARRIER, &hba->phys_dev->state)) + set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); + else + clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); +} + +static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev) +{ + struct bnx2fc_hba *hba; + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + struct fcoe_port *port; + u64 wwnn, wwpn; + + port = lport_priv(lport); + interface = port->priv; + ctlr = bnx2fc_to_ctlr(interface); + hba = interface->hba; + + /* require support for get_pauseparam ethtool op. 
*/ + if (!hba->phys_dev->ethtool_ops || + !hba->phys_dev->ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + if (fc_set_mfs(lport, BNX2FC_MFS)) + return -EINVAL; + + skb_queue_head_init(&port->fcoe_pending_queue); + port->fcoe_pending_queue_active = 0; + timer_setup(&port->timer, fcoe_queue_timer, 0); + + fcoe_link_speed_update(lport); + + if (!lport->vport) { + if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) + wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, + 1, 0); + BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); + fc_set_wwnn(lport, wwnn); + + if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) + wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, + 2, 0); + + BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); + fc_set_wwpn(lport, wwpn); + } + + return 0; +} + +static void bnx2fc_destroy_timer(struct timer_list *t) +{ + struct bnx2fc_hba *hba = from_timer(hba, t, destroy_timer); + + printk(KERN_ERR PFX "ERROR:bnx2fc_destroy_timer - " + "Destroy compl not received!!\n"); + set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); + wake_up_interruptible(&hba->destroy_wait); +} + +/** + * bnx2fc_indicate_netevent - Generic netdev event handler + * + * @context: adapter structure pointer + * @event: event type + * @vlan_id: vlan id - associated vlan id with this event + * + * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN,NETDEV_CHANGE and + * NETDEV_CHANGE_MTU events. Handle NETDEV_UNREGISTER only for vlans. + */ +static void bnx2fc_indicate_netevent(void *context, unsigned long event, + u16 vlan_id) +{ + struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; + struct fcoe_ctlr_device *cdev; + struct fc_lport *lport; + struct fc_lport *vport; + struct bnx2fc_interface *interface, *tmp; + struct fcoe_ctlr *ctlr; + int wait_for_upload = 0; + u32 link_possible = 1; + + if (vlan_id != 0 && event != NETDEV_UNREGISTER) + return; + + switch (event) { + case NETDEV_UP: + if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) + printk(KERN_ERR "indicate_netevent: "\ + "hba is not UP!!\n"); + break; + + case NETDEV_DOWN: + clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); + link_possible = 0; + break; + + case NETDEV_GOING_DOWN: + set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + link_possible = 0; + break; + + case NETDEV_CHANGE: + break; + + case NETDEV_UNREGISTER: + if (!vlan_id) + return; + mutex_lock(&bnx2fc_dev_lock); + list_for_each_entry_safe(interface, tmp, &if_list, list) { + if (interface->hba == hba && + interface->vlan_id == (vlan_id & VLAN_VID_MASK)) + __bnx2fc_destroy(interface); + } + mutex_unlock(&bnx2fc_dev_lock); + return; + + default: + return; + } + + mutex_lock(&bnx2fc_dev_lock); + list_for_each_entry(interface, &if_list, list) { + + if (interface->hba != hba) + continue; + + ctlr = bnx2fc_to_ctlr(interface); + lport = ctlr->lp; + BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n", + interface->netdev->name, event); + + fcoe_link_speed_update(lport); + + cdev = fcoe_ctlr_to_ctlr_dev(ctlr); + + if (link_possible && !bnx2fc_link_ok(lport)) { + switch (cdev->enabled) { + case FCOE_CTLR_DISABLED: + pr_info("Link up while interface is disabled.\n"); + break; + case FCOE_CTLR_ENABLED: + case FCOE_CTLR_UNUSED: + /* Reset max recv frame size to default */ + fc_set_mfs(lport, BNX2FC_MFS); + /* + * ctlr link up will only be handled during + * enable to avoid sending discovery + * solicitation on a stale vlan + */ + if (interface->enabled) + fcoe_ctlr_link_up(ctlr); + } + } else if (fcoe_ctlr_link_down(ctlr)) { + switch 
(cdev->enabled) { + case FCOE_CTLR_DISABLED: + pr_info("Link down while interface is disabled.\n"); + break; + case FCOE_CTLR_ENABLED: + case FCOE_CTLR_UNUSED: + mutex_lock(&lport->lp_mutex); + list_for_each_entry(vport, &lport->vports, list) + fc_host_port_type(vport->host) = + FC_PORTTYPE_UNKNOWN; + mutex_unlock(&lport->lp_mutex); + fc_host_port_type(lport->host) = + FC_PORTTYPE_UNKNOWN; + this_cpu_inc(lport->stats->LinkFailureCount); + fcoe_clean_pending_queue(lport); + wait_for_upload = 1; + } + } + } + mutex_unlock(&bnx2fc_dev_lock); + + if (wait_for_upload) { + clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); + init_waitqueue_head(&hba->shutdown_wait); + BNX2FC_MISC_DBG("indicate_netevent " + "num_ofld_sess = %d\n", + hba->num_ofld_sess); + hba->wait_for_link_down = 1; + wait_event_interruptible(hba->shutdown_wait, + (hba->num_ofld_sess == 0)); + BNX2FC_MISC_DBG("wakeup - num_ofld_sess = %d\n", + hba->num_ofld_sess); + hba->wait_for_link_down = 0; + + if (signal_pending(current)) + flush_signals(current); + } +} + +static int bnx2fc_libfc_config(struct fc_lport *lport) +{ + + /* Set the function pointers set by bnx2fc driver */ + memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ, + sizeof(struct libfc_function_template)); + fc_elsct_init(lport); + fc_exch_init(lport); + fc_disc_init(lport); + fc_disc_config(lport, lport); + return 0; +} + +static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba) +{ + int fcoe_min_xid, fcoe_max_xid; + + fcoe_min_xid = hba->max_xid + 1; + if (nr_cpu_ids <= 2) + fcoe_max_xid = hba->max_xid + FCOE_XIDS_PER_CPU_OFFSET; + else + fcoe_max_xid = hba->max_xid + FCOE_MAX_XID_OFFSET; + if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, fcoe_min_xid, + fcoe_max_xid, NULL)) { + printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n"); + return -ENOMEM; + } + + return 0; +} + +static int bnx2fc_lport_config(struct fc_lport *lport) +{ + lport->link_up = 0; + lport->qfull = 0; + lport->max_retry_count = BNX2FC_MAX_RETRY_CNT; + lport->max_rport_retry_count = BNX2FC_MAX_RPORT_RETRY_CNT; + lport->e_d_tov = 2 * 1000; + lport->r_a_tov = 10 * 1000; + + lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); + lport->does_npiv = 1; + + memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen)); + lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA; + + /* alloc stats structure */ + if (fc_lport_init_stats(lport)) + return -ENOMEM; + + /* Finish fc_lport configuration */ + fc_lport_config(lport); + + return 0; +} + +/** + * bnx2fc_fip_recv - handle a received FIP frame. + * + * @skb: the received skb + * @dev: associated &net_device + * @ptype: the &packet_type structure which was used to register this handler. + * @orig_dev: original receive &net_device, in case @ dev is a bond. + * + * Returns: 0 for success + */ +static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, + struct net_device *orig_dev) +{ + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + interface = container_of(ptype, struct bnx2fc_interface, + fip_packet_type); + ctlr = bnx2fc_to_ctlr(interface); + fcoe_ctlr_recv(ctlr, skb); + return 0; +} + +/** + * bnx2fc_update_src_mac - Update Ethernet MAC filters. + * + * @lport: The local port + * @addr: Location of data to copy + * + * Remove any previously-set unicast MAC filter. + * Add secondary FCoE MAC address filter for our OUI. 
+ */ +static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr) +{ + struct fcoe_port *port = lport_priv(lport); + + memcpy(port->data_src_addr, addr, ETH_ALEN); +} + +/** + * bnx2fc_get_src_mac - return the ethernet source address for an lport + * + * @lport: libfc port + */ +static u8 *bnx2fc_get_src_mac(struct fc_lport *lport) +{ + struct fcoe_port *port; + + port = (struct fcoe_port *)lport_priv(lport); + return port->data_src_addr; +} + +/** + * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame. + * + * @fip: FCoE controller. + * @skb: FIP Packet. + */ +static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fip_header *fiph; + struct ethhdr *eth_hdr; + u16 op; + u8 sub; + + fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2); + eth_hdr = (struct ethhdr *)skb_mac_header(skb); + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (op == FIP_OP_CTRL && sub == FIP_SC_SOL && bnx2fc_log_fka) + BNX2FC_MISC_DBG("Sending FKA from %pM to %pM.\n", + eth_hdr->h_source, eth_hdr->h_dest); + + skb->dev = bnx2fc_from_ctlr(fip)->netdev; + dev_queue_xmit(skb); +} + +static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fcoe_port *port = lport_priv(n_port); + struct bnx2fc_interface *interface = port->priv; + struct net_device *netdev = interface->netdev; + struct fc_lport *vn_port; + int rc; + char buf[32]; + + rc = fcoe_validate_vport_create(vport); + if (rc) { + fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); + printk(KERN_ERR PFX "Failed to create vport, " + "WWPN (0x%s) already exists\n", + buf); + return rc; + } + + if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) { + printk(KERN_ERR PFX "vn ports cannot be created on" + "this interface\n"); + return -EIO; + } + rtnl_lock(); + mutex_lock(&bnx2fc_dev_lock); + vn_port = bnx2fc_if_create(interface, &vport->dev, 1); + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + + if (!vn_port) { + printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n", + netdev->name); + return -EIO; + } + + if (bnx2fc_devloss_tmo) + fc_host_dev_loss_tmo(vn_port->host) = bnx2fc_devloss_tmo; + + if (disabled) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + } else { + vn_port->boot_time = jiffies; + fc_lport_init(vn_port); + fc_fabric_login(vn_port); + fc_vport_setlink(vn_port); + } + return 0; +} + +static void bnx2fc_free_vport(struct bnx2fc_hba *hba, struct fc_lport *lport) +{ + struct bnx2fc_lport *blport, *tmp; + + spin_lock_bh(&hba->hba_lock); + list_for_each_entry_safe(blport, tmp, &hba->vports, list) { + if (blport->lport == lport) { + list_del(&blport->list); + kfree(blport); + } + } + spin_unlock_bh(&hba->hba_lock); +} + +static int bnx2fc_vport_destroy(struct fc_vport *vport) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port = vport->dd_data; + struct fcoe_port *port = lport_priv(vn_port); + struct bnx2fc_interface *interface = port->priv; + struct fc_lport *v_port; + bool found = false; + + mutex_lock(&n_port->lp_mutex); + list_for_each_entry(v_port, &n_port->vports, list) + if (v_port->vport == vport) { + found = true; + break; + } + + if (!found) { + mutex_unlock(&n_port->lp_mutex); + return -ENOENT; + } + list_del(&vn_port->list); + mutex_unlock(&n_port->lp_mutex); + bnx2fc_free_vport(interface->hba, port->lport); + bnx2fc_port_shutdown(port->lport); + 
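+	/*
+	 * With the fabric logged out and the lport destroyed, tear down the
+	 * SCSI host for this vport and drop the interface reference taken
+	 * when the vport was created.
+	 */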
bnx2fc_port_destroy(port); + bnx2fc_interface_put(interface); + return 0; +} + +static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable) +{ + struct fc_lport *lport = vport->dd_data; + + if (disable) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + fc_fabric_logoff(lport); + } else { + lport->boot_time = jiffies; + fc_fabric_login(lport); + fc_vport_setlink(lport); + } + return 0; +} + + +static int bnx2fc_interface_setup(struct bnx2fc_interface *interface) +{ + struct net_device *netdev = interface->netdev; + struct net_device *physdev = interface->hba->phys_dev; + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct netdev_hw_addr *ha; + int sel_san_mac = 0; + + /* setup Source MAC Address */ + rcu_read_lock(); + for_each_dev_addr(physdev, ha) { + BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ", + ha->type); + printk(KERN_INFO "%2x:%2x:%2x:%2x:%2x:%2x\n", ha->addr[0], + ha->addr[1], ha->addr[2], ha->addr[3], + ha->addr[4], ha->addr[5]); + + if ((ha->type == NETDEV_HW_ADDR_T_SAN) && + (is_valid_ether_addr(ha->addr))) { + memcpy(ctlr->ctl_src_addr, ha->addr, + ETH_ALEN); + sel_san_mac = 1; + BNX2FC_MISC_DBG("Found SAN MAC\n"); + } + } + rcu_read_unlock(); + + if (!sel_san_mac) + return -ENODEV; + + interface->fip_packet_type.func = bnx2fc_fip_recv; + interface->fip_packet_type.type = htons(ETH_P_FIP); + interface->fip_packet_type.dev = netdev; + dev_add_pack(&interface->fip_packet_type); + + interface->fcoe_packet_type.func = bnx2fc_rcv; + interface->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); + interface->fcoe_packet_type.dev = netdev; + dev_add_pack(&interface->fcoe_packet_type); + + return 0; +} + +static int bnx2fc_attach_transport(void) +{ + bnx2fc_transport_template = + fc_attach_transport(&bnx2fc_transport_function); + + if (bnx2fc_transport_template == NULL) { + printk(KERN_ERR PFX "Failed to attach FC transport\n"); + return -ENODEV; + } + + bnx2fc_vport_xport_template = + fc_attach_transport(&bnx2fc_vport_xport_function); + if (bnx2fc_vport_xport_template == NULL) { + printk(KERN_ERR PFX + "Failed to attach FC transport for vport\n"); + fc_release_transport(bnx2fc_transport_template); + bnx2fc_transport_template = NULL; + return -ENODEV; + } + return 0; +} +static void bnx2fc_release_transport(void) +{ + fc_release_transport(bnx2fc_transport_template); + fc_release_transport(bnx2fc_vport_xport_template); + bnx2fc_transport_template = NULL; + bnx2fc_vport_xport_template = NULL; +} + +static void bnx2fc_interface_release(struct kref *kref) +{ + struct fcoe_ctlr_device *ctlr_dev; + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + struct net_device *netdev; + + interface = container_of(kref, struct bnx2fc_interface, kref); + BNX2FC_MISC_DBG("Interface is being released\n"); + + ctlr = bnx2fc_to_ctlr(interface); + ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr); + netdev = interface->netdev; + + /* tear-down FIP controller */ + if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags)) + fcoe_ctlr_destroy(ctlr); + + fcoe_ctlr_device_delete(ctlr_dev); + + dev_put(netdev); + module_put(THIS_MODULE); +} + +static inline void bnx2fc_interface_get(struct bnx2fc_interface *interface) +{ + kref_get(&interface->kref); +} + +static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface) +{ + kref_put(&interface->kref, bnx2fc_interface_release); +} +static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba) +{ + /* Free the command manager */ + if (hba->cmd_mgr) { + bnx2fc_cmd_mgr_free(hba->cmd_mgr); + hba->cmd_mgr = NULL; + } + 
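+	/* Release the remaining hba resources in reverse order of allocation. */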
kfree(hba->tgt_ofld_list); + bnx2fc_unbind_pcidev(hba); + kfree(hba); +} + +/** + * bnx2fc_hba_create - create a new bnx2fc hba + * + * @cnic: pointer to cnic device + * + * Creates a new FCoE hba on the given device. + * + */ +static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) +{ + struct bnx2fc_hba *hba; + struct fcoe_capabilities *fcoe_cap; + int rc; + + hba = kzalloc(sizeof(*hba), GFP_KERNEL); + if (!hba) { + printk(KERN_ERR PFX "Unable to allocate hba structure\n"); + return NULL; + } + spin_lock_init(&hba->hba_lock); + mutex_init(&hba->hba_mutex); + mutex_init(&hba->hba_stats_mutex); + + hba->cnic = cnic; + + hba->max_tasks = cnic->max_fcoe_exchanges; + hba->elstm_xids = (hba->max_tasks / 2); + hba->max_outstanding_cmds = hba->elstm_xids; + hba->max_xid = (hba->max_tasks - 1); + + rc = bnx2fc_bind_pcidev(hba); + if (rc) { + printk(KERN_ERR PFX "create_adapter: bind error\n"); + goto bind_err; + } + hba->phys_dev = cnic->netdev; + hba->next_conn_id = 0; + + hba->tgt_ofld_list = + kcalloc(BNX2FC_NUM_MAX_SESS, sizeof(struct bnx2fc_rport *), + GFP_KERNEL); + if (!hba->tgt_ofld_list) { + printk(KERN_ERR PFX "Unable to allocate tgt offload list\n"); + goto tgtofld_err; + } + + hba->num_ofld_sess = 0; + + hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba); + if (!hba->cmd_mgr) { + printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n"); + goto cmgr_err; + } + fcoe_cap = &hba->fcoe_cap; + + fcoe_cap->capability1 = BNX2FC_TM_MAX_SQES << + FCOE_IOS_PER_CONNECTION_SHIFT; + fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS << + FCOE_LOGINS_PER_PORT_SHIFT; + fcoe_cap->capability2 = hba->max_outstanding_cmds << + FCOE_NUMBER_OF_EXCHANGES_SHIFT; + fcoe_cap->capability2 |= BNX2FC_MAX_NPIV << + FCOE_NPIV_WWN_PER_PORT_SHIFT; + fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS << + FCOE_TARGETS_SUPPORTED_SHIFT; + fcoe_cap->capability3 |= hba->max_outstanding_cmds << + FCOE_OUTSTANDING_COMMANDS_SHIFT; + fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL; + + init_waitqueue_head(&hba->shutdown_wait); + init_waitqueue_head(&hba->destroy_wait); + INIT_LIST_HEAD(&hba->vports); + + return hba; + +cmgr_err: + kfree(hba->tgt_ofld_list); +tgtofld_err: + bnx2fc_unbind_pcidev(hba); +bind_err: + kfree(hba); + return NULL; +} + +static struct bnx2fc_interface * +bnx2fc_interface_create(struct bnx2fc_hba *hba, + struct net_device *netdev, + enum fip_mode fip_mode) +{ + struct fcoe_ctlr_device *ctlr_dev; + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + int size; + int rc = 0; + + size = (sizeof(*interface) + sizeof(struct fcoe_ctlr)); + ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ, + size); + if (!ctlr_dev) { + printk(KERN_ERR PFX "Unable to allocate interface structure\n"); + return NULL; + } + ctlr = fcoe_ctlr_device_priv(ctlr_dev); + ctlr->cdev = ctlr_dev; + interface = fcoe_ctlr_priv(ctlr); + dev_hold(netdev); + kref_init(&interface->kref); + interface->hba = hba; + interface->netdev = netdev; + + /* Initialize FIP */ + fcoe_ctlr_init(ctlr, fip_mode); + ctlr->send = bnx2fc_fip_send; + ctlr->update_mac = bnx2fc_update_src_mac; + ctlr->get_src_addr = bnx2fc_get_src_mac; + set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags); + + rc = bnx2fc_interface_setup(interface); + if (!rc) + return interface; + + fcoe_ctlr_destroy(ctlr); + dev_put(netdev); + fcoe_ctlr_device_delete(ctlr_dev); + return NULL; +} + +/** + * bnx2fc_if_create - Create FCoE instance on a given interface + * + * @interface: FCoE interface to create a local port on + * @parent: Device pointer to be the 
parent in sysfs for the SCSI host + * @npiv: Indicates if the port is vport or not + * + * Creates a fc_lport instance and a Scsi_Host instance and configure them. + * + * Returns: Allocated fc_lport or an error pointer + */ +static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, + struct device *parent, int npiv) +{ + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct fc_lport *lport, *n_port; + struct fcoe_port *port; + struct Scsi_Host *shost; + struct fc_vport *vport = dev_to_vport(parent); + struct bnx2fc_lport *blport; + struct bnx2fc_hba *hba = interface->hba; + int rc = 0; + + blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL); + if (!blport) { + BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n"); + return NULL; + } + + /* Allocate Scsi_Host structure */ + bnx2fc_shost_template.can_queue = hba->max_outstanding_cmds; + if (!npiv) + lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port)); + else + lport = libfc_vport_create(vport, sizeof(*port)); + + if (!lport) { + printk(KERN_ERR PFX "could not allocate scsi host structure\n"); + goto free_blport; + } + shost = lport->host; + port = lport_priv(lport); + port->lport = lport; + port->priv = interface; + port->get_netdev = bnx2fc_netdev; + + /* Configure fcoe_port */ + rc = bnx2fc_lport_config(lport); + if (rc) + goto lp_config_err; + + if (npiv) { + printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n", + vport->node_name, vport->port_name); + fc_set_wwnn(lport, vport->node_name); + fc_set_wwpn(lport, vport->port_name); + } + /* Configure netdev and networking properties of the lport */ + rc = bnx2fc_net_config(lport, interface->netdev); + if (rc) { + printk(KERN_ERR PFX "Error on bnx2fc_net_config\n"); + goto lp_config_err; + } + + rc = bnx2fc_shost_config(lport, parent); + if (rc) { + printk(KERN_ERR PFX "Couldn't configure shost for %s\n", + interface->netdev->name); + goto lp_config_err; + } + + /* Initialize the libfc library */ + rc = bnx2fc_libfc_config(lport); + if (rc) { + printk(KERN_ERR PFX "Couldn't configure libfc\n"); + goto shost_err; + } + fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; + + if (bnx2fc_devloss_tmo) + fc_host_dev_loss_tmo(shost) = bnx2fc_devloss_tmo; + + /* Allocate exchange manager */ + if (!npiv) + rc = bnx2fc_em_config(lport, hba); + else { + shost = vport_to_shost(vport); + n_port = shost_priv(shost); + rc = fc_exch_mgr_list_clone(n_port, lport); + } + + if (rc) { + printk(KERN_ERR PFX "Error on bnx2fc_em_config\n"); + goto shost_err; + } + + bnx2fc_interface_get(interface); + + spin_lock_bh(&hba->hba_lock); + blport->lport = lport; + list_add_tail(&blport->list, &hba->vports); + spin_unlock_bh(&hba->hba_lock); + + return lport; + +shost_err: + scsi_remove_host(shost); +lp_config_err: + scsi_host_put(lport->host); +free_blport: + kfree(blport); + return NULL; +} + +static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface) +{ + /* Dont listen for Ethernet packets anymore */ + __dev_remove_pack(&interface->fcoe_packet_type); + __dev_remove_pack(&interface->fip_packet_type); + synchronize_net(); +} + +static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface) +{ + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct fc_lport *lport = ctlr->lp; + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = interface->hba; + + /* Stop the transmit retry timer */ + del_timer_sync(&port->timer); + + /* Free existing transmit skbs */ + fcoe_clean_pending_queue(lport); + + bnx2fc_net_cleanup(interface); + + 
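+	/*
+	 * The packet handlers are gone at this point, so it is safe to drop
+	 * this lport from the hba's vport list.
+	 */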
bnx2fc_free_vport(hba, lport); +} + +static void bnx2fc_if_destroy(struct fc_lport *lport) +{ + + /* Free queued packets for the receive thread */ + bnx2fc_clean_rx_queue(lport); + + /* Detach from scsi-ml */ + fc_remove_host(lport->host); + scsi_remove_host(lport->host); + + /* + * Note that only the physical lport will have the exchange manager. + * for vports, this function is NOP + */ + fc_exch_mgr_free(lport); + + /* Free memory used by statistical counters */ + fc_lport_free_stats(lport); + + /* Release Scsi_Host */ + scsi_host_put(lport->host); +} + +static void __bnx2fc_destroy(struct bnx2fc_interface *interface) +{ + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct fc_lport *lport = ctlr->lp; + struct fcoe_port *port = lport_priv(lport); + + bnx2fc_interface_cleanup(interface); + bnx2fc_stop(interface); + list_del(&interface->list); + bnx2fc_port_destroy(port); + bnx2fc_interface_put(interface); +} + +/** + * bnx2fc_destroy - Destroy a bnx2fc FCoE interface + * + * @netdev: The net device that the FCoE interface is on + * + * Called from sysfs. + * + * Returns: 0 for success + */ +static int bnx2fc_destroy(struct net_device *netdev) +{ + struct bnx2fc_interface *interface = NULL; + struct workqueue_struct *timer_work_queue; + struct fcoe_ctlr *ctlr; + int rc = 0; + + rtnl_lock(); + mutex_lock(&bnx2fc_dev_lock); + + interface = bnx2fc_interface_lookup(netdev); + ctlr = bnx2fc_to_ctlr(interface); + if (!interface || !ctlr->lp) { + rc = -ENODEV; + printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n"); + goto netdev_err; + } + + timer_work_queue = interface->timer_work_queue; + __bnx2fc_destroy(interface); + destroy_workqueue(timer_work_queue); + +netdev_err: + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + +static void bnx2fc_port_destroy(struct fcoe_port *port) +{ + struct fc_lport *lport; + + lport = port->lport; + BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport); + + bnx2fc_if_destroy(lport); +} + +static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba) +{ + bnx2fc_free_fw_resc(hba); + bnx2fc_free_task_ctx(hba); +} + +/** + * bnx2fc_bind_adapter_devices - binds bnx2fc adapter with the associated + * pci structure + * + * @hba: Adapter instance + */ +static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba) +{ + if (bnx2fc_setup_task_ctx(hba)) + goto mem_err; + + if (bnx2fc_setup_fw_resc(hba)) + goto mem_err; + + return 0; +mem_err: + bnx2fc_unbind_adapter_devices(hba); + return -ENOMEM; +} + +static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba) +{ + struct cnic_dev *cnic; + struct pci_dev *pdev; + + if (!hba->cnic) { + printk(KERN_ERR PFX "cnic is NULL\n"); + return -ENODEV; + } + cnic = hba->cnic; + pdev = hba->pcidev = cnic->pcidev; + if (!hba->pcidev) + return -ENODEV; + + switch (pdev->device) { + case PCI_DEVICE_ID_NX2_57710: + strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57711: + strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57712: + case PCI_DEVICE_ID_NX2_57712_MF: + case PCI_DEVICE_ID_NX2_57712_VF: + strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57800: + case PCI_DEVICE_ID_NX2_57800_MF: + case PCI_DEVICE_ID_NX2_57800_VF: + strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57810: + case PCI_DEVICE_ID_NX2_57810_MF: + case PCI_DEVICE_ID_NX2_57810_VF: + strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN); + break; + case 
PCI_DEVICE_ID_NX2_57840: + case PCI_DEVICE_ID_NX2_57840_MF: + case PCI_DEVICE_ID_NX2_57840_VF: + case PCI_DEVICE_ID_NX2_57840_2_20: + case PCI_DEVICE_ID_NX2_57840_4_10: + strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN); + break; + default: + pr_err(PFX "Unknown device id 0x%x\n", pdev->device); + break; + } + pci_dev_get(hba->pcidev); + return 0; +} + +static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba) +{ + if (hba->pcidev) { + hba->chip_num[0] = '\0'; + pci_dev_put(hba->pcidev); + } + hba->pcidev = NULL; +} + +/** + * bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats + * + * @handle: transport handle pointing to adapter structure + */ +static int bnx2fc_ulp_get_stats(void *handle) +{ + struct bnx2fc_hba *hba = handle; + struct cnic_dev *cnic; + struct fcoe_stats_info *stats_addr; + + if (!hba) + return -EINVAL; + + cnic = hba->cnic; + stats_addr = &cnic->stats_addr->fcoe_stat; + if (!stats_addr) + return -EINVAL; + + strncpy(stats_addr->version, BNX2FC_VERSION, + sizeof(stats_addr->version)); + stats_addr->txq_size = BNX2FC_SQ_WQES_MAX; + stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX; + + return 0; +} + + +/** + * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance + * + * @handle: transport handle pointing to adapter structure + * + * This function maps adapter structure to pcidev structure and initiates + * firmware handshake to enable/initialize on-chip FCoE components. + * This bnx2fc - cnic interface api callback is used after following + * conditions are met - + * a) underlying network interface is up (marked by event NETDEV_UP + * from netdev + * b) bnx2fc adatper structure is registered. + */ +static void bnx2fc_ulp_start(void *handle) +{ + struct bnx2fc_hba *hba = handle; + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + struct fc_lport *lport; + + mutex_lock(&bnx2fc_dev_lock); + + if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) + bnx2fc_fw_init(hba); + + BNX2FC_MISC_DBG("bnx2fc started.\n"); + + list_for_each_entry(interface, &if_list, list) { + if (interface->hba == hba) { + ctlr = bnx2fc_to_ctlr(interface); + lport = ctlr->lp; + /* Kick off Fabric discovery*/ + printk(KERN_ERR PFX "ulp_init: start discovery\n"); + lport->tt.frame_send = bnx2fc_xmit; + bnx2fc_start_disc(interface); + } + } + + mutex_unlock(&bnx2fc_dev_lock); +} + +static void bnx2fc_port_shutdown(struct fc_lport *lport) +{ + BNX2FC_MISC_DBG("Entered %s\n", __func__); + fc_fabric_logoff(lport); + fc_lport_destroy(lport); +} + +static void bnx2fc_stop(struct bnx2fc_interface *interface) +{ + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct fc_lport *lport; + struct fc_lport *vport; + + if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) + return; + + lport = ctlr->lp; + bnx2fc_port_shutdown(lport); + + mutex_lock(&lport->lp_mutex); + list_for_each_entry(vport, &lport->vports, list) + fc_host_port_type(vport->host) = + FC_PORTTYPE_UNKNOWN; + mutex_unlock(&lport->lp_mutex); + fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; + fcoe_ctlr_link_down(ctlr); + fcoe_clean_pending_queue(lport); +} + +static int bnx2fc_fw_init(struct bnx2fc_hba *hba) +{ +#define BNX2FC_INIT_POLL_TIME (1000 / HZ) + int rc = -1; + int i = HZ; + + rc = bnx2fc_bind_adapter_devices(hba); + if (rc) { + printk(KERN_ALERT PFX + "bnx2fc_bind_adapter_devices failed - rc = %d\n", rc); + goto err_out; + } + + rc = bnx2fc_send_fw_fcoe_init_msg(hba); + if (rc) { + printk(KERN_ALERT PFX + "bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc); + goto err_unbind; + } + + 
/* + * Wait until the adapter init message is complete, and adapter + * state is UP. + */ + while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) + msleep(BNX2FC_INIT_POLL_TIME); + + if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) { + printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize. " + "Ignoring...\n", + hba->cnic->netdev->name); + rc = -1; + goto err_unbind; + } + + + set_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags); + return 0; + +err_unbind: + bnx2fc_unbind_adapter_devices(hba); +err_out: + return rc; +} + +static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba) +{ + if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) { + if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) { + timer_setup(&hba->destroy_timer, bnx2fc_destroy_timer, + 0); + hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT + + jiffies; + add_timer(&hba->destroy_timer); + wait_event_interruptible(hba->destroy_wait, + test_bit(BNX2FC_FLAG_DESTROY_CMPL, + &hba->flags)); + clear_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); + /* This should never happen */ + if (signal_pending(current)) + flush_signals(current); + + del_timer_sync(&hba->destroy_timer); + } + bnx2fc_unbind_adapter_devices(hba); + } +} + +/** + * bnx2fc_ulp_stop - cnic callback to shutdown adapter instance + * + * @handle: transport handle pointing to adapter structure + * + * Driver checks if adapter is already in shutdown mode, if not start + * the shutdown process. + */ +static void bnx2fc_ulp_stop(void *handle) +{ + struct bnx2fc_hba *hba = handle; + struct bnx2fc_interface *interface; + + printk(KERN_ERR "ULP_STOP\n"); + + mutex_lock(&bnx2fc_dev_lock); + if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) + goto exit; + list_for_each_entry(interface, &if_list, list) { + if (interface->hba == hba) + bnx2fc_stop(interface); + } + BUG_ON(hba->num_ofld_sess != 0); + + mutex_lock(&hba->hba_mutex); + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); + clear_bit(ADAPTER_STATE_GOING_DOWN, + &hba->adapter_state); + + clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); + mutex_unlock(&hba->hba_mutex); + + bnx2fc_fw_destroy(hba); +exit: + mutex_unlock(&bnx2fc_dev_lock); +} + +static void bnx2fc_start_disc(struct bnx2fc_interface *interface) +{ + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct fc_lport *lport; + int wait_cnt = 0; + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + /* Kick off FIP/FLOGI */ + if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) { + printk(KERN_ERR PFX "Init not done yet\n"); + return; + } + + lport = ctlr->lp; + BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n"); + + if (!bnx2fc_link_ok(lport) && interface->enabled) { + BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); + fcoe_ctlr_link_up(ctlr); + fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; + set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); + } + + /* wait for the FCF to be selected before issuing FLOGI */ + while (!ctlr->sel_fcf) { + msleep(250); + /* give up after 3 secs */ + if (++wait_cnt > 12) + break; + } + + /* Reset max receive frame size to default */ + if (fc_set_mfs(lport, BNX2FC_MFS)) + return; + + fc_lport_init(lport); + fc_fabric_login(lport); +} + + +/** + * bnx2fc_ulp_init - Initialize an adapter instance + * + * @dev : cnic device handle + * Called from cnic_register_driver() context to initialize all + * enumerated cnic devices. This routine allocates adapter structure + * and other device specific resources. 
+ */ +static void bnx2fc_ulp_init(struct cnic_dev *dev) +{ + struct bnx2fc_hba *hba; + int rc = 0; + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + /* bnx2fc works only when bnx2x is loaded */ + if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) || + (dev->max_fcoe_conn == 0)) { + printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s," + " flags: %lx fcoe_conn: %d\n", + dev->netdev->name, dev->flags, dev->max_fcoe_conn); + return; + } + + hba = bnx2fc_hba_create(dev); + if (!hba) { + printk(KERN_ERR PFX "hba initialization failed\n"); + return; + } + + pr_info(PFX "FCoE initialized for %s.\n", dev->netdev->name); + + /* Add HBA to the adapter list */ + mutex_lock(&bnx2fc_dev_lock); + list_add_tail(&hba->list, &adapter_list); + adapter_count++; + mutex_unlock(&bnx2fc_dev_lock); + + dev->fcoe_cap = &hba->fcoe_cap; + clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); + rc = dev->register_device(dev, CNIC_ULP_FCOE, + (void *) hba); + if (rc) + printk(KERN_ERR PFX "register_device failed, rc = %d\n", rc); + else + set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); +} + +/* Assumes rtnl_lock and the bnx2fc_dev_lock are already taken */ +static int __bnx2fc_disable(struct fcoe_ctlr *ctlr) +{ + struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); + + if (interface->enabled) { + if (!ctlr->lp) { + pr_err(PFX "__bnx2fc_disable: lport not found\n"); + return -ENODEV; + } else { + interface->enabled = false; + fcoe_ctlr_link_down(ctlr); + fcoe_clean_pending_queue(ctlr->lp); + } + } + return 0; +} + +/* + * Deperecated: Use bnx2fc_enabled() + */ +static int bnx2fc_disable(struct net_device *netdev) +{ + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + int rc = 0; + + rtnl_lock(); + mutex_lock(&bnx2fc_dev_lock); + + interface = bnx2fc_interface_lookup(netdev); + ctlr = bnx2fc_to_ctlr(interface); + + if (!interface) { + rc = -ENODEV; + pr_err(PFX "bnx2fc_disable: interface not found\n"); + } else { + rc = __bnx2fc_disable(ctlr); + } + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + +static uint bnx2fc_npiv_create_vports(struct fc_lport *lport, + struct cnic_fc_npiv_tbl *npiv_tbl) +{ + struct fc_vport_identifiers vpid; + uint i, created = 0; + u64 wwnn = 0; + char wwpn_str[32]; + char wwnn_str[32]; + + if (npiv_tbl->count > MAX_NPIV_ENTRIES) { + BNX2FC_HBA_DBG(lport, "Exceeded count max of npiv table\n"); + goto done; + } + + /* Sanity check the first entry to make sure it's not 0 */ + if (wwn_to_u64(npiv_tbl->wwnn[0]) == 0 && + wwn_to_u64(npiv_tbl->wwpn[0]) == 0) { + BNX2FC_HBA_DBG(lport, "First NPIV table entries invalid.\n"); + goto done; + } + + vpid.roles = FC_PORT_ROLE_FCP_INITIATOR; + vpid.vport_type = FC_PORTTYPE_NPIV; + vpid.disable = false; + + for (i = 0; i < npiv_tbl->count; i++) { + wwnn = wwn_to_u64(npiv_tbl->wwnn[i]); + if (wwnn == 0) { + /* + * If we get a 0 element from for the WWNN then assume + * the WWNN should be the same as the physical port. 
+ */ + wwnn = lport->wwnn; + } + vpid.node_name = wwnn; + vpid.port_name = wwn_to_u64(npiv_tbl->wwpn[i]); + scnprintf(vpid.symbolic_name, sizeof(vpid.symbolic_name), + "NPIV[%u]:%016llx-%016llx", + created, vpid.port_name, vpid.node_name); + fcoe_wwn_to_str(vpid.node_name, wwnn_str, sizeof(wwnn_str)); + fcoe_wwn_to_str(vpid.port_name, wwpn_str, sizeof(wwpn_str)); + BNX2FC_HBA_DBG(lport, "Creating vport %s:%s.\n", wwnn_str, + wwpn_str); + if (fc_vport_create(lport->host, 0, &vpid)) + created++; + else + BNX2FC_HBA_DBG(lport, "Failed to create vport\n"); + } +done: + return created; +} + +static int __bnx2fc_enable(struct fcoe_ctlr *ctlr) +{ + struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); + struct bnx2fc_hba *hba; + struct cnic_fc_npiv_tbl *npiv_tbl; + struct fc_lport *lport; + + if (!interface->enabled) { + if (!ctlr->lp) { + pr_err(PFX "__bnx2fc_enable: lport not found\n"); + return -ENODEV; + } else if (!bnx2fc_link_ok(ctlr->lp)) { + fcoe_ctlr_link_up(ctlr); + interface->enabled = true; + } + } + + /* Create static NPIV ports if any are contained in NVRAM */ + hba = interface->hba; + lport = ctlr->lp; + + if (!hba) + goto done; + + if (!hba->cnic) + goto done; + + if (!lport) + goto done; + + if (!lport->host) + goto done; + + if (!hba->cnic->get_fc_npiv_tbl) + goto done; + + npiv_tbl = kzalloc(sizeof(struct cnic_fc_npiv_tbl), GFP_KERNEL); + if (!npiv_tbl) + goto done; + + if (hba->cnic->get_fc_npiv_tbl(hba->cnic, npiv_tbl)) + goto done_free; + + bnx2fc_npiv_create_vports(lport, npiv_tbl); +done_free: + kfree(npiv_tbl); +done: + return 0; +} + +/* + * Deprecated: Use bnx2fc_enabled() + */ +static int bnx2fc_enable(struct net_device *netdev) +{ + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + int rc = 0; + + rtnl_lock(); + mutex_lock(&bnx2fc_dev_lock); + + interface = bnx2fc_interface_lookup(netdev); + ctlr = bnx2fc_to_ctlr(interface); + if (!interface) { + rc = -ENODEV; + pr_err(PFX "bnx2fc_enable: interface not found\n"); + } else { + rc = __bnx2fc_enable(ctlr); + } + + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + +/** + * bnx2fc_ctlr_enabled() - Enable or disable an FCoE Controller + * @cdev: The FCoE Controller that is being enabled or disabled + * + * fcoe_sysfs will ensure that the state of 'enabled' has + * changed, so no checking is necessary here. This routine simply + * calls fcoe_enable or fcoe_disable, both of which are deprecated. + * When those routines are removed the functionality can be merged + * here. + */ +static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev) +{ + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev); + + switch (cdev->enabled) { + case FCOE_CTLR_ENABLED: + return __bnx2fc_enable(ctlr); + case FCOE_CTLR_DISABLED: + return __bnx2fc_disable(ctlr); + case FCOE_CTLR_UNUSED: + default: + return -ENOTSUPP; + } +} + +enum bnx2fc_create_link_state { + BNX2FC_CREATE_LINK_DOWN, + BNX2FC_CREATE_LINK_UP, +}; + +/** + * _bnx2fc_create() - Create bnx2fc FCoE interface + * @netdev : The net_device object the Ethernet interface to create on + * @fip_mode: The FIP mode for this creation + * @link_state: The ctlr link state on creation + * + * Called from either the libfcoe 'create' module parameter + * via fcoe_create or from fcoe_syfs's ctlr_create file. + * + * libfcoe's 'create' module parameter is deprecated so some + * consolidation of code can be done when that interface is + * removed. 
+ * + * Returns: 0 for success + */ +static int _bnx2fc_create(struct net_device *netdev, + enum fip_mode fip_mode, + enum bnx2fc_create_link_state link_state) +{ + struct fcoe_ctlr_device *cdev; + struct fcoe_ctlr *ctlr; + struct bnx2fc_interface *interface; + struct bnx2fc_hba *hba; + struct net_device *phys_dev = netdev; + struct fc_lport *lport; + struct ethtool_drvinfo drvinfo; + int rc = 0; + int vlan_id = 0; + + BNX2FC_MISC_DBG("Entered bnx2fc_create\n"); + if (fip_mode != FIP_MODE_FABRIC) { + printk(KERN_ERR "fip mode not FABRIC\n"); + return -EIO; + } + + rtnl_lock(); + + mutex_lock(&bnx2fc_dev_lock); + + if (!try_module_get(THIS_MODULE)) { + rc = -EINVAL; + goto mod_err; + } + + /* obtain physical netdev */ + if (is_vlan_dev(netdev)) + phys_dev = vlan_dev_real_dev(netdev); + + /* verify if the physical device is a netxtreme2 device */ + if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { + memset(&drvinfo, 0, sizeof(drvinfo)); + phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); + if (strncmp(drvinfo.driver, "bnx2x", strlen("bnx2x"))) { + printk(KERN_ERR PFX "Not a netxtreme2 device\n"); + rc = -EINVAL; + goto netdev_err; + } + } else { + printk(KERN_ERR PFX "unable to obtain drv_info\n"); + rc = -EINVAL; + goto netdev_err; + } + + /* obtain interface and initialize rest of the structure */ + hba = bnx2fc_hba_lookup(phys_dev); + if (!hba) { + rc = -ENODEV; + printk(KERN_ERR PFX "bnx2fc_create: hba not found\n"); + goto netdev_err; + } + + if (bnx2fc_interface_lookup(netdev)) { + rc = -EEXIST; + goto netdev_err; + } + + interface = bnx2fc_interface_create(hba, netdev, fip_mode); + if (!interface) { + printk(KERN_ERR PFX "bnx2fc_interface_create failed\n"); + rc = -ENOMEM; + goto netdev_err; + } + + if (is_vlan_dev(netdev)) { + vlan_id = vlan_dev_vlan_id(netdev); + interface->vlan_enabled = 1; + } + + ctlr = bnx2fc_to_ctlr(interface); + cdev = fcoe_ctlr_to_ctlr_dev(ctlr); + interface->vlan_id = vlan_id; + interface->tm_timeout = BNX2FC_TM_TIMEOUT; + + interface->timer_work_queue = + create_singlethread_workqueue("bnx2fc_timer_wq"); + if (!interface->timer_work_queue) { + printk(KERN_ERR PFX "ulp_init could not create timer_wq\n"); + rc = -EINVAL; + goto ifput_err; + } + + lport = bnx2fc_if_create(interface, &cdev->dev, 0); + if (!lport) { + printk(KERN_ERR PFX "Failed to create interface (%s)\n", + netdev->name); + rc = -EINVAL; + goto if_create_err; + } + + /* Add interface to if_list */ + list_add_tail(&interface->list, &if_list); + + lport->boot_time = jiffies; + + /* Make this master N_port */ + ctlr->lp = lport; + + if (link_state == BNX2FC_CREATE_LINK_UP) + cdev->enabled = FCOE_CTLR_ENABLED; + else + cdev->enabled = FCOE_CTLR_DISABLED; + + if (link_state == BNX2FC_CREATE_LINK_UP && + !bnx2fc_link_ok(lport)) { + fcoe_ctlr_link_up(ctlr); + fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; + set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); + } + + BNX2FC_HBA_DBG(lport, "create: START DISC\n"); + bnx2fc_start_disc(interface); + + if (link_state == BNX2FC_CREATE_LINK_UP) + interface->enabled = true; + + /* + * Release from kref_init in bnx2fc_interface_setup, on success + * lport should be holding a reference taken in bnx2fc_if_create + */ + bnx2fc_interface_put(interface); + /* put netdev that was held while calling dev_get_by_name */ + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return 0; + +if_create_err: + destroy_workqueue(interface->timer_work_queue); +ifput_err: + bnx2fc_net_cleanup(interface); + bnx2fc_interface_put(interface); + 
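+	/*
+	 * The put above drops the initial reference from
+	 * bnx2fc_interface_create; the release callback does the module_put,
+	 * so skip the one in the netdev_err path.
+	 */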
goto mod_err; +netdev_err: + module_put(THIS_MODULE); +mod_err: + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + +/** + * bnx2fc_create() - Create a bnx2fc interface + * @netdev : The net_device object the Ethernet interface to create on + * @fip_mode: The FIP mode for this creation + * + * Called from fcoe transport + * + * Returns: 0 for success + */ +static int bnx2fc_create(struct net_device *netdev, enum fip_mode fip_mode) +{ + return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP); +} + +/** + * bnx2fc_ctlr_alloc() - Allocate a bnx2fc interface from fcoe_sysfs + * @netdev: The net_device to be used by the allocated FCoE Controller + * + * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr + * in a link_down state. The allows the user an opportunity to configure + * the FCoE Controller from sysfs before enabling the FCoE Controller. + * + * Creating in with this routine starts the FCoE Controller in Fabric + * mode. The user can change to VN2VN or another mode before enabling. + */ +static int bnx2fc_ctlr_alloc(struct net_device *netdev) +{ + return _bnx2fc_create(netdev, FIP_MODE_FABRIC, + BNX2FC_CREATE_LINK_DOWN); +} + +/** + * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance + * + * @cnic: Pointer to cnic device instance + * + **/ +static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic) +{ + struct bnx2fc_hba *hba; + + /* Called with bnx2fc_dev_lock held */ + list_for_each_entry(hba, &adapter_list, list) { + if (hba->cnic == cnic) + return hba; + } + return NULL; +} + +static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device + *netdev) +{ + struct bnx2fc_interface *interface; + + /* Called with bnx2fc_dev_lock held */ + list_for_each_entry(interface, &if_list, list) { + if (interface->netdev == netdev) + return interface; + } + return NULL; +} + +static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device + *phys_dev) +{ + struct bnx2fc_hba *hba; + + /* Called with bnx2fc_dev_lock held */ + list_for_each_entry(hba, &adapter_list, list) { + if (hba->phys_dev == phys_dev) + return hba; + } + printk(KERN_ERR PFX "adapter_lookup: hba NULL\n"); + return NULL; +} + +/** + * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources + * + * @dev: cnic device handle + */ +static void bnx2fc_ulp_exit(struct cnic_dev *dev) +{ + struct bnx2fc_hba *hba; + struct bnx2fc_interface *interface, *tmp; + + BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n"); + + if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { + printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n", + dev->netdev->name, dev->flags); + return; + } + + mutex_lock(&bnx2fc_dev_lock); + hba = bnx2fc_find_hba_for_cnic(dev); + if (!hba) { + printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0%p\n", + dev); + mutex_unlock(&bnx2fc_dev_lock); + return; + } + + list_del_init(&hba->list); + adapter_count--; + + list_for_each_entry_safe(interface, tmp, &if_list, list) + /* destroy not called yet, move to quiesced list */ + if (interface->hba == hba) + __bnx2fc_destroy(interface); + mutex_unlock(&bnx2fc_dev_lock); + + bnx2fc_ulp_stop(hba); + /* unregister cnic device */ + if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); + bnx2fc_hba_destroy(hba); +} + +static void bnx2fc_rport_terminate_io(struct fc_rport *rport) +{ + /* This is a no-op */ +} + +/** + * bnx2fc_fcoe_reset - Resets the fcoe + * + * @shost: shost the reset is from + * + * Returns: always 0 + 
*/ +static int bnx2fc_fcoe_reset(struct Scsi_Host *shost) +{ + struct fc_lport *lport = shost_priv(shost); + fc_lport_reset(lport); + return 0; +} + + +static bool bnx2fc_match(struct net_device *netdev) +{ + struct net_device *phys_dev = netdev; + + mutex_lock(&bnx2fc_dev_lock); + if (is_vlan_dev(netdev)) + phys_dev = vlan_dev_real_dev(netdev); + + if (bnx2fc_hba_lookup(phys_dev)) { + mutex_unlock(&bnx2fc_dev_lock); + return true; + } + + mutex_unlock(&bnx2fc_dev_lock); + return false; +} + + +static struct fcoe_transport bnx2fc_transport = { + .name = {"bnx2fc"}, + .attached = false, + .list = LIST_HEAD_INIT(bnx2fc_transport.list), + .alloc = bnx2fc_ctlr_alloc, + .match = bnx2fc_match, + .create = bnx2fc_create, + .destroy = bnx2fc_destroy, + .enable = bnx2fc_enable, + .disable = bnx2fc_disable, +}; + +/** + * bnx2fc_cpu_online - Create a receive thread for an online CPU + * + * @cpu: cpu index for the online cpu + */ +static int bnx2fc_cpu_online(unsigned int cpu) +{ + struct bnx2fc_percpu_s *p; + struct task_struct *thread; + + p = &per_cpu(bnx2fc_percpu, cpu); + + thread = kthread_create_on_node(bnx2fc_percpu_io_thread, + (void *)p, cpu_to_node(cpu), + "bnx2fc_thread/%d", cpu); + if (IS_ERR(thread)) + return PTR_ERR(thread); + + /* bind thread to the cpu */ + kthread_bind(thread, cpu); + p->iothread = thread; + wake_up_process(thread); + return 0; +} + +static int bnx2fc_cpu_offline(unsigned int cpu) +{ + struct bnx2fc_percpu_s *p; + struct task_struct *thread; + struct bnx2fc_work *work, *tmp; + + BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu); + + /* Prevent any new work from being queued for this CPU */ + p = &per_cpu(bnx2fc_percpu, cpu); + spin_lock_bh(&p->fp_work_lock); + thread = p->iothread; + p->iothread = NULL; + + /* Free all work in the list */ + list_for_each_entry_safe(work, tmp, &p->work_list, list) { + list_del_init(&work->list); + bnx2fc_process_cq_compl(work->tgt, work->wqe, work->rq_data, + work->num_rq, work->task); + kfree(work); + } + + spin_unlock_bh(&p->fp_work_lock); + + if (thread) + kthread_stop(thread); + return 0; +} + +static int bnx2fc_slave_configure(struct scsi_device *sdev) +{ + if (!bnx2fc_queue_depth) + return 0; + + scsi_change_queue_depth(sdev, bnx2fc_queue_depth); + return 0; +} + +static enum cpuhp_state bnx2fc_online_state; + +/** + * bnx2fc_mod_init - module init entry point + * + * Initialize driver wide global data structures, and register + * with cnic module + **/ +static int __init bnx2fc_mod_init(void) +{ + struct fcoe_percpu_s *bg; + struct task_struct *l2_thread; + int rc = 0; + unsigned int cpu = 0; + struct bnx2fc_percpu_s *p; + + printk(KERN_INFO PFX "%s", version); + + /* register as a fcoe transport */ + rc = fcoe_transport_attach(&bnx2fc_transport); + if (rc) { + printk(KERN_ERR "failed to register an fcoe transport, check " + "if libfcoe is loaded\n"); + goto out; + } + + INIT_LIST_HEAD(&adapter_list); + INIT_LIST_HEAD(&if_list); + mutex_init(&bnx2fc_dev_lock); + adapter_count = 0; + + /* Attach FC transport template */ + rc = bnx2fc_attach_transport(); + if (rc) + goto detach_ft; + + bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0); + if (!bnx2fc_wq) { + rc = -ENOMEM; + goto release_bt; + } + + bg = &bnx2fc_global; + skb_queue_head_init(&bg->fcoe_rx_list); + l2_thread = kthread_run(bnx2fc_l2_rcv_thread, + (void *)bg, + "bnx2fc_l2_thread"); + if (IS_ERR(l2_thread)) { + rc = PTR_ERR(l2_thread); + goto free_wq; + } + spin_lock_bh(&bg->fcoe_rx_list.lock); + bg->kthread = l2_thread; + spin_unlock_bh(&bg->fcoe_rx_list.lock); + + 
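+	/*
+	 * Initialize the per-CPU work lists before registering the CPU
+	 * hotplug callbacks that create the per-CPU I/O threads.
+	 */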
for_each_possible_cpu(cpu) { + p = &per_cpu(bnx2fc_percpu, cpu); + INIT_LIST_HEAD(&p->work_list); + spin_lock_init(&p->fp_work_lock); + } + + rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online", + bnx2fc_cpu_online, bnx2fc_cpu_offline); + if (rc < 0) + goto stop_thread; + bnx2fc_online_state = rc; + + cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); + return 0; + +stop_thread: + kthread_stop(l2_thread); +free_wq: + destroy_workqueue(bnx2fc_wq); +release_bt: + bnx2fc_release_transport(); +detach_ft: + fcoe_transport_detach(&bnx2fc_transport); +out: + return rc; +} + +static void __exit bnx2fc_mod_exit(void) +{ + LIST_HEAD(to_be_deleted); + struct bnx2fc_hba *hba, *next; + struct fcoe_percpu_s *bg; + struct task_struct *l2_thread; + struct sk_buff *skb; + + /* + * NOTE: Since cnic calls register_driver routine rtnl_lock, + * it will have higher precedence than bnx2fc_dev_lock. + * unregister_device() cannot be called with bnx2fc_dev_lock + * held. + */ + mutex_lock(&bnx2fc_dev_lock); + list_splice_init(&adapter_list, &to_be_deleted); + adapter_count = 0; + mutex_unlock(&bnx2fc_dev_lock); + + /* Unregister with cnic */ + list_for_each_entry_safe(hba, next, &to_be_deleted, list) { + list_del_init(&hba->list); + printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p\n", + hba); + bnx2fc_ulp_stop(hba); + /* unregister cnic device */ + if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, + &hba->reg_with_cnic)) + hba->cnic->unregister_device(hba->cnic, + CNIC_ULP_FCOE); + bnx2fc_hba_destroy(hba); + } + cnic_unregister_driver(CNIC_ULP_FCOE); + + /* Destroy global thread */ + bg = &bnx2fc_global; + spin_lock_bh(&bg->fcoe_rx_list.lock); + l2_thread = bg->kthread; + bg->kthread = NULL; + while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) + kfree_skb(skb); + + spin_unlock_bh(&bg->fcoe_rx_list.lock); + + if (l2_thread) + kthread_stop(l2_thread); + + cpuhp_remove_state(bnx2fc_online_state); + + destroy_workqueue(bnx2fc_wq); + /* + * detach from scsi transport + * must happen after all destroys are done + */ + bnx2fc_release_transport(); + + /* detach from fcoe transport */ + fcoe_transport_detach(&bnx2fc_transport); +} + +module_init(bnx2fc_mod_init); +module_exit(bnx2fc_mod_exit); + +static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = { + .set_fcoe_ctlr_enabled = bnx2fc_ctlr_enabled, + .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb, + .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb, + .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb, + .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb, + .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb, + .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb, + + .get_fcoe_fcf_selected = fcoe_fcf_get_selected, + .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id, +}; + +static struct fc_function_template bnx2fc_transport_function = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + + .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + + sizeof(struct bnx2fc_rport)), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + 
.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = bnx2fc_get_host_stats, + + .issue_fc_host_lip = bnx2fc_fcoe_reset, + + .terminate_rport_io = bnx2fc_rport_terminate_io, + + .vport_create = bnx2fc_vport_create, + .vport_delete = bnx2fc_vport_destroy, + .vport_disable = bnx2fc_vport_disable, + .bsg_request = fc_lport_bsg_request, +}; + +static struct fc_function_template bnx2fc_vport_xport_function = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + + .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + + sizeof(struct bnx2fc_rport)), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = fc_get_host_stats, + .issue_fc_host_lip = bnx2fc_fcoe_reset, + .terminate_rport_io = fc_rport_terminate_io, + .bsg_request = fc_lport_bsg_request, +}; + +/* + * Additional scsi_host attributes. + */ +static ssize_t +bnx2fc_tm_timeout_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct fc_lport *lport = shost_priv(shost); + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + + sprintf(buf, "%u\n", interface->tm_timeout); + return strlen(buf); +} + +static ssize_t +bnx2fc_tm_timeout_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct fc_lport *lport = shost_priv(shost); + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + int rval, val; + + rval = kstrtouint(buf, 10, &val); + if (rval) + return rval; + if (val > 255) + return -ERANGE; + + interface->tm_timeout = (u8)val; + return strlen(buf); +} + +static DEVICE_ATTR(tm_timeout, S_IRUGO|S_IWUSR, bnx2fc_tm_timeout_show, + bnx2fc_tm_timeout_store); + +static struct attribute *bnx2fc_host_attrs[] = { + &dev_attr_tm_timeout.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(bnx2fc_host); + +/* + * scsi_host_template structure used while registering with SCSI-ml + */ +static struct scsi_host_template bnx2fc_shost_template = { + .module = THIS_MODULE, + .name = "QLogic Offload FCoE Initiator", + .queuecommand = bnx2fc_queuecommand, + .eh_timed_out = fc_eh_timed_out, + .eh_abort_handler = bnx2fc_eh_abort, /* abts */ + .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */ + .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */ + .eh_host_reset_handler = fc_eh_host_reset, + .slave_alloc = fc_slave_alloc, + .change_queue_depth = scsi_change_queue_depth, + .this_id = -1, + .cmd_per_lun = 3, + .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, + .dma_boundary = 0x7fff, + .max_sectors = 0x3fbf, + .track_queue_depth = 1, + .slave_configure = bnx2fc_slave_configure, + .shost_groups = bnx2fc_host_groups, + .cmd_size = sizeof(struct bnx2fc_priv), +}; + +static struct libfc_function_template bnx2fc_libfc_fcn_templ = { 
+ .frame_send = bnx2fc_xmit,
+ .elsct_send = bnx2fc_elsct_send,
+ .fcp_abort_io = bnx2fc_abort_io,
+ .fcp_cleanup = bnx2fc_cleanup,
+ .get_lesb = fcoe_get_lesb,
+ .rport_event_callback = bnx2fc_rport_event_handler,
+};
+
+/*
+ * bnx2fc_cnic_cb - global template of bnx2fc - cnic driver interface
+ * structure carrying callback function pointers
+ */
+static struct cnic_ulp_ops bnx2fc_cnic_cb = {
+ .owner = THIS_MODULE,
+ .cnic_init = bnx2fc_ulp_init,
+ .cnic_exit = bnx2fc_ulp_exit,
+ .cnic_start = bnx2fc_ulp_start,
+ .cnic_stop = bnx2fc_ulp_stop,
+ .indicate_kcqes = bnx2fc_indicate_kcqe,
+ .indicate_netevent = bnx2fc_indicate_netevent,
+ .cnic_get_stats = bnx2fc_ulp_get_stats,
+};
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
new file mode 100644
index 000000000..776544385
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -0,0 +1,2199 @@
+/* bnx2fc_hwi.c: QLogic Linux FCoE offload driver.
+ * This file contains the low level functions that interact
+ * with the 57712 FCoE firmware.
+ *
+ * Copyright (c) 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
+
+static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *new_cqe_kcqe);
+static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe);
+static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe);
+static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
+static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *destroy_kcqe);
+
+int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_stat stat_req;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = 0;
+
+ memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
+ stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
+ stat_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
+ stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
+
+ kwqe_arr[0] = (struct kwqe *) &stat_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
+ *
+ * @hba: adapter structure pointer
+ *
+ * Send down FCoE firmware init KWQEs which initiates the initial handshake
+ * with the f/w.
+ * + */ +int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) +{ + struct fcoe_kwqe_init1 fcoe_init1; + struct fcoe_kwqe_init2 fcoe_init2; + struct fcoe_kwqe_init3 fcoe_init3; + struct kwqe *kwqe_arr[3]; + int num_kwqes = 3; + int rc = 0; + + if (!hba->cnic) { + printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n"); + return -ENODEV; + } + + /* fill init1 KWQE */ + memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1)); + fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1; + fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + fcoe_init1.num_tasks = hba->max_tasks; + fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX; + fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX; + fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ; + fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX; + fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma; + fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32); + fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; + fcoe_init1.task_list_pbl_addr_hi = + (u32) ((u64) hba->task_ctx_bd_dma >> 32); + fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU; + + fcoe_init1.flags = (PAGE_SHIFT << + FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); + + fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG; + + /* fill init2 KWQE */ + memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2)); + fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2; + fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION; + fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION; + + + fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; + fcoe_init2.hash_tbl_pbl_addr_hi = (u32) + ((u64) hba->hash_tbl_pbl_dma >> 32); + + fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma; + fcoe_init2.t2_hash_tbl_addr_hi = (u32) + ((u64) hba->t2_hash_tbl_dma >> 32); + + fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma; + fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32) + ((u64) hba->t2_hash_tbl_ptr_dma >> 32); + + fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS; + + /* fill init3 KWQE */ + memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3)); + fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3; + fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + fcoe_init3.error_bit_map_lo = 0xffffffff; + fcoe_init3.error_bit_map_hi = 0xffffffff; + + /* + * enable both cached connection and cached tasks + * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both + */ + fcoe_init3.perf_config = 3; + + kwqe_arr[0] = (struct kwqe *) &fcoe_init1; + kwqe_arr[1] = (struct kwqe *) &fcoe_init2; + kwqe_arr[2] = (struct kwqe *) &fcoe_init3; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} +int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba) +{ + struct fcoe_kwqe_destroy fcoe_destroy; + struct kwqe *kwqe_arr[2]; + int num_kwqes = 1; + int rc = -1; + + /* fill destroy KWQE */ + memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy)); + fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY; + fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + kwqe_arr[0] = (struct kwqe *) &fcoe_destroy; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + return rc; +} + +/** + * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process + * + * @port: port 
structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +int bnx2fc_send_session_ofld_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt) +{ + struct fc_lport *lport = port->lport; + struct bnx2fc_interface *interface = port->priv; + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct bnx2fc_hba *hba = interface->hba; + struct kwqe *kwqe_arr[4]; + struct fcoe_kwqe_conn_offload1 ofld_req1; + struct fcoe_kwqe_conn_offload2 ofld_req2; + struct fcoe_kwqe_conn_offload3 ofld_req3; + struct fcoe_kwqe_conn_offload4 ofld_req4; + struct fc_rport_priv *rdata = tgt->rdata; + struct fc_rport *rport = tgt->rport; + int num_kwqes = 4; + u32 port_id; + int rc = 0; + u16 conn_id; + + /* Initialize offload request 1 structure */ + memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1)); + + ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1; + ofld_req1.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + + conn_id = (u16)tgt->fcoe_conn_id; + ofld_req1.fcoe_conn_id = conn_id; + + + ofld_req1.sq_addr_lo = (u32) tgt->sq_dma; + ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32); + + ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma; + ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32); + + ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma; + ofld_req1.rq_first_pbe_addr_hi = + (u32)((u64) tgt->rq_dma >> 32); + + ofld_req1.rq_prod = 0x8000; + + /* Initialize offload request 2 structure */ + memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2)); + + ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2; + ofld_req2.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size; + + ofld_req2.cq_addr_lo = (u32) tgt->cq_dma; + ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32); + + ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma; + ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32); + + ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma; + ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32); + + /* Initialize offload request 3 structure */ + memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3)); + + ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3; + ofld_req3.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req3.vlan_tag = interface->vlan_id << + FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT; + ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT; + + port_id = fc_host_port_id(lport->host); + if (port_id == 0) { + BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n"); + return -EINVAL; + } + + /* + * Store s_id of the initiator for further reference. 
This will + * be used during disable/destroy during linkdown processing as + * when the lport is reset, the port_id also is reset to 0 + */ + tgt->sid = port_id; + ofld_req3.s_id[0] = (port_id & 0x000000FF); + ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8; + ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16; + + port_id = rport->port_id; + ofld_req3.d_id[0] = (port_id & 0x000000FF); + ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8; + ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16; + + ofld_req3.tx_total_conc_seqs = rdata->max_seq; + + ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq; + ofld_req3.rx_max_fc_pay_len = lport->mfs; + + ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS; + ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS; + ofld_req3.rx_open_seqs_exch_c3 = 1; + + ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma; + ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32); + + /* set mul_n_port_ids supported flag to 0, until it is supported */ + ofld_req3.flags = 0; + /* + ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) << + FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT); + */ + /* Info from PLOGI response */ + ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) << + FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT); + + ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << + FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT); + + /* + * Info from PRLI response, this info is used for sequence level error + * recovery support + */ + if (tgt->dev_type == TYPE_TAPE) { + ofld_req3.flags |= 1 << + FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT; + ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED) + ? 1 : 0) << + FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT); + } + + /* vlan flag */ + ofld_req3.flags |= (interface->vlan_enabled << + FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT); + + /* C2_VALID and ACK flags are not set as they are not supported */ + + + /* Initialize offload request 4 structure */ + memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4)); + ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4; + ofld_req4.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20; + + + ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5]; + /* local mac */ + ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4]; + ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3]; + ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2]; + ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1]; + ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0]; + ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5]; + /* fcf mac */ + ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4]; + ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3]; + ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2]; + ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1]; + ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0]; + + ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; + ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); + + ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma; + ofld_req4.confq_pbl_base_addr_hi = + (u32)((u64) tgt->confq_pbl_dma >> 32); + + kwqe_arr[0] = (struct kwqe *) &ofld_req1; + kwqe_arr[1] = (struct kwqe *) &ofld_req2; + kwqe_arr[2] = (struct kwqe *) &ofld_req3; + kwqe_arr[3] = (struct kwqe *) &ofld_req4; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +/** + * 
bnx2fc_send_session_enable_req - initiates FCoE Session enablement + * + * @port: port structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +int bnx2fc_send_session_enable_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt) +{ + struct kwqe *kwqe_arr[2]; + struct bnx2fc_interface *interface = port->priv; + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct bnx2fc_hba *hba = interface->hba; + struct fcoe_kwqe_conn_enable_disable enbl_req; + struct fc_lport *lport = port->lport; + struct fc_rport *rport = tgt->rport; + int num_kwqes = 1; + int rc = 0; + u32 port_id; + + memset(&enbl_req, 0x00, + sizeof(struct fcoe_kwqe_conn_enable_disable)); + enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN; + enbl_req.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5]; + /* local mac */ + enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4]; + enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3]; + enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2]; + enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1]; + enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0]; + memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN); + + enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5]; + enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4]; + enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3]; + enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2]; + enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1]; + enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0]; + + port_id = fc_host_port_id(lport->host); + if (port_id != tgt->sid) { + printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x," + "sid = 0x%x\n", port_id, tgt->sid); + port_id = tgt->sid; + } + enbl_req.s_id[0] = (port_id & 0x000000FF); + enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8; + enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16; + + port_id = rport->port_id; + enbl_req.d_id[0] = (port_id & 0x000000FF); + enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8; + enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16; + enbl_req.vlan_tag = interface->vlan_id << + FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; + enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; + enbl_req.vlan_flag = interface->vlan_enabled; + enbl_req.context_id = tgt->context_id; + enbl_req.conn_id = tgt->fcoe_conn_id; + + kwqe_arr[0] = (struct kwqe *) &enbl_req; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + return rc; +} + +/** + * bnx2fc_send_session_disable_req - initiates FCoE Session disable + * + * @port: port structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +int bnx2fc_send_session_disable_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt) +{ + struct bnx2fc_interface *interface = port->priv; + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct bnx2fc_hba *hba = interface->hba; + struct fcoe_kwqe_conn_enable_disable disable_req; + struct kwqe *kwqe_arr[2]; + struct fc_rport *rport = tgt->rport; + int num_kwqes = 1; + int rc = 0; + u32 port_id; + + memset(&disable_req, 0x00, + sizeof(struct fcoe_kwqe_conn_enable_disable)); + disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN; + disable_req.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + disable_req.src_mac_addr_lo[0] = tgt->src_addr[5]; + disable_req.src_mac_addr_lo[1] = tgt->src_addr[4]; + disable_req.src_mac_addr_mid[0] = tgt->src_addr[3]; + disable_req.src_mac_addr_mid[1] = tgt->src_addr[2]; + 
disable_req.src_mac_addr_hi[0] = tgt->src_addr[1]; + disable_req.src_mac_addr_hi[1] = tgt->src_addr[0]; + + disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5]; + disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4]; + disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3]; + disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2]; + disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1]; + disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0]; + + port_id = tgt->sid; + disable_req.s_id[0] = (port_id & 0x000000FF); + disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8; + disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16; + + + port_id = rport->port_id; + disable_req.d_id[0] = (port_id & 0x000000FF); + disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8; + disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16; + disable_req.context_id = tgt->context_id; + disable_req.conn_id = tgt->fcoe_conn_id; + disable_req.vlan_tag = interface->vlan_id << + FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; + disable_req.vlan_tag |= + 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; + disable_req.vlan_flag = interface->vlan_enabled; + + kwqe_arr[0] = (struct kwqe *) &disable_req; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +/** + * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy + * + * @hba: adapter structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt) +{ + struct fcoe_kwqe_conn_destroy destroy_req; + struct kwqe *kwqe_arr[2]; + int num_kwqes = 1; + int rc = 0; + + memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy)); + destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN; + destroy_req.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + destroy_req.context_id = tgt->context_id; + destroy_req.conn_id = tgt->fcoe_conn_id; + + kwqe_arr[0] = (struct kwqe *) &destroy_req; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport) +{ + struct bnx2fc_lport *blport; + + spin_lock_bh(&hba->hba_lock); + list_for_each_entry(blport, &hba->vports, list) { + if (blport->lport == lport) { + spin_unlock_bh(&hba->hba_lock); + return true; + } + } + spin_unlock_bh(&hba->hba_lock); + return false; + +} + + +static void bnx2fc_unsol_els_work(struct work_struct *work) +{ + struct bnx2fc_unsol_els *unsol_els; + struct fc_lport *lport; + struct bnx2fc_hba *hba; + struct fc_frame *fp; + + unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work); + lport = unsol_els->lport; + fp = unsol_els->fp; + hba = unsol_els->hba; + if (is_valid_lport(hba, lport)) + fc_exch_recv(lport, fp); + kfree(unsol_els); +} + +void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, + unsigned char *buf, + u32 frame_len, u16 l2_oxid) +{ + struct fcoe_port *port = tgt->port; + struct fc_lport *lport = port->lport; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_unsol_els *unsol_els; + struct fc_frame_header *fh; + struct fc_frame *fp; + struct sk_buff *skb; + u32 payload_len; + u32 crc; + u8 op; + + + unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC); + if (!unsol_els) { + BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n"); + return; + } + + BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n", + l2_oxid, frame_len); + + 
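/*
+ * frame_len is the length of the complete FC frame (header plus
+ * payload) taken from the RQ buffer; fc_frame_alloc() below is passed
+ * only the payload length, and the memcpy() then copies the FC header
+ * and payload into the new frame in one block.
+ */
+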
payload_len = frame_len - sizeof(struct fc_frame_header); + + fp = fc_frame_alloc(lport, payload_len); + if (!fp) { + printk(KERN_ERR PFX "fc_frame_alloc failure\n"); + kfree(unsol_els); + return; + } + + fh = (struct fc_frame_header *) fc_frame_header_get(fp); + /* Copy FC Frame header and payload into the frame */ + memcpy(fh, buf, frame_len); + + if (l2_oxid != FC_XID_UNKNOWN) + fh->fh_ox_id = htons(l2_oxid); + + skb = fp_skb(fp); + + if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) || + (fh->fh_r_ctl == FC_RCTL_ELS_REP)) { + + if (fh->fh_type == FC_TYPE_ELS) { + op = fc_frame_payload_op(fp); + if ((op == ELS_TEST) || (op == ELS_ESTC) || + (op == ELS_FAN) || (op == ELS_CSU)) { + /* + * No need to reply for these + * ELS requests + */ + printk(KERN_ERR PFX "dropping ELS 0x%x\n", op); + kfree_skb(skb); + kfree(unsol_els); + return; + } + } + crc = fcoe_fc_crc(fp); + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = FC_SOF_I3; + fr_eof(fp) = FC_EOF_T; + fr_crc(fp) = cpu_to_le32(~crc); + unsol_els->lport = lport; + unsol_els->hba = interface->hba; + unsol_els->fp = fp; + INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work); + queue_work(bnx2fc_wq, &unsol_els->unsol_els_work); + } else { + BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl); + kfree_skb(skb); + kfree(unsol_els); + } +} + +static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) +{ + u8 num_rq; + struct fcoe_err_report_entry *err_entry; + unsigned char *rq_data; + unsigned char *buf = NULL, *buf1; + int i; + u16 xid; + u32 frame_len, len; + struct bnx2fc_cmd *io_req = NULL; + struct bnx2fc_interface *interface = tgt->port->priv; + struct bnx2fc_hba *hba = interface->hba; + int rc = 0; + u64 err_warn_bit_map; + u8 err_warn = 0xff; + + + BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe); + switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) { + case FCOE_UNSOLICITED_FRAME_CQE_TYPE: + frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >> + FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT; + + num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; + + spin_lock_bh(&tgt->tgt_lock); + rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); + spin_unlock_bh(&tgt->tgt_lock); + + if (rq_data) { + buf = rq_data; + } else { + buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ), + GFP_ATOMIC); + + if (!buf1) { + BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n"); + break; + } + + for (i = 0; i < num_rq; i++) { + spin_lock_bh(&tgt->tgt_lock); + rq_data = (unsigned char *) + bnx2fc_get_next_rqe(tgt, 1); + spin_unlock_bh(&tgt->tgt_lock); + len = BNX2FC_RQ_BUF_SZ; + memcpy(buf1, rq_data, len); + buf1 += len; + } + } + bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, + FC_XID_UNKNOWN); + + if (buf != rq_data) + kfree(buf); + spin_lock_bh(&tgt->tgt_lock); + bnx2fc_return_rqe(tgt, num_rq); + spin_unlock_bh(&tgt->tgt_lock); + break; + + case FCOE_ERROR_DETECTION_CQE_TYPE: + /* + * In case of error reporting CQE a single RQ entry + * is consumed. 
+ */ + spin_lock_bh(&tgt->tgt_lock); + num_rq = 1; + err_entry = (struct fcoe_err_report_entry *) + bnx2fc_get_next_rqe(tgt, 1); + xid = err_entry->fc_hdr.ox_id; + BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid); + BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n", + err_entry->data.err_warn_bitmap_hi, + err_entry->data.err_warn_bitmap_lo); + BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n", + err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); + + if (xid > hba->max_xid) { + BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", + xid); + goto ret_err_rqe; + } + + + io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; + if (!io_req) + goto ret_err_rqe; + + if (io_req->cmd_type != BNX2FC_SCSI_CMD) { + printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); + goto ret_err_rqe; + } + + if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, + &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in " + "progress.. ignore unsol err\n"); + goto ret_err_rqe; + } + + err_warn_bit_map = (u64) + ((u64)err_entry->data.err_warn_bitmap_hi << 32) | + (u64)err_entry->data.err_warn_bitmap_lo; + for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { + if (err_warn_bit_map & (u64)((u64)1 << i)) { + err_warn = i; + break; + } + } + + /* + * If ABTS is already in progress, and FW error is + * received after that, do not cancel the timeout_work + * and let the error recovery continue by explicitly + * logging out the target, when the ABTS eventually + * times out. + */ + if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { + printk(KERN_ERR PFX "err_warn: io_req (0x%x) already " + "in ABTS processing\n", xid); + goto ret_err_rqe; + } + BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn); + if (tgt->dev_type != TYPE_TAPE) + goto skip_rec; + switch (err_warn) { + case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION: + case FCOE_ERROR_CODE_DATA_OOO_RO: + case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT: + case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET: + case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ: + case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET: + BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n", + xid); + memcpy(&io_req->err_entry, err_entry, + sizeof(struct fcoe_err_report_entry)); + if (!test_bit(BNX2FC_FLAG_SRR_SENT, + &io_req->req_flags)) { + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_send_rec(io_req); + spin_lock_bh(&tgt->tgt_lock); + + if (rc) + goto skip_rec; + } else + printk(KERN_ERR PFX "SRR in progress\n"); + goto ret_err_rqe; + default: + break; + } + +skip_rec: + set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags); + /* + * Cancel the timeout_work, as we received IO + * completion with FW error. + */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, bnx2fc_cmd_release); + + rc = bnx2fc_initiate_abts(io_req); + if (rc != SUCCESS) { + printk(KERN_ERR PFX "err_warn: initiate_abts " + "failed xid = 0x%x. issue cleanup\n", + io_req->xid); + bnx2fc_initiate_cleanup(io_req); + } +ret_err_rqe: + bnx2fc_return_rqe(tgt, 1); + spin_unlock_bh(&tgt->tgt_lock); + break; + + case FCOE_WARNING_DETECTION_CQE_TYPE: + /* + *In case of warning reporting CQE a single RQ entry + * is consumes. 
+ */ + spin_lock_bh(&tgt->tgt_lock); + num_rq = 1; + err_entry = (struct fcoe_err_report_entry *) + bnx2fc_get_next_rqe(tgt, 1); + xid = cpu_to_be16(err_entry->fc_hdr.ox_id); + BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid); + BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x", + err_entry->data.err_warn_bitmap_hi, + err_entry->data.err_warn_bitmap_lo); + BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", + err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); + + if (xid > hba->max_xid) { + BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid); + goto ret_warn_rqe; + } + + err_warn_bit_map = (u64) + ((u64)err_entry->data.err_warn_bitmap_hi << 32) | + (u64)err_entry->data.err_warn_bitmap_lo; + for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { + if (err_warn_bit_map & ((u64)1 << i)) { + err_warn = i; + break; + } + } + BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn); + + io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; + if (!io_req) + goto ret_warn_rqe; + + if (io_req->cmd_type != BNX2FC_SCSI_CMD) { + printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); + goto ret_warn_rqe; + } + + memcpy(&io_req->err_entry, err_entry, + sizeof(struct fcoe_err_report_entry)); + + if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION) + /* REC_TOV is not a warning code */ + BUG_ON(1); + else + BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n"); +ret_warn_rqe: + bnx2fc_return_rqe(tgt, 1); + spin_unlock_bh(&tgt->tgt_lock); + break; + + default: + printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n"); + break; + } +} + +void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe, + unsigned char *rq_data, u8 num_rq, + struct fcoe_task_ctx_entry *task) +{ + struct fcoe_port *port = tgt->port; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct bnx2fc_cmd *io_req; + + u16 xid; + u8 cmd_type; + u8 rx_state = 0; + + spin_lock_bh(&tgt->tgt_lock); + + xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; + io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; + + if (io_req == NULL) { + printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n"); + spin_unlock_bh(&tgt->tgt_lock); + return; + } + + /* Timestamp IO completion time */ + cmd_type = io_req->cmd_type; + + rx_state = ((task->rxwr_txrd.var_ctx.rx_flags & + FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >> + FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT); + + /* Process other IO completion types */ + switch (cmd_type) { + case BNX2FC_SCSI_CMD: + if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) { + bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq, + rq_data); + spin_unlock_bh(&tgt->tgt_lock); + return; + } + + if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) + bnx2fc_process_abts_compl(io_req, task, num_rq); + else if (rx_state == + FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED) + bnx2fc_process_cleanup_compl(io_req, task, num_rq); + else + printk(KERN_ERR PFX "Invalid rx state - %d\n", + rx_state); + break; + + case BNX2FC_TASK_MGMT_CMD: + BNX2FC_IO_DBG(io_req, "Processing TM complete\n"); + bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data); + break; + + case BNX2FC_ABTS: + /* + * ABTS request received by firmware. 
ABTS response + * will be delivered to the task belonging to the IO + * that was aborted + */ + BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + break; + + case BNX2FC_ELS: + if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) + bnx2fc_process_els_compl(io_req, task, num_rq); + else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) + bnx2fc_process_abts_compl(io_req, task, num_rq); + else if (rx_state == + FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED) + bnx2fc_process_cleanup_compl(io_req, task, num_rq); + else + printk(KERN_ERR PFX "Invalid rx state = %d\n", + rx_state); + break; + + case BNX2FC_CLEANUP: + BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + break; + + case BNX2FC_SEQ_CLEANUP: + BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n", + io_req->xid); + bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + break; + + default: + printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type); + break; + } + spin_unlock_bh(&tgt->tgt_lock); +} + +void bnx2fc_arm_cq(struct bnx2fc_rport *tgt) +{ + struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; + u32 msg; + + wmb(); + rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit << + FCOE_CQE_TOGGLE_BIT_SHIFT); + msg = *((u32 *)rx_db); + writel(cpu_to_le32(msg), tgt->ctx_base); + +} + +static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe, + unsigned char *rq_data, u8 num_rq, + struct fcoe_task_ctx_entry *task) +{ + struct bnx2fc_work *work; + work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC); + if (!work) + return NULL; + + INIT_LIST_HEAD(&work->list); + work->tgt = tgt; + work->wqe = wqe; + work->num_rq = num_rq; + work->task = task; + if (rq_data) + memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ); + + return work; +} + +/* Pending work request completion */ +static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe) +{ + unsigned int cpu = wqe % num_possible_cpus(); + struct bnx2fc_percpu_s *fps; + struct bnx2fc_work *work; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct fcoe_port *port = tgt->port; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + unsigned char *rq_data = NULL; + unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ]; + int task_idx, index; + u16 xid; + u8 num_rq; + int i; + + xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; + if (xid >= hba->max_tasks) { + pr_err(PFX "ERROR:xid out of range\n"); + return false; + } + + task_idx = xid / BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx]; + task = &task_page[index]; + + num_rq = ((task->rxwr_txrd.var_ctx.rx_flags & + FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >> + FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT); + + memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ); + + if (!num_rq) + goto num_rq_zero; + + rq_data = bnx2fc_get_next_rqe(tgt, 1); + + if (num_rq > 1) { + /* We do not need extra sense data */ + for (i = 1; i < num_rq; i++) + bnx2fc_get_next_rqe(tgt, 1); + } + + if (rq_data) + memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ); + + /* return RQ entries */ + for (i = 0; i < num_rq; i++) + bnx2fc_return_rqe(tgt, 1); + +num_rq_zero: + + fps = &per_cpu(bnx2fc_percpu, cpu); + spin_lock_bh(&fps->fp_work_lock); + if (fps->iothread) { + work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff, + num_rq, task); + if (work) 
{ + list_add_tail(&work->list, &fps->work_list); + wake_up_process(fps->iothread); + spin_unlock_bh(&fps->fp_work_lock); + return true; + } + } + spin_unlock_bh(&fps->fp_work_lock); + bnx2fc_process_cq_compl(tgt, wqe, + rq_data_buff, num_rq, task); + + return true; +} + +int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) +{ + struct fcoe_cqe *cq; + u32 cq_cons; + struct fcoe_cqe *cqe; + u32 num_free_sqes = 0; + u32 num_cqes = 0; + u16 wqe; + + /* + * cq_lock is a low contention lock used to protect + * the CQ data structure from being freed up during + * the upload operation + */ + spin_lock_bh(&tgt->cq_lock); + + if (!tgt->cq) { + printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n"); + spin_unlock_bh(&tgt->cq_lock); + return 0; + } + cq = tgt->cq; + cq_cons = tgt->cq_cons_idx; + cqe = &cq[cq_cons]; + + while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == + (tgt->cq_curr_toggle_bit << + FCOE_CQE_TOGGLE_BIT_SHIFT)) { + + /* new entry on the cq */ + if (wqe & FCOE_CQE_CQE_TYPE) { + /* Unsolicited event notification */ + bnx2fc_process_unsol_compl(tgt, wqe); + } else { + if (bnx2fc_pending_work(tgt, wqe)) + num_free_sqes++; + } + cqe++; + tgt->cq_cons_idx++; + num_cqes++; + + if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) { + tgt->cq_cons_idx = 0; + cqe = cq; + tgt->cq_curr_toggle_bit = + 1 - tgt->cq_curr_toggle_bit; + } + } + if (num_cqes) { + /* Arm CQ only if doorbell is mapped */ + if (tgt->ctx_base) + bnx2fc_arm_cq(tgt); + atomic_add(num_free_sqes, &tgt->free_sqes); + } + spin_unlock_bh(&tgt->cq_lock); + return 0; +} + +/** + * bnx2fc_fastpath_notification - process global event queue (KCQ) + * + * @hba: adapter structure pointer + * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry + * + * Fast path event notification handler + */ +static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, + struct fcoe_kcqe *new_cqe_kcqe) +{ + u32 conn_id = new_cqe_kcqe->fcoe_conn_id; + struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id]; + + if (!tgt) { + printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id); + return; + } + + bnx2fc_process_new_cqes(tgt); +} + +/** + * bnx2fc_process_ofld_cmpl - process FCoE session offload completion + * + * @hba: adapter structure pointer + * @ofld_kcqe: connection offload kcqe pointer + * + * handle session offload completion, enable the session if offload is + * successful. + */ +static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *ofld_kcqe) +{ + struct bnx2fc_rport *tgt; + struct bnx2fc_interface *interface; + u32 conn_id; + u32 context_id; + + conn_id = ofld_kcqe->fcoe_conn_id; + context_id = ofld_kcqe->fcoe_conn_context_id; + tgt = hba->tgt_ofld_list[conn_id]; + if (!tgt) { + printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n"); + return; + } + BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n", + ofld_kcqe->fcoe_conn_context_id); + interface = tgt->port->priv; + if (hba != interface->hba) { + printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n"); + goto ofld_cmpl_err; + } + /* + * cnic has allocated a context_id for this session; use this + * while enabling the session. 
+ */ + tgt->context_id = context_id; + if (ofld_kcqe->completion_status) { + if (ofld_kcqe->completion_status == + FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) { + printk(KERN_ERR PFX "unable to allocate FCoE context " + "resources\n"); + set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags); + } + } else { + /* FW offload request successfully completed */ + set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + } +ofld_cmpl_err: + set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + wake_up_interruptible(&tgt->ofld_wait); +} + +/** + * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion + * + * @hba: adapter structure pointer + * @ofld_kcqe: connection offload kcqe pointer + * + * handle session enable completion, mark the rport as ready + */ + +static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *ofld_kcqe) +{ + struct bnx2fc_rport *tgt; + struct bnx2fc_interface *interface; + u32 conn_id; + u32 context_id; + + context_id = ofld_kcqe->fcoe_conn_context_id; + conn_id = ofld_kcqe->fcoe_conn_id; + tgt = hba->tgt_ofld_list[conn_id]; + if (!tgt) { + printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n"); + return; + } + + BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n", + ofld_kcqe->fcoe_conn_context_id); + + /* + * context_id should be the same for this target during offload + * and enable + */ + if (tgt->context_id != context_id) { + printk(KERN_ERR PFX "context id mismatch\n"); + return; + } + interface = tgt->port->priv; + if (hba != interface->hba) { + printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n"); + goto enbl_cmpl_err; + } + if (!ofld_kcqe->completion_status) + /* enable successful - rport ready for issuing IOs */ + set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); + +enbl_cmpl_err: + set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + wake_up_interruptible(&tgt->ofld_wait); +} + +static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *disable_kcqe) +{ + + struct bnx2fc_rport *tgt; + u32 conn_id; + + conn_id = disable_kcqe->fcoe_conn_id; + tgt = hba->tgt_ofld_list[conn_id]; + if (!tgt) { + printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n"); + return; + } + + BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id); + + if (disable_kcqe->completion_status) { + printk(KERN_ERR PFX "Disable failed with cmpl status %d\n", + disable_kcqe->completion_status); + set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags); + set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + wake_up_interruptible(&tgt->upld_wait); + } else { + /* disable successful */ + BNX2FC_TGT_DBG(tgt, "disable successful\n"); + clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); + set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags); + set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + wake_up_interruptible(&tgt->upld_wait); + } +} + +static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *destroy_kcqe) +{ + struct bnx2fc_rport *tgt; + u32 conn_id; + + conn_id = destroy_kcqe->fcoe_conn_id; + tgt = hba->tgt_ofld_list[conn_id]; + if (!tgt) { + printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n"); + return; + } + + BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id); + + if (destroy_kcqe->completion_status) { + printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n", + destroy_kcqe->completion_status); + return; + } else { + /* destroy successful */ + BNX2FC_TGT_DBG(tgt, "upload successful\n"); + clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags); + 
set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags); + set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + wake_up_interruptible(&tgt->upld_wait); + } +} + +static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code) +{ + switch (err_code) { + case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE: + printk(KERN_ERR PFX "init_failure due to invalid opcode\n"); + break; + + case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE: + printk(KERN_ERR PFX "init failed due to ctx alloc failure\n"); + break; + + case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR: + printk(KERN_ERR PFX "init_failure due to NIC error\n"); + break; + case FCOE_KCQE_COMPLETION_STATUS_ERROR: + printk(KERN_ERR PFX "init failure due to compl status err\n"); + break; + case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION: + printk(KERN_ERR PFX "init failure due to HSI mismatch\n"); + break; + default: + printk(KERN_ERR PFX "Unknown Error code %d\n", err_code); + } +} + +/** + * bnx2fc_indicate_kcqe() - process KCQE + * + * @context: adapter structure pointer + * @kcq: kcqe pointer + * @num_cqe: Number of completion queue elements + * + * Generic KCQ event handler + */ +void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[], + u32 num_cqe) +{ + struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; + int i = 0; + struct fcoe_kcqe *kcqe = NULL; + + while (i < num_cqe) { + kcqe = (struct fcoe_kcqe *) kcq[i++]; + + switch (kcqe->op_code) { + case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION: + bnx2fc_fastpath_notification(hba, kcqe); + break; + + case FCOE_KCQE_OPCODE_OFFLOAD_CONN: + bnx2fc_process_ofld_cmpl(hba, kcqe); + break; + + case FCOE_KCQE_OPCODE_ENABLE_CONN: + bnx2fc_process_enable_conn_cmpl(hba, kcqe); + break; + + case FCOE_KCQE_OPCODE_INIT_FUNC: + if (kcqe->completion_status != + FCOE_KCQE_COMPLETION_STATUS_SUCCESS) { + bnx2fc_init_failure(hba, + kcqe->completion_status); + } else { + set_bit(ADAPTER_STATE_UP, &hba->adapter_state); + bnx2fc_get_link_state(hba); + printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n", + (u8)hba->pcidev->bus->number); + } + break; + + case FCOE_KCQE_OPCODE_DESTROY_FUNC: + if (kcqe->completion_status != + FCOE_KCQE_COMPLETION_STATUS_SUCCESS) { + + printk(KERN_ERR PFX "DESTROY failed\n"); + } else { + printk(KERN_ERR PFX "DESTROY success\n"); + } + set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); + wake_up_interruptible(&hba->destroy_wait); + break; + + case FCOE_KCQE_OPCODE_DISABLE_CONN: + bnx2fc_process_conn_disable_cmpl(hba, kcqe); + break; + + case FCOE_KCQE_OPCODE_DESTROY_CONN: + bnx2fc_process_conn_destroy_cmpl(hba, kcqe); + break; + + case FCOE_KCQE_OPCODE_STAT_FUNC: + if (kcqe->completion_status != + FCOE_KCQE_COMPLETION_STATUS_SUCCESS) + printk(KERN_ERR PFX "STAT failed\n"); + complete(&hba->stat_req_done); + break; + + case FCOE_KCQE_OPCODE_FCOE_ERROR: + default: + printk(KERN_ERR PFX "unknown opcode 0x%x\n", + kcqe->op_code); + } + } +} + +void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid) +{ + struct fcoe_sqe *sqe; + + sqe = &tgt->sq[tgt->sq_prod_idx]; + + /* Fill SQ WQE */ + sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT; + sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT; + + /* Advance SQ Prod Idx */ + if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) { + tgt->sq_prod_idx = 0; + tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit; + } +} + +void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt) +{ + struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; + u32 msg; + + wmb(); + sq_db->prod = tgt->sq_prod_idx | + (tgt->sq_curr_toggle_bit << 15); + msg = *((u32 *)sq_db); + 
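/* Post the SQ producer index (including the toggle bit) to the
+ * doorbell register mapped at tgt->ctx_base.
+ */
+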
writel(cpu_to_le32(msg), tgt->ctx_base); + +} + +int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt) +{ + u32 context_id = tgt->context_id; + struct fcoe_port *port = tgt->port; + u32 reg_off; + resource_size_t reg_base; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + + reg_base = pci_resource_start(hba->pcidev, + BNX2X_DOORBELL_PCI_BAR); + reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF); + tgt->ctx_base = ioremap(reg_base + reg_off, 4); + if (!tgt->ctx_base) + return -ENOMEM; + return 0; +} + +char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items) +{ + char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ); + + if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX) + return NULL; + + tgt->rq_cons_idx += num_items; + + if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX) + tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX; + + return buf; +} + +void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items) +{ + /* return the rq buffer */ + u32 next_prod_idx = tgt->rq_prod_idx + num_items; + if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) { + /* Wrap around RQ */ + next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX; + } + tgt->rq_prod_idx = next_prod_idx; + tgt->conn_db->rq_prod = tgt->rq_prod_idx; +} + +void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req, + struct fcoe_task_ctx_entry *task, + struct bnx2fc_cmd *orig_io_req, + u32 offset) +{ + struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd; + struct bnx2fc_rport *tgt = seq_clnp_req->tgt; + struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl; + struct fcoe_ext_mul_sges_ctx *sgl; + u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP; + u8 orig_task_type; + u16 orig_xid = orig_io_req->xid; + u32 context_id = tgt->context_id; + u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma; + u32 orig_offset = offset; + int bd_count; + int i; + + memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); + + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) + orig_task_type = FCOE_TASK_TYPE_WRITE; + else + orig_task_type = FCOE_TASK_TYPE_READ; + + /* Tx flags */ + task->txwr_rxrd.const_ctx.tx_flags = + FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP << + FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; + /* init flags */ + task->txwr_rxrd.const_ctx.init_flags = task_type << + FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; + task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; + task->rxwr_txrd.const_ctx.init_flags = context_id << + FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; + task->rxwr_txrd.const_ctx.init_flags = context_id << + FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; + + task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; + + task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0; + task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset; + + bd_count = orig_io_req->bd_tbl->bd_valid; + + /* obtain the appropriate bd entry from relative offset */ + for (i = 0; i < bd_count; i++) { + if (offset < bd[i].buf_len) + break; + offset -= bd[i].buf_len; + } + phys_addr += (i * sizeof(struct fcoe_bd_ctx)); + + if (orig_task_type == FCOE_TASK_TYPE_WRITE) { + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = + (u32)phys_addr; + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = + (u32)((u64)phys_addr >> 32); + task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = + bd_count; + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off = + offset; /* adjusted offset */ + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i; + } else { + + /* Multiple SGEs 
were used for this IO */ + sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; + sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr; + sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32); + sgl->mul_sgl.sgl_size = bd_count; + sgl->mul_sgl.cur_sge_off = offset; /*adjusted offset */ + sgl->mul_sgl.cur_sge_idx = i; + + memset(&task->rxwr_only.rx_seq_ctx, 0, + sizeof(struct fcoe_rx_seq_ctx)); + task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset; + task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset; + } +} +void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u16 orig_xid) +{ + u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP; + struct bnx2fc_rport *tgt = io_req->tgt; + u32 context_id = tgt->context_id; + + memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); + + /* Tx Write Rx Read */ + /* init flags */ + task->txwr_rxrd.const_ctx.init_flags = task_type << + FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; + task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; + if (tgt->dev_type == TYPE_TAPE) + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_TAPE << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + else + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_DISK << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; + + /* Tx flags */ + task->txwr_rxrd.const_ctx.tx_flags = + FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP << + FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; + + /* Rx Read Tx Write */ + task->rxwr_txrd.const_ctx.init_flags = context_id << + FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; + task->rxwr_txrd.var_ctx.rx_flags |= 1 << + FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; +} + +void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task) +{ + struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); + struct bnx2fc_rport *tgt = io_req->tgt; + struct fc_frame_header *fc_hdr; + struct fcoe_ext_mul_sges_ctx *sgl; + u8 task_type = 0; + u64 *hdr; + u64 temp_hdr[3]; + u32 context_id; + + + /* Obtain task_type */ + if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) || + (io_req->cmd_type == BNX2FC_ELS)) { + task_type = FCOE_TASK_TYPE_MIDPATH; + } else if (io_req->cmd_type == BNX2FC_ABTS) { + task_type = FCOE_TASK_TYPE_ABTS; + } + + memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); + + /* Setup the task from io_req for easy reference */ + io_req->task = task; + + BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n", + io_req->cmd_type, task_type); + + /* Tx only */ + if ((task_type == FCOE_TASK_TYPE_MIDPATH) || + (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = + (u32)mp_req->mp_req_bd_dma; + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = + (u32)((u64)mp_req->mp_req_bd_dma >> 32); + task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1; + } + + /* Tx Write Rx Read */ + /* init flags */ + task->txwr_rxrd.const_ctx.init_flags = task_type << + FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; + if (tgt->dev_type == TYPE_TAPE) + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_TAPE << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + else + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_DISK << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; + + /* tx flags */ + task->txwr_rxrd.const_ctx.tx_flags = 
FCOE_TASK_TX_STATE_INIT << + FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; + + /* Rx Write Tx Read */ + task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; + + /* rx flags */ + task->rxwr_txrd.var_ctx.rx_flags |= 1 << + FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; + + context_id = tgt->context_id; + task->rxwr_txrd.const_ctx.init_flags = context_id << + FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; + + fc_hdr = &(mp_req->req_fc_hdr); + if (task_type == FCOE_TASK_TYPE_MIDPATH) { + fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid); + fc_hdr->fh_rx_id = htons(0xffff); + task->rxwr_txrd.var_ctx.rx_id = 0xffff; + } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) { + fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid); + } + + /* Fill FC Header into middle path buffer */ + hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr; + memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr)); + hdr[0] = cpu_to_be64(temp_hdr[0]); + hdr[1] = cpu_to_be64(temp_hdr[1]); + hdr[2] = cpu_to_be64(temp_hdr[2]); + + /* Rx Only */ + if (task_type == FCOE_TASK_TYPE_MIDPATH) { + sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; + + sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma; + sgl->mul_sgl.cur_sge_addr.hi = + (u32)((u64)mp_req->mp_resp_bd_dma >> 32); + sgl->mul_sgl.sgl_size = 1; + } +} + +void bnx2fc_init_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task) +{ + u8 task_type; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct io_bdt *bd_tbl = io_req->bd_tbl; + struct bnx2fc_rport *tgt = io_req->tgt; + struct fcoe_cached_sge_ctx *cached_sge; + struct fcoe_ext_mul_sges_ctx *sgl; + int dev_type = tgt->dev_type; + u64 *fcp_cmnd; + u64 tmp_fcp_cmnd[4]; + u32 context_id; + int cnt, i; + int bd_count; + + memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); + + /* Setup the task from io_req for easy reference */ + io_req->task = task; + + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) + task_type = FCOE_TASK_TYPE_WRITE; + else + task_type = FCOE_TASK_TYPE_READ; + + /* Tx only */ + bd_count = bd_tbl->bd_valid; + cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge; + if (task_type == FCOE_TASK_TYPE_WRITE) { + if ((dev_type == TYPE_DISK) && (bd_count == 1)) { + struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; + + task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo = + cached_sge->cur_buf_addr.lo = + fcoe_bd_tbl->buf_addr_lo; + task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi = + cached_sge->cur_buf_addr.hi = + fcoe_bd_tbl->buf_addr_hi; + task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem = + cached_sge->cur_buf_rem = + fcoe_bd_tbl->buf_len; + + task->txwr_rxrd.const_ctx.init_flags |= 1 << + FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; + } else { + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = + (u32)bd_tbl->bd_tbl_dma; + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = + (u32)((u64)bd_tbl->bd_tbl_dma >> 32); + task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = + bd_tbl->bd_valid; + } + } + + /*Tx Write Rx Read */ + /* Init state to NORMAL */ + task->txwr_rxrd.const_ctx.init_flags |= task_type << + FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; + if (dev_type == TYPE_TAPE) { + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_TAPE << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + io_req->rec_retry = 0; + io_req->rec_retry = 0; + } else + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_DISK << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; + /* tx 
flags */ + task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL << + FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; + + /* Set initial seq counter */ + task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1; + + /* Fill FCP_CMND IU */ + fcp_cmnd = (u64 *) + task->txwr_rxrd.union_ctx.fcp_cmd.opaque; + bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd); + + /* swap fcp_cmnd */ + cnt = sizeof(struct fcp_cmnd) / sizeof(u64); + + for (i = 0; i < cnt; i++) { + *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]); + fcp_cmnd++; + } + + /* Rx Write Tx Read */ + task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; + + context_id = tgt->context_id; + task->rxwr_txrd.const_ctx.init_flags = context_id << + FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; + + /* rx flags */ + /* Set state to "waiting for the first packet" */ + task->rxwr_txrd.var_ctx.rx_flags |= 1 << + FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; + + task->rxwr_txrd.var_ctx.rx_id = 0xffff; + + /* Rx Only */ + if (task_type != FCOE_TASK_TYPE_READ) + return; + + sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; + bd_count = bd_tbl->bd_valid; + + if (dev_type == TYPE_DISK) { + if (bd_count == 1) { + + struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; + + cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; + cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; + cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; + task->txwr_rxrd.const_ctx.init_flags |= 1 << + FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; + } else if (bd_count == 2) { + struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; + + cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; + cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; + cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; + + fcoe_bd_tbl++; + cached_sge->second_buf_addr.lo = + fcoe_bd_tbl->buf_addr_lo; + cached_sge->second_buf_addr.hi = + fcoe_bd_tbl->buf_addr_hi; + cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len; + task->txwr_rxrd.const_ctx.init_flags |= 1 << + FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; + } else { + + sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; + sgl->mul_sgl.cur_sge_addr.hi = + (u32)((u64)bd_tbl->bd_tbl_dma >> 32); + sgl->mul_sgl.sgl_size = bd_count; + } + } else { + sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; + sgl->mul_sgl.cur_sge_addr.hi = + (u32)((u64)bd_tbl->bd_tbl_dma >> 32); + sgl->mul_sgl.sgl_size = bd_count; + } +} + +/** + * bnx2fc_setup_task_ctx - allocate and map task context + * + * @hba: pointer to adapter structure + * + * allocate memory for task context, and associated BD table to be used + * by firmware + * + */ +int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) +{ + int rc = 0; + struct regpair *task_ctx_bdt; + dma_addr_t addr; + int task_ctx_arr_sz; + int i; + + /* + * Allocate task context bd table. A page size of bd table + * can map 256 buffers. Each buffer contains 32 task context + * entries. Hence the limit with one page is 8192 task context + * entries. 
+ */ + hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, + &hba->task_ctx_bd_dma, + GFP_KERNEL); + if (!hba->task_ctx_bd_tbl) { + printk(KERN_ERR PFX "unable to allocate task context BDT\n"); + rc = -1; + goto out; + } + + /* + * Allocate task_ctx which is an array of pointers pointing to + * a page containing 32 task contexts + */ + task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE); + hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)), + GFP_KERNEL); + if (!hba->task_ctx) { + printk(KERN_ERR PFX "unable to allocate task context array\n"); + rc = -1; + goto out1; + } + + /* + * Allocate task_ctx_dma which is an array of dma addresses + */ + hba->task_ctx_dma = kmalloc((task_ctx_arr_sz * + sizeof(dma_addr_t)), GFP_KERNEL); + if (!hba->task_ctx_dma) { + printk(KERN_ERR PFX "unable to alloc context mapping array\n"); + rc = -1; + goto out2; + } + + task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; + for (i = 0; i < task_ctx_arr_sz; i++) { + + hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, + &hba->task_ctx_dma[i], + GFP_KERNEL); + if (!hba->task_ctx[i]) { + printk(KERN_ERR PFX "unable to alloc task context\n"); + rc = -1; + goto out3; + } + addr = (u64)hba->task_ctx_dma[i]; + task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32); + task_ctx_bdt->lo = cpu_to_le32((u32)addr); + task_ctx_bdt++; + } + return 0; + +out3: + for (i = 0; i < task_ctx_arr_sz; i++) { + if (hba->task_ctx[i]) { + + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx[i], hba->task_ctx_dma[i]); + hba->task_ctx[i] = NULL; + } + } + + kfree(hba->task_ctx_dma); + hba->task_ctx_dma = NULL; +out2: + kfree(hba->task_ctx); + hba->task_ctx = NULL; +out1: + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma); + hba->task_ctx_bd_tbl = NULL; +out: + return rc; +} + +void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba) +{ + int task_ctx_arr_sz; + int i; + + if (hba->task_ctx_bd_tbl) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx_bd_tbl, + hba->task_ctx_bd_dma); + hba->task_ctx_bd_tbl = NULL; + } + + task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE); + if (hba->task_ctx) { + for (i = 0; i < task_ctx_arr_sz; i++) { + if (hba->task_ctx[i]) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx[i], + hba->task_ctx_dma[i]); + hba->task_ctx[i] = NULL; + } + } + kfree(hba->task_ctx); + hba->task_ctx = NULL; + } + + kfree(hba->task_ctx_dma); + hba->task_ctx_dma = NULL; +} + +static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba) +{ + int i; + int segment_count; + u32 *pbl; + + if (hba->hash_tbl_segments) { + + pbl = hba->hash_tbl_pbl; + if (pbl) { + segment_count = hba->hash_tbl_segment_count; + for (i = 0; i < segment_count; ++i) { + dma_addr_t dma_address; + + dma_address = le32_to_cpu(*pbl); + ++pbl; + dma_address += ((u64)le32_to_cpu(*pbl)) << 32; + ++pbl; + dma_free_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + hba->hash_tbl_segments[i], + dma_address); + } + } + + kfree(hba->hash_tbl_segments); + hba->hash_tbl_segments = NULL; + } + + if (hba->hash_tbl_pbl) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->hash_tbl_pbl, + hba->hash_tbl_pbl_dma); + hba->hash_tbl_pbl = NULL; + } +} + +static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) +{ + int i; + int hash_table_size; + int segment_count; + int segment_array_size; + int dma_segment_array_size; + dma_addr_t *dma_segment_array; + u32 *pbl; + + hash_table_size = BNX2FC_NUM_MAX_SESS * 
BNX2FC_MAX_ROWS_IN_HASH_TBL * + sizeof(struct fcoe_hash_table_entry); + + segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1; + segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE; + hba->hash_tbl_segment_count = segment_count; + + segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments); + hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL); + if (!hba->hash_tbl_segments) { + printk(KERN_ERR PFX "hash table pointers alloc failed\n"); + return -ENOMEM; + } + dma_segment_array_size = segment_count * sizeof(*dma_segment_array); + dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL); + if (!dma_segment_array) { + printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n"); + goto cleanup_ht; + } + + for (i = 0; i < segment_count; ++i) { + hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + &dma_segment_array[i], + GFP_KERNEL); + if (!hba->hash_tbl_segments[i]) { + printk(KERN_ERR PFX "hash segment alloc failed\n"); + goto cleanup_dma; + } + } + + hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + &hba->hash_tbl_pbl_dma, + GFP_KERNEL); + if (!hba->hash_tbl_pbl) { + printk(KERN_ERR PFX "hash table pbl alloc failed\n"); + goto cleanup_dma; + } + + pbl = hba->hash_tbl_pbl; + for (i = 0; i < segment_count; ++i) { + u64 paddr = dma_segment_array[i]; + *pbl = cpu_to_le32((u32) paddr); + ++pbl; + *pbl = cpu_to_le32((u32) (paddr >> 32)); + ++pbl; + } + pbl = hba->hash_tbl_pbl; + i = 0; + while (*pbl && *(pbl + 1)) { + ++pbl; + ++pbl; + ++i; + } + kfree(dma_segment_array); + return 0; + +cleanup_dma: + for (i = 0; i < segment_count; ++i) { + if (hba->hash_tbl_segments[i]) + dma_free_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + hba->hash_tbl_segments[i], + dma_segment_array[i]); + } + + kfree(dma_segment_array); + +cleanup_ht: + kfree(hba->hash_tbl_segments); + hba->hash_tbl_segments = NULL; + return -ENOMEM; +} + +/** + * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer + * + * @hba: Pointer to adapter structure + * + */ +int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba) +{ + u64 addr; + u32 mem_size; + int i; + + if (bnx2fc_allocate_hash_table(hba)) + return -ENOMEM; + + mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); + hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size, + &hba->t2_hash_tbl_ptr_dma, + GFP_KERNEL); + if (!hba->t2_hash_tbl_ptr) { + printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); + bnx2fc_free_fw_resc(hba); + return -ENOMEM; + } + + mem_size = BNX2FC_NUM_MAX_SESS * + sizeof(struct fcoe_t2_hash_table_entry); + hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size, + &hba->t2_hash_tbl_dma, + GFP_KERNEL); + if (!hba->t2_hash_tbl) { + printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); + bnx2fc_free_fw_resc(hba); + return -ENOMEM; + } + for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { + addr = (unsigned long) hba->t2_hash_tbl_dma + + ((i+1) * sizeof(struct fcoe_t2_hash_table_entry)); + hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff; + hba->t2_hash_tbl[i].next.hi = addr >> 32; + } + + hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, &hba->dummy_buf_dma, + GFP_KERNEL); + if (!hba->dummy_buffer) { + printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n"); + bnx2fc_free_fw_resc(hba); + return -ENOMEM; + } + + hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + &hba->stats_buf_dma, + GFP_KERNEL); + if (!hba->stats_buffer) { + printk(KERN_ERR PFX "unable to 
alloc Stats Buffer\n"); + bnx2fc_free_fw_resc(hba); + return -ENOMEM; + } + + return 0; +} + +void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba) +{ + u32 mem_size; + + if (hba->stats_buffer) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->stats_buffer, hba->stats_buf_dma); + hba->stats_buffer = NULL; + } + + if (hba->dummy_buffer) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->dummy_buffer, hba->dummy_buf_dma); + hba->dummy_buffer = NULL; + } + + if (hba->t2_hash_tbl_ptr) { + mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); + dma_free_coherent(&hba->pcidev->dev, mem_size, + hba->t2_hash_tbl_ptr, + hba->t2_hash_tbl_ptr_dma); + hba->t2_hash_tbl_ptr = NULL; + } + + if (hba->t2_hash_tbl) { + mem_size = BNX2FC_NUM_MAX_SESS * + sizeof(struct fcoe_t2_hash_table_entry); + dma_free_coherent(&hba->pcidev->dev, mem_size, + hba->t2_hash_tbl, hba->t2_hash_tbl_dma); + hba->t2_hash_tbl = NULL; + } + bnx2fc_free_hash_table(hba); +} diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c new file mode 100644 index 000000000..b42a9accb --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -0,0 +1,2102 @@ +/* bnx2fc_io.c: QLogic Linux FCoE offload driver. + * IO manager and SCSI IO processing. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" + +#define RESERVE_FREE_LIST_INDEX num_possible_cpus() + +static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, + int bd_index); +static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); +static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req); +static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req); +static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req); +static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, + struct fcoe_fcp_rsp_payload *fcp_rsp, + u8 num_rq, unsigned char *rq_data); + +void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, + unsigned int timer_msec) +{ + struct bnx2fc_interface *interface = io_req->port->priv; + + if (queue_delayed_work(interface->timer_work_queue, + &io_req->timeout_work, + msecs_to_jiffies(timer_msec))) + kref_get(&io_req->refcount); +} + +static void bnx2fc_cmd_timeout(struct work_struct *work) +{ + struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd, + timeout_work.work); + u8 cmd_type = io_req->cmd_type; + struct bnx2fc_rport *tgt = io_req->tgt; + int rc; + + BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d," + "req_flags = %lx\n", cmd_type, io_req->req_flags); + + spin_lock_bh(&tgt->tgt_lock); + if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) { + clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags); + /* + * ideally we should hold the io_req until RRQ complets, + * and release io_req from timeout hold. 
+ */ + spin_unlock_bh(&tgt->tgt_lock); + bnx2fc_send_rrq(io_req); + return; + } + if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n"); + goto done; + } + + switch (cmd_type) { + case BNX2FC_SCSI_CMD: + if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, + &io_req->req_flags)) { + /* Handle eh_abort timeout */ + BNX2FC_IO_DBG(io_req, "eh_abort timed out\n"); + complete(&io_req->abts_done); + } else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, + &io_req->req_flags)) { + /* Handle internally generated ABTS timeout */ + BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n", + kref_read(&io_req->refcount)); + if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags))) { + /* + * Cleanup and return original command to + * mid-layer. + */ + bnx2fc_initiate_cleanup(io_req); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + + return; + } + } else { + /* Hanlde IO timeout */ + BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n"); + if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, + &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "IO completed before " + " timer expiry\n"); + goto done; + } + + if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, + &io_req->req_flags)) { + rc = bnx2fc_initiate_abts(io_req); + if (rc == SUCCESS) + goto done; + + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + + return; + } else { + BNX2FC_IO_DBG(io_req, "IO already in " + "ABTS processing\n"); + } + } + break; + case BNX2FC_ELS: + + if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n"); + + if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags)) { + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + + return; + } + } else { + /* + * Handle ELS timeout. + * tgt_lock is used to sync compl path and timeout + * path. If els compl path is processing this IO, we + * have nothing to do here, just release the timer hold + */ + BNX2FC_IO_DBG(io_req, "ELS timed out\n"); + if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE, + &io_req->req_flags)) + goto done; + + /* Indicate the cb_func that this ELS is timed out */ + set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags); + + if ((io_req->cb_func) && (io_req->cb_arg)) { + io_req->cb_func(io_req->cb_arg); + io_req->cb_arg = NULL; + } + } + break; + default: + printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n", + cmd_type); + break; + } + +done: + /* release the cmd that was held when timer was set */ + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); +} + +static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code) +{ + /* Called with host lock held */ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + + /* + * active_cmd_queue may have other command types as well, + * and during flush operation, we want to error back only + * scsi commands. + */ + if (io_req->cmd_type != BNX2FC_SCSI_CMD) + return; + + BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code); + if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) { + /* Do not call scsi done for this IO */ + return; + } + + bnx2fc_unmap_sg_list(io_req); + io_req->sc_cmd = NULL; + + /* Sanity checks before returning command to mid-layer */ + if (!sc_cmd) { + printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. 
" + "IO(0x%x) already cleaned up\n", + io_req->xid); + return; + } + if (!sc_cmd->device) { + pr_err(PFX "0x%x: sc_cmd->device is NULL.\n", io_req->xid); + return; + } + if (!sc_cmd->device->host) { + pr_err(PFX "0x%x: sc_cmd->device->host is NULL.\n", + io_req->xid); + return; + } + + sc_cmd->result = err_code << 16; + + BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n", + sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries, + sc_cmd->allowed); + scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); + bnx2fc_priv(sc_cmd)->io_req = NULL; + scsi_done(sc_cmd); +} + +struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) +{ + struct bnx2fc_cmd_mgr *cmgr; + struct io_bdt *bdt_info; + struct bnx2fc_cmd *io_req; + size_t len; + u32 mem_size; + u16 xid; + int i; + int num_ios, num_pri_ios; + size_t bd_tbl_sz; + int arr_sz = num_possible_cpus() + 1; + u16 min_xid = BNX2FC_MIN_XID; + u16 max_xid = hba->max_xid; + + if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { + printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ + and max_xid 0x%x\n", min_xid, max_xid); + return NULL; + } + BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid); + + num_ios = max_xid - min_xid + 1; + len = (num_ios * (sizeof(struct bnx2fc_cmd *))); + len += sizeof(struct bnx2fc_cmd_mgr); + + cmgr = kzalloc(len, GFP_KERNEL); + if (!cmgr) { + printk(KERN_ERR PFX "failed to alloc cmgr\n"); + return NULL; + } + + cmgr->hba = hba; + cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list), + GFP_KERNEL); + if (!cmgr->free_list) { + printk(KERN_ERR PFX "failed to alloc free_list\n"); + goto mem_err; + } + + cmgr->free_list_lock = kcalloc(arr_sz, sizeof(*cmgr->free_list_lock), + GFP_KERNEL); + if (!cmgr->free_list_lock) { + printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); + kfree(cmgr->free_list); + cmgr->free_list = NULL; + goto mem_err; + } + + cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); + + for (i = 0; i < arr_sz; i++) { + INIT_LIST_HEAD(&cmgr->free_list[i]); + spin_lock_init(&cmgr->free_list_lock[i]); + } + + /* + * Pre-allocated pool of bnx2fc_cmds. + * Last entry in the free list array is the free list + * of slow path requests. 
+ */ + xid = BNX2FC_MIN_XID; + num_pri_ios = num_ios - hba->elstm_xids; + for (i = 0; i < num_ios; i++) { + io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); + + if (!io_req) { + printk(KERN_ERR PFX "failed to alloc io_req\n"); + goto mem_err; + } + + INIT_LIST_HEAD(&io_req->link); + INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout); + + io_req->xid = xid++; + if (i < num_pri_ios) + list_add_tail(&io_req->link, + &cmgr->free_list[io_req->xid % + num_possible_cpus()]); + else + list_add_tail(&io_req->link, + &cmgr->free_list[num_possible_cpus()]); + io_req++; + } + + /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ + mem_size = num_ios * sizeof(struct io_bdt *); + cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL); + if (!cmgr->io_bdt_pool) { + printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); + goto mem_err; + } + + mem_size = sizeof(struct io_bdt); + for (i = 0; i < num_ios; i++) { + cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL); + if (!cmgr->io_bdt_pool[i]) { + printk(KERN_ERR PFX "failed to alloc " + "io_bdt_pool[%d]\n", i); + goto mem_err; + } + } + + /* Allocate an map fcoe_bdt_ctx structures */ + bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx); + for (i = 0; i < num_ios; i++) { + bdt_info = cmgr->io_bdt_pool[i]; + bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, + bd_tbl_sz, + &bdt_info->bd_tbl_dma, + GFP_KERNEL); + if (!bdt_info->bd_tbl) { + printk(KERN_ERR PFX "failed to alloc " + "bdt_tbl[%d]\n", i); + goto mem_err; + } + } + + return cmgr; + +mem_err: + bnx2fc_cmd_mgr_free(cmgr); + return NULL; +} + +void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr) +{ + struct io_bdt *bdt_info; + struct bnx2fc_hba *hba = cmgr->hba; + size_t bd_tbl_sz; + u16 min_xid = BNX2FC_MIN_XID; + u16 max_xid = hba->max_xid; + int num_ios; + int i; + + num_ios = max_xid - min_xid + 1; + + /* Free fcoe_bdt_ctx structures */ + if (!cmgr->io_bdt_pool) + goto free_cmd_pool; + + bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx); + for (i = 0; i < num_ios; i++) { + bdt_info = cmgr->io_bdt_pool[i]; + if (bdt_info->bd_tbl) { + dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz, + bdt_info->bd_tbl, + bdt_info->bd_tbl_dma); + bdt_info->bd_tbl = NULL; + } + } + + /* Destroy io_bdt pool */ + for (i = 0; i < num_ios; i++) { + kfree(cmgr->io_bdt_pool[i]); + cmgr->io_bdt_pool[i] = NULL; + } + + kfree(cmgr->io_bdt_pool); + cmgr->io_bdt_pool = NULL; + +free_cmd_pool: + kfree(cmgr->free_list_lock); + + /* Destroy cmd pool */ + if (!cmgr->free_list) + goto free_cmgr; + + for (i = 0; i < num_possible_cpus() + 1; i++) { + struct bnx2fc_cmd *tmp, *io_req; + + list_for_each_entry_safe(io_req, tmp, + &cmgr->free_list[i], link) { + list_del(&io_req->link); + kfree(io_req); + } + } + kfree(cmgr->free_list); +free_cmgr: + /* Free command manager itself */ + kfree(cmgr); +} + +struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) +{ + struct fcoe_port *port = tgt->port; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr; + struct bnx2fc_cmd *io_req; + struct list_head *listp; + struct io_bdt *bd_tbl; + int index = RESERVE_FREE_LIST_INDEX; + u32 free_sqes; + u32 max_sqes; + u16 xid; + + max_sqes = tgt->max_sqes; + switch (type) { + case BNX2FC_TASK_MGMT_CMD: + max_sqes = BNX2FC_TM_MAX_SQES; + break; + case BNX2FC_ELS: + max_sqes = BNX2FC_ELS_MAX_SQES; + break; + default: + break; + } + + /* + * NOTE: Free list insertions and deletions are protected with + * cmgr lock + */ + 
spin_lock_bh(&cmd_mgr->free_list_lock[index]); + free_sqes = atomic_read(&tgt->free_sqes); + if ((list_empty(&(cmd_mgr->free_list[index]))) || + (tgt->num_active_ios.counter >= max_sqes) || + (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) { + BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " + "ios(%d):sqes(%d)\n", + tgt->num_active_ios.counter, tgt->max_sqes); + if (list_empty(&(cmd_mgr->free_list[index]))) + printk(KERN_ERR PFX "elstm_alloc: list_empty\n"); + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); + return NULL; + } + + listp = (struct list_head *) + cmd_mgr->free_list[index].next; + list_del_init(listp); + io_req = (struct bnx2fc_cmd *) listp; + xid = io_req->xid; + cmd_mgr->cmds[xid] = io_req; + atomic_inc(&tgt->num_active_ios); + atomic_dec(&tgt->free_sqes); + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); + + INIT_LIST_HEAD(&io_req->link); + + io_req->port = port; + io_req->cmd_mgr = cmd_mgr; + io_req->req_flags = 0; + io_req->cmd_type = type; + + /* Bind io_bdt for this io_req */ + /* Have a static link between io_req and io_bdt_pool */ + bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; + bd_tbl->io_req = io_req; + + /* Hold the io_req against deletion */ + kref_init(&io_req->refcount); + return io_req; +} + +struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) +{ + struct fcoe_port *port = tgt->port; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr; + struct bnx2fc_cmd *io_req; + struct list_head *listp; + struct io_bdt *bd_tbl; + u32 free_sqes; + u32 max_sqes; + u16 xid; + int index = raw_smp_processor_id(); + + max_sqes = BNX2FC_SCSI_MAX_SQES; + /* + * NOTE: Free list insertions and deletions are protected with + * cmgr lock + */ + spin_lock_bh(&cmd_mgr->free_list_lock[index]); + free_sqes = atomic_read(&tgt->free_sqes); + if ((list_empty(&cmd_mgr->free_list[index])) || + (tgt->num_active_ios.counter >= max_sqes) || + (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) { + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); + return NULL; + } + + listp = (struct list_head *) + cmd_mgr->free_list[index].next; + list_del_init(listp); + io_req = (struct bnx2fc_cmd *) listp; + xid = io_req->xid; + cmd_mgr->cmds[xid] = io_req; + atomic_inc(&tgt->num_active_ios); + atomic_dec(&tgt->free_sqes); + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); + + INIT_LIST_HEAD(&io_req->link); + + io_req->port = port; + io_req->cmd_mgr = cmd_mgr; + io_req->req_flags = 0; + + /* Bind io_bdt for this io_req */ + /* Have a static link between io_req and io_bdt_pool */ + bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; + bd_tbl->io_req = io_req; + + /* Hold the io_req against deletion */ + kref_init(&io_req->refcount); + return io_req; +} + +void bnx2fc_cmd_release(struct kref *ref) +{ + struct bnx2fc_cmd *io_req = container_of(ref, + struct bnx2fc_cmd, refcount); + struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr; + int index; + + if (io_req->cmd_type == BNX2FC_SCSI_CMD) + index = io_req->xid % num_possible_cpus(); + else + index = RESERVE_FREE_LIST_INDEX; + + + spin_lock_bh(&cmd_mgr->free_list_lock[index]); + if (io_req->cmd_type != BNX2FC_SCSI_CMD) + bnx2fc_free_mp_resc(io_req); + cmd_mgr->cmds[io_req->xid] = NULL; + /* Delete IO from retire queue */ + list_del_init(&io_req->link); + /* Add it to the free list */ + list_add(&io_req->link, + &cmd_mgr->free_list[index]); + atomic_dec(&io_req->tgt->num_active_ios); + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); + +} + +static void bnx2fc_free_mp_resc(struct bnx2fc_cmd 
*io_req) +{ + struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); + struct bnx2fc_interface *interface = io_req->port->priv; + struct bnx2fc_hba *hba = interface->hba; + size_t sz = sizeof(struct fcoe_bd_ctx); + + /* clear tm flags */ + mp_req->tm_flags = 0; + if (mp_req->mp_req_bd) { + dma_free_coherent(&hba->pcidev->dev, sz, + mp_req->mp_req_bd, + mp_req->mp_req_bd_dma); + mp_req->mp_req_bd = NULL; + } + if (mp_req->mp_resp_bd) { + dma_free_coherent(&hba->pcidev->dev, sz, + mp_req->mp_resp_bd, + mp_req->mp_resp_bd_dma); + mp_req->mp_resp_bd = NULL; + } + if (mp_req->req_buf) { + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + mp_req->req_buf, + mp_req->req_buf_dma); + mp_req->req_buf = NULL; + } + if (mp_req->resp_buf) { + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + mp_req->resp_buf, + mp_req->resp_buf_dma); + mp_req->resp_buf = NULL; + } +} + +int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) +{ + struct bnx2fc_mp_req *mp_req; + struct fcoe_bd_ctx *mp_req_bd; + struct fcoe_bd_ctx *mp_resp_bd; + struct bnx2fc_interface *interface = io_req->port->priv; + struct bnx2fc_hba *hba = interface->hba; + dma_addr_t addr; + size_t sz; + + mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req); + memset(mp_req, 0, sizeof(struct bnx2fc_mp_req)); + + if (io_req->cmd_type != BNX2FC_ELS) { + mp_req->req_len = sizeof(struct fcp_cmnd); + io_req->data_xfer_len = mp_req->req_len; + } else + mp_req->req_len = io_req->data_xfer_len; + + mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + &mp_req->req_buf_dma, + GFP_ATOMIC); + if (!mp_req->req_buf) { + printk(KERN_ERR PFX "unable to alloc MP req buffer\n"); + bnx2fc_free_mp_resc(io_req); + return FAILED; + } + + mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + &mp_req->resp_buf_dma, + GFP_ATOMIC); + if (!mp_req->resp_buf) { + printk(KERN_ERR PFX "unable to alloc TM resp buffer\n"); + bnx2fc_free_mp_resc(io_req); + return FAILED; + } + memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE); + memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE); + + /* Allocate and map mp_req_bd and mp_resp_bd */ + sz = sizeof(struct fcoe_bd_ctx); + mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz, + &mp_req->mp_req_bd_dma, + GFP_ATOMIC); + if (!mp_req->mp_req_bd) { + printk(KERN_ERR PFX "unable to alloc MP req bd\n"); + bnx2fc_free_mp_resc(io_req); + return FAILED; + } + mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz, + &mp_req->mp_resp_bd_dma, + GFP_ATOMIC); + if (!mp_req->mp_resp_bd) { + printk(KERN_ERR PFX "unable to alloc MP resp bd\n"); + bnx2fc_free_mp_resc(io_req); + return FAILED; + } + /* Fill bd table */ + addr = mp_req->req_buf_dma; + mp_req_bd = mp_req->mp_req_bd; + mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff; + mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32); + mp_req_bd->buf_len = CNIC_PAGE_SIZE; + mp_req_bd->flags = 0; + + /* + * MP buffer is either a task mgmt command or an ELS. 
+ * So the assumption is that it consumes a single bd + * entry in the bd table + */ + mp_resp_bd = mp_req->mp_resp_bd; + addr = mp_req->resp_buf_dma; + mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; + mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); + mp_resp_bd->buf_len = CNIC_PAGE_SIZE; + mp_resp_bd->flags = 0; + + return SUCCESS; +} + +static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags) +{ + struct fc_lport *lport; + struct fc_rport *rport; + struct fc_rport_libfc_priv *rp; + struct fcoe_port *port; + struct bnx2fc_interface *interface; + struct bnx2fc_rport *tgt; + struct bnx2fc_cmd *io_req; + struct bnx2fc_mp_req *tm_req; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct Scsi_Host *host = sc_cmd->device->host; + struct fc_frame_header *fc_hdr; + struct fcp_cmnd *fcp_cmnd; + int task_idx, index; + int rc = SUCCESS; + u16 xid; + u32 sid, did; + unsigned long start = jiffies; + + lport = shost_priv(host); + rport = starget_to_rport(scsi_target(sc_cmd->device)); + port = lport_priv(lport); + interface = port->priv; + + if (rport == NULL) { + printk(KERN_ERR PFX "device_reset: rport is NULL\n"); + rc = FAILED; + goto tmf_err; + } + rp = rport->dd_data; + + rc = fc_block_scsi_eh(sc_cmd); + if (rc) + return rc; + + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + printk(KERN_ERR PFX "device_reset: link is not ready\n"); + rc = FAILED; + goto tmf_err; + } + /* rport and tgt are allocated together, so tgt should be non-NULL */ + tgt = (struct bnx2fc_rport *)&rp[1]; + + if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { + printk(KERN_ERR PFX "device_reset: tgt not offloaded\n"); + rc = FAILED; + goto tmf_err; + } +retry_tmf: + io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD); + if (!io_req) { + if (time_after(jiffies, start + HZ)) { + printk(KERN_ERR PFX "tmf: Failed TMF"); + rc = FAILED; + goto tmf_err; + } + msleep(20); + goto retry_tmf; + } + /* Initialize rest of io_req fields */ + io_req->sc_cmd = sc_cmd; + io_req->port = port; + io_req->tgt = tgt; + + tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req); + + rc = bnx2fc_init_mp_req(io_req); + if (rc == FAILED) { + printk(KERN_ERR PFX "Task mgmt MP request init failed\n"); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + goto tmf_err; + } + + /* Set TM flags */ + io_req->io_req_flags = 0; + tm_req->tm_flags = tm_flags; + + /* Fill FCP_CMND */ + bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf); + fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf; + memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len); + fcp_cmnd->fc_dl = 0; + + /* Fill FC header */ + fc_hdr = &(tm_req->req_fc_hdr); + sid = tgt->sid; + did = rport->port_id; + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid, + FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + /* Obtain exchange id */ + xid = io_req->xid; + + BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid); + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; + task = &(task_page[index]); + bnx2fc_init_mp_task(io_req, task); + + bnx2fc_priv(sc_cmd)->io_req = io_req; + + /* Obtain free SQ entry */ + spin_lock_bh(&tgt->tgt_lock); + bnx2fc_add_2_sq(tgt, xid); + + /* Enqueue the io_req to active_tm_queue */ + io_req->on_tmf_queue = 1; + list_add_tail(&io_req->link, 
&tgt->active_tm_queue); + + init_completion(&io_req->abts_done); + io_req->wait_for_abts_comp = 1; + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + spin_unlock_bh(&tgt->tgt_lock); + + rc = wait_for_completion_timeout(&io_req->abts_done, + interface->tm_timeout * HZ); + spin_lock_bh(&tgt->tgt_lock); + + io_req->wait_for_abts_comp = 0; + if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) { + set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags); + if (io_req->on_tmf_queue) { + list_del_init(&io_req->link); + io_req->on_tmf_queue = 0; + } + io_req->wait_for_cleanup_comp = 1; + init_completion(&io_req->cleanup_done); + bnx2fc_initiate_cleanup(io_req); + spin_unlock_bh(&tgt->tgt_lock); + rc = wait_for_completion_timeout(&io_req->cleanup_done, + BNX2FC_FW_TIMEOUT); + spin_lock_bh(&tgt->tgt_lock); + io_req->wait_for_cleanup_comp = 0; + if (!rc) + kref_put(&io_req->refcount, bnx2fc_cmd_release); + } + + spin_unlock_bh(&tgt->tgt_lock); + + if (!rc) { + BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n"); + rc = FAILED; + } else { + BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n"); + rc = SUCCESS; + } +tmf_err: + return rc; +} + +int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req) +{ + struct fc_lport *lport; + struct bnx2fc_rport *tgt = io_req->tgt; + struct fc_rport *rport = tgt->rport; + struct fc_rport_priv *rdata = tgt->rdata; + struct bnx2fc_interface *interface; + struct fcoe_port *port; + struct bnx2fc_cmd *abts_io_req; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct fc_frame_header *fc_hdr; + struct bnx2fc_mp_req *abts_req; + int task_idx, index; + u32 sid, did; + u16 xid; + int rc = SUCCESS; + u32 r_a_tov = rdata->r_a_tov; + + /* called with tgt_lock held */ + BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n"); + + port = io_req->port; + interface = port->priv; + lport = port->lport; + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { + printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n"); + rc = FAILED; + goto abts_err; + } + + if (rport == NULL) { + printk(KERN_ERR PFX "initiate_abts: rport is NULL\n"); + rc = FAILED; + goto abts_err; + } + + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + printk(KERN_ERR PFX "initiate_abts: link is not ready\n"); + rc = FAILED; + goto abts_err; + } + + abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS); + if (!abts_io_req) { + printk(KERN_ERR PFX "abts: couldn't allocate cmd\n"); + rc = FAILED; + goto abts_err; + } + + /* Initialize rest of io_req fields */ + abts_io_req->sc_cmd = NULL; + abts_io_req->port = port; + abts_io_req->tgt = tgt; + abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */ + + abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req); + memset(abts_req, 0, sizeof(struct bnx2fc_mp_req)); + + /* Fill FC header */ + fc_hdr = &(abts_req->req_fc_hdr); + + /* Obtain oxid and rxid for the original exchange to be aborted */ + fc_hdr->fh_ox_id = htons(io_req->xid); + fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id); + + sid = tgt->sid; + did = rport->port_id; + + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid, + FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + + xid = abts_io_req->xid; + BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n"); + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; + task = &(task_page[index]); + 
bnx2fc_init_mp_task(abts_io_req, task); + + /* + * ABTS task is a temporary task that will be cleaned up + * irrespective of ABTS response. We need to start the timer + * for the original exchange, as the CQE is posted for the original + * IO request. + * + * Timer for ABTS is started only when it is originated by a + * TM request. For the ABTS issued as part of ULP timeout, + * scsi-ml maintains the timers. + */ + + /* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/ + bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov); + + /* Obtain free SQ entry */ + bnx2fc_add_2_sq(tgt, xid); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + +abts_err: + return rc; +} + +int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset, + enum fc_rctl r_ctl) +{ + struct bnx2fc_rport *tgt = orig_io_req->tgt; + struct bnx2fc_interface *interface; + struct fcoe_port *port; + struct bnx2fc_cmd *seq_clnp_req; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct bnx2fc_els_cb_arg *cb_arg = NULL; + int task_idx, index; + u16 xid; + int rc = 0; + + BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n", + orig_io_req->xid); + kref_get(&orig_io_req->refcount); + + port = orig_io_req->port; + interface = port->priv; + + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n"); + rc = -ENOMEM; + goto cleanup_err; + } + + seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP); + if (!seq_clnp_req) { + printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n"); + rc = -ENOMEM; + kfree(cb_arg); + goto cleanup_err; + } + /* Initialize rest of io_req fields */ + seq_clnp_req->sc_cmd = NULL; + seq_clnp_req->port = port; + seq_clnp_req->tgt = tgt; + seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */ + + xid = seq_clnp_req->xid; + + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; + task = &(task_page[index]); + cb_arg->aborted_io_req = orig_io_req; + cb_arg->io_req = seq_clnp_req; + cb_arg->r_ctl = r_ctl; + cb_arg->offset = offset; + seq_clnp_req->cb_arg = cb_arg; + + printk(KERN_ERR PFX "call init_seq_cleanup_task\n"); + bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset); + + /* Obtain free SQ entry */ + bnx2fc_add_2_sq(tgt, xid); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); +cleanup_err: + return rc; +} + +int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req) +{ + struct bnx2fc_rport *tgt = io_req->tgt; + struct bnx2fc_interface *interface; + struct fcoe_port *port; + struct bnx2fc_cmd *cleanup_io_req; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + int task_idx, index; + u16 xid, orig_xid; + int rc = 0; + + /* ASSUMPTION: called with tgt_lock held */ + BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n"); + + port = io_req->port; + interface = port->priv; + + cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP); + if (!cleanup_io_req) { + printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n"); + rc = -1; + goto cleanup_err; + } + + /* Initialize rest of io_req fields */ + cleanup_io_req->sc_cmd = NULL; + cleanup_io_req->port = port; + cleanup_io_req->tgt = tgt; + cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */ + + xid = cleanup_io_req->xid; + + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % 
BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; + task = &(task_page[index]); + orig_xid = io_req->xid; + + BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid); + + bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid); + + /* Obtain free SQ entry */ + bnx2fc_add_2_sq(tgt, xid); + + /* Set flag that cleanup request is pending with the firmware */ + set_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + +cleanup_err: + return rc; +} + +/** + * bnx2fc_eh_target_reset: Reset a target + * + * @sc_cmd: SCSI command + * + * Set from SCSI host template to send task mgmt command to the target + * and wait for the response + */ +int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd) +{ + return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET); +} + +/** + * bnx2fc_eh_device_reset - Reset a single LUN + * + * @sc_cmd: SCSI command + * + * Set from SCSI host template to send task mgmt command to the target + * and wait for the response + */ +int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd) +{ + return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); +} + +static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req) + __must_hold(&tgt->tgt_lock) +{ + struct bnx2fc_rport *tgt = io_req->tgt; + unsigned int time_left; + + init_completion(&io_req->cleanup_done); + io_req->wait_for_cleanup_comp = 1; + bnx2fc_initiate_cleanup(io_req); + + spin_unlock_bh(&tgt->tgt_lock); + + /* + * Can't wait forever on cleanup response lest we let the SCSI error + * handler wait forever + */ + time_left = wait_for_completion_timeout(&io_req->cleanup_done, + BNX2FC_FW_TIMEOUT); + if (!time_left) { + BNX2FC_IO_DBG(io_req, "%s(): Wait for cleanup timed out.\n", + __func__); + + /* + * Put the extra reference to the SCSI command since it would + * not have been returned in this case. + */ + kref_put(&io_req->refcount, bnx2fc_cmd_release); + } + + spin_lock_bh(&tgt->tgt_lock); + io_req->wait_for_cleanup_comp = 0; + return SUCCESS; +} + +/** + * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding + * SCSI command + * + * @sc_cmd: SCSI_ML command pointer + * + * SCSI abort request handler + */ +int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct bnx2fc_cmd *io_req; + struct fc_lport *lport; + struct bnx2fc_rport *tgt; + int rc; + unsigned int time_left; + + rc = fc_block_scsi_eh(sc_cmd); + if (rc) + return rc; + + lport = shost_priv(sc_cmd->device->host); + if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { + printk(KERN_ERR PFX "eh_abort: link not ready\n"); + return FAILED; + } + + tgt = (struct bnx2fc_rport *)&rp[1]; + + BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n"); + + spin_lock_bh(&tgt->tgt_lock); + io_req = bnx2fc_priv(sc_cmd)->io_req; + if (!io_req) { + /* Command might have just completed */ + printk(KERN_ERR PFX "eh_abort: io_req is NULL\n"); + spin_unlock_bh(&tgt->tgt_lock); + return SUCCESS; + } + BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n", + kref_read(&io_req->refcount)); + + /* Hold IO request across abort processing */ + kref_get(&io_req->refcount); + + BUG_ON(tgt != io_req->tgt); + + /* Remove the io_req from the active_q. */ + /* + * Task Mgmt functions (LUN RESET & TGT RESET) will not + * issue an ABTS on this particular IO req, as the + * io_req is no longer in the active_q. 
+ */ + if (tgt->flush_in_prog) { + printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " + "flush in progress\n", io_req->xid); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return SUCCESS; + } + + if (io_req->on_active_queue == 0) { + printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " + "not on active_q\n", io_req->xid); + /* + * The IO is still with the FW. + * Return failure and let SCSI-ml retry eh_abort. + */ + spin_unlock_bh(&tgt->tgt_lock); + return FAILED; + } + + /* + * Only eh_abort processing will remove the IO from + * active_cmd_q before processing the request. this is + * done to avoid race conditions between IOs aborted + * as part of task management completion and eh_abort + * processing + */ + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + /* Move IO req to retire queue */ + list_add_tail(&io_req->link, &tgt->io_retire_queue); + + init_completion(&io_req->abts_done); + init_completion(&io_req->cleanup_done); + + if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { + printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " + "already in abts processing\n", io_req->xid); + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + /* + * We don't want to hold off the upper layer timer so simply + * cleanup the command and return that I/O was successfully + * aborted. + */ + bnx2fc_abts_cleanup(io_req); + /* This only occurs when an task abort was requested while ABTS + is in progress. Setting the IO_CLEANUP flag will skip the + RRQ process in the case when the fw generated SCSI_CMD cmpl + was a result from the ABTS request rather than the CLEANUP + request */ + set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); + rc = FAILED; + goto done; + } + + /* Cancel the current timer running on this io_req */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags); + io_req->wait_for_abts_comp = 1; + rc = bnx2fc_initiate_abts(io_req); + if (rc == FAILED) { + io_req->wait_for_cleanup_comp = 1; + bnx2fc_initiate_cleanup(io_req); + spin_unlock_bh(&tgt->tgt_lock); + wait_for_completion(&io_req->cleanup_done); + spin_lock_bh(&tgt->tgt_lock); + io_req->wait_for_cleanup_comp = 0; + goto done; + } + spin_unlock_bh(&tgt->tgt_lock); + + /* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */ + time_left = wait_for_completion_timeout(&io_req->abts_done, + msecs_to_jiffies(2 * rp->r_a_tov + 1)); + if (time_left) + BNX2FC_IO_DBG(io_req, + "Timed out in eh_abort waiting for abts_done"); + + spin_lock_bh(&tgt->tgt_lock); + io_req->wait_for_abts_comp = 0; + if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "IO completed in a different context\n"); + rc = SUCCESS; + } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags))) { + /* Let the scsi-ml try to recover this command */ + printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", + io_req->xid); + /* + * Cleanup firmware residuals before returning control back + * to SCSI ML. + */ + rc = bnx2fc_abts_cleanup(io_req); + goto done; + } else { + /* + * We come here even when there was a race condition + * between timeout and abts completion, and abts + * completion happens just in time. 
+ */ + BNX2FC_IO_DBG(io_req, "abort succeeded\n"); + rc = SUCCESS; + bnx2fc_scsi_done(io_req, DID_ABORT); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + } +done: + /* release the reference taken in eh_abort */ + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return rc; +} + +void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req, + struct fcoe_task_ctx_entry *task, + u8 rx_state) +{ + struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg; + struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req; + u32 offset = cb_arg->offset; + enum fc_rctl r_ctl = cb_arg->r_ctl; + int rc = 0; + struct bnx2fc_rport *tgt = orig_io_req->tgt; + + BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x" + "cmd_type = %d\n", + seq_clnp_req->xid, seq_clnp_req->cmd_type); + + if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) { + printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n", + seq_clnp_req->xid); + goto free_cb_arg; + } + + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl); + spin_lock_bh(&tgt->tgt_lock); + + if (rc) + printk(KERN_ERR PFX "clnup_compl: Unable to send SRR" + " IO will abort\n"); + seq_clnp_req->cb_arg = NULL; + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); +free_cb_arg: + kfree(cb_arg); + return; +} + +void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq) +{ + BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl " + "refcnt = %d, cmd_type = %d\n", + kref_read(&io_req->refcount), io_req->cmd_type); + /* + * Test whether there is a cleanup request pending. If not just + * exit. + */ + if (!test_and_clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, + &io_req->req_flags)) + return; + /* + * If we receive a cleanup completion for this request then the + * firmware will not give us an abort completion for this request + * so clear any ABTS pending flags. + */ + if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags) && + !test_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags)) { + set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags); + if (io_req->wait_for_abts_comp) + complete(&io_req->abts_done); + } + + bnx2fc_scsi_done(io_req, DID_ERROR); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + if (io_req->wait_for_cleanup_comp) + complete(&io_req->cleanup_done); +} + +void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq) +{ + u32 r_ctl; + u32 r_a_tov = FC_DEF_R_A_TOV; + u8 issue_rrq = 0; + struct bnx2fc_rport *tgt = io_req->tgt; + + BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x" + "refcnt = %d, cmd_type = %d\n", + io_req->xid, + kref_read(&io_req->refcount), io_req->cmd_type); + + if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "Timer context finished processing" + " this io\n"); + return; + } + + /* + * If we receive an ABTS completion here then we will not receive + * a cleanup completion so clear any cleanup pending flags. + */ + if (test_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags)) { + clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags); + if (io_req->wait_for_cleanup_comp) + complete(&io_req->cleanup_done); + } + + /* Do not issue RRQ as this IO is already cleanedup */ + if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP, + &io_req->req_flags)) + goto io_compl; + + /* + * For ABTS issued due to SCSI eh_abort_handler, timeout + * values are maintained by scsi-ml itself. 
Cancel timeout + * in case ABTS issued as part of task management function + * or due to FW error. + */ + if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + + r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl; + + switch (r_ctl) { + case FC_RCTL_BA_ACC: + /* + * Dont release this cmd yet. It will be relesed + * after we get RRQ response + */ + BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n"); + issue_rrq = 1; + break; + + case FC_RCTL_BA_RJT: + BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n"); + break; + default: + printk(KERN_ERR PFX "Unknown ABTS response\n"); + break; + } + + if (issue_rrq) { + BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n"); + set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags); + } + set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags); + bnx2fc_cmd_timer_set(io_req, r_a_tov); + +io_compl: + if (io_req->wait_for_abts_comp) { + if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, + &io_req->req_flags)) + complete(&io_req->abts_done); + } else { + /* + * We end up here when ABTS is issued as + * in asynchronous context, i.e., as part + * of task management completion, or + * when FW error is received or when the + * ABTS is issued when the IO is timed + * out. + */ + + if (io_req->on_active_queue) { + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + /* Move IO req to retire queue */ + list_add_tail(&io_req->link, &tgt->io_retire_queue); + } + bnx2fc_scsi_done(io_req, DID_ERROR); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + } +} + +static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct bnx2fc_rport *tgt = io_req->tgt; + struct bnx2fc_cmd *cmd, *tmp; + u64 tm_lun = sc_cmd->device->lun; + u64 lun; + int rc = 0; + + /* called with tgt_lock held */ + BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n"); + /* + * Walk thru the active_ios queue and ABORT the IO + * that matches with the LUN that was reset + */ + list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) { + BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n"); + lun = cmd->sc_cmd->device->lun; + if (lun == tm_lun) { + /* Initiate ABTS on this cmd */ + if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, + &cmd->req_flags)) { + /* cancel the IO timeout */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); + /* timer hold */ + rc = bnx2fc_initiate_abts(cmd); + /* abts shouldn't fail in this context */ + WARN_ON(rc != SUCCESS); + } else + printk(KERN_ERR PFX "lun_rst: abts already in" + " progress for this IO 0x%x\n", + cmd->xid); + } + } +} + +static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req) +{ + struct bnx2fc_rport *tgt = io_req->tgt; + struct bnx2fc_cmd *cmd, *tmp; + int rc = 0; + + /* called with tgt_lock held */ + BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n"); + /* + * Walk thru the active_ios queue and ABORT the IO + * that matches with the LUN that was reset + */ + list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) { + BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n"); + /* Initiate ABTS */ + if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, + &cmd->req_flags)) { + /* cancel the IO timeout */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* timer hold */ + rc = bnx2fc_initiate_abts(cmd); + /* abts shouldn't fail in this 
context */ + WARN_ON(rc != SUCCESS); + + } else + printk(KERN_ERR PFX "tgt_rst: abts already in progress" + " for this IO 0x%x\n", cmd->xid); + } +} + +void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, u8 num_rq, + unsigned char *rq_data) +{ + struct bnx2fc_mp_req *tm_req; + struct fc_frame_header *fc_hdr; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + u64 *hdr; + u64 *temp_hdr; + void *rsp_buf; + + /* Called with tgt_lock held */ + BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n"); + + if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags))) + set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags); + else { + /* TM has already timed out and we got + * delayed completion. Ignore completion + * processing. + */ + return; + } + + tm_req = &(io_req->mp_req); + fc_hdr = &(tm_req->resp_fc_hdr); + hdr = (u64 *)fc_hdr; + temp_hdr = (u64 *) + &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr; + hdr[0] = cpu_to_be64(temp_hdr[0]); + hdr[1] = cpu_to_be64(temp_hdr[1]); + hdr[2] = cpu_to_be64(temp_hdr[2]); + + tm_req->resp_len = + task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len; + + rsp_buf = tm_req->resp_buf; + + if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) { + bnx2fc_parse_fcp_rsp(io_req, + (struct fcoe_fcp_rsp_payload *) + rsp_buf, num_rq, rq_data); + if (io_req->fcp_rsp_code == 0) { + /* TM successful */ + if (tm_req->tm_flags & FCP_TMF_LUN_RESET) + bnx2fc_lun_reset_cmpl(io_req); + else if (tm_req->tm_flags & FCP_TMF_TGT_RESET) + bnx2fc_tgt_reset_cmpl(io_req); + } + } else { + printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n", + fc_hdr->fh_r_ctl); + } + if (!bnx2fc_priv(sc_cmd)->io_req) { + printk(KERN_ERR PFX "tm_compl: io_req is NULL\n"); + return; + } + switch (io_req->fcp_status) { + case FC_GOOD: + if (io_req->cdb_status == 0) { + /* Good IO completion */ + sc_cmd->result = DID_OK << 16; + } else { + /* Transport status is good, SCSI status not good */ + sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + } + if (io_req->fcp_resid) + scsi_set_resid(sc_cmd, io_req->fcp_resid); + break; + + default: + BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n", + io_req->fcp_status); + break; + } + + sc_cmd = io_req->sc_cmd; + io_req->sc_cmd = NULL; + + /* check if the io_req exists in tgt's tmf_q */ + if (io_req->on_tmf_queue) { + + list_del_init(&io_req->link); + io_req->on_tmf_queue = 0; + } else { + + printk(KERN_ERR PFX "Command not on active_cmd_queue!\n"); + return; + } + + bnx2fc_priv(sc_cmd)->io_req = NULL; + scsi_done(sc_cmd); + + kref_put(&io_req->refcount, bnx2fc_cmd_release); + if (io_req->wait_for_abts_comp) { + BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n"); + complete(&io_req->abts_done); + } +} + +static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, + int bd_index) +{ + struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; + int frag_size, sg_frags; + + sg_frags = 0; + while (sg_len) { + if (sg_len >= BNX2FC_BD_SPLIT_SZ) + frag_size = BNX2FC_BD_SPLIT_SZ; + else + frag_size = sg_len; + bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff; + bd[bd_index + sg_frags].buf_addr_hi = addr >> 32; + bd[bd_index + sg_frags].buf_len = (u16)frag_size; + bd[bd_index + sg_frags].flags = 0; + + addr += (u64) frag_size; + sg_frags++; + sg_len -= frag_size; + } + return sg_frags; + +} + +static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req) +{ + struct bnx2fc_interface *interface = io_req->port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct scsi_cmnd *sc = io_req->sc_cmd; + struct fcoe_bd_ctx *bd 
= io_req->bd_tbl->bd_tbl; + struct scatterlist *sg; + int byte_count = 0; + int sg_count = 0; + int bd_count = 0; + int sg_frags; + unsigned int sg_len; + u64 addr; + int i; + + WARN_ON(scsi_sg_count(sc) > BNX2FC_MAX_BDS_PER_CMD); + /* + * Use dma_map_sg directly to ensure we're using the correct + * dev struct off of pcidev. + */ + sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc), + scsi_sg_count(sc), sc->sc_data_direction); + scsi_for_each_sg(sc, sg, sg_count, i) { + sg_len = sg_dma_len(sg); + addr = sg_dma_address(sg); + if (sg_len > BNX2FC_MAX_BD_LEN) { + sg_frags = bnx2fc_split_bd(io_req, addr, sg_len, + bd_count); + } else { + + sg_frags = 1; + bd[bd_count].buf_addr_lo = addr & 0xffffffff; + bd[bd_count].buf_addr_hi = addr >> 32; + bd[bd_count].buf_len = (u16)sg_len; + bd[bd_count].flags = 0; + } + bd_count += sg_frags; + byte_count += sg_len; + } + if (byte_count != scsi_bufflen(sc)) + printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, " + "task_id = 0x%x\n", byte_count, scsi_bufflen(sc), + io_req->xid); + return bd_count; +} + +static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req) +{ + struct scsi_cmnd *sc = io_req->sc_cmd; + struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; + int bd_count; + + if (scsi_sg_count(sc)) { + bd_count = bnx2fc_map_sg(io_req); + if (bd_count == 0) + return -ENOMEM; + } else { + bd_count = 0; + bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0; + bd[0].buf_len = bd[0].flags = 0; + } + io_req->bd_tbl->bd_valid = bd_count; + + /* + * Return the command to ML if BD count exceeds the max number + * that can be handled by FW. + */ + if (bd_count > BNX2FC_FW_MAX_BDS_PER_CMD) { + pr_err("bd_count = %d exceeded FW supported max BD(255), task_id = 0x%x\n", + bd_count, io_req->xid); + return -ENOMEM; + } + + return 0; +} + +static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req) +{ + struct scsi_cmnd *sc = io_req->sc_cmd; + struct bnx2fc_interface *interface = io_req->port->priv; + struct bnx2fc_hba *hba = interface->hba; + + /* + * Use dma_unmap_sg directly to ensure we're using the correct + * dev struct off of pcidev. 
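/*
 * Illustrative sketch (not part of the patch): the BD handling in
 * bnx2fc_split_bd()/bnx2fc_map_sg() above reduces to carving any SG element
 * longer than the per-BD limit into fixed-size fragments and storing each
 * fragment's 64-bit DMA address as two 32-bit halves.  MAX_BD_LEN and
 * struct bd_entry below are assumed stand-ins, not the driver's real
 * definitions.
 */
#include <linux/kernel.h>
#include <linux/types.h>

#define MAX_BD_LEN	0xffff			/* assumed per-BD byte limit */

struct bd_entry {
	u32 addr_lo;
	u32 addr_hi;
	u16 len;
	u16 flags;
};

static int fill_bds(struct bd_entry *bd, u64 addr, u32 len)
{
	int n = 0;

	while (len) {
		u32 frag = min_t(u32, len, MAX_BD_LEN);

		bd[n].addr_lo = lower_32_bits(addr);	/* low 32 bits of DMA address */
		bd[n].addr_hi = upper_32_bits(addr);	/* high 32 bits */
		bd[n].len = (u16)frag;
		bd[n].flags = 0;
		addr += frag;
		len -= frag;
		n++;
	}
	return n;			/* BDs consumed for this SG element */
}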
+ */ + if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) { + dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc), + scsi_sg_count(sc), sc->sc_data_direction); + io_req->bd_tbl->bd_valid = 0; + } +} + +void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req, + struct fcp_cmnd *fcp_cmnd) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + + memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); + + int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun); + + fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len); + memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); + + fcp_cmnd->fc_cmdref = 0; + fcp_cmnd->fc_pri_ta = 0; + fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; + fcp_cmnd->fc_flags = io_req->io_req_flags; + fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; +} + +static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, + struct fcoe_fcp_rsp_payload *fcp_rsp, + u8 num_rq, unsigned char *rq_data) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + u8 rsp_flags = fcp_rsp->fcp_flags.flags; + u32 rq_buff_len = 0; + int fcp_sns_len = 0; + int fcp_rsp_len = 0; + + io_req->fcp_status = FC_GOOD; + io_req->fcp_resid = 0; + if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER | + FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER)) + io_req->fcp_resid = fcp_rsp->fcp_resid; + + io_req->scsi_comp_flags = rsp_flags; + io_req->cdb_status = fcp_rsp->scsi_status_code; + + /* Fetch fcp_rsp_info and fcp_sns_info if available */ + if (num_rq) { + + /* + * We do not anticipate num_rq >1, as the linux defined + * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO + * 256 bytes of single rq buffer is good enough to hold this. + */ + + if (rsp_flags & + FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) { + fcp_rsp_len = rq_buff_len + = fcp_rsp->fcp_rsp_len; + } + + if (rsp_flags & + FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) { + fcp_sns_len = fcp_rsp->fcp_sns_len; + rq_buff_len += fcp_rsp->fcp_sns_len; + } + + io_req->fcp_rsp_len = fcp_rsp_len; + io_req->fcp_sns_len = fcp_sns_len; + + if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) { + /* Invalid sense sense length. 
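/*
 * Illustrative sketch (not part of the patch): when the FCP_RSP around this
 * point carries extra data, the RQ buffer is laid out as an optional
 * FCP_RSP_INFO block (fcp_rsp_len bytes, response code in byte 3) followed
 * by optional sense bytes, clamped to SCSI_SENSE_BUFFERSIZE.  The helper
 * below is an assumed distillation of that parsing, not a driver function.
 */
#include <linux/string.h>
#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

static void copy_rsp_info_and_sense(const unsigned char *rq_data,
				    int fcp_rsp_len, int fcp_sns_len,
				    unsigned char *sense, u8 *rsp_code)
{
	if (fcp_rsp_len == 4 || fcp_rsp_len == 8)
		*rsp_code = rq_data[3];	/* response code, TMF responses only */

	rq_data += fcp_rsp_len;		/* sense bytes follow FCP_RSP_INFO */

	if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE)
		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;

	memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
	if (fcp_sns_len)
		memcpy(sense, rq_data, fcp_sns_len);
}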
*/ + printk(KERN_ERR PFX "invalid sns length %d\n", + rq_buff_len); + /* reset rq_buff_len */ + rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ; + } + + /* fetch fcp_rsp_code */ + if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) { + /* Only for task management function */ + io_req->fcp_rsp_code = rq_data[3]; + BNX2FC_IO_DBG(io_req, "fcp_rsp_code = %d\n", + io_req->fcp_rsp_code); + } + + /* fetch sense data */ + rq_data += fcp_rsp_len; + + if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) { + printk(KERN_ERR PFX "Truncating sense buffer\n"); + fcp_sns_len = SCSI_SENSE_BUFFERSIZE; + } + + memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + if (fcp_sns_len) + memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len); + + } +} + +/** + * bnx2fc_queuecommand - Queuecommand function of the scsi template + * + * @host: The Scsi_Host the command was issued to + * @sc_cmd: struct scsi_cmnd to be executed + * + * This is the IO strategy routine, called by SCSI-ML + **/ +int bnx2fc_queuecommand(struct Scsi_Host *host, + struct scsi_cmnd *sc_cmd) +{ + struct fc_lport *lport = shost_priv(host); + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct bnx2fc_rport *tgt; + struct bnx2fc_cmd *io_req; + int rc = 0; + int rval; + + rval = fc_remote_port_chkready(rport); + if (rval) { + sc_cmd->result = rval; + scsi_done(sc_cmd); + return 0; + } + + if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd; + } + + /* rport and tgt are allocated together, so tgt should be non-NULL */ + tgt = (struct bnx2fc_rport *)&rp[1]; + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { + /* + * Session is not offloaded yet. Let SCSI-ml retry + * the command. + */ + rc = SCSI_MLQUEUE_TARGET_BUSY; + goto exit_qcmd; + } + if (tgt->retry_delay_timestamp) { + if (time_after(jiffies, tgt->retry_delay_timestamp)) { + tgt->retry_delay_timestamp = 0; + } else { + /* If retry_delay timer is active, flow off the ML */ + rc = SCSI_MLQUEUE_TARGET_BUSY; + goto exit_qcmd; + } + } + + spin_lock_bh(&tgt->tgt_lock); + + io_req = bnx2fc_cmd_alloc(tgt); + if (!io_req) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd_tgtlock; + } + io_req->sc_cmd = sc_cmd; + + if (bnx2fc_post_io_req(tgt, io_req)) { + printk(KERN_ERR PFX "Unable to post io_req\n"); + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd_tgtlock; + } + +exit_qcmd_tgtlock: + spin_unlock_bh(&tgt->tgt_lock); +exit_qcmd: + return rc; +} + +void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq, unsigned char *rq_data) +{ + struct fcoe_fcp_rsp_payload *fcp_rsp; + struct bnx2fc_rport *tgt = io_req->tgt; + struct scsi_cmnd *sc_cmd; + u16 scope = 0, qualifier = 0; + + /* scsi_cmd_cmpl is called with tgt lock held */ + + if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { + /* we will not receive ABTS response for this IO */ + BNX2FC_IO_DBG(io_req, "Timer context finished processing " + "this scsi cmd\n"); + if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, + &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, + "Actual completion after cleanup request cleaning up\n"); + bnx2fc_process_cleanup_compl(io_req, task, num_rq); + } + return; + } + + /* Cancel the timeout_work, as we received IO completion */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + + sc_cmd = io_req->sc_cmd; + if (sc_cmd == NULL) { + printk(KERN_ERR PFX "scsi_cmd_compl - 
sc_cmd is NULL\n"); + return; + } + + /* Fetch fcp_rsp from task context and perform cmd completion */ + fcp_rsp = (struct fcoe_fcp_rsp_payload *) + &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload); + + /* parse fcp_rsp and obtain sense data from RQ if available */ + bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data); + + if (!bnx2fc_priv(sc_cmd)->io_req) { + printk(KERN_ERR PFX "io_req is NULL\n"); + return; + } + + if (io_req->on_active_queue) { + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + /* Move IO req to retire queue */ + list_add_tail(&io_req->link, &tgt->io_retire_queue); + } else { + /* This should not happen, but could have been pulled + * by bnx2fc_flush_active_ios(), or during a race + * between command abort and (late) completion. + */ + BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n"); + if (io_req->wait_for_abts_comp) + if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, + &io_req->req_flags)) + complete(&io_req->abts_done); + } + + bnx2fc_unmap_sg_list(io_req); + io_req->sc_cmd = NULL; + + switch (io_req->fcp_status) { + case FC_GOOD: + if (io_req->cdb_status == 0) { + /* Good IO completion */ + sc_cmd->result = DID_OK << 16; + } else { + /* Transport status is good, SCSI status not good */ + BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d" + " fcp_resid = 0x%x\n", + io_req->cdb_status, io_req->fcp_resid); + sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + + if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL || + io_req->cdb_status == SAM_STAT_BUSY) { + /* Newer array firmware with BUSY or + * TASK_SET_FULL may return a status that needs + * the scope bits masked. + * Or a huge delay timestamp up to 27 minutes + * can result. + */ + if (fcp_rsp->retry_delay_timer) { + /* Upper 2 bits */ + scope = fcp_rsp->retry_delay_timer + & 0xC000; + /* Lower 14 bits */ + qualifier = fcp_rsp->retry_delay_timer + & 0x3FFF; + } + if (scope > 0 && qualifier > 0 && + qualifier <= 0x3FEF) { + /* Set the jiffies + + * retry_delay_timer * 100ms + * for the rport/tgt + */ + tgt->retry_delay_timestamp = jiffies + + (qualifier * HZ / 10); + } + } + } + if (io_req->fcp_resid) + scsi_set_resid(sc_cmd, io_req->fcp_resid); + break; + default: + printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n", + io_req->fcp_status); + break; + } + bnx2fc_priv(sc_cmd)->io_req = NULL; + scsi_done(sc_cmd); + kref_put(&io_req->refcount, bnx2fc_cmd_release); +} + +int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, + struct bnx2fc_cmd *io_req) +{ + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct fcoe_port *port = tgt->port; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct fc_lport *lport = port->lport; + int task_idx, index; + u16 xid; + + /* bnx2fc_post_io_req() is called with the tgt_lock held */ + + /* Initialize rest of io_req fields */ + io_req->cmd_type = BNX2FC_SCSI_CMD; + io_req->port = port; + io_req->tgt = tgt; + io_req->data_xfer_len = scsi_bufflen(sc_cmd); + bnx2fc_priv(sc_cmd)->io_req = io_req; + + if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { + io_req->io_req_flags = BNX2FC_READ; + this_cpu_inc(lport->stats->InputRequests); + this_cpu_add(lport->stats->InputBytes, io_req->data_xfer_len); + } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + io_req->io_req_flags = BNX2FC_WRITE; + this_cpu_inc(lport->stats->OutputRequests); + this_cpu_add(lport->stats->OutputBytes, io_req->data_xfer_len); + } else { + io_req->io_req_flags = 0; 
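/*
 * Illustrative sketch (not part of the patch): the RETRY DELAY TIMER
 * decoding used above for BUSY / TASK SET FULL completions splits the
 * 16-bit field into a 2-bit scope (upper bits) and a 14-bit qualifier in
 * 100 ms units, which is then converted to a jiffies deadline.  The helper
 * name below is assumed, purely for illustration.
 */
#include <linux/jiffies.h>
#include <linux/types.h>

static unsigned long retry_delay_deadline(u16 retry_delay_timer)
{
	u16 scope = retry_delay_timer & 0xC000;		/* upper 2 bits */
	u16 qualifier = retry_delay_timer & 0x3FFF;	/* lower 14 bits, 100 ms units */

	if (scope && qualifier && qualifier <= 0x3FEF)
		return jiffies + (unsigned long)qualifier * HZ / 10;

	return 0;	/* no flow-control delay requested */
}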
+ this_cpu_inc(lport->stats->ControlRequests); + } + + xid = io_req->xid; + + /* Build buffer descriptor list for firmware from sg list */ + if (bnx2fc_build_bd_list_from_sg(io_req)) { + printk(KERN_ERR PFX "BD list creation failed\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + return -EAGAIN; + } + + task_idx = xid / BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; + task = &(task_page[index]); + bnx2fc_init_task(io_req, task); + + if (tgt->flush_in_prog) { + printk(KERN_ERR PFX "Flush in progress..Host Busy\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + return -EAGAIN; + } + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { + printk(KERN_ERR PFX "Session not ready...post_io\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + return -EAGAIN; + } + + /* Time IO req */ + if (tgt->io_timeout) + bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT); + /* Obtain free SQ entry */ + bnx2fc_add_2_sq(tgt, xid); + + /* Enqueue the io_req to active_cmd_queue */ + + io_req->on_active_queue = 1; + /* move io_req from pending_queue to active_queue */ + list_add_tail(&io_req->link, &tgt->active_cmd_queue); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + return 0; +} diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c new file mode 100644 index 000000000..2c246e80c --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -0,0 +1,896 @@ +/* bnx2fc_tgt.c: QLogic Linux FCoE offload driver. + * Handles operations such as session offload/upload etc, and manages + * session resources such as connection id and qp resources. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" +static void bnx2fc_upld_timer(struct timer_list *t); +static void bnx2fc_ofld_timer(struct timer_list *t); +static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, + struct fcoe_port *port, + struct fc_rport_priv *rdata); +static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt); +static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt); +static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt); +static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id); + +static void bnx2fc_upld_timer(struct timer_list *t) +{ + + struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer); + + BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n"); + /* fake upload completion */ + clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); + set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + wake_up_interruptible(&tgt->upld_wait); +} + +static void bnx2fc_ofld_timer(struct timer_list *t) +{ + + struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer); + + BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n"); + /* NOTE: This function should never be called, as + * offload should never timeout + */ + /* + * If the timer has expired, this session is dead + * Clear offloaded flag and logout of this device. 
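/*
 * Illustrative sketch (not part of the patch): the upload/offload timers
 * here implement a bounded wait on a flag.  The caller arms a timer and
 * sleeps on a wait queue until a completion bit is set; if the firmware
 * never answers, the timer callback fakes the completion so the waiter
 * cannot hang forever.  The flag bit, struct and timeout below are assumed;
 * callers are assumed to have initialised the wait queue with
 * init_waitqueue_head().
 */
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/wait.h>

struct fw_waiter {
	struct timer_list timer;
	wait_queue_head_t wq;
	unsigned long flags;
#define FW_REQ_COMPL	0		/* assumed completion flag bit */
};

static void fw_wait_timer(struct timer_list *t)
{
	struct fw_waiter *w = from_timer(w, t, timer);

	set_bit(FW_REQ_COMPL, &w->flags);	/* fake the completion */
	wake_up_interruptible(&w->wq);
}

static void fw_wait(struct fw_waiter *w, unsigned long timeout)
{
	timer_setup(&w->timer, fw_wait_timer, 0);
	mod_timer(&w->timer, jiffies + timeout);
	wait_event_interruptible(w->wq, test_bit(FW_REQ_COMPL, &w->flags));
	del_timer_sync(&w->timer);
}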
+ * Since OFFLOADED flag is cleared, this case + * will be considered as offload error and the + * port will be logged off, and conn_id, session + * resources are freed up in bnx2fc_offload_session + */ + clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); + set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + wake_up_interruptible(&tgt->ofld_wait); +} + +static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt) +{ + timer_setup(&tgt->ofld_timer, bnx2fc_ofld_timer, 0); + mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT); + + wait_event_interruptible(tgt->ofld_wait, + (test_bit( + BNX2FC_FLAG_OFLD_REQ_CMPL, + &tgt->flags))); + if (signal_pending(current)) + flush_signals(current); + del_timer_sync(&tgt->ofld_timer); +} + +static void bnx2fc_offload_session(struct fcoe_port *port, + struct bnx2fc_rport *tgt, + struct fc_rport_priv *rdata) +{ + struct fc_rport *rport = rdata->rport; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + int rval; + int i = 0; + + /* Initialize bnx2fc_rport */ + /* NOTE: tgt is already bzero'd */ + rval = bnx2fc_init_tgt(tgt, port, rdata); + if (rval) { + printk(KERN_ERR PFX "Failed to allocate conn id for " + "port_id (%6x)\n", rport->port_id); + goto tgt_init_err; + } + + /* Allocate session resources */ + rval = bnx2fc_alloc_session_resc(hba, tgt); + if (rval) { + printk(KERN_ERR PFX "Failed to allocate resources\n"); + goto ofld_err; + } + + /* + * Initialize FCoE session offload process. + * Upon completion of offload process add + * rport to list of rports + */ +retry_ofld: + clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + rval = bnx2fc_send_session_ofld_req(port, tgt); + if (rval) { + printk(KERN_ERR PFX "ofld_req failed\n"); + goto ofld_err; + } + + /* + * wait for the session is offloaded and enabled. 3 Secs + * should be ample time for this process to complete. + */ + bnx2fc_ofld_wait(tgt); + + if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) { + if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, + &tgt->flags)) { + BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, " + "retry ofld..%d\n", i++); + msleep_interruptible(1000); + if (i > 3) { + i = 0; + goto ofld_err; + } + goto retry_ofld; + } + goto ofld_err; + } + if (bnx2fc_map_doorbell(tgt)) { + printk(KERN_ERR PFX "map doorbell failed - no mem\n"); + goto ofld_err; + } + clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + rval = bnx2fc_send_session_enable_req(port, tgt); + if (rval) { + pr_err(PFX "enable session failed\n"); + goto ofld_err; + } + bnx2fc_ofld_wait(tgt); + if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) + goto ofld_err; + return; + +ofld_err: + /* couldn't offload the session. 
log off from this rport */ + BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n"); + clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + /* Free session resources */ + bnx2fc_free_session_resc(hba, tgt); +tgt_init_err: + if (tgt->fcoe_conn_id != -1) + bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); + fc_rport_logoff(rdata); +} + +void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) +{ + struct bnx2fc_cmd *io_req; + struct bnx2fc_cmd *tmp; + int rc; + int i = 0; + BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n", + tgt->num_active_ios.counter); + + spin_lock_bh(&tgt->tgt_lock); + tgt->flush_in_prog = 1; + + list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) { + i++; + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n"); + + if (cancel_delayed_work(&io_req->timeout_work)) { + if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, + &io_req->req_flags)) { + /* Handle eh_abort timeout */ + BNX2FC_IO_DBG(io_req, "eh_abort for IO " + "cleaned up\n"); + complete(&io_req->abts_done); + } + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + } + + set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags); + set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); + + /* Do not issue cleanup when disable request failed */ + if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) + bnx2fc_process_cleanup_compl(io_req, io_req->task, 0); + else { + rc = bnx2fc_initiate_cleanup(io_req); + BUG_ON(rc); + } + } + + list_for_each_entry_safe(io_req, tmp, &tgt->active_tm_queue, link) { + i++; + list_del_init(&io_req->link); + io_req->on_tmf_queue = 0; + BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n"); + if (io_req->wait_for_abts_comp) + complete(&io_req->abts_done); + } + + list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) { + i++; + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + + BNX2FC_IO_DBG(io_req, "els_queue cleanup\n"); + + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + + if ((io_req->cb_func) && (io_req->cb_arg)) { + io_req->cb_func(io_req->cb_arg); + io_req->cb_arg = NULL; + } + + /* Do not issue cleanup when disable request failed */ + if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) + bnx2fc_process_cleanup_compl(io_req, io_req->task, 0); + else { + rc = bnx2fc_initiate_cleanup(io_req); + BUG_ON(rc); + } + } + + list_for_each_entry_safe(io_req, tmp, &tgt->io_retire_queue, link) { + i++; + list_del_init(&io_req->link); + + BNX2FC_IO_DBG(io_req, "retire_queue flush\n"); + + if (cancel_delayed_work(&io_req->timeout_work)) { + if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, + &io_req->req_flags)) { + /* Handle eh_abort timeout */ + BNX2FC_IO_DBG(io_req, "eh_abort for IO " + "in retire_q\n"); + if (io_req->wait_for_abts_comp) + complete(&io_req->abts_done); + } + kref_put(&io_req->refcount, bnx2fc_cmd_release); + } + + clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags); + } + + BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i); + i = 0; + spin_unlock_bh(&tgt->tgt_lock); + /* wait for active_ios to go to 0 */ + while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT)) + msleep(25); + if (tgt->num_active_ios.counter != 0) + printk(KERN_ERR PFX "CLEANUP on port 0x%x:" + " active_ios = %d\n", + tgt->rdata->ids.port_id, tgt->num_active_ios.counter); + spin_lock_bh(&tgt->tgt_lock); + tgt->flush_in_prog = 0; + spin_unlock_bh(&tgt->tgt_lock); +} + +static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt) +{ + 
timer_setup(&tgt->upld_timer, bnx2fc_upld_timer, 0); + mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT); + wait_event_interruptible(tgt->upld_wait, + (test_bit( + BNX2FC_FLAG_UPLD_REQ_COMPL, + &tgt->flags))); + if (signal_pending(current)) + flush_signals(current); + del_timer_sync(&tgt->upld_timer); +} + +static void bnx2fc_upload_session(struct fcoe_port *port, + struct bnx2fc_rport *tgt) +{ + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + + BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n", + tgt->num_active_ios.counter); + + /* + * Called with hba->hba_mutex held. + * This is a blocking call + */ + clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + bnx2fc_send_session_disable_req(port, tgt); + + /* + * wait for upload to complete. 3 Secs + * should be sufficient time for this process to complete. + */ + BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n"); + bnx2fc_upld_wait(tgt); + + /* + * traverse thru the active_q and tmf_q and cleanup + * IOs in these lists + */ + BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n", + tgt->flags); + bnx2fc_flush_active_ios(tgt); + + /* Issue destroy KWQE */ + if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) { + BNX2FC_TGT_DBG(tgt, "send destroy req\n"); + clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + bnx2fc_send_session_destroy_req(hba, tgt); + + /* wait for destroy to complete */ + bnx2fc_upld_wait(tgt); + + if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags))) + printk(KERN_ERR PFX "ERROR!! destroy timed out\n"); + + BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n", + tgt->flags); + + } else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) { + printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy" + " not sent to FW\n"); + } else { + printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy" + " not sent to FW\n"); + } + + /* Free session resources */ + bnx2fc_free_session_resc(hba, tgt); + bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); +} + +static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, + struct fcoe_port *port, + struct fc_rport_priv *rdata) +{ + + struct fc_rport *rport = rdata->rport; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; + struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; + + tgt->rport = rport; + tgt->rdata = rdata; + tgt->port = port; + + if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) { + BNX2FC_TGT_DBG(tgt, "exceeded max sessions. 
logoff this tgt\n"); + tgt->fcoe_conn_id = -1; + return -1; + } + + tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt); + if (tgt->fcoe_conn_id == -1) + return -1; + + BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id); + + tgt->max_sqes = BNX2FC_SQ_WQES_MAX; + tgt->max_rqes = BNX2FC_RQ_WQES_MAX; + tgt->max_cqes = BNX2FC_CQ_WQES_MAX; + atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX); + + /* Initialize the toggle bit */ + tgt->sq_curr_toggle_bit = 1; + tgt->cq_curr_toggle_bit = 1; + tgt->sq_prod_idx = 0; + tgt->cq_cons_idx = 0; + tgt->rq_prod_idx = 0x8000; + tgt->rq_cons_idx = 0; + atomic_set(&tgt->num_active_ios, 0); + tgt->retry_delay_timestamp = 0; + + if (rdata->flags & FC_RP_FLAGS_RETRY && + rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET && + !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) { + tgt->dev_type = TYPE_TAPE; + tgt->io_timeout = 0; /* use default ULP timeout */ + } else { + tgt->dev_type = TYPE_DISK; + tgt->io_timeout = BNX2FC_IO_TIMEOUT; + } + + /* initialize sq doorbell */ + sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE; + sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE << + B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT; + /* initialize rx doorbell */ + rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) | + (0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) | + (B577XX_FCOE_CONNECTION_TYPE << + B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT)); + rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) | + (0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT); + + spin_lock_init(&tgt->tgt_lock); + spin_lock_init(&tgt->cq_lock); + + /* Initialize active_cmd_queue list */ + INIT_LIST_HEAD(&tgt->active_cmd_queue); + + /* Initialize IO retire queue */ + INIT_LIST_HEAD(&tgt->io_retire_queue); + + INIT_LIST_HEAD(&tgt->els_queue); + + /* Initialize active_tm_queue list */ + INIT_LIST_HEAD(&tgt->active_tm_queue); + + init_waitqueue_head(&tgt->ofld_wait); + init_waitqueue_head(&tgt->upld_wait); + + return 0; +} + +/* + * This event_callback is called after successful completion of libfc + * initiated target login. bnx2fc can proceed with initiating the session + * establishment. + */ +void bnx2fc_rport_event_handler(struct fc_lport *lport, + struct fc_rport_priv *rdata, + enum fc_rport_event event) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct fc_rport *rport = rdata->rport; + struct fc_rport_libfc_priv *rp; + struct bnx2fc_rport *tgt; + u32 port_id; + + BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n", + event, rdata->ids.port_id); + switch (event) { + case RPORT_EV_READY: + if (!rport) { + printk(KERN_ERR PFX "rport is NULL: ERROR!\n"); + break; + } + + rp = rport->dd_data; + if (rport->port_id == FC_FID_DIR_SERV) { + /* + * bnx2fc_rport structure doesn't exist for + * directory server. + * We should not come here, as lport will + * take care of fabric login + */ + printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n", + rdata->ids.port_id); + break; + } + + if (rdata->spp_type != FC_TYPE_FCP) { + BNX2FC_HBA_DBG(lport, "not FCP type target." + " not offloading\n"); + break; + } + if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { + BNX2FC_HBA_DBG(lport, "not FCP_TARGET" + " not offloading\n"); + break; + } + + /* + * Offload process is protected with hba mutex. 
+ * Use the same mutex_lock for upload process too + */ + mutex_lock(&hba->hba_mutex); + tgt = (struct bnx2fc_rport *)&rp[1]; + + /* This can happen when ADISC finds the same target */ + if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) { + BNX2FC_TGT_DBG(tgt, "already offloaded\n"); + mutex_unlock(&hba->hba_mutex); + return; + } + + /* + * Offload the session. This is a blocking call, and will + * wait until the session is offloaded. + */ + bnx2fc_offload_session(port, tgt, rdata); + + BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n", + hba->num_ofld_sess); + + if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) { + /* Session is offloaded and enabled. */ + BNX2FC_TGT_DBG(tgt, "sess offloaded\n"); + /* This counter is protected with hba mutex */ + hba->num_ofld_sess++; + + set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags); + } else { + /* + * Offload or enable would have failed. + * In offload/enable completion path, the + * rport would have already been removed + */ + BNX2FC_TGT_DBG(tgt, "Port is being logged off as " + "offloaded flag not set\n"); + } + mutex_unlock(&hba->hba_mutex); + break; + case RPORT_EV_LOGO: + case RPORT_EV_FAILED: + case RPORT_EV_STOP: + port_id = rdata->ids.port_id; + if (port_id == FC_FID_DIR_SERV) + break; + + if (!rport) { + printk(KERN_INFO PFX "%x - rport not created Yet!!\n", + port_id); + break; + } + rp = rport->dd_data; + mutex_lock(&hba->hba_mutex); + /* + * Perform session upload. Note that rdata->peers is already + * removed from disc->rports list before we get this event. + */ + tgt = (struct bnx2fc_rport *)&rp[1]; + + if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) { + mutex_unlock(&hba->hba_mutex); + break; + } + clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags); + + bnx2fc_upload_session(port, tgt); + hba->num_ofld_sess--; + BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n", + hba->num_ofld_sess); + /* + * Try to wake up the linkdown wait thread. If num_ofld_sess + * is 0, the waiting therad wakes up + */ + if ((hba->wait_for_link_down) && + (hba->num_ofld_sess == 0)) { + wake_up_interruptible(&hba->shutdown_wait); + } + mutex_unlock(&hba->hba_mutex); + + break; + + case RPORT_EV_NONE: + break; + } +} + +/** + * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id + * + * @port: fcoe_port struct to lookup the target port on + * @port_id: The remote port ID to look up + */ +struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, + u32 port_id) +{ + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct bnx2fc_rport *tgt; + struct fc_rport_priv *rdata; + int i; + + for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { + tgt = hba->tgt_ofld_list[i]; + if ((tgt) && (tgt->port == port)) { + rdata = tgt->rdata; + if (rdata->ids.port_id == port_id) { + if (rdata->rp_state != RPORT_ST_DELETE) { + BNX2FC_TGT_DBG(tgt, "rport " + "obtained\n"); + return tgt; + } else { + BNX2FC_TGT_DBG(tgt, "rport 0x%x " + "is in DELETED state\n", + rdata->ids.port_id); + return NULL; + } + } + } + } + return NULL; +} + + +/** + * bnx2fc_alloc_conn_id - allocates FCOE Connection id + * + * @hba: pointer to adapter structure + * @tgt: pointer to bnx2fc_rport structure + */ +static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt) +{ + u32 conn_id, next; + + /* called with hba mutex held */ + + /* + * tgt_ofld_list access is synchronized using + * both hba mutex and hba lock. Atleast hba mutex or + * hba lock needs to be held for read access. 
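/*
 * Illustrative sketch (not part of the patch): the conn_id allocation loop
 * that follows scans a fixed-size table round-robin under a spinlock,
 * starting just after the last id handed out and giving up once it wraps
 * back to its starting slot.  MAX_SESS, the table and the helper name are
 * assumed; -1 signals exhaustion as in the driver.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

#define MAX_SESS	128			/* assumed table size */

static void *ofld_list[MAX_SESS];
static u32 next_id;
static DEFINE_SPINLOCK(id_lock);

static int alloc_id(void *owner)
{
	u32 id, start;

	spin_lock_bh(&id_lock);
	start = next_id;
	id = next_id++;
	if (next_id == MAX_SESS)
		next_id = 0;

	while (ofld_list[id]) {			/* slot busy, try the next one */
		if (++id == MAX_SESS)
			id = 0;
		if (id == start) {		/* wrapped around: table is full */
			spin_unlock_bh(&id_lock);
			return -1;
		}
	}
	ofld_list[id] = owner;
	spin_unlock_bh(&id_lock);
	return id;
}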
+ */ + + spin_lock_bh(&hba->hba_lock); + next = hba->next_conn_id; + conn_id = hba->next_conn_id++; + if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS) + hba->next_conn_id = 0; + + while (hba->tgt_ofld_list[conn_id] != NULL) { + conn_id++; + if (conn_id == BNX2FC_NUM_MAX_SESS) + conn_id = 0; + + if (conn_id == next) { + /* No free conn_ids are available */ + spin_unlock_bh(&hba->hba_lock); + return -1; + } + } + hba->tgt_ofld_list[conn_id] = tgt; + tgt->fcoe_conn_id = conn_id; + spin_unlock_bh(&hba->hba_lock); + return conn_id; +} + +static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id) +{ + /* called with hba mutex held */ + spin_lock_bh(&hba->hba_lock); + hba->tgt_ofld_list[conn_id] = NULL; + spin_unlock_bh(&hba->hba_lock); +} + +/* + * bnx2fc_alloc_session_resc - Allocate qp resources for the session + */ +static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt) +{ + dma_addr_t page; + int num_pages; + u32 *pbl; + + /* Allocate and map SQ */ + tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; + tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; + + tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, + &tgt->sq_dma, GFP_KERNEL); + if (!tgt->sq) { + printk(KERN_ERR PFX "unable to allocate SQ memory %d\n", + tgt->sq_mem_size); + goto mem_alloc_failure; + } + + /* Allocate and map CQ */ + tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; + tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; + + tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, + &tgt->cq_dma, GFP_KERNEL); + if (!tgt->cq) { + printk(KERN_ERR PFX "unable to allocate CQ memory %d\n", + tgt->cq_mem_size); + goto mem_alloc_failure; + } + + /* Allocate and map RQ and RQ PBL */ + tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; + tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; + + tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, + &tgt->rq_dma, GFP_KERNEL); + if (!tgt->rq) { + printk(KERN_ERR PFX "unable to allocate RQ memory %d\n", + tgt->rq_mem_size); + goto mem_alloc_failure; + } + + tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); + tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; + + tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, + &tgt->rq_pbl_dma, GFP_KERNEL); + if (!tgt->rq_pbl) { + printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n", + tgt->rq_pbl_size); + goto mem_alloc_failure; + } + + num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE; + page = tgt->rq_dma; + pbl = (u32 *)tgt->rq_pbl; + + while (num_pages--) { + *pbl = (u32)page; + pbl++; + *pbl = (u32)((u64)page >> 32); + pbl++; + page += CNIC_PAGE_SIZE; + } + + /* Allocate and map XFERQ */ + tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; + tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; + + tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, + tgt->xferq_mem_size, &tgt->xferq_dma, + GFP_KERNEL); + if (!tgt->xferq) { + printk(KERN_ERR PFX "unable to allocate XFERQ %d\n", + tgt->xferq_mem_size); + goto mem_alloc_failure; + } + + /* Allocate and map CONFQ & CONFQ PBL */ + tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; + tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; + + tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, + tgt->confq_mem_size, &tgt->confq_dma, + GFP_KERNEL); + if (!tgt->confq) { + 
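/*
 * Illustrative sketch (not part of the patch): the RQ and CONFQ queues
 * around this point are described to the chip through a page buffer list
 * (PBL) - one entry per backing page, written as the low 32-bit word of the
 * page's DMA address followed by the high word.  PAGE_SZ below is an
 * assumed stand-in for CNIC_PAGE_SIZE.
 */
#include <linux/kernel.h>
#include <linux/types.h>

#define PAGE_SZ		4096			/* assumed queue page size */

static void fill_pbl(u32 *pbl, dma_addr_t base, size_t mem_size)
{
	int num_pages = mem_size / PAGE_SZ;
	dma_addr_t page = base;

	while (num_pages--) {
		*pbl++ = lower_32_bits(page);	/* low word first */
		*pbl++ = upper_32_bits(page);	/* then high word */
		page += PAGE_SZ;
	}
}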
printk(KERN_ERR PFX "unable to allocate CONFQ %d\n", + tgt->confq_mem_size); + goto mem_alloc_failure; + } + + tgt->confq_pbl_size = + (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); + tgt->confq_pbl_size = + (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; + + tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, + tgt->confq_pbl_size, + &tgt->confq_pbl_dma, GFP_KERNEL); + if (!tgt->confq_pbl) { + printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n", + tgt->confq_pbl_size); + goto mem_alloc_failure; + } + + num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE; + page = tgt->confq_dma; + pbl = (u32 *)tgt->confq_pbl; + + while (num_pages--) { + *pbl = (u32)page; + pbl++; + *pbl = (u32)((u64)page >> 32); + pbl++; + page += CNIC_PAGE_SIZE; + } + + /* Allocate and map ConnDB */ + tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db); + + tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev, + tgt->conn_db_mem_size, + &tgt->conn_db_dma, GFP_KERNEL); + if (!tgt->conn_db) { + printk(KERN_ERR PFX "unable to allocate conn_db %d\n", + tgt->conn_db_mem_size); + goto mem_alloc_failure; + } + + + /* Allocate and map LCQ */ + tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE; + tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; + + tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, + &tgt->lcq_dma, GFP_KERNEL); + + if (!tgt->lcq) { + printk(KERN_ERR PFX "unable to allocate lcq %d\n", + tgt->lcq_mem_size); + goto mem_alloc_failure; + } + + tgt->conn_db->rq_prod = 0x8000; + + return 0; + +mem_alloc_failure: + return -ENOMEM; +} + +/** + * bnx2fc_free_session_resc - free qp resources for the session + * + * @hba: adapter structure pointer + * @tgt: bnx2fc_rport structure pointer + * + * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL + */ +static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt) +{ + void __iomem *ctx_base_ptr; + + BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n"); + + spin_lock_bh(&tgt->cq_lock); + ctx_base_ptr = tgt->ctx_base; + tgt->ctx_base = NULL; + + /* Free LCQ */ + if (tgt->lcq) { + dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, + tgt->lcq, tgt->lcq_dma); + tgt->lcq = NULL; + } + /* Free connDB */ + if (tgt->conn_db) { + dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size, + tgt->conn_db, tgt->conn_db_dma); + tgt->conn_db = NULL; + } + /* Free confq and confq pbl */ + if (tgt->confq_pbl) { + dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size, + tgt->confq_pbl, tgt->confq_pbl_dma); + tgt->confq_pbl = NULL; + } + if (tgt->confq) { + dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size, + tgt->confq, tgt->confq_dma); + tgt->confq = NULL; + } + /* Free XFERQ */ + if (tgt->xferq) { + dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, + tgt->xferq, tgt->xferq_dma); + tgt->xferq = NULL; + } + /* Free RQ PBL and RQ */ + if (tgt->rq_pbl) { + dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, + tgt->rq_pbl, tgt->rq_pbl_dma); + tgt->rq_pbl = NULL; + } + if (tgt->rq) { + dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size, + tgt->rq, tgt->rq_dma); + tgt->rq = NULL; + } + /* Free CQ */ + if (tgt->cq) { + dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, + tgt->cq, tgt->cq_dma); + tgt->cq = NULL; + } + /* Free SQ */ + if (tgt->sq) { + dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, + tgt->sq, tgt->sq_dma); + tgt->sq = NULL; + } + spin_unlock_bh(&tgt->cq_lock); + + if (ctx_base_ptr) + iounmap(ctx_base_ptr); +} diff --git 
a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h new file mode 100644 index 000000000..917534109 --- /dev/null +++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h @@ -0,0 +1,161 @@ +/* 57xx_iscsi_constants.h: QLogic NetXtreme II iSCSI HSI + * + * Copyright (c) 2006 - 2013 Broadcom Corporation + * Copyright (c) 2014, QLogic Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) + * Maintained by: QLogic-Storage-Upstream@qlogic.com + */ +#ifndef __57XX_ISCSI_CONSTANTS_H_ +#define __57XX_ISCSI_CONSTANTS_H_ + +/** +* This file defines HSI constants for the iSCSI flows +*/ + +/* iSCSI request op codes */ +#define ISCSI_OPCODE_CLEANUP_REQUEST (7) + +/* iSCSI response/messages op codes */ +#define ISCSI_OPCODE_CLEANUP_RESPONSE (0x27) +#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION (0) + +/* iSCSI task types */ +#define ISCSI_TASK_TYPE_READ (0) +#define ISCSI_TASK_TYPE_WRITE (1) +#define ISCSI_TASK_TYPE_MPATH (2) + +/* initial CQ sequence numbers */ +#define ISCSI_INITIAL_SN (1) + +/* KWQ (kernel work queue) layer codes */ +#define ISCSI_KWQE_LAYER_CODE (6) + +/* KWQ (kernel work queue) request op codes */ +#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0) +#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1) +#define ISCSI_KWQE_OPCODE_UPDATE_CONN (2) +#define ISCSI_KWQE_OPCODE_DESTROY_CONN (3) +#define ISCSI_KWQE_OPCODE_INIT1 (4) +#define ISCSI_KWQE_OPCODE_INIT2 (5) + +/* KCQ (kernel completion queue) response op codes */ +#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN (0x10) +#define ISCSI_KCQE_OPCODE_UPDATE_CONN (0x12) +#define ISCSI_KCQE_OPCODE_DESTROY_CONN (0x13) +#define ISCSI_KCQE_OPCODE_INIT (0x14) +#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK (0x15) +#define ISCSI_KCQE_OPCODE_TCP_RESET (0x16) +#define ISCSI_KCQE_OPCODE_TCP_SYN (0x17) +#define ISCSI_KCQE_OPCODE_TCP_FIN (0X18) +#define ISCSI_KCQE_OPCODE_TCP_ERROR (0x19) +#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20) +#define ISCSI_KCQE_OPCODE_ISCSI_ERROR (0x21) + +/* KCQ (kernel completion queue) completion status */ +#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS (0x0) +#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x1) +#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x2) +#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x3) +#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR (0x4) + +#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR (0x5) +#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR (0x6) + +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE (0xa) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE (0xb) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN (0xc) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT (0xd) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN (0xe) + +/* Response */ +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN (0xf) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T (0x10) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO (0x2c) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG (0x2d) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0 (0x11) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1 (0x12) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2 (0x13) +#define 
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3 (0x14) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4 (0x15) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5 (0x16) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6 (0x17) + +/* Data-In */ +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN (0x18) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN (0x19) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO (0x1a) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV (0x1b) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN (0x1c) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN (0x1d) + +/* R2T */ +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF (0x1f) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN (0x20) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN (0x21) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED (0x24) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV (0x25) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN (0x26) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27) + +/* TMF */ +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN (0x28) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN (0x29) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN (0x2a) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP (0x2b) + +/* IP/TCP processing errors: */ +#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT (0x40) +#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS (0x41) +#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG (0x42) +#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS (0x43) + +/* iSCSI licensing errors */ +/* general iSCSI license not installed */ +#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED (0x50) +/* additional LOM specific iSCSI license not installed */ +#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51) + +#define ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY (0x80) +#define ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR (0x81) + +/* SQ/RQ/CQ DB structure sizes */ +#define ISCSI_SQ_DB_SIZE (16) +#define ISCSI_RQ_DB_SIZE (64) +#define ISCSI_CQ_DB_SIZE (80) + +#define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF + +/* Page size codes (for flags field in connection offload request) */ +#define ISCSI_PAGE_SIZE_256 (0) +#define ISCSI_PAGE_SIZE_512 (1) +#define ISCSI_PAGE_SIZE_1K (2) +#define ISCSI_PAGE_SIZE_2K (3) +#define ISCSI_PAGE_SIZE_4K (4) +#define ISCSI_PAGE_SIZE_8K (5) +#define ISCSI_PAGE_SIZE_16K (6) +#define ISCSI_PAGE_SIZE_32K (7) +#define ISCSI_PAGE_SIZE_64K (8) +#define ISCSI_PAGE_SIZE_128K (9) +#define ISCSI_PAGE_SIZE_256K (10) +#define ISCSI_PAGE_SIZE_512K (11) +#define ISCSI_PAGE_SIZE_1M (12) +#define ISCSI_PAGE_SIZE_2M (13) +#define ISCSI_PAGE_SIZE_4M (14) +#define ISCSI_PAGE_SIZE_8M (15) + +/* Iscsi PDU related defines */ +#define ISCSI_HEADER_SIZE (48) +#define ISCSI_DIGEST_SHIFT (2) +#define ISCSI_DIGEST_SIZE (4) + +#define B577XX_ISCSI_CONNECTION_TYPE 3 + +#endif /*__57XX_ISCSI_CONSTANTS_H_ */ diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h new file mode 100644 index 000000000..19b3a97db --- /dev/null +++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h @@ -0,0 +1,1526 @@ +/* 
57xx_iscsi_hsi.h: QLogic NetXtreme II iSCSI HSI. + * + * Copyright (c) 2006 - 2013 Broadcom Corporation + * Copyright (c) 2014, QLogic Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) + * Maintained by: QLogic-Storage-Upstream@qlogic.com + */ +#ifndef __57XX_ISCSI_HSI_LINUX_LE__ +#define __57XX_ISCSI_HSI_LINUX_LE__ + +/* + * iSCSI Async CQE + */ +struct bnx2i_async_msg { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 reserved1; + u8 op_code; +#endif + u32 reserved2; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved3[2]; +#if defined(__BIG_ENDIAN) + u16 reserved5; + u8 err_code; + u8 reserved4; +#elif defined(__LITTLE_ENDIAN) + u8 reserved4; + u8 err_code; + u16 reserved5; +#endif + u32 reserved6; + u32 lun[2]; +#if defined(__BIG_ENDIAN) + u8 async_event; + u8 async_vcode; + u16 param1; +#elif defined(__LITTLE_ENDIAN) + u16 param1; + u8 async_vcode; + u8 async_event; +#endif +#if defined(__BIG_ENDIAN) + u16 param2; + u16 param3; +#elif defined(__LITTLE_ENDIAN) + u16 param3; + u16 param2; +#endif + u32 reserved7[3]; + u32 cq_req_sn; +}; + + +/* + * iSCSI Buffer Descriptor (BD) + */ +struct iscsi_bd { + u32 buffer_addr_hi; + u32 buffer_addr_lo; +#if defined(__BIG_ENDIAN) + u16 reserved0; + u16 buffer_length; +#elif defined(__LITTLE_ENDIAN) + u16 buffer_length; + u16 reserved0; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved3; + u16 flags; +#define ISCSI_BD_RESERVED1 (0x3F<<0) +#define ISCSI_BD_RESERVED1_SHIFT 0 +#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6) +#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6 +#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7) +#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7 +#define ISCSI_BD_RESERVED2 (0xFF<<8) +#define ISCSI_BD_RESERVED2_SHIFT 8 +#elif defined(__LITTLE_ENDIAN) + u16 flags; +#define ISCSI_BD_RESERVED1 (0x3F<<0) +#define ISCSI_BD_RESERVED1_SHIFT 0 +#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6) +#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6 +#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7) +#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7 +#define ISCSI_BD_RESERVED2 (0xFF<<8) +#define ISCSI_BD_RESERVED2_SHIFT 8 + u16 reserved3; +#endif +}; + + +/* + * iSCSI Cleanup SQ WQE + */ +struct bnx2i_cleanup_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 reserved1; + u8 op_code; +#endif + u32 reserved2[3]; +#if defined(__BIG_ENDIAN) + u16 reserved3; + u16 itt; +#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0 +#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14) +#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0 +#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14) +#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14 + u16 reserved3; +#endif + u32 reserved4[10]; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved6; + u16 reserved5; +#elif defined(__LITTLE_ENDIAN) + u16 reserved5; + u8 reserved6; + u8 cq_index; +#endif +}; + + +/* + * iSCSI Cleanup CQE + */ +struct bnx2i_cleanup_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 status; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 
reserved0; + u8 status; + u8 op_code; +#endif + u32 reserved1[3]; + u32 reserved2[2]; +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 err_code; + u8 reserved3; +#elif defined(__LITTLE_ENDIAN) + u8 reserved3; + u8 err_code; + u16 reserved4; +#endif + u32 reserved5[7]; +#if defined(__BIG_ENDIAN) + u16 reserved6; + u16 itt; +#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14) +#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14) +#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14 + u16 reserved6; +#endif + u32 cq_req_sn; +}; + + +/* + * SCSI read/write SQ WQE + */ +struct bnx2i_cmd_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0) +#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0 +#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3) +#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3 +#define ISCSI_CMD_REQUEST_WRITE (0x1<<5) +#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5 +#define ISCSI_CMD_REQUEST_READ (0x1<<6) +#define ISCSI_CMD_REQUEST_READ_SHIFT 6 +#define ISCSI_CMD_REQUEST_FINAL (0x1<<7) +#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_attr; +#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0) +#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0 +#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3) +#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3 +#define ISCSI_CMD_REQUEST_WRITE (0x1<<5) +#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5 +#define ISCSI_CMD_REQUEST_READ (0x1<<6) +#define ISCSI_CMD_REQUEST_READ_SHIFT 6 +#define ISCSI_CMD_REQUEST_FINAL (0x1<<7) +#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7 + u8 op_code; +#endif +#if defined(__BIG_ENDIAN) + u16 ud_buffer_offset; + u16 sd_buffer_offset; +#elif defined(__LITTLE_ENDIAN) + u16 sd_buffer_offset; + u16 ud_buffer_offset; +#endif + u32 lun[2]; +#if defined(__BIG_ENDIAN) + u16 reserved2; + u16 itt; +#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0 +#define ISCSI_CMD_REQUEST_TYPE (0x3<<14) +#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0 +#define ISCSI_CMD_REQUEST_TYPE (0x3<<14) +#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14 + u16 reserved2; +#endif + u32 total_data_transfer_length; + u32 cmd_sn; + u32 reserved3; + u32 cdb[4]; + u32 zero_fill; + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 sd_start_bd_index; + u8 ud_start_bd_index; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 ud_start_bd_index; + u8 sd_start_bd_index; + u8 cq_index; +#endif +}; + + +/* + * task statistics for write response + */ +struct bnx2i_write_resp_task_stat { +#if defined(__BIG_ENDIAN) + u16 num_r2ts; + u16 num_data_outs; +#elif defined(__LITTLE_ENDIAN) + u16 num_data_outs; + u16 num_r2ts; +#endif +}; + +/* + * task statistics for read response + */ +struct bnx2i_read_resp_task_stat { +#if defined(__BIG_ENDIAN) + u16 reserved; + u16 num_data_ins; +#elif defined(__LITTLE_ENDIAN) + u16 num_data_ins; + u16 reserved; +#endif +}; + +/* + * task statistics for iSCSI cmd response + */ +union bnx2i_cmd_resp_task_stat { + struct bnx2i_write_resp_task_stat write_stat; + struct bnx2i_read_resp_task_stat read_stat; +}; + +/* + * SCSI Command CQE 
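/*
 * Illustrative sketch (not part of the patch): every packed field in these
 * HSI structures follows the same convention of a MASK macro giving the
 * field's bits at their final position plus a matching _SHIFT macro.
 * Extracting or updating a field therefore always looks like the assumed
 * helpers below.
 */
#include <linux/types.h>

static inline u16 hsi_get_field(u16 word, u16 mask, int shift)
{
	return (word & mask) >> shift;
}

static inline u16 hsi_set_field(u16 word, u16 mask, int shift, u16 val)
{
	return (word & ~mask) | ((val << shift) & mask);
}

/*
 * e.g. index = hsi_get_field(itt, ISCSI_CLEANUP_REQUEST_INDEX,
 *			      ISCSI_CLEANUP_REQUEST_INDEX_SHIFT);
 */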
+ */ +struct bnx2i_cmd_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 response_flags; +#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0) +#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0 +#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1) +#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1 +#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2) +#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2 +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3) +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3 +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4) +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4 +#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5) +#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5 + u8 response; + u8 status; +#elif defined(__LITTLE_ENDIAN) + u8 status; + u8 response; + u8 response_flags; +#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0) +#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0 +#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1) +#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1 +#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2) +#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2 +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3) +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3 +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4) +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4 +#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5) +#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5 + u8 op_code; +#endif + u32 data_length; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved2; + u32 residual_count; +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 err_code; + u8 reserved3; +#elif defined(__LITTLE_ENDIAN) + u8 reserved3; + u8 err_code; + u16 reserved4; +#endif + u32 reserved5[5]; + union bnx2i_cmd_resp_task_stat task_stat; + u32 reserved6; +#if defined(__BIG_ENDIAN) + u16 reserved7; + u16 itt; +#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14) +#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14) +#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14 + u16 reserved7; +#endif + u32 cq_req_sn; +}; + + + +/* + * firmware middle-path request SQ WQE + */ +struct bnx2i_fw_mp_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; + u16 hdr_opaque1; +#elif defined(__LITTLE_ENDIAN) + u16 hdr_opaque1; + u8 op_attr; + u8 op_code; +#endif + u32 data_length; + u32 hdr_opaque2[2]; +#if defined(__BIG_ENDIAN) + u16 reserved0; + u16 itt; +#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0 +#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14) +#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0 +#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14) +#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14 + u16 reserved0; +#endif + u32 hdr_opaque3[4]; + u32 resp_bd_list_addr_lo; + u32 resp_bd_list_addr_hi; + u32 resp_buffer; +#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) +#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 +#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24) +#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24 +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 reserved3; + u8 flags; +#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0) +#define 
ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1) +#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1 +#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) +#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 +#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3) +#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3 +#elif defined(__LITTLE_ENDIAN) + u8 flags; +#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0) +#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1) +#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1 +#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) +#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 +#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3) +#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3 + u8 reserved3; + u16 reserved4; +#endif + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved6; + u8 reserved5; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved5; + u8 reserved6; + u8 cq_index; +#endif +}; + + +/* + * firmware response - CQE: used only by firmware + */ +struct bnx2i_fw_response { + u32 hdr_dword1[2]; + u32 hdr_exp_cmd_sn; + u32 hdr_max_cmd_sn; + u32 hdr_ttt; + u32 hdr_res_cnt; + u32 cqe_flags; +#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0) +#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0 +#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8) +#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8 +#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16) +#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16 + u32 stat_sn; + u32 hdr_dword2[2]; + u32 hdr_dword3[2]; + u32 task_stat; + u32 reserved0; + u32 hdr_itt; + u32 cq_req_sn; +}; + + +/* + * iSCSI KCQ CQE parameters + */ +union iscsi_kcqe_params { + u32 reserved0[4]; +}; + +/* + * iSCSI KCQ CQE + */ +struct iscsi_kcqe { + u32 iscsi_conn_id; + u32 completion_status; + u32 iscsi_conn_context_id; + union iscsi_kcqe_params params; +#if defined(__BIG_ENDIAN) + u8 flags; +#define ISCSI_KCQE_RESERVED0 (0xF<<0) +#define ISCSI_KCQE_RESERVED0_SHIFT 0 +#define ISCSI_KCQE_LAYER_CODE (0x7<<4) +#define ISCSI_KCQE_LAYER_CODE_SHIFT 4 +#define ISCSI_KCQE_RESERVED1 (0x1<<7) +#define ISCSI_KCQE_RESERVED1_SHIFT 7 + u8 op_code; + u16 qe_self_seq; +#elif defined(__LITTLE_ENDIAN) + u16 qe_self_seq; + u8 op_code; + u8 flags; +#define ISCSI_KCQE_RESERVED0 (0xF<<0) +#define ISCSI_KCQE_RESERVED0_SHIFT 0 +#define ISCSI_KCQE_LAYER_CODE (0x7<<4) +#define ISCSI_KCQE_LAYER_CODE_SHIFT 4 +#define ISCSI_KCQE_RESERVED1 (0x1<<7) +#define ISCSI_KCQE_RESERVED1_SHIFT 7 +#endif +}; + + + +/* + * iSCSI KWQE header + */ +struct iscsi_kwqe_header { +#if defined(__BIG_ENDIAN) + u8 flags; +#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0) +#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0 +#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4) +#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4 +#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7) +#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7 + u8 op_code; +#elif defined(__LITTLE_ENDIAN) + u8 op_code; + u8 flags; +#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0) +#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0 +#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4) +#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4 +#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7) +#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7 +#endif +}; + +/* + * iSCSI firmware init request 1 + */ +struct iscsi_kwqe_init1 { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u8 reserved0; + u8 num_cqs; +#elif defined(__LITTLE_ENDIAN) + u8 num_cqs; + u8 reserved0; + struct 
iscsi_kwqe_header hdr; +#endif + u32 dummy_buffer_addr_lo; + u32 dummy_buffer_addr_hi; +#if defined(__BIG_ENDIAN) + u16 num_ccells_per_conn; + u16 num_tasks_per_conn; +#elif defined(__LITTLE_ENDIAN) + u16 num_tasks_per_conn; + u16 num_ccells_per_conn; +#endif +#if defined(__BIG_ENDIAN) + u16 sq_wqes_per_page; + u16 sq_num_wqes; +#elif defined(__LITTLE_ENDIAN) + u16 sq_num_wqes; + u16 sq_wqes_per_page; +#endif +#if defined(__BIG_ENDIAN) + u8 cq_log_wqes_per_page; + u8 flags; +#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0) +#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0 +#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4) +#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 +#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) +#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 +#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6) +#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6 +#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7) +#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7 + u16 cq_num_wqes; +#elif defined(__LITTLE_ENDIAN) + u16 cq_num_wqes; + u8 flags; +#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0) +#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0 +#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4) +#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 +#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) +#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 +#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6) +#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6 +#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7) +#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7 + u8 cq_log_wqes_per_page; +#endif +#if defined(__BIG_ENDIAN) + u16 cq_num_pages; + u16 sq_num_pages; +#elif defined(__LITTLE_ENDIAN) + u16 sq_num_pages; + u16 cq_num_pages; +#endif +#if defined(__BIG_ENDIAN) + u16 rq_buffer_size; + u16 rq_num_wqes; +#elif defined(__LITTLE_ENDIAN) + u16 rq_num_wqes; + u16 rq_buffer_size; +#endif +}; + +/* + * iSCSI firmware init request 2 + */ +struct iscsi_kwqe_init2 { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 max_cq_sqn; +#elif defined(__LITTLE_ENDIAN) + u16 max_cq_sqn; + struct iscsi_kwqe_header hdr; +#endif + u32 error_bit_map[2]; + u32 reserved1[5]; +}; + +/* + * Initial iSCSI connection offload request 1 + */ +struct iscsi_kwqe_conn_offload1 { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 iscsi_conn_id; +#elif defined(__LITTLE_ENDIAN) + u16 iscsi_conn_id; + struct iscsi_kwqe_header hdr; +#endif + u32 sq_page_table_addr_lo; + u32 sq_page_table_addr_hi; + u32 cq_page_table_addr_lo; + u32 cq_page_table_addr_hi; + u32 reserved0[3]; +}; + +/* + * iSCSI Page Table Entry (PTE) + */ +struct iscsi_pte { + u32 hi; + u32 lo; +}; + +/* + * Initial iSCSI connection offload request 2 + */ +struct iscsi_kwqe_conn_offload2 { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct iscsi_kwqe_header hdr; +#endif + u32 rq_page_table_addr_lo; + u32 rq_page_table_addr_hi; + struct iscsi_pte sq_first_pte; + struct iscsi_pte cq_first_pte; + u32 num_additional_wqes; +}; + + +/* + * Initial iSCSI connection offload request 3 + */ +struct iscsi_kwqe_conn_offload3 { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct iscsi_kwqe_header hdr; +#endif + u32 reserved1; + struct iscsi_pte qp_first_pte[3]; +}; + + +/* + * iSCSI connection update request + */ +struct iscsi_kwqe_conn_update { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 
reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct iscsi_kwqe_header hdr; +#endif +#if defined(__BIG_ENDIAN) + u8 session_error_recovery_level; + u8 max_outstanding_r2ts; + u8 reserved2; + u8 conn_flags; +#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0) +#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0 +#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1) +#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1 +#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2) +#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 +#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) +#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 +#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4) +#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4 +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6) +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6 +#elif defined(__LITTLE_ENDIAN) + u8 conn_flags; +#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0) +#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0 +#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1) +#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1 +#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2) +#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 +#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) +#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 +#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4) +#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4 +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6) +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6 + u8 reserved2; + u8 max_outstanding_r2ts; + u8 session_error_recovery_level; +#endif + u32 context_id; + u32 max_send_pdu_length; + u32 max_recv_pdu_length; + u32 first_burst_length; + u32 max_burst_length; + u32 exp_stat_sn; +}; + +/* + * iSCSI destroy connection request + */ +struct iscsi_kwqe_conn_destroy { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct iscsi_kwqe_header hdr; +#endif + u32 context_id; + u32 reserved1[6]; +}; + +/* + * iSCSI KWQ WQE + */ +union iscsi_kwqe { + struct iscsi_kwqe_init1 init1; + struct iscsi_kwqe_init2 init2; + struct iscsi_kwqe_conn_offload1 conn_offload1; + struct iscsi_kwqe_conn_offload2 conn_offload2; + struct iscsi_kwqe_conn_update conn_update; + struct iscsi_kwqe_conn_destroy conn_destroy; +}; + +/* + * iSCSI Login SQ WQE + */ +struct bnx2i_login_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0) +#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2) +#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2 +#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4) +#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4 +#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6) +#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6 +#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7) +#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7 + u8 version_max; + u8 version_min; +#elif defined(__LITTLE_ENDIAN) + u8 version_min; + u8 version_max; + u8 op_attr; +#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0) +#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2) +#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2 +#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4) +#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4 +#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6) +#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6 +#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7) +#define 
ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 isid_lo; +#if defined(__BIG_ENDIAN) + u16 isid_hi; + u16 tsih; +#elif defined(__LITTLE_ENDIAN) + u16 tsih; + u16 isid_hi; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved2; + u16 itt; +#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14) +#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14) +#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14 + u16 reserved2; +#endif +#if defined(__BIG_ENDIAN) + u16 cid; + u16 reserved3; +#elif defined(__LITTLE_ENDIAN) + u16 reserved3; + u16 cid; +#endif + u32 cmd_sn; + u32 exp_stat_sn; + u32 reserved4; + u32 resp_bd_list_addr_lo; + u32 resp_bd_list_addr_hi; + u32 resp_buffer; +#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) +#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24) +#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24 +#if defined(__BIG_ENDIAN) + u16 reserved8; + u8 reserved7; + u8 flags; +#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0) +#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) +#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 +#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3) +#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3 +#elif defined(__LITTLE_ENDIAN) + u8 flags; +#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0) +#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) +#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 +#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3) +#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3 + u8 reserved7; + u16 reserved8; +#endif + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved10; + u8 reserved9; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved9; + u8 reserved10; + u8 cq_index; +#endif +}; + + +/* + * iSCSI Login CQE + */ +struct bnx2i_login_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 response_flags; +#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0) +#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2) +#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2 +#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4) +#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4 +#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6) +#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6 +#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7) +#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7 + u8 version_max; + u8 version_active; +#elif defined(__LITTLE_ENDIAN) + u8 version_active; + u8 version_max; + u8 response_flags; +#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0) +#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2) +#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2 +#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4) +#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4 +#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6) +#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6 +#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7) +#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved1[2]; +#if defined(__BIG_ENDIAN) + u16 reserved3; + u8 err_code; + u8 
reserved2; +#elif defined(__LITTLE_ENDIAN) + u8 reserved2; + u8 err_code; + u16 reserved3; +#endif + u32 stat_sn; + u32 isid_lo; +#if defined(__BIG_ENDIAN) + u16 isid_hi; + u16 tsih; +#elif defined(__LITTLE_ENDIAN) + u16 tsih; + u16 isid_hi; +#endif +#if defined(__BIG_ENDIAN) + u8 status_class; + u8 status_detail; + u16 reserved4; +#elif defined(__LITTLE_ENDIAN) + u16 reserved4; + u8 status_detail; + u8 status_class; +#endif + u32 reserved5[3]; +#if defined(__BIG_ENDIAN) + u16 reserved6; + u16 itt; +#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14) +#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14) +#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14 + u16 reserved6; +#endif + u32 cq_req_sn; +}; + + +/* + * iSCSI Logout SQ WQE + */ +struct bnx2i_logout_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0) +#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0 +#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_attr; +#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0) +#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0 +#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 reserved1[2]; +#if defined(__BIG_ENDIAN) + u16 reserved2; + u16 itt; +#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14) +#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14) +#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14 + u16 reserved2; +#endif +#if defined(__BIG_ENDIAN) + u16 cid; + u16 reserved3; +#elif defined(__LITTLE_ENDIAN) + u16 reserved3; + u16 cid; +#endif + u32 cmd_sn; + u32 reserved4[5]; + u32 zero_fill; + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved6; + u8 reserved5; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved5; + u8 reserved6; + u8 cq_index; +#endif +}; + + +/* + * iSCSI Logout CQE + */ +struct bnx2i_logout_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u8 response; + u8 reserved0; +#elif defined(__LITTLE_ENDIAN) + u8 reserved0; + u8 response; + u8 reserved1; + u8 op_code; +#endif + u32 reserved2; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved3[2]; +#if defined(__BIG_ENDIAN) + u16 reserved5; + u8 err_code; + u8 reserved4; +#elif defined(__LITTLE_ENDIAN) + u8 reserved4; + u8 err_code; + u16 reserved5; +#endif + u32 reserved6[3]; +#if defined(__BIG_ENDIAN) + u16 time_to_wait; + u16 time_to_retain; +#elif defined(__LITTLE_ENDIAN) + u16 time_to_retain; + u16 time_to_wait; +#endif + u32 reserved7[3]; +#if defined(__BIG_ENDIAN) + u16 reserved8; + u16 itt; +#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14) +#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0) +#define 
ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14) +#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14 + u16 reserved8; +#endif + u32 cq_req_sn; +}; + + +/* + * iSCSI Nop-In CQE + */ +struct bnx2i_nop_in_msg { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 reserved1; + u8 op_code; +#endif + u32 data_length; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 ttt; + u32 reserved2; +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 err_code; + u8 reserved3; +#elif defined(__LITTLE_ENDIAN) + u8 reserved3; + u8 err_code; + u16 reserved4; +#endif + u32 reserved5; + u32 lun[2]; + u32 reserved6[4]; +#if defined(__BIG_ENDIAN) + u16 reserved7; + u16 itt; +#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0) +#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0 +#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14) +#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0) +#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0 +#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14) +#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14 + u16 reserved7; +#endif + u32 cq_req_sn; +}; + + +/* + * iSCSI NOP-OUT SQ WQE + */ +struct bnx2i_nop_out_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0) +#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_attr; +#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0) +#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 lun[2]; +#if defined(__BIG_ENDIAN) + u16 reserved2; + u16 itt; +#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14) +#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14) +#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14 + u16 reserved2; +#endif + u32 ttt; + u32 cmd_sn; + u32 reserved3[2]; + u32 resp_bd_list_addr_lo; + u32 resp_bd_list_addr_hi; + u32 resp_buffer; +#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) +#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24) +#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24 +#if defined(__BIG_ENDIAN) + u16 reserved7; + u8 reserved6; + u8 flags; +#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0) +#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1) +#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1 +#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2) +#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2 +#elif defined(__LITTLE_ENDIAN) + u8 flags; +#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0) +#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1) +#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1 +#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2) +#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2 + u8 reserved6; + u16 reserved7; +#endif + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 
reserved9; + u8 reserved8; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved8; + u8 reserved9; + u8 cq_index; +#endif +}; + +/* + * iSCSI Reject CQE + */ +struct bnx2i_reject_msg { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u8 reason; + u8 reserved0; +#elif defined(__LITTLE_ENDIAN) + u8 reserved0; + u8 reason; + u8 reserved1; + u8 op_code; +#endif + u32 data_length; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved2[2]; +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 err_code; + u8 reserved3; +#elif defined(__LITTLE_ENDIAN) + u8 reserved3; + u8 err_code; + u16 reserved4; +#endif + u32 reserved5[8]; + u32 cq_req_sn; +}; + +/* + * bnx2i iSCSI TMF SQ WQE + */ +struct bnx2i_tmf_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0) +#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0 +#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_attr; +#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0) +#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0 +#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 lun[2]; +#if defined(__BIG_ENDIAN) + u16 reserved1; + u16 itt; +#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0 +#define ISCSI_TMF_REQUEST_TYPE (0x3<<14) +#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0 +#define ISCSI_TMF_REQUEST_TYPE (0x3<<14) +#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14 + u16 reserved1; +#endif + u32 ref_itt; + u32 cmd_sn; + u32 reserved2; + u32 ref_cmd_sn; + u32 reserved3[3]; + u32 zero_fill; + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved5; + u8 reserved4; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved4; + u8 reserved5; + u8 cq_index; +#endif +}; + +/* + * iSCSI Text SQ WQE + */ +struct bnx2i_text_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0) +#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_TEXT_REQUEST_CONT (0x1<<6) +#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6 +#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7) +#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_attr; +#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0) +#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_TEXT_REQUEST_CONT (0x1<<6) +#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6 +#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7) +#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 lun[2]; +#if defined(__BIG_ENDIAN) + u16 reserved3; + u16 itt; +#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14) +#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14) +#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14 + u16 reserved3; +#endif + u32 ttt; + u32 cmd_sn; + u32 reserved4[2]; + u32 resp_bd_list_addr_lo; + u32 resp_bd_list_addr_hi; + u32 resp_buffer; +#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) +#define 
ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 +#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24) +#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24 + u32 zero_fill; + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved7; + u8 reserved6; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved6; + u8 reserved7; + u8 cq_index; +#endif +}; + +/* + * iSCSI SQ WQE + */ +union iscsi_request { + struct bnx2i_cmd_request cmd; + struct bnx2i_tmf_request tmf; + struct bnx2i_nop_out_request nop_out; + struct bnx2i_login_request login_req; + struct bnx2i_text_request text; + struct bnx2i_logout_request logout_req; + struct bnx2i_cleanup_request cleanup; +}; + + +/* + * iSCSI TMF CQE + */ +struct bnx2i_tmf_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u8 response; + u8 reserved0; +#elif defined(__LITTLE_ENDIAN) + u8 reserved0; + u8 response; + u8 reserved1; + u8 op_code; +#endif + u32 reserved2; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved3[2]; +#if defined(__BIG_ENDIAN) + u16 reserved5; + u8 err_code; + u8 reserved4; +#elif defined(__LITTLE_ENDIAN) + u8 reserved4; + u8 err_code; + u16 reserved5; +#endif + u32 reserved6[7]; +#if defined(__BIG_ENDIAN) + u16 reserved7; + u16 itt; +#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14) +#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14) +#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14 + u16 reserved7; +#endif + u32 cq_req_sn; +}; + +/* + * iSCSI Text CQE + */ +struct bnx2i_text_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 response_flags; +#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0) +#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6) +#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6 +#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7) +#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 response_flags; +#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0) +#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6) +#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6 +#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7) +#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 ttt; + u32 reserved2; +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 err_code; + u8 reserved3; +#elif defined(__LITTLE_ENDIAN) + u8 reserved3; + u8 err_code; + u16 reserved4; +#endif + u32 reserved5; + u32 lun[2]; + u32 reserved6[4]; +#if defined(__BIG_ENDIAN) + u16 reserved7; + u16 itt; +#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14) +#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14) +#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14 + u16 reserved7; +#endif + u32 cq_req_sn; +}; + +/* + * iSCSI CQE + */ +union iscsi_response { + struct bnx2i_cmd_response cmd; + struct bnx2i_tmf_response tmf; + struct bnx2i_login_response login_resp; + struct bnx2i_text_response text; + struct bnx2i_logout_response logout_resp; + struct bnx2i_cleanup_response cleanup; + 
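	/*
	 * Editor's note (illustrative only, not part of the firmware
	 * interface): every multi-bit field in this HSI header is described
	 * by a MASK/SHIFT #define pair. A hypothetical helper that pulls the
	 * driver task index out of a login response ITT would look like:
	 *
	 *	static inline u16 bnx2i_login_resp_itt_index(u16 itt)
	 *	{
	 *		return (itt & ISCSI_LOGIN_RESPONSE_INDEX) >>
	 *			ISCSI_LOGIN_RESPONSE_INDEX_SHIFT;
	 *	}
	 *
	 * The same pattern applies to flag fields such as
	 * ISCSI_KWQE_INIT1_PAGE_SIZE or ISCSI_KCQE_LAYER_CODE.
	 */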
struct bnx2i_reject_msg reject; + struct bnx2i_async_msg async; + struct bnx2i_nop_in_msg nop_in; +}; + +#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */ diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig new file mode 100644 index 000000000..0cc06c2ce --- /dev/null +++ b/drivers/scsi/bnx2i/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-only +config SCSI_BNX2_ISCSI + tristate "QLogic NetXtreme II iSCSI support" + depends on NET + depends on PCI + depends on (IPV6 || IPV6=n) + depends on MMU + select SCSI_ISCSI_ATTRS + select NETDEVICES + select ETHERNET + select NET_VENDOR_BROADCOM + select CNIC + help + This driver supports iSCSI offload for the QLogic NetXtreme II + devices. diff --git a/drivers/scsi/bnx2i/Makefile b/drivers/scsi/bnx2i/Makefile new file mode 100644 index 000000000..25378671b --- /dev/null +++ b/drivers/scsi/bnx2i/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o + +obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h new file mode 100644 index 000000000..df7d04afc --- /dev/null +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -0,0 +1,882 @@ +/* bnx2i.h: QLogic NetXtreme II iSCSI driver. + * + * Copyright (c) 2006 - 2013 Broadcom Corporation + * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mike Christie + * Copyright (c) 2014, QLogic Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) + * Maintained by: QLogic-Storage-Upstream@qlogic.com + */ + +#ifndef _BNX2I_H_ +#define _BNX2I_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../../net/ethernet/broadcom/cnic_if.h" +#include "57xx_iscsi_hsi.h" +#include "57xx_iscsi_constants.h" + +#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h" + +#define BNX2_ISCSI_DRIVER_NAME "bnx2i" + +#define BNX2I_MAX_ADAPTERS 8 + +#define ISCSI_MAX_CONNS_PER_HBA 128 +#define ISCSI_MAX_SESS_PER_HBA ISCSI_MAX_CONNS_PER_HBA +#define ISCSI_MAX_CMDS_PER_SESS 128 + +/* Total active commands across all connections supported by devices */ +#define ISCSI_MAX_CMDS_PER_HBA_5708 (28 * (ISCSI_MAX_CMDS_PER_SESS - 1)) +#define ISCSI_MAX_CMDS_PER_HBA_5709 (128 * (ISCSI_MAX_CMDS_PER_SESS - 1)) +#define ISCSI_MAX_CMDS_PER_HBA_57710 (256 * (ISCSI_MAX_CMDS_PER_SESS - 1)) + +#define ISCSI_MAX_BDS_PER_CMD 32 + +#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8 +#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4 + +#define BNX2X_DB_SHIFT 3 + +/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */ +#define MAX_BD_LENGTH 65535 +#define BD_SPLIT_SIZE 32768 + +/* min, max & default values for SQ/RQ/CQ size, configurable via' modparam */ +#define BNX2I_SQ_WQES_MIN 16 +#define BNX2I_570X_SQ_WQES_MAX 128 +#define BNX2I_5770X_SQ_WQES_MAX 512 +#define BNX2I_570X_SQ_WQES_DEFAULT 128 +#define BNX2I_5770X_SQ_WQES_DEFAULT 128 + +#define BNX2I_570X_CQ_WQES_MAX 128 +#define BNX2I_5770X_CQ_WQES_MAX 512 + +#define BNX2I_RQ_WQES_MIN 16 +#define BNX2I_RQ_WQES_MAX 32 +#define BNX2I_RQ_WQES_DEFAULT 16 + +/* CCELLs per conn */ 
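+/*
+ * Editor's sketch (hypothetical helper, not part of the driver): the older
+ * 5706/5708 parts cannot accept more than MAX_BD_LENGTH bytes in a single
+ * BD, so larger buffers are carved into BD_SPLIT_SIZE chunks. For example,
+ * a 96 KB buffer needs 96 * 1024 / 32768 = 3 BDs.
+ */
+static inline u32 bnx2i_num_split_bds(u32 len)
+{
+	/* round up to a whole number of BD_SPLIT_SIZE chunks */
+	return (len + BD_SPLIT_SIZE - 1) / BD_SPLIT_SIZE;
+}
+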
+#define BNX2I_CCELLS_MIN 16 +#define BNX2I_CCELLS_MAX 96 +#define BNX2I_CCELLS_DEFAULT 64 + +#define ITT_INVALID_SIGNATURE 0xFFFF + +#define ISCSI_CMD_CLEANUP_TIMEOUT 100 + +#define BNX2I_CONN_CTX_BUF_SIZE 16384 + +#define BNX2I_SQ_WQE_SIZE 64 +#define BNX2I_RQ_WQE_SIZE 256 +#define BNX2I_CQE_SIZE 64 + +#define MB_KERNEL_CTX_SHIFT 8 +#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT) + +#define CTX_SHIFT 7 +#define GET_CID_NUM(cid_addr) ((cid_addr) >> CTX_SHIFT) + +#define CTX_OFFSET 0x10000 +#define MAX_CID_CNT 0x4000 + +#define BNX2I_570X_PAGE_SIZE_DEFAULT 4096 + +/* 5709 context registers */ +#define BNX2_MQ_CONFIG2 0x00003d00 +#define BNX2_MQ_CONFIG2_CONT_SZ (0x7L<<4) +#define BNX2_MQ_CONFIG2_FIRST_L4L5 (0x1fL<<8) + +/* 57710's BAR2 is mapped to doorbell registers */ +#define BNX2X_DOORBELL_PCI_BAR 2 +#define BNX2X_MAX_CQS 8 + +#define CNIC_ARM_CQE 1 +#define CNIC_ARM_CQE_FP 2 +#define CNIC_DISARM_CQE 0 + +#define REG_RD(__hba, offset) \ + readl(__hba->regview + offset) +#define REG_WR(__hba, offset, val) \ + writel(val, __hba->regview + offset) + +#ifdef CONFIG_32BIT +#define GET_STATS_64(__hba, dst, field) \ + do { \ + spin_lock_bh(&__hba->stat_lock); \ + dst->field##_lo = __hba->stats.field##_lo; \ + dst->field##_hi = __hba->stats.field##_hi; \ + spin_unlock_bh(&__hba->stat_lock); \ + } while (0) + +#define ADD_STATS_64(__hba, field, len) \ + do { \ + if (spin_trylock(&__hba->stat_lock)) { \ + if (__hba->stats.field##_lo + len < \ + __hba->stats.field##_lo) \ + __hba->stats.field##_hi++; \ + __hba->stats.field##_lo += len; \ + spin_unlock(&__hba->stat_lock); \ + } \ + } while (0) + +#else +#define GET_STATS_64(__hba, dst, field) \ + do { \ + u64 val, *out; \ + \ + val = __hba->bnx2i_stats.field; \ + out = (u64 *)&__hba->stats.field##_lo; \ + *out = cpu_to_le64(val); \ + out = (u64 *)&dst->field##_lo; \ + *out = cpu_to_le64(val); \ + } while (0) + +#define ADD_STATS_64(__hba, field, len) \ + do { \ + __hba->bnx2i_stats.field += len; \ + } while (0) +#endif + +/** + * struct generic_pdu_resc - login pdu resource structure + * + * @req_buf: driver buffer used to stage payload associated with + * the login request + * @req_dma_addr: dma address for iscsi login request payload buffer + * @req_buf_size: actual login request payload length + * @req_wr_ptr: pointer into login request buffer when next data is + * to be written + * @resp_hdr: iscsi header where iscsi login response header is to + * be recreated + * @resp_buf: buffer to stage login response payload + * @resp_dma_addr: login response payload buffer dma address + * @resp_buf_size: login response paylod length + * @resp_wr_ptr: pointer into login response buffer when next data is + * to be written + * @req_bd_tbl: iscsi login request payload BD table + * @req_bd_dma: login request BD table dma address + * @resp_bd_tbl: iscsi login response payload BD table + * @resp_bd_dma: login request BD table dma address + * + * following structure defines buffer info for generic pdus such as iSCSI Login, + * Logout and NOP + */ +struct generic_pdu_resc { + char *req_buf; + dma_addr_t req_dma_addr; + u32 req_buf_size; + char *req_wr_ptr; + struct iscsi_hdr resp_hdr; + char *resp_buf; + dma_addr_t resp_dma_addr; + u32 resp_buf_size; + char *resp_wr_ptr; + char *req_bd_tbl; + dma_addr_t req_bd_dma; + char *resp_bd_tbl; + dma_addr_t resp_bd_dma; +}; + + +/** + * struct bd_resc_page - tracks DMA'able memory allocated for BD tables + * + * @link: list head to link elements + * @max_ptrs: maximun pointers that can be stored in this page + * 
@num_valid: number of pointer valid in this page + * @page: base addess for page pointer array + * + * structure to track DMA'able memory allocated for command BD tables + */ +struct bd_resc_page { + struct list_head link; + u32 max_ptrs; + u32 num_valid; + void *page[1]; +}; + + +/** + * struct io_bdt - I/O buffer destricptor table + * + * @bd_tbl: BD table's virtual address + * @bd_tbl_dma: BD table's dma address + * @bd_valid: num valid BD entries + * + * IO BD table + */ +struct io_bdt { + struct iscsi_bd *bd_tbl; + dma_addr_t bd_tbl_dma; + u16 bd_valid; +}; + + +/** + * bnx2i_cmd - iscsi command structure + * + * @hdr: iSCSI header + * @conn: iscsi_conn pointer + * @scsi_cmd: SCSI-ML task pointer corresponding to this iscsi cmd + * @sg: SG list + * @io_tbl: buffer descriptor (BD) table + * @bd_tbl_dma: buffer descriptor (BD) table's dma address + * @req: bnx2i specific command request struct + */ +struct bnx2i_cmd { + struct iscsi_hdr hdr; + struct bnx2i_conn *conn; + struct scsi_cmnd *scsi_cmd; + struct scatterlist *sg; + struct io_bdt io_tbl; + dma_addr_t bd_tbl_dma; + struct bnx2i_cmd_request req; +}; + + +/** + * struct bnx2i_conn - iscsi connection structure + * + * @cls_conn: pointer to iscsi cls conn + * @hba: adapter structure pointer + * @iscsi_conn_cid: iscsi conn id + * @fw_cid: firmware iscsi context id + * @ep: endpoint structure pointer + * @gen_pdu: login/nopout/logout pdu resources + * @violation_notified: bit mask used to track iscsi error/warning messages + * already printed out + * @work_cnt: keeps track of the number of outstanding work + * + * iSCSI connection structure + */ +struct bnx2i_conn { + struct iscsi_cls_conn *cls_conn; + struct bnx2i_hba *hba; + struct completion cmd_cleanup_cmpl; + + u32 iscsi_conn_cid; +#define BNX2I_CID_RESERVED 0x5AFF + u32 fw_cid; + + struct timer_list poll_timer; + /* + * Queue Pair (QP) related structure elements. + */ + struct bnx2i_endpoint *ep; + + /* + * Buffer for login negotiation process + */ + struct generic_pdu_resc gen_pdu; + u64 violation_notified; + + atomic_t work_cnt; +}; + + + +/** + * struct iscsi_cid_queue - Per adapter iscsi cid queue + * + * @cid_que_base: queue base memory + * @cid_que: queue memory pointer + * @cid_q_prod_idx: produce index + * @cid_q_cons_idx: consumer index + * @cid_q_max_idx: max index. 
used to detect wrap around condition + * @cid_free_cnt: queue size + * @conn_cid_tbl: iscsi cid to conn structure mapping table + * + * Per adapter iSCSI CID Queue + */ +struct iscsi_cid_queue { + void *cid_que_base; + u32 *cid_que; + u32 cid_q_prod_idx; + u32 cid_q_cons_idx; + u32 cid_q_max_idx; + u32 cid_free_cnt; + struct bnx2i_conn **conn_cid_tbl; +}; + + +struct bnx2i_stats_info { + u64 rx_pdus; + u64 rx_bytes; + u64 tx_pdus; + u64 tx_bytes; +}; + + +/** + * struct bnx2i_hba - bnx2i adapter structure + * + * @link: list head to link elements + * @cnic: pointer to cnic device + * @pcidev: pointer to pci dev + * @netdev: pointer to netdev structure + * @regview: mapped PCI register space + * @age: age, incremented by every recovery + * @cnic_dev_type: cnic device type, 5706/5708/5709/57710 + * @mail_queue_access: mailbox queue access mode, applicable to 5709 only + * @reg_with_cnic: indicates whether the device is register with CNIC + * @adapter_state: adapter state, UP, GOING_DOWN, LINK_DOWN + * @mtu_supported: Ethernet MTU supported + * @shost: scsi host pointer + * @max_sqes: SQ size + * @max_rqes: RQ size + * @max_cqes: CQ size + * @num_ccell: number of command cells per connection + * @ofld_conns_active: active connection list + * @eh_wait: wait queue for the endpoint to shutdown + * @max_active_conns: max offload connections supported by this device + * @cid_que: iscsi cid queue + * @ep_rdwr_lock: read / write lock to synchronize various ep lists + * @ep_ofld_list: connection list for pending offload completion + * @ep_active_list: connection list for active offload endpoints + * @ep_destroy_list: connection list for pending offload completion + * @mp_bd_tbl: BD table to be used with middle path requests + * @mp_bd_dma: DMA address of 'mp_bd_tbl' memory buffer + * @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs + * @dummy_buf_dma: DMA address of 'dummy_buffer' memory buffer + * @lock: lock to synchonize access to hba structure + * @hba_shutdown_tmo: Timeout value to shutdown each connection + * @conn_teardown_tmo: Timeout value to tear down each connection + * @conn_ctx_destroy_tmo: Timeout value to destroy context of each connection + * @pci_did: PCI device ID + * @pci_vid: PCI vendor ID + * @pci_sdid: PCI subsystem device ID + * @pci_svid: PCI subsystem vendor ID + * @pci_func: PCI function number in system pci tree + * @pci_devno: PCI device number in system pci tree + * @num_wqe_sent: statistic counter, total wqe's sent + * @num_cqe_rcvd: statistic counter, total cqe's received + * @num_intr_claimed: statistic counter, total interrupts claimed + * @link_changed_count: statistic counter, num of link change notifications + * received + * @ipaddr_changed_count: statistic counter, num times IP address changed while + * at least one connection is offloaded + * @num_sess_opened: statistic counter, total num sessions opened + * @num_conn_opened: statistic counter, total num conns opened on this hba + * @ctx_ccell_tasks: captures number of ccells and tasks supported by + * currently offloaded connection, used to decode + * context memory + * @stat_lock: spin lock used by the statistic collector (32 bit) + * @stats: local iSCSI statistic collection place holder + * + * Adapter Data Structure + */ +struct bnx2i_hba { + struct list_head link; + struct cnic_dev *cnic; + struct pci_dev *pcidev; + struct net_device *netdev; + void __iomem *regview; + resource_size_t reg_base; + + u32 age; + unsigned long cnic_dev_type; + #define BNX2I_NX2_DEV_5706 0x0 + #define 
BNX2I_NX2_DEV_5708 0x1 + #define BNX2I_NX2_DEV_5709 0x2 + #define BNX2I_NX2_DEV_57710 0x3 + u32 mail_queue_access; + #define BNX2I_MQ_KERNEL_MODE 0x0 + #define BNX2I_MQ_KERNEL_BYPASS_MODE 0x1 + #define BNX2I_MQ_BIN_MODE 0x2 + unsigned long reg_with_cnic; + #define BNX2I_CNIC_REGISTERED 1 + + unsigned long adapter_state; + #define ADAPTER_STATE_UP 0 + #define ADAPTER_STATE_GOING_DOWN 1 + #define ADAPTER_STATE_LINK_DOWN 2 + #define ADAPTER_STATE_INIT_FAILED 31 + unsigned int mtu_supported; + #define BNX2I_MAX_MTU_SUPPORTED 9000 + + struct Scsi_Host *shost; + + u32 max_sqes; + u32 max_rqes; + u32 max_cqes; + u32 num_ccell; + + int ofld_conns_active; + wait_queue_head_t eh_wait; + + int max_active_conns; + struct iscsi_cid_queue cid_que; + + rwlock_t ep_rdwr_lock; + struct list_head ep_ofld_list; + struct list_head ep_active_list; + struct list_head ep_destroy_list; + + /* + * BD table to be used with MP (Middle Path requests. + */ + char *mp_bd_tbl; + dma_addr_t mp_bd_dma; + char *dummy_buffer; + dma_addr_t dummy_buf_dma; + + spinlock_t lock; /* protects hba structure access */ + struct mutex net_dev_lock;/* sync net device access */ + + int hba_shutdown_tmo; + int conn_teardown_tmo; + int conn_ctx_destroy_tmo; + /* + * PCI related info. + */ + u16 pci_did; + u16 pci_vid; + u16 pci_sdid; + u16 pci_svid; + u16 pci_func; + u16 pci_devno; + + /* + * Following are a bunch of statistics useful during development + * and later stage for score boarding. + */ + u32 num_wqe_sent; + u32 num_cqe_rcvd; + u32 num_intr_claimed; + u32 link_changed_count; + u32 ipaddr_changed_count; + u32 num_sess_opened; + u32 num_conn_opened; + unsigned int ctx_ccell_tasks; + +#ifdef CONFIG_32BIT + spinlock_t stat_lock; +#endif + struct bnx2i_stats_info bnx2i_stats; + struct iscsi_stats_info stats; +}; + + +/******************************************************************************* + * QP [ SQ / RQ / CQ ] info. 
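+ *
+ * Editor's note (illustrative figures, assuming a 4 KB CNIC_PAGE_SIZE): each
+ * SQ entry is BNX2I_SQ_WQE_SIZE (64) bytes, so one page holds 4096 / 64 = 64
+ * WQEs and the 5706/5708/5709 default of 128 SQ WQEs spans two pages. Each
+ * CQ entry is BNX2I_CQE_SIZE (64) bytes, while each RQ entry is
+ * BNX2I_RQ_WQE_SIZE (256) bytes, so a page holds 4096 / 256 = 16 RQ WQEs,
+ * which matches BNX2I_RQ_WQES_DEFAULT. bnx2i_adjust_qp_size() in
+ * bnx2i_hwi.c rounds the requested sizes so that each queue occupies an
+ * integral number of pages.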
+ ******************************************************************************/ + +/* + * SQ/RQ/CQ generic structure definition + */ +struct sqe { + u8 sqe_byte[BNX2I_SQ_WQE_SIZE]; +}; + +struct rqe { + u8 rqe_byte[BNX2I_RQ_WQE_SIZE]; +}; + +struct cqe { + u8 cqe_byte[BNX2I_CQE_SIZE]; +}; + + +enum { +#if defined(__LITTLE_ENDIAN) + CNIC_EVENT_COAL_INDEX = 0x0, + CNIC_SEND_DOORBELL = 0x4, + CNIC_EVENT_CQ_ARM = 0x7, + CNIC_RECV_DOORBELL = 0x8 +#elif defined(__BIG_ENDIAN) + CNIC_EVENT_COAL_INDEX = 0x2, + CNIC_SEND_DOORBELL = 0x6, + CNIC_EVENT_CQ_ARM = 0x4, + CNIC_RECV_DOORBELL = 0xa +#endif +}; + + +/* + * CQ DB + */ +struct bnx2x_iscsi_cq_pend_cmpl { + /* CQ producer, updated by Ustorm */ + u16 ustrom_prod; + /* CQ pending completion counter */ + u16 pend_cntr; +}; + + +struct bnx2i_5771x_cq_db { + struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS]; + /* CQ pending completion ITT array */ + u16 itt[BNX2X_MAX_CQS]; + /* Cstorm CQ sequence to notify array, updated by driver */; + u16 sqn[BNX2X_MAX_CQS]; + u32 reserved[4] /* 16 byte allignment */; +}; + + +struct bnx2i_5771x_sq_rq_db { + u16 prod_idx; + u8 reserved0[62]; /* Pad structure size to 64 bytes */ +}; + + +struct bnx2i_5771x_dbell_hdr { + u8 header; + /* 1 for rx doorbell, 0 for tx doorbell */ +#define B577XX_DOORBELL_HDR_RX (0x1<<0) +#define B577XX_DOORBELL_HDR_RX_SHIFT 0 + /* 0 for normal doorbell, 1 for advertise wnd doorbell */ +#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1) +#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1 + /* rdma tx only: DPM transaction size specifier (64/128/256/512B) */ +#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2) +#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2 + /* connection type */ +#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4) +#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4 +}; + +struct bnx2i_5771x_dbell { + struct bnx2i_5771x_dbell_hdr dbell; + u8 pad[3]; + +}; + +/** + * struct qp_info - QP (share queue region) atrributes structure + * + * @ctx_base: ioremapped pci register base to access doorbell register + * pertaining to this offloaded connection + * @sq_virt: virtual address of send queue (SQ) region + * @sq_phys: DMA address of SQ memory region + * @sq_mem_size: SQ size + * @sq_prod_qe: SQ producer entry pointer + * @sq_cons_qe: SQ consumer entry pointer + * @sq_first_qe: virtual address of first entry in SQ + * @sq_last_qe: virtual address of last entry in SQ + * @sq_prod_idx: SQ producer index + * @sq_cons_idx: SQ consumer index + * @sqe_left: number sq entry left + * @sq_pgtbl_virt: page table describing buffer consituting SQ region + * @sq_pgtbl_phys: dma address of 'sq_pgtbl_virt' + * @sq_pgtbl_size: SQ page table size + * @cq_virt: virtual address of completion queue (CQ) region + * @cq_phys: DMA address of RQ memory region + * @cq_mem_size: CQ size + * @cq_prod_qe: CQ producer entry pointer + * @cq_cons_qe: CQ consumer entry pointer + * @cq_first_qe: virtual address of first entry in CQ + * @cq_last_qe: virtual address of last entry in CQ + * @cq_prod_idx: CQ producer index + * @cq_cons_idx: CQ consumer index + * @cqe_left: number cq entry left + * @cqe_size: size of each CQ entry + * @cqe_exp_seq_sn: next expected CQE sequence number + * @cq_pgtbl_virt: page table describing buffer consituting CQ region + * @cq_pgtbl_phys: dma address of 'cq_pgtbl_virt' + * @cq_pgtbl_size: CQ page table size + * @rq_virt: virtual address of receive queue (RQ) region + * @rq_phys: DMA address of RQ memory region + * @rq_mem_size: RQ size + * @rq_prod_qe: RQ producer entry pointer + * @rq_cons_qe: RQ 
consumer entry pointer + * @rq_first_qe: virtual address of first entry in RQ + * @rq_last_qe: virtual address of last entry in RQ + * @rq_prod_idx: RQ producer index + * @rq_cons_idx: RQ consumer index + * @rqe_left: number rq entry left + * @rq_pgtbl_virt: page table describing buffer consituting RQ region + * @rq_pgtbl_phys: dma address of 'rq_pgtbl_virt' + * @rq_pgtbl_size: RQ page table size + * + * queue pair (QP) is a per connection shared data structure which is used + * to send work requests (SQ), receive completion notifications (CQ) + * and receive asynchoronous / scsi sense info (RQ). 'qp_info' structure + * below holds queue memory, consumer/producer indexes and page table + * information + */ +struct qp_info { + void __iomem *ctx_base; +#define DPM_TRIGER_TYPE 0x40 + +#define BNX2I_570x_QUE_DB_SIZE 0 +#define BNX2I_5771x_QUE_DB_SIZE 16 + struct sqe *sq_virt; + dma_addr_t sq_phys; + u32 sq_mem_size; + + struct sqe *sq_prod_qe; + struct sqe *sq_cons_qe; + struct sqe *sq_first_qe; + struct sqe *sq_last_qe; + u16 sq_prod_idx; + u16 sq_cons_idx; + u32 sqe_left; + + void *sq_pgtbl_virt; + dma_addr_t sq_pgtbl_phys; + u32 sq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */ + + struct cqe *cq_virt; + dma_addr_t cq_phys; + u32 cq_mem_size; + + struct cqe *cq_prod_qe; + struct cqe *cq_cons_qe; + struct cqe *cq_first_qe; + struct cqe *cq_last_qe; + u16 cq_prod_idx; + u16 cq_cons_idx; + u32 cqe_left; + u32 cqe_size; + u32 cqe_exp_seq_sn; + + void *cq_pgtbl_virt; + dma_addr_t cq_pgtbl_phys; + u32 cq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */ + + struct rqe *rq_virt; + dma_addr_t rq_phys; + u32 rq_mem_size; + + struct rqe *rq_prod_qe; + struct rqe *rq_cons_qe; + struct rqe *rq_first_qe; + struct rqe *rq_last_qe; + u16 rq_prod_idx; + u16 rq_cons_idx; + u32 rqe_left; + + void *rq_pgtbl_virt; + dma_addr_t rq_pgtbl_phys; + u32 rq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */ +}; + + + +/* + * CID handles + */ +struct ep_handles { + u32 fw_cid; + u32 drv_iscsi_cid; + u16 pg_cid; + u16 rsvd; +}; + + +enum { + EP_STATE_IDLE = 0x0, + EP_STATE_PG_OFLD_START = 0x1, + EP_STATE_PG_OFLD_COMPL = 0x2, + EP_STATE_OFLD_START = 0x4, + EP_STATE_OFLD_COMPL = 0x8, + EP_STATE_CONNECT_START = 0x10, + EP_STATE_CONNECT_COMPL = 0x20, + EP_STATE_ULP_UPDATE_START = 0x40, + EP_STATE_ULP_UPDATE_COMPL = 0x80, + EP_STATE_DISCONN_START = 0x100, + EP_STATE_DISCONN_COMPL = 0x200, + EP_STATE_CLEANUP_START = 0x400, + EP_STATE_CLEANUP_CMPL = 0x800, + EP_STATE_TCP_FIN_RCVD = 0x1000, + EP_STATE_TCP_RST_RCVD = 0x2000, + EP_STATE_LOGOUT_SENT = 0x4000, + EP_STATE_LOGOUT_RESP_RCVD = 0x8000, + EP_STATE_PG_OFLD_FAILED = 0x1000000, + EP_STATE_ULP_UPDATE_FAILED = 0x2000000, + EP_STATE_CLEANUP_FAILED = 0x4000000, + EP_STATE_OFLD_FAILED = 0x8000000, + EP_STATE_CONNECT_FAILED = 0x10000000, + EP_STATE_DISCONN_TIMEDOUT = 0x20000000, + EP_STATE_OFLD_FAILED_CID_BUSY = 0x80000000, +}; + +/** + * struct bnx2i_endpoint - representation of tcp connection in NX2 world + * + * @link: list head to link elements + * @hba: adapter to which this connection belongs + * @conn: iscsi connection this EP is linked to + * @cls_ep: associated iSCSI endpoint pointer + * @cm_sk: cnic sock struct + * @hba_age: age to detect if 'iscsid' issues ep_disconnect() + * after HBA reset is completed by bnx2i/cnic/bnx2 + * modules + * @state: tracks offload connection state machine + * @timestamp: tracks the start time when the ep begins to connect + * @num_active_cmds: tracks the number of outstanding commands for this ep + * @ec_shift: the amount of shift 
as part of the event coal calc + * @qp: QP information + * @ids: contains chip allocated *context id* & driver assigned + * *iscsi cid* + * @ofld_timer: offload timer to detect timeout + * @ofld_wait: wait queue + * + * Endpoint Structure - equivalent of tcp socket structure + */ +struct bnx2i_endpoint { + struct list_head link; + struct bnx2i_hba *hba; + struct bnx2i_conn *conn; + struct iscsi_endpoint *cls_ep; + struct cnic_sock *cm_sk; + u32 hba_age; + u32 state; + unsigned long timestamp; + atomic_t num_active_cmds; + u32 ec_shift; + + struct qp_info qp; + struct ep_handles ids; + #define ep_iscsi_cid ids.drv_iscsi_cid + #define ep_cid ids.fw_cid + #define ep_pg_cid ids.pg_cid + struct timer_list ofld_timer; + wait_queue_head_t ofld_wait; +}; + + +struct bnx2i_work { + struct list_head list; + struct iscsi_session *session; + struct bnx2i_conn *bnx2i_conn; + struct cqe cqe; +}; + +struct bnx2i_percpu_s { + struct task_struct *iothread; + struct list_head work_list; + spinlock_t p_work_lock; +}; + + +/* Global variables */ +extern unsigned int error_mask1, error_mask2; +extern u64 iscsi_error_mask; +extern unsigned int en_tcp_dack; +extern unsigned int event_coal_div; +extern unsigned int event_coal_min; + +extern struct scsi_transport_template *bnx2i_scsi_xport_template; +extern struct iscsi_transport bnx2i_iscsi_transport; +extern struct cnic_ulp_ops bnx2i_cnic_cb; + +extern unsigned int sq_size; +extern unsigned int rq_size; + +extern const struct attribute_group *bnx2i_dev_groups[]; + + + +/* + * Function Prototypes + */ +extern void bnx2i_identify_device(struct bnx2i_hba *hba, struct cnic_dev *dev); + +extern void bnx2i_ulp_init(struct cnic_dev *dev); +extern void bnx2i_ulp_exit(struct cnic_dev *dev); +extern void bnx2i_start(void *handle); +extern void bnx2i_stop(void *handle); +extern int bnx2i_get_stats(void *handle); + +extern struct bnx2i_hba *get_adapter_list_head(void); + +struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, + u16 iscsi_cid); + +int bnx2i_alloc_ep_pool(void); +void bnx2i_release_ep_pool(void); +struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba); +struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba); + +struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic); + +struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic); +void bnx2i_free_hba(struct bnx2i_hba *hba); + +void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len); +void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count); + +void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd); + +void bnx2i_drop_session(struct iscsi_cls_session *session); + +extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba); +extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn, + struct iscsi_task *mtask); +extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn, + struct iscsi_task *mtask); +extern int bnx2i_send_iscsi_text(struct bnx2i_conn *conn, + struct iscsi_task *mtask); +extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn, + struct bnx2i_cmd *cmnd); +extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn, + struct iscsi_task *mtask, + char *datap, int data_len, int unsol); +extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn, + struct iscsi_task *mtask); +extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, + struct bnx2i_cmd *cmd); +extern int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep); +extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn); +extern int 
bnx2i_send_conn_destroy(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep); + +extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep); +extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep); +extern void bnx2i_ep_ofld_timer(struct timer_list *t); +extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list( + struct bnx2i_hba *hba, u32 iscsi_cid); +extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list( + struct bnx2i_hba *hba, u32 iscsi_cid); + +extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep); +extern int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action); + +extern int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep); + +/* Debug related function prototypes */ +extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn); +extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn); +extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn); +extern void bnx2i_print_recv_state(struct bnx2i_conn *conn); + +extern int bnx2i_percpu_io_thread(void *arg); +extern int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe); +#endif diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c new file mode 100644 index 000000000..6c864b093 --- /dev/null +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -0,0 +1,2745 @@ +/* bnx2i_hwi.c: QLogic NetXtreme II iSCSI driver. + * + * Copyright (c) 2006 - 2013 Broadcom Corporation + * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mike Christie + * Copyright (c) 2014, QLogic Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) + * Maintained by: QLogic-Storage-Upstream@qlogic.com + */ + +#include +#include +#include +#include "bnx2i.h" + +DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu); + +/** + * bnx2i_get_cid_num - get cid from ep + * @ep: endpoint pointer + * + * Only applicable to 57710 family of devices + */ +static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep) +{ + u32 cid; + + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) + cid = ep->ep_cid; + else + cid = GET_CID_NUM(ep->ep_cid); + return cid; +} + + +/** + * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ size for 57710 device type + * @hba: Adapter for which adjustments is to be made + * + * Only applicable to 57710 family of devices + */ +static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) +{ + u32 num_elements_per_pg; + + if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) || + test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) || + test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { + if (!is_power_of_2(hba->max_sqes)) + hba->max_sqes = rounddown_pow_of_two(hba->max_sqes); + + if (!is_power_of_2(hba->max_rqes)) + hba->max_rqes = rounddown_pow_of_two(hba->max_rqes); + } + + /* Adjust each queue size if the user selection does not + * yield integral num of page buffers + */ + /* adjust SQ */ + num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE; + if (hba->max_sqes < num_elements_per_pg) + hba->max_sqes = num_elements_per_pg; + else if (hba->max_sqes % num_elements_per_pg) + hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) & + ~(num_elements_per_pg - 1); + + /* adjust CQ */ + num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE; + if (hba->max_cqes < num_elements_per_pg) + hba->max_cqes = num_elements_per_pg; + else if (hba->max_cqes % num_elements_per_pg) + hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) & + ~(num_elements_per_pg - 1); + + /* adjust RQ */ + num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE; + if (hba->max_rqes < num_elements_per_pg) + hba->max_rqes = num_elements_per_pg; + else if (hba->max_rqes % num_elements_per_pg) + hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) & + ~(num_elements_per_pg - 1); +} + + +/** + * bnx2i_get_link_state - get network interface link state + * @hba: adapter instance pointer + * + * updates adapter structure flag based on netdev state + */ +static void bnx2i_get_link_state(struct bnx2i_hba *hba) +{ + if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state)) + set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); + else + clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); +} + + +/** + * bnx2i_iscsi_license_error - displays iscsi license related error message + * @hba: adapter instance pointer + * @error_code: error classification + * + * Puts out an error log when driver is unable to offload iscsi connection + * due to license restrictions + */ +static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code) +{ + if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED) + /* iSCSI offload not supported on this device */ + printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n", + hba->netdev->name); + if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED) + /* iSCSI offload not supported on this LOM device */ + printk(KERN_ERR "bnx2i: LOM is not enable to " + "offload iSCSI connections, dev=%s\n", + hba->netdev->name); + set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state); 
+} + + +/** + * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification + * @ep: endpoint (transport identifier) structure + * @action: action, ARM or DISARM. For now only ARM_CQE is used + * + * Arm'ing CQ will enable chip to generate global EQ events inorder to interrupt + * the driver. EQ event is generated CQ index is hit or at least 1 CQ is + * outstanding and on chip timer expires + */ +int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action) +{ + struct bnx2i_5771x_cq_db *cq_db; + u16 cq_index; + u16 next_index = 0; + u32 num_active_cmds; + + /* Coalesce CQ entries only on 10G devices */ + if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) + return 0; + + /* Do not update CQ DB multiple times before firmware writes + * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious + * interrupts and other unwanted results + */ + cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; + + if (action != CNIC_ARM_CQE_FP) + if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF) + return 0; + + if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) { + num_active_cmds = atomic_read(&ep->num_active_cmds); + if (num_active_cmds <= event_coal_min) + next_index = 1; + else { + next_index = num_active_cmds >> ep->ec_shift; + if (next_index > num_active_cmds - event_coal_min) + next_index = num_active_cmds - event_coal_min; + } + if (!next_index) + next_index = 1; + cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1; + if (cq_index > ep->qp.cqe_size * 2) + cq_index -= ep->qp.cqe_size * 2; + if (!cq_index) + cq_index = 1; + + cq_db->sqn[0] = cq_index; + } + return next_index; +} + + +/** + * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer + * @bnx2i_conn: iscsi connection on which RQ event occurred + * @ptr: driver buffer to which RQ buffer contents is to + * be copied + * @len: length of valid data inside RQ buf + * + * Copies RQ buffer contents from shared (DMA'able) memory region to + * driver buffer. 
RQ is used to DMA unsolicitated iscsi pdu's and + * scsi sense info + */ +void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len) +{ + if (!bnx2i_conn->ep->qp.rqe_left) + return; + + bnx2i_conn->ep->qp.rqe_left--; + memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len); + if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) { + bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe; + bnx2i_conn->ep->qp.rq_cons_idx = 0; + } else { + bnx2i_conn->ep->qp.rq_cons_qe++; + bnx2i_conn->ep->qp.rq_cons_idx++; + } +} + + +static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn) +{ + struct bnx2i_5771x_dbell dbell; + u32 msg; + + memset(&dbell, 0, sizeof(dbell)); + dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE << + B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT); + msg = *((u32 *)&dbell); + /* TODO : get doorbell register mapping */ + writel(cpu_to_le32(msg), conn->ep->qp.ctx_base); +} + + +/** + * bnx2i_put_rq_buf - Replenish RQ buffer, if required ring on chip doorbell + * @bnx2i_conn: iscsi connection on which event to post + * @count: number of RQ buffer being posted to chip + * + * No need to ring hardware doorbell for 57710 family of devices + */ +void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count) +{ + struct bnx2i_5771x_sq_rq_db *rq_db; + u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000); + struct bnx2i_endpoint *ep = bnx2i_conn->ep; + + ep->qp.rqe_left += count; + ep->qp.rq_prod_idx &= 0x7FFF; + ep->qp.rq_prod_idx += count; + + if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) { + ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes; + if (!hi_bit) + ep->qp.rq_prod_idx |= 0x8000; + } else + ep->qp.rq_prod_idx |= hi_bit; + + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { + rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt; + rq_db->prod_idx = ep->qp.rq_prod_idx; + /* no need to ring hardware doorbell for 57710 */ + } else { + writew(ep->qp.rq_prod_idx, + ep->qp.ctx_base + CNIC_RECV_DOORBELL); + } +} + + +/** + * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine + * @bnx2i_conn: iscsi connection to which new SQ entries belong + * @count: number of SQ WQEs to post + * + * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family + * of devices. 
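+ *
+ * A rough outline of the two doorbell paths implemented below (sq_db
+ * lives in host memory for the 57710 family, CNIC_SEND_DOORBELL is a
+ * chip register for the older devices):
+ *
+ *	if (57710 family) {
+ *		sq_db->prod_idx = ep->qp.sq_prod_idx;	/* host memory SQ DB */
+ *		bnx2i_ring_577xx_doorbell(bnx2i_conn);
+ *	} else {
+ *		writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
+ *	}
+ *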
For 5706/5708/5709 new SQ WQE count is written into the + * doorbell register + */ +static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count) +{ + struct bnx2i_5771x_sq_rq_db *sq_db; + struct bnx2i_endpoint *ep = bnx2i_conn->ep; + + atomic_inc(&ep->num_active_cmds); + wmb(); /* flush SQ WQE memory before the doorbell is rung */ + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { + sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt; + sq_db->prod_idx = ep->qp.sq_prod_idx; + bnx2i_ring_577xx_doorbell(bnx2i_conn); + } else + writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL); +} + + +/** + * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters + * @bnx2i_conn: iscsi connection to which new SQ entries belong + * @count: number of SQ WQEs to post + * + * this routine will update SQ driver parameters and ring the doorbell + */ +static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn, + int count) +{ + int tmp_cnt; + + if (count == 1) { + if (bnx2i_conn->ep->qp.sq_prod_qe == + bnx2i_conn->ep->qp.sq_last_qe) + bnx2i_conn->ep->qp.sq_prod_qe = + bnx2i_conn->ep->qp.sq_first_qe; + else + bnx2i_conn->ep->qp.sq_prod_qe++; + } else { + if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <= + bnx2i_conn->ep->qp.sq_last_qe) + bnx2i_conn->ep->qp.sq_prod_qe += count; + else { + tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe - + bnx2i_conn->ep->qp.sq_prod_qe; + bnx2i_conn->ep->qp.sq_prod_qe = + &bnx2i_conn->ep->qp.sq_first_qe[count - + (tmp_cnt + 1)]; + } + } + bnx2i_conn->ep->qp.sq_prod_idx += count; + /* Ring the doorbell */ + bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx); +} + + +/** + * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware + * @bnx2i_conn: iscsi connection + * @task: transport layer's command structure pointer which is requesting + * a WQE to sent to chip for further processing + * + * prepare and post an iSCSI Login request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *task) +{ + struct bnx2i_login_request *login_wqe; + struct iscsi_login_req *login_hdr; + u32 dword; + + login_hdr = (struct iscsi_login_req *)task->hdr; + login_wqe = (struct bnx2i_login_request *) + bnx2i_conn->ep->qp.sq_prod_qe; + + login_wqe->op_code = login_hdr->opcode; + login_wqe->op_attr = login_hdr->flags; + login_wqe->version_max = login_hdr->max_version; + login_wqe->version_min = login_hdr->min_version; + login_wqe->data_length = ntoh24(login_hdr->dlength); + login_wqe->isid_lo = *((u32 *) login_hdr->isid); + login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2); + login_wqe->tsih = login_hdr->tsih; + login_wqe->itt = task->itt | + (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT); + login_wqe->cid = login_hdr->cid; + + login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn); + login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn); + login_wqe->flags = ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN; + + login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma; + login_wqe->resp_bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32); + + dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) | + (bnx2i_conn->gen_pdu.resp_buf_size << + ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT)); + login_wqe->resp_buffer = dword; + login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma; + login_wqe->bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32); + login_wqe->num_bds = 1; + login_wqe->cq_index = 0; /* CQ# used for 
completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + +/** + * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware + * @bnx2i_conn: iscsi connection + * @mtask: driver command structure which is requesting + * a WQE to sent to chip for further processing + * + * prepare and post an iSCSI Login request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *mtask) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_tm *tmfabort_hdr; + struct scsi_cmnd *ref_sc; + struct iscsi_task *ctask; + struct bnx2i_tmf_request *tmfabort_wqe; + u32 dword; + u32 scsi_lun[2]; + + tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; + tmfabort_wqe = (struct bnx2i_tmf_request *) + bnx2i_conn->ep->qp.sq_prod_qe; + + tmfabort_wqe->op_code = tmfabort_hdr->opcode; + tmfabort_wqe->op_attr = tmfabort_hdr->flags; + + tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14)); + tmfabort_wqe->reserved2 = 0; + tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn); + + switch (tmfabort_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) { + case ISCSI_TM_FUNC_ABORT_TASK: + case ISCSI_TM_FUNC_TASK_REASSIGN: + ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt); + if (!ctask || !ctask->sc) + /* + * the iscsi layer must have completed the cmd while + * was starting up. + * + * Note: In the case of a SCSI cmd timeout, the task's + * sc is still active; hence ctask->sc != 0 + * In this case, the task must be aborted + */ + return 0; + + ref_sc = ctask->sc; + if (ref_sc->sc_data_direction == DMA_TO_DEVICE) + dword = (ISCSI_TASK_TYPE_WRITE << + ISCSI_CMD_REQUEST_TYPE_SHIFT); + else + dword = (ISCSI_TASK_TYPE_READ << + ISCSI_CMD_REQUEST_TYPE_SHIFT); + tmfabort_wqe->ref_itt = (dword | + (tmfabort_hdr->rtt & ISCSI_ITT_MASK)); + break; + default: + tmfabort_wqe->ref_itt = RESERVED_ITT; + } + memcpy(scsi_lun, &tmfabort_hdr->lun, sizeof(struct scsi_lun)); + tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]); + tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]); + + tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); + + tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; + tmfabort_wqe->bd_list_addr_hi = (u32) + ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); + tmfabort_wqe->num_bds = 1; + tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + +/** + * bnx2i_send_iscsi_text - post iSCSI text WQE to hardware + * @bnx2i_conn: iscsi connection + * @mtask: driver command structure which is requesting + * a WQE to sent to chip for further processing + * + * prepare and post an iSCSI Text request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *mtask) +{ + struct bnx2i_text_request *text_wqe; + struct iscsi_text *text_hdr; + u32 dword; + + text_hdr = (struct iscsi_text *)mtask->hdr; + text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe; + + memset(text_wqe, 0, sizeof(struct bnx2i_text_request)); + + text_wqe->op_code = text_hdr->opcode; + text_wqe->op_attr = text_hdr->flags; + text_wqe->data_length = ntoh24(text_hdr->dlength); + text_wqe->itt = mtask->itt | + (ISCSI_TASK_TYPE_MPATH << ISCSI_TEXT_REQUEST_TYPE_SHIFT); + text_wqe->ttt = be32_to_cpu(text_hdr->ttt); + + text_wqe->cmd_sn = be32_to_cpu(text_hdr->cmdsn); + + text_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma; + text_wqe->resp_bd_list_addr_hi = + (u32) ((u64) 
bnx2i_conn->gen_pdu.resp_bd_dma >> 32); + + dword = ((1 << ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT) | + (bnx2i_conn->gen_pdu.resp_buf_size << + ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT)); + text_wqe->resp_buffer = dword; + text_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma; + text_wqe->bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32); + text_wqe->num_bds = 1; + text_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + + +/** + * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware + * @bnx2i_conn: iscsi connection + * @cmd: driver command structure which is requesting + * a WQE to sent to chip for further processing + * + * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn, + struct bnx2i_cmd *cmd) +{ + struct bnx2i_cmd_request *scsi_cmd_wqe; + + scsi_cmd_wqe = (struct bnx2i_cmd_request *) + bnx2i_conn->ep->qp.sq_prod_qe; + memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request)); + scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + +/** + * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware + * @bnx2i_conn: iscsi connection + * @task: transport layer's command structure pointer which is + * requesting a WQE to sent to chip for further processing + * @datap: payload buffer pointer + * @data_len: payload data length + * @unsol: indicated whether nopout pdu is unsolicited pdu or + * in response to target's NOPIN w/ TTT != FFFFFFFF + * + * prepare and post a nopout request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *task, + char *datap, int data_len, int unsol) +{ + struct bnx2i_endpoint *ep = bnx2i_conn->ep; + struct bnx2i_nop_out_request *nopout_wqe; + struct iscsi_nopout *nopout_hdr; + + nopout_hdr = (struct iscsi_nopout *)task->hdr; + nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe; + + memset(nopout_wqe, 0x00, sizeof(struct bnx2i_nop_out_request)); + + nopout_wqe->op_code = nopout_hdr->opcode; + nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL; + memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8); + + /* 57710 requires LUN field to be swapped */ + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) + swap(nopout_wqe->lun[0], nopout_wqe->lun[1]); + + nopout_wqe->itt = ((u16)task->itt | + (ISCSI_TASK_TYPE_MPATH << + ISCSI_TMF_REQUEST_TYPE_SHIFT)); + nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt); + nopout_wqe->flags = 0; + if (!unsol) + nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; + else if (nopout_hdr->itt == RESERVED_ITT) + nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; + + nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn); + nopout_wqe->data_length = data_len; + if (data_len) { + /* handle payload data, not required in first release */ + printk(KERN_ALERT "NOPOUT: WARNING!! 
payload len != 0\n"); + } else { + nopout_wqe->bd_list_addr_lo = (u32) + bnx2i_conn->hba->mp_bd_dma; + nopout_wqe->bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); + nopout_wqe->num_bds = 1; + } + nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + + +/** + * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware + * @bnx2i_conn: iscsi connection + * @task: transport layer's command structure pointer which is + * requesting a WQE to sent to chip for further processing + * + * prepare and post logout request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *task) +{ + struct bnx2i_logout_request *logout_wqe; + struct iscsi_logout *logout_hdr; + + logout_hdr = (struct iscsi_logout *)task->hdr; + + logout_wqe = (struct bnx2i_logout_request *) + bnx2i_conn->ep->qp.sq_prod_qe; + memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request)); + + logout_wqe->op_code = logout_hdr->opcode; + logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn); + logout_wqe->op_attr = + logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE; + logout_wqe->itt = ((u16)task->itt | + (ISCSI_TASK_TYPE_MPATH << + ISCSI_LOGOUT_REQUEST_TYPE_SHIFT)); + logout_wqe->data_length = 0; + logout_wqe->cid = 0; + + logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; + logout_wqe->bd_list_addr_hi = (u32) + ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); + logout_wqe->num_bds = 1; + logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_conn->ep->state = EP_STATE_LOGOUT_SENT; + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + + +/** + * bnx2i_update_iscsi_conn - post iSCSI logout request WQE to hardware + * @conn: iscsi connection which requires iscsi parameter update + * + * sends down iSCSI Conn Update request to move iSCSI conn to FFP + */ +void bnx2i_update_iscsi_conn(struct iscsi_conn *conn) +{ + struct bnx2i_conn *bnx2i_conn = conn->dd_data; + struct bnx2i_hba *hba = bnx2i_conn->hba; + struct kwqe *kwqe_arr[2]; + struct iscsi_kwqe_conn_update *update_wqe; + struct iscsi_kwqe_conn_update conn_update_kwqe; + + update_wqe = &conn_update_kwqe; + + update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN; + update_wqe->hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + /* 5771x requires conn context id to be passed as is */ + if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type)) + update_wqe->context_id = bnx2i_conn->ep->ep_cid; + else + update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7); + update_wqe->conn_flags = 0; + if (conn->hdrdgst_en) + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST; + if (conn->datadgst_en) + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST; + if (conn->session->initial_r2t_en) + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T; + if (conn->session->imm_data_en) + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA; + + update_wqe->max_send_pdu_length = conn->max_xmit_dlength; + update_wqe->max_recv_pdu_length = conn->max_recv_dlength; + update_wqe->first_burst_length = conn->session->first_burst; + update_wqe->max_burst_length = conn->session->max_burst; + update_wqe->exp_stat_sn = conn->exp_statsn; + update_wqe->max_outstanding_r2ts = conn->session->max_r2t; + update_wqe->session_error_recovery_level = conn->session->erl; + iscsi_conn_printk(KERN_ALERT, conn, + "bnx2i: conn update - MBL 
0x%x FBL 0x%x" + "MRDSL_I 0x%x MRDSL_T 0x%x \n", + update_wqe->max_burst_length, + update_wqe->first_burst_length, + update_wqe->max_recv_pdu_length, + update_wqe->max_send_pdu_length); + + kwqe_arr[0] = (struct kwqe *) update_wqe; + if (hba->cnic && hba->cnic->submit_kwqes) + hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); +} + + +/** + * bnx2i_ep_ofld_timer - post iSCSI logout request WQE to hardware + * @t: timer context used to fetch the endpoint (transport + * handle) structure pointer + * + * routine to handle connection offload/destroy request timeout + */ +void bnx2i_ep_ofld_timer(struct timer_list *t) +{ + struct bnx2i_endpoint *ep = from_timer(ep, t, ofld_timer); + + if (ep->state == EP_STATE_OFLD_START) { + printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n"); + ep->state = EP_STATE_OFLD_FAILED; + } else if (ep->state == EP_STATE_DISCONN_START) { + printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n"); + ep->state = EP_STATE_DISCONN_TIMEDOUT; + } else if (ep->state == EP_STATE_CLEANUP_START) { + printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n"); + ep->state = EP_STATE_CLEANUP_FAILED; + } + + wake_up_interruptible(&ep->ofld_wait); +} + + +static int bnx2i_power_of2(u32 val) +{ + u32 power = 0; + if (val & (val - 1)) + return power; + val--; + while (val) { + val = val >> 1; + power++; + } + return power; +} + + +/** + * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request + * @hba: adapter structure pointer + * @cmd: driver command structure which is requesting + * a WQE to sent to chip for further processing + * + * prepares and posts CONN_OFLD_REQ1/2 KWQE + */ +void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) +{ + struct bnx2i_cleanup_request *cmd_cleanup; + + cmd_cleanup = + (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe; + memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request)); + + cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST; + cmd_cleanup->itt = cmd->req.itt; + cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(cmd->conn, 1); +} + + +/** + * bnx2i_send_conn_destroy - initiates iscsi connection teardown process + * @hba: adapter structure pointer + * @ep: endpoint (transport identifier) structure + * + * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate + * iscsi connection context clean-up process + */ +int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) +{ + struct kwqe *kwqe_arr[2]; + struct iscsi_kwqe_conn_destroy conn_cleanup; + int rc = -EINVAL; + + memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy)); + + conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN; + conn_cleanup.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + /* 5771x requires conn context id to be passed as is */ + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) + conn_cleanup.context_id = ep->ep_cid; + else + conn_cleanup.context_id = (ep->ep_cid >> 7); + + conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid; + + kwqe_arr[0] = (struct kwqe *) &conn_cleanup; + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); + + return rc; +} + + +/** + * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process + * @hba: adapter structure pointer + * @ep: endpoint (transport identifier) structure + * + * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE + */ +static int bnx2i_570x_send_conn_ofld_req(struct 
bnx2i_hba *hba, + struct bnx2i_endpoint *ep) +{ + struct kwqe *kwqe_arr[2]; + struct iscsi_kwqe_conn_offload1 ofld_req1; + struct iscsi_kwqe_conn_offload2 ofld_req2; + dma_addr_t dma_addr; + int num_kwqes = 2; + u32 *ptbl; + int rc = -EINVAL; + + ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; + ofld_req1.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid; + + dma_addr = ep->qp.sq_pgtbl_phys; + ofld_req1.sq_page_table_addr_lo = (u32) dma_addr; + ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + dma_addr = ep->qp.cq_pgtbl_phys; + ofld_req1.cq_page_table_addr_lo = (u32) dma_addr; + ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2; + ofld_req2.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + dma_addr = ep->qp.rq_pgtbl_phys; + ofld_req2.rq_page_table_addr_lo = (u32) dma_addr; + ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + ptbl = (u32 *) ep->qp.sq_pgtbl_virt; + + ofld_req2.sq_first_pte.hi = *ptbl++; + ofld_req2.sq_first_pte.lo = *ptbl; + + ptbl = (u32 *) ep->qp.cq_pgtbl_virt; + ofld_req2.cq_first_pte.hi = *ptbl++; + ofld_req2.cq_first_pte.lo = *ptbl; + + kwqe_arr[0] = (struct kwqe *) &ofld_req1; + kwqe_arr[1] = (struct kwqe *) &ofld_req2; + ofld_req2.num_additional_wqes = 0; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + + +/** + * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation + * @hba: adapter structure pointer + * @ep: endpoint (transport identifier) structure + * + * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE + */ +static int bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep) +{ + struct kwqe *kwqe_arr[5]; + struct iscsi_kwqe_conn_offload1 ofld_req1; + struct iscsi_kwqe_conn_offload2 ofld_req2; + struct iscsi_kwqe_conn_offload3 ofld_req3[1]; + dma_addr_t dma_addr; + int num_kwqes = 2; + u32 *ptbl; + int rc = -EINVAL; + + ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; + ofld_req1.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid; + + dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE; + ofld_req1.sq_page_table_addr_lo = (u32) dma_addr; + ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE; + ofld_req1.cq_page_table_addr_lo = (u32) dma_addr; + ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2; + ofld_req2.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE; + ofld_req2.rq_page_table_addr_lo = (u32) dma_addr; + ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); + ofld_req2.sq_first_pte.hi = *ptbl++; + ofld_req2.sq_first_pte.lo = *ptbl; + + ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); + ofld_req2.cq_first_pte.hi = *ptbl++; + ofld_req2.cq_first_pte.lo = *ptbl; + + kwqe_arr[0] = (struct kwqe *) &ofld_req1; + kwqe_arr[1] = (struct kwqe *) &ofld_req2; + + ofld_req2.num_additional_wqes = 1; + memset(ofld_req3, 0x00, sizeof(ofld_req3[0])); + ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); + 
ofld_req3[0].qp_first_pte[0].hi = *ptbl++; + ofld_req3[0].qp_first_pte[0].lo = *ptbl; + + kwqe_arr[2] = (struct kwqe *) ofld_req3; + /* need if we decide to go with multiple KCQE's per conn */ + num_kwqes += 1; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +/** + * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process + * + * @hba: adapter structure pointer + * @ep: endpoint (transport identifier) structure + * + * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE + */ +int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) +{ + int rc; + + if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) + rc = bnx2i_5771x_send_conn_ofld_req(hba, ep); + else + rc = bnx2i_570x_send_conn_ofld_req(hba, ep); + + return rc; +} + + +/** + * setup_qp_page_tables - iscsi QP page table setup function + * @ep: endpoint (transport identifier) structure + * + * Sets up page tables for SQ/RQ/CQ, 1G/sec (5706/5708/5709) devices requires + * 64-bit address in big endian format. Whereas 10G/sec (57710) requires + * PT in little endian format + */ +static void setup_qp_page_tables(struct bnx2i_endpoint *ep) +{ + int num_pages; + u32 *ptbl; + dma_addr_t page; + int cnic_dev_10g; + + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) + cnic_dev_10g = 1; + else + cnic_dev_10g = 0; + + /* SQ page table */ + memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); + num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE; + page = ep->qp.sq_phys; + + if (cnic_dev_10g) + ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); + else + ptbl = (u32 *) ep->qp.sq_pgtbl_virt; + while (num_pages--) { + if (cnic_dev_10g) { + /* PTE is written in little endian format for 57710 */ + *ptbl = (u32) page; + ptbl++; + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + page += CNIC_PAGE_SIZE; + } else { + /* PTE is written in big endian format for + * 5706/5708/5709 devices */ + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + *ptbl = (u32) page; + ptbl++; + page += CNIC_PAGE_SIZE; + } + } + + /* RQ page table */ + memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); + num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE; + page = ep->qp.rq_phys; + + if (cnic_dev_10g) + ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); + else + ptbl = (u32 *) ep->qp.rq_pgtbl_virt; + while (num_pages--) { + if (cnic_dev_10g) { + /* PTE is written in little endian format for 57710 */ + *ptbl = (u32) page; + ptbl++; + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + page += CNIC_PAGE_SIZE; + } else { + /* PTE is written in big endian format for + * 5706/5708/5709 devices */ + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + *ptbl = (u32) page; + ptbl++; + page += CNIC_PAGE_SIZE; + } + } + + /* CQ page table */ + memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); + num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE; + page = ep->qp.cq_phys; + + if (cnic_dev_10g) + ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); + else + ptbl = (u32 *) ep->qp.cq_pgtbl_virt; + while (num_pages--) { + if (cnic_dev_10g) { + /* PTE is written in little endian format for 57710 */ + *ptbl = (u32) page; + ptbl++; + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + page += CNIC_PAGE_SIZE; + } else { + /* PTE is written in big endian format for + * 5706/5708/5709 devices */ + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + *ptbl = (u32) page; + ptbl++; + page += CNIC_PAGE_SIZE; + } + } +} + + +/** + * bnx2i_alloc_qp_resc - allocates 
required resources for QP. + * @hba: adapter structure pointer + * @ep: endpoint (transport identifier) structure + * + * Allocate QP (transport layer for iSCSI connection) resources, DMA'able + * memory for SQ/RQ/CQ and page tables. EP structure elements such + * as producer/consumer indexes/pointers, queue sizes and page table + * contents are setup + */ +int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) +{ + struct bnx2i_5771x_cq_db *cq_db; + + ep->hba = hba; + ep->conn = NULL; + ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0; + + /* Allocate page table memory for SQ which is page aligned */ + ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; + ep->qp.sq_mem_size = + (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; + ep->qp.sq_pgtbl_size = + (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); + ep->qp.sq_pgtbl_size = + (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; + + ep->qp.sq_pgtbl_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, + &ep->qp.sq_pgtbl_phys, GFP_KERNEL); + if (!ep->qp.sq_pgtbl_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n", + ep->qp.sq_pgtbl_size); + goto mem_alloc_err; + } + + /* Allocate memory area for actual SQ element */ + ep->qp.sq_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, + &ep->qp.sq_phys, GFP_KERNEL); + if (!ep->qp.sq_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n", + ep->qp.sq_mem_size); + goto mem_alloc_err; + } + + ep->qp.sq_first_qe = ep->qp.sq_virt; + ep->qp.sq_prod_qe = ep->qp.sq_first_qe; + ep->qp.sq_cons_qe = ep->qp.sq_first_qe; + ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1]; + ep->qp.sq_prod_idx = 0; + ep->qp.sq_cons_idx = 0; + ep->qp.sqe_left = hba->max_sqes; + + /* Allocate page table memory for CQ which is page aligned */ + ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; + ep->qp.cq_mem_size = + (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; + ep->qp.cq_pgtbl_size = + (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); + ep->qp.cq_pgtbl_size = + (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; + + ep->qp.cq_pgtbl_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, + &ep->qp.cq_pgtbl_phys, GFP_KERNEL); + if (!ep->qp.cq_pgtbl_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n", + ep->qp.cq_pgtbl_size); + goto mem_alloc_err; + } + + /* Allocate memory area for actual CQ element */ + ep->qp.cq_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, + &ep->qp.cq_phys, GFP_KERNEL); + if (!ep->qp.cq_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n", + ep->qp.cq_mem_size); + goto mem_alloc_err; + } + + ep->qp.cq_first_qe = ep->qp.cq_virt; + ep->qp.cq_prod_qe = ep->qp.cq_first_qe; + ep->qp.cq_cons_qe = ep->qp.cq_first_qe; + ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1]; + ep->qp.cq_prod_idx = 0; + ep->qp.cq_cons_idx = 0; + ep->qp.cqe_left = hba->max_cqes; + ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN; + ep->qp.cqe_size = hba->max_cqes; + + /* Invalidate all EQ CQE index, req only for 57710 */ + cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; + memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS); + + /* Allocate page table memory for RQ which is page aligned */ + ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; + ep->qp.rq_mem_size = + (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; + ep->qp.rq_pgtbl_size = + (ep->qp.rq_mem_size / 
CNIC_PAGE_SIZE) * sizeof(void *); + ep->qp.rq_pgtbl_size = + (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; + + ep->qp.rq_pgtbl_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, + &ep->qp.rq_pgtbl_phys, GFP_KERNEL); + if (!ep->qp.rq_pgtbl_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n", + ep->qp.rq_pgtbl_size); + goto mem_alloc_err; + } + + /* Allocate memory area for actual RQ element */ + ep->qp.rq_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, + &ep->qp.rq_phys, GFP_KERNEL); + if (!ep->qp.rq_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n", + ep->qp.rq_mem_size); + goto mem_alloc_err; + } + + ep->qp.rq_first_qe = ep->qp.rq_virt; + ep->qp.rq_prod_qe = ep->qp.rq_first_qe; + ep->qp.rq_cons_qe = ep->qp.rq_first_qe; + ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1]; + ep->qp.rq_prod_idx = 0x8000; + ep->qp.rq_cons_idx = 0; + ep->qp.rqe_left = hba->max_rqes; + + setup_qp_page_tables(ep); + + return 0; + +mem_alloc_err: + bnx2i_free_qp_resc(hba, ep); + return -ENOMEM; +} + + + +/** + * bnx2i_free_qp_resc - free memory resources held by QP + * @hba: adapter structure pointer + * @ep: endpoint (transport identifier) structure + * + * Free QP resources - SQ/RQ/CQ memory and page tables. + */ +void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) +{ + if (ep->qp.ctx_base) { + iounmap(ep->qp.ctx_base); + ep->qp.ctx_base = NULL; + } + /* Free SQ mem */ + if (ep->qp.sq_pgtbl_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, + ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys); + ep->qp.sq_pgtbl_virt = NULL; + ep->qp.sq_pgtbl_phys = 0; + } + if (ep->qp.sq_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, + ep->qp.sq_virt, ep->qp.sq_phys); + ep->qp.sq_virt = NULL; + ep->qp.sq_phys = 0; + } + + /* Free RQ mem */ + if (ep->qp.rq_pgtbl_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, + ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys); + ep->qp.rq_pgtbl_virt = NULL; + ep->qp.rq_pgtbl_phys = 0; + } + if (ep->qp.rq_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, + ep->qp.rq_virt, ep->qp.rq_phys); + ep->qp.rq_virt = NULL; + ep->qp.rq_phys = 0; + } + + /* Free CQ mem */ + if (ep->qp.cq_pgtbl_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, + ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys); + ep->qp.cq_pgtbl_virt = NULL; + ep->qp.cq_pgtbl_phys = 0; + } + if (ep->qp.cq_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, + ep->qp.cq_virt, ep->qp.cq_phys); + ep->qp.cq_virt = NULL; + ep->qp.cq_phys = 0; + } +} + + +/** + * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w + * @hba: adapter structure pointer + * + * Send down iscsi_init KWQEs which initiates the initial handshake with the f/w + * This results in iSCSi support validation and on-chip context manager + * initialization. Firmware completes this handshake with a CQE carrying + * the result of iscsi support validation. 
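+ *
+ * A rough sketch of how the main queue parameters sent below are derived
+ * from the (already page-adjusted) HBA limits:
+ *
+ *	num_ccells_per_conn = hba->max_sqes >> 1;
+ *	sq_wqes_per_page    = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+ *	cq_num_pages        = (hba->max_cqes * BNX2I_CQE_SIZE +
+ *			       CNIC_PAGE_SIZE - 1) / CNIC_PAGE_SIZE;
+ *	rq_buffer_size      = BNX2I_RQ_WQE_SIZE;
+ *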
Parameter carried by + * iscsi init request determines the number of offloaded connection and + * tolerance level for iscsi protocol violation this hba/chip can support + */ +int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) +{ + struct kwqe *kwqe_arr[3]; + struct iscsi_kwqe_init1 iscsi_init; + struct iscsi_kwqe_init2 iscsi_init2; + int rc = 0; + u64 mask64; + + memset(&iscsi_init, 0x00, sizeof(struct iscsi_kwqe_init1)); + memset(&iscsi_init2, 0x00, sizeof(struct iscsi_kwqe_init2)); + + bnx2i_adjust_qp_size(hba); + + iscsi_init.flags = + (CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; + if (en_tcp_dack) + iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE; + iscsi_init.reserved0 = 0; + iscsi_init.num_cqs = 1; + iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1; + iscsi_init.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma; + iscsi_init.dummy_buffer_addr_hi = + (u32) ((u64) hba->dummy_buf_dma >> 32); + + hba->num_ccell = hba->max_sqes >> 1; + hba->ctx_ccell_tasks = + ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); + iscsi_init.num_ccells_per_conn = hba->num_ccell; + iscsi_init.num_tasks_per_conn = hba->max_sqes; + iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE; + iscsi_init.sq_num_wqes = hba->max_sqes; + iscsi_init.cq_log_wqes_per_page = + (u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE); + iscsi_init.cq_num_wqes = hba->max_cqes; + iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + + (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE; + iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + + (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE; + iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE; + iscsi_init.rq_num_wqes = hba->max_rqes; + + + iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2; + iscsi_init2.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1; + mask64 = 0x0ULL; + mask64 |= ( + /* CISCO MDS */ + (1UL << + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) | + /* HP MSA1510i */ + (1UL << + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) | + /* EMC */ + (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN)); + if (error_mask1) { + iscsi_init2.error_bit_map[0] = error_mask1; + mask64 ^= (u32)(mask64); + mask64 |= error_mask1; + } else + iscsi_init2.error_bit_map[0] = (u32) mask64; + + if (error_mask2) { + iscsi_init2.error_bit_map[1] = error_mask2; + mask64 &= 0xffffffff; + mask64 |= ((u64)error_mask2 << 32); + } else + iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32); + + iscsi_error_mask = mask64; + + kwqe_arr[0] = (struct kwqe *) &iscsi_init; + kwqe_arr[1] = (struct kwqe *) &iscsi_init2; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2); + return rc; +} + + +/** + * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion. 
+ * @session: iscsi session + * @bnx2i_conn: bnx2i connection + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process SCSI CMD Response CQE & complete the request to SCSI-ML + */ +int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct bnx2i_hba *hba = bnx2i_conn->hba; + struct bnx2i_cmd_response *resp_cqe; + struct bnx2i_cmd *bnx2i_cmd; + struct iscsi_task *task; + struct iscsi_scsi_rsp *hdr; + u32 datalen = 0; + + resp_cqe = (struct bnx2i_cmd_response *)cqe; + spin_lock_bh(&session->back_lock); + task = iscsi_itt_to_task(conn, + resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX); + if (!task) + goto fail; + + bnx2i_cmd = task->dd_data; + + if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) { + conn->datain_pdus_cnt += + resp_cqe->task_stat.read_stat.num_data_ins; + conn->rxdata_octets += + bnx2i_cmd->req.total_data_transfer_length; + ADD_STATS_64(hba, rx_pdus, + resp_cqe->task_stat.read_stat.num_data_ins); + ADD_STATS_64(hba, rx_bytes, + bnx2i_cmd->req.total_data_transfer_length); + } else { + conn->dataout_pdus_cnt += + resp_cqe->task_stat.write_stat.num_data_outs; + conn->r2t_pdus_cnt += + resp_cqe->task_stat.write_stat.num_r2ts; + conn->txdata_octets += + bnx2i_cmd->req.total_data_transfer_length; + ADD_STATS_64(hba, tx_pdus, + resp_cqe->task_stat.write_stat.num_data_outs); + ADD_STATS_64(hba, tx_bytes, + bnx2i_cmd->req.total_data_transfer_length); + ADD_STATS_64(hba, rx_pdus, + resp_cqe->task_stat.write_stat.num_r2ts); + } + bnx2i_iscsi_unmap_sg_list(bnx2i_cmd); + + hdr = (struct iscsi_scsi_rsp *)task->hdr; + resp_cqe = (struct bnx2i_cmd_response *)cqe; + hdr->opcode = resp_cqe->op_code; + hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn); + hdr->response = resp_cqe->response; + hdr->cmd_status = resp_cqe->status; + hdr->flags = resp_cqe->response_flags; + hdr->residual_count = cpu_to_be32(resp_cqe->residual_count); + + if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN) + goto done; + + if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) { + datalen = resp_cqe->data_length; + if (datalen < 2) + goto done; + + if (datalen > BNX2I_RQ_WQE_SIZE) { + iscsi_conn_printk(KERN_ERR, conn, + "sense data len %d > RQ sz\n", + datalen); + datalen = BNX2I_RQ_WQE_SIZE; + } else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) { + iscsi_conn_printk(KERN_ERR, conn, + "sense data len %d > conn data\n", + datalen); + datalen = ISCSI_DEF_MAX_RECV_SEG_LEN; + } + + bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen); + bnx2i_put_rq_buf(bnx2i_cmd->conn, 1); + } + +done: + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, + conn->data, datalen); +fail: + spin_unlock_bh(&session->back_lock); + return 0; +} + + +/** + * bnx2i_process_login_resp - this function handles iscsi login response + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process Login Response CQE & complete it to open-iscsi user daemon + */ +static int bnx2i_process_login_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + struct bnx2i_login_response *login; + struct iscsi_login_rsp *resp_hdr; + int pld_len; + int pad_len; + + login = (struct bnx2i_login_response *) cqe; + spin_lock(&session->back_lock); + task = iscsi_itt_to_task(conn, + 
login->itt & ISCSI_LOGIN_RESPONSE_INDEX); + if (!task) + goto done; + + resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = login->op_code; + resp_hdr->flags = login->response_flags; + resp_hdr->max_version = login->version_max; + resp_hdr->active_version = login->version_active; + resp_hdr->hlength = 0; + + hton24(resp_hdr->dlength, login->data_length); + memcpy(resp_hdr->isid, &login->isid_lo, 6); + resp_hdr->tsih = cpu_to_be16(login->tsih); + resp_hdr->itt = task->hdr->itt; + resp_hdr->statsn = cpu_to_be32(login->stat_sn); + resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn); + resp_hdr->status_class = login->status_class; + resp_hdr->status_detail = login->status_detail; + pld_len = login->data_length; + bnx2i_conn->gen_pdu.resp_wr_ptr = + bnx2i_conn->gen_pdu.resp_buf + pld_len; + + pad_len = 0; + if (pld_len & 0x3) + pad_len = 4 - (pld_len % 4); + + if (pad_len) { + int i = 0; + for (i = 0; i < pad_len; i++) { + bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0; + bnx2i_conn->gen_pdu.resp_wr_ptr++; + } + } + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, + bnx2i_conn->gen_pdu.resp_buf, + bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf); +done: + spin_unlock(&session->back_lock); + return 0; +} + + +/** + * bnx2i_process_text_resp - this function handles iscsi text response + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI Text Response CQE& complete it to open-iscsi user daemon + */ +static int bnx2i_process_text_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + struct bnx2i_text_response *text; + struct iscsi_text_rsp *resp_hdr; + int pld_len; + int pad_len; + + text = (struct bnx2i_text_response *) cqe; + spin_lock(&session->back_lock); + task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX); + if (!task) + goto done; + + resp_hdr = (struct iscsi_text_rsp *)&bnx2i_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = text->op_code; + resp_hdr->flags = text->response_flags; + resp_hdr->hlength = 0; + + hton24(resp_hdr->dlength, text->data_length); + resp_hdr->itt = task->hdr->itt; + resp_hdr->ttt = cpu_to_be32(text->ttt); + resp_hdr->statsn = task->hdr->exp_statsn; + resp_hdr->exp_cmdsn = cpu_to_be32(text->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(text->max_cmd_sn); + pld_len = text->data_length; + bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf + + pld_len; + pad_len = 0; + if (pld_len & 0x3) + pad_len = 4 - (pld_len % 4); + + if (pad_len) { + int i = 0; + for (i = 0; i < pad_len; i++) { + bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0; + bnx2i_conn->gen_pdu.resp_wr_ptr++; + } + } + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, + bnx2i_conn->gen_pdu.resp_buf, + bnx2i_conn->gen_pdu.resp_wr_ptr - + bnx2i_conn->gen_pdu.resp_buf); +done: + spin_unlock(&session->back_lock); + return 0; +} + + +/** + * bnx2i_process_tmf_resp - this function handles iscsi TMF response + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI TMF Response CQE and wake up the driver eh thread. 
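+ *
+ * The completion below is essentially a field-by-field translation of the
+ * firmware CQE into a struct iscsi_tm_rsp, roughly:
+ *
+ *	resp_hdr->opcode   = tmf_cqe->op_code;
+ *	resp_hdr->response = tmf_cqe->response;
+ *	resp_hdr->itt      = task->hdr->itt;
+ *	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);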
+ */ +static int bnx2i_process_tmf_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + struct bnx2i_tmf_response *tmf_cqe; + struct iscsi_tm_rsp *resp_hdr; + + tmf_cqe = (struct bnx2i_tmf_response *)cqe; + spin_lock(&session->back_lock); + task = iscsi_itt_to_task(conn, + tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX); + if (!task) + goto done; + + resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = tmf_cqe->op_code; + resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn); + resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn); + resp_hdr->itt = task->hdr->itt; + resp_hdr->response = tmf_cqe->response; + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0); +done: + spin_unlock(&session->back_lock); + return 0; +} + +/** + * bnx2i_process_logout_resp - this function handles iscsi logout response + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI Logout Response CQE & make function call to + * notify the user daemon. + */ +static int bnx2i_process_logout_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + struct bnx2i_logout_response *logout; + struct iscsi_logout_rsp *resp_hdr; + + logout = (struct bnx2i_logout_response *) cqe; + spin_lock(&session->back_lock); + task = iscsi_itt_to_task(conn, + logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX); + if (!task) + goto done; + + resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = logout->op_code; + resp_hdr->flags = logout->response; + resp_hdr->hlength = 0; + + resp_hdr->itt = task->hdr->itt; + resp_hdr->statsn = task->hdr->exp_statsn; + resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn); + + resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait); + resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain); + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0); + + bnx2i_conn->ep->state = EP_STATE_LOGOUT_RESP_RCVD; +done: + spin_unlock(&session->back_lock); + return 0; +} + +/** + * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI NOPIN local completion CQE, frees IIT and command structures + */ +static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct bnx2i_nop_in_msg *nop_in; + struct iscsi_task *task; + + nop_in = (struct bnx2i_nop_in_msg *)cqe; + spin_lock(&session->back_lock); + task = iscsi_itt_to_task(conn, + nop_in->itt & ISCSI_NOP_IN_MSG_INDEX); + if (task) + __iscsi_put_task(task); + spin_unlock(&session->back_lock); +} + +/** + * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd + * @bnx2i_conn: iscsi connection + * + * Firmware advances RQ producer index for every unsolicited PDU even if + * payload data length is '0'. 
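+ *
+ * In effect one dummy RQ element is consumed and immediately returned,
+ * i.e. roughly:
+ *
+ *	char dummy[2];
+ *
+ *	bnx2i_get_rq_buf(bnx2i_conn, dummy, 1);	/* advance consumer index */
+ *	bnx2i_put_rq_buf(bnx2i_conn, 1);	/* replenish the slot */
+ *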
This function makes corresponding + * adjustments on the driver side to match this f/w behavior + */ +static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn) +{ + char dummy_rq_data[2]; + bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1); + bnx2i_put_rq_buf(bnx2i_conn, 1); +} + + +/** + * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI target's proactive iSCSI NOPIN request + */ +static int bnx2i_process_nopin_mesg(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + struct bnx2i_nop_in_msg *nop_in; + struct iscsi_nopin *hdr; + int tgt_async_nop = 0; + + nop_in = (struct bnx2i_nop_in_msg *)cqe; + + spin_lock(&session->back_lock); + hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr; + memset(hdr, 0, sizeof(struct iscsi_hdr)); + hdr->opcode = nop_in->op_code; + hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn); + hdr->ttt = cpu_to_be32(nop_in->ttt); + + if (nop_in->itt == (u16) RESERVED_ITT) { + bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); + hdr->itt = RESERVED_ITT; + tgt_async_nop = 1; + goto done; + } + + /* this is a response to one of our nop-outs */ + task = iscsi_itt_to_task(conn, + (itt_t) (nop_in->itt & ISCSI_NOP_IN_MSG_INDEX)); + if (task) { + hdr->flags = ISCSI_FLAG_CMD_FINAL; + hdr->itt = task->hdr->itt; + hdr->ttt = cpu_to_be32(nop_in->ttt); + memcpy(&hdr->lun, nop_in->lun, 8); + } +done: + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); + spin_unlock(&session->back_lock); + + return tgt_async_nop; +} + + +/** + * bnx2i_process_async_mesg - this function handles iscsi async message + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI ASYNC Message + */ +static void bnx2i_process_async_mesg(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct bnx2i_async_msg *async_cqe; + struct iscsi_async *resp_hdr; + u8 async_event; + + bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); + + async_cqe = (struct bnx2i_async_msg *)cqe; + async_event = async_cqe->async_event; + + if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) { + iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, + "async: scsi events not supported\n"); + return; + } + + spin_lock(&session->back_lock); + resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = async_cqe->op_code; + resp_hdr->flags = 0x80; + + memcpy(&resp_hdr->lun, async_cqe->lun, 8); + resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn); + + resp_hdr->async_event = async_cqe->async_event; + resp_hdr->async_vcode = async_cqe->async_vcode; + + resp_hdr->param1 = cpu_to_be16(async_cqe->param1); + resp_hdr->param2 = cpu_to_be16(async_cqe->param2); + resp_hdr->param3 = cpu_to_be16(async_cqe->param3); + + __iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data, + (struct iscsi_hdr *)resp_hdr, NULL, 0); + spin_unlock(&session->back_lock); +} + + +/** + * bnx2i_process_reject_mesg - process iscsi reject pdu + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry 
for processing + * + * process iSCSI REJECT message + */ +static void bnx2i_process_reject_mesg(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct bnx2i_reject_msg *reject; + struct iscsi_reject *hdr; + + reject = (struct bnx2i_reject_msg *) cqe; + if (reject->data_length) { + bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length); + bnx2i_put_rq_buf(bnx2i_conn, 1); + } else + bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); + + spin_lock(&session->back_lock); + hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr; + memset(hdr, 0, sizeof(struct iscsi_hdr)); + hdr->opcode = reject->op_code; + hdr->reason = reject->reason; + hton24(hdr->dlength, reject->data_length); + hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn); + hdr->ffffffff = cpu_to_be32(RESERVED_ITT); + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data, + reject->data_length); + spin_unlock(&session->back_lock); +} + +/** + * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process command cleanup response CQE during conn shutdown or error recovery + */ +static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct bnx2i_cleanup_response *cmd_clean_rsp; + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + + cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe; + spin_lock(&session->back_lock); + task = iscsi_itt_to_task(conn, + cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX); + if (!task) + printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n", + cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX); + spin_unlock(&session->back_lock); + complete(&bnx2i_conn->cmd_cleanup_cmpl); +} + + +/** + * bnx2i_percpu_io_thread - thread per cpu for ios + * + * @arg: ptr to bnx2i_percpu_info structure + */ +int bnx2i_percpu_io_thread(void *arg) +{ + struct bnx2i_percpu_s *p = arg; + struct bnx2i_work *work, *tmp; + LIST_HEAD(work_list); + + set_user_nice(current, MIN_NICE); + + while (!kthread_should_stop()) { + spin_lock_bh(&p->p_work_lock); + while (!list_empty(&p->work_list)) { + list_splice_init(&p->work_list, &work_list); + spin_unlock_bh(&p->p_work_lock); + + list_for_each_entry_safe(work, tmp, &work_list, list) { + list_del_init(&work->list); + /* work allocated in the bh, freed here */ + bnx2i_process_scsi_cmd_resp(work->session, + work->bnx2i_conn, + &work->cqe); + atomic_dec(&work->bnx2i_conn->work_cnt); + kfree(work); + } + spin_lock_bh(&p->p_work_lock); + } + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_bh(&p->p_work_lock); + schedule(); + } + __set_current_state(TASK_RUNNING); + + return 0; +} + + +/** + * bnx2i_queue_scsi_cmd_resp - queue cmd completion to the percpu thread + * @session: iscsi session + * @bnx2i_conn: bnx2i connection + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * this function is called by generic KCQ handler to queue all pending cmd + * completion CQEs + * + * The implementation is to queue the cmd response based on the + * last recorded command for the given connection. The + * cpu_id gets recorded upon task_xmit. No out-of-order completion! 
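+ *
+ * Roughly, the queuing path below does:
+ *
+ *	cpu = blk_mq_rq_cpu(scsi_cmd_to_rq(task->sc));
+ *	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ *	memcpy(&work->cqe, cqe, sizeof(struct cqe));
+ *	list_add_tail(&work->list, &per_cpu(bnx2i_percpu, cpu).work_list);
+ *	wake_up_process(p->iothread);
+ *
+ * and falls back to completing the CQE inline via
+ * bnx2i_process_scsi_cmd_resp() if the per-cpu io thread or the work
+ * allocation is not available.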
+ */ +static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct bnx2i_nop_in_msg *cqe) +{ + struct bnx2i_work *bnx2i_work = NULL; + struct bnx2i_percpu_s *p = NULL; + struct iscsi_task *task; + struct scsi_cmnd *sc; + int rc = 0; + + spin_lock(&session->back_lock); + task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data, + cqe->itt & ISCSI_CMD_RESPONSE_INDEX); + if (!task || !task->sc) { + spin_unlock(&session->back_lock); + return -EINVAL; + } + sc = task->sc; + + spin_unlock(&session->back_lock); + + p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(scsi_cmd_to_rq(sc))); + spin_lock(&p->p_work_lock); + if (unlikely(!p->iothread)) { + rc = -EINVAL; + goto err; + } + /* Alloc and copy to the cqe */ + bnx2i_work = kzalloc(sizeof(struct bnx2i_work), GFP_ATOMIC); + if (bnx2i_work) { + INIT_LIST_HEAD(&bnx2i_work->list); + bnx2i_work->session = session; + bnx2i_work->bnx2i_conn = bnx2i_conn; + memcpy(&bnx2i_work->cqe, cqe, sizeof(struct cqe)); + list_add_tail(&bnx2i_work->list, &p->work_list); + atomic_inc(&bnx2i_conn->work_cnt); + wake_up_process(p->iothread); + spin_unlock(&p->p_work_lock); + goto done; + } else + rc = -ENOMEM; +err: + spin_unlock(&p->p_work_lock); + bnx2i_process_scsi_cmd_resp(session, bnx2i_conn, (struct cqe *)cqe); +done: + return rc; +} + + +/** + * bnx2i_process_new_cqes - process newly DMA'ed CQE's + * @bnx2i_conn: bnx2i connection + * + * this function is called by generic KCQ handler to process all pending CQE's + */ +static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct bnx2i_hba *hba = bnx2i_conn->hba; + struct qp_info *qp; + struct bnx2i_nop_in_msg *nopin; + int tgt_async_msg; + int cqe_cnt = 0; + + if (bnx2i_conn->ep == NULL) + return 0; + + qp = &bnx2i_conn->ep->qp; + + if (!qp->cq_virt) { + printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!", + hba->netdev->name); + goto out; + } + while (1) { + nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe; + if (nopin->cq_req_sn != qp->cqe_exp_seq_sn) + break; + + if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) { + if (nopin->op_code == ISCSI_OP_NOOP_IN && + nopin->itt == (u16) RESERVED_ITT) { + printk(KERN_ALERT "bnx2i: Unsolicited " + "NOP-In detected for suspended " + "connection dev=%s!\n", + hba->netdev->name); + bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); + goto cqe_out; + } + break; + } + tgt_async_msg = 0; + + switch (nopin->op_code) { + case ISCSI_OP_SCSI_CMD_RSP: + case ISCSI_OP_SCSI_DATA_IN: + /* Run the kthread engine only for data cmds + All other cmds will be completed in this bh! 
*/ + bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin); + goto done; + case ISCSI_OP_LOGIN_RSP: + bnx2i_process_login_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OP_SCSI_TMFUNC_RSP: + bnx2i_process_tmf_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OP_TEXT_RSP: + bnx2i_process_text_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OP_LOGOUT_RSP: + bnx2i_process_logout_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OP_NOOP_IN: + if (bnx2i_process_nopin_mesg(session, bnx2i_conn, + qp->cq_cons_qe)) + tgt_async_msg = 1; + break; + case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION: + bnx2i_process_nopin_local_cmpl(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OP_ASYNC_EVENT: + bnx2i_process_async_mesg(session, bnx2i_conn, + qp->cq_cons_qe); + tgt_async_msg = 1; + break; + case ISCSI_OP_REJECT: + bnx2i_process_reject_mesg(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OPCODE_CLEANUP_RESPONSE: + bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; + default: + printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", + nopin->op_code); + } + + ADD_STATS_64(hba, rx_pdus, 1); + ADD_STATS_64(hba, rx_bytes, nopin->data_length); +done: + if (!tgt_async_msg) { + if (!atomic_read(&bnx2i_conn->ep->num_active_cmds)) + printk(KERN_ALERT "bnx2i (%s): no active cmd! " + "op 0x%x\n", + hba->netdev->name, + nopin->op_code); + else + atomic_dec(&bnx2i_conn->ep->num_active_cmds); + } +cqe_out: + /* clear out in production version only, till beta keep opcode + * field intact, will be helpful in debugging (context dump) + * nopin->op_code = 0; + */ + cqe_cnt++; + qp->cqe_exp_seq_sn++; + if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1)) + qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN; + + if (qp->cq_cons_qe == qp->cq_last_qe) { + qp->cq_cons_qe = qp->cq_first_qe; + qp->cq_cons_idx = 0; + } else { + qp->cq_cons_qe++; + qp->cq_cons_idx++; + } + } +out: + return cqe_cnt; +} + +/** + * bnx2i_fastpath_notification - process global event queue (KCQ) + * @hba: adapter structure pointer + * @new_cqe_kcqe: pointer to newly DMA'ed KCQE entry + * + * Fast path event notification handler, KCQ entry carries context id + * of the connection that has 1 or more pending CQ entries + */ +static void bnx2i_fastpath_notification(struct bnx2i_hba *hba, + struct iscsi_kcqe *new_cqe_kcqe) +{ + struct bnx2i_conn *bnx2i_conn; + u32 iscsi_cid; + int nxt_idx; + + iscsi_cid = new_cqe_kcqe->iscsi_conn_id; + bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); + + if (!bnx2i_conn) { + printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid); + return; + } + if (!bnx2i_conn->ep) { + printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid); + return; + } + + bnx2i_process_new_cqes(bnx2i_conn); + nxt_idx = bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, + CNIC_ARM_CQE_FP); + if (nxt_idx && nxt_idx == bnx2i_process_new_cqes(bnx2i_conn)) + bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP); +} + + +/** + * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE + * @hba: adapter structure pointer + * @update_kcqe: kcqe pointer + * + * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration + */ +static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba, + struct iscsi_kcqe *update_kcqe) +{ + struct bnx2i_conn *conn; + u32 iscsi_cid; + + iscsi_cid = update_kcqe->iscsi_conn_id; + conn = bnx2i_get_conn_from_id(hba, iscsi_cid); + + if (!conn) { + printk(KERN_ALERT "conn_update: cid %x 
not valid\n", iscsi_cid); + return; + } + if (!conn->ep) { + printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid); + return; + } + + if (update_kcqe->completion_status) { + printk(KERN_ALERT "request failed cid %x\n", iscsi_cid); + conn->ep->state = EP_STATE_ULP_UPDATE_FAILED; + } else + conn->ep->state = EP_STATE_ULP_UPDATE_COMPL; + + wake_up_interruptible(&conn->ep->ofld_wait); +} + + +/** + * bnx2i_recovery_que_add_conn - add connection to recovery queue + * @hba: adapter structure pointer + * @bnx2i_conn: iscsi connection + * + * Add connection to recovery queue and schedule adapter eh worker + */ +static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba, + struct bnx2i_conn *bnx2i_conn) +{ + iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data, + ISCSI_ERR_CONN_FAILED); +} + + +/** + * bnx2i_process_tcp_error - process error notification on a given connection + * + * @hba: adapter structure pointer + * @tcp_err: tcp error kcqe pointer + * + * handles tcp level error notifications from FW. + */ +static void bnx2i_process_tcp_error(struct bnx2i_hba *hba, + struct iscsi_kcqe *tcp_err) +{ + struct bnx2i_conn *bnx2i_conn; + u32 iscsi_cid; + + iscsi_cid = tcp_err->iscsi_conn_id; + bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); + + if (!bnx2i_conn) { + printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid); + return; + } + + printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n", + iscsi_cid, tcp_err->completion_status); + bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn); +} + + +/** + * bnx2i_process_iscsi_error - process error notification on a given connection + * @hba: adapter structure pointer + * @iscsi_err: iscsi error kcqe pointer + * + * handles iscsi error notifications from the FW. Firmware based in initial + * handshake classifies iscsi protocol / TCP rfc violation into either + * warning or error indications. If indication is of "Error" type, driver + * will initiate session recovery for that connection/session. 
For + * "Warning" type indication, driver will put out a system log message + * (there will be only one message for each type for the life of the + * session, this is to avoid un-necessarily overloading the system) + */ +static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba, + struct iscsi_kcqe *iscsi_err) +{ + struct bnx2i_conn *bnx2i_conn; + u32 iscsi_cid; + const char *additional_notice = ""; + const char *message; + int need_recovery; + u64 err_mask64; + + iscsi_cid = iscsi_err->iscsi_conn_id; + bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); + if (!bnx2i_conn) { + printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid); + return; + } + + err_mask64 = (0x1ULL << iscsi_err->completion_status); + + if (err_mask64 & iscsi_error_mask) { + need_recovery = 0; + message = "iscsi_warning"; + } else { + need_recovery = 1; + message = "iscsi_error"; + } + + switch (iscsi_err->completion_status) { + case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR: + additional_notice = "hdr digest err"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR: + additional_notice = "data digest err"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE: + additional_notice = "wrong opcode rcvd"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN: + additional_notice = "AHS len > 0 rcvd"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT: + additional_notice = "invalid ITT rcvd"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN: + additional_notice = "wrong StatSN rcvd"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN: + additional_notice = "wrong DataSN rcvd"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T: + additional_notice = "pend R2T violation"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0: + additional_notice = "ERL0, UO"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1: + additional_notice = "ERL0, U1"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2: + additional_notice = "ERL0, U2"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3: + additional_notice = "ERL0, U3"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4: + additional_notice = "ERL0, U4"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5: + additional_notice = "ERL0, U5"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6: + additional_notice = "ERL0, U6"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN: + additional_notice = "invalid resi len"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN: + additional_notice = "MRDSL violation"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO: + additional_notice = "F-bit not set"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV: + additional_notice = "invalid TTT"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN: + additional_notice = "invalid DataSN"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN: + additional_notice = "burst len violation"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF: + additional_notice = "buf offset violation"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN: + additional_notice = "invalid LUN field"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN: + additional_notice = "invalid R2TSN field"; + break; +#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \ + 
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 + case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0: + additional_notice = "invalid cmd len1"; + break; +#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \ + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 + case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1: + additional_notice = "invalid cmd len2"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED: + additional_notice = "pend r2t exceeds MaxOutstandingR2T value"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV: + additional_notice = "TTT is rsvd"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN: + additional_notice = "MBL violation"; + break; +#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \ + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO + case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO: + additional_notice = "data seg len != 0"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN: + additional_notice = "reject pdu len error"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN: + additional_notice = "async pdu len error"; + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN: + additional_notice = "nopin pdu len error"; + break; +#define BNX2_ERR_PEND_R2T_IN_CLEANUP \ + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP + case BNX2_ERR_PEND_R2T_IN_CLEANUP: + additional_notice = "pend r2t in cleanup"; + break; + + case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT: + additional_notice = "IP fragments rcvd"; + break; + case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS: + additional_notice = "IP options error"; + break; + case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG: + additional_notice = "urgent flag error"; + break; + default: + printk(KERN_ALERT "iscsi_err - unknown err %x\n", + iscsi_err->completion_status); + } + + if (need_recovery) { + iscsi_conn_printk(KERN_ALERT, + bnx2i_conn->cls_conn->dd_data, + "bnx2i: %s - %s\n", + message, additional_notice); + + iscsi_conn_printk(KERN_ALERT, + bnx2i_conn->cls_conn->dd_data, + "conn_err - hostno %d conn %p, " + "iscsi_cid %x cid %x\n", + bnx2i_conn->hba->shost->host_no, + bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid, + bnx2i_conn->ep->ep_cid); + bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn); + } else + if (!test_and_set_bit(iscsi_err->completion_status, + (void *) &bnx2i_conn->violation_notified)) + iscsi_conn_printk(KERN_ALERT, + bnx2i_conn->cls_conn->dd_data, + "bnx2i: %s - %s\n", + message, additional_notice); +} + + +/** + * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion + * @hba: adapter structure pointer + * @conn_destroy: conn destroy kcqe pointer + * + * handles connection destroy completion request. 
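+ * The completing iscsi_cid is looked up in the adapter's ep_destroy_list; on a
+ * match, ep->state is updated and the waiter blocked on ep->ofld_wait is woken.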
+ */ +static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba, + struct iscsi_kcqe *conn_destroy) +{ + struct bnx2i_endpoint *ep; + + ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id); + if (!ep) { + printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending " + "offload request, unexpected completion\n"); + return; + } + + if (hba != ep->hba) { + printk(KERN_ALERT "conn destroy- error hba mismatch\n"); + return; + } + + if (conn_destroy->completion_status) { + printk(KERN_ALERT "conn_destroy_cmpl: op failed\n"); + ep->state = EP_STATE_CLEANUP_FAILED; + } else + ep->state = EP_STATE_CLEANUP_CMPL; + wake_up_interruptible(&ep->ofld_wait); +} + + +/** + * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion + * @hba: adapter structure pointer + * @ofld_kcqe: conn offload kcqe pointer + * + * handles initial connection offload completion, ep_connect() thread is + * woken-up to continue with LLP connect process + */ +static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba, + struct iscsi_kcqe *ofld_kcqe) +{ + u32 cid_addr; + struct bnx2i_endpoint *ep; + + ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id); + if (!ep) { + printk(KERN_ALERT "ofld_cmpl: no pend offload request\n"); + return; + } + + if (hba != ep->hba) { + printk(KERN_ALERT "ofld_cmpl: error hba mismatch\n"); + return; + } + + if (ofld_kcqe->completion_status) { + ep->state = EP_STATE_OFLD_FAILED; + if (ofld_kcqe->completion_status == + ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) + printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - unable " + "to allocate iSCSI context resources\n", + hba->netdev->name); + else if (ofld_kcqe->completion_status == + ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE) + printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid " + "opcode\n", hba->netdev->name); + else if (ofld_kcqe->completion_status == + ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY) + /* error status code valid only for 5771x chipset */ + ep->state = EP_STATE_OFLD_FAILED_CID_BUSY; + else + printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid " + "error code %d\n", hba->netdev->name, + ofld_kcqe->completion_status); + } else { + ep->state = EP_STATE_OFLD_COMPL; + cid_addr = ofld_kcqe->iscsi_conn_context_id; + ep->ep_cid = cid_addr; + ep->qp.ctx_base = NULL; + } + wake_up_interruptible(&ep->ofld_wait); +} + +/** + * bnx2i_indicate_kcqe - process iscsi conn update completion KCQE + * @context: adapter structure pointer + * @kcqe: kcqe pointer + * @num_cqe: number of kcqes to process + * + * Generic KCQ event handler/dispatcher + */ +static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[], + u32 num_cqe) +{ + struct bnx2i_hba *hba = context; + int i = 0; + struct iscsi_kcqe *ikcqe = NULL; + + while (i < num_cqe) { + ikcqe = (struct iscsi_kcqe *) kcqe[i++]; + + if (ikcqe->op_code == + ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION) + bnx2i_fastpath_notification(hba, ikcqe); + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN) + bnx2i_process_ofld_cmpl(hba, ikcqe); + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN) + bnx2i_process_update_conn_cmpl(hba, ikcqe); + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) { + if (ikcqe->completion_status != + ISCSI_KCQE_COMPLETION_STATUS_SUCCESS) + bnx2i_iscsi_license_error(hba, ikcqe->\ + completion_status); + else { + set_bit(ADAPTER_STATE_UP, &hba->adapter_state); + bnx2i_get_link_state(hba); + printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: " + "ISCSI_INIT passed\n", + (u8)hba->pcidev->bus->number, + hba->pci_devno, + (u8)hba->pci_func); + + + 
} + } else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN) + bnx2i_process_conn_destroy_cmpl(hba, ikcqe); + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR) + bnx2i_process_iscsi_error(hba, ikcqe); + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR) + bnx2i_process_tcp_error(hba, ikcqe); + else + printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", + ikcqe->op_code); + } +} + + +/** + * bnx2i_indicate_netevent - Generic netdev event handler + * @context: adapter structure pointer + * @event: event type + * @vlan_id: vlans id - associated vlan id with this event + * + * Handles four netdev events, NETDEV_UP, NETDEV_DOWN, + * NETDEV_GOING_DOWN and NETDEV_CHANGE + */ +static void bnx2i_indicate_netevent(void *context, unsigned long event, + u16 vlan_id) +{ + struct bnx2i_hba *hba = context; + + /* Ignore all netevent coming from vlans */ + if (vlan_id != 0) + return; + + switch (event) { + case NETDEV_UP: + if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) + bnx2i_send_fw_iscsi_init_msg(hba); + break; + case NETDEV_DOWN: + clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); + break; + case NETDEV_GOING_DOWN: + set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + iscsi_host_for_each_session(hba->shost, + bnx2i_drop_session); + break; + case NETDEV_CHANGE: + bnx2i_get_link_state(hba); + break; + default: + ; + } +} + + +/** + * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion + * @cm_sk: cnic sock structure pointer + * + * function callback exported via bnx2i - cnic driver interface to + * indicate completion of option-2 TCP connect request. + */ +static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; + + if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state)) + ep->state = EP_STATE_CONNECT_FAILED; + else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags)) + ep->state = EP_STATE_CONNECT_COMPL; + else + ep->state = EP_STATE_CONNECT_FAILED; + + wake_up_interruptible(&ep->ofld_wait); +} + + +/** + * bnx2i_cm_close_cmpl - process tcp conn close completion + * @cm_sk: cnic sock structure pointer + * + * function callback exported via bnx2i - cnic driver interface to + * indicate completion of option-2 graceful TCP connect shutdown + */ +static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; + + ep->state = EP_STATE_DISCONN_COMPL; + wake_up_interruptible(&ep->ofld_wait); +} + + +/** + * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion + * @cm_sk: cnic sock structure pointer + * + * function callback exported via bnx2i - cnic driver interface to + * indicate completion of option-2 abortive TCP connect termination + */ +static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; + + ep->state = EP_STATE_DISCONN_COMPL; + wake_up_interruptible(&ep->ofld_wait); +} + + +/** + * bnx2i_cm_remote_close - process received TCP FIN + * @cm_sk: cnic sock structure pointer + * + * function callback exported via bnx2i - cnic driver interface to indicate + * async TCP events such as FIN + */ +static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; + + ep->state = EP_STATE_TCP_FIN_RCVD; + if (ep->conn) + bnx2i_recovery_que_add_conn(ep->hba, ep->conn); +} + +/** + * bnx2i_cm_remote_abort - 
process TCP RST and start conn cleanup + * @cm_sk: cnic sock structure pointer + * + * function callback exported via bnx2i - cnic driver interface to + * indicate async TCP events (RST) sent by the peer. + */ +static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; + u32 old_state = ep->state; + + ep->state = EP_STATE_TCP_RST_RCVD; + if (old_state == EP_STATE_DISCONN_START) + wake_up_interruptible(&ep->ofld_wait); + else + if (ep->conn) + bnx2i_recovery_que_add_conn(ep->hba, ep->conn); +} + + +static int bnx2i_send_nl_mesg(void *context, u32 msg_type, + char *buf, u16 buflen) +{ + struct bnx2i_hba *hba = context; + int rc; + + if (!hba) + return -ENODEV; + + rc = iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport, + msg_type, buf, buflen); + if (rc) + printk(KERN_ALERT "bnx2i: private nl message send error\n"); + + return rc; +} + + +/* + * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure + * carrying callback function pointers + */ +struct cnic_ulp_ops bnx2i_cnic_cb = { + .cnic_init = bnx2i_ulp_init, + .cnic_exit = bnx2i_ulp_exit, + .cnic_start = bnx2i_start, + .cnic_stop = bnx2i_stop, + .indicate_kcqes = bnx2i_indicate_kcqe, + .indicate_netevent = bnx2i_indicate_netevent, + .cm_connect_complete = bnx2i_cm_connect_cmpl, + .cm_close_complete = bnx2i_cm_close_cmpl, + .cm_abort_complete = bnx2i_cm_abort_cmpl, + .cm_remote_close = bnx2i_cm_remote_close, + .cm_remote_abort = bnx2i_cm_remote_abort, + .iscsi_nl_send_msg = bnx2i_send_nl_mesg, + .cnic_get_stats = bnx2i_get_stats, + .owner = THIS_MODULE +}; + + +/** + * bnx2i_map_ep_dbell_regs - map connection doorbell registers + * @ep: bnx2i endpoint + * + * maps connection's SQ and RQ doorbell registers, 5706/5708/5709 hosts these + * register in BAR #0. Whereas in 57710 these register are accessed by + * mapping BAR #1 + */ +int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) +{ + u32 cid_num; + u32 reg_off; + u32 first_l4l5; + u32 ctx_sz; + u32 config2; + resource_size_t reg_base; + + cid_num = bnx2i_get_cid_num(ep); + + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { + reg_base = pci_resource_start(ep->hba->pcidev, + BNX2X_DOORBELL_PCI_BAR); + reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF); + ep->qp.ctx_base = ioremap(reg_base + reg_off, 4); + if (!ep->qp.ctx_base) + return -ENOMEM; + goto arm_cq; + } + + if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) && + (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) { + config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2); + first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5; + ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3; + if (ctx_sz) + reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE + + BNX2I_570X_PAGE_SIZE_DEFAULT * + (((cid_num - first_l4l5) / ctx_sz) + 256); + else + reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num); + } else + /* 5709 device in normal node and 5706/5708 devices */ + reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num); + + ep->qp.ctx_base = ioremap(ep->hba->reg_base + reg_off, + MB_KERNEL_CTX_SIZE); + if (!ep->qp.ctx_base) + return -ENOMEM; + +arm_cq: + bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE); + return 0; +} diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c new file mode 100644 index 000000000..872ad37e2 --- /dev/null +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -0,0 +1,550 @@ +/* bnx2i.c: QLogic NetXtreme II iSCSI driver. 
+ * + * Copyright (c) 2006 - 2013 Broadcom Corporation + * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mike Christie + * Copyright (c) 2014, QLogic Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) + * Maintained by: QLogic-Storage-Upstream@qlogic.com + */ + +#include "bnx2i.h" + +static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); +static u32 adapter_count; + +#define DRV_MODULE_NAME "bnx2i" +#define DRV_MODULE_VERSION "2.7.10.1" +#define DRV_MODULE_RELDATE "Jul 16, 2014" + +static char version[] = + "QLogic NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ + " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + + +MODULE_AUTHOR("Anil Veerabhadrappa and " + "Eddie Wai "); + +MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/57710/57711/57712" + "/57800/57810/57840 iSCSI Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +static DEFINE_MUTEX(bnx2i_dev_lock); + +unsigned int event_coal_min = 24; +module_param(event_coal_min, int, 0664); +MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands"); + +unsigned int event_coal_div = 2; +module_param(event_coal_div, int, 0664); +MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor"); + +unsigned int en_tcp_dack = 1; +module_param(en_tcp_dack, int, 0664); +MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK"); + +unsigned int error_mask1 = 0x00; +module_param(error_mask1, uint, 0664); +MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1"); + +unsigned int error_mask2 = 0x00; +module_param(error_mask2, uint, 0664); +MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2"); + +unsigned int sq_size; +module_param(sq_size, int, 0664); +MODULE_PARM_DESC(sq_size, "Configure SQ size"); + +unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT; +module_param(rq_size, int, 0664); +MODULE_PARM_DESC(rq_size, "Configure RQ size"); + +u64 iscsi_error_mask = 0x00; + +DEFINE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu); + +/** + * bnx2i_identify_device - identifies NetXtreme II device type + * @hba: Adapter structure pointer + * @dev: Corresponding cnic device + * + * This function identifies the NX2 device type and sets appropriate + * queue mailbox register access method, 5709 requires driver to + * access MBOX regs using *bin* mode + */ +void bnx2i_identify_device(struct bnx2i_hba *hba, struct cnic_dev *dev) +{ + hba->cnic_dev_type = 0; + if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { + if (hba->pci_did == PCI_DEVICE_ID_NX2_5706 || + hba->pci_did == PCI_DEVICE_ID_NX2_5706S) { + set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type); + } else if (hba->pci_did == PCI_DEVICE_ID_NX2_5708 || + hba->pci_did == PCI_DEVICE_ID_NX2_5708S) { + set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type); + } else if (hba->pci_did == PCI_DEVICE_ID_NX2_5709 || + hba->pci_did == PCI_DEVICE_ID_NX2_5709S) { + set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type); + hba->mail_queue_access = BNX2I_MQ_BIN_MODE; + } + } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { + set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type); + } else { + printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n", + hba->pci_did); + } +} + + +/** + * get_adapter_list_head - returns head of adapter list + */ +struct bnx2i_hba 
*get_adapter_list_head(void) +{ + struct bnx2i_hba *hba = NULL; + struct bnx2i_hba *tmp_hba; + + if (!adapter_count) + goto hba_not_found; + + mutex_lock(&bnx2i_dev_lock); + list_for_each_entry(tmp_hba, &adapter_list, link) { + if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) { + hba = tmp_hba; + break; + } + } + mutex_unlock(&bnx2i_dev_lock); +hba_not_found: + return hba; +} + + +/** + * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance + * @cnic: pointer to cnic device instance + * + */ +struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic) +{ + struct bnx2i_hba *hba, *temp; + + mutex_lock(&bnx2i_dev_lock); + list_for_each_entry_safe(hba, temp, &adapter_list, link) { + if (hba->cnic == cnic) { + mutex_unlock(&bnx2i_dev_lock); + return hba; + } + } + mutex_unlock(&bnx2i_dev_lock); + return NULL; +} + + +/** + * bnx2i_start - cnic callback to initialize & start adapter instance + * @handle: transparent handle pointing to adapter structure + * + * This function maps adapter structure to pcidev structure and initiates + * firmware handshake to enable/initialize on chip iscsi components + * This bnx2i - cnic interface api callback is issued after following + * 2 conditions are met - + * a) underlying network interface is up (marked by event 'NETDEV_UP' + * from netdev + * b) bnx2i adapter instance is registered + */ +void bnx2i_start(void *handle) +{ +#define BNX2I_INIT_POLL_TIME (1000 / HZ) + struct bnx2i_hba *hba = handle; + int i = HZ; + + /* On some bnx2x devices, it is possible that iSCSI is no + * longer supported after firmware is downloaded. In that + * case, the iscsi_init_msg will return failure. + */ + + bnx2i_send_fw_iscsi_init_msg(hba); + while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && + !test_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state) && i--) + msleep(BNX2I_INIT_POLL_TIME); +} + + +/** + * bnx2i_chip_cleanup - local routine to handle chip cleanup + * @hba: Adapter instance to register + * + * Driver checks if adapter still has any active connections before + * executing the cleanup process + */ +static void bnx2i_chip_cleanup(struct bnx2i_hba *hba) +{ + struct bnx2i_endpoint *bnx2i_ep; + struct list_head *pos, *tmp; + + if (hba->ofld_conns_active) { + /* Stage to force the disconnection + * This is the case where the daemon is either slow or + * not present + */ + printk(KERN_ALERT "bnx2i: (%s) chip cleanup for %d active " + "connections\n", hba->netdev->name, + hba->ofld_conns_active); + mutex_lock(&hba->net_dev_lock); + list_for_each_safe(pos, tmp, &hba->ep_active_list) { + bnx2i_ep = list_entry(pos, struct bnx2i_endpoint, link); + /* Clean up the chip only */ + bnx2i_hw_ep_disconnect(bnx2i_ep); + bnx2i_ep->cm_sk = NULL; + } + mutex_unlock(&hba->net_dev_lock); + } +} + + +/** + * bnx2i_stop - cnic callback to shutdown adapter instance + * @handle: transparent handle pointing to adapter structure + * + * driver checks if adapter is already in shutdown mode, if not start + * the shutdown process + */ +void bnx2i_stop(void *handle) +{ + struct bnx2i_hba *hba = handle; + int conns_active; + int wait_delay = 1 * HZ; + + /* check if cleanup happened in GOING_DOWN context */ + if (!test_and_set_bit(ADAPTER_STATE_GOING_DOWN, + &hba->adapter_state)) { + iscsi_host_for_each_session(hba->shost, + bnx2i_drop_session); + wait_delay = hba->hba_shutdown_tmo; + } + /* Wait for inflight offload connection tasks to complete before + * proceeding. 
Forcefully terminate all connection recovery in + * progress at the earliest, either in bind(), send_pdu(LOGIN), + * or conn_start() + */ + wait_event_interruptible_timeout(hba->eh_wait, + (list_empty(&hba->ep_ofld_list) && + list_empty(&hba->ep_destroy_list)), + 2 * HZ); + /* Wait for all endpoints to be torn down, Chip will be reset once + * control returns to network driver. So it is required to cleanup and + * release all connection resources before returning from this routine. + */ + while (hba->ofld_conns_active) { + conns_active = hba->ofld_conns_active; + wait_event_interruptible_timeout(hba->eh_wait, + (hba->ofld_conns_active != conns_active), + wait_delay); + if (hba->ofld_conns_active == conns_active) + break; + } + bnx2i_chip_cleanup(hba); + + /* This flag should be cleared last so that ep_disconnect() gracefully + * cleans up connection context + */ + clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); +} + + +/** + * bnx2i_init_one - initialize an adapter instance and allocate memory resources + * @hba: bnx2i adapter instance + * @cnic: cnic device handle + * + * Global resource lock is held during critical sections below. This routine is + * called from either cnic_register_driver() or device hot plug context and + * and does majority of device specific initialization + */ +static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic) +{ + int rc; + + mutex_lock(&bnx2i_dev_lock); + if (!cnic->max_iscsi_conn) { + printk(KERN_ALERT "bnx2i: dev %s does not support " + "iSCSI\n", hba->netdev->name); + rc = -EOPNOTSUPP; + goto out; + } + + hba->cnic = cnic; + rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba); + if (!rc) { + hba->age++; + set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + list_add_tail(&hba->link, &adapter_list); + adapter_count++; + } else if (rc == -EBUSY) /* duplicate registration */ + printk(KERN_ALERT "bnx2i, duplicate registration" + "hba=%p, cnic=%p\n", hba, cnic); + else if (rc == -EAGAIN) + printk(KERN_ERR "bnx2i, driver not registered\n"); + else if (rc == -EINVAL) + printk(KERN_ERR "bnx2i, invalid type %d\n", CNIC_ULP_ISCSI); + else + printk(KERN_ERR "bnx2i dev reg, unknown error, %d\n", rc); + +out: + mutex_unlock(&bnx2i_dev_lock); + + return rc; +} + + +/** + * bnx2i_ulp_init - initialize an adapter instance + * @dev: cnic device handle + * + * Called from cnic_register_driver() context to initialize all enumerated + * cnic devices. This routine allocate adapter structure and other + * device specific resources. 
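+ * If the subsequent registration with cnic in bnx2i_init_one() fails, the
+ * freshly allocated hba is released again via bnx2i_free_hba().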
+ */ +void bnx2i_ulp_init(struct cnic_dev *dev) +{ + struct bnx2i_hba *hba; + + /* Allocate a HBA structure for this device */ + hba = bnx2i_alloc_hba(dev); + if (!hba) { + printk(KERN_ERR "bnx2i init: hba initialization failed\n"); + return; + } + + /* Get PCI related information and update hba struct members */ + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + if (bnx2i_init_one(hba, dev)) { + printk(KERN_ERR "bnx2i - hba %p init failed\n", hba); + bnx2i_free_hba(hba); + } +} + + +/** + * bnx2i_ulp_exit - shuts down adapter instance and frees all resources + * @dev: cnic device handle + * + */ +void bnx2i_ulp_exit(struct cnic_dev *dev) +{ + struct bnx2i_hba *hba; + + hba = bnx2i_find_hba_for_cnic(dev); + if (!hba) { + printk(KERN_INFO "bnx2i_ulp_exit: hba not " + "found, dev 0x%p\n", dev); + return; + } + mutex_lock(&bnx2i_dev_lock); + list_del_init(&hba->link); + adapter_count--; + + if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + } + mutex_unlock(&bnx2i_dev_lock); + + bnx2i_free_hba(hba); +} + + +/** + * bnx2i_get_stats - Retrieve various statistic from iSCSI offload + * @handle: bnx2i_hba + * + * function callback exported via bnx2i - cnic driver interface to + * retrieve various iSCSI offload related statistics. + */ +int bnx2i_get_stats(void *handle) +{ + struct bnx2i_hba *hba = handle; + struct iscsi_stats_info *stats; + + if (!hba) + return -EINVAL; + + stats = (struct iscsi_stats_info *)hba->cnic->stats_addr; + + if (!stats) + return -ENOMEM; + + strscpy(stats->version, DRV_MODULE_VERSION, sizeof(stats->version)); + memcpy(stats->mac_add1 + 2, hba->cnic->mac_addr, ETH_ALEN); + + stats->max_frame_size = hba->netdev->mtu; + stats->txq_size = hba->max_sqes; + stats->rxq_size = hba->max_cqes; + + stats->txq_avg_depth = 0; + stats->rxq_avg_depth = 0; + + GET_STATS_64(hba, stats, rx_pdus); + GET_STATS_64(hba, stats, rx_bytes); + + GET_STATS_64(hba, stats, tx_pdus); + GET_STATS_64(hba, stats, tx_bytes); + + return 0; +} + + +/** + * bnx2i_cpu_online - Create a receive thread for an online CPU + * + * @cpu: cpu index for the online cpu + */ +static int bnx2i_cpu_online(unsigned int cpu) +{ + struct bnx2i_percpu_s *p; + struct task_struct *thread; + + p = &per_cpu(bnx2i_percpu, cpu); + + thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p, + cpu_to_node(cpu), + "bnx2i_thread/%d", cpu); + if (IS_ERR(thread)) + return PTR_ERR(thread); + + /* bind thread to the cpu */ + kthread_bind(thread, cpu); + p->iothread = thread; + wake_up_process(thread); + return 0; +} + +static int bnx2i_cpu_offline(unsigned int cpu) +{ + struct bnx2i_percpu_s *p; + struct task_struct *thread; + struct bnx2i_work *work, *tmp; + + /* Prevent any new work from being queued for this CPU */ + p = &per_cpu(bnx2i_percpu, cpu); + spin_lock_bh(&p->p_work_lock); + thread = p->iothread; + p->iothread = NULL; + + /* Free all work in the list */ + list_for_each_entry_safe(work, tmp, &p->work_list, list) { + list_del_init(&work->list); + bnx2i_process_scsi_cmd_resp(work->session, + work->bnx2i_conn, &work->cqe); + kfree(work); + } + + spin_unlock_bh(&p->p_work_lock); + if (thread) + kthread_stop(thread); + return 0; +} + +static enum cpuhp_state bnx2i_online_state; + +/** + * bnx2i_mod_init - module init entry point + * + * initialize any driver wide global data structures such as endpoint pool, + * tcp port manager/queue, sysfs. 
finally driver will register itself + * with the cnic module + */ +static int __init bnx2i_mod_init(void) +{ + int err; + unsigned cpu = 0; + struct bnx2i_percpu_s *p; + + printk(KERN_INFO "%s", version); + + if (sq_size && !is_power_of_2(sq_size)) + sq_size = roundup_pow_of_two(sq_size); + + bnx2i_scsi_xport_template = + iscsi_register_transport(&bnx2i_iscsi_transport); + if (!bnx2i_scsi_xport_template) { + printk(KERN_ERR "Could not register bnx2i transport.\n"); + err = -ENOMEM; + goto out; + } + + err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb); + if (err) { + printk(KERN_ERR "Could not register bnx2i cnic driver.\n"); + goto unreg_xport; + } + + /* Create percpu kernel threads to handle iSCSI I/O completions */ + for_each_possible_cpu(cpu) { + p = &per_cpu(bnx2i_percpu, cpu); + INIT_LIST_HEAD(&p->work_list); + spin_lock_init(&p->p_work_lock); + p->iothread = NULL; + } + + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2i:online", + bnx2i_cpu_online, bnx2i_cpu_offline); + if (err < 0) + goto unreg_driver; + bnx2i_online_state = err; + return 0; + +unreg_driver: + cnic_unregister_driver(CNIC_ULP_ISCSI); +unreg_xport: + iscsi_unregister_transport(&bnx2i_iscsi_transport); +out: + return err; +} + + +/** + * bnx2i_mod_exit - module cleanup/exit entry point + * + * Global resource lock and host adapter lock is held during critical sections + * in this function. Driver will browse through the adapter list, cleans-up + * each instance, unregisters iscsi transport name and finally driver will + * unregister itself with the cnic module + */ +static void __exit bnx2i_mod_exit(void) +{ + struct bnx2i_hba *hba; + + mutex_lock(&bnx2i_dev_lock); + while (!list_empty(&adapter_list)) { + hba = list_entry(adapter_list.next, struct bnx2i_hba, link); + list_del(&hba->link); + adapter_count--; + + if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + bnx2i_chip_cleanup(hba); + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + } + + bnx2i_free_hba(hba); + } + mutex_unlock(&bnx2i_dev_lock); + + cpuhp_remove_state(bnx2i_online_state); + + iscsi_unregister_transport(&bnx2i_iscsi_transport); + cnic_unregister_driver(CNIC_ULP_ISCSI); +} + +module_init(bnx2i_mod_init); +module_exit(bnx2i_mod_exit); diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c new file mode 100644 index 000000000..9971f32a6 --- /dev/null +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -0,0 +1,2306 @@ +/* + * bnx2i_iscsi.c: QLogic NetXtreme II iSCSI driver. + * + * Copyright (c) 2006 - 2013 Broadcom Corporation + * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mike Christie + * Copyright (c) 2014, QLogic Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) + * Maintained by: QLogic-Storage-Upstream@qlogic.com + */ + +#include +#include +#include +#include "bnx2i.h" + +struct scsi_transport_template *bnx2i_scsi_xport_template; +struct iscsi_transport bnx2i_iscsi_transport; +static const struct scsi_host_template bnx2i_host_template; + +/* + * Global endpoint resource info + */ +static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */ + +DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu); + +static int bnx2i_adapter_ready(struct bnx2i_hba *hba) +{ + int retval = 0; + + if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) || + test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) || + test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state)) + retval = -EPERM; + return retval; +} + +/** + * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks + * @cmd: iscsi cmd struct pointer + * @buf_off: absolute buffer offset + * @start_bd_off: u32 pointer to return the offset within the BD + * indicated by 'start_bd_idx' on which 'buf_off' falls + * @start_bd_idx: index of the BD on which 'buf_off' falls + * + * identifies & marks various bd info for scsi command's imm data, + * unsolicited data and the first solicited data seq. + */ +static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off, + u32 *start_bd_off, u32 *start_bd_idx) +{ + struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl; + u32 cur_offset = 0; + u32 cur_bd_idx = 0; + + if (buf_off) { + while (buf_off >= (cur_offset + bd_tbl->buffer_length)) { + cur_offset += bd_tbl->buffer_length; + cur_bd_idx++; + bd_tbl++; + } + } + + *start_bd_off = buf_off - cur_offset; + *start_bd_idx = cur_bd_idx; +} + +/** + * bnx2i_setup_write_cmd_bd_info - sets up BD various information + * @task: transport layer's cmd struct pointer + * + * identifies & marks various bd info for scsi command's immediate data, + * unsolicited data and first solicited data seq which includes BD start + * index & BD buf off. his function takes into account iscsi parameter such + * as immediate data and unsolicited data is support on this connection. + */ +static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task) +{ + struct bnx2i_cmd *cmd = task->dd_data; + u32 start_bd_offset; + u32 start_bd_idx; + u32 buffer_offset = 0; + u32 cmd_len = cmd->req.total_data_transfer_length; + + /* if ImmediateData is turned off & IntialR2T is turned on, + * there will be no immediate or unsolicited data, just return. 
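+ * Otherwise the write payload is laid out as immediate data (task->imm_count
+ * bytes) first, then unsolicited data, then the first solicited sequence.
+ * For example (illustrative numbers only): with 4KB of immediate data, 12KB of
+ * unsolicited data and 8KB BDs, the first solicited byte is at offset 16KB,
+ * which bnx2i_get_write_cmd_bd_idx() maps to BD index 2, offset 0 within it.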
+ */ + if (!iscsi_task_has_unsol_data(task) && !task->imm_count) + return; + + /* Immediate data */ + buffer_offset += task->imm_count; + if (task->imm_count == cmd_len) + return; + + if (iscsi_task_has_unsol_data(task)) { + bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset, + &start_bd_offset, &start_bd_idx); + cmd->req.ud_buffer_offset = start_bd_offset; + cmd->req.ud_start_bd_index = start_bd_idx; + buffer_offset += task->unsol_r2t.data_length; + } + + if (buffer_offset != cmd_len) { + bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset, + &start_bd_offset, &start_bd_idx); + if ((start_bd_offset > task->conn->session->first_burst) || + (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) { + int i = 0; + + iscsi_conn_printk(KERN_ALERT, task->conn, + "bnx2i- error, buf offset 0x%x " + "bd_valid %d use_sg %d\n", + buffer_offset, cmd->io_tbl.bd_valid, + scsi_sg_count(cmd->scsi_cmd)); + for (i = 0; i < cmd->io_tbl.bd_valid; i++) + iscsi_conn_printk(KERN_ALERT, task->conn, + "bnx2i err, bd[%d]: len %x\n", + i, cmd->io_tbl.bd_tbl[i].\ + buffer_length); + } + cmd->req.sd_buffer_offset = start_bd_offset; + cmd->req.sd_start_bd_index = start_bd_idx; + } +} + + + +/** + * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table + * @hba: adapter instance + * @cmd: iscsi cmd struct pointer + * + * map SG list + */ +static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) +{ + struct scsi_cmnd *sc = cmd->scsi_cmd; + struct iscsi_bd *bd = cmd->io_tbl.bd_tbl; + struct scatterlist *sg; + int byte_count = 0; + int bd_count = 0; + int sg_count; + int sg_len; + u64 addr; + int i; + + BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD); + + sg_count = scsi_dma_map(sc); + + scsi_for_each_sg(sc, sg, sg_count, i) { + sg_len = sg_dma_len(sg); + addr = (u64) sg_dma_address(sg); + bd[bd_count].buffer_addr_lo = addr & 0xffffffff; + bd[bd_count].buffer_addr_hi = addr >> 32; + bd[bd_count].buffer_length = sg_len; + bd[bd_count].flags = 0; + if (bd_count == 0) + bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN; + + byte_count += sg_len; + bd_count++; + } + + if (bd_count) + bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN; + + BUG_ON(byte_count != scsi_bufflen(sc)); + return bd_count; +} + +/** + * bnx2i_iscsi_map_sg_list - maps SG list + * @cmd: iscsi cmd struct pointer + * + * creates BD list table for the command + */ +static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd) +{ + int bd_count; + + bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd); + if (!bd_count) { + struct iscsi_bd *bd = cmd->io_tbl.bd_tbl; + + bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0; + bd[0].buffer_length = bd[0].flags = 0; + } + cmd->io_tbl.bd_valid = bd_count; +} + + +/** + * bnx2i_iscsi_unmap_sg_list - unmaps SG list + * @cmd: iscsi cmd struct pointer + * + * unmap IO buffers and invalidate the BD table + */ +void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd) +{ + struct scsi_cmnd *sc = cmd->scsi_cmd; + + if (cmd->io_tbl.bd_valid && sc) { + scsi_dma_unmap(sc); + cmd->io_tbl.bd_valid = 0; + } +} + +static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd) +{ + memset(&cmd->req, 0x00, sizeof(cmd->req)); + cmd->req.op_code = 0xFF; + cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma; + cmd->req.bd_list_addr_hi = + (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32); + +} + + +/** + * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid' + * @hba: pointer to adapter instance + * @bnx2i_conn: pointer to iscsi connection + * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1) + * + * update iscsi cid table 
entry with connection pointer. This enables + * driver to quickly get hold of connection structure pointer in + * completion/interrupt thread using iscsi context ID + */ +static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba, + struct bnx2i_conn *bnx2i_conn, + u32 iscsi_cid) +{ + if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) { + iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, + "conn bind - entry #%d not free\n", iscsi_cid); + return -EBUSY; + } + + hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn; + return 0; +} + + +/** + * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr + * @hba: pointer to adapter instance + * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1) + */ +struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, + u16 iscsi_cid) +{ + if (!hba->cid_que.conn_cid_tbl) { + printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n"); + return NULL; + + } else if (iscsi_cid >= hba->max_active_conns) { + printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid); + return NULL; + } + return hba->cid_que.conn_cid_tbl[iscsi_cid]; +} + + +/** + * bnx2i_alloc_iscsi_cid - allocates a iscsi_cid from free pool + * @hba: pointer to adapter instance + */ +static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba) +{ + int idx; + + if (!hba->cid_que.cid_free_cnt) + return -1; + + idx = hba->cid_que.cid_q_cons_idx; + hba->cid_que.cid_q_cons_idx++; + if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx) + hba->cid_que.cid_q_cons_idx = 0; + + hba->cid_que.cid_free_cnt--; + return hba->cid_que.cid_que[idx]; +} + + +/** + * bnx2i_free_iscsi_cid - returns tcp port to free list + * @hba: pointer to adapter instance + * @iscsi_cid: iscsi context ID to free + */ +static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid) +{ + int idx; + + if (iscsi_cid == (u16) -1) + return; + + hba->cid_que.cid_free_cnt++; + + idx = hba->cid_que.cid_q_prod_idx; + hba->cid_que.cid_que[idx] = iscsi_cid; + hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL; + hba->cid_que.cid_q_prod_idx++; + if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx) + hba->cid_que.cid_q_prod_idx = 0; +} + + +/** + * bnx2i_setup_free_cid_que - sets up free iscsi cid queue + * @hba: pointer to adapter instance + * + * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table, + * and initialize table attributes + */ +static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba) +{ + int mem_size; + int i; + + mem_size = hba->max_active_conns * sizeof(u32); + mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL); + if (!hba->cid_que.cid_que_base) + return -ENOMEM; + + mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *); + mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL); + if (!hba->cid_que.conn_cid_tbl) { + kfree(hba->cid_que.cid_que_base); + hba->cid_que.cid_que_base = NULL; + return -ENOMEM; + } + + hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base; + hba->cid_que.cid_q_prod_idx = 0; + hba->cid_que.cid_q_cons_idx = 0; + hba->cid_que.cid_q_max_idx = hba->max_active_conns; + hba->cid_que.cid_free_cnt = hba->max_active_conns; + + for (i = 0; i < hba->max_active_conns; i++) { + hba->cid_que.cid_que[i] = i; + hba->cid_que.conn_cid_tbl[i] = NULL; + } + return 0; +} + + +/** + * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources + * @hba: pointer to adapter instance + */ +static void 
bnx2i_release_free_cid_que(struct bnx2i_hba *hba) +{ + kfree(hba->cid_que.cid_que_base); + hba->cid_que.cid_que_base = NULL; + + kfree(hba->cid_que.conn_cid_tbl); + hba->cid_que.conn_cid_tbl = NULL; +} + + +/** + * bnx2i_alloc_ep - allocates ep structure from global pool + * @hba: pointer to adapter instance + * + * routine allocates a free endpoint structure from global pool and + * a tcp port to be used for this connection. Global resource lock, + * 'bnx2i_resc_lock' is held while accessing shared global data structures + */ +static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba) +{ + struct iscsi_endpoint *ep; + struct bnx2i_endpoint *bnx2i_ep; + u32 ec_div; + + ep = iscsi_create_endpoint(sizeof(*bnx2i_ep)); + if (!ep) { + printk(KERN_ERR "bnx2i: Could not allocate ep\n"); + return NULL; + } + + bnx2i_ep = ep->dd_data; + bnx2i_ep->cls_ep = ep; + INIT_LIST_HEAD(&bnx2i_ep->link); + bnx2i_ep->state = EP_STATE_IDLE; + bnx2i_ep->ep_iscsi_cid = (u16) -1; + bnx2i_ep->hba = hba; + bnx2i_ep->hba_age = hba->age; + + ec_div = event_coal_div; + while (ec_div >>= 1) + bnx2i_ep->ec_shift += 1; + + hba->ofld_conns_active++; + init_waitqueue_head(&bnx2i_ep->ofld_wait); + return ep; +} + + +/** + * bnx2i_free_ep - free endpoint + * @ep: pointer to iscsi endpoint structure + */ +static void bnx2i_free_ep(struct iscsi_endpoint *ep) +{ + struct bnx2i_endpoint *bnx2i_ep = ep->dd_data; + unsigned long flags; + + spin_lock_irqsave(&bnx2i_resc_lock, flags); + bnx2i_ep->state = EP_STATE_IDLE; + bnx2i_ep->hba->ofld_conns_active--; + + if (bnx2i_ep->ep_iscsi_cid != (u16) -1) + bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid); + + if (bnx2i_ep->conn) { + bnx2i_ep->conn->ep = NULL; + bnx2i_ep->conn = NULL; + } + + bnx2i_ep->hba = NULL; + spin_unlock_irqrestore(&bnx2i_resc_lock, flags); + iscsi_destroy_endpoint(ep); +} + + +/** + * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command + * @hba: adapter instance pointer + * @session: iscsi session pointer + * @cmd: iscsi command structure + */ +static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session, + struct bnx2i_cmd *cmd) +{ + struct io_bdt *io = &cmd->io_tbl; + struct iscsi_bd *bd; + + io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, + ISCSI_MAX_BDS_PER_CMD * sizeof(*bd), + &io->bd_tbl_dma, GFP_KERNEL); + if (!io->bd_tbl) { + iscsi_session_printk(KERN_ERR, session, "Could not " + "allocate bdt.\n"); + return -ENOMEM; + } + io->bd_valid = 0; + return 0; +} + +/** + * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table + * @hba: adapter instance pointer + * @session: iscsi session pointer + */ +static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba, + struct iscsi_session *session) +{ + int i; + + for (i = 0; i < session->cmds_max; i++) { + struct iscsi_task *task = session->cmds[i]; + struct bnx2i_cmd *cmd = task->dd_data; + + if (cmd->io_tbl.bd_tbl) + dma_free_coherent(&hba->pcidev->dev, + ISCSI_MAX_BDS_PER_CMD * + sizeof(struct iscsi_bd), + cmd->io_tbl.bd_tbl, + cmd->io_tbl.bd_tbl_dma); + } + +} + + +/** + * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session + * @hba: adapter instance pointer + * @session: iscsi session pointer + */ +static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba, + struct iscsi_session *session) +{ + int i; + + for (i = 0; i < session->cmds_max; i++) { + struct iscsi_task *task = session->cmds[i]; + struct bnx2i_cmd *cmd = task->dd_data; + + task->hdr = &cmd->hdr; + task->hdr_max = sizeof(struct iscsi_hdr); + + if 
(bnx2i_alloc_bdt(hba, session, cmd)) + goto free_bdts; + } + + return 0; + +free_bdts: + bnx2i_destroy_cmd_pool(hba, session); + return -ENOMEM; +} + + +/** + * bnx2i_setup_mp_bdt - allocate BD table resources + * @hba: pointer to adapter structure + * + * Allocate memory for dummy buffer and associated BD + * table to be used by middle path (MP) requests + */ +static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) +{ + int rc = 0; + struct iscsi_bd *mp_bdt; + u64 addr; + + hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + &hba->mp_bd_dma, GFP_KERNEL); + if (!hba->mp_bd_tbl) { + printk(KERN_ERR "unable to allocate Middle Path BDT\n"); + rc = -1; + goto out; + } + + hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, + CNIC_PAGE_SIZE, + &hba->dummy_buf_dma, GFP_KERNEL); + if (!hba->dummy_buffer) { + printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + hba->mp_bd_tbl, hba->mp_bd_dma); + hba->mp_bd_tbl = NULL; + rc = -1; + goto out; + } + + mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl; + addr = (unsigned long) hba->dummy_buf_dma; + mp_bdt->buffer_addr_lo = addr & 0xffffffff; + mp_bdt->buffer_addr_hi = addr >> 32; + mp_bdt->buffer_length = CNIC_PAGE_SIZE; + mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN | + ISCSI_BD_FIRST_IN_BD_CHAIN; +out: + return rc; +} + + +/** + * bnx2i_free_mp_bdt - releases ITT back to free pool + * @hba: pointer to adapter instance + * + * free MP dummy buffer and associated BD table + */ +static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) +{ + if (hba->mp_bd_tbl) { + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + hba->mp_bd_tbl, hba->mp_bd_dma); + hba->mp_bd_tbl = NULL; + } + if (hba->dummy_buffer) { + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + hba->dummy_buffer, hba->dummy_buf_dma); + hba->dummy_buffer = NULL; + } + return; +} + +/** + * bnx2i_drop_session - notifies iscsid of connection error. + * @cls_session: iscsi cls session pointer + * + * This notifies iscsid that there is a error, so it can initiate + * recovery. + * + * This relies on caller using the iscsi class iterator so the object + * is refcounted and does not disapper from under us. 
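+ * In this driver the routine is invoked through iscsi_host_for_each_session(),
+ * e.g. from the NETDEV_GOING_DOWN handler and from bnx2i_stop().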
+ */ +void bnx2i_drop_session(struct iscsi_cls_session *cls_session) +{ + iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); +} + +/** + * bnx2i_ep_destroy_list_add - add an entry to EP destroy list + * @hba: pointer to adapter instance + * @ep: pointer to endpoint (transport identifier) structure + * + * EP destroy queue manager + */ +static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep) +{ + write_lock_bh(&hba->ep_rdwr_lock); + list_add_tail(&ep->link, &hba->ep_destroy_list); + write_unlock_bh(&hba->ep_rdwr_lock); + return 0; +} + +/** + * bnx2i_ep_destroy_list_del - add an entry to EP destroy list + * + * @hba: pointer to adapter instance + * @ep: pointer to endpoint (transport identifier) structure + * + * EP destroy queue manager + */ +static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep) +{ + write_lock_bh(&hba->ep_rdwr_lock); + list_del_init(&ep->link); + write_unlock_bh(&hba->ep_rdwr_lock); + + return 0; +} + +/** + * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list + * @hba: pointer to adapter instance + * @ep: pointer to endpoint (transport identifier) structure + * + * pending conn offload completion queue manager + */ +static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep) +{ + write_lock_bh(&hba->ep_rdwr_lock); + list_add_tail(&ep->link, &hba->ep_ofld_list); + write_unlock_bh(&hba->ep_rdwr_lock); + return 0; +} + +/** + * bnx2i_ep_ofld_list_del - add an entry to ep offload pending list + * @hba: pointer to adapter instance + * @ep: pointer to endpoint (transport identifier) structure + * + * pending conn offload completion queue manager + */ +static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep) +{ + write_lock_bh(&hba->ep_rdwr_lock); + list_del_init(&ep->link); + write_unlock_bh(&hba->ep_rdwr_lock); + return 0; +} + + +/** + * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints + * + * @hba: pointer to adapter instance + * @iscsi_cid: iscsi context ID to find + * + */ +struct bnx2i_endpoint * +bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid) +{ + struct list_head *list; + struct list_head *tmp; + struct bnx2i_endpoint *ep = NULL; + + read_lock_bh(&hba->ep_rdwr_lock); + list_for_each_safe(list, tmp, &hba->ep_ofld_list) { + ep = (struct bnx2i_endpoint *)list; + + if (ep->ep_iscsi_cid == iscsi_cid) + break; + ep = NULL; + } + read_unlock_bh(&hba->ep_rdwr_lock); + + if (!ep) + printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid); + return ep; +} + +/** + * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list + * @hba: pointer to adapter instance + * @iscsi_cid: iscsi context ID to find + * + */ +struct bnx2i_endpoint * +bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid) +{ + struct list_head *list; + struct list_head *tmp; + struct bnx2i_endpoint *ep = NULL; + + read_lock_bh(&hba->ep_rdwr_lock); + list_for_each_safe(list, tmp, &hba->ep_destroy_list) { + ep = (struct bnx2i_endpoint *)list; + + if (ep->ep_iscsi_cid == iscsi_cid) + break; + ep = NULL; + } + read_unlock_bh(&hba->ep_rdwr_lock); + + if (!ep) + printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid); + + return ep; +} + +/** + * bnx2i_ep_active_list_add - add an entry to ep active list + * @hba: pointer to adapter instance + * @ep: pointer to endpoint (transport identifier) structure + * + * current active conn queue manager + */ +static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba, + struct 
bnx2i_endpoint *ep) +{ + write_lock_bh(&hba->ep_rdwr_lock); + list_add_tail(&ep->link, &hba->ep_active_list); + write_unlock_bh(&hba->ep_rdwr_lock); +} + + +/** + * bnx2i_ep_active_list_del - deletes an entry to ep active list + * @hba: pointer to adapter instance + * @ep: pointer to endpoint (transport identifier) structure + * + * current active conn queue manager + */ +static void bnx2i_ep_active_list_del(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep) +{ + write_lock_bh(&hba->ep_rdwr_lock); + list_del_init(&ep->link); + write_unlock_bh(&hba->ep_rdwr_lock); +} + + +/** + * bnx2i_setup_host_queue_size - assigns shost->can_queue param + * @hba: pointer to adapter instance + * @shost: scsi host pointer + * + * Initializes 'can_queue' parameter based on how many outstanding commands + * the device can handle. Each device 5708/5709/57710 has different + * capabilities + */ +static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba, + struct Scsi_Host *shost) +{ + if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type)) + shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708; + else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) + shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709; + else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) + shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710; + else + shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708; +} + + +/** + * bnx2i_alloc_hba - allocate and init adapter instance + * @cnic: cnic device pointer + * + * allocate & initialize adapter structure and call other + * support routines to do per adapter initialization + */ +struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic) +{ + struct Scsi_Host *shost; + struct bnx2i_hba *hba; + + shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0); + if (!shost) + return NULL; + shost->dma_boundary = cnic->pcidev->dma_mask; + shost->transportt = bnx2i_scsi_xport_template; + shost->max_id = ISCSI_MAX_CONNS_PER_HBA - 1; + shost->max_channel = 0; + shost->max_lun = 512; + shost->max_cmd_len = 16; + + hba = iscsi_host_priv(shost); + hba->shost = shost; + hba->netdev = cnic->netdev; + /* Get PCI related information and update hba struct members */ + hba->pcidev = cnic->pcidev; + pci_dev_get(hba->pcidev); + hba->pci_did = hba->pcidev->device; + hba->pci_vid = hba->pcidev->vendor; + hba->pci_sdid = hba->pcidev->subsystem_device; + hba->pci_svid = hba->pcidev->subsystem_vendor; + hba->pci_func = PCI_FUNC(hba->pcidev->devfn); + hba->pci_devno = PCI_SLOT(hba->pcidev->devfn); + + bnx2i_identify_device(hba, cnic); + bnx2i_setup_host_queue_size(hba, shost); + + hba->reg_base = pci_resource_start(hba->pcidev, 0); + if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { + hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2); + if (!hba->regview) + goto ioreg_map_err; + } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { + hba->regview = pci_iomap(hba->pcidev, 0, 4096); + if (!hba->regview) + goto ioreg_map_err; + } + + if (bnx2i_setup_mp_bdt(hba)) + goto mp_bdt_mem_err; + + INIT_LIST_HEAD(&hba->ep_ofld_list); + INIT_LIST_HEAD(&hba->ep_active_list); + INIT_LIST_HEAD(&hba->ep_destroy_list); + rwlock_init(&hba->ep_rdwr_lock); + + hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED; + + /* different values for 5708/5709/57710 */ + hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA; + + if (bnx2i_setup_free_cid_que(hba)) + goto cid_que_err; + + /* SQ/RQ/CQ size can be changed via sysfx interface */ + if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { + if (sq_size && sq_size <= 
BNX2I_5770X_SQ_WQES_MAX) + hba->max_sqes = sq_size; + else + hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT; + } else { /* 5706/5708/5709 */ + if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX) + hba->max_sqes = sq_size; + else + hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT; + } + + hba->max_rqes = rq_size; + hba->max_cqes = hba->max_sqes + rq_size; + if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { + if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX) + hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX; + } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX) + hba->max_cqes = BNX2I_570X_CQ_WQES_MAX; + + hba->num_ccell = hba->max_sqes / 2; + + spin_lock_init(&hba->lock); + mutex_init(&hba->net_dev_lock); + init_waitqueue_head(&hba->eh_wait); + if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { + hba->hba_shutdown_tmo = 30 * HZ; + hba->conn_teardown_tmo = 20 * HZ; + hba->conn_ctx_destroy_tmo = 6 * HZ; + } else { /* 5706/5708/5709 */ + hba->hba_shutdown_tmo = 20 * HZ; + hba->conn_teardown_tmo = 10 * HZ; + hba->conn_ctx_destroy_tmo = 2 * HZ; + } + +#ifdef CONFIG_32BIT + spin_lock_init(&hba->stat_lock); +#endif + memset(&hba->stats, 0, sizeof(struct iscsi_stats_info)); + + if (iscsi_host_add(shost, &hba->pcidev->dev)) + goto free_dump_mem; + return hba; + +free_dump_mem: + bnx2i_release_free_cid_que(hba); +cid_que_err: + bnx2i_free_mp_bdt(hba); +mp_bdt_mem_err: + if (hba->regview) { + pci_iounmap(hba->pcidev, hba->regview); + hba->regview = NULL; + } +ioreg_map_err: + pci_dev_put(hba->pcidev); + scsi_host_put(shost); + return NULL; +} + +/** + * bnx2i_free_hba- releases hba structure and resources held by the adapter + * @hba: pointer to adapter instance + * + * free adapter structure and call various cleanup routines. + */ +void bnx2i_free_hba(struct bnx2i_hba *hba) +{ + struct Scsi_Host *shost = hba->shost; + + iscsi_host_remove(shost, false); + INIT_LIST_HEAD(&hba->ep_ofld_list); + INIT_LIST_HEAD(&hba->ep_active_list); + INIT_LIST_HEAD(&hba->ep_destroy_list); + + if (hba->regview) { + pci_iounmap(hba->pcidev, hba->regview); + hba->regview = NULL; + } + pci_dev_put(hba->pcidev); + bnx2i_free_mp_bdt(hba); + bnx2i_release_free_cid_que(hba); + iscsi_host_free(shost); +} + +/** + * bnx2i_conn_free_login_resources - free DMA resources used for login process + * @hba: pointer to adapter instance + * @bnx2i_conn: iscsi connection pointer + * + * Login related resources, mostly BDT & payload DMA memory is freed + */ +static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba, + struct bnx2i_conn *bnx2i_conn) +{ + if (bnx2i_conn->gen_pdu.resp_bd_tbl) { + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + bnx2i_conn->gen_pdu.resp_bd_tbl, + bnx2i_conn->gen_pdu.resp_bd_dma); + bnx2i_conn->gen_pdu.resp_bd_tbl = NULL; + } + + if (bnx2i_conn->gen_pdu.req_bd_tbl) { + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + bnx2i_conn->gen_pdu.req_bd_tbl, + bnx2i_conn->gen_pdu.req_bd_dma); + bnx2i_conn->gen_pdu.req_bd_tbl = NULL; + } + + if (bnx2i_conn->gen_pdu.resp_buf) { + dma_free_coherent(&hba->pcidev->dev, + ISCSI_DEF_MAX_RECV_SEG_LEN, + bnx2i_conn->gen_pdu.resp_buf, + bnx2i_conn->gen_pdu.resp_dma_addr); + bnx2i_conn->gen_pdu.resp_buf = NULL; + } + + if (bnx2i_conn->gen_pdu.req_buf) { + dma_free_coherent(&hba->pcidev->dev, + ISCSI_DEF_MAX_RECV_SEG_LEN, + bnx2i_conn->gen_pdu.req_buf, + bnx2i_conn->gen_pdu.req_dma_addr); + bnx2i_conn->gen_pdu.req_buf = NULL; + } +} + +/** + * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop. 
+ @hba: pointer to adapter instance + * @bnx2i_conn: iscsi connection pointer + * + * Mgmt task DMA resources are allocated in this routine. + */ +static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, + struct bnx2i_conn *bnx2i_conn) +{ + /* Allocate memory for login request/response buffers */ + bnx2i_conn->gen_pdu.req_buf = + dma_alloc_coherent(&hba->pcidev->dev, + ISCSI_DEF_MAX_RECV_SEG_LEN, + &bnx2i_conn->gen_pdu.req_dma_addr, + GFP_KERNEL); + if (bnx2i_conn->gen_pdu.req_buf == NULL) + goto login_req_buf_failure; + + bnx2i_conn->gen_pdu.req_buf_size = 0; + bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf; + + bnx2i_conn->gen_pdu.resp_buf = + dma_alloc_coherent(&hba->pcidev->dev, + ISCSI_DEF_MAX_RECV_SEG_LEN, + &bnx2i_conn->gen_pdu.resp_dma_addr, + GFP_KERNEL); + if (bnx2i_conn->gen_pdu.resp_buf == NULL) + goto login_resp_buf_failure; + + bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN; + bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf; + + bnx2i_conn->gen_pdu.req_bd_tbl = + dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL); + if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL) + goto login_req_bd_tbl_failure; + + bnx2i_conn->gen_pdu.resp_bd_tbl = + dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + &bnx2i_conn->gen_pdu.resp_bd_dma, + GFP_KERNEL); + if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) + goto login_resp_bd_tbl_failure; + + return 0; + +login_resp_bd_tbl_failure: + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + bnx2i_conn->gen_pdu.req_bd_tbl, + bnx2i_conn->gen_pdu.req_bd_dma); + bnx2i_conn->gen_pdu.req_bd_tbl = NULL; + +login_req_bd_tbl_failure: + dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, + bnx2i_conn->gen_pdu.resp_buf, + bnx2i_conn->gen_pdu.resp_dma_addr); + bnx2i_conn->gen_pdu.resp_buf = NULL; +login_resp_buf_failure: + dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, + bnx2i_conn->gen_pdu.req_buf, + bnx2i_conn->gen_pdu.req_dma_addr); + bnx2i_conn->gen_pdu.req_buf = NULL; +login_req_buf_failure: + iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data, + "login resource alloc failed!!\n"); + return -ENOMEM; + +} + + +/** + * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table. + * @bnx2i_conn: iscsi connection pointer + * + * Allocates buffers and BD tables before shipping requests to cnic + * for PDUs prepared by 'iscsid' daemon + */ +static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn) +{ + struct iscsi_bd *bd_tbl; + + bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl; + + bd_tbl->buffer_addr_hi = + (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32); + bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr; + bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr - + bnx2i_conn->gen_pdu.req_buf; + bd_tbl->reserved0 = 0; + bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN | + ISCSI_BD_FIRST_IN_BD_CHAIN; + + bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl; + bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32; + bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr; + bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN; + bd_tbl->reserved0 = 0; + bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN | + ISCSI_BD_FIRST_IN_BD_CHAIN; +} + + +/** + * bnx2i_iscsi_send_generic_request - called to send mgmt tasks. + * @task: transport layer task pointer + * + * called to transmit PDUs prepared by the 'iscsid' daemon. 
iSCSI login, + * Nop-out and Logout requests flow through this path. + */ +static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task) +{ + struct bnx2i_cmd *cmd = task->dd_data; + struct bnx2i_conn *bnx2i_conn = cmd->conn; + int rc = 0; + char *buf; + int data_len; + + bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn); + switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { + case ISCSI_OP_LOGIN: + bnx2i_send_iscsi_login(bnx2i_conn, task); + break; + case ISCSI_OP_NOOP_OUT: + data_len = bnx2i_conn->gen_pdu.req_buf_size; + buf = bnx2i_conn->gen_pdu.req_buf; + if (data_len) + rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, + buf, data_len, 1); + else + rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, + NULL, 0, 1); + break; + case ISCSI_OP_LOGOUT: + rc = bnx2i_send_iscsi_logout(bnx2i_conn, task); + break; + case ISCSI_OP_SCSI_TMFUNC: + rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task); + break; + case ISCSI_OP_TEXT: + rc = bnx2i_send_iscsi_text(bnx2i_conn, task); + break; + default: + iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, + "send_gen: unsupported op 0x%x\n", + task->hdr->opcode); + } + return rc; +} + + +/********************************************************************** + * SCSI-ML Interface + **********************************************************************/ + +/** + * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe + * @sc: SCSI-ML command pointer + * @cmd: iscsi cmd pointer + */ +static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd) +{ + u32 dword; + int lpcnt; + u8 *srcp; + u32 *dstp; + u32 scsi_lun[2]; + + int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun); + cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]); + cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]); + + lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword); + srcp = (u8 *) sc->cmnd; + dstp = (u32 *) cmd->req.cdb; + while (lpcnt--) { + memcpy(&dword, (const void *) srcp, 4); + *dstp = cpu_to_be32(dword); + srcp += 4; + dstp++; + } + if (sc->cmd_len & 0x3) { + dword = (u32) srcp[0] | ((u32) srcp[1] << 8); + *dstp = cpu_to_be32(dword); + } +} + +static void bnx2i_cleanup_task(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + struct bnx2i_conn *bnx2i_conn = conn->dd_data; + struct bnx2i_hba *hba = bnx2i_conn->hba; + + /* + * mgmt task or cmd was never sent to us to transmit. 
+ */ + if (!task->sc || task->state == ISCSI_TASK_PENDING) + return; + /* + * need to clean-up task context to claim dma buffers + */ + if (task->state == ISCSI_TASK_ABRT_TMF) { + bnx2i_send_cmd_cleanup_req(hba, task->dd_data); + + spin_unlock_bh(&conn->session->back_lock); + wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl, + msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT)); + spin_lock_bh(&conn->session->back_lock); + } + bnx2i_iscsi_unmap_sg_list(task->dd_data); +} + +/** + * bnx2i_mtask_xmit - transmit mtask to chip for further processing + * @conn: transport layer conn structure pointer + * @task: transport layer command structure pointer + */ +static int +bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) +{ + struct bnx2i_conn *bnx2i_conn = conn->dd_data; + struct bnx2i_hba *hba = bnx2i_conn->hba; + struct bnx2i_cmd *cmd = task->dd_data; + + memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN); + + bnx2i_setup_cmd_wqe_template(cmd); + bnx2i_conn->gen_pdu.req_buf_size = task->data_count; + + /* Tx PDU/data length count */ + ADD_STATS_64(hba, tx_pdus, 1); + ADD_STATS_64(hba, tx_bytes, task->data_count); + + if (task->data_count) { + memcpy(bnx2i_conn->gen_pdu.req_buf, task->data, + task->data_count); + bnx2i_conn->gen_pdu.req_wr_ptr = + bnx2i_conn->gen_pdu.req_buf + task->data_count; + } + cmd->conn = conn->dd_data; + cmd->scsi_cmd = NULL; + return bnx2i_iscsi_send_generic_request(task); +} + +/** + * bnx2i_task_xmit - transmit iscsi command to chip for further processing + * @task: transport layer command structure pointer + * + * maps SG buffers and send request to chip/firmware in the form of SQ WQE + */ +static int bnx2i_task_xmit(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + struct iscsi_session *session = conn->session; + struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); + struct bnx2i_hba *hba = iscsi_host_priv(shost); + struct bnx2i_conn *bnx2i_conn = conn->dd_data; + struct scsi_cmnd *sc = task->sc; + struct bnx2i_cmd *cmd = task->dd_data; + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; + + if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 > + hba->max_sqes) + return -ENOMEM; + + /* + * If there is no scsi_cmnd this must be a mgmt task + */ + if (!sc) + return bnx2i_mtask_xmit(conn, task); + + bnx2i_setup_cmd_wqe_template(cmd); + cmd->req.op_code = ISCSI_OP_SCSI_CMD; + cmd->conn = bnx2i_conn; + cmd->scsi_cmd = sc; + cmd->req.total_data_transfer_length = scsi_bufflen(sc); + cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn); + + bnx2i_iscsi_map_sg_list(cmd); + bnx2i_cpy_scsi_cdb(sc, cmd); + + cmd->req.op_attr = ISCSI_ATTR_SIMPLE; + if (sc->sc_data_direction == DMA_TO_DEVICE) { + cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE; + cmd->req.itt = task->itt | + (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); + bnx2i_setup_write_cmd_bd_info(task); + } else { + if (scsi_bufflen(sc)) + cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ; + cmd->req.itt = task->itt | + (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); + } + + cmd->req.num_bds = cmd->io_tbl.bd_valid; + if (!cmd->io_tbl.bd_valid) { + cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma; + cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32); + cmd->req.num_bds = 1; + } + + bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd); + return 0; +} + +/** + * bnx2i_session_create - create a new iscsi session + * @ep: pointer to iscsi endpoint + * @cmds_max: user specified maximum commands + * @qdepth: scsi queue depth to support + * 
@initial_cmdsn: initial iscsi CMDSN to be used for this session + * + * Creates a new iSCSI session instance on given device. + */ +static struct iscsi_cls_session * +bnx2i_session_create(struct iscsi_endpoint *ep, + uint16_t cmds_max, uint16_t qdepth, + uint32_t initial_cmdsn) +{ + struct Scsi_Host *shost; + struct iscsi_cls_session *cls_session; + struct bnx2i_hba *hba; + struct bnx2i_endpoint *bnx2i_ep; + + if (!ep) { + printk(KERN_ERR "bnx2i: missing ep.\n"); + return NULL; + } + + bnx2i_ep = ep->dd_data; + shost = bnx2i_ep->hba->shost; + hba = iscsi_host_priv(shost); + if (bnx2i_adapter_ready(hba)) + return NULL; + + /* + * user can override hw limit as long as it is within + * the min/max. + */ + if (cmds_max > hba->max_sqes) + cmds_max = hba->max_sqes; + else if (cmds_max < BNX2I_SQ_WQES_MIN) + cmds_max = BNX2I_SQ_WQES_MIN; + + cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost, + cmds_max, 0, sizeof(struct bnx2i_cmd), + initial_cmdsn, ISCSI_MAX_TARGET); + if (!cls_session) + return NULL; + + if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data)) + goto session_teardown; + return cls_session; + +session_teardown: + iscsi_session_teardown(cls_session); + return NULL; +} + + +/** + * bnx2i_session_destroy - destroys iscsi session + * @cls_session: pointer to iscsi cls session + * + * Destroys previously created iSCSI session instance and releases + * all resources held by it + */ +static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *session = cls_session->dd_data; + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct bnx2i_hba *hba = iscsi_host_priv(shost); + + bnx2i_destroy_cmd_pool(hba, session); + iscsi_session_teardown(cls_session); +} + + +/** + * bnx2i_conn_create - create iscsi connection instance + * @cls_session: pointer to iscsi cls session + * @cid: iscsi cid as per rfc (not NX2's CID terminology) + * + * Creates a new iSCSI connection instance for a given session + */ +static struct iscsi_cls_conn * +bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid) +{ + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct bnx2i_hba *hba = iscsi_host_priv(shost); + struct bnx2i_conn *bnx2i_conn; + struct iscsi_cls_conn *cls_conn; + struct iscsi_conn *conn; + + cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn), + cid); + if (!cls_conn) + return NULL; + conn = cls_conn->dd_data; + + bnx2i_conn = conn->dd_data; + bnx2i_conn->cls_conn = cls_conn; + bnx2i_conn->hba = hba; + + atomic_set(&bnx2i_conn->work_cnt, 0); + + /* 'ep' ptr will be assigned in bind() call */ + bnx2i_conn->ep = NULL; + init_completion(&bnx2i_conn->cmd_cleanup_cmpl); + + if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) { + iscsi_conn_printk(KERN_ALERT, conn, + "conn_new: login resc alloc failed!!\n"); + goto free_conn; + } + + return cls_conn; + +free_conn: + iscsi_conn_teardown(cls_conn); + return NULL; +} + +/** + * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together + * @cls_session: pointer to iscsi cls session + * @cls_conn: pointer to iscsi cls conn + * @transport_fd: 64-bit EP handle + * @is_leading: leading connection on this session? + * + * Binds together iSCSI session instance, iSCSI connection instance + * and the TCP connection. 
This routine returns error code if + * TCP connection does not belong on the device iSCSI sess/conn + * is bound + */ +static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, + uint64_t transport_fd, int is_leading) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct bnx2i_conn *bnx2i_conn = conn->dd_data; + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct bnx2i_hba *hba = iscsi_host_priv(shost); + struct bnx2i_endpoint *bnx2i_ep; + struct iscsi_endpoint *ep; + int ret_code; + + ep = iscsi_lookup_endpoint(transport_fd); + if (!ep) + return -EINVAL; + /* + * Forcefully terminate all in progress connection recovery at the + * earliest, either in bind(), send_pdu(LOGIN), or conn_start() + */ + if (bnx2i_adapter_ready(hba)) { + ret_code = -EIO; + goto put_ep; + } + + bnx2i_ep = ep->dd_data; + if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) || + (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) { + /* Peer disconnect via' FIN or RST */ + ret_code = -EINVAL; + goto put_ep; + } + + if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { + ret_code = -EINVAL; + goto put_ep; + } + + if (bnx2i_ep->hba != hba) { + /* Error - TCP connection does not belong to this device + */ + iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data, + "conn bind, ep=0x%p (%s) does not", + bnx2i_ep, bnx2i_ep->hba->netdev->name); + iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data, + "belong to hba (%s)\n", + hba->netdev->name); + ret_code = -EEXIST; + goto put_ep; + } + bnx2i_ep->conn = bnx2i_conn; + bnx2i_conn->ep = bnx2i_ep; + bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid; + bnx2i_conn->fw_cid = bnx2i_ep->ep_cid; + + ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn, + bnx2i_ep->ep_iscsi_cid); + + /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710 + * driver needs to explicitly replenish RQ index during setup. 
+ */ + if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type)) + bnx2i_put_rq_buf(bnx2i_conn, 0); + + bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE); +put_ep: + iscsi_put_endpoint(ep); + return ret_code; +} + + +/** + * bnx2i_conn_destroy - destroy iscsi connection instance & release resources + * @cls_conn: pointer to iscsi cls conn + * + * Destroy an iSCSI connection instance and release memory resources held by + * this connection + */ +static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct bnx2i_conn *bnx2i_conn = conn->dd_data; + struct Scsi_Host *shost; + struct bnx2i_hba *hba; + struct bnx2i_work *work, *tmp; + unsigned cpu = 0; + struct bnx2i_percpu_s *p; + + shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn)); + hba = iscsi_host_priv(shost); + + bnx2i_conn_free_login_resources(hba, bnx2i_conn); + + if (atomic_read(&bnx2i_conn->work_cnt)) { + for_each_online_cpu(cpu) { + p = &per_cpu(bnx2i_percpu, cpu); + spin_lock_bh(&p->p_work_lock); + list_for_each_entry_safe(work, tmp, + &p->work_list, list) { + if (work->session == conn->session && + work->bnx2i_conn == bnx2i_conn) { + list_del_init(&work->list); + kfree(work); + if (!atomic_dec_and_test( + &bnx2i_conn->work_cnt)) + break; + } + } + spin_unlock_bh(&p->p_work_lock); + } + } + + iscsi_conn_teardown(cls_conn); +} + + +/** + * bnx2i_ep_get_param - return iscsi ep parameter to caller + * @ep: pointer to iscsi endpoint + * @param: parameter type identifier + * @buf: buffer pointer + * + * returns iSCSI ep parameters + */ +static int bnx2i_ep_get_param(struct iscsi_endpoint *ep, + enum iscsi_param param, char *buf) +{ + struct bnx2i_endpoint *bnx2i_ep = ep->dd_data; + struct bnx2i_hba *hba = bnx2i_ep->hba; + int len = -ENOTCONN; + + if (!hba) + return -ENOTCONN; + + switch (param) { + case ISCSI_PARAM_CONN_PORT: + mutex_lock(&hba->net_dev_lock); + if (bnx2i_ep->cm_sk) + len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port); + mutex_unlock(&hba->net_dev_lock); + break; + case ISCSI_PARAM_CONN_ADDRESS: + mutex_lock(&hba->net_dev_lock); + if (bnx2i_ep->cm_sk) + len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip); + mutex_unlock(&hba->net_dev_lock); + break; + default: + return -ENOSYS; + } + + return len; +} + +/** + * bnx2i_host_get_param - returns host (adapter) related parameters + * @shost: scsi host pointer + * @param: parameter type identifier + * @buf: buffer pointer + */ +static int bnx2i_host_get_param(struct Scsi_Host *shost, + enum iscsi_host_param param, char *buf) +{ + struct bnx2i_hba *hba = iscsi_host_priv(shost); + int len = 0; + + switch (param) { + case ISCSI_HOST_PARAM_HWADDRESS: + len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6); + break; + case ISCSI_HOST_PARAM_NETDEV_NAME: + len = sprintf(buf, "%s\n", hba->netdev->name); + break; + case ISCSI_HOST_PARAM_IPADDRESS: { + struct list_head *active_list = &hba->ep_active_list; + + read_lock_bh(&hba->ep_rdwr_lock); + if (!list_empty(&hba->ep_active_list)) { + struct bnx2i_endpoint *bnx2i_ep; + struct cnic_sock *csk; + + bnx2i_ep = list_first_entry(active_list, + struct bnx2i_endpoint, + link); + csk = bnx2i_ep->cm_sk; + if (test_bit(SK_F_IPV6, &csk->flags)) + len = sprintf(buf, "%pI6\n", csk->src_ip); + else + len = sprintf(buf, "%pI4\n", csk->src_ip); + } + read_unlock_bh(&hba->ep_rdwr_lock); + break; + } + default: + return iscsi_host_get_param(shost, param, buf); + } + return len; +} + +/** + * bnx2i_conn_start - completes iscsi connection migration to FFP + * 
@cls_conn: pointer to iscsi cls conn + * + * last call in FFP migration to handover iscsi conn to the driver + */ +static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct bnx2i_conn *bnx2i_conn = conn->dd_data; + + bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START; + bnx2i_update_iscsi_conn(conn); + + /* + * this should normally not sleep for a long time so it should + * not disrupt the caller. + */ + timer_setup(&bnx2i_conn->ep->ofld_timer, bnx2i_ep_ofld_timer, 0); + bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies; + add_timer(&bnx2i_conn->ep->ofld_timer); + /* update iSCSI context for this conn, wait for CNIC to complete */ + wait_event_interruptible(bnx2i_conn->ep->ofld_wait, + bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START); + + if (signal_pending(current)) + flush_signals(current); + del_timer_sync(&bnx2i_conn->ep->ofld_timer); + + iscsi_conn_start(cls_conn); + return 0; +} + + +/** + * bnx2i_conn_get_stats - returns iSCSI stats + * @cls_conn: pointer to iscsi cls conn + * @stats: pointer to iscsi statistic struct + */ +static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn, + struct iscsi_stats *stats) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + + stats->txdata_octets = conn->txdata_octets; + stats->rxdata_octets = conn->rxdata_octets; + stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; + stats->dataout_pdus = conn->dataout_pdus_cnt; + stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; + stats->datain_pdus = conn->datain_pdus_cnt; + stats->r2t_pdus = conn->r2t_pdus_cnt; + stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; + stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; + stats->digest_err = 0; + stats->timeout_err = 0; + strcpy(stats->custom[0].desc, "eh_abort_cnt"); + stats->custom[0].value = conn->eh_abort_cnt; + stats->custom_length = 1; +} + + +/** + * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices + * @dst_addr: target IP address + * + * check if route resolves to BNX2 device + */ +static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr) +{ + struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr; + struct bnx2i_hba *hba; + struct cnic_dev *cnic = NULL; + + hba = get_adapter_list_head(); + if (hba && hba->cnic) + cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI); + if (!cnic) { + printk(KERN_ALERT "bnx2i: no route," + "can't connect using cnic\n"); + goto no_nx2_route; + } + hba = bnx2i_find_hba_for_cnic(cnic); + if (!hba) + goto no_nx2_route; + + if (bnx2i_adapter_ready(hba)) { + printk(KERN_ALERT "bnx2i: check route, hba not found\n"); + goto no_nx2_route; + } + if (hba->netdev->mtu > hba->mtu_supported) { + printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n", + hba->netdev->name, hba->netdev->mtu); + printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n", + hba->mtu_supported); + goto no_nx2_route; + } + return hba; +no_nx2_route: + return NULL; +} + + +/** + * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources + * @hba: pointer to adapter instance + * @ep: endpoint (transport identifier) structure + * + * destroys cm_sock structure and on chip iscsi context + */ +static int bnx2i_tear_down_conn(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep) +{ + if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk) + hba->cnic->cm_destroy(ep->cm_sk); + + if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) && + ep->state == EP_STATE_DISCONN_TIMEDOUT) { + if (ep->conn && ep->conn->cls_conn && + 
ep->conn->cls_conn->dd_data) { + struct iscsi_conn *conn = ep->conn->cls_conn->dd_data; + + /* Must suspend all rx queue activity for this ep */ + set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); + } + /* CONN_DISCONNECT timeout may or may not be an issue depending + * on what transcribed in TCP layer, different targets behave + * differently + */ + printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, " + "please submit GRC Dump, NW/PCIe trace, " + "driver msgs to developers for analysis\n", + hba->netdev->name); + } + + ep->state = EP_STATE_CLEANUP_START; + timer_setup(&ep->ofld_timer, bnx2i_ep_ofld_timer, 0); + ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies; + add_timer(&ep->ofld_timer); + + bnx2i_ep_destroy_list_add(hba, ep); + + /* destroy iSCSI context, wait for it to complete */ + if (bnx2i_send_conn_destroy(hba, ep)) + ep->state = EP_STATE_CLEANUP_CMPL; + + wait_event_interruptible(ep->ofld_wait, + (ep->state != EP_STATE_CLEANUP_START)); + + if (signal_pending(current)) + flush_signals(current); + del_timer_sync(&ep->ofld_timer); + + bnx2i_ep_destroy_list_del(hba, ep); + + if (ep->state != EP_STATE_CLEANUP_CMPL) + /* should never happen */ + printk(KERN_ALERT "bnx2i - conn destroy failed\n"); + + return 0; +} + + +/** + * bnx2i_ep_connect - establish TCP connection to target portal + * @shost: scsi host + * @dst_addr: target IP address + * @non_blocking: blocking or non-blocking call + * + * this routine initiates the TCP/IP connection by invoking Option-2 i/f + * with l5_core and the CNIC. This is a multi-step process of resolving + * route to target, create a iscsi connection context, handshaking with + * CNIC module to create/initialize the socket struct and finally + * sending down option-2 request to complete TCP 3-way handshake + */ +static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, + struct sockaddr *dst_addr, + int non_blocking) +{ + u32 iscsi_cid = BNX2I_CID_RESERVED; + struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr; + struct sockaddr_in6 *desti6; + struct bnx2i_endpoint *bnx2i_ep; + struct bnx2i_hba *hba; + struct cnic_dev *cnic; + struct cnic_sockaddr saddr; + struct iscsi_endpoint *ep; + int rc = 0; + + if (shost) { + /* driver is given scsi host to work with */ + hba = iscsi_host_priv(shost); + } else + /* + * check if the given destination can be reached through + * a iscsi capable NetXtreme2 device + */ + hba = bnx2i_check_route(dst_addr); + + if (!hba) { + rc = -EINVAL; + goto nohba; + } + mutex_lock(&hba->net_dev_lock); + + if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) { + rc = -EPERM; + goto check_busy; + } + cnic = hba->cnic; + ep = bnx2i_alloc_ep(hba); + if (!ep) { + rc = -ENOMEM; + goto check_busy; + } + bnx2i_ep = ep->dd_data; + + atomic_set(&bnx2i_ep->num_active_cmds, 0); + iscsi_cid = bnx2i_alloc_iscsi_cid(hba); + if (iscsi_cid == -1) { + printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate " + "iscsi cid\n", hba->netdev->name); + rc = -ENOMEM; + bnx2i_free_ep(ep); + goto check_busy; + } + bnx2i_ep->hba_age = hba->age; + + rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep); + if (rc != 0) { + printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error" + "\n", hba->netdev->name); + rc = -ENOMEM; + goto qp_resc_err; + } + + bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid; + bnx2i_ep->state = EP_STATE_OFLD_START; + bnx2i_ep_ofld_list_add(hba, bnx2i_ep); + + timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0); + bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies; + 
add_timer(&bnx2i_ep->ofld_timer); + + if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) { + if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) { + printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n", + hba->netdev->name, bnx2i_ep->ep_iscsi_cid); + rc = -EBUSY; + } else + rc = -ENOSPC; + printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe" + "\n", hba->netdev->name); + bnx2i_ep_ofld_list_del(hba, bnx2i_ep); + goto conn_failed; + } + + /* Wait for CNIC hardware to setup conn context and return 'cid' */ + wait_event_interruptible(bnx2i_ep->ofld_wait, + bnx2i_ep->state != EP_STATE_OFLD_START); + + if (signal_pending(current)) + flush_signals(current); + del_timer_sync(&bnx2i_ep->ofld_timer); + + bnx2i_ep_ofld_list_del(hba, bnx2i_ep); + + if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) { + if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) { + printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n", + hba->netdev->name, bnx2i_ep->ep_iscsi_cid); + rc = -EBUSY; + } else + rc = -ENOSPC; + goto conn_failed; + } + + rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid, + iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep); + if (rc) { + rc = -EINVAL; + /* Need to terminate and cleanup the connection */ + goto release_ep; + } + + bnx2i_ep->cm_sk->rcv_buf = 256 * 1024; + bnx2i_ep->cm_sk->snd_buf = 256 * 1024; + clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags); + + memset(&saddr, 0, sizeof(saddr)); + if (dst_addr->sa_family == AF_INET) { + desti = (struct sockaddr_in *) dst_addr; + saddr.remote.v4 = *desti; + saddr.local.v4.sin_family = desti->sin_family; + } else if (dst_addr->sa_family == AF_INET6) { + desti6 = (struct sockaddr_in6 *) dst_addr; + saddr.remote.v6 = *desti6; + saddr.local.v6.sin6_family = desti6->sin6_family; + } + + bnx2i_ep->timestamp = jiffies; + bnx2i_ep->state = EP_STATE_CONNECT_START; + if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + rc = -EINVAL; + goto conn_failed; + } else + rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr); + if (rc) + goto release_ep; + + bnx2i_ep_active_list_add(hba, bnx2i_ep); + + rc = bnx2i_map_ep_dbell_regs(bnx2i_ep); + if (rc) + goto del_active_ep; + + mutex_unlock(&hba->net_dev_lock); + return ep; + +del_active_ep: + bnx2i_ep_active_list_del(hba, bnx2i_ep); +release_ep: + if (bnx2i_tear_down_conn(hba, bnx2i_ep)) { + mutex_unlock(&hba->net_dev_lock); + return ERR_PTR(rc); + } +conn_failed: + bnx2i_free_qp_resc(hba, bnx2i_ep); +qp_resc_err: + bnx2i_free_ep(ep); +check_busy: + mutex_unlock(&hba->net_dev_lock); +nohba: + return ERR_PTR(rc); +} + + +/** + * bnx2i_ep_poll - polls for TCP connection establishement + * @ep: TCP connection (endpoint) handle + * @timeout_ms: timeout value in milli secs + * + * polls for TCP connect request to complete + */ +static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) +{ + struct bnx2i_endpoint *bnx2i_ep; + int rc = 0; + + bnx2i_ep = ep->dd_data; + if ((bnx2i_ep->state == EP_STATE_IDLE) || + (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) || + (bnx2i_ep->state == EP_STATE_OFLD_FAILED)) + return -1; + if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL) + return 1; + + rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait, + ((bnx2i_ep->state == + EP_STATE_OFLD_FAILED) || + (bnx2i_ep->state == + EP_STATE_CONNECT_FAILED) || + (bnx2i_ep->state == + EP_STATE_CONNECT_COMPL)), + msecs_to_jiffies(timeout_ms)); + if (bnx2i_ep->state == EP_STATE_OFLD_FAILED) + rc = -1; + + if (rc > 0) + return 1; + else if (!rc) + return 0; /* timeout */ + else + return rc; +} + + +/** + * bnx2i_ep_tcp_conn_active 
- check EP state transition + * @bnx2i_ep: endpoint pointer + * + * check if underlying TCP connection is active + */ +static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep) +{ + int ret; + int cnic_dev_10g = 0; + + if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type)) + cnic_dev_10g = 1; + + switch (bnx2i_ep->state) { + case EP_STATE_CLEANUP_FAILED: + case EP_STATE_OFLD_FAILED: + case EP_STATE_DISCONN_TIMEDOUT: + ret = 0; + break; + case EP_STATE_CONNECT_START: + case EP_STATE_CONNECT_FAILED: + case EP_STATE_CONNECT_COMPL: + case EP_STATE_ULP_UPDATE_START: + case EP_STATE_ULP_UPDATE_COMPL: + case EP_STATE_TCP_FIN_RCVD: + case EP_STATE_LOGOUT_SENT: + case EP_STATE_LOGOUT_RESP_RCVD: + case EP_STATE_ULP_UPDATE_FAILED: + ret = 1; + break; + case EP_STATE_TCP_RST_RCVD: + if (cnic_dev_10g) + ret = 0; + else + ret = 1; + break; + default: + ret = 0; + } + + return ret; +} + + +/** + * bnx2i_hw_ep_disconnect - executes TCP connection teardown process in the hw + * @bnx2i_ep: TCP connection (bnx2i endpoint) handle + * + * executes TCP connection teardown process + */ +int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep) +{ + struct bnx2i_hba *hba = bnx2i_ep->hba; + struct cnic_dev *cnic; + struct iscsi_session *session = NULL; + struct iscsi_conn *conn = NULL; + int ret = 0; + int close = 0; + int close_ret = 0; + + if (!hba) + return 0; + + cnic = hba->cnic; + if (!cnic) + return 0; + + if (bnx2i_ep->state == EP_STATE_IDLE || + bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT) + return 0; + + if (!bnx2i_ep_tcp_conn_active(bnx2i_ep)) + goto destroy_conn; + + if (bnx2i_ep->conn) { + conn = bnx2i_ep->conn->cls_conn->dd_data; + session = conn->session; + } + + timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0); + bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies; + add_timer(&bnx2i_ep->ofld_timer); + + if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) + goto out; + + if (session) { + spin_lock_bh(&session->frwd_lock); + if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) { + if (session->state == ISCSI_STATE_LOGGING_OUT) { + if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) { + /* Logout sent, but no resp */ + printk(KERN_ALERT "bnx2i (%s): WARNING" + " logout response was not " + "received!\n", + bnx2i_ep->hba->netdev->name); + } else if (bnx2i_ep->state == + EP_STATE_LOGOUT_RESP_RCVD) + close = 1; + } + } else + close = 1; + + spin_unlock_bh(&session->frwd_lock); + } + + bnx2i_ep->state = EP_STATE_DISCONN_START; + + if (close) + close_ret = cnic->cm_close(bnx2i_ep->cm_sk); + else + close_ret = cnic->cm_abort(bnx2i_ep->cm_sk); + + if (close_ret) + printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n", + bnx2i_ep->hba->netdev->name, close, close_ret); + else + /* wait for option-2 conn teardown */ + wait_event_interruptible(bnx2i_ep->ofld_wait, + ((bnx2i_ep->state != EP_STATE_DISCONN_START) + && (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD))); + + if (signal_pending(current)) + flush_signals(current); + del_timer_sync(&bnx2i_ep->ofld_timer); + +destroy_conn: + bnx2i_ep_active_list_del(hba, bnx2i_ep); + if (bnx2i_tear_down_conn(hba, bnx2i_ep)) + return -EINVAL; +out: + bnx2i_ep->state = EP_STATE_IDLE; + return ret; +} + + +/** + * bnx2i_ep_disconnect - executes TCP connection teardown process + * @ep: TCP connection (iscsi endpoint) handle + * + * executes TCP connection teardown process + */ +static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep) +{ + struct bnx2i_endpoint *bnx2i_ep; + struct bnx2i_conn *bnx2i_conn = NULL; + struct bnx2i_hba 
*hba; + + bnx2i_ep = ep->dd_data; + + /* driver should not attempt connection cleanup until TCP_CONNECT + * completes either successfully or fails. Timeout is 9-secs, so + * wait for it to complete + */ + while ((bnx2i_ep->state == EP_STATE_CONNECT_START) && + !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ))) + msleep(250); + + if (bnx2i_ep->conn) + bnx2i_conn = bnx2i_ep->conn; + hba = bnx2i_ep->hba; + + mutex_lock(&hba->net_dev_lock); + + if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT) + goto out; + + if (bnx2i_ep->state == EP_STATE_IDLE) + goto free_resc; + + if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) || + (bnx2i_ep->hba_age != hba->age)) { + bnx2i_ep_active_list_del(hba, bnx2i_ep); + goto free_resc; + } + + /* Do all chip cleanup here */ + if (bnx2i_hw_ep_disconnect(bnx2i_ep)) { + mutex_unlock(&hba->net_dev_lock); + return; + } +free_resc: + bnx2i_free_qp_resc(hba, bnx2i_ep); + + if (bnx2i_conn) + bnx2i_conn->ep = NULL; + + bnx2i_free_ep(ep); +out: + mutex_unlock(&hba->net_dev_lock); + + wake_up_interruptible(&hba->eh_wait); +} + + +/** + * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler + * @shost: scsi host pointer + * @params: pointer to buffer containing iscsi path message + */ +static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params) +{ + struct bnx2i_hba *hba = iscsi_host_priv(shost); + char *buf = (char *) params; + u16 len = sizeof(*params); + + /* handled by cnic driver */ + hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf, + len); + + return 0; +} + +static umode_t bnx2i_attr_is_visible(int param_type, int param) +{ + switch (param_type) { + case ISCSI_HOST_PARAM: + switch (param) { + case ISCSI_HOST_PARAM_NETDEV_NAME: + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + return S_IRUGO; + default: + return 0; + } + case ISCSI_PARAM: + switch (param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + case ISCSI_PARAM_HDRDGST_EN: + case ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_PERSISTENT_ADDRESS: + case ISCSI_PARAM_PERSISTENT_PORT: + case ISCSI_PARAM_PING_TMO: + case ISCSI_PARAM_RECV_TMO: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_MAX_R2T: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_FIRST_BURST: + case ISCSI_PARAM_MAX_BURST: + case ISCSI_PARAM_PDU_INORDER_EN: + case ISCSI_PARAM_DATASEQ_INORDER_EN: + case ISCSI_PARAM_ERL: + case ISCSI_PARAM_TARGET_NAME: + case ISCSI_PARAM_TPGT: + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + case ISCSI_PARAM_USERNAME_IN: + case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_FAST_ABORT: + case ISCSI_PARAM_ABORT_TMO: + case ISCSI_PARAM_LU_RESET_TMO: + case ISCSI_PARAM_TGT_RESET_TMO: + case ISCSI_PARAM_IFACE_NAME: + case ISCSI_PARAM_INITIATOR_NAME: + case ISCSI_PARAM_BOOT_ROOT: + case ISCSI_PARAM_BOOT_NIC: + case ISCSI_PARAM_BOOT_TARGET: + return S_IRUGO; + default: + return 0; + } + } + + return 0; +} + +/* + * 'Scsi_Host_Template' structure and 'iscsi_transport' structure template + * used while registering with the scsi host and iSCSI transport module. 
+ */ +static const struct scsi_host_template bnx2i_host_template = { + .module = THIS_MODULE, + .name = "QLogic Offload iSCSI Initiator", + .proc_name = "bnx2i", + .queuecommand = iscsi_queuecommand, + .eh_timed_out = iscsi_eh_cmd_timed_out, + .eh_abort_handler = iscsi_eh_abort, + .eh_device_reset_handler = iscsi_eh_device_reset, + .eh_target_reset_handler = iscsi_eh_recover_target, + .change_queue_depth = scsi_change_queue_depth, + .target_alloc = iscsi_target_alloc, + .can_queue = 2048, + .max_sectors = 127, + .cmd_per_lun = 128, + .this_id = -1, + .sg_tablesize = ISCSI_MAX_BDS_PER_CMD, + .shost_groups = bnx2i_dev_groups, + .track_queue_depth = 1, + .cmd_size = sizeof(struct iscsi_cmd), +}; + +struct iscsi_transport bnx2i_iscsi_transport = { + .owner = THIS_MODULE, + .name = "bnx2i", + .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | + CAP_MULTI_R2T | CAP_DATADGST | + CAP_DATA_PATH_OFFLOAD | + CAP_TEXT_NEGO, + .create_session = bnx2i_session_create, + .destroy_session = bnx2i_session_destroy, + .create_conn = bnx2i_conn_create, + .bind_conn = bnx2i_conn_bind, + .unbind_conn = iscsi_conn_unbind, + .destroy_conn = bnx2i_conn_destroy, + .attr_is_visible = bnx2i_attr_is_visible, + .set_param = iscsi_set_param, + .get_conn_param = iscsi_conn_get_param, + .get_session_param = iscsi_session_get_param, + .get_host_param = bnx2i_host_get_param, + .start_conn = bnx2i_conn_start, + .stop_conn = iscsi_conn_stop, + .send_pdu = iscsi_conn_send_pdu, + .xmit_task = bnx2i_task_xmit, + .get_stats = bnx2i_conn_get_stats, + /* TCP connect - disconnect - option-2 interface calls */ + .get_ep_param = bnx2i_ep_get_param, + .ep_connect = bnx2i_ep_connect, + .ep_poll = bnx2i_ep_poll, + .ep_disconnect = bnx2i_ep_disconnect, + .set_path = bnx2i_nl_set_path, + /* Error recovery timeout call */ + .session_recovery_timedout = iscsi_session_recovery_timedout, + .cleanup_task = bnx2i_cleanup_task, +}; diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c new file mode 100644 index 000000000..d6b0bbb51 --- /dev/null +++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c @@ -0,0 +1,158 @@ +/* bnx2i_sysfs.c: QLogic NetXtreme II iSCSI driver. + * + * Copyright (c) 2004 - 2013 Broadcom Corporation + * Copyright (c) 2014, QLogic Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) + * Maintained by: QLogic-Storage-Upstream@qlogic.com + */ + +#include "bnx2i.h" + +/** + * bnx2i_dev_to_hba - maps dev pointer to adapter struct + * @dev: device pointer + * + * Map device to hba structure + */ +static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev) +{ + struct Scsi_Host *shost = class_to_shost(dev); + return iscsi_host_priv(shost); +} + + +/** + * bnx2i_show_sq_info - returns currently configured send queue (SQ) size + * @dev: device pointer + * @attr: device attribute (unused) + * @buf: buffer to return current SQ size parameter + * + * Returns current SQ size parameter; this parameter determines the number of + * outstanding iSCSI commands supported on a connection + */ +static ssize_t bnx2i_show_sq_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); + + return sprintf(buf, "0x%x\n", hba->max_sqes); +} + + +/** + * bnx2i_set_sq_info - update send queue (SQ) size parameter + * @dev: device pointer + * @attr: device attribute (unused) + * @buf: buffer containing the new SQ size parameter + * @count: parameter buffer size + * + * Interface for user to change shared queue size allocated for each conn. + * Must be within SQ limits and a power of 2. For the latter this is needed + * because of how libiscsi preallocates tasks. + */ +static ssize_t bnx2i_set_sq_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); + u32 val; + int max_sq_size; + + if (hba->ofld_conns_active) + goto skip_config; + + if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) + max_sq_size = BNX2I_5770X_SQ_WQES_MAX; + else + max_sq_size = BNX2I_570X_SQ_WQES_MAX; + + if (sscanf(buf, " 0x%x ", &val) > 0) { + if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) && + (is_power_of_2(val))) + hba->max_sqes = val; + } + + return count; + +skip_config: + printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n"); + return 0; +} + + +/** + * bnx2i_show_ccell_info - returns command cell (HQ) size + * @dev: device pointer + * @attr: device attribute (unused) + * @buf: buffer to return current CCELL (HQ) size parameter + * + * returns per-connection TCP history queue size parameter + */ +static ssize_t bnx2i_show_ccell_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); + + return sprintf(buf, "0x%x\n", hba->num_ccell); +} + + +/** + * bnx2i_set_ccell_info - set command cell (HQ) size + * @dev: device pointer + * @attr: device attribute (unused) + * @buf: buffer containing the new CCELL (HQ) size parameter + * @count: parameter buffer size + * + * updates per-connection TCP history queue size parameter + */ +static ssize_t bnx2i_set_ccell_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 val; + struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); + + if (hba->ofld_conns_active) + goto skip_config; + + if (sscanf(buf, " 0x%x ", &val) > 0) { + if ((val >= BNX2I_CCELLS_MIN) && + (val <= BNX2I_CCELLS_MAX)) { + hba->num_ccell = val; + } + } + + return count; + +skip_config: + printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n"); + return 0; +} + + +static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR, + bnx2i_show_sq_info, bnx2i_set_sq_info); +static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR, + bnx2i_show_ccell_info, 
bnx2i_set_ccell_info); + +static struct attribute *bnx2i_dev_attributes[] = { + &dev_attr_sq_size.attr, + &dev_attr_num_ccell.attr, + NULL +}; + +static const struct attribute_group bnx2i_dev_attr_group = { + .attrs = bnx2i_dev_attributes +}; + +const struct attribute_group *bnx2i_dev_groups[] = { + &bnx2i_dev_attr_group, + NULL +}; diff --git a/drivers/scsi/bvme6000_scsi.c b/drivers/scsi/bvme6000_scsi.c new file mode 100644 index 000000000..8d72b2553 --- /dev/null +++ b/drivers/scsi/bvme6000_scsi.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Detection routine for the NCR53c710 based BVME6000 SCSI Controllers for Linux. + * + * Based on work by Alan Hourihane and Kars de Jong + * + * Rewritten to use 53c700.c by Richard Hirst + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "53c700.h" + +MODULE_AUTHOR("Richard Hirst "); +MODULE_DESCRIPTION("BVME6000 NCR53C710 driver"); +MODULE_LICENSE("GPL"); + +static struct scsi_host_template bvme6000_scsi_driver_template = { + .name = "BVME6000 NCR53c710 SCSI", + .proc_name = "BVME6000", + .this_id = 7, + .module = THIS_MODULE, +}; + +static struct platform_device *bvme6000_scsi_device; + +static int +bvme6000_probe(struct platform_device *dev) +{ + struct Scsi_Host *host; + struct NCR_700_Host_Parameters *hostdata; + + if (!MACH_IS_BVME6000) + goto out; + + hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); + if (!hostdata) { + printk(KERN_ERR "bvme6000-scsi: " + "Failed to allocate host data\n"); + goto out; + } + + /* Fill in the required pieces of hostdata */ + hostdata->base = (void __iomem *)BVME_NCR53C710_BASE; + hostdata->clock = 40; /* XXX - depends on the CPU clock! */ + hostdata->chip710 = 1; + hostdata->dmode_extra = DMODE_FC2; + hostdata->dcntl_extra = EA_710; + hostdata->ctest7_extra = CTEST7_TT1; + + /* and register the chip */ + host = NCR_700_detect(&bvme6000_scsi_driver_template, hostdata, + &dev->dev); + if (!host) { + printk(KERN_ERR "bvme6000-scsi: No host detected; " + "board configuration problem?\n"); + goto out_free; + } + host->base = BVME_NCR53C710_BASE; + host->this_id = 7; + host->irq = BVME_IRQ_SCSI; + if (request_irq(BVME_IRQ_SCSI, NCR_700_intr, 0, "bvme6000-scsi", + host)) { + printk(KERN_ERR "bvme6000-scsi: request_irq failed\n"); + goto out_put_host; + } + + platform_set_drvdata(dev, host); + scsi_scan_host(host); + + return 0; + + out_put_host: + scsi_host_put(host); + out_free: + kfree(hostdata); + out: + return -ENODEV; +} + +static int +bvme6000_device_remove(struct platform_device *dev) +{ + struct Scsi_Host *host = platform_get_drvdata(dev); + struct NCR_700_Host_Parameters *hostdata = shost_priv(host); + + scsi_remove_host(host); + NCR_700_release(host); + kfree(hostdata); + free_irq(host->irq, host); + + return 0; +} + +static struct platform_driver bvme6000_scsi_driver = { + .driver = { + .name = "bvme6000-scsi", + }, + .probe = bvme6000_probe, + .remove = bvme6000_device_remove, +}; + +static int __init bvme6000_scsi_init(void) +{ + int err; + + err = platform_driver_register(&bvme6000_scsi_driver); + if (err) + return err; + + bvme6000_scsi_device = platform_device_register_simple("bvme6000-scsi", + -1, NULL, 0); + if (IS_ERR(bvme6000_scsi_device)) { + platform_driver_unregister(&bvme6000_scsi_driver); + return PTR_ERR(bvme6000_scsi_device); + } + + return 0; +} + +static void __exit bvme6000_scsi_exit(void) +{ + platform_device_unregister(bvme6000_scsi_device); + 
platform_driver_unregister(&bvme6000_scsi_driver); +} + +module_init(bvme6000_scsi_init); +module_exit(bvme6000_scsi_exit); diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c new file mode 100644 index 000000000..cb0a399be --- /dev/null +++ b/drivers/scsi/ch.c @@ -0,0 +1,1031 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SCSI Media Changer device driver for Linux 2.6 + * + * (c) 1996-2003 Gerd Knorr + * + */ + +#define VERSION "0.25" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* here are all the ioctls */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#define CH_DT_MAX 16 +#define CH_TYPES 8 +#define CH_MAX_DEVS 128 + +MODULE_DESCRIPTION("device driver for scsi media changer devices"); +MODULE_AUTHOR("Gerd Knorr "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_CHARDEV_MAJOR(SCSI_CHANGER_MAJOR); +MODULE_ALIAS_SCSI_DEVICE(TYPE_MEDIUM_CHANGER); + +static int init = 1; +module_param(init, int, 0444); +MODULE_PARM_DESC(init, \ + "initialize element status on driver load (default: on)"); + +static int timeout_move = 300; +module_param(timeout_move, int, 0644); +MODULE_PARM_DESC(timeout_move,"timeout for move commands " + "(default: 300 seconds)"); + +static int timeout_init = 3600; +module_param(timeout_init, int, 0644); +MODULE_PARM_DESC(timeout_init,"timeout for INITIALIZE ELEMENT STATUS " + "(default: 3600 seconds)"); + +static int verbose = 1; +module_param(verbose, int, 0644); +MODULE_PARM_DESC(verbose,"be verbose (default: on)"); + +static int debug; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug,"enable/disable debug messages, also prints more " + "detailed sense codes on scsi errors (default: off)"); + +static int dt_id[CH_DT_MAX] = { [ 0 ... (CH_DT_MAX-1) ] = -1 }; +static int dt_lun[CH_DT_MAX]; +module_param_array(dt_id, int, NULL, 0444); +module_param_array(dt_lun, int, NULL, 0444); + +/* tell the driver about vendor-specific slots */ +static int vendor_firsts[CH_TYPES-4]; +static int vendor_counts[CH_TYPES-4]; +module_param_array(vendor_firsts, int, NULL, 0444); +module_param_array(vendor_counts, int, NULL, 0444); + +static const char * vendor_labels[CH_TYPES-4] = { + "v0", "v1", "v2", "v3" +}; +// module_param_string_array(vendor_labels, NULL, 0444); + +#define ch_printk(prefix, ch, fmt, a...) \ + sdev_prefix_printk(prefix, (ch)->device, (ch)->name, fmt, ##a) + +#define DPRINTK(fmt, arg...) \ +do { \ + if (debug) \ + ch_printk(KERN_DEBUG, ch, fmt, ##arg); \ +} while (0) +#define VPRINTK(level, fmt, arg...) \ +do { \ + if (verbose) \ + ch_printk(level, ch, fmt, ##arg); \ +} while (0) + +/* ------------------------------------------------------------------- */ + +#define MAX_RETRIES 1 + +static struct class * ch_sysfs_class; + +typedef struct { + struct kref ref; + struct list_head list; + int minor; + char name[8]; + struct scsi_device *device; + struct scsi_device **dt; /* ptrs to data transfer elements */ + u_int firsts[CH_TYPES]; + u_int counts[CH_TYPES]; + u_int unit_attention; + u_int voltags; + struct mutex lock; +} scsi_changer; + +static DEFINE_IDR(ch_index_idr); +static DEFINE_SPINLOCK(ch_index_lock); + +static const struct { + unsigned char sense; + unsigned char asc; + unsigned char ascq; + int errno; +} ch_err[] = { +/* Just filled in what looks right. Hav'nt checked any standard paper for + these errno assignments, so they may be wrong... 
*/ + { + .sense = ILLEGAL_REQUEST, + .asc = 0x21, + .ascq = 0x01, + .errno = EBADSLT, /* Invalid element address */ + },{ + .sense = ILLEGAL_REQUEST, + .asc = 0x28, + .ascq = 0x01, + .errno = EBADE, /* Import or export element accessed */ + },{ + .sense = ILLEGAL_REQUEST, + .asc = 0x3B, + .ascq = 0x0D, + .errno = EXFULL, /* Medium destination element full */ + },{ + .sense = ILLEGAL_REQUEST, + .asc = 0x3B, + .ascq = 0x0E, + .errno = EBADE, /* Medium source element empty */ + },{ + .sense = ILLEGAL_REQUEST, + .asc = 0x20, + .ascq = 0x00, + .errno = EBADRQC, /* Invalid command operation code */ + },{ + /* end of list */ + } +}; + +/* ------------------------------------------------------------------- */ + +static int ch_find_errno(struct scsi_sense_hdr *sshdr) +{ + int i,errno = 0; + + /* Check to see if additional sense information is available */ + if (scsi_sense_valid(sshdr) && + sshdr->asc != 0) { + for (i = 0; ch_err[i].errno != 0; i++) { + if (ch_err[i].sense == sshdr->sense_key && + ch_err[i].asc == sshdr->asc && + ch_err[i].ascq == sshdr->ascq) { + errno = -ch_err[i].errno; + break; + } + } + } + if (errno == 0) + errno = -EIO; + return errno; +} + +static int +ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len, + void *buffer, unsigned int buflength, enum req_op op) +{ + int errno, retries = 0, timeout, result; + struct scsi_sense_hdr sshdr; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + + timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS) + ? timeout_init : timeout_move; + + retry: + errno = 0; + result = scsi_execute_cmd(ch->device, cmd, op, buffer, buflength, + timeout * HZ, MAX_RETRIES, &exec_args); + if (result < 0) + return result; + if (scsi_sense_valid(&sshdr)) { + if (debug) + scsi_print_sense_hdr(ch->device, ch->name, &sshdr); + errno = ch_find_errno(&sshdr); + + switch(sshdr.sense_key) { + case UNIT_ATTENTION: + ch->unit_attention = 1; + if (retries++ < 3) + goto retry; + break; + } + } + return errno; +} + +/* ------------------------------------------------------------------------ */ + +static int +ch_elem_to_typecode(scsi_changer *ch, u_int elem) +{ + int i; + + for (i = 0; i < CH_TYPES; i++) { + if (elem >= ch->firsts[i] && + elem < ch->firsts[i] + + ch->counts[i]) + return i+1; + } + return 0; +} + +static int +ch_read_element_status(scsi_changer *ch, u_int elem, char *data) +{ + u_char cmd[12]; + u_char *buffer; + int result; + + buffer = kmalloc(512, GFP_KERNEL); + if(!buffer) + return -ENOMEM; + + retry: + memset(cmd,0,sizeof(cmd)); + cmd[0] = READ_ELEMENT_STATUS; + cmd[1] = ((ch->device->lun & 0x7) << 5) | + (ch->voltags ? 
0x10 : 0) | + ch_elem_to_typecode(ch,elem); + cmd[2] = (elem >> 8) & 0xff; + cmd[3] = elem & 0xff; + cmd[5] = 1; + cmd[9] = 255; + if (0 == (result = ch_do_scsi(ch, cmd, 12, + buffer, 256, REQ_OP_DRV_IN))) { + if (((buffer[16] << 8) | buffer[17]) != elem) { + DPRINTK("asked for element 0x%02x, got 0x%02x\n", + elem,(buffer[16] << 8) | buffer[17]); + kfree(buffer); + return -EIO; + } + memcpy(data,buffer+16,16); + } else { + if (ch->voltags) { + ch->voltags = 0; + VPRINTK(KERN_INFO, "device has no volume tag support\n"); + goto retry; + } + DPRINTK("READ ELEMENT STATUS for element 0x%x failed\n",elem); + } + kfree(buffer); + return result; +} + +static int +ch_init_elem(scsi_changer *ch) +{ + int err; + u_char cmd[6]; + + VPRINTK(KERN_INFO, "INITIALIZE ELEMENT STATUS, may take some time ...\n"); + memset(cmd,0,sizeof(cmd)); + cmd[0] = INITIALIZE_ELEMENT_STATUS; + cmd[1] = (ch->device->lun & 0x7) << 5; + err = ch_do_scsi(ch, cmd, 6, NULL, 0, REQ_OP_DRV_IN); + VPRINTK(KERN_INFO, "... finished\n"); + return err; +} + +static int +ch_readconfig(scsi_changer *ch) +{ + u_char cmd[10], data[16]; + u_char *buffer; + int result,id,lun,i; + u_int elem; + + buffer = kzalloc(512, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + memset(cmd,0,sizeof(cmd)); + cmd[0] = MODE_SENSE; + cmd[1] = (ch->device->lun & 0x7) << 5; + cmd[2] = 0x1d; + cmd[4] = 255; + result = ch_do_scsi(ch, cmd, 10, buffer, 255, REQ_OP_DRV_IN); + if (0 != result) { + cmd[1] |= (1<<3); + result = ch_do_scsi(ch, cmd, 10, buffer, 255, REQ_OP_DRV_IN); + } + if (0 == result) { + ch->firsts[CHET_MT] = + (buffer[buffer[3]+ 6] << 8) | buffer[buffer[3]+ 7]; + ch->counts[CHET_MT] = + (buffer[buffer[3]+ 8] << 8) | buffer[buffer[3]+ 9]; + ch->firsts[CHET_ST] = + (buffer[buffer[3]+10] << 8) | buffer[buffer[3]+11]; + ch->counts[CHET_ST] = + (buffer[buffer[3]+12] << 8) | buffer[buffer[3]+13]; + ch->firsts[CHET_IE] = + (buffer[buffer[3]+14] << 8) | buffer[buffer[3]+15]; + ch->counts[CHET_IE] = + (buffer[buffer[3]+16] << 8) | buffer[buffer[3]+17]; + ch->firsts[CHET_DT] = + (buffer[buffer[3]+18] << 8) | buffer[buffer[3]+19]; + ch->counts[CHET_DT] = + (buffer[buffer[3]+20] << 8) | buffer[buffer[3]+21]; + VPRINTK(KERN_INFO, "type #1 (mt): 0x%x+%d [medium transport]\n", + ch->firsts[CHET_MT], + ch->counts[CHET_MT]); + VPRINTK(KERN_INFO, "type #2 (st): 0x%x+%d [storage]\n", + ch->firsts[CHET_ST], + ch->counts[CHET_ST]); + VPRINTK(KERN_INFO, "type #3 (ie): 0x%x+%d [import/export]\n", + ch->firsts[CHET_IE], + ch->counts[CHET_IE]); + VPRINTK(KERN_INFO, "type #4 (dt): 0x%x+%d [data transfer]\n", + ch->firsts[CHET_DT], + ch->counts[CHET_DT]); + } else { + VPRINTK(KERN_INFO, "reading element address assignment page failed!\n"); + } + + /* vendor specific element types */ + for (i = 0; i < 4; i++) { + if (0 == vendor_counts[i]) + continue; + if (NULL == vendor_labels[i]) + continue; + ch->firsts[CHET_V1+i] = vendor_firsts[i]; + ch->counts[CHET_V1+i] = vendor_counts[i]; + VPRINTK(KERN_INFO, "type #%d (v%d): 0x%x+%d [%s, vendor specific]\n", + i+5,i+1,vendor_firsts[i],vendor_counts[i], + vendor_labels[i]); + } + + /* look up the devices of the data transfer elements */ + ch->dt = kcalloc(ch->counts[CHET_DT], sizeof(*ch->dt), + GFP_KERNEL); + + if (!ch->dt) { + kfree(buffer); + return -ENOMEM; + } + + for (elem = 0; elem < ch->counts[CHET_DT]; elem++) { + id = -1; + lun = 0; + if (elem < CH_DT_MAX && -1 != dt_id[elem]) { + id = dt_id[elem]; + lun = dt_lun[elem]; + VPRINTK(KERN_INFO, "dt 0x%x: [insmod option] ", + elem+ch->firsts[CHET_DT]); + } else if (0 != 
ch_read_element_status + (ch,elem+ch->firsts[CHET_DT],data)) { + VPRINTK(KERN_INFO, "dt 0x%x: READ ELEMENT STATUS failed\n", + elem+ch->firsts[CHET_DT]); + } else { + VPRINTK(KERN_INFO, "dt 0x%x: ",elem+ch->firsts[CHET_DT]); + if (data[6] & 0x80) { + VPRINTK(KERN_CONT, "not this SCSI bus\n"); + ch->dt[elem] = NULL; + } else if (0 == (data[6] & 0x30)) { + VPRINTK(KERN_CONT, "ID/LUN unknown\n"); + ch->dt[elem] = NULL; + } else { + id = ch->device->id; + lun = 0; + if (data[6] & 0x20) id = data[7]; + if (data[6] & 0x10) lun = data[6] & 7; + } + } + if (-1 != id) { + VPRINTK(KERN_CONT, "ID %i, LUN %i, ",id,lun); + ch->dt[elem] = + scsi_device_lookup(ch->device->host, + ch->device->channel, + id,lun); + if (!ch->dt[elem]) { + /* should not happen */ + VPRINTK(KERN_CONT, "Huh? device not found!\n"); + } else { + VPRINTK(KERN_CONT, "name: %8.8s %16.16s %4.4s\n", + ch->dt[elem]->vendor, + ch->dt[elem]->model, + ch->dt[elem]->rev); + } + } + } + ch->voltags = 1; + kfree(buffer); + + return 0; +} + +/* ------------------------------------------------------------------------ */ + +static int +ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate) +{ + u_char cmd[10]; + + DPRINTK("position: 0x%x\n",elem); + if (0 == trans) + trans = ch->firsts[CHET_MT]; + memset(cmd,0,sizeof(cmd)); + cmd[0] = POSITION_TO_ELEMENT; + cmd[1] = (ch->device->lun & 0x7) << 5; + cmd[2] = (trans >> 8) & 0xff; + cmd[3] = trans & 0xff; + cmd[4] = (elem >> 8) & 0xff; + cmd[5] = elem & 0xff; + cmd[8] = rotate ? 1 : 0; + return ch_do_scsi(ch, cmd, 10, NULL, 0, REQ_OP_DRV_IN); +} + +static int +ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate) +{ + u_char cmd[12]; + + DPRINTK("move: 0x%x => 0x%x\n",src,dest); + if (0 == trans) + trans = ch->firsts[CHET_MT]; + memset(cmd,0,sizeof(cmd)); + cmd[0] = MOVE_MEDIUM; + cmd[1] = (ch->device->lun & 0x7) << 5; + cmd[2] = (trans >> 8) & 0xff; + cmd[3] = trans & 0xff; + cmd[4] = (src >> 8) & 0xff; + cmd[5] = src & 0xff; + cmd[6] = (dest >> 8) & 0xff; + cmd[7] = dest & 0xff; + cmd[10] = rotate ? 1 : 0; + return ch_do_scsi(ch, cmd, 12, NULL, 0, REQ_OP_DRV_IN); +} + +static int +ch_exchange(scsi_changer *ch, u_int trans, u_int src, + u_int dest1, u_int dest2, int rotate1, int rotate2) +{ + u_char cmd[12]; + + DPRINTK("exchange: 0x%x => 0x%x => 0x%x\n", + src,dest1,dest2); + if (0 == trans) + trans = ch->firsts[CHET_MT]; + memset(cmd,0,sizeof(cmd)); + cmd[0] = EXCHANGE_MEDIUM; + cmd[1] = (ch->device->lun & 0x7) << 5; + cmd[2] = (trans >> 8) & 0xff; + cmd[3] = trans & 0xff; + cmd[4] = (src >> 8) & 0xff; + cmd[5] = src & 0xff; + cmd[6] = (dest1 >> 8) & 0xff; + cmd[7] = dest1 & 0xff; + cmd[8] = (dest2 >> 8) & 0xff; + cmd[9] = dest2 & 0xff; + cmd[10] = (rotate1 ? 1 : 0) | (rotate2 ? 2 : 0); + + return ch_do_scsi(ch, cmd, 12, NULL, 0, REQ_OP_DRV_IN); +} + +static void +ch_check_voltag(char *tag) +{ + int i; + + for (i = 0; i < 32; i++) { + /* restrict to ascii */ + if (tag[i] >= 0x7f || tag[i] < 0x20) + tag[i] = ' '; + /* don't allow search wildcards */ + if (tag[i] == '?' || + tag[i] == '*') + tag[i] = ' '; + } +} + +static int +ch_set_voltag(scsi_changer *ch, u_int elem, + int alternate, int clear, u_char *tag) +{ + u_char cmd[12]; + u_char *buffer; + int result; + + buffer = kzalloc(512, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + DPRINTK("%s %s voltag: 0x%x => \"%s\"\n", + clear ? "clear" : "set", + alternate ? 
"alternate" : "primary", + elem, tag); + memset(cmd,0,sizeof(cmd)); + cmd[0] = SEND_VOLUME_TAG; + cmd[1] = ((ch->device->lun & 0x7) << 5) | + ch_elem_to_typecode(ch,elem); + cmd[2] = (elem >> 8) & 0xff; + cmd[3] = elem & 0xff; + cmd[5] = clear + ? (alternate ? 0x0d : 0x0c) + : (alternate ? 0x0b : 0x0a); + + cmd[9] = 255; + + memcpy(buffer,tag,32); + ch_check_voltag(buffer); + + result = ch_do_scsi(ch, cmd, 12, buffer, 256, REQ_OP_DRV_OUT); + kfree(buffer); + return result; +} + +static int ch_gstatus(scsi_changer *ch, int type, unsigned char __user *dest) +{ + int retval = 0; + u_char data[16]; + unsigned int i; + + mutex_lock(&ch->lock); + for (i = 0; i < ch->counts[type]; i++) { + if (0 != ch_read_element_status + (ch, ch->firsts[type]+i,data)) { + retval = -EIO; + break; + } + put_user(data[2], dest+i); + if (data[2] & CESTATUS_EXCEPT) + VPRINTK(KERN_INFO, "element 0x%x: asc=0x%x, ascq=0x%x\n", + ch->firsts[type]+i, + (int)data[4],(int)data[5]); + retval = ch_read_element_status + (ch, ch->firsts[type]+i,data); + if (0 != retval) + break; + } + mutex_unlock(&ch->lock); + return retval; +} + +/* ------------------------------------------------------------------------ */ + +static void ch_destroy(struct kref *ref) +{ + scsi_changer *ch = container_of(ref, scsi_changer, ref); + + ch->device = NULL; + kfree(ch->dt); + kfree(ch); +} + +static int +ch_release(struct inode *inode, struct file *file) +{ + scsi_changer *ch = file->private_data; + + scsi_device_put(ch->device); + file->private_data = NULL; + kref_put(&ch->ref, ch_destroy); + return 0; +} + +static int +ch_open(struct inode *inode, struct file *file) +{ + scsi_changer *ch; + int minor = iminor(inode); + + spin_lock(&ch_index_lock); + ch = idr_find(&ch_index_idr, minor); + + if (ch == NULL || !kref_get_unless_zero(&ch->ref)) { + spin_unlock(&ch_index_lock); + return -ENXIO; + } + spin_unlock(&ch_index_lock); + if (scsi_device_get(ch->device)) { + kref_put(&ch->ref, ch_destroy); + return -ENXIO; + } + /* Synchronize with ch_probe() */ + mutex_lock(&ch->lock); + file->private_data = ch; + mutex_unlock(&ch->lock); + return 0; +} + +static int +ch_checkrange(scsi_changer *ch, unsigned int type, unsigned int unit) +{ + if (type >= CH_TYPES || unit >= ch->counts[type]) + return -1; + return 0; +} + +struct changer_element_status32 { + int ces_type; + compat_uptr_t ces_data; +}; +#define CHIOGSTATUS32 _IOW('c', 8, struct changer_element_status32) + +static long ch_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + scsi_changer *ch = file->private_data; + int retval; + void __user *argp = (void __user *)arg; + + retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd, + file->f_flags & O_NDELAY); + if (retval) + return retval; + + switch (cmd) { + case CHIOGPARAMS: + { + struct changer_params params; + + params.cp_curpicker = 0; + params.cp_npickers = ch->counts[CHET_MT]; + params.cp_nslots = ch->counts[CHET_ST]; + params.cp_nportals = ch->counts[CHET_IE]; + params.cp_ndrives = ch->counts[CHET_DT]; + + if (copy_to_user(argp, ¶ms, sizeof(params))) + return -EFAULT; + return 0; + } + case CHIOGVPARAMS: + { + struct changer_vendor_params vparams; + + memset(&vparams,0,sizeof(vparams)); + if (ch->counts[CHET_V1]) { + vparams.cvp_n1 = ch->counts[CHET_V1]; + strncpy(vparams.cvp_label1,vendor_labels[0],16); + } + if (ch->counts[CHET_V2]) { + vparams.cvp_n2 = ch->counts[CHET_V2]; + strncpy(vparams.cvp_label2,vendor_labels[1],16); + } + if (ch->counts[CHET_V3]) { + vparams.cvp_n3 = ch->counts[CHET_V3]; + 
strncpy(vparams.cvp_label3,vendor_labels[2],16); + } + if (ch->counts[CHET_V4]) { + vparams.cvp_n4 = ch->counts[CHET_V4]; + strncpy(vparams.cvp_label4,vendor_labels[3],16); + } + if (copy_to_user(argp, &vparams, sizeof(vparams))) + return -EFAULT; + return 0; + } + + case CHIOPOSITION: + { + struct changer_position pos; + + if (copy_from_user(&pos, argp, sizeof (pos))) + return -EFAULT; + + if (0 != ch_checkrange(ch, pos.cp_type, pos.cp_unit)) { + DPRINTK("CHIOPOSITION: invalid parameter\n"); + return -EBADSLT; + } + mutex_lock(&ch->lock); + retval = ch_position(ch,0, + ch->firsts[pos.cp_type] + pos.cp_unit, + pos.cp_flags & CP_INVERT); + mutex_unlock(&ch->lock); + return retval; + } + + case CHIOMOVE: + { + struct changer_move mv; + + if (copy_from_user(&mv, argp, sizeof (mv))) + return -EFAULT; + + if (0 != ch_checkrange(ch, mv.cm_fromtype, mv.cm_fromunit) || + 0 != ch_checkrange(ch, mv.cm_totype, mv.cm_tounit )) { + DPRINTK("CHIOMOVE: invalid parameter\n"); + return -EBADSLT; + } + + mutex_lock(&ch->lock); + retval = ch_move(ch,0, + ch->firsts[mv.cm_fromtype] + mv.cm_fromunit, + ch->firsts[mv.cm_totype] + mv.cm_tounit, + mv.cm_flags & CM_INVERT); + mutex_unlock(&ch->lock); + return retval; + } + + case CHIOEXCHANGE: + { + struct changer_exchange mv; + + if (copy_from_user(&mv, argp, sizeof (mv))) + return -EFAULT; + + if (0 != ch_checkrange(ch, mv.ce_srctype, mv.ce_srcunit ) || + 0 != ch_checkrange(ch, mv.ce_fdsttype, mv.ce_fdstunit) || + 0 != ch_checkrange(ch, mv.ce_sdsttype, mv.ce_sdstunit)) { + DPRINTK("CHIOEXCHANGE: invalid parameter\n"); + return -EBADSLT; + } + + mutex_lock(&ch->lock); + retval = ch_exchange + (ch,0, + ch->firsts[mv.ce_srctype] + mv.ce_srcunit, + ch->firsts[mv.ce_fdsttype] + mv.ce_fdstunit, + ch->firsts[mv.ce_sdsttype] + mv.ce_sdstunit, + mv.ce_flags & CE_INVERT1, mv.ce_flags & CE_INVERT2); + mutex_unlock(&ch->lock); + return retval; + } + + case CHIOGSTATUS: + { + struct changer_element_status ces; + + if (copy_from_user(&ces, argp, sizeof (ces))) + return -EFAULT; + if (ces.ces_type < 0 || ces.ces_type >= CH_TYPES) + return -EINVAL; + + return ch_gstatus(ch, ces.ces_type, ces.ces_data); + } +#ifdef CONFIG_COMPAT + case CHIOGSTATUS32: + { + struct changer_element_status32 ces32; + + if (copy_from_user(&ces32, argp, sizeof(ces32))) + return -EFAULT; + if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES) + return -EINVAL; + + return ch_gstatus(ch, ces32.ces_type, + compat_ptr(ces32.ces_data)); + } +#endif + case CHIOGELEM: + { + struct changer_get_element cge; + u_char ch_cmd[12]; + u_char *buffer; + unsigned int elem; + int result,i; + + if (copy_from_user(&cge, argp, sizeof (cge))) + return -EFAULT; + + if (0 != ch_checkrange(ch, cge.cge_type, cge.cge_unit)) + return -EINVAL; + elem = ch->firsts[cge.cge_type] + cge.cge_unit; + + buffer = kmalloc(512, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + mutex_lock(&ch->lock); + + voltag_retry: + memset(ch_cmd, 0, sizeof(ch_cmd)); + ch_cmd[0] = READ_ELEMENT_STATUS; + ch_cmd[1] = ((ch->device->lun & 0x7) << 5) | + (ch->voltags ? 
0x10 : 0) | + ch_elem_to_typecode(ch,elem); + ch_cmd[2] = (elem >> 8) & 0xff; + ch_cmd[3] = elem & 0xff; + ch_cmd[5] = 1; + ch_cmd[9] = 255; + + result = ch_do_scsi(ch, ch_cmd, 12, buffer, 256, REQ_OP_DRV_IN); + if (!result) { + cge.cge_status = buffer[18]; + cge.cge_flags = 0; + if (buffer[18] & CESTATUS_EXCEPT) { + cge.cge_errno = EIO; + } + if (buffer[25] & 0x80) { + cge.cge_flags |= CGE_SRC; + if (buffer[25] & 0x40) + cge.cge_flags |= CGE_INVERT; + elem = (buffer[26]<<8) | buffer[27]; + for (i = 0; i < 4; i++) { + if (elem >= ch->firsts[i] && + elem < ch->firsts[i] + ch->counts[i]) { + cge.cge_srctype = i; + cge.cge_srcunit = elem-ch->firsts[i]; + } + } + } + if ((buffer[22] & 0x30) == 0x30) { + cge.cge_flags |= CGE_IDLUN; + cge.cge_id = buffer[23]; + cge.cge_lun = buffer[22] & 7; + } + if (buffer[9] & 0x80) { + cge.cge_flags |= CGE_PVOLTAG; + memcpy(cge.cge_pvoltag,buffer+28,36); + } + if (buffer[9] & 0x40) { + cge.cge_flags |= CGE_AVOLTAG; + memcpy(cge.cge_avoltag,buffer+64,36); + } + } else if (ch->voltags) { + ch->voltags = 0; + VPRINTK(KERN_INFO, "device has no volume tag support\n"); + goto voltag_retry; + } + kfree(buffer); + mutex_unlock(&ch->lock); + + if (copy_to_user(argp, &cge, sizeof (cge))) + return -EFAULT; + return result; + } + + case CHIOINITELEM: + { + mutex_lock(&ch->lock); + retval = ch_init_elem(ch); + mutex_unlock(&ch->lock); + return retval; + } + + case CHIOSVOLTAG: + { + struct changer_set_voltag csv; + int elem; + + if (copy_from_user(&csv, argp, sizeof(csv))) + return -EFAULT; + + if (0 != ch_checkrange(ch, csv.csv_type, csv.csv_unit)) { + DPRINTK("CHIOSVOLTAG: invalid parameter\n"); + return -EBADSLT; + } + elem = ch->firsts[csv.csv_type] + csv.csv_unit; + mutex_lock(&ch->lock); + retval = ch_set_voltag(ch, elem, + csv.csv_flags & CSV_AVOLTAG, + csv.csv_flags & CSV_CLEARTAG, + csv.csv_voltag); + mutex_unlock(&ch->lock); + return retval; + } + + default: + return scsi_ioctl(ch->device, file->f_mode & FMODE_WRITE, cmd, + argp); + + } +} + +/* ------------------------------------------------------------------------ */ + +static int ch_probe(struct device *dev) +{ + struct scsi_device *sd = to_scsi_device(dev); + struct device *class_dev; + int ret; + scsi_changer *ch; + + if (sd->type != TYPE_MEDIUM_CHANGER) + return -ENODEV; + + ch = kzalloc(sizeof(*ch), GFP_KERNEL); + if (NULL == ch) + return -ENOMEM; + + idr_preload(GFP_KERNEL); + spin_lock(&ch_index_lock); + ret = idr_alloc(&ch_index_idr, ch, 0, CH_MAX_DEVS + 1, GFP_NOWAIT); + spin_unlock(&ch_index_lock); + idr_preload_end(); + + if (ret < 0) { + if (ret == -ENOSPC) + ret = -ENODEV; + goto free_ch; + } + + ch->minor = ret; + sprintf(ch->name,"ch%d",ch->minor); + ret = scsi_device_get(sd); + if (ret) { + sdev_printk(KERN_WARNING, sd, "ch%d: failed to get device\n", + ch->minor); + goto remove_idr; + } + + mutex_init(&ch->lock); + kref_init(&ch->ref); + ch->device = sd; + class_dev = device_create(ch_sysfs_class, dev, + MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch, + "s%s", ch->name); + if (IS_ERR(class_dev)) { + sdev_printk(KERN_WARNING, sd, "ch%d: device_create failed\n", + ch->minor); + ret = PTR_ERR(class_dev); + goto put_device; + } + + mutex_lock(&ch->lock); + ret = ch_readconfig(ch); + if (ret) { + mutex_unlock(&ch->lock); + goto destroy_dev; + } + if (init) + ch_init_elem(ch); + + mutex_unlock(&ch->lock); + dev_set_drvdata(dev, ch); + sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name); + + return 0; +destroy_dev: + device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor)); 
+put_device: + scsi_device_put(sd); +remove_idr: + idr_remove(&ch_index_idr, ch->minor); +free_ch: + kfree(ch); + return ret; +} + +static int ch_remove(struct device *dev) +{ + scsi_changer *ch = dev_get_drvdata(dev); + + spin_lock(&ch_index_lock); + idr_remove(&ch_index_idr, ch->minor); + dev_set_drvdata(dev, NULL); + spin_unlock(&ch_index_lock); + + device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor)); + scsi_device_put(ch->device); + kref_put(&ch->ref, ch_destroy); + return 0; +} + +static struct scsi_driver ch_template = { + .gendrv = { + .name = "ch", + .owner = THIS_MODULE, + .probe = ch_probe, + .remove = ch_remove, + }, +}; + +static const struct file_operations changer_fops = { + .owner = THIS_MODULE, + .open = ch_open, + .release = ch_release, + .unlocked_ioctl = ch_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .llseek = noop_llseek, +}; + +static int __init init_ch_module(void) +{ + int rc; + + printk(KERN_INFO "SCSI Media Changer driver v" VERSION " \n"); + ch_sysfs_class = class_create("scsi_changer"); + if (IS_ERR(ch_sysfs_class)) { + rc = PTR_ERR(ch_sysfs_class); + return rc; + } + rc = register_chrdev(SCSI_CHANGER_MAJOR,"ch",&changer_fops); + if (rc < 0) { + printk("Unable to get major %d for SCSI-Changer\n", + SCSI_CHANGER_MAJOR); + goto fail1; + } + rc = scsi_register_driver(&ch_template.gendrv); + if (rc < 0) + goto fail2; + return 0; + + fail2: + unregister_chrdev(SCSI_CHANGER_MAJOR, "ch"); + fail1: + class_destroy(ch_sysfs_class); + return rc; +} + +static void __exit exit_ch_module(void) +{ + scsi_unregister_driver(&ch_template.gendrv); + unregister_chrdev(SCSI_CHANGER_MAJOR, "ch"); + class_destroy(ch_sysfs_class); + idr_destroy(&ch_index_idr); +} + +module_init(init_ch_module); +module_exit(exit_ch_module); diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c new file mode 100644 index 000000000..340785536 --- /dev/null +++ b/drivers/scsi/constants.c @@ -0,0 +1,444 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ASCII values for a number of symbolic constants, printing functions, + * etc. + * Additions for SCSI 2 and Linux 2.2.x by D. Gilbert (990422) + * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002) + * by D. Gilbert and aeb (20020609) + * Updated to SPC-4 T10/1713-D Rev 36g, D. 
Gilbert 20130701 + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* Commands with service actions that change the command name */ +#define THIRD_PARTY_COPY_OUT 0x83 +#define THIRD_PARTY_COPY_IN 0x84 + +struct sa_name_list { + int opcode; + const struct value_name_pair *arr; + int arr_sz; +}; + +struct value_name_pair { + int value; + const char * name; +}; + +static const char * cdb_byte0_names[] = { +/* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense", +/* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, + "Reassign Blocks", +/* 08-0d */ "Read(6)", NULL, "Write(6)", "Seek(6)", NULL, NULL, +/* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry", +/* 13-16 */ "Verify(6)", "Recover Buffered Data", "Mode Select(6)", + "Reserve(6)", +/* 17-1a */ "Release(6)", "Copy", "Erase", "Mode Sense(6)", +/* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic", +/* 1e-1f */ "Prevent/Allow Medium Removal", NULL, +/* 20-22 */ NULL, NULL, NULL, +/* 23-28 */ "Read Format Capacities", "Set Window", + "Read Capacity(10)", NULL, NULL, "Read(10)", +/* 29-2d */ "Read Generation", "Write(10)", "Seek(10)", "Erase(10)", + "Read updated block", +/* 2e-31 */ "Write Verify(10)", "Verify(10)", "Search High", "Search Equal", +/* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position", +/* 35-37 */ "Synchronize Cache(10)", "Lock/Unlock Cache(10)", + "Read Defect Data(10)", +/* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer", + "Read Buffer", +/* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)", +/* 40-41 */ "Change Definition", "Write Same(10)", +/* 42-48 */ "Unmap/Read sub-channel", "Read TOC/PMA/ATIP", + "Read density support", "Play audio(10)", "Get configuration", + "Play audio msf", "Sanitize/Play audio track/index", +/* 49-4f */ "Play track relative(10)", "Get event status notification", + "Pause/resume", "Log Select", "Log Sense", "Stop play/scan", + NULL, +/* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info", + "Reserve track", "Send OPC info", "Mode Select(10)", +/* 56-5b */ "Reserve(10)", "Release(10)", "Repair track", "Read master cue", + "Mode Sense(10)", "Close track/session", +/* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in", + "Persistent reserve out", +/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, +/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, +/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, +/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, "Extended CDB", + "Variable length", +/* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", + "Third party copy out", "Third party copy in", +/* 85-89 */ "ATA command pass through(16)", "Access control in", + "Access control out", "Read(16)", "Compare and Write", +/* 8a-8f */ "Write(16)", "ORWrite", "Read attributes", "Write attributes", + "Write and verify(16)", "Verify(16)", +/* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)", + "Lock/unlock cache(16)", "Write same(16)", NULL, +/* 95-99 */ NULL, NULL, NULL, NULL, NULL, +/* 9a-9f */ NULL, NULL, NULL, "Service action bidirectional", + "Service action in(16)", "Service action out(16)", +/* a0-a5 */ "Report luns", "ATA command pass through(12)/Blank", + "Security protocol in", "Maintenance in", "Maintenance out", + "Move medium/play audio(12)", +/* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)", + "Play track relative(12)", +/* aa-ae */ "Write(12)", NULL, 
"Erase(12), Get Performance", + "Read DVD structure", "Write and verify(12)", +/* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)", +/* b2-b4 */ "Search data low(12)", "Set limits(12)", + "Read element status attached", +/* b5-b6 */ "Security protocol out", "Send volume tag, set streaming", +/* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf", +/* ba-bc */ "Redundancy group (in), Scan", + "Redundancy group (out), Set cd-rom speed", "Spare (in), Play cd", +/* bd-bf */ "Spare (out), Mechanism status", "Volume set (in), Read cd", + "Volume set (out), Send DVD structure", +}; + +static const struct value_name_pair maint_in_arr[] = { + {0x5, "Report identifying information"}, + {0xa, "Report target port groups"}, + {0xb, "Report aliases"}, + {0xc, "Report supported operation codes"}, + {0xd, "Report supported task management functions"}, + {0xe, "Report priority"}, + {0xf, "Report timestamp"}, + {0x10, "Management protocol in"}, +}; +#define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr) + +static const struct value_name_pair maint_out_arr[] = { + {0x6, "Set identifying information"}, + {0xa, "Set target port groups"}, + {0xb, "Change aliases"}, + {0xc, "Remove I_T nexus"}, + {0xe, "Set priority"}, + {0xf, "Set timestamp"}, + {0x10, "Management protocol out"}, +}; +#define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr) + +static const struct value_name_pair serv_in12_arr[] = { + {0x1, "Read media serial number"}, +}; +#define SERV_IN12_SZ ARRAY_SIZE(serv_in12_arr) + +static const struct value_name_pair serv_out12_arr[] = { + {-1, "dummy entry"}, +}; +#define SERV_OUT12_SZ ARRAY_SIZE(serv_out12_arr) + +static const struct value_name_pair serv_bidi_arr[] = { + {-1, "dummy entry"}, +}; +#define SERV_BIDI_SZ ARRAY_SIZE(serv_bidi_arr) + +static const struct value_name_pair serv_in16_arr[] = { + {0x10, "Read capacity(16)"}, + {0x11, "Read long(16)"}, + {0x12, "Get LBA status"}, + {0x13, "Report referrals"}, +}; +#define SERV_IN16_SZ ARRAY_SIZE(serv_in16_arr) + +static const struct value_name_pair serv_out16_arr[] = { + {0x11, "Write long(16)"}, + {0x1f, "Notify data transfer device(16)"}, +}; +#define SERV_OUT16_SZ ARRAY_SIZE(serv_out16_arr) + +static const struct value_name_pair pr_in_arr[] = { + {0x0, "Persistent reserve in, read keys"}, + {0x1, "Persistent reserve in, read reservation"}, + {0x2, "Persistent reserve in, report capabilities"}, + {0x3, "Persistent reserve in, read full status"}, +}; +#define PR_IN_SZ ARRAY_SIZE(pr_in_arr) + +static const struct value_name_pair pr_out_arr[] = { + {0x0, "Persistent reserve out, register"}, + {0x1, "Persistent reserve out, reserve"}, + {0x2, "Persistent reserve out, release"}, + {0x3, "Persistent reserve out, clear"}, + {0x4, "Persistent reserve out, preempt"}, + {0x5, "Persistent reserve out, preempt and abort"}, + {0x6, "Persistent reserve out, register and ignore existing key"}, + {0x7, "Persistent reserve out, register and move"}, +}; +#define PR_OUT_SZ ARRAY_SIZE(pr_out_arr) + +/* SPC-4 rev 34 renamed the Extended Copy opcode to Third Party Copy Out. 
+ LID1 (List Identifier length: 1 byte) is the Extended Copy found in SPC-2 + and SPC-3 */ +static const struct value_name_pair tpc_out_arr[] = { + {0x0, "Extended copy(LID1)"}, + {0x1, "Extended copy(LID4)"}, + {0x10, "Populate token"}, + {0x11, "Write using token"}, + {0x1c, "Copy operation abort"}, +}; +#define TPC_OUT_SZ ARRAY_SIZE(tpc_out_arr) + +static const struct value_name_pair tpc_in_arr[] = { + {0x0, "Receive copy status(LID1)"}, + {0x1, "Receive copy data(LID1)"}, + {0x3, "Receive copy operating parameters"}, + {0x4, "Receive copy failure details(LID1)"}, + {0x5, "Receive copy status(LID4)"}, + {0x6, "Receive copy data(LID4)"}, + {0x7, "Receive ROD token information"}, + {0x8, "Report all ROD tokens"}, +}; +#define TPC_IN_SZ ARRAY_SIZE(tpc_in_arr) + + +static const struct value_name_pair variable_length_arr[] = { + {0x1, "Rebuild(32)"}, + {0x2, "Regenerate(32)"}, + {0x3, "Xdread(32)"}, + {0x4, "Xdwrite(32)"}, + {0x5, "Xdwrite extended(32)"}, + {0x6, "Xpwrite(32)"}, + {0x7, "Xdwriteread(32)"}, + {0x8, "Xdwrite extended(64)"}, + {0x9, "Read(32)"}, + {0xa, "Verify(32)"}, + {0xb, "Write(32)"}, + {0xc, "Write an verify(32)"}, + {0xd, "Write same(32)"}, + {0x8801, "Format OSD"}, + {0x8802, "Create (osd)"}, + {0x8803, "List (osd)"}, + {0x8805, "Read (osd)"}, + {0x8806, "Write (osd)"}, + {0x8807, "Append (osd)"}, + {0x8808, "Flush (osd)"}, + {0x880a, "Remove (osd)"}, + {0x880b, "Create partition (osd)"}, + {0x880c, "Remove partition (osd)"}, + {0x880e, "Get attributes (osd)"}, + {0x880f, "Set attributes (osd)"}, + {0x8812, "Create and write (osd)"}, + {0x8815, "Create collection (osd)"}, + {0x8816, "Remove collection (osd)"}, + {0x8817, "List collection (osd)"}, + {0x8818, "Set key (osd)"}, + {0x8819, "Set master key (osd)"}, + {0x881a, "Flush collection (osd)"}, + {0x881b, "Flush partition (osd)"}, + {0x881c, "Flush OSD"}, + {0x8f7e, "Perform SCSI command (osd)"}, + {0x8f7f, "Perform task management function (osd)"}, +}; +#define VARIABLE_LENGTH_SZ ARRAY_SIZE(variable_length_arr) + +static struct sa_name_list sa_names_arr[] = { + {VARIABLE_LENGTH_CMD, variable_length_arr, VARIABLE_LENGTH_SZ}, + {MAINTENANCE_IN, maint_in_arr, MAINT_IN_SZ}, + {MAINTENANCE_OUT, maint_out_arr, MAINT_OUT_SZ}, + {PERSISTENT_RESERVE_IN, pr_in_arr, PR_IN_SZ}, + {PERSISTENT_RESERVE_OUT, pr_out_arr, PR_OUT_SZ}, + {SERVICE_ACTION_IN_12, serv_in12_arr, SERV_IN12_SZ}, + {SERVICE_ACTION_OUT_12, serv_out12_arr, SERV_OUT12_SZ}, + {SERVICE_ACTION_BIDIRECTIONAL, serv_bidi_arr, SERV_BIDI_SZ}, + {SERVICE_ACTION_IN_16, serv_in16_arr, SERV_IN16_SZ}, + {SERVICE_ACTION_OUT_16, serv_out16_arr, SERV_OUT16_SZ}, + {THIRD_PARTY_COPY_IN, tpc_in_arr, TPC_IN_SZ}, + {THIRD_PARTY_COPY_OUT, tpc_out_arr, TPC_OUT_SZ}, + {0, NULL, 0}, +}; + +bool scsi_opcode_sa_name(int opcode, int service_action, + const char **cdb_name, const char **sa_name) +{ + struct sa_name_list *sa_name_ptr; + const struct value_name_pair *arr = NULL; + int arr_sz, k; + + *cdb_name = NULL; + if (opcode >= VENDOR_SPECIFIC_CDB) + return false; + + if (opcode < ARRAY_SIZE(cdb_byte0_names)) + *cdb_name = cdb_byte0_names[opcode]; + + for (sa_name_ptr = sa_names_arr; sa_name_ptr->arr; ++sa_name_ptr) { + if (sa_name_ptr->opcode == opcode) { + arr = sa_name_ptr->arr; + arr_sz = sa_name_ptr->arr_sz; + break; + } + } + if (!arr) + return false; + + for (k = 0; k < arr_sz; ++k, ++arr) { + if (service_action == arr->value) + break; + } + if (k < arr_sz) + *sa_name = arr->name; + + return true; +} + +struct error_info { + unsigned short code12; /* 0x0302 looks better than 
0x03,0x02 */ + unsigned short size; +}; + +/* + * There are 700+ entries in this table. To save space, we don't store + * (code, pointer) pairs, which would make sizeof(struct + * error_info)==16 on 64 bits. Rather, the second element just stores + * the size (including \0) of the corresponding string, and we use the + * sum of these to get the appropriate offset into additional_text + * defined below. This approach saves 12 bytes per entry. + */ +static const struct error_info additional[] = +{ +#define SENSE_CODE(c, s) {c, sizeof(s)}, +#include "sense_codes.h" +#undef SENSE_CODE +}; + +static const char *additional_text = +#define SENSE_CODE(c, s) s "\0" +#include "sense_codes.h" +#undef SENSE_CODE + ; + +struct error_info2 { + unsigned char code1, code2_min, code2_max; + const char * str; + const char * fmt; +}; + +static const struct error_info2 additional2[] = +{ + {0x40, 0x00, 0x7f, "Ram failure", ""}, + {0x40, 0x80, 0xff, "Diagnostic failure on component", ""}, + {0x41, 0x00, 0xff, "Data path failure", ""}, + {0x42, 0x00, 0xff, "Power-on or self-test failure", ""}, + {0x4D, 0x00, 0xff, "Tagged overlapped commands", "task tag "}, + {0x70, 0x00, 0xff, "Decompression exception", "short algorithm id of "}, + {0, 0, 0, NULL, NULL} +}; + +/* description of the sense key values */ +static const char * const snstext[] = { + "No Sense", /* 0: There is no sense information */ + "Recovered Error", /* 1: The last command completed successfully + but used error correction */ + "Not Ready", /* 2: The addressed target is not ready */ + "Medium Error", /* 3: Data error detected on the medium */ + "Hardware Error", /* 4: Controller or device failure */ + "Illegal Request", /* 5: Error in request */ + "Unit Attention", /* 6: Removable medium was changed, or + the target has been reset, or ... */ + "Data Protect", /* 7: Access to the data is blocked */ + "Blank Check", /* 8: Reached unexpected written or unwritten + region of the medium */ + "Vendor Specific(9)", + "Copy Aborted", /* A: COPY or COMPARE was aborted */ + "Aborted Command", /* B: The target aborted the command */ + "Equal", /* C: A SEARCH DATA command found data equal, + reserved in SPC-4 rev 36 */ + "Volume Overflow", /* D: Medium full with still data to be written */ + "Miscompare", /* E: Source data and data on the medium + do not agree */ + "Completed", /* F: command completed sense data reported, + may occur for successful command */ +}; + +/* Get sense key string or NULL if not available */ +const char * +scsi_sense_key_string(unsigned char key) +{ + if (key < ARRAY_SIZE(snstext)) + return snstext[key]; + return NULL; +} +EXPORT_SYMBOL(scsi_sense_key_string); + +/* + * Get additional sense code string or NULL if not available. + * This string may contain a "%x" and should be printed with ascq as arg. 
+ */ +const char * +scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt) +{ + int i; + unsigned short code = ((asc << 8) | ascq); + unsigned offset = 0; + + *fmt = NULL; + for (i = 0; i < ARRAY_SIZE(additional); i++) { + if (additional[i].code12 == code) + return additional_text + offset; + offset += additional[i].size; + } + for (i = 0; additional2[i].fmt; i++) { + if (additional2[i].code1 == asc && + ascq >= additional2[i].code2_min && + ascq <= additional2[i].code2_max) { + *fmt = additional2[i].fmt; + return additional2[i].str; + } + } + return NULL; +} +EXPORT_SYMBOL(scsi_extd_sense_format); + +static const char * const hostbyte_table[]={ +"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET", +"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR", +"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE", +"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE", +"DID_NEXUS_FAILURE", "DID_ALLOC_FAILURE", "DID_MEDIUM_ERROR" }; + +const char *scsi_hostbyte_string(int result) +{ + enum scsi_host_status hb = host_byte(result); + const char *hb_string = NULL; + + if (hb < ARRAY_SIZE(hostbyte_table)) + hb_string = hostbyte_table[hb]; + return hb_string; +} +EXPORT_SYMBOL(scsi_hostbyte_string); + +#define scsi_mlreturn_name(result) { result, #result } +static const struct value_name_pair scsi_mlreturn_arr[] = { + scsi_mlreturn_name(NEEDS_RETRY), + scsi_mlreturn_name(SUCCESS), + scsi_mlreturn_name(FAILED), + scsi_mlreturn_name(QUEUED), + scsi_mlreturn_name(SOFT_ERROR), + scsi_mlreturn_name(ADD_TO_MLQUEUE), + scsi_mlreturn_name(TIMEOUT_ERROR), + scsi_mlreturn_name(SCSI_RETURN_NOT_HANDLED), + scsi_mlreturn_name(FAST_IO_FAIL) +}; + +const char *scsi_mlreturn_string(int result) +{ + const struct value_name_pair *arr = scsi_mlreturn_arr; + int k; + + for (k = 0; k < ARRAY_SIZE(scsi_mlreturn_arr); ++k, ++arr) { + if (result == arr->value) + return arr->name; + } + return NULL; +} +EXPORT_SYMBOL(scsi_mlreturn_string); diff --git a/drivers/scsi/csiostor/Kconfig b/drivers/scsi/csiostor/Kconfig new file mode 100644 index 000000000..c6c03f9e3 --- /dev/null +++ b/drivers/scsi/csiostor/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only +config SCSI_CHELSIO_FCOE + tristate "Chelsio Communications FCoE support" + depends on PCI && SCSI + depends on SCSI_FC_ATTRS + select FW_LOADER + help + This driver supports FCoE Offload functionality over + Chelsio T4-based 10Gb Converged Network Adapters. + + For general information about Chelsio and our products, visit + our website at . + + For customer support, please visit our customer support page at + . + + Please send feedback to . + + To compile this driver as a module choose M here; the module + will be called csiostor. 
diff --git a/drivers/scsi/csiostor/Makefile b/drivers/scsi/csiostor/Makefile new file mode 100644 index 000000000..d047e22ea --- /dev/null +++ b/drivers/scsi/csiostor/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# +## Chelsio FCoE driver +# +## + +ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4 + +obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o + +csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \ + csio_hw.o csio_hw_t5.o csio_isr.o \ + csio_mb.o csio_rnode.o csio_wr.o diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c new file mode 100644 index 000000000..200e50089 --- /dev/null +++ b/drivers/scsi/csiostor/csio_attr.c @@ -0,0 +1,805 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "csio_init.h" + +static void +csio_vport_set_state(struct csio_lnode *ln); + +/* + * csio_reg_rnode - Register a remote port with FC transport. + * @rn: Rnode representing remote port. + * + * Call fc_remote_port_add() to register this remote port with FC transport. + * If remote port is Initiator OR Target OR both, change the role appropriately. 
+ * + */ +void +csio_reg_rnode(struct csio_rnode *rn) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + struct Scsi_Host *shost = csio_ln_to_shost(ln); + struct fc_rport_identifiers ids; + struct fc_rport *rport; + struct csio_service_parms *sp; + + ids.node_name = wwn_to_u64(csio_rn_wwnn(rn)); + ids.port_name = wwn_to_u64(csio_rn_wwpn(rn)); + ids.port_id = rn->nport_id; + ids.roles = FC_RPORT_ROLE_UNKNOWN; + + if (rn->role & CSIO_RNFR_INITIATOR || rn->role & CSIO_RNFR_TARGET) { + rport = rn->rport; + CSIO_ASSERT(rport != NULL); + goto update_role; + } + + rn->rport = fc_remote_port_add(shost, 0, &ids); + if (!rn->rport) { + csio_ln_err(ln, "Failed to register rport = 0x%x.\n", + rn->nport_id); + return; + } + + ln->num_reg_rnodes++; + rport = rn->rport; + spin_lock_irq(shost->host_lock); + *((struct csio_rnode **)rport->dd_data) = rn; + spin_unlock_irq(shost->host_lock); + + sp = &rn->rn_sparm; + rport->maxframe_size = ntohs(sp->csp.sp_bb_data); + if (ntohs(sp->clsp[2].cp_class) & FC_CPC_VALID) + rport->supported_classes = FC_COS_CLASS3; + else + rport->supported_classes = FC_COS_UNSPECIFIED; +update_role: + if (rn->role & CSIO_RNFR_INITIATOR) + ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; + if (rn->role & CSIO_RNFR_TARGET) + ids.roles |= FC_RPORT_ROLE_FCP_TARGET; + + if (ids.roles != FC_RPORT_ROLE_UNKNOWN) + fc_remote_port_rolechg(rport, ids.roles); + + rn->scsi_id = rport->scsi_target_id; + + csio_ln_dbg(ln, "Remote port x%x role 0x%x registered\n", + rn->nport_id, ids.roles); +} + +/* + * csio_unreg_rnode - Unregister a remote port with FC transport. + * @rn: Rnode representing remote port. + * + * Call fc_remote_port_delete() to unregister this remote port with FC + * transport. + * + */ +void +csio_unreg_rnode(struct csio_rnode *rn) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + struct fc_rport *rport = rn->rport; + + rn->role &= ~(CSIO_RNFR_INITIATOR | CSIO_RNFR_TARGET); + fc_remote_port_delete(rport); + ln->num_reg_rnodes--; + + csio_ln_dbg(ln, "Remote port x%x un-registered\n", rn->nport_id); +} + +/* + * csio_lnode_async_event - Async events from local port. + * @ln: lnode representing local port. + * + * Async events from local node that FC transport/SCSI ML + * should be made aware of (Eg: RSCN). + */ +void +csio_lnode_async_event(struct csio_lnode *ln, enum csio_ln_fc_evt fc_evt) +{ + switch (fc_evt) { + case CSIO_LN_FC_RSCN: + /* Get payload of rscn from ln */ + /* For each RSCN entry */ + /* + * fc_host_post_event(shost, + * fc_get_event_number(), + * FCH_EVT_RSCN, + * rscn_entry); + */ + break; + case CSIO_LN_FC_LINKUP: + /* send fc_host_post_event */ + /* set vport state */ + if (csio_is_npiv_ln(ln)) + csio_vport_set_state(ln); + + break; + case CSIO_LN_FC_LINKDOWN: + /* send fc_host_post_event */ + /* set vport state */ + if (csio_is_npiv_ln(ln)) + csio_vport_set_state(ln); + + break; + case CSIO_LN_FC_ATTRIB_UPDATE: + csio_fchost_attr_init(ln); + break; + default: + break; + } +} + +/* + * csio_fchost_attr_init - Initialize FC transport attributes + * @ln: Lnode. 
+ * + */ +void +csio_fchost_attr_init(struct csio_lnode *ln) +{ + struct Scsi_Host *shost = csio_ln_to_shost(ln); + + fc_host_node_name(shost) = wwn_to_u64(csio_ln_wwnn(ln)); + fc_host_port_name(shost) = wwn_to_u64(csio_ln_wwpn(ln)); + + fc_host_supported_classes(shost) = FC_COS_CLASS3; + fc_host_max_npiv_vports(shost) = + (csio_lnode_to_hw(ln))->fres_info.max_vnps; + fc_host_supported_speeds(shost) = FC_PORTSPEED_10GBIT | + FC_PORTSPEED_1GBIT; + + fc_host_maxframe_size(shost) = ntohs(ln->ln_sparm.csp.sp_bb_data); + memset(fc_host_supported_fc4s(shost), 0, + sizeof(fc_host_supported_fc4s(shost))); + fc_host_supported_fc4s(shost)[7] = 1; + + memset(fc_host_active_fc4s(shost), 0, + sizeof(fc_host_active_fc4s(shost))); + fc_host_active_fc4s(shost)[7] = 1; +} + +/* + * csio_get_host_port_id - sysfs entries for nport_id is + * populated/cached from this function + */ +static void +csio_get_host_port_id(struct Scsi_Host *shost) +{ + struct csio_lnode *ln = shost_priv(shost); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + spin_lock_irq(&hw->lock); + fc_host_port_id(shost) = ln->nport_id; + spin_unlock_irq(&hw->lock); +} + +/* + * csio_get_port_type - Return FC local port type. + * @shost: scsi host. + * + */ +static void +csio_get_host_port_type(struct Scsi_Host *shost) +{ + struct csio_lnode *ln = shost_priv(shost); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + spin_lock_irq(&hw->lock); + if (csio_is_npiv_ln(ln)) + fc_host_port_type(shost) = FC_PORTTYPE_NPIV; + else + fc_host_port_type(shost) = FC_PORTTYPE_NPORT; + spin_unlock_irq(&hw->lock); +} + +/* + * csio_get_port_state - Return FC local port state. + * @shost: scsi host. + * + */ +static void +csio_get_host_port_state(struct Scsi_Host *shost) +{ + struct csio_lnode *ln = shost_priv(shost); + struct csio_hw *hw = csio_lnode_to_hw(ln); + char state[16]; + + spin_lock_irq(&hw->lock); + + csio_lnode_state_to_str(ln, state); + if (!strcmp(state, "READY")) + fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + else if (!strcmp(state, "OFFLINE")) + fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; + else + fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; + + spin_unlock_irq(&hw->lock); +} + +/* + * csio_get_host_speed - Return link speed to FC transport. + * @shost: scsi host. + * + */ +static void +csio_get_host_speed(struct Scsi_Host *shost) +{ + struct csio_lnode *ln = shost_priv(shost); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + spin_lock_irq(&hw->lock); + switch (hw->pport[ln->portid].link_speed) { + case FW_PORT_CAP32_SPEED_1G: + fc_host_speed(shost) = FC_PORTSPEED_1GBIT; + break; + case FW_PORT_CAP32_SPEED_10G: + fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + break; + case FW_PORT_CAP32_SPEED_25G: + fc_host_speed(shost) = FC_PORTSPEED_25GBIT; + break; + case FW_PORT_CAP32_SPEED_40G: + fc_host_speed(shost) = FC_PORTSPEED_40GBIT; + break; + case FW_PORT_CAP32_SPEED_50G: + fc_host_speed(shost) = FC_PORTSPEED_50GBIT; + break; + case FW_PORT_CAP32_SPEED_100G: + fc_host_speed(shost) = FC_PORTSPEED_100GBIT; + break; + default: + fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; + break; + } + spin_unlock_irq(&hw->lock); +} + +/* + * csio_get_host_fabric_name - Return fabric name + * @shost: scsi host. 
+ * + */ +static void +csio_get_host_fabric_name(struct Scsi_Host *shost) +{ + struct csio_lnode *ln = shost_priv(shost); + struct csio_rnode *rn = NULL; + struct csio_hw *hw = csio_lnode_to_hw(ln); + + spin_lock_irq(&hw->lock); + rn = csio_rnode_lookup_portid(ln, FC_FID_FLOGI); + if (rn) + fc_host_fabric_name(shost) = wwn_to_u64(csio_rn_wwnn(rn)); + else + fc_host_fabric_name(shost) = 0; + spin_unlock_irq(&hw->lock); +} + +/* + * csio_get_host_speed - Return FC transport statistics. + * @ln: Lnode. + * + */ +static struct fc_host_statistics * +csio_get_stats(struct Scsi_Host *shost) +{ + struct csio_lnode *ln = shost_priv(shost); + struct csio_hw *hw = csio_lnode_to_hw(ln); + struct fc_host_statistics *fhs = &ln->fch_stats; + struct fw_fcoe_port_stats fcoe_port_stats; + uint64_t seconds; + + memset(&fcoe_port_stats, 0, sizeof(struct fw_fcoe_port_stats)); + csio_get_phy_port_stats(hw, ln->portid, &fcoe_port_stats); + + fhs->tx_frames += (be64_to_cpu(fcoe_port_stats.tx_bcast_frames) + + be64_to_cpu(fcoe_port_stats.tx_mcast_frames) + + be64_to_cpu(fcoe_port_stats.tx_ucast_frames) + + be64_to_cpu(fcoe_port_stats.tx_offload_frames)); + fhs->tx_words += (be64_to_cpu(fcoe_port_stats.tx_bcast_bytes) + + be64_to_cpu(fcoe_port_stats.tx_mcast_bytes) + + be64_to_cpu(fcoe_port_stats.tx_ucast_bytes) + + be64_to_cpu(fcoe_port_stats.tx_offload_bytes)) / + CSIO_WORD_TO_BYTE; + fhs->rx_frames += (be64_to_cpu(fcoe_port_stats.rx_bcast_frames) + + be64_to_cpu(fcoe_port_stats.rx_mcast_frames) + + be64_to_cpu(fcoe_port_stats.rx_ucast_frames)); + fhs->rx_words += (be64_to_cpu(fcoe_port_stats.rx_bcast_bytes) + + be64_to_cpu(fcoe_port_stats.rx_mcast_bytes) + + be64_to_cpu(fcoe_port_stats.rx_ucast_bytes)) / + CSIO_WORD_TO_BYTE; + fhs->error_frames += be64_to_cpu(fcoe_port_stats.rx_err_frames); + fhs->fcp_input_requests += ln->stats.n_input_requests; + fhs->fcp_output_requests += ln->stats.n_output_requests; + fhs->fcp_control_requests += ln->stats.n_control_requests; + fhs->fcp_input_megabytes += ln->stats.n_input_bytes >> 20; + fhs->fcp_output_megabytes += ln->stats.n_output_bytes >> 20; + fhs->link_failure_count = ln->stats.n_link_down; + /* Reset stats for the device */ + seconds = jiffies_to_msecs(jiffies) - hw->stats.n_reset_start; + do_div(seconds, 1000); + fhs->seconds_since_last_reset = seconds; + + return fhs; +} + +/* + * csio_set_rport_loss_tmo - Set the rport dev loss timeout + * @rport: fc rport. + * @timeout: new value for dev loss tmo. + * + * If timeout is non zero set the dev_loss_tmo to timeout, else set + * dev_loss_tmo to one. 
+ */ +static void +csio_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) +{ + if (timeout) + rport->dev_loss_tmo = timeout; + else + rport->dev_loss_tmo = 1; +} + +static void +csio_vport_set_state(struct csio_lnode *ln) +{ + struct fc_vport *fc_vport = ln->fc_vport; + struct csio_lnode *pln = ln->pln; + char state[16]; + + /* Set fc vport state based on phyiscal lnode */ + csio_lnode_state_to_str(pln, state); + if (strcmp(state, "READY")) { + fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); + return; + } + + if (!(pln->flags & CSIO_LNF_NPIVSUPP)) { + fc_vport_set_state(fc_vport, FC_VPORT_NO_FABRIC_SUPP); + return; + } + + /* Set fc vport state based on virtual lnode */ + csio_lnode_state_to_str(ln, state); + if (strcmp(state, "READY")) { + fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); + return; + } + fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); +} + +static int +csio_fcoe_alloc_vnp(struct csio_hw *hw, struct csio_lnode *ln) +{ + struct csio_lnode *pln; + struct csio_mb *mbp; + struct fw_fcoe_vnp_cmd *rsp; + int ret = 0; + int retry = 0; + + /* Issue VNP cmd to alloc vport */ + /* Allocate Mbox request */ + spin_lock_irq(&hw->lock); + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + ret = -ENOMEM; + goto out; + } + + pln = ln->pln; + ln->fcf_flowid = pln->fcf_flowid; + ln->portid = pln->portid; + + csio_fcoe_vnp_alloc_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, + pln->fcf_flowid, pln->vnp_flowid, 0, + csio_ln_wwnn(ln), csio_ln_wwpn(ln), NULL); + + for (retry = 0; retry < 3; retry++) { + /* FW is expected to complete vnp cmd in immediate mode + * without much delay. + * Otherwise, there will be increase in IO latency since HW + * lock is held till completion of vnp mbox cmd. + */ + ret = csio_mb_issue(hw, mbp); + if (ret != -EBUSY) + break; + + /* Retry if mbox returns busy */ + spin_unlock_irq(&hw->lock); + msleep(2000); + spin_lock_irq(&hw->lock); + } + + if (ret) { + csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n"); + goto out_free; + } + + /* Process Mbox response of VNP command */ + rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb); + if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) { + csio_ln_err(ln, "FCOE VNP ALLOC cmd returned 0x%x!\n", + FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16))); + ret = -EINVAL; + goto out_free; + } + + ln->vnp_flowid = FW_FCOE_VNP_CMD_VNPI_GET( + ntohl(rsp->gen_wwn_to_vnpi)); + memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8); + memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8); + + csio_ln_dbg(ln, "FCOE VNPI: 0x%x\n", ln->vnp_flowid); + csio_ln_dbg(ln, "\tWWNN: %x%x%x%x%x%x%x%x\n", + ln->ln_sparm.wwnn[0], ln->ln_sparm.wwnn[1], + ln->ln_sparm.wwnn[2], ln->ln_sparm.wwnn[3], + ln->ln_sparm.wwnn[4], ln->ln_sparm.wwnn[5], + ln->ln_sparm.wwnn[6], ln->ln_sparm.wwnn[7]); + csio_ln_dbg(ln, "\tWWPN: %x%x%x%x%x%x%x%x\n", + ln->ln_sparm.wwpn[0], ln->ln_sparm.wwpn[1], + ln->ln_sparm.wwpn[2], ln->ln_sparm.wwpn[3], + ln->ln_sparm.wwpn[4], ln->ln_sparm.wwpn[5], + ln->ln_sparm.wwpn[6], ln->ln_sparm.wwpn[7]); + +out_free: + mempool_free(mbp, hw->mb_mempool); +out: + spin_unlock_irq(&hw->lock); + return ret; +} + +static int +csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln) +{ + struct csio_mb *mbp; + struct fw_fcoe_vnp_cmd *rsp; + int ret = 0; + int retry = 0; + + /* Issue VNP cmd to free vport */ + /* Allocate Mbox request */ + + spin_lock_irq(&hw->lock); + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + ret = -ENOMEM; + goto out; + } + + 
csio_fcoe_vnp_free_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, + ln->fcf_flowid, ln->vnp_flowid, + NULL); + + for (retry = 0; retry < 3; retry++) { + ret = csio_mb_issue(hw, mbp); + if (ret != -EBUSY) + break; + + /* Retry if mbox returns busy */ + spin_unlock_irq(&hw->lock); + msleep(2000); + spin_lock_irq(&hw->lock); + } + + if (ret) { + csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n"); + goto out_free; + } + + /* Process Mbox response of VNP command */ + rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb); + if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) { + csio_ln_err(ln, "FCOE VNP FREE cmd returned 0x%x!\n", + FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16))); + ret = -EINVAL; + } + +out_free: + mempool_free(mbp, hw->mb_mempool); +out: + spin_unlock_irq(&hw->lock); + return ret; +} + +static int +csio_vport_create(struct fc_vport *fc_vport, bool disable) +{ + struct Scsi_Host *shost = fc_vport->shost; + struct csio_lnode *pln = shost_priv(shost); + struct csio_lnode *ln = NULL; + struct csio_hw *hw = csio_lnode_to_hw(pln); + uint8_t wwn[8]; + int ret = -1; + + ln = csio_shost_init(hw, &fc_vport->dev, false, pln); + if (!ln) + goto error; + + if (fc_vport->node_name != 0) { + u64_to_wwn(fc_vport->node_name, wwn); + + if (!CSIO_VALID_WWN(wwn)) { + csio_ln_err(ln, + "vport create failed. Invalid wwnn\n"); + goto error; + } + memcpy(csio_ln_wwnn(ln), wwn, 8); + } + + if (fc_vport->port_name != 0) { + u64_to_wwn(fc_vport->port_name, wwn); + + if (!CSIO_VALID_WWN(wwn)) { + csio_ln_err(ln, + "vport create failed. Invalid wwpn\n"); + goto error; + } + + if (csio_lnode_lookup_by_wwpn(hw, wwn)) { + csio_ln_err(ln, + "vport create failed. wwpn already exists\n"); + goto error; + } + memcpy(csio_ln_wwpn(ln), wwn, 8); + } + + fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING); + ln->fc_vport = fc_vport; + + if (csio_fcoe_alloc_vnp(hw, ln)) + goto error; + + *(struct csio_lnode **)fc_vport->dd_data = ln; + if (!fc_vport->node_name) + fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln)); + if (!fc_vport->port_name) + fc_vport->port_name = wwn_to_u64(csio_ln_wwpn(ln)); + csio_fchost_attr_init(ln); + return 0; +error: + if (ln) + csio_shost_exit(ln); + + return ret; +} + +static int +csio_vport_delete(struct fc_vport *fc_vport) +{ + struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data; + struct Scsi_Host *shost = csio_ln_to_shost(ln); + struct csio_hw *hw = csio_lnode_to_hw(ln); + int rmv; + + spin_lock_irq(&hw->lock); + rmv = csio_is_hw_removing(hw); + spin_unlock_irq(&hw->lock); + + if (rmv) { + csio_shost_exit(ln); + return 0; + } + + /* Quiesce ios and send remove event to lnode */ + scsi_block_requests(shost); + spin_lock_irq(&hw->lock); + csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln); + csio_lnode_close(ln); + spin_unlock_irq(&hw->lock); + scsi_unblock_requests(shost); + + /* Free vnp */ + if (fc_vport->vport_state != FC_VPORT_DISABLED) + csio_fcoe_free_vnp(hw, ln); + + csio_shost_exit(ln); + return 0; +} + +static int +csio_vport_disable(struct fc_vport *fc_vport, bool disable) +{ + struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data; + struct Scsi_Host *shost = csio_ln_to_shost(ln); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + /* disable vport */ + if (disable) { + /* Quiesce ios and send stop event to lnode */ + scsi_block_requests(shost); + spin_lock_irq(&hw->lock); + csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln); + csio_lnode_stop(ln); + spin_unlock_irq(&hw->lock); + scsi_unblock_requests(shost); + + /* Free vnp */ + csio_fcoe_free_vnp(hw, 
ln); + fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); + csio_ln_err(ln, "vport disabled\n"); + return 0; + } else { + /* enable vport */ + fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING); + if (csio_fcoe_alloc_vnp(hw, ln)) { + csio_ln_err(ln, "vport enabled failed.\n"); + return -1; + } + csio_ln_err(ln, "vport enabled\n"); + return 0; + } +} + +static void +csio_dev_loss_tmo_callbk(struct fc_rport *rport) +{ + struct csio_rnode *rn; + struct csio_hw *hw; + struct csio_lnode *ln; + + rn = *((struct csio_rnode **)rport->dd_data); + ln = csio_rnode_to_lnode(rn); + hw = csio_lnode_to_hw(ln); + + spin_lock_irq(&hw->lock); + + /* return if driver is being removed or same rnode comes back online */ + if (csio_is_hw_removing(hw) || csio_is_rnode_ready(rn)) + goto out; + + csio_ln_dbg(ln, "devloss timeout on rnode:%p portid:x%x flowid:x%x\n", + rn, rn->nport_id, csio_rn_flowid(rn)); + + CSIO_INC_STATS(ln, n_dev_loss_tmo); + + /* + * enqueue devloss event to event worker thread to serialize all + * rnode events. + */ + if (csio_enqueue_evt(hw, CSIO_EVT_DEV_LOSS, &rn, sizeof(rn))) { + CSIO_INC_STATS(hw, n_evt_drop); + goto out; + } + + if (!(hw->flags & CSIO_HWF_FWEVT_PENDING)) { + hw->flags |= CSIO_HWF_FWEVT_PENDING; + spin_unlock_irq(&hw->lock); + schedule_work(&hw->evtq_work); + return; + } + +out: + spin_unlock_irq(&hw->lock); +} + +/* FC transport functions template - Physical port */ +struct fc_function_template csio_fc_transport_funcs = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_maxframe_size = 1, + + .get_host_port_id = csio_get_host_port_id, + .show_host_port_id = 1, + + .get_host_port_type = csio_get_host_port_type, + .show_host_port_type = 1, + + .get_host_port_state = csio_get_host_port_state, + .show_host_port_state = 1, + + .show_host_active_fc4s = 1, + .get_host_speed = csio_get_host_speed, + .show_host_speed = 1, + .get_host_fabric_name = csio_get_host_fabric_name, + .show_host_fabric_name = 1, + + .get_fc_host_stats = csio_get_stats, + + .dd_fcrport_size = sizeof(struct csio_rnode *), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .set_rport_dev_loss_tmo = csio_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + + .show_starget_port_id = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + + .dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk, + .dd_fcvport_size = sizeof(struct csio_lnode *), + + .vport_create = csio_vport_create, + .vport_disable = csio_vport_disable, + .vport_delete = csio_vport_delete, +}; + +/* FC transport functions template - Virtual port */ +struct fc_function_template csio_fc_transport_vport_funcs = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_maxframe_size = 1, + + .get_host_port_id = csio_get_host_port_id, + .show_host_port_id = 1, + + .get_host_port_type = csio_get_host_port_type, + .show_host_port_type = 1, + + .get_host_port_state = csio_get_host_port_state, + .show_host_port_state = 1, + .show_host_active_fc4s = 1, + + .get_host_speed = csio_get_host_speed, + .show_host_speed = 1, + + .get_host_fabric_name = csio_get_host_fabric_name, + .show_host_fabric_name = 1, + + .get_fc_host_stats = csio_get_stats, + + .dd_fcrport_size = sizeof(struct csio_rnode *), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .set_rport_dev_loss_tmo = csio_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + + 
.show_starget_port_id = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + + .dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk, + +}; diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h new file mode 100644 index 000000000..c38017b4a --- /dev/null +++ b/drivers/scsi/csiostor/csio_defs.h @@ -0,0 +1,121 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CSIO_DEFS_H__ +#define __CSIO_DEFS_H__ + +#include +#include +#include +#include +#include +#include +#include + +#define CSIO_INVALID_IDX 0xFFFFFFFF +#define CSIO_INC_STATS(elem, val) ((elem)->stats.val++) +#define CSIO_DEC_STATS(elem, val) ((elem)->stats.val--) +#define CSIO_VALID_WWN(__n) ((*__n >> 4) == 0x5 ? 
true : false) +#define CSIO_DID_MASK 0xFFFFFF +#define CSIO_WORD_TO_BYTE 4 + +#ifndef readq +static inline u64 readq(void __iomem *addr) +{ + return readl(addr) + ((u64)readl(addr + 4) << 32); +} + +static inline void writeq(u64 val, void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#endif + +static inline int +csio_list_deleted(struct list_head *list) +{ + return ((list->next == list) && (list->prev == list)); +} + +#define csio_list_next(elem) (((struct list_head *)(elem))->next) +#define csio_list_prev(elem) (((struct list_head *)(elem))->prev) + +/* State machine */ +typedef void (*csio_sm_state_t)(void *, uint32_t); + +struct csio_sm { + struct list_head sm_list; + csio_sm_state_t sm_state; +}; + +static inline void +csio_set_state(void *smp, void *state) +{ + ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state; +} + +static inline void +csio_init_state(struct csio_sm *smp, void *state) +{ + csio_set_state(smp, state); +} + +static inline void +csio_post_event(void *smp, uint32_t evt) +{ + ((struct csio_sm *)smp)->sm_state(smp, evt); +} + +static inline csio_sm_state_t +csio_get_state(void *smp) +{ + return ((struct csio_sm *)smp)->sm_state; +} + +static inline bool +csio_match_state(void *smp, void *state) +{ + return (csio_get_state(smp) == (csio_sm_state_t)state); +} + +#define CSIO_ASSERT(cond) BUG_ON(!(cond)) + +#ifdef __CSIO_DEBUG__ +#define CSIO_DB_ASSERT(__c) CSIO_ASSERT((__c)) +#else +#define CSIO_DB_ASSERT(__c) +#endif + +#endif /* ifndef __CSIO_DEFS_H__ */ diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c new file mode 100644 index 000000000..e43c5413c --- /dev/null +++ b/drivers/scsi/csiostor/csio_hw.c @@ -0,0 +1,4434 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "csio_hw.h" +#include "csio_lnode.h" +#include "csio_rnode.h" + +int csio_dbg_level = 0xFEFF; +unsigned int csio_port_mask = 0xf; + +/* Default FW event queue entries. 
*/ +static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE; + +/* Default MSI param level */ +int csio_msi = 2; + +/* FCoE function instances */ +static int dev_num; + +/* FCoE Adapter types & its description */ +static const struct csio_adap_desc csio_t5_fcoe_adapters[] = { + {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"}, + {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"}, + {"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"}, + {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"}, + {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"}, + {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"}, + {"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"}, + {"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"}, + {"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"}, + {"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"}, + {"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"}, + {"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"}, + {"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"}, + {"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"}, + {"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"}, + {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"}, + {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"}, + {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"}, + {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"}, + {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}, + {"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"}, + {"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"} +}; + +static void csio_mgmtm_cleanup(struct csio_mgmtm *); +static void csio_hw_mbm_cleanup(struct csio_hw *); + +/* State machine forward declarations */ +static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev); + +static void csio_hw_initialize(struct csio_hw *hw); +static void csio_evtq_stop(struct csio_hw *hw); +static void csio_evtq_start(struct csio_hw *hw); + +int csio_is_hw_ready(struct csio_hw *hw) +{ + return csio_match_state(hw, csio_hws_ready); +} + +int csio_is_hw_removing(struct csio_hw *hw) +{ + return csio_match_state(hw, csio_hws_removing); +} + + +/* + * csio_hw_wait_op_done_val - wait until an operation is completed + * @hw: the HW module + * @reg: the register to check for completion + * @mask: a single-bit field within @reg that indicates completion + * @polarity: the value of the field when the operation is completed + * @attempts: number of check iterations + * @delay: delay in usecs between iterations + * @valp: where to store the value of the register at completion time + * + * Wait until an operation is completed by checking a bit in a register + * up to @attempts times. If @valp is not NULL the value of the register + * at the time it indicated completion is stored there. Returns 0 if the + * operation completes and -EAGAIN otherwise. 
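/*
 * Illustrative sketch, not part of the patch: the bounded register poll that
 * the csio_hw_wait_op_done_val() comment above describes (its body follows
 * below), reduced to its essentials. read_reg() is a hypothetical accessor
 * standing in for csio_rd_reg32(); it reads a plain variable here so the
 * sketch is runnable on its own.
 */
#include <stdint.h>
#include <errno.h>
#include <stdio.h>

static volatile uint32_t fake_reg = 0x10;	/* completion bit (bit 4) set */

static uint32_t read_reg(void)
{
	return fake_reg;
}

static int wait_op_done(uint32_t mask, int polarity, int attempts,
			uint32_t *valp)
{
	while (1) {
		uint32_t val = read_reg();

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;	/* report value at completion */
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		/* a real implementation would udelay() between polls */
	}
}

int main(void)
{
	uint32_t v = 0;
	int rc = wait_op_done(0x10, 1, 5, &v);

	printf("rc %d, reg 0x%x\n", rc, v);
	return 0;
}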
+ */ +int +csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask, + int polarity, int attempts, int delay, uint32_t *valp) +{ + uint32_t val; + while (1) { + val = csio_rd_reg32(hw, reg); + + if (!!(val & mask) == polarity) { + if (valp) + *valp = val; + return 0; + } + + if (--attempts == 0) + return -EAGAIN; + if (delay) + udelay(delay); + } +} + +/* + * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register + * @hw: the adapter + * @addr: the indirect TP register address + * @mask: specifies the field within the register to modify + * @val: new value for the field + * + * Sets a field of an indirect TP register to the given value. + */ +void +csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr, + unsigned int mask, unsigned int val) +{ + csio_wr_reg32(hw, addr, TP_PIO_ADDR_A); + val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask; + csio_wr_reg32(hw, val, TP_PIO_DATA_A); +} + +void +csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask, + uint32_t value) +{ + uint32_t val = csio_rd_reg32(hw, reg) & ~mask; + + csio_wr_reg32(hw, val | value, reg); + /* Flush */ + csio_rd_reg32(hw, reg); + +} + +static int +csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf) +{ + return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype, + addr, len, buf, 0); +} + +/* + * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. + */ +#define EEPROM_MAX_RD_POLL 40 +#define EEPROM_MAX_WR_POLL 6 +#define EEPROM_STAT_ADDR 0x7bfc +#define VPD_BASE 0x400 +#define VPD_BASE_OLD 0 +#define VPD_LEN 1024 +#define VPD_INFO_FLD_HDR_SIZE 3 + +/* + * csio_hw_seeprom_read - read a serial EEPROM location + * @hw: hw to read + * @addr: EEPROM virtual address + * @data: where to store the read data + * + * Read a 32-bit word from a location in serial EEPROM using the card's PCI + * VPD capability. Note that this function must be called with a virtual + * address. + */ +static int +csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data) +{ + uint16_t val = 0; + int attempts = EEPROM_MAX_RD_POLL; + uint32_t base = hw->params.pci.vpd_cap_addr; + + if (addr >= EEPROMVSIZE || (addr & 3)) + return -EINVAL; + + pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr); + + do { + udelay(10); + pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val); + } while (!(val & PCI_VPD_ADDR_F) && --attempts); + + if (!(val & PCI_VPD_ADDR_F)) { + csio_err(hw, "reading EEPROM address 0x%x failed\n", addr); + return -EINVAL; + } + + pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data); + *data = le32_to_cpu(*(__le32 *)data); + + return 0; +} + +/* + * Partial EEPROM Vital Product Data structure. Includes only the ID and + * VPD-R sections. + */ +struct t4_vpd_hdr { + u8 id_tag; + u8 id_len[2]; + u8 id_data[ID_LEN]; + u8 vpdr_tag; + u8 vpdr_len[2]; +}; + +/* + * csio_hw_get_vpd_keyword_val - Locates an information field keyword in + * the VPD + * @v: Pointer to buffered vpd data structure + * @kw: The keyword to search for + * + * Returns the value of the information field keyword or + * -EINVAL otherwise. 
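/*
 * Illustrative sketch, not part of the patch: the read-modify-write pattern
 * used by csio_set_reg_field() and csio_hw_tp_wr_bits_indirect() above:
 * clear the bits selected by the mask, then OR in the new value. The
 * "register" is an ordinary variable so the sketch stands alone.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t value)
{
	return (reg & ~mask) | (value & mask);
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;

	/* replace bits [15:8] with 0x42 */
	reg = set_field(reg, 0x0000ff00, 0x42 << 8);
	printf("0x%08x\n", reg);	/* prints 0xdead42ef */
	return 0;
}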
+ */ +static int +csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw) +{ + int32_t i; + int32_t offset , len; + const uint8_t *buf = &v->id_tag; + const uint8_t *vpdr_len = &v->vpdr_tag; + offset = sizeof(struct t4_vpd_hdr); + len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8); + + if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) + return -EINVAL; + + for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) { + if (memcmp(buf + i , kw, 2) == 0) { + i += VPD_INFO_FLD_HDR_SIZE; + return i; + } + + i += VPD_INFO_FLD_HDR_SIZE + buf[i+2]; + } + + return -EINVAL; +} + +static int +csio_pci_capability(struct pci_dev *pdev, int cap, int *pos) +{ + *pos = pci_find_capability(pdev, cap); + if (*pos) + return 0; + + return -1; +} + +/* + * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM + * @hw: HW module + * @p: where to store the parameters + * + * Reads card parameters stored in VPD EEPROM. + */ +static int +csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p) +{ + int i, ret, ec, sn, addr; + uint8_t *vpd, csum; + const struct t4_vpd_hdr *v; + /* To get around compilation warning from strstrip */ + char __always_unused *s; + + if (csio_is_valid_vpd(hw)) + return 0; + + ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD, + &hw->params.pci.vpd_cap_addr); + if (ret) + return -EINVAL; + + vpd = kzalloc(VPD_LEN, GFP_ATOMIC); + if (vpd == NULL) + return -ENOMEM; + + /* + * Card information normally starts at VPD_BASE but early cards had + * it at 0. + */ + ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd)); + addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; + + for (i = 0; i < VPD_LEN; i += 4) { + ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i)); + if (ret) { + kfree(vpd); + return ret; + } + } + + /* Reset the VPD flag! */ + hw->flags &= (~CSIO_HWF_VPD_VALID); + + v = (const struct t4_vpd_hdr *)vpd; + +#define FIND_VPD_KW(var, name) do { \ + var = csio_hw_get_vpd_keyword_val(v, name); \ + if (var < 0) { \ + csio_err(hw, "missing VPD keyword " name "\n"); \ + kfree(vpd); \ + return -EINVAL; \ + } \ +} while (0) + + FIND_VPD_KW(i, "RV"); + for (csum = 0; i >= 0; i--) + csum += vpd[i]; + + if (csum) { + csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum); + kfree(vpd); + return -EINVAL; + } + FIND_VPD_KW(ec, "EC"); + FIND_VPD_KW(sn, "SN"); +#undef FIND_VPD_KW + + memcpy(p->id, v->id_data, ID_LEN); + s = strstrip(p->id); + memcpy(p->ec, vpd + ec, EC_LEN); + s = strstrip(p->ec); + i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2]; + memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); + s = strstrip(p->sn); + + csio_valid_vpd_copied(hw); + + kfree(vpd); + return 0; +} + +/* + * csio_hw_sf1_read - read data from the serial flash + * @hw: the HW module + * @byte_cnt: number of bytes to read + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @valp: where to store the read data + * + * Reads up to 4 bytes of data from the serial flash. The location of + * the read needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. 
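/*
 * Illustrative sketch, not part of the patch: walking VPD-R information
 * fields the way csio_hw_get_vpd_keyword_val() above does. Each field has a
 * three-byte header (two keyword characters followed by a length byte) and
 * the payload follows immediately. The sample buffer is made up.
 */
#include <stdio.h>
#include <string.h>

#define VPD_FLD_HDR	3

/* returns the offset of the field's payload, or -1 if the keyword is absent */
static int find_vpd_keyword(const unsigned char *buf, int len, const char *kw)
{
	int i = 0;

	while (i + VPD_FLD_HDR <= len) {
		if (memcmp(buf + i, kw, 2) == 0)
			return i + VPD_FLD_HDR;
		i += VPD_FLD_HDR + buf[i + 2];	/* skip header plus payload */
	}
	return -1;
}

int main(void)
{
	/* an "EC" field carrying 2 bytes, then an "SN" field carrying 3 */
	const unsigned char vpd[] = { 'E', 'C', 2, 'a', 'b',
				      'S', 'N', 3, 'x', 'y', 'z' };

	/* prints 8: the offset of 'x' */
	printf("SN payload at offset %d\n",
	       find_vpd_keyword(vpd, sizeof(vpd), "SN"));
	return 0;
}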
+ */ +static int +csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont, + int32_t lock, uint32_t *valp) +{ + int ret; + + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F) + return -EBUSY; + + csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) | + BYTECNT_V(byte_cnt - 1), SF_OP_A); + ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, + 10, NULL); + if (!ret) + *valp = csio_rd_reg32(hw, SF_DATA_A); + return ret; +} + +/* + * csio_hw_sf1_write - write data to the serial flash + * @hw: the HW module + * @byte_cnt: number of bytes to write + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @val: value to write + * + * Writes up to 4 bytes of data to the serial flash. The location of + * the write needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int +csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont, + int32_t lock, uint32_t val) +{ + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F) + return -EBUSY; + + csio_wr_reg32(hw, val, SF_DATA_A); + csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | + OP_V(1) | SF_LOCK_V(lock), SF_OP_A); + + return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, + 10, NULL); +} + +/* + * csio_hw_flash_wait_op - wait for a flash operation to complete + * @hw: the HW module + * @attempts: max number of polls of the status register + * @delay: delay between polls in ms + * + * Wait for a flash operation to complete by polling the status register. + */ +static int +csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay) +{ + int ret; + uint32_t status; + + while (1) { + ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS); + if (ret != 0) + return ret; + + ret = csio_hw_sf1_read(hw, 1, 0, 1, &status); + if (ret != 0) + return ret; + + if (!(status & 1)) + return 0; + if (--attempts == 0) + return -EAGAIN; + if (delay) + msleep(delay); + } +} + +/* + * csio_hw_read_flash - read words from serial flash + * @hw: the HW module + * @addr: the start address for the read + * @nwords: how many 32-bit words to read + * @data: where to store the read data + * @byte_oriented: whether to store data as bytes or as words + * + * Read the specified number of 32-bit words from the serial flash. + * If @byte_oriented is set the read data is stored as a byte array + * (i.e., big-endian), otherwise as 32-bit words in the platform's + * natural endianess. 
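/*
 * Illustrative sketch, not part of the patch: the status polling loop in
 * csio_hw_flash_wait_op() above. The flash status byte is read repeatedly
 * until its busy bit (bit 0) clears, giving up after a fixed number of
 * attempts. read_status() is a hypothetical stand-in for the SF1
 * read/write pair; it fakes a part that stays busy for two polls.
 */
#include <errno.h>
#include <stdio.h>

static int polls;

static unsigned int read_status(void)
{
	return (polls++ < 2) ? 0x01 : 0x00;	/* busy, busy, then idle */
}

static int flash_wait_idle(int attempts)
{
	while (1) {
		if (!(read_status() & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		/* a real implementation would msleep() between polls */
	}
}

int main(void)
{
	int rc = flash_wait_idle(8);

	printf("rc %d after %d status reads\n", rc, polls);
	return 0;
}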
+ */ +static int +csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords, + uint32_t *data, int32_t byte_oriented) +{ + int ret; + + if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3)) + return -EINVAL; + + addr = swab32(addr) | SF_RD_DATA_FAST; + + ret = csio_hw_sf1_write(hw, 4, 1, 0, addr); + if (ret != 0) + return ret; + + ret = csio_hw_sf1_read(hw, 1, 1, 0, data); + if (ret != 0) + return ret; + + for ( ; nwords; nwords--, data++) { + ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data); + if (nwords == 1) + csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ + if (ret) + return ret; + if (byte_oriented) + *data = (__force __u32) htonl(*data); + } + return 0; +} + +/* + * csio_hw_write_flash - write up to a page of data to the serial flash + * @hw: the hw + * @addr: the start address to write + * @n: length of data to write in bytes + * @data: the data to write + * + * Writes up to a page of data (256 bytes) to the serial flash starting + * at the given address. All the data must be written to the same page. + */ +static int +csio_hw_write_flash(struct csio_hw *hw, uint32_t addr, + uint32_t n, const uint8_t *data) +{ + int ret = -EINVAL; + uint32_t buf[64]; + uint32_t i, c, left, val, offset = addr & 0xff; + + if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE) + return -EINVAL; + + val = swab32(addr) | SF_PROG_PAGE; + + ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); + if (ret != 0) + goto unlock; + + ret = csio_hw_sf1_write(hw, 4, 1, 1, val); + if (ret != 0) + goto unlock; + + for (left = n; left; left -= c) { + c = min(left, 4U); + for (val = 0, i = 0; i < c; ++i) + val = (val << 8) + *data++; + + ret = csio_hw_sf1_write(hw, c, c != left, 1, val); + if (ret) + goto unlock; + } + ret = csio_hw_flash_wait_op(hw, 8, 1); + if (ret) + goto unlock; + + csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ + + /* Read the page to verify the write succeeded */ + ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); + if (ret) + return ret; + + if (memcmp(data - n, (uint8_t *)buf + offset, n)) { + csio_err(hw, + "failed to correctly write the flash page at %#x\n", + addr); + return -EINVAL; + } + + return 0; + +unlock: + csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ + return ret; +} + +/* + * csio_hw_flash_erase_sectors - erase a range of flash sectors + * @hw: the HW module + * @start: the first sector to erase + * @end: the last sector to erase + * + * Erases the sectors in the given inclusive range. + */ +static int +csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end) +{ + int ret = 0; + + while (start <= end) { + + ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); + if (ret != 0) + goto out; + + ret = csio_hw_sf1_write(hw, 4, 0, 1, + SF_ERASE_SECTOR | (start << 8)); + if (ret != 0) + goto out; + + ret = csio_hw_flash_wait_op(hw, 14, 500); + if (ret != 0) + goto out; + + start++; + } +out: + if (ret) + csio_err(hw, "erase of flash sector %d failed, error %d\n", + start, ret); + csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ + return 0; +} + +static void +csio_hw_print_fw_version(struct csio_hw *hw, char *str) +{ + csio_info(hw, "%s: %u.%u.%u.%u\n", str, + FW_HDR_FW_VER_MAJOR_G(hw->fwrev), + FW_HDR_FW_VER_MINOR_G(hw->fwrev), + FW_HDR_FW_VER_MICRO_G(hw->fwrev), + FW_HDR_FW_VER_BUILD_G(hw->fwrev)); +} + +/* + * csio_hw_get_fw_version - read the firmware version + * @hw: HW module + * @vers: where to place the version + * + * Reads the FW version from flash. 
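/*
 * Illustrative sketch, not part of the patch: the page-at-a-time write and
 * read-back verify discipline that csio_hw_write_flash() above follows (a
 * single call may not cross a 256-byte page boundary). The "flash" is a
 * plain in-memory array here; PAGE_SZ mirrors SF_PAGE_SIZE.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define PAGE_SZ		256
#define FLASH_SZ	(4 * PAGE_SZ)

static uint8_t flash[FLASH_SZ];

/* write @n bytes at @addr; the range must stay within one page */
static int write_page(uint32_t addr, const uint8_t *data, uint32_t n)
{
	if (addr >= FLASH_SZ || (addr & (PAGE_SZ - 1)) + n > PAGE_SZ)
		return -1;
	memcpy(flash + addr, data, n);
	/* read back and compare, as the driver does after programming */
	return memcmp(flash + addr, data, n) ? -1 : 0;
}

int main(void)
{
	uint8_t img[3 * PAGE_SZ];
	uint32_t off;

	memset(img, 0xa5, sizeof(img));
	for (off = 0; off < sizeof(img); off += PAGE_SZ)
		if (write_page(off, img + off, PAGE_SZ))
			return 1;
	printf("wrote %zu bytes in %d-byte pages\n", sizeof(img), PAGE_SZ);
	return 0;
}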
+ */ +static int +csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers) +{ + return csio_hw_read_flash(hw, FLASH_FW_START + + offsetof(struct fw_hdr, fw_ver), 1, + vers, 0); +} + +/* + * csio_hw_get_tp_version - read the TP microcode version + * @hw: HW module + * @vers: where to place the version + * + * Reads the TP microcode version from flash. + */ +static int +csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers) +{ + return csio_hw_read_flash(hw, FLASH_FW_START + + offsetof(struct fw_hdr, tp_microcode_ver), 1, + vers, 0); +} + +/* + * csio_hw_fw_dload - download firmware. + * @hw: HW module + * @fw_data: firmware image to write. + * @size: image size + * + * Write the supplied firmware image to the card's serial flash. + */ +static int +csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size) +{ + uint32_t csum; + int32_t addr; + int ret; + uint32_t i; + uint8_t first_page[SF_PAGE_SIZE]; + const __be32 *p = (const __be32 *)fw_data; + struct fw_hdr *hdr = (struct fw_hdr *)fw_data; + uint32_t sf_sec_size; + + if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) { + csio_err(hw, "Serial Flash data invalid\n"); + return -EINVAL; + } + + if (!size) { + csio_err(hw, "FW image has no data\n"); + return -EINVAL; + } + + if (size & 511) { + csio_err(hw, "FW image size not multiple of 512 bytes\n"); + return -EINVAL; + } + + if (ntohs(hdr->len512) * 512 != size) { + csio_err(hw, "FW image size differs from size in FW header\n"); + return -EINVAL; + } + + if (size > FLASH_FW_MAX_SIZE) { + csio_err(hw, "FW image too large, max is %u bytes\n", + FLASH_FW_MAX_SIZE); + return -EINVAL; + } + + for (csum = 0, i = 0; i < size / sizeof(csum); i++) + csum += ntohl(p[i]); + + if (csum != 0xffffffff) { + csio_err(hw, "corrupted firmware image, checksum %#x\n", csum); + return -EINVAL; + } + + sf_sec_size = hw->params.sf_size / hw->params.sf_nsec; + i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ + + csio_dbg(hw, "Erasing sectors... start:%d end:%d\n", + FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1); + + ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC, + FLASH_FW_START_SEC + i - 1); + if (ret) { + csio_err(hw, "Flash Erase failed\n"); + goto out; + } + + /* + * We write the correct version at the end so the driver can see a bad + * version if the FW write fails. Start by writing a copy of the + * first page with a bad version. + */ + memcpy(first_page, fw_data, SF_PAGE_SIZE); + ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); + ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page); + if (ret) + goto out; + + csio_dbg(hw, "Writing Flash .. start:%d end:%d\n", + FW_IMG_START, FW_IMG_START + size); + + addr = FLASH_FW_START; + for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { + addr += SF_PAGE_SIZE; + fw_data += SF_PAGE_SIZE; + ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data); + if (ret) + goto out; + } + + ret = csio_hw_write_flash(hw, + FLASH_FW_START + + offsetof(struct fw_hdr, fw_ver), + sizeof(hdr->fw_ver), + (const uint8_t *)&hdr->fw_ver); + +out: + if (ret) + csio_err(hw, "firmware download failed, error %d\n", ret); + return ret; +} + +static int +csio_hw_get_flash_params(struct csio_hw *hw) +{ + /* Table for non-Numonix supported flash parts. Numonix parts are left + * to the preexisting code. All flash parts have 64KB sectors. 
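/*
 * Illustrative sketch, not part of the patch: the whole-image checksum test
 * performed by csio_hw_fw_dload() above. The image is summed as big-endian
 * 32-bit words and the total must come out to 0xffffffff. The tiny "image"
 * below is fabricated so that the rule holds.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

static int fw_csum_ok(const uint8_t *img, uint32_t size)
{
	uint32_t csum = 0, i;

	for (i = 0; i + 4 <= size; i += 4)
		csum += be32(img + i);		/* overflow wraps, by design */
	return csum == 0xffffffffu;
}

int main(void)
{
	/* two words: 0x00000001 + 0xfffffffe == 0xffffffff */
	const uint8_t img[] = { 0x00, 0x00, 0x00, 0x01,
				0xff, 0xff, 0xff, 0xfe };

	printf("checksum %s\n", fw_csum_ok(img, sizeof(img)) ? "ok" : "bad");
	return 0;
}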
+ */ + static struct flash_desc { + u32 vendor_and_model_id; + u32 size_mb; + } supported_flash[] = { + { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ + }; + + u32 part, manufacturer; + u32 density, size = 0; + u32 flashid = 0; + int ret; + + ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID); + if (!ret) + ret = csio_hw_sf1_read(hw, 3, 0, 1, &flashid); + csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ + if (ret) + return ret; + + /* Check to see if it's one of our non-standard supported Flash parts. + */ + for (part = 0; part < ARRAY_SIZE(supported_flash); part++) + if (supported_flash[part].vendor_and_model_id == flashid) { + hw->params.sf_size = supported_flash[part].size_mb; + hw->params.sf_nsec = + hw->params.sf_size / SF_SEC_SIZE; + goto found; + } + + /* Decode Flash part size. The code below looks repetitive with + * common encodings, but that's not guaranteed in the JEDEC + * specification for the Read JEDEC ID command. The only thing that + * we're guaranteed by the JEDEC specification is where the + * Manufacturer ID is in the returned result. After that each + * Manufacturer ~could~ encode things completely differently. + * Note, all Flash parts must have 64KB sectors. + */ + manufacturer = flashid & 0xff; + switch (manufacturer) { + case 0x20: { /* Micron/Numonix */ + /* This Density -> Size decoding table is taken from Micron + * Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x14 ... 0x19: /* 1MB - 32MB */ + size = 1 << density; + break; + case 0x20: /* 64MB */ + size = 1 << 26; + break; + case 0x21: /* 128MB */ + size = 1 << 27; + break; + case 0x22: /* 256MB */ + size = 1 << 28; + } + break; + } + case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */ + /* This Density -> Size decoding table is taken from ISSI + * Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x16: /* 32 MB */ + size = 1 << 25; + break; + case 0x17: /* 64MB */ + size = 1 << 26; + } + break; + } + case 0xc2: /* Macronix */ + case 0xef: /* Winbond */ { + /* This Density -> Size decoding table is taken from + * Macronix and Winbond Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x17: /* 8MB */ + case 0x18: /* 16MB */ + size = 1 << density; + } + } + } + + /* If we didn't recognize the FLASH part, that's no real issue: the + * Hardware/Software contract says that Hardware will _*ALWAYS*_ + * use a FLASH part which is at least 4MB in size and has 64KB + * sectors. The unrecognized FLASH part is likely to be much larger + * than 4MB, but that's all we really need. 
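/*
 * Illustrative sketch, not part of the patch: decoding a JEDEC READ ID
 * result the way csio_hw_get_flash_params() above does for the common
 * vendors: manufacturer in the low byte, density code in bits 23:16, and
 * for many parts size == 1 << density. The ID value below is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define SEC_SIZE	(64 * 1024)	/* the driver assumes 64KB sectors */

int main(void)
{
	uint32_t flashid = 0x00170220;	/* hypothetical Micron-style ID */
	uint32_t manufacturer = flashid & 0xff;
	uint32_t density = (flashid >> 16) & 0xff;
	uint32_t size = 1u << density;	/* density 0x17 gives 8MB */

	printf("mfr 0x%02x density 0x%02x size %u sectors %u\n",
	       manufacturer, density, size, size / SEC_SIZE);
	return 0;
}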
+ */ + if (size == 0) { + csio_warn(hw, "Unknown Flash Part, ID = %#x, assuming 4MB\n", + flashid); + size = 1 << 22; + } + + /* Store decoded Flash size */ + hw->params.sf_size = size; + hw->params.sf_nsec = size / SF_SEC_SIZE; + +found: + if (hw->params.sf_size < FLASH_MIN_SIZE) + csio_warn(hw, "WARNING: Flash Part ID %#x, size %#x < %#x\n", + flashid, hw->params.sf_size, FLASH_MIN_SIZE); + return 0; +} + +/*****************************************************************************/ +/* HW State machine assists */ +/*****************************************************************************/ + +static int +csio_hw_dev_ready(struct csio_hw *hw) +{ + uint32_t reg; + int cnt = 6; + int src_pf; + + while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) && + (--cnt != 0)) + mdelay(100); + + if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) + src_pf = SOURCEPF_G(reg); + else + src_pf = T6_SOURCEPF_G(reg); + + if ((cnt == 0) && (((int32_t)(src_pf) < 0) || + (src_pf >= CSIO_MAX_PFN))) { + csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt); + return -EIO; + } + + hw->pfn = src_pf; + + return 0; +} + +/* + * csio_do_hello - Perform the HELLO FW Mailbox command and process response. + * @hw: HW module + * @state: Device state + * + * FW_HELLO_CMD has to be polled for completion. + */ +static int +csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state) +{ + struct csio_mb *mbp; + int rv = 0; + enum fw_retval retval; + uint8_t mpfn; + char state_str[16]; + int retries = FW_CMD_HELLO_RETRIES; + + memset(state_str, 0, sizeof(state_str)); + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + rv = -ENOMEM; + CSIO_INC_STATS(hw, n_err_nomem); + goto out; + } + +retry: + csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, + hw->pfn, CSIO_MASTER_MAY, NULL); + + rv = csio_mb_issue(hw, mbp); + if (rv) { + csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv); + goto out_free_mb; + } + + csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn); + if (retval != FW_SUCCESS) { + csio_err(hw, "HELLO cmd failed with ret: %d\n", retval); + rv = -EINVAL; + goto out_free_mb; + } + + /* Firmware has designated us to be master */ + if (hw->pfn == mpfn) { + hw->flags |= CSIO_HWF_MASTER; + } else if (*state == CSIO_DEV_STATE_UNINIT) { + /* + * If we're not the Master PF then we need to wait around for + * the Master PF Driver to finish setting up the adapter. + * + * Note that we also do this wait if we're a non-Master-capable + * PF and there is no current Master PF; a Master PF may show up + * momentarily and we wouldn't want to fail pointlessly. (This + * can happen when an OS loads lots of different drivers rapidly + * at the same time). In this case, the Master PF returned by + * the firmware will be PCIE_FW_MASTER_MASK so the test below + * will work ... + */ + + int waiting = FW_CMD_HELLO_TIMEOUT; + + /* + * Wait for the firmware to either indicate an error or + * initialized state. If we see either of these we bail out + * and report the issue to the caller. If we exhaust the + * "hello timeout" and we haven't exhausted our retries, try + * again. Otherwise bail with a timeout error. + */ + for (;;) { + uint32_t pcie_fw; + + spin_unlock_irq(&hw->lock); + msleep(50); + spin_lock_irq(&hw->lock); + waiting -= 50; + + /* + * If neither Error nor Initialized are indicated + * by the firmware keep waiting till we exhaust our + * timeout ... and then retry if we haven't exhausted + * our retries ... 
+ */ + pcie_fw = csio_rd_reg32(hw, PCIE_FW_A); + if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) { + if (waiting <= 0) { + if (retries-- > 0) + goto retry; + + rv = -ETIMEDOUT; + break; + } + continue; + } + + /* + * We either have an Error or Initialized condition + * report errors preferentially. + */ + if (state) { + if (pcie_fw & PCIE_FW_ERR_F) { + *state = CSIO_DEV_STATE_ERR; + rv = -ETIMEDOUT; + } else if (pcie_fw & PCIE_FW_INIT_F) + *state = CSIO_DEV_STATE_INIT; + } + + /* + * If we arrived before a Master PF was selected and + * there's not a valid Master PF, grab its identity + * for our caller. + */ + if (mpfn == PCIE_FW_MASTER_M && + (pcie_fw & PCIE_FW_MASTER_VLD_F)) + mpfn = PCIE_FW_MASTER_G(pcie_fw); + break; + } + hw->flags &= ~CSIO_HWF_MASTER; + } + + switch (*state) { + case CSIO_DEV_STATE_UNINIT: + strcpy(state_str, "Initializing"); + break; + case CSIO_DEV_STATE_INIT: + strcpy(state_str, "Initialized"); + break; + case CSIO_DEV_STATE_ERR: + strcpy(state_str, "Error"); + break; + default: + strcpy(state_str, "Unknown"); + break; + } + + if (hw->pfn == mpfn) + csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n", + hw->pfn, state_str); + else + csio_info(hw, + "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n", + hw->pfn, mpfn, state_str); + +out_free_mb: + mempool_free(mbp, hw->mb_mempool); +out: + return rv; +} + +/* + * csio_do_bye - Perform the BYE FW Mailbox command and process response. + * @hw: HW module + * + */ +static int +csio_do_bye(struct csio_hw *hw) +{ + struct csio_mb *mbp; + enum fw_retval retval; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of BYE command failed\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + retval = csio_mb_fw_retval(mbp); + if (retval != FW_SUCCESS) { + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + mempool_free(mbp, hw->mb_mempool); + + return 0; +} + +/* + * csio_do_reset- Perform the device reset. + * @hw: HW module + * @fw_rst: FW reset + * + * If fw_rst is set, issues FW reset mbox cmd otherwise + * does PIO reset. + * Performs reset of the function. 
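/*
 * Illustrative sketch, not part of the patch: the mailbox call shape shared
 * by csio_do_hello() and csio_do_bye() above: allocate a command buffer,
 * fill it in, issue it, translate the firmware return value, and free the
 * buffer on every exit path. mb_alloc(), mb_issue() and mb_retval() are
 * hypothetical stand-ins for the csio_mb_*() helpers and always succeed here.
 */
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>

struct mb { unsigned int cmd; };

static struct mb *mb_alloc(void)	{ return calloc(1, sizeof(struct mb)); }
static int mb_issue(struct mb *mbp)	{ (void)mbp; return 0; }
static int mb_retval(struct mb *mbp)	{ (void)mbp; return 0; }	/* FW_SUCCESS */

static int do_simple_cmd(unsigned int cmd)
{
	struct mb *mbp = mb_alloc();
	int rv = -EINVAL;

	if (!mbp)
		return -ENOMEM;

	mbp->cmd = cmd;
	if (mb_issue(mbp))
		goto out;		/* transport failure */
	if (mb_retval(mbp) != 0)
		goto out;		/* firmware rejected the command */
	rv = 0;
out:
	free(mbp);			/* always return the buffer */
	return rv;
}

int main(void)
{
	printf("do_simple_cmd: %d\n", do_simple_cmd(42));
	return 0;
}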
+ */ +static int +csio_do_reset(struct csio_hw *hw, bool fw_rst) +{ + struct csio_mb *mbp; + enum fw_retval retval; + + if (!fw_rst) { + /* PIO reset */ + csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A); + mdelay(2000); + return 0; + } + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, + PIORSTMODE_F | PIORST_F, 0, NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of RESET command failed.n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + retval = csio_mb_fw_retval(mbp); + if (retval != FW_SUCCESS) { + csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + mempool_free(mbp, hw->mb_mempool); + + return 0; +} + +static int +csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp) +{ + struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb; + uint16_t caps; + + caps = ntohs(rsp->fcoecaps); + + if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) { + csio_err(hw, "No FCoE Initiator capability in the firmware.\n"); + return -EINVAL; + } + + if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) { + csio_err(hw, "No FCoE Control Offload capability\n"); + return -EINVAL; + } + + return 0; +} + +/* + * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET + * @hw: the HW module + * @mbox: mailbox to use for the FW RESET command (if desired) + * @force: force uP into RESET even if FW RESET command fails + * + * Issues a RESET command to firmware (if desired) with a HALT indication + * and then puts the microprocessor into RESET state. The RESET command + * will only be issued if a legitimate mailbox is provided (mbox <= + * PCIE_FW_MASTER_MASK). + * + * This is generally used in order for the host to safely manipulate the + * adapter without fear of conflicting with whatever the firmware might + * be doing. The only way out of this state is to RESTART the firmware + * ... + */ +static int +csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force) +{ + enum fw_retval retval = 0; + + /* + * If a legitimate mailbox is provided, issue a RESET command + * with a HALT indication. + */ + if (mbox <= PCIE_FW_MASTER_M) { + struct csio_mb *mbp; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, + PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F, + NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of RESET command failed!\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + retval = csio_mb_fw_retval(mbp); + mempool_free(mbp, hw->mb_mempool); + } + + /* + * Normally we won't complete the operation if the firmware RESET + * command fails but if our caller insists we'll go ahead and put the + * uP into RESET. This can be useful if the firmware is hung or even + * missing ... We'll have to take the risk of putting the uP into + * RESET without the cooperation of firmware in that case. + * + * We also force the firmware's HALT flag to be on in case we bypassed + * the firmware RESET command above or we're dealing with old firmware + * which doesn't have the HALT capability. This will serve as a flag + * for the incoming firmware to know that it's coming out of a HALT + * rather than a RESET ... if it's new enough to understand that ... 
+ */ + if (retval == 0 || force) { + csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F); + csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, + PCIE_FW_HALT_F); + } + + /* + * And we always return the result of the firmware RESET command + * even when we force the uP into RESET ... + */ + return retval ? -EINVAL : 0; +} + +/* + * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET + * @hw: the HW module + * @reset: if we want to do a RESET to restart things + * + * Restart firmware previously halted by csio_hw_fw_halt(). On successful + * return the previous PF Master remains as the new PF Master and there + * is no need to issue a new HELLO command, etc. + * + * We do this in two ways: + * + * 1. If we're dealing with newer firmware we'll simply want to take + * the chip's microprocessor out of RESET. This will cause the + * firmware to start up from its start vector. And then we'll loop + * until the firmware indicates it's started again (PCIE_FW.HALT + * reset to 0) or we timeout. + * + * 2. If we're dealing with older firmware then we'll need to RESET + * the chip since older firmware won't recognize the PCIE_FW.HALT + * flag and automatically RESET itself on startup. + */ +static int +csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset) +{ + if (reset) { + /* + * Since we're directing the RESET instead of the firmware + * doing it automatically, we need to clear the PCIE_FW.HALT + * bit. + */ + csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0); + + /* + * If we've been given a valid mailbox, first try to get the + * firmware to do the RESET. If that works, great and we can + * return success. Otherwise, if we haven't been given a + * valid mailbox or the RESET command failed, fall back to + * hitting the chip with a hammer. + */ + if (mbox <= PCIE_FW_MASTER_M) { + csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0); + msleep(100); + if (csio_do_reset(hw, true) == 0) + return 0; + } + + csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A); + msleep(2000); + } else { + int ms; + + csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0); + for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { + if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F)) + return 0; + msleep(100); + ms += 100; + } + return -ETIMEDOUT; + } + return 0; +} + +/* + * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW + * @hw: the HW module + * @mbox: mailbox to use for the FW RESET command (if desired) + * @fw_data: the firmware image to write + * @size: image size + * @force: force upgrade even if firmware doesn't cooperate + * + * Perform all of the steps necessary for upgrading an adapter's + * firmware image. Normally this requires the cooperation of the + * existing firmware in order to halt all existing activities + * but if an invalid mailbox token is passed in we skip that step + * (though we'll still put the adapter microprocessor into RESET in + * that case). + * + * On successful return the new firmware will have been loaded and + * the adapter will have been fully RESET losing all previous setup + * state. On unsuccessful return the adapter may be completely hosed ... + * positive errno indicates that the adapter is ~probably~ intact, a + * negative errno indicates that things are looking bad ... 
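/*
 * Illustrative sketch, not part of the patch: the restart wait used by
 * csio_hw_fw_restart() above. After taking the uP out of RESET, a HALT flag
 * is polled in 100ms steps until it clears or the timeout budget is spent.
 * read_halt_flag() is a hypothetical stand-in for reading PCIE_FW_A; it
 * fakes firmware that needs three polls to come up.
 */
#include <errno.h>
#include <stdio.h>

#define POLL_MS		100
#define TIMEOUT_MS	10000

static int polls;

static int read_halt_flag(void)
{
	return ++polls < 3;		/* halted, halted, then running */
}

static int wait_fw_started(void)
{
	int ms;

	for (ms = 0; ms < TIMEOUT_MS; ms += POLL_MS) {
		if (!read_halt_flag())
			return 0;
		/* a real implementation would msleep(POLL_MS) here */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	int rc = wait_fw_started();

	printf("rc %d after %d polls\n", rc, polls);
	return 0;
}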
+ */ +static int +csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox, + const u8 *fw_data, uint32_t size, int32_t force) +{ + const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; + int reset, ret; + + ret = csio_hw_fw_halt(hw, mbox, force); + if (ret != 0 && !force) + return ret; + + ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size); + if (ret != 0) + return ret; + + /* + * Older versions of the firmware don't understand the new + * PCIE_FW.HALT flag and so won't know to perform a RESET when they + * restart. So for newly loaded older firmware we'll have to do the + * RESET for it so it starts up on a clean slate. We can tell if + * the newly loaded firmware will handle this right by checking + * its header flags to see if it advertises the capability. + */ + reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); + return csio_hw_fw_restart(hw, mbox, reset); +} + +/* + * csio_get_device_params - Get device parameters. + * @hw: HW module + * + */ +static int +csio_get_device_params(struct csio_hw *hw) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_mb *mbp; + enum fw_retval retval; + u32 param[6]; + int i, j = 0; + + /* Initialize portids to -1 */ + for (i = 0; i < CSIO_MAX_PPORTS; i++) + hw->pport[i].portid = -1; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + /* Get port vec information. */ + param[0] = FW_PARAM_DEV(PORTVEC); + + /* Get Core clock. */ + param[1] = FW_PARAM_DEV(CCLK); + + /* Get EQ id start and end. */ + param[2] = FW_PARAM_PFVF(EQ_START); + param[3] = FW_PARAM_PFVF(EQ_END); + + /* Get IQ id start and end. */ + param[4] = FW_PARAM_PFVF(IQFLINT_START); + param[5] = FW_PARAM_PFVF(IQFLINT_END); + + csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, + ARRAY_SIZE(param), param, NULL, false, NULL); + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + csio_mb_process_read_params_rsp(hw, mbp, &retval, + ARRAY_SIZE(param), param); + if (retval != FW_SUCCESS) { + csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n", + retval); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + /* cache the information. */ + hw->port_vec = param[0]; + hw->vpd.cclk = param[1]; + wrm->fw_eq_start = param[2]; + wrm->fw_iq_start = param[4]; + + /* Using FW configured max iqs & eqs */ + if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) || + !csio_is_hw_master(hw)) { + hw->cfg_niq = param[5] - param[4] + 1; + hw->cfg_neq = param[3] - param[2] + 1; + csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n", + hw->cfg_niq, hw->cfg_neq); + } + + hw->port_vec &= csio_port_mask; + + hw->num_pports = hweight32(hw->port_vec); + + csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n", + hw->port_vec, hw->num_pports); + + for (i = 0; i < hw->num_pports; i++) { + while ((hw->port_vec & (1 << j)) == 0) + j++; + hw->pport[i].portid = j++; + csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid); + } + mempool_free(mbp, hw->mb_mempool); + + return 0; +} + + +/* + * csio_config_device_caps - Get and set device capabilities. 
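/*
 * Illustrative sketch, not part of the patch: turning the PORTVEC bitmap
 * into per-port ids the way the tail of csio_get_device_params() above
 * does: count the set bits, then walk them in order, skipping the holes.
 * __builtin_popcount() stands in for the kernel's hweight32(); the bitmap
 * value is made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned int port_vec = 0x0b;	/* ports 0, 1 and 3 present */
	int nports = __builtin_popcount(port_vec);
	int portid[32];
	int i, j = 0;

	for (i = 0; i < nports; i++) {
		while ((port_vec & (1u << j)) == 0)
			j++;		/* skip cleared bits */
		portid[i] = j++;
		printf("port %d -> physical port id %d\n", i, portid[i]);
	}
	return 0;
}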
+ * @hw: HW module + * + */ +static int +csio_config_device_caps(struct csio_hw *hw) +{ + struct csio_mb *mbp; + enum fw_retval retval; + int rv = -EINVAL; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + /* Get device capabilities */ + csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n"); + goto out; + } + + retval = csio_mb_fw_retval(mbp); + if (retval != FW_SUCCESS) { + csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval); + goto out; + } + + /* Validate device capabilities */ + rv = csio_hw_validate_caps(hw, mbp); + if (rv != 0) + goto out; + + /* Don't config device capabilities if already configured */ + if (hw->fw_state == CSIO_DEV_STATE_INIT) { + rv = 0; + goto out; + } + + /* Write back desired device capabilities */ + csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true, + false, true, NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n"); + goto out; + } + + retval = csio_mb_fw_retval(mbp); + if (retval != FW_SUCCESS) { + csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval); + goto out; + } + + rv = 0; +out: + mempool_free(mbp, hw->mb_mempool); + return rv; +} + +static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec) +{ + enum cc_fec cc_fec = 0; + + if (fw_fec & FW_PORT_CAP32_FEC_RS) + cc_fec |= FEC_RS; + if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS) + cc_fec |= FEC_BASER_RS; + + return cc_fec; +} + +static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) +{ + fw_port_cap32_t fw_pause = 0; + + if (cc_pause & PAUSE_RX) + fw_pause |= FW_PORT_CAP32_FC_RX; + if (cc_pause & PAUSE_TX) + fw_pause |= FW_PORT_CAP32_FC_TX; + + return fw_pause; +} + +static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec) +{ + fw_port_cap32_t fw_fec = 0; + + if (cc_fec & FEC_RS) + fw_fec |= FW_PORT_CAP32_FEC_RS; + if (cc_fec & FEC_BASER_RS) + fw_fec |= FW_PORT_CAP32_FEC_BASER_RS; + + return fw_fec; +} + +/** + * fwcap_to_fwspeed - return highest speed in Port Capabilities + * @acaps: advertised Port Capabilities + * + * Get the highest speed for the port from the advertised Port + * Capabilities. + */ +fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps) +{ + #define TEST_SPEED_RETURN(__caps_speed) \ + do { \ + if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \ + return FW_PORT_CAP32_SPEED_##__caps_speed; \ + } while (0) + + TEST_SPEED_RETURN(400G); + TEST_SPEED_RETURN(200G); + TEST_SPEED_RETURN(100G); + TEST_SPEED_RETURN(50G); + TEST_SPEED_RETURN(40G); + TEST_SPEED_RETURN(25G); + TEST_SPEED_RETURN(10G); + TEST_SPEED_RETURN(1G); + TEST_SPEED_RETURN(100M); + + #undef TEST_SPEED_RETURN + + return 0; +} + +/** + * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits + * @caps16: a 16-bit Port Capabilities value + * + * Returns the equivalent 32-bit Port Capabilities value. 
+ */ +fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) +{ + fw_port_cap32_t caps32 = 0; + + #define CAP16_TO_CAP32(__cap) \ + do { \ + if (caps16 & FW_PORT_CAP_##__cap) \ + caps32 |= FW_PORT_CAP32_##__cap; \ + } while (0) + + CAP16_TO_CAP32(SPEED_100M); + CAP16_TO_CAP32(SPEED_1G); + CAP16_TO_CAP32(SPEED_25G); + CAP16_TO_CAP32(SPEED_10G); + CAP16_TO_CAP32(SPEED_40G); + CAP16_TO_CAP32(SPEED_100G); + CAP16_TO_CAP32(FC_RX); + CAP16_TO_CAP32(FC_TX); + CAP16_TO_CAP32(ANEG); + CAP16_TO_CAP32(MDIAUTO); + CAP16_TO_CAP32(MDISTRAIGHT); + CAP16_TO_CAP32(FEC_RS); + CAP16_TO_CAP32(FEC_BASER_RS); + CAP16_TO_CAP32(802_3_PAUSE); + CAP16_TO_CAP32(802_3_ASM_DIR); + + #undef CAP16_TO_CAP32 + + return caps32; +} + +/** + * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits + * @caps32: a 32-bit Port Capabilities value + * + * Returns the equivalent 16-bit Port Capabilities value. Note that + * not all 32-bit Port Capabilities can be represented in the 16-bit + * Port Capabilities and some fields/values may not make it. + */ +fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) +{ + fw_port_cap16_t caps16 = 0; + + #define CAP32_TO_CAP16(__cap) \ + do { \ + if (caps32 & FW_PORT_CAP32_##__cap) \ + caps16 |= FW_PORT_CAP_##__cap; \ + } while (0) + + CAP32_TO_CAP16(SPEED_100M); + CAP32_TO_CAP16(SPEED_1G); + CAP32_TO_CAP16(SPEED_10G); + CAP32_TO_CAP16(SPEED_25G); + CAP32_TO_CAP16(SPEED_40G); + CAP32_TO_CAP16(SPEED_100G); + CAP32_TO_CAP16(FC_RX); + CAP32_TO_CAP16(FC_TX); + CAP32_TO_CAP16(802_3_PAUSE); + CAP32_TO_CAP16(802_3_ASM_DIR); + CAP32_TO_CAP16(ANEG); + CAP32_TO_CAP16(FORCE_PAUSE); + CAP32_TO_CAP16(MDIAUTO); + CAP32_TO_CAP16(MDISTRAIGHT); + CAP32_TO_CAP16(FEC_RS); + CAP32_TO_CAP16(FEC_BASER_RS); + + #undef CAP32_TO_CAP16 + + return caps16; +} + +/** + * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities + * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value + * + * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new + * 32-bit Port Capabilities value. + */ +fw_port_cap32_t lstatus_to_fwcap(u32 lstatus) +{ + fw_port_cap32_t linkattr = 0; + + /* The format of the Link Status in the old + * 16-bit Port Information message isn't the same as the + * 16-bit Port Capabilities bitfield used everywhere else. + */ + if (lstatus & FW_PORT_CMD_RXPAUSE_F) + linkattr |= FW_PORT_CAP32_FC_RX; + if (lstatus & FW_PORT_CMD_TXPAUSE_F) + linkattr |= FW_PORT_CAP32_FC_TX; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) + linkattr |= FW_PORT_CAP32_SPEED_100M; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) + linkattr |= FW_PORT_CAP32_SPEED_1G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) + linkattr |= FW_PORT_CAP32_SPEED_10G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) + linkattr |= FW_PORT_CAP32_SPEED_25G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) + linkattr |= FW_PORT_CAP32_SPEED_40G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) + linkattr |= FW_PORT_CAP32_SPEED_100G; + + return linkattr; +} + +/** + * csio_init_link_config - initialize a link's SW state + * @lc: pointer to structure holding the link state + * @pcaps: link Port Capabilities + * @acaps: link current Advertised Port Capabilities + * + * Initializes the SW state maintained for each link, including the link's + * capabilities and default speed/flow-control/autonegotiation settings. 
+ */ +static void csio_init_link_config(struct link_config *lc, fw_port_cap32_t pcaps, + fw_port_cap32_t acaps) +{ + lc->pcaps = pcaps; + lc->def_acaps = acaps; + lc->lpacaps = 0; + lc->speed_caps = 0; + lc->speed = 0; + lc->requested_fc = PAUSE_RX | PAUSE_TX; + lc->fc = lc->requested_fc; + + /* + * For Forward Error Control, we default to whatever the Firmware + * tells us the Link is currently advertising. + */ + lc->requested_fec = FEC_AUTO; + lc->fec = fwcap_to_cc_fec(lc->def_acaps); + + /* If the Port is capable of Auto-Negtotiation, initialize it as + * "enabled" and copy over all of the Physical Port Capabilities + * to the Advertised Port Capabilities. Otherwise mark it as + * Auto-Negotiate disabled and select the highest supported speed + * for the link. Note parallel structure in t4_link_l1cfg_core() + * and t4_handle_get_port_info(). + */ + if (lc->pcaps & FW_PORT_CAP32_ANEG) { + lc->acaps = lc->pcaps & ADVERT_MASK; + lc->autoneg = AUTONEG_ENABLE; + lc->requested_fc |= PAUSE_AUTONEG; + } else { + lc->acaps = 0; + lc->autoneg = AUTONEG_DISABLE; + } +} + +static void csio_link_l1cfg(struct link_config *lc, uint16_t fw_caps, + uint32_t *rcaps) +{ + unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO); + fw_port_cap32_t fw_fc, cc_fec, fw_fec, lrcap; + + lc->link_ok = 0; + + /* + * Convert driver coding of Pause Frame Flow Control settings into the + * Firmware's API. + */ + fw_fc = cc_to_fwcap_pause(lc->requested_fc); + + /* + * Convert Common Code Forward Error Control settings into the + * Firmware's API. If the current Requested FEC has "Automatic" + * (IEEE 802.3) specified, then we use whatever the Firmware + * sent us as part of it's IEEE 802.3-based interpretation of + * the Transceiver Module EPROM FEC parameters. Otherwise we + * use whatever is in the current Requested FEC settings. + */ + if (lc->requested_fec & FEC_AUTO) + cc_fec = fwcap_to_cc_fec(lc->def_acaps); + else + cc_fec = lc->requested_fec; + fw_fec = cc_to_fwcap_fec(cc_fec); + + /* Figure out what our Requested Port Capabilities are going to be. + * Note parallel structure in t4_handle_get_port_info() and + * init_link_config(). + */ + if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { + lrcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec; + lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; + lc->fec = cc_fec; + } else if (lc->autoneg == AUTONEG_DISABLE) { + lrcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi; + lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; + lc->fec = cc_fec; + } else { + lrcap = lc->acaps | fw_fc | fw_fec | fw_mdi; + } + + *rcaps = lrcap; +} + +/* + * csio_enable_ports - Bring up all available ports. + * @hw: HW module. 
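/*
 * Illustrative sketch, not part of the patch: the three-way choice made in
 * csio_link_l1cfg() above when building the requested capability word,
 * depending on whether the port supports autonegotiation and whether it is
 * enabled. The flag values are invented for the sketch; the real code also
 * folds in pause, FEC and MDI bits, which are omitted here.
 */
#include <stdint.h>
#include <stdio.h>

#define CAP_ANEG	0x1u
#define CAP_SPEEDS	0x6u	/* pretend advertised-speed bits */

static uint32_t build_rcaps(uint32_t pcaps, int autoneg_on,
			    uint32_t fixed_speed, uint32_t acaps)
{
	if (!(pcaps & CAP_ANEG))
		return pcaps;		/* no autoneg: request what the port has */
	if (!autoneg_on)
		return fixed_speed;	/* autoneg supported but forced off */
	return acaps;			/* let autoneg work from the advertised set */
}

int main(void)
{
	printf("0x%x 0x%x 0x%x\n",
	       build_rcaps(CAP_SPEEDS, 1, 0x2, CAP_SPEEDS),
	       build_rcaps(CAP_ANEG | CAP_SPEEDS, 0, 0x2, CAP_SPEEDS),
	       build_rcaps(CAP_ANEG | CAP_SPEEDS, 1, 0x2, CAP_ANEG | CAP_SPEEDS));
	return 0;
}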
+ * + */ +static int +csio_enable_ports(struct csio_hw *hw) +{ + struct csio_mb *mbp; + u16 fw_caps = FW_CAPS_UNKNOWN; + enum fw_retval retval; + uint8_t portid; + fw_port_cap32_t pcaps, acaps, rcaps; + int i; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + for (i = 0; i < hw->num_pports; i++) { + portid = hw->pport[i].portid; + + if (fw_caps == FW_CAPS_UNKNOWN) { + u32 param, val; + + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32)); + val = 1; + + csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, + hw->pfn, 0, 1, ¶m, &val, true, + NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "failed to issue FW_PARAMS_CMD(r) port:%d\n", + portid); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + csio_mb_process_read_params_rsp(hw, mbp, &retval, + 0, NULL); + fw_caps = retval ? FW_CAPS16 : FW_CAPS32; + } + + /* Read PORT information */ + csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, + false, 0, fw_caps, NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n", + portid); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + csio_mb_process_read_port_rsp(hw, mbp, &retval, fw_caps, + &pcaps, &acaps); + if (retval != FW_SUCCESS) { + csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n", + portid, retval); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + csio_init_link_config(&hw->pport[i].link_cfg, pcaps, acaps); + + csio_link_l1cfg(&hw->pport[i].link_cfg, fw_caps, &rcaps); + + /* Write back PORT information */ + csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, + true, rcaps, fw_caps, NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n", + portid); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + retval = csio_mb_fw_retval(mbp); + if (retval != FW_SUCCESS) { + csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n", + portid, retval); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + } /* For all ports */ + + mempool_free(mbp, hw->mb_mempool); + + return 0; +} + +/* + * csio_get_fcoe_resinfo - Read fcoe fw resource info. + * @hw: HW module + * Issued with lock held. 
+ */ +static int +csio_get_fcoe_resinfo(struct csio_hw *hw) +{ + struct csio_fcoe_res_info *res_info = &hw->fres_info; + struct fw_fcoe_res_info_cmd *rsp; + struct csio_mb *mbp; + enum fw_retval retval; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + /* Get FCoE FW resource information */ + csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb); + retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16)); + if (retval != FW_SUCCESS) { + csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n", + retval); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + res_info->e_d_tov = ntohs(rsp->e_d_tov); + res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq); + res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els); + res_info->r_r_tov = ntohs(rsp->r_r_tov); + res_info->max_xchgs = ntohl(rsp->max_xchgs); + res_info->max_ssns = ntohl(rsp->max_ssns); + res_info->used_xchgs = ntohl(rsp->used_xchgs); + res_info->used_ssns = ntohl(rsp->used_ssns); + res_info->max_fcfs = ntohl(rsp->max_fcfs); + res_info->max_vnps = ntohl(rsp->max_vnps); + res_info->used_fcfs = ntohl(rsp->used_fcfs); + res_info->used_vnps = ntohl(rsp->used_vnps); + + csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns, + res_info->max_xchgs); + mempool_free(mbp, hw->mb_mempool); + + return 0; +} + +static int +csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param) +{ + struct csio_mb *mbp; + enum fw_retval retval; + u32 _param[1]; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + /* + * Find out whether we're dealing with a version of + * the firmware which has configuration file support. 
+ */ + _param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); + + csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, + ARRAY_SIZE(_param), _param, NULL, false, NULL); + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + csio_mb_process_read_params_rsp(hw, mbp, &retval, + ARRAY_SIZE(_param), _param); + if (retval != FW_SUCCESS) { + csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n", + retval); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + mempool_free(mbp, hw->mb_mempool); + *param = _param[0]; + + return 0; +} + +static int +csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path) +{ + int ret = 0; + const struct firmware *cf; + struct pci_dev *pci_dev = hw->pdev; + struct device *dev = &pci_dev->dev; + unsigned int mtype = 0, maddr = 0; + uint32_t *cfg_data; + int value_to_add = 0; + const char *fw_cfg_file; + + if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) + fw_cfg_file = FW_CFG_NAME_T5; + else + fw_cfg_file = FW_CFG_NAME_T6; + + if (request_firmware(&cf, fw_cfg_file, dev) < 0) { + csio_err(hw, "could not find config file %s, err: %d\n", + fw_cfg_file, ret); + return -ENOENT; + } + + if (cf->size%4 != 0) + value_to_add = 4 - (cf->size % 4); + + cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL); + if (cfg_data == NULL) { + ret = -ENOMEM; + goto leave; + } + + memcpy((void *)cfg_data, (const void *)cf->data, cf->size); + if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) { + ret = -EINVAL; + goto leave; + } + + mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param); + maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16; + + ret = csio_memory_write(hw, mtype, maddr, + cf->size + value_to_add, cfg_data); + + if ((ret == 0) && (value_to_add != 0)) { + union { + u32 word; + char buf[4]; + } last; + size_t size = cf->size & ~0x3; + int i; + + last.word = cfg_data[size >> 2]; + for (i = value_to_add; i < 4; i++) + last.buf[i] = 0; + ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word); + } + if (ret == 0) { + csio_info(hw, "config file upgraded to %s\n", fw_cfg_file); + snprintf(path, 64, "%s%s", "/lib/firmware/", fw_cfg_file); + } + +leave: + kfree(cfg_data); + release_firmware(cf); + return ret; +} + +/* + * HW initialization: contact FW, obtain config, perform basic init. + * + * If the firmware we're dealing with has Configuration File support, then + * we use that to perform all configuration -- either using the configuration + * file stored in flash on the adapter or using a filesystem-local file + * if available. + * + * If we don't have configuration file support in the firmware, then we'll + * have to set things up the old fashioned way with hard-coded register + * writes and firmware commands ... + */ + +/* + * Attempt to initialize the HW via a Firmware Configuration File. + */ +static int +csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) +{ + struct csio_mb *mbp = NULL; + struct fw_caps_config_cmd *caps_cmd; + unsigned int mtype, maddr; + int rv = -EINVAL; + uint32_t finiver = 0, finicsum = 0, cfcsum = 0; + char path[64]; + char *config_name = NULL; + + /* + * Reset device if necessary + */ + if (reset) { + rv = csio_do_reset(hw, true); + if (rv != 0) + goto bye; + } + + /* + * If we have a configuration file in host , + * then use that. Otherwise, use the configuration file stored + * in the HW flash ... 
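/*
 * Illustrative sketch, not part of the patch: the length rounding done in
 * csio_hw_flash_config() above before the configuration file is copied to
 * adapter memory: the buffer is padded with zero bytes up to the next
 * 4-byte boundary. The sizes below are arbitrary examples.
 */
#include <stdio.h>

static unsigned int padded_len(unsigned int size)
{
	unsigned int pad = (size % 4) ? 4 - (size % 4) : 0;

	return size + pad;
}

int main(void)
{
	unsigned int sizes[] = { 1024, 1027, 1030 };
	unsigned int i;

	/* prints 1024 -> 1024, 1027 -> 1028, 1030 -> 1032 */
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%u -> %u\n", sizes[i], padded_len(sizes[i]));
	return 0;
}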
+ */ + spin_unlock_irq(&hw->lock); + rv = csio_hw_flash_config(hw, fw_cfg_param, path); + spin_lock_irq(&hw->lock); + if (rv != 0) { + /* + * config file was not found. Use default + * config file from flash. + */ + config_name = "On FLASH"; + mtype = FW_MEMTYPE_CF_FLASH; + maddr = hw->chip_ops->chip_flash_cfg_addr(hw); + } else { + config_name = path; + mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param); + maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16; + } + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + /* + * Tell the firmware to process the indicated Configuration File. + * If there are no errors and the caller has provided return value + * pointers for the [fini] section version, checksum and computed + * checksum, pass those back to the caller. + */ + caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb); + CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1); + caps_cmd->op_to_write = + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + caps_cmd->cfvalid_to_len16 = + htonl(FW_CAPS_CONFIG_CMD_CFVALID_F | + FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) | + FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) | + FW_LEN16(*caps_cmd)); + + if (csio_mb_issue(hw, mbp)) { + rv = -EINVAL; + goto bye; + } + + rv = csio_mb_fw_retval(mbp); + /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware + * Configuration File in FLASH), our last gasp effort is to use the + * Firmware Configuration File which is embedded in the + * firmware. A very few early versions of the firmware didn't + * have one embedded but we can ignore those. + */ + if (rv == ENOENT) { + CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1); + caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); + + if (csio_mb_issue(hw, mbp)) { + rv = -EINVAL; + goto bye; + } + + rv = csio_mb_fw_retval(mbp); + config_name = "Firmware Default"; + } + if (rv != FW_SUCCESS) + goto bye; + + finiver = ntohl(caps_cmd->finiver); + finicsum = ntohl(caps_cmd->finicsum); + cfcsum = ntohl(caps_cmd->cfcsum); + + /* + * And now tell the firmware to use the configuration we just loaded. + */ + caps_cmd->op_to_write = + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F); + caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); + + if (csio_mb_issue(hw, mbp)) { + rv = -EINVAL; + goto bye; + } + + rv = csio_mb_fw_retval(mbp); + if (rv != FW_SUCCESS) { + csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv); + goto bye; + } + + if (finicsum != cfcsum) { + csio_warn(hw, + "Config File checksum mismatch: csum=%#x, computed=%#x\n", + finicsum, cfcsum); + } + + /* Validate device capabilities */ + rv = csio_hw_validate_caps(hw, mbp); + if (rv != 0) + goto bye; + + mempool_free(mbp, hw->mb_mempool); + mbp = NULL; + + /* + * Note that we're operating with parameters + * not supplied by the driver, rather than from hard-wired + * initialization constants buried in the driver. + */ + hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; + + /* device parameters */ + rv = csio_get_device_params(hw); + if (rv != 0) + goto bye; + + /* Configure SGE */ + csio_wr_sge_init(hw); + + /* + * And finally tell the firmware to initialize itself using the + * parameters from the Configuration File. 
+ */ + /* Post event to notify completion of configuration */ + csio_post_event(&hw->sm, CSIO_HWE_INIT); + + csio_info(hw, "Successfully configure using Firmware " + "Configuration File %s, version %#x, computed checksum %#x\n", + config_name, finiver, cfcsum); + return 0; + + /* + * Something bad happened. Return the error ... + */ +bye: + if (mbp) + mempool_free(mbp, hw->mb_mempool); + hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS; + csio_warn(hw, "Configuration file error %d\n", rv); + return rv; +} + +/* Is the given firmware API compatible with the one the driver was compiled + * with? + */ +static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) +{ + + /* short circuit if it's the exact same firmware version */ + if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) + return 1; + +#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) + if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && + SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe)) + return 1; +#undef SAME_INTF + + return 0; +} + +/* The firmware in the filesystem is usable, but should it be installed? + * This routine explains itself in detail if it indicates the filesystem + * firmware should be installed. + */ +static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable, + int k, int c) +{ + const char *reason; + + if (!card_fw_usable) { + reason = "incompatible or unusable"; + goto install; + } + + if (k > c) { + reason = "older than the version supported with this driver"; + goto install; + } + + return 0; + +install: + csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, " + "installing firmware %u.%u.%u.%u on card.\n", + FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason, + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); + + return 1; +} + +static struct fw_info fw_info_array[] = { + { + .chip = CHELSIO_T5, + .fs_name = FW_CFG_NAME_T5, + .fw_mod_name = FW_FNAME_T5, + .fw_hdr = { + .chip = FW_HDR_CHIP_T5, + .fw_ver = __cpu_to_be32(FW_VERSION(T5)), + .intfver_nic = FW_INTFVER(T5, NIC), + .intfver_vnic = FW_INTFVER(T5, VNIC), + .intfver_ri = FW_INTFVER(T5, RI), + .intfver_iscsi = FW_INTFVER(T5, ISCSI), + .intfver_fcoe = FW_INTFVER(T5, FCOE), + }, + }, { + .chip = CHELSIO_T6, + .fs_name = FW_CFG_NAME_T6, + .fw_mod_name = FW_FNAME_T6, + .fw_hdr = { + .chip = FW_HDR_CHIP_T6, + .fw_ver = __cpu_to_be32(FW_VERSION(T6)), + .intfver_nic = FW_INTFVER(T6, NIC), + .intfver_vnic = FW_INTFVER(T6, VNIC), + .intfver_ri = FW_INTFVER(T6, RI), + .intfver_iscsi = FW_INTFVER(T6, ISCSI), + .intfver_fcoe = FW_INTFVER(T6, FCOE), + }, + } +}; + +static struct fw_info *find_fw_info(int chip) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) { + if (fw_info_array[i].chip == chip) + return &fw_info_array[i]; + } + return NULL; +} + +static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info, + const u8 *fw_data, unsigned int fw_size, + struct fw_hdr *card_fw, enum csio_dev_state state, + int *reset) +{ + int ret, card_fw_usable, fs_fw_usable; + const struct fw_hdr *fs_fw; + const struct fw_hdr *drv_fw; + + drv_fw = &fw_info->fw_hdr; + + /* Read the header of the firmware on the card */ + ret = csio_hw_read_flash(hw, FLASH_FW_START, + sizeof(*card_fw) / sizeof(uint32_t), + (uint32_t *)card_fw, 1); + if (ret == 0) { + card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw); + } else { + csio_err(hw, + "Unable to read card's firmware 
header: %d\n", ret); + card_fw_usable = 0; + } + + if (fw_data != NULL) { + fs_fw = (const void *)fw_data; + fs_fw_usable = fw_compatible(drv_fw, fs_fw); + } else { + fs_fw = NULL; + fs_fw_usable = 0; + } + + if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && + (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) { + /* Common case: the firmware on the card is an exact match and + * the filesystem one is an exact match too, or the filesystem + * one is absent/incompatible. + */ + } else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT && + csio_should_install_fs_fw(hw, card_fw_usable, + be32_to_cpu(fs_fw->fw_ver), + be32_to_cpu(card_fw->fw_ver))) { + ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data, + fw_size, 0); + if (ret != 0) { + csio_err(hw, + "failed to install firmware: %d\n", ret); + goto bye; + } + + /* Installed successfully, update the cached header too. */ + memcpy(card_fw, fs_fw, sizeof(*card_fw)); + card_fw_usable = 1; + *reset = 0; /* already reset as part of load_fw */ + } + + if (!card_fw_usable) { + uint32_t d, c, k; + + d = be32_to_cpu(drv_fw->fw_ver); + c = be32_to_cpu(card_fw->fw_ver); + k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0; + + csio_err(hw, "Cannot find a usable firmware: " + "chip state %d, " + "driver compiled with %d.%d.%d.%d, " + "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", + state, + FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d), + FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d), + FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); + ret = -EINVAL; + goto bye; + } + + /* We're using whatever's on the card and it's known to be good. */ + hw->fwrev = be32_to_cpu(card_fw->fw_ver); + hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver); + +bye: + return ret; +} + +/* + * Returns -EINVAL if attempts to flash the firmware failed, + * -ENOMEM if memory allocation failed else returns 0, + * if flashing was not attempted because the card had the + * latest firmware ECANCELED is returned + */ +static int +csio_hw_flash_fw(struct csio_hw *hw, int *reset) +{ + int ret = -ECANCELED; + const struct firmware *fw; + struct fw_info *fw_info; + struct fw_hdr *card_fw; + struct pci_dev *pci_dev = hw->pdev; + struct device *dev = &pci_dev->dev ; + const u8 *fw_data = NULL; + unsigned int fw_size = 0; + const char *fw_bin_file; + + /* This is the firmware whose headers the driver was compiled + * against + */ + fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id)); + if (fw_info == NULL) { + csio_err(hw, + "unable to get firmware info for chip %d.\n", + CHELSIO_CHIP_VERSION(hw->chip_id)); + return -EINVAL; + } + + /* allocate memory to read the header of the firmware on the + * card + */ + card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL); + if (!card_fw) + return -ENOMEM; + + if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) + fw_bin_file = FW_FNAME_T5; + else + fw_bin_file = FW_FNAME_T6; + + if (request_firmware(&fw, fw_bin_file, dev) < 0) { + csio_err(hw, "could not find firmware image %s, err: %d\n", + fw_bin_file, ret); + } else { + fw_data = fw->data; + fw_size = fw->size; + } + + /* upgrade FW logic */ + ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw, + hw->fw_state, reset); + + /* Cleaning up */ + if (fw != NULL) + release_firmware(fw); + kfree(card_fw); + return ret; +} + +static int csio_hw_check_fwver(struct csio_hw *hw) +{ + if (csio_is_t6(hw->pdev->device & 
CSIO_HW_CHIP_MASK) && + (hw->fwrev < CSIO_MIN_T6_FW)) { + csio_hw_print_fw_version(hw, "T6 unsupported fw"); + return -1; + } + + return 0; +} + +/* + * csio_hw_configure - Configure HW + * @hw - HW module + * + */ +static void +csio_hw_configure(struct csio_hw *hw) +{ + int reset = 1; + int rv; + u32 param[1]; + + rv = csio_hw_dev_ready(hw); + if (rv != 0) { + CSIO_INC_STATS(hw, n_err_fatal); + csio_post_event(&hw->sm, CSIO_HWE_FATAL); + goto out; + } + + /* HW version */ + hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A); + + /* Needed for FW download */ + rv = csio_hw_get_flash_params(hw); + if (rv != 0) { + csio_err(hw, "Failed to get serial flash params rv:%d\n", rv); + csio_post_event(&hw->sm, CSIO_HWE_FATAL); + goto out; + } + + /* Set PCIe completion timeout to 4 seconds */ + if (pci_is_pcie(hw->pdev)) + pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd); + + hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR); + + rv = csio_hw_get_fw_version(hw, &hw->fwrev); + if (rv != 0) + goto out; + + csio_hw_print_fw_version(hw, "Firmware revision"); + + rv = csio_do_hello(hw, &hw->fw_state); + if (rv != 0) { + CSIO_INC_STATS(hw, n_err_fatal); + csio_post_event(&hw->sm, CSIO_HWE_FATAL); + goto out; + } + + /* Read vpd */ + rv = csio_hw_get_vpd_params(hw, &hw->vpd); + if (rv != 0) + goto out; + + csio_hw_get_fw_version(hw, &hw->fwrev); + csio_hw_get_tp_version(hw, &hw->tp_vers); + if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { + + /* Do firmware update */ + spin_unlock_irq(&hw->lock); + rv = csio_hw_flash_fw(hw, &reset); + spin_lock_irq(&hw->lock); + + if (rv != 0) + goto out; + + rv = csio_hw_check_fwver(hw); + if (rv < 0) + goto out; + + /* If the firmware doesn't support Configuration Files, + * return an error. + */ + rv = csio_hw_check_fwconfig(hw, param); + if (rv != 0) { + csio_info(hw, "Firmware doesn't support " + "Firmware Configuration files\n"); + goto out; + } + + /* The firmware provides us with a memory buffer where we can + * load a Configuration File from the host if we want to + * override the Configuration File in flash. 
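+		 * csio_hw_use_fwconfig() tries the host file first and falls
+		 * back to the copy in FLASH, or to the config embedded in the
+		 * firmware itself, if no host file is found.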
+ */ + rv = csio_hw_use_fwconfig(hw, reset, param); + if (rv == -ENOENT) { + csio_info(hw, "Could not initialize " + "adapter, error%d\n", rv); + goto out; + } + if (rv != 0) { + csio_info(hw, "Could not initialize " + "adapter, error%d\n", rv); + goto out; + } + + } else { + rv = csio_hw_check_fwver(hw); + if (rv < 0) + goto out; + + if (hw->fw_state == CSIO_DEV_STATE_INIT) { + + hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; + + /* device parameters */ + rv = csio_get_device_params(hw); + if (rv != 0) + goto out; + + /* Get device capabilities */ + rv = csio_config_device_caps(hw); + if (rv != 0) + goto out; + + /* Configure SGE */ + csio_wr_sge_init(hw); + + /* Post event to notify completion of configuration */ + csio_post_event(&hw->sm, CSIO_HWE_INIT); + goto out; + } + } /* if not master */ + +out: + return; +} + +/* + * csio_hw_initialize - Initialize HW + * @hw - HW module + * + */ +static void +csio_hw_initialize(struct csio_hw *hw) +{ + struct csio_mb *mbp; + enum fw_retval retval; + int rv; + int i; + + if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) + goto out; + + csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n"); + goto free_and_out; + } + + retval = csio_mb_fw_retval(mbp); + if (retval != FW_SUCCESS) { + csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n", + retval); + goto free_and_out; + } + + mempool_free(mbp, hw->mb_mempool); + } + + rv = csio_get_fcoe_resinfo(hw); + if (rv != 0) { + csio_err(hw, "Failed to read fcoe resource info: %d\n", rv); + goto out; + } + + spin_unlock_irq(&hw->lock); + rv = csio_config_queues(hw); + spin_lock_irq(&hw->lock); + + if (rv != 0) { + csio_err(hw, "Config of queues failed!: %d\n", rv); + goto out; + } + + for (i = 0; i < hw->num_pports; i++) + hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA; + + if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { + rv = csio_enable_ports(hw); + if (rv != 0) { + csio_err(hw, "Failed to enable ports: %d\n", rv); + goto out; + } + } + + csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE); + return; + +free_and_out: + mempool_free(mbp, hw->mb_mempool); +out: + return; +} + +#define PF_INTR_MASK (PFSW_F | PFCIM_F) + +/* + * csio_hw_intr_enable - Enable HW interrupts + * @hw: Pointer to HW module. + * + * Enable interrupts in HW registers. + */ +static void +csio_hw_intr_enable(struct csio_hw *hw) +{ + uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw)); + u32 pf = 0; + uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A); + + if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) + pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); + else + pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); + + /* + * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up + * by FW, so do nothing for INTX. + */ + if (hw->intr_mode == CSIO_IM_MSIX) + csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A), + AIVEC_V(AIVEC_M), vec); + else if (hw->intr_mode == CSIO_IM_MSI) + csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A), + AIVEC_V(AIVEC_M), 0); + + csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A)); + + /* Turn on MB interrupts - this will internally flush PIO as well */ + csio_mb_intr_enable(hw); + + /* These are common registers - only a master can modify them */ + if (csio_is_hw_master(hw)) { + /* + * Disable the Serial FLASH interrupt, if enabled! 
+ */ + pl &= (~SF_F); + csio_wr_reg32(hw, pl, PL_INT_ENABLE_A); + + csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F | + EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F | + ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F | + ERR_DATA_CPL_ON_HIGH_QID1_F | + ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F | + ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F | + ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F | + ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F, + SGE_INT_ENABLE3_A); + csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf); + } + + hw->flags |= CSIO_HWF_HW_INTR_ENABLED; + +} + +/* + * csio_hw_intr_disable - Disable HW interrupts + * @hw: Pointer to HW module. + * + * Turn off Mailbox and PCI_PF_CFG interrupts. + */ +void +csio_hw_intr_disable(struct csio_hw *hw) +{ + u32 pf = 0; + + if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) + pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); + else + pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); + + if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED)) + return; + + hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED; + + csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A)); + if (csio_is_hw_master(hw)) + csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0); + + /* Turn off MB interrupts */ + csio_mb_intr_disable(hw); + +} + +void +csio_hw_fatal_err(struct csio_hw *hw) +{ + csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0); + csio_hw_intr_disable(hw); + + /* Do not reset HW, we may need FW state for debugging */ + csio_fatal(hw, "HW Fatal error encountered!\n"); +} + +/*****************************************************************************/ +/* START: HW SM */ +/*****************************************************************************/ +/* + * csio_hws_uninit - Uninit state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt) +{ + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_CFG: + csio_set_state(&hw->sm, csio_hws_configuring); + csio_hw_configure(hw); + break; + + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +} + +/* + * csio_hws_configuring - Configuring state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt) +{ + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_INIT: + csio_set_state(&hw->sm, csio_hws_initializing); + csio_hw_initialize(hw); + break; + + case CSIO_HWE_INIT_DONE: + csio_set_state(&hw->sm, csio_hws_ready); + /* Fan out event to all lnode SMs */ + csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); + break; + + case CSIO_HWE_FATAL: + csio_set_state(&hw->sm, csio_hws_uninit); + break; + + case CSIO_HWE_PCI_REMOVE: + csio_do_bye(hw); + break; + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +} + +/* + * csio_hws_initializing - Initializing state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt) +{ + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_INIT_DONE: + csio_set_state(&hw->sm, csio_hws_ready); + + /* Fan out event to all lnode SMs */ + csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); + + /* Enable interrupts */ + csio_hw_intr_enable(hw); + break; + + case CSIO_HWE_FATAL: + csio_set_state(&hw->sm, csio_hws_uninit); + break; + + case CSIO_HWE_PCI_REMOVE: + csio_do_bye(hw); + break; + + default: + CSIO_INC_STATS(hw, 
n_evt_unexp); + break; + } +} + +/* + * csio_hws_ready - Ready state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt) +{ + /* Remember the event */ + hw->evtflag = evt; + + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_HBA_RESET: + case CSIO_HWE_FW_DLOAD: + case CSIO_HWE_SUSPEND: + case CSIO_HWE_PCI_REMOVE: + case CSIO_HWE_PCIERR_DETECTED: + csio_set_state(&hw->sm, csio_hws_quiescing); + /* cleanup all outstanding cmds */ + if (evt == CSIO_HWE_HBA_RESET || + evt == CSIO_HWE_PCIERR_DETECTED) + csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false); + else + csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true); + + csio_hw_intr_disable(hw); + csio_hw_mbm_cleanup(hw); + csio_evtq_stop(hw); + csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP); + csio_evtq_flush(hw); + csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw)); + csio_post_event(&hw->sm, CSIO_HWE_QUIESCED); + break; + + case CSIO_HWE_FATAL: + csio_set_state(&hw->sm, csio_hws_uninit); + break; + + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +} + +/* + * csio_hws_quiescing - Quiescing state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt) +{ + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_QUIESCED: + switch (hw->evtflag) { + case CSIO_HWE_FW_DLOAD: + csio_set_state(&hw->sm, csio_hws_resetting); + /* Download firmware */ + fallthrough; + + case CSIO_HWE_HBA_RESET: + csio_set_state(&hw->sm, csio_hws_resetting); + /* Start reset of the HBA */ + csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET); + csio_wr_destroy_queues(hw, false); + csio_do_reset(hw, false); + csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE); + break; + + case CSIO_HWE_PCI_REMOVE: + csio_set_state(&hw->sm, csio_hws_removing); + csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE); + csio_wr_destroy_queues(hw, true); + /* Now send the bye command */ + csio_do_bye(hw); + break; + + case CSIO_HWE_SUSPEND: + csio_set_state(&hw->sm, csio_hws_quiesced); + break; + + case CSIO_HWE_PCIERR_DETECTED: + csio_set_state(&hw->sm, csio_hws_pcierr); + csio_wr_destroy_queues(hw, false); + break; + + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + + } + break; + + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +} + +/* + * csio_hws_quiesced - Quiesced state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt) +{ + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_RESUME: + csio_set_state(&hw->sm, csio_hws_configuring); + csio_hw_configure(hw); + break; + + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +} + +/* + * csio_hws_resetting - HW Resetting state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt) +{ + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_HBA_RESET_DONE: + csio_evtq_start(hw); + csio_set_state(&hw->sm, csio_hws_configuring); + csio_hw_configure(hw); + break; + + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +} + +/* + * csio_hws_removing - PCI Hotplug removing state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt) +{ + 
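+	/*
+	 * PCI hot-unplug path: the only event still honoured here is an HBA
+	 * reset from the master function, which is done directly through the
+	 * PL_RST register because the mailbox interface is gone by this point.
+	 */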
hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_HBA_RESET: + if (!csio_is_hw_master(hw)) + break; + /* + * The BYE should have already been issued, so we can't + * use the mailbox interface. Hence we use the PL_RST + * register directly. + */ + csio_err(hw, "Resetting HW and waiting 2 seconds...\n"); + csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A); + mdelay(2000); + break; + + /* Should never receive any new events */ + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + + } +} + +/* + * csio_hws_pcierr - PCI Error state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt) +{ + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_PCIERR_SLOT_RESET: + csio_evtq_start(hw); + csio_set_state(&hw->sm, csio_hws_configuring); + csio_hw_configure(hw); + break; + + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +} + +/*****************************************************************************/ +/* END: HW SM */ +/*****************************************************************************/ + +/* + * csio_handle_intr_status - table driven interrupt handler + * @hw: HW instance + * @reg: the interrupt status register to process + * @acts: table of interrupt actions + * + * A table driven interrupt handler that applies a set of masks to an + * interrupt status word and performs the corresponding actions if the + * interrupts described by the mask have occurred. The actions include + * optionally emitting a warning or alert message. The table is terminated + * by an entry specifying mask 0. Returns the number of fatal interrupt + * conditions. + */ +int +csio_handle_intr_status(struct csio_hw *hw, unsigned int reg, + const struct intr_info *acts) +{ + int fatal = 0; + unsigned int mask = 0; + unsigned int status = csio_rd_reg32(hw, reg); + + for ( ; acts->mask; ++acts) { + if (!(status & acts->mask)) + continue; + if (acts->fatal) { + fatal++; + csio_fatal(hw, "Fatal %s (0x%x)\n", + acts->msg, status & acts->mask); + } else if (acts->msg) + csio_info(hw, "%s (0x%x)\n", + acts->msg, status & acts->mask); + mask |= acts->mask; + } + status &= mask; + if (status) /* clear processed interrupts */ + csio_wr_reg32(hw, status, reg); + return fatal; +} + +/* + * TP interrupt handler. + */ +static void csio_tp_intr_handler(struct csio_hw *hw) +{ + static struct intr_info tp_intr_info[] = { + { 0x3fffffff, "TP parity error", -1, 1 }, + { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * SGE interrupt handler. 
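+ * Reports parity errors from SGE_INT_CAUSE1/2 and runs the table-driven
+ * handler on SGE_INT_CAUSE3; parity errors and fatal table entries
+ * escalate to csio_hw_fatal_err().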
+ */ +static void csio_sge_intr_handler(struct csio_hw *hw) +{ + uint64_t v; + + static struct intr_info sge_intr_info[] = { + { ERR_CPL_EXCEED_IQE_SIZE_F, + "SGE received CPL exceeding IQE size", -1, 1 }, + { ERR_INVALID_CIDX_INC_F, + "SGE GTS CIDX increment too large", -1, 0 }, + { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 }, + { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 }, + { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F, + "SGE IQID > 1023 received CPL for FL", -1, 0 }, + { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1, + 0 }, + { ERR_ING_CTXT_PRIO_F, + "SGE too many priority ingress contexts", -1, 0 }, + { ERR_EGR_CTXT_PRIO_F, + "SGE too many priority egress contexts", -1, 0 }, + { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 }, + { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 }, + { 0, NULL, 0, 0 } + }; + + v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) | + ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32); + if (v) { + csio_fatal(hw, "SGE parity error (%#llx)\n", + (unsigned long long)v); + csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF), + SGE_INT_CAUSE1_A); + csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A); + } + + v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info); + + if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) || + v != 0) + csio_hw_fatal_err(hw); +} + +#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\ + OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F) +#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\ + IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F) + +/* + * CIM interrupt handler. 
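+ * Covers both the CIM host interrupt causes (CIM_HOST_INT_CAUSE) and the
+ * uP access faults (CIM_HOST_UPACC_INT_CAUSE); every entry in both tables
+ * is fatal.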
+ */ +static void csio_cim_intr_handler(struct csio_hw *hw) +{ + static struct intr_info cim_intr_info[] = { + { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 }, + { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, + { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, + { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 }, + { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 }, + { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 }, + { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info cim_upintr_info[] = { + { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 }, + { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 }, + { ILLWRINT_F, "CIM illegal write", -1, 1 }, + { ILLRDINT_F, "CIM illegal read", -1, 1 }, + { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 }, + { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 }, + { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 }, + { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 }, + { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 }, + { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 }, + { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 }, + { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 }, + { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 }, + { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 }, + { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 }, + { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 }, + { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 }, + { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 }, + { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 }, + { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 }, + { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 }, + { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 }, + { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 }, + { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 }, + { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 }, + { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 }, + { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 }, + { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + int fat; + + fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A, + cim_intr_info) + + csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A, + cim_upintr_info); + if (fat) + csio_hw_fatal_err(hw); +} + +/* + * ULP RX interrupt handler. + */ +static void csio_ulprx_intr_handler(struct csio_hw *hw) +{ + static struct intr_info ulprx_intr_info[] = { + { 0x1800000, "ULPRX context error", -1, 1 }, + { 0x7fffff, "ULPRX parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * ULP TX interrupt handler. + */ +static void csio_ulptx_intr_handler(struct csio_hw *hw) +{ + static struct intr_info ulptx_intr_info[] = { + { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1, + 0 }, + { 0xfffffff, "ULPTX parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * PM TX interrupt handler. 
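+ * Oversized and zero-length pcmds, framing errors and parity errors in
+ * PM TX are all treated as fatal.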
+ */ +static void csio_pmtx_intr_handler(struct csio_hw *hw) +{ + static struct intr_info pmtx_intr_info[] = { + { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 }, + { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 }, + { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 }, + { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 }, + { 0xffffff0, "PMTX framing error", -1, 1 }, + { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 }, + { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1, + 1 }, + { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 }, + { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1}, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * PM RX interrupt handler. + */ +static void csio_pmrx_intr_handler(struct csio_hw *hw) +{ + static struct intr_info pmrx_intr_info[] = { + { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 }, + { 0x3ffff0, "PMRX framing error", -1, 1 }, + { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 }, + { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1, + 1 }, + { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 }, + { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1}, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * CPL switch interrupt handler. + */ +static void csio_cplsw_intr_handler(struct csio_hw *hw) +{ + static struct intr_info cplsw_intr_info[] = { + { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 }, + { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 }, + { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 }, + { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 }, + { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 }, + { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * LE interrupt handler. + */ +static void csio_le_intr_handler(struct csio_hw *hw) +{ + enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id); + + static struct intr_info le_intr_info[] = { + { LIPMISS_F, "LE LIP miss", -1, 0 }, + { LIP0_F, "LE 0 LIP error", -1, 0 }, + { PARITYERR_F, "LE parity error", -1, 1 }, + { UNKNOWNCMD_F, "LE unknown command", -1, 1 }, + { REQQPARERR_F, "LE request queue parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + static struct intr_info t6_le_intr_info[] = { + { T6_LIPMISS_F, "LE LIP miss", -1, 0 }, + { T6_LIP0_F, "LE 0 LIP error", -1, 0 }, + { TCAMINTPERR_F, "LE parity error", -1, 1 }, + { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 }, + { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, + (chip == CHELSIO_T5) ? + le_intr_info : t6_le_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * MPS interrupt handler. 
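+ * Walks the Rx, Tx, TRC, statistics and classification cause registers,
+ * then clears MPS_INT_CAUSE (with a read-back to flush the write).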
+ */ +static void csio_mps_intr_handler(struct csio_hw *hw) +{ + static struct intr_info mps_rx_intr_info[] = { + { 0xffffff, "MPS Rx parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info mps_tx_intr_info[] = { + { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 }, + { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 }, + { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error", + -1, 1 }, + { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error", + -1, 1 }, + { BUBBLE_F, "MPS Tx underflow", -1, 1 }, + { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 }, + { FRMERR_F, "MPS Tx framing error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info mps_trc_intr_info[] = { + { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 }, + { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error", + -1, 1 }, + { MISCPERR_F, "MPS TRC misc parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info mps_stat_sram_intr_info[] = { + { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info mps_stat_tx_intr_info[] = { + { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info mps_stat_rx_intr_info[] = { + { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info mps_cls_intr_info[] = { + { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 }, + { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 }, + { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + int fat; + + fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A, + mps_rx_intr_info) + + csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A, + mps_tx_intr_info) + + csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A, + mps_trc_intr_info) + + csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A, + mps_stat_sram_intr_info) + + csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A, + mps_stat_tx_intr_info) + + csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A, + mps_stat_rx_intr_info) + + csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A, + mps_cls_intr_info); + + csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A); + csio_rd_reg32(hw, MPS_INT_CAUSE_A); /* flush */ + if (fat) + csio_hw_fatal_err(hw); +} + +#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \ + ECC_UE_INT_CAUSE_F) + +/* + * EDC/MC interrupt handler. + */ +static void csio_mem_intr_handler(struct csio_hw *hw, int idx) +{ + static const char name[3][5] = { "EDC0", "EDC1", "MC" }; + + unsigned int addr, cnt_addr, v; + + if (idx <= MEM_EDC1) { + addr = EDC_REG(EDC_INT_CAUSE_A, idx); + cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx); + } else { + addr = MC_INT_CAUSE_A; + cnt_addr = MC_ECC_STATUS_A; + } + + v = csio_rd_reg32(hw, addr) & MEM_INT_MASK; + if (v & PERR_INT_CAUSE_F) + csio_fatal(hw, "%s FIFO parity error\n", name[idx]); + if (v & ECC_CE_INT_CAUSE_F) { + uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr)); + + csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr); + csio_warn(hw, "%u %s correctable ECC data error%s\n", + cnt, name[idx], cnt > 1 ? "s" : ""); + } + if (v & ECC_UE_INT_CAUSE_F) + csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]); + + csio_wr_reg32(hw, v, addr); + if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F)) + csio_hw_fatal_err(hw); +} + +/* + * MA interrupt handler. 
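+ * MA parity and address wrap-around errors are reported and are
+ * unconditionally fatal.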
+ */ +static void csio_ma_intr_handler(struct csio_hw *hw) +{ + uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A); + + if (status & MEM_PERR_INT_CAUSE_F) + csio_fatal(hw, "MA parity error, parity status %#x\n", + csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A)); + if (status & MEM_WRAP_INT_CAUSE_F) { + v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A); + csio_fatal(hw, + "MA address wrap-around error by client %u to address %#x\n", + MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4); + } + csio_wr_reg32(hw, status, MA_INT_CAUSE_A); + csio_hw_fatal_err(hw); +} + +/* + * SMB interrupt handler. + */ +static void csio_smb_intr_handler(struct csio_hw *hw) +{ + static struct intr_info smb_intr_info[] = { + { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 }, + { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 }, + { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * NC-SI interrupt handler. + */ +static void csio_ncsi_intr_handler(struct csio_hw *hw) +{ + static struct intr_info ncsi_intr_info[] = { + { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 }, + { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 }, + { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 }, + { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * XGMAC interrupt handler. + */ +static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) +{ + uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A)); + + v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F; + if (!v) + return; + + if (v & TXFIFO_PRTY_ERR_F) + csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port); + if (v & RXFIFO_PRTY_ERR_F) + csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port); + csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A)); + csio_hw_fatal_err(hw); +} + +/* + * PL interrupt handler. + */ +static void csio_pl_intr_handler(struct csio_hw *hw) +{ + static struct intr_info pl_intr_info[] = { + { FATALPERR_F, "T4 fatal parity error", -1, 1 }, + { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * csio_hw_slow_intr_handler - control path interrupt handler + * @hw: HW module + * + * Interrupt handler for non-data global interrupt events, e.g., errors. + * The designation 'slow' is because it involves register reads, while + * data interrupts typically don't involve any MMIOs. + */ +int +csio_hw_slow_intr_handler(struct csio_hw *hw) +{ + uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A); + + if (!(cause & CSIO_GLBL_INTR_MASK)) { + CSIO_INC_STATS(hw, n_plint_unexp); + return 0; + } + + csio_dbg(hw, "Slow interrupt! 
cause: 0x%x\n", cause); + + CSIO_INC_STATS(hw, n_plint_cnt); + + if (cause & CIM_F) + csio_cim_intr_handler(hw); + + if (cause & MPS_F) + csio_mps_intr_handler(hw); + + if (cause & NCSI_F) + csio_ncsi_intr_handler(hw); + + if (cause & PL_F) + csio_pl_intr_handler(hw); + + if (cause & SMB_F) + csio_smb_intr_handler(hw); + + if (cause & XGMAC0_F) + csio_xgmac_intr_handler(hw, 0); + + if (cause & XGMAC1_F) + csio_xgmac_intr_handler(hw, 1); + + if (cause & XGMAC_KR0_F) + csio_xgmac_intr_handler(hw, 2); + + if (cause & XGMAC_KR1_F) + csio_xgmac_intr_handler(hw, 3); + + if (cause & PCIE_F) + hw->chip_ops->chip_pcie_intr_handler(hw); + + if (cause & MC_F) + csio_mem_intr_handler(hw, MEM_MC); + + if (cause & EDC0_F) + csio_mem_intr_handler(hw, MEM_EDC0); + + if (cause & EDC1_F) + csio_mem_intr_handler(hw, MEM_EDC1); + + if (cause & LE_F) + csio_le_intr_handler(hw); + + if (cause & TP_F) + csio_tp_intr_handler(hw); + + if (cause & MA_F) + csio_ma_intr_handler(hw); + + if (cause & PM_TX_F) + csio_pmtx_intr_handler(hw); + + if (cause & PM_RX_F) + csio_pmrx_intr_handler(hw); + + if (cause & ULP_RX_F) + csio_ulprx_intr_handler(hw); + + if (cause & CPL_SWITCH_F) + csio_cplsw_intr_handler(hw); + + if (cause & SGE_F) + csio_sge_intr_handler(hw); + + if (cause & ULP_TX_F) + csio_ulptx_intr_handler(hw); + + /* Clear the interrupts just processed for which we are the master. */ + csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A); + csio_rd_reg32(hw, PL_INT_CAUSE_A); /* flush */ + + return 1; +} + +/***************************************************************************** + * HW <--> mailbox interfacing routines. + ****************************************************************************/ +/* + * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions + * + * @data: Private data pointer. + * + * Called from worker thread context. + */ +static void +csio_mberr_worker(void *data) +{ + struct csio_hw *hw = (struct csio_hw *)data; + struct csio_mbm *mbm = &hw->mbm; + LIST_HEAD(cbfn_q); + struct csio_mb *mbp_next; + int rv; + + del_timer_sync(&mbm->timer); + + spin_lock_irq(&hw->lock); + if (list_empty(&mbm->cbfn_q)) { + spin_unlock_irq(&hw->lock); + return; + } + + list_splice_tail_init(&mbm->cbfn_q, &cbfn_q); + mbm->stats.n_cbfnq = 0; + + /* Try to start waiting mailboxes */ + if (!list_empty(&mbm->req_q)) { + mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list); + list_del_init(&mbp_next->list); + + rv = csio_mb_issue(hw, mbp_next); + if (rv != 0) + list_add_tail(&mbp_next->list, &mbm->req_q); + else + CSIO_DEC_STATS(mbm, n_activeq); + } + spin_unlock_irq(&hw->lock); + + /* Now callback completions */ + csio_mb_completions(hw, &cbfn_q); +} + +/* + * csio_hw_mb_timer - Top-level Mailbox timeout handler. + * + * @data: private data pointer + * + **/ +static void +csio_hw_mb_timer(struct timer_list *t) +{ + struct csio_mbm *mbm = from_timer(mbm, t, timer); + struct csio_hw *hw = mbm->hw; + struct csio_mb *mbp = NULL; + + spin_lock_irq(&hw->lock); + mbp = csio_mb_tmo_handler(hw); + spin_unlock_irq(&hw->lock); + + /* Call back the function for the timed-out Mailbox */ + if (mbp) + mbp->mb_cbfn(hw, mbp); + +} + +/* + * csio_hw_mbm_cleanup - Cleanup Mailbox module. + * @hw: HW module + * + * Called with lock held, should exit with lock held. + * Cancels outstanding mailboxes (waiting, in-flight) and gathers them + * into a local queue. Drops lock and calls the completions. Holds + * lock and returns. 
+ */ +static void +csio_hw_mbm_cleanup(struct csio_hw *hw) +{ + LIST_HEAD(cbfn_q); + + csio_mb_cancel_all(hw, &cbfn_q); + + spin_unlock_irq(&hw->lock); + csio_mb_completions(hw, &cbfn_q); + spin_lock_irq(&hw->lock); +} + +/***************************************************************************** + * Event handling + ****************************************************************************/ +int +csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg, + uint16_t len) +{ + struct csio_evt_msg *evt_entry = NULL; + + if (type >= CSIO_EVT_MAX) + return -EINVAL; + + if (len > CSIO_EVT_MSG_SIZE) + return -EINVAL; + + if (hw->flags & CSIO_HWF_FWEVT_STOP) + return -EINVAL; + + if (list_empty(&hw->evt_free_q)) { + csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", + type, len); + return -ENOMEM; + } + + evt_entry = list_first_entry(&hw->evt_free_q, + struct csio_evt_msg, list); + list_del_init(&evt_entry->list); + + /* copy event msg and queue the event */ + evt_entry->type = type; + memcpy((void *)evt_entry->data, evt_msg, len); + list_add_tail(&evt_entry->list, &hw->evt_active_q); + + CSIO_DEC_STATS(hw, n_evt_freeq); + CSIO_INC_STATS(hw, n_evt_activeq); + + return 0; +} + +static int +csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg, + uint16_t len, bool msg_sg) +{ + struct csio_evt_msg *evt_entry = NULL; + struct csio_fl_dma_buf *fl_sg; + uint32_t off = 0; + unsigned long flags; + int n, ret = 0; + + if (type >= CSIO_EVT_MAX) + return -EINVAL; + + if (len > CSIO_EVT_MSG_SIZE) + return -EINVAL; + + spin_lock_irqsave(&hw->lock, flags); + if (hw->flags & CSIO_HWF_FWEVT_STOP) { + ret = -EINVAL; + goto out; + } + + if (list_empty(&hw->evt_free_q)) { + csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", + type, len); + ret = -ENOMEM; + goto out; + } + + evt_entry = list_first_entry(&hw->evt_free_q, + struct csio_evt_msg, list); + list_del_init(&evt_entry->list); + + /* copy event msg and queue the event */ + evt_entry->type = type; + + /* If Payload in SG list*/ + if (msg_sg) { + fl_sg = (struct csio_fl_dma_buf *) evt_msg; + for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) { + memcpy((void *)((uintptr_t)evt_entry->data + off), + fl_sg->flbufs[n].vaddr, + fl_sg->flbufs[n].len); + off += fl_sg->flbufs[n].len; + } + } else + memcpy((void *)evt_entry->data, evt_msg, len); + + list_add_tail(&evt_entry->list, &hw->evt_active_q); + CSIO_DEC_STATS(hw, n_evt_freeq); + CSIO_INC_STATS(hw, n_evt_activeq); +out: + spin_unlock_irqrestore(&hw->lock, flags); + return ret; +} + +static void +csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry) +{ + if (evt_entry) { + spin_lock_irq(&hw->lock); + list_del_init(&evt_entry->list); + list_add_tail(&evt_entry->list, &hw->evt_free_q); + CSIO_DEC_STATS(hw, n_evt_activeq); + CSIO_INC_STATS(hw, n_evt_freeq); + spin_unlock_irq(&hw->lock); + } +} + +void +csio_evtq_flush(struct csio_hw *hw) +{ + uint32_t count; + count = 30; + while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) { + spin_unlock_irq(&hw->lock); + msleep(2000); + spin_lock_irq(&hw->lock); + } + + CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING)); +} + +static void +csio_evtq_stop(struct csio_hw *hw) +{ + hw->flags |= CSIO_HWF_FWEVT_STOP; +} + +static void +csio_evtq_start(struct csio_hw *hw) +{ + hw->flags &= ~CSIO_HWF_FWEVT_STOP; +} + +static void +csio_evtq_cleanup(struct csio_hw *hw) +{ + struct list_head *evt_entry, *next_entry; + + /* Release outstanding events from activeq to freeq*/ + if 
(!list_empty(&hw->evt_active_q)) + list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q); + + hw->stats.n_evt_activeq = 0; + hw->flags &= ~CSIO_HWF_FWEVT_PENDING; + + /* Freeup event entry */ + list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) { + kfree(evt_entry); + CSIO_DEC_STATS(hw, n_evt_freeq); + } + + hw->stats.n_evt_freeq = 0; +} + + +static void +csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len, + struct csio_fl_dma_buf *flb, void *priv) +{ + __u8 op; + void *msg = NULL; + uint32_t msg_len = 0; + bool msg_sg = 0; + + op = ((struct rss_header *) wr)->opcode; + if (op == CPL_FW6_PLD) { + CSIO_INC_STATS(hw, n_cpl_fw6_pld); + if (!flb || !flb->totlen) { + CSIO_INC_STATS(hw, n_cpl_unexp); + return; + } + + msg = (void *) flb; + msg_len = flb->totlen; + msg_sg = 1; + } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) { + + CSIO_INC_STATS(hw, n_cpl_fw6_msg); + /* skip RSS header */ + msg = (void *)((uintptr_t)wr + sizeof(__be64)); + msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) : + sizeof(struct cpl_fw4_msg); + } else { + csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op); + CSIO_INC_STATS(hw, n_cpl_unexp); + return; + } + + /* + * Enqueue event to EventQ. Events processing happens + * in Event worker thread context + */ + if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg, + (uint16_t)msg_len, msg_sg)) + CSIO_INC_STATS(hw, n_evt_drop); +} + +void +csio_evtq_worker(struct work_struct *work) +{ + struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work); + struct list_head *evt_entry, *next_entry; + LIST_HEAD(evt_q); + struct csio_evt_msg *evt_msg; + struct cpl_fw6_msg *msg; + struct csio_rnode *rn; + int rv = 0; + uint8_t evtq_stop = 0; + + csio_dbg(hw, "event worker thread active evts#%d\n", + hw->stats.n_evt_activeq); + + spin_lock_irq(&hw->lock); + while (!list_empty(&hw->evt_active_q)) { + list_splice_tail_init(&hw->evt_active_q, &evt_q); + spin_unlock_irq(&hw->lock); + + list_for_each_safe(evt_entry, next_entry, &evt_q) { + evt_msg = (struct csio_evt_msg *) evt_entry; + + /* Drop events if queue is STOPPED */ + spin_lock_irq(&hw->lock); + if (hw->flags & CSIO_HWF_FWEVT_STOP) + evtq_stop = 1; + spin_unlock_irq(&hw->lock); + if (evtq_stop) { + CSIO_INC_STATS(hw, n_evt_drop); + goto free_evt; + } + + switch (evt_msg->type) { + case CSIO_EVT_FW: + msg = (struct cpl_fw6_msg *)(evt_msg->data); + + if ((msg->opcode == CPL_FW6_MSG || + msg->opcode == CPL_FW4_MSG) && + !msg->type) { + rv = csio_mb_fwevt_handler(hw, + msg->data); + if (!rv) + break; + /* Handle any remaining fw events */ + csio_fcoe_fwevt_handler(hw, + msg->opcode, msg->data); + } else if (msg->opcode == CPL_FW6_PLD) { + + csio_fcoe_fwevt_handler(hw, + msg->opcode, msg->data); + } else { + csio_warn(hw, + "Unhandled FW msg op %x type %x\n", + msg->opcode, msg->type); + CSIO_INC_STATS(hw, n_evt_drop); + } + break; + + case CSIO_EVT_MBX: + csio_mberr_worker(hw); + break; + + case CSIO_EVT_DEV_LOSS: + memcpy(&rn, evt_msg->data, sizeof(rn)); + csio_rnode_devloss_handler(rn); + break; + + default: + csio_warn(hw, "Unhandled event %x on evtq\n", + evt_msg->type); + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +free_evt: + csio_free_evt(hw, evt_msg); + } + + spin_lock_irq(&hw->lock); + } + hw->flags &= ~CSIO_HWF_FWEVT_PENDING; + spin_unlock_irq(&hw->lock); +} + +int +csio_fwevtq_handler(struct csio_hw *hw) +{ + int rv; + + if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) { + CSIO_INC_STATS(hw, n_int_stray); + return -EINVAL; + } + + rv = csio_wr_process_iq_idx(hw, 
hw->fwevt_iq_idx, + csio_process_fwevtq_entry, NULL); + return rv; +} + +/**************************************************************************** + * Entry points + ****************************************************************************/ + +/* Management module */ +/* + * csio_mgmt_req_lookup - Lookup the given IO req exist in Active Q. + * mgmt - mgmt module + * @io_req - io request + * + * Return - 0:if given IO Req exists in active Q. + * -EINVAL :if lookup fails. + */ +int +csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req) +{ + struct list_head *tmp; + + /* Lookup ioreq in the ACTIVEQ */ + list_for_each(tmp, &mgmtm->active_q) { + if (io_req == (struct csio_ioreq *)tmp) + return 0; + } + return -EINVAL; +} + +#define ECM_MIN_TMO 1000 /* Minimum timeout value for req */ + +/* + * csio_mgmts_tmo_handler - MGMT IO Timeout handler. + * @data - Event data. + * + * Return - none. + */ +static void +csio_mgmt_tmo_handler(struct timer_list *t) +{ + struct csio_mgmtm *mgmtm = from_timer(mgmtm, t, mgmt_timer); + struct list_head *tmp; + struct csio_ioreq *io_req; + + csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n"); + + spin_lock_irq(&mgmtm->hw->lock); + + list_for_each(tmp, &mgmtm->active_q) { + io_req = (struct csio_ioreq *) tmp; + io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO); + + if (!io_req->tmo) { + /* Dequeue the request from retry Q. */ + tmp = csio_list_prev(tmp); + list_del_init(&io_req->sm.sm_list); + if (io_req->io_cbfn) { + /* io_req will be freed by completion handler */ + io_req->wr_status = -ETIMEDOUT; + io_req->io_cbfn(mgmtm->hw, io_req); + } else { + CSIO_DB_ASSERT(0); + } + } + } + + /* If retry queue is not empty, re-arm timer */ + if (!list_empty(&mgmtm->active_q)) + mod_timer(&mgmtm->mgmt_timer, + jiffies + msecs_to_jiffies(ECM_MIN_TMO)); + spin_unlock_irq(&mgmtm->hw->lock); +} + +static void +csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm) +{ + struct csio_hw *hw = mgmtm->hw; + struct csio_ioreq *io_req; + struct list_head *tmp; + uint32_t count; + + count = 30; + /* Wait for all outstanding req to complete gracefully */ + while ((!list_empty(&mgmtm->active_q)) && count--) { + spin_unlock_irq(&hw->lock); + msleep(2000); + spin_lock_irq(&hw->lock); + } + + /* release outstanding req from ACTIVEQ */ + list_for_each(tmp, &mgmtm->active_q) { + io_req = (struct csio_ioreq *) tmp; + tmp = csio_list_prev(tmp); + list_del_init(&io_req->sm.sm_list); + mgmtm->stats.n_active--; + if (io_req->io_cbfn) { + /* io_req will be freed by completion handler */ + io_req->wr_status = -ETIMEDOUT; + io_req->io_cbfn(mgmtm->hw, io_req); + } + } +} + +/* + * csio_mgmt_init - Mgmt module init entry point + * @mgmtsm - mgmt module + * @hw - HW module + * + * Initialize mgmt timer, resource wait queue, active queue, + * completion q. Allocate Egress and Ingress + * WR queues and save off the queue index returned by the WR + * module for future use. Allocate and save off mgmt reqs in the + * mgmt_req_freelist for future use. Make sure their SM is initialized + * to uninit state. + * Returns: 0 - on success + * -ENOMEM - on error. + */ +static int +csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw) +{ + timer_setup(&mgmtm->mgmt_timer, csio_mgmt_tmo_handler, 0); + + INIT_LIST_HEAD(&mgmtm->active_q); + INIT_LIST_HEAD(&mgmtm->cbfn_q); + + mgmtm->hw = hw; + /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/ + + return 0; +} + +/* + * csio_mgmtm_exit - MGMT module exit entry point + * @mgmtsm - mgmt module + * + * This function called during MGMT module uninit. 
+ * Stop timers, free ioreqs allocated. + * Returns: None + * + */ +static void +csio_mgmtm_exit(struct csio_mgmtm *mgmtm) +{ + del_timer_sync(&mgmtm->mgmt_timer); +} + + +/** + * csio_hw_start - Kicks off the HW State machine + * @hw: Pointer to HW module. + * + * It is assumed that the initialization is a synchronous operation. + * So when we return after posting the event, the HW SM should be in + * the ready state, if there were no errors during init. + */ +int +csio_hw_start(struct csio_hw *hw) +{ + spin_lock_irq(&hw->lock); + csio_post_event(&hw->sm, CSIO_HWE_CFG); + spin_unlock_irq(&hw->lock); + + if (csio_is_hw_ready(hw)) + return 0; + else if (csio_match_state(hw, csio_hws_uninit)) + return -EINVAL; + else + return -ENODEV; +} + +int +csio_hw_stop(struct csio_hw *hw) +{ + csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE); + + if (csio_is_hw_removing(hw)) + return 0; + else + return -EINVAL; +} + +/* Max reset retries */ +#define CSIO_MAX_RESET_RETRIES 3 + +/** + * csio_hw_reset - Reset the hardware + * @hw: HW module. + * + * Caller should hold lock across this function. + */ +int +csio_hw_reset(struct csio_hw *hw) +{ + if (!csio_is_hw_master(hw)) + return -EPERM; + + if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) { + csio_dbg(hw, "Max hw reset attempts reached.."); + return -EINVAL; + } + + hw->rst_retries++; + csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET); + + if (csio_is_hw_ready(hw)) { + hw->rst_retries = 0; + hw->stats.n_reset_start = jiffies_to_msecs(jiffies); + return 0; + } else + return -EINVAL; +} + +/* + * csio_hw_get_device_id - Caches the Adapter's vendor & device id. + * @hw: HW module. + */ +static void +csio_hw_get_device_id(struct csio_hw *hw) +{ + /* Is the adapter device id cached already ?*/ + if (csio_is_dev_id_cached(hw)) + return; + + /* Get the PCI vendor & device id */ + pci_read_config_word(hw->pdev, PCI_VENDOR_ID, + &hw->params.pci.vendor_id); + pci_read_config_word(hw->pdev, PCI_DEVICE_ID, + &hw->params.pci.device_id); + + csio_dev_id_cached(hw); + hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK); + +} /* csio_hw_get_device_id */ + +/* + * csio_hw_set_description - Set the model, description of the hw. + * @hw: HW module. + * @ven_id: PCI Vendor ID + * @dev_id: PCI Device ID + */ +static void +csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id) +{ + uint32_t adap_type, prot_type; + + if (ven_id == CSIO_VENDOR_ID) { + prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK); + adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK); + + if (prot_type == CSIO_T5_FCOE_ASIC) { + memcpy(hw->hw_ver, + csio_t5_fcoe_adapters[adap_type].model_no, 16); + memcpy(hw->model_desc, + csio_t5_fcoe_adapters[adap_type].description, + 32); + } else { + char tempName[32] = "Chelsio FCoE Controller"; + memcpy(hw->model_desc, tempName, 32); + } + } +} /* csio_hw_set_description */ + +/** + * csio_hw_init - Initialize HW module. + * @hw: Pointer to HW module. + * + * Initialize the members of the HW module. 
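+ * Sets up the HW state machine, caches the PCI IDs, hooks up the chip
+ * ops, brings up the mailbox, work-request, SCSI and management
+ * sub-modules, and pre-allocates the slow-path event queue.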
+ */ +int +csio_hw_init(struct csio_hw *hw) +{ + int rv = -EINVAL; + uint32_t i; + uint16_t ven_id, dev_id; + struct csio_evt_msg *evt_entry; + + INIT_LIST_HEAD(&hw->sm.sm_list); + csio_init_state(&hw->sm, csio_hws_uninit); + spin_lock_init(&hw->lock); + INIT_LIST_HEAD(&hw->sln_head); + + /* Get the PCI vendor & device id */ + csio_hw_get_device_id(hw); + + strcpy(hw->name, CSIO_HW_NAME); + + /* Initialize the HW chip ops T5 specific ops */ + hw->chip_ops = &t5_ops; + + /* Set the model & its description */ + + ven_id = hw->params.pci.vendor_id; + dev_id = hw->params.pci.device_id; + + csio_hw_set_description(hw, ven_id, dev_id); + + /* Initialize default log level */ + hw->params.log_level = (uint32_t) csio_dbg_level; + + csio_set_fwevt_intr_idx(hw, -1); + csio_set_nondata_intr_idx(hw, -1); + + /* Init all the modules: Mailbox, WorkRequest and Transport */ + if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer)) + goto err; + + rv = csio_wrm_init(csio_hw_to_wrm(hw), hw); + if (rv) + goto err_mbm_exit; + + rv = csio_scsim_init(csio_hw_to_scsim(hw), hw); + if (rv) + goto err_wrm_exit; + + rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw); + if (rv) + goto err_scsim_exit; + /* Pre-allocate evtq and initialize them */ + INIT_LIST_HEAD(&hw->evt_active_q); + INIT_LIST_HEAD(&hw->evt_free_q); + for (i = 0; i < csio_evtq_sz; i++) { + + evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL); + if (!evt_entry) { + rv = -ENOMEM; + csio_err(hw, "Failed to initialize eventq"); + goto err_evtq_cleanup; + } + + list_add_tail(&evt_entry->list, &hw->evt_free_q); + CSIO_INC_STATS(hw, n_evt_freeq); + } + + hw->dev_num = dev_num; + dev_num++; + + return 0; + +err_evtq_cleanup: + csio_evtq_cleanup(hw); + csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); +err_scsim_exit: + csio_scsim_exit(csio_hw_to_scsim(hw)); +err_wrm_exit: + csio_wrm_exit(csio_hw_to_wrm(hw), hw); +err_mbm_exit: + csio_mbm_exit(csio_hw_to_mbm(hw)); +err: + return rv; +} + +/** + * csio_hw_exit - Un-initialize HW module. + * @hw: Pointer to HW module. + * + */ +void +csio_hw_exit(struct csio_hw *hw) +{ + csio_evtq_cleanup(hw); + csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); + csio_scsim_exit(csio_hw_to_scsim(hw)); + csio_wrm_exit(csio_hw_to_wrm(hw), hw); + csio_mbm_exit(csio_hw_to_mbm(hw)); +} diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h new file mode 100644 index 000000000..e351af6e7 --- /dev/null +++ b/drivers/scsi/csiostor/csio_hw.h @@ -0,0 +1,666 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CSIO_HW_H__ +#define __CSIO_HW_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t4_hw.h" +#include "csio_hw_chip.h" +#include "csio_wr.h" +#include "csio_mb.h" +#include "csio_scsi.h" +#include "csio_defs.h" +#include "t4_regs.h" +#include "t4_msg.h" + +/* + * An error value used by host. Should not clash with FW defined return values. + */ +#define FW_HOSTERROR 255 + +#define CSIO_HW_NAME "Chelsio FCoE Adapter" +#define CSIO_MAX_PFN 8 +#define CSIO_MAX_PPORTS 4 + +#define CSIO_MAX_LUN 0xFFFF +#define CSIO_MAX_QUEUE 2048 +#define CSIO_MAX_CMD_PER_LUN 32 +#define CSIO_MAX_DDP_BUF_SIZE (1024 * 1024) +#define CSIO_MAX_SECTOR_SIZE 128 +#define CSIO_MIN_T6_FW 0x01102D00 /* FW 1.16.45.0 */ + +/* Interrupts */ +#define CSIO_EXTRA_MSI_IQS 2 /* Extra iqs for INTX/MSI mode + * (Forward intr iq + fw iq) */ +#define CSIO_EXTRA_VECS 2 /* non-data + FW evt */ +#define CSIO_MAX_SCSI_CPU 128 +#define CSIO_MAX_SCSI_QSETS (CSIO_MAX_SCSI_CPU * CSIO_MAX_PPORTS) +#define CSIO_MAX_MSIX_VECS (CSIO_MAX_SCSI_QSETS + CSIO_EXTRA_VECS) + +/* Queues */ +enum { + CSIO_INTR_WRSIZE = 128, + CSIO_INTR_IQSIZE = ((CSIO_MAX_MSIX_VECS + 1) * CSIO_INTR_WRSIZE), + CSIO_FWEVT_WRSIZE = 128, + CSIO_FWEVT_IQLEN = 128, + CSIO_FWEVT_FLBUFS = 64, + CSIO_FWEVT_IQSIZE = (CSIO_FWEVT_WRSIZE * CSIO_FWEVT_IQLEN), + CSIO_HW_NIQ = 1, + CSIO_HW_NFLQ = 1, + CSIO_HW_NEQ = 1, + CSIO_HW_NINTXQ = 1, +}; + +struct csio_msix_entries { + void *dev_id; /* Priv object associated w/ this msix*/ + char desc[24]; /* Description of this vector */ +}; + +struct csio_scsi_qset { + int iq_idx; /* Ingress index */ + int eq_idx; /* Egress index */ + uint32_t intr_idx; /* MSIX Vector index */ +}; + +struct csio_scsi_cpu_info { + int16_t max_cpus; +}; + +extern int csio_dbg_level; +extern unsigned int csio_port_mask; +extern int csio_msi; + +#define CSIO_VENDOR_ID 0x1425 +#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00 +#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF + +#define CSIO_GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | \ + EDC0_F | EDC1_F | LE_F | TP_F | MA_F | \ + PM_TX_F | PM_RX_F | ULP_RX_F | \ + CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F) + +/* + * Hard parameters used to initialize the card in the absence of a + * configuration file. 
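[Aside, not part of the patch] As a sanity check on the interrupt and queue limits defined above, here is a small stand-alone sketch of the same arithmetic, with the values copied from the #defines and enums: 512 SCSI queue sets, 514 MSI-X vectors, and a forward-interrupt ingress queue sized at (vectors + 1) * 128 bytes.

#include <stdio.h>

int main(void)
{
	int max_scsi_cpu = 128, max_pports = 4;        /* CSIO_MAX_SCSI_CPU, CSIO_MAX_PPORTS */
	int extra_vecs = 2, intr_wrsize = 128;         /* CSIO_EXTRA_VECS, CSIO_INTR_WRSIZE  */

	int max_qsets = max_scsi_cpu * max_pports;     /* CSIO_MAX_SCSI_QSETS = 512   */
	int max_vecs  = max_qsets + extra_vecs;        /* CSIO_MAX_MSIX_VECS  = 514   */
	int intr_iqsz = (max_vecs + 1) * intr_wrsize;  /* CSIO_INTR_IQSIZE    = 65920 */

	printf("qsets=%d msix_vecs=%d intr_iq=%d bytes\n",
	       max_qsets, max_vecs, intr_iqsz);
	return 0;
}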
+ */ +enum { + /* General */ + CSIO_SGE_DBFIFO_INT_THRESH = 10, + + CSIO_SGE_RX_DMA_OFFSET = 2, + + CSIO_SGE_FLBUF_SIZE1 = 65536, + CSIO_SGE_FLBUF_SIZE2 = 1536, + CSIO_SGE_FLBUF_SIZE3 = 9024, + CSIO_SGE_FLBUF_SIZE4 = 9216, + CSIO_SGE_FLBUF_SIZE5 = 2048, + CSIO_SGE_FLBUF_SIZE6 = 128, + CSIO_SGE_FLBUF_SIZE7 = 8192, + CSIO_SGE_FLBUF_SIZE8 = 16384, + + CSIO_SGE_TIMER_VAL_0 = 5, + CSIO_SGE_TIMER_VAL_1 = 10, + CSIO_SGE_TIMER_VAL_2 = 20, + CSIO_SGE_TIMER_VAL_3 = 50, + CSIO_SGE_TIMER_VAL_4 = 100, + CSIO_SGE_TIMER_VAL_5 = 200, + + CSIO_SGE_INT_CNT_VAL_0 = 1, + CSIO_SGE_INT_CNT_VAL_1 = 4, + CSIO_SGE_INT_CNT_VAL_2 = 8, + CSIO_SGE_INT_CNT_VAL_3 = 16, +}; + +/* Slowpath events */ +enum csio_evt { + CSIO_EVT_FW = 0, /* FW event */ + CSIO_EVT_MBX, /* MBX event */ + CSIO_EVT_SCN, /* State change notification */ + CSIO_EVT_DEV_LOSS, /* Device loss event */ + CSIO_EVT_MAX, /* Max supported event */ +}; + +#define CSIO_EVT_MSG_SIZE 512 +#define CSIO_EVTQ_SIZE 512 + +/* Event msg */ +struct csio_evt_msg { + struct list_head list; /* evt queue*/ + enum csio_evt type; + uint8_t data[CSIO_EVT_MSG_SIZE]; +}; + +enum { + SERNUM_LEN = 16, /* Serial # length */ + EC_LEN = 16, /* E/C length */ + ID_LEN = 16, /* ID length */ +}; + +enum { + SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */ +}; + +/* serial flash and firmware constants */ +enum { + SF_ATTEMPTS = 10, /* max retries for SF operations */ + + /* flash command opcodes */ + SF_PROG_PAGE = 2, /* program page */ + SF_WR_DISABLE = 4, /* disable writes */ + SF_RD_STATUS = 5, /* read status register */ + SF_WR_ENABLE = 6, /* enable writes */ + SF_RD_DATA_FAST = 0xb, /* read flash */ + SF_RD_ID = 0x9f, /* read ID */ + SF_ERASE_SECTOR = 0xd8, /* erase sector */ +}; + +/* Management module */ +enum { + CSIO_MGMT_EQ_WRSIZE = 512, + CSIO_MGMT_IQ_WRSIZE = 128, + CSIO_MGMT_EQLEN = 64, + CSIO_MGMT_IQLEN = 64, +}; + +#define CSIO_MGMT_EQSIZE (CSIO_MGMT_EQLEN * CSIO_MGMT_EQ_WRSIZE) +#define CSIO_MGMT_IQSIZE (CSIO_MGMT_IQLEN * CSIO_MGMT_IQ_WRSIZE) + +/* mgmt module stats */ +struct csio_mgmtm_stats { + uint32_t n_abort_req; /* Total abort request */ + uint32_t n_abort_rsp; /* Total abort response */ + uint32_t n_close_req; /* Total close request */ + uint32_t n_close_rsp; /* Total close response */ + uint32_t n_err; /* Total Errors */ + uint32_t n_drop; /* Total request dropped */ + uint32_t n_active; /* Count of active_q */ + uint32_t n_cbfn; /* Count of cbfn_q */ +}; + +/* MGMT module */ +struct csio_mgmtm { + struct csio_hw *hw; /* Pointer to HW moduel */ + int eq_idx; /* Egress queue index */ + int iq_idx; /* Ingress queue index */ + int msi_vec; /* MSI vector */ + struct list_head active_q; /* Outstanding ELS/CT */ + struct list_head abort_q; /* Outstanding abort req */ + struct list_head cbfn_q; /* Completion queue */ + struct list_head mgmt_req_freelist; /* Free poll of reqs */ + /* ELSCT request freelist*/ + struct timer_list mgmt_timer; /* MGMT timer */ + struct csio_mgmtm_stats stats; /* ELS/CT stats */ +}; + +struct csio_adap_desc { + char model_no[16]; + char description[32]; +}; + +struct pci_params { + uint16_t vendor_id; + uint16_t device_id; + int vpd_cap_addr; + uint16_t speed; + uint8_t width; +}; + +/* User configurable hw parameters */ +struct csio_hw_params { + uint32_t sf_size; /* serial flash + * size in bytes + */ + uint32_t sf_nsec; /* # of flash sectors */ + struct pci_params pci; + uint32_t log_level; /* Module-level for + * debug log. 
+ */ +}; + +struct csio_vpd { + uint32_t cclk; + uint8_t ec[EC_LEN + 1]; + uint8_t sn[SERNUM_LEN + 1]; + uint8_t id[ID_LEN + 1]; +}; + +/* Firmware Port Capabilities types. */ + +typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */ +typedef u32 fw_port_cap32_t; /* 32-bit Port Capabilities integral value */ + +enum fw_caps { + FW_CAPS_UNKNOWN = 0, /* 0'ed out initial state */ + FW_CAPS16 = 1, /* old Firmware: 16-bit Port Capabilities */ + FW_CAPS32 = 2, /* new Firmware: 32-bit Port Capabilities */ +}; + +enum cc_pause { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +enum cc_fec { + FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */ + FEC_RS = 1 << 1, /* Reed-Solomon */ + FEC_BASER_RS = 1 << 2 /* BaseR/Reed-Solomon */ +}; + +struct link_config { + fw_port_cap32_t pcaps; /* link capabilities */ + fw_port_cap32_t def_acaps; /* default advertised capabilities */ + fw_port_cap32_t acaps; /* advertised capabilities */ + fw_port_cap32_t lpacaps; /* peer advertised capabilities */ + + fw_port_cap32_t speed_caps; /* speed(s) user has requested */ + unsigned int speed; /* actual link speed (Mb/s) */ + + enum cc_pause requested_fc; /* flow control user has requested */ + enum cc_pause fc; /* actual link flow control */ + + enum cc_fec requested_fec; /* Forward Error Correction: */ + enum cc_fec fec; /* requested and actual in use */ + + unsigned char autoneg; /* autonegotiating? */ + + unsigned char link_ok; /* link up? */ + unsigned char link_down_rc; /* link down reason */ +}; + +#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) + +#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \ + FW_PORT_CAP32_ANEG) + +/* Enable or disable autonegotiation. */ +#define AUTONEG_DISABLE 0x00 +#define AUTONEG_ENABLE 0x01 + +struct csio_pport { + uint16_t pcap; + uint16_t acap; + uint8_t portid; + uint8_t link_status; + uint16_t link_speed; + uint8_t mac[6]; + uint8_t mod_type; + uint8_t rsvd1; + uint8_t rsvd2; + uint8_t rsvd3; + struct link_config link_cfg; +}; + +/* fcoe resource information */ +struct csio_fcoe_res_info { + uint16_t e_d_tov; + uint16_t r_a_tov_seq; + uint16_t r_a_tov_els; + uint16_t r_r_tov; + uint32_t max_xchgs; + uint32_t max_ssns; + uint32_t used_xchgs; + uint32_t used_ssns; + uint32_t max_fcfs; + uint32_t max_vnps; + uint32_t used_fcfs; + uint32_t used_vnps; +}; + +/* HW State machine Events */ +enum csio_hw_ev { + CSIO_HWE_CFG = (uint32_t)1, /* Starts off the State machine */ + CSIO_HWE_INIT, /* Config done, start Init */ + CSIO_HWE_INIT_DONE, /* Init Mailboxes sent, HW ready */ + CSIO_HWE_FATAL, /* Fatal error during initialization */ + CSIO_HWE_PCIERR_DETECTED,/* PCI error recovery detetced */ + CSIO_HWE_PCIERR_SLOT_RESET, /* Slot reset after PCI recoviery */ + CSIO_HWE_PCIERR_RESUME, /* Resume after PCI error recovery */ + CSIO_HWE_QUIESCED, /* HBA quiesced */ + CSIO_HWE_HBA_RESET, /* HBA reset requested */ + CSIO_HWE_HBA_RESET_DONE, /* HBA reset completed */ + CSIO_HWE_FW_DLOAD, /* FW download requested */ + CSIO_HWE_PCI_REMOVE, /* PCI de-instantiation */ + CSIO_HWE_SUSPEND, /* HW suspend for Online(hot) replacement */ + CSIO_HWE_RESUME, /* HW resume for Online(hot) replacement */ + CSIO_HWE_MAX, /* Max HW event */ +}; + +/* hw stats */ +struct csio_hw_stats { + uint32_t n_evt_activeq; /* Number of event in active Q */ + uint32_t n_evt_freeq; /* Number of event in free Q */ + uint32_t n_evt_drop; /* Number of event droped */ + uint32_t n_evt_unexp; /* Number of unexpected events */ + uint32_t 
n_pcich_offline;/* Number of pci channel offline */ + uint32_t n_lnlkup_miss; /* Number of lnode lookup miss */ + uint32_t n_cpl_fw6_msg; /* Number of cpl fw6 message*/ + uint32_t n_cpl_fw6_pld; /* Number of cpl fw6 payload*/ + uint32_t n_cpl_unexp; /* Number of unexpected cpl */ + uint32_t n_mbint_unexp; /* Number of unexpected mbox */ + /* interrupt */ + uint32_t n_plint_unexp; /* Number of unexpected PL */ + /* interrupt */ + uint32_t n_plint_cnt; /* Number of PL interrupt */ + uint32_t n_int_stray; /* Number of stray interrupt */ + uint32_t n_err; /* Number of hw errors */ + uint32_t n_err_fatal; /* Number of fatal errors */ + uint32_t n_err_nomem; /* Number of memory alloc failure */ + uint32_t n_err_io; /* Number of IO failure */ + enum csio_hw_ev n_evt_sm[CSIO_HWE_MAX]; /* Number of sm events */ + uint64_t n_reset_start; /* Start time after the reset */ + uint32_t rsvd1; +}; + +/* Defines for hw->flags */ +#define CSIO_HWF_MASTER 0x00000001 /* This is the Master + * function for the + * card. + */ +#define CSIO_HWF_HW_INTR_ENABLED 0x00000002 /* Are HW Interrupt + * enable bit set? + */ +#define CSIO_HWF_FWEVT_PENDING 0x00000004 /* FW events pending */ +#define CSIO_HWF_Q_MEM_ALLOCED 0x00000008 /* Queues have been + * allocated memory. + */ +#define CSIO_HWF_Q_FW_ALLOCED 0x00000010 /* Queues have been + * allocated in FW. + */ +#define CSIO_HWF_VPD_VALID 0x00000020 /* Valid VPD copied */ +#define CSIO_HWF_DEVID_CACHED 0X00000040 /* PCI vendor & device + * id cached */ +#define CSIO_HWF_FWEVT_STOP 0x00000080 /* Stop processing + * FW events + */ +#define CSIO_HWF_USING_SOFT_PARAMS 0x00000100 /* Using FW config + * params + */ +#define CSIO_HWF_HOST_INTR_ENABLED 0x00000200 /* Are host interrupts + * enabled? + */ +#define CSIO_HWF_ROOT_NO_RELAXED_ORDERING 0x00000400 /* Is PCIe relaxed + * ordering enabled + */ + +#define csio_is_hw_intr_enabled(__hw) \ + ((__hw)->flags & CSIO_HWF_HW_INTR_ENABLED) +#define csio_is_host_intr_enabled(__hw) \ + ((__hw)->flags & CSIO_HWF_HOST_INTR_ENABLED) +#define csio_is_hw_master(__hw) ((__hw)->flags & CSIO_HWF_MASTER) +#define csio_is_valid_vpd(__hw) ((__hw)->flags & CSIO_HWF_VPD_VALID) +#define csio_is_dev_id_cached(__hw) ((__hw)->flags & CSIO_HWF_DEVID_CACHED) +#define csio_valid_vpd_copied(__hw) ((__hw)->flags |= CSIO_HWF_VPD_VALID) +#define csio_dev_id_cached(__hw) ((__hw)->flags |= CSIO_HWF_DEVID_CACHED) + +/* Defines for intr_mode */ +enum csio_intr_mode { + CSIO_IM_NONE = 0, + CSIO_IM_INTX = 1, + CSIO_IM_MSI = 2, + CSIO_IM_MSIX = 3, +}; + +/* Master HW structure: One per function */ +struct csio_hw { + struct csio_sm sm; /* State machine: should + * be the 1st member. + */ + spinlock_t lock; /* Lock for hw */ + + struct csio_scsim scsim; /* SCSI module*/ + struct csio_wrm wrm; /* Work request module*/ + struct pci_dev *pdev; /* PCI device */ + + void __iomem *regstart; /* Virtual address of + * register map + */ + /* SCSI queue sets */ + uint32_t num_sqsets; /* Number of SCSI + * queue sets */ + uint32_t num_scsi_msix_cpus; /* Number of CPUs that + * will be used + * for ingress + * processing. 
+ */ + + struct csio_scsi_qset sqset[CSIO_MAX_PPORTS][CSIO_MAX_SCSI_CPU]; + struct csio_scsi_cpu_info scsi_cpu_info[CSIO_MAX_PPORTS]; + + uint32_t evtflag; /* Event flag */ + uint32_t flags; /* HW flags */ + + struct csio_mgmtm mgmtm; /* management module */ + struct csio_mbm mbm; /* Mailbox module */ + + /* Lnodes */ + uint32_t num_lns; /* Number of lnodes */ + struct csio_lnode *rln; /* Root lnode */ + struct list_head sln_head; /* Sibling node list + * list + */ + int intr_iq_idx; /* Forward interrupt + * queue. + */ + int fwevt_iq_idx; /* FW evt queue */ + struct work_struct evtq_work; /* Worker thread for + * HW events. + */ + struct list_head evt_free_q; /* freelist of evt + * elements + */ + struct list_head evt_active_q; /* active evt queue*/ + + /* board related info */ + char name[32]; + char hw_ver[16]; + char model_desc[32]; + char drv_version[32]; + char fwrev_str[32]; + uint32_t optrom_ver; + uint32_t fwrev; + uint32_t tp_vers; + char chip_ver; + uint16_t chip_id; /* Tells T4/T5 chip */ + enum csio_dev_state fw_state; + struct csio_vpd vpd; + + uint8_t pfn; /* Physical Function + * number + */ + uint32_t port_vec; /* Port vector */ + uint8_t num_pports; /* Number of physical + * ports. + */ + uint8_t rst_retries; /* Reset retries */ + uint8_t cur_evt; /* current s/m evt */ + uint8_t prev_evt; /* Previous s/m evt */ + uint32_t dev_num; /* device number */ + struct csio_pport pport[CSIO_MAX_PPORTS]; /* Ports (XGMACs) */ + struct csio_hw_params params; /* Hw parameters */ + + struct dma_pool *scsi_dma_pool; /* DMA pool for SCSI */ + mempool_t *mb_mempool; /* Mailbox memory pool*/ + mempool_t *rnode_mempool; /* rnode memory pool */ + + /* Interrupt */ + enum csio_intr_mode intr_mode; /* INTx, MSI, MSIX */ + uint32_t fwevt_intr_idx; /* FW evt MSIX/interrupt + * index + */ + uint32_t nondata_intr_idx; /* nondata MSIX/intr + * idx + */ + + uint8_t cfg_neq; /* FW configured no of + * egress queues + */ + uint8_t cfg_niq; /* FW configured no of + * iq queues. 
+ */ + + struct csio_fcoe_res_info fres_info; /* Fcoe resource info */ + struct csio_hw_chip_ops *chip_ops; /* T4/T5 Chip specific + * Operations + */ + + /* MSIX vectors */ + struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS]; + + struct dentry *debugfs_root; /* Debug FS */ + struct csio_hw_stats stats; /* Hw statistics */ +}; + +/* Register access macros */ +#define csio_reg(_b, _r) ((_b) + (_r)) + +#define csio_rd_reg8(_h, _r) readb(csio_reg((_h)->regstart, (_r))) +#define csio_rd_reg16(_h, _r) readw(csio_reg((_h)->regstart, (_r))) +#define csio_rd_reg32(_h, _r) readl(csio_reg((_h)->regstart, (_r))) +#define csio_rd_reg64(_h, _r) readq(csio_reg((_h)->regstart, (_r))) + +#define csio_wr_reg8(_h, _v, _r) writeb((_v), \ + csio_reg((_h)->regstart, (_r))) +#define csio_wr_reg16(_h, _v, _r) writew((_v), \ + csio_reg((_h)->regstart, (_r))) +#define csio_wr_reg32(_h, _v, _r) writel((_v), \ + csio_reg((_h)->regstart, (_r))) +#define csio_wr_reg64(_h, _v, _r) writeq((_v), \ + csio_reg((_h)->regstart, (_r))) + +void csio_set_reg_field(struct csio_hw *, uint32_t, uint32_t, uint32_t); + +/* Core clocks <==> uSecs */ +static inline uint32_t +csio_core_ticks_to_us(struct csio_hw *hw, uint32_t ticks) +{ + /* add Core Clock / 2 to round ticks to nearest uS */ + return (ticks * 1000 + hw->vpd.cclk/2) / hw->vpd.cclk; +} + +static inline uint32_t +csio_us_to_core_ticks(struct csio_hw *hw, uint32_t us) +{ + return (us * hw->vpd.cclk) / 1000; +} + +/* Easy access macros */ +#define csio_hw_to_wrm(hw) ((struct csio_wrm *)(&(hw)->wrm)) +#define csio_hw_to_mbm(hw) ((struct csio_mbm *)(&(hw)->mbm)) +#define csio_hw_to_scsim(hw) ((struct csio_scsim *)(&(hw)->scsim)) +#define csio_hw_to_mgmtm(hw) ((struct csio_mgmtm *)(&(hw)->mgmtm)) + +#define CSIO_PCI_BUS(hw) ((hw)->pdev->bus->number) +#define CSIO_PCI_DEV(hw) (PCI_SLOT((hw)->pdev->devfn)) +#define CSIO_PCI_FUNC(hw) (PCI_FUNC((hw)->pdev->devfn)) + +#define csio_set_fwevt_intr_idx(_h, _i) ((_h)->fwevt_intr_idx = (_i)) +#define csio_get_fwevt_intr_idx(_h) ((_h)->fwevt_intr_idx) +#define csio_set_nondata_intr_idx(_h, _i) ((_h)->nondata_intr_idx = (_i)) +#define csio_get_nondata_intr_idx(_h) ((_h)->nondata_intr_idx) + +/* Printing/logging */ +#define CSIO_DEVID(__dev) ((__dev)->dev_num) +#define CSIO_DEVID_LO(__dev) (CSIO_DEVID((__dev)) & 0xFFFF) +#define CSIO_DEVID_HI(__dev) ((CSIO_DEVID((__dev)) >> 16) & 0xFFFF) + +#define csio_info(__hw, __fmt, ...) \ + dev_info(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__) + +#define csio_fatal(__hw, __fmt, ...) \ + dev_crit(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__) + +#define csio_err(__hw, __fmt, ...) \ + dev_err(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__) + +#define csio_warn(__hw, __fmt, ...) \ + dev_warn(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__) + +#ifdef __CSIO_DEBUG__ +#define csio_dbg(__hw, __fmt, ...) \ + csio_info((__hw), __fmt, ##__VA_ARGS__); +#else +#define csio_dbg(__hw, __fmt, ...) 
+#endif + +int csio_hw_wait_op_done_val(struct csio_hw *, int, uint32_t, int, + int, int, uint32_t *); +void csio_hw_tp_wr_bits_indirect(struct csio_hw *, unsigned int, + unsigned int, unsigned int); +int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *); +void csio_hw_intr_disable(struct csio_hw *); +int csio_hw_slow_intr_handler(struct csio_hw *); +int csio_handle_intr_status(struct csio_hw *, unsigned int, + const struct intr_info *); + +fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps); +fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16); +fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32); +fw_port_cap32_t lstatus_to_fwcap(u32 lstatus); + +int csio_hw_start(struct csio_hw *); +int csio_hw_stop(struct csio_hw *); +int csio_hw_reset(struct csio_hw *); +int csio_is_hw_ready(struct csio_hw *); +int csio_is_hw_removing(struct csio_hw *); + +int csio_fwevtq_handler(struct csio_hw *); +void csio_evtq_worker(struct work_struct *); +int csio_enqueue_evt(struct csio_hw *, enum csio_evt, void *, uint16_t); +void csio_evtq_flush(struct csio_hw *hw); + +int csio_request_irqs(struct csio_hw *); +void csio_intr_enable(struct csio_hw *); +void csio_intr_disable(struct csio_hw *, bool); +void csio_hw_fatal_err(struct csio_hw *); + +struct csio_lnode *csio_lnode_alloc(struct csio_hw *); +int csio_config_queues(struct csio_hw *); + +int csio_hw_init(struct csio_hw *); +void csio_hw_exit(struct csio_hw *); +#endif /* ifndef __CSIO_HW_H__ */ diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h new file mode 100644 index 000000000..aaabdbe11 --- /dev/null +++ b/drivers/scsi/csiostor/csio_hw_chip.h @@ -0,0 +1,135 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
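[Aside, not part of the patch] A stand-alone sketch of the csio_core_ticks_to_us()/csio_us_to_core_ticks() helpers declared in csio_hw.h above, showing why cclk/2 is added before the division: the result rounds to the nearest microsecond instead of truncating. It assumes vpd.cclk is the core clock in kHz; the 6250 kHz figure is illustrative only.

#include <stdint.h>
#include <stdio.h>

static uint32_t ticks_to_us(uint32_t cclk_khz, uint32_t ticks)
{
	/* add half the clock before dividing to round to nearest uS */
	return (ticks * 1000 + cclk_khz / 2) / cclk_khz;
}

static uint32_t us_to_ticks(uint32_t cclk_khz, uint32_t us)
{
	return (us * cclk_khz) / 1000;
}

int main(void)
{
	uint32_t cclk = 6250;	/* hypothetical core clock, kHz (vpd.cclk) */

	printf("1000 ticks -> %u us, 160 us -> %u ticks\n",
	       (unsigned)ticks_to_us(cclk, 1000), (unsigned)us_to_ticks(cclk, 160));
	return 0;
}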
+ */ + +#ifndef __CSIO_HW_CHIP_H__ +#define __CSIO_HW_CHIP_H__ + +#include "csio_defs.h" + +/* Define MACRO values */ +#define CSIO_HW_T5 0x5000 +#define CSIO_T5_FCOE_ASIC 0x5600 +#define CSIO_HW_T6 0x6000 +#define CSIO_T6_FCOE_ASIC 0x6600 +#define CSIO_HW_CHIP_MASK 0xF000 + +#define T5_REGMAP_SIZE (332 * 1024) +#define FW_FNAME_T5 "cxgb4/t5fw.bin" +#define FW_CFG_NAME_T5 "cxgb4/t5-config.txt" +#define FW_FNAME_T6 "cxgb4/t6fw.bin" +#define FW_CFG_NAME_T6 "cxgb4/t6-config.txt" + +#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) +#define CHELSIO_CHIP_FPGA 0x100 +#define CHELSIO_CHIP_VERSION(code) (((code) >> 12) & 0xf) +#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) + +#define CHELSIO_T5 0x5 +#define CHELSIO_T6 0x6 + +enum chip_type { + T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), + T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), + T5_FIRST_REV = T5_A0, + T5_LAST_REV = T5_A1, + + T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0), + T6_FIRST_REV = T6_A0, + T6_LAST_REV = T6_A0, +}; + +static inline int csio_is_t5(uint16_t chip) +{ + return (chip == CSIO_HW_T5); +} + +static inline int csio_is_t6(uint16_t chip) +{ + return (chip == CSIO_HW_T6); +} + +/* Define MACRO DEFINITIONS */ +#define CSIO_DEVICE(devid, idx) \ + { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) } + +#include "t4fw_api.h" +#include "t4fw_version.h" + +#define FW_VERSION(chip) ( \ + FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \ + FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \ + FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \ + FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD)) +#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf) + +struct fw_info { + u8 chip; + char *fs_name; + char *fw_mod_name; + struct fw_hdr fw_hdr; +}; + +/* Declare ENUMS */ +enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 }; + +enum { + MEMWIN_APERTURE = 2048, + MEMWIN_BASE = 0x1b800, +}; + +/* Slow path handlers */ +struct intr_info { + unsigned int mask; /* bits to check in interrupt status */ + const char *msg; /* message to print or NULL */ + short stat_idx; /* stat counter to increment or -1 */ + unsigned short fatal; /* whether the condition reported is fatal */ +}; + +/* T4/T5 Chip specific ops */ +struct csio_hw; +struct csio_hw_chip_ops { + int (*chip_set_mem_win)(struct csio_hw *, uint32_t); + void (*chip_pcie_intr_handler)(struct csio_hw *); + uint32_t (*chip_flash_cfg_addr)(struct csio_hw *); + int (*chip_mc_read)(struct csio_hw *, int, uint32_t, + __be32 *, uint64_t *); + int (*chip_edc_read)(struct csio_hw *, int, uint32_t, + __be32 *, uint64_t *); + int (*chip_memory_rw)(struct csio_hw *, u32, int, u32, + u32, uint32_t *, int); + void (*chip_dfs_create_ext_mem)(struct csio_hw *); +}; + +extern struct csio_hw_chip_ops t5_ops; + +#endif /* #ifndef __CSIO_HW_CHIP_H__ */ diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c new file mode 100644 index 000000000..86fded97d --- /dev/null +++ b/drivers/scsi/csiostor/csio_hw_t5.c @@ -0,0 +1,369 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "csio_hw.h" +#include "csio_init.h" + +static int +csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win) +{ + u32 mem_win_base; + /* + * Truncation intentional: we only read the bottom 32-bits of the + * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to + * read BAR0 instead of using pci_resource_start() because we could be + * operating from within a Virtual Machine which is trapping our + * accesses to our Configuration Space and we need to set up the PCI-E + * Memory Window decoders with the actual addresses which will be + * coming across the PCI-E link. + */ + + /* For T5, only relative offset inside the PCIe BAR is passed */ + mem_win_base = MEMWIN_BASE; + + /* + * Set up memory window for accessing adapter memory ranges. (Read + * back MA register to ensure that changes propagate before we attempt + * to use the new values.) + */ + csio_wr_reg32(hw, mem_win_base | BIR_V(0) | + WINDOW_V(ilog2(MEMWIN_APERTURE) - 10), + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win)); + csio_rd_reg32(hw, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win)); + + return 0; +} + +/* + * Interrupt handler for the PCIE module. 
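[Aside, not part of the patch] The WINDOW field programmed by csio_t5_set_mem_win() below is simply log2(aperture) - 10, and csio_t5_memory_rw() later in this file decodes it back as 1 << (window + 10). A tiny stand-alone round-trip check of that encoding; ilog2() is kernel-only, so a plain shift loop stands in for it.

#include <stdio.h>

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int aperture = 2048;                    /* MEMWIN_APERTURE        */
	unsigned int window   = ilog2_u(aperture) - 10;  /* field value written    */
	unsigned int decoded  = 1u << (window + 10);     /* aperture read back     */

	printf("window=%u decoded aperture=%u\n", window, decoded);
	return 0;
}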
+ */ +static void +csio_t5_pcie_intr_handler(struct csio_hw *hw) +{ + static struct intr_info pcie_intr_info[] = { + { MSTGRPPERR_F, "Master Response Read Queue parity error", + -1, 1 }, + { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 }, + { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 }, + { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 }, + { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 }, + { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 }, + { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 }, + { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error", + -1, 1 }, + { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error", + -1, 1 }, + { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 }, + { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 }, + { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 }, + { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 }, + { DREQWRPERR_F, "PCI DMA channel write request parity error", + -1, 1 }, + { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 }, + { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 }, + { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 }, + { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 }, + { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 }, + { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 }, + { FIDPERR_F, "PCI FID parity error", -1, 1 }, + { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 }, + { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 }, + { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 }, + { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error", + -1, 1 }, + { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error", + -1, 1 }, + { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 }, + { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 }, + { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 }, + { READRSPERR_F, "Outbound read error", -1, 0 }, + { 0, NULL, 0, 0 } + }; + + int fat; + fat = csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info); + if (fat) + csio_hw_fatal_err(hw); +} + +/* + * csio_t5_flash_cfg_addr - return the address of the flash configuration file + * @hw: the HW module + * + * Return the address within the flash where the Firmware Configuration + * File is stored. + */ +static unsigned int +csio_t5_flash_cfg_addr(struct csio_hw *hw) +{ + return FLASH_CFG_START; +} + +/* + * csio_t5_mc_read - read from MC through backdoor accesses + * @hw: the hw module + * @idx: index to the register + * @addr: address of first byte requested + * @data: 64 bytes of data containing the requested address + * @ecc: where to store the corresponding 64-bit ECC word + * + * Read 64 bytes of data from MC starting at a 64-byte-aligned address + * that covers the requested address @addr. If @parity is not %NULL it + * is assigned the 64-bit ECC word for the read data. 
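[Aside, not part of the patch] In the csio_t5_mc_read() body that follows, note the fill order: the caller's 64-byte buffer is populated from BIST status register 15 down to register 0, with each word byte-swapped via htonl(). A stand-alone sketch of just that ordering, with the register reads stubbed out by a fake helper.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for csio_rd_reg32(hw, MC_DATA(i)); returns a recognisable value. */
static uint32_t fake_rd_reg32(int i)
{
	return 0x1000u + (uint32_t)i;
}

int main(void)
{
	uint32_t data[16];
	uint32_t *p = data;
	int i;

	/* same loop direction as csio_t5_mc_read(): data[0] comes from status
	 * register 15, data[15] from register 0 (the driver also applies
	 * htonl() to each word, omitted here) */
	for (i = 15; i >= 0; i--)
		*p++ = fake_rd_reg32(i);

	printf("data[0]=0x%x data[15]=0x%x\n", (unsigned)data[0], (unsigned)data[15]);
	return 0;
}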
+ */ +static int +csio_t5_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data, + uint64_t *ecc) +{ + int i; + uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg; + uint32_t mc_bist_data_pattern_reg; + + mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD_A, idx); + mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR_A, idx); + mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN_A, idx); + mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx); + + if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST_F) + return -EBUSY; + csio_wr_reg32(hw, addr & ~0x3fU, mc_bist_cmd_addr_reg); + csio_wr_reg32(hw, 64, mc_bist_cmd_len_reg); + csio_wr_reg32(hw, 0xc, mc_bist_data_pattern_reg); + csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1), + mc_bist_cmd_reg); + i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST_F, + 0, 10, 1, NULL); + if (i) + return i; + +#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA_A, i) + + for (i = 15; i >= 0; i--) + *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i))); + if (ecc) + *ecc = csio_rd_reg64(hw, MC_DATA(16)); +#undef MC_DATA + return 0; +} + +/* + * csio_t5_edc_read - read from EDC through backdoor accesses + * @hw: the hw module + * @idx: which EDC to access + * @addr: address of first byte requested + * @data: 64 bytes of data containing the requested address + * @ecc: where to store the corresponding 64-bit ECC word + * + * Read 64 bytes of data from EDC starting at a 64-byte-aligned address + * that covers the requested address @addr. If @parity is not %NULL it + * is assigned the 64-bit ECC word for the read data. + */ +static int +csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data, + uint64_t *ecc) +{ + int i; + uint32_t edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg; + uint32_t edc_bist_cmd_data_pattern; + +/* + * These macro are missing in t4_regs.h file. 
+ */ +#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) +#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) + + edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD_A, idx); + edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx); + edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx); + edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx); +#undef EDC_REG_T5 +#undef EDC_STRIDE_T5 + + if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST_F) + return -EBUSY; + csio_wr_reg32(hw, addr & ~0x3fU, edc_bist_cmd_addr_reg); + csio_wr_reg32(hw, 64, edc_bist_cmd_len_reg); + csio_wr_reg32(hw, 0xc, edc_bist_cmd_data_pattern); + csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1), + edc_bist_cmd_reg); + i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST_F, + 0, 10, 1, NULL); + if (i) + return i; + +#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA_A, i) + idx) + + for (i = 15; i >= 0; i--) + *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i))); + if (ecc) + *ecc = csio_rd_reg64(hw, EDC_DATA(16)); +#undef EDC_DATA + return 0; +} + +/* + * csio_t5_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window + * @hw: the csio_hw + * @win: PCI-E memory Window to use + * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1 + * @addr: address within indicated memory type + * @len: amount of memory to transfer + * @buf: host memory buffer + * @dir: direction of transfer 1 => read, 0 => write + * + * Reads/writes an [almost] arbitrary memory region in the firmware: the + * firmware memory address, length and host buffer must be aligned on + * 32-bit boundaries. The memory is transferred as a raw byte sequence + * from/to the firmware's memory. If this memory contains data + * structures which contain multi-byte integers, it's the callers + * responsibility to perform appropriate byte order conversions. + */ +static int +csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr, + u32 len, uint32_t *buf, int dir) +{ + u32 pos, start, offset, memoffset; + u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; + + /* + * Argument sanity checks ... + */ + if ((addr & 0x3) || (len & 0x3)) + return -EINVAL; + + /* Offset into the region of memory which is being accessed + * MEM_EDC0 = 0 + * MEM_EDC1 = 1 + * MEM_MC = 2 -- T4 + * MEM_MC0 = 2 -- For T5 + * MEM_MC1 = 3 -- For T5 + */ + edc_size = EDRAM0_SIZE_G(csio_rd_reg32(hw, MA_EDRAM0_BAR_A)); + if (mtype != MEM_MC1) + memoffset = (mtype * (edc_size * 1024 * 1024)); + else { + mc_size = EXT_MEM_SIZE_G(csio_rd_reg32(hw, + MA_EXT_MEMORY_BAR_A)); + memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; + } + + /* Determine the PCIE_MEM_ACCESS_OFFSET */ + addr = addr + memoffset; + + /* + * Each PCI-E Memory Window is programmed with a window size -- or + * "aperture" -- which controls the granularity of its mapping onto + * adapter memory. We need to grab that aperture in order to know + * how to use the specified window. The window is also programmed + * with the base address of the Memory Window in BAR0's address + * space. For T4 this is an absolute PCI-E Bus Address. For T5 + * the address is relative to BAR0. 
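[Aside, not part of the patch] A stand-alone sketch of the memoffset computation in csio_t5_memory_rw() above: EDC0, EDC1 and MC0 are laid out back to back in units of edc_size MB, and MC1 starts after MC0's mc_size. The 5 MB / 256 MB sizes below are hypothetical stand-ins for the EDRAM0_SIZE_G() and EXT_MEM_SIZE_G() register fields.

#include <stdio.h>

enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };

int main(void)
{
	unsigned int edc_size = 5;    /* MB, hypothetical EDRAM0 size  */
	unsigned int mc_size  = 256;  /* MB, hypothetical MC0 size     */
	unsigned int mtype, memoffset;

	for (mtype = MEM_EDC0; mtype <= MEM_MC1; mtype++) {
		if (mtype != MEM_MC1)
			memoffset = mtype * (edc_size * 1024 * 1024);
		else
			memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
		printf("mtype %u -> offset %u bytes\n", mtype, memoffset);
	}
	return 0;
}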
+ */ + mem_reg = csio_rd_reg32(hw, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win)); + mem_aperture = 1 << (WINDOW_V(mem_reg) + 10); + mem_base = PCIEOFST_G(mem_reg) << 10; + + start = addr & ~(mem_aperture-1); + offset = addr - start; + win_pf = PFNUM_V(hw->pfn); + + csio_dbg(hw, "csio_t5_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n", + mem_reg, mem_aperture); + csio_dbg(hw, "csio_t5_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n", + mem_base, memoffset); + csio_dbg(hw, "csio_t5_memory_rw: start:0x%x, offset:0x%x, win_pf:%d\n", + start, offset, win_pf); + csio_dbg(hw, "csio_t5_memory_rw: mtype: %d, addr: 0x%x, len: %d\n", + mtype, addr, len); + + for (pos = start; len > 0; pos += mem_aperture, offset = 0) { + /* + * Move PCI-E Memory Window to our current transfer + * position. Read it back to ensure that changes propagate + * before we attempt to use the new value. + */ + csio_wr_reg32(hw, pos | win_pf, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win)); + csio_rd_reg32(hw, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win)); + + while (offset < mem_aperture && len > 0) { + if (dir) + *buf++ = csio_rd_reg32(hw, mem_base + offset); + else + csio_wr_reg32(hw, *buf++, mem_base + offset); + + offset += sizeof(__be32); + len -= sizeof(__be32); + } + } + return 0; +} + +/* + * csio_t5_dfs_create_ext_mem - setup debugfs for MC0 or MC1 to read the values + * @hw: the csio_hw + * + * This function creates files in the debugfs with external memory region + * MC0 & MC1. + */ +static void +csio_t5_dfs_create_ext_mem(struct csio_hw *hw) +{ + u32 size; + int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A); + + if (i & EXT_MEM_ENABLE_F) { + size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR_A); + csio_add_debugfs_mem(hw, "mc0", MEM_MC0, + EXT_MEM_SIZE_G(size)); + } + if (i & EXT_MEM1_ENABLE_F) { + size = csio_rd_reg32(hw, MA_EXT_MEMORY1_BAR_A); + csio_add_debugfs_mem(hw, "mc1", MEM_MC1, + EXT_MEM_SIZE_G(size)); + } +} + +/* T5 adapter specific function */ +struct csio_hw_chip_ops t5_ops = { + .chip_set_mem_win = csio_t5_set_mem_win, + .chip_pcie_intr_handler = csio_t5_pcie_intr_handler, + .chip_flash_cfg_addr = csio_t5_flash_cfg_addr, + .chip_mc_read = csio_t5_mc_read, + .chip_edc_read = csio_t5_edc_read, + .chip_memory_rw = csio_t5_memory_rw, + .chip_dfs_create_ext_mem = csio_t5_dfs_create_ext_mem, +}; diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c new file mode 100644 index 000000000..0c32faefa --- /dev/null +++ b/drivers/scsi/csiostor/csio_init.c @@ -0,0 +1,1256 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "csio_init.h" +#include "csio_defs.h" + +#define CSIO_MIN_MEMPOOL_SZ 64 + +static struct dentry *csio_debugfs_root; + +static struct scsi_transport_template *csio_fcoe_transport; +static struct scsi_transport_template *csio_fcoe_transport_vport; + +/* + * debugfs support + */ +static ssize_t +csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + loff_t pos = *ppos; + loff_t avail = file_inode(file)->i_size; + unsigned int mem = (uintptr_t)file->private_data & 3; + struct csio_hw *hw = file->private_data - mem; + + if (pos < 0) + return -EINVAL; + if (pos >= avail) + return 0; + if (count > avail - pos) + count = avail - pos; + + while (count) { + size_t len; + int ret, ofst; + __be32 data[16]; + + if (mem == MEM_MC) + ret = hw->chip_ops->chip_mc_read(hw, 0, pos, + data, NULL); + else + ret = hw->chip_ops->chip_edc_read(hw, mem, pos, + data, NULL); + if (ret) + return ret; + + ofst = pos % sizeof(data); + len = min(count, sizeof(data) - ofst); + if (copy_to_user(buf, (u8 *)data + ofst, len)) + return -EFAULT; + + buf += len; + pos += len; + count -= len; + } + count = pos - *ppos; + *ppos = pos; + return count; +} + +static const struct file_operations csio_mem_debugfs_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = csio_mem_read, + .llseek = default_llseek, +}; + +void csio_add_debugfs_mem(struct csio_hw *hw, const char *name, + unsigned int idx, unsigned int size_mb) +{ + debugfs_create_file_size(name, S_IRUSR, hw->debugfs_root, + (void *)hw + idx, &csio_mem_debugfs_fops, + size_mb << 20); +} + +static int csio_setup_debugfs(struct csio_hw *hw) +{ + int i; + + if (IS_ERR_OR_NULL(hw->debugfs_root)) + return -1; + + i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A); + if (i & EDRAM0_ENABLE_F) + csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5); + if (i & EDRAM1_ENABLE_F) + csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5); + + hw->chip_ops->chip_dfs_create_ext_mem(hw); + return 0; +} + +/* + * csio_dfs_create - Creates and sets up per-hw debugfs. + * + */ +static int +csio_dfs_create(struct csio_hw *hw) +{ + if (csio_debugfs_root) { + hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev), + csio_debugfs_root); + csio_setup_debugfs(hw); + } + + return 0; +} + +/* + * csio_dfs_destroy - Destroys per-hw debugfs. + */ +static void +csio_dfs_destroy(struct csio_hw *hw) +{ + debugfs_remove_recursive(hw->debugfs_root); +} + +/* + * csio_dfs_init - Debug filesystem initialization for the module. + * + */ +static void +csio_dfs_init(void) +{ + csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); +} + +/* + * csio_dfs_exit - debugfs cleanup for the module. + */ +static void +csio_dfs_exit(void) +{ + debugfs_remove(csio_debugfs_root); +} + +/* + * csio_pci_init - PCI initialization. + * @pdev: PCI device. + * @bars: Bitmask of bars to be requested. 
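[Aside, not part of the patch] csio_mem_read() above recovers both the hw pointer and the memory index from file->private_data because csio_add_debugfs_mem() stored hw + idx, relying on the hw allocation being at least 4-byte aligned so the index fits in the low two bits. A stand-alone sketch of that tagging trick, using char * in place of the driver's void * arithmetic and malloc() in place of kzalloc().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *hw = malloc(64);            /* stand-in for struct csio_hw       */
	unsigned int idx = 2;             /* e.g. MEM_MC0                      */

	if (!hw)
		return 1;

	{
		char *tagged = hw + idx;                /* what debugfs stores */
		unsigned int mem = (uintptr_t)tagged & 3;
		char *recovered = tagged - mem;         /* back to the hw base */

		printf("idx=%u recovered==hw: %s\n", mem,
		       recovered == hw ? "yes" : "no");
	}
	free(hw);
	return 0;
}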
+ * + * Initializes the PCI function by enabling MMIO, setting bus + * mastership and setting DMA mask. + */ +static int +csio_pci_init(struct pci_dev *pdev, int *bars) +{ + int rv = -ENODEV; + + *bars = pci_select_bars(pdev, IORESOURCE_MEM); + + if (pci_enable_device_mem(pdev)) + goto err; + + if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME)) + goto err_disable_device; + + pci_set_master(pdev); + pci_try_set_mwi(pdev); + + rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rv) + rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rv) { + rv = -ENODEV; + dev_err(&pdev->dev, "No suitable DMA available.\n"); + goto err_release_regions; + } + + return 0; + +err_release_regions: + pci_release_selected_regions(pdev, *bars); +err_disable_device: + pci_disable_device(pdev); +err: + return rv; + +} + +/* + * csio_pci_exit - PCI unitialization. + * @pdev: PCI device. + * @bars: Bars to be released. + * + */ +static void +csio_pci_exit(struct pci_dev *pdev, int *bars) +{ + pci_release_selected_regions(pdev, *bars); + pci_disable_device(pdev); +} + +/* + * csio_hw_init_workers - Initialize the HW module's worker threads. + * @hw: HW module. + * + */ +static void +csio_hw_init_workers(struct csio_hw *hw) +{ + INIT_WORK(&hw->evtq_work, csio_evtq_worker); +} + +static void +csio_hw_exit_workers(struct csio_hw *hw) +{ + cancel_work_sync(&hw->evtq_work); +} + +static int +csio_create_queues(struct csio_hw *hw) +{ + int i, j; + struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); + int rv; + struct csio_scsi_cpu_info *info; + + if (hw->flags & CSIO_HWF_Q_FW_ALLOCED) + return 0; + + if (hw->intr_mode != CSIO_IM_MSIX) { + rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx, + 0, hw->pport[0].portid, false, NULL); + if (rv != 0) { + csio_err(hw, " Forward Interrupt IQ failed!: %d\n", rv); + return rv; + } + } + + /* FW event queue */ + rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx, + csio_get_fwevt_intr_idx(hw), + hw->pport[0].portid, true, NULL); + if (rv != 0) { + csio_err(hw, "FW event IQ config failed!: %d\n", rv); + return rv; + } + + /* Create mgmt queue */ + rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx, + mgmtm->iq_idx, hw->pport[0].portid, NULL); + + if (rv != 0) { + csio_err(hw, "Mgmt EQ create failed!: %d\n", rv); + goto err; + } + + /* Create SCSI queues */ + for (i = 0; i < hw->num_pports; i++) { + info = &hw->scsi_cpu_info[i]; + + for (j = 0; j < info->max_cpus; j++) { + struct csio_scsi_qset *sqset = &hw->sqset[i][j]; + + rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx, + sqset->intr_idx, i, false, NULL); + if (rv != 0) { + csio_err(hw, + "SCSI module IQ config failed [%d][%d]:%d\n", + i, j, rv); + goto err; + } + rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx, + sqset->iq_idx, i, NULL); + if (rv != 0) { + csio_err(hw, + "SCSI module EQ config failed [%d][%d]:%d\n", + i, j, rv); + goto err; + } + } /* for all CPUs */ + } /* For all ports */ + + hw->flags |= CSIO_HWF_Q_FW_ALLOCED; + return 0; +err: + csio_wr_destroy_queues(hw, true); + return -EINVAL; +} + +/* + * csio_config_queues - Configure the DMA queues. + * @hw: HW module. + * + * Allocates memory for queues are registers them with FW. 
+ */ +int +csio_config_queues(struct csio_hw *hw) +{ + int i, j, idx, k = 0; + int rv; + struct csio_scsi_qset *sqset; + struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); + struct csio_scsi_qset *orig; + struct csio_scsi_cpu_info *info; + + if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED) + return csio_create_queues(hw); + + /* Calculate number of SCSI queues for MSIX we would like */ + hw->num_scsi_msix_cpus = num_online_cpus(); + hw->num_sqsets = num_online_cpus() * hw->num_pports; + + if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) { + hw->num_sqsets = CSIO_MAX_SCSI_QSETS; + hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU; + } + + /* Initialize max_cpus, may get reduced during msix allocations */ + for (i = 0; i < hw->num_pports; i++) + hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus; + + csio_dbg(hw, "nsqsets:%d scpus:%d\n", + hw->num_sqsets, hw->num_scsi_msix_cpus); + + csio_intr_enable(hw); + + if (hw->intr_mode != CSIO_IM_MSIX) { + + /* Allocate Forward interrupt iq. */ + hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE, + CSIO_INTR_WRSIZE, CSIO_INGRESS, + (void *)hw, 0, 0, NULL); + if (hw->intr_iq_idx == -1) { + csio_err(hw, + "Forward interrupt queue creation failed\n"); + goto intr_disable; + } + } + + /* Allocate the FW evt queue */ + hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE, + CSIO_FWEVT_WRSIZE, + CSIO_INGRESS, (void *)hw, + CSIO_FWEVT_FLBUFS, 0, + csio_fwevt_intx_handler); + if (hw->fwevt_iq_idx == -1) { + csio_err(hw, "FW evt queue creation failed\n"); + goto intr_disable; + } + + /* Allocate the mgmt queue */ + mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE, + CSIO_MGMT_EQ_WRSIZE, + CSIO_EGRESS, (void *)hw, 0, 0, NULL); + if (mgmtm->eq_idx == -1) { + csio_err(hw, "Failed to alloc egress queue for mgmt module\n"); + goto intr_disable; + } + + /* Use FW IQ for MGMT req completion */ + mgmtm->iq_idx = hw->fwevt_iq_idx; + + /* Allocate SCSI queues */ + for (i = 0; i < hw->num_pports; i++) { + info = &hw->scsi_cpu_info[i]; + + for (j = 0; j < hw->num_scsi_msix_cpus; j++) { + sqset = &hw->sqset[i][j]; + + if (j >= info->max_cpus) { + k = j % info->max_cpus; + orig = &hw->sqset[i][k]; + sqset->eq_idx = orig->eq_idx; + sqset->iq_idx = orig->iq_idx; + continue; + } + + idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0, + CSIO_EGRESS, (void *)hw, 0, 0, + NULL); + if (idx == -1) { + csio_err(hw, "EQ creation failed for idx:%d\n", + idx); + goto intr_disable; + } + + sqset->eq_idx = idx; + + idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE, + CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS, + (void *)hw, 0, 0, + csio_scsi_intx_handler); + if (idx == -1) { + csio_err(hw, "IQ creation failed for idx:%d\n", + idx); + goto intr_disable; + } + sqset->iq_idx = idx; + } /* for all CPUs */ + } /* For all ports */ + + hw->flags |= CSIO_HWF_Q_MEM_ALLOCED; + + rv = csio_create_queues(hw); + if (rv != 0) + goto intr_disable; + + /* + * Now request IRQs for the vectors. In the event of a failure, + * cleanup is handled internally by this function. 
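[Aside, not part of the patch] A stand-alone sketch of the CPU-to-queue-set mapping in csio_config_queues() above: once j reaches info->max_cpus, CPU j does not get its own ingress/egress pair and instead shares the queue set of CPU j % max_cpus. The 12-online-CPU / 8-usable-qset split below is hypothetical.

#include <stdio.h>

int main(void)
{
	int num_cpus = 12, max_cpus = 8, j;

	for (j = 0; j < num_cpus; j++) {
		int owner = (j < max_cpus) ? j : j % max_cpus;

		printf("cpu %2d -> qset %d%s\n", j, owner,
		       j < max_cpus ? "" : " (shared)");
	}
	return 0;
}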
+ */ + rv = csio_request_irqs(hw); + if (rv != 0) + return -EINVAL; + + return 0; + +intr_disable: + csio_intr_disable(hw, false); + + return -EINVAL; +} + +static int +csio_resource_alloc(struct csio_hw *hw) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + int rv = -ENOMEM; + + wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ + + CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ); + + hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ, + sizeof(struct csio_mb)); + if (!hw->mb_mempool) + goto err; + + hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ, + sizeof(struct csio_rnode)); + if (!hw->rnode_mempool) + goto err_free_mb_mempool; + + hw->scsi_dma_pool = dma_pool_create("csio_scsi_dma_pool", + &hw->pdev->dev, CSIO_SCSI_RSP_LEN, + 8, 0); + if (!hw->scsi_dma_pool) + goto err_free_rn_pool; + + return 0; + +err_free_rn_pool: + mempool_destroy(hw->rnode_mempool); + hw->rnode_mempool = NULL; +err_free_mb_mempool: + mempool_destroy(hw->mb_mempool); + hw->mb_mempool = NULL; +err: + return rv; +} + +static void +csio_resource_free(struct csio_hw *hw) +{ + dma_pool_destroy(hw->scsi_dma_pool); + hw->scsi_dma_pool = NULL; + mempool_destroy(hw->rnode_mempool); + hw->rnode_mempool = NULL; + mempool_destroy(hw->mb_mempool); + hw->mb_mempool = NULL; +} + +/* + * csio_hw_alloc - Allocate and initialize the HW module. + * @pdev: PCI device. + * + * Allocates HW structure, DMA, memory resources, maps BARS to + * host memory and initializes HW module. + */ +static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev) +{ + struct csio_hw *hw; + + hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL); + if (!hw) + goto err; + + hw->pdev = pdev; + strncpy(hw->drv_version, CSIO_DRV_VERSION, 32); + + /* memory pool/DMA pool allocation */ + if (csio_resource_alloc(hw)) + goto err_free_hw; + + /* Get the start address of registers from BAR 0 */ + hw->regstart = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!hw->regstart) { + csio_err(hw, "Could not map BAR 0, regstart = %p\n", + hw->regstart); + goto err_resource_free; + } + + csio_hw_init_workers(hw); + + if (csio_hw_init(hw)) + goto err_unmap_bar; + + csio_dfs_create(hw); + + csio_dbg(hw, "hw:%p\n", hw); + + return hw; + +err_unmap_bar: + csio_hw_exit_workers(hw); + iounmap(hw->regstart); +err_resource_free: + csio_resource_free(hw); +err_free_hw: + kfree(hw); +err: + return NULL; +} + +/* + * csio_hw_free - Uninitialize and free the HW module. + * @hw: The HW module + * + * Disable interrupts, uninit the HW module, free resources, free hw. + */ +static void +csio_hw_free(struct csio_hw *hw) +{ + csio_intr_disable(hw, true); + csio_hw_exit_workers(hw); + csio_hw_exit(hw); + iounmap(hw->regstart); + csio_dfs_destroy(hw); + csio_resource_free(hw); + kfree(hw); +} + +/** + * csio_shost_init - Create and initialize the lnode module. + * @hw: The HW module. + * @dev: The device associated with this invocation. + * @probe: Called from probe context or not? + * @pln: Parent lnode if any. + * + * Allocates lnode structure via scsi_host_alloc, initializes + * shost, initializes lnode module and registers with SCSI ML + * via scsi_host_add. This function is shared between physical and + * virtual node ports. 
+ */ +struct csio_lnode * +csio_shost_init(struct csio_hw *hw, struct device *dev, + bool probe, struct csio_lnode *pln) +{ + struct Scsi_Host *shost = NULL; + struct csio_lnode *ln; + + csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth; + csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth; + + /* + * hw->pdev is the physical port's PCI dev structure, + * which will be different from the NPIV dev structure. + */ + if (dev == &hw->pdev->dev) + shost = scsi_host_alloc( + &csio_fcoe_shost_template, + sizeof(struct csio_lnode)); + else + shost = scsi_host_alloc( + &csio_fcoe_shost_vport_template, + sizeof(struct csio_lnode)); + + if (!shost) + goto err; + + ln = shost_priv(shost); + memset(ln, 0, sizeof(struct csio_lnode)); + + /* Link common lnode to this lnode */ + ln->dev_num = (shost->host_no << 16); + + shost->can_queue = CSIO_MAX_QUEUE; + shost->this_id = -1; + shost->unique_id = shost->host_no; + shost->max_cmd_len = 16; /* Max CDB length supported */ + shost->max_id = min_t(uint32_t, csio_fcoe_rnodes, + hw->fres_info.max_ssns); + shost->max_lun = CSIO_MAX_LUN; + if (dev == &hw->pdev->dev) + shost->transportt = csio_fcoe_transport; + else + shost->transportt = csio_fcoe_transport_vport; + + /* root lnode */ + if (!hw->rln) + hw->rln = ln; + + /* Other initialization here: Common, Transport specific */ + if (csio_lnode_init(ln, hw, pln)) + goto err_shost_put; + + if (scsi_add_host_with_dma(shost, dev, &hw->pdev->dev)) + goto err_lnode_exit; + + return ln; + +err_lnode_exit: + csio_lnode_exit(ln); +err_shost_put: + scsi_host_put(shost); +err: + return NULL; +} + +/** + * csio_shost_exit - De-instantiate the shost. + * @ln: The lnode module corresponding to the shost. + * + */ +void +csio_shost_exit(struct csio_lnode *ln) +{ + struct Scsi_Host *shost = csio_ln_to_shost(ln); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + /* Inform transport */ + fc_remove_host(shost); + + /* Inform SCSI ML */ + scsi_remove_host(shost); + + /* Flush all the events, so that any rnode removal events + * already queued are all handled, before we remove the lnode. 
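[Aside, not part of the patch] csio_shost_init() above packs the SCSI host number into the upper 16 bits of the lnode's dev_num, which the CSIO_DEVID_HI/CSIO_DEVID_LO logging macros in csio_hw.h split back out. A stand-alone sketch of that encode/decode; host number 7 is hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t host_no = 7;
	uint32_t dev_num = host_no << 16;   /* as in ln->dev_num = (shost->host_no << 16) */

	printf("dev_num=0x%08x hi=%u lo=%u\n", (unsigned)dev_num,
	       (unsigned)((dev_num >> 16) & 0xFFFF), (unsigned)(dev_num & 0xFFFF));
	return 0;
}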
+ */ + spin_lock_irq(&hw->lock); + csio_evtq_flush(hw); + spin_unlock_irq(&hw->lock); + + csio_lnode_exit(ln); + scsi_host_put(shost); +} + +struct csio_lnode * +csio_lnode_alloc(struct csio_hw *hw) +{ + return csio_shost_init(hw, &hw->pdev->dev, false, NULL); +} + +void +csio_lnodes_block_request(struct csio_hw *hw) +{ + struct Scsi_Host *shost; + struct csio_lnode *sln; + struct csio_lnode *ln; + struct list_head *cur_ln, *cur_cln; + struct csio_lnode **lnode_list; + int cur_cnt = 0, ii; + + lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns), + GFP_KERNEL); + if (!lnode_list) { + csio_err(hw, "Failed to allocate lnodes_list"); + return; + } + + spin_lock_irq(&hw->lock); + /* Traverse sibling lnodes */ + list_for_each(cur_ln, &hw->sln_head) { + sln = (struct csio_lnode *) cur_ln; + lnode_list[cur_cnt++] = sln; + + /* Traverse children lnodes */ + list_for_each(cur_cln, &sln->cln_head) + lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln; + } + spin_unlock_irq(&hw->lock); + + for (ii = 0; ii < cur_cnt; ii++) { + csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]); + ln = lnode_list[ii]; + shost = csio_ln_to_shost(ln); + scsi_block_requests(shost); + + } + kfree(lnode_list); +} + +void +csio_lnodes_unblock_request(struct csio_hw *hw) +{ + struct csio_lnode *ln; + struct Scsi_Host *shost; + struct csio_lnode *sln; + struct list_head *cur_ln, *cur_cln; + struct csio_lnode **lnode_list; + int cur_cnt = 0, ii; + + lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns), + GFP_KERNEL); + if (!lnode_list) { + csio_err(hw, "Failed to allocate lnodes_list"); + return; + } + + spin_lock_irq(&hw->lock); + /* Traverse sibling lnodes */ + list_for_each(cur_ln, &hw->sln_head) { + sln = (struct csio_lnode *) cur_ln; + lnode_list[cur_cnt++] = sln; + + /* Traverse children lnodes */ + list_for_each(cur_cln, &sln->cln_head) + lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln; + } + spin_unlock_irq(&hw->lock); + + for (ii = 0; ii < cur_cnt; ii++) { + csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]); + ln = lnode_list[ii]; + shost = csio_ln_to_shost(ln); + scsi_unblock_requests(shost); + } + kfree(lnode_list); +} + +void +csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid) +{ + struct csio_lnode *ln; + struct Scsi_Host *shost; + struct csio_lnode *sln; + struct list_head *cur_ln, *cur_cln; + struct csio_lnode **lnode_list; + int cur_cnt = 0, ii; + + lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns), + GFP_KERNEL); + if (!lnode_list) { + csio_err(hw, "Failed to allocate lnodes_list"); + return; + } + + spin_lock_irq(&hw->lock); + /* Traverse sibling lnodes */ + list_for_each(cur_ln, &hw->sln_head) { + sln = (struct csio_lnode *) cur_ln; + if (sln->portid != portid) + continue; + + lnode_list[cur_cnt++] = sln; + + /* Traverse children lnodes */ + list_for_each(cur_cln, &sln->cln_head) + lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln; + } + spin_unlock_irq(&hw->lock); + + for (ii = 0; ii < cur_cnt; ii++) { + csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]); + ln = lnode_list[ii]; + shost = csio_ln_to_shost(ln); + scsi_block_requests(shost); + } + kfree(lnode_list); +} + +void +csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid) +{ + struct csio_lnode *ln; + struct Scsi_Host *shost; + struct csio_lnode *sln; + struct list_head *cur_ln, *cur_cln; + struct csio_lnode **lnode_list; + int cur_cnt = 0, ii; + + lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns), + GFP_KERNEL); + if (!lnode_list) { + 
csio_err(hw, "Failed to allocate lnodes_list"); + return; + } + + spin_lock_irq(&hw->lock); + /* Traverse sibling lnodes */ + list_for_each(cur_ln, &hw->sln_head) { + sln = (struct csio_lnode *) cur_ln; + if (sln->portid != portid) + continue; + lnode_list[cur_cnt++] = sln; + + /* Traverse children lnodes */ + list_for_each(cur_cln, &sln->cln_head) + lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln; + } + spin_unlock_irq(&hw->lock); + + for (ii = 0; ii < cur_cnt; ii++) { + csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]); + ln = lnode_list[ii]; + shost = csio_ln_to_shost(ln); + scsi_unblock_requests(shost); + } + kfree(lnode_list); +} + +void +csio_lnodes_exit(struct csio_hw *hw, bool npiv) +{ + struct csio_lnode *sln; + struct csio_lnode *ln; + struct list_head *cur_ln, *cur_cln; + struct csio_lnode **lnode_list; + int cur_cnt = 0, ii; + + lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns), + GFP_KERNEL); + if (!lnode_list) { + csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n"); + return; + } + + /* Get all child lnodes(NPIV ports) */ + spin_lock_irq(&hw->lock); + list_for_each(cur_ln, &hw->sln_head) { + sln = (struct csio_lnode *) cur_ln; + + /* Traverse children lnodes */ + list_for_each(cur_cln, &sln->cln_head) + lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln; + } + spin_unlock_irq(&hw->lock); + + /* Delete NPIV lnodes */ + for (ii = 0; ii < cur_cnt; ii++) { + csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]); + ln = lnode_list[ii]; + fc_vport_terminate(ln->fc_vport); + } + + /* Delete only npiv lnodes */ + if (npiv) + goto free_lnodes; + + cur_cnt = 0; + /* Get all physical lnodes */ + spin_lock_irq(&hw->lock); + /* Traverse sibling lnodes */ + list_for_each(cur_ln, &hw->sln_head) { + sln = (struct csio_lnode *) cur_ln; + lnode_list[cur_cnt++] = sln; + } + spin_unlock_irq(&hw->lock); + + /* Delete physical lnodes */ + for (ii = 0; ii < cur_cnt; ii++) { + csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]); + csio_shost_exit(lnode_list[ii]); + } + +free_lnodes: + kfree(lnode_list); +} + +/* + * csio_lnode_init_post: Set lnode attributes after starting HW. + * @ln: lnode. + * + */ +static void +csio_lnode_init_post(struct csio_lnode *ln) +{ + struct Scsi_Host *shost = csio_ln_to_shost(ln); + + csio_fchost_attr_init(ln); + + scsi_scan_host(shost); +} + +/* + * csio_probe_one - Instantiate this function. + * @pdev: PCI device + * @id: Device ID + * + * This is the .probe() callback of the driver. This function: + * - Initializes the PCI function by enabling MMIO, setting bus + * mastership and setting DMA mask. + * - Allocates HW structure, DMA, memory resources, maps BARS to + * host memory and initializes HW module. + * - Allocates lnode structure via scsi_host_alloc, initializes + * shost, initialized lnode module and registers with SCSI ML + * via scsi_host_add. + * - Enables interrupts, and starts the chip by kicking off the + * HW state machine. + * - Once hardware is ready, initiated scan of the host via + * scsi_scan_host. 
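[Aside, not part of the patch] The csio_lnodes_block/unblock helpers above all follow the same pattern: snapshot the sibling and child lnode lists into a flat array while holding hw->lock, then call scsi_block_requests()/scsi_unblock_requests() on each shost with the lock dropped. A stand-alone sketch of that snapshot-then-act pattern, with a pthread mutex and a fixed array standing in for the kernel spinlock and lnode lists.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int live_nodes[4] = { 1, 2, 3, 4 };   /* stand-in for the lnode lists */

int main(void)
{
	int snapshot[4], count = 0, i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < 4; i++)
		snapshot[count++] = live_nodes[i];   /* collect under the lock */
	pthread_mutex_unlock(&lock);

	for (i = 0; i < count; i++)                  /* act outside the lock   */
		printf("blocking I/O on lnode %d\n", snapshot[i]);
	return 0;
}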
+ */ +static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int rv; + int bars; + int i; + struct csio_hw *hw; + struct csio_lnode *ln; + + /* probe only T5 and T6 cards */ + if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)) && + !csio_is_t6((pdev->device & CSIO_HW_CHIP_MASK))) + return -ENODEV; + + rv = csio_pci_init(pdev, &bars); + if (rv) + goto err; + + hw = csio_hw_alloc(pdev); + if (!hw) { + rv = -ENODEV; + goto err_pci_exit; + } + + if (!pcie_relaxed_ordering_enabled(pdev)) + hw->flags |= CSIO_HWF_ROOT_NO_RELAXED_ORDERING; + + pci_set_drvdata(pdev, hw); + + rv = csio_hw_start(hw); + if (rv) { + if (rv == -EINVAL) { + dev_err(&pdev->dev, + "Failed to start FW, continuing in debug mode.\n"); + return 0; + } + goto err_lnode_exit; + } + + sprintf(hw->fwrev_str, "%u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(hw->fwrev), + FW_HDR_FW_VER_MINOR_G(hw->fwrev), + FW_HDR_FW_VER_MICRO_G(hw->fwrev), + FW_HDR_FW_VER_BUILD_G(hw->fwrev)); + + for (i = 0; i < hw->num_pports; i++) { + ln = csio_shost_init(hw, &pdev->dev, true, NULL); + if (!ln) { + rv = -ENODEV; + break; + } + /* Initialize portid */ + ln->portid = hw->pport[i].portid; + + spin_lock_irq(&hw->lock); + if (csio_lnode_start(ln) != 0) + rv = -ENODEV; + spin_unlock_irq(&hw->lock); + + if (rv) + break; + + csio_lnode_init_post(ln); + } + + if (rv) + goto err_lnode_exit; + + return 0; + +err_lnode_exit: + csio_lnodes_block_request(hw); + spin_lock_irq(&hw->lock); + csio_hw_stop(hw); + spin_unlock_irq(&hw->lock); + csio_lnodes_unblock_request(hw); + csio_lnodes_exit(hw, 0); + csio_hw_free(hw); +err_pci_exit: + csio_pci_exit(pdev, &bars); +err: + dev_err(&pdev->dev, "probe of device failed: %d\n", rv); + return rv; +} + +/* + * csio_remove_one - Remove one instance of the driver at this PCI function. + * @pdev: PCI device + * + * Used during hotplug operation. + */ +static void csio_remove_one(struct pci_dev *pdev) +{ + struct csio_hw *hw = pci_get_drvdata(pdev); + int bars = pci_select_bars(pdev, IORESOURCE_MEM); + + csio_lnodes_block_request(hw); + spin_lock_irq(&hw->lock); + + /* Stops lnode, Rnode s/m + * Quiesce IOs. + * All sessions with remote ports are unregistered. + */ + csio_hw_stop(hw); + spin_unlock_irq(&hw->lock); + csio_lnodes_unblock_request(hw); + + csio_lnodes_exit(hw, 0); + csio_hw_free(hw); + csio_pci_exit(pdev, &bars); +} + +/* + * csio_pci_error_detected - PCI error was detected + * @pdev: PCI device + * + */ +static pci_ers_result_t +csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct csio_hw *hw = pci_get_drvdata(pdev); + + csio_lnodes_block_request(hw); + spin_lock_irq(&hw->lock); + + /* Post PCI error detected evt to HW s/m + * HW s/m handles this evt by quiescing IOs, unregisters rports + * and finally takes the device to offline. + */ + csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED); + spin_unlock_irq(&hw->lock); + csio_lnodes_unblock_request(hw); + csio_lnodes_exit(hw, 0); + csio_intr_disable(hw, true); + pci_disable_device(pdev); + return state == pci_channel_io_perm_failure ? + PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; +} + +/* + * csio_pci_slot_reset - PCI slot has been reset. 
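The error handlers in this file implement the standard three-step PCI error-recovery contract: the PCI core first calls .error_detected when a channel error is reported, then .slot_reset after the link has been reset (unless the detect handler asked to disconnect), and finally .resume once I/O may restart. A bare skeleton of that contract is sketched below; the my_* names are illustrative and the bodies are reduced to comments.

#include <linux/pci.h>

/* Skeleton of the PCI error-recovery callbacks and their ordering. */
static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
					  pci_channel_state_t state)
{
	/* Step 1: quiesce I/O and decide whether recovery makes sense. */
	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
{
	/* Step 2: the link was reset; re-enable and re-init the device. */
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_set_master(pdev);
	pci_restore_state(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void my_resume(struct pci_dev *pdev)
{
	/* Step 3: recovery succeeded; restart normal I/O. */
}

static const struct pci_error_handlers my_err_handler = {
	.error_detected	= my_error_detected,
	.slot_reset	= my_slot_reset,
	.resume		= my_resume,
};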
+ * @pdev: PCI device + * + */ +static pci_ers_result_t +csio_pci_slot_reset(struct pci_dev *pdev) +{ + struct csio_hw *hw = pci_get_drvdata(pdev); + int ready; + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "cannot re-enable device in slot reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + /* Bring HW s/m to ready state. + * but don't resume IOs. + */ + spin_lock_irq(&hw->lock); + csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET); + ready = csio_is_hw_ready(hw); + spin_unlock_irq(&hw->lock); + + if (ready) { + return PCI_ERS_RESULT_RECOVERED; + } else { + dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } +} + +/* + * csio_pci_resume - Resume normal operations + * @pdev: PCI device + * + */ +static void +csio_pci_resume(struct pci_dev *pdev) +{ + struct csio_hw *hw = pci_get_drvdata(pdev); + struct csio_lnode *ln; + int rv = 0; + int i; + + /* Bring the LINK UP and Resume IO */ + + for (i = 0; i < hw->num_pports; i++) { + ln = csio_shost_init(hw, &pdev->dev, true, NULL); + if (!ln) { + rv = -ENODEV; + break; + } + /* Initialize portid */ + ln->portid = hw->pport[i].portid; + + spin_lock_irq(&hw->lock); + if (csio_lnode_start(ln) != 0) + rv = -ENODEV; + spin_unlock_irq(&hw->lock); + + if (rv) + break; + + csio_lnode_init_post(ln); + } + + if (rv) + goto err_resume_exit; + + return; + +err_resume_exit: + csio_lnodes_block_request(hw); + spin_lock_irq(&hw->lock); + csio_hw_stop(hw); + spin_unlock_irq(&hw->lock); + csio_lnodes_unblock_request(hw); + csio_lnodes_exit(hw, 0); + csio_hw_free(hw); + dev_err(&pdev->dev, "resume of device failed: %d\n", rv); +} + +static struct pci_error_handlers csio_err_handler = { + .error_detected = csio_pci_error_detected, + .slot_reset = csio_pci_slot_reset, + .resume = csio_pci_resume, +}; + +/* + * Macros needed to support the PCI Device ID Table ... + */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static const struct pci_device_id csio_pci_tbl[] = { +/* Define for FCoE uses PF6 */ +#define CH_PCI_DEVICE_ID_FUNCTION 0x6 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + { PCI_VDEVICE(CHELSIO, (devid)), 0 } + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } } + +#include "t4_pci_id_tbl.h" + +static struct pci_driver csio_pci_driver = { + .name = KBUILD_MODNAME, + .driver = { + .owner = THIS_MODULE, + }, + .id_table = csio_pci_tbl, + .probe = csio_probe_one, + .remove = csio_remove_one, + .err_handler = &csio_err_handler, +}; + +/* + * csio_init - Chelsio storage driver initialization function. + * + */ +static int __init +csio_init(void) +{ + int rv = -ENOMEM; + + pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION); + + csio_dfs_init(); + + csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs); + if (!csio_fcoe_transport) + goto err; + + csio_fcoe_transport_vport = + fc_attach_transport(&csio_fc_transport_vport_funcs); + if (!csio_fcoe_transport_vport) + goto err_vport; + + rv = pci_register_driver(&csio_pci_driver); + if (rv) + goto err_pci; + + return 0; + +err_pci: + fc_release_transport(csio_fcoe_transport_vport); +err_vport: + fc_release_transport(csio_fcoe_transport); +err: + csio_dfs_exit(); + return rv; +} + +/* + * csio_exit - Chelsio storage driver uninitialization . + * + * Function that gets called in the unload path. 
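A note on the device ID table built above: t4_pci_id_tbl.h is a header shared with the cxgb4 network driver that lists the Chelsio device IDs and emits them through whatever CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN / CH_PCI_ID_TABLE_ENTRY / ..._END macros the including driver provides. With the definitions shown above, the include should expand into an ordinary pci_device_id array of roughly the shape sketched below; the device IDs in the sketch are placeholders, not values taken from the real header.

#include <linux/pci.h>

/*
 * Rough shape of what the t4_pci_id_tbl.h include expands to with the
 * macros defined above; 0x5001/0x5002 are placeholders, not real IDs.
 */
static const struct pci_device_id csio_pci_tbl_example[] = {
	{ PCI_VDEVICE(CHELSIO, 0x5001), 0 },
	{ PCI_VDEVICE(CHELSIO, 0x5002), 0 },
	/* ... one entry per supported FCoE device ID ... */
	{ 0, }
};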
+ */ +static void __exit +csio_exit(void) +{ + pci_unregister_driver(&csio_pci_driver); + csio_dfs_exit(); + fc_release_transport(csio_fcoe_transport_vport); + fc_release_transport(csio_fcoe_transport); +} + +module_init(csio_init); +module_exit(csio_exit); +MODULE_AUTHOR(CSIO_DRV_AUTHOR); +MODULE_DESCRIPTION(CSIO_DRV_DESC); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DEVICE_TABLE(pci, csio_pci_tbl); +MODULE_VERSION(CSIO_DRV_VERSION); +MODULE_FIRMWARE(FW_FNAME_T5); +MODULE_FIRMWARE(FW_FNAME_T6); +MODULE_SOFTDEP("pre: cxgb4"); diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h new file mode 100644 index 000000000..202442543 --- /dev/null +++ b/drivers/scsi/csiostor/csio_init.h @@ -0,0 +1,136 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CSIO_INIT_H__ +#define __CSIO_INIT_H__ + +#include +#include +#include +#include +#include +#include + +#include "csio_scsi.h" +#include "csio_lnode.h" +#include "csio_rnode.h" +#include "csio_hw.h" + +#define CSIO_DRV_AUTHOR "Chelsio Communications" +#define CSIO_DRV_DESC "Chelsio FCoE driver" +#define CSIO_DRV_VERSION "1.0.0-ko" + +extern struct fc_function_template csio_fc_transport_funcs; +extern struct fc_function_template csio_fc_transport_vport_funcs; + +void csio_fchost_attr_init(struct csio_lnode *); + +/* INTx handlers */ +void csio_scsi_intx_handler(struct csio_hw *, void *, uint32_t, + struct csio_fl_dma_buf *, void *); + +void csio_fwevt_intx_handler(struct csio_hw *, void *, uint32_t, + struct csio_fl_dma_buf *, void *); + +/* Common os lnode APIs */ +void csio_lnodes_block_request(struct csio_hw *); +void csio_lnodes_unblock_request(struct csio_hw *); +void csio_lnodes_block_by_port(struct csio_hw *, uint8_t); +void csio_lnodes_unblock_by_port(struct csio_hw *, uint8_t); + +struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool, + struct csio_lnode *); +void csio_shost_exit(struct csio_lnode *); +void csio_lnodes_exit(struct csio_hw *, bool); + +/* DebugFS helper routines */ +void csio_add_debugfs_mem(struct csio_hw *, const char *, + unsigned int, unsigned int); + +static inline struct Scsi_Host * +csio_ln_to_shost(struct csio_lnode *ln) +{ + return container_of((void *)ln, struct Scsi_Host, hostdata[0]); +} + +/* SCSI -- locking version of get/put ioreqs */ +static inline struct csio_ioreq * +csio_get_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim) +{ + struct csio_ioreq *ioreq; + unsigned long flags; + + spin_lock_irqsave(&scsim->freelist_lock, flags); + ioreq = csio_get_scsi_ioreq(scsim); + spin_unlock_irqrestore(&scsim->freelist_lock, flags); + + return ioreq; +} + +static inline void +csio_put_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim, + struct csio_ioreq *ioreq) +{ + unsigned long flags; + + spin_lock_irqsave(&scsim->freelist_lock, flags); + csio_put_scsi_ioreq(scsim, ioreq); + spin_unlock_irqrestore(&scsim->freelist_lock, flags); +} + +/* Called in interrupt context */ +static inline void +csio_put_scsi_ioreq_list_lock(struct csio_hw *hw, struct csio_scsim *scsim, + struct list_head *reqlist, int n) +{ + unsigned long flags; + + spin_lock_irqsave(&scsim->freelist_lock, flags); + csio_put_scsi_ioreq_list(scsim, reqlist, n); + spin_unlock_irqrestore(&scsim->freelist_lock, flags); +} + +/* Called in interrupt context */ +static inline void +csio_put_scsi_ddp_list_lock(struct csio_hw *hw, struct csio_scsim *scsim, + struct list_head *reqlist, int n) +{ + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + csio_put_scsi_ddp_list(scsim, reqlist, n); + spin_unlock_irqrestore(&hw->lock, flags); +} + +#endif /* ifndef __CSIO_INIT_H__ */ diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c new file mode 100644 index 000000000..b2540402f --- /dev/null +++ b/drivers/scsi/csiostor/csio_isr.c @@ -0,0 +1,610 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
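One detail in csio_init.h worth unpacking is csio_ln_to_shost(): the lnode lives in the Scsi_Host private area (hostdata[]), so the Scsi_Host can be recovered with container_of() on hostdata[0]. The sketch below shows that round trip for a generic private structure; it assumes the usual scsi_host_alloc(..., sizeof(priv)) allocation that csio_shost_init() is expected to perform, and the my_* names are illustrative.

#include <linux/kernel.h>
#include <scsi/scsi_host.h>

struct my_priv {
	int example_field;
};

/* Forward: the private area starts at shost->hostdata[]. */
static struct my_priv *my_shost_to_priv(struct Scsi_Host *shost)
{
	return (struct my_priv *)shost->hostdata;	/* i.e. shost_priv() */
}

/*
 * Reverse: hostdata[] is embedded in struct Scsi_Host, so container_of()
 * on its first element recovers the Scsi_Host, as csio_ln_to_shost() does.
 */
static struct Scsi_Host *my_priv_to_shost(struct my_priv *priv)
{
	return container_of((void *)priv, struct Scsi_Host, hostdata[0]);
}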
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#include "csio_init.h" +#include "csio_hw.h" + +static irqreturn_t +csio_nondata_isr(int irq, void *dev_id) +{ + struct csio_hw *hw = (struct csio_hw *) dev_id; + int rv; + unsigned long flags; + + if (unlikely(!hw)) + return IRQ_NONE; + + if (unlikely(pci_channel_offline(hw->pdev))) { + CSIO_INC_STATS(hw, n_pcich_offline); + return IRQ_NONE; + } + + spin_lock_irqsave(&hw->lock, flags); + csio_hw_slow_intr_handler(hw); + rv = csio_mb_isr_handler(hw); + + if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) { + hw->flags |= CSIO_HWF_FWEVT_PENDING; + spin_unlock_irqrestore(&hw->lock, flags); + schedule_work(&hw->evtq_work); + return IRQ_HANDLED; + } + spin_unlock_irqrestore(&hw->lock, flags); + return IRQ_HANDLED; +} + +/* + * csio_fwevt_handler - Common FW event handler routine. + * @hw: HW module. + * + * This is the ISR for FW events. It is shared b/w MSIX + * and INTx handlers. + */ +static void +csio_fwevt_handler(struct csio_hw *hw) +{ + int rv; + unsigned long flags; + + rv = csio_fwevtq_handler(hw); + + spin_lock_irqsave(&hw->lock, flags); + if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) { + hw->flags |= CSIO_HWF_FWEVT_PENDING; + spin_unlock_irqrestore(&hw->lock, flags); + schedule_work(&hw->evtq_work); + return; + } + spin_unlock_irqrestore(&hw->lock, flags); + +} /* csio_fwevt_handler */ + +/* + * csio_fwevt_isr() - FW events MSIX ISR + * @irq: + * @dev_id: + * + * Process WRs on the FW event queue. + * + */ +static irqreturn_t +csio_fwevt_isr(int irq, void *dev_id) +{ + struct csio_hw *hw = (struct csio_hw *) dev_id; + + if (unlikely(!hw)) + return IRQ_NONE; + + if (unlikely(pci_channel_offline(hw->pdev))) { + CSIO_INC_STATS(hw, n_pcich_offline); + return IRQ_NONE; + } + + csio_fwevt_handler(hw); + + return IRQ_HANDLED; +} + +/* + * csio_fwevt_isr() - INTx wrapper for handling FW events. + * @irq: + * @dev_id: + */ +void +csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len, + struct csio_fl_dma_buf *flb, void *priv) +{ + csio_fwevt_handler(hw); +} /* csio_fwevt_intx_handler */ + +/* + * csio_process_scsi_cmpl - Process a SCSI WR completion. + * @hw: HW module. + * @wr: The completed WR from the ingress queue. + * @len: Length of the WR. + * @flb: Freelist buffer array. 
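csio_nondata_isr() and csio_fwevt_handler() below share one hand-off pattern: work that cannot finish in interrupt context is flagged (CSIO_HWF_FWEVT_PENDING) under the HW lock and deferred to the evtq_work worker, with the flag preventing the work item from being queued again before the worker has run. A stripped-down sketch of that hand-off follows; the my_* names are illustrative, and the worker clearing the flag mirrors what the driver's event worker is assumed to do.

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define MY_EVT_PENDING	0x1

struct my_hw {
	spinlock_t lock;
	unsigned int flags;
	struct work_struct evt_work;
};

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_hw *hw = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&hw->lock, flags);
	if (!(hw->flags & MY_EVT_PENDING)) {
		/* First event since the worker last ran: queue it once. */
		hw->flags |= MY_EVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evt_work);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&hw->lock, flags);
	return IRQ_HANDLED;
}

static void my_evt_worker(struct work_struct *work)
{
	struct my_hw *hw = container_of(work, struct my_hw, evt_work);
	unsigned long flags;

	/* Clear the flag before draining, so later events re-queue us. */
	spin_lock_irqsave(&hw->lock, flags);
	hw->flags &= ~MY_EVT_PENDING;
	spin_unlock_irqrestore(&hw->lock, flags);

	/* ... drain the hardware event queue here ... */
}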
+ * + */ +static void +csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len, + struct csio_fl_dma_buf *flb, void *cbfn_q) +{ + struct csio_ioreq *ioreq; + uint8_t *scsiwr; + uint8_t subop; + void *cmnd; + unsigned long flags; + + ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr); + if (likely(ioreq)) { + if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) { + subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET( + ((struct fw_scsi_abrt_cls_wr *) + scsiwr)->sub_opcode_to_chk_all_io); + + csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n", + subop ? "Close" : "Abort", + ioreq, ioreq->wr_status); + + spin_lock_irqsave(&hw->lock, flags); + if (subop) + csio_scsi_closed(ioreq, + (struct list_head *)cbfn_q); + else + csio_scsi_aborted(ioreq, + (struct list_head *)cbfn_q); + /* + * We call scsi_done for I/Os that driver thinks aborts + * have timed out. If there is a race caused by FW + * completing abort at the exact same time that the + * driver has deteced the abort timeout, the following + * check prevents calling of scsi_done twice for the + * same command: once from the eh_abort_handler, another + * from csio_scsi_isr_handler(). This also avoids the + * need to check if csio_scsi_cmnd(req) is NULL in the + * fast path. + */ + cmnd = csio_scsi_cmnd(ioreq); + if (unlikely(cmnd == NULL)) + list_del_init(&ioreq->sm.sm_list); + + spin_unlock_irqrestore(&hw->lock, flags); + + if (unlikely(cmnd == NULL)) + csio_put_scsi_ioreq_lock(hw, + csio_hw_to_scsim(hw), ioreq); + } else { + spin_lock_irqsave(&hw->lock, flags); + csio_scsi_completed(ioreq, (struct list_head *)cbfn_q); + spin_unlock_irqrestore(&hw->lock, flags); + } + } +} + +/* + * csio_scsi_isr_handler() - Common SCSI ISR handler. + * @iq: Ingress queue pointer. + * + * Processes SCSI completions on the SCSI IQ indicated by scm->iq_idx + * by calling csio_wr_process_iq_idx. If there are completions on the + * isr_cbfn_q, yank them out into a local queue and call their io_cbfns. + * Once done, add these completions onto the freelist. + * This routine is shared b/w MSIX and INTx. + */ +static inline irqreturn_t +csio_scsi_isr_handler(struct csio_q *iq) +{ + struct csio_hw *hw = (struct csio_hw *)iq->owner; + LIST_HEAD(cbfn_q); + struct list_head *tmp; + struct csio_scsim *scm; + struct csio_ioreq *ioreq; + int isr_completions = 0; + + scm = csio_hw_to_scsim(hw); + + if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl, + &cbfn_q) != 0)) + return IRQ_NONE; + + /* Call back the completion routines */ + list_for_each(tmp, &cbfn_q) { + ioreq = (struct csio_ioreq *)tmp; + isr_completions++; + ioreq->io_cbfn(hw, ioreq); + /* Release ddp buffer if used for this req */ + if (unlikely(ioreq->dcopy)) + csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list, + ioreq->nsge); + } + + if (isr_completions) { + /* Return the ioreqs back to ioreq->freelist */ + csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q, + isr_completions); + } + + return IRQ_HANDLED; +} + +/* + * csio_scsi_isr() - SCSI MSIX handler + * @irq: + * @dev_id: + * + * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler() + * for handling SCSI completions. 
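csio_scsi_isr_handler() above is deliberately batched: completions are first gathered on the local cbfn_q while the ingress queue is processed, their callbacks are then run, and all of the ioreqs are finally handed back to the free list under a single acquisition of the freelist lock via csio_put_scsi_ioreq_list_lock(), rather than one lock round trip per I/O. The general shape of such a batched return is sketched below with illustrative types.

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_pool {
	spinlock_t lock;
	struct list_head freelist;
};

/* Hand a whole batch of completed requests back with one lock round trip. */
static void my_pool_put_batch(struct my_pool *pool, struct list_head *batch)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	list_splice_tail_init(batch, &pool->freelist);
	spin_unlock_irqrestore(&pool->lock, flags);
}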
+ */ +static irqreturn_t +csio_scsi_isr(int irq, void *dev_id) +{ + struct csio_q *iq = (struct csio_q *) dev_id; + struct csio_hw *hw; + + if (unlikely(!iq)) + return IRQ_NONE; + + hw = (struct csio_hw *)iq->owner; + + if (unlikely(pci_channel_offline(hw->pdev))) { + CSIO_INC_STATS(hw, n_pcich_offline); + return IRQ_NONE; + } + + csio_scsi_isr_handler(iq); + + return IRQ_HANDLED; +} + +/* + * csio_scsi_intx_handler() - SCSI INTx handler + * @irq: + * @dev_id: + * + * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler() + * for handling SCSI completions. + */ +void +csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len, + struct csio_fl_dma_buf *flb, void *priv) +{ + struct csio_q *iq = priv; + + csio_scsi_isr_handler(iq); + +} /* csio_scsi_intx_handler */ + +/* + * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE. + * @irq: + * @dev_id: + * + * + */ +static irqreturn_t +csio_fcoe_isr(int irq, void *dev_id) +{ + struct csio_hw *hw = (struct csio_hw *) dev_id; + struct csio_q *intx_q = NULL; + int rv; + irqreturn_t ret = IRQ_NONE; + unsigned long flags; + + if (unlikely(!hw)) + return IRQ_NONE; + + if (unlikely(pci_channel_offline(hw->pdev))) { + CSIO_INC_STATS(hw, n_pcich_offline); + return IRQ_NONE; + } + + /* Disable the interrupt for this PCI function. */ + if (hw->intr_mode == CSIO_IM_INTX) + csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A)); + + /* + * The read in the following function will flush the + * above write. + */ + if (csio_hw_slow_intr_handler(hw)) + ret = IRQ_HANDLED; + + /* Get the INTx Forward interrupt IQ. */ + intx_q = csio_get_q(hw, hw->intr_iq_idx); + + CSIO_DB_ASSERT(intx_q); + + /* IQ handler is not possible for intx_q, hence pass in NULL */ + if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0)) + ret = IRQ_HANDLED; + + spin_lock_irqsave(&hw->lock, flags); + rv = csio_mb_isr_handler(hw); + if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) { + hw->flags |= CSIO_HWF_FWEVT_PENDING; + spin_unlock_irqrestore(&hw->lock, flags); + schedule_work(&hw->evtq_work); + return IRQ_HANDLED; + } + spin_unlock_irqrestore(&hw->lock, flags); + + return ret; +} + +static void +csio_add_msix_desc(struct csio_hw *hw) +{ + int i; + struct csio_msix_entries *entryp = &hw->msix_entries[0]; + int k = CSIO_EXTRA_VECS; + int len = sizeof(entryp->desc) - 1; + int cnt = hw->num_sqsets + k; + + /* Non-data vector */ + memset(entryp->desc, 0, len + 1); + snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata", + CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw)); + + entryp++; + memset(entryp->desc, 0, len + 1); + snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt", + CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw)); + entryp++; + + /* Name SCSI vecs */ + for (i = k; i < cnt; i++, entryp++) { + memset(entryp->desc, 0, len + 1); + snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d", + CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), + CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS); + } +} + +int +csio_request_irqs(struct csio_hw *hw) +{ + int rv, i, j, k = 0; + struct csio_msix_entries *entryp = &hw->msix_entries[0]; + struct csio_scsi_cpu_info *info; + struct pci_dev *pdev = hw->pdev; + + if (hw->intr_mode != CSIO_IM_MSIX) { + rv = request_irq(pci_irq_vector(pdev, 0), csio_fcoe_isr, + hw->intr_mode == CSIO_IM_MSI ? 
0 : IRQF_SHARED, + KBUILD_MODNAME, hw); + if (rv) { + csio_err(hw, "Failed to allocate interrupt line.\n"); + goto out_free_irqs; + } + + goto out; + } + + /* Add the MSIX vector descriptions */ + csio_add_msix_desc(hw); + + rv = request_irq(pci_irq_vector(pdev, k), csio_nondata_isr, 0, + entryp[k].desc, hw); + if (rv) { + csio_err(hw, "IRQ request failed for vec %d err:%d\n", + pci_irq_vector(pdev, k), rv); + goto out_free_irqs; + } + + entryp[k++].dev_id = hw; + + rv = request_irq(pci_irq_vector(pdev, k), csio_fwevt_isr, 0, + entryp[k].desc, hw); + if (rv) { + csio_err(hw, "IRQ request failed for vec %d err:%d\n", + pci_irq_vector(pdev, k), rv); + goto out_free_irqs; + } + + entryp[k++].dev_id = (void *)hw; + + /* Allocate IRQs for SCSI */ + for (i = 0; i < hw->num_pports; i++) { + info = &hw->scsi_cpu_info[i]; + for (j = 0; j < info->max_cpus; j++, k++) { + struct csio_scsi_qset *sqset = &hw->sqset[i][j]; + struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx]; + + rv = request_irq(pci_irq_vector(pdev, k), csio_scsi_isr, 0, + entryp[k].desc, q); + if (rv) { + csio_err(hw, + "IRQ request failed for vec %d err:%d\n", + pci_irq_vector(pdev, k), rv); + goto out_free_irqs; + } + + entryp[k].dev_id = q; + + } /* for all scsi cpus */ + } /* for all ports */ + +out: + hw->flags |= CSIO_HWF_HOST_INTR_ENABLED; + return 0; + +out_free_irqs: + for (i = 0; i < k; i++) + free_irq(pci_irq_vector(pdev, i), hw->msix_entries[i].dev_id); + pci_free_irq_vectors(hw->pdev); + return -EINVAL; +} + +/* Reduce per-port max possible CPUs */ +static void +csio_reduce_sqsets(struct csio_hw *hw, int cnt) +{ + int i; + struct csio_scsi_cpu_info *info; + + while (cnt < hw->num_sqsets) { + for (i = 0; i < hw->num_pports; i++) { + info = &hw->scsi_cpu_info[i]; + if (info->max_cpus > 1) { + info->max_cpus--; + hw->num_sqsets--; + if (hw->num_sqsets <= cnt) + break; + } + } + } + + csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets); +} + +static void csio_calc_sets(struct irq_affinity *affd, unsigned int nvecs) +{ + struct csio_hw *hw = affd->priv; + u8 i; + + if (!nvecs) + return; + + if (nvecs < hw->num_pports) { + affd->nr_sets = 1; + affd->set_size[0] = nvecs; + return; + } + + affd->nr_sets = hw->num_pports; + for (i = 0; i < hw->num_pports; i++) + affd->set_size[i] = nvecs / hw->num_pports; +} + +static int +csio_enable_msix(struct csio_hw *hw) +{ + int i, j, k, n, min, cnt; + int extra = CSIO_EXTRA_VECS; + struct csio_scsi_cpu_info *info; + struct irq_affinity desc = { + .pre_vectors = CSIO_EXTRA_VECS, + .calc_sets = csio_calc_sets, + .priv = hw, + }; + + if (hw->num_pports > IRQ_AFFINITY_MAX_SETS) + return -ENOSPC; + + min = hw->num_pports + extra; + cnt = hw->num_sqsets + extra; + + /* Max vectors required based on #niqs configured in fw */ + if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw)) + cnt = min_t(uint8_t, hw->cfg_niq, cnt); + + csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt); + + cnt = pci_alloc_irq_vectors_affinity(hw->pdev, min, cnt, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc); + if (cnt < 0) + return cnt; + + if (cnt < (hw->num_sqsets + extra)) { + csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra); + csio_reduce_sqsets(hw, cnt - extra); + } + + /* Distribute vectors */ + k = 0; + csio_set_nondata_intr_idx(hw, k); + csio_set_mb_intr_idx(csio_hw_to_mbm(hw), k++); + csio_set_fwevt_intr_idx(hw, k++); + + for (i = 0; i < hw->num_pports; i++) { + info = &hw->scsi_cpu_info[i]; + + for (j = 0; j < hw->num_scsi_msix_cpus; j++) { + n = (j % info->max_cpus) + k; + 
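csio_enable_msix() builds on pci_alloc_irq_vectors_affinity(): the two control vectors (non-data and firmware events) are passed as .pre_vectors so they are left out of the CPU-affinity spreading, and csio_calc_sets() divides the remaining vectors into one affinity set per port. A minimal stand-alone use of that API, without the per-port sets, is sketched below; the my_* names are illustrative and error handling is trimmed.

#include <linux/interrupt.h>
#include <linux/pci.h>

#define MY_CTRL_VECS	2	/* non-data + firmware-event vectors */

/* Allocate MSI-X vectors, spreading only the queue vectors across CPUs. */
static int my_alloc_vectors(struct pci_dev *pdev, int nqueues)
{
	struct irq_affinity desc = {
		.pre_vectors = MY_CTRL_VECS,	/* not affinity-managed */
	};
	int nvecs;

	nvecs = pci_alloc_irq_vectors_affinity(pdev,
					       MY_CTRL_VECS + 1,
					       MY_CTRL_VECS + nqueues,
					       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					       &desc);
	if (nvecs < 0)
		return nvecs;

	/* The caller may get fewer queue vectors than it asked for. */
	return nvecs - MY_CTRL_VECS;
}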
hw->sqset[i][j].intr_idx = n; + } + + k += info->max_cpus; + } + + return 0; +} + +void +csio_intr_enable(struct csio_hw *hw) +{ + hw->intr_mode = CSIO_IM_NONE; + hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED; + + /* Try MSIX, then MSI or fall back to INTx */ + if ((csio_msi == 2) && !csio_enable_msix(hw)) + hw->intr_mode = CSIO_IM_MSIX; + else { + /* Max iqs required based on #niqs configured in fw */ + if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || + !csio_is_hw_master(hw)) { + int extra = CSIO_EXTRA_MSI_IQS; + + if (hw->cfg_niq < (hw->num_sqsets + extra)) { + csio_dbg(hw, "Reducing sqsets to %d\n", + hw->cfg_niq - extra); + csio_reduce_sqsets(hw, hw->cfg_niq - extra); + } + } + + if ((csio_msi == 1) && !pci_enable_msi(hw->pdev)) + hw->intr_mode = CSIO_IM_MSI; + else + hw->intr_mode = CSIO_IM_INTX; + } + + csio_dbg(hw, "Using %s interrupt mode.\n", + (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" : + ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx")); +} + +void +csio_intr_disable(struct csio_hw *hw, bool free) +{ + csio_hw_intr_disable(hw); + + if (free) { + int i; + + switch (hw->intr_mode) { + case CSIO_IM_MSIX: + for (i = 0; i < hw->num_sqsets + CSIO_EXTRA_VECS; i++) { + free_irq(pci_irq_vector(hw->pdev, i), + hw->msix_entries[i].dev_id); + } + break; + case CSIO_IM_MSI: + case CSIO_IM_INTX: + free_irq(pci_irq_vector(hw->pdev, 0), hw); + break; + default: + break; + } + } + + pci_free_irq_vectors(hw->pdev); + hw->intr_mode = CSIO_IM_NONE; + hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED; +} diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c new file mode 100644 index 000000000..d5ac93897 --- /dev/null +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -0,0 +1,2152 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "csio_hw.h" +#include "csio_mb.h" +#include "csio_lnode.h" +#include "csio_rnode.h" + +int csio_fcoe_rnodes = 1024; +int csio_fdmi_enable = 1; + +#define PORT_ID_PTR(_x) ((uint8_t *)(&_x) + 1) + +/* Lnode SM declarations */ +static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev); +static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev); +static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev); +static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev); + +static int csio_ln_mgmt_submit_req(struct csio_ioreq *, + void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *), + enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t); + +/* LN event mapping */ +static enum csio_ln_ev fwevt_to_lnevt[] = { + CSIO_LNE_NONE, /* None */ + CSIO_LNE_NONE, /* PLOGI_ACC_RCVD */ + CSIO_LNE_NONE, /* PLOGI_RJT_RCVD */ + CSIO_LNE_NONE, /* PLOGI_RCVD */ + CSIO_LNE_NONE, /* PLOGO_RCVD */ + CSIO_LNE_NONE, /* PRLI_ACC_RCVD */ + CSIO_LNE_NONE, /* PRLI_RJT_RCVD */ + CSIO_LNE_NONE, /* PRLI_RCVD */ + CSIO_LNE_NONE, /* PRLO_RCVD */ + CSIO_LNE_NONE, /* NPORT_ID_CHGD */ + CSIO_LNE_LOGO, /* FLOGO_RCVD */ + CSIO_LNE_LOGO, /* CLR_VIRT_LNK_RCVD */ + CSIO_LNE_FAB_INIT_DONE,/* FLOGI_ACC_RCVD */ + CSIO_LNE_NONE, /* FLOGI_RJT_RCVD */ + CSIO_LNE_FAB_INIT_DONE,/* FDISC_ACC_RCVD */ + CSIO_LNE_NONE, /* FDISC_RJT_RCVD */ + CSIO_LNE_NONE, /* FLOGI_TMO_MAX_RETRY */ + CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_ACC */ + CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_RJT */ + CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_CNFLT */ + CSIO_LNE_NONE, /* PRLI_TMO */ + CSIO_LNE_NONE, /* ADISC_TMO */ + CSIO_LNE_NONE, /* RSCN_DEV_LOST */ + CSIO_LNE_NONE, /* SCR_ACC_RCVD */ + CSIO_LNE_NONE, /* ADISC_RJT_RCVD */ + CSIO_LNE_NONE, /* LOGO_SNT */ + CSIO_LNE_NONE, /* PROTO_ERR_IMPL_LOGO */ +}; + +#define CSIO_FWE_TO_LNE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \ + CSIO_LNE_NONE : \ + fwevt_to_lnevt[_evt]) + +#define csio_ct_rsp(cp) (((struct fc_ct_hdr *)cp)->ct_cmd) +#define csio_ct_reason(cp) (((struct fc_ct_hdr *)cp)->ct_reason) +#define csio_ct_expl(cp) (((struct fc_ct_hdr *)cp)->ct_explan) +#define csio_ct_get_pld(cp) ((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN)) + +/* + * csio_ln_match_by_portid - lookup lnode using given portid. + * @hw: HW module + * @portid: port-id. + * + * If found, returns lnode matching given portid otherwise returns NULL. + */ +static struct csio_lnode * +csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid) +{ + struct csio_lnode *ln; + struct list_head *tmp; + + /* Match siblings lnode with portid */ + list_for_each(tmp, &hw->sln_head) { + ln = (struct csio_lnode *) tmp; + if (ln->portid == portid) + return ln; + } + + return NULL; +} + +/* + * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id. + * @hw - HW module + * @vnpi - vnp index. + * Returns - If found, returns lnode matching given vnp id + * otherwise returns NULL. 
+ */ +static struct csio_lnode * +csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id) +{ + struct list_head *tmp1, *tmp2; + struct csio_lnode *sln = NULL, *cln = NULL; + + if (list_empty(&hw->sln_head)) { + CSIO_INC_STATS(hw, n_lnlkup_miss); + return NULL; + } + /* Traverse sibling lnodes */ + list_for_each(tmp1, &hw->sln_head) { + sln = (struct csio_lnode *) tmp1; + + /* Match sibling lnode */ + if (sln->vnp_flowid == vnp_id) + return sln; + + if (list_empty(&sln->cln_head)) + continue; + + /* Traverse children lnodes */ + list_for_each(tmp2, &sln->cln_head) { + cln = (struct csio_lnode *) tmp2; + + if (cln->vnp_flowid == vnp_id) + return cln; + } + } + CSIO_INC_STATS(hw, n_lnlkup_miss); + return NULL; +} + +/** + * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn. + * @hw: HW module. + * @wwpn: WWPN. + * + * If found, returns lnode matching given wwpn, returns NULL otherwise. + */ +struct csio_lnode * +csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn) +{ + struct list_head *tmp1, *tmp2; + struct csio_lnode *sln = NULL, *cln = NULL; + + if (list_empty(&hw->sln_head)) { + CSIO_INC_STATS(hw, n_lnlkup_miss); + return NULL; + } + /* Traverse sibling lnodes */ + list_for_each(tmp1, &hw->sln_head) { + sln = (struct csio_lnode *) tmp1; + + /* Match sibling lnode */ + if (!memcmp(csio_ln_wwpn(sln), wwpn, 8)) + return sln; + + if (list_empty(&sln->cln_head)) + continue; + + /* Traverse children lnodes */ + list_for_each(tmp2, &sln->cln_head) { + cln = (struct csio_lnode *) tmp2; + + if (!memcmp(csio_ln_wwpn(cln), wwpn, 8)) + return cln; + } + } + return NULL; +} + +/* FDMI */ +static void +csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op) +{ + struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf; + cmd->ct_rev = FC_CT_REV; + cmd->ct_fs_type = type; + cmd->ct_fs_subtype = sub_type; + cmd->ct_cmd = htons(op); +} + +static int +csio_hostname(uint8_t *buf, size_t buf_len) +{ + if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0) + return 0; + return -1; +} + +static int +csio_osname(uint8_t *buf, size_t buf_len) +{ + if (snprintf(buf, buf_len, "%s %s %s", + init_utsname()->sysname, + init_utsname()->release, + init_utsname()->version) > 0) + return 0; + + return -1; +} + +static inline void +csio_append_attrib(uint8_t **ptr, uint16_t type, void *val, size_t val_len) +{ + uint16_t len; + struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr; + + if (WARN_ON(val_len > U16_MAX)) + return; + + len = val_len; + + ae->type = htons(type); + len += 4; /* includes attribute type and length */ + len = (len + 3) & ~3; /* should be multiple of 4 bytes */ + ae->len = htons(len); + memcpy(ae->value, val, val_len); + if (len > val_len) + memset(ae->value + val_len, 0, len - val_len); + *ptr += len; +} + +/* + * csio_ln_fdmi_done - FDMI registeration completion + * @hw: HW context + * @fdmi_req: fdmi request + */ +static void +csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req) +{ + void *cmd; + struct csio_lnode *ln = fdmi_req->lnode; + + if (fdmi_req->wr_status != FW_SUCCESS) { + csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n", + fdmi_req->wr_status); + CSIO_INC_STATS(ln, n_fdmi_err); + } + + cmd = fdmi_req->dma_buf.vaddr; + if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { + csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n", + csio_ct_reason(cmd), csio_ct_expl(cmd)); + } +} + +/* + * csio_ln_fdmi_rhba_cbfn - RHBA completion + * @hw: HW context + * @fdmi_req: fdmi request + */ +static void 
+csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) +{ + void *cmd; + uint8_t *pld; + uint32_t len = 0; + __be32 val; + __be16 mfs; + uint32_t numattrs = 0; + struct csio_lnode *ln = fdmi_req->lnode; + struct fs_fdmi_attrs *attrib_blk; + struct fc_fdmi_port_name *port_name; + uint8_t buf[64]; + uint8_t *fc4_type; + unsigned long flags; + + if (fdmi_req->wr_status != FW_SUCCESS) { + csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n", + fdmi_req->wr_status); + CSIO_INC_STATS(ln, n_fdmi_err); + } + + cmd = fdmi_req->dma_buf.vaddr; + if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { + csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n", + csio_ct_reason(cmd), csio_ct_expl(cmd)); + } + + if (!csio_is_rnode_ready(fdmi_req->rnode)) { + CSIO_INC_STATS(ln, n_fdmi_err); + return; + } + + /* Prepare CT hdr for RPA cmd */ + memset(cmd, 0, FC_CT_HDR_LEN); + csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RPA); + + /* Prepare RPA payload */ + pld = (uint8_t *)csio_ct_get_pld(cmd); + port_name = (struct fc_fdmi_port_name *)pld; + memcpy(&port_name->portname, csio_ln_wwpn(ln), 8); + pld += sizeof(*port_name); + + /* Start appending Port attributes */ + attrib_blk = (struct fs_fdmi_attrs *)pld; + attrib_blk->numattrs = 0; + len += sizeof(attrib_blk->numattrs); + pld += sizeof(attrib_blk->numattrs); + + fc4_type = &buf[0]; + memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN); + fc4_type[2] = 1; + fc4_type[7] = 1; + csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES, + fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN); + numattrs++; + val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); + csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED, + &val, + FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN); + numattrs++; + + if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G) + val = htonl(FC_PORTSPEED_1GBIT); + else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G) + val = htonl(FC_PORTSPEED_10GBIT); + else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_25G) + val = htonl(FC_PORTSPEED_25GBIT); + else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_40G) + val = htonl(FC_PORTSPEED_40GBIT); + else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_50G) + val = htonl(FC_PORTSPEED_50GBIT); + else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_100G) + val = htonl(FC_PORTSPEED_100GBIT); + else + val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN); + csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED, + &val, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN); + numattrs++; + + mfs = ln->ln_sparm.csp.sp_bb_data; + csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE, + &mfs, sizeof(mfs)); + numattrs++; + + strcpy(buf, "csiostor"); + csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf, + strlen(buf)); + numattrs++; + + if (!csio_hostname(buf, sizeof(buf))) { + csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME, + buf, strlen(buf)); + numattrs++; + } + attrib_blk->numattrs = htonl(numattrs); + len = (uint32_t)(pld - (uint8_t *)cmd); + + /* Submit FDMI RPA request */ + spin_lock_irqsave(&hw->lock, flags); + if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done, + FCOE_CT, &fdmi_req->dma_buf, len)) { + CSIO_INC_STATS(ln, n_fdmi_err); + csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n"); + } + spin_unlock_irqrestore(&hw->lock, flags); +} + +/* + * csio_ln_fdmi_dprt_cbfn - DPRT completion + * @hw: HW context + * @fdmi_req: fdmi request + */ +static void +csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) 
+{ + void *cmd; + uint8_t *pld; + uint32_t len = 0; + uint32_t numattrs = 0; + __be32 maxpayload = htonl(65536); + struct fc_fdmi_hba_identifier *hbaid; + struct csio_lnode *ln = fdmi_req->lnode; + struct fc_fdmi_rpl *reg_pl; + struct fs_fdmi_attrs *attrib_blk; + uint8_t buf[64]; + unsigned long flags; + + if (fdmi_req->wr_status != FW_SUCCESS) { + csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n", + fdmi_req->wr_status); + CSIO_INC_STATS(ln, n_fdmi_err); + } + + if (!csio_is_rnode_ready(fdmi_req->rnode)) { + CSIO_INC_STATS(ln, n_fdmi_err); + return; + } + cmd = fdmi_req->dma_buf.vaddr; + if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { + csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n", + csio_ct_reason(cmd), csio_ct_expl(cmd)); + } + + /* Prepare CT hdr for RHBA cmd */ + memset(cmd, 0, FC_CT_HDR_LEN); + csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RHBA); + len = FC_CT_HDR_LEN; + + /* Prepare RHBA payload */ + pld = (uint8_t *)csio_ct_get_pld(cmd); + hbaid = (struct fc_fdmi_hba_identifier *)pld; + memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifer */ + pld += sizeof(*hbaid); + + /* Register one port per hba */ + reg_pl = (struct fc_fdmi_rpl *)pld; + reg_pl->numport = htonl(1); + memcpy(®_pl->port[0].portname, csio_ln_wwpn(ln), 8); + pld += sizeof(*reg_pl); + + /* Start appending HBA attributes hba */ + attrib_blk = (struct fs_fdmi_attrs *)pld; + attrib_blk->numattrs = 0; + len += sizeof(attrib_blk->numattrs); + pld += sizeof(attrib_blk->numattrs); + + csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln), + FC_FDMI_HBA_ATTR_NODENAME_LEN); + numattrs++; + + memset(buf, 0, sizeof(buf)); + + strcpy(buf, "Chelsio Communications"); + csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf, + strlen(buf)); + numattrs++; + csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER, + hw->vpd.sn, sizeof(hw->vpd.sn)); + numattrs++; + csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id, + sizeof(hw->vpd.id)); + numattrs++; + csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION, + hw->model_desc, strlen(hw->model_desc)); + numattrs++; + csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION, + hw->hw_ver, sizeof(hw->hw_ver)); + numattrs++; + csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION, + hw->fwrev_str, strlen(hw->fwrev_str)); + numattrs++; + + if (!csio_osname(buf, sizeof(buf))) { + csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION, + buf, strlen(buf)); + numattrs++; + } + + csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD, + &maxpayload, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN); + len = (uint32_t)(pld - (uint8_t *)cmd); + numattrs++; + attrib_blk->numattrs = htonl(numattrs); + + /* Submit FDMI RHBA request */ + spin_lock_irqsave(&hw->lock, flags); + if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn, + FCOE_CT, &fdmi_req->dma_buf, len)) { + CSIO_INC_STATS(ln, n_fdmi_err); + csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n"); + } + spin_unlock_irqrestore(&hw->lock, flags); +} + +/* + * csio_ln_fdmi_dhba_cbfn - DHBA completion + * @hw: HW context + * @fdmi_req: fdmi request + */ +static void +csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) +{ + struct csio_lnode *ln = fdmi_req->lnode; + void *cmd; + struct fc_fdmi_port_name *port_name; + uint32_t len; + unsigned long flags; + + if (fdmi_req->wr_status != FW_SUCCESS) { + csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n", + fdmi_req->wr_status); + CSIO_INC_STATS(ln, n_fdmi_err); + } + + if 
(!csio_is_rnode_ready(fdmi_req->rnode)) { + CSIO_INC_STATS(ln, n_fdmi_err); + return; + } + cmd = fdmi_req->dma_buf.vaddr; + if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { + csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n", + csio_ct_reason(cmd), csio_ct_expl(cmd)); + } + + /* Send FDMI cmd to de-register any Port attributes if registered + * before + */ + + /* Prepare FDMI DPRT cmd */ + memset(cmd, 0, FC_CT_HDR_LEN); + csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DPRT); + len = FC_CT_HDR_LEN; + port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd); + memcpy(&port_name->portname, csio_ln_wwpn(ln), 8); + len += sizeof(*port_name); + + /* Submit FDMI request */ + spin_lock_irqsave(&hw->lock, flags); + if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn, + FCOE_CT, &fdmi_req->dma_buf, len)) { + CSIO_INC_STATS(ln, n_fdmi_err); + csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n"); + } + spin_unlock_irqrestore(&hw->lock, flags); +} + +/** + * csio_ln_fdmi_start - Start an FDMI request. + * @ln: lnode + * @context: session context + * + * Issued with lock held. + */ +int +csio_ln_fdmi_start(struct csio_lnode *ln, void *context) +{ + struct csio_ioreq *fdmi_req; + struct csio_rnode *fdmi_rn = (struct csio_rnode *)context; + void *cmd; + struct fc_fdmi_hba_identifier *hbaid; + uint32_t len; + + if (!(ln->flags & CSIO_LNF_FDMI_ENABLE)) + return -EPROTONOSUPPORT; + + if (!csio_is_rnode_ready(fdmi_rn)) + CSIO_INC_STATS(ln, n_fdmi_err); + + /* Send FDMI cmd to de-register any HBA attributes if registered + * before + */ + + fdmi_req = ln->mgmt_req; + fdmi_req->lnode = ln; + fdmi_req->rnode = fdmi_rn; + + /* Prepare FDMI DHBA cmd */ + cmd = fdmi_req->dma_buf.vaddr; + memset(cmd, 0, FC_CT_HDR_LEN); + csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DHBA); + len = FC_CT_HDR_LEN; + + hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd); + memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); + len += sizeof(*hbaid); + + /* Submit FDMI request */ + if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn, + FCOE_CT, &fdmi_req->dma_buf, len)) { + CSIO_INC_STATS(ln, n_fdmi_err); + csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n"); + } + + return 0; +} + +/* + * csio_ln_vnp_read_cbfn - vnp read completion handler. + * @hw: HW lnode + * @cbfn: Completion handler. + * + * Reads vnp response and updates ln parameters. + */ +static void +csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp) +{ + struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv); + struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb); + struct fc_els_csp *csp; + struct fc_els_cssp *clsp; + enum fw_retval retval; + __be32 nport_id = 0; + + retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)); + if (retval != FW_SUCCESS) { + csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval); + mempool_free(mbp, hw->mb_mempool); + return; + } + + spin_lock_irq(&hw->lock); + + memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac)); + memcpy(&nport_id, &rsp->vnport_mac[3], sizeof(uint8_t)*3); + ln->nport_id = ntohl(nport_id); + ln->nport_id = ln->nport_id >> 8; + + /* Update WWNs */ + /* + * This may look like a duplication of what csio_fcoe_enable_link() + * does, but is absolutely necessary if the vnpi changes between + * a FCOE LINK UP and FCOE LINK DOWN. 
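The nport_id arithmetic in csio_ln_vnp_read_cbfn() above deserves a note: with fabric-provided MAC addresses (FPMA) the low three bytes of the VN_Port MAC carry the 24-bit FC N_Port ID, so the code copies vnport_mac[3..5] into the leading bytes of a __be32, converts to host order, and shifts right by 8 to right-align the 24-bit value. The small stand-alone program below illustrates the same extraction; the function name and example values are illustrative only.

#include <arpa/inet.h>	/* ntohl(); in-kernel code uses the byteorder helpers */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Extract the 24-bit N_Port ID from an FPMA VN_Port MAC address. */
static uint32_t fpma_mac_to_nport_id(const uint8_t mac[6])
{
	uint32_t id = 0;

	memcpy(&id, &mac[3], 3);	/* bytes 3..5 carry the N_Port ID */
	return ntohl(id) >> 8;		/* right-align the 24-bit value */
}

int main(void)
{
	/* Example: FC-MAP 0e:fc:00 followed by N_Port ID 01:02:03. */
	const uint8_t mac[6] = { 0x0e, 0xfc, 0x00, 0x01, 0x02, 0x03 };

	printf("N_Port ID = 0x%06x\n", fpma_mac_to_nport_id(mac)); /* 0x010203 */
	return 0;
}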
+ */ + memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8); + memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8); + + /* Copy common sparam */ + csp = (struct fc_els_csp *)rsp->cmn_srv_parms; + ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver; + ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver; + ln->ln_sparm.csp.sp_bb_cred = csp->sp_bb_cred; + ln->ln_sparm.csp.sp_features = csp->sp_features; + ln->ln_sparm.csp.sp_bb_data = csp->sp_bb_data; + ln->ln_sparm.csp.sp_r_a_tov = csp->sp_r_a_tov; + ln->ln_sparm.csp.sp_e_d_tov = csp->sp_e_d_tov; + + /* Copy word 0 & word 1 of class sparam */ + clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1; + ln->ln_sparm.clsp[2].cp_class = clsp->cp_class; + ln->ln_sparm.clsp[2].cp_init = clsp->cp_init; + ln->ln_sparm.clsp[2].cp_recip = clsp->cp_recip; + ln->ln_sparm.clsp[2].cp_rdfs = clsp->cp_rdfs; + + spin_unlock_irq(&hw->lock); + + mempool_free(mbp, hw->mb_mempool); + + /* Send an event to update local attribs */ + csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE); +} + +/* + * csio_ln_vnp_read - Read vnp params. + * @ln: lnode + * @cbfn: Completion handler. + * + * Issued with lock held. + */ +static int +csio_ln_vnp_read(struct csio_lnode *ln, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct csio_hw *hw = ln->hwp; + struct csio_mb *mbp; + + /* Allocate Mbox request */ + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + /* Prepare VNP Command */ + csio_fcoe_vnp_read_init_mb(ln, mbp, + CSIO_MB_DEFAULT_TMO, + ln->fcf_flowid, + ln->vnp_flowid, + cbfn); + + /* Issue MBOX cmd */ + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Failed to issue mbox FCoE VNP command\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + return 0; +} + +/* + * csio_fcoe_enable_link - Enable fcoe link. + * @ln: lnode + * @enable: enable/disable + * Issued with lock held. + * Issues mbox cmd to bring up FCOE link on port associated with given ln. + */ +static int +csio_fcoe_enable_link(struct csio_lnode *ln, bool enable) +{ + struct csio_hw *hw = ln->hwp; + struct csio_mb *mbp; + enum fw_retval retval; + uint8_t portid; + uint8_t sub_op; + struct fw_fcoe_link_cmd *lcmd; + int i; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + portid = ln->portid; + sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN; + + csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n", + sub_op ? "UP" : "DOWN", portid); + + csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, + portid, sub_op, 0, 0, 0, NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n", + portid); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + retval = csio_mb_fw_retval(mbp); + if (retval != FW_SUCCESS) { + csio_err(hw, + "FCOE LINK %s cmd on port[%d] failed with " + "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + if (!enable) + goto out; + + lcmd = (struct fw_fcoe_link_cmd *)mbp->mb; + + memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8); + memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8); + + for (i = 0; i < CSIO_MAX_PPORTS; i++) + if (hw->pport[i].portid == portid) + memcpy(hw->pport[i].mac, lcmd->phy_mac, 6); + +out: + mempool_free(mbp, hw->mb_mempool); + return 0; +} + +/* + * csio_ln_read_fcf_cbfn - Read fcf parameters + * @ln: lnode + * + * read fcf response and Update ln fcf information. 
+ */ +static void +csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp) +{ + struct csio_lnode *ln = (struct csio_lnode *)mbp->priv; + struct csio_fcf_info *fcf_info; + struct fw_fcoe_fcf_cmd *rsp = + (struct fw_fcoe_fcf_cmd *)(mbp->mb); + enum fw_retval retval; + + retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16)); + if (retval != FW_SUCCESS) { + csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n", + retval); + mempool_free(mbp, hw->mb_mempool); + return; + } + + spin_lock_irq(&hw->lock); + fcf_info = ln->fcfinfo; + fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET( + ntohs(rsp->priority_pkd)); + fcf_info->vf_id = ntohs(rsp->vf_id); + fcf_info->vlan_id = rsp->vlan_id; + fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size); + fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv); + fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi)); + fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid); + fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid); + fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid); + fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid); + memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map)); + memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac)); + memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id)); + memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric)); + memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac)); + + spin_unlock_irq(&hw->lock); + + mempool_free(mbp, hw->mb_mempool); +} + +/* + * csio_ln_read_fcf_entry - Read fcf entry. + * @ln: lnode + * @cbfn: Completion handler. + * + * Issued with lock held. + */ +static int +csio_ln_read_fcf_entry(struct csio_lnode *ln, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct csio_hw *hw = ln->hwp; + struct csio_mb *mbp; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + /* Get FCoE FCF information */ + csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, + ln->portid, ln->fcf_flowid, cbfn); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "failed to issue FCOE FCF cmd\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + return 0; +} + +/* + * csio_handle_link_up - Logical Linkup event. + * @hw - HW module. + * @portid - Physical port number + * @fcfi - FCF index. + * @vnpi - VNP index. + * Returns - none. + * + * This event is received from FW, when virtual link is established between + * Physical port[ENode] and FCF. If its new vnpi, then local node object is + * created on this FCF and set to [ONLINE] state. + * Lnode waits for FW_RDEV_CMD event to be received indicating that + * Fabric login is completed and lnode moves to [READY] state. 
+ * + * This called with hw lock held + */ +static void +csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi, + uint32_t vnpi) +{ + struct csio_lnode *ln = NULL; + + /* Lookup lnode based on vnpi */ + ln = csio_ln_lookup_by_vnpi(hw, vnpi); + if (!ln) { + /* Pick lnode based on portid */ + ln = csio_ln_lookup_by_portid(hw, portid); + if (!ln) { + csio_err(hw, "failed to lookup fcoe lnode on port:%d\n", + portid); + CSIO_DB_ASSERT(0); + return; + } + + /* Check if lnode has valid vnp flowid */ + if (ln->vnp_flowid != CSIO_INVALID_IDX) { + /* New VN-Port */ + spin_unlock_irq(&hw->lock); + csio_lnode_alloc(hw); + spin_lock_irq(&hw->lock); + if (!ln) { + csio_err(hw, + "failed to allocate fcoe lnode" + "for port:%d vnpi:x%x\n", + portid, vnpi); + CSIO_DB_ASSERT(0); + return; + } + ln->portid = portid; + } + ln->vnp_flowid = vnpi; + ln->dev_num &= ~0xFFFF; + ln->dev_num |= vnpi; + } + + /*Initialize fcfi */ + ln->fcf_flowid = fcfi; + + csio_info(hw, "Port:%d - FCOE LINK UP\n", portid); + + CSIO_INC_STATS(ln, n_link_up); + + /* Send LINKUP event to SM */ + csio_post_event(&ln->sm, CSIO_LNE_LINKUP); +} + +/* + * csio_post_event_rns + * @ln - FCOE lnode + * @evt - Given rnode event + * Returns - none + * + * Posts given rnode event to all FCOE rnodes connected with given Lnode. + * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE + * event. + * + * This called with hw lock held + */ +static void +csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt) +{ + struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; + struct list_head *tmp, *next; + struct csio_rnode *rn; + + list_for_each_safe(tmp, next, &rnhead->sm.sm_list) { + rn = (struct csio_rnode *) tmp; + csio_post_event(&rn->sm, evt); + } +} + +/* + * csio_cleanup_rns + * @ln - FCOE lnode + * Returns - none + * + * Frees all FCOE rnodes connected with given Lnode. + * + * This called with hw lock held + */ +static void +csio_cleanup_rns(struct csio_lnode *ln) +{ + struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; + struct list_head *tmp, *next_rn; + struct csio_rnode *rn; + + list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) { + rn = (struct csio_rnode *) tmp; + csio_put_rnode(ln, rn); + } + +} + +/* + * csio_post_event_lns + * @ln - FCOE lnode + * @evt - Given lnode event + * Returns - none + * + * Posts given lnode event to all FCOE lnodes connected with given Lnode. + * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE + * event. + * + * This called with hw lock held + */ +static void +csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt) +{ + struct list_head *tmp; + struct csio_lnode *cln, *sln; + + /* If NPIV lnode, send evt only to that and return */ + if (csio_is_npiv_ln(ln)) { + csio_post_event(&ln->sm, evt); + return; + } + + sln = ln; + /* Traverse children lnodes list and send evt */ + list_for_each(tmp, &sln->cln_head) { + cln = (struct csio_lnode *) tmp; + csio_post_event(&cln->sm, evt); + } + + /* Send evt to parent lnode */ + csio_post_event(&ln->sm, evt); +} + +/* + * csio_ln_down - Lcoal nport is down + * @ln - FCOE Lnode + * Returns - none + * + * Sends LINK_DOWN events to Lnode and its associated NPIVs lnodes. + * + * This called with hw lock held + */ +static void +csio_ln_down(struct csio_lnode *ln) +{ + csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN); +} + +/* + * csio_handle_link_down - Logical Linkdown event. + * @hw - HW module. + * @portid - Physical port number + * @fcfi - FCF index. + * @vnpi - VNP index. 
+ * Returns - none + * + * This event is received from FW, when virtual link goes down between + * Physical port[ENode] and FCF. Lnode and its associated NPIVs lnode hosted on + * this vnpi[VN-Port] will be de-instantiated. + * + * This called with hw lock held + */ +static void +csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi, + uint32_t vnpi) +{ + struct csio_fcf_info *fp; + struct csio_lnode *ln; + + /* Lookup lnode based on vnpi */ + ln = csio_ln_lookup_by_vnpi(hw, vnpi); + if (ln) { + fp = ln->fcfinfo; + CSIO_INC_STATS(ln, n_link_down); + + /*Warn if linkdown received if lnode is not in ready state */ + if (!csio_is_lnode_ready(ln)) { + csio_ln_warn(ln, + "warn: FCOE link is already in offline " + "Ignoring Fcoe linkdown event on portid %d\n", + portid); + CSIO_INC_STATS(ln, n_evt_drop); + return; + } + + /* Verify portid */ + if (fp->portid != portid) { + csio_ln_warn(ln, + "warn: FCOE linkdown recv with " + "invalid port %d\n", portid); + CSIO_INC_STATS(ln, n_evt_drop); + return; + } + + /* verify fcfi */ + if (ln->fcf_flowid != fcfi) { + csio_ln_warn(ln, + "warn: FCOE linkdown recv with " + "invalid fcfi x%x\n", fcfi); + CSIO_INC_STATS(ln, n_evt_drop); + return; + } + + csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid); + + /* Send LINK_DOWN event to lnode s/m */ + csio_ln_down(ln); + + return; + } else { + csio_warn(hw, + "warn: FCOE linkdown recv with invalid vnpi x%x\n", + vnpi); + CSIO_INC_STATS(hw, n_evt_drop); + } +} + +/* + * csio_is_lnode_ready - Checks FCOE lnode is in ready state. + * @ln: Lnode module + * + * Returns True if FCOE lnode is in ready state. + */ +int +csio_is_lnode_ready(struct csio_lnode *ln) +{ + return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)); +} + +/*****************************************************************************/ +/* START: Lnode SM */ +/*****************************************************************************/ +/* + * csio_lns_uninit - The request in uninit state. + * @ln - FCOE lnode. + * @evt - Event to be processed. + * + * Process the given lnode event which is currently in "uninit" state. + * Invoked with HW lock held. + * Return - none. + */ +static void +csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt) +{ + struct csio_hw *hw = csio_lnode_to_hw(ln); + struct csio_lnode *rln = hw->rln; + int rv; + + CSIO_INC_STATS(ln, n_evt_sm[evt]); + switch (evt) { + case CSIO_LNE_LINKUP: + csio_set_state(&ln->sm, csio_lns_online); + /* Read FCF only for physical lnode */ + if (csio_is_phys_ln(ln)) { + rv = csio_ln_read_fcf_entry(ln, + csio_ln_read_fcf_cbfn); + if (rv != 0) { + /* TODO: Send HW RESET event */ + CSIO_INC_STATS(ln, n_err); + break; + } + + /* Add FCF record */ + list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead); + } + + rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn); + if (rv != 0) { + /* TODO: Send HW RESET event */ + CSIO_INC_STATS(ln, n_err); + } + break; + + case CSIO_LNE_DOWN_LINK: + break; + + default: + csio_ln_dbg(ln, + "unexp ln event %d recv from did:x%x in " + "ln state[uninit].\n", evt, ln->nport_id); + CSIO_INC_STATS(ln, n_evt_unexp); + break; + } /* switch event */ +} + +/* + * csio_lns_online - The request in online state. + * @ln - FCOE lnode. + * @evt - Event to be processed. + * + * Process the given lnode event which is currently in "online" state. + * Invoked with HW lock held. + * Return - none. 
+ */ +static void +csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt) +{ + struct csio_hw *hw = csio_lnode_to_hw(ln); + + CSIO_INC_STATS(ln, n_evt_sm[evt]); + switch (evt) { + case CSIO_LNE_LINKUP: + csio_ln_warn(ln, + "warn: FCOE link is up already " + "Ignoring linkup on port:%d\n", ln->portid); + CSIO_INC_STATS(ln, n_evt_drop); + break; + + case CSIO_LNE_FAB_INIT_DONE: + csio_set_state(&ln->sm, csio_lns_ready); + + spin_unlock_irq(&hw->lock); + csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP); + spin_lock_irq(&hw->lock); + + break; + + case CSIO_LNE_LINK_DOWN: + case CSIO_LNE_DOWN_LINK: + csio_set_state(&ln->sm, csio_lns_uninit); + if (csio_is_phys_ln(ln)) { + /* Remove FCF entry */ + list_del_init(&ln->fcfinfo->list); + } + break; + + default: + csio_ln_dbg(ln, + "unexp ln event %d recv from did:x%x in " + "ln state[uninit].\n", evt, ln->nport_id); + CSIO_INC_STATS(ln, n_evt_unexp); + + break; + } /* switch event */ +} + +/* + * csio_lns_ready - The request in ready state. + * @ln - FCOE lnode. + * @evt - Event to be processed. + * + * Process the given lnode event which is currently in "ready" state. + * Invoked with HW lock held. + * Return - none. + */ +static void +csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt) +{ + struct csio_hw *hw = csio_lnode_to_hw(ln); + + CSIO_INC_STATS(ln, n_evt_sm[evt]); + switch (evt) { + case CSIO_LNE_FAB_INIT_DONE: + csio_ln_dbg(ln, + "ignoring event %d recv from did x%x" + "in ln state[ready].\n", evt, ln->nport_id); + CSIO_INC_STATS(ln, n_evt_drop); + break; + + case CSIO_LNE_LINK_DOWN: + csio_set_state(&ln->sm, csio_lns_offline); + csio_post_event_rns(ln, CSIO_RNFE_DOWN); + + spin_unlock_irq(&hw->lock); + csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN); + spin_lock_irq(&hw->lock); + + if (csio_is_phys_ln(ln)) { + /* Remove FCF entry */ + list_del_init(&ln->fcfinfo->list); + } + break; + + case CSIO_LNE_DOWN_LINK: + csio_set_state(&ln->sm, csio_lns_offline); + csio_post_event_rns(ln, CSIO_RNFE_DOWN); + + /* Host need to issue aborts in case if FW has not returned + * WRs with status "ABORTED" + */ + spin_unlock_irq(&hw->lock); + csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN); + spin_lock_irq(&hw->lock); + + if (csio_is_phys_ln(ln)) { + /* Remove FCF entry */ + list_del_init(&ln->fcfinfo->list); + } + break; + + case CSIO_LNE_CLOSE: + csio_set_state(&ln->sm, csio_lns_uninit); + csio_post_event_rns(ln, CSIO_RNFE_CLOSE); + break; + + case CSIO_LNE_LOGO: + csio_set_state(&ln->sm, csio_lns_offline); + csio_post_event_rns(ln, CSIO_RNFE_DOWN); + break; + + default: + csio_ln_dbg(ln, + "unexp ln event %d recv from did:x%x in " + "ln state[uninit].\n", evt, ln->nport_id); + CSIO_INC_STATS(ln, n_evt_unexp); + CSIO_DB_ASSERT(0); + break; + } /* switch event */ +} + +/* + * csio_lns_offline - The request in offline state. + * @ln - FCOE lnode. + * @evt - Event to be processed. + * + * Process the given lnode event which is currently in "offline" state. + * Invoked with HW lock held. + * Return - none. 
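+ *
+ * Transitions handled below (a summary of the switch cases):
+ *	CSIO_LNE_LINKUP			- back to csio_lns_online; re-read
+ *					  the FCF entry (physical lnode) and
+ *					  the VNP parameters
+ *	CSIO_LNE_LINK_DOWN/DOWN_LINK/
+ *	CSIO_LNE_LOGO			- already offline, event dropped
+ *	CSIO_LNE_CLOSE			- move to csio_lns_uninit and post
+ *					  CSIO_RNFE_CLOSE to all rnodes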
+ */ +static void +csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt) +{ + struct csio_hw *hw = csio_lnode_to_hw(ln); + struct csio_lnode *rln = hw->rln; + int rv; + + CSIO_INC_STATS(ln, n_evt_sm[evt]); + switch (evt) { + case CSIO_LNE_LINKUP: + csio_set_state(&ln->sm, csio_lns_online); + /* Read FCF only for physical lnode */ + if (csio_is_phys_ln(ln)) { + rv = csio_ln_read_fcf_entry(ln, + csio_ln_read_fcf_cbfn); + if (rv != 0) { + /* TODO: Send HW RESET event */ + CSIO_INC_STATS(ln, n_err); + break; + } + + /* Add FCF record */ + list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead); + } + + rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn); + if (rv != 0) { + /* TODO: Send HW RESET event */ + CSIO_INC_STATS(ln, n_err); + } + break; + + case CSIO_LNE_LINK_DOWN: + case CSIO_LNE_DOWN_LINK: + case CSIO_LNE_LOGO: + csio_ln_dbg(ln, + "ignoring event %d recv from did x%x" + "in ln state[offline].\n", evt, ln->nport_id); + CSIO_INC_STATS(ln, n_evt_drop); + break; + + case CSIO_LNE_CLOSE: + csio_set_state(&ln->sm, csio_lns_uninit); + csio_post_event_rns(ln, CSIO_RNFE_CLOSE); + break; + + default: + csio_ln_dbg(ln, + "unexp ln event %d recv from did:x%x in " + "ln state[offline]\n", evt, ln->nport_id); + CSIO_INC_STATS(ln, n_evt_unexp); + CSIO_DB_ASSERT(0); + break; + } /* switch event */ +} + +/*****************************************************************************/ +/* END: Lnode SM */ +/*****************************************************************************/ + +static void +csio_free_fcfinfo(struct kref *kref) +{ + struct csio_fcf_info *fcfinfo = container_of(kref, + struct csio_fcf_info, kref); + kfree(fcfinfo); +} + +/* Helper routines for attributes */ +/* + * csio_lnode_state_to_str - Get current state of FCOE lnode. + * @ln - lnode + * @str - state of lnode. + * + */ +void +csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str) +{ + if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) { + strcpy(str, "UNINIT"); + return; + } + if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) { + strcpy(str, "READY"); + return; + } + if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) { + strcpy(str, "OFFLINE"); + return; + } + strcpy(str, "UNKNOWN"); +} /* csio_lnode_state_to_str */ + + +int +csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid, + struct fw_fcoe_port_stats *port_stats) +{ + struct csio_mb *mbp; + struct fw_fcoe_port_cmd_params portparams; + enum fw_retval retval; + int idx; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + csio_err(hw, "FCoE FCF PARAMS command out of memory!\n"); + return -EINVAL; + } + portparams.portid = portid; + + for (idx = 1; idx <= 3; idx++) { + portparams.idx = (idx-1)*6 + 1; + portparams.nstats = 6; + if (idx == 3) + portparams.nstats = 4; + csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, + &portparams, NULL); + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of FCoE port params failed!\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + csio_mb_process_portparams_rsp(hw, mbp, &retval, + &portparams, port_stats); + } + + mempool_free(mbp, hw->mb_mempool); + return 0; +} + +/* + * csio_ln_mgmt_wr_handler -Mgmt Work Request handler. + * @wr - WR. + * @len - WR len. + * This handler is invoked when an outstanding mgmt WR is completed. + * Its invoked in the context of FW event worker thread for every + * mgmt event received. + * Return - none. 
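+ *
+ * The io_req is recovered from the WR cookie: csio_ln_mgmt_submit_req()
+ * stores the request pointer in io_req->fw_handle, csio_ln_prep_ecwr()
+ * copies that into wr->cookie, and this handler casts it back:
+ *
+ *	io_req = (struct csio_ioreq *)(uintptr_t)wr_cmd->cookie;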
+ */ + +static void +csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len) +{ + struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); + struct csio_ioreq *io_req = NULL; + struct fw_fcoe_els_ct_wr *wr_cmd; + + + wr_cmd = (struct fw_fcoe_els_ct_wr *) wr; + + if (len < sizeof(struct fw_fcoe_els_ct_wr)) { + csio_err(mgmtm->hw, + "Invalid ELS CT WR length recvd, len:%x\n", len); + mgmtm->stats.n_err++; + return; + } + + io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie); + io_req->wr_status = csio_wr_status(wr_cmd); + + /* lookup ioreq exists in our active Q */ + spin_lock_irq(&hw->lock); + if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) { + csio_err(mgmtm->hw, + "Error- Invalid IO handle recv in WR. handle: %p\n", + io_req); + mgmtm->stats.n_err++; + spin_unlock_irq(&hw->lock); + return; + } + + mgmtm = csio_hw_to_mgmtm(hw); + + /* Dequeue from active queue */ + list_del_init(&io_req->sm.sm_list); + mgmtm->stats.n_active--; + spin_unlock_irq(&hw->lock); + + /* io_req will be freed by completion handler */ + if (io_req->io_cbfn) + io_req->io_cbfn(hw, io_req); +} + +/** + * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events. + * @hw: HW module + * @cpl_op: CPL opcode + * @cmd: FW cmd/WR. + * + * Process received FCoE cmd/WR event from FW. + */ +void +csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd) +{ + struct csio_lnode *ln; + struct csio_rnode *rn; + uint8_t portid, opcode = *(uint8_t *)cmd; + struct fw_fcoe_link_cmd *lcmd; + struct fw_wr_hdr *wr; + struct fw_rdev_wr *rdev_wr; + enum fw_fcoe_link_status lstatus; + uint32_t fcfi, rdev_flowid, vnpi; + enum csio_ln_ev evt; + + if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) { + + lcmd = (struct fw_fcoe_link_cmd *)cmd; + lstatus = lcmd->lstatus; + portid = FW_FCOE_LINK_CMD_PORTID_GET( + ntohl(lcmd->op_to_portid)); + fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi)); + vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd)); + + if (lstatus == FCOE_LINKUP) { + + /* HW lock here */ + spin_lock_irq(&hw->lock); + csio_handle_link_up(hw, portid, fcfi, vnpi); + spin_unlock_irq(&hw->lock); + /* HW un lock here */ + + } else if (lstatus == FCOE_LINKDOWN) { + + /* HW lock here */ + spin_lock_irq(&hw->lock); + csio_handle_link_down(hw, portid, fcfi, vnpi); + spin_unlock_irq(&hw->lock); + /* HW un lock here */ + } else { + csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n", + lcmd->lstatus); + CSIO_INC_STATS(hw, n_cpl_unexp); + } + } else if (cpl_op == CPL_FW6_PLD) { + wr = (struct fw_wr_hdr *) (cmd + 4); + if (FW_WR_OP_G(be32_to_cpu(wr->hi)) + == FW_RDEV_WR) { + + rdev_wr = (struct fw_rdev_wr *) (cmd + 4); + + rdev_flowid = FW_RDEV_WR_FLOWID_GET( + ntohl(rdev_wr->alloc_to_len16)); + vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET( + ntohl(rdev_wr->flags_to_assoc_flowid)); + + csio_dbg(hw, + "FW_RDEV_WR: flowid:x%x ev_cause:x%x " + "vnpi:0x%x\n", rdev_flowid, + rdev_wr->event_cause, vnpi); + + if (rdev_wr->protocol != PROT_FCOE) { + csio_err(hw, + "FW_RDEV_WR: invalid proto:x%x " + "received with flowid:x%x\n", + rdev_wr->protocol, + rdev_flowid); + CSIO_INC_STATS(hw, n_evt_drop); + return; + } + + /* HW lock here */ + spin_lock_irq(&hw->lock); + ln = csio_ln_lookup_by_vnpi(hw, vnpi); + if (!ln) { + csio_err(hw, + "FW_DEV_WR: invalid vnpi:x%x received " + "with flowid:x%x\n", vnpi, rdev_flowid); + CSIO_INC_STATS(hw, n_evt_drop); + goto out_pld; + } + + rn = csio_confirm_rnode(ln, rdev_flowid, + &rdev_wr->u.fcoe_rdev); + if (!rn) { + csio_ln_dbg(ln, + "Failed to confirm rnode " + "for flowid:x%x\n", 
rdev_flowid); + CSIO_INC_STATS(hw, n_evt_drop); + goto out_pld; + } + + /* save previous event for debugging */ + ln->prev_evt = ln->cur_evt; + ln->cur_evt = rdev_wr->event_cause; + CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]); + + /* Translate all the fabric events to lnode SM events */ + evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause); + if (evt) { + csio_ln_dbg(ln, + "Posting event to lnode event:%d " + "cause:%d flowid:x%x\n", evt, + rdev_wr->event_cause, rdev_flowid); + csio_post_event(&ln->sm, evt); + } + + /* Handover event to rn SM here. */ + csio_rnode_fwevt_handler(rn, rdev_wr->event_cause); +out_pld: + spin_unlock_irq(&hw->lock); + return; + } else { + csio_warn(hw, "unexpected WR op(0x%x) recv\n", + FW_WR_OP_G(be32_to_cpu((wr->hi)))); + CSIO_INC_STATS(hw, n_cpl_unexp); + } + } else if (cpl_op == CPL_FW6_MSG) { + wr = (struct fw_wr_hdr *) (cmd); + if (FW_WR_OP_G(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) { + csio_ln_mgmt_wr_handler(hw, wr, + sizeof(struct fw_fcoe_els_ct_wr)); + } else { + csio_warn(hw, "unexpected WR op(0x%x) recv\n", + FW_WR_OP_G(be32_to_cpu((wr->hi)))); + CSIO_INC_STATS(hw, n_cpl_unexp); + } + } else { + csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode); + CSIO_INC_STATS(hw, n_cpl_unexp); + } +} + +/** + * csio_lnode_start - Kickstart lnode discovery. + * @ln: lnode + * + * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command. + */ +int +csio_lnode_start(struct csio_lnode *ln) +{ + int rv = 0; + if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) { + rv = csio_fcoe_enable_link(ln, 1); + ln->flags |= CSIO_LNF_LINK_ENABLE; + } + + return rv; +} + +/** + * csio_lnode_stop - Stop the lnode. + * @ln: lnode + * + * This routine is invoked by HW module to stop lnode and its associated NPIV + * lnodes. + */ +void +csio_lnode_stop(struct csio_lnode *ln) +{ + csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK); + if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) { + csio_fcoe_enable_link(ln, 0); + ln->flags &= ~CSIO_LNF_LINK_ENABLE; + } + csio_ln_dbg(ln, "stopping ln :%p\n", ln); +} + +/** + * csio_lnode_close - Close an lnode. + * @ln: lnode + * + * This routine is invoked by HW module to close an lnode and its + * associated NPIV lnodes. Lnode and its associated NPIV lnodes are + * set to uninitialized state. + */ +void +csio_lnode_close(struct csio_lnode *ln) +{ + csio_post_event_lns(ln, CSIO_LNE_CLOSE); + if (csio_is_phys_ln(ln)) + ln->vnp_flowid = CSIO_INVALID_IDX; + + csio_ln_dbg(ln, "closed ln :%p\n", ln); +} + +/* + * csio_ln_prep_ecwr - Prepare ELS/CT WR. + * @io_req - IO request. + * @wr_len - WR len + * @immd_len - WR immediate data + * @sub_op - Sub opcode + * @sid - source portid. + * @did - destination portid + * @flow_id - flowid + * @fw_wr - ELS/CT WR to be prepared. 
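+ *
+ * Note: @wr_len is given in bytes and is converted to 16-byte units with
+ * DIV_ROUND_UP() before being programmed via FW_WR_LEN16_V(). For example,
+ * a 96-byte WR yields len16 = 6, while a 100-byte WR rounds up to len16 = 7.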
+ * Returns: 0 - on success + */ +static int +csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len, + uint32_t immd_len, uint8_t sub_op, uint32_t sid, + uint32_t did, uint32_t flow_id, uint8_t *fw_wr) +{ + struct fw_fcoe_els_ct_wr *wr; + __be32 port_id; + + wr = (struct fw_fcoe_els_ct_wr *)fw_wr; + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_FCOE_ELS_CT_WR) | + FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len)); + + wr_len = DIV_ROUND_UP(wr_len, 16); + wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(flow_id) | + FW_WR_LEN16_V(wr_len)); + wr->els_ct_type = sub_op; + wr->ctl_pri = 0; + wr->cp_en_class = 0; + wr->cookie = io_req->fw_handle; + wr->iqid = cpu_to_be16(csio_q_physiqid( + io_req->lnode->hwp, io_req->iq_idx)); + wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1); + wr->tmo_val = (uint8_t) io_req->tmo; + port_id = htonl(sid); + memcpy(wr->l_id, PORT_ID_PTR(port_id), 3); + port_id = htonl(did); + memcpy(wr->r_id, PORT_ID_PTR(port_id), 3); + + /* Prepare RSP SGL */ + wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len); + wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr); + return 0; +} + +/* + * csio_ln_mgmt_submit_wr - Post elsct work request. + * @mgmtm - mgmtm + * @io_req - io request. + * @sub_op - ELS or CT request type + * @pld - Dma Payload buffer + * @pld_len - Payload len + * Prepares ELSCT Work request and sents it to FW. + * Returns: 0 - on success + */ +static int +csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req, + uint8_t sub_op, struct csio_dma_buf *pld, + uint32_t pld_len) +{ + struct csio_wr_pair wrp; + struct csio_lnode *ln = io_req->lnode; + struct csio_rnode *rn = io_req->rnode; + struct csio_hw *hw = mgmtm->hw; + uint8_t fw_wr[64]; + struct ulptx_sgl dsgl; + uint32_t wr_size = 0; + uint8_t im_len = 0; + uint32_t wr_off = 0; + + int ret = 0; + + /* Calculate WR Size for this ELS REQ */ + wr_size = sizeof(struct fw_fcoe_els_ct_wr); + + /* Send as immediate data if pld < 256 */ + if (pld_len < 256) { + wr_size += ALIGN(pld_len, 8); + im_len = (uint8_t)pld_len; + } else + wr_size += sizeof(struct ulptx_sgl); + + /* Roundup WR size in units of 16 bytes */ + wr_size = ALIGN(wr_size, 16); + + /* Get WR to send ELS REQ */ + ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp); + if (ret != 0) { + csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n", + io_req, ret); + return ret; + } + + /* Prepare Generic WR used by all ELS/CT cmd */ + csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op, + ln->nport_id, rn->nport_id, + csio_rn_flowid(rn), + &fw_wr[0]); + + /* Copy ELS/CT WR CMD */ + csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off, + sizeof(struct fw_fcoe_els_ct_wr)); + wr_off += sizeof(struct fw_fcoe_els_ct_wr); + + /* Copy payload to Immediate section of WR */ + if (im_len) + csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len); + else { + /* Program DSGL to dma payload */ + dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | + ULPTX_MORE_F | ULPTX_NSGE_V(1)); + dsgl.len0 = cpu_to_be32(pld_len); + dsgl.addr0 = cpu_to_be64(pld->paddr); + csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8), + sizeof(struct ulptx_sgl)); + } + + /* Issue work request to xmit ELS/CT req to FW */ + csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false); + return ret; +} + +/* + * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request. + * @io_req - IO Request + * @io_cbfn - Completion handler. + * @req_type - ELS or CT request type + * @pld - Dma Payload buffer + * @pld_len - Payload len + * + * + * This API used submit managment ELS/CT request. 
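+ *
+ * A rough usage sketch (my_ct_done is a hypothetical completion handler
+ * and FCOE_CT is assumed as the request type; the caller owns io_req and
+ * its DMA payload buffer):
+ *
+ *	spin_lock_irq(&hw->lock);
+ *	rv = csio_ln_mgmt_submit_req(io_req, my_ct_done, FCOE_CT,
+ *				     &io_req->dma_buf, pld_len);
+ *	spin_unlock_irq(&hw->lock);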
+ * This called with hw lock held + * Returns: 0 - on success + * -ENOMEM - on error. + */ +static int +csio_ln_mgmt_submit_req(struct csio_ioreq *io_req, + void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *), + enum fcoe_cmn_type req_type, struct csio_dma_buf *pld, + uint32_t pld_len) +{ + struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode); + struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); + int rv; + + BUG_ON(pld_len > pld->len); + + io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */ + io_req->fw_handle = (uintptr_t) (io_req); + io_req->eq_idx = mgmtm->eq_idx; + io_req->iq_idx = mgmtm->iq_idx; + + rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len); + if (rv == 0) { + list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q); + mgmtm->stats.n_active++; + } + return rv; +} + +/* + * csio_ln_fdmi_init - FDMI Init entry point. + * @ln: lnode + */ +static int +csio_ln_fdmi_init(struct csio_lnode *ln) +{ + struct csio_hw *hw = csio_lnode_to_hw(ln); + struct csio_dma_buf *dma_buf; + + /* Allocate MGMT request required for FDMI */ + ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL); + if (!ln->mgmt_req) { + csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n"); + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + /* Allocate Dma buffers for FDMI response Payload */ + dma_buf = &ln->mgmt_req->dma_buf; + dma_buf->len = 2048; + dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len, + &dma_buf->paddr, GFP_KERNEL); + if (!dma_buf->vaddr) { + csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n"); + kfree(ln->mgmt_req); + ln->mgmt_req = NULL; + return -ENOMEM; + } + + ln->flags |= CSIO_LNF_FDMI_ENABLE; + return 0; +} + +/* + * csio_ln_fdmi_exit - FDMI exit entry point. + * @ln: lnode + */ +static int +csio_ln_fdmi_exit(struct csio_lnode *ln) +{ + struct csio_dma_buf *dma_buf; + struct csio_hw *hw = csio_lnode_to_hw(ln); + + if (!ln->mgmt_req) + return 0; + + dma_buf = &ln->mgmt_req->dma_buf; + if (dma_buf->vaddr) + dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr, + dma_buf->paddr); + + kfree(ln->mgmt_req); + return 0; +} + +int +csio_scan_done(struct csio_lnode *ln, unsigned long ticks, + unsigned long time, unsigned long max_scan_ticks, + unsigned long delta_scan_ticks) +{ + int rv = 0; + + if (time >= max_scan_ticks) + return 1; + + if (!ln->tgt_scan_tick) + ln->tgt_scan_tick = ticks; + + if (((ticks - ln->tgt_scan_tick) >= delta_scan_ticks)) { + if (!ln->last_scan_ntgts) + ln->last_scan_ntgts = ln->n_scsi_tgts; + else { + if (ln->last_scan_ntgts == ln->n_scsi_tgts) + return 1; + + ln->last_scan_ntgts = ln->n_scsi_tgts; + } + ln->tgt_scan_tick = ticks; + } + return rv; +} + +/* + * csio_notify_lnodes: + * @hw: HW module + * @note: Notification + * + * Called from the HW SM to fan out notifications to the + * Lnode SM. Since the HW SM is entered with lock held, + * there is no need to hold locks here. 
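+ *
+ * Mapping of notifications to lnode actions (see the switch below):
+ *	CSIO_LN_NOTIFY_HWREADY		- csio_lnode_start()
+ *	CSIO_LN_NOTIFY_HWRESET/HWREMOVE	- csio_lnode_close()
+ *	CSIO_LN_NOTIFY_HWSTOP		- csio_lnode_stop()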
+ * + */ +void +csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note) +{ + struct list_head *tmp; + struct csio_lnode *ln; + + csio_dbg(hw, "Notifying all nodes of event %d\n", note); + + /* Traverse children lnodes list and send evt */ + list_for_each(tmp, &hw->sln_head) { + ln = (struct csio_lnode *) tmp; + + switch (note) { + case CSIO_LN_NOTIFY_HWREADY: + csio_lnode_start(ln); + break; + + case CSIO_LN_NOTIFY_HWRESET: + case CSIO_LN_NOTIFY_HWREMOVE: + csio_lnode_close(ln); + break; + + case CSIO_LN_NOTIFY_HWSTOP: + csio_lnode_stop(ln); + break; + + default: + break; + + } + } +} + +/* + * csio_disable_lnodes: + * @hw: HW module + * @portid:port id + * @disable: disable/enable flag. + * If disable=1, disables all lnode hosted on given physical port. + * otherwise enables all the lnodes on given phsysical port. + * This routine need to called with hw lock held. + */ +void +csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable) +{ + struct list_head *tmp; + struct csio_lnode *ln; + + csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid); + + /* Traverse sibling lnodes list and send evt */ + list_for_each(tmp, &hw->sln_head) { + ln = (struct csio_lnode *) tmp; + if (ln->portid != portid) + continue; + + if (disable) + csio_lnode_stop(ln); + else + csio_lnode_start(ln); + } +} + +/* + * csio_ln_init - Initialize an lnode. + * @ln: lnode + * + */ +static int +csio_ln_init(struct csio_lnode *ln) +{ + int rv = -EINVAL; + struct csio_lnode *pln; + struct csio_hw *hw = csio_lnode_to_hw(ln); + + csio_init_state(&ln->sm, csio_lns_uninit); + ln->vnp_flowid = CSIO_INVALID_IDX; + ln->fcf_flowid = CSIO_INVALID_IDX; + + if (csio_is_root_ln(ln)) { + + /* This is the lnode used during initialization */ + + ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL); + if (!ln->fcfinfo) { + csio_ln_err(ln, "Failed to alloc FCF record\n"); + CSIO_INC_STATS(hw, n_err_nomem); + goto err; + } + + INIT_LIST_HEAD(&ln->fcf_lsthead); + kref_init(&ln->fcfinfo->kref); + + if (csio_fdmi_enable && csio_ln_fdmi_init(ln)) + goto err; + + } else { /* Either a non-root physical or a virtual lnode */ + + /* + * THe rest is common for non-root physical and NPIV lnodes. + * Just get references to all other modules + */ + + if (csio_is_npiv_ln(ln)) { + /* NPIV */ + pln = csio_parent_lnode(ln); + kref_get(&pln->fcfinfo->kref); + ln->fcfinfo = pln->fcfinfo; + } else { + /* Another non-root physical lnode (FCF) */ + ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), + GFP_KERNEL); + if (!ln->fcfinfo) { + csio_ln_err(ln, "Failed to alloc FCF info\n"); + CSIO_INC_STATS(hw, n_err_nomem); + goto err; + } + + kref_init(&ln->fcfinfo->kref); + + if (csio_fdmi_enable && csio_ln_fdmi_init(ln)) + goto err; + } + + } /* if (!csio_is_root_ln(ln)) */ + + return 0; +err: + return rv; +} + +static void +csio_ln_exit(struct csio_lnode *ln) +{ + struct csio_lnode *pln; + + csio_cleanup_rns(ln); + if (csio_is_npiv_ln(ln)) { + pln = csio_parent_lnode(ln); + kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo); + } else { + kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo); + if (csio_fdmi_enable) + csio_ln_fdmi_exit(ln); + } + ln->fcfinfo = NULL; +} + +/* + * csio_lnode_init - Initialize the members of an lnode. 
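+ *
+ * A minimal lifecycle sketch (pln is NULL for a physical lnode and points
+ * at the parent lnode for an NPIV lnode; error handling elided):
+ *
+ *	if (csio_lnode_init(ln, hw, pln) == 0) {
+ *		...
+ *		csio_lnode_exit(ln);
+ *	}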
+ * @ln: lnode + */ +int +csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw, + struct csio_lnode *pln) +{ + int rv = -EINVAL; + + /* Link this lnode to hw */ + csio_lnode_to_hw(ln) = hw; + + /* Link child to parent if child lnode */ + if (pln) + ln->pln = pln; + else + ln->pln = NULL; + + /* Initialize scsi_tgt and timers to zero */ + ln->n_scsi_tgts = 0; + ln->last_scan_ntgts = 0; + ln->tgt_scan_tick = 0; + + /* Initialize rnode list */ + INIT_LIST_HEAD(&ln->rnhead); + INIT_LIST_HEAD(&ln->cln_head); + + /* Initialize log level for debug */ + ln->params.log_level = hw->params.log_level; + + if (csio_ln_init(ln)) + goto err; + + /* Add lnode to list of sibling or children lnodes */ + spin_lock_irq(&hw->lock); + list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head); + if (pln) + pln->num_vports++; + spin_unlock_irq(&hw->lock); + + hw->num_lns++; + + return 0; +err: + csio_lnode_to_hw(ln) = NULL; + return rv; +} + +/** + * csio_lnode_exit - De-instantiate an lnode. + * @ln: lnode + * + */ +void +csio_lnode_exit(struct csio_lnode *ln) +{ + struct csio_hw *hw = csio_lnode_to_hw(ln); + + csio_ln_exit(ln); + + /* Remove this lnode from hw->sln_head */ + spin_lock_irq(&hw->lock); + + list_del_init(&ln->sm.sm_list); + + /* If it is children lnode, decrement the + * counter in its parent lnode + */ + if (ln->pln) + ln->pln->num_vports--; + + /* Update root lnode pointer */ + if (list_empty(&hw->sln_head)) + hw->rln = NULL; + else + hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head); + + spin_unlock_irq(&hw->lock); + + csio_lnode_to_hw(ln) = NULL; + hw->num_lns--; +} diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h new file mode 100644 index 000000000..372a67d12 --- /dev/null +++ b/drivers/scsi/csiostor/csio_lnode.h @@ -0,0 +1,255 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CSIO_LNODE_H__ +#define __CSIO_LNODE_H__ + +#include +#include +#include +#include + + +#include "csio_defs.h" +#include "csio_hw.h" + +#define CSIO_FCOE_MAX_NPIV 128 +#define CSIO_FCOE_MAX_RNODES 2048 + +/* FDMI port attribute unknown speed */ +#define CSIO_HBA_PORTSPEED_UNKNOWN 0x8000 + +extern int csio_fcoe_rnodes; +extern int csio_fdmi_enable; + +/* State machine evets */ +enum csio_ln_ev { + CSIO_LNE_NONE = (uint32_t)0, + CSIO_LNE_LINKUP, + CSIO_LNE_FAB_INIT_DONE, + CSIO_LNE_LINK_DOWN, + CSIO_LNE_DOWN_LINK, + CSIO_LNE_LOGO, + CSIO_LNE_CLOSE, + CSIO_LNE_MAX_EVENT, +}; + + +struct csio_fcf_info { + struct list_head list; + uint8_t priority; + uint8_t mac[6]; + uint8_t name_id[8]; + uint8_t fabric[8]; + uint16_t vf_id; + uint8_t vlan_id; + uint16_t max_fcoe_size; + uint8_t fc_map[3]; + uint32_t fka_adv; + uint32_t fcfi; + uint8_t get_next:1; + uint8_t link_aff:1; + uint8_t fpma:1; + uint8_t spma:1; + uint8_t login:1; + uint8_t portid; + uint8_t spma_mac[6]; + struct kref kref; +}; + +/* Defines for flags */ +#define CSIO_LNF_FIPSUPP 0x00000001 /* Fip Supported */ +#define CSIO_LNF_NPIVSUPP 0x00000002 /* NPIV supported */ +#define CSIO_LNF_LINK_ENABLE 0x00000004 /* Link enabled */ +#define CSIO_LNF_FDMI_ENABLE 0x00000008 /* FDMI support */ + +/* Transport events */ +enum csio_ln_fc_evt { + CSIO_LN_FC_LINKUP = 1, + CSIO_LN_FC_LINKDOWN, + CSIO_LN_FC_RSCN, + CSIO_LN_FC_ATTRIB_UPDATE, +}; + +/* Lnode stats */ +struct csio_lnode_stats { + uint32_t n_link_up; /* Link down */ + uint32_t n_link_down; /* Link up */ + uint32_t n_err; /* error */ + uint32_t n_err_nomem; /* memory not available */ + uint32_t n_inval_parm; /* Invalid parameters */ + uint32_t n_evt_unexp; /* unexpected event */ + uint32_t n_evt_drop; /* dropped event */ + uint32_t n_rnode_match; /* matched rnode */ + uint32_t n_dev_loss_tmo; /* Device loss timeout */ + uint32_t n_fdmi_err; /* fdmi err */ + uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */ + enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */ + uint32_t n_rnode_alloc; /* rnode allocated */ + uint32_t n_rnode_free; /* rnode freed */ + uint32_t n_rnode_nomem; /* rnode alloc failure */ + uint32_t n_input_requests; /* Input Requests */ + uint32_t n_output_requests; /* Output Requests */ + uint32_t n_control_requests; /* Control Requests */ + uint32_t n_input_bytes; /* Input Bytes */ + uint32_t n_output_bytes; /* Output Bytes */ + uint32_t rsvd1; +}; + +/* Common Lnode params */ +struct csio_lnode_params { + uint32_t ra_tov; + uint32_t fcfi; + uint32_t log_level; /* Module level for debugging */ +}; + +struct csio_service_parms { + struct fc_els_csp csp; /* Common service parms */ + uint8_t wwpn[8]; /* WWPN */ + uint8_t wwnn[8]; /* WWNN */ + struct fc_els_cssp clsp[4]; /* Class service params */ + uint8_t vvl[16]; /* Vendor version level */ +}; + +/* Lnode */ +struct csio_lnode { + struct csio_sm sm; /* State machine + sibling + * lnode list. 
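+						 * (sm.sm_list also serves as
+						 * the linkage into hw->sln_head
+						 * or the parent's cln_head,
+						 * see csio_lnode_init().)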
+ */ + struct csio_hw *hwp; /* Pointer to the HW module */ + uint8_t portid; /* Port ID */ + uint8_t rsvd1; + uint16_t rsvd2; + uint32_t dev_num; /* Device number */ + uint32_t flags; /* Flags */ + struct list_head fcf_lsthead; /* FCF entries */ + struct csio_fcf_info *fcfinfo; /* FCF in use */ + struct csio_ioreq *mgmt_req; /* MGMT request */ + + /* FCoE identifiers */ + uint8_t mac[6]; + uint32_t nport_id; + struct csio_service_parms ln_sparm; /* Service parms */ + + /* Firmware identifiers */ + uint32_t fcf_flowid; /*fcf flowid */ + uint32_t vnp_flowid; + uint16_t ssn_cnt; /* Registered Session */ + uint8_t cur_evt; /* Current event */ + uint8_t prev_evt; /* Previous event */ + + /* Children */ + struct list_head cln_head; /* Head of the children lnode + * list. + */ + uint32_t num_vports; /* Total NPIV/children LNodes*/ + struct csio_lnode *pln; /* Parent lnode of child + * lnodes. + */ + struct list_head cmpl_q; /* Pending I/Os on this lnode */ + + /* Remote node information */ + struct list_head rnhead; /* Head of rnode list */ + uint32_t num_reg_rnodes; /* Number of rnodes registered + * with the host. + */ + uint32_t n_scsi_tgts; /* Number of scsi targets + * found + */ + uint32_t last_scan_ntgts;/* Number of scsi targets + * found per last scan. + */ + uint32_t tgt_scan_tick; /* timer started after + * new tgt found + */ + /* FC transport data */ + struct fc_vport *fc_vport; + struct fc_host_statistics fch_stats; + + struct csio_lnode_stats stats; /* Common lnode stats */ + struct csio_lnode_params params; /* Common lnode params */ +}; + +#define csio_lnode_to_hw(ln) ((ln)->hwp) +#define csio_root_lnode(ln) (csio_lnode_to_hw((ln))->rln) +#define csio_parent_lnode(ln) ((ln)->pln) +#define csio_ln_flowid(ln) ((ln)->vnp_flowid) +#define csio_ln_wwpn(ln) ((ln)->ln_sparm.wwpn) +#define csio_ln_wwnn(ln) ((ln)->ln_sparm.wwnn) + +#define csio_is_root_ln(ln) (((ln) == csio_root_lnode((ln))) ? 1 : 0) +#define csio_is_phys_ln(ln) (((ln)->pln == NULL) ? 1 : 0) +#define csio_is_npiv_ln(ln) (((ln)->pln != NULL) ? 1 : 0) + + +#define csio_ln_dbg(_ln, _fmt, ...) \ + csio_dbg(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \ + CSIO_DEVID_LO(_ln), ##__VA_ARGS__); + +#define csio_ln_err(_ln, _fmt, ...) \ + csio_err(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \ + CSIO_DEVID_LO(_ln), ##__VA_ARGS__); + +#define csio_ln_warn(_ln, _fmt, ...) 
\ + csio_warn(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \ + CSIO_DEVID_LO(_ln), ##__VA_ARGS__); + +/* HW->Lnode notifications */ +enum csio_ln_notify { + CSIO_LN_NOTIFY_HWREADY = 1, + CSIO_LN_NOTIFY_HWSTOP, + CSIO_LN_NOTIFY_HWREMOVE, + CSIO_LN_NOTIFY_HWRESET, +}; + +void csio_fcoe_fwevt_handler(struct csio_hw *, __u8 cpl_op, __be64 *); +int csio_is_lnode_ready(struct csio_lnode *); +void csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str); +struct csio_lnode *csio_lnode_lookup_by_wwpn(struct csio_hw *, uint8_t *); +int csio_get_phy_port_stats(struct csio_hw *, uint8_t , + struct fw_fcoe_port_stats *); +int csio_scan_done(struct csio_lnode *, unsigned long, unsigned long, + unsigned long, unsigned long); +void csio_notify_lnodes(struct csio_hw *, enum csio_ln_notify); +void csio_disable_lnodes(struct csio_hw *, uint8_t, bool); +void csio_lnode_async_event(struct csio_lnode *, enum csio_ln_fc_evt); +int csio_ln_fdmi_start(struct csio_lnode *, void *); +int csio_lnode_start(struct csio_lnode *); +void csio_lnode_stop(struct csio_lnode *); +void csio_lnode_close(struct csio_lnode *); +int csio_lnode_init(struct csio_lnode *, struct csio_hw *, + struct csio_lnode *); +void csio_lnode_exit(struct csio_lnode *); + +#endif /* ifndef __CSIO_LNODE_H__ */ diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c new file mode 100644 index 000000000..94810b19e --- /dev/null +++ b/drivers/scsi/csiostor/csio_mb.c @@ -0,0 +1,1690 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#include "csio_hw.h" +#include "csio_lnode.h" +#include "csio_rnode.h" +#include "csio_mb.h" +#include "csio_wr.h" + +#define csio_mb_is_host_owner(__owner) ((__owner) == CSIO_MBOWNER_PL) + +/* MB Command/Response Helpers */ +/* + * csio_mb_fw_retval - FW return value from a mailbox response. 
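+ *
+ * Typical use once a mailbox command has completed (a sketch; mbp was
+ * allocated from hw->mb_mempool and is returned to it afterwards):
+ *
+ *	if (csio_mb_fw_retval(mbp) != FW_SUCCESS)
+ *		csio_err(hw, "mailbox command failed\n");
+ *	mempool_free(mbp, hw->mb_mempool);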
+ * @mbp: Mailbox structure + * + */ +enum fw_retval +csio_mb_fw_retval(struct csio_mb *mbp) +{ + struct fw_cmd_hdr *hdr; + + hdr = (struct fw_cmd_hdr *)(mbp->mb); + + return FW_CMD_RETVAL_G(ntohl(hdr->lo)); +} + +/* + * csio_mb_hello - FW HELLO command helper + * @hw: The HW structure + * @mbp: Mailbox structure + * @m_mbox: Master mailbox number, if any. + * @a_mbox: Mailbox number for asycn notifications. + * @master: Device mastership. + * @cbfn: Callback, if any. + * + */ +void +csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, + uint32_t m_mbox, uint32_t a_mbox, enum csio_dev_master master, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); + + cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_HELLO_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + cmdp->err_to_clearinit = htonl( + FW_HELLO_CMD_MASTERDIS_V(master == CSIO_MASTER_CANT) | + FW_HELLO_CMD_MASTERFORCE_V(master == CSIO_MASTER_MUST) | + FW_HELLO_CMD_MBMASTER_V(master == CSIO_MASTER_MUST ? + m_mbox : FW_HELLO_CMD_MBMASTER_M) | + FW_HELLO_CMD_MBASYNCNOT_V(a_mbox) | + FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) | + FW_HELLO_CMD_CLEARINIT_F); + +} + +/* + * csio_mb_process_hello_rsp - FW HELLO response processing helper + * @hw: The HW structure + * @mbp: Mailbox structure + * @retval: Mailbox return value from Firmware + * @state: State that the function is in. + * @mpfn: Master pfn + * + */ +void +csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp, + enum fw_retval *retval, enum csio_dev_state *state, + uint8_t *mpfn) +{ + struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb); + uint32_t value; + + *retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16)); + + if (*retval == FW_SUCCESS) { + hw->fwrev = ntohl(rsp->fwrev); + + value = ntohl(rsp->err_to_clearinit); + *mpfn = FW_HELLO_CMD_MBMASTER_G(value); + + if (value & FW_HELLO_CMD_INIT_F) + *state = CSIO_DEV_STATE_INIT; + else if (value & FW_HELLO_CMD_ERR_F) + *state = CSIO_DEV_STATE_ERR; + else + *state = CSIO_DEV_STATE_UNINIT; + } +} + +/* + * csio_mb_bye - FW BYE command helper + * @hw: The HW structure + * @mbp: Mailbox structure + * @cbfn: Callback, if any. + * + */ +void +csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_bye_cmd *cmdp = (struct fw_bye_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); + + cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_BYE_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + +} + +/* + * csio_mb_reset - FW RESET command helper + * @hw: The HW structure + * @mbp: Mailbox structure + * @reset: Type of reset. + * @cbfn: Callback, if any. + * + */ +void +csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, + int reset, int halt, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_reset_cmd *cmdp = (struct fw_reset_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); + + cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_RESET_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + cmdp->val = htonl(reset); + cmdp->halt_pkd = htonl(halt); + +} + +/* + * csio_mb_params - FW PARAMS command helper + * @hw: The HW structure + * @mbp: Mailbox structure + * @tmo: Command timeout. + * @pf: PF number. 
+ * @vf: VF number. + * @nparams: Number of parameters + * @params: Parameter mnemonic array. + * @val: Parameter value array. + * @wr: Write/Read PARAMS. + * @cbfn: Callback, if any. + * + */ +void +csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, + unsigned int pf, unsigned int vf, unsigned int nparams, + const u32 *params, u32 *val, bool wr, + void (*cbfn)(struct csio_hw *, struct csio_mb *)) +{ + uint32_t i; + uint32_t temp_params = 0, temp_val = 0; + struct fw_params_cmd *cmdp = (struct fw_params_cmd *)(mbp->mb); + __be32 *p = &cmdp->param[0].mnem; + + CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); + + cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | + (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F) | + FW_PARAMS_CMD_PFN_V(pf) | + FW_PARAMS_CMD_VFN_V(vf)); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + + /* Write Params */ + if (wr) { + while (nparams--) { + temp_params = *params++; + temp_val = *val++; + + *p++ = htonl(temp_params); + *p++ = htonl(temp_val); + } + } else { + for (i = 0; i < nparams; i++, p += 2) { + temp_params = *params++; + *p = htonl(temp_params); + } + } + +} + +/* + * csio_mb_process_read_params_rsp - FW PARAMS response processing helper + * @hw: The HW structure + * @mbp: Mailbox structure + * @retval: Mailbox return value from Firmware + * @nparams: Number of parameters + * @val: Parameter value array. + * + */ +void +csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp, + enum fw_retval *retval, unsigned int nparams, + u32 *val) +{ + struct fw_params_cmd *rsp = (struct fw_params_cmd *)(mbp->mb); + uint32_t i; + __be32 *p = &rsp->param[0].val; + + *retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16)); + + if (*retval == FW_SUCCESS) + for (i = 0; i < nparams; i++, p += 2) + *val++ = ntohl(*p); +} + +/* + * csio_mb_ldst - FW LDST command + * @hw: The HW structure + * @mbp: Mailbox structure + * @tmo: timeout + * @reg: register + * + */ +void +csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg) +{ + struct fw_ldst_cmd *ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb); + CSIO_INIT_MBP(mbp, ldst_cmd, tmo, hw, NULL, 1); + + /* + * Construct and send the Firmware LDST Command to retrieve the + * specified PCI-E Configuration Space register. + */ + ldst_cmd->op_to_addrspace = + htonl(FW_CMD_OP_V(FW_LDST_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE)); + ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd)); + ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1); + ldst_cmd->u.pcie.ctrl_to_fn = + (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(hw->pfn)); + ldst_cmd->u.pcie.r = (uint8_t)reg; +} + +/* + * + * csio_mb_caps_config - FW Read/Write Capabilities command helper + * @hw: The HW structure + * @mbp: Mailbox structure + * @wr: Write if 1, Read if 0 + * @init: Turn on initiator mode. + * @tgt: Turn on target mode. + * @cofld: If 1, Control Offload for FCoE + * @cbfn: Callback, if any. + * + * This helper assumes that cmdp has MB payload from a previous CAPS + * read command. + */ +void +csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, + bool wr, bool init, bool tgt, bool cofld, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_caps_config_cmd *cmdp = + (struct fw_caps_config_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1); + + cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + (wr ? 
FW_CMD_WRITE_F : FW_CMD_READ_F)); + cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + + /* Read config */ + if (!wr) + return; + + /* Write config */ + cmdp->fcoecaps = 0; + + if (cofld) + cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_CTRL_OFLD); + if (init) + cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_INITIATOR); + if (tgt) + cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET); +} + +/* + * csio_mb_port- FW PORT command helper + * @hw: The HW structure + * @mbp: Mailbox structure + * @tmo: COmmand timeout + * @portid: Port ID to get/set info + * @wr: Write/Read PORT information. + * @fc: Flow control + * @caps: Port capabilites to set. + * @cbfn: Callback, if any. + * + */ +void +csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, + u8 portid, bool wr, uint32_t fc, uint16_t fw_caps, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); + + cmdp->op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | + (wr ? FW_CMD_EXEC_F : FW_CMD_READ_F) | + FW_PORT_CMD_PORTID_V(portid)); + if (!wr) { + cmdp->action_to_len16 = htonl( + FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 + ? FW_PORT_ACTION_GET_PORT_INFO + : FW_PORT_ACTION_GET_PORT_INFO32) | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + return; + } + + /* Set port */ + cmdp->action_to_len16 = htonl( + FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 + ? FW_PORT_ACTION_L1_CFG + : FW_PORT_ACTION_L1_CFG32) | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + + if (fw_caps == FW_CAPS16) + cmdp->u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(fc)); + else + cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc); +} + +/* + * csio_mb_process_read_port_rsp - FW PORT command response processing helper + * @hw: The HW structure + * @mbp: Mailbox structure + * @retval: Mailbox return value from Firmware + * @caps: port capabilities + * + */ +void +csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp, + enum fw_retval *retval, uint16_t fw_caps, + u32 *pcaps, u32 *acaps) +{ + struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb); + + *retval = FW_CMD_RETVAL_G(ntohl(rsp->action_to_len16)); + + if (*retval == FW_SUCCESS) { + if (fw_caps == FW_CAPS16) { + *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap)); + *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap)); + } else { + *pcaps = be32_to_cpu(rsp->u.info32.pcaps32); + *acaps = be32_to_cpu(rsp->u.info32.acaps32); + } + } +} + +/* + * csio_mb_initialize - FW INITIALIZE command helper + * @hw: The HW structure + * @mbp: Mailbox structure + * @tmo: COmmand timeout + * @cbfn: Callback, if any. + * + */ +void +csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_initialize_cmd *cmdp = (struct fw_initialize_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); + + cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_INITIALIZE_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + +} + +/* + * csio_mb_iq_alloc - Initializes the mailbox to allocate an + * Ingress DMA queue in the firmware. + * + * @hw: The hw structure + * @mbp: Mailbox structure to initialize + * @priv: Private object + * @mb_tmo: Mailbox time-out period (in ms). + * @iq_params: Ingress queue params needed for allocation. 
+ * @cbfn: The call-back function + * + * + */ +static void +csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv, + uint32_t mb_tmo, struct csio_iq_params *iq_params, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); + + cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | + FW_CMD_REQUEST_F | FW_CMD_EXEC_F | + FW_IQ_CMD_PFN_V(iq_params->pfn) | + FW_IQ_CMD_VFN_V(iq_params->vfn)); + + cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + + cmdp->type_to_iqandstindex = htonl( + FW_IQ_CMD_VIID_V(iq_params->viid) | + FW_IQ_CMD_TYPE_V(iq_params->type) | + FW_IQ_CMD_IQASYNCH_V(iq_params->iqasynch)); + + cmdp->fl0size = htons(iq_params->fl0size); + cmdp->fl0size = htons(iq_params->fl1size); + +} /* csio_mb_iq_alloc */ + +/* + * csio_mb_iq_write - Initializes the mailbox for writing into an + * Ingress DMA Queue. + * + * @hw: The HW structure + * @mbp: Mailbox structure to initialize + * @priv: Private object + * @mb_tmo: Mailbox time-out period (in ms). + * @cascaded_req: TRUE - if this request is cascased with iq-alloc request. + * @iq_params: Ingress queue params needed for writing. + * @cbfn: The call-back function + * + * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating, + * because this IQ write request can be cascaded with a previous + * IQ alloc request, and we dont want to over-write the bits set by + * that request. This logic will work even in a non-cascaded case, since the + * cmdp structure is zeroed out by CSIO_INIT_MBP. + */ +static void +csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv, + uint32_t mb_tmo, bool cascaded_req, + struct csio_iq_params *iq_params, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb); + + uint32_t iq_start_stop = (iq_params->iq_start) ? + FW_IQ_CMD_IQSTART_F : + FW_IQ_CMD_IQSTOP_F; + int relaxed = !(hw->flags & CSIO_HWF_ROOT_NO_RELAXED_ORDERING); + + /* + * If this IQ write is cascaded with IQ alloc request, do not + * re-initialize with 0's. 
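+	 * (csio_mb_iq_alloc_write() below relies on this: it calls
+	 * csio_mb_iq_alloc() first and then this routine with
+	 * cascaded_req = true, so both requests build up the same
+	 * mailbox payload.)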
+ * + */ + if (!cascaded_req) + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); + + cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_IQ_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_IQ_CMD_PFN_V(iq_params->pfn) | + FW_IQ_CMD_VFN_V(iq_params->vfn)); + cmdp->alloc_to_len16 |= htonl(iq_start_stop | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + cmdp->iqid |= htons(iq_params->iqid); + cmdp->fl0id |= htons(iq_params->fl0id); + cmdp->fl1id |= htons(iq_params->fl1id); + cmdp->type_to_iqandstindex |= htonl( + FW_IQ_CMD_IQANDST_V(iq_params->iqandst) | + FW_IQ_CMD_IQANUS_V(iq_params->iqanus) | + FW_IQ_CMD_IQANUD_V(iq_params->iqanud) | + FW_IQ_CMD_IQANDSTINDEX_V(iq_params->iqandstindex)); + cmdp->iqdroprss_to_iqesize |= htons( + FW_IQ_CMD_IQPCIECH_V(iq_params->iqpciech) | + FW_IQ_CMD_IQDCAEN_V(iq_params->iqdcaen) | + FW_IQ_CMD_IQDCACPU_V(iq_params->iqdcacpu) | + FW_IQ_CMD_IQINTCNTTHRESH_V(iq_params->iqintcntthresh) | + FW_IQ_CMD_IQCPRIO_V(iq_params->iqcprio) | + FW_IQ_CMD_IQESIZE_V(iq_params->iqesize)); + + cmdp->iqsize |= htons(iq_params->iqsize); + cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr); + + if (iq_params->type == 0) { + cmdp->iqns_to_fl0congen |= htonl( + FW_IQ_CMD_IQFLINTIQHSEN_V(iq_params->iqflintiqhsen)| + FW_IQ_CMD_IQFLINTCONGEN_V(iq_params->iqflintcongen)); + } + + if (iq_params->fl0size && iq_params->fl0addr && + (iq_params->fl0id != 0xFFFF)) { + + cmdp->iqns_to_fl0congen |= htonl( + FW_IQ_CMD_FL0HOSTFCMODE_V(iq_params->fl0hostfcmode)| + FW_IQ_CMD_FL0CPRIO_V(iq_params->fl0cprio) | + FW_IQ_CMD_FL0FETCHRO_V(relaxed) | + FW_IQ_CMD_FL0DATARO_V(relaxed) | + FW_IQ_CMD_FL0PADEN_V(iq_params->fl0paden) | + FW_IQ_CMD_FL0PACKEN_V(iq_params->fl0packen)); + cmdp->fl0dcaen_to_fl0cidxfthresh |= htons( + FW_IQ_CMD_FL0DCAEN_V(iq_params->fl0dcaen) | + FW_IQ_CMD_FL0DCACPU_V(iq_params->fl0dcacpu) | + FW_IQ_CMD_FL0FBMIN_V(iq_params->fl0fbmin) | + FW_IQ_CMD_FL0FBMAX_V(iq_params->fl0fbmax) | + FW_IQ_CMD_FL0CIDXFTHRESH_V(iq_params->fl0cidxfthresh)); + cmdp->fl0size |= htons(iq_params->fl0size); + cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr); + } +} /* csio_mb_iq_write */ + +/* + * csio_mb_iq_alloc_write - Initializes the mailbox for allocating an + * Ingress DMA Queue. + * + * @hw: The HW structure + * @mbp: Mailbox structure to initialize + * @priv: Private data. + * @mb_tmo: Mailbox time-out period (in ms). + * @iq_params: Ingress queue params needed for allocation & writing. + * @cbfn: The call-back function + * + * + */ +void +csio_mb_iq_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv, + uint32_t mb_tmo, struct csio_iq_params *iq_params, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + csio_mb_iq_alloc(hw, mbp, priv, mb_tmo, iq_params, cbfn); + csio_mb_iq_write(hw, mbp, priv, mb_tmo, true, iq_params, cbfn); +} /* csio_mb_iq_alloc_write */ + +/* + * csio_mb_iq_alloc_write_rsp - Process the allocation & writing + * of ingress DMA queue mailbox's response. + * + * @hw: The HW structure. + * @mbp: Mailbox structure to initialize. + * @retval: Firmware return value. + * @iq_params: Ingress queue parameters, after allocation and write. 
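+ *
+ * A rough polled-mode sketch (assuming a NULL callback lets the mailbox
+ * be issued synchronously, as csio_get_phy_port_stats() in csio_lnode.c
+ * does; retval and iq_params are caller-provided):
+ *
+ *	csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
+ *			       &iq_params, NULL);
+ *	if (csio_mb_issue(hw, mbp) == 0)
+ *		csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iq_params);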
+ * + */ +void +csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp, + enum fw_retval *ret_val, + struct csio_iq_params *iq_params) +{ + struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb); + + *ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)); + if (*ret_val == FW_SUCCESS) { + iq_params->physiqid = ntohs(rsp->physiqid); + iq_params->iqid = ntohs(rsp->iqid); + iq_params->fl0id = ntohs(rsp->fl0id); + iq_params->fl1id = ntohs(rsp->fl1id); + } else { + iq_params->physiqid = iq_params->iqid = + iq_params->fl0id = iq_params->fl1id = 0; + } +} /* csio_mb_iq_alloc_write_rsp */ + +/* + * csio_mb_iq_free - Initializes the mailbox for freeing a + * specified Ingress DMA Queue. + * + * @hw: The HW structure + * @mbp: Mailbox structure to initialize + * @priv: Private data + * @mb_tmo: Mailbox time-out period (in ms). + * @iq_params: Parameters of ingress queue, that is to be freed. + * @cbfn: The call-back function + * + * + */ +void +csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv, + uint32_t mb_tmo, struct csio_iq_params *iq_params, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); + + cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | + FW_CMD_REQUEST_F | FW_CMD_EXEC_F | + FW_IQ_CMD_PFN_V(iq_params->pfn) | + FW_IQ_CMD_VFN_V(iq_params->vfn)); + cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iq_params->type)); + + cmdp->iqid = htons(iq_params->iqid); + cmdp->fl0id = htons(iq_params->fl0id); + cmdp->fl1id = htons(iq_params->fl1id); + +} /* csio_mb_iq_free */ + +/* + * csio_mb_eq_ofld_alloc - Initializes the mailbox for allocating + * an offload-egress queue. + * + * @hw: The HW structure + * @mbp: Mailbox structure to initialize + * @priv: Private data + * @mb_tmo: Mailbox time-out period (in ms). + * @eq_ofld_params: (Offload) Egress queue parameters. + * @cbfn: The call-back function + * + * + */ +static void +csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv, + uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); + cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | + FW_CMD_REQUEST_F | FW_CMD_EXEC_F | + FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) | + FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn)); + cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + +} /* csio_mb_eq_ofld_alloc */ + +/* + * csio_mb_eq_ofld_write - Initializes the mailbox for writing + * an alloacted offload-egress queue. + * + * @hw: The HW structure + * @mbp: Mailbox structure to initialize + * @priv: Private data + * @mb_tmo: Mailbox time-out period (in ms). + * @cascaded_req: TRUE - if this request is cascased with Eq-alloc request. + * @eq_ofld_params: (Offload) Egress queue parameters. + * @cbfn: The call-back function + * + * + * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating, + * because this EQ write request can be cascaded with a previous + * EQ alloc request, and we dont want to over-write the bits set by + * that request. This logic will work even in a non-cascaded case, since the + * cmdp structure is zeroed out by CSIO_INIT_MBP. 
+ */ +static void +csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv, + uint32_t mb_tmo, bool cascaded_req, + struct csio_eq_params *eq_ofld_params, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb); + + uint32_t eq_start_stop = (eq_ofld_params->eqstart) ? + FW_EQ_OFLD_CMD_EQSTART_F : + FW_EQ_OFLD_CMD_EQSTOP_F; + + /* + * If this EQ write is cascaded with EQ alloc request, do not + * re-initialize with 0's. + * + */ + if (!cascaded_req) + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); + + cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) | + FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn)); + cmdp->alloc_to_len16 |= htonl(eq_start_stop | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + + cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid)); + + cmdp->fetchszm_to_iqid |= htonl( + FW_EQ_OFLD_CMD_HOSTFCMODE_V(eq_ofld_params->hostfcmode) | + FW_EQ_OFLD_CMD_CPRIO_V(eq_ofld_params->cprio) | + FW_EQ_OFLD_CMD_PCIECHN_V(eq_ofld_params->pciechn) | + FW_EQ_OFLD_CMD_IQID_V(eq_ofld_params->iqid)); + + cmdp->dcaen_to_eqsize |= htonl( + FW_EQ_OFLD_CMD_DCAEN_V(eq_ofld_params->dcaen) | + FW_EQ_OFLD_CMD_DCACPU_V(eq_ofld_params->dcacpu) | + FW_EQ_OFLD_CMD_FBMIN_V(eq_ofld_params->fbmin) | + FW_EQ_OFLD_CMD_FBMAX_V(eq_ofld_params->fbmax) | + FW_EQ_OFLD_CMD_CIDXFTHRESHO_V(eq_ofld_params->cidxfthresho) | + FW_EQ_OFLD_CMD_CIDXFTHRESH_V(eq_ofld_params->cidxfthresh) | + FW_EQ_OFLD_CMD_EQSIZE_V(eq_ofld_params->eqsize)); + + cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr); + +} /* csio_mb_eq_ofld_write */ + +/* + * csio_mb_eq_ofld_alloc_write - Initializes the mailbox for allocation + * writing into an Engress DMA Queue. + * + * @hw: The HW structure + * @mbp: Mailbox structure to initialize + * @priv: Private data. + * @mb_tmo: Mailbox time-out period (in ms). + * @eq_ofld_params: (Offload) Egress queue parameters. + * @cbfn: The call-back function + * + * + */ +void +csio_mb_eq_ofld_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, + void *priv, uint32_t mb_tmo, + struct csio_eq_params *eq_ofld_params, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + csio_mb_eq_ofld_alloc(hw, mbp, priv, mb_tmo, eq_ofld_params, cbfn); + csio_mb_eq_ofld_write(hw, mbp, priv, mb_tmo, true, + eq_ofld_params, cbfn); +} /* csio_mb_eq_ofld_alloc_write */ + +/* + * csio_mb_eq_ofld_alloc_write_rsp - Process the allocation + * & write egress DMA queue mailbox's response. + * + * @hw: The HW structure. + * @mbp: Mailbox structure to initialize. + * @retval: Firmware return value. + * @eq_ofld_params: (Offload) Egress queue parameters. + * + */ +void +csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw, + struct csio_mb *mbp, enum fw_retval *ret_val, + struct csio_eq_params *eq_ofld_params) +{ + struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb); + + *ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)); + + if (*ret_val == FW_SUCCESS) { + eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_G( + ntohl(rsp->eqid_pkd)); + eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_G( + ntohl(rsp->physeqid_pkd)); + } else + eq_ofld_params->eqid = 0; + +} /* csio_mb_eq_ofld_alloc_write_rsp */ + +/* + * csio_mb_eq_ofld_free - Initializes the mailbox for freeing a + * specified Engress DMA Queue. + * + * @hw: The HW structure + * @mbp: Mailbox structure to initialize + * @priv: Private data area. + * @mb_tmo: Mailbox time-out period (in ms). 
+ * @eq_ofld_params: (Offload) Egress queue parameters, that is to be freed. + * @cbfn: The call-back function + * + * + */ +void +csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv, + uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); + + cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | + FW_CMD_REQUEST_F | FW_CMD_EXEC_F | + FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) | + FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn)); + cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid)); + +} /* csio_mb_eq_ofld_free */ + +/* + * csio_write_fcoe_link_cond_init_mb - Initialize Mailbox to write FCoE link + * condition. + * + * @ln: The Lnode structure + * @mbp: Mailbox structure to initialize + * @mb_tmo: Mailbox time-out period (in ms). + * @cbfn: The call back function. + * + * + */ +void +csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, + uint32_t mb_tmo, uint8_t port_id, uint32_t sub_opcode, + uint8_t cos, bool link_status, uint32_t fcfi, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_fcoe_link_cmd *cmdp = + (struct fw_fcoe_link_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); + + cmdp->op_to_portid = htonl(( + FW_CMD_OP_V(FW_FCOE_LINK_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_FCOE_LINK_CMD_PORTID(port_id))); + cmdp->sub_opcode_fcfi = htonl( + FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode) | + FW_FCOE_LINK_CMD_FCFI(fcfi)); + cmdp->lstatus = link_status; + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + +} /* csio_write_fcoe_link_cond_init_mb */ + +/* + * csio_fcoe_read_res_info_init_mb - Initializes the mailbox for reading FCoE + * resource information(FW_GET_RES_INFO_CMD). + * + * @hw: The HW structure + * @mbp: Mailbox structure to initialize + * @mb_tmo: Mailbox time-out period (in ms). + * @cbfn: The call-back function + * + * + */ +void +csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp, + uint32_t mb_tmo, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_fcoe_res_info_cmd *cmdp = + (struct fw_fcoe_res_info_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1); + + cmdp->op_to_read = htonl((FW_CMD_OP_V(FW_FCOE_RES_INFO_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F)); + + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + +} /* csio_fcoe_read_res_info_init_mb */ + +/* + * csio_fcoe_vnp_alloc_init_mb - Initializes the mailbox for allocating VNP + * in the firmware (FW_FCOE_VNP_CMD). + * + * @ln: The Lnode structure. + * @mbp: Mailbox structure to initialize. + * @mb_tmo: Mailbox time-out period (in ms). + * @fcfi: FCF Index. + * @vnpi: vnpi + * @iqid: iqid + * @vnport_wwnn: vnport WWNN + * @vnport_wwpn: vnport WWPN + * @cbfn: The call-back function. 
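+ *
+ * If both @vnport_wwnn and @vnport_wwpn are zero, FW_FCOE_VNP_CMD_GEN_WWN
+ * is set in the command so that the firmware generates the WWNs for this
+ * VN_Port itself; otherwise the supplied names are copied into the command.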
+ * + * + */ +void +csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, + uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, uint16_t iqid, + uint8_t vnport_wwnn[8], uint8_t vnport_wwpn[8], + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_fcoe_vnp_cmd *cmdp = + (struct fw_fcoe_vnp_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); + + cmdp->op_to_fcfi = htonl((FW_CMD_OP_V(FW_FCOE_VNP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | + FW_FCOE_VNP_CMD_FCFI(fcfi))); + + cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + + cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi)); + + cmdp->iqid = htons(iqid); + + if (!wwn_to_u64(vnport_wwnn) && !wwn_to_u64(vnport_wwpn)) + cmdp->gen_wwn_to_vnpi |= htonl(FW_FCOE_VNP_CMD_GEN_WWN); + + if (vnport_wwnn) + memcpy(cmdp->vnport_wwnn, vnport_wwnn, 8); + if (vnport_wwpn) + memcpy(cmdp->vnport_wwpn, vnport_wwpn, 8); + +} /* csio_fcoe_vnp_alloc_init_mb */ + +/* + * csio_fcoe_vnp_read_init_mb - Prepares VNP read cmd. + * @ln: The Lnode structure. + * @mbp: Mailbox structure to initialize. + * @mb_tmo: Mailbox time-out period (in ms). + * @fcfi: FCF Index. + * @vnpi: vnpi + * @cbfn: The call-back handler. + */ +void +csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, + uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_fcoe_vnp_cmd *cmdp = + (struct fw_fcoe_vnp_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); + cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_FCOE_VNP_CMD_FCFI(fcfi)); + cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi)); +} + +/* + * csio_fcoe_vnp_free_init_mb - Initializes the mailbox for freeing an + * alloacted VNP in the firmware (FW_FCOE_VNP_CMD). + * + * @ln: The Lnode structure. + * @mbp: Mailbox structure to initialize. + * @mb_tmo: Mailbox time-out period (in ms). + * @fcfi: FCF flow id + * @vnpi: VNP flow id + * @cbfn: The call-back function. + * Return: None + */ +void +csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, + uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_fcoe_vnp_cmd *cmdp = + (struct fw_fcoe_vnp_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); + + cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | + FW_FCOE_VNP_CMD_FCFI(fcfi)); + cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi)); +} + +/* + * csio_fcoe_read_fcf_init_mb - Initializes the mailbox to read the + * FCF records. + * + * @ln: The Lnode structure + * @mbp: Mailbox structure to initialize + * @mb_tmo: Mailbox time-out period (in ms). + * @fcf_params: FC-Forwarder parameters. 
+ * @cbfn: The call-back function + * + * + */ +void +csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, + uint32_t mb_tmo, uint32_t portid, uint32_t fcfi, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_fcoe_fcf_cmd *cmdp = + (struct fw_fcoe_fcf_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); + + cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_FCF_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_FCOE_FCF_CMD_FCFI(fcfi)); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + +} /* csio_fcoe_read_fcf_init_mb */ + +void +csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp, + uint32_t mb_tmo, + struct fw_fcoe_port_cmd_params *portparams, + void (*cbfn)(struct csio_hw *, + struct csio_mb *)) +{ + struct fw_fcoe_stats_cmd *cmdp = (struct fw_fcoe_stats_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1); + mbp->mb_size = 64; + + cmdp->op_to_flowid = htonl(FW_CMD_OP_V(FW_FCOE_STATS_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); + cmdp->free_to_len16 = htonl(FW_CMD_LEN16_V(CSIO_MAX_MB_SIZE/16)); + + cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) | + FW_FCOE_STATS_CMD_PORT(portparams->portid); + + cmdp->u.ctl.port_valid_ix = FW_FCOE_STATS_CMD_IX(portparams->idx) | + FW_FCOE_STATS_CMD_PORT_VALID; + +} /* csio_fcoe_read_portparams_init_mb */ + +void +csio_mb_process_portparams_rsp(struct csio_hw *hw, + struct csio_mb *mbp, + enum fw_retval *retval, + struct fw_fcoe_port_cmd_params *portparams, + struct fw_fcoe_port_stats *portstats) +{ + struct fw_fcoe_stats_cmd *rsp = (struct fw_fcoe_stats_cmd *)(mbp->mb); + struct fw_fcoe_port_stats stats; + uint8_t *src; + uint8_t *dst; + + *retval = FW_CMD_RETVAL_G(ntohl(rsp->free_to_len16)); + + memset(&stats, 0, sizeof(struct fw_fcoe_port_stats)); + + if (*retval == FW_SUCCESS) { + dst = (uint8_t *)(&stats) + ((portparams->idx - 1) * 8); + src = (uint8_t *)rsp + (CSIO_STATS_OFFSET * 8); + memcpy(dst, src, (portparams->nstats * 8)); + if (portparams->idx == 1) { + /* Get the first 6 flits from the Mailbox */ + portstats->tx_bcast_bytes = stats.tx_bcast_bytes; + portstats->tx_bcast_frames = stats.tx_bcast_frames; + portstats->tx_mcast_bytes = stats.tx_mcast_bytes; + portstats->tx_mcast_frames = stats.tx_mcast_frames; + portstats->tx_ucast_bytes = stats.tx_ucast_bytes; + portstats->tx_ucast_frames = stats.tx_ucast_frames; + } + if (portparams->idx == 7) { + /* Get the second 6 flits from the Mailbox */ + portstats->tx_drop_frames = stats.tx_drop_frames; + portstats->tx_offload_bytes = stats.tx_offload_bytes; + portstats->tx_offload_frames = stats.tx_offload_frames; +#if 0 + portstats->rx_pf_bytes = stats.rx_pf_bytes; + portstats->rx_pf_frames = stats.rx_pf_frames; +#endif + portstats->rx_bcast_bytes = stats.rx_bcast_bytes; + portstats->rx_bcast_frames = stats.rx_bcast_frames; + portstats->rx_mcast_bytes = stats.rx_mcast_bytes; + } + if (portparams->idx == 13) { + /* Get the last 4 flits from the Mailbox */ + portstats->rx_mcast_frames = stats.rx_mcast_frames; + portstats->rx_ucast_bytes = stats.rx_ucast_bytes; + portstats->rx_ucast_frames = stats.rx_ucast_frames; + portstats->rx_err_frames = stats.rx_err_frames; + } + } +} + +/* Entry points/APIs for MB module */ +/* + * csio_mb_intr_enable - Enable Interrupts from mailboxes. + * @hw: The HW structure + * + * Enables CIM interrupt bit in appropriate INT_ENABLE registers. 
+ */ +void +csio_mb_intr_enable(struct csio_hw *hw) +{ + csio_wr_reg32(hw, MBMSGRDYINTEN_F, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A)); + csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A)); +} + +/* + * csio_mb_intr_disable - Disable Interrupts from mailboxes. + * @hw: The HW structure + * + * Disable bit in HostInterruptEnable CIM register. + */ +void +csio_mb_intr_disable(struct csio_hw *hw) +{ + csio_wr_reg32(hw, MBMSGRDYINTEN_V(0), + MYPF_REG(CIM_PF_HOST_INT_ENABLE_A)); + csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A)); +} + +static void +csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd) +{ + struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd; + + if ((FW_DEBUG_CMD_TYPE_G(ntohl(dbg->op_type))) == 1) { + csio_info(hw, "FW print message:\n"); + csio_info(hw, "\tdebug->dprtstridx = %d\n", + ntohs(dbg->u.prt.dprtstridx)); + csio_info(hw, "\tdebug->dprtstrparam0 = 0x%x\n", + ntohl(dbg->u.prt.dprtstrparam0)); + csio_info(hw, "\tdebug->dprtstrparam1 = 0x%x\n", + ntohl(dbg->u.prt.dprtstrparam1)); + csio_info(hw, "\tdebug->dprtstrparam2 = 0x%x\n", + ntohl(dbg->u.prt.dprtstrparam2)); + csio_info(hw, "\tdebug->dprtstrparam3 = 0x%x\n", + ntohl(dbg->u.prt.dprtstrparam3)); + } else { + /* This is a FW assertion */ + csio_fatal(hw, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", + dbg->u.assert.filename_0_7, + ntohl(dbg->u.assert.line), + ntohl(dbg->u.assert.x), + ntohl(dbg->u.assert.y)); + } +} + +static void +csio_mb_debug_cmd_handler(struct csio_hw *hw) +{ + int i; + __be64 cmd[CSIO_MB_MAX_REGS]; + uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A); + uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A); + int size = sizeof(struct fw_debug_cmd); + + /* Copy mailbox data */ + for (i = 0; i < size; i += 8) + cmd[i / 8] = cpu_to_be64(csio_rd_reg64(hw, data_reg + i)); + + csio_mb_dump_fw_dbg(hw, cmd); + + /* Notify FW of mailbox by setting owner as UP */ + csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F | + MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg); + + csio_rd_reg32(hw, ctl_reg); + wmb(); +} + +/* + * csio_mb_issue - generic routine for issuing Mailbox commands. + * @hw: The HW structure + * @mbp: Mailbox command to issue + * + * Caller should hold hw lock across this call. 
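+ *
+ * In brief: when no completion callback is supplied the command runs in
+ * immediate (polled) mode - the routine busy-waits up to mbp->tmo ms for
+ * the response and returns -EBUSY if another mailbox command is already
+ * active.  When a callback is supplied, the request is either issued right
+ * away or queued on mbm->req_q behind the currently active mailbox, and
+ * completion is driven by the interrupt and timeout handlers.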
+ */ +int +csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp) +{ + uint32_t owner, ctl; + int i; + uint32_t ii; + __be64 *cmd = mbp->mb; + __be64 hdr; + struct csio_mbm *mbm = &hw->mbm; + uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A); + uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A); + int size = mbp->mb_size; + int rv = -EINVAL; + struct fw_cmd_hdr *fw_hdr; + + /* Determine mode */ + if (mbp->mb_cbfn == NULL) { + /* Need to issue/get results in the same context */ + if (mbp->tmo < CSIO_MB_POLL_FREQ) { + csio_err(hw, "Invalid tmo: 0x%x\n", mbp->tmo); + goto error_out; + } + } else if (!csio_is_host_intr_enabled(hw) || + !csio_is_hw_intr_enabled(hw)) { + csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n", + *((uint8_t *)mbp->mb)); + goto error_out; + } + + if (mbm->mcurrent != NULL) { + /* Queue mbox cmd, if another mbox cmd is active */ + if (mbp->mb_cbfn == NULL) { + rv = -EBUSY; + csio_dbg(hw, "Couldn't own Mailbox %x op:0x%x\n", + hw->pfn, *((uint8_t *)mbp->mb)); + + goto error_out; + } else { + list_add_tail(&mbp->list, &mbm->req_q); + CSIO_INC_STATS(mbm, n_activeq); + + return 0; + } + } + + /* Now get ownership of mailbox */ + owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg)); + + if (!csio_mb_is_host_owner(owner)) { + + for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++) + owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg)); + /* + * Mailbox unavailable. In immediate mode, fail the command. + * In other modes, enqueue the request. + */ + if (!csio_mb_is_host_owner(owner)) { + if (mbp->mb_cbfn == NULL) { + rv = owner ? -EBUSY : -ETIMEDOUT; + + csio_dbg(hw, + "Couldn't own Mailbox %x op:0x%x " + "owner:%x\n", + hw->pfn, *((uint8_t *)mbp->mb), owner); + goto error_out; + } else { + if (mbm->mcurrent == NULL) { + csio_err(hw, + "Couldn't own Mailbox %x " + "op:0x%x owner:%x\n", + hw->pfn, *((uint8_t *)mbp->mb), + owner); + csio_err(hw, + "No outstanding driver" + " mailbox as well\n"); + goto error_out; + } + } + } + } + + /* Mailbox is available, copy mailbox data into it */ + for (i = 0; i < size; i += 8) { + csio_wr_reg64(hw, be64_to_cpu(*cmd), data_reg + i); + cmd++; + } + + CSIO_DUMP_MB(hw, hw->pfn, data_reg); + + /* Start completion timers in non-immediate modes and notify FW */ + if (mbp->mb_cbfn != NULL) { + mbm->mcurrent = mbp; + mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo)); + csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F | + MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg); + } else + csio_wr_reg32(hw, MBMSGVALID_F | MBOWNER_V(CSIO_MBOWNER_FW), + ctl_reg); + + /* Flush posted writes */ + csio_rd_reg32(hw, ctl_reg); + wmb(); + + CSIO_INC_STATS(mbm, n_req); + + if (mbp->mb_cbfn) + return 0; + + /* Poll for completion in immediate mode */ + cmd = mbp->mb; + + for (ii = 0; ii < mbp->tmo; ii += CSIO_MB_POLL_FREQ) { + mdelay(CSIO_MB_POLL_FREQ); + + /* Check for response */ + ctl = csio_rd_reg32(hw, ctl_reg); + if (csio_mb_is_host_owner(MBOWNER_G(ctl))) { + + if (!(ctl & MBMSGVALID_F)) { + csio_wr_reg32(hw, 0, ctl_reg); + continue; + } + + CSIO_DUMP_MB(hw, hw->pfn, data_reg); + + hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg)); + fw_hdr = (struct fw_cmd_hdr *)&hdr; + + switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) { + case FW_DEBUG_CMD: + csio_mb_debug_cmd_handler(hw); + continue; + } + + /* Copy response */ + for (i = 0; i < size; i += 8) + *cmd++ = cpu_to_be64(csio_rd_reg64 + (hw, data_reg + i)); + csio_wr_reg32(hw, 0, ctl_reg); + + if (csio_mb_fw_retval(mbp) != FW_SUCCESS) + CSIO_INC_STATS(mbm, n_err); + + CSIO_INC_STATS(mbm, n_rsp); + return 0; + } + 
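+		/* Mailbox still owned by the firmware; keep polling until mbp->tmo expires. */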
} + + CSIO_INC_STATS(mbm, n_tmo); + + csio_err(hw, "Mailbox %x op:0x%x timed out!\n", + hw->pfn, *((uint8_t *)cmd)); + + return -ETIMEDOUT; + +error_out: + CSIO_INC_STATS(mbm, n_err); + return rv; +} + +/* + * csio_mb_completions - Completion handler for Mailbox commands + * @hw: The HW structure + * @cbfn_q: Completion queue. + * + */ +void +csio_mb_completions(struct csio_hw *hw, struct list_head *cbfn_q) +{ + struct csio_mb *mbp; + struct csio_mbm *mbm = &hw->mbm; + enum fw_retval rv; + + while (!list_empty(cbfn_q)) { + mbp = list_first_entry(cbfn_q, struct csio_mb, list); + list_del_init(&mbp->list); + + rv = csio_mb_fw_retval(mbp); + if ((rv != FW_SUCCESS) && (rv != FW_HOSTERROR)) + CSIO_INC_STATS(mbm, n_err); + else if (rv != FW_HOSTERROR) + CSIO_INC_STATS(mbm, n_rsp); + + if (mbp->mb_cbfn) + mbp->mb_cbfn(hw, mbp); + } +} + +static void +csio_mb_portmod_changed(struct csio_hw *hw, uint8_t port_id) +{ + static char *mod_str[] = { + NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" + }; + + struct csio_pport *port = &hw->pport[port_id]; + + if (port->mod_type == FW_PORT_MOD_TYPE_NONE) + csio_info(hw, "Port:%d - port module unplugged\n", port_id); + else if (port->mod_type < ARRAY_SIZE(mod_str)) + csio_info(hw, "Port:%d - %s port module inserted\n", port_id, + mod_str[port->mod_type]); + else if (port->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) + csio_info(hw, + "Port:%d - unsupported optical port module " + "inserted\n", port_id); + else if (port->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) + csio_info(hw, + "Port:%d - unknown port module inserted, forcing " + "TWINAX\n", port_id); + else if (port->mod_type == FW_PORT_MOD_TYPE_ERROR) + csio_info(hw, "Port:%d - transceiver module error\n", port_id); + else + csio_info(hw, "Port:%d - unknown module type %d inserted\n", + port_id, port->mod_type); +} + +int +csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd) +{ + uint8_t opcode = *(uint8_t *)cmd; + struct fw_port_cmd *pcmd; + uint8_t port_id; + uint32_t link_status; + uint16_t action; + uint8_t mod_type; + fw_port_cap32_t linkattr; + + if (opcode == FW_PORT_CMD) { + pcmd = (struct fw_port_cmd *)cmd; + port_id = FW_PORT_CMD_PORTID_G( + ntohl(pcmd->op_to_portid)); + action = FW_PORT_CMD_ACTION_G( + ntohl(pcmd->action_to_len16)); + if (action != FW_PORT_ACTION_GET_PORT_INFO && + action != FW_PORT_ACTION_GET_PORT_INFO32) { + csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n", + action); + return -EINVAL; + } + + if (action == FW_PORT_ACTION_GET_PORT_INFO) { + link_status = ntohl(pcmd->u.info.lstatus_to_modtype); + mod_type = FW_PORT_CMD_MODTYPE_G(link_status); + linkattr = lstatus_to_fwcap(link_status); + + hw->pport[port_id].link_status = + FW_PORT_CMD_LSTATUS_G(link_status); + } else { + link_status = + ntohl(pcmd->u.info32.lstatus32_to_cbllen32); + mod_type = FW_PORT_CMD_MODTYPE32_G(link_status); + linkattr = ntohl(pcmd->u.info32.linkattr32); + + hw->pport[port_id].link_status = + FW_PORT_CMD_LSTATUS32_G(link_status); + } + + hw->pport[port_id].link_speed = fwcap_to_fwspeed(linkattr); + + csio_info(hw, "Port:%x - LINK %s\n", port_id, + hw->pport[port_id].link_status ? "UP" : "DOWN"); + + if (mod_type != hw->pport[port_id].mod_type) { + hw->pport[port_id].mod_type = mod_type; + csio_mb_portmod_changed(hw, port_id); + } + } else if (opcode == FW_DEBUG_CMD) { + csio_mb_dump_fw_dbg(hw, cmd); + } else { + csio_dbg(hw, "Gen MB can't handle op:0x%x on evtq.\n", opcode); + return -EINVAL; + } + + return 0; +} + +/* + * csio_mb_isr_handler - Handle mailboxes related interrupts. 
+ * @hw: The HW structure + * + * Called from the ISR to handle Mailbox related interrupts. + * HW Lock should be held across this call. + */ +int +csio_mb_isr_handler(struct csio_hw *hw) +{ + struct csio_mbm *mbm = &hw->mbm; + struct csio_mb *mbp = mbm->mcurrent; + __be64 *cmd; + uint32_t ctl, cim_cause, pl_cause; + int i; + uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A); + uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A); + int size; + __be64 hdr; + struct fw_cmd_hdr *fw_hdr; + + pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE_A)); + cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A)); + + if (!(pl_cause & PFCIM_F) || !(cim_cause & MBMSGRDYINT_F)) { + CSIO_INC_STATS(hw, n_mbint_unexp); + return -EINVAL; + } + + /* + * The cause registers below HAVE to be cleared in the SAME + * order as below: The low level cause register followed by + * the upper level cause register. In other words, CIM-cause + * first followed by PL-Cause next. + */ + csio_wr_reg32(hw, MBMSGRDYINT_F, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A)); + csio_wr_reg32(hw, PFCIM_F, MYPF_REG(PL_PF_INT_CAUSE_A)); + + ctl = csio_rd_reg32(hw, ctl_reg); + + if (csio_mb_is_host_owner(MBOWNER_G(ctl))) { + + CSIO_DUMP_MB(hw, hw->pfn, data_reg); + + if (!(ctl & MBMSGVALID_F)) { + csio_warn(hw, + "Stray mailbox interrupt recvd," + " mailbox data not valid\n"); + csio_wr_reg32(hw, 0, ctl_reg); + /* Flush */ + csio_rd_reg32(hw, ctl_reg); + return -EINVAL; + } + + hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg)); + fw_hdr = (struct fw_cmd_hdr *)&hdr; + + switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) { + case FW_DEBUG_CMD: + csio_mb_debug_cmd_handler(hw); + return -EINVAL; +#if 0 + case FW_ERROR_CMD: + case FW_INITIALIZE_CMD: /* When we are not master */ +#endif + } + + CSIO_ASSERT(mbp != NULL); + + cmd = mbp->mb; + size = mbp->mb_size; + /* Get response */ + for (i = 0; i < size; i += 8) + *cmd++ = cpu_to_be64(csio_rd_reg64 + (hw, data_reg + i)); + + csio_wr_reg32(hw, 0, ctl_reg); + /* Flush */ + csio_rd_reg32(hw, ctl_reg); + + mbm->mcurrent = NULL; + + /* Add completion to tail of cbfn queue */ + list_add_tail(&mbp->list, &mbm->cbfn_q); + CSIO_INC_STATS(mbm, n_cbfnq); + + /* + * Enqueue event to EventQ. Events processing happens + * in Event worker thread context + */ + if (csio_enqueue_evt(hw, CSIO_EVT_MBX, mbp, sizeof(mbp))) + CSIO_INC_STATS(hw, n_evt_drop); + + return 0; + + } else { + /* + * We can get here if mailbox MSIX vector is shared, + * or in INTx case. Or a stray interrupt. + */ + csio_dbg(hw, "Host not owner, no mailbox interrupt\n"); + CSIO_INC_STATS(hw, n_int_stray); + return -EINVAL; + } +} + +/* + * csio_mb_tmo_handler - Timeout handler + * @hw: The HW structure + * + */ +struct csio_mb * +csio_mb_tmo_handler(struct csio_hw *hw) +{ + struct csio_mbm *mbm = &hw->mbm; + struct csio_mb *mbp = mbm->mcurrent; + struct fw_cmd_hdr *fw_hdr; + + /* + * Could be a race b/w the completion handler and the timer + * and the completion handler won that race. + */ + if (mbp == NULL) { + CSIO_DB_ASSERT(0); + return NULL; + } + + fw_hdr = (struct fw_cmd_hdr *)(mbp->mb); + + csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn, + FW_CMD_OP_G(ntohl(fw_hdr->hi))); + + mbm->mcurrent = NULL; + CSIO_INC_STATS(mbm, n_tmo); + fw_hdr->lo = htonl(FW_CMD_RETVAL_V(FW_ETIMEDOUT)); + + return mbp; +} + +/* + * csio_mb_cancel_all - Cancel all waiting commands. + * @hw: The HW structure + * @cbfn_q: The callback queue. + * + * Caller should hold hw lock across this call. 
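+ *
+ * The currently active mailbox (if any), everything queued on mbm->req_q
+ * and any unprocessed completions on mbm->cbfn_q are all moved onto
+ * @cbfn_q, with their return value forced to FW_HOSTERROR so that the
+ * callers' completion handlers see them as cancelled.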
+ */ +void +csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q) +{ + struct csio_mb *mbp; + struct csio_mbm *mbm = &hw->mbm; + struct fw_cmd_hdr *hdr; + struct list_head *tmp; + + if (mbm->mcurrent) { + mbp = mbm->mcurrent; + + /* Stop mailbox completion timer */ + del_timer_sync(&mbm->timer); + + /* Add completion to tail of cbfn queue */ + list_add_tail(&mbp->list, cbfn_q); + mbm->mcurrent = NULL; + } + + if (!list_empty(&mbm->req_q)) { + list_splice_tail_init(&mbm->req_q, cbfn_q); + mbm->stats.n_activeq = 0; + } + + if (!list_empty(&mbm->cbfn_q)) { + list_splice_tail_init(&mbm->cbfn_q, cbfn_q); + mbm->stats.n_cbfnq = 0; + } + + if (list_empty(cbfn_q)) + return; + + list_for_each(tmp, cbfn_q) { + mbp = (struct csio_mb *)tmp; + hdr = (struct fw_cmd_hdr *)(mbp->mb); + + csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n", + hw->pfn, FW_CMD_OP_G(ntohl(hdr->hi))); + + CSIO_INC_STATS(mbm, n_cancel); + hdr->lo = htonl(FW_CMD_RETVAL_V(FW_HOSTERROR)); + } +} + +/* + * csio_mbm_init - Initialize Mailbox module + * @mbm: Mailbox module + * @hw: The HW structure + * @timer: Timing function for interrupting mailboxes + * + * Initialize timer and the request/response queues. + */ +int +csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw, + void (*timer_fn)(struct timer_list *)) +{ + mbm->hw = hw; + timer_setup(&mbm->timer, timer_fn, 0); + + INIT_LIST_HEAD(&mbm->req_q); + INIT_LIST_HEAD(&mbm->cbfn_q); + csio_set_mb_intr_idx(mbm, -1); + + return 0; +} + +/* + * csio_mbm_exit - Uninitialize mailbox module + * @mbm: Mailbox module + * + * Stop timer. + */ +void +csio_mbm_exit(struct csio_mbm *mbm) +{ + del_timer_sync(&mbm->timer); + + CSIO_DB_ASSERT(mbm->mcurrent == NULL); + CSIO_DB_ASSERT(list_empty(&mbm->req_q)); + CSIO_DB_ASSERT(list_empty(&mbm->cbfn_q)); +} diff --git a/drivers/scsi/csiostor/csio_mb.h b/drivers/scsi/csiostor/csio_mb.h new file mode 100644 index 000000000..b07e891c5 --- /dev/null +++ b/drivers/scsi/csiostor/csio_mb.h @@ -0,0 +1,263 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CSIO_MB_H__ +#define __CSIO_MB_H__ + +#include +#include + +#include "t4fw_api.h" +#include "t4fw_api_stor.h" +#include "csio_defs.h" + +#define CSIO_STATS_OFFSET (2) +#define CSIO_NUM_STATS_PER_MB (6) + +struct fw_fcoe_port_cmd_params { + uint8_t portid; + uint8_t idx; + uint8_t nstats; +}; + +#define CSIO_DUMP_MB(__hw, __num, __mb) \ + csio_dbg(__hw, "\t%llx %llx %llx %llx %llx %llx %llx %llx\n", \ + (unsigned long long)csio_rd_reg64(__hw, __mb), \ + (unsigned long long)csio_rd_reg64(__hw, __mb + 8), \ + (unsigned long long)csio_rd_reg64(__hw, __mb + 16), \ + (unsigned long long)csio_rd_reg64(__hw, __mb + 24), \ + (unsigned long long)csio_rd_reg64(__hw, __mb + 32), \ + (unsigned long long)csio_rd_reg64(__hw, __mb + 40), \ + (unsigned long long)csio_rd_reg64(__hw, __mb + 48), \ + (unsigned long long)csio_rd_reg64(__hw, __mb + 56)) + +#define CSIO_MB_MAX_REGS 8 +#define CSIO_MAX_MB_SIZE 64 +#define CSIO_MB_POLL_FREQ 5 /* 5 ms */ +#define CSIO_MB_DEFAULT_TMO FW_CMD_MAX_TIMEOUT + +/* Device master in HELLO command */ +enum csio_dev_master { CSIO_MASTER_CANT, CSIO_MASTER_MAY, CSIO_MASTER_MUST }; + +enum csio_mb_owner { CSIO_MBOWNER_NONE, CSIO_MBOWNER_FW, CSIO_MBOWNER_PL }; + +enum csio_dev_state { + CSIO_DEV_STATE_UNINIT, + CSIO_DEV_STATE_INIT, + CSIO_DEV_STATE_ERR +}; + +#define FW_PARAM_DEV(param) \ + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \ + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param)) + +#define FW_PARAM_PFVF(param) \ + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \ + FW_PARAMS_PARAM_Y_V(0) | \ + FW_PARAMS_PARAM_Z_V(0)) + +#define CSIO_INIT_MBP(__mbp, __cp, __tmo, __priv, __fn, __clear) \ +do { \ + if (__clear) \ + memset((__cp), 0, \ + CSIO_MB_MAX_REGS * sizeof(__be64)); \ + INIT_LIST_HEAD(&(__mbp)->list); \ + (__mbp)->tmo = (__tmo); \ + (__mbp)->priv = (void *)(__priv); \ + (__mbp)->mb_cbfn = (__fn); \ + (__mbp)->mb_size = sizeof(*(__cp)); \ +} while (0) + +struct csio_mbm_stats { + uint32_t n_req; /* number of mbox req */ + uint32_t n_rsp; /* number of mbox rsp */ + uint32_t n_activeq; /* number of mbox req active Q */ + uint32_t n_cbfnq; /* number of mbox req cbfn Q */ + uint32_t n_tmo; /* number of mbox timeout */ + uint32_t n_cancel; /* number of mbox cancel */ + uint32_t n_err; /* number of mbox error */ +}; + +/* Driver version of Mailbox */ +struct csio_mb { + struct list_head list; /* for req/resp */ + /* queue in driver */ + __be64 mb[CSIO_MB_MAX_REGS]; /* MB in HW format */ + int mb_size; /* Size of this + * mailbox. 
+ */ + uint32_t tmo; /* Timeout */ + struct completion cmplobj; /* MB Completion + * object + */ + void (*mb_cbfn) (struct csio_hw *, struct csio_mb *); + /* Callback fn */ + void *priv; /* Owner private ptr */ +}; + +struct csio_mbm { + uint32_t a_mbox; /* Async mbox num */ + uint32_t intr_idx; /* Interrupt index */ + struct timer_list timer; /* Mbox timer */ + struct csio_hw *hw; /* Hardware pointer */ + struct list_head req_q; /* Mbox request queue */ + struct list_head cbfn_q; /* Mbox completion q */ + struct csio_mb *mcurrent; /* Current mailbox */ + uint32_t req_q_cnt; /* Outstanding mbox + * cmds + */ + struct csio_mbm_stats stats; /* Statistics */ +}; + +#define csio_set_mb_intr_idx(_m, _i) ((_m)->intr_idx = (_i)) +#define csio_get_mb_intr_idx(_m) ((_m)->intr_idx) + +struct csio_iq_params; +struct csio_eq_params; + +enum fw_retval csio_mb_fw_retval(struct csio_mb *); + +/* MB helpers */ +void csio_mb_hello(struct csio_hw *, struct csio_mb *, uint32_t, + uint32_t, uint32_t, enum csio_dev_master, + void (*)(struct csio_hw *, struct csio_mb *)); + +void csio_mb_process_hello_rsp(struct csio_hw *, struct csio_mb *, + enum fw_retval *, enum csio_dev_state *, + uint8_t *); + +void csio_mb_bye(struct csio_hw *, struct csio_mb *, uint32_t, + void (*)(struct csio_hw *, struct csio_mb *)); + +void csio_mb_reset(struct csio_hw *, struct csio_mb *, uint32_t, int, int, + void (*)(struct csio_hw *, struct csio_mb *)); + +void csio_mb_params(struct csio_hw *, struct csio_mb *, uint32_t, unsigned int, + unsigned int, unsigned int, const u32 *, u32 *, bool, + void (*)(struct csio_hw *, struct csio_mb *)); + +void csio_mb_process_read_params_rsp(struct csio_hw *, struct csio_mb *, + enum fw_retval *, unsigned int , u32 *); + +void csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, + int reg); + +void csio_mb_caps_config(struct csio_hw *, struct csio_mb *, uint32_t, + bool, bool, bool, bool, + void (*)(struct csio_hw *, struct csio_mb *)); + +void csio_mb_port(struct csio_hw *, struct csio_mb *, uint32_t, + uint8_t, bool, uint32_t, uint16_t, + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_mb_process_read_port_rsp(struct csio_hw *, struct csio_mb *, + enum fw_retval *, uint16_t, + uint32_t *, uint32_t *); + +void csio_mb_initialize(struct csio_hw *, struct csio_mb *, uint32_t, + void (*)(struct csio_hw *, struct csio_mb *)); + +void csio_mb_iq_alloc_write(struct csio_hw *, struct csio_mb *, void *, + uint32_t, struct csio_iq_params *, + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_mb_iq_alloc_write_rsp(struct csio_hw *, struct csio_mb *, + enum fw_retval *, struct csio_iq_params *); + +void csio_mb_iq_free(struct csio_hw *, struct csio_mb *, void *, + uint32_t, struct csio_iq_params *, + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_mb_eq_ofld_alloc_write(struct csio_hw *, struct csio_mb *, void *, + uint32_t, struct csio_eq_params *, + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *, struct csio_mb *, + enum fw_retval *, struct csio_eq_params *); + +void csio_mb_eq_ofld_free(struct csio_hw *, struct csio_mb *, void *, + uint32_t , struct csio_eq_params *, + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_fcoe_read_res_info_init_mb(struct csio_hw *, struct csio_mb *, + uint32_t, + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_write_fcoe_link_cond_init_mb(struct csio_lnode *, struct csio_mb *, + uint32_t, uint8_t, uint32_t, uint8_t, bool, uint32_t, + void 
(*) (struct csio_hw *, struct csio_mb *)); + +void csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *, struct csio_mb *, + uint32_t, uint32_t , uint32_t , uint16_t, + uint8_t [8], uint8_t [8], + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_fcoe_vnp_read_init_mb(struct csio_lnode *, struct csio_mb *, + uint32_t, uint32_t , uint32_t , + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_fcoe_vnp_free_init_mb(struct csio_lnode *, struct csio_mb *, + uint32_t , uint32_t, uint32_t , + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_fcoe_read_fcf_init_mb(struct csio_lnode *, struct csio_mb *, + uint32_t, uint32_t, uint32_t, + void (*cbfn) (struct csio_hw *, struct csio_mb *)); + +void csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, + struct csio_mb *mbp, uint32_t mb_tmo, + struct fw_fcoe_port_cmd_params *portparams, + void (*cbfn)(struct csio_hw *, struct csio_mb *)); + +void csio_mb_process_portparams_rsp(struct csio_hw *hw, struct csio_mb *mbp, + enum fw_retval *retval, + struct fw_fcoe_port_cmd_params *portparams, + struct fw_fcoe_port_stats *portstats); + +/* MB module functions */ +int csio_mbm_init(struct csio_mbm *, struct csio_hw *, + void (*)(struct timer_list *)); +void csio_mbm_exit(struct csio_mbm *); +void csio_mb_intr_enable(struct csio_hw *); +void csio_mb_intr_disable(struct csio_hw *); + +int csio_mb_issue(struct csio_hw *, struct csio_mb *); +void csio_mb_completions(struct csio_hw *, struct list_head *); +int csio_mb_fwevt_handler(struct csio_hw *, __be64 *); +int csio_mb_isr_handler(struct csio_hw *); +struct csio_mb *csio_mb_tmo_handler(struct csio_hw *); +void csio_mb_cancel_all(struct csio_hw *, struct list_head *); + +#endif /* ifndef __CSIO_MB_H__ */ diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c new file mode 100644 index 000000000..713e13adf --- /dev/null +++ b/drivers/scsi/csiostor/csio_rnode.c @@ -0,0 +1,921 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include + +#include "csio_hw.h" +#include "csio_lnode.h" +#include "csio_rnode.h" + +static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *); +static void csio_rnode_exit(struct csio_rnode *); + +/* Static machine forward declarations */ +static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev); +static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev); +static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev); +static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev); + +/* RNF event mapping */ +static enum csio_rn_ev fwevt_to_rnevt[] = { + CSIO_RNFE_NONE, /* None */ + CSIO_RNFE_LOGGED_IN, /* PLOGI_ACC_RCVD */ + CSIO_RNFE_NONE, /* PLOGI_RJT_RCVD */ + CSIO_RNFE_PLOGI_RECV, /* PLOGI_RCVD */ + CSIO_RNFE_LOGO_RECV, /* PLOGO_RCVD */ + CSIO_RNFE_PRLI_DONE, /* PRLI_ACC_RCVD */ + CSIO_RNFE_NONE, /* PRLI_RJT_RCVD */ + CSIO_RNFE_PRLI_RECV, /* PRLI_RCVD */ + CSIO_RNFE_PRLO_RECV, /* PRLO_RCVD */ + CSIO_RNFE_NONE, /* NPORT_ID_CHGD */ + CSIO_RNFE_LOGO_RECV, /* FLOGO_RCVD */ + CSIO_RNFE_NONE, /* CLR_VIRT_LNK_RCVD */ + CSIO_RNFE_LOGGED_IN, /* FLOGI_ACC_RCVD */ + CSIO_RNFE_NONE, /* FLOGI_RJT_RCVD */ + CSIO_RNFE_LOGGED_IN, /* FDISC_ACC_RCVD */ + CSIO_RNFE_NONE, /* FDISC_RJT_RCVD */ + CSIO_RNFE_NONE, /* FLOGI_TMO_MAX_RETRY */ + CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_ACC */ + CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_RJT */ + CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_CNFLT */ + CSIO_RNFE_NONE, /* PRLI_TMO */ + CSIO_RNFE_NONE, /* ADISC_TMO */ + CSIO_RNFE_NAME_MISSING, /* RSCN_DEV_LOST */ + CSIO_RNFE_NONE, /* SCR_ACC_RCVD */ + CSIO_RNFE_NONE, /* ADISC_RJT_RCVD */ + CSIO_RNFE_NONE, /* LOGO_SNT */ + CSIO_RNFE_LOGO_RECV, /* PROTO_ERR_IMPL_LOGO */ +}; + +#define CSIO_FWE_TO_RNFE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \ + CSIO_RNFE_NONE : \ + fwevt_to_rnevt[_evt]) +int +csio_is_rnode_ready(struct csio_rnode *rn) +{ + return csio_match_state(rn, csio_rns_ready); +} + +static int +csio_is_rnode_uninit(struct csio_rnode *rn) +{ + return csio_match_state(rn, csio_rns_uninit); +} + +static int +csio_is_rnode_wka(uint8_t rport_type) +{ + if ((rport_type == FLOGI_VFPORT) || + (rport_type == FDISC_VFPORT) || + (rport_type == NS_VNPORT) || + (rport_type == FDMI_VNPORT)) + return 1; + + return 0; +} + +/* + * csio_rn_lookup - Finds the rnode with the given flowid + * @ln - lnode + * @flowid - flowid. + * + * Does the rnode lookup on the given lnode and flowid.If no matching entry + * found, NULL is returned. + */ +static struct csio_rnode * +csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid) +{ + struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; + struct list_head *tmp; + struct csio_rnode *rn; + + list_for_each(tmp, &rnhead->sm.sm_list) { + rn = (struct csio_rnode *) tmp; + if (rn->flowid == flowid) + return rn; + } + + return NULL; +} + +/* + * csio_rn_lookup_wwpn - Finds the rnode with the given wwpn + * @ln: lnode + * @wwpn: wwpn + * + * Does the rnode lookup on the given lnode and wwpn. If no matching entry + * found, NULL is returned. 
+ */ +static struct csio_rnode * +csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn) +{ + struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; + struct list_head *tmp; + struct csio_rnode *rn; + + list_for_each(tmp, &rnhead->sm.sm_list) { + rn = (struct csio_rnode *) tmp; + if (!memcmp(csio_rn_wwpn(rn), wwpn, 8)) + return rn; + } + + return NULL; +} + +/** + * csio_rnode_lookup_portid - Finds the rnode with the given portid + * @ln: lnode + * @portid: port id + * + * Lookup the rnode list for a given portid. If no matching entry + * found, NULL is returned. + */ +struct csio_rnode * +csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid) +{ + struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; + struct list_head *tmp; + struct csio_rnode *rn; + + list_for_each(tmp, &rnhead->sm.sm_list) { + rn = (struct csio_rnode *) tmp; + if (rn->nport_id == portid) + return rn; + } + + return NULL; +} + +static int +csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid, + uint32_t *vnp_flowid) +{ + struct csio_rnode *rnhead; + struct list_head *tmp, *tmp1; + struct csio_rnode *rn; + struct csio_lnode *ln_tmp; + struct csio_hw *hw = csio_lnode_to_hw(ln); + + list_for_each(tmp1, &hw->sln_head) { + ln_tmp = (struct csio_lnode *) tmp1; + if (ln_tmp == ln) + continue; + + rnhead = (struct csio_rnode *)&ln_tmp->rnhead; + list_for_each(tmp, &rnhead->sm.sm_list) { + + rn = (struct csio_rnode *) tmp; + if (csio_is_rnode_ready(rn)) { + if (rn->flowid == rdev_flowid) { + *vnp_flowid = csio_ln_flowid(ln_tmp); + return 1; + } + } + } + } + + return 0; +} + +static struct csio_rnode * +csio_alloc_rnode(struct csio_lnode *ln) +{ + struct csio_hw *hw = csio_lnode_to_hw(ln); + + struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC); + if (!rn) + goto err; + + memset(rn, 0, sizeof(struct csio_rnode)); + if (csio_rnode_init(rn, ln)) + goto err_free; + + CSIO_INC_STATS(ln, n_rnode_alloc); + + return rn; + +err_free: + mempool_free(rn, hw->rnode_mempool); +err: + CSIO_INC_STATS(ln, n_rnode_nomem); + return NULL; +} + +static void +csio_free_rnode(struct csio_rnode *rn) +{ + struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn)); + + csio_rnode_exit(rn); + CSIO_INC_STATS(rn->lnp, n_rnode_free); + mempool_free(rn, hw->rnode_mempool); +} + +/* + * csio_get_rnode - Gets rnode with the given flowid + * @ln - lnode + * @flowid - flow id. + * + * Does the rnode lookup on the given lnode and flowid. If no matching + * rnode found, then new rnode with given npid is allocated and returned. + */ +static struct csio_rnode * +csio_get_rnode(struct csio_lnode *ln, uint32_t flowid) +{ + struct csio_rnode *rn; + + rn = csio_rn_lookup(ln, flowid); + if (!rn) { + rn = csio_alloc_rnode(ln); + if (!rn) + return NULL; + + rn->flowid = flowid; + } + + return rn; +} + +/* + * csio_put_rnode - Frees the given rnode + * @ln - lnode + * @flowid - flow id. + * + * Does the rnode lookup on the given lnode and flowid. If no matching + * rnode found, then new rnode with given npid is allocated and returned. + */ +void +csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn) +{ + CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0); + csio_free_rnode(rn); +} + +/* + * csio_confirm_rnode - confirms rnode based on wwpn. + * @ln: lnode + * @rdev_flowid: remote device flowid + * @rdevp: remote device params + * This routines searches other rnode in list having same wwpn of new rnode. + * If there is a match, then matched rnode is returned and otherwise new rnode + * is returned. 
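+ *
+ * Roughly: the rnode is first looked up by rdev_flowid; on a miss,
+ * duplicate flowids active on other lnodes are rejected and a wwpn
+ * lookup is tried before allocating a fresh rnode.  On a hit,
+ * well-known ports are re-matched by nport_id and wwpn, while for
+ * ordinary ports a changed wwpn is re-resolved by another wwpn lookup,
+ * falling back to a fresh allocation if none matches.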
+ * returns rnode. + */ +struct csio_rnode * +csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid, + struct fcoe_rdev_entry *rdevp) +{ + uint8_t rport_type; + struct csio_rnode *rn, *match_rn; + uint32_t vnp_flowid = 0; + __be32 *port_id; + + port_id = (__be32 *)&rdevp->r_id[0]; + rport_type = + FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type); + + /* Drop rdev event for cntrl port */ + if (rport_type == FAB_CTLR_VNPORT) { + csio_ln_dbg(ln, + "Unhandled rport_type:%d recv in rdev evt " + "ssni:x%x\n", rport_type, rdev_flowid); + return NULL; + } + + /* Lookup on flowid */ + rn = csio_rn_lookup(ln, rdev_flowid); + if (!rn) { + + /* Drop events with duplicate flowid */ + if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) { + csio_ln_warn(ln, + "ssni:%x already active on vnpi:%x", + rdev_flowid, vnp_flowid); + return NULL; + } + + /* Lookup on wwpn for NPORTs */ + rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn); + if (!rn) + goto alloc_rnode; + + } else { + /* Lookup well-known ports with nport id */ + if (csio_is_rnode_wka(rport_type)) { + match_rn = csio_rnode_lookup_portid(ln, + ((ntohl(*port_id) >> 8) & CSIO_DID_MASK)); + if (match_rn == NULL) { + csio_rn_flowid(rn) = CSIO_INVALID_IDX; + goto alloc_rnode; + } + + /* + * Now compare the wwpn to confirm that + * same port relogged in. If so update the matched rn. + * Else, go ahead and alloc a new rnode. + */ + if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) { + if (rn == match_rn) + goto found_rnode; + csio_ln_dbg(ln, + "nport_id:x%x and wwpn:%llx" + " match for ssni:x%x\n", + rn->nport_id, + wwn_to_u64(rdevp->wwpn), + rdev_flowid); + if (csio_is_rnode_ready(rn)) { + csio_ln_warn(ln, + "rnode is already" + "active ssni:x%x\n", + rdev_flowid); + CSIO_ASSERT(0); + } + csio_rn_flowid(rn) = CSIO_INVALID_IDX; + rn = match_rn; + + /* Update rn */ + goto found_rnode; + } + csio_rn_flowid(rn) = CSIO_INVALID_IDX; + goto alloc_rnode; + } + + /* wwpn match */ + if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8)) + goto found_rnode; + + /* Search for rnode that have same wwpn */ + match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn); + if (match_rn != NULL) { + csio_ln_dbg(ln, + "ssni:x%x changed for rport name(wwpn):%llx " + "did:x%x\n", rdev_flowid, + wwn_to_u64(rdevp->wwpn), + match_rn->nport_id); + csio_rn_flowid(rn) = CSIO_INVALID_IDX; + rn = match_rn; + } else { + csio_ln_dbg(ln, + "rnode wwpn mismatch found ssni:x%x " + "name(wwpn):%llx\n", + rdev_flowid, + wwn_to_u64(csio_rn_wwpn(rn))); + if (csio_is_rnode_ready(rn)) { + csio_ln_warn(ln, + "rnode is already active " + "wwpn:%llx ssni:x%x\n", + wwn_to_u64(csio_rn_wwpn(rn)), + rdev_flowid); + CSIO_ASSERT(0); + } + csio_rn_flowid(rn) = CSIO_INVALID_IDX; + goto alloc_rnode; + } + } + +found_rnode: + csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n", + rn, rdev_flowid, wwn_to_u64(rdevp->wwpn)); + + /* Update flowid */ + csio_rn_flowid(rn) = rdev_flowid; + + /* update rdev entry */ + rn->rdev_entry = rdevp; + CSIO_INC_STATS(ln, n_rnode_match); + return rn; + +alloc_rnode: + rn = csio_get_rnode(ln, rdev_flowid); + if (!rn) + return NULL; + + csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n", + rn, rdev_flowid, wwn_to_u64(rdevp->wwpn)); + + /* update rdev entry */ + rn->rdev_entry = rdevp; + return rn; +} + +/* + * csio_rn_verify_rparams - verify rparams. + * @ln: lnode + * @rn: rnode + * @rdevp: remote device params + * returns success if rparams are verified. 
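+ *
+ * Besides setting the rnode role from the rport type, this checks that
+ * fabric and name-server entries carry the expected well-known port IDs
+ * (FC_FID_FLOGI / FC_FID_DIR_SERV), derives the FCP flags and
+ * initiator/target roles from the PRLI bits for regular N_Ports, and
+ * rejects zero WWNN/WWPN values for name-server and remote ports before
+ * copying the names, nport_id and service parameters into the rnode.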
+ */ +static int +csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn, + struct fcoe_rdev_entry *rdevp) +{ + uint8_t null[8]; + uint8_t rport_type; + uint8_t fc_class; + __be32 *did; + + did = (__be32 *) &rdevp->r_id[0]; + rport_type = + FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type); + switch (rport_type) { + case FLOGI_VFPORT: + rn->role = CSIO_RNFR_FABRIC; + if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) { + csio_ln_err(ln, "ssni:x%x invalid fabric portid\n", + csio_rn_flowid(rn)); + return -EINVAL; + } + /* NPIV support */ + if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos)) + ln->flags |= CSIO_LNF_NPIVSUPP; + + break; + + case NS_VNPORT: + rn->role = CSIO_RNFR_NS; + if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) { + csio_ln_err(ln, "ssni:x%x invalid fabric portid\n", + csio_rn_flowid(rn)); + return -EINVAL; + } + break; + + case REG_FC4_VNPORT: + case REG_VNPORT: + rn->role = CSIO_RNFR_NPORT; + if (rdevp->event_cause == PRLI_ACC_RCVD || + rdevp->event_cause == PRLI_RCVD) { + if (FW_RDEV_WR_TASK_RETRY_ID_GET( + rdevp->enh_disc_to_tgt)) + rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW; + + if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt)) + rn->fcp_flags |= FCP_SPPF_RETRY; + + if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt)) + rn->fcp_flags |= FCP_SPPF_CONF_COMPL; + + if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt)) + rn->role |= CSIO_RNFR_TARGET; + + if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt)) + rn->role |= CSIO_RNFR_INITIATOR; + } + + break; + + case FDMI_VNPORT: + case FAB_CTLR_VNPORT: + rn->role = 0; + break; + + default: + csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n", + csio_rn_flowid(rn), rport_type); + return -EINVAL; + } + + /* validate wwpn/wwnn for Name server/remote port */ + if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) { + memset(null, 0, 8); + if (!memcmp(rdevp->wwnn, null, 8)) { + csio_ln_err(ln, + "ssni:x%x invalid wwnn received from" + " rport did:x%x\n", + csio_rn_flowid(rn), + (ntohl(*did) & CSIO_DID_MASK)); + return -EINVAL; + } + + if (!memcmp(rdevp->wwpn, null, 8)) { + csio_ln_err(ln, + "ssni:x%x invalid wwpn received from" + " rport did:x%x\n", + csio_rn_flowid(rn), + (ntohl(*did) & CSIO_DID_MASK)); + return -EINVAL; + } + + } + + /* Copy wwnn, wwpn and nport id */ + rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK; + memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8); + memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8); + rn->rn_sparm.csp.sp_bb_data = rdevp->rcv_fr_sz; + fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos); + rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID); + + return 0; +} + +static void +__csio_reg_rnode(struct csio_rnode *rn) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + spin_unlock_irq(&hw->lock); + csio_reg_rnode(rn); + spin_lock_irq(&hw->lock); + + if (rn->role & CSIO_RNFR_TARGET) + ln->n_scsi_tgts++; + + if (rn->nport_id == FC_FID_MGMT_SERV) + csio_ln_fdmi_start(ln, (void *) rn); +} + +static void +__csio_unreg_rnode(struct csio_rnode *rn) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + struct csio_hw *hw = csio_lnode_to_hw(ln); + LIST_HEAD(tmp_q); + int cmpl = 0; + + if (!list_empty(&rn->host_cmpl_q)) { + csio_dbg(hw, "Returning completion queue I/Os\n"); + list_splice_tail_init(&rn->host_cmpl_q, &tmp_q); + cmpl = 1; + } + + if (rn->role & CSIO_RNFR_TARGET) { + ln->n_scsi_tgts--; + ln->last_scan_ntgts--; + } + + spin_unlock_irq(&hw->lock); + csio_unreg_rnode(rn); + spin_lock_irq(&hw->lock); + + /* Cleanup I/Os 
that were waiting for rnode to unregister */ + if (cmpl) + csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q); + +} + +/*****************************************************************************/ +/* START: Rnode SM */ +/*****************************************************************************/ + +/* + * csio_rns_uninit - + * @rn - rnode + * @evt - SM event. + * + */ +static void +csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + int ret = 0; + + CSIO_INC_STATS(rn, n_evt_sm[evt]); + + switch (evt) { + case CSIO_RNFE_LOGGED_IN: + case CSIO_RNFE_PLOGI_RECV: + ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry); + if (!ret) { + csio_set_state(&rn->sm, csio_rns_ready); + __csio_reg_rnode(rn); + } else { + CSIO_INC_STATS(rn, n_err_inval); + } + break; + case CSIO_RNFE_LOGO_RECV: + csio_ln_dbg(ln, + "ssni:x%x Ignoring event %d recv " + "in rn state[uninit]\n", csio_rn_flowid(rn), evt); + CSIO_INC_STATS(rn, n_evt_drop); + break; + default: + csio_ln_dbg(ln, + "ssni:x%x unexp event %d recv " + "in rn state[uninit]\n", csio_rn_flowid(rn), evt); + CSIO_INC_STATS(rn, n_evt_unexp); + break; + } +} + +/* + * csio_rns_ready - + * @rn - rnode + * @evt - SM event. + * + */ +static void +csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + int ret = 0; + + CSIO_INC_STATS(rn, n_evt_sm[evt]); + + switch (evt) { + case CSIO_RNFE_LOGGED_IN: + case CSIO_RNFE_PLOGI_RECV: + csio_ln_dbg(ln, + "ssni:x%x Ignoring event %d recv from did:x%x " + "in rn state[ready]\n", csio_rn_flowid(rn), evt, + rn->nport_id); + CSIO_INC_STATS(rn, n_evt_drop); + break; + + case CSIO_RNFE_PRLI_DONE: + case CSIO_RNFE_PRLI_RECV: + ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry); + if (!ret) + __csio_reg_rnode(rn); + else + CSIO_INC_STATS(rn, n_err_inval); + + break; + case CSIO_RNFE_DOWN: + csio_set_state(&rn->sm, csio_rns_offline); + __csio_unreg_rnode(rn); + + /* FW expected to internally aborted outstanding SCSI WRs + * and return all SCSI WRs to host with status "ABORTED". + */ + break; + + case CSIO_RNFE_LOGO_RECV: + csio_set_state(&rn->sm, csio_rns_offline); + + __csio_unreg_rnode(rn); + + /* FW expected to internally aborted outstanding SCSI WRs + * and return all SCSI WRs to host with status "ABORTED". + */ + break; + + case CSIO_RNFE_CLOSE: + /* + * Each rnode receives CLOSE event when driver is removed or + * device is reset + * Note: All outstanding IOs on remote port need to returned + * to uppper layer with appropriate error before sending + * CLOSE event + */ + csio_set_state(&rn->sm, csio_rns_uninit); + __csio_unreg_rnode(rn); + break; + + case CSIO_RNFE_NAME_MISSING: + csio_set_state(&rn->sm, csio_rns_disappeared); + __csio_unreg_rnode(rn); + + /* + * FW expected to internally aborted outstanding SCSI WRs + * and return all SCSI WRs to host with status "ABORTED". + */ + + break; + + default: + csio_ln_dbg(ln, + "ssni:x%x unexp event %d recv from did:x%x " + "in rn state[uninit]\n", csio_rn_flowid(rn), evt, + rn->nport_id); + CSIO_INC_STATS(rn, n_evt_unexp); + break; + } +} + +/* + * csio_rns_offline - + * @rn - rnode + * @evt - SM event. 
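+ *
+ * Offline means the rnode has been unregistered from the FC transport but
+ * not freed: a later login event re-verifies the remote port parameters
+ * and moves it back to ready, CLOSE drops it to uninit, and a missing
+ * name-server entry parks it in the disappeared state.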
+ * + */ +static void +csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + int ret = 0; + + CSIO_INC_STATS(rn, n_evt_sm[evt]); + + switch (evt) { + case CSIO_RNFE_LOGGED_IN: + case CSIO_RNFE_PLOGI_RECV: + ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry); + if (!ret) { + csio_set_state(&rn->sm, csio_rns_ready); + __csio_reg_rnode(rn); + } else { + CSIO_INC_STATS(rn, n_err_inval); + csio_post_event(&rn->sm, CSIO_RNFE_CLOSE); + } + break; + + case CSIO_RNFE_DOWN: + csio_ln_dbg(ln, + "ssni:x%x Ignoring event %d recv from did:x%x " + "in rn state[offline]\n", csio_rn_flowid(rn), evt, + rn->nport_id); + CSIO_INC_STATS(rn, n_evt_drop); + break; + + case CSIO_RNFE_CLOSE: + /* Each rnode receives CLOSE event when driver is removed or + * device is reset + * Note: All outstanding IOs on remote port need to returned + * to uppper layer with appropriate error before sending + * CLOSE event + */ + csio_set_state(&rn->sm, csio_rns_uninit); + break; + + case CSIO_RNFE_NAME_MISSING: + csio_set_state(&rn->sm, csio_rns_disappeared); + break; + + default: + csio_ln_dbg(ln, + "ssni:x%x unexp event %d recv from did:x%x " + "in rn state[offline]\n", csio_rn_flowid(rn), evt, + rn->nport_id); + CSIO_INC_STATS(rn, n_evt_unexp); + break; + } +} + +/* + * csio_rns_disappeared - + * @rn - rnode + * @evt - SM event. + * + */ +static void +csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + int ret = 0; + + CSIO_INC_STATS(rn, n_evt_sm[evt]); + + switch (evt) { + case CSIO_RNFE_LOGGED_IN: + case CSIO_RNFE_PLOGI_RECV: + ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry); + if (!ret) { + csio_set_state(&rn->sm, csio_rns_ready); + __csio_reg_rnode(rn); + } else { + CSIO_INC_STATS(rn, n_err_inval); + csio_post_event(&rn->sm, CSIO_RNFE_CLOSE); + } + break; + + case CSIO_RNFE_CLOSE: + /* Each rnode receives CLOSE event when driver is removed or + * device is reset. + * Note: All outstanding IOs on remote port need to returned + * to uppper layer with appropriate error before sending + * CLOSE event + */ + csio_set_state(&rn->sm, csio_rns_uninit); + break; + + case CSIO_RNFE_DOWN: + case CSIO_RNFE_NAME_MISSING: + csio_ln_dbg(ln, + "ssni:x%x Ignoring event %d recv from did x%x" + "in rn state[disappeared]\n", csio_rn_flowid(rn), + evt, rn->nport_id); + break; + + default: + csio_ln_dbg(ln, + "ssni:x%x unexp event %d recv from did x%x" + "in rn state[disappeared]\n", csio_rn_flowid(rn), + evt, rn->nport_id); + CSIO_INC_STATS(rn, n_evt_unexp); + break; + } +} + +/*****************************************************************************/ +/* END: Rnode SM */ +/*****************************************************************************/ + +/* + * csio_rnode_devloss_handler - Device loss event handler + * @rn: rnode + * + * Post event to close rnode SM and free rnode. + */ +void +csio_rnode_devloss_handler(struct csio_rnode *rn) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + + /* ignore if same rnode came back as online */ + if (csio_is_rnode_ready(rn)) + return; + + csio_post_event(&rn->sm, CSIO_RNFE_CLOSE); + + /* Free rn if in uninit state */ + if (csio_is_rnode_uninit(rn)) + csio_put_rnode(ln, rn); +} + +/** + * csio_rnode_fwevt_handler - Event handler for firmware rnode events. 
+ * @rn: rnode + * @fwevt: firmware event to handle + */ +void +csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + enum csio_rn_ev evt; + + evt = CSIO_FWE_TO_RNFE(fwevt); + if (!evt) { + csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n", + csio_rn_flowid(rn), fwevt); + CSIO_INC_STATS(rn, n_evt_unexp); + return; + } + CSIO_INC_STATS(rn, n_evt_fw[fwevt]); + + /* Track previous & current events for debugging */ + rn->prev_evt = rn->cur_evt; + rn->cur_evt = fwevt; + + /* Post event to rnode SM */ + csio_post_event(&rn->sm, evt); + + /* Free rn if in uninit state */ + if (csio_is_rnode_uninit(rn)) + csio_put_rnode(ln, rn); +} + +/* + * csio_rnode_init - Initialize rnode. + * @rn: RNode + * @ln: Associated lnode + * + * Caller is responsible for holding the lock. The lock is required + * to be held for inserting the rnode in ln->rnhead list. + */ +static int +csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln) +{ + csio_rnode_to_lnode(rn) = ln; + csio_init_state(&rn->sm, csio_rns_uninit); + INIT_LIST_HEAD(&rn->host_cmpl_q); + csio_rn_flowid(rn) = CSIO_INVALID_IDX; + + /* Add rnode to list of lnodes->rnhead */ + list_add_tail(&rn->sm.sm_list, &ln->rnhead); + + return 0; +} + +static void +csio_rnode_exit(struct csio_rnode *rn) +{ + list_del_init(&rn->sm.sm_list); + CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q)); +} diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h new file mode 100644 index 000000000..433434221 --- /dev/null +++ b/drivers/scsi/csiostor/csio_rnode.h @@ -0,0 +1,141 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CSIO_RNODE_H__ +#define __CSIO_RNODE_H__ + +#include "csio_defs.h" + +/* State machine evets */ +enum csio_rn_ev { + CSIO_RNFE_NONE = (uint32_t)0, /* None */ + CSIO_RNFE_LOGGED_IN, /* [N/F]Port login + * complete. 
+ */ + CSIO_RNFE_PRLI_DONE, /* PRLI completed */ + CSIO_RNFE_PLOGI_RECV, /* Received PLOGI */ + CSIO_RNFE_PRLI_RECV, /* Received PLOGI */ + CSIO_RNFE_LOGO_RECV, /* Received LOGO */ + CSIO_RNFE_PRLO_RECV, /* Received PRLO */ + CSIO_RNFE_DOWN, /* Rnode is down */ + CSIO_RNFE_CLOSE, /* Close rnode */ + CSIO_RNFE_NAME_MISSING, /* Rnode name missing + * in name server. + */ + CSIO_RNFE_MAX_EVENT, +}; + +/* rnode stats */ +struct csio_rnode_stats { + uint32_t n_err; /* error */ + uint32_t n_err_inval; /* invalid parameter */ + uint32_t n_err_nomem; /* error nomem */ + uint32_t n_evt_unexp; /* unexpected event */ + uint32_t n_evt_drop; /* unexpected event */ + uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */ + enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */ + uint32_t n_lun_rst; /* Number of resets of + * of LUNs under this + * target + */ + uint32_t n_lun_rst_fail; /* Number of LUN reset + * failures. + */ + uint32_t n_tgt_rst; /* Number of target resets */ + uint32_t n_tgt_rst_fail; /* Number of target reset + * failures. + */ +}; + +/* Defines for rnode role */ +#define CSIO_RNFR_INITIATOR 0x1 +#define CSIO_RNFR_TARGET 0x2 +#define CSIO_RNFR_FABRIC 0x4 +#define CSIO_RNFR_NS 0x8 +#define CSIO_RNFR_NPORT 0x10 + +struct csio_rnode { + struct csio_sm sm; /* State machine - + * should be the + * 1st member + */ + struct csio_lnode *lnp; /* Pointer to owning + * Lnode */ + uint32_t flowid; /* Firmware ID */ + struct list_head host_cmpl_q; /* SCSI IOs + * pending to completed + * to Mid-layer. + */ + /* FC identifiers for remote node */ + uint32_t nport_id; + uint16_t fcp_flags; /* FCP Flags */ + uint8_t cur_evt; /* Current event */ + uint8_t prev_evt; /* Previous event */ + uint32_t role; /* Fabric/Target/ + * Initiator/NS + */ + struct fcoe_rdev_entry *rdev_entry; /* Rdev entry */ + struct csio_service_parms rn_sparm; + + /* FC transport attributes */ + struct fc_rport *rport; /* FC transport rport */ + uint32_t supp_classes; /* Supported FC classes */ + uint32_t maxframe_size; /* Max Frame size */ + uint32_t scsi_id; /* Transport given SCSI id */ + + struct csio_rnode_stats stats; /* Common rnode stats */ +}; + +#define csio_rn_flowid(rn) ((rn)->flowid) +#define csio_rn_wwpn(rn) ((rn)->rn_sparm.wwpn) +#define csio_rn_wwnn(rn) ((rn)->rn_sparm.wwnn) +#define csio_rnode_to_lnode(rn) ((rn)->lnp) + +int csio_is_rnode_ready(struct csio_rnode *rn); +void csio_rnode_state_to_str(struct csio_rnode *rn, int8_t *str); + +struct csio_rnode *csio_rnode_lookup_portid(struct csio_lnode *, uint32_t); +struct csio_rnode *csio_confirm_rnode(struct csio_lnode *, + uint32_t, struct fcoe_rdev_entry *); + +void csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt); + +void csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn); + +void csio_reg_rnode(struct csio_rnode *); +void csio_unreg_rnode(struct csio_rnode *); + +void csio_rnode_devloss_handler(struct csio_rnode *); + +#endif /* ifndef __CSIO_RNODE_H__ */ diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c new file mode 100644 index 000000000..05e1a63e0 --- /dev/null +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -0,0 +1,2529 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "csio_hw.h" +#include "csio_lnode.h" +#include "csio_rnode.h" +#include "csio_scsi.h" +#include "csio_init.h" + +int csio_scsi_eqsize = 65536; +int csio_scsi_iqlen = 128; +int csio_scsi_ioreqs = 2048; +uint32_t csio_max_scan_tmo; +uint32_t csio_delta_scan_tmo = 5; +int csio_lun_qdepth = 32; + +static int csio_ddp_descs = 128; + +static int csio_do_abrt_cls(struct csio_hw *, + struct csio_ioreq *, bool); + +static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev); +static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev); +static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev); +static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev); +static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev); +static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev); + +/* + * csio_scsi_match_io - Match an ioreq with the given SCSI level data. + * @ioreq: The I/O request + * @sld: Level information + * + * Should be called with lock held. + * + */ +static bool +csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld) +{ + struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq); + + switch (sld->level) { + case CSIO_LEV_LUN: + if (scmnd == NULL) + return false; + + return ((ioreq->lnode == sld->lnode) && + (ioreq->rnode == sld->rnode) && + ((uint64_t)scmnd->device->lun == sld->oslun)); + + case CSIO_LEV_RNODE: + return ((ioreq->lnode == sld->lnode) && + (ioreq->rnode == sld->rnode)); + case CSIO_LEV_LNODE: + return (ioreq->lnode == sld->lnode); + case CSIO_LEV_ALL: + return true; + default: + return false; + } +} + +/* + * csio_scsi_gather_active_ios - Gather active I/Os based on level + * @scm: SCSI module + * @sld: Level information + * @dest: The queue where these I/Os have to be gathered. + * + * Should be called with lock held. 
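+ * The hw lock must remain held for the entire walk: entries are moved
+ * from active_q to @dest with list_del_init()/list_add_tail(), and
+ * dropping the lock mid-walk could allow a completion to run and
+ * corrupt the lists.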
+ */ +static void +csio_scsi_gather_active_ios(struct csio_scsim *scm, + struct csio_scsi_level_data *sld, + struct list_head *dest) +{ + struct list_head *tmp, *next; + + if (list_empty(&scm->active_q)) + return; + + /* Just splice the entire active_q into dest */ + if (sld->level == CSIO_LEV_ALL) { + list_splice_tail_init(&scm->active_q, dest); + return; + } + + list_for_each_safe(tmp, next, &scm->active_q) { + if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) { + list_del_init(tmp); + list_add_tail(tmp, dest); + } + } +} + +static inline bool +csio_scsi_itnexus_loss_error(uint16_t error) +{ + switch (error) { + case FW_ERR_LINK_DOWN: + case FW_RDEV_NOT_READY: + case FW_ERR_RDEV_LOST: + case FW_ERR_RDEV_LOGO: + case FW_ERR_RDEV_IMPL_LOGO: + return true; + } + return false; +} + +/* + * csio_scsi_fcp_cmnd - Frame the SCSI FCP command paylod. + * @req: IO req structure. + * @addr: DMA location to place the payload. + * + * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests. + */ +static inline void +csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr) +{ + struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr; + struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); + + /* Check for Task Management */ + if (likely(csio_priv(scmnd)->fc_tm_flags == 0)) { + int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun); + fcp_cmnd->fc_tm_flags = 0; + fcp_cmnd->fc_cmdref = 0; + + memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16); + fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; + fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd)); + + if (req->nsge) + if (req->datadir == DMA_TO_DEVICE) + fcp_cmnd->fc_flags = FCP_CFL_WRDATA; + else + fcp_cmnd->fc_flags = FCP_CFL_RDDATA; + else + fcp_cmnd->fc_flags = 0; + } else { + memset(fcp_cmnd, 0, sizeof(*fcp_cmnd)); + int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun); + fcp_cmnd->fc_tm_flags = csio_priv(scmnd)->fc_tm_flags; + } +} + +/* + * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR. + * @req: IO req structure. + * @addr: DMA location to place the payload. + * @size: Size of WR (including FW WR + immed data + rsp SG entry + * + * Wrapper for populating fw_scsi_cmd_wr. + */ +static inline void +csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size) +{ + struct csio_hw *hw = req->lnode->hwp; + struct csio_rnode *rn = req->rnode; + struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr; + struct csio_dma_buf *dma_buf; + uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; + + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) | + FW_SCSI_CMD_WR_IMMDLEN(imm)); + wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | + FW_WR_LEN16_V( + DIV_ROUND_UP(size, 16))); + + wr->cookie = (uintptr_t) req; + wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); + wr->tmo_val = (uint8_t) req->tmo; + wr->r3 = 0; + memset(&wr->r5, 0, 8); + + /* Get RSP DMA buffer */ + dma_buf = &req->dma_buf; + + /* Prepare RSP SGL */ + wr->rsp_dmalen = cpu_to_be32(dma_buf->len); + wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr); + + wr->r6 = 0; + + wr->u.fcoe.ctl_pri = 0; + wr->u.fcoe.cp_en_class = 0; + wr->u.fcoe.r4_lo[0] = 0; + wr->u.fcoe.r4_lo[1] = 0; + + /* Frame a FCP command */ + csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr + + sizeof(struct fw_scsi_cmd_wr))); +} + +#define CSIO_SCSI_CMD_WR_SZ(_imm) \ + (sizeof(struct fw_scsi_cmd_wr) + /* WR size */ \ + ALIGN((_imm), 16)) /* Immed data */ + +#define CSIO_SCSI_CMD_WR_SZ_16(_imm) \ + (ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16)) + +/* + * csio_scsi_cmd - Create a SCSI CMD WR. + * @req: IO req structure. 
+ * + * Gets a WR slot in the ingress queue and initializes it with SCSI CMD WR. + * + */ +static inline void +csio_scsi_cmd(struct csio_ioreq *req) +{ + struct csio_wr_pair wrp; + struct csio_hw *hw = req->lnode->hwp; + struct csio_scsim *scsim = csio_hw_to_scsim(hw); + uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len); + + req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); + if (unlikely(req->drv_status != 0)) + return; + + if (wrp.size1 >= size) { + /* Initialize WR in one shot */ + csio_scsi_init_cmd_wr(req, wrp.addr1, size); + } else { + uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); + + /* + * Make a temporary copy of the WR and write back + * the copy into the WR pair. + */ + csio_scsi_init_cmd_wr(req, (void *)tmpwr, size); + memcpy(wrp.addr1, tmpwr, wrp.size1); + memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1); + } +} + +/* + * csio_scsi_init_ulptx_dsgl - Fill in a ULP_TX_SC_DSGL + * @hw: HW module + * @req: IO request + * @sgl: ULP TX SGL pointer. + * + */ +static inline void +csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req, + struct ulptx_sgl *sgl) +{ + struct ulptx_sge_pair *sge_pair = NULL; + struct scatterlist *sgel; + uint32_t i = 0; + uint32_t xfer_len; + struct list_head *tmp; + struct csio_dma_buf *dma_buf; + struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); + + sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F | + ULPTX_NSGE_V(req->nsge)); + /* Now add the data SGLs */ + if (likely(!req->dcopy)) { + scsi_for_each_sg(scmnd, sgel, req->nsge, i) { + if (i == 0) { + sgl->addr0 = cpu_to_be64(sg_dma_address(sgel)); + sgl->len0 = cpu_to_be32(sg_dma_len(sgel)); + sge_pair = (struct ulptx_sge_pair *)(sgl + 1); + continue; + } + if ((i - 1) & 0x1) { + sge_pair->addr[1] = cpu_to_be64( + sg_dma_address(sgel)); + sge_pair->len[1] = cpu_to_be32( + sg_dma_len(sgel)); + sge_pair++; + } else { + sge_pair->addr[0] = cpu_to_be64( + sg_dma_address(sgel)); + sge_pair->len[0] = cpu_to_be32( + sg_dma_len(sgel)); + } + } + } else { + /* Program sg elements with driver's DDP buffer */ + xfer_len = scsi_bufflen(scmnd); + list_for_each(tmp, &req->gen_list) { + dma_buf = (struct csio_dma_buf *)tmp; + if (i == 0) { + sgl->addr0 = cpu_to_be64(dma_buf->paddr); + sgl->len0 = cpu_to_be32( + min(xfer_len, dma_buf->len)); + sge_pair = (struct ulptx_sge_pair *)(sgl + 1); + } else if ((i - 1) & 0x1) { + sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr); + sge_pair->len[1] = cpu_to_be32( + min(xfer_len, dma_buf->len)); + sge_pair++; + } else { + sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr); + sge_pair->len[0] = cpu_to_be32( + min(xfer_len, dma_buf->len)); + } + xfer_len -= min(xfer_len, dma_buf->len); + i++; + } + } +} + +/* + * csio_scsi_init_read_wr - Initialize the READ SCSI WR. + * @req: IO req structure. + * @wrp: DMA location to place the payload. + * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL + * + * Wrapper for populating fw_scsi_read_wr. 
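+ *
+ * Resulting WR layout: the fw_scsi_read_wr header, the immediate
+ * FCP_CMND payload padded to a 16-byte boundary, and finally the
+ * ULPTX DSGL describing the data buffers:
+ *
+ *   +-----------------+--------------------------+------------------+
+ *   | fw_scsi_read_wr | FCP_CMND (imm, 16-align) | ulptx_sgl (DSGL) |
+ *   +-----------------+--------------------------+------------------+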
+ */ +static inline void +csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size) +{ + struct csio_hw *hw = req->lnode->hwp; + struct csio_rnode *rn = req->rnode; + struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp; + struct ulptx_sgl *sgl; + struct csio_dma_buf *dma_buf; + uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; + struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); + + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_READ_WR) | + FW_SCSI_READ_WR_IMMDLEN(imm)); + wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | + FW_WR_LEN16_V(DIV_ROUND_UP(size, 16))); + wr->cookie = (uintptr_t)req; + wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); + wr->tmo_val = (uint8_t)(req->tmo); + wr->use_xfer_cnt = 1; + wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd)); + wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd)); + /* Get RSP DMA buffer */ + dma_buf = &req->dma_buf; + + /* Prepare RSP SGL */ + wr->rsp_dmalen = cpu_to_be32(dma_buf->len); + wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr); + + wr->r4 = 0; + + wr->u.fcoe.ctl_pri = 0; + wr->u.fcoe.cp_en_class = 0; + wr->u.fcoe.r3_lo[0] = 0; + wr->u.fcoe.r3_lo[1] = 0; + csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp + + sizeof(struct fw_scsi_read_wr))); + + /* Move WR pointer past command and immediate data */ + sgl = (struct ulptx_sgl *)((uintptr_t)wrp + + sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16)); + + /* Fill in the DSGL */ + csio_scsi_init_ultptx_dsgl(hw, req, sgl); +} + +/* + * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR. + * @req: IO req structure. + * @wrp: DMA location to place the payload. + * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL + * + * Wrapper for populating fw_scsi_write_wr. + */ +static inline void +csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size) +{ + struct csio_hw *hw = req->lnode->hwp; + struct csio_rnode *rn = req->rnode; + struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp; + struct ulptx_sgl *sgl; + struct csio_dma_buf *dma_buf; + uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; + struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); + + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_WRITE_WR) | + FW_SCSI_WRITE_WR_IMMDLEN(imm)); + wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | + FW_WR_LEN16_V(DIV_ROUND_UP(size, 16))); + wr->cookie = (uintptr_t)req; + wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); + wr->tmo_val = (uint8_t)(req->tmo); + wr->use_xfer_cnt = 1; + wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd)); + wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd)); + /* Get RSP DMA buffer */ + dma_buf = &req->dma_buf; + + /* Prepare RSP SGL */ + wr->rsp_dmalen = cpu_to_be32(dma_buf->len); + wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr); + + wr->r4 = 0; + + wr->u.fcoe.ctl_pri = 0; + wr->u.fcoe.cp_en_class = 0; + wr->u.fcoe.r3_lo[0] = 0; + wr->u.fcoe.r3_lo[1] = 0; + csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp + + sizeof(struct fw_scsi_write_wr))); + + /* Move WR pointer past command and immediate data */ + sgl = (struct ulptx_sgl *)((uintptr_t)wrp + + sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16)); + + /* Fill in the DSGL */ + csio_scsi_init_ultptx_dsgl(hw, req, sgl); +} + +/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */ +#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm) \ +do { \ + (sz) = sizeof(struct fw_scsi_##oper##_wr) + /* WR size */ \ + ALIGN((imm), 16) + /* Immed data */ \ + sizeof(struct ulptx_sgl); /* ulptx_sgl */ \ + \ + if (unlikely((req)->nsge > 1)) \ + 
(sz) += (sizeof(struct ulptx_sge_pair) * \ + (ALIGN(((req)->nsge - 1), 2) / 2)); \ + /* Data SGE */ \ +} while (0) + +/* + * csio_scsi_read - Create a SCSI READ WR. + * @req: IO req structure. + * + * Gets a WR slot in the ingress queue and initializes it with + * SCSI READ WR. + * + */ +static inline void +csio_scsi_read(struct csio_ioreq *req) +{ + struct csio_wr_pair wrp; + uint32_t size; + struct csio_hw *hw = req->lnode->hwp; + struct csio_scsim *scsim = csio_hw_to_scsim(hw); + + CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len); + size = ALIGN(size, 16); + + req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); + if (likely(req->drv_status == 0)) { + if (likely(wrp.size1 >= size)) { + /* Initialize WR in one shot */ + csio_scsi_init_read_wr(req, wrp.addr1, size); + } else { + uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); + /* + * Make a temporary copy of the WR and write back + * the copy into the WR pair. + */ + csio_scsi_init_read_wr(req, (void *)tmpwr, size); + memcpy(wrp.addr1, tmpwr, wrp.size1); + memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1); + } + } +} + +/* + * csio_scsi_write - Create a SCSI WRITE WR. + * @req: IO req structure. + * + * Gets a WR slot in the ingress queue and initializes it with + * SCSI WRITE WR. + * + */ +static inline void +csio_scsi_write(struct csio_ioreq *req) +{ + struct csio_wr_pair wrp; + uint32_t size; + struct csio_hw *hw = req->lnode->hwp; + struct csio_scsim *scsim = csio_hw_to_scsim(hw); + + CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len); + size = ALIGN(size, 16); + + req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); + if (likely(req->drv_status == 0)) { + if (likely(wrp.size1 >= size)) { + /* Initialize WR in one shot */ + csio_scsi_init_write_wr(req, wrp.addr1, size); + } else { + uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); + /* + * Make a temporary copy of the WR and write back + * the copy into the WR pair. + */ + csio_scsi_init_write_wr(req, (void *)tmpwr, size); + memcpy(wrp.addr1, tmpwr, wrp.size1); + memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1); + } + } +} + +/* + * csio_setup_ddp - Setup DDP buffers for Read request. + * @req: IO req structure. + * + * Checks SGLs/Data buffers are virtually contiguous required for DDP. + * If contiguous,driver posts SGLs in the WR otherwise post internal + * buffers for such request for DDP. + */ +static inline void +csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req) +{ +#ifdef __CSIO_DEBUG__ + struct csio_hw *hw = req->lnode->hwp; +#endif + struct scatterlist *sgel = NULL; + struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); + uint64_t sg_addr = 0; + uint32_t ddp_pagesz = 4096; + uint32_t buf_off; + struct csio_dma_buf *dma_buf = NULL; + uint32_t alloc_len = 0; + uint32_t xfer_len = 0; + uint32_t sg_len = 0; + uint32_t i; + + scsi_for_each_sg(scmnd, sgel, req->nsge, i) { + sg_addr = sg_dma_address(sgel); + sg_len = sg_dma_len(sgel); + + buf_off = sg_addr & (ddp_pagesz - 1); + + /* Except 1st buffer,all buffer addr have to be Page aligned */ + if (i != 0 && buf_off) { + csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n", + sg_addr, sg_len); + goto unaligned; + } + + /* Except last buffer,all buffer must end on page boundary */ + if ((i != (req->nsge - 1)) && + ((buf_off + sg_len) & (ddp_pagesz - 1))) { + csio_dbg(hw, + "SGL addr not ending on page boundary" + "(%llx:%d)\n", sg_addr, sg_len); + goto unaligned; + } + } + + /* SGL's are virtually contiguous. 
HW will DDP to SGLs */ + req->dcopy = 0; + csio_scsi_read(req); + + return; + +unaligned: + CSIO_INC_STATS(scsim, n_unaligned); + /* + * For unaligned SGLs, driver will allocate internal DDP buffer. + * Once command is completed data from DDP buffer copied to SGLs + */ + req->dcopy = 1; + + /* Use gen_list to store the DDP buffers */ + INIT_LIST_HEAD(&req->gen_list); + xfer_len = scsi_bufflen(scmnd); + + i = 0; + /* Allocate ddp buffers for this request */ + while (alloc_len < xfer_len) { + dma_buf = csio_get_scsi_ddp(scsim); + if (dma_buf == NULL || i > scsim->max_sge) { + req->drv_status = -EBUSY; + break; + } + alloc_len += dma_buf->len; + /* Added to IO req */ + list_add_tail(&dma_buf->list, &req->gen_list); + i++; + } + + if (!req->drv_status) { + /* set number of ddp bufs used */ + req->nsge = i; + csio_scsi_read(req); + return; + } + + /* release dma descs */ + if (i > 0) + csio_put_scsi_ddp_list(scsim, &req->gen_list, i); +} + +/* + * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR. + * @req: IO req structure. + * @addr: DMA location to place the payload. + * @size: Size of WR + * @abort: abort OR close + * + * Wrapper for populating fw_scsi_cmd_wr. + */ +static inline void +csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size, + bool abort) +{ + struct csio_hw *hw = req->lnode->hwp; + struct csio_rnode *rn = req->rnode; + struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr; + + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_ABRT_CLS_WR)); + wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | + FW_WR_LEN16_V( + DIV_ROUND_UP(size, 16))); + + wr->cookie = (uintptr_t) req; + wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); + wr->tmo_val = (uint8_t) req->tmo; + /* 0 for CHK_ALL_IO tells FW to look up t_cookie */ + wr->sub_opcode_to_chk_all_io = + (FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) | + FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0)); + wr->r3[0] = 0; + wr->r3[1] = 0; + wr->r3[2] = 0; + wr->r3[3] = 0; + /* Since we re-use the same ioreq for abort as well */ + wr->t_cookie = (uintptr_t) req; +} + +static inline void +csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort) +{ + struct csio_wr_pair wrp; + struct csio_hw *hw = req->lnode->hwp; + uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16); + + req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); + if (req->drv_status != 0) + return; + + if (wrp.size1 >= size) { + /* Initialize WR in one shot */ + csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort); + } else { + uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); + /* + * Make a temporary copy of the WR and write back + * the copy into the WR pair. 
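+ * This handles an egress queue wrap: csio_wr_get() returned the slot
+ * split in two, with the first wrp.size1 bytes at wrp.addr1 (end of
+ * the ring) and the remainder at wrp.addr2 (start of the ring).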
+ */ + csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort); + memcpy(wrp.addr1, tmpwr, wrp.size1); + memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1); + } +} + +/*****************************************************************************/ +/* START: SCSI SM */ +/*****************************************************************************/ +static void +csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt) +{ + struct csio_hw *hw = req->lnode->hwp; + struct csio_scsim *scsim = csio_hw_to_scsim(hw); + + switch (evt) { + case CSIO_SCSIE_START_IO: + + if (req->nsge) { + if (req->datadir == DMA_TO_DEVICE) { + req->dcopy = 0; + csio_scsi_write(req); + } else + csio_setup_ddp(scsim, req); + } else { + csio_scsi_cmd(req); + } + + if (likely(req->drv_status == 0)) { + /* change state and enqueue on active_q */ + csio_set_state(&req->sm, csio_scsis_io_active); + list_add_tail(&req->sm.sm_list, &scsim->active_q); + csio_wr_issue(hw, req->eq_idx, false); + CSIO_INC_STATS(scsim, n_active); + + return; + } + break; + + case CSIO_SCSIE_START_TM: + csio_scsi_cmd(req); + if (req->drv_status == 0) { + /* + * NOTE: We collect the affected I/Os prior to issuing + * LUN reset, and not after it. This is to prevent + * aborting I/Os that get issued after the LUN reset, + * but prior to LUN reset completion (in the event that + * the host stack has not blocked I/Os to a LUN that is + * being reset. + */ + csio_set_state(&req->sm, csio_scsis_tm_active); + list_add_tail(&req->sm.sm_list, &scsim->active_q); + csio_wr_issue(hw, req->eq_idx, false); + CSIO_INC_STATS(scsim, n_tm_active); + } + return; + + case CSIO_SCSIE_ABORT: + case CSIO_SCSIE_CLOSE: + /* + * NOTE: + * We could get here due to : + * - a window in the cleanup path of the SCSI module + * (csio_scsi_abort_io()). Please see NOTE in this function. + * - a window in the time we tried to issue an abort/close + * of a request to FW, and the FW completed the request + * itself. + * Print a message for now, and return INVAL either way. + */ + req->drv_status = -EINVAL; + csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req); + break; + + default: + csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); + CSIO_DB_ASSERT(0); + } +} + +static void +csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt) +{ + struct csio_hw *hw = req->lnode->hwp; + struct csio_scsim *scm = csio_hw_to_scsim(hw); + struct csio_rnode *rn; + + switch (evt) { + case CSIO_SCSIE_COMPLETED: + CSIO_DEC_STATS(scm, n_active); + list_del_init(&req->sm.sm_list); + csio_set_state(&req->sm, csio_scsis_uninit); + /* + * In MSIX mode, with multiple queues, the SCSI compeltions + * could reach us sooner than the FW events sent to indicate + * I-T nexus loss (link down, remote device logo etc). We + * dont want to be returning such I/Os to the upper layer + * immediately, since we wouldnt have reported the I-T nexus + * loss itself. This forces us to serialize such completions + * with the reporting of the I-T nexus loss. Therefore, we + * internally queue up such up such completions in the rnode. + * The reporting of I-T nexus loss to the upper layer is then + * followed by the returning of I/Os in this internal queue. + * Having another state alongwith another queue helps us take + * actions for events such as ABORT received while we are + * in this rnode queue. + */ + if (unlikely(req->wr_status != FW_SUCCESS)) { + rn = req->rnode; + /* + * FW says remote device is lost, but rnode + * doesnt reflect it. 
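+ * Such completions are parked on the rnode's host_cmpl_q and are
+ * returned to the upper layer later, from the remote port unregister
+ * path, once the I-T nexus loss has been reported.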
+ */ + if (csio_scsi_itnexus_loss_error(req->wr_status) && + csio_is_rnode_ready(rn)) { + csio_set_state(&req->sm, + csio_scsis_shost_cmpl_await); + list_add_tail(&req->sm.sm_list, + &rn->host_cmpl_q); + } + } + + break; + + case CSIO_SCSIE_ABORT: + csio_scsi_abrt_cls(req, SCSI_ABORT); + if (req->drv_status == 0) { + csio_wr_issue(hw, req->eq_idx, false); + csio_set_state(&req->sm, csio_scsis_aborting); + } + break; + + case CSIO_SCSIE_CLOSE: + csio_scsi_abrt_cls(req, SCSI_CLOSE); + if (req->drv_status == 0) { + csio_wr_issue(hw, req->eq_idx, false); + csio_set_state(&req->sm, csio_scsis_closing); + } + break; + + case CSIO_SCSIE_DRVCLEANUP: + req->wr_status = FW_HOSTERROR; + CSIO_DEC_STATS(scm, n_active); + csio_set_state(&req->sm, csio_scsis_uninit); + break; + + default: + csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); + CSIO_DB_ASSERT(0); + } +} + +static void +csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt) +{ + struct csio_hw *hw = req->lnode->hwp; + struct csio_scsim *scm = csio_hw_to_scsim(hw); + + switch (evt) { + case CSIO_SCSIE_COMPLETED: + CSIO_DEC_STATS(scm, n_tm_active); + list_del_init(&req->sm.sm_list); + csio_set_state(&req->sm, csio_scsis_uninit); + + break; + + case CSIO_SCSIE_ABORT: + csio_scsi_abrt_cls(req, SCSI_ABORT); + if (req->drv_status == 0) { + csio_wr_issue(hw, req->eq_idx, false); + csio_set_state(&req->sm, csio_scsis_aborting); + } + break; + + + case CSIO_SCSIE_CLOSE: + csio_scsi_abrt_cls(req, SCSI_CLOSE); + if (req->drv_status == 0) { + csio_wr_issue(hw, req->eq_idx, false); + csio_set_state(&req->sm, csio_scsis_closing); + } + break; + + case CSIO_SCSIE_DRVCLEANUP: + req->wr_status = FW_HOSTERROR; + CSIO_DEC_STATS(scm, n_tm_active); + csio_set_state(&req->sm, csio_scsis_uninit); + break; + + default: + csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); + CSIO_DB_ASSERT(0); + } +} + +static void +csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt) +{ + struct csio_hw *hw = req->lnode->hwp; + struct csio_scsim *scm = csio_hw_to_scsim(hw); + + switch (evt) { + case CSIO_SCSIE_COMPLETED: + csio_dbg(hw, + "ioreq %p recvd cmpltd (wr_status:%d) " + "in aborting st\n", req, req->wr_status); + /* + * Use -ECANCELED to explicitly tell the ABORTED event that + * the original I/O was returned to driver by FW. + * We dont really care if the I/O was returned with success by + * FW (because the ABORT and completion of the I/O crossed each + * other), or any other return value. Once we are in aborting + * state, the success or failure of the I/O is unimportant to + * us. + */ + req->drv_status = -ECANCELED; + break; + + case CSIO_SCSIE_ABORT: + CSIO_INC_STATS(scm, n_abrt_dups); + break; + + case CSIO_SCSIE_ABORTED: + + csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n", + req, req->wr_status, req->drv_status); + /* + * Check if original I/O WR completed before the Abort + * completion. + */ + if (req->drv_status != -ECANCELED) { + csio_warn(hw, + "Abort completed before original I/O," + " req:%p\n", req); + CSIO_DB_ASSERT(0); + } + + /* + * There are the following possible scenarios: + * 1. The abort completed successfully, FW returned FW_SUCCESS. + * 2. The completion of an I/O and the receipt of + * abort for that I/O by the FW crossed each other. + * The FW returned FW_EINVAL. The original I/O would have + * returned with FW_SUCCESS or any other SCSI error. + * 3. The FW couldn't sent the abort out on the wire, as there + * was an I-T nexus loss (link down, remote device logged + * out etc). 
FW sent back an appropriate IT nexus loss status + * for the abort. + * 4. FW sent an abort, but abort timed out (remote device + * didnt respond). FW replied back with + * FW_SCSI_ABORT_TIMEDOUT. + * 5. FW couldn't genuinely abort the request for some reason, + * and sent us an error. + * + * The first 3 scenarios are treated as succesful abort + * operations by the host, while the last 2 are failed attempts + * to abort. Manipulate the return value of the request + * appropriately, so that host can convey these results + * back to the upper layer. + */ + if ((req->wr_status == FW_SUCCESS) || + (req->wr_status == FW_EINVAL) || + csio_scsi_itnexus_loss_error(req->wr_status)) + req->wr_status = FW_SCSI_ABORT_REQUESTED; + + CSIO_DEC_STATS(scm, n_active); + list_del_init(&req->sm.sm_list); + csio_set_state(&req->sm, csio_scsis_uninit); + break; + + case CSIO_SCSIE_DRVCLEANUP: + req->wr_status = FW_HOSTERROR; + CSIO_DEC_STATS(scm, n_active); + csio_set_state(&req->sm, csio_scsis_uninit); + break; + + case CSIO_SCSIE_CLOSE: + /* + * We can receive this event from the module + * cleanup paths, if the FW forgot to reply to the ABORT WR + * and left this ioreq in this state. For now, just ignore + * the event. The CLOSE event is sent to this state, as + * the LINK may have already gone down. + */ + break; + + default: + csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); + CSIO_DB_ASSERT(0); + } +} + +static void +csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt) +{ + struct csio_hw *hw = req->lnode->hwp; + struct csio_scsim *scm = csio_hw_to_scsim(hw); + + switch (evt) { + case CSIO_SCSIE_COMPLETED: + csio_dbg(hw, + "ioreq %p recvd cmpltd (wr_status:%d) " + "in closing st\n", req, req->wr_status); + /* + * Use -ECANCELED to explicitly tell the CLOSED event that + * the original I/O was returned to driver by FW. + * We dont really care if the I/O was returned with success by + * FW (because the CLOSE and completion of the I/O crossed each + * other), or any other return value. Once we are in aborting + * state, the success or failure of the I/O is unimportant to + * us. + */ + req->drv_status = -ECANCELED; + break; + + case CSIO_SCSIE_CLOSED: + /* + * Check if original I/O WR completed before the Close + * completion. + */ + if (req->drv_status != -ECANCELED) { + csio_fatal(hw, + "Close completed before original I/O," + " req:%p\n", req); + CSIO_DB_ASSERT(0); + } + + /* + * Either close succeeded, or we issued close to FW at the + * same time FW compelted it to us. Either way, the I/O + * is closed. + */ + CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) || + (req->wr_status == FW_EINVAL)); + req->wr_status = FW_SCSI_CLOSE_REQUESTED; + + CSIO_DEC_STATS(scm, n_active); + list_del_init(&req->sm.sm_list); + csio_set_state(&req->sm, csio_scsis_uninit); + break; + + case CSIO_SCSIE_CLOSE: + break; + + case CSIO_SCSIE_DRVCLEANUP: + req->wr_status = FW_HOSTERROR; + CSIO_DEC_STATS(scm, n_active); + csio_set_state(&req->sm, csio_scsis_uninit); + break; + + default: + csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); + CSIO_DB_ASSERT(0); + } +} + +static void +csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt) +{ + switch (evt) { + case CSIO_SCSIE_ABORT: + case CSIO_SCSIE_CLOSE: + /* + * Just succeed the abort request, and hope that + * the remote device unregister path will cleanup + * this I/O to the upper layer within a sane + * amount of time. + */ + /* + * A close can come in during a LINK DOWN. 
The FW would have + * returned us the I/O back, but not the remote device lost + * FW event. In this interval, if the I/O times out at the upper + * layer, a close can come in. Take the same action as abort: + * return success, and hope that the remote device unregister + * path will cleanup this I/O. If the FW still doesnt send + * the msg, the close times out, and the upper layer resorts + * to the next level of error recovery. + */ + req->drv_status = 0; + break; + case CSIO_SCSIE_DRVCLEANUP: + csio_set_state(&req->sm, csio_scsis_uninit); + break; + default: + csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n", + evt, req); + CSIO_DB_ASSERT(0); + } +} + +/* + * csio_scsi_cmpl_handler - WR completion handler for SCSI. + * @hw: HW module. + * @wr: The completed WR from the ingress queue. + * @len: Length of the WR. + * @flb: Freelist buffer array. + * @priv: Private object + * @scsiwr: Pointer to SCSI WR. + * + * This is the WR completion handler called per completion from the + * ISR. It is called with lock held. It walks past the RSS and CPL message + * header where the actual WR is present. + * It then gets the status, WR handle (ioreq pointer) and the len of + * the WR, based on WR opcode. Only on a non-good status is the entire + * WR copied into the WR cache (ioreq->fw_wr). + * The ioreq corresponding to the WR is returned to the caller. + * NOTE: The SCSI queue doesnt allocate a freelist today, hence + * no freelist buffer is expected. + */ +struct csio_ioreq * +csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len, + struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr) +{ + struct csio_ioreq *ioreq = NULL; + struct cpl_fw6_msg *cpl; + uint8_t *tempwr; + uint8_t status; + struct csio_scsim *scm = csio_hw_to_scsim(hw); + + /* skip RSS header */ + cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64)); + + if (unlikely(cpl->opcode != CPL_FW6_MSG)) { + csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n", + cpl->opcode); + CSIO_INC_STATS(scm, n_inval_cplop); + return NULL; + } + + tempwr = (uint8_t *)(cpl->data); + status = csio_wr_status(tempwr); + *scsiwr = tempwr; + + if (likely((*tempwr == FW_SCSI_READ_WR) || + (*tempwr == FW_SCSI_WRITE_WR) || + (*tempwr == FW_SCSI_CMD_WR))) { + ioreq = (struct csio_ioreq *)((uintptr_t) + (((struct fw_scsi_read_wr *)tempwr)->cookie)); + CSIO_DB_ASSERT(virt_addr_valid(ioreq)); + + ioreq->wr_status = status; + + return ioreq; + } + + if (*tempwr == FW_SCSI_ABRT_CLS_WR) { + ioreq = (struct csio_ioreq *)((uintptr_t) + (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie)); + CSIO_DB_ASSERT(virt_addr_valid(ioreq)); + + ioreq->wr_status = status; + return ioreq; + } + + csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr); + CSIO_INC_STATS(scm, n_inval_scsiop); + return NULL; +} + +/* + * csio_scsi_cleanup_io_q - Cleanup the given queue. + * @scm: SCSI module. + * @q: Queue to be cleaned up. + * + * Called with lock held. Has to exit with lock held. 
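+ * The hw lock is dropped around the upper layer completion callback
+ * (ioreq->io_cbfn) and re-acquired before the next iteration; the
+ * ioreq itself is returned to the free list under freelist_lock.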
+ */ +void +csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q) +{ + struct csio_hw *hw = scm->hw; + struct csio_ioreq *ioreq; + struct list_head *tmp, *next; + struct scsi_cmnd *scmnd; + + /* Call back the completion routines of the active_q */ + list_for_each_safe(tmp, next, q) { + ioreq = (struct csio_ioreq *)tmp; + csio_scsi_drvcleanup(ioreq); + list_del_init(&ioreq->sm.sm_list); + scmnd = csio_scsi_cmnd(ioreq); + spin_unlock_irq(&hw->lock); + + /* + * Upper layers may have cleared this command, hence this + * check to avoid accessing stale references. + */ + if (scmnd != NULL) + ioreq->io_cbfn(hw, ioreq); + + spin_lock_irq(&scm->freelist_lock); + csio_put_scsi_ioreq(scm, ioreq); + spin_unlock_irq(&scm->freelist_lock); + + spin_lock_irq(&hw->lock); + } +} + +#define CSIO_SCSI_ABORT_Q_POLL_MS 2000 + +static void +csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd) +{ + struct csio_lnode *ln = ioreq->lnode; + struct csio_hw *hw = ln->hwp; + int ready = 0; + struct csio_scsim *scsim = csio_hw_to_scsim(hw); + int rv; + + if (csio_scsi_cmnd(ioreq) != scmnd) { + CSIO_INC_STATS(scsim, n_abrt_race_comp); + return; + } + + ready = csio_is_lnode_ready(ln); + + rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE)); + if (rv != 0) { + if (ready) + CSIO_INC_STATS(scsim, n_abrt_busy_error); + else + CSIO_INC_STATS(scsim, n_cls_busy_error); + } +} + +/* + * csio_scsi_abort_io_q - Abort all I/Os on given queue + * @scm: SCSI module. + * @q: Queue to abort. + * @tmo: Timeout in ms + * + * Attempt to abort all I/Os on given queue, and wait for a max + * of tmo milliseconds for them to complete. Returns success + * if all I/Os are aborted. Else returns -ETIMEDOUT. + * Should be entered with lock held. Exits with lock held. + * NOTE: + * Lock has to be held across the loop that aborts I/Os, since dropping the lock + * in between can cause the list to be corrupted. As a result, the caller + * of this function has to ensure that the number of I/os to be aborted + * is finite enough to not cause lock-held-for-too-long issues. + */ +static int +csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo) +{ + struct csio_hw *hw = scm->hw; + struct list_head *tmp, *next; + int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS); + struct scsi_cmnd *scmnd; + + if (list_empty(q)) + return 0; + + csio_dbg(hw, "Aborting SCSI I/Os\n"); + + /* Now abort/close I/Os in the queue passed */ + list_for_each_safe(tmp, next, q) { + scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp); + csio_abrt_cls((struct csio_ioreq *)tmp, scmnd); + } + + /* Wait till all active I/Os are completed/aborted/closed */ + while (!list_empty(q) && count--) { + spin_unlock_irq(&hw->lock); + msleep(CSIO_SCSI_ABORT_Q_POLL_MS); + spin_lock_irq(&hw->lock); + } + + /* all aborts completed */ + if (list_empty(q)) + return 0; + + return -ETIMEDOUT; +} + +/* + * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module. + * @scm: SCSI module. + * @abort: abort required. + * Called with lock held, should exit with lock held. + * Can sleep when waiting for I/Os to complete. 
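+ * Waits up to 60 seconds (polling in CSIO_SCSI_ABORT_Q_POLL_MS steps)
+ * for the active queue to drain, then optionally aborts and finally
+ * force-cleans whatever is still pending.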
+ */ +int +csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort) +{ + struct csio_hw *hw = scm->hw; + int rv = 0; + int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS); + + /* No I/Os pending */ + if (list_empty(&scm->active_q)) + return 0; + + /* Wait until all active I/Os are completed */ + while (!list_empty(&scm->active_q) && count--) { + spin_unlock_irq(&hw->lock); + msleep(CSIO_SCSI_ABORT_Q_POLL_MS); + spin_lock_irq(&hw->lock); + } + + /* all I/Os completed */ + if (list_empty(&scm->active_q)) + return 0; + + /* Else abort */ + if (abort) { + rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000); + if (rv == 0) + return rv; + csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n"); + } + + csio_scsi_cleanup_io_q(scm, &scm->active_q); + + CSIO_DB_ASSERT(list_empty(&scm->active_q)); + + return rv; +} + +/* + * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode. + * @scm: SCSI module. + * @lnode: lnode + * + * Called with lock held, should exit with lock held. + * Can sleep (with dropped lock) when waiting for I/Os to complete. + */ +int +csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln) +{ + struct csio_hw *hw = scm->hw; + struct csio_scsi_level_data sld; + int rv; + int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS); + + csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln); + + sld.level = CSIO_LEV_LNODE; + sld.lnode = ln; + INIT_LIST_HEAD(&ln->cmpl_q); + csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q); + + /* No I/Os pending on this lnode */ + if (list_empty(&ln->cmpl_q)) + return 0; + + /* Wait until all active I/Os on this lnode are completed */ + while (!list_empty(&ln->cmpl_q) && count--) { + spin_unlock_irq(&hw->lock); + msleep(CSIO_SCSI_ABORT_Q_POLL_MS); + spin_lock_irq(&hw->lock); + } + + /* all I/Os completed */ + if (list_empty(&ln->cmpl_q)) + return 0; + + csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln); + + /* I/Os are pending, abort them */ + rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000); + if (rv != 0) { + csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n"); + csio_scsi_cleanup_io_q(scm, &ln->cmpl_q); + } + + CSIO_DB_ASSERT(list_empty(&ln->cmpl_q)); + + return rv; +} + +static ssize_t +csio_show_hw_state(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct csio_lnode *ln = shost_priv(class_to_shost(dev)); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + if (csio_is_hw_ready(hw)) + return sysfs_emit(buf, "ready\n"); + + return sysfs_emit(buf, "not ready\n"); +} + +/* Device reset */ +static ssize_t +csio_device_reset(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct csio_lnode *ln = shost_priv(class_to_shost(dev)); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + if (*buf != '1') + return -EINVAL; + + /* Delete NPIV lnodes */ + csio_lnodes_exit(hw, 1); + + /* Block upper IOs */ + csio_lnodes_block_request(hw); + + spin_lock_irq(&hw->lock); + csio_hw_reset(hw); + spin_unlock_irq(&hw->lock); + + /* Unblock upper IOs */ + csio_lnodes_unblock_request(hw); + return count; +} + +/* disable port */ +static ssize_t +csio_disable_port(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct csio_lnode *ln = shost_priv(class_to_shost(dev)); + struct csio_hw *hw = csio_lnode_to_hw(ln); + bool disable; + + if (*buf == '1' || *buf == '0') + disable = (*buf == '1') ? 
true : false; + else + return -EINVAL; + + /* Block upper IOs */ + csio_lnodes_block_by_port(hw, ln->portid); + + spin_lock_irq(&hw->lock); + csio_disable_lnodes(hw, ln->portid, disable); + spin_unlock_irq(&hw->lock); + + /* Unblock upper IOs */ + csio_lnodes_unblock_by_port(hw, ln->portid); + return count; +} + +/* Show debug level */ +static ssize_t +csio_show_dbg_level(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct csio_lnode *ln = shost_priv(class_to_shost(dev)); + + return sysfs_emit(buf, "%x\n", ln->params.log_level); +} + +/* Store debug level */ +static ssize_t +csio_store_dbg_level(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct csio_lnode *ln = shost_priv(class_to_shost(dev)); + struct csio_hw *hw = csio_lnode_to_hw(ln); + uint32_t dbg_level = 0; + + if (!isdigit(buf[0])) + return -EINVAL; + + if (sscanf(buf, "%i", &dbg_level)) + return -EINVAL; + + ln->params.log_level = dbg_level; + hw->params.log_level = dbg_level; + + return 0; +} + +static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL); +static DEVICE_ATTR(device_reset, S_IWUSR, NULL, csio_device_reset); +static DEVICE_ATTR(disable_port, S_IWUSR, NULL, csio_disable_port); +static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level, + csio_store_dbg_level); + +static struct attribute *csio_fcoe_lport_attrs[] = { + &dev_attr_hw_state.attr, + &dev_attr_device_reset.attr, + &dev_attr_disable_port.attr, + &dev_attr_dbg_level.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(csio_fcoe_lport); + +static ssize_t +csio_show_num_reg_rnodes(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct csio_lnode *ln = shost_priv(class_to_shost(dev)); + + return sysfs_emit(buf, "%d\n", ln->num_reg_rnodes); +} + +static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL); + +static struct attribute *csio_fcoe_vport_attrs[] = { + &dev_attr_num_reg_rnodes.attr, + &dev_attr_dbg_level.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(csio_fcoe_vport); + +static inline uint32_t +csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req) +{ + struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); + struct scatterlist *sg; + uint32_t bytes_left; + uint32_t bytes_copy; + uint32_t buf_off = 0; + uint32_t start_off = 0; + uint32_t sg_off = 0; + void *sg_addr; + void *buf_addr; + struct csio_dma_buf *dma_buf; + + bytes_left = scsi_bufflen(scmnd); + sg = scsi_sglist(scmnd); + dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list); + + /* Copy data from driver buffer to SGs of SCSI CMD */ + while (bytes_left > 0 && sg && dma_buf) { + if (buf_off >= dma_buf->len) { + buf_off = 0; + dma_buf = (struct csio_dma_buf *) + csio_list_next(dma_buf); + continue; + } + + if (start_off >= sg->length) { + start_off -= sg->length; + sg = sg_next(sg); + continue; + } + + buf_addr = dma_buf->vaddr + buf_off; + sg_off = sg->offset + start_off; + bytes_copy = min((dma_buf->len - buf_off), + sg->length - start_off); + bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)), + bytes_copy); + + sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT)); + if (!sg_addr) { + csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n", + sg, req); + break; + } + + csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n", + sg_addr, sg_off, buf_addr, bytes_copy); + memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy); + kunmap_atomic(sg_addr); + + start_off += bytes_copy; + buf_off += bytes_copy; + bytes_left -= bytes_copy; + } + + 
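+ /* A non-zero residue here means the DDP buffers did not cover the
+  * entire transfer; fail the command with DID_ERROR.
+  */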
if (bytes_left > 0) + return DID_ERROR; + else + return DID_OK; +} + +/* + * csio_scsi_err_handler - SCSI error handler. + * @hw: HW module. + * @req: IO request. + * + */ +static inline void +csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); + struct csio_scsim *scm = csio_hw_to_scsim(hw); + struct fcp_resp_with_ext *fcp_resp; + struct fcp_resp_rsp_info *rsp_info; + struct csio_dma_buf *dma_buf; + uint8_t flags, scsi_status = 0; + uint32_t host_status = DID_OK; + uint32_t rsp_len = 0, sns_len = 0; + struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); + + + switch (req->wr_status) { + case FW_HOSTERROR: + if (unlikely(!csio_is_hw_ready(hw))) + return; + + host_status = DID_ERROR; + CSIO_INC_STATS(scm, n_hosterror); + + break; + case FW_SCSI_RSP_ERR: + dma_buf = &req->dma_buf; + fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr; + rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1); + flags = fcp_resp->resp.fr_flags; + scsi_status = fcp_resp->resp.fr_status; + + if (flags & FCP_RSP_LEN_VAL) { + rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len); + if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) || + (rsp_info->rsp_code != FCP_TMF_CMPL)) { + host_status = DID_ERROR; + goto out; + } + } + + if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) { + sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len); + if (sns_len > SCSI_SENSE_BUFFERSIZE) + sns_len = SCSI_SENSE_BUFFERSIZE; + + memcpy(cmnd->sense_buffer, + &rsp_info->_fr_resvd[0] + rsp_len, sns_len); + CSIO_INC_STATS(scm, n_autosense); + } + + scsi_set_resid(cmnd, 0); + + /* Under run */ + if (flags & FCP_RESID_UNDER) { + scsi_set_resid(cmnd, + be32_to_cpu(fcp_resp->ext.fr_resid)); + + if (!(flags & FCP_SNS_LEN_VAL) && + (scsi_status == SAM_STAT_GOOD) && + ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd)) + < cmnd->underflow)) + host_status = DID_ERROR; + } else if (flags & FCP_RESID_OVER) + host_status = DID_ERROR; + + CSIO_INC_STATS(scm, n_rsperror); + break; + + case FW_SCSI_OVER_FLOW_ERR: + csio_warn(hw, + "Over-flow error,cmnd:0x%x expected len:0x%x" + " resid:0x%x\n", cmnd->cmnd[0], + scsi_bufflen(cmnd), scsi_get_resid(cmnd)); + host_status = DID_ERROR; + CSIO_INC_STATS(scm, n_ovflerror); + break; + + case FW_SCSI_UNDER_FLOW_ERR: + csio_warn(hw, + "Under-flow error,cmnd:0x%x expected" + " len:0x%x resid:0x%x lun:0x%llx ssn:0x%x\n", + cmnd->cmnd[0], scsi_bufflen(cmnd), + scsi_get_resid(cmnd), cmnd->device->lun, + rn->flowid); + host_status = DID_ERROR; + CSIO_INC_STATS(scm, n_unflerror); + break; + + case FW_SCSI_ABORT_REQUESTED: + case FW_SCSI_ABORTED: + case FW_SCSI_CLOSE_REQUESTED: + csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd, + cmnd->cmnd[0], + (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ? + "closed" : "aborted"); + /* + * csio_eh_abort_handler checks this value to + * succeed or fail the abort request. + */ + host_status = DID_REQUEUE; + if (req->wr_status == FW_SCSI_CLOSE_REQUESTED) + CSIO_INC_STATS(scm, n_closed); + else + CSIO_INC_STATS(scm, n_aborted); + break; + + case FW_SCSI_ABORT_TIMEDOUT: + /* FW timed out the abort itself */ + csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n", + req, cmnd, req->wr_status); + host_status = DID_ERROR; + CSIO_INC_STATS(scm, n_abrt_timedout); + break; + + case FW_RDEV_NOT_READY: + /* + * In firmware, a RDEV can get into this state + * temporarily, before moving into dissapeared/lost + * state. 
So, the driver should complete the request equivalent + * to device-disappeared! + */ + CSIO_INC_STATS(scm, n_rdev_nr_error); + host_status = DID_ERROR; + break; + + case FW_ERR_RDEV_LOST: + CSIO_INC_STATS(scm, n_rdev_lost_error); + host_status = DID_ERROR; + break; + + case FW_ERR_RDEV_LOGO: + CSIO_INC_STATS(scm, n_rdev_logo_error); + host_status = DID_ERROR; + break; + + case FW_ERR_RDEV_IMPL_LOGO: + host_status = DID_ERROR; + break; + + case FW_ERR_LINK_DOWN: + CSIO_INC_STATS(scm, n_link_down_error); + host_status = DID_ERROR; + break; + + case FW_FCOE_NO_XCHG: + CSIO_INC_STATS(scm, n_no_xchg_error); + host_status = DID_ERROR; + break; + + default: + csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n", + req->wr_status, req, cmnd); + CSIO_DB_ASSERT(0); + + CSIO_INC_STATS(scm, n_unknown_error); + host_status = DID_ERROR; + break; + } + +out: + if (req->nsge > 0) { + scsi_dma_unmap(cmnd); + if (req->dcopy && (host_status == DID_OK)) + host_status = csio_scsi_copy_to_sgl(hw, req); + } + + cmnd->result = (((host_status) << 16) | scsi_status); + scsi_done(cmnd); + + /* Wake up waiting threads */ + csio_scsi_cmnd(req) = NULL; + complete(&req->cmplobj); +} + +/* + * csio_scsi_cbfn - SCSI callback function. + * @hw: HW module. + * @req: IO request. + * + */ +static void +csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); + uint8_t scsi_status = SAM_STAT_GOOD; + uint32_t host_status = DID_OK; + + if (likely(req->wr_status == FW_SUCCESS)) { + if (req->nsge > 0) { + scsi_dma_unmap(cmnd); + if (req->dcopy) + host_status = csio_scsi_copy_to_sgl(hw, req); + } + + cmnd->result = (((host_status) << 16) | scsi_status); + scsi_done(cmnd); + csio_scsi_cmnd(req) = NULL; + CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success); + } else { + /* Error handling */ + csio_scsi_err_handler(hw, req); + } +} + +/** + * csio_queuecommand - Entry point to kickstart an I/O request. + * @host: The scsi_host pointer. + * @cmnd: The I/O request from ML. + * + * This routine does the following: + * - Checks for HW and Rnode module readiness. + * - Gets a free ioreq structure (which is already initialized + * to uninit during its allocation). + * - Maps SG elements. + * - Initializes ioreq members. + * - Kicks off the SCSI state machine for this IO. + * - Returns busy status on error. + */ +static int +csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd) +{ + struct csio_lnode *ln = shost_priv(host); + struct csio_hw *hw = csio_lnode_to_hw(ln); + struct csio_scsim *scsim = csio_hw_to_scsim(hw); + struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); + struct csio_ioreq *ioreq = NULL; + unsigned long flags; + int nsge = 0; + int rv = SCSI_MLQUEUE_HOST_BUSY, nr; + int retval; + struct csio_scsi_qset *sqset; + struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + + sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(scsi_cmd_to_rq(cmnd))]; + + nr = fc_remote_port_chkready(rport); + if (nr) { + cmnd->result = nr; + CSIO_INC_STATS(scsim, n_rn_nr_error); + goto err_done; + } + + if (unlikely(!csio_is_hw_ready(hw))) { + cmnd->result = (DID_REQUEUE << 16); + CSIO_INC_STATS(scsim, n_hw_nr_error); + goto err_done; + } + + /* Get req->nsge, if there are SG elements to be mapped */ + nsge = scsi_dma_map(cmnd); + if (unlikely(nsge < 0)) { + CSIO_INC_STATS(scsim, n_dmamap_error); + goto err; + } + + /* Do we support so many mappings? 
*/ + if (unlikely(nsge > scsim->max_sge)) { + csio_warn(hw, + "More SGEs than can be supported." + " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge); + CSIO_INC_STATS(scsim, n_unsupp_sge_error); + goto err_dma_unmap; + } + + /* Get a free ioreq structure - SM is already set to uninit */ + ioreq = csio_get_scsi_ioreq_lock(hw, scsim); + if (!ioreq) { + csio_err(hw, "Out of I/O request elements. Active #:%d\n", + scsim->stats.n_active); + CSIO_INC_STATS(scsim, n_no_req_error); + goto err_dma_unmap; + } + + ioreq->nsge = nsge; + ioreq->lnode = ln; + ioreq->rnode = rn; + ioreq->iq_idx = sqset->iq_idx; + ioreq->eq_idx = sqset->eq_idx; + ioreq->wr_status = 0; + ioreq->drv_status = 0; + csio_scsi_cmnd(ioreq) = (void *)cmnd; + ioreq->tmo = 0; + ioreq->datadir = cmnd->sc_data_direction; + + if (cmnd->sc_data_direction == DMA_TO_DEVICE) { + CSIO_INC_STATS(ln, n_output_requests); + ln->stats.n_output_bytes += scsi_bufflen(cmnd); + } else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) { + CSIO_INC_STATS(ln, n_input_requests); + ln->stats.n_input_bytes += scsi_bufflen(cmnd); + } else + CSIO_INC_STATS(ln, n_control_requests); + + /* Set cbfn */ + ioreq->io_cbfn = csio_scsi_cbfn; + + /* Needed during abort */ + cmnd->host_scribble = (unsigned char *)ioreq; + csio_priv(cmnd)->fc_tm_flags = 0; + + /* Kick off SCSI IO SM on the ioreq */ + spin_lock_irqsave(&hw->lock, flags); + retval = csio_scsi_start_io(ioreq); + spin_unlock_irqrestore(&hw->lock, flags); + + if (retval != 0) { + csio_err(hw, "ioreq: %p couldn't be started, status:%d\n", + ioreq, retval); + CSIO_INC_STATS(scsim, n_busy_error); + goto err_put_req; + } + + return 0; + +err_put_req: + csio_put_scsi_ioreq_lock(hw, scsim, ioreq); +err_dma_unmap: + if (nsge > 0) + scsi_dma_unmap(cmnd); +err: + return rv; + +err_done: + scsi_done(cmnd); + return 0; +} + +static int +csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort) +{ + int rv; + int cpu = smp_processor_id(); + struct csio_lnode *ln = ioreq->lnode; + struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu]; + + ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS; + /* + * Use current processor queue for posting the abort/close, but retain + * the ingress queue ID of the original I/O being aborted/closed - we + * need the abort/close completion to be received on the same queue + * as the original I/O. 
+ */ + ioreq->eq_idx = sqset->eq_idx; + + if (abort == SCSI_ABORT) + rv = csio_scsi_abort(ioreq); + else + rv = csio_scsi_close(ioreq); + + return rv; +} + +static int +csio_eh_abort_handler(struct scsi_cmnd *cmnd) +{ + struct csio_ioreq *ioreq; + struct csio_lnode *ln = shost_priv(cmnd->device->host); + struct csio_hw *hw = csio_lnode_to_hw(ln); + struct csio_scsim *scsim = csio_hw_to_scsim(hw); + int ready = 0, ret; + unsigned long tmo = 0; + int rv; + struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); + + ret = fc_block_scsi_eh(cmnd); + if (ret) + return ret; + + ioreq = (struct csio_ioreq *)cmnd->host_scribble; + if (!ioreq) + return SUCCESS; + + if (!rn) + return FAILED; + + csio_dbg(hw, + "Request to abort ioreq:%p cmd:%p cdb:%08llx" + " ssni:0x%x lun:%llu iq:0x%x\n", + ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid, + cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx)); + + if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) { + CSIO_INC_STATS(scsim, n_abrt_race_comp); + return SUCCESS; + } + + ready = csio_is_lnode_ready(ln); + tmo = CSIO_SCSI_ABRT_TMO_MS; + + reinit_completion(&ioreq->cmplobj); + spin_lock_irq(&hw->lock); + rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE)); + spin_unlock_irq(&hw->lock); + + if (rv != 0) { + if (rv == -EINVAL) { + /* Return success, if abort/close request issued on + * already completed IO + */ + return SUCCESS; + } + if (ready) + CSIO_INC_STATS(scsim, n_abrt_busy_error); + else + CSIO_INC_STATS(scsim, n_cls_busy_error); + + goto inval_scmnd; + } + + wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo)); + + /* FW didnt respond to abort within our timeout */ + if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) { + + csio_err(hw, "Abort timed out -- req: %p\n", ioreq); + CSIO_INC_STATS(scsim, n_abrt_timedout); + +inval_scmnd: + if (ioreq->nsge > 0) + scsi_dma_unmap(cmnd); + + spin_lock_irq(&hw->lock); + csio_scsi_cmnd(ioreq) = NULL; + spin_unlock_irq(&hw->lock); + + cmnd->result = (DID_ERROR << 16); + scsi_done(cmnd); + + return FAILED; + } + + /* FW successfully aborted the request */ + if (host_byte(cmnd->result) == DID_REQUEUE) { + csio_info(hw, + "Aborted SCSI command to (%d:%llu) tag %u\n", + cmnd->device->id, cmnd->device->lun, + scsi_cmd_to_rq(cmnd)->tag); + return SUCCESS; + } else { + csio_info(hw, + "Failed to abort SCSI command, (%d:%llu) tag %u\n", + cmnd->device->id, cmnd->device->lun, + scsi_cmd_to_rq(cmnd)->tag); + return FAILED; + } +} + +/* + * csio_tm_cbfn - TM callback function. + * @hw: HW module. + * @req: IO request. + * + * Cache the result in 'cmnd', since ioreq will be freed soon + * after we return from here, and the waiting thread shouldnt trust + * the ioreq contents. + */ +static void +csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); + struct csio_dma_buf *dma_buf; + uint8_t flags = 0; + struct fcp_resp_with_ext *fcp_resp; + struct fcp_resp_rsp_info *rsp_info; + + csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n", + req, req->wr_status); + + /* Cache FW return status */ + csio_priv(cmnd)->wr_status = req->wr_status; + + /* Special handling based on FCP response */ + + /* + * FW returns us this error, if flags were set. FCP4 says + * FCP_RSP_LEN_VAL in flags shall be set for TM completions. + * So if a target were to set this bit, we expect that the + * rsp_code is set to FCP_TMF_CMPL for a successful TM + * completion. Any other rsp_code means TM operation failed. 
+ * If a target were to just ignore setting flags, we treat + * the TM operation as success, and FW returns FW_SUCCESS. + */ + if (req->wr_status == FW_SCSI_RSP_ERR) { + dma_buf = &req->dma_buf; + fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr; + rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1); + + flags = fcp_resp->resp.fr_flags; + + /* Modify return status if flags indicate success */ + if (flags & FCP_RSP_LEN_VAL) + if (rsp_info->rsp_code == FCP_TMF_CMPL) + csio_priv(cmnd)->wr_status = FW_SUCCESS; + + csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code); + } + + /* Wake up the TM handler thread */ + csio_scsi_cmnd(req) = NULL; +} + +static int +csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd) +{ + struct csio_lnode *ln = shost_priv(cmnd->device->host); + struct csio_hw *hw = csio_lnode_to_hw(ln); + struct csio_scsim *scsim = csio_hw_to_scsim(hw); + struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); + struct csio_ioreq *ioreq = NULL; + struct csio_scsi_qset *sqset; + unsigned long flags; + int retval; + int count, ret; + LIST_HEAD(local_q); + struct csio_scsi_level_data sld; + + if (!rn) + goto fail; + + csio_dbg(hw, "Request to reset LUN:%llu (ssni:0x%x tgtid:%d)\n", + cmnd->device->lun, rn->flowid, rn->scsi_id); + + if (!csio_is_lnode_ready(ln)) { + csio_err(hw, + "LUN reset cannot be issued on non-ready" + " local node vnpi:0x%x (LUN:%llu)\n", + ln->vnp_flowid, cmnd->device->lun); + goto fail; + } + + /* Lnode is ready, now wait on rport node readiness */ + ret = fc_block_scsi_eh(cmnd); + if (ret) + return ret; + + /* + * If we have blocked in the previous call, at this point, either the + * remote node has come back online, or device loss timer has fired + * and the remote node is destroyed. Allow the LUN reset only for + * the former case, since LUN reset is a TMF I/O on the wire, and we + * need a valid session to issue it. + */ + if (fc_remote_port_chkready(rn->rport)) { + csio_err(hw, + "LUN reset cannot be issued on non-ready" + " remote node ssni:0x%x (LUN:%llu)\n", + rn->flowid, cmnd->device->lun); + goto fail; + } + + /* Get a free ioreq structure - SM is already set to uninit */ + ioreq = csio_get_scsi_ioreq_lock(hw, scsim); + + if (!ioreq) { + csio_err(hw, "Out of IO request elements. Active # :%d\n", + scsim->stats.n_active); + goto fail; + } + + sqset = &hw->sqset[ln->portid][smp_processor_id()]; + ioreq->nsge = 0; + ioreq->lnode = ln; + ioreq->rnode = rn; + ioreq->iq_idx = sqset->iq_idx; + ioreq->eq_idx = sqset->eq_idx; + + csio_scsi_cmnd(ioreq) = cmnd; + cmnd->host_scribble = (unsigned char *)ioreq; + csio_priv(cmnd)->wr_status = 0; + + csio_priv(cmnd)->fc_tm_flags = FCP_TMF_LUN_RESET; + ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000; + + /* + * FW times the LUN reset for ioreq->tmo, so we got to wait a little + * longer (10s for now) than that to allow FW to return the timed + * out command. 
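
Editor's note, worked numbers for the polling budget computed by the DIV_ROUND_UP() just below, using the constants from csio_scsi.h later in this patch (CSIO_SCSI_LUNRST_TMO_MS = 60000, CSIO_SCSI_TM_POLL_MS = 2000): ioreq->tmo = 60000 / 1000 = 60 seconds is what the firmware enforces, and count = DIV_ROUND_UP((60 + 10) * 1000, 2000) = 35 polls of 2 seconds each, so the handler waits up to 70 seconds, 10 seconds longer than the firmware timeout, exactly as the comment above describes and as the debug message later prints.
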
+ */ + count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS); + + /* Set cbfn */ + ioreq->io_cbfn = csio_tm_cbfn; + + /* Save of the ioreq info for later use */ + sld.level = CSIO_LEV_LUN; + sld.lnode = ioreq->lnode; + sld.rnode = ioreq->rnode; + sld.oslun = cmnd->device->lun; + + spin_lock_irqsave(&hw->lock, flags); + /* Kick off TM SM on the ioreq */ + retval = csio_scsi_start_tm(ioreq); + spin_unlock_irqrestore(&hw->lock, flags); + + if (retval != 0) { + csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n", + ioreq, retval); + goto fail_ret_ioreq; + } + + csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n", + count * (CSIO_SCSI_TM_POLL_MS / 1000)); + /* Wait for completion */ + while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) + && count--) + msleep(CSIO_SCSI_TM_POLL_MS); + + /* LUN reset timed-out */ + if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) { + csio_err(hw, "LUN reset (%d:%llu) timed out\n", + cmnd->device->id, cmnd->device->lun); + + spin_lock_irq(&hw->lock); + csio_scsi_drvcleanup(ioreq); + list_del_init(&ioreq->sm.sm_list); + spin_unlock_irq(&hw->lock); + + goto fail_ret_ioreq; + } + + /* LUN reset returned, check cached status */ + if (csio_priv(cmnd)->wr_status != FW_SUCCESS) { + csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n", + cmnd->device->id, cmnd->device->lun, + csio_priv(cmnd)->wr_status); + goto fail; + } + + /* LUN reset succeeded, Start aborting affected I/Os */ + /* + * Since the host guarantees during LUN reset that there + * will not be any more I/Os to that LUN, until the LUN reset + * completes, we gather pending I/Os after the LUN reset. + */ + spin_lock_irq(&hw->lock); + csio_scsi_gather_active_ios(scsim, &sld, &local_q); + + retval = csio_scsi_abort_io_q(scsim, &local_q, 30000); + spin_unlock_irq(&hw->lock); + + /* Aborts may have timed out */ + if (retval != 0) { + csio_err(hw, + "Attempt to abort I/Os during LUN reset of %llu" + " returned %d\n", cmnd->device->lun, retval); + /* Return I/Os back to active_q */ + spin_lock_irq(&hw->lock); + list_splice_tail_init(&local_q, &scsim->active_q); + spin_unlock_irq(&hw->lock); + goto fail; + } + + CSIO_INC_STATS(rn, n_lun_rst); + + csio_info(hw, "LUN reset occurred (%d:%llu)\n", + cmnd->device->id, cmnd->device->lun); + + return SUCCESS; + +fail_ret_ioreq: + csio_put_scsi_ioreq_lock(hw, scsim, ioreq); +fail: + CSIO_INC_STATS(rn, n_lun_rst_fail); + return FAILED; +} + +static int +csio_slave_alloc(struct scsi_device *sdev) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + + if (!rport || fc_remote_port_chkready(rport)) + return -ENXIO; + + sdev->hostdata = *((struct csio_lnode **)(rport->dd_data)); + + return 0; +} + +static int +csio_slave_configure(struct scsi_device *sdev) +{ + scsi_change_queue_depth(sdev, csio_lun_qdepth); + return 0; +} + +static void +csio_slave_destroy(struct scsi_device *sdev) +{ + sdev->hostdata = NULL; +} + +static int +csio_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct csio_lnode *ln = shost_priv(shost); + int rv = 1; + + spin_lock_irq(shost->host_lock); + if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list)) + goto out; + + rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ, + csio_delta_scan_tmo * HZ); +out: + spin_unlock_irq(shost->host_lock); + + return rv; +} + +struct scsi_host_template csio_fcoe_shost_template = { + .module = THIS_MODULE, + .name = CSIO_DRV_DESC, + .proc_name = KBUILD_MODNAME, + .queuecommand = csio_queuecommand, + .cmd_size = sizeof(struct 
csio_cmd_priv), + .eh_timed_out = fc_eh_timed_out, + .eh_abort_handler = csio_eh_abort_handler, + .eh_device_reset_handler = csio_eh_lun_reset_handler, + .slave_alloc = csio_slave_alloc, + .slave_configure = csio_slave_configure, + .slave_destroy = csio_slave_destroy, + .scan_finished = csio_scan_finished, + .this_id = -1, + .sg_tablesize = CSIO_SCSI_MAX_SGE, + .cmd_per_lun = CSIO_MAX_CMD_PER_LUN, + .shost_groups = csio_fcoe_lport_groups, + .max_sectors = CSIO_MAX_SECTOR_SIZE, +}; + +struct scsi_host_template csio_fcoe_shost_vport_template = { + .module = THIS_MODULE, + .name = CSIO_DRV_DESC, + .proc_name = KBUILD_MODNAME, + .queuecommand = csio_queuecommand, + .eh_timed_out = fc_eh_timed_out, + .eh_abort_handler = csio_eh_abort_handler, + .eh_device_reset_handler = csio_eh_lun_reset_handler, + .slave_alloc = csio_slave_alloc, + .slave_configure = csio_slave_configure, + .slave_destroy = csio_slave_destroy, + .scan_finished = csio_scan_finished, + .this_id = -1, + .sg_tablesize = CSIO_SCSI_MAX_SGE, + .cmd_per_lun = CSIO_MAX_CMD_PER_LUN, + .shost_groups = csio_fcoe_vport_groups, + .max_sectors = CSIO_MAX_SECTOR_SIZE, +}; + +/* + * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs. + * @scm: SCSI Module + * @hw: HW device. + * @buf_size: buffer size + * @num_buf : Number of buffers. + * + * This routine allocates DMA buffers required for SCSI Data xfer, if + * each SGL buffer for a SCSI Read request posted by SCSI midlayer are + * not virtually contiguous. + */ +static int +csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw, + int buf_size, int num_buf) +{ + int n = 0; + struct list_head *tmp; + struct csio_dma_buf *ddp_desc = NULL; + uint32_t unit_size = 0; + + if (!num_buf) + return 0; + + if (!buf_size) + return -EINVAL; + + INIT_LIST_HEAD(&scm->ddp_freelist); + + /* Align buf size to page size */ + buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK; + /* Initialize dma descriptors */ + for (n = 0; n < num_buf; n++) { + /* Set unit size to request size */ + unit_size = buf_size; + ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL); + if (!ddp_desc) { + csio_err(hw, + "Failed to allocate ddp descriptors," + " Num allocated = %d.\n", + scm->stats.n_free_ddp); + goto no_mem; + } + + /* Allocate Dma buffers for DDP */ + ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size, + &ddp_desc->paddr, GFP_KERNEL); + if (!ddp_desc->vaddr) { + csio_err(hw, + "SCSI response DMA buffer (ddp) allocation" + " failed!\n"); + kfree(ddp_desc); + goto no_mem; + } + + ddp_desc->len = unit_size; + + /* Added it to scsi ddp freelist */ + list_add_tail(&ddp_desc->list, &scm->ddp_freelist); + CSIO_INC_STATS(scm, n_free_ddp); + } + + return 0; +no_mem: + /* release dma descs back to freelist and free dma memory */ + list_for_each(tmp, &scm->ddp_freelist) { + ddp_desc = (struct csio_dma_buf *) tmp; + tmp = csio_list_prev(tmp); + dma_free_coherent(&hw->pdev->dev, ddp_desc->len, + ddp_desc->vaddr, ddp_desc->paddr); + list_del_init(&ddp_desc->list); + kfree(ddp_desc); + } + scm->stats.n_free_ddp = 0; + + return -ENOMEM; +} + +/* + * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs. + * @scm: SCSI Module + * @hw: HW device. + * + * This routine frees ddp buffers. 
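
Editor's note: the teardown loops in csio_scsi_alloc_ddp_bufs() above and csio_scsi_free_ddp_bufs() just below walk the DDP freelist by casting the list_head pointer and stepping the cursor back before deletion. For readers less used to that idiom, the fragment below is an equivalent teardown written with list_for_each_entry_safe(); it is an editorial sketch, not code from this driver, and it reuses the same scm/hw variables and csio_dma_buf fields shown above.

	struct csio_dma_buf *ddp_desc, *next;

	/* Free every DDP descriptor and its coherent DMA buffer. */
	list_for_each_entry_safe(ddp_desc, next, &scm->ddp_freelist, list) {
		dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
				  ddp_desc->vaddr, ddp_desc->paddr);
		list_del_init(&ddp_desc->list);
		kfree(ddp_desc);
	}
	scm->stats.n_free_ddp = 0;
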
+ */ +static void +csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw) +{ + struct list_head *tmp; + struct csio_dma_buf *ddp_desc; + + /* release dma descs back to freelist and free dma memory */ + list_for_each(tmp, &scm->ddp_freelist) { + ddp_desc = (struct csio_dma_buf *) tmp; + tmp = csio_list_prev(tmp); + dma_free_coherent(&hw->pdev->dev, ddp_desc->len, + ddp_desc->vaddr, ddp_desc->paddr); + list_del_init(&ddp_desc->list); + kfree(ddp_desc); + } + scm->stats.n_free_ddp = 0; +} + +/** + * csio_scsim_init - Initialize SCSI Module + * @scm: SCSI Module + * @hw: HW module + * + */ +int +csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw) +{ + int i; + struct csio_ioreq *ioreq; + struct csio_dma_buf *dma_buf; + + INIT_LIST_HEAD(&scm->active_q); + scm->hw = hw; + + scm->proto_cmd_len = sizeof(struct fcp_cmnd); + scm->proto_rsp_len = CSIO_SCSI_RSP_LEN; + scm->max_sge = CSIO_SCSI_MAX_SGE; + + spin_lock_init(&scm->freelist_lock); + + /* Pre-allocate ioreqs and initialize them */ + INIT_LIST_HEAD(&scm->ioreq_freelist); + for (i = 0; i < csio_scsi_ioreqs; i++) { + + ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL); + if (!ioreq) { + csio_err(hw, + "I/O request element allocation failed, " + " Num allocated = %d.\n", + scm->stats.n_free_ioreq); + + goto free_ioreq; + } + + /* Allocate Dma buffers for Response Payload */ + dma_buf = &ioreq->dma_buf; + dma_buf->vaddr = dma_pool_alloc(hw->scsi_dma_pool, GFP_KERNEL, + &dma_buf->paddr); + if (!dma_buf->vaddr) { + csio_err(hw, + "SCSI response DMA buffer allocation" + " failed!\n"); + kfree(ioreq); + goto free_ioreq; + } + + dma_buf->len = scm->proto_rsp_len; + + /* Set state to uninit */ + csio_init_state(&ioreq->sm, csio_scsis_uninit); + INIT_LIST_HEAD(&ioreq->gen_list); + init_completion(&ioreq->cmplobj); + + list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist); + CSIO_INC_STATS(scm, n_free_ioreq); + } + + if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs)) + goto free_ioreq; + + return 0; + +free_ioreq: + /* + * Free up existing allocations, since an error + * from here means we are returning for good + */ + while (!list_empty(&scm->ioreq_freelist)) { + struct csio_sm *tmp; + + tmp = list_first_entry(&scm->ioreq_freelist, + struct csio_sm, sm_list); + list_del_init(&tmp->sm_list); + ioreq = (struct csio_ioreq *)tmp; + + dma_buf = &ioreq->dma_buf; + dma_pool_free(hw->scsi_dma_pool, dma_buf->vaddr, + dma_buf->paddr); + + kfree(ioreq); + } + + scm->stats.n_free_ioreq = 0; + + return -ENOMEM; +} + +/** + * csio_scsim_exit: Uninitialize SCSI Module + * @scm: SCSI Module + * + */ +void +csio_scsim_exit(struct csio_scsim *scm) +{ + struct csio_ioreq *ioreq; + struct csio_dma_buf *dma_buf; + + while (!list_empty(&scm->ioreq_freelist)) { + struct csio_sm *tmp; + + tmp = list_first_entry(&scm->ioreq_freelist, + struct csio_sm, sm_list); + list_del_init(&tmp->sm_list); + ioreq = (struct csio_ioreq *)tmp; + + dma_buf = &ioreq->dma_buf; + dma_pool_free(scm->hw->scsi_dma_pool, dma_buf->vaddr, + dma_buf->paddr); + + kfree(ioreq); + } + + scm->stats.n_free_ioreq = 0; + + csio_scsi_free_ddp_bufs(scm, scm->hw); +} diff --git a/drivers/scsi/csiostor/csio_scsi.h b/drivers/scsi/csiostor/csio_scsi.h new file mode 100644 index 000000000..39dda3c88 --- /dev/null +++ b/drivers/scsi/csiostor/csio_scsi.h @@ -0,0 +1,352 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CSIO_SCSI_H__ +#define __CSIO_SCSI_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "csio_defs.h" +#include "csio_wr.h" + +extern struct scsi_host_template csio_fcoe_shost_template; +extern struct scsi_host_template csio_fcoe_shost_vport_template; + +extern int csio_scsi_eqsize; +extern int csio_scsi_iqlen; +extern int csio_scsi_ioreqs; +extern uint32_t csio_max_scan_tmo; +extern uint32_t csio_delta_scan_tmo; +extern int csio_lun_qdepth; + +/* + **************************** NOTE ******************************* + * How do we calculate MAX FCoE SCSI SGEs? Here is the math: + * Max Egress WR size = 512 bytes + * One SCSI egress WR has the following fixed no of bytes: + * 48 (sizeof(struct fw_scsi_write[read]_wr)) - FW WR + * + 32 (sizeof(struct fc_fcp_cmnd)) - Immediate FCP_CMD + * ------ + * 80 + * ------ + * That leaves us with 512 - 96 = 432 bytes for data SGE. Using + * struct ulptx_sgl header for the SGE consumes: + * - 4 bytes for cmnd_sge. + * - 12 bytes for the first SGL. + * That leaves us with 416 bytes for the remaining SGE pairs. Which is + * is 416 / 24 (size(struct ulptx_sge_pair)) = 17 SGE pairs, + * or 34 SGEs. Adding the first SGE fetches us 35 SGEs. + */ +#define CSIO_SCSI_MAX_SGE 35 +#define CSIO_SCSI_ABRT_TMO_MS 60000 +#define CSIO_SCSI_LUNRST_TMO_MS 60000 +#define CSIO_SCSI_TM_POLL_MS 2000 /* should be less than + * all TM timeouts. + */ +#define CSIO_SCSI_IQ_WRSZ 128 +#define CSIO_SCSI_IQSIZE (csio_scsi_iqlen * CSIO_SCSI_IQ_WRSZ) + +#define CSIO_MAX_SNS_LEN 128 +#define CSIO_SCSI_RSP_LEN (FCP_RESP_WITH_EXT + 4 + CSIO_MAX_SNS_LEN) + +/* Reference to scsi_cmnd */ +#define csio_scsi_cmnd(req) ((req)->scratch1) + +struct csio_scsi_stats { + uint64_t n_tot_success; /* Total number of good I/Os */ + uint32_t n_rn_nr_error; /* No. of remote-node-not- + * ready errors + */ + uint32_t n_hw_nr_error; /* No. of hw-module-not- + * ready errors + */ + uint32_t n_dmamap_error; /* No. of DMA map erros */ + uint32_t n_unsupp_sge_error; /* No. of too-many-SGes + * errors. + */ + uint32_t n_no_req_error; /* No. of Out-of-ioreqs error */ + uint32_t n_busy_error; /* No. 
of -EBUSY errors */ + uint32_t n_hosterror; /* No. of FW_HOSTERROR I/O */ + uint32_t n_rsperror; /* No. of response errors */ + uint32_t n_autosense; /* No. of auto sense replies */ + uint32_t n_ovflerror; /* No. of overflow errors */ + uint32_t n_unflerror; /* No. of underflow errors */ + uint32_t n_rdev_nr_error;/* No. of rdev not + * ready errors + */ + uint32_t n_rdev_lost_error;/* No. of rdev lost errors */ + uint32_t n_rdev_logo_error;/* No. of rdev logo errors */ + uint32_t n_link_down_error;/* No. of link down errors */ + uint32_t n_no_xchg_error; /* No. no exchange error */ + uint32_t n_unknown_error;/* No. of unhandled errors */ + uint32_t n_aborted; /* No. of aborted I/Os */ + uint32_t n_abrt_timedout; /* No. of abort timedouts */ + uint32_t n_abrt_fail; /* No. of abort failures */ + uint32_t n_abrt_dups; /* No. of duplicate aborts */ + uint32_t n_abrt_race_comp; /* No. of aborts that raced + * with completions. + */ + uint32_t n_abrt_busy_error;/* No. of abort failures + * due to -EBUSY. + */ + uint32_t n_closed; /* No. of closed I/Os */ + uint32_t n_cls_busy_error; /* No. of close failures + * due to -EBUSY. + */ + uint32_t n_active; /* No. of IOs in active_q */ + uint32_t n_tm_active; /* No. of TMs in active_q */ + uint32_t n_wcbfn; /* No. of I/Os in worker + * cbfn q + */ + uint32_t n_free_ioreq; /* No. of freelist entries */ + uint32_t n_free_ddp; /* No. of DDP freelist */ + uint32_t n_unaligned; /* No. of Unaligned SGls */ + uint32_t n_inval_cplop; /* No. invalid CPL op's in IQ */ + uint32_t n_inval_scsiop; /* No. invalid scsi op's in IQ*/ +}; + +struct csio_scsim { + struct csio_hw *hw; /* Pointer to HW moduel */ + uint8_t max_sge; /* Max SGE */ + uint8_t proto_cmd_len; /* Proto specific SCSI + * cmd length + */ + uint16_t proto_rsp_len; /* Proto specific SCSI + * response length + */ + spinlock_t freelist_lock; /* Lock for ioreq freelist */ + struct list_head active_q; /* Outstanding SCSI I/Os */ + struct list_head ioreq_freelist; /* Free list of ioreq's */ + struct list_head ddp_freelist; /* DDP descriptor freelist */ + struct csio_scsi_stats stats; /* This module's statistics */ +}; + +/* State machine defines */ +enum csio_scsi_ev { + CSIO_SCSIE_START_IO = 1, /* Start a regular SCSI IO */ + CSIO_SCSIE_START_TM, /* Start a TM IO */ + CSIO_SCSIE_COMPLETED, /* IO Completed */ + CSIO_SCSIE_ABORT, /* Abort IO */ + CSIO_SCSIE_ABORTED, /* IO Aborted */ + CSIO_SCSIE_CLOSE, /* Close exchange */ + CSIO_SCSIE_CLOSED, /* Exchange closed */ + CSIO_SCSIE_DRVCLEANUP, /* Driver wants to manually + * cleanup this I/O. 
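
Editor's note, a quick check of the CSIO_SCSI_MAX_SGE budget spelled out in the NOTE above, following the itemized byte counts given there: the fixed overhead is 48 + 32 = 80 bytes, which leaves 512 - 80 = 432 bytes for the data SGL; the ulptx_sgl header then consumes 4 + 12 = 16 bytes, leaving 416 bytes, and 416 / 24 = 17 SGE pairs = 34 SGEs, plus the first SGE carried in the header gives the 35 in the #define.
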
+ */ +}; + +enum csio_scsi_lev { + CSIO_LEV_ALL = 1, + CSIO_LEV_LNODE, + CSIO_LEV_RNODE, + CSIO_LEV_LUN, +}; + +struct csio_scsi_level_data { + enum csio_scsi_lev level; + struct csio_rnode *rnode; + struct csio_lnode *lnode; + uint64_t oslun; +}; + +struct csio_cmd_priv { + uint8_t fc_tm_flags; /* task management flags */ + uint16_t wr_status; +}; + +static inline struct csio_cmd_priv *csio_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +static inline struct csio_ioreq * +csio_get_scsi_ioreq(struct csio_scsim *scm) +{ + struct csio_sm *req; + + if (likely(!list_empty(&scm->ioreq_freelist))) { + req = list_first_entry(&scm->ioreq_freelist, + struct csio_sm, sm_list); + list_del_init(&req->sm_list); + CSIO_DEC_STATS(scm, n_free_ioreq); + return (struct csio_ioreq *)req; + } else + return NULL; +} + +static inline void +csio_put_scsi_ioreq(struct csio_scsim *scm, struct csio_ioreq *ioreq) +{ + list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist); + CSIO_INC_STATS(scm, n_free_ioreq); +} + +static inline void +csio_put_scsi_ioreq_list(struct csio_scsim *scm, struct list_head *reqlist, + int n) +{ + list_splice_init(reqlist, &scm->ioreq_freelist); + scm->stats.n_free_ioreq += n; +} + +static inline struct csio_dma_buf * +csio_get_scsi_ddp(struct csio_scsim *scm) +{ + struct csio_dma_buf *ddp; + + if (likely(!list_empty(&scm->ddp_freelist))) { + ddp = list_first_entry(&scm->ddp_freelist, + struct csio_dma_buf, list); + list_del_init(&ddp->list); + CSIO_DEC_STATS(scm, n_free_ddp); + return ddp; + } else + return NULL; +} + +static inline void +csio_put_scsi_ddp(struct csio_scsim *scm, struct csio_dma_buf *ddp) +{ + list_add_tail(&ddp->list, &scm->ddp_freelist); + CSIO_INC_STATS(scm, n_free_ddp); +} + +static inline void +csio_put_scsi_ddp_list(struct csio_scsim *scm, struct list_head *reqlist, + int n) +{ + list_splice_tail_init(reqlist, &scm->ddp_freelist); + scm->stats.n_free_ddp += n; +} + +static inline void +csio_scsi_completed(struct csio_ioreq *ioreq, struct list_head *cbfn_q) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_COMPLETED); + if (csio_list_deleted(&ioreq->sm.sm_list)) + list_add_tail(&ioreq->sm.sm_list, cbfn_q); +} + +static inline void +csio_scsi_aborted(struct csio_ioreq *ioreq, struct list_head *cbfn_q) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORTED); + list_add_tail(&ioreq->sm.sm_list, cbfn_q); +} + +static inline void +csio_scsi_closed(struct csio_ioreq *ioreq, struct list_head *cbfn_q) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSED); + list_add_tail(&ioreq->sm.sm_list, cbfn_q); +} + +static inline void +csio_scsi_drvcleanup(struct csio_ioreq *ioreq) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_DRVCLEANUP); +} + +/* + * csio_scsi_start_io - Kick starts the IO SM. + * @req: io request SM. + * + * needs to be called with lock held. + */ +static inline int +csio_scsi_start_io(struct csio_ioreq *ioreq) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_START_IO); + return ioreq->drv_status; +} + +/* + * csio_scsi_start_tm - Kicks off the Task management IO SM. + * @req: io request SM. + * + * needs to be called with lock held. + */ +static inline int +csio_scsi_start_tm(struct csio_ioreq *ioreq) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_START_TM); + return ioreq->drv_status; +} + +/* + * csio_scsi_abort - Abort an IO request + * @req: io request SM. + * + * needs to be called with lock held. 
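
Editor's note: csio_get_scsi_ioreq() and csio_put_scsi_ioreq() above are lockless and rely on the caller holding scm->freelist_lock; the SCSI paths earlier in csio_scsi.c call _lock variants such as csio_get_scsi_ioreq_lock(). The fragment below is only an editorial guess at what such a wrapper looks like, shown to make the locking contract explicit; the real helpers live elsewhere in the driver and may differ in detail.

/* Hypothetical wrapper, not driver code: take an ioreq under the freelist lock. */
static struct csio_ioreq *
get_ioreq_locked_sketch(struct csio_scsim *scm)
{
	struct csio_ioreq *ioreq;
	unsigned long flags;

	/* Serialize freelist access against other CPUs posting/completing I/O. */
	spin_lock_irqsave(&scm->freelist_lock, flags);
	ioreq = csio_get_scsi_ioreq(scm);
	spin_unlock_irqrestore(&scm->freelist_lock, flags);

	return ioreq;
}
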
+ */ +static inline int +csio_scsi_abort(struct csio_ioreq *ioreq) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORT); + return ioreq->drv_status; +} + +/* + * csio_scsi_close - Close an IO request + * @req: io request SM. + * + * needs to be called with lock held. + */ +static inline int +csio_scsi_close(struct csio_ioreq *ioreq) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSE); + return ioreq->drv_status; +} + +void csio_scsi_cleanup_io_q(struct csio_scsim *, struct list_head *); +int csio_scsim_cleanup_io(struct csio_scsim *, bool abort); +int csio_scsim_cleanup_io_lnode(struct csio_scsim *, + struct csio_lnode *); +struct csio_ioreq *csio_scsi_cmpl_handler(struct csio_hw *, void *, uint32_t, + struct csio_fl_dma_buf *, + void *, uint8_t **); +int csio_scsi_qconfig(struct csio_hw *); +int csio_scsim_init(struct csio_scsim *, struct csio_hw *); +void csio_scsim_exit(struct csio_scsim *); + +#endif /* __CSIO_SCSI_H__ */ diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c new file mode 100644 index 000000000..a516df019 --- /dev/null +++ b/drivers/scsi/csiostor/csio_wr.c @@ -0,0 +1,1719 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "t4_values.h" +#include "csio_hw.h" +#include "csio_wr.h" +#include "csio_mb.h" +#include "csio_defs.h" + +int csio_intr_coalesce_cnt; /* value:SGE_INGRESS_RX_THRESHOLD[0] */ +static int csio_sge_thresh_reg; /* SGE_INGRESS_RX_THRESHOLD[0] */ + +int csio_intr_coalesce_time = 10; /* value:SGE_TIMER_VALUE_1 */ +static int csio_sge_timer_reg = 1; + +#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \ + csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg##_A) + +static void +csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg) +{ + sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0_A + + reg * sizeof(uint32_t)); +} + +/* Free list buffer size */ +static inline uint32_t +csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf) +{ + return sge->sge_fl_buf_size[buf->paddr & 0xF]; +} + +/* Size of the egress queue status page */ +static inline uint32_t +csio_wr_qstat_pgsz(struct csio_hw *hw) +{ + return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; +} + +/* Ring freelist doorbell */ +static inline void +csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq) +{ + /* + * Ring the doorbell only when we have atleast CSIO_QCREDIT_SZ + * number of bytes in the freelist queue. This translates to atleast + * 8 freelist buffer pointers (since each pointer is 8 bytes). + */ + if (flq->inc_idx >= 8) { + csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) | + PIDX_T5_V(flq->inc_idx / 8) | DBTYPE_F, + MYPF_REG(SGE_PF_KDOORBELL_A)); + flq->inc_idx &= 7; + } +} + +/* Write a 0 cidx increment value to enable SGE interrupts for this queue */ +static void +csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid) +{ + csio_wr_reg32(hw, CIDXINC_V(0) | + INGRESSQID_V(iqid) | + TIMERREG_V(X_TIMERREG_RESTART_COUNTER), + MYPF_REG(SGE_PF_GTS_A)); +} + +/* + * csio_wr_fill_fl - Populate the FL buffers of a FL queue. + * @hw: HW module. + * @flq: Freelist queue. + * + * Fill up freelist buffer entries with buffers of size specified + * in the size register. + * + */ +static int +csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_sge *sge = &wrm->sge; + __be64 *d = (__be64 *)(flq->vstart); + struct csio_dma_buf *buf = &flq->un.fl.bufs[0]; + uint64_t paddr; + int sreg = flq->un.fl.sreg; + int n = flq->credits; + + while (n--) { + buf->len = sge->sge_fl_buf_size[sreg]; + buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, buf->len, + &buf->paddr, GFP_KERNEL); + if (!buf->vaddr) { + csio_err(hw, "Could only fill %d buffers!\n", n + 1); + return -ENOMEM; + } + + paddr = buf->paddr | (sreg & 0xF); + + *d++ = cpu_to_be64(paddr); + buf++; + } + + return 0; +} + +/* + * csio_wr_update_fl - + * @hw: HW module. + * @flq: Freelist queue. + * + * + */ +static inline void +csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n) +{ + + flq->inc_idx += n; + flq->pidx += n; + if (unlikely(flq->pidx >= flq->credits)) + flq->pidx -= (uint16_t)flq->credits; + + CSIO_INC_STATS(flq, n_flq_refill); +} + +/* + * csio_wr_alloc_q - Allocate a WR queue and initialize it. + * @hw: HW module + * @qsize: Size of the queue in bytes + * @wrsize: Since of WR in this queue, if fixed. + * @type: Type of queue (Ingress/Egress/Freelist) + * @owner: Module that owns this queue. + * @nflb: Number of freelist buffers for FL. + * @sreg: What is the FL buffer size register? + * @iq_int_handler: Ingress queue handler in INTx mode. 
+ * + * This function allocates and sets up a queue for the caller + * of size qsize, aligned at the required boundary. This is subject to + * be free entries being available in the queue array. If one is found, + * it is initialized with the allocated queue, marked as being used (owner), + * and a handle returned to the caller in form of the queue's index + * into the q_arr array. + * If user has indicated a freelist (by specifying nflb > 0), create + * another queue (with its own index into q_arr) for the freelist. Allocate + * memory for DMA buffer metadata (vaddr, len etc). Save off the freelist + * idx in the ingress queue's flq.idx. This is how a Freelist is associated + * with its owning ingress queue. + */ +int +csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize, + uint16_t type, void *owner, uint32_t nflb, int sreg, + iq_handler_t iq_intx_handler) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_q *q, *flq; + int free_idx = wrm->free_qidx; + int ret_idx = free_idx; + uint32_t qsz; + int flq_idx; + + if (free_idx >= wrm->num_q) { + csio_err(hw, "No more free queues.\n"); + return -1; + } + + switch (type) { + case CSIO_EGRESS: + qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw); + break; + case CSIO_INGRESS: + switch (wrsize) { + case 16: + case 32: + case 64: + case 128: + break; + default: + csio_err(hw, "Invalid Ingress queue WR size:%d\n", + wrsize); + return -1; + } + + /* + * Number of elements must be a multiple of 16 + * So this includes status page size + */ + qsz = ALIGN(qsize/wrsize, 16) * wrsize; + + break; + case CSIO_FREELIST: + qsz = ALIGN(qsize/wrsize, 8) * wrsize + csio_wr_qstat_pgsz(hw); + break; + default: + csio_err(hw, "Invalid queue type: 0x%x\n", type); + return -1; + } + + q = wrm->q_arr[free_idx]; + + q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart, + GFP_KERNEL); + if (!q->vstart) { + csio_err(hw, + "Failed to allocate DMA memory for " + "queue at id: %d size: %d\n", free_idx, qsize); + return -1; + } + + q->type = type; + q->owner = owner; + q->pidx = q->cidx = q->inc_idx = 0; + q->size = qsz; + q->wr_sz = wrsize; /* If using fixed size WRs */ + + wrm->free_qidx++; + + if (type == CSIO_INGRESS) { + /* Since queue area is set to zero */ + q->un.iq.genbit = 1; + + /* + * Ingress queue status page size is always the size of + * the ingress queue entry. + */ + q->credits = (qsz - q->wr_sz) / q->wr_sz; + q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz + - q->wr_sz); + + /* Allocate memory for FL if requested */ + if (nflb > 0) { + flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64), + sizeof(__be64), CSIO_FREELIST, + owner, 0, sreg, NULL); + if (flq_idx == -1) { + csio_err(hw, + "Failed to allocate FL queue" + " for IQ idx:%d\n", free_idx); + return -1; + } + + /* Associate the new FL with the Ingress quue */ + q->un.iq.flq_idx = flq_idx; + + flq = wrm->q_arr[q->un.iq.flq_idx]; + flq->un.fl.bufs = kcalloc(flq->credits, + sizeof(struct csio_dma_buf), + GFP_KERNEL); + if (!flq->un.fl.bufs) { + csio_err(hw, + "Failed to allocate FL queue bufs" + " for IQ idx:%d\n", free_idx); + return -1; + } + + flq->un.fl.packen = 0; + flq->un.fl.offset = 0; + flq->un.fl.sreg = sreg; + + /* Fill up the free list buffers */ + if (csio_wr_fill_fl(hw, flq)) + return -1; + + /* + * Make sure in a FLQ, atleast 1 credit (8 FL buffers) + * remains unpopulated,otherwise HW thinks + * FLQ is empty. + */ + flq->pidx = flq->inc_idx = flq->credits - 8; + } else { + q->un.iq.flq_idx = -1; + } + + /* Associate the IQ INTx handler. 
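
Editor's note, worked numbers for the free-list accounting used above: one FL "credit" is CSIO_QCREDIT_SZ bytes, i.e. 8 buffer pointers of 8 bytes each per the comment in csio_wr_ring_fldb(), which is why that routine only rings the doorbell once flq->inc_idx reaches 8, reports PIDX_T5_V(inc_idx / 8) whole credits, and carries the remainder in inc_idx &= 7. The same 8-pointer unit explains the initialization just above: flq->pidx = flq->inc_idx = credits - 8 leaves exactly one credit of the ring unpopulated, so the hardware never mistakes a completely filled free list for an empty one.
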
*/ + q->un.iq.iq_intx_handler = iq_intx_handler; + + csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID; + + } else if (type == CSIO_EGRESS) { + q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ; + q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz + - csio_wr_qstat_pgsz(hw)); + csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID; + } else { /* Freelist */ + q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64); + q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz + - csio_wr_qstat_pgsz(hw)); + csio_q_flid(hw, ret_idx) = CSIO_MAX_QID; + } + + return ret_idx; +} + +/* + * csio_wr_iq_create_rsp - Response handler for IQ creation. + * @hw: The HW module. + * @mbp: Mailbox. + * @iq_idx: Ingress queue that got created. + * + * Handle FW_IQ_CMD mailbox completion. Save off the assigned IQ/FL ids. + */ +static int +csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx) +{ + struct csio_iq_params iqp; + enum fw_retval retval; + uint32_t iq_id; + int flq_idx; + + memset(&iqp, 0, sizeof(struct csio_iq_params)); + + csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp); + + if (retval != FW_SUCCESS) { + csio_err(hw, "IQ cmd returned 0x%x!\n", retval); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + csio_q_iqid(hw, iq_idx) = iqp.iqid; + csio_q_physiqid(hw, iq_idx) = iqp.physiqid; + csio_q_pidx(hw, iq_idx) = csio_q_cidx(hw, iq_idx) = 0; + csio_q_inc_idx(hw, iq_idx) = 0; + + /* Actual iq-id. */ + iq_id = iqp.iqid - hw->wrm.fw_iq_start; + + /* Set the iq-id to iq map table. */ + if (iq_id >= CSIO_MAX_IQ) { + csio_err(hw, + "Exceeding MAX_IQ(%d) supported!" + " iqid:%d rel_iqid:%d FW iq_start:%d\n", + CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + csio_q_set_intr_map(hw, iq_idx, iq_id); + + /* + * During FW_IQ_CMD, FW sets interrupt_sent bit to 1 in the SGE + * ingress context of this queue. This will block interrupts to + * this queue until the next GTS write. Therefore, we do a + * 0-cidx increment GTS write for this queue just to clear the + * interrupt_sent bit. This will re-enable interrupts to this + * queue. + */ + csio_wr_sge_intr_enable(hw, iqp.physiqid); + + flq_idx = csio_q_iq_flq_idx(hw, iq_idx); + if (flq_idx != -1) { + struct csio_q *flq = hw->wrm.q_arr[flq_idx]; + + csio_q_flid(hw, flq_idx) = iqp.fl0id; + csio_q_cidx(hw, flq_idx) = 0; + csio_q_pidx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8; + csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8; + + /* Now update SGE about the buffers allocated during init */ + csio_wr_ring_fldb(hw, flq); + } + + mempool_free(mbp, hw->mb_mempool); + + return 0; +} + +/* + * csio_wr_iq_create - Configure an Ingress queue with FW. + * @hw: The HW module. + * @priv: Private data object. + * @iq_idx: Ingress queue index in the WR module. + * @vec: MSIX vector. + * @portid: PCIE Channel to be associated with this queue. + * @async: Is this a FW asynchronous message handling queue? + * @cbfn: Completion callback. + * + * This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox + * with alloc/write bits set. 
+ */ +int +csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx, + uint32_t vec, uint8_t portid, bool async, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct csio_mb *mbp; + struct csio_iq_params iqp; + int flq_idx; + + memset(&iqp, 0, sizeof(struct csio_iq_params)); + csio_q_portid(hw, iq_idx) = portid; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + csio_err(hw, "IQ command out of memory!\n"); + return -ENOMEM; + } + + switch (hw->intr_mode) { + case CSIO_IM_INTX: + case CSIO_IM_MSI: + /* For interrupt forwarding queue only */ + if (hw->intr_iq_idx == iq_idx) + iqp.iqandst = X_INTERRUPTDESTINATION_PCIE; + else + iqp.iqandst = X_INTERRUPTDESTINATION_IQ; + iqp.iqandstindex = + csio_q_physiqid(hw, hw->intr_iq_idx); + break; + case CSIO_IM_MSIX: + iqp.iqandst = X_INTERRUPTDESTINATION_PCIE; + iqp.iqandstindex = (uint16_t)vec; + break; + case CSIO_IM_NONE: + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + /* Pass in the ingress queue cmd parameters */ + iqp.pfn = hw->pfn; + iqp.vfn = 0; + iqp.iq_start = 1; + iqp.viid = 0; + iqp.type = FW_IQ_TYPE_FL_INT_CAP; + iqp.iqasynch = async; + if (csio_intr_coalesce_cnt) + iqp.iqanus = X_UPDATESCHEDULING_COUNTER_OPTTIMER; + else + iqp.iqanus = X_UPDATESCHEDULING_TIMER; + iqp.iqanud = X_UPDATEDELIVERY_INTERRUPT; + iqp.iqpciech = portid; + iqp.iqintcntthresh = (uint8_t)csio_sge_thresh_reg; + + switch (csio_q_wr_sz(hw, iq_idx)) { + case 16: + iqp.iqesize = 0; break; + case 32: + iqp.iqesize = 1; break; + case 64: + iqp.iqesize = 2; break; + case 128: + iqp.iqesize = 3; break; + } + + iqp.iqsize = csio_q_size(hw, iq_idx) / + csio_q_wr_sz(hw, iq_idx); + iqp.iqaddr = csio_q_pstart(hw, iq_idx); + + flq_idx = csio_q_iq_flq_idx(hw, iq_idx); + if (flq_idx != -1) { + enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id); + struct csio_q *flq = hw->wrm.q_arr[flq_idx]; + + iqp.fl0paden = 1; + iqp.fl0packen = flq->un.fl.packen ? 1 : 0; + iqp.fl0fbmin = X_FETCHBURSTMIN_64B; + iqp.fl0fbmax = ((chip == CHELSIO_T5) ? + X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B); + iqp.fl0size = csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ; + iqp.fl0addr = csio_q_pstart(hw, flq_idx); + } + + csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of IQ cmd failed!\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + if (cbfn != NULL) + return 0; + + return csio_wr_iq_create_rsp(hw, mbp, iq_idx); +} + +/* + * csio_wr_eq_create_rsp - Response handler for EQ creation. + * @hw: The HW module. + * @mbp: Mailbox. + * @eq_idx: Egress queue that got created. + * + * Handle FW_EQ_OFLD_CMD mailbox completion. Save off the assigned EQ ids. + */ +static int +csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx) +{ + struct csio_eq_params eqp; + enum fw_retval retval; + + memset(&eqp, 0, sizeof(struct csio_eq_params)); + + csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp); + + if (retval != FW_SUCCESS) { + csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + csio_q_eqid(hw, eq_idx) = (uint16_t)eqp.eqid; + csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid; + csio_q_pidx(hw, eq_idx) = csio_q_cidx(hw, eq_idx) = 0; + csio_q_inc_idx(hw, eq_idx) = 0; + + mempool_free(mbp, hw->mb_mempool); + + return 0; +} + +/* + * csio_wr_eq_create - Configure an Egress queue with FW. + * @hw: HW module. + * @priv: Private data. + * @eq_idx: Egress queue index in the WR module. 
+ * @iq_idx: Associated ingress queue index. + * @cbfn: Completion callback. + * + * This API configures a offload egress queue with FW by issuing a + * FW_EQ_OFLD_CMD (with alloc + write ) mailbox. + */ +int +csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx, + int iq_idx, uint8_t portid, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct csio_mb *mbp; + struct csio_eq_params eqp; + + memset(&eqp, 0, sizeof(struct csio_eq_params)); + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + csio_err(hw, "EQ command out of memory!\n"); + return -ENOMEM; + } + + eqp.pfn = hw->pfn; + eqp.vfn = 0; + eqp.eqstart = 1; + eqp.hostfcmode = X_HOSTFCMODE_STATUS_PAGE; + eqp.iqid = csio_q_iqid(hw, iq_idx); + eqp.fbmin = X_FETCHBURSTMIN_64B; + eqp.fbmax = X_FETCHBURSTMAX_512B; + eqp.cidxfthresh = 0; + eqp.pciechn = portid; + eqp.eqsize = csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ; + eqp.eqaddr = csio_q_pstart(hw, eq_idx); + + csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, + &eqp, cbfn); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of EQ OFLD cmd failed!\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + if (cbfn != NULL) + return 0; + + return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx); +} + +/* + * csio_wr_iq_destroy_rsp - Response handler for IQ removal. + * @hw: The HW module. + * @mbp: Mailbox. + * @iq_idx: Ingress queue that was freed. + * + * Handle FW_IQ_CMD (free) mailbox completion. + */ +static int +csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx) +{ + enum fw_retval retval = csio_mb_fw_retval(mbp); + int rv = 0; + + if (retval != FW_SUCCESS) + rv = -EINVAL; + + mempool_free(mbp, hw->mb_mempool); + + return rv; +} + +/* + * csio_wr_iq_destroy - Free an ingress queue. + * @hw: The HW module. + * @priv: Private data object. + * @iq_idx: Ingress queue index to destroy + * @cbfn: Completion callback. + * + * This API frees an ingress queue by issuing the FW_IQ_CMD + * with the free bit set. + */ +static int +csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx, + void (*cbfn)(struct csio_hw *, struct csio_mb *)) +{ + int rv = 0; + struct csio_mb *mbp; + struct csio_iq_params iqp; + int flq_idx; + + memset(&iqp, 0, sizeof(struct csio_iq_params)); + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) + return -ENOMEM; + + iqp.pfn = hw->pfn; + iqp.vfn = 0; + iqp.iqid = csio_q_iqid(hw, iq_idx); + iqp.type = FW_IQ_TYPE_FL_INT_CAP; + + flq_idx = csio_q_iq_flq_idx(hw, iq_idx); + if (flq_idx != -1) + iqp.fl0id = csio_q_flid(hw, flq_idx); + else + iqp.fl0id = 0xFFFF; + + iqp.fl1id = 0xFFFF; + + csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn); + + rv = csio_mb_issue(hw, mbp); + if (rv != 0) { + mempool_free(mbp, hw->mb_mempool); + return rv; + } + + if (cbfn != NULL) + return 0; + + return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx); +} + +/* + * csio_wr_eq_destroy_rsp - Response handler for OFLD EQ creation. + * @hw: The HW module. + * @mbp: Mailbox. + * @eq_idx: Egress queue that was freed. + * + * Handle FW_OFLD_EQ_CMD (free) mailbox completion. + */ +static int +csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx) +{ + enum fw_retval retval = csio_mb_fw_retval(mbp); + int rv = 0; + + if (retval != FW_SUCCESS) + rv = -EINVAL; + + mempool_free(mbp, hw->mb_mempool); + + return rv; +} + +/* + * csio_wr_eq_destroy - Free an Egress queue. + * @hw: The HW module. + * @priv: Private data object. 
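
Editor's note: taken together, the queue APIs above are used in a fixed order: allocate the host-side rings with csio_wr_alloc_q(), then tell the firmware about the ingress queue before the egress queue, since the egress command is bound to the ingress queue's iqid. The sketch below is a minimal illustration of that ordering; setup_one_qset_sketch() is not a driver function, iq_idx/eq_idx/vec/portid are assumed to come from csio_wr_alloc_q() and the interrupt setup code, and error handling is reduced to early returns.

/* Illustrative only: create one IQ (with its FL) and one EQ bound to it. */
static int setup_one_qset_sketch(struct csio_hw *hw, int iq_idx, int eq_idx,
				 uint32_t vec, uint8_t portid)
{
	int rv;

	/* FW must learn about the ingress queue first; NULL cbfn makes the call synchronous. */
	rv = csio_wr_iq_create(hw, NULL, iq_idx, vec, portid, false, NULL);
	if (rv)
		return rv;

	/* The egress queue is tied to the ingress queue configured above. */
	return csio_wr_eq_create(hw, NULL, eq_idx, iq_idx, portid, NULL);

	/* Teardown of everything created here goes through csio_wr_destroy_queues(). */
}
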
+ * @eq_idx: Egress queue index to destroy + * @cbfn: Completion callback. + * + * This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD + * with the free bit set. + */ +static int +csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + int rv = 0; + struct csio_mb *mbp; + struct csio_eq_params eqp; + + memset(&eqp, 0, sizeof(struct csio_eq_params)); + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) + return -ENOMEM; + + eqp.pfn = hw->pfn; + eqp.vfn = 0; + eqp.eqid = csio_q_eqid(hw, eq_idx); + + csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn); + + rv = csio_mb_issue(hw, mbp); + if (rv != 0) { + mempool_free(mbp, hw->mb_mempool); + return rv; + } + + if (cbfn != NULL) + return 0; + + return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx); +} + +/* + * csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page + * @hw: HW module + * @qidx: Egress queue index + * + * Cleanup the Egress queue status page. + */ +static void +csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx) +{ + struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx]; + struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap; + + memset(stp, 0, sizeof(*stp)); +} + +/* + * csio_wr_cleanup_iq_ftr - Cleanup Footer entries in IQ + * @hw: HW module + * @qidx: Ingress queue index + * + * Cleanup the footer entries in the given ingress queue, + * set to 1 the internal copy of genbit. + */ +static void +csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_q *q = wrm->q_arr[qidx]; + void *wr; + struct csio_iqwr_footer *ftr; + uint32_t i = 0; + + /* set to 1 since we are just about zero out genbit */ + q->un.iq.genbit = 1; + + for (i = 0; i < q->credits; i++) { + /* Get the WR */ + wr = (void *)((uintptr_t)q->vstart + + (i * q->wr_sz)); + /* Get the footer */ + ftr = (struct csio_iqwr_footer *)((uintptr_t)wr + + (q->wr_sz - sizeof(*ftr))); + /* Zero out footer */ + memset(ftr, 0, sizeof(*ftr)); + } +} + +int +csio_wr_destroy_queues(struct csio_hw *hw, bool cmd) +{ + int i, flq_idx; + struct csio_q *q; + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + int rv; + + for (i = 0; i < wrm->free_qidx; i++) { + q = wrm->q_arr[i]; + + switch (q->type) { + case CSIO_EGRESS: + if (csio_q_eqid(hw, i) != CSIO_MAX_QID) { + csio_wr_cleanup_eq_stpg(hw, i); + if (!cmd) { + csio_q_eqid(hw, i) = CSIO_MAX_QID; + continue; + } + + rv = csio_wr_eq_destroy(hw, NULL, i, NULL); + if ((rv == -EBUSY) || (rv == -ETIMEDOUT)) + cmd = false; + + csio_q_eqid(hw, i) = CSIO_MAX_QID; + } + fallthrough; + case CSIO_INGRESS: + if (csio_q_iqid(hw, i) != CSIO_MAX_QID) { + csio_wr_cleanup_iq_ftr(hw, i); + if (!cmd) { + csio_q_iqid(hw, i) = CSIO_MAX_QID; + flq_idx = csio_q_iq_flq_idx(hw, i); + if (flq_idx != -1) + csio_q_flid(hw, flq_idx) = + CSIO_MAX_QID; + continue; + } + + rv = csio_wr_iq_destroy(hw, NULL, i, NULL); + if ((rv == -EBUSY) || (rv == -ETIMEDOUT)) + cmd = false; + + csio_q_iqid(hw, i) = CSIO_MAX_QID; + flq_idx = csio_q_iq_flq_idx(hw, i); + if (flq_idx != -1) + csio_q_flid(hw, flq_idx) = CSIO_MAX_QID; + } + break; + default: + break; + } + } + + hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED; + + return 0; +} + +/* + * csio_wr_get - Get requested size of WR entry/entries from queue. + * @hw: HW module. + * @qidx: Index of queue. + * @size: Cumulative size of Work request(s). + * @wrp: Work request pair. 
+ * + * If requested credits are available, return the start address of the + * work request in the work request pair. Set pidx accordingly and + * return. + * + * NOTE about WR pair: + * ================== + * A WR can start towards the end of a queue, and then continue at the + * beginning, since the queue is considered to be circular. This will + * require a pair of address/size to be passed back to the caller - + * hence Work request pair format. + */ +int +csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size, + struct csio_wr_pair *wrp) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_q *q = wrm->q_arr[qidx]; + void *cwr = (void *)((uintptr_t)(q->vstart) + + (q->pidx * CSIO_QCREDIT_SZ)); + struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap; + uint16_t cidx = q->cidx = ntohs(stp->cidx); + uint16_t pidx = q->pidx; + uint32_t req_sz = ALIGN(size, CSIO_QCREDIT_SZ); + int req_credits = req_sz / CSIO_QCREDIT_SZ; + int credits; + + CSIO_DB_ASSERT(q->owner != NULL); + CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx)); + CSIO_DB_ASSERT(cidx <= q->credits); + + /* Calculate credits */ + if (pidx > cidx) { + credits = q->credits - (pidx - cidx) - 1; + } else if (cidx > pidx) { + credits = cidx - pidx - 1; + } else { + /* cidx == pidx, empty queue */ + credits = q->credits; + CSIO_INC_STATS(q, n_qempty); + } + + /* + * Check if we have enough credits. + * credits = 1 implies queue is full. + */ + if (!credits || (req_credits > credits)) { + CSIO_INC_STATS(q, n_qfull); + return -EBUSY; + } + + /* + * If we are here, we have enough credits to satisfy the + * request. Check if we are near the end of q, and if WR spills over. + * If it does, use the first addr/size to cover the queue until + * the end. Fit the remainder portion of the request at the top + * of queue and return it in the second addr/len. Set pidx + * accordingly. + */ + if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) { + wrp->addr1 = cwr; + wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr); + wrp->addr2 = q->vstart; + wrp->size2 = req_sz - wrp->size1; + q->pidx = (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) / + CSIO_QCREDIT_SZ); + CSIO_INC_STATS(q, n_qwrap); + CSIO_INC_STATS(q, n_eq_wr_split); + } else { + wrp->addr1 = cwr; + wrp->size1 = req_sz; + wrp->addr2 = NULL; + wrp->size2 = 0; + q->pidx += (uint16_t)req_credits; + + /* We are the end of queue, roll back pidx to top of queue */ + if (unlikely(q->pidx == q->credits)) { + q->pidx = 0; + CSIO_INC_STATS(q, n_qwrap); + } + } + + q->inc_idx = (uint16_t)req_credits; + + CSIO_INC_STATS(q, n_tot_reqs); + + return 0; +} + +/* + * csio_wr_copy_to_wrp - Copies given data into WR. + * @data_buf - Data buffer + * @wrp - Work request pair. + * @wr_off - Work request offset. + * @data_len - Data length. + * + * Copies the given data in Work Request. Work request pair(wrp) specifies + * address information of Work request. + * Returns: none + */ +void +csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp, + uint32_t wr_off, uint32_t data_len) +{ + uint32_t nbytes; + + /* Number of space available in buffer addr1 of WRP */ + nbytes = ((wrp->size1 - wr_off) >= data_len) ? 
+ data_len : (wrp->size1 - wr_off); + + memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes); + data_len -= nbytes; + + /* Write the remaining data from the begining of circular buffer */ + if (data_len) { + CSIO_DB_ASSERT(data_len <= wrp->size2); + CSIO_DB_ASSERT(wrp->addr2 != NULL); + memcpy(wrp->addr2, (uint8_t *) data_buf + nbytes, data_len); + } +} + +/* + * csio_wr_issue - Notify chip of Work request. + * @hw: HW module. + * @qidx: Index of queue. + * @prio: 0: Low priority, 1: High priority + * + * Rings the SGE Doorbell by writing the current producer index of the passed + * in queue into the register. + * + */ +int +csio_wr_issue(struct csio_hw *hw, int qidx, bool prio) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_q *q = wrm->q_arr[qidx]; + + CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx)); + + wmb(); + /* Ring SGE Doorbell writing q->pidx into it */ + csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) | + PIDX_T5_V(q->inc_idx) | DBTYPE_F, + MYPF_REG(SGE_PF_KDOORBELL_A)); + q->inc_idx = 0; + + return 0; +} + +static inline uint32_t +csio_wr_avail_qcredits(struct csio_q *q) +{ + if (q->pidx > q->cidx) + return q->pidx - q->cidx; + else if (q->cidx > q->pidx) + return q->credits - (q->cidx - q->pidx); + else + return 0; /* cidx == pidx, empty queue */ +} + +/* + * csio_wr_inval_flq_buf - Invalidate a free list buffer entry. + * @hw: HW module. + * @flq: The freelist queue. + * + * Invalidate the driver's version of a freelist buffer entry, + * without freeing the associated the DMA memory. The entry + * to be invalidated is picked up from the current Free list + * queue cidx. + * + */ +static inline void +csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq) +{ + flq->cidx++; + if (flq->cidx == flq->credits) { + flq->cidx = 0; + CSIO_INC_STATS(flq, n_qwrap); + } +} + +/* + * csio_wr_process_fl - Process a freelist completion. + * @hw: HW module. + * @q: The ingress queue attached to the Freelist. + * @wr: The freelist completion WR in the ingress queue. + * @len_to_qid: The lower 32-bits of the first flit of the RSP footer + * @iq_handler: Caller's handler for this completion. + * @priv: Private pointer of caller + * + */ +static inline void +csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q, + void *wr, uint32_t len_to_qid, + void (*iq_handler)(struct csio_hw *, void *, + uint32_t, struct csio_fl_dma_buf *, + void *), + void *priv) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_sge *sge = &wrm->sge; + struct csio_fl_dma_buf flb; + struct csio_dma_buf *buf, *fbuf; + uint32_t bufsz, len, lastlen = 0; + struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx]; + + CSIO_DB_ASSERT(flq != NULL); + + len = len_to_qid; + + if (len & IQWRF_NEWBUF) { + if (flq->un.fl.offset > 0) { + csio_wr_inval_flq_buf(hw, flq); + flq->un.fl.offset = 0; + } + len = IQWRF_LEN_GET(len); + } + + CSIO_DB_ASSERT(len != 0); + + flb.totlen = len; + + /* Consume all freelist buffers used for len bytes */ + for (fbuf = flb.flbufs; ; fbuf++) { + buf = &flq->un.fl.bufs[flq->cidx]; + bufsz = csio_wr_fl_bufsz(sge, buf); + + fbuf->paddr = buf->paddr; + fbuf->vaddr = buf->vaddr; + + flb.offset = flq->un.fl.offset; + lastlen = min(bufsz, len); + fbuf->len = lastlen; + + len -= lastlen; + if (!len) + break; + csio_wr_inval_flq_buf(hw, flq); + } + + flb.defer_free = flq->un.fl.packen ? 
0 : 1; + + iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer), + &flb, priv); + + if (flq->un.fl.packen) + flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align); + else + csio_wr_inval_flq_buf(hw, flq); + +} + +/* + * csio_is_new_iqwr - Is this a new Ingress queue entry ? + * @q: Ingress quueue. + * @ftr: Ingress queue WR SGE footer. + * + * The entry is new if our generation bit matches the corresponding + * bit in the footer of the current WR. + */ +static inline bool +csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr) +{ + return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT)); +} + +/* + * csio_wr_process_iq - Process elements in Ingress queue. + * @hw: HW pointer + * @qidx: Index of queue + * @iq_handler: Handler for this queue + * @priv: Caller's private pointer + * + * This routine walks through every entry of the ingress queue, calling + * the provided iq_handler with the entry, until the generation bit + * flips. + */ +int +csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q, + void (*iq_handler)(struct csio_hw *, void *, + uint32_t, struct csio_fl_dma_buf *, + void *), + void *priv) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz)); + struct csio_iqwr_footer *ftr; + uint32_t wr_type, fw_qid, qid; + struct csio_q *q_completed; + struct csio_q *flq = csio_iq_has_fl(q) ? + wrm->q_arr[q->un.iq.flq_idx] : NULL; + int rv = 0; + + /* Get the footer */ + ftr = (struct csio_iqwr_footer *)((uintptr_t)wr + + (q->wr_sz - sizeof(*ftr))); + + /* + * When q wrapped around last time, driver should have inverted + * ic.genbit as well. + */ + while (csio_is_new_iqwr(q, ftr)) { + + CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <= + (uintptr_t)q->vwrap); + rmb(); + wr_type = IQWRF_TYPE_GET(ftr->u.type_gen); + + switch (wr_type) { + case X_RSPD_TYPE_CPL: + /* Subtract footer from WR len */ + iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv); + break; + case X_RSPD_TYPE_FLBUF: + csio_wr_process_fl(hw, q, wr, + ntohl(ftr->pldbuflen_qid), + iq_handler, priv); + break; + case X_RSPD_TYPE_INTR: + fw_qid = ntohl(ftr->pldbuflen_qid); + qid = fw_qid - wrm->fw_iq_start; + q_completed = hw->wrm.intr_map[qid]; + + if (unlikely(qid == + csio_q_physiqid(hw, hw->intr_iq_idx))) { + /* + * We are already in the Forward Interrupt + * Interrupt Queue Service! Do-not service + * again! + * + */ + } else { + CSIO_DB_ASSERT(q_completed); + CSIO_DB_ASSERT( + q_completed->un.iq.iq_intx_handler); + + /* Call the queue handler. */ + q_completed->un.iq.iq_intx_handler(hw, NULL, + 0, NULL, (void *)q_completed); + } + break; + default: + csio_warn(hw, "Unknown resp type 0x%x received\n", + wr_type); + CSIO_INC_STATS(q, n_rsp_unknown); + break; + } + + /* + * Ingress *always* has fixed size WR entries. Therefore, + * there should always be complete WRs towards the end of + * queue. + */ + if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) { + + /* Roll over to start of queue */ + q->cidx = 0; + wr = q->vstart; + + /* Toggle genbit */ + q->un.iq.genbit ^= 0x1; + + CSIO_INC_STATS(q, n_qwrap); + } else { + q->cidx++; + wr = (void *)((uintptr_t)(q->vstart) + + (q->cidx * q->wr_sz)); + } + + ftr = (struct csio_iqwr_footer *)((uintptr_t)wr + + (q->wr_sz - sizeof(*ftr))); + q->inc_idx++; + + } /* while (q->un.iq.genbit == hdr->genbit) */ + + /* + * We need to re-arm SGE interrupts in case we got a stray interrupt, + * especially in msix mode. With INTx, this may be a common occurence. 
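
Editor's note: the work-request API above splits a reservation that wraps around the ring into two address/length pieces (addr1/size1 and addr2/size2), and csio_wr_copy_to_wrp() hides that split from callers. The sketch below shows how a caller might drive csio_wr_get(), csio_wr_copy_to_wrp() and csio_wr_issue() for one egress WR; build_wr_sketch(), wr_buf and wr_len are illustrative only, and in the real driver this sequence typically runs under the HW lock, as in the SCSI I/O path earlier in the file.

/* Illustrative only: reserve, fill and post one egress work request. */
static int build_wr_sketch(struct csio_hw *hw, int eq_idx,
			   void *wr_buf, uint32_t wr_len)
{
	struct csio_wr_pair wrp;
	int rv;

	/* Reserve wr_len bytes (rounded up to whole CSIO_QCREDIT_SZ credits). */
	rv = csio_wr_get(hw, eq_idx, wr_len, &wrp);
	if (rv)
		return rv;	/* -EBUSY: not enough credits right now */

	/* Copy the prepared WR; a wrap-around split between addr1/addr2 is handled here. */
	csio_wr_copy_to_wrp(wr_buf, &wrp, 0, wr_len);

	/* Ring the SGE doorbell with the credits accumulated by csio_wr_get(). */
	return csio_wr_issue(hw, eq_idx, false);
}
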
+ */ + if (unlikely(!q->inc_idx)) { + CSIO_INC_STATS(q, n_stray_comp); + rv = -EINVAL; + goto restart; + } + + /* Replenish free list buffers if pending falls below low water mark */ + if (flq) { + uint32_t avail = csio_wr_avail_qcredits(flq); + if (avail <= 16) { + /* Make sure in FLQ, atleast 1 credit (8 FL buffers) + * remains unpopulated otherwise HW thinks + * FLQ is empty. + */ + csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail); + csio_wr_ring_fldb(hw, flq); + } + } + +restart: + /* Now inform SGE about our incremental index value */ + csio_wr_reg32(hw, CIDXINC_V(q->inc_idx) | + INGRESSQID_V(q->un.iq.physiqid) | + TIMERREG_V(csio_sge_timer_reg), + MYPF_REG(SGE_PF_GTS_A)); + q->stats.n_tot_rsps += q->inc_idx; + + q->inc_idx = 0; + + return rv; +} + +int +csio_wr_process_iq_idx(struct csio_hw *hw, int qidx, + void (*iq_handler)(struct csio_hw *, void *, + uint32_t, struct csio_fl_dma_buf *, + void *), + void *priv) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_q *iq = wrm->q_arr[qidx]; + + return csio_wr_process_iq(hw, iq, iq_handler, priv); +} + +static int +csio_closest_timer(struct csio_sge *s, int time) +{ + int i, delta, match = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { + delta = time - s->timer_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +static int +csio_closest_thresh(struct csio_sge *s, int cnt) +{ + int i, delta, match = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { + delta = cnt - s->counter_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +static void +csio_wr_fixup_host_params(struct csio_hw *hw) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_sge *sge = &wrm->sge; + uint32_t clsz = L1_CACHE_BYTES; + uint32_t s_hps = PAGE_SHIFT - 10; + uint32_t stat_len = clsz > 64 ? 128 : 64; + u32 fl_align = clsz < 32 ? 32 : clsz; + u32 pack_align; + u32 ingpad, ingpack; + + csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) | + HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) | + HOSTPAGESIZEPF4_V(s_hps) | HOSTPAGESIZEPF5_V(s_hps) | + HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps), + SGE_HOST_PAGE_SIZE_A); + + /* T5 introduced the separation of the Free List Padding and + * Packing Boundaries. Thus, we can select a smaller Padding + * Boundary to avoid uselessly chewing up PCIe Link and Memory + * Bandwidth, and use a Packing Boundary which is large enough + * to avoid false sharing between CPUs, etc. + * + * For the PCI Link, the smaller the Padding Boundary the + * better. For the Memory Controller, a smaller Padding + * Boundary is better until we cross under the Memory Line + * Size (the minimum unit of transfer to/from Memory). If we + * have a Padding Boundary which is smaller than the Memory + * Line Size, that'll involve a Read-Modify-Write cycle on the + * Memory Controller which is never good. + */ + + /* We want the Packing Boundary to be based on the Cache Line + * Size in order to help avoid False Sharing performance + * issues between CPUs, etc. We also want the Packing + * Boundary to incorporate the PCI-E Maximum Payload Size. We + * get best performance when the Packing Boundary is a + * multiple of the Maximum Payload Size. 
+ */ + pack_align = fl_align; + if (pci_is_pcie(hw->pdev)) { + u32 mps, mps_log; + u16 devctl; + + /* The PCIe Device Control Maximum Payload Size field + * [bits 7:5] encodes sizes as powers of 2 starting at + * 128 bytes. + */ + pcie_capability_read_word(hw->pdev, PCI_EXP_DEVCTL, &devctl); + mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7; + mps = 1 << mps_log; + if (mps > pack_align) + pack_align = mps; + } + + /* T5/T6 have a special interpretation of the "0" + * value for the Packing Boundary. This corresponds to 16 + * bytes instead of the expected 32 bytes. + */ + if (pack_align <= 16) { + ingpack = INGPACKBOUNDARY_16B_X; + fl_align = 16; + } else if (pack_align == 32) { + ingpack = INGPACKBOUNDARY_64B_X; + fl_align = 64; + } else { + u32 pack_align_log = fls(pack_align) - 1; + + ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X; + fl_align = pack_align; + } + + /* Use the smallest Ingress Padding which isn't smaller than + * the Memory Controller Read/Write Size. We'll take that as + * being 8 bytes since we don't know of any system with a + * wider Memory Controller Bus Width. + */ + if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) + ingpad = INGPADBOUNDARY_32B_X; + else + ingpad = T6_INGPADBOUNDARY_8B_X; + + csio_set_reg_field(hw, SGE_CONTROL_A, + INGPADBOUNDARY_V(INGPADBOUNDARY_M) | + EGRSTATUSPAGESIZE_F, + INGPADBOUNDARY_V(ingpad) | + EGRSTATUSPAGESIZE_V(stat_len != 64)); + csio_set_reg_field(hw, SGE_CONTROL2_A, + INGPACKBOUNDARY_V(INGPACKBOUNDARY_M), + INGPACKBOUNDARY_V(ingpack)); + + /* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */ + csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A); + + /* + * If using hard params, the following will get set correctly + * in csio_wr_set_sge(). + */ + if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) { + csio_wr_reg32(hw, + (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) + + fl_align - 1) & ~(fl_align - 1), + SGE_FL_BUFFER_SIZE2_A); + csio_wr_reg32(hw, + (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) + + fl_align - 1) & ~(fl_align - 1), + SGE_FL_BUFFER_SIZE3_A); + } + + sge->csio_fl_align = fl_align; + + csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A); + + /* default value of rx_dma_offset of the NIC driver */ + csio_set_reg_field(hw, SGE_CONTROL_A, + PKTSHIFT_V(PKTSHIFT_M), + PKTSHIFT_V(CSIO_SGE_RX_DMA_OFFSET)); + + csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG_A, + CSUM_HAS_PSEUDO_HDR_F, 0); +} + +static void +csio_init_intr_coalesce_parms(struct csio_hw *hw) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_sge *sge = &wrm->sge; + + csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt); + if (csio_intr_coalesce_cnt) { + csio_sge_thresh_reg = 0; + csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER; + return; + } + + csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time); +} + +/* + * csio_wr_get_sge - Get SGE register values. + * @hw: HW module. + * + * Used by non-master functions and by master-functions relying on config file. 
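/*
 * The padding/packing setup above derives the packing boundary from
 * the PCIe Maximum Payload Size: DEVCTL bits 7:5 encode the MPS as a
 * power of two starting at 128 bytes.  A standalone sketch of that
 * decode and of bumping the boundary up to the MPS; the devctl value
 * and the 64-byte cache-line assumption are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_DEVCTL_PAYLOAD	0x00e0	/* bits 7:5, as in PCI_EXP_DEVCTL_PAYLOAD */

static uint32_t decode_mps(uint16_t devctl)
{
	uint32_t mps_log = ((devctl & DEMO_DEVCTL_PAYLOAD) >> 5) + 7;

	return 1u << mps_log;	/* 0 -> 128B, 1 -> 256B, ..., 5 -> 4096B */
}

int main(void)
{
	uint16_t devctl = 2 << 5;	/* pretend the field reads back as 2 */
	uint32_t fl_align = 64;		/* assumed cache-line alignment */
	uint32_t mps = decode_mps(devctl);
	uint32_t pack_align = mps > fl_align ? mps : fl_align;

	printf("MPS=%u bytes, packing boundary=%u bytes\n", mps, pack_align);
	return 0;
}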
+ */ +static void +csio_wr_get_sge(struct csio_hw *hw) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_sge *sge = &wrm->sge; + uint32_t ingpad; + int i; + u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; + u32 ingress_rx_threshold; + + sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A); + + ingpad = INGPADBOUNDARY_G(sge->sge_control); + + switch (ingpad) { + case X_INGPCIEBOUNDARY_32B: + sge->csio_fl_align = 32; break; + case X_INGPCIEBOUNDARY_64B: + sge->csio_fl_align = 64; break; + case X_INGPCIEBOUNDARY_128B: + sge->csio_fl_align = 128; break; + case X_INGPCIEBOUNDARY_256B: + sge->csio_fl_align = 256; break; + case X_INGPCIEBOUNDARY_512B: + sge->csio_fl_align = 512; break; + case X_INGPCIEBOUNDARY_1024B: + sge->csio_fl_align = 1024; break; + case X_INGPCIEBOUNDARY_2048B: + sge->csio_fl_align = 2048; break; + case X_INGPCIEBOUNDARY_4096B: + sge->csio_fl_align = 4096; break; + } + + for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++) + csio_get_flbuf_size(hw, sge, i); + + timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1_A); + timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3_A); + timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5_A); + + sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw, + TIMERVALUE0_G(timer_value_0_and_1)); + sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw, + TIMERVALUE1_G(timer_value_0_and_1)); + sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw, + TIMERVALUE2_G(timer_value_2_and_3)); + sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw, + TIMERVALUE3_G(timer_value_2_and_3)); + sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw, + TIMERVALUE4_G(timer_value_4_and_5)); + sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw, + TIMERVALUE5_G(timer_value_4_and_5)); + + ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD_A); + sge->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold); + sge->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold); + sge->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold); + sge->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold); + + csio_init_intr_coalesce_parms(hw); +} + +/* + * csio_wr_set_sge - Initialize SGE registers + * @hw: HW module. + * + * Used by Master function to initialize SGE registers in the absence + * of a config file. + */ +static void +csio_wr_set_sge(struct csio_hw *hw) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_sge *sge = &wrm->sge; + int i; + + /* + * Set up our basic SGE mode to deliver CPL messages to our Ingress + * Queue and Packet Date to the Free List. + */ + csio_set_reg_field(hw, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F); + + sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A); + + /* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */ + + /* + * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows + * and generate an interrupt when this occurs so we can recover. + */ + csio_set_reg_field(hw, SGE_DBFIFO_STATUS_A, + LP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M), + LP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH)); + csio_set_reg_field(hw, SGE_DBFIFO_STATUS2_A, + HP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M), + HP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH)); + + csio_set_reg_field(hw, SGE_DOORBELL_CONTROL_A, ENABLE_DROP_F, + ENABLE_DROP_F); + + /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). 
*/ + + CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1); + csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1) + & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2_A); + csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1) + & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3_A); + CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4); + CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5); + CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6); + CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7); + CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8); + + for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++) + csio_get_flbuf_size(hw, sge, i); + + /* Initialize interrupt coalescing attributes */ + sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0; + sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1; + sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2; + sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3; + sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4; + sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5; + + sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0; + sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1; + sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2; + sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3; + + csio_wr_reg32(hw, THRESHOLD_0_V(sge->counter_val[0]) | + THRESHOLD_1_V(sge->counter_val[1]) | + THRESHOLD_2_V(sge->counter_val[2]) | + THRESHOLD_3_V(sge->counter_val[3]), + SGE_INGRESS_RX_THRESHOLD_A); + + csio_wr_reg32(hw, + TIMERVALUE0_V(csio_us_to_core_ticks(hw, sge->timer_val[0])) | + TIMERVALUE1_V(csio_us_to_core_ticks(hw, sge->timer_val[1])), + SGE_TIMER_VALUE_0_AND_1_A); + + csio_wr_reg32(hw, + TIMERVALUE2_V(csio_us_to_core_ticks(hw, sge->timer_val[2])) | + TIMERVALUE3_V(csio_us_to_core_ticks(hw, sge->timer_val[3])), + SGE_TIMER_VALUE_2_AND_3_A); + + csio_wr_reg32(hw, + TIMERVALUE4_V(csio_us_to_core_ticks(hw, sge->timer_val[4])) | + TIMERVALUE5_V(csio_us_to_core_ticks(hw, sge->timer_val[5])), + SGE_TIMER_VALUE_4_AND_5_A); + + csio_init_intr_coalesce_parms(hw); +} + +void +csio_wr_sge_init(struct csio_hw *hw) +{ + /* + * If we are master and chip is not initialized: + * - If we plan to use the config file, we need to fixup some + * host specific registers, and read the rest of the SGE + * configuration. + * - If we dont plan to use the config file, we need to initialize + * SGE entirely, including fixing the host specific registers. + * If we are master and chip is initialized, just read and work off of + * the already initialized SGE values. + * If we arent the master, we are only allowed to read and work off of + * the already initialized SGE values. + * + * Therefore, before calling this function, we assume that the master- + * ship of the card, state and whether to use config file or not, have + * already been decided. + */ + if (csio_is_hw_master(hw)) { + if (hw->fw_state != CSIO_DEV_STATE_INIT) + csio_wr_fixup_host_params(hw); + + if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) + csio_wr_get_sge(hw); + else + csio_wr_set_sge(hw); + } else + csio_wr_get_sge(hw); +} + +/* + * csio_wrm_init - Initialize Work request module. + * @wrm: WR module + * @hw: HW pointer + * + * Allocates memory for an array of queue pointers starting at q_arr. 
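/*
 * SGE_FL_BUFFER_SIZE2/3 above are rounded up to the free-list
 * alignment with the usual power-of-two trick.  A standalone sketch
 * with a few worked values:
 */
#include <assert.h>
#include <stdint.h>

static inline uint32_t fl_align_up(uint32_t size, uint32_t align)
{
	/* Valid only when align is a power of two, which the SGE
	 * boundary encodings guarantee. */
	return (size + align - 1) & ~(align - 1);
}

int main(void)
{
	assert(fl_align_up(1500, 64) == 1536);
	assert(fl_align_up(2048, 64) == 2048);	/* already aligned */
	assert(fl_align_up(9000, 128) == 9088);
	return 0;
}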
+ */ +int +csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw) +{ + int i; + + if (!wrm->num_q) { + csio_err(hw, "Num queues is not set\n"); + return -EINVAL; + } + + wrm->q_arr = kcalloc(wrm->num_q, sizeof(struct csio_q *), GFP_KERNEL); + if (!wrm->q_arr) + goto err; + + for (i = 0; i < wrm->num_q; i++) { + wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL); + if (!wrm->q_arr[i]) { + while (--i >= 0) + kfree(wrm->q_arr[i]); + goto err_free_arr; + } + } + wrm->free_qidx = 0; + + return 0; + +err_free_arr: + kfree(wrm->q_arr); +err: + return -ENOMEM; +} + +/* + * csio_wrm_exit - Initialize Work request module. + * @wrm: WR module + * @hw: HW module + * + * Uninitialize WR module. Free q_arr and pointers in it. + * We have the additional job of freeing the DMA memory associated + * with the queues. + */ +void +csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw) +{ + int i; + uint32_t j; + struct csio_q *q; + struct csio_dma_buf *buf; + + for (i = 0; i < wrm->num_q; i++) { + q = wrm->q_arr[i]; + + if (wrm->free_qidx && (i < wrm->free_qidx)) { + if (q->type == CSIO_FREELIST) { + if (!q->un.fl.bufs) + continue; + for (j = 0; j < q->credits; j++) { + buf = &q->un.fl.bufs[j]; + if (!buf->vaddr) + continue; + dma_free_coherent(&hw->pdev->dev, + buf->len, buf->vaddr, + buf->paddr); + } + kfree(q->un.fl.bufs); + } + dma_free_coherent(&hw->pdev->dev, q->size, + q->vstart, q->pstart); + } + kfree(q); + } + + hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED; + + kfree(wrm->q_arr); +} diff --git a/drivers/scsi/csiostor/csio_wr.h b/drivers/scsi/csiostor/csio_wr.h new file mode 100644 index 000000000..0c0dd9a65 --- /dev/null +++ b/drivers/scsi/csiostor/csio_wr.h @@ -0,0 +1,512 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CSIO_WR_H__ +#define __CSIO_WR_H__ + +#include + +#include "csio_defs.h" +#include "t4fw_api.h" +#include "t4fw_api_stor.h" + +/* + * SGE register field values. 
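/*
 * csio_wrm_init() above uses the allocate-then-unwind pattern:
 * allocate the pointer array, then each queue, and on a partial
 * failure free whatever was already allocated before failing the
 * whole call.  The same pattern shown standalone with plain
 * calloc()/free() in place of the kernel allocators:
 */
#include <stdlib.h>

struct demo_q { int dummy; };

static int demo_q_array_alloc(struct demo_q ***q_arr_out, int num_q)
{
	struct demo_q **q_arr;
	int i;

	q_arr = calloc(num_q, sizeof(*q_arr));
	if (!q_arr)
		return -1;

	for (i = 0; i < num_q; i++) {
		q_arr[i] = calloc(1, sizeof(**q_arr));
		if (!q_arr[i]) {
			while (--i >= 0)
				free(q_arr[i]);
			free(q_arr);
			return -1;
		}
	}
	*q_arr_out = q_arr;
	return 0;
}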
+ */ +#define X_INGPCIEBOUNDARY_32B 0 +#define X_INGPCIEBOUNDARY_64B 1 +#define X_INGPCIEBOUNDARY_128B 2 +#define X_INGPCIEBOUNDARY_256B 3 +#define X_INGPCIEBOUNDARY_512B 4 +#define X_INGPCIEBOUNDARY_1024B 5 +#define X_INGPCIEBOUNDARY_2048B 6 +#define X_INGPCIEBOUNDARY_4096B 7 + +/* GTS register */ +#define X_TIMERREG_COUNTER0 0 +#define X_TIMERREG_COUNTER1 1 +#define X_TIMERREG_COUNTER2 2 +#define X_TIMERREG_COUNTER3 3 +#define X_TIMERREG_COUNTER4 4 +#define X_TIMERREG_COUNTER5 5 +#define X_TIMERREG_RESTART_COUNTER 6 +#define X_TIMERREG_UPDATE_CIDX 7 + +/* + * Egress Context field values + */ +#define X_FETCHBURSTMIN_16B 0 +#define X_FETCHBURSTMIN_32B 1 +#define X_FETCHBURSTMIN_64B 2 +#define X_FETCHBURSTMIN_128B 3 + +#define X_FETCHBURSTMAX_64B 0 +#define X_FETCHBURSTMAX_128B 1 +#define X_FETCHBURSTMAX_256B 2 +#define X_FETCHBURSTMAX_512B 3 + +#define X_HOSTFCMODE_NONE 0 +#define X_HOSTFCMODE_INGRESS_QUEUE 1 +#define X_HOSTFCMODE_STATUS_PAGE 2 +#define X_HOSTFCMODE_BOTH 3 + +/* + * Ingress Context field values + */ +#define X_UPDATESCHEDULING_TIMER 0 +#define X_UPDATESCHEDULING_COUNTER_OPTTIMER 1 + +#define X_UPDATEDELIVERY_NONE 0 +#define X_UPDATEDELIVERY_INTERRUPT 1 +#define X_UPDATEDELIVERY_STATUS_PAGE 2 +#define X_UPDATEDELIVERY_BOTH 3 + +#define X_INTERRUPTDESTINATION_PCIE 0 +#define X_INTERRUPTDESTINATION_IQ 1 + +#define X_RSPD_TYPE_FLBUF 0 +#define X_RSPD_TYPE_CPL 1 +#define X_RSPD_TYPE_INTR 2 + +/* WR status is at the same position as retval in a CMD header */ +#define csio_wr_status(_wr) \ + (FW_CMD_RETVAL_G(ntohl(((struct fw_cmd_hdr *)(_wr))->lo))) + +struct csio_hw; + +extern int csio_intr_coalesce_cnt; +extern int csio_intr_coalesce_time; + +/* Ingress queue params */ +struct csio_iq_params { + + uint8_t iq_start:1; + uint8_t iq_stop:1; + uint8_t pfn:3; + + uint8_t vfn; + + uint16_t physiqid; + uint16_t iqid; + + uint16_t fl0id; + uint16_t fl1id; + + uint8_t viid; + + uint8_t type; + uint8_t iqasynch; + uint8_t reserved4; + + uint8_t iqandst; + uint8_t iqanus; + uint8_t iqanud; + + uint16_t iqandstindex; + + uint8_t iqdroprss; + uint8_t iqpciech; + uint8_t iqdcaen; + + uint8_t iqdcacpu; + uint8_t iqintcntthresh; + uint8_t iqo; + + uint8_t iqcprio; + uint8_t iqesize; + + uint16_t iqsize; + + uint64_t iqaddr; + + uint8_t iqflintiqhsen; + uint8_t reserved5; + uint8_t iqflintcongen; + uint8_t iqflintcngchmap; + + uint32_t reserved6; + + uint8_t fl0hostfcmode; + uint8_t fl0cprio; + uint8_t fl0paden; + uint8_t fl0packen; + uint8_t fl0congen; + uint8_t fl0dcaen; + + uint8_t fl0dcacpu; + uint8_t fl0fbmin; + + uint8_t fl0fbmax; + uint8_t fl0cidxfthresho; + uint8_t fl0cidxfthresh; + + uint16_t fl0size; + + uint64_t fl0addr; + + uint64_t reserved7; + + uint8_t fl1hostfcmode; + uint8_t fl1cprio; + uint8_t fl1paden; + uint8_t fl1packen; + uint8_t fl1congen; + uint8_t fl1dcaen; + + uint8_t fl1dcacpu; + uint8_t fl1fbmin; + + uint8_t fl1fbmax; + uint8_t fl1cidxfthresho; + uint8_t fl1cidxfthresh; + + uint16_t fl1size; + + uint64_t fl1addr; +}; + +/* Egress queue params */ +struct csio_eq_params { + + uint8_t pfn; + uint8_t vfn; + + uint8_t eqstart:1; + uint8_t eqstop:1; + + uint16_t physeqid; + uint32_t eqid; + + uint8_t hostfcmode:2; + uint8_t cprio:1; + uint8_t pciechn:3; + + uint16_t iqid; + + uint8_t dcaen:1; + uint8_t dcacpu:5; + + uint8_t fbmin:3; + uint8_t fbmax:3; + + uint8_t cidxfthresho:1; + uint8_t cidxfthresh:3; + + uint16_t eqsize; + + uint64_t eqaddr; +}; + +struct csio_dma_buf { + struct list_head list; + void *vaddr; /* Virtual address */ + dma_addr_t paddr; /* Physical 
address */ + uint32_t len; /* Buffer size */ +}; + +/* Generic I/O request structure */ +struct csio_ioreq { + struct csio_sm sm; /* SM, List + * should be the first member + */ + int iq_idx; /* Ingress queue index */ + int eq_idx; /* Egress queue index */ + uint32_t nsge; /* Number of SG elements */ + uint32_t tmo; /* Driver timeout */ + uint32_t datadir; /* Data direction */ + struct csio_dma_buf dma_buf; /* Req/resp DMA buffers */ + uint16_t wr_status; /* WR completion status */ + int16_t drv_status; /* Driver internal status */ + struct csio_lnode *lnode; /* Owner lnode */ + struct csio_rnode *rnode; /* Src/destination rnode */ + void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *); + /* completion callback */ + void *scratch1; /* Scratch area 1. + */ + void *scratch2; /* Scratch area 2. */ + struct list_head gen_list; /* Any list associated with + * this ioreq. + */ + uint64_t fw_handle; /* Unique handle passed + * to FW + */ + uint8_t dcopy; /* Data copy required */ + uint8_t reserved1; + uint16_t reserved2; + struct completion cmplobj; /* ioreq completion object */ +} ____cacheline_aligned_in_smp; + +/* + * Egress status page for egress cidx updates + */ +struct csio_qstatus_page { + __be32 qid; + __be16 cidx; + __be16 pidx; +}; + + +enum { + CSIO_MAX_FLBUF_PER_IQWR = 4, + CSIO_QCREDIT_SZ = 64, /* pidx/cidx increments + * in bytes + */ + CSIO_MAX_QID = 0xFFFF, + CSIO_MAX_IQ = 128, + + CSIO_SGE_NTIMERS = 6, + CSIO_SGE_NCOUNTERS = 4, + CSIO_SGE_FL_SIZE_REGS = 16, +}; + +/* Defines for type */ +enum { + CSIO_EGRESS = 1, + CSIO_INGRESS = 2, + CSIO_FREELIST = 3, +}; + +/* + * Structure for footer (last 2 flits) of Ingress Queue Entry. + */ +struct csio_iqwr_footer { + __be32 hdrbuflen_pidx; + __be32 pldbuflen_qid; + union { + u8 type_gen; + __be64 last_flit; + } u; +}; + +#define IQWRF_NEWBUF (1 << 31) +#define IQWRF_LEN_GET(x) (((x) >> 0) & 0x7fffffffU) +#define IQWRF_GEN_SHIFT 7 +#define IQWRF_TYPE_GET(x) (((x) >> 4) & 0x3U) + + +/* + * WR pair: + * ======== + * A WR can start towards the end of a queue, and then continue at the + * beginning, since the queue is considered to be circular. This will + * require a pair of address/len to be passed back to the caller - + * hence the Work request pair structure. + */ +struct csio_wr_pair { + void *addr1; + uint32_t size1; + void *addr2; + uint32_t size2; +}; + +/* + * The following structure is used by ingress processing to return the + * free list buffers to consumers. + */ +struct csio_fl_dma_buf { + struct csio_dma_buf flbufs[CSIO_MAX_FLBUF_PER_IQWR]; + /* Freelist DMA buffers */ + int offset; /* Offset within the + * first FL buf. + */ + uint32_t totlen; /* Total length */ + uint8_t defer_free; /* Free of buffer can + * deferred + */ +}; + +/* Data-types */ +typedef void (*iq_handler_t)(struct csio_hw *, void *, uint32_t, + struct csio_fl_dma_buf *, void *); + +struct csio_iq { + uint16_t iqid; /* Queue ID */ + uint16_t physiqid; /* Physical Queue ID */ + uint16_t genbit; /* Generation bit, + * initially set to 1 + */ + int flq_idx; /* Freelist queue index */ + iq_handler_t iq_intx_handler; /* IQ INTx handler routine */ +}; + +struct csio_eq { + uint16_t eqid; /* Qid */ + uint16_t physeqid; /* Physical Queue ID */ + uint8_t wrap[512]; /* Temp area for q-wrap around*/ +}; + +struct csio_fl { + uint16_t flid; /* Qid */ + uint16_t packen; /* Packing enabled? 
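/*
 * The WR pair comment above describes a work request that wraps past
 * the end of the circular queue and is therefore handed back to the
 * caller as two address/length segments.  A standalone analogue of
 * copying a payload into such a pair; the names and sizes are
 * illustrative, not the driver's csio_wr_copy_to_wrp() API.
 */
#include <stdint.h>
#include <string.h>

struct demo_wr_pair {
	void *addr1;
	uint32_t size1;
	void *addr2;		/* unused when the WR does not wrap */
	uint32_t size2;
};

static void demo_copy_to_wr_pair(const void *src, uint32_t len,
				 const struct demo_wr_pair *wrp)
{
	uint32_t first = len < wrp->size1 ? len : wrp->size1;

	memcpy(wrp->addr1, src, first);
	if (len > first)	/* remainder continues at the queue start */
		memcpy(wrp->addr2, (const uint8_t *)src + first, len - first);
}

int main(void)
{
	uint8_t ring[128], payload[96];
	struct demo_wr_pair wrp = {
		.addr1 = &ring[64], .size1 = 64,	/* tail of the ring */
		.addr2 = &ring[0],  .size2 = 64,	/* wrap to the start */
	};

	memset(payload, 0xab, sizeof(payload));
	demo_copy_to_wr_pair(payload, sizeof(payload), &wrp);
	return 0;
}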
*/ + int offset; /* Offset within FL buf */ + int sreg; /* Size register */ + struct csio_dma_buf *bufs; /* Free list buffer ptr array + * indexed using flq->cidx/pidx + */ +}; + +struct csio_qstats { + uint32_t n_tot_reqs; /* Total no. of Requests */ + uint32_t n_tot_rsps; /* Total no. of responses */ + uint32_t n_qwrap; /* Queue wraps */ + uint32_t n_eq_wr_split; /* Number of split EQ WRs */ + uint32_t n_qentry; /* Queue entry */ + uint32_t n_qempty; /* Queue empty */ + uint32_t n_qfull; /* Queue fulls */ + uint32_t n_rsp_unknown; /* Unknown response type */ + uint32_t n_stray_comp; /* Stray completion intr */ + uint32_t n_flq_refill; /* Number of FL refills */ +}; + +/* Queue metadata */ +struct csio_q { + uint16_t type; /* Type: Ingress/Egress/FL */ + uint16_t pidx; /* producer index */ + uint16_t cidx; /* consumer index */ + uint16_t inc_idx; /* Incremental index */ + uint32_t wr_sz; /* Size of all WRs in this q + * if fixed + */ + void *vstart; /* Base virtual address + * of queue + */ + void *vwrap; /* Virtual end address to + * wrap around at + */ + uint32_t credits; /* Size of queue in credits */ + void *owner; /* Owner */ + union { /* Queue contexts */ + struct csio_iq iq; + struct csio_eq eq; + struct csio_fl fl; + } un; + + dma_addr_t pstart; /* Base physical address of + * queue + */ + uint32_t portid; /* PCIE Channel */ + uint32_t size; /* Size of queue in bytes */ + struct csio_qstats stats; /* Statistics */ +} ____cacheline_aligned_in_smp; + +struct csio_sge { + uint32_t csio_fl_align; /* Calculated and cached + * for fast path + */ + uint32_t sge_control; /* padding, boundaries, + * lengths, etc. + */ + uint32_t sge_host_page_size; /* Host page size */ + uint32_t sge_fl_buf_size[CSIO_SGE_FL_SIZE_REGS]; + /* free list buffer sizes */ + uint16_t timer_val[CSIO_SGE_NTIMERS]; + uint8_t counter_val[CSIO_SGE_NCOUNTERS]; +}; + +/* Work request module */ +struct csio_wrm { + int num_q; /* Number of queues */ + struct csio_q **q_arr; /* Array of queue pointers + * allocated dynamically + * based on configured values + */ + uint32_t fw_iq_start; /* Start ID of IQ for this fn*/ + uint32_t fw_eq_start; /* Start ID of EQ for this fn*/ + struct csio_q *intr_map[CSIO_MAX_IQ]; + /* IQ-id to IQ map table. 
*/ + int free_qidx; /* queue idx of free queue */ + struct csio_sge sge; /* SGE params */ +}; + +#define csio_get_q(__hw, __idx) ((__hw)->wrm.q_arr[__idx]) +#define csio_q_type(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->type) +#define csio_q_pidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pidx) +#define csio_q_cidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->cidx) +#define csio_q_inc_idx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->inc_idx) +#define csio_q_vstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->vstart) +#define csio_q_pstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pstart) +#define csio_q_size(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->size) +#define csio_q_credits(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->credits) +#define csio_q_portid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->portid) +#define csio_q_wr_sz(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->wr_sz) +#define csio_q_iqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid) +#define csio_q_physiqid(__hw, __idx) \ + ((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid) +#define csio_q_iq_flq_idx(__hw, __idx) \ + ((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx) +#define csio_q_eqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.eqid) +#define csio_q_flid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.fl.flid) + +#define csio_q_physeqid(__hw, __idx) \ + ((__hw)->wrm.q_arr[(__idx)]->un.eq.physeqid) +#define csio_iq_has_fl(__iq) ((__iq)->un.iq.flq_idx != -1) + +#define csio_q_iq_to_flid(__hw, __iq_idx) \ + csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_qidx)]->un.iq.flq_idx) +#define csio_q_set_intr_map(__hw, __iq_idx, __rel_iq_id) \ + (__hw)->wrm.intr_map[__rel_iq_id] = csio_get_q(__hw, __iq_idx) +#define csio_q_eq_wrap(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.wrap) + +struct csio_mb; + +int csio_wr_alloc_q(struct csio_hw *, uint32_t, uint32_t, + uint16_t, void *, uint32_t, int, iq_handler_t); +int csio_wr_iq_create(struct csio_hw *, void *, int, + uint32_t, uint8_t, bool, + void (*)(struct csio_hw *, struct csio_mb *)); +int csio_wr_eq_create(struct csio_hw *, void *, int, int, uint8_t, + void (*)(struct csio_hw *, struct csio_mb *)); +int csio_wr_destroy_queues(struct csio_hw *, bool cmd); + + +int csio_wr_get(struct csio_hw *, int, uint32_t, + struct csio_wr_pair *); +void csio_wr_copy_to_wrp(void *, struct csio_wr_pair *, uint32_t, uint32_t); +int csio_wr_issue(struct csio_hw *, int, bool); +int csio_wr_process_iq(struct csio_hw *, struct csio_q *, + void (*)(struct csio_hw *, void *, + uint32_t, struct csio_fl_dma_buf *, + void *), + void *); +int csio_wr_process_iq_idx(struct csio_hw *, int, + void (*)(struct csio_hw *, void *, + uint32_t, struct csio_fl_dma_buf *, + void *), + void *); + +void csio_wr_sge_init(struct csio_hw *); +int csio_wrm_init(struct csio_wrm *, struct csio_hw *); +void csio_wrm_exit(struct csio_wrm *, struct csio_hw *); + +#endif /* ifndef __CSIO_WR_H__ */ diff --git a/drivers/scsi/csiostor/t4fw_api_stor.h b/drivers/scsi/csiostor/t4fw_api_stor.h new file mode 100644 index 000000000..097e52c0f --- /dev/null +++ b/drivers/scsi/csiostor/t4fw_api_stor.h @@ -0,0 +1,539 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _T4FW_API_STOR_H_ +#define _T4FW_API_STOR_H_ + + +/****************************************************************************** + * R E T U R N V A L U E S + ********************************/ + +enum fw_fcoe_link_sub_op { + FCOE_LINK_DOWN = 0x0, + FCOE_LINK_UP = 0x1, + FCOE_LINK_COND = 0x2, +}; + +enum fw_fcoe_link_status { + FCOE_LINKDOWN = 0x0, + FCOE_LINKUP = 0x1, +}; + +enum fw_ofld_prot { + PROT_FCOE = 0x1, + PROT_ISCSI = 0x2, +}; + +enum rport_type_fcoe { + FLOGI_VFPORT = 0x1, /* 0xfffffe */ + FDISC_VFPORT = 0x2, /* 0xfffffe */ + NS_VNPORT = 0x3, /* 0xfffffc */ + REG_FC4_VNPORT = 0x4, /* any FC4 type VN_PORT */ + REG_VNPORT = 0x5, /* 0xfffxxx - non FC4 port in switch */ + FDMI_VNPORT = 0x6, /* 0xfffffa */ + FAB_CTLR_VNPORT = 0x7, /* 0xfffffd */ +}; + +enum event_cause_fcoe { + PLOGI_ACC_RCVD = 0x01, + PLOGI_RJT_RCVD = 0x02, + PLOGI_RCVD = 0x03, + PLOGO_RCVD = 0x04, + PRLI_ACC_RCVD = 0x05, + PRLI_RJT_RCVD = 0x06, + PRLI_RCVD = 0x07, + PRLO_RCVD = 0x08, + NPORT_ID_CHGD = 0x09, + FLOGO_RCVD = 0x0a, + CLR_VIRT_LNK_RCVD = 0x0b, + FLOGI_ACC_RCVD = 0x0c, + FLOGI_RJT_RCVD = 0x0d, + FDISC_ACC_RCVD = 0x0e, + FDISC_RJT_RCVD = 0x0f, + FLOGI_TMO_MAX_RETRY = 0x10, + IMPL_LOGO_ADISC_ACC = 0x11, + IMPL_LOGO_ADISC_RJT = 0x12, + IMPL_LOGO_ADISC_CNFLT = 0x13, + PRLI_TMO = 0x14, + ADISC_TMO = 0x15, + RSCN_DEV_LOST = 0x16, + SCR_ACC_RCVD = 0x17, + ADISC_RJT_RCVD = 0x18, + LOGO_SNT = 0x19, + PROTO_ERR_IMPL_LOGO = 0x1a, +}; + +enum fcoe_cmn_type { + FCOE_ELS, + FCOE_CT, + FCOE_SCSI_CMD, + FCOE_UNSOL_ELS, +}; + +enum fw_wr_stor_opcodes { + FW_RDEV_WR = 0x38, + FW_FCOE_ELS_CT_WR = 0x30, + FW_SCSI_WRITE_WR = 0x31, + FW_SCSI_READ_WR = 0x32, + FW_SCSI_CMD_WR = 0x33, + FW_SCSI_ABRT_CLS_WR = 0x34, +}; + +struct fw_rdev_wr { + __be32 op_to_immdlen; + __be32 alloc_to_len16; + __be64 cookie; + u8 protocol; + u8 event_cause; + u8 cur_state; + u8 prev_state; + __be32 flags_to_assoc_flowid; + union rdev_entry { + struct fcoe_rdev_entry { + __be32 flowid; + u8 protocol; + u8 event_cause; + u8 flags; + u8 rjt_reason; + u8 cur_login_st; + u8 prev_login_st; + __be16 rcv_fr_sz; + u8 rd_xfer_rdy_to_rport_type; + u8 vft_to_qos; + u8 org_proc_assoc_to_acc_rsp_code; + u8 enh_disc_to_tgt; + u8 wwnn[8]; + u8 wwpn[8]; + __be16 iqid; + u8 fc_oui[3]; + u8 r_id[3]; + } fcoe_rdev; + struct 
iscsi_rdev_entry { + __be32 flowid; + u8 protocol; + u8 event_cause; + u8 flags; + u8 r3; + __be16 iscsi_opts; + __be16 tcp_opts; + __be16 ip_opts; + __be16 max_rcv_len; + __be16 max_snd_len; + __be16 first_brst_len; + __be16 max_brst_len; + __be16 r4; + __be16 def_time2wait; + __be16 def_time2ret; + __be16 nop_out_intrvl; + __be16 non_scsi_to; + __be16 isid; + __be16 tsid; + __be16 port; + __be16 tpgt; + u8 r5[6]; + __be16 iqid; + } iscsi_rdev; + } u; +}; + +#define FW_RDEV_WR_FLOWID_GET(x) (((x) >> 8) & 0xfffff) +#define FW_RDEV_WR_ASSOC_FLOWID_GET(x) (((x) >> 0) & 0xfffff) +#define FW_RDEV_WR_RPORT_TYPE_GET(x) (((x) >> 0) & 0x1f) +#define FW_RDEV_WR_NPIV_GET(x) (((x) >> 6) & 0x1) +#define FW_RDEV_WR_CLASS_GET(x) (((x) >> 4) & 0x3) +#define FW_RDEV_WR_TASK_RETRY_ID_GET(x) (((x) >> 5) & 0x1) +#define FW_RDEV_WR_RETRY_GET(x) (((x) >> 4) & 0x1) +#define FW_RDEV_WR_CONF_CMPL_GET(x) (((x) >> 3) & 0x1) +#define FW_RDEV_WR_INI_GET(x) (((x) >> 1) & 0x1) +#define FW_RDEV_WR_TGT_GET(x) (((x) >> 0) & 0x1) + +struct fw_fcoe_els_ct_wr { + __be32 op_immdlen; + __be32 flowid_len16; + u64 cookie; + __be16 iqid; + u8 tmo_val; + u8 els_ct_type; + u8 ctl_pri; + u8 cp_en_class; + __be16 xfer_cnt; + u8 fl_to_sp; + u8 l_id[3]; + u8 r5; + u8 r_id[3]; + __be64 rsp_dmaaddr; + __be32 rsp_dmalen; + __be32 r6; +}; + +#define FW_FCOE_ELS_CT_WR_OPCODE(x) ((x) << 24) +#define FW_FCOE_ELS_CT_WR_OPCODE_GET(x) (((x) >> 24) & 0xff) +#define FW_FCOE_ELS_CT_WR_IMMDLEN(x) ((x) << 0) +#define FW_FCOE_ELS_CT_WR_IMMDLEN_GET(x) (((x) >> 0) & 0xff) +#define FW_FCOE_ELS_CT_WR_SP(x) ((x) << 0) + +struct fw_scsi_write_wr { + __be32 op_immdlen; + __be32 flowid_len16; + u64 cookie; + __be16 iqid; + u8 tmo_val; + u8 use_xfer_cnt; + union fw_scsi_write_priv { + struct fcoe_write_priv { + u8 ctl_pri; + u8 cp_en_class; + u8 r3_lo[2]; + } fcoe; + struct iscsi_write_priv { + u8 r3[4]; + } iscsi; + } u; + __be32 xfer_cnt; + __be32 ini_xfer_cnt; + __be64 rsp_dmaaddr; + __be32 rsp_dmalen; + __be32 r4; +}; + +#define FW_SCSI_WRITE_WR_IMMDLEN(x) ((x) << 0) + +struct fw_scsi_read_wr { + __be32 op_immdlen; + __be32 flowid_len16; + u64 cookie; + __be16 iqid; + u8 tmo_val; + u8 use_xfer_cnt; + union fw_scsi_read_priv { + struct fcoe_read_priv { + u8 ctl_pri; + u8 cp_en_class; + u8 r3_lo[2]; + } fcoe; + struct iscsi_read_priv { + u8 r3[4]; + } iscsi; + } u; + __be32 xfer_cnt; + __be32 ini_xfer_cnt; + __be64 rsp_dmaaddr; + __be32 rsp_dmalen; + __be32 r4; +}; + +#define FW_SCSI_READ_WR_IMMDLEN(x) ((x) << 0) + +struct fw_scsi_cmd_wr { + __be32 op_immdlen; + __be32 flowid_len16; + u64 cookie; + __be16 iqid; + u8 tmo_val; + u8 r3; + union fw_scsi_cmd_priv { + struct fcoe_cmd_priv { + u8 ctl_pri; + u8 cp_en_class; + u8 r4_lo[2]; + } fcoe; + struct iscsi_cmd_priv { + u8 r4[4]; + } iscsi; + } u; + u8 r5[8]; + __be64 rsp_dmaaddr; + __be32 rsp_dmalen; + __be32 r6; +}; + +#define FW_SCSI_CMD_WR_IMMDLEN(x) ((x) << 0) + +#define SCSI_ABORT 0 +#define SCSI_CLOSE 1 + +struct fw_scsi_abrt_cls_wr { + __be32 op_immdlen; + __be32 flowid_len16; + u64 cookie; + __be16 iqid; + u8 tmo_val; + u8 sub_opcode_to_chk_all_io; + u8 r3[4]; + u64 t_cookie; +}; + +#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(x) ((x) << 2) +#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(x) (((x) >> 2) & 0x3f) +#define FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(x) ((x) << 0) + +enum fw_cmd_stor_opcodes { + FW_FCOE_RES_INFO_CMD = 0x31, + FW_FCOE_LINK_CMD = 0x32, + FW_FCOE_VNP_CMD = 0x33, + FW_FCOE_SPARAMS_CMD = 0x35, + FW_FCOE_STATS_CMD = 0x37, + FW_FCOE_FCF_CMD = 0x38, +}; + +struct fw_fcoe_res_info_cmd { + __be32 
op_to_read; + __be32 retval_len16; + __be16 e_d_tov; + __be16 r_a_tov_seq; + __be16 r_a_tov_els; + __be16 r_r_tov; + __be32 max_xchgs; + __be32 max_ssns; + __be32 used_xchgs; + __be32 used_ssns; + __be32 max_fcfs; + __be32 max_vnps; + __be32 used_fcfs; + __be32 used_vnps; +}; + +struct fw_fcoe_link_cmd { + __be32 op_to_portid; + __be32 retval_len16; + __be32 sub_opcode_fcfi; + u8 r3; + u8 lstatus; + __be16 flags; + u8 r4; + u8 set_vlan; + __be16 vlan_id; + __be32 vnpi_pkd; + __be16 r6; + u8 phy_mac[6]; + u8 vnport_wwnn[8]; + u8 vnport_wwpn[8]; +}; + +#define FW_FCOE_LINK_CMD_PORTID(x) ((x) << 0) +#define FW_FCOE_LINK_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) +#define FW_FCOE_LINK_CMD_SUB_OPCODE(x) ((x) << 24U) +#define FW_FCOE_LINK_CMD_FCFI(x) ((x) << 0) +#define FW_FCOE_LINK_CMD_FCFI_GET(x) (((x) >> 0) & 0xffffff) +#define FW_FCOE_LINK_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff) + +struct fw_fcoe_vnp_cmd { + __be32 op_to_fcfi; + __be32 alloc_to_len16; + __be32 gen_wwn_to_vnpi; + __be32 vf_id; + __be16 iqid; + u8 vnport_mac[6]; + u8 vnport_wwnn[8]; + u8 vnport_wwpn[8]; + u8 cmn_srv_parms[16]; + u8 clsp_word_0_1[8]; +}; + +#define FW_FCOE_VNP_CMD_FCFI(x) ((x) << 0) +#define FW_FCOE_VNP_CMD_ALLOC (1U << 31) +#define FW_FCOE_VNP_CMD_FREE (1U << 30) +#define FW_FCOE_VNP_CMD_MODIFY (1U << 29) +#define FW_FCOE_VNP_CMD_GEN_WWN (1U << 22) +#define FW_FCOE_VNP_CMD_VFID_EN (1U << 20) +#define FW_FCOE_VNP_CMD_VNPI(x) ((x) << 0) +#define FW_FCOE_VNP_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff) + +struct fw_fcoe_sparams_cmd { + __be32 op_to_portid; + __be32 retval_len16; + u8 r3[7]; + u8 cos; + u8 lport_wwnn[8]; + u8 lport_wwpn[8]; + u8 cmn_srv_parms[16]; + u8 cls_srv_parms[16]; +}; + +#define FW_FCOE_SPARAMS_CMD_PORTID(x) ((x) << 0) + +struct fw_fcoe_stats_cmd { + __be32 op_to_flowid; + __be32 free_to_len16; + union fw_fcoe_stats { + struct fw_fcoe_stats_ctl { + u8 nstats_port; + u8 port_valid_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_fcoe_port_stats { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_drop_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } port_stats; + struct fw_fcoe_fcf_stats { + __be32 fip_tx_bytes; + __be32 fip_tx_fr; + __be64 fcf_ka; + __be64 mcast_adv_rcvd; + __be16 ucast_adv_rcvd; + __be16 sol_sent; + __be16 vlan_req; + __be16 vlan_rpl; + __be16 clr_vlink; + __be16 link_down; + __be16 link_up; + __be16 logo; + __be16 flogi_req; + __be16 flogi_rpl; + __be16 fdisc_req; + __be16 fdisc_rpl; + __be16 fka_prd_chg; + __be16 fc_map_chg; + __be16 vfid_chg; + u8 no_fka_req; + u8 no_vnp; + } fcf_stats; + struct fw_fcoe_pcb_stats { + __be64 tx_bytes; + __be64 tx_frames; + __be64 rx_bytes; + __be64 rx_frames; + __be32 vnp_ka; + __be32 unsol_els_rcvd; + __be64 unsol_cmd_rcvd; + __be16 implicit_logo; + __be16 flogi_inv_sparm; + __be16 fdisc_inv_sparm; + __be16 flogi_rjt; + __be16 fdisc_rjt; + __be16 no_ssn; + __be16 mac_flt_fail; + __be16 inv_fr_rcvd; + } pcb_stats; + struct fw_fcoe_scb_stats { + __be64 tx_bytes; + __be64 tx_frames; + __be64 rx_bytes; + __be64 rx_frames; + __be32 host_abrt_req; + __be32 adap_auto_abrt; + __be32 adap_abrt_rsp; + __be32 host_ios_req; + __be16 ssn_offl_ios; + __be16 
ssn_not_rdy_ios; + u8 rx_data_ddp_err; + u8 ddp_flt_set_err; + __be16 rx_data_fr_err; + u8 bad_st_abrt_req; + u8 no_io_abrt_req; + u8 abort_tmo; + u8 abort_tmo_2; + __be32 abort_req; + u8 no_ppod_res_tmo; + u8 bp_tmo; + u8 adap_auto_cls; + u8 no_io_cls_req; + __be32 host_cls_req; + __be64 unsol_cmd_rcvd; + __be32 plogi_req_rcvd; + __be32 prli_req_rcvd; + __be16 logo_req_rcvd; + __be16 prlo_req_rcvd; + __be16 plogi_rjt_rcvd; + __be16 prli_rjt_rcvd; + __be32 adisc_req_rcvd; + __be32 rscn_rcvd; + __be32 rrq_req_rcvd; + __be32 unsol_els_rcvd; + u8 adisc_rjt_rcvd; + u8 scr_rjt; + u8 ct_rjt; + u8 inval_bls_rcvd; + __be32 ba_rjt_rcvd; + } scb_stats; + } u; +}; + +#define FW_FCOE_STATS_CMD_FLOWID(x) ((x) << 0) +#define FW_FCOE_STATS_CMD_FREE (1U << 30) +#define FW_FCOE_STATS_CMD_NSTATS(x) ((x) << 4) +#define FW_FCOE_STATS_CMD_PORT(x) ((x) << 0) +#define FW_FCOE_STATS_CMD_PORT_VALID (1U << 7) +#define FW_FCOE_STATS_CMD_IX(x) ((x) << 0) + +struct fw_fcoe_fcf_cmd { + __be32 op_to_fcfi; + __be32 retval_len16; + __be16 priority_pkd; + u8 mac[6]; + u8 name_id[8]; + u8 fabric[8]; + __be16 vf_id; + __be16 max_fcoe_size; + u8 vlan_id; + u8 fc_map[3]; + __be32 fka_adv; + __be32 r6; + u8 r7_hi; + u8 fpma_to_portid; + u8 spma_mac[6]; + __be64 r8; +}; + +#define FW_FCOE_FCF_CMD_FCFI(x) ((x) << 0) +#define FW_FCOE_FCF_CMD_FCFI_GET(x) (((x) >> 0) & 0xfffff) +#define FW_FCOE_FCF_CMD_PRIORITY_GET(x) (((x) >> 0) & 0xff) +#define FW_FCOE_FCF_CMD_FPMA_GET(x) (((x) >> 6) & 0x1) +#define FW_FCOE_FCF_CMD_SPMA_GET(x) (((x) >> 5) & 0x1) +#define FW_FCOE_FCF_CMD_LOGIN_GET(x) (((x) >> 4) & 0x1) +#define FW_FCOE_FCF_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) + +#endif /* _T4FW_API_STOR_H_ */ diff --git a/drivers/scsi/cxgbi/Kconfig b/drivers/scsi/cxgbi/Kconfig new file mode 100644 index 000000000..75f9428a8 --- /dev/null +++ b/drivers/scsi/cxgbi/Kconfig @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +source "drivers/scsi/cxgbi/cxgb3i/Kconfig" +source "drivers/scsi/cxgbi/cxgb4i/Kconfig" diff --git a/drivers/scsi/cxgbi/Makefile b/drivers/scsi/cxgbi/Makefile new file mode 100644 index 000000000..abfd38a26 --- /dev/null +++ b/drivers/scsi/cxgbi/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only +ccflags-y += -I $(srctree)/drivers/net/ethernet/chelsio/libcxgb + +obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libcxgbi.o cxgb3i/ +obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libcxgbi.o cxgb4i/ diff --git a/drivers/scsi/cxgbi/cxgb3i/Kbuild b/drivers/scsi/cxgbi/cxgb3i/Kbuild new file mode 100644 index 000000000..8d8a43f5e --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb3i/Kbuild @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb3 +ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/libcxgb + +obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o diff --git a/drivers/scsi/cxgbi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig new file mode 100644 index 000000000..e20e6f3bf --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +config SCSI_CXGB3_ISCSI + tristate "Chelsio T3 iSCSI support" + depends on PCI && INET && (IPV6 || IPV6=n) + select NETDEVICES + select ETHERNET + select NET_VENDOR_CHELSIO + select CHELSIO_T3 + select CHELSIO_LIB + select SCSI_ISCSI_ATTRS + help + This driver supports iSCSI offload for the Chelsio T3 devices. 
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c new file mode 100644 index 000000000..ec6530240 --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -0,0 +1,1415 @@ +/* + * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management + * + * Copyright (C) 2003-2015 Chelsio Communications. All rights reserved. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this + * release for licensing terms and conditions. + * + * Written by: Dimitris Michailidis (dm@chelsio.com) + * Karen Xie (kxie@chelsio.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include +#include + +#include "common.h" +#include "t3_cpl.h" +#include "t3cdev.h" +#include "cxgb3_defs.h" +#include "cxgb3_ctl_defs.h" +#include "cxgb3_offload.h" +#include "firmware_exports.h" +#include "cxgb3i.h" + +static unsigned int dbg_level; +#include "../libcxgbi.h" + +#define DRV_MODULE_NAME "cxgb3i" +#define DRV_MODULE_DESC "Chelsio T3 iSCSI Driver" +#define DRV_MODULE_VERSION "2.0.1-ko" +#define DRV_MODULE_RELDATE "Apr. 2015" + +static char version[] = + DRV_MODULE_DESC " " DRV_MODULE_NAME + " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Chelsio Communications, Inc."); +MODULE_DESCRIPTION(DRV_MODULE_DESC); +MODULE_VERSION(DRV_MODULE_VERSION); +MODULE_LICENSE("GPL"); + +module_param(dbg_level, uint, 0644); +MODULE_PARM_DESC(dbg_level, "debug flag (default=0)"); + +static int cxgb3i_rcv_win = 256 * 1024; +module_param(cxgb3i_rcv_win, int, 0644); +MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)"); + +static int cxgb3i_snd_win = 128 * 1024; +module_param(cxgb3i_snd_win, int, 0644); +MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)"); + +static int cxgb3i_rx_credit_thres = 10 * 1024; +module_param(cxgb3i_rx_credit_thres, int, 0644); +MODULE_PARM_DESC(cxgb3i_rx_credit_thres, + "RX credits return threshold in bytes (default=10KB)"); + +static unsigned int cxgb3i_max_connect = 8 * 1024; +module_param(cxgb3i_max_connect, uint, 0644); +MODULE_PARM_DESC(cxgb3i_max_connect, "Max. 
# of connections (default=8092)"); + +static unsigned int cxgb3i_sport_base = 20000; +module_param(cxgb3i_sport_base, uint, 0644); +MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)"); + +static void cxgb3i_dev_open(struct t3cdev *); +static void cxgb3i_dev_close(struct t3cdev *); +static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32); + +static struct cxgb3_client t3_client = { + .name = DRV_MODULE_NAME, + .handlers = cxgb3i_cpl_handlers, + .add = cxgb3i_dev_open, + .remove = cxgb3i_dev_close, + .event_handler = cxgb3i_dev_event_handler, +}; + +static const struct scsi_host_template cxgb3i_host_template = { + .module = THIS_MODULE, + .name = DRV_MODULE_NAME, + .proc_name = DRV_MODULE_NAME, + .can_queue = CXGB3I_SCSI_HOST_QDEPTH, + .queuecommand = iscsi_queuecommand, + .change_queue_depth = scsi_change_queue_depth, + .sg_tablesize = SG_ALL, + .max_sectors = 0xFFFF, + .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, + .eh_timed_out = iscsi_eh_cmd_timed_out, + .eh_abort_handler = iscsi_eh_abort, + .eh_device_reset_handler = iscsi_eh_device_reset, + .eh_target_reset_handler = iscsi_eh_recover_target, + .target_alloc = iscsi_target_alloc, + .dma_boundary = PAGE_SIZE - 1, + .this_id = -1, + .track_queue_depth = 1, + .cmd_size = sizeof(struct iscsi_cmd), +}; + +static struct iscsi_transport cxgb3i_iscsi_transport = { + .owner = THIS_MODULE, + .name = DRV_MODULE_NAME, + /* owner and name should be set already */ + .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST + | CAP_DATADGST | CAP_DIGEST_OFFLOAD | + CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, + .attr_is_visible = cxgbi_attr_is_visible, + .get_host_param = cxgbi_get_host_param, + .set_host_param = cxgbi_set_host_param, + /* session management */ + .create_session = cxgbi_create_session, + .destroy_session = cxgbi_destroy_session, + .get_session_param = iscsi_session_get_param, + /* connection management */ + .create_conn = cxgbi_create_conn, + .bind_conn = cxgbi_bind_conn, + .unbind_conn = iscsi_conn_unbind, + .destroy_conn = iscsi_tcp_conn_teardown, + .start_conn = iscsi_conn_start, + .stop_conn = iscsi_conn_stop, + .get_conn_param = iscsi_conn_get_param, + .set_param = cxgbi_set_conn_param, + .get_stats = cxgbi_get_conn_stats, + /* pdu xmit req from user space */ + .send_pdu = iscsi_conn_send_pdu, + /* task */ + .init_task = iscsi_tcp_task_init, + .xmit_task = iscsi_tcp_task_xmit, + .cleanup_task = cxgbi_cleanup_task, + /* pdu */ + .alloc_pdu = cxgbi_conn_alloc_pdu, + .init_pdu = cxgbi_conn_init_pdu, + .xmit_pdu = cxgbi_conn_xmit_pdu, + .parse_pdu_itt = cxgbi_parse_pdu_itt, + /* TCP connect/disconnect */ + .get_ep_param = cxgbi_get_ep_param, + .ep_connect = cxgbi_ep_connect, + .ep_poll = cxgbi_ep_poll, + .ep_disconnect = cxgbi_ep_disconnect, + /* Error recovery timeout call */ + .session_recovery_timedout = iscsi_session_recovery_timedout, +}; + +static struct scsi_transport_template *cxgb3i_stt; + +/* + * CPL (Chelsio Protocol Language) defines a message passing interface between + * the host driver and Chelsio asic. + * The section below implments CPLs that related to iscsi tcp connection + * open/close/abort and data send/receive. 
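/*
 * The code above registers a table of CPL handlers with the lower-level
 * driver (cxgb3i_cpl_handlers via struct cxgb3_client); received CPL
 * messages are then dispatched to the matching handler, typically keyed
 * by opcode.  A minimal standalone sketch of such an opcode-to-handler
 * dispatch; the opcodes and message layout are hypothetical, not the
 * real CPL definitions.
 */
#include <stdint.h>
#include <stdio.h>

enum { DEMO_CPL_ACT_ESTABLISH, DEMO_CPL_PEER_CLOSE, DEMO_CPL_NR };

struct demo_cpl {
	uint8_t opcode;		/* payload would follow in a real CPL */
};

typedef int (*demo_cpl_handler_t)(const struct demo_cpl *cpl);

static int demo_act_establish(const struct demo_cpl *cpl)
{
	printf("opcode %u: connection established\n", cpl->opcode);
	return 0;
}

static int demo_peer_close(const struct demo_cpl *cpl)
{
	printf("opcode %u: peer sent FIN\n", cpl->opcode);
	return 0;
}

static demo_cpl_handler_t demo_handlers[DEMO_CPL_NR] = {
	[DEMO_CPL_ACT_ESTABLISH] = demo_act_establish,
	[DEMO_CPL_PEER_CLOSE]    = demo_peer_close,
};

static int demo_dispatch(const struct demo_cpl *cpl)
{
	if (cpl->opcode >= DEMO_CPL_NR || !demo_handlers[cpl->opcode])
		return -1;	/* unknown opcode: count and drop */
	return demo_handlers[cpl->opcode](cpl);
}

int main(void)
{
	struct demo_cpl est = { .opcode = DEMO_CPL_ACT_ESTABLISH };
	struct demo_cpl fin = { .opcode = DEMO_CPL_PEER_CLOSE };

	demo_dispatch(&est);
	demo_dispatch(&fin);
	return 0;
}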
+ */ + +static int push_tx_frames(struct cxgbi_sock *csk, int req_completion); + +static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, + const struct l2t_entry *e) +{ + unsigned int wscale = cxgbi_sock_compute_wscale(csk->rcv_win); + struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head; + + skb->priority = CPL_PRIORITY_SETUP; + + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid)); + req->local_port = csk->saddr.sin_port; + req->peer_port = csk->daddr.sin_port; + req->local_ip = csk->saddr.sin_addr.s_addr; + req->peer_ip = csk->daddr.sin_addr.s_addr; + + req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS | + V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) | + V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx)); + req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) | + V_RCV_BUFSIZ(csk->rcv_win >> 10)); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n", + csk, csk->state, csk->flags, csk->atid, + &req->local_ip, ntohs(req->local_port), + &req->peer_ip, ntohs(req->peer_port), + csk->mss_idx, e->idx, e->smt_idx); + + l2t_send(csk->cdev->lldev, skb, csk->l2t); +} + +static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb) +{ + cxgbi_sock_act_open_req_arp_failure(NULL, skb); +} + +/* + * CPL connection close request: host -> + * + * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to + * the write queue (i.e., after any unsent txt data). + */ +static void send_close_req(struct cxgbi_sock *csk) +{ + struct sk_buff *skb = csk->cpl_close; + struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; + unsigned int tid = csk->tid; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + csk->cpl_close = NULL; + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); + req->wr.wr_lo = htonl(V_WR_TID(tid)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); + req->rsvd = htonl(csk->write_seq); + + cxgbi_sock_skb_entail(csk, skb); + if (csk->state >= CTP_ESTABLISHED) + push_tx_frames(csk, 1); +} + +/* + * CPL connection abort request: host -> + * + * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs + * for the same connection and also that we do not try to send a message + * after the connection has closed. + */ +static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb) +{ + struct cpl_abort_req *req = cplhdr(skb); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "t3dev 0x%p, tid %u, skb 0x%p.\n", + tdev, GET_TID(req), skb); + req->cmd = CPL_ABORT_NO_RST; + cxgb3_ofld_send(tdev, skb); +} + +static void send_abort_req(struct cxgbi_sock *csk) +{ + struct sk_buff *skb = csk->cpl_abort_req; + struct cpl_abort_req *req; + + if (unlikely(csk->state == CTP_ABORTING || !skb)) + return; + cxgbi_sock_set_state(csk, CTP_ABORTING); + cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); + /* Purge the send queue so we don't send anything after an abort. 
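/*
 * send_act_open_req() above programs a TCP window-scale factor derived
 * from the receive window (cxgbi_sock_compute_wscale()).  The sketch
 * below shows the standard RFC 7323 computation: the smallest shift for
 * which 65535 << wscale covers the window, capped at the protocol
 * maximum of 14.  The driver's helper may apply a smaller hardware cap.
 */
#include <stdio.h>

static unsigned int compute_wscale(unsigned int win)
{
	unsigned int wscale = 0;

	while (wscale < 14 && (65535u << wscale) < win)
		wscale++;
	return wscale;
}

int main(void)
{
	/* The module default above is a 256KB receive window, giving 3. */
	printf("wscale(256KB) = %u\n", compute_wscale(256 * 1024));
	return 0;
}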
*/ + cxgbi_sock_purge_write_queue(csk); + + csk->cpl_abort_req = NULL; + req = (struct cpl_abort_req *)skb->head; + skb->priority = CPL_PRIORITY_DATA; + set_arp_failure_handler(skb, abort_arp_failure); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ)); + req->wr.wr_lo = htonl(V_WR_TID(csk->tid)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); + req->rsvd0 = htonl(csk->snd_nxt); + req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); + req->cmd = CPL_ABORT_SEND_RST; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n", + csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, + req->rsvd1); + + l2t_send(csk->cdev->lldev, skb, csk->l2t); +} + +/* + * CPL connection abort reply: host -> + * + * Send an ABORT_RPL message in response of the ABORT_REQ received. + */ +static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) +{ + struct sk_buff *skb = csk->cpl_abort_rpl; + struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, status %d.\n", + csk, csk->state, csk->flags, csk->tid, rst_status); + + csk->cpl_abort_rpl = NULL; + skb->priority = CPL_PRIORITY_DATA; + rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); + rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid)); + OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); + rpl->cmd = rst_status; + cxgb3_ofld_send(csk->cdev->lldev, skb); +} + +/* + * CPL connection rx data ack: host -> + * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of + * credits sent. + */ +static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) +{ + struct sk_buff *skb; + struct cpl_rx_data_ack *req; + u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n", + csk, csk->state, csk->flags, csk->tid, credits, dack); + + skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC); + if (!skb) { + pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); + return 0; + } + req = (struct cpl_rx_data_ack *)skb->head; + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid)); + req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) | + V_RX_CREDITS(credits)); + skb->priority = CPL_PRIORITY_ACK; + cxgb3_ofld_send(csk->cdev->lldev, skb); + return credits; +} + +/* + * CPL connection tx data: host -> + * + * Send iscsi PDU via TX_DATA CPL message. Returns the number of + * credits sent. + * Each TX_DATA consumes work request credit (wrs), so we need to keep track of + * how many we've used so far and how many are pending (i.e., yet ack'ed by T3). + */ + +static unsigned int wrlen __read_mostly; +static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly; + +static void init_wr_tab(unsigned int wr_len) +{ + int i; + + if (skb_wrs[1]) /* already initialized */ + return; + for (i = 1; i < SKB_WR_LIST_SIZE; i++) { + int sgl_len = (3 * i) / 2 + (i & 1); + + sgl_len += 3; + skb_wrs[i] = (sgl_len <= wr_len + ? 1 : 1 + (sgl_len - 2) / (wr_len - 1)); + } + wrlen = wr_len * 8; +} + +static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, + int len, int req_completion) +{ + struct tx_data_wr *req; + struct l2t_entry *l2t = csk->l2t; + + skb_reset_transport_header(skb); + req = __skb_push(skb, sizeof(*req)); + req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) | + (req_completion ? 
F_WR_COMPL : 0)); + req->wr_lo = htonl(V_WR_TID(csk->tid)); + /* len includes the length of any HW ULP additions */ + req->len = htonl(len); + /* V_TX_ULP_SUBMODE sets both the mode and submode */ + req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_tx_ulp_mode(skb)) | + V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1))); + req->sndseq = htonl(csk->snd_nxt); + req->param = htonl(V_TX_PORT(l2t->smt_idx)); + + if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { + req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT | + V_TX_CPU_IDX(csk->rss_qid)); + /* sendbuffer is in units of 32KB. */ + req->param |= htonl(V_TX_SNDBUF(csk->snd_win >> 15)); + cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); + } +} + +/* + * push_tx_frames -- start transmit + * + * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a + * connection's send queue and sends them on to T3. Must be called with the + * connection's lock held. Returns the amount of send buffer space that was + * freed as a result of sending queued data to T3. + */ + +static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb) +{ + kfree_skb(skb); +} + +static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) +{ + int total_size = 0; + struct sk_buff *skb; + + if (unlikely(csk->state < CTP_ESTABLISHED || + csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, + "csk 0x%p,%u,0x%lx,%u, in closing state.\n", + csk, csk->state, csk->flags, csk->tid); + return 0; + } + + while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) { + int len = skb->len; /* length before skb_push */ + int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len); + int wrs_needed = skb_wrs[frags]; + + if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen) + wrs_needed = 1; + + WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1); + + if (csk->wr_cred < wrs_needed) { + log_debug(1 << CXGBI_DBG_PDU_TX, + "csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n", + csk, skb->len, skb->data_len, frags, + wrs_needed, csk->wr_cred); + break; + } + + __skb_unlink(skb, &csk->write_queue); + skb->priority = CPL_PRIORITY_DATA; + skb->csum = wrs_needed; /* remember this until the WR_ACK */ + csk->wr_cred -= wrs_needed; + csk->wr_una_cred += wrs_needed; + cxgbi_sock_enqueue_wr(csk, skb); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, + "csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, " + "left %u, unack %u.\n", + csk, skb->len, skb->data_len, frags, skb->csum, + csk->wr_cred, csk->wr_una_cred); + + if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) { + if ((req_completion && + csk->wr_una_cred == wrs_needed) || + csk->wr_una_cred >= csk->wr_max_cred / 2) { + req_completion = 1; + csk->wr_una_cred = 0; + } + len += cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb)); + make_tx_data_wr(csk, skb, len, req_completion); + csk->snd_nxt += len; + cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR); + } + total_size += skb->truesize; + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, + "csk 0x%p, tid 0x%x, send skb 0x%p.\n", + csk, csk->tid, skb); + set_arp_failure_handler(skb, arp_failure_skb_discard); + l2t_send(csk->cdev->lldev, skb, csk->l2t); + } + return total_size; +} + +/* + * Process a CPL_ACT_ESTABLISH message: -> host + * Updates connection state from an active establish CPL message. Runs with + * the connection lock held. 
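/*
 * push_tx_frames() above gates transmission on work-request credits and
 * asks for an explicit completion once roughly half of the credits are
 * outstanding, so the hardware's acknowledgment replenishes them before
 * the queue stalls.  A simplified standalone skeleton of that
 * accounting; the structure and thresholds are illustrative, not the
 * driver's exact policy.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_tx_credits {
	unsigned int wr_cred;		/* currently available */
	unsigned int wr_una_cred;	/* consumed but not yet acked */
	unsigned int wr_max_cred;	/* total granted by the hardware */
};

static bool demo_tx_consume(struct demo_tx_credits *c, unsigned int wrs_needed,
			    bool *req_completion)
{
	if (c->wr_cred < wrs_needed)
		return false;		/* wait for credits to be returned */

	c->wr_cred -= wrs_needed;
	c->wr_una_cred += wrs_needed;

	*req_completion = c->wr_una_cred >= c->wr_max_cred / 2;
	if (*req_completion)
		c->wr_una_cred = 0;
	return true;
}

static void demo_tx_replenish(struct demo_tx_credits *c, unsigned int credits)
{
	c->wr_cred += credits;	/* returned by the completion ack */
}

int main(void)
{
	struct demo_tx_credits c = { .wr_cred = 16, .wr_max_cred = 16 };
	bool want_compl;

	while (demo_tx_consume(&c, 3, &want_compl))
		printf("sent, completion requested: %d\n", want_compl);
	demo_tx_replenish(&c, 9);
	return 0;
}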
+ */ + +static inline void free_atid(struct cxgbi_sock *csk) +{ + if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { + cxgb3_free_atid(csk->cdev->lldev, csk->atid); + cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); + cxgbi_sock_put(csk); + } +} + +static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) +{ + struct cxgbi_sock *csk = ctx; + struct cpl_act_establish *req = cplhdr(skb); + unsigned int tid = GET_TID(req); + unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); + u32 rcv_isn = ntohl(req->rcv_isn); /* real RCV_ISN + 1 */ + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n", + atid, atid, csk, csk->state, csk->flags, rcv_isn); + + cxgbi_sock_get(csk); + cxgbi_sock_set_flag(csk, CTPF_HAS_TID); + csk->tid = tid; + cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid); + + free_atid(csk); + + csk->rss_qid = G_QNUM(ntohs(skb->csum)); + + spin_lock_bh(&csk->lock); + if (csk->retry_timer.function) { + del_timer(&csk->retry_timer); + csk->retry_timer.function = NULL; + } + + if (unlikely(csk->state != CTP_ACTIVE_OPEN)) + pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n", + csk, csk->state, csk->flags, csk->tid); + + csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; + if (csk->rcv_win > (M_RCV_BUFSIZ << 10)) + csk->rcv_wup -= csk->rcv_win - (M_RCV_BUFSIZ << 10); + + cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); + + if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) + /* upper layer has requested closing */ + send_abort_req(csk); + else { + if (skb_queue_len(&csk->write_queue)) + push_tx_frames(csk, 1); + cxgbi_conn_tx_open(csk); + } + + spin_unlock_bh(&csk->lock); + __kfree_skb(skb); + return 0; +} + +/* + * Process a CPL_ACT_OPEN_RPL message: -> host + * Handle active open failures. 
+ */ +static int act_open_rpl_status_to_errno(int status) +{ + switch (status) { + case CPL_ERR_CONN_RESET: + return -ECONNREFUSED; + case CPL_ERR_ARP_MISS: + return -EHOSTUNREACH; + case CPL_ERR_CONN_TIMEDOUT: + return -ETIMEDOUT; + case CPL_ERR_TCAM_FULL: + return -ENOMEM; + case CPL_ERR_CONN_EXIST: + return -EADDRINUSE; + default: + return -EIO; + } +} + +static void act_open_retry_timer(struct timer_list *t) +{ + struct cxgbi_sock *csk = from_timer(csk, t, retry_timer); + struct sk_buff *skb; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC); + if (!skb) + cxgbi_sock_fail_act_open(csk, -ENOMEM); + else { + skb->sk = (struct sock *)csk; + set_arp_failure_handler(skb, act_open_arp_failure); + send_act_open_req(csk, skb, csk->l2t); + } + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); +} + +static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) +{ + struct cxgbi_sock *csk = ctx; + struct cpl_act_open_rpl *rpl = cplhdr(skb); + + pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n", + csk, csk->state, csk->flags, csk->atid, rpl->status, + &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), + &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port)); + + if (rpl->status != CPL_ERR_TCAM_FULL && + rpl->status != CPL_ERR_CONN_EXIST && + rpl->status != CPL_ERR_ARP_MISS) + cxgb3_queue_tid_release(tdev, GET_TID(rpl)); + + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + if (rpl->status == CPL_ERR_CONN_EXIST && + csk->retry_timer.function != act_open_retry_timer) { + csk->retry_timer.function = act_open_retry_timer; + mod_timer(&csk->retry_timer, jiffies + HZ / 2); + } else + cxgbi_sock_fail_act_open(csk, + act_open_rpl_status_to_errno(rpl->status)); + + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); + __kfree_skb(skb); + return 0; +} + +/* + * Process PEER_CLOSE CPL messages: -> host + * Handle peer FIN. + */ +static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) +{ + struct cxgbi_sock *csk = ctx; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + cxgbi_sock_rcv_peer_close(csk); + __kfree_skb(skb); + return 0; +} + +/* + * Process CLOSE_CONN_RPL CPL message: -> host + * Process a peer ACK to our FIN. + */ +static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb, + void *ctx) +{ + struct cxgbi_sock *csk = ctx; + struct cpl_close_con_rpl *rpl = cplhdr(skb); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, snxt %u.\n", + csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt)); + + cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); + __kfree_skb(skb); + return 0; +} + +/* + * Process ABORT_REQ_RSS CPL message: -> host + * Process abort requests. If we are waiting for an ABORT_RPL we ignore this + * request except that we need to reply to it. + */ + +static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, + int *need_rst) +{ + switch (abort_reason) { + case CPL_ERR_BAD_SYN: + case CPL_ERR_CONN_RESET: + return csk->state > CTP_ESTABLISHED ? 
-EPIPE : -ECONNRESET; + case CPL_ERR_XMIT_TIMEDOUT: + case CPL_ERR_PERSIST_TIMEDOUT: + case CPL_ERR_FINWAIT2_TIMEDOUT: + case CPL_ERR_KEEPALIVE_TIMEDOUT: + return -ETIMEDOUT; + default: + return -EIO; + } +} + +static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) +{ + const struct cpl_abort_req_rss *req = cplhdr(skb); + struct cxgbi_sock *csk = ctx; + int rst_status = CPL_ABORT_NO_RST; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + if (req->status == CPL_ERR_RTX_NEG_ADVICE || + req->status == CPL_ERR_PERSIST_NEG_ADVICE) { + goto done; + } + + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + + if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) { + cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); + cxgbi_sock_set_state(csk, CTP_ABORTING); + goto out; + } + + cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); + send_abort_rpl(csk, rst_status); + + if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { + csk->err = abort_status_to_errno(csk, req->status, &rst_status); + cxgbi_sock_closed(csk); + } + +out: + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); +done: + __kfree_skb(skb); + return 0; +} + +/* + * Process ABORT_RPL_RSS CPL message: -> host + * Process abort replies. We only process these messages if we anticipate + * them as the coordination between SW and HW in this area is somewhat lacking + * and sometimes we get ABORT_RPLs after we are done with the connection that + * originated the ABORT_REQ. + */ +static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) +{ + struct cpl_abort_rpl_rss *rpl = cplhdr(skb); + struct cxgbi_sock *csk = ctx; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "status 0x%x, csk 0x%p, s %u, 0x%lx.\n", + rpl->status, csk, csk ? csk->state : 0, + csk ? csk->flags : 0UL); + /* + * Ignore replies to post-close aborts indicating that the abort was + * requested too late. These connections are terminated when we get + * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss + * arrives the TID is either no longer used or it has been recycled. + */ + if (rpl->status == CPL_ERR_ABORT_FAILED) + goto rel_skb; + /* + * Sometimes we've already closed the connection, e.g., a post-close + * abort races with ABORT_REQ_RSS, the latter frees the connection + * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED, + * but FW turns the ABORT_REQ into a regular one and so we get + * ABORT_RPL_RSS with status 0 and no connection. + */ + if (csk) + cxgbi_sock_rcv_abort_rpl(csk); +rel_skb: + __kfree_skb(skb); + return 0; +} + +/* + * Process RX_ISCSI_HDR CPL message: -> host + * Handle received PDUs, the payload could be DDP'ed. If not, the payload + * follow after the bhs. 
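+ * A cpl_rx_data_ddp_norss trailer at the tail of the skb carries the DDP status and ULP CRC; payload that was not DDP'ed follows the iSCSI header ahead of that trailer.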
+ */ +static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx) +{ + struct cxgbi_sock *csk = ctx; + struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb); + struct cpl_iscsi_hdr_norss data_cpl; + struct cpl_rx_data_ddp_norss ddp_cpl; + unsigned int hdr_len, data_len, status; + unsigned int len; + int err; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n", + csk, csk->state, csk->flags, csk->tid, skb, skb->len); + + spin_lock_bh(&csk->lock); + + if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, bad state.\n", + csk, csk->state, csk->flags, csk->tid); + if (csk->state != CTP_ABORTING) + goto abort_conn; + else + goto discard; + } + + cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq); + cxgbi_skcb_flags(skb) = 0; + + skb_reset_transport_header(skb); + __skb_pull(skb, sizeof(struct cpl_iscsi_hdr)); + + len = hdr_len = ntohs(hdr_cpl->len); + /* msg coalesce is off or not enough data received */ + if (skb->len <= hdr_len) { + pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u < %u.\n", + csk->cdev->ports[csk->port_id]->name, csk->tid, + skb->len, hdr_len); + goto abort_conn; + } + cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED); + + err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl, + sizeof(ddp_cpl)); + if (err < 0) { + pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n", + csk->cdev->ports[csk->port_id]->name, csk->tid, + skb->len, sizeof(ddp_cpl), err); + goto abort_conn; + } + + cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS); + cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len); + cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc); + status = ntohl(ddp_cpl.ddp_status); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n", + csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status); + + if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) + cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR); + if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) + cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR); + if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) + cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR); + + if (skb->len > (hdr_len + sizeof(ddp_cpl))) { + err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl)); + if (err < 0) { + pr_err("%s: tid %u, cp %zu/%u failed %d.\n", + csk->cdev->ports[csk->port_id]->name, + csk->tid, sizeof(data_cpl), skb->len, err); + goto abort_conn; + } + data_len = ntohs(data_cpl.len); + log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX, + "skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n", + skb, data_len, cxgbi_skcb_rx_pdulen(skb), status); + len += sizeof(data_cpl) + data_len; + } else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) + cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD); + + csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb); + __pskb_trim(skb, len); + __skb_queue_tail(&csk->receive_queue, skb); + cxgbi_conn_pdu_ready(csk); + + spin_unlock_bh(&csk->lock); + return 0; + +abort_conn: + send_abort_req(csk); +discard: + spin_unlock_bh(&csk->lock); + __kfree_skb(skb); + return 0; +} + +/* + * Process TX_DATA_ACK CPL messages: -> host + * Process an acknowledgment of WR completion. Advance snd_una and send the + * next batch of work requests from the write queue. 
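+ * The credits returned here replenish wr_cred so that PDUs still sitting in the write queue can be pushed out.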
+ */ +static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) +{ + struct cxgbi_sock *csk = ctx; + struct cpl_wr_ack *hdr = cplhdr(skb); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx,%u, cr %u.\n", + csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits)); + + cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1); + __kfree_skb(skb); + return 0; +} + +/* + * for each connection, pre-allocate skbs needed for close/abort requests. So + * that we can service the request right away. + */ +static int alloc_cpls(struct cxgbi_sock *csk) +{ + csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0, + GFP_KERNEL); + if (!csk->cpl_close) + return -ENOMEM; + csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0, + GFP_KERNEL); + if (!csk->cpl_abort_req) + goto free_cpl_skbs; + + csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0, + GFP_KERNEL); + if (!csk->cpl_abort_rpl) + goto free_cpl_skbs; + + return 0; + +free_cpl_skbs: + cxgbi_sock_free_cpl_skbs(csk); + return -ENOMEM; +} + +static void l2t_put(struct cxgbi_sock *csk) +{ + struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; + + if (csk->l2t) { + l2t_release(t3dev, csk->l2t); + csk->l2t = NULL; + cxgbi_sock_put(csk); + } +} + +/* + * release_offload_resources - release offload resource + * Release resources held by an offload connection (TID, L2T entry, etc.) + */ +static void release_offload_resources(struct cxgbi_sock *csk) +{ + struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + csk->rss_qid = 0; + cxgbi_sock_free_cpl_skbs(csk); + + if (csk->wr_cred != csk->wr_max_cred) { + cxgbi_sock_purge_wr_queue(csk); + cxgbi_sock_reset_wr_list(csk); + } + l2t_put(csk); + if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) + free_atid(csk); + else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { + cxgb3_remove_tid(t3dev, (void *)csk, csk->tid); + cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); + cxgbi_sock_put(csk); + } + csk->dst = NULL; + csk->cdev = NULL; +} + +static void update_address(struct cxgbi_hba *chba) +{ + if (chba->ipv4addr) { + if (chba->vdev && + chba->ipv4addr != cxgb3i_get_private_ipv4addr(chba->vdev)) { + cxgb3i_set_private_ipv4addr(chba->vdev, chba->ipv4addr); + cxgb3i_set_private_ipv4addr(chba->ndev, 0); + pr_info("%s set %pI4.\n", + chba->vdev->name, &chba->ipv4addr); + } else if (chba->ipv4addr != + cxgb3i_get_private_ipv4addr(chba->ndev)) { + cxgb3i_set_private_ipv4addr(chba->ndev, chba->ipv4addr); + pr_info("%s set %pI4.\n", + chba->ndev->name, &chba->ipv4addr); + } + } else if (cxgb3i_get_private_ipv4addr(chba->ndev)) { + if (chba->vdev) + cxgb3i_set_private_ipv4addr(chba->vdev, 0); + cxgb3i_set_private_ipv4addr(chba->ndev, 0); + } +} + +static int init_act_open(struct cxgbi_sock *csk) +{ + struct dst_entry *dst = csk->dst; + struct cxgbi_device *cdev = csk->cdev; + struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev; + struct net_device *ndev = cdev->ports[csk->port_id]; + struct cxgbi_hba *chba = cdev->hbas[csk->port_id]; + struct sk_buff *skb = NULL; + int ret; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags); + + update_address(chba); + if (chba->ipv4addr) + csk->saddr.sin_addr.s_addr = chba->ipv4addr; + + csk->rss_qid = 0; + csk->l2t = t3_l2t_get(t3dev, dst, ndev, + &csk->daddr.sin_addr.s_addr); + if (!csk->l2t) { + pr_err("NO l2t available.\n"); + 
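+		/* without an L2T (neighbour) entry the connection cannot be offloaded */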
return -EINVAL; + } + cxgbi_sock_get(csk); + + csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk); + if (csk->atid < 0) { + pr_err("NO atid available.\n"); + ret = -EINVAL; + goto put_sock; + } + cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); + cxgbi_sock_get(csk); + + skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL); + if (!skb) { + ret = -ENOMEM; + goto free_atid; + } + skb->sk = (struct sock *)csk; + set_arp_failure_handler(skb, act_open_arp_failure); + csk->snd_win = cxgb3i_snd_win; + csk->rcv_win = cxgb3i_rcv_win; + + csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1; + csk->wr_una_cred = 0; + csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst)); + cxgbi_sock_reset_wr_list(csk); + csk->err = 0; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n", + csk, csk->state, csk->flags, + &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), + &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port)); + + cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); + send_act_open_req(csk, skb, csk->l2t); + return 0; + +free_atid: + cxgb3_free_atid(t3dev, csk->atid); +put_sock: + cxgbi_sock_put(csk); + l2t_release(t3dev, csk->l2t); + csk->l2t = NULL; + + return ret; +} + +cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = { + [CPL_ACT_ESTABLISH] = do_act_establish, + [CPL_ACT_OPEN_RPL] = do_act_open_rpl, + [CPL_PEER_CLOSE] = do_peer_close, + [CPL_ABORT_REQ_RSS] = do_abort_req, + [CPL_ABORT_RPL_RSS] = do_abort_rpl, + [CPL_CLOSE_CON_RPL] = do_close_con_rpl, + [CPL_TX_DMA_ACK] = do_wr_ack, + [CPL_ISCSI_HDR] = do_iscsi_hdr, +}; + +/** + * cxgb3i_ofld_init - allocate and initialize resources for each adapter found + * @cdev: cxgbi adapter + */ +static int cxgb3i_ofld_init(struct cxgbi_device *cdev) +{ + struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev; + struct adap_ports port; + struct ofld_page_info rx_page_info; + unsigned int wr_len; + int rc; + + if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 || + t3dev->ctl(t3dev, GET_PORTS, &port) < 0 || + t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) { + pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev); + return -EINVAL; + } + + if (cxgb3i_max_connect > CXGBI_MAX_CONN) + cxgb3i_max_connect = CXGBI_MAX_CONN; + + rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base, + cxgb3i_max_connect); + if (rc < 0) + return rc; + + init_wr_tab(wr_len); + cdev->csk_release_offload_resources = release_offload_resources; + cdev->csk_push_tx_frames = push_tx_frames; + cdev->csk_send_abort_req = send_abort_req; + cdev->csk_send_close_req = send_close_req; + cdev->csk_send_rx_credits = send_rx_credits; + cdev->csk_alloc_cpls = alloc_cpls; + cdev->csk_init_act_open = init_act_open; + + pr_info("cdev 0x%p, offload up, added.\n", cdev); + return 0; +} + +/* + * functions to program the pagepod in h/w + */ +static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr) +{ + struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head; + + memset(req, 0, sizeof(*req)); + + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS)); + req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) | + V_ULPTX_CMD(ULP_MEM_WRITE)); + req->len = htonl(V_ULP_MEMIO_DATA_LEN(IPPOD_SIZE >> 5) | + V_ULPTX_NFLITS((IPPOD_SIZE >> 3) + 1)); +} + +static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) +{ + return ((struct t3cdev *)cdev->lldev)->ulp_iscsi; +} + +static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, + struct cxgbi_task_tag_info *ttinfo) +{ + unsigned int idx = ttinfo->idx; + 
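+	/* each pagepod below is written to adapter memory with its own ULP_MEM_WRITE work request */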
unsigned int npods = ttinfo->npods; + struct scatterlist *sg = ttinfo->sgl; + struct cxgbi_pagepod *ppod; + struct ulp_mem_io *req; + unsigned int sg_off; + unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit; + int i; + + for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) { + struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + + IPPOD_SIZE, 0, GFP_ATOMIC); + + if (!skb) + return -ENOMEM; + ulp_mem_io_set_hdr(skb, pm_addr); + req = (struct ulp_mem_io *)skb->head; + ppod = (struct cxgbi_pagepod *)(req + 1); + sg_off = i * PPOD_PAGES_MAX; + cxgbi_ddp_set_one_ppod(ppod, ttinfo, &sg, + &sg_off); + skb->priority = CPL_PRIORITY_CONTROL; + cxgb3_ofld_send(ppm->lldev, skb); + } + return 0; +} + +static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm, + struct cxgbi_task_tag_info *ttinfo) +{ + unsigned int idx = ttinfo->idx; + unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit; + unsigned int npods = ttinfo->npods; + int i; + + log_debug(1 << CXGBI_DBG_DDP, + "cdev 0x%p, clear idx %u, npods %u.\n", + cdev, idx, npods); + + for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) { + struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + + IPPOD_SIZE, 0, GFP_ATOMIC); + + if (!skb) { + pr_err("cdev 0x%p, clear ddp, %u,%d/%u, skb OOM.\n", + cdev, idx, i, npods); + continue; + } + ulp_mem_io_set_hdr(skb, pm_addr); + skb->priority = CPL_PRIORITY_CONTROL; + cxgb3_ofld_send(ppm->lldev, skb); + } +} + +static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, + unsigned int tid, int pg_idx) +{ + struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, + GFP_KERNEL); + struct cpl_set_tcb_field *req; + u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0; + + log_debug(1 << CXGBI_DBG_DDP, + "csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx); + if (!skb) + return -ENOMEM; + + /* set up ulp submode and page size */ + req = (struct cpl_set_tcb_field *)skb->head; + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); + req->reply = V_NO_REPLY(1); + req->cpu_idx = 0; + req->word = htons(31); + req->mask = cpu_to_be64(0xF0000000); + req->val = cpu_to_be64(val << 28); + skb->priority = CPL_PRIORITY_CONTROL; + + cxgb3_ofld_send(csk->cdev->lldev, skb); + return 0; +} + +/** + * ddp_setup_conn_digest - setup conn. digest setting + * @csk: cxgb tcp socket + * @tid: connection id + * @hcrc: header digest enabled + * @dcrc: data digest enabled + * set up the iscsi digest settings for a connection identified by tid + */ +static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, + int hcrc, int dcrc) +{ + struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, + GFP_KERNEL); + struct cpl_set_tcb_field *req; + u64 val = (hcrc ? 1 : 0) | (dcrc ? 
2 : 0); + + log_debug(1 << CXGBI_DBG_DDP, + "csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc); + if (!skb) + return -ENOMEM; + + /* set up ulp submode and page size */ + req = (struct cpl_set_tcb_field *)skb->head; + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); + req->reply = V_NO_REPLY(1); + req->cpu_idx = 0; + req->word = htons(31); + req->mask = cpu_to_be64(0x0F000000); + req->val = cpu_to_be64(val << 24); + skb->priority = CPL_PRIORITY_CONTROL; + + cxgb3_ofld_send(csk->cdev->lldev, skb); + return 0; +} + +/** + * cxgb3i_ddp_init - initialize the cxgb3 adapter's ddp resource + * @cdev: cxgb3i adapter + * initialize the ddp pagepod manager for a given adapter + */ +static int cxgb3i_ddp_init(struct cxgbi_device *cdev) +{ + struct t3cdev *tdev = (struct t3cdev *)cdev->lldev; + struct net_device *ndev = cdev->ports[0]; + struct cxgbi_tag_format tformat; + unsigned int ppmax, tagmask = 0; + struct ulp_iscsi_info uinfo; + int i, err; + + err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo); + if (err < 0) { + pr_err("%s, failed to get iscsi param %d.\n", + ndev->name, err); + return err; + } + if (uinfo.llimit >= uinfo.ulimit) { + pr_warn("T3 %s, iscsi NOT enabled %u ~ %u!\n", + ndev->name, uinfo.llimit, uinfo.ulimit); + return -EACCES; + } + + ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT; + tagmask = cxgbi_tagmask_set(ppmax); + + pr_info("T3 %s: 0x%x~0x%x, 0x%x, tagmask 0x%x -> 0x%x.\n", + ndev->name, uinfo.llimit, uinfo.ulimit, ppmax, uinfo.tagmask, + tagmask); + + memset(&tformat, 0, sizeof(struct cxgbi_tag_format)); + for (i = 0; i < 4; i++) + tformat.pgsz_order[i] = uinfo.pgsz_factor[i]; + cxgbi_tagmask_check(tagmask, &tformat); + + err = cxgbi_ddp_ppm_setup(&tdev->ulp_iscsi, cdev, &tformat, + (uinfo.ulimit - uinfo.llimit + 1), + uinfo.llimit, uinfo.llimit, 0, 0, 0); + if (err) + return err; + + if (!(cdev->flags & CXGBI_FLAG_DDP_OFF)) { + uinfo.tagmask = tagmask; + uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT); + + err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo); + if (err < 0) { + pr_err("T3 %s fail to set iscsi param %d.\n", + ndev->name, err); + cdev->flags |= CXGBI_FLAG_DDP_OFF; + } + err = 0; + } + + cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; + cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; + cdev->csk_ddp_set_map = ddp_set_map; + cdev->csk_ddp_clear_map = ddp_clear_map; + cdev->cdev2ppm = cdev2ppm; + cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, + uinfo.max_txsz - ISCSI_PDU_NONPAYLOAD_LEN); + cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, + uinfo.max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN); + + return 0; +} + +static void cxgb3i_dev_close(struct t3cdev *t3dev) +{ + struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); + + if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) { + pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? 
cdev->flags : 0); + return; + } + + cxgbi_device_unregister(cdev); +} + +/** + * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings + * @t3dev: t3cdev adapter + */ +static void cxgb3i_dev_open(struct t3cdev *t3dev) +{ + struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); + struct adapter *adapter = tdev2adap(t3dev); + int i, err; + + if (cdev) { + pr_info("0x%p, updating.\n", cdev); + return; + } + + cdev = cxgbi_device_register(0, adapter->params.nports); + if (!cdev) { + pr_warn("device 0x%p register failed.\n", t3dev); + return; + } + + cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET; + cdev->lldev = t3dev; + cdev->pdev = adapter->pdev; + cdev->ports = adapter->port; + cdev->nports = adapter->params.nports; + cdev->mtus = adapter->params.mtus; + cdev->nmtus = NMTUS; + cdev->rx_credit_thres = cxgb3i_rx_credit_thres; + cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN; + cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss); + cdev->itp = &cxgb3i_iscsi_transport; + + err = cxgb3i_ddp_init(cdev); + if (err) { + pr_info("0x%p ddp init failed %d\n", cdev, err); + goto err_out; + } + + err = cxgb3i_ofld_init(cdev); + if (err) { + pr_info("0x%p offload init failed\n", cdev); + goto err_out; + } + + err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN, + &cxgb3i_host_template, cxgb3i_stt); + if (err) + goto err_out; + + for (i = 0; i < cdev->nports; i++) + cdev->hbas[i]->ipv4addr = + cxgb3i_get_private_ipv4addr(cdev->ports[i]); + + pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n", + cdev, cdev ? cdev->flags : 0, t3dev, err); + return; + +err_out: + cxgbi_device_unregister(cdev); +} + +static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port) +{ + struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); + + log_debug(1 << CXGBI_DBG_TOE, + "0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n", + t3dev, cdev, event, port); + if (!cdev) + return; + + switch (event) { + case OFFLOAD_STATUS_DOWN: + cdev->flags |= CXGBI_FLAG_ADAPTER_RESET; + break; + case OFFLOAD_STATUS_UP: + cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET; + break; + } +} + +/** + * cxgb3i_init_module - module init entry point + * + * initialize any driver wide global data structures and register itself + * with the cxgb3 module + */ +static int __init cxgb3i_init_module(void) +{ + int rc; + + printk(KERN_INFO "%s", version); + + rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt); + if (rc < 0) + return rc; + + cxgb3_register_client(&t3_client); + return 0; +} + +/** + * cxgb3i_exit_module - module cleanup/exit entry point + * + * go through the driver hba list and for each hba, release any resource held. + * and unregisters iscsi transport and the cxgb3 module + */ +static void __exit cxgb3i_exit_module(void) +{ + cxgb3_unregister_client(&t3_client); + cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3); + cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt); +} + +module_init(cxgb3i_init_module); +module_exit(cxgb3i_exit_module); diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h new file mode 100644 index 000000000..b0430c935 --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h @@ -0,0 +1,62 @@ +/* + * cxgb3i.h: Chelsio S3xx iSCSI driver. + * + * Copyright (c) 2008-2015 Chelsio Communications, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Karen Xie (kxie@chelsio.com) + */ + +#ifndef __CXGB3I_H__ +#define __CXGB3I_H__ + +#define CXGB3I_SCSI_HOST_QDEPTH 1024 +#define CXGB3I_MAX_LUN 512 +#define ISCSI_PDU_NONPAYLOAD_MAX \ + (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 2*ISCSI_DIGEST_SIZE) + +/*for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */ +#define CXGB3I_TX_HEADER_LEN \ + (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr)) + +extern cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS]; + +static inline unsigned int cxgb3i_get_private_ipv4addr(struct net_device *ndev) +{ + return ((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr; +} + +static inline void cxgb3i_set_private_ipv4addr(struct net_device *ndev, + unsigned int addr) +{ + struct port_info *pi = (struct port_info *)netdev_priv(ndev); + + pi->iscsic.flags = addr ? 1 : 0; + pi->iscsi_ipv4addr = addr; + if (addr) + memcpy(pi->iscsic.mac_addr, ndev->dev_addr, ETH_ALEN); +} + +struct cpl_iscsi_hdr_norss { + union opcode_tid ot; + u16 pdu_len_ddp; + u16 len; + u32 seq; + u16 urg; + u8 rsvd; + u8 status; +}; + +struct cpl_rx_data_ddp_norss { + union opcode_tid ot; + u16 urg; + u16 len; + u32 seq; + u32 nxt_seq; + u32 ulp_crc; + u32 ddp_status; +}; +#endif diff --git a/drivers/scsi/cxgbi/cxgb4i/Kbuild b/drivers/scsi/cxgbi/cxgb4i/Kbuild new file mode 100644 index 000000000..fd3e0c964 --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb4i/Kbuild @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4 +ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/libcxgb + +obj-$(CONFIG_SCSI_CXGB4_ISCSI) += cxgb4i.o diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig new file mode 100644 index 000000000..63c8a0f3c --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only +config SCSI_CXGB4_ISCSI + tristate "Chelsio T4 iSCSI support" + depends on PCI && INET && (IPV6 || IPV6=n) + depends on PTP_1588_CLOCK_OPTIONAL + depends on THERMAL || !THERMAL + depends on ETHERNET + depends on TLS || TLS=n + select NET_VENDOR_CHELSIO + select CHELSIO_T4 + select CHELSIO_LIB + select SCSI_ISCSI_ATTRS + help + This driver supports iSCSI offload for the Chelsio T4 devices. diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c new file mode 100644 index 000000000..c07d2e3b4 --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -0,0 +1,2473 @@ +/* + * cxgb4i.c: Chelsio T4 iSCSI driver. + * + * Copyright (c) 2010-2015 Chelsio Communications, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Karen Xie (kxie@chelsio.com) + * Rakesh Ranjan (rranjan@chelsio.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t4_regs.h" +#include "t4_msg.h" +#include "cxgb4.h" +#include "cxgb4_uld.h" +#include "t4fw_api.h" +#include "l2t.h" +#include "cxgb4i.h" +#include "clip_tbl.h" + +static unsigned int dbg_level; + +#include "../libcxgbi.h" + +#ifdef CONFIG_CHELSIO_T4_DCB +#include +#include "cxgb4_dcb.h" +#endif + +#define DRV_MODULE_NAME "cxgb4i" +#define DRV_MODULE_DESC "Chelsio T4-T6 iSCSI Driver" +#define DRV_MODULE_VERSION "0.9.5-ko" +#define DRV_MODULE_RELDATE "Apr. 
2015" + +static char version[] = + DRV_MODULE_DESC " " DRV_MODULE_NAME + " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Chelsio Communications, Inc."); +MODULE_DESCRIPTION(DRV_MODULE_DESC); +MODULE_VERSION(DRV_MODULE_VERSION); +MODULE_LICENSE("GPL"); + +module_param(dbg_level, uint, 0644); +MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)"); + +#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024) +static int cxgb4i_rcv_win = -1; +module_param(cxgb4i_rcv_win, int, 0644); +MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes"); + +#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024) +static int cxgb4i_snd_win = -1; +module_param(cxgb4i_snd_win, int, 0644); +MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes"); + +static int cxgb4i_rx_credit_thres = 10 * 1024; +module_param(cxgb4i_rx_credit_thres, int, 0644); +MODULE_PARM_DESC(cxgb4i_rx_credit_thres, + "RX credits return threshold in bytes (default=10KB)"); + +static unsigned int cxgb4i_max_connect = (8 * 1024); +module_param(cxgb4i_max_connect, uint, 0644); +MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections"); + +static unsigned short cxgb4i_sport_base = 20000; +module_param(cxgb4i_sport_base, ushort, 0644); +MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)"); + +typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *); + +static void *t4_uld_add(const struct cxgb4_lld_info *); +static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *); +static int t4_uld_state_change(void *, enum cxgb4_state state); +static inline int send_tx_flowc_wr(struct cxgbi_sock *); + +static const struct cxgb4_uld_info cxgb4i_uld_info = { + .name = DRV_MODULE_NAME, + .nrxq = MAX_ULD_QSETS, + .ntxq = MAX_ULD_QSETS, + .rxq_size = 1024, + .lro = false, + .add = t4_uld_add, + .rx_handler = t4_uld_rx_handler, + .state_change = t4_uld_state_change, +}; + +static struct scsi_host_template cxgb4i_host_template = { + .module = THIS_MODULE, + .name = DRV_MODULE_NAME, + .proc_name = DRV_MODULE_NAME, + .can_queue = CXGB4I_SCSI_HOST_QDEPTH, + .queuecommand = iscsi_queuecommand, + .change_queue_depth = scsi_change_queue_depth, + .sg_tablesize = SG_ALL, + .max_sectors = 0xFFFF, + .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, + .eh_timed_out = iscsi_eh_cmd_timed_out, + .eh_abort_handler = iscsi_eh_abort, + .eh_device_reset_handler = iscsi_eh_device_reset, + .eh_target_reset_handler = iscsi_eh_recover_target, + .target_alloc = iscsi_target_alloc, + .dma_boundary = PAGE_SIZE - 1, + .this_id = -1, + .track_queue_depth = 1, + .cmd_size = sizeof(struct iscsi_cmd), +}; + +static struct iscsi_transport cxgb4i_iscsi_transport = { + .owner = THIS_MODULE, + .name = DRV_MODULE_NAME, + .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | + CAP_DATADGST | CAP_DIGEST_OFFLOAD | + CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, + .attr_is_visible = cxgbi_attr_is_visible, + .get_host_param = cxgbi_get_host_param, + .set_host_param = cxgbi_set_host_param, + /* session management */ + .create_session = cxgbi_create_session, + .destroy_session = cxgbi_destroy_session, + .get_session_param = iscsi_session_get_param, + /* connection management */ + .create_conn = cxgbi_create_conn, + .bind_conn = cxgbi_bind_conn, + .unbind_conn = iscsi_conn_unbind, + .destroy_conn = iscsi_tcp_conn_teardown, + .start_conn = iscsi_conn_start, + .stop_conn = iscsi_conn_stop, + .get_conn_param = iscsi_conn_get_param, + .set_param = cxgbi_set_conn_param, + .get_stats = cxgbi_get_conn_stats, + /* pdu xmit req from user 
space */ + .send_pdu = iscsi_conn_send_pdu, + /* task */ + .init_task = iscsi_tcp_task_init, + .xmit_task = iscsi_tcp_task_xmit, + .cleanup_task = cxgbi_cleanup_task, + /* pdu */ + .alloc_pdu = cxgbi_conn_alloc_pdu, + .init_pdu = cxgbi_conn_init_pdu, + .xmit_pdu = cxgbi_conn_xmit_pdu, + .parse_pdu_itt = cxgbi_parse_pdu_itt, + /* TCP connect/disconnect */ + .get_ep_param = cxgbi_get_ep_param, + .ep_connect = cxgbi_ep_connect, + .ep_poll = cxgbi_ep_poll, + .ep_disconnect = cxgbi_ep_disconnect, + /* Error recovery timeout call */ + .session_recovery_timedout = iscsi_session_recovery_timedout, +}; + +#ifdef CONFIG_CHELSIO_T4_DCB +static int +cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *); + +static struct notifier_block cxgb4_dcb_change = { + .notifier_call = cxgb4_dcb_change_notify, +}; +#endif + +static struct scsi_transport_template *cxgb4i_stt; + +/* + * CPL (Chelsio Protocol Language) defines a message passing interface between + * the host driver and Chelsio asic. + * The section below implments CPLs that related to iscsi tcp connection + * open/close/abort and data send/receive. + */ + +#define RCV_BUFSIZ_MASK 0x3FFU +#define MAX_IMM_TX_PKT_LEN 256 + +static int push_tx_frames(struct cxgbi_sock *, int); + +/* + * is_ofld_imm - check whether a packet can be sent as immediate data + * @skb: the packet + * + * Returns true if a packet can be sent as an offload WR with immediate + * data. We currently use the same limit as for Ethernet packets. + */ +static inline bool is_ofld_imm(const struct sk_buff *skb) +{ + int len = skb->len; + + if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) + len += sizeof(struct fw_ofld_tx_data_wr); + + if (likely(cxgbi_skcb_test_flag((struct sk_buff *)skb, SKCBF_TX_ISO))) + len += sizeof(struct cpl_tx_data_iso); + + return (len <= MAX_IMM_OFLD_TX_DATA_WR_LEN); +} + +static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, + struct l2t_entry *e) +{ + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); + int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); + unsigned long long opt0; + unsigned int opt2; + unsigned int qid_atid = ((unsigned int)csk->atid) | + (((unsigned int)csk->rss_qid) << 14); + + opt0 = KEEP_ALIVE_F | + WND_SCALE_V(wscale) | + MSS_IDX_V(csk->mss_idx) | + L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | + TX_CHAN_V(csk->tx_chan) | + SMAC_SEL_V(csk->smac_idx) | + ULP_MODE_V(ULP_MODE_ISCSI) | + RCV_BUFSIZ_V(csk->rcv_win >> 10); + + opt2 = RX_CHANNEL_V(0) | + RSS_QUEUE_VALID_F | + RSS_QUEUE_V(csk->rss_qid); + + if (is_t4(lldi->adapter_type)) { + struct cpl_act_open_req *req = + (struct cpl_act_open_req *)skb->head; + + INIT_TP_WR(req, 0); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, + qid_atid)); + req->local_port = csk->saddr.sin_port; + req->peer_port = csk->daddr.sin_port; + req->local_ip = csk->saddr.sin_addr.s_addr; + req->peer_ip = csk->daddr.sin_addr.s_addr; + req->opt0 = cpu_to_be64(opt0); + req->params = cpu_to_be32(cxgb4_select_ntuple( + csk->cdev->ports[csk->port_id], + csk->l2t)); + opt2 |= RX_FC_VALID_F; + req->opt2 = cpu_to_be32(opt2); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n", + csk, &req->local_ip, ntohs(req->local_port), + &req->peer_ip, ntohs(req->peer_port), + csk->atid, csk->rss_qid); + } else if (is_t5(lldi->adapter_type)) { + struct cpl_t5_act_open_req *req = + (struct cpl_t5_act_open_req *)skb->head; + u32 isn = (get_random_u32() & ~7UL) - 1; + + INIT_TP_WR(req, 0); + OPCODE_TID(req) 
= cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, + qid_atid)); + req->local_port = csk->saddr.sin_port; + req->peer_port = csk->daddr.sin_port; + req->local_ip = csk->saddr.sin_addr.s_addr; + req->peer_ip = csk->daddr.sin_addr.s_addr; + req->opt0 = cpu_to_be64(opt0); + req->params = cpu_to_be64(FILTER_TUPLE_V( + cxgb4_select_ntuple( + csk->cdev->ports[csk->port_id], + csk->l2t))); + req->rsvd = cpu_to_be32(isn); + opt2 |= T5_ISS_VALID; + opt2 |= T5_OPT_2_VALID_F; + + req->opt2 = cpu_to_be32(opt2); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n", + csk, &req->local_ip, ntohs(req->local_port), + &req->peer_ip, ntohs(req->peer_port), + csk->atid, csk->rss_qid); + } else { + struct cpl_t6_act_open_req *req = + (struct cpl_t6_act_open_req *)skb->head; + u32 isn = (get_random_u32() & ~7UL) - 1; + + INIT_TP_WR(req, 0); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, + qid_atid)); + req->local_port = csk->saddr.sin_port; + req->peer_port = csk->daddr.sin_port; + req->local_ip = csk->saddr.sin_addr.s_addr; + req->peer_ip = csk->daddr.sin_addr.s_addr; + req->opt0 = cpu_to_be64(opt0); + req->params = cpu_to_be64(FILTER_TUPLE_V( + cxgb4_select_ntuple( + csk->cdev->ports[csk->port_id], + csk->l2t))); + req->rsvd = cpu_to_be32(isn); + + opt2 |= T5_ISS_VALID; + opt2 |= RX_FC_DISABLE_F; + opt2 |= T5_OPT_2_VALID_F; + + req->opt2 = cpu_to_be32(opt2); + req->rsvd2 = cpu_to_be32(0); + req->opt3 = cpu_to_be32(0); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n", + csk, &req->local_ip, ntohs(req->local_port), + &req->peer_ip, ntohs(req->peer_port), + csk->atid, csk->rss_qid); + } + + set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); + + pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n", + (&csk->saddr), (&csk->daddr), + CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, + csk->state, csk->flags, csk->atid, csk->rss_qid); + + cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); +} + +#if IS_ENABLED(CONFIG_IPV6) +static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, + struct l2t_entry *e) +{ + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); + int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); + unsigned long long opt0; + unsigned int opt2; + unsigned int qid_atid = ((unsigned int)csk->atid) | + (((unsigned int)csk->rss_qid) << 14); + + opt0 = KEEP_ALIVE_F | + WND_SCALE_V(wscale) | + MSS_IDX_V(csk->mss_idx) | + L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | + TX_CHAN_V(csk->tx_chan) | + SMAC_SEL_V(csk->smac_idx) | + ULP_MODE_V(ULP_MODE_ISCSI) | + RCV_BUFSIZ_V(csk->rcv_win >> 10); + + opt2 = RX_CHANNEL_V(0) | + RSS_QUEUE_VALID_F | + RSS_QUEUE_V(csk->rss_qid); + + if (is_t4(lldi->adapter_type)) { + struct cpl_act_open_req6 *req = + (struct cpl_act_open_req6 *)skb->head; + + INIT_TP_WR(req, 0); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, + qid_atid)); + req->local_port = csk->saddr6.sin6_port; + req->peer_port = csk->daddr6.sin6_port; + + req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); + req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + + 8); + req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); + req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + + 8); + + req->opt0 = cpu_to_be64(opt0); + + opt2 |= RX_FC_VALID_F; + req->opt2 = cpu_to_be32(opt2); + + req->params = cpu_to_be32(cxgb4_select_ntuple( + csk->cdev->ports[csk->port_id], + csk->l2t)); + } else if 
(is_t5(lldi->adapter_type)) { + struct cpl_t5_act_open_req6 *req = + (struct cpl_t5_act_open_req6 *)skb->head; + + INIT_TP_WR(req, 0); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, + qid_atid)); + req->local_port = csk->saddr6.sin6_port; + req->peer_port = csk->daddr6.sin6_port; + req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); + req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + + 8); + req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); + req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + + 8); + req->opt0 = cpu_to_be64(opt0); + + opt2 |= T5_OPT_2_VALID_F; + req->opt2 = cpu_to_be32(opt2); + + req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple( + csk->cdev->ports[csk->port_id], + csk->l2t))); + } else { + struct cpl_t6_act_open_req6 *req = + (struct cpl_t6_act_open_req6 *)skb->head; + + INIT_TP_WR(req, 0); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, + qid_atid)); + req->local_port = csk->saddr6.sin6_port; + req->peer_port = csk->daddr6.sin6_port; + req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); + req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + + 8); + req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); + req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + + 8); + req->opt0 = cpu_to_be64(opt0); + + opt2 |= RX_FC_DISABLE_F; + opt2 |= T5_OPT_2_VALID_F; + + req->opt2 = cpu_to_be32(opt2); + + req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple( + csk->cdev->ports[csk->port_id], + csk->l2t))); + + req->rsvd2 = cpu_to_be32(0); + req->opt3 = cpu_to_be32(0); + } + + set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); + + pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n", + CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state, + csk->flags, csk->atid, + &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port), + &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port), + csk->rss_qid); + + cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); +} +#endif + +static void send_close_req(struct cxgbi_sock *csk) +{ + struct sk_buff *skb = csk->cpl_close; + struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; + unsigned int tid = csk->tid; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx, tid %u.\n", + csk, csk->state, csk->flags, csk->tid); + csk->cpl_close = NULL; + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); + INIT_TP_WR(req, tid); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); + req->rsvd = 0; + + cxgbi_sock_skb_entail(csk, skb); + if (csk->state >= CTP_ESTABLISHED) + push_tx_frames(csk, 1); +} + +static void abort_arp_failure(void *handle, struct sk_buff *skb) +{ + struct cxgbi_sock *csk = (struct cxgbi_sock *)handle; + struct cpl_abort_req *req; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx, tid %u, abort.\n", + csk, csk->state, csk->flags, csk->tid); + req = (struct cpl_abort_req *)skb->data; + req->cmd = CPL_ABORT_NO_RST; + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); +} + +static void send_abort_req(struct cxgbi_sock *csk) +{ + struct cpl_abort_req *req; + struct sk_buff *skb = csk->cpl_abort_req; + + if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev) + return; + + if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { + send_tx_flowc_wr(csk); + cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); + } + + cxgbi_sock_set_state(csk, CTP_ABORTING); + cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); + 
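+	/* the abort supersedes any data still queued for transmission */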
cxgbi_sock_purge_write_queue(csk); + + csk->cpl_abort_req = NULL; + req = (struct cpl_abort_req *)skb->head; + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); + req->cmd = CPL_ABORT_SEND_RST; + t4_set_arp_err_handler(skb, csk, abort_arp_failure); + INIT_TP_WR(req, csk->tid); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); + req->rsvd0 = htonl(csk->snd_nxt); + req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n", + csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, + req->rsvd1); + + cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); +} + +static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) +{ + struct sk_buff *skb = csk->cpl_abort_rpl; + struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, status %d.\n", + csk, csk->state, csk->flags, csk->tid, rst_status); + + csk->cpl_abort_rpl = NULL; + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); + INIT_TP_WR(rpl, csk->tid); + OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); + rpl->cmd = rst_status; + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); +} + +/* + * CPL connection rx data ack: host -> + * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of + * credits sent. + */ +static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) +{ + struct sk_buff *skb; + struct cpl_rx_data_ack *req; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx,%u, credit %u.\n", + csk, csk->state, csk->flags, csk->tid, credits); + + skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC); + if (!skb) { + pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); + return 0; + } + req = (struct cpl_rx_data_ack *)skb->head; + + set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id); + INIT_TP_WR(req, csk->tid); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, + csk->tid)); + req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits) + | RX_FORCE_ACK_F); + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); + return credits; +} + +/* + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/* + * calc_tx_flits_ofld - calculate # of flits for an offload packet + * @skb: the packet + * + * Returns the number of flits needed for the given offload packet. + * These packets are already fully constructed and no additional headers + * will be added. 
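+ * For immediate data this is simply skb->len in 8-byte flits; otherwise it is the header flits plus sgl_len() of the fragment count, where linear data beyond the headers counts as one extra fragment (e.g. sgl_len(1) = 2 flits, sgl_len(3) = 5 flits).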
+ */ +static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) +{ + unsigned int flits, cnt; + + if (is_ofld_imm(skb)) + return DIV_ROUND_UP(skb->len, 8); + flits = skb_transport_offset(skb) / 8; + cnt = skb_shinfo(skb)->nr_frags; + if (skb_tail_pointer(skb) != skb_transport_header(skb)) + cnt++; + return flits + sgl_len(cnt); +} + +#define FLOWC_WR_NPARAMS_MIN 9 +static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp) +{ + int nparams, flowclen16, flowclen; + + nparams = FLOWC_WR_NPARAMS_MIN; +#ifdef CONFIG_CHELSIO_T4_DCB + nparams++; +#endif + flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]); + flowclen16 = DIV_ROUND_UP(flowclen, 16); + flowclen = flowclen16 * 16; + /* + * Return the number of 16-byte credits used by the FlowC request. + * Pass back the nparams and actual FlowC length if requested. + */ + if (nparamsp) + *nparamsp = nparams; + if (flowclenp) + *flowclenp = flowclen; + + return flowclen16; +} + +static inline int send_tx_flowc_wr(struct cxgbi_sock *csk) +{ + struct sk_buff *skb; + struct fw_flowc_wr *flowc; + int nparams, flowclen16, flowclen; + +#ifdef CONFIG_CHELSIO_T4_DCB + u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan; +#endif + flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen); + skb = alloc_wr(flowclen, 0, GFP_ATOMIC); + flowc = (struct fw_flowc_wr *)skb->head; + flowc->op_to_nparams = + htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams)); + flowc->flowid_len16 = + htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid)); + flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; + flowc->mnemval[0].val = htonl(csk->cdev->pfvf); + flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; + flowc->mnemval[1].val = htonl(csk->tx_chan); + flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; + flowc->mnemval[2].val = htonl(csk->tx_chan); + flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; + flowc->mnemval[3].val = htonl(csk->rss_qid); + flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; + flowc->mnemval[4].val = htonl(csk->snd_nxt); + flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; + flowc->mnemval[5].val = htonl(csk->rcv_nxt); + flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; + flowc->mnemval[6].val = htonl(csk->snd_win); + flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; + flowc->mnemval[7].val = htonl(csk->advmss); + flowc->mnemval[8].mnemonic = 0; + flowc->mnemval[8].val = 0; + flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX; + if (csk->cdev->skb_iso_txhdr) + flowc->mnemval[8].val = cpu_to_be32(CXGBI_MAX_ISO_DATA_IN_SKB); + else + flowc->mnemval[8].val = cpu_to_be32(16128); +#ifdef CONFIG_CHELSIO_T4_DCB + flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO; + if (vlan == CPL_L2T_VLAN_NONE) { + pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n", + csk->tid); + flowc->mnemval[9].val = cpu_to_be32(0); + } else { + flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT); + } +#endif + + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n", + csk, csk->tid, 0, csk->tx_chan, csk->rss_qid, + csk->snd_nxt, csk->rcv_nxt, csk->snd_win, + csk->advmss); + + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); + + return flowclen16; +} + +static void +cxgb4i_make_tx_iso_cpl(struct sk_buff *skb, struct cpl_tx_data_iso *cpl) +{ + struct cxgbi_iso_info *info = (struct cxgbi_iso_info *)skb->head; + u32 imm_en = !!(info->flags & CXGBI_ISO_INFO_IMM_ENABLE); + u32 fslice = !!(info->flags & 
CXGBI_ISO_INFO_FSLICE); + u32 lslice = !!(info->flags & CXGBI_ISO_INFO_LSLICE); + u32 pdu_type = (info->op == ISCSI_OP_SCSI_CMD) ? 0 : 1; + u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3; + + cpl->op_to_scsi = cpu_to_be32(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) | + CPL_TX_DATA_ISO_FIRST_V(fslice) | + CPL_TX_DATA_ISO_LAST_V(lslice) | + CPL_TX_DATA_ISO_CPLHDRLEN_V(0) | + CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) | + CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) | + CPL_TX_DATA_ISO_IMMEDIATE_V(imm_en) | + CPL_TX_DATA_ISO_SCSI_V(pdu_type)); + + cpl->ahs_len = info->ahs; + cpl->mpdu = cpu_to_be16(DIV_ROUND_UP(info->mpdu, 4)); + cpl->burst_size = cpu_to_be32(info->burst_size); + cpl->len = cpu_to_be32(info->len); + cpl->reserved2_seglen_offset = + cpu_to_be32(CPL_TX_DATA_ISO_SEGLEN_OFFSET_V(info->segment_offset)); + cpl->datasn_offset = cpu_to_be32(info->datasn_offset); + cpl->buffer_offset = cpu_to_be32(info->buffer_offset); + cpl->reserved3 = cpu_to_be32(0); + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "iso: flags 0x%x, op %u, ahs %u, num_pdu %u, mpdu %u, " + "burst_size %u, iso_len %u\n", + info->flags, info->op, info->ahs, info->num_pdu, + info->mpdu, info->burst_size << 2, info->len); +} + +static void +cxgb4i_make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, int dlen, + int len, u32 credits, int compl) +{ + struct cxgbi_device *cdev = csk->cdev; + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct fw_ofld_tx_data_wr *req; + struct cpl_tx_data_iso *cpl; + u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3; + u32 wr_ulp_mode = 0; + u32 hdr_size = sizeof(*req); + u32 opcode = FW_OFLD_TX_DATA_WR; + u32 immlen = 0; + u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) : + T6_TX_FORCE_F; + + if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) { + hdr_size += sizeof(struct cpl_tx_data_iso); + opcode = FW_ISCSI_TX_DATA_WR; + immlen += sizeof(struct cpl_tx_data_iso); + submode |= 8; + } + + if (is_ofld_imm(skb)) + immlen += dlen; + + req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, hdr_size); + req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) | + FW_WR_COMPL_V(compl) | + FW_WR_IMMDLEN_V(immlen)); + req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) | + FW_WR_LEN16_V(credits)); + req->plen = cpu_to_be32(len); + cpl = (struct cpl_tx_data_iso *)(req + 1); + + if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))) + cxgb4i_make_tx_iso_cpl(skb, cpl); + + if (submode) + wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) | + FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode); + + req->tunnel_to_proxy = cpu_to_be32(wr_ulp_mode | force | + FW_OFLD_TX_DATA_WR_SHOVE_V(1U)); + + if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) + cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); +} + +static void arp_failure_skb_discard(void *handle, struct sk_buff *skb) +{ + kfree_skb(skb); +} + +static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) +{ + int total_size = 0; + struct sk_buff *skb; + + if (unlikely(csk->state < CTP_ESTABLISHED || + csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK | + 1 << CXGBI_DBG_PDU_TX, + "csk 0x%p,%u,0x%lx,%u, in closing state.\n", + csk, csk->state, csk->flags, csk->tid); + return 0; + } + + while (csk->wr_cred && ((skb = skb_peek(&csk->write_queue)) != NULL)) { + struct cxgbi_iso_info *iso_cpl; + u32 dlen = skb->len; + u32 len = skb->len; + u32 iso_cpl_len = 0; + u32 flowclen16 = 0; + u32 credits_needed; + u32 num_pdu = 1, hdr_len; + + if (cxgbi_skcb_test_flag(skb, 
SKCBF_TX_ISO)) + iso_cpl_len = sizeof(struct cpl_tx_data_iso); + + if (is_ofld_imm(skb)) + credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16); + else + credits_needed = + DIV_ROUND_UP((8 * calc_tx_flits_ofld(skb)) + + iso_cpl_len, 16); + + if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) + credits_needed += + DIV_ROUND_UP(sizeof(struct fw_ofld_tx_data_wr), 16); + + /* + * Assumes the initial credits is large enough to support + * fw_flowc_wr plus largest possible first payload + */ + if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { + flowclen16 = send_tx_flowc_wr(csk); + csk->wr_cred -= flowclen16; + csk->wr_una_cred += flowclen16; + cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); + } + + if (csk->wr_cred < credits_needed) { + log_debug(1 << CXGBI_DBG_PDU_TX, + "csk 0x%p, skb %u/%u, wr %d < %u.\n", + csk, skb->len, skb->data_len, + credits_needed, csk->wr_cred); + + csk->no_tx_credits++; + break; + } + + csk->no_tx_credits = 0; + + __skb_unlink(skb, &csk->write_queue); + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); + skb->csum = (__force __wsum)(credits_needed + flowclen16); + csk->wr_cred -= credits_needed; + csk->wr_una_cred += credits_needed; + cxgbi_sock_enqueue_wr(csk, skb); + + log_debug(1 << CXGBI_DBG_PDU_TX, + "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n", + csk, skb->len, skb->data_len, credits_needed, + csk->wr_cred, csk->wr_una_cred); + + if (!req_completion && + ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) || + after(csk->write_seq, (csk->snd_una + csk->snd_win / 2)))) + req_completion = 1; + + if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) { + u32 ulp_mode = cxgbi_skcb_tx_ulp_mode(skb); + + if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) { + iso_cpl = (struct cxgbi_iso_info *)skb->head; + num_pdu = iso_cpl->num_pdu; + hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb); + len += (cxgbi_ulp_extra_len(ulp_mode) * num_pdu) + + (hdr_len * (num_pdu - 1)); + } else { + len += cxgbi_ulp_extra_len(ulp_mode); + } + + cxgb4i_make_tx_data_wr(csk, skb, dlen, len, + credits_needed, req_completion); + csk->snd_nxt += len; + cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR); + } else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) && + (csk->wr_una_cred >= (csk->wr_max_cred / 2))) { + struct cpl_close_con_req *req = + (struct cpl_close_con_req *)skb->data; + + req->wr.wr_hi |= cpu_to_be32(FW_WR_COMPL_F); + } + + total_size += skb->truesize; + t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, + "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n", + csk, csk->state, csk->flags, csk->tid, skb, len); + cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); + } + return total_size; +} + +static inline void free_atid(struct cxgbi_sock *csk) +{ + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); + + if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { + cxgb4_free_atid(lldi->tids, csk->atid); + cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); + cxgbi_sock_put(csk); + } +} + +static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data; + unsigned short tcp_opt = ntohs(req->tcp_opt); + unsigned int tid = GET_TID(req); + unsigned int atid = TID_TID_G(ntohl(req->tos_atid)); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + u32 rcv_isn = be32_to_cpu(req->rcv_isn); + + csk = lookup_atid(t, atid); + if (unlikely(!csk)) { + pr_err("NO conn. 
for atid %u, cdev 0x%p.\n", atid, cdev); + goto rel_skb; + } + + if (csk->atid != atid) { + pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n", + atid, csk, csk->state, csk->flags, csk->tid, csk->atid); + goto rel_skb; + } + + pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n", + (&csk->saddr), (&csk->daddr), + atid, tid, csk, csk->state, csk->flags, rcv_isn); + + module_put(cdev->owner); + + cxgbi_sock_get(csk); + csk->tid = tid; + cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family); + cxgbi_sock_set_flag(csk, CTPF_HAS_TID); + + free_atid(csk); + + spin_lock_bh(&csk->lock); + if (unlikely(csk->state != CTP_ACTIVE_OPEN)) + pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n", + csk, csk->state, csk->flags, csk->tid); + + if (csk->retry_timer.function) { + del_timer(&csk->retry_timer); + csk->retry_timer.function = NULL; + } + + csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; + /* + * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't + * pass through opt0. + */ + if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10)) + csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10); + + csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40; + if (TCPOPT_TSTAMP_G(tcp_opt)) + csk->advmss -= 12; + if (csk->advmss < 128) + csk->advmss = 128; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p, mss_idx %u, advmss %u.\n", + csk, TCPOPT_MSS_G(tcp_opt), csk->advmss); + + cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); + + if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) + send_abort_req(csk); + else { + if (skb_queue_len(&csk->write_queue)) + push_tx_frames(csk, 0); + cxgbi_conn_tx_open(csk); + } + spin_unlock_bh(&csk->lock); + +rel_skb: + __kfree_skb(skb); +} + +static int act_open_rpl_status_to_errno(int status) +{ + switch (status) { + case CPL_ERR_CONN_RESET: + return -ECONNREFUSED; + case CPL_ERR_ARP_MISS: + return -EHOSTUNREACH; + case CPL_ERR_CONN_TIMEDOUT: + return -ETIMEDOUT; + case CPL_ERR_TCAM_FULL: + return -ENOMEM; + case CPL_ERR_CONN_EXIST: + return -EADDRINUSE; + default: + return -EIO; + } +} + +static void csk_act_open_retry_timer(struct timer_list *t) +{ + struct sk_buff *skb = NULL; + struct cxgbi_sock *csk = from_timer(csk, t, retry_timer); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); + void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *, + struct l2t_entry *); + int t4 = is_t4(lldi->adapter_type), size, size6; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + + if (t4) { + size = sizeof(struct cpl_act_open_req); + size6 = sizeof(struct cpl_act_open_req6); + } else { + size = sizeof(struct cpl_t5_act_open_req); + size6 = sizeof(struct cpl_t5_act_open_req6); + } + + if (csk->csk_family == AF_INET) { + send_act_open_func = send_act_open_req; + skb = alloc_wr(size, 0, GFP_ATOMIC); +#if IS_ENABLED(CONFIG_IPV6) + } else { + send_act_open_func = send_act_open_req6; + skb = alloc_wr(size6, 0, GFP_ATOMIC); +#endif + } + + if (!skb) + cxgbi_sock_fail_act_open(csk, -ENOMEM); + else { + skb->sk = (struct sock *)csk; + t4_set_arp_err_handler(skb, csk, + cxgbi_sock_act_open_req_arp_failure); + send_act_open_func(csk, skb, csk->l2t); + } + + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); + +} + +static inline bool is_neg_adv(unsigned int status) +{ + return status == CPL_ERR_RTX_NEG_ADVICE || + status == CPL_ERR_KEEPALV_NEG_ADVICE || + status == 
CPL_ERR_PERSIST_NEG_ADVICE; +} + +static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data; + unsigned int tid = GET_TID(rpl); + unsigned int atid = + TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status))); + unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status)); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + + csk = lookup_atid(t, atid); + if (unlikely(!csk)) { + pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid); + goto rel_skb; + } + + pr_info_ipaddr("tid %u/%u, status %u.\n" + "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr), + atid, tid, status, csk, csk->state, csk->flags); + + if (is_neg_adv(status)) + goto rel_skb; + + module_put(cdev->owner); + + if (status && status != CPL_ERR_TCAM_FULL && + status != CPL_ERR_CONN_EXIST && + status != CPL_ERR_ARP_MISS) + cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl), + csk->csk_family); + + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + + if (status == CPL_ERR_CONN_EXIST && + csk->retry_timer.function != csk_act_open_retry_timer) { + csk->retry_timer.function = csk_act_open_retry_timer; + mod_timer(&csk->retry_timer, jiffies + HZ / 2); + } else + cxgbi_sock_fail_act_open(csk, + act_open_rpl_status_to_errno(status)); + + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); +rel_skb: + __kfree_skb(skb); +} + +static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data; + unsigned int tid = GET_TID(req); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find connection for tid %u.\n", tid); + goto rel_skb; + } + pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n", + (&csk->saddr), (&csk->daddr), + csk, csk->state, csk->flags, csk->tid); + cxgbi_sock_rcv_peer_close(csk); +rel_skb: + __kfree_skb(skb); +} + +static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data; + unsigned int tid = GET_TID(rpl); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find connection for tid %u.\n", tid); + goto rel_skb; + } + pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n", + (&csk->saddr), (&csk->daddr), + csk, csk->state, csk->flags, csk->tid); + cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); +rel_skb: + __kfree_skb(skb); +} + +static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, + int *need_rst) +{ + switch (abort_reason) { + case CPL_ERR_BAD_SYN: + case CPL_ERR_CONN_RESET: + return csk->state > CTP_ESTABLISHED ? 
+ -EPIPE : -ECONNRESET; + case CPL_ERR_XMIT_TIMEDOUT: + case CPL_ERR_PERSIST_TIMEDOUT: + case CPL_ERR_FINWAIT2_TIMEDOUT: + case CPL_ERR_KEEPALIVE_TIMEDOUT: + return -ETIMEDOUT; + default: + return -EIO; + } +} + +static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data; + unsigned int tid = GET_TID(req); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + int rst_status = CPL_ABORT_NO_RST; + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find connection for tid %u.\n", tid); + goto rel_skb; + } + + pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n", + (&csk->saddr), (&csk->daddr), + csk, csk->state, csk->flags, csk->tid, req->status); + + if (is_neg_adv(req->status)) + goto rel_skb; + + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + + cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); + + if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { + send_tx_flowc_wr(csk); + cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); + } + + cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); + cxgbi_sock_set_state(csk, CTP_ABORTING); + + send_abort_rpl(csk, rst_status); + + if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { + csk->err = abort_status_to_errno(csk, req->status, &rst_status); + cxgbi_sock_closed(csk); + } + + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); +rel_skb: + __kfree_skb(skb); +} + +static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data; + unsigned int tid = GET_TID(rpl); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + + csk = lookup_tid(t, tid); + if (!csk) + goto rel_skb; + + pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n", + (&csk->saddr), (&csk->daddr), csk, + csk->state, csk->flags, csk->tid, rpl->status); + + if (rpl->status == CPL_ERR_ABORT_FAILED) + goto rel_skb; + + cxgbi_sock_rcv_abort_rpl(csk); +rel_skb: + __kfree_skb(skb); +} + +static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data; + unsigned int tid = GET_TID(cpl); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + + csk = lookup_tid(t, tid); + if (!csk) { + pr_err("can't find connection for tid %u.\n", tid); + } else { + /* not expecting this, reset the connection. */ + pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid); + spin_lock_bh(&csk->lock); + send_abort_req(csk); + spin_unlock_bh(&csk->lock); + } + __kfree_skb(skb); +} + +static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data; + unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp); + unsigned int tid = GET_TID(cpl); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find conn. 
for tid %u.\n", tid); + goto rel_skb; + } + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n", + csk, csk->state, csk->flags, csk->tid, skb, skb->len, + pdu_len_ddp); + + spin_lock_bh(&csk->lock); + + if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, bad state.\n", + csk, csk->state, csk->flags, csk->tid); + if (csk->state != CTP_ABORTING) + goto abort_conn; + else + goto discard; + } + + cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq); + cxgbi_skcb_flags(skb) = 0; + + skb_reset_transport_header(skb); + __skb_pull(skb, sizeof(*cpl)); + __pskb_trim(skb, ntohs(cpl->len)); + + if (!csk->skb_ulp_lhdr) { + unsigned char *bhs; + unsigned int hlen, dlen, plen; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n", + csk, csk->state, csk->flags, csk->tid, skb); + csk->skb_ulp_lhdr = skb; + cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR); + + if ((CHELSIO_CHIP_VERSION(lldi->adapter_type) <= CHELSIO_T5) && + (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) { + pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n", + csk->tid, cxgbi_skcb_tcp_seq(skb), + csk->rcv_nxt); + goto abort_conn; + } + + bhs = skb->data; + hlen = ntohs(cpl->len); + dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF; + + plen = ISCSI_PDU_LEN_G(pdu_len_ddp); + if (is_t4(lldi->adapter_type)) + plen -= 40; + + if ((hlen + dlen) != plen) { + pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len " + "mismatch %u != %u + %u, seq 0x%x.\n", + csk->tid, plen, hlen, dlen, + cxgbi_skcb_tcp_seq(skb)); + goto abort_conn; + } + + cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3); + if (dlen) + cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len; + csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n", + csk, skb, *bhs, hlen, dlen, + ntohl(*((unsigned int *)(bhs + 16))), + ntohl(*((unsigned int *)(bhs + 24)))); + + } else { + struct sk_buff *lskb = csk->skb_ulp_lhdr; + + cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA); + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n", + csk, csk->state, csk->flags, skb, lskb); + } + + __skb_queue_tail(&csk->receive_queue, skb); + spin_unlock_bh(&csk->lock); + return; + +abort_conn: + send_abort_req(csk); +discard: + spin_unlock_bh(&csk->lock); +rel_skb: + __kfree_skb(skb); +} + +static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data; + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + struct sk_buff *lskb; + u32 tid = GET_TID(cpl); + u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp); + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find conn. 
for tid %u.\n", tid); + goto rel_skb; + } + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n", + csk, csk->state, csk->flags, csk->tid, skb, + skb->len, pdu_len_ddp); + + spin_lock_bh(&csk->lock); + + if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, bad state.\n", + csk, csk->state, csk->flags, csk->tid); + + if (csk->state != CTP_ABORTING) + goto abort_conn; + else + goto discard; + } + + cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq); + cxgbi_skcb_flags(skb) = 0; + + skb_reset_transport_header(skb); + __skb_pull(skb, sizeof(*cpl)); + __pskb_trim(skb, ntohs(cpl->len)); + + if (!csk->skb_ulp_lhdr) + csk->skb_ulp_lhdr = skb; + + lskb = csk->skb_ulp_lhdr; + cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n", + csk, csk->state, csk->flags, skb, lskb); + + __skb_queue_tail(&csk->receive_queue, skb); + spin_unlock_bh(&csk->lock); + return; + +abort_conn: + send_abort_req(csk); +discard: + spin_unlock_bh(&csk->lock); +rel_skb: + __kfree_skb(skb); +} + +static void +cxgb4i_process_ddpvld(struct cxgbi_sock *csk, + struct sk_buff *skb, u32 ddpvld) +{ + if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) { + pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n", + csk, skb, ddpvld, cxgbi_skcb_flags(skb)); + cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR); + } + + if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) { + pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n", + csk, skb, ddpvld, cxgbi_skcb_flags(skb)); + cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR); + } + + if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) { + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n", + csk, skb, ddpvld); + cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR); + } + + if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) && + !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n", + csk, skb, ddpvld); + cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD); + } +} + +static void do_rx_data_ddp(struct cxgbi_device *cdev, + struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct sk_buff *lskb; + struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data; + unsigned int tid = GET_TID(rpl); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + u32 ddpvld = be32_to_cpu(rpl->ddpvld); + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find connection for tid %u.\n", tid); + goto rel_skb; + } + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n", + csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr); + + spin_lock_bh(&csk->lock); + + if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, bad state.\n", + csk, csk->state, csk->flags, csk->tid); + if (csk->state != CTP_ABORTING) + goto abort_conn; + else + goto discard; + } + + if (!csk->skb_ulp_lhdr) { + pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid); + goto abort_conn; + } + + lskb = csk->skb_ulp_lhdr; + csk->skb_ulp_lhdr = NULL; + + cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc); + + if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb)) + pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n", + csk->tid, ntohs(rpl->len), 
cxgbi_skcb_rx_pdulen(lskb)); + + cxgb4i_process_ddpvld(csk, lskb, ddpvld); + + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, lskb 0x%p, f 0x%lx.\n", + csk, lskb, cxgbi_skcb_flags(lskb)); + + cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS); + cxgbi_conn_pdu_ready(csk); + spin_unlock_bh(&csk->lock); + goto rel_skb; + +abort_conn: + send_abort_req(csk); +discard: + spin_unlock_bh(&csk->lock); +rel_skb: + __kfree_skb(skb); +} + +static void +do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data; + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + struct sk_buff *data_skb = NULL; + u32 tid = GET_TID(rpl); + u32 ddpvld = be32_to_cpu(rpl->ddpvld); + u32 seq = be32_to_cpu(rpl->seq); + u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp); + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find connection for tid %u.\n", tid); + goto rel_skb; + } + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, " + "pdu_len_ddp %u, status %u.\n", + csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr, + ntohs(rpl->len), pdu_len_ddp, rpl->status); + + spin_lock_bh(&csk->lock); + + if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, bad state.\n", + csk, csk->state, csk->flags, csk->tid); + + if (csk->state != CTP_ABORTING) + goto abort_conn; + else + goto discard; + } + + cxgbi_skcb_tcp_seq(skb) = seq; + cxgbi_skcb_flags(skb) = 0; + cxgbi_skcb_rx_pdulen(skb) = 0; + + skb_reset_transport_header(skb); + __skb_pull(skb, sizeof(*rpl)); + __pskb_trim(skb, be16_to_cpu(rpl->len)); + + csk->rcv_nxt = seq + pdu_len_ddp; + + if (csk->skb_ulp_lhdr) { + data_skb = skb_peek(&csk->receive_queue); + if (!data_skb || + !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) { + pr_err("Error! 
freelist data not found 0x%p, tid %u\n", + data_skb, tid); + + goto abort_conn; + } + __skb_unlink(data_skb, &csk->receive_queue); + + cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA); + + __skb_queue_tail(&csk->receive_queue, skb); + __skb_queue_tail(&csk->receive_queue, data_skb); + } else { + __skb_queue_tail(&csk->receive_queue, skb); + } + + csk->skb_ulp_lhdr = NULL; + + cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR); + cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS); + cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL); + cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc); + + cxgb4i_process_ddpvld(csk, skb, ddpvld); + + log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n", + csk, skb, cxgbi_skcb_flags(skb)); + + cxgbi_conn_pdu_ready(csk); + spin_unlock_bh(&csk->lock); + + return; + +abort_conn: + send_abort_req(csk); +discard: + spin_unlock_bh(&csk->lock); +rel_skb: + __kfree_skb(skb); +} + +static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data; + unsigned int tid = GET_TID(rpl); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) + pr_err("can't find connection for tid %u.\n", tid); + else { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una), + rpl->seq_vld); + } + __kfree_skb(skb); +} + +static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data; + unsigned int tid = GET_TID(rpl); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + struct cxgbi_sock *csk; + + csk = lookup_tid(t, tid); + if (!csk) { + pr_err("can't find conn. 
for tid %u.\n", tid); + return; + } + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,%lx,%u, status 0x%x.\n", + csk, csk->state, csk->flags, csk->tid, rpl->status); + + if (rpl->status != CPL_ERR_NONE) { + pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", + csk, tid, rpl->status); + csk->err = -EINVAL; + } + + complete(&csk->cmpl); + + __kfree_skb(skb); +} + +static int alloc_cpls(struct cxgbi_sock *csk) +{ + csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), + 0, GFP_KERNEL); + if (!csk->cpl_close) + return -ENOMEM; + + csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), + 0, GFP_KERNEL); + if (!csk->cpl_abort_req) + goto free_cpls; + + csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), + 0, GFP_KERNEL); + if (!csk->cpl_abort_rpl) + goto free_cpls; + return 0; + +free_cpls: + cxgbi_sock_free_cpl_skbs(csk); + return -ENOMEM; +} + +static inline void l2t_put(struct cxgbi_sock *csk) +{ + if (csk->l2t) { + cxgb4_l2t_release(csk->l2t); + csk->l2t = NULL; + cxgbi_sock_put(csk); + } +} + +static void release_offload_resources(struct cxgbi_sock *csk) +{ + struct cxgb4_lld_info *lldi; +#if IS_ENABLED(CONFIG_IPV6) + struct net_device *ndev = csk->cdev->ports[csk->port_id]; +#endif + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + cxgbi_sock_free_cpl_skbs(csk); + cxgbi_sock_purge_write_queue(csk); + if (csk->wr_cred != csk->wr_max_cred) { + cxgbi_sock_purge_wr_queue(csk); + cxgbi_sock_reset_wr_list(csk); + } + + l2t_put(csk); +#if IS_ENABLED(CONFIG_IPV6) + if (csk->csk_family == AF_INET6) + cxgb4_clip_release(ndev, + (const u32 *)&csk->saddr6.sin6_addr, 1); +#endif + + if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) + free_atid(csk); + else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { + lldi = cxgbi_cdev_priv(csk->cdev); + cxgb4_remove_tid(lldi->tids, 0, csk->tid, + csk->csk_family); + cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); + cxgbi_sock_put(csk); + } + csk->dst = NULL; +} + +#ifdef CONFIG_CHELSIO_T4_DCB +static inline u8 get_iscsi_dcb_state(struct net_device *ndev) +{ + return ndev->dcbnl_ops->getstate(ndev); +} + +static int select_priority(int pri_mask) +{ + if (!pri_mask) + return 0; + return (ffs(pri_mask) - 1); +} + +static u8 get_iscsi_dcb_priority(struct net_device *ndev) +{ + int rv; + u8 caps; + + struct dcb_app iscsi_dcb_app = { + .protocol = 3260 + }; + + rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps); + if (rv) + return 0; + + if (caps & DCB_CAP_DCBX_VER_IEEE) { + iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM; + rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); + if (!rv) { + iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY; + rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); + } + } else if (caps & DCB_CAP_DCBX_VER_CEE) { + iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM; + rv = dcb_getapp(ndev, &iscsi_dcb_app); + } + + log_debug(1 << CXGBI_DBG_ISCSI, + "iSCSI priority is set to %u\n", select_priority(rv)); + return select_priority(rv); +} +#endif + +static int init_act_open(struct cxgbi_sock *csk) +{ + struct cxgbi_device *cdev = csk->cdev; + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct net_device *ndev = cdev->ports[csk->port_id]; + struct sk_buff *skb = NULL; + struct neighbour *n = NULL; + void *daddr; + unsigned int step; + unsigned int rxq_idx; + unsigned int size, size6; + unsigned int linkspeed; + unsigned int rcv_winf, snd_winf; +#ifdef CONFIG_CHELSIO_T4_DCB + u8 priority = 0; +#endif + log_debug(1 << CXGBI_DBG_TOE | 1 
<< CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + if (csk->csk_family == AF_INET) + daddr = &csk->daddr.sin_addr.s_addr; +#if IS_ENABLED(CONFIG_IPV6) + else if (csk->csk_family == AF_INET6) + daddr = &csk->daddr6.sin6_addr; +#endif + else { + pr_err("address family 0x%x not supported\n", csk->csk_family); + goto rel_resource; + } + + n = dst_neigh_lookup(csk->dst, daddr); + + if (!n) { + pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name); + goto rel_resource; + } + + if (!(n->nud_state & NUD_VALID)) + neigh_event_send(n, NULL); + + csk->atid = cxgb4_alloc_atid(lldi->tids, csk); + if (csk->atid < 0) { + pr_err("%s, NO atid available.\n", ndev->name); + goto rel_resource_without_clip; + } + cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); + cxgbi_sock_get(csk); + +#ifdef CONFIG_CHELSIO_T4_DCB + if (get_iscsi_dcb_state(ndev)) + priority = get_iscsi_dcb_priority(ndev); + + csk->dcb_priority = priority; + csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority); +#else + csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); +#endif + if (!csk->l2t) { + pr_err("%s, cannot alloc l2t.\n", ndev->name); + goto rel_resource_without_clip; + } + cxgbi_sock_get(csk); + +#if IS_ENABLED(CONFIG_IPV6) + if (csk->csk_family == AF_INET6) + cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1); +#endif + + if (is_t4(lldi->adapter_type)) { + size = sizeof(struct cpl_act_open_req); + size6 = sizeof(struct cpl_act_open_req6); + } else if (is_t5(lldi->adapter_type)) { + size = sizeof(struct cpl_t5_act_open_req); + size6 = sizeof(struct cpl_t5_act_open_req6); + } else { + size = sizeof(struct cpl_t6_act_open_req); + size6 = sizeof(struct cpl_t6_act_open_req6); + } + + if (csk->csk_family == AF_INET) + skb = alloc_wr(size, 0, GFP_NOIO); +#if IS_ENABLED(CONFIG_IPV6) + else + skb = alloc_wr(size6, 0, GFP_NOIO); +#endif + + if (!skb) + goto rel_resource; + skb->sk = (struct sock *)csk; + t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure); + + if (!csk->mtu) + csk->mtu = dst_mtu(csk->dst); + cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx); + csk->tx_chan = cxgb4_port_chan(ndev); + csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx; + step = lldi->ntxq / lldi->nchan; + csk->txq_idx = cxgb4_port_idx(ndev) * step; + step = lldi->nrxq / lldi->nchan; + rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step); + cdev->rxq_idx_cntr++; + csk->rss_qid = lldi->rxq_ids[rxq_idx]; + linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed; + csk->snd_win = cxgb4i_snd_win; + csk->rcv_win = cxgb4i_rcv_win; + if (cxgb4i_rcv_win <= 0) { + csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN; + rcv_winf = linkspeed / SPEED_10000; + if (rcv_winf) + csk->rcv_win *= rcv_winf; + } + if (cxgb4i_snd_win <= 0) { + csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN; + snd_winf = linkspeed / SPEED_10000; + if (snd_winf) + csk->snd_win *= snd_winf; + } + csk->wr_cred = lldi->wr_cred - + DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16); + csk->wr_max_cred = csk->wr_cred; + csk->wr_una_cred = 0; + cxgbi_sock_reset_wr_list(csk); + csk->err = 0; + + pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n", + (&csk->saddr), (&csk->daddr), csk, csk->state, + csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid, + csk->mtu, csk->mss_idx, csk->smac_idx); + + /* must wait for either a act_open_rpl or act_open_establish */ + if (!try_module_get(cdev->owner)) { + pr_err("%s, try_module_get failed.\n", ndev->name); + goto rel_resource; + } + + 
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); + if (csk->csk_family == AF_INET) + send_act_open_req(csk, skb, csk->l2t); +#if IS_ENABLED(CONFIG_IPV6) + else + send_act_open_req6(csk, skb, csk->l2t); +#endif + neigh_release(n); + + return 0; + +rel_resource: +#if IS_ENABLED(CONFIG_IPV6) + if (csk->csk_family == AF_INET6) + cxgb4_clip_release(ndev, + (const u32 *)&csk->saddr6.sin6_addr, 1); +#endif +rel_resource_without_clip: + if (n) + neigh_release(n); + if (skb) + __kfree_skb(skb); + return -EINVAL; +} + +static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { + [CPL_ACT_ESTABLISH] = do_act_establish, + [CPL_ACT_OPEN_RPL] = do_act_open_rpl, + [CPL_PEER_CLOSE] = do_peer_close, + [CPL_ABORT_REQ_RSS] = do_abort_req_rss, + [CPL_ABORT_RPL_RSS] = do_abort_rpl_rss, + [CPL_CLOSE_CON_RPL] = do_close_con_rpl, + [CPL_FW4_ACK] = do_fw4_ack, + [CPL_ISCSI_HDR] = do_rx_iscsi_hdr, + [CPL_ISCSI_DATA] = do_rx_iscsi_data, + [CPL_SET_TCB_RPL] = do_set_tcb_rpl, + [CPL_RX_DATA_DDP] = do_rx_data_ddp, + [CPL_RX_ISCSI_DDP] = do_rx_data_ddp, + [CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp, + [CPL_RX_DATA] = do_rx_data, +}; + +static int cxgb4i_ofld_init(struct cxgbi_device *cdev) +{ + int rc; + + if (cxgb4i_max_connect > CXGB4I_MAX_CONN) + cxgb4i_max_connect = CXGB4I_MAX_CONN; + + rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base, + cxgb4i_max_connect); + if (rc < 0) + return rc; + + cdev->csk_release_offload_resources = release_offload_resources; + cdev->csk_push_tx_frames = push_tx_frames; + cdev->csk_send_abort_req = send_abort_req; + cdev->csk_send_close_req = send_close_req; + cdev->csk_send_rx_credits = send_rx_credits; + cdev->csk_alloc_cpls = alloc_cpls; + cdev->csk_init_act_open = init_act_open; + + pr_info("cdev 0x%p, offload up, added.\n", cdev); + return 0; +} + +static inline void +ulp_mem_io_set_hdr(struct cxgbi_device *cdev, + struct ulp_mem_io *req, + unsigned int wr_len, unsigned int dlen, + unsigned int pm_addr, + int tid) +{ + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1); + + INIT_ULPTX_WR(req, wr_len, 0, tid); + req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | + FW_WR_ATOMIC_V(0)); + req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | + ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) | + T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type))); + req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5)); + req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5)); + req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)); + + idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM)); + idata->len = htonl(dlen); +} + +static struct sk_buff * +ddp_ppod_init_idata(struct cxgbi_device *cdev, + struct cxgbi_ppm *ppm, + unsigned int idx, unsigned int npods, + unsigned int tid) +{ + unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit; + unsigned int dlen = npods << PPOD_SIZE_SHIFT; + unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) + + sizeof(struct ulptx_idata) + dlen, 16); + struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC); + + if (!skb) { + pr_err("%s: %s idx %u, npods %u, OOM.\n", + __func__, ppm->ndev->name, idx, npods); + return NULL; + } + + ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen, + pm_addr, tid); + + return skb; +} + +static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, + struct cxgbi_task_tag_info *ttinfo, + unsigned int idx, unsigned int npods, + struct scatterlist **sg_pp, + unsigned int *sg_off) +{ + struct cxgbi_device *cdev = csk->cdev; + struct sk_buff *skb 
= ddp_ppod_init_idata(cdev, ppm, idx, npods, + csk->tid); + struct ulp_mem_io *req; + struct ulptx_idata *idata; + struct cxgbi_pagepod *ppod; + int i; + + if (!skb) + return -ENOMEM; + + req = (struct ulp_mem_io *)skb->head; + idata = (struct ulptx_idata *)(req + 1); + ppod = (struct cxgbi_pagepod *)(idata + 1); + + for (i = 0; i < npods; i++, ppod++) + cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off); + + cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE); + cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL); + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); + + spin_lock_bh(&csk->lock); + cxgbi_sock_skb_entail(csk, skb); + spin_unlock_bh(&csk->lock); + + return 0; +} + +static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, + struct cxgbi_task_tag_info *ttinfo) +{ + unsigned int pidx = ttinfo->idx; + unsigned int npods = ttinfo->npods; + unsigned int i, cnt; + int err = 0; + struct scatterlist *sg = ttinfo->sgl; + unsigned int offset = 0; + + ttinfo->cid = csk->port_id; + + for (i = 0; i < npods; i += cnt, pidx += cnt) { + cnt = npods - i; + + if (cnt > ULPMEM_IDATA_MAX_NPPODS) + cnt = ULPMEM_IDATA_MAX_NPPODS; + err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt, + &sg, &offset); + if (err < 0) + break; + } + + return err; +} + +static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, + int pg_idx) +{ + struct sk_buff *skb; + struct cpl_set_tcb_field *req; + + if (!pg_idx || pg_idx >= DDP_PGIDX_MAX) + return 0; + + skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + /* set up ulp page size */ + req = (struct cpl_set_tcb_field *)skb->head; + INIT_TP_WR(req, csk->tid); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); + req->word_cookie = htons(0); + req->mask = cpu_to_be64(0x3 << 8); + req->val = cpu_to_be64(pg_idx << 8); + set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); + + reinit_completion(&csk->cmpl); + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); + wait_for_completion(&csk->cmpl); + + return csk->err; +} + +static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, + int hcrc, int dcrc) +{ + struct sk_buff *skb; + struct cpl_set_tcb_field *req; + + if (!hcrc && !dcrc) + return 0; + + skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + csk->hcrc_len = (hcrc ? 4 : 0); + csk->dcrc_len = (dcrc ? 4 : 0); + /* set up ulp submode */ + req = (struct cpl_set_tcb_field *)skb->head; + INIT_TP_WR(req, tid); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); + req->word_cookie = htons(0); + req->mask = cpu_to_be64(0x3 << 4); + req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | + (dcrc ? 
ULP_CRC_DATA : 0)) << 4); + set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); + + reinit_completion(&csk->cmpl); + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); + wait_for_completion(&csk->cmpl); + + return csk->err; +} + +static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) +{ + return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *) + (cxgbi_cdev_priv(cdev)))->iscsi_ppm); +} + +static int cxgb4i_ddp_init(struct cxgbi_device *cdev) +{ + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct net_device *ndev = cdev->ports[0]; + struct cxgbi_tag_format tformat; + int i, err; + + if (!lldi->vr->iscsi.size) { + pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name); + return -EACCES; + } + + cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ; + + memset(&tformat, 0, sizeof(struct cxgbi_tag_format)); + for (i = 0; i < 4; i++) + tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3)) + & 0xF; + cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat); + + pr_info("iscsi_edram.start 0x%x iscsi_edram.size 0x%x", + lldi->vr->ppod_edram.start, lldi->vr->ppod_edram.size); + + err = cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat, + lldi->vr->iscsi.size, lldi->iscsi_llimit, + lldi->vr->iscsi.start, 2, + lldi->vr->ppod_edram.start, + lldi->vr->ppod_edram.size); + + if (err < 0) + return err; + + cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; + cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; + cdev->csk_ddp_set_map = ddp_set_map; + cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, + lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN); + cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, + lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN); + cdev->cdev2ppm = cdev2ppm; + + return 0; +} + +static bool is_memfree(struct adapter *adap) +{ + u32 io; + + io = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (is_t5(adap->params.chip)) { + if ((io & EXT_MEM0_ENABLE_F) || (io & EXT_MEM1_ENABLE_F)) + return false; + } else if (io & EXT_MEM_ENABLE_F) { + return false; + } + + return true; +} + +static void *t4_uld_add(const struct cxgb4_lld_info *lldi) +{ + struct cxgbi_device *cdev; + struct port_info *pi; + struct net_device *ndev; + struct adapter *adap; + struct tid_info *t; + u32 max_cmds = CXGB4I_SCSI_HOST_QDEPTH; + u32 max_conn = CXGBI_MAX_CONN; + int i, rc; + + cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports); + if (!cdev) { + pr_info("t4 device 0x%p, register failed.\n", lldi); + return NULL; + } + pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n", + cdev, lldi->adapter_type, lldi->nports, + lldi->ports[0]->name, lldi->nchan, lldi->ntxq, + lldi->nrxq, lldi->wr_cred); + for (i = 0; i < lldi->nrxq; i++) + log_debug(1 << CXGBI_DBG_DEV, + "t4 0x%p, rxq id #%d: %u.\n", + cdev, i, lldi->rxq_ids[i]); + + memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi)); + cdev->flags = CXGBI_FLAG_DEV_T4; + cdev->pdev = lldi->pdev; + cdev->ports = lldi->ports; + cdev->nports = lldi->nports; + cdev->mtus = lldi->mtus; + cdev->nmtus = NMTUS; + cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <= + CHELSIO_T5) ? 
cxgb4i_rx_credit_thres : 0; + cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN; + cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr); + cdev->itp = &cxgb4i_iscsi_transport; + cdev->owner = THIS_MODULE; + + cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf); + pr_info("cdev 0x%p,%s, pfvf %u.\n", + cdev, lldi->ports[0]->name, cdev->pfvf); + + rc = cxgb4i_ddp_init(cdev); + if (rc) { + pr_info("t4 0x%p ddp init failed %d.\n", cdev, rc); + goto err_out; + } + + ndev = cdev->ports[0]; + adap = netdev2adap(ndev); + if (adap) { + t = &adap->tids; + if (t->ntids <= CXGBI_MAX_CONN) + max_conn = t->ntids; + + if (is_memfree(adap)) { + cdev->flags |= CXGBI_FLAG_DEV_ISO_OFF; + max_cmds = CXGB4I_SCSI_HOST_QDEPTH >> 2; + + pr_info("%s: 0x%p, tid %u, SO adapter.\n", + ndev->name, cdev, t->ntids); + } + } else { + pr_info("%s, 0x%p, NO adapter struct.\n", ndev->name, cdev); + } + + /* ISO is enabled in T5/T6 firmware version >= 1.13.43.0 */ + if (!is_t4(lldi->adapter_type) && + (lldi->fw_vers >= 0x10d2b00) && + !(cdev->flags & CXGBI_FLAG_DEV_ISO_OFF)) + cdev->skb_iso_txhdr = sizeof(struct cpl_tx_data_iso); + + rc = cxgb4i_ofld_init(cdev); + if (rc) { + pr_info("t4 0x%p ofld init failed.\n", cdev); + goto err_out; + } + + cxgb4i_host_template.can_queue = max_cmds; + rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, max_conn, + &cxgb4i_host_template, cxgb4i_stt); + if (rc) + goto err_out; + + for (i = 0; i < cdev->nports; i++) { + pi = netdev_priv(lldi->ports[i]); + cdev->hbas[i]->port_id = pi->port_id; + } + return cdev; + +err_out: + cxgbi_device_unregister(cdev); + return ERR_PTR(-ENOMEM); +} + +#define RX_PULL_LEN 128 +static int t4_uld_rx_handler(void *handle, const __be64 *rsp, + const struct pkt_gl *pgl) +{ + const struct cpl_act_establish *rpl; + struct sk_buff *skb; + unsigned int opc; + struct cxgbi_device *cdev = handle; + + if (pgl == NULL) { + unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8; + + skb = alloc_wr(len, 0, GFP_ATOMIC); + if (!skb) + goto nomem; + skb_copy_to_linear_data(skb, &rsp[1], len); + } else { + if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) { + pr_info("? 
FL 0x%p,RSS%#llx,FL %#llx,len %u.\n", + pgl->va, be64_to_cpu(*rsp), + be64_to_cpu(*(u64 *)pgl->va), + pgl->tot_len); + return 0; + } + skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN); + if (unlikely(!skb)) + goto nomem; + } + + rpl = (struct cpl_act_establish *)skb->data; + opc = rpl->ot.opcode; + log_debug(1 << CXGBI_DBG_TOE, + "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n", + cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb); + if (opc >= ARRAY_SIZE(cxgb4i_cplhandlers) || !cxgb4i_cplhandlers[opc]) { + pr_err("No handler for opcode 0x%x.\n", opc); + __kfree_skb(skb); + } else + cxgb4i_cplhandlers[opc](cdev, skb); + + return 0; +nomem: + log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n"); + return 1; +} + +static int t4_uld_state_change(void *handle, enum cxgb4_state state) +{ + struct cxgbi_device *cdev = handle; + + switch (state) { + case CXGB4_STATE_UP: + pr_info("cdev 0x%p, UP.\n", cdev); + break; + case CXGB4_STATE_START_RECOVERY: + pr_info("cdev 0x%p, RECOVERY.\n", cdev); + /* close all connections */ + break; + case CXGB4_STATE_DOWN: + pr_info("cdev 0x%p, DOWN.\n", cdev); + break; + case CXGB4_STATE_DETACH: + pr_info("cdev 0x%p, DETACH.\n", cdev); + cxgbi_device_unregister(cdev); + break; + default: + pr_info("cdev 0x%p, unknown state %d.\n", cdev, state); + break; + } + return 0; +} + +#ifdef CONFIG_CHELSIO_T4_DCB +static int +cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val, + void *data) +{ + int i, port = 0xFF; + struct net_device *ndev; + struct cxgbi_device *cdev = NULL; + struct dcb_app_type *iscsi_app = data; + struct cxgbi_ports_map *pmap; + u8 priority; + + if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) { + if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) && + (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)) + return NOTIFY_DONE; + + priority = iscsi_app->app.priority; + } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) { + if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM) + return NOTIFY_DONE; + + if (!iscsi_app->app.priority) + return NOTIFY_DONE; + + priority = ffs(iscsi_app->app.priority) - 1; + } else { + return NOTIFY_DONE; + } + + if (iscsi_app->app.protocol != 3260) + return NOTIFY_DONE; + + log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n", + iscsi_app->ifindex, priority); + + ndev = dev_get_by_index(&init_net, iscsi_app->ifindex); + if (!ndev) + return NOTIFY_DONE; + + cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port); + + dev_put(ndev); + if (!cdev) + return NOTIFY_DONE; + + pmap = &cdev->pmap; + + for (i = 0; i < pmap->used; i++) { + if (pmap->port_csk[i]) { + struct cxgbi_sock *csk = pmap->port_csk[i]; + + if (csk->dcb_priority != priority) { + iscsi_conn_failure(csk->user_data, + ISCSI_ERR_CONN_FAILED); + pr_info("Restarting iSCSI connection %p with " + "priority %u->%u.\n", csk, + csk->dcb_priority, priority); + } + } + } + return NOTIFY_OK; +} +#endif + +static int __init cxgb4i_init_module(void) +{ + int rc; + + printk(KERN_INFO "%s", version); + + rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt); + if (rc < 0) + return rc; + cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info); + +#ifdef CONFIG_CHELSIO_T4_DCB + pr_info("%s dcb enabled.\n", DRV_MODULE_NAME); + register_dcbevent_notifier(&cxgb4_dcb_change); +#endif + return 0; +} + +static void __exit cxgb4i_exit_module(void) +{ +#ifdef CONFIG_CHELSIO_T4_DCB + unregister_dcbevent_notifier(&cxgb4_dcb_change); +#endif + cxgb4_unregister_uld(CXGB4_ULD_ISCSI); + cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4); + 
cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt); +} + +module_init(cxgb4i_init_module); +module_exit(cxgb4i_exit_module); diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h new file mode 100644 index 000000000..2fd9c76fc --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h @@ -0,0 +1,28 @@ +/* + * cxgb4i.h: Chelsio T4 iSCSI driver. + * + * Copyright (c) 2010-2015 Chelsio Communications, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Karen Xie (kxie@chelsio.com) + * Written by: Rakesh Ranjan (rranjan@chelsio.com) + */ + +#ifndef __CXGB4I_H__ +#define __CXGB4I_H__ + +#define CXGB4I_SCSI_HOST_QDEPTH 1024 +#define CXGB4I_MAX_CONN 16384 +#define CXGB4I_MAX_TARGET CXGB4I_MAX_CONN +#define CXGB4I_MAX_LUN 0x1000 + +/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */ +#define CXGB4I_TX_HEADER_LEN \ + (sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr)) + +#define T5_ISS_VALID (1 << 18) + +#endif /* __CXGB4I_H__ */ diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c new file mode 100644 index 000000000..abde60a50 --- /dev/null +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -0,0 +1,3097 @@ +/* + * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver. + * + * Copyright (c) 2010-2015 Chelsio Communications, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Karen Xie (kxie@chelsio.com) + * Written by: Rakesh Ranjan (rranjan@chelsio.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include <linux/skbuff.h> +#include <linux/crypto.h> +#include <linux/scatterlist.h> +#include <linux/pci.h> +#include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_host.h> +#include <linux/if_vlan.h> +#include <linux/inet.h> +#include <net/dst.h> +#include <net/route.h> +#include <net/ipv6.h> +#include <net/ip6_route.h> +#include <net/addrconf.h> + +#include <linux/inetdevice.h> /* ip_dev_find */ +#include <linux/module.h> +#include <net/tcp.h> + +static unsigned int dbg_level; + +#include "libcxgbi.h" + +#define DRV_MODULE_NAME "libcxgbi" +#define DRV_MODULE_DESC "Chelsio iSCSI driver library" +#define DRV_MODULE_VERSION "0.9.1-ko" +#define DRV_MODULE_RELDATE "Apr. 
2015" + +static char version[] = + DRV_MODULE_DESC " " DRV_MODULE_NAME + " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Chelsio Communications, Inc."); +MODULE_DESCRIPTION(DRV_MODULE_DESC); +MODULE_VERSION(DRV_MODULE_VERSION); +MODULE_LICENSE("GPL"); + +module_param(dbg_level, uint, 0644); +MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)"); + + +/* + * cxgbi device management + * maintains a list of the cxgbi devices + */ +static LIST_HEAD(cdev_list); +static DEFINE_MUTEX(cdev_mutex); + +static LIST_HEAD(cdev_rcu_list); +static DEFINE_SPINLOCK(cdev_rcu_lock); + +static inline void cxgbi_decode_sw_tag(u32 sw_tag, int *idx, int *age) +{ + if (age) + *age = sw_tag & 0x7FFF; + if (idx) + *idx = (sw_tag >> 16) & 0x7FFF; +} + +int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, + unsigned int max_conn) +{ + struct cxgbi_ports_map *pmap = &cdev->pmap; + + pmap->port_csk = kvzalloc(array_size(max_conn, + sizeof(struct cxgbi_sock *)), + GFP_KERNEL | __GFP_NOWARN); + if (!pmap->port_csk) { + pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn); + return -ENOMEM; + } + + pmap->max_connect = max_conn; + pmap->sport_base = base; + spin_lock_init(&pmap->lock); + return 0; +} +EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create); + +void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev) +{ + struct cxgbi_ports_map *pmap = &cdev->pmap; + struct cxgbi_sock *csk; + int i; + + for (i = 0; i < pmap->max_connect; i++) { + if (pmap->port_csk[i]) { + csk = pmap->port_csk[i]; + pmap->port_csk[i] = NULL; + log_debug(1 << CXGBI_DBG_SOCK, + "csk 0x%p, cdev 0x%p, offload down.\n", + csk, cdev); + spin_lock_bh(&csk->lock); + cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN); + cxgbi_sock_closed(csk); + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); + } + } +} +EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup); + +static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) +{ + log_debug(1 << CXGBI_DBG_DEV, + "cdev 0x%p, p# %u.\n", cdev, cdev->nports); + cxgbi_hbas_remove(cdev); + cxgbi_device_portmap_cleanup(cdev); + if (cdev->cdev2ppm) + cxgbi_ppm_release(cdev->cdev2ppm(cdev)); + if (cdev->pmap.max_connect) + kvfree(cdev->pmap.port_csk); + kfree(cdev); +} + +struct cxgbi_device *cxgbi_device_register(unsigned int extra, + unsigned int nports) +{ + struct cxgbi_device *cdev; + + cdev = kzalloc(sizeof(*cdev) + extra + nports * + (sizeof(struct cxgbi_hba *) + + sizeof(struct net_device *)), + GFP_KERNEL); + if (!cdev) { + pr_warn("nport %d, OOM.\n", nports); + return NULL; + } + cdev->ports = (struct net_device **)(cdev + 1); + cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports * + sizeof(struct net_device *)); + if (extra) + cdev->dd_data = ((char *)cdev->hbas) + + nports * sizeof(struct cxgbi_hba *); + spin_lock_init(&cdev->pmap.lock); + + mutex_lock(&cdev_mutex); + list_add_tail(&cdev->list_head, &cdev_list); + mutex_unlock(&cdev_mutex); + + spin_lock(&cdev_rcu_lock); + list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list); + spin_unlock(&cdev_rcu_lock); + + log_debug(1 << CXGBI_DBG_DEV, + "cdev 0x%p, p# %u.\n", cdev, nports); + return cdev; +} +EXPORT_SYMBOL_GPL(cxgbi_device_register); + +void cxgbi_device_unregister(struct cxgbi_device *cdev) +{ + log_debug(1 << CXGBI_DBG_DEV, + "cdev 0x%p, p# %u,%s.\n", + cdev, cdev->nports, cdev->nports ? 
cdev->ports[0]->name : ""); + + mutex_lock(&cdev_mutex); + list_del(&cdev->list_head); + mutex_unlock(&cdev_mutex); + + spin_lock(&cdev_rcu_lock); + list_del_rcu(&cdev->rcu_node); + spin_unlock(&cdev_rcu_lock); + synchronize_rcu(); + + cxgbi_device_destroy(cdev); +} +EXPORT_SYMBOL_GPL(cxgbi_device_unregister); + +void cxgbi_device_unregister_all(unsigned int flag) +{ + struct cxgbi_device *cdev, *tmp; + + mutex_lock(&cdev_mutex); + list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { + if ((cdev->flags & flag) == flag) { + mutex_unlock(&cdev_mutex); + cxgbi_device_unregister(cdev); + mutex_lock(&cdev_mutex); + } + } + mutex_unlock(&cdev_mutex); +} +EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all); + +struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev) +{ + struct cxgbi_device *cdev, *tmp; + + mutex_lock(&cdev_mutex); + list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { + if (cdev->lldev == lldev) { + mutex_unlock(&cdev_mutex); + return cdev; + } + } + mutex_unlock(&cdev_mutex); + + log_debug(1 << CXGBI_DBG_DEV, + "lldev 0x%p, NO match found.\n", lldev); + return NULL; +} +EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev); + +struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, + int *port) +{ + struct net_device *vdev = NULL; + struct cxgbi_device *cdev, *tmp; + int i; + + if (is_vlan_dev(ndev)) { + vdev = ndev; + ndev = vlan_dev_real_dev(ndev); + log_debug(1 << CXGBI_DBG_DEV, + "vlan dev %s -> %s.\n", vdev->name, ndev->name); + } + + mutex_lock(&cdev_mutex); + list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { + for (i = 0; i < cdev->nports; i++) { + if (ndev == cdev->ports[i]) { + cdev->hbas[i]->vdev = vdev; + mutex_unlock(&cdev_mutex); + if (port) + *port = i; + return cdev; + } + } + } + mutex_unlock(&cdev_mutex); + log_debug(1 << CXGBI_DBG_DEV, + "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); + return NULL; +} +EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev); + +struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev, + int *port) +{ + struct net_device *vdev = NULL; + struct cxgbi_device *cdev; + int i; + + if (is_vlan_dev(ndev)) { + vdev = ndev; + ndev = vlan_dev_real_dev(ndev); + pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); + } + + rcu_read_lock(); + list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) { + for (i = 0; i < cdev->nports; i++) { + if (ndev == cdev->ports[i]) { + cdev->hbas[i]->vdev = vdev; + rcu_read_unlock(); + if (port) + *port = i; + return cdev; + } + } + } + rcu_read_unlock(); + + log_debug(1 << CXGBI_DBG_DEV, + "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); + return NULL; +} +EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu); + +static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev, + int *port) +{ + struct net_device *vdev = NULL; + struct cxgbi_device *cdev, *tmp; + int i; + + if (is_vlan_dev(ndev)) { + vdev = ndev; + ndev = vlan_dev_real_dev(ndev); + pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); + } + + mutex_lock(&cdev_mutex); + list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { + for (i = 0; i < cdev->nports; i++) { + if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr, + MAX_ADDR_LEN)) { + cdev->hbas[i]->vdev = vdev; + mutex_unlock(&cdev_mutex); + if (port) + *port = i; + return cdev; + } + } + } + mutex_unlock(&cdev_mutex); + log_debug(1 << CXGBI_DBG_DEV, + "ndev 0x%p, %s, NO match mac found.\n", + ndev, ndev->name); + return NULL; +} + +void cxgbi_hbas_remove(struct cxgbi_device *cdev) +{ + int i; + struct 
cxgbi_hba *chba; + + log_debug(1 << CXGBI_DBG_DEV, + "cdev 0x%p, p#%u.\n", cdev, cdev->nports); + + for (i = 0; i < cdev->nports; i++) { + chba = cdev->hbas[i]; + if (chba) { + cdev->hbas[i] = NULL; + iscsi_host_remove(chba->shost, false); + pci_dev_put(cdev->pdev); + iscsi_host_free(chba->shost); + } + } +} +EXPORT_SYMBOL_GPL(cxgbi_hbas_remove); + +int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun, + unsigned int max_conns, const struct scsi_host_template *sht, + struct scsi_transport_template *stt) +{ + struct cxgbi_hba *chba; + struct Scsi_Host *shost; + int i, err; + + log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports); + + for (i = 0; i < cdev->nports; i++) { + shost = iscsi_host_alloc(sht, sizeof(*chba), 1); + if (!shost) { + pr_info("0x%p, p%d, %s, host alloc failed.\n", + cdev, i, cdev->ports[i]->name); + err = -ENOMEM; + goto err_out; + } + + shost->transportt = stt; + shost->max_lun = max_lun; + shost->max_id = max_conns - 1; + shost->max_channel = 0; + shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE; + + chba = iscsi_host_priv(shost); + chba->cdev = cdev; + chba->ndev = cdev->ports[i]; + chba->shost = shost; + + shost->can_queue = sht->can_queue - ISCSI_MGMT_CMDS_MAX; + + log_debug(1 << CXGBI_DBG_DEV, + "cdev 0x%p, p#%d %s: chba 0x%p.\n", + cdev, i, cdev->ports[i]->name, chba); + + pci_dev_get(cdev->pdev); + err = iscsi_host_add(shost, &cdev->pdev->dev); + if (err) { + pr_info("cdev 0x%p, p#%d %s, host add failed.\n", + cdev, i, cdev->ports[i]->name); + pci_dev_put(cdev->pdev); + scsi_host_put(shost); + goto err_out; + } + + cdev->hbas[i] = chba; + } + + return 0; + +err_out: + cxgbi_hbas_remove(cdev); + return err; +} +EXPORT_SYMBOL_GPL(cxgbi_hbas_add); + +/* + * iSCSI offload + * + * - source port management + * To find a free source port in the port allocation map we use a very simple + * rotor scheme to look for the next free port. + * + * If a source port has been specified make sure that it doesn't collide with + * our normal source port allocation map. If it's outside the range of our + * allocation/deallocation scheme just let them use it. + * + * If the source port is outside our allocation range, the caller is + * responsible for keeping track of their port usage. 
+ */ + +static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev, + unsigned char port_id) +{ + struct cxgbi_ports_map *pmap = &cdev->pmap; + unsigned int i; + unsigned int used; + + if (!pmap->max_connect || !pmap->used) + return NULL; + + spin_lock_bh(&pmap->lock); + used = pmap->used; + for (i = 0; used && i < pmap->max_connect; i++) { + struct cxgbi_sock *csk = pmap->port_csk[i]; + + if (csk) { + if (csk->port_id == port_id) { + spin_unlock_bh(&pmap->lock); + return csk; + } + used--; + } + } + spin_unlock_bh(&pmap->lock); + + return NULL; +} + +static int sock_get_port(struct cxgbi_sock *csk) +{ + struct cxgbi_device *cdev = csk->cdev; + struct cxgbi_ports_map *pmap = &cdev->pmap; + unsigned int start; + int idx; + __be16 *port; + + if (!pmap->max_connect) { + pr_err("cdev 0x%p, p#%u %s, NO port map.\n", + cdev, csk->port_id, cdev->ports[csk->port_id]->name); + return -EADDRNOTAVAIL; + } + + if (csk->csk_family == AF_INET) + port = &csk->saddr.sin_port; + else /* ipv6 */ + port = &csk->saddr6.sin6_port; + + if (*port) { + pr_err("source port NON-ZERO %u.\n", + ntohs(*port)); + return -EADDRINUSE; + } + + spin_lock_bh(&pmap->lock); + if (pmap->used >= pmap->max_connect) { + spin_unlock_bh(&pmap->lock); + pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n", + cdev, csk->port_id, cdev->ports[csk->port_id]->name); + return -EADDRNOTAVAIL; + } + + start = idx = pmap->next; + do { + if (++idx >= pmap->max_connect) + idx = 0; + if (!pmap->port_csk[idx]) { + pmap->used++; + *port = htons(pmap->sport_base + idx); + pmap->next = idx; + pmap->port_csk[idx] = csk; + spin_unlock_bh(&pmap->lock); + cxgbi_sock_get(csk); + log_debug(1 << CXGBI_DBG_SOCK, + "cdev 0x%p, p#%u %s, p %u, %u.\n", + cdev, csk->port_id, + cdev->ports[csk->port_id]->name, + pmap->sport_base + idx, pmap->next); + return 0; + } + } while (idx != start); + spin_unlock_bh(&pmap->lock); + + /* should not happen */ + pr_warn("cdev 0x%p, p#%u %s, next %u?\n", + cdev, csk->port_id, cdev->ports[csk->port_id]->name, + pmap->next); + return -EADDRNOTAVAIL; +} + +static void sock_put_port(struct cxgbi_sock *csk) +{ + struct cxgbi_device *cdev = csk->cdev; + struct cxgbi_ports_map *pmap = &cdev->pmap; + __be16 *port; + + if (csk->csk_family == AF_INET) + port = &csk->saddr.sin_port; + else /* ipv6 */ + port = &csk->saddr6.sin6_port; + + if (*port) { + int idx = ntohs(*port) - pmap->sport_base; + + *port = 0; + if (idx < 0 || idx >= pmap->max_connect) { + pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n", + cdev, csk->port_id, + cdev->ports[csk->port_id]->name, + ntohs(*port)); + return; + } + + spin_lock_bh(&pmap->lock); + pmap->port_csk[idx] = NULL; + pmap->used--; + spin_unlock_bh(&pmap->lock); + + log_debug(1 << CXGBI_DBG_SOCK, + "cdev 0x%p, p#%u %s, release %u.\n", + cdev, csk->port_id, cdev->ports[csk->port_id]->name, + pmap->sport_base + idx); + + cxgbi_sock_put(csk); + } +} + +/* + * iscsi tcp connection + */ +void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk) +{ + if (csk->cpl_close) { + kfree_skb(csk->cpl_close); + csk->cpl_close = NULL; + } + if (csk->cpl_abort_req) { + kfree_skb(csk->cpl_abort_req); + csk->cpl_abort_req = NULL; + } + if (csk->cpl_abort_rpl) { + kfree_skb(csk->cpl_abort_rpl); + csk->cpl_abort_rpl = NULL; + } +} +EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs); + +static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) +{ + struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO); + + if (!csk) { + pr_info("alloc csk %zu failed.\n", sizeof(*csk)); + return NULL; + } + + if 
(cdev->csk_alloc_cpls(csk) < 0) { + pr_info("csk 0x%p, alloc cpls failed.\n", csk); + kfree(csk); + return NULL; + } + + spin_lock_init(&csk->lock); + kref_init(&csk->refcnt); + skb_queue_head_init(&csk->receive_queue); + skb_queue_head_init(&csk->write_queue); + timer_setup(&csk->retry_timer, NULL, 0); + init_completion(&csk->cmpl); + rwlock_init(&csk->callback_lock); + csk->cdev = cdev; + csk->flags = 0; + cxgbi_sock_set_state(csk, CTP_CLOSED); + + log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk); + + return csk; +} + +static struct rtable *find_route_ipv4(struct flowi4 *fl4, + __be32 saddr, __be32 daddr, + __be16 sport, __be16 dport, u8 tos, + int ifindex) +{ + struct rtable *rt; + + rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr, + dport, sport, IPPROTO_TCP, tos, ifindex); + if (IS_ERR(rt)) + return NULL; + + return rt; +} + +static struct cxgbi_sock * +cxgbi_check_route(struct sockaddr *dst_addr, int ifindex) +{ + struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr; + struct dst_entry *dst; + struct net_device *ndev; + struct cxgbi_device *cdev; + struct rtable *rt = NULL; + struct neighbour *n; + struct flowi4 fl4; + struct cxgbi_sock *csk = NULL; + unsigned int mtu = 0; + int port = 0xFFFF; + int err = 0; + + rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, + daddr->sin_port, 0, ifindex); + if (!rt) { + pr_info("no route to ipv4 0x%x, port %u.\n", + be32_to_cpu(daddr->sin_addr.s_addr), + be16_to_cpu(daddr->sin_port)); + err = -ENETUNREACH; + goto err_out; + } + dst = &rt->dst; + n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr); + if (!n) { + err = -ENODEV; + goto rel_rt; + } + ndev = n->dev; + + if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { + pr_info("multi-cast route %pI4, port %u, dev %s.\n", + &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), + ndev->name); + err = -ENETUNREACH; + goto rel_neigh; + } + + if (ndev->flags & IFF_LOOPBACK) { + ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); + if (!ndev) { + err = -ENETUNREACH; + goto rel_neigh; + } + mtu = ndev->mtu; + pr_info("rt dev %s, loopback -> %s, mtu %u.\n", + n->dev->name, ndev->name, mtu); + } + + if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) { + pr_info("%s interface not up.\n", ndev->name); + err = -ENETDOWN; + goto rel_neigh; + } + + cdev = cxgbi_device_find_by_netdev(ndev, &port); + if (!cdev) + cdev = cxgbi_device_find_by_mac(ndev, &port); + if (!cdev) { + pr_info("dst %pI4, %s, NOT cxgbi device.\n", + &daddr->sin_addr.s_addr, ndev->name); + err = -ENETUNREACH; + goto rel_neigh; + } + log_debug(1 << CXGBI_DBG_SOCK, + "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n", + &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), + port, ndev->name, cdev); + + csk = cxgbi_sock_create(cdev); + if (!csk) { + err = -ENOMEM; + goto rel_neigh; + } + csk->cdev = cdev; + csk->port_id = port; + csk->mtu = mtu; + csk->dst = dst; + + csk->csk_family = AF_INET; + csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; + csk->daddr.sin_port = daddr->sin_port; + csk->daddr.sin_family = daddr->sin_family; + csk->saddr.sin_family = daddr->sin_family; + csk->saddr.sin_addr.s_addr = fl4.saddr; + neigh_release(n); + + return csk; + +rel_neigh: + neigh_release(n); + +rel_rt: + ip_rt_put(rt); +err_out: + return ERR_PTR(err); +} + +#if IS_ENABLED(CONFIG_IPV6) +static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr, + const struct in6_addr *daddr, + int ifindex) +{ + struct flowi6 fl; + + memset(&fl, 0, sizeof(fl)); + fl.flowi6_oif = ifindex; + if (saddr) + 
memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); + if (daddr) + memcpy(&fl.daddr, daddr, sizeof(struct in6_addr)); + return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); +} + +static struct cxgbi_sock * +cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex) +{ + struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr; + struct dst_entry *dst; + struct net_device *ndev; + struct cxgbi_device *cdev; + struct rt6_info *rt = NULL; + struct neighbour *n; + struct in6_addr pref_saddr; + struct cxgbi_sock *csk = NULL; + unsigned int mtu = 0; + int port = 0xFFFF; + int err = 0; + + rt = find_route_ipv6(NULL, &daddr6->sin6_addr, ifindex); + + if (!rt) { + pr_info("no route to ipv6 %pI6 port %u\n", + daddr6->sin6_addr.s6_addr, + be16_to_cpu(daddr6->sin6_port)); + err = -ENETUNREACH; + goto err_out; + } + + dst = &rt->dst; + + n = dst_neigh_lookup(dst, &daddr6->sin6_addr); + + if (!n) { + pr_info("%pI6, port %u, dst no neighbour.\n", + daddr6->sin6_addr.s6_addr, + be16_to_cpu(daddr6->sin6_port)); + err = -ENETUNREACH; + goto rel_rt; + } + ndev = n->dev; + + if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) { + pr_info("%s interface not up.\n", ndev->name); + err = -ENETDOWN; + goto rel_rt; + } + + if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) { + pr_info("multi-cast route %pI6 port %u, dev %s.\n", + daddr6->sin6_addr.s6_addr, + ntohs(daddr6->sin6_port), ndev->name); + err = -ENETUNREACH; + goto rel_rt; + } + + cdev = cxgbi_device_find_by_netdev(ndev, &port); + if (!cdev) + cdev = cxgbi_device_find_by_mac(ndev, &port); + if (!cdev) { + pr_info("dst %pI6 %s, NOT cxgbi device.\n", + daddr6->sin6_addr.s6_addr, ndev->name); + err = -ENETUNREACH; + goto rel_rt; + } + log_debug(1 << CXGBI_DBG_SOCK, + "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n", + daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port, + ndev->name, cdev); + + csk = cxgbi_sock_create(cdev); + if (!csk) { + err = -ENOMEM; + goto rel_rt; + } + csk->cdev = cdev; + csk->port_id = port; + csk->mtu = mtu; + csk->dst = dst; + + rt6_get_prefsrc(rt, &pref_saddr); + if (ipv6_addr_any(&pref_saddr)) { + struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt); + + err = ipv6_dev_get_saddr(&init_net, idev ? 
idev->dev : NULL, + &daddr6->sin6_addr, 0, &pref_saddr); + if (err) { + pr_info("failed to get source address to reach %pI6\n", + &daddr6->sin6_addr); + goto rel_rt; + } + } + + csk->csk_family = AF_INET6; + csk->daddr6.sin6_addr = daddr6->sin6_addr; + csk->daddr6.sin6_port = daddr6->sin6_port; + csk->daddr6.sin6_family = daddr6->sin6_family; + csk->saddr6.sin6_family = daddr6->sin6_family; + csk->saddr6.sin6_addr = pref_saddr; + + neigh_release(n); + return csk; + +rel_rt: + if (n) + neigh_release(n); + + ip6_rt_put(rt); + if (csk) + cxgbi_sock_closed(csk); +err_out: + return ERR_PTR(err); +} +#endif /* IS_ENABLED(CONFIG_IPV6) */ + +void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, + unsigned int opt) +{ + csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn; + dst_confirm(csk->dst); + smp_mb(); + cxgbi_sock_set_state(csk, CTP_ESTABLISHED); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_established); + +static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) +{ + log_debug(1 << CXGBI_DBG_SOCK, + "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n", + csk, csk->state, csk->flags, csk->user_data); + + if (csk->state != CTP_ESTABLISHED) { + read_lock_bh(&csk->callback_lock); + if (csk->user_data) + iscsi_conn_failure(csk->user_data, + ISCSI_ERR_TCP_CONN_CLOSE); + read_unlock_bh(&csk->callback_lock); + } +} + +void cxgbi_sock_closed(struct cxgbi_sock *csk) +{ + log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", + csk, (csk)->state, (csk)->flags, (csk)->tid); + cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); + if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED) + return; + if (csk->saddr.sin_port) + sock_put_port(csk); + if (csk->dst) + dst_release(csk->dst); + csk->cdev->csk_release_offload_resources(csk); + cxgbi_sock_set_state(csk, CTP_CLOSED); + cxgbi_inform_iscsi_conn_closing(csk); + cxgbi_sock_put(csk); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_closed); + +static void need_active_close(struct cxgbi_sock *csk) +{ + int data_lost; + int close_req = 0; + + log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", + csk, (csk)->state, (csk)->flags, (csk)->tid); + spin_lock_bh(&csk->lock); + if (csk->dst) + dst_confirm(csk->dst); + data_lost = skb_queue_len(&csk->receive_queue); + __skb_queue_purge(&csk->receive_queue); + + if (csk->state == CTP_ACTIVE_OPEN) + cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); + else if (csk->state == CTP_ESTABLISHED) { + close_req = 1; + cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE); + } else if (csk->state == CTP_PASSIVE_CLOSE) { + close_req = 1; + cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); + } + + if (close_req) { + if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) || + data_lost) + csk->cdev->csk_send_abort_req(csk); + else + csk->cdev->csk_send_close_req(csk); + } + + spin_unlock_bh(&csk->lock); +} + +void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno) +{ + pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n", + csk, csk->state, csk->flags, + &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port, + &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port, + errno); + + cxgbi_sock_set_state(csk, CTP_CONNECTING); + csk->err = errno; + cxgbi_sock_closed(csk); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open); + +void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb) +{ + struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk; + struct module *owner = csk->cdev->owner; + + log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", + csk, (csk)->state, (csk)->flags, (csk)->tid); + cxgbi_sock_get(csk); + 
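+	/*
+	 * Descriptive note (grounded in the code below): ARP resolution
+	 * failed for the active-open request. If the connection is still in
+	 * CTP_ACTIVE_OPEN, fail it with -EHOSTUNREACH, then free the skb and
+	 * drop the cdev owner's module reference.
+	 */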
spin_lock_bh(&csk->lock); + if (csk->state == CTP_ACTIVE_OPEN) + cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH); + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); + __kfree_skb(skb); + + module_put(owner); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure); + +void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk) +{ + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + + cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD); + if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { + cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING); + if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) + pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n", + csk, csk->state, csk->flags, csk->tid); + cxgbi_sock_closed(csk); + } + + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl); + +void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk) +{ + log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", + csk, (csk)->state, (csk)->flags, (csk)->tid); + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + + if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) + goto done; + + switch (csk->state) { + case CTP_ESTABLISHED: + cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE); + break; + case CTP_ACTIVE_CLOSE: + cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); + break; + case CTP_CLOSE_WAIT_1: + cxgbi_sock_closed(csk); + break; + case CTP_ABORTING: + break; + default: + pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n", + csk, csk->state, csk->flags, csk->tid); + } + cxgbi_inform_iscsi_conn_closing(csk); +done: + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close); + +void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt) +{ + log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", + csk, (csk)->state, (csk)->flags, (csk)->tid); + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + + csk->snd_una = snd_nxt - 1; + if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) + goto done; + + switch (csk->state) { + case CTP_ACTIVE_CLOSE: + cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1); + break; + case CTP_CLOSE_WAIT_1: + case CTP_CLOSE_WAIT_2: + cxgbi_sock_closed(csk); + break; + case CTP_ABORTING: + break; + default: + pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n", + csk, csk->state, csk->flags, csk->tid); + } +done: + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl); + +void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits, + unsigned int snd_una, int seq_chk) +{ + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n", + csk, csk->state, csk->flags, csk->tid, credits, + csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk); + + spin_lock_bh(&csk->lock); + + csk->wr_cred += credits; + if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred) + csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred; + + while (credits) { + struct sk_buff *p = cxgbi_sock_peek_wr(csk); + + if (unlikely(!p)) { + pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n", + csk, csk->state, csk->flags, csk->tid, credits, + csk->wr_cred, csk->wr_una_cred); + break; + } + + if (unlikely(credits < p->csum)) { + pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n", + csk, csk->state, csk->flags, csk->tid, + credits, csk->wr_cred, csk->wr_una_cred, + p->csum); + p->csum -= credits; + break; + } else { + cxgbi_sock_dequeue_wr(csk); + credits -= p->csum; + kfree_skb(p); + } + } + + cxgbi_sock_check_wr_invariants(csk); + + if (seq_chk) { + if (unlikely(before(snd_una, 
csk->snd_una))) { + pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.", + csk, csk->state, csk->flags, csk->tid, snd_una, + csk->snd_una); + goto done; + } + + if (csk->snd_una != snd_una) { + csk->snd_una = snd_una; + dst_confirm(csk->dst); + } + } + + if (skb_queue_len(&csk->write_queue)) { + if (csk->cdev->csk_push_tx_frames(csk, 0)) + cxgbi_conn_tx_open(csk); + } else + cxgbi_conn_tx_open(csk); +done: + spin_unlock_bh(&csk->lock); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack); + +static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk, + unsigned short mtu) +{ + int i = 0; + + while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu) + ++i; + + return i; +} + +unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu) +{ + unsigned int idx; + struct dst_entry *dst = csk->dst; + + csk->advmss = dst_metric_advmss(dst); + + if (csk->advmss > pmtu - 40) + csk->advmss = pmtu - 40; + if (csk->advmss < csk->cdev->mtus[0] - 40) + csk->advmss = csk->cdev->mtus[0] - 40; + idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40); + + return idx; +} +EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss); + +void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb) +{ + cxgbi_skcb_tcp_seq(skb) = csk->write_seq; + __skb_queue_tail(&csk->write_queue, skb); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail); + +void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk) +{ + struct sk_buff *skb; + + while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL) + kfree_skb(skb); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue); + +void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk) +{ + int pending = cxgbi_sock_count_pending_wrs(csk); + + if (unlikely(csk->wr_cred + pending != csk->wr_max_cred)) + pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n", + csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants); + +static inline void +scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl, + unsigned int *sgcnt, unsigned int *dlen, + unsigned int prot) +{ + struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : &sc->sdb; + + *sgl = sdb->table.sgl; + *sgcnt = sdb->table.nents; + *dlen = sdb->length; + /* Caution: for protection sdb, sdb->length is invalid */ +} + +void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *ppod, + struct cxgbi_task_tag_info *ttinfo, + struct scatterlist **sg_pp, unsigned int *sg_off) +{ + struct scatterlist *sg = sg_pp ? *sg_pp : NULL; + unsigned int offset = sg_off ? *sg_off : 0; + dma_addr_t addr = 0UL; + unsigned int len = 0; + int i; + + memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr)); + + if (sg) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + } + + for (i = 0; i < PPOD_PAGES_MAX; i++) { + if (sg) { + ppod->addr[i] = cpu_to_be64(addr + offset); + offset += PAGE_SIZE; + if (offset == (len + sg->offset)) { + offset = 0; + sg = sg_next(sg); + if (sg) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + } + } + } else { + ppod->addr[i] = 0ULL; + } + } + + /* + * the fifth address needs to be repeated in the next ppod, so do + * not move sg + */ + if (sg_pp) { + *sg_pp = sg; + *sg_off = offset; + } + + if (offset == len) { + offset = 0; + sg = sg_next(sg); + if (sg) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + } + } + ppod->addr[i] = sg ? 
cpu_to_be64(addr + offset) : 0ULL; +} +EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod); + +/* + * APIs interacting with open-iscsi libraries + */ + +int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev, + struct cxgbi_tag_format *tformat, + unsigned int iscsi_size, unsigned int llimit, + unsigned int start, unsigned int rsvd_factor, + unsigned int edram_start, unsigned int edram_size) +{ + int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev, + cdev->lldev, tformat, iscsi_size, llimit, start, + rsvd_factor, edram_start, edram_size); + + if (err >= 0) { + struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp); + + if (ppm->ppmax < 1024 || + ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) + cdev->flags |= CXGBI_FLAG_DDP_OFF; + err = 0; + } else { + cdev->flags |= CXGBI_FLAG_DDP_OFF; + } + + return err; +} +EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup); + +static int cxgbi_ddp_sgl_check(struct scatterlist *sgl, int nents) +{ + int i; + int last_sgidx = nents - 1; + struct scatterlist *sg = sgl; + + for (i = 0; i < nents; i++, sg = sg_next(sg)) { + unsigned int len = sg->length + sg->offset; + + if ((sg->offset & 0x3) || (i && sg->offset) || + ((i != last_sgidx) && len != PAGE_SIZE)) { + log_debug(1 << CXGBI_DBG_DDP, + "sg %u/%u, %u,%u, not aligned.\n", + i, nents, sg->offset, sg->length); + goto err_out; + } + } + return 0; +err_out: + return -EINVAL; +} + +static int cxgbi_ddp_reserve(struct cxgbi_conn *cconn, + struct cxgbi_task_data *tdata, u32 sw_tag, + unsigned int xferlen) +{ + struct cxgbi_sock *csk = cconn->cep->csk; + struct cxgbi_device *cdev = csk->cdev; + struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); + struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; + struct scatterlist *sgl = ttinfo->sgl; + unsigned int sgcnt = ttinfo->nents; + unsigned int sg_offset = sgl->offset; + int err; + + if (cdev->flags & CXGBI_FLAG_DDP_OFF) { + log_debug(1 << CXGBI_DBG_DDP, + "cdev 0x%p DDP off.\n", cdev); + return -EINVAL; + } + + if (!ppm || xferlen < DDP_THRESHOLD || !sgcnt || + ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) { + log_debug(1 << CXGBI_DBG_DDP, + "ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n", + ppm, ppm ? 
ppm->tformat.pgsz_idx_dflt : DDP_PGIDX_MAX, + xferlen, ttinfo->nents); + return -EINVAL; + } + + /* make sure the buffer is suitable for ddp */ + if (cxgbi_ddp_sgl_check(sgl, sgcnt) < 0) + return -EINVAL; + + ttinfo->nr_pages = (xferlen + sgl->offset + (1 << PAGE_SHIFT) - 1) >> + PAGE_SHIFT; + + /* + * the ddp tag will be used for the itt in the outgoing pdu, + * the itt genrated by libiscsi is saved in the ppm and can be + * retrieved via the ddp tag + */ + err = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx, + &ttinfo->tag, (unsigned long)sw_tag); + if (err < 0) { + cconn->ddp_full++; + return err; + } + ttinfo->npods = err; + + /* setup dma from scsi command sgl */ + sgl->offset = 0; + err = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); + sgl->offset = sg_offset; + if (err == 0) { + pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", + __func__, sw_tag, xferlen, sgcnt); + goto rel_ppods; + } + if (err != ttinfo->nr_pages) { + log_debug(1 << CXGBI_DBG_DDP, + "%s: sw tag 0x%x, xfer %u, sgl %u, dma count %d.\n", + __func__, sw_tag, xferlen, sgcnt, err); + } + + ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_MAPPED; + ttinfo->cid = csk->port_id; + + cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset, + xferlen, &ttinfo->hdr); + + if (cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ) { + /* write ppod from xmit_pdu (of iscsi_scsi_command pdu) */ + ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_VALID; + } else { + /* write ppod from control queue now */ + err = cdev->csk_ddp_set_map(ppm, csk, ttinfo); + if (err < 0) + goto rel_ppods; + } + + return 0; + +rel_ppods: + cxgbi_ppm_ppod_release(ppm, ttinfo->idx); + + if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_MAPPED) { + ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_MAPPED; + dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); + } + return -EINVAL; +} + +static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt) +{ + struct scsi_cmnd *sc = task->sc; + struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct cxgbi_device *cdev = cconn->chba->cdev; + struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); + u32 tag = ntohl((__force u32)hdr_itt); + + log_debug(1 << CXGBI_DBG_DDP, + "cdev 0x%p, task 0x%p, release tag 0x%x.\n", + cdev, task, tag); + if (sc && sc->sc_data_direction == DMA_FROM_DEVICE && + cxgbi_ppm_is_ddp_tag(ppm, tag)) { + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; + + if (!(cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ)) + cdev->csk_ddp_clear_map(cdev, ppm, ttinfo); + cxgbi_ppm_ppod_release(ppm, ttinfo->idx); + dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents, + DMA_FROM_DEVICE); + } +} + +static inline u32 cxgbi_build_sw_tag(u32 idx, u32 age) +{ + /* assume idx and age both are < 0x7FFF (32767) */ + return (idx << 16) | age; +} + +static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) +{ + struct scsi_cmnd *sc = task->sc; + struct iscsi_conn *conn = task->conn; + struct iscsi_session *sess = conn->session; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct cxgbi_device *cdev = cconn->chba->cdev; + struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); + u32 sw_tag = cxgbi_build_sw_tag(task->itt, sess->age); + u32 tag = 0; + int err = -EINVAL; + + if (sc && sc->sc_data_direction == DMA_FROM_DEVICE) { + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; + + 
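+		/*
+		 * Data-in (read) command: pull the sgl from the scsi command
+		 * and try to reserve DDP resources; if that succeeds the ddp
+		 * tag is used as the itt. The sw tag packs the libiscsi itt in
+		 * the upper 16 bits and the session age in the lower 16 bits,
+		 * e.g. itt 0x12, age 0x3 -> 0x00120003 (cxgbi_build_sw_tag()).
+		 */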
scmd_get_params(sc, &ttinfo->sgl, &ttinfo->nents, + &tdata->dlen, 0); + err = cxgbi_ddp_reserve(cconn, tdata, sw_tag, tdata->dlen); + if (!err) + tag = ttinfo->tag; + else + log_debug(1 << CXGBI_DBG_DDP, + "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n", + cconn->cep->csk, task, tdata->dlen, + ttinfo->nents); + } + + if (err < 0) { + err = cxgbi_ppm_make_non_ddp_tag(ppm, sw_tag, &tag); + if (err < 0) + return err; + } + /* the itt needs to be sent in big-endian order */ + *hdr_itt = (__force itt_t)htonl(tag); + + log_debug(1 << CXGBI_DBG_DDP, + "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n", + cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt); + return 0; +} + +void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct cxgbi_device *cdev = cconn->chba->cdev; + struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); + u32 tag = ntohl((__force u32)itt); + u32 sw_bits; + + if (ppm) { + if (cxgbi_ppm_is_ddp_tag(ppm, tag)) + sw_bits = cxgbi_ppm_get_tag_caller_data(ppm, tag); + else + sw_bits = cxgbi_ppm_decode_non_ddp_tag(ppm, tag); + } else { + sw_bits = tag; + } + + cxgbi_decode_sw_tag(sw_bits, idx, age); + log_debug(1 << CXGBI_DBG_DDP, + "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n", + cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF, + age ? *age : 0xFF); +} +EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt); + +void cxgbi_conn_tx_open(struct cxgbi_sock *csk) +{ + struct iscsi_conn *conn = csk->user_data; + + if (conn) { + log_debug(1 << CXGBI_DBG_SOCK, + "csk 0x%p, cid %d.\n", csk, conn->id); + iscsi_conn_queue_xmit(conn); + } +} +EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open); + +/* + * pdu receive, interact with libiscsi_tcp + */ +static inline int read_pdu_skb(struct iscsi_conn *conn, + struct sk_buff *skb, + unsigned int offset, + int offloaded) +{ + int status = 0; + int bytes_read; + + bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status); + switch (status) { + case ISCSI_TCP_CONN_ERR: + pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n", + skb, offset, offloaded); + return -EIO; + case ISCSI_TCP_SUSPENDED: + log_debug(1 << CXGBI_DBG_PDU_RX, + "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n", + skb, offset, offloaded, bytes_read); + /* no transfer - just have caller flush queue */ + return bytes_read; + case ISCSI_TCP_SKB_DONE: + pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n", + skb, offset, offloaded); + /* + * pdus should always fit in the skb and we should get + * segment done notification. 
+ */ + iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb."); + return -EFAULT; + case ISCSI_TCP_SEGMENT_DONE: + log_debug(1 << CXGBI_DBG_PDU_RX, + "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n", + skb, offset, offloaded, bytes_read); + return bytes_read; + default: + pr_info("skb 0x%p, off %u, %d, invalid status %d.\n", + skb, offset, offloaded, status); + return -EINVAL; + } +} + +static int +skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn, + struct sk_buff *skb) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + int err; + + log_debug(1 << CXGBI_DBG_PDU_RX, + "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", + conn, skb, skb->len, cxgbi_skcb_flags(skb)); + + if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) { + pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb); + iscsi_conn_failure(conn, ISCSI_ERR_PROTO); + return -EIO; + } + + if (conn->hdrdgst_en && + cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) { + pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb); + iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST); + return -EIO; + } + + if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) && + cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) { + /* If completion flag is set and data is directly + * placed in to the host memory then update + * task->exp_datasn to the datasn in completion + * iSCSI hdr as T6 adapter generates completion only + * for the last pdu of a sequence. + */ + itt_t itt = ((struct iscsi_data *)skb->data)->itt; + struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt); + u32 data_sn = be32_to_cpu(((struct iscsi_data *) + skb->data)->datasn); + if (task && task->sc) { + struct iscsi_tcp_task *tcp_task = task->dd_data; + + tcp_task->exp_datasn = data_sn; + } + } + + err = read_pdu_skb(conn, skb, 0, 0); + if (likely(err >= 0)) { + struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data; + u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK; + + if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP)) + cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD); + } + + return err; +} + +static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, + struct sk_buff *skb, unsigned int offset) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + bool offloaded = 0; + int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK; + + log_debug(1 << CXGBI_DBG_PDU_RX, + "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", + conn, skb, skb->len, cxgbi_skcb_flags(skb)); + + if (conn->datadgst_en && + cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) { + pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n", + conn, lskb, cxgbi_skcb_flags(lskb)); + iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); + return -EIO; + } + + if (iscsi_tcp_recv_segment_is_hdr(tcp_conn)) + return 0; + + /* coalesced, add header digest length */ + if (lskb == skb && conn->hdrdgst_en) + offset += ISCSI_DIGEST_SIZE; + + if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD)) + offloaded = 1; + + if (opcode == ISCSI_OP_SCSI_DATA_IN) + log_debug(1 << CXGBI_DBG_PDU_RX, + "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n", + skb, opcode, ntohl(tcp_conn->in.hdr->itt), + tcp_conn->in.datalen, offloaded ? 
"is" : "not"); + + return read_pdu_skb(conn, skb, offset, offloaded); +} + +static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) +{ + struct cxgbi_device *cdev = csk->cdev; + int must_send; + u32 credits; + + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n", + csk, csk->state, csk->flags, csk->tid, csk->copied_seq, + csk->rcv_wup, cdev->rx_credit_thres, + csk->rcv_win); + + if (!cdev->rx_credit_thres) + return; + + if (csk->state != CTP_ESTABLISHED) + return; + + credits = csk->copied_seq - csk->rcv_wup; + if (unlikely(!credits)) + return; + must_send = credits + 16384 >= csk->rcv_win; + if (must_send || credits >= cdev->rx_credit_thres) + csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); +} + +void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) +{ + struct cxgbi_device *cdev = csk->cdev; + struct iscsi_conn *conn = csk->user_data; + struct sk_buff *skb; + unsigned int read = 0; + int err = 0; + + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, conn 0x%p.\n", csk, conn); + + if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) { + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n", + csk, conn, conn ? conn->id : 0xFF, + conn ? conn->flags : 0xFF); + return; + } + + while (!err) { + skb = skb_peek(&csk->receive_queue); + if (!skb || + !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) { + if (skb) + log_debug(1 << CXGBI_DBG_PDU_RX, + "skb 0x%p, NOT ready 0x%lx.\n", + skb, cxgbi_skcb_flags(skb)); + break; + } + __skb_unlink(skb, &csk->receive_queue); + + read += cxgbi_skcb_rx_pdulen(skb); + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n", + csk, skb, skb->len, cxgbi_skcb_flags(skb), + cxgbi_skcb_rx_pdulen(skb)); + + if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { + err = skb_read_pdu_bhs(csk, conn, skb); + if (err < 0) { + pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, " + "f 0x%lx, plen %u.\n", + csk, skb, skb->len, + cxgbi_skcb_flags(skb), + cxgbi_skcb_rx_pdulen(skb)); + goto skb_done; + } + err = skb_read_pdu_data(conn, skb, skb, + err + cdev->skb_rx_extra); + if (err < 0) + pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, " + "f 0x%lx, plen %u.\n", + csk, skb, skb->len, + cxgbi_skcb_flags(skb), + cxgbi_skcb_rx_pdulen(skb)); + } else { + err = skb_read_pdu_bhs(csk, conn, skb); + if (err < 0) { + pr_err("bhs, csk 0x%p, skb 0x%p,%u, " + "f 0x%lx, plen %u.\n", + csk, skb, skb->len, + cxgbi_skcb_flags(skb), + cxgbi_skcb_rx_pdulen(skb)); + goto skb_done; + } + + if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { + struct sk_buff *dskb; + + dskb = skb_peek(&csk->receive_queue); + if (!dskb) { + pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx," + " plen %u, NO data.\n", + csk, skb, skb->len, + cxgbi_skcb_flags(skb), + cxgbi_skcb_rx_pdulen(skb)); + err = -EIO; + goto skb_done; + } + __skb_unlink(dskb, &csk->receive_queue); + + err = skb_read_pdu_data(conn, skb, dskb, 0); + if (err < 0) + pr_err("data, csk 0x%p, skb 0x%p,%u, " + "f 0x%lx, plen %u, dskb 0x%p," + "%u.\n", + csk, skb, skb->len, + cxgbi_skcb_flags(skb), + cxgbi_skcb_rx_pdulen(skb), + dskb, dskb->len); + __kfree_skb(dskb); + } else + err = skb_read_pdu_data(conn, skb, skb, 0); + } +skb_done: + __kfree_skb(skb); + + if (err < 0) + break; + } + + log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read); + if (read) { + csk->copied_seq += read; + csk_return_rx_credits(csk, read); + conn->rxdata_octets += read; + } + + if (err < 0) { + pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n", + csk, 
conn, err, read); + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); + } +} +EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready); + +static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt, + unsigned int offset, unsigned int *off, + struct scatterlist **sgp) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, sgcnt, i) { + if (offset < sg->length) { + *off = offset; + *sgp = sg; + return 0; + } + offset -= sg->length; + } + return -EFAULT; +} + +static int +sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset, + unsigned int dlen, struct page_frag *frags, + int frag_max, u32 *dlimit) +{ + unsigned int datalen = dlen; + unsigned int sglen = sg->length - sgoffset; + struct page *page = sg_page(sg); + int i; + + i = 0; + do { + unsigned int copy; + + if (!sglen) { + sg = sg_next(sg); + if (!sg) { + pr_warn("sg %d NULL, len %u/%u.\n", + i, datalen, dlen); + return -EINVAL; + } + sgoffset = 0; + sglen = sg->length; + page = sg_page(sg); + + } + copy = min(datalen, sglen); + if (i && page == frags[i - 1].page && + sgoffset + sg->offset == + frags[i - 1].offset + frags[i - 1].size) { + frags[i - 1].size += copy; + } else { + if (i >= frag_max) { + pr_warn("too many pages %u, dlen %u.\n", + frag_max, dlen); + *dlimit = dlen - datalen; + return -EINVAL; + } + + frags[i].page = page; + frags[i].offset = sg->offset + sgoffset; + frags[i].size = copy; + i++; + } + datalen -= copy; + sgoffset += copy; + sglen -= copy; + } while (datalen); + + return i; +} + +static void cxgbi_task_data_sgl_check(struct iscsi_task *task) +{ + struct scsi_cmnd *sc = task->sc; + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + struct scatterlist *sg, *sgl = NULL; + u32 sgcnt = 0; + int i; + + tdata->flags = CXGBI_TASK_SGL_CHECKED; + if (!sc) + return; + + scmd_get_params(sc, &sgl, &sgcnt, &tdata->dlen, 0); + if (!sgl || !sgcnt) { + tdata->flags |= CXGBI_TASK_SGL_COPY; + return; + } + + for_each_sg(sgl, sg, sgcnt, i) { + if (page_count(sg_page(sg)) < 1) { + tdata->flags |= CXGBI_TASK_SGL_COPY; + return; + } + } +} + +static int +cxgbi_task_data_sgl_read(struct iscsi_task *task, u32 offset, u32 count, + u32 *dlimit) +{ + struct scsi_cmnd *sc = task->sc; + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + struct scatterlist *sgl = NULL; + struct scatterlist *sg; + u32 dlen = 0; + u32 sgcnt; + int err; + + if (!sc) + return 0; + + scmd_get_params(sc, &sgl, &sgcnt, &dlen, 0); + if (!sgl || !sgcnt) + return 0; + + err = sgl_seek_offset(sgl, sgcnt, offset, &tdata->sgoffset, &sg); + if (err < 0) { + pr_warn("tpdu max, sgl %u, bad offset %u/%u.\n", + sgcnt, offset, tdata->dlen); + return err; + } + err = sgl_read_to_frags(sg, tdata->sgoffset, count, + tdata->frags, MAX_SKB_FRAGS, dlimit); + if (err < 0) { + log_debug(1 << CXGBI_DBG_ISCSI, + "sgl max limit, sgl %u, offset %u, %u/%u, dlimit %u.\n", + sgcnt, offset, count, tdata->dlen, *dlimit); + return err; + } + tdata->offset = offset; + tdata->count = count; + tdata->nr_frags = err; + tdata->total_count = count; + tdata->total_offset = offset; + + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "%s: offset %u, count %u,\n" + "err %u, total_count %u, total_offset %u\n", + __func__, offset, count, err, tdata->total_count, tdata->total_offset); + + return 0; +} + +int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 op) +{ + struct iscsi_conn *conn = task->conn; + struct iscsi_session *session = task->conn->session; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + 
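+	/*
+	 * Descriptive note (grounded in the code below): allocate the tx skb
+	 * for this pdu. For a data-out pdu or a write scsi command, the
+	 * payload may be sent as multiple pdus in one skb (ISO) when the LLD
+	 * provides an iso tx header; in that case conn->max_xmit_dlength is
+	 * scaled up to the ISO size and restored from
+	 * tdata->max_xmit_dlength on the error path.
+	 */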
struct cxgbi_device *cdev = cconn->chba->cdev; + struct cxgbi_sock *csk = cconn->cep ? cconn->cep->csk : NULL; + struct iscsi_tcp_task *tcp_task = task->dd_data; + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + struct scsi_cmnd *sc = task->sc; + u32 headroom = SKB_TX_ISCSI_PDU_HEADER_MAX; + u32 max_txdata_len = conn->max_xmit_dlength; + u32 iso_tx_rsvd = 0, local_iso_info = 0; + u32 last_tdata_offset, last_tdata_count; + int err = 0; + + if (!tcp_task) { + pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p.\n", + task, tcp_task, tdata); + return -ENOMEM; + } + if (!csk) { + pr_err("task 0x%p, csk gone.\n", task); + return -EPIPE; + } + + op &= ISCSI_OPCODE_MASK; + + tcp_task->dd_data = tdata; + task->hdr = NULL; + + last_tdata_count = tdata->count; + last_tdata_offset = tdata->offset; + + if ((op == ISCSI_OP_SCSI_DATA_OUT) || + ((op == ISCSI_OP_SCSI_CMD) && + (sc->sc_data_direction == DMA_TO_DEVICE))) { + u32 remaining_data_tosend, dlimit = 0; + u32 max_pdu_size, max_num_pdu, num_pdu; + u32 count; + + /* Preserve conn->max_xmit_dlength because it can get updated to + * ISO data size. + */ + if (task->state == ISCSI_TASK_PENDING) + tdata->max_xmit_dlength = conn->max_xmit_dlength; + + if (!tdata->offset) + cxgbi_task_data_sgl_check(task); + + remaining_data_tosend = + tdata->dlen - tdata->offset - tdata->count; + +recalculate_sgl: + max_txdata_len = tdata->max_xmit_dlength; + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "tdata->dlen %u, remaining to send %u " + "conn->max_xmit_dlength %u, " + "tdata->max_xmit_dlength %u\n", + tdata->dlen, remaining_data_tosend, + conn->max_xmit_dlength, tdata->max_xmit_dlength); + + if (cdev->skb_iso_txhdr && !csk->disable_iso && + (remaining_data_tosend > tdata->max_xmit_dlength) && + !(remaining_data_tosend % 4)) { + u32 max_iso_data; + + if ((op == ISCSI_OP_SCSI_CMD) && + session->initial_r2t_en) + goto no_iso; + + max_pdu_size = tdata->max_xmit_dlength + + ISCSI_PDU_NONPAYLOAD_LEN; + max_iso_data = rounddown(CXGBI_MAX_ISO_DATA_IN_SKB, + csk->advmss); + max_num_pdu = max_iso_data / max_pdu_size; + + num_pdu = (remaining_data_tosend + + tdata->max_xmit_dlength - 1) / + tdata->max_xmit_dlength; + + if (num_pdu > max_num_pdu) + num_pdu = max_num_pdu; + + conn->max_xmit_dlength = tdata->max_xmit_dlength * num_pdu; + max_txdata_len = conn->max_xmit_dlength; + iso_tx_rsvd = cdev->skb_iso_txhdr; + local_iso_info = sizeof(struct cxgbi_iso_info); + + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "max_pdu_size %u, max_num_pdu %u, " + "max_txdata %u, num_pdu %u\n", + max_pdu_size, max_num_pdu, + max_txdata_len, num_pdu); + } +no_iso: + count = min_t(u32, max_txdata_len, remaining_data_tosend); + err = cxgbi_task_data_sgl_read(task, + tdata->offset + tdata->count, + count, &dlimit); + if (unlikely(err < 0)) { + log_debug(1 << CXGBI_DBG_ISCSI, + "task 0x%p, tcp_task 0x%p, tdata 0x%p, " + "sgl err %d, count %u, dlimit %u\n", + task, tcp_task, tdata, err, count, dlimit); + if (dlimit) { + remaining_data_tosend = + rounddown(dlimit, + tdata->max_xmit_dlength); + if (!remaining_data_tosend) + remaining_data_tosend = dlimit; + + dlimit = 0; + + conn->max_xmit_dlength = remaining_data_tosend; + goto recalculate_sgl; + } + + pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p, " + "sgl err %d\n", + task, tcp_task, tdata, err); + goto ret_err; + } + + if ((tdata->flags & CXGBI_TASK_SGL_COPY) || + (tdata->nr_frags > MAX_SKB_FRAGS)) + headroom += conn->max_xmit_dlength; + } + + tdata->skb = alloc_skb(local_iso_info + cdev->skb_tx_rsvd + + 
iso_tx_rsvd + headroom, GFP_ATOMIC); + if (!tdata->skb) { + tdata->count = last_tdata_count; + tdata->offset = last_tdata_offset; + err = -ENOMEM; + goto ret_err; + } + + skb_reserve(tdata->skb, local_iso_info + cdev->skb_tx_rsvd + + iso_tx_rsvd); + + if (task->sc) { + task->hdr = (struct iscsi_hdr *)tdata->skb->data; + } else { + task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC); + if (!task->hdr) { + __kfree_skb(tdata->skb); + tdata->skb = NULL; + return -ENOMEM; + } + } + + task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; + + if (iso_tx_rsvd) + cxgbi_skcb_set_flag(tdata->skb, SKCBF_TX_ISO); + + /* data_out uses scsi_cmd's itt */ + if (op != ISCSI_OP_SCSI_DATA_OUT) + task_reserve_itt(task, &task->hdr->itt); + + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n", + task, op, tdata->skb, cdev->skb_tx_rsvd, headroom, + conn->max_xmit_dlength, be32_to_cpu(task->hdr->itt)); + + return 0; + +ret_err: + conn->max_xmit_dlength = tdata->max_xmit_dlength; + return err; +} +EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu); + +static int +cxgbi_prep_iso_info(struct iscsi_task *task, struct sk_buff *skb, + u32 count) +{ + struct cxgbi_iso_info *iso_info = (struct cxgbi_iso_info *)skb->head; + struct iscsi_r2t_info *r2t; + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + struct iscsi_conn *conn = task->conn; + struct iscsi_session *session = conn->session; + struct iscsi_tcp_task *tcp_task = task->dd_data; + u32 burst_size = 0, r2t_dlength = 0, dlength; + u32 max_pdu_len = tdata->max_xmit_dlength; + u32 segment_offset = 0; + u32 num_pdu; + + if (unlikely(!cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))) + return 0; + + memset(iso_info, 0, sizeof(struct cxgbi_iso_info)); + + if (task->hdr->opcode == ISCSI_OP_SCSI_CMD && session->imm_data_en) { + iso_info->flags |= CXGBI_ISO_INFO_IMM_ENABLE; + burst_size = count; + } + + dlength = ntoh24(task->hdr->dlength); + dlength = min(dlength, max_pdu_len); + hton24(task->hdr->dlength, dlength); + + num_pdu = (count + max_pdu_len - 1) / max_pdu_len; + + if (iscsi_task_has_unsol_data(task)) + r2t = &task->unsol_r2t; + else + r2t = tcp_task->r2t; + + if (r2t) { + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "count %u, tdata->count %u, num_pdu %u," + "task->hdr_len %u, r2t->data_length %u, r2t->sent %u\n", + count, tdata->count, num_pdu, task->hdr_len, + r2t->data_length, r2t->sent); + + r2t_dlength = r2t->data_length - r2t->sent; + segment_offset = r2t->sent; + r2t->datasn += num_pdu - 1; + } + + if (!r2t || !r2t->sent) + iso_info->flags |= CXGBI_ISO_INFO_FSLICE; + + if (task->hdr->flags & ISCSI_FLAG_CMD_FINAL) + iso_info->flags |= CXGBI_ISO_INFO_LSLICE; + + task->hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; + + iso_info->op = task->hdr->opcode; + iso_info->ahs = task->hdr->hlength; + iso_info->num_pdu = num_pdu; + iso_info->mpdu = max_pdu_len; + iso_info->burst_size = (burst_size + r2t_dlength) >> 2; + iso_info->len = count + task->hdr_len; + iso_info->segment_offset = segment_offset; + + cxgbi_skcb_tx_iscsi_hdrlen(skb) = task->hdr_len; + return 0; +} + +static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) +{ + if (hcrc || dcrc) { + u8 submode = 0; + + if (hcrc) + submode |= 1; + if (dcrc) + submode |= 2; + cxgbi_skcb_tx_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode; + } else + cxgbi_skcb_tx_ulp_mode(skb) = 0; +} + +static struct page *rsvd_page; + +int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset, + unsigned int count) +{ + struct iscsi_conn *conn = 
task->conn; + struct iscsi_tcp_task *tcp_task = task->dd_data; + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + struct sk_buff *skb; + struct scsi_cmnd *sc = task->sc; + u32 expected_count, expected_offset; + u32 datalen = count, dlimit = 0; + u32 i, padlen = iscsi_padding(count); + struct page *pg; + int err; + + if (!tcp_task || (tcp_task->dd_data != tdata)) { + pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n", + task, task->sc, tcp_task, + tcp_task ? tcp_task->dd_data : NULL, tdata); + return -EINVAL; + } + skb = tdata->skb; + + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n", + task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK, + be32_to_cpu(task->cmdsn), be32_to_cpu(task->hdr->itt), offset, count); + + skb_put(skb, task->hdr_len); + tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0); + if (!count) { + tdata->count = count; + tdata->offset = offset; + tdata->nr_frags = 0; + tdata->total_offset = 0; + tdata->total_count = 0; + if (tdata->max_xmit_dlength) + conn->max_xmit_dlength = tdata->max_xmit_dlength; + cxgbi_skcb_clear_flag(skb, SKCBF_TX_ISO); + return 0; + } + + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "data->total_count %u, tdata->total_offset %u\n", + tdata->total_count, tdata->total_offset); + + expected_count = tdata->total_count; + expected_offset = tdata->total_offset; + + if ((count != expected_count) || + (offset != expected_offset)) { + err = cxgbi_task_data_sgl_read(task, offset, count, &dlimit); + if (err < 0) { + pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p " + "dlimit %u, sgl err %d.\n", task, task->sc, + tcp_task, tcp_task ? tcp_task->dd_data : NULL, + tdata, dlimit, err); + return err; + } + } + + /* Restore original value of conn->max_xmit_dlength because + * it can get updated to ISO data size. 
+ */ + conn->max_xmit_dlength = tdata->max_xmit_dlength; + + if (sc) { + struct page_frag *frag = tdata->frags; + + if ((tdata->flags & CXGBI_TASK_SGL_COPY) || + (tdata->nr_frags > MAX_SKB_FRAGS) || + (padlen && (tdata->nr_frags == + MAX_SKB_FRAGS))) { + char *dst = skb->data + task->hdr_len; + + /* data fits in the skb's headroom */ + for (i = 0; i < tdata->nr_frags; i++, frag++) { + char *src = kmap_atomic(frag->page); + + memcpy(dst, src + frag->offset, frag->size); + dst += frag->size; + kunmap_atomic(src); + } + + if (padlen) { + memset(dst, 0, padlen); + padlen = 0; + } + skb_put(skb, count + padlen); + } else { + for (i = 0; i < tdata->nr_frags; i++, frag++) { + get_page(frag->page); + skb_fill_page_desc(skb, i, frag->page, + frag->offset, frag->size); + } + + skb->len += count; + skb->data_len += count; + skb->truesize += count; + } + } else { + pg = virt_to_head_page(task->data); + get_page(pg); + skb_fill_page_desc(skb, 0, pg, + task->data - (char *)page_address(pg), + count); + skb->len += count; + skb->data_len += count; + skb->truesize += count; + } + + if (padlen) { + get_page(rsvd_page); + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rsvd_page, 0, padlen); + + skb->data_len += padlen; + skb->truesize += padlen; + skb->len += padlen; + } + + if (likely(count > tdata->max_xmit_dlength)) + cxgbi_prep_iso_info(task, skb, count); + else + cxgbi_skcb_clear_flag(skb, SKCBF_TX_ISO); + + return 0; +} +EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu); + +static int cxgbi_sock_tx_queue_up(struct cxgbi_sock *csk, struct sk_buff *skb) +{ + struct cxgbi_device *cdev = csk->cdev; + struct cxgbi_iso_info *iso_cpl; + u32 frags = skb_shinfo(skb)->nr_frags; + u32 extra_len, num_pdu, hdr_len; + u32 iso_tx_rsvd = 0; + + if (csk->state != CTP_ESTABLISHED) { + log_debug(1 << CXGBI_DBG_PDU_TX, + "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n", + csk, csk->state, csk->flags, csk->tid); + return -EPIPE; + } + + if (csk->err) { + log_debug(1 << CXGBI_DBG_PDU_TX, + "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n", + csk, csk->state, csk->flags, csk->tid, csk->err); + return -EPIPE; + } + + if ((cdev->flags & CXGBI_FLAG_DEV_T3) && + before((csk->snd_win + csk->snd_una), csk->write_seq)) { + log_debug(1 << CXGBI_DBG_PDU_TX, + "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n", + csk, csk->state, csk->flags, csk->tid, csk->write_seq, + csk->snd_una, csk->snd_win); + return -ENOBUFS; + } + + if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) + iso_tx_rsvd = cdev->skb_iso_txhdr; + + if (unlikely(skb_headroom(skb) < (cdev->skb_tx_rsvd + iso_tx_rsvd))) { + pr_err("csk 0x%p, skb head %u < %u.\n", + csk, skb_headroom(skb), cdev->skb_tx_rsvd); + return -EINVAL; + } + + if (skb->len != skb->data_len) + frags++; + + if (frags >= SKB_WR_LIST_SIZE) { + pr_err("csk 0x%p, frags %u, %u,%u >%u.\n", + csk, skb_shinfo(skb)->nr_frags, skb->len, + skb->data_len, (unsigned int)SKB_WR_LIST_SIZE); + return -EINVAL; + } + + cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR); + skb_reset_transport_header(skb); + cxgbi_sock_skb_entail(csk, skb); + + extra_len = cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb)); + + if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))) { + iso_cpl = (struct cxgbi_iso_info *)skb->head; + num_pdu = iso_cpl->num_pdu; + hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb); + extra_len = (cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb)) * + num_pdu) + (hdr_len * (num_pdu - 1)); + } + + csk->write_seq += (skb->len + extra_len); + + return 0; +} + +static int cxgbi_sock_send_skb(struct cxgbi_sock *csk, struct sk_buff *skb) +{ + struct cxgbi_device *cdev = 
csk->cdev; + int len = skb->len; + int err; + + spin_lock_bh(&csk->lock); + err = cxgbi_sock_tx_queue_up(csk, skb); + if (err < 0) { + spin_unlock_bh(&csk->lock); + return err; + } + + if (likely(skb_queue_len(&csk->write_queue))) + cdev->csk_push_tx_frames(csk, 0); + spin_unlock_bh(&csk->lock); + return len; +} + +int cxgbi_conn_xmit_pdu(struct iscsi_task *task) +{ + struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct iscsi_tcp_task *tcp_task = task->dd_data; + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; + struct sk_buff *skb; + struct cxgbi_sock *csk = NULL; + u32 pdulen = 0; + u32 datalen; + int err; + + if (!tcp_task || (tcp_task->dd_data != tdata)) { + pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n", + task, task->sc, tcp_task, + tcp_task ? tcp_task->dd_data : NULL, tdata); + return -EINVAL; + } + + skb = tdata->skb; + if (!skb) { + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "task 0x%p, skb NULL.\n", task); + return 0; + } + + if (cconn && cconn->cep) + csk = cconn->cep->csk; + + if (!csk) { + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "task 0x%p, csk gone.\n", task); + return -EPIPE; + } + + tdata->skb = NULL; + datalen = skb->data_len; + + /* write ppod first if using ofldq to write ppod */ + if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) { + struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev); + + ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_VALID; + if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0) + pr_err("task 0x%p, ppod writing using ofldq failed.\n", + task); + /* continue. Let fl get the data */ + } + + if (!task->sc) + memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX); + + err = cxgbi_sock_send_skb(csk, skb); + if (err > 0) { + pdulen += err; + + log_debug(1 << CXGBI_DBG_PDU_TX, "task 0x%p,0x%p, rv %d.\n", + task, task->sc, err); + + if (task->conn->hdrdgst_en) + pdulen += ISCSI_DIGEST_SIZE; + + if (datalen && task->conn->datadgst_en) + pdulen += ISCSI_DIGEST_SIZE; + + task->conn->txdata_octets += pdulen; + + if (unlikely(cxgbi_is_iso_config(csk) && cxgbi_is_iso_disabled(csk))) { + if (time_after(jiffies, csk->prev_iso_ts + HZ)) { + csk->disable_iso = false; + csk->prev_iso_ts = 0; + log_debug(1 << CXGBI_DBG_PDU_TX, + "enable iso: csk 0x%p\n", csk); + } + } + + return 0; + } + + if (err == -EAGAIN || err == -ENOBUFS) { + log_debug(1 << CXGBI_DBG_PDU_TX, + "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", + task, skb, skb->len, skb->data_len, err); + /* reset skb to send when we are called again */ + tdata->skb = skb; + + if (cxgbi_is_iso_config(csk) && !cxgbi_is_iso_disabled(csk) && + (csk->no_tx_credits++ >= 2)) { + csk->disable_iso = true; + csk->prev_iso_ts = jiffies; + log_debug(1 << CXGBI_DBG_PDU_TX, + "disable iso:csk 0x%p, ts:%lu\n", + csk, csk->prev_iso_ts); + } + + return err; + } + + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", + task->itt, skb, skb->len, skb->data_len, err); + __kfree_skb(skb); + iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); + iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); + return err; +} +EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu); + +void cxgbi_cleanup_task(struct iscsi_task *task) +{ + struct iscsi_tcp_task *tcp_task = task->dd_data; + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + + if (!tcp_task || (tcp_task->dd_data != tdata)) { + pr_info("task 0x%p,0x%p, tcp_task 
0x%p, tdata 0x%p/0x%p.\n", + task, task->sc, tcp_task, + tcp_task ? tcp_task->dd_data : NULL, tdata); + return; + } + + log_debug(1 << CXGBI_DBG_ISCSI, + "task 0x%p, skb 0x%p, itt 0x%x.\n", + task, tdata->skb, task->hdr_itt); + + tcp_task->dd_data = NULL; + + if (!task->sc) + kfree(task->hdr); + task->hdr = NULL; + + /* never reached the xmit task callout */ + if (tdata->skb) { + __kfree_skb(tdata->skb); + tdata->skb = NULL; + } + + task_release_itt(task, task->hdr_itt); + memset(tdata, 0, sizeof(*tdata)); + + iscsi_tcp_cleanup_task(task); +} +EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); + +void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn, + struct iscsi_stats *stats) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + + stats->txdata_octets = conn->txdata_octets; + stats->rxdata_octets = conn->rxdata_octets; + stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; + stats->dataout_pdus = conn->dataout_pdus_cnt; + stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; + stats->datain_pdus = conn->datain_pdus_cnt; + stats->r2t_pdus = conn->r2t_pdus_cnt; + stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; + stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; + stats->digest_err = 0; + stats->timeout_err = 0; + stats->custom_length = 1; + strcpy(stats->custom[0].desc, "eh_abort_cnt"); + stats->custom[0].value = conn->eh_abort_cnt; +} +EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats); + +static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct cxgbi_device *cdev = cconn->chba->cdev; + unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd); + unsigned int max_def = 512 * MAX_SKB_FRAGS; + unsigned int max = max(max_def, headroom); + + max = min(cconn->chba->cdev->tx_max_size, max); + if (conn->max_xmit_dlength) + conn->max_xmit_dlength = min(conn->max_xmit_dlength, max); + else + conn->max_xmit_dlength = max; + cxgbi_align_pdu_size(conn->max_xmit_dlength); + + return 0; +} + +static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + unsigned int max = cconn->chba->cdev->rx_max_size; + + cxgbi_align_pdu_size(max); + + if (conn->max_recv_dlength) { + if (conn->max_recv_dlength > max) { + pr_err("MaxRecvDataSegmentLength %u > %u.\n", + conn->max_recv_dlength, max); + return -EINVAL; + } + conn->max_recv_dlength = min(conn->max_recv_dlength, max); + cxgbi_align_pdu_size(conn->max_recv_dlength); + } else + conn->max_recv_dlength = max; + + return 0; +} + +int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf, int buflen) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct cxgbi_sock *csk = cconn->cep->csk; + int err; + + log_debug(1 << CXGBI_DBG_ISCSI, + "cls_conn 0x%p, param %d, buf(%d) %s.\n", + cls_conn, param, buflen, buf); + + switch (param) { + case ISCSI_PARAM_HDRDGST_EN: + err = iscsi_set_param(cls_conn, param, buf, buflen); + if (!err && conn->hdrdgst_en) + err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, + conn->hdrdgst_en, + conn->datadgst_en); + break; + case ISCSI_PARAM_DATADGST_EN: + err = iscsi_set_param(cls_conn, param, buf, buflen); + if (!err && conn->datadgst_en) + err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, + conn->hdrdgst_en, + conn->datadgst_en); + break; + case ISCSI_PARAM_MAX_R2T: + return iscsi_tcp_set_max_r2t(conn, buf); + case 
ISCSI_PARAM_MAX_RECV_DLENGTH: + err = iscsi_set_param(cls_conn, param, buf, buflen); + if (!err) + err = cxgbi_conn_max_recv_dlength(conn); + break; + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + err = iscsi_set_param(cls_conn, param, buf, buflen); + if (!err) + err = cxgbi_conn_max_xmit_dlength(conn); + break; + default: + return iscsi_set_param(cls_conn, param, buf, buflen); + } + return err; +} +EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); + +int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, + char *buf) +{ + struct cxgbi_endpoint *cep = ep->dd_data; + struct cxgbi_sock *csk; + + log_debug(1 << CXGBI_DBG_ISCSI, + "cls_conn 0x%p, param %d.\n", ep, param); + + switch (param) { + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_CONN_ADDRESS: + if (!cep) + return -ENOTCONN; + + csk = cep->csk; + if (!csk) + return -ENOTCONN; + + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + &csk->daddr, param, buf); + default: + break; + } + return -ENOSYS; +} +EXPORT_SYMBOL_GPL(cxgbi_get_ep_param); + +struct iscsi_cls_conn * +cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid) +{ + struct iscsi_cls_conn *cls_conn; + struct iscsi_conn *conn; + struct iscsi_tcp_conn *tcp_conn; + struct cxgbi_conn *cconn; + + cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid); + if (!cls_conn) + return NULL; + + conn = cls_conn->dd_data; + tcp_conn = conn->dd_data; + cconn = tcp_conn->dd_data; + cconn->iconn = conn; + + log_debug(1 << CXGBI_DBG_ISCSI, + "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n", + cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn); + + return cls_conn; +} +EXPORT_SYMBOL_GPL(cxgbi_create_conn); + +int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, + u64 transport_eph, int is_leading) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct cxgbi_ppm *ppm; + struct iscsi_endpoint *ep; + struct cxgbi_endpoint *cep; + struct cxgbi_sock *csk; + int err; + + ep = iscsi_lookup_endpoint(transport_eph); + if (!ep) + return -EINVAL; + + /* setup ddp pagesize */ + cep = ep->dd_data; + csk = cep->csk; + + ppm = csk->cdev->cdev2ppm(csk->cdev); + err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, + ppm->tformat.pgsz_idx_dflt); + if (err < 0) + goto put_ep; + + err = iscsi_conn_bind(cls_session, cls_conn, is_leading); + if (err) { + err = -EINVAL; + goto put_ep; + } + + /* calculate the tag idx bits needed for this conn based on cmds_max */ + cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1; + + write_lock_bh(&csk->callback_lock); + csk->user_data = conn; + cconn->chba = cep->chba; + cconn->cep = cep; + cep->cconn = cconn; + write_unlock_bh(&csk->callback_lock); + + cxgbi_conn_max_xmit_dlength(conn); + cxgbi_conn_max_recv_dlength(conn); + + log_debug(1 << CXGBI_DBG_ISCSI, + "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n", + cls_session, cls_conn, ep, cconn, csk); + /* init recv engine */ + iscsi_tcp_hdr_recv_prep(tcp_conn); + +put_ep: + iscsi_put_endpoint(ep); + return err; +} +EXPORT_SYMBOL_GPL(cxgbi_bind_conn); + +struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep, + u16 cmds_max, u16 qdepth, + u32 initial_cmdsn) +{ + struct cxgbi_endpoint *cep; + struct cxgbi_hba *chba; + struct Scsi_Host *shost; + struct iscsi_cls_session *cls_session; + struct iscsi_session *session; + + if (!ep) { + pr_err("missing endpoint.\n"); + return NULL; + } + + cep = ep->dd_data; + 
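+	/*
+	 * The endpoint was set up by cxgbi_ep_connect(), so cep->chba
+	 * identifies the offload hba (and its scsi host) that this session
+	 * is bound to.
+	 */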
chba = cep->chba; + shost = chba->shost; + + BUG_ON(chba != iscsi_host_priv(shost)); + + cls_session = iscsi_session_setup(chba->cdev->itp, shost, + cmds_max, 0, + sizeof(struct iscsi_tcp_task) + + sizeof(struct cxgbi_task_data), + initial_cmdsn, ISCSI_MAX_TARGET); + if (!cls_session) + return NULL; + + session = cls_session->dd_data; + if (iscsi_tcp_r2tpool_alloc(session)) + goto remove_session; + + log_debug(1 << CXGBI_DBG_ISCSI, + "ep 0x%p, cls sess 0x%p.\n", ep, cls_session); + return cls_session; + +remove_session: + iscsi_session_teardown(cls_session); + return NULL; +} +EXPORT_SYMBOL_GPL(cxgbi_create_session); + +void cxgbi_destroy_session(struct iscsi_cls_session *cls_session) +{ + log_debug(1 << CXGBI_DBG_ISCSI, + "cls sess 0x%p.\n", cls_session); + + iscsi_tcp_r2tpool_free(cls_session->dd_data); + iscsi_session_teardown(cls_session); +} +EXPORT_SYMBOL_GPL(cxgbi_destroy_session); + +int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, + char *buf, int buflen) +{ + struct cxgbi_hba *chba = iscsi_host_priv(shost); + + if (!chba->ndev) { + shost_printk(KERN_ERR, shost, "Could not get host param. " + "netdev for host not set.\n"); + return -ENODEV; + } + + log_debug(1 << CXGBI_DBG_ISCSI, + "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n", + shost, chba, chba->ndev->name, param, buflen, buf); + + switch (param) { + case ISCSI_HOST_PARAM_IPADDRESS: + { + __be32 addr = in_aton(buf); + log_debug(1 << CXGBI_DBG_ISCSI, + "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr); + cxgbi_set_iscsi_ipv4(chba, addr); + return 0; + } + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_NETDEV_NAME: + return 0; + default: + return iscsi_host_set_param(shost, param, buf, buflen); + } +} +EXPORT_SYMBOL_GPL(cxgbi_set_host_param); + +int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, + char *buf) +{ + struct cxgbi_hba *chba = iscsi_host_priv(shost); + int len = 0; + + if (!chba->ndev) { + shost_printk(KERN_ERR, shost, "Could not get host param. 
" + "netdev for host not set.\n"); + return -ENODEV; + } + + log_debug(1 << CXGBI_DBG_ISCSI, + "shost 0x%p, hba 0x%p,%s, param %d.\n", + shost, chba, chba->ndev->name, param); + + switch (param) { + case ISCSI_HOST_PARAM_HWADDRESS: + len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6); + break; + case ISCSI_HOST_PARAM_NETDEV_NAME: + len = sprintf(buf, "%s\n", chba->ndev->name); + break; + case ISCSI_HOST_PARAM_IPADDRESS: + { + struct cxgbi_sock *csk = find_sock_on_port(chba->cdev, + chba->port_id); + if (csk) { + len = sprintf(buf, "%pIS", + (struct sockaddr *)&csk->saddr); + } + log_debug(1 << CXGBI_DBG_ISCSI, + "hba %s, addr %s.\n", chba->ndev->name, buf); + break; + } + default: + return iscsi_host_get_param(shost, param, buf); + } + + return len; +} +EXPORT_SYMBOL_GPL(cxgbi_get_host_param); + +struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost, + struct sockaddr *dst_addr, + int non_blocking) +{ + struct iscsi_endpoint *ep; + struct cxgbi_endpoint *cep; + struct cxgbi_hba *hba = NULL; + struct cxgbi_sock *csk; + int ifindex = 0; + int err = -EINVAL; + + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, + "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n", + shost, non_blocking, dst_addr); + + if (shost) { + hba = iscsi_host_priv(shost); + if (!hba) { + pr_info("shost 0x%p, priv NULL.\n", shost); + goto err_out; + } + } + +check_route: + if (dst_addr->sa_family == AF_INET) { + csk = cxgbi_check_route(dst_addr, ifindex); +#if IS_ENABLED(CONFIG_IPV6) + } else if (dst_addr->sa_family == AF_INET6) { + csk = cxgbi_check_route6(dst_addr, ifindex); +#endif + } else { + pr_info("address family 0x%x NOT supported.\n", + dst_addr->sa_family); + err = -EAFNOSUPPORT; + return (struct iscsi_endpoint *)ERR_PTR(err); + } + + if (IS_ERR(csk)) + return (struct iscsi_endpoint *)csk; + cxgbi_sock_get(csk); + + if (!hba) + hba = csk->cdev->hbas[csk->port_id]; + else if (hba != csk->cdev->hbas[csk->port_id]) { + if (ifindex != hba->ndev->ifindex) { + cxgbi_sock_put(csk); + cxgbi_sock_closed(csk); + ifindex = hba->ndev->ifindex; + goto check_route; + } + + pr_info("Could not connect through requested host %u" + "hba 0x%p != 0x%p (%u).\n", + shost->host_no, hba, + csk->cdev->hbas[csk->port_id], csk->port_id); + err = -ENOSPC; + goto release_conn; + } + + err = sock_get_port(csk); + if (err) + goto release_conn; + + cxgbi_sock_set_state(csk, CTP_CONNECTING); + err = csk->cdev->csk_init_act_open(csk); + if (err) + goto release_conn; + + if (cxgbi_sock_is_closing(csk)) { + err = -ENOSPC; + pr_info("csk 0x%p is closing.\n", csk); + goto release_conn; + } + + ep = iscsi_create_endpoint(sizeof(*cep)); + if (!ep) { + err = -ENOMEM; + pr_info("iscsi alloc ep, OOM.\n"); + goto release_conn; + } + + cep = ep->dd_data; + cep->csk = csk; + cep->chba = hba; + + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, + "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n", + ep, cep, csk, hba, hba->ndev->name); + return ep; + +release_conn: + cxgbi_sock_put(csk); + cxgbi_sock_closed(csk); +err_out: + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(cxgbi_ep_connect); + +int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) +{ + struct cxgbi_endpoint *cep = ep->dd_data; + struct cxgbi_sock *csk = cep->csk; + + if (!cxgbi_sock_is_established(csk)) + return 0; + return 1; +} +EXPORT_SYMBOL_GPL(cxgbi_ep_poll); + +void cxgbi_ep_disconnect(struct iscsi_endpoint *ep) +{ + struct cxgbi_endpoint *cep = ep->dd_data; + struct cxgbi_conn *cconn = cep->cconn; + struct cxgbi_sock *csk = cep->csk; + + log_debug(1 << CXGBI_DBG_ISCSI 
| 1 << CXGBI_DBG_SOCK, + "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n", + ep, cep, cconn, csk, csk->state, csk->flags); + + if (cconn && cconn->iconn) { + write_lock_bh(&csk->callback_lock); + cep->csk->user_data = NULL; + cconn->cep = NULL; + write_unlock_bh(&csk->callback_lock); + } + iscsi_destroy_endpoint(ep); + + if (likely(csk->state >= CTP_ESTABLISHED)) + need_active_close(csk); + else + cxgbi_sock_closed(csk); + + cxgbi_sock_put(csk); +} +EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect); + +int cxgbi_iscsi_init(struct iscsi_transport *itp, + struct scsi_transport_template **stt) +{ + *stt = iscsi_register_transport(itp); + if (*stt == NULL) { + pr_err("unable to register %s transport 0x%p.\n", + itp->name, itp); + return -ENODEV; + } + log_debug(1 << CXGBI_DBG_ISCSI, + "%s, registered iscsi transport 0x%p.\n", + itp->name, stt); + return 0; +} +EXPORT_SYMBOL_GPL(cxgbi_iscsi_init); + +void cxgbi_iscsi_cleanup(struct iscsi_transport *itp, + struct scsi_transport_template **stt) +{ + if (*stt) { + log_debug(1 << CXGBI_DBG_ISCSI, + "de-register transport 0x%p, %s, stt 0x%p.\n", + itp, itp->name, *stt); + *stt = NULL; + iscsi_unregister_transport(itp); + } +} +EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup); + +umode_t cxgbi_attr_is_visible(int param_type, int param) +{ + switch (param_type) { + case ISCSI_HOST_PARAM: + switch (param) { + case ISCSI_HOST_PARAM_NETDEV_NAME: + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + case ISCSI_HOST_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + case ISCSI_PARAM: + switch (param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + case ISCSI_PARAM_HDRDGST_EN: + case ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_PERSISTENT_ADDRESS: + case ISCSI_PARAM_PERSISTENT_PORT: + case ISCSI_PARAM_PING_TMO: + case ISCSI_PARAM_RECV_TMO: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_MAX_R2T: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_FIRST_BURST: + case ISCSI_PARAM_MAX_BURST: + case ISCSI_PARAM_PDU_INORDER_EN: + case ISCSI_PARAM_DATASEQ_INORDER_EN: + case ISCSI_PARAM_ERL: + case ISCSI_PARAM_TARGET_NAME: + case ISCSI_PARAM_TPGT: + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + case ISCSI_PARAM_USERNAME_IN: + case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_FAST_ABORT: + case ISCSI_PARAM_ABORT_TMO: + case ISCSI_PARAM_LU_RESET_TMO: + case ISCSI_PARAM_TGT_RESET_TMO: + case ISCSI_PARAM_IFACE_NAME: + case ISCSI_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible); + +static int __init libcxgbi_init_module(void) +{ + pr_info("%s", version); + + BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) < + sizeof(struct cxgbi_skb_cb)); + rsvd_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!rsvd_page) + return -ENOMEM; + + return 0; +} + +static void __exit libcxgbi_exit_module(void) +{ + cxgbi_device_unregister_all(0xFF); + put_page(rsvd_page); + return; +} + +module_init(libcxgbi_init_module); +module_exit(libcxgbi_exit_module); diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h new file mode 100644 index 000000000..d92cf1dcc --- /dev/null +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -0,0 +1,646 @@ +/* + * libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver. + * + * Copyright (c) 2010-2015 Chelsio Communications, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Karen Xie (kxie@chelsio.com) + * Written by: Rakesh Ranjan (rranjan@chelsio.com) + */ + +#ifndef __LIBCXGBI_H__ +#define __LIBCXGBI_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +enum cxgbi_dbg_flag { + CXGBI_DBG_ISCSI, + CXGBI_DBG_DDP, + CXGBI_DBG_TOE, + CXGBI_DBG_SOCK, + + CXGBI_DBG_PDU_TX, + CXGBI_DBG_PDU_RX, + CXGBI_DBG_DEV, +}; + +#define log_debug(level, fmt, ...) \ + do { \ + if (dbg_level & (level)) \ + pr_info(fmt, ##__VA_ARGS__); \ + } while (0) + +#define pr_info_ipaddr(fmt_trail, \ + addr1, addr2, args_trail...) \ +do { \ + if (!((1 << CXGBI_DBG_SOCK) & dbg_level)) \ + break; \ + pr_info("%pISpc - %pISpc, " fmt_trail, \ + addr1, addr2, args_trail); \ +} while (0) + +/* max. connections per adapter */ +#define CXGBI_MAX_CONN 16384 + +/* always allocate room for AHS */ +#define SKB_TX_ISCSI_PDU_HEADER_MAX \ + (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE) + +#define ISCSI_PDU_NONPAYLOAD_LEN 312 /* bhs(48) + ahs(256) + digest(8)*/ + +/* + * align pdu size to multiple of 512 for better performance + */ +#define cxgbi_align_pdu_size(n) do { n = (n) & (~511); } while (0) + +#define ULP2_MODE_ISCSI 2 + +#define ULP2_MAX_PKT_SIZE 16224 +#define ULP2_MAX_PDU_PAYLOAD \ + (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN) + +#define CXGBI_ULP2_MAX_ISO_PAYLOAD 65535 + +#define CXGBI_MAX_ISO_DATA_IN_SKB \ + min_t(u32, MAX_SKB_FRAGS << PAGE_SHIFT, CXGBI_ULP2_MAX_ISO_PAYLOAD) + +#define cxgbi_is_iso_config(csk) ((csk)->cdev->skb_iso_txhdr) +#define cxgbi_is_iso_disabled(csk) ((csk)->disable_iso) + +/* + * For iscsi connections HW may insert digest bytes into the pdu. Those digest + * bytes are not sent by the host but are part of the TCP payload and therefore + * consume TCP sequence space. + */ +static const unsigned int ulp2_extra_len[] = { 0, 4, 4, 8 }; +static inline unsigned int cxgbi_ulp_extra_len(int submode) +{ + return ulp2_extra_len[submode & 3]; +} + +#define CPL_RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */ +#define CPL_RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */ +#define CPL_RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */ +#define CPL_RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */ + +/* + * sge_opaque_hdr - + * Opaque version of structure the SGE stores at skb->head of TX_DATA packets + * and for which we must reserve space.
+ */ +struct sge_opaque_hdr { + void *dev; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; +}; + +struct cxgbi_sock { + struct cxgbi_device *cdev; + + int tid; + int atid; + unsigned long flags; + unsigned int mtu; + unsigned short rss_qid; + unsigned short txq_idx; + unsigned short advmss; + unsigned int tx_chan; + unsigned int rx_chan; + unsigned int mss_idx; + unsigned int smac_idx; + unsigned char port_id; + int wr_max_cred; + int wr_cred; + int wr_una_cred; +#ifdef CONFIG_CHELSIO_T4_DCB + u8 dcb_priority; +#endif + unsigned char hcrc_len; + unsigned char dcrc_len; + + void *l2t; + struct sk_buff *wr_pending_head; + struct sk_buff *wr_pending_tail; + struct sk_buff *cpl_close; + struct sk_buff *cpl_abort_req; + struct sk_buff *cpl_abort_rpl; + struct sk_buff *skb_ulp_lhdr; + spinlock_t lock; + struct kref refcnt; + unsigned int state; + unsigned int csk_family; + union { + struct sockaddr_in saddr; + struct sockaddr_in6 saddr6; + }; + union { + struct sockaddr_in daddr; + struct sockaddr_in6 daddr6; + }; + struct dst_entry *dst; + struct sk_buff_head receive_queue; + struct sk_buff_head write_queue; + struct timer_list retry_timer; + struct completion cmpl; + int err; + rwlock_t callback_lock; + void *user_data; + + u32 rcv_nxt; + u32 copied_seq; + u32 rcv_wup; + u32 snd_nxt; + u32 snd_una; + u32 write_seq; + u32 snd_win; + u32 rcv_win; + + bool disable_iso; + u32 no_tx_credits; + unsigned long prev_iso_ts; +}; + +/* + * connection states + */ +enum cxgbi_sock_states{ + CTP_CLOSED, + CTP_CONNECTING, + CTP_ACTIVE_OPEN, + CTP_ESTABLISHED, + CTP_ACTIVE_CLOSE, + CTP_PASSIVE_CLOSE, + CTP_CLOSE_WAIT_1, + CTP_CLOSE_WAIT_2, + CTP_ABORTING, +}; + +/* + * Connection flags -- many to track some close related events. + */ +enum cxgbi_sock_flags { + CTPF_ABORT_RPL_RCVD, /*received one ABORT_RPL_RSS message */ + CTPF_ABORT_REQ_RCVD, /*received one ABORT_REQ_RSS message */ + CTPF_ABORT_RPL_PENDING, /* expecting an abort reply */ + CTPF_TX_DATA_SENT, /* already sent a TX_DATA WR */ + CTPF_ACTIVE_CLOSE_NEEDED,/* need to be closed */ + CTPF_HAS_ATID, /* reserved atid */ + CTPF_HAS_TID, /* reserved hw tid */ + CTPF_OFFLOAD_DOWN, /* offload function off */ + CTPF_LOGOUT_RSP_RCVD, /* received logout response */ +}; + +struct cxgbi_skb_rx_cb { + __u32 ddigest; + __u32 pdulen; +}; + +struct cxgbi_skb_tx_cb { + void *handle; + void *arp_err_handler; + struct sk_buff *wr_next; + u16 iscsi_hdr_len; + u8 ulp_mode; +}; + +enum cxgbi_skcb_flags { + SKCBF_TX_NEED_HDR, /* packet needs a header */ + SKCBF_TX_MEM_WRITE, /* memory write */ + SKCBF_TX_FLAG_COMPL, /* wr completion flag */ + SKCBF_RX_COALESCED, /* received whole pdu */ + SKCBF_RX_HDR, /* received pdu header */ + SKCBF_RX_DATA, /* received pdu payload */ + SKCBF_RX_STATUS, /* received ddp status */ + SKCBF_RX_ISCSI_COMPL, /* received iscsi completion */ + SKCBF_RX_DATA_DDPD, /* pdu payload ddp'd */ + SKCBF_RX_HCRC_ERR, /* header digest error */ + SKCBF_RX_DCRC_ERR, /* data digest error */ + SKCBF_RX_PAD_ERR, /* padding byte error */ + SKCBF_TX_ISO, /* iso cpl in tx skb */ +}; + +struct cxgbi_skb_cb { + union { + struct cxgbi_skb_rx_cb rx; + struct cxgbi_skb_tx_cb tx; + }; + unsigned long flags; + unsigned int seq; +}; + +#define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0])) +#define cxgbi_skcb_flags(skb) (CXGBI_SKB_CB(skb)->flags) +#define cxgbi_skcb_tcp_seq(skb) (CXGBI_SKB_CB(skb)->seq) +#define cxgbi_skcb_rx_ddigest(skb) (CXGBI_SKB_CB(skb)->rx.ddigest) +#define cxgbi_skcb_rx_pdulen(skb) (CXGBI_SKB_CB(skb)->rx.pdulen) +#define 
cxgbi_skcb_tx_wr_next(skb) (CXGBI_SKB_CB(skb)->tx.wr_next) +#define cxgbi_skcb_tx_iscsi_hdrlen(skb) (CXGBI_SKB_CB(skb)->tx.iscsi_hdr_len) +#define cxgbi_skcb_tx_ulp_mode(skb) (CXGBI_SKB_CB(skb)->tx.ulp_mode) + +static inline void cxgbi_skcb_set_flag(struct sk_buff *skb, + enum cxgbi_skcb_flags flag) +{ + __set_bit(flag, &(cxgbi_skcb_flags(skb))); +} + +static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb, + enum cxgbi_skcb_flags flag) +{ + __clear_bit(flag, &(cxgbi_skcb_flags(skb))); +} + +static inline int cxgbi_skcb_test_flag(const struct sk_buff *skb, + enum cxgbi_skcb_flags flag) +{ + return test_bit(flag, &(cxgbi_skcb_flags(skb))); +} + +static inline void cxgbi_sock_set_flag(struct cxgbi_sock *csk, + enum cxgbi_sock_flags flag) +{ + __set_bit(flag, &csk->flags); + log_debug(1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx, bit %d.\n", + csk, csk->state, csk->flags, flag); +} + +static inline void cxgbi_sock_clear_flag(struct cxgbi_sock *csk, + enum cxgbi_sock_flags flag) +{ + __clear_bit(flag, &csk->flags); + log_debug(1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx, bit %d.\n", + csk, csk->state, csk->flags, flag); +} + +static inline int cxgbi_sock_flag(struct cxgbi_sock *csk, + enum cxgbi_sock_flags flag) +{ + if (csk == NULL) + return 0; + return test_bit(flag, &csk->flags); +} + +static inline void cxgbi_sock_set_state(struct cxgbi_sock *csk, int state) +{ + log_debug(1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx, state -> %u.\n", + csk, csk->state, csk->flags, state); + csk->state = state; +} + +static inline void cxgbi_sock_free(struct kref *kref) +{ + struct cxgbi_sock *csk = container_of(kref, + struct cxgbi_sock, + refcnt); + if (csk) { + log_debug(1 << CXGBI_DBG_SOCK, + "free csk 0x%p, state %u, flags 0x%lx\n", + csk, csk->state, csk->flags); + kfree(csk); + } +} + +static inline void __cxgbi_sock_put(const char *fn, struct cxgbi_sock *csk) +{ + log_debug(1 << CXGBI_DBG_SOCK, + "%s, put csk 0x%p, ref %u-1.\n", + fn, csk, kref_read(&csk->refcnt)); + kref_put(&csk->refcnt, cxgbi_sock_free); +} +#define cxgbi_sock_put(csk) __cxgbi_sock_put(__func__, csk) + +static inline void __cxgbi_sock_get(const char *fn, struct cxgbi_sock *csk) +{ + log_debug(1 << CXGBI_DBG_SOCK, + "%s, get csk 0x%p, ref %u+1.\n", + fn, csk, kref_read(&csk->refcnt)); + kref_get(&csk->refcnt); +} +#define cxgbi_sock_get(csk) __cxgbi_sock_get(__func__, csk) + +static inline int cxgbi_sock_is_closing(struct cxgbi_sock *csk) +{ + return csk->state >= CTP_ACTIVE_CLOSE; +} + +static inline int cxgbi_sock_is_established(struct cxgbi_sock *csk) +{ + return csk->state == CTP_ESTABLISHED; +} + +static inline void cxgbi_sock_purge_write_queue(struct cxgbi_sock *csk) +{ + struct sk_buff *skb; + + while ((skb = __skb_dequeue(&csk->write_queue))) + __kfree_skb(skb); +} + +static inline unsigned int cxgbi_sock_compute_wscale(unsigned int win) +{ + unsigned int wscale = 0; + + while (wscale < 14 && (65535 << wscale) < win) + wscale++; + return wscale; +} + +static inline struct sk_buff *alloc_wr(int wrlen, int dlen, gfp_t gfp) +{ + struct sk_buff *skb = alloc_skb(wrlen + dlen, gfp); + + if (skb) { + __skb_put(skb, wrlen); + memset(skb->head, 0, wrlen + dlen); + } else + pr_info("alloc cpl wr skb %u+%u, OOM.\n", wrlen, dlen); + return skb; +} + + +/* + * The number of WRs needed for an skb depends on the number of fragments + * in the skb and whether it has any payload in its main body. This maps the + * length of the gather list represented by an skb into the # of necessary WRs. 
+ * The extra two fragments are for iscsi bhs and payload padding. + */ +#define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2) + +static inline void cxgbi_sock_reset_wr_list(struct cxgbi_sock *csk) +{ + csk->wr_pending_head = csk->wr_pending_tail = NULL; +} + +static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk, + struct sk_buff *skb) +{ + cxgbi_skcb_tx_wr_next(skb) = NULL; + /* + * We want to take an extra reference since both us and the driver + * need to free the packet before it's really freed. + */ + skb_get(skb); + + if (!csk->wr_pending_head) + csk->wr_pending_head = skb; + else + cxgbi_skcb_tx_wr_next(csk->wr_pending_tail) = skb; + csk->wr_pending_tail = skb; +} + +static inline int cxgbi_sock_count_pending_wrs(const struct cxgbi_sock *csk) +{ + int n = 0; + const struct sk_buff *skb = csk->wr_pending_head; + + while (skb) { + n += skb->csum; + skb = cxgbi_skcb_tx_wr_next(skb); + } + return n; +} + +static inline struct sk_buff *cxgbi_sock_peek_wr(const struct cxgbi_sock *csk) +{ + return csk->wr_pending_head; +} + +static inline struct sk_buff *cxgbi_sock_dequeue_wr(struct cxgbi_sock *csk) +{ + struct sk_buff *skb = csk->wr_pending_head; + + if (likely(skb)) { + csk->wr_pending_head = cxgbi_skcb_tx_wr_next(skb); + cxgbi_skcb_tx_wr_next(skb) = NULL; + } + return skb; +} + +void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *); +void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *); +void cxgbi_sock_skb_entail(struct cxgbi_sock *, struct sk_buff *); +void cxgbi_sock_fail_act_open(struct cxgbi_sock *, int); +void cxgbi_sock_act_open_req_arp_failure(void *, struct sk_buff *); +void cxgbi_sock_closed(struct cxgbi_sock *); +void cxgbi_sock_established(struct cxgbi_sock *, unsigned int, unsigned int); +void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *); +void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *); +void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *, u32); +void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *, unsigned int, unsigned int, + int); +unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *, unsigned int); +void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *); + +struct cxgbi_hba { + struct net_device *ndev; + struct net_device *vdev; /* vlan dev */ + struct Scsi_Host *shost; + struct cxgbi_device *cdev; + __be32 ipv4addr; + unsigned char port_id; +}; + +struct cxgbi_ports_map { + unsigned int max_connect; + unsigned int used; + unsigned short sport_base; + spinlock_t lock; + unsigned int next; + struct cxgbi_sock **port_csk; +}; + +#define CXGBI_FLAG_DEV_T3 0x1 +#define CXGBI_FLAG_DEV_T4 0x2 +#define CXGBI_FLAG_ADAPTER_RESET 0x4 +#define CXGBI_FLAG_IPV4_SET 0x10 +#define CXGBI_FLAG_USE_PPOD_OFLDQ 0x40 +#define CXGBI_FLAG_DDP_OFF 0x100 +#define CXGBI_FLAG_DEV_ISO_OFF 0x400 + +struct cxgbi_device { + struct list_head list_head; + struct list_head rcu_node; + unsigned int flags; + struct net_device **ports; + void *lldev; + struct cxgbi_hba **hbas; + const unsigned short *mtus; + unsigned char nmtus; + unsigned char nports; + struct pci_dev *pdev; + struct dentry *debugfs_root; + struct iscsi_transport *itp; + struct module *owner; + + unsigned int pfvf; + unsigned int rx_credit_thres; + unsigned int skb_tx_rsvd; + u32 skb_iso_txhdr; + unsigned int skb_rx_extra; /* for msg coalesced mode */ + unsigned int tx_max_size; + unsigned int rx_max_size; + unsigned int rxq_idx_cntr; + struct cxgbi_ports_map pmap; + + void (*dev_ddp_cleanup)(struct cxgbi_device *); + struct cxgbi_ppm* (*cdev2ppm)(struct cxgbi_device *); + int (*csk_ddp_set_map)(struct cxgbi_ppm *, struct 
cxgbi_sock *, + struct cxgbi_task_tag_info *); + void (*csk_ddp_clear_map)(struct cxgbi_device *cdev, + struct cxgbi_ppm *, + struct cxgbi_task_tag_info *); + int (*csk_ddp_setup_digest)(struct cxgbi_sock *, + unsigned int, int, int); + int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, + unsigned int, int); + + void (*csk_release_offload_resources)(struct cxgbi_sock *); + int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); + u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32); + int (*csk_push_tx_frames)(struct cxgbi_sock *, int); + void (*csk_send_abort_req)(struct cxgbi_sock *); + void (*csk_send_close_req)(struct cxgbi_sock *); + int (*csk_alloc_cpls)(struct cxgbi_sock *); + int (*csk_init_act_open)(struct cxgbi_sock *); + + void *dd_data; +}; +#define cxgbi_cdev_priv(cdev) ((cdev)->dd_data) + +struct cxgbi_conn { + struct cxgbi_endpoint *cep; + struct iscsi_conn *iconn; + struct cxgbi_hba *chba; + u32 task_idx_bits; + unsigned int ddp_full; + unsigned int ddp_tag_full; +}; + +struct cxgbi_endpoint { + struct cxgbi_conn *cconn; + struct cxgbi_hba *chba; + struct cxgbi_sock *csk; +}; + +struct cxgbi_task_data { +#define CXGBI_TASK_SGL_CHECKED 0x1 +#define CXGBI_TASK_SGL_COPY 0x2 + u8 flags; + unsigned short nr_frags; + struct page_frag frags[MAX_SKB_FRAGS]; + struct sk_buff *skb; + unsigned int dlen; + unsigned int offset; + unsigned int count; + unsigned int sgoffset; + u32 total_count; + u32 total_offset; + u32 max_xmit_dlength; + struct cxgbi_task_tag_info ttinfo; +}; +#define iscsi_task_cxgbi_data(task) \ + ((task)->dd_data + sizeof(struct iscsi_tcp_task)) + +struct cxgbi_iso_info { +#define CXGBI_ISO_INFO_FSLICE 0x1 +#define CXGBI_ISO_INFO_LSLICE 0x2 +#define CXGBI_ISO_INFO_IMM_ENABLE 0x4 + u8 flags; + u8 op; + u8 ahs; + u8 num_pdu; + u32 mpdu; + u32 burst_size; + u32 len; + u32 segment_offset; + u32 datasn_offset; + u32 buffer_offset; +}; + +static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr) +{ + if (chba->cdev->flags & CXGBI_FLAG_IPV4_SET) + chba->ipv4addr = ipaddr; + else + pr_info("set iscsi ipv4 NOT supported, using %s ipv4.\n", + chba->ndev->name); +} + +struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int); +void cxgbi_device_unregister(struct cxgbi_device *); +void cxgbi_device_unregister_all(unsigned int flag); +struct cxgbi_device *cxgbi_device_find_by_lldev(void *); +struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *); +struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *, + int *); +int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int, + const struct scsi_host_template *, + struct scsi_transport_template *); +void cxgbi_hbas_remove(struct cxgbi_device *); + +int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, + unsigned int max_conn); +void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev); + +void cxgbi_conn_tx_open(struct cxgbi_sock *); +void cxgbi_conn_pdu_ready(struct cxgbi_sock *); +int cxgbi_conn_alloc_pdu(struct iscsi_task *, u8); +int cxgbi_conn_init_pdu(struct iscsi_task *, unsigned int , unsigned int); +int cxgbi_conn_xmit_pdu(struct iscsi_task *); + +void cxgbi_cleanup_task(struct iscsi_task *task); + +umode_t cxgbi_attr_is_visible(int param_type, int param); +void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *); +int cxgbi_set_conn_param(struct iscsi_cls_conn *, + enum iscsi_param, char *, int); +int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param, char *); +struct iscsi_cls_conn 
*cxgbi_create_conn(struct iscsi_cls_session *, u32); +int cxgbi_bind_conn(struct iscsi_cls_session *, + struct iscsi_cls_conn *, u64, int); +void cxgbi_destroy_session(struct iscsi_cls_session *); +struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *, + u16, u16, u32); +int cxgbi_set_host_param(struct Scsi_Host *, + enum iscsi_host_param, char *, int); +int cxgbi_get_host_param(struct Scsi_Host *, enum iscsi_host_param, char *); +struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *, + struct sockaddr *, int); +int cxgbi_ep_poll(struct iscsi_endpoint *, int); +void cxgbi_ep_disconnect(struct iscsi_endpoint *); + +int cxgbi_iscsi_init(struct iscsi_transport *, + struct scsi_transport_template **); +void cxgbi_iscsi_cleanup(struct iscsi_transport *, + struct scsi_transport_template **); +void cxgbi_parse_pdu_itt(struct iscsi_conn *, itt_t, int *, int *); +int cxgbi_ddp_init(struct cxgbi_device *, unsigned int, unsigned int, + unsigned int, unsigned int); +int cxgbi_ddp_cleanup(struct cxgbi_device *); +void cxgbi_ddp_page_size_factor(int *); +void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *, + struct cxgbi_task_tag_info *, + struct scatterlist **sg_pp, unsigned int *sg_off); +int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev, + struct cxgbi_tag_format *tformat, + unsigned int iscsi_size, unsigned int llimit, + unsigned int start, unsigned int rsvd_factor, + unsigned int edram_start, unsigned int edram_size); +#endif /*__LIBCXGBI_H__*/ diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig new file mode 100644 index 000000000..5533bdcb0 --- /dev/null +++ b/drivers/scsi/cxlflash/Kconfig @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# IBM CXL-attached Flash Accelerator SCSI Driver +# + +config CXLFLASH + tristate "Support for IBM CAPI Flash" + depends on PCI && SCSI && (CXL || OCXL) && EEH + select IRQ_POLL + default m + help + Allows CAPI Accelerated IO to Flash + If unsure, say N. diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile new file mode 100644 index 000000000..fd2f0dd9d --- /dev/null +++ b/drivers/scsi/cxlflash/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CXLFLASH) += cxlflash.o +cxlflash-y += main.o superpipe.o lunmgt.o vlun.o +cxlflash-$(CONFIG_CXL) += cxl_hw.o +cxlflash-$(CONFIG_OCXL) += ocxl_hw.o diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h new file mode 100644 index 000000000..181e0445e --- /dev/null +++ b/drivers/scsi/cxlflash/backend.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * CXL Flash Device Driver + * + * Written by: Matthew R. 
Ochs , IBM Corporation + * Uma Krishnan , IBM Corporation + * + * Copyright (C) 2018 IBM Corporation + */ + +#ifndef _CXLFLASH_BACKEND_H +#define _CXLFLASH_BACKEND_H + +extern const struct cxlflash_backend_ops cxlflash_cxl_ops; +extern const struct cxlflash_backend_ops cxlflash_ocxl_ops; + +struct cxlflash_backend_ops { + struct module *module; + void __iomem * (*psa_map)(void *ctx_cookie); + void (*psa_unmap)(void __iomem *addr); + int (*process_element)(void *ctx_cookie); + int (*map_afu_irq)(void *ctx_cookie, int num, irq_handler_t handler, + void *cookie, char *name); + void (*unmap_afu_irq)(void *ctx_cookie, int num, void *cookie); + u64 (*get_irq_objhndl)(void *ctx_cookie, int irq); + int (*start_context)(void *ctx_cookie); + int (*stop_context)(void *ctx_cookie); + int (*afu_reset)(void *ctx_cookie); + void (*set_master)(void *ctx_cookie); + void * (*get_context)(struct pci_dev *dev, void *afu_cookie); + void * (*dev_context_init)(struct pci_dev *dev, void *afu_cookie); + int (*release_context)(void *ctx_cookie); + void (*perst_reloads_same_image)(void *afu_cookie, bool image); + ssize_t (*read_adapter_vpd)(struct pci_dev *dev, void *buf, + size_t count); + int (*allocate_afu_irqs)(void *ctx_cookie, int num); + void (*free_afu_irqs)(void *ctx_cookie); + void * (*create_afu)(struct pci_dev *dev); + void (*destroy_afu)(void *afu_cookie); + struct file * (*get_fd)(void *ctx_cookie, struct file_operations *fops, + int *fd); + void * (*fops_get_context)(struct file *file); + int (*start_work)(void *ctx_cookie, u64 irqs); + int (*fd_mmap)(struct file *file, struct vm_area_struct *vm); + int (*fd_release)(struct inode *inode, struct file *file); +}; + +#endif /* _CXLFLASH_BACKEND_H */ diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h new file mode 100644 index 000000000..de6229e27 --- /dev/null +++ b/drivers/scsi/cxlflash/common.h @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * CXL Flash Device Driver + * + * Written by: Manoj N. Kumar , IBM Corporation + * Matthew R. 
Ochs , IBM Corporation + * + * Copyright (C) 2015 IBM Corporation + */ + +#ifndef _CXLFLASH_COMMON_H +#define _CXLFLASH_COMMON_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "backend.h" + +extern const struct file_operations cxlflash_cxl_fops; + +#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */ +#define MAX_FC_PORTS CXLFLASH_MAX_FC_PORTS /* max ports per AFU */ +#define LEGACY_FC_PORTS 2 /* legacy ports per AFU */ + +#define CHAN2PORTBANK(_x) ((_x) >> ilog2(CXLFLASH_NUM_FC_PORTS_PER_BANK)) +#define CHAN2BANKPORT(_x) ((_x) & (CXLFLASH_NUM_FC_PORTS_PER_BANK - 1)) + +#define CHAN2PORTMASK(_x) (1 << (_x)) /* channel to port mask */ +#define PORTMASK2CHAN(_x) (ilog2((_x))) /* port mask to channel */ +#define PORTNUM2CHAN(_x) ((_x) - 1) /* port number to channel */ + +#define CXLFLASH_BLOCK_SIZE 4096 /* 4K blocks */ +#define CXLFLASH_MAX_XFER_SIZE 16777216 /* 16MB transfer */ +#define CXLFLASH_MAX_SECTORS (CXLFLASH_MAX_XFER_SIZE/512) /* SCSI wants + * max_sectors + * in units of + * 512 byte + * sectors + */ + +#define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry)) + +/* AFU command retry limit */ +#define MC_RETRY_CNT 5 /* Sufficient for SCSI and certain AFU errors */ + +/* Command management definitions */ +#define CXLFLASH_MAX_CMDS 256 +#define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS + +/* RRQ for master issued cmds */ +#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS + +/* SQ for master issued cmds */ +#define NUM_SQ_ENTRY CXLFLASH_MAX_CMDS + +/* Hardware queue definitions */ +#define CXLFLASH_DEF_HWQS 1 +#define CXLFLASH_MAX_HWQS 8 +#define PRIMARY_HWQ 0 + + +static inline void check_sizes(void) +{ + BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_FC_PORTS_PER_BANK); + BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_MAX_CMDS); +} + +/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */ +#define CMD_BUFSIZE SIZE_4K + +enum cxlflash_lr_state { + LINK_RESET_INVALID, + LINK_RESET_REQUIRED, + LINK_RESET_COMPLETE +}; + +enum cxlflash_init_state { + INIT_STATE_NONE, + INIT_STATE_PCI, + INIT_STATE_AFU, + INIT_STATE_SCSI, + INIT_STATE_CDEV +}; + +enum cxlflash_state { + STATE_PROBING, /* Initial state during probe */ + STATE_PROBED, /* Temporary state, probe completed but EEH occurred */ + STATE_NORMAL, /* Normal running state, everything good */ + STATE_RESET, /* Reset state, trying to reset/recover */ + STATE_FAILTERM /* Failed/terminating state, error out users/threads */ +}; + +enum cxlflash_hwq_mode { + HWQ_MODE_RR, /* Roundrobin (default) */ + HWQ_MODE_TAG, /* Distribute based on block MQ tag */ + HWQ_MODE_CPU, /* CPU affinity */ + MAX_HWQ_MODE +}; + +/* + * Each context has its own set of resource handles that is visible + * only from that context. 
+ */ + +struct cxlflash_cfg { + struct afu *afu; + + const struct cxlflash_backend_ops *ops; + struct pci_dev *dev; + struct pci_device_id *dev_id; + struct Scsi_Host *host; + int num_fc_ports; + struct cdev cdev; + struct device *chardev; + + ulong cxlflash_regs_pci; + + struct work_struct work_q; + enum cxlflash_init_state init_state; + enum cxlflash_lr_state lr_state; + int lr_port; + atomic_t scan_host_needed; + + void *afu_cookie; + + atomic_t recovery_threads; + struct mutex ctx_recovery_mutex; + struct mutex ctx_tbl_list_mutex; + struct rw_semaphore ioctl_rwsem; + struct ctx_info *ctx_tbl[MAX_CONTEXT]; + struct list_head ctx_err_recovery; /* contexts w/ recovery pending */ + struct file_operations cxl_fops; + + /* Parameters that are LUN table related */ + int last_lun_index[MAX_FC_PORTS]; + int promote_lun_index; + struct list_head lluns; /* list of llun_info structs */ + + wait_queue_head_t tmf_waitq; + spinlock_t tmf_slock; + bool tmf_active; + bool ws_unmap; /* Write-same unmap supported */ + wait_queue_head_t reset_waitq; + enum cxlflash_state state; + async_cookie_t async_reset_cookie; +}; + +struct afu_cmd { + struct sisl_ioarcb rcb; /* IOARCB (cache line aligned) */ + struct sisl_ioasa sa; /* IOASA must follow IOARCB */ + struct afu *parent; + struct scsi_cmnd *scp; + struct completion cevent; + struct list_head queue; + u32 hwq_index; + + u8 cmd_tmf:1, + cmd_aborted:1; + + struct list_head list; /* Pending commands link */ + + /* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned. + * However for performance reasons the IOARCB/IOASA should be + * cache line aligned. + */ +} __aligned(cache_line_size()); + +static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc) +{ + return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd)); +} + +static inline struct afu_cmd *sc_to_afuci(struct scsi_cmnd *sc) +{ + struct afu_cmd *afuc = sc_to_afuc(sc); + + INIT_LIST_HEAD(&afuc->queue); + return afuc; +} + +static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc) +{ + struct afu_cmd *afuc = sc_to_afuc(sc); + + memset(afuc, 0, sizeof(*afuc)); + return sc_to_afuci(sc); +} + +struct hwq { + /* Stuff requiring alignment go first. */ + struct sisl_ioarcb sq[NUM_SQ_ENTRY]; /* 16K SQ */ + u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */ + + /* Beware of alignment till here. 
Preferably introduce new + * fields after this point + */ + struct afu *afu; + void *ctx_cookie; + struct sisl_host_map __iomem *host_map; /* MC host map */ + struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */ + ctx_hndl_t ctx_hndl; /* master's context handle */ + u32 index; /* Index of this hwq */ + int num_irqs; /* Number of interrupts requested for context */ + struct list_head pending_cmds; /* Commands pending completion */ + + atomic_t hsq_credits; + spinlock_t hsq_slock; /* Hardware send queue lock */ + struct sisl_ioarcb *hsq_start; + struct sisl_ioarcb *hsq_end; + struct sisl_ioarcb *hsq_curr; + spinlock_t hrrq_slock; + u64 *hrrq_start; + u64 *hrrq_end; + u64 *hrrq_curr; + bool toggle; + bool hrrq_online; + + s64 room; + + struct irq_poll irqpoll; +} __aligned(cache_line_size()); + +struct afu { + struct hwq hwqs[CXLFLASH_MAX_HWQS]; + int (*send_cmd)(struct afu *afu, struct afu_cmd *cmd); + int (*context_reset)(struct hwq *hwq); + + /* AFU HW */ + struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */ + + atomic_t cmds_active; /* Number of currently active AFU commands */ + struct mutex sync_active; /* Mutex to serialize AFU commands */ + u64 hb; + u32 internal_lun; /* User-desired LUN mode for this AFU */ + + u32 num_hwqs; /* Number of hardware queues */ + u32 desired_hwqs; /* Desired h/w queues, effective on AFU reset */ + enum cxlflash_hwq_mode hwq_mode; /* Steering mode for h/w queues */ + u32 hwq_rr_count; /* Count to distribute traffic for roundrobin */ + + char version[16]; + u64 interface_version; + + u32 irqpoll_weight; + struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */ +}; + +static inline struct hwq *get_hwq(struct afu *afu, u32 index) +{ + WARN_ON(index >= CXLFLASH_MAX_HWQS); + + return &afu->hwqs[index]; +} + +static inline bool afu_is_irqpoll_enabled(struct afu *afu) +{ + return !!afu->irqpoll_weight; +} + +static inline bool afu_has_cap(struct afu *afu, u64 cap) +{ + u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT; + + return afu_cap & cap; +} + +static inline bool afu_is_ocxl_lisn(struct afu *afu) +{ + return afu_has_cap(afu, SISL_INTVER_CAP_OCXL_LISN); +} + +static inline bool afu_is_afu_debug(struct afu *afu) +{ + return afu_has_cap(afu, SISL_INTVER_CAP_AFU_DEBUG); +} + +static inline bool afu_is_lun_provision(struct afu *afu) +{ + return afu_has_cap(afu, SISL_INTVER_CAP_LUN_PROVISION); +} + +static inline bool afu_is_sq_cmd_mode(struct afu *afu) +{ + return afu_has_cap(afu, SISL_INTVER_CAP_SQ_CMD_MODE); +} + +static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu) +{ + return afu_has_cap(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE); +} + +static inline u64 lun_to_lunid(u64 lun) +{ + __be64 lun_id; + + int_to_scsilun(lun, (struct scsi_lun *)&lun_id); + return be64_to_cpu(lun_id); +} + +static inline struct fc_port_bank __iomem *get_fc_port_bank( + struct cxlflash_cfg *cfg, int i) +{ + struct afu *afu = cfg->afu; + + return &afu->afu_map->global.bank[CHAN2PORTBANK(i)]; +} + +static inline __be64 __iomem *get_fc_port_regs(struct cxlflash_cfg *cfg, int i) +{ + struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i); + + return &fcpb->fc_port_regs[CHAN2BANKPORT(i)][0]; +} + +static inline __be64 __iomem *get_fc_port_luns(struct cxlflash_cfg *cfg, int i) +{ + struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i); + + return &fcpb->fc_port_luns[CHAN2BANKPORT(i)][0]; +} + +int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t c, res_hndl_t r, u8 mode); +void cxlflash_list_init(void); +void 
cxlflash_term_global_luns(void); +void cxlflash_free_errpage(void); +int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, + void __user *arg); +void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg); +int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg); +void cxlflash_term_local_luns(struct cxlflash_cfg *cfg); +void cxlflash_restore_luntable(struct cxlflash_cfg *cfg); + +#endif /* ifndef _CXLFLASH_COMMON_H */ diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c new file mode 100644 index 000000000..b814130f3 --- /dev/null +++ b/drivers/scsi/cxlflash/cxl_hw.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * CXL Flash Device Driver + * + * Written by: Matthew R. Ochs , IBM Corporation + * Uma Krishnan , IBM Corporation + * + * Copyright (C) 2018 IBM Corporation + */ + +#include + +#include "backend.h" + +/* + * The following routines map the cxlflash backend operations to existing CXL + * kernel API function and are largely simple shims that provide an abstraction + * for converting generic context and AFU cookies into cxl_context or cxl_afu + * pointers. + */ + +static void __iomem *cxlflash_psa_map(void *ctx_cookie) +{ + return cxl_psa_map(ctx_cookie); +} + +static void cxlflash_psa_unmap(void __iomem *addr) +{ + cxl_psa_unmap(addr); +} + +static int cxlflash_process_element(void *ctx_cookie) +{ + return cxl_process_element(ctx_cookie); +} + +static int cxlflash_map_afu_irq(void *ctx_cookie, int num, + irq_handler_t handler, void *cookie, char *name) +{ + return cxl_map_afu_irq(ctx_cookie, num, handler, cookie, name); +} + +static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie) +{ + cxl_unmap_afu_irq(ctx_cookie, num, cookie); +} + +static u64 cxlflash_get_irq_objhndl(void *ctx_cookie, int irq) +{ + /* Dummy fop for cxl */ + return 0; +} + +static int cxlflash_start_context(void *ctx_cookie) +{ + return cxl_start_context(ctx_cookie, 0, NULL); +} + +static int cxlflash_stop_context(void *ctx_cookie) +{ + return cxl_stop_context(ctx_cookie); +} + +static int cxlflash_afu_reset(void *ctx_cookie) +{ + return cxl_afu_reset(ctx_cookie); +} + +static void cxlflash_set_master(void *ctx_cookie) +{ + cxl_set_master(ctx_cookie); +} + +static void *cxlflash_get_context(struct pci_dev *dev, void *afu_cookie) +{ + return cxl_get_context(dev); +} + +static void *cxlflash_dev_context_init(struct pci_dev *dev, void *afu_cookie) +{ + return cxl_dev_context_init(dev); +} + +static int cxlflash_release_context(void *ctx_cookie) +{ + return cxl_release_context(ctx_cookie); +} + +static void cxlflash_perst_reloads_same_image(void *afu_cookie, bool image) +{ + cxl_perst_reloads_same_image(afu_cookie, image); +} + +static ssize_t cxlflash_read_adapter_vpd(struct pci_dev *dev, + void *buf, size_t count) +{ + return cxl_read_adapter_vpd(dev, buf, count); +} + +static int cxlflash_allocate_afu_irqs(void *ctx_cookie, int num) +{ + return cxl_allocate_afu_irqs(ctx_cookie, num); +} + +static void cxlflash_free_afu_irqs(void *ctx_cookie) +{ + cxl_free_afu_irqs(ctx_cookie); +} + +static void *cxlflash_create_afu(struct pci_dev *dev) +{ + return cxl_pci_to_afu(dev); +} + +static void cxlflash_destroy_afu(void *afu) +{ + /* Dummy fop for cxl */ +} + +static struct file *cxlflash_get_fd(void *ctx_cookie, + struct file_operations *fops, int *fd) +{ + return cxl_get_fd(ctx_cookie, fops, fd); +} + +static void *cxlflash_fops_get_context(struct file *file) +{ + return cxl_fops_get_context(file); +} + +static int cxlflash_start_work(void 
*ctx_cookie, u64 irqs) +{ + struct cxl_ioctl_start_work work = { 0 }; + + work.num_interrupts = irqs; + work.flags = CXL_START_WORK_NUM_IRQS; + + return cxl_start_work(ctx_cookie, &work); +} + +static int cxlflash_fd_mmap(struct file *file, struct vm_area_struct *vm) +{ + return cxl_fd_mmap(file, vm); +} + +static int cxlflash_fd_release(struct inode *inode, struct file *file) +{ + return cxl_fd_release(inode, file); +} + +const struct cxlflash_backend_ops cxlflash_cxl_ops = { + .module = THIS_MODULE, + .psa_map = cxlflash_psa_map, + .psa_unmap = cxlflash_psa_unmap, + .process_element = cxlflash_process_element, + .map_afu_irq = cxlflash_map_afu_irq, + .unmap_afu_irq = cxlflash_unmap_afu_irq, + .get_irq_objhndl = cxlflash_get_irq_objhndl, + .start_context = cxlflash_start_context, + .stop_context = cxlflash_stop_context, + .afu_reset = cxlflash_afu_reset, + .set_master = cxlflash_set_master, + .get_context = cxlflash_get_context, + .dev_context_init = cxlflash_dev_context_init, + .release_context = cxlflash_release_context, + .perst_reloads_same_image = cxlflash_perst_reloads_same_image, + .read_adapter_vpd = cxlflash_read_adapter_vpd, + .allocate_afu_irqs = cxlflash_allocate_afu_irqs, + .free_afu_irqs = cxlflash_free_afu_irqs, + .create_afu = cxlflash_create_afu, + .destroy_afu = cxlflash_destroy_afu, + .get_fd = cxlflash_get_fd, + .fops_get_context = cxlflash_fops_get_context, + .start_work = cxlflash_start_work, + .fd_mmap = cxlflash_fd_mmap, + .fd_release = cxlflash_fd_release, +}; diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c new file mode 100644 index 000000000..e0e15b44a --- /dev/null +++ b/drivers/scsi/cxlflash/lunmgt.c @@ -0,0 +1,278 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * CXL Flash Device Driver + * + * Written by: Manoj N. Kumar , IBM Corporation + * Matthew R. Ochs , IBM Corporation + * + * Copyright (C) 2015 IBM Corporation + */ + +#include + +#include +#include + +#include +#include + +#include "sislite.h" +#include "common.h" +#include "vlun.h" +#include "superpipe.h" + +/** + * create_local() - allocate and initialize a local LUN information structure + * @sdev: SCSI device associated with LUN. + * @wwid: World Wide Node Name for LUN. + * + * Return: Allocated local llun_info structure on success, NULL on failure + */ +static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct llun_info *lli = NULL; + + lli = kzalloc(sizeof(*lli), GFP_KERNEL); + if (unlikely(!lli)) { + dev_err(dev, "%s: could not allocate lli\n", __func__); + goto out; + } + + lli->sdev = sdev; + lli->host_no = sdev->host->host_no; + lli->in_table = false; + + memcpy(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN); +out: + return lli; +} + +/** + * create_global() - allocate and initialize a global LUN information structure + * @sdev: SCSI device associated with LUN. + * @wwid: World Wide Node Name for LUN. 
+ * + * Return: Allocated global glun_info structure on success, NULL on failure + */ +static struct glun_info *create_global(struct scsi_device *sdev, u8 *wwid) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct glun_info *gli = NULL; + + gli = kzalloc(sizeof(*gli), GFP_KERNEL); + if (unlikely(!gli)) { + dev_err(dev, "%s: could not allocate gli\n", __func__); + goto out; + } + + mutex_init(&gli->mutex); + memcpy(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN); +out: + return gli; +} + +/** + * lookup_local() - find a local LUN information structure by WWID + * @cfg: Internal structure associated with the host. + * @wwid: WWID associated with LUN. + * + * Return: Found local lun_info structure on success, NULL on failure + */ +static struct llun_info *lookup_local(struct cxlflash_cfg *cfg, u8 *wwid) +{ + struct llun_info *lli, *temp; + + list_for_each_entry_safe(lli, temp, &cfg->lluns, list) + if (!memcmp(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN)) + return lli; + + return NULL; +} + +/** + * lookup_global() - find a global LUN information structure by WWID + * @wwid: WWID associated with LUN. + * + * Return: Found global lun_info structure on success, NULL on failure + */ +static struct glun_info *lookup_global(u8 *wwid) +{ + struct glun_info *gli, *temp; + + list_for_each_entry_safe(gli, temp, &global.gluns, list) + if (!memcmp(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN)) + return gli; + + return NULL; +} + +/** + * find_and_create_lun() - find or create a local LUN information structure + * @sdev: SCSI device associated with LUN. + * @wwid: WWID associated with LUN. + * + * The LUN is kept both in a local list (per adapter) and in a global list + * (across all adapters). Certain attributes of the LUN are local to the + * adapter (such as index, port selection mask, etc.). + * + * The block allocation map is shared across all adapters (i.e. associated + * with the global list). Since different attributes are associated with + * the per adapter and global entries, allocate two separate structures for each + * LUN (one local, one global). + * + * Keep a pointer back from the local to the global entry. + * + * This routine assumes the caller holds the global mutex. + * + * Return: Found/Allocated local lun_info structure on success, NULL on failure + */ +static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct llun_info *lli = NULL; + struct glun_info *gli = NULL; + + if (unlikely(!wwid)) + goto out; + + lli = lookup_local(cfg, wwid); + if (lli) + goto out; + + lli = create_local(sdev, wwid); + if (unlikely(!lli)) + goto out; + + gli = lookup_global(wwid); + if (gli) { + lli->parent = gli; + list_add(&lli->list, &cfg->lluns); + goto out; + } + + gli = create_global(sdev, wwid); + if (unlikely(!gli)) { + kfree(lli); + lli = NULL; + goto out; + } + + lli->parent = gli; + list_add(&lli->list, &cfg->lluns); + + list_add(&gli->list, &global.gluns); + +out: + dev_dbg(dev, "%s: returning lli=%p, gli=%p\n", __func__, lli, gli); + return lli; +} + +/** + * cxlflash_term_local_luns() - Delete all entries from local LUN list, free. + * @cfg: Internal structure associated with the host.
+ */ +void cxlflash_term_local_luns(struct cxlflash_cfg *cfg) +{ + struct llun_info *lli, *temp; + + mutex_lock(&global.mutex); + list_for_each_entry_safe(lli, temp, &cfg->lluns, list) { + list_del(&lli->list); + kfree(lli); + } + mutex_unlock(&global.mutex); +} + +/** + * cxlflash_list_init() - initializes the global LUN list + */ +void cxlflash_list_init(void) +{ + INIT_LIST_HEAD(&global.gluns); + mutex_init(&global.mutex); + global.err_page = NULL; +} + +/** + * cxlflash_term_global_luns() - frees resources associated with global LUN list + */ +void cxlflash_term_global_luns(void) +{ + struct glun_info *gli, *temp; + + mutex_lock(&global.mutex); + list_for_each_entry_safe(gli, temp, &global.gluns, list) { + list_del(&gli->list); + cxlflash_ba_terminate(&gli->blka.ba_lun); + kfree(gli); + } + mutex_unlock(&global.mutex); +} + +/** + * cxlflash_manage_lun() - handles LUN management activities + * @sdev: SCSI device associated with LUN. + * @manage: Manage ioctl data structure. + * + * This routine is used to notify the driver about a LUN's WWID and associate + * SCSI devices (sdev) with a global LUN instance. Additionally it serves to + * change a LUN's operating mode: legacy or superpipe. + * + * Return: 0 on success, -errno on failure + */ +int cxlflash_manage_lun(struct scsi_device *sdev, + struct dk_cxlflash_manage_lun *manage) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct llun_info *lli = NULL; + int rc = 0; + u64 flags = manage->hdr.flags; + u32 chan = sdev->channel; + + mutex_lock(&global.mutex); + lli = find_and_create_lun(sdev, manage->wwid); + dev_dbg(dev, "%s: WWID=%016llx%016llx, flags=%016llx lli=%p\n", + __func__, get_unaligned_be64(&manage->wwid[0]), + get_unaligned_be64(&manage->wwid[8]), manage->hdr.flags, lli); + if (unlikely(!lli)) { + rc = -ENOMEM; + goto out; + } + + if (flags & DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE) { + /* + * Update port selection mask based upon channel, store off LUN + * in unpacked, AFU-friendly format, and hang LUN reference in + * the sdev. + */ + lli->port_sel |= CHAN2PORTMASK(chan); + lli->lun_id[chan] = lun_to_lunid(sdev->lun); + sdev->hostdata = lli; + } else if (flags & DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE) { + if (lli->parent->mode != MODE_NONE) + rc = -EBUSY; + else { + /* + * Clean up local LUN for this port and reset table + * tracking when no more references exist. + */ + sdev->hostdata = NULL; + lli->port_sel &= ~CHAN2PORTMASK(chan); + if (lli->port_sel == 0U) + lli->in_table = false; + } + } + + dev_dbg(dev, "%s: port_sel=%08x chan=%u lun_id=%016llx\n", + __func__, lli->port_sel, chan, lli->lun_id[chan]); + +out: + mutex_unlock(&global.mutex); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c new file mode 100644 index 000000000..debd36974 --- /dev/null +++ b/drivers/scsi/cxlflash/main.c @@ -0,0 +1,3967 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * CXL Flash Device Driver + * + * Written by: Manoj N. Kumar , IBM Corporation + * Matthew R. Ochs , IBM Corporation + * + * Copyright (C) 2015 IBM Corporation + */ + +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include "main.h" +#include "sislite.h" +#include "common.h" + +MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME); +MODULE_AUTHOR("Manoj N. Kumar "); +MODULE_AUTHOR("Matthew R. 
Ochs "); +MODULE_LICENSE("GPL"); + +static struct class *cxlflash_class; +static u32 cxlflash_major; +static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS); + +/** + * process_cmd_err() - command error handler + * @cmd: AFU command that experienced the error. + * @scp: SCSI command associated with the AFU command in error. + * + * Translates error bits from AFU command to SCSI command results. + */ +static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp) +{ + struct afu *afu = cmd->parent; + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; + struct sisl_ioasa *ioasa; + u32 resid; + + ioasa = &(cmd->sa); + + if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) { + resid = ioasa->resid; + scsi_set_resid(scp, resid); + dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n", + __func__, cmd, scp, resid); + } + + if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) { + dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p\n", + __func__, cmd, scp); + scp->result = (DID_ERROR << 16); + } + + dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x " + "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__, + ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc, + ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra); + + if (ioasa->rc.scsi_rc) { + /* We have a SCSI status */ + if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) { + memcpy(scp->sense_buffer, ioasa->sense_data, + SISL_SENSE_DATA_LEN); + scp->result = ioasa->rc.scsi_rc; + } else + scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16); + } + + /* + * We encountered an error. Set scp->result based on nature + * of error. + */ + if (ioasa->rc.fc_rc) { + /* We have an FC status */ + switch (ioasa->rc.fc_rc) { + case SISL_FC_RC_LINKDOWN: + scp->result = (DID_REQUEUE << 16); + break; + case SISL_FC_RC_RESID: + /* This indicates an FCP resid underrun */ + if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) { + /* If the SISL_RC_FLAGS_OVERRUN flag was set, + * then we will handle this error else where. + * If not then we must handle it here. + * This is probably an AFU bug. + */ + scp->result = (DID_ERROR << 16); + } + break; + case SISL_FC_RC_RESIDERR: + /* Resid mismatch between adapter and device */ + case SISL_FC_RC_TGTABORT: + case SISL_FC_RC_ABORTOK: + case SISL_FC_RC_ABORTFAIL: + case SISL_FC_RC_NOLOGI: + case SISL_FC_RC_ABORTPEND: + case SISL_FC_RC_WRABORTPEND: + case SISL_FC_RC_NOEXP: + case SISL_FC_RC_INUSE: + scp->result = (DID_ERROR << 16); + break; + } + } + + if (ioasa->rc.afu_rc) { + /* We have an AFU error */ + switch (ioasa->rc.afu_rc) { + case SISL_AFU_RC_NO_CHANNELS: + scp->result = (DID_NO_CONNECT << 16); + break; + case SISL_AFU_RC_DATA_DMA_ERR: + switch (ioasa->afu_extra) { + case SISL_AFU_DMA_ERR_PAGE_IN: + /* Retry */ + scp->result = (DID_IMM_RETRY << 16); + break; + case SISL_AFU_DMA_ERR_INVALID_EA: + default: + scp->result = (DID_ERROR << 16); + } + break; + case SISL_AFU_RC_OUT_OF_DATA_BUFS: + /* Retry */ + scp->result = (DID_ERROR << 16); + break; + default: + scp->result = (DID_ERROR << 16); + } + } +} + +/** + * cmd_complete() - command completion handler + * @cmd: AFU command that has completed. + * + * For SCSI commands this routine prepares and submits commands that have + * either completed or timed out to the SCSI stack. For internal commands + * (TMF or AFU), this routine simply notifies the originator that the + * command has completed. 
+ */ +static void cmd_complete(struct afu_cmd *cmd) +{ + struct scsi_cmnd *scp; + ulong lock_flags; + struct afu *afu = cmd->parent; + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; + struct hwq *hwq = get_hwq(afu, cmd->hwq_index); + + spin_lock_irqsave(&hwq->hsq_slock, lock_flags); + list_del(&cmd->list); + spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); + + if (cmd->scp) { + scp = cmd->scp; + if (unlikely(cmd->sa.ioasc)) + process_cmd_err(cmd, scp); + else + scp->result = (DID_OK << 16); + + dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n", + __func__, scp, scp->result, cmd->sa.ioasc); + scsi_done(scp); + } else if (cmd->cmd_tmf) { + spin_lock_irqsave(&cfg->tmf_slock, lock_flags); + cfg->tmf_active = false; + wake_up_all_locked(&cfg->tmf_waitq); + spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); + } else + complete(&cmd->cevent); +} + +/** + * flush_pending_cmds() - flush all pending commands on this hardware queue + * @hwq: Hardware queue to flush. + * + * The hardware send queue lock associated with this hardware queue must be + * held when calling this routine. + */ +static void flush_pending_cmds(struct hwq *hwq) +{ + struct cxlflash_cfg *cfg = hwq->afu->parent; + struct afu_cmd *cmd, *tmp; + struct scsi_cmnd *scp; + ulong lock_flags; + + list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) { + /* Bypass command when on a doneq, cmd_complete() will handle */ + if (!list_empty(&cmd->queue)) + continue; + + list_del(&cmd->list); + + if (cmd->scp) { + scp = cmd->scp; + scp->result = (DID_IMM_RETRY << 16); + scsi_done(scp); + } else { + cmd->cmd_aborted = true; + + if (cmd->cmd_tmf) { + spin_lock_irqsave(&cfg->tmf_slock, lock_flags); + cfg->tmf_active = false; + wake_up_all_locked(&cfg->tmf_waitq); + spin_unlock_irqrestore(&cfg->tmf_slock, + lock_flags); + } else + complete(&cmd->cevent); + } + } +} + +/** + * context_reset() - reset context via specified register + * @hwq: Hardware queue owning the context to be reset. + * @reset_reg: MMIO register to perform reset. + * + * When the reset is successful, the SISLite specification guarantees that + * the AFU has aborted all currently pending I/O. Accordingly, these commands + * must be flushed. + * + * Return: 0 on success, -errno on failure + */ +static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg) +{ + struct cxlflash_cfg *cfg = hwq->afu->parent; + struct device *dev = &cfg->dev->dev; + int rc = -ETIMEDOUT; + int nretry = 0; + u64 val = 0x1; + ulong lock_flags; + + dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq); + + spin_lock_irqsave(&hwq->hsq_slock, lock_flags); + + writeq_be(val, reset_reg); + do { + val = readq_be(reset_reg); + if ((val & 0x1) == 0x0) { + rc = 0; + break; + } + + /* Double delay each time */ + udelay(1 << nretry); + } while (nretry++ < MC_ROOM_RETRY_CNT); + + if (!rc) + flush_pending_cmds(hwq); + + spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); + + dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n", + __func__, rc, val, nretry); + return rc; +} + +/** + * context_reset_ioarrin() - reset context via IOARRIN register + * @hwq: Hardware queue owning the context to be reset. + * + * Return: 0 on success, -errno on failure + */ +static int context_reset_ioarrin(struct hwq *hwq) +{ + return context_reset(hwq, &hwq->host_map->ioarrin); +} + +/** + * context_reset_sq() - reset context via SQ_CONTEXT_RESET register + * @hwq: Hardware queue owning the context to be reset. 
+ * + * Return: 0 on success, -errno on failure + */ +static int context_reset_sq(struct hwq *hwq) +{ + return context_reset(hwq, &hwq->host_map->sq_ctx_reset); +} + +/** + * send_cmd_ioarrin() - sends an AFU command via IOARRIN register + * @afu: AFU associated with the host. + * @cmd: AFU command to send. + * + * Return: + * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure + */ +static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd) +{ + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; + struct hwq *hwq = get_hwq(afu, cmd->hwq_index); + int rc = 0; + s64 room; + ulong lock_flags; + + /* + * To avoid the performance penalty of MMIO, spread the update of + * 'room' over multiple commands. + */ + spin_lock_irqsave(&hwq->hsq_slock, lock_flags); + if (--hwq->room < 0) { + room = readq_be(&hwq->host_map->cmd_room); + if (room <= 0) { + dev_dbg_ratelimited(dev, "%s: no cmd_room to send " + "0x%02X, room=0x%016llX\n", + __func__, cmd->rcb.cdb[0], room); + hwq->room = 0; + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + hwq->room = room - 1; + } + + list_add(&cmd->list, &hwq->pending_cmds); + writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin); +out: + spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); + dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", + __func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc); + return rc; +} + +/** + * send_cmd_sq() - sends an AFU command via SQ ring + * @afu: AFU associated with the host. + * @cmd: AFU command to send. + * + * Return: + * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure + */ +static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd) +{ + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; + struct hwq *hwq = get_hwq(afu, cmd->hwq_index); + int rc = 0; + int newval; + ulong lock_flags; + + newval = atomic_dec_if_positive(&hwq->hsq_credits); + if (newval <= 0) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + + cmd->rcb.ioasa = &cmd->sa; + + spin_lock_irqsave(&hwq->hsq_slock, lock_flags); + + *hwq->hsq_curr = cmd->rcb; + if (hwq->hsq_curr < hwq->hsq_end) + hwq->hsq_curr++; + else + hwq->hsq_curr = hwq->hsq_start; + + list_add(&cmd->list, &hwq->pending_cmds); + writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail); + + spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); +out: + dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p " + "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len, + cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr, + readq_be(&hwq->host_map->sq_head), + readq_be(&hwq->host_map->sq_tail)); + return rc; +} + +/** + * wait_resp() - polls for a response or timeout to a sent AFU command + * @afu: AFU associated with the host. + * @cmd: AFU command that was sent. + * + * Return: 0 on success, -errno on failure + */ +static int wait_resp(struct afu *afu, struct afu_cmd *cmd) +{ + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; + int rc = 0; + ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000); + + timeout = wait_for_completion_timeout(&cmd->cevent, timeout); + if (!timeout) + rc = -ETIMEDOUT; + + if (cmd->cmd_aborted) + rc = -EAGAIN; + + if (unlikely(cmd->sa.ioasc != 0)) { + dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n", + __func__, cmd->rcb.cdb[0], cmd->sa.ioasc); + rc = -EIO; + } + + return rc; +} + +/** + * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command + * @host: SCSI host associated with device. + * @scp: SCSI command to send. 
+ * @afu: SCSI command to send. + * + * Hashes a command based upon the hardware queue mode. + * + * Return: Trusted index of target hardware queue + */ +static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp, + struct afu *afu) +{ + u32 tag; + u32 hwq = 0; + + if (afu->num_hwqs == 1) + return 0; + + switch (afu->hwq_mode) { + case HWQ_MODE_RR: + hwq = afu->hwq_rr_count++ % afu->num_hwqs; + break; + case HWQ_MODE_TAG: + tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp)); + hwq = blk_mq_unique_tag_to_hwq(tag); + break; + case HWQ_MODE_CPU: + hwq = smp_processor_id() % afu->num_hwqs; + break; + default: + WARN_ON_ONCE(1); + } + + return hwq; +} + +/** + * send_tmf() - sends a Task Management Function (TMF) + * @cfg: Internal structure associated with the host. + * @sdev: SCSI device destined for TMF. + * @tmfcmd: TMF command to send. + * + * Return: + * 0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure + */ +static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev, + u64 tmfcmd) +{ + struct afu *afu = cfg->afu; + struct afu_cmd *cmd = NULL; + struct device *dev = &cfg->dev->dev; + struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); + bool needs_deletion = false; + char *buf = NULL; + ulong lock_flags; + int rc = 0; + ulong to; + + buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); + if (unlikely(!buf)) { + dev_err(dev, "%s: no memory for command\n", __func__); + rc = -ENOMEM; + goto out; + } + + cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); + INIT_LIST_HEAD(&cmd->queue); + + /* When Task Management Function is active do not send another */ + spin_lock_irqsave(&cfg->tmf_slock, lock_flags); + if (cfg->tmf_active) + wait_event_interruptible_lock_irq(cfg->tmf_waitq, + !cfg->tmf_active, + cfg->tmf_slock); + cfg->tmf_active = true; + spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); + + cmd->parent = afu; + cmd->cmd_tmf = true; + cmd->hwq_index = hwq->index; + + cmd->rcb.ctx_id = hwq->ctx_hndl; + cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; + cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel); + cmd->rcb.lun_id = lun_to_lunid(sdev->lun); + cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | + SISL_REQ_FLAGS_SUP_UNDERRUN | + SISL_REQ_FLAGS_TMF_CMD); + memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd)); + + rc = afu->send_cmd(afu, cmd); + if (unlikely(rc)) { + spin_lock_irqsave(&cfg->tmf_slock, lock_flags); + cfg->tmf_active = false; + spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); + goto out; + } + + spin_lock_irqsave(&cfg->tmf_slock, lock_flags); + to = msecs_to_jiffies(5000); + to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq, + !cfg->tmf_active, + cfg->tmf_slock, + to); + if (!to) { + dev_err(dev, "%s: TMF timed out\n", __func__); + rc = -ETIMEDOUT; + needs_deletion = true; + } else if (cmd->cmd_aborted) { + dev_err(dev, "%s: TMF aborted\n", __func__); + rc = -EAGAIN; + } else if (cmd->sa.ioasc) { + dev_err(dev, "%s: TMF failed ioasc=%08x\n", + __func__, cmd->sa.ioasc); + rc = -EIO; + } + cfg->tmf_active = false; + spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); + + if (needs_deletion) { + spin_lock_irqsave(&hwq->hsq_slock, lock_flags); + list_del(&cmd->list); + spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); + } +out: + kfree(buf); + return rc; +} + +/** + * cxlflash_driver_info() - information handler for this host driver + * @host: SCSI host associated with device. + * + * Return: A string describing the device. 
+ */ +static const char *cxlflash_driver_info(struct Scsi_Host *host) +{ + return CXLFLASH_ADAPTER_NAME; +} + +/** + * cxlflash_queuecommand() - sends a mid-layer request + * @host: SCSI host associated with device. + * @scp: SCSI command to send. + * + * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure + */ +static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) +{ + struct cxlflash_cfg *cfg = shost_priv(host); + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + struct afu_cmd *cmd = sc_to_afuci(scp); + struct scatterlist *sg = scsi_sglist(scp); + int hwq_index = cmd_to_target_hwq(host, scp, afu); + struct hwq *hwq = get_hwq(afu, hwq_index); + u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN; + ulong lock_flags; + int rc = 0; + + dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu " + "cdb=(%08x-%08x-%08x-%08x)\n", + __func__, scp, host->host_no, scp->device->channel, + scp->device->id, scp->device->lun, + get_unaligned_be32(&((u32 *)scp->cmnd)[0]), + get_unaligned_be32(&((u32 *)scp->cmnd)[1]), + get_unaligned_be32(&((u32 *)scp->cmnd)[2]), + get_unaligned_be32(&((u32 *)scp->cmnd)[3])); + + /* + * If a Task Management Function is active, wait for it to complete + * before continuing with regular commands. + */ + spin_lock_irqsave(&cfg->tmf_slock, lock_flags); + if (cfg->tmf_active) { + spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); + + switch (cfg->state) { + case STATE_PROBING: + case STATE_PROBED: + case STATE_RESET: + dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__); + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; + case STATE_FAILTERM: + dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__); + scp->result = (DID_NO_CONNECT << 16); + scsi_done(scp); + rc = 0; + goto out; + default: + atomic_inc(&afu->cmds_active); + break; + } + + if (likely(sg)) { + cmd->rcb.data_len = sg->length; + cmd->rcb.data_ea = (uintptr_t)sg_virt(sg); + } + + cmd->scp = scp; + cmd->parent = afu; + cmd->hwq_index = hwq_index; + + cmd->sa.ioasc = 0; + cmd->rcb.ctx_id = hwq->ctx_hndl; + cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; + cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel); + cmd->rcb.lun_id = lun_to_lunid(scp->device->lun); + + if (scp->sc_data_direction == DMA_TO_DEVICE) + req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + + cmd->rcb.req_flags = req_flags; + memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb)); + + rc = afu->send_cmd(afu, cmd); + atomic_dec(&afu->cmds_active); +out: + return rc; +} + +/** + * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe + * @cfg: Internal structure associated with the host. + */ +static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg) +{ + struct pci_dev *pdev = cfg->dev; + + if (pci_channel_offline(pdev)) + wait_event_timeout(cfg->reset_waitq, + !pci_channel_offline(pdev), + CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT); +} + +/** + * free_mem() - free memory associated with the AFU + * @cfg: Internal structure associated with the host. + */ +static void free_mem(struct cxlflash_cfg *cfg) +{ + struct afu *afu = cfg->afu; + + if (cfg->afu) { + free_pages((ulong)afu, get_order(sizeof(struct afu))); + cfg->afu = NULL; + } +} + +/** + * cxlflash_reset_sync() - synchronizing point for asynchronous resets + * @cfg: Internal structure associated with the host. 
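+ *
+ * Waits for any asynchronous host reset scheduled via async_schedule()
+ * to finish; returns immediately when no reset cookie is outstanding.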
+ */ +static void cxlflash_reset_sync(struct cxlflash_cfg *cfg) +{ + if (cfg->async_reset_cookie == 0) + return; + + /* Wait until all async calls prior to this cookie have completed */ + async_synchronize_cookie(cfg->async_reset_cookie + 1); + cfg->async_reset_cookie = 0; +} + +/** + * stop_afu() - stops the AFU command timers and unmaps the MMIO space + * @cfg: Internal structure associated with the host. + * + * Safe to call with AFU in a partially allocated/initialized state. + * + * Cancels scheduled worker threads, waits for any active internal AFU + * commands to timeout, disables IRQ polling and then unmaps the MMIO space. + */ +static void stop_afu(struct cxlflash_cfg *cfg) +{ + struct afu *afu = cfg->afu; + struct hwq *hwq; + int i; + + cancel_work_sync(&cfg->work_q); + if (!current_is_async()) + cxlflash_reset_sync(cfg); + + if (likely(afu)) { + while (atomic_read(&afu->cmds_active)) + ssleep(1); + + if (afu_is_irqpoll_enabled(afu)) { + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + + irq_poll_disable(&hwq->irqpoll); + } + } + + if (likely(afu->afu_map)) { + cfg->ops->psa_unmap(afu->afu_map); + afu->afu_map = NULL; + } + } +} + +/** + * term_intr() - disables all AFU interrupts + * @cfg: Internal structure associated with the host. + * @level: Depth of allocation, where to begin waterfall tear down. + * @index: Index of the hardware queue. + * + * Safe to call with AFU/MC in partially allocated/initialized state. + */ +static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level, + u32 index) +{ + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + struct hwq *hwq; + + if (!afu) { + dev_err(dev, "%s: returning with NULL afu\n", __func__); + return; + } + + hwq = get_hwq(afu, index); + + if (!hwq->ctx_cookie) { + dev_err(dev, "%s: returning with NULL MC\n", __func__); + return; + } + + switch (level) { + case UNMAP_THREE: + /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */ + if (index == PRIMARY_HWQ) + cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq); + fallthrough; + case UNMAP_TWO: + cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq); + fallthrough; + case UNMAP_ONE: + cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq); + fallthrough; + case FREE_IRQ: + cfg->ops->free_afu_irqs(hwq->ctx_cookie); + fallthrough; + case UNDO_NOOP: + /* No action required */ + break; + } +} + +/** + * term_mc() - terminates the master context + * @cfg: Internal structure associated with the host. + * @index: Index of the hardware queue. + * + * Safe to call with AFU/MC in partially allocated/initialized state. + */ +static void term_mc(struct cxlflash_cfg *cfg, u32 index) +{ + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + struct hwq *hwq; + ulong lock_flags; + + if (!afu) { + dev_err(dev, "%s: returning with NULL afu\n", __func__); + return; + } + + hwq = get_hwq(afu, index); + + if (!hwq->ctx_cookie) { + dev_err(dev, "%s: returning with NULL MC\n", __func__); + return; + } + + WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie)); + if (index != PRIMARY_HWQ) + WARN_ON(cfg->ops->release_context(hwq->ctx_cookie)); + hwq->ctx_cookie = NULL; + + spin_lock_irqsave(&hwq->hrrq_slock, lock_flags); + hwq->hrrq_online = false; + spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags); + + spin_lock_irqsave(&hwq->hsq_slock, lock_flags); + flush_pending_cmds(hwq); + spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); +} + +/** + * term_afu() - terminates the AFU + * @cfg: Internal structure associated with the host. 
+ * + * Safe to call with AFU/MC in partially allocated/initialized state. + */ +static void term_afu(struct cxlflash_cfg *cfg) +{ + struct device *dev = &cfg->dev->dev; + int k; + + /* + * Tear down is carefully orchestrated to ensure + * no interrupts can come in when the problem state + * area is unmapped. + * + * 1) Disable all AFU interrupts for each master + * 2) Unmap the problem state area + * 3) Stop each master context + */ + for (k = cfg->afu->num_hwqs - 1; k >= 0; k--) + term_intr(cfg, UNMAP_THREE, k); + + stop_afu(cfg); + + for (k = cfg->afu->num_hwqs - 1; k >= 0; k--) + term_mc(cfg, k); + + dev_dbg(dev, "%s: returning\n", __func__); +} + +/** + * notify_shutdown() - notifies device of pending shutdown + * @cfg: Internal structure associated with the host. + * @wait: Whether to wait for shutdown processing to complete. + * + * This function will notify the AFU that the adapter is being shutdown + * and will wait for shutdown processing to complete if wait is true. + * This notification should flush pending I/Os to the device and halt + * further I/Os until the next AFU reset is issued and device restarted. + */ +static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait) +{ + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + struct dev_dependent_vals *ddv; + __be64 __iomem *fc_port_regs; + u64 reg, status; + int i, retry_cnt = 0; + + ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data; + if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN)) + return; + + if (!afu || !afu->afu_map) { + dev_dbg(dev, "%s: Problem state area not mapped\n", __func__); + return; + } + + /* Notify AFU */ + for (i = 0; i < cfg->num_fc_ports; i++) { + fc_port_regs = get_fc_port_regs(cfg, i); + + reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]); + reg |= SISL_FC_SHUTDOWN_NORMAL; + writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]); + } + + if (!wait) + return; + + /* Wait up to 1.5 seconds for shutdown processing to complete */ + for (i = 0; i < cfg->num_fc_ports; i++) { + fc_port_regs = get_fc_port_regs(cfg, i); + retry_cnt = 0; + + while (true) { + status = readq_be(&fc_port_regs[FC_STATUS / 8]); + if (status & SISL_STATUS_SHUTDOWN_COMPLETE) + break; + if (++retry_cnt >= MC_RETRY_CNT) { + dev_dbg(dev, "%s: port %d shutdown processing " + "not yet completed\n", __func__, i); + break; + } + msleep(100 * retry_cnt); + } + } +} + +/** + * cxlflash_get_minor() - gets the first available minor number + * + * Return: Unique minor number that can be used to create the character device. + */ +static int cxlflash_get_minor(void) +{ + int minor; + long bit; + + bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS); + if (bit >= CXLFLASH_MAX_ADAPTERS) + return -1; + + minor = bit & MINORMASK; + set_bit(minor, cxlflash_minor); + return minor; +} + +/** + * cxlflash_put_minor() - releases the minor number + * @minor: Minor number that is no longer needed. + */ +static void cxlflash_put_minor(int minor) +{ + clear_bit(minor, cxlflash_minor); +} + +/** + * cxlflash_release_chrdev() - release the character device for the host + * @cfg: Internal structure associated with the host. + */ +static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg) +{ + device_unregister(cfg->chardev); + cfg->chardev = NULL; + cdev_del(&cfg->cdev); + cxlflash_put_minor(MINOR(cfg->cdev.dev)); +} + +/** + * cxlflash_remove() - PCI entry point to tear down host + * @pdev: PCI device associated with the host. + * + * Safe to use as a cleanup in partially allocated/initialized state. 
Note that + * the reset_waitq is flushed as part of the stop/termination of user contexts. + */ +static void cxlflash_remove(struct pci_dev *pdev) +{ + struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); + struct device *dev = &pdev->dev; + ulong lock_flags; + + if (!pci_is_enabled(pdev)) { + dev_dbg(dev, "%s: Device is disabled\n", __func__); + return; + } + + /* Yield to running recovery threads before continuing with remove */ + wait_event(cfg->reset_waitq, cfg->state != STATE_RESET && + cfg->state != STATE_PROBING); + spin_lock_irqsave(&cfg->tmf_slock, lock_flags); + if (cfg->tmf_active) + wait_event_interruptible_lock_irq(cfg->tmf_waitq, + !cfg->tmf_active, + cfg->tmf_slock); + spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); + + /* Notify AFU and wait for shutdown processing to complete */ + notify_shutdown(cfg, true); + + cfg->state = STATE_FAILTERM; + cxlflash_stop_term_user_contexts(cfg); + + switch (cfg->init_state) { + case INIT_STATE_CDEV: + cxlflash_release_chrdev(cfg); + fallthrough; + case INIT_STATE_SCSI: + cxlflash_term_local_luns(cfg); + scsi_remove_host(cfg->host); + fallthrough; + case INIT_STATE_AFU: + term_afu(cfg); + fallthrough; + case INIT_STATE_PCI: + cfg->ops->destroy_afu(cfg->afu_cookie); + pci_disable_device(pdev); + fallthrough; + case INIT_STATE_NONE: + free_mem(cfg); + scsi_host_put(cfg->host); + break; + } + + dev_dbg(dev, "%s: returning\n", __func__); +} + +/** + * alloc_mem() - allocates the AFU and its command pool + * @cfg: Internal structure associated with the host. + * + * A partially allocated state remains on failure. + * + * Return: + * 0 on success + * -ENOMEM on failure to allocate memory + */ +static int alloc_mem(struct cxlflash_cfg *cfg) +{ + int rc = 0; + struct device *dev = &cfg->dev->dev; + + /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */ + cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(sizeof(struct afu))); + if (unlikely(!cfg->afu)) { + dev_err(dev, "%s: cannot get %d free pages\n", + __func__, get_order(sizeof(struct afu))); + rc = -ENOMEM; + goto out; + } + cfg->afu->parent = cfg; + cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS; + cfg->afu->afu_map = NULL; +out: + return rc; +} + +/** + * init_pci() - initializes the host as a PCI device + * @cfg: Internal structure associated with the host. + * + * Return: 0 on success, -errno on failure + */ +static int init_pci(struct cxlflash_cfg *cfg) +{ + struct pci_dev *pdev = cfg->dev; + struct device *dev = &cfg->dev->dev; + int rc = 0; + + rc = pci_enable_device(pdev); + if (rc || pci_channel_offline(pdev)) { + if (pci_channel_offline(pdev)) { + cxlflash_wait_for_pci_err_recovery(cfg); + rc = pci_enable_device(pdev); + } + + if (rc) { + dev_err(dev, "%s: Cannot enable adapter\n", __func__); + cxlflash_wait_for_pci_err_recovery(cfg); + goto out; + } + } + +out: + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * init_scsi() - adds the host to the SCSI stack and kicks off host scan + * @cfg: Internal structure associated with the host. 
+ * + * Return: 0 on success, -errno on failure + */ +static int init_scsi(struct cxlflash_cfg *cfg) +{ + struct pci_dev *pdev = cfg->dev; + struct device *dev = &cfg->dev->dev; + int rc = 0; + + rc = scsi_add_host(cfg->host, &pdev->dev); + if (rc) { + dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc); + goto out; + } + + scsi_scan_host(cfg->host); + +out: + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * set_port_online() - transitions the specified host FC port to online state + * @fc_regs: Top of MMIO region defined for specified port. + * + * The provided MMIO region must be mapped prior to call. Online state means + * that the FC link layer has synced, completed the handshaking process, and + * is ready for login to start. + */ +static void set_port_online(__be64 __iomem *fc_regs) +{ + u64 cmdcfg; + + cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]); + cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */ + cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */ + writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]); +} + +/** + * set_port_offline() - transitions the specified host FC port to offline state + * @fc_regs: Top of MMIO region defined for specified port. + * + * The provided MMIO region must be mapped prior to call. + */ +static void set_port_offline(__be64 __iomem *fc_regs) +{ + u64 cmdcfg; + + cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]); + cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */ + cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */ + writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]); +} + +/** + * wait_port_online() - waits for the specified host FC port come online + * @fc_regs: Top of MMIO region defined for specified port. + * @delay_us: Number of microseconds to delay between reading port status. + * @nretry: Number of cycles to retry reading port status. + * + * The provided MMIO region must be mapped prior to call. This will timeout + * when the cable is not plugged in. + * + * Return: + * TRUE (1) when the specified port is online + * FALSE (0) when the specified port fails to come online after timeout + */ +static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) +{ + u64 status; + + WARN_ON(delay_us < 1000); + + do { + msleep(delay_us / 1000); + status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); + if (status == U64_MAX) + nretry /= 2; + } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE && + nretry--); + + return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE); +} + +/** + * wait_port_offline() - waits for the specified host FC port go offline + * @fc_regs: Top of MMIO region defined for specified port. + * @delay_us: Number of microseconds to delay between reading port status. + * @nretry: Number of cycles to retry reading port status. + * + * The provided MMIO region must be mapped prior to call. 
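+ * A port status of all 1's is treated as a failed MMIO read and halves
+ * the remaining retry count.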
+ * + * Return: + * TRUE (1) when the specified port is offline + * FALSE (0) when the specified port fails to go offline after timeout + */ +static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) +{ + u64 status; + + WARN_ON(delay_us < 1000); + + do { + msleep(delay_us / 1000); + status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); + if (status == U64_MAX) + nretry /= 2; + } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE && + nretry--); + + return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE); +} + +/** + * afu_set_wwpn() - configures the WWPN for the specified host FC port + * @afu: AFU associated with the host that owns the specified FC port. + * @port: Port number being configured. + * @fc_regs: Top of MMIO region defined for specified port. + * @wwpn: The world-wide-port-number previously discovered for port. + * + * The provided MMIO region must be mapped prior to call. As part of the + * sequence to configure the WWPN, the port is toggled offline and then back + * online. This toggling action can cause this routine to delay up to a few + * seconds. When configured to use the internal LUN feature of the AFU, a + * failure to come online is overridden. + */ +static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs, + u64 wwpn) +{ + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; + + set_port_offline(fc_regs); + if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, + FC_PORT_STATUS_RETRY_CNT)) { + dev_dbg(dev, "%s: wait on port %d to go offline timed out\n", + __func__, port); + } + + writeq_be(wwpn, &fc_regs[FC_PNAME / 8]); + + set_port_online(fc_regs); + if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, + FC_PORT_STATUS_RETRY_CNT)) { + dev_dbg(dev, "%s: wait on port %d to go online timed out\n", + __func__, port); + } +} + +/** + * afu_link_reset() - resets the specified host FC port + * @afu: AFU associated with the host that owns the specified FC port. + * @port: Port number being configured. + * @fc_regs: Top of MMIO region defined for specified port. + * + * The provided MMIO region must be mapped prior to call. The sequence to + * reset the port involves toggling it offline and then back online. This + * action can cause this routine to delay up to a few seconds. An effort + * is made to maintain link with the device by switching to host to use + * the alternate port exclusively while the reset takes place. + * failure to come online is overridden. 
+ */ +static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs) +{ + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; + u64 port_sel; + + /* first switch the AFU to the other links, if any */ + port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel); + port_sel &= ~(1ULL << port); + writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel); + cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC); + + set_port_offline(fc_regs); + if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, + FC_PORT_STATUS_RETRY_CNT)) + dev_err(dev, "%s: wait on port %d to go offline timed out\n", + __func__, port); + + set_port_online(fc_regs); + if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, + FC_PORT_STATUS_RETRY_CNT)) + dev_err(dev, "%s: wait on port %d to go online timed out\n", + __func__, port); + + /* switch back to include this port */ + port_sel |= (1ULL << port); + writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel); + cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC); + + dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel); +} + +/** + * afu_err_intr_init() - clears and initializes the AFU for error interrupts + * @afu: AFU associated with the host. + */ +static void afu_err_intr_init(struct afu *afu) +{ + struct cxlflash_cfg *cfg = afu->parent; + __be64 __iomem *fc_port_regs; + int i; + struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); + u64 reg; + + /* global async interrupts: AFU clears afu_ctrl on context exit + * if async interrupts were sent to that context. This prevents + * the AFU form sending further async interrupts when + * there is + * nobody to receive them. + */ + + /* mask all */ + writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask); + /* set LISN# to send and point to primary master context */ + reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40); + + if (afu->internal_lun) + reg |= 1; /* Bit 63 indicates local lun */ + writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl); + /* clear all */ + writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear); + /* unmask bits that are of interest */ + /* note: afu can send an interrupt after this step */ + writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask); + /* clear again in case a bit came on after previous clear but before */ + /* unmask */ + writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear); + + /* Clear/Set internal lun bits */ + fc_port_regs = get_fc_port_regs(cfg, 0); + reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]); + reg &= SISL_FC_INTERNAL_MASK; + if (afu->internal_lun) + reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT); + writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]); + + /* now clear FC errors */ + for (i = 0; i < cfg->num_fc_ports; i++) { + fc_port_regs = get_fc_port_regs(cfg, i); + + writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]); + writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]); + } + + /* sync interrupts for master's IOARRIN write */ + /* note that unlike asyncs, there can be no pending sync interrupts */ + /* at this time (this is a fresh context and master has not written */ + /* IOARRIN yet), so there is nothing to clear. 
*/ + + /* set LISN#, it is always sent to the context that wrote IOARRIN */ + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + + reg = readq_be(&hwq->host_map->ctx_ctrl); + WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0); + reg |= SISL_MSI_SYNC_ERROR; + writeq_be(reg, &hwq->host_map->ctx_ctrl); + writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask); + } +} + +/** + * cxlflash_sync_err_irq() - interrupt handler for synchronous errors + * @irq: Interrupt number. + * @data: Private data provided at interrupt registration, the AFU. + * + * Return: Always return IRQ_HANDLED. + */ +static irqreturn_t cxlflash_sync_err_irq(int irq, void *data) +{ + struct hwq *hwq = (struct hwq *)data; + struct cxlflash_cfg *cfg = hwq->afu->parent; + struct device *dev = &cfg->dev->dev; + u64 reg; + u64 reg_unmasked; + + reg = readq_be(&hwq->host_map->intr_status); + reg_unmasked = (reg & SISL_ISTATUS_UNMASK); + + if (reg_unmasked == 0UL) { + dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n", + __func__, reg); + goto cxlflash_sync_err_irq_exit; + } + + dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n", + __func__, reg); + + writeq_be(reg_unmasked, &hwq->host_map->intr_clear); + +cxlflash_sync_err_irq_exit: + return IRQ_HANDLED; +} + +/** + * process_hrrq() - process the read-response queue + * @hwq: HWQ associated with the host. + * @doneq: Queue of commands harvested from the RRQ. + * @budget: Threshold of RRQ entries to process. + * + * This routine must be called holding the disabled RRQ spin lock. + * + * Return: The number of entries processed. + */ +static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget) +{ + struct afu *afu = hwq->afu; + struct afu_cmd *cmd; + struct sisl_ioasa *ioasa; + struct sisl_ioarcb *ioarcb; + bool toggle = hwq->toggle; + int num_hrrq = 0; + u64 entry, + *hrrq_start = hwq->hrrq_start, + *hrrq_end = hwq->hrrq_end, + *hrrq_curr = hwq->hrrq_curr; + + /* Process ready RRQ entries up to the specified budget (if any) */ + while (true) { + entry = *hrrq_curr; + + if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle) + break; + + entry &= ~SISL_RESP_HANDLE_T_BIT; + + if (afu_is_sq_cmd_mode(afu)) { + ioasa = (struct sisl_ioasa *)entry; + cmd = container_of(ioasa, struct afu_cmd, sa); + } else { + ioarcb = (struct sisl_ioarcb *)entry; + cmd = container_of(ioarcb, struct afu_cmd, rcb); + } + + list_add_tail(&cmd->queue, doneq); + + /* Advance to next entry or wrap and flip the toggle bit */ + if (hrrq_curr < hrrq_end) + hrrq_curr++; + else { + hrrq_curr = hrrq_start; + toggle ^= SISL_RESP_HANDLE_T_BIT; + } + + atomic_inc(&hwq->hsq_credits); + num_hrrq++; + + if (budget > 0 && num_hrrq >= budget) + break; + } + + hwq->hrrq_curr = hrrq_curr; + hwq->toggle = toggle; + + return num_hrrq; +} + +/** + * process_cmd_doneq() - process a queue of harvested RRQ commands + * @doneq: Queue of completed commands. + * + * Note that upon return the queue can no longer be trusted. + */ +static void process_cmd_doneq(struct list_head *doneq) +{ + struct afu_cmd *cmd, *tmp; + + WARN_ON(list_empty(doneq)); + + list_for_each_entry_safe(cmd, tmp, doneq, queue) + cmd_complete(cmd); +} + +/** + * cxlflash_irqpoll() - process a queue of harvested RRQ commands + * @irqpoll: IRQ poll structure associated with queue to poll. + * @budget: Threshold of RRQ entries to process per poll. + * + * Return: The number of entries processed. 
+ */ +static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget) +{ + struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll); + unsigned long hrrq_flags; + LIST_HEAD(doneq); + int num_entries = 0; + + spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags); + + num_entries = process_hrrq(hwq, &doneq, budget); + if (num_entries < budget) + irq_poll_complete(irqpoll); + + spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); + + process_cmd_doneq(&doneq); + return num_entries; +} + +/** + * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path) + * @irq: Interrupt number. + * @data: Private data provided at interrupt registration, the AFU. + * + * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found. + */ +static irqreturn_t cxlflash_rrq_irq(int irq, void *data) +{ + struct hwq *hwq = (struct hwq *)data; + struct afu *afu = hwq->afu; + unsigned long hrrq_flags; + LIST_HEAD(doneq); + int num_entries = 0; + + spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags); + + /* Silently drop spurious interrupts when queue is not online */ + if (!hwq->hrrq_online) { + spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); + return IRQ_HANDLED; + } + + if (afu_is_irqpoll_enabled(afu)) { + irq_poll_sched(&hwq->irqpoll); + spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); + return IRQ_HANDLED; + } + + num_entries = process_hrrq(hwq, &doneq, -1); + spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); + + if (num_entries == 0) + return IRQ_NONE; + + process_cmd_doneq(&doneq); + return IRQ_HANDLED; +} + +/* + * Asynchronous interrupt information table + * + * NOTE: + * - Order matters here as this array is indexed by bit position. + * + * - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro + * as complex and complains due to a lack of parentheses/braces. + */ +#define ASTATUS_FC(_a, _b, _c, _d) \ + { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) } + +#define BUILD_SISL_ASTATUS_FC_PORT(_a) \ + ASTATUS_FC(_a, LINK_UP, "link up", 0), \ + ASTATUS_FC(_a, LINK_DN, "link down", 0), \ + ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST), \ + ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR), \ + ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \ + ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET), \ + ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0), \ + ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET) + +static const struct asyc_intr_info ainfo[] = { + BUILD_SISL_ASTATUS_FC_PORT(1), + BUILD_SISL_ASTATUS_FC_PORT(0), + BUILD_SISL_ASTATUS_FC_PORT(3), + BUILD_SISL_ASTATUS_FC_PORT(2) +}; + +/** + * cxlflash_async_err_irq() - interrupt handler for asynchronous errors + * @irq: Interrupt number. + * @data: Private data provided at interrupt registration, the AFU. + * + * Return: Always return IRQ_HANDLED. 
+ */ +static irqreturn_t cxlflash_async_err_irq(int irq, void *data) +{ + struct hwq *hwq = (struct hwq *)data; + struct afu *afu = hwq->afu; + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; + const struct asyc_intr_info *info; + struct sisl_global_map __iomem *global = &afu->afu_map->global; + __be64 __iomem *fc_port_regs; + u64 reg_unmasked; + u64 reg; + u64 bit; + u8 port; + + reg = readq_be(&global->regs.aintr_status); + reg_unmasked = (reg & SISL_ASTATUS_UNMASK); + + if (unlikely(reg_unmasked == 0)) { + dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n", + __func__, reg); + goto out; + } + + /* FYI, it is 'okay' to clear AFU status before FC_ERROR */ + writeq_be(reg_unmasked, &global->regs.aintr_clear); + + /* Check each bit that is on */ + for_each_set_bit(bit, (ulong *)®_unmasked, BITS_PER_LONG) { + if (unlikely(bit >= ARRAY_SIZE(ainfo))) { + WARN_ON_ONCE(1); + continue; + } + + info = &ainfo[bit]; + if (unlikely(info->status != 1ULL << bit)) { + WARN_ON_ONCE(1); + continue; + } + + port = info->port; + fc_port_regs = get_fc_port_regs(cfg, port); + + dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n", + __func__, port, info->desc, + readq_be(&fc_port_regs[FC_STATUS / 8])); + + /* + * Do link reset first, some OTHER errors will set FC_ERROR + * again if cleared before or w/o a reset + */ + if (info->action & LINK_RESET) { + dev_err(dev, "%s: FC Port %d: resetting link\n", + __func__, port); + cfg->lr_state = LINK_RESET_REQUIRED; + cfg->lr_port = port; + schedule_work(&cfg->work_q); + } + + if (info->action & CLR_FC_ERROR) { + reg = readq_be(&fc_port_regs[FC_ERROR / 8]); + + /* + * Since all errors are unmasked, FC_ERROR and FC_ERRCAP + * should be the same and tracing one is sufficient. + */ + + dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n", + __func__, port, reg); + + writeq_be(reg, &fc_port_regs[FC_ERROR / 8]); + writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]); + } + + if (info->action & SCAN_HOST) { + atomic_inc(&cfg->scan_host_needed); + schedule_work(&cfg->work_q); + } + } + +out: + return IRQ_HANDLED; +} + +/** + * read_vpd() - obtains the WWPNs from VPD + * @cfg: Internal structure associated with the host. + * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs + * + * Return: 0 on success, -errno on failure + */ +static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) +{ + struct device *dev = &cfg->dev->dev; + struct pci_dev *pdev = cfg->dev; + int i, k, rc = 0; + unsigned int kw_size; + ssize_t vpd_size; + char vpd_data[CXLFLASH_VPD_LEN]; + char tmp_buf[WWPN_BUF_LEN] = { 0 }; + const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *) + cfg->dev_id->driver_data; + const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED; + const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" }; + + /* Get the VPD data from the device */ + vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data)); + if (unlikely(vpd_size <= 0)) { + dev_err(dev, "%s: Unable to read VPD (size = %ld)\n", + __func__, vpd_size); + rc = -ENODEV; + goto out; + } + + /* + * Find the offset of the WWPN tag within the read only + * VPD data and validate the found field (partials are + * no good to us). Convert the ASCII data to an integer + * value. Note that we must copy to a temporary buffer + * because the conversion service requires that the ASCII + * string be terminated. + * + * Allow for WWPN not being found for all devices, setting + * the returned WWPN to zero when not found. 
Notify with a + * log error for cards that should have had WWPN keywords + * in the VPD - cards requiring WWPN will not have their + * ports programmed and operate in an undefined state. + */ + for (k = 0; k < cfg->num_fc_ports; k++) { + i = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + wwpn_vpd_tags[k], &kw_size); + if (i == -ENOENT) { + if (wwpn_vpd_required) + dev_err(dev, "%s: Port %d WWPN not found\n", + __func__, k); + wwpn[k] = 0ULL; + continue; + } + + if (i < 0 || kw_size != WWPN_LEN) { + dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n", + __func__, k); + rc = -ENODEV; + goto out; + } + + memcpy(tmp_buf, &vpd_data[i], WWPN_LEN); + rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]); + if (unlikely(rc)) { + dev_err(dev, "%s: WWPN conversion failed for port %d\n", + __func__, k); + rc = -ENODEV; + goto out; + } + + dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]); + } + +out: + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * init_pcr() - initialize the provisioning and control registers + * @cfg: Internal structure associated with the host. + * + * Also sets up fast access to the mapped registers and initializes AFU + * command fields that never change. + */ +static void init_pcr(struct cxlflash_cfg *cfg) +{ + struct afu *afu = cfg->afu; + struct sisl_ctrl_map __iomem *ctrl_map; + struct hwq *hwq; + void *cookie; + int i; + + for (i = 0; i < MAX_CONTEXT; i++) { + ctrl_map = &afu->afu_map->ctrls[i].ctrl; + /* Disrupt any clients that could be running */ + /* e.g. clients that survived a master restart */ + writeq_be(0, &ctrl_map->rht_start); + writeq_be(0, &ctrl_map->rht_cnt_id); + writeq_be(0, &ctrl_map->ctx_cap); + } + + /* Copy frequently used fields into hwq */ + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + cookie = hwq->ctx_cookie; + + hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie); + hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host; + hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl; + + /* Program the Endian Control for the master context */ + writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl); + } +} + +/** + * init_global() - initialize AFU global registers + * @cfg: Internal structure associated with the host. 
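+ *
+ * Return: 0 on success, -errno on failure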
+ */ +static int init_global(struct cxlflash_cfg *cfg) +{ + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + struct hwq *hwq; + struct sisl_host_map __iomem *hmap; + __be64 __iomem *fc_port_regs; + u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */ + int i = 0, num_ports = 0; + int rc = 0; + int j; + void *ctx; + u64 reg; + + rc = read_vpd(cfg, &wwpn[0]); + if (rc) { + dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc); + goto out; + } + + /* Set up RRQ and SQ in HWQ for master issued cmds */ + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + hmap = hwq->host_map; + + writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start); + writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end); + hwq->hrrq_online = true; + + if (afu_is_sq_cmd_mode(afu)) { + writeq_be((u64)hwq->hsq_start, &hmap->sq_start); + writeq_be((u64)hwq->hsq_end, &hmap->sq_end); + } + } + + /* AFU configuration */ + reg = readq_be(&afu->afu_map->global.regs.afu_config); + reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN; + /* enable all auto retry options and control endianness */ + /* leave others at default: */ + /* CTX_CAP write protected, mbox_r does not clear on read and */ + /* checker on if dual afu */ + writeq_be(reg, &afu->afu_map->global.regs.afu_config); + + /* Global port select: select either port */ + if (afu->internal_lun) { + /* Only use port 0 */ + writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel); + num_ports = 0; + } else { + writeq_be(PORT_MASK(cfg->num_fc_ports), + &afu->afu_map->global.regs.afu_port_sel); + num_ports = cfg->num_fc_ports; + } + + for (i = 0; i < num_ports; i++) { + fc_port_regs = get_fc_port_regs(cfg, i); + + /* Unmask all errors (but they are still masked at AFU) */ + writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]); + /* Clear CRC error cnt & set a threshold */ + (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]); + writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]); + + /* Set WWPNs. If already programmed, wwpn[i] is 0 */ + if (wwpn[i] != 0) + afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]); + /* Programming WWPN back to back causes additional + * offline/online transitions and a PLOGI + */ + msleep(100); + } + + if (afu_is_ocxl_lisn(afu)) { + /* Set up the LISN effective address for each master */ + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + ctx = hwq->ctx_cookie; + + for (j = 0; j < hwq->num_irqs; j++) { + reg = cfg->ops->get_irq_objhndl(ctx, j); + writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]); + } + + reg = hwq->ctx_hndl; + writeq_be(SISL_LISN_PASID(reg, reg), + &hwq->ctrl_map->lisn_pasid[0]); + writeq_be(SISL_LISN_PASID(0UL, reg), + &hwq->ctrl_map->lisn_pasid[1]); + } + } + + /* Set up master's own CTX_CAP to allow real mode, host translation */ + /* tables, afu cmds and read/write GSCSI cmds. */ + /* First, unlock ctx_cap write by reading mbox */ + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + + (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */ + writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | + SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD | + SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD), + &hwq->ctrl_map->ctx_cap); + } + + /* + * Determine write-same unmap support for host by evaluating the unmap + * sector support bit of the context control register associated with + * the primary hardware queue. Note that while this status is reflected + * in a context register, the outcome can be assumed to be host-wide. 
+ */ + hwq = get_hwq(afu, PRIMARY_HWQ); + reg = readq_be(&hwq->host_map->ctx_ctrl); + if (reg & SISL_CTX_CTRL_UNMAP_SECTOR) + cfg->ws_unmap = true; + + /* Initialize heartbeat */ + afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); +out: + return rc; +} + +/** + * start_afu() - initializes and starts the AFU + * @cfg: Internal structure associated with the host. + */ +static int start_afu(struct cxlflash_cfg *cfg) +{ + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + struct hwq *hwq; + int rc = 0; + int i; + + init_pcr(cfg); + + /* Initialize each HWQ */ + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + + /* After an AFU reset, RRQ entries are stale, clear them */ + memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry)); + + /* Initialize RRQ pointers */ + hwq->hrrq_start = &hwq->rrq_entry[0]; + hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1]; + hwq->hrrq_curr = hwq->hrrq_start; + hwq->toggle = 1; + + /* Initialize spin locks */ + spin_lock_init(&hwq->hrrq_slock); + spin_lock_init(&hwq->hsq_slock); + + /* Initialize SQ */ + if (afu_is_sq_cmd_mode(afu)) { + memset(&hwq->sq, 0, sizeof(hwq->sq)); + hwq->hsq_start = &hwq->sq[0]; + hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1]; + hwq->hsq_curr = hwq->hsq_start; + + atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1); + } + + /* Initialize IRQ poll */ + if (afu_is_irqpoll_enabled(afu)) + irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight, + cxlflash_irqpoll); + + } + + rc = init_global(cfg); + + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * init_intr() - setup interrupt handlers for the master context + * @cfg: Internal structure associated with the host. + * @hwq: Hardware queue to initialize. + * + * Return: 0 on success, -errno on failure + */ +static enum undo_level init_intr(struct cxlflash_cfg *cfg, + struct hwq *hwq) +{ + struct device *dev = &cfg->dev->dev; + void *ctx = hwq->ctx_cookie; + int rc = 0; + enum undo_level level = UNDO_NOOP; + bool is_primary_hwq = (hwq->index == PRIMARY_HWQ); + int num_irqs = hwq->num_irqs; + + rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs); + if (unlikely(rc)) { + dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n", + __func__, rc); + level = UNDO_NOOP; + goto out; + } + + rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq, + "SISL_MSI_SYNC_ERROR"); + if (unlikely(rc <= 0)) { + dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__); + level = FREE_IRQ; + goto out; + } + + rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq, + "SISL_MSI_RRQ_UPDATED"); + if (unlikely(rc <= 0)) { + dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__); + level = UNMAP_ONE; + goto out; + } + + /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */ + if (!is_primary_hwq) + goto out; + + rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq, + "SISL_MSI_ASYNC_ERROR"); + if (unlikely(rc <= 0)) { + dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__); + level = UNMAP_TWO; + goto out; + } +out: + return level; +} + +/** + * init_mc() - create and register as the master context + * @cfg: Internal structure associated with the host. + * @index: HWQ Index of the master context. 
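+ *
+ * Obtains a context for the hardware queue, registers it as a master,
+ * sets up its interrupt handlers and then starts the context.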
+ * + * Return: 0 on success, -errno on failure + */ +static int init_mc(struct cxlflash_cfg *cfg, u32 index) +{ + void *ctx; + struct device *dev = &cfg->dev->dev; + struct hwq *hwq = get_hwq(cfg->afu, index); + int rc = 0; + int num_irqs; + enum undo_level level; + + hwq->afu = cfg->afu; + hwq->index = index; + INIT_LIST_HEAD(&hwq->pending_cmds); + + if (index == PRIMARY_HWQ) { + ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie); + num_irqs = 3; + } else { + ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie); + num_irqs = 2; + } + if (IS_ERR_OR_NULL(ctx)) { + rc = -ENOMEM; + goto err1; + } + + WARN_ON(hwq->ctx_cookie); + hwq->ctx_cookie = ctx; + hwq->num_irqs = num_irqs; + + /* Set it up as a master with the CXL */ + cfg->ops->set_master(ctx); + + /* Reset AFU when initializing primary context */ + if (index == PRIMARY_HWQ) { + rc = cfg->ops->afu_reset(ctx); + if (unlikely(rc)) { + dev_err(dev, "%s: AFU reset failed rc=%d\n", + __func__, rc); + goto err1; + } + } + + level = init_intr(cfg, hwq); + if (unlikely(level)) { + dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc); + goto err2; + } + + /* Finally, activate the context by starting it */ + rc = cfg->ops->start_context(hwq->ctx_cookie); + if (unlikely(rc)) { + dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc); + level = UNMAP_THREE; + goto err2; + } + +out: + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +err2: + term_intr(cfg, level, index); + if (index != PRIMARY_HWQ) + cfg->ops->release_context(ctx); +err1: + hwq->ctx_cookie = NULL; + goto out; +} + +/** + * get_num_afu_ports() - determines and configures the number of AFU ports + * @cfg: Internal structure associated with the host. + * + * This routine determines the number of AFU ports by converting the global + * port selection mask. The converted value is only valid following an AFU + * reset (explicit or power-on). This routine must be invoked shortly after + * mapping as other routines are dependent on the number of ports during the + * initialization sequence. + * + * To support legacy AFUs that might not have reflected an initial global + * port mask (value read is 0), default to the number of ports originally + * supported by the cxlflash driver (2) before hardware with other port + * offerings was introduced. + */ +static void get_num_afu_ports(struct cxlflash_cfg *cfg) +{ + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + u64 port_mask; + int num_fc_ports = LEGACY_FC_PORTS; + + port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel); + if (port_mask != 0ULL) + num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS); + + dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n", + __func__, port_mask, num_fc_ports); + + cfg->num_fc_ports = num_fc_ports; + cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports); +} + +/** + * init_afu() - setup as master context and start AFU + * @cfg: Internal structure associated with the host. + * + * This routine is a higher level of control for configuring the + * AFU on probe and reset paths. 
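+ * Creates a master context for each hardware queue, maps the AFU MMIO
+ * space, validates the AFU interface version and then starts the AFU.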
+ * + * Return: 0 on success, -errno on failure + */ +static int init_afu(struct cxlflash_cfg *cfg) +{ + u64 reg; + int rc = 0; + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + struct hwq *hwq; + int i; + + cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true); + + mutex_init(&afu->sync_active); + afu->num_hwqs = afu->desired_hwqs; + for (i = 0; i < afu->num_hwqs; i++) { + rc = init_mc(cfg, i); + if (rc) { + dev_err(dev, "%s: init_mc failed rc=%d index=%d\n", + __func__, rc, i); + goto err1; + } + } + + /* Map the entire MMIO space of the AFU using the first context */ + hwq = get_hwq(afu, PRIMARY_HWQ); + afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie); + if (!afu->afu_map) { + dev_err(dev, "%s: psa_map failed\n", __func__); + rc = -ENOMEM; + goto err1; + } + + /* No byte reverse on reading afu_version or string will be backwards */ + reg = readq(&afu->afu_map->global.regs.afu_version); + memcpy(afu->version, ®, sizeof(reg)); + afu->interface_version = + readq_be(&afu->afu_map->global.regs.interface_version); + if ((afu->interface_version + 1) == 0) { + dev_err(dev, "Back level AFU, please upgrade. AFU version %s " + "interface version %016llx\n", afu->version, + afu->interface_version); + rc = -EINVAL; + goto err1; + } + + if (afu_is_sq_cmd_mode(afu)) { + afu->send_cmd = send_cmd_sq; + afu->context_reset = context_reset_sq; + } else { + afu->send_cmd = send_cmd_ioarrin; + afu->context_reset = context_reset_ioarrin; + } + + dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__, + afu->version, afu->interface_version); + + get_num_afu_ports(cfg); + + rc = start_afu(cfg); + if (rc) { + dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc); + goto err1; + } + + afu_err_intr_init(cfg->afu); + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + + hwq->room = readq_be(&hwq->host_map->cmd_room); + } + + /* Restore the LUN mappings */ + cxlflash_restore_luntable(cfg); +out: + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; + +err1: + for (i = afu->num_hwqs - 1; i >= 0; i--) { + term_intr(cfg, UNMAP_THREE, i); + term_mc(cfg, i); + } + goto out; +} + +/** + * afu_reset() - resets the AFU + * @cfg: Internal structure associated with the host. + * + * Return: 0 on success, -errno on failure + */ +static int afu_reset(struct cxlflash_cfg *cfg) +{ + struct device *dev = &cfg->dev->dev; + int rc = 0; + + /* Stop the context before the reset. Since the context is + * no longer available restart it after the reset is complete + */ + term_afu(cfg); + + rc = init_afu(cfg); + + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * drain_ioctls() - wait until all currently executing ioctls have completed + * @cfg: Internal structure associated with the host. + * + * Obtain write access to read/write semaphore that wraps ioctl + * handling to 'drain' ioctls currently executing. + */ +static void drain_ioctls(struct cxlflash_cfg *cfg) +{ + down_write(&cfg->ioctl_rwsem); + up_write(&cfg->ioctl_rwsem); +} + +/** + * cxlflash_async_reset_host() - asynchronous host reset handler + * @data: Private data provided while scheduling reset. + * @cookie: Cookie that can be used for checkpointing. 
+ */ +static void cxlflash_async_reset_host(void *data, async_cookie_t cookie) +{ + struct cxlflash_cfg *cfg = data; + struct device *dev = &cfg->dev->dev; + int rc = 0; + + if (cfg->state != STATE_RESET) { + dev_dbg(dev, "%s: Not performing a reset, state=%d\n", + __func__, cfg->state); + goto out; + } + + drain_ioctls(cfg); + cxlflash_mark_contexts_error(cfg); + rc = afu_reset(cfg); + if (rc) + cfg->state = STATE_FAILTERM; + else + cfg->state = STATE_NORMAL; + wake_up_all(&cfg->reset_waitq); + +out: + scsi_unblock_requests(cfg->host); +} + +/** + * cxlflash_schedule_async_reset() - schedule an asynchronous host reset + * @cfg: Internal structure associated with the host. + */ +static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg) +{ + struct device *dev = &cfg->dev->dev; + + if (cfg->state != STATE_NORMAL) { + dev_dbg(dev, "%s: Not performing reset state=%d\n", + __func__, cfg->state); + return; + } + + cfg->state = STATE_RESET; + scsi_block_requests(cfg->host); + cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host, + cfg); +} + +/** + * send_afu_cmd() - builds and sends an internal AFU command + * @afu: AFU associated with the host. + * @rcb: Pre-populated IOARCB describing command to send. + * + * The AFU can only take one internal AFU command at a time. This limitation is + * enforced by using a mutex to provide exclusive access to the AFU during the + * operation. This design point requires calling threads to not be on interrupt + * context due to the possibility of sleeping during concurrent AFU operations. + * + * The command status is optionally passed back to the caller when the caller + * populates the IOASA field of the IOARCB with a pointer to an IOASA structure. + * + * Return: + * 0 on success, -errno on failure + */ +static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb) +{ + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; + struct afu_cmd *cmd = NULL; + struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); + ulong lock_flags; + char *buf = NULL; + int rc = 0; + int nretry = 0; + + if (cfg->state != STATE_NORMAL) { + dev_dbg(dev, "%s: Sync not required state=%u\n", + __func__, cfg->state); + return 0; + } + + mutex_lock(&afu->sync_active); + atomic_inc(&afu->cmds_active); + buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); + if (unlikely(!buf)) { + dev_err(dev, "%s: no memory for command\n", __func__); + rc = -ENOMEM; + goto out; + } + + cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); + +retry: + memset(cmd, 0, sizeof(*cmd)); + memcpy(&cmd->rcb, rcb, sizeof(*rcb)); + INIT_LIST_HEAD(&cmd->queue); + init_completion(&cmd->cevent); + cmd->parent = afu; + cmd->hwq_index = hwq->index; + cmd->rcb.ctx_id = hwq->ctx_hndl; + + dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n", + __func__, afu, cmd, cmd->rcb.cdb[0], nretry); + + rc = afu->send_cmd(afu, cmd); + if (unlikely(rc)) { + rc = -ENOBUFS; + goto out; + } + + rc = wait_resp(afu, cmd); + switch (rc) { + case -ETIMEDOUT: + rc = afu->context_reset(hwq); + if (rc) { + /* Delete the command from pending_cmds list */ + spin_lock_irqsave(&hwq->hsq_slock, lock_flags); + list_del(&cmd->list); + spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); + + cxlflash_schedule_async_reset(cfg); + break; + } + fallthrough; /* to retry */ + case -EAGAIN: + if (++nretry < 2) + goto retry; + fallthrough; /* to exit */ + default: + break; + } + + if (rcb->ioasa) + *rcb->ioasa = cmd->sa; +out: + atomic_dec(&afu->cmds_active); + mutex_unlock(&afu->sync_active); 
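+ /* Release the over-allocated buffer that backed the aligned command */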
+ kfree(buf); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * cxlflash_afu_sync() - builds and sends an AFU sync command + * @afu: AFU associated with the host. + * @ctx: Identifies context requesting sync. + * @res: Identifies resource requesting sync. + * @mode: Type of sync to issue (lightweight, heavyweight, global). + * + * AFU sync operations are only necessary and allowed when the device is + * operating normally. When not operating normally, sync requests can occur as + * part of cleaning up resources associated with an adapter prior to removal. + * In this scenario, these requests are simply ignored (safe due to the AFU + * going away). + * + * Return: + * 0 on success, -errno on failure + */ +int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode) +{ + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; + struct sisl_ioarcb rcb = { 0 }; + + dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n", + __func__, afu, ctx, res, mode); + + rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; + rcb.msi = SISL_MSI_RRQ_UPDATED; + rcb.timeout = MC_AFU_SYNC_TIMEOUT; + + rcb.cdb[0] = SISL_AFU_CMD_SYNC; + rcb.cdb[1] = mode; + put_unaligned_be16(ctx, &rcb.cdb[2]); + put_unaligned_be32(res, &rcb.cdb[4]); + + return send_afu_cmd(afu, &rcb); +} + +/** + * cxlflash_eh_abort_handler() - abort a SCSI command + * @scp: SCSI command to abort. + * + * CXL Flash devices do not support a single command abort. Reset the context + * as per SISLite specification. Flush any pending commands in the hardware + * queue before the reset. + * + * Return: SUCCESS/FAILED as defined in scsi/scsi.h + */ +static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp) +{ + int rc = FAILED; + struct Scsi_Host *host = scp->device->host; + struct cxlflash_cfg *cfg = shost_priv(host); + struct afu_cmd *cmd = sc_to_afuc(scp); + struct device *dev = &cfg->dev->dev; + struct afu *afu = cfg->afu; + struct hwq *hwq = get_hwq(afu, cmd->hwq_index); + + dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " + "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, + scp->device->channel, scp->device->id, scp->device->lun, + get_unaligned_be32(&((u32 *)scp->cmnd)[0]), + get_unaligned_be32(&((u32 *)scp->cmnd)[1]), + get_unaligned_be32(&((u32 *)scp->cmnd)[2]), + get_unaligned_be32(&((u32 *)scp->cmnd)[3])); + + /* When the state is not normal, another reset/reload is in progress. + * Return failed and the mid-layer will invoke host reset handler. + */ + if (cfg->state != STATE_NORMAL) { + dev_dbg(dev, "%s: Invalid state for abort, state=%d\n", + __func__, cfg->state); + goto out; + } + + rc = afu->context_reset(hwq); + if (unlikely(rc)) + goto out; + + rc = SUCCESS; + +out: + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * cxlflash_eh_device_reset_handler() - reset a single LUN + * @scp: SCSI command to send. 
+ * + * Return: + * SUCCESS as defined in scsi/scsi.h + * FAILED as defined in scsi/scsi.h + */ +static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) +{ + int rc = SUCCESS; + struct scsi_device *sdev = scp->device; + struct Scsi_Host *host = sdev->host; + struct cxlflash_cfg *cfg = shost_priv(host); + struct device *dev = &cfg->dev->dev; + int rcr = 0; + + dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__, + host->host_no, sdev->channel, sdev->id, sdev->lun); +retry: + switch (cfg->state) { + case STATE_NORMAL: + rcr = send_tmf(cfg, sdev, TMF_LUN_RESET); + if (unlikely(rcr)) + rc = FAILED; + break; + case STATE_RESET: + wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); + goto retry; + default: + rc = FAILED; + break; + } + + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * cxlflash_eh_host_reset_handler() - reset the host adapter + * @scp: SCSI command from stack identifying host. + * + * Following a reset, the state is evaluated again in case an EEH occurred + * during the reset. In such a scenario, the host reset will either yield + * until the EEH recovery is complete or return success or failure based + * upon the current device state. + * + * Return: + * SUCCESS as defined in scsi/scsi.h + * FAILED as defined in scsi/scsi.h + */ +static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) +{ + int rc = SUCCESS; + int rcr = 0; + struct Scsi_Host *host = scp->device->host; + struct cxlflash_cfg *cfg = shost_priv(host); + struct device *dev = &cfg->dev->dev; + + dev_dbg(dev, "%s: %d\n", __func__, host->host_no); + + switch (cfg->state) { + case STATE_NORMAL: + cfg->state = STATE_RESET; + drain_ioctls(cfg); + cxlflash_mark_contexts_error(cfg); + rcr = afu_reset(cfg); + if (rcr) { + rc = FAILED; + cfg->state = STATE_FAILTERM; + } else + cfg->state = STATE_NORMAL; + wake_up_all(&cfg->reset_waitq); + ssleep(1); + fallthrough; + case STATE_RESET: + wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); + if (cfg->state == STATE_NORMAL) + break; + fallthrough; + default: + rc = FAILED; + break; + } + + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * cxlflash_change_queue_depth() - change the queue depth for the device + * @sdev: SCSI device destined for queue depth change. + * @qdepth: Requested queue depth value to set. + * + * The requested queue depth is capped to the maximum supported value. + * + * Return: The actual queue depth set. + */ +static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + + if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN) + qdepth = CXLFLASH_MAX_CMDS_PER_LUN; + + scsi_change_queue_depth(sdev, qdepth); + return sdev->queue_depth; +} + +/** + * cxlflash_show_port_status() - queries and presents the current port status + * @port: Desired port for status reporting. + * @cfg: Internal structure associated with the host. + * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. + * + * Return: The size of the ASCII string returned in @buf or -EINVAL. 
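+ *
+ * As a rough illustration only (host number hypothetical), reading the
+ * corresponding sysfs attribute is expected to return one of the strings
+ * selected below:
+ *
+ *	$ cat /sys/class/scsi_host/host0/port0
+ *	online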
+ */ +static ssize_t cxlflash_show_port_status(u32 port, + struct cxlflash_cfg *cfg, + char *buf) +{ + struct device *dev = &cfg->dev->dev; + char *disp_status; + u64 status; + __be64 __iomem *fc_port_regs; + + WARN_ON(port >= MAX_FC_PORTS); + + if (port >= cfg->num_fc_ports) { + dev_info(dev, "%s: Port %d not supported on this card.\n", + __func__, port); + return -EINVAL; + } + + fc_port_regs = get_fc_port_regs(cfg, port); + status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]); + status &= FC_MTIP_STATUS_MASK; + + if (status == FC_MTIP_STATUS_ONLINE) + disp_status = "online"; + else if (status == FC_MTIP_STATUS_OFFLINE) + disp_status = "offline"; + else + disp_status = "unknown"; + + return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status); +} + +/** + * port0_show() - queries and presents the current status of port 0 + * @dev: Generic device associated with the host owning the port. + * @attr: Device attribute representing the port. + * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. + * + * Return: The size of the ASCII string returned in @buf. + */ +static ssize_t port0_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + + return cxlflash_show_port_status(0, cfg, buf); +} + +/** + * port1_show() - queries and presents the current status of port 1 + * @dev: Generic device associated with the host owning the port. + * @attr: Device attribute representing the port. + * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. + * + * Return: The size of the ASCII string returned in @buf. + */ +static ssize_t port1_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + + return cxlflash_show_port_status(1, cfg, buf); +} + +/** + * port2_show() - queries and presents the current status of port 2 + * @dev: Generic device associated with the host owning the port. + * @attr: Device attribute representing the port. + * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. + * + * Return: The size of the ASCII string returned in @buf. + */ +static ssize_t port2_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + + return cxlflash_show_port_status(2, cfg, buf); +} + +/** + * port3_show() - queries and presents the current status of port 3 + * @dev: Generic device associated with the host owning the port. + * @attr: Device attribute representing the port. + * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. + * + * Return: The size of the ASCII string returned in @buf. + */ +static ssize_t port3_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + + return cxlflash_show_port_status(3, cfg, buf); +} + +/** + * lun_mode_show() - presents the current LUN mode of the host + * @dev: Generic device associated with the host. + * @attr: Device attribute representing the LUN mode. + * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII. + * + * Return: The size of the ASCII string returned in @buf. 
+ */
+static ssize_t lun_mode_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+	struct afu *afu = cfg->afu;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
+}
+
+/**
+ * lun_mode_store() - sets the LUN mode of the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the LUN mode.
+ * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
+ * @count: Length of data residing in @buf.
+ *
+ * The CXL Flash AFU supports a dummy LUN mode where the external
+ * links and storage are not required. Space on the FPGA is used
+ * to create 1 or 2 small LUNs which are presented to the system
+ * as if they were a normal storage device. This feature is useful
+ * during development and also provides manufacturing with a way
+ * to test the AFU without an actual device.
+ *
+ * 0 = external LUN[s] (default)
+ * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
+ * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
+ * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
+ * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
+ *
+ * Return: The number of bytes consumed from @buf.
+ */
+static ssize_t lun_mode_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct cxlflash_cfg *cfg = shost_priv(shost);
+	struct afu *afu = cfg->afu;
+	int rc;
+	u32 lun_mode;
+
+	rc = kstrtouint(buf, 10, &lun_mode);
+	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
+		afu->internal_lun = lun_mode;
+
+		/*
+		 * When configured for internal LUN, there is only one channel,
+		 * channel number 0, else there will be one less than the number
+		 * of fc ports for this card.
+		 */
+		if (afu->internal_lun)
+			shost->max_channel = 0;
+		else
+			shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
+
+		afu_reset(cfg);
+		scsi_scan_host(cfg->host);
+	}
+
+	return count;
+}
+
+/**
+ * ioctl_version_show() - presents the current ioctl version of the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the ioctl version.
+ * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t ioctl_version_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	ssize_t bytes = 0;
+
+	bytes = scnprintf(buf, PAGE_SIZE,
+			  "disk: %u\n", DK_CXLFLASH_VERSION_0);
+	bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+			   "host: %u\n", HT_CXLFLASH_VERSION_0);
+
+	return bytes;
+}
+
+/**
+ * cxlflash_show_port_lun_table() - queries and presents the port LUN table
+ * @port: Desired port for status reporting.
+ * @cfg: Internal structure associated with the host.
+ * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf or -EINVAL.
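+ *
+ * Purely for illustration (values hypothetical), the attribute reads back
+ * one "%03d: %016llx" pair per LUN table entry, along the lines of:
+ *
+ *	000: 0000000000000000
+ *	001: 4000400000000000
+ *	...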
+ */ +static ssize_t cxlflash_show_port_lun_table(u32 port, + struct cxlflash_cfg *cfg, + char *buf) +{ + struct device *dev = &cfg->dev->dev; + __be64 __iomem *fc_port_luns; + int i; + ssize_t bytes = 0; + + WARN_ON(port >= MAX_FC_PORTS); + + if (port >= cfg->num_fc_ports) { + dev_info(dev, "%s: Port %d not supported on this card.\n", + __func__, port); + return -EINVAL; + } + + fc_port_luns = get_fc_port_luns(cfg, port); + + for (i = 0; i < CXLFLASH_NUM_VLUNS; i++) + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + "%03d: %016llx\n", + i, readq_be(&fc_port_luns[i])); + return bytes; +} + +/** + * port0_lun_table_show() - presents the current LUN table of port 0 + * @dev: Generic device associated with the host owning the port. + * @attr: Device attribute representing the port. + * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. + * + * Return: The size of the ASCII string returned in @buf. + */ +static ssize_t port0_lun_table_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + + return cxlflash_show_port_lun_table(0, cfg, buf); +} + +/** + * port1_lun_table_show() - presents the current LUN table of port 1 + * @dev: Generic device associated with the host owning the port. + * @attr: Device attribute representing the port. + * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. + * + * Return: The size of the ASCII string returned in @buf. + */ +static ssize_t port1_lun_table_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + + return cxlflash_show_port_lun_table(1, cfg, buf); +} + +/** + * port2_lun_table_show() - presents the current LUN table of port 2 + * @dev: Generic device associated with the host owning the port. + * @attr: Device attribute representing the port. + * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. + * + * Return: The size of the ASCII string returned in @buf. + */ +static ssize_t port2_lun_table_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + + return cxlflash_show_port_lun_table(2, cfg, buf); +} + +/** + * port3_lun_table_show() - presents the current LUN table of port 3 + * @dev: Generic device associated with the host owning the port. + * @attr: Device attribute representing the port. + * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. + * + * Return: The size of the ASCII string returned in @buf. + */ +static ssize_t port3_lun_table_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + + return cxlflash_show_port_lun_table(3, cfg, buf); +} + +/** + * irqpoll_weight_show() - presents the current IRQ poll weight for the host + * @dev: Generic device associated with the host. + * @attr: Device attribute representing the IRQ poll weight. + * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll + * weight in ASCII. + * + * An IRQ poll weight of 0 indicates polling is disabled. + * + * Return: The size of the ASCII string returned in @buf. 
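+ *
+ * Illustrative read of the attribute (host number and value hypothetical):
+ *
+ *	$ cat /sys/class/scsi_host/host0/irqpoll_weight
+ *	2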
+ */
+static ssize_t irqpoll_weight_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+	struct afu *afu = cfg->afu;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
+}
+
+/**
+ * irqpoll_weight_store() - sets the current IRQ poll weight for the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the IRQ poll weight.
+ * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll
+ *	 weight in ASCII.
+ * @count: Length of data residing in @buf.
+ *
+ * An IRQ poll weight of 0 indicates polling is disabled.
+ *
+ * Return: The number of bytes consumed from @buf on success, -errno on failure.
+ */
+static ssize_t irqpoll_weight_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+	struct device *cfgdev = &cfg->dev->dev;
+	struct afu *afu = cfg->afu;
+	struct hwq *hwq;
+	u32 weight;
+	int rc, i;
+
+	rc = kstrtouint(buf, 10, &weight);
+	if (rc)
+		return -EINVAL;
+
+	if (weight > 256) {
+		dev_info(cfgdev,
+			 "Invalid IRQ poll weight. It must be 256 or less.\n");
+		return -EINVAL;
+	}
+
+	if (weight == afu->irqpoll_weight) {
+		dev_info(cfgdev,
+			 "Requested IRQ poll weight is already the current weight.\n");
+		return -EINVAL;
+	}
+
+	if (afu_is_irqpoll_enabled(afu)) {
+		for (i = 0; i < afu->num_hwqs; i++) {
+			hwq = get_hwq(afu, i);
+
+			irq_poll_disable(&hwq->irqpoll);
+		}
+	}
+
+	afu->irqpoll_weight = weight;
+
+	if (weight > 0) {
+		for (i = 0; i < afu->num_hwqs; i++) {
+			hwq = get_hwq(afu, i);
+
+			irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
+		}
+	}
+
+	return count;
+}
+
+/**
+ * num_hwqs_show() - presents the number of hardware queues for the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the number of hardware queues.
+ * @buf: Buffer of length PAGE_SIZE to report back the number of hardware
+ *	 queues in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t num_hwqs_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+	struct afu *afu = cfg->afu;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
+}
+
+/**
+ * num_hwqs_store() - sets the number of hardware queues for the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the number of hardware queues.
+ * @buf: Buffer of length PAGE_SIZE containing the number of hardware
+ *	 queues in ASCII.
+ * @count: Length of data residing in @buf.
+ *
+ * n > 0: num_hwqs = n
+ * n = 0: num_hwqs = num_online_cpus()
+ * n < 0: num_hwqs = num_online_cpus() / abs(n)
+ *
+ * Return: The number of bytes consumed from @buf on success, -errno on failure.
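+ *
+ * Worked example (CPU count hypothetical): on a host with 16 online CPUs,
+ * writing "4" requests 4 hardware queues, "0" requests 16 and "-4" requests
+ * 16 / 4 = 4. The result is capped at CXLFLASH_MAX_HWQS before the AFU is
+ * reset below to apply the new configuration.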
+ */
+static ssize_t num_hwqs_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+	struct afu *afu = cfg->afu;
+	int rc;
+	int nhwqs, num_hwqs;
+
+	rc = kstrtoint(buf, 10, &nhwqs);
+	if (rc)
+		return -EINVAL;
+
+	if (nhwqs >= 1)
+		num_hwqs = nhwqs;
+	else if (nhwqs == 0)
+		num_hwqs = num_online_cpus();
+	else
+		num_hwqs = num_online_cpus() / abs(nhwqs);
+
+	afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
+	WARN_ON_ONCE(afu->desired_hwqs == 0);
+
+retry:
+	switch (cfg->state) {
+	case STATE_NORMAL:
+		cfg->state = STATE_RESET;
+		drain_ioctls(cfg);
+		cxlflash_mark_contexts_error(cfg);
+		rc = afu_reset(cfg);
+		if (rc)
+			cfg->state = STATE_FAILTERM;
+		else
+			cfg->state = STATE_NORMAL;
+		wake_up_all(&cfg->reset_waitq);
+		break;
+	case STATE_RESET:
+		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
+		if (cfg->state == STATE_NORMAL)
+			goto retry;
+		fallthrough;
+	default:
+		/* Ideally should not happen */
+		dev_err(dev, "%s: Device is not ready, state=%d\n",
+			__func__, cfg->state);
+		break;
+	}
+
+	return count;
+}
+
+static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
+
+/**
+ * hwq_mode_show() - presents the HWQ steering mode for the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the HWQ steering mode.
+ * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode
+ *	 as a character string.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t hwq_mode_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+	struct afu *afu = cfg->afu;
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
+}
+
+/**
+ * hwq_mode_store() - sets the HWQ steering mode for the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the HWQ steering mode.
+ * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode
+ *	 as a character string.
+ * @count: Length of data residing in @buf.
+ *
+ * rr = Round-Robin
+ * tag = Block MQ Tagging
+ * cpu = CPU Affinity
+ *
+ * Return: The number of bytes consumed from @buf on success, -errno on failure.
+ */
+static ssize_t hwq_mode_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct cxlflash_cfg *cfg = shost_priv(shost);
+	struct device *cfgdev = &cfg->dev->dev;
+	struct afu *afu = cfg->afu;
+	int i;
+	u32 mode = MAX_HWQ_MODE;
+
+	for (i = 0; i < MAX_HWQ_MODE; i++) {
+		if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
+			mode = i;
+			break;
+		}
+	}
+
+	if (mode >= MAX_HWQ_MODE) {
+		dev_info(cfgdev, "Invalid HWQ steering mode.\n");
+		return -EINVAL;
+	}
+
+	afu->hwq_mode = mode;
+
+	return count;
+}
+
+/**
+ * mode_show() - presents the current mode of the device
+ * @dev: Generic device associated with the device.
+ * @attr: Device attribute representing the device mode.
+ * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t mode_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 sdev->hostdata ?
"superpipe" : "legacy"); +} + +/* + * Host attributes + */ +static DEVICE_ATTR_RO(port0); +static DEVICE_ATTR_RO(port1); +static DEVICE_ATTR_RO(port2); +static DEVICE_ATTR_RO(port3); +static DEVICE_ATTR_RW(lun_mode); +static DEVICE_ATTR_RO(ioctl_version); +static DEVICE_ATTR_RO(port0_lun_table); +static DEVICE_ATTR_RO(port1_lun_table); +static DEVICE_ATTR_RO(port2_lun_table); +static DEVICE_ATTR_RO(port3_lun_table); +static DEVICE_ATTR_RW(irqpoll_weight); +static DEVICE_ATTR_RW(num_hwqs); +static DEVICE_ATTR_RW(hwq_mode); + +static struct attribute *cxlflash_host_attrs[] = { + &dev_attr_port0.attr, + &dev_attr_port1.attr, + &dev_attr_port2.attr, + &dev_attr_port3.attr, + &dev_attr_lun_mode.attr, + &dev_attr_ioctl_version.attr, + &dev_attr_port0_lun_table.attr, + &dev_attr_port1_lun_table.attr, + &dev_attr_port2_lun_table.attr, + &dev_attr_port3_lun_table.attr, + &dev_attr_irqpoll_weight.attr, + &dev_attr_num_hwqs.attr, + &dev_attr_hwq_mode.attr, + NULL +}; + +ATTRIBUTE_GROUPS(cxlflash_host); + +/* + * Device attributes + */ +static DEVICE_ATTR_RO(mode); + +static struct attribute *cxlflash_dev_attrs[] = { + &dev_attr_mode.attr, + NULL +}; + +ATTRIBUTE_GROUPS(cxlflash_dev); + +/* + * Host template + */ +static struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .name = CXLFLASH_ADAPTER_NAME, + .info = cxlflash_driver_info, + .ioctl = cxlflash_ioctl, + .proc_name = CXLFLASH_NAME, + .queuecommand = cxlflash_queuecommand, + .eh_abort_handler = cxlflash_eh_abort_handler, + .eh_device_reset_handler = cxlflash_eh_device_reset_handler, + .eh_host_reset_handler = cxlflash_eh_host_reset_handler, + .change_queue_depth = cxlflash_change_queue_depth, + .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN, + .can_queue = CXLFLASH_MAX_CMDS, + .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1, + .this_id = -1, + .sg_tablesize = 1, /* No scatter gather support */ + .max_sectors = CXLFLASH_MAX_SECTORS, + .shost_groups = cxlflash_host_groups, + .sdev_groups = cxlflash_dev_groups, +}; + +/* + * Device dependent values + */ +static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS, + CXLFLASH_WWPN_VPD_REQUIRED }; +static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS, + CXLFLASH_NOTIFY_SHUTDOWN }; +static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS, + (CXLFLASH_NOTIFY_SHUTDOWN | + CXLFLASH_OCXL_DEV) }; + +/* + * PCI device binding table + */ +static struct pci_device_id cxlflash_pci_table[] = { + {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals}, + {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals}, + {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals}, + {} +}; + +MODULE_DEVICE_TABLE(pci, cxlflash_pci_table); + +/** + * cxlflash_worker_thread() - work thread handler for the AFU + * @work: Work structure contained within cxlflash associated with host. 
+ * + * Handles the following events: + * - Link reset which cannot be performed on interrupt context due to + * blocking up to a few seconds + * - Rescan the host + */ +static void cxlflash_worker_thread(struct work_struct *work) +{ + struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg, + work_q); + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + __be64 __iomem *fc_port_regs; + int port; + ulong lock_flags; + + /* Avoid MMIO if the device has failed */ + + if (cfg->state != STATE_NORMAL) + return; + + spin_lock_irqsave(cfg->host->host_lock, lock_flags); + + if (cfg->lr_state == LINK_RESET_REQUIRED) { + port = cfg->lr_port; + if (port < 0) + dev_err(dev, "%s: invalid port index %d\n", + __func__, port); + else { + spin_unlock_irqrestore(cfg->host->host_lock, + lock_flags); + + /* The reset can block... */ + fc_port_regs = get_fc_port_regs(cfg, port); + afu_link_reset(afu, port, fc_port_regs); + spin_lock_irqsave(cfg->host->host_lock, lock_flags); + } + + cfg->lr_state = LINK_RESET_COMPLETE; + } + + spin_unlock_irqrestore(cfg->host->host_lock, lock_flags); + + if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0) + scsi_scan_host(cfg->host); +} + +/** + * cxlflash_chr_open() - character device open handler + * @inode: Device inode associated with this character device. + * @file: File pointer for this device. + * + * Only users with admin privileges are allowed to open the character device. + * + * Return: 0 on success, -errno on failure + */ +static int cxlflash_chr_open(struct inode *inode, struct file *file) +{ + struct cxlflash_cfg *cfg; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev); + file->private_data = cfg; + + return 0; +} + +/** + * decode_hioctl() - translates encoded host ioctl to easily identifiable string + * @cmd: The host ioctl command to decode. + * + * Return: A string identifying the decoded host ioctl. + */ +static char *decode_hioctl(unsigned int cmd) +{ + switch (cmd) { + case HT_CXLFLASH_LUN_PROVISION: + return __stringify_1(HT_CXLFLASH_LUN_PROVISION); + } + + return "UNKNOWN"; +} + +/** + * cxlflash_lun_provision() - host LUN provisioning handler + * @cfg: Internal structure associated with the host. + * @lunprov: Kernel copy of userspace ioctl data structure. 
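+ *
+ * Handles the create, delete and query-port subcommands of the LUN provision
+ * host ioctl. Create and delete are translated into an internal AFU command
+ * (SISL_AFU_CMD_LUN_PROVISION) issued through send_afu_cmd(), while the port
+ * query is answered directly from the FC port MMIO registers.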
+ * + * Return: 0 on success, -errno on failure + */ +static int cxlflash_lun_provision(struct cxlflash_cfg *cfg, + struct ht_cxlflash_lun_provision *lunprov) +{ + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + struct sisl_ioarcb rcb; + struct sisl_ioasa asa; + __be64 __iomem *fc_port_regs; + u16 port = lunprov->port; + u16 scmd = lunprov->hdr.subcmd; + u16 type; + u64 reg; + u64 size; + u64 lun_id; + int rc = 0; + + if (!afu_is_lun_provision(afu)) { + rc = -ENOTSUPP; + goto out; + } + + if (port >= cfg->num_fc_ports) { + rc = -EINVAL; + goto out; + } + + switch (scmd) { + case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN: + type = SISL_AFU_LUN_PROVISION_CREATE; + size = lunprov->size; + lun_id = 0; + break; + case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN: + type = SISL_AFU_LUN_PROVISION_DELETE; + size = 0; + lun_id = lunprov->lun_id; + break; + case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT: + fc_port_regs = get_fc_port_regs(cfg, port); + + reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]); + lunprov->max_num_luns = reg; + reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]); + lunprov->cur_num_luns = reg; + reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]); + lunprov->max_cap_port = reg; + reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]); + lunprov->cur_cap_port = reg; + + goto out; + default: + rc = -EINVAL; + goto out; + } + + memset(&rcb, 0, sizeof(rcb)); + memset(&asa, 0, sizeof(asa)); + rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; + rcb.lun_id = lun_id; + rcb.msi = SISL_MSI_RRQ_UPDATED; + rcb.timeout = MC_LUN_PROV_TIMEOUT; + rcb.ioasa = &asa; + + rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION; + rcb.cdb[1] = type; + rcb.cdb[2] = port; + put_unaligned_be64(size, &rcb.cdb[8]); + + rc = send_afu_cmd(afu, &rcb); + if (rc) { + dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n", + __func__, rc, asa.ioasc, asa.afu_extra); + goto out; + } + + if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) { + lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo; + memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid)); + } +out: + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * cxlflash_afu_debug() - host AFU debug handler + * @cfg: Internal structure associated with the host. + * @afu_dbg: Kernel copy of userspace ioctl data structure. + * + * For debug requests requiring a data buffer, always provide an aligned + * (cache line) buffer to the AFU to appease any alignment requirements. 
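+ *
+ * As a sketch of the alignment handling (sizes hypothetical): with a 128-byte
+ * cache line and a 200-byte request, a 327-byte buffer is allocated and
+ * PTR_ALIGN() advances kbuf to the first 128-byte boundary within it, so the
+ * AFU is always handed a cache line aligned data address.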
+ * + * Return: 0 on success, -errno on failure + */ +static int cxlflash_afu_debug(struct cxlflash_cfg *cfg, + struct ht_cxlflash_afu_debug *afu_dbg) +{ + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + struct sisl_ioarcb rcb; + struct sisl_ioasa asa; + char *buf = NULL; + char *kbuf = NULL; + void __user *ubuf = (__force void __user *)afu_dbg->data_ea; + u16 req_flags = SISL_REQ_FLAGS_AFU_CMD; + u32 ulen = afu_dbg->data_len; + bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE; + int rc = 0; + + if (!afu_is_afu_debug(afu)) { + rc = -ENOTSUPP; + goto out; + } + + if (ulen) { + req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN; + + if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) { + rc = -EINVAL; + goto out; + } + + buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL); + if (unlikely(!buf)) { + rc = -ENOMEM; + goto out; + } + + kbuf = PTR_ALIGN(buf, cache_line_size()); + + if (is_write) { + req_flags |= SISL_REQ_FLAGS_HOST_WRITE; + + if (copy_from_user(kbuf, ubuf, ulen)) { + rc = -EFAULT; + goto out; + } + } + } + + memset(&rcb, 0, sizeof(rcb)); + memset(&asa, 0, sizeof(asa)); + + rcb.req_flags = req_flags; + rcb.msi = SISL_MSI_RRQ_UPDATED; + rcb.timeout = MC_AFU_DEBUG_TIMEOUT; + rcb.ioasa = &asa; + + if (ulen) { + rcb.data_len = ulen; + rcb.data_ea = (uintptr_t)kbuf; + } + + rcb.cdb[0] = SISL_AFU_CMD_DEBUG; + memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd, + HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN); + + rc = send_afu_cmd(afu, &rcb); + if (rc) { + dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n", + __func__, rc, asa.ioasc, asa.afu_extra); + goto out; + } + + if (ulen && !is_write) { + if (copy_to_user(ubuf, kbuf, ulen)) + rc = -EFAULT; + } +out: + kfree(buf); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * cxlflash_chr_ioctl() - character device IOCTL handler + * @file: File pointer for this device. + * @cmd: IOCTL command. + * @arg: Userspace ioctl data structure. + * + * A read/write semaphore is used to implement a 'drain' of currently + * running ioctls. The read semaphore is taken at the beginning of each + * ioctl thread and released upon concluding execution. Additionally the + * semaphore should be released and then reacquired in any ioctl execution + * path which will wait for an event to occur that is outside the scope of + * the ioctl (i.e. an adapter reset). To drain the ioctls currently running, + * a thread simply needs to acquire the write semaphore. 
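+ *
+ * A minimal sketch of the drain side of this scheme (the drain_ioctls()
+ * helper is assumed to follow this pattern):
+ *
+ *	down_write(&cfg->ioctl_rwsem);	// blocks until in-flight ioctls finish
+ *	up_write(&cfg->ioctl_rwsem);	// new ioctls may then proceed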
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	typedef int (*hioctl) (struct cxlflash_cfg *, void *);
+
+	struct cxlflash_cfg *cfg = file->private_data;
+	struct device *dev = &cfg->dev->dev;
+	char buf[sizeof(union cxlflash_ht_ioctls)];
+	void __user *uarg = (void __user *)arg;
+	struct ht_cxlflash_hdr *hdr;
+	size_t size = 0;
+	bool known_ioctl = false;
+	int idx = 0;
+	int rc = 0;
+	hioctl do_ioctl = NULL;
+
+	static const struct {
+		size_t size;
+		hioctl ioctl;
+	} ioctl_tbl[] = {	/* NOTE: order matters here */
+		{ sizeof(struct ht_cxlflash_lun_provision),
+		  (hioctl)cxlflash_lun_provision },
+		{ sizeof(struct ht_cxlflash_afu_debug),
+		  (hioctl)cxlflash_afu_debug },
+	};
+
+	/* Hold read semaphore so we can drain if needed */
+	down_read(&cfg->ioctl_rwsem);
+
+	dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
+		__func__, cmd, idx, sizeof(ioctl_tbl));
+
+	switch (cmd) {
+	case HT_CXLFLASH_LUN_PROVISION:
+	case HT_CXLFLASH_AFU_DEBUG:
+		known_ioctl = true;
+		idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
+		size = ioctl_tbl[idx].size;
+		do_ioctl = ioctl_tbl[idx].ioctl;
+
+		if (likely(do_ioctl))
+			break;
+
+		fallthrough;
+	default:
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (unlikely(copy_from_user(&buf, uarg, size))) {
+		dev_err(dev, "%s: copy_from_user() fail "
+			"size=%lu cmd=%d (%s) uarg=%p\n",
+			__func__, size, cmd, decode_hioctl(cmd), uarg);
+		rc = -EFAULT;
+		goto out;
+	}
+
+	hdr = (struct ht_cxlflash_hdr *)&buf;
+	if (hdr->version != HT_CXLFLASH_VERSION_0) {
+		dev_dbg(dev, "%s: Version %u not supported for %s\n",
+			__func__, hdr->version, decode_hioctl(cmd));
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
+		dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = do_ioctl(cfg, (void *)&buf);
+	if (likely(!rc))
+		if (unlikely(copy_to_user(uarg, &buf, size))) {
+			dev_err(dev, "%s: copy_to_user() fail "
+				"size=%lu cmd=%d (%s) uarg=%p\n",
+				__func__, size, cmd, decode_hioctl(cmd), uarg);
+			rc = -EFAULT;
+		}
+
+	/* fall through to exit */
+
+out:
+	up_read(&cfg->ioctl_rwsem);
+	if (unlikely(rc && known_ioctl))
+		dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
+			__func__, decode_hioctl(cmd), cmd, rc);
+	else
+		dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
+			__func__, decode_hioctl(cmd), cmd, rc);
+	return rc;
+}
+
+/*
+ * Character device file operations
+ */
+static const struct file_operations cxlflash_chr_fops = {
+	.owner		= THIS_MODULE,
+	.open		= cxlflash_chr_open,
+	.unlocked_ioctl	= cxlflash_chr_ioctl,
+	.compat_ioctl	= compat_ptr_ioctl,
+};
+
+/**
+ * init_chrdev() - initialize the character device for the host
+ * @cfg: Internal structure associated with the host.
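+ *
+ * Allocates a minor, adds the cdev and creates the class device. Since the
+ * class devnode callback below returns "cxlflash/%s", the resulting node is
+ * expected to appear as /dev/cxlflash/cxlflashN, where N is the minor number.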
+ * + * Return: 0 on success, -errno on failure + */ +static int init_chrdev(struct cxlflash_cfg *cfg) +{ + struct device *dev = &cfg->dev->dev; + struct device *char_dev; + dev_t devno; + int minor; + int rc = 0; + + minor = cxlflash_get_minor(); + if (unlikely(minor < 0)) { + dev_err(dev, "%s: Exhausted allowed adapters\n", __func__); + rc = -ENOSPC; + goto out; + } + + devno = MKDEV(cxlflash_major, minor); + cdev_init(&cfg->cdev, &cxlflash_chr_fops); + + rc = cdev_add(&cfg->cdev, devno, 1); + if (rc) { + dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc); + goto err1; + } + + char_dev = device_create(cxlflash_class, NULL, devno, + NULL, "cxlflash%d", minor); + if (IS_ERR(char_dev)) { + rc = PTR_ERR(char_dev); + dev_err(dev, "%s: device_create failed rc=%d\n", + __func__, rc); + goto err2; + } + + cfg->chardev = char_dev; +out: + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +err2: + cdev_del(&cfg->cdev); +err1: + cxlflash_put_minor(minor); + goto out; +} + +/** + * cxlflash_probe() - PCI entry point to add host + * @pdev: PCI device associated with the host. + * @dev_id: PCI device id associated with device. + * + * The device will initially start out in a 'probing' state and + * transition to the 'normal' state at the end of a successful + * probe. Should an EEH event occur during probe, the notification + * thread (error_detected()) will wait until the probe handler + * is nearly complete. At that time, the device will be moved to + * a 'probed' state and the EEH thread woken up to drive the slot + * reset and recovery (device moves to 'normal' state). Meanwhile, + * the probe will be allowed to exit successfully. + * + * Return: 0 on success, -errno on failure + */ +static int cxlflash_probe(struct pci_dev *pdev, + const struct pci_device_id *dev_id) +{ + struct Scsi_Host *host; + struct cxlflash_cfg *cfg = NULL; + struct device *dev = &pdev->dev; + struct dev_dependent_vals *ddv; + int rc = 0; + int k; + + dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n", + __func__, pdev->irq); + + ddv = (struct dev_dependent_vals *)dev_id->driver_data; + driver_template.max_sectors = ddv->max_sectors; + + host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg)); + if (!host) { + dev_err(dev, "%s: scsi_host_alloc failed\n", __func__); + rc = -ENOMEM; + goto out; + } + + host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS; + host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET; + host->unique_id = host->host_no; + host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; + + cfg = shost_priv(host); + cfg->state = STATE_PROBING; + cfg->host = host; + rc = alloc_mem(cfg); + if (rc) { + dev_err(dev, "%s: alloc_mem failed\n", __func__); + rc = -ENOMEM; + scsi_host_put(cfg->host); + goto out; + } + + cfg->init_state = INIT_STATE_NONE; + cfg->dev = pdev; + cfg->cxl_fops = cxlflash_cxl_fops; + cfg->ops = cxlflash_assign_ops(ddv); + WARN_ON_ONCE(!cfg->ops); + + /* + * Promoted LUNs move to the top of the LUN table. The rest stay on + * the bottom half. The bottom half grows from the end (index = 255), + * whereas the top half grows from the beginning (index = 0). + * + * Initialize the last LUN index for all possible ports. 
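+ *
+ * For instance, assuming CXLFLASH_NUM_VLUNS is 512, each last_lun_index[k]
+ * starts out at 255 while promote_lun_index starts at 0, matching the two
+ * growth directions described above.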
+ */ + cfg->promote_lun_index = 0; + + for (k = 0; k < MAX_FC_PORTS; k++) + cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1; + + cfg->dev_id = (struct pci_device_id *)dev_id; + + init_waitqueue_head(&cfg->tmf_waitq); + init_waitqueue_head(&cfg->reset_waitq); + + INIT_WORK(&cfg->work_q, cxlflash_worker_thread); + cfg->lr_state = LINK_RESET_INVALID; + cfg->lr_port = -1; + spin_lock_init(&cfg->tmf_slock); + mutex_init(&cfg->ctx_tbl_list_mutex); + mutex_init(&cfg->ctx_recovery_mutex); + init_rwsem(&cfg->ioctl_rwsem); + INIT_LIST_HEAD(&cfg->ctx_err_recovery); + INIT_LIST_HEAD(&cfg->lluns); + + pci_set_drvdata(pdev, cfg); + + rc = init_pci(cfg); + if (rc) { + dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc); + goto out_remove; + } + cfg->init_state = INIT_STATE_PCI; + + cfg->afu_cookie = cfg->ops->create_afu(pdev); + if (unlikely(!cfg->afu_cookie)) { + dev_err(dev, "%s: create_afu failed\n", __func__); + rc = -ENOMEM; + goto out_remove; + } + + rc = init_afu(cfg); + if (rc && !wq_has_sleeper(&cfg->reset_waitq)) { + dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc); + goto out_remove; + } + cfg->init_state = INIT_STATE_AFU; + + rc = init_scsi(cfg); + if (rc) { + dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc); + goto out_remove; + } + cfg->init_state = INIT_STATE_SCSI; + + rc = init_chrdev(cfg); + if (rc) { + dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc); + goto out_remove; + } + cfg->init_state = INIT_STATE_CDEV; + + if (wq_has_sleeper(&cfg->reset_waitq)) { + cfg->state = STATE_PROBED; + wake_up_all(&cfg->reset_waitq); + } else + cfg->state = STATE_NORMAL; +out: + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; + +out_remove: + cfg->state = STATE_PROBED; + cxlflash_remove(pdev); + goto out; +} + +/** + * cxlflash_pci_error_detected() - called when a PCI error is detected + * @pdev: PCI device struct. + * @state: PCI channel state. + * + * When an EEH occurs during an active reset, wait until the reset is + * complete and then take action based upon the device state. + * + * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT + */ +static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + int rc = 0; + struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); + struct device *dev = &cfg->dev->dev; + + dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state); + + switch (state) { + case pci_channel_io_frozen: + wait_event(cfg->reset_waitq, cfg->state != STATE_RESET && + cfg->state != STATE_PROBING); + if (cfg->state == STATE_FAILTERM) + return PCI_ERS_RESULT_DISCONNECT; + + cfg->state = STATE_RESET; + scsi_block_requests(cfg->host); + drain_ioctls(cfg); + rc = cxlflash_mark_contexts_error(cfg); + if (unlikely(rc)) + dev_err(dev, "%s: Failed to mark user contexts rc=%d\n", + __func__, rc); + term_afu(cfg); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + cfg->state = STATE_FAILTERM; + wake_up_all(&cfg->reset_waitq); + scsi_unblock_requests(cfg->host); + return PCI_ERS_RESULT_DISCONNECT; + default: + break; + } + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * cxlflash_pci_slot_reset() - called when PCI slot has been reset + * @pdev: PCI device struct. + * + * This routine is called by the pci error recovery code after the PCI + * slot has been reset, just before we should resume normal operations. 
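+ *
+ * Together with cxlflash_pci_error_detected() and cxlflash_pci_resume() this
+ * forms the EEH recovery sequence: the AFU is torn down when the channel is
+ * frozen, re-initialized here via init_afu(), and the host is unblocked and
+ * returned to STATE_NORMAL in the resume callback.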
+ * + * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT + */ +static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev) +{ + int rc = 0; + struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); + struct device *dev = &cfg->dev->dev; + + dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); + + rc = init_afu(cfg); + if (unlikely(rc)) { + dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc); + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * cxlflash_pci_resume() - called when normal operation can resume + * @pdev: PCI device struct + */ +static void cxlflash_pci_resume(struct pci_dev *pdev) +{ + struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); + struct device *dev = &cfg->dev->dev; + + dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); + + cfg->state = STATE_NORMAL; + wake_up_all(&cfg->reset_waitq); + scsi_unblock_requests(cfg->host); +} + +/** + * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class + * @dev: Character device. + * @mode: Mode that can be used to verify access. + * + * Return: Allocated string describing the devtmpfs structure. + */ +static char *cxlflash_devnode(const struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev)); +} + +/** + * cxlflash_class_init() - create character device class + * + * Return: 0 on success, -errno on failure + */ +static int cxlflash_class_init(void) +{ + dev_t devno; + int rc = 0; + + rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash"); + if (unlikely(rc)) { + pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc); + goto out; + } + + cxlflash_major = MAJOR(devno); + + cxlflash_class = class_create("cxlflash"); + if (IS_ERR(cxlflash_class)) { + rc = PTR_ERR(cxlflash_class); + pr_err("%s: class_create failed rc=%d\n", __func__, rc); + goto err; + } + + cxlflash_class->devnode = cxlflash_devnode; +out: + pr_debug("%s: returning rc=%d\n", __func__, rc); + return rc; +err: + unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS); + goto out; +} + +/** + * cxlflash_class_exit() - destroy character device class + */ +static void cxlflash_class_exit(void) +{ + dev_t devno = MKDEV(cxlflash_major, 0); + + class_destroy(cxlflash_class); + unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS); +} + +static const struct pci_error_handlers cxlflash_err_handler = { + .error_detected = cxlflash_pci_error_detected, + .slot_reset = cxlflash_pci_slot_reset, + .resume = cxlflash_pci_resume, +}; + +/* + * PCI device structure + */ +static struct pci_driver cxlflash_driver = { + .name = CXLFLASH_NAME, + .id_table = cxlflash_pci_table, + .probe = cxlflash_probe, + .remove = cxlflash_remove, + .shutdown = cxlflash_remove, + .err_handler = &cxlflash_err_handler, +}; + +/** + * init_cxlflash() - module entry point + * + * Return: 0 on success, -errno on failure + */ +static int __init init_cxlflash(void) +{ + int rc; + + check_sizes(); + cxlflash_list_init(); + rc = cxlflash_class_init(); + if (unlikely(rc)) + goto out; + + rc = pci_register_driver(&cxlflash_driver); + if (unlikely(rc)) + goto err; +out: + pr_debug("%s: returning rc=%d\n", __func__, rc); + return rc; +err: + cxlflash_class_exit(); + goto out; +} + +/** + * exit_cxlflash() - module exit point + */ +static void __exit exit_cxlflash(void) +{ + cxlflash_term_global_luns(); + cxlflash_free_errpage(); + + pci_unregister_driver(&cxlflash_driver); + cxlflash_class_exit(); +} + +module_init(init_cxlflash); +module_exit(exit_cxlflash); diff --git 
a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h new file mode 100644 index 000000000..0bfb98eff --- /dev/null +++ b/drivers/scsi/cxlflash/main.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * CXL Flash Device Driver + * + * Written by: Manoj N. Kumar , IBM Corporation + * Matthew R. Ochs , IBM Corporation + * + * Copyright (C) 2015 IBM Corporation + */ + +#ifndef _CXLFLASH_MAIN_H +#define _CXLFLASH_MAIN_H + +#include +#include +#include +#include + +#include "backend.h" + +#define CXLFLASH_NAME "cxlflash" +#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter" +#define CXLFLASH_MAX_ADAPTERS 32 + +#define PCI_DEVICE_ID_IBM_CORSA 0x04F0 +#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600 +#define PCI_DEVICE_ID_IBM_BRIARD 0x0624 + +/* Since there is only one target, make it 0 */ +#define CXLFLASH_TARGET 0 +#define CXLFLASH_MAX_CDB_LEN 16 + +/* Really only one target per bus since the Texan is directly attached */ +#define CXLFLASH_MAX_NUM_TARGETS_PER_BUS 1 +#define CXLFLASH_MAX_NUM_LUNS_PER_TARGET 65536 + +#define CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ) + +/* FC defines */ +#define FC_MTIP_CMDCONFIG 0x010 +#define FC_MTIP_STATUS 0x018 +#define FC_MAX_NUM_LUNS 0x080 /* Max LUNs host can provision for port */ +#define FC_CUR_NUM_LUNS 0x088 /* Cur number LUNs provisioned for port */ +#define FC_MAX_CAP_PORT 0x090 /* Max capacity all LUNs for port (4K blocks) */ +#define FC_CUR_CAP_PORT 0x098 /* Cur capacity all LUNs for port (4K blocks) */ + +#define FC_PNAME 0x300 +#define FC_CONFIG 0x320 +#define FC_CONFIG2 0x328 +#define FC_STATUS 0x330 +#define FC_ERROR 0x380 +#define FC_ERRCAP 0x388 +#define FC_ERRMSK 0x390 +#define FC_CNT_CRCERR 0x538 +#define FC_CRC_THRESH 0x580 + +#define FC_MTIP_CMDCONFIG_ONLINE 0x20ULL +#define FC_MTIP_CMDCONFIG_OFFLINE 0x40ULL + +#define FC_MTIP_STATUS_MASK 0x30ULL +#define FC_MTIP_STATUS_ONLINE 0x20ULL +#define FC_MTIP_STATUS_OFFLINE 0x10ULL + +/* TIMEOUT and RETRY definitions */ + +/* AFU command timeout values */ +#define MC_AFU_SYNC_TIMEOUT 5 /* 5 secs */ +#define MC_LUN_PROV_TIMEOUT 5 /* 5 secs */ +#define MC_AFU_DEBUG_TIMEOUT 5 /* 5 secs */ + +/* AFU command room retry limit */ +#define MC_ROOM_RETRY_CNT 10 + +/* FC CRC clear periodic timer */ +#define MC_CRC_THRESH 100 /* threshold in 5 mins */ + +#define FC_PORT_STATUS_RETRY_CNT 100 /* 100 100ms retries = 10 seconds */ +#define FC_PORT_STATUS_RETRY_INTERVAL_US 100000 /* microseconds */ + +/* VPD defines */ +#define CXLFLASH_VPD_LEN 256 +#define WWPN_LEN 16 +#define WWPN_BUF_LEN (WWPN_LEN + 1) + +enum undo_level { + UNDO_NOOP = 0, + FREE_IRQ, + UNMAP_ONE, + UNMAP_TWO, + UNMAP_THREE +}; + +struct dev_dependent_vals { + u64 max_sectors; + u64 flags; +#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL +#define CXLFLASH_WWPN_VPD_REQUIRED 0x0000000000000002ULL +#define CXLFLASH_OCXL_DEV 0x0000000000000004ULL +}; + +static inline const struct cxlflash_backend_ops * +cxlflash_assign_ops(struct dev_dependent_vals *ddv) +{ + const struct cxlflash_backend_ops *ops = NULL; + +#ifdef CONFIG_OCXL_BASE + if (ddv->flags & CXLFLASH_OCXL_DEV) + ops = &cxlflash_ocxl_ops; +#endif + +#ifdef CONFIG_CXL_BASE + if (!(ddv->flags & CXLFLASH_OCXL_DEV)) + ops = &cxlflash_cxl_ops; +#endif + + return ops; +} + +struct asyc_intr_info { + u64 status; + char *desc; + u8 port; + u8 action; +#define CLR_FC_ERROR 0x01 +#define LINK_RESET 0x02 +#define SCAN_HOST 0x04 +}; + +#endif /* _CXLFLASH_MAIN_H */ diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c new file mode 
100644 index 000000000..6542818e5 --- /dev/null +++ b/drivers/scsi/cxlflash/ocxl_hw.c @@ -0,0 +1,1399 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * CXL Flash Device Driver + * + * Written by: Matthew R. Ochs , IBM Corporation + * Uma Krishnan , IBM Corporation + * + * Copyright (C) 2018 IBM Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "backend.h" +#include "ocxl_hw.h" + +/* + * Pseudo-filesystem to allocate inodes. + */ + +#define OCXLFLASH_FS_MAGIC 0x1697698f + +static int ocxlflash_fs_cnt; +static struct vfsmount *ocxlflash_vfs_mount; + +static int ocxlflash_fs_init_fs_context(struct fs_context *fc) +{ + return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM; +} + +static struct file_system_type ocxlflash_fs_type = { + .name = "ocxlflash", + .owner = THIS_MODULE, + .init_fs_context = ocxlflash_fs_init_fs_context, + .kill_sb = kill_anon_super, +}; + +/* + * ocxlflash_release_mapping() - release the memory mapping + * @ctx: Context whose mapping is to be released. + */ +static void ocxlflash_release_mapping(struct ocxlflash_context *ctx) +{ + if (ctx->mapping) + simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt); + ctx->mapping = NULL; +} + +/* + * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file + * @dev: Generic device of the host. + * @name: Name of the pseudo filesystem. + * @fops: File operations. + * @priv: Private data. + * @flags: Flags for the file. + * + * Return: pointer to the file on success, ERR_PTR on failure + */ +static struct file *ocxlflash_getfile(struct device *dev, const char *name, + const struct file_operations *fops, + void *priv, int flags) +{ + struct file *file; + struct inode *inode; + int rc; + + if (fops->owner && !try_module_get(fops->owner)) { + dev_err(dev, "%s: Owner does not exist\n", __func__); + rc = -ENOENT; + goto err1; + } + + rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount, + &ocxlflash_fs_cnt); + if (unlikely(rc < 0)) { + dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n", + __func__, rc); + goto err2; + } + + inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb); + if (IS_ERR(inode)) { + rc = PTR_ERR(inode); + dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n", + __func__, rc); + goto err3; + } + + file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name, + flags & (O_ACCMODE | O_NONBLOCK), fops); + if (IS_ERR(file)) { + rc = PTR_ERR(file); + dev_err(dev, "%s: alloc_file failed rc=%d\n", + __func__, rc); + goto err4; + } + + file->private_data = priv; +out: + return file; +err4: + iput(inode); +err3: + simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt); +err2: + module_put(fops->owner); +err1: + file = ERR_PTR(rc); + goto out; +} + +/** + * ocxlflash_psa_map() - map the process specific MMIO space + * @ctx_cookie: Adapter context for which the mapping needs to be done. + * + * Return: MMIO pointer of the mapped region + */ +static void __iomem *ocxlflash_psa_map(void *ctx_cookie) +{ + struct ocxlflash_context *ctx = ctx_cookie; + struct device *dev = ctx->hw_afu->dev; + + mutex_lock(&ctx->state_mutex); + if (ctx->state != STARTED) { + dev_err(dev, "%s: Context not started, state=%d\n", __func__, + ctx->state); + mutex_unlock(&ctx->state_mutex); + return NULL; + } + mutex_unlock(&ctx->state_mutex); + + return ioremap(ctx->psn_phys, ctx->psn_size); +} + +/** + * ocxlflash_psa_unmap() - unmap the process specific MMIO space + * @addr: MMIO pointer to unmap. 
+ */ +static void ocxlflash_psa_unmap(void __iomem *addr) +{ + iounmap(addr); +} + +/** + * ocxlflash_process_element() - get process element of the adapter context + * @ctx_cookie: Adapter context associated with the process element. + * + * Return: process element of the adapter context + */ +static int ocxlflash_process_element(void *ctx_cookie) +{ + struct ocxlflash_context *ctx = ctx_cookie; + + return ctx->pe; +} + +/** + * afu_map_irq() - map the interrupt of the adapter context + * @flags: Flags. + * @ctx: Adapter context. + * @num: Per-context AFU interrupt number. + * @handler: Interrupt handler to register. + * @cookie: Interrupt handler private data. + * @name: Name of the interrupt. + * + * Return: 0 on success, -errno on failure + */ +static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num, + irq_handler_t handler, void *cookie, char *name) +{ + struct ocxl_hw_afu *afu = ctx->hw_afu; + struct device *dev = afu->dev; + struct ocxlflash_irqs *irq; + struct xive_irq_data *xd; + u32 virq; + int rc = 0; + + if (num < 0 || num >= ctx->num_irqs) { + dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num); + rc = -ENOENT; + goto out; + } + + irq = &ctx->irqs[num]; + virq = irq_create_mapping(NULL, irq->hwirq); + if (unlikely(!virq)) { + dev_err(dev, "%s: irq_create_mapping failed\n", __func__); + rc = -ENOMEM; + goto out; + } + + rc = request_irq(virq, handler, 0, name, cookie); + if (unlikely(rc)) { + dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc); + goto err1; + } + + xd = irq_get_handler_data(virq); + if (unlikely(!xd)) { + dev_err(dev, "%s: Can't get interrupt data\n", __func__); + rc = -ENXIO; + goto err2; + } + + irq->virq = virq; + irq->vtrig = xd->trig_mmio; +out: + return rc; +err2: + free_irq(virq, cookie); +err1: + irq_dispose_mapping(virq); + goto out; +} + +/** + * ocxlflash_map_afu_irq() - map the interrupt of the adapter context + * @ctx_cookie: Adapter context. + * @num: Per-context AFU interrupt number. + * @handler: Interrupt handler to register. + * @cookie: Interrupt handler private data. + * @name: Name of the interrupt. + * + * Return: 0 on success, -errno on failure + */ +static int ocxlflash_map_afu_irq(void *ctx_cookie, int num, + irq_handler_t handler, void *cookie, + char *name) +{ + return afu_map_irq(0, ctx_cookie, num, handler, cookie, name); +} + +/** + * afu_unmap_irq() - unmap the interrupt + * @flags: Flags. + * @ctx: Adapter context. + * @num: Per-context AFU interrupt number. + * @cookie: Interrupt handler private data. + */ +static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num, + void *cookie) +{ + struct ocxl_hw_afu *afu = ctx->hw_afu; + struct device *dev = afu->dev; + struct ocxlflash_irqs *irq; + + if (num < 0 || num >= ctx->num_irqs) { + dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num); + return; + } + + irq = &ctx->irqs[num]; + + if (irq_find_mapping(NULL, irq->hwirq)) { + free_irq(irq->virq, cookie); + irq_dispose_mapping(irq->virq); + } + + memset(irq, 0, sizeof(*irq)); +} + +/** + * ocxlflash_unmap_afu_irq() - unmap the interrupt + * @ctx_cookie: Adapter context. + * @num: Per-context AFU interrupt number. + * @cookie: Interrupt handler private data. + */ +static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie) +{ + return afu_unmap_irq(0, ctx_cookie, num, cookie); +} + +/** + * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt + * @ctx_cookie: Context associated with the interrupt. + * @irq: Interrupt number. 
+ * + * Return: effective address of the mapped region + */ +static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq) +{ + struct ocxlflash_context *ctx = ctx_cookie; + + if (irq < 0 || irq >= ctx->num_irqs) + return 0; + + return (__force u64)ctx->irqs[irq].vtrig; +} + +/** + * ocxlflash_xsl_fault() - callback when translation error is triggered + * @data: Private data provided at callback registration, the context. + * @addr: Address that triggered the error. + * @dsisr: Value of dsisr register. + */ +static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr) +{ + struct ocxlflash_context *ctx = data; + + spin_lock(&ctx->slock); + ctx->fault_addr = addr; + ctx->fault_dsisr = dsisr; + ctx->pending_fault = true; + spin_unlock(&ctx->slock); + + wake_up_all(&ctx->wq); +} + +/** + * start_context() - local routine to start a context + * @ctx: Adapter context to be started. + * + * Assign the context specific MMIO space, add and enable the PE. + * + * Return: 0 on success, -errno on failure + */ +static int start_context(struct ocxlflash_context *ctx) +{ + struct ocxl_hw_afu *afu = ctx->hw_afu; + struct ocxl_afu_config *acfg = &afu->acfg; + void *link_token = afu->link_token; + struct pci_dev *pdev = afu->pdev; + struct device *dev = afu->dev; + bool master = ctx->master; + struct mm_struct *mm; + int rc = 0; + u32 pid; + + mutex_lock(&ctx->state_mutex); + if (ctx->state != OPENED) { + dev_err(dev, "%s: Context state invalid, state=%d\n", + __func__, ctx->state); + rc = -EINVAL; + goto out; + } + + if (master) { + ctx->psn_size = acfg->global_mmio_size; + ctx->psn_phys = afu->gmmio_phys; + } else { + ctx->psn_size = acfg->pp_mmio_stride; + ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size); + } + + /* pid and mm not set for master contexts */ + if (master) { + pid = 0; + mm = NULL; + } else { + pid = current->mm->context.id; + mm = current->mm; + } + + rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, + pci_dev_id(pdev), mm, ocxlflash_xsl_fault, + ctx); + if (unlikely(rc)) { + dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n", + __func__, rc); + goto out; + } + + ctx->state = STARTED; +out: + mutex_unlock(&ctx->state_mutex); + return rc; +} + +/** + * ocxlflash_start_context() - start a kernel context + * @ctx_cookie: Adapter context to be started. + * + * Return: 0 on success, -errno on failure + */ +static int ocxlflash_start_context(void *ctx_cookie) +{ + struct ocxlflash_context *ctx = ctx_cookie; + + return start_context(ctx); +} + +/** + * ocxlflash_stop_context() - stop a context + * @ctx_cookie: Adapter context to be stopped. 
+ * + * Return: 0 on success, -errno on failure + */ +static int ocxlflash_stop_context(void *ctx_cookie) +{ + struct ocxlflash_context *ctx = ctx_cookie; + struct ocxl_hw_afu *afu = ctx->hw_afu; + struct ocxl_afu_config *acfg = &afu->acfg; + struct pci_dev *pdev = afu->pdev; + struct device *dev = afu->dev; + enum ocxlflash_ctx_state state; + int rc = 0; + + mutex_lock(&ctx->state_mutex); + state = ctx->state; + ctx->state = CLOSED; + mutex_unlock(&ctx->state_mutex); + if (state != STARTED) + goto out; + + rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos, + ctx->pe); + if (unlikely(rc)) { + dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n", + __func__, rc); + /* If EBUSY, PE could be referenced in future by the AFU */ + if (rc == -EBUSY) + goto out; + } + + rc = ocxl_link_remove_pe(afu->link_token, ctx->pe); + if (unlikely(rc)) { + dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n", + __func__, rc); + goto out; + } +out: + return rc; +} + +/** + * ocxlflash_afu_reset() - reset the AFU + * @ctx_cookie: Adapter context. + */ +static int ocxlflash_afu_reset(void *ctx_cookie) +{ + struct ocxlflash_context *ctx = ctx_cookie; + struct device *dev = ctx->hw_afu->dev; + + /* Pending implementation from OCXL transport services */ + dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__); + + /* Silently return success until it is implemented */ + return 0; +} + +/** + * ocxlflash_set_master() - sets the context as master + * @ctx_cookie: Adapter context to set as master. + */ +static void ocxlflash_set_master(void *ctx_cookie) +{ + struct ocxlflash_context *ctx = ctx_cookie; + + ctx->master = true; +} + +/** + * ocxlflash_get_context() - obtains the context associated with the host + * @pdev: PCI device associated with the host. + * @afu_cookie: Hardware AFU associated with the host. + * + * Return: returns the pointer to host adapter context + */ +static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie) +{ + struct ocxl_hw_afu *afu = afu_cookie; + + return afu->ocxl_ctx; +} + +/** + * ocxlflash_dev_context_init() - allocate and initialize an adapter context + * @pdev: PCI device associated with the host. + * @afu_cookie: Hardware AFU associated with the host. + * + * Return: returns the adapter context on success, ERR_PTR on failure + */ +static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie) +{ + struct ocxl_hw_afu *afu = afu_cookie; + struct device *dev = afu->dev; + struct ocxlflash_context *ctx; + int rc; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (unlikely(!ctx)) { + dev_err(dev, "%s: Context allocation failed\n", __func__); + rc = -ENOMEM; + goto err1; + } + + idr_preload(GFP_KERNEL); + rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT); + idr_preload_end(); + if (unlikely(rc < 0)) { + dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc); + goto err2; + } + + spin_lock_init(&ctx->slock); + init_waitqueue_head(&ctx->wq); + mutex_init(&ctx->state_mutex); + + ctx->state = OPENED; + ctx->pe = rc; + ctx->master = false; + ctx->mapping = NULL; + ctx->hw_afu = afu; + ctx->irq_bitmap = 0; + ctx->pending_irq = false; + ctx->pending_fault = false; +out: + return ctx; +err2: + kfree(ctx); +err1: + ctx = ERR_PTR(rc); + goto out; +} + +/** + * ocxlflash_release_context() - releases an adapter context + * @ctx_cookie: Adapter context to be released. 
+ * + * Return: 0 on success, -errno on failure + */ +static int ocxlflash_release_context(void *ctx_cookie) +{ + struct ocxlflash_context *ctx = ctx_cookie; + struct device *dev; + int rc = 0; + + if (!ctx) + goto out; + + dev = ctx->hw_afu->dev; + mutex_lock(&ctx->state_mutex); + if (ctx->state >= STARTED) { + dev_err(dev, "%s: Context in use, state=%d\n", __func__, + ctx->state); + mutex_unlock(&ctx->state_mutex); + rc = -EBUSY; + goto out; + } + mutex_unlock(&ctx->state_mutex); + + idr_remove(&ctx->hw_afu->idr, ctx->pe); + ocxlflash_release_mapping(ctx); + kfree(ctx); +out: + return rc; +} + +/** + * ocxlflash_perst_reloads_same_image() - sets the image reload policy + * @afu_cookie: Hardware AFU associated with the host. + * @image: Whether to load the same image on PERST. + */ +static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image) +{ + struct ocxl_hw_afu *afu = afu_cookie; + + afu->perst_same_image = image; +} + +/** + * ocxlflash_read_adapter_vpd() - reads the adapter VPD + * @pdev: PCI device associated with the host. + * @buf: Buffer to get the VPD data. + * @count: Size of buffer (maximum bytes that can be read). + * + * Return: size of VPD on success, -errno on failure + */ +static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf, + size_t count) +{ + return pci_read_vpd(pdev, 0, count, buf); +} + +/** + * free_afu_irqs() - internal service to free interrupts + * @ctx: Adapter context. + */ +static void free_afu_irqs(struct ocxlflash_context *ctx) +{ + struct ocxl_hw_afu *afu = ctx->hw_afu; + struct device *dev = afu->dev; + int i; + + if (!ctx->irqs) { + dev_err(dev, "%s: Interrupts not allocated\n", __func__); + return; + } + + for (i = ctx->num_irqs; i >= 0; i--) + ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq); + + kfree(ctx->irqs); + ctx->irqs = NULL; +} + +/** + * alloc_afu_irqs() - internal service to allocate interrupts + * @ctx: Context associated with the request. + * @num: Number of interrupts requested. + * + * Return: 0 on success, -errno on failure + */ +static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num) +{ + struct ocxl_hw_afu *afu = ctx->hw_afu; + struct device *dev = afu->dev; + struct ocxlflash_irqs *irqs; + int rc = 0; + int hwirq; + int i; + + if (ctx->irqs) { + dev_err(dev, "%s: Interrupts already allocated\n", __func__); + rc = -EEXIST; + goto out; + } + + if (num > OCXL_MAX_IRQS) { + dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num); + rc = -EINVAL; + goto out; + } + + irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL); + if (unlikely(!irqs)) { + dev_err(dev, "%s: Context irqs allocation failed\n", __func__); + rc = -ENOMEM; + goto out; + } + + for (i = 0; i < num; i++) { + rc = ocxl_link_irq_alloc(afu->link_token, &hwirq); + if (unlikely(rc)) { + dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n", + __func__, rc); + goto err; + } + + irqs[i].hwirq = hwirq; + } + + ctx->irqs = irqs; + ctx->num_irqs = num; +out: + return rc; +err: + for (i = i-1; i >= 0; i--) + ocxl_link_free_irq(afu->link_token, irqs[i].hwirq); + kfree(irqs); + goto out; +} + +/** + * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts + * @ctx_cookie: Context associated with the request. + * @num: Number of interrupts requested. 
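Aside on alloc_afu_irqs() above: it follows the usual allocate-N-or-unwind idiom, freeing in reverse order everything that succeeded before the failure. A minimal standalone sketch of the same idiom is below; acquire() and release() are hypothetical stand-ins, not the ocxl_link_irq_alloc()/ocxl_link_free_irq() API.

/* Allocate-N-or-unwind sketch: on the first failure, release what was
 * already acquired, in reverse order, then report the error.
 */
#include <stdio.h>
#include <stdlib.h>

static int acquire(int idx, int *handle)
{
	if (idx == 3)          /* simulate a failure on the fourth resource */
		return -1;
	*handle = 100 + idx;   /* fabricated handle value */
	return 0;
}

static void release(int handle)
{
	printf("released handle %d\n", handle);
}

static int alloc_bundle(int num, int **out)
{
	int *handles = calloc(num, sizeof(*handles));
	int rc = 0, i;

	if (!handles)
		return -1;

	for (i = 0; i < num; i++) {
		rc = acquire(i, &handles[i]);
		if (rc)
			goto err;
	}
	*out = handles;
	return 0;
err:
	for (i = i - 1; i >= 0; i--)   /* unwind only what succeeded */
		release(handles[i]);
	free(handles);
	return rc;
}

int main(void)
{
	int *handles = NULL;

	if (alloc_bundle(5, &handles))
		printf("allocation failed and was fully unwound\n");
	else
		free(handles);
	return 0;
}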
+ * + * Return: 0 on success, -errno on failure + */ +static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num) +{ + return alloc_afu_irqs(ctx_cookie, num); +} + +/** + * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context + * @ctx_cookie: Adapter context. + */ +static void ocxlflash_free_afu_irqs(void *ctx_cookie) +{ + free_afu_irqs(ctx_cookie); +} + +/** + * ocxlflash_unconfig_afu() - unconfigure the AFU + * @afu: AFU associated with the host. + */ +static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu) +{ + if (afu->gmmio_virt) { + iounmap(afu->gmmio_virt); + afu->gmmio_virt = NULL; + } +} + +/** + * ocxlflash_destroy_afu() - destroy the AFU structure + * @afu_cookie: AFU to be freed. + */ +static void ocxlflash_destroy_afu(void *afu_cookie) +{ + struct ocxl_hw_afu *afu = afu_cookie; + int pos; + + if (!afu) + return; + + ocxlflash_release_context(afu->ocxl_ctx); + idr_destroy(&afu->idr); + + /* Disable the AFU */ + pos = afu->acfg.dvsec_afu_control_pos; + ocxl_config_set_afu_state(afu->pdev, pos, 0); + + ocxlflash_unconfig_afu(afu); + kfree(afu); +} + +/** + * ocxlflash_config_fn() - configure the host function + * @pdev: PCI device associated with the host. + * @afu: AFU associated with the host. + * + * Return: 0 on success, -errno on failure + */ +static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu) +{ + struct ocxl_fn_config *fcfg = &afu->fcfg; + struct device *dev = &pdev->dev; + u16 base, enabled, supported; + int rc = 0; + + /* Read DVSEC config of the function */ + rc = ocxl_config_read_function(pdev, fcfg); + if (unlikely(rc)) { + dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n", + __func__, rc); + goto out; + } + + /* Check if function has AFUs defined, only 1 per function supported */ + if (fcfg->max_afu_index >= 0) { + afu->is_present = true; + if (fcfg->max_afu_index != 0) + dev_warn(dev, "%s: Unexpected AFU index value %d\n", + __func__, fcfg->max_afu_index); + } + + rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported); + if (unlikely(rc)) { + dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n", + __func__, rc); + goto out; + } + + afu->fn_actag_base = base; + afu->fn_actag_enabled = enabled; + + ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled); + dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n", + __func__, base, enabled); + + rc = ocxl_link_setup(pdev, 0, &afu->link_token); + if (unlikely(rc)) { + dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n", + __func__, rc); + goto out; + } + + rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos); + if (unlikely(rc)) { + dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n", + __func__, rc); + goto err; + } +out: + return rc; +err: + ocxl_link_release(pdev, afu->link_token); + goto out; +} + +/** + * ocxlflash_unconfig_fn() - unconfigure the host function + * @pdev: PCI device associated with the host. + * @afu: AFU associated with the host. + */ +static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu) +{ + ocxl_link_release(pdev, afu->link_token); +} + +/** + * ocxlflash_map_mmio() - map the AFU MMIO space + * @afu: AFU associated with the host. 
+ * + * Return: 0 on success, -errno on failure + */ +static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu) +{ + struct ocxl_afu_config *acfg = &afu->acfg; + struct pci_dev *pdev = afu->pdev; + struct device *dev = afu->dev; + phys_addr_t gmmio, ppmmio; + int rc = 0; + + rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash"); + if (unlikely(rc)) { + dev_err(dev, "%s: pci_request_region for global failed rc=%d\n", + __func__, rc); + goto out; + } + gmmio = pci_resource_start(pdev, acfg->global_mmio_bar); + gmmio += acfg->global_mmio_offset; + + rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash"); + if (unlikely(rc)) { + dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n", + __func__, rc); + goto err1; + } + ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar); + ppmmio += acfg->pp_mmio_offset; + + afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size); + if (unlikely(!afu->gmmio_virt)) { + dev_err(dev, "%s: MMIO mapping failed\n", __func__); + rc = -ENOMEM; + goto err2; + } + + afu->gmmio_phys = gmmio; + afu->ppmmio_phys = ppmmio; +out: + return rc; +err2: + pci_release_region(pdev, acfg->pp_mmio_bar); +err1: + pci_release_region(pdev, acfg->global_mmio_bar); + goto out; +} + +/** + * ocxlflash_config_afu() - configure the host AFU + * @pdev: PCI device associated with the host. + * @afu: AFU associated with the host. + * + * Must be called _after_ host function configuration. + * + * Return: 0 on success, -errno on failure + */ +static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu) +{ + struct ocxl_afu_config *acfg = &afu->acfg; + struct ocxl_fn_config *fcfg = &afu->fcfg; + struct device *dev = &pdev->dev; + int count; + int base; + int pos; + int rc = 0; + + /* This HW AFU function does not have any AFUs defined */ + if (!afu->is_present) + goto out; + + /* Read AFU config at index 0 */ + rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0); + if (unlikely(rc)) { + dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n", + __func__, rc); + goto out; + } + + /* Only one AFU per function is supported, so actag_base is same */ + base = afu->fn_actag_base; + count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled); + pos = acfg->dvsec_afu_control_pos; + + ocxl_config_set_afu_actag(pdev, pos, base, count); + dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count); + afu->afu_actag_base = base; + afu->afu_actag_enabled = count; + afu->max_pasid = 1 << acfg->pasid_supported_log; + + ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log); + + rc = ocxlflash_map_mmio(afu); + if (unlikely(rc)) { + dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n", + __func__, rc); + goto out; + } + + /* Enable the AFU */ + ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1); +out: + return rc; +} + +/** + * ocxlflash_create_afu() - create the AFU for OCXL + * @pdev: PCI device associated with the host. 
+ * + * Return: AFU on success, NULL on failure + */ +static void *ocxlflash_create_afu(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct ocxlflash_context *ctx; + struct ocxl_hw_afu *afu; + int rc; + + afu = kzalloc(sizeof(*afu), GFP_KERNEL); + if (unlikely(!afu)) { + dev_err(dev, "%s: HW AFU allocation failed\n", __func__); + goto out; + } + + afu->pdev = pdev; + afu->dev = dev; + idr_init(&afu->idr); + + rc = ocxlflash_config_fn(pdev, afu); + if (unlikely(rc)) { + dev_err(dev, "%s: Function configuration failed rc=%d\n", + __func__, rc); + goto err1; + } + + rc = ocxlflash_config_afu(pdev, afu); + if (unlikely(rc)) { + dev_err(dev, "%s: AFU configuration failed rc=%d\n", + __func__, rc); + goto err2; + } + + ctx = ocxlflash_dev_context_init(pdev, afu); + if (IS_ERR(ctx)) { + rc = PTR_ERR(ctx); + dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n", + __func__, rc); + goto err3; + } + + afu->ocxl_ctx = ctx; +out: + return afu; +err3: + ocxlflash_unconfig_afu(afu); +err2: + ocxlflash_unconfig_fn(pdev, afu); +err1: + idr_destroy(&afu->idr); + kfree(afu); + afu = NULL; + goto out; +} + +/** + * ctx_event_pending() - check for any event pending on the context + * @ctx: Context to be checked. + * + * Return: true if there is an event pending, false if none pending + */ +static inline bool ctx_event_pending(struct ocxlflash_context *ctx) +{ + if (ctx->pending_irq || ctx->pending_fault) + return true; + + return false; +} + +/** + * afu_poll() - poll the AFU for events on the context + * @file: File associated with the adapter context. + * @poll: Poll structure from the user. + * + * Return: poll mask + */ +static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll) +{ + struct ocxlflash_context *ctx = file->private_data; + struct device *dev = ctx->hw_afu->dev; + ulong lock_flags; + int mask = 0; + + poll_wait(file, &ctx->wq, poll); + + spin_lock_irqsave(&ctx->slock, lock_flags); + if (ctx_event_pending(ctx)) + mask |= POLLIN | POLLRDNORM; + else if (ctx->state == CLOSED) + mask |= POLLERR; + spin_unlock_irqrestore(&ctx->slock, lock_flags); + + dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n", + __func__, ctx->pe, mask); + + return mask; +} + +/** + * afu_read() - perform a read on the context for any event + * @file: File associated with the adapter context. + * @buf: Buffer to receive the data. + * @count: Size of buffer (maximum bytes that can be read). + * @off: Offset. 
+ * + * Return: size of the data read on success, -errno on failure + */ +static ssize_t afu_read(struct file *file, char __user *buf, size_t count, + loff_t *off) +{ + struct ocxlflash_context *ctx = file->private_data; + struct device *dev = ctx->hw_afu->dev; + struct cxl_event event; + ulong lock_flags; + ssize_t esize; + ssize_t rc; + int bit; + DEFINE_WAIT(event_wait); + + if (*off != 0) { + dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n", + __func__, *off); + rc = -EINVAL; + goto out; + } + + spin_lock_irqsave(&ctx->slock, lock_flags); + + for (;;) { + prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE); + + if (ctx_event_pending(ctx) || (ctx->state == CLOSED)) + break; + + if (file->f_flags & O_NONBLOCK) { + dev_err(dev, "%s: File cannot be blocked on I/O\n", + __func__); + rc = -EAGAIN; + goto err; + } + + if (signal_pending(current)) { + dev_err(dev, "%s: Signal pending on the process\n", + __func__); + rc = -ERESTARTSYS; + goto err; + } + + spin_unlock_irqrestore(&ctx->slock, lock_flags); + schedule(); + spin_lock_irqsave(&ctx->slock, lock_flags); + } + + finish_wait(&ctx->wq, &event_wait); + + memset(&event, 0, sizeof(event)); + event.header.process_element = ctx->pe; + event.header.size = sizeof(struct cxl_event_header); + if (ctx->pending_irq) { + esize = sizeof(struct cxl_event_afu_interrupt); + event.header.size += esize; + event.header.type = CXL_EVENT_AFU_INTERRUPT; + + bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs); + clear_bit(bit, &ctx->irq_bitmap); + event.irq.irq = bit + 1; + if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs)) + ctx->pending_irq = false; + } else if (ctx->pending_fault) { + event.header.size += sizeof(struct cxl_event_data_storage); + event.header.type = CXL_EVENT_DATA_STORAGE; + event.fault.addr = ctx->fault_addr; + event.fault.dsisr = ctx->fault_dsisr; + ctx->pending_fault = false; + } + + spin_unlock_irqrestore(&ctx->slock, lock_flags); + + if (copy_to_user(buf, &event, event.header.size)) { + dev_err(dev, "%s: copy_to_user failed\n", __func__); + rc = -EFAULT; + goto out; + } + + rc = event.header.size; +out: + return rc; +err: + finish_wait(&ctx->wq, &event_wait); + spin_unlock_irqrestore(&ctx->slock, lock_flags); + goto out; +} + +/** + * afu_release() - release and free the context + * @inode: File inode pointer. + * @file: File associated with the context. + * + * Return: 0 on success, -errno on failure + */ +static int afu_release(struct inode *inode, struct file *file) +{ + struct ocxlflash_context *ctx = file->private_data; + int i; + + /* Unmap and free the interrupts associated with the context */ + for (i = ctx->num_irqs; i >= 0; i--) + afu_unmap_irq(0, ctx, i, ctx); + free_afu_irqs(ctx); + + return ocxlflash_release_context(ctx); +} + +/** + * ocxlflash_mmap_fault() - mmap fault handler + * @vmf: VM fault associated with current fault. 
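Aside on afu_poll()/afu_read() above: user space consumes these events by polling the context file descriptor and reading one struct cxl_event at a time. A hedged userspace-side sketch follows; it assumes the fd came from the attach path and that the cxl UAPI header is installed as <misc/cxl.h>, and it trims error handling.

/* Userspace sketch: wait for an event on the context fd, then decode it.
 * POLLERR indicates the context was closed; the irq number is 1-based,
 * matching the bit + 1 conversion in afu_read().
 */
#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <misc/cxl.h>

static void drain_events(int ctx_fd)
{
	struct pollfd pfd = { .fd = ctx_fd, .events = POLLIN };
	struct cxl_event event;

	for (;;) {
		if (poll(&pfd, 1, -1) <= 0)
			break;
		if (pfd.revents & POLLERR)      /* context was closed */
			break;
		if (read(ctx_fd, &event, sizeof(event)) <= 0)
			break;

		switch (event.header.type) {
		case CXL_EVENT_AFU_INTERRUPT:   /* AFU interrupt delivered */
			printf("AFU interrupt %u on pe %u\n",
			       event.irq.irq, event.header.process_element);
			break;
		case CXL_EVENT_DATA_STORAGE:    /* translation fault details */
			printf("fault addr=0x%llx dsisr=0x%llx\n",
			       (unsigned long long)event.fault.addr,
			       (unsigned long long)event.fault.dsisr);
			break;
		default:
			break;
		}
	}
}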
+ * + * Return: 0 on success, -errno on failure + */ +static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct ocxlflash_context *ctx = vma->vm_file->private_data; + struct device *dev = ctx->hw_afu->dev; + u64 mmio_area, offset; + + offset = vmf->pgoff << PAGE_SHIFT; + if (offset >= ctx->psn_size) + return VM_FAULT_SIGBUS; + + mutex_lock(&ctx->state_mutex); + if (ctx->state != STARTED) { + dev_err(dev, "%s: Context not started, state=%d\n", + __func__, ctx->state); + mutex_unlock(&ctx->state_mutex); + return VM_FAULT_SIGBUS; + } + mutex_unlock(&ctx->state_mutex); + + mmio_area = ctx->psn_phys; + mmio_area += offset; + + return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT); +} + +static const struct vm_operations_struct ocxlflash_vmops = { + .fault = ocxlflash_mmap_fault, +}; + +/** + * afu_mmap() - map the fault handler operations + * @file: File associated with the context. + * @vma: VM area associated with mapping. + * + * Return: 0 on success, -errno on failure + */ +static int afu_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct ocxlflash_context *ctx = file->private_data; + + if ((vma_pages(vma) + vma->vm_pgoff) > + (ctx->psn_size >> PAGE_SHIFT)) + return -EINVAL; + + vm_flags_set(vma, VM_IO | VM_PFNMAP); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_ops = &ocxlflash_vmops; + return 0; +} + +static const struct file_operations ocxl_afu_fops = { + .owner = THIS_MODULE, + .poll = afu_poll, + .read = afu_read, + .release = afu_release, + .mmap = afu_mmap, +}; + +#define PATCH_FOPS(NAME) \ + do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0) + +/** + * ocxlflash_get_fd() - get file descriptor for an adapter context + * @ctx_cookie: Adapter context. + * @fops: File operations to be associated. + * @fd: File descriptor to be returned back. + * + * Return: pointer to the file on success, ERR_PTR on failure + */ +static struct file *ocxlflash_get_fd(void *ctx_cookie, + struct file_operations *fops, int *fd) +{ + struct ocxlflash_context *ctx = ctx_cookie; + struct device *dev = ctx->hw_afu->dev; + struct file *file; + int flags, fdtmp; + int rc = 0; + char *name = NULL; + + /* Only allow one fd per context */ + if (ctx->mapping) { + dev_err(dev, "%s: Context is already mapped to an fd\n", + __func__); + rc = -EEXIST; + goto err1; + } + + flags = O_RDWR | O_CLOEXEC; + + /* This code is similar to anon_inode_getfd() */ + rc = get_unused_fd_flags(flags); + if (unlikely(rc < 0)) { + dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n", + __func__, rc); + goto err1; + } + fdtmp = rc; + + /* Patch the file ops that are not defined */ + if (fops) { + PATCH_FOPS(poll); + PATCH_FOPS(read); + PATCH_FOPS(release); + PATCH_FOPS(mmap); + } else /* Use default ops */ + fops = (struct file_operations *)&ocxl_afu_fops; + + name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe); + file = ocxlflash_getfile(dev, name, fops, ctx, flags); + kfree(name); + if (IS_ERR(file)) { + rc = PTR_ERR(file); + dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n", + __func__, rc); + goto err2; + } + + ctx->mapping = file->f_mapping; + *fd = fdtmp; +out: + return file; +err2: + put_unused_fd(fdtmp); +err1: + file = ERR_PTR(rc); + goto out; +} + +/** + * ocxlflash_fops_get_context() - get the context associated with the file + * @file: File associated with the adapter context. 
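Aside on afu_mmap() and ocxlflash_mmap_fault() above: the per-context MMIO window is exposed to user space as a PFN map through the context fd. A hedged userspace-side sketch follows; the 64 KB size is an assumption matching the per-context user MMIO area in the SISlite map, and the fd is assumed to come from the attach path.

/* Userspace sketch: map the per-context host MMIO window through the fd
 * returned by the get_fd path. Mappings beyond psn_size are rejected by
 * afu_mmap() with -EINVAL, and accesses before the context is STARTED
 * fault with SIGBUS via ocxlflash_mmap_fault().
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

#define CTX_MMIO_SIZE 0x10000   /* assumed 64 KB per-context host MMIO area */

static volatile uint64_t *map_context_mmio(int ctx_fd)
{
	void *p = mmap(NULL, CTX_MMIO_SIZE, PROT_READ | PROT_WRITE,
		       MAP_SHARED, ctx_fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return NULL;
	}
	return p;   /* SISlite requires 64-bit accesses to this space */
}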
+ * + * Return: pointer to the context + */ +static void *ocxlflash_fops_get_context(struct file *file) +{ + return file->private_data; +} + +/** + * ocxlflash_afu_irq() - interrupt handler for user contexts + * @irq: Interrupt number. + * @data: Private data provided at interrupt registration, the context. + * + * Return: Always return IRQ_HANDLED. + */ +static irqreturn_t ocxlflash_afu_irq(int irq, void *data) +{ + struct ocxlflash_context *ctx = data; + struct device *dev = ctx->hw_afu->dev; + int i; + + dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n", + __func__, ctx->pe, irq); + + for (i = 0; i < ctx->num_irqs; i++) { + if (ctx->irqs[i].virq == irq) + break; + } + if (unlikely(i >= ctx->num_irqs)) { + dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__); + goto out; + } + + spin_lock(&ctx->slock); + set_bit(i - 1, &ctx->irq_bitmap); + ctx->pending_irq = true; + spin_unlock(&ctx->slock); + + wake_up_all(&ctx->wq); +out: + return IRQ_HANDLED; +} + +/** + * ocxlflash_start_work() - start a user context + * @ctx_cookie: Context to be started. + * @num_irqs: Number of interrupts requested. + * + * Return: 0 on success, -errno on failure + */ +static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs) +{ + struct ocxlflash_context *ctx = ctx_cookie; + struct ocxl_hw_afu *afu = ctx->hw_afu; + struct device *dev = afu->dev; + char *name; + int rc = 0; + int i; + + rc = alloc_afu_irqs(ctx, num_irqs); + if (unlikely(rc < 0)) { + dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc); + goto out; + } + + for (i = 0; i < num_irqs; i++) { + name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i", + dev_name(dev), ctx->pe, i); + rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name); + kfree(name); + if (unlikely(rc < 0)) { + dev_err(dev, "%s: afu_map_irq failed rc=%d\n", + __func__, rc); + goto err; + } + } + + rc = start_context(ctx); + if (unlikely(rc)) { + dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc); + goto err; + } +out: + return rc; +err: + for (i = i-1; i >= 0; i--) + afu_unmap_irq(0, ctx, i, ctx); + free_afu_irqs(ctx); + goto out; +}; + +/** + * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor + * @file: File installed with adapter file descriptor. + * @vma: VM area associated with mapping. + * + * Return: 0 on success, -errno on failure + */ +static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma) +{ + return afu_mmap(file, vma); +} + +/** + * ocxlflash_fd_release() - release the context associated with the file + * @inode: File inode pointer. + * @file: File associated with the adapter context. 
+ * + * Return: 0 on success, -errno on failure + */ +static int ocxlflash_fd_release(struct inode *inode, struct file *file) +{ + return afu_release(inode, file); +} + +/* Backend ops to ocxlflash services */ +const struct cxlflash_backend_ops cxlflash_ocxl_ops = { + .module = THIS_MODULE, + .psa_map = ocxlflash_psa_map, + .psa_unmap = ocxlflash_psa_unmap, + .process_element = ocxlflash_process_element, + .map_afu_irq = ocxlflash_map_afu_irq, + .unmap_afu_irq = ocxlflash_unmap_afu_irq, + .get_irq_objhndl = ocxlflash_get_irq_objhndl, + .start_context = ocxlflash_start_context, + .stop_context = ocxlflash_stop_context, + .afu_reset = ocxlflash_afu_reset, + .set_master = ocxlflash_set_master, + .get_context = ocxlflash_get_context, + .dev_context_init = ocxlflash_dev_context_init, + .release_context = ocxlflash_release_context, + .perst_reloads_same_image = ocxlflash_perst_reloads_same_image, + .read_adapter_vpd = ocxlflash_read_adapter_vpd, + .allocate_afu_irqs = ocxlflash_allocate_afu_irqs, + .free_afu_irqs = ocxlflash_free_afu_irqs, + .create_afu = ocxlflash_create_afu, + .destroy_afu = ocxlflash_destroy_afu, + .get_fd = ocxlflash_get_fd, + .fops_get_context = ocxlflash_fops_get_context, + .start_work = ocxlflash_start_work, + .fd_mmap = ocxlflash_fd_mmap, + .fd_release = ocxlflash_fd_release, +}; diff --git a/drivers/scsi/cxlflash/ocxl_hw.h b/drivers/scsi/cxlflash/ocxl_hw.h new file mode 100644 index 000000000..f2fe88816 --- /dev/null +++ b/drivers/scsi/cxlflash/ocxl_hw.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * CXL Flash Device Driver + * + * Written by: Matthew R. Ochs , IBM Corporation + * Uma Krishnan , IBM Corporation + * + * Copyright (C) 2018 IBM Corporation + */ + +#define OCXL_MAX_IRQS 4 /* Max interrupts per process */ + +struct ocxlflash_irqs { + int hwirq; + u32 virq; + void __iomem *vtrig; +}; + +/* OCXL hardware AFU associated with the host */ +struct ocxl_hw_afu { + struct ocxlflash_context *ocxl_ctx; /* Host context */ + struct pci_dev *pdev; /* PCI device */ + struct device *dev; /* Generic device */ + bool perst_same_image; /* Same image loaded on perst */ + + struct ocxl_fn_config fcfg; /* DVSEC config of the function */ + struct ocxl_afu_config acfg; /* AFU configuration data */ + + int fn_actag_base; /* Function acTag base */ + int fn_actag_enabled; /* Function acTag number enabled */ + int afu_actag_base; /* AFU acTag base */ + int afu_actag_enabled; /* AFU acTag number enabled */ + + phys_addr_t ppmmio_phys; /* Per process MMIO space */ + phys_addr_t gmmio_phys; /* Global AFU MMIO space */ + void __iomem *gmmio_virt; /* Global MMIO map */ + + void *link_token; /* Link token for the SPA */ + struct idr idr; /* IDR to manage contexts */ + int max_pasid; /* Maximum number of contexts */ + bool is_present; /* Function has AFUs defined */ +}; + +enum ocxlflash_ctx_state { + CLOSED, + OPENED, + STARTED +}; + +struct ocxlflash_context { + struct ocxl_hw_afu *hw_afu; /* HW AFU back pointer */ + struct address_space *mapping; /* Mapping for pseudo filesystem */ + bool master; /* Whether this is a master context */ + int pe; /* Process element */ + + phys_addr_t psn_phys; /* Process mapping */ + u64 psn_size; /* Process mapping size */ + + spinlock_t slock; /* Protects irq/fault/event updates */ + wait_queue_head_t wq; /* Wait queue for poll and interrupts */ + struct mutex state_mutex; /* Mutex to update context state */ + enum ocxlflash_ctx_state state; /* Context state */ + + struct ocxlflash_irqs *irqs; /* Pointer to array of structures 
*/ + int num_irqs; /* Number of interrupts */ + bool pending_irq; /* Pending interrupt on the context */ + ulong irq_bitmap; /* Bits indicating pending irq num */ + + u64 fault_addr; /* Address that triggered the fault */ + u64 fault_dsisr; /* Value of dsisr register at fault */ + bool pending_fault; /* Pending translation fault */ +}; diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h new file mode 100644 index 000000000..ab315c595 --- /dev/null +++ b/drivers/scsi/cxlflash/sislite.h @@ -0,0 +1,560 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * CXL Flash Device Driver + * + * Written by: Manoj N. Kumar , IBM Corporation + * Matthew R. Ochs , IBM Corporation + * + * Copyright (C) 2015 IBM Corporation + */ + +#ifndef _SISLITE_H +#define _SISLITE_H + +#include + +typedef u16 ctx_hndl_t; +typedef u32 res_hndl_t; + +#define SIZE_4K 4096 +#define SIZE_64K 65536 + +/* + * IOARCB: 64 bytes, min 16 byte alignment required, host native endianness + * except for SCSI CDB which remains big endian per SCSI standards. + */ +struct sisl_ioarcb { + u16 ctx_id; /* ctx_hndl_t */ + u16 req_flags; +#define SISL_REQ_FLAGS_RES_HNDL 0x8000U /* bit 0 (MSB) */ +#define SISL_REQ_FLAGS_PORT_LUN_ID 0x0000U + +#define SISL_REQ_FLAGS_SUP_UNDERRUN 0x4000U /* bit 1 */ + +#define SISL_REQ_FLAGS_TIMEOUT_SECS 0x0000U /* bits 8,9 */ +#define SISL_REQ_FLAGS_TIMEOUT_MSECS 0x0040U +#define SISL_REQ_FLAGS_TIMEOUT_USECS 0x0080U +#define SISL_REQ_FLAGS_TIMEOUT_CYCLES 0x00C0U + +#define SISL_REQ_FLAGS_TMF_CMD 0x0004u /* bit 13 */ + +#define SISL_REQ_FLAGS_AFU_CMD 0x0002U /* bit 14 */ + +#define SISL_REQ_FLAGS_HOST_WRITE 0x0001U /* bit 15 (LSB) */ +#define SISL_REQ_FLAGS_HOST_READ 0x0000U + + union { + u32 res_hndl; /* res_hndl_t */ + u32 port_sel; /* this is a selection mask: + * 0x1 -> port#0 can be selected, + * 0x2 -> port#1 can be selected. + * Can be bitwise ORed. 
+ */ + }; + u64 lun_id; + u32 data_len; /* 4K for read/write */ + u32 ioadl_len; + union { + u64 data_ea; /* min 16 byte aligned */ + u64 ioadl_ea; + }; + u8 msi; /* LISN to send on RRQ write */ +#define SISL_MSI_CXL_PFAULT 0 /* reserved for CXL page faults */ +#define SISL_MSI_SYNC_ERROR 1 /* recommended for AFU sync error */ +#define SISL_MSI_RRQ_UPDATED 2 /* recommended for IO completion */ +#define SISL_MSI_ASYNC_ERROR 3 /* master only - for AFU async error */ + + u8 rrq; /* 0 for a single RRQ */ + u16 timeout; /* in units specified by req_flags */ + u32 rsvd1; + u8 cdb[16]; /* must be in big endian */ +#define SISL_AFU_CMD_SYNC 0xC0 /* AFU sync command */ +#define SISL_AFU_CMD_LUN_PROVISION 0xD0 /* AFU LUN provision command */ +#define SISL_AFU_CMD_DEBUG 0xE0 /* AFU debug command */ + +#define SISL_AFU_LUN_PROVISION_CREATE 0x00 /* LUN provision create type */ +#define SISL_AFU_LUN_PROVISION_DELETE 0x01 /* LUN provision delete type */ + + union { + u64 reserved; /* Reserved for IOARRIN mode */ + struct sisl_ioasa *ioasa; /* IOASA EA for SQ Mode */ + }; +} __packed; + +struct sisl_rc { + u8 flags; +#define SISL_RC_FLAGS_SENSE_VALID 0x80U +#define SISL_RC_FLAGS_FCP_RSP_CODE_VALID 0x40U +#define SISL_RC_FLAGS_OVERRUN 0x20U +#define SISL_RC_FLAGS_UNDERRUN 0x10U + + u8 afu_rc; +#define SISL_AFU_RC_RHT_INVALID 0x01U /* user error */ +#define SISL_AFU_RC_RHT_UNALIGNED 0x02U /* should never happen */ +#define SISL_AFU_RC_RHT_OUT_OF_BOUNDS 0x03u /* user error */ +#define SISL_AFU_RC_RHT_DMA_ERR 0x04u /* see afu_extra + * may retry if afu_retry is off + * possible on master exit + */ +#define SISL_AFU_RC_RHT_RW_PERM 0x05u /* no RW perms, user error */ +#define SISL_AFU_RC_LXT_UNALIGNED 0x12U /* should never happen */ +#define SISL_AFU_RC_LXT_OUT_OF_BOUNDS 0x13u /* user error */ +#define SISL_AFU_RC_LXT_DMA_ERR 0x14u /* see afu_extra + * may retry if afu_retry is off + * possible on master exit + */ +#define SISL_AFU_RC_LXT_RW_PERM 0x15u /* no RW perms, user error */ + +#define SISL_AFU_RC_NOT_XLATE_HOST 0x1au /* possible if master exited */ + + /* NO_CHANNELS means the FC ports selected by dest_port in + * IOARCB or in the LXT entry are down when the AFU tried to select + * a FC port. If the port went down on an active IO, it will set + * fc_rc to =0x54(NOLOGI) or 0x57(LINKDOWN) instead. + */ +#define SISL_AFU_RC_NO_CHANNELS 0x20U /* see afu_extra, may retry */ +#define SISL_AFU_RC_CAP_VIOLATION 0x21U /* either user error or + * afu reset/master restart + */ +#define SISL_AFU_RC_OUT_OF_DATA_BUFS 0x30U /* always retry */ +#define SISL_AFU_RC_DATA_DMA_ERR 0x31U /* see afu_extra + * may retry if afu_retry is off + */ + + u8 scsi_rc; /* SCSI status byte, retry as appropriate */ +#define SISL_SCSI_RC_CHECK 0x02U +#define SISL_SCSI_RC_BUSY 0x08u + + u8 fc_rc; /* retry */ + /* + * We should only see fc_rc=0x57 (LINKDOWN) or 0x54(NOLOGI) for + * commands that are in flight when a link goes down or is logged out. + * If the link is down or logged out before AFU selects the port, either + * it will choose the other port or we will get afu_rc=0x20 (no_channel) + * if there is no valid port to use. + * + * ABORTPEND/ABORTOK/ABORTFAIL/TGTABORT can be retried, typically these + * would happen if a frame is dropped and something times out. + * NOLOGI or LINKDOWN can be retried if the other port is up. + * RESIDERR can be retried as well. + * + * ABORTFAIL might indicate that lots of frames are getting CRC errors. + * So it maybe retried once and reset the link if it happens again. 
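Aside on the IOARCB fields defined above: req_flags is a bitwise OR of the addressing mode, underrun policy, timeout units and transfer direction, while port_sel is a mask of FC ports the AFU may choose from. A small standalone sketch follows; the macro values are restated locally (mirroring the SISL_REQ_FLAGS_* definitions in this header) so it compiles on its own.

/* Compose request flags for a host read issued by resource handle, with
 * underrun supported and the timeout expressed in seconds, and allow the
 * AFU to pick either port of a pair.
 */
#include <stdint.h>
#include <stdio.h>

#define REQ_FLAGS_RES_HNDL      0x8000U  /* address by resource handle */
#define REQ_FLAGS_SUP_UNDERRUN  0x4000U  /* underrun is not an error */
#define REQ_FLAGS_TIMEOUT_SECS  0x0000U  /* timeout field is in seconds */
#define REQ_FLAGS_HOST_READ     0x0000U  /* data flows AFU -> host */

int main(void)
{
	uint16_t req_flags = REQ_FLAGS_RES_HNDL | REQ_FLAGS_SUP_UNDERRUN |
			     REQ_FLAGS_TIMEOUT_SECS | REQ_FLAGS_HOST_READ;
	uint32_t port_sel = 0x1 | 0x2;   /* either FC port may be selected */

	printf("req_flags=0x%04x port_sel=0x%x\n", req_flags, port_sel);
	return 0;   /* prints req_flags=0xc000 port_sel=0x3 */
}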
+ * The link can also be reset on the CRC error threshold interrupt. + */ +#define SISL_FC_RC_ABORTPEND 0x52 /* exchange timeout or abort request */ +#define SISL_FC_RC_WRABORTPEND 0x53 /* due to write XFER_RDY invalid */ +#define SISL_FC_RC_NOLOGI 0x54 /* port not logged in, in-flight cmds */ +#define SISL_FC_RC_NOEXP 0x55 /* FC protocol error or HW bug */ +#define SISL_FC_RC_INUSE 0x56 /* tag already in use, HW bug */ +#define SISL_FC_RC_LINKDOWN 0x57 /* link down, in-flight cmds */ +#define SISL_FC_RC_ABORTOK 0x58 /* pending abort completed w/success */ +#define SISL_FC_RC_ABORTFAIL 0x59 /* pending abort completed w/fail */ +#define SISL_FC_RC_RESID 0x5A /* ioasa underrun/overrun flags set */ +#define SISL_FC_RC_RESIDERR 0x5B /* actual data len does not match SCSI + * reported len, possibly due to dropped + * frames + */ +#define SISL_FC_RC_TGTABORT 0x5C /* command aborted by target */ +}; + +#define SISL_SENSE_DATA_LEN 20 /* Sense data length */ +#define SISL_WWID_DATA_LEN 16 /* WWID data length */ + +/* + * IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required, + * host native endianness + */ +struct sisl_ioasa { + union { + struct sisl_rc rc; + u32 ioasc; +#define SISL_IOASC_GOOD_COMPLETION 0x00000000U + }; + + union { + u32 resid; + u32 lunid_hi; + }; + + u8 port; + u8 afu_extra; + /* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR): + * afu_exta contains PSL response code. Useful codes are: + */ +#define SISL_AFU_DMA_ERR_PAGE_IN 0x0A /* AFU_retry_on_pagein Action + * Enabled N/A + * Disabled retry + */ +#define SISL_AFU_DMA_ERR_INVALID_EA 0x0B /* this is a hard error + * afu_rc Implies + * 0x04, 0x14 master exit. + * 0x31 user error. + */ + /* when afu rc=0x20 (no channels): + * afu_extra bits [4:5]: available portmask, [6:7]: requested portmask. + */ +#define SISL_AFU_NO_CLANNELS_AMASK(afu_extra) (((afu_extra) & 0x0C) >> 2) +#define SISL_AFU_NO_CLANNELS_RMASK(afu_extra) ((afu_extra) & 0x03) + + u8 scsi_extra; + u8 fc_extra; + + union { + u8 sense_data[SISL_SENSE_DATA_LEN]; + struct { + u32 lunid_lo; + u8 wwid[SISL_WWID_DATA_LEN]; + }; + }; + + /* These fields are defined by the SISlite architecture for the + * host to use as they see fit for their implementation. + */ + union { + u64 host_use[4]; + u8 host_use_b[32]; + }; +} __packed; + +#define SISL_RESP_HANDLE_T_BIT 0x1ULL /* Toggle bit */ + +/* MMIO space is required to support only 64-bit access */ + +/* + * This AFU has two mechanisms to deal with endian-ness. + * One is a global configuration (in the afu_config) register + * below that specifies the endian-ness of the host. + * The other is a per context (i.e. application) specification + * controlled by the endian_ctrl field here. Since the master + * context is one such application the master context's + * endian-ness is set to be the same as the host. + * + * As per the SISlite spec, the MMIO registers are always + * big endian. + */ +#define SISL_ENDIAN_CTRL_BE 0x8000000000000080ULL +#define SISL_ENDIAN_CTRL_LE 0x0000000000000000ULL + +#ifdef __BIG_ENDIAN +#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_BE +#else +#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_LE +#endif + +/* per context host transport MMIO */ +struct sisl_host_map { + __be64 endian_ctrl; /* Per context Endian Control. The AFU will + * operate on whatever the context is of the + * host application. + */ + + __be64 intr_status; /* this sends LISN# programmed in ctx_ctrl. + * Only recovery in a PERM_ERR is a context + * exit since there is no way to tell which + * command caused the error. 
+ */ +#define SISL_ISTATUS_PERM_ERR_LISN_3_EA 0x0400ULL /* b53, user error */ +#define SISL_ISTATUS_PERM_ERR_LISN_2_EA 0x0200ULL /* b54, user error */ +#define SISL_ISTATUS_PERM_ERR_LISN_1_EA 0x0100ULL /* b55, user error */ +#define SISL_ISTATUS_PERM_ERR_LISN_3_PASID 0x0080ULL /* b56, user error */ +#define SISL_ISTATUS_PERM_ERR_LISN_2_PASID 0x0040ULL /* b57, user error */ +#define SISL_ISTATUS_PERM_ERR_LISN_1_PASID 0x0020ULL /* b58, user error */ +#define SISL_ISTATUS_PERM_ERR_CMDROOM 0x0010ULL /* b59, user error */ +#define SISL_ISTATUS_PERM_ERR_RCB_READ 0x0008ULL /* b60, user error */ +#define SISL_ISTATUS_PERM_ERR_SA_WRITE 0x0004ULL /* b61, user error */ +#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE 0x0002ULL /* b62, user error */ + /* Page in wait accessing RCB/IOASA/RRQ is reported in b63. + * Same error in data/LXT/RHT access is reported via IOASA. + */ +#define SISL_ISTATUS_TEMP_ERR_PAGEIN 0x0001ULL /* b63, can only be + * generated when AFU + * auto retry is + * disabled. If user + * can determine the + * command that caused + * the error, it can + * be retried. + */ +#define SISL_ISTATUS_UNMASK (0x07FFULL) /* 1 means unmasked */ +#define SISL_ISTATUS_MASK ~(SISL_ISTATUS_UNMASK) /* 1 means masked */ + + __be64 intr_clear; + __be64 intr_mask; + __be64 ioarrin; /* only write what cmd_room permits */ + __be64 rrq_start; /* start & end are both inclusive */ + __be64 rrq_end; /* write sequence: start followed by end */ + __be64 cmd_room; + __be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */ +#define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */ +#define SISL_CTX_CTRL_LISN_MASK (0xFFULL) + __be64 mbox_w; /* restricted use */ + __be64 sq_start; /* Submission Queue (R/W): write sequence and */ + __be64 sq_end; /* inclusion semantics are the same as RRQ */ + __be64 sq_head; /* Submission Queue Head (R): for debugging */ + __be64 sq_tail; /* Submission Queue TAIL (R/W): next IOARCB */ + __be64 sq_ctx_reset; /* Submission Queue Context Reset (R/W) */ +}; + +/* per context provisioning & control MMIO */ +struct sisl_ctrl_map { + __be64 rht_start; + __be64 rht_cnt_id; + /* both cnt & ctx_id args must be ULL */ +#define SISL_RHT_CNT_ID(cnt, ctx_id) (((cnt) << 48) | ((ctx_id) << 32)) + + __be64 ctx_cap; /* afu_rc below is when the capability is violated */ +#define SISL_CTX_CAP_PROXY_ISSUE 0x8000000000000000ULL /* afu_rc 0x21 */ +#define SISL_CTX_CAP_REAL_MODE 0x4000000000000000ULL /* afu_rc 0x21 */ +#define SISL_CTX_CAP_HOST_XLATE 0x2000000000000000ULL /* afu_rc 0x1a */ +#define SISL_CTX_CAP_PROXY_TARGET 0x1000000000000000ULL /* afu_rc 0x21 */ +#define SISL_CTX_CAP_AFU_CMD 0x0000000000000008ULL /* afu_rc 0x21 */ +#define SISL_CTX_CAP_GSCSI_CMD 0x0000000000000004ULL /* afu_rc 0x21 */ +#define SISL_CTX_CAP_WRITE_CMD 0x0000000000000002ULL /* afu_rc 0x21 */ +#define SISL_CTX_CAP_READ_CMD 0x0000000000000001ULL /* afu_rc 0x21 */ + __be64 mbox_r; + __be64 lisn_pasid[2]; + /* pasid _a arg must be ULL */ +#define SISL_LISN_PASID(_a, _b) (((_a) << 32) | (_b)) + __be64 lisn_ea[3]; +}; + +/* single copy global regs */ +struct sisl_global_regs { + __be64 aintr_status; + /* + * In cxlflash, FC port/link are arranged in port pairs, each + * gets a byte of status: + * + * *_OTHER: other err, FC_ERRCAP[31:20] + * *_LOGO: target sent FLOGI/PLOGI/LOGO while logged in + * *_CRC_T: CRC threshold exceeded + * *_LOGI_R: login state machine timed out and retrying + * *_LOGI_F: login failed, FC_ERROR[19:0] + * *_LOGI_S: login succeeded + * *_LINK_DN: link online to offline + * *_LINK_UP: link 
offline to online + */ +#define SISL_ASTATUS_FC2_OTHER 0x80000000ULL /* b32 */ +#define SISL_ASTATUS_FC2_LOGO 0x40000000ULL /* b33 */ +#define SISL_ASTATUS_FC2_CRC_T 0x20000000ULL /* b34 */ +#define SISL_ASTATUS_FC2_LOGI_R 0x10000000ULL /* b35 */ +#define SISL_ASTATUS_FC2_LOGI_F 0x08000000ULL /* b36 */ +#define SISL_ASTATUS_FC2_LOGI_S 0x04000000ULL /* b37 */ +#define SISL_ASTATUS_FC2_LINK_DN 0x02000000ULL /* b38 */ +#define SISL_ASTATUS_FC2_LINK_UP 0x01000000ULL /* b39 */ + +#define SISL_ASTATUS_FC3_OTHER 0x00800000ULL /* b40 */ +#define SISL_ASTATUS_FC3_LOGO 0x00400000ULL /* b41 */ +#define SISL_ASTATUS_FC3_CRC_T 0x00200000ULL /* b42 */ +#define SISL_ASTATUS_FC3_LOGI_R 0x00100000ULL /* b43 */ +#define SISL_ASTATUS_FC3_LOGI_F 0x00080000ULL /* b44 */ +#define SISL_ASTATUS_FC3_LOGI_S 0x00040000ULL /* b45 */ +#define SISL_ASTATUS_FC3_LINK_DN 0x00020000ULL /* b46 */ +#define SISL_ASTATUS_FC3_LINK_UP 0x00010000ULL /* b47 */ + +#define SISL_ASTATUS_FC0_OTHER 0x00008000ULL /* b48 */ +#define SISL_ASTATUS_FC0_LOGO 0x00004000ULL /* b49 */ +#define SISL_ASTATUS_FC0_CRC_T 0x00002000ULL /* b50 */ +#define SISL_ASTATUS_FC0_LOGI_R 0x00001000ULL /* b51 */ +#define SISL_ASTATUS_FC0_LOGI_F 0x00000800ULL /* b52 */ +#define SISL_ASTATUS_FC0_LOGI_S 0x00000400ULL /* b53 */ +#define SISL_ASTATUS_FC0_LINK_DN 0x00000200ULL /* b54 */ +#define SISL_ASTATUS_FC0_LINK_UP 0x00000100ULL /* b55 */ + +#define SISL_ASTATUS_FC1_OTHER 0x00000080ULL /* b56 */ +#define SISL_ASTATUS_FC1_LOGO 0x00000040ULL /* b57 */ +#define SISL_ASTATUS_FC1_CRC_T 0x00000020ULL /* b58 */ +#define SISL_ASTATUS_FC1_LOGI_R 0x00000010ULL /* b59 */ +#define SISL_ASTATUS_FC1_LOGI_F 0x00000008ULL /* b60 */ +#define SISL_ASTATUS_FC1_LOGI_S 0x00000004ULL /* b61 */ +#define SISL_ASTATUS_FC1_LINK_DN 0x00000002ULL /* b62 */ +#define SISL_ASTATUS_FC1_LINK_UP 0x00000001ULL /* b63 */ + +#define SISL_FC_INTERNAL_UNMASK 0x0000000300000000ULL /* 1 means unmasked */ +#define SISL_FC_INTERNAL_MASK ~(SISL_FC_INTERNAL_UNMASK) +#define SISL_FC_INTERNAL_SHIFT 32 + +#define SISL_FC_SHUTDOWN_NORMAL 0x0000000000000010ULL +#define SISL_FC_SHUTDOWN_ABRUPT 0x0000000000000020ULL + +#define SISL_STATUS_SHUTDOWN_ACTIVE 0x0000000000000010ULL +#define SISL_STATUS_SHUTDOWN_COMPLETE 0x0000000000000020ULL + +#define SISL_ASTATUS_UNMASK 0xFFFFFFFFULL /* 1 means unmasked */ +#define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */ + + __be64 aintr_clear; + __be64 aintr_mask; + __be64 afu_ctrl; + __be64 afu_hb; + __be64 afu_scratch_pad; + __be64 afu_port_sel; +#define SISL_AFUCONF_AR_IOARCB 0x4000ULL +#define SISL_AFUCONF_AR_LXT 0x2000ULL +#define SISL_AFUCONF_AR_RHT 0x1000ULL +#define SISL_AFUCONF_AR_DATA 0x0800ULL +#define SISL_AFUCONF_AR_RSRC 0x0400ULL +#define SISL_AFUCONF_AR_IOASA 0x0200ULL +#define SISL_AFUCONF_AR_RRQ 0x0100ULL +/* Aggregate all Auto Retry Bits */ +#define SISL_AFUCONF_AR_ALL (SISL_AFUCONF_AR_IOARCB|SISL_AFUCONF_AR_LXT| \ + SISL_AFUCONF_AR_RHT|SISL_AFUCONF_AR_DATA| \ + SISL_AFUCONF_AR_RSRC|SISL_AFUCONF_AR_IOASA| \ + SISL_AFUCONF_AR_RRQ) +#ifdef __BIG_ENDIAN +#define SISL_AFUCONF_ENDIAN 0x0000ULL +#else +#define SISL_AFUCONF_ENDIAN 0x0020ULL +#endif +#define SISL_AFUCONF_MBOX_CLR_READ 0x0010ULL + __be64 afu_config; + __be64 rsvd[0xf8]; + __le64 afu_version; + __be64 interface_version; +#define SISL_INTVER_CAP_SHIFT 16 +#define SISL_INTVER_MAJ_SHIFT 8 +#define SISL_INTVER_CAP_MASK 0xFFFFFFFF00000000ULL +#define SISL_INTVER_MAJ_MASK 0x00000000FFFF0000ULL +#define SISL_INTVER_MIN_MASK 0x000000000000FFFFULL +#define SISL_INTVER_CAP_IOARRIN_CMD_MODE 
0x800000000000ULL +#define SISL_INTVER_CAP_SQ_CMD_MODE 0x400000000000ULL +#define SISL_INTVER_CAP_RESERVED_CMD_MODE_A 0x200000000000ULL +#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL +#define SISL_INTVER_CAP_LUN_PROVISION 0x080000000000ULL +#define SISL_INTVER_CAP_AFU_DEBUG 0x040000000000ULL +#define SISL_INTVER_CAP_OCXL_LISN 0x020000000000ULL +}; + +#define CXLFLASH_NUM_FC_PORTS_PER_BANK 2 /* fixed # of ports per bank */ +#define CXLFLASH_MAX_FC_BANKS 2 /* max # of banks supported */ +#define CXLFLASH_MAX_FC_PORTS (CXLFLASH_NUM_FC_PORTS_PER_BANK * \ + CXLFLASH_MAX_FC_BANKS) +#define CXLFLASH_MAX_CONTEXT 512 /* number of contexts per AFU */ +#define CXLFLASH_NUM_VLUNS 512 /* number of vluns per AFU/port */ +#define CXLFLASH_NUM_REGS 512 /* number of registers per port */ + +struct fc_port_bank { + __be64 fc_port_regs[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_REGS]; + __be64 fc_port_luns[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_VLUNS]; +}; + +struct sisl_global_map { + union { + struct sisl_global_regs regs; + char page0[SIZE_4K]; /* page 0 */ + }; + + char page1[SIZE_4K]; /* page 1 */ + + struct fc_port_bank bank[CXLFLASH_MAX_FC_BANKS]; /* pages 2 - 9 */ + + /* pages 10 - 15 are reserved */ + +}; + +/* + * CXL Flash Memory Map + * + * +-------------------------------+ + * | 512 * 64 KB User MMIO | + * | (per context) | + * | User Accessible | + * +-------------------------------+ + * | 512 * 128 B per context | + * | Provisioning and Control | + * | Trusted Process accessible | + * +-------------------------------+ + * | 64 KB Global | + * | Trusted Process accessible | + * +-------------------------------+ + */ +struct cxlflash_afu_map { + union { + struct sisl_host_map host; + char harea[SIZE_64K]; /* 64KB each */ + } hosts[CXLFLASH_MAX_CONTEXT]; + + union { + struct sisl_ctrl_map ctrl; + char carea[cache_line_size()]; /* 128B each */ + } ctrls[CXLFLASH_MAX_CONTEXT]; + + union { + struct sisl_global_map global; + char garea[SIZE_64K]; /* 64KB single block */ + }; +}; + +/* + * LXT - LBA Translation Table + * LXT control blocks + */ +struct sisl_lxt_entry { + u64 rlba_base; /* bits 0:47 is base + * b48:55 is lun index + * b58:59 is write & read perms + * (if no perm, afu_rc=0x15) + * b60:63 is port_sel mask + */ +}; + +/* + * RHT - Resource Handle Table + * Per the SISlite spec, RHT entries are to be 16-byte aligned + */ +struct sisl_rht_entry { + struct sisl_lxt_entry *lxt_start; + u32 lxt_cnt; + u16 rsvd; + u8 fp; /* format & perm nibbles. + * (if no perm, afu_rc=0x05) + */ + u8 nmask; +} __packed __aligned(16); + +struct sisl_rht_entry_f1 { + u64 lun_id; + union { + struct { + u8 valid; + u8 rsvd[5]; + u8 fp; + u8 port_sel; + }; + + u64 dw; + }; +} __packed __aligned(16); + +/* make the fp byte */ +#define SISL_RHT_FP(fmt, perm) (((fmt) << 4) | (perm)) + +/* make the fp byte for a clone from a source fp and clone flags + * flags must be only 2 LSB bits. 
+ */ +#define SISL_RHT_FP_CLONE(src_fp, cln_flags) ((src_fp) & (0xFC | (cln_flags))) + +#define RHT_PERM_READ 0x01U +#define RHT_PERM_WRITE 0x02U +#define RHT_PERM_RW (RHT_PERM_READ | RHT_PERM_WRITE) + +/* extract the perm bits from a fp */ +#define SISL_RHT_PERM(fp) ((fp) & RHT_PERM_RW) + +#define PORT0 0x01U +#define PORT1 0x02U +#define PORT2 0x04U +#define PORT3 0x08U +#define PORT_MASK(_n) ((1 << (_n)) - 1) + +/* AFU Sync Mode byte */ +#define AFU_LW_SYNC 0x0U +#define AFU_HW_SYNC 0x1U +#define AFU_GSYNC 0x2U + +/* Special Task Management Function CDB */ +#define TMF_LUN_RESET 0x1U +#define TMF_CLEAR_ACA 0x2U + +#endif /* _SISLITE_H */ diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c new file mode 100644 index 000000000..e1b55b03e --- /dev/null +++ b/drivers/scsi/cxlflash/superpipe.c @@ -0,0 +1,2220 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * CXL Flash Device Driver + * + * Written by: Manoj N. Kumar , IBM Corporation + * Matthew R. Ochs , IBM Corporation + * + * Copyright (C) 2015 IBM Corporation + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "sislite.h" +#include "common.h" +#include "vlun.h" +#include "superpipe.h" + +struct cxlflash_global global; + +/** + * marshal_rele_to_resize() - translate release to resize structure + * @release: Source structure from which to translate/copy. + * @resize: Destination structure for the translate/copy. + */ +static void marshal_rele_to_resize(struct dk_cxlflash_release *release, + struct dk_cxlflash_resize *resize) +{ + resize->hdr = release->hdr; + resize->context_id = release->context_id; + resize->rsrc_handle = release->rsrc_handle; +} + +/** + * marshal_det_to_rele() - translate detach to release structure + * @detach: Destination structure for the translate/copy. + * @release: Source structure from which to translate/copy. + */ +static void marshal_det_to_rele(struct dk_cxlflash_detach *detach, + struct dk_cxlflash_release *release) +{ + release->hdr = detach->hdr; + release->context_id = detach->context_id; +} + +/** + * marshal_udir_to_rele() - translate udirect to release structure + * @udirect: Source structure from which to translate/copy. + * @release: Destination structure for the translate/copy. + */ +static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect, + struct dk_cxlflash_release *release) +{ + release->hdr = udirect->hdr; + release->context_id = udirect->context_id; + release->rsrc_handle = udirect->rsrc_handle; +} + +/** + * cxlflash_free_errpage() - frees resources associated with global error page + */ +void cxlflash_free_errpage(void) +{ + + mutex_lock(&global.mutex); + if (global.err_page) { + __free_page(global.err_page); + global.err_page = NULL; + } + mutex_unlock(&global.mutex); +} + +/** + * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts + * @cfg: Internal structure associated with the host. + * + * When the host needs to go down, all users must be quiesced and their + * memory freed. This is accomplished by putting the contexts in error + * state which will notify the user and let them 'drive' the tear down. + * Meanwhile, this routine camps until all user contexts have been removed. + * + * Note that the main loop in this routine will always execute at least once + * to flush the reset_waitq. 
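Aside on the fp-byte and port helpers defined above: the fp byte packs the RHT format in the upper nibble and the permission bits in the lower nibble, and the clone macro can only ever narrow the source permissions. A small standalone check follows; the macros are copied from this header so the example is self-contained.

/* Pack the format/permission nibbles, extract the permissions, apply the
 * clone mask, and build a two-port selection mask.
 */
#include <stdio.h>

#define SISL_RHT_FP(fmt, perm)               (((fmt) << 4) | (perm))
#define SISL_RHT_FP_CLONE(src_fp, cln_flags) ((src_fp) & (0xFC | (cln_flags)))
#define RHT_PERM_READ   0x01U
#define RHT_PERM_WRITE  0x02U
#define RHT_PERM_RW     (RHT_PERM_READ | RHT_PERM_WRITE)
#define SISL_RHT_PERM(fp) ((fp) & RHT_PERM_RW)
#define PORT_MASK(_n)   ((1 << (_n)) - 1)

int main(void)
{
	unsigned int fp = SISL_RHT_FP(1U, RHT_PERM_RW);          /* format 1, RW */
	unsigned int ro = SISL_RHT_FP_CLONE(fp, RHT_PERM_READ);  /* clone read-only */

	printf("fp=0x%02x perm=0x%x clone_ro=0x%02x ports=0x%x\n",
	       fp, SISL_RHT_PERM(fp), ro, PORT_MASK(2));
	return 0;   /* prints fp=0x13 perm=0x3 clone_ro=0x11 ports=0x3 */
}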
+ */ +void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg) +{ + struct device *dev = &cfg->dev->dev; + int i, found = true; + + cxlflash_mark_contexts_error(cfg); + + while (true) { + for (i = 0; i < MAX_CONTEXT; i++) + if (cfg->ctx_tbl[i]) { + found = true; + break; + } + + if (!found && list_empty(&cfg->ctx_err_recovery)) + return; + + dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n", + __func__); + wake_up_all(&cfg->reset_waitq); + ssleep(1); + found = false; + } +} + +/** + * find_error_context() - locates a context by cookie on the error recovery list + * @cfg: Internal structure associated with the host. + * @rctxid: Desired context by id. + * @file: Desired context by file. + * + * Return: Found context on success, NULL on failure + */ +static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid, + struct file *file) +{ + struct ctx_info *ctxi; + + list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list) + if ((ctxi->ctxid == rctxid) || (ctxi->file == file)) + return ctxi; + + return NULL; +} + +/** + * get_context() - obtains a validated and locked context reference + * @cfg: Internal structure associated with the host. + * @rctxid: Desired context (raw, un-decoded format). + * @arg: LUN information or file associated with request. + * @ctx_ctrl: Control information to 'steer' desired lookup. + * + * NOTE: despite the name pid, in linux, current->pid actually refers + * to the lightweight process id (tid) and can change if the process is + * multi threaded. The tgid remains constant for the process and only changes + * when the process of fork. For all intents and purposes, think of tgid + * as a pid in the traditional sense. + * + * Return: Validated context on success, NULL on failure + */ +struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid, + void *arg, enum ctx_ctrl ctx_ctrl) +{ + struct device *dev = &cfg->dev->dev; + struct ctx_info *ctxi = NULL; + struct lun_access *lun_access = NULL; + struct file *file = NULL; + struct llun_info *lli = arg; + u64 ctxid = DECODE_CTXID(rctxid); + int rc; + pid_t pid = task_tgid_nr(current), ctxpid = 0; + + if (ctx_ctrl & CTX_CTRL_FILE) { + lli = NULL; + file = (struct file *)arg; + } + + if (ctx_ctrl & CTX_CTRL_CLONE) + pid = task_ppid_nr(current); + + if (likely(ctxid < MAX_CONTEXT)) { + while (true) { + mutex_lock(&cfg->ctx_tbl_list_mutex); + ctxi = cfg->ctx_tbl[ctxid]; + if (ctxi) + if ((file && (ctxi->file != file)) || + (!file && (ctxi->ctxid != rctxid))) + ctxi = NULL; + + if ((ctx_ctrl & CTX_CTRL_ERR) || + (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK))) + ctxi = find_error_context(cfg, rctxid, file); + if (!ctxi) { + mutex_unlock(&cfg->ctx_tbl_list_mutex); + goto out; + } + + /* + * Need to acquire ownership of the context while still + * under the table/list lock to serialize with a remove + * thread. Use the 'try' to avoid stalling the + * table/list lock for a single context. + * + * Note that the lock order is: + * + * cfg->ctx_tbl_list_mutex -> ctxi->mutex + * + * Therefore release ctx_tbl_list_mutex before retrying. + */ + rc = mutex_trylock(&ctxi->mutex); + mutex_unlock(&cfg->ctx_tbl_list_mutex); + if (rc) + break; /* got the context's lock! 
*/ + } + + if (ctxi->unavail) + goto denied; + + ctxpid = ctxi->pid; + if (likely(!(ctx_ctrl & CTX_CTRL_NOPID))) + if (pid != ctxpid) + goto denied; + + if (lli) { + list_for_each_entry(lun_access, &ctxi->luns, list) + if (lun_access->lli == lli) + goto out; + goto denied; + } + } + +out: + dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u " + "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid, + ctx_ctrl); + + return ctxi; + +denied: + mutex_unlock(&ctxi->mutex); + ctxi = NULL; + goto out; +} + +/** + * put_context() - release a context that was retrieved from get_context() + * @ctxi: Context to release. + * + * For now, releasing the context equates to unlocking it's mutex. + */ +void put_context(struct ctx_info *ctxi) +{ + mutex_unlock(&ctxi->mutex); +} + +/** + * afu_attach() - attach a context to the AFU + * @cfg: Internal structure associated with the host. + * @ctxi: Context to attach. + * + * Upon setting the context capabilities, they must be confirmed with + * a read back operation as the context might have been closed since + * the mailbox was unlocked. When this occurs, registration is failed. + * + * Return: 0 on success, -errno on failure + */ +static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi) +{ + struct device *dev = &cfg->dev->dev; + struct afu *afu = cfg->afu; + struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map; + int rc = 0; + struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); + u64 val; + int i; + + /* Unlock cap and restrict user to read/write cmds in translated mode */ + readq_be(&ctrl_map->mbox_r); + val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD); + writeq_be(val, &ctrl_map->ctx_cap); + val = readq_be(&ctrl_map->ctx_cap); + if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) { + dev_err(dev, "%s: ctx may be closed val=%016llx\n", + __func__, val); + rc = -EAGAIN; + goto out; + } + + if (afu_is_ocxl_lisn(afu)) { + /* Set up the LISN effective address for each interrupt */ + for (i = 0; i < ctxi->irqs; i++) { + val = cfg->ops->get_irq_objhndl(ctxi->ctx, i); + writeq_be(val, &ctrl_map->lisn_ea[i]); + } + + /* Use primary HWQ PASID as identifier for all interrupts */ + val = hwq->ctx_hndl; + writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]); + writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]); + } + + /* Set up MMIO registers pointing to the RHT */ + writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start); + val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl)); + writeq_be(val, &ctrl_map->rht_cnt_id); +out: + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * read_cap16() - issues a SCSI READ_CAP16 command + * @sdev: SCSI device associated with LUN. + * @lli: LUN destined for capacity request. + * + * The READ_CAP16 can take quite a while to complete. Should an EEH occur while + * in scsi_execute_cmd(), the EEH handler will attempt to recover. As part of + * the recovery, the handler drains all currently running ioctls, waiting until + * they have completed before proceeding with a reset. As this routine is used + * on the ioctl path, this can create a condition where the EEH handler becomes + * stuck, infinitely waiting for this ioctl thread. To avoid this behavior, + * temporarily unmark this thread as an ioctl thread by releasing the ioctl + * read semaphore. This will allow the EEH handler to proceed with a recovery + * while this thread is still running. 
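Aside on get_context() above: it documents the ctx_tbl_list_mutex before ctxi->mutex lock order and uses a trylock so a remover never waits behind a lookup that is stalled on a single entry. A minimal pthread sketch of the same retry-on-trylock-failure pattern follows; the structures and names are illustrative only, not the driver's, and pthread_mutex_trylock() returns 0 on success (the inverse of the kernel's mutex_trylock()).

/* Outer table lock is taken first, the inner per-entry lock is only tried;
 * on contention both are dropped and the lookup retries instead of blocking
 * while holding the outer lock.
 */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stddef.h>

struct entry {
	pthread_mutex_t lock;            /* inner lock, protects the entry */
	bool unavailable;
};

struct table {
	pthread_mutex_t list_lock;       /* outer lock, protects the array */
	struct entry *slots[16];
};

static struct entry *acquire_entry(struct table *tbl, unsigned int idx)
{
	struct entry *e;

	for (;;) {
		pthread_mutex_lock(&tbl->list_lock);
		e = (idx < 16) ? tbl->slots[idx] : NULL;
		if (!e) {
			pthread_mutex_unlock(&tbl->list_lock);
			return NULL;
		}
		/* Inner lock is only tried while the outer lock is held */
		if (pthread_mutex_trylock(&e->lock) == 0) {
			pthread_mutex_unlock(&tbl->list_lock);
			break;           /* got the entry's lock */
		}
		pthread_mutex_unlock(&tbl->list_lock);
		sched_yield();           /* give the remover a chance, then retry */
	}

	if (e->unavailable) {            /* validated under the entry lock */
		pthread_mutex_unlock(&e->lock);
		return NULL;
	}
	return e;
}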
Once the scsi_execute_cmd() returns, + * reacquire the ioctl read semaphore and check the adapter state in case it + * changed while inside of scsi_execute_cmd(). The state check will wait if the + * adapter is still being recovered or return a failure if the recovery failed. + * In the event that the adapter reset failed, simply return the failure as the + * ioctl would be unable to continue. + * + * Note that the above puts a requirement on this routine to only be called on + * an ioctl thread. + * + * Return: 0 on success, -errno on failure + */ +static int read_cap16(struct scsi_device *sdev, struct llun_info *lli) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct glun_info *gli = lli->parent; + struct scsi_sense_hdr sshdr; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + u8 *cmd_buf = NULL; + u8 *scsi_cmd = NULL; + int rc = 0; + int result = 0; + int retry_cnt = 0; + u32 to = CMD_TIMEOUT * HZ; + +retry: + cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL); + scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL); + if (unlikely(!cmd_buf || !scsi_cmd)) { + rc = -ENOMEM; + goto out; + } + + scsi_cmd[0] = SERVICE_ACTION_IN_16; /* read cap(16) */ + scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */ + put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]); + + dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__, + retry_cnt ? "re" : "", scsi_cmd[0]); + + /* Drop the ioctl read semaphore across lengthy call */ + up_read(&cfg->ioctl_rwsem); + result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, cmd_buf, + CMD_BUFSIZE, to, CMD_RETRIES, &exec_args); + down_read(&cfg->ioctl_rwsem); + rc = check_state(cfg); + if (rc) { + dev_err(dev, "%s: Failed state result=%08x\n", + __func__, result); + rc = -ENODEV; + goto out; + } + + if (result > 0 && scsi_sense_valid(&sshdr)) { + if (result & SAM_STAT_CHECK_CONDITION) { + switch (sshdr.sense_key) { + case NO_SENSE: + case RECOVERED_ERROR: + case NOT_READY: + result &= ~SAM_STAT_CHECK_CONDITION; + break; + case UNIT_ATTENTION: + switch (sshdr.asc) { + case 0x29: /* Power on Reset or Device Reset */ + fallthrough; + case 0x2A: /* Device capacity changed */ + case 0x3F: /* Report LUNs changed */ + /* Retry the command once more */ + if (retry_cnt++ < 1) { + kfree(cmd_buf); + kfree(scsi_cmd); + goto retry; + } + } + break; + default: + break; + } + } + } + + if (result) { + dev_err(dev, "%s: command failed, result=%08x\n", + __func__, result); + rc = -EIO; + goto out; + } + + /* + * Read cap was successful, grab values from the buffer; + * note that we don't need to worry about unaligned access + * as the buffer is allocated on an aligned boundary. + */ + mutex_lock(&gli->mutex); + gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0])); + gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8])); + mutex_unlock(&gli->mutex); + +out: + kfree(cmd_buf); + kfree(scsi_cmd); + + dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n", + __func__, gli->max_lba, gli->blk_len, rc); + return rc; +} + +/** + * get_rhte() - obtains validated resource handle table entry reference + * @ctxi: Context owning the resource handle. + * @rhndl: Resource handle associated with entry. + * @lli: LUN associated with request. 
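Aside on the parsing at the end of read_cap16() above: the READ CAPACITY(16) parameter data carries the LBA of the last logical block as a big-endian 64-bit value at offset 0 and the block length as a big-endian 32-bit value at offset 8. A standalone sketch of that decode follows; the sample buffer contents are fabricated.

/* Decode a READ CAPACITY(16) response buffer without relying on platform
 * byte-swap helpers.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t get_be64(const uint8_t *p)
{
	uint64_t v = 0;
	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

int main(void)
{
	uint8_t buf[32] = { 0 };                                   /* fabricated response */
	uint8_t lba[8]  = { 0, 0, 0, 0, 0x3B, 0x9A, 0xC9, 0xFF };  /* last LBA */
	uint8_t blk[4]  = { 0, 0, 0x10, 0 };                       /* 4096-byte blocks */

	memcpy(&buf[0], lba, sizeof(lba));
	memcpy(&buf[8], blk, sizeof(blk));

	printf("max_lba=%llu blk_len=%u\n",
	       (unsigned long long)get_be64(&buf[0]), get_be32(&buf[8]));
	return 0;   /* prints max_lba=999999999 blk_len=4096 */
}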
+ * + * Return: Validated RHTE on success, NULL on failure + */ +struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl, + struct llun_info *lli) +{ + struct cxlflash_cfg *cfg = ctxi->cfg; + struct device *dev = &cfg->dev->dev; + struct sisl_rht_entry *rhte = NULL; + + if (unlikely(!ctxi->rht_start)) { + dev_dbg(dev, "%s: Context does not have allocated RHT\n", + __func__); + goto out; + } + + if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) { + dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n", + __func__, rhndl); + goto out; + } + + if (unlikely(ctxi->rht_lun[rhndl] != lli)) { + dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n", + __func__, rhndl); + goto out; + } + + rhte = &ctxi->rht_start[rhndl]; + if (unlikely(rhte->nmask == 0)) { + dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n", + __func__, rhndl); + rhte = NULL; + goto out; + } + +out: + return rhte; +} + +/** + * rhte_checkout() - obtains free/empty resource handle table entry + * @ctxi: Context owning the resource handle. + * @lli: LUN associated with request. + * + * Return: Free RHTE on success, NULL on failure + */ +struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi, + struct llun_info *lli) +{ + struct cxlflash_cfg *cfg = ctxi->cfg; + struct device *dev = &cfg->dev->dev; + struct sisl_rht_entry *rhte = NULL; + int i; + + /* Find a free RHT entry */ + for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) + if (ctxi->rht_start[i].nmask == 0) { + rhte = &ctxi->rht_start[i]; + ctxi->rht_out++; + break; + } + + if (likely(rhte)) + ctxi->rht_lun[i] = lli; + + dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i); + return rhte; +} + +/** + * rhte_checkin() - releases a resource handle table entry + * @ctxi: Context owning the resource handle. + * @rhte: RHTE to release. + */ +void rhte_checkin(struct ctx_info *ctxi, + struct sisl_rht_entry *rhte) +{ + u32 rsrc_handle = rhte - ctxi->rht_start; + + rhte->nmask = 0; + rhte->fp = 0; + ctxi->rht_out--; + ctxi->rht_lun[rsrc_handle] = NULL; + ctxi->rht_needs_ws[rsrc_handle] = false; +} + +/** + * rht_format1() - populates a RHTE for format 1 + * @rhte: RHTE to populate. + * @lun_id: LUN ID of LUN associated with RHTE. + * @perm: Desired permissions for RHTE. + * @port_sel: Port selection mask + */ +static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm, + u32 port_sel) +{ + /* + * Populate the Format 1 RHT entry for direct access (physical + * LUN) using the synchronization sequence defined in the + * SISLite specification. + */ + struct sisl_rht_entry_f1 dummy = { 0 }; + struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte; + + memset(rhte_f1, 0, sizeof(*rhte_f1)); + rhte_f1->fp = SISL_RHT_FP(1U, 0); + dma_wmb(); /* Make setting of format bit visible */ + + rhte_f1->lun_id = lun_id; + dma_wmb(); /* Make setting of LUN id visible */ + + /* + * Use a dummy RHT Format 1 entry to build the second dword + * of the entry that must be populated in a single write when + * enabled (valid bit set to TRUE). + */ + dummy.valid = 0x80; + dummy.fp = SISL_RHT_FP(1U, perm); + dummy.port_sel = port_sel; + rhte_f1->dw = dummy.dw; + + dma_wmb(); /* Make remaining RHT entry fields visible */ +} + +/** + * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode + * @gli: LUN to attach. + * @mode: Desired mode of the LUN. + * @locked: Mutex status on current thread. 
+ * + * Return: 0 on success, -errno on failure + */ +int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked) +{ + int rc = 0; + + if (!locked) + mutex_lock(&gli->mutex); + + if (gli->mode == MODE_NONE) + gli->mode = mode; + else if (gli->mode != mode) { + pr_debug("%s: gli_mode=%d requested_mode=%d\n", + __func__, gli->mode, mode); + rc = -EINVAL; + goto out; + } + + gli->users++; + WARN_ON(gli->users <= 0); +out: + pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n", + __func__, rc, gli->mode, gli->users); + if (!locked) + mutex_unlock(&gli->mutex); + return rc; +} + +/** + * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode + * @gli: LUN to detach. + * + * When resetting the mode, terminate block allocation resources as they + * are no longer required (service is safe to call even when block allocation + * resources were not present - such as when transitioning from physical mode). + * These resources will be reallocated when needed (subsequent transition to + * virtual mode). + */ +void cxlflash_lun_detach(struct glun_info *gli) +{ + mutex_lock(&gli->mutex); + WARN_ON(gli->mode == MODE_NONE); + if (--gli->users == 0) { + gli->mode = MODE_NONE; + cxlflash_ba_terminate(&gli->blka.ba_lun); + } + pr_debug("%s: gli->users=%u\n", __func__, gli->users); + WARN_ON(gli->users < 0); + mutex_unlock(&gli->mutex); +} + +/** + * _cxlflash_disk_release() - releases the specified resource entry + * @sdev: SCSI device associated with LUN. + * @ctxi: Context owning resources. + * @release: Release ioctl data structure. + * + * For LUNs in virtual mode, the virtual LUN associated with the specified + * resource handle is resized to 0 prior to releasing the RHTE. Note that the + * AFU sync should _not_ be performed when the context is sitting on the error + * recovery list. A context on the error recovery list is not known to the AFU + * due to reset. When the context is recovered, it will be reattached and made + * known again to the AFU. + * + * Return: 0 on success, -errno on failure + */ +int _cxlflash_disk_release(struct scsi_device *sdev, + struct ctx_info *ctxi, + struct dk_cxlflash_release *release) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct llun_info *lli = sdev->hostdata; + struct glun_info *gli = lli->parent; + struct afu *afu = cfg->afu; + bool put_ctx = false; + + struct dk_cxlflash_resize size; + res_hndl_t rhndl = release->rsrc_handle; + + int rc = 0; + int rcr = 0; + u64 ctxid = DECODE_CTXID(release->context_id), + rctxid = release->context_id; + + struct sisl_rht_entry *rhte; + struct sisl_rht_entry_f1 *rhte_f1; + + dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n", + __func__, ctxid, release->rsrc_handle, gli->mode, gli->users); + + if (!ctxi) { + ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); + if (unlikely(!ctxi)) { + dev_dbg(dev, "%s: Bad context ctxid=%llu\n", + __func__, ctxid); + rc = -EINVAL; + goto out; + } + + put_ctx = true; + } + + rhte = get_rhte(ctxi, rhndl, lli); + if (unlikely(!rhte)) { + dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n", + __func__, rhndl); + rc = -EINVAL; + goto out; + } + + /* + * Resize to 0 for virtual LUNS by setting the size + * to 0. This will clear LXT_START and LXT_CNT fields + * in the RHT entry and properly sync with the AFU. + * + * Afterwards we clear the remaining fields. 
+ */ + switch (gli->mode) { + case MODE_VIRTUAL: + marshal_rele_to_resize(release, &size); + size.req_size = 0; + rc = _cxlflash_vlun_resize(sdev, ctxi, &size); + if (rc) { + dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc); + goto out; + } + + break; + case MODE_PHYSICAL: + /* + * Clear the Format 1 RHT entry for direct access + * (physical LUN) using the synchronization sequence + * defined in the SISLite specification. + */ + rhte_f1 = (struct sisl_rht_entry_f1 *)rhte; + + rhte_f1->valid = 0; + dma_wmb(); /* Make revocation of RHT entry visible */ + + rhte_f1->lun_id = 0; + dma_wmb(); /* Make clearing of LUN id visible */ + + rhte_f1->dw = 0; + dma_wmb(); /* Make RHT entry bottom-half clearing visible */ + + if (!ctxi->err_recovery_active) { + rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC); + if (unlikely(rcr)) + dev_dbg(dev, "%s: AFU sync failed rc=%d\n", + __func__, rcr); + } + break; + default: + WARN(1, "Unsupported LUN mode!"); + goto out; + } + + rhte_checkin(ctxi, rhte); + cxlflash_lun_detach(gli); + +out: + if (put_ctx) + put_context(ctxi); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +int cxlflash_disk_release(struct scsi_device *sdev, + struct dk_cxlflash_release *release) +{ + return _cxlflash_disk_release(sdev, NULL, release); +} + +/** + * destroy_context() - releases a context + * @cfg: Internal structure associated with the host. + * @ctxi: Context to release. + * + * This routine is safe to be called with a a non-initialized context. + * Also note that the routine conditionally checks for the existence + * of the context control map before clearing the RHT registers and + * context capabilities because it is possible to destroy a context + * while the context is in the error state (previous mapping was + * removed [so there is no need to worry about clearing] and context + * is waiting for a new mapping). + */ +static void destroy_context(struct cxlflash_cfg *cfg, + struct ctx_info *ctxi) +{ + struct afu *afu = cfg->afu; + + if (ctxi->initialized) { + WARN_ON(!list_empty(&ctxi->luns)); + + /* Clear RHT registers and drop all capabilities for context */ + if (afu->afu_map && ctxi->ctrl_map) { + writeq_be(0, &ctxi->ctrl_map->rht_start); + writeq_be(0, &ctxi->ctrl_map->rht_cnt_id); + writeq_be(0, &ctxi->ctrl_map->ctx_cap); + } + } + + /* Free memory associated with context */ + free_page((ulong)ctxi->rht_start); + kfree(ctxi->rht_needs_ws); + kfree(ctxi->rht_lun); + kfree(ctxi); +} + +/** + * create_context() - allocates and initializes a context + * @cfg: Internal structure associated with the host. 
+ * + * Return: Allocated context on success, NULL on failure + */ +static struct ctx_info *create_context(struct cxlflash_cfg *cfg) +{ + struct device *dev = &cfg->dev->dev; + struct ctx_info *ctxi = NULL; + struct llun_info **lli = NULL; + u8 *ws = NULL; + struct sisl_rht_entry *rhte; + + ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL); + lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL); + ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL); + if (unlikely(!ctxi || !lli || !ws)) { + dev_err(dev, "%s: Unable to allocate context\n", __func__); + goto err; + } + + rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL); + if (unlikely(!rhte)) { + dev_err(dev, "%s: Unable to allocate RHT\n", __func__); + goto err; + } + + ctxi->rht_lun = lli; + ctxi->rht_needs_ws = ws; + ctxi->rht_start = rhte; +out: + return ctxi; + +err: + kfree(ws); + kfree(lli); + kfree(ctxi); + ctxi = NULL; + goto out; +} + +/** + * init_context() - initializes a previously allocated context + * @ctxi: Previously allocated context + * @cfg: Internal structure associated with the host. + * @ctx: Previously obtained context cookie. + * @ctxid: Previously obtained process element associated with CXL context. + * @file: Previously obtained file associated with CXL context. + * @perms: User-specified permissions. + * @irqs: User-specified number of interrupts. + */ +static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg, + void *ctx, int ctxid, struct file *file, u32 perms, + u64 irqs) +{ + struct afu *afu = cfg->afu; + + ctxi->rht_perms = perms; + ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl; + ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid); + ctxi->irqs = irqs; + ctxi->pid = task_tgid_nr(current); /* tgid = pid */ + ctxi->ctx = ctx; + ctxi->cfg = cfg; + ctxi->file = file; + ctxi->initialized = true; + mutex_init(&ctxi->mutex); + kref_init(&ctxi->kref); + INIT_LIST_HEAD(&ctxi->luns); + INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */ +} + +/** + * remove_context() - context kref release handler + * @kref: Kernel reference associated with context to be removed. + * + * When a context no longer has any references it can safely be removed + * from global access and destroyed. Note that it is assumed the thread + * relinquishing access to the context holds its mutex. + */ +static void remove_context(struct kref *kref) +{ + struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref); + struct cxlflash_cfg *cfg = ctxi->cfg; + u64 ctxid = DECODE_CTXID(ctxi->ctxid); + + /* Remove context from table/error list */ + WARN_ON(!mutex_is_locked(&ctxi->mutex)); + ctxi->unavail = true; + mutex_unlock(&ctxi->mutex); + mutex_lock(&cfg->ctx_tbl_list_mutex); + mutex_lock(&ctxi->mutex); + + if (!list_empty(&ctxi->list)) + list_del(&ctxi->list); + cfg->ctx_tbl[ctxid] = NULL; + mutex_unlock(&cfg->ctx_tbl_list_mutex); + mutex_unlock(&ctxi->mutex); + + /* Context now completely uncoupled/unreachable */ + destroy_context(cfg, ctxi); +} + +/** + * _cxlflash_disk_detach() - detaches a LUN from a context + * @sdev: SCSI device associated with LUN. + * @ctxi: Context owning resources. + * @detach: Detach ioctl data structure. + * + * As part of the detach, all per-context resources associated with the LUN + * are cleaned up. When detaching the last LUN for a context, the context + * itself is cleaned up and released. 
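[Editor's note] The "last detach frees the context" behaviour described above rests on the context's kref (see remove_context() earlier in this file). For reference, a minimal, self-contained sketch of that idiom follows; the demo_* names are illustrative and not part of this driver.

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_ctx {
        struct kref kref;
        /* ... per-context resources ... */
};

/* Called automatically by the final kref_put(). */
static void demo_ctx_release(struct kref *kref)
{
        struct demo_ctx *c = container_of(kref, struct demo_ctx, kref);

        kfree(c);               /* tear down everything the context owns */
}

static struct demo_ctx *demo_ctx_create(void)
{
        struct demo_ctx *c = kzalloc(sizeof(*c), GFP_KERNEL);

        if (c)
                kref_init(&c->kref);    /* refcount starts at 1 */
        return c;
}

/* Each attached LUN takes a reference ... */
static void demo_ctx_get(struct demo_ctx *c)
{
        kref_get(&c->kref);
}

/* ... and drops it on detach; returns nonzero when the context was freed. */
static int demo_ctx_put(struct demo_ctx *c)
{
        return kref_put(&c->kref, demo_ctx_release);
}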
+ * + * Return: 0 on success, -errno on failure + */ +static int _cxlflash_disk_detach(struct scsi_device *sdev, + struct ctx_info *ctxi, + struct dk_cxlflash_detach *detach) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct llun_info *lli = sdev->hostdata; + struct lun_access *lun_access, *t; + struct dk_cxlflash_release rel; + bool put_ctx = false; + + int i; + int rc = 0; + u64 ctxid = DECODE_CTXID(detach->context_id), + rctxid = detach->context_id; + + dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid); + + if (!ctxi) { + ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); + if (unlikely(!ctxi)) { + dev_dbg(dev, "%s: Bad context ctxid=%llu\n", + __func__, ctxid); + rc = -EINVAL; + goto out; + } + + put_ctx = true; + } + + /* Cleanup outstanding resources tied to this LUN */ + if (ctxi->rht_out) { + marshal_det_to_rele(detach, &rel); + for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) { + if (ctxi->rht_lun[i] == lli) { + rel.rsrc_handle = i; + _cxlflash_disk_release(sdev, ctxi, &rel); + } + + /* No need to loop further if we're done */ + if (ctxi->rht_out == 0) + break; + } + } + + /* Take our LUN out of context, free the node */ + list_for_each_entry_safe(lun_access, t, &ctxi->luns, list) + if (lun_access->lli == lli) { + list_del(&lun_access->list); + kfree(lun_access); + lun_access = NULL; + break; + } + + /* + * Release the context reference and the sdev reference that + * bound this LUN to the context. + */ + if (kref_put(&ctxi->kref, remove_context)) + put_ctx = false; + scsi_device_put(sdev); +out: + if (put_ctx) + put_context(ctxi); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +static int cxlflash_disk_detach(struct scsi_device *sdev, + struct dk_cxlflash_detach *detach) +{ + return _cxlflash_disk_detach(sdev, NULL, detach); +} + +/** + * cxlflash_cxl_release() - release handler for adapter file descriptor + * @inode: File-system inode associated with fd. + * @file: File installed with adapter file descriptor. + * + * This routine is the release handler for the fops registered with + * the CXL services on an initial attach for a context. It is called + * when a close (explicity by the user or as part of a process tear + * down) is performed on the adapter file descriptor returned to the + * user. The user should be aware that explicitly performing a close + * considered catastrophic and subsequent usage of the superpipe API + * with previously saved off tokens will fail. + * + * This routine derives the context reference and calls detach for + * each LUN associated with the context.The final detach operation + * causes the context itself to be freed. With exception to when the + * CXL process element (context id) lookup fails (a case that should + * theoretically never occur), every call into this routine results + * in a complete freeing of a context. + * + * Detaching the LUN is typically an ioctl() operation and the underlying + * code assumes that ioctl_rwsem has been acquired as a reader. To support + * that design point, the semaphore is acquired and released around detach. 
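[Editor's note] cxlflash_cxl_release() below (and the mmap/fault handlers further down) recover the adapter configuration from the file's f_op pointer: because cxl_fops is embedded in struct cxlflash_cfg, container_of() maps the installed fops back to its owner. A minimal sketch of that idiom, with illustrative demo_* names:

#include <linux/container_of.h>
#include <linux/fs.h>

struct demo_adapter {
        /* ... adapter-wide state ... */
        struct file_operations demo_fops;       /* per-adapter copy handed out via get_fd() */
};

static int demo_release(struct inode *inode, struct file *file)
{
        /* file->f_op points at the embedded copy, so its container is
         * the adapter that installed it on attach.
         */
        struct demo_adapter *adap = container_of(file->f_op,
                                                 struct demo_adapter,
                                                 demo_fops);

        /* ... look up the context for this file and detach its LUNs ... */
        (void)adap;
        return 0;
}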
+ * + * Return: 0 on success + */ +static int cxlflash_cxl_release(struct inode *inode, struct file *file) +{ + struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, + cxl_fops); + void *ctx = cfg->ops->fops_get_context(file); + struct device *dev = &cfg->dev->dev; + struct ctx_info *ctxi = NULL; + struct dk_cxlflash_detach detach = { { 0 }, 0 }; + struct lun_access *lun_access, *t; + enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; + int ctxid; + + ctxid = cfg->ops->process_element(ctx); + if (unlikely(ctxid < 0)) { + dev_err(dev, "%s: Context %p was closed ctxid=%d\n", + __func__, ctx, ctxid); + goto out; + } + + ctxi = get_context(cfg, ctxid, file, ctrl); + if (unlikely(!ctxi)) { + ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE); + if (!ctxi) { + dev_dbg(dev, "%s: ctxid=%d already free\n", + __func__, ctxid); + goto out_release; + } + + dev_dbg(dev, "%s: Another process owns ctxid=%d\n", + __func__, ctxid); + put_context(ctxi); + goto out; + } + + dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid); + + down_read(&cfg->ioctl_rwsem); + detach.context_id = ctxi->ctxid; + list_for_each_entry_safe(lun_access, t, &ctxi->luns, list) + _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach); + up_read(&cfg->ioctl_rwsem); +out_release: + cfg->ops->fd_release(inode, file); +out: + dev_dbg(dev, "%s: returning\n", __func__); + return 0; +} + +/** + * unmap_context() - clears a previously established mapping + * @ctxi: Context owning the mapping. + * + * This routine is used to switch between the error notification page + * (dummy page of all 1's) and the real mapping (established by the CXL + * fault handler). + */ +static void unmap_context(struct ctx_info *ctxi) +{ + unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1); +} + +/** + * get_err_page() - obtains and allocates the error notification page + * @cfg: Internal structure associated with the host. + * + * Return: error notification page on success, NULL on failure + */ +static struct page *get_err_page(struct cxlflash_cfg *cfg) +{ + struct page *err_page = global.err_page; + struct device *dev = &cfg->dev->dev; + + if (unlikely(!err_page)) { + err_page = alloc_page(GFP_KERNEL); + if (unlikely(!err_page)) { + dev_err(dev, "%s: Unable to allocate err_page\n", + __func__); + goto out; + } + + memset(page_address(err_page), -1, PAGE_SIZE); + + /* Serialize update w/ other threads to avoid a leak */ + mutex_lock(&global.mutex); + if (likely(!global.err_page)) + global.err_page = err_page; + else { + __free_page(err_page); + err_page = global.err_page; + } + mutex_unlock(&global.mutex); + } + +out: + dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page); + return err_page; +} + +/** + * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor + * @vmf: VM fault associated with current fault. + * + * To support error notification via MMIO, faults are 'caught' by this routine + * that was inserted before passing back the adapter file descriptor on attach. + * When a fault occurs, this routine evaluates if error recovery is active and + * if so, installs the error page to 'notify' the user about the error state. + * During normal operation, the fault is simply handled by the original fault + * handler that was installed by CXL services as part of initializing the + * adapter file descriptor. The VMA's page protection bits are toggled to + * indicate cached/not-cached depending on the memory backing the fault. 
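[Editor's note] A condensed sketch of the interposition pattern the next routine implements: delegate to the saved vm_ops during normal operation, or hand back a pre-built notification page while recovery is active. The demo_* names and the demo_state_from_file() lookup are illustrative, not driver API.

#include <linux/mm.h>

struct demo_mmap_state {
        const struct vm_operations_struct *saved_vmops; /* original CXL vm_ops */
        struct page *err_page;                          /* page of all 0xFF */
        bool in_recovery;
};

static struct demo_mmap_state *demo_state_from_file(struct file *file); /* hypothetical */

static vm_fault_t demo_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct demo_mmap_state *s = demo_state_from_file(vma->vm_file);

        if (!s->in_recovery) {
                /* Normal case: MMIO space, so map it non-cached and let
                 * the original fault handler establish the mapping.
                 */
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                return s->saved_vmops->fault(vmf);
        }

        /* Error case: serve the all-ones page so reads of the mapping see ~0. */
        get_page(s->err_page);
        vmf->page = s->err_page;
        vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
        return 0;
}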
+ * + * Return: 0 on success, VM_FAULT_SIGBUS on failure + */ +static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct file *file = vma->vm_file; + struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, + cxl_fops); + void *ctx = cfg->ops->fops_get_context(file); + struct device *dev = &cfg->dev->dev; + struct ctx_info *ctxi = NULL; + struct page *err_page = NULL; + enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; + vm_fault_t rc = 0; + int ctxid; + + ctxid = cfg->ops->process_element(ctx); + if (unlikely(ctxid < 0)) { + dev_err(dev, "%s: Context %p was closed ctxid=%d\n", + __func__, ctx, ctxid); + goto err; + } + + ctxi = get_context(cfg, ctxid, file, ctrl); + if (unlikely(!ctxi)) { + dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid); + goto err; + } + + dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid); + + if (likely(!ctxi->err_recovery_active)) { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + rc = ctxi->cxl_mmap_vmops->fault(vmf); + } else { + dev_dbg(dev, "%s: err recovery active, use err_page\n", + __func__); + + err_page = get_err_page(cfg); + if (unlikely(!err_page)) { + dev_err(dev, "%s: Could not get err_page\n", __func__); + rc = VM_FAULT_RETRY; + goto out; + } + + get_page(err_page); + vmf->page = err_page; + vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); + } + +out: + if (likely(ctxi)) + put_context(ctxi); + dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc); + return rc; + +err: + rc = VM_FAULT_SIGBUS; + goto out; +} + +/* + * Local MMAP vmops to 'catch' faults + */ +static const struct vm_operations_struct cxlflash_mmap_vmops = { + .fault = cxlflash_mmap_fault, +}; + +/** + * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor + * @file: File installed with adapter file descriptor. + * @vma: VM area associated with mapping. + * + * Installs local mmap vmops to 'catch' faults for error notification support. + * + * Return: 0 on success, -errno on failure + */ +static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, + cxl_fops); + void *ctx = cfg->ops->fops_get_context(file); + struct device *dev = &cfg->dev->dev; + struct ctx_info *ctxi = NULL; + enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; + int ctxid; + int rc = 0; + + ctxid = cfg->ops->process_element(ctx); + if (unlikely(ctxid < 0)) { + dev_err(dev, "%s: Context %p was closed ctxid=%d\n", + __func__, ctx, ctxid); + rc = -EIO; + goto out; + } + + ctxi = get_context(cfg, ctxid, file, ctrl); + if (unlikely(!ctxi)) { + dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid); + rc = -EIO; + goto out; + } + + dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid); + + rc = cfg->ops->fd_mmap(file, vma); + if (likely(!rc)) { + /* Insert ourself in the mmap fault handler path */ + ctxi->cxl_mmap_vmops = vma->vm_ops; + vma->vm_ops = &cxlflash_mmap_vmops; + } + +out: + if (likely(ctxi)) + put_context(ctxi); + return rc; +} + +const struct file_operations cxlflash_cxl_fops = { + .owner = THIS_MODULE, + .mmap = cxlflash_cxl_mmap, + .release = cxlflash_cxl_release, +}; + +/** + * cxlflash_mark_contexts_error() - move contexts to error state and list + * @cfg: Internal structure associated with the host. + * + * A context is only moved over to the error list when there are no outstanding + * references to it. This ensures that a running operation has completed. 
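[Editor's note] This table walk is intended for the error/reset path. As the cxlflash_ioctl() comment later in this file explains, that path first drains in-flight ioctls by taking the ioctl semaphore for write. A condensed sketch of that drain follows; the sequencing is illustrative and STATE_NORMAL is an assumed name for the ready state.

static void demo_drain_and_reset(struct cxlflash_cfg *cfg)
{
        /* Every ioctl holds ioctl_rwsem for read; taking it for write
         * waits until they complete and holds off new arrivals.
         */
        down_write(&cfg->ioctl_rwsem);
        cfg->state = STATE_RESET;       /* new ioctls will park in check_state() */
        up_write(&cfg->ioctl_rwsem);

        /* ... reset the adapter, mark contexts in error, etc. ... */

        cfg->state = STATE_NORMAL;      /* assumed ready-state label */
        wake_up_all(&cfg->reset_waitq); /* releases waiters in check_state() */
}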
+ * + * Return: 0 on success, -errno on failure + */ +int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg) +{ + int i, rc = 0; + struct ctx_info *ctxi = NULL; + + mutex_lock(&cfg->ctx_tbl_list_mutex); + + for (i = 0; i < MAX_CONTEXT; i++) { + ctxi = cfg->ctx_tbl[i]; + if (ctxi) { + mutex_lock(&ctxi->mutex); + cfg->ctx_tbl[i] = NULL; + list_add(&ctxi->list, &cfg->ctx_err_recovery); + ctxi->err_recovery_active = true; + ctxi->ctrl_map = NULL; + unmap_context(ctxi); + mutex_unlock(&ctxi->mutex); + } + } + + mutex_unlock(&cfg->ctx_tbl_list_mutex); + return rc; +} + +/* + * Dummy NULL fops + */ +static const struct file_operations null_fops = { + .owner = THIS_MODULE, +}; + +/** + * check_state() - checks and responds to the current adapter state + * @cfg: Internal structure associated with the host. + * + * This routine can block and should only be used on process context. + * It assumes that the caller is an ioctl thread and holding the ioctl + * read semaphore. This is temporarily let up across the wait to allow + * for draining actively running ioctls. Also note that when waking up + * from waiting in reset, the state is unknown and must be checked again + * before proceeding. + * + * Return: 0 on success, -errno on failure + */ +int check_state(struct cxlflash_cfg *cfg) +{ + struct device *dev = &cfg->dev->dev; + int rc = 0; + +retry: + switch (cfg->state) { + case STATE_RESET: + dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__); + up_read(&cfg->ioctl_rwsem); + rc = wait_event_interruptible(cfg->reset_waitq, + cfg->state != STATE_RESET); + down_read(&cfg->ioctl_rwsem); + if (unlikely(rc)) + break; + goto retry; + case STATE_FAILTERM: + dev_dbg(dev, "%s: Failed/Terminating\n", __func__); + rc = -ENODEV; + break; + default: + break; + } + + return rc; +} + +/** + * cxlflash_disk_attach() - attach a LUN to a context + * @sdev: SCSI device associated with LUN. + * @attach: Attach ioctl data structure. + * + * Creates a context and attaches LUN to it. A LUN can only be attached + * one time to a context (subsequent attaches for the same context/LUN pair + * are not supported). Additional LUNs can be attached to a context by + * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header. 
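[Editor's note] For context, a userspace sketch of the attach flow this ioctl serves, assuming the uapi definitions in <scsi/cxlflash_ioctl.h> and that the superpipe ioctls are reachable through the LUN's block device node; error handling is trimmed and the device path is a placeholder.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <scsi/cxlflash_ioctl.h>

static int demo_attach(const char *dev_path /* e.g. "/dev/sdX" */)
{
        struct dk_cxlflash_attach attach;
        int fd = open(dev_path, O_RDWR);
        void *mmio;

        if (fd < 0)
                return -1;

        memset(&attach, 0, sizeof(attach));
        attach.hdr.version = DK_CXLFLASH_VERSION_0;
        attach.hdr.flags = O_RDWR;      /* translated to AFU permission bits */
        attach.num_interrupts = 4;      /* driver caps this at 4 */

        if (ioctl(fd, DK_CXLFLASH_ATTACH, &attach))
                return -1;

        /* attach.context_id names the context in later ioctls (or with the
         * REUSE_CONTEXT flag); attach.adap_fd is the adapter fd whose mmap
         * exposes attach.mmio_size bytes of per-context MMIO.
         */
        mmio = mmap(NULL, attach.mmio_size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, (int)attach.adap_fd, 0);
        return mmio == MAP_FAILED ? -1 : 0;
}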
+ * + * Return: 0 on success, -errno on failure + */ +static int cxlflash_disk_attach(struct scsi_device *sdev, + struct dk_cxlflash_attach *attach) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct afu *afu = cfg->afu; + struct llun_info *lli = sdev->hostdata; + struct glun_info *gli = lli->parent; + struct ctx_info *ctxi = NULL; + struct lun_access *lun_access = NULL; + int rc = 0; + u32 perms; + int ctxid = -1; + u64 irqs = attach->num_interrupts; + u64 flags = 0UL; + u64 rctxid = 0UL; + struct file *file = NULL; + + void *ctx = NULL; + + int fd = -1; + + if (irqs > 4) { + dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n", + __func__, irqs); + rc = -EINVAL; + goto out; + } + + if (gli->max_lba == 0) { + dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n", + __func__, lli->lun_id[sdev->channel]); + rc = read_cap16(sdev, lli); + if (rc) { + dev_err(dev, "%s: Invalid device rc=%d\n", + __func__, rc); + rc = -ENODEV; + goto out; + } + dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba); + dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len); + } + + if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) { + rctxid = attach->context_id; + ctxi = get_context(cfg, rctxid, NULL, 0); + if (!ctxi) { + dev_dbg(dev, "%s: Bad context rctxid=%016llx\n", + __func__, rctxid); + rc = -EINVAL; + goto out; + } + + list_for_each_entry(lun_access, &ctxi->luns, list) + if (lun_access->lli == lli) { + dev_dbg(dev, "%s: Already attached\n", + __func__); + rc = -EINVAL; + goto out; + } + } + + rc = scsi_device_get(sdev); + if (unlikely(rc)) { + dev_err(dev, "%s: Unable to get sdev reference\n", __func__); + goto out; + } + + lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL); + if (unlikely(!lun_access)) { + dev_err(dev, "%s: Unable to allocate lun_access\n", __func__); + rc = -ENOMEM; + goto err; + } + + lun_access->lli = lli; + lun_access->sdev = sdev; + + /* Non-NULL context indicates reuse (another context reference) */ + if (ctxi) { + dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n", + __func__, rctxid); + kref_get(&ctxi->kref); + list_add(&lun_access->list, &ctxi->luns); + goto out_attach; + } + + ctxi = create_context(cfg); + if (unlikely(!ctxi)) { + dev_err(dev, "%s: Failed to create context ctxid=%d\n", + __func__, ctxid); + rc = -ENOMEM; + goto err; + } + + ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie); + if (IS_ERR_OR_NULL(ctx)) { + dev_err(dev, "%s: Could not initialize context %p\n", + __func__, ctx); + rc = -ENODEV; + goto err; + } + + rc = cfg->ops->start_work(ctx, irqs); + if (unlikely(rc)) { + dev_dbg(dev, "%s: Could not start context rc=%d\n", + __func__, rc); + goto err; + } + + ctxid = cfg->ops->process_element(ctx); + if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { + dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid); + rc = -EPERM; + goto err; + } + + file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd); + if (unlikely(fd < 0)) { + rc = -ENODEV; + dev_err(dev, "%s: Could not get file descriptor\n", __func__); + goto err; + } + + /* Translate read/write O_* flags from fcntl.h to AFU permission bits */ + perms = SISL_RHT_PERM(attach->hdr.flags + 1); + + /* Context mutex is locked upon return */ + init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs); + + rc = afu_attach(cfg, ctxi); + if (unlikely(rc)) { + dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc); + goto err; + } + + /* + * No error paths after this point. 
Once the fd is installed it's + * visible to user space and can't be undone safely on this thread. + * There is no need to worry about a deadlock here because no one + * knows about us yet; we can be the only one holding our mutex. + */ + list_add(&lun_access->list, &ctxi->luns); + mutex_lock(&cfg->ctx_tbl_list_mutex); + mutex_lock(&ctxi->mutex); + cfg->ctx_tbl[ctxid] = ctxi; + mutex_unlock(&cfg->ctx_tbl_list_mutex); + fd_install(fd, file); + +out_attach: + if (fd != -1) + flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD; + if (afu_is_sq_cmd_mode(afu)) + flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE; + + attach->hdr.return_flags = flags; + attach->context_id = ctxi->ctxid; + attach->block_size = gli->blk_len; + attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea); + attach->last_lba = gli->max_lba; + attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT; + attach->max_xfer /= gli->blk_len; + +out: + attach->adap_fd = fd; + + if (ctxi) + put_context(ctxi); + + dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n", + __func__, ctxid, fd, attach->block_size, rc, attach->last_lba); + return rc; + +err: + /* Cleanup CXL context; okay to 'stop' even if it was not started */ + if (!IS_ERR_OR_NULL(ctx)) { + cfg->ops->stop_context(ctx); + cfg->ops->release_context(ctx); + ctx = NULL; + } + + /* + * Here, we're overriding the fops with a dummy all-NULL fops because + * fput() calls the release fop, which will cause us to mistakenly + * call into the CXL code. Rather than try to add yet more complexity + * to that routine (cxlflash_cxl_release) we should try to fix the + * issue here. + */ + if (fd > 0) { + file->f_op = &null_fops; + fput(file); + put_unused_fd(fd); + fd = -1; + file = NULL; + } + + /* Cleanup our context */ + if (ctxi) { + destroy_context(cfg, ctxi); + ctxi = NULL; + } + + kfree(lun_access); + scsi_device_put(sdev); + goto out; +} + +/** + * recover_context() - recovers a context in error + * @cfg: Internal structure associated with the host. + * @ctxi: Context to release. + * @adap_fd: Adapter file descriptor associated with new/recovered context. + * + * Restablishes the state for a context-in-error. + * + * Return: 0 on success, -errno on failure + */ +static int recover_context(struct cxlflash_cfg *cfg, + struct ctx_info *ctxi, + int *adap_fd) +{ + struct device *dev = &cfg->dev->dev; + int rc = 0; + int fd = -1; + int ctxid = -1; + struct file *file; + void *ctx; + struct afu *afu = cfg->afu; + + ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie); + if (IS_ERR_OR_NULL(ctx)) { + dev_err(dev, "%s: Could not initialize context %p\n", + __func__, ctx); + rc = -ENODEV; + goto out; + } + + rc = cfg->ops->start_work(ctx, ctxi->irqs); + if (unlikely(rc)) { + dev_dbg(dev, "%s: Could not start context rc=%d\n", + __func__, rc); + goto err1; + } + + ctxid = cfg->ops->process_element(ctx); + if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { + dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid); + rc = -EPERM; + goto err2; + } + + file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd); + if (unlikely(fd < 0)) { + rc = -ENODEV; + dev_err(dev, "%s: Could not get file descriptor\n", __func__); + goto err2; + } + + /* Update with new MMIO area based on updated context id */ + ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl; + + rc = afu_attach(cfg, ctxi); + if (rc) { + dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc); + goto err3; + } + + /* + * No error paths after this point. 
Once the fd is installed it's + * visible to user space and can't be undone safely on this thread. + */ + ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid); + ctxi->ctx = ctx; + ctxi->file = file; + + /* + * Put context back in table (note the reinit of the context list); + * we must first drop the context's mutex and then acquire it in + * order with the table/list mutex to avoid a deadlock - safe to do + * here because no one can find us at this moment in time. + */ + mutex_unlock(&ctxi->mutex); + mutex_lock(&cfg->ctx_tbl_list_mutex); + mutex_lock(&ctxi->mutex); + list_del_init(&ctxi->list); + cfg->ctx_tbl[ctxid] = ctxi; + mutex_unlock(&cfg->ctx_tbl_list_mutex); + fd_install(fd, file); + *adap_fd = fd; +out: + dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n", + __func__, ctxid, fd, rc); + return rc; + +err3: + fput(file); + put_unused_fd(fd); +err2: + cfg->ops->stop_context(ctx); +err1: + cfg->ops->release_context(ctx); + goto out; +} + +/** + * cxlflash_afu_recover() - initiates AFU recovery + * @sdev: SCSI device associated with LUN. + * @recover: Recover ioctl data structure. + * + * Only a single recovery is allowed at a time to avoid exhausting CXL + * resources (leading to recovery failure) in the event that we're up + * against the maximum number of contexts limit. For similar reasons, + * a context recovery is retried if there are multiple recoveries taking + * place at the same time and the failure was due to CXL services being + * unable to keep up. + * + * As this routine is called on ioctl context, it holds the ioctl r/w + * semaphore that is used to drain ioctls in recovery scenarios. The + * implementation to achieve the pacing described above (a local mutex) + * requires that the ioctl r/w semaphore be dropped and reacquired to + * avoid a 3-way deadlock when multiple process recoveries operate in + * parallel. + * + * Because a user can detect an error condition before the kernel, it is + * quite possible for this routine to act as the kernel's EEH detection + * source (MMIO read of mbox_r). Because of this, there is a window of + * time where an EEH might have been detected but not yet 'serviced' + * (callback invoked, causing the device to enter reset state). To avoid + * looping in this routine during that window, a 1 second sleep is in place + * between the time the MMIO failure is detected and the time a wait on the + * reset wait queue is attempted via check_state(). 
+ * + * Return: 0 on success, -errno on failure + */ +static int cxlflash_afu_recover(struct scsi_device *sdev, + struct dk_cxlflash_recover_afu *recover) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct llun_info *lli = sdev->hostdata; + struct afu *afu = cfg->afu; + struct ctx_info *ctxi = NULL; + struct mutex *mutex = &cfg->ctx_recovery_mutex; + struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); + u64 flags; + u64 ctxid = DECODE_CTXID(recover->context_id), + rctxid = recover->context_id; + long reg; + bool locked = true; + int lretry = 20; /* up to 2 seconds */ + int new_adap_fd = -1; + int rc = 0; + + atomic_inc(&cfg->recovery_threads); + up_read(&cfg->ioctl_rwsem); + rc = mutex_lock_interruptible(mutex); + down_read(&cfg->ioctl_rwsem); + if (rc) { + locked = false; + goto out; + } + + rc = check_state(cfg); + if (rc) { + dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc); + rc = -ENODEV; + goto out; + } + + dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n", + __func__, recover->reason, rctxid); + +retry: + /* Ensure that this process is attached to the context */ + ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); + if (unlikely(!ctxi)) { + dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid); + rc = -EINVAL; + goto out; + } + + if (ctxi->err_recovery_active) { +retry_recover: + rc = recover_context(cfg, ctxi, &new_adap_fd); + if (unlikely(rc)) { + dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n", + __func__, ctxid, rc); + if ((rc == -ENODEV) && + ((atomic_read(&cfg->recovery_threads) > 1) || + (lretry--))) { + dev_dbg(dev, "%s: Going to try again\n", + __func__); + mutex_unlock(mutex); + msleep(100); + rc = mutex_lock_interruptible(mutex); + if (rc) { + locked = false; + goto out; + } + goto retry_recover; + } + + goto out; + } + + ctxi->err_recovery_active = false; + + flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD | + DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET; + if (afu_is_sq_cmd_mode(afu)) + flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE; + + recover->hdr.return_flags = flags; + recover->context_id = ctxi->ctxid; + recover->adap_fd = new_adap_fd; + recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea); + goto out; + } + + /* Test if in error state */ + reg = readq_be(&hwq->ctrl_map->mbox_r); + if (reg == -1) { + dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__); + + /* + * Before checking the state, put back the context obtained with + * get_context() as it is no longer needed and sleep for a short + * period of time (see prolog notes). + */ + put_context(ctxi); + ctxi = NULL; + ssleep(1); + rc = check_state(cfg); + if (unlikely(rc)) + goto out; + goto retry; + } + + dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__); +out: + if (likely(ctxi)) + put_context(ctxi); + if (locked) + mutex_unlock(mutex); + atomic_dec_if_positive(&cfg->recovery_threads); + return rc; +} + +/** + * process_sense() - evaluates and processes sense data + * @sdev: SCSI device associated with LUN. + * @verify: Verify ioctl data structure. 
+ * + * Return: 0 on success, -errno on failure + */ +static int process_sense(struct scsi_device *sdev, + struct dk_cxlflash_verify *verify) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct llun_info *lli = sdev->hostdata; + struct glun_info *gli = lli->parent; + u64 prev_lba = gli->max_lba; + struct scsi_sense_hdr sshdr = { 0 }; + int rc = 0; + + rc = scsi_normalize_sense((const u8 *)&verify->sense_data, + DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr); + if (!rc) { + dev_err(dev, "%s: Failed to normalize sense data\n", __func__); + rc = -EINVAL; + goto out; + } + + switch (sshdr.sense_key) { + case NO_SENSE: + case RECOVERED_ERROR: + case NOT_READY: + break; + case UNIT_ATTENTION: + switch (sshdr.asc) { + case 0x29: /* Power on Reset or Device Reset */ + fallthrough; + case 0x2A: /* Device settings/capacity changed */ + rc = read_cap16(sdev, lli); + if (rc) { + rc = -ENODEV; + break; + } + if (prev_lba != gli->max_lba) + dev_dbg(dev, "%s: Capacity changed old=%lld " + "new=%lld\n", __func__, prev_lba, + gli->max_lba); + break; + case 0x3F: /* Report LUNs changed, Rescan. */ + scsi_scan_host(cfg->host); + break; + default: + rc = -EIO; + break; + } + break; + default: + rc = -EIO; + break; + } +out: + dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__, + sshdr.sense_key, sshdr.asc, sshdr.ascq, rc); + return rc; +} + +/** + * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes + * @sdev: SCSI device associated with LUN. + * @verify: Verify ioctl data structure. + * + * Return: 0 on success, -errno on failure + */ +static int cxlflash_disk_verify(struct scsi_device *sdev, + struct dk_cxlflash_verify *verify) +{ + int rc = 0; + struct ctx_info *ctxi = NULL; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct llun_info *lli = sdev->hostdata; + struct glun_info *gli = lli->parent; + struct sisl_rht_entry *rhte = NULL; + res_hndl_t rhndl = verify->rsrc_handle; + u64 ctxid = DECODE_CTXID(verify->context_id), + rctxid = verify->context_id; + u64 last_lba = 0; + + dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, " + "flags=%016llx\n", __func__, ctxid, verify->rsrc_handle, + verify->hint, verify->hdr.flags); + + ctxi = get_context(cfg, rctxid, lli, 0); + if (unlikely(!ctxi)) { + dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid); + rc = -EINVAL; + goto out; + } + + rhte = get_rhte(ctxi, rhndl, lli); + if (unlikely(!rhte)) { + dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n", + __func__, rhndl); + rc = -EINVAL; + goto out; + } + + /* + * Look at the hint/sense to see if it requires us to redrive + * inquiry (i.e. the Unit attention is due to the WWN changing). + */ + if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) { + /* Can't hold mutex across process_sense/read_cap16, + * since we could have an intervening EEH event. 
+ */ + ctxi->unavail = true; + mutex_unlock(&ctxi->mutex); + rc = process_sense(sdev, verify); + if (unlikely(rc)) { + dev_err(dev, "%s: Failed to validate sense data (%d)\n", + __func__, rc); + mutex_lock(&ctxi->mutex); + ctxi->unavail = false; + goto out; + } + mutex_lock(&ctxi->mutex); + ctxi->unavail = false; + } + + switch (gli->mode) { + case MODE_PHYSICAL: + last_lba = gli->max_lba; + break; + case MODE_VIRTUAL: + /* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */ + last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len); + last_lba /= CXLFLASH_BLOCK_SIZE; + last_lba--; + break; + default: + WARN(1, "Unsupported LUN mode!"); + } + + verify->last_lba = last_lba; + +out: + if (likely(ctxi)) + put_context(ctxi); + dev_dbg(dev, "%s: returning rc=%d llba=%llx\n", + __func__, rc, verify->last_lba); + return rc; +} + +/** + * decode_ioctl() - translates an encoded ioctl to an easily identifiable string + * @cmd: The ioctl command to decode. + * + * Return: A string identifying the decoded ioctl. + */ +static char *decode_ioctl(unsigned int cmd) +{ + switch (cmd) { + case DK_CXLFLASH_ATTACH: + return __stringify_1(DK_CXLFLASH_ATTACH); + case DK_CXLFLASH_USER_DIRECT: + return __stringify_1(DK_CXLFLASH_USER_DIRECT); + case DK_CXLFLASH_USER_VIRTUAL: + return __stringify_1(DK_CXLFLASH_USER_VIRTUAL); + case DK_CXLFLASH_VLUN_RESIZE: + return __stringify_1(DK_CXLFLASH_VLUN_RESIZE); + case DK_CXLFLASH_RELEASE: + return __stringify_1(DK_CXLFLASH_RELEASE); + case DK_CXLFLASH_DETACH: + return __stringify_1(DK_CXLFLASH_DETACH); + case DK_CXLFLASH_VERIFY: + return __stringify_1(DK_CXLFLASH_VERIFY); + case DK_CXLFLASH_VLUN_CLONE: + return __stringify_1(DK_CXLFLASH_VLUN_CLONE); + case DK_CXLFLASH_RECOVER_AFU: + return __stringify_1(DK_CXLFLASH_RECOVER_AFU); + case DK_CXLFLASH_MANAGE_LUN: + return __stringify_1(DK_CXLFLASH_MANAGE_LUN); + } + + return "UNKNOWN"; +} + +/** + * cxlflash_disk_direct_open() - opens a direct (physical) disk + * @sdev: SCSI device associated with LUN. + * @arg: UDirect ioctl data structure. + * + * On successful return, the user is informed of the resource handle + * to be used to identify the direct lun and the size (in blocks) of + * the direct lun in last LBA format. 
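[Editor's note] Continuing the userspace sketch from the attach example above, opening a physical LUN in direct mode is a second ioctl on the same device node, again assuming the uapi definitions in <scsi/cxlflash_ioctl.h>; error handling is trimmed.

#include <string.h>
#include <sys/ioctl.h>
#include <scsi/cxlflash_ioctl.h>

static int demo_user_direct(int disk_fd, __u64 context_id)
{
        struct dk_cxlflash_udirect udirect;

        memset(&udirect, 0, sizeof(udirect));
        udirect.hdr.version = DK_CXLFLASH_VERSION_0;
        udirect.context_id = context_id;        /* from DK_CXLFLASH_ATTACH */

        if (ioctl(disk_fd, DK_CXLFLASH_USER_DIRECT, &udirect))
                return -1;

        /* udirect.rsrc_handle identifies this LUN in requests issued through
         * the attached context; udirect.last_lba gives its size in blocks,
         * expressed as the last addressable LBA.
         */
        return 0;
}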
+ * + * Return: 0 on success, -errno on failure + */ +static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct afu *afu = cfg->afu; + struct llun_info *lli = sdev->hostdata; + struct glun_info *gli = lli->parent; + struct dk_cxlflash_release rel = { { 0 }, 0 }; + + struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg; + + u64 ctxid = DECODE_CTXID(pphys->context_id), + rctxid = pphys->context_id; + u64 lun_size = 0; + u64 last_lba = 0; + u64 rsrc_handle = -1; + u32 port = CHAN2PORTMASK(sdev->channel); + + int rc = 0; + + struct ctx_info *ctxi = NULL; + struct sisl_rht_entry *rhte = NULL; + + dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size); + + rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false); + if (unlikely(rc)) { + dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__); + goto out; + } + + ctxi = get_context(cfg, rctxid, lli, 0); + if (unlikely(!ctxi)) { + dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid); + rc = -EINVAL; + goto err1; + } + + rhte = rhte_checkout(ctxi, lli); + if (unlikely(!rhte)) { + dev_dbg(dev, "%s: Too many opens ctxid=%lld\n", + __func__, ctxid); + rc = -EMFILE; /* too many opens */ + goto err1; + } + + rsrc_handle = (rhte - ctxi->rht_start); + + rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port); + + last_lba = gli->max_lba; + pphys->hdr.return_flags = 0; + pphys->last_lba = last_lba; + pphys->rsrc_handle = rsrc_handle; + + rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC); + if (unlikely(rc)) { + dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc); + goto err2; + } + +out: + if (likely(ctxi)) + put_context(ctxi); + dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n", + __func__, rsrc_handle, rc, last_lba); + return rc; + +err2: + marshal_udir_to_rele(pphys, &rel); + _cxlflash_disk_release(sdev, ctxi, &rel); + goto out; +err1: + cxlflash_lun_detach(gli); + goto out; +} + +/** + * ioctl_common() - common IOCTL handler for driver + * @sdev: SCSI device associated with LUN. + * @cmd: IOCTL command. + * + * Handles common fencing operations that are valid for multiple ioctls. Always + * allow through ioctls that are cleanup oriented in nature, even when operating + * in a failed/terminating state. + * + * Return: 0 on success, -errno on failure + */ +static int ioctl_common(struct scsi_device *sdev, unsigned int cmd) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct llun_info *lli = sdev->hostdata; + int rc = 0; + + if (unlikely(!lli)) { + dev_dbg(dev, "%s: Unknown LUN\n", __func__); + rc = -EINVAL; + goto out; + } + + rc = check_state(cfg); + if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) { + switch (cmd) { + case DK_CXLFLASH_VLUN_RESIZE: + case DK_CXLFLASH_RELEASE: + case DK_CXLFLASH_DETACH: + dev_dbg(dev, "%s: Command override rc=%d\n", + __func__, rc); + rc = 0; + break; + } + } +out: + return rc; +} + +/** + * cxlflash_ioctl() - IOCTL handler for driver + * @sdev: SCSI device associated with LUN. + * @cmd: IOCTL command. + * @arg: Userspace ioctl data structure. + * + * A read/write semaphore is used to implement a 'drain' of currently + * running ioctls. The read semaphore is taken at the beginning of each + * ioctl thread and released upon concluding execution. 
Additionally the + * semaphore should be released and then reacquired in any ioctl execution + * path which will wait for an event to occur that is outside the scope of + * the ioctl (i.e. an adapter reset). To drain the ioctls currently running, + * a thread simply needs to acquire the write semaphore. + * + * Return: 0 on success, -errno on failure + */ +int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg) +{ + typedef int (*sioctl) (struct scsi_device *, void *); + + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct afu *afu = cfg->afu; + struct dk_cxlflash_hdr *hdr; + char buf[sizeof(union cxlflash_ioctls)]; + size_t size = 0; + bool known_ioctl = false; + int idx; + int rc = 0; + struct Scsi_Host *shost = sdev->host; + sioctl do_ioctl = NULL; + + static const struct { + size_t size; + sioctl ioctl; + } ioctl_tbl[] = { /* NOTE: order matters here */ + {sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach}, + {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open}, + {sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release}, + {sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach}, + {sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify}, + {sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover}, + {sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun}, + {sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open}, + {sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize}, + {sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone}, + }; + + /* Hold read semaphore so we can drain if needed */ + down_read(&cfg->ioctl_rwsem); + + /* Restrict command set to physical support only for internal LUN */ + if (afu->internal_lun) + switch (cmd) { + case DK_CXLFLASH_RELEASE: + case DK_CXLFLASH_USER_VIRTUAL: + case DK_CXLFLASH_VLUN_RESIZE: + case DK_CXLFLASH_VLUN_CLONE: + dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n", + __func__, decode_ioctl(cmd), afu->internal_lun); + rc = -EINVAL; + goto cxlflash_ioctl_exit; + } + + switch (cmd) { + case DK_CXLFLASH_ATTACH: + case DK_CXLFLASH_USER_DIRECT: + case DK_CXLFLASH_RELEASE: + case DK_CXLFLASH_DETACH: + case DK_CXLFLASH_VERIFY: + case DK_CXLFLASH_RECOVER_AFU: + case DK_CXLFLASH_USER_VIRTUAL: + case DK_CXLFLASH_VLUN_RESIZE: + case DK_CXLFLASH_VLUN_CLONE: + dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n", + __func__, decode_ioctl(cmd), cmd, shost->host_no, + sdev->channel, sdev->id, sdev->lun); + rc = ioctl_common(sdev, cmd); + if (unlikely(rc)) + goto cxlflash_ioctl_exit; + + fallthrough; + + case DK_CXLFLASH_MANAGE_LUN: + known_ioctl = true; + idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH); + size = ioctl_tbl[idx].size; + do_ioctl = ioctl_tbl[idx].ioctl; + + if (likely(do_ioctl)) + break; + + fallthrough; + default: + rc = -EINVAL; + goto cxlflash_ioctl_exit; + } + + if (unlikely(copy_from_user(&buf, arg, size))) { + dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n", + __func__, size, cmd, decode_ioctl(cmd), arg); + rc = -EFAULT; + goto cxlflash_ioctl_exit; + } + + hdr = (struct dk_cxlflash_hdr *)&buf; + if (hdr->version != DK_CXLFLASH_VERSION_0) { + dev_dbg(dev, "%s: Version %u not supported for %s\n", + __func__, hdr->version, decode_ioctl(cmd)); + rc = -EINVAL; + goto cxlflash_ioctl_exit; + } + + if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) { + dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__); + rc = 
-EINVAL; + goto cxlflash_ioctl_exit; + } + + rc = do_ioctl(sdev, (void *)&buf); + if (likely(!rc)) + if (unlikely(copy_to_user(arg, &buf, size))) { + dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n", + __func__, size, cmd, decode_ioctl(cmd), arg); + rc = -EFAULT; + } + + /* fall through to exit */ + +cxlflash_ioctl_exit: + up_read(&cfg->ioctl_rwsem); + if (unlikely(rc && known_ioctl)) + dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) " + "returned rc %d\n", __func__, + decode_ioctl(cmd), cmd, shost->host_no, + sdev->channel, sdev->id, sdev->lun, rc); + else + dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) " + "returned rc %d\n", __func__, decode_ioctl(cmd), + cmd, shost->host_no, sdev->channel, sdev->id, + sdev->lun, rc); + return rc; +} diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h new file mode 100644 index 000000000..0e3b45964 --- /dev/null +++ b/drivers/scsi/cxlflash/superpipe.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * CXL Flash Device Driver + * + * Written by: Manoj N. Kumar , IBM Corporation + * Matthew R. Ochs , IBM Corporation + * + * Copyright (C) 2015 IBM Corporation + */ + +#ifndef _CXLFLASH_SUPERPIPE_H +#define _CXLFLASH_SUPERPIPE_H + +extern struct cxlflash_global global; + +/* + * Terminology: use afu (and not adapter) to refer to the HW. + * Adapter is the entire slot and includes PSL out of which + * only the AFU is visible to user space. + */ + +/* Chunk size parms: note sislite minimum chunk size is + * 0x10000 LBAs corresponding to a NMASK or 16. + */ +#define MC_CHUNK_SIZE (1 << MC_RHT_NMASK) /* in LBAs */ + +#define CMD_TIMEOUT 30 /* 30 secs */ +#define CMD_RETRIES 5 /* 5 retries for scsi_execute */ + +#define MAX_SECTOR_UNIT 512 /* max_sector is in 512 byte multiples */ + +enum lun_mode { + MODE_NONE = 0, + MODE_VIRTUAL, + MODE_PHYSICAL +}; + +/* Global (entire driver, spans adapters) lun_info structure */ +struct glun_info { + u64 max_lba; /* from read cap(16) */ + u32 blk_len; /* from read cap(16) */ + enum lun_mode mode; /* NONE, VIRTUAL, PHYSICAL */ + int users; /* Number of users w/ references to LUN */ + + u8 wwid[16]; + + struct mutex mutex; + + struct blka blka; + struct list_head list; +}; + +/* Local (per-adapter) lun_info structure */ +struct llun_info { + u64 lun_id[MAX_FC_PORTS]; /* from REPORT_LUNS */ + u32 lun_index; /* Index in the LUN table */ + u32 host_no; /* host_no from Scsi_host */ + u32 port_sel; /* What port to use for this LUN */ + bool in_table; /* Whether a LUN table entry was created */ + + u8 wwid[16]; /* Keep a duplicate copy here? 
*/ + + struct glun_info *parent; /* Pointer to entry in global LUN structure */ + struct scsi_device *sdev; + struct list_head list; +}; + +struct lun_access { + struct llun_info *lli; + struct scsi_device *sdev; + struct list_head list; +}; + +enum ctx_ctrl { + CTX_CTRL_CLONE = (1 << 1), + CTX_CTRL_ERR = (1 << 2), + CTX_CTRL_ERR_FALLBACK = (1 << 3), + CTX_CTRL_NOPID = (1 << 4), + CTX_CTRL_FILE = (1 << 5) +}; + +#define ENCODE_CTXID(_ctx, _id) (((((u64)_ctx) & 0xFFFFFFFF0ULL) << 28) | _id) +#define DECODE_CTXID(_val) (_val & 0xFFFFFFFF) + +struct ctx_info { + struct sisl_ctrl_map __iomem *ctrl_map; /* initialized at startup */ + struct sisl_rht_entry *rht_start; /* 1 page (req'd for alignment), + * alloc/free on attach/detach + */ + u32 rht_out; /* Number of checked out RHT entries */ + u32 rht_perms; /* User-defined permissions for RHT entries */ + struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */ + u8 *rht_needs_ws; /* User-desired write-same function per RHTE */ + + u64 ctxid; + u64 irqs; /* Number of interrupts requested for context */ + pid_t pid; + bool initialized; + bool unavail; + bool err_recovery_active; + struct mutex mutex; /* Context protection */ + struct kref kref; + void *ctx; + struct cxlflash_cfg *cfg; + struct list_head luns; /* LUNs attached to this context */ + const struct vm_operations_struct *cxl_mmap_vmops; + struct file *file; + struct list_head list; /* Link contexts in error recovery */ +}; + +struct cxlflash_global { + struct mutex mutex; + struct list_head gluns;/* list of glun_info structs */ + struct page *err_page; /* One page of all 0xF for error notification */ +}; + +int cxlflash_vlun_resize(struct scsi_device *sdev, + struct dk_cxlflash_resize *resize); +int _cxlflash_vlun_resize(struct scsi_device *sdev, struct ctx_info *ctxi, + struct dk_cxlflash_resize *resize); + +int cxlflash_disk_release(struct scsi_device *sdev, + struct dk_cxlflash_release *release); +int _cxlflash_disk_release(struct scsi_device *sdev, struct ctx_info *ctxi, + struct dk_cxlflash_release *release); + +int cxlflash_disk_clone(struct scsi_device *sdev, + struct dk_cxlflash_clone *clone); + +int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg); + +int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked); +void cxlflash_lun_detach(struct glun_info *gli); + +struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxit, void *arg, + enum ctx_ctrl ctrl); +void put_context(struct ctx_info *ctxi); + +struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl, + struct llun_info *lli); + +struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi, + struct llun_info *lli); +void rhte_checkin(struct ctx_info *ctxi, struct sisl_rht_entry *rhte); + +void cxlflash_ba_terminate(struct ba_lun *ba_lun); + +int cxlflash_manage_lun(struct scsi_device *sdev, + struct dk_cxlflash_manage_lun *manage); + +int check_state(struct cxlflash_cfg *cfg); + +#endif /* ifndef _CXLFLASH_SUPERPIPE_H */ diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c new file mode 100644 index 000000000..cbd5a648a --- /dev/null +++ b/drivers/scsi/cxlflash/vlun.c @@ -0,0 +1,1337 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * CXL Flash Device Driver + * + * Written by: Manoj N. Kumar , IBM Corporation + * Matthew R. 
Ochs , IBM Corporation + * + * Copyright (C) 2015 IBM Corporation + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "sislite.h" +#include "common.h" +#include "vlun.h" +#include "superpipe.h" + +/** + * marshal_virt_to_resize() - translate uvirtual to resize structure + * @virt: Source structure from which to translate/copy. + * @resize: Destination structure for the translate/copy. + */ +static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt, + struct dk_cxlflash_resize *resize) +{ + resize->hdr = virt->hdr; + resize->context_id = virt->context_id; + resize->rsrc_handle = virt->rsrc_handle; + resize->req_size = virt->lun_size; + resize->last_lba = virt->last_lba; +} + +/** + * marshal_clone_to_rele() - translate clone to release structure + * @clone: Source structure from which to translate/copy. + * @release: Destination structure for the translate/copy. + */ +static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone, + struct dk_cxlflash_release *release) +{ + release->hdr = clone->hdr; + release->context_id = clone->context_id_dst; +} + +/** + * ba_init() - initializes a block allocator + * @ba_lun: Block allocator to initialize. + * + * Return: 0 on success, -errno on failure + */ +static int ba_init(struct ba_lun *ba_lun) +{ + struct ba_lun_info *bali = NULL; + int lun_size_au = 0, i = 0; + int last_word_underflow = 0; + u64 *lam; + + pr_debug("%s: Initializing LUN: lun_id=%016llx " + "ba_lun->lsize=%lx ba_lun->au_size=%lX\n", + __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size); + + /* Calculate bit map size */ + lun_size_au = ba_lun->lsize / ba_lun->au_size; + if (lun_size_au == 0) { + pr_debug("%s: Requested LUN size of 0!\n", __func__); + return -EINVAL; + } + + /* Allocate lun information container */ + bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL); + if (unlikely(!bali)) { + pr_err("%s: Failed to allocate lun_info lun_id=%016llx\n", + __func__, ba_lun->lun_id); + return -ENOMEM; + } + + bali->total_aus = lun_size_au; + bali->lun_bmap_size = lun_size_au / BITS_PER_LONG; + + if (lun_size_au % BITS_PER_LONG) + bali->lun_bmap_size++; + + /* Allocate bitmap space */ + bali->lun_alloc_map = kzalloc((bali->lun_bmap_size * sizeof(u64)), + GFP_KERNEL); + if (unlikely(!bali->lun_alloc_map)) { + pr_err("%s: Failed to allocate lun allocation map: " + "lun_id=%016llx\n", __func__, ba_lun->lun_id); + kfree(bali); + return -ENOMEM; + } + + /* Initialize the bit map size and set all bits to '1' */ + bali->free_aun_cnt = lun_size_au; + + for (i = 0; i < bali->lun_bmap_size; i++) + bali->lun_alloc_map[i] = 0xFFFFFFFFFFFFFFFFULL; + + /* If the last word not fully utilized, mark extra bits as allocated */ + last_word_underflow = (bali->lun_bmap_size * BITS_PER_LONG); + last_word_underflow -= bali->free_aun_cnt; + if (last_word_underflow > 0) { + lam = &bali->lun_alloc_map[bali->lun_bmap_size - 1]; + for (i = (HIBIT - last_word_underflow + 1); + i < BITS_PER_LONG; + i++) + clear_bit(i, (ulong *)lam); + } + + /* Initialize high elevator index, low/curr already at 0 from kzalloc */ + bali->free_high_idx = bali->lun_bmap_size; + + /* Allocate clone map */ + bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)), + GFP_KERNEL); + if (unlikely(!bali->aun_clone_map)) { + pr_err("%s: Failed to allocate clone map: lun_id=%016llx\n", + __func__, ba_lun->lun_id); + kfree(bali->lun_alloc_map); + kfree(bali); + return -ENOMEM; + } + + /* Pass the allocated LUN info as a handle to the user */ + ba_lun->ba_lun_handle = 
bali; + + pr_debug("%s: Successfully initialized the LUN: " + "lun_id=%016llx bitmap size=%x, free_aun_cnt=%llx\n", + __func__, ba_lun->lun_id, bali->lun_bmap_size, + bali->free_aun_cnt); + return 0; +} + +/** + * find_free_range() - locates a free bit within the block allocator + * @low: First word in block allocator to start search. + * @high: Last word in block allocator to search. + * @bali: LUN information structure owning the block allocator to search. + * @bit_word: Passes back the word in the block allocator owning the free bit. + * + * Return: The bit position within the passed back word, -1 on failure + */ +static int find_free_range(u32 low, + u32 high, + struct ba_lun_info *bali, int *bit_word) +{ + int i; + u64 bit_pos = -1; + ulong *lam, num_bits; + + for (i = low; i < high; i++) + if (bali->lun_alloc_map[i] != 0) { + lam = (ulong *)&bali->lun_alloc_map[i]; + num_bits = (sizeof(*lam) * BITS_PER_BYTE); + bit_pos = find_first_bit(lam, num_bits); + + pr_devel("%s: Found free bit %llu in LUN " + "map entry %016llx at bitmap index = %d\n", + __func__, bit_pos, bali->lun_alloc_map[i], i); + + *bit_word = i; + bali->free_aun_cnt--; + clear_bit(bit_pos, lam); + break; + } + + return bit_pos; +} + +/** + * ba_alloc() - allocates a block from the block allocator + * @ba_lun: Block allocator from which to allocate a block. + * + * Return: The allocated block, -1 on failure + */ +static u64 ba_alloc(struct ba_lun *ba_lun) +{ + u64 bit_pos = -1; + int bit_word = 0; + struct ba_lun_info *bali = NULL; + + bali = ba_lun->ba_lun_handle; + + pr_debug("%s: Received block allocation request: " + "lun_id=%016llx free_aun_cnt=%llx\n", + __func__, ba_lun->lun_id, bali->free_aun_cnt); + + if (bali->free_aun_cnt == 0) { + pr_debug("%s: No space left on LUN: lun_id=%016llx\n", + __func__, ba_lun->lun_id); + return -1ULL; + } + + /* Search to find a free entry, curr->high then low->curr */ + bit_pos = find_free_range(bali->free_curr_idx, + bali->free_high_idx, bali, &bit_word); + if (bit_pos == -1) { + bit_pos = find_free_range(bali->free_low_idx, + bali->free_curr_idx, + bali, &bit_word); + if (bit_pos == -1) { + pr_debug("%s: Could not find an allocation unit on LUN:" + " lun_id=%016llx\n", __func__, ba_lun->lun_id); + return -1ULL; + } + } + + /* Update the free_curr_idx */ + if (bit_pos == HIBIT) + bali->free_curr_idx = bit_word + 1; + else + bali->free_curr_idx = bit_word; + + pr_debug("%s: Allocating AU number=%llx lun_id=%016llx " + "free_aun_cnt=%llx\n", __func__, + ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id, + bali->free_aun_cnt); + + return (u64) ((bit_word * BITS_PER_LONG) + bit_pos); +} + +/** + * validate_alloc() - validates the specified block has been allocated + * @bali: LUN info owning the block allocator. + * @aun: Block to validate. + * + * Return: 0 on success, -1 on failure + */ +static int validate_alloc(struct ba_lun_info *bali, u64 aun) +{ + int idx = 0, bit_pos = 0; + + idx = aun / BITS_PER_LONG; + bit_pos = aun % BITS_PER_LONG; + + if (test_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx])) + return -1; + + return 0; +} + +/** + * ba_free() - frees a block from the block allocator + * @ba_lun: Block allocator from which to allocate a block. + * @to_free: Block to free. 
+ * + * Return: 0 on success, -1 on failure + */ +static int ba_free(struct ba_lun *ba_lun, u64 to_free) +{ + int idx = 0, bit_pos = 0; + struct ba_lun_info *bali = NULL; + + bali = ba_lun->ba_lun_handle; + + if (validate_alloc(bali, to_free)) { + pr_debug("%s: AUN %llx is not allocated on lun_id=%016llx\n", + __func__, to_free, ba_lun->lun_id); + return -1; + } + + pr_debug("%s: Received a request to free AU=%llx lun_id=%016llx " + "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id, + bali->free_aun_cnt); + + if (bali->aun_clone_map[to_free] > 0) { + pr_debug("%s: AUN %llx lun_id=%016llx cloned. Clone count=%x\n", + __func__, to_free, ba_lun->lun_id, + bali->aun_clone_map[to_free]); + bali->aun_clone_map[to_free]--; + return 0; + } + + idx = to_free / BITS_PER_LONG; + bit_pos = to_free % BITS_PER_LONG; + + set_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]); + bali->free_aun_cnt++; + + if (idx < bali->free_low_idx) + bali->free_low_idx = idx; + else if (idx > bali->free_high_idx) + bali->free_high_idx = idx; + + pr_debug("%s: Successfully freed AU bit_pos=%x bit map index=%x " + "lun_id=%016llx free_aun_cnt=%llx\n", __func__, bit_pos, idx, + ba_lun->lun_id, bali->free_aun_cnt); + + return 0; +} + +/** + * ba_clone() - Clone a chunk of the block allocation table + * @ba_lun: Block allocator from which to allocate a block. + * @to_clone: Block to clone. + * + * Return: 0 on success, -1 on failure + */ +static int ba_clone(struct ba_lun *ba_lun, u64 to_clone) +{ + struct ba_lun_info *bali = ba_lun->ba_lun_handle; + + if (validate_alloc(bali, to_clone)) { + pr_debug("%s: AUN=%llx not allocated on lun_id=%016llx\n", + __func__, to_clone, ba_lun->lun_id); + return -1; + } + + pr_debug("%s: Received a request to clone AUN %llx on lun_id=%016llx\n", + __func__, to_clone, ba_lun->lun_id); + + if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) { + pr_debug("%s: AUN %llx on lun_id=%016llx hit max clones already\n", + __func__, to_clone, ba_lun->lun_id); + return -1; + } + + bali->aun_clone_map[to_clone]++; + + return 0; +} + +/** + * ba_space() - returns the amount of free space left in the block allocator + * @ba_lun: Block allocator. + * + * Return: Amount of free space in block allocator + */ +static u64 ba_space(struct ba_lun *ba_lun) +{ + struct ba_lun_info *bali = ba_lun->ba_lun_handle; + + return bali->free_aun_cnt; +} + +/** + * cxlflash_ba_terminate() - frees resources associated with the block allocator + * @ba_lun: Block allocator. + * + * Safe to call in a partially allocated state. + */ +void cxlflash_ba_terminate(struct ba_lun *ba_lun) +{ + struct ba_lun_info *bali = ba_lun->ba_lun_handle; + + if (bali) { + kfree(bali->aun_clone_map); + kfree(bali->lun_alloc_map); + kfree(bali); + ba_lun->ba_lun_handle = NULL; + } +} + +/** + * init_vlun() - initializes a LUN for virtual use + * @lli: LUN information structure that owns the block allocator. 
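+ * + * The block allocator is sized in MC_CHUNK_SIZE allocation units that + * span the entire physical LUN (lsize = max_lba + 1), so nchunk is the + * number of chunks available to back virtual LUNs on this device.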
+ * + * Return: 0 on success, -errno on failure + */ +static int init_vlun(struct llun_info *lli) +{ + int rc = 0; + struct glun_info *gli = lli->parent; + struct blka *blka = &gli->blka; + + memset(blka, 0, sizeof(*blka)); + mutex_init(&blka->mutex); + + /* LUN IDs are unique per port, save the index instead */ + blka->ba_lun.lun_id = lli->lun_index; + blka->ba_lun.lsize = gli->max_lba + 1; + blka->ba_lun.lba_size = gli->blk_len; + + blka->ba_lun.au_size = MC_CHUNK_SIZE; + blka->nchunk = blka->ba_lun.lsize / MC_CHUNK_SIZE; + + rc = ba_init(&blka->ba_lun); + if (unlikely(rc)) + pr_debug("%s: cannot init block_alloc, rc=%d\n", __func__, rc); + + pr_debug("%s: returning rc=%d lli=%p\n", __func__, rc, lli); + return rc; +} + +/** + * write_same16() - sends a SCSI WRITE_SAME16 (0) command to specified LUN + * @sdev: SCSI device associated with LUN. + * @lba: Logical block address to start write same. + * @nblks: Number of logical blocks to write same. + * + * The SCSI WRITE_SAME16 can take quite a while to complete. Should an EEH occur + * while in scsi_execute_cmd(), the EEH handler will attempt to recover. As + * part of the recovery, the handler drains all currently running ioctls, + * waiting until they have completed before proceeding with a reset. As this + * routine is used on the ioctl path, this can create a condition where the + * EEH handler becomes stuck, infinitely waiting for this ioctl thread. To + * avoid this behavior, temporarily unmark this thread as an ioctl thread by + * releasing the ioctl read semaphore. This will allow the EEH handler to + * proceed with a recovery while this thread is still running. Once the + * scsi_execute_cmd() returns, reacquire the ioctl read semaphore and check the + * adapter state in case it changed while inside of scsi_execute_cmd(). The + * state check will wait if the adapter is still being recovered or return a + * failure if the recovery failed. In the event that the adapter reset failed, + * simply return the failure as the ioctl would be unable to continue. + * + * Note that the above puts a requirement on this routine to only be called on + * an ioctl thread. + * + * Return: 0 on success, -errno on failure + */ +static int write_same16(struct scsi_device *sdev, + u64 lba, + u32 nblks) +{ + u8 *cmd_buf = NULL; + u8 *scsi_cmd = NULL; + int rc = 0; + int result = 0; + u64 offset = lba; + int left = nblks; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + const u32 s = ilog2(sdev->sector_size) - 9; + const u32 to = sdev->request_queue->rq_timeout; + const u32 ws_limit = + sdev->request_queue->limits.max_write_zeroes_sectors >> s; + + cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL); + scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL); + if (unlikely(!cmd_buf || !scsi_cmd)) { + rc = -ENOMEM; + goto out; + } + + while (left > 0) { + + scsi_cmd[0] = WRITE_SAME_16; + scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0; + put_unaligned_be64(offset, &scsi_cmd[2]); + put_unaligned_be32(ws_limit < left ? 
ws_limit : left, + &scsi_cmd[10]); + + /* Drop the ioctl read semaphore across lengthy call */ + up_read(&cfg->ioctl_rwsem); + result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_OUT, + cmd_buf, CMD_BUFSIZE, to, + CMD_RETRIES, NULL); + down_read(&cfg->ioctl_rwsem); + rc = check_state(cfg); + if (rc) { + dev_err(dev, "%s: Failed state result=%08x\n", + __func__, result); + rc = -ENODEV; + goto out; + } + + if (result) { + dev_err_ratelimited(dev, "%s: command failed for " + "offset=%lld result=%08x\n", + __func__, offset, result); + rc = -EIO; + goto out; + } + left -= ws_limit; + offset += ws_limit; + } + +out: + kfree(cmd_buf); + kfree(scsi_cmd); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * grow_lxt() - expands the translation table associated with the specified RHTE + * @afu: AFU associated with the host. + * @sdev: SCSI device associated with LUN. + * @ctxid: Context ID of context owning the RHTE. + * @rhndl: Resource handle associated with the RHTE. + * @rhte: Resource handle entry (RHTE). + * @new_size: Number of translation entries associated with RHTE. + * + * By design, this routine employs a 'best attempt' allocation and will + * truncate the requested size down if there is not sufficient space in + * the block allocator to satisfy the request but there does exist some + * amount of space. The user is made aware of this by returning the size + * allocated. + * + * Return: 0 on success, -errno on failure + */ +static int grow_lxt(struct afu *afu, + struct scsi_device *sdev, + ctx_hndl_t ctxid, + res_hndl_t rhndl, + struct sisl_rht_entry *rhte, + u64 *new_size) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL; + struct llun_info *lli = sdev->hostdata; + struct glun_info *gli = lli->parent; + struct blka *blka = &gli->blka; + u32 av_size; + u32 ngrps, ngrps_old; + u64 aun; /* chunk# allocated by block allocator */ + u64 delta = *new_size - rhte->lxt_cnt; + u64 my_new_size; + int i, rc = 0; + + /* + * Check what is available in the block allocator before re-allocating + * LXT array. This is done up front under the mutex which must not be + * released until after allocation is complete. + */ + mutex_lock(&blka->mutex); + av_size = ba_space(&blka->ba_lun); + if (unlikely(av_size <= 0)) { + dev_dbg(dev, "%s: ba_space error av_size=%d\n", + __func__, av_size); + mutex_unlock(&blka->mutex); + rc = -ENOSPC; + goto out; + } + + if (av_size < delta) + delta = av_size; + + lxt_old = rhte->lxt_start; + ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt); + ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt + delta); + + if (ngrps != ngrps_old) { + /* reallocate to fit new size */ + lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps), + GFP_KERNEL); + if (unlikely(!lxt)) { + mutex_unlock(&blka->mutex); + rc = -ENOMEM; + goto out; + } + + /* copy over all old entries */ + memcpy(lxt, lxt_old, (sizeof(*lxt) * rhte->lxt_cnt)); + } else + lxt = lxt_old; + + /* nothing can fail from now on */ + my_new_size = rhte->lxt_cnt + delta; + + /* add new entries to the end */ + for (i = rhte->lxt_cnt; i < my_new_size; i++) { + /* + * Due to the earlier check of available space, ba_alloc + * cannot fail here. If it did due to internal error, + * leave a rlba_base of -1u which will likely be a + * invalid LUN (too large). 
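+ * + * Each new entry packs the chunk number returned by ba_alloc() into + * RLBA_BASE (aun << MC_CHUNK_SHIFT) together with the LUN index, the + * read/write permissions and the port selection mask, per the LXT + * entry layout described in vlun.h.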
+ */ + aun = ba_alloc(&blka->ba_lun); + if ((aun == -1ULL) || (aun >= blka->nchunk)) + dev_dbg(dev, "%s: ba_alloc error allocated chunk=%llu " + "max=%llu\n", __func__, aun, blka->nchunk - 1); + + /* select both ports, use r/w perms from RHT */ + lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) | + (lli->lun_index << LXT_LUNIDX_SHIFT) | + (RHT_PERM_RW << LXT_PERM_SHIFT | + lli->port_sel)); + } + + mutex_unlock(&blka->mutex); + + /* + * The following sequence is prescribed in the SISlite spec + * for syncing up with the AFU when adding LXT entries. + */ + dma_wmb(); /* Make LXT updates are visible */ + + rhte->lxt_start = lxt; + dma_wmb(); /* Make RHT entry's LXT table update visible */ + + rhte->lxt_cnt = my_new_size; + dma_wmb(); /* Make RHT entry's LXT table size update visible */ + + rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC); + if (unlikely(rc)) + rc = -EAGAIN; + + /* free old lxt if reallocated */ + if (lxt != lxt_old) + kfree(lxt_old); + *new_size = my_new_size; +out: + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * shrink_lxt() - reduces translation table associated with the specified RHTE + * @afu: AFU associated with the host. + * @sdev: SCSI device associated with LUN. + * @rhndl: Resource handle associated with the RHTE. + * @rhte: Resource handle entry (RHTE). + * @ctxi: Context owning resources. + * @new_size: Number of translation entries associated with RHTE. + * + * Return: 0 on success, -errno on failure + */ +static int shrink_lxt(struct afu *afu, + struct scsi_device *sdev, + res_hndl_t rhndl, + struct sisl_rht_entry *rhte, + struct ctx_info *ctxi, + u64 *new_size) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct sisl_lxt_entry *lxt, *lxt_old; + struct llun_info *lli = sdev->hostdata; + struct glun_info *gli = lli->parent; + struct blka *blka = &gli->blka; + ctx_hndl_t ctxid = DECODE_CTXID(ctxi->ctxid); + bool needs_ws = ctxi->rht_needs_ws[rhndl]; + bool needs_sync = !ctxi->err_recovery_active; + u32 ngrps, ngrps_old; + u64 aun; /* chunk# allocated by block allocator */ + u64 delta = rhte->lxt_cnt - *new_size; + u64 my_new_size; + int i, rc = 0; + + lxt_old = rhte->lxt_start; + ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt); + ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt - delta); + + if (ngrps != ngrps_old) { + /* Reallocate to fit new size unless new size is 0 */ + if (ngrps) { + lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps), + GFP_KERNEL); + if (unlikely(!lxt)) { + rc = -ENOMEM; + goto out; + } + + /* Copy over old entries that will remain */ + memcpy(lxt, lxt_old, + (sizeof(*lxt) * (rhte->lxt_cnt - delta))); + } else + lxt = NULL; + } else + lxt = lxt_old; + + /* Nothing can fail from now on */ + my_new_size = rhte->lxt_cnt - delta; + + /* + * The following sequence is prescribed in the SISlite spec + * for syncing up with the AFU when removing LXT entries. + */ + rhte->lxt_cnt = my_new_size; + dma_wmb(); /* Make RHT entry's LXT table size update visible */ + + rhte->lxt_start = lxt; + dma_wmb(); /* Make RHT entry's LXT table update visible */ + + if (needs_sync) { + rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC); + if (unlikely(rc)) + rc = -EAGAIN; + } + + if (needs_ws) { + /* + * Mark the context as unavailable, so that we can release + * the mutex safely. 
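+ * The write_same16() scrubbing of freed chunks below can take a long + * time and temporarily drops the ioctl read semaphore, so the context + * mutex must not be held across it.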
+ */ + ctxi->unavail = true; + mutex_unlock(&ctxi->mutex); + } + + /* Free LBAs allocated to freed chunks */ + mutex_lock(&blka->mutex); + for (i = delta - 1; i >= 0; i--) { + aun = lxt_old[my_new_size + i].rlba_base >> MC_CHUNK_SHIFT; + if (needs_ws) + write_same16(sdev, aun, MC_CHUNK_SIZE); + ba_free(&blka->ba_lun, aun); + } + mutex_unlock(&blka->mutex); + + if (needs_ws) { + /* Make the context visible again */ + mutex_lock(&ctxi->mutex); + ctxi->unavail = false; + } + + /* Free old lxt if reallocated */ + if (lxt != lxt_old) + kfree(lxt_old); + *new_size = my_new_size; +out: + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * _cxlflash_vlun_resize() - changes the size of a virtual LUN + * @sdev: SCSI device associated with LUN owning virtual LUN. + * @ctxi: Context owning resources. + * @resize: Resize ioctl data structure. + * + * On successful return, the user is informed of the new size (in blocks) + * of the virtual LUN in last LBA format. When the size of the virtual + * LUN is zero, the last LBA is reflected as -1. See comment in the + * prologue for _cxlflash_disk_release() regarding AFU syncs and contexts + * on the error recovery list. + * + * Return: 0 on success, -errno on failure + */ +int _cxlflash_vlun_resize(struct scsi_device *sdev, + struct ctx_info *ctxi, + struct dk_cxlflash_resize *resize) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct llun_info *lli = sdev->hostdata; + struct glun_info *gli = lli->parent; + struct afu *afu = cfg->afu; + bool put_ctx = false; + + res_hndl_t rhndl = resize->rsrc_handle; + u64 new_size; + u64 nsectors; + u64 ctxid = DECODE_CTXID(resize->context_id), + rctxid = resize->context_id; + + struct sisl_rht_entry *rhte; + + int rc = 0; + + /* + * The requested size (req_size) is always assumed to be in 4k blocks, + * so we have to convert it here from 4k to chunk size. + */ + nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len; + new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE); + + dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu req_size=%llu new_size=%llu\n", + __func__, ctxid, resize->rsrc_handle, resize->req_size, + new_size); + + if (unlikely(gli->mode != MODE_VIRTUAL)) { + dev_dbg(dev, "%s: LUN mode does not support resize mode=%d\n", + __func__, gli->mode); + rc = -EINVAL; + goto out; + + } + + if (!ctxi) { + ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); + if (unlikely(!ctxi)) { + dev_dbg(dev, "%s: Bad context ctxid=%llu\n", + __func__, ctxid); + rc = -EINVAL; + goto out; + } + + put_ctx = true; + } + + rhte = get_rhte(ctxi, rhndl, lli); + if (unlikely(!rhte)) { + dev_dbg(dev, "%s: Bad resource handle rhndl=%u\n", + __func__, rhndl); + rc = -EINVAL; + goto out; + } + + if (new_size > rhte->lxt_cnt) + rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size); + else if (new_size < rhte->lxt_cnt) + rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size); + else { + /* + * Rare case where there is already sufficient space, just + * need to perform a translation sync with the AFU. This + * scenario likely follows a previous sync failure during + * a resize operation. Accordingly, perform the heavyweight + * form of translation sync as it is unknown which type of + * resize failed previously. 
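+ * The heavyweight sync (AFU_HW_SYNC, as used on the shrink path) is + * therefore issued here instead of the lightweight sync used by + * grow_lxt().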
+ */ + rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC); + if (unlikely(rc)) { + rc = -EAGAIN; + goto out; + } + } + + resize->hdr.return_flags = 0; + resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len); + resize->last_lba /= CXLFLASH_BLOCK_SIZE; + resize->last_lba--; + +out: + if (put_ctx) + put_context(ctxi); + dev_dbg(dev, "%s: resized to %llu returning rc=%d\n", + __func__, resize->last_lba, rc); + return rc; +} + +int cxlflash_vlun_resize(struct scsi_device *sdev, + struct dk_cxlflash_resize *resize) +{ + return _cxlflash_vlun_resize(sdev, NULL, resize); +} + +/** + * cxlflash_restore_luntable() - Restore LUN table to prior state + * @cfg: Internal structure associated with the host. + */ +void cxlflash_restore_luntable(struct cxlflash_cfg *cfg) +{ + struct llun_info *lli, *temp; + u32 lind; + int k; + struct device *dev = &cfg->dev->dev; + __be64 __iomem *fc_port_luns; + + mutex_lock(&global.mutex); + + list_for_each_entry_safe(lli, temp, &cfg->lluns, list) { + if (!lli->in_table) + continue; + + lind = lli->lun_index; + dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind); + + for (k = 0; k < cfg->num_fc_ports; k++) + if (lli->port_sel & (1 << k)) { + fc_port_luns = get_fc_port_luns(cfg, k); + writeq_be(lli->lun_id[k], &fc_port_luns[lind]); + dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]); + } + } + + mutex_unlock(&global.mutex); +} + +/** + * get_num_ports() - compute number of ports from port selection mask + * @psm: Port selection mask. + * + * Return: Population count of port selection mask + */ +static inline u8 get_num_ports(u32 psm) +{ + static const u8 bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3, + 1, 2, 2, 3, 2, 3, 3, 4 }; + + return bits[psm & 0xf]; +} + +/** + * init_luntable() - write an entry in the LUN table + * @cfg: Internal structure associated with the host. + * @lli: Per adapter LUN information structure. + * + * On successful return, a LUN table entry is created: + * - at the top for LUNs visible on multiple ports. + * - at the bottom for LUNs visible only on one port. + * + * Return: 0 on success, -errno on failure + */ +static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli) +{ + u32 chan; + u32 lind; + u32 nports; + int rc = 0; + int k; + struct device *dev = &cfg->dev->dev; + __be64 __iomem *fc_port_luns; + + mutex_lock(&global.mutex); + + if (lli->in_table) + goto out; + + nports = get_num_ports(lli->port_sel); + if (nports == 0 || nports > cfg->num_fc_ports) { + WARN(1, "Unsupported port configuration nports=%u", nports); + rc = -EIO; + goto out; + } + + if (nports > 1) { + /* + * When LUN is visible from multiple ports, we will put + * it in the top half of the LUN table. + */ + for (k = 0; k < cfg->num_fc_ports; k++) { + if (!(lli->port_sel & (1 << k))) + continue; + + if (cfg->promote_lun_index == cfg->last_lun_index[k]) { + rc = -ENOSPC; + goto out; + } + } + + lind = lli->lun_index = cfg->promote_lun_index; + dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind); + + for (k = 0; k < cfg->num_fc_ports; k++) { + if (!(lli->port_sel & (1 << k))) + continue; + + fc_port_luns = get_fc_port_luns(cfg, k); + writeq_be(lli->lun_id[k], &fc_port_luns[lind]); + dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]); + } + + cfg->promote_lun_index++; + } else { + /* + * When LUN is visible only from one port, we will put + * it in the bottom half of the LUN table. 
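+ * Single-port entries are handed out from last_lun_index[chan], which + * walks downward toward the upward-walking promote_lun_index used for + * multi-port LUNs; when the two cursors meet, the table is full and + * -ENOSPC is returned.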
+ */ + chan = PORTMASK2CHAN(lli->port_sel); + if (cfg->promote_lun_index == cfg->last_lun_index[chan]) { + rc = -ENOSPC; + goto out; + } + + lind = lli->lun_index = cfg->last_lun_index[chan]; + fc_port_luns = get_fc_port_luns(cfg, chan); + writeq_be(lli->lun_id[chan], &fc_port_luns[lind]); + cfg->last_lun_index[chan]--; + dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n\t%d=%llx\n", + __func__, lind, chan, lli->lun_id[chan]); + } + + lli->in_table = true; +out: + mutex_unlock(&global.mutex); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +} + +/** + * cxlflash_disk_virtual_open() - open a virtual disk of specified size + * @sdev: SCSI device associated with LUN owning virtual LUN. + * @arg: UVirtual ioctl data structure. + * + * On successful return, the user is informed of the resource handle + * to be used to identify the virtual LUN and the size (in blocks) of + * the virtual LUN in last LBA format. When the size of the virtual LUN + * is zero, the last LBA is reflected as -1. + * + * Return: 0 on success, -errno on failure + */ +int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct llun_info *lli = sdev->hostdata; + struct glun_info *gli = lli->parent; + + struct dk_cxlflash_uvirtual *virt = (struct dk_cxlflash_uvirtual *)arg; + struct dk_cxlflash_resize resize; + + u64 ctxid = DECODE_CTXID(virt->context_id), + rctxid = virt->context_id; + u64 lun_size = virt->lun_size; + u64 last_lba = 0; + u64 rsrc_handle = -1; + + int rc = 0; + + struct ctx_info *ctxi = NULL; + struct sisl_rht_entry *rhte = NULL; + + dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size); + + /* Setup the LUNs block allocator on first call */ + mutex_lock(&gli->mutex); + if (gli->mode == MODE_NONE) { + rc = init_vlun(lli); + if (rc) { + dev_err(dev, "%s: init_vlun failed rc=%d\n", + __func__, rc); + rc = -ENOMEM; + goto err0; + } + } + + rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true); + if (unlikely(rc)) { + dev_err(dev, "%s: Failed attach to LUN (VIRTUAL)\n", __func__); + goto err0; + } + mutex_unlock(&gli->mutex); + + rc = init_luntable(cfg, lli); + if (rc) { + dev_err(dev, "%s: init_luntable failed rc=%d\n", __func__, rc); + goto err1; + } + + ctxi = get_context(cfg, rctxid, lli, 0); + if (unlikely(!ctxi)) { + dev_err(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid); + rc = -EINVAL; + goto err1; + } + + rhte = rhte_checkout(ctxi, lli); + if (unlikely(!rhte)) { + dev_err(dev, "%s: too many opens ctxid=%llu\n", + __func__, ctxid); + rc = -EMFILE; /* too many opens */ + goto err1; + } + + rsrc_handle = (rhte - ctxi->rht_start); + + /* Populate RHT format 0 */ + rhte->nmask = MC_RHT_NMASK; + rhte->fp = SISL_RHT_FP(0U, ctxi->rht_perms); + + /* Resize even if requested size is 0 */ + marshal_virt_to_resize(virt, &resize); + resize.rsrc_handle = rsrc_handle; + rc = _cxlflash_vlun_resize(sdev, ctxi, &resize); + if (rc) { + dev_err(dev, "%s: resize failed rc=%d\n", __func__, rc); + goto err2; + } + last_lba = resize.last_lba; + + if (virt->hdr.flags & DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME) + ctxi->rht_needs_ws[rsrc_handle] = true; + + virt->hdr.return_flags = 0; + virt->last_lba = last_lba; + virt->rsrc_handle = rsrc_handle; + + if (get_num_ports(lli->port_sel) > 1) + virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE; +out: + if (likely(ctxi)) + put_context(ctxi); + dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n", + __func__, rsrc_handle, rc, last_lba); 
+ return rc; + +err2: + rhte_checkin(ctxi, rhte); +err1: + cxlflash_lun_detach(gli); + goto out; +err0: + /* Special common cleanup prior to successful LUN attach */ + cxlflash_ba_terminate(&gli->blka.ba_lun); + mutex_unlock(&gli->mutex); + goto out; +} + +/** + * clone_lxt() - copies translation tables from source to destination RHTE + * @afu: AFU associated with the host. + * @blka: Block allocator associated with LUN. + * @ctxid: Context ID of context owning the RHTE. + * @rhndl: Resource handle associated with the RHTE. + * @rhte: Destination resource handle entry (RHTE). + * @rhte_src: Source resource handle entry (RHTE). + * + * Return: 0 on success, -errno on failure + */ +static int clone_lxt(struct afu *afu, + struct blka *blka, + ctx_hndl_t ctxid, + res_hndl_t rhndl, + struct sisl_rht_entry *rhte, + struct sisl_rht_entry *rhte_src) +{ + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; + struct sisl_lxt_entry *lxt = NULL; + bool locked = false; + u32 ngrps; + u64 aun; /* chunk# allocated by block allocator */ + int j; + int i = 0; + int rc = 0; + + ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt); + + if (ngrps) { + /* allocate new LXTs for clone */ + lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps), + GFP_KERNEL); + if (unlikely(!lxt)) { + rc = -ENOMEM; + goto out; + } + + /* copy over */ + memcpy(lxt, rhte_src->lxt_start, + (sizeof(*lxt) * rhte_src->lxt_cnt)); + + /* clone the LBAs in block allocator via ref_cnt, note that the + * block allocator mutex must be held until it is established + * that this routine will complete without the need for a + * cleanup. + */ + mutex_lock(&blka->mutex); + locked = true; + for (i = 0; i < rhte_src->lxt_cnt; i++) { + aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT); + if (ba_clone(&blka->ba_lun, aun) == -1ULL) { + rc = -EIO; + goto err; + } + } + } + + /* + * The following sequence is prescribed in the SISlite spec + * for syncing up with the AFU when adding LXT entries. + */ + dma_wmb(); /* Make LXT updates are visible */ + + rhte->lxt_start = lxt; + dma_wmb(); /* Make RHT entry's LXT table update visible */ + + rhte->lxt_cnt = rhte_src->lxt_cnt; + dma_wmb(); /* Make RHT entry's LXT table size update visible */ + + rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC); + if (unlikely(rc)) { + rc = -EAGAIN; + goto err2; + } + +out: + if (locked) + mutex_unlock(&blka->mutex); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; +err2: + /* Reset the RHTE */ + rhte->lxt_cnt = 0; + dma_wmb(); + rhte->lxt_start = NULL; + dma_wmb(); +err: + /* free the clones already made */ + for (j = 0; j < i; j++) { + aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT); + ba_free(&blka->ba_lun, aun); + } + kfree(lxt); + goto out; +} + +/** + * cxlflash_disk_clone() - clone a context by making snapshot of another + * @sdev: SCSI device associated with LUN owning virtual LUN. + * @clone: Clone ioctl data structure. + * + * This routine effectively performs cxlflash_disk_open operation for each + * in-use virtual resource in the source context. Note that the destination + * context must be in pristine state and cannot have any resource handles + * open at the time of the clone. 
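+ * + * No data is copied; clone_lxt() duplicates the translation entries and + * bumps the per-chunk reference count (ba_clone()) so that both contexts + * resolve to the same physical chunks.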
+ * + * Return: 0 on success, -errno on failure + */ +int cxlflash_disk_clone(struct scsi_device *sdev, + struct dk_cxlflash_clone *clone) +{ + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; + struct llun_info *lli = sdev->hostdata; + struct glun_info *gli = lli->parent; + struct blka *blka = &gli->blka; + struct afu *afu = cfg->afu; + struct dk_cxlflash_release release = { { 0 }, 0 }; + + struct ctx_info *ctxi_src = NULL, + *ctxi_dst = NULL; + struct lun_access *lun_access_src, *lun_access_dst; + u32 perms; + u64 ctxid_src = DECODE_CTXID(clone->context_id_src), + ctxid_dst = DECODE_CTXID(clone->context_id_dst), + rctxid_src = clone->context_id_src, + rctxid_dst = clone->context_id_dst; + int i, j; + int rc = 0; + bool found; + LIST_HEAD(sidecar); + + dev_dbg(dev, "%s: ctxid_src=%llu ctxid_dst=%llu\n", + __func__, ctxid_src, ctxid_dst); + + /* Do not clone yourself */ + if (unlikely(rctxid_src == rctxid_dst)) { + rc = -EINVAL; + goto out; + } + + if (unlikely(gli->mode != MODE_VIRTUAL)) { + rc = -EINVAL; + dev_dbg(dev, "%s: Only supported on virtual LUNs mode=%u\n", + __func__, gli->mode); + goto out; + } + + ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE); + ctxi_dst = get_context(cfg, rctxid_dst, lli, 0); + if (unlikely(!ctxi_src || !ctxi_dst)) { + dev_dbg(dev, "%s: Bad context ctxid_src=%llu ctxid_dst=%llu\n", + __func__, ctxid_src, ctxid_dst); + rc = -EINVAL; + goto out; + } + + /* Verify there is no open resource handle in the destination context */ + for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) + if (ctxi_dst->rht_start[i].nmask != 0) { + rc = -EINVAL; + goto out; + } + + /* Clone LUN access list */ + list_for_each_entry(lun_access_src, &ctxi_src->luns, list) { + found = false; + list_for_each_entry(lun_access_dst, &ctxi_dst->luns, list) + if (lun_access_dst->sdev == lun_access_src->sdev) { + found = true; + break; + } + + if (!found) { + lun_access_dst = kzalloc(sizeof(*lun_access_dst), + GFP_KERNEL); + if (unlikely(!lun_access_dst)) { + dev_err(dev, "%s: lun_access allocation fail\n", + __func__); + rc = -ENOMEM; + goto out; + } + + *lun_access_dst = *lun_access_src; + list_add(&lun_access_dst->list, &sidecar); + } + } + + if (unlikely(!ctxi_src->rht_out)) { + dev_dbg(dev, "%s: Nothing to clone\n", __func__); + goto out_success; + } + + /* User specified permission on attach */ + perms = ctxi_dst->rht_perms; + + /* + * Copy over checked-out RHT (and their associated LXT) entries by + * hand, stopping after we've copied all outstanding entries and + * cleaning up if the clone fails. + * + * Note: This loop is equivalent to performing cxlflash_disk_open and + * cxlflash_vlun_resize. As such, LUN accounting needs to be taken into + * account by attaching after each successful RHT entry clone. In the + * event that a clone failure is experienced, the LUN detach is handled + * via the cleanup performed by _cxlflash_disk_release. 
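+ * + * Should clone_lxt() fail, every handle cloned so far is released via + * _cxlflash_disk_release() and the entry that failed is checked back in + * before bailing out.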
+ */ + for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) { + if (ctxi_src->rht_out == ctxi_dst->rht_out) + break; + if (ctxi_src->rht_start[i].nmask == 0) + continue; + + /* Consume a destination RHT entry */ + ctxi_dst->rht_out++; + ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask; + ctxi_dst->rht_start[i].fp = + SISL_RHT_FP_CLONE(ctxi_src->rht_start[i].fp, perms); + ctxi_dst->rht_lun[i] = ctxi_src->rht_lun[i]; + + rc = clone_lxt(afu, blka, ctxid_dst, i, + &ctxi_dst->rht_start[i], + &ctxi_src->rht_start[i]); + if (rc) { + marshal_clone_to_rele(clone, &release); + for (j = 0; j < i; j++) { + release.rsrc_handle = j; + _cxlflash_disk_release(sdev, ctxi_dst, + &release); + } + + /* Put back the one we failed on */ + rhte_checkin(ctxi_dst, &ctxi_dst->rht_start[i]); + goto err; + } + + cxlflash_lun_attach(gli, gli->mode, false); + } + +out_success: + list_splice(&sidecar, &ctxi_dst->luns); + + /* fall through */ +out: + if (ctxi_src) + put_context(ctxi_src); + if (ctxi_dst) + put_context(ctxi_dst); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); + return rc; + +err: + list_for_each_entry_safe(lun_access_src, lun_access_dst, &sidecar, list) + kfree(lun_access_src); + goto out; +} diff --git a/drivers/scsi/cxlflash/vlun.h b/drivers/scsi/cxlflash/vlun.h new file mode 100644 index 000000000..68e3ea52f --- /dev/null +++ b/drivers/scsi/cxlflash/vlun.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * CXL Flash Device Driver + * + * Written by: Manoj N. Kumar , IBM Corporation + * Matthew R. Ochs , IBM Corporation + * + * Copyright (C) 2015 IBM Corporation + */ + +#ifndef _CXLFLASH_VLUN_H +#define _CXLFLASH_VLUN_H + +/* RHT - Resource Handle Table */ +#define MC_RHT_NMASK 16 /* in bits */ +#define MC_CHUNK_SHIFT MC_RHT_NMASK /* shift to go from LBA to chunk# */ + +#define HIBIT (BITS_PER_LONG - 1) + +#define MAX_AUN_CLONE_CNT 0xFF + +/* + * LXT - LBA Translation Table + * + * +-------+-------+-------+-------+-------+-------+-------+---+---+ + * | RLBA_BASE |LUN_IDX| P |SEL| + * +-------+-------+-------+-------+-------+-------+-------+---+---+ + * + * The LXT Entry contains the physical LBA where the chunk starts (RLBA_BASE). + * AFU ORes the low order bits from the virtual LBA (offset into the chunk) + * with RLBA_BASE. The result is the physical LBA to be sent to storage. + * The LXT Entry also contains an index to a LUN TBL and a bitmask of which + * outgoing (FC) * ports can be selected. The port select bit-mask is ANDed + * with a global port select bit-mask maintained by the driver. + * In addition, it has permission bits that are ANDed with the + * RHT permissions to arrive at the final permissions for the chunk. + * + * LXT tables are allocated dynamically in groups. This is done to avoid + * a malloc/free overhead each time the LXT has to grow or shrink. + * + * Based on the current lxt_cnt (used), it is always possible to know + * how many are allocated (used+free). The number of allocated entries is + * not stored anywhere. + * + * The LXT table is re-allocated whenever it needs to cross into another group. 
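+ * + * For example, with LXT_GROUP_SIZE of 8, growing from 8 to 9 entries + * forces a reallocation (LXT_NUM_GROUPS goes from 1 to 2), while growing + * from 9 to 16 entries does not, since LXT_NUM_GROUPS stays at 2.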
+ */ +#define LXT_GROUP_SIZE 8 +#define LXT_NUM_GROUPS(lxt_cnt) (((lxt_cnt) + 7)/8) /* alloc'ed groups */ +#define LXT_LUNIDX_SHIFT 8 /* LXT entry, shift for LUN index */ +#define LXT_PERM_SHIFT 4 /* LXT entry, shift for permission bits */ + +struct ba_lun_info { + u64 *lun_alloc_map; + u32 lun_bmap_size; + u32 total_aus; + u64 free_aun_cnt; + + /* indices to be used for elevator lookup of free map */ + u32 free_low_idx; + u32 free_curr_idx; + u32 free_high_idx; + + u8 *aun_clone_map; +}; + +struct ba_lun { + u64 lun_id; + u64 wwpn; + size_t lsize; /* LUN size in number of LBAs */ + size_t lba_size; /* LBA size in number of bytes */ + size_t au_size; /* Allocation Unit size in number of LBAs */ + struct ba_lun_info *ba_lun_handle; +}; + +/* Block Allocator */ +struct blka { + struct ba_lun ba_lun; + u64 nchunk; /* number of chunks */ + struct mutex mutex; +}; + +#endif /* ifndef _CXLFLASH_SUPERPIPE_H */ diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c new file mode 100644 index 000000000..c8e86f8a6 --- /dev/null +++ b/drivers/scsi/dc395x.c @@ -0,0 +1,4693 @@ +/* + * dc395x.c + * + * Device Driver for Tekram DC395(U/UW/F), DC315(U) + * PCI SCSI Bus Master Host Adapter + * (SCSI chip set used Tekram ASIC TRM-S1040) + * + * Authors: + * C.L. Huang + * Erich Chen + * (C) Copyright 1995-1999 Tekram Technology Co., Ltd. + * + * Kurt Garloff + * (C) 1999-2000 Kurt Garloff + * + * Oliver Neukum + * Ali Akcaagac + * Jamie Lenehan + * (C) 2003 + * + * License: GNU GPL + * + ************************************************************************* + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ************************************************************************ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "dc395x.h" + +#define DC395X_NAME "dc395x" +#define DC395X_BANNER "Tekram DC395(U/UW/F), DC315(U) - ASIC TRM-S1040" +#define DC395X_VERSION "v2.05, 2004/03/08" + +/*--------------------------------------------------------------------------- + Features + ---------------------------------------------------------------------------*/ +/* + * Set to disable parts of the driver + */ +/*#define DC395x_NO_DISCONNECT*/ +/*#define DC395x_NO_TAGQ*/ +/*#define DC395x_NO_SYNC*/ +/*#define DC395x_NO_WIDE*/ + +/*--------------------------------------------------------------------------- + Debugging + ---------------------------------------------------------------------------*/ +/* + * Types of debugging that can be enabled and disabled + */ +#define DBG_KG 0x0001 +#define DBG_0 0x0002 +#define DBG_1 0x0004 +#define DBG_SG 0x0020 +#define DBG_FIFO 0x0040 +#define DBG_PIO 0x0080 + + +/* + * Set of things to output debugging for. + * Undefine to remove all debugging + */ +/*#define DEBUG_MASK (DBG_0|DBG_1|DBG_SG|DBG_FIFO|DBG_PIO)*/ +/*#define DEBUG_MASK DBG_0*/ + + +/* + * Output a kernel message at the specified level and append the + * driver name and a ": " to the start of the message + */ +#define dprintkl(level, format, arg...) \ + printk(level DC395X_NAME ": " format , ## arg) + + +#ifdef DEBUG_MASK +/* + * print a debug message - this is formatted with KERN_DEBUG, then the + * driver name followed by a ": " and then the message is output. + * This also checks that the specified debug level is enabled before + * outputting the message + */ +#define dprintkdbg(type, format, arg...) \ + do { \ + if ((type) & (DEBUG_MASK)) \ + dprintkl(KERN_DEBUG , format , ## arg); \ + } while (0) + +/* + * Check if the specified type of debugging is enabled + */ +#define debug_enabled(type) ((DEBUG_MASK) & (type)) + +#else +/* + * No debugging. Do nothing + */ +#define dprintkdbg(type, format, arg...) \ + do {} while (0) +#define debug_enabled(type) (0) + +#endif + + +#ifndef PCI_VENDOR_ID_TEKRAM +#define PCI_VENDOR_ID_TEKRAM 0x1DE1 /* Vendor ID */ +#endif +#ifndef PCI_DEVICE_ID_TEKRAM_TRMS1040 +#define PCI_DEVICE_ID_TEKRAM_TRMS1040 0x0391 /* Device ID */ +#endif + + +#define DC395x_LOCK_IO(dev,flags) spin_lock_irqsave(((struct Scsi_Host *)dev)->host_lock, flags) +#define DC395x_UNLOCK_IO(dev,flags) spin_unlock_irqrestore(((struct Scsi_Host *)dev)->host_lock, flags) + +#define DC395x_read8(acb,address) (u8)(inb(acb->io_port_base + (address))) +#define DC395x_read16(acb,address) (u16)(inw(acb->io_port_base + (address))) +#define DC395x_read32(acb,address) (u32)(inl(acb->io_port_base + (address))) +#define DC395x_write8(acb,address,value) outb((value), acb->io_port_base + (address)) +#define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address)) +#define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address)) + +#define TAG_NONE 255 + +/* + * srb->segment_x is the hw sg list. It is always allocated as + * DC395x_MAX_SG_LISTENTRY entries in a linear block which does not + * cross a page boundary. + */ +#define SEGMENTX_LEN (sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY) + + +struct SGentry { + u32 address; /* bus!
address */ + u32 length; +}; + +/* The SEEPROM structure for TRM_S1040 */ +struct NVRamTarget { + u8 cfg0; /* Target configuration byte 0 */ + u8 period; /* Target period */ + u8 cfg2; /* Target configuration byte 2 */ + u8 cfg3; /* Target configuration byte 3 */ +}; + +struct NvRamType { + u8 sub_vendor_id[2]; /* 0,1 Sub Vendor ID */ + u8 sub_sys_id[2]; /* 2,3 Sub System ID */ + u8 sub_class; /* 4 Sub Class */ + u8 vendor_id[2]; /* 5,6 Vendor ID */ + u8 device_id[2]; /* 7,8 Device ID */ + u8 reserved; /* 9 Reserved */ + struct NVRamTarget target[DC395x_MAX_SCSI_ID]; + /** 10,11,12,13 + ** 14,15,16,17 + ** .... + ** .... + ** 70,71,72,73 + */ + u8 scsi_id; /* 74 Host Adapter SCSI ID */ + u8 channel_cfg; /* 75 Channel configuration */ + u8 delay_time; /* 76 Power on delay time */ + u8 max_tag; /* 77 Maximum tags */ + u8 reserved0; /* 78 */ + u8 boot_target; /* 79 */ + u8 boot_lun; /* 80 */ + u8 reserved1; /* 81 */ + u16 reserved2[22]; /* 82,..125 */ + u16 cksum; /* 126,127 */ +}; + +struct ScsiReqBlk { + struct list_head list; /* next/prev ptrs for srb lists */ + struct DeviceCtlBlk *dcb; + struct scsi_cmnd *cmd; + + struct SGentry *segment_x; /* Linear array of hw sg entries (up to 64 entries) */ + dma_addr_t sg_bus_addr; /* Bus address of sg list (ie, of segment_x) */ + + u8 sg_count; /* No of HW sg entries for this request */ + u8 sg_index; /* Index of HW sg entry for this request */ + size_t total_xfer_length; /* Total number of bytes remaining to be transferred */ + size_t request_length; /* Total number of bytes in this request */ + /* + * The sense buffer handling function, request_sense, uses + * the first hw sg entry (segment_x[0]) and the transfer + * length (total_xfer_length). While doing this it stores the + * original values into the last sg hw list + * (srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1] and the + * total_xfer_length in xferred. These values are restored in + * pci_unmap_srb_sense. This is the only place xferred is used. + */ + size_t xferred; /* Saved copy of total_xfer_length */ + + u16 state; + + u8 msgin_buf[6]; + u8 msgout_buf[6]; + + u8 adapter_status; + u8 target_status; + u8 msg_count; + u8 end_message; + + u8 tag_number; + u8 status; + u8 retry_count; + u8 flag; + + u8 scsi_phase; +}; + +struct DeviceCtlBlk { + struct list_head list; /* next/prev ptrs for the dcb list */ + struct AdapterCtlBlk *acb; + struct list_head srb_going_list; /* head of going srb list */ + struct list_head srb_waiting_list; /* head of waiting srb list */ + + struct ScsiReqBlk *active_srb; + u32 tag_mask; + + u16 max_command; + + u8 target_id; /* SCSI Target ID (SCSI Only) */ + u8 target_lun; /* SCSI Log. Unit (SCSI Only) */ + u8 identify_msg; + u8 dev_mode; + + u8 inquiry7; /* To store Inquiry flags */ + u8 sync_mode; /* 0:async mode */ + u8 min_nego_period; /* for nego. */ + u8 sync_period; /* for reg. */ + + u8 sync_offset; /* for reg. 
and nego.(low nibble) */ + u8 flag; + u8 dev_type; + u8 init_tcq_flag; +}; + +struct AdapterCtlBlk { + struct Scsi_Host *scsi_host; + + unsigned long io_port_base; + unsigned long io_port_len; + + struct list_head dcb_list; /* head of going dcb list */ + struct DeviceCtlBlk *dcb_run_robin; + struct DeviceCtlBlk *active_dcb; + + struct list_head srb_free_list; /* head of free srb list */ + struct ScsiReqBlk *tmp_srb; + struct timer_list waiting_timer; + struct timer_list selto_timer; + + unsigned long last_reset; + + u16 srb_count; + + u8 sel_timeout; + + unsigned int irq_level; + u8 tag_max_num; + u8 acb_flag; + u8 gmode2; + + u8 config; + u8 lun_chk; + u8 scan_devices; + u8 hostid_bit; + + u8 dcb_map[DC395x_MAX_SCSI_ID]; + struct DeviceCtlBlk *children[DC395x_MAX_SCSI_ID][32]; + + struct pci_dev *dev; + + u8 msg_len; + + struct ScsiReqBlk srb_array[DC395x_MAX_SRB_CNT]; + struct ScsiReqBlk srb; + + struct NvRamType eeprom; /* eeprom settings for this adapter */ +}; + + +/*--------------------------------------------------------------------------- + Forward declarations + ---------------------------------------------------------------------------*/ +static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status); +static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status); +static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status); +static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status); +static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status); +static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status); +static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status); +static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status); +static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status); +static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status); +static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status); +static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status); +static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status); +static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status); +static void set_basic_config(struct AdapterCtlBlk *acb); +static void cleanup_after_transfer(struct AdapterCtlBlk *acb, + struct ScsiReqBlk *srb); +static void reset_scsi_bus(struct AdapterCtlBlk *acb); +static void data_io_transfer(struct AdapterCtlBlk *acb, + struct ScsiReqBlk *srb, u16 io_dir); +static void disconnect(struct AdapterCtlBlk *acb); +static void reselect(struct AdapterCtlBlk *acb); +static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, + struct ScsiReqBlk *srb); +static inline void enable_msgout_abort(struct AdapterCtlBlk *acb, + struct ScsiReqBlk *srb); +static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, + struct ScsiReqBlk *srb); +static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_code, + struct scsi_cmnd *cmd, u8 force); +static void scsi_reset_detect(struct AdapterCtlBlk *acb); +static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb); +static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb, + 
struct ScsiReqBlk *srb); +static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, + struct ScsiReqBlk *srb); +static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, + struct ScsiReqBlk *srb); +static void set_xfer_rate(struct AdapterCtlBlk *acb, + struct DeviceCtlBlk *dcb); +static void waiting_timeout(struct timer_list *t); + + +/*--------------------------------------------------------------------------- + Static Data + ---------------------------------------------------------------------------*/ +static u16 current_sync_offset = 0; + +static void *dc395x_scsi_phase0[] = { + data_out_phase0,/* phase:0 */ + data_in_phase0, /* phase:1 */ + command_phase0, /* phase:2 */ + status_phase0, /* phase:3 */ + nop0, /* phase:4 PH_BUS_FREE .. initial phase */ + nop0, /* phase:5 PH_BUS_FREE .. initial phase */ + msgout_phase0, /* phase:6 */ + msgin_phase0, /* phase:7 */ +}; + +static void *dc395x_scsi_phase1[] = { + data_out_phase1,/* phase:0 */ + data_in_phase1, /* phase:1 */ + command_phase1, /* phase:2 */ + status_phase1, /* phase:3 */ + nop1, /* phase:4 PH_BUS_FREE .. initial phase */ + nop1, /* phase:5 PH_BUS_FREE .. initial phase */ + msgout_phase1, /* phase:6 */ + msgin_phase1, /* phase:7 */ +}; + +/* + *Fast20: 000 50ns, 20.0 MHz + * 001 75ns, 13.3 MHz + * 010 100ns, 10.0 MHz + * 011 125ns, 8.0 MHz + * 100 150ns, 6.6 MHz + * 101 175ns, 5.7 MHz + * 110 200ns, 5.0 MHz + * 111 250ns, 4.0 MHz + * + *Fast40(LVDS): 000 25ns, 40.0 MHz + * 001 50ns, 20.0 MHz + * 010 75ns, 13.3 MHz + * 011 100ns, 10.0 MHz + * 100 125ns, 8.0 MHz + * 101 150ns, 6.6 MHz + * 110 175ns, 5.7 MHz + * 111 200ns, 5.0 MHz + */ +/*static u8 clock_period[] = {12,19,25,31,37,44,50,62};*/ + +/* real period:48ns,76ns,100ns,124ns,148ns,176ns,200ns,248ns */ +static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 }; +static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 }; + + +/*--------------------------------------------------------------------------- + Configuration + ---------------------------------------------------------------------------*/ +/* + * Module/boot parameters currently affect *all* instances of the + * card in the system. + */ + +/* + * Command line parameters are stored in a structure below. + * These are the indices into the structure for the various + * command line options. + */ +#define CFG_ADAPTER_ID 0 +#define CFG_MAX_SPEED 1 +#define CFG_DEV_MODE 2 +#define CFG_ADAPTER_MODE 3 +#define CFG_TAGS 4 +#define CFG_RESET_DELAY 5 + +#define CFG_NUM 6 /* number of configuration items */ + + +/* + * Value used to indicate that a command line override + * hasn't been used to modify the value. + */ +#define CFG_PARAM_UNSET -1 + + +/* + * Hold command line parameters.
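+ * + * Each entry holds the current value alongside its legal range, the + * default used when the option is unset or out of range, and the + * conservative value substituted when the "safe" module option is set.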
+ */ +struct ParameterData { + int value; /* value of this setting */ + int min; /* minimum value */ + int max; /* maximum value */ + int def; /* default value */ + int safe; /* safe value */ +}; +static struct ParameterData cfg_data[] = { + { /* adapter id */ + CFG_PARAM_UNSET, + 0, + 15, + 7, + 7 + }, + { /* max speed */ + CFG_PARAM_UNSET, + 0, + 7, + 1, /* 13.3Mhz */ + 4, /* 6.7Hmz */ + }, + { /* dev mode */ + CFG_PARAM_UNSET, + 0, + 0x3f, + NTC_DO_PARITY_CHK | NTC_DO_DISCONNECT | NTC_DO_SYNC_NEGO | + NTC_DO_WIDE_NEGO | NTC_DO_TAG_QUEUEING | + NTC_DO_SEND_START, + NTC_DO_PARITY_CHK | NTC_DO_SEND_START + }, + { /* adapter mode */ + CFG_PARAM_UNSET, + 0, + 0x2f, + NAC_SCANLUN | + NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET + /*| NAC_ACTIVE_NEG*/, + NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08 + }, + { /* tags */ + CFG_PARAM_UNSET, + 0, + 5, + 3, /* 16 tags (??) */ + 2, + }, + { /* reset delay */ + CFG_PARAM_UNSET, + 0, + 180, + 1, /* 1 second */ + 10, /* 10 seconds */ + } +}; + + +/* + * Safe settings. If set to zero the BIOS/default values with + * command line overrides will be used. If set to 1 then safe and + * slow settings will be used. + */ +static bool use_safe_settings = 0; +module_param_named(safe, use_safe_settings, bool, 0); +MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false"); + + +module_param_named(adapter_id, cfg_data[CFG_ADAPTER_ID].value, int, 0); +MODULE_PARM_DESC(adapter_id, "Adapter SCSI ID. Default 7 (0-15)"); + +module_param_named(max_speed, cfg_data[CFG_MAX_SPEED].value, int, 0); +MODULE_PARM_DESC(max_speed, "Maximum bus speed. Default 1 (0-7) Speeds: 0=20, 1=13.3, 2=10, 3=8, 4=6.7, 5=5.8, 6=5, 7=4 Mhz"); + +module_param_named(dev_mode, cfg_data[CFG_DEV_MODE].value, int, 0); +MODULE_PARM_DESC(dev_mode, "Device mode."); + +module_param_named(adapter_mode, cfg_data[CFG_ADAPTER_MODE].value, int, 0); +MODULE_PARM_DESC(adapter_mode, "Adapter mode."); + +module_param_named(tags, cfg_data[CFG_TAGS].value, int, 0); +MODULE_PARM_DESC(tags, "Number of tags (1< cfg_data[i].max) + cfg_data[i].value = cfg_data[i].def; + } +} + + + +/* + * Mapping from the eeprom delay index value (index into this array) + * to the number of actual seconds that the delay should be for. + */ +static char eeprom_index_to_delay_map[] = + { 1, 3, 5, 10, 16, 30, 60, 120 }; + + +/** + * eeprom_index_to_delay - Take the eeprom delay setting and convert it + * into a number of seconds. + * + * @eeprom: The eeprom structure in which we find the delay index to map. + **/ +static void eeprom_index_to_delay(struct NvRamType *eeprom) +{ + eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time]; +} + + +/** + * delay_to_eeprom_index - Take a delay in seconds and return the + * closest eeprom index which will delay for at least that amount of + * seconds. + * + * @delay: The delay, in seconds, to find the eeprom index for. + **/ +static int delay_to_eeprom_index(int delay) +{ + u8 idx = 0; + while (idx < 7 && eeprom_index_to_delay_map[idx] < delay) + idx++; + return idx; +} + + +/** + * eeprom_override - Override the eeprom settings, in the provided + * eeprom structure, with values that have been set on the command + * line. + * + * @eeprom: The eeprom data to override with command line options. 
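+ * + * The per-device overrides (dev_mode and max_speed) are applied to every + * target ID alike; there is no per-target granularity on the command line.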
+ **/ +static void eeprom_override(struct NvRamType *eeprom) +{ + u8 id; + + /* Adapter Settings */ + if (cfg_data[CFG_ADAPTER_ID].value != CFG_PARAM_UNSET) + eeprom->scsi_id = (u8)cfg_data[CFG_ADAPTER_ID].value; + + if (cfg_data[CFG_ADAPTER_MODE].value != CFG_PARAM_UNSET) + eeprom->channel_cfg = (u8)cfg_data[CFG_ADAPTER_MODE].value; + + if (cfg_data[CFG_RESET_DELAY].value != CFG_PARAM_UNSET) + eeprom->delay_time = delay_to_eeprom_index( + cfg_data[CFG_RESET_DELAY].value); + + if (cfg_data[CFG_TAGS].value != CFG_PARAM_UNSET) + eeprom->max_tag = (u8)cfg_data[CFG_TAGS].value; + + /* Device Settings */ + for (id = 0; id < DC395x_MAX_SCSI_ID; id++) { + if (cfg_data[CFG_DEV_MODE].value != CFG_PARAM_UNSET) + eeprom->target[id].cfg0 = + (u8)cfg_data[CFG_DEV_MODE].value; + + if (cfg_data[CFG_MAX_SPEED].value != CFG_PARAM_UNSET) + eeprom->target[id].period = + (u8)cfg_data[CFG_MAX_SPEED].value; + + } +} + + +/*--------------------------------------------------------------------------- + ---------------------------------------------------------------------------*/ + +static unsigned int list_size(struct list_head *head) +{ + unsigned int count = 0; + struct list_head *pos; + list_for_each(pos, head) + count++; + return count; +} + + +static struct DeviceCtlBlk *dcb_get_next(struct list_head *head, + struct DeviceCtlBlk *pos) +{ + int use_next = 0; + struct DeviceCtlBlk* next = NULL; + struct DeviceCtlBlk* i; + + if (list_empty(head)) + return NULL; + + /* find supplied dcb and then select the next one */ + list_for_each_entry(i, head, list) + if (use_next) { + next = i; + break; + } else if (i == pos) { + use_next = 1; + } + /* if no next one take the head one (ie, wraparound) */ + if (!next) + list_for_each_entry(i, head, list) { + next = i; + break; + } + + return next; +} + + +static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) +{ + if (srb->tag_number < 255) { + dcb->tag_mask &= ~(1 << srb->tag_number); /* free tag mask */ + srb->tag_number = 255; + } +} + + +/* Find cmd in SRB list */ +static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd, + struct list_head *head) +{ + struct ScsiReqBlk *i; + list_for_each_entry(i, head, list) + if (i->cmd == cmd) + return i; + return NULL; +} + +/* Sets the timer to wake us up */ +static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to) +{ + if (timer_pending(&acb->waiting_timer)) + return; + if (time_before(jiffies + to, acb->last_reset - HZ / 2)) + acb->waiting_timer.expires = + acb->last_reset - HZ / 2 + 1; + else + acb->waiting_timer.expires = jiffies + to + 1; + add_timer(&acb->waiting_timer); +} + + +/* Send the next command from the waiting list to the bus */ +static void waiting_process_next(struct AdapterCtlBlk *acb) +{ + struct DeviceCtlBlk *start = NULL; + struct DeviceCtlBlk *pos; + struct DeviceCtlBlk *dcb; + struct ScsiReqBlk *srb; + struct list_head *dcb_list_head = &acb->dcb_list; + + if (acb->active_dcb + || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) + return; + + if (timer_pending(&acb->waiting_timer)) + del_timer(&acb->waiting_timer); + + if (list_empty(dcb_list_head)) + return; + + /* + * Find the starting dcb. Need to find it again in the list + * since the list may have changed since we set the ptr to it + */ + list_for_each_entry(dcb, dcb_list_head, list) + if (dcb == acb->dcb_run_robin) { + start = dcb; + break; + } + if (!start) { + /* This can happen! 
*/ + start = list_entry(dcb_list_head->next, typeof(*start), list); + acb->dcb_run_robin = start; + } + + + /* + * Loop over the dcb, but we start somewhere (potentially) in + * the middle of the loop so we need to manully do this. + */ + pos = start; + do { + struct list_head *waiting_list_head = &pos->srb_waiting_list; + + /* Make sure, the next another device gets scheduled ... */ + acb->dcb_run_robin = dcb_get_next(dcb_list_head, + acb->dcb_run_robin); + + if (list_empty(waiting_list_head) || + pos->max_command <= list_size(&pos->srb_going_list)) { + /* move to next dcb */ + pos = dcb_get_next(dcb_list_head, pos); + } else { + srb = list_entry(waiting_list_head->next, + struct ScsiReqBlk, list); + + /* Try to send to the bus */ + if (!start_scsi(acb, pos, srb)) + list_move(&srb->list, &pos->srb_going_list); + else + waiting_set_timer(acb, HZ/50); + break; + } + } while (pos != start); +} + + +/* Wake up waiting queue */ +static void waiting_timeout(struct timer_list *t) +{ + unsigned long flags; + struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer); + dprintkdbg(DBG_1, + "waiting_timeout: Queue woken up by timer. acb=%p\n", acb); + DC395x_LOCK_IO(acb->scsi_host, flags); + waiting_process_next(acb); + DC395x_UNLOCK_IO(acb->scsi_host, flags); +} + + +/* Get the DCB for a given ID/LUN combination */ +static struct DeviceCtlBlk *find_dcb(struct AdapterCtlBlk *acb, u8 id, u8 lun) +{ + return acb->children[id][lun]; +} + + +/* Send SCSI Request Block (srb) to adapter (acb) */ +static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) +{ + struct DeviceCtlBlk *dcb = srb->dcb; + + if (dcb->max_command <= list_size(&dcb->srb_going_list) || + acb->active_dcb || + (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) { + list_add_tail(&srb->list, &dcb->srb_waiting_list); + waiting_process_next(acb); + return; + } + + if (!start_scsi(acb, dcb, srb)) { + list_add_tail(&srb->list, &dcb->srb_going_list); + } else { + list_add(&srb->list, &dcb->srb_waiting_list); + waiting_set_timer(acb, HZ / 50); + } +} + +/* Prepare SRB for being sent to Device DCB w/ command *cmd */ +static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, + struct ScsiReqBlk *srb) +{ + int nseg; + enum dma_data_direction dir = cmd->sc_data_direction; + dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n", + cmd, dcb->target_id, dcb->target_lun); + + srb->dcb = dcb; + srb->cmd = cmd; + srb->sg_count = 0; + srb->total_xfer_length = 0; + srb->sg_bus_addr = 0; + srb->sg_index = 0; + srb->adapter_status = 0; + srb->target_status = 0; + srb->msg_count = 0; + srb->status = 0; + srb->flag = 0; + srb->state = 0; + srb->retry_count = 0; + srb->tag_number = TAG_NONE; + srb->scsi_phase = PH_BUS_FREE; /* initial phase */ + srb->end_message = 0; + + nseg = scsi_dma_map(cmd); + BUG_ON(nseg < 0); + + if (dir == DMA_NONE || !nseg) { + dprintkdbg(DBG_0, + "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n", + cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd), + srb->segment_x[0].address); + } else { + int i; + u32 reqlen = scsi_bufflen(cmd); + struct scatterlist *sg; + struct SGentry *sgp = srb->segment_x; + + srb->sg_count = nseg; + + dprintkdbg(DBG_0, + "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n", + reqlen, scsi_sglist(cmd), scsi_sg_count(cmd), + srb->sg_count); + + scsi_for_each_sg(cmd, sg, srb->sg_count, i) { + u32 busaddr = (u32)sg_dma_address(sg); + u32 seglen = (u32)sg->length; + sgp[i].address = busaddr; + sgp[i].length = seglen; + srb->total_xfer_length += seglen; + } + sgp += srb->sg_count 
- 1; + + /* + * adjust last page if too big as it is allocated + * on even page boundaries + */ + if (srb->total_xfer_length > reqlen) { + sgp->length -= (srb->total_xfer_length - reqlen); + srb->total_xfer_length = reqlen; + } + + /* Fixup for WIDE padding - make sure length is even */ + if (dcb->sync_period & WIDE_SYNC && + srb->total_xfer_length % 2) { + srb->total_xfer_length++; + sgp->length++; + } + + srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev, + srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE); + + dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n", + srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN); + } + + srb->request_length = srb->total_xfer_length; +} + + +/** + * dc395x_queue_command_lck - queue scsi command passed from the mid + * layer, invoke 'done' on completion + * + * @cmd: pointer to scsi command object + * + * Returns 1 if the adapter (host) is busy, else returns 0. One + * reason for an adapter to be busy is that the number + * of outstanding queued commands is already equal to + * struct Scsi_Host::can_queue . + * + * Required: if struct Scsi_Host::can_queue is ever non-zero + * then this function is required. + * + * Locks: struct Scsi_Host::host_lock held on entry (with "irqsave") + * and is expected to be held on return. + * + */ +static int dc395x_queue_command_lck(struct scsi_cmnd *cmd) +{ + void (*done)(struct scsi_cmnd *) = scsi_done; + struct DeviceCtlBlk *dcb; + struct ScsiReqBlk *srb; + struct AdapterCtlBlk *acb = + (struct AdapterCtlBlk *)cmd->device->host->hostdata; + dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n", + cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]); + + /* Assume BAD_TARGET; will be cleared later */ + set_host_byte(cmd, DID_BAD_TARGET); + + /* ignore invalid targets */ + if (cmd->device->id >= acb->scsi_host->max_id || + cmd->device->lun >= acb->scsi_host->max_lun || + cmd->device->lun >31) { + goto complete; + } + + /* does the specified lun on the specified device exist */ + if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) { + dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n", + cmd->device->id, (u8)cmd->device->lun); + goto complete; + } + + /* do we have a DCB for the device */ + dcb = find_dcb(acb, cmd->device->id, cmd->device->lun); + if (!dcb) { + /* should never happen */ + dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>", + cmd->device->id, (u8)cmd->device->lun); + goto complete; + } + + set_host_byte(cmd, DID_OK); + set_status_byte(cmd, SAM_STAT_GOOD); + + srb = list_first_entry_or_null(&acb->srb_free_list, + struct ScsiReqBlk, list); + if (!srb) { + /* + * Return 1 since we are unable to queue this command at this + * point in time. + */ + dprintkdbg(DBG_0, "queue_command: No free srb's\n"); + return 1; + } + list_del(&srb->list); + + build_srb(cmd, dcb, srb); + + if (!list_empty(&dcb->srb_waiting_list)) { + /* append to waiting queue */ + list_add_tail(&srb->list, &dcb->srb_waiting_list); + waiting_process_next(acb); + } else { + /* process immediately */ + send_srb(acb, srb); + } + dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd); + return 0; + +complete: + /* + * Complete the command immediatey, and then return 0 to + * indicate that we have handled the command. This is usually + * done when the commad is for things like non existent + * devices. 
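+	 * Editorial note (not in the original source): returning 0 after
+	 * calling done() tells the SCSI midlayer the command has been
+	 * consumed, whereas the "no free srb" path above returns 1 so
+	 * that the midlayer will retry the command later.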
+ */ + done(cmd); + return 0; +} + +static DEF_SCSI_QCMD(dc395x_queue_command) + +static void dump_register_info(struct AdapterCtlBlk *acb, + struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) +{ + u16 pstat; + struct pci_dev *dev = acb->dev; + pci_read_config_word(dev, PCI_STATUS, &pstat); + if (!dcb) + dcb = acb->active_dcb; + if (!srb && dcb) + srb = dcb->active_srb; + if (srb) { + if (!srb->cmd) + dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n", + srb, srb->cmd); + else + dprintkl(KERN_INFO, "dump: srb=%p cmd=%p " + "cmnd=0x%02x <%02i-%i>\n", + srb, srb->cmd, + srb->cmd->cmnd[0], srb->cmd->device->id, + (u8)srb->cmd->device->lun); + printk(" sglist=%p cnt=%i idx=%i len=%zu\n", + srb->segment_x, srb->sg_count, srb->sg_index, + srb->total_xfer_length); + printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n", + srb->state, srb->status, srb->scsi_phase, + (acb->active_dcb) ? "" : "not"); + } + dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x " + "signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x " + "rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x " + "config2=0x%02x cmd=0x%02x selto=0x%02x}\n", + DC395x_read16(acb, TRM_S1040_SCSI_STATUS), + DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), + DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL), + DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS), + DC395x_read8(acb, TRM_S1040_SCSI_SYNC), + DC395x_read8(acb, TRM_S1040_SCSI_TARGETID), + DC395x_read8(acb, TRM_S1040_SCSI_IDMSG), + DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), + DC395x_read8(acb, TRM_S1040_SCSI_INTEN), + DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0), + DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2), + DC395x_read8(acb, TRM_S1040_SCSI_COMMAND), + DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT)); + dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x " + "irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x " + "ctctr=0x%08x addr=0x%08x:0x%08x}\n", + DC395x_read16(acb, TRM_S1040_DMA_COMMAND), + DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), + DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), + DC395x_read8(acb, TRM_S1040_DMA_STATUS), + DC395x_read8(acb, TRM_S1040_DMA_INTEN), + DC395x_read16(acb, TRM_S1040_DMA_CONFIG), + DC395x_read32(acb, TRM_S1040_DMA_XCNT), + DC395x_read32(acb, TRM_S1040_DMA_CXCNT), + DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR), + DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR)); + dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} " + "pci{status=0x%04x}\n", + DC395x_read8(acb, TRM_S1040_GEN_CONTROL), + DC395x_read8(acb, TRM_S1040_GEN_STATUS), + DC395x_read8(acb, TRM_S1040_GEN_TIMER), + pstat); +} + + +static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt) +{ +#if debug_enabled(DBG_FIFO) + u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL); + u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT); + if (!(fifocnt & 0x40)) + dprintkdbg(DBG_FIFO, + "clear_fifo: (%i bytes) on phase %02x in %s\n", + fifocnt & 0x3f, lines, txt); +#endif + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO); +} + + +static void reset_dev_param(struct AdapterCtlBlk *acb) +{ + struct DeviceCtlBlk *dcb; + struct NvRamType *eeprom = &acb->eeprom; + dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb); + + list_for_each_entry(dcb, &acb->dcb_list, list) { + u8 period_index; + + dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE); + dcb->sync_period = 0; + dcb->sync_offset = 0; + + dcb->dev_mode = eeprom->target[dcb->target_id].cfg0; + period_index = eeprom->target[dcb->target_id].period & 0x07; + dcb->min_nego_period = clock_period[period_index]; + if 
(!(dcb->dev_mode & NTC_DO_WIDE_NEGO) + || !(acb->config & HCC_WIDE_CARD)) + dcb->sync_mode &= ~WIDE_NEGO_ENABLE; + } +} + + +/* + * perform a hard reset on the SCSI bus + * @cmd - some command for this host (for fetching hooks) + * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003). + */ +static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd) +{ + struct AdapterCtlBlk *acb = + (struct AdapterCtlBlk *)cmd->device->host->hostdata; + dprintkl(KERN_INFO, + "eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n", + cmd, cmd->device->id, (u8)cmd->device->lun, cmd); + + if (timer_pending(&acb->waiting_timer)) + del_timer(&acb->waiting_timer); + + /* + * disable interrupt + */ + DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00); + DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00); + DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE); + DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE); + + reset_scsi_bus(acb); + udelay(500); + + /* We may be in serious trouble. Wait some seconds */ + acb->last_reset = + jiffies + 3 * HZ / 2 + + HZ * acb->eeprom.delay_time; + + /* + * re-enable interrupt + */ + /* Clear SCSI FIFO */ + DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO); + clear_fifo(acb, "eh_bus_reset"); + /* Delete pending IRQ */ + DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS); + set_basic_config(acb); + + reset_dev_param(acb); + doing_srb_done(acb, DID_RESET, cmd, 0); + acb->active_dcb = NULL; + acb->acb_flag = 0; /* RESET_DETECT, RESET_DONE ,RESET_DEV */ + waiting_process_next(acb); + + return SUCCESS; +} + +static int dc395x_eh_bus_reset(struct scsi_cmnd *cmd) +{ + int rc; + + spin_lock_irq(cmd->device->host->host_lock); + rc = __dc395x_eh_bus_reset(cmd); + spin_unlock_irq(cmd->device->host->host_lock); + + return rc; +} + +/* + * abort an errant SCSI command + * @cmd - command to be aborted + * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003). + */ +static int dc395x_eh_abort(struct scsi_cmnd *cmd) +{ + /* + * Look into our command queues: If it has not been sent already, + * we remove it and return success. Otherwise fail. 
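+	 * Editorial note: only commands still sitting on the per-device
+	 * waiting list can be taken back here; once an srb is on the
+	 * going list it has already been sent to the device, and
+	 * aborting it would need a real ABORT message on the bus,
+	 * which this handler does not attempt (see the XXX below).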
+ */ + struct AdapterCtlBlk *acb = + (struct AdapterCtlBlk *)cmd->device->host->hostdata; + struct DeviceCtlBlk *dcb; + struct ScsiReqBlk *srb; + dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n", + cmd, cmd->device->id, (u8)cmd->device->lun, cmd); + + dcb = find_dcb(acb, cmd->device->id, cmd->device->lun); + if (!dcb) { + dprintkl(KERN_DEBUG, "eh_abort: No such device\n"); + return FAILED; + } + + srb = find_cmd(cmd, &dcb->srb_waiting_list); + if (srb) { + list_del(&srb->list); + pci_unmap_srb_sense(acb, srb); + pci_unmap_srb(acb, srb); + free_tag(dcb, srb); + list_add_tail(&srb->list, &acb->srb_free_list); + dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n"); + set_host_byte(cmd, DID_ABORT); + return SUCCESS; + } + srb = find_cmd(cmd, &dcb->srb_going_list); + if (srb) { + dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n"); + /* XXX: Should abort the command here */ + } else { + dprintkl(KERN_DEBUG, "eh_abort: Command not found\n"); + } + return FAILED; +} + + +/* SDTR */ +static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, + struct ScsiReqBlk *srb) +{ + u8 *ptr = srb->msgout_buf + srb->msg_count; + if (srb->msg_count > 1) { + dprintkl(KERN_INFO, + "build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n", + srb->msg_count, srb->msgout_buf[0], + srb->msgout_buf[1]); + return; + } + if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) { + dcb->sync_offset = 0; + dcb->min_nego_period = 200 >> 2; + } else if (dcb->sync_offset == 0) + dcb->sync_offset = SYNC_NEGO_OFFSET; + + srb->msg_count += spi_populate_sync_msg(ptr, dcb->min_nego_period, + dcb->sync_offset); + srb->state |= SRB_DO_SYNC_NEGO; +} + + +/* WDTR */ +static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, + struct ScsiReqBlk *srb) +{ + u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) & + (acb->config & HCC_WIDE_CARD)) ? 1 : 0; + u8 *ptr = srb->msgout_buf + srb->msg_count; + if (srb->msg_count > 1) { + dprintkl(KERN_INFO, + "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n", + srb->msg_count, srb->msgout_buf[0], + srb->msgout_buf[1]); + return; + } + srb->msg_count += spi_populate_width_msg(ptr, wide); + srb->state |= SRB_DO_WIDE_NEGO; +} + + +#if 0 +/* Timer to work around chip flaw: When selecting and the bus is + * busy, we sometimes miss a Selection timeout IRQ */ +void selection_timeout_missed(unsigned long ptr); +/* Sets the timer to wake us up */ +static void selto_timer(struct AdapterCtlBlk *acb) +{ + if (timer_pending(&acb->selto_timer)) + return; + acb->selto_timer.function = selection_timeout_missed; + acb->selto_timer.data = (unsigned long) acb; + if (time_before + (jiffies + HZ, acb->last_reset + HZ / 2)) + acb->selto_timer.expires = + acb->last_reset + HZ / 2 + 1; + else + acb->selto_timer.expires = jiffies + HZ + 1; + add_timer(&acb->selto_timer); +} + + +void selection_timeout_missed(unsigned long ptr) +{ + unsigned long flags; + struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr; + struct ScsiReqBlk *srb; + dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n"); + if (!acb->active_dcb || !acb->active_dcb->active_srb) { + dprintkl(KERN_DEBUG, "... but no cmd pending? 
Oops!\n"); + return; + } + DC395x_LOCK_IO(acb->scsi_host, flags); + srb = acb->active_dcb->active_srb; + disconnect(acb); + DC395x_UNLOCK_IO(acb->scsi_host, flags); +} +#endif + + +static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, + struct ScsiReqBlk* srb) +{ + u16 __maybe_unused s_stat2, return_code; + u8 s_stat, scsicommand, i, identify_message; + u8 *ptr; + dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n", + dcb->target_id, dcb->target_lun, srb); + + srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */ + + s_stat = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL); + s_stat2 = 0; + s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS); +#if 1 + if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) { + dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n", + s_stat, s_stat2); + /* + * Try anyway? + * + * We could, BUT: Sometimes the TRM_S1040 misses to produce a Selection + * Timeout, a Disconnect or a Reselection IRQ, so we would be screwed! + * (This is likely to be a bug in the hardware. Obviously, most people + * only have one initiator per SCSI bus.) + * Instead let this fail and have the timer make sure the command is + * tried again after a short time + */ + /*selto_timer (acb); */ + return 1; + } +#endif + if (acb->active_dcb) { + dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a" + "command while another command (0x%p) is active.", + srb->cmd, + acb->active_dcb->active_srb ? + acb->active_dcb->active_srb->cmd : 0); + return 1; + } + if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) { + dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd); + return 1; + } + /* Allow starting of SCSI commands half a second before we allow the mid-level + * to queue them again after a reset */ + if (time_before(jiffies, acb->last_reset - HZ / 2)) { + dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n"); + return 1; + } + + /* Flush FIFO */ + clear_fifo(acb, "start_scsi"); + DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id); + DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id); + DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period); + DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset); + srb->scsi_phase = PH_BUS_FREE; /* initial phase */ + + identify_message = dcb->identify_msg; + /*DC395x_TRM_write8(TRM_S1040_SCSI_IDMSG, identify_message); */ + /* Don't allow disconnection for AUTO_REQSENSE: Cont.All.Cond.! 
*/ + if (srb->flag & AUTO_REQSENSE) + identify_message &= 0xBF; + + if (((srb->cmd->cmnd[0] == INQUIRY) + || (srb->cmd->cmnd[0] == REQUEST_SENSE) + || (srb->flag & AUTO_REQSENSE)) + && (((dcb->sync_mode & WIDE_NEGO_ENABLE) + && !(dcb->sync_mode & WIDE_NEGO_DONE)) + || ((dcb->sync_mode & SYNC_NEGO_ENABLE) + && !(dcb->sync_mode & SYNC_NEGO_DONE))) + && (dcb->target_lun == 0)) { + srb->msgout_buf[0] = identify_message; + srb->msg_count = 1; + scsicommand = SCMD_SEL_ATNSTOP; + srb->state = SRB_MSGOUT; +#ifndef SYNC_FIRST + if (dcb->sync_mode & WIDE_NEGO_ENABLE + && dcb->inquiry7 & SCSI_INQ_WBUS16) { + build_wdtr(acb, dcb, srb); + goto no_cmd; + } +#endif + if (dcb->sync_mode & SYNC_NEGO_ENABLE + && dcb->inquiry7 & SCSI_INQ_SYNC) { + build_sdtr(acb, dcb, srb); + goto no_cmd; + } + if (dcb->sync_mode & WIDE_NEGO_ENABLE + && dcb->inquiry7 & SCSI_INQ_WBUS16) { + build_wdtr(acb, dcb, srb); + goto no_cmd; + } + srb->msg_count = 0; + } + /* Send identify message */ + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, identify_message); + + scsicommand = SCMD_SEL_ATN; + srb->state = SRB_START_; +#ifndef DC395x_NO_TAGQ + if ((dcb->sync_mode & EN_TAG_QUEUEING) + && (identify_message & 0xC0)) { + /* Send Tag message */ + u32 tag_mask = 1; + u8 tag_number = 0; + while (tag_mask & dcb->tag_mask + && tag_number < dcb->max_command) { + tag_mask = tag_mask << 1; + tag_number++; + } + if (tag_number >= dcb->max_command) { + dprintkl(KERN_WARNING, "start_scsi: (0x%p) " + "Out of tags target=<%02i-%i>)\n", + srb->cmd, srb->cmd->device->id, + (u8)srb->cmd->device->lun); + srb->state = SRB_READY; + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, + DO_HWRESELECT); + return 1; + } + /* Send Tag id */ + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SIMPLE_QUEUE_TAG); + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number); + dcb->tag_mask |= tag_mask; + srb->tag_number = tag_number; + scsicommand = SCMD_SEL_ATN3; + srb->state = SRB_START_; + } +#endif +/*polling:*/ + /* Send CDB ..command block ......... */ + dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n", + srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun, + srb->cmd->cmnd[0], srb->tag_number); + if (srb->flag & AUTO_REQSENSE) { + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE); + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5)); + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE); + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); + } else { + ptr = (u8 *)srb->cmd->cmnd; + for (i = 0; i < srb->cmd->cmd_len; i++) + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++); + } + no_cmd: + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, + DO_HWRESELECT | DO_DATALATCH); + if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) { + /* + * If start_scsi return 1: + * we caught an interrupt (must be reset or reselection ... ) + * : Let's process it first! 
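+	 * Editorial note: the callers (send_srb/waiting_process_next)
+	 * react to a non-zero return by keeping the srb on the waiting
+	 * list and arming the short HZ/50 retry timer, so nothing is
+	 * lost by backing off here.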
+ */ + dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n", + srb->cmd, dcb->target_id, dcb->target_lun); + srb->state = SRB_READY; + free_tag(dcb, srb); + srb->msg_count = 0; + return_code = 1; + /* This IRQ should NOT get lost, as we did not acknowledge it */ + } else { + /* + * If start_scsi returns 0: + * we know that the SCSI processor is free + */ + srb->scsi_phase = PH_BUS_FREE; /* initial phase */ + dcb->active_srb = srb; + acb->active_dcb = dcb; + return_code = 0; + /* it's important for atn stop */ + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, + DO_DATALATCH | DO_HWRESELECT); + /* SCSI command */ + DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, scsicommand); + } + return return_code; +} + + +#define DC395x_ENABLE_MSGOUT \ + DC395x_write16 (acb, TRM_S1040_SCSI_CONTROL, DO_SETATN); \ + srb->state |= SRB_MSGOUT + + +/* abort command */ +static inline void enable_msgout_abort(struct AdapterCtlBlk *acb, + struct ScsiReqBlk *srb) +{ + srb->msgout_buf[0] = ABORT; + srb->msg_count = 1; + DC395x_ENABLE_MSGOUT; + srb->state &= ~SRB_MSGIN; + srb->state |= SRB_MSGOUT; +} + + +/** + * dc395x_handle_interrupt - Handle an interrupt that has been confirmed to + * have been triggered for this card. + * + * @acb: a pointer to the adpter control block + * @scsi_status: the status return when we checked the card + **/ +static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb, + u16 scsi_status) +{ + struct DeviceCtlBlk *dcb; + struct ScsiReqBlk *srb; + u16 phase; + u8 scsi_intstatus; + unsigned long flags; + void (*dc395x_statev)(struct AdapterCtlBlk *, struct ScsiReqBlk *, + u16 *); + + DC395x_LOCK_IO(acb->scsi_host, flags); + + /* This acknowledges the IRQ */ + scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS); + if ((scsi_status & 0x2007) == 0x2002) + dprintkl(KERN_DEBUG, + "COP after COP completed? %04x\n", scsi_status); + if (debug_enabled(DBG_KG)) { + if (scsi_intstatus & INT_SELTIMEOUT) + dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n"); + } + /*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */ + + if (timer_pending(&acb->selto_timer)) + del_timer(&acb->selto_timer); + + if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) { + disconnect(acb); /* bus free interrupt */ + goto out_unlock; + } + if (scsi_intstatus & INT_RESELECTED) { + reselect(acb); + goto out_unlock; + } + if (scsi_intstatus & INT_SELECT) { + dprintkl(KERN_INFO, "Host does not support target mode!\n"); + goto out_unlock; + } + if (scsi_intstatus & INT_SCSIRESET) { + scsi_reset_detect(acb); + goto out_unlock; + } + if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) { + dcb = acb->active_dcb; + if (!dcb) { + dprintkl(KERN_DEBUG, + "Oops: BusService (%04x %02x) w/o ActiveDCB!\n", + scsi_status, scsi_intstatus); + goto out_unlock; + } + srb = dcb->active_srb; + if (dcb->flag & ABORT_DEV_) { + dprintkdbg(DBG_0, "MsgOut Abort Device.....\n"); + enable_msgout_abort(acb, srb); + } + + /* software sequential machine */ + phase = (u16)srb->scsi_phase; + + /* + * 62037 or 62137 + * call dc395x_scsi_phase0[]... "phase entry" + * handle every phase before start transfer + */ + /* data_out_phase0, phase:0 */ + /* data_in_phase0, phase:1 */ + /* command_phase0, phase:2 */ + /* status_phase0, phase:3 */ + /* nop0, phase:4 PH_BUS_FREE .. initial phase */ + /* nop0, phase:5 PH_BUS_FREE .. 
initial phase */ + /* msgout_phase0, phase:6 */ + /* msgin_phase0, phase:7 */ + dc395x_statev = dc395x_scsi_phase0[phase]; + dc395x_statev(acb, srb, &scsi_status); + + /* + * if there were any exception occurred scsi_status + * will be modify to bus free phase new scsi_status + * transfer out from ... previous dc395x_statev + */ + srb->scsi_phase = scsi_status & PHASEMASK; + phase = (u16)scsi_status & PHASEMASK; + + /* + * call dc395x_scsi_phase1[]... "phase entry" handle + * every phase to do transfer + */ + /* data_out_phase1, phase:0 */ + /* data_in_phase1, phase:1 */ + /* command_phase1, phase:2 */ + /* status_phase1, phase:3 */ + /* nop1, phase:4 PH_BUS_FREE .. initial phase */ + /* nop1, phase:5 PH_BUS_FREE .. initial phase */ + /* msgout_phase1, phase:6 */ + /* msgin_phase1, phase:7 */ + dc395x_statev = dc395x_scsi_phase1[phase]; + dc395x_statev(acb, srb, &scsi_status); + } + out_unlock: + DC395x_UNLOCK_IO(acb->scsi_host, flags); +} + + +static irqreturn_t dc395x_interrupt(int irq, void *dev_id) +{ + struct AdapterCtlBlk *acb = dev_id; + u16 scsi_status; + u8 dma_status; + irqreturn_t handled = IRQ_NONE; + + /* + * Check for pending interrupt + */ + scsi_status = DC395x_read16(acb, TRM_S1040_SCSI_STATUS); + dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS); + if (scsi_status & SCSIINTERRUPT) { + /* interrupt pending - let's process it! */ + dc395x_handle_interrupt(acb, scsi_status); + handled = IRQ_HANDLED; + } + else if (dma_status & 0x20) { + /* Error from the DMA engine */ + dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status); +#if 0 + dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n"); + if (acb->active_dcb) { + acb->active_dcb-> flag |= ABORT_DEV_; + if (acb->active_dcb->active_srb) + enable_msgout_abort(acb, acb->active_dcb->active_srb); + } + DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO); +#else + dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n"); + acb = NULL; +#endif + handled = IRQ_HANDLED; + } + + return handled; +} + + +static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status) +{ + dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd); + if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) + *pscsi_status = PH_BUS_FREE; /*.. initial phase */ + + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ + srb->state &= ~SRB_MSGOUT; +} + + +static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status) +{ + u16 i; + u8 *ptr; + dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd); + + clear_fifo(acb, "msgout_phase1"); + if (!(srb->state & SRB_MSGOUT)) { + srb->state |= SRB_MSGOUT; + dprintkl(KERN_DEBUG, + "msgout_phase1: (0x%p) Phase unexpected\n", + srb->cmd); /* So what ? 
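+			 * Editorial note: even in this unexpected case the
+			 * handler continues; if no message bytes are queued
+			 * (msg_count == 0), a NOP message is written to the
+			 * FIFO below to satisfy the target's MESSAGE OUT
+			 * request.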
*/ + } + if (!srb->msg_count) { + dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n", + srb->cmd); + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, NOP); + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); + /* it's important for atn stop */ + DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT); + return; + } + ptr = (u8 *)srb->msgout_buf; + for (i = 0; i < srb->msg_count; i++) + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++); + srb->msg_count = 0; + if (srb->msgout_buf[0] == ABORT_TASK_SET) + srb->state = SRB_ABORT_SENT; + + DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT); +} + + +static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status) +{ + dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd); + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); +} + + +static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status) +{ + struct DeviceCtlBlk *dcb; + u8 *ptr; + u16 i; + dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd); + + clear_fifo(acb, "command_phase1"); + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN); + if (!(srb->flag & AUTO_REQSENSE)) { + ptr = (u8 *)srb->cmd->cmnd; + for (i = 0; i < srb->cmd->cmd_len; i++) { + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr); + ptr++; + } + } else { + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE); + dcb = acb->active_dcb; + /* target id */ + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5)); + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE); + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); + } + srb->state |= SRB_COMMAND; + /* it's important for atn stop */ + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); + /* SCSI command */ + DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT); +} + + +/* + * Verify that the remaining space in the hw sg lists is the same as + * the count of remaining bytes in srb->total_xfer_length + */ +static void sg_verify_length(struct ScsiReqBlk *srb) +{ + if (debug_enabled(DBG_SG)) { + unsigned len = 0; + unsigned idx = srb->sg_index; + struct SGentry *psge = srb->segment_x + idx; + for (; idx < srb->sg_count; psge++, idx++) + len += psge->length; + if (len != srb->total_xfer_length) + dprintkdbg(DBG_SG, + "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n", + srb->total_xfer_length, len); + } +} + + +/* + * Compute the next Scatter Gather list index and adjust its length + * and address if necessary + */ +static void sg_update_list(struct ScsiReqBlk *srb, u32 left) +{ + u8 idx; + u32 xferred = srb->total_xfer_length - left; /* bytes transferred */ + struct SGentry *psge = srb->segment_x + srb->sg_index; + + dprintkdbg(DBG_0, + "sg_update_list: Transferred %i of %i bytes, %i remain\n", + xferred, srb->total_xfer_length, left); + if (xferred == 0) { + /* nothing to update since we did not transfer any data */ + return; + } + + sg_verify_length(srb); + srb->total_xfer_length = left; /* update remaining count */ + for (idx = srb->sg_index; idx < srb->sg_count; idx++) { + if (xferred >= psge->length) { + /* Complete SG entries done */ + xferred -= psge->length; + } else { + /* Partial SG entry done */ + dma_sync_single_for_cpu(&srb->dcb->acb->dev->dev, + srb->sg_bus_addr, SEGMENTX_LEN, + DMA_TO_DEVICE); + psge->length -= xferred; + psge->address += xferred; + srb->sg_index = idx; + dma_sync_single_for_device(&srb->dcb->acb->dev->dev, + srb->sg_bus_addr, 
SEGMENTX_LEN, + DMA_TO_DEVICE); + break; + } + psge++; + } + sg_verify_length(srb); +} + + +/* + * We have transferred a single byte (PIO mode?) and need to update + * the count of bytes remaining (total_xfer_length) and update the sg + * entry to either point to next byte in the current sg entry, or of + * already at the end to point to the start of the next sg entry + */ +static void sg_subtract_one(struct ScsiReqBlk *srb) +{ + sg_update_list(srb, srb->total_xfer_length - 1); +} + + +/* + * cleanup_after_transfer + * + * Makes sure, DMA and SCSI engine are empty, after the transfer has finished + * KG: Currently called from StatusPhase1 () + * Should probably also be called from other places + * Best might be to call it in DataXXPhase0, if new phase will differ + */ +static void cleanup_after_transfer(struct AdapterCtlBlk *acb, + struct ScsiReqBlk *srb) +{ + /*DC395x_write8 (TRM_S1040_DMA_STATUS, FORCEDMACOMP); */ + if (DC395x_read16(acb, TRM_S1040_DMA_COMMAND) & 0x0001) { /* read */ + if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40)) + clear_fifo(acb, "cleanup/in"); + if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) + DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO); + } else { /* write */ + if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) + DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO); + if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40)) + clear_fifo(acb, "cleanup/out"); + } + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); +} + + +/* + * Those no of bytes will be transferred w/ PIO through the SCSI FIFO + * Seems to be needed for unknown reasons; could be a hardware bug :-( + */ +#define DC395x_LASTPIO 4 + + +static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status) +{ + struct DeviceCtlBlk *dcb = srb->dcb; + u16 scsi_status = *pscsi_status; + u32 d_left_counter = 0; + dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n", + srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); + + /* + * KG: We need to drain the buffers before we draw any conclusions! + * This means telling the DMA to push the rest into SCSI, telling + * SCSI to push the rest to the bus. + * However, the device might have been the one to stop us (phase + * change), and the data in transit just needs to be accounted so + * it can be retransmitted.) + */ + /* + * KG: Stop DMA engine pushing more data into the SCSI FIFO + * If we need more data, the DMA SG list will be freshly set up, anyway + */ + dprintkdbg(DBG_PIO, "data_out_phase0: " + "DMA{fifocnt=0x%02x fifostat=0x%02x} " + "SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n", + DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), + DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), + DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), + DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status, + srb->total_xfer_length); + DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO); + + if (!(srb->state & SRB_XFERPAD)) { + if (scsi_status & PARITYERROR) + srb->status |= PARITY_ERROR; + + /* + * KG: Right, we can't just rely on the SCSI_COUNTER, because this + * is the no of bytes it got from the DMA engine not the no it + * transferred successfully to the device. (And the difference could + * be as much as the FIFO size, I guess ...) 
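+		 * Editorial note: the residue computed below is taken as
+		 * the bytes still in the SCSI FIFO (FIFOCNT & 0x1f, doubled
+		 * for wide transfers, since the FIFO then counts words)
+		 * plus, for transfers larger than DC395x_LASTPIO, whatever
+		 * remains in the SCSI transfer counter.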
+ */ + if (!(scsi_status & SCSIXFERDONE)) { + /* + * when data transfer from DMA FIFO to SCSI FIFO + * if there was some data left in SCSI FIFO + */ + d_left_counter = + (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & + 0x1F); + if (dcb->sync_period & WIDE_SYNC) + d_left_counter <<= 1; + + dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n" + "SCSI{fifocnt=0x%02x cnt=0x%08x} " + "DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n", + DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), + (dcb->sync_period & WIDE_SYNC) ? "words" : "bytes", + DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), + DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), + DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), + DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), + DC395x_read32(acb, TRM_S1040_DMA_CXCNT)); + } + /* + * calculate all the residue data that not yet tranfered + * SCSI transfer counter + left in SCSI FIFO data + * + * .....TRM_S1040_SCSI_COUNTER (24bits) + * The counter always decrement by one for every SCSI byte transfer. + * .....TRM_S1040_SCSI_FIFOCNT ( 5bits) + * The counter is SCSI FIFO offset counter (in units of bytes or! words) + */ + if (srb->total_xfer_length > DC395x_LASTPIO) + d_left_counter += + DC395x_read32(acb, TRM_S1040_SCSI_COUNTER); + + /* Is this a good idea? */ + /*clear_fifo(acb, "DOP1"); */ + /* KG: What is this supposed to be useful for? WIDE padding stuff? */ + if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC + && scsi_bufflen(srb->cmd) % 2) { + d_left_counter = 0; + dprintkl(KERN_INFO, + "data_out_phase0: Discard 1 byte (0x%02x)\n", + scsi_status); + } + /* + * KG: Oops again. Same thinko as above: The SCSI might have been + * faster than the DMA engine, so that it ran out of data. + * In that case, we have to do just nothing! + * But: Why the interrupt: No phase change. No XFERCNT_2_ZERO. Or? + */ + /* + * KG: This is nonsense: We have been WRITING data to the bus + * If the SCSI engine has no bytes left, how should the DMA engine? + */ + if (d_left_counter == 0) { + srb->total_xfer_length = 0; + } else { + /* + * if transfer not yet complete + * there were some data residue in SCSI FIFO or + * SCSI transfer counter not empty + */ + long oldxferred = + srb->total_xfer_length - d_left_counter; + const int diff = + (dcb->sync_period & WIDE_SYNC) ? 2 : 1; + sg_update_list(srb, d_left_counter); + /* KG: Most ugly hack! 
Apparently, this works around a chip bug */ + if ((srb->segment_x[srb->sg_index].length == + diff && scsi_sg_count(srb->cmd)) + || ((oldxferred & ~PAGE_MASK) == + (PAGE_SIZE - diff)) + ) { + dprintkl(KERN_INFO, "data_out_phase0: " + "Work around chip bug (%i)?\n", diff); + d_left_counter = + srb->total_xfer_length - diff; + sg_update_list(srb, d_left_counter); + /*srb->total_xfer_length -= diff; */ + /*srb->virt_addr += diff; */ + /*if (srb->cmd->use_sg) */ + /* srb->sg_index++; */ + } + } + } + if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) { + cleanup_after_transfer(acb, srb); + } +} + + +static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status) +{ + dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n", + srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); + clear_fifo(acb, "data_out_phase1"); + /* do prepare before transfer when data out phase */ + data_io_transfer(acb, srb, XFERDATAOUT); +} + +static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status) +{ + u16 scsi_status = *pscsi_status; + + dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n", + srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); + + /* + * KG: DataIn is much more tricky than DataOut. When the device is finished + * and switches to another phase, the SCSI engine should be finished too. + * But: There might still be bytes left in its FIFO to be fetched by the DMA + * engine and transferred to memory. + * We should wait for the FIFOs to be emptied by that (is there any way to + * enforce this?) and then stop the DMA engine, because it might think, that + * there are more bytes to follow. Yes, the device might disconnect prior to + * having all bytes transferred! + * Also we should make sure that all data from the DMA engine buffer's really + * made its way to the system memory! Some documentation on this would not + * seem to be a bad idea, actually. + */ + if (!(srb->state & SRB_XFERPAD)) { + u32 d_left_counter; + unsigned int sc, fc; + + if (scsi_status & PARITYERROR) { + dprintkl(KERN_INFO, "data_in_phase0: (0x%p) " + "Parity Error\n", srb->cmd); + srb->status |= PARITY_ERROR; + } + /* + * KG: We should wait for the DMA FIFO to be empty ... + * but: it would be better to wait first for the SCSI FIFO and then the + * the DMA FIFO to become empty? How do we know, that the device not already + * sent data to the FIFO in a MsgIn phase, eg.? + */ + if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) { +#if 0 + int ctr = 6000000; + dprintkl(KERN_DEBUG, + "DIP0: Wait for DMA FIFO to flush ...\n"); + /*DC395x_write8 (TRM_S1040_DMA_CONTROL, STOPDMAXFER); */ + /*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 7); */ + /*DC395x_write8 (TRM_S1040_SCSI_COMMAND, SCMD_DMA_IN); */ + while (! + (DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) & + 0x80) && --ctr); + if (ctr < 6000000 - 1) + dprintkl(KERN_DEBUG + "DIP0: Had to wait for DMA ...\n"); + if (!ctr) + dprintkl(KERN_ERR, + "Deadlock in DIP0 waiting for DMA FIFO empty!!\n"); + /*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 0); */ +#endif + dprintkdbg(DBG_KG, "data_in_phase0: " + "DMA{fifocnt=0x%02x fifostat=0x%02x}\n", + DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), + DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT)); + } + /* Now: Check remainig data: The SCSI counters should tell us ... */ + sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER); + fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT); + d_left_counter = sc + ((fc & 0x1f) + << ((srb->dcb->sync_period & WIDE_SYNC) ? 
1 : + 0)); + dprintkdbg(DBG_KG, "data_in_phase0: " + "SCSI{fifocnt=0x%02x%s ctr=0x%08x} " + "DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} " + "Remain{totxfer=%i scsi_fifo+ctr=%i}\n", + fc, + (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes", + sc, + fc, + DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), + DC395x_read32(acb, TRM_S1040_DMA_CXCNT), + srb->total_xfer_length, d_left_counter); +#if DC395x_LASTPIO + /* KG: Less than or equal to 4 bytes can not be transferred via DMA, it seems. */ + if (d_left_counter + && srb->total_xfer_length <= DC395x_LASTPIO) { + size_t left_io = srb->total_xfer_length; + + /*u32 addr = (srb->segment_x[srb->sg_index].address); */ + /*sg_update_list (srb, d_left_counter); */ + dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) " + "for remaining %i bytes:", + fc & 0x1f, + (srb->dcb->sync_period & WIDE_SYNC) ? + "words" : "bytes", + srb->total_xfer_length); + if (srb->dcb->sync_period & WIDE_SYNC) + DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, + CFG2_WIDEFIFO); + while (left_io) { + unsigned char *virt, *base = NULL; + unsigned long flags = 0; + size_t len = left_io; + size_t offset = srb->request_length - left_io; + + local_irq_save(flags); + /* Assumption: it's inside one page as it's at most 4 bytes and + I just assume it's on a 4-byte boundary */ + base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd), + srb->sg_count, &offset, &len); + virt = base + offset; + + left_io -= len; + + while (len) { + u8 byte; + byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); + *virt++ = byte; + + if (debug_enabled(DBG_PIO)) + printk(" %02x", byte); + + d_left_counter--; + sg_subtract_one(srb); + + len--; + + fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT); + + if (fc == 0x40) { + left_io = 0; + break; + } + } + + WARN_ON((fc != 0x40) == !d_left_counter); + + if (fc == 0x40 && (srb->dcb->sync_period & WIDE_SYNC)) { + /* Read the last byte ... */ + if (srb->total_xfer_length > 0) { + u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); + + *virt++ = byte; + srb->total_xfer_length--; + if (debug_enabled(DBG_PIO)) + printk(" %02x", byte); + } + + DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0); + } + + scsi_kunmap_atomic_sg(base); + local_irq_restore(flags); + } + /*printk(" %08x", *(u32*)(bus_to_virt (addr))); */ + /*srb->total_xfer_length = 0; */ + if (debug_enabled(DBG_PIO)) + printk("\n"); + } +#endif /* DC395x_LASTPIO */ + +#if 0 + /* + * KG: This was in DATAOUT. Does it also belong here? + * Nobody seems to know what counter and fifo_cnt count exactly ... + */ + if (!(scsi_status & SCSIXFERDONE)) { + /* + * when data transfer from DMA FIFO to SCSI FIFO + * if there was some data left in SCSI FIFO + */ + d_left_counter = + (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & + 0x1F); + if (srb->dcb->sync_period & WIDE_SYNC) + d_left_counter <<= 1; + /* + * if WIDE scsi SCSI FIFOCNT unit is word !!! + * so need to *= 2 + * KG: Seems to be correct ... + */ + } +#endif + /* KG: This should not be needed any more! 
*/ + if (d_left_counter == 0 + || (scsi_status & SCSIXFERCNT_2_ZERO)) { +#if 0 + int ctr = 6000000; + u8 TempDMAstatus; + do { + TempDMAstatus = + DC395x_read8(acb, TRM_S1040_DMA_STATUS); + } while (!(TempDMAstatus & DMAXFERCOMP) && --ctr); + if (!ctr) + dprintkl(KERN_ERR, + "Deadlock in DataInPhase0 waiting for DMA!!\n"); + srb->total_xfer_length = 0; +#endif + srb->total_xfer_length = d_left_counter; + } else { /* phase changed */ + /* + * parsing the case: + * when a transfer not yet complete + * but be disconnected by target + * if transfer not yet complete + * there were some data residue in SCSI FIFO or + * SCSI transfer counter not empty + */ + sg_update_list(srb, d_left_counter); + } + } + /* KG: The target may decide to disconnect: Empty FIFO before! */ + if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) { + cleanup_after_transfer(acb, srb); + } +} + + +static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status) +{ + dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n", + srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); + data_io_transfer(acb, srb, XFERDATAIN); +} + + +static void data_io_transfer(struct AdapterCtlBlk *acb, + struct ScsiReqBlk *srb, u16 io_dir) +{ + struct DeviceCtlBlk *dcb = srb->dcb; + u8 bval; + dprintkdbg(DBG_0, + "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n", + srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun, + ((io_dir & DMACMD_DIR) ? 'r' : 'w'), + srb->total_xfer_length, srb->sg_index, srb->sg_count); + if (srb == acb->tmp_srb) + dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n"); + if (srb->sg_index >= srb->sg_count) { + /* can't happen? out of bounds error */ + return; + } + + if (srb->total_xfer_length > DC395x_LASTPIO) { + u8 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS); + /* + * KG: What should we do: Use SCSI Cmd 0x90/0x92? + * Maybe, even ABORTXFER would be appropriate + */ + if (dma_status & XFERPENDING) { + dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! 
" + "Expect trouble!\n"); + dump_register_info(acb, dcb, srb); + DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO); + } + /* clear_fifo(acb, "IO"); */ + /* + * load what physical address of Scatter/Gather list table + * want to be transfer + */ + srb->state |= SRB_DATA_XFER; + DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0); + if (scsi_sg_count(srb->cmd)) { /* with S/G */ + io_dir |= DMACMD_SG; + DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR, + srb->sg_bus_addr + + sizeof(struct SGentry) * + srb->sg_index); + /* load how many bytes in the sg list table */ + DC395x_write32(acb, TRM_S1040_DMA_XCNT, + ((u32)(srb->sg_count - + srb->sg_index) << 3)); + } else { /* without S/G */ + io_dir &= ~DMACMD_SG; + DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR, + srb->segment_x[0].address); + DC395x_write32(acb, TRM_S1040_DMA_XCNT, + srb->segment_x[0].length); + } + /* load total transfer length (24bits) max value 16Mbyte */ + DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, + srb->total_xfer_length); + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ + if (io_dir & DMACMD_DIR) { /* read */ + DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, + SCMD_DMA_IN); + DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir); + } else { + DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir); + DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, + SCMD_DMA_OUT); + } + + } +#if DC395x_LASTPIO + else if (srb->total_xfer_length > 0) { /* The last four bytes: Do PIO */ + /* + * load what physical address of Scatter/Gather list table + * want to be transfer + */ + srb->state |= SRB_DATA_XFER; + /* load total transfer length (24bits) max value 16Mbyte */ + DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, + srb->total_xfer_length); + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ + if (io_dir & DMACMD_DIR) { /* read */ + DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, + SCMD_FIFO_IN); + } else { /* write */ + int ln = srb->total_xfer_length; + size_t left_io = srb->total_xfer_length; + + if (srb->dcb->sync_period & WIDE_SYNC) + DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, + CFG2_WIDEFIFO); + + while (left_io) { + unsigned char *virt, *base = NULL; + unsigned long flags = 0; + size_t len = left_io; + size_t offset = srb->request_length - left_io; + + local_irq_save(flags); + /* Again, max 4 bytes */ + base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd), + srb->sg_count, &offset, &len); + virt = base + offset; + + left_io -= len; + + while (len--) { + if (debug_enabled(DBG_PIO)) + printk(" %02x", *virt); + + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++); + + sg_subtract_one(srb); + } + + scsi_kunmap_atomic_sg(base); + local_irq_restore(flags); + } + if (srb->dcb->sync_period & WIDE_SYNC) { + if (ln % 2) { + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); + if (debug_enabled(DBG_PIO)) + printk(" |00"); + } + DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0); + } + /*DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, ln); */ + if (debug_enabled(DBG_PIO)) + printk("\n"); + DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, + SCMD_FIFO_OUT); + } + } +#endif /* DC395x_LASTPIO */ + else { /* xfer pad */ + if (srb->sg_count) { + srb->adapter_status = H_OVER_UNDER_RUN; + srb->status |= OVER_RUN; + } + /* + * KG: despite the fact that we are using 16 bits I/O ops + * the SCSI FIFO is only 8 bits according to the docs + * (we can set bit 1 in 0x8f to serialize FIFO access ...) 
+ */ + if (dcb->sync_period & WIDE_SYNC) { + DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 2); + DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, + CFG2_WIDEFIFO); + if (io_dir & DMACMD_DIR) { + DC395x_read8(acb, TRM_S1040_SCSI_FIFO); + DC395x_read8(acb, TRM_S1040_SCSI_FIFO); + } else { + /* Danger, Robinson: If you find KGs + * scattered over the wide disk, the driver + * or chip is to blame :-( */ + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K'); + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'G'); + } + DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0); + } else { + DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1); + /* Danger, Robinson: If you find a collection of Ks on your disk + * something broke :-( */ + if (io_dir & DMACMD_DIR) + DC395x_read8(acb, TRM_S1040_SCSI_FIFO); + else + DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K'); + } + srb->state |= SRB_XFERPAD; + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ + /* SCSI command */ + bval = (io_dir & DMACMD_DIR) ? SCMD_FIFO_IN : SCMD_FIFO_OUT; + DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, bval); + } +} + + +static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status) +{ + dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n", + srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); + srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); + srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */ + srb->state = SRB_COMPLETED; + *pscsi_status = PH_BUS_FREE; /*.. initial phase */ + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ + DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT); +} + + +static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status) +{ + dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n", + srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); + srb->state = SRB_STATUS; + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ + DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP); +} + + +/* Check if the message is complete */ +static inline u8 msgin_completed(u8 * msgbuf, u32 len) +{ + if (*msgbuf == EXTENDED_MESSAGE) { + if (len < 2) + return 0; + if (len < msgbuf[1] + 2) + return 0; + } else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f) /* two byte messages */ + if (len < 2) + return 0; + return 1; +} + +/* reject_msg */ +static inline void msgin_reject(struct AdapterCtlBlk *acb, + struct ScsiReqBlk *srb) +{ + srb->msgout_buf[0] = MESSAGE_REJECT; + srb->msg_count = 1; + DC395x_ENABLE_MSGOUT; + srb->state &= ~SRB_MSGIN; + srb->state |= SRB_MSGOUT; + dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n", + srb->msgin_buf[0], + srb->dcb->target_id, srb->dcb->target_lun); +} + + +static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb, + struct DeviceCtlBlk *dcb, u8 tag) +{ + struct ScsiReqBlk *srb = NULL; + struct ScsiReqBlk *i; + dprintkdbg(DBG_0, "msgin_qtag: (0x%p) tag=%i srb=%p\n", + srb->cmd, tag, srb); + + if (!(dcb->tag_mask & (1 << tag))) + dprintkl(KERN_DEBUG, + "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n", + dcb->tag_mask, tag); + + if (list_empty(&dcb->srb_going_list)) + goto mingx0; + list_for_each_entry(i, &dcb->srb_going_list, list) { + if (i->tag_number == tag) { + srb = i; + break; + } + } + if (!srb) + goto mingx0; + + dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n", + srb->cmd, srb->dcb->target_id, srb->dcb->target_lun); + if (dcb->flag & ABORT_DEV_) { + 
/*srb->state = SRB_ABORT_SENT; */ + enable_msgout_abort(acb, srb); + } + + if (!(srb->state & SRB_DISCONNECT)) + goto mingx0; + + memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len); + srb->state |= dcb->active_srb->state; + srb->state |= SRB_DATA_XFER; + dcb->active_srb = srb; + /* How can we make the DORS happy? */ + return srb; + + mingx0: + srb = acb->tmp_srb; + srb->state = SRB_UNEXPECT_RESEL; + dcb->active_srb = srb; + srb->msgout_buf[0] = ABORT_TASK; + srb->msg_count = 1; + DC395x_ENABLE_MSGOUT; + dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag); + return srb; +} + + +static inline void reprogram_regs(struct AdapterCtlBlk *acb, + struct DeviceCtlBlk *dcb) +{ + DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id); + DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period); + DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset); + set_xfer_rate(acb, dcb); +} + + +/* set async transfer mode */ +static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) +{ + struct DeviceCtlBlk *dcb = srb->dcb; + dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n", + dcb->target_id, dcb->target_lun); + + dcb->sync_mode &= ~(SYNC_NEGO_ENABLE); + dcb->sync_mode |= SYNC_NEGO_DONE; + /*dcb->sync_period &= 0; */ + dcb->sync_offset = 0; + dcb->min_nego_period = 200 >> 2; /* 200ns <=> 5 MHz */ + srb->state &= ~SRB_DO_SYNC_NEGO; + reprogram_regs(acb, dcb); + if ((dcb->sync_mode & WIDE_NEGO_ENABLE) + && !(dcb->sync_mode & WIDE_NEGO_DONE)) { + build_wdtr(acb, dcb, srb); + DC395x_ENABLE_MSGOUT; + dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n"); + } +} + + +/* set sync transfer mode */ +static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) +{ + struct DeviceCtlBlk *dcb = srb->dcb; + u8 bval; + int fact; + dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins " + "(%02i.%01i MHz) Offset %i\n", + dcb->target_id, srb->msgin_buf[3] << 2, + (250 / srb->msgin_buf[3]), + ((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3], + srb->msgin_buf[4]); + + if (srb->msgin_buf[4] > 15) + srb->msgin_buf[4] = 15; + if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) + dcb->sync_offset = 0; + else if (dcb->sync_offset == 0) + dcb->sync_offset = srb->msgin_buf[4]; + if (srb->msgin_buf[4] > dcb->sync_offset) + srb->msgin_buf[4] = dcb->sync_offset; + else + dcb->sync_offset = srb->msgin_buf[4]; + bval = 0; + while (bval < 7 && (srb->msgin_buf[3] > clock_period[bval] + || dcb->min_nego_period > + clock_period[bval])) + bval++; + if (srb->msgin_buf[3] < clock_period[bval]) + dprintkl(KERN_INFO, + "msgin_set_sync: Increase sync nego period to %ins\n", + clock_period[bval] << 2); + srb->msgin_buf[3] = clock_period[bval]; + dcb->sync_period &= 0xf0; + dcb->sync_period |= ALT_SYNC | bval; + dcb->min_nego_period = srb->msgin_buf[3]; + + if (dcb->sync_period & WIDE_SYNC) + fact = 500; + else + fact = 250; + + dprintkl(KERN_INFO, + "Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n", + dcb->target_id, (fact == 500) ? 
"Wide16" : "", + dcb->min_nego_period << 2, dcb->sync_offset, + (fact / dcb->min_nego_period), + ((fact % dcb->min_nego_period) * 10 + + dcb->min_nego_period / 2) / dcb->min_nego_period); + + if (!(srb->state & SRB_DO_SYNC_NEGO)) { + /* Reply with corrected SDTR Message */ + dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n", + srb->msgin_buf[3] << 2, srb->msgin_buf[4]); + + memcpy(srb->msgout_buf, srb->msgin_buf, 5); + srb->msg_count = 5; + DC395x_ENABLE_MSGOUT; + dcb->sync_mode |= SYNC_NEGO_DONE; + } else { + if ((dcb->sync_mode & WIDE_NEGO_ENABLE) + && !(dcb->sync_mode & WIDE_NEGO_DONE)) { + build_wdtr(acb, dcb, srb); + DC395x_ENABLE_MSGOUT; + dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n"); + } + } + srb->state &= ~SRB_DO_SYNC_NEGO; + dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE; + + reprogram_regs(acb, dcb); +} + + +static inline void msgin_set_nowide(struct AdapterCtlBlk *acb, + struct ScsiReqBlk *srb) +{ + struct DeviceCtlBlk *dcb = srb->dcb; + dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id); + + dcb->sync_period &= ~WIDE_SYNC; + dcb->sync_mode &= ~(WIDE_NEGO_ENABLE); + dcb->sync_mode |= WIDE_NEGO_DONE; + srb->state &= ~SRB_DO_WIDE_NEGO; + reprogram_regs(acb, dcb); + if ((dcb->sync_mode & SYNC_NEGO_ENABLE) + && !(dcb->sync_mode & SYNC_NEGO_DONE)) { + build_sdtr(acb, dcb, srb); + DC395x_ENABLE_MSGOUT; + dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n"); + } +} + +static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) +{ + struct DeviceCtlBlk *dcb = srb->dcb; + u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO + && acb->config & HCC_WIDE_CARD) ? 1 : 0; + dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id); + + if (srb->msgin_buf[3] > wide) + srb->msgin_buf[3] = wide; + /* Completed */ + if (!(srb->state & SRB_DO_WIDE_NEGO)) { + dprintkl(KERN_DEBUG, + "msgin_set_wide: Wide nego initiated <%02i>\n", + dcb->target_id); + memcpy(srb->msgout_buf, srb->msgin_buf, 4); + srb->msg_count = 4; + srb->state |= SRB_DO_WIDE_NEGO; + DC395x_ENABLE_MSGOUT; + } + + dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE); + if (srb->msgin_buf[3] > 0) + dcb->sync_period |= WIDE_SYNC; + else + dcb->sync_period &= ~WIDE_SYNC; + srb->state &= ~SRB_DO_WIDE_NEGO; + /*dcb->sync_mode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); */ + dprintkdbg(DBG_1, + "msgin_set_wide: Wide (%i bit) negotiated <%02i>\n", + (8 << srb->msgin_buf[3]), dcb->target_id); + reprogram_regs(acb, dcb); + if ((dcb->sync_mode & SYNC_NEGO_ENABLE) + && !(dcb->sync_mode & SYNC_NEGO_DONE)) { + build_sdtr(acb, dcb, srb); + DC395x_ENABLE_MSGOUT; + dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n"); + } +} + + +/* + * extended message codes: + * + * code description + * + * 02h Reserved + * 00h MODIFY DATA POINTER + * 01h SYNCHRONOUS DATA TRANSFER REQUEST + * 03h WIDE DATA TRANSFER REQUEST + * 04h - 7Fh Reserved + * 80h - FFh Vendor specific + */ +static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status) +{ + struct DeviceCtlBlk *dcb = acb->active_dcb; + dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd); + + srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); + if (msgin_completed(srb->msgin_buf, acb->msg_len)) { + /* Now eval the msg */ + switch (srb->msgin_buf[0]) { + case DISCONNECT: + srb->state = SRB_DISCONNECT; + break; + + case SIMPLE_QUEUE_TAG: + case HEAD_OF_QUEUE_TAG: + case ORDERED_QUEUE_TAG: + srb = + msgin_qtag(acb, dcb, + srb->msgin_buf[1]); + break; + + case MESSAGE_REJECT: + DC395x_write16(acb, 
TRM_S1040_SCSI_CONTROL, + DO_CLRATN | DO_DATALATCH); + /* A sync nego message was rejected ! */ + if (srb->state & SRB_DO_SYNC_NEGO) { + msgin_set_async(acb, srb); + break; + } + /* A wide nego message was rejected ! */ + if (srb->state & SRB_DO_WIDE_NEGO) { + msgin_set_nowide(acb, srb); + break; + } + enable_msgout_abort(acb, srb); + /*srb->state |= SRB_ABORT_SENT */ + break; + + case EXTENDED_MESSAGE: + /* SDTR */ + if (srb->msgin_buf[1] == 3 + && srb->msgin_buf[2] == EXTENDED_SDTR) { + msgin_set_sync(acb, srb); + break; + } + /* WDTR */ + if (srb->msgin_buf[1] == 2 + && srb->msgin_buf[2] == EXTENDED_WDTR + && srb->msgin_buf[3] <= 2) { /* sanity check ... */ + msgin_set_wide(acb, srb); + break; + } + msgin_reject(acb, srb); + break; + + case IGNORE_WIDE_RESIDUE: + /* Discard wide residual */ + dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n"); + break; + + case COMMAND_COMPLETE: + /* nothing has to be done */ + break; + + case SAVE_POINTERS: + /* + * SAVE POINTER may be ignored as we have the struct + * ScsiReqBlk* associated with the scsi command. + */ + dprintkdbg(DBG_0, "msgin_phase0: (0x%p) " + "SAVE POINTER rem=%i Ignore\n", + srb->cmd, srb->total_xfer_length); + break; + + case RESTORE_POINTERS: + dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n"); + break; + + case ABORT: + dprintkdbg(DBG_0, "msgin_phase0: (0x%p) " + "<%02i-%i> ABORT msg\n", + srb->cmd, dcb->target_id, + dcb->target_lun); + dcb->flag |= ABORT_DEV_; + enable_msgout_abort(acb, srb); + break; + + default: + /* reject unknown messages */ + if (srb->msgin_buf[0] & IDENTIFY_BASE) { + dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n"); + srb->msg_count = 1; + srb->msgout_buf[0] = dcb->identify_msg; + DC395x_ENABLE_MSGOUT; + srb->state |= SRB_MSGOUT; + /*break; */ + } + msgin_reject(acb, srb); + } + + /* Clear counter and MsgIn state */ + srb->state &= ~SRB_MSGIN; + acb->msg_len = 0; + } + *pscsi_status = PH_BUS_FREE; + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important ... you know! 
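+		 * Editorial note: DO_DATALATCH followed by SCMD_MSGACCEPT
+		 * presumably releases ACK for the last message-in byte so
+		 * the target can proceed to its next phase.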
*/ + DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT); +} + + +static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status) +{ + dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd); + clear_fifo(acb, "msgin_phase1"); + DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1); + if (!(srb->state & SRB_MSGIN)) { + srb->state &= ~SRB_DISCONNECT; + srb->state |= SRB_MSGIN; + } + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ + /* SCSI command */ + DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_IN); +} + + +static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status) +{ +} + + +static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, + u16 *pscsi_status) +{ +} + + +static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb) +{ + struct DeviceCtlBlk *i; + + /* set all lun device's period, offset */ + if (dcb->identify_msg & 0x07) + return; + + if (acb->scan_devices) { + current_sync_offset = dcb->sync_offset; + return; + } + + list_for_each_entry(i, &acb->dcb_list, list) + if (i->target_id == dcb->target_id) { + i->sync_period = dcb->sync_period; + i->sync_offset = dcb->sync_offset; + i->sync_mode = dcb->sync_mode; + i->min_nego_period = dcb->min_nego_period; + } +} + + +static void disconnect(struct AdapterCtlBlk *acb) +{ + struct DeviceCtlBlk *dcb = acb->active_dcb; + struct ScsiReqBlk *srb; + + if (!dcb) { + dprintkl(KERN_ERR, "disconnect: No such device\n"); + udelay(500); + /* Suspend queue for a while */ + acb->last_reset = + jiffies + HZ / 2 + + HZ * acb->eeprom.delay_time; + clear_fifo(acb, "disconnectEx"); + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); + return; + } + srb = dcb->active_srb; + acb->active_dcb = NULL; + dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd); + + srb->scsi_phase = PH_BUS_FREE; /* initial phase */ + clear_fifo(acb, "disconnect"); + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); + if (srb->state & SRB_UNEXPECT_RESEL) { + dprintkl(KERN_ERR, + "disconnect: Unexpected reselection <%02i-%i>\n", + dcb->target_id, dcb->target_lun); + srb->state = 0; + waiting_process_next(acb); + } else if (srb->state & SRB_ABORT_SENT) { + dcb->flag &= ~ABORT_DEV_; + acb->last_reset = jiffies + HZ / 2 + 1; + dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n"); + doing_srb_done(acb, DID_ABORT, srb->cmd, 1); + waiting_process_next(acb); + } else { + if ((srb->state & (SRB_START_ + SRB_MSGOUT)) + || !(srb-> + state & (SRB_DISCONNECT | SRB_COMPLETED))) { + /* + * Selection time out + * SRB_START_ || SRB_MSGOUT || (!SRB_DISCONNECT && !SRB_COMPLETED) + */ + /* Unexp. Disc / Sel Timeout */ + if (srb->state != SRB_START_ + && srb->state != SRB_MSGOUT) { + srb->state = SRB_READY; + dprintkl(KERN_DEBUG, + "disconnect: (0x%p) Unexpected\n", + srb->cmd); + srb->target_status = SCSI_STAT_SEL_TIMEOUT; + goto disc1; + } else { + /* Normal selection timeout */ + dprintkdbg(DBG_KG, "disconnect: (0x%p) " + "<%02i-%i> SelTO\n", srb->cmd, + dcb->target_id, dcb->target_lun); + if (srb->retry_count++ > DC395x_MAX_RETRIES + || acb->scan_devices) { + srb->target_status = + SCSI_STAT_SEL_TIMEOUT; + goto disc1; + } + free_tag(dcb, srb); + list_move(&srb->list, &dcb->srb_waiting_list); + dprintkdbg(DBG_KG, + "disconnect: (0x%p) Retry\n", + srb->cmd); + waiting_set_timer(acb, HZ / 20); + } + } else if (srb->state & SRB_DISCONNECT) { + u8 bval = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL); + /* + * SRB_DISCONNECT (This is what we expect!) 
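+ * The target disconnected cleanly and the command will be resumed on
+ * reselection.  If the signal register still shows ACK asserted (bit
+ * 0x40, tested below), another initiator is presumably still on the
+ * bus and nothing is done; otherwise the waiting queue is processed.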
+ */ + if (bval & 0x40) { + dprintkdbg(DBG_0, "disconnect: SCSI bus stat " + " 0x%02x: ACK set! Other controllers?\n", + bval); + /* It could come from another initiator, therefore don't do much ! */ + } else + waiting_process_next(acb); + } else if (srb->state & SRB_COMPLETED) { + disc1: + /* + ** SRB_COMPLETED + */ + free_tag(dcb, srb); + dcb->active_srb = NULL; + srb->state = SRB_FREE; + srb_done(acb, dcb, srb); + } + } +} + + +static void reselect(struct AdapterCtlBlk *acb) +{ + struct DeviceCtlBlk *dcb = acb->active_dcb; + struct ScsiReqBlk *srb = NULL; + u16 rsel_tar_lun_id; + u8 id, lun; + dprintkdbg(DBG_0, "reselect: acb=%p\n", acb); + + clear_fifo(acb, "reselect"); + /*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT | DO_DATALATCH); */ + /* Read Reselected Target ID and LUN */ + rsel_tar_lun_id = DC395x_read16(acb, TRM_S1040_SCSI_TARGETID); + if (dcb) { /* Arbitration lost but Reselection win */ + srb = dcb->active_srb; + if (!srb) { + dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, " + "but active_srb == NULL\n"); + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ + return; + } + /* Why the if ? */ + if (!acb->scan_devices) { + dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> " + "Arb lost but Resel win rsel=%i stat=0x%04x\n", + srb->cmd, dcb->target_id, + dcb->target_lun, rsel_tar_lun_id, + DC395x_read16(acb, TRM_S1040_SCSI_STATUS)); + /*srb->state |= SRB_DISCONNECT; */ + + srb->state = SRB_READY; + free_tag(dcb, srb); + list_move(&srb->list, &dcb->srb_waiting_list); + waiting_set_timer(acb, HZ / 20); + + /* return; */ + } + } + /* Read Reselected Target Id and LUN */ + if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8))) + dprintkl(KERN_DEBUG, "reselect: Expects identify msg. " + "Got %i!\n", rsel_tar_lun_id); + id = rsel_tar_lun_id & 0xff; + lun = (rsel_tar_lun_id >> 8) & 7; + dcb = find_dcb(acb, id, lun); + if (!dcb) { + dprintkl(KERN_ERR, "reselect: From non existent device " + "<%02i-%i>\n", id, lun); + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ + return; + } + acb->active_dcb = dcb; + + if (!(dcb->dev_mode & NTC_DO_DISCONNECT)) + dprintkl(KERN_DEBUG, "reselect: in spite of forbidden " + "disconnection? <%02i-%i>\n", + dcb->target_id, dcb->target_lun); + + if (dcb->sync_mode & EN_TAG_QUEUEING) { + srb = acb->tmp_srb; + dcb->active_srb = srb; + } else { + /* There can be only one! 
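+ * Without tag queueing the device can have at most one disconnected
+ * command outstanding, so this reselection must belong to
+ * dcb->active_srb.  If that SRB is missing or not in SRB_DISCONNECT
+ * state, the reselection is unexpected and the temporary SRB is used
+ * to send an ABORT.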
*/ + srb = dcb->active_srb; + if (!srb || !(srb->state & SRB_DISCONNECT)) { + /* + * abort command + */ + dprintkl(KERN_DEBUG, + "reselect: w/o disconnected cmds <%02i-%i>\n", + dcb->target_id, dcb->target_lun); + srb = acb->tmp_srb; + srb->state = SRB_UNEXPECT_RESEL; + dcb->active_srb = srb; + enable_msgout_abort(acb, srb); + } else { + if (dcb->flag & ABORT_DEV_) { + /*srb->state = SRB_ABORT_SENT; */ + enable_msgout_abort(acb, srb); + } else + srb->state = SRB_DATA_XFER; + + } + } + srb->scsi_phase = PH_BUS_FREE; /* initial phase */ + + /* Program HA ID, target ID, period and offset */ + dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id); + DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id); /* host ID */ + DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id); /* target ID */ + DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset); /* offset */ + DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period); /* sync period, wide */ + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ + /* SCSI command */ + DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT); +} + + +static inline u8 tagq_blacklist(char *name) +{ +#ifndef DC395x_NO_TAGQ +#if 0 + u8 i; + for (i = 0; i < BADDEVCNT; i++) + if (memcmp(name, DC395x_baddevname1[i], 28) == 0) + return 1; +#endif + return 0; +#else + return 1; +#endif +} + + +static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr) +{ + /* Check for SCSI format (ANSI and Response data format) */ + if ((ptr->Vers & 0x07) >= 2 || (ptr->RDF & 0x0F) == 2) { + if ((ptr->Flags & SCSI_INQ_CMDQUEUE) + && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) && + /*(dcb->dev_mode & NTC_DO_DISCONNECT) */ + /* ((dcb->dev_type == TYPE_DISK) + || (dcb->dev_type == TYPE_MOD)) && */ + !tagq_blacklist(((char *)ptr) + 8)) { + if (dcb->max_command == 1) + dcb->max_command = + dcb->acb->tag_max_num; + dcb->sync_mode |= EN_TAG_QUEUEING; + /*dcb->tag_mask = 0; */ + } else + dcb->max_command = 1; + } +} + + +static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, + struct ScsiInqData *ptr) +{ + u8 bval1 = ptr->DevType & SCSI_DEVTYPE; + dcb->dev_type = bval1; + /* if (bval1 == TYPE_DISK || bval1 == TYPE_MOD) */ + disc_tagq_set(dcb, ptr); +} + + +/* unmap mapped pci regions from SRB */ +static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) +{ + struct scsi_cmnd *cmd = srb->cmd; + enum dma_data_direction dir = cmd->sc_data_direction; + + if (scsi_sg_count(cmd) && dir != DMA_NONE) { + /* unmap DC395x SG list */ + dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n", + srb->sg_bus_addr, SEGMENTX_LEN); + dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN, + DMA_TO_DEVICE); + dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n", + scsi_sg_count(cmd), scsi_bufflen(cmd)); + /* unmap the sg segments */ + scsi_dma_unmap(cmd); + } +} + + +/* unmap mapped pci sense buffer from SRB */ +static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb, + struct ScsiReqBlk *srb) +{ + if (!(srb->flag & AUTO_REQSENSE)) + return; + /* Unmap sense buffer */ + dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n", + srb->segment_x[0].address); + dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address, + srb->segment_x[0].length, DMA_FROM_DEVICE); + /* Restore SG stuff */ + srb->total_xfer_length = srb->xferred; + srb->segment_x[0].address = + srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address; + srb->segment_x[0].length = + srb->segment_x[DC395x_MAX_SG_LISTENTRY - 
1].length; +} + + +/* + * Complete execution of a SCSI command + * Signal completion to the generic SCSI driver + */ +static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, + struct ScsiReqBlk *srb) +{ + u8 tempcnt, status; + struct scsi_cmnd *cmd = srb->cmd; + enum dma_data_direction dir = cmd->sc_data_direction; + int ckc_only = 1; + + dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd, + srb->cmd->device->id, (u8)srb->cmd->device->lun); + dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n", + srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count, + scsi_sgtalbe(cmd)); + status = srb->target_status; + set_host_byte(cmd, DID_OK); + set_status_byte(cmd, SAM_STAT_GOOD); + if (srb->flag & AUTO_REQSENSE) { + dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n"); + pci_unmap_srb_sense(acb, srb); + /* + ** target status.......................... + */ + srb->flag &= ~AUTO_REQSENSE; + srb->adapter_status = 0; + srb->target_status = SAM_STAT_CHECK_CONDITION; + if (debug_enabled(DBG_1)) { + switch (cmd->sense_buffer[2] & 0x0f) { + case NOT_READY: + dprintkl(KERN_DEBUG, + "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ", + cmd->cmnd[0], dcb->target_id, + dcb->target_lun, status, acb->scan_devices); + break; + case UNIT_ATTENTION: + dprintkl(KERN_DEBUG, + "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ", + cmd->cmnd[0], dcb->target_id, + dcb->target_lun, status, acb->scan_devices); + break; + case ILLEGAL_REQUEST: + dprintkl(KERN_DEBUG, + "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ", + cmd->cmnd[0], dcb->target_id, + dcb->target_lun, status, acb->scan_devices); + break; + case MEDIUM_ERROR: + dprintkl(KERN_DEBUG, + "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ", + cmd->cmnd[0], dcb->target_id, + dcb->target_lun, status, acb->scan_devices); + break; + case HARDWARE_ERROR: + dprintkl(KERN_DEBUG, + "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ", + cmd->cmnd[0], dcb->target_id, + dcb->target_lun, status, acb->scan_devices); + break; + } + if (cmd->sense_buffer[7] >= 6) + printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x " + "(0x%08x 0x%08x)\n", + cmd->sense_buffer[2], cmd->sense_buffer[12], + cmd->sense_buffer[13], + *((unsigned int *)(cmd->sense_buffer + 3)), + *((unsigned int *)(cmd->sense_buffer + 8))); + else + printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n", + cmd->sense_buffer[2], + *((unsigned int *)(cmd->sense_buffer + 3))); + } + + if (status == SAM_STAT_CHECK_CONDITION) { + set_host_byte(cmd, DID_BAD_TARGET); + goto ckc_e; + } + dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n"); + + set_status_byte(cmd, SAM_STAT_CHECK_CONDITION); + + goto ckc_e; + } + +/*************************************************************/ + if (status) { + /* + * target status.......................... 
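+ *
+ * CHECK CONDITION triggers an automatic REQUEST SENSE via
+ * request_sense() and returns early; QUEUE FULL shrinks
+ * dcb->max_command and requeues the command; a selection timeout is
+ * reported as DID_NO_CONNECT; anything else becomes DID_ERROR with
+ * the raw target status.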
+ */ + if (status == SAM_STAT_CHECK_CONDITION) { + request_sense(acb, dcb, srb); + return; + } else if (status == SAM_STAT_TASK_SET_FULL) { + tempcnt = (u8)list_size(&dcb->srb_going_list); + dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n", + dcb->target_id, dcb->target_lun, tempcnt); + if (tempcnt > 1) + tempcnt--; + dcb->max_command = tempcnt; + free_tag(dcb, srb); + list_move(&srb->list, &dcb->srb_waiting_list); + waiting_set_timer(acb, HZ / 20); + srb->adapter_status = 0; + srb->target_status = 0; + return; + } else if (status == SCSI_STAT_SEL_TIMEOUT) { + srb->adapter_status = H_SEL_TIMEOUT; + srb->target_status = 0; + set_host_byte(cmd, DID_NO_CONNECT); + } else { + srb->adapter_status = 0; + set_host_byte(cmd, DID_ERROR); + set_status_byte(cmd, status); + } + } else { + /* + ** process initiator status.......................... + */ + status = srb->adapter_status; + if (status & H_OVER_UNDER_RUN) { + srb->target_status = 0; + scsi_msg_to_host_byte(cmd, srb->end_message); + } else if (srb->status & PARITY_ERROR) { + set_host_byte(cmd, DID_PARITY); + } else { /* No error */ + + srb->adapter_status = 0; + srb->target_status = 0; + } + } + + ckc_only = 0; +/* Check Error Conditions */ + ckc_e: + + pci_unmap_srb(acb, srb); + + if (cmd->cmnd[0] == INQUIRY) { + unsigned char *base = NULL; + struct ScsiInqData *ptr; + unsigned long flags = 0; + struct scatterlist* sg = scsi_sglist(cmd); + size_t offset = 0, len = sizeof(struct ScsiInqData); + + local_irq_save(flags); + base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len); + ptr = (struct ScsiInqData *)(base + offset); + + if (!ckc_only && get_host_byte(cmd) == DID_OK + && cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8 + && dir != DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2) + dcb->inquiry7 = ptr->Flags; + + /*if( srb->cmd->cmnd[0] == INQUIRY && */ + /* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */ + if ((get_host_byte(cmd) == DID_OK) || + (get_status_byte(cmd) == SAM_STAT_CHECK_CONDITION)) { + if (!dcb->init_tcq_flag) { + add_dev(acb, dcb, ptr); + dcb->init_tcq_flag = 1; + } + } + + scsi_kunmap_atomic_sg(base); + local_irq_restore(flags); + } + + /* Here is the info for Doug Gilbert's sg3 ... */ + scsi_set_resid(cmd, srb->total_xfer_length); + if (debug_enabled(DBG_KG)) { + if (srb->total_xfer_length) + dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> " + "cmnd=0x%02x Missed %i bytes\n", + cmd, cmd->device->id, (u8)cmd->device->lun, + cmd->cmnd[0], srb->total_xfer_length); + } + + if (srb != acb->tmp_srb) { + /* Add to free list */ + dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n", + cmd, cmd->result); + list_move_tail(&srb->list, &acb->srb_free_list); + } else { + dprintkl(KERN_ERR, "srb_done: ERROR! 
Completed cmd with tmp_srb\n"); + } + + scsi_done(cmd); + waiting_process_next(acb); +} + + +/* abort all cmds in our queues */ +static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag, + struct scsi_cmnd *cmd, u8 force) +{ + struct DeviceCtlBlk *dcb; + dprintkl(KERN_INFO, "doing_srb_done: pids "); + + list_for_each_entry(dcb, &acb->dcb_list, list) { + struct ScsiReqBlk *srb; + struct ScsiReqBlk *tmp; + struct scsi_cmnd *p; + + list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) { + p = srb->cmd; + printk("G:%p(%02i-%i) ", p, + p->device->id, (u8)p->device->lun); + list_del(&srb->list); + free_tag(dcb, srb); + list_add_tail(&srb->list, &acb->srb_free_list); + set_host_byte(p, did_flag); + set_status_byte(p, SAM_STAT_GOOD); + pci_unmap_srb_sense(acb, srb); + pci_unmap_srb(acb, srb); + if (force) { + /* For new EH, we normally don't need to give commands back, + * as they all complete or all time out */ + scsi_done(p); + } + } + if (!list_empty(&dcb->srb_going_list)) + dprintkl(KERN_DEBUG, + "How could the ML send cmnds to the Going queue? <%02i-%i>\n", + dcb->target_id, dcb->target_lun); + if (dcb->tag_mask) + dprintkl(KERN_DEBUG, + "tag_mask for <%02i-%i> should be empty, is %08x!\n", + dcb->target_id, dcb->target_lun, + dcb->tag_mask); + + /* Waiting queue */ + list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) { + p = srb->cmd; + + printk("W:%p<%02i-%i>", p, p->device->id, + (u8)p->device->lun); + list_move_tail(&srb->list, &acb->srb_free_list); + set_host_byte(p, did_flag); + set_status_byte(p, SAM_STAT_GOOD); + pci_unmap_srb_sense(acb, srb); + pci_unmap_srb(acb, srb); + if (force) { + /* For new EH, we normally don't need to give commands back, + * as they all complete or all time out */ + scsi_done(cmd); + } + } + if (!list_empty(&dcb->srb_waiting_list)) + dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n", + list_size(&dcb->srb_waiting_list), dcb->target_id, + dcb->target_lun); + dcb->flag &= ~ABORT_DEV_; + } + printk("\n"); +} + + +static void reset_scsi_bus(struct AdapterCtlBlk *acb) +{ + dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb); + acb->acb_flag |= RESET_DEV; /* RESET_DETECT, RESET_DONE, RESET_DEV */ + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI); + + while (!(DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET)) + /* nothing */; +} + + +static void set_basic_config(struct AdapterCtlBlk *acb) +{ + u8 bval; + u16 wval; + DC395x_write8(acb, TRM_S1040_SCSI_TIMEOUT, acb->sel_timeout); + if (acb->config & HCC_PARITY) + bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK; + else + bval = PHASELATCH | INITIATOR | BLOCKRST; + + DC395x_write8(acb, TRM_S1040_SCSI_CONFIG0, bval); + + /* program configuration 1: Act_Neg (+ Act_Neg_Enh? + Fast_Filter? + DataDis?) 
*/ + DC395x_write8(acb, TRM_S1040_SCSI_CONFIG1, 0x03); /* was 0x13: default */ + /* program Host ID */ + DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id); + /* set ansynchronous transfer */ + DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, 0x00); + /* Turn LED control off */ + wval = DC395x_read16(acb, TRM_S1040_GEN_CONTROL) & 0x7F; + DC395x_write16(acb, TRM_S1040_GEN_CONTROL, wval); + /* DMA config */ + wval = DC395x_read16(acb, TRM_S1040_DMA_CONFIG) & ~DMA_FIFO_CTRL; + wval |= + DMA_FIFO_HALF_HALF | DMA_ENHANCE /*| DMA_MEM_MULTI_READ */ ; + DC395x_write16(acb, TRM_S1040_DMA_CONFIG, wval); + /* Clear pending interrupt status */ + DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS); + /* Enable SCSI interrupt */ + DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x7F); + DC395x_write8(acb, TRM_S1040_DMA_INTEN, EN_SCSIINTR | EN_DMAXFERERROR + /*| EN_DMAXFERABORT | EN_DMAXFERCOMP | EN_FORCEDMACOMP */ + ); +} + + +static void scsi_reset_detect(struct AdapterCtlBlk *acb) +{ + dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb); + /* delay half a second */ + if (timer_pending(&acb->waiting_timer)) + del_timer(&acb->waiting_timer); + + DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE); + DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE); + /*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */ + udelay(500); + /* Maybe we locked up the bus? Then lets wait even longer ... */ + acb->last_reset = + jiffies + 5 * HZ / 2 + + HZ * acb->eeprom.delay_time; + + clear_fifo(acb, "scsi_reset_detect"); + set_basic_config(acb); + /*1.25 */ + /*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); */ + + if (acb->acb_flag & RESET_DEV) { /* RESET_DETECT, RESET_DONE, RESET_DEV */ + acb->acb_flag |= RESET_DONE; + } else { + acb->acb_flag |= RESET_DETECT; + reset_dev_param(acb); + doing_srb_done(acb, DID_RESET, NULL, 1); + /*DC395x_RecoverSRB( acb ); */ + acb->active_dcb = NULL; + acb->acb_flag = 0; + waiting_process_next(acb); + } +} + + +static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, + struct ScsiReqBlk *srb) +{ + struct scsi_cmnd *cmd = srb->cmd; + dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n", + cmd, cmd->device->id, (u8)cmd->device->lun); + + srb->flag |= AUTO_REQSENSE; + srb->adapter_status = 0; + srb->target_status = 0; + + /* KG: Can this prevent crap sense data ? */ + memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + + /* Save some data */ + srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address = + srb->segment_x[0].address; + srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length = + srb->segment_x[0].length; + srb->xferred = srb->total_xfer_length; + /* srb->segment_x : a one entry of S/G list table */ + srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE; + srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE; + /* Map sense buffer */ + srb->segment_x[0].address = dma_map_single(&acb->dev->dev, + cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, + DMA_FROM_DEVICE); + dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n", + cmd->sense_buffer, srb->segment_x[0].address, + SCSI_SENSE_BUFFERSIZE); + srb->sg_count = 1; + srb->sg_index = 0; + + if (start_scsi(acb, dcb, srb)) { /* Should only happen, if sb. else grabs the bus */ + dprintkl(KERN_DEBUG, + "request_sense: (0x%p) failed <%02i-%i>\n", + srb->cmd, dcb->target_id, dcb->target_lun); + list_move(&srb->list, &dcb->srb_waiting_list); + waiting_set_timer(acb, HZ / 100); + } +} + + +/** + * device_alloc - Allocate a new device instance. 
This create the + * devices instance and sets up all the data items. The adapter + * instance is required to obtain confiuration information for this + * device. This does *not* add this device to the adapters device + * list. + * + * @acb: The adapter to obtain configuration information from. + * @target: The target for the new device. + * @lun: The lun for the new device. + * + * Return the new device if successful or NULL on failure. + **/ +static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb, + u8 target, u8 lun) +{ + struct NvRamType *eeprom = &acb->eeprom; + u8 period_index = eeprom->target[target].period & 0x07; + struct DeviceCtlBlk *dcb; + + dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC); + dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun); + if (!dcb) + return NULL; + dcb->acb = NULL; + INIT_LIST_HEAD(&dcb->srb_going_list); + INIT_LIST_HEAD(&dcb->srb_waiting_list); + dcb->active_srb = NULL; + dcb->tag_mask = 0; + dcb->max_command = 1; + dcb->target_id = target; + dcb->target_lun = lun; + dcb->dev_mode = eeprom->target[target].cfg0; +#ifndef DC395x_NO_DISCONNECT + dcb->identify_msg = + IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun); +#else + dcb->identify_msg = IDENTIFY(0, lun); +#endif + dcb->inquiry7 = 0; + dcb->sync_mode = 0; + dcb->min_nego_period = clock_period[period_index]; + dcb->sync_period = 0; + dcb->sync_offset = 0; + dcb->flag = 0; + +#ifndef DC395x_NO_WIDE + if ((dcb->dev_mode & NTC_DO_WIDE_NEGO) + && (acb->config & HCC_WIDE_CARD)) + dcb->sync_mode |= WIDE_NEGO_ENABLE; +#endif +#ifndef DC395x_NO_SYNC + if (dcb->dev_mode & NTC_DO_SYNC_NEGO) + if (!(lun) || current_sync_offset) + dcb->sync_mode |= SYNC_NEGO_ENABLE; +#endif + if (dcb->target_lun != 0) { + /* Copy settings */ + struct DeviceCtlBlk *p = NULL, *iter; + + list_for_each_entry(iter, &acb->dcb_list, list) + if (iter->target_id == dcb->target_id) { + p = iter; + break; + } + + if (!p) { + kfree(dcb); + return NULL; + } + + dprintkdbg(DBG_1, + "device_alloc: <%02i-%i> copy from <%02i-%i>\n", + dcb->target_id, dcb->target_lun, + p->target_id, p->target_lun); + dcb->sync_mode = p->sync_mode; + dcb->sync_period = p->sync_period; + dcb->min_nego_period = p->min_nego_period; + dcb->sync_offset = p->sync_offset; + dcb->inquiry7 = p->inquiry7; + } + return dcb; +} + + +/** + * adapter_add_device - Adds the device instance to the adaptor instance. + * + * @acb: The adapter device to be updated + * @dcb: A newly created and initialised device instance to add. + **/ +static void adapter_add_device(struct AdapterCtlBlk *acb, + struct DeviceCtlBlk *dcb) +{ + /* backpointer to adapter */ + dcb->acb = acb; + + /* set run_robin to this device if it is currently empty */ + if (list_empty(&acb->dcb_list)) + acb->dcb_run_robin = dcb; + + /* add device to list */ + list_add_tail(&dcb->list, &acb->dcb_list); + + /* update device maps */ + acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun); + acb->children[dcb->target_id][dcb->target_lun] = dcb; +} + + +/** + * adapter_remove_device - Removes the device instance from the adaptor + * instance. The device instance is not check in any way or freed by this. + * The caller is expected to take care of that. This will simply remove the + * device from the adapters data strcutures. + * + * @acb: The adapter device to be updated + * @dcb: A device that has previously been added to the adapter. 
+ **/ +static void adapter_remove_device(struct AdapterCtlBlk *acb, + struct DeviceCtlBlk *dcb) +{ + struct DeviceCtlBlk *i; + struct DeviceCtlBlk *tmp; + dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n", + dcb->target_id, dcb->target_lun); + + /* fix up any pointers to this device that we have in the adapter */ + if (acb->active_dcb == dcb) + acb->active_dcb = NULL; + if (acb->dcb_run_robin == dcb) + acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb); + + /* unlink from list */ + list_for_each_entry_safe(i, tmp, &acb->dcb_list, list) + if (dcb == i) { + list_del(&i->list); + break; + } + + /* clear map and children */ + acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun); + acb->children[dcb->target_id][dcb->target_lun] = NULL; + dcb->acb = NULL; +} + + +/** + * adapter_remove_and_free_device - Removes a single device from the adapter + * and then frees the device information. + * + * @acb: The adapter device to be updated + * @dcb: A device that has previously been added to the adapter. + */ +static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb, + struct DeviceCtlBlk *dcb) +{ + if (list_size(&dcb->srb_going_list) > 1) { + dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> " + "Won't remove because of %i active requests.\n", + dcb->target_id, dcb->target_lun, + list_size(&dcb->srb_going_list)); + return; + } + adapter_remove_device(acb, dcb); + kfree(dcb); +} + + +/** + * adapter_remove_and_free_all_devices - Removes and frees all of the + * devices associated with the specified adapter. + * + * @acb: The adapter from which all devices should be removed. + **/ +static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb) +{ + struct DeviceCtlBlk *dcb; + struct DeviceCtlBlk *tmp; + dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n", + list_size(&acb->dcb_list)); + + list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list) + adapter_remove_and_free_device(acb, dcb); +} + + +/** + * dc395x_slave_alloc - Called by the scsi mid layer to tell us about a new + * scsi device that we need to deal with. We allocate a new device and then + * insert that device into the adapters device list. + * + * @scsi_device: The new scsi device that we need to handle. + **/ +static int dc395x_slave_alloc(struct scsi_device *scsi_device) +{ + struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata; + struct DeviceCtlBlk *dcb; + + dcb = device_alloc(acb, scsi_device->id, scsi_device->lun); + if (!dcb) + return -ENOMEM; + adapter_add_device(acb, dcb); + + return 0; +} + + +/** + * dc395x_slave_destroy - Called by the scsi mid layer to tell us about a + * device that is going away. + * + * @scsi_device: The new scsi device that we need to handle. + **/ +static void dc395x_slave_destroy(struct scsi_device *scsi_device) +{ + struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata; + struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun); + if (dcb) + adapter_remove_and_free_device(acb, dcb); +} + + + + +/** + * trms1040_wait_30us: wait for 30 us + * + * Waits for 30us (using the chip by the looks of it..) 
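+ * by programming the general purpose timer (TRM_S1040_GEN_TIMER) and
+ * polling for GTIMEOUT.  The trms1040_* helpers below use this as the
+ * per-bit delay while bit-banging the serial EEPROM (apparently a
+ * 93C46-compatible part): command and address bits are shifted out
+ * MSB first on NVR_BITOUT, clocked with NVR_CLOCK, and data is read
+ * back on NVR_BITIN.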
+ * + * @io_port: base I/O address + **/ +static void trms1040_wait_30us(unsigned long io_port) +{ + /* ScsiPortStallExecution(30); wait 30 us */ + outb(5, io_port + TRM_S1040_GEN_TIMER); + while (!(inb(io_port + TRM_S1040_GEN_STATUS) & GTIMEOUT)) + /* nothing */ ; +} + + +/** + * trms1040_write_cmd - write the secified command and address to + * chip + * + * @io_port: base I/O address + * @cmd: SB + op code (command) to send + * @addr: address to send + **/ +static void trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr) +{ + int i; + u8 send_data; + + /* program SB + OP code */ + for (i = 0; i < 3; i++, cmd <<= 1) { + send_data = NVR_SELECT; + if (cmd & 0x04) /* Start from bit 2 */ + send_data |= NVR_BITOUT; + + outb(send_data, io_port + TRM_S1040_GEN_NVRAM); + trms1040_wait_30us(io_port); + outb((send_data | NVR_CLOCK), + io_port + TRM_S1040_GEN_NVRAM); + trms1040_wait_30us(io_port); + } + + /* send address */ + for (i = 0; i < 7; i++, addr <<= 1) { + send_data = NVR_SELECT; + if (addr & 0x40) /* Start from bit 6 */ + send_data |= NVR_BITOUT; + + outb(send_data, io_port + TRM_S1040_GEN_NVRAM); + trms1040_wait_30us(io_port); + outb((send_data | NVR_CLOCK), + io_port + TRM_S1040_GEN_NVRAM); + trms1040_wait_30us(io_port); + } + outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM); + trms1040_wait_30us(io_port); +} + + +/** + * trms1040_set_data - store a single byte in the eeprom + * + * Called from write all to write a single byte into the SSEEPROM + * Which is done one bit at a time. + * + * @io_port: base I/O address + * @addr: offset into EEPROM + * @byte: bytes to write + **/ +static void trms1040_set_data(unsigned long io_port, u8 addr, u8 byte) +{ + int i; + u8 send_data; + + /* Send write command & address */ + trms1040_write_cmd(io_port, 0x05, addr); + + /* Write data */ + for (i = 0; i < 8; i++, byte <<= 1) { + send_data = NVR_SELECT; + if (byte & 0x80) /* Start from bit 7 */ + send_data |= NVR_BITOUT; + + outb(send_data, io_port + TRM_S1040_GEN_NVRAM); + trms1040_wait_30us(io_port); + outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM); + trms1040_wait_30us(io_port); + } + outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM); + trms1040_wait_30us(io_port); + + /* Disable chip select */ + outb(0, io_port + TRM_S1040_GEN_NVRAM); + trms1040_wait_30us(io_port); + + outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM); + trms1040_wait_30us(io_port); + + /* Wait for write ready */ + while (1) { + outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM); + trms1040_wait_30us(io_port); + + outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM); + trms1040_wait_30us(io_port); + + if (inb(io_port + TRM_S1040_GEN_NVRAM) & NVR_BITIN) + break; + } + + /* Disable chip select */ + outb(0, io_port + TRM_S1040_GEN_NVRAM); +} + + +/** + * trms1040_write_all - write 128 bytes to the eeprom + * + * Write the supplied 128 bytes to the chips SEEPROM + * + * @eeprom: the data to write + * @io_port: the base io port + **/ +static void trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port) +{ + u8 *b_eeprom = (u8 *)eeprom; + u8 addr; + + /* Enable SEEPROM */ + outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM), + io_port + TRM_S1040_GEN_CONTROL); + + /* write enable */ + trms1040_write_cmd(io_port, 0x04, 0xFF); + outb(0, io_port + TRM_S1040_GEN_NVRAM); + trms1040_wait_30us(io_port); + + /* write */ + for (addr = 0; addr < 128; addr++, b_eeprom++) + trms1040_set_data(io_port, addr, *b_eeprom); + + /* write disable */ + trms1040_write_cmd(io_port, 0x04, 0x00); + outb(0, io_port + 
TRM_S1040_GEN_NVRAM); + trms1040_wait_30us(io_port); + + /* Disable SEEPROM */ + outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM), + io_port + TRM_S1040_GEN_CONTROL); +} + + +/** + * trms1040_get_data - get a single byte from the eeprom + * + * Called from read all to read a single byte into the SSEEPROM + * Which is done one bit at a time. + * + * @io_port: base I/O address + * @addr: offset into SEEPROM + * + * Returns the byte read. + **/ +static u8 trms1040_get_data(unsigned long io_port, u8 addr) +{ + int i; + u8 read_byte; + u8 result = 0; + + /* Send read command & address */ + trms1040_write_cmd(io_port, 0x06, addr); + + /* read data */ + for (i = 0; i < 8; i++) { + outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM); + trms1040_wait_30us(io_port); + outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM); + + /* Get data bit while falling edge */ + read_byte = inb(io_port + TRM_S1040_GEN_NVRAM); + result <<= 1; + if (read_byte & NVR_BITIN) + result |= 1; + + trms1040_wait_30us(io_port); + } + + /* Disable chip select */ + outb(0, io_port + TRM_S1040_GEN_NVRAM); + return result; +} + + +/** + * trms1040_read_all - read all bytes from the eeprom + * + * Read the 128 bytes from the SEEPROM. + * + * @eeprom: where to store the data + * @io_port: the base io port + **/ +static void trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port) +{ + u8 *b_eeprom = (u8 *)eeprom; + u8 addr; + + /* Enable SEEPROM */ + outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM), + io_port + TRM_S1040_GEN_CONTROL); + + /* read details */ + for (addr = 0; addr < 128; addr++, b_eeprom++) + *b_eeprom = trms1040_get_data(io_port, addr); + + /* Disable SEEPROM */ + outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM), + io_port + TRM_S1040_GEN_CONTROL); +} + + + +/** + * check_eeprom - get and check contents of the eeprom + * + * Read seeprom 128 bytes into the memory provider in eeprom. + * Checks the checksum and if it's not correct it uses a set of default + * values. + * + * @eeprom: caller allocated strcuture to read the eeprom data into + * @io_port: io port to read from + **/ +static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port) +{ + u16 *w_eeprom = (u16 *)eeprom; + u16 w_addr; + u16 cksum; + u32 d_addr; + u32 *d_eeprom; + + trms1040_read_all(eeprom, io_port); /* read eeprom */ + + cksum = 0; + for (w_addr = 0, w_eeprom = (u16 *)eeprom; w_addr < 64; + w_addr++, w_eeprom++) + cksum += *w_eeprom; + if (cksum != 0x1234) { + /* + * Checksum is wrong. 
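+ * (A valid image is one whose 64 16-bit words sum to 0x1234; the last
+ * word is the checksum field and is recomputed further down as 0x1234
+ * minus the sum of the first 63 words before the image is written back.)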
+ * Load a set of defaults into the eeprom buffer + */ + dprintkl(KERN_WARNING, + "EEProm checksum error: using default values and options.\n"); + eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM; + eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8); + eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040; + eeprom->sub_sys_id[1] = + (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8); + eeprom->sub_class = 0x00; + eeprom->vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM; + eeprom->vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8); + eeprom->device_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040; + eeprom->device_id[1] = + (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8); + eeprom->reserved = 0x00; + + for (d_addr = 0, d_eeprom = (u32 *)eeprom->target; + d_addr < 16; d_addr++, d_eeprom++) + *d_eeprom = 0x00000077; /* cfg3,cfg2,period,cfg0 */ + + *d_eeprom++ = 0x04000F07; /* max_tag,delay_time,channel_cfg,scsi_id */ + *d_eeprom++ = 0x00000015; /* reserved1,boot_lun,boot_target,reserved0 */ + for (d_addr = 0; d_addr < 12; d_addr++, d_eeprom++) + *d_eeprom = 0x00; + + /* Now load defaults (maybe set by boot/module params) */ + set_safe_settings(); + fix_settings(); + eeprom_override(eeprom); + + eeprom->cksum = 0x00; + for (w_addr = 0, cksum = 0, w_eeprom = (u16 *)eeprom; + w_addr < 63; w_addr++, w_eeprom++) + cksum += *w_eeprom; + + *w_eeprom = 0x1234 - cksum; + trms1040_write_all(eeprom, io_port); + eeprom->delay_time = cfg_data[CFG_RESET_DELAY].value; + } else { + set_safe_settings(); + eeprom_index_to_delay(eeprom); + eeprom_override(eeprom); + } +} + + +/** + * print_eeprom_settings - output the eeprom settings + * to the kernel log so people can see what they were. + * + * @eeprom: The eeprom data strucutre to show details for. + **/ +static void print_eeprom_settings(struct NvRamType *eeprom) +{ + dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n", + eeprom->scsi_id, + eeprom->target[0].period, + clock_speed[eeprom->target[0].period] / 10, + clock_speed[eeprom->target[0].period] % 10, + eeprom->target[0].cfg0); + dprintkl(KERN_INFO, " AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n", + eeprom->channel_cfg, eeprom->max_tag, + 1 << eeprom->max_tag, eeprom->delay_time); +} + + +/* Free SG tables */ +static void adapter_sg_tables_free(struct AdapterCtlBlk *acb) +{ + int i; + const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN; + + for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page) + kfree(acb->srb_array[i].segment_x); +} + + +/* + * Allocate SG tables; as we have to pci_map them, an SG list (struct SGentry*) + * should never cross a page boundary */ +static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb) +{ + const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1) + *SEGMENTX_LEN; + int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE; + const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN; + int srb_idx = 0; + unsigned i = 0; + struct SGentry *ptr; + + for (i = 0; i < DC395x_MAX_SRB_CNT; i++) + acb->srb_array[i].segment_x = NULL; + + dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages); + while (pages--) { + ptr = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!ptr) { + adapter_sg_tables_free(acb); + return 1; + } + dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n", + PAGE_SIZE, ptr, srb_idx); + i = 0; + while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT) + acb->srb_array[srb_idx++].segment_x = + ptr + (i++ * DC395x_MAX_SG_LISTENTRY); + } + if (i < srbs_per_page) + acb->srb.segment_x = + ptr + (i * DC395x_MAX_SG_LISTENTRY); + else + 
dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n"); + return 0; +} + + + +/** + * adapter_print_config - print adapter connection and termination + * config + * + * The io port in the adapter needs to have been set before calling + * this function. + * + * @acb: The adapter to print the information for. + **/ +static void adapter_print_config(struct AdapterCtlBlk *acb) +{ + u8 bval; + + bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS); + dprintkl(KERN_INFO, "%sConnectors: ", + ((bval & WIDESCSI) ? "(Wide) " : "")); + if (!(bval & CON5068)) + printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50"); + if (!(bval & CON68)) + printk("int68%s ", !(bval & INT68HIGH) ? "" : "(50)"); + if (!(bval & CON50)) + printk("int50 "); + if ((bval & (CON5068 | CON50 | CON68)) == + 0 /*(CON5068 | CON50 | CON68) */ ) + printk(" Oops! (All 3?) "); + bval = DC395x_read8(acb, TRM_S1040_GEN_CONTROL); + printk(" Termination: "); + if (bval & DIS_TERM) + printk("Disabled\n"); + else { + if (bval & AUTOTERM) + printk("Auto "); + if (bval & LOW8TERM) + printk("Low "); + if (bval & UP8TERM) + printk("High "); + printk("\n"); + } +} + + +/** + * adapter_init_params - Initialize the various parameters in the + * adapter structure. Note that the pointer to the scsi_host is set + * early (when this instance is created) and the io_port and irq + * values are set later after they have been reserved. This just gets + * everything set to a good starting position. + * + * The eeprom structure in the adapter needs to have been set before + * calling this function. + * + * @acb: The adapter to initialize. + **/ +static void adapter_init_params(struct AdapterCtlBlk *acb) +{ + struct NvRamType *eeprom = &acb->eeprom; + int i; + + /* NOTE: acb->scsi_host is set at scsi_host/acb creation time */ + /* NOTE: acb->io_port_base is set at port registration time */ + /* NOTE: acb->io_port_len is set at port registration time */ + + INIT_LIST_HEAD(&acb->dcb_list); + acb->dcb_run_robin = NULL; + acb->active_dcb = NULL; + + INIT_LIST_HEAD(&acb->srb_free_list); + /* temp SRB for Q tag used or abort command used */ + acb->tmp_srb = &acb->srb; + timer_setup(&acb->waiting_timer, waiting_timeout, 0); + timer_setup(&acb->selto_timer, NULL, 0); + + acb->srb_count = DC395x_MAX_SRB_CNT; + + acb->sel_timeout = DC395x_SEL_TIMEOUT; /* timeout=250ms */ + /* NOTE: acb->irq_level is set at IRQ registration time */ + + acb->tag_max_num = 1 << eeprom->max_tag; + if (acb->tag_max_num > 30) + acb->tag_max_num = 30; + + acb->acb_flag = 0; /* RESET_DETECT, RESET_DONE, RESET_DEV */ + acb->gmode2 = eeprom->channel_cfg; + acb->config = 0; /* NOTE: actually set in adapter_init_chip */ + + if (eeprom->channel_cfg & NAC_SCANLUN) + acb->lun_chk = 1; + acb->scan_devices = 1; + + acb->scsi_host->this_id = eeprom->scsi_id; + acb->hostid_bit = (1 << acb->scsi_host->this_id); + + for (i = 0; i < DC395x_MAX_SCSI_ID; i++) + acb->dcb_map[i] = 0; + + acb->msg_len = 0; + + /* link static array of srbs into the srb free list */ + for (i = 0; i < acb->srb_count - 1; i++) + list_add_tail(&acb->srb_array[i].list, &acb->srb_free_list); +} + + +/** + * adapter_init_scsi_host - Initialize the scsi host instance based on + * values that we have already stored in the adapter instance. There's + * some mention that a lot of these are deprecated, so we won't use + * them (we'll use the ones in the adapter instance) but we'll fill + * them in in case something else needs them. 
+ * + * The eeprom structure, irq and io ports in the adapter need to have + * been set before calling this function. + * + * @host: The scsi host instance to fill in the values for. + **/ +static void adapter_init_scsi_host(struct Scsi_Host *host) +{ + struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata; + struct NvRamType *eeprom = &acb->eeprom; + + host->max_cmd_len = 24; + host->can_queue = DC395x_MAX_CMD_QUEUE; + host->cmd_per_lun = DC395x_MAX_CMD_PER_LUN; + host->this_id = (int)eeprom->scsi_id; + host->io_port = acb->io_port_base; + host->n_io_port = acb->io_port_len; + host->dma_channel = -1; + host->unique_id = acb->io_port_base; + host->irq = acb->irq_level; + acb->last_reset = jiffies; + + host->max_id = 16; + if (host->max_id - 1 == eeprom->scsi_id) + host->max_id--; + + if (eeprom->channel_cfg & NAC_SCANLUN) + host->max_lun = 8; + else + host->max_lun = 1; +} + + +/** + * adapter_init_chip - Get the chip into a know state and figure out + * some of the settings that apply to this adapter. + * + * The io port in the adapter needs to have been set before calling + * this function. The config will be configured correctly on return. + * + * @acb: The adapter which we are to init. + **/ +static void adapter_init_chip(struct AdapterCtlBlk *acb) +{ + struct NvRamType *eeprom = &acb->eeprom; + + /* Mask all the interrupt */ + DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00); + DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00); + + /* Reset SCSI module */ + DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE); + + /* Reset PCI/DMA module */ + DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE); + udelay(20); + + /* program configuration 0 */ + acb->config = HCC_AUTOTERM | HCC_PARITY; + if (DC395x_read8(acb, TRM_S1040_GEN_STATUS) & WIDESCSI) + acb->config |= HCC_WIDE_CARD; + + if (eeprom->channel_cfg & NAC_POWERON_SCSI_RESET) + acb->config |= HCC_SCSI_RESET; + + if (acb->config & HCC_SCSI_RESET) { + dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n"); + DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI); + + /*while (!( DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET )); */ + /*spin_unlock_irq (&io_request_lock); */ + udelay(500); + + acb->last_reset = + jiffies + HZ / 2 + + HZ * acb->eeprom.delay_time; + + /*spin_lock_irq (&io_request_lock); */ + } +} + + +/** + * adapter_init - Grab the resource for the card, setup the adapter + * information, set the card into a known state, create the various + * tables etc etc. This basically gets all adapter information all up + * to date, initialised and gets the chip in sync with it. + * + * @acb: The adapter which we are to init. + * @io_port: The base I/O port + * @io_port_len: The I/O port size + * @irq: IRQ + * + * Returns 0 if the initialization succeeds, any other value on + * failure. 
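+ *
+ * The sequence is: reserve the I/O region and the IRQ, read and verify
+ * the EEPROM (check_eeprom), set up the adapter control block
+ * (adapter_init_params), report the connector/termination settings,
+ * allocate the SG tables, fill in the Scsi_Host and finally program
+ * the chip (adapter_init_chip, set_basic_config).  On failure anything
+ * already claimed is released again.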
+ **/ +static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port, + u32 io_port_len, unsigned int irq) +{ + if (!request_region(io_port, io_port_len, DC395X_NAME)) { + dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port); + goto failed; + } + /* store port base to indicate we have registered it */ + acb->io_port_base = io_port; + acb->io_port_len = io_port_len; + + if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) { + /* release the region we just claimed */ + dprintkl(KERN_INFO, "Failed to register IRQ\n"); + goto failed; + } + /* store irq to indicate we have registered it */ + acb->irq_level = irq; + + /* get eeprom configuration information and command line settings etc */ + check_eeprom(&acb->eeprom, io_port); + print_eeprom_settings(&acb->eeprom); + + /* setup adapter control block */ + adapter_init_params(acb); + + /* display card connectors/termination settings */ + adapter_print_config(acb); + + if (adapter_sg_tables_alloc(acb)) { + dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n"); + goto failed; + } + adapter_init_scsi_host(acb->scsi_host); + adapter_init_chip(acb); + set_basic_config(acb); + + dprintkdbg(DBG_0, + "adapter_init: acb=%p, pdcb_map=%p psrb_array=%p " + "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n", + acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk), + sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk)); + return 0; + +failed: + if (acb->irq_level) + free_irq(acb->irq_level, acb); + if (acb->io_port_base) + release_region(acb->io_port_base, acb->io_port_len); + adapter_sg_tables_free(acb); + + return 1; +} + + +/** + * adapter_uninit_chip - cleanly shut down the scsi controller chip, + * stopping all operations and disabling interrupt generation on the + * card. + * + * @acb: The adapter which we are to shutdown. + **/ +static void adapter_uninit_chip(struct AdapterCtlBlk *acb) +{ + /* disable interrupts */ + DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0); + DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0); + + /* reset the scsi bus */ + if (acb->config & HCC_SCSI_RESET) + reset_scsi_bus(acb); + + /* clear any pending interrupt state */ + DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS); +} + + + +/** + * adapter_uninit - Shut down the chip and release any resources that + * we had allocated. Once this returns the adapter should not be used + * anymore. + * + * @acb: The adapter which we are to un-initialize. 
+ **/ +static void adapter_uninit(struct AdapterCtlBlk *acb) +{ + unsigned long flags; + DC395x_LOCK_IO(acb->scsi_host, flags); + + /* remove timers */ + if (timer_pending(&acb->waiting_timer)) + del_timer(&acb->waiting_timer); + if (timer_pending(&acb->selto_timer)) + del_timer(&acb->selto_timer); + + adapter_uninit_chip(acb); + adapter_remove_and_free_all_devices(acb); + DC395x_UNLOCK_IO(acb->scsi_host, flags); + + if (acb->irq_level) + free_irq(acb->irq_level, acb); + if (acb->io_port_base) + release_region(acb->io_port_base, acb->io_port_len); + + adapter_sg_tables_free(acb); +} + + +#undef YESNO +#define YESNO(YN) \ + if (YN) seq_printf(m, " Yes ");\ + else seq_printf(m, " No ") + +static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host) +{ + struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata; + int spd, spd1; + struct DeviceCtlBlk *dcb; + unsigned long flags; + int dev; + + seq_puts(m, DC395X_BANNER " PCI SCSI Host Adapter\n" + " Driver Version " DC395X_VERSION "\n"); + + DC395x_LOCK_IO(acb->scsi_host, flags); + + seq_printf(m, "SCSI Host Nr %i, ", host->host_no); + seq_printf(m, "DC395U/UW/F DC315/U %s\n", + (acb->config & HCC_WIDE_CARD) ? "Wide" : ""); + seq_printf(m, "io_port_base 0x%04lx, ", acb->io_port_base); + seq_printf(m, "irq_level 0x%04x, ", acb->irq_level); + seq_printf(m, " SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000); + + seq_printf(m, "MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun); + seq_printf(m, "AdapterID %i\n", host->this_id); + + seq_printf(m, "tag_max_num %i", acb->tag_max_num); + /*seq_printf(m, ", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */ + seq_printf(m, ", FilterCfg 0x%02x", + DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1)); + seq_printf(m, ", DelayReset %is\n", acb->eeprom.delay_time); + /*seq_printf(m, "\n"); */ + + seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list)); + seq_printf(m, "Map of attached LUNs: %8ph\n", &acb->dcb_map[0]); + seq_printf(m, " %8ph\n", &acb->dcb_map[8]); + + seq_puts(m, + "Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n"); + + dev = 0; + list_for_each_entry(dcb, &acb->dcb_list, list) { + int nego_period; + seq_printf(m, "%02i %02i %02i ", dev, dcb->target_id, + dcb->target_lun); + YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK); + YESNO(dcb->sync_offset); + YESNO(dcb->sync_period & WIDE_SYNC); + YESNO(dcb->dev_mode & NTC_DO_DISCONNECT); + YESNO(dcb->dev_mode & NTC_DO_SEND_START); + YESNO(dcb->sync_mode & EN_TAG_QUEUEING); + nego_period = clock_period[dcb->sync_period & 0x07] << 2; + if (dcb->sync_offset) + seq_printf(m, " %03i ns ", nego_period); + else + seq_printf(m, " (%03i ns)", (dcb->min_nego_period << 2)); + + if (dcb->sync_offset & 0x0f) { + spd = 1000 / (nego_period); + spd1 = 1000 % (nego_period); + spd1 = (spd1 * 10 + nego_period / 2) / (nego_period); + seq_printf(m, " %2i.%1i M %02i ", spd, spd1, + (dcb->sync_offset & 0x0f)); + } else + seq_puts(m, " "); + + /* Add more info ... 
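+ * (The sync figures above are derived from
+ *  nego_period = clock_period[sync_period & 0x07] << 2 nanoseconds and
+ *  printed as 1000 / nego_period MHz with one decimal, so e.g. a
+ *  100 ns period with offset 15 is shown as "10.0 M 15".)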
*/ + seq_printf(m, " %02i\n", dcb->max_command); + dev++; + } + + if (timer_pending(&acb->waiting_timer)) + seq_puts(m, "Waiting queue timer running\n"); + else + seq_putc(m, '\n'); + + list_for_each_entry(dcb, &acb->dcb_list, list) { + struct ScsiReqBlk *srb; + if (!list_empty(&dcb->srb_waiting_list)) + seq_printf(m, "DCB (%02i-%i): Waiting: %i:", + dcb->target_id, dcb->target_lun, + list_size(&dcb->srb_waiting_list)); + list_for_each_entry(srb, &dcb->srb_waiting_list, list) + seq_printf(m, " %p", srb->cmd); + if (!list_empty(&dcb->srb_going_list)) + seq_printf(m, "\nDCB (%02i-%i): Going : %i:", + dcb->target_id, dcb->target_lun, + list_size(&dcb->srb_going_list)); + list_for_each_entry(srb, &dcb->srb_going_list, list) + seq_printf(m, " %p", srb->cmd); + if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list)) + seq_putc(m, '\n'); + } + + if (debug_enabled(DBG_1)) { + seq_printf(m, "DCB list for ACB %p:\n", acb); + list_for_each_entry(dcb, &acb->dcb_list, list) { + seq_printf(m, "%p -> ", dcb); + } + seq_puts(m, "END\n"); + } + + DC395x_UNLOCK_IO(acb->scsi_host, flags); + return 0; +} + + +static const struct scsi_host_template dc395x_driver_template = { + .module = THIS_MODULE, + .proc_name = DC395X_NAME, + .show_info = dc395x_show_info, + .name = DC395X_BANNER " " DC395X_VERSION, + .queuecommand = dc395x_queue_command, + .slave_alloc = dc395x_slave_alloc, + .slave_destroy = dc395x_slave_destroy, + .can_queue = DC395x_MAX_CAN_QUEUE, + .this_id = 7, + .sg_tablesize = DC395x_MAX_SG_TABLESIZE, + .cmd_per_lun = DC395x_MAX_CMD_PER_LUN, + .eh_abort_handler = dc395x_eh_abort, + .eh_bus_reset_handler = dc395x_eh_bus_reset, + .dma_boundary = PAGE_SIZE - 1, +}; + + +/** + * banner_display - Display banner on first instance of driver + * initialized. + **/ +static void banner_display(void) +{ + static int banner_done = 0; + if (!banner_done) + { + dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION); + banner_done = 1; + } +} + + +/** + * dc395x_init_one - Initialise a single instance of the adapter. + * + * The PCI layer will call this once for each instance of the adapter + * that it finds in the system. The pci_dev strcuture indicates which + * instance we are being called from. + * + * @dev: The PCI device to initialize. + * @id: Looks like a pointer to the entry in our pci device table + * that was actually matched by the PCI subsystem. + * + * Returns 0 on success, or an error code (-ve) on failure. 
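+ *
+ * The probe enables the PCI device, picks up BAR 0 and the IRQ,
+ * allocates the Scsi_Host (with the AdapterCtlBlk as hostdata), runs
+ * adapter_init(), enables bus mastering and finally registers and
+ * scans the host.  Any failure unwinds whatever had been set up.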
+ **/ +static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct Scsi_Host *scsi_host = NULL; + struct AdapterCtlBlk *acb = NULL; + unsigned long io_port_base; + unsigned int io_port_len; + unsigned int irq; + + dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev)); + banner_display(); + + if (pci_enable_device(dev)) + { + dprintkl(KERN_INFO, "PCI Enable device failed.\n"); + return -ENODEV; + } + io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK; + io_port_len = pci_resource_len(dev, 0); + irq = dev->irq; + dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq); + + /* allocate scsi host information (includes out adapter) */ + scsi_host = scsi_host_alloc(&dc395x_driver_template, + sizeof(struct AdapterCtlBlk)); + if (!scsi_host) { + dprintkl(KERN_INFO, "scsi_host_alloc failed\n"); + goto fail; + } + acb = (struct AdapterCtlBlk*)scsi_host->hostdata; + acb->scsi_host = scsi_host; + acb->dev = dev; + + /* initialise the adapter and everything we need */ + if (adapter_init(acb, io_port_base, io_port_len, irq)) { + dprintkl(KERN_INFO, "adapter init failed\n"); + acb = NULL; + goto fail; + } + + pci_set_master(dev); + + /* get the scsi mid level to scan for new devices on the bus */ + if (scsi_add_host(scsi_host, &dev->dev)) { + dprintkl(KERN_ERR, "scsi_add_host failed\n"); + goto fail; + } + pci_set_drvdata(dev, scsi_host); + scsi_scan_host(scsi_host); + + return 0; + +fail: + if (acb != NULL) + adapter_uninit(acb); + if (scsi_host != NULL) + scsi_host_put(scsi_host); + pci_disable_device(dev); + return -ENODEV; +} + + +/** + * dc395x_remove_one - Called to remove a single instance of the + * adapter. + * + * @dev: The PCI device to initialize. + **/ +static void dc395x_remove_one(struct pci_dev *dev) +{ + struct Scsi_Host *scsi_host = pci_get_drvdata(dev); + struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata); + + dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb); + + scsi_remove_host(scsi_host); + adapter_uninit(acb); + pci_disable_device(dev); + scsi_host_put(scsi_host); +} + + +static struct pci_device_id dc395x_pci_table[] = { + { + .vendor = PCI_VENDOR_ID_TEKRAM, + .device = PCI_DEVICE_ID_TEKRAM_TRMS1040, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + {} /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(pci, dc395x_pci_table); + + +static struct pci_driver dc395x_driver = { + .name = DC395X_NAME, + .id_table = dc395x_pci_table, + .probe = dc395x_init_one, + .remove = dc395x_remove_one, +}; +module_pci_driver(dc395x_driver); + +MODULE_AUTHOR("C.L. 
Huang / Erich Chen / Kurt Garloff"); +MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/dc395x.h b/drivers/scsi/dc395x.h new file mode 100644 index 000000000..24a36c046 --- /dev/null +++ b/drivers/scsi/dc395x.h @@ -0,0 +1,611 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/************************************************************************/ +/* */ +/* dc395x.h */ +/* */ +/* Device Driver for Tekram DC395(U/UW/F), DC315(U) */ +/* PCI SCSI Bus Master Host Adapter */ +/* (SCSI chip set used Tekram ASIC TRM-S1040) */ +/* */ +/************************************************************************/ +#ifndef DC395x_H +#define DC395x_H + +/************************************************************************/ +/* */ +/* Initial values */ +/* */ +/************************************************************************/ +#define DC395x_MAX_CMD_QUEUE 32 +/* #define DC395x_MAX_QTAGS 32 */ +#define DC395x_MAX_QTAGS 16 +#define DC395x_MAX_SCSI_ID 16 +#define DC395x_MAX_CMD_PER_LUN DC395x_MAX_QTAGS +#define DC395x_MAX_SG_TABLESIZE 64 /* HW limitation */ +#define DC395x_MAX_SG_LISTENTRY 64 /* Must be equal or lower to previous */ + /* item */ +#define DC395x_MAX_SRB_CNT 63 +/* #define DC395x_MAX_CAN_QUEUE 7 * DC395x_MAX_QTAGS */ +#define DC395x_MAX_CAN_QUEUE DC395x_MAX_SRB_CNT +#define DC395x_END_SCAN 2 +#define DC395x_SEL_TIMEOUT 153 /* 250 ms selection timeout (@ 40 MHz) */ +#define DC395x_MAX_RETRIES 3 + +#if 0 +#define SYNC_FIRST +#endif + +#define NORM_REC_LVL 0 + +/************************************************************************/ +/* */ +/* Various definitions */ +/* */ +/************************************************************************/ +#define BIT31 0x80000000 +#define BIT30 0x40000000 +#define BIT29 0x20000000 +#define BIT28 0x10000000 +#define BIT27 0x08000000 +#define BIT26 0x04000000 +#define BIT25 0x02000000 +#define BIT24 0x01000000 +#define BIT23 0x00800000 +#define BIT22 0x00400000 +#define BIT21 0x00200000 +#define BIT20 0x00100000 +#define BIT19 0x00080000 +#define BIT18 0x00040000 +#define BIT17 0x00020000 +#define BIT16 0x00010000 +#define BIT15 0x00008000 +#define BIT14 0x00004000 +#define BIT13 0x00002000 +#define BIT12 0x00001000 +#define BIT11 0x00000800 +#define BIT10 0x00000400 +#define BIT9 0x00000200 +#define BIT8 0x00000100 +#define BIT7 0x00000080 +#define BIT6 0x00000040 +#define BIT5 0x00000020 +#define BIT4 0x00000010 +#define BIT3 0x00000008 +#define BIT2 0x00000004 +#define BIT1 0x00000002 +#define BIT0 0x00000001 + +/* UnitCtrlFlag */ +#define UNIT_ALLOCATED BIT0 +#define UNIT_INFO_CHANGED BIT1 +#define FORMATING_MEDIA BIT2 +#define UNIT_RETRY BIT3 + +/* UnitFlags */ +#define DASD_SUPPORT BIT0 +#define SCSI_SUPPORT BIT1 +#define ASPI_SUPPORT BIT2 + +/* SRBState machine definition */ +#define SRB_FREE 0x0000 +#define SRB_WAIT 0x0001 +#define SRB_READY 0x0002 +#define SRB_MSGOUT 0x0004 /* arbitration+msg_out 1st byte */ +#define SRB_MSGIN 0x0008 +#define SRB_EXTEND_MSGIN 0x0010 +#define SRB_COMMAND 0x0020 +#define SRB_START_ 0x0040 /* arbitration+msg_out+command_out */ +#define SRB_DISCONNECT 0x0080 +#define SRB_DATA_XFER 0x0100 +#define SRB_XFERPAD 0x0200 +#define SRB_STATUS 0x0400 +#define SRB_COMPLETED 0x0800 +#define SRB_ABORT_SENT 0x1000 +#define SRB_DO_SYNC_NEGO 0x2000 +#define SRB_DO_WIDE_NEGO 0x4000 +#define SRB_UNEXPECT_RESEL 0x8000 + +/************************************************************************/ +/* */ +/* ACB 
Config */ +/* */ +/************************************************************************/ +#define HCC_WIDE_CARD 0x20 +#define HCC_SCSI_RESET 0x10 +#define HCC_PARITY 0x08 +#define HCC_AUTOTERM 0x04 +#define HCC_LOW8TERM 0x02 +#define HCC_UP8TERM 0x01 + +/* ACBFlag */ +#define RESET_DEV BIT0 +#define RESET_DETECT BIT1 +#define RESET_DONE BIT2 + +/* DCBFlag */ +#define ABORT_DEV_ BIT0 + +/* SRBstatus */ +#define SRB_OK BIT0 +#define ABORTION BIT1 +#define OVER_RUN BIT2 +#define UNDER_RUN BIT3 +#define PARITY_ERROR BIT4 +#define SRB_ERROR BIT5 + +/* SRBFlag */ +#define DATAOUT BIT7 +#define DATAIN BIT6 +#define RESIDUAL_VALID BIT5 +#define ENABLE_TIMER BIT4 +#define RESET_DEV0 BIT2 +#define ABORT_DEV BIT1 +#define AUTO_REQSENSE BIT0 + +/* Adapter status */ +#define H_STATUS_GOOD 0 +#define H_SEL_TIMEOUT 0x11 +#define H_OVER_UNDER_RUN 0x12 +#define H_UNEXP_BUS_FREE 0x13 +#define H_TARGET_PHASE_F 0x14 +#define H_INVALID_CCB_OP 0x16 +#define H_LINK_CCB_BAD 0x17 +#define H_BAD_TARGET_DIR 0x18 +#define H_DUPLICATE_CCB 0x19 +#define H_BAD_CCB_OR_SG 0x1A +#define H_ABORT 0x0FF + +/* SCSI BUS Status byte codes */ +#define SCSI_STAT_UNEXP_BUS_F 0xFD /* Unexpect Bus Free */ +#define SCSI_STAT_BUS_RST_DETECT 0xFE /* Scsi Bus Reset detected */ +#define SCSI_STAT_SEL_TIMEOUT 0xFF /* Selection Time out */ + +/* Sync_Mode */ +#define SYNC_WIDE_TAG_ATNT_DISABLE 0 +#define SYNC_NEGO_ENABLE BIT0 +#define SYNC_NEGO_DONE BIT1 +#define WIDE_NEGO_ENABLE BIT2 +#define WIDE_NEGO_DONE BIT3 +#define WIDE_NEGO_STATE BIT4 +#define EN_TAG_QUEUEING BIT5 +#define EN_ATN_STOP BIT6 + +#define SYNC_NEGO_OFFSET 15 + +/* cmd->result */ +#define STATUS_MASK_ 0xFF +#define MSG_MASK 0xFF00 +#define RETURN_MASK 0xFF0000 + +/************************************************************************/ +/* */ +/* Inquiry Data format */ +/* */ +/************************************************************************/ +struct ScsiInqData +{ /* INQ */ + u8 DevType; /* Periph Qualifier & Periph Dev Type */ + u8 RMB_TypeMod; /* rem media bit & Dev Type Modifier */ + u8 Vers; /* ISO, ECMA, & ANSI versions */ + u8 RDF; /* AEN, TRMIOP, & response data format */ + u8 AddLen; /* length of additional data */ + u8 Res1; /* reserved */ + u8 Res2; /* reserved */ + u8 Flags; /* RelADr, Wbus32, Wbus16, Sync, etc. 
*/ + u8 VendorID[8]; /* Vendor Identification */ + u8 ProductID[16]; /* Product Identification */ + u8 ProductRev[4]; /* Product Revision */ +}; + + /* Inquiry byte 0 masks */ +#define SCSI_DEVTYPE 0x1F /* Peripheral Device Type */ +#define SCSI_PERIPHQUAL 0xE0 /* Peripheral Qualifier */ + /* Inquiry byte 1 mask */ +#define SCSI_REMOVABLE_MEDIA 0x80 /* Removable Media bit (1=removable) */ + /* Peripheral Device Type definitions */ + /* See include/scsi/scsi.h */ +#define TYPE_NODEV SCSI_DEVTYPE /* Unknown or no device type */ +#ifndef TYPE_PRINTER /* */ +# define TYPE_PRINTER 0x02 /* Printer device */ +#endif /* */ +#ifndef TYPE_COMM /* */ +# define TYPE_COMM 0x09 /* Communications device */ +#endif + +/************************************************************************/ +/* */ +/* Inquiry flag definitions (Inq data byte 7) */ +/* */ +/************************************************************************/ +#define SCSI_INQ_RELADR 0x80 /* device supports relative addressing */ +#define SCSI_INQ_WBUS32 0x40 /* device supports 32 bit data xfers */ +#define SCSI_INQ_WBUS16 0x20 /* device supports 16 bit data xfers */ +#define SCSI_INQ_SYNC 0x10 /* device supports synchronous xfer */ +#define SCSI_INQ_LINKED 0x08 /* device supports linked commands */ +#define SCSI_INQ_CMDQUEUE 0x02 /* device supports command queueing */ +#define SCSI_INQ_SFTRE 0x01 /* device supports soft resets */ + +#define ENABLE_CE 1 +#define DISABLE_CE 0 +#define EEPROM_READ 0x80 + +/************************************************************************/ +/* */ +/* The PCI configuration register offset for TRM_S1040 */ +/* */ +/************************************************************************/ +#define TRM_S1040_ID 0x00 /* Vendor and Device ID */ +#define TRM_S1040_COMMAND 0x04 /* PCI command register */ +#define TRM_S1040_IOBASE 0x10 /* I/O Space base address */ +#define TRM_S1040_ROMBASE 0x30 /* Expansion ROM Base Address */ +#define TRM_S1040_INTLINE 0x3C /* Interrupt line */ + +/************************************************************************/ +/* */ +/* The SCSI register offset for TRM_S1040 */ +/* */ +/************************************************************************/ +#define TRM_S1040_SCSI_STATUS 0x80 /* SCSI Status (R) */ +#define COMMANDPHASEDONE 0x2000 /* SCSI command phase done */ +#define SCSIXFERDONE 0x0800 /* SCSI SCSI transfer done */ +#define SCSIXFERCNT_2_ZERO 0x0100 /* SCSI SCSI transfer count to zero */ +#define SCSIINTERRUPT 0x0080 /* SCSI interrupt pending */ +#define COMMANDABORT 0x0040 /* SCSI command abort */ +#define SEQUENCERACTIVE 0x0020 /* SCSI sequencer active */ +#define PHASEMISMATCH 0x0010 /* SCSI phase mismatch */ +#define PARITYERROR 0x0008 /* SCSI parity error */ + +#define PHASEMASK 0x0007 /* Phase MSG/CD/IO */ +#define PH_DATA_OUT 0x00 /* Data out phase */ +#define PH_DATA_IN 0x01 /* Data in phase */ +#define PH_COMMAND 0x02 /* Command phase */ +#define PH_STATUS 0x03 /* Status phase */ +#define PH_BUS_FREE 0x05 /* Invalid phase used as bus free */ +#define PH_MSG_OUT 0x06 /* Message out phase */ +#define PH_MSG_IN 0x07 /* Message in phase */ + +#define TRM_S1040_SCSI_CONTROL 0x80 /* SCSI Control (W) */ +#define DO_CLRATN 0x0400 /* Clear ATN */ +#define DO_SETATN 0x0200 /* Set ATN */ +#define DO_CMDABORT 0x0100 /* Abort SCSI command */ +#define DO_RSTMODULE 0x0010 /* Reset SCSI chip */ +#define DO_RSTSCSI 0x0008 /* Reset SCSI bus */ +#define DO_CLRFIFO 0x0004 /* Clear SCSI transfer FIFO */ +#define DO_DATALATCH 0x0002 /* Enable SCSI bus data input 
(latched) */ +/* #define DO_DATALATCH 0x0000 */ /* KG: DISable SCSI bus data latch */ +#define DO_HWRESELECT 0x0001 /* Enable hardware reselection */ + +#define TRM_S1040_SCSI_FIFOCNT 0x82 /* SCSI FIFO Counter 5bits(R) */ +#define TRM_S1040_SCSI_SIGNAL 0x83 /* SCSI low level signal (R/W) */ + +#define TRM_S1040_SCSI_INTSTATUS 0x84 /* SCSI Interrupt Status (R) */ +#define INT_SCAM 0x80 /* SCAM selection interrupt */ +#define INT_SELECT 0x40 /* Selection interrupt */ +#define INT_SELTIMEOUT 0x20 /* Selection timeout interrupt */ +#define INT_DISCONNECT 0x10 /* Bus disconnected interrupt */ +#define INT_RESELECTED 0x08 /* Reselected interrupt */ +#define INT_SCSIRESET 0x04 /* SCSI reset detected interrupt */ +#define INT_BUSSERVICE 0x02 /* Bus service interrupt */ +#define INT_CMDDONE 0x01 /* SCSI command done interrupt */ + +#define TRM_S1040_SCSI_OFFSET 0x84 /* SCSI Offset Count (W) */ + +/************************************************************************/ +/* */ +/* Bit Name Definition */ +/* --------- ------------- ---------------------------- */ +/* 07-05 0 RSVD Reversed. Always 0. */ +/* 04 0 OFFSET4 Reversed for LVDS. Always 0. */ +/* 03-00 0 OFFSET[03:00] Offset number from 0 to 15 */ +/* */ +/************************************************************************/ + +#define TRM_S1040_SCSI_SYNC 0x85 /* SCSI Synchronous Control (R/W) */ +#define LVDS_SYNC 0x20 /* Enable LVDS synchronous */ +#define WIDE_SYNC 0x10 /* Enable WIDE synchronous */ +#define ALT_SYNC 0x08 /* Enable Fast-20 alternate synchronous */ + +/************************************************************************/ +/* */ +/* SYNCM 7 6 5 4 3 2 1 0 */ +/* Name RSVD RSVD LVDS WIDE ALTPERD PERIOD2 PERIOD1 PERIOD0 */ +/* Default 0 0 0 0 0 0 0 0 */ +/* */ +/* Bit Name Definition */ +/* --------- ------------- --------------------------- */ +/* 07-06 0 RSVD Reversed. Always read 0 */ +/* 05 0 LVDS Reversed. Always read 0 */ +/* 04 0 WIDE/WSCSI Enable wide (16-bits) SCSI */ +/* transfer. */ +/* 03 0 ALTPERD/ALTPD Alternate (Sync./Period) mode. */ +/* */ +/* @@ When this bit is set, */ +/* the synchronous period bits 2:0 */ +/* in the Synchronous Mode register */ +/* are used to transfer data */ +/* at the Fast-20 rate. */ +/* @@ When this bit is unset, */ +/* the synchronous period bits 2:0 */ +/* in the Synchronous Mode Register */ +/* are used to transfer data */ +/* at the Fast-10 rate (or Fast-40 w/ LVDS). */ +/* */ +/* 02-00 0 PERIOD[2:0]/ Synchronous SCSI Transfer Rate. */ +/* SXPD[02:00] These 3 bits specify */ +/* the Synchronous SCSI Transfer */ +/* Rate for Fast-20 and Fast-10. */ +/* These bits are also reset */ +/* by a SCSI Bus reset. 
*/ +/* */ +/* For Fast-10 bit ALTPD = 0 and LVDS = 0 */ +/* and bit2,bit1,bit0 is defined as follows : */ +/* */ +/* 000 100ns, 10.0 MHz */ +/* 001 150ns, 6.6 MHz */ +/* 010 200ns, 5.0 MHz */ +/* 011 250ns, 4.0 MHz */ +/* 100 300ns, 3.3 MHz */ +/* 101 350ns, 2.8 MHz */ +/* 110 400ns, 2.5 MHz */ +/* 111 450ns, 2.2 MHz */ +/* */ +/* For Fast-20 bit ALTPD = 1 and LVDS = 0 */ +/* and bit2,bit1,bit0 is defined as follows : */ +/* */ +/* 000 50ns, 20.0 MHz */ +/* 001 75ns, 13.3 MHz */ +/* 010 100ns, 10.0 MHz */ +/* 011 125ns, 8.0 MHz */ +/* 100 150ns, 6.6 MHz */ +/* 101 175ns, 5.7 MHz */ +/* 110 200ns, 5.0 MHz */ +/* 111 250ns, 4.0 MHz KG: Maybe 225ns, 4.4 MHz */ +/* */ +/* For Fast-40 bit ALTPD = 0 and LVDS = 1 */ +/* and bit2,bit1,bit0 is defined as follows : */ +/* */ +/* 000 25ns, 40.0 MHz */ +/* 001 50ns, 20.0 MHz */ +/* 010 75ns, 13.3 MHz */ +/* 011 100ns, 10.0 MHz */ +/* 100 125ns, 8.0 MHz */ +/* 101 150ns, 6.6 MHz */ +/* 110 175ns, 5.7 MHz */ +/* 111 200ns, 5.0 MHz */ +/* */ +/************************************************************************/ + +#define TRM_S1040_SCSI_TARGETID 0x86 /* SCSI Target ID (R/W) */ +#define TRM_S1040_SCSI_IDMSG 0x87 /* SCSI Identify Message (R) */ +#define TRM_S1040_SCSI_HOSTID 0x87 /* SCSI Host ID (W) */ +#define TRM_S1040_SCSI_COUNTER 0x88 /* SCSI Transfer Counter 24bits(R/W) */ + +#define TRM_S1040_SCSI_INTEN 0x8C /* SCSI Interrupt Enable (R/W) */ +#define EN_SCAM 0x80 /* Enable SCAM selection interrupt */ +#define EN_SELECT 0x40 /* Enable selection interrupt */ +#define EN_SELTIMEOUT 0x20 /* Enable selection timeout interrupt */ +#define EN_DISCONNECT 0x10 /* Enable bus disconnected interrupt */ +#define EN_RESELECTED 0x08 /* Enable reselected interrupt */ +#define EN_SCSIRESET 0x04 /* Enable SCSI reset detected interrupt */ +#define EN_BUSSERVICE 0x02 /* Enable bus service interrupt */ +#define EN_CMDDONE 0x01 /* Enable SCSI command done interrupt */ + +#define TRM_S1040_SCSI_CONFIG0 0x8D /* SCSI Configuration 0 (R/W) */ +#define PHASELATCH 0x40 /* Enable phase latch */ +#define INITIATOR 0x20 /* Enable initiator mode */ +#define PARITYCHECK 0x10 /* Enable parity check */ +#define BLOCKRST 0x01 /* Disable SCSI reset1 */ + +#define TRM_S1040_SCSI_CONFIG1 0x8E /* SCSI Configuration 1 (R/W) */ +#define ACTIVE_NEGPLUS 0x10 /* Enhance active negation */ +#define FILTER_DISABLE 0x08 /* Disable SCSI data filter */ +#define FAST_FILTER 0x04 /* ? 
*/ +#define ACTIVE_NEG 0x02 /* Enable active negation */ + +#define TRM_S1040_SCSI_CONFIG2 0x8F /* SCSI Configuration 2 (R/W) */ +#define CFG2_WIDEFIFO 0x02 /* */ + +#define TRM_S1040_SCSI_COMMAND 0x90 /* SCSI Command (R/W) */ +#define SCMD_COMP 0x12 /* Command complete */ +#define SCMD_SEL_ATN 0x60 /* Selection with ATN */ +#define SCMD_SEL_ATN3 0x64 /* Selection with ATN3 */ +#define SCMD_SEL_ATNSTOP 0xB8 /* Selection with ATN and Stop */ +#define SCMD_FIFO_OUT 0xC0 /* SCSI FIFO transfer out */ +#define SCMD_DMA_OUT 0xC1 /* SCSI DMA transfer out */ +#define SCMD_FIFO_IN 0xC2 /* SCSI FIFO transfer in */ +#define SCMD_DMA_IN 0xC3 /* SCSI DMA transfer in */ +#define SCMD_MSGACCEPT 0xD8 /* Message accept */ + +/************************************************************************/ +/* */ +/* Code Command Description */ +/* ---- ---------------------------------------- */ +/* 02 Enable reselection with FIFO */ +/* 40 Select without ATN with FIFO */ +/* 60 Select with ATN with FIFO */ +/* 64 Select with ATN3 with FIFO */ +/* A0 Select with ATN and stop with FIFO */ +/* C0 Transfer information out with FIFO */ +/* C1 Transfer information out with DMA */ +/* C2 Transfer information in with FIFO */ +/* C3 Transfer information in with DMA */ +/* 12 Initiator command complete with FIFO */ +/* 50 Initiator transfer information out sequence without ATN */ +/* with FIFO */ +/* 70 Initiator transfer information out sequence with ATN */ +/* with FIFO */ +/* 74 Initiator transfer information out sequence with ATN3 */ +/* with FIFO */ +/* 52 Initiator transfer information in sequence without ATN */ +/* with FIFO */ +/* 72 Initiator transfer information in sequence with ATN */ +/* with FIFO */ +/* 76 Initiator transfer information in sequence with ATN3 */ +/* with FIFO */ +/* 90 Initiator transfer information out command complete */ +/* with FIFO */ +/* 92 Initiator transfer information in command complete */ +/* with FIFO */ +/* D2 Enable selection */ +/* 08 Reselection */ +/* 48 Disconnect command with FIFO */ +/* 88 Terminate command with FIFO */ +/* C8 Target command complete with FIFO */ +/* 18 SCAM Arbitration/ Selection */ +/* 5A Enable reselection */ +/* 98 Select without ATN with FIFO */ +/* B8 Select with ATN with FIFO */ +/* D8 Message Accepted */ +/* 58 NOP */ +/* */ +/************************************************************************/ + +#define TRM_S1040_SCSI_TIMEOUT 0x91 /* SCSI Time Out Value (R/W) */ +#define TRM_S1040_SCSI_FIFO 0x98 /* SCSI FIFO (R/W) */ + +#define TRM_S1040_SCSI_TCR0 0x9C /* SCSI Target Control 0 (R/W) */ +#define TCR0_WIDE_NEGO_DONE 0x8000 /* Wide nego done */ +#define TCR0_SYNC_NEGO_DONE 0x4000 /* Synchronous nego done */ +#define TCR0_ENABLE_LVDS 0x2000 /* Enable LVDS synchronous */ +#define TCR0_ENABLE_WIDE 0x1000 /* Enable WIDE synchronous */ +#define TCR0_ENABLE_ALT 0x0800 /* Enable alternate synchronous */ +#define TCR0_PERIOD_MASK 0x0700 /* Transfer rate */ + +#define TCR0_DO_WIDE_NEGO 0x0080 /* Do wide NEGO */ +#define TCR0_DO_SYNC_NEGO 0x0040 /* Do sync NEGO */ +#define TCR0_DISCONNECT_EN 0x0020 /* Disconnection enable */ +#define TCR0_OFFSET_MASK 0x001F /* Offset number */ + +#define TRM_S1040_SCSI_TCR1 0x9E /* SCSI Target Control 1 (R/W) */ +#define MAXTAG_MASK 0x7F00 /* Maximum tags (127) */ +#define NON_TAG_BUSY 0x0080 /* Non tag command active */ +#define ACTTAG_MASK 0x007F /* Active tags */ + +/************************************************************************/ +/* */ +/* The DMA register offset for TRM_S1040 */ +/* */ 
+/************************************************************************/ +#define TRM_S1040_DMA_COMMAND 0xA0 /* DMA Command (R/W) */ +#define DMACMD_SG 0x02 /* Enable HW S/G support */ +#define DMACMD_DIR 0x01 /* 1 = read from SCSI write to Host */ +#define XFERDATAIN_SG 0x0103 /* Transfer data in w/ SG */ +#define XFERDATAOUT_SG 0x0102 /* Transfer data out w/ SG */ +#define XFERDATAIN 0x0101 /* Transfer data in w/o SG */ +#define XFERDATAOUT 0x0100 /* Transfer data out w/o SG */ + +#define TRM_S1040_DMA_FIFOCNT 0xA1 /* DMA FIFO Counter (R) */ + +#define TRM_S1040_DMA_CONTROL 0xA1 /* DMA Control (W) */ +#define DMARESETMODULE 0x10 /* Reset PCI/DMA module */ +#define STOPDMAXFER 0x08 /* Stop DMA transfer */ +#define ABORTXFER 0x04 /* Abort DMA transfer */ +#define CLRXFIFO 0x02 /* Clear DMA transfer FIFO */ +#define STARTDMAXFER 0x01 /* Start DMA transfer */ + +#define TRM_S1040_DMA_FIFOSTAT 0xA2 /* DMA FIFO Status (R) */ + +#define TRM_S1040_DMA_STATUS 0xA3 /* DMA Interrupt Status (R/W) */ +#define XFERPENDING 0x80 /* Transfer pending */ +#define SCSIBUSY 0x40 /* SCSI busy */ +#define GLOBALINT 0x20 /* DMA_INTEN bit 0-4 set */ +#define FORCEDMACOMP 0x10 /* Force DMA transfer complete */ +#define DMAXFERERROR 0x08 /* DMA transfer error */ +#define DMAXFERABORT 0x04 /* DMA transfer abort */ +#define DMAXFERCOMP 0x02 /* Bus Master XFER Complete status */ +#define SCSICOMP 0x01 /* SCSI complete interrupt */ + +#define TRM_S1040_DMA_INTEN 0xA4 /* DMA Interrupt Enable (R/W) */ +#define EN_FORCEDMACOMP 0x10 /* Force DMA transfer complete */ +#define EN_DMAXFERERROR 0x08 /* DMA transfer error */ +#define EN_DMAXFERABORT 0x04 /* DMA transfer abort */ +#define EN_DMAXFERCOMP 0x02 /* Bus Master XFER Complete status */ +#define EN_SCSIINTR 0x01 /* Enable SCSI complete interrupt */ + +#define TRM_S1040_DMA_CONFIG 0xA6 /* DMA Configuration (R/W) */ +#define DMA_ENHANCE 0x8000 /* Enable DMA enhance feature (SG?) 
*/ +#define DMA_PCI_DUAL_ADDR 0x4000 /* */ +#define DMA_CFG_RES 0x2000 /* Always 1 */ +#define DMA_AUTO_CLR_FIFO 0x1000 /* DISable DMA auto clear FIFO */ +#define DMA_MEM_MULTI_READ 0x0800 /* */ +#define DMA_MEM_WRITE_INVAL 0x0400 /* Memory write and invalidate */ +#define DMA_FIFO_CTRL 0x0300 /* Control FIFO operation with DMA */ +#define DMA_FIFO_HALF_HALF 0x0200 /* Keep half filled on both read/write */ + +#define TRM_S1040_DMA_XCNT 0xA8 /* DMA Transfer Counter (R/W), 24bits */ +#define TRM_S1040_DMA_CXCNT 0xAC /* DMA Current Transfer Counter (R) */ +#define TRM_S1040_DMA_XLOWADDR 0xB0 /* DMA Transfer Physical Low Address */ +#define TRM_S1040_DMA_XHIGHADDR 0xB4 /* DMA Transfer Physical High Address */ + +/************************************************************************/ +/* */ +/* The general register offset for TRM_S1040 */ +/* */ +/************************************************************************/ +#define TRM_S1040_GEN_CONTROL 0xD4 /* Global Control */ +#define CTRL_LED 0x80 /* Control onboard LED */ +#define EN_EEPROM 0x10 /* Enable EEPROM programming */ +#define DIS_TERM 0x08 /* Disable onboard termination */ +#define AUTOTERM 0x04 /* Enable Auto SCSI terminator */ +#define LOW8TERM 0x02 /* Enable Lower 8 bit SCSI terminator */ +#define UP8TERM 0x01 /* Enable Upper 8 bit SCSI terminator */ + +#define TRM_S1040_GEN_STATUS 0xD5 /* Global Status */ +#define GTIMEOUT 0x80 /* Global timer reach 0 */ +#define EXT68HIGH 0x40 /* Higher 8 bit connected externally */ +#define INT68HIGH 0x20 /* Higher 8 bit connected internally */ +#define CON5068 0x10 /* External 50/68 pin connected (low) */ +#define CON68 0x08 /* Internal 68 pin connected (low) */ +#define CON50 0x04 /* Internal 50 pin connected (low!) */ +#define WIDESCSI 0x02 /* Wide SCSI card */ +#define STATUS_LOAD_DEFAULT 0x01 /* */ + +#define TRM_S1040_GEN_NVRAM 0xD6 /* Serial NON-VOLATILE RAM port */ +#define NVR_BITOUT 0x08 /* Serial data out */ +#define NVR_BITIN 0x04 /* Serial data in */ +#define NVR_CLOCK 0x02 /* Serial clock */ +#define NVR_SELECT 0x01 /* Serial select */ + +#define TRM_S1040_GEN_EDATA 0xD7 /* Parallel EEPROM data port */ +#define TRM_S1040_GEN_EADDRESS 0xD8 /* Parallel EEPROM address */ +#define TRM_S1040_GEN_TIMER 0xDB /* Global timer */ + +/************************************************************************/ +/* */ +/* NvmTarCfg0: Target configuration byte 0 :..pDCB->DevMode */ +/* */ +/************************************************************************/ +#define NTC_DO_WIDE_NEGO 0x20 /* Wide negotiate */ +#define NTC_DO_TAG_QUEUEING 0x10 /* Enable SCSI tag queuing */ +#define NTC_DO_SEND_START 0x08 /* Send start command SPINUP */ +#define NTC_DO_DISCONNECT 0x04 /* Enable SCSI disconnect */ +#define NTC_DO_SYNC_NEGO 0x02 /* Sync negotiation */ +#define NTC_DO_PARITY_CHK 0x01 /* (it should define at NAC) */ + /* Parity check enable */ + +/************************************************************************/ +/* */ +/* Nvram Initiater bits definition */ +/* */ +/************************************************************************/ +#if 0 +#define MORE2_DRV BIT0 +#define GREATER_1G BIT1 +#define RST_SCSI_BUS BIT2 +#define ACTIVE_NEGATION BIT3 +#define NO_SEEK BIT4 +#define LUN_CHECK BIT5 +#endif + +/************************************************************************/ +/* */ +/* Nvram Adapter Cfg bits definition */ +/* */ +/************************************************************************/ +#define NAC_SCANLUN 0x20 /* Include LUN as BIOS device */ +#define 
NAC_POWERON_SCSI_RESET 0x04 /* Power on reset enable */ +#define NAC_GREATER_1G 0x02 /* > 1G support enable */ +#define NAC_GT2DRIVES 0x01 /* Support more than 2 drives */ +/* #define NAC_DO_PARITY_CHK 0x08 */ /* Parity check enable */ + +#endif diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig new file mode 100644 index 000000000..368eb94c2 --- /dev/null +++ b/drivers/scsi/device_handler/Kconfig @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# SCSI Device Handler configuration +# + +menuconfig SCSI_DH + bool "SCSI Device Handlers" + depends on SCSI + default n + help + SCSI Device Handlers provide device specific support for + devices utilized in multipath configurations. Say Y here to + select support for specific hardware. + +config SCSI_DH_RDAC + tristate "LSI RDAC Device Handler" + depends on SCSI_DH && SCSI + help + If you have a LSI RDAC select y. Otherwise, say N. + +config SCSI_DH_HP_SW + tristate "HP/COMPAQ MSA Device Handler" + depends on SCSI_DH && SCSI + help + If you have a HP/COMPAQ MSA device that requires START_STOP to + be sent to start it and cannot upgrade the firmware then select y. + Otherwise, say N. + +config SCSI_DH_EMC + tristate "EMC CLARiiON Device Handler" + depends on SCSI_DH && SCSI + help + If you have a EMC CLARiiON select y. Otherwise, say N. + +config SCSI_DH_ALUA + tristate "SPC-3 ALUA Device Handler" + depends on SCSI_DH && SCSI + help + SCSI Device handler for generic SPC-3 Asymmetric Logical Unit + Access (ALUA). + diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile new file mode 100644 index 000000000..0a603aefd --- /dev/null +++ b/drivers/scsi/device_handler/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# SCSI Device Handler +# +obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o +obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o +obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o +obj-$(CONFIG_SCSI_DH_ALUA) += scsi_dh_alua.o diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c new file mode 100644 index 000000000..a226dc1b6 --- /dev/null +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -0,0 +1,1315 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Generic SCSI-3 ALUA SCSI Device Handler + * + * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH. + * All rights reserved. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ALUA_DH_NAME "alua" +#define ALUA_DH_VER "2.0" + +#define TPGS_SUPPORT_NONE 0x00 +#define TPGS_SUPPORT_OPTIMIZED 0x01 +#define TPGS_SUPPORT_NONOPTIMIZED 0x02 +#define TPGS_SUPPORT_STANDBY 0x04 +#define TPGS_SUPPORT_UNAVAILABLE 0x08 +#define TPGS_SUPPORT_LBA_DEPENDENT 0x10 +#define TPGS_SUPPORT_OFFLINE 0x40 +#define TPGS_SUPPORT_TRANSITION 0x80 +#define TPGS_SUPPORT_ALL 0xdf + +#define RTPG_FMT_MASK 0x70 +#define RTPG_FMT_EXT_HDR 0x10 + +#define TPGS_MODE_UNINITIALIZED -1 +#define TPGS_MODE_NONE 0x0 +#define TPGS_MODE_IMPLICIT 0x1 +#define TPGS_MODE_EXPLICIT 0x2 + +#define ALUA_RTPG_SIZE 128 +#define ALUA_FAILOVER_TIMEOUT 60 +#define ALUA_FAILOVER_RETRIES 5 +#define ALUA_RTPG_DELAY_MSECS 5 +#define ALUA_RTPG_RETRY_DELAY 2 + +/* device handler flags */ +#define ALUA_OPTIMIZE_STPG 0x01 +#define ALUA_RTPG_EXT_HDR_UNSUPP 0x02 +/* State machine flags */ +#define ALUA_PG_RUN_RTPG 0x10 +#define ALUA_PG_RUN_STPG 0x20 +#define ALUA_PG_RUNNING 0x40 + +static uint optimize_stpg; +module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0."); + +static LIST_HEAD(port_group_list); +static DEFINE_SPINLOCK(port_group_lock); +static struct workqueue_struct *kaluad_wq; + +struct alua_port_group { + struct kref kref; + struct rcu_head rcu; + struct list_head node; + struct list_head dh_list; + unsigned char device_id_str[256]; + int device_id_len; + int group_id; + int tpgs; + int state; + int pref; + int valid_states; + unsigned flags; /* used for optimizing STPG */ + unsigned char transition_tmo; + unsigned long expiry; + unsigned long interval; + struct delayed_work rtpg_work; + spinlock_t lock; + struct list_head rtpg_list; + struct scsi_device *rtpg_sdev; +}; + +struct alua_dh_data { + struct list_head node; + struct alua_port_group __rcu *pg; + int group_id; + spinlock_t pg_lock; + struct scsi_device *sdev; + int init_error; + struct mutex init_mutex; + bool disabled; +}; + +struct alua_queue_data { + struct list_head entry; + activate_complete callback_fn; + void *callback_data; +}; + +#define ALUA_POLICY_SWITCH_CURRENT 0 +#define ALUA_POLICY_SWITCH_ALL 1 + +static void alua_rtpg_work(struct work_struct *work); +static bool alua_rtpg_queue(struct alua_port_group *pg, + struct scsi_device *sdev, + struct alua_queue_data *qdata, bool force); +static void alua_check(struct scsi_device *sdev, bool force); + +static void release_port_group(struct kref *kref) +{ + struct alua_port_group *pg; + + pg = container_of(kref, struct alua_port_group, kref); + if (pg->rtpg_sdev) + flush_delayed_work(&pg->rtpg_work); + spin_lock(&port_group_lock); + list_del(&pg->node); + spin_unlock(&port_group_lock); + kfree_rcu(pg, rcu); +} + +/* + * submit_rtpg - Issue a REPORT TARGET GROUP STATES command + * @sdev: sdev the command should be sent to + */ +static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff, + int bufflen, struct scsi_sense_hdr *sshdr, int flags) +{ + u8 cdb[MAX_COMMAND_SIZE]; + blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV | + REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; + const struct scsi_exec_args exec_args = { + .sshdr = sshdr, + }; + + /* Prepare the command. 
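+ * REPORT TARGET PORT GROUPS is a MAINTENANCE IN (0xA3) command: the
+ * service action sits in the low bits of CDB byte 1, the requested
+ * parameter data format (extended header or not) in the upper bits of
+ * the same byte, and the allocation length is carried big-endian in
+ * CDB bytes 6-9, which is what the put_unaligned_be32() below fills in.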
*/ + memset(cdb, 0x0, MAX_COMMAND_SIZE); + cdb[0] = MAINTENANCE_IN; + if (!(flags & ALUA_RTPG_EXT_HDR_UNSUPP)) + cdb[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT; + else + cdb[1] = MI_REPORT_TARGET_PGS; + put_unaligned_be32(bufflen, &cdb[6]); + + return scsi_execute_cmd(sdev, cdb, opf, buff, bufflen, + ALUA_FAILOVER_TIMEOUT * HZ, + ALUA_FAILOVER_RETRIES, &exec_args); +} + +/* + * submit_stpg - Issue a SET TARGET PORT GROUP command + * + * Currently we're only setting the current target port group state + * to 'active/optimized' and let the array firmware figure out + * the states of the remaining groups. + */ +static int submit_stpg(struct scsi_device *sdev, int group_id, + struct scsi_sense_hdr *sshdr) +{ + u8 cdb[MAX_COMMAND_SIZE]; + unsigned char stpg_data[8]; + int stpg_len = 8; + blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV | + REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; + const struct scsi_exec_args exec_args = { + .sshdr = sshdr, + }; + + /* Prepare the data buffer */ + memset(stpg_data, 0, stpg_len); + stpg_data[4] = SCSI_ACCESS_STATE_OPTIMAL; + put_unaligned_be16(group_id, &stpg_data[6]); + + /* Prepare the command. */ + memset(cdb, 0x0, MAX_COMMAND_SIZE); + cdb[0] = MAINTENANCE_OUT; + cdb[1] = MO_SET_TARGET_PGS; + put_unaligned_be32(stpg_len, &cdb[6]); + + return scsi_execute_cmd(sdev, cdb, opf, stpg_data, + stpg_len, ALUA_FAILOVER_TIMEOUT * HZ, + ALUA_FAILOVER_RETRIES, &exec_args); +} + +static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, + int group_id) +{ + struct alua_port_group *pg; + + if (!id_str || !id_size || !strlen(id_str)) + return NULL; + + list_for_each_entry(pg, &port_group_list, node) { + if (pg->group_id != group_id) + continue; + if (!pg->device_id_len || pg->device_id_len != id_size) + continue; + if (strncmp(pg->device_id_str, id_str, id_size)) + continue; + if (!kref_get_unless_zero(&pg->kref)) + continue; + return pg; + } + + return NULL; +} + +/* + * alua_alloc_pg - Allocate a new port_group structure + * @sdev: scsi device + * @group_id: port group id + * @tpgs: target port group settings + * + * Allocate a new port_group structure for a given + * device. + */ +static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev, + int group_id, int tpgs) +{ + struct alua_port_group *pg, *tmp_pg; + + pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL); + if (!pg) + return ERR_PTR(-ENOMEM); + + pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str, + sizeof(pg->device_id_str)); + if (pg->device_id_len <= 0) { + /* + * TPGS supported but no device identification found. + * Generate private device identification. 
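+ * Since alua_find_get_pg() skips port groups with an empty
+ * device_id_str, such a group can never be matched by a later lookup;
+ * the LUN effectively gets a private port group of its own.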
+ */ + sdev_printk(KERN_INFO, sdev, + "%s: No device descriptors found\n", + ALUA_DH_NAME); + pg->device_id_str[0] = '\0'; + pg->device_id_len = 0; + } + pg->group_id = group_id; + pg->tpgs = tpgs; + pg->state = SCSI_ACCESS_STATE_OPTIMAL; + pg->valid_states = TPGS_SUPPORT_ALL; + if (optimize_stpg) + pg->flags |= ALUA_OPTIMIZE_STPG; + kref_init(&pg->kref); + INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work); + INIT_LIST_HEAD(&pg->rtpg_list); + INIT_LIST_HEAD(&pg->node); + INIT_LIST_HEAD(&pg->dh_list); + spin_lock_init(&pg->lock); + + spin_lock(&port_group_lock); + tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len, + group_id); + if (tmp_pg) { + spin_unlock(&port_group_lock); + kfree(pg); + return tmp_pg; + } + + list_add(&pg->node, &port_group_list); + spin_unlock(&port_group_lock); + + return pg; +} + +/* + * alua_check_tpgs - Evaluate TPGS setting + * @sdev: device to be checked + * + * Examine the TPGS setting of the sdev to find out if ALUA + * is supported. + */ +static int alua_check_tpgs(struct scsi_device *sdev) +{ + int tpgs = TPGS_MODE_NONE; + + /* + * ALUA support for non-disk devices is fraught with + * difficulties, so disable it for now. + */ + if (sdev->type != TYPE_DISK) { + sdev_printk(KERN_INFO, sdev, + "%s: disable for non-disk devices\n", + ALUA_DH_NAME); + return tpgs; + } + + tpgs = scsi_device_tpgs(sdev); + switch (tpgs) { + case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT: + sdev_printk(KERN_INFO, sdev, + "%s: supports implicit and explicit TPGS\n", + ALUA_DH_NAME); + break; + case TPGS_MODE_EXPLICIT: + sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n", + ALUA_DH_NAME); + break; + case TPGS_MODE_IMPLICIT: + sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n", + ALUA_DH_NAME); + break; + case TPGS_MODE_NONE: + sdev_printk(KERN_INFO, sdev, "%s: not supported\n", + ALUA_DH_NAME); + break; + default: + sdev_printk(KERN_INFO, sdev, + "%s: unsupported TPGS setting %d\n", + ALUA_DH_NAME, tpgs); + tpgs = TPGS_MODE_NONE; + break; + } + + return tpgs; +} + +/* + * alua_check_vpd - Evaluate INQUIRY vpd page 0x83 + * @sdev: device to be checked + * + * Extract the relative target port and the target port group + * descriptor from the list of identificators. + */ +static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, + int tpgs) +{ + int rel_port = -1, group_id; + struct alua_port_group *pg, *old_pg = NULL; + bool pg_updated = false; + unsigned long flags; + + group_id = scsi_vpd_tpg_id(sdev, &rel_port); + if (group_id < 0) { + /* + * Internal error; TPGS supported but required + * VPD identification descriptors not present. + * Disable ALUA support + */ + sdev_printk(KERN_INFO, sdev, + "%s: No target port descriptors found\n", + ALUA_DH_NAME); + return SCSI_DH_DEV_UNSUPP; + } + + pg = alua_alloc_pg(sdev, group_id, tpgs); + if (IS_ERR(pg)) { + if (PTR_ERR(pg) == -ENOMEM) + return SCSI_DH_NOMEM; + return SCSI_DH_DEV_UNSUPP; + } + if (pg->device_id_len) + sdev_printk(KERN_INFO, sdev, + "%s: device %s port group %x rel port %x\n", + ALUA_DH_NAME, pg->device_id_str, + group_id, rel_port); + else + sdev_printk(KERN_INFO, sdev, + "%s: port group %x rel port %x\n", + ALUA_DH_NAME, group_id, rel_port); + + kref_get(&pg->kref); + + /* Check for existing port group references */ + spin_lock(&h->pg_lock); + old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); + if (old_pg != pg) { + /* port group has changed. 
Update to new port group */ + if (h->pg) { + spin_lock_irqsave(&old_pg->lock, flags); + list_del_rcu(&h->node); + spin_unlock_irqrestore(&old_pg->lock, flags); + } + rcu_assign_pointer(h->pg, pg); + pg_updated = true; + } + + spin_lock_irqsave(&pg->lock, flags); + if (pg_updated) + list_add_rcu(&h->node, &pg->dh_list); + spin_unlock_irqrestore(&pg->lock, flags); + + spin_unlock(&h->pg_lock); + + alua_rtpg_queue(pg, sdev, NULL, true); + kref_put(&pg->kref, release_port_group); + + if (old_pg) + kref_put(&old_pg->kref, release_port_group); + + return SCSI_DH_OK; +} + +static char print_alua_state(unsigned char state) +{ + switch (state) { + case SCSI_ACCESS_STATE_OPTIMAL: + return 'A'; + case SCSI_ACCESS_STATE_ACTIVE: + return 'N'; + case SCSI_ACCESS_STATE_STANDBY: + return 'S'; + case SCSI_ACCESS_STATE_UNAVAILABLE: + return 'U'; + case SCSI_ACCESS_STATE_LBA: + return 'L'; + case SCSI_ACCESS_STATE_OFFLINE: + return 'O'; + case SCSI_ACCESS_STATE_TRANSITIONING: + return 'T'; + default: + return 'X'; + } +} + +static enum scsi_disposition alua_check_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sense_hdr) +{ + struct alua_dh_data *h = sdev->handler_data; + struct alua_port_group *pg; + + switch (sense_hdr->sense_key) { + case NOT_READY: + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) { + /* + * LUN Not Accessible - ALUA state transition + */ + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (pg) + pg->state = SCSI_ACCESS_STATE_TRANSITIONING; + rcu_read_unlock(); + alua_check(sdev, false); + return NEEDS_RETRY; + } + break; + case UNIT_ATTENTION: + if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) { + /* + * Power On, Reset, or Bus Device Reset. + * Might have obscured a state transition, + * so schedule a recheck. + */ + alua_check(sdev, true); + return ADD_TO_MLQUEUE; + } + if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04) + /* + * Device internal reset + */ + return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01) + /* + * Mode Parameters Changed + */ + return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) { + /* + * ALUA state changed + */ + alua_check(sdev, true); + return ADD_TO_MLQUEUE; + } + if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) { + /* + * Implicit ALUA state transition failed + */ + alua_check(sdev, true); + return ADD_TO_MLQUEUE; + } + if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03) + /* + * Inquiry data has changed + */ + return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e) + /* + * REPORTED_LUNS_DATA_HAS_CHANGED is reported + * when switching controllers on targets like + * Intel Multi-Flex. We can just retry. + */ + return ADD_TO_MLQUEUE; + break; + } + + return SCSI_RETURN_NOT_HANDLED; +} + +/* + * alua_tur - Send a TEST UNIT READY + * @sdev: device to which the TEST UNIT READY command should be send + * + * Send a TEST UNIT READY to @sdev to figure out the device state + * Returns SCSI_DH_RETRY if the sense code is NOT READY/ALUA TRANSITIONING, + * SCSI_DH_OK if no error occurred, and SCSI_DH_IO otherwise. 
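+ * The NOT READY/0x04/0x0a sense combination checked here is the
+ * standard "logical unit not accessible, asymmetric access state
+ * transition" code, i.e. the target is still moving the port group
+ * to its new state and the TEST UNIT READY should be retried later.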
+ */ +static int alua_tur(struct scsi_device *sdev) +{ + struct scsi_sense_hdr sense_hdr; + int retval; + + retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ, + ALUA_FAILOVER_RETRIES, &sense_hdr); + if (sense_hdr.sense_key == NOT_READY && + sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) + return SCSI_DH_RETRY; + else if (retval) + return SCSI_DH_IO; + else + return SCSI_DH_OK; +} + +/* + * alua_rtpg - Evaluate REPORT TARGET GROUP STATES + * @sdev: the device to be evaluated. + * + * Evaluate the Target Port Group State. + * Returns SCSI_DH_DEV_OFFLINED if the path is + * found to be unusable. + */ +static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) +{ + struct scsi_sense_hdr sense_hdr; + struct alua_port_group *tmp_pg; + int len, k, off, bufflen = ALUA_RTPG_SIZE; + int group_id_old, state_old, pref_old, valid_states_old; + unsigned char *desc, *buff; + unsigned err; + int retval; + unsigned int tpg_desc_tbl_off; + unsigned char orig_transition_tmo; + unsigned long flags; + bool transitioning_sense = false; + + group_id_old = pg->group_id; + state_old = pg->state; + pref_old = pg->pref; + valid_states_old = pg->valid_states; + + if (!pg->expiry) { + unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ; + + if (pg->transition_tmo) + transition_tmo = pg->transition_tmo * HZ; + + pg->expiry = round_jiffies_up(jiffies + transition_tmo); + } + + buff = kzalloc(bufflen, GFP_KERNEL); + if (!buff) + return SCSI_DH_DEV_TEMP_BUSY; + + retry: + err = 0; + retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags); + + if (retval) { + /* + * Some (broken) implementations have a habit of returning + * an error during things like firmware update etc. + * But if the target only supports active/optimized there's + * not much we can do; it's not that we can switch paths + * or anything. + * So ignore any errors to avoid spurious failures during + * path failover. + */ + if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) { + sdev_printk(KERN_INFO, sdev, + "%s: ignoring rtpg result %d\n", + ALUA_DH_NAME, retval); + kfree(buff); + return SCSI_DH_OK; + } + if (retval < 0 || !scsi_sense_valid(&sense_hdr)) { + sdev_printk(KERN_INFO, sdev, + "%s: rtpg failed, result %d\n", + ALUA_DH_NAME, retval); + kfree(buff); + if (retval < 0) + return SCSI_DH_DEV_TEMP_BUSY; + if (host_byte(retval) == DID_NO_CONNECT) + return SCSI_DH_RES_TEMP_UNAVAIL; + return SCSI_DH_IO; + } + + /* + * submit_rtpg() has failed on existing arrays + * when requesting extended header info, and + * the array doesn't support extended headers, + * even though it shouldn't according to T10. + * The retry without rtpg_ext_hdr_req set + * handles this. + * Note: some arrays return a sense key of ILLEGAL_REQUEST + * with ASC 00h if they don't support the extended header. + */ + if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) && + sense_hdr.sense_key == ILLEGAL_REQUEST) { + pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP; + goto retry; + } + /* + * If the array returns with 'ALUA state transition' + * sense code here it cannot return RTPG data during + * transition. So set the state to 'transitioning' directly. + */ + if (sense_hdr.sense_key == NOT_READY && + sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) { + transitioning_sense = true; + goto skip_rtpg; + } + /* + * Retry on any other UNIT ATTENTION occurred. 
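+ * Such a unit attention is treated as a transient condition: the
+ * RTPG is retried until pg->expiry runs out, after which the failure
+ * is reported as SCSI_DH_IO.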
+ */ + if (sense_hdr.sense_key == UNIT_ATTENTION) + err = SCSI_DH_RETRY; + if (err == SCSI_DH_RETRY && + pg->expiry != 0 && time_before(jiffies, pg->expiry)) { + sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n", + ALUA_DH_NAME); + scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr); + kfree(buff); + return err; + } + sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n", + ALUA_DH_NAME); + scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr); + kfree(buff); + pg->expiry = 0; + return SCSI_DH_IO; + } + + len = get_unaligned_be32(&buff[0]) + 4; + + if (len > bufflen) { + /* Resubmit with the correct length */ + kfree(buff); + bufflen = len; + buff = kmalloc(bufflen, GFP_KERNEL); + if (!buff) { + sdev_printk(KERN_WARNING, sdev, + "%s: kmalloc buffer failed\n",__func__); + /* Temporary failure, bypass */ + pg->expiry = 0; + return SCSI_DH_DEV_TEMP_BUSY; + } + goto retry; + } + + orig_transition_tmo = pg->transition_tmo; + if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && buff[5] != 0) + pg->transition_tmo = buff[5]; + else + pg->transition_tmo = ALUA_FAILOVER_TIMEOUT; + + if (orig_transition_tmo != pg->transition_tmo) { + sdev_printk(KERN_INFO, sdev, + "%s: transition timeout set to %d seconds\n", + ALUA_DH_NAME, pg->transition_tmo); + pg->expiry = jiffies + pg->transition_tmo * HZ; + } + + if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR) + tpg_desc_tbl_off = 8; + else + tpg_desc_tbl_off = 4; + + for (k = tpg_desc_tbl_off, desc = buff + tpg_desc_tbl_off; + k < len; + k += off, desc += off) { + u16 group_id = get_unaligned_be16(&desc[2]); + + spin_lock_irqsave(&port_group_lock, flags); + tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len, + group_id); + spin_unlock_irqrestore(&port_group_lock, flags); + if (tmp_pg) { + if (spin_trylock_irqsave(&tmp_pg->lock, flags)) { + if ((tmp_pg == pg) || + !(tmp_pg->flags & ALUA_PG_RUNNING)) { + struct alua_dh_data *h; + + tmp_pg->state = desc[0] & 0x0f; + tmp_pg->pref = desc[0] >> 7; + rcu_read_lock(); + list_for_each_entry_rcu(h, + &tmp_pg->dh_list, node) { + if (!h->sdev) + continue; + h->sdev->access_state = desc[0]; + } + rcu_read_unlock(); + } + if (tmp_pg == pg) + tmp_pg->valid_states = desc[1]; + spin_unlock_irqrestore(&tmp_pg->lock, flags); + } + kref_put(&tmp_pg->kref, release_port_group); + } + off = 8 + (desc[7] * 4); + } + + skip_rtpg: + spin_lock_irqsave(&pg->lock, flags); + if (transitioning_sense) + pg->state = SCSI_ACCESS_STATE_TRANSITIONING; + + if (group_id_old != pg->group_id || state_old != pg->state || + pref_old != pg->pref || valid_states_old != pg->valid_states) + sdev_printk(KERN_INFO, sdev, + "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n", + ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state), + pg->pref ? 
"preferred" : "non-preferred", + pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t', + pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o', + pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l', + pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u', + pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s', + pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n', + pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a'); + + switch (pg->state) { + case SCSI_ACCESS_STATE_TRANSITIONING: + if (time_before(jiffies, pg->expiry)) { + /* State transition, retry */ + pg->interval = ALUA_RTPG_RETRY_DELAY; + err = SCSI_DH_RETRY; + } else { + struct alua_dh_data *h; + + /* Transitioning time exceeded, set port to standby */ + err = SCSI_DH_IO; + pg->state = SCSI_ACCESS_STATE_STANDBY; + pg->expiry = 0; + rcu_read_lock(); + list_for_each_entry_rcu(h, &pg->dh_list, node) { + if (!h->sdev) + continue; + h->sdev->access_state = + (pg->state & SCSI_ACCESS_STATE_MASK); + if (pg->pref) + h->sdev->access_state |= + SCSI_ACCESS_STATE_PREFERRED; + } + rcu_read_unlock(); + } + break; + case SCSI_ACCESS_STATE_OFFLINE: + /* Path unusable */ + err = SCSI_DH_DEV_OFFLINED; + pg->expiry = 0; + break; + default: + /* Useable path if active */ + err = SCSI_DH_OK; + pg->expiry = 0; + break; + } + spin_unlock_irqrestore(&pg->lock, flags); + kfree(buff); + return err; +} + +/* + * alua_stpg - Issue a SET TARGET PORT GROUP command + * + * Issue a SET TARGET PORT GROUP command and evaluate the + * response. Returns SCSI_DH_RETRY per default to trigger + * a re-evaluation of the target group state or SCSI_DH_OK + * if no further action needs to be taken. + */ +static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg) +{ + int retval; + struct scsi_sense_hdr sense_hdr; + + if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) { + /* Only implicit ALUA supported, retry */ + return SCSI_DH_RETRY; + } + switch (pg->state) { + case SCSI_ACCESS_STATE_OPTIMAL: + return SCSI_DH_OK; + case SCSI_ACCESS_STATE_ACTIVE: + if ((pg->flags & ALUA_OPTIMIZE_STPG) && + !pg->pref && + (pg->tpgs & TPGS_MODE_IMPLICIT)) + return SCSI_DH_OK; + break; + case SCSI_ACCESS_STATE_STANDBY: + case SCSI_ACCESS_STATE_UNAVAILABLE: + break; + case SCSI_ACCESS_STATE_OFFLINE: + return SCSI_DH_IO; + case SCSI_ACCESS_STATE_TRANSITIONING: + break; + default: + sdev_printk(KERN_INFO, sdev, + "%s: stpg failed, unhandled TPGS state %d", + ALUA_DH_NAME, pg->state); + return SCSI_DH_NOSYS; + } + retval = submit_stpg(sdev, pg->group_id, &sense_hdr); + + if (retval) { + if (retval < 0 || !scsi_sense_valid(&sense_hdr)) { + sdev_printk(KERN_INFO, sdev, + "%s: stpg failed, result %d", + ALUA_DH_NAME, retval); + if (retval < 0) + return SCSI_DH_DEV_TEMP_BUSY; + } else { + sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n", + ALUA_DH_NAME); + scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr); + } + } + /* Retry RTPG */ + return SCSI_DH_RETRY; +} + +/* + * The caller must call scsi_device_put() on the returned pointer if it is not + * NULL. + */ +static struct scsi_device * __must_check +alua_rtpg_select_sdev(struct alua_port_group *pg) +{ + struct alua_dh_data *h; + struct scsi_device *sdev = NULL, *prev_sdev; + + lockdep_assert_held(&pg->lock); + if (WARN_ON(!pg->rtpg_sdev)) + return NULL; + + /* + * RCU protection isn't necessary for dh_list here + * as we hold pg->lock, but for access to h->pg. 
+ */ + rcu_read_lock(); + list_for_each_entry_rcu(h, &pg->dh_list, node) { + if (!h->sdev) + continue; + if (h->sdev == pg->rtpg_sdev) { + h->disabled = true; + continue; + } + if (rcu_dereference(h->pg) == pg && + !h->disabled && + !scsi_device_get(h->sdev)) { + sdev = h->sdev; + break; + } + } + rcu_read_unlock(); + + if (!sdev) { + pr_warn("%s: no device found for rtpg\n", + (pg->device_id_len ? + (char *)pg->device_id_str : "(nameless PG)")); + return NULL; + } + + sdev_printk(KERN_INFO, sdev, "rtpg retry on different device\n"); + + prev_sdev = pg->rtpg_sdev; + pg->rtpg_sdev = sdev; + + return prev_sdev; +} + +static void alua_rtpg_work(struct work_struct *work) +{ + struct alua_port_group *pg = + container_of(work, struct alua_port_group, rtpg_work.work); + struct scsi_device *sdev, *prev_sdev = NULL; + LIST_HEAD(qdata_list); + int err = SCSI_DH_OK; + struct alua_queue_data *qdata, *tmp; + struct alua_dh_data *h; + unsigned long flags; + + spin_lock_irqsave(&pg->lock, flags); + sdev = pg->rtpg_sdev; + if (!sdev) { + WARN_ON(pg->flags & ALUA_PG_RUN_RTPG); + WARN_ON(pg->flags & ALUA_PG_RUN_STPG); + spin_unlock_irqrestore(&pg->lock, flags); + kref_put(&pg->kref, release_port_group); + return; + } + pg->flags |= ALUA_PG_RUNNING; + if (pg->flags & ALUA_PG_RUN_RTPG) { + int state = pg->state; + + pg->flags &= ~ALUA_PG_RUN_RTPG; + spin_unlock_irqrestore(&pg->lock, flags); + if (state == SCSI_ACCESS_STATE_TRANSITIONING) { + if (alua_tur(sdev) == SCSI_DH_RETRY) { + spin_lock_irqsave(&pg->lock, flags); + pg->flags &= ~ALUA_PG_RUNNING; + pg->flags |= ALUA_PG_RUN_RTPG; + if (!pg->interval) + pg->interval = ALUA_RTPG_RETRY_DELAY; + spin_unlock_irqrestore(&pg->lock, flags); + queue_delayed_work(kaluad_wq, &pg->rtpg_work, + pg->interval * HZ); + return; + } + /* Send RTPG on failure or if TUR indicates SUCCESS */ + } + err = alua_rtpg(sdev, pg); + spin_lock_irqsave(&pg->lock, flags); + + /* If RTPG failed on the current device, try using another */ + if (err == SCSI_DH_RES_TEMP_UNAVAIL && + (prev_sdev = alua_rtpg_select_sdev(pg))) + err = SCSI_DH_IMM_RETRY; + + if (err == SCSI_DH_RETRY || err == SCSI_DH_IMM_RETRY || + pg->flags & ALUA_PG_RUN_RTPG) { + pg->flags &= ~ALUA_PG_RUNNING; + if (err == SCSI_DH_IMM_RETRY) + pg->interval = 0; + else if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG)) + pg->interval = ALUA_RTPG_RETRY_DELAY; + pg->flags |= ALUA_PG_RUN_RTPG; + spin_unlock_irqrestore(&pg->lock, flags); + goto queue_rtpg; + } + if (err != SCSI_DH_OK) + pg->flags &= ~ALUA_PG_RUN_STPG; + } + if (pg->flags & ALUA_PG_RUN_STPG) { + pg->flags &= ~ALUA_PG_RUN_STPG; + spin_unlock_irqrestore(&pg->lock, flags); + err = alua_stpg(sdev, pg); + spin_lock_irqsave(&pg->lock, flags); + if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) { + pg->flags |= ALUA_PG_RUN_RTPG; + pg->interval = 0; + pg->flags &= ~ALUA_PG_RUNNING; + spin_unlock_irqrestore(&pg->lock, flags); + goto queue_rtpg; + } + } + + list_splice_init(&pg->rtpg_list, &qdata_list); + /* + * We went through an RTPG, for good or bad. + * Re-enable all devices for the next attempt. 
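+ * pg->lock is still held at this point, so walking dh_list without
+ * RCU protection is safe here.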
+ */ + list_for_each_entry(h, &pg->dh_list, node) + h->disabled = false; + pg->rtpg_sdev = NULL; + spin_unlock_irqrestore(&pg->lock, flags); + + if (prev_sdev) + scsi_device_put(prev_sdev); + + list_for_each_entry_safe(qdata, tmp, &qdata_list, entry) { + list_del(&qdata->entry); + if (qdata->callback_fn) + qdata->callback_fn(qdata->callback_data, err); + kfree(qdata); + } + spin_lock_irqsave(&pg->lock, flags); + pg->flags &= ~ALUA_PG_RUNNING; + spin_unlock_irqrestore(&pg->lock, flags); + scsi_device_put(sdev); + kref_put(&pg->kref, release_port_group); + return; + +queue_rtpg: + if (prev_sdev) + scsi_device_put(prev_sdev); + queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ); +} + +/** + * alua_rtpg_queue() - cause RTPG to be submitted asynchronously + * @pg: ALUA port group associated with @sdev. + * @sdev: SCSI device for which to submit an RTPG. + * @qdata: Information about the callback to invoke after the RTPG. + * @force: Whether or not to submit an RTPG if a work item that will submit an + * RTPG already has been scheduled. + * + * Returns true if and only if alua_rtpg_work() will be called asynchronously. + * That function is responsible for calling @qdata->fn(). + * + * Context: may be called from atomic context (alua_check()) only if the caller + * holds an sdev reference. + */ +static bool alua_rtpg_queue(struct alua_port_group *pg, + struct scsi_device *sdev, + struct alua_queue_data *qdata, bool force) +{ + int start_queue = 0; + unsigned long flags; + + if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev)) + return false; + + spin_lock_irqsave(&pg->lock, flags); + if (qdata) { + list_add_tail(&qdata->entry, &pg->rtpg_list); + pg->flags |= ALUA_PG_RUN_STPG; + force = true; + } + if (pg->rtpg_sdev == NULL) { + struct alua_dh_data *h = sdev->handler_data; + + rcu_read_lock(); + if (h && rcu_dereference(h->pg) == pg) { + pg->interval = 0; + pg->flags |= ALUA_PG_RUN_RTPG; + kref_get(&pg->kref); + pg->rtpg_sdev = sdev; + start_queue = 1; + } + rcu_read_unlock(); + } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) { + pg->flags |= ALUA_PG_RUN_RTPG; + /* Do not queue if the worker is already running */ + if (!(pg->flags & ALUA_PG_RUNNING)) { + kref_get(&pg->kref); + start_queue = 1; + } + } + + spin_unlock_irqrestore(&pg->lock, flags); + + if (start_queue) { + if (queue_delayed_work(kaluad_wq, &pg->rtpg_work, + msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) + sdev = NULL; + else + kref_put(&pg->kref, release_port_group); + } + if (sdev) + scsi_device_put(sdev); + + return true; +} + +/* + * alua_initialize - Initialize ALUA state + * @sdev: the device to be initialized + * + * For the prep_fn to work correctly we have + * to initialize the ALUA state for the device. 
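+ * This is called both at attach time and from alua_rescan(), so it
+ * must cope with being re-run on an already initialized device;
+ * init_mutex serializes concurrent callers.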
+ */ +static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h) +{ + int err = SCSI_DH_DEV_UNSUPP, tpgs; + + mutex_lock(&h->init_mutex); + h->disabled = false; + tpgs = alua_check_tpgs(sdev); + if (tpgs != TPGS_MODE_NONE) + err = alua_check_vpd(sdev, h, tpgs); + h->init_error = err; + mutex_unlock(&h->init_mutex); + return err; +} +/* + * alua_set_params - set/unset the optimize flag + * @sdev: device on the path to be activated + * params - parameters in the following format + * "no_of_params\0param1\0param2\0param3\0...\0" + * For example, to set the flag pass the following parameters + * from multipath.conf + * hardware_handler "2 alua 1" + */ +static int alua_set_params(struct scsi_device *sdev, const char *params) +{ + struct alua_dh_data *h = sdev->handler_data; + struct alua_port_group *pg = NULL; + unsigned int optimize = 0, argc; + const char *p = params; + int result = SCSI_DH_OK; + unsigned long flags; + + if ((sscanf(params, "%u", &argc) != 1) || (argc != 1)) + return -EINVAL; + + while (*p++) + ; + if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1)) + return -EINVAL; + + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (!pg) { + rcu_read_unlock(); + return -ENXIO; + } + spin_lock_irqsave(&pg->lock, flags); + if (optimize) + pg->flags |= ALUA_OPTIMIZE_STPG; + else + pg->flags &= ~ALUA_OPTIMIZE_STPG; + spin_unlock_irqrestore(&pg->lock, flags); + rcu_read_unlock(); + + return result; +} + +/* + * alua_activate - activate a path + * @sdev: device on the path to be activated + * + * We're currently switching the port group to be activated only and + * let the array figure out the rest. + * There may be other arrays which require us to switch all port groups + * based on a certain policy. But until we actually encounter them it + * should be okay. + */ +static int alua_activate(struct scsi_device *sdev, + activate_complete fn, void *data) +{ + struct alua_dh_data *h = sdev->handler_data; + int err = SCSI_DH_OK; + struct alua_queue_data *qdata; + struct alua_port_group *pg; + + qdata = kzalloc(sizeof(*qdata), GFP_KERNEL); + if (!qdata) { + err = SCSI_DH_RES_TEMP_UNAVAIL; + goto out; + } + qdata->callback_fn = fn; + qdata->callback_data = data; + + mutex_lock(&h->init_mutex); + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (!pg || !kref_get_unless_zero(&pg->kref)) { + rcu_read_unlock(); + kfree(qdata); + err = h->init_error; + mutex_unlock(&h->init_mutex); + goto out; + } + rcu_read_unlock(); + mutex_unlock(&h->init_mutex); + + if (alua_rtpg_queue(pg, sdev, qdata, true)) { + fn = NULL; + } else { + kfree(qdata); + err = SCSI_DH_DEV_OFFLINED; + } + kref_put(&pg->kref, release_port_group); +out: + if (fn) + fn(data, err); + return 0; +} + +/* + * alua_check - check path status + * @sdev: device on the path to be checked + * + * Check the device status + */ +static void alua_check(struct scsi_device *sdev, bool force) +{ + struct alua_dh_data *h = sdev->handler_data; + struct alua_port_group *pg; + + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (!pg || !kref_get_unless_zero(&pg->kref)) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + alua_rtpg_queue(pg, sdev, NULL, force); + kref_put(&pg->kref, release_port_group); +} + +/* + * alua_prep_fn - request callback + * + * Fail I/O to all paths not in state + * active/optimized or active/non-optimized. 
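+ * Paths in the lba-dependent or transitioning states are also let
+ * through (see the switch below): usability of an lba-dependent path
+ * depends on the block range being addressed, and a transitioning
+ * path is sorted out by the sense handling and RTPG retry logic.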
+ */ +static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req) +{ + struct alua_dh_data *h = sdev->handler_data; + struct alua_port_group *pg; + unsigned char state = SCSI_ACCESS_STATE_OPTIMAL; + + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (pg) + state = pg->state; + rcu_read_unlock(); + + switch (state) { + case SCSI_ACCESS_STATE_OPTIMAL: + case SCSI_ACCESS_STATE_ACTIVE: + case SCSI_ACCESS_STATE_LBA: + case SCSI_ACCESS_STATE_TRANSITIONING: + return BLK_STS_OK; + default: + req->rq_flags |= RQF_QUIET; + return BLK_STS_IOERR; + } +} + +static void alua_rescan(struct scsi_device *sdev) +{ + struct alua_dh_data *h = sdev->handler_data; + + alua_initialize(sdev, h); +} + +/* + * alua_bus_attach - Attach device handler + * @sdev: device to be attached to + */ +static int alua_bus_attach(struct scsi_device *sdev) +{ + struct alua_dh_data *h; + int err; + + h = kzalloc(sizeof(*h) , GFP_KERNEL); + if (!h) + return SCSI_DH_NOMEM; + spin_lock_init(&h->pg_lock); + rcu_assign_pointer(h->pg, NULL); + h->init_error = SCSI_DH_OK; + h->sdev = sdev; + INIT_LIST_HEAD(&h->node); + + mutex_init(&h->init_mutex); + err = alua_initialize(sdev, h); + if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED) + goto failed; + + sdev->handler_data = h; + return SCSI_DH_OK; +failed: + kfree(h); + return err; +} + +/* + * alua_bus_detach - Detach device handler + * @sdev: device to be detached from + */ +static void alua_bus_detach(struct scsi_device *sdev) +{ + struct alua_dh_data *h = sdev->handler_data; + struct alua_port_group *pg; + + spin_lock(&h->pg_lock); + pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); + rcu_assign_pointer(h->pg, NULL); + spin_unlock(&h->pg_lock); + if (pg) { + spin_lock_irq(&pg->lock); + list_del_rcu(&h->node); + spin_unlock_irq(&pg->lock); + kref_put(&pg->kref, release_port_group); + } + sdev->handler_data = NULL; + synchronize_rcu(); + kfree(h); +} + +static struct scsi_device_handler alua_dh = { + .name = ALUA_DH_NAME, + .module = THIS_MODULE, + .attach = alua_bus_attach, + .detach = alua_bus_detach, + .prep_fn = alua_prep_fn, + .check_sense = alua_check_sense, + .activate = alua_activate, + .rescan = alua_rescan, + .set_params = alua_set_params, +}; + +static int __init alua_init(void) +{ + int r; + + kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0); + if (!kaluad_wq) + return -ENOMEM; + + r = scsi_register_device_handler(&alua_dh); + if (r != 0) { + printk(KERN_ERR "%s: Failed to register scsi device handler", + ALUA_DH_NAME); + destroy_workqueue(kaluad_wq); + } + return r; +} + +static void __exit alua_exit(void) +{ + scsi_unregister_device_handler(&alua_dh); + destroy_workqueue(kaluad_wq); +} + +module_init(alua_init); +module_exit(alua_exit); + +MODULE_DESCRIPTION("DM Multipath ALUA support"); +MODULE_AUTHOR("Hannes Reinecke "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ALUA_DH_VER); diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c new file mode 100644 index 000000000..3cf88db2d --- /dev/null +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -0,0 +1,548 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Target driver for EMC CLARiiON AX/CX-series hardware. + * Based on code from Lars Marowsky-Bree + * and Ed Goggin . + * + * Copyright (C) 2006 Red Hat, Inc. All rights reserved. 
+ * Copyright (C) 2006 Mike Christie + */ +#include +#include +#include +#include +#include +#include + +#define CLARIION_NAME "emc" + +#define CLARIION_TRESPASS_PAGE 0x22 +#define CLARIION_BUFFER_SIZE 0xFC +#define CLARIION_TIMEOUT (60 * HZ) +#define CLARIION_RETRIES 3 +#define CLARIION_UNBOUND_LU -1 +#define CLARIION_SP_A 0 +#define CLARIION_SP_B 1 + +/* Flags */ +#define CLARIION_SHORT_TRESPASS 1 +#define CLARIION_HONOR_RESERVATIONS 2 + +/* LUN states */ +#define CLARIION_LUN_UNINITIALIZED -1 +#define CLARIION_LUN_UNBOUND 0 +#define CLARIION_LUN_BOUND 1 +#define CLARIION_LUN_OWNED 2 + +static unsigned char long_trespass[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + CLARIION_TRESPASS_PAGE, /* Page code */ + 0x09, /* Page length - 2 */ + 0x01, /* Trespass code */ + 0xff, 0xff, /* Trespass target */ + 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */ +}; + +static unsigned char short_trespass[] = { + 0, 0, 0, 0, + CLARIION_TRESPASS_PAGE, /* Page code */ + 0x02, /* Page length - 2 */ + 0x01, /* Trespass code */ + 0xff, /* Trespass target */ +}; + +static const char * lun_state[] = +{ + "not bound", + "bound", + "owned", +}; + +struct clariion_dh_data { + /* + * Flags: + * CLARIION_SHORT_TRESPASS + * Use short trespass command (FC-series) or the long version + * (default for AX/CX CLARiiON arrays). + * + * CLARIION_HONOR_RESERVATIONS + * Whether or not (default) to honor SCSI reservations when + * initiating a switch-over. + */ + unsigned flags; + /* + * I/O buffer for both MODE_SELECT and INQUIRY commands. + */ + unsigned char buffer[CLARIION_BUFFER_SIZE]; + /* + * LUN state + */ + int lun_state; + /* + * SP Port number + */ + int port; + /* + * which SP (A=0,B=1,UNBOUND=-1) is the default SP for this + * path's mapped LUN + */ + int default_sp; + /* + * which SP (A=0,B=1,UNBOUND=-1) is the active SP for this + * path's mapped LUN + */ + int current_sp; +}; + +/* + * Parse MODE_SELECT cmd reply. + */ +static int trespass_endio(struct scsi_device *sdev, + struct scsi_sense_hdr *sshdr) +{ + int err = SCSI_DH_IO; + + sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, " + "0x%2x, 0x%2x while sending CLARiiON trespass " + "command.\n", CLARIION_NAME, sshdr->sense_key, + sshdr->asc, sshdr->ascq); + + if (sshdr->sense_key == 0x05 && sshdr->asc == 0x04 && + sshdr->ascq == 0x00) { + /* + * Array based copy in progress -- do not send + * mode_select or copy will be aborted mid-stream. + */ + sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in " + "progress while sending CLARiiON trespass " + "command.\n", CLARIION_NAME); + err = SCSI_DH_DEV_TEMP_BUSY; + } else if (sshdr->sense_key == 0x02 && sshdr->asc == 0x04 && + sshdr->ascq == 0x03) { + /* + * LUN Not Ready - Manual Intervention Required + * indicates in-progress ucode upgrade (NDU). 
+ */ + sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress " + "ucode upgrade NDU operation while sending " + "CLARiiON trespass command.\n", CLARIION_NAME); + err = SCSI_DH_DEV_TEMP_BUSY; + } else + err = SCSI_DH_DEV_FAILED; + return err; +} + +static int parse_sp_info_reply(struct scsi_device *sdev, + struct clariion_dh_data *csdev) +{ + int err = SCSI_DH_OK; + + /* check for in-progress ucode upgrade (NDU) */ + if (csdev->buffer[48] != 0) { + sdev_printk(KERN_NOTICE, sdev, "%s: Detected in-progress " + "ucode upgrade NDU operation while finding " + "current active SP.", CLARIION_NAME); + err = SCSI_DH_DEV_TEMP_BUSY; + goto out; + } + if (csdev->buffer[4] > 2) { + /* Invalid buffer format */ + sdev_printk(KERN_NOTICE, sdev, + "%s: invalid VPD page 0xC0 format\n", + CLARIION_NAME); + err = SCSI_DH_NOSYS; + goto out; + } + switch (csdev->buffer[28] & 0x0f) { + case 6: + sdev_printk(KERN_NOTICE, sdev, + "%s: ALUA failover mode detected\n", + CLARIION_NAME); + break; + case 4: + /* Linux failover */ + break; + default: + sdev_printk(KERN_WARNING, sdev, + "%s: Invalid failover mode %d\n", + CLARIION_NAME, csdev->buffer[28] & 0x0f); + err = SCSI_DH_NOSYS; + goto out; + } + + csdev->default_sp = csdev->buffer[5]; + csdev->lun_state = csdev->buffer[4]; + csdev->current_sp = csdev->buffer[8]; + csdev->port = csdev->buffer[7]; + if (csdev->lun_state == CLARIION_LUN_OWNED) + sdev->access_state = SCSI_ACCESS_STATE_OPTIMAL; + else + sdev->access_state = SCSI_ACCESS_STATE_STANDBY; + if (csdev->default_sp == csdev->current_sp) + sdev->access_state |= SCSI_ACCESS_STATE_PREFERRED; +out: + return err; +} + +#define emc_default_str "FC (Legacy)" + +static char * parse_sp_model(struct scsi_device *sdev, unsigned char *buffer) +{ + unsigned char len = buffer[4] + 5; + char *sp_model = NULL; + unsigned char sp_len, serial_len; + + if (len < 160) { + sdev_printk(KERN_WARNING, sdev, + "%s: Invalid information section length %d\n", + CLARIION_NAME, len); + /* Check for old FC arrays */ + if (!strncmp(buffer + 8, "DGC", 3)) { + /* Old FC array, not supporting extended information */ + sp_model = emc_default_str; + } + goto out; + } + + /* + * Parse extended information for SP model number + */ + serial_len = buffer[160]; + if (serial_len == 0 || serial_len + 161 > len) { + sdev_printk(KERN_WARNING, sdev, + "%s: Invalid array serial number length %d\n", + CLARIION_NAME, serial_len); + goto out; + } + sp_len = buffer[99]; + if (sp_len == 0 || serial_len + sp_len + 161 > len) { + sdev_printk(KERN_WARNING, sdev, + "%s: Invalid model number length %d\n", + CLARIION_NAME, sp_len); + goto out; + } + sp_model = &buffer[serial_len + 161]; + /* Strip whitespace at the end */ + while (sp_len > 1 && sp_model[sp_len - 1] == ' ') + sp_len--; + + sp_model[sp_len] = '\0'; + +out: + return sp_model; +} + +static int send_trespass_cmd(struct scsi_device *sdev, + struct clariion_dh_data *csdev) +{ + unsigned char *page22; + unsigned char cdb[MAX_COMMAND_SIZE]; + int err, res = SCSI_DH_OK, len; + struct scsi_sense_hdr sshdr; + blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV | + REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + + if (csdev->flags & CLARIION_SHORT_TRESPASS) { + page22 = short_trespass; + if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS)) + /* Set Honor Reservations bit */ + page22[6] |= 0x80; + len = sizeof(short_trespass); + cdb[0] = MODE_SELECT; + cdb[1] = 0x10; + cdb[4] = len; + } else { + page22 = long_trespass; + if (!(csdev->flags & 
CLARIION_HONOR_RESERVATIONS)) + /* Set Honor Reservations bit */ + page22[10] |= 0x80; + len = sizeof(long_trespass); + cdb[0] = MODE_SELECT_10; + cdb[8] = len; + } + BUG_ON((len > CLARIION_BUFFER_SIZE)); + memcpy(csdev->buffer, page22, len); + + err = scsi_execute_cmd(sdev, cdb, opf, csdev->buffer, len, + CLARIION_TIMEOUT * HZ, CLARIION_RETRIES, + &exec_args); + if (err) { + if (scsi_sense_valid(&sshdr)) + res = trespass_endio(sdev, &sshdr); + else { + sdev_printk(KERN_INFO, sdev, + "%s: failed to send MODE SELECT: %x\n", + CLARIION_NAME, err); + res = SCSI_DH_IO; + } + } + + return res; +} + +static enum scsi_disposition clariion_check_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sense_hdr) +{ + switch (sense_hdr->sense_key) { + case NOT_READY: + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x03) + /* + * LUN Not Ready - Manual Intervention Required + * indicates this is a passive path. + * + * FIXME: However, if this is seen and EVPD C0 + * indicates that this is due to a NDU in + * progress, we should set FAIL_PATH too. + * This indicates we might have to do a SCSI + * inquiry in the end_io path. Ugh. + * + * Can return FAILED only when we want the error + * recovery process to kick in. + */ + return SUCCESS; + break; + case ILLEGAL_REQUEST: + if (sense_hdr->asc == 0x25 && sense_hdr->ascq == 0x01) + /* + * An array based copy is in progress. Do not + * fail the path, do not bypass to another PG, + * do not retry. Fail the IO immediately. + * (Actually this is the same conclusion as in + * the default handler, but lets make sure.) + * + * Can return FAILED only when we want the error + * recovery process to kick in. + */ + return SUCCESS; + break; + case UNIT_ATTENTION: + if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) + /* + * Unit Attention Code. This is the first IO + * to the new path, so just retry. 
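Editor's note: the short-trespass variant built by send_trespass_cmd() amounts to a MODE SELECT(6) whose parameter list is the page-0x22 buffer defined earlier in this file. The standalone sketch below only assembles and hex-dumps the CDB and page; opcode 0x15, the PF bit and the one-byte parameter-list length are standard SPC fields, and everything else (issuing the command, sense handling) stays with the driver code above.

#include <stdio.h>
#include <string.h>

#define TRESPASS_PAGE	0x22

static const unsigned char short_trespass[] = {
	0, 0, 0, 0,		/* mode parameter header */
	TRESPASS_PAGE,		/* page code */
	0x02,			/* page length - 2 */
	0x01,			/* trespass code */
	0xff,			/* trespass target */
};

int main(void)
{
	unsigned char cdb[6];
	size_t i;

	memset(cdb, 0, sizeof(cdb));
	cdb[0] = 0x15;				/* MODE SELECT(6) */
	cdb[1] = 0x10;				/* PF bit */
	cdb[4] = sizeof(short_trespass);	/* parameter list length */

	printf("CDB: ");
	for (i = 0; i < sizeof(cdb); i++)
		printf("%02x ", cdb[i]);
	printf("\npage:");
	for (i = 0; i < sizeof(short_trespass); i++)
		printf(" %02x", short_trespass[i]);
	printf("\n");
	return 0;
}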
+ */ + return ADD_TO_MLQUEUE; + break; + } + + return SCSI_RETURN_NOT_HANDLED; +} + +static blk_status_t clariion_prep_fn(struct scsi_device *sdev, + struct request *req) +{ + struct clariion_dh_data *h = sdev->handler_data; + + if (h->lun_state != CLARIION_LUN_OWNED) { + req->rq_flags |= RQF_QUIET; + return BLK_STS_IOERR; + } + + return BLK_STS_OK; +} + +static int clariion_std_inquiry(struct scsi_device *sdev, + struct clariion_dh_data *csdev) +{ + int err = SCSI_DH_OK; + char *sp_model; + + sp_model = parse_sp_model(sdev, sdev->inquiry); + if (!sp_model) { + err = SCSI_DH_DEV_UNSUPP; + goto out; + } + + /* + * FC Series arrays do not support long trespass + */ + if (!strlen(sp_model) || !strncmp(sp_model, "FC",2)) + csdev->flags |= CLARIION_SHORT_TRESPASS; + + sdev_printk(KERN_INFO, sdev, + "%s: detected Clariion %s, flags %x\n", + CLARIION_NAME, sp_model, csdev->flags); +out: + return err; +} + +static int clariion_send_inquiry(struct scsi_device *sdev, + struct clariion_dh_data *csdev) +{ + int err = SCSI_DH_IO; + + if (!scsi_get_vpd_page(sdev, 0xC0, csdev->buffer, + CLARIION_BUFFER_SIZE)) + err = parse_sp_info_reply(sdev, csdev); + + return err; +} + +static int clariion_activate(struct scsi_device *sdev, + activate_complete fn, void *data) +{ + struct clariion_dh_data *csdev = sdev->handler_data; + int result; + + result = clariion_send_inquiry(sdev, csdev); + if (result != SCSI_DH_OK) + goto done; + + if (csdev->lun_state == CLARIION_LUN_OWNED) + goto done; + + result = send_trespass_cmd(sdev, csdev); + if (result != SCSI_DH_OK) + goto done; + sdev_printk(KERN_INFO, sdev,"%s: %s trespass command sent\n", + CLARIION_NAME, + csdev->flags&CLARIION_SHORT_TRESPASS?"short":"long" ); + + /* Update status */ + result = clariion_send_inquiry(sdev, csdev); + if (result != SCSI_DH_OK) + goto done; + +done: + sdev_printk(KERN_INFO, sdev, + "%s: at SP %c Port %d (%s, default SP %c)\n", + CLARIION_NAME, csdev->current_sp + 'A', + csdev->port, lun_state[csdev->lun_state], + csdev->default_sp + 'A'); + + if (fn) + fn(data, result); + return 0; +} +/* + * params - parameters in the following format + * "no_of_params\0param1\0param2\0param3\0...\0" + * for example, string for 2 parameters with value 10 and 21 + * is specified as "2\010\021\0". + */ +static int clariion_set_params(struct scsi_device *sdev, const char *params) +{ + struct clariion_dh_data *csdev = sdev->handler_data; + unsigned int hr = 0, st = 0, argc; + const char *p = params; + int result = SCSI_DH_OK; + + if ((sscanf(params, "%u", &argc) != 1) || (argc != 2)) + return -EINVAL; + + while (*p++) + ; + if ((sscanf(p, "%u", &st) != 1) || (st > 1)) + return -EINVAL; + + while (*p++) + ; + if ((sscanf(p, "%u", &hr) != 1) || (hr > 1)) + return -EINVAL; + + if (st) + csdev->flags |= CLARIION_SHORT_TRESPASS; + else + csdev->flags &= ~CLARIION_SHORT_TRESPASS; + + if (hr) + csdev->flags |= CLARIION_HONOR_RESERVATIONS; + else + csdev->flags &= ~CLARIION_HONOR_RESERVATIONS; + + /* + * If this path is owned, we have to send a trespass command + * with the new parameters. If not, simply return. Next trespass + * command would use the parameters. 
+ */ + if (csdev->lun_state != CLARIION_LUN_OWNED) + goto done; + + csdev->lun_state = CLARIION_LUN_UNINITIALIZED; + result = send_trespass_cmd(sdev, csdev); + if (result != SCSI_DH_OK) + goto done; + + /* Update status */ + result = clariion_send_inquiry(sdev, csdev); + +done: + return result; +} + +static int clariion_bus_attach(struct scsi_device *sdev) +{ + struct clariion_dh_data *h; + int err; + + h = kzalloc(sizeof(*h) , GFP_KERNEL); + if (!h) + return SCSI_DH_NOMEM; + h->lun_state = CLARIION_LUN_UNINITIALIZED; + h->default_sp = CLARIION_UNBOUND_LU; + h->current_sp = CLARIION_UNBOUND_LU; + + err = clariion_std_inquiry(sdev, h); + if (err != SCSI_DH_OK) + goto failed; + + err = clariion_send_inquiry(sdev, h); + if (err != SCSI_DH_OK) + goto failed; + + sdev_printk(KERN_INFO, sdev, + "%s: connected to SP %c Port %d (%s, default SP %c)\n", + CLARIION_NAME, h->current_sp + 'A', + h->port, lun_state[h->lun_state], + h->default_sp + 'A'); + + sdev->handler_data = h; + return SCSI_DH_OK; + +failed: + kfree(h); + return err; +} + +static void clariion_bus_detach(struct scsi_device *sdev) +{ + kfree(sdev->handler_data); + sdev->handler_data = NULL; +} + +static struct scsi_device_handler clariion_dh = { + .name = CLARIION_NAME, + .module = THIS_MODULE, + .attach = clariion_bus_attach, + .detach = clariion_bus_detach, + .check_sense = clariion_check_sense, + .activate = clariion_activate, + .prep_fn = clariion_prep_fn, + .set_params = clariion_set_params, +}; + +static int __init clariion_init(void) +{ + int r; + + r = scsi_register_device_handler(&clariion_dh); + if (r != 0) + printk(KERN_ERR "%s: Failed to register scsi device handler.", + CLARIION_NAME); + return r; +} + +static void __exit clariion_exit(void) +{ + scsi_unregister_device_handler(&clariion_dh); +} + +module_init(clariion_init); +module_exit(clariion_exit); + +MODULE_DESCRIPTION("EMC CX/AX/FC-family driver"); +MODULE_AUTHOR("Mike Christie , Chandra Seetharaman "); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c new file mode 100644 index 000000000..5f2f943d9 --- /dev/null +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -0,0 +1,267 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Basic HP/COMPAQ MSA 1000 support. This is only needed if your HW cannot be + * upgraded. + * + * Copyright (C) 2006 Red Hat, Inc. All rights reserved. 
+ * Copyright (C) 2006 Mike Christie + * Copyright (C) 2008 Hannes Reinecke + */ + +#include +#include +#include +#include +#include +#include + +#define HP_SW_NAME "hp_sw" + +#define HP_SW_TIMEOUT (60 * HZ) +#define HP_SW_RETRIES 3 + +#define HP_SW_PATH_UNINITIALIZED -1 +#define HP_SW_PATH_ACTIVE 0 +#define HP_SW_PATH_PASSIVE 1 + +struct hp_sw_dh_data { + int path_state; + int retries; + int retry_cnt; + struct scsi_device *sdev; +}; + +static int hp_sw_start_stop(struct hp_sw_dh_data *); + +/* + * tur_done - Handle TEST UNIT READY return status + * @sdev: sdev the command has been sent to + * @errors: blk error code + * + * Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path + */ +static int tur_done(struct scsi_device *sdev, struct hp_sw_dh_data *h, + struct scsi_sense_hdr *sshdr) +{ + int ret = SCSI_DH_IO; + + switch (sshdr->sense_key) { + case UNIT_ATTENTION: + ret = SCSI_DH_IMM_RETRY; + break; + case NOT_READY: + if (sshdr->asc == 0x04 && sshdr->ascq == 2) { + /* + * LUN not ready - Initialization command required + * + * This is the passive path + */ + h->path_state = HP_SW_PATH_PASSIVE; + ret = SCSI_DH_OK; + break; + } + fallthrough; + default: + sdev_printk(KERN_WARNING, sdev, + "%s: sending tur failed, sense %x/%x/%x\n", + HP_SW_NAME, sshdr->sense_key, sshdr->asc, + sshdr->ascq); + break; + } + return ret; +} + +/* + * hp_sw_tur - Send TEST UNIT READY + * @sdev: sdev command should be sent to + * + * Use the TEST UNIT READY command to determine + * the path state. + */ +static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h) +{ + unsigned char cmd[6] = { TEST_UNIT_READY }; + struct scsi_sense_hdr sshdr; + int ret = SCSI_DH_OK, res; + blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV | + REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + +retry: + res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT, + HP_SW_RETRIES, &exec_args); + if (res) { + if (scsi_sense_valid(&sshdr)) + ret = tur_done(sdev, h, &sshdr); + else { + sdev_printk(KERN_WARNING, sdev, + "%s: sending tur failed with %x\n", + HP_SW_NAME, res); + ret = SCSI_DH_IO; + } + } else { + h->path_state = HP_SW_PATH_ACTIVE; + ret = SCSI_DH_OK; + } + if (ret == SCSI_DH_IMM_RETRY) + goto retry; + + return ret; +} + +/* + * hp_sw_start_stop - Send START STOP UNIT command + * @sdev: sdev command should be sent to + * + * Sending START STOP UNIT activates the SP. + */ +static int hp_sw_start_stop(struct hp_sw_dh_data *h) +{ + unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1, 0 }; + struct scsi_sense_hdr sshdr; + struct scsi_device *sdev = h->sdev; + int res, rc = SCSI_DH_OK; + int retry_cnt = HP_SW_RETRIES; + blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV | + REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + +retry: + res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT, + HP_SW_RETRIES, &exec_args); + if (res) { + if (!scsi_sense_valid(&sshdr)) { + sdev_printk(KERN_WARNING, sdev, + "%s: sending start_stop_unit failed, " + "no sense available\n", HP_SW_NAME); + return SCSI_DH_IO; + } + switch (sshdr.sense_key) { + case NOT_READY: + if (sshdr.asc == 0x04 && sshdr.ascq == 3) { + /* + * LUN not ready - manual intervention required + * + * Switch-over in progress, retry. 
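Editor's note: the hp_sw handler drives the array with just two 6-byte CDBs, TEST UNIT READY to classify the path and START STOP UNIT with the Start bit set to activate the passive controller, exactly as in the cmd[] initializers above. A small userspace sketch that only prints those CDBs (it does not issue them):

#include <stdio.h>

static void dump_cdb(const char *name, const unsigned char *cdb)
{
	int i;

	printf("%-16s", name);
	for (i = 0; i < 6; i++)
		printf(" %02x", cdb[i]);
	printf("\n");
}

int main(void)
{
	const unsigned char tur[6]        = { 0x00, 0, 0, 0, 0, 0 };	/* TEST UNIT READY */
	const unsigned char start_unit[6] = { 0x1b, 0, 0, 0, 1, 0 };	/* START STOP UNIT, Start=1 */

	dump_cdb("TEST UNIT READY", tur);
	dump_cdb("START STOP UNIT", start_unit);
	return 0;
}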
+ */ + if (--retry_cnt) + goto retry; + rc = SCSI_DH_RETRY; + break; + } + fallthrough; + default: + sdev_printk(KERN_WARNING, sdev, + "%s: sending start_stop_unit failed, " + "sense %x/%x/%x\n", HP_SW_NAME, + sshdr.sense_key, sshdr.asc, sshdr.ascq); + rc = SCSI_DH_IO; + } + } + return rc; +} + +static blk_status_t hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) +{ + struct hp_sw_dh_data *h = sdev->handler_data; + + if (h->path_state != HP_SW_PATH_ACTIVE) { + req->rq_flags |= RQF_QUIET; + return BLK_STS_IOERR; + } + + return BLK_STS_OK; +} + +/* + * hp_sw_activate - Activate a path + * @sdev: sdev on the path to be activated + * + * The HP Active/Passive firmware is pretty simple; + * the passive path reports NOT READY with sense codes + * 0x04/0x02; a START STOP UNIT command will then + * activate the passive path (and deactivate the + * previously active one). + */ +static int hp_sw_activate(struct scsi_device *sdev, + activate_complete fn, void *data) +{ + int ret = SCSI_DH_OK; + struct hp_sw_dh_data *h = sdev->handler_data; + + ret = hp_sw_tur(sdev, h); + + if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) + ret = hp_sw_start_stop(h); + + if (fn) + fn(data, ret); + return 0; +} + +static int hp_sw_bus_attach(struct scsi_device *sdev) +{ + struct hp_sw_dh_data *h; + int ret; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return SCSI_DH_NOMEM; + h->path_state = HP_SW_PATH_UNINITIALIZED; + h->retries = HP_SW_RETRIES; + h->sdev = sdev; + + ret = hp_sw_tur(sdev, h); + if (ret != SCSI_DH_OK) + goto failed; + if (h->path_state == HP_SW_PATH_UNINITIALIZED) { + ret = SCSI_DH_NOSYS; + goto failed; + } + + sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n", + HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE? + "active":"passive"); + + sdev->handler_data = h; + return SCSI_DH_OK; +failed: + kfree(h); + return ret; +} + +static void hp_sw_bus_detach( struct scsi_device *sdev ) +{ + kfree(sdev->handler_data); + sdev->handler_data = NULL; +} + +static struct scsi_device_handler hp_sw_dh = { + .name = HP_SW_NAME, + .module = THIS_MODULE, + .attach = hp_sw_bus_attach, + .detach = hp_sw_bus_detach, + .activate = hp_sw_activate, + .prep_fn = hp_sw_prep_fn, +}; + +static int __init hp_sw_init(void) +{ + return scsi_register_device_handler(&hp_sw_dh); +} + +static void __exit hp_sw_exit(void) +{ + scsi_unregister_device_handler(&hp_sw_dh); +} + +module_init(hp_sw_init); +module_exit(hp_sw_exit); + +MODULE_DESCRIPTION("HP Active/Passive driver"); +MODULE_AUTHOR("Mike Christie +#include +#include +#include +#include +#include + +#define RDAC_NAME "rdac" +#define RDAC_RETRY_COUNT 5 + +/* + * LSI mode page stuff + * + * These struct definitions and the forming of the + * mode page were taken from the LSI RDAC 2.4 GPL'd + * driver, and then converted to Linux conventions. 
+ */ +#define RDAC_QUIESCENCE_TIME 20 +/* + * Page Codes + */ +#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c + +/* + * Controller modes definitions + */ +#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02 + +/* + * RDAC Options field + */ +#define RDAC_FORCED_QUIESENCE 0x02 + +#define RDAC_TIMEOUT (60 * HZ) +#define RDAC_RETRIES 3 + +struct rdac_mode_6_hdr { + u8 data_len; + u8 medium_type; + u8 device_params; + u8 block_desc_len; +}; + +struct rdac_mode_10_hdr { + u16 data_len; + u8 medium_type; + u8 device_params; + u16 reserved; + u16 block_desc_len; +}; + +struct rdac_mode_common { + u8 controller_serial[16]; + u8 alt_controller_serial[16]; + u8 rdac_mode[2]; + u8 alt_rdac_mode[2]; + u8 quiescence_timeout; + u8 rdac_options; +}; + +struct rdac_pg_legacy { + struct rdac_mode_6_hdr hdr; + u8 page_code; + u8 page_len; + struct rdac_mode_common common; +#define MODE6_MAX_LUN 32 + u8 lun_table[MODE6_MAX_LUN]; + u8 reserved2[32]; + u8 reserved3; + u8 reserved4; +}; + +struct rdac_pg_expanded { + struct rdac_mode_10_hdr hdr; + u8 page_code; + u8 subpage_code; + u8 page_len[2]; + struct rdac_mode_common common; + u8 lun_table[256]; + u8 reserved3; + u8 reserved4; +}; + +struct c9_inquiry { + u8 peripheral_info; + u8 page_code; /* 0xC9 */ + u8 reserved1; + u8 page_len; + u8 page_id[4]; /* "vace" */ + u8 avte_cvp; + u8 path_prio; + u8 reserved2[38]; +}; + +#define SUBSYS_ID_LEN 16 +#define SLOT_ID_LEN 2 +#define ARRAY_LABEL_LEN 31 + +struct c4_inquiry { + u8 peripheral_info; + u8 page_code; /* 0xC4 */ + u8 reserved1; + u8 page_len; + u8 page_id[4]; /* "subs" */ + u8 subsys_id[SUBSYS_ID_LEN]; + u8 revision[4]; + u8 slot_id[SLOT_ID_LEN]; + u8 reserved[2]; +}; + +#define UNIQUE_ID_LEN 16 +struct c8_inquiry { + u8 peripheral_info; + u8 page_code; /* 0xC8 */ + u8 reserved1; + u8 page_len; + u8 page_id[4]; /* "edid" */ + u8 reserved2[3]; + u8 vol_uniq_id_len; + u8 vol_uniq_id[16]; + u8 vol_user_label_len; + u8 vol_user_label[60]; + u8 array_uniq_id_len; + u8 array_unique_id[UNIQUE_ID_LEN]; + u8 array_user_label_len; + u8 array_user_label[60]; + u8 lun[8]; +}; + +struct rdac_controller { + u8 array_id[UNIQUE_ID_LEN]; + int use_ms10; + struct kref kref; + struct list_head node; /* list of all controllers */ + union { + struct rdac_pg_legacy legacy; + struct rdac_pg_expanded expanded; + } mode_select; + u8 index; + u8 array_name[ARRAY_LABEL_LEN]; + struct Scsi_Host *host; + spinlock_t ms_lock; + int ms_queued; + struct work_struct ms_work; + struct scsi_device *ms_sdev; + struct list_head ms_head; + struct list_head dh_list; +}; + +struct c2_inquiry { + u8 peripheral_info; + u8 page_code; /* 0xC2 */ + u8 reserved1; + u8 page_len; + u8 page_id[4]; /* "swr4" */ + u8 sw_version[3]; + u8 sw_date[3]; + u8 features_enabled; + u8 max_lun_supported; + u8 partitions[239]; /* Total allocation length should be 0xFF */ +}; + +struct rdac_dh_data { + struct list_head node; + struct rdac_controller *ctlr; + struct scsi_device *sdev; +#define UNINITIALIZED_LUN (1 << 8) + unsigned lun; + +#define RDAC_MODE 0 +#define RDAC_MODE_AVT 1 +#define RDAC_MODE_IOSHIP 2 + unsigned char mode; + +#define RDAC_STATE_ACTIVE 0 +#define RDAC_STATE_PASSIVE 1 + unsigned char state; + +#define RDAC_LUN_UNOWNED 0 +#define RDAC_LUN_OWNED 1 + char lun_state; + +#define RDAC_PREFERRED 0 +#define RDAC_NON_PREFERRED 1 + char preferred; + + union { + struct c2_inquiry c2; + struct c4_inquiry c4; + struct c8_inquiry c8; + struct c9_inquiry c9; + } inq; +}; + +static const char *mode[] = { + "RDAC", + "AVT", + "IOSHIP", +}; +static const char 
*lun_state[] = +{ + "unowned", + "owned", +}; + +struct rdac_queue_data { + struct list_head entry; + struct rdac_dh_data *h; + activate_complete callback_fn; + void *callback_data; +}; + +static LIST_HEAD(ctlr_list); +static DEFINE_SPINLOCK(list_lock); +static struct workqueue_struct *kmpath_rdacd; +static void send_mode_select(struct work_struct *work); + +/* + * module parameter to enable rdac debug logging. + * 2 bits for each type of logging, only two types defined for now + * Can be enhanced if required at later point + */ +static int rdac_logging = 1; +module_param(rdac_logging, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, " + "Default is 1 - failover logging enabled, " + "set it to 0xF to enable all the logs"); + +#define RDAC_LOG_FAILOVER 0 +#define RDAC_LOG_SENSE 2 + +#define RDAC_LOG_BITS 2 + +#define RDAC_LOG_LEVEL(SHIFT) \ + ((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1)) + +#define RDAC_LOG(SHIFT, sdev, f, arg...) \ +do { \ + if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \ + sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \ +} while (0); + +static unsigned int rdac_failover_get(struct rdac_controller *ctlr, + struct list_head *list, + unsigned char *cdb) +{ + struct rdac_mode_common *common; + unsigned data_size; + struct rdac_queue_data *qdata; + u8 *lun_table; + + if (ctlr->use_ms10) { + struct rdac_pg_expanded *rdac_pg; + + data_size = sizeof(struct rdac_pg_expanded); + rdac_pg = &ctlr->mode_select.expanded; + memset(rdac_pg, 0, data_size); + common = &rdac_pg->common; + rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40; + rdac_pg->subpage_code = 0x1; + rdac_pg->page_len[0] = 0x01; + rdac_pg->page_len[1] = 0x28; + lun_table = rdac_pg->lun_table; + } else { + struct rdac_pg_legacy *rdac_pg; + + data_size = sizeof(struct rdac_pg_legacy); + rdac_pg = &ctlr->mode_select.legacy; + memset(rdac_pg, 0, data_size); + common = &rdac_pg->common; + rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; + rdac_pg->page_len = 0x68; + lun_table = rdac_pg->lun_table; + } + common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS; + common->quiescence_timeout = RDAC_QUIESCENCE_TIME; + common->rdac_options = RDAC_FORCED_QUIESENCE; + + list_for_each_entry(qdata, list, entry) { + lun_table[qdata->h->lun] = 0x81; + } + + /* Prepare the command. 
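Editor's note: the rdac_logging module parameter packs one two-bit level per log type, which RDAC_LOG_LEVEL() extracts by shift and mask. A standalone sketch of that extraction, using 0xF (all logs enabled) as the example value:

#include <stdio.h>

#define LOG_BITS	2
#define LOG_FAILOVER	0
#define LOG_SENSE	2

static unsigned int log_level(unsigned int mask, unsigned int shift)
{
	return (mask >> shift) & ((1u << LOG_BITS) - 1);
}

int main(void)
{
	unsigned int rdac_logging = 0xF;	/* stand-in for the module parameter */

	printf("failover level: %u\n", log_level(rdac_logging, LOG_FAILOVER));
	printf("sense level:    %u\n", log_level(rdac_logging, LOG_SENSE));
	return 0;
}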
*/ + if (ctlr->use_ms10) { + cdb[0] = MODE_SELECT_10; + cdb[7] = data_size >> 8; + cdb[8] = data_size & 0xff; + } else { + cdb[0] = MODE_SELECT; + cdb[4] = data_size; + } + + return data_size; +} + +static void release_controller(struct kref *kref) +{ + struct rdac_controller *ctlr; + ctlr = container_of(kref, struct rdac_controller, kref); + + list_del(&ctlr->node); + kfree(ctlr); +} + +static struct rdac_controller *get_controller(int index, char *array_name, + u8 *array_id, struct scsi_device *sdev) +{ + struct rdac_controller *ctlr, *tmp; + + list_for_each_entry(tmp, &ctlr_list, node) { + if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) && + (tmp->index == index) && + (tmp->host == sdev->host)) { + kref_get(&tmp->kref); + return tmp; + } + } + ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC); + if (!ctlr) + return NULL; + + /* initialize fields of controller */ + memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN); + ctlr->index = index; + ctlr->host = sdev->host; + memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN); + + kref_init(&ctlr->kref); + ctlr->use_ms10 = -1; + ctlr->ms_queued = 0; + ctlr->ms_sdev = NULL; + spin_lock_init(&ctlr->ms_lock); + INIT_WORK(&ctlr->ms_work, send_mode_select); + INIT_LIST_HEAD(&ctlr->ms_head); + list_add(&ctlr->node, &ctlr_list); + INIT_LIST_HEAD(&ctlr->dh_list); + + return ctlr; +} + +static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h, + char *array_name, u8 *array_id) +{ + int err = SCSI_DH_IO, i; + struct c8_inquiry *inqp = &h->inq.c8; + + if (!scsi_get_vpd_page(sdev, 0xC8, (unsigned char *)inqp, + sizeof(struct c8_inquiry))) { + if (inqp->page_code != 0xc8) + return SCSI_DH_NOSYS; + if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' || + inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd') + return SCSI_DH_NOSYS; + h->lun = inqp->lun[7]; /* Uses only the last byte */ + + for(i=0; iarray_user_label[(2*i)+1]; + + *(array_name+ARRAY_LABEL_LEN-1) = '\0'; + memset(array_id, 0, UNIQUE_ID_LEN); + memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len); + err = SCSI_DH_OK; + } + return err; +} + +static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h) +{ + int err = SCSI_DH_IO, access_state; + struct rdac_dh_data *tmp; + struct c9_inquiry *inqp = &h->inq.c9; + + h->state = RDAC_STATE_ACTIVE; + if (!scsi_get_vpd_page(sdev, 0xC9, (unsigned char *)inqp, + sizeof(struct c9_inquiry))) { + /* detect the operating mode */ + if ((inqp->avte_cvp >> 5) & 0x1) + h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */ + else if (inqp->avte_cvp >> 7) + h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */ + else + h->mode = RDAC_MODE; /* LUN in RDAC mode */ + + /* Update ownership */ + if (inqp->avte_cvp & 0x1) { + h->lun_state = RDAC_LUN_OWNED; + access_state = SCSI_ACCESS_STATE_OPTIMAL; + } else { + h->lun_state = RDAC_LUN_UNOWNED; + if (h->mode == RDAC_MODE) { + h->state = RDAC_STATE_PASSIVE; + access_state = SCSI_ACCESS_STATE_STANDBY; + } else + access_state = SCSI_ACCESS_STATE_ACTIVE; + } + + /* Update path prio*/ + if (inqp->path_prio & 0x1) { + h->preferred = RDAC_PREFERRED; + access_state |= SCSI_ACCESS_STATE_PREFERRED; + } else + h->preferred = RDAC_NON_PREFERRED; + rcu_read_lock(); + list_for_each_entry_rcu(tmp, &h->ctlr->dh_list, node) { + /* h->sdev should always be valid */ + BUG_ON(!tmp->sdev); + tmp->sdev->access_state = access_state; + } + rcu_read_unlock(); + err = SCSI_DH_OK; + } + + return err; +} + +static int initialize_controller(struct scsi_device *sdev, + struct rdac_dh_data *h, char *array_name, u8 
*array_id) +{ + int err = SCSI_DH_IO, index; + struct c4_inquiry *inqp = &h->inq.c4; + + if (!scsi_get_vpd_page(sdev, 0xC4, (unsigned char *)inqp, + sizeof(struct c4_inquiry))) { + /* get the controller index */ + if (inqp->slot_id[1] == 0x31) + index = 0; + else + index = 1; + + spin_lock(&list_lock); + h->ctlr = get_controller(index, array_name, array_id, sdev); + if (!h->ctlr) + err = SCSI_DH_RES_TEMP_UNAVAIL; + else { + h->sdev = sdev; + list_add_rcu(&h->node, &h->ctlr->dh_list); + } + spin_unlock(&list_lock); + err = SCSI_DH_OK; + } + return err; +} + +static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h) +{ + int err = SCSI_DH_IO; + struct c2_inquiry *inqp = &h->inq.c2; + + if (!scsi_get_vpd_page(sdev, 0xC2, (unsigned char *)inqp, + sizeof(struct c2_inquiry))) { + /* + * If more than MODE6_MAX_LUN luns are supported, use + * mode select 10 + */ + if (inqp->max_lun_supported >= MODE6_MAX_LUN) + h->ctlr->use_ms10 = 1; + else + h->ctlr->use_ms10 = 0; + err = SCSI_DH_OK; + } + return err; +} + +static int mode_select_handle_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sense_hdr) +{ + int err = SCSI_DH_IO; + struct rdac_dh_data *h = sdev->handler_data; + + if (!scsi_sense_valid(sense_hdr)) + goto done; + + switch (sense_hdr->sense_key) { + case NO_SENSE: + case ABORTED_COMMAND: + case UNIT_ATTENTION: + err = SCSI_DH_RETRY; + break; + case NOT_READY: + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01) + /* LUN Not Ready and is in the Process of Becoming + * Ready + */ + err = SCSI_DH_RETRY; + break; + case ILLEGAL_REQUEST: + if (sense_hdr->asc == 0x91 && sense_hdr->ascq == 0x36) + /* + * Command Lock contention + */ + err = SCSI_DH_IMM_RETRY; + break; + default: + break; + } + + RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " + "MODE_SELECT returned with sense %02x/%02x/%02x", + (char *) h->ctlr->array_name, h->ctlr->index, + sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq); + +done: + return err; +} + +static void send_mode_select(struct work_struct *work) +{ + struct rdac_controller *ctlr = + container_of(work, struct rdac_controller, ms_work); + struct scsi_device *sdev = ctlr->ms_sdev; + struct rdac_dh_data *h = sdev->handler_data; + int err = SCSI_DH_OK, retry_cnt = RDAC_RETRY_COUNT; + struct rdac_queue_data *tmp, *qdata; + LIST_HEAD(list); + unsigned char cdb[MAX_COMMAND_SIZE]; + struct scsi_sense_hdr sshdr; + unsigned int data_size; + blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV | + REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + + spin_lock(&ctlr->ms_lock); + list_splice_init(&ctlr->ms_head, &list); + ctlr->ms_queued = 0; + ctlr->ms_sdev = NULL; + spin_unlock(&ctlr->ms_lock); + + retry: + memset(cdb, 0, sizeof(cdb)); + + data_size = rdac_failover_get(ctlr, &list, cdb); + + RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " + "%s MODE_SELECT command", + (char *) h->ctlr->array_name, h->ctlr->index, + (retry_cnt == RDAC_RETRY_COUNT) ? 
"queueing" : "retrying"); + + if (scsi_execute_cmd(sdev, cdb, opf, &h->ctlr->mode_select, data_size, + RDAC_TIMEOUT * HZ, RDAC_RETRIES, &exec_args)) { + err = mode_select_handle_sense(sdev, &sshdr); + if (err == SCSI_DH_RETRY && retry_cnt--) + goto retry; + if (err == SCSI_DH_IMM_RETRY) + goto retry; + } + if (err == SCSI_DH_OK) { + h->state = RDAC_STATE_ACTIVE; + RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " + "MODE_SELECT completed", + (char *) h->ctlr->array_name, h->ctlr->index); + } + + list_for_each_entry_safe(qdata, tmp, &list, entry) { + list_del(&qdata->entry); + if (err == SCSI_DH_OK) + qdata->h->state = RDAC_STATE_ACTIVE; + if (qdata->callback_fn) + qdata->callback_fn(qdata->callback_data, err); + kfree(qdata); + } + return; +} + +static int queue_mode_select(struct scsi_device *sdev, + activate_complete fn, void *data) +{ + struct rdac_queue_data *qdata; + struct rdac_controller *ctlr; + + qdata = kzalloc(sizeof(*qdata), GFP_KERNEL); + if (!qdata) + return SCSI_DH_RETRY; + + qdata->h = sdev->handler_data; + qdata->callback_fn = fn; + qdata->callback_data = data; + + ctlr = qdata->h->ctlr; + spin_lock(&ctlr->ms_lock); + list_add_tail(&qdata->entry, &ctlr->ms_head); + if (!ctlr->ms_queued) { + ctlr->ms_queued = 1; + ctlr->ms_sdev = sdev; + queue_work(kmpath_rdacd, &ctlr->ms_work); + } + spin_unlock(&ctlr->ms_lock); + return SCSI_DH_OK; +} + +static int rdac_activate(struct scsi_device *sdev, + activate_complete fn, void *data) +{ + struct rdac_dh_data *h = sdev->handler_data; + int err = SCSI_DH_OK; + int act = 0; + + err = check_ownership(sdev, h); + if (err != SCSI_DH_OK) + goto done; + + switch (h->mode) { + case RDAC_MODE: + if (h->lun_state == RDAC_LUN_UNOWNED) + act = 1; + break; + case RDAC_MODE_IOSHIP: + if ((h->lun_state == RDAC_LUN_UNOWNED) && + (h->preferred == RDAC_PREFERRED)) + act = 1; + break; + default: + break; + } + + if (act) { + err = queue_mode_select(sdev, fn, data); + if (err == SCSI_DH_OK) + return 0; + } +done: + if (fn) + fn(data, err); + return 0; +} + +static blk_status_t rdac_prep_fn(struct scsi_device *sdev, struct request *req) +{ + struct rdac_dh_data *h = sdev->handler_data; + + if (h->state != RDAC_STATE_ACTIVE) { + req->rq_flags |= RQF_QUIET; + return BLK_STS_IOERR; + } + + return BLK_STS_OK; +} + +static enum scsi_disposition rdac_check_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sense_hdr) +{ + struct rdac_dh_data *h = sdev->handler_data; + + RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, " + "I/O returned with sense %02x/%02x/%02x", + (char *) h->ctlr->array_name, h->ctlr->index, + sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq); + + switch (sense_hdr->sense_key) { + case NOT_READY: + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01) + /* LUN Not Ready - Logical Unit Not Ready and is in + * the process of becoming ready + * Just retry. + */ + return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81) + /* LUN Not Ready - Storage firmware incompatible + * Manual code synchonisation required. + * + * Nothing we can do here. Try to bypass the path. + */ + return SUCCESS; + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1) + /* LUN Not Ready - Quiescense in progress + * + * Just retry and wait. + */ + return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02) + /* LUN Not Ready - Quiescense in progress + * or has been achieved + * Just retry. 
+ */ + return ADD_TO_MLQUEUE; + break; + case ILLEGAL_REQUEST: + if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) { + /* Invalid Request - Current Logical Unit Ownership. + * Controller is not the current owner of the LUN, + * Fail the path, so that the other path be used. + */ + h->state = RDAC_STATE_PASSIVE; + return SUCCESS; + } + break; + case UNIT_ATTENTION: + if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) + /* + * Power On, Reset, or Bus Device Reset, just retry. + */ + return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02) + /* + * Quiescence in progress , just retry. + */ + return ADD_TO_MLQUEUE; + break; + } + /* success just means we do not care what scsi-ml does */ + return SCSI_RETURN_NOT_HANDLED; +} + +static int rdac_bus_attach(struct scsi_device *sdev) +{ + struct rdac_dh_data *h; + int err; + char array_name[ARRAY_LABEL_LEN]; + char array_id[UNIQUE_ID_LEN]; + + h = kzalloc(sizeof(*h) , GFP_KERNEL); + if (!h) + return SCSI_DH_NOMEM; + h->lun = UNINITIALIZED_LUN; + h->state = RDAC_STATE_ACTIVE; + + err = get_lun_info(sdev, h, array_name, array_id); + if (err != SCSI_DH_OK) + goto failed; + + err = initialize_controller(sdev, h, array_name, array_id); + if (err != SCSI_DH_OK) + goto failed; + + err = check_ownership(sdev, h); + if (err != SCSI_DH_OK) + goto clean_ctlr; + + err = set_mode_select(sdev, h); + if (err != SCSI_DH_OK) + goto clean_ctlr; + + sdev_printk(KERN_NOTICE, sdev, + "%s: LUN %d (%s) (%s)\n", + RDAC_NAME, h->lun, mode[(int)h->mode], + lun_state[(int)h->lun_state]); + + sdev->handler_data = h; + return SCSI_DH_OK; + +clean_ctlr: + spin_lock(&list_lock); + kref_put(&h->ctlr->kref, release_controller); + spin_unlock(&list_lock); + +failed: + kfree(h); + return err; +} + +static void rdac_bus_detach( struct scsi_device *sdev ) +{ + struct rdac_dh_data *h = sdev->handler_data; + + if (h->ctlr && h->ctlr->ms_queued) + flush_workqueue(kmpath_rdacd); + + spin_lock(&list_lock); + if (h->ctlr) { + list_del_rcu(&h->node); + kref_put(&h->ctlr->kref, release_controller); + } + spin_unlock(&list_lock); + sdev->handler_data = NULL; + synchronize_rcu(); + kfree(h); +} + +static struct scsi_device_handler rdac_dh = { + .name = RDAC_NAME, + .module = THIS_MODULE, + .prep_fn = rdac_prep_fn, + .check_sense = rdac_check_sense, + .attach = rdac_bus_attach, + .detach = rdac_bus_detach, + .activate = rdac_activate, +}; + +static int __init rdac_init(void) +{ + int r; + + r = scsi_register_device_handler(&rdac_dh); + if (r != 0) { + printk(KERN_ERR "Failed to register scsi device handler."); + goto done; + } + + /* + * Create workqueue to handle mode selects for rdac + */ + kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd"); + if (!kmpath_rdacd) { + scsi_unregister_device_handler(&rdac_dh); + printk(KERN_ERR "kmpath_rdacd creation failed.\n"); + + r = -EINVAL; + } +done: + return r; +} + +static void __exit rdac_exit(void) +{ + destroy_workqueue(kmpath_rdacd); + scsi_unregister_device_handler(&rdac_dh); +} + +module_init(rdac_init); +module_exit(rdac_exit); + +MODULE_DESCRIPTION("Multipath LSI/Engenio/NetApp E-Series RDAC driver"); +MODULE_AUTHOR("Mike Christie, Chandra Seetharaman"); +MODULE_VERSION("01.00.0000.0000"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c new file mode 100644 index 000000000..dfb091d34 --- /dev/null +++ b/drivers/scsi/dmx3191d.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + dmx3191d.c - driver for the Domex DMX3191D SCSI card. 
+ Copyright (C) 2000 by Massimo Piccioni + Portions Copyright (C) 2004 by Christoph Hellwig + + Based on the generic NCR5380 driver by Drew Eckhardt et al. + +*/ + +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * Definitions for the generic 5380 driver. + */ + +#define NCR5380_read(reg) inb(hostdata->base + (reg)) +#define NCR5380_write(reg, value) outb(value, hostdata->base + (reg)) + +#define NCR5380_dma_xfer_len NCR5380_dma_xfer_none +#define NCR5380_dma_recv_setup NCR5380_dma_setup_none +#define NCR5380_dma_send_setup NCR5380_dma_setup_none +#define NCR5380_dma_residual NCR5380_dma_residual_none + +#define NCR5380_implementation_fields /* none */ + +#include "NCR5380.h" +#include "NCR5380.c" + +#define DMX3191D_DRIVER_NAME "dmx3191d" +#define DMX3191D_REGION_LEN 8 + + +static const struct scsi_host_template dmx3191d_driver_template = { + .module = THIS_MODULE, + .proc_name = DMX3191D_DRIVER_NAME, + .name = "Domex DMX3191D", + .info = NCR5380_info, + .queuecommand = NCR5380_queue_command, + .eh_abort_handler = NCR5380_abort, + .eh_host_reset_handler = NCR5380_host_reset, + .can_queue = 32, + .this_id = 7, + .sg_tablesize = SG_ALL, + .cmd_per_lun = 2, + .dma_boundary = PAGE_SIZE - 1, + .cmd_size = sizeof(struct NCR5380_cmd), +}; + +static int dmx3191d_probe_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct Scsi_Host *shost; + struct NCR5380_hostdata *hostdata; + unsigned long io; + int error = -ENODEV; + + if (pci_enable_device(pdev)) + goto out; + + io = pci_resource_start(pdev, 0); + if (!request_region(io, DMX3191D_REGION_LEN, DMX3191D_DRIVER_NAME)) { + printk(KERN_ERR "dmx3191: region 0x%lx-0x%lx already reserved\n", + io, io + DMX3191D_REGION_LEN); + goto out_disable_device; + } + + shost = scsi_host_alloc(&dmx3191d_driver_template, + sizeof(struct NCR5380_hostdata)); + if (!shost) + goto out_release_region; + + hostdata = shost_priv(shost); + hostdata->base = io; + + /* This card does not seem to raise an interrupt on pdev->irq. + * Steam-powered SCSI controllers run without an IRQ anyway. 
+ */ + shost->irq = NO_IRQ; + + error = NCR5380_init(shost, 0); + if (error) + goto out_host_put; + + NCR5380_maybe_reset_bus(shost); + + pci_set_drvdata(pdev, shost); + + error = scsi_add_host(shost, &pdev->dev); + if (error) + goto out_exit; + + scsi_scan_host(shost); + return 0; + +out_exit: + NCR5380_exit(shost); +out_host_put: + scsi_host_put(shost); + out_release_region: + release_region(io, DMX3191D_REGION_LEN); + out_disable_device: + pci_disable_device(pdev); + out: + return error; +} + +static void dmx3191d_remove_one(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct NCR5380_hostdata *hostdata = shost_priv(shost); + unsigned long io = hostdata->base; + + scsi_remove_host(shost); + + NCR5380_exit(shost); + scsi_host_put(shost); + release_region(io, DMX3191D_REGION_LEN); + pci_disable_device(pdev); +} + +static struct pci_device_id dmx3191d_pci_tbl[] = { + {PCI_VENDOR_ID_DOMEX, PCI_DEVICE_ID_DOMEX_DMX3191D, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4}, + { } +}; +MODULE_DEVICE_TABLE(pci, dmx3191d_pci_tbl); + +static struct pci_driver dmx3191d_pci_driver = { + .name = DMX3191D_DRIVER_NAME, + .id_table = dmx3191d_pci_tbl, + .probe = dmx3191d_probe_one, + .remove = dmx3191d_remove_one, +}; + +module_pci_driver(dmx3191d_pci_driver); + +MODULE_AUTHOR("Massimo Piccioni "); +MODULE_DESCRIPTION("Domex DMX3191D SCSI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/elx/Kconfig b/drivers/scsi/elx/Kconfig new file mode 100644 index 000000000..831daea7a --- /dev/null +++ b/drivers/scsi/elx/Kconfig @@ -0,0 +1,9 @@ +config SCSI_EFCT + tristate "Emulex Fibre Channel Target" + depends on PCI && SCSI + depends on TARGET_CORE + depends on SCSI_FC_ATTRS + select CRC_T10DIF + help + The efct driver provides enhanced SCSI Target Mode + support for specific SLI-4 adapters. diff --git a/drivers/scsi/elx/Makefile b/drivers/scsi/elx/Makefile new file mode 100644 index 000000000..a8537d7a2 --- /dev/null +++ b/drivers/scsi/elx/Makefile @@ -0,0 +1,18 @@ +#// SPDX-License-Identifier: GPL-2.0 +#/* +# * Copyright (C) 2021 Broadcom. All Rights Reserved. The term +# * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. +# */ + + +obj-$(CONFIG_SCSI_EFCT) := efct.o + +efct-objs := efct/efct_driver.o efct/efct_io.o efct/efct_scsi.o \ + efct/efct_xport.o efct/efct_hw.o efct/efct_hw_queues.o \ + efct/efct_lio.o efct/efct_unsol.o + +efct-objs += libefc/efc_cmds.o libefc/efc_domain.o libefc/efc_fabric.o \ + libefc/efc_node.o libefc/efc_nport.o libefc/efc_device.o \ + libefc/efclib.o libefc/efc_sm.o libefc/efc_els.o + +efct-objs += libefc_sli/sli4.o diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c new file mode 100644 index 000000000..49fd2cfed --- /dev/null +++ b/drivers/scsi/elx/efct/efct_driver.c @@ -0,0 +1,782 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
+ */ + +#include "efct_driver.h" + +#include "efct_hw.h" +#include "efct_unsol.h" +#include "efct_scsi.h" + +LIST_HEAD(efct_devices); + +static int logmask; +module_param(logmask, int, 0444); +MODULE_PARM_DESC(logmask, "logging bitmask (default 0)"); + +static struct libefc_function_template efct_libefc_templ = { + .issue_mbox_rqst = efct_issue_mbox_rqst, + .send_els = efct_els_hw_srrs_send, + .send_bls = efct_efc_bls_send, + + .new_nport = efct_scsi_tgt_new_nport, + .del_nport = efct_scsi_tgt_del_nport, + .scsi_new_node = efct_scsi_new_initiator, + .scsi_del_node = efct_scsi_del_initiator, + .hw_seq_free = efct_efc_hw_sequence_free, +}; + +static int +efct_device_init(void) +{ + int rc; + + /* driver-wide init for target-server */ + rc = efct_scsi_tgt_driver_init(); + if (rc) { + pr_err("efct_scsi_tgt_init failed rc=%d\n", rc); + return rc; + } + + rc = efct_scsi_reg_fc_transport(); + if (rc) { + efct_scsi_tgt_driver_exit(); + pr_err("failed to register to FC host\n"); + return rc; + } + + return 0; +} + +static void +efct_device_shutdown(void) +{ + efct_scsi_release_fc_transport(); + + efct_scsi_tgt_driver_exit(); +} + +static void * +efct_device_alloc(u32 nid) +{ + struct efct *efct = NULL; + + efct = kzalloc_node(sizeof(*efct), GFP_KERNEL, nid); + if (!efct) + return efct; + + INIT_LIST_HEAD(&efct->list_entry); + list_add_tail(&efct->list_entry, &efct_devices); + + return efct; +} + +static void +efct_teardown_msix(struct efct *efct) +{ + u32 i; + + for (i = 0; i < efct->n_msix_vec; i++) { + free_irq(pci_irq_vector(efct->pci, i), + &efct->intr_context[i]); + } + + pci_free_irq_vectors(efct->pci); +} + +static int +efct_efclib_config(struct efct *efct, struct libefc_function_template *tt) +{ + struct efc *efc; + struct sli4 *sli; + int rc = 0; + + efc = kzalloc(sizeof(*efc), GFP_KERNEL); + if (!efc) + return -ENOMEM; + + efct->efcport = efc; + + memcpy(&efc->tt, tt, sizeof(*tt)); + efc->base = efct; + efc->pci = efct->pci; + + efc->def_wwnn = efct_get_wwnn(&efct->hw); + efc->def_wwpn = efct_get_wwpn(&efct->hw); + efc->enable_tgt = 1; + efc->log_level = EFC_LOG_LIB; + + sli = &efct->hw.sli; + efc->max_xfer_size = sli->sge_supported_length * + sli_get_max_sgl(&efct->hw.sli); + efc->sli = sli; + efc->fcfi = efct->hw.fcf_indicator; + + rc = efcport_init(efc); + if (rc) + efc_log_err(efc, "efcport_init failed\n"); + + return rc; +} + +static int efct_request_firmware_update(struct efct *efct); + +static const char* +efct_pci_model(u16 device) +{ + switch (device) { + case EFCT_DEVICE_LANCER_G6: return "LPE31004"; + case EFCT_DEVICE_LANCER_G7: return "LPE36000"; + default: return "unknown"; + } +} + +static int +efct_device_attach(struct efct *efct) +{ + u32 rc = 0, i = 0; + + if (efct->attached) { + efc_log_err(efct, "Device is already attached\n"); + return -EIO; + } + + snprintf(efct->name, sizeof(efct->name), "[%s%d] ", "fc", + efct->instance_index); + + efct->logmask = logmask; + efct->filter_def = EFCT_DEFAULT_FILTER; + efct->max_isr_time_msec = EFCT_OS_MAX_ISR_TIME_MSEC; + + efct->model = efct_pci_model(efct->pci->device); + + efct->efct_req_fw_upgrade = true; + + /* Allocate transport object and bring online */ + efct->xport = efct_xport_alloc(efct); + if (!efct->xport) { + efc_log_err(efct, "failed to allocate transport object\n"); + rc = -ENOMEM; + goto out; + } + + rc = efct_xport_attach(efct->xport); + if (rc) { + efc_log_err(efct, "failed to attach transport object\n"); + goto xport_out; + } + + rc = efct_xport_initialize(efct->xport); + if (rc) { + efc_log_err(efct, "failed to 
initialize transport object\n"); + goto xport_out; + } + + rc = efct_efclib_config(efct, &efct_libefc_templ); + if (rc) { + efc_log_err(efct, "failed to init efclib\n"); + goto efclib_out; + } + + for (i = 0; i < efct->n_msix_vec; i++) { + efc_log_debug(efct, "irq %d enabled\n", i); + enable_irq(pci_irq_vector(efct->pci, i)); + } + + efct->attached = true; + + if (efct->efct_req_fw_upgrade) + efct_request_firmware_update(efct); + + return rc; + +efclib_out: + efct_xport_detach(efct->xport); +xport_out: + efct_xport_free(efct->xport); + efct->xport = NULL; +out: + return rc; +} + +static int +efct_device_detach(struct efct *efct) +{ + int i; + + if (!efct || !efct->attached) { + pr_err("Device is not attached\n"); + return -EIO; + } + + if (efct_xport_control(efct->xport, EFCT_XPORT_SHUTDOWN)) + efc_log_err(efct, "Transport Shutdown timed out\n"); + + for (i = 0; i < efct->n_msix_vec; i++) + disable_irq(pci_irq_vector(efct->pci, i)); + + efct_xport_detach(efct->xport); + + efct_xport_free(efct->xport); + efct->xport = NULL; + + efcport_destroy(efct->efcport); + kfree(efct->efcport); + + efct->attached = false; + + return 0; +} + +static void +efct_fw_write_cb(int status, u32 actual_write_length, + u32 change_status, void *arg) +{ + struct efct_fw_write_result *result = arg; + + result->status = status; + result->actual_xfer = actual_write_length; + result->change_status = change_status; + + complete(&result->done); +} + +static int +efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len, + u8 *change_status) +{ + int rc = 0; + u32 bytes_left; + u32 xfer_size; + u32 offset; + struct efc_dma dma; + int last = 0; + struct efct_fw_write_result result; + + init_completion(&result.done); + + bytes_left = buf_len; + offset = 0; + + dma.size = FW_WRITE_BUFSIZE; + dma.virt = dma_alloc_coherent(&efct->pci->dev, + dma.size, &dma.phys, GFP_KERNEL); + if (!dma.virt) + return -ENOMEM; + + while (bytes_left > 0) { + if (bytes_left > FW_WRITE_BUFSIZE) + xfer_size = FW_WRITE_BUFSIZE; + else + xfer_size = bytes_left; + + memcpy(dma.virt, buf + offset, xfer_size); + + if (bytes_left == xfer_size) + last = 1; + + efct_hw_firmware_write(&efct->hw, &dma, xfer_size, offset, + last, efct_fw_write_cb, &result); + + if (wait_for_completion_interruptible(&result.done) != 0) { + rc = -ENXIO; + break; + } + + if (result.actual_xfer == 0 || result.status != 0) { + rc = -EFAULT; + break; + } + + if (last) + *change_status = result.change_status; + + bytes_left -= result.actual_xfer; + offset += result.actual_xfer; + } + + dma_free_coherent(&efct->pci->dev, dma.size, dma.virt, dma.phys); + return rc; +} + +static int +efct_fw_reset(struct efct *efct) +{ + /* + * Firmware reset to activate the new firmware. + * Function 0 will update and load the new firmware + * during attach. 
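Editor's note: efct_firmware_write() streams the image through a fixed-size DMA staging buffer, flags the final chunk so the adapter reports its change status, and advances by the number of bytes actually accepted. The loop below is a userspace sketch of that pattern only; CHUNK_SIZE and write_chunk() are stand-ins for FW_WRITE_BUFSIZE and the hardware write, not driver APIs.

#include <stdio.h>
#include <string.h>

#define CHUNK_SIZE 64

/* pretend writer: accepts everything it is given */
static size_t write_chunk(const unsigned char *chunk, size_t len,
			  size_t offset, int last)
{
	printf("write %3zu bytes at offset %4zu (first byte 0x%02x)%s\n",
	       len, offset, chunk[0], last ? " (last)" : "");
	return len;
}

int main(void)
{
	unsigned char image[200];	/* stand-in firmware image */
	unsigned char staging[CHUNK_SIZE];
	size_t bytes_left = sizeof(image), offset = 0;

	memset(image, 0xA5, sizeof(image));

	while (bytes_left > 0) {
		size_t xfer = bytes_left > CHUNK_SIZE ? CHUNK_SIZE : bytes_left;
		int last = (bytes_left == xfer);
		size_t done;

		memcpy(staging, image + offset, xfer);
		done = write_chunk(staging, xfer, offset, last);
		if (done == 0)
			break;		/* give up on a stalled transfer */

		bytes_left -= done;
		offset += done;
	}
	return 0;
}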
+ */ + if (timer_pending(&efct->xport->stats_timer)) + del_timer(&efct->xport->stats_timer); + + if (efct_hw_reset(&efct->hw, EFCT_HW_RESET_FIRMWARE)) { + efc_log_info(efct, "failed to reset firmware\n"); + return -EIO; + } + + efc_log_info(efct, "successfully reset firmware.Now resetting port\n"); + + efct_device_detach(efct); + return efct_device_attach(efct); +} + +static int +efct_request_firmware_update(struct efct *efct) +{ + int rc = 0; + u8 file_name[256], fw_change_status = 0; + const struct firmware *fw; + struct efct_hw_grp_hdr *fw_image; + + snprintf(file_name, 256, "%s.grp", efct->model); + + rc = request_firmware(&fw, file_name, &efct->pci->dev); + if (rc) { + efc_log_debug(efct, "Firmware file(%s) not found.\n", file_name); + return rc; + } + + fw_image = (struct efct_hw_grp_hdr *)fw->data; + + if (!strncmp(efct->hw.sli.fw_name[0], fw_image->revision, + strnlen(fw_image->revision, 16))) { + efc_log_debug(efct, + "Skip update. Firmware is already up to date.\n"); + goto exit; + } + + efc_log_info(efct, "Firmware update is initiated. %s -> %s\n", + efct->hw.sli.fw_name[0], fw_image->revision); + + rc = efct_firmware_write(efct, fw->data, fw->size, &fw_change_status); + if (rc) { + efc_log_err(efct, "Firmware update failed. rc = %d\n", rc); + goto exit; + } + + efc_log_info(efct, "Firmware updated successfully\n"); + switch (fw_change_status) { + case 0x00: + efc_log_info(efct, "New firmware is active.\n"); + break; + case 0x01: + efc_log_info(efct, + "System reboot needed to activate the new firmware\n"); + break; + case 0x02: + case 0x03: + efc_log_info(efct, + "firmware reset to activate the new firmware\n"); + efct_fw_reset(efct); + break; + default: + efc_log_info(efct, "Unexpected value change_status:%d\n", + fw_change_status); + break; + } + +exit: + release_firmware(fw); + + return rc; +} + +static void +efct_device_free(struct efct *efct) +{ + if (efct) { + list_del(&efct->list_entry); + kfree(efct); + } +} + +static int +efct_device_interrupts_required(struct efct *efct) +{ + int rc; + + rc = efct_hw_setup(&efct->hw, efct, efct->pci); + if (rc < 0) + return rc; + + return efct->hw.config.n_eq; +} + +static irqreturn_t +efct_intr_thread(int irq, void *handle) +{ + struct efct_intr_context *intr_ctx = handle; + struct efct *efct = intr_ctx->efct; + + efct_hw_process(&efct->hw, intr_ctx->index, efct->max_isr_time_msec); + return IRQ_HANDLED; +} + +static irqreturn_t +efct_intr_msix(int irq, void *handle) +{ + return IRQ_WAKE_THREAD; +} + +static int +efct_setup_msix(struct efct *efct, u32 num_intrs) +{ + int rc = 0, i; + + if (!pci_find_capability(efct->pci, PCI_CAP_ID_MSIX)) { + dev_err(&efct->pci->dev, + "%s : MSI-X not available\n", __func__); + return -EIO; + } + + efct->n_msix_vec = num_intrs; + + rc = pci_alloc_irq_vectors(efct->pci, num_intrs, num_intrs, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); + + if (rc < 0) { + dev_err(&efct->pci->dev, "Failed to alloc irq : %d\n", rc); + return rc; + } + + for (i = 0; i < num_intrs; i++) { + struct efct_intr_context *intr_ctx = NULL; + + intr_ctx = &efct->intr_context[i]; + intr_ctx->efct = efct; + intr_ctx->index = i; + + rc = request_threaded_irq(pci_irq_vector(efct->pci, i), + efct_intr_msix, efct_intr_thread, 0, + EFCT_DRIVER_NAME, intr_ctx); + if (rc) { + dev_err(&efct->pci->dev, + "Failed to register %d vector: %d\n", i, rc); + goto out; + } + } + + return rc; + +out: + while (--i >= 0) + free_irq(pci_irq_vector(efct->pci, i), + &efct->intr_context[i]); + + pci_free_irq_vectors(efct->pci); + return rc; +} + +static struct 
pci_device_id efct_pci_table[] = { + {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G6), 0}, + {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G7), 0}, + {} /* terminate list */ +}; + +static int +efct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct efct *efct = NULL; + int rc; + u32 i, r; + int num_interrupts = 0; + int nid; + + dev_info(&pdev->dev, "%s\n", EFCT_DRIVER_NAME); + + rc = pci_enable_device_mem(pdev); + if (rc) + return rc; + + pci_set_master(pdev); + + rc = pci_set_mwi(pdev); + if (rc) { + dev_info(&pdev->dev, "pci_set_mwi returned %d\n", rc); + goto mwi_out; + } + + rc = pci_request_regions(pdev, EFCT_DRIVER_NAME); + if (rc) { + dev_err(&pdev->dev, "pci_request_regions failed %d\n", rc); + goto req_regions_out; + } + + /* Fetch the Numa node id for this device */ + nid = dev_to_node(&pdev->dev); + if (nid < 0) { + dev_err(&pdev->dev, "Warning Numa node ID is %d\n", nid); + nid = 0; + } + + /* Allocate efct */ + efct = efct_device_alloc(nid); + if (!efct) { + dev_err(&pdev->dev, "Failed to allocate efct\n"); + rc = -ENOMEM; + goto alloc_out; + } + + efct->pci = pdev; + efct->numa_node = nid; + + /* Map all memory BARs */ + for (i = 0, r = 0; i < EFCT_PCI_MAX_REGS; i++) { + if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { + efct->reg[r] = ioremap(pci_resource_start(pdev, i), + pci_resource_len(pdev, i)); + r++; + } + + /* + * If the 64-bit attribute is set, both this BAR and the + * next form the complete address. Skip processing the + * next BAR. + */ + if (pci_resource_flags(pdev, i) & IORESOURCE_MEM_64) + i++; + } + + pci_set_drvdata(pdev, efct); + + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) { + dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n"); + goto dma_mask_out; + } + + num_interrupts = efct_device_interrupts_required(efct); + if (num_interrupts < 0) { + efc_log_err(efct, "efct_device_interrupts_required failed\n"); + rc = -1; + goto dma_mask_out; + } + + /* + * Initialize MSIX interrupts, note, + * efct_setup_msix() enables the interrupt + */ + rc = efct_setup_msix(efct, num_interrupts); + if (rc) { + dev_err(&pdev->dev, "Can't setup msix\n"); + goto dma_mask_out; + } + /* Disable interrupt for now */ + for (i = 0; i < efct->n_msix_vec; i++) { + efc_log_debug(efct, "irq %d disabled\n", i); + disable_irq(pci_irq_vector(efct->pci, i)); + } + + rc = efct_device_attach(efct); + if (rc) + goto attach_out; + + return 0; + +attach_out: + efct_teardown_msix(efct); +dma_mask_out: + pci_set_drvdata(pdev, NULL); + + for (i = 0; i < EFCT_PCI_MAX_REGS; i++) { + if (efct->reg[i]) + iounmap(efct->reg[i]); + } + efct_device_free(efct); +alloc_out: + pci_release_regions(pdev); +req_regions_out: + pci_clear_mwi(pdev); +mwi_out: + pci_disable_device(pdev); + return rc; +} + +static void +efct_pci_remove(struct pci_dev *pdev) +{ + struct efct *efct = pci_get_drvdata(pdev); + u32 i; + + if (!efct) + return; + + efct_device_detach(efct); + + efct_teardown_msix(efct); + + for (i = 0; i < EFCT_PCI_MAX_REGS; i++) { + if (efct->reg[i]) + iounmap(efct->reg[i]); + } + + pci_set_drvdata(pdev, NULL); + + efct_device_free(efct); + + pci_release_regions(pdev); + + pci_disable_device(pdev); +} + +static void +efct_device_prep_for_reset(struct efct *efct, struct pci_dev *pdev) +{ + if (efct) { + efc_log_debug(efct, + "PCI channel disable preparing for reset\n"); + efct_device_detach(efct); + /* Disable interrupt and pci device */ + efct_teardown_msix(efct); + } + pci_disable_device(pdev); +} + +static void 
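Editor's note: efct_pci_probe() unwinds failures with the usual goto ladder, where each error path jumps to the label that releases only what was acquired so far, in reverse order. Below is a self-contained sketch of the idiom with three fake resources; the step_* helpers are illustrative, not kernel functions, and unlike a real probe the demo also releases everything on success before exiting.

#include <stdio.h>
#include <stdlib.h>

static void *step_acquire(const char *name, int fail)
{
	if (fail) {
		printf("acquire %s: failed\n", name);
		return NULL;
	}
	printf("acquire %s\n", name);
	return malloc(1);
}

static void step_release(const char *name, void *res)
{
	printf("release %s\n", name);
	free(res);
}

int main(void)
{
	void *regions, *irqs, *device;
	int rc = -1;

	regions = step_acquire("regions", 0);
	if (!regions)
		goto out;

	irqs = step_acquire("irq vectors", 0);
	if (!irqs)
		goto release_regions;

	device = step_acquire("device state", 1);	/* simulate a failure */
	if (!device)
		goto release_irqs;

	rc = 0;
	step_release("device state", device);
release_irqs:
	step_release("irq vectors", irqs);
release_regions:
	step_release("regions", regions);
out:
	return rc ? EXIT_FAILURE : EXIT_SUCCESS;
}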
+efct_device_prep_for_recover(struct efct *efct) +{ + if (efct) { + efc_log_debug(efct, "PCI channel preparing for recovery\n"); + efct_hw_io_abort_all(&efct->hw); + } +} + +/** + * efct_pci_io_error_detected - method for handling PCI I/O error + * @pdev: pointer to PCI device. + * @state: the current PCI connection state. + * + * This routine is registered to the PCI subsystem for error handling. This + * function is called by the PCI subsystem after a PCI bus error affecting + * this device has been detected. When this routine is invoked, it dispatches + * device error detected handling routine, which will perform the proper + * error detected operation. + * + * Return codes + * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + */ +static pci_ers_result_t +efct_pci_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct efct *efct = pci_get_drvdata(pdev); + pci_ers_result_t rc; + + switch (state) { + case pci_channel_io_normal: + efct_device_prep_for_recover(efct); + rc = PCI_ERS_RESULT_CAN_RECOVER; + break; + case pci_channel_io_frozen: + efct_device_prep_for_reset(efct, pdev); + rc = PCI_ERS_RESULT_NEED_RESET; + break; + case pci_channel_io_perm_failure: + efct_device_detach(efct); + rc = PCI_ERS_RESULT_DISCONNECT; + break; + default: + efc_log_debug(efct, "Unknown PCI error state:0x%x\n", state); + efct_device_prep_for_reset(efct, pdev); + rc = PCI_ERS_RESULT_NEED_RESET; + break; + } + + return rc; +} + +static pci_ers_result_t +efct_pci_io_slot_reset(struct pci_dev *pdev) +{ + int rc; + struct efct *efct = pci_get_drvdata(pdev); + + rc = pci_enable_device_mem(pdev); + if (rc) { + efc_log_err(efct, "failed to enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* + * As the new kernel behavior of pci_restore_state() API call clears + * device saved_state flag, need to save the restored state again. 
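+ * pci_save_state() below sets that flag again so a subsequent
+ * pci_restore_state() still has valid config space data to restore.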
+ */ + + pci_save_state(pdev); + + pci_set_master(pdev); + + rc = efct_setup_msix(efct, efct->n_msix_vec); + if (rc) + efc_log_err(efct, "rc %d returned, IRQ allocation failed\n", + rc); + + /* Perform device reset */ + efct_device_detach(efct); + /* Bring device to online*/ + efct_device_attach(efct); + + return PCI_ERS_RESULT_RECOVERED; +} + +static void +efct_pci_io_resume(struct pci_dev *pdev) +{ + struct efct *efct = pci_get_drvdata(pdev); + + /* Perform device reset */ + efct_device_detach(efct); + /* Bring device to online*/ + efct_device_attach(efct); +} + +MODULE_DEVICE_TABLE(pci, efct_pci_table); + +static struct pci_error_handlers efct_pci_err_handler = { + .error_detected = efct_pci_io_error_detected, + .slot_reset = efct_pci_io_slot_reset, + .resume = efct_pci_io_resume, +}; + +static struct pci_driver efct_pci_driver = { + .name = EFCT_DRIVER_NAME, + .id_table = efct_pci_table, + .probe = efct_pci_probe, + .remove = efct_pci_remove, + .err_handler = &efct_pci_err_handler, +}; + +static +int __init efct_init(void) +{ + int rc; + + rc = efct_device_init(); + if (rc) { + pr_err("efct_device_init failed rc=%d\n", rc); + return rc; + } + + rc = pci_register_driver(&efct_pci_driver); + if (rc) { + pr_err("pci_register_driver failed rc=%d\n", rc); + efct_device_shutdown(); + } + + return rc; +} + +static void __exit efct_exit(void) +{ + pci_unregister_driver(&efct_pci_driver); + efct_device_shutdown(); +} + +module_init(efct_init); +module_exit(efct_exit); +MODULE_VERSION(EFCT_DRIVER_VERSION); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Broadcom"); diff --git a/drivers/scsi/elx/efct/efct_driver.h b/drivers/scsi/elx/efct/efct_driver.h new file mode 100644 index 000000000..0e3c931db --- /dev/null +++ b/drivers/scsi/elx/efct/efct_driver.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#if !defined(__EFCT_DRIVER_H__) +#define __EFCT_DRIVER_H__ + +/*************************************************************************** + * OS specific includes + */ +#include +#include +#include +#include "../include/efc_common.h" +#include "../libefc/efclib.h" +#include "efct_hw.h" +#include "efct_io.h" +#include "efct_xport.h" + +#define EFCT_DRIVER_NAME "efct" +#define EFCT_DRIVER_VERSION "1.0.0.0" + +/* EFCT_DEFAULT_FILTER- + * MRQ filter to segregate the IO flow. 
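+ * The value is a comma-separated list of 32-bit words, one per RQ
+ * filter, parsed by efct_hw_parse_filter(). efct_hw_config_rq() unpacks
+ * each word byte-wise: bits 7:0 R_CTL mask, 15:8 R_CTL match, 23:16
+ * TYPE mask, 31:24 TYPE match. For example, the default 0x01ff22ff
+ * matches R_CTL 0x22 and TYPE 0x01 (ELS request frames) against 0xff
+ * masks; the remaining three entries are zero.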
+ */ +#define EFCT_DEFAULT_FILTER "0x01ff22ff,0,0,0" + +/* EFCT_OS_MAX_ISR_TIME_MSEC - + * maximum time driver code should spend in an interrupt + * or kernel thread context without yielding + */ +#define EFCT_OS_MAX_ISR_TIME_MSEC 1000 + +#define EFCT_FC_MAX_SGL 64 +#define EFCT_FC_DIF_SEED 0 + +/* Watermark */ +#define EFCT_WATERMARK_HIGH_PCT 90 +#define EFCT_WATERMARK_LOW_PCT 80 +#define EFCT_IO_WATERMARK_PER_INITIATOR 8 + +#define EFCT_PCI_MAX_REGS 6 +#define MAX_PCI_INTERRUPTS 16 + +struct efct_intr_context { + struct efct *efct; + u32 index; +}; + +struct efct { + struct pci_dev *pci; + void __iomem *reg[EFCT_PCI_MAX_REGS]; + + u32 n_msix_vec; + bool attached; + bool soft_wwn_enable; + u8 efct_req_fw_upgrade; + struct efct_intr_context intr_context[MAX_PCI_INTERRUPTS]; + u32 numa_node; + + char name[EFC_NAME_LENGTH]; + u32 instance_index; + struct list_head list_entry; + struct efct_scsi_tgt tgt_efct; + struct efct_xport *xport; + struct efc *efcport; + struct Scsi_Host *shost; + int logmask; + u32 max_isr_time_msec; + + const char *desc; + + const char *model; + + struct efct_hw hw; + + u32 rq_selection_policy; + char *filter_def; + int topology; + + /* Look up for target node */ + struct xarray lookup; + + /* + * Target IO timer value: + * Zero: target command timeout disabled. + * Non-zero: Timeout value, in seconds, for target commands + */ + u32 target_io_timer_sec; + + int speed; + struct dentry *sess_debugfs_dir; +}; + +#define FW_WRITE_BUFSIZE (64 * 1024) + +struct efct_fw_write_result { + struct completion done; + int status; + u32 actual_xfer; + u32 change_status; +}; + +extern struct list_head efct_devices; + +#endif /* __EFCT_DRIVER_H__ */ diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c new file mode 100644 index 000000000..5a5525054 --- /dev/null +++ b/drivers/scsi/elx/efct/efct_hw.c @@ -0,0 +1,3580 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#include "efct_driver.h" +#include "efct_hw.h" +#include "efct_unsol.h" + +struct efct_hw_link_stat_cb_arg { + void (*cb)(int status, u32 num_counters, + struct efct_hw_link_stat_counts *counters, void *arg); + void *arg; +}; + +struct efct_hw_host_stat_cb_arg { + void (*cb)(int status, u32 num_counters, + struct efct_hw_host_stat_counts *counters, void *arg); + void *arg; +}; + +struct efct_hw_fw_wr_cb_arg { + void (*cb)(int status, u32 bytes_written, u32 change_status, void *arg); + void *arg; +}; + +struct efct_mbox_rqst_ctx { + int (*callback)(struct efc *efc, int status, u8 *mqe, void *arg); + void *arg; +}; + +static int +efct_hw_link_event_init(struct efct_hw *hw) +{ + hw->link.status = SLI4_LINK_STATUS_MAX; + hw->link.topology = SLI4_LINK_TOPO_NONE; + hw->link.medium = SLI4_LINK_MEDIUM_MAX; + hw->link.speed = 0; + hw->link.loop_map = NULL; + hw->link.fc_id = U32_MAX; + + return 0; +} + +static int +efct_hw_read_max_dump_size(struct efct_hw *hw) +{ + u8 buf[SLI4_BMBX_SIZE]; + struct efct *efct = hw->os; + int rc = 0; + struct sli4_rsp_cmn_set_dump_location *rsp; + + /* attempt to detemine the dump size for function 0 only. 
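+ * Non-zero PCI functions simply return success here and leave
+ * hw->dump_size at its default of zero.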
*/ + if (PCI_FUNC(efct->pci->devfn) != 0) + return rc; + + if (sli_cmd_common_set_dump_location(&hw->sli, buf, 1, 0, NULL, 0)) + return -EIO; + + rsp = (struct sli4_rsp_cmn_set_dump_location *) + (buf + offsetof(struct sli4_cmd_sli_config, payload.embed)); + + rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL); + if (rc != 0) { + efc_log_debug(hw->os, "set dump location cmd failed\n"); + return rc; + } + + hw->dump_size = + le32_to_cpu(rsp->buffer_length_dword) & SLI4_CMN_SET_DUMP_BUFFER_LEN; + + efc_log_debug(hw->os, "Dump size %x\n", hw->dump_size); + + return rc; +} + +static int +__efct_read_topology_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg) +{ + struct sli4_cmd_read_topology *read_topo = + (struct sli4_cmd_read_topology *)mqe; + u8 speed; + struct efc_domain_record drec = {0}; + struct efct *efct = hw->os; + + if (status || le16_to_cpu(read_topo->hdr.status)) { + efc_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status, + le16_to_cpu(read_topo->hdr.status)); + return -EIO; + } + + switch (le32_to_cpu(read_topo->dw2_attentype) & + SLI4_READTOPO_ATTEN_TYPE) { + case SLI4_READ_TOPOLOGY_LINK_UP: + hw->link.status = SLI4_LINK_STATUS_UP; + break; + case SLI4_READ_TOPOLOGY_LINK_DOWN: + hw->link.status = SLI4_LINK_STATUS_DOWN; + break; + case SLI4_READ_TOPOLOGY_LINK_NO_ALPA: + hw->link.status = SLI4_LINK_STATUS_NO_ALPA; + break; + default: + hw->link.status = SLI4_LINK_STATUS_MAX; + break; + } + + switch (read_topo->topology) { + case SLI4_READ_TOPO_NON_FC_AL: + hw->link.topology = SLI4_LINK_TOPO_NON_FC_AL; + break; + case SLI4_READ_TOPO_FC_AL: + hw->link.topology = SLI4_LINK_TOPO_FC_AL; + if (hw->link.status == SLI4_LINK_STATUS_UP) + hw->link.loop_map = hw->loop_map.virt; + hw->link.fc_id = read_topo->acquired_al_pa; + break; + default: + hw->link.topology = SLI4_LINK_TOPO_MAX; + break; + } + + hw->link.medium = SLI4_LINK_MEDIUM_FC; + + speed = (le32_to_cpu(read_topo->currlink_state) & + SLI4_READTOPO_LINKSTATE_SPEED) >> 8; + switch (speed) { + case SLI4_READ_TOPOLOGY_SPEED_1G: + hw->link.speed = 1 * 1000; + break; + case SLI4_READ_TOPOLOGY_SPEED_2G: + hw->link.speed = 2 * 1000; + break; + case SLI4_READ_TOPOLOGY_SPEED_4G: + hw->link.speed = 4 * 1000; + break; + case SLI4_READ_TOPOLOGY_SPEED_8G: + hw->link.speed = 8 * 1000; + break; + case SLI4_READ_TOPOLOGY_SPEED_16G: + hw->link.speed = 16 * 1000; + break; + case SLI4_READ_TOPOLOGY_SPEED_32G: + hw->link.speed = 32 * 1000; + break; + case SLI4_READ_TOPOLOGY_SPEED_64G: + hw->link.speed = 64 * 1000; + break; + case SLI4_READ_TOPOLOGY_SPEED_128G: + hw->link.speed = 128 * 1000; + break; + } + + drec.speed = hw->link.speed; + drec.fc_id = hw->link.fc_id; + drec.is_nport = true; + efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND, &drec); + + return 0; +} + +static int +efct_hw_cb_link(void *ctx, void *e) +{ + struct efct_hw *hw = ctx; + struct sli4_link_event *event = e; + struct efc_domain *d = NULL; + int rc = 0; + struct efct *efct = hw->os; + + efct_hw_link_event_init(hw); + + switch (event->status) { + case SLI4_LINK_STATUS_UP: + + hw->link = *event; + efct->efcport->link_status = EFC_LINK_STATUS_UP; + + if (event->topology == SLI4_LINK_TOPO_NON_FC_AL) { + struct efc_domain_record drec = {0}; + + efc_log_info(hw->os, "Link Up, NPORT, speed is %d\n", + event->speed); + drec.speed = event->speed; + drec.fc_id = event->fc_id; + drec.is_nport = true; + efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND, + &drec); + } else if (event->topology == SLI4_LINK_TOPO_FC_AL) { + u8 buf[SLI4_BMBX_SIZE]; + + efc_log_info(hw->os, "Link 
Up, LOOP, speed is %d\n", + event->speed); + + if (!sli_cmd_read_topology(&hw->sli, buf, + &hw->loop_map)) { + rc = efct_hw_command(hw, buf, EFCT_CMD_NOWAIT, + __efct_read_topology_cb, NULL); + } + + if (rc) + efc_log_debug(hw->os, "READ_TOPOLOGY failed\n"); + } else { + efc_log_info(hw->os, "%s(%#x), speed is %d\n", + "Link Up, unsupported topology ", + event->topology, event->speed); + } + break; + case SLI4_LINK_STATUS_DOWN: + efc_log_info(hw->os, "Link down\n"); + + hw->link.status = event->status; + efct->efcport->link_status = EFC_LINK_STATUS_DOWN; + + d = efct->efcport->domain; + if (d) + efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST, d); + break; + default: + efc_log_debug(hw->os, "unhandled link status %#x\n", + event->status); + break; + } + + return 0; +} + +int +efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev) +{ + u32 i, max_sgl, cpus; + + if (hw->hw_setup_called) + return 0; + + /* + * efct_hw_init() relies on NULL pointers indicating that a structure + * needs allocation. If a structure is non-NULL, efct_hw_init() won't + * free/realloc that memory + */ + memset(hw, 0, sizeof(struct efct_hw)); + + hw->hw_setup_called = true; + + hw->os = os; + + mutex_init(&hw->bmbx_lock); + spin_lock_init(&hw->cmd_lock); + INIT_LIST_HEAD(&hw->cmd_head); + INIT_LIST_HEAD(&hw->cmd_pending); + hw->cmd_head_count = 0; + + /* Create mailbox command ctx pool */ + hw->cmd_ctx_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ, + sizeof(struct efct_command_ctx)); + if (!hw->cmd_ctx_pool) { + efc_log_err(hw->os, "failed to allocate mailbox buffer pool\n"); + return -EIO; + } + + /* Create mailbox request ctx pool for library callback */ + hw->mbox_rqst_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ, + sizeof(struct efct_mbox_rqst_ctx)); + if (!hw->mbox_rqst_pool) { + efc_log_err(hw->os, "failed to allocate mbox request pool\n"); + return -EIO; + } + + spin_lock_init(&hw->io_lock); + INIT_LIST_HEAD(&hw->io_inuse); + INIT_LIST_HEAD(&hw->io_free); + INIT_LIST_HEAD(&hw->io_wait_free); + + atomic_set(&hw->io_alloc_failed_count, 0); + + hw->config.speed = SLI4_LINK_SPEED_AUTO_16_8_4; + if (sli_setup(&hw->sli, hw->os, pdev, ((struct efct *)os)->reg)) { + efc_log_err(hw->os, "SLI setup failed\n"); + return -EIO; + } + + efct_hw_link_event_init(hw); + + sli_callback(&hw->sli, SLI4_CB_LINK, efct_hw_cb_link, hw); + + /* + * Set all the queue sizes to the maximum allowed. + */ + for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) + hw->num_qentries[i] = hw->sli.qinfo.max_qentries[i]; + /* + * Adjust the size of the WQs so that the CQ is twice as big as + * the WQ to allow for 2 completions per IO. This allows us to + * handle multi-phase as well as aborts. + */ + hw->num_qentries[SLI4_QTYPE_WQ] = hw->num_qentries[SLI4_QTYPE_CQ] / 2; + + /* + * The RQ assignment for RQ pair mode. + */ + + hw->config.rq_default_buffer_size = EFCT_HW_RQ_SIZE_PAYLOAD; + hw->config.n_io = hw->sli.ext[SLI4_RSRC_XRI].size; + + cpus = num_possible_cpus(); + hw->config.n_eq = cpus > EFCT_HW_MAX_NUM_EQ ? EFCT_HW_MAX_NUM_EQ : cpus; + + max_sgl = sli_get_max_sgl(&hw->sli) - SLI4_SGE_MAX_RESERVED; + max_sgl = (max_sgl > EFCT_FC_MAX_SGL) ? 
EFCT_FC_MAX_SGL : max_sgl; + hw->config.n_sgl = max_sgl; + + (void)efct_hw_read_max_dump_size(hw); + + return 0; +} + +static void +efct_logfcfi(struct efct_hw *hw, u32 j, u32 i, u32 id) +{ + efc_log_info(hw->os, + "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n", + j, hw->config.filter_def[j], i, id); +} + +static inline void +efct_hw_init_free_io(struct efct_hw_io *io) +{ + /* + * Set io->done to NULL, to avoid any callbacks, should + * a completion be received for one of these IOs + */ + io->done = NULL; + io->abort_done = NULL; + io->status_saved = false; + io->abort_in_progress = false; + io->type = 0xFFFF; + io->wq = NULL; +} + +static bool efct_hw_iotype_is_originator(u16 io_type) +{ + switch (io_type) { + case EFCT_HW_FC_CT: + case EFCT_HW_ELS_REQ: + return true; + default: + return false; + } +} + +static void +efct_hw_io_restore_sgl(struct efct_hw *hw, struct efct_hw_io *io) +{ + /* Restore the default */ + io->sgl = &io->def_sgl; + io->sgl_count = io->def_sgl_count; +} + +static void +efct_hw_wq_process_io(void *arg, u8 *cqe, int status) +{ + struct efct_hw_io *io = arg; + struct efct_hw *hw = io->hw; + struct sli4_fc_wcqe *wcqe = (void *)cqe; + u32 len = 0; + u32 ext = 0; + + /* clear xbusy flag if WCQE[XB] is clear */ + if (io->xbusy && (wcqe->flags & SLI4_WCQE_XB) == 0) + io->xbusy = false; + + /* get extended CQE status */ + switch (io->type) { + case EFCT_HW_BLS_ACC: + case EFCT_HW_BLS_RJT: + break; + case EFCT_HW_ELS_REQ: + sli_fc_els_did(&hw->sli, cqe, &ext); + len = sli_fc_response_length(&hw->sli, cqe); + break; + case EFCT_HW_ELS_RSP: + case EFCT_HW_FC_CT_RSP: + break; + case EFCT_HW_FC_CT: + len = sli_fc_response_length(&hw->sli, cqe); + break; + case EFCT_HW_IO_TARGET_WRITE: + len = sli_fc_io_length(&hw->sli, cqe); + break; + case EFCT_HW_IO_TARGET_READ: + len = sli_fc_io_length(&hw->sli, cqe); + break; + case EFCT_HW_IO_TARGET_RSP: + break; + case EFCT_HW_IO_DNRX_REQUEUE: + /* release the count for re-posting the buffer */ + /* efct_hw_io_free(hw, io); */ + break; + default: + efc_log_err(hw->os, "unhandled io type %#x for XRI 0x%x\n", + io->type, io->indicator); + break; + } + if (status) { + ext = sli_fc_ext_status(&hw->sli, cqe); + /* + * If we're not an originator IO, and XB is set, then issue + * abort for the IO from within the HW + */ + if (efct_hw_iotype_is_originator(io->type) && + wcqe->flags & SLI4_WCQE_XB) { + int rc; + + efc_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", + io->indicator, io->reqtag); + + /* + * Because targets may send a response when the IO + * completes using the same XRI, we must wait for the + * XRI_ABORTED CQE to issue the IO callback + */ + rc = efct_hw_io_abort(hw, io, false, NULL, NULL); + if (rc == 0) { + /* + * latch status to return after abort is + * complete + */ + io->status_saved = true; + io->saved_status = status; + io->saved_ext = ext; + io->saved_len = len; + goto exit_efct_hw_wq_process_io; + } else if (rc == -EINPROGRESS) { + /* + * Already being aborted by someone else (ABTS + * perhaps). Just return original + * error. 
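+ * In that case execution falls through and io->done below is
+ * invoked with the original status/len/ext values untouched.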
+ */ + efc_log_debug(hw->os, "%s%#x tag=%#x\n", + "abort in progress xri=", + io->indicator, io->reqtag); + + } else { + /* Failed to abort for some other reason, log + * error + */ + efc_log_debug(hw->os, "%s%#x tag=%#x rc=%d\n", + "Failed to abort xri=", + io->indicator, io->reqtag, rc); + } + } + } + + if (io->done) { + efct_hw_done_t done = io->done; + + io->done = NULL; + + if (io->status_saved) { + /* use latched status if exists */ + status = io->saved_status; + len = io->saved_len; + ext = io->saved_ext; + io->status_saved = false; + } + + /* Restore default SGL */ + efct_hw_io_restore_sgl(hw, io); + done(io, len, status, ext, io->arg); + } + +exit_efct_hw_wq_process_io: + return; +} + +static int +efct_hw_setup_io(struct efct_hw *hw) +{ + u32 i = 0; + struct efct_hw_io *io = NULL; + uintptr_t xfer_virt = 0; + uintptr_t xfer_phys = 0; + u32 index; + bool new_alloc = true; + struct efc_dma *dma; + struct efct *efct = hw->os; + + if (!hw->io) { + hw->io = kmalloc_array(hw->config.n_io, sizeof(io), GFP_KERNEL); + if (!hw->io) + return -ENOMEM; + + memset(hw->io, 0, hw->config.n_io * sizeof(io)); + + for (i = 0; i < hw->config.n_io; i++) { + hw->io[i] = kzalloc(sizeof(*io), GFP_KERNEL); + if (!hw->io[i]) + goto error; + } + + /* Create WQE buffs for IO */ + hw->wqe_buffs = kzalloc((hw->config.n_io * hw->sli.wqe_size), + GFP_KERNEL); + if (!hw->wqe_buffs) { + kfree(hw->io); + return -ENOMEM; + } + + } else { + /* re-use existing IOs, including SGLs */ + new_alloc = false; + } + + if (new_alloc) { + dma = &hw->xfer_rdy; + dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io; + dma->virt = dma_alloc_coherent(&efct->pci->dev, + dma->size, &dma->phys, GFP_KERNEL); + if (!dma->virt) + return -ENOMEM; + } + xfer_virt = (uintptr_t)hw->xfer_rdy.virt; + xfer_phys = hw->xfer_rdy.phys; + + /* Initialize the pool of HW IO objects */ + for (i = 0; i < hw->config.n_io; i++) { + struct hw_wq_callback *wqcb; + + io = hw->io[i]; + + /* initialize IO fields */ + io->hw = hw; + + /* Assign a WQE buff */ + io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.wqe_size]; + + /* Allocate the request tag for this IO */ + wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_io, io); + if (!wqcb) { + efc_log_err(hw->os, "can't allocate request tag\n"); + return -ENOSPC; + } + io->reqtag = wqcb->instance_index; + + /* Now for the fields that are initialized on each free */ + efct_hw_init_free_io(io); + + /* The XB flag isn't cleared on IO free, so init to zero */ + io->xbusy = 0; + + if (sli_resource_alloc(&hw->sli, SLI4_RSRC_XRI, + &io->indicator, &index)) { + efc_log_err(hw->os, + "sli_resource_alloc failed @ %d\n", i); + return -ENOMEM; + } + + if (new_alloc) { + dma = &io->def_sgl; + dma->size = hw->config.n_sgl * + sizeof(struct sli4_sge); + dma->virt = dma_alloc_coherent(&efct->pci->dev, + dma->size, &dma->phys, + GFP_KERNEL); + if (!dma->virt) { + efc_log_err(hw->os, "dma_alloc fail %d\n", i); + memset(&io->def_sgl, 0, + sizeof(struct efc_dma)); + return -ENOMEM; + } + } + io->def_sgl_count = hw->config.n_sgl; + io->sgl = &io->def_sgl; + io->sgl_count = io->def_sgl_count; + + if (hw->xfer_rdy.size) { + io->xfer_rdy.virt = (void *)xfer_virt; + io->xfer_rdy.phys = xfer_phys; + io->xfer_rdy.size = sizeof(struct fcp_txrdy); + + xfer_virt += sizeof(struct fcp_txrdy); + xfer_phys += sizeof(struct fcp_txrdy); + } + } + + return 0; +error: + for (i = 0; i < hw->config.n_io && hw->io[i]; i++) { + kfree(hw->io[i]); + hw->io[i] = NULL; + } + + kfree(hw->io); + hw->io = NULL; + + return -ENOMEM; +} + +static int 
+efct_hw_init_prereg_io(struct efct_hw *hw) +{ + u32 i, idx = 0; + struct efct_hw_io *io = NULL; + u8 cmd[SLI4_BMBX_SIZE]; + int rc = 0; + u32 n_rem; + u32 n = 0; + u32 sgls_per_request = 256; + struct efc_dma **sgls = NULL; + struct efc_dma req; + struct efct *efct = hw->os; + + sgls = kmalloc_array(sgls_per_request, sizeof(*sgls), GFP_KERNEL); + if (!sgls) + return -ENOMEM; + + memset(&req, 0, sizeof(struct efc_dma)); + req.size = 32 + sgls_per_request * 16; + req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys, + GFP_KERNEL); + if (!req.virt) { + kfree(sgls); + return -ENOMEM; + } + + for (n_rem = hw->config.n_io; n_rem; n_rem -= n) { + /* Copy address of SGL's into local sgls[] array, break + * out if the xri is not contiguous. + */ + u32 min = (sgls_per_request < n_rem) ? sgls_per_request : n_rem; + + for (n = 0; n < min; n++) { + /* Check that we have contiguous xri values */ + if (n > 0) { + if (hw->io[idx + n]->indicator != + hw->io[idx + n - 1]->indicator + 1) + break; + } + + sgls[n] = hw->io[idx + n]->sgl; + } + + if (sli_cmd_post_sgl_pages(&hw->sli, cmd, + hw->io[idx]->indicator, n, sgls, NULL, &req)) { + rc = -EIO; + break; + } + + rc = efct_hw_command(hw, cmd, EFCT_CMD_POLL, NULL, NULL); + if (rc) { + efc_log_err(hw->os, "SGL post failed, rc=%d\n", rc); + break; + } + + /* Add to tail if successful */ + for (i = 0; i < n; i++, idx++) { + io = hw->io[idx]; + io->state = EFCT_HW_IO_STATE_FREE; + INIT_LIST_HEAD(&io->list_entry); + list_add_tail(&io->list_entry, &hw->io_free); + } + } + + dma_free_coherent(&efct->pci->dev, req.size, req.virt, req.phys); + memset(&req, 0, sizeof(struct efc_dma)); + kfree(sgls); + + return rc; +} + +static int +efct_hw_init_io(struct efct_hw *hw) +{ + u32 i, idx = 0; + bool prereg = false; + struct efct_hw_io *io = NULL; + int rc = 0; + + prereg = hw->sli.params.sgl_pre_registered; + + if (prereg) + return efct_hw_init_prereg_io(hw); + + for (i = 0; i < hw->config.n_io; i++, idx++) { + io = hw->io[idx]; + io->state = EFCT_HW_IO_STATE_FREE; + INIT_LIST_HEAD(&io->list_entry); + list_add_tail(&io->list_entry, &hw->io_free); + } + + return rc; +} + +static int +efct_hw_config_set_fdt_xfer_hint(struct efct_hw *hw, u32 fdt_xfer_hint) +{ + int rc = 0; + u8 buf[SLI4_BMBX_SIZE]; + struct sli4_rqst_cmn_set_features_set_fdt_xfer_hint param; + + memset(¶m, 0, sizeof(param)); + param.fdt_xfer_hint = cpu_to_le32(fdt_xfer_hint); + /* build the set_features command */ + sli_cmd_common_set_features(&hw->sli, buf, + SLI4_SET_FEATURES_SET_FTD_XFER_HINT, sizeof(param), ¶m); + + rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL); + if (rc) + efc_log_warn(hw->os, "set FDT hint %d failed: %d\n", + fdt_xfer_hint, rc); + else + efc_log_info(hw->os, "Set FTD transfer hint to %d\n", + le32_to_cpu(param.fdt_xfer_hint)); + + return rc; +} + +static int +efct_hw_config_rq(struct efct_hw *hw) +{ + u32 min_rq_count, i, rc; + struct sli4_cmd_rq_cfg rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG]; + u8 buf[SLI4_BMBX_SIZE]; + + efc_log_info(hw->os, "using REG_FCFI standard\n"); + + /* + * Set the filter match/mask values from hw's + * filter_def values + */ + for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) { + rq_cfg[i].rq_id = cpu_to_le16(0xffff); + rq_cfg[i].r_ctl_mask = (u8)hw->config.filter_def[i]; + rq_cfg[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 8); + rq_cfg[i].type_mask = (u8)(hw->config.filter_def[i] >> 16); + rq_cfg[i].type_match = (u8)(hw->config.filter_def[i] >> 24); + } + + /* + * Update the rq_id's of the FCF configuration + * (don't update more 
than the number of rq_cfg + * elements) + */ + min_rq_count = (hw->hw_rq_count < SLI4_CMD_REG_FCFI_NUM_RQ_CFG) ? + hw->hw_rq_count : SLI4_CMD_REG_FCFI_NUM_RQ_CFG; + for (i = 0; i < min_rq_count; i++) { + struct hw_rq *rq = hw->hw_rq[i]; + u32 j; + + for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) { + u32 mask = (rq->filter_mask != 0) ? + rq->filter_mask : 1; + + if (!(mask & (1U << j))) + continue; + + rq_cfg[i].rq_id = cpu_to_le16(rq->hdr->id); + efct_logfcfi(hw, j, i, rq->hdr->id); + } + } + + rc = -EIO; + if (!sli_cmd_reg_fcfi(&hw->sli, buf, 0, rq_cfg)) + rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL); + + if (rc != 0) { + efc_log_err(hw->os, "FCFI registration failed\n"); + return rc; + } + hw->fcf_indicator = + le16_to_cpu(((struct sli4_cmd_reg_fcfi *)buf)->fcfi); + + return rc; +} + +static int +efct_hw_config_mrq(struct efct_hw *hw, u8 mode, u16 fcf_index) +{ + u8 buf[SLI4_BMBX_SIZE], mrq_bitmask = 0; + struct hw_rq *rq; + struct sli4_cmd_reg_fcfi_mrq *rsp = NULL; + struct sli4_cmd_rq_cfg rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG]; + u32 rc, i; + + if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) + goto issue_cmd; + + /* Set the filter match/mask values from hw's filter_def values */ + for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) { + rq_filter[i].rq_id = cpu_to_le16(0xffff); + rq_filter[i].type_mask = (u8)hw->config.filter_def[i]; + rq_filter[i].type_match = (u8)(hw->config.filter_def[i] >> 8); + rq_filter[i].r_ctl_mask = (u8)(hw->config.filter_def[i] >> 16); + rq_filter[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 24); + } + + rq = hw->hw_rq[0]; + rq_filter[0].rq_id = cpu_to_le16(rq->hdr->id); + rq_filter[1].rq_id = cpu_to_le16(rq->hdr->id); + + mrq_bitmask = 0x2; +issue_cmd: + efc_log_debug(hw->os, "Issue reg_fcfi_mrq count:%d policy:%d mode:%d\n", + hw->hw_rq_count, hw->config.rq_selection_policy, mode); + /* Invoke REG_FCFI_MRQ */ + rc = sli_cmd_reg_fcfi_mrq(&hw->sli, buf, mode, fcf_index, + hw->config.rq_selection_policy, mrq_bitmask, + hw->hw_mrq_count, rq_filter); + if (rc) { + efc_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed\n"); + return -EIO; + } + + rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL); + + rsp = (struct sli4_cmd_reg_fcfi_mrq *)buf; + + if ((rc) || (le16_to_cpu(rsp->hdr.status))) { + efc_log_err(hw->os, "FCFI MRQ reg failed. cmd=%x status=%x\n", + rsp->hdr.command, le16_to_cpu(rsp->hdr.status)); + return -EIO; + } + + if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) + hw->fcf_indicator = le16_to_cpu(rsp->fcfi); + + return 0; +} + +static void +efct_hw_queue_hash_add(struct efct_queue_hash *hash, + u16 id, u16 index) +{ + u32 hash_index = id & (EFCT_HW_Q_HASH_SIZE - 1); + + /* + * Since the hash is always bigger than the number of queues, then we + * never have to worry about an infinite loop. 
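+ * This is open addressing with linear probing: start at
+ * id & (EFCT_HW_Q_HASH_SIZE - 1) and step to the next slot (modulo the
+ * hash size) until a free entry is found. Colliding queue ids therefore
+ * land in adjacent slots, and efct_hw_queue_hash_find() walks the same
+ * probe sequence when looking an id back up.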
+ */ + while (hash[hash_index].in_use) + hash_index = (hash_index + 1) & (EFCT_HW_Q_HASH_SIZE - 1); + + /* not used, claim the entry */ + hash[hash_index].id = id; + hash[hash_index].in_use = true; + hash[hash_index].index = index; +} + +static int +efct_hw_config_sli_port_health_check(struct efct_hw *hw, u8 query, u8 enable) +{ + int rc = 0; + u8 buf[SLI4_BMBX_SIZE]; + struct sli4_rqst_cmn_set_features_health_check param; + u32 health_check_flag = 0; + + memset(¶m, 0, sizeof(param)); + + if (enable) + health_check_flag |= SLI4_RQ_HEALTH_CHECK_ENABLE; + + if (query) + health_check_flag |= SLI4_RQ_HEALTH_CHECK_QUERY; + + param.health_check_dword = cpu_to_le32(health_check_flag); + + /* build the set_features command */ + sli_cmd_common_set_features(&hw->sli, buf, + SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK, sizeof(param), ¶m); + + rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL); + if (rc) + efc_log_err(hw->os, "efct_hw_command returns %d\n", rc); + else + efc_log_debug(hw->os, "SLI Port Health Check is enabled\n"); + + return rc; +} + +int +efct_hw_init(struct efct_hw *hw) +{ + int rc; + u32 i = 0; + int rem_count; + unsigned long flags = 0; + struct efct_hw_io *temp; + struct efc_dma *dma; + + /* + * Make sure the command lists are empty. If this is start-of-day, + * they'll be empty since they were just initialized in efct_hw_setup. + * If we've just gone through a reset, the command and command pending + * lists should have been cleaned up as part of the reset + * (efct_hw_reset()). + */ + spin_lock_irqsave(&hw->cmd_lock, flags); + if (!list_empty(&hw->cmd_head)) { + spin_unlock_irqrestore(&hw->cmd_lock, flags); + efc_log_err(hw->os, "command found on cmd list\n"); + return -EIO; + } + if (!list_empty(&hw->cmd_pending)) { + spin_unlock_irqrestore(&hw->cmd_lock, flags); + efc_log_err(hw->os, "command found on pending list\n"); + return -EIO; + } + spin_unlock_irqrestore(&hw->cmd_lock, flags); + + /* Free RQ buffers if prevously allocated */ + efct_hw_rx_free(hw); + + /* + * The IO queues must be initialized here for the reset case. The + * efct_hw_init_io() function will re-add the IOs to the free list. + * The cmd_head list should be OK since we free all entries in + * efct_hw_command_cancel() that is called in the efct_hw_reset(). + */ + + /* If we are in this function due to a reset, there may be stale items + * on lists that need to be removed. Clean them up. + */ + rem_count = 0; + while ((!list_empty(&hw->io_wait_free))) { + rem_count++; + temp = list_first_entry(&hw->io_wait_free, struct efct_hw_io, + list_entry); + list_del_init(&temp->list_entry); + } + if (rem_count > 0) + efc_log_debug(hw->os, "rmvd %d items from io_wait_free list\n", + rem_count); + + rem_count = 0; + while ((!list_empty(&hw->io_inuse))) { + rem_count++; + temp = list_first_entry(&hw->io_inuse, struct efct_hw_io, + list_entry); + list_del_init(&temp->list_entry); + } + if (rem_count > 0) + efc_log_debug(hw->os, "rmvd %d items from io_inuse list\n", + rem_count); + + rem_count = 0; + while ((!list_empty(&hw->io_free))) { + rem_count++; + temp = list_first_entry(&hw->io_free, struct efct_hw_io, + list_entry); + list_del_init(&temp->list_entry); + } + if (rem_count > 0) + efc_log_debug(hw->os, "rmvd %d items from io_free list\n", + rem_count); + + /* If MRQ not required, Make sure we dont request feature. 
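+ * A single-RQ configuration clears SLI4_REQFEAT_MRQP so the port is
+ * not asked for multi-RQ posting it would never use.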
*/ + if (hw->config.n_rq == 1) + hw->sli.features &= (~SLI4_REQFEAT_MRQP); + + if (sli_init(&hw->sli)) { + efc_log_err(hw->os, "SLI failed to initialize\n"); + return -EIO; + } + + if (hw->sliport_healthcheck) { + rc = efct_hw_config_sli_port_health_check(hw, 0, 1); + if (rc != 0) { + efc_log_err(hw->os, "Enable port Health check fail\n"); + return rc; + } + } + + /* + * Set FDT transfer hint, only works on Lancer + */ + if (hw->sli.if_type == SLI4_INTF_IF_TYPE_2) { + /* + * Non-fatal error. In particular, we can disregard failure to + * set EFCT_HW_FDT_XFER_HINT on devices with legacy firmware + * that do not support EFCT_HW_FDT_XFER_HINT feature. + */ + efct_hw_config_set_fdt_xfer_hint(hw, EFCT_HW_FDT_XFER_HINT); + } + + /* zero the hashes */ + memset(hw->cq_hash, 0, sizeof(hw->cq_hash)); + efc_log_debug(hw->os, "Max CQs %d, hash size = %d\n", + EFCT_HW_MAX_NUM_CQ, EFCT_HW_Q_HASH_SIZE); + + memset(hw->rq_hash, 0, sizeof(hw->rq_hash)); + efc_log_debug(hw->os, "Max RQs %d, hash size = %d\n", + EFCT_HW_MAX_NUM_RQ, EFCT_HW_Q_HASH_SIZE); + + memset(hw->wq_hash, 0, sizeof(hw->wq_hash)); + efc_log_debug(hw->os, "Max WQs %d, hash size = %d\n", + EFCT_HW_MAX_NUM_WQ, EFCT_HW_Q_HASH_SIZE); + + rc = efct_hw_init_queues(hw); + if (rc) + return rc; + + rc = efct_hw_map_wq_cpu(hw); + if (rc) + return rc; + + /* Allocate and p_st RQ buffers */ + rc = efct_hw_rx_allocate(hw); + if (rc) { + efc_log_err(hw->os, "rx_allocate failed\n"); + return rc; + } + + rc = efct_hw_rx_post(hw); + if (rc) { + efc_log_err(hw->os, "WARNING - error posting RQ buffers\n"); + return rc; + } + + if (hw->config.n_eq == 1) { + rc = efct_hw_config_rq(hw); + if (rc) { + efc_log_err(hw->os, "config rq failed %d\n", rc); + return rc; + } + } else { + rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0); + if (rc != 0) { + efc_log_err(hw->os, "REG_FCFI_MRQ FCFI reg failed\n"); + return rc; + } + + rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0); + if (rc != 0) { + efc_log_err(hw->os, "REG_FCFI_MRQ MRQ reg failed\n"); + return rc; + } + } + + /* + * Allocate the WQ request tag pool, if not previously allocated + * (the request tag value is 16 bits, thus the pool allocation size + * of 64k) + */ + hw->wq_reqtag_pool = efct_hw_reqtag_pool_alloc(hw); + if (!hw->wq_reqtag_pool) { + efc_log_err(hw->os, "efct_hw_reqtag_pool_alloc failed\n"); + return -ENOMEM; + } + + rc = efct_hw_setup_io(hw); + if (rc) { + efc_log_err(hw->os, "IO allocation failure\n"); + return rc; + } + + rc = efct_hw_init_io(hw); + if (rc) { + efc_log_err(hw->os, "IO initialization failure\n"); + return rc; + } + + dma = &hw->loop_map; + dma->size = SLI4_MIN_LOOP_MAP_BYTES; + dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys, + GFP_KERNEL); + if (!dma->virt) + return -EIO; + + /* + * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ + * entries + */ + for (i = 0; i < hw->eq_count; i++) + sli_queue_arm(&hw->sli, &hw->eq[i], true); + + /* + * Initialize RQ hash + */ + for (i = 0; i < hw->rq_count; i++) + efct_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i); + + /* + * Initialize WQ hash + */ + for (i = 0; i < hw->wq_count; i++) + efct_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i); + + /* + * Arming the CQ allows (e.g.) 
MQ completions to write CQ entries + */ + for (i = 0; i < hw->cq_count; i++) { + efct_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i); + sli_queue_arm(&hw->sli, &hw->cq[i], true); + } + + /* Set RQ process limit*/ + for (i = 0; i < hw->hw_rq_count; i++) { + struct hw_rq *rq = hw->hw_rq[i]; + + hw->cq[rq->cq->instance].proc_limit = hw->config.n_io / 2; + } + + /* record the fact that the queues are functional */ + hw->state = EFCT_HW_STATE_ACTIVE; + /* + * Allocate a HW IOs for send frame. + */ + hw->hw_wq[0]->send_frame_io = efct_hw_io_alloc(hw); + if (!hw->hw_wq[0]->send_frame_io) + efc_log_err(hw->os, "alloc for send_frame_io failed\n"); + + /* Initialize send frame sequence id */ + atomic_set(&hw->send_frame_seq_id, 0); + + return 0; +} + +int +efct_hw_parse_filter(struct efct_hw *hw, void *value) +{ + int rc = 0; + char *p = NULL; + char *token; + u32 idx = 0; + + for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) + hw->config.filter_def[idx] = 0; + + p = kstrdup(value, GFP_KERNEL); + if (!p || !*p) { + efc_log_err(hw->os, "p is NULL\n"); + return -ENOMEM; + } + + idx = 0; + while ((token = strsep(&p, ",")) && *token) { + if (kstrtou32(token, 0, &hw->config.filter_def[idx++])) + efc_log_err(hw->os, "kstrtoint failed\n"); + + if (!p || !*p) + break; + + if (idx == ARRAY_SIZE(hw->config.filter_def)) + break; + } + kfree(p); + + return rc; +} + +u64 +efct_get_wwnn(struct efct_hw *hw) +{ + struct sli4 *sli = &hw->sli; + u8 p[8]; + + memcpy(p, sli->wwnn, sizeof(p)); + return get_unaligned_be64(p); +} + +u64 +efct_get_wwpn(struct efct_hw *hw) +{ + struct sli4 *sli = &hw->sli; + u8 p[8]; + + memcpy(p, sli->wwpn, sizeof(p)); + return get_unaligned_be64(p); +} + +static struct efc_hw_rq_buffer * +efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count, + u32 size) +{ + struct efct *efct = hw->os; + struct efc_hw_rq_buffer *rq_buf = NULL; + struct efc_hw_rq_buffer *prq; + u32 i; + + if (!count) + return NULL; + + rq_buf = kmalloc_array(count, sizeof(*rq_buf), GFP_KERNEL); + if (!rq_buf) + return NULL; + memset(rq_buf, 0, sizeof(*rq_buf) * count); + + for (i = 0, prq = rq_buf; i < count; i ++, prq++) { + prq->rqindex = rqindex; + prq->dma.size = size; + prq->dma.virt = dma_alloc_coherent(&efct->pci->dev, + prq->dma.size, + &prq->dma.phys, + GFP_KERNEL); + if (!prq->dma.virt) { + efc_log_err(hw->os, "DMA allocation failed\n"); + kfree(rq_buf); + return NULL; + } + } + return rq_buf; +} + +static void +efct_hw_rx_buffer_free(struct efct_hw *hw, + struct efc_hw_rq_buffer *rq_buf, + u32 count) +{ + struct efct *efct = hw->os; + u32 i; + struct efc_hw_rq_buffer *prq; + + if (rq_buf) { + for (i = 0, prq = rq_buf; i < count; i++, prq++) { + dma_free_coherent(&efct->pci->dev, + prq->dma.size, prq->dma.virt, + prq->dma.phys); + memset(&prq->dma, 0, sizeof(struct efc_dma)); + } + + kfree(rq_buf); + } +} + +int +efct_hw_rx_allocate(struct efct_hw *hw) +{ + struct efct *efct = hw->os; + u32 i; + int rc = 0; + u32 rqindex = 0; + u32 hdr_size = EFCT_HW_RQ_SIZE_HDR; + u32 payload_size = hw->config.rq_default_buffer_size; + + rqindex = 0; + + for (i = 0; i < hw->hw_rq_count; i++) { + struct hw_rq *rq = hw->hw_rq[i]; + + /* Allocate header buffers */ + rq->hdr_buf = efct_hw_rx_buffer_alloc(hw, rqindex, + rq->entry_count, + hdr_size); + if (!rq->hdr_buf) { + efc_log_err(efct, "rx_buffer_alloc hdr_buf failed\n"); + rc = -EIO; + break; + } + + efc_log_debug(hw->os, + "rq[%2d] rq_id %02d header %4d by %4d bytes\n", + i, rq->hdr->id, rq->entry_count, hdr_size); + + rqindex++; + + /* Allocate 
payload buffers */ + rq->payload_buf = efct_hw_rx_buffer_alloc(hw, rqindex, + rq->entry_count, + payload_size); + if (!rq->payload_buf) { + efc_log_err(efct, "rx_buffer_alloc fb_buf failed\n"); + rc = -EIO; + break; + } + efc_log_debug(hw->os, + "rq[%2d] rq_id %02d default %4d by %4d bytes\n", + i, rq->data->id, rq->entry_count, payload_size); + rqindex++; + } + + return rc ? -EIO : 0; +} + +int +efct_hw_rx_post(struct efct_hw *hw) +{ + u32 i; + u32 idx; + u32 rq_idx; + int rc = 0; + + if (!hw->seq_pool) { + u32 count = 0; + + for (i = 0; i < hw->hw_rq_count; i++) + count += hw->hw_rq[i]->entry_count; + + hw->seq_pool = kmalloc_array(count, + sizeof(struct efc_hw_sequence), GFP_KERNEL); + if (!hw->seq_pool) + return -ENOMEM; + } + + /* + * In RQ pair mode, we MUST post the header and payload buffer at the + * same time. + */ + for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) { + struct hw_rq *rq = hw->hw_rq[rq_idx]; + + for (i = 0; i < rq->entry_count - 1; i++) { + struct efc_hw_sequence *seq; + + seq = hw->seq_pool + idx; + idx++; + seq->header = &rq->hdr_buf[i]; + seq->payload = &rq->payload_buf[i]; + rc = efct_hw_sequence_free(hw, seq); + if (rc) + break; + } + if (rc) + break; + } + + if (rc && hw->seq_pool) + kfree(hw->seq_pool); + + return rc; +} + +void +efct_hw_rx_free(struct efct_hw *hw) +{ + u32 i; + + /* Free hw_rq buffers */ + for (i = 0; i < hw->hw_rq_count; i++) { + struct hw_rq *rq = hw->hw_rq[i]; + + if (rq) { + efct_hw_rx_buffer_free(hw, rq->hdr_buf, + rq->entry_count); + rq->hdr_buf = NULL; + efct_hw_rx_buffer_free(hw, rq->payload_buf, + rq->entry_count); + rq->payload_buf = NULL; + } + } +} + +static int +efct_hw_cmd_submit_pending(struct efct_hw *hw) +{ + int rc = 0; + + /* Assumes lock held */ + + /* Only submit MQE if there's room */ + while (hw->cmd_head_count < (EFCT_HW_MQ_DEPTH - 1) && + !list_empty(&hw->cmd_pending)) { + struct efct_command_ctx *ctx; + + ctx = list_first_entry(&hw->cmd_pending, + struct efct_command_ctx, list_entry); + if (!ctx) + break; + + list_del_init(&ctx->list_entry); + + list_add_tail(&ctx->list_entry, &hw->cmd_head); + hw->cmd_head_count++; + if (sli_mq_write(&hw->sli, hw->mq, ctx->buf) < 0) { + efc_log_debug(hw->os, + "sli_queue_write failed: %d\n", rc); + rc = -EIO; + break; + } + } + return rc; +} + +int +efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, void *arg) +{ + int rc = -EIO; + unsigned long flags = 0; + void *bmbx = NULL; + + /* + * If the chip is in an error state (UE'd) then reject this mailbox + * command. + */ + if (sli_fw_error_status(&hw->sli) > 0) { + efc_log_crit(hw->os, "Chip in an error state - reset needed\n"); + efc_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n", + sli_reg_read_status(&hw->sli), + sli_reg_read_err1(&hw->sli), + sli_reg_read_err2(&hw->sli)); + + return -EIO; + } + + /* + * Send a mailbox command to the hardware, and either wait for + * a completion (EFCT_CMD_POLL) or get an optional asynchronous + * completion (EFCT_CMD_NOWAIT). 
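+ * Polled callers pass NULL cb/arg and read the response back from the
+ * same buffer, e.g. efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL)
+ * in efct_hw_read_max_dump_size(). Asynchronous callers supply a
+ * callback instead, e.g. efct_hw_command(hw, buf, EFCT_CMD_NOWAIT,
+ * __efct_read_topology_cb, NULL) in efct_hw_cb_link(); the command is
+ * queued on cmd_pending and written to the MQ as space allows.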
+ */ + + if (opts == EFCT_CMD_POLL) { + mutex_lock(&hw->bmbx_lock); + bmbx = hw->sli.bmbx.virt; + + memcpy(bmbx, cmd, SLI4_BMBX_SIZE); + + if (sli_bmbx_command(&hw->sli) == 0) { + rc = 0; + memcpy(cmd, bmbx, SLI4_BMBX_SIZE); + } + mutex_unlock(&hw->bmbx_lock); + } else if (opts == EFCT_CMD_NOWAIT) { + struct efct_command_ctx *ctx = NULL; + + if (hw->state != EFCT_HW_STATE_ACTIVE) { + efc_log_err(hw->os, "Can't send command, HW state=%d\n", + hw->state); + return -EIO; + } + + ctx = mempool_alloc(hw->cmd_ctx_pool, GFP_ATOMIC); + if (!ctx) + return -ENOSPC; + + memset(ctx, 0, sizeof(struct efct_command_ctx)); + + if (cb) { + ctx->cb = cb; + ctx->arg = arg; + } + + memcpy(ctx->buf, cmd, SLI4_BMBX_SIZE); + ctx->ctx = hw; + + spin_lock_irqsave(&hw->cmd_lock, flags); + + /* Add to pending list */ + INIT_LIST_HEAD(&ctx->list_entry); + list_add_tail(&ctx->list_entry, &hw->cmd_pending); + + /* Submit as much of the pending list as we can */ + rc = efct_hw_cmd_submit_pending(hw); + + spin_unlock_irqrestore(&hw->cmd_lock, flags); + } + + return rc; +} + +static int +efct_hw_command_process(struct efct_hw *hw, int status, u8 *mqe, + size_t size) +{ + struct efct_command_ctx *ctx = NULL; + unsigned long flags = 0; + + spin_lock_irqsave(&hw->cmd_lock, flags); + if (!list_empty(&hw->cmd_head)) { + ctx = list_first_entry(&hw->cmd_head, + struct efct_command_ctx, list_entry); + list_del_init(&ctx->list_entry); + } + if (!ctx) { + efc_log_err(hw->os, "no command context\n"); + spin_unlock_irqrestore(&hw->cmd_lock, flags); + return -EIO; + } + + hw->cmd_head_count--; + + /* Post any pending requests */ + efct_hw_cmd_submit_pending(hw); + + spin_unlock_irqrestore(&hw->cmd_lock, flags); + + if (ctx->cb) { + memcpy(ctx->buf, mqe, size); + ctx->cb(hw, status, ctx->buf, ctx->arg); + } + + mempool_free(ctx, hw->cmd_ctx_pool); + + return 0; +} + +static int +efct_hw_mq_process(struct efct_hw *hw, + int status, struct sli4_queue *mq) +{ + u8 mqe[SLI4_BMBX_SIZE]; + int rc; + + rc = sli_mq_read(&hw->sli, mq, mqe); + if (!rc) + rc = efct_hw_command_process(hw, status, mqe, mq->size); + + return rc; +} + +static int +efct_hw_command_cancel(struct efct_hw *hw) +{ + unsigned long flags = 0; + int rc = 0; + + spin_lock_irqsave(&hw->cmd_lock, flags); + + /* + * Manually clean up remaining commands. Note: since this calls + * efct_hw_command_process(), we'll also process the cmd_pending + * list, so no need to manually clean that out. + */ + while (!list_empty(&hw->cmd_head)) { + u8 mqe[SLI4_BMBX_SIZE] = { 0 }; + struct efct_command_ctx *ctx; + + ctx = list_first_entry(&hw->cmd_head, + struct efct_command_ctx, list_entry); + + efc_log_debug(hw->os, "hung command %08x\n", + !ctx ? 
U32_MAX : *((u32 *)ctx->buf)); + spin_unlock_irqrestore(&hw->cmd_lock, flags); + rc = efct_hw_command_process(hw, -1, mqe, SLI4_BMBX_SIZE); + spin_lock_irqsave(&hw->cmd_lock, flags); + } + + spin_unlock_irqrestore(&hw->cmd_lock, flags); + + return rc; +} + +static void +efct_mbox_rsp_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg) +{ + struct efct_mbox_rqst_ctx *ctx = arg; + + if (ctx) { + if (ctx->callback) + (*ctx->callback)(hw->os->efcport, status, mqe, + ctx->arg); + + mempool_free(ctx, hw->mbox_rqst_pool); + } +} + +int +efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg) +{ + struct efct_mbox_rqst_ctx *ctx; + struct efct *efct = base; + struct efct_hw *hw = &efct->hw; + int rc; + + /* + * Allocate a callback context (which includes the mbox cmd buffer), + * we need this to be persistent as the mbox cmd submission may be + * queued and executed later execution. + */ + ctx = mempool_alloc(hw->mbox_rqst_pool, GFP_ATOMIC); + if (!ctx) + return -EIO; + + ctx->callback = cb; + ctx->arg = arg; + + rc = efct_hw_command(hw, cmd, EFCT_CMD_NOWAIT, efct_mbox_rsp_cb, ctx); + if (rc) { + efc_log_err(efct, "issue mbox rqst failure rc:%d\n", rc); + mempool_free(ctx, hw->mbox_rqst_pool); + return -EIO; + } + + return 0; +} + +static inline struct efct_hw_io * +_efct_hw_io_alloc(struct efct_hw *hw) +{ + struct efct_hw_io *io = NULL; + + if (!list_empty(&hw->io_free)) { + io = list_first_entry(&hw->io_free, struct efct_hw_io, + list_entry); + list_del(&io->list_entry); + } + if (io) { + INIT_LIST_HEAD(&io->list_entry); + list_add_tail(&io->list_entry, &hw->io_inuse); + io->state = EFCT_HW_IO_STATE_INUSE; + io->abort_reqtag = U32_MAX; + io->wq = hw->wq_cpu_array[raw_smp_processor_id()]; + if (!io->wq) { + efc_log_err(hw->os, "WQ not assigned for cpu:%d\n", + raw_smp_processor_id()); + io->wq = hw->hw_wq[0]; + } + kref_init(&io->ref); + io->release = efct_hw_io_free_internal; + } else { + atomic_add(1, &hw->io_alloc_failed_count); + } + + return io; +} + +struct efct_hw_io * +efct_hw_io_alloc(struct efct_hw *hw) +{ + struct efct_hw_io *io = NULL; + unsigned long flags = 0; + + spin_lock_irqsave(&hw->io_lock, flags); + io = _efct_hw_io_alloc(hw); + spin_unlock_irqrestore(&hw->io_lock, flags); + + return io; +} + +static void +efct_hw_io_free_move_correct_list(struct efct_hw *hw, + struct efct_hw_io *io) +{ + /* + * When an IO is freed, depending on the exchange busy flag, + * move it to the correct list. 
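+ * If xbusy is still set the exchange is not yet done on the port, so
+ * the IO parks on io_wait_free until the XRI_ABORTED CQE arrives;
+ * otherwise it goes straight back onto io_free for reuse.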
+ */ + if (io->xbusy) { + /* + * add to wait_free list and wait for XRI_ABORTED CQEs to clean + * up + */ + INIT_LIST_HEAD(&io->list_entry); + list_add_tail(&io->list_entry, &hw->io_wait_free); + io->state = EFCT_HW_IO_STATE_WAIT_FREE; + } else { + /* IO not busy, add to free list */ + INIT_LIST_HEAD(&io->list_entry); + list_add_tail(&io->list_entry, &hw->io_free); + io->state = EFCT_HW_IO_STATE_FREE; + } +} + +static inline void +efct_hw_io_free_common(struct efct_hw *hw, struct efct_hw_io *io) +{ + /* initialize IO fields */ + efct_hw_init_free_io(io); + + /* Restore default SGL */ + efct_hw_io_restore_sgl(hw, io); +} + +void +efct_hw_io_free_internal(struct kref *arg) +{ + unsigned long flags = 0; + struct efct_hw_io *io = container_of(arg, struct efct_hw_io, ref); + struct efct_hw *hw = io->hw; + + /* perform common cleanup */ + efct_hw_io_free_common(hw, io); + + spin_lock_irqsave(&hw->io_lock, flags); + /* remove from in-use list */ + if (!list_empty(&io->list_entry) && !list_empty(&hw->io_inuse)) { + list_del_init(&io->list_entry); + efct_hw_io_free_move_correct_list(hw, io); + } + spin_unlock_irqrestore(&hw->io_lock, flags); +} + +int +efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io) +{ + return kref_put(&io->ref, io->release); +} + +struct efct_hw_io * +efct_hw_io_lookup(struct efct_hw *hw, u32 xri) +{ + u32 ioindex; + + ioindex = xri - hw->sli.ext[SLI4_RSRC_XRI].base[0]; + return hw->io[ioindex]; +} + +int +efct_hw_io_init_sges(struct efct_hw *hw, struct efct_hw_io *io, + enum efct_hw_io_type type) +{ + struct sli4_sge *data = NULL; + u32 i = 0; + u32 skips = 0; + u32 sge_flags = 0; + + if (!io) { + efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", hw, io); + return -EIO; + } + + /* Clear / reset the scatter-gather list */ + io->sgl = &io->def_sgl; + io->sgl_count = io->def_sgl_count; + io->first_data_sge = 0; + + memset(io->sgl->virt, 0, 2 * sizeof(struct sli4_sge)); + io->n_sge = 0; + io->sge_offset = 0; + + io->type = type; + + data = io->sgl->virt; + + /* + * Some IO types have underlying hardware requirements on the order + * of SGEs. Process all special entries here. + */ + switch (type) { + case EFCT_HW_IO_TARGET_WRITE: + + /* populate host resident XFER_RDY buffer */ + sge_flags = le32_to_cpu(data->dw2_flags); + sge_flags &= (~SLI4_SGE_TYPE_MASK); + sge_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT); + data->buffer_address_high = + cpu_to_le32(upper_32_bits(io->xfer_rdy.phys)); + data->buffer_address_low = + cpu_to_le32(lower_32_bits(io->xfer_rdy.phys)); + data->buffer_length = cpu_to_le32(io->xfer_rdy.size); + data->dw2_flags = cpu_to_le32(sge_flags); + data++; + + skips = EFCT_TARGET_WRITE_SKIPS; + + io->n_sge = 1; + break; + case EFCT_HW_IO_TARGET_READ: + /* + * For FCP_TSEND64, the first 2 entries are SKIP SGE's + */ + skips = EFCT_TARGET_READ_SKIPS; + break; + case EFCT_HW_IO_TARGET_RSP: + /* + * No skips, etc. 
for FCP_TRSP64 + */ + break; + default: + efc_log_err(hw->os, "unsupported IO type %#x\n", type); + return -EIO; + } + + /* + * Write skip entries + */ + for (i = 0; i < skips; i++) { + sge_flags = le32_to_cpu(data->dw2_flags); + sge_flags &= (~SLI4_SGE_TYPE_MASK); + sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT); + data->dw2_flags = cpu_to_le32(sge_flags); + data++; + } + + io->n_sge += skips; + + /* + * Set last + */ + sge_flags = le32_to_cpu(data->dw2_flags); + sge_flags |= SLI4_SGE_LAST; + data->dw2_flags = cpu_to_le32(sge_flags); + + return 0; +} + +int +efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io, + uintptr_t addr, u32 length) +{ + struct sli4_sge *data = NULL; + u32 sge_flags = 0; + + if (!io || !addr || !length) { + efc_log_err(hw->os, + "bad parameter hw=%p io=%p addr=%lx length=%u\n", + hw, io, addr, length); + return -EIO; + } + + if (length > hw->sli.sge_supported_length) { + efc_log_err(hw->os, + "length of SGE %d bigger than allowed %d\n", + length, hw->sli.sge_supported_length); + return -EIO; + } + + data = io->sgl->virt; + data += io->n_sge; + + sge_flags = le32_to_cpu(data->dw2_flags); + sge_flags &= ~SLI4_SGE_TYPE_MASK; + sge_flags |= SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT; + sge_flags &= ~SLI4_SGE_DATA_OFFSET_MASK; + sge_flags |= SLI4_SGE_DATA_OFFSET_MASK & io->sge_offset; + + data->buffer_address_high = cpu_to_le32(upper_32_bits(addr)); + data->buffer_address_low = cpu_to_le32(lower_32_bits(addr)); + data->buffer_length = cpu_to_le32(length); + + /* + * Always assume this is the last entry and mark as such. + * If this is not the first entry unset the "last SGE" + * indication for the previous entry + */ + sge_flags |= SLI4_SGE_LAST; + data->dw2_flags = cpu_to_le32(sge_flags); + + if (io->n_sge) { + sge_flags = le32_to_cpu(data[-1].dw2_flags); + sge_flags &= ~SLI4_SGE_LAST; + data[-1].dw2_flags = cpu_to_le32(sge_flags); + } + + /* Set first_data_bde if not previously set */ + if (io->first_data_sge == 0) + io->first_data_sge = io->n_sge; + + io->sge_offset += length; + io->n_sge++; + + return 0; +} + +void +efct_hw_io_abort_all(struct efct_hw *hw) +{ + struct efct_hw_io *io_to_abort = NULL; + struct efct_hw_io *next_io = NULL; + + list_for_each_entry_safe(io_to_abort, next_io, + &hw->io_inuse, list_entry) { + efct_hw_io_abort(hw, io_to_abort, true, NULL, NULL); + } +} + +static void +efct_hw_wq_process_abort(void *arg, u8 *cqe, int status) +{ + struct efct_hw_io *io = arg; + struct efct_hw *hw = io->hw; + u32 ext = 0; + u32 len = 0; + struct hw_wq_callback *wqcb; + + /* + * For IOs that were aborted internally, we may need to issue the + * callback here depending on whether a XRI_ABORTED CQE is expected ot + * not. If the status is Local Reject/No XRI, then + * issue the callback now. + */ + ext = sli_fc_ext_status(&hw->sli, cqe); + if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT && + ext == SLI4_FC_LOCAL_REJECT_NO_XRI && io->done) { + efct_hw_done_t done = io->done; + + io->done = NULL; + + /* + * Use latched status as this is always saved for an internal + * abort Note: We won't have both a done and abort_done + * function, so don't worry about + * clobbering the len, status and ext fields. 
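+ * The status/len/ext trio latched in efct_hw_wq_process_io() when the
+ * internal abort was issued is replayed here so the original completion
+ * outcome still reaches the done callback.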
+ */ + status = io->saved_status; + len = io->saved_len; + ext = io->saved_ext; + io->status_saved = false; + done(io, len, status, ext, io->arg); + } + + if (io->abort_done) { + efct_hw_done_t done = io->abort_done; + + io->abort_done = NULL; + done(io, len, status, ext, io->abort_arg); + } + + /* clear abort bit to indicate abort is complete */ + io->abort_in_progress = false; + + /* Free the WQ callback */ + if (io->abort_reqtag == U32_MAX) { + efc_log_err(hw->os, "HW IO already freed\n"); + return; + } + + wqcb = efct_hw_reqtag_get_instance(hw, io->abort_reqtag); + efct_hw_reqtag_free(hw, wqcb); + + /* + * Call efct_hw_io_free() because this releases the WQ reservation as + * well as doing the refcount put. Don't duplicate the code here. + */ + (void)efct_hw_io_free(hw, io); +} + +static void +efct_hw_fill_abort_wqe(struct efct_hw *hw, struct efct_hw_wqe *wqe) +{ + struct sli4_abort_wqe *abort = (void *)wqe->wqebuf; + + memset(abort, 0, hw->sli.wqe_size); + + abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG; + abort->ia_ir_byte |= wqe->send_abts ? 0 : 1; + + /* Suppress ABTS retries */ + abort->ia_ir_byte |= SLI4_ABRT_WQE_IR; + + abort->t_tag = cpu_to_le32(wqe->id); + abort->command = SLI4_WQE_ABORT; + abort->request_tag = cpu_to_le16(wqe->abort_reqtag); + + abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD); + + abort->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT); +} + +int +efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort, + bool send_abts, void *cb, void *arg) +{ + struct hw_wq_callback *wqcb; + unsigned long flags = 0; + + if (!io_to_abort) { + efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", + hw, io_to_abort); + return -EIO; + } + + if (hw->state != EFCT_HW_STATE_ACTIVE) { + efc_log_err(hw->os, "cannot send IO abort, HW state=%d\n", + hw->state); + return -EIO; + } + + /* take a reference on IO being aborted */ + if (kref_get_unless_zero(&io_to_abort->ref) == 0) { + /* command no longer active */ + efc_log_debug(hw->os, + "io not active xri=0x%x tag=0x%x\n", + io_to_abort->indicator, io_to_abort->reqtag); + return -ENOENT; + } + + /* Must have a valid WQ reference */ + if (!io_to_abort->wq) { + efc_log_debug(hw->os, "io_to_abort xri=0x%x not active on WQ\n", + io_to_abort->indicator); + /* efct_ref_get(): same function */ + kref_put(&io_to_abort->ref, io_to_abort->release); + return -ENOENT; + } + + /* + * Validation checks complete; now check to see if already being + * aborted, if not set the flag. 
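+ * cmpxchg() claims abort_in_progress atomically; a racing second caller
+ * sees the old true value, drops its reference and returns -EINPROGRESS.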
+ */ + if (cmpxchg(&io_to_abort->abort_in_progress, false, true)) { + /* efct_ref_get(): same function */ + kref_put(&io_to_abort->ref, io_to_abort->release); + efc_log_debug(hw->os, + "io already being aborted xri=0x%x tag=0x%x\n", + io_to_abort->indicator, io_to_abort->reqtag); + return -EINPROGRESS; + } + + /* + * If we got here, the possibilities are: + * - host owned xri + * - io_to_abort->wq_index != U32_MAX + * - submit ABORT_WQE to same WQ + * - port owned xri: + * - rxri: io_to_abort->wq_index == U32_MAX + * - submit ABORT_WQE to any WQ + * - non-rxri + * - io_to_abort->index != U32_MAX + * - submit ABORT_WQE to same WQ + * - io_to_abort->index == U32_MAX + * - submit ABORT_WQE to any WQ + */ + io_to_abort->abort_done = cb; + io_to_abort->abort_arg = arg; + + /* Allocate a request tag for the abort portion of this IO */ + wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_abort, io_to_abort); + if (!wqcb) { + efc_log_err(hw->os, "can't allocate request tag\n"); + return -ENOSPC; + } + + io_to_abort->abort_reqtag = wqcb->instance_index; + io_to_abort->wqe.send_abts = send_abts; + io_to_abort->wqe.id = io_to_abort->indicator; + io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag; + + /* + * If the wqe is on the pending list, then set this wqe to be + * aborted when the IO's wqe is removed from the list. + */ + if (io_to_abort->wq) { + spin_lock_irqsave(&io_to_abort->wq->queue->lock, flags); + if (io_to_abort->wqe.list_entry.next) { + io_to_abort->wqe.abort_wqe_submit_needed = true; + spin_unlock_irqrestore(&io_to_abort->wq->queue->lock, + flags); + return 0; + } + spin_unlock_irqrestore(&io_to_abort->wq->queue->lock, flags); + } + + efct_hw_fill_abort_wqe(hw, &io_to_abort->wqe); + + /* ABORT_WQE does not actually utilize an XRI on the Port, + * therefore, keep xbusy as-is to track the exchange's state, + * not the ABORT_WQE's state + */ + if (efct_hw_wq_write(io_to_abort->wq, &io_to_abort->wqe)) { + io_to_abort->abort_in_progress = false; + /* efct_ref_get(): same function */ + kref_put(&io_to_abort->ref, io_to_abort->release); + return -EIO; + } + + return 0; +} + +void +efct_hw_reqtag_pool_free(struct efct_hw *hw) +{ + u32 i; + struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool; + struct hw_wq_callback *wqcb = NULL; + + if (reqtag_pool) { + for (i = 0; i < U16_MAX; i++) { + wqcb = reqtag_pool->tags[i]; + if (!wqcb) + continue; + + kfree(wqcb); + } + kfree(reqtag_pool); + hw->wq_reqtag_pool = NULL; + } +} + +struct reqtag_pool * +efct_hw_reqtag_pool_alloc(struct efct_hw *hw) +{ + u32 i = 0; + struct reqtag_pool *reqtag_pool; + struct hw_wq_callback *wqcb; + + reqtag_pool = kzalloc(sizeof(*reqtag_pool), GFP_KERNEL); + if (!reqtag_pool) + return NULL; + + INIT_LIST_HEAD(&reqtag_pool->freelist); + /* initialize reqtag pool lock */ + spin_lock_init(&reqtag_pool->lock); + for (i = 0; i < U16_MAX; i++) { + wqcb = kmalloc(sizeof(*wqcb), GFP_KERNEL); + if (!wqcb) + break; + + reqtag_pool->tags[i] = wqcb; + wqcb->instance_index = i; + wqcb->callback = NULL; + wqcb->arg = NULL; + INIT_LIST_HEAD(&wqcb->list_entry); + list_add_tail(&wqcb->list_entry, &reqtag_pool->freelist); + } + + return reqtag_pool; +} + +struct hw_wq_callback * +efct_hw_reqtag_alloc(struct efct_hw *hw, + void (*callback)(void *arg, u8 *cqe, int status), + void *arg) +{ + struct hw_wq_callback *wqcb = NULL; + struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool; + unsigned long flags = 0; + + if (!callback) + return wqcb; + + spin_lock_irqsave(&reqtag_pool->lock, flags); + + if (!list_empty(&reqtag_pool->freelist)) 
{ + wqcb = list_first_entry(&reqtag_pool->freelist, + struct hw_wq_callback, list_entry); + } + + if (wqcb) { + list_del_init(&wqcb->list_entry); + spin_unlock_irqrestore(&reqtag_pool->lock, flags); + wqcb->callback = callback; + wqcb->arg = arg; + } else { + spin_unlock_irqrestore(&reqtag_pool->lock, flags); + } + + return wqcb; +} + +void +efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb) +{ + unsigned long flags = 0; + struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool; + + if (!wqcb->callback) + efc_log_err(hw->os, "WQCB is already freed\n"); + + spin_lock_irqsave(&reqtag_pool->lock, flags); + wqcb->callback = NULL; + wqcb->arg = NULL; + INIT_LIST_HEAD(&wqcb->list_entry); + list_add(&wqcb->list_entry, &hw->wq_reqtag_pool->freelist); + spin_unlock_irqrestore(&reqtag_pool->lock, flags); +} + +struct hw_wq_callback * +efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index) +{ + struct hw_wq_callback *wqcb; + + wqcb = hw->wq_reqtag_pool->tags[instance_index]; + if (!wqcb) + efc_log_err(hw->os, "wqcb for instance %d is null\n", + instance_index); + + return wqcb; +} + +int +efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id) +{ + int index = -1; + int i = id & (EFCT_HW_Q_HASH_SIZE - 1); + + /* + * Since the hash is always bigger than the maximum number of Qs, then + * we never have to worry about an infinite loop. We will always find + * an unused entry. + */ + do { + if (hash[i].in_use && hash[i].id == id) + index = hash[i].index; + else + i = (i + 1) & (EFCT_HW_Q_HASH_SIZE - 1); + } while (index == -1 && hash[i].in_use); + + return index; +} + +int +efct_hw_process(struct efct_hw *hw, u32 vector, + u32 max_isr_time_msec) +{ + struct hw_eq *eq; + + /* + * The caller should disable interrupts if they wish to prevent us + * from processing during a shutdown. The following states are defined: + * EFCT_HW_STATE_UNINITIALIZED - No queues allocated + * EFCT_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset, + * queues are cleared. + * EFCT_HW_STATE_ACTIVE - Chip and queues are operational + * EFCT_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions + * EFCT_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox + * completions. + */ + if (hw->state == EFCT_HW_STATE_UNINITIALIZED) + return 0; + + /* Get pointer to struct hw_eq */ + eq = hw->hw_eq[vector]; + if (!eq) + return 0; + + eq->use_count++; + + return efct_hw_eq_process(hw, eq, max_isr_time_msec); +} + +int +efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq, + u32 max_isr_time_msec) +{ + u8 eqe[sizeof(struct sli4_eqe)] = { 0 }; + u32 tcheck_count; + u64 tstart; + u64 telapsed; + bool done = false; + + tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS; + tstart = jiffies_to_msecs(jiffies); + + while (!done && !sli_eq_read(&hw->sli, eq->queue, eqe)) { + u16 cq_id = 0; + int rc; + + rc = sli_eq_parse(&hw->sli, eqe, &cq_id); + if (unlikely(rc)) { + if (rc == SLI4_EQE_STATUS_EQ_FULL) { + u32 i; + + /* + * Received a sentinel EQE indicating the + * EQ is full. 
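+ * (status SLI4_EQE_STATUS_EQ_FULL); the CQ id parsed from such an
+ * entry is not used. Instead of the usual cq_hash lookup, the loop
+ * below services every allocated CQ so that pending completions are
+ * still drained while the EQ is saturated.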
Process all CQs + */ + for (i = 0; i < hw->cq_count; i++) + efct_hw_cq_process(hw, hw->hw_cq[i]); + continue; + } else { + return rc; + } + } else { + int index; + + index = efct_hw_queue_hash_find(hw->cq_hash, cq_id); + + if (likely(index >= 0)) + efct_hw_cq_process(hw, hw->hw_cq[index]); + else + efc_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id); + } + + if (eq->queue->n_posted > eq->queue->posted_limit) + sli_queue_arm(&hw->sli, eq->queue, false); + + if (tcheck_count && (--tcheck_count == 0)) { + tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS; + telapsed = jiffies_to_msecs(jiffies) - tstart; + if (telapsed >= max_isr_time_msec) + done = true; + } + } + sli_queue_eq_arm(&hw->sli, eq->queue, true); + + return 0; +} + +static int +_efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe) +{ + int queue_rc; + + /* Every so often, set the wqec bit to generate comsummed completions */ + if (wq->wqec_count) + wq->wqec_count--; + + if (wq->wqec_count == 0) { + struct sli4_generic_wqe *genwqe = (void *)wqe->wqebuf; + + genwqe->cmdtype_wqec_byte |= SLI4_GEN_WQE_WQEC; + wq->wqec_count = wq->wqec_set_count; + } + + /* Decrement WQ free count */ + wq->free_count--; + + queue_rc = sli_wq_write(&wq->hw->sli, wq->queue, wqe->wqebuf); + + return (queue_rc < 0) ? -EIO : 0; +} + +static void +hw_wq_submit_pending(struct hw_wq *wq, u32 update_free_count) +{ + struct efct_hw_wqe *wqe; + unsigned long flags = 0; + + spin_lock_irqsave(&wq->queue->lock, flags); + + /* Update free count with value passed in */ + wq->free_count += update_free_count; + + while ((wq->free_count > 0) && (!list_empty(&wq->pending_list))) { + wqe = list_first_entry(&wq->pending_list, + struct efct_hw_wqe, list_entry); + list_del_init(&wqe->list_entry); + _efct_hw_wq_write(wq, wqe); + + if (wqe->abort_wqe_submit_needed) { + wqe->abort_wqe_submit_needed = false; + efct_hw_fill_abort_wqe(wq->hw, wqe); + INIT_LIST_HEAD(&wqe->list_entry); + list_add_tail(&wqe->list_entry, &wq->pending_list); + wq->wq_pending_count++; + } + } + + spin_unlock_irqrestore(&wq->queue->lock, flags); +} + +void +efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq) +{ + u8 cqe[sizeof(struct sli4_mcqe)]; + u16 rid = U16_MAX; + /* completion type */ + enum sli4_qentry ctype; + u32 n_processed = 0; + u32 tstart, telapsed; + + tstart = jiffies_to_msecs(jiffies); + + while (!sli_cq_read(&hw->sli, cq->queue, cqe)) { + int status; + + status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid); + /* + * The sign of status is significant. If status is: + * == 0 : call completed correctly and + * the CQE indicated success + * > 0 : call completed correctly and + * the CQE indicated an error + * < 0 : call failed and no information is available about the + * CQE + */ + if (status < 0) { + if (status == SLI4_MCQE_STATUS_NOT_COMPLETED) + /* + * Notification that an entry was consumed, + * but not completed + */ + continue; + + break; + } + + switch (ctype) { + case SLI4_QENTRY_ASYNC: + sli_cqe_async(&hw->sli, cqe); + break; + case SLI4_QENTRY_MQ: + /* + * Process MQ entry. Note there is no way to determine + * the MQ_ID from the completion entry. 
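+ * Because EFCT_HW_MAX_NUM_MQ is 1, the single mailbox queue
+ * (hw->mq) is passed directly; contrast this with the
+ * SLI4_QENTRY_WQ_RELEASE case below, which must recover the WQ
+ * from the completion's rid via efct_hw_queue_hash_find().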
+ */ + efct_hw_mq_process(hw, status, hw->mq); + break; + case SLI4_QENTRY_WQ: + efct_hw_wq_process(hw, cq, cqe, status, rid); + break; + case SLI4_QENTRY_WQ_RELEASE: { + u32 wq_id = rid; + int index; + struct hw_wq *wq = NULL; + + index = efct_hw_queue_hash_find(hw->wq_hash, wq_id); + + if (likely(index >= 0)) { + wq = hw->hw_wq[index]; + } else { + efc_log_err(hw->os, "bad WQ_ID %#06x\n", wq_id); + break; + } + /* Submit any HW IOs that are on the WQ pending list */ + hw_wq_submit_pending(wq, wq->wqec_set_count); + + break; + } + + case SLI4_QENTRY_RQ: + efct_hw_rqpair_process_rq(hw, cq, cqe); + break; + case SLI4_QENTRY_XABT: { + efct_hw_xabt_process(hw, cq, cqe, rid); + break; + } + default: + efc_log_debug(hw->os, "unhandled ctype=%#x rid=%#x\n", + ctype, rid); + break; + } + + n_processed++; + if (n_processed == cq->queue->proc_limit) + break; + + if (cq->queue->n_posted >= cq->queue->posted_limit) + sli_queue_arm(&hw->sli, cq->queue, false); + } + + sli_queue_arm(&hw->sli, cq->queue, true); + + if (n_processed > cq->queue->max_num_processed) + cq->queue->max_num_processed = n_processed; + telapsed = jiffies_to_msecs(jiffies) - tstart; + if (telapsed > cq->queue->max_process_time) + cq->queue->max_process_time = telapsed; +} + +void +efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq, + u8 *cqe, int status, u16 rid) +{ + struct hw_wq_callback *wqcb; + + if (rid == EFCT_HW_REQUE_XRI_REGTAG) { + if (status) + efc_log_err(hw->os, "reque xri failed, status = %d\n", + status); + return; + } + + wqcb = efct_hw_reqtag_get_instance(hw, rid); + if (!wqcb) { + efc_log_err(hw->os, "invalid request tag: x%x\n", rid); + return; + } + + if (!wqcb->callback) { + efc_log_err(hw->os, "wqcb callback is NULL\n"); + return; + } + + (*wqcb->callback)(wqcb->arg, cqe, status); +} + +void +efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq, + u8 *cqe, u16 rid) +{ + /* search IOs wait free list */ + struct efct_hw_io *io = NULL; + unsigned long flags = 0; + + io = efct_hw_io_lookup(hw, rid); + if (!io) { + /* IO lookup failure should never happen */ + efc_log_err(hw->os, "xabt io lookup failed rid=%#x\n", rid); + return; + } + + if (!io->xbusy) + efc_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid); + else + /* mark IO as no longer busy */ + io->xbusy = false; + + /* + * For IOs that were aborted internally, we need to issue any pending + * callback here. + */ + if (io->done) { + efct_hw_done_t done = io->done; + void *arg = io->arg; + + /* + * Use latched status as this is always saved for an internal + * abort + */ + int status = io->saved_status; + u32 len = io->saved_len; + u32 ext = io->saved_ext; + + io->done = NULL; + io->status_saved = false; + + done(io, len, status, ext, arg); + } + + spin_lock_irqsave(&hw->io_lock, flags); + if (io->state == EFCT_HW_IO_STATE_INUSE || + io->state == EFCT_HW_IO_STATE_WAIT_FREE) { + /* if on wait_free list, caller has already freed IO; + * remove from wait_free list and add to free list. + * if on in-use list, already marked as no longer busy; + * just leave there and wait for caller to free. 
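+ *
+ * In terms of enum efct_hw_io_state the effect is:
+ *
+ *   EFCT_HW_IO_STATE_WAIT_FREE -> EFCT_HW_IO_STATE_FREE
+ *       (taken off the io_wait_free list and returned to io_free here)
+ *   EFCT_HW_IO_STATE_INUSE     -> unchanged
+ *       (left on io_inuse until the owner frees it)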
+ */ + if (io->state == EFCT_HW_IO_STATE_WAIT_FREE) { + io->state = EFCT_HW_IO_STATE_FREE; + list_del_init(&io->list_entry); + efct_hw_io_free_move_correct_list(hw, io); + } + } + spin_unlock_irqrestore(&hw->io_lock, flags); +} + +static int +efct_hw_flush(struct efct_hw *hw) +{ + u32 i = 0; + + /* Process any remaining completions */ + for (i = 0; i < hw->eq_count; i++) + efct_hw_process(hw, i, ~0); + + return 0; +} + +int +efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe) +{ + int rc = 0; + unsigned long flags = 0; + + spin_lock_irqsave(&wq->queue->lock, flags); + if (list_empty(&wq->pending_list)) { + if (wq->free_count > 0) { + rc = _efct_hw_wq_write(wq, wqe); + } else { + INIT_LIST_HEAD(&wqe->list_entry); + list_add_tail(&wqe->list_entry, &wq->pending_list); + wq->wq_pending_count++; + } + + spin_unlock_irqrestore(&wq->queue->lock, flags); + return rc; + } + + INIT_LIST_HEAD(&wqe->list_entry); + list_add_tail(&wqe->list_entry, &wq->pending_list); + wq->wq_pending_count++; + while (wq->free_count > 0) { + wqe = list_first_entry(&wq->pending_list, struct efct_hw_wqe, + list_entry); + if (!wqe) + break; + + list_del_init(&wqe->list_entry); + rc = _efct_hw_wq_write(wq, wqe); + if (rc) + break; + + if (wqe->abort_wqe_submit_needed) { + wqe->abort_wqe_submit_needed = false; + efct_hw_fill_abort_wqe(wq->hw, wqe); + + INIT_LIST_HEAD(&wqe->list_entry); + list_add_tail(&wqe->list_entry, &wq->pending_list); + wq->wq_pending_count++; + } + } + + spin_unlock_irqrestore(&wq->queue->lock, flags); + + return rc; +} + +int +efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls) +{ + struct efct *efct = efc->base; + + return efct_hw_bls_send(efct, type, bls, NULL, NULL); +} + +int +efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params, + void *cb, void *arg) +{ + struct efct_hw *hw = &efct->hw; + struct efct_hw_io *hio; + struct sli_bls_payload bls; + int rc; + + if (hw->state != EFCT_HW_STATE_ACTIVE) { + efc_log_err(hw->os, + "cannot send BLS, HW state=%d\n", hw->state); + return -EIO; + } + + hio = efct_hw_io_alloc(hw); + if (!hio) { + efc_log_err(hw->os, "HIO allocation failed\n"); + return -EIO; + } + + hio->done = cb; + hio->arg = arg; + + bls_params->xri = hio->indicator; + bls_params->tag = hio->reqtag; + + if (type == FC_RCTL_BA_ACC) { + hio->type = EFCT_HW_BLS_ACC; + bls.type = SLI4_SLI_BLS_ACC; + memcpy(&bls.u.acc, bls_params->payload, sizeof(bls.u.acc)); + } else { + hio->type = EFCT_HW_BLS_RJT; + bls.type = SLI4_SLI_BLS_RJT; + memcpy(&bls.u.rjt, bls_params->payload, sizeof(bls.u.rjt)); + } + + bls.ox_id = cpu_to_le16(bls_params->ox_id); + bls.rx_id = cpu_to_le16(bls_params->rx_id); + + if (sli_xmit_bls_rsp64_wqe(&hw->sli, hio->wqe.wqebuf, + &bls, bls_params)) { + efc_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n"); + return -EIO; + } + + hio->xbusy = true; + + /* + * Add IO to active io wqe list before submitting, in case the + * wcqe processing preempts this thread. 
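+ *
+ * Note that efct_hw_wq_write() either writes the WQE immediately
+ * (when wq->free_count allows) or parks it on wq->pending_list to
+ * be submitted from hw_wq_submit_pending() once WQ credits are
+ * released, so a return of 0 only means the WQE was accepted, not
+ * that it has already reached the hardware.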
+ */ + hio->wq->use_count++; + rc = efct_hw_wq_write(hio->wq, &hio->wqe); + if (rc >= 0) { + /* non-negative return is success */ + rc = 0; + } else { + /* failed to write wqe, remove from active wqe list */ + efc_log_err(hw->os, + "sli_queue_write failed: %d\n", rc); + hio->xbusy = false; + } + + return rc; +} + +static int +efct_els_ssrs_send_cb(struct efct_hw_io *hio, u32 length, int status, + u32 ext_status, void *arg) +{ + struct efc_disc_io *io = arg; + + efc_disc_io_complete(io, length, status, ext_status); + return 0; +} + +static inline void +efct_fill_els_params(struct efc_disc_io *io, struct sli_els_params *params) +{ + u8 *cmd = io->req.virt; + + params->cmd = *cmd; + params->s_id = io->s_id; + params->d_id = io->d_id; + params->ox_id = io->iparam.els.ox_id; + params->rpi = io->rpi; + params->vpi = io->vpi; + params->rpi_registered = io->rpi_registered; + params->xmit_len = io->xmit_len; + params->rsp_len = io->rsp_len; + params->timeout = io->iparam.els.timeout; +} + +static inline void +efct_fill_ct_params(struct efc_disc_io *io, struct sli_ct_params *params) +{ + params->r_ctl = io->iparam.ct.r_ctl; + params->type = io->iparam.ct.type; + params->df_ctl = io->iparam.ct.df_ctl; + params->d_id = io->d_id; + params->ox_id = io->iparam.ct.ox_id; + params->rpi = io->rpi; + params->vpi = io->vpi; + params->rpi_registered = io->rpi_registered; + params->xmit_len = io->xmit_len; + params->rsp_len = io->rsp_len; + params->timeout = io->iparam.ct.timeout; +} + +/** + * efct_els_hw_srrs_send() - Send a single request and response cmd. + * @efc: efc library structure + * @io: Discovery IO used to hold els and ct cmd context. + * + * This routine supports communication sequences consisting of a single + * request and single response between two endpoints. Examples include: + * - Sending an ELS request. + * - Sending an ELS response - To send an ELS response, the caller must provide + * the OX_ID from the received request. + * - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request, + * the caller must provide the R_CTL, TYPE, and DF_CTL + * values to place in the FC frame header. + * + * Return: Status of the request. 
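+ *
+ * Illustrative caller sketch for an ELS request (field values are
+ * examples only; the real callers live in the efc library, which
+ * fills struct efc_disc_io from its node state):
+ *
+ *   io->io_type  = EFC_DISC_IO_ELS_REQ;
+ *   io->xmit_len = sizeof(struct fc_els_flogi);    // example size
+ *   io->rsp_len  = sizeof(struct fc_els_flogi);    // example size
+ *   io->iparam.els.timeout = 30;                   // example value
+ *   ... fill io->req / io->rsp DMA buffers, s_id, d_id, rpi, vpi ...
+ *   rc = efct_els_hw_srrs_send(efc, io);
+ *
+ * Completion is reported through efct_els_ssrs_send_cb(), which in
+ * turn calls efc_disc_io_complete().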
+ */ +int +efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io) +{ + struct efct *efct = efc->base; + struct efct_hw_io *hio; + struct efct_hw *hw = &efct->hw; + struct efc_dma *send = &io->req; + struct efc_dma *receive = &io->rsp; + struct sli4_sge *sge = NULL; + int rc = 0; + u32 len = io->xmit_len; + u32 sge0_flags; + u32 sge1_flags; + + hio = efct_hw_io_alloc(hw); + if (!hio) { + pr_err("HIO alloc failed\n"); + return -EIO; + } + + if (hw->state != EFCT_HW_STATE_ACTIVE) { + efc_log_debug(hw->os, + "cannot send SRRS, HW state=%d\n", hw->state); + return -EIO; + } + + hio->done = efct_els_ssrs_send_cb; + hio->arg = io; + + sge = hio->sgl->virt; + + /* clear both SGE */ + memset(hio->sgl->virt, 0, 2 * sizeof(struct sli4_sge)); + + sge0_flags = le32_to_cpu(sge[0].dw2_flags); + sge1_flags = le32_to_cpu(sge[1].dw2_flags); + if (send->size) { + sge[0].buffer_address_high = + cpu_to_le32(upper_32_bits(send->phys)); + sge[0].buffer_address_low = + cpu_to_le32(lower_32_bits(send->phys)); + + sge0_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT); + + sge[0].buffer_length = cpu_to_le32(len); + } + + if (io->io_type == EFC_DISC_IO_ELS_REQ || + io->io_type == EFC_DISC_IO_CT_REQ) { + sge[1].buffer_address_high = + cpu_to_le32(upper_32_bits(receive->phys)); + sge[1].buffer_address_low = + cpu_to_le32(lower_32_bits(receive->phys)); + + sge1_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT); + sge1_flags |= SLI4_SGE_LAST; + + sge[1].buffer_length = cpu_to_le32(receive->size); + } else { + sge0_flags |= SLI4_SGE_LAST; + } + + sge[0].dw2_flags = cpu_to_le32(sge0_flags); + sge[1].dw2_flags = cpu_to_le32(sge1_flags); + + switch (io->io_type) { + case EFC_DISC_IO_ELS_REQ: { + struct sli_els_params els_params; + + hio->type = EFCT_HW_ELS_REQ; + efct_fill_els_params(io, &els_params); + els_params.xri = hio->indicator; + els_params.tag = hio->reqtag; + + if (sli_els_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl, + &els_params)) { + efc_log_err(hw->os, "REQ WQE error\n"); + rc = -EIO; + } + break; + } + case EFC_DISC_IO_ELS_RESP: { + struct sli_els_params els_params; + + hio->type = EFCT_HW_ELS_RSP; + efct_fill_els_params(io, &els_params); + els_params.xri = hio->indicator; + els_params.tag = hio->reqtag; + if (sli_xmit_els_rsp64_wqe(&hw->sli, hio->wqe.wqebuf, send, + &els_params)){ + efc_log_err(hw->os, "RSP WQE error\n"); + rc = -EIO; + } + break; + } + case EFC_DISC_IO_CT_REQ: { + struct sli_ct_params ct_params; + + hio->type = EFCT_HW_FC_CT; + efct_fill_ct_params(io, &ct_params); + ct_params.xri = hio->indicator; + ct_params.tag = hio->reqtag; + if (sli_gen_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl, + &ct_params)){ + efc_log_err(hw->os, "GEN WQE error\n"); + rc = -EIO; + } + break; + } + case EFC_DISC_IO_CT_RESP: { + struct sli_ct_params ct_params; + + hio->type = EFCT_HW_FC_CT_RSP; + efct_fill_ct_params(io, &ct_params); + ct_params.xri = hio->indicator; + ct_params.tag = hio->reqtag; + if (sli_xmit_sequence64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl, + &ct_params)){ + efc_log_err(hw->os, "XMIT SEQ WQE error\n"); + rc = -EIO; + } + break; + } + default: + efc_log_err(hw->os, "bad SRRS type %#x\n", io->io_type); + rc = -EIO; + } + + if (rc == 0) { + hio->xbusy = true; + + /* + * Add IO to active io wqe list before submitting, in case the + * wcqe processing preempts this thread. 
+ */ + hio->wq->use_count++; + rc = efct_hw_wq_write(hio->wq, &hio->wqe); + if (rc >= 0) { + /* non-negative return is success */ + rc = 0; + } else { + /* failed to write wqe, remove from active wqe list */ + efc_log_err(hw->os, + "sli_queue_write failed: %d\n", rc); + hio->xbusy = false; + } + } + + return rc; +} + +int +efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type, + struct efct_hw_io *io, union efct_hw_io_param_u *iparam, + void *cb, void *arg) +{ + int rc = 0; + bool send_wqe = true; + + if (!io) { + pr_err("bad parm hw=%p io=%p\n", hw, io); + return -EIO; + } + + if (hw->state != EFCT_HW_STATE_ACTIVE) { + efc_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state); + return -EIO; + } + + /* + * Save state needed during later stages + */ + io->type = type; + io->done = cb; + io->arg = arg; + + /* + * Format the work queue entry used to send the IO + */ + switch (type) { + case EFCT_HW_IO_TARGET_WRITE: { + u16 *flags = &iparam->fcp_tgt.flags; + struct fcp_txrdy *xfer = io->xfer_rdy.virt; + + /* + * Fill in the XFER_RDY for IF_TYPE 0 devices + */ + xfer->ft_data_ro = cpu_to_be32(iparam->fcp_tgt.offset); + xfer->ft_burst_len = cpu_to_be32(iparam->fcp_tgt.xmit_len); + + if (io->xbusy) + *flags |= SLI4_IO_CONTINUATION; + else + *flags &= ~SLI4_IO_CONTINUATION; + iparam->fcp_tgt.xri = io->indicator; + iparam->fcp_tgt.tag = io->reqtag; + + if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, + &io->def_sgl, io->first_data_sge, + SLI4_CQ_DEFAULT, + 0, 0, &iparam->fcp_tgt)) { + efc_log_err(hw->os, "TRECEIVE WQE error\n"); + rc = -EIO; + } + break; + } + case EFCT_HW_IO_TARGET_READ: { + u16 *flags = &iparam->fcp_tgt.flags; + + if (io->xbusy) + *flags |= SLI4_IO_CONTINUATION; + else + *flags &= ~SLI4_IO_CONTINUATION; + + iparam->fcp_tgt.xri = io->indicator; + iparam->fcp_tgt.tag = io->reqtag; + + if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, + &io->def_sgl, io->first_data_sge, + SLI4_CQ_DEFAULT, + 0, 0, &iparam->fcp_tgt)) { + efc_log_err(hw->os, "TSEND WQE error\n"); + rc = -EIO; + } + break; + } + case EFCT_HW_IO_TARGET_RSP: { + u16 *flags = &iparam->fcp_tgt.flags; + + if (io->xbusy) + *flags |= SLI4_IO_CONTINUATION; + else + *flags &= ~SLI4_IO_CONTINUATION; + + iparam->fcp_tgt.xri = io->indicator; + iparam->fcp_tgt.tag = io->reqtag; + + if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, + &io->def_sgl, SLI4_CQ_DEFAULT, + 0, &iparam->fcp_tgt)) { + efc_log_err(hw->os, "TRSP WQE error\n"); + rc = -EIO; + } + + break; + } + default: + efc_log_err(hw->os, "unsupported IO type %#x\n", type); + rc = -EIO; + } + + if (send_wqe && rc == 0) { + io->xbusy = true; + + /* + * Add IO to active io wqe list before submitting, in case the + * wcqe processing preempts this thread. 
+ */ + hw->tcmd_wq_submit[io->wq->instance]++; + io->wq->use_count++; + rc = efct_hw_wq_write(io->wq, &io->wqe); + if (rc >= 0) { + /* non-negative return is success */ + rc = 0; + } else { + /* failed to write wqe, remove from active wqe list */ + efc_log_err(hw->os, + "sli_queue_write failed: %d\n", rc); + io->xbusy = false; + } + } + + return rc; +} + +int +efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr, + u8 sof, u8 eof, struct efc_dma *payload, + struct efct_hw_send_frame_context *ctx, + void (*callback)(void *arg, u8 *cqe, int status), + void *arg) +{ + int rc; + struct efct_hw_wqe *wqe; + u32 xri; + struct hw_wq *wq; + + wqe = &ctx->wqe; + + /* populate the callback object */ + ctx->hw = hw; + + /* Fetch and populate request tag */ + ctx->wqcb = efct_hw_reqtag_alloc(hw, callback, arg); + if (!ctx->wqcb) { + efc_log_err(hw->os, "can't allocate request tag\n"); + return -ENOSPC; + } + + wq = hw->hw_wq[0]; + + /* Set XRI and RX_ID in the header based on which WQ, and which + * send_frame_io we are using + */ + xri = wq->send_frame_io->indicator; + + /* Build the send frame WQE */ + rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, + sof, eof, (u32 *)hdr, payload, payload->len, + EFCT_HW_SEND_FRAME_TIMEOUT, xri, + ctx->wqcb->instance_index); + if (rc) { + efc_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc); + return -EIO; + } + + /* Write to WQ */ + rc = efct_hw_wq_write(wq, wqe); + if (rc) { + efc_log_err(hw->os, "efct_hw_wq_write failed: %d\n", rc); + return -EIO; + } + + wq->use_count++; + + return 0; +} + +static int +efct_hw_cb_link_stat(struct efct_hw *hw, int status, + u8 *mqe, void *arg) +{ + struct sli4_cmd_read_link_stats *mbox_rsp; + struct efct_hw_link_stat_cb_arg *cb_arg = arg; + struct efct_hw_link_stat_counts counts[EFCT_HW_LINK_STAT_MAX]; + u32 num_counters, i; + u32 mbox_rsp_flags = 0; + + mbox_rsp = (struct sli4_cmd_read_link_stats *)mqe; + mbox_rsp_flags = le32_to_cpu(mbox_rsp->dw1_flags); + num_counters = (mbox_rsp_flags & SLI4_READ_LNKSTAT_GEC) ? 
20 : 13; + memset(counts, 0, sizeof(struct efct_hw_link_stat_counts) * + EFCT_HW_LINK_STAT_MAX); + + /* Fill overflow counts, mask starts from SLI4_READ_LNKSTAT_W02OF*/ + for (i = 0; i < EFCT_HW_LINK_STAT_MAX; i++) + counts[i].overflow = (mbox_rsp_flags & (1 << (i + 2))); + + counts[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter = + le32_to_cpu(mbox_rsp->linkfail_errcnt); + counts[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter = + le32_to_cpu(mbox_rsp->losssync_errcnt); + counts[EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter = + le32_to_cpu(mbox_rsp->losssignal_errcnt); + counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter = + le32_to_cpu(mbox_rsp->primseq_errcnt); + counts[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter = + le32_to_cpu(mbox_rsp->inval_txword_errcnt); + counts[EFCT_HW_LINK_STAT_CRC_COUNT].counter = + le32_to_cpu(mbox_rsp->crc_errcnt); + counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter = + le32_to_cpu(mbox_rsp->primseq_eventtimeout_cnt); + counts[EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter = + le32_to_cpu(mbox_rsp->elastic_bufoverrun_errcnt); + counts[EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter = + le32_to_cpu(mbox_rsp->arbit_fc_al_timeout_cnt); + counts[EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter = + le32_to_cpu(mbox_rsp->adv_rx_buftor_to_buf_credit); + counts[EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter = + le32_to_cpu(mbox_rsp->curr_rx_buf_to_buf_credit); + counts[EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter = + le32_to_cpu(mbox_rsp->adv_tx_buf_to_buf_credit); + counts[EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter = + le32_to_cpu(mbox_rsp->curr_tx_buf_to_buf_credit); + counts[EFCT_HW_LINK_STAT_RCV_EOFA_COUNT].counter = + le32_to_cpu(mbox_rsp->rx_eofa_cnt); + counts[EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter = + le32_to_cpu(mbox_rsp->rx_eofdti_cnt); + counts[EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT].counter = + le32_to_cpu(mbox_rsp->rx_eofni_cnt); + counts[EFCT_HW_LINK_STAT_RCV_SOFF_COUNT].counter = + le32_to_cpu(mbox_rsp->rx_soff_cnt); + counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter = + le32_to_cpu(mbox_rsp->rx_dropped_no_aer_cnt); + counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter = + le32_to_cpu(mbox_rsp->rx_dropped_no_avail_rpi_rescnt); + counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter = + le32_to_cpu(mbox_rsp->rx_dropped_no_avail_xri_rescnt); + + if (cb_arg) { + if (cb_arg->cb) { + if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status)) + status = le16_to_cpu(mbox_rsp->hdr.status); + cb_arg->cb(status, num_counters, counts, cb_arg->arg); + } + + kfree(cb_arg); + } + + return 0; +} + +int +efct_hw_get_link_stats(struct efct_hw *hw, u8 req_ext_counters, + u8 clear_overflow_flags, u8 clear_all_counters, + void (*cb)(int status, u32 num_counters, + struct efct_hw_link_stat_counts *counters, + void *arg), + void *arg) +{ + int rc = -EIO; + struct efct_hw_link_stat_cb_arg *cb_arg; + u8 mbxdata[SLI4_BMBX_SIZE]; + + cb_arg = kzalloc(sizeof(*cb_arg), GFP_ATOMIC); + if (!cb_arg) + return -ENOMEM; + + cb_arg->cb = cb; + cb_arg->arg = arg; + + /* Send the HW command */ + if (!sli_cmd_read_link_stats(&hw->sli, mbxdata, req_ext_counters, + clear_overflow_flags, clear_all_counters)) + rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT, + efct_hw_cb_link_stat, cb_arg); + + if (rc) + kfree(cb_arg); + + return rc; +} + +static int +efct_hw_cb_host_stat(struct efct_hw *hw, int status, u8 *mqe, void *arg) +{ + struct sli4_cmd_read_status *mbox_rsp = + (struct sli4_cmd_read_status *)mqe; + struct 
efct_hw_host_stat_cb_arg *cb_arg = arg; + struct efct_hw_host_stat_counts counts[EFCT_HW_HOST_STAT_MAX]; + u32 num_counters = EFCT_HW_HOST_STAT_MAX; + + memset(counts, 0, sizeof(struct efct_hw_host_stat_counts) * + EFCT_HW_HOST_STAT_MAX); + + counts[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter = + le32_to_cpu(mbox_rsp->trans_kbyte_cnt); + counts[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter = + le32_to_cpu(mbox_rsp->recv_kbyte_cnt); + counts[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter = + le32_to_cpu(mbox_rsp->trans_frame_cnt); + counts[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter = + le32_to_cpu(mbox_rsp->recv_frame_cnt); + counts[EFCT_HW_HOST_STAT_TX_SEQ_COUNT].counter = + le32_to_cpu(mbox_rsp->trans_seq_cnt); + counts[EFCT_HW_HOST_STAT_RX_SEQ_COUNT].counter = + le32_to_cpu(mbox_rsp->recv_seq_cnt); + counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter = + le32_to_cpu(mbox_rsp->tot_exchanges_orig); + counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP].counter = + le32_to_cpu(mbox_rsp->tot_exchanges_resp); + counts[EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT].counter = + le32_to_cpu(mbox_rsp->recv_p_bsy_cnt); + counts[EFCT_HW_HOST_STAT_RX_F_BSY_COUNT].counter = + le32_to_cpu(mbox_rsp->recv_f_bsy_cnt); + counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter = + le32_to_cpu(mbox_rsp->no_rq_buf_dropped_frames_cnt); + counts[EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter = + le32_to_cpu(mbox_rsp->empty_rq_timeout_cnt); + counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter = + le32_to_cpu(mbox_rsp->no_xri_dropped_frames_cnt); + counts[EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter = + le32_to_cpu(mbox_rsp->empty_xri_pool_cnt); + + if (cb_arg) { + if (cb_arg->cb) { + if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status)) + status = le16_to_cpu(mbox_rsp->hdr.status); + cb_arg->cb(status, num_counters, counts, cb_arg->arg); + } + + kfree(cb_arg); + } + + return 0; +} + +int +efct_hw_get_host_stats(struct efct_hw *hw, u8 cc, + void (*cb)(int status, u32 num_counters, + struct efct_hw_host_stat_counts *counters, + void *arg), + void *arg) +{ + int rc = -EIO; + struct efct_hw_host_stat_cb_arg *cb_arg; + u8 mbxdata[SLI4_BMBX_SIZE]; + + cb_arg = kmalloc(sizeof(*cb_arg), GFP_ATOMIC); + if (!cb_arg) + return -ENOMEM; + + cb_arg->cb = cb; + cb_arg->arg = arg; + + /* Send the HW command to get the host stats */ + if (!sli_cmd_read_status(&hw->sli, mbxdata, cc)) + rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT, + efct_hw_cb_host_stat, cb_arg); + + if (rc) { + efc_log_debug(hw->os, "READ_HOST_STATS failed\n"); + kfree(cb_arg); + } + + return rc; +} + +struct efct_hw_async_call_ctx { + efct_hw_async_cb_t callback; + void *arg; + u8 cmd[SLI4_BMBX_SIZE]; +}; + +static void +efct_hw_async_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg) +{ + struct efct_hw_async_call_ctx *ctx = arg; + + if (ctx) { + if (ctx->callback) + (*ctx->callback)(hw, status, mqe, ctx->arg); + + kfree(ctx); + } +} + +int +efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg) +{ + struct efct_hw_async_call_ctx *ctx; + int rc; + + /* + * Allocate a callback context (which includes the mbox cmd buffer), + * we need this to be persistent as the mbox cmd submission may be + * queued and executed later execution. 
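+ *
+ * The context is freed in efct_hw_async_cb() after the NOP mailbox
+ * command completes, so an (illustrative) caller only supplies a
+ * callback matching efct_hw_async_cb_t and an argument; the names
+ * below are hypothetical:
+ *
+ *   static void my_async_done(struct efct_hw *hw, int status,
+ *                             u8 *mqe, void *arg)
+ *   {
+ *           // invoked once the NOP mailbox command completes
+ *   }
+ *
+ *   rc = efct_hw_async_call(hw, my_async_done, my_arg);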
+ */ + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->callback = callback; + ctx->arg = arg; + + /* Build and send a NOP mailbox command */ + if (sli_cmd_common_nop(&hw->sli, ctx->cmd, 0)) { + efc_log_err(hw->os, "COMMON_NOP format failure\n"); + kfree(ctx); + return -EIO; + } + + rc = efct_hw_command(hw, ctx->cmd, EFCT_CMD_NOWAIT, efct_hw_async_cb, + ctx); + if (rc) { + efc_log_err(hw->os, "COMMON_NOP command failure, rc=%d\n", rc); + kfree(ctx); + return -EIO; + } + return 0; +} + +static int +efct_hw_cb_fw_write(struct efct_hw *hw, int status, u8 *mqe, void *arg) +{ + struct sli4_cmd_sli_config *mbox_rsp = + (struct sli4_cmd_sli_config *)mqe; + struct sli4_rsp_cmn_write_object *wr_obj_rsp; + struct efct_hw_fw_wr_cb_arg *cb_arg = arg; + u32 bytes_written; + u16 mbox_status; + u32 change_status; + + wr_obj_rsp = (struct sli4_rsp_cmn_write_object *) + &mbox_rsp->payload.embed; + bytes_written = le32_to_cpu(wr_obj_rsp->actual_write_length); + mbox_status = le16_to_cpu(mbox_rsp->hdr.status); + change_status = (le32_to_cpu(wr_obj_rsp->change_status_dword) & + RSP_CHANGE_STATUS); + + if (cb_arg) { + if (cb_arg->cb) { + if (!status && mbox_status) + status = mbox_status; + cb_arg->cb(status, bytes_written, change_status, + cb_arg->arg); + } + + kfree(cb_arg); + } + + return 0; +} + +int +efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma, u32 size, + u32 offset, int last, + void (*cb)(int status, u32 bytes_written, + u32 change_status, void *arg), + void *arg) +{ + int rc = -EIO; + u8 mbxdata[SLI4_BMBX_SIZE]; + struct efct_hw_fw_wr_cb_arg *cb_arg; + int noc = 0; + + cb_arg = kzalloc(sizeof(*cb_arg), GFP_KERNEL); + if (!cb_arg) + return -ENOMEM; + + cb_arg->cb = cb; + cb_arg->arg = arg; + + /* Write a portion of a firmware image to the device */ + if (!sli_cmd_common_write_object(&hw->sli, mbxdata, + noc, last, size, offset, "/prg/", + dma)) + rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT, + efct_hw_cb_fw_write, cb_arg); + + if (rc != 0) { + efc_log_debug(hw->os, "COMMON_WRITE_OBJECT failed\n"); + kfree(cb_arg); + } + + return rc; +} + +static int +efct_hw_cb_port_control(struct efct_hw *hw, int status, u8 *mqe, + void *arg) +{ + return 0; +} + +int +efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl, + uintptr_t value, + void (*cb)(int status, uintptr_t value, void *arg), + void *arg) +{ + int rc = -EIO; + u8 link[SLI4_BMBX_SIZE]; + u32 speed = 0; + u8 reset_alpa = 0; + + switch (ctrl) { + case EFCT_HW_PORT_INIT: + if (!sli_cmd_config_link(&hw->sli, link)) + rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT, + efct_hw_cb_port_control, NULL); + + if (rc != 0) { + efc_log_err(hw->os, "CONFIG_LINK failed\n"); + break; + } + speed = hw->config.speed; + reset_alpa = (u8)(value & 0xff); + + rc = -EIO; + if (!sli_cmd_init_link(&hw->sli, link, speed, reset_alpa)) + rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT, + efct_hw_cb_port_control, NULL); + /* Free buffer on error, since no callback is coming */ + if (rc) + efc_log_err(hw->os, "INIT_LINK failed\n"); + break; + + case EFCT_HW_PORT_SHUTDOWN: + if (!sli_cmd_down_link(&hw->sli, link)) + rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT, + efct_hw_cb_port_control, NULL); + /* Free buffer on error, since no callback is coming */ + if (rc) + efc_log_err(hw->os, "DOWN_LINK failed\n"); + break; + + default: + efc_log_debug(hw->os, "unhandled control %#x\n", ctrl); + break; + } + + return rc; +} + +void +efct_hw_teardown(struct efct_hw *hw) +{ + u32 i = 0; + u32 destroy_queues; + u32 free_memory; + 
struct efc_dma *dma; + struct efct *efct = hw->os; + + destroy_queues = (hw->state == EFCT_HW_STATE_ACTIVE); + free_memory = (hw->state != EFCT_HW_STATE_UNINITIALIZED); + + /* Cancel Sliport Healthcheck */ + if (hw->sliport_healthcheck) { + hw->sliport_healthcheck = 0; + efct_hw_config_sli_port_health_check(hw, 0, 0); + } + + if (hw->state != EFCT_HW_STATE_QUEUES_ALLOCATED) { + hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS; + + efct_hw_flush(hw); + + if (list_empty(&hw->cmd_head)) + efc_log_debug(hw->os, + "All commands completed on MQ queue\n"); + else + efc_log_debug(hw->os, + "Some cmds still pending on MQ queue\n"); + + /* Cancel any remaining commands */ + efct_hw_command_cancel(hw); + } else { + hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS; + } + + dma_free_coherent(&efct->pci->dev, + hw->rnode_mem.size, hw->rnode_mem.virt, + hw->rnode_mem.phys); + memset(&hw->rnode_mem, 0, sizeof(struct efc_dma)); + + if (hw->io) { + for (i = 0; i < hw->config.n_io; i++) { + if (hw->io[i] && hw->io[i]->sgl && + hw->io[i]->sgl->virt) { + dma_free_coherent(&efct->pci->dev, + hw->io[i]->sgl->size, + hw->io[i]->sgl->virt, + hw->io[i]->sgl->phys); + } + kfree(hw->io[i]); + hw->io[i] = NULL; + } + kfree(hw->io); + hw->io = NULL; + kfree(hw->wqe_buffs); + hw->wqe_buffs = NULL; + } + + dma = &hw->xfer_rdy; + dma_free_coherent(&efct->pci->dev, + dma->size, dma->virt, dma->phys); + memset(dma, 0, sizeof(struct efc_dma)); + + dma = &hw->loop_map; + dma_free_coherent(&efct->pci->dev, + dma->size, dma->virt, dma->phys); + memset(dma, 0, sizeof(struct efc_dma)); + + for (i = 0; i < hw->wq_count; i++) + sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, + free_memory); + + for (i = 0; i < hw->rq_count; i++) + sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, + free_memory); + + for (i = 0; i < hw->mq_count; i++) + sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, + free_memory); + + for (i = 0; i < hw->cq_count; i++) + sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, + free_memory); + + for (i = 0; i < hw->eq_count; i++) + sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, + free_memory); + + /* Free rq buffers */ + efct_hw_rx_free(hw); + + efct_hw_queue_teardown(hw); + + kfree(hw->wq_cpu_array); + + sli_teardown(&hw->sli); + + /* record the fact that the queues are non-functional */ + hw->state = EFCT_HW_STATE_UNINITIALIZED; + + /* free sequence free pool */ + kfree(hw->seq_pool); + hw->seq_pool = NULL; + + /* free hw_wq_callback pool */ + efct_hw_reqtag_pool_free(hw); + + mempool_destroy(hw->cmd_ctx_pool); + mempool_destroy(hw->mbox_rqst_pool); + + /* Mark HW setup as not having been called */ + hw->hw_setup_called = false; +} + +static int +efct_hw_sli_reset(struct efct_hw *hw, enum efct_hw_reset reset, + enum efct_hw_state prev_state) +{ + int rc = 0; + + switch (reset) { + case EFCT_HW_RESET_FUNCTION: + efc_log_debug(hw->os, "issuing function level reset\n"); + if (sli_reset(&hw->sli)) { + efc_log_err(hw->os, "sli_reset failed\n"); + rc = -EIO; + } + break; + case EFCT_HW_RESET_FIRMWARE: + efc_log_debug(hw->os, "issuing firmware reset\n"); + if (sli_fw_reset(&hw->sli)) { + efc_log_err(hw->os, "sli_soft_reset failed\n"); + rc = -EIO; + } + /* + * Because the FW reset leaves the FW in a non-running state, + * follow that with a regular reset. 
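+ * The two reset types therefore differ only in the first step:
+ *
+ *   EFCT_HW_RESET_FUNCTION: sli_reset() only
+ *   EFCT_HW_RESET_FIRMWARE: sli_fw_reset(), then sli_reset()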
+ */ + efc_log_debug(hw->os, "issuing function level reset\n"); + if (sli_reset(&hw->sli)) { + efc_log_err(hw->os, "sli_reset failed\n"); + rc = -EIO; + } + break; + default: + efc_log_err(hw->os, "unknown type - no reset performed\n"); + hw->state = prev_state; + rc = -EINVAL; + break; + } + + return rc; +} + +int +efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset) +{ + int rc = 0; + enum efct_hw_state prev_state = hw->state; + + if (hw->state != EFCT_HW_STATE_ACTIVE) + efc_log_debug(hw->os, + "HW state %d is not active\n", hw->state); + + hw->state = EFCT_HW_STATE_RESET_IN_PROGRESS; + + /* + * If the prev_state is already reset/teardown in progress, + * don't continue further + */ + if (prev_state == EFCT_HW_STATE_RESET_IN_PROGRESS || + prev_state == EFCT_HW_STATE_TEARDOWN_IN_PROGRESS) + return efct_hw_sli_reset(hw, reset, prev_state); + + if (prev_state != EFCT_HW_STATE_UNINITIALIZED) { + efct_hw_flush(hw); + + if (list_empty(&hw->cmd_head)) + efc_log_debug(hw->os, + "All commands completed on MQ queue\n"); + else + efc_log_err(hw->os, + "Some commands still pending on MQ queue\n"); + } + + /* Reset the chip */ + rc = efct_hw_sli_reset(hw, reset, prev_state); + if (rc == -EINVAL) + return -EIO; + + return rc; +} diff --git a/drivers/scsi/elx/efct/efct_hw.h b/drivers/scsi/elx/efct/efct_hw.h new file mode 100644 index 000000000..f3f4aa78d --- /dev/null +++ b/drivers/scsi/elx/efct/efct_hw.h @@ -0,0 +1,764 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#ifndef _EFCT_HW_H +#define _EFCT_HW_H + +#include "../libefc_sli/sli4.h" + +/* + * EFCT PCI IDs + */ +#define EFCT_VENDOR_ID 0x10df +/* LightPulse 16Gb x 4 FC (lancer-g6) */ +#define EFCT_DEVICE_LANCER_G6 0xe307 +/* LightPulse 32Gb x 4 FC (lancer-g7) */ +#define EFCT_DEVICE_LANCER_G7 0xf407 + +/*Default RQ entries len used by driver*/ +#define EFCT_HW_RQ_ENTRIES_MIN 512 +#define EFCT_HW_RQ_ENTRIES_DEF 1024 +#define EFCT_HW_RQ_ENTRIES_MAX 4096 + +/*Defines the size of the RQ buffers used for each RQ*/ +#define EFCT_HW_RQ_SIZE_HDR 128 +#define EFCT_HW_RQ_SIZE_PAYLOAD 1024 + +/*Define the maximum number of multi-receive queues*/ +#define EFCT_HW_MAX_MRQS 8 + +/* + * Define count of when to set the WQEC bit in a submitted + * WQE, causing a consummed/released completion to be posted. + */ +#define EFCT_HW_WQEC_SET_COUNT 32 + +/*Send frame timeout in seconds*/ +#define EFCT_HW_SEND_FRAME_TIMEOUT 10 + +/* + * FDT Transfer Hint value, reads greater than this value + * will be segmented to implement fairness. A value of zero disables + * the feature. 
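+ * As an illustration only (the exact segmentation policy is up to
+ * the firmware): with the 8192-byte default defined below, a 64 KB
+ * read would be eligible to be broken into eight 8 KB bursts rather
+ * than transferred as a single burst.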
+ */ +#define EFCT_HW_FDT_XFER_HINT 8192 + +#define EFCT_HW_TIMECHECK_ITERATIONS 100 +#define EFCT_HW_MAX_NUM_MQ 1 +#define EFCT_HW_MAX_NUM_RQ 32 +#define EFCT_HW_MAX_NUM_EQ 16 +#define EFCT_HW_MAX_NUM_WQ 32 +#define EFCT_HW_DEF_NUM_EQ 1 + +#define OCE_HW_MAX_NUM_MRQ_PAIRS 16 + +#define EFCT_HW_MQ_DEPTH 128 +#define EFCT_HW_EQ_DEPTH 1024 + +/* + * A CQ will be assinged to each WQ + * (CQ must have 2X entries of the WQ for abort + * processing), plus a separate one for each RQ PAIR and one for MQ + */ +#define EFCT_HW_MAX_NUM_CQ \ + ((EFCT_HW_MAX_NUM_WQ * 2) + 1 + (OCE_HW_MAX_NUM_MRQ_PAIRS * 2)) + +#define EFCT_HW_Q_HASH_SIZE 128 +#define EFCT_HW_RQ_HEADER_SIZE 128 +#define EFCT_HW_RQ_HEADER_INDEX 0 + +#define EFCT_HW_REQUE_XRI_REGTAG 65534 + +/* Options for efct_hw_command() */ +enum efct_cmd_opts { + /* command executes synchronously and busy-waits for completion */ + EFCT_CMD_POLL, + /* command executes asynchronously. Uses callback */ + EFCT_CMD_NOWAIT, +}; + +enum efct_hw_reset { + EFCT_HW_RESET_FUNCTION, + EFCT_HW_RESET_FIRMWARE, + EFCT_HW_RESET_MAX +}; + +enum efct_hw_topo { + EFCT_HW_TOPOLOGY_AUTO, + EFCT_HW_TOPOLOGY_NPORT, + EFCT_HW_TOPOLOGY_LOOP, + EFCT_HW_TOPOLOGY_NONE, + EFCT_HW_TOPOLOGY_MAX +}; + +/* pack fw revision values into a single uint64_t */ +#define HW_FWREV(a, b, c, d) (((uint64_t)(a) << 48) | ((uint64_t)(b) << 32) \ + | ((uint64_t)(c) << 16) | ((uint64_t)(d))) + +#define EFCT_FW_VER_STR(a, b, c, d) (#a "." #b "." #c "." #d) + +enum efct_hw_io_type { + EFCT_HW_ELS_REQ, + EFCT_HW_ELS_RSP, + EFCT_HW_FC_CT, + EFCT_HW_FC_CT_RSP, + EFCT_HW_BLS_ACC, + EFCT_HW_BLS_RJT, + EFCT_HW_IO_TARGET_READ, + EFCT_HW_IO_TARGET_WRITE, + EFCT_HW_IO_TARGET_RSP, + EFCT_HW_IO_DNRX_REQUEUE, + EFCT_HW_IO_MAX, +}; + +enum efct_hw_io_state { + EFCT_HW_IO_STATE_FREE, + EFCT_HW_IO_STATE_INUSE, + EFCT_HW_IO_STATE_WAIT_FREE, + EFCT_HW_IO_STATE_WAIT_SEC_HIO, +}; + +#define EFCT_TARGET_WRITE_SKIPS 1 +#define EFCT_TARGET_READ_SKIPS 2 + +struct efct_hw; +struct efct_io; + +#define EFCT_CMD_CTX_POOL_SZ 32 +/** + * HW command context. + * Stores the state for the asynchronous commands sent to the hardware. + */ +struct efct_command_ctx { + struct list_head list_entry; + int (*cb)(struct efct_hw *hw, int status, u8 *mqe, void *arg); + void *arg; /* Argument for callback */ + /* buffer holding command / results */ + u8 buf[SLI4_BMBX_SIZE]; + void *ctx; /* upper layer context */ +}; + +struct efct_hw_sgl { + uintptr_t addr; + size_t len; +}; + +union efct_hw_io_param_u { + struct sli_bls_params bls; + struct sli_els_params els; + struct sli_ct_params fc_ct; + struct sli_fcp_tgt_params fcp_tgt; +}; + +/* WQ steering mode */ +enum efct_hw_wq_steering { + EFCT_HW_WQ_STEERING_CLASS, + EFCT_HW_WQ_STEERING_REQUEST, + EFCT_HW_WQ_STEERING_CPU, +}; + +/* HW wqe object */ +struct efct_hw_wqe { + struct list_head list_entry; + bool abort_wqe_submit_needed; + bool send_abts; + u32 id; + u32 abort_reqtag; + u8 *wqebuf; +}; + +struct efct_hw_io; +/* Typedef for HW "done" callback */ +typedef int (*efct_hw_done_t)(struct efct_hw_io *, u32 len, int status, + u32 ext, void *ul_arg); + +/** + * HW IO object. + * + * Stores the per-IO information necessary + * for both SLI and efct. 
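+ *
+ * A hw_io cycles through the hw->io_free, hw->io_inuse and
+ * hw->io_wait_free lists (see enum efct_hw_io_state), while the
+ * state of the XRI exchange itself is tracked separately via
+ * @xbusy.
+ *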
+ * @ref: reference counter for hw io object + * @state: state of IO: free, busy, wait_free + * @list_entry used for busy, wait_free, free lists + * @wqe Work queue object, with link for pending + * @hw pointer back to hardware context + * @xfer_rdy transfer ready data + * @type IO type + * @xbusy Exchange is active in FW + * @abort_in_progress if TRUE, abort is in progress + * @status_saved if TRUE, latched status should be returned + * @wq_class WQ class if steering mode is Class + * @reqtag request tag for this HW IO + * @wq WQ assigned to the exchange + * @done Function called on IO completion + * @arg argument passed to IO done callback + * @abort_done Function called on abort completion + * @abort_arg argument passed to abort done callback + * @wq_steering WQ steering mode request + * @saved_status Saved status + * @saved_len Status length + * @saved_ext Saved extended status + * @eq EQ on which this HIO came up + * @sge_offset SGE data offset + * @def_sgl_count Count of SGEs in default SGL + * @abort_reqtag request tag for an abort of this HW IO + * @indicator Exchange indicator + * @def_sgl default SGL + * @sgl pointer to current active SGL + * @sgl_count count of SGEs in io->sgl + * @first_data_sge index of first data SGE + * @n_sge number of active SGEs + */ +struct efct_hw_io { + struct kref ref; + enum efct_hw_io_state state; + void (*release)(struct kref *arg); + struct list_head list_entry; + struct efct_hw_wqe wqe; + + struct efct_hw *hw; + struct efc_dma xfer_rdy; + u16 type; + bool xbusy; + int abort_in_progress; + bool status_saved; + u8 wq_class; + u16 reqtag; + + struct hw_wq *wq; + efct_hw_done_t done; + void *arg; + efct_hw_done_t abort_done; + void *abort_arg; + + enum efct_hw_wq_steering wq_steering; + + u32 saved_status; + u32 saved_len; + u32 saved_ext; + + struct hw_eq *eq; + u32 sge_offset; + u32 def_sgl_count; + u32 abort_reqtag; + u32 indicator; + struct efc_dma def_sgl; + struct efc_dma *sgl; + u32 sgl_count; + u32 first_data_sge; + u32 n_sge; +}; + +enum efct_hw_port { + EFCT_HW_PORT_INIT, + EFCT_HW_PORT_SHUTDOWN, +}; + +/* Node group rpi reference */ +struct efct_hw_rpi_ref { + atomic_t rpi_count; + atomic_t rpi_attached; +}; + +enum efct_hw_link_stat { + EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT, + EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT, + EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT, + EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT, + EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT, + EFCT_HW_LINK_STAT_CRC_COUNT, + EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT, + EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT, + EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT, + EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT, + EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT, + EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT, + EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT, + EFCT_HW_LINK_STAT_RCV_EOFA_COUNT, + EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT, + EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT, + EFCT_HW_LINK_STAT_RCV_SOFF_COUNT, + EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT, + EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT, + EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT, + EFCT_HW_LINK_STAT_MAX, +}; + +enum efct_hw_host_stat { + EFCT_HW_HOST_STAT_TX_KBYTE_COUNT, + EFCT_HW_HOST_STAT_RX_KBYTE_COUNT, + EFCT_HW_HOST_STAT_TX_FRAME_COUNT, + EFCT_HW_HOST_STAT_RX_FRAME_COUNT, + EFCT_HW_HOST_STAT_TX_SEQ_COUNT, + EFCT_HW_HOST_STAT_RX_SEQ_COUNT, + EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG, + EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP, + EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT, + EFCT_HW_HOST_STAT_RX_F_BSY_COUNT, + EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT, + 
EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT, + EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT, + EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT, + EFCT_HW_HOST_STAT_MAX, +}; + +enum efct_hw_state { + EFCT_HW_STATE_UNINITIALIZED, + EFCT_HW_STATE_QUEUES_ALLOCATED, + EFCT_HW_STATE_ACTIVE, + EFCT_HW_STATE_RESET_IN_PROGRESS, + EFCT_HW_STATE_TEARDOWN_IN_PROGRESS, +}; + +struct efct_hw_link_stat_counts { + u8 overflow; + u32 counter; +}; + +struct efct_hw_host_stat_counts { + u32 counter; +}; + +/* Structure used for the hash lookup of queue IDs */ +struct efct_queue_hash { + bool in_use; + u16 id; + u16 index; +}; + +/* WQ callback object */ +struct hw_wq_callback { + u16 instance_index; /* use for request tag */ + void (*callback)(void *arg, u8 *cqe, int status); + void *arg; + struct list_head list_entry; +}; + +struct reqtag_pool { + spinlock_t lock; /* pool lock */ + struct hw_wq_callback *tags[U16_MAX]; + struct list_head freelist; +}; + +struct efct_hw_config { + u32 n_eq; + u32 n_cq; + u32 n_mq; + u32 n_rq; + u32 n_wq; + u32 n_io; + u32 n_sgl; + u32 speed; + u32 topology; + /* size of the buffers for first burst */ + u32 rq_default_buffer_size; + u8 esoc; + /* MRQ RQ selection policy */ + u8 rq_selection_policy; + /* RQ quanta if rq_selection_policy == 2 */ + u8 rr_quanta; + u32 filter_def[SLI4_CMD_REG_FCFI_NUM_RQ_CFG]; +}; + +struct efct_hw { + struct efct *os; + struct sli4 sli; + u16 ulp_start; + u16 ulp_max; + u32 dump_size; + enum efct_hw_state state; + bool hw_setup_called; + u8 sliport_healthcheck; + u16 fcf_indicator; + + /* HW configuration */ + struct efct_hw_config config; + + /* calculated queue sizes for each type */ + u32 num_qentries[SLI4_QTYPE_MAX]; + + /* Storage for SLI queue objects */ + struct sli4_queue wq[EFCT_HW_MAX_NUM_WQ]; + struct sli4_queue rq[EFCT_HW_MAX_NUM_RQ]; + u16 hw_rq_lookup[EFCT_HW_MAX_NUM_RQ]; + struct sli4_queue mq[EFCT_HW_MAX_NUM_MQ]; + struct sli4_queue cq[EFCT_HW_MAX_NUM_CQ]; + struct sli4_queue eq[EFCT_HW_MAX_NUM_EQ]; + + /* HW queue */ + u32 eq_count; + u32 cq_count; + u32 mq_count; + u32 wq_count; + u32 rq_count; + u32 cmd_head_count; + struct list_head eq_list; + + struct efct_queue_hash cq_hash[EFCT_HW_Q_HASH_SIZE]; + struct efct_queue_hash rq_hash[EFCT_HW_Q_HASH_SIZE]; + struct efct_queue_hash wq_hash[EFCT_HW_Q_HASH_SIZE]; + + /* Storage for HW queue objects */ + struct hw_wq *hw_wq[EFCT_HW_MAX_NUM_WQ]; + struct hw_rq *hw_rq[EFCT_HW_MAX_NUM_RQ]; + struct hw_mq *hw_mq[EFCT_HW_MAX_NUM_MQ]; + struct hw_cq *hw_cq[EFCT_HW_MAX_NUM_CQ]; + struct hw_eq *hw_eq[EFCT_HW_MAX_NUM_EQ]; + /* count of hw_rq[] entries */ + u32 hw_rq_count; + /* count of multirq RQs */ + u32 hw_mrq_count; + + struct hw_wq **wq_cpu_array; + + /* Sequence objects used in incoming frame processing */ + struct efc_hw_sequence *seq_pool; + + /* Maintain an ordered, linked list of outstanding HW commands. 
*/ + struct mutex bmbx_lock; + spinlock_t cmd_lock; + struct list_head cmd_head; + struct list_head cmd_pending; + mempool_t *cmd_ctx_pool; + mempool_t *mbox_rqst_pool; + + struct sli4_link_event link; + + /* pointer array of IO objects */ + struct efct_hw_io **io; + /* array of WQE buffs mapped to IO objects */ + u8 *wqe_buffs; + + /* IO lock to synchronize list access */ + spinlock_t io_lock; + /* List of IO objects in use */ + struct list_head io_inuse; + /* List of IO objects waiting to be freed */ + struct list_head io_wait_free; + /* List of IO objects available for allocation */ + struct list_head io_free; + + struct efc_dma loop_map; + + struct efc_dma xfer_rdy; + + struct efc_dma rnode_mem; + + atomic_t io_alloc_failed_count; + + /* stat: wq sumbit count */ + u32 tcmd_wq_submit[EFCT_HW_MAX_NUM_WQ]; + /* stat: wq complete count */ + u32 tcmd_wq_complete[EFCT_HW_MAX_NUM_WQ]; + + atomic_t send_frame_seq_id; + struct reqtag_pool *wq_reqtag_pool; +}; + +enum efct_hw_io_count_type { + EFCT_HW_IO_INUSE_COUNT, + EFCT_HW_IO_FREE_COUNT, + EFCT_HW_IO_WAIT_FREE_COUNT, + EFCT_HW_IO_N_TOTAL_IO_COUNT, +}; + +/* HW queue data structures */ +struct hw_eq { + struct list_head list_entry; + enum sli4_qtype type; + u32 instance; + u32 entry_count; + u32 entry_size; + struct efct_hw *hw; + struct sli4_queue *queue; + struct list_head cq_list; + u32 use_count; +}; + +struct hw_cq { + struct list_head list_entry; + enum sli4_qtype type; + u32 instance; + u32 entry_count; + u32 entry_size; + struct hw_eq *eq; + struct sli4_queue *queue; + struct list_head q_list; + u32 use_count; +}; + +struct hw_q { + struct list_head list_entry; + enum sli4_qtype type; +}; + +struct hw_mq { + struct list_head list_entry; + enum sli4_qtype type; + u32 instance; + + u32 entry_count; + u32 entry_size; + struct hw_cq *cq; + struct sli4_queue *queue; + + u32 use_count; +}; + +struct hw_wq { + struct list_head list_entry; + enum sli4_qtype type; + u32 instance; + struct efct_hw *hw; + + u32 entry_count; + u32 entry_size; + struct hw_cq *cq; + struct sli4_queue *queue; + u32 class; + + /* WQ consumed */ + u32 wqec_set_count; + u32 wqec_count; + u32 free_count; + u32 total_submit_count; + struct list_head pending_list; + + /* HW IO allocated for use with Send Frame */ + struct efct_hw_io *send_frame_io; + + /* Stats */ + u32 use_count; + u32 wq_pending_count; +}; + +struct hw_rq { + struct list_head list_entry; + enum sli4_qtype type; + u32 instance; + + u32 entry_count; + u32 use_count; + u32 hdr_entry_size; + u32 first_burst_entry_size; + u32 data_entry_size; + bool is_mrq; + u32 base_mrq_id; + + struct hw_cq *cq; + + u8 filter_mask; + struct sli4_queue *hdr; + struct sli4_queue *first_burst; + struct sli4_queue *data; + + struct efc_hw_rq_buffer *hdr_buf; + struct efc_hw_rq_buffer *fb_buf; + struct efc_hw_rq_buffer *payload_buf; + /* RQ tracker for this RQ */ + struct efc_hw_sequence **rq_tracker; +}; + +struct efct_hw_send_frame_context { + struct efct_hw *hw; + struct hw_wq_callback *wqcb; + struct efct_hw_wqe wqe; + void (*callback)(int status, void *arg); + void *arg; + + /* General purpose elements */ + struct efc_hw_sequence *seq; + struct efc_dma payload; +}; + +struct efct_hw_grp_hdr { + u32 size; + __be32 magic_number; + u32 word2; + u8 rev_name[128]; + u8 date[12]; + u8 revision[32]; +}; + +static inline int +efct_hw_get_link_speed(struct efct_hw *hw) { + return hw->link.speed; +} + +int +efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev); +int efct_hw_init(struct efct_hw *hw); +int 
+efct_hw_parse_filter(struct efct_hw *hw, void *value); +int +efct_hw_init_queues(struct efct_hw *hw); +int +efct_hw_map_wq_cpu(struct efct_hw *hw); +uint64_t +efct_get_wwnn(struct efct_hw *hw); +uint64_t +efct_get_wwpn(struct efct_hw *hw); + +int efct_hw_rx_allocate(struct efct_hw *hw); +int efct_hw_rx_post(struct efct_hw *hw); +void efct_hw_rx_free(struct efct_hw *hw); +int +efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, + void *arg); +int +efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg); + +struct efct_hw_io *efct_hw_io_alloc(struct efct_hw *hw); +int efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io); +u8 efct_hw_io_inuse(struct efct_hw *hw, struct efct_hw_io *io); +int +efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type, + struct efct_hw_io *io, union efct_hw_io_param_u *iparam, + void *cb, void *arg); +int +efct_hw_io_register_sgl(struct efct_hw *hw, struct efct_hw_io *io, + struct efc_dma *sgl, + u32 sgl_count); +int +efct_hw_io_init_sges(struct efct_hw *hw, + struct efct_hw_io *io, enum efct_hw_io_type type); + +int +efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io, + uintptr_t addr, u32 length); +int +efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort, + bool send_abts, void *cb, void *arg); +u32 +efct_hw_io_get_count(struct efct_hw *hw, + enum efct_hw_io_count_type io_count_type); +struct efct_hw_io +*efct_hw_io_lookup(struct efct_hw *hw, u32 indicator); +void efct_hw_io_abort_all(struct efct_hw *hw); +void efct_hw_io_free_internal(struct kref *arg); + +/* HW WQ request tag API */ +struct reqtag_pool *efct_hw_reqtag_pool_alloc(struct efct_hw *hw); +void efct_hw_reqtag_pool_free(struct efct_hw *hw); +struct hw_wq_callback +*efct_hw_reqtag_alloc(struct efct_hw *hw, + void (*callback)(void *arg, u8 *cqe, + int status), void *arg); +void +efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb); +struct hw_wq_callback +*efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index); + +/* RQ completion handlers for RQ pair mode */ +int +efct_hw_rqpair_process_rq(struct efct_hw *hw, + struct hw_cq *cq, u8 *cqe); +int +efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq); +static inline void +efct_hw_sequence_copy(struct efc_hw_sequence *dst, + struct efc_hw_sequence *src) +{ + /* Copy src to dst, then zero out the linked list link */ + *dst = *src; +} + +int +efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq); + +static inline int +efct_hw_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq) +{ + /* Only RQ pair mode is supported */ + return efct_hw_rqpair_sequence_free(hw, seq); +} + +int +efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq, + u32 max_isr_time_msec); +void efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq); +void +efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq, + u8 *cqe, int status, u16 rid); +void +efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq, + u8 *cqe, u16 rid); +int +efct_hw_process(struct efct_hw *hw, u32 vector, u32 max_isr_time_msec); +int +efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id); +int efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe); +int +efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr, + u8 sof, u8 eof, struct efc_dma *payload, + struct efct_hw_send_frame_context *ctx, + void (*callback)(void *arg, u8 *cqe, int status), + void *arg); +int +efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io); +int 
+efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls); +int +efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params, + void *cb, void *arg); + +/* Function for retrieving link statistics */ +int +efct_hw_get_link_stats(struct efct_hw *hw, + u8 req_ext_counters, + u8 clear_overflow_flags, + u8 clear_all_counters, + void (*efct_hw_link_stat_cb_t)(int status, + u32 num_counters, + struct efct_hw_link_stat_counts *counters, void *arg), + void *arg); +/* Function for retrieving host statistics */ +int +efct_hw_get_host_stats(struct efct_hw *hw, + u8 cc, + void (*efct_hw_host_stat_cb_t)(int status, + u32 num_counters, + struct efct_hw_host_stat_counts *counters, void *arg), + void *arg); +int +efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma, + u32 size, u32 offset, int last, + void (*cb)(int status, u32 bytes_written, + u32 change_status, void *arg), + void *arg); +typedef void (*efct_hw_async_cb_t)(struct efct_hw *hw, int status, + u8 *mqe, void *arg); +int +efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg); + +struct hw_eq *efct_hw_new_eq(struct efct_hw *hw, u32 entry_count); +struct hw_cq *efct_hw_new_cq(struct hw_eq *eq, u32 entry_count); +u32 +efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[], + u32 num_cqs, u32 entry_count); +struct hw_mq *efct_hw_new_mq(struct hw_cq *cq, u32 entry_count); +struct hw_wq +*efct_hw_new_wq(struct hw_cq *cq, u32 entry_count); +u32 +efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[], + u32 num_rq_pairs, u32 entry_count); +void efct_hw_del_eq(struct hw_eq *eq); +void efct_hw_del_cq(struct hw_cq *cq); +void efct_hw_del_mq(struct hw_mq *mq); +void efct_hw_del_wq(struct hw_wq *wq); +void efct_hw_del_rq(struct hw_rq *rq); +void efct_hw_queue_teardown(struct efct_hw *hw); +void efct_hw_teardown(struct efct_hw *hw); +int +efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset); + +int +efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl, + uintptr_t value, + void (*cb)(int status, uintptr_t value, void *arg), + void *arg); + +#endif /* __EFCT_H__ */ diff --git a/drivers/scsi/elx/efct/efct_hw_queues.c b/drivers/scsi/elx/efct/efct_hw_queues.c new file mode 100644 index 000000000..3a1d1a586 --- /dev/null +++ b/drivers/scsi/elx/efct/efct_hw_queues.c @@ -0,0 +1,677 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
+ */ + +#include "efct_driver.h" +#include "efct_hw.h" +#include "efct_unsol.h" + +int +efct_hw_init_queues(struct efct_hw *hw) +{ + struct hw_eq *eq = NULL; + struct hw_cq *cq = NULL; + struct hw_wq *wq = NULL; + struct hw_mq *mq = NULL; + + struct hw_eq *eqs[EFCT_HW_MAX_NUM_EQ]; + struct hw_cq *cqs[EFCT_HW_MAX_NUM_EQ]; + struct hw_rq *rqs[EFCT_HW_MAX_NUM_EQ]; + u32 i = 0, j; + + hw->eq_count = 0; + hw->cq_count = 0; + hw->mq_count = 0; + hw->wq_count = 0; + hw->rq_count = 0; + hw->hw_rq_count = 0; + INIT_LIST_HEAD(&hw->eq_list); + + for (i = 0; i < hw->config.n_eq; i++) { + /* Create EQ */ + eq = efct_hw_new_eq(hw, EFCT_HW_EQ_DEPTH); + if (!eq) { + efct_hw_queue_teardown(hw); + return -ENOMEM; + } + + eqs[i] = eq; + + /* Create one MQ */ + if (!i) { + cq = efct_hw_new_cq(eq, + hw->num_qentries[SLI4_QTYPE_CQ]); + if (!cq) { + efct_hw_queue_teardown(hw); + return -ENOMEM; + } + + mq = efct_hw_new_mq(cq, EFCT_HW_MQ_DEPTH); + if (!mq) { + efct_hw_queue_teardown(hw); + return -ENOMEM; + } + } + + /* Create WQ */ + cq = efct_hw_new_cq(eq, hw->num_qentries[SLI4_QTYPE_CQ]); + if (!cq) { + efct_hw_queue_teardown(hw); + return -ENOMEM; + } + + wq = efct_hw_new_wq(cq, hw->num_qentries[SLI4_QTYPE_WQ]); + if (!wq) { + efct_hw_queue_teardown(hw); + return -ENOMEM; + } + } + + /* Create CQ set */ + if (efct_hw_new_cq_set(eqs, cqs, i, hw->num_qentries[SLI4_QTYPE_CQ])) { + efct_hw_queue_teardown(hw); + return -EIO; + } + + /* Create RQ set */ + if (efct_hw_new_rq_set(cqs, rqs, i, EFCT_HW_RQ_ENTRIES_DEF)) { + efct_hw_queue_teardown(hw); + return -EIO; + } + + for (j = 0; j < i ; j++) { + rqs[j]->filter_mask = 0; + rqs[j]->is_mrq = true; + rqs[j]->base_mrq_id = rqs[0]->hdr->id; + } + + hw->hw_mrq_count = i; + + return 0; +} + +int +efct_hw_map_wq_cpu(struct efct_hw *hw) +{ + struct efct *efct = hw->os; + u32 cpu = 0, i; + + /* Init cpu_map array */ + hw->wq_cpu_array = kcalloc(num_possible_cpus(), sizeof(void *), + GFP_KERNEL); + if (!hw->wq_cpu_array) + return -ENOMEM; + + for (i = 0; i < hw->config.n_eq; i++) { + const struct cpumask *maskp; + + /* Get a CPU mask for all CPUs affinitized to this vector */ + maskp = pci_irq_get_affinity(efct->pci, i); + if (!maskp) { + efc_log_debug(efct, "maskp null for vector:%d\n", i); + continue; + } + + /* Loop through all CPUs associated with vector idx */ + for_each_cpu_and(cpu, maskp, cpu_present_mask) { + efc_log_debug(efct, "CPU:%d irq vector:%d\n", cpu, i); + hw->wq_cpu_array[cpu] = hw->hw_wq[i]; + } + } + + return 0; +} + +struct hw_eq * +efct_hw_new_eq(struct efct_hw *hw, u32 entry_count) +{ + struct hw_eq *eq = kzalloc(sizeof(*eq), GFP_KERNEL); + + if (!eq) + return NULL; + + eq->type = SLI4_QTYPE_EQ; + eq->hw = hw; + eq->entry_count = entry_count; + eq->instance = hw->eq_count++; + eq->queue = &hw->eq[eq->instance]; + INIT_LIST_HEAD(&eq->cq_list); + + if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_EQ, eq->queue, entry_count, + NULL)) { + efc_log_err(hw->os, "EQ[%d] alloc failure\n", eq->instance); + kfree(eq); + return NULL; + } + + sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8); + hw->hw_eq[eq->instance] = eq; + INIT_LIST_HEAD(&eq->list_entry); + list_add_tail(&eq->list_entry, &hw->eq_list); + efc_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance, + eq->queue->id, eq->entry_count); + return eq; +} + +struct hw_cq * +efct_hw_new_cq(struct hw_eq *eq, u32 entry_count) +{ + struct efct_hw *hw = eq->hw; + struct hw_cq *cq = kzalloc(sizeof(*cq), GFP_KERNEL); + + if (!cq) + return NULL; + + cq->eq = eq; + cq->type = SLI4_QTYPE_CQ; + cq->instance = 
eq->hw->cq_count++; + cq->entry_count = entry_count; + cq->queue = &hw->cq[cq->instance]; + + INIT_LIST_HEAD(&cq->q_list); + + if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_CQ, cq->queue, + cq->entry_count, eq->queue)) { + efc_log_err(hw->os, "CQ[%d] allocation failure len=%d\n", + eq->instance, eq->entry_count); + kfree(cq); + return NULL; + } + + hw->hw_cq[cq->instance] = cq; + INIT_LIST_HEAD(&cq->list_entry); + list_add_tail(&cq->list_entry, &eq->cq_list); + efc_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance, + cq->queue->id, cq->entry_count); + return cq; +} + +u32 +efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[], + u32 num_cqs, u32 entry_count) +{ + u32 i; + struct efct_hw *hw = eqs[0]->hw; + struct sli4 *sli4 = &hw->sli; + struct hw_cq *cq = NULL; + struct sli4_queue *qs[SLI4_MAX_CQ_SET_COUNT]; + struct sli4_queue *assefct[SLI4_MAX_CQ_SET_COUNT]; + + /* Initialise CQS pointers to NULL */ + for (i = 0; i < num_cqs; i++) + cqs[i] = NULL; + + for (i = 0; i < num_cqs; i++) { + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) + goto error; + + cqs[i] = cq; + cq->eq = eqs[i]; + cq->type = SLI4_QTYPE_CQ; + cq->instance = hw->cq_count++; + cq->entry_count = entry_count; + cq->queue = &hw->cq[cq->instance]; + qs[i] = cq->queue; + assefct[i] = eqs[i]->queue; + INIT_LIST_HEAD(&cq->q_list); + } + + if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assefct)) { + efc_log_err(hw->os, "Failed to create CQ Set.\n"); + goto error; + } + + for (i = 0; i < num_cqs; i++) { + hw->hw_cq[cqs[i]->instance] = cqs[i]; + INIT_LIST_HEAD(&cqs[i]->list_entry); + list_add_tail(&cqs[i]->list_entry, &cqs[i]->eq->cq_list); + } + + return 0; + +error: + for (i = 0; i < num_cqs; i++) { + kfree(cqs[i]); + cqs[i] = NULL; + } + return -EIO; +} + +struct hw_mq * +efct_hw_new_mq(struct hw_cq *cq, u32 entry_count) +{ + struct efct_hw *hw = cq->eq->hw; + struct hw_mq *mq = kzalloc(sizeof(*mq), GFP_KERNEL); + + if (!mq) + return NULL; + + mq->cq = cq; + mq->type = SLI4_QTYPE_MQ; + mq->instance = cq->eq->hw->mq_count++; + mq->entry_count = entry_count; + mq->entry_size = EFCT_HW_MQ_DEPTH; + mq->queue = &hw->mq[mq->instance]; + + if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_MQ, mq->queue, mq->entry_size, + cq->queue)) { + efc_log_err(hw->os, "MQ allocation failure\n"); + kfree(mq); + return NULL; + } + + hw->hw_mq[mq->instance] = mq; + INIT_LIST_HEAD(&mq->list_entry); + list_add_tail(&mq->list_entry, &cq->q_list); + efc_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance, + mq->queue->id, mq->entry_count); + return mq; +} + +struct hw_wq * +efct_hw_new_wq(struct hw_cq *cq, u32 entry_count) +{ + struct efct_hw *hw = cq->eq->hw; + struct hw_wq *wq = kzalloc(sizeof(*wq), GFP_KERNEL); + + if (!wq) + return NULL; + + wq->hw = cq->eq->hw; + wq->cq = cq; + wq->type = SLI4_QTYPE_WQ; + wq->instance = cq->eq->hw->wq_count++; + wq->entry_count = entry_count; + wq->queue = &hw->wq[wq->instance]; + wq->wqec_set_count = EFCT_HW_WQEC_SET_COUNT; + wq->wqec_count = wq->wqec_set_count; + wq->free_count = wq->entry_count - 1; + INIT_LIST_HEAD(&wq->pending_list); + + if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_WQ, wq->queue, + wq->entry_count, cq->queue)) { + efc_log_err(hw->os, "WQ allocation failure\n"); + kfree(wq); + return NULL; + } + + hw->hw_wq[wq->instance] = wq; + INIT_LIST_HEAD(&wq->list_entry); + list_add_tail(&wq->list_entry, &cq->q_list); + efc_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d\n", + wq->instance, wq->queue->id, wq->entry_count, wq->class); + return wq; +} + +u32 
+efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[], + u32 num_rq_pairs, u32 entry_count) +{ + struct efct_hw *hw = cqs[0]->eq->hw; + struct hw_rq *rq = NULL; + struct sli4_queue *qs[SLI4_MAX_RQ_SET_COUNT * 2] = { NULL }; + u32 i, q_count, size; + + /* Initialise RQS pointers */ + for (i = 0; i < num_rq_pairs; i++) + rqs[i] = NULL; + + /* + * Allocate an RQ object SET, where each element in set + * encapsulates 2 SLI queues (for rq pair) + */ + for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) { + rq = kzalloc(sizeof(*rq), GFP_KERNEL); + if (!rq) + goto error; + + rqs[i] = rq; + rq->instance = hw->hw_rq_count++; + rq->cq = cqs[i]; + rq->type = SLI4_QTYPE_RQ; + rq->entry_count = entry_count; + + /* Header RQ */ + rq->hdr = &hw->rq[hw->rq_count]; + rq->hdr_entry_size = EFCT_HW_RQ_HEADER_SIZE; + hw->hw_rq_lookup[hw->rq_count] = rq->instance; + hw->rq_count++; + qs[q_count] = rq->hdr; + + /* Data RQ */ + rq->data = &hw->rq[hw->rq_count]; + rq->data_entry_size = hw->config.rq_default_buffer_size; + hw->hw_rq_lookup[hw->rq_count] = rq->instance; + hw->rq_count++; + qs[q_count + 1] = rq->data; + + rq->rq_tracker = NULL; + } + + if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs, + cqs[0]->queue->id, + rqs[0]->entry_count, + rqs[0]->hdr_entry_size, + rqs[0]->data_entry_size)) { + efc_log_err(hw->os, "RQ Set alloc failure for base CQ=%d\n", + cqs[0]->queue->id); + goto error; + } + + for (i = 0; i < num_rq_pairs; i++) { + hw->hw_rq[rqs[i]->instance] = rqs[i]; + INIT_LIST_HEAD(&rqs[i]->list_entry); + list_add_tail(&rqs[i]->list_entry, &cqs[i]->q_list); + size = sizeof(struct efc_hw_sequence *) * rqs[i]->entry_count; + rqs[i]->rq_tracker = kzalloc(size, GFP_KERNEL); + if (!rqs[i]->rq_tracker) + goto error; + } + + return 0; + +error: + for (i = 0; i < num_rq_pairs; i++) { + if (rqs[i]) { + kfree(rqs[i]->rq_tracker); + kfree(rqs[i]); + } + } + + return -EIO; +} + +void +efct_hw_del_eq(struct hw_eq *eq) +{ + struct hw_cq *cq; + struct hw_cq *cq_next; + + if (!eq) + return; + + list_for_each_entry_safe(cq, cq_next, &eq->cq_list, list_entry) + efct_hw_del_cq(cq); + list_del(&eq->list_entry); + eq->hw->hw_eq[eq->instance] = NULL; + kfree(eq); +} + +void +efct_hw_del_cq(struct hw_cq *cq) +{ + struct hw_q *q; + struct hw_q *q_next; + + if (!cq) + return; + + list_for_each_entry_safe(q, q_next, &cq->q_list, list_entry) { + switch (q->type) { + case SLI4_QTYPE_MQ: + efct_hw_del_mq((struct hw_mq *)q); + break; + case SLI4_QTYPE_WQ: + efct_hw_del_wq((struct hw_wq *)q); + break; + case SLI4_QTYPE_RQ: + efct_hw_del_rq((struct hw_rq *)q); + break; + default: + break; + } + } + list_del(&cq->list_entry); + cq->eq->hw->hw_cq[cq->instance] = NULL; + kfree(cq); +} + +void +efct_hw_del_mq(struct hw_mq *mq) +{ + if (!mq) + return; + + list_del(&mq->list_entry); + mq->cq->eq->hw->hw_mq[mq->instance] = NULL; + kfree(mq); +} + +void +efct_hw_del_wq(struct hw_wq *wq) +{ + if (!wq) + return; + + list_del(&wq->list_entry); + wq->cq->eq->hw->hw_wq[wq->instance] = NULL; + kfree(wq); +} + +void +efct_hw_del_rq(struct hw_rq *rq) +{ + struct efct_hw *hw = NULL; + + if (!rq) + return; + /* Free RQ tracker */ + kfree(rq->rq_tracker); + rq->rq_tracker = NULL; + list_del(&rq->list_entry); + hw = rq->cq->eq->hw; + hw->hw_rq[rq->instance] = NULL; + kfree(rq); +} + +void +efct_hw_queue_teardown(struct efct_hw *hw) +{ + struct hw_eq *eq; + struct hw_eq *eq_next; + + if (!hw->eq_list.next) + return; + + list_for_each_entry_safe(eq, eq_next, &hw->eq_list, list_entry) + efct_hw_del_eq(eq); +} + +static inline int 
+efct_hw_rqpair_find(struct efct_hw *hw, u16 rq_id) +{ + return efct_hw_queue_hash_find(hw->rq_hash, rq_id); +} + +static struct efc_hw_sequence * +efct_hw_rqpair_get(struct efct_hw *hw, u16 rqindex, u16 bufindex) +{ + struct sli4_queue *rq_hdr = &hw->rq[rqindex]; + struct efc_hw_sequence *seq = NULL; + struct hw_rq *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; + unsigned long flags = 0; + + if (bufindex >= rq_hdr->length) { + efc_log_err(hw->os, + "RQidx %d bufidx %d exceed ring len %d for id %d\n", + rqindex, bufindex, rq_hdr->length, rq_hdr->id); + return NULL; + } + + /* rq_hdr lock also covers rqindex+1 queue */ + spin_lock_irqsave(&rq_hdr->lock, flags); + + seq = rq->rq_tracker[bufindex]; + rq->rq_tracker[bufindex] = NULL; + + if (!seq) { + efc_log_err(hw->os, + "RQbuf NULL, rqidx %d, bufidx %d, cur q idx = %d\n", + rqindex, bufindex, rq_hdr->index); + } + + spin_unlock_irqrestore(&rq_hdr->lock, flags); + return seq; +} + +int +efct_hw_rqpair_process_rq(struct efct_hw *hw, struct hw_cq *cq, + u8 *cqe) +{ + u16 rq_id; + u32 index; + int rqindex; + int rq_status; + u32 h_len; + u32 p_len; + struct efc_hw_sequence *seq; + struct hw_rq *rq; + + rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, + &rq_id, &index); + if (rq_status != 0) { + switch (rq_status) { + case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED: + case SLI4_FC_ASYNC_RQ_DMA_FAILURE: + /* just get RQ buffer then return to chip */ + rqindex = efct_hw_rqpair_find(hw, rq_id); + if (rqindex < 0) { + efc_log_debug(hw->os, + "status=%#x: lookup fail id=%#x\n", + rq_status, rq_id); + break; + } + + /* get RQ buffer */ + seq = efct_hw_rqpair_get(hw, rqindex, index); + + /* return to chip */ + if (efct_hw_rqpair_sequence_free(hw, seq)) { + efc_log_debug(hw->os, + "status=%#x,fail rtrn buf to RQ\n", + rq_status); + break; + } + break; + case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED: + case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC: + /* + * since RQ buffers were not consumed, cannot return + * them to chip + */ + efc_log_debug(hw->os, "Warning: RCQE status=%#x,\n", + rq_status); + fallthrough; + default: + break; + } + return -EIO; + } + + rqindex = efct_hw_rqpair_find(hw, rq_id); + if (rqindex < 0) { + efc_log_debug(hw->os, "Error: rq_id lookup failed for id=%#x\n", + rq_id); + return -EIO; + } + + rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; + rq->use_count++; + + seq = efct_hw_rqpair_get(hw, rqindex, index); + if (WARN_ON(!seq)) + return -EIO; + + seq->hw = hw; + + sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len); + seq->header->dma.len = h_len; + seq->payload->dma.len = p_len; + seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe); + seq->hw_priv = cq->eq; + + efct_unsolicited_cb(hw->os, seq); + + return 0; +} + +static int +efct_hw_rqpair_put(struct efct_hw *hw, struct efc_hw_sequence *seq) +{ + struct sli4_queue *rq_hdr = &hw->rq[seq->header->rqindex]; + struct sli4_queue *rq_payload = &hw->rq[seq->payload->rqindex]; + u32 hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex]; + struct hw_rq *rq = hw->hw_rq[hw_rq_index]; + u32 phys_hdr[2]; + u32 phys_payload[2]; + int qindex_hdr; + int qindex_payload; + unsigned long flags = 0; + + /* Update the RQ verification lookup tables */ + phys_hdr[0] = upper_32_bits(seq->header->dma.phys); + phys_hdr[1] = lower_32_bits(seq->header->dma.phys); + phys_payload[0] = upper_32_bits(seq->payload->dma.phys); + phys_payload[1] = lower_32_bits(seq->payload->dma.phys); + + /* rq_hdr lock also covers payload / header->rqindex+1 queue */ + spin_lock_irqsave(&rq_hdr->lock, flags); + + /* + * Note: The header must be posted last for 
buffer pair mode because + * posting on the header queue posts the payload queue as well. + * We do not ring the payload queue independently in RQ pair mode. + */ + qindex_payload = sli_rq_write(&hw->sli, rq_payload, + (void *)phys_payload); + qindex_hdr = sli_rq_write(&hw->sli, rq_hdr, (void *)phys_hdr); + if (qindex_hdr < 0 || + qindex_payload < 0) { + efc_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id); + spin_unlock_irqrestore(&rq_hdr->lock, flags); + return -EIO; + } + + /* ensure the indexes are the same */ + WARN_ON(qindex_hdr != qindex_payload); + + /* Update the lookup table */ + if (!rq->rq_tracker[qindex_hdr]) { + rq->rq_tracker[qindex_hdr] = seq; + } else { + efc_log_debug(hw->os, + "expected rq_tracker[%d][%d] buffer to be NULL\n", + hw_rq_index, qindex_hdr); + } + + spin_unlock_irqrestore(&rq_hdr->lock, flags); + return 0; +} + +int +efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq) +{ + int rc = 0; + + /* + * Post the data buffer first. Because in RQ pair mode, ringing the + * doorbell of the header ring will post the data buffer as well. + */ + if (efct_hw_rqpair_put(hw, seq)) { + efc_log_err(hw->os, "error writing buffers\n"); + return -EIO; + } + + return rc; +} + +int +efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq) +{ + struct efct *efct = efc->base; + + return efct_hw_rqpair_sequence_free(&efct->hw, seq); +} diff --git a/drivers/scsi/elx/efct/efct_io.c b/drivers/scsi/elx/efct/efct_io.c new file mode 100644 index 000000000..c612f0a48 --- /dev/null +++ b/drivers/scsi/elx/efct/efct_io.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#include "efct_driver.h" +#include "efct_hw.h" +#include "efct_io.h" + +struct efct_io_pool { + struct efct *efct; + spinlock_t lock; /* IO pool lock */ + u32 io_num_ios; /* Total IOs allocated */ + struct efct_io *ios[EFCT_NUM_SCSI_IOS]; + struct list_head freelist; + +}; + +struct efct_io_pool * +efct_io_pool_create(struct efct *efct, u32 num_sgl) +{ + u32 i = 0; + struct efct_io_pool *io_pool; + struct efct_io *io; + + /* Allocate the IO pool */ + io_pool = kzalloc(sizeof(*io_pool), GFP_KERNEL); + if (!io_pool) + return NULL; + + io_pool->efct = efct; + INIT_LIST_HEAD(&io_pool->freelist); + /* initialize IO pool lock */ + spin_lock_init(&io_pool->lock); + + for (i = 0; i < EFCT_NUM_SCSI_IOS; i++) { + io = kzalloc(sizeof(*io), GFP_KERNEL); + if (!io) + break; + + io_pool->io_num_ios++; + io_pool->ios[i] = io; + io->tag = i; + io->instance_index = i; + + /* Allocate a response buffer */ + io->rspbuf.size = SCSI_RSP_BUF_LENGTH; + io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev, + io->rspbuf.size, + &io->rspbuf.phys, GFP_KERNEL); + if (!io->rspbuf.virt) { + efc_log_err(efct, "dma_alloc rspbuf failed\n"); + efct_io_pool_free(io_pool); + return NULL; + } + + /* Allocate SGL */ + io->sgl = kzalloc(sizeof(*io->sgl) * num_sgl, GFP_KERNEL); + if (!io->sgl) { + efct_io_pool_free(io_pool); + return NULL; + } + + io->sgl_allocated = num_sgl; + io->sgl_count = 0; + + INIT_LIST_HEAD(&io->list_entry); + list_add_tail(&io->list_entry, &io_pool->freelist); + } + + return io_pool; +} + +int +efct_io_pool_free(struct efct_io_pool *io_pool) +{ + struct efct *efct; + u32 i; + struct efct_io *io; + + if (io_pool) { + efct = io_pool->efct; + + for (i = 0; i < io_pool->io_num_ios; i++) { + io = io_pool->ios[i]; + if (!io) + continue; + + kfree(io->sgl); + 
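+			/* Release the DMA-coherent response buffer that was
+			 * allocated for this IO in efct_io_pool_create().
+			 */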
dma_free_coherent(&efct->pci->dev, + io->rspbuf.size, io->rspbuf.virt, + io->rspbuf.phys); + memset(&io->rspbuf, 0, sizeof(struct efc_dma)); + } + + kfree(io_pool); + efct->xport->io_pool = NULL; + } + + return 0; +} + +struct efct_io * +efct_io_pool_io_alloc(struct efct_io_pool *io_pool) +{ + struct efct_io *io = NULL; + struct efct *efct; + unsigned long flags = 0; + + efct = io_pool->efct; + + spin_lock_irqsave(&io_pool->lock, flags); + + if (!list_empty(&io_pool->freelist)) { + io = list_first_entry(&io_pool->freelist, struct efct_io, + list_entry); + list_del_init(&io->list_entry); + } + + spin_unlock_irqrestore(&io_pool->lock, flags); + + if (!io) + return NULL; + + io->io_type = EFCT_IO_TYPE_MAX; + io->hio_type = EFCT_HW_IO_MAX; + io->hio = NULL; + io->transferred = 0; + io->efct = efct; + io->timeout = 0; + io->sgl_count = 0; + io->tgt_task_tag = 0; + io->init_task_tag = 0; + io->hw_tag = 0; + io->display_name = "pending"; + io->seq_init = 0; + io->io_free = 0; + io->release = NULL; + atomic_add_return(1, &efct->xport->io_active_count); + atomic_add_return(1, &efct->xport->io_total_alloc); + return io; +} + +/* Free an object used to track an IO */ +void +efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io) +{ + struct efct *efct; + struct efct_hw_io *hio = NULL; + unsigned long flags = 0; + + efct = io_pool->efct; + + spin_lock_irqsave(&io_pool->lock, flags); + hio = io->hio; + io->hio = NULL; + io->io_free = 1; + INIT_LIST_HEAD(&io->list_entry); + list_add(&io->list_entry, &io_pool->freelist); + spin_unlock_irqrestore(&io_pool->lock, flags); + + if (hio) + efct_hw_io_free(&efct->hw, hio); + + atomic_sub_return(1, &efct->xport->io_active_count); + atomic_add_return(1, &efct->xport->io_total_free); +} + +/* Find an I/O given it's node and ox_id */ +struct efct_io * +efct_io_find_tgt_io(struct efct *efct, struct efct_node *node, + u16 ox_id, u16 rx_id) +{ + struct efct_io *io = NULL; + unsigned long flags = 0; + u8 found = false; + + spin_lock_irqsave(&node->active_ios_lock, flags); + list_for_each_entry(io, &node->active_ios, list_entry) { + if ((io->cmd_tgt && io->init_task_tag == ox_id) && + (rx_id == 0xffff || io->tgt_task_tag == rx_id)) { + if (kref_get_unless_zero(&io->ref)) + found = true; + break; + } + } + spin_unlock_irqrestore(&node->active_ios_lock, flags); + return found ? io : NULL; +} diff --git a/drivers/scsi/elx/efct/efct_io.h b/drivers/scsi/elx/efct/efct_io.h new file mode 100644 index 000000000..bb0f51811 --- /dev/null +++ b/drivers/scsi/elx/efct/efct_io.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#if !defined(__EFCT_IO_H__) +#define __EFCT_IO_H__ + +#include "efct_lio.h" + +#define EFCT_LOG_ENABLE_IO_ERRORS(efct) \ + (((efct) != NULL) ? (((efct)->logmask & (1U << 6)) != 0) : 0) + +#define io_error_log(io, fmt, ...) 
\ + do { \ + if (EFCT_LOG_ENABLE_IO_ERRORS(io->efct)) \ + efc_log_warn(io->efct, fmt, ##__VA_ARGS__); \ + } while (0) + +#define SCSI_CMD_BUF_LENGTH 48 +#define SCSI_RSP_BUF_LENGTH (FCP_RESP_WITH_EXT + SCSI_SENSE_BUFFERSIZE) +#define EFCT_NUM_SCSI_IOS 8192 + +enum efct_io_type { + EFCT_IO_TYPE_IO = 0, + EFCT_IO_TYPE_ELS, + EFCT_IO_TYPE_CT, + EFCT_IO_TYPE_CT_RESP, + EFCT_IO_TYPE_BLS_RESP, + EFCT_IO_TYPE_ABORT, + + EFCT_IO_TYPE_MAX, +}; + +enum efct_els_state { + EFCT_ELS_REQUEST = 0, + EFCT_ELS_REQUEST_DELAYED, + EFCT_ELS_REQUEST_DELAY_ABORT, + EFCT_ELS_REQ_ABORT, + EFCT_ELS_REQ_ABORTED, + EFCT_ELS_ABORT_IO_COMPL, +}; + +/** + * Scsi target IO object + * @efct: pointer back to efct + * @instance_index: unique instance index value + * @io: IO display name + * @node: pointer to node + * @list_entry: io list entry + * @io_pending_link: io pending list entry + * @ref: reference counter + * @release: release callback function + * @init_task_tag: initiator task tag (OX_ID) for back-end and SCSI logging + * @tgt_task_tag: target task tag (RX_ID) for back-end and SCSI logging + * @hw_tag: HW layer unique IO id + * @tag: unique IO identifier + * @sgl: SGL + * @sgl_allocated: Number of allocated SGEs + * @sgl_count: Number of SGEs in this SGL + * @tgt_io: backend target private IO data + * @exp_xfer_len: expected data transfer length, based on FC header + * @hw_priv: Declarations private to HW/SLI + * @io_type: indicates what this struct efct_io structure is used for + * @hio: hw io object + * @transferred: Number of bytes transferred + * @auto_resp: set if auto_trsp was set + * @low_latency: set if low latency request + * @wq_steering: selected WQ steering request + * @wq_class: selected WQ class if steering is class + * @xfer_req: transfer size for current request + * @scsi_tgt_cb: target callback function + * @scsi_tgt_cb_arg: target callback function argument + * @abort_cb: abort callback function + * @abort_cb_arg: abort callback function argument + * @bls_cb: BLS callback function + * @bls_cb_arg: BLS callback function argument + * @tmf_cmd: TMF command being processed + * @abort_rx_id: rx_id from the ABTS that initiated the command abort + * @cmd_tgt: True if this is a Target command + * @send_abts: when aborting, indicates ABTS is to be sent + * @cmd_ini: True if this is an Initiator command + * @seq_init: True if local node has sequence initiative + * @iparam: iparams for hw io send call + * @hio_type: HW IO type + * @wire_len: wire length + * @hw_cb: saved HW callback + * @io_to_abort: for abort handling, pointer to IO to abort + * @rspbuf: SCSI Response buffer + * @timeout: Timeout value in seconds for this IO + * @cs_ctl: CS_CTL priority for this IO + * @io_free: Is io object in freelist + * @app_id: application id + */ +struct efct_io { + struct efct *efct; + u32 instance_index; + const char *display_name; + struct efct_node *node; + + struct list_head list_entry; + struct list_head io_pending_link; + struct kref ref; + void (*release)(struct kref *arg); + u32 init_task_tag; + u32 tgt_task_tag; + u32 hw_tag; + u32 tag; + struct efct_scsi_sgl *sgl; + u32 sgl_allocated; + u32 sgl_count; + struct efct_scsi_tgt_io tgt_io; + u32 exp_xfer_len; + + void *hw_priv; + + enum efct_io_type io_type; + struct efct_hw_io *hio; + size_t transferred; + + bool auto_resp; + bool low_latency; + u8 wq_steering; + u8 wq_class; + u64 xfer_req; + efct_scsi_io_cb_t scsi_tgt_cb; + void *scsi_tgt_cb_arg; + efct_scsi_io_cb_t abort_cb; + void *abort_cb_arg; + efct_scsi_io_cb_t bls_cb; + void *bls_cb_arg; + enum 
efct_scsi_tmf_cmd tmf_cmd; + u16 abort_rx_id; + + bool cmd_tgt; + bool send_abts; + bool cmd_ini; + bool seq_init; + union efct_hw_io_param_u iparam; + enum efct_hw_io_type hio_type; + u64 wire_len; + void *hw_cb; + + struct efct_io *io_to_abort; + + struct efc_dma rspbuf; + u32 timeout; + u8 cs_ctl; + u8 io_free; + u32 app_id; +}; + +struct efct_io_cb_arg { + int status; + int ext_status; + void *app; +}; + +struct efct_io_pool * +efct_io_pool_create(struct efct *efct, u32 num_sgl); +int +efct_io_pool_free(struct efct_io_pool *io_pool); +u32 +efct_io_pool_allocated(struct efct_io_pool *io_pool); + +struct efct_io * +efct_io_pool_io_alloc(struct efct_io_pool *io_pool); +void +efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io); +struct efct_io * +efct_io_find_tgt_io(struct efct *efct, struct efct_node *node, + u16 ox_id, u16 rx_id); +#endif /* __EFCT_IO_H__ */ diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c new file mode 100644 index 000000000..a982b9cf9 --- /dev/null +++ b/drivers/scsi/elx/efct/efct_lio.c @@ -0,0 +1,1675 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#include +#include +#include "efct_driver.h" +#include "efct_lio.h" + +/* + * lio_wq is used to call the LIO backed during creation or deletion of + * sessions. This brings serialization to the session management as we create + * single threaded work queue. + */ +static struct workqueue_struct *lio_wq; + +static int +efct_format_wwn(char *str, size_t len, const char *pre, u64 wwn) +{ + u8 a[8]; + + put_unaligned_be64(wwn, a); + return snprintf(str, len, "%s%8phC", pre, a); +} + +static int +efct_lio_parse_wwn(const char *name, u64 *wwp, u8 npiv) +{ + int num; + u8 b[8]; + + if (npiv) { + num = sscanf(name, + "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx", + &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6], + &b[7]); + } else { + num = sscanf(name, + "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", + &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6], + &b[7]); + } + + if (num != 8) + return -EINVAL; + + *wwp = get_unaligned_be64(b); + return 0; +} + +static int +efct_lio_parse_npiv_wwn(const char *name, size_t size, u64 *wwpn, u64 *wwnn) +{ + unsigned int cnt = size; + int rc; + + *wwpn = *wwnn = 0; + if (name[cnt - 1] == '\n' || name[cnt - 1] == 0) + cnt--; + + /* validate we have enough characters for WWPN */ + if ((cnt != (16 + 1 + 16)) || (name[16] != ':')) + return -EINVAL; + + rc = efct_lio_parse_wwn(&name[0], wwpn, 1); + if (rc) + return rc; + + rc = efct_lio_parse_wwn(&name[17], wwnn, 1); + if (rc) + return rc; + + return 0; +} + +static ssize_t +efct_lio_tpg_enable_show(struct config_item *item, char *page) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct efct_lio_tpg *tpg = + container_of(se_tpg, struct efct_lio_tpg, tpg); + + return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled); +} + +static ssize_t +efct_lio_tpg_enable_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct efct_lio_tpg *tpg = + container_of(se_tpg, struct efct_lio_tpg, tpg); + struct efct *efct; + struct efc *efc; + unsigned long op; + + if (!tpg->nport || !tpg->nport->efct) { + pr_err("%s: Unable to find EFCT device\n", __func__); + return -EINVAL; + } + + efct = tpg->nport->efct; + efc = efct->efcport; + + if (kstrtoul(page, 0, &op) < 0) + return -EINVAL; + + if (op == 
1) { + int ret; + + tpg->enabled = true; + efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt); + + ret = efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE); + if (ret) { + efct->tgt_efct.lio_nport = NULL; + efc_log_debug(efct, "cannot bring port online\n"); + return ret; + } + } else if (op == 0) { + efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt); + + if (efc->domain && efc->domain->nport) + efct_scsi_tgt_del_nport(efc, efc->domain->nport); + + tpg->enabled = false; + } else { + return -EINVAL; + } + + return count; +} + +static ssize_t +efct_lio_npiv_tpg_enable_show(struct config_item *item, char *page) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct efct_lio_tpg *tpg = + container_of(se_tpg, struct efct_lio_tpg, tpg); + + return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled); +} + +static ssize_t +efct_lio_npiv_tpg_enable_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct efct_lio_tpg *tpg = + container_of(se_tpg, struct efct_lio_tpg, tpg); + struct efct_lio_vport *lio_vport = tpg->vport; + struct efct *efct; + struct efc *efc; + unsigned long op; + + if (kstrtoul(page, 0, &op) < 0) + return -EINVAL; + + if (!lio_vport) { + pr_err("Unable to find vport\n"); + return -EINVAL; + } + + efct = lio_vport->efct; + efc = efct->efcport; + + if (op == 1) { + tpg->enabled = true; + efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt); + + if (efc->domain) { + int ret; + + ret = efc_nport_vport_new(efc->domain, + lio_vport->npiv_wwpn, + lio_vport->npiv_wwnn, + U32_MAX, false, true, + NULL, NULL); + if (ret != 0) { + efc_log_err(efct, "Failed to create Vport\n"); + return ret; + } + return count; + } + + if (!(efc_vport_create_spec(efc, lio_vport->npiv_wwnn, + lio_vport->npiv_wwpn, U32_MAX, + false, true, NULL, NULL))) + return -ENOMEM; + + } else if (op == 0) { + efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt); + + tpg->enabled = false; + /* only physical nport should exist, free lio_nport + * allocated in efct_lio_make_nport + */ + if (efc->domain) { + efc_nport_vport_del(efct->efcport, efc->domain, + lio_vport->npiv_wwpn, + lio_vport->npiv_wwnn); + return count; + } + } else { + return -EINVAL; + } + return count; +} + +static char *efct_lio_get_fabric_wwn(struct se_portal_group *se_tpg) +{ + struct efct_lio_tpg *tpg = + container_of(se_tpg, struct efct_lio_tpg, tpg); + + return tpg->nport->wwpn_str; +} + +static char *efct_lio_get_npiv_fabric_wwn(struct se_portal_group *se_tpg) +{ + struct efct_lio_tpg *tpg = + container_of(se_tpg, struct efct_lio_tpg, tpg); + + return tpg->vport->wwpn_str; +} + +static u16 efct_lio_get_tag(struct se_portal_group *se_tpg) +{ + struct efct_lio_tpg *tpg = + container_of(se_tpg, struct efct_lio_tpg, tpg); + + return tpg->tpgt; +} + +static u16 efct_lio_get_npiv_tag(struct se_portal_group *se_tpg) +{ + struct efct_lio_tpg *tpg = + container_of(se_tpg, struct efct_lio_tpg, tpg); + + return tpg->tpgt; +} + +static int efct_lio_check_demo_mode(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int efct_lio_check_demo_mode_cache(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int efct_lio_check_demo_write_protect(struct se_portal_group *se_tpg) +{ + struct efct_lio_tpg *tpg = + container_of(se_tpg, struct efct_lio_tpg, tpg); + + return tpg->tpg_attrib.demo_mode_write_protect; +} + +static int +efct_lio_npiv_check_demo_write_protect(struct se_portal_group *se_tpg) +{ + struct efct_lio_tpg *tpg = + container_of(se_tpg, 
struct efct_lio_tpg, tpg); + + return tpg->tpg_attrib.demo_mode_write_protect; +} + +static int efct_lio_check_prod_write_protect(struct se_portal_group *se_tpg) +{ + struct efct_lio_tpg *tpg = + container_of(se_tpg, struct efct_lio_tpg, tpg); + + return tpg->tpg_attrib.prod_mode_write_protect; +} + +static int +efct_lio_npiv_check_prod_write_protect(struct se_portal_group *se_tpg) +{ + struct efct_lio_tpg *tpg = + container_of(se_tpg, struct efct_lio_tpg, tpg); + + return tpg->tpg_attrib.prod_mode_write_protect; +} + +static int efct_lio_check_stop_free(struct se_cmd *se_cmd) +{ + struct efct_scsi_tgt_io *ocp = + container_of(se_cmd, struct efct_scsi_tgt_io, cmd); + struct efct_io *io = container_of(ocp, struct efct_io, tgt_io); + + efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_CHK_STOP_FREE); + return target_put_sess_cmd(se_cmd); +} + +static int +efct_lio_abort_tgt_cb(struct efct_io *io, + enum efct_scsi_io_status scsi_status, + u32 flags, void *arg) +{ + efct_lio_io_printf(io, "Abort done, status:%d\n", scsi_status); + return 0; +} + +static void +efct_lio_aborted_task(struct se_cmd *se_cmd) +{ + struct efct_scsi_tgt_io *ocp = + container_of(se_cmd, struct efct_scsi_tgt_io, cmd); + struct efct_io *io = container_of(ocp, struct efct_io, tgt_io); + + efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_ABORTED_TASK); + + if (ocp->rsp_sent) + return; + + /* command has been aborted, cleanup here */ + ocp->aborting = true; + ocp->err = EFCT_SCSI_STATUS_ABORTED; + /* terminate the exchange */ + efct_scsi_tgt_abort_io(io, efct_lio_abort_tgt_cb, NULL); +} + +static void efct_lio_release_cmd(struct se_cmd *se_cmd) +{ + struct efct_scsi_tgt_io *ocp = + container_of(se_cmd, struct efct_scsi_tgt_io, cmd); + struct efct_io *io = container_of(ocp, struct efct_io, tgt_io); + struct efct *efct = io->efct; + + efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_RELEASE_CMD); + efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_CMPL_CMD); + efct_scsi_io_complete(io); + atomic_sub_return(1, &efct->tgt_efct.ios_in_use); +} + +static void efct_lio_close_session(struct se_session *se_sess) +{ + struct efc_node *node = se_sess->fabric_sess_ptr; + + pr_debug("se_sess=%p node=%p", se_sess, node); + + if (!node) { + pr_debug("node is NULL"); + return; + } + + efc_node_post_shutdown(node, NULL); +} + +static int efct_lio_get_cmd_state(struct se_cmd *cmd) +{ + struct efct_scsi_tgt_io *ocp = + container_of(cmd, struct efct_scsi_tgt_io, cmd); + struct efct_io *io = container_of(ocp, struct efct_io, tgt_io); + + return io->tgt_io.state; +} + +static int +efct_lio_sg_map(struct efct_io *io) +{ + struct efct_scsi_tgt_io *ocp = &io->tgt_io; + struct se_cmd *cmd = &ocp->cmd; + + ocp->seg_map_cnt = dma_map_sg(&io->efct->pci->dev, cmd->t_data_sg, + cmd->t_data_nents, cmd->data_direction); + if (ocp->seg_map_cnt == 0) + return -EFAULT; + return 0; +} + +static void +efct_lio_sg_unmap(struct efct_io *io) +{ + struct efct_scsi_tgt_io *ocp = &io->tgt_io; + struct se_cmd *cmd = &ocp->cmd; + + if (WARN_ON(!ocp->seg_map_cnt || !cmd->t_data_sg)) + return; + + dma_unmap_sg(&io->efct->pci->dev, cmd->t_data_sg, + ocp->seg_map_cnt, cmd->data_direction); + ocp->seg_map_cnt = 0; +} + +static int +efct_lio_status_done(struct efct_io *io, + enum efct_scsi_io_status scsi_status, + u32 flags, void *arg) +{ + struct efct_scsi_tgt_io *ocp = &io->tgt_io; + + efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RSP_DONE); + if (scsi_status != EFCT_SCSI_STATUS_GOOD) { + efct_lio_io_printf(io, "callback completed with error=%d\n", + scsi_status); + ocp->err = scsi_status; 
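+		/* Record the failed status; the SGL is still unmapped and the
+		 * command is returned to LIO below either way.
+		 */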
+ } + if (ocp->seg_map_cnt) + efct_lio_sg_unmap(io); + + efct_lio_io_printf(io, "status=%d, err=%d flags=0x%x, dir=%d\n", + scsi_status, ocp->err, flags, ocp->ddir); + + efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE); + transport_generic_free_cmd(&io->tgt_io.cmd, 0); + return 0; +} + +static int +efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status, + u32 flags, void *arg); + +static int +efct_lio_write_pending(struct se_cmd *cmd) +{ + struct efct_scsi_tgt_io *ocp = + container_of(cmd, struct efct_scsi_tgt_io, cmd); + struct efct_io *io = container_of(ocp, struct efct_io, tgt_io); + struct efct_scsi_sgl *sgl = io->sgl; + struct scatterlist *sg; + u32 flags = 0, cnt, curcnt; + u64 length = 0; + + efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_WRITE_PENDING); + efct_lio_io_printf(io, "trans_state=0x%x se_cmd_flags=0x%x\n", + cmd->transport_state, cmd->se_cmd_flags); + + if (ocp->seg_cnt == 0) { + ocp->seg_cnt = cmd->t_data_nents; + ocp->cur_seg = 0; + if (efct_lio_sg_map(io)) { + efct_lio_io_printf(io, "efct_lio_sg_map failed\n"); + return -EFAULT; + } + } + curcnt = (ocp->seg_map_cnt - ocp->cur_seg); + curcnt = (curcnt < io->sgl_allocated) ? curcnt : io->sgl_allocated; + /* find current sg */ + for (cnt = 0, sg = cmd->t_data_sg; cnt < ocp->cur_seg; cnt++, + sg = sg_next(sg)) + ;/* do nothing */ + + for (cnt = 0; cnt < curcnt; cnt++, sg = sg_next(sg)) { + sgl[cnt].addr = sg_dma_address(sg); + sgl[cnt].dif_addr = 0; + sgl[cnt].len = sg_dma_len(sg); + length += sgl[cnt].len; + ocp->cur_seg++; + } + + if (ocp->cur_seg == ocp->seg_cnt) + flags = EFCT_SCSI_LAST_DATAPHASE; + + return efct_scsi_recv_wr_data(io, flags, sgl, curcnt, length, + efct_lio_datamove_done, NULL); +} + +static int +efct_lio_queue_data_in(struct se_cmd *cmd) +{ + struct efct_scsi_tgt_io *ocp = + container_of(cmd, struct efct_scsi_tgt_io, cmd); + struct efct_io *io = container_of(ocp, struct efct_io, tgt_io); + struct efct_scsi_sgl *sgl = io->sgl; + struct scatterlist *sg = NULL; + uint flags = 0, cnt = 0, curcnt = 0; + u64 length = 0; + + efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_DATA_IN); + + if (ocp->seg_cnt == 0) { + if (cmd->data_length) { + ocp->seg_cnt = cmd->t_data_nents; + ocp->cur_seg = 0; + if (efct_lio_sg_map(io)) { + efct_lio_io_printf(io, + "efct_lio_sg_map failed\n"); + return -EAGAIN; + } + } else { + /* If command length is 0, send the response status */ + struct efct_scsi_cmd_resp rsp; + + memset(&rsp, 0, sizeof(rsp)); + efct_lio_io_printf(io, + "cmd : %p length 0, send status\n", + cmd); + return efct_scsi_send_resp(io, 0, &rsp, + efct_lio_status_done, NULL); + } + } + curcnt = min(ocp->seg_map_cnt - ocp->cur_seg, io->sgl_allocated); + + while (cnt < curcnt) { + sg = &cmd->t_data_sg[ocp->cur_seg]; + sgl[cnt].addr = sg_dma_address(sg); + sgl[cnt].dif_addr = 0; + if (ocp->transferred_len + sg_dma_len(sg) >= cmd->data_length) + sgl[cnt].len = cmd->data_length - ocp->transferred_len; + else + sgl[cnt].len = sg_dma_len(sg); + + ocp->transferred_len += sgl[cnt].len; + length += sgl[cnt].len; + ocp->cur_seg++; + cnt++; + if (ocp->transferred_len == cmd->data_length) + break; + } + + if (ocp->transferred_len == cmd->data_length) { + flags = EFCT_SCSI_LAST_DATAPHASE; + ocp->seg_cnt = ocp->cur_seg; + } + + /* If there is residual, disable Auto Good Response */ + if (cmd->residual_count) + flags |= EFCT_SCSI_NO_AUTO_RESPONSE; + + efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RD_DATA); + + return efct_scsi_send_rd_data(io, flags, sgl, curcnt, length, + 
efct_lio_datamove_done, NULL); +} + +static void +efct_lio_send_resp(struct efct_io *io, enum efct_scsi_io_status scsi_status, + u32 flags) +{ + struct efct_scsi_cmd_resp rsp; + struct efct_scsi_tgt_io *ocp = &io->tgt_io; + struct se_cmd *cmd = &io->tgt_io.cmd; + int rc; + + if (flags & EFCT_SCSI_IO_CMPL_RSP_SENT) { + ocp->rsp_sent = true; + efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE); + transport_generic_free_cmd(&io->tgt_io.cmd, 0); + return; + } + + /* send check condition if an error occurred */ + memset(&rsp, 0, sizeof(rsp)); + rsp.scsi_status = cmd->scsi_status; + rsp.sense_data = (uint8_t *)io->tgt_io.sense_buffer; + rsp.sense_data_length = cmd->scsi_sense_length; + + /* Check for residual underrun or overrun */ + if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) + rsp.residual = -cmd->residual_count; + else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) + rsp.residual = cmd->residual_count; + + rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL); + efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP); + if (rc != 0) { + efct_lio_io_printf(io, "Read done, send rsp failed %d\n", rc); + efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE); + transport_generic_free_cmd(&io->tgt_io.cmd, 0); + } else { + ocp->rsp_sent = true; + } +} + +static int +efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status, + u32 flags, void *arg) +{ + struct efct_scsi_tgt_io *ocp = &io->tgt_io; + + efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_DATA_DONE); + if (scsi_status != EFCT_SCSI_STATUS_GOOD) { + efct_lio_io_printf(io, "callback completed with error=%d\n", + scsi_status); + ocp->err = scsi_status; + } + efct_lio_io_printf(io, "seg_map_cnt=%d\n", ocp->seg_map_cnt); + if (ocp->seg_map_cnt) { + if (ocp->err == EFCT_SCSI_STATUS_GOOD && + ocp->cur_seg < ocp->seg_cnt) { + int rc; + + efct_lio_io_printf(io, "continuing cmd at segm=%d\n", + ocp->cur_seg); + if (ocp->ddir == DMA_TO_DEVICE) + rc = efct_lio_write_pending(&ocp->cmd); + else + rc = efct_lio_queue_data_in(&ocp->cmd); + if (!rc) + return 0; + + ocp->err = EFCT_SCSI_STATUS_ERROR; + efct_lio_io_printf(io, "could not continue command\n"); + } + efct_lio_sg_unmap(io); + } + + if (io->tgt_io.aborting) { + efct_lio_io_printf(io, "IO done aborted\n"); + return 0; + } + + if (ocp->ddir == DMA_TO_DEVICE) { + efct_lio_io_printf(io, "Write done, trans_state=0x%x\n", + io->tgt_io.cmd.transport_state); + if (scsi_status != EFCT_SCSI_STATUS_GOOD) { + transport_generic_request_failure(&io->tgt_io.cmd, + TCM_CHECK_CONDITION_ABORT_CMD); + efct_set_lio_io_state(io, + EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE); + } else { + efct_set_lio_io_state(io, + EFCT_LIO_STATE_TGT_EXECUTE_CMD); + target_execute_cmd(&io->tgt_io.cmd); + } + } else { + efct_lio_send_resp(io, scsi_status, flags); + } + return 0; +} + +static int +efct_lio_tmf_done(struct efct_io *io, enum efct_scsi_io_status scsi_status, + u32 flags, void *arg) +{ + efct_lio_tmfio_printf(io, "cmd=%p status=%d, flags=0x%x\n", + &io->tgt_io.cmd, scsi_status, flags); + + efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE); + transport_generic_free_cmd(&io->tgt_io.cmd, 0); + return 0; +} + +static int +efct_lio_null_tmf_done(struct efct_io *tmfio, + enum efct_scsi_io_status scsi_status, + u32 flags, void *arg) +{ + efct_lio_tmfio_printf(tmfio, "cmd=%p status=%d, flags=0x%x\n", + &tmfio->tgt_io.cmd, scsi_status, flags); + + /* free struct efct_io only, no active se_cmd */ + efct_scsi_io_complete(tmfio); + return 0; +} + +static int +efct_lio_queue_status(struct se_cmd 
*cmd) +{ + struct efct_scsi_cmd_resp rsp; + struct efct_scsi_tgt_io *ocp = + container_of(cmd, struct efct_scsi_tgt_io, cmd); + struct efct_io *io = container_of(ocp, struct efct_io, tgt_io); + int rc = 0; + + efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_STATUS); + efct_lio_io_printf(io, + "status=0x%x trans_state=0x%x se_cmd_flags=0x%x sns_len=%d\n", + cmd->scsi_status, cmd->transport_state, cmd->se_cmd_flags, + cmd->scsi_sense_length); + + memset(&rsp, 0, sizeof(rsp)); + rsp.scsi_status = cmd->scsi_status; + rsp.sense_data = (u8 *)io->tgt_io.sense_buffer; + rsp.sense_data_length = cmd->scsi_sense_length; + + /* Check for residual underrun or overrun, mark negitive value for + * underrun to recognize in HW + */ + if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) + rsp.residual = -cmd->residual_count; + else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) + rsp.residual = cmd->residual_count; + + rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL); + efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP); + if (rc == 0) + ocp->rsp_sent = true; + return rc; +} + +static void efct_lio_queue_tm_rsp(struct se_cmd *cmd) +{ + struct efct_scsi_tgt_io *ocp = + container_of(cmd, struct efct_scsi_tgt_io, cmd); + struct efct_io *tmfio = container_of(ocp, struct efct_io, tgt_io); + struct se_tmr_req *se_tmr = cmd->se_tmr_req; + u8 rspcode; + + efct_lio_tmfio_printf(tmfio, "cmd=%p function=0x%x tmr->response=%d\n", + cmd, se_tmr->function, se_tmr->response); + switch (se_tmr->response) { + case TMR_FUNCTION_COMPLETE: + rspcode = EFCT_SCSI_TMF_FUNCTION_COMPLETE; + break; + case TMR_TASK_DOES_NOT_EXIST: + rspcode = EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND; + break; + case TMR_LUN_DOES_NOT_EXIST: + rspcode = EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER; + break; + case TMR_FUNCTION_REJECTED: + default: + rspcode = EFCT_SCSI_TMF_FUNCTION_REJECTED; + break; + } + efct_scsi_send_tmf_resp(tmfio, rspcode, NULL, efct_lio_tmf_done, NULL); +} + +static struct efct *efct_find_wwpn(u64 wwpn) +{ + struct efct *efct; + + /* Search for the HBA that has this WWPN */ + list_for_each_entry(efct, &efct_devices, list_entry) { + + if (wwpn == efct_get_wwpn(&efct->hw)) + return efct; + } + + return NULL; +} + +static struct se_wwn * +efct_lio_make_nport(struct target_fabric_configfs *tf, + struct config_group *group, const char *name) +{ + struct efct_lio_nport *lio_nport; + struct efct *efct; + int ret; + u64 wwpn; + + ret = efct_lio_parse_wwn(name, &wwpn, 0); + if (ret) + return ERR_PTR(ret); + + efct = efct_find_wwpn(wwpn); + if (!efct) { + pr_err("cannot find EFCT for base wwpn %s\n", name); + return ERR_PTR(-ENXIO); + } + + lio_nport = kzalloc(sizeof(*lio_nport), GFP_KERNEL); + if (!lio_nport) + return ERR_PTR(-ENOMEM); + + lio_nport->efct = efct; + lio_nport->wwpn = wwpn; + efct_format_wwn(lio_nport->wwpn_str, sizeof(lio_nport->wwpn_str), + "naa.", wwpn); + efct->tgt_efct.lio_nport = lio_nport; + + return &lio_nport->nport_wwn; +} + +static struct se_wwn * +efct_lio_npiv_make_nport(struct target_fabric_configfs *tf, + struct config_group *group, const char *name) +{ + struct efct_lio_vport *lio_vport; + struct efct *efct; + int ret; + u64 p_wwpn, npiv_wwpn, npiv_wwnn; + char *p, *pbuf, tmp[128]; + struct efct_lio_vport_list_t *vport_list; + struct fc_vport *new_fc_vport; + struct fc_vport_identifiers vport_id; + unsigned long flags = 0; + + snprintf(tmp, sizeof(tmp), "%s", name); + pbuf = &tmp[0]; + + p = strsep(&pbuf, "@"); + + if (!p || !pbuf) { + pr_err("Unable to find separator operator(@)\n"); + return 
ERR_PTR(-EINVAL); + } + + ret = efct_lio_parse_wwn(p, &p_wwpn, 0); + if (ret) + return ERR_PTR(ret); + + ret = efct_lio_parse_npiv_wwn(pbuf, strlen(pbuf), &npiv_wwpn, + &npiv_wwnn); + if (ret) + return ERR_PTR(ret); + + efct = efct_find_wwpn(p_wwpn); + if (!efct) { + pr_err("cannot find EFCT for base wwpn %s\n", name); + return ERR_PTR(-ENXIO); + } + + lio_vport = kzalloc(sizeof(*lio_vport), GFP_KERNEL); + if (!lio_vport) + return ERR_PTR(-ENOMEM); + + lio_vport->efct = efct; + lio_vport->wwpn = p_wwpn; + lio_vport->npiv_wwpn = npiv_wwpn; + lio_vport->npiv_wwnn = npiv_wwnn; + + efct_format_wwn(lio_vport->wwpn_str, sizeof(lio_vport->wwpn_str), + "naa.", npiv_wwpn); + + vport_list = kzalloc(sizeof(*vport_list), GFP_KERNEL); + if (!vport_list) { + kfree(lio_vport); + return ERR_PTR(-ENOMEM); + } + + vport_list->lio_vport = lio_vport; + + memset(&vport_id, 0, sizeof(vport_id)); + vport_id.port_name = npiv_wwpn; + vport_id.node_name = npiv_wwnn; + vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; + vport_id.vport_type = FC_PORTTYPE_NPIV; + vport_id.disable = false; + + new_fc_vport = fc_vport_create(efct->shost, 0, &vport_id); + if (!new_fc_vport) { + efc_log_err(efct, "fc_vport_create failed\n"); + kfree(lio_vport); + kfree(vport_list); + return ERR_PTR(-ENOMEM); + } + + lio_vport->fc_vport = new_fc_vport; + spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags); + INIT_LIST_HEAD(&vport_list->list_entry); + list_add_tail(&vport_list->list_entry, &efct->tgt_efct.vport_list); + spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags); + + return &lio_vport->vport_wwn; +} + +static void +efct_lio_drop_nport(struct se_wwn *wwn) +{ + struct efct_lio_nport *lio_nport = + container_of(wwn, struct efct_lio_nport, nport_wwn); + struct efct *efct = lio_nport->efct; + + /* only physical nport should exist, free lio_nport allocated + * in efct_lio_make_nport. 
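+	 * NPIV vports are released separately in efct_lio_npiv_drop_nport().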
+ */ + kfree(efct->tgt_efct.lio_nport); + efct->tgt_efct.lio_nport = NULL; +} + +static void +efct_lio_npiv_drop_nport(struct se_wwn *wwn) +{ + struct efct_lio_vport *lio_vport = + container_of(wwn, struct efct_lio_vport, vport_wwn); + struct efct_lio_vport_list_t *vport, *next_vport; + struct efct *efct = lio_vport->efct; + unsigned long flags = 0; + + if (lio_vport->fc_vport) + fc_vport_terminate(lio_vport->fc_vport); + + spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags); + + list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list, + list_entry) { + if (vport->lio_vport == lio_vport) { + list_del(&vport->list_entry); + kfree(vport->lio_vport); + kfree(vport); + break; + } + } + spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags); +} + +static struct se_portal_group * +efct_lio_make_tpg(struct se_wwn *wwn, const char *name) +{ + struct efct_lio_nport *lio_nport = + container_of(wwn, struct efct_lio_nport, nport_wwn); + struct efct_lio_tpg *tpg; + struct efct *efct; + unsigned long n; + int ret; + + if (strstr(name, "tpgt_") != name) + return ERR_PTR(-EINVAL); + if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX) + return ERR_PTR(-EINVAL); + + tpg = kzalloc(sizeof(*tpg), GFP_KERNEL); + if (!tpg) + return ERR_PTR(-ENOMEM); + + tpg->nport = lio_nport; + tpg->tpgt = n; + tpg->enabled = false; + + tpg->tpg_attrib.generate_node_acls = 1; + tpg->tpg_attrib.demo_mode_write_protect = 1; + tpg->tpg_attrib.cache_dynamic_acls = 1; + tpg->tpg_attrib.demo_mode_login_only = 1; + tpg->tpg_attrib.session_deletion_wait = 1; + + ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP); + if (ret < 0) { + kfree(tpg); + return NULL; + } + efct = lio_nport->efct; + efct->tgt_efct.tpg = tpg; + efc_log_debug(efct, "create portal group %d\n", tpg->tpgt); + + xa_init(&efct->lookup); + return &tpg->tpg; +} + +static void +efct_lio_drop_tpg(struct se_portal_group *se_tpg) +{ + struct efct_lio_tpg *tpg = + container_of(se_tpg, struct efct_lio_tpg, tpg); + + struct efct *efct = tpg->nport->efct; + + efc_log_debug(efct, "drop portal group %d\n", tpg->tpgt); + tpg->nport->efct->tgt_efct.tpg = NULL; + core_tpg_deregister(se_tpg); + xa_destroy(&efct->lookup); + kfree(tpg); +} + +static struct se_portal_group * +efct_lio_npiv_make_tpg(struct se_wwn *wwn, const char *name) +{ + struct efct_lio_vport *lio_vport = + container_of(wwn, struct efct_lio_vport, vport_wwn); + struct efct_lio_tpg *tpg; + struct efct *efct; + unsigned long n; + int ret; + + efct = lio_vport->efct; + if (strstr(name, "tpgt_") != name) + return ERR_PTR(-EINVAL); + if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX) + return ERR_PTR(-EINVAL); + + if (n != 1) { + efc_log_err(efct, "Invalid tpgt index: %ld provided\n", n); + return ERR_PTR(-EINVAL); + } + + tpg = kzalloc(sizeof(*tpg), GFP_KERNEL); + if (!tpg) + return ERR_PTR(-ENOMEM); + + tpg->vport = lio_vport; + tpg->tpgt = n; + tpg->enabled = false; + + tpg->tpg_attrib.generate_node_acls = 1; + tpg->tpg_attrib.demo_mode_write_protect = 1; + tpg->tpg_attrib.cache_dynamic_acls = 1; + tpg->tpg_attrib.demo_mode_login_only = 1; + tpg->tpg_attrib.session_deletion_wait = 1; + + ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP); + + if (ret < 0) { + kfree(tpg); + return NULL; + } + lio_vport->tpg = tpg; + efc_log_debug(efct, "create vport portal group %d\n", tpg->tpgt); + + return &tpg->tpg; +} + +static void +efct_lio_npiv_drop_tpg(struct se_portal_group *se_tpg) +{ + struct efct_lio_tpg *tpg = + container_of(se_tpg, struct efct_lio_tpg, tpg); + + 
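+	/* NPIV portal groups are reached through their lio_vport (see
+	 * efct_get_vport_tpg()), not efct->tgt_efct.tpg, so only deregister
+	 * from the target core and free the group here.
+	 */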
efc_log_debug(tpg->vport->efct, "drop npiv portal group %d\n", + tpg->tpgt); + core_tpg_deregister(se_tpg); + kfree(tpg); +} + +static int +efct_lio_init_nodeacl(struct se_node_acl *se_nacl, const char *name) +{ + struct efct_lio_nacl *nacl; + u64 wwnn; + + if (efct_lio_parse_wwn(name, &wwnn, 0) < 0) + return -EINVAL; + + nacl = container_of(se_nacl, struct efct_lio_nacl, se_node_acl); + nacl->nport_wwnn = wwnn; + + efct_format_wwn(nacl->nport_name, sizeof(nacl->nport_name), "", wwnn); + return 0; +} + +static int efct_lio_check_demo_mode_login_only(struct se_portal_group *stpg) +{ + struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg); + + return tpg->tpg_attrib.demo_mode_login_only; +} + +static int +efct_lio_npiv_check_demo_mode_login_only(struct se_portal_group *stpg) +{ + struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg); + + return tpg->tpg_attrib.demo_mode_login_only; +} + +static struct efct_lio_tpg * +efct_get_vport_tpg(struct efc_node *node) +{ + struct efct *efct; + u64 wwpn = node->nport->wwpn; + struct efct_lio_vport_list_t *vport, *next; + struct efct_lio_vport *lio_vport = NULL; + struct efct_lio_tpg *tpg = NULL; + unsigned long flags = 0; + + efct = node->efc->base; + spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags); + list_for_each_entry_safe(vport, next, &efct->tgt_efct.vport_list, + list_entry) { + lio_vport = vport->lio_vport; + if (wwpn && lio_vport && lio_vport->npiv_wwpn == wwpn) { + efc_log_debug(efct, "found tpg on vport\n"); + tpg = lio_vport->tpg; + break; + } + } + spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags); + return tpg; +} + +static void +_efct_tgt_node_free(struct kref *arg) +{ + struct efct_node *tgt_node = container_of(arg, struct efct_node, ref); + struct efc_node *node = tgt_node->node; + + efc_scsi_del_initiator_complete(node->efc, node); + kfree(tgt_node); +} + +static int efct_session_cb(struct se_portal_group *se_tpg, + struct se_session *se_sess, void *private) +{ + struct efc_node *node = private; + struct efct_node *tgt_node; + struct efct *efct = node->efc->base; + + tgt_node = kzalloc(sizeof(*tgt_node), GFP_KERNEL); + if (!tgt_node) + return -ENOMEM; + + kref_init(&tgt_node->ref); + tgt_node->release = _efct_tgt_node_free; + + tgt_node->session = se_sess; + node->tgt_node = tgt_node; + tgt_node->efct = efct; + + tgt_node->node = node; + + tgt_node->node_fc_id = node->rnode.fc_id; + tgt_node->port_fc_id = node->nport->fc_id; + tgt_node->vpi = node->nport->indicator; + tgt_node->rpi = node->rnode.indicator; + + spin_lock_init(&tgt_node->active_ios_lock); + INIT_LIST_HEAD(&tgt_node->active_ios); + + return 0; +} + +int efct_scsi_tgt_new_device(struct efct *efct) +{ + u32 total_ios; + + /* Get the max settings */ + efct->tgt_efct.max_sge = sli_get_max_sge(&efct->hw.sli); + efct->tgt_efct.max_sgl = sli_get_max_sgl(&efct->hw.sli); + + /* initialize IO watermark fields */ + atomic_set(&efct->tgt_efct.ios_in_use, 0); + total_ios = efct->hw.config.n_io; + efc_log_debug(efct, "total_ios=%d\n", total_ios); + efct->tgt_efct.watermark_min = + (total_ios * EFCT_WATERMARK_LOW_PCT) / 100; + efct->tgt_efct.watermark_max = + (total_ios * EFCT_WATERMARK_HIGH_PCT) / 100; + atomic_set(&efct->tgt_efct.io_high_watermark, + efct->tgt_efct.watermark_max); + atomic_set(&efct->tgt_efct.watermark_hit, 0); + atomic_set(&efct->tgt_efct.initiator_count, 0); + + lio_wq = create_singlethread_workqueue("efct_lio_worker"); + if (!lio_wq) { + efc_log_err(efct, "workqueue create failed\n"); + return -EIO; + } + + 
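+	/* efct_lio_lock protects the NPIV vport_list maintained by
+	 * efct_lio_npiv_make_nport() and efct_lio_npiv_drop_nport().
+	 */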
spin_lock_init(&efct->tgt_efct.efct_lio_lock); + INIT_LIST_HEAD(&efct->tgt_efct.vport_list); + + return 0; +} + +int efct_scsi_tgt_del_device(struct efct *efct) +{ + flush_workqueue(lio_wq); + + return 0; +} + +int +efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport) +{ + struct efct *efct = nport->efc->base; + + efc_log_debug(efct, "New SPORT: %s bound to %s\n", nport->display_name, + efct->tgt_efct.lio_nport->wwpn_str); + + return 0; +} + +void +efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport) +{ + efc_log_debug(efc, "Del SPORT: %s\n", nport->display_name); +} + +static void efct_lio_setup_session(struct work_struct *work) +{ + struct efct_lio_wq_data *wq_data = + container_of(work, struct efct_lio_wq_data, work); + struct efct *efct = wq_data->efct; + struct efc_node *node = wq_data->ptr; + char wwpn[WWN_NAME_LEN]; + struct efct_lio_tpg *tpg; + struct efct_node *tgt_node; + struct se_portal_group *se_tpg; + struct se_session *se_sess; + int watermark; + int ini_count; + u64 id; + + /* Check to see if it's belongs to vport, + * if not get physical port + */ + tpg = efct_get_vport_tpg(node); + if (tpg) { + se_tpg = &tpg->tpg; + } else if (efct->tgt_efct.tpg) { + tpg = efct->tgt_efct.tpg; + se_tpg = &tpg->tpg; + } else { + efc_log_err(efct, "failed to init session\n"); + return; + } + + /* + * Format the FCP Initiator port_name into colon + * separated values to match the format by our explicit + * ConfigFS NodeACLs. + */ + efct_format_wwn(wwpn, sizeof(wwpn), "", efc_node_get_wwpn(node)); + + se_sess = target_setup_session(se_tpg, 0, 0, TARGET_PROT_NORMAL, wwpn, + node, efct_session_cb); + if (IS_ERR(se_sess)) { + efc_log_err(efct, "failed to setup session\n"); + kfree(wq_data); + efc_scsi_sess_reg_complete(node, -EIO); + return; + } + + tgt_node = node->tgt_node; + id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id; + + efc_log_debug(efct, "new initiator sess=%p node=%p id: %llx\n", + se_sess, node, id); + + if (xa_err(xa_store(&efct->lookup, id, tgt_node, GFP_KERNEL))) + efc_log_err(efct, "Node lookup store failed\n"); + + efc_scsi_sess_reg_complete(node, 0); + + /* update IO watermark: increment initiator count */ + ini_count = atomic_add_return(1, &efct->tgt_efct.initiator_count); + watermark = efct->tgt_efct.watermark_max - + ini_count * EFCT_IO_WATERMARK_PER_INITIATOR; + watermark = (efct->tgt_efct.watermark_min > watermark) ? + efct->tgt_efct.watermark_min : watermark; + atomic_set(&efct->tgt_efct.io_high_watermark, watermark); + + kfree(wq_data); +} + +int efct_scsi_new_initiator(struct efc *efc, struct efc_node *node) +{ + struct efct *efct = node->efc->base; + struct efct_lio_wq_data *wq_data; + + /* + * Since LIO only supports initiator validation at thread level, + * we are open minded and accept all callers. 
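+	 * Session registration is deferred to the single-threaded lio_wq,
+	 * which serializes it and runs it in process context.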
+ */ + wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC); + if (!wq_data) + return -ENOMEM; + + wq_data->ptr = node; + wq_data->efct = efct; + INIT_WORK(&wq_data->work, efct_lio_setup_session); + queue_work(lio_wq, &wq_data->work); + return EFC_SCSI_CALL_ASYNC; +} + +static void efct_lio_remove_session(struct work_struct *work) +{ + struct efct_lio_wq_data *wq_data = + container_of(work, struct efct_lio_wq_data, work); + struct efct *efct = wq_data->efct; + struct efc_node *node = wq_data->ptr; + struct efct_node *tgt_node; + struct se_session *se_sess; + + tgt_node = node->tgt_node; + if (!tgt_node) { + /* base driver has sent back-to-back requests + * to unreg session with no intervening + * register + */ + efc_log_err(efct, "unreg session for NULL session\n"); + efc_scsi_del_initiator_complete(node->efc, node); + return; + } + + se_sess = tgt_node->session; + efc_log_debug(efct, "unreg session se_sess=%p node=%p\n", + se_sess, node); + + /* first flag all session commands to complete */ + target_stop_session(se_sess); + + /* now wait for session commands to complete */ + target_wait_for_sess_cmds(se_sess); + target_remove_session(se_sess); + tgt_node->session = NULL; + node->tgt_node = NULL; + kref_put(&tgt_node->ref, tgt_node->release); + + kfree(wq_data); +} + +int efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason) +{ + struct efct *efct = node->efc->base; + struct efct_node *tgt_node = node->tgt_node; + struct efct_lio_wq_data *wq_data; + int watermark; + int ini_count; + u64 id; + + if (reason == EFCT_SCSI_INITIATOR_MISSING) + return EFC_SCSI_CALL_COMPLETE; + + if (!tgt_node) { + efc_log_err(efct, "tgt_node is NULL\n"); + return -EIO; + } + + wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC); + if (!wq_data) + return -ENOMEM; + + id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id; + xa_erase(&efct->lookup, id); + + wq_data->ptr = node; + wq_data->efct = efct; + INIT_WORK(&wq_data->work, efct_lio_remove_session); + queue_work(lio_wq, &wq_data->work); + + /* + * update IO watermark: decrement initiator count + */ + ini_count = atomic_sub_return(1, &efct->tgt_efct.initiator_count); + + watermark = efct->tgt_efct.watermark_max - + ini_count * EFCT_IO_WATERMARK_PER_INITIATOR; + watermark = (efct->tgt_efct.watermark_min > watermark) ? 
+ efct->tgt_efct.watermark_min : watermark; + atomic_set(&efct->tgt_efct.io_high_watermark, watermark); + + return EFC_SCSI_CALL_ASYNC; +} + +void efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb, + u32 cdb_len, u32 flags) +{ + struct efct_scsi_tgt_io *ocp = &io->tgt_io; + struct se_cmd *se_cmd = &io->tgt_io.cmd; + struct efct *efct = io->efct; + char *ddir; + struct efct_node *tgt_node; + struct se_session *se_sess; + int rc = 0; + + memset(ocp, 0, sizeof(struct efct_scsi_tgt_io)); + efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RECV_CMD); + atomic_add_return(1, &efct->tgt_efct.ios_in_use); + + /* set target timeout */ + io->timeout = efct->target_io_timer_sec; + + if (flags & EFCT_SCSI_CMD_SIMPLE) + ocp->task_attr = TCM_SIMPLE_TAG; + else if (flags & EFCT_SCSI_CMD_HEAD_OF_QUEUE) + ocp->task_attr = TCM_HEAD_TAG; + else if (flags & EFCT_SCSI_CMD_ORDERED) + ocp->task_attr = TCM_ORDERED_TAG; + else if (flags & EFCT_SCSI_CMD_ACA) + ocp->task_attr = TCM_ACA_TAG; + + switch (flags & (EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT)) { + case EFCT_SCSI_CMD_DIR_IN: + ddir = "FROM_INITIATOR"; + ocp->ddir = DMA_TO_DEVICE; + break; + case EFCT_SCSI_CMD_DIR_OUT: + ddir = "TO_INITIATOR"; + ocp->ddir = DMA_FROM_DEVICE; + break; + case EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT: + ddir = "BIDIR"; + ocp->ddir = DMA_BIDIRECTIONAL; + break; + default: + ddir = "NONE"; + ocp->ddir = DMA_NONE; + break; + } + + ocp->lun = lun; + efct_lio_io_printf(io, "new cmd=0x%x ddir=%s dl=%u\n", + cdb[0], ddir, io->exp_xfer_len); + + tgt_node = io->node; + se_sess = tgt_node->session; + if (!se_sess) { + efc_log_err(efct, "No session found to submit IO se_cmd: %p\n", + &ocp->cmd); + efct_scsi_io_free(io); + return; + } + + efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_SUBMIT_CMD); + rc = target_init_cmd(se_cmd, se_sess, &io->tgt_io.sense_buffer[0], + ocp->lun, io->exp_xfer_len, ocp->task_attr, + ocp->ddir, TARGET_SCF_ACK_KREF); + if (rc) { + efc_log_err(efct, "failed to init cmd se_cmd: %p\n", se_cmd); + efct_scsi_io_free(io); + return; + } + + if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, + NULL, 0, GFP_ATOMIC)) + return; + + target_submit(se_cmd); +} + +int +efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd, + struct efct_io *io_to_abort, u32 flags) +{ + unsigned char tmr_func; + struct efct *efct = tmfio->efct; + struct efct_scsi_tgt_io *ocp = &tmfio->tgt_io; + struct efct_node *tgt_node; + struct se_session *se_sess; + int rc; + + memset(ocp, 0, sizeof(struct efct_scsi_tgt_io)); + efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_SCSI_RECV_TMF); + atomic_add_return(1, &efct->tgt_efct.ios_in_use); + efct_lio_tmfio_printf(tmfio, "%s: new tmf %x lun=%u\n", + tmfio->display_name, cmd, lun); + + switch (cmd) { + case EFCT_SCSI_TMF_ABORT_TASK: + tmr_func = TMR_ABORT_TASK; + break; + case EFCT_SCSI_TMF_ABORT_TASK_SET: + tmr_func = TMR_ABORT_TASK_SET; + break; + case EFCT_SCSI_TMF_CLEAR_TASK_SET: + tmr_func = TMR_CLEAR_TASK_SET; + break; + case EFCT_SCSI_TMF_LOGICAL_UNIT_RESET: + tmr_func = TMR_LUN_RESET; + break; + case EFCT_SCSI_TMF_CLEAR_ACA: + tmr_func = TMR_CLEAR_ACA; + break; + case EFCT_SCSI_TMF_TARGET_RESET: + tmr_func = TMR_TARGET_WARM_RESET; + break; + case EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT: + case EFCT_SCSI_TMF_QUERY_TASK_SET: + default: + goto tmf_fail; + } + + tmfio->tgt_io.tmf = tmr_func; + tmfio->tgt_io.lun = lun; + tmfio->tgt_io.io_to_abort = io_to_abort; + + tgt_node = tmfio->node; + + se_sess = tgt_node->session; + if (!se_sess) + return 0; + + rc = 
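	/*
	 * Editorial note on efct_scsi_recv_cmd() above, not from the original
	 * source: the EFCT_SCSI_CMD_DIR_* flags describe data flow relative
	 * to the target port, while the dma_data_direction follows LIO's
	 * read/write view, hence the apparent inversion:
	 *
	 *	EFCT_SCSI_CMD_DIR_IN  ("FROM_INITIATOR", a write) -> DMA_TO_DEVICE
	 *	EFCT_SCSI_CMD_DIR_OUT ("TO_INITIATOR",   a read)  -> DMA_FROM_DEVICE
	 *	both flags set                                    -> DMA_BIDIRECTIONAL
	 *	neither flag set                                  -> DMA_NONE
	 */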
target_submit_tmr(&ocp->cmd, se_sess, NULL, lun, ocp, tmr_func, + GFP_ATOMIC, tmfio->init_task_tag, TARGET_SCF_ACK_KREF); + + efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_TGT_SUBMIT_TMR); + if (rc) + goto tmf_fail; + + return 0; + +tmf_fail: + efct_scsi_send_tmf_resp(tmfio, EFCT_SCSI_TMF_FUNCTION_REJECTED, + NULL, efct_lio_null_tmf_done, NULL); + return 0; +} + +/* Start items for efct_lio_tpg_attrib_cit */ + +#define DEF_EFCT_TPG_ATTRIB(name) \ + \ +static ssize_t efct_lio_tpg_attrib_##name##_show( \ + struct config_item *item, char *page) \ +{ \ + struct se_portal_group *se_tpg = to_tpg(item); \ + struct efct_lio_tpg *tpg = container_of(se_tpg, \ + struct efct_lio_tpg, tpg); \ + \ + return sprintf(page, "%u\n", tpg->tpg_attrib.name); \ +} \ + \ +static ssize_t efct_lio_tpg_attrib_##name##_store( \ + struct config_item *item, const char *page, size_t count) \ +{ \ + struct se_portal_group *se_tpg = to_tpg(item); \ + struct efct_lio_tpg *tpg = container_of(se_tpg, \ + struct efct_lio_tpg, tpg); \ + struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib; \ + unsigned long val; \ + int ret; \ + \ + ret = kstrtoul(page, 0, &val); \ + if (ret < 0) { \ + pr_err("kstrtoul() failed with ret: %d\n", ret); \ + return ret; \ + } \ + \ + if (val != 0 && val != 1) { \ + pr_err("Illegal boolean value %lu\n", val); \ + return -EINVAL; \ + } \ + \ + a->name = val; \ + \ + return count; \ +} \ +CONFIGFS_ATTR(efct_lio_tpg_attrib_, name) + +DEF_EFCT_TPG_ATTRIB(generate_node_acls); +DEF_EFCT_TPG_ATTRIB(cache_dynamic_acls); +DEF_EFCT_TPG_ATTRIB(demo_mode_write_protect); +DEF_EFCT_TPG_ATTRIB(prod_mode_write_protect); +DEF_EFCT_TPG_ATTRIB(demo_mode_login_only); +DEF_EFCT_TPG_ATTRIB(session_deletion_wait); + +static struct configfs_attribute *efct_lio_tpg_attrib_attrs[] = { + &efct_lio_tpg_attrib_attr_generate_node_acls, + &efct_lio_tpg_attrib_attr_cache_dynamic_acls, + &efct_lio_tpg_attrib_attr_demo_mode_write_protect, + &efct_lio_tpg_attrib_attr_prod_mode_write_protect, + &efct_lio_tpg_attrib_attr_demo_mode_login_only, + &efct_lio_tpg_attrib_attr_session_deletion_wait, + NULL, +}; + +#define DEF_EFCT_NPIV_TPG_ATTRIB(name) \ + \ +static ssize_t efct_lio_npiv_tpg_attrib_##name##_show( \ + struct config_item *item, char *page) \ +{ \ + struct se_portal_group *se_tpg = to_tpg(item); \ + struct efct_lio_tpg *tpg = container_of(se_tpg, \ + struct efct_lio_tpg, tpg); \ + \ + return sprintf(page, "%u\n", tpg->tpg_attrib.name); \ +} \ + \ +static ssize_t efct_lio_npiv_tpg_attrib_##name##_store( \ + struct config_item *item, const char *page, size_t count) \ +{ \ + struct se_portal_group *se_tpg = to_tpg(item); \ + struct efct_lio_tpg *tpg = container_of(se_tpg, \ + struct efct_lio_tpg, tpg); \ + struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib; \ + unsigned long val; \ + int ret; \ + \ + ret = kstrtoul(page, 0, &val); \ + if (ret < 0) { \ + pr_err("kstrtoul() failed with ret: %d\n", ret); \ + return ret; \ + } \ + \ + if (val != 0 && val != 1) { \ + pr_err("Illegal boolean value %lu\n", val); \ + return -EINVAL; \ + } \ + \ + a->name = val; \ + \ + return count; \ +} \ +CONFIGFS_ATTR(efct_lio_npiv_tpg_attrib_, name) + +DEF_EFCT_NPIV_TPG_ATTRIB(generate_node_acls); +DEF_EFCT_NPIV_TPG_ATTRIB(cache_dynamic_acls); +DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_write_protect); +DEF_EFCT_NPIV_TPG_ATTRIB(prod_mode_write_protect); +DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_login_only); +DEF_EFCT_NPIV_TPG_ATTRIB(session_deletion_wait); + +static struct configfs_attribute *efct_lio_npiv_tpg_attrib_attrs[] = { + 
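	/*
	 * Editorial sketch, not part of the driver: each DEF_EFCT_TPG_ATTRIB()
	 * / DEF_EFCT_NPIV_TPG_ATTRIB() invocation above expands to a
	 * show/store pair plus a CONFIGFS_ATTR() definition, which is what
	 * produces the ..._attr_<name> objects collected in arrays like this
	 * one.  For a hypothetical attribute "foo" the store side is the
	 * usual boolean-configfs pattern:
	 *
	 *	ret = kstrtoul(page, 0, &val);
	 *	if (ret < 0)
	 *		return ret;
	 *	if (val != 0 && val != 1)
	 *		return -EINVAL;
	 *	a->foo = val;
	 *	return count;
	 */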
&efct_lio_npiv_tpg_attrib_attr_generate_node_acls, + &efct_lio_npiv_tpg_attrib_attr_cache_dynamic_acls, + &efct_lio_npiv_tpg_attrib_attr_demo_mode_write_protect, + &efct_lio_npiv_tpg_attrib_attr_prod_mode_write_protect, + &efct_lio_npiv_tpg_attrib_attr_demo_mode_login_only, + &efct_lio_npiv_tpg_attrib_attr_session_deletion_wait, + NULL, +}; + +CONFIGFS_ATTR(efct_lio_tpg_, enable); +static struct configfs_attribute *efct_lio_tpg_attrs[] = { + &efct_lio_tpg_attr_enable, NULL }; +CONFIGFS_ATTR(efct_lio_npiv_tpg_, enable); +static struct configfs_attribute *efct_lio_npiv_tpg_attrs[] = { + &efct_lio_npiv_tpg_attr_enable, NULL }; + +static const struct target_core_fabric_ops efct_lio_ops = { + .module = THIS_MODULE, + .fabric_name = "efct", + .node_acl_size = sizeof(struct efct_lio_nacl), + .max_data_sg_nents = 65535, + .tpg_get_wwn = efct_lio_get_fabric_wwn, + .tpg_get_tag = efct_lio_get_tag, + .fabric_init_nodeacl = efct_lio_init_nodeacl, + .tpg_check_demo_mode = efct_lio_check_demo_mode, + .tpg_check_demo_mode_cache = efct_lio_check_demo_mode_cache, + .tpg_check_demo_mode_write_protect = efct_lio_check_demo_write_protect, + .tpg_check_prod_mode_write_protect = efct_lio_check_prod_write_protect, + .check_stop_free = efct_lio_check_stop_free, + .aborted_task = efct_lio_aborted_task, + .release_cmd = efct_lio_release_cmd, + .close_session = efct_lio_close_session, + .write_pending = efct_lio_write_pending, + .get_cmd_state = efct_lio_get_cmd_state, + .queue_data_in = efct_lio_queue_data_in, + .queue_status = efct_lio_queue_status, + .queue_tm_rsp = efct_lio_queue_tm_rsp, + .fabric_make_wwn = efct_lio_make_nport, + .fabric_drop_wwn = efct_lio_drop_nport, + .fabric_make_tpg = efct_lio_make_tpg, + .fabric_drop_tpg = efct_lio_drop_tpg, + .tpg_check_demo_mode_login_only = efct_lio_check_demo_mode_login_only, + .tpg_check_prot_fabric_only = NULL, + .sess_get_initiator_sid = NULL, + .tfc_tpg_base_attrs = efct_lio_tpg_attrs, + .tfc_tpg_attrib_attrs = efct_lio_tpg_attrib_attrs, +}; + +static const struct target_core_fabric_ops efct_lio_npiv_ops = { + .module = THIS_MODULE, + .fabric_name = "efct_npiv", + .node_acl_size = sizeof(struct efct_lio_nacl), + .max_data_sg_nents = 65535, + .tpg_get_wwn = efct_lio_get_npiv_fabric_wwn, + .tpg_get_tag = efct_lio_get_npiv_tag, + .fabric_init_nodeacl = efct_lio_init_nodeacl, + .tpg_check_demo_mode = efct_lio_check_demo_mode, + .tpg_check_demo_mode_cache = efct_lio_check_demo_mode_cache, + .tpg_check_demo_mode_write_protect = + efct_lio_npiv_check_demo_write_protect, + .tpg_check_prod_mode_write_protect = + efct_lio_npiv_check_prod_write_protect, + .check_stop_free = efct_lio_check_stop_free, + .aborted_task = efct_lio_aborted_task, + .release_cmd = efct_lio_release_cmd, + .close_session = efct_lio_close_session, + .write_pending = efct_lio_write_pending, + .get_cmd_state = efct_lio_get_cmd_state, + .queue_data_in = efct_lio_queue_data_in, + .queue_status = efct_lio_queue_status, + .queue_tm_rsp = efct_lio_queue_tm_rsp, + .fabric_make_wwn = efct_lio_npiv_make_nport, + .fabric_drop_wwn = efct_lio_npiv_drop_nport, + .fabric_make_tpg = efct_lio_npiv_make_tpg, + .fabric_drop_tpg = efct_lio_npiv_drop_tpg, + .tpg_check_demo_mode_login_only = + efct_lio_npiv_check_demo_mode_login_only, + .tpg_check_prot_fabric_only = NULL, + .sess_get_initiator_sid = NULL, + .tfc_tpg_base_attrs = efct_lio_npiv_tpg_attrs, + .tfc_tpg_attrib_attrs = efct_lio_npiv_tpg_attrib_attrs, +}; + +int efct_scsi_tgt_driver_init(void) +{ + int rc; + + /* Register the top level struct config_item_type with 
TCM core */ + rc = target_register_template(&efct_lio_ops); + if (rc < 0) { + pr_err("target_fabric_configfs_register failed with %d\n", rc); + return rc; + } + rc = target_register_template(&efct_lio_npiv_ops); + if (rc < 0) { + pr_err("target_fabric_configfs_register failed with %d\n", rc); + target_unregister_template(&efct_lio_ops); + return rc; + } + return 0; +} + +int efct_scsi_tgt_driver_exit(void) +{ + target_unregister_template(&efct_lio_ops); + target_unregister_template(&efct_lio_npiv_ops); + return 0; +} diff --git a/drivers/scsi/elx/efct/efct_lio.h b/drivers/scsi/elx/efct/efct_lio.h new file mode 100644 index 000000000..569a0d4b1 --- /dev/null +++ b/drivers/scsi/elx/efct/efct_lio.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#ifndef __EFCT_LIO_H__ +#define __EFCT_LIO_H__ + +#include "efct_scsi.h" +#include + +#define efct_lio_io_printf(io, fmt, ...) \ + efc_log_debug(io->efct, \ + "[%s] [%04x][i:%04x t:%04x h:%04x]" fmt,\ + io->node->display_name, io->instance_index, \ + io->init_task_tag, io->tgt_task_tag, io->hw_tag,\ + ##__VA_ARGS__) + +#define efct_lio_tmfio_printf(io, fmt, ...) \ + efc_log_debug(io->efct, \ + "[%s] [%04x][i:%04x t:%04x h:%04x][f:%02x]" fmt,\ + io->node->display_name, io->instance_index, \ + io->init_task_tag, io->tgt_task_tag, io->hw_tag,\ + io->tgt_io.tmf, ##__VA_ARGS__) + +#define efct_set_lio_io_state(io, value) (io->tgt_io.state |= value) + +struct efct_lio_wq_data { + struct efct *efct; + void *ptr; + struct work_struct work; +}; + +/* Target private efct structure */ +struct efct_scsi_tgt { + u32 max_sge; + u32 max_sgl; + + /* + * Variables used to send task set full. We are using a high watermark + * method to send task set full. We will reserve a fixed number of IOs + * per initiator plus a fudge factor. Once we reach this number, + * then the target will start sending task set full/busy responses. 
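	 *
	 * Editorial example with hypothetical numbers (the real
	 * EFCT_IO_WATERMARK_PER_INITIATOR value is defined elsewhere): with
	 * watermark_max = 1843, watermark_min = 307 and a per-initiator
	 * reservation of 64, four logged-in initiators give
	 *
	 *	io_high_watermark = max(307, 1843 - 4 * 64) = 1587
	 *
	 * as recomputed in efct_lio_setup_session() and
	 * efct_scsi_del_initiator().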
+ */ + atomic_t initiator_count; + atomic_t ios_in_use; + atomic_t io_high_watermark; + + atomic_t watermark_hit; + int watermark_min; + int watermark_max; + + struct efct_lio_nport *lio_nport; + struct efct_lio_tpg *tpg; + + struct list_head vport_list; + /* Protects vport list*/ + spinlock_t efct_lio_lock; + + u64 wwnn; +}; + +struct efct_scsi_tgt_nport { + struct efct_lio_nport *lio_nport; +}; + +struct efct_node { + struct list_head list_entry; + struct kref ref; + void (*release)(struct kref *arg); + struct efct *efct; + struct efc_node *node; + struct se_session *session; + spinlock_t active_ios_lock; + struct list_head active_ios; + char display_name[EFC_NAME_LENGTH]; + u32 port_fc_id; + u32 node_fc_id; + u32 vpi; + u32 rpi; + u32 abort_cnt; +}; + +#define EFCT_LIO_STATE_SCSI_RECV_CMD (1 << 0) +#define EFCT_LIO_STATE_TGT_SUBMIT_CMD (1 << 1) +#define EFCT_LIO_STATE_TFO_QUEUE_DATA_IN (1 << 2) +#define EFCT_LIO_STATE_TFO_WRITE_PENDING (1 << 3) +#define EFCT_LIO_STATE_TGT_EXECUTE_CMD (1 << 4) +#define EFCT_LIO_STATE_SCSI_SEND_RD_DATA (1 << 5) +#define EFCT_LIO_STATE_TFO_CHK_STOP_FREE (1 << 6) +#define EFCT_LIO_STATE_SCSI_DATA_DONE (1 << 7) +#define EFCT_LIO_STATE_TFO_QUEUE_STATUS (1 << 8) +#define EFCT_LIO_STATE_SCSI_SEND_RSP (1 << 9) +#define EFCT_LIO_STATE_SCSI_RSP_DONE (1 << 10) +#define EFCT_LIO_STATE_TGT_GENERIC_FREE (1 << 11) +#define EFCT_LIO_STATE_SCSI_RECV_TMF (1 << 12) +#define EFCT_LIO_STATE_TGT_SUBMIT_TMR (1 << 13) +#define EFCT_LIO_STATE_TFO_WRITE_PEND_STATUS (1 << 14) +#define EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE (1 << 15) + +#define EFCT_LIO_STATE_TFO_ABORTED_TASK (1 << 29) +#define EFCT_LIO_STATE_TFO_RELEASE_CMD (1 << 30) +#define EFCT_LIO_STATE_SCSI_CMPL_CMD (1u << 31) + +struct efct_scsi_tgt_io { + struct se_cmd cmd; + unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; + enum dma_data_direction ddir; + int task_attr; + u64 lun; + + u32 state; + u8 tmf; + struct efct_io *io_to_abort; + u32 seg_map_cnt; + u32 seg_cnt; + u32 cur_seg; + enum efct_scsi_io_status err; + bool aborting; + bool rsp_sent; + u32 transferred_len; +}; + +/* Handler return codes */ +enum { + SCSI_HANDLER_DATAPHASE_STARTED = 1, + SCSI_HANDLER_RESP_STARTED, + SCSI_HANDLER_VALIDATED_DATAPHASE_STARTED, + SCSI_CMD_NOT_SUPPORTED, +}; + +#define WWN_NAME_LEN 32 +struct efct_lio_vport { + u64 wwpn; + u64 npiv_wwpn; + u64 npiv_wwnn; + unsigned char wwpn_str[WWN_NAME_LEN]; + struct se_wwn vport_wwn; + struct efct_lio_tpg *tpg; + struct efct *efct; + struct Scsi_Host *shost; + struct fc_vport *fc_vport; + atomic_t enable; +}; + +struct efct_lio_nport { + u64 wwpn; + unsigned char wwpn_str[WWN_NAME_LEN]; + struct se_wwn nport_wwn; + struct efct_lio_tpg *tpg; + struct efct *efct; + atomic_t enable; +}; + +struct efct_lio_tpg_attrib { + u32 generate_node_acls; + u32 cache_dynamic_acls; + u32 demo_mode_write_protect; + u32 prod_mode_write_protect; + u32 demo_mode_login_only; + bool session_deletion_wait; +}; + +struct efct_lio_tpg { + struct se_portal_group tpg; + struct efct_lio_nport *nport; + struct efct_lio_vport *vport; + struct efct_lio_tpg_attrib tpg_attrib; + unsigned short tpgt; + bool enabled; +}; + +struct efct_lio_nacl { + u64 nport_wwnn; + char nport_name[WWN_NAME_LEN]; + struct se_session *session; + struct se_node_acl se_node_acl; +}; + +struct efct_lio_vport_list_t { + struct list_head list_entry; + struct efct_lio_vport *lio_vport; +}; + +int efct_scsi_tgt_driver_init(void); +int efct_scsi_tgt_driver_exit(void); + +#endif /*__EFCT_LIO_H__ */ diff --git a/drivers/scsi/elx/efct/efct_scsi.c 
b/drivers/scsi/elx/efct/efct_scsi.c new file mode 100644 index 000000000..afb154992 --- /dev/null +++ b/drivers/scsi/elx/efct/efct_scsi.c @@ -0,0 +1,1157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#include "efct_driver.h" +#include "efct_hw.h" + +#define enable_tsend_auto_resp(efct) 1 +#define enable_treceive_auto_resp(efct) 0 + +#define SCSI_IOFMT "[%04x][i:%04x t:%04x h:%04x]" + +#define scsi_io_printf(io, fmt, ...) \ + efc_log_debug(io->efct, "[%s]" SCSI_IOFMT fmt, \ + io->node->display_name, io->instance_index,\ + io->init_task_tag, io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__) + +#define EFCT_LOG_ENABLE_SCSI_TRACE(efct) \ + (((efct) != NULL) ? (((efct)->logmask & (1U << 2)) != 0) : 0) + +#define scsi_io_trace(io, fmt, ...) \ + do { \ + if (EFCT_LOG_ENABLE_SCSI_TRACE(io->efct)) \ + scsi_io_printf(io, fmt, ##__VA_ARGS__); \ + } while (0) + +struct efct_io * +efct_scsi_io_alloc(struct efct_node *node) +{ + struct efct *efct; + struct efct_xport *xport; + struct efct_io *io; + unsigned long flags; + + efct = node->efct; + + xport = efct->xport; + + io = efct_io_pool_io_alloc(efct->xport->io_pool); + if (!io) { + efc_log_err(efct, "IO alloc Failed\n"); + atomic_add_return(1, &xport->io_alloc_failed_count); + return NULL; + } + + /* initialize refcount */ + kref_init(&io->ref); + io->release = _efct_scsi_io_free; + + /* set generic fields */ + io->efct = efct; + io->node = node; + kref_get(&node->ref); + + /* set type and name */ + io->io_type = EFCT_IO_TYPE_IO; + io->display_name = "scsi_io"; + + io->cmd_ini = false; + io->cmd_tgt = true; + + /* Add to node's active_ios list */ + INIT_LIST_HEAD(&io->list_entry); + spin_lock_irqsave(&node->active_ios_lock, flags); + list_add(&io->list_entry, &node->active_ios); + + spin_unlock_irqrestore(&node->active_ios_lock, flags); + + return io; +} + +void +_efct_scsi_io_free(struct kref *arg) +{ + struct efct_io *io = container_of(arg, struct efct_io, ref); + struct efct *efct = io->efct; + struct efct_node *node = io->node; + unsigned long flags = 0; + + scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name); + + if (io->io_free) { + efc_log_err(efct, "IO already freed.\n"); + return; + } + + spin_lock_irqsave(&node->active_ios_lock, flags); + list_del_init(&io->list_entry); + spin_unlock_irqrestore(&node->active_ios_lock, flags); + + kref_put(&node->ref, node->release); + io->node = NULL; + efct_io_pool_io_free(efct->xport->io_pool, io); +} + +void +efct_scsi_io_free(struct efct_io *io) +{ + scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name); + WARN_ON(!refcount_read(&io->ref.refcount)); + kref_put(&io->ref, io->release); +} + +static void +efct_target_io_cb(struct efct_hw_io *hio, u32 length, int status, + u32 ext_status, void *app) +{ + u32 flags = 0; + struct efct_io *io = app; + struct efct *efct; + enum efct_scsi_io_status scsi_stat = EFCT_SCSI_STATUS_GOOD; + efct_scsi_io_cb_t cb; + + if (!io || !io->efct) { + pr_err("%s: IO can not be NULL\n", __func__); + return; + } + + scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status); + + efct = io->efct; + + io->transferred += length; + + if (!io->scsi_tgt_cb) { + efct_scsi_check_pending(efct); + return; + } + + /* Call target server completion */ + cb = io->scsi_tgt_cb; + + /* Clear the callback before invoking the callback */ + io->scsi_tgt_cb = NULL; + + /* if status was good, and auto-good-response was set, + * then callback 
target-server with IO_CMPL_RSP_SENT, + * otherwise send IO_CMPL + */ + if (status == 0 && io->auto_resp) + flags |= EFCT_SCSI_IO_CMPL_RSP_SENT; + else + flags |= EFCT_SCSI_IO_CMPL; + + switch (status) { + case SLI4_FC_WCQE_STATUS_SUCCESS: + scsi_stat = EFCT_SCSI_STATUS_GOOD; + break; + case SLI4_FC_WCQE_STATUS_DI_ERROR: + if (ext_status & SLI4_FC_DI_ERROR_GE) + scsi_stat = EFCT_SCSI_STATUS_DIF_GUARD_ERR; + else if (ext_status & SLI4_FC_DI_ERROR_AE) + scsi_stat = EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR; + else if (ext_status & SLI4_FC_DI_ERROR_RE) + scsi_stat = EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR; + else + scsi_stat = EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR; + break; + case SLI4_FC_WCQE_STATUS_LOCAL_REJECT: + switch (ext_status) { + case SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET: + case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED: + scsi_stat = EFCT_SCSI_STATUS_ABORTED; + break; + case SLI4_FC_LOCAL_REJECT_INVALID_RPI: + scsi_stat = EFCT_SCSI_STATUS_NEXUS_LOST; + break; + case SLI4_FC_LOCAL_REJECT_NO_XRI: + scsi_stat = EFCT_SCSI_STATUS_NO_IO; + break; + default: + /*we have seen 0x0d(TX_DMA_FAILED err)*/ + scsi_stat = EFCT_SCSI_STATUS_ERROR; + break; + } + break; + + case SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT: + /* target IO timed out */ + scsi_stat = EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED; + break; + + case SLI4_FC_WCQE_STATUS_SHUTDOWN: + /* Target IO cancelled by HW */ + scsi_stat = EFCT_SCSI_STATUS_SHUTDOWN; + break; + + default: + scsi_stat = EFCT_SCSI_STATUS_ERROR; + break; + } + + cb(io, scsi_stat, flags, io->scsi_tgt_cb_arg); + + efct_scsi_check_pending(efct); +} + +static int +efct_scsi_build_sgls(struct efct_hw *hw, struct efct_hw_io *hio, + struct efct_scsi_sgl *sgl, u32 sgl_count, + enum efct_hw_io_type type) +{ + int rc; + u32 i; + struct efct *efct = hw->os; + + /* Initialize HW SGL */ + rc = efct_hw_io_init_sges(hw, hio, type); + if (rc) { + efc_log_err(efct, "efct_hw_io_init_sges failed: %d\n", rc); + return -EIO; + } + + for (i = 0; i < sgl_count; i++) { + /* Add data SGE */ + rc = efct_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len); + if (rc) { + efc_log_err(efct, "add sge failed cnt=%d rc=%d\n", + sgl_count, rc); + return rc; + } + } + + return 0; +} + +static void efc_log_sgl(struct efct_io *io) +{ + struct efct_hw_io *hio = io->hio; + struct sli4_sge *data = NULL; + u32 *dword = NULL; + u32 i; + u32 n_sge; + + scsi_io_trace(io, "def_sgl at 0x%x 0x%08x\n", + upper_32_bits(hio->def_sgl.phys), + lower_32_bits(hio->def_sgl.phys)); + n_sge = (hio->sgl == &hio->def_sgl) ? 
hio->n_sge : hio->def_sgl_count; + for (i = 0, data = hio->def_sgl.virt; i < n_sge; i++, data++) { + dword = (u32 *)data; + + scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n", + i, dword[0], dword[1], dword[2], dword[3]); + + if (dword[2] & (1U << 31)) + break; + } +} + +static void +efct_scsi_check_pending_async_cb(struct efct_hw *hw, int status, + u8 *mqe, void *arg) +{ + struct efct_io *io = arg; + + if (io) { + efct_hw_done_t cb = io->hw_cb; + + if (!io->hw_cb) + return; + + io->hw_cb = NULL; + (cb)(io->hio, 0, SLI4_FC_WCQE_STATUS_DISPATCH_ERROR, 0, io); + } +} + +static int +efct_scsi_io_dispatch_hw_io(struct efct_io *io, struct efct_hw_io *hio) +{ + int rc = 0; + struct efct *efct = io->efct; + + /* Got a HW IO; + * update ini/tgt_task_tag with HW IO info and dispatch + */ + io->hio = hio; + if (io->cmd_tgt) + io->tgt_task_tag = hio->indicator; + else if (io->cmd_ini) + io->init_task_tag = hio->indicator; + io->hw_tag = hio->reqtag; + + hio->eq = io->hw_priv; + + /* Copy WQ steering */ + switch (io->wq_steering) { + case EFCT_SCSI_WQ_STEERING_CLASS >> EFCT_SCSI_WQ_STEERING_SHIFT: + hio->wq_steering = EFCT_HW_WQ_STEERING_CLASS; + break; + case EFCT_SCSI_WQ_STEERING_REQUEST >> EFCT_SCSI_WQ_STEERING_SHIFT: + hio->wq_steering = EFCT_HW_WQ_STEERING_REQUEST; + break; + case EFCT_SCSI_WQ_STEERING_CPU >> EFCT_SCSI_WQ_STEERING_SHIFT: + hio->wq_steering = EFCT_HW_WQ_STEERING_CPU; + break; + } + + switch (io->io_type) { + case EFCT_IO_TYPE_IO: + rc = efct_scsi_build_sgls(&efct->hw, io->hio, + io->sgl, io->sgl_count, io->hio_type); + if (rc) + break; + + if (EFCT_LOG_ENABLE_SCSI_TRACE(efct)) + efc_log_sgl(io); + + if (io->app_id) + io->iparam.fcp_tgt.app_id = io->app_id; + + io->iparam.fcp_tgt.vpi = io->node->vpi; + io->iparam.fcp_tgt.rpi = io->node->rpi; + io->iparam.fcp_tgt.s_id = io->node->port_fc_id; + io->iparam.fcp_tgt.d_id = io->node->node_fc_id; + io->iparam.fcp_tgt.xmit_len = io->wire_len; + + rc = efct_hw_io_send(&io->efct->hw, io->hio_type, io->hio, + &io->iparam, io->hw_cb, io); + break; + default: + scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type); + rc = -EIO; + break; + } + return rc; +} + +static int +efct_scsi_io_dispatch_no_hw_io(struct efct_io *io) +{ + int rc; + + switch (io->io_type) { + case EFCT_IO_TYPE_ABORT: { + struct efct_hw_io *hio_to_abort = NULL; + + hio_to_abort = io->io_to_abort->hio; + + if (!hio_to_abort) { + /* + * If "IO to abort" does not have an + * associated HW IO, immediately make callback with + * success. The command must have been sent to + * the backend, but the data phase has not yet + * started, so we don't have a HW IO. + * + * Note: since the backend shims should be + * taking a reference on io_to_abort, it should not + * be possible to have been completed and freed by + * the backend before the abort got here. 
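		 *
		 * Editorial note, not in the original comment: the pairing
		 * this driver uses is visible further down in this file,
		 *
		 *	efct_scsi_tgt_abort_io():  kref_get_unless_zero(&io->ref)
		 *	efct_target_abort_cb():    kref_put(&io->io_to_abort->ref, ...)
		 *
		 * so io_to_abort stays valid for the whole abort window.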
+ */ + scsi_io_printf(io, "IO: not active\n"); + ((efct_hw_done_t)io->hw_cb)(io->hio, 0, + SLI4_FC_WCQE_STATUS_SUCCESS, 0, io); + rc = 0; + break; + } + + /* HW IO is valid, abort it */ + scsi_io_printf(io, "aborting\n"); + rc = efct_hw_io_abort(&io->efct->hw, hio_to_abort, + io->send_abts, io->hw_cb, io); + if (rc) { + int status = SLI4_FC_WCQE_STATUS_SUCCESS; + efct_hw_done_t cb = io->hw_cb; + + if (rc != -ENOENT && rc != -EINPROGRESS) { + status = -1; + scsi_io_printf(io, "Failed to abort IO rc=%d\n", + rc); + } + cb(io->hio, 0, status, 0, io); + rc = 0; + } + + break; + } + default: + scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type); + rc = -EIO; + break; + } + return rc; +} + +static struct efct_io * +efct_scsi_dispatch_pending(struct efct *efct) +{ + struct efct_xport *xport = efct->xport; + struct efct_io *io = NULL; + struct efct_hw_io *hio; + unsigned long flags = 0; + int status; + + spin_lock_irqsave(&xport->io_pending_lock, flags); + + if (!list_empty(&xport->io_pending_list)) { + io = list_first_entry(&xport->io_pending_list, struct efct_io, + io_pending_link); + list_del_init(&io->io_pending_link); + } + + if (!io) { + spin_unlock_irqrestore(&xport->io_pending_lock, flags); + return NULL; + } + + if (io->io_type == EFCT_IO_TYPE_ABORT) { + hio = NULL; + } else { + hio = efct_hw_io_alloc(&efct->hw); + if (!hio) { + /* + * No HW IO available.Put IO back on + * the front of pending list + */ + list_add(&xport->io_pending_list, &io->io_pending_link); + io = NULL; + } else { + hio->eq = io->hw_priv; + } + } + + /* Must drop the lock before dispatching the IO */ + spin_unlock_irqrestore(&xport->io_pending_lock, flags); + + if (!io) + return NULL; + + /* + * We pulled an IO off the pending list, + * and either got an HW IO or don't need one + */ + atomic_sub_return(1, &xport->io_pending_count); + if (!hio) + status = efct_scsi_io_dispatch_no_hw_io(io); + else + status = efct_scsi_io_dispatch_hw_io(io, hio); + if (status) { + /* + * Invoke the HW callback, but do so in the + * separate execution context,provided by the + * NOP mailbox completion processing context + * by using efct_hw_async_call() + */ + if (efct_hw_async_call(&efct->hw, + efct_scsi_check_pending_async_cb, io)) { + efc_log_debug(efct, "call hw async failed\n"); + } + } + + return io; +} + +void +efct_scsi_check_pending(struct efct *efct) +{ + struct efct_xport *xport = efct->xport; + struct efct_io *io = NULL; + int count = 0; + unsigned long flags = 0; + int dispatch = 0; + + /* Guard against recursion */ + if (atomic_add_return(1, &xport->io_pending_recursing)) { + /* This function is already running. Decrement and return. */ + atomic_sub_return(1, &xport->io_pending_recursing); + return; + } + + while (efct_scsi_dispatch_pending(efct)) + count++; + + if (count) { + atomic_sub_return(1, &xport->io_pending_recursing); + return; + } + + /* + * If nothing was removed from the list, + * we might be in a case where we need to abort an + * active IO and the abort is on the pending list. + * Look for an abort we can dispatch. + */ + + spin_lock_irqsave(&xport->io_pending_lock, flags); + + list_for_each_entry(io, &xport->io_pending_list, io_pending_link) { + if (io->io_type == EFCT_IO_TYPE_ABORT && io->io_to_abort->hio) { + /* This IO has a HW IO, so it is + * active. Dispatch the abort. 
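		 * Editorial note, not in the original comment: this second
		 * scan matters typically when every HW IO is in use - normal
		 * pending IOs then cannot be dispatched, but an abort needs
		 * no new HW IO (see efct_scsi_io_dispatch_no_hw_io()), so
		 * letting it go first here keeps abort processing from
		 * stalling behind them.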
+ */ + dispatch = 1; + list_del_init(&io->io_pending_link); + atomic_sub_return(1, &xport->io_pending_count); + break; + } + } + + spin_unlock_irqrestore(&xport->io_pending_lock, flags); + + if (dispatch) { + if (efct_scsi_io_dispatch_no_hw_io(io)) { + if (efct_hw_async_call(&efct->hw, + efct_scsi_check_pending_async_cb, io)) { + efc_log_debug(efct, "hw async failed\n"); + } + } + } + + atomic_sub_return(1, &xport->io_pending_recursing); +} + +int +efct_scsi_io_dispatch(struct efct_io *io, void *cb) +{ + struct efct_hw_io *hio; + struct efct *efct = io->efct; + struct efct_xport *xport = efct->xport; + unsigned long flags = 0; + + io->hw_cb = cb; + + /* + * if this IO already has a HW IO, then this is either + * not the first phase of the IO. Send it to the HW. + */ + if (io->hio) + return efct_scsi_io_dispatch_hw_io(io, io->hio); + + /* + * We don't already have a HW IO associated with the IO. First check + * the pending list. If not empty, add IO to the tail and process the + * pending list. + */ + spin_lock_irqsave(&xport->io_pending_lock, flags); + if (!list_empty(&xport->io_pending_list)) { + /* + * If this is a low latency request, + * the put at the front of the IO pending + * queue, otherwise put it at the end of the queue. + */ + if (io->low_latency) { + INIT_LIST_HEAD(&io->io_pending_link); + list_add(&xport->io_pending_list, &io->io_pending_link); + } else { + INIT_LIST_HEAD(&io->io_pending_link); + list_add_tail(&io->io_pending_link, + &xport->io_pending_list); + } + spin_unlock_irqrestore(&xport->io_pending_lock, flags); + atomic_add_return(1, &xport->io_pending_count); + atomic_add_return(1, &xport->io_total_pending); + + /* process pending list */ + efct_scsi_check_pending(efct); + return 0; + } + spin_unlock_irqrestore(&xport->io_pending_lock, flags); + + /* + * We don't have a HW IO associated with the IO and there's nothing + * on the pending list. Attempt to allocate a HW IO and dispatch it. + */ + hio = efct_hw_io_alloc(&io->efct->hw); + if (!hio) { + /* Couldn't get a HW IO. Save this IO on the pending list */ + spin_lock_irqsave(&xport->io_pending_lock, flags); + INIT_LIST_HEAD(&io->io_pending_link); + list_add_tail(&io->io_pending_link, &xport->io_pending_list); + spin_unlock_irqrestore(&xport->io_pending_lock, flags); + + atomic_add_return(1, &xport->io_total_pending); + atomic_add_return(1, &xport->io_pending_count); + return 0; + } + + /* We successfully allocated a HW IO; dispatch to HW */ + return efct_scsi_io_dispatch_hw_io(io, hio); +} + +int +efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb) +{ + struct efct *efct = io->efct; + struct efct_xport *xport = efct->xport; + unsigned long flags = 0; + + io->hw_cb = cb; + + /* + * For aborts, we don't need a HW IO, but we still want + * to pass through the pending list to preserve ordering. + * Thus, if the pending list is not empty, add this abort + * to the pending list and process the pending list. 
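	 *
	 * Editorial example, not in the original comment: if the command
	 * being aborted is itself still queued (no HW IO yet), dispatching
	 * the abort immediately would hit the "IO: not active" path and
	 * complete before the command ever runs; by queueing it behind,
	 *
	 *	pending: [ cmd A ] -> [ abort of A ]
	 *
	 * the abort is only handled after A has been picked up.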
+ */ + spin_lock_irqsave(&xport->io_pending_lock, flags); + if (!list_empty(&xport->io_pending_list)) { + INIT_LIST_HEAD(&io->io_pending_link); + list_add_tail(&io->io_pending_link, &xport->io_pending_list); + spin_unlock_irqrestore(&xport->io_pending_lock, flags); + atomic_add_return(1, &xport->io_pending_count); + atomic_add_return(1, &xport->io_total_pending); + + /* process pending list */ + efct_scsi_check_pending(efct); + return 0; + } + spin_unlock_irqrestore(&xport->io_pending_lock, flags); + + /* nothing on pending list, dispatch abort */ + return efct_scsi_io_dispatch_no_hw_io(io); +} + +static inline int +efct_scsi_xfer_data(struct efct_io *io, u32 flags, + struct efct_scsi_sgl *sgl, u32 sgl_count, u64 xwire_len, + enum efct_hw_io_type type, int enable_ar, + efct_scsi_io_cb_t cb, void *arg) +{ + struct efct *efct; + size_t residual = 0; + + io->sgl_count = sgl_count; + + efct = io->efct; + + scsi_io_trace(io, "%s wire_len %llu\n", + (type == EFCT_HW_IO_TARGET_READ) ? "send" : "recv", + xwire_len); + + io->hio_type = type; + + io->scsi_tgt_cb = cb; + io->scsi_tgt_cb_arg = arg; + + residual = io->exp_xfer_len - io->transferred; + io->wire_len = (xwire_len < residual) ? xwire_len : residual; + residual = (xwire_len - io->wire_len); + + memset(&io->iparam, 0, sizeof(io->iparam)); + io->iparam.fcp_tgt.ox_id = io->init_task_tag; + io->iparam.fcp_tgt.offset = io->transferred; + io->iparam.fcp_tgt.cs_ctl = io->cs_ctl; + io->iparam.fcp_tgt.timeout = io->timeout; + + /* if this is the last data phase and there is no residual, enable + * auto-good-response + */ + if (enable_ar && (flags & EFCT_SCSI_LAST_DATAPHASE) && residual == 0 && + ((io->transferred + io->wire_len) == io->exp_xfer_len) && + (!(flags & EFCT_SCSI_NO_AUTO_RESPONSE))) { + io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE; + io->auto_resp = true; + } else { + io->auto_resp = false; + } + + /* save this transfer length */ + io->xfer_req = io->wire_len; + + /* Adjust the transferred count to account for overrun + * when the residual is calculated in efct_scsi_send_resp + */ + io->transferred += residual; + + /* Adjust the SGL size if there is overrun */ + + if (residual) { + struct efct_scsi_sgl *sgl_ptr = &io->sgl[sgl_count - 1]; + + while (residual) { + size_t len = sgl_ptr->len; + + if (len > residual) { + sgl_ptr->len = len - residual; + residual = 0; + } else { + sgl_ptr->len = 0; + residual -= len; + io->sgl_count--; + } + sgl_ptr--; + } + } + + /* Set latency and WQ steering */ + io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0; + io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >> + EFCT_SCSI_WQ_STEERING_SHIFT; + io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >> + EFCT_SCSI_WQ_CLASS_SHIFT; + + if (efct->xport) { + struct efct_xport *xport = efct->xport; + + if (type == EFCT_HW_IO_TARGET_READ) { + xport->fcp_stats.input_requests++; + xport->fcp_stats.input_bytes += xwire_len; + } else if (type == EFCT_HW_IO_TARGET_WRITE) { + xport->fcp_stats.output_requests++; + xport->fcp_stats.output_bytes += xwire_len; + } + } + return efct_scsi_io_dispatch(io, efct_target_io_cb); +} + +int +efct_scsi_send_rd_data(struct efct_io *io, u32 flags, + struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len, + efct_scsi_io_cb_t cb, void *arg) +{ + return efct_scsi_xfer_data(io, flags, sgl, sgl_count, + len, EFCT_HW_IO_TARGET_READ, + enable_tsend_auto_resp(io->efct), cb, arg); +} + +int +efct_scsi_recv_wr_data(struct efct_io *io, u32 flags, + struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len, + efct_scsi_io_cb_t cb, void *arg) 
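/*
 * Editorial sketch of the overrun trimming done in efct_scsi_xfer_data()
 * above, not part of the driver: when the requested wire length exceeds
 * what the exchange still expects, the tail of the SGL is shortened.
 * With a hypothetical residual of 1536 bytes and a last SGE of 1024:
 *
 *	sgl[n-1].len = 1024 -> dropped entirely, residual becomes 512,
 *	                       io->sgl_count is decremented
 *	sgl[n-2].len = 4096 -> trimmed to 3584, residual becomes 0
 */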
+{ + return efct_scsi_xfer_data(io, flags, sgl, sgl_count, len, + EFCT_HW_IO_TARGET_WRITE, + enable_treceive_auto_resp(io->efct), cb, arg); +} + +int +efct_scsi_send_resp(struct efct_io *io, u32 flags, + struct efct_scsi_cmd_resp *rsp, + efct_scsi_io_cb_t cb, void *arg) +{ + struct efct *efct; + int residual; + /* Always try auto resp */ + bool auto_resp = true; + u8 scsi_status = 0; + u16 scsi_status_qualifier = 0; + u8 *sense_data = NULL; + u32 sense_data_length = 0; + + efct = io->efct; + + if (rsp) { + scsi_status = rsp->scsi_status; + scsi_status_qualifier = rsp->scsi_status_qualifier; + sense_data = rsp->sense_data; + sense_data_length = rsp->sense_data_length; + residual = rsp->residual; + } else { + residual = io->exp_xfer_len - io->transferred; + } + + io->wire_len = 0; + io->hio_type = EFCT_HW_IO_TARGET_RSP; + + io->scsi_tgt_cb = cb; + io->scsi_tgt_cb_arg = arg; + + memset(&io->iparam, 0, sizeof(io->iparam)); + io->iparam.fcp_tgt.ox_id = io->init_task_tag; + io->iparam.fcp_tgt.offset = 0; + io->iparam.fcp_tgt.cs_ctl = io->cs_ctl; + io->iparam.fcp_tgt.timeout = io->timeout; + + /* Set low latency queueing request */ + io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0; + io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >> + EFCT_SCSI_WQ_STEERING_SHIFT; + io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >> + EFCT_SCSI_WQ_CLASS_SHIFT; + + if (scsi_status != 0 || residual || sense_data_length) { + struct fcp_resp_with_ext *fcprsp = io->rspbuf.virt; + u8 *sns_data; + + if (!fcprsp) { + efc_log_err(efct, "NULL response buffer\n"); + return -EIO; + } + + sns_data = (u8 *)io->rspbuf.virt + sizeof(*fcprsp); + + auto_resp = false; + + memset(fcprsp, 0, sizeof(*fcprsp)); + + io->wire_len += sizeof(*fcprsp); + + fcprsp->resp.fr_status = scsi_status; + fcprsp->resp.fr_retry_delay = + cpu_to_be16(scsi_status_qualifier); + + /* set residual status if necessary */ + if (residual != 0) { + /* FCP: if data transferred is less than the + * amount expected, then this is an underflow. 
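	 * (Editorial example: exp_xfer_len 4096, transferred 3072 gives
	 *  residual +1024, reported as FCP_RESID_UNDER with
	 *  fr_resid = cpu_to_be32(1024).)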
+ * If data transferred would have been greater + * than the amount expected this is an overflow + */ + if (residual > 0) { + fcprsp->resp.fr_flags |= FCP_RESID_UNDER; + fcprsp->ext.fr_resid = cpu_to_be32(residual); + } else { + fcprsp->resp.fr_flags |= FCP_RESID_OVER; + fcprsp->ext.fr_resid = cpu_to_be32(-residual); + } + } + + if (EFCT_SCSI_SNS_BUF_VALID(sense_data) && sense_data_length) { + if (sense_data_length > SCSI_SENSE_BUFFERSIZE) { + efc_log_err(efct, "Sense exceeds max size.\n"); + return -EIO; + } + + fcprsp->resp.fr_flags |= FCP_SNS_LEN_VAL; + memcpy(sns_data, sense_data, sense_data_length); + fcprsp->ext.fr_sns_len = cpu_to_be32(sense_data_length); + io->wire_len += sense_data_length; + } + + io->sgl[0].addr = io->rspbuf.phys; + io->sgl[0].dif_addr = 0; + io->sgl[0].len = io->wire_len; + io->sgl_count = 1; + } + + if (auto_resp) + io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE; + + return efct_scsi_io_dispatch(io, efct_target_io_cb); +} + +static int +efct_target_bls_resp_cb(struct efct_hw_io *hio, u32 length, int status, + u32 ext_status, void *app) +{ + struct efct_io *io = app; + struct efct *efct; + enum efct_scsi_io_status bls_status; + + efct = io->efct; + + /* BLS isn't really a "SCSI" concept, but use SCSI status */ + if (status) { + io_error_log(io, "s=%#x x=%#x\n", status, ext_status); + bls_status = EFCT_SCSI_STATUS_ERROR; + } else { + bls_status = EFCT_SCSI_STATUS_GOOD; + } + + if (io->bls_cb) { + efct_scsi_io_cb_t bls_cb = io->bls_cb; + void *bls_cb_arg = io->bls_cb_arg; + + io->bls_cb = NULL; + io->bls_cb_arg = NULL; + + /* invoke callback */ + bls_cb(io, bls_status, 0, bls_cb_arg); + } + + efct_scsi_check_pending(efct); + return 0; +} + +static int +efct_target_send_bls_resp(struct efct_io *io, + efct_scsi_io_cb_t cb, void *arg) +{ + struct efct_node *node = io->node; + struct sli_bls_params *bls = &io->iparam.bls; + struct efct *efct = node->efct; + struct fc_ba_acc *acc; + int rc; + + /* fill out IO structure with everything needed to send BA_ACC */ + memset(&io->iparam, 0, sizeof(io->iparam)); + bls->ox_id = io->init_task_tag; + bls->rx_id = io->abort_rx_id; + bls->vpi = io->node->vpi; + bls->rpi = io->node->rpi; + bls->s_id = U32_MAX; + bls->d_id = io->node->node_fc_id; + bls->rpi_registered = true; + + acc = (void *)bls->payload; + acc->ba_ox_id = cpu_to_be16(bls->ox_id); + acc->ba_rx_id = cpu_to_be16(bls->rx_id); + acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX); + + /* generic io fields have already been populated */ + + /* set type and BLS-specific fields */ + io->io_type = EFCT_IO_TYPE_BLS_RESP; + io->display_name = "bls_rsp"; + io->hio_type = EFCT_HW_BLS_ACC; + io->bls_cb = cb; + io->bls_cb_arg = arg; + + /* dispatch IO */ + rc = efct_hw_bls_send(efct, FC_RCTL_BA_ACC, bls, + efct_target_bls_resp_cb, io); + return rc; +} + +static int efct_bls_send_rjt_cb(struct efct_hw_io *hio, u32 length, int status, + u32 ext_status, void *app) +{ + struct efct_io *io = app; + + efct_scsi_io_free(io); + return 0; +} + +struct efct_io * +efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr) +{ + struct efct_node *node = io->node; + struct sli_bls_params *bls = &io->iparam.bls; + struct efct *efct = node->efct; + struct fc_ba_rjt *acc; + int rc; + + /* fill out BLS Response-specific fields */ + io->io_type = EFCT_IO_TYPE_BLS_RESP; + io->display_name = "ba_rjt"; + io->hio_type = EFCT_HW_BLS_RJT; + io->init_task_tag = be16_to_cpu(hdr->fh_ox_id); + + /* fill out iparam fields */ + memset(&io->iparam, 0, sizeof(io->iparam)); + bls->ox_id = 
be16_to_cpu(hdr->fh_ox_id); + bls->rx_id = be16_to_cpu(hdr->fh_rx_id); + bls->vpi = io->node->vpi; + bls->rpi = io->node->rpi; + bls->s_id = U32_MAX; + bls->d_id = io->node->node_fc_id; + bls->rpi_registered = true; + + acc = (void *)bls->payload; + acc->br_reason = ELS_RJT_UNAB; + acc->br_explan = ELS_EXPL_NONE; + + rc = efct_hw_bls_send(efct, FC_RCTL_BA_RJT, bls, efct_bls_send_rjt_cb, + io); + if (rc) { + efc_log_err(efct, "efct_scsi_io_dispatch() failed: %d\n", rc); + efct_scsi_io_free(io); + io = NULL; + } + return io; +} + +int +efct_scsi_send_tmf_resp(struct efct_io *io, + enum efct_scsi_tmf_resp rspcode, + u8 addl_rsp_info[3], + efct_scsi_io_cb_t cb, void *arg) +{ + int rc; + struct { + struct fcp_resp_with_ext rsp_ext; + struct fcp_resp_rsp_info info; + } *fcprsp; + u8 fcp_rspcode; + + io->wire_len = 0; + + switch (rspcode) { + case EFCT_SCSI_TMF_FUNCTION_COMPLETE: + fcp_rspcode = FCP_TMF_CMPL; + break; + case EFCT_SCSI_TMF_FUNCTION_SUCCEEDED: + case EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND: + fcp_rspcode = FCP_TMF_CMPL; + break; + case EFCT_SCSI_TMF_FUNCTION_REJECTED: + fcp_rspcode = FCP_TMF_REJECTED; + break; + case EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER: + fcp_rspcode = FCP_TMF_INVALID_LUN; + break; + case EFCT_SCSI_TMF_SERVICE_DELIVERY: + fcp_rspcode = FCP_TMF_FAILED; + break; + default: + fcp_rspcode = FCP_TMF_REJECTED; + break; + } + + io->hio_type = EFCT_HW_IO_TARGET_RSP; + + io->scsi_tgt_cb = cb; + io->scsi_tgt_cb_arg = arg; + + if (io->tmf_cmd == EFCT_SCSI_TMF_ABORT_TASK) { + rc = efct_target_send_bls_resp(io, cb, arg); + return rc; + } + + /* populate the FCP TMF response */ + fcprsp = io->rspbuf.virt; + memset(fcprsp, 0, sizeof(*fcprsp)); + + fcprsp->rsp_ext.resp.fr_flags |= FCP_SNS_LEN_VAL; + + if (addl_rsp_info) { + memcpy(fcprsp->info._fr_resvd, addl_rsp_info, + sizeof(fcprsp->info._fr_resvd)); + } + fcprsp->info.rsp_code = fcp_rspcode; + + io->wire_len = sizeof(*fcprsp); + + fcprsp->rsp_ext.ext.fr_rsp_len = + cpu_to_be32(sizeof(struct fcp_resp_rsp_info)); + + io->sgl[0].addr = io->rspbuf.phys; + io->sgl[0].dif_addr = 0; + io->sgl[0].len = io->wire_len; + io->sgl_count = 1; + + memset(&io->iparam, 0, sizeof(io->iparam)); + io->iparam.fcp_tgt.ox_id = io->init_task_tag; + io->iparam.fcp_tgt.offset = 0; + io->iparam.fcp_tgt.cs_ctl = io->cs_ctl; + io->iparam.fcp_tgt.timeout = io->timeout; + + rc = efct_scsi_io_dispatch(io, efct_target_io_cb); + + return rc; +} + +static int +efct_target_abort_cb(struct efct_hw_io *hio, u32 length, int status, + u32 ext_status, void *app) +{ + struct efct_io *io = app; + struct efct *efct; + enum efct_scsi_io_status scsi_status; + efct_scsi_io_cb_t abort_cb; + void *abort_cb_arg; + + efct = io->efct; + + if (!io->abort_cb) + goto done; + + abort_cb = io->abort_cb; + abort_cb_arg = io->abort_cb_arg; + + io->abort_cb = NULL; + io->abort_cb_arg = NULL; + + switch (status) { + case SLI4_FC_WCQE_STATUS_SUCCESS: + scsi_status = EFCT_SCSI_STATUS_GOOD; + break; + case SLI4_FC_WCQE_STATUS_LOCAL_REJECT: + switch (ext_status) { + case SLI4_FC_LOCAL_REJECT_NO_XRI: + scsi_status = EFCT_SCSI_STATUS_NO_IO; + break; + case SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS: + scsi_status = EFCT_SCSI_STATUS_ABORT_IN_PROGRESS; + break; + default: + /*we have seen 0x15 (abort in progress)*/ + scsi_status = EFCT_SCSI_STATUS_ERROR; + break; + } + break; + case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE: + scsi_status = EFCT_SCSI_STATUS_CHECK_RESPONSE; + break; + default: + scsi_status = EFCT_SCSI_STATUS_ERROR; + break; + } + /* invoke callback */ + abort_cb(io->io_to_abort, 
scsi_status, 0, abort_cb_arg); + +done: + /* done with IO to abort,efct_ref_get(): efct_scsi_tgt_abort_io() */ + kref_put(&io->io_to_abort->ref, io->io_to_abort->release); + + efct_io_pool_io_free(efct->xport->io_pool, io); + + efct_scsi_check_pending(efct); + return 0; +} + +int +efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg) +{ + struct efct *efct; + struct efct_xport *xport; + int rc; + struct efct_io *abort_io = NULL; + + efct = io->efct; + xport = efct->xport; + + /* take a reference on IO being aborted */ + if (kref_get_unless_zero(&io->ref) == 0) { + /* command no longer active */ + scsi_io_printf(io, "command no longer active\n"); + return -EIO; + } + + /* + * allocate a new IO to send the abort request. Use efct_io_alloc() + * directly, as we need an IO object that will not fail allocation + * due to allocations being disabled (in efct_scsi_io_alloc()) + */ + abort_io = efct_io_pool_io_alloc(efct->xport->io_pool); + if (!abort_io) { + atomic_add_return(1, &xport->io_alloc_failed_count); + kref_put(&io->ref, io->release); + return -EIO; + } + + /* Save the target server callback and argument */ + /* set generic fields */ + abort_io->cmd_tgt = true; + abort_io->node = io->node; + + /* set type and abort-specific fields */ + abort_io->io_type = EFCT_IO_TYPE_ABORT; + abort_io->display_name = "tgt_abort"; + abort_io->io_to_abort = io; + abort_io->send_abts = false; + abort_io->abort_cb = cb; + abort_io->abort_cb_arg = arg; + + /* now dispatch IO */ + rc = efct_scsi_io_dispatch_abort(abort_io, efct_target_abort_cb); + if (rc) + kref_put(&io->ref, io->release); + return rc; +} + +void +efct_scsi_io_complete(struct efct_io *io) +{ + if (io->io_free) { + efc_log_debug(io->efct, "completion for non-busy io tag 0x%x\n", + io->tag); + return; + } + + scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name); + kref_put(&io->ref, io->release); +} diff --git a/drivers/scsi/elx/efct/efct_scsi.h b/drivers/scsi/elx/efct/efct_scsi.h new file mode 100644 index 000000000..b04faffa3 --- /dev/null +++ b/drivers/scsi/elx/efct/efct_scsi.h @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
+ */ + +#if !defined(__EFCT_SCSI_H__) +#define __EFCT_SCSI_H__ +#include +#include + +/* efct_scsi_rcv_cmd() efct_scsi_rcv_tmf() flags */ +#define EFCT_SCSI_CMD_DIR_IN (1 << 0) +#define EFCT_SCSI_CMD_DIR_OUT (1 << 1) +#define EFCT_SCSI_CMD_SIMPLE (1 << 2) +#define EFCT_SCSI_CMD_HEAD_OF_QUEUE (1 << 3) +#define EFCT_SCSI_CMD_ORDERED (1 << 4) +#define EFCT_SCSI_CMD_UNTAGGED (1 << 5) +#define EFCT_SCSI_CMD_ACA (1 << 6) +#define EFCT_SCSI_FIRST_BURST_ERR (1 << 7) +#define EFCT_SCSI_FIRST_BURST_ABORTED (1 << 8) + +/* efct_scsi_send_rd_data/recv_wr_data/send_resp flags */ +#define EFCT_SCSI_LAST_DATAPHASE (1 << 0) +#define EFCT_SCSI_NO_AUTO_RESPONSE (1 << 1) +#define EFCT_SCSI_LOW_LATENCY (1 << 2) + +#define EFCT_SCSI_SNS_BUF_VALID(sense) ((sense) && \ + (0x70 == (((const u8 *)(sense))[0] & 0x70))) + +#define EFCT_SCSI_WQ_STEERING_SHIFT 16 +#define EFCT_SCSI_WQ_STEERING_MASK (0xf << EFCT_SCSI_WQ_STEERING_SHIFT) +#define EFCT_SCSI_WQ_STEERING_CLASS (0 << EFCT_SCSI_WQ_STEERING_SHIFT) +#define EFCT_SCSI_WQ_STEERING_REQUEST (1 << EFCT_SCSI_WQ_STEERING_SHIFT) +#define EFCT_SCSI_WQ_STEERING_CPU (2 << EFCT_SCSI_WQ_STEERING_SHIFT) + +#define EFCT_SCSI_WQ_CLASS_SHIFT (20) +#define EFCT_SCSI_WQ_CLASS_MASK (0xf << EFCT_SCSI_WQ_CLASS_SHIFT) +#define EFCT_SCSI_WQ_CLASS(x) ((x & EFCT_SCSI_WQ_CLASS_MASK) << \ + EFCT_SCSI_WQ_CLASS_SHIFT) + +#define EFCT_SCSI_WQ_CLASS_LOW_LATENCY 1 + +struct efct_scsi_cmd_resp { + u8 scsi_status; + u16 scsi_status_qualifier; + u8 *response_data; + u32 response_data_length; + u8 *sense_data; + u32 sense_data_length; + int residual; + u32 response_wire_length; +}; + +struct efct_vport { + struct efct *efct; + bool is_vport; + struct fc_host_statistics fc_host_stats; + struct Scsi_Host *shost; + struct fc_vport *fc_vport; + u64 npiv_wwpn; + u64 npiv_wwnn; +}; + +/* Status values returned by IO callbacks */ +enum efct_scsi_io_status { + EFCT_SCSI_STATUS_GOOD = 0, + EFCT_SCSI_STATUS_ABORTED, + EFCT_SCSI_STATUS_ERROR, + EFCT_SCSI_STATUS_DIF_GUARD_ERR, + EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR, + EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR, + EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR, + EFCT_SCSI_STATUS_PROTOCOL_CRC_ERROR, + EFCT_SCSI_STATUS_NO_IO, + EFCT_SCSI_STATUS_ABORT_IN_PROGRESS, + EFCT_SCSI_STATUS_CHECK_RESPONSE, + EFCT_SCSI_STATUS_COMMAND_TIMEOUT, + EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED, + EFCT_SCSI_STATUS_SHUTDOWN, + EFCT_SCSI_STATUS_NEXUS_LOST, +}; + +struct efct_node; +struct efct_io; +struct efc_node; +struct efc_nport; + +/* Callback used by send_rd_data(), recv_wr_data(), send_resp() */ +typedef int (*efct_scsi_io_cb_t)(struct efct_io *io, + enum efct_scsi_io_status status, + u32 flags, void *arg); + +/* Callback used by send_rd_io(), send_wr_io() */ +typedef int (*efct_scsi_rsp_io_cb_t)(struct efct_io *io, + enum efct_scsi_io_status status, + struct efct_scsi_cmd_resp *rsp, + u32 flags, void *arg); + +/* efct_scsi_cb_t flags */ +#define EFCT_SCSI_IO_CMPL (1 << 0) +/* IO completed, response sent */ +#define EFCT_SCSI_IO_CMPL_RSP_SENT (1 << 1) +#define EFCT_SCSI_IO_ABORTED (1 << 2) + +/* efct_scsi_recv_tmf() request values */ +enum efct_scsi_tmf_cmd { + EFCT_SCSI_TMF_ABORT_TASK = 1, + EFCT_SCSI_TMF_QUERY_TASK_SET, + EFCT_SCSI_TMF_ABORT_TASK_SET, + EFCT_SCSI_TMF_CLEAR_TASK_SET, + EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT, + EFCT_SCSI_TMF_LOGICAL_UNIT_RESET, + EFCT_SCSI_TMF_CLEAR_ACA, + EFCT_SCSI_TMF_TARGET_RESET, +}; + +/* efct_scsi_send_tmf_resp() response values */ +enum efct_scsi_tmf_resp { + EFCT_SCSI_TMF_FUNCTION_COMPLETE = 1, + EFCT_SCSI_TMF_FUNCTION_SUCCEEDED, + 
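	/*
	 * Editorial note, not part of the driver: efct_scsi_send_tmf_resp()
	 * in efct_scsi.c maps these onto FCP response codes roughly as
	 *
	 *	COMPLETE / SUCCEEDED / IO_NOT_FOUND -> FCP_TMF_CMPL
	 *	REJECTED (and anything unknown)     -> FCP_TMF_REJECTED
	 *	INCORRECT_LOGICAL_UNIT_NUMBER       -> FCP_TMF_INVALID_LUN
	 *	SERVICE_DELIVERY                    -> FCP_TMF_FAILED
	 */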
EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND, + EFCT_SCSI_TMF_FUNCTION_REJECTED, + EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER, + EFCT_SCSI_TMF_SERVICE_DELIVERY, +}; + +struct efct_scsi_sgl { + uintptr_t addr; + uintptr_t dif_addr; + size_t len; +}; + +enum efct_scsi_io_role { + EFCT_SCSI_IO_ROLE_ORIGINATOR, + EFCT_SCSI_IO_ROLE_RESPONDER, +}; + +struct efct_io * +efct_scsi_io_alloc(struct efct_node *node); +void efct_scsi_io_free(struct efct_io *io); +struct efct_io *efct_io_get_instance(struct efct *efct, u32 index); + +int efct_scsi_tgt_driver_init(void); +int efct_scsi_tgt_driver_exit(void); +int efct_scsi_tgt_new_device(struct efct *efct); +int efct_scsi_tgt_del_device(struct efct *efct); +int +efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport); +void +efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport); + +int +efct_scsi_new_initiator(struct efc *efc, struct efc_node *node); + +enum efct_scsi_del_initiator_reason { + EFCT_SCSI_INITIATOR_DELETED, + EFCT_SCSI_INITIATOR_MISSING, +}; + +int +efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason); +void +efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb, u32 cdb_len, + u32 flags); +int +efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd, + struct efct_io *abortio, u32 flags); +int +efct_scsi_send_rd_data(struct efct_io *io, u32 flags, struct efct_scsi_sgl *sgl, + u32 sgl_count, u64 wire_len, efct_scsi_io_cb_t cb, void *arg); +int +efct_scsi_recv_wr_data(struct efct_io *io, u32 flags, struct efct_scsi_sgl *sgl, + u32 sgl_count, u64 wire_len, efct_scsi_io_cb_t cb, void *arg); +int +efct_scsi_send_resp(struct efct_io *io, u32 flags, + struct efct_scsi_cmd_resp *rsp, efct_scsi_io_cb_t cb, void *arg); +int +efct_scsi_send_tmf_resp(struct efct_io *io, enum efct_scsi_tmf_resp rspcode, + u8 addl_rsp_info[3], efct_scsi_io_cb_t cb, void *arg); +int +efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg); + +void efct_scsi_io_complete(struct efct_io *io); + +int efct_scsi_reg_fc_transport(void); +void efct_scsi_release_fc_transport(void); +int efct_scsi_new_device(struct efct *efct); +void efct_scsi_del_device(struct efct *efct); +void _efct_scsi_io_free(struct kref *arg); + +int +efct_scsi_del_vport(struct efct *efct, struct Scsi_Host *shost); +struct efct_vport * +efct_scsi_new_vport(struct efct *efct, struct device *dev); + +int efct_scsi_io_dispatch(struct efct_io *io, void *cb); +int efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb); +void efct_scsi_check_pending(struct efct *efct); +struct efct_io * +efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr); + +#endif /* __EFCT_SCSI_H__ */ diff --git a/drivers/scsi/elx/efct/efct_unsol.c b/drivers/scsi/elx/efct/efct_unsol.c new file mode 100644 index 000000000..e6addab66 --- /dev/null +++ b/drivers/scsi/elx/efct/efct_unsol.c @@ -0,0 +1,492 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#include "efct_driver.h" +#include "efct_unsol.h" + +#define frame_printf(efct, hdr, fmt, ...) 
\ + do { \ + char s_id_text[16]; \ + efc_node_fcid_display(ntoh24((hdr)->fh_s_id), \ + s_id_text, sizeof(s_id_text)); \ + efc_log_debug(efct, "[%06x.%s] %02x/%04x/%04x: " fmt, \ + ntoh24((hdr)->fh_d_id), s_id_text, \ + (hdr)->fh_r_ctl, be16_to_cpu((hdr)->fh_ox_id), \ + be16_to_cpu((hdr)->fh_rx_id), ##__VA_ARGS__); \ + } while (0) + +static struct efct_node * +efct_node_find(struct efct *efct, u32 port_id, u32 node_id) +{ + struct efct_node *node; + u64 id = (u64)port_id << 32 | node_id; + + /* + * During node shutdown, Lookup will be removed first, + * before announcing to backend. So, no new IOs will be allowed + */ + /* Find a target node, given s_id and d_id */ + node = xa_load(&efct->lookup, id); + if (node) + kref_get(&node->ref); + + return node; +} + +static int +efct_dispatch_frame(struct efct *efct, struct efc_hw_sequence *seq) +{ + struct efct_node *node; + struct fc_frame_header *hdr; + u32 s_id, d_id; + + hdr = seq->header->dma.virt; + + /* extract the s_id and d_id */ + s_id = ntoh24(hdr->fh_s_id); + d_id = ntoh24(hdr->fh_d_id); + + if (!(hdr->fh_type == FC_TYPE_FCP || hdr->fh_type == FC_TYPE_BLS)) + return -EIO; + + if (hdr->fh_type == FC_TYPE_FCP) { + node = efct_node_find(efct, d_id, s_id); + if (!node) { + efc_log_err(efct, + "Node not found, drop cmd d_id:%x s_id:%x\n", + d_id, s_id); + efct_hw_sequence_free(&efct->hw, seq); + return 0; + } + + efct_dispatch_fcp_cmd(node, seq); + } else { + node = efct_node_find(efct, d_id, s_id); + if (!node) { + efc_log_err(efct, "ABTS: Node not found, d_id:%x s_id:%x\n", + d_id, s_id); + return -EIO; + } + + efc_log_err(efct, "Received ABTS for Node:%p\n", node); + efct_node_recv_abts_frame(node, seq); + } + + kref_put(&node->ref, node->release); + efct_hw_sequence_free(&efct->hw, seq); + return 0; +} + +int +efct_unsolicited_cb(void *arg, struct efc_hw_sequence *seq) +{ + struct efct *efct = arg; + + /* Process FCP command */ + if (!efct_dispatch_frame(efct, seq)) + return 0; + + /* Forward frame to discovery lib */ + efc_dispatch_frame(efct->efcport, seq); + return 0; +} + +static int +efct_fc_tmf_rejected_cb(struct efct_io *io, + enum efct_scsi_io_status scsi_status, + u32 flags, void *arg) +{ + efct_scsi_io_free(io); + return 0; +} + +static void +efct_dispatch_unsol_tmf(struct efct_io *io, u8 tm_flags, u32 lun) +{ + u32 i; + struct { + u32 mask; + enum efct_scsi_tmf_cmd cmd; + } tmflist[] = { + {FCP_TMF_ABT_TASK_SET, EFCT_SCSI_TMF_ABORT_TASK_SET}, + {FCP_TMF_CLR_TASK_SET, EFCT_SCSI_TMF_CLEAR_TASK_SET}, + {FCP_TMF_LUN_RESET, EFCT_SCSI_TMF_LOGICAL_UNIT_RESET}, + {FCP_TMF_TGT_RESET, EFCT_SCSI_TMF_TARGET_RESET}, + {FCP_TMF_CLR_ACA, EFCT_SCSI_TMF_CLEAR_ACA} }; + + io->exp_xfer_len = 0; + + for (i = 0; i < ARRAY_SIZE(tmflist); i++) { + if (tmflist[i].mask & tm_flags) { + io->tmf_cmd = tmflist[i].cmd; + efct_scsi_recv_tmf(io, lun, tmflist[i].cmd, NULL, 0); + break; + } + } + if (i == ARRAY_SIZE(tmflist)) { + /* Not handled */ + efc_log_err(io->node->efct, "TMF x%x rejected\n", tm_flags); + efct_scsi_send_tmf_resp(io, EFCT_SCSI_TMF_FUNCTION_REJECTED, + NULL, efct_fc_tmf_rejected_cb, NULL); + } +} + +static int +efct_validate_fcp_cmd(struct efct *efct, struct efc_hw_sequence *seq) +{ + /* + * If we received less than FCP_CMND_IU bytes, assume that the frame is + * corrupted in some way and drop it. + * This was seen when jamming the FCTL + * fill bytes field. 
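	 *
	 * Editorial note, not in the original comment: the check below simply
	 * compares the payload length against sizeof(struct fcp_cmnd), the
	 * fixed-size part of the FCP_CMND IU (8-byte LUN, task codes, 16-byte
	 * CDB and the 4-byte FCP_DL field); anything shorter is logged with
	 * its OX_ID and dropped.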
+ */ + if (seq->payload->dma.len < sizeof(struct fcp_cmnd)) { + struct fc_frame_header *fchdr = seq->header->dma.virt; + + efc_log_debug(efct, + "drop ox_id %04x payload (%zd) less than (%zd)\n", + be16_to_cpu(fchdr->fh_ox_id), + seq->payload->dma.len, sizeof(struct fcp_cmnd)); + return -EIO; + } + return 0; +} + +static void +efct_populate_io_fcp_cmd(struct efct_io *io, struct fcp_cmnd *cmnd, + struct fc_frame_header *fchdr, bool sit) +{ + io->init_task_tag = be16_to_cpu(fchdr->fh_ox_id); + /* note, tgt_task_tag, hw_tag set when HW io is allocated */ + io->exp_xfer_len = be32_to_cpu(cmnd->fc_dl); + io->transferred = 0; + + /* The upper 7 bits of CS_CTL is the frame priority thru the SAN. + * Our assertion here is, the priority given to a frame containing + * the FCP cmd should be the priority given to ALL frames contained + * in that IO. Thus we need to save the incoming CS_CTL here. + */ + if (ntoh24(fchdr->fh_f_ctl) & FC_FC_RES_B17) + io->cs_ctl = fchdr->fh_cs_ctl; + else + io->cs_ctl = 0; + + io->seq_init = sit; +} + +static u32 +efct_get_flags_fcp_cmd(struct fcp_cmnd *cmnd) +{ + u32 flags = 0; + + switch (cmnd->fc_pri_ta & FCP_PTA_MASK) { + case FCP_PTA_SIMPLE: + flags |= EFCT_SCSI_CMD_SIMPLE; + break; + case FCP_PTA_HEADQ: + flags |= EFCT_SCSI_CMD_HEAD_OF_QUEUE; + break; + case FCP_PTA_ORDERED: + flags |= EFCT_SCSI_CMD_ORDERED; + break; + case FCP_PTA_ACA: + flags |= EFCT_SCSI_CMD_ACA; + break; + } + if (cmnd->fc_flags & FCP_CFL_WRDATA) + flags |= EFCT_SCSI_CMD_DIR_IN; + if (cmnd->fc_flags & FCP_CFL_RDDATA) + flags |= EFCT_SCSI_CMD_DIR_OUT; + + return flags; +} + +static void +efct_sframe_common_send_cb(void *arg, u8 *cqe, int status) +{ + struct efct_hw_send_frame_context *ctx = arg; + struct efct_hw *hw = ctx->hw; + + /* Free WQ completion callback */ + efct_hw_reqtag_free(hw, ctx->wqcb); + + /* Free sequence */ + efct_hw_sequence_free(hw, ctx->seq); +} + +static int +efct_sframe_common_send(struct efct_node *node, + struct efc_hw_sequence *seq, + enum fc_rctl r_ctl, u32 f_ctl, + u8 type, void *payload, u32 payload_len) +{ + struct efct *efct = node->efct; + struct efct_hw *hw = &efct->hw; + int rc = 0; + struct fc_frame_header *req_hdr = seq->header->dma.virt; + struct fc_frame_header hdr; + struct efct_hw_send_frame_context *ctx; + + u32 heap_size = seq->payload->dma.size; + uintptr_t heap_phys_base = seq->payload->dma.phys; + u8 *heap_virt_base = seq->payload->dma.virt; + u32 heap_offset = 0; + + /* Build the FC header reusing the RQ header DMA buffer */ + memset(&hdr, 0, sizeof(hdr)); + hdr.fh_r_ctl = r_ctl; + /* send it back to whomever sent it to us */ + memcpy(hdr.fh_d_id, req_hdr->fh_s_id, sizeof(hdr.fh_d_id)); + memcpy(hdr.fh_s_id, req_hdr->fh_d_id, sizeof(hdr.fh_s_id)); + hdr.fh_type = type; + hton24(hdr.fh_f_ctl, f_ctl); + hdr.fh_ox_id = req_hdr->fh_ox_id; + hdr.fh_rx_id = req_hdr->fh_rx_id; + hdr.fh_cs_ctl = 0; + hdr.fh_df_ctl = 0; + hdr.fh_seq_cnt = 0; + hdr.fh_parm_offset = 0; + + /* + * send_frame_seq_id is an atomic, we just let it increment, + * while storing only the low 8 bits to hdr->seq_id + */ + hdr.fh_seq_id = (u8)atomic_add_return(1, &hw->send_frame_seq_id); + hdr.fh_seq_id--; + + /* Allocate and fill in the send frame request context */ + ctx = (void *)(heap_virt_base + heap_offset); + heap_offset += sizeof(*ctx); + if (heap_offset > heap_size) { + efc_log_err(efct, "Fill send frame failed offset %d size %d\n", + heap_offset, heap_size); + return -EIO; + } + + memset(ctx, 0, sizeof(*ctx)); + + /* Save sequence */ + ctx->seq = seq; + + /* Allocate a response 
payload DMA buffer from the heap */ + ctx->payload.phys = heap_phys_base + heap_offset; + ctx->payload.virt = heap_virt_base + heap_offset; + ctx->payload.size = payload_len; + ctx->payload.len = payload_len; + heap_offset += payload_len; + if (heap_offset > heap_size) { + efc_log_err(efct, "Fill send frame failed offset %d size %d\n", + heap_offset, heap_size); + return -EIO; + } + + /* Copy the payload in */ + memcpy(ctx->payload.virt, payload, payload_len); + + /* Send */ + rc = efct_hw_send_frame(&efct->hw, (void *)&hdr, FC_SOF_N3, + FC_EOF_T, &ctx->payload, ctx, + efct_sframe_common_send_cb, ctx); + if (rc) + efc_log_debug(efct, "efct_hw_send_frame failed: %d\n", rc); + + return rc; +} + +static int +efct_sframe_send_fcp_rsp(struct efct_node *node, struct efc_hw_sequence *seq, + void *rsp, u32 rsp_len) +{ + return efct_sframe_common_send(node, seq, FC_RCTL_DD_CMD_STATUS, + FC_FC_EX_CTX | + FC_FC_LAST_SEQ | + FC_FC_END_SEQ | + FC_FC_SEQ_INIT, + FC_TYPE_FCP, + rsp, rsp_len); +} + +static int +efct_sframe_send_task_set_full_or_busy(struct efct_node *node, + struct efc_hw_sequence *seq) +{ + struct fcp_resp_with_ext fcprsp; + struct fcp_cmnd *fcpcmd = seq->payload->dma.virt; + int rc = 0; + unsigned long flags = 0; + struct efct *efct = node->efct; + + /* construct task set full or busy response */ + memset(&fcprsp, 0, sizeof(fcprsp)); + spin_lock_irqsave(&node->active_ios_lock, flags); + fcprsp.resp.fr_status = list_empty(&node->active_ios) ? + SAM_STAT_BUSY : SAM_STAT_TASK_SET_FULL; + spin_unlock_irqrestore(&node->active_ios_lock, flags); + *((u32 *)&fcprsp.ext.fr_resid) = be32_to_cpu(fcpcmd->fc_dl); + + /* send it using send_frame */ + rc = efct_sframe_send_fcp_rsp(node, seq, &fcprsp, sizeof(fcprsp)); + if (rc) + efc_log_debug(efct, "efct_sframe_send_fcp_rsp failed %d\n", rc); + + return rc; +} + +int +efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq) +{ + struct efct *efct = node->efct; + struct fc_frame_header *fchdr = seq->header->dma.virt; + struct fcp_cmnd *cmnd = NULL; + struct efct_io *io = NULL; + u32 lun; + + if (!seq->payload) { + efc_log_err(efct, "Sequence payload is NULL.\n"); + return -EIO; + } + + cmnd = seq->payload->dma.virt; + + /* perform FCP_CMND validation check(s) */ + if (efct_validate_fcp_cmd(efct, seq)) + return -EIO; + + lun = scsilun_to_int(&cmnd->fc_lun); + if (lun == U32_MAX) + return -EIO; + + io = efct_scsi_io_alloc(node); + if (!io) { + int rc; + + /* Use SEND_FRAME to send task set full or busy */ + rc = efct_sframe_send_task_set_full_or_busy(node, seq); + if (rc) + efc_log_err(efct, "Failed to send busy task: %d\n", rc); + + return rc; + } + + io->hw_priv = seq->hw_priv; + + io->app_id = 0; + + /* RQ pair, if we got here, SIT=1 */ + efct_populate_io_fcp_cmd(io, cmnd, fchdr, true); + + if (cmnd->fc_tm_flags) { + efct_dispatch_unsol_tmf(io, cmnd->fc_tm_flags, lun); + } else { + u32 flags = efct_get_flags_fcp_cmd(cmnd); + + if (cmnd->fc_flags & FCP_CFL_LEN_MASK) { + efc_log_err(efct, "Additional CDB not supported\n"); + return -EIO; + } + /* + * Can return failure for things like task set full and UAs, + * no need to treat as a dropped frame if rc != 0 + */ + efct_scsi_recv_cmd(io, lun, cmnd->fc_cdb, + sizeof(cmnd->fc_cdb), flags); + } + + return 0; +} + +static int +efct_process_abts(struct efct_io *io, struct fc_frame_header *hdr) +{ + struct efct_node *node = io->node; + struct efct *efct = io->efct; + u16 ox_id = be16_to_cpu(hdr->fh_ox_id); + u16 rx_id = be16_to_cpu(hdr->fh_rx_id); + struct efct_io *abortio; + + /* Find IO and 
attempt to take a reference on it */ + abortio = efct_io_find_tgt_io(efct, node, ox_id, rx_id); + + if (abortio) { + /* Got a reference on the IO. Hold it until backend + * is notified below + */ + efc_log_info(node->efct, "Abort ox_id [%04x] rx_id [%04x]\n", + ox_id, rx_id); + + /* + * Save the ox_id for the ABTS as the init_task_tag in our + * manufactured + * TMF IO object + */ + io->display_name = "abts"; + io->init_task_tag = ox_id; + /* don't set tgt_task_tag, don't want to confuse with XRI */ + + /* + * Save the rx_id from the ABTS as it is + * needed for the BLS response, + * regardless of the IO context's rx_id + */ + io->abort_rx_id = rx_id; + + /* Call target server command abort */ + io->tmf_cmd = EFCT_SCSI_TMF_ABORT_TASK; + efct_scsi_recv_tmf(io, abortio->tgt_io.lun, + EFCT_SCSI_TMF_ABORT_TASK, abortio, 0); + + /* + * Backend will have taken an additional + * reference on the IO if needed; + * done with current reference. + */ + kref_put(&abortio->ref, abortio->release); + } else { + /* + * Either IO was not found or it has been + * freed between finding it + * and attempting to get the reference, + */ + efc_log_info(node->efct, "Abort: ox_id [%04x], IO not found\n", + ox_id); + + /* Send a BA_RJT */ + efct_bls_send_rjt(io, hdr); + } + return 0; +} + +int +efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq) +{ + struct efct *efct = node->efct; + struct fc_frame_header *hdr = seq->header->dma.virt; + struct efct_io *io = NULL; + + node->abort_cnt++; + io = efct_scsi_io_alloc(node); + if (io) { + io->hw_priv = seq->hw_priv; + /* If we got this far, SIT=1 */ + io->seq_init = 1; + + /* fill out generic fields */ + io->efct = efct; + io->node = node; + io->cmd_tgt = true; + + efct_process_abts(io, seq->header->dma.virt); + } else { + efc_log_err(efct, + "SCSI IO allocation failed for ABTS received "); + efc_log_err(efct, "s_id %06x d_id %06x ox_id %04x rx_id %04x\n", + ntoh24(hdr->fh_s_id), ntoh24(hdr->fh_d_id), + be16_to_cpu(hdr->fh_ox_id), + be16_to_cpu(hdr->fh_rx_id)); + } + + return 0; +} diff --git a/drivers/scsi/elx/efct/efct_unsol.h b/drivers/scsi/elx/efct/efct_unsol.h new file mode 100644 index 000000000..16d1e3ba1 --- /dev/null +++ b/drivers/scsi/elx/efct/efct_unsol.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#if !defined(__OSC_UNSOL_H__) +#define __OSC_UNSOL_H__ + +int +efct_unsolicited_cb(void *arg, struct efc_hw_sequence *seq); +int +efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq); +int +efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq); + +#endif /* __OSC_UNSOL_H__ */ diff --git a/drivers/scsi/elx/efct/efct_xport.c b/drivers/scsi/elx/efct/efct_xport.c new file mode 100644 index 000000000..cf4dced20 --- /dev/null +++ b/drivers/scsi/elx/efct/efct_xport.c @@ -0,0 +1,1111 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
+ */ + +#include "efct_driver.h" +#include "efct_unsol.h" + +static struct dentry *efct_debugfs_root; +static atomic_t efct_debugfs_count; + +static const struct scsi_host_template efct_template = { + .module = THIS_MODULE, + .name = EFCT_DRIVER_NAME, + .supported_mode = MODE_TARGET, +}; + +/* globals */ +static struct fc_function_template efct_xport_functions; +static struct fc_function_template efct_vport_functions; + +static struct scsi_transport_template *efct_xport_fc_tt; +static struct scsi_transport_template *efct_vport_fc_tt; + +struct efct_xport * +efct_xport_alloc(struct efct *efct) +{ + struct efct_xport *xport; + + xport = kzalloc(sizeof(*xport), GFP_KERNEL); + if (!xport) + return xport; + + xport->efct = efct; + return xport; +} + +static int +efct_xport_init_debugfs(struct efct *efct) +{ + /* Setup efct debugfs root directory */ + if (!efct_debugfs_root) { + efct_debugfs_root = debugfs_create_dir("efct", NULL); + atomic_set(&efct_debugfs_count, 0); + } + + /* Create a directory for sessions in root */ + if (!efct->sess_debugfs_dir) { + efct->sess_debugfs_dir = debugfs_create_dir("sessions", + efct_debugfs_root); + if (IS_ERR(efct->sess_debugfs_dir)) { + efc_log_err(efct, + "failed to create debugfs entry for sessions\n"); + goto debugfs_fail; + } + atomic_inc(&efct_debugfs_count); + } + + return 0; + +debugfs_fail: + return -EIO; +} + +static void efct_xport_delete_debugfs(struct efct *efct) +{ + /* Remove session debugfs directory */ + debugfs_remove(efct->sess_debugfs_dir); + efct->sess_debugfs_dir = NULL; + atomic_dec(&efct_debugfs_count); + + if (atomic_read(&efct_debugfs_count) == 0) { + /* remove root debugfs directory */ + debugfs_remove(efct_debugfs_root); + efct_debugfs_root = NULL; + } +} + +int +efct_xport_attach(struct efct_xport *xport) +{ + struct efct *efct = xport->efct; + int rc; + + rc = efct_hw_setup(&efct->hw, efct, efct->pci); + if (rc) { + efc_log_err(efct, "%s: Can't setup hardware\n", efct->desc); + return rc; + } + + efct_hw_parse_filter(&efct->hw, (void *)efct->filter_def); + + xport->io_pool = efct_io_pool_create(efct, efct->hw.config.n_sgl); + if (!xport->io_pool) { + efc_log_err(efct, "Can't allocate IO pool\n"); + return -ENOMEM; + } + + return 0; +} + +static void +efct_xport_link_stats_cb(int status, u32 num_counters, + struct efct_hw_link_stat_counts *counters, void *arg) +{ + union efct_xport_stats_u *result = arg; + + result->stats.link_stats.link_failure_error_count = + counters[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter; + result->stats.link_stats.loss_of_sync_error_count = + counters[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter; + result->stats.link_stats.primitive_sequence_error_count = + counters[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter; + result->stats.link_stats.invalid_transmission_word_error_count = + counters[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter; + result->stats.link_stats.crc_error_count = + counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter; + + complete(&result->stats.done); +} + +static void +efct_xport_host_stats_cb(int status, u32 num_counters, + struct efct_hw_host_stat_counts *counters, void *arg) +{ + union efct_xport_stats_u *result = arg; + + result->stats.host_stats.transmit_kbyte_count = + counters[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter; + result->stats.host_stats.receive_kbyte_count = + counters[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter; + result->stats.host_stats.transmit_frame_count = + counters[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter; + result->stats.host_stats.receive_frame_count = + 
counters[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter; + + complete(&result->stats.done); +} + +static void +efct_xport_async_link_stats_cb(int status, u32 num_counters, + struct efct_hw_link_stat_counts *counters, + void *arg) +{ + union efct_xport_stats_u *result = arg; + + result->stats.link_stats.link_failure_error_count = + counters[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter; + result->stats.link_stats.loss_of_sync_error_count = + counters[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter; + result->stats.link_stats.primitive_sequence_error_count = + counters[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter; + result->stats.link_stats.invalid_transmission_word_error_count = + counters[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter; + result->stats.link_stats.crc_error_count = + counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter; +} + +static void +efct_xport_async_host_stats_cb(int status, u32 num_counters, + struct efct_hw_host_stat_counts *counters, + void *arg) +{ + union efct_xport_stats_u *result = arg; + + result->stats.host_stats.transmit_kbyte_count = + counters[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter; + result->stats.host_stats.receive_kbyte_count = + counters[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter; + result->stats.host_stats.transmit_frame_count = + counters[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter; + result->stats.host_stats.receive_frame_count = + counters[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter; +} + +static void +efct_xport_config_stats_timer(struct efct *efct); + +static void +efct_xport_stats_timer_cb(struct timer_list *t) +{ + struct efct_xport *xport = from_timer(xport, t, stats_timer); + struct efct *efct = xport->efct; + + efct_xport_config_stats_timer(efct); +} + +static void +efct_xport_config_stats_timer(struct efct *efct) +{ + u32 timeout = 3 * 1000; + struct efct_xport *xport = NULL; + + if (!efct) { + pr_err("%s: failed to locate EFCT device\n", __func__); + return; + } + + xport = efct->xport; + efct_hw_get_link_stats(&efct->hw, 0, 0, 0, + efct_xport_async_link_stats_cb, + &xport->fc_xport_stats); + efct_hw_get_host_stats(&efct->hw, 0, efct_xport_async_host_stats_cb, + &xport->fc_xport_stats); + + timer_setup(&xport->stats_timer, + &efct_xport_stats_timer_cb, 0); + mod_timer(&xport->stats_timer, + jiffies + msecs_to_jiffies(timeout)); +} + +int +efct_xport_initialize(struct efct_xport *xport) +{ + struct efct *efct = xport->efct; + int rc = 0; + + /* Initialize io lists */ + spin_lock_init(&xport->io_pending_lock); + INIT_LIST_HEAD(&xport->io_pending_list); + atomic_set(&xport->io_active_count, 0); + atomic_set(&xport->io_pending_count, 0); + atomic_set(&xport->io_total_free, 0); + atomic_set(&xport->io_total_pending, 0); + atomic_set(&xport->io_alloc_failed_count, 0); + atomic_set(&xport->io_pending_recursing, 0); + + rc = efct_hw_init(&efct->hw); + if (rc) { + efc_log_err(efct, "efct_hw_init failure\n"); + goto out; + } + + rc = efct_scsi_tgt_new_device(efct); + if (rc) { + efc_log_err(efct, "failed to initialize target\n"); + goto hw_init_out; + } + + rc = efct_scsi_new_device(efct); + if (rc) { + efc_log_err(efct, "failed to initialize initiator\n"); + goto tgt_dev_out; + } + + /* Get FC link and host statistics periodically */ + efct_xport_config_stats_timer(efct); + + efct_xport_init_debugfs(efct); + + return rc; + +tgt_dev_out: + efct_scsi_tgt_del_device(efct); + +hw_init_out: + efct_hw_teardown(&efct->hw); +out: + return rc; +} + +int +efct_xport_status(struct efct_xport *xport, enum efct_xport_status cmd, + union efct_xport_stats_u *result) +{ + int
rc = 0; + struct efct *efct = NULL; + union efct_xport_stats_u value; + + efct = xport->efct; + + switch (cmd) { + case EFCT_XPORT_CONFIG_PORT_STATUS: + if (xport->configured_link_state == 0) { + /* + * Initial state is offline. configured_link_state is + * set to online explicitly when port is brought online + */ + xport->configured_link_state = EFCT_XPORT_PORT_OFFLINE; + } + result->value = xport->configured_link_state; + break; + + case EFCT_XPORT_PORT_STATUS: + /* Determine port status based on link speed. */ + value.value = efct_hw_get_link_speed(&efct->hw); + if (value.value == 0) + result->value = EFCT_XPORT_PORT_OFFLINE; + else + result->value = EFCT_XPORT_PORT_ONLINE; + break; + + case EFCT_XPORT_LINK_SPEED: + result->value = efct_hw_get_link_speed(&efct->hw); + break; + + case EFCT_XPORT_LINK_STATISTICS: + memcpy((void *)result, &efct->xport->fc_xport_stats, + sizeof(union efct_xport_stats_u)); + break; + case EFCT_XPORT_LINK_STAT_RESET: { + /* Create a completion to synchronize the stat reset process */ + init_completion(&result->stats.done); + + /* First reset the link stats */ + rc = efct_hw_get_link_stats(&efct->hw, 0, 1, 1, + efct_xport_link_stats_cb, result); + if (rc) + break; + + /* Wait for completion to be signaled when the cmd completes */ + if (wait_for_completion_interruptible(&result->stats.done)) { + /* Undefined failure */ + efc_log_debug(efct, "sem wait failed\n"); + rc = -EIO; + break; + } + + /* Next reset the host stats */ + rc = efct_hw_get_host_stats(&efct->hw, 1, + efct_xport_host_stats_cb, result); + + if (rc) + break; + + /* Wait for completion to be signaled when the cmd completes */ + if (wait_for_completion_interruptible(&result->stats.done)) { + /* Undefined failure */ + efc_log_debug(efct, "sem wait failed\n"); + rc = -EIO; + break; + } + break; + } + default: + rc = -EIO; + break; + } + + return rc; +} + +static int +efct_get_link_supported_speeds(struct efct *efct) +{ + u32 supported_speeds = 0; + u32 link_module_type, i; + struct { + u32 lmt_speed; + u32 speed; + } supported_speed_list[] = { + {SLI4_LINK_MODULE_TYPE_1GB, FC_PORTSPEED_1GBIT}, + {SLI4_LINK_MODULE_TYPE_2GB, FC_PORTSPEED_2GBIT}, + {SLI4_LINK_MODULE_TYPE_4GB, FC_PORTSPEED_4GBIT}, + {SLI4_LINK_MODULE_TYPE_8GB, FC_PORTSPEED_8GBIT}, + {SLI4_LINK_MODULE_TYPE_16GB, FC_PORTSPEED_16GBIT}, + {SLI4_LINK_MODULE_TYPE_32GB, FC_PORTSPEED_32GBIT}, + {SLI4_LINK_MODULE_TYPE_64GB, FC_PORTSPEED_64GBIT}, + {SLI4_LINK_MODULE_TYPE_128GB, FC_PORTSPEED_128GBIT}, + }; + + link_module_type = sli_get_lmt(&efct->hw.sli); + + /* populate link supported speeds */ + for (i = 0; i < ARRAY_SIZE(supported_speed_list); i++) { + if (link_module_type & supported_speed_list[i].lmt_speed) + supported_speeds |= supported_speed_list[i].speed; + } + + return supported_speeds; +} + +int +efct_scsi_new_device(struct efct *efct) +{ + struct Scsi_Host *shost = NULL; + int error = 0; + struct efct_vport *vport = NULL; + + shost = scsi_host_alloc(&efct_template, sizeof(*vport)); + if (!shost) { + efc_log_err(efct, "failed to allocate Scsi_Host struct\n"); + return -ENOMEM; + } + + /* save shost to initiator-client context */ + efct->shost = shost; + + /* save efct information to shost LLD-specific space */ + vport = (struct efct_vport *)shost->hostdata; + vport->efct = efct; + + /* + * Set initial can_queue value to the max SCSI IOs. This is the maximum + * global queue depth (as opposed to the per-LUN queue depth -- + * .cmd_per_lun This may need to be adjusted for I+T mode. 
+ */ + shost->can_queue = efct->hw.config.n_io; + shost->max_cmd_len = 16; /* 16-byte CDBs */ + shost->max_id = 0xffff; + shost->max_lun = 0xffffffff; + + /* + * can only accept (from mid-layer) as many SGEs as we've + * pre-registered + */ + shost->sg_tablesize = sli_get_max_sgl(&efct->hw.sli); + + /* attach FC Transport template to shost */ + shost->transportt = efct_xport_fc_tt; + efc_log_debug(efct, "transport template=%p\n", efct_xport_fc_tt); + + /* get pci_dev structure and add host to SCSI ML */ + error = scsi_add_host_with_dma(shost, &efct->pci->dev, + &efct->pci->dev); + if (error) { + efc_log_debug(efct, "failed scsi_add_host_with_dma\n"); + return -EIO; + } + + /* Set symbolic name for host port */ + snprintf(fc_host_symbolic_name(shost), + sizeof(fc_host_symbolic_name(shost)), + "Emulex %s FV%s DV%s", efct->model, + efct->hw.sli.fw_name[0], EFCT_DRIVER_VERSION); + + /* Set host port supported classes */ + fc_host_supported_classes(shost) = FC_COS_CLASS3; + + fc_host_supported_speeds(shost) = efct_get_link_supported_speeds(efct); + + fc_host_node_name(shost) = efct_get_wwnn(&efct->hw); + fc_host_port_name(shost) = efct_get_wwpn(&efct->hw); + fc_host_max_npiv_vports(shost) = 128; + + return 0; +} + +struct scsi_transport_template * +efct_attach_fc_transport(void) +{ + struct scsi_transport_template *efct_fc_template = NULL; + + efct_fc_template = fc_attach_transport(&efct_xport_functions); + + if (!efct_fc_template) + pr_err("failed to attach EFCT with fc transport\n"); + + return efct_fc_template; +} + +struct scsi_transport_template * +efct_attach_vport_fc_transport(void) +{ + struct scsi_transport_template *efct_fc_template = NULL; + + efct_fc_template = fc_attach_transport(&efct_vport_functions); + + if (!efct_fc_template) + pr_err("failed to attach EFCT with fc transport\n"); + + return efct_fc_template; +} + +int +efct_scsi_reg_fc_transport(void) +{ + /* attach to appropriate scsi_tranport_* module */ + efct_xport_fc_tt = efct_attach_fc_transport(); + if (!efct_xport_fc_tt) { + pr_err("%s: failed to attach to scsi_transport_*", __func__); + return -EIO; + } + + efct_vport_fc_tt = efct_attach_vport_fc_transport(); + if (!efct_vport_fc_tt) { + pr_err("%s: failed to attach to scsi_transport_*", __func__); + efct_release_fc_transport(efct_xport_fc_tt); + efct_xport_fc_tt = NULL; + return -EIO; + } + + return 0; +} + +void +efct_scsi_release_fc_transport(void) +{ + /* detach from scsi_transport_* */ + efct_release_fc_transport(efct_xport_fc_tt); + efct_xport_fc_tt = NULL; + if (efct_vport_fc_tt) + efct_release_fc_transport(efct_vport_fc_tt); + + efct_vport_fc_tt = NULL; +} + +void +efct_xport_detach(struct efct_xport *xport) +{ + struct efct *efct = xport->efct; + + /* free resources associated with target-server and initiator-client */ + efct_scsi_tgt_del_device(efct); + + efct_scsi_del_device(efct); + + /*Shutdown FC Statistics timer*/ + if (timer_pending(&xport->stats_timer)) + del_timer(&xport->stats_timer); + + efct_hw_teardown(&efct->hw); + + efct_xport_delete_debugfs(efct); +} + +static void +efct_xport_domain_free_cb(struct efc *efc, void *arg) +{ + struct completion *done = arg; + + complete(done); +} + +int +efct_xport_control(struct efct_xport *xport, enum efct_xport_ctrl cmd, ...) 
+{ + u32 rc = 0; + struct efct *efct = NULL; + va_list argp; + + efct = xport->efct; + + switch (cmd) { + case EFCT_XPORT_PORT_ONLINE: { + /* Bring the port on-line */ + rc = efct_hw_port_control(&efct->hw, EFCT_HW_PORT_INIT, 0, + NULL, NULL); + if (rc) + efc_log_err(efct, + "%s: Can't init port\n", efct->desc); + else + xport->configured_link_state = cmd; + break; + } + case EFCT_XPORT_PORT_OFFLINE: { + if (efct_hw_port_control(&efct->hw, EFCT_HW_PORT_SHUTDOWN, 0, + NULL, NULL)) + efc_log_err(efct, "port shutdown failed\n"); + else + xport->configured_link_state = cmd; + break; + } + + case EFCT_XPORT_SHUTDOWN: { + struct completion done; + unsigned long timeout; + + /* if a PHYSDEV reset was performed (e.g. hw dump), will affect + * all PCI functions; orderly shutdown won't work, + * just force free + */ + if (sli_reset_required(&efct->hw.sli)) { + struct efc_domain *domain = efct->efcport->domain; + + if (domain) + efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST, + domain); + } else { + efct_hw_port_control(&efct->hw, EFCT_HW_PORT_SHUTDOWN, + 0, NULL, NULL); + } + + init_completion(&done); + + efc_register_domain_free_cb(efct->efcport, + efct_xport_domain_free_cb, &done); + + efc_log_debug(efct, "Waiting %d seconds for domain shutdown\n", + (EFC_SHUTDOWN_TIMEOUT_USEC / 1000000)); + + timeout = usecs_to_jiffies(EFC_SHUTDOWN_TIMEOUT_USEC); + if (!wait_for_completion_timeout(&done, timeout)) { + efc_log_err(efct, "Domain shutdown timed out!!\n"); + WARN_ON(1); + } + + efc_register_domain_free_cb(efct->efcport, NULL, NULL); + + /* Free up any saved virtual ports */ + efc_vport_del_all(efct->efcport); + break; + } + + /* + * Set wwnn for the port. This will be used instead of the default + * provided by FW. + */ + case EFCT_XPORT_WWNN_SET: { + u64 wwnn; + + /* Retrieve arguments */ + va_start(argp, cmd); + wwnn = va_arg(argp, uint64_t); + va_end(argp); + + efc_log_debug(efct, " WWNN %016llx\n", wwnn); + xport->req_wwnn = wwnn; + + break; + } + /* + * Set wwpn for the port. This will be used instead of the default + * provided by FW. 
+ */ + case EFCT_XPORT_WWPN_SET: { + u64 wwpn; + + /* Retrieve arguments */ + va_start(argp, cmd); + wwpn = va_arg(argp, uint64_t); + va_end(argp); + + efc_log_debug(efct, " WWPN %016llx\n", wwpn); + xport->req_wwpn = wwpn; + + break; + } + + default: + break; + } + return rc; +} + +void +efct_xport_free(struct efct_xport *xport) +{ + if (xport) { + efct_io_pool_free(xport->io_pool); + + kfree(xport); + } +} + +void +efct_release_fc_transport(struct scsi_transport_template *transport_template) +{ + if (transport_template) + pr_err("releasing transport layer\n"); + + /* Releasing FC transport */ + fc_release_transport(transport_template); +} + +static void +efct_xport_remove_host(struct Scsi_Host *shost) +{ + fc_remove_host(shost); +} + +void +efct_scsi_del_device(struct efct *efct) +{ + if (!efct->shost) + return; + + efc_log_debug(efct, "Unregistering with Transport Layer\n"); + efct_xport_remove_host(efct->shost); + efc_log_debug(efct, "Unregistering with SCSI Midlayer\n"); + scsi_remove_host(efct->shost); + scsi_host_put(efct->shost); + efct->shost = NULL; +} + +static void +efct_get_host_port_id(struct Scsi_Host *shost) +{ + struct efct_vport *vport = (struct efct_vport *)shost->hostdata; + struct efct *efct = vport->efct; + struct efc *efc = efct->efcport; + struct efc_nport *nport; + + if (efc->domain && efc->domain->nport) { + nport = efc->domain->nport; + fc_host_port_id(shost) = nport->fc_id; + } +} + +static void +efct_get_host_port_type(struct Scsi_Host *shost) +{ + struct efct_vport *vport = (struct efct_vport *)shost->hostdata; + struct efct *efct = vport->efct; + struct efc *efc = efct->efcport; + int type = FC_PORTTYPE_UNKNOWN; + + if (efc->domain && efc->domain->nport) { + if (efc->domain->is_loop) { + type = FC_PORTTYPE_LPORT; + } else { + struct efc_nport *nport = efc->domain->nport; + + if (nport->is_vport) + type = FC_PORTTYPE_NPIV; + else if (nport->topology == EFC_NPORT_TOPO_P2P) + type = FC_PORTTYPE_PTP; + else if (nport->topology == EFC_NPORT_TOPO_UNKNOWN) + type = FC_PORTTYPE_UNKNOWN; + else + type = FC_PORTTYPE_NPORT; + } + } + fc_host_port_type(shost) = type; +} + +static void +efct_get_host_vport_type(struct Scsi_Host *shost) +{ + fc_host_port_type(shost) = FC_PORTTYPE_NPIV; +} + +static void +efct_get_host_port_state(struct Scsi_Host *shost) +{ + struct efct_vport *vport = (struct efct_vport *)shost->hostdata; + struct efct *efct = vport->efct; + union efct_xport_stats_u status; + int rc; + + rc = efct_xport_status(efct->xport, EFCT_XPORT_PORT_STATUS, &status); + if ((!rc) && (status.value == EFCT_XPORT_PORT_ONLINE)) + fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + else + fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; +} + +static void +efct_get_host_speed(struct Scsi_Host *shost) +{ + struct efct_vport *vport = (struct efct_vport *)shost->hostdata; + struct efct *efct = vport->efct; + struct efc *efc = efct->efcport; + union efct_xport_stats_u speed; + u32 fc_speed = FC_PORTSPEED_UNKNOWN; + int rc; + + if (!efc->domain || !efc->domain->nport) { + fc_host_speed(shost) = fc_speed; + return; + } + + rc = efct_xport_status(efct->xport, EFCT_XPORT_LINK_SPEED, &speed); + if (!rc) { + switch (speed.value) { + case 1000: + fc_speed = FC_PORTSPEED_1GBIT; + break; + case 2000: + fc_speed = FC_PORTSPEED_2GBIT; + break; + case 4000: + fc_speed = FC_PORTSPEED_4GBIT; + break; + case 8000: + fc_speed = FC_PORTSPEED_8GBIT; + break; + case 10000: + fc_speed = FC_PORTSPEED_10GBIT; + break; + case 16000: + fc_speed = FC_PORTSPEED_16GBIT; + break; + case 32000: + fc_speed 
= FC_PORTSPEED_32GBIT; + break; + case 64000: + fc_speed = FC_PORTSPEED_64GBIT; + break; + case 128000: + fc_speed = FC_PORTSPEED_128GBIT; + break; + } + } + + fc_host_speed(shost) = fc_speed; +} + +static void +efct_get_host_fabric_name(struct Scsi_Host *shost) +{ + struct efct_vport *vport = (struct efct_vport *)shost->hostdata; + struct efct *efct = vport->efct; + struct efc *efc = efct->efcport; + + if (efc->domain) { + struct fc_els_flogi *sp = + (struct fc_els_flogi *) + efc->domain->flogi_service_params; + + fc_host_fabric_name(shost) = be64_to_cpu(sp->fl_wwnn); + } +} + +static struct fc_host_statistics * +efct_get_stats(struct Scsi_Host *shost) +{ + struct efct_vport *vport = (struct efct_vport *)shost->hostdata; + struct efct *efct = vport->efct; + union efct_xport_stats_u stats; + struct efct_xport *xport = efct->xport; + int rc = 0; + + rc = efct_xport_status(xport, EFCT_XPORT_LINK_STATISTICS, &stats); + if (rc) { + pr_err("efct_xport_status returned non 0 - %d\n", rc); + return NULL; + } + + vport->fc_host_stats.loss_of_sync_count = + stats.stats.link_stats.loss_of_sync_error_count; + vport->fc_host_stats.link_failure_count = + stats.stats.link_stats.link_failure_error_count; + vport->fc_host_stats.prim_seq_protocol_err_count = + stats.stats.link_stats.primitive_sequence_error_count; + vport->fc_host_stats.invalid_tx_word_count = + stats.stats.link_stats.invalid_transmission_word_error_count; + vport->fc_host_stats.invalid_crc_count = + stats.stats.link_stats.crc_error_count; + /* mbox returns kbyte count so we need to convert to words */ + vport->fc_host_stats.tx_words = + stats.stats.host_stats.transmit_kbyte_count * 256; + /* mbox returns kbyte count so we need to convert to words */ + vport->fc_host_stats.rx_words = + stats.stats.host_stats.receive_kbyte_count * 256; + vport->fc_host_stats.tx_frames = + stats.stats.host_stats.transmit_frame_count; + vport->fc_host_stats.rx_frames = + stats.stats.host_stats.receive_frame_count; + + vport->fc_host_stats.fcp_input_requests = + xport->fcp_stats.input_requests; + vport->fc_host_stats.fcp_output_requests = + xport->fcp_stats.output_requests; + vport->fc_host_stats.fcp_output_megabytes = + xport->fcp_stats.output_bytes >> 20; + vport->fc_host_stats.fcp_input_megabytes = + xport->fcp_stats.input_bytes >> 20; + vport->fc_host_stats.fcp_control_requests = + xport->fcp_stats.control_requests; + + return &vport->fc_host_stats; +} + +static void +efct_reset_stats(struct Scsi_Host *shost) +{ + struct efct_vport *vport = (struct efct_vport *)shost->hostdata; + struct efct *efct = vport->efct; + /* argument has no purpose for this action */ + union efct_xport_stats_u dummy; + int rc; + + rc = efct_xport_status(efct->xport, EFCT_XPORT_LINK_STAT_RESET, &dummy); + if (rc) + pr_err("efct_xport_status returned non 0 - %d\n", rc); +} + +static int +efct_issue_lip(struct Scsi_Host *shost) +{ + struct efct_vport *vport = + shost ? (struct efct_vport *)shost->hostdata : NULL; + struct efct *efct = vport ? vport->efct : NULL; + + if (!shost || !vport || !efct) { + pr_err("%s: shost=%p vport=%p efct=%p\n", __func__, + shost, vport, efct); + return -EPERM; + } + + /* + * Bring the link down gracefully then re-init the link. + * The firmware will re-initialize the Fibre Channel interface as + * required. It does not issue a LIP. 
+ */ + + if (efct_xport_control(efct->xport, EFCT_XPORT_PORT_OFFLINE)) + efc_log_debug(efct, "EFCT_XPORT_PORT_OFFLINE failed\n"); + + if (efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE)) + efc_log_debug(efct, "EFCT_XPORT_PORT_ONLINE failed\n"); + + return 0; +} + +struct efct_vport * +efct_scsi_new_vport(struct efct *efct, struct device *dev) +{ + struct Scsi_Host *shost = NULL; + int error = 0; + struct efct_vport *vport = NULL; + + shost = scsi_host_alloc(&efct_template, sizeof(*vport)); + if (!shost) { + efc_log_err(efct, "failed to allocate Scsi_Host struct\n"); + return NULL; + } + + /* save efct information to shost LLD-specific space */ + vport = (struct efct_vport *)shost->hostdata; + vport->efct = efct; + vport->is_vport = true; + + shost->can_queue = efct->hw.config.n_io; + shost->max_cmd_len = 16; /* 16-byte CDBs */ + shost->max_id = 0xffff; + shost->max_lun = 0xffffffff; + + /* can only accept (from mid-layer) as many SGEs as we've pre-registered */ + shost->sg_tablesize = sli_get_max_sgl(&efct->hw.sli); + + /* attach FC Transport template to shost */ + shost->transportt = efct_vport_fc_tt; + efc_log_debug(efct, "vport transport template=%p\n", + efct_vport_fc_tt); + + /* get pci_dev structure and add host to SCSI ML */ + error = scsi_add_host_with_dma(shost, dev, &efct->pci->dev); + if (error) { + efc_log_debug(efct, "failed scsi_add_host_with_dma\n"); + return NULL; + } + + /* Set symbolic name for host port */ + snprintf(fc_host_symbolic_name(shost), + sizeof(fc_host_symbolic_name(shost)), + "Emulex %s FV%s DV%s", efct->model, efct->hw.sli.fw_name[0], + EFCT_DRIVER_VERSION); + + /* Set host port supported classes */ + fc_host_supported_classes(shost) = FC_COS_CLASS3; + + fc_host_supported_speeds(shost) = efct_get_link_supported_speeds(efct); + vport->shost = shost; + + return vport; +} + +int efct_scsi_del_vport(struct efct *efct, struct Scsi_Host *shost) +{ + if (shost) { + efc_log_debug(efct, + "Unregistering vport with Transport Layer\n"); + efct_xport_remove_host(shost); + efc_log_debug(efct, "Unregistering vport with SCSI Midlayer\n"); + scsi_remove_host(shost); + scsi_host_put(shost); + return 0; + } + return -EIO; +} + +static int +efct_vport_create(struct fc_vport *fc_vport, bool disable) +{ + struct Scsi_Host *shost = fc_vport ? fc_vport->shost : NULL; + struct efct_vport *pport = shost ? + (struct efct_vport *)shost->hostdata : + NULL; + struct efct *efct = pport ? pport->efct : NULL; + struct efct_vport *vport = NULL; + + if (!fc_vport || !shost || !efct) + goto fail; + + vport = efct_scsi_new_vport(efct, &fc_vport->dev); + if (!vport) { + efc_log_err(efct, "failed to create vport\n"); + goto fail; + } + + vport->fc_vport = fc_vport; + vport->npiv_wwpn = fc_vport->port_name; + vport->npiv_wwnn = fc_vport->node_name; + fc_host_node_name(vport->shost) = vport->npiv_wwnn; + fc_host_port_name(vport->shost) = vport->npiv_wwpn; + *(struct efct_vport **)fc_vport->dd_data = vport; + + return 0; + +fail: + return -EIO; +} + +static int +efct_vport_delete(struct fc_vport *fc_vport) +{ + struct efct_vport *vport = *(struct efct_vport **)fc_vport->dd_data; + struct Scsi_Host *shost = vport ? vport->shost : NULL; + struct efct *efct = vport ?
vport->efct : NULL; + int rc; + + rc = efct_scsi_del_vport(efct, shost); + + if (rc) + pr_err("%s: vport delete failed\n", __func__); + + return rc; +} + +static int +efct_vport_disable(struct fc_vport *fc_vport, bool disable) +{ + return 0; +} + +static struct fc_function_template efct_xport_functions = { + .get_host_port_id = efct_get_host_port_id, + .get_host_port_type = efct_get_host_port_type, + .get_host_port_state = efct_get_host_port_state, + .get_host_speed = efct_get_host_speed, + .get_host_fabric_name = efct_get_host_fabric_name, + + .get_fc_host_stats = efct_get_stats, + .reset_fc_host_stats = efct_reset_stats, + + .issue_fc_host_lip = efct_issue_lip, + + .vport_disable = efct_vport_disable, + + /* allocation lengths for host-specific data */ + .dd_fcrport_size = sizeof(struct efct_rport_data), + .dd_fcvport_size = 128, /* should be sizeof(...) */ + + /* remote port fixed attributes */ + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + .show_rport_dev_loss_tmo = 1, + + /* target dynamic attributes */ + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + + /* host fixed attributes */ + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_supported_speeds = 1, + .show_host_maxframe_size = 1, + + /* host dynamic attributes */ + .show_host_port_id = 1, + .show_host_port_type = 1, + .show_host_port_state = 1, + /* active_fc4s is shown but doesn't change (thus no get function) */ + .show_host_active_fc4s = 1, + .show_host_speed = 1, + .show_host_fabric_name = 1, + .show_host_symbolic_name = 1, + .vport_create = efct_vport_create, + .vport_delete = efct_vport_delete, +}; + +static struct fc_function_template efct_vport_functions = { + .get_host_port_id = efct_get_host_port_id, + .get_host_port_type = efct_get_host_vport_type, + .get_host_port_state = efct_get_host_port_state, + .get_host_speed = efct_get_host_speed, + .get_host_fabric_name = efct_get_host_fabric_name, + + .get_fc_host_stats = efct_get_stats, + .reset_fc_host_stats = efct_reset_stats, + + .issue_fc_host_lip = efct_issue_lip, + + /* allocation lengths for host-specific data */ + .dd_fcrport_size = sizeof(struct efct_rport_data), + .dd_fcvport_size = 128, /* should be sizeof(...) */ + + /* remote port fixed attributes */ + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + .show_rport_dev_loss_tmo = 1, + + /* target dynamic attributes */ + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + + /* host fixed attributes */ + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_supported_speeds = 1, + .show_host_maxframe_size = 1, + + /* host dynamic attributes */ + .show_host_port_id = 1, + .show_host_port_type = 1, + .show_host_port_state = 1, + /* active_fc4s is shown but doesn't change (thus no get function) */ + .show_host_active_fc4s = 1, + .show_host_speed = 1, + .show_host_fabric_name = 1, + .show_host_symbolic_name = 1, +}; diff --git a/drivers/scsi/elx/efct/efct_xport.h b/drivers/scsi/elx/efct/efct_xport.h new file mode 100644 index 000000000..89f3c20ec --- /dev/null +++ b/drivers/scsi/elx/efct/efct_xport.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
+ */ + +#if !defined(__EFCT_XPORT_H__) +#define __EFCT_XPORT_H__ + +enum efct_xport_ctrl { + EFCT_XPORT_PORT_ONLINE = 1, + EFCT_XPORT_PORT_OFFLINE, + EFCT_XPORT_SHUTDOWN, + EFCT_XPORT_POST_NODE_EVENT, + EFCT_XPORT_WWNN_SET, + EFCT_XPORT_WWPN_SET, +}; + +enum efct_xport_status { + EFCT_XPORT_PORT_STATUS, + EFCT_XPORT_CONFIG_PORT_STATUS, + EFCT_XPORT_LINK_SPEED, + EFCT_XPORT_IS_SUPPORTED_LINK_SPEED, + EFCT_XPORT_LINK_STATISTICS, + EFCT_XPORT_LINK_STAT_RESET, + EFCT_XPORT_IS_QUIESCED +}; + +struct efct_xport_link_stats { + bool rec; + bool gec; + bool w02of; + bool w03of; + bool w04of; + bool w05of; + bool w06of; + bool w07of; + bool w08of; + bool w09of; + bool w10of; + bool w11of; + bool w12of; + bool w13of; + bool w14of; + bool w15of; + bool w16of; + bool w17of; + bool w18of; + bool w19of; + bool w20of; + bool w21of; + bool clrc; + bool clof1; + u32 link_failure_error_count; + u32 loss_of_sync_error_count; + u32 loss_of_signal_error_count; + u32 primitive_sequence_error_count; + u32 invalid_transmission_word_error_count; + u32 crc_error_count; + u32 primitive_sequence_event_timeout_count; + u32 elastic_buffer_overrun_error_count; + u32 arbitration_fc_al_timeout_count; + u32 advertised_receive_bufftor_to_buffer_credit; + u32 current_receive_buffer_to_buffer_credit; + u32 advertised_transmit_buffer_to_buffer_credit; + u32 current_transmit_buffer_to_buffer_credit; + u32 received_eofa_count; + u32 received_eofdti_count; + u32 received_eofni_count; + u32 received_soff_count; + u32 received_dropped_no_aer_count; + u32 received_dropped_no_available_rpi_resources_count; + u32 received_dropped_no_available_xri_resources_count; +}; + +struct efct_xport_host_stats { + bool cc; + u32 transmit_kbyte_count; + u32 receive_kbyte_count; + u32 transmit_frame_count; + u32 receive_frame_count; + u32 transmit_sequence_count; + u32 receive_sequence_count; + u32 total_exchanges_originator; + u32 total_exchanges_responder; + u32 receive_p_bsy_count; + u32 receive_f_bsy_count; + u32 dropped_frames_due_to_no_rq_buffer_count; + u32 empty_rq_timeout_count; + u32 dropped_frames_due_to_no_xri_count; + u32 empty_xri_pool_count; +}; + +struct efct_xport_host_statistics { + struct completion done; + struct efct_xport_link_stats link_stats; + struct efct_xport_host_stats host_stats; +}; + +union efct_xport_stats_u { + u32 value; + struct efct_xport_host_statistics stats; +}; + +struct efct_xport_fcp_stats { + u64 input_bytes; + u64 output_bytes; + u64 input_requests; + u64 output_requests; + u64 control_requests; +}; + +struct efct_xport { + struct efct *efct; + /* wwpn requested by user for primary nport */ + u64 req_wwpn; + /* wwnn requested by user for primary nport */ + u64 req_wwnn; + + /* Nodes */ + /* number of allocated nodes */ + u32 nodes_count; + /* used to track how often IO pool is empty */ + atomic_t io_alloc_failed_count; + /* array of pointers to nodes */ + struct efc_node **nodes; + + /* Io pool and counts */ + /* pointer to IO pool */ + struct efct_io_pool *io_pool; + /* lock for io_pending_list */ + spinlock_t io_pending_lock; + /* list of IOs waiting for HW resources + * lock: xport->io_pending_lock + * link: efct_io_s->io_pending_link + */ + struct list_head io_pending_list; + /* count of totals IOS allocated */ + atomic_t io_total_alloc; + /* count of totals IOS free'd */ + atomic_t io_total_free; + /* count of totals IOS that were pended */ + atomic_t io_total_pending; + /* count of active IOS */ + atomic_t io_active_count; + /* count of pending IOS */ + atomic_t io_pending_count; + /* non-zero if 
efct_scsi_check_pending is executing */ + atomic_t io_pending_recursing; + + /* Port */ + /* requested link state */ + u32 configured_link_state; + + /* Timer for Statistics */ + struct timer_list stats_timer; + union efct_xport_stats_u fc_xport_stats; + struct efct_xport_fcp_stats fcp_stats; +}; + +struct efct_rport_data { + struct efc_node *node; +}; + +struct efct_xport * +efct_xport_alloc(struct efct *efct); +int +efct_xport_attach(struct efct_xport *xport); +int +efct_xport_initialize(struct efct_xport *xport); +void +efct_xport_detach(struct efct_xport *xport); +int +efct_xport_control(struct efct_xport *xport, enum efct_xport_ctrl cmd, ...); +int +efct_xport_status(struct efct_xport *xport, enum efct_xport_status cmd, + union efct_xport_stats_u *result); +void +efct_xport_free(struct efct_xport *xport); + +struct scsi_transport_template *efct_attach_fc_transport(void); +struct scsi_transport_template *efct_attach_vport_fc_transport(void); +void +efct_release_fc_transport(struct scsi_transport_template *transport_template); + +#endif /* __EFCT_XPORT_H__ */ diff --git a/drivers/scsi/elx/include/efc_common.h b/drivers/scsi/elx/include/efc_common.h new file mode 100644 index 000000000..8d57f69ac --- /dev/null +++ b/drivers/scsi/elx/include/efc_common.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#ifndef __EFC_COMMON_H__ +#define __EFC_COMMON_H__ + +#include + +struct efc_dma { + void *virt; + void *alloc; + dma_addr_t phys; + + size_t size; + size_t len; + struct pci_dev *pdev; +}; + +#define efc_log_crit(efc, fmt, args...) \ + dev_crit(&((efc)->pci)->dev, fmt, ##args) + +#define efc_log_err(efc, fmt, args...) \ + dev_err(&((efc)->pci)->dev, fmt, ##args) + +#define efc_log_warn(efc, fmt, args...) \ + dev_warn(&((efc)->pci)->dev, fmt, ##args) + +#define efc_log_info(efc, fmt, args...) \ + dev_info(&((efc)->pci)->dev, fmt, ##args) + +#define efc_log_debug(efc, fmt, args...) \ + dev_dbg(&((efc)->pci)->dev, fmt, ##args) + +#endif /* __EFC_COMMON_H__ */ diff --git a/drivers/scsi/elx/libefc/efc.h b/drivers/scsi/elx/libefc/efc.h new file mode 100644 index 000000000..468ff3cc9 --- /dev/null +++ b/drivers/scsi/elx/libefc/efc.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#ifndef __EFC_H__ +#define __EFC_H__ + +#include "../include/efc_common.h" +#include "efclib.h" +#include "efc_sm.h" +#include "efc_cmds.h" +#include "efc_domain.h" +#include "efc_nport.h" +#include "efc_node.h" +#include "efc_fabric.h" +#include "efc_device.h" +#include "efc_els.h" + +#define EFC_MAX_REMOTE_NODES 2048 +#define NODE_SPARAMS_SIZE 256 + +enum efc_scsi_del_initiator_reason { + EFC_SCSI_INITIATOR_DELETED, + EFC_SCSI_INITIATOR_MISSING, +}; + +enum efc_scsi_del_target_reason { + EFC_SCSI_TARGET_DELETED, + EFC_SCSI_TARGET_MISSING, +}; + +#define EFC_FC_ELS_DEFAULT_RETRIES 3 + +#define domain_sm_trace(domain) \ + efc_log_debug(domain->efc, "[domain:%s] %-20s %-20s\n", \ + domain->display_name, __func__, efc_sm_event_name(evt)) \ + +#define domain_trace(domain, fmt, ...) 
\ + efc_log_debug(domain->efc, \ + "[%s]" fmt, domain->display_name, ##__VA_ARGS__) \ + +#define node_sm_trace() \ + efc_log_debug(node->efc, "[%s] %-20s %-20s\n", \ + node->display_name, __func__, efc_sm_event_name(evt)) \ + +#define nport_sm_trace(nport) \ + efc_log_debug(nport->efc, \ + "[%s] %-20s %-20s\n", nport->display_name, __func__, efc_sm_event_name(evt)) \ + +#endif /* __EFC_H__ */ diff --git a/drivers/scsi/elx/libefc/efc_cmds.c b/drivers/scsi/elx/libefc/efc_cmds.c new file mode 100644 index 000000000..da4ac8a4c --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_cmds.c @@ -0,0 +1,782 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#include "efclib.h" +#include "../libefc_sli/sli4.h" +#include "efc_cmds.h" +#include "efc_sm.h" + +static void +efc_nport_free_resources(struct efc_nport *nport, int evt, void *data) +{ + struct efc *efc = nport->efc; + + /* Clear the nport attached flag */ + nport->attached = false; + + /* Free the service parameters buffer */ + if (nport->dma.virt) { + dma_free_coherent(&efc->pci->dev, nport->dma.size, + nport->dma.virt, nport->dma.phys); + memset(&nport->dma, 0, sizeof(struct efc_dma)); + } + + /* Free the SLI resources */ + sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator); + + efc_nport_cb(efc, evt, nport); +} + +static int +efc_nport_get_mbox_status(struct efc_nport *nport, u8 *mqe, int status) +{ + struct efc *efc = nport->efc; + struct sli4_mbox_command_header *hdr = + (struct sli4_mbox_command_header *)mqe; + + if (status || le16_to_cpu(hdr->status)) { + efc_log_debug(efc, "bad status vpi=%#x st=%x hdr=%x\n", + nport->indicator, status, le16_to_cpu(hdr->status)); + return -EIO; + } + + return 0; +} + +static int +efc_nport_free_unreg_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg) +{ + struct efc_nport *nport = arg; + int evt = EFC_EVT_NPORT_FREE_OK; + int rc; + + rc = efc_nport_get_mbox_status(nport, mqe, status); + if (rc) + evt = EFC_EVT_NPORT_FREE_FAIL; + + efc_nport_free_resources(nport, evt, mqe); + return rc; +} + +static void +efc_nport_free_unreg_vpi(struct efc_nport *nport) +{ + struct efc *efc = nport->efc; + int rc; + u8 data[SLI4_BMBX_SIZE]; + + rc = sli_cmd_unreg_vpi(efc->sli, data, nport->indicator, + SLI4_UNREG_TYPE_PORT); + if (rc) { + efc_log_err(efc, "UNREG_VPI format failure\n"); + efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data); + return; + } + + rc = efc->tt.issue_mbox_rqst(efc->base, data, + efc_nport_free_unreg_vpi_cb, nport); + if (rc) { + efc_log_err(efc, "UNREG_VPI command failure\n"); + efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data); + } +} + +static void +efc_nport_send_evt(struct efc_nport *nport, int evt, void *data) +{ + struct efc *efc = nport->efc; + + /* Now inform the registered callbacks */ + efc_nport_cb(efc, evt, nport); + + /* Set the nport attached flag */ + if (evt == EFC_EVT_NPORT_ATTACH_OK) + nport->attached = true; + + /* If there is a pending free request, then handle it now */ + if (nport->free_req_pending) + efc_nport_free_unreg_vpi(nport); +} + +static int +efc_nport_alloc_init_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg) +{ + struct efc_nport *nport = arg; + + if (efc_nport_get_mbox_status(nport, mqe, status)) { + efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe); + return -EIO; + } + + efc_nport_send_evt(nport, EFC_EVT_NPORT_ALLOC_OK, mqe); + return 0; +} + +static void 
+efc_nport_alloc_init_vpi(struct efc_nport *nport) +{ + struct efc *efc = nport->efc; + u8 data[SLI4_BMBX_SIZE]; + int rc; + + /* If there is a pending free request, then handle it now */ + if (nport->free_req_pending) { + efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_OK, data); + return; + } + + rc = sli_cmd_init_vpi(efc->sli, data, + nport->indicator, nport->domain->indicator); + if (rc) { + efc_log_err(efc, "INIT_VPI format failure\n"); + efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data); + return; + } + + rc = efc->tt.issue_mbox_rqst(efc->base, data, + efc_nport_alloc_init_vpi_cb, nport); + if (rc) { + efc_log_err(efc, "INIT_VPI command failure\n"); + efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data); + } +} + +static int +efc_nport_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe, void *arg) +{ + struct efc_nport *nport = arg; + u8 *payload = NULL; + + if (efc_nport_get_mbox_status(nport, mqe, status)) { + efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe); + return -EIO; + } + + payload = nport->dma.virt; + + memcpy(&nport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, + sizeof(nport->sli_wwpn)); + memcpy(&nport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, + sizeof(nport->sli_wwnn)); + + dma_free_coherent(&efc->pci->dev, nport->dma.size, nport->dma.virt, + nport->dma.phys); + memset(&nport->dma, 0, sizeof(struct efc_dma)); + efc_nport_alloc_init_vpi(nport); + return 0; +} + +static void +efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport) +{ + u8 data[SLI4_BMBX_SIZE]; + int rc; + + /* Allocate memory for the service parameters */ + nport->dma.size = EFC_SPARAM_DMA_SZ; + nport->dma.virt = dma_alloc_coherent(&efc->pci->dev, + nport->dma.size, &nport->dma.phys, + GFP_KERNEL); + if (!nport->dma.virt) { + efc_log_err(efc, "Failed to allocate DMA memory\n"); + efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data); + return; + } + + rc = sli_cmd_read_sparm64(efc->sli, data, + &nport->dma, nport->indicator); + if (rc) { + efc_log_err(efc, "READ_SPARM64 format failure\n"); + efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data); + return; + } + + rc = efc->tt.issue_mbox_rqst(efc->base, data, + efc_nport_alloc_read_sparm64_cb, nport); + if (rc) { + efc_log_err(efc, "READ_SPARM64 command failure\n"); + efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data); + } +} + +int +efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport, + struct efc_domain *domain, u8 *wwpn) +{ + u32 index; + + nport->indicator = U32_MAX; + nport->free_req_pending = false; + + if (wwpn) + memcpy(&nport->sli_wwpn, wwpn, sizeof(nport->sli_wwpn)); + + /* + * allocate a VPI object for the port and stores it in the + * indicator field of the port object. 
+ */ + if (sli_resource_alloc(efc->sli, SLI4_RSRC_VPI, + &nport->indicator, &index)) { + efc_log_err(efc, "VPI allocation failure\n"); + return -EIO; + } + + if (domain) { + /* + * If the WWPN is NULL, fetch the default + * WWPN and WWNN before initializing the VPI + */ + if (!wwpn) + efc_nport_alloc_read_sparm64(efc, nport); + else + efc_nport_alloc_init_vpi(nport); + } else if (!wwpn) { + /* domain NULL and wwpn NULL */ + efc_log_err(efc, "need WWN for physical port\n"); + sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator); + return -EIO; + } + + return 0; +} + +static int +efc_nport_attach_reg_vpi_cb(struct efc *efc, int status, u8 *mqe, + void *arg) +{ + struct efc_nport *nport = arg; + + nport->attaching = false; + if (efc_nport_get_mbox_status(nport, mqe, status)) { + efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, mqe); + return -EIO; + } + + efc_nport_send_evt(nport, EFC_EVT_NPORT_ATTACH_OK, mqe); + return 0; +} + +int +efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id) +{ + u8 buf[SLI4_BMBX_SIZE]; + int rc = 0; + + if (!nport) { + efc_log_err(efc, "bad param(s) nport=%p\n", nport); + return -EIO; + } + + nport->fc_id = fc_id; + + /* register previously-allocated VPI with the device */ + rc = sli_cmd_reg_vpi(efc->sli, buf, nport->fc_id, + nport->sli_wwpn, nport->indicator, + nport->domain->indicator, false); + if (rc) { + efc_log_err(efc, "REG_VPI format failure\n"); + efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf); + return rc; + } + + rc = efc->tt.issue_mbox_rqst(efc->base, buf, + efc_nport_attach_reg_vpi_cb, nport); + if (rc) { + efc_log_err(efc, "REG_VPI command failure\n"); + efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf); + } else { + nport->attaching = true; + } + + return rc; +} + +int +efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport) +{ + if (!nport) { + efc_log_err(efc, "bad parameter(s) nport=%p\n", nport); + return -EIO; + } + + /* Issue the UNREG_VPI command to free the assigned VPI context */ + if (nport->attached) + efc_nport_free_unreg_vpi(nport); + else if (nport->attaching) + nport->free_req_pending = true; + else + efc_sm_post_event(&nport->sm, EFC_EVT_NPORT_FREE_OK, NULL); + + return 0; +} + +static int +efc_domain_get_mbox_status(struct efc_domain *domain, u8 *mqe, int status) +{ + struct efc *efc = domain->efc; + struct sli4_mbox_command_header *hdr = + (struct sli4_mbox_command_header *)mqe; + + if (status || le16_to_cpu(hdr->status)) { + efc_log_debug(efc, "bad status vfi=%#x st=%x hdr=%x\n", + domain->indicator, status, + le16_to_cpu(hdr->status)); + return -EIO; + } + + return 0; +} + +static void +efc_domain_free_resources(struct efc_domain *domain, int evt, void *data) +{ + struct efc *efc = domain->efc; + + /* Free the service parameters buffer */ + if (domain->dma.virt) { + dma_free_coherent(&efc->pci->dev, + domain->dma.size, domain->dma.virt, + domain->dma.phys); + memset(&domain->dma, 0, sizeof(struct efc_dma)); + } + + /* Free the SLI resources */ + sli_resource_free(efc->sli, SLI4_RSRC_VFI, domain->indicator); + + efc_domain_cb(efc, evt, domain); +} + +static void +efc_domain_send_nport_evt(struct efc_domain *domain, + int port_evt, int domain_evt, void *data) +{ + struct efc *efc = domain->efc; + + /* Send alloc/attach ok to the physical nport */ + efc_nport_send_evt(domain->nport, port_evt, NULL); + + /* Now inform the registered callbacks */ + efc_domain_cb(efc, domain_evt, domain); +} + +static int +efc_domain_alloc_read_sparm64_cb(struct efc *efc,
int status, u8 *mqe, + void *arg) +{ + struct efc_domain *domain = arg; + + if (efc_domain_get_mbox_status(domain, mqe, status)) { + efc_domain_free_resources(domain, + EFC_HW_DOMAIN_ALLOC_FAIL, mqe); + return -EIO; + } + + efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ALLOC_OK, + EFC_HW_DOMAIN_ALLOC_OK, mqe); + return 0; +} + +static void +efc_domain_alloc_read_sparm64(struct efc_domain *domain) +{ + struct efc *efc = domain->efc; + u8 data[SLI4_BMBX_SIZE]; + int rc; + + rc = sli_cmd_read_sparm64(efc->sli, data, &domain->dma, 0); + if (rc) { + efc_log_err(efc, "READ_SPARM64 format failure\n"); + efc_domain_free_resources(domain, + EFC_HW_DOMAIN_ALLOC_FAIL, data); + return; + } + + rc = efc->tt.issue_mbox_rqst(efc->base, data, + efc_domain_alloc_read_sparm64_cb, domain); + if (rc) { + efc_log_err(efc, "READ_SPARM64 command failure\n"); + efc_domain_free_resources(domain, + EFC_HW_DOMAIN_ALLOC_FAIL, data); + } +} + +static int +efc_domain_alloc_init_vfi_cb(struct efc *efc, int status, u8 *mqe, + void *arg) +{ + struct efc_domain *domain = arg; + + if (efc_domain_get_mbox_status(domain, mqe, status)) { + efc_domain_free_resources(domain, + EFC_HW_DOMAIN_ALLOC_FAIL, mqe); + return -EIO; + } + + efc_domain_alloc_read_sparm64(domain); + return 0; +} + +static void +efc_domain_alloc_init_vfi(struct efc_domain *domain) +{ + struct efc *efc = domain->efc; + struct efc_nport *nport = domain->nport; + u8 data[SLI4_BMBX_SIZE]; + int rc; + + /* + * For FC, the HW already registered an FCFI. + * Copy FCF information into the domain and jump to INIT_VFI. + */ + domain->fcf_indicator = efc->fcfi; + rc = sli_cmd_init_vfi(efc->sli, data, domain->indicator, + domain->fcf_indicator, nport->indicator); + if (rc) { + efc_log_err(efc, "INIT_VFI format failure\n"); + efc_domain_free_resources(domain, + EFC_HW_DOMAIN_ALLOC_FAIL, data); + return; + } + + efc_log_err(efc, "%s issue mbox\n", __func__); + rc = efc->tt.issue_mbox_rqst(efc->base, data, + efc_domain_alloc_init_vfi_cb, domain); + if (rc) { + efc_log_err(efc, "INIT_VFI command failure\n"); + efc_domain_free_resources(domain, + EFC_HW_DOMAIN_ALLOC_FAIL, data); + } +} + +int +efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf) +{ + u32 index; + + if (!domain || !domain->nport) { + efc_log_err(efc, "bad parameter(s) domain=%p nport=%p\n", + domain, domain ?
domain->nport : NULL); + return -EIO; + } + + /* allocate memory for the service parameters */ + domain->dma.size = EFC_SPARAM_DMA_SZ; + domain->dma.virt = dma_alloc_coherent(&efc->pci->dev, + domain->dma.size, + &domain->dma.phys, GFP_KERNEL); + if (!domain->dma.virt) { + efc_log_err(efc, "Failed to allocate DMA memory\n"); + return -EIO; + } + + domain->fcf = fcf; + domain->fcf_indicator = U32_MAX; + domain->indicator = U32_MAX; + + if (sli_resource_alloc(efc->sli, SLI4_RSRC_VFI, &domain->indicator, + &index)) { + efc_log_err(efc, "VFI allocation failure\n"); + + dma_free_coherent(&efc->pci->dev, + domain->dma.size, domain->dma.virt, + domain->dma.phys); + memset(&domain->dma, 0, sizeof(struct efc_dma)); + + return -EIO; + } + + efc_domain_alloc_init_vfi(domain); + return 0; +} + +static int +efc_domain_attach_reg_vfi_cb(struct efc *efc, int status, u8 *mqe, + void *arg) +{ + struct efc_domain *domain = arg; + + if (efc_domain_get_mbox_status(domain, mqe, status)) { + efc_domain_free_resources(domain, + EFC_HW_DOMAIN_ATTACH_FAIL, mqe); + return -EIO; + } + + efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ATTACH_OK, + EFC_HW_DOMAIN_ATTACH_OK, mqe); + return 0; +} + +int +efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id) +{ + u8 buf[SLI4_BMBX_SIZE]; + int rc = 0; + + if (!domain) { + efc_log_err(efc, "bad param(s) domain=%p\n", domain); + return -EIO; + } + + domain->nport->fc_id = fc_id; + + rc = sli_cmd_reg_vfi(efc->sli, buf, SLI4_BMBX_SIZE, domain->indicator, + domain->fcf_indicator, domain->dma, + domain->nport->indicator, domain->nport->sli_wwpn, + domain->nport->fc_id); + if (rc) { + efc_log_err(efc, "REG_VFI format failure\n"); + goto cleanup; + } + + rc = efc->tt.issue_mbox_rqst(efc->base, buf, + efc_domain_attach_reg_vfi_cb, domain); + if (rc) { + efc_log_err(efc, "REG_VFI command failure\n"); + goto cleanup; + } + + return rc; + +cleanup: + efc_domain_free_resources(domain, EFC_HW_DOMAIN_ATTACH_FAIL, buf); + + return rc; +} + +static int +efc_domain_free_unreg_vfi_cb(struct efc *efc, int status, u8 *mqe, void *arg) +{ + struct efc_domain *domain = arg; + int evt = EFC_HW_DOMAIN_FREE_OK; + int rc; + + rc = efc_domain_get_mbox_status(domain, mqe, status); + if (rc) { + evt = EFC_HW_DOMAIN_FREE_FAIL; + rc = -EIO; + } + + efc_domain_free_resources(domain, evt, mqe); + return rc; +} + +static void +efc_domain_free_unreg_vfi(struct efc_domain *domain) +{ + struct efc *efc = domain->efc; + int rc; + u8 data[SLI4_BMBX_SIZE]; + + rc = sli_cmd_unreg_vfi(efc->sli, data, domain->indicator, + SLI4_UNREG_TYPE_DOMAIN); + if (rc) { + efc_log_err(efc, "UNREG_VFI format failure\n"); + goto cleanup; + } + + rc = efc->tt.issue_mbox_rqst(efc->base, data, + efc_domain_free_unreg_vfi_cb, domain); + if (rc) { + efc_log_err(efc, "UNREG_VFI command failure\n"); + goto cleanup; + } + + return; + +cleanup: + efc_domain_free_resources(domain, EFC_HW_DOMAIN_FREE_FAIL, data); +} + +int +efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain) +{ + if (!domain) { + efc_log_err(efc, "bad parameter(s) domain=%p\n", domain); + return -EIO; + } + + efc_domain_free_unreg_vfi(domain); + return 0; +} + +int +efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr, + struct efc_nport *nport) +{ + /* Check for invalid indicator */ + if (rnode->indicator != U32_MAX) { + efc_log_err(efc, + "RPI allocation failure addr=%#x rpi=%#x\n", + fc_addr, rnode->indicator); + return -EIO; + } + + /* NULL SLI port indicates an unallocated remote node */ + rnode->nport = NULL; 
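+ /* + * Reserve an RPI (remote port indicator) from the SLI-4 resource pool; + * the indicator/index returned here identify this remote node to the + * hardware until efc_node_free_resources() releases them. + */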
+ + if (sli_resource_alloc(efc->sli, SLI4_RSRC_RPI, + &rnode->indicator, &rnode->index)) { + efc_log_err(efc, "RPI allocation failure addr=%#x\n", + fc_addr); + return -EIO; + } + + rnode->fc_id = fc_addr; + rnode->nport = nport; + + return 0; +} + +static int +efc_cmd_node_attach_cb(struct efc *efc, int status, u8 *mqe, void *arg) +{ + struct efc_remote_node *rnode = arg; + struct sli4_mbox_command_header *hdr = + (struct sli4_mbox_command_header *)mqe; + int evt = 0; + + if (status || le16_to_cpu(hdr->status)) { + efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status, + le16_to_cpu(hdr->status)); + rnode->attached = false; + evt = EFC_EVT_NODE_ATTACH_FAIL; + } else { + rnode->attached = true; + evt = EFC_EVT_NODE_ATTACH_OK; + } + + efc_remote_node_cb(efc, evt, rnode); + + return 0; +} + +int +efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode, + struct efc_dma *sparms) +{ + int rc = -EIO; + u8 buf[SLI4_BMBX_SIZE]; + + if (!rnode || !sparms) { + efc_log_err(efc, "bad parameter(s) rnode=%p sparms=%p\n", + rnode, sparms); + return -EIO; + } + + /* + * If the attach count is non-zero, this RPI has already been reg'd. + * Otherwise, register the RPI + */ + if (rnode->index == U32_MAX) { + efc_log_err(efc, "bad parameter rnode->index invalid\n"); + return -EIO; + } + + /* Update a remote node object with the remote port's service params */ + if (!sli_cmd_reg_rpi(efc->sli, buf, rnode->indicator, + rnode->nport->indicator, rnode->fc_id, sparms, 0, 0)) + rc = efc->tt.issue_mbox_rqst(efc->base, buf, + efc_cmd_node_attach_cb, rnode); + + return rc; +} + +int +efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode) +{ + int rc = 0; + + if (!rnode) { + efc_log_err(efc, "bad parameter rnode=%p\n", rnode); + return -EIO; + } + + if (rnode->nport) { + if (rnode->attached) { + efc_log_err(efc, "rnode is still attached\n"); + return -EIO; + } + if (rnode->indicator != U32_MAX) { + if (sli_resource_free(efc->sli, SLI4_RSRC_RPI, + rnode->indicator)) { + efc_log_err(efc, + "RPI free fail RPI %d addr=%#x\n", + rnode->indicator, rnode->fc_id); + rc = -EIO; + } else { + rnode->indicator = U32_MAX; + rnode->index = U32_MAX; + } + } + } + + return rc; +} + +static int +efc_cmd_node_free_cb(struct efc *efc, int status, u8 *mqe, void *arg) +{ + struct efc_remote_node *rnode = arg; + struct sli4_mbox_command_header *hdr = + (struct sli4_mbox_command_header *)mqe; + int evt = EFC_EVT_NODE_FREE_FAIL; + int rc = 0; + + if (status || le16_to_cpu(hdr->status)) { + efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status, + le16_to_cpu(hdr->status)); + + /* + * In certain cases, a non-zero MQE status is OK (all must be + * true): + * - node is attached + * - status is 0x1400 + */ + if (!rnode->attached || + (le16_to_cpu(hdr->status) != SLI4_MBX_STATUS_RPI_NOT_REG)) + rc = -EIO; + } + + if (!rc) { + rnode->attached = false; + evt = EFC_EVT_NODE_FREE_OK; + } + + efc_remote_node_cb(efc, evt, rnode); + + return rc; +} + +int +efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode) +{ + u8 buf[SLI4_BMBX_SIZE]; + int rc = -EIO; + + if (!rnode) { + efc_log_err(efc, "bad parameter rnode=%p\n", rnode); + return -EIO; + } + + if (rnode->nport) { + if (!rnode->attached) + return -EIO; + + rc = -EIO; + + if (!sli_cmd_unreg_rpi(efc->sli, buf, rnode->indicator, + SLI4_RSRC_RPI, U32_MAX)) + rc = efc->tt.issue_mbox_rqst(efc->base, buf, + efc_cmd_node_free_cb, rnode); + + if (rc != 0) { + efc_log_err(efc, "UNREG_RPI failed\n"); + rc = -EIO; + } + } + + return rc; +} diff --git 
a/drivers/scsi/elx/libefc/efc_cmds.h b/drivers/scsi/elx/libefc/efc_cmds.h new file mode 100644 index 000000000..4d353ab04 --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_cmds.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#ifndef __EFC_CMDS_H__ +#define __EFC_CMDS_H__ + +#define EFC_SPARAM_DMA_SZ 112 +int +efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport, + struct efc_domain *domain, u8 *wwpn); +int +efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id); +int +efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport); +int +efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf); +int +efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id); +int +efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain); +int +efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode); +int +efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode); +int +efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode, + struct efc_dma *sparms); +int +efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr, + struct efc_nport *nport); + +#endif /* __EFC_CMDS_H */ diff --git a/drivers/scsi/elx/libefc/efc_device.c b/drivers/scsi/elx/libefc/efc_device.c new file mode 100644 index 000000000..52be01333 --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_device.c @@ -0,0 +1,1602 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +/* + * device_sm Node State Machine: Remote Device States + */ + +#include "efc.h" +#include "efc_device.h" +#include "efc_fabric.h" + +void +efc_d_send_prli_rsp(struct efc_node *node, u16 ox_id) +{ + int rc = EFC_SCSI_CALL_COMPLETE; + struct efc *efc = node->efc; + + node->ls_acc_oxid = ox_id; + node->send_ls_acc = EFC_NODE_SEND_LS_ACC_PRLI; + + /* + * Wait for backend session registration + * to complete before sending PRLI resp + */ + + if (node->init) { + efc_log_info(efc, "[%s] found(initiator) WWPN:%s WWNN:%s\n", + node->display_name, node->wwpn, node->wwnn); + if (node->nport->enable_tgt) + rc = efc->tt.scsi_new_node(efc, node); + } + + if (rc < 0) + efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_FAIL, NULL); + + if (rc == EFC_SCSI_CALL_COMPLETE) + efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_OK, NULL); +} + +static void +__efc_d_common(const char *funcname, struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = NULL; + struct efc *efc = NULL; + + node = ctx->app; + efc = node->efc; + + switch (evt) { + /* Handle shutdown events */ + case EFC_EVT_SHUTDOWN: + efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name, + funcname, efc_sm_event_name(evt)); + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + efc_node_transition(node, __efc_d_initiate_shutdown, NULL); + break; + case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: + efc_log_debug(efc, "[%s] %-20s %-20s\n", + node->display_name, funcname, + efc_sm_event_name(evt)); + node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO; + efc_node_transition(node, __efc_d_initiate_shutdown, NULL); + break; + case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: + efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name, + funcname, efc_sm_event_name(evt)); + node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO; + 
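+ /* as in the default and explicit-logout cases above, record the + * reason and enter the common shutdown state + */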
efc_node_transition(node, __efc_d_initiate_shutdown, NULL); + break; + + default: + /* call default event handler common to all nodes */ + __efc_node_common(funcname, ctx, evt, arg); + } +} + +static void +__efc_d_wait_del_node(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + /* + * State is entered when a node sends a delete initiator/target call + * to the target-server/initiator-client and needs to wait for that + * work to complete. + */ + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + fallthrough; + + case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY: + case EFC_EVT_ALL_CHILD_NODES_FREE: + /* These are expected events. */ + break; + + case EFC_EVT_NODE_DEL_INI_COMPLETE: + case EFC_EVT_NODE_DEL_TGT_COMPLETE: + /* + * node has either been detached or is in the process + * of being detached, + * call common node's initiate cleanup function + */ + efc_node_initiate_cleanup(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_SRRS_ELS_REQ_FAIL: + /* Can happen as ELS IO IO's complete */ + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + break; + + /* ignore shutdown events as we're already in shutdown path */ + case EFC_EVT_SHUTDOWN: + /* have default shutdown event take precedence */ + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + fallthrough; + + case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: + case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: + node_printf(node, "%s received\n", efc_sm_event_name(evt)); + break; + case EFC_EVT_DOMAIN_ATTACH_OK: + /* don't care about domain_attach_ok */ + break; + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +static void +__efc_d_wait_del_ini_tgt(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + fallthrough; + + case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY: + case EFC_EVT_ALL_CHILD_NODES_FREE: + /* These are expected events. 
*/ + break; + + case EFC_EVT_NODE_DEL_INI_COMPLETE: + case EFC_EVT_NODE_DEL_TGT_COMPLETE: + efc_node_transition(node, __efc_d_wait_del_node, NULL); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_SRRS_ELS_REQ_FAIL: + /* Can happen as ELS IO IO's complete */ + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + break; + + /* ignore shutdown events as we're already in shutdown path */ + case EFC_EVT_SHUTDOWN: + /* have default shutdown event take precedence */ + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + fallthrough; + + case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: + case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: + node_printf(node, "%s received\n", efc_sm_event_name(evt)); + break; + case EFC_EVT_DOMAIN_ATTACH_OK: + /* don't care about domain_attach_ok */ + break; + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +__efc_d_initiate_shutdown(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + struct efc *efc = node->efc; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: { + int rc = EFC_SCSI_CALL_COMPLETE; + + /* assume no wait needed */ + node->els_io_enabled = false; + + /* make necessary delete upcall(s) */ + if (node->init && !node->targ) { + efc_log_info(node->efc, + "[%s] delete (initiator) WWPN %s WWNN %s\n", + node->display_name, + node->wwpn, node->wwnn); + efc_node_transition(node, + __efc_d_wait_del_node, + NULL); + if (node->nport->enable_tgt) + rc = efc->tt.scsi_del_node(efc, node, + EFC_SCSI_INITIATOR_DELETED); + + if (rc == EFC_SCSI_CALL_COMPLETE || rc < 0) + efc_node_post_event(node, + EFC_EVT_NODE_DEL_INI_COMPLETE, NULL); + + } else if (node->targ && !node->init) { + efc_log_info(node->efc, + "[%s] delete (target) WWPN %s WWNN %s\n", + node->display_name, + node->wwpn, node->wwnn); + efc_node_transition(node, + __efc_d_wait_del_node, + NULL); + if (node->nport->enable_ini) + rc = efc->tt.scsi_del_node(efc, node, + EFC_SCSI_TARGET_DELETED); + + if (rc == EFC_SCSI_CALL_COMPLETE) + efc_node_post_event(node, + EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL); + + } else if (node->init && node->targ) { + efc_log_info(node->efc, + "[%s] delete (I+T) WWPN %s WWNN %s\n", + node->display_name, node->wwpn, node->wwnn); + efc_node_transition(node, __efc_d_wait_del_ini_tgt, + NULL); + if (node->nport->enable_tgt) + rc = efc->tt.scsi_del_node(efc, node, + EFC_SCSI_INITIATOR_DELETED); + + if (rc == EFC_SCSI_CALL_COMPLETE) + efc_node_post_event(node, + EFC_EVT_NODE_DEL_INI_COMPLETE, NULL); + /* assume no wait needed */ + rc = EFC_SCSI_CALL_COMPLETE; + if (node->nport->enable_ini) + rc = efc->tt.scsi_del_node(efc, node, + EFC_SCSI_TARGET_DELETED); + + if (rc == EFC_SCSI_CALL_COMPLETE) + efc_node_post_event(node, + EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL); + } + + /* we've initiated the upcalls as needed, now kick off the node + * detach to precipitate the aborting of outstanding exchanges + * associated with said node + * + * Beware: if we've made upcall(s), we've already transitioned + * to a new state by the time we execute this. + * consider doing this before the upcalls? 
+ */ + if (node->attached) { + /* issue hw node free; don't care if succeeds right + * away or sometime later, will check node->attached + * later in shutdown process + */ + rc = efc_cmd_node_detach(efc, &node->rnode); + if (rc < 0) + node_printf(node, + "Failed freeing HW node, rc=%d\n", + rc); + } + + /* if neither initiator nor target, proceed to cleanup */ + if (!node->init && !node->targ) { + /* + * node has either been detached or is in + * the process of being detached, + * call common node's initiate cleanup function + */ + efc_node_initiate_cleanup(node); + } + break; + } + case EFC_EVT_ALL_CHILD_NODES_FREE: + /* Ignore, this can happen if an ELS is + * aborted while in a delay/retry state + */ + break; + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +__efc_d_wait_loop(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_DOMAIN_ATTACH_OK: { + /* send PLOGI automatically if initiator */ + efc_node_init_device(node, true); + break; + } + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +efc_send_ls_acc_after_attach(struct efc_node *node, + struct fc_frame_header *hdr, + enum efc_node_send_ls_acc ls) +{ + u16 ox_id = be16_to_cpu(hdr->fh_ox_id); + + /* Save the OX_ID for sending LS_ACC sometime later */ + WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE); + + node->ls_acc_oxid = ox_id; + node->send_ls_acc = ls; + node->ls_acc_did = ntoh24(hdr->fh_d_id); +} + +void +efc_process_prli_payload(struct efc_node *node, void *prli) +{ + struct { + struct fc_els_prli prli; + struct fc_els_spp sp; + } *pp; + + pp = prli; + node->init = (pp->sp.spp_flags & FCP_SPPF_INIT_FCN) != 0; + node->targ = (pp->sp.spp_flags & FCP_SPPF_TARG_FCN) != 0; +} + +void +__efc_d_wait_plogi_acc_cmpl(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_SRRS_ELS_CMPL_FAIL: + WARN_ON(!node->els_cmpl_cnt); + node->els_cmpl_cnt--; + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + efc_node_transition(node, __efc_d_initiate_shutdown, NULL); + break; + + case EFC_EVT_SRRS_ELS_CMPL_OK: /* PLOGI ACC completions */ + WARN_ON(!node->els_cmpl_cnt); + node->els_cmpl_cnt--; + efc_node_transition(node, __efc_d_port_logged_in, NULL); + break; + + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +__efc_d_wait_logo_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_SRRS_ELS_REQ_OK: + case EFC_EVT_SRRS_ELS_REQ_RJT: + case EFC_EVT_SRRS_ELS_REQ_FAIL: + /* LOGO response received, sent shutdown */ + if (efc_node_check_els_req(ctx, evt, arg, ELS_LOGO, + __efc_d_common, __func__)) + return; + + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + node_printf(node, + "LOGO sent (evt=%s), shutdown node\n", + efc_sm_event_name(evt)); + /* sm: / post explicit logout */ + 
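+ /* whether the LOGO completed, was rejected, or failed, tear the + * node down through the explicit-logout shutdown path + */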
efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, + NULL); + break; + + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +efc_node_init_device(struct efc_node *node, bool send_plogi) +{ + node->send_plogi = send_plogi; + if ((node->efc->nodedb_mask & EFC_NODEDB_PAUSE_NEW_NODES) && + (node->rnode.fc_id != FC_FID_DOM_MGR)) { + node->nodedb_state = __efc_d_init; + efc_node_transition(node, __efc_node_paused, NULL); + } else { + efc_node_transition(node, __efc_d_init, NULL); + } +} + +static void +efc_d_check_plogi_topology(struct efc_node *node, u32 d_id) +{ + switch (node->nport->topology) { + case EFC_NPORT_TOPO_P2P: + /* we're not attached and nport is p2p, + * need to attach + */ + efc_domain_attach(node->nport->domain, d_id); + efc_node_transition(node, __efc_d_wait_domain_attach, NULL); + break; + case EFC_NPORT_TOPO_FABRIC: + /* we're not attached and nport is fabric, domain + * attach should have already been requested as part + * of the fabric state machine, wait for it + */ + efc_node_transition(node, __efc_d_wait_domain_attach, NULL); + break; + case EFC_NPORT_TOPO_UNKNOWN: + /* Two possibilities: + * 1. received a PLOGI before our FLOGI has completed + * (possible since completion comes in on another + * CQ), thus we don't know what we're connected to + * yet; transition to a state to wait for the + * fabric node to tell us; + * 2. PLOGI received before link went down and we + * haven't performed domain attach yet. + * Note: we cannot distinguish between 1. and 2. + * so have to assume PLOGI + * was received after link back up. + */ + node_printf(node, "received PLOGI, unknown topology did=0x%x\n", + d_id); + efc_node_transition(node, __efc_d_wait_topology_notify, NULL); + break; + default: + node_printf(node, "received PLOGI, unexpected topology %d\n", + node->nport->topology); + } +} + +void +__efc_d_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) +{ + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + /* + * This state is entered when a node is instantiated, + * either having been discovered from a name services query, + * or having received a PLOGI/FLOGI. 
+ */ + switch (evt) { + case EFC_EVT_ENTER: + if (!node->send_plogi) + break; + /* only send if we have initiator capability, + * and domain is attached + */ + if (node->nport->enable_ini && + node->nport->domain->attached) { + efc_send_plogi(node); + + efc_node_transition(node, __efc_d_wait_plogi_rsp, NULL); + } else { + node_printf(node, "not sending plogi nport.ini=%d,", + node->nport->enable_ini); + node_printf(node, "domain attached=%d\n", + node->nport->domain->attached); + } + break; + case EFC_EVT_PLOGI_RCVD: { + /* T, or I+T */ + struct fc_frame_header *hdr = cbdata->header->dma.virt; + int rc; + + efc_node_save_sparms(node, cbdata->payload->dma.virt); + efc_send_ls_acc_after_attach(node, + cbdata->header->dma.virt, + EFC_NODE_SEND_LS_ACC_PLOGI); + + /* domain not attached; several possibilities: */ + if (!node->nport->domain->attached) { + efc_d_check_plogi_topology(node, ntoh24(hdr->fh_d_id)); + break; + } + + /* domain already attached */ + rc = efc_node_attach(node); + efc_node_transition(node, __efc_d_wait_node_attach, NULL); + if (rc < 0) + efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL); + + break; + } + + case EFC_EVT_FDISC_RCVD: { + __efc_d_common(__func__, ctx, evt, arg); + break; + } + + case EFC_EVT_FLOGI_RCVD: { + struct fc_frame_header *hdr = cbdata->header->dma.virt; + u32 d_id = ntoh24(hdr->fh_d_id); + + /* sm: / save sparams, send FLOGI acc */ + memcpy(node->nport->domain->flogi_service_params, + cbdata->payload->dma.virt, + sizeof(struct fc_els_flogi)); + + /* send FC LS_ACC response, override s_id */ + efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P); + + efc_send_flogi_p2p_acc(node, be16_to_cpu(hdr->fh_ox_id), d_id); + + if (efc_p2p_setup(node->nport)) { + node_printf(node, "p2p failed, shutting down node\n"); + efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL); + break; + } + + efc_node_transition(node, __efc_p2p_wait_flogi_acc_cmpl, NULL); + break; + } + + case EFC_EVT_LOGO_RCVD: { + struct fc_frame_header *hdr = cbdata->header->dma.virt; + + if (!node->nport->domain->attached) { + /* most likely a frame left over from before a link + * down; drop and + * shut node down w/ "explicit logout" so pending + * frames are processed + */ + node_printf(node, "%s domain not attached, dropping\n", + efc_sm_event_name(evt)); + efc_node_post_event(node, + EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL); + break; + } + + efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id)); + efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL); + break; + } + + case EFC_EVT_PRLI_RCVD: + case EFC_EVT_PRLO_RCVD: + case EFC_EVT_PDISC_RCVD: + case EFC_EVT_ADISC_RCVD: + case EFC_EVT_RSCN_RCVD: { + struct fc_frame_header *hdr = cbdata->header->dma.virt; + + if (!node->nport->domain->attached) { + /* most likely a frame left over from before a link + * down; drop and shut node down w/ "explicit logout" + * so pending frames are processed + */ + node_printf(node, "%s domain not attached, dropping\n", + efc_sm_event_name(evt)); + + efc_node_post_event(node, + EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, + NULL); + break; + } + node_printf(node, "%s received, sending reject\n", + efc_sm_event_name(evt)); + + efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id), + ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0); + + break; + } + + case EFC_EVT_FCP_CMD_RCVD: { + /* note: problem, we're now expecting an ELS REQ completion + * from both the LOGO and PLOGI + */ + if (!node->nport->domain->attached) { + /* most likely a frame left over from before a + * link down; drop and + * shut node down w/ "explicit logout" so pending 
+ * frames are processed + */ + node_printf(node, "%s domain not attached, dropping\n", + efc_sm_event_name(evt)); + efc_node_post_event(node, + EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, + NULL); + break; + } + + /* Send LOGO */ + node_printf(node, "FCP_CMND received, send LOGO\n"); + if (efc_send_logo(node)) { + /* + * failed to send LOGO, go ahead and cleanup node + * anyways + */ + node_printf(node, "Failed to send LOGO\n"); + efc_node_post_event(node, + EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, + NULL); + } else { + /* sent LOGO, wait for response */ + efc_node_transition(node, + __efc_d_wait_logo_rsp, NULL); + } + break; + } + case EFC_EVT_DOMAIN_ATTACH_OK: + /* don't care about domain_attach_ok */ + break; + + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +__efc_d_wait_plogi_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + int rc; + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_PLOGI_RCVD: { + /* T, or I+T */ + /* received PLOGI with svc parms, go ahead and attach node + * when PLOGI that was sent ultimately completes, it'll be a + * no-op + * + * If there is an outstanding PLOGI sent, can we set a flag + * to indicate that we don't want to retry it if it times out? + */ + efc_node_save_sparms(node, cbdata->payload->dma.virt); + efc_send_ls_acc_after_attach(node, + cbdata->header->dma.virt, + EFC_NODE_SEND_LS_ACC_PLOGI); + /* sm: domain->attached / efc_node_attach */ + rc = efc_node_attach(node); + efc_node_transition(node, __efc_d_wait_node_attach, NULL); + if (rc < 0) + efc_node_post_event(node, + EFC_EVT_NODE_ATTACH_FAIL, NULL); + + break; + } + + case EFC_EVT_PRLI_RCVD: + /* I, or I+T */ + /* sent PLOGI and before completion was seen, received the + * PRLI from the remote node (WCQEs and RCQEs come in on + * different queues and order of processing cannot be assumed) + * Save OXID so PRLI can be sent after the attach and continue + * to wait for PLOGI response + */ + efc_process_prli_payload(node, cbdata->payload->dma.virt); + efc_send_ls_acc_after_attach(node, + cbdata->header->dma.virt, + EFC_NODE_SEND_LS_ACC_PRLI); + efc_node_transition(node, __efc_d_wait_plogi_rsp_recvd_prli, + NULL); + break; + + case EFC_EVT_LOGO_RCVD: /* why don't we do a shutdown here?? 
*/ + case EFC_EVT_PRLO_RCVD: + case EFC_EVT_PDISC_RCVD: + case EFC_EVT_FDISC_RCVD: + case EFC_EVT_ADISC_RCVD: + case EFC_EVT_RSCN_RCVD: + case EFC_EVT_SCR_RCVD: { + struct fc_frame_header *hdr = cbdata->header->dma.virt; + + node_printf(node, "%s received, sending reject\n", + efc_sm_event_name(evt)); + + efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id), + ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0); + + break; + } + + case EFC_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */ + /* Completion from PLOGI sent */ + if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, + __efc_d_common, __func__)) + return; + + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + /* sm: / save sparams, efc_node_attach */ + efc_node_save_sparms(node, cbdata->els_rsp.virt); + rc = efc_node_attach(node); + efc_node_transition(node, __efc_d_wait_node_attach, NULL); + if (rc < 0) + efc_node_post_event(node, + EFC_EVT_NODE_ATTACH_FAIL, NULL); + + break; + + case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */ + /* PLOGI failed, shutdown the node */ + if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, + __efc_d_common, __func__)) + return; + + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL); + break; + + case EFC_EVT_SRRS_ELS_REQ_RJT: + /* Our PLOGI was rejected, this is ok in some cases */ + if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, + __efc_d_common, __func__)) + return; + + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + break; + + case EFC_EVT_FCP_CMD_RCVD: { + /* not logged in yet and outstanding PLOGI so don't send LOGO, + * just drop + */ + node_printf(node, "FCP_CMND received, drop\n"); + break; + } + + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +__efc_d_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + int rc; + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + /* + * Since we've received a PRLI, we have a port login and will + * just need to wait for the PLOGI response to do the node + * attach and then we can send the LS_ACC for the PRLI. If, + * during this time, we receive FCP_CMNDs (which is possible + * since we've already sent a PRLI and our peer may have + * accepted). At this time, we are not waiting on any other + * unsolicited frames to continue with the login process. Thus, + * it will not hurt to hold frames here. 
+ */ + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */ + /* Completion from PLOGI sent */ + if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, + __efc_d_common, __func__)) + return; + + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + /* sm: / save sparams, efc_node_attach */ + efc_node_save_sparms(node, cbdata->els_rsp.virt); + rc = efc_node_attach(node); + efc_node_transition(node, __efc_d_wait_node_attach, NULL); + if (rc < 0) + efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, + NULL); + + break; + + case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */ + case EFC_EVT_SRRS_ELS_REQ_RJT: + /* PLOGI failed, shutdown the node */ + if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, + __efc_d_common, __func__)) + return; + + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL); + break; + + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +__efc_d_wait_domain_attach(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + int rc; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_DOMAIN_ATTACH_OK: + WARN_ON(!node->nport->domain->attached); + /* sm: / efc_node_attach */ + rc = efc_node_attach(node); + efc_node_transition(node, __efc_d_wait_node_attach, NULL); + if (rc < 0) + efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, + NULL); + + break; + + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +__efc_d_wait_topology_notify(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + int rc; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: { + enum efc_nport_topology *topology = arg; + + WARN_ON(node->nport->domain->attached); + + WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI); + + node_printf(node, "topology notification, topology=%d\n", + *topology); + + /* At the time the PLOGI was received, the topology was unknown, + * so we didn't know which node would perform the domain attach: + * 1. The node from which the PLOGI was sent (p2p) or + * 2. The node to which the FLOGI was sent (fabric). 
+ */ + if (*topology == EFC_NPORT_TOPO_P2P) { + /* if this is p2p, need to attach to the domain using + * the d_id from the PLOGI received + */ + efc_domain_attach(node->nport->domain, + node->ls_acc_did); + } + /* else, if this is fabric, the domain attach + * should be performed by the fabric node (node sending FLOGI); + * just wait for attach to complete + */ + + efc_node_transition(node, __efc_d_wait_domain_attach, NULL); + break; + } + case EFC_EVT_DOMAIN_ATTACH_OK: + WARN_ON(!node->nport->domain->attached); + node_printf(node, "domain attach ok\n"); + /* sm: / efc_node_attach */ + rc = efc_node_attach(node); + efc_node_transition(node, __efc_d_wait_node_attach, NULL); + if (rc < 0) + efc_node_post_event(node, + EFC_EVT_NODE_ATTACH_FAIL, NULL); + + break; + + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +__efc_d_wait_node_attach(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_NODE_ATTACH_OK: + node->attached = true; + switch (node->send_ls_acc) { + case EFC_NODE_SEND_LS_ACC_PLOGI: { + /* sm: send_plogi_acc is set / send PLOGI acc */ + /* Normal case for T, or I+T */ + efc_send_plogi_acc(node, node->ls_acc_oxid); + efc_node_transition(node, __efc_d_wait_plogi_acc_cmpl, + NULL); + node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE; + node->ls_acc_io = NULL; + break; + } + case EFC_NODE_SEND_LS_ACC_PRLI: { + efc_d_send_prli_rsp(node, node->ls_acc_oxid); + node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE; + node->ls_acc_io = NULL; + break; + } + case EFC_NODE_SEND_LS_ACC_NONE: + default: + /* Normal case for I */ + /* sm: send_plogi_acc is not set / send PLOGI acc */ + efc_node_transition(node, + __efc_d_port_logged_in, NULL); + break; + } + break; + + case EFC_EVT_NODE_ATTACH_FAIL: + /* node attach failed, shutdown the node */ + node->attached = false; + node_printf(node, "node attach failed\n"); + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + efc_node_transition(node, __efc_d_initiate_shutdown, NULL); + break; + + /* Handle shutdown events */ + case EFC_EVT_SHUTDOWN: + node_printf(node, "%s received\n", efc_sm_event_name(evt)); + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + efc_node_transition(node, __efc_d_wait_attach_evt_shutdown, + NULL); + break; + case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: + node_printf(node, "%s received\n", efc_sm_event_name(evt)); + node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO; + efc_node_transition(node, __efc_d_wait_attach_evt_shutdown, + NULL); + break; + case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: + node_printf(node, "%s received\n", efc_sm_event_name(evt)); + node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO; + efc_node_transition(node, + __efc_d_wait_attach_evt_shutdown, NULL); + break; + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +__efc_d_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + /* wait for any of these attach events and then shutdown */ + case EFC_EVT_NODE_ATTACH_OK: + node->attached = true; + node_printf(node, "Attach evt=%s, proceed to 
shutdown\n", + efc_sm_event_name(evt)); + efc_node_transition(node, __efc_d_initiate_shutdown, NULL); + break; + + case EFC_EVT_NODE_ATTACH_FAIL: + /* node attach failed, shutdown the node */ + node->attached = false; + node_printf(node, "Attach evt=%s, proceed to shutdown\n", + efc_sm_event_name(evt)); + efc_node_transition(node, __efc_d_initiate_shutdown, NULL); + break; + + /* ignore shutdown events as we're already in shutdown path */ + case EFC_EVT_SHUTDOWN: + /* have default shutdown event take precedence */ + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + fallthrough; + + case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: + case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: + node_printf(node, "%s received\n", efc_sm_event_name(evt)); + break; + + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +__efc_d_port_logged_in(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + /* Normal case for I or I+T */ + if (node->nport->enable_ini && + !(node->rnode.fc_id != FC_FID_DOM_MGR)) { + /* sm: if enable_ini / send PRLI */ + efc_send_prli(node); + /* can now expect ELS_REQ_OK/FAIL/RJT */ + } + break; + + case EFC_EVT_FCP_CMD_RCVD: { + break; + } + + case EFC_EVT_PRLI_RCVD: { + /* Normal case for T or I+T */ + struct fc_frame_header *hdr = cbdata->header->dma.virt; + struct { + struct fc_els_prli prli; + struct fc_els_spp sp; + } *pp; + + pp = cbdata->payload->dma.virt; + if (pp->sp.spp_type != FC_TYPE_FCP) { + /*Only FCP is supported*/ + efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id), + ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0); + break; + } + + efc_process_prli_payload(node, cbdata->payload->dma.virt); + efc_d_send_prli_rsp(node, be16_to_cpu(hdr->fh_ox_id)); + break; + } + + case EFC_EVT_NODE_SESS_REG_OK: + if (node->send_ls_acc == EFC_NODE_SEND_LS_ACC_PRLI) + efc_send_prli_acc(node, node->ls_acc_oxid); + + node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE; + efc_node_transition(node, __efc_d_device_ready, NULL); + break; + + case EFC_EVT_NODE_SESS_REG_FAIL: + efc_send_ls_rjt(node, node->ls_acc_oxid, ELS_RJT_UNAB, + ELS_EXPL_UNSUPR, 0); + node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE; + break; + + case EFC_EVT_SRRS_ELS_REQ_OK: { /* PRLI response */ + /* Normal case for I or I+T */ + if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI, + __efc_d_common, __func__)) + return; + + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + /* sm: / process PRLI payload */ + efc_process_prli_payload(node, cbdata->els_rsp.virt); + efc_node_transition(node, __efc_d_device_ready, NULL); + break; + } + + case EFC_EVT_SRRS_ELS_REQ_FAIL: { /* PRLI response failed */ + /* I, I+T, assume some link failure, shutdown node */ + if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI, + __efc_d_common, __func__)) + return; + + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL); + break; + } + + case EFC_EVT_SRRS_ELS_REQ_RJT: { + /* PRLI rejected by remote + * Normal for I, I+T (connected to an I) + * Node doesn't want to be a target, stay here and wait for a + * PRLI from the remote node + * if it really wants to connect to us as target + */ + if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI, + __efc_d_common, __func__)) + return; + + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + break; + } + + case EFC_EVT_SRRS_ELS_CMPL_OK: { + /* Normal T, I+T, target-server rejected the process login 
*/ + /* This would be received only in the case where we sent + * LS_RJT for the PRLI, so + * do nothing. (note: as T only we could shutdown the node) + */ + WARN_ON(!node->els_cmpl_cnt); + node->els_cmpl_cnt--; + break; + } + + case EFC_EVT_PLOGI_RCVD: { + /*sm: / save sparams, set send_plogi_acc, + *post implicit logout + * Save plogi parameters + */ + efc_node_save_sparms(node, cbdata->payload->dma.virt); + efc_send_ls_acc_after_attach(node, + cbdata->header->dma.virt, + EFC_NODE_SEND_LS_ACC_PLOGI); + + /* Restart node attach with new service parameters, + * and send ACC + */ + efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO, + NULL); + break; + } + + case EFC_EVT_LOGO_RCVD: { + /* I, T, I+T */ + struct fc_frame_header *hdr = cbdata->header->dma.virt; + + node_printf(node, "%s received attached=%d\n", + efc_sm_event_name(evt), + node->attached); + /* sm: / send LOGO acc */ + efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id)); + efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL); + break; + } + + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +__efc_d_wait_logo_acc_cmpl(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_SRRS_ELS_CMPL_OK: + case EFC_EVT_SRRS_ELS_CMPL_FAIL: + /* sm: / post explicit logout */ + WARN_ON(!node->els_cmpl_cnt); + node->els_cmpl_cnt--; + efc_node_post_event(node, + EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL); + break; + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +__efc_d_device_ready(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + struct efc *efc = node->efc; + + efc_node_evt_set(ctx, evt, __func__); + + if (evt != EFC_EVT_FCP_CMD_RCVD) + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + node->fcp_enabled = true; + if (node->targ) { + efc_log_info(efc, + "[%s] found (target) WWPN %s WWNN %s\n", + node->display_name, + node->wwpn, node->wwnn); + if (node->nport->enable_ini) + efc->tt.scsi_new_node(efc, node); + } + break; + + case EFC_EVT_EXIT: + node->fcp_enabled = false; + break; + + case EFC_EVT_PLOGI_RCVD: { + /* sm: / save sparams, set send_plogi_acc, post implicit + * logout + * Save plogi parameters + */ + efc_node_save_sparms(node, cbdata->payload->dma.virt); + efc_send_ls_acc_after_attach(node, + cbdata->header->dma.virt, + EFC_NODE_SEND_LS_ACC_PLOGI); + + /* + * Restart node attach with new service parameters, + * and send ACC + */ + efc_node_post_event(node, + EFC_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL); + break; + } + + case EFC_EVT_PRLI_RCVD: { + /* T, I+T: remote initiator is slow to get started */ + struct fc_frame_header *hdr = cbdata->header->dma.virt; + struct { + struct fc_els_prli prli; + struct fc_els_spp sp; + } *pp; + + pp = cbdata->payload->dma.virt; + if (pp->sp.spp_type != FC_TYPE_FCP) { + /*Only FCP is supported*/ + efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id), + ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0); + break; + } + + efc_process_prli_payload(node, cbdata->payload->dma.virt); + efc_send_prli_acc(node, be16_to_cpu(hdr->fh_ox_id)); + break; + } + + case EFC_EVT_PRLO_RCVD: { + struct fc_frame_header *hdr = cbdata->header->dma.virt; + /* sm: / send PRLO acc */ + efc_send_prlo_acc(node, be16_to_cpu(hdr->fh_ox_id)); + /* need 
implicit logout? */ + break; + } + + case EFC_EVT_LOGO_RCVD: { + struct fc_frame_header *hdr = cbdata->header->dma.virt; + + node_printf(node, "%s received attached=%d\n", + efc_sm_event_name(evt), node->attached); + /* sm: / send LOGO acc */ + efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id)); + efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL); + break; + } + + case EFC_EVT_ADISC_RCVD: { + struct fc_frame_header *hdr = cbdata->header->dma.virt; + /* sm: / send ADISC acc */ + efc_send_adisc_acc(node, be16_to_cpu(hdr->fh_ox_id)); + break; + } + + case EFC_EVT_ABTS_RCVD: + /* sm: / process ABTS */ + efc_log_err(efc, "Unexpected event:%s\n", + efc_sm_event_name(evt)); + break; + + case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY: + break; + + case EFC_EVT_NODE_REFOUND: + break; + + case EFC_EVT_NODE_MISSING: + if (node->nport->enable_rscn) + efc_node_transition(node, __efc_d_device_gone, NULL); + + break; + + case EFC_EVT_SRRS_ELS_CMPL_OK: + /* T, or I+T, PRLI accept completed ok */ + WARN_ON(!node->els_cmpl_cnt); + node->els_cmpl_cnt--; + break; + + case EFC_EVT_SRRS_ELS_CMPL_FAIL: + /* T, or I+T, PRLI accept failed to complete */ + WARN_ON(!node->els_cmpl_cnt); + node->els_cmpl_cnt--; + node_printf(node, "Failed to send PRLI LS_ACC\n"); + break; + + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +__efc_d_device_gone(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + struct efc *efc = node->efc; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: { + int rc = EFC_SCSI_CALL_COMPLETE; + int rc_2 = EFC_SCSI_CALL_COMPLETE; + static const char * const labels[] = { + "none", "initiator", "target", "initiator+target" + }; + + efc_log_info(efc, "[%s] missing (%s) WWPN %s WWNN %s\n", + node->display_name, + labels[(node->targ << 1) | (node->init)], + node->wwpn, node->wwnn); + + switch (efc_node_get_enable(node)) { + case EFC_NODE_ENABLE_T_TO_T: + case EFC_NODE_ENABLE_I_TO_T: + case EFC_NODE_ENABLE_IT_TO_T: + rc = efc->tt.scsi_del_node(efc, node, + EFC_SCSI_TARGET_MISSING); + break; + + case EFC_NODE_ENABLE_T_TO_I: + case EFC_NODE_ENABLE_I_TO_I: + case EFC_NODE_ENABLE_IT_TO_I: + rc = efc->tt.scsi_del_node(efc, node, + EFC_SCSI_INITIATOR_MISSING); + break; + + case EFC_NODE_ENABLE_T_TO_IT: + rc = efc->tt.scsi_del_node(efc, node, + EFC_SCSI_INITIATOR_MISSING); + break; + + case EFC_NODE_ENABLE_I_TO_IT: + rc = efc->tt.scsi_del_node(efc, node, + EFC_SCSI_TARGET_MISSING); + break; + + case EFC_NODE_ENABLE_IT_TO_IT: + rc = efc->tt.scsi_del_node(efc, node, + EFC_SCSI_INITIATOR_MISSING); + rc_2 = efc->tt.scsi_del_node(efc, node, + EFC_SCSI_TARGET_MISSING); + break; + + default: + rc = EFC_SCSI_CALL_COMPLETE; + break; + } + + if (rc == EFC_SCSI_CALL_COMPLETE && + rc_2 == EFC_SCSI_CALL_COMPLETE) + efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL); + + break; + } + case EFC_EVT_NODE_REFOUND: + /* two approaches, reauthenticate with PLOGI/PRLI, or ADISC */ + + /* reauthenticate with PLOGI/PRLI */ + /* efc_node_transition(node, __efc_d_discovered, NULL); */ + + /* reauthenticate with ADISC */ + /* sm: / send ADISC */ + efc_send_adisc(node); + efc_node_transition(node, __efc_d_wait_adisc_rsp, NULL); + break; + + case EFC_EVT_PLOGI_RCVD: { + /* sm: / save sparams, set send_plogi_acc, post implicit + * logout + * Save plogi parameters + */ + efc_node_save_sparms(node, cbdata->payload->dma.virt); + efc_send_ls_acc_after_attach(node, + 
cbdata->header->dma.virt, + EFC_NODE_SEND_LS_ACC_PLOGI); + + /* + * Restart node attach with new service parameters, and send + * ACC + */ + efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO, + NULL); + break; + } + + case EFC_EVT_FCP_CMD_RCVD: { + /* most likely a stale frame (received prior to link down), + * if attempt to send LOGO, will probably timeout and eat + * up 20s; thus, drop FCP_CMND + */ + node_printf(node, "FCP_CMND received, drop\n"); + break; + } + case EFC_EVT_LOGO_RCVD: { + /* I, T, I+T */ + struct fc_frame_header *hdr = cbdata->header->dma.virt; + + node_printf(node, "%s received attached=%d\n", + efc_sm_event_name(evt), node->attached); + /* sm: / send LOGO acc */ + efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id)); + efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL); + break; + } + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} + +void +__efc_d_wait_adisc_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_SRRS_ELS_REQ_OK: + if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC, + __efc_d_common, __func__)) + return; + + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + efc_node_transition(node, __efc_d_device_ready, NULL); + break; + + case EFC_EVT_SRRS_ELS_REQ_RJT: + /* received an LS_RJT, in this case, send shutdown + * (explicit logo) event which will unregister the node, + * and start over with PLOGI + */ + if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC, + __efc_d_common, __func__)) + return; + + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + /* sm: / post explicit logout */ + efc_node_post_event(node, + EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, + NULL); + break; + + case EFC_EVT_LOGO_RCVD: { + /* In this case, we have the equivalent of an LS_RJT for + * the ADISC, so we need to abort the ADISC, and re-login + * with PLOGI + */ + /* sm: / request abort, send LOGO acc */ + struct fc_frame_header *hdr = cbdata->header->dma.virt; + + node_printf(node, "%s received attached=%d\n", + efc_sm_event_name(evt), node->attached); + + efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id)); + efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL); + break; + } + default: + __efc_d_common(__func__, ctx, evt, arg); + } +} diff --git a/drivers/scsi/elx/libefc/efc_device.h b/drivers/scsi/elx/libefc/efc_device.h new file mode 100644 index 000000000..3cf1d8c66 --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_device.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
+ */ + +/* + * Node state machine functions for remote device node sm + */ + +#ifndef __EFCT_DEVICE_H__ +#define __EFCT_DEVICE_H__ +void +efc_node_init_device(struct efc_node *node, bool send_plogi); +void +efc_process_prli_payload(struct efc_node *node, + void *prli); +void +efc_d_send_prli_rsp(struct efc_node *node, uint16_t ox_id); +void +efc_send_ls_acc_after_attach(struct efc_node *node, + struct fc_frame_header *hdr, + enum efc_node_send_ls_acc ls); +void +__efc_d_wait_loop(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_d_wait_plogi_acc_cmpl(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_d_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg); +void +__efc_d_wait_plogi_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_d_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_d_wait_domain_attach(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_d_wait_topology_notify(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_d_wait_node_attach(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_d_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_d_initiate_shutdown(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_d_port_logged_in(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_d_wait_logo_acc_cmpl(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_d_device_ready(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_d_device_gone(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_d_wait_adisc_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_d_wait_logo_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); + +#endif /* __EFCT_DEVICE_H__ */ diff --git a/drivers/scsi/elx/libefc/efc_domain.c b/drivers/scsi/elx/libefc/efc_domain.c new file mode 100644 index 000000000..ca9d7ff2c --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_domain.c @@ -0,0 +1,1088 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
+ */ + +/* + * domain_sm Domain State Machine: States + */ + +#include "efc.h" + +int +efc_domain_cb(void *arg, int event, void *data) +{ + struct efc *efc = arg; + struct efc_domain *domain = NULL; + int rc = 0; + unsigned long flags = 0; + + if (event != EFC_HW_DOMAIN_FOUND) + domain = data; + + /* Accept domain callback events from the user driver */ + spin_lock_irqsave(&efc->lock, flags); + switch (event) { + case EFC_HW_DOMAIN_FOUND: { + u64 fcf_wwn = 0; + struct efc_domain_record *drec = data; + + /* extract the fcf_wwn */ + fcf_wwn = be64_to_cpu(*((__be64 *)drec->wwn)); + + efc_log_debug(efc, "Domain found: wwn %016llX\n", fcf_wwn); + + /* lookup domain, or allocate a new one */ + domain = efc->domain; + if (!domain) { + domain = efc_domain_alloc(efc, fcf_wwn); + if (!domain) { + efc_log_err(efc, "efc_domain_alloc() failed\n"); + rc = -1; + break; + } + efc_sm_transition(&domain->drvsm, __efc_domain_init, + NULL); + } + efc_domain_post_event(domain, EFC_EVT_DOMAIN_FOUND, drec); + break; + } + + case EFC_HW_DOMAIN_LOST: + domain_trace(domain, "EFC_HW_DOMAIN_LOST:\n"); + efc->hold_frames = true; + efc_domain_post_event(domain, EFC_EVT_DOMAIN_LOST, NULL); + break; + + case EFC_HW_DOMAIN_ALLOC_OK: + domain_trace(domain, "EFC_HW_DOMAIN_ALLOC_OK:\n"); + efc_domain_post_event(domain, EFC_EVT_DOMAIN_ALLOC_OK, NULL); + break; + + case EFC_HW_DOMAIN_ALLOC_FAIL: + domain_trace(domain, "EFC_HW_DOMAIN_ALLOC_FAIL:\n"); + efc_domain_post_event(domain, EFC_EVT_DOMAIN_ALLOC_FAIL, + NULL); + break; + + case EFC_HW_DOMAIN_ATTACH_OK: + domain_trace(domain, "EFC_HW_DOMAIN_ATTACH_OK:\n"); + efc_domain_post_event(domain, EFC_EVT_DOMAIN_ATTACH_OK, NULL); + break; + + case EFC_HW_DOMAIN_ATTACH_FAIL: + domain_trace(domain, "EFC_HW_DOMAIN_ATTACH_FAIL:\n"); + efc_domain_post_event(domain, + EFC_EVT_DOMAIN_ATTACH_FAIL, NULL); + break; + + case EFC_HW_DOMAIN_FREE_OK: + domain_trace(domain, "EFC_HW_DOMAIN_FREE_OK:\n"); + efc_domain_post_event(domain, EFC_EVT_DOMAIN_FREE_OK, NULL); + break; + + case EFC_HW_DOMAIN_FREE_FAIL: + domain_trace(domain, "EFC_HW_DOMAIN_FREE_FAIL:\n"); + efc_domain_post_event(domain, EFC_EVT_DOMAIN_FREE_FAIL, NULL); + break; + + default: + efc_log_warn(efc, "unsupported event %#x\n", event); + } + spin_unlock_irqrestore(&efc->lock, flags); + + if (efc->domain && domain->req_accept_frames) { + domain->req_accept_frames = false; + efc->hold_frames = false; + } + + return rc; +} + +static void +_efc_domain_free(struct kref *arg) +{ + struct efc_domain *domain = container_of(arg, struct efc_domain, ref); + struct efc *efc = domain->efc; + + if (efc->domain_free_cb) + (*efc->domain_free_cb)(efc, efc->domain_free_cb_arg); + + kfree(domain); +} + +void +efc_domain_free(struct efc_domain *domain) +{ + struct efc *efc; + + efc = domain->efc; + + /* Hold frames to clear the domain pointer from the xport lookup */ + efc->hold_frames = false; + + efc_log_debug(efc, "Domain free: wwn %016llX\n", domain->fcf_wwn); + + xa_destroy(&domain->lookup); + efc->domain = NULL; + kref_put(&domain->ref, domain->release); +} + +struct efc_domain * +efc_domain_alloc(struct efc *efc, uint64_t fcf_wwn) +{ + struct efc_domain *domain; + + domain = kzalloc(sizeof(*domain), GFP_ATOMIC); + if (!domain) + return NULL; + + domain->efc = efc; + domain->drvsm.app = domain; + + /* initialize refcount */ + kref_init(&domain->ref); + domain->release = _efc_domain_free; + + xa_init(&domain->lookup); + + INIT_LIST_HEAD(&domain->nport_list); + efc->domain = domain; + domain->fcf_wwn = fcf_wwn; + efc_log_debug(efc, "Domain 
allocated: wwn %016llX\n", domain->fcf_wwn); + + return domain; +} + +void +efc_register_domain_free_cb(struct efc *efc, + void (*callback)(struct efc *efc, void *arg), + void *arg) +{ + /* Register a callback to be called when the domain is freed */ + efc->domain_free_cb = callback; + efc->domain_free_cb_arg = arg; + if (!efc->domain && callback) + (*callback)(efc, arg); +} + +static void +__efc_domain_common(const char *funcname, struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_domain *domain = ctx->app; + + switch (evt) { + case EFC_EVT_ENTER: + case EFC_EVT_REENTER: + case EFC_EVT_EXIT: + case EFC_EVT_ALL_CHILD_NODES_FREE: + /* + * this can arise if an FLOGI fails on the NPORT, + * and the NPORT is shutdown + */ + break; + default: + efc_log_warn(domain->efc, "%-20s %-20s not handled\n", + funcname, efc_sm_event_name(evt)); + } +} + +static void +__efc_domain_common_shutdown(const char *funcname, struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_domain *domain = ctx->app; + + switch (evt) { + case EFC_EVT_ENTER: + case EFC_EVT_REENTER: + case EFC_EVT_EXIT: + break; + case EFC_EVT_DOMAIN_FOUND: + /* save drec, mark domain_found_pending */ + memcpy(&domain->pending_drec, arg, + sizeof(domain->pending_drec)); + domain->domain_found_pending = true; + break; + case EFC_EVT_DOMAIN_LOST: + /* unmark domain_found_pending */ + domain->domain_found_pending = false; + break; + + default: + efc_log_warn(domain->efc, "%-20s %-20s not handled\n", + funcname, efc_sm_event_name(evt)); + } +} + +#define std_domain_state_decl(...)\ + struct efc_domain *domain = NULL;\ + struct efc *efc = NULL;\ + \ + WARN_ON(!ctx || !ctx->app);\ + domain = ctx->app;\ + WARN_ON(!domain->efc);\ + efc = domain->efc + +void +__efc_domain_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, + void *arg) +{ + std_domain_state_decl(); + + domain_sm_trace(domain); + + switch (evt) { + case EFC_EVT_ENTER: + domain->attached = false; + break; + + case EFC_EVT_DOMAIN_FOUND: { + u32 i; + struct efc_domain_record *drec = arg; + struct efc_nport *nport; + + u64 my_wwnn = efc->req_wwnn; + u64 my_wwpn = efc->req_wwpn; + __be64 bewwpn; + + if (my_wwpn == 0 || my_wwnn == 0) { + efc_log_debug(efc, "using default hardware WWN config\n"); + my_wwpn = efc->def_wwpn; + my_wwnn = efc->def_wwnn; + } + + efc_log_debug(efc, "Create nport WWPN %016llX WWNN %016llX\n", + my_wwpn, my_wwnn); + + /* Allocate a nport and transition to __efc_nport_allocated */ + nport = efc_nport_alloc(domain, my_wwpn, my_wwnn, U32_MAX, + efc->enable_ini, efc->enable_tgt); + + if (!nport) { + efc_log_err(efc, "efc_nport_alloc() failed\n"); + break; + } + efc_sm_transition(&nport->sm, __efc_nport_allocated, NULL); + + bewwpn = cpu_to_be64(nport->wwpn); + + /* allocate struct efc_nport object for local port + * Note: drec->fc_id is ALPA from read_topology only if loop + */ + if (efc_cmd_nport_alloc(efc, nport, NULL, (uint8_t *)&bewwpn)) { + efc_log_err(efc, "Can't allocate port\n"); + efc_nport_free(nport); + break; + } + + domain->is_loop = drec->is_loop; + + /* + * If the loop position map includes ALPA == 0, + * then we are in a public loop (NL_PORT) + * Note that the first element of the loopmap[] + * contains the count of elements, and if + * ALPA == 0 is present, it will occupy the first + * location after the count. 
+ */ + domain->is_nlport = drec->map.loop[1] == 0x00; + + if (!domain->is_loop) { + /* Initiate HW domain alloc */ + if (efc_cmd_domain_alloc(efc, domain, drec->index)) { + efc_log_err(efc, + "Failed to initiate HW domain allocation\n"); + break; + } + efc_sm_transition(ctx, __efc_domain_wait_alloc, arg); + break; + } + + efc_log_debug(efc, "%s fc_id=%#x speed=%d\n", + drec->is_loop ? + (domain->is_nlport ? + "public-loop" : "loop") : "other", + drec->fc_id, drec->speed); + + nport->fc_id = drec->fc_id; + nport->topology = EFC_NPORT_TOPO_FC_AL; + snprintf(nport->display_name, sizeof(nport->display_name), + "s%06x", drec->fc_id); + + if (efc->enable_ini) { + u32 count = drec->map.loop[0]; + + efc_log_debug(efc, "%d position map entries\n", + count); + for (i = 1; i <= count; i++) { + if (drec->map.loop[i] != drec->fc_id) { + struct efc_node *node; + + efc_log_debug(efc, "%#x -> %#x\n", + drec->fc_id, + drec->map.loop[i]); + node = efc_node_alloc(nport, + drec->map.loop[i], + false, true); + if (!node) { + efc_log_err(efc, + "efc_node_alloc() failed\n"); + break; + } + efc_node_transition(node, + __efc_d_wait_loop, + NULL); + } + } + } + + /* Initiate HW domain alloc */ + if (efc_cmd_domain_alloc(efc, domain, drec->index)) { + efc_log_err(efc, + "Failed to initiate HW domain allocation\n"); + break; + } + efc_sm_transition(ctx, __efc_domain_wait_alloc, arg); + break; + } + default: + __efc_domain_common(__func__, ctx, evt, arg); + } +} + +void +__efc_domain_wait_alloc(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + std_domain_state_decl(); + + domain_sm_trace(domain); + + switch (evt) { + case EFC_EVT_DOMAIN_ALLOC_OK: { + struct fc_els_flogi *sp; + struct efc_nport *nport; + + nport = domain->nport; + if (WARN_ON(!nport)) + return; + + sp = (struct fc_els_flogi *)nport->service_params; + + /* Save the domain service parameters */ + memcpy(domain->service_params + 4, domain->dma.virt, + sizeof(struct fc_els_flogi) - 4); + memcpy(nport->service_params + 4, domain->dma.virt, + sizeof(struct fc_els_flogi) - 4); + + /* + * Update the nport's service parameters, + * user might have specified non-default names + */ + sp->fl_wwpn = cpu_to_be64(nport->wwpn); + sp->fl_wwnn = cpu_to_be64(nport->wwnn); + + /* + * Take the loop topology path, + * unless we are an NL_PORT (public loop) + */ + if (domain->is_loop && !domain->is_nlport) { + /* + * For loop, we already have our FC ID + * and don't need fabric login. + * Transition to the allocated state and + * post an event to attach to + * the domain. Note that this breaks the + * normal action/transition + * pattern here to avoid a race with the + * domain attach callback. 
+ */ + /* sm: is_loop / domain_attach */ + efc_sm_transition(ctx, __efc_domain_allocated, NULL); + __efc_domain_attach_internal(domain, nport->fc_id); + break; + } + { + struct efc_node *node; + + /* alloc fabric node, send FLOGI */ + node = efc_node_find(nport, FC_FID_FLOGI); + if (node) { + efc_log_err(efc, + "Fabric Controller node already exists\n"); + break; + } + node = efc_node_alloc(nport, FC_FID_FLOGI, + false, false); + if (!node) { + efc_log_err(efc, + "Error: efc_node_alloc() failed\n"); + } else { + efc_node_transition(node, + __efc_fabric_init, NULL); + } + /* Accept frames */ + domain->req_accept_frames = true; + } + /* sm: / start fabric logins */ + efc_sm_transition(ctx, __efc_domain_allocated, NULL); + break; + } + + case EFC_EVT_DOMAIN_ALLOC_FAIL: + efc_log_err(efc, "%s recv'd waiting for DOMAIN_ALLOC_OK;", + efc_sm_event_name(evt)); + efc_log_err(efc, "shutting down domain\n"); + domain->req_domain_free = true; + break; + + case EFC_EVT_DOMAIN_FOUND: + /* Should not happen */ + break; + + case EFC_EVT_DOMAIN_LOST: + efc_log_debug(efc, + "%s received while waiting for hw_domain_alloc()\n", + efc_sm_event_name(evt)); + efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL); + break; + + default: + __efc_domain_common(__func__, ctx, evt, arg); + } +} + +void +__efc_domain_allocated(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + std_domain_state_decl(); + + domain_sm_trace(domain); + + switch (evt) { + case EFC_EVT_DOMAIN_REQ_ATTACH: { + int rc = 0; + u32 fc_id; + + if (WARN_ON(!arg)) + return; + + fc_id = *((u32 *)arg); + efc_log_debug(efc, "Requesting hw domain attach fc_id x%x\n", + fc_id); + /* Update nport lookup */ + rc = xa_err(xa_store(&domain->lookup, fc_id, domain->nport, + GFP_ATOMIC)); + if (rc) { + efc_log_err(efc, "Sport lookup store failed: %d\n", rc); + return; + } + + /* Update display name for the nport */ + efc_node_fcid_display(fc_id, domain->nport->display_name, + sizeof(domain->nport->display_name)); + + /* Issue domain attach call */ + rc = efc_cmd_domain_attach(efc, domain, fc_id); + if (rc) { + efc_log_err(efc, "efc_hw_domain_attach failed: %d\n", + rc); + return; + } + /* sm: / domain_attach */ + efc_sm_transition(ctx, __efc_domain_wait_attach, NULL); + break; + } + + case EFC_EVT_DOMAIN_FOUND: + /* Should not happen */ + efc_log_err(efc, "%s: evt: %d should not happen\n", + __func__, evt); + break; + + case EFC_EVT_DOMAIN_LOST: { + efc_log_debug(efc, + "%s received while in EFC_EVT_DOMAIN_REQ_ATTACH\n", + efc_sm_event_name(evt)); + if (!list_empty(&domain->nport_list)) { + /* + * if there are nports, transition to + * wait state and send shutdown to each + * nport + */ + struct efc_nport *nport = NULL, *nport_next = NULL; + + efc_sm_transition(ctx, __efc_domain_wait_nports_free, + NULL); + list_for_each_entry_safe(nport, nport_next, + &domain->nport_list, + list_entry) { + efc_sm_post_event(&nport->sm, + EFC_EVT_SHUTDOWN, NULL); + } + } else { + /* no nports exist, free domain */ + efc_sm_transition(ctx, __efc_domain_wait_shutdown, + NULL); + if (efc_cmd_domain_free(efc, domain)) + efc_log_err(efc, "hw_domain_free failed\n"); + } + + break; + } + + default: + __efc_domain_common(__func__, ctx, evt, arg); + } +} + +void +__efc_domain_wait_attach(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + std_domain_state_decl(); + + domain_sm_trace(domain); + + switch (evt) { + case EFC_EVT_DOMAIN_ATTACH_OK: { + struct efc_node *node = NULL; + struct efc_nport *nport, *next_nport; + unsigned long index; + + /* + * Set 
domain notify pending state to avoid + * duplicate domain event post + */ + domain->domain_notify_pend = true; + + /* Mark as attached */ + domain->attached = true; + + /* Transition to ready */ + /* sm: / forward event to all nports and nodes */ + efc_sm_transition(ctx, __efc_domain_ready, NULL); + + /* We have an FCFI, so we can accept frames */ + domain->req_accept_frames = true; + + /* + * Notify all nodes that the domain attach request + * has completed + * Note: nport will have already received notification + * of nport attached as a result of the HW's port attach. + */ + list_for_each_entry_safe(nport, next_nport, + &domain->nport_list, list_entry) { + xa_for_each(&nport->lookup, index, node) { + efc_node_post_event(node, + EFC_EVT_DOMAIN_ATTACH_OK, + NULL); + } + } + domain->domain_notify_pend = false; + break; + } + + case EFC_EVT_DOMAIN_ATTACH_FAIL: + efc_log_debug(efc, + "%s received while waiting for hw attach\n", + efc_sm_event_name(evt)); + break; + + case EFC_EVT_DOMAIN_FOUND: + /* Should not happen */ + efc_log_err(efc, "%s: evt: %d should not happen\n", + __func__, evt); + break; + + case EFC_EVT_DOMAIN_LOST: + /* + * Domain lost while waiting for an attach to complete, + * go to a state that waits for the domain attach to + * complete, then handle domain lost + */ + efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL); + break; + + case EFC_EVT_DOMAIN_REQ_ATTACH: + /* + * In P2P we can get an attach request from + * the other FLOGI path, so drop this one + */ + break; + + default: + __efc_domain_common(__func__, ctx, evt, arg); + } +} + +void +__efc_domain_ready(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) +{ + std_domain_state_decl(); + + domain_sm_trace(domain); + + switch (evt) { + case EFC_EVT_ENTER: { + /* start any pending vports */ + if (efc_vport_start(domain)) { + efc_log_debug(domain->efc, + "efc_vport_start didn't start vports\n"); + } + break; + } + case EFC_EVT_DOMAIN_LOST: { + if (!list_empty(&domain->nport_list)) { + /* + * if there are nports, transition to wait state + * and send shutdown to each nport + */ + struct efc_nport *nport = NULL, *nport_next = NULL; + + efc_sm_transition(ctx, __efc_domain_wait_nports_free, + NULL); + list_for_each_entry_safe(nport, nport_next, + &domain->nport_list, + list_entry) { + efc_sm_post_event(&nport->sm, + EFC_EVT_SHUTDOWN, NULL); + } + } else { + /* no nports exist, free domain */ + efc_sm_transition(ctx, __efc_domain_wait_shutdown, + NULL); + if (efc_cmd_domain_free(efc, domain)) + efc_log_err(efc, "hw_domain_free failed\n"); + } + break; + } + + case EFC_EVT_DOMAIN_FOUND: + /* Should not happen */ + efc_log_err(efc, "%s: evt: %d should not happen\n", + __func__, evt); + break; + + case EFC_EVT_DOMAIN_REQ_ATTACH: { + /* can happen during p2p */ + u32 fc_id; + + fc_id = *((u32 *)arg); + + /* Assume that the domain is attached */ + WARN_ON(!domain->attached); + + /* + * Verify that the requested FC_ID + * is the same as the one we're working with + */ + WARN_ON(domain->nport->fc_id != fc_id); + break; + } + + default: + __efc_domain_common(__func__, ctx, evt, arg); + } +} + +void +__efc_domain_wait_nports_free(struct efc_sm_ctx *ctx, enum efc_sm_event evt, + void *arg) +{ + std_domain_state_decl(); + + domain_sm_trace(domain); + + /* Wait for nodes to free prior to the domain shutdown */ + switch (evt) { + case EFC_EVT_ALL_CHILD_NODES_FREE: { + int rc; + + /* sm: / efc_hw_domain_free */ + efc_sm_transition(ctx, __efc_domain_wait_shutdown, NULL); + + /* Request efc_hw_domain_free and wait for 
completion */ + rc = efc_cmd_domain_free(efc, domain); + if (rc) { + efc_log_err(efc, "efc_hw_domain_free() failed: %d\n", + rc); + } + break; + } + default: + __efc_domain_common_shutdown(__func__, ctx, evt, arg); + } +} + +void +__efc_domain_wait_shutdown(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + std_domain_state_decl(); + + domain_sm_trace(domain); + + switch (evt) { + case EFC_EVT_DOMAIN_FREE_OK: + /* sm: / domain_free */ + if (domain->domain_found_pending) { + /* + * save fcf_wwn and drec from this domain, + * free current domain and allocate + * a new one with the same fcf_wwn + * could use a SLI-4 "re-register VPI" + * operation here? + */ + u64 fcf_wwn = domain->fcf_wwn; + struct efc_domain_record drec = domain->pending_drec; + + efc_log_debug(efc, "Reallocating domain\n"); + domain->req_domain_free = true; + domain = efc_domain_alloc(efc, fcf_wwn); + + if (!domain) { + efc_log_err(efc, + "efc_domain_alloc() failed\n"); + return; + } + /* + * got a new domain; at this point, + * there are at least two domains + * once the req_domain_free flag is processed, + * the associated domain will be removed. + */ + efc_sm_transition(&domain->drvsm, __efc_domain_init, + NULL); + efc_sm_post_event(&domain->drvsm, + EFC_EVT_DOMAIN_FOUND, &drec); + } else { + domain->req_domain_free = true; + } + break; + default: + __efc_domain_common_shutdown(__func__, ctx, evt, arg); + } +} + +void +__efc_domain_wait_domain_lost(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + std_domain_state_decl(); + + domain_sm_trace(domain); + + /* + * Wait for the domain alloc/attach completion + * after receiving a domain lost. + */ + switch (evt) { + case EFC_EVT_DOMAIN_ALLOC_OK: + case EFC_EVT_DOMAIN_ATTACH_OK: { + if (!list_empty(&domain->nport_list)) { + /* + * if there are nports, transition to + * wait state and send shutdown to each nport + */ + struct efc_nport *nport = NULL, *nport_next = NULL; + + efc_sm_transition(ctx, __efc_domain_wait_nports_free, + NULL); + list_for_each_entry_safe(nport, nport_next, + &domain->nport_list, + list_entry) { + efc_sm_post_event(&nport->sm, + EFC_EVT_SHUTDOWN, NULL); + } + } else { + /* no nports exist, free domain */ + efc_sm_transition(ctx, __efc_domain_wait_shutdown, + NULL); + if (efc_cmd_domain_free(efc, domain)) + efc_log_err(efc, "hw_domain_free() failed\n"); + } + break; + } + case EFC_EVT_DOMAIN_ALLOC_FAIL: + case EFC_EVT_DOMAIN_ATTACH_FAIL: + efc_log_err(efc, "[domain] %-20s: failed\n", + efc_sm_event_name(evt)); + break; + + default: + __efc_domain_common_shutdown(__func__, ctx, evt, arg); + } +} + +void +__efc_domain_attach_internal(struct efc_domain *domain, u32 s_id) +{ + memcpy(domain->dma.virt, + ((uint8_t *)domain->flogi_service_params) + 4, + sizeof(struct fc_els_flogi) - 4); + (void)efc_sm_post_event(&domain->drvsm, EFC_EVT_DOMAIN_REQ_ATTACH, + &s_id); +} + +void +efc_domain_attach(struct efc_domain *domain, u32 s_id) +{ + __efc_domain_attach_internal(domain, s_id); +} + +int +efc_domain_post_event(struct efc_domain *domain, + enum efc_sm_event event, void *arg) +{ + int rc; + bool req_domain_free; + + rc = efc_sm_post_event(&domain->drvsm, event, arg); + + req_domain_free = domain->req_domain_free; + domain->req_domain_free = false; + + if (req_domain_free) + efc_domain_free(domain); + + return rc; +} + +static void +efct_domain_process_pending(struct efc_domain *domain) +{ + struct efc *efc = domain->efc; + struct efc_hw_sequence *seq = NULL; + u32 processed = 0; + unsigned long flags = 0; + + for (;;) { + /* need to 
check for hold frames condition after each frame + * processed because any given frame could cause a transition + * to a state that holds frames + */ + if (efc->hold_frames) + break; + + /* Get next frame/sequence */ + spin_lock_irqsave(&efc->pend_frames_lock, flags); + + if (!list_empty(&efc->pend_frames)) { + seq = list_first_entry(&efc->pend_frames, + struct efc_hw_sequence, list_entry); + list_del(&seq->list_entry); + } + + if (!seq) { + processed = efc->pend_frames_processed; + efc->pend_frames_processed = 0; + spin_unlock_irqrestore(&efc->pend_frames_lock, flags); + break; + } + efc->pend_frames_processed++; + + spin_unlock_irqrestore(&efc->pend_frames_lock, flags); + + /* now dispatch frame(s) to dispatch function */ + if (efc_domain_dispatch_frame(domain, seq)) + efc->tt.hw_seq_free(efc, seq); + + seq = NULL; + } + + if (processed != 0) + efc_log_debug(efc, "%u domain frames held and processed\n", + processed); +} + +void +efc_dispatch_frame(struct efc *efc, struct efc_hw_sequence *seq) +{ + struct efc_domain *domain = efc->domain; + + /* + * If we are holding frames or the domain is not yet registered or + * there's already frames on the pending list, + * then add the new frame to pending list + */ + if (!domain || efc->hold_frames || !list_empty(&efc->pend_frames)) { + unsigned long flags = 0; + + spin_lock_irqsave(&efc->pend_frames_lock, flags); + INIT_LIST_HEAD(&seq->list_entry); + list_add_tail(&seq->list_entry, &efc->pend_frames); + spin_unlock_irqrestore(&efc->pend_frames_lock, flags); + + if (domain) { + /* immediately process pending frames */ + efct_domain_process_pending(domain); + } + } else { + /* + * We are not holding frames and pending list is empty, + * just process frame. A non-zero return means the frame + * was not handled - so cleanup + */ + if (efc_domain_dispatch_frame(domain, seq)) + efc->tt.hw_seq_free(efc, seq); + } +} + +int +efc_domain_dispatch_frame(void *arg, struct efc_hw_sequence *seq) +{ + struct efc_domain *domain = (struct efc_domain *)arg; + struct efc *efc = domain->efc; + struct fc_frame_header *hdr; + struct efc_node *node = NULL; + struct efc_nport *nport = NULL; + unsigned long flags = 0; + u32 s_id, d_id, rc = EFC_HW_SEQ_FREE; + + if (!seq->header || !seq->header->dma.virt || !seq->payload->dma.virt) { + efc_log_err(efc, "Sequence header or payload is null\n"); + return rc; + } + + hdr = seq->header->dma.virt; + + /* extract the s_id and d_id */ + s_id = ntoh24(hdr->fh_s_id); + d_id = ntoh24(hdr->fh_d_id); + + spin_lock_irqsave(&efc->lock, flags); + + nport = efc_nport_find(domain, d_id); + if (!nport) { + if (hdr->fh_type == FC_TYPE_FCP) { + /* Drop frame */ + efc_log_warn(efc, "FCP frame with invalid d_id x%x\n", + d_id); + goto out; + } + + /* p2p will use this case */ + nport = domain->nport; + if (!nport || !kref_get_unless_zero(&nport->ref)) { + efc_log_err(efc, "Physical nport is NULL\n"); + goto out; + } + } + + /* Lookup the node given the remote s_id */ + node = efc_node_find(nport, s_id); + + /* If not found, then create a new node */ + if (!node) { + /* + * If this is solicited data or control based on R_CTL and + * there is no node context, then we can drop the frame + */ + if ((hdr->fh_r_ctl == FC_RCTL_DD_SOL_DATA) || + (hdr->fh_r_ctl == FC_RCTL_DD_SOL_CTL)) { + efc_log_debug(efc, "sol data/ctrl frame without node\n"); + goto out_release; + } + + node = efc_node_alloc(nport, s_id, false, false); + if (!node) { + efc_log_err(efc, "efc_node_alloc() failed\n"); + goto out_release; + } + /* don't send PLOGI on efc_d_init entry */ 
+ efc_node_init_device(node, false); + } + + if (node->hold_frames || !list_empty(&node->pend_frames)) { + /* add frame to node's pending list */ + spin_lock(&node->pend_frames_lock); + INIT_LIST_HEAD(&seq->list_entry); + list_add_tail(&seq->list_entry, &node->pend_frames); + spin_unlock(&node->pend_frames_lock); + rc = EFC_HW_SEQ_HOLD; + goto out_release; + } + + /* now dispatch frame to the node frame handler */ + efc_node_dispatch_frame(node, seq); + +out_release: + kref_put(&nport->ref, nport->release); +out: + spin_unlock_irqrestore(&efc->lock, flags); + return rc; +} + +void +efc_node_dispatch_frame(void *arg, struct efc_hw_sequence *seq) +{ + struct fc_frame_header *hdr = seq->header->dma.virt; + u32 port_id; + struct efc_node *node = (struct efc_node *)arg; + struct efc *efc = node->efc; + + port_id = ntoh24(hdr->fh_s_id); + + if (WARN_ON(port_id != node->rnode.fc_id)) + return; + + if ((!(ntoh24(hdr->fh_f_ctl) & FC_FC_END_SEQ)) || + !(ntoh24(hdr->fh_f_ctl) & FC_FC_SEQ_INIT)) { + node_printf(node, + "Drop frame hdr = %08x %08x %08x %08x %08x %08x\n", + cpu_to_be32(((u32 *)hdr)[0]), + cpu_to_be32(((u32 *)hdr)[1]), + cpu_to_be32(((u32 *)hdr)[2]), + cpu_to_be32(((u32 *)hdr)[3]), + cpu_to_be32(((u32 *)hdr)[4]), + cpu_to_be32(((u32 *)hdr)[5])); + return; + } + + switch (hdr->fh_r_ctl) { + case FC_RCTL_ELS_REQ: + case FC_RCTL_ELS_REP: + efc_node_recv_els_frame(node, seq); + break; + + case FC_RCTL_BA_ABTS: + case FC_RCTL_BA_ACC: + case FC_RCTL_BA_RJT: + case FC_RCTL_BA_NOP: + efc_log_err(efc, "Received ABTS:\n"); + break; + + case FC_RCTL_DD_UNSOL_CMD: + case FC_RCTL_DD_UNSOL_CTL: + switch (hdr->fh_type) { + case FC_TYPE_FCP: + if ((hdr->fh_r_ctl & 0xf) == FC_RCTL_DD_UNSOL_CMD) { + if (!node->fcp_enabled) { + efc_node_recv_fcp_cmd(node, seq); + break; + } + efc_log_err(efc, "Recvd FCP CMD. Drop IO\n"); + } else if ((hdr->fh_r_ctl & 0xf) == + FC_RCTL_DD_SOL_DATA) { + node_printf(node, + "solicited data recvd. Drop IO\n"); + } + break; + + case FC_TYPE_CT: + efc_node_recv_ct_frame(node, seq); + break; + default: + break; + } + break; + default: + efc_log_err(efc, "Unhandled frame rctl: %02x\n", hdr->fh_r_ctl); + } +} diff --git a/drivers/scsi/elx/libefc/efc_domain.h b/drivers/scsi/elx/libefc/efc_domain.h new file mode 100644 index 000000000..5468ea7ab --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_domain.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
+ */ + +/* + * Declare driver's domain handler exported interface + */ + +#ifndef __EFCT_DOMAIN_H__ +#define __EFCT_DOMAIN_H__ + +struct efc_domain * +efc_domain_alloc(struct efc *efc, uint64_t fcf_wwn); +void +efc_domain_free(struct efc_domain *domain); + +void +__efc_domain_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg); +void +__efc_domain_wait_alloc(struct efc_sm_ctx *ctx, enum efc_sm_event evt, + void *arg); +void +__efc_domain_allocated(struct efc_sm_ctx *ctx, enum efc_sm_event evt, + void *arg); +void +__efc_domain_wait_attach(struct efc_sm_ctx *ctx, enum efc_sm_event evt, + void *arg); +void +__efc_domain_ready(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg); +void +__efc_domain_wait_nports_free(struct efc_sm_ctx *ctx, enum efc_sm_event evt, + void *arg); +void +__efc_domain_wait_shutdown(struct efc_sm_ctx *ctx, enum efc_sm_event evt, + void *arg); +void +__efc_domain_wait_domain_lost(struct efc_sm_ctx *ctx, enum efc_sm_event evt, + void *arg); +void +efc_domain_attach(struct efc_domain *domain, u32 s_id); +int +efc_domain_post_event(struct efc_domain *domain, enum efc_sm_event event, + void *arg); +void +__efc_domain_attach_internal(struct efc_domain *domain, u32 s_id); + +int +efc_domain_dispatch_frame(void *arg, struct efc_hw_sequence *seq); +void +efc_node_dispatch_frame(void *arg, struct efc_hw_sequence *seq); + +#endif /* __EFCT_DOMAIN_H__ */ diff --git a/drivers/scsi/elx/libefc/efc_els.c b/drivers/scsi/elx/libefc/efc_els.c new file mode 100644 index 000000000..84bc81d7c --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_els.c @@ -0,0 +1,1094 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +/* + * Functions to build and send ELS/CT/BLS commands and responses. + */ + +#include "efc.h" +#include "efc_els.h" +#include "../libefc_sli/sli4.h" + +#define EFC_LOG_ENABLE_ELS_TRACE(efc) \ + (((efc) != NULL) ? (((efc)->logmask & (1U << 1)) != 0) : 0) + +#define node_els_trace() \ + do { \ + if (EFC_LOG_ENABLE_ELS_TRACE(efc)) \ + efc_log_info(efc, "[%s] %-20s\n", \ + node->display_name, __func__); \ + } while (0) + +#define els_io_printf(els, fmt, ...) 
\ + efc_log_err((struct efc *)els->node->efc,\ + "[%s] %-8s " fmt, \ + els->node->display_name,\ + els->display_name, ##__VA_ARGS__) + +#define EFC_ELS_RSP_LEN 1024 +#define EFC_ELS_GID_PT_RSP_LEN 8096 + +struct efc_els_io_req * +efc_els_io_alloc(struct efc_node *node, u32 reqlen) +{ + return efc_els_io_alloc_size(node, reqlen, EFC_ELS_RSP_LEN); +} + +struct efc_els_io_req * +efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen) +{ + struct efc *efc; + struct efc_els_io_req *els; + unsigned long flags = 0; + + efc = node->efc; + + if (!node->els_io_enabled) { + efc_log_err(efc, "els io alloc disabled\n"); + return NULL; + } + + els = mempool_alloc(efc->els_io_pool, GFP_ATOMIC); + if (!els) { + atomic_add_return(1, &efc->els_io_alloc_failed_count); + return NULL; + } + + /* initialize refcount */ + kref_init(&els->ref); + els->release = _efc_els_io_free; + + /* populate generic io fields */ + els->node = node; + + /* now allocate DMA for request and response */ + els->io.req.size = reqlen; + els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size, + &els->io.req.phys, GFP_KERNEL); + if (!els->io.req.virt) { + mempool_free(els, efc->els_io_pool); + return NULL; + } + + els->io.rsp.size = rsplen; + els->io.rsp.virt = dma_alloc_coherent(&efc->pci->dev, els->io.rsp.size, + &els->io.rsp.phys, GFP_KERNEL); + if (!els->io.rsp.virt) { + dma_free_coherent(&efc->pci->dev, els->io.req.size, + els->io.req.virt, els->io.req.phys); + mempool_free(els, efc->els_io_pool); + els = NULL; + } + + if (els) { + /* initialize fields */ + els->els_retries_remaining = EFC_FC_ELS_DEFAULT_RETRIES; + + /* add els structure to ELS IO list */ + INIT_LIST_HEAD(&els->list_entry); + spin_lock_irqsave(&node->els_ios_lock, flags); + list_add_tail(&els->list_entry, &node->els_ios_list); + spin_unlock_irqrestore(&node->els_ios_lock, flags); + } + + return els; +} + +void +efc_els_io_free(struct efc_els_io_req *els) +{ + kref_put(&els->ref, els->release); +} + +void +_efc_els_io_free(struct kref *arg) +{ + struct efc_els_io_req *els = + container_of(arg, struct efc_els_io_req, ref); + struct efc *efc; + struct efc_node *node; + int send_empty_event = false; + unsigned long flags = 0; + + node = els->node; + efc = node->efc; + + spin_lock_irqsave(&node->els_ios_lock, flags); + + list_del(&els->list_entry); + /* Send list empty event if the IO allocator + * is disabled, and the list is empty + * If node->els_io_enabled was not checked, + * the event would be posted continually + */ + send_empty_event = (!node->els_io_enabled && + list_empty(&node->els_ios_list)); + + spin_unlock_irqrestore(&node->els_ios_lock, flags); + + /* free ELS request and response buffers */ + dma_free_coherent(&efc->pci->dev, els->io.rsp.size, + els->io.rsp.virt, els->io.rsp.phys); + dma_free_coherent(&efc->pci->dev, els->io.req.size, + els->io.req.virt, els->io.req.phys); + + mempool_free(els, efc->els_io_pool); + + if (send_empty_event) + efc_scsi_io_list_empty(node->efc, node); +} + +static void +efc_els_retry(struct efc_els_io_req *els); + +static void +efc_els_delay_timer_cb(struct timer_list *t) +{ + struct efc_els_io_req *els = from_timer(els, t, delay_timer); + + /* Retry delay timer expired, retry the ELS request */ + efc_els_retry(els); +} + +static int +efc_els_req_cb(void *arg, u32 length, int status, u32 ext_status) +{ + struct efc_els_io_req *els; + struct efc_node *node; + struct efc *efc; + struct efc_node_cb cbdata; + u32 reason_code; + + els = arg; + node = els->node; + efc = node->efc; + + if (status) + 
els_io_printf(els, "status x%x ext x%x\n", status, ext_status); + + /* set the response len element of els->rsp */ + els->io.rsp.len = length; + + cbdata.status = status; + cbdata.ext_status = ext_status; + cbdata.header = NULL; + cbdata.els_rsp = els->io.rsp; + + /* set the response len element of els->rsp */ + cbdata.rsp_len = length; + + /* FW returns the number of bytes received on the link in + * the WCQE, not the amount placed in the buffer; use this info to + * check if there was an overrun. + */ + if (length > els->io.rsp.size) { + efc_log_warn(efc, + "ELS response returned len=%d > buflen=%zu\n", + length, els->io.rsp.size); + efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata); + return 0; + } + + /* Post event to ELS IO object */ + switch (status) { + case SLI4_FC_WCQE_STATUS_SUCCESS: + efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_OK, &cbdata); + break; + + case SLI4_FC_WCQE_STATUS_LS_RJT: + reason_code = (ext_status >> 16) & 0xff; + + /* delay and retry if reason code is Logical Busy */ + switch (reason_code) { + case ELS_RJT_BUSY: + els->node->els_req_cnt--; + els_io_printf(els, + "LS_RJT Logical Busy, delay and retry\n"); + timer_setup(&els->delay_timer, + efc_els_delay_timer_cb, 0); + mod_timer(&els->delay_timer, + jiffies + msecs_to_jiffies(5000)); + break; + default: + efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_RJT, + &cbdata); + break; + } + break; + + case SLI4_FC_WCQE_STATUS_LOCAL_REJECT: + switch (ext_status) { + case SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT: + efc_els_retry(els); + break; + default: + efc_log_err(efc, "LOCAL_REJECT with ext status:%x\n", + ext_status); + efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, + &cbdata); + break; + } + break; + default: /* Other error */ + efc_log_warn(efc, "els req failed status x%x, ext_status x%x\n", + status, ext_status); + efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata); + break; + } + + return 0; +} + +void efc_disc_io_complete(struct efc_disc_io *io, u32 len, u32 status, + u32 ext_status) +{ + struct efc_els_io_req *els = + container_of(io, struct efc_els_io_req, io); + + WARN_ON_ONCE(!els->cb); + + ((efc_hw_srrs_cb_t)els->cb) (els, len, status, ext_status); +} + +static int efc_els_send_req(struct efc_node *node, struct efc_els_io_req *els, + enum efc_disc_io_type io_type) +{ + int rc = 0; + struct efc *efc = node->efc; + struct efc_node_cb cbdata; + + /* update ELS request counter */ + els->node->els_req_cnt++; + + /* Prepare the IO request details */ + els->io.io_type = io_type; + els->io.xmit_len = els->io.req.size; + els->io.rsp_len = els->io.rsp.size; + els->io.rpi = node->rnode.indicator; + els->io.vpi = node->nport->indicator; + els->io.s_id = node->nport->fc_id; + els->io.d_id = node->rnode.fc_id; + + if (node->rnode.attached) + els->io.rpi_registered = true; + + els->cb = efc_els_req_cb; + + rc = efc->tt.send_els(efc, &els->io); + if (!rc) + return rc; + + cbdata.status = EFC_STATUS_INVALID; + cbdata.ext_status = EFC_STATUS_INVALID; + cbdata.els_rsp = els->io.rsp; + efc_log_err(efc, "efc_els_send failed: %d\n", rc); + efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata); + + return rc; +} + +static void +efc_els_retry(struct efc_els_io_req *els) +{ + struct efc *efc; + struct efc_node_cb cbdata; + u32 rc; + + efc = els->node->efc; + cbdata.status = EFC_STATUS_INVALID; + cbdata.ext_status = EFC_STATUS_INVALID; + cbdata.els_rsp = els->io.rsp; + + if (els->els_retries_remaining) { + els->els_retries_remaining--; + rc = efc->tt.send_els(efc, &els->io); + } else { + rc = -EIO; + } + + 
if (rc) { + efc_log_err(efc, "ELS retries exhausted\n"); + efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata); + } +} + +static int +efc_els_acc_cb(void *arg, u32 length, int status, u32 ext_status) +{ + struct efc_els_io_req *els; + struct efc_node *node; + struct efc *efc; + struct efc_node_cb cbdata; + + els = arg; + node = els->node; + efc = node->efc; + + cbdata.status = status; + cbdata.ext_status = ext_status; + cbdata.header = NULL; + cbdata.els_rsp = els->io.rsp; + + /* Post node event */ + switch (status) { + case SLI4_FC_WCQE_STATUS_SUCCESS: + efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_OK, &cbdata); + break; + + default: /* Other error */ + efc_log_warn(efc, "[%s] %-8s failed status x%x, ext x%x\n", + node->display_name, els->display_name, + status, ext_status); + efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata); + break; + } + + return 0; +} + +static int +efc_els_send_rsp(struct efc_els_io_req *els, u32 rsplen) +{ + int rc = 0; + struct efc_node_cb cbdata; + struct efc_node *node = els->node; + struct efc *efc = node->efc; + + /* increment ELS completion counter */ + node->els_cmpl_cnt++; + + els->io.io_type = EFC_DISC_IO_ELS_RESP; + els->cb = efc_els_acc_cb; + + /* Prepare the IO request details */ + els->io.xmit_len = rsplen; + els->io.rsp_len = els->io.rsp.size; + els->io.rpi = node->rnode.indicator; + els->io.vpi = node->nport->indicator; + if (node->nport->fc_id != U32_MAX) + els->io.s_id = node->nport->fc_id; + else + els->io.s_id = els->io.iparam.els.s_id; + els->io.d_id = node->rnode.fc_id; + + if (node->attached) + els->io.rpi_registered = true; + + rc = efc->tt.send_els(efc, &els->io); + if (!rc) + return rc; + + cbdata.status = EFC_STATUS_INVALID; + cbdata.ext_status = EFC_STATUS_INVALID; + cbdata.els_rsp = els->io.rsp; + efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata); + + return rc; +} + +int +efc_send_plogi(struct efc_node *node) +{ + struct efc_els_io_req *els; + struct efc *efc = node->efc; + struct fc_els_flogi *plogi; + + node_els_trace(); + + els = efc_els_io_alloc(node, sizeof(*plogi)); + if (!els) { + efc_log_err(efc, "IO alloc failed\n"); + return -EIO; + } + els->display_name = "plogi"; + + /* Build PLOGI request */ + plogi = els->io.req.virt; + + memcpy(plogi, node->nport->service_params, sizeof(*plogi)); + + plogi->fl_cmd = ELS_PLOGI; + memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd)); + + return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ); +} + +int +efc_send_flogi(struct efc_node *node) +{ + struct efc_els_io_req *els; + struct efc *efc; + struct fc_els_flogi *flogi; + + efc = node->efc; + + node_els_trace(); + + els = efc_els_io_alloc(node, sizeof(*flogi)); + if (!els) { + efc_log_err(efc, "IO alloc failed\n"); + return -EIO; + } + + els->display_name = "flogi"; + + /* Build FLOGI request */ + flogi = els->io.req.virt; + + memcpy(flogi, node->nport->service_params, sizeof(*flogi)); + flogi->fl_cmd = ELS_FLOGI; + memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd)); + + return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ); +} + +int +efc_send_fdisc(struct efc_node *node) +{ + struct efc_els_io_req *els; + struct efc *efc; + struct fc_els_flogi *fdisc; + + efc = node->efc; + + node_els_trace(); + + els = efc_els_io_alloc(node, sizeof(*fdisc)); + if (!els) { + efc_log_err(efc, "IO alloc failed\n"); + return -EIO; + } + + els->display_name = "fdisc"; + + /* Build FDISC request */ + fdisc = els->io.req.virt; + + memcpy(fdisc, node->nport->service_params, sizeof(*fdisc)); + fdisc->fl_cmd = ELS_FDISC; + 
memset(fdisc->_fl_resvd, 0, sizeof(fdisc->_fl_resvd)); + + return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ); +} + +int +efc_send_prli(struct efc_node *node) +{ + struct efc *efc = node->efc; + struct efc_els_io_req *els; + struct { + struct fc_els_prli prli; + struct fc_els_spp spp; + } *pp; + + node_els_trace(); + + els = efc_els_io_alloc(node, sizeof(*pp)); + if (!els) { + efc_log_err(efc, "IO alloc failed\n"); + return -EIO; + } + + els->display_name = "prli"; + + /* Build PRLI request */ + pp = els->io.req.virt; + + memset(pp, 0, sizeof(*pp)); + + pp->prli.prli_cmd = ELS_PRLI; + pp->prli.prli_spp_len = 16; + pp->prli.prli_len = cpu_to_be16(sizeof(*pp)); + pp->spp.spp_type = FC_TYPE_FCP; + pp->spp.spp_type_ext = 0; + pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR; + pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS | + (node->nport->enable_ini ? + FCP_SPPF_INIT_FCN : 0) | + (node->nport->enable_tgt ? + FCP_SPPF_TARG_FCN : 0)); + + return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ); +} + +int +efc_send_logo(struct efc_node *node) +{ + struct efc *efc = node->efc; + struct efc_els_io_req *els; + struct fc_els_logo *logo; + struct fc_els_flogi *sparams; + + node_els_trace(); + + sparams = (struct fc_els_flogi *)node->nport->service_params; + + els = efc_els_io_alloc(node, sizeof(*logo)); + if (!els) { + efc_log_err(efc, "IO alloc failed\n"); + return -EIO; + } + + els->display_name = "logo"; + + /* Build LOGO request */ + + logo = els->io.req.virt; + + memset(logo, 0, sizeof(*logo)); + logo->fl_cmd = ELS_LOGO; + hton24(logo->fl_n_port_id, node->rnode.nport->fc_id); + logo->fl_n_port_wwn = sparams->fl_wwpn; + + return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ); +} + +int +efc_send_adisc(struct efc_node *node) +{ + struct efc *efc = node->efc; + struct efc_els_io_req *els; + struct fc_els_adisc *adisc; + struct fc_els_flogi *sparams; + struct efc_nport *nport = node->nport; + + node_els_trace(); + + sparams = (struct fc_els_flogi *)node->nport->service_params; + + els = efc_els_io_alloc(node, sizeof(*adisc)); + if (!els) { + efc_log_err(efc, "IO alloc failed\n"); + return -EIO; + } + + els->display_name = "adisc"; + + /* Build ADISC request */ + + adisc = els->io.req.virt; + + memset(adisc, 0, sizeof(*adisc)); + adisc->adisc_cmd = ELS_ADISC; + hton24(adisc->adisc_hard_addr, nport->fc_id); + adisc->adisc_wwpn = sparams->fl_wwpn; + adisc->adisc_wwnn = sparams->fl_wwnn; + hton24(adisc->adisc_port_id, node->rnode.nport->fc_id); + + return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ); +} + +int +efc_send_scr(struct efc_node *node) +{ + struct efc_els_io_req *els; + struct efc *efc = node->efc; + struct fc_els_scr *req; + + node_els_trace(); + + els = efc_els_io_alloc(node, sizeof(*req)); + if (!els) { + efc_log_err(efc, "IO alloc failed\n"); + return -EIO; + } + + els->display_name = "scr"; + + req = els->io.req.virt; + + memset(req, 0, sizeof(*req)); + req->scr_cmd = ELS_SCR; + req->scr_reg_func = ELS_SCRF_FULL; + + return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ); +} + +int +efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_code, + u32 reason_code_expl, u32 vendor_unique) +{ + struct efc *efc = node->efc; + struct efc_els_io_req *els = NULL; + struct fc_els_ls_rjt *rjt; + + els = efc_els_io_alloc(node, sizeof(*rjt)); + if (!els) { + efc_log_err(efc, "els IO alloc failed\n"); + return -EIO; + } + + node_els_trace(); + + els->display_name = "ls_rjt"; + + memset(&els->io.iparam, 0, sizeof(els->io.iparam)); + els->io.iparam.els.ox_id = ox_id; + + rjt = 
els->io.req.virt; + memset(rjt, 0, sizeof(*rjt)); + + rjt->er_cmd = ELS_LS_RJT; + rjt->er_reason = reason_code; + rjt->er_explan = reason_code_expl; + + return efc_els_send_rsp(els, sizeof(*rjt)); +} + +int +efc_send_plogi_acc(struct efc_node *node, u32 ox_id) +{ + struct efc *efc = node->efc; + struct efc_els_io_req *els = NULL; + struct fc_els_flogi *plogi; + struct fc_els_flogi *req = (struct fc_els_flogi *)node->service_params; + + node_els_trace(); + + els = efc_els_io_alloc(node, sizeof(*plogi)); + if (!els) { + efc_log_err(efc, "els IO alloc failed\n"); + return -EIO; + } + + els->display_name = "plogi_acc"; + + memset(&els->io.iparam, 0, sizeof(els->io.iparam)); + els->io.iparam.els.ox_id = ox_id; + + plogi = els->io.req.virt; + + /* copy our port's service parameters to payload */ + memcpy(plogi, node->nport->service_params, sizeof(*plogi)); + plogi->fl_cmd = ELS_LS_ACC; + memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd)); + + /* Set Application header support bit if requested */ + if (req->fl_csp.sp_features & cpu_to_be16(FC_SP_FT_BCAST)) + plogi->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_BCAST); + + return efc_els_send_rsp(els, sizeof(*plogi)); +} + +int +efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id) +{ + struct efc *efc = node->efc; + struct efc_els_io_req *els = NULL; + struct fc_els_flogi *flogi; + + node_els_trace(); + + els = efc_els_io_alloc(node, sizeof(*flogi)); + if (!els) { + efc_log_err(efc, "els IO alloc failed\n"); + return -EIO; + } + + els->display_name = "flogi_p2p_acc"; + + memset(&els->io.iparam, 0, sizeof(els->io.iparam)); + els->io.iparam.els.ox_id = ox_id; + els->io.iparam.els.s_id = s_id; + + flogi = els->io.req.virt; + + /* copy our port's service parameters to payload */ + memcpy(flogi, node->nport->service_params, sizeof(*flogi)); + flogi->fl_cmd = ELS_LS_ACC; + memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd)); + + memset(flogi->fl_cssp, 0, sizeof(flogi->fl_cssp)); + + return efc_els_send_rsp(els, sizeof(*flogi)); +} + +int +efc_send_prli_acc(struct efc_node *node, u32 ox_id) +{ + struct efc *efc = node->efc; + struct efc_els_io_req *els = NULL; + struct { + struct fc_els_prli prli; + struct fc_els_spp spp; + } *pp; + + node_els_trace(); + + els = efc_els_io_alloc(node, sizeof(*pp)); + if (!els) { + efc_log_err(efc, "els IO alloc failed\n"); + return -EIO; + } + + els->display_name = "prli_acc"; + + memset(&els->io.iparam, 0, sizeof(els->io.iparam)); + els->io.iparam.els.ox_id = ox_id; + + pp = els->io.req.virt; + memset(pp, 0, sizeof(*pp)); + + pp->prli.prli_cmd = ELS_LS_ACC; + pp->prli.prli_spp_len = 0x10; + pp->prli.prli_len = cpu_to_be16(sizeof(*pp)); + pp->spp.spp_type = FC_TYPE_FCP; + pp->spp.spp_type_ext = 0; + pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR | FC_SPP_RESP_ACK; + + pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS | + (node->nport->enable_ini ? + FCP_SPPF_INIT_FCN : 0) | + (node->nport->enable_tgt ? 
+ FCP_SPPF_TARG_FCN : 0)); + + return efc_els_send_rsp(els, sizeof(*pp)); +} + +int +efc_send_prlo_acc(struct efc_node *node, u32 ox_id) +{ + struct efc *efc = node->efc; + struct efc_els_io_req *els = NULL; + struct { + struct fc_els_prlo prlo; + struct fc_els_spp spp; + } *pp; + + node_els_trace(); + + els = efc_els_io_alloc(node, sizeof(*pp)); + if (!els) { + efc_log_err(efc, "els IO alloc failed\n"); + return -EIO; + } + + els->display_name = "prlo_acc"; + + memset(&els->io.iparam, 0, sizeof(els->io.iparam)); + els->io.iparam.els.ox_id = ox_id; + + pp = els->io.req.virt; + memset(pp, 0, sizeof(*pp)); + pp->prlo.prlo_cmd = ELS_LS_ACC; + pp->prlo.prlo_obs = 0x10; + pp->prlo.prlo_len = cpu_to_be16(sizeof(*pp)); + + pp->spp.spp_type = FC_TYPE_FCP; + pp->spp.spp_type_ext = 0; + pp->spp.spp_flags = FC_SPP_RESP_ACK; + + return efc_els_send_rsp(els, sizeof(*pp)); +} + +int +efc_send_ls_acc(struct efc_node *node, u32 ox_id) +{ + struct efc *efc = node->efc; + struct efc_els_io_req *els = NULL; + struct fc_els_ls_acc *acc; + + node_els_trace(); + + els = efc_els_io_alloc(node, sizeof(*acc)); + if (!els) { + efc_log_err(efc, "els IO alloc failed\n"); + return -EIO; + } + + els->display_name = "ls_acc"; + + memset(&els->io.iparam, 0, sizeof(els->io.iparam)); + els->io.iparam.els.ox_id = ox_id; + + acc = els->io.req.virt; + memset(acc, 0, sizeof(*acc)); + + acc->la_cmd = ELS_LS_ACC; + + return efc_els_send_rsp(els, sizeof(*acc)); +} + +int +efc_send_logo_acc(struct efc_node *node, u32 ox_id) +{ + struct efc_els_io_req *els = NULL; + struct efc *efc = node->efc; + struct fc_els_ls_acc *logo; + + node_els_trace(); + + els = efc_els_io_alloc(node, sizeof(*logo)); + if (!els) { + efc_log_err(efc, "els IO alloc failed\n"); + return -EIO; + } + + els->display_name = "logo_acc"; + + memset(&els->io.iparam, 0, sizeof(els->io.iparam)); + els->io.iparam.els.ox_id = ox_id; + + logo = els->io.req.virt; + memset(logo, 0, sizeof(*logo)); + + logo->la_cmd = ELS_LS_ACC; + + return efc_els_send_rsp(els, sizeof(*logo)); +} + +int +efc_send_adisc_acc(struct efc_node *node, u32 ox_id) +{ + struct efc *efc = node->efc; + struct efc_els_io_req *els = NULL; + struct fc_els_adisc *adisc; + struct fc_els_flogi *sparams; + + node_els_trace(); + + els = efc_els_io_alloc(node, sizeof(*adisc)); + if (!els) { + efc_log_err(efc, "els IO alloc failed\n"); + return -EIO; + } + + els->display_name = "adisc_acc"; + + /* Go ahead and send the ELS_ACC */ + memset(&els->io.iparam, 0, sizeof(els->io.iparam)); + els->io.iparam.els.ox_id = ox_id; + + sparams = (struct fc_els_flogi *)node->nport->service_params; + adisc = els->io.req.virt; + memset(adisc, 0, sizeof(*adisc)); + adisc->adisc_cmd = ELS_LS_ACC; + adisc->adisc_wwpn = sparams->fl_wwpn; + adisc->adisc_wwnn = sparams->fl_wwnn; + hton24(adisc->adisc_port_id, node->rnode.nport->fc_id); + + return efc_els_send_rsp(els, sizeof(*adisc)); +} + +static inline void +fcct_build_req_header(struct fc_ct_hdr *hdr, u16 cmd, u16 max_size) +{ + hdr->ct_rev = FC_CT_REV; + hdr->ct_fs_type = FC_FST_DIR; + hdr->ct_fs_subtype = FC_NS_SUBTYPE; + hdr->ct_options = 0; + hdr->ct_cmd = cpu_to_be16(cmd); + /* words */ + hdr->ct_mr_size = cpu_to_be16(max_size / (sizeof(u32))); + hdr->ct_reason = 0; + hdr->ct_explan = 0; + hdr->ct_vendor = 0; +} + +int +efc_ns_send_rftid(struct efc_node *node) +{ + struct efc *efc = node->efc; + struct efc_els_io_req *els; + struct { + struct fc_ct_hdr hdr; + struct fc_ns_rft_id rftid; + } *ct; + + node_els_trace(); + + els = efc_els_io_alloc(node, sizeof(*ct)); + if (!els) 
{ + efc_log_err(efc, "IO alloc failed\n"); + return -EIO; + } + + els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ; + els->io.iparam.ct.type = FC_TYPE_CT; + els->io.iparam.ct.df_ctl = 0; + els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT; + + els->display_name = "rftid"; + + ct = els->io.req.virt; + memset(ct, 0, sizeof(*ct)); + fcct_build_req_header(&ct->hdr, FC_NS_RFT_ID, + sizeof(struct fc_ns_rft_id)); + + hton24(ct->rftid.fr_fid.fp_fid, node->rnode.nport->fc_id); + ct->rftid.fr_fts.ff_type_map[FC_TYPE_FCP / FC_NS_BPW] = + cpu_to_be32(1 << (FC_TYPE_FCP % FC_NS_BPW)); + + return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ); +} + +int +efc_ns_send_rffid(struct efc_node *node) +{ + struct efc *efc = node->efc; + struct efc_els_io_req *els; + struct { + struct fc_ct_hdr hdr; + struct fc_ns_rff_id rffid; + } *ct; + + node_els_trace(); + + els = efc_els_io_alloc(node, sizeof(*ct)); + if (!els) { + efc_log_err(efc, "IO alloc failed\n"); + return -EIO; + } + + els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ; + els->io.iparam.ct.type = FC_TYPE_CT; + els->io.iparam.ct.df_ctl = 0; + els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT; + + els->display_name = "rffid"; + ct = els->io.req.virt; + + memset(ct, 0, sizeof(*ct)); + fcct_build_req_header(&ct->hdr, FC_NS_RFF_ID, + sizeof(struct fc_ns_rff_id)); + + hton24(ct->rffid.fr_fid.fp_fid, node->rnode.nport->fc_id); + if (node->nport->enable_ini) + ct->rffid.fr_feat |= FCP_FEAT_INIT; + if (node->nport->enable_tgt) + ct->rffid.fr_feat |= FCP_FEAT_TARG; + ct->rffid.fr_type = FC_TYPE_FCP; + + return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ); +} + +int +efc_ns_send_gidpt(struct efc_node *node) +{ + struct efc_els_io_req *els = NULL; + struct efc *efc = node->efc; + struct { + struct fc_ct_hdr hdr; + struct fc_ns_gid_pt gidpt; + } *ct; + + node_els_trace(); + + els = efc_els_io_alloc_size(node, sizeof(*ct), EFC_ELS_GID_PT_RSP_LEN); + if (!els) { + efc_log_err(efc, "IO alloc failed\n"); + return -EIO; + } + + els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ; + els->io.iparam.ct.type = FC_TYPE_CT; + els->io.iparam.ct.df_ctl = 0; + els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT; + + els->display_name = "gidpt"; + + ct = els->io.req.virt; + + memset(ct, 0, sizeof(*ct)); + fcct_build_req_header(&ct->hdr, FC_NS_GID_PT, + sizeof(struct fc_ns_gid_pt)); + + ct->gidpt.fn_pt_type = FC_TYPE_FCP; + + return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ); +} + +void +efc_els_io_cleanup(struct efc_els_io_req *els, int evt, void *arg) +{ + /* don't want further events that could come; e.g. 
abort requests + * from the node state machine; thus, disable state machine + */ + els->els_req_free = true; + efc_node_post_els_resp(els->node, evt, arg); + + efc_els_io_free(els); +} + +static int +efc_ct_acc_cb(void *arg, u32 length, int status, u32 ext_status) +{ + struct efc_els_io_req *els = arg; + + efc_els_io_free(els); + + return 0; +} + +int +efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id, + struct fc_ct_hdr *ct_hdr, u32 cmd_rsp_code, + u32 reason_code, u32 reason_code_explanation) +{ + struct efc_els_io_req *els = NULL; + struct fc_ct_hdr *rsp = NULL; + + els = efc_els_io_alloc(node, 256); + if (!els) { + efc_log_err(efc, "IO alloc failed\n"); + return -EIO; + } + + rsp = els->io.rsp.virt; + + *rsp = *ct_hdr; + + fcct_build_req_header(rsp, cmd_rsp_code, 0); + rsp->ct_reason = reason_code; + rsp->ct_explan = reason_code_explanation; + + els->display_name = "ct_rsp"; + els->cb = efc_ct_acc_cb; + + /* Prepare the IO request details */ + els->io.io_type = EFC_DISC_IO_CT_RESP; + els->io.xmit_len = sizeof(*rsp); + + els->io.rpi = node->rnode.indicator; + els->io.d_id = node->rnode.fc_id; + + memset(&els->io.iparam, 0, sizeof(els->io.iparam)); + + els->io.iparam.ct.ox_id = ox_id; + els->io.iparam.ct.r_ctl = 3; + els->io.iparam.ct.type = FC_TYPE_CT; + els->io.iparam.ct.df_ctl = 0; + els->io.iparam.ct.timeout = 5; + + if (efc->tt.send_els(efc, &els->io)) { + efc_els_io_free(els); + return -EIO; + } + return 0; +} + +int +efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr) +{ + struct sli_bls_params bls; + struct fc_ba_acc *acc; + struct efc *efc = node->efc; + + memset(&bls, 0, sizeof(bls)); + bls.ox_id = be16_to_cpu(hdr->fh_ox_id); + bls.rx_id = be16_to_cpu(hdr->fh_rx_id); + bls.s_id = ntoh24(hdr->fh_d_id); + bls.d_id = node->rnode.fc_id; + bls.rpi = node->rnode.indicator; + bls.vpi = node->nport->indicator; + + acc = (void *)bls.payload; + acc->ba_ox_id = cpu_to_be16(bls.ox_id); + acc->ba_rx_id = cpu_to_be16(bls.rx_id); + acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX); + + return efc->tt.send_bls(efc, FC_RCTL_BA_ACC, &bls); +} diff --git a/drivers/scsi/elx/libefc/efc_els.h b/drivers/scsi/elx/libefc/efc_els.h new file mode 100644 index 000000000..3c4f820f6 --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_els.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
+ */ + +#ifndef __EFC_ELS_H__ +#define __EFC_ELS_H__ + +#define EFC_STATUS_INVALID INT_MAX +#define EFC_ELS_IO_POOL_SZ 1024 + +struct efc_els_io_req { + struct list_head list_entry; + struct kref ref; + void (*release)(struct kref *arg); + struct efc_node *node; + void *cb; + u32 els_retries_remaining; + bool els_req_free; + struct timer_list delay_timer; + + const char *display_name; + + struct efc_disc_io io; +}; + +typedef int(*efc_hw_srrs_cb_t)(void *arg, u32 length, int status, + u32 ext_status); + +void _efc_els_io_free(struct kref *arg); +struct efc_els_io_req * +efc_els_io_alloc(struct efc_node *node, u32 reqlen); +struct efc_els_io_req * +efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen); +void efc_els_io_free(struct efc_els_io_req *els); + +/* ELS command send */ +typedef void (*els_cb_t)(struct efc_node *node, + struct efc_node_cb *cbdata, void *arg); +int +efc_send_plogi(struct efc_node *node); +int +efc_send_flogi(struct efc_node *node); +int +efc_send_fdisc(struct efc_node *node); +int +efc_send_prli(struct efc_node *node); +int +efc_send_prlo(struct efc_node *node); +int +efc_send_logo(struct efc_node *node); +int +efc_send_adisc(struct efc_node *node); +int +efc_send_pdisc(struct efc_node *node); +int +efc_send_scr(struct efc_node *node); +int +efc_ns_send_rftid(struct efc_node *node); +int +efc_ns_send_rffid(struct efc_node *node); +int +efc_ns_send_gidpt(struct efc_node *node); +void +efc_els_io_cleanup(struct efc_els_io_req *els, int evt, void *arg); + +/* ELS acc send */ +int +efc_send_ls_acc(struct efc_node *node, u32 ox_id); +int +efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_cod, + u32 reason_code_expl, u32 vendor_unique); +int +efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id); +int +efc_send_flogi_acc(struct efc_node *node, u32 ox_id, u32 is_fport); +int +efc_send_plogi_acc(struct efc_node *node, u32 ox_id); +int +efc_send_prli_acc(struct efc_node *node, u32 ox_id); +int +efc_send_logo_acc(struct efc_node *node, u32 ox_id); +int +efc_send_prlo_acc(struct efc_node *node, u32 ox_id); +int +efc_send_adisc_acc(struct efc_node *node, u32 ox_id); + +int +efc_bls_send_acc_hdr(struct efc *efc, struct efc_node *node, + struct fc_frame_header *hdr); +int +efc_bls_send_rjt_hdr(struct efc_els_io_req *io, struct fc_frame_header *hdr); + +int +efc_els_io_list_empty(struct efc_node *node, struct list_head *list); + +/* CT */ +int +efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id, + struct fc_ct_hdr *ct_hdr, u32 cmd_rsp_code, u32 reason_code, + u32 reason_code_explanation); + +int +efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr); + +#endif /* __EFC_ELS_H__ */ diff --git a/drivers/scsi/elx/libefc/efc_fabric.c b/drivers/scsi/elx/libefc/efc_fabric.c new file mode 100644 index 000000000..9661eea93 --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_fabric.c @@ -0,0 +1,1563 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +/* + * This file implements remote node state machines for: + * - Fabric logins. + * - Fabric controller events. + * - Name/directory services interaction. + * - Point-to-point logins. 
+ */ + +/* + * fabric_sm Node State Machine: Fabric States + * ns_sm Node State Machine: Name/Directory Services States + * p2p_sm Node State Machine: Point-to-Point Node States + */ + +#include "efc.h" + +static void +efc_fabric_initiate_shutdown(struct efc_node *node) +{ + struct efc *efc = node->efc; + + node->els_io_enabled = false; + + if (node->attached) { + int rc; + + /* issue hw node free; don't care if succeeds right away + * or sometime later, will check node->attached later in + * shutdown process + */ + rc = efc_cmd_node_detach(efc, &node->rnode); + if (rc < 0) { + node_printf(node, "Failed freeing HW node, rc=%d\n", + rc); + } + } + /* + * node has either been detached or is in the process of being detached, + * call common node's initiate cleanup function + */ + efc_node_initiate_cleanup(node); +} + +static void +__efc_fabric_common(const char *funcname, struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = NULL; + + node = ctx->app; + + switch (evt) { + case EFC_EVT_DOMAIN_ATTACH_OK: + break; + case EFC_EVT_SHUTDOWN: + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + efc_fabric_initiate_shutdown(node); + break; + + default: + /* call default event handler common to all nodes */ + __efc_node_common(funcname, ctx, evt, arg); + } +} + +void +__efc_fabric_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, + void *arg) +{ + struct efc_node *node = ctx->app; + struct efc *efc = node->efc; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_REENTER: + efc_log_debug(efc, ">>> reenter !!\n"); + fallthrough; + + case EFC_EVT_ENTER: + /* send FLOGI */ + efc_send_flogi(node); + efc_node_transition(node, __efc_fabric_flogi_wait_rsp, NULL); + break; + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +efc_fabric_set_topology(struct efc_node *node, + enum efc_nport_topology topology) +{ + node->nport->topology = topology; +} + +void +efc_fabric_notify_topology(struct efc_node *node) +{ + struct efc_node *tmp_node; + unsigned long index; + + /* + * now loop through the nodes in the nport + * and send topology notification + */ + xa_for_each(&node->nport->lookup, index, tmp_node) { + if (tmp_node != node) { + efc_node_post_event(tmp_node, + EFC_EVT_NPORT_TOPOLOGY_NOTIFY, + &node->nport->topology); + } + } +} + +static bool efc_rnode_is_nport(struct fc_els_flogi *rsp) +{ + return !(ntohs(rsp->fl_csp.sp_features) & FC_SP_FT_FPORT); +} + +void +__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_SRRS_ELS_REQ_OK: { + if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI, + __efc_fabric_common, __func__)) { + return; + } + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + + memcpy(node->nport->domain->flogi_service_params, + cbdata->els_rsp.virt, + sizeof(struct fc_els_flogi)); + + /* Check to see if the fabric is an F_PORT or and N_PORT */ + if (!efc_rnode_is_nport(cbdata->els_rsp.virt)) { + /* sm: if not nport / efc_domain_attach */ + /* ext_status has the fc_id, attach domain */ + efc_fabric_set_topology(node, EFC_NPORT_TOPO_FABRIC); + efc_fabric_notify_topology(node); + WARN_ON(node->nport->domain->attached); + efc_domain_attach(node->nport->domain, + cbdata->ext_status); + efc_node_transition(node, + __efc_fabric_wait_domain_attach, + NULL); + break; + } + + /* sm: if nport and 
p2p_winner / efc_domain_attach */ + efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P); + if (efc_p2p_setup(node->nport)) { + node_printf(node, + "p2p setup failed, shutting down node\n"); + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + efc_fabric_initiate_shutdown(node); + break; + } + + if (node->nport->p2p_winner) { + efc_node_transition(node, + __efc_p2p_wait_domain_attach, + NULL); + if (node->nport->domain->attached && + !node->nport->domain->domain_notify_pend) { + /* + * already attached, + * just send ATTACH_OK + */ + node_printf(node, + "p2p winner, domain already attached\n"); + efc_node_post_event(node, + EFC_EVT_DOMAIN_ATTACH_OK, + NULL); + } + } else { + /* + * peer is p2p winner; + * PLOGI will be received on the + * remote SID=1 node; + * this node has served its purpose + */ + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + efc_fabric_initiate_shutdown(node); + } + + break; + } + + case EFC_EVT_ELS_REQ_ABORTED: + case EFC_EVT_SRRS_ELS_REQ_RJT: + case EFC_EVT_SRRS_ELS_REQ_FAIL: { + struct efc_nport *nport = node->nport; + /* + * with these errors, we have no recovery, + * so shutdown the nport, leave the link + * up and the domain ready + */ + if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI, + __efc_fabric_common, __func__)) { + return; + } + node_printf(node, + "FLOGI failed evt=%s, shutting down nport [%s]\n", + efc_sm_event_name(evt), nport->display_name); + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL); + break; + } + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_vport_fabric_init(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + /* sm: / send FDISC */ + efc_send_fdisc(node); + efc_node_transition(node, __efc_fabric_fdisc_wait_rsp, NULL); + break; + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_SRRS_ELS_REQ_OK: { + /* fc_id is in ext_status */ + if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC, + __efc_fabric_common, __func__)) { + return; + } + + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + /* sm: / efc_nport_attach */ + efc_nport_attach(node->nport, cbdata->ext_status); + efc_node_transition(node, __efc_fabric_wait_domain_attach, + NULL); + break; + } + + case EFC_EVT_SRRS_ELS_REQ_RJT: + case EFC_EVT_SRRS_ELS_REQ_FAIL: { + if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC, + __efc_fabric_common, __func__)) { + return; + } + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + efc_log_err(node->efc, "FDISC failed, shutting down nport\n"); + /* sm: / shutdown nport */ + efc_sm_post_event(&node->nport->sm, EFC_EVT_SHUTDOWN, NULL); + break; + } + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +static int +efc_start_ns_node(struct efc_nport *nport) +{ + struct efc_node *ns; + + /* Instantiate a name services node */ + ns = efc_node_find(nport, FC_FID_DIR_SERV); + if (!ns) { + ns = efc_node_alloc(nport, FC_FID_DIR_SERV, false, false); + if (!ns) + return -EIO; + } + /* + * for found ns, should we be transitioning from here? + * breaks transition only + * 1. from within state machine or + * 2. 
if after alloc + */ + if (ns->efc->nodedb_mask & EFC_NODEDB_PAUSE_NAMESERVER) + efc_node_pause(ns, __efc_ns_init); + else + efc_node_transition(ns, __efc_ns_init, NULL); + return 0; +} + +static int +efc_start_fabctl_node(struct efc_nport *nport) +{ + struct efc_node *fabctl; + + fabctl = efc_node_find(nport, FC_FID_FCTRL); + if (!fabctl) { + fabctl = efc_node_alloc(nport, FC_FID_FCTRL, + false, false); + if (!fabctl) + return -EIO; + } + /* + * for found ns, should we be transitioning from here? + * breaks transition only + * 1. from within state machine or + * 2. if after alloc + */ + efc_node_transition(fabctl, __efc_fabctl_init, NULL); + return 0; +} + +void +__efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + case EFC_EVT_DOMAIN_ATTACH_OK: + case EFC_EVT_NPORT_ATTACH_OK: { + int rc; + + rc = efc_start_ns_node(node->nport); + if (rc) + return; + + /* sm: if enable_ini / start fabctl node */ + /* Instantiate the fabric controller (sends SCR) */ + if (node->nport->enable_rscn) { + rc = efc_start_fabctl_node(node->nport); + if (rc) + return; + } + efc_node_transition(node, __efc_fabric_idle, NULL); + break; + } + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_fabric_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, + void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_DOMAIN_ATTACH_OK: + break; + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + /* sm: / send PLOGI */ + efc_send_plogi(node); + efc_node_transition(node, __efc_ns_plogi_wait_rsp, NULL); + break; + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_SRRS_ELS_REQ_OK: { + int rc; + + /* Save service parameters */ + if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, + __efc_fabric_common, __func__)) { + return; + } + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + /* sm: / save sparams, efc_node_attach */ + efc_node_save_sparms(node, cbdata->els_rsp.virt); + rc = efc_node_attach(node); + efc_node_transition(node, __efc_ns_wait_node_attach, NULL); + if (rc < 0) + efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, + NULL); + break; + } + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_ns_wait_node_attach(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_NODE_ATTACH_OK: + node->attached = true; + /* sm: / send RFTID */ + efc_ns_send_rftid(node); + efc_node_transition(node, __efc_ns_rftid_wait_rsp, NULL); + 
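
A minimal standalone sketch, not part of the patch: the directory-server (name services) node above is brought up as a chain of request/response states, and each successful ELS/CT response advances to the next step. The enum and table below are hypothetical simplifications of the __efc_ns_* handlers; they only illustrate the ordering PLOGI -> RFT_ID -> RFF_ID -> GID_PT, not the driver's real data structures.

#include <stdio.h>

enum ns_step { NS_PLOGI, NS_RFT_ID, NS_RFF_ID, NS_GID_PT, NS_IDLE };

static const char *ns_step_name[] = {
	"PLOGI to 0xFFFFFC",      /* port login to the directory server     */
	"RFT_ID",                 /* register supported FC-4 types          */
	"RFF_ID",                 /* register FC-4 features (FCP role)      */
	"GID_PT",                 /* query all N_Ports known to the fabric  */
	"idle (wait for RSCN)",
};

int main(void)
{
	/* each successful response moves the node to the next step */
	for (enum ns_step s = NS_PLOGI; s <= NS_IDLE; s++)
		printf("step %d: %s\n", (int)s, ns_step_name[s]);
	return 0;
}
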
break; + + case EFC_EVT_NODE_ATTACH_FAIL: + /* node attach failed, shutdown the node */ + node->attached = false; + node_printf(node, "Node attach failed\n"); + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + efc_fabric_initiate_shutdown(node); + break; + + case EFC_EVT_SHUTDOWN: + node_printf(node, "Shutdown event received\n"); + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + efc_node_transition(node, + __efc_fabric_wait_attach_evt_shutdown, + NULL); + break; + + /* + * if receive RSCN just ignore, + * we haven't sent GID_PT yet (ACC sent by fabctl node) + */ + case EFC_EVT_RSCN_RCVD: + break; + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + /* wait for any of these attach events and then shutdown */ + case EFC_EVT_NODE_ATTACH_OK: + node->attached = true; + node_printf(node, "Attach evt=%s, proceed to shutdown\n", + efc_sm_event_name(evt)); + efc_fabric_initiate_shutdown(node); + break; + + case EFC_EVT_NODE_ATTACH_FAIL: + node->attached = false; + node_printf(node, "Attach evt=%s, proceed to shutdown\n", + efc_sm_event_name(evt)); + efc_fabric_initiate_shutdown(node); + break; + + /* ignore shutdown event as we're already in shutdown path */ + case EFC_EVT_SHUTDOWN: + node_printf(node, "Shutdown event received\n"); + break; + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_SRRS_ELS_REQ_OK: + if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFT_ID, + __efc_fabric_common, __func__)) { + return; + } + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + /* sm: / send RFFID */ + efc_ns_send_rffid(node); + efc_node_transition(node, __efc_ns_rffid_wait_rsp, NULL); + break; + + /* + * if receive RSCN just ignore, + * we haven't sent GID_PT yet (ACC sent by fabctl node) + */ + case EFC_EVT_RSCN_RCVD: + break; + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + /* + * Waits for an RFFID response event; + * if rscn enabled, a GIDPT name services request is issued. 
+ */ + switch (evt) { + case EFC_EVT_SRRS_ELS_REQ_OK: { + if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFF_ID, + __efc_fabric_common, __func__)) { + return; + } + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + if (node->nport->enable_rscn) { + /* sm: if enable_rscn / send GIDPT */ + efc_ns_send_gidpt(node); + + efc_node_transition(node, __efc_ns_gidpt_wait_rsp, + NULL); + } else { + /* if 'T' only, we're done, go to idle */ + efc_node_transition(node, __efc_ns_idle, NULL); + } + break; + } + /* + * if receive RSCN just ignore, + * we haven't sent GID_PT yet (ACC sent by fabctl node) + */ + case EFC_EVT_RSCN_RCVD: + break; + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +static int +efc_process_gidpt_payload(struct efc_node *node, + void *data, u32 gidpt_len) +{ + u32 i, j; + struct efc_node *newnode; + struct efc_nport *nport = node->nport; + struct efc *efc = node->efc; + u32 port_id = 0, port_count, plist_count; + struct efc_node *n; + struct efc_node **active_nodes; + int residual; + struct { + struct fc_ct_hdr hdr; + struct fc_gid_pn_resp pn_rsp; + } *rsp; + struct fc_gid_pn_resp *gidpt; + unsigned long index; + + rsp = data; + gidpt = &rsp->pn_rsp; + residual = be16_to_cpu(rsp->hdr.ct_mr_size); + + if (residual != 0) + efc_log_debug(node->efc, "residual is %u words\n", residual); + + if (be16_to_cpu(rsp->hdr.ct_cmd) == FC_FS_RJT) { + node_printf(node, + "GIDPT request failed: rsn x%x rsn_expl x%x\n", + rsp->hdr.ct_reason, rsp->hdr.ct_explan); + return -EIO; + } + + plist_count = (gidpt_len - sizeof(struct fc_ct_hdr)) / sizeof(*gidpt); + + /* Count the number of nodes */ + port_count = 0; + xa_for_each(&nport->lookup, index, n) { + port_count++; + } + + /* Allocate a buffer for all nodes */ + active_nodes = kcalloc(port_count, sizeof(*active_nodes), GFP_ATOMIC); + if (!active_nodes) { + node_printf(node, "efc_malloc failed\n"); + return -EIO; + } + + /* Fill buffer with fc_id of active nodes */ + i = 0; + xa_for_each(&nport->lookup, index, n) { + port_id = n->rnode.fc_id; + switch (port_id) { + case FC_FID_FLOGI: + case FC_FID_FCTRL: + case FC_FID_DIR_SERV: + break; + default: + if (port_id != FC_FID_DOM_MGR) + active_nodes[i++] = n; + break; + } + } + + /* update the active nodes buffer */ + for (i = 0; i < plist_count; i++) { + hton24(gidpt[i].fp_fid, port_id); + + for (j = 0; j < port_count; j++) { + if (active_nodes[j] && + port_id == active_nodes[j]->rnode.fc_id) { + active_nodes[j] = NULL; + } + } + + if (gidpt[i].fp_resvd & FC_NS_FID_LAST) + break; + } + + /* Those remaining in the active_nodes[] are now gone ! */ + for (i = 0; i < port_count; i++) { + /* + * if we're an initiator and the remote node + * is a target, then post the node missing event. + * if we're target and we have enabled + * target RSCN, then post the node missing event. 
+ */ + if (!active_nodes[i]) + continue; + + if ((node->nport->enable_ini && active_nodes[i]->targ) || + (node->nport->enable_tgt && enable_target_rscn(efc))) { + efc_node_post_event(active_nodes[i], + EFC_EVT_NODE_MISSING, NULL); + } else { + node_printf(node, + "GID_PT: skipping non-tgt port_id x%06x\n", + active_nodes[i]->rnode.fc_id); + } + } + kfree(active_nodes); + + for (i = 0; i < plist_count; i++) { + hton24(gidpt[i].fp_fid, port_id); + + /* Don't create node for ourselves */ + if (port_id == node->rnode.nport->fc_id) { + if (gidpt[i].fp_resvd & FC_NS_FID_LAST) + break; + continue; + } + + newnode = efc_node_find(nport, port_id); + if (!newnode) { + if (!node->nport->enable_ini) + continue; + + newnode = efc_node_alloc(nport, port_id, false, false); + if (!newnode) { + efc_log_err(efc, "efc_node_alloc() failed\n"); + return -EIO; + } + /* + * send PLOGI automatically + * if initiator + */ + efc_node_init_device(newnode, true); + } + + if (node->nport->enable_ini && newnode->targ) { + efc_node_post_event(newnode, EFC_EVT_NODE_REFOUND, + NULL); + } + + if (gidpt[i].fp_resvd & FC_NS_FID_LAST) + break; + } + return 0; +} + +void +__efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + /* + * Wait for a GIDPT response from the name server. Process the FC_IDs + * that are reported by creating new remote ports, as needed. + */ + + switch (evt) { + case EFC_EVT_SRRS_ELS_REQ_OK: { + if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_GID_PT, + __efc_fabric_common, __func__)) { + return; + } + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + /* sm: / process GIDPT payload */ + efc_process_gidpt_payload(node, cbdata->els_rsp.virt, + cbdata->els_rsp.len); + efc_node_transition(node, __efc_ns_idle, NULL); + break; + } + + case EFC_EVT_SRRS_ELS_REQ_FAIL: { + /* not much we can do; will retry with the next RSCN */ + node_printf(node, "GID_PT failed to complete\n"); + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + efc_node_transition(node, __efc_ns_idle, NULL); + break; + } + + /* if receive RSCN here, queue up another discovery processing */ + case EFC_EVT_RSCN_RCVD: { + node_printf(node, "RSCN received during GID_PT processing\n"); + node->rscn_pending = true; + break; + } + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + struct efc *efc = node->efc; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + /* + * Wait for RSCN received events (posted from the fabric controller) + * and restart the GIDPT name services query and processing. 
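
A standalone sketch, simplified from efc_process_gidpt_payload() above (the arrays, IDs, and helper below are illustrative, not driver code): the GID_PT response is reconciled against the currently known remote ports. Known ports that no longer appear are treated as missing; IDs in the response that are not yet known become new nodes, while well-known addresses and our own ID are skipped, as in the driver.

#include <stdbool.h>
#include <stdio.h>

#define LOCAL_ID 0x010200u            /* hypothetical local N_Port ID */

static bool in_list(unsigned int id, const unsigned int *list, int n)
{
	for (int i = 0; i < n; i++)
		if (list[i] == id)
			return true;
	return false;
}

int main(void)
{
	unsigned int known[] = { 0x010300, 0x010400 };           /* current nodes */
	unsigned int gidpt[] = { LOCAL_ID, 0x010400, 0x010500 }; /* GID_PT reply  */
	int nknown = 2, ngidpt = 3;

	for (int i = 0; i < nknown; i++)
		if (!in_list(known[i], gidpt, ngidpt))
			printf("port %06x missing -> post EFC_EVT_NODE_MISSING\n",
			       known[i]);

	for (int i = 0; i < ngidpt; i++) {
		if (gidpt[i] == LOCAL_ID)
			continue;          /* don't create a node for ourselves */
		if (!in_list(gidpt[i], known, nknown))
			printf("port %06x new -> allocate node, send PLOGI\n",
			       gidpt[i]);
	}
	return 0;
}
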
+ */ + + switch (evt) { + case EFC_EVT_ENTER: + if (!node->rscn_pending) + break; + + node_printf(node, "RSCN pending, restart discovery\n"); + node->rscn_pending = false; + fallthrough; + + case EFC_EVT_RSCN_RCVD: { + /* sm: / send GIDPT */ + /* + * If target RSCN processing is enabled, + * and this is target only (not initiator), + * and tgt_rscn_delay is non-zero, + * then we delay issuing the GID_PT + */ + if (efc->tgt_rscn_delay_msec != 0 && + !node->nport->enable_ini && node->nport->enable_tgt && + enable_target_rscn(efc)) { + efc_node_transition(node, __efc_ns_gidpt_delay, NULL); + } else { + efc_ns_send_gidpt(node); + efc_node_transition(node, __efc_ns_gidpt_wait_rsp, + NULL); + } + break; + } + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +static void +gidpt_delay_timer_cb(struct timer_list *t) +{ + struct efc_node *node = from_timer(node, t, gidpt_delay_timer); + + del_timer(&node->gidpt_delay_timer); + + efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL); +} + +void +__efc_ns_gidpt_delay(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + struct efc *efc = node->efc; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: { + u64 delay_msec, tmp; + + /* + * Compute the delay time. + * Set to tgt_rscn_delay, if the time since last GIDPT + * is less than tgt_rscn_period, then use tgt_rscn_period. + */ + delay_msec = efc->tgt_rscn_delay_msec; + tmp = jiffies_to_msecs(jiffies) - node->time_last_gidpt_msec; + if (tmp < efc->tgt_rscn_period_msec) + delay_msec = efc->tgt_rscn_period_msec; + + timer_setup(&node->gidpt_delay_timer, &gidpt_delay_timer_cb, + 0); + mod_timer(&node->gidpt_delay_timer, + jiffies + msecs_to_jiffies(delay_msec)); + + break; + } + + case EFC_EVT_GIDPT_DELAY_EXPIRED: + node->time_last_gidpt_msec = jiffies_to_msecs(jiffies); + + efc_ns_send_gidpt(node); + efc_node_transition(node, __efc_ns_gidpt_wait_rsp, NULL); + break; + + case EFC_EVT_RSCN_RCVD: { + efc_log_debug(efc, + "RSCN received while in GIDPT delay - no action\n"); + break; + } + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_fabctl_init(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + /* no need to login to fabric controller, just send SCR */ + efc_send_scr(node); + efc_node_transition(node, __efc_fabctl_wait_scr_rsp, NULL); + break; + + case EFC_EVT_NODE_ATTACH_OK: + node->attached = true; + break; + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + /* + * Fabric controller node state machine: + * Wait for an SCR response from the fabric controller. 
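
A standalone sketch with hypothetical values, mirroring the delay computation in __efc_ns_gidpt_delay above: the delay defaults to tgt_rscn_delay_msec, but if the previous GID_PT was issued less than tgt_rscn_period_msec ago, the longer period is used instead, which rate-limits rediscovery when RSCNs arrive in bursts.

#include <stdio.h>

static unsigned long gidpt_delay_msec(unsigned long now_msec,
				      unsigned long last_gidpt_msec,
				      unsigned long delay_msec,
				      unsigned long period_msec)
{
	/* too soon since the last GID_PT: stretch the delay to the period */
	if (now_msec - last_gidpt_msec < period_msec)
		return period_msec;
	return delay_msec;
}

int main(void)
{
	/* delay 500 ms, period 10 s, last GID_PT 2 s ago -> 10000 ms */
	printf("%lu ms\n", gidpt_delay_msec(12000, 10000, 500, 10000));
	/* last GID_PT 30 s ago -> just the normal 500 ms delay */
	printf("%lu ms\n", gidpt_delay_msec(42000, 12000, 500, 10000));
	return 0;
}
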
+ */ + switch (evt) { + case EFC_EVT_SRRS_ELS_REQ_OK: + if (efc_node_check_els_req(ctx, evt, arg, ELS_SCR, + __efc_fabric_common, __func__)) { + return; + } + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + efc_node_transition(node, __efc_fabctl_ready, NULL); + break; + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +static void +efc_process_rscn(struct efc_node *node, struct efc_node_cb *cbdata) +{ + struct efc *efc = node->efc; + struct efc_nport *nport = node->nport; + struct efc_node *ns; + + /* Forward this event to the name-services node */ + ns = efc_node_find(nport, FC_FID_DIR_SERV); + if (ns) + efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, cbdata); + else + efc_log_warn(efc, "can't find name server node\n"); +} + +void +__efc_fabctl_ready(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + /* + * Fabric controller node state machine: Ready. + * In this state, the fabric controller sends a RSCN, which is received + * by this node and is forwarded to the name services node object; and + * the RSCN LS_ACC is sent. + */ + switch (evt) { + case EFC_EVT_RSCN_RCVD: { + struct fc_frame_header *hdr = cbdata->header->dma.virt; + + /* + * sm: / process RSCN (forward to name services node), + * send LS_ACC + */ + efc_process_rscn(node, cbdata); + efc_send_ls_acc(node, be16_to_cpu(hdr->fh_ox_id)); + efc_node_transition(node, __efc_fabctl_wait_ls_acc_cmpl, + NULL); + break; + } + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_SRRS_ELS_CMPL_OK: + WARN_ON(!node->els_cmpl_cnt); + node->els_cmpl_cnt--; + efc_node_transition(node, __efc_fabctl_ready, NULL); + break; + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +static uint64_t +efc_get_wwpn(struct fc_els_flogi *sp) +{ + return be64_to_cpu(sp->fl_wwnn); +} + +static int +efc_rnode_is_winner(struct efc_nport *nport) +{ + struct fc_els_flogi *remote_sp; + u64 remote_wwpn; + u64 local_wwpn = nport->wwpn; + u64 wwn_bump = 0; + + remote_sp = (struct fc_els_flogi *)nport->domain->flogi_service_params; + remote_wwpn = efc_get_wwpn(remote_sp); + + local_wwpn ^= wwn_bump; + + efc_log_debug(nport->efc, "r: %llx\n", + be64_to_cpu(remote_sp->fl_wwpn)); + efc_log_debug(nport->efc, "l: %llx\n", local_wwpn); + + if (remote_wwpn == local_wwpn) { + efc_log_warn(nport->efc, + "WWPN of remote node [%08x %08x] matches local WWPN\n", + (u32)(local_wwpn >> 32ll), + (u32)local_wwpn); + return -1; + } + + return (remote_wwpn > local_wwpn); +} + +void +__efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + struct efc *efc = node->efc; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_DOMAIN_ATTACH_OK: { + struct efc_nport *nport = node->nport; + struct efc_node *rnode; + + /* + * this transient node (SID=0 (recv'd FLOGI) + * or DID=fabric (sent FLOGI)) + * is 
the p2p winner, will use a separate node + * to send PLOGI to peer + */ + WARN_ON(!node->nport->p2p_winner); + + rnode = efc_node_find(nport, node->nport->p2p_remote_port_id); + if (rnode) { + /* + * the "other" transient p2p node has + * already kicked off the + * new node from which PLOGI is sent + */ + node_printf(node, + "Node with fc_id x%x already exists\n", + rnode->rnode.fc_id); + } else { + /* + * create new node (SID=1, DID=2) + * from which to send PLOGI + */ + rnode = efc_node_alloc(nport, + nport->p2p_remote_port_id, + false, false); + if (!rnode) { + efc_log_err(efc, "node alloc failed\n"); + return; + } + + efc_fabric_notify_topology(node); + /* sm: / allocate p2p remote node */ + efc_node_transition(rnode, __efc_p2p_rnode_init, + NULL); + } + + /* + * the transient node (SID=0 or DID=fabric) + * has served its purpose + */ + if (node->rnode.fc_id == 0) { + /* + * if this is the SID=0 node, + * move to the init state in case peer + * has restarted FLOGI discovery and FLOGI is pending + */ + /* don't send PLOGI on efc_d_init entry */ + efc_node_init_device(node, false); + } else { + /* + * if this is the DID=fabric node + * (we initiated FLOGI), shut it down + */ + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + efc_fabric_initiate_shutdown(node); + } + break; + } + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_p2p_rnode_init(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + /* sm: / send PLOGI */ + efc_send_plogi(node); + efc_node_transition(node, __efc_p2p_wait_plogi_rsp, NULL); + break; + + case EFC_EVT_ABTS_RCVD: + /* sm: send BA_ACC */ + efc_send_bls_acc(node, cbdata->header->dma.virt); + + break; + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_SRRS_ELS_CMPL_OK: + WARN_ON(!node->els_cmpl_cnt); + node->els_cmpl_cnt--; + + /* sm: if p2p_winner / domain_attach */ + if (node->nport->p2p_winner) { + efc_node_transition(node, + __efc_p2p_wait_domain_attach, + NULL); + if (!node->nport->domain->attached) { + node_printf(node, "Domain not attached\n"); + efc_domain_attach(node->nport->domain, + node->nport->p2p_port_id); + } else { + node_printf(node, "Domain already attached\n"); + efc_node_post_event(node, + EFC_EVT_DOMAIN_ATTACH_OK, + NULL); + } + } else { + /* this node has served its purpose; + * we'll expect a PLOGI on a separate + * node (remote SID=0x1); return this node + * to init state in case peer + * restarts discovery -- it may already + * have (pending frames may exist). 
+ */ + /* don't send PLOGI on efc_d_init entry */ + efc_node_init_device(node, false); + } + break; + + case EFC_EVT_SRRS_ELS_CMPL_FAIL: + /* + * LS_ACC failed, possibly due to link down; + * shutdown node and wait + * for FLOGI discovery to restart + */ + node_printf(node, "FLOGI LS_ACC failed, shutting down\n"); + WARN_ON(!node->els_cmpl_cnt); + node->els_cmpl_cnt--; + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + efc_fabric_initiate_shutdown(node); + break; + + case EFC_EVT_ABTS_RCVD: { + /* sm: / send BA_ACC */ + efc_send_bls_acc(node, cbdata->header->dma.virt); + break; + } + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_SRRS_ELS_REQ_OK: { + int rc; + + if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, + __efc_fabric_common, __func__)) { + return; + } + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + /* sm: / save sparams, efc_node_attach */ + efc_node_save_sparms(node, cbdata->els_rsp.virt); + rc = efc_node_attach(node); + efc_node_transition(node, __efc_p2p_wait_node_attach, NULL); + if (rc < 0) + efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, + NULL); + break; + } + case EFC_EVT_SRRS_ELS_REQ_FAIL: { + if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, + __efc_fabric_common, __func__)) { + return; + } + node_printf(node, "PLOGI failed, shutting down\n"); + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + efc_fabric_initiate_shutdown(node); + break; + } + + case EFC_EVT_PLOGI_RCVD: { + struct fc_frame_header *hdr = cbdata->header->dma.virt; + /* if we're in external loopback mode, just send LS_ACC */ + if (node->efc->external_loopback) { + efc_send_plogi_acc(node, be16_to_cpu(hdr->fh_ox_id)); + } else { + /* + * if this isn't external loopback, + * pass to default handler + */ + __efc_fabric_common(__func__, ctx, evt, arg); + } + break; + } + case EFC_EVT_PRLI_RCVD: + /* I, or I+T */ + /* sent PLOGI and before completion was seen, received the + * PRLI from the remote node (WCQEs and RCQEs come in on + * different queues and order of processing cannot be assumed) + * Save OXID so PRLI can be sent after the attach and continue + * to wait for PLOGI response + */ + efc_process_prli_payload(node, cbdata->payload->dma.virt); + efc_send_ls_acc_after_attach(node, + cbdata->header->dma.virt, + EFC_NODE_SEND_LS_ACC_PRLI); + efc_node_transition(node, __efc_p2p_wait_plogi_rsp_recvd_prli, + NULL); + break; + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + /* + * Since we've received a PRLI, we have a port login and will + * just need to wait for the PLOGI response to do the node + * attach and then we can send the LS_ACC for the PRLI. If, + * during this time, we receive FCP_CMNDs (which is possible + * since we've already sent a PRLI and our peer may have + * accepted). + * At this time, we are not waiting on any other unsolicited + * frames to continue with the login process. Thus, it will not + * hurt to hold frames here. 
+ */ + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_SRRS_ELS_REQ_OK: { /* PLOGI response received */ + int rc; + + /* Completion from PLOGI sent */ + if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, + __efc_fabric_common, __func__)) { + return; + } + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + /* sm: / save sparams, efc_node_attach */ + efc_node_save_sparms(node, cbdata->els_rsp.virt); + rc = efc_node_attach(node); + efc_node_transition(node, __efc_p2p_wait_node_attach, NULL); + if (rc < 0) + efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, + NULL); + break; + } + case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */ + case EFC_EVT_SRRS_ELS_REQ_RJT: + /* PLOGI failed, shutdown the node */ + if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, + __efc_fabric_common, __func__)) { + return; + } + WARN_ON(!node->els_req_cnt); + node->els_req_cnt--; + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + efc_fabric_initiate_shutdown(node); + break; + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +void +__efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node_cb *cbdata = arg; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_NODE_ATTACH_OK: + node->attached = true; + switch (node->send_ls_acc) { + case EFC_NODE_SEND_LS_ACC_PRLI: { + efc_d_send_prli_rsp(node->ls_acc_io, + node->ls_acc_oxid); + node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE; + node->ls_acc_io = NULL; + break; + } + case EFC_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */ + case EFC_NODE_SEND_LS_ACC_NONE: + default: + /* Normal case for I */ + /* sm: send_plogi_acc is not set / send PLOGI acc */ + efc_node_transition(node, __efc_d_port_logged_in, + NULL); + break; + } + break; + + case EFC_EVT_NODE_ATTACH_FAIL: + /* node attach failed, shutdown the node */ + node->attached = false; + node_printf(node, "Node attach failed\n"); + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + efc_fabric_initiate_shutdown(node); + break; + + case EFC_EVT_SHUTDOWN: + node_printf(node, "%s received\n", efc_sm_event_name(evt)); + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + efc_node_transition(node, + __efc_fabric_wait_attach_evt_shutdown, + NULL); + break; + case EFC_EVT_PRLI_RCVD: + node_printf(node, "%s: PRLI received before node is attached\n", + efc_sm_event_name(evt)); + efc_process_prli_payload(node, cbdata->payload->dma.virt); + efc_send_ls_acc_after_attach(node, + cbdata->header->dma.virt, + EFC_NODE_SEND_LS_ACC_PRLI); + break; + + default: + __efc_fabric_common(__func__, ctx, evt, arg); + } +} + +int +efc_p2p_setup(struct efc_nport *nport) +{ + struct efc *efc = nport->efc; + int rnode_winner; + + rnode_winner = efc_rnode_is_winner(nport); + + /* set nport flags to indicate p2p "winner" */ + if (rnode_winner == 1) { + nport->p2p_remote_port_id = 0; + nport->p2p_port_id = 0; + nport->p2p_winner = false; + } else if (rnode_winner == 0) { + nport->p2p_remote_port_id = 2; + nport->p2p_port_id = 1; + nport->p2p_winner = true; + } else { + /* no winner; only okay if external loopback enabled */ + if (nport->efc->external_loopback) { + /* + * External loopback mode enabled; + * local nport and remote node + * will be registered with an NPortID = 1; + */ + 
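
A standalone sketch, simplified from efc_rnode_is_winner()/efc_p2p_setup() above (struct and function names below are made up for illustration): in point-to-point mode the port with the higher WWPN "wins" and hands out the N_Port IDs. The winner takes ID 1 and assigns ID 2 to its peer; the loser leaves address assignment to the winner, and equal WWPNs are only acceptable in external loopback.

#include <stdint.h>
#include <stdio.h>

struct p2p_ids {
	int local_id;      /* 0 means: wait for the winner to assign */
	int remote_id;
	int winner;
};

static struct p2p_ids p2p_assign(uint64_t local_wwpn, uint64_t remote_wwpn)
{
	struct p2p_ids ids = { 0, 0, 0 };

	if (local_wwpn > remote_wwpn) {   /* we win: assign both addresses */
		ids.winner = 1;
		ids.local_id = 1;
		ids.remote_id = 2;
	}
	return ids;
}

int main(void)
{
	struct p2p_ids ids = p2p_assign(0x10000090fa000001ULL,
					0x10000090fa000002ULL);

	/* peer has the larger WWPN, so it is the winner: 0 0 0 */
	printf("winner=%d local=%d remote=%d\n",
	       ids.winner, ids.local_id, ids.remote_id);
	return 0;
}
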
efc_log_debug(efc, + "External loopback mode enabled\n"); + nport->p2p_remote_port_id = 1; + nport->p2p_port_id = 1; + nport->p2p_winner = true; + } else { + efc_log_warn(efc, + "failed to determine p2p winner\n"); + return rnode_winner; + } + } + return 0; +} diff --git a/drivers/scsi/elx/libefc/efc_fabric.h b/drivers/scsi/elx/libefc/efc_fabric.h new file mode 100644 index 000000000..b0947ae6f --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_fabric.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +/* + * Declarations for the interface exported by efc_fabric + */ + +#ifndef __EFCT_FABRIC_H__ +#define __EFCT_FABRIC_H__ +#include "scsi/fc/fc_els.h" +#include "scsi/fc/fc_fs.h" +#include "scsi/fc/fc_ns.h" + +void +__efc_fabric_init(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_fabric_domain_attach_wait(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); + +void +__efc_vport_fabric_init(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_fabric_wait_nport_attach(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); + +void +__efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg); +void +__efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_ns_wait_node_attach(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_ns_logo_wait_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event, void *arg); +void +__efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg); +void +__efc_ns_gidpt_delay(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_fabctl_init(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_fabctl_wait_node_attach(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_fabctl_ready(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_fabric_idle(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); + +void +__efc_p2p_rnode_init(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_p2p_domain_attach_wait(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, 
void *arg); +void +__efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); + +int +efc_p2p_setup(struct efc_nport *nport); +void +efc_fabric_set_topology(struct efc_node *node, + enum efc_nport_topology topology); +void efc_fabric_notify_topology(struct efc_node *node); + +#endif /* __EFCT_FABRIC_H__ */ diff --git a/drivers/scsi/elx/libefc/efc_node.c b/drivers/scsi/elx/libefc/efc_node.c new file mode 100644 index 000000000..a1b4ce6a2 --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_node.c @@ -0,0 +1,1102 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#include "efc.h" + +int +efc_remote_node_cb(void *arg, int event, void *data) +{ + struct efc *efc = arg; + struct efc_remote_node *rnode = data; + struct efc_node *node = rnode->node; + unsigned long flags = 0; + + spin_lock_irqsave(&efc->lock, flags); + efc_node_post_event(node, event, NULL); + spin_unlock_irqrestore(&efc->lock, flags); + + return 0; +} + +struct efc_node * +efc_node_find(struct efc_nport *nport, u32 port_id) +{ + /* Find an FC node structure given the FC port ID */ + return xa_load(&nport->lookup, port_id); +} + +static void +_efc_node_free(struct kref *arg) +{ + struct efc_node *node = container_of(arg, struct efc_node, ref); + struct efc *efc = node->efc; + struct efc_dma *dma; + + dma = &node->sparm_dma_buf; + dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys); + memset(dma, 0, sizeof(struct efc_dma)); + mempool_free(node, efc->node_pool); +} + +struct efc_node *efc_node_alloc(struct efc_nport *nport, + u32 port_id, bool init, bool targ) +{ + int rc; + struct efc_node *node = NULL; + struct efc *efc = nport->efc; + struct efc_dma *dma; + + if (nport->shutting_down) { + efc_log_debug(efc, "node allocation when shutting down %06x", + port_id); + return NULL; + } + + node = mempool_alloc(efc->node_pool, GFP_ATOMIC); + if (!node) { + efc_log_err(efc, "node allocation failed %06x", port_id); + return NULL; + } + memset(node, 0, sizeof(*node)); + + dma = &node->sparm_dma_buf; + dma->size = NODE_SPARAMS_SIZE; + dma->virt = dma_pool_zalloc(efc->node_dma_pool, GFP_ATOMIC, &dma->phys); + if (!dma->virt) { + efc_log_err(efc, "node dma alloc failed\n"); + goto dma_fail; + } + node->rnode.indicator = U32_MAX; + node->nport = nport; + + node->efc = efc; + node->init = init; + node->targ = targ; + + spin_lock_init(&node->pend_frames_lock); + INIT_LIST_HEAD(&node->pend_frames); + spin_lock_init(&node->els_ios_lock); + INIT_LIST_HEAD(&node->els_ios_list); + node->els_io_enabled = true; + + rc = efc_cmd_node_alloc(efc, &node->rnode, port_id, nport); + if (rc) { + efc_log_err(efc, "efc_hw_node_alloc failed: %d\n", rc); + goto hw_alloc_fail; + } + + node->rnode.node = node; + node->sm.app = node; + node->evtdepth = 0; + + efc_node_update_display_name(node); + + rc = xa_err(xa_store(&nport->lookup, port_id, node, GFP_ATOMIC)); + if (rc) { + efc_log_err(efc, "Node lookup store failed: %d\n", rc); + goto xa_fail; + } + + /* initialize refcount */ + kref_init(&node->ref); + node->release = _efc_node_free; + kref_get(&nport->ref); + + return node; + +xa_fail: + efc_node_free_resources(efc, &node->rnode); +hw_alloc_fail: + dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys); +dma_fail: + mempool_free(node, efc->node_pool); + return NULL; +} + +void +efc_node_free(struct efc_node *node) +{ + struct efc_nport *nport; + struct efc *efc; + int rc = 0; + struct efc_node *ns = NULL; 
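
A generic standalone sketch, not driver code: efc_node_alloc() above acquires several resources in sequence (node from a mempool, DMA buffer, HW node, xarray slot) and, when a later step fails, unwinds the earlier ones in reverse order through goto labels. The shape of that pattern, with stand-in resources:

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what, int fail)
{
	if (fail) {
		fprintf(stderr, "%s failed\n", what);
		return NULL;
	}
	return malloc(1);        /* stand-in for the real resource */
}

static int alloc_object(void)
{
	void *a, *b, *c;

	a = acquire("step A (object pool)", 0);
	if (!a)
		return -1;
	b = acquire("step B (DMA buffer)", 0);
	if (!b)
		goto fail_b;
	c = acquire("step C (HW resource)", 1);  /* simulate a failure here */
	if (!c)
		goto fail_c;

	free(c); free(b); free(a);               /* normal teardown (demo only) */
	return 0;

fail_c:
	free(b);         /* undo step B */
fail_b:
	free(a);         /* undo step A */
	return -1;
}

int main(void)
{
	return alloc_object() ? 1 : 0;
}
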
+ + nport = node->nport; + efc = node->efc; + + node_printf(node, "Free'd\n"); + + if (node->refound) { + /* + * Save the name server node. We will send fake RSCN event at + * the end to handle ignored RSCN event during node deletion + */ + ns = efc_node_find(node->nport, FC_FID_DIR_SERV); + } + + if (!node->nport) { + efc_log_err(efc, "Node already Freed\n"); + return; + } + + /* Free HW resources */ + rc = efc_node_free_resources(efc, &node->rnode); + if (rc < 0) + efc_log_err(efc, "efc_hw_node_free failed: %d\n", rc); + + /* if the gidpt_delay_timer is still running, then delete it */ + if (timer_pending(&node->gidpt_delay_timer)) + del_timer(&node->gidpt_delay_timer); + + xa_erase(&nport->lookup, node->rnode.fc_id); + + /* + * If the node_list is empty, + * then post a ALL_CHILD_NODES_FREE event to the nport, + * after the lock is released. + * The nport may be free'd as a result of the event. + */ + if (xa_empty(&nport->lookup)) + efc_sm_post_event(&nport->sm, EFC_EVT_ALL_CHILD_NODES_FREE, + NULL); + + node->nport = NULL; + node->sm.current_state = NULL; + + kref_put(&nport->ref, nport->release); + kref_put(&node->ref, node->release); + + if (ns) { + /* sending fake RSCN event to name server node */ + efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, NULL); + } +} + +static void +efc_dma_copy_in(struct efc_dma *dma, void *buffer, u32 buffer_length) +{ + if (!dma || !buffer || !buffer_length) + return; + + if (buffer_length > dma->size) + buffer_length = dma->size; + + memcpy(dma->virt, buffer, buffer_length); + dma->len = buffer_length; +} + +int +efc_node_attach(struct efc_node *node) +{ + int rc = 0; + struct efc_nport *nport = node->nport; + struct efc_domain *domain = nport->domain; + struct efc *efc = node->efc; + + if (!domain->attached) { + efc_log_err(efc, "Warning: unattached domain\n"); + return -EIO; + } + /* Update node->wwpn/wwnn */ + + efc_node_build_eui_name(node->wwpn, sizeof(node->wwpn), + efc_node_get_wwpn(node)); + efc_node_build_eui_name(node->wwnn, sizeof(node->wwnn), + efc_node_get_wwnn(node)); + + efc_dma_copy_in(&node->sparm_dma_buf, node->service_params + 4, + sizeof(node->service_params) - 4); + + /* take lock to protect node->rnode.attached */ + rc = efc_cmd_node_attach(efc, &node->rnode, &node->sparm_dma_buf); + if (rc < 0) + efc_log_debug(efc, "efc_hw_node_attach failed: %d\n", rc); + + return rc; +} + +void +efc_node_fcid_display(u32 fc_id, char *buffer, u32 buffer_length) +{ + switch (fc_id) { + case FC_FID_FLOGI: + snprintf(buffer, buffer_length, "fabric"); + break; + case FC_FID_FCTRL: + snprintf(buffer, buffer_length, "fabctl"); + break; + case FC_FID_DIR_SERV: + snprintf(buffer, buffer_length, "nserve"); + break; + default: + if (fc_id == FC_FID_DOM_MGR) { + snprintf(buffer, buffer_length, "dctl%02x", + (fc_id & 0x0000ff)); + } else { + snprintf(buffer, buffer_length, "%06x", fc_id); + } + break; + } +} + +void +efc_node_update_display_name(struct efc_node *node) +{ + u32 port_id = node->rnode.fc_id; + struct efc_nport *nport = node->nport; + char portid_display[16]; + + efc_node_fcid_display(port_id, portid_display, sizeof(portid_display)); + + snprintf(node->display_name, sizeof(node->display_name), "%s.%s", + nport->display_name, portid_display); +} + +void +efc_node_send_ls_io_cleanup(struct efc_node *node) +{ + if (node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE) { + efc_log_debug(node->efc, "[%s] cleaning up LS_ACC oxid=0x%x\n", + node->display_name, node->ls_acc_oxid); + + node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE; + node->ls_acc_io = NULL; + } +} 
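
A simplified standalone sketch with hypothetical types (not the kernel kref API): node and nport lifetimes above are reference counted. Each node takes a reference on its parent nport at allocation time; efc_node_free() drops that reference along with the node's own, and an object's release callback runs only when its last reference goes away.

#include <stdio.h>

struct ref {
	int count;
	void (*release)(struct ref *r);
};

static void ref_get(struct ref *r) { r->count++; }

static void ref_put(struct ref *r)
{
	if (--r->count == 0)
		r->release(r);
}

static void nport_release(struct ref *r) { (void)r; printf("nport released\n"); }
static void node_release(struct ref *r)  { (void)r; printf("node released\n");  }

int main(void)
{
	struct ref nport = { 1, nport_release };  /* creator's reference      */
	struct ref node  = { 1, node_release };

	ref_get(&nport);   /* node pins its parent nport                      */

	ref_put(&nport);   /* node freed: drop its reference on the nport     */
	ref_put(&node);    /* drop the node's own reference -> node released  */

	ref_put(&nport);   /* creator drops the last reference -> nport released */
	return 0;
}
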
+ +static void efc_node_handle_implicit_logo(struct efc_node *node) +{ + int rc; + + /* + * currently, only case for implicit logo is PLOGI + * recvd. Thus, node's ELS IO pending list won't be + * empty (PLOGI will be on it) + */ + WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI); + node_printf(node, "Reason: implicit logout, re-authenticate\n"); + + /* Re-attach node with the same HW node resources */ + node->req_free = false; + rc = efc_node_attach(node); + efc_node_transition(node, __efc_d_wait_node_attach, NULL); + node->els_io_enabled = true; + + if (rc < 0) + efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL); +} + +static void efc_node_handle_explicit_logo(struct efc_node *node) +{ + s8 pend_frames_empty; + unsigned long flags = 0; + + /* cleanup any pending LS_ACC ELSs */ + efc_node_send_ls_io_cleanup(node); + + spin_lock_irqsave(&node->pend_frames_lock, flags); + pend_frames_empty = list_empty(&node->pend_frames); + spin_unlock_irqrestore(&node->pend_frames_lock, flags); + + /* + * there are two scenarios where we want to keep + * this node alive: + * 1. there are pending frames that need to be + * processed or + * 2. we're an initiator and the remote node is + * a target and we need to re-authenticate + */ + node_printf(node, "Shutdown: explicit logo pend=%d ", !pend_frames_empty); + node_printf(node, "nport.ini=%d node.tgt=%d\n", + node->nport->enable_ini, node->targ); + if (!pend_frames_empty || (node->nport->enable_ini && node->targ)) { + u8 send_plogi = false; + + if (node->nport->enable_ini && node->targ) { + /* + * we're an initiator and + * node shutting down is a target; + * we'll need to re-authenticate in + * initial state + */ + send_plogi = true; + } + + /* + * transition to __efc_d_init + * (will retain HW node resources) + */ + node->els_io_enabled = true; + node->req_free = false; + + /* + * either pending frames exist or we are re-authenticating + * with PLOGI (or both); in either case, return to initial + * state + */ + efc_node_init_device(node, send_plogi); + } + /* else: let node shutdown occur */ +} + +static void +efc_node_purge_pending(struct efc_node *node) +{ + struct efc *efc = node->efc; + struct efc_hw_sequence *frame, *next; + unsigned long flags = 0; + + spin_lock_irqsave(&node->pend_frames_lock, flags); + + list_for_each_entry_safe(frame, next, &node->pend_frames, list_entry) { + list_del(&frame->list_entry); + efc->tt.hw_seq_free(efc, frame); + } + + spin_unlock_irqrestore(&node->pend_frames_lock, flags); +} + +void +__efc_node_shutdown(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: { + efc_node_hold_frames(node); + WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list)); + /* by default, we will be freeing node after we unwind */ + node->req_free = true; + + switch (node->shutdown_reason) { + case EFC_NODE_SHUTDOWN_IMPLICIT_LOGO: + /* Node shutdown b/c of PLOGI received when node + * already logged in. 
We have PLOGI service + * parameters, so submit node attach; we won't be + * freeing this node + */ + + efc_node_handle_implicit_logo(node); + break; + + case EFC_NODE_SHUTDOWN_EXPLICIT_LOGO: + efc_node_handle_explicit_logo(node); + break; + + case EFC_NODE_SHUTDOWN_DEFAULT: + default: { + /* + * shutdown due to link down, + * node going away (xport event) or + * nport shutdown, purge pending and + * proceed to cleanup node + */ + + /* cleanup any pending LS_ACC ELSs */ + efc_node_send_ls_io_cleanup(node); + + node_printf(node, + "Shutdown reason: default, purge pending\n"); + efc_node_purge_pending(node); + break; + } + } + + break; + } + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + default: + __efc_node_common(__func__, ctx, evt, arg); + } +} + +static bool +efc_node_check_els_quiesced(struct efc_node *node) +{ + /* check to see if ELS requests, completions are quiesced */ + if (node->els_req_cnt == 0 && node->els_cmpl_cnt == 0 && + efc_els_io_list_empty(node, &node->els_ios_list)) { + if (!node->attached) { + /* hw node detach already completed, proceed */ + node_printf(node, "HW node not attached\n"); + efc_node_transition(node, + __efc_node_wait_ios_shutdown, + NULL); + } else { + /* + * hw node detach hasn't completed, + * transition and wait + */ + node_printf(node, "HW node still attached\n"); + efc_node_transition(node, __efc_node_wait_node_free, + NULL); + } + return true; + } + return false; +} + +void +efc_node_initiate_cleanup(struct efc_node *node) +{ + /* + * if ELS's have already been quiesced, will move to next state + * if ELS's have not been quiesced, abort them + */ + if (!efc_node_check_els_quiesced(node)) { + efc_node_hold_frames(node); + efc_node_transition(node, __efc_node_wait_els_shutdown, NULL); + } +} + +void +__efc_node_wait_els_shutdown(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + bool check_quiesce = false; + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + /* Node state machine: Wait for all ELSs to complete */ + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + if (efc_els_io_list_empty(node, &node->els_ios_list)) { + node_printf(node, "All ELS IOs complete\n"); + check_quiesce = true; + } + break; + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_SRRS_ELS_REQ_OK: + case EFC_EVT_SRRS_ELS_REQ_FAIL: + case EFC_EVT_SRRS_ELS_REQ_RJT: + case EFC_EVT_ELS_REQ_ABORTED: + if (WARN_ON(!node->els_req_cnt)) + break; + node->els_req_cnt--; + check_quiesce = true; + break; + + case EFC_EVT_SRRS_ELS_CMPL_OK: + case EFC_EVT_SRRS_ELS_CMPL_FAIL: + if (WARN_ON(!node->els_cmpl_cnt)) + break; + node->els_cmpl_cnt--; + check_quiesce = true; + break; + + case EFC_EVT_ALL_CHILD_NODES_FREE: + /* all ELS IO's complete */ + node_printf(node, "All ELS IOs complete\n"); + WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list)); + check_quiesce = true; + break; + + case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY: + check_quiesce = true; + break; + + case EFC_EVT_DOMAIN_ATTACH_OK: + /* don't care about domain_attach_ok */ + break; + + /* ignore shutdown events as we're already in shutdown path */ + case EFC_EVT_SHUTDOWN: + /* have default shutdown event take precedence */ + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + fallthrough; + + case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: + case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: + node_printf(node, "%s received\n", efc_sm_event_name(evt)); + break; + + default: + __efc_node_common(__func__, ctx, evt, arg); + } + + if 
(check_quiesce) + efc_node_check_els_quiesced(node); +} + +void +__efc_node_wait_node_free(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_NODE_FREE_OK: + /* node is officially no longer attached */ + node->attached = false; + efc_node_transition(node, __efc_node_wait_ios_shutdown, NULL); + break; + + case EFC_EVT_ALL_CHILD_NODES_FREE: + case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY: + /* As IOs and ELS IO's complete we expect to get these events */ + break; + + case EFC_EVT_DOMAIN_ATTACH_OK: + /* don't care about domain_attach_ok */ + break; + + /* ignore shutdown events as we're already in shutdown path */ + case EFC_EVT_SHUTDOWN: + /* have default shutdown event take precedence */ + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + fallthrough; + + case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: + case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: + node_printf(node, "%s received\n", efc_sm_event_name(evt)); + break; + default: + __efc_node_common(__func__, ctx, evt, arg); + } +} + +void +__efc_node_wait_ios_shutdown(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + struct efc *efc = node->efc; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + switch (evt) { + case EFC_EVT_ENTER: + efc_node_hold_frames(node); + + /* first check to see if no ELS IOs are outstanding */ + if (efc_els_io_list_empty(node, &node->els_ios_list)) + /* If there are any active IOS, Free them. */ + efc_node_transition(node, __efc_node_shutdown, NULL); + break; + + case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY: + case EFC_EVT_ALL_CHILD_NODES_FREE: + if (efc_els_io_list_empty(node, &node->els_ios_list)) + efc_node_transition(node, __efc_node_shutdown, NULL); + break; + + case EFC_EVT_EXIT: + efc_node_accept_frames(node); + break; + + case EFC_EVT_SRRS_ELS_REQ_FAIL: + /* Can happen as ELS IO IO's complete */ + if (WARN_ON(!node->els_req_cnt)) + break; + node->els_req_cnt--; + break; + + /* ignore shutdown events as we're already in shutdown path */ + case EFC_EVT_SHUTDOWN: + /* have default shutdown event take precedence */ + node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; + fallthrough; + + case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: + case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: + efc_log_debug(efc, "[%s] %-20s\n", node->display_name, + efc_sm_event_name(evt)); + break; + case EFC_EVT_DOMAIN_ATTACH_OK: + /* don't care about domain_attach_ok */ + break; + default: + __efc_node_common(__func__, ctx, evt, arg); + } +} + +void +__efc_node_common(const char *funcname, struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = NULL; + struct efc *efc = NULL; + struct efc_node_cb *cbdata = arg; + + node = ctx->app; + efc = node->efc; + + switch (evt) { + case EFC_EVT_ENTER: + case EFC_EVT_REENTER: + case EFC_EVT_EXIT: + case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: + case EFC_EVT_NODE_MISSING: + case EFC_EVT_FCP_CMD_RCVD: + break; + + case EFC_EVT_NODE_REFOUND: + node->refound = true; + break; + + /* + * node->attached must be set appropriately + * for all node attach/detach events + */ + case EFC_EVT_NODE_ATTACH_OK: + node->attached = true; + break; + + case EFC_EVT_NODE_FREE_OK: + case EFC_EVT_NODE_ATTACH_FAIL: + node->attached = false; + break; + + /* + * handle any ELS completions that + * other states either 
didn't care about + * or forgot about + */ + case EFC_EVT_SRRS_ELS_CMPL_OK: + case EFC_EVT_SRRS_ELS_CMPL_FAIL: + if (WARN_ON(!node->els_cmpl_cnt)) + break; + node->els_cmpl_cnt--; + break; + + /* + * handle any ELS request completions that + * other states either didn't care about + * or forgot about + */ + case EFC_EVT_SRRS_ELS_REQ_OK: + case EFC_EVT_SRRS_ELS_REQ_FAIL: + case EFC_EVT_SRRS_ELS_REQ_RJT: + case EFC_EVT_ELS_REQ_ABORTED: + if (WARN_ON(!node->els_req_cnt)) + break; + node->els_req_cnt--; + break; + + case EFC_EVT_ELS_RCVD: { + struct fc_frame_header *hdr = cbdata->header->dma.virt; + + /* + * Unsupported ELS was received, + * send LS_RJT, command not supported + */ + efc_log_debug(efc, + "[%s] (%s) ELS x%02x, LS_RJT not supported\n", + node->display_name, funcname, + ((u8 *)cbdata->payload->dma.virt)[0]); + + efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id), + ELS_RJT_UNSUP, ELS_EXPL_NONE, 0); + break; + } + + case EFC_EVT_PLOGI_RCVD: + case EFC_EVT_FLOGI_RCVD: + case EFC_EVT_LOGO_RCVD: + case EFC_EVT_PRLI_RCVD: + case EFC_EVT_PRLO_RCVD: + case EFC_EVT_PDISC_RCVD: + case EFC_EVT_FDISC_RCVD: + case EFC_EVT_ADISC_RCVD: + case EFC_EVT_RSCN_RCVD: + case EFC_EVT_SCR_RCVD: { + struct fc_frame_header *hdr = cbdata->header->dma.virt; + + /* sm: / send ELS_RJT */ + efc_log_debug(efc, "[%s] (%s) %s sending ELS_RJT\n", + node->display_name, funcname, + efc_sm_event_name(evt)); + /* if we didn't catch this in a state, send generic LS_RJT */ + efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id), + ELS_RJT_UNAB, ELS_EXPL_NONE, 0); + break; + } + case EFC_EVT_ABTS_RCVD: { + efc_log_debug(efc, "[%s] (%s) %s sending BA_ACC\n", + node->display_name, funcname, + efc_sm_event_name(evt)); + + /* sm: / send BA_ACC */ + efc_send_bls_acc(node, cbdata->header->dma.virt); + break; + } + + default: + efc_log_debug(node->efc, "[%s] %-20s %-20s not handled\n", + node->display_name, funcname, + efc_sm_event_name(evt)); + } +} + +void +efc_node_save_sparms(struct efc_node *node, void *payload) +{ + memcpy(node->service_params, payload, sizeof(node->service_params)); +} + +void +efc_node_post_event(struct efc_node *node, + enum efc_sm_event evt, void *arg) +{ + bool free_node = false; + + node->evtdepth++; + + efc_sm_post_event(&node->sm, evt, arg); + + /* If our event call depth is one and + * we're not holding frames + * then we can dispatch any pending frames. + * We don't want to allow the efc_process_node_pending() + * call to recurse. 
+ */ + if (!node->hold_frames && node->evtdepth == 1) + efc_process_node_pending(node); + + node->evtdepth--; + + /* + * Free the node object if so requested, + * and we're at an event call depth of zero + */ + if (node->evtdepth == 0 && node->req_free) + free_node = true; + + if (free_node) + efc_node_free(node); +} + +void +efc_node_transition(struct efc_node *node, + void (*state)(struct efc_sm_ctx *, + enum efc_sm_event, void *), void *data) +{ + struct efc_sm_ctx *ctx = &node->sm; + + if (ctx->current_state == state) { + efc_node_post_event(node, EFC_EVT_REENTER, data); + } else { + efc_node_post_event(node, EFC_EVT_EXIT, data); + ctx->current_state = state; + efc_node_post_event(node, EFC_EVT_ENTER, data); + } +} + +void +efc_node_build_eui_name(char *buf, u32 buf_len, uint64_t eui_name) +{ + memset(buf, 0, buf_len); + + snprintf(buf, buf_len, "eui.%016llX", (unsigned long long)eui_name); +} + +u64 +efc_node_get_wwpn(struct efc_node *node) +{ + struct fc_els_flogi *sp = + (struct fc_els_flogi *)node->service_params; + + return be64_to_cpu(sp->fl_wwpn); +} + +u64 +efc_node_get_wwnn(struct efc_node *node) +{ + struct fc_els_flogi *sp = + (struct fc_els_flogi *)node->service_params; + + return be64_to_cpu(sp->fl_wwnn); +} + +int +efc_node_check_els_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg, + u8 cmd, void (*efc_node_common_func)(const char *, + struct efc_sm_ctx *, enum efc_sm_event, void *), + const char *funcname) +{ + return 0; +} + +int +efc_node_check_ns_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg, + u16 cmd, void (*efc_node_common_func)(const char *, + struct efc_sm_ctx *, enum efc_sm_event, void *), + const char *funcname) +{ + return 0; +} + +int +efc_els_io_list_empty(struct efc_node *node, struct list_head *list) +{ + int empty; + unsigned long flags = 0; + + spin_lock_irqsave(&node->els_ios_lock, flags); + empty = list_empty(list); + spin_unlock_irqrestore(&node->els_ios_lock, flags); + return empty; +} + +void +efc_node_pause(struct efc_node *node, + void (*state)(struct efc_sm_ctx *, + enum efc_sm_event, void *)) + +{ + node->nodedb_state = state; + efc_node_transition(node, __efc_node_paused, NULL); +} + +void +__efc_node_paused(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_node *node = ctx->app; + + efc_node_evt_set(ctx, evt, __func__); + + node_sm_trace(); + + /* + * This state is entered when a state is "paused". 
When resumed, the + * node is transitioned to a previously saved state (node->ndoedb_state) + */ + switch (evt) { + case EFC_EVT_ENTER: + node_printf(node, "Paused\n"); + break; + + case EFC_EVT_RESUME: { + void (*pf)(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); + + pf = node->nodedb_state; + + node->nodedb_state = NULL; + efc_node_transition(node, pf, NULL); + break; + } + + case EFC_EVT_DOMAIN_ATTACH_OK: + break; + + case EFC_EVT_SHUTDOWN: + node->req_free = true; + break; + + default: + __efc_node_common(__func__, ctx, evt, arg); + } +} + +void +efc_node_recv_els_frame(struct efc_node *node, + struct efc_hw_sequence *seq) +{ + u32 prli_size = sizeof(struct fc_els_prli) + sizeof(struct fc_els_spp); + struct { + u32 cmd; + enum efc_sm_event evt; + u32 payload_size; + } els_cmd_list[] = { + {ELS_PLOGI, EFC_EVT_PLOGI_RCVD, sizeof(struct fc_els_flogi)}, + {ELS_FLOGI, EFC_EVT_FLOGI_RCVD, sizeof(struct fc_els_flogi)}, + {ELS_LOGO, EFC_EVT_LOGO_RCVD, sizeof(struct fc_els_ls_acc)}, + {ELS_PRLI, EFC_EVT_PRLI_RCVD, prli_size}, + {ELS_PRLO, EFC_EVT_PRLO_RCVD, prli_size}, + {ELS_PDISC, EFC_EVT_PDISC_RCVD, MAX_ACC_REJECT_PAYLOAD}, + {ELS_FDISC, EFC_EVT_FDISC_RCVD, MAX_ACC_REJECT_PAYLOAD}, + {ELS_ADISC, EFC_EVT_ADISC_RCVD, sizeof(struct fc_els_adisc)}, + {ELS_RSCN, EFC_EVT_RSCN_RCVD, MAX_ACC_REJECT_PAYLOAD}, + {ELS_SCR, EFC_EVT_SCR_RCVD, MAX_ACC_REJECT_PAYLOAD}, + }; + struct efc_node_cb cbdata; + u8 *buf = seq->payload->dma.virt; + enum efc_sm_event evt = EFC_EVT_ELS_RCVD; + u32 i; + + memset(&cbdata, 0, sizeof(cbdata)); + cbdata.header = seq->header; + cbdata.payload = seq->payload; + + /* find a matching event for the ELS command */ + for (i = 0; i < ARRAY_SIZE(els_cmd_list); i++) { + if (els_cmd_list[i].cmd == buf[0]) { + evt = els_cmd_list[i].evt; + break; + } + } + + efc_node_post_event(node, evt, &cbdata); +} + +void +efc_node_recv_ct_frame(struct efc_node *node, + struct efc_hw_sequence *seq) +{ + struct fc_ct_hdr *iu = seq->payload->dma.virt; + struct fc_frame_header *hdr = seq->header->dma.virt; + struct efc *efc = node->efc; + u16 gscmd = be16_to_cpu(iu->ct_cmd); + + efc_log_err(efc, "[%s] Received cmd :%x sending CT_REJECT\n", + node->display_name, gscmd); + efc_send_ct_rsp(efc, node, be16_to_cpu(hdr->fh_ox_id), iu, + FC_FS_RJT, FC_FS_RJT_UNSUP, 0); +} + +void +efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq) +{ + struct efc_node_cb cbdata; + + memset(&cbdata, 0, sizeof(cbdata)); + cbdata.header = seq->header; + cbdata.payload = seq->payload; + + efc_node_post_event(node, EFC_EVT_FCP_CMD_RCVD, &cbdata); +} + +void +efc_process_node_pending(struct efc_node *node) +{ + struct efc *efc = node->efc; + struct efc_hw_sequence *seq = NULL; + u32 pend_frames_processed = 0; + unsigned long flags = 0; + + for (;;) { + /* need to check for hold frames condition after each frame + * processed because any given frame could cause a transition + * to a state that holds frames + */ + if (node->hold_frames) + break; + + seq = NULL; + /* Get next frame/sequence */ + spin_lock_irqsave(&node->pend_frames_lock, flags); + + if (!list_empty(&node->pend_frames)) { + seq = list_first_entry(&node->pend_frames, + struct efc_hw_sequence, list_entry); + list_del(&seq->list_entry); + } + spin_unlock_irqrestore(&node->pend_frames_lock, flags); + + if (!seq) { + pend_frames_processed = node->pend_frames_processed; + node->pend_frames_processed = 0; + break; + } + node->pend_frames_processed++; + + /* now dispatch frame(s) to dispatch function */ + efc_node_dispatch_frame(node, 
seq); + efc->tt.hw_seq_free(efc, seq); + } + + if (pend_frames_processed != 0) + efc_log_debug(efc, "%u node frames held and processed\n", + pend_frames_processed); +} + +void +efc_scsi_sess_reg_complete(struct efc_node *node, u32 status) +{ + unsigned long flags = 0; + enum efc_sm_event evt = EFC_EVT_NODE_SESS_REG_OK; + struct efc *efc = node->efc; + + if (status) + evt = EFC_EVT_NODE_SESS_REG_FAIL; + + spin_lock_irqsave(&efc->lock, flags); + /* Notify the node to resume */ + efc_node_post_event(node, evt, NULL); + spin_unlock_irqrestore(&efc->lock, flags); +} + +void +efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&efc->lock, flags); + /* Notify the node to resume */ + efc_node_post_event(node, EFC_EVT_NODE_DEL_INI_COMPLETE, NULL); + spin_unlock_irqrestore(&efc->lock, flags); +} + +void +efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&efc->lock, flags); + /* Notify the node to resume */ + efc_node_post_event(node, EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL); + spin_unlock_irqrestore(&efc->lock, flags); +} + +void +efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&efc->lock, flags); + efc_node_post_event(node, EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY, NULL); + spin_unlock_irqrestore(&efc->lock, flags); +} + +void efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg) +{ + struct efc *efc = node->efc; + unsigned long flags = 0; + + spin_lock_irqsave(&efc->lock, flags); + efc_node_post_event(node, evt, arg); + spin_unlock_irqrestore(&efc->lock, flags); +} + +void efc_node_post_shutdown(struct efc_node *node, void *arg) +{ + unsigned long flags = 0; + struct efc *efc = node->efc; + + spin_lock_irqsave(&efc->lock, flags); + efc_node_post_event(node, EFC_EVT_SHUTDOWN, arg); + spin_unlock_irqrestore(&efc->lock, flags); +} diff --git a/drivers/scsi/elx/libefc/efc_node.h b/drivers/scsi/elx/libefc/efc_node.h new file mode 100644 index 000000000..e9c600ac4 --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_node.h @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +#if !defined(__EFC_NODE_H__) +#define __EFC_NODE_H__ +#include "scsi/fc/fc_ns.h" + +#define EFC_NODEDB_PAUSE_FABRIC_LOGIN (1 << 0) +#define EFC_NODEDB_PAUSE_NAMESERVER (1 << 1) +#define EFC_NODEDB_PAUSE_NEW_NODES (1 << 2) + +#define MAX_ACC_REJECT_PAYLOAD sizeof(struct fc_els_ls_rjt) + +#define scsi_io_printf(io, fmt, ...) 
\ + efc_log_debug(io->efc, "[%s] [%04x][i:%04x t:%04x h:%04x]" fmt, \ + io->node->display_name, io->instance_index, io->init_task_tag, \ + io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__) + +static inline void +efc_node_evt_set(struct efc_sm_ctx *ctx, enum efc_sm_event evt, + const char *handler) +{ + struct efc_node *node = ctx->app; + + if (evt == EFC_EVT_ENTER) { + strncpy(node->current_state_name, handler, + sizeof(node->current_state_name)); + } else if (evt == EFC_EVT_EXIT) { + strncpy(node->prev_state_name, node->current_state_name, + sizeof(node->prev_state_name)); + strncpy(node->current_state_name, "invalid", + sizeof(node->current_state_name)); + } + node->prev_evt = node->current_evt; + node->current_evt = evt; +} + +/** + * hold frames in pending frame list + * + * Unsolicited receive frames are held on the node pending frame list, + * rather than being processed. + */ + +static inline void +efc_node_hold_frames(struct efc_node *node) +{ + node->hold_frames = true; +} + +/** + * accept frames + * + * Unsolicited receive frames processed rather than being held on the node + * pending frame list. + */ + +static inline void +efc_node_accept_frames(struct efc_node *node) +{ + node->hold_frames = false; +} + +/* + * Node initiator/target enable defines + * All combinations of the SLI port (nport) initiator/target enable, + * and remote node initiator/target enable are enumerated. + * ex: EFC_NODE_ENABLE_T_TO_IT decodes to target mode is enabled on SLI port + * and I+T is enabled on remote node. + */ +enum efc_node_enable { + EFC_NODE_ENABLE_x_TO_x, + EFC_NODE_ENABLE_x_TO_T, + EFC_NODE_ENABLE_x_TO_I, + EFC_NODE_ENABLE_x_TO_IT, + EFC_NODE_ENABLE_T_TO_x, + EFC_NODE_ENABLE_T_TO_T, + EFC_NODE_ENABLE_T_TO_I, + EFC_NODE_ENABLE_T_TO_IT, + EFC_NODE_ENABLE_I_TO_x, + EFC_NODE_ENABLE_I_TO_T, + EFC_NODE_ENABLE_I_TO_I, + EFC_NODE_ENABLE_I_TO_IT, + EFC_NODE_ENABLE_IT_TO_x, + EFC_NODE_ENABLE_IT_TO_T, + EFC_NODE_ENABLE_IT_TO_I, + EFC_NODE_ENABLE_IT_TO_IT, +}; + +static inline enum efc_node_enable +efc_node_get_enable(struct efc_node *node) +{ + u32 retval = 0; + + if (node->nport->enable_ini) + retval |= (1U << 3); + if (node->nport->enable_tgt) + retval |= (1U << 2); + if (node->init) + retval |= (1U << 1); + if (node->targ) + retval |= (1U << 0); + return (enum efc_node_enable)retval; +} + +int +efc_node_check_els_req(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg, + u8 cmd, void (*efc_node_common_func)(const char *, + struct efc_sm_ctx *, enum efc_sm_event, void *), + const char *funcname); +int +efc_node_check_ns_req(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg, + u16 cmd, void (*efc_node_common_func)(const char *, + struct efc_sm_ctx *, enum efc_sm_event, void *), + const char *funcname); +int +efc_node_attach(struct efc_node *node); +struct efc_node * +efc_node_alloc(struct efc_nport *nport, u32 port_id, + bool init, bool targ); +void +efc_node_free(struct efc_node *efc); +void +efc_node_update_display_name(struct efc_node *node); +void efc_node_post_event(struct efc_node *node, enum efc_sm_event evt, + void *arg); + +void +__efc_node_shutdown(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_node_wait_node_free(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_node_wait_els_shutdown(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_node_wait_ios_shutdown(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +efc_node_save_sparms(struct efc_node *node, void *payload); +void 
+efc_node_transition(struct efc_node *node, + void (*state)(struct efc_sm_ctx *, enum efc_sm_event, + void *), void *data); +void +__efc_node_common(const char *funcname, struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); + +void +efc_node_initiate_cleanup(struct efc_node *node); + +void +efc_node_build_eui_name(char *buf, u32 buf_len, uint64_t eui_name); + +void +efc_node_pause(struct efc_node *node, + void (*state)(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg)); +void +__efc_node_paused(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +int +efc_node_active_ios_empty(struct efc_node *node); +void +efc_node_send_ls_io_cleanup(struct efc_node *node); + +int +efc_els_io_list_empty(struct efc_node *node, struct list_head *list); + +void +efc_process_node_pending(struct efc_node *domain); + +u64 efc_node_get_wwnn(struct efc_node *node); +struct efc_node * +efc_node_find(struct efc_nport *nport, u32 id); +void +efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg); +void +efc_node_recv_els_frame(struct efc_node *node, struct efc_hw_sequence *s); +void +efc_node_recv_ct_frame(struct efc_node *node, struct efc_hw_sequence *seq); +void +efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq); + +#endif /* __EFC_NODE_H__ */ diff --git a/drivers/scsi/elx/libefc/efc_nport.c b/drivers/scsi/elx/libefc/efc_nport.c new file mode 100644 index 000000000..2e83a6679 --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_nport.c @@ -0,0 +1,777 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +/* + * NPORT + * + * Port object for physical port and NPIV ports. + */ + +/* + * NPORT REFERENCE COUNTING + * + * A nport reference should be taken when: + * - an nport is allocated + * - a vport populates associated nport + * - a remote node is allocated + * - a unsolicited frame is processed + * The reference should be dropped when: + * - the unsolicited frame processesing is done + * - the remote node is removed + * - the vport is removed + * - the nport is removed + */ + +#include "efc.h" + +void +efc_nport_cb(void *arg, int event, void *data) +{ + struct efc *efc = arg; + struct efc_nport *nport = data; + unsigned long flags = 0; + + efc_log_debug(efc, "nport event: %s\n", efc_sm_event_name(event)); + + spin_lock_irqsave(&efc->lock, flags); + efc_sm_post_event(&nport->sm, event, NULL); + spin_unlock_irqrestore(&efc->lock, flags); +} + +static struct efc_nport * +efc_nport_find_wwn(struct efc_domain *domain, uint64_t wwnn, uint64_t wwpn) +{ + struct efc_nport *nport = NULL; + + /* Find a nport, given the WWNN and WWPN */ + list_for_each_entry(nport, &domain->nport_list, list_entry) { + if (nport->wwnn == wwnn && nport->wwpn == wwpn) + return nport; + } + return NULL; +} + +static void +_efc_nport_free(struct kref *arg) +{ + struct efc_nport *nport = container_of(arg, struct efc_nport, ref); + + kfree(nport); +} + +struct efc_nport * +efc_nport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn, + u32 fc_id, bool enable_ini, bool enable_tgt) +{ + struct efc_nport *nport; + + if (domain->efc->enable_ini) + enable_ini = 0; + + /* Return a failure if this nport has already been allocated */ + if ((wwpn != 0) || (wwnn != 0)) { + nport = efc_nport_find_wwn(domain, wwnn, wwpn); + if (nport) { + efc_log_err(domain->efc, + "NPORT %016llX %016llX already allocated\n", + wwnn, wwpn); + return NULL; + } + } + + nport = 
kzalloc(sizeof(*nport), GFP_ATOMIC); + if (!nport) + return nport; + + /* initialize refcount */ + kref_init(&nport->ref); + nport->release = _efc_nport_free; + + nport->efc = domain->efc; + snprintf(nport->display_name, sizeof(nport->display_name), "------"); + nport->domain = domain; + xa_init(&nport->lookup); + nport->instance_index = domain->nport_count++; + nport->sm.app = nport; + nport->enable_ini = enable_ini; + nport->enable_tgt = enable_tgt; + nport->enable_rscn = (nport->enable_ini || + (nport->enable_tgt && enable_target_rscn(nport->efc))); + + /* Copy service parameters from domain */ + memcpy(nport->service_params, domain->service_params, + sizeof(struct fc_els_flogi)); + + /* Update requested fc_id */ + nport->fc_id = fc_id; + + /* Update the nport's service parameters for the new wwn's */ + nport->wwpn = wwpn; + nport->wwnn = wwnn; + snprintf(nport->wwnn_str, sizeof(nport->wwnn_str), "%016llX", + (unsigned long long)wwnn); + + /* + * if this is the "first" nport of the domain, + * then make it the "phys" nport + */ + if (list_empty(&domain->nport_list)) + domain->nport = nport; + + INIT_LIST_HEAD(&nport->list_entry); + list_add_tail(&nport->list_entry, &domain->nport_list); + + kref_get(&domain->ref); + + efc_log_debug(domain->efc, "New Nport [%s]\n", nport->display_name); + + return nport; +} + +void +efc_nport_free(struct efc_nport *nport) +{ + struct efc_domain *domain; + + if (!nport) + return; + + domain = nport->domain; + efc_log_debug(domain->efc, "[%s] free nport\n", nport->display_name); + list_del(&nport->list_entry); + /* + * if this is the physical nport, + * then clear it out of the domain + */ + if (nport == domain->nport) + domain->nport = NULL; + + xa_destroy(&nport->lookup); + xa_erase(&domain->lookup, nport->fc_id); + + if (list_empty(&domain->nport_list)) + efc_domain_post_event(domain, EFC_EVT_ALL_CHILD_NODES_FREE, + NULL); + + kref_put(&domain->ref, domain->release); + kref_put(&nport->ref, nport->release); +} + +struct efc_nport * +efc_nport_find(struct efc_domain *domain, u32 d_id) +{ + struct efc_nport *nport; + + /* Find a nport object, given an FC_ID */ + nport = xa_load(&domain->lookup, d_id); + if (!nport || !kref_get_unless_zero(&nport->ref)) + return NULL; + + return nport; +} + +int +efc_nport_attach(struct efc_nport *nport, u32 fc_id) +{ + int rc; + struct efc_node *node; + struct efc *efc = nport->efc; + unsigned long index; + + /* Set our lookup */ + rc = xa_err(xa_store(&nport->domain->lookup, fc_id, nport, GFP_ATOMIC)); + if (rc) { + efc_log_err(efc, "Sport lookup store failed: %d\n", rc); + return rc; + } + + /* Update our display_name */ + efc_node_fcid_display(fc_id, nport->display_name, + sizeof(nport->display_name)); + + xa_for_each(&nport->lookup, index, node) { + efc_node_update_display_name(node); + } + + efc_log_debug(nport->efc, "[%s] attach nport: fc_id x%06x\n", + nport->display_name, fc_id); + + /* Register a nport, given an FC_ID */ + rc = efc_cmd_nport_attach(efc, nport, fc_id); + if (rc < 0) { + efc_log_err(nport->efc, + "efc_hw_port_attach failed: %d\n", rc); + return -EIO; + } + return 0; +} + +static void +efc_nport_shutdown(struct efc_nport *nport) +{ + struct efc *efc = nport->efc; + struct efc_node *node; + unsigned long index; + + xa_for_each(&nport->lookup, index, node) { + if (!(node->rnode.fc_id == FC_FID_FLOGI && nport->is_vport)) { + efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL); + continue; + } + + /* + * If this is a vport, logout of the fabric + * controller so that it deletes the vport + * on the 
switch. + */ + /* if link is down, don't send logo */ + if (efc->link_status == EFC_LINK_STATUS_DOWN) { + efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL); + continue; + } + + efc_log_debug(efc, "[%s] nport shutdown vport, send logo\n", + node->display_name); + + if (!efc_send_logo(node)) { + /* sent LOGO, wait for response */ + efc_node_transition(node, __efc_d_wait_logo_rsp, NULL); + continue; + } + + /* + * failed to send LOGO, + * go ahead and cleanup node anyways + */ + node_printf(node, "Failed to send LOGO\n"); + efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL); + } +} + +static void +efc_vport_link_down(struct efc_nport *nport) +{ + struct efc *efc = nport->efc; + struct efc_vport *vport; + + /* Clear the nport reference in the vport specification */ + list_for_each_entry(vport, &efc->vport_list, list_entry) { + if (vport->nport == nport) { + kref_put(&nport->ref, nport->release); + vport->nport = NULL; + break; + } + } +} + +static void +__efc_nport_common(const char *funcname, struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_nport *nport = ctx->app; + struct efc_domain *domain = nport->domain; + struct efc *efc = nport->efc; + + switch (evt) { + case EFC_EVT_ENTER: + case EFC_EVT_REENTER: + case EFC_EVT_EXIT: + case EFC_EVT_ALL_CHILD_NODES_FREE: + break; + case EFC_EVT_NPORT_ATTACH_OK: + efc_sm_transition(ctx, __efc_nport_attached, NULL); + break; + case EFC_EVT_SHUTDOWN: + /* Flag this nport as shutting down */ + nport->shutting_down = true; + + if (nport->is_vport) + efc_vport_link_down(nport); + + if (xa_empty(&nport->lookup)) { + /* Remove the nport from the domain's lookup table */ + xa_erase(&domain->lookup, nport->fc_id); + efc_sm_transition(ctx, __efc_nport_wait_port_free, + NULL); + if (efc_cmd_nport_free(efc, nport)) { + efc_log_debug(nport->efc, + "efc_hw_port_free failed\n"); + /* Not much we can do, free the nport anyways */ + efc_nport_free(nport); + } + } else { + /* sm: node list is not empty / shutdown nodes */ + efc_sm_transition(ctx, + __efc_nport_wait_shutdown, NULL); + efc_nport_shutdown(nport); + } + break; + default: + efc_log_debug(nport->efc, "[%s] %-20s %-20s not handled\n", + nport->display_name, funcname, + efc_sm_event_name(evt)); + } +} + +void +__efc_nport_allocated(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_nport *nport = ctx->app; + struct efc_domain *domain = nport->domain; + + nport_sm_trace(nport); + + switch (evt) { + /* the physical nport is attached */ + case EFC_EVT_NPORT_ATTACH_OK: + WARN_ON(nport != domain->nport); + efc_sm_transition(ctx, __efc_nport_attached, NULL); + break; + + case EFC_EVT_NPORT_ALLOC_OK: + /* ignore */ + break; + default: + __efc_nport_common(__func__, ctx, evt, arg); + } +} + +void +__efc_nport_vport_init(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_nport *nport = ctx->app; + struct efc *efc = nport->efc; + + nport_sm_trace(nport); + + switch (evt) { + case EFC_EVT_ENTER: { + __be64 be_wwpn = cpu_to_be64(nport->wwpn); + + if (nport->wwpn == 0) + efc_log_debug(efc, "vport: letting f/w select WWN\n"); + + if (nport->fc_id != U32_MAX) { + efc_log_debug(efc, "vport: hard coding port id: %x\n", + nport->fc_id); + } + + efc_sm_transition(ctx, __efc_nport_vport_wait_alloc, NULL); + /* If wwpn is zero, then we'll let the f/w assign wwpn*/ + if (efc_cmd_nport_alloc(efc, nport, nport->domain, + nport->wwpn == 0 ? 
NULL : + (uint8_t *)&be_wwpn)) { + efc_log_err(efc, "Can't allocate port\n"); + break; + } + + break; + } + default: + __efc_nport_common(__func__, ctx, evt, arg); + } +} + +void +__efc_nport_vport_wait_alloc(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_nport *nport = ctx->app; + struct efc *efc = nport->efc; + + nport_sm_trace(nport); + + switch (evt) { + case EFC_EVT_NPORT_ALLOC_OK: { + struct fc_els_flogi *sp; + + sp = (struct fc_els_flogi *)nport->service_params; + + if (nport->wwnn == 0) { + nport->wwnn = be64_to_cpu(nport->sli_wwnn); + nport->wwpn = be64_to_cpu(nport->sli_wwpn); + snprintf(nport->wwnn_str, sizeof(nport->wwnn_str), + "%016llX", nport->wwpn); + } + + /* Update the nport's service parameters */ + sp->fl_wwpn = cpu_to_be64(nport->wwpn); + sp->fl_wwnn = cpu_to_be64(nport->wwnn); + + /* + * if nport->fc_id is uninitialized, + * then request that the fabric node use FDISC + * to find an fc_id. + * Otherwise we're restoring vports, or we're in + * fabric emulation mode, so attach the fc_id + */ + if (nport->fc_id == U32_MAX) { + struct efc_node *fabric; + + fabric = efc_node_alloc(nport, FC_FID_FLOGI, false, + false); + if (!fabric) { + efc_log_err(efc, "efc_node_alloc() failed\n"); + return; + } + efc_node_transition(fabric, __efc_vport_fabric_init, + NULL); + } else { + snprintf(nport->wwnn_str, sizeof(nport->wwnn_str), + "%016llX", nport->wwpn); + efc_nport_attach(nport, nport->fc_id); + } + efc_sm_transition(ctx, __efc_nport_vport_allocated, NULL); + break; + } + default: + __efc_nport_common(__func__, ctx, evt, arg); + } +} + +void +__efc_nport_vport_allocated(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_nport *nport = ctx->app; + struct efc *efc = nport->efc; + + nport_sm_trace(nport); + + /* + * This state is entered after the nport is allocated; + * it then waits for a fabric node + * FDISC to complete, which requests a nport attach. + * The nport attach complete is handled in this state. 
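A recurring convention in the vport bring-up path above is the use of sentinel values: a WWPN of zero means "let firmware choose the name", and an fc_id of U32_MAX means "not assigned yet, discover it with FDISC through a fabric node"; any other fc_id is attached directly. The fragment below sketches just that decision in isolation; the names and the printed strings are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define FC_ID_UNASSIGNED UINT32_MAX	/* mirrors the U32_MAX check above */

/*
 * Illustrative only: how a new virtual port gets its fabric address.
 * A WWPN of 0 is a second sentinel, meaning "let firmware pick the name".
 */
static const char *vport_fc_id_action(uint32_t fc_id)
{
	if (fc_id == FC_ID_UNASSIGNED)
		return "send FDISC through a fabric node to obtain an fc_id";

	return "fc_id already known: attach the nport directly";
}

int main(void)
{
	printf("%s\n", vport_fc_id_action(FC_ID_UNASSIGNED));
	printf("%s\n", vport_fc_id_action(0x011200));
	return 0;
}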
+ */ + switch (evt) { + case EFC_EVT_NPORT_ATTACH_OK: { + struct efc_node *node; + + /* Find our fabric node, and forward this event */ + node = efc_node_find(nport, FC_FID_FLOGI); + if (!node) { + efc_log_debug(efc, "can't find node %06x\n", FC_FID_FLOGI); + break; + } + /* sm: / forward nport attach to fabric node */ + efc_node_post_event(node, evt, NULL); + efc_sm_transition(ctx, __efc_nport_attached, NULL); + break; + } + default: + __efc_nport_common(__func__, ctx, evt, arg); + } +} + +static void +efc_vport_update_spec(struct efc_nport *nport) +{ + struct efc *efc = nport->efc; + struct efc_vport *vport; + unsigned long flags = 0; + + spin_lock_irqsave(&efc->vport_lock, flags); + list_for_each_entry(vport, &efc->vport_list, list_entry) { + if (vport->nport == nport) { + vport->wwnn = nport->wwnn; + vport->wwpn = nport->wwpn; + vport->tgt_data = nport->tgt_data; + vport->ini_data = nport->ini_data; + break; + } + } + spin_unlock_irqrestore(&efc->vport_lock, flags); +} + +void +__efc_nport_attached(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_nport *nport = ctx->app; + struct efc *efc = nport->efc; + + nport_sm_trace(nport); + + switch (evt) { + case EFC_EVT_ENTER: { + struct efc_node *node; + unsigned long index; + + efc_log_debug(efc, + "[%s] NPORT attached WWPN %016llX WWNN %016llX\n", + nport->display_name, + nport->wwpn, nport->wwnn); + + xa_for_each(&nport->lookup, index, node) + efc_node_update_display_name(node); + + efc->tt.new_nport(efc, nport); + + /* + * Update the vport (if its not the physical nport) + * parameters + */ + if (nport->is_vport) + efc_vport_update_spec(nport); + break; + } + + case EFC_EVT_EXIT: + efc_log_debug(efc, + "[%s] NPORT deattached WWPN %016llX WWNN %016llX\n", + nport->display_name, + nport->wwpn, nport->wwnn); + + efc->tt.del_nport(efc, nport); + break; + default: + __efc_nport_common(__func__, ctx, evt, arg); + } +} + +void +__efc_nport_wait_shutdown(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_nport *nport = ctx->app; + struct efc_domain *domain = nport->domain; + struct efc *efc = nport->efc; + + nport_sm_trace(nport); + + switch (evt) { + case EFC_EVT_NPORT_ALLOC_OK: + case EFC_EVT_NPORT_ALLOC_FAIL: + case EFC_EVT_NPORT_ATTACH_OK: + case EFC_EVT_NPORT_ATTACH_FAIL: + /* ignore these events - just wait for the all free event */ + break; + + case EFC_EVT_ALL_CHILD_NODES_FREE: { + /* + * Remove the nport from the domain's + * sparse vector lookup table + */ + xa_erase(&domain->lookup, nport->fc_id); + efc_sm_transition(ctx, __efc_nport_wait_port_free, NULL); + if (efc_cmd_nport_free(efc, nport)) { + efc_log_err(nport->efc, "efc_hw_port_free failed\n"); + /* Not much we can do, free the nport anyways */ + efc_nport_free(nport); + } + break; + } + default: + __efc_nport_common(__func__, ctx, evt, arg); + } +} + +void +__efc_nport_wait_port_free(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg) +{ + struct efc_nport *nport = ctx->app; + + nport_sm_trace(nport); + + switch (evt) { + case EFC_EVT_NPORT_ATTACH_OK: + /* Ignore as we are waiting for the free CB */ + break; + case EFC_EVT_NPORT_FREE_OK: { + /* All done, free myself */ + efc_nport_free(nport); + break; + } + default: + __efc_nport_common(__func__, ctx, evt, arg); + } +} + +static int +efc_vport_nport_alloc(struct efc_domain *domain, struct efc_vport *vport) +{ + struct efc_nport *nport; + + lockdep_assert_held(&domain->efc->lock); + + nport = efc_nport_alloc(domain, vport->wwpn, vport->wwnn, vport->fc_id, + 
vport->enable_ini, vport->enable_tgt); + vport->nport = nport; + if (!nport) + return -EIO; + + kref_get(&nport->ref); + nport->is_vport = true; + nport->tgt_data = vport->tgt_data; + nport->ini_data = vport->ini_data; + + efc_sm_transition(&nport->sm, __efc_nport_vport_init, NULL); + + return 0; +} + +int +efc_vport_start(struct efc_domain *domain) +{ + struct efc *efc = domain->efc; + struct efc_vport *vport; + struct efc_vport *next; + int rc = 0; + unsigned long flags = 0; + + /* Use the vport spec to find the associated vports and start them */ + spin_lock_irqsave(&efc->vport_lock, flags); + list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) { + if (!vport->nport) { + if (efc_vport_nport_alloc(domain, vport)) + rc = -EIO; + } + } + spin_unlock_irqrestore(&efc->vport_lock, flags); + + return rc; +} + +int +efc_nport_vport_new(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn, + u32 fc_id, bool ini, bool tgt, void *tgt_data, + void *ini_data) +{ + struct efc *efc = domain->efc; + struct efc_vport *vport; + int rc = 0; + unsigned long flags = 0; + + if (ini && domain->efc->enable_ini == 0) { + efc_log_debug(efc, "driver initiator mode not enabled\n"); + return -EIO; + } + + if (tgt && domain->efc->enable_tgt == 0) { + efc_log_debug(efc, "driver target mode not enabled\n"); + return -EIO; + } + + /* + * Create a vport spec if we need to recreate + * this vport after a link up event + */ + vport = efc_vport_create_spec(domain->efc, wwnn, wwpn, fc_id, ini, tgt, + tgt_data, ini_data); + if (!vport) { + efc_log_err(efc, "failed to create vport object entry\n"); + return -EIO; + } + + spin_lock_irqsave(&efc->lock, flags); + rc = efc_vport_nport_alloc(domain, vport); + spin_unlock_irqrestore(&efc->lock, flags); + + return rc; +} + +int +efc_nport_vport_del(struct efc *efc, struct efc_domain *domain, + u64 wwpn, uint64_t wwnn) +{ + struct efc_nport *nport; + struct efc_vport *vport; + struct efc_vport *next; + unsigned long flags = 0; + + spin_lock_irqsave(&efc->vport_lock, flags); + /* walk the efc_vport_list and remove from there */ + list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) { + if (vport->wwpn == wwpn && vport->wwnn == wwnn) { + list_del(&vport->list_entry); + kfree(vport); + break; + } + } + spin_unlock_irqrestore(&efc->vport_lock, flags); + + if (!domain) { + /* No domain means no nport to look for */ + return 0; + } + + spin_lock_irqsave(&efc->lock, flags); + list_for_each_entry(nport, &domain->nport_list, list_entry) { + if (nport->wwpn == wwpn && nport->wwnn == wwnn) { + kref_put(&nport->ref, nport->release); + /* Shutdown this NPORT */ + efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL); + break; + } + } + + spin_unlock_irqrestore(&efc->lock, flags); + return 0; +} + +void +efc_vport_del_all(struct efc *efc) +{ + struct efc_vport *vport; + struct efc_vport *next; + unsigned long flags = 0; + + spin_lock_irqsave(&efc->vport_lock, flags); + list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) { + list_del(&vport->list_entry); + kfree(vport); + } + spin_unlock_irqrestore(&efc->vport_lock, flags); +} + +struct efc_vport * +efc_vport_create_spec(struct efc *efc, uint64_t wwnn, uint64_t wwpn, + u32 fc_id, bool enable_ini, + bool enable_tgt, void *tgt_data, void *ini_data) +{ + struct efc_vport *vport; + unsigned long flags = 0; + + /* + * walk the efc_vport_list and return failure + * if a valid(vport with non zero WWPN and WWNN) vport entry + * is already created + */ + spin_lock_irqsave(&efc->vport_lock, flags); + 
list_for_each_entry(vport, &efc->vport_list, list_entry) { + if ((wwpn && vport->wwpn == wwpn) && + (wwnn && vport->wwnn == wwnn)) { + efc_log_err(efc, + "VPORT %016llX %016llX already allocated\n", + wwnn, wwpn); + spin_unlock_irqrestore(&efc->vport_lock, flags); + return NULL; + } + } + + vport = kzalloc(sizeof(*vport), GFP_ATOMIC); + if (!vport) { + spin_unlock_irqrestore(&efc->vport_lock, flags); + return NULL; + } + + vport->wwnn = wwnn; + vport->wwpn = wwpn; + vport->fc_id = fc_id; + vport->enable_tgt = enable_tgt; + vport->enable_ini = enable_ini; + vport->tgt_data = tgt_data; + vport->ini_data = ini_data; + + INIT_LIST_HEAD(&vport->list_entry); + list_add_tail(&vport->list_entry, &efc->vport_list); + spin_unlock_irqrestore(&efc->vport_lock, flags); + return vport; +} diff --git a/drivers/scsi/elx/libefc/efc_nport.h b/drivers/scsi/elx/libefc/efc_nport.h new file mode 100644 index 000000000..b575ea205 --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_nport.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +/** + * EFC FC port (NPORT) exported declarations + * + */ + +#ifndef __EFC_NPORT_H__ +#define __EFC_NPORT_H__ + +struct efc_nport * +efc_nport_find(struct efc_domain *domain, u32 d_id); +struct efc_nport * +efc_nport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn, + u32 fc_id, bool enable_ini, bool enable_tgt); +void +efc_nport_free(struct efc_nport *nport); +int +efc_nport_attach(struct efc_nport *nport, u32 fc_id); + +void +__efc_nport_allocated(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_nport_wait_shutdown(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_nport_wait_port_free(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_nport_vport_init(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_nport_vport_wait_alloc(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_nport_vport_allocated(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); +void +__efc_nport_attached(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); + +int +efc_vport_start(struct efc_domain *domain); + +#endif /* __EFC_NPORT_H__ */ diff --git a/drivers/scsi/elx/libefc/efc_sm.c b/drivers/scsi/elx/libefc/efc_sm.c new file mode 100644 index 000000000..afd963782 --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_sm.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +/* + * Generic state machine framework. + */ +#include "efc.h" +#include "efc_sm.h" + +/** + * efc_sm_post_event() - Post an event to a context. 
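The two entry points in this file are essentially the whole framework: a state is a function pointer, efc_sm_post_event() forwards an event to whatever state is installed, and efc_sm_transition() brackets a state change with EXIT/ENTER notifications (or posts REENTER when the target state is already current). A cut-down, stand-alone version of the same idea is sketched below; the event names and demo states are illustrative, not taken from libefc.

#include <stdio.h>

enum sm_event { EVT_ENTER, EVT_REENTER, EVT_EXIT, EVT_KICK };

struct sm_ctx;
typedef void (*sm_state)(struct sm_ctx *ctx, enum sm_event evt, void *arg);

struct sm_ctx {
	sm_state current_state;
};

/* Deliver an event to whatever state is currently installed. */
static int sm_post_event(struct sm_ctx *ctx, enum sm_event evt, void *arg)
{
	if (!ctx->current_state)
		return -1;
	ctx->current_state(ctx, evt, arg);
	return 0;
}

/* Switch states, bracketing the change with EXIT/ENTER (REENTER if same). */
static void sm_transition(struct sm_ctx *ctx, sm_state next, void *arg)
{
	if (ctx->current_state == next) {
		sm_post_event(ctx, EVT_REENTER, arg);
	} else {
		sm_post_event(ctx, EVT_EXIT, arg);
		ctx->current_state = next;
		sm_post_event(ctx, EVT_ENTER, arg);
	}
}

static void idle(struct sm_ctx *ctx, enum sm_event evt, void *arg);

static void busy(struct sm_ctx *ctx, enum sm_event evt, void *arg)
{
	if (evt == EVT_ENTER)
		puts("busy: enter");
	else if (evt == EVT_KICK)
		sm_transition(ctx, idle, NULL);
}

static void idle(struct sm_ctx *ctx, enum sm_event evt, void *arg)
{
	if (evt == EVT_ENTER)
		puts("idle: enter");
	else if (evt == EVT_KICK)
		sm_transition(ctx, busy, NULL);
}

int main(void)
{
	struct sm_ctx ctx = { .current_state = NULL };

	sm_transition(&ctx, idle, NULL);	/* EXIT is dropped: no state yet */
	sm_post_event(&ctx, EVT_KICK, NULL);	/* idle -> busy */
	sm_post_event(&ctx, EVT_KICK, NULL);	/* busy -> idle */
	return 0;
}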
+ * + * @ctx: State machine context + * @evt: Event to post + * @data: Event-specific data (if any) + */ +int +efc_sm_post_event(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *data) +{ + if (!ctx->current_state) + return -EIO; + + ctx->current_state(ctx, evt, data); + return 0; +} + +void +efc_sm_transition(struct efc_sm_ctx *ctx, + void (*state)(struct efc_sm_ctx *, + enum efc_sm_event, void *), void *data) + +{ + if (ctx->current_state == state) { + efc_sm_post_event(ctx, EFC_EVT_REENTER, data); + } else { + efc_sm_post_event(ctx, EFC_EVT_EXIT, data); + ctx->current_state = state; + efc_sm_post_event(ctx, EFC_EVT_ENTER, data); + } +} + +static char *event_name[] = EFC_SM_EVENT_NAME; + +const char *efc_sm_event_name(enum efc_sm_event evt) +{ + if (evt > EFC_EVT_LAST) + return "unknown"; + + return event_name[evt]; +} diff --git a/drivers/scsi/elx/libefc/efc_sm.h b/drivers/scsi/elx/libefc/efc_sm.h new file mode 100644 index 000000000..e26867b4d --- /dev/null +++ b/drivers/scsi/elx/libefc/efc_sm.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + * + */ + +/** + * Generic state machine framework declarations. + */ + +#ifndef _EFC_SM_H +#define _EFC_SM_H + +struct efc_sm_ctx; + +/* State Machine events */ +enum efc_sm_event { + /* Common Events */ + EFC_EVT_ENTER, + EFC_EVT_REENTER, + EFC_EVT_EXIT, + EFC_EVT_SHUTDOWN, + EFC_EVT_ALL_CHILD_NODES_FREE, + EFC_EVT_RESUME, + EFC_EVT_TIMER_EXPIRED, + + /* Domain Events */ + EFC_EVT_RESPONSE, + EFC_EVT_ERROR, + + EFC_EVT_DOMAIN_FOUND, + EFC_EVT_DOMAIN_ALLOC_OK, + EFC_EVT_DOMAIN_ALLOC_FAIL, + EFC_EVT_DOMAIN_REQ_ATTACH, + EFC_EVT_DOMAIN_ATTACH_OK, + EFC_EVT_DOMAIN_ATTACH_FAIL, + EFC_EVT_DOMAIN_LOST, + EFC_EVT_DOMAIN_FREE_OK, + EFC_EVT_DOMAIN_FREE_FAIL, + EFC_EVT_HW_DOMAIN_REQ_ATTACH, + EFC_EVT_HW_DOMAIN_REQ_FREE, + + /* Sport Events */ + EFC_EVT_NPORT_ALLOC_OK, + EFC_EVT_NPORT_ALLOC_FAIL, + EFC_EVT_NPORT_ATTACH_OK, + EFC_EVT_NPORT_ATTACH_FAIL, + EFC_EVT_NPORT_FREE_OK, + EFC_EVT_NPORT_FREE_FAIL, + EFC_EVT_NPORT_TOPOLOGY_NOTIFY, + EFC_EVT_HW_PORT_ALLOC_OK, + EFC_EVT_HW_PORT_ALLOC_FAIL, + EFC_EVT_HW_PORT_ATTACH_OK, + EFC_EVT_HW_PORT_REQ_ATTACH, + EFC_EVT_HW_PORT_REQ_FREE, + EFC_EVT_HW_PORT_FREE_OK, + + /* Login Events */ + EFC_EVT_SRRS_ELS_REQ_OK, + EFC_EVT_SRRS_ELS_CMPL_OK, + EFC_EVT_SRRS_ELS_REQ_FAIL, + EFC_EVT_SRRS_ELS_CMPL_FAIL, + EFC_EVT_SRRS_ELS_REQ_RJT, + EFC_EVT_NODE_ATTACH_OK, + EFC_EVT_NODE_ATTACH_FAIL, + EFC_EVT_NODE_FREE_OK, + EFC_EVT_NODE_FREE_FAIL, + EFC_EVT_ELS_FRAME, + EFC_EVT_ELS_REQ_TIMEOUT, + EFC_EVT_ELS_REQ_ABORTED, + /* request an ELS IO be aborted */ + EFC_EVT_ABORT_ELS, + /* ELS abort process complete */ + EFC_EVT_ELS_ABORT_CMPL, + + EFC_EVT_ABTS_RCVD, + + /* node is not in the GID_PT payload */ + EFC_EVT_NODE_MISSING, + /* node is allocated and in the GID_PT payload */ + EFC_EVT_NODE_REFOUND, + /* node shutting down due to PLOGI recvd (implicit logo) */ + EFC_EVT_SHUTDOWN_IMPLICIT_LOGO, + /* node shutting down due to LOGO recvd/sent (explicit logo) */ + EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, + + EFC_EVT_PLOGI_RCVD, + EFC_EVT_FLOGI_RCVD, + EFC_EVT_LOGO_RCVD, + EFC_EVT_PRLI_RCVD, + EFC_EVT_PRLO_RCVD, + EFC_EVT_PDISC_RCVD, + EFC_EVT_FDISC_RCVD, + EFC_EVT_ADISC_RCVD, + EFC_EVT_RSCN_RCVD, + EFC_EVT_SCR_RCVD, + EFC_EVT_ELS_RCVD, + + EFC_EVT_FCP_CMD_RCVD, + + EFC_EVT_GIDPT_DELAY_EXPIRED, + + /* SCSI Target Server events */ + EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY, + EFC_EVT_NODE_DEL_INI_COMPLETE, + 
EFC_EVT_NODE_DEL_TGT_COMPLETE, + EFC_EVT_NODE_SESS_REG_OK, + EFC_EVT_NODE_SESS_REG_FAIL, + + /* Must be last */ + EFC_EVT_LAST +}; + +/* State Machine event name lookup array */ +#define EFC_SM_EVENT_NAME { \ + [EFC_EVT_ENTER] = "EFC_EVT_ENTER", \ + [EFC_EVT_REENTER] = "EFC_EVT_REENTER", \ + [EFC_EVT_EXIT] = "EFC_EVT_EXIT", \ + [EFC_EVT_SHUTDOWN] = "EFC_EVT_SHUTDOWN", \ + [EFC_EVT_ALL_CHILD_NODES_FREE] = "EFC_EVT_ALL_CHILD_NODES_FREE",\ + [EFC_EVT_RESUME] = "EFC_EVT_RESUME", \ + [EFC_EVT_TIMER_EXPIRED] = "EFC_EVT_TIMER_EXPIRED", \ + [EFC_EVT_RESPONSE] = "EFC_EVT_RESPONSE", \ + [EFC_EVT_ERROR] = "EFC_EVT_ERROR", \ + [EFC_EVT_DOMAIN_FOUND] = "EFC_EVT_DOMAIN_FOUND", \ + [EFC_EVT_DOMAIN_ALLOC_OK] = "EFC_EVT_DOMAIN_ALLOC_OK", \ + [EFC_EVT_DOMAIN_ALLOC_FAIL] = "EFC_EVT_DOMAIN_ALLOC_FAIL", \ + [EFC_EVT_DOMAIN_REQ_ATTACH] = "EFC_EVT_DOMAIN_REQ_ATTACH", \ + [EFC_EVT_DOMAIN_ATTACH_OK] = "EFC_EVT_DOMAIN_ATTACH_OK", \ + [EFC_EVT_DOMAIN_ATTACH_FAIL] = "EFC_EVT_DOMAIN_ATTACH_FAIL", \ + [EFC_EVT_DOMAIN_LOST] = "EFC_EVT_DOMAIN_LOST", \ + [EFC_EVT_DOMAIN_FREE_OK] = "EFC_EVT_DOMAIN_FREE_OK", \ + [EFC_EVT_DOMAIN_FREE_FAIL] = "EFC_EVT_DOMAIN_FREE_FAIL", \ + [EFC_EVT_HW_DOMAIN_REQ_ATTACH] = "EFC_EVT_HW_DOMAIN_REQ_ATTACH",\ + [EFC_EVT_HW_DOMAIN_REQ_FREE] = "EFC_EVT_HW_DOMAIN_REQ_FREE", \ + [EFC_EVT_NPORT_ALLOC_OK] = "EFC_EVT_NPORT_ALLOC_OK", \ + [EFC_EVT_NPORT_ALLOC_FAIL] = "EFC_EVT_NPORT_ALLOC_FAIL", \ + [EFC_EVT_NPORT_ATTACH_OK] = "EFC_EVT_NPORT_ATTACH_OK", \ + [EFC_EVT_NPORT_ATTACH_FAIL] = "EFC_EVT_NPORT_ATTACH_FAIL", \ + [EFC_EVT_NPORT_FREE_OK] = "EFC_EVT_NPORT_FREE_OK", \ + [EFC_EVT_NPORT_FREE_FAIL] = "EFC_EVT_NPORT_FREE_FAIL", \ + [EFC_EVT_NPORT_TOPOLOGY_NOTIFY] = "EFC_EVT_NPORT_TOPOLOGY_NOTIFY",\ + [EFC_EVT_HW_PORT_ALLOC_OK] = "EFC_EVT_HW_PORT_ALLOC_OK", \ + [EFC_EVT_HW_PORT_ALLOC_FAIL] = "EFC_EVT_HW_PORT_ALLOC_FAIL", \ + [EFC_EVT_HW_PORT_ATTACH_OK] = "EFC_EVT_HW_PORT_ATTACH_OK", \ + [EFC_EVT_HW_PORT_REQ_ATTACH] = "EFC_EVT_HW_PORT_REQ_ATTACH", \ + [EFC_EVT_HW_PORT_REQ_FREE] = "EFC_EVT_HW_PORT_REQ_FREE", \ + [EFC_EVT_HW_PORT_FREE_OK] = "EFC_EVT_HW_PORT_FREE_OK", \ + [EFC_EVT_SRRS_ELS_REQ_OK] = "EFC_EVT_SRRS_ELS_REQ_OK", \ + [EFC_EVT_SRRS_ELS_CMPL_OK] = "EFC_EVT_SRRS_ELS_CMPL_OK", \ + [EFC_EVT_SRRS_ELS_REQ_FAIL] = "EFC_EVT_SRRS_ELS_REQ_FAIL", \ + [EFC_EVT_SRRS_ELS_CMPL_FAIL] = "EFC_EVT_SRRS_ELS_CMPL_FAIL", \ + [EFC_EVT_SRRS_ELS_REQ_RJT] = "EFC_EVT_SRRS_ELS_REQ_RJT", \ + [EFC_EVT_NODE_ATTACH_OK] = "EFC_EVT_NODE_ATTACH_OK", \ + [EFC_EVT_NODE_ATTACH_FAIL] = "EFC_EVT_NODE_ATTACH_FAIL", \ + [EFC_EVT_NODE_FREE_OK] = "EFC_EVT_NODE_FREE_OK", \ + [EFC_EVT_NODE_FREE_FAIL] = "EFC_EVT_NODE_FREE_FAIL", \ + [EFC_EVT_ELS_FRAME] = "EFC_EVT_ELS_FRAME", \ + [EFC_EVT_ELS_REQ_TIMEOUT] = "EFC_EVT_ELS_REQ_TIMEOUT", \ + [EFC_EVT_ELS_REQ_ABORTED] = "EFC_EVT_ELS_REQ_ABORTED", \ + [EFC_EVT_ABORT_ELS] = "EFC_EVT_ABORT_ELS", \ + [EFC_EVT_ELS_ABORT_CMPL] = "EFC_EVT_ELS_ABORT_CMPL", \ + [EFC_EVT_ABTS_RCVD] = "EFC_EVT_ABTS_RCVD", \ + [EFC_EVT_NODE_MISSING] = "EFC_EVT_NODE_MISSING", \ + [EFC_EVT_NODE_REFOUND] = "EFC_EVT_NODE_REFOUND", \ + [EFC_EVT_SHUTDOWN_IMPLICIT_LOGO] = "EFC_EVT_SHUTDOWN_IMPLICIT_LOGO",\ + [EFC_EVT_SHUTDOWN_EXPLICIT_LOGO] = "EFC_EVT_SHUTDOWN_EXPLICIT_LOGO",\ + [EFC_EVT_PLOGI_RCVD] = "EFC_EVT_PLOGI_RCVD", \ + [EFC_EVT_FLOGI_RCVD] = "EFC_EVT_FLOGI_RCVD", \ + [EFC_EVT_LOGO_RCVD] = "EFC_EVT_LOGO_RCVD", \ + [EFC_EVT_PRLI_RCVD] = "EFC_EVT_PRLI_RCVD", \ + [EFC_EVT_PRLO_RCVD] = "EFC_EVT_PRLO_RCVD", \ + [EFC_EVT_PDISC_RCVD] = "EFC_EVT_PDISC_RCVD", \ + [EFC_EVT_FDISC_RCVD] = "EFC_EVT_FDISC_RCVD", \ + [EFC_EVT_ADISC_RCVD] = 
"EFC_EVT_ADISC_RCVD", \ + [EFC_EVT_RSCN_RCVD] = "EFC_EVT_RSCN_RCVD", \ + [EFC_EVT_SCR_RCVD] = "EFC_EVT_SCR_RCVD", \ + [EFC_EVT_ELS_RCVD] = "EFC_EVT_ELS_RCVD", \ + [EFC_EVT_FCP_CMD_RCVD] = "EFC_EVT_FCP_CMD_RCVD", \ + [EFC_EVT_NODE_DEL_INI_COMPLETE] = "EFC_EVT_NODE_DEL_INI_COMPLETE",\ + [EFC_EVT_NODE_DEL_TGT_COMPLETE] = "EFC_EVT_NODE_DEL_TGT_COMPLETE",\ + [EFC_EVT_LAST] = "EFC_EVT_LAST", \ +} + +int +efc_sm_post_event(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *data); +void +efc_sm_transition(struct efc_sm_ctx *ctx, + void (*state)(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg), + void *data); +void efc_sm_disable(struct efc_sm_ctx *ctx); +const char *efc_sm_event_name(enum efc_sm_event evt); + +#endif /* ! _EFC_SM_H */ diff --git a/drivers/scsi/elx/libefc/efclib.c b/drivers/scsi/elx/libefc/efclib.c new file mode 100644 index 000000000..dd3e3d0a4 --- /dev/null +++ b/drivers/scsi/elx/libefc/efclib.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +/* + * LIBEFC LOCKING + * + * The critical sections protected by the efc's spinlock are quite broad and + * may be improved upon in the future. The libefc code and its locking doesn't + * influence the I/O path, so excessive locking doesn't impact I/O performance. + * + * The strategy is to lock whenever processing a request from user driver. This + * means that the entry points into the libefc library are protected by efc + * lock. So all the state machine transitions are protected. + */ + +#include +#include +#include "efc.h" + +int efcport_init(struct efc *efc) +{ + u32 rc = 0; + + spin_lock_init(&efc->lock); + INIT_LIST_HEAD(&efc->vport_list); + efc->hold_frames = false; + spin_lock_init(&efc->pend_frames_lock); + INIT_LIST_HEAD(&efc->pend_frames); + + /* Create Node pool */ + efc->node_pool = mempool_create_kmalloc_pool(EFC_MAX_REMOTE_NODES, + sizeof(struct efc_node)); + if (!efc->node_pool) { + efc_log_err(efc, "Can't allocate node pool\n"); + return -ENOMEM; + } + + efc->node_dma_pool = dma_pool_create("node_dma_pool", &efc->pci->dev, + NODE_SPARAMS_SIZE, 0, 0); + if (!efc->node_dma_pool) { + efc_log_err(efc, "Can't allocate node dma pool\n"); + mempool_destroy(efc->node_pool); + return -ENOMEM; + } + + efc->els_io_pool = mempool_create_kmalloc_pool(EFC_ELS_IO_POOL_SZ, + sizeof(struct efc_els_io_req)); + if (!efc->els_io_pool) { + efc_log_err(efc, "Can't allocate els io pool\n"); + return -ENOMEM; + } + + return rc; +} + +static void +efc_purge_pending(struct efc *efc) +{ + struct efc_hw_sequence *frame, *next; + unsigned long flags = 0; + + spin_lock_irqsave(&efc->pend_frames_lock, flags); + + list_for_each_entry_safe(frame, next, &efc->pend_frames, list_entry) { + list_del(&frame->list_entry); + efc->tt.hw_seq_free(efc, frame); + } + + spin_unlock_irqrestore(&efc->pend_frames_lock, flags); +} + +void efcport_destroy(struct efc *efc) +{ + efc_purge_pending(efc); + mempool_destroy(efc->els_io_pool); + mempool_destroy(efc->node_pool); + dma_pool_destroy(efc->node_dma_pool); +} diff --git a/drivers/scsi/elx/libefc/efclib.h b/drivers/scsi/elx/libefc/efclib.h new file mode 100644 index 000000000..57e338612 --- /dev/null +++ b/drivers/scsi/elx/libefc/efclib.h @@ -0,0 +1,623 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
+ */ + +#ifndef __EFCLIB_H__ +#define __EFCLIB_H__ + +#include "scsi/fc/fc_els.h" +#include "scsi/fc/fc_fs.h" +#include "scsi/fc/fc_ns.h" +#include "scsi/fc/fc_gs.h" +#include "scsi/fc_frame.h" +#include "../include/efc_common.h" +#include "../libefc_sli/sli4.h" + +#define EFC_SERVICE_PARMS_LENGTH 120 +#define EFC_NAME_LENGTH 32 +#define EFC_SM_NAME_LENGTH 64 +#define EFC_DISPLAY_BUS_INFO_LENGTH 16 + +#define EFC_WWN_LENGTH 32 + +#define EFC_FC_ELS_DEFAULT_RETRIES 3 + +/* Timeouts */ +#define EFC_FC_ELS_SEND_DEFAULT_TIMEOUT 0 +#define EFC_FC_FLOGI_TIMEOUT_SEC 5 +#define EFC_SHUTDOWN_TIMEOUT_USEC 30000000 + +/* Return values for calls from base driver to libefc */ +#define EFC_SCSI_CALL_COMPLETE 0 +#define EFC_SCSI_CALL_ASYNC 1 + +/* Local port topology */ +enum efc_nport_topology { + EFC_NPORT_TOPO_UNKNOWN = 0, + EFC_NPORT_TOPO_FABRIC, + EFC_NPORT_TOPO_P2P, + EFC_NPORT_TOPO_FC_AL, +}; + +#define enable_target_rscn(efc) 1 + +enum efc_node_shutd_rsn { + EFC_NODE_SHUTDOWN_DEFAULT = 0, + EFC_NODE_SHUTDOWN_EXPLICIT_LOGO, + EFC_NODE_SHUTDOWN_IMPLICIT_LOGO, +}; + +enum efc_node_send_ls_acc { + EFC_NODE_SEND_LS_ACC_NONE = 0, + EFC_NODE_SEND_LS_ACC_PLOGI, + EFC_NODE_SEND_LS_ACC_PRLI, +}; + +#define EFC_LINK_STATUS_UP 0 +#define EFC_LINK_STATUS_DOWN 1 + +enum efc_sm_event; + +/* State machine context header */ +struct efc_sm_ctx { + void (*current_state)(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); + + const char *description; + void *app; +}; + +/* Description of discovered Fabric Domain */ +struct efc_domain_record { + u32 index; + u32 priority; + u8 address[6]; + u8 wwn[8]; + union { + u8 vlan[512]; + u8 loop[128]; + } map; + u32 speed; + u32 fc_id; + bool is_loop; + bool is_nport; +}; + +/* Domain events */ +enum efc_hw_domain_event { + EFC_HW_DOMAIN_ALLOC_OK, + EFC_HW_DOMAIN_ALLOC_FAIL, + EFC_HW_DOMAIN_ATTACH_OK, + EFC_HW_DOMAIN_ATTACH_FAIL, + EFC_HW_DOMAIN_FREE_OK, + EFC_HW_DOMAIN_FREE_FAIL, + EFC_HW_DOMAIN_LOST, + EFC_HW_DOMAIN_FOUND, + EFC_HW_DOMAIN_CHANGED, +}; + +/** + * Fibre Channel port object + * + * @list_entry: nport list entry + * @ref: reference count, each node takes a reference + * @release: function to free nport object + * @efc: pointer back to efc + * @instance_index: unique instance index value + * @display_name: port display name + * @is_vport: Is NPIV port + * @free_req_pending: pending request to free resources + * @attached: mark attached if reg VPI succeeds + * @p2p_winner: TRUE if we're the point-to-point winner + * @domain: pointer back to domain + * @wwpn: port wwpn + * @wwnn: port wwnn + * @tgt_data: target backend private port data + * @ini_data: initiator backend private port data + * @indicator: VPI + * @fc_id: port FC address + * @dma: memory for Service Parameters + * @wwnn_str: wwpn string + * @sli_wwpn: SLI provided wwpn + * @sli_wwnn: SLI provided wwnn + * @sm: nport state machine context + * @lookup: fc_id to node lookup object + * @enable_ini: SCSI initiator enabled for this port + * @enable_tgt: SCSI target enabled for this port + * @enable_rscn: port will be expecting RSCN + * @shutting_down: nport in process of shutting down + * @p2p_port_id: our port id for point-to-point + * @topology: topology: fabric/p2p/unknown + * @service_params: login parameters + * @p2p_remote_port_id: remote node's port id for point-to-point + */ + +struct efc_nport { + struct list_head list_entry; + struct kref ref; + void (*release)(struct kref *arg); + struct efc *efc; + u32 instance_index; + char display_name[EFC_NAME_LENGTH]; + bool is_vport; + bool 
free_req_pending; + bool attached; + bool attaching; + bool p2p_winner; + struct efc_domain *domain; + u64 wwpn; + u64 wwnn; + void *tgt_data; + void *ini_data; + + u32 indicator; + u32 fc_id; + struct efc_dma dma; + + u8 wwnn_str[EFC_WWN_LENGTH]; + __be64 sli_wwpn; + __be64 sli_wwnn; + + struct efc_sm_ctx sm; + struct xarray lookup; + bool enable_ini; + bool enable_tgt; + bool enable_rscn; + bool shutting_down; + u32 p2p_port_id; + enum efc_nport_topology topology; + u8 service_params[EFC_SERVICE_PARMS_LENGTH]; + u32 p2p_remote_port_id; +}; + +/** + * Fibre Channel domain object + * + * This object is a container for the various SLI components needed + * to connect to the domain of a FC or FCoE switch + * @efc: pointer back to efc + * @instance_index: unique instance index value + * @display_name: Node display name + * @nport_list: linked list of nports associated with this domain + * @ref: Reference count, each nport takes a reference + * @release: Function to free domain object + * @ini_domain: initiator backend private domain data + * @tgt_domain: target backend private domain data + * @sm: state machine context + * @fcf: FC Forwarder table index + * @fcf_indicator: FCFI + * @indicator: VFI + * @nport_count: Number of nports allocated + * @dma: memory for Service Parameters + * @fcf_wwn: WWN for FCF/switch + * @drvsm: driver domain sm context + * @attached: set true after attach completes + * @is_fc: is FC + * @is_loop: is loop topology + * @is_nlport: is public loop + * @domain_found_pending:A domain found is pending, drec is updated + * @req_domain_free: True if domain object should be free'd + * @req_accept_frames: set in domain state machine to enable frames + * @domain_notify_pend: Set in domain SM to avoid duplicate node event post + * @pending_drec: Pending drec if a domain found is pending + * @service_params: any nports service parameters + * @flogi_service_params:Fabric/P2p service parameters from FLOGI + * @lookup: d_id to node lookup object + * @nport: Pointer to first (physical) SLI port + */ +struct efc_domain { + struct efc *efc; + char display_name[EFC_NAME_LENGTH]; + struct list_head nport_list; + struct kref ref; + void (*release)(struct kref *arg); + void *ini_domain; + void *tgt_domain; + + /* Declarations private to HW/SLI */ + u32 fcf; + u32 fcf_indicator; + u32 indicator; + u32 nport_count; + struct efc_dma dma; + + /* Declarations private to FC trannport */ + u64 fcf_wwn; + struct efc_sm_ctx drvsm; + bool attached; + bool is_fc; + bool is_loop; + bool is_nlport; + bool domain_found_pending; + bool req_domain_free; + bool req_accept_frames; + bool domain_notify_pend; + + struct efc_domain_record pending_drec; + u8 service_params[EFC_SERVICE_PARMS_LENGTH]; + u8 flogi_service_params[EFC_SERVICE_PARMS_LENGTH]; + + struct xarray lookup; + + struct efc_nport *nport; +}; + +/** + * Remote Node object + * + * This object represents a connection between the SLI port and another + * Nx_Port on the fabric. Note this can be either a well known port such + * as a F_Port (i.e. ff:ff:fe) or another N_Port. + * @indicator: RPI + * @fc_id: FC address + * @attached: true if attached + * @nport: associated SLI port + * @node: associated node + */ +struct efc_remote_node { + u32 indicator; + u32 index; + u32 fc_id; + + bool attached; + + struct efc_nport *nport; + void *node; +}; + +/** + * FC Node object + * @efc: pointer back to efc structure + * @display_name: Node display name + * @nort: Assosiated nport pointer. 
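The nport, domain and node objects above all carry a struct kref plus a release callback, and the .c files pair every kref_get() with a kref_put() that frees the object once the last user drops its reference. A minimal kernel-style sketch of that idiom follows, assuming the ordinary kref/kzalloc APIs; the demo_obj type and helpers are invented for illustration.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref ref;
	int payload;
};

/* Called by kref_put() when the count reaches zero. */
static void demo_obj_release(struct kref *ref)
{
	struct demo_obj *obj = container_of(ref, struct demo_obj, ref);

	kfree(obj);
}

static struct demo_obj *demo_obj_alloc(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	kref_init(&obj->ref);		/* count starts at 1 for the creator */
	return obj;
}

/* Each additional user takes a reference ... */
static void demo_obj_get(struct demo_obj *obj)
{
	kref_get(&obj->ref);
}

/* ... and drops it when done; the last put frees the object. */
static void demo_obj_put(struct demo_obj *obj)
{
	kref_put(&obj->ref, demo_obj_release);
}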
+ * @hold_frames: hold incoming frames if true + * @els_io_enabled: Enable allocating els ios for this node + * @els_ios_lock: lock to protect the els ios list + * @els_ios_list: ELS I/O's for this node + * @ini_node: backend initiator private node data + * @tgt_node: backend target private node data + * @rnode: Remote node + * @sm: state machine context + * @evtdepth: current event posting nesting depth + * @req_free: this node is to be free'd + * @attached: node is attached (REGLOGIN complete) + * @fcp_enabled: node is enabled to handle FCP + * @rscn_pending: for name server node RSCN is pending + * @send_plogi: send PLOGI accept, upon completion of node attach + * @send_plogi_acc: TRUE if io_alloc() is enabled. + * @send_ls_acc: type of LS acc to send + * @ls_acc_io: SCSI IO for LS acc + * @ls_acc_oxid: OX_ID for pending accept + * @ls_acc_did: D_ID for pending accept + * @shutdown_reason: reason for node shutdown + * @sparm_dma_buf: service parameters buffer + * @service_params: plogi/acc frame from remote device + * @pend_frames_lock: lock for inbound pending frames list + * @pend_frames: inbound pending frames list + * @pend_frames_processed:count of frames processed in hold frames interval + * @ox_id_in_use: used to verify one at a time us of ox_id + * @els_retries_remaining:for ELS, number of retries remaining + * @els_req_cnt: number of outstanding ELS requests + * @els_cmpl_cnt: number of outstanding ELS completions + * @abort_cnt: Abort counter for debugging purpos + * @current_state_name: current node state + * @prev_state_name: previous node state + * @current_evt: current event + * @prev_evt: previous event + * @targ: node is target capable + * @init: node is init capable + * @refound: Handle node refound case when node is being deleted + * @els_io_pend_list: list of pending (not yet processed) ELS IOs + * @els_io_active_list: list of active (processed) ELS IOs + * @nodedb_state: Node debugging, saved state + * @gidpt_delay_timer: GIDPT delay timer + * @time_last_gidpt_msec:Start time of last target RSCN GIDPT + * @wwnn: remote port WWNN + * @wwpn: remote port WWPN + */ +struct efc_node { + struct efc *efc; + char display_name[EFC_NAME_LENGTH]; + struct efc_nport *nport; + struct kref ref; + void (*release)(struct kref *arg); + bool hold_frames; + bool els_io_enabled; + bool send_plogi_acc; + bool send_plogi; + bool rscn_pending; + bool fcp_enabled; + bool attached; + bool req_free; + + spinlock_t els_ios_lock; + struct list_head els_ios_list; + void *ini_node; + void *tgt_node; + + struct efc_remote_node rnode; + /* Declarations private to FC trannport */ + struct efc_sm_ctx sm; + u32 evtdepth; + + enum efc_node_send_ls_acc send_ls_acc; + void *ls_acc_io; + u32 ls_acc_oxid; + u32 ls_acc_did; + enum efc_node_shutd_rsn shutdown_reason; + bool targ; + bool init; + bool refound; + struct efc_dma sparm_dma_buf; + u8 service_params[EFC_SERVICE_PARMS_LENGTH]; + spinlock_t pend_frames_lock; + struct list_head pend_frames; + u32 pend_frames_processed; + u32 ox_id_in_use; + u32 els_retries_remaining; + u32 els_req_cnt; + u32 els_cmpl_cnt; + u32 abort_cnt; + + char current_state_name[EFC_SM_NAME_LENGTH]; + char prev_state_name[EFC_SM_NAME_LENGTH]; + int current_evt; + int prev_evt; + + void (*nodedb_state)(struct efc_sm_ctx *ctx, + enum efc_sm_event evt, void *arg); + struct timer_list gidpt_delay_timer; + u64 time_last_gidpt_msec; + + char wwnn[EFC_WWN_LENGTH]; + char wwpn[EFC_WWN_LENGTH]; +}; + +/** + * NPIV port + * + * Collection of the information required to restore a virtual 
port across + * link events + * @wwnn: node name + * @wwpn: port name + * @fc_id: port id + * @tgt_data: target backend pointer + * @ini_data: initiator backend pointe + * @nport: Used to match record after attaching for update + * + */ + +struct efc_vport { + struct list_head list_entry; + u64 wwnn; + u64 wwpn; + u32 fc_id; + bool enable_tgt; + bool enable_ini; + void *tgt_data; + void *ini_data; + struct efc_nport *nport; +}; + +#define node_printf(node, fmt, args...) \ + efc_log_info(node->efc, "[%s] " fmt, node->display_name, ##args) + +/* Node SM IO Context Callback structure */ +struct efc_node_cb { + int status; + int ext_status; + struct efc_hw_rq_buffer *header; + struct efc_hw_rq_buffer *payload; + struct efc_dma els_rsp; + + /* Actual length of data received */ + int rsp_len; +}; + +struct efc_hw_rq_buffer { + u16 rqindex; + struct efc_dma dma; +}; + +/** + * FC sequence object + * + * Defines a general FC sequence object + * @hw: HW that owns this sequence + * @fcfi: FCFI associated with sequence + * @header: Received frame header + * @payload: Received frame header + * @hw_priv: HW private context + */ +struct efc_hw_sequence { + struct list_head list_entry; + void *hw; + u8 fcfi; + struct efc_hw_rq_buffer *header; + struct efc_hw_rq_buffer *payload; + void *hw_priv; +}; + +enum efc_disc_io_type { + EFC_DISC_IO_ELS_REQ, + EFC_DISC_IO_ELS_RESP, + EFC_DISC_IO_CT_REQ, + EFC_DISC_IO_CT_RESP +}; + +struct efc_io_els_params { + u32 s_id; + u16 ox_id; + u8 timeout; +}; + +struct efc_io_ct_params { + u8 r_ctl; + u8 type; + u8 df_ctl; + u8 timeout; + u16 ox_id; +}; + +union efc_disc_io_param { + struct efc_io_els_params els; + struct efc_io_ct_params ct; +}; + +struct efc_disc_io { + struct efc_dma req; /* send buffer */ + struct efc_dma rsp; /* receive buffer */ + enum efc_disc_io_type io_type; /* EFC_DISC_IO_TYPE enum*/ + u16 xmit_len; /* Length of els request*/ + u16 rsp_len; /* Max length of rsps to be rcvd */ + u32 rpi; /* Registered RPI */ + u32 vpi; /* VPI for this nport */ + u32 s_id; + u32 d_id; + bool rpi_registered; /* if false, use tmp RPI */ + union efc_disc_io_param iparam; +}; + +/* Return value indiacating the sequence can not be freed */ +#define EFC_HW_SEQ_HOLD 0 +/* Return value indiacating the sequence can be freed */ +#define EFC_HW_SEQ_FREE 1 + +struct libefc_function_template { + /*Sport*/ + int (*new_nport)(struct efc *efc, struct efc_nport *sp); + void (*del_nport)(struct efc *efc, struct efc_nport *sp); + + /*Scsi Node*/ + int (*scsi_new_node)(struct efc *efc, struct efc_node *n); + int (*scsi_del_node)(struct efc *efc, struct efc_node *n, int reason); + + int (*issue_mbox_rqst)(void *efct, void *buf, void *cb, void *arg); + /*Send ELS IO*/ + int (*send_els)(struct efc *efc, struct efc_disc_io *io); + /*Send BLS IO*/ + int (*send_bls)(struct efc *efc, u32 type, struct sli_bls_params *bls); + /*Free HW frame*/ + int (*hw_seq_free)(struct efc *efc, struct efc_hw_sequence *seq); +}; + +#define EFC_LOG_LIB 0x01 +#define EFC_LOG_NODE 0x02 +#define EFC_LOG_PORT 0x04 +#define EFC_LOG_DOMAIN 0x08 +#define EFC_LOG_ELS 0x10 +#define EFC_LOG_DOMAIN_SM 0x20 +#define EFC_LOG_SM 0x40 + +/* efc library port structure */ +struct efc { + void *base; + struct pci_dev *pci; + struct sli4 *sli; + u32 fcfi; + u64 req_wwpn; + u64 req_wwnn; + + u64 def_wwpn; + u64 def_wwnn; + u64 max_xfer_size; + mempool_t *node_pool; + struct dma_pool *node_dma_pool; + u32 nodes_count; + + u32 link_status; + + struct list_head vport_list; + /* lock to protect the vport list */ + spinlock_t 
vport_lock; + + struct libefc_function_template tt; + /* lock to protect the discovery library. + * Refer to efclib.c for more details. + */ + spinlock_t lock; + + bool enable_ini; + bool enable_tgt; + + u32 log_level; + + struct efc_domain *domain; + void (*domain_free_cb)(struct efc *efc, void *arg); + void *domain_free_cb_arg; + + u64 tgt_rscn_delay_msec; + u64 tgt_rscn_period_msec; + + bool external_loopback; + u32 nodedb_mask; + u32 logmask; + mempool_t *els_io_pool; + atomic_t els_io_alloc_failed_count; + + /* hold pending frames */ + bool hold_frames; + /* lock to protect pending frames list access */ + spinlock_t pend_frames_lock; + struct list_head pend_frames; + /* count of pending frames that were processed */ + u32 pend_frames_processed; + +}; + +/* + * EFC library registration + * **********************************/ +int efcport_init(struct efc *efc); +void efcport_destroy(struct efc *efc); +/* + * EFC Domain + * **********************************/ +int efc_domain_cb(void *arg, int event, void *data); +void +efc_register_domain_free_cb(struct efc *efc, + void (*callback)(struct efc *efc, void *arg), + void *arg); + +/* + * EFC nport + * **********************************/ +void efc_nport_cb(void *arg, int event, void *data); +struct efc_vport * +efc_vport_create_spec(struct efc *efc, u64 wwnn, u64 wwpn, u32 fc_id, + bool enable_ini, bool enable_tgt, + void *tgt_data, void *ini_data); +int efc_nport_vport_new(struct efc_domain *domain, u64 wwpn, + u64 wwnn, u32 fc_id, bool ini, bool tgt, + void *tgt_data, void *ini_data); +int efc_nport_vport_del(struct efc *efc, struct efc_domain *domain, + u64 wwpn, u64 wwnn); + +void efc_vport_del_all(struct efc *efc); + +/* + * EFC Node + * **********************************/ +int efc_remote_node_cb(void *arg, int event, void *data); +void efc_node_fcid_display(u32 fc_id, char *buffer, u32 buf_len); +void efc_node_post_shutdown(struct efc_node *node, void *arg); +u64 efc_node_get_wwpn(struct efc_node *node); + +/* + * EFC FCP/ELS/CT interface + * **********************************/ +void efc_dispatch_frame(struct efc *efc, struct efc_hw_sequence *seq); +void efc_disc_io_complete(struct efc_disc_io *io, u32 len, u32 status, + u32 ext_status); + +/* + * EFC SCSI INTERACTION LAYER + * **********************************/ +void efc_scsi_sess_reg_complete(struct efc_node *node, u32 status); +void efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node); +void efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node); +void efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node); + +#endif /* __EFCLIB_H__ */ diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c new file mode 100644 index 000000000..5e7fb110b --- /dev/null +++ b/drivers/scsi/elx/libefc_sli/sli4.c @@ -0,0 +1,5155 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + */ + +/* + * All common (i.e. transport-independent) SLI-4 functions are implemented + * in this file. 
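struct libefc_function_template above is the seam between the common library and the base driver: the driver fills in a table of callbacks (new_nport, send_els, hw_seq_free, and so on) and the library only ever calls through efc->tt. A stripped-down sketch of that ops-table arrangement is below; the callback names and demo types are invented for illustration.

#include <stdio.h>

struct demo_lib;

/* Callback table the base driver supplies at registration time. */
struct demo_ops {
	int (*new_port)(struct demo_lib *lib, int port_id);
	void (*free_frame)(struct demo_lib *lib, void *frame);
};

struct demo_lib {
	struct demo_ops tt;	/* "transport template", as in struct efc */
};

/* Library-side code only knows the table, not the driver behind it. */
static int demo_lib_port_up(struct demo_lib *lib, int port_id)
{
	return lib->tt.new_port(lib, port_id);
}

/* --- one possible driver backend --- */
static int drv_new_port(struct demo_lib *lib, int port_id)
{
	(void)lib;
	printf("driver: bring up port %#x\n", port_id);
	return 0;
}

static void drv_free_frame(struct demo_lib *lib, void *frame)
{
	(void)lib;
	(void)frame;
}

int main(void)
{
	struct demo_lib lib = {
		.tt = {
			.new_port   = drv_new_port,
			.free_frame = drv_free_frame,
		},
	};

	return demo_lib_port_up(&lib, 0x010200);
}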
+ */ +#include "sli4.h" + +static struct sli4_asic_entry_t sli4_asic_table[] = { + { SLI4_ASIC_REV_B0, SLI4_ASIC_GEN_5}, + { SLI4_ASIC_REV_D0, SLI4_ASIC_GEN_5}, + { SLI4_ASIC_REV_A3, SLI4_ASIC_GEN_6}, + { SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_6}, + { SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_6}, + { SLI4_ASIC_REV_A3, SLI4_ASIC_GEN_6}, + { SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_7}, + { SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_7}, +}; + +/* Convert queue type enum (SLI_QTYPE_*) into a string */ +static char *SLI4_QNAME[] = { + "Event Queue", + "Completion Queue", + "Mailbox Queue", + "Work Queue", + "Receive Queue", + "Undefined" +}; + +/** + * sli_config_cmd_init() - Write a SLI_CONFIG command to the provided buffer. + * + * @sli4: SLI context pointer. + * @buf: Destination buffer for the command. + * @length: Length in bytes of attached command. + * @dma: DMA buffer for non-embedded commands. + * Return: Command payload buffer. + */ +static void * +sli_config_cmd_init(struct sli4 *sli4, void *buf, u32 length, + struct efc_dma *dma) +{ + struct sli4_cmd_sli_config *config; + u32 flags; + + if (length > sizeof(config->payload.embed) && !dma) { + efc_log_err(sli4, "Too big for an embedded cmd with len(%d)\n", + length); + return NULL; + } + + memset(buf, 0, SLI4_BMBX_SIZE); + + config = buf; + + config->hdr.command = SLI4_MBX_CMD_SLI_CONFIG; + if (!dma) { + flags = SLI4_SLICONF_EMB; + config->dw1_flags = cpu_to_le32(flags); + config->payload_len = cpu_to_le32(length); + return config->payload.embed; + } + + flags = SLI4_SLICONF_PMDCMD_VAL_1; + flags &= ~SLI4_SLICONF_EMB; + config->dw1_flags = cpu_to_le32(flags); + + config->payload.mem.addr.low = cpu_to_le32(lower_32_bits(dma->phys)); + config->payload.mem.addr.high = cpu_to_le32(upper_32_bits(dma->phys)); + config->payload.mem.length = + cpu_to_le32(dma->size & SLI4_SLICONF_PMD_LEN); + config->payload_len = cpu_to_le32(dma->size); + /* save pointer to DMA for BMBX dumping purposes */ + sli4->bmbx_non_emb_pmd = dma; + return dma->virt; +} + +/** + * sli_cmd_common_create_cq() - Write a COMMON_CREATE_CQ V2 command. + * + * @sli4: SLI context pointer. + * @buf: Destination buffer for the command. + * @qmem: DMA memory for queue. + * @eq_id: EQ id assosiated with this cq. + * Return: status -EIO/0. 
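The queue-create commands in this file all do the same arithmetic: derive the entry count from the DMA region size, pick a page size the hardware supports for that count, and report how many pages the region spans. The stand-alone sketch below mirrors the CQ case; the 16-byte entry size is an assumption for illustration (the driver uses SLI4_CQE_BYTES), and the helper names are invented.

#include <stdio.h>
#include <stddef.h>

#define CQE_BYTES	16U		/* assumed size of one CQ entry */
#define SZ_4K		4096U
#define SZ_8K		8192U

/* Round up: how many pages of page_size does a region of bytes occupy? */
static unsigned int page_count(size_t bytes, unsigned int page_size)
{
	return (bytes + page_size - 1) / page_size;
}

/* Pick the page size for a CQ with n_cqe entries; 0 means "unsupported". */
static unsigned int cq_page_size(unsigned int n_cqe)
{
	switch (n_cqe) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		return SZ_4K;
	case 4096:
		return SZ_8K;
	default:
		return 0;
	}
}

int main(void)
{
	size_t qmem = 4096 * CQE_BYTES;		/* 64 KiB of CQ entries */
	unsigned int n_cqe = qmem / CQE_BYTES;
	unsigned int psize = cq_page_size(n_cqe);

	if (!psize)
		return 1;
	printf("%u entries -> %u pages of %u bytes\n",
	       n_cqe, page_count(qmem, psize), psize);
	return 0;
}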
+ */ +static int +sli_cmd_common_create_cq(struct sli4 *sli4, void *buf, struct efc_dma *qmem, + u16 eq_id) +{ + struct sli4_rqst_cmn_create_cq_v2 *cqv2 = NULL; + u32 p; + uintptr_t addr; + u32 num_pages = 0; + size_t cmd_size = 0; + u32 page_size = 0; + u32 n_cqe = 0; + u32 dw5_flags = 0; + u16 dw6w1_arm = 0; + __le32 len; + + /* First calculate number of pages and the mailbox cmd length */ + n_cqe = qmem->size / SLI4_CQE_BYTES; + switch (n_cqe) { + case 256: + case 512: + case 1024: + case 2048: + page_size = SZ_4K; + break; + case 4096: + page_size = SZ_8K; + break; + default: + return -EIO; + } + num_pages = sli_page_count(qmem->size, page_size); + + cmd_size = SLI4_RQST_CMDSZ(cmn_create_cq_v2) + + SZ_DMAADDR * num_pages; + + cqv2 = sli_config_cmd_init(sli4, buf, cmd_size, NULL); + if (!cqv2) + return -EIO; + + len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_v2, SZ_DMAADDR * num_pages); + sli_cmd_fill_hdr(&cqv2->hdr, SLI4_CMN_CREATE_CQ, SLI4_SUBSYSTEM_COMMON, + CMD_V2, len); + cqv2->page_size = page_size / SLI_PAGE_SIZE; + + /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.3) */ + cqv2->num_pages = cpu_to_le16(num_pages); + if (!num_pages || num_pages > SLI4_CREATE_CQV2_MAX_PAGES) + return -EIO; + + switch (num_pages) { + case 1: + dw5_flags |= SLI4_CQ_CNT_VAL(256); + break; + case 2: + dw5_flags |= SLI4_CQ_CNT_VAL(512); + break; + case 4: + dw5_flags |= SLI4_CQ_CNT_VAL(1024); + break; + case 8: + dw5_flags |= SLI4_CQ_CNT_VAL(LARGE); + cqv2->cqe_count = cpu_to_le16(n_cqe); + break; + default: + efc_log_err(sli4, "num_pages %d not valid\n", num_pages); + return -EIO; + } + + if (sli4->if_type == SLI4_INTF_IF_TYPE_6) + dw5_flags |= SLI4_CREATE_CQV2_AUTOVALID; + + dw5_flags |= SLI4_CREATE_CQV2_EVT; + dw5_flags |= SLI4_CREATE_CQV2_VALID; + + cqv2->dw5_flags = cpu_to_le32(dw5_flags); + cqv2->dw6w1_arm = cpu_to_le16(dw6w1_arm); + cqv2->eq_id = cpu_to_le16(eq_id); + + for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) { + cqv2->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr)); + cqv2->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr)); + } + + return 0; +} + +static int +sli_cmd_common_create_eq(struct sli4 *sli4, void *buf, struct efc_dma *qmem) +{ + struct sli4_rqst_cmn_create_eq *eq; + u32 p; + uintptr_t addr; + u16 num_pages; + u32 dw5_flags = 0; + u32 dw6_flags = 0, ver; + + eq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_create_eq), + NULL); + if (!eq) + return -EIO; + + if (sli4->if_type == SLI4_INTF_IF_TYPE_6) + ver = CMD_V2; + else + ver = CMD_V0; + + sli_cmd_fill_hdr(&eq->hdr, SLI4_CMN_CREATE_EQ, SLI4_SUBSYSTEM_COMMON, + ver, SLI4_RQST_PYLD_LEN(cmn_create_eq)); + + /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */ + num_pages = qmem->size / SLI_PAGE_SIZE; + eq->num_pages = cpu_to_le16(num_pages); + + switch (num_pages) { + case 1: + dw5_flags |= SLI4_EQE_SIZE_4; + dw6_flags |= SLI4_EQ_CNT_VAL(1024); + break; + case 2: + dw5_flags |= SLI4_EQE_SIZE_4; + dw6_flags |= SLI4_EQ_CNT_VAL(2048); + break; + case 4: + dw5_flags |= SLI4_EQE_SIZE_4; + dw6_flags |= SLI4_EQ_CNT_VAL(4096); + break; + default: + efc_log_err(sli4, "num_pages %d not valid\n", num_pages); + return -EIO; + } + + if (sli4->if_type == SLI4_INTF_IF_TYPE_6) + dw5_flags |= SLI4_CREATE_EQ_AUTOVALID; + + dw5_flags |= SLI4_CREATE_EQ_VALID; + dw6_flags &= (~SLI4_CREATE_EQ_ARM); + eq->dw5_flags = cpu_to_le32(dw5_flags); + eq->dw6_flags = cpu_to_le32(dw6_flags); + eq->dw7_delaymulti = cpu_to_le32(SLI4_CREATE_EQ_DELAYMULTI); + + for (p = 0, addr = qmem->phys; p < 
num_pages; + p++, addr += SLI_PAGE_SIZE) { + eq->page_address[p].low = cpu_to_le32(lower_32_bits(addr)); + eq->page_address[p].high = cpu_to_le32(upper_32_bits(addr)); + } + + return 0; +} + +static int +sli_cmd_common_create_mq_ext(struct sli4 *sli4, void *buf, struct efc_dma *qmem, + u16 cq_id) +{ + struct sli4_rqst_cmn_create_mq_ext *mq; + u32 p; + uintptr_t addr; + u32 num_pages; + u16 dw6w1_flags = 0; + + mq = sli_config_cmd_init(sli4, buf, + SLI4_CFG_PYLD_LENGTH(cmn_create_mq_ext), NULL); + if (!mq) + return -EIO; + + sli_cmd_fill_hdr(&mq->hdr, SLI4_CMN_CREATE_MQ_EXT, + SLI4_SUBSYSTEM_COMMON, CMD_V0, + SLI4_RQST_PYLD_LEN(cmn_create_mq_ext)); + + /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.12) */ + num_pages = qmem->size / SLI_PAGE_SIZE; + mq->num_pages = cpu_to_le16(num_pages); + switch (num_pages) { + case 1: + dw6w1_flags |= SLI4_MQE_SIZE_16; + break; + case 2: + dw6w1_flags |= SLI4_MQE_SIZE_32; + break; + case 4: + dw6w1_flags |= SLI4_MQE_SIZE_64; + break; + case 8: + dw6w1_flags |= SLI4_MQE_SIZE_128; + break; + default: + efc_log_info(sli4, "num_pages %d not valid\n", num_pages); + return -EIO; + } + + mq->async_event_bitmap = cpu_to_le32(SLI4_ASYNC_EVT_FC_ALL); + + if (sli4->params.mq_create_version) { + mq->cq_id_v1 = cpu_to_le16(cq_id); + mq->hdr.dw3_version = cpu_to_le32(CMD_V1); + } else { + dw6w1_flags |= (cq_id << SLI4_CREATE_MQEXT_CQID_SHIFT); + } + mq->dw7_val = cpu_to_le32(SLI4_CREATE_MQEXT_VAL); + + mq->dw6w1_flags = cpu_to_le16(dw6w1_flags); + for (p = 0, addr = qmem->phys; p < num_pages; + p++, addr += SLI_PAGE_SIZE) { + mq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr)); + mq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr)); + } + + return 0; +} + +int +sli_cmd_wq_create(struct sli4 *sli4, void *buf, struct efc_dma *qmem, u16 cq_id) +{ + struct sli4_rqst_wq_create *wq; + u32 p; + uintptr_t addr; + u32 page_size = 0; + u32 n_wqe = 0; + u16 num_pages; + + wq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(wq_create), + NULL); + if (!wq) + return -EIO; + + sli_cmd_fill_hdr(&wq->hdr, SLI4_OPC_WQ_CREATE, SLI4_SUBSYSTEM_FC, + CMD_V1, SLI4_RQST_PYLD_LEN(wq_create)); + n_wqe = qmem->size / sli4->wqe_size; + + switch (qmem->size) { + case 4096: + case 8192: + case 16384: + case 32768: + page_size = SZ_4K; + break; + case 65536: + page_size = SZ_8K; + break; + case 131072: + page_size = SZ_16K; + break; + case 262144: + page_size = SZ_32K; + break; + case 524288: + page_size = SZ_64K; + break; + default: + return -EIO; + } + + /* valid values for number of pages(num_pages): 1-8 */ + num_pages = sli_page_count(qmem->size, page_size); + wq->num_pages = cpu_to_le16(num_pages); + if (!num_pages || num_pages > SLI4_WQ_CREATE_MAX_PAGES) + return -EIO; + + wq->cq_id = cpu_to_le16(cq_id); + + wq->page_size = page_size / SLI_PAGE_SIZE; + + if (sli4->wqe_size == SLI4_WQE_EXT_BYTES) + wq->wqe_size_byte |= SLI4_WQE_EXT_SIZE; + else + wq->wqe_size_byte |= SLI4_WQE_SIZE; + + wq->wqe_count = cpu_to_le16(n_wqe); + + for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) { + wq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr)); + wq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr)); + } + + return 0; +} + +static int +sli_cmd_rq_create_v1(struct sli4 *sli4, void *buf, struct efc_dma *qmem, + u16 cq_id, u16 buffer_size) +{ + struct sli4_rqst_rq_create_v1 *rq; + u32 p; + uintptr_t addr; + u32 num_pages; + + rq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(rq_create_v1), + NULL); + if (!rq) + return -EIO; + + 
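The queue-create commands above and below size a ring as entry_count * entry_size bytes and then hand it to the hardware as a list of physical pages; sli_page_count(), defined elsewhere in the SLI headers and not visible in this hunk, is presumably the usual round-up division. A standalone sketch of that sizing math for an RQ, assuming a 4 KiB page and a stand-in RQE size:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u    /* SLI_PAGE_SIZE in the driver */
#define DEMO_RQE_SIZE  32u      /* stand-in entry size, assumed for the example */

/* Round-up division: pages needed to hold 'bytes'. */
static unsigned int demo_page_count(unsigned int bytes, unsigned int page_size)
{
	return (bytes + page_size - 1) / page_size;
}

int main(void)
{
	unsigned int n_entries = 512;
	unsigned int ring_bytes = n_entries * DEMO_RQE_SIZE;  /* queue DMA size */
	unsigned int num_pages = demo_page_count(ring_bytes, DEMO_PAGE_SIZE);

	/* RQ_CREATE v1 accepts 1-8 pages; anything else is rejected (-EIO). */
	printf("%u entries -> %u bytes -> %u page(s)\n",
	       n_entries, ring_bytes, num_pages);
	return 0;
}

The driver rejects page counts outside each command's documented range rather than silently clamping them.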
sli_cmd_fill_hdr(&rq->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC, + CMD_V1, SLI4_RQST_PYLD_LEN(rq_create_v1)); + /* Disable "no buffer warnings" to avoid Lancer bug */ + rq->dim_dfd_dnb |= SLI4_RQ_CREATE_V1_DNB; + + /* valid values for number of pages: 1-8 (sec 4.5.6) */ + num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE); + rq->num_pages = cpu_to_le16(num_pages); + if (!num_pages || + num_pages > SLI4_RQ_CREATE_V1_MAX_PAGES) { + efc_log_info(sli4, "num_pages %d not valid, max %d\n", + num_pages, SLI4_RQ_CREATE_V1_MAX_PAGES); + return -EIO; + } + + /* + * RQE count is the total number of entries (note not lg2(# entries)) + */ + rq->rqe_count = cpu_to_le16(qmem->size / SLI4_RQE_SIZE); + + rq->rqe_size_byte |= SLI4_RQE_SIZE_8; + + rq->page_size = SLI4_RQ_PAGE_SIZE_4096; + + if (buffer_size < sli4->rq_min_buf_size || + buffer_size > sli4->rq_max_buf_size) { + efc_log_err(sli4, "buffer_size %d out of range (%d-%d)\n", + buffer_size, sli4->rq_min_buf_size, + sli4->rq_max_buf_size); + return -EIO; + } + rq->buffer_size = cpu_to_le32(buffer_size); + + rq->cq_id = cpu_to_le16(cq_id); + + for (p = 0, addr = qmem->phys; + p < num_pages; + p++, addr += SLI_PAGE_SIZE) { + rq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr)); + rq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr)); + } + + return 0; +} + +static int +sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs, + struct sli4_queue *qs[], u32 base_cq_id, + u32 header_buffer_size, + u32 payload_buffer_size, struct efc_dma *dma) +{ + struct sli4_rqst_rq_create_v2 *req = NULL; + u32 i, p, offset = 0; + u32 payload_size, page_count; + uintptr_t addr; + u32 num_pages; + __le32 len; + + page_count = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rqs; + + /* Payload length must accommodate both request and response */ + payload_size = max(SLI4_RQST_CMDSZ(rq_create_v2) + + SZ_DMAADDR * page_count, + sizeof(struct sli4_rsp_cmn_create_queue_set)); + + dma->size = payload_size; + dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size, + &dma->phys, GFP_KERNEL); + if (!dma->virt) + return -EIO; + + memset(dma->virt, 0, payload_size); + + req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma); + if (!req) + return -EIO; + + len = SLI4_RQST_PYLD_LEN_VAR(rq_create_v2, SZ_DMAADDR * page_count); + sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC, + CMD_V2, len); + /* Fill Payload fields */ + req->dim_dfd_dnb |= SLI4_RQCREATEV2_DNB; + num_pages = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE); + req->num_pages = cpu_to_le16(num_pages); + req->rqe_count = cpu_to_le16(qs[0]->dma.size / SLI4_RQE_SIZE); + req->rqe_size_byte |= SLI4_RQE_SIZE_8; + req->page_size = SLI4_RQ_PAGE_SIZE_4096; + req->rq_count = num_rqs; + req->base_cq_id = cpu_to_le16(base_cq_id); + req->hdr_buffer_size = cpu_to_le16(header_buffer_size); + req->payload_buffer_size = cpu_to_le16(payload_buffer_size); + + for (i = 0; i < num_rqs; i++) { + for (p = 0, addr = qs[i]->dma.phys; p < num_pages; + p++, addr += SLI_PAGE_SIZE) { + req->page_phys_addr[offset].low = + cpu_to_le32(lower_32_bits(addr)); + req->page_phys_addr[offset].high = + cpu_to_le32(upper_32_bits(addr)); + offset++; + } + } + + return 0; +} + +static void +__sli_queue_destroy(struct sli4 *sli4, struct sli4_queue *q) +{ + if (!q->dma.size) + return; + + dma_free_coherent(&sli4->pci->dev, q->dma.size, + q->dma.virt, q->dma.phys); + memset(&q->dma, 0, sizeof(struct efc_dma)); +} + +int +__sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype, + size_t size, u32 
n_entries, u32 align) +{ + if (q->dma.virt) { + efc_log_err(sli4, "%s failed\n", __func__); + return -EIO; + } + + memset(q, 0, sizeof(struct sli4_queue)); + + q->dma.size = size * n_entries; + q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size, + &q->dma.phys, GFP_KERNEL); + if (!q->dma.virt) { + memset(&q->dma, 0, sizeof(struct efc_dma)); + efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]); + return -EIO; + } + + memset(q->dma.virt, 0, size * n_entries); + + spin_lock_init(&q->lock); + + q->type = qtype; + q->size = size; + q->length = n_entries; + + if (q->type == SLI4_QTYPE_EQ || q->type == SLI4_QTYPE_CQ) { + /* For prism, phase will be flipped after + * a sweep through eq and cq + */ + q->phase = 1; + } + + /* Limit to half the queue size per interrupt */ + q->proc_limit = n_entries / 2; + + if (q->type == SLI4_QTYPE_EQ) + q->posted_limit = q->length / 2; + else + q->posted_limit = 64; + + return 0; +} + +int +sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q, + u32 n_entries, u32 buffer_size, + struct sli4_queue *cq, bool is_hdr) +{ + if (__sli_queue_init(sli4, q, SLI4_QTYPE_RQ, SLI4_RQE_SIZE, + n_entries, SLI_PAGE_SIZE)) + return -EIO; + + if (sli_cmd_rq_create_v1(sli4, sli4->bmbx.virt, &q->dma, cq->id, + buffer_size)) + goto error; + + if (__sli_create_queue(sli4, q)) + goto error; + + if (is_hdr && q->id & 1) { + efc_log_info(sli4, "bad header RQ_ID %d\n", q->id); + goto error; + } else if (!is_hdr && (q->id & 1) == 0) { + efc_log_info(sli4, "bad data RQ_ID %d\n", q->id); + goto error; + } + + if (is_hdr) + q->u.flag |= SLI4_QUEUE_FLAG_HDR; + else + q->u.flag &= ~SLI4_QUEUE_FLAG_HDR; + + return 0; + +error: + __sli_queue_destroy(sli4, q); + return -EIO; +} + +int +sli_fc_rq_set_alloc(struct sli4 *sli4, u32 num_rq_pairs, + struct sli4_queue *qs[], u32 base_cq_id, + u32 n_entries, u32 header_buffer_size, + u32 payload_buffer_size) +{ + u32 i; + struct efc_dma dma = {0}; + struct sli4_rsp_cmn_create_queue_set *rsp = NULL; + void __iomem *db_regaddr = NULL; + u32 num_rqs = num_rq_pairs * 2; + + for (i = 0; i < num_rqs; i++) { + if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_RQ, + SLI4_RQE_SIZE, n_entries, + SLI_PAGE_SIZE)) { + goto error; + } + } + + if (sli_cmd_rq_create_v2(sli4, num_rqs, qs, base_cq_id, + header_buffer_size, payload_buffer_size, + &dma)) { + goto error; + } + + if (sli_bmbx_command(sli4)) { + efc_log_err(sli4, "bootstrap mailbox write failed RQSet\n"); + goto error; + } + + if (sli4->if_type == SLI4_INTF_IF_TYPE_6) + db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG; + else + db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG; + + rsp = dma.virt; + if (rsp->hdr.status) { + efc_log_err(sli4, "bad create RQSet status=%#x addl=%#x\n", + rsp->hdr.status, rsp->hdr.additional_status); + goto error; + } + + for (i = 0; i < num_rqs; i++) { + qs[i]->id = i + le16_to_cpu(rsp->q_id); + if ((qs[i]->id & 1) == 0) + qs[i]->u.flag |= SLI4_QUEUE_FLAG_HDR; + else + qs[i]->u.flag &= ~SLI4_QUEUE_FLAG_HDR; + + qs[i]->db_regaddr = db_regaddr; + } + + dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys); + + return 0; + +error: + for (i = 0; i < num_rqs; i++) + __sli_queue_destroy(sli4, qs[i]); + + if (dma.virt) + dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, + dma.phys); + + return -EIO; +} + +static int +sli_res_sli_config(struct sli4 *sli4, void *buf) +{ + struct sli4_cmd_sli_config *sli_config = buf; + + /* sanity check */ + if (!buf || sli_config->hdr.command != + SLI4_MBX_CMD_SLI_CONFIG) { + efc_log_err(sli4, "bad parameter buf=%p cmd=%#x\n", 
buf, + buf ? sli_config->hdr.command : -1); + return -EIO; + } + + if (le16_to_cpu(sli_config->hdr.status)) + return le16_to_cpu(sli_config->hdr.status); + + if (le32_to_cpu(sli_config->dw1_flags) & SLI4_SLICONF_EMB) + return sli_config->payload.embed[4]; + + efc_log_info(sli4, "external buffers not supported\n"); + return -EIO; +} + +int +__sli_create_queue(struct sli4 *sli4, struct sli4_queue *q) +{ + struct sli4_rsp_cmn_create_queue *res_q = NULL; + + if (sli_bmbx_command(sli4)) { + efc_log_crit(sli4, "bootstrap mailbox write fail %s\n", + SLI4_QNAME[q->type]); + return -EIO; + } + if (sli_res_sli_config(sli4, sli4->bmbx.virt)) { + efc_log_err(sli4, "bad status create %s\n", + SLI4_QNAME[q->type]); + return -EIO; + } + res_q = (void *)((u8 *)sli4->bmbx.virt + + offsetof(struct sli4_cmd_sli_config, payload)); + + if (res_q->hdr.status) { + efc_log_err(sli4, "bad create %s status=%#x addl=%#x\n", + SLI4_QNAME[q->type], res_q->hdr.status, + res_q->hdr.additional_status); + return -EIO; + } + q->id = le16_to_cpu(res_q->q_id); + switch (q->type) { + case SLI4_QTYPE_EQ: + if (sli4->if_type == SLI4_INTF_IF_TYPE_6) + q->db_regaddr = sli4->reg[1] + SLI4_IF6_EQ_DB_REG; + else + q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG; + break; + case SLI4_QTYPE_CQ: + if (sli4->if_type == SLI4_INTF_IF_TYPE_6) + q->db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG; + else + q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG; + break; + case SLI4_QTYPE_MQ: + if (sli4->if_type == SLI4_INTF_IF_TYPE_6) + q->db_regaddr = sli4->reg[1] + SLI4_IF6_MQ_DB_REG; + else + q->db_regaddr = sli4->reg[0] + SLI4_MQ_DB_REG; + break; + case SLI4_QTYPE_RQ: + if (sli4->if_type == SLI4_INTF_IF_TYPE_6) + q->db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG; + else + q->db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG; + break; + case SLI4_QTYPE_WQ: + if (sli4->if_type == SLI4_INTF_IF_TYPE_6) + q->db_regaddr = sli4->reg[1] + SLI4_IF6_WQ_DB_REG; + else + q->db_regaddr = sli4->reg[0] + SLI4_IO_WQ_DB_REG; + break; + default: + break; + } + + return 0; +} + +int +sli_get_queue_entry_size(struct sli4 *sli4, u32 qtype) +{ + u32 size = 0; + + switch (qtype) { + case SLI4_QTYPE_EQ: + size = sizeof(u32); + break; + case SLI4_QTYPE_CQ: + size = 16; + break; + case SLI4_QTYPE_MQ: + size = 256; + break; + case SLI4_QTYPE_WQ: + size = sli4->wqe_size; + break; + case SLI4_QTYPE_RQ: + size = SLI4_RQE_SIZE; + break; + default: + efc_log_info(sli4, "unknown queue type %d\n", qtype); + return -1; + } + return size; +} + +int +sli_queue_alloc(struct sli4 *sli4, u32 qtype, + struct sli4_queue *q, u32 n_entries, + struct sli4_queue *assoc) +{ + int size; + u32 align = 0; + + /* get queue size */ + size = sli_get_queue_entry_size(sli4, qtype); + if (size < 0) + return -EIO; + align = SLI_PAGE_SIZE; + + if (__sli_queue_init(sli4, q, qtype, size, n_entries, align)) + return -EIO; + + switch (qtype) { + case SLI4_QTYPE_EQ: + if (!sli_cmd_common_create_eq(sli4, sli4->bmbx.virt, &q->dma) && + !__sli_create_queue(sli4, q)) + return 0; + + break; + case SLI4_QTYPE_CQ: + if (!sli_cmd_common_create_cq(sli4, sli4->bmbx.virt, &q->dma, + assoc ? assoc->id : 0) && + !__sli_create_queue(sli4, q)) + return 0; + + break; + case SLI4_QTYPE_MQ: + assoc->u.flag |= SLI4_QUEUE_FLAG_MQ; + if (!sli_cmd_common_create_mq_ext(sli4, sli4->bmbx.virt, + &q->dma, assoc->id) && + !__sli_create_queue(sli4, q)) + return 0; + + break; + case SLI4_QTYPE_WQ: + if (!sli_cmd_wq_create(sli4, sli4->bmbx.virt, &q->dma, + assoc ? 
assoc->id : 0) && + !__sli_create_queue(sli4, q)) + return 0; + + break; + default: + efc_log_info(sli4, "unknown queue type %d\n", qtype); + } + + __sli_queue_destroy(sli4, q); + return -EIO; +} + +static int sli_cmd_cq_set_create(struct sli4 *sli4, + struct sli4_queue *qs[], u32 num_cqs, + struct sli4_queue *eqs[], + struct efc_dma *dma) +{ + struct sli4_rqst_cmn_create_cq_set_v0 *req = NULL; + uintptr_t addr; + u32 i, offset = 0, page_bytes = 0, payload_size; + u32 p = 0, page_size = 0, n_cqe = 0, num_pages_cq; + u32 dw5_flags = 0; + u16 dw6w1_flags = 0; + __le32 req_len; + + n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES; + switch (n_cqe) { + case 256: + case 512: + case 1024: + case 2048: + page_size = 1; + break; + case 4096: + page_size = 2; + break; + default: + return -EIO; + } + + page_bytes = page_size * SLI_PAGE_SIZE; + num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes); + payload_size = max(SLI4_RQST_CMDSZ(cmn_create_cq_set_v0) + + (SZ_DMAADDR * num_pages_cq * num_cqs), + sizeof(struct sli4_rsp_cmn_create_queue_set)); + + dma->size = payload_size; + dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size, + &dma->phys, GFP_KERNEL); + if (!dma->virt) + return -EIO; + + memset(dma->virt, 0, payload_size); + + req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma); + if (!req) + return -EIO; + + req_len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_set_v0, + SZ_DMAADDR * num_pages_cq * num_cqs); + sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_CREATE_CQ_SET, SLI4_SUBSYSTEM_FC, + CMD_V0, req_len); + req->page_size = page_size; + + req->num_pages = cpu_to_le16(num_pages_cq); + switch (num_pages_cq) { + case 1: + dw5_flags |= SLI4_CQ_CNT_VAL(256); + break; + case 2: + dw5_flags |= SLI4_CQ_CNT_VAL(512); + break; + case 4: + dw5_flags |= SLI4_CQ_CNT_VAL(1024); + break; + case 8: + dw5_flags |= SLI4_CQ_CNT_VAL(LARGE); + dw6w1_flags |= (n_cqe & SLI4_CREATE_CQSETV0_CQE_COUNT); + break; + default: + efc_log_info(sli4, "num_pages %d not valid\n", num_pages_cq); + return -EIO; + } + + dw5_flags |= SLI4_CREATE_CQSETV0_EVT; + dw5_flags |= SLI4_CREATE_CQSETV0_VALID; + if (sli4->if_type == SLI4_INTF_IF_TYPE_6) + dw5_flags |= SLI4_CREATE_CQSETV0_AUTOVALID; + + dw6w1_flags &= ~SLI4_CREATE_CQSETV0_ARM; + + req->dw5_flags = cpu_to_le32(dw5_flags); + req->dw6w1_flags = cpu_to_le16(dw6w1_flags); + + req->num_cq_req = cpu_to_le16(num_cqs); + + /* Fill page addresses of all the CQs. 
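Each loop like the one just below publishes the ring's physical pages to the controller as {low, high} 32-bit words via the lower_32_bits()/upper_32_bits() plus cpu_to_le32() pattern. A standalone sketch of the split and its reassembly (host endianness only; the little-endian byte swap is omitted):

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* Shape of one page_phys_addr[] element: two 32-bit halves. */
struct demo_page_addr {
	uint32_t low;           /* bits 31:0 of the bus address */
	uint32_t high;          /* bits 63:32 of the bus address */
};

static struct demo_page_addr demo_split(uint64_t phys)
{
	struct demo_page_addr a;

	a.low = (uint32_t)(phys & 0xffffffffu);
	a.high = (uint32_t)(phys >> 32);
	return a;
}

int main(void)
{
	uint64_t phys = 0x0000001234567000ull;  /* example page bus address */
	struct demo_page_addr a = demo_split(phys);
	uint64_t back = ((uint64_t)a.high << 32) | a.low;

	assert(back == phys);                   /* the split round-trips exactly */
	printf("low=%#x high=%#x\n", (unsigned)a.low, (unsigned)a.high);
	return 0;
}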
*/ + for (i = 0; i < num_cqs; i++) { + req->eq_id[i] = cpu_to_le16(eqs[i]->id); + for (p = 0, addr = qs[i]->dma.phys; p < num_pages_cq; + p++, addr += page_bytes) { + req->page_phys_addr[offset].low = + cpu_to_le32(lower_32_bits(addr)); + req->page_phys_addr[offset].high = + cpu_to_le32(upper_32_bits(addr)); + offset++; + } + } + + return 0; +} + +int +sli_cq_alloc_set(struct sli4 *sli4, struct sli4_queue *qs[], + u32 num_cqs, u32 n_entries, struct sli4_queue *eqs[]) +{ + u32 i; + struct efc_dma dma = {0}; + struct sli4_rsp_cmn_create_queue_set *res; + void __iomem *db_regaddr; + + /* Align the queue DMA memory */ + for (i = 0; i < num_cqs; i++) { + if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_CQ, SLI4_CQE_BYTES, + n_entries, SLI_PAGE_SIZE)) + goto error; + } + + if (sli_cmd_cq_set_create(sli4, qs, num_cqs, eqs, &dma)) + goto error; + + if (sli_bmbx_command(sli4)) + goto error; + + if (sli4->if_type == SLI4_INTF_IF_TYPE_6) + db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG; + else + db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG; + + res = dma.virt; + if (res->hdr.status) { + efc_log_err(sli4, "bad create CQSet status=%#x addl=%#x\n", + res->hdr.status, res->hdr.additional_status); + goto error; + } + + /* Check if we got all requested CQs. */ + if (le16_to_cpu(res->num_q_allocated) != num_cqs) { + efc_log_crit(sli4, "Requested count CQs doesn't match.\n"); + goto error; + } + /* Fill the resp cq ids. */ + for (i = 0; i < num_cqs; i++) { + qs[i]->id = le16_to_cpu(res->q_id) + i; + qs[i]->db_regaddr = db_regaddr; + } + + dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys); + + return 0; + +error: + for (i = 0; i < num_cqs; i++) + __sli_queue_destroy(sli4, qs[i]); + + if (dma.virt) + dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, + dma.phys); + + return -EIO; +} + +static int +sli_cmd_common_destroy_q(struct sli4 *sli4, u8 opc, u8 subsystem, u16 q_id) +{ + struct sli4_rqst_cmn_destroy_q *req; + + /* Payload length must accommodate both request and response */ + req = sli_config_cmd_init(sli4, sli4->bmbx.virt, + SLI4_CFG_PYLD_LENGTH(cmn_destroy_q), NULL); + if (!req) + return -EIO; + + sli_cmd_fill_hdr(&req->hdr, opc, subsystem, + CMD_V0, SLI4_RQST_PYLD_LEN(cmn_destroy_q)); + req->q_id = cpu_to_le16(q_id); + + return 0; +} + +int +sli_queue_free(struct sli4 *sli4, struct sli4_queue *q, + u32 destroy_queues, u32 free_memory) +{ + int rc = 0; + u8 opcode, subsystem; + struct sli4_rsp_hdr *res; + + if (!q) { + efc_log_err(sli4, "bad parameter sli4=%p q=%p\n", sli4, q); + return -EIO; + } + + if (!destroy_queues) + goto free_mem; + + switch (q->type) { + case SLI4_QTYPE_EQ: + opcode = SLI4_CMN_DESTROY_EQ; + subsystem = SLI4_SUBSYSTEM_COMMON; + break; + case SLI4_QTYPE_CQ: + opcode = SLI4_CMN_DESTROY_CQ; + subsystem = SLI4_SUBSYSTEM_COMMON; + break; + case SLI4_QTYPE_MQ: + opcode = SLI4_CMN_DESTROY_MQ; + subsystem = SLI4_SUBSYSTEM_COMMON; + break; + case SLI4_QTYPE_WQ: + opcode = SLI4_OPC_WQ_DESTROY; + subsystem = SLI4_SUBSYSTEM_FC; + break; + case SLI4_QTYPE_RQ: + opcode = SLI4_OPC_RQ_DESTROY; + subsystem = SLI4_SUBSYSTEM_FC; + break; + default: + efc_log_info(sli4, "bad queue type %d\n", q->type); + rc = -EIO; + goto free_mem; + } + + rc = sli_cmd_common_destroy_q(sli4, opcode, subsystem, q->id); + if (rc) + goto free_mem; + + rc = sli_bmbx_command(sli4); + if (rc) + goto free_mem; + + rc = sli_res_sli_config(sli4, sli4->bmbx.virt); + if (rc) + goto free_mem; + + res = (void *)((u8 *)sli4->bmbx.virt + + offsetof(struct sli4_cmd_sli_config, payload)); + if (res->status) { + 
efc_log_err(sli4, "destroy %s st=%#x addl=%#x\n", + SLI4_QNAME[q->type], res->status, + res->additional_status); + rc = -EIO; + goto free_mem; + } + +free_mem: + if (free_memory) + __sli_queue_destroy(sli4, q); + + return rc; +} + +int +sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm) +{ + u32 val; + unsigned long flags = 0; + u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM; + + spin_lock_irqsave(&q->lock, flags); + if (sli4->if_type == SLI4_INTF_IF_TYPE_6) + val = sli_format_if6_eq_db_data(q->n_posted, q->id, a); + else + val = sli_format_eq_db_data(q->n_posted, q->id, a); + + writel(val, q->db_regaddr); + q->n_posted = 0; + spin_unlock_irqrestore(&q->lock, flags); + + return 0; +} + +int +sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm) +{ + u32 val = 0; + unsigned long flags = 0; + u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM; + + spin_lock_irqsave(&q->lock, flags); + + switch (q->type) { + case SLI4_QTYPE_EQ: + if (sli4->if_type == SLI4_INTF_IF_TYPE_6) + val = sli_format_if6_eq_db_data(q->n_posted, q->id, a); + else + val = sli_format_eq_db_data(q->n_posted, q->id, a); + + writel(val, q->db_regaddr); + q->n_posted = 0; + break; + case SLI4_QTYPE_CQ: + if (sli4->if_type == SLI4_INTF_IF_TYPE_6) + val = sli_format_if6_cq_db_data(q->n_posted, q->id, a); + else + val = sli_format_cq_db_data(q->n_posted, q->id, a); + + writel(val, q->db_regaddr); + q->n_posted = 0; + break; + default: + efc_log_info(sli4, "should only be used for EQ/CQ, not %s\n", + SLI4_QNAME[q->type]); + } + + spin_unlock_irqrestore(&q->lock, flags); + + return 0; +} + +int +sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) +{ + u8 *qe = q->dma.virt; + u32 qindex; + u32 val = 0; + + qindex = q->index; + qe += q->index * q->size; + + if (sli4->params.perf_wq_id_association) + sli_set_wq_id_association(entry, q->id); + + memcpy(qe, entry, q->size); + val = sli_format_wq_db_data(q->id); + + writel(val, q->db_regaddr); + q->index = (q->index + 1) & (q->length - 1); + + return qindex; +} + +int +sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) +{ + u8 *qe = q->dma.virt; + u32 qindex; + u32 val = 0; + unsigned long flags; + + spin_lock_irqsave(&q->lock, flags); + qindex = q->index; + qe += q->index * q->size; + + memcpy(qe, entry, q->size); + val = sli_format_mq_db_data(q->id); + writel(val, q->db_regaddr); + q->index = (q->index + 1) & (q->length - 1); + spin_unlock_irqrestore(&q->lock, flags); + + return qindex; +} + +int +sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) +{ + u8 *qe = q->dma.virt; + u32 qindex; + u32 val = 0; + + qindex = q->index; + qe += q->index * q->size; + + memcpy(qe, entry, q->size); + + /* + * In RQ-pair, an RQ either contains the FC header + * (i.e. is_hdr == TRUE) or the payload. 
+ * + * Don't ring doorbell for payload RQ + */ + if (!(q->u.flag & SLI4_QUEUE_FLAG_HDR)) + goto skip; + + val = sli_format_rq_db_data(q->id); + writel(val, q->db_regaddr); +skip: + q->index = (q->index + 1) & (q->length - 1); + + return qindex; +} + +int +sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) +{ + u8 *qe = q->dma.virt; + unsigned long flags = 0; + u16 wflags = 0; + + spin_lock_irqsave(&q->lock, flags); + + qe += q->index * q->size; + + /* Check if eqe is valid */ + wflags = le16_to_cpu(((struct sli4_eqe *)qe)->dw0w0_flags); + + if ((wflags & SLI4_EQE_VALID) != q->phase) { + spin_unlock_irqrestore(&q->lock, flags); + return -EIO; + } + + if (sli4->if_type != SLI4_INTF_IF_TYPE_6) { + wflags &= ~SLI4_EQE_VALID; + ((struct sli4_eqe *)qe)->dw0w0_flags = cpu_to_le16(wflags); + } + + memcpy(entry, qe, q->size); + q->index = (q->index + 1) & (q->length - 1); + q->n_posted++; + /* + * For prism, the phase value will be used + * to check the validity of eq/cq entries. + * The value toggles after a complete sweep + * through the queue. + */ + + if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0) + q->phase ^= (u16)0x1; + + spin_unlock_irqrestore(&q->lock, flags); + + return 0; +} + +int +sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) +{ + u8 *qe = q->dma.virt; + unsigned long flags = 0; + u32 dwflags = 0; + bool valid_bit_set; + + spin_lock_irqsave(&q->lock, flags); + + qe += q->index * q->size; + + /* Check if cqe is valid */ + dwflags = le32_to_cpu(((struct sli4_mcqe *)qe)->dw3_flags); + valid_bit_set = (dwflags & SLI4_MCQE_VALID) != 0; + + if (valid_bit_set != q->phase) { + spin_unlock_irqrestore(&q->lock, flags); + return -EIO; + } + + if (sli4->if_type != SLI4_INTF_IF_TYPE_6) { + dwflags &= ~SLI4_MCQE_VALID; + ((struct sli4_mcqe *)qe)->dw3_flags = cpu_to_le32(dwflags); + } + + memcpy(entry, qe, q->size); + q->index = (q->index + 1) & (q->length - 1); + q->n_posted++; + /* + * For prism, the phase value will be used + * to check the validity of eq/cq entries. + * The value toggles after a complete sweep + * through the queue. 
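sli_eq_read() above and sli_cq_read() here use two generations of the same handshake: older interface types clear the valid bit once an entry is consumed, while IF_TYPE_6 ("prism") leaves entries untouched and instead compares the valid bit against a phase value that the consumer flips after every full sweep of the ring. A standalone, lock-free sketch of the phase-bit variant with hypothetical names:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define RING_LEN 8u                     /* must be a power of two */

struct demo_entry {
	uint16_t valid;                 /* written by the "hardware" */
	uint16_t data;
};

struct demo_ring {
	struct demo_entry e[RING_LEN];
	uint32_t index;                 /* consumer index */
	uint16_t phase;                 /* expected valid value, starts at 1 */
};

/* Copy out one entry if the next slot has been posted, else return false. */
static bool demo_ring_read(struct demo_ring *r, uint16_t *out)
{
	struct demo_entry *qe = &r->e[r->index];

	if (qe->valid != r->phase)
		return false;           /* nothing new posted yet */

	*out = qe->data;
	r->index = (r->index + 1) & (RING_LEN - 1);
	if (r->index == 0)
		r->phase ^= 1;          /* flip after each full sweep */
	return true;
}

int main(void)
{
	struct demo_ring ring = { .phase = 1 };
	uint16_t v;

	ring.e[0].valid = 1;            /* "hardware" posts one entry */
	ring.e[0].data = 42;
	if (demo_ring_read(&ring, &v))
		printf("consumed %u\n", (unsigned)v);
	return 0;
}

The index mask (RING_LEN - 1) is the same power-of-two wrap used by q->index = (q->index + 1) & (q->length - 1) throughout this file.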
+ */ + + if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0) + q->phase ^= (u16)0x1; + + spin_unlock_irqrestore(&q->lock, flags); + + return 0; +} + +int +sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) +{ + u8 *qe = q->dma.virt; + unsigned long flags = 0; + + spin_lock_irqsave(&q->lock, flags); + + qe += q->u.r_idx * q->size; + + /* Check if mqe is valid */ + if (q->index == q->u.r_idx) { + spin_unlock_irqrestore(&q->lock, flags); + return -EIO; + } + + memcpy(entry, qe, q->size); + q->u.r_idx = (q->u.r_idx + 1) & (q->length - 1); + + spin_unlock_irqrestore(&q->lock, flags); + + return 0; +} + +int +sli_eq_parse(struct sli4 *sli4, u8 *buf, u16 *cq_id) +{ + struct sli4_eqe *eqe = (void *)buf; + int rc = 0; + u16 flags = 0; + u16 majorcode; + u16 minorcode; + + if (!buf || !cq_id) { + efc_log_err(sli4, "bad parameters sli4=%p buf=%p cq_id=%p\n", + sli4, buf, cq_id); + return -EIO; + } + + flags = le16_to_cpu(eqe->dw0w0_flags); + majorcode = (flags & SLI4_EQE_MJCODE) >> 1; + minorcode = (flags & SLI4_EQE_MNCODE) >> 4; + switch (majorcode) { + case SLI4_MAJOR_CODE_STANDARD: + *cq_id = le16_to_cpu(eqe->resource_id); + break; + case SLI4_MAJOR_CODE_SENTINEL: + efc_log_info(sli4, "sentinel EQE\n"); + rc = SLI4_EQE_STATUS_EQ_FULL; + break; + default: + efc_log_info(sli4, "Unsupported EQE: major %x minor %x\n", + majorcode, minorcode); + rc = -EIO; + } + + return rc; +} + +int +sli_cq_parse(struct sli4 *sli4, struct sli4_queue *cq, u8 *cqe, + enum sli4_qentry *etype, u16 *q_id) +{ + int rc = 0; + + if (!cq || !cqe || !etype) { + efc_log_err(sli4, "bad params sli4=%p cq=%p cqe=%p etype=%p q_id=%p\n", + sli4, cq, cqe, etype, q_id); + return -EINVAL; + } + + /* Parse a CQ entry to retrieve the event type and the queue id */ + if (cq->u.flag & SLI4_QUEUE_FLAG_MQ) { + struct sli4_mcqe *mcqe = (void *)cqe; + + if (le32_to_cpu(mcqe->dw3_flags) & SLI4_MCQE_AE) { + *etype = SLI4_QENTRY_ASYNC; + } else { + *etype = SLI4_QENTRY_MQ; + rc = sli_cqe_mq(sli4, mcqe); + } + *q_id = -1; + } else { + rc = sli_fc_cqe_parse(sli4, cq, cqe, etype, q_id); + } + + return rc; +} + +int +sli_abort_wqe(struct sli4 *sli, void *buf, enum sli4_abort_type type, + bool send_abts, u32 ids, u32 mask, u16 tag, u16 cq_id) +{ + struct sli4_abort_wqe *abort = buf; + + memset(buf, 0, sli->wqe_size); + + switch (type) { + case SLI4_ABORT_XRI: + abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG; + if (mask) { + efc_log_warn(sli, "%#x aborting XRI %#x warning non-zero mask", + mask, ids); + mask = 0; + } + break; + case SLI4_ABORT_ABORT_ID: + abort->criteria = SLI4_ABORT_CRITERIA_ABORT_TAG; + break; + case SLI4_ABORT_REQUEST_ID: + abort->criteria = SLI4_ABORT_CRITERIA_REQUEST_TAG; + break; + default: + efc_log_info(sli, "unsupported type %#x\n", type); + return -EIO; + } + + abort->ia_ir_byte |= send_abts ? 
0 : 1; + + /* Suppress ABTS retries */ + abort->ia_ir_byte |= SLI4_ABRT_WQE_IR; + + abort->t_mask = cpu_to_le32(mask); + abort->t_tag = cpu_to_le32(ids); + abort->command = SLI4_WQE_ABORT; + abort->request_tag = cpu_to_le16(tag); + + abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD); + + abort->cq_id = cpu_to_le16(cq_id); + abort->cmdtype_wqec_byte |= SLI4_CMD_ABORT_WQE; + + return 0; +} + +int +sli_els_request64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, + struct sli_els_params *params) +{ + struct sli4_els_request64_wqe *els = buf; + struct sli4_sge *sge = sgl->virt; + bool is_fabric = false; + struct sli4_bde *bptr; + + memset(buf, 0, sli->wqe_size); + + bptr = &els->els_request_payload; + if (sli->params.sgl_pre_registered) { + els->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_REQ_WQE_XBL; + + els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_DBDE; + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (params->xmit_len & SLI4_BDE_LEN_MASK)); + + bptr->u.data.low = sge[0].buffer_address_low; + bptr->u.data.high = sge[0].buffer_address_high; + } else { + els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_XBL; + + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | + ((2 * sizeof(struct sli4_sge)) & + SLI4_BDE_LEN_MASK)); + bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys)); + bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys)); + } + + els->els_request_payload_length = cpu_to_le32(params->xmit_len); + els->max_response_payload_length = cpu_to_le32(params->rsp_len); + + els->xri_tag = cpu_to_le16(params->xri); + els->timer = params->timeout; + els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3; + + els->command = SLI4_WQE_ELS_REQUEST64; + + els->request_tag = cpu_to_le16(params->tag); + + els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_IOD; + + els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_QOSD; + + /* figure out the ELS_ID value from the request buffer */ + + switch (params->cmd) { + case ELS_LOGO: + els->cmdtype_elsid_byte |= + SLI4_ELS_REQUEST64_LOGO << SLI4_REQ_WQE_ELSID_SHFT; + if (params->rpi_registered) { + els->ct_byte |= + SLI4_GENERIC_CONTEXT_RPI << SLI4_REQ_WQE_CT_SHFT; + els->context_tag = cpu_to_le16(params->rpi); + } else { + els->ct_byte |= + SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; + els->context_tag = cpu_to_le16(params->vpi); + } + if (params->d_id == FC_FID_FLOGI) + is_fabric = true; + break; + case ELS_FDISC: + if (params->d_id == FC_FID_FLOGI) + is_fabric = true; + if (params->s_id == 0) { + els->cmdtype_elsid_byte |= + SLI4_ELS_REQUEST64_FDISC << SLI4_REQ_WQE_ELSID_SHFT; + is_fabric = true; + } else { + els->cmdtype_elsid_byte |= + SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT; + } + els->ct_byte |= + SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; + els->context_tag = cpu_to_le16(params->vpi); + els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT); + break; + case ELS_FLOGI: + els->ct_byte |= + SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; + els->context_tag = cpu_to_le16(params->vpi); + /* + * Set SP here ... we haven't done a REG_VPI yet + * need to maybe not set this when we have + * completed VFI/VPI registrations ... + * + * Use the FC_ID of the SPORT if it has been allocated, + * otherwise use an S_ID of zero. 
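Several WQEs in this file carry 24-bit Fibre Channel addresses in the low three bytes of a 32-bit word (see the sid_sp_dword handling here and the s_id & 0x00ffffff and RID masking in sli_xmit_bls_rsp64_wqe() and sli_xmit_sequence64_wqe() further down), leaving the upper bits free for flags such as the SP bit. A standalone sketch of that packing:

#include <stdio.h>
#include <stdint.h>

#define FC_ID_MASK 0x00ffffffu          /* 24-bit N_Port ID */

/* Pack a 24-bit FC address into the low bytes of a 32-bit word,
 * preserving whatever flag bits already live in the top byte.
 */
static uint32_t demo_pack_fc_id(uint32_t dword, uint32_t fc_id)
{
	return (dword & ~FC_ID_MASK) | (fc_id & FC_ID_MASK);
}

static uint32_t demo_unpack_fc_id(uint32_t dword)
{
	return dword & FC_ID_MASK;
}

int main(void)
{
	uint32_t dword = 0;

	/* 0xFFFFFE is the well-known fabric login address (FC_FID_FLOGI). */
	dword = demo_pack_fc_id(dword, 0xfffffe);
	printf("d_id=%#x\n", (unsigned)demo_unpack_fc_id(dword));
	return 0;
}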
+ */ + els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT); + if (params->s_id != U32_MAX) + els->sid_sp_dword |= cpu_to_le32(params->s_id); + break; + case ELS_PLOGI: + els->cmdtype_elsid_byte |= + SLI4_ELS_REQUEST64_PLOGI << SLI4_REQ_WQE_ELSID_SHFT; + els->ct_byte |= + SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; + els->context_tag = cpu_to_le16(params->vpi); + break; + case ELS_SCR: + els->cmdtype_elsid_byte |= + SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT; + els->ct_byte |= + SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; + els->context_tag = cpu_to_le16(params->vpi); + break; + default: + els->cmdtype_elsid_byte |= + SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT; + if (params->rpi_registered) { + els->ct_byte |= (SLI4_GENERIC_CONTEXT_RPI << + SLI4_REQ_WQE_CT_SHFT); + els->context_tag = cpu_to_le16(params->vpi); + } else { + els->ct_byte |= + SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; + els->context_tag = cpu_to_le16(params->vpi); + } + break; + } + + if (is_fabric) + els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_FABRIC; + else + els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_NON_FABRIC; + + els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT); + + if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) != + SLI4_GENERIC_CONTEXT_RPI) + els->remote_id_dword = cpu_to_le32(params->d_id); + + if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) == + SLI4_GENERIC_CONTEXT_VPI) + els->temporary_rpi = cpu_to_le16(params->rpi); + + return 0; +} + +int +sli_fcp_icmnd64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, u16 xri, + u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 timeout) +{ + struct sli4_fcp_icmnd64_wqe *icmnd = buf; + struct sli4_sge *sge = NULL; + struct sli4_bde *bptr; + u32 len; + + memset(buf, 0, sli->wqe_size); + + if (!sgl || !sgl->virt) { + efc_log_err(sli, "bad parameter sgl=%p virt=%p\n", + sgl, sgl ? 
sgl->virt : NULL); + return -EIO; + } + sge = sgl->virt; + bptr = &icmnd->bde; + if (sli->params.sgl_pre_registered) { + icmnd->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_ICMD_WQE_XBL; + + icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_DBDE; + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (le32_to_cpu(sge[0].buffer_length) & + SLI4_BDE_LEN_MASK)); + + bptr->u.data.low = sge[0].buffer_address_low; + bptr->u.data.high = sge[0].buffer_address_high; + } else { + icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_XBL; + + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | + (sgl->size & SLI4_BDE_LEN_MASK)); + + bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys)); + bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys)); + } + + len = le32_to_cpu(sge[0].buffer_length) + + le32_to_cpu(sge[1].buffer_length); + icmnd->payload_offset_length = cpu_to_le16(len); + icmnd->xri_tag = cpu_to_le16(xri); + icmnd->context_tag = cpu_to_le16(rpi); + icmnd->timer = timeout; + + /* WQE word 4 contains read transfer length */ + icmnd->class_pu_byte |= 2 << SLI4_ICMD_WQE_PU_SHFT; + icmnd->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3; + icmnd->command = SLI4_WQE_FCP_ICMND64; + icmnd->dif_ct_bs_byte |= + SLI4_GENERIC_CONTEXT_RPI << SLI4_ICMD_WQE_CT_SHFT; + + icmnd->abort_tag = cpu_to_le32(xri); + + icmnd->request_tag = cpu_to_le16(tag); + icmnd->len_loc1_byte |= SLI4_ICMD_WQE_LEN_LOC_BIT1; + icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_LEN_LOC_BIT2; + icmnd->cmd_type_byte |= SLI4_CMD_FCP_ICMND64_WQE; + icmnd->cq_id = cpu_to_le16(cq_id); + + return 0; +} + +int +sli_fcp_iread64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, + u32 first_data_sge, u32 xfer_len, u16 xri, u16 tag, + u16 cq_id, u32 rpi, u32 rnode_fcid, + u8 dif, u8 bs, u8 timeout) +{ + struct sli4_fcp_iread64_wqe *iread = buf; + struct sli4_sge *sge = NULL; + struct sli4_bde *bptr; + u32 sge_flags, len; + + memset(buf, 0, sli->wqe_size); + + if (!sgl || !sgl->virt) { + efc_log_err(sli, "bad parameter sgl=%p virt=%p\n", + sgl, sgl ? 
sgl->virt : NULL); + return -EIO; + } + + sge = sgl->virt; + bptr = &iread->bde; + if (sli->params.sgl_pre_registered) { + iread->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IR_WQE_XBL; + + iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_DBDE; + + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (le32_to_cpu(sge[0].buffer_length) & + SLI4_BDE_LEN_MASK)); + + bptr->u.blp.low = sge[0].buffer_address_low; + bptr->u.blp.high = sge[0].buffer_address_high; + } else { + iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_XBL; + + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | + (sgl->size & SLI4_BDE_LEN_MASK)); + + bptr->u.blp.low = + cpu_to_le32(lower_32_bits(sgl->phys)); + bptr->u.blp.high = + cpu_to_le32(upper_32_bits(sgl->phys)); + + /* + * fill out fcp_cmnd buffer len and change resp buffer to be of + * type "skip" (note: response will still be written to sge[1] + * if necessary) + */ + len = le32_to_cpu(sge[0].buffer_length); + iread->fcp_cmd_buffer_length = cpu_to_le16(len); + + sge_flags = le32_to_cpu(sge[1].dw2_flags); + sge_flags &= (~SLI4_SGE_TYPE_MASK); + sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT); + sge[1].dw2_flags = cpu_to_le32(sge_flags); + } + + len = le32_to_cpu(sge[0].buffer_length) + + le32_to_cpu(sge[1].buffer_length); + iread->payload_offset_length = cpu_to_le16(len); + iread->total_transfer_length = cpu_to_le32(xfer_len); + + iread->xri_tag = cpu_to_le16(xri); + iread->context_tag = cpu_to_le16(rpi); + + iread->timer = timeout; + + /* WQE word 4 contains read transfer length */ + iread->class_pu_byte |= 2 << SLI4_IR_WQE_PU_SHFT; + iread->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3; + iread->command = SLI4_WQE_FCP_IREAD64; + iread->dif_ct_bs_byte |= + SLI4_GENERIC_CONTEXT_RPI << SLI4_IR_WQE_CT_SHFT; + iread->dif_ct_bs_byte |= dif; + iread->dif_ct_bs_byte |= bs << SLI4_IR_WQE_BS_SHFT; + + iread->abort_tag = cpu_to_le32(xri); + + iread->request_tag = cpu_to_le16(tag); + iread->len_loc1_byte |= SLI4_IR_WQE_LEN_LOC_BIT1; + iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_LEN_LOC_BIT2; + iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_IOD; + iread->cmd_type_byte |= SLI4_CMD_FCP_IREAD64_WQE; + iread->cq_id = cpu_to_le16(cq_id); + + if (sli->params.perf_hint) { + bptr = &iread->first_data_bde; + bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (le32_to_cpu(sge[first_data_sge].buffer_length) & + SLI4_BDE_LEN_MASK)); + bptr->u.data.low = + sge[first_data_sge].buffer_address_low; + bptr->u.data.high = + sge[first_data_sge].buffer_address_high; + } + + return 0; +} + +int +sli_fcp_iwrite64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, + u32 first_data_sge, u32 xfer_len, + u32 first_burst, u16 xri, u16 tag, + u16 cq_id, u32 rpi, + u32 rnode_fcid, + u8 dif, u8 bs, u8 timeout) +{ + struct sli4_fcp_iwrite64_wqe *iwrite = buf; + struct sli4_sge *sge = NULL; + struct sli4_bde *bptr; + u32 sge_flags, min, len; + + memset(buf, 0, sli->wqe_size); + + if (!sgl || !sgl->virt) { + efc_log_err(sli, "bad parameter sgl=%p virt=%p\n", + sgl, sgl ? 
sgl->virt : NULL); + return -EIO; + } + sge = sgl->virt; + bptr = &iwrite->bde; + if (sli->params.sgl_pre_registered) { + iwrite->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IWR_WQE_XBL; + + iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_DBDE; + bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (le32_to_cpu(sge[0].buffer_length) & SLI4_BDE_LEN_MASK)); + bptr->u.data.low = sge[0].buffer_address_low; + bptr->u.data.high = sge[0].buffer_address_high; + } else { + iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_XBL; + + bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (sgl->size & SLI4_BDE_LEN_MASK)); + + bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys)); + bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys)); + + /* + * fill out fcp_cmnd buffer len and change resp buffer to be of + * type "skip" (note: response will still be written to sge[1] + * if necessary) + */ + len = le32_to_cpu(sge[0].buffer_length); + iwrite->fcp_cmd_buffer_length = cpu_to_le16(len); + sge_flags = le32_to_cpu(sge[1].dw2_flags); + sge_flags &= ~SLI4_SGE_TYPE_MASK; + sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT); + sge[1].dw2_flags = cpu_to_le32(sge_flags); + } + + len = le32_to_cpu(sge[0].buffer_length) + + le32_to_cpu(sge[1].buffer_length); + iwrite->payload_offset_length = cpu_to_le16(len); + iwrite->total_transfer_length = cpu_to_le16(xfer_len); + min = (xfer_len < first_burst) ? xfer_len : first_burst; + iwrite->initial_transfer_length = cpu_to_le16(min); + + iwrite->xri_tag = cpu_to_le16(xri); + iwrite->context_tag = cpu_to_le16(rpi); + + iwrite->timer = timeout; + /* WQE word 4 contains read transfer length */ + iwrite->class_pu_byte |= 2 << SLI4_IWR_WQE_PU_SHFT; + iwrite->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3; + iwrite->command = SLI4_WQE_FCP_IWRITE64; + iwrite->dif_ct_bs_byte |= + SLI4_GENERIC_CONTEXT_RPI << SLI4_IWR_WQE_CT_SHFT; + iwrite->dif_ct_bs_byte |= dif; + iwrite->dif_ct_bs_byte |= bs << SLI4_IWR_WQE_BS_SHFT; + + iwrite->abort_tag = cpu_to_le32(xri); + + iwrite->request_tag = cpu_to_le16(tag); + iwrite->len_loc1_byte |= SLI4_IWR_WQE_LEN_LOC_BIT1; + iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_LEN_LOC_BIT2; + iwrite->cmd_type_byte |= SLI4_CMD_FCP_IWRITE64_WQE; + iwrite->cq_id = cpu_to_le16(cq_id); + + if (sli->params.perf_hint) { + bptr = &iwrite->first_data_bde; + + bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (le32_to_cpu(sge[first_data_sge].buffer_length) & + SLI4_BDE_LEN_MASK)); + + bptr->u.data.low = sge[first_data_sge].buffer_address_low; + bptr->u.data.high = sge[first_data_sge].buffer_address_high; + } + + return 0; +} + +int +sli_fcp_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, + u32 first_data_sge, u16 cq_id, u8 dif, u8 bs, + struct sli_fcp_tgt_params *params) +{ + struct sli4_fcp_treceive64_wqe *trecv = buf; + struct sli4_fcp_128byte_wqe *trecv_128 = buf; + struct sli4_sge *sge = NULL; + struct sli4_bde *bptr; + + memset(buf, 0, sli->wqe_size); + + if (!sgl || !sgl->virt) { + efc_log_err(sli, "bad parameter sgl=%p virt=%p\n", + sgl, sgl ? 
sgl->virt : NULL); + return -EIO; + } + sge = sgl->virt; + bptr = &trecv->bde; + if (sli->params.sgl_pre_registered) { + trecv->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_TRCV_WQE_XBL; + + trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE; + + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (le32_to_cpu(sge[0].buffer_length) + & SLI4_BDE_LEN_MASK)); + + bptr->u.data.low = sge[0].buffer_address_low; + bptr->u.data.high = sge[0].buffer_address_high; + + trecv->payload_offset_length = sge[0].buffer_length; + } else { + trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_XBL; + + /* if data is a single physical address, use a BDE */ + if (!dif && + params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) { + trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE; + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (le32_to_cpu(sge[2].buffer_length) + & SLI4_BDE_LEN_MASK)); + + bptr->u.data.low = sge[2].buffer_address_low; + bptr->u.data.high = sge[2].buffer_address_high; + } else { + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | + (sgl->size & SLI4_BDE_LEN_MASK)); + bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys)); + bptr->u.blp.high = + cpu_to_le32(upper_32_bits(sgl->phys)); + } + } + + trecv->relative_offset = cpu_to_le32(params->offset); + + if (params->flags & SLI4_IO_CONTINUATION) + trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_XC; + + trecv->xri_tag = cpu_to_le16(params->xri); + + trecv->context_tag = cpu_to_le16(params->rpi); + + /* WQE uses relative offset */ + trecv->class_ar_pu_byte |= 1 << SLI4_TRCV_WQE_PU_SHFT; + + if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE) + trecv->class_ar_pu_byte |= SLI4_TRCV_WQE_AR; + + trecv->command = SLI4_WQE_FCP_TRECEIVE64; + trecv->class_ar_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3; + trecv->dif_ct_bs_byte |= + SLI4_GENERIC_CONTEXT_RPI << SLI4_TRCV_WQE_CT_SHFT; + trecv->dif_ct_bs_byte |= bs << SLI4_TRCV_WQE_BS_SHFT; + + trecv->remote_xid = cpu_to_le16(params->ox_id); + + trecv->request_tag = cpu_to_le16(params->tag); + + trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_IOD; + + trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_LEN_LOC_BIT2; + + trecv->cmd_type_byte |= SLI4_CMD_FCP_TRECEIVE64_WQE; + + trecv->cq_id = cpu_to_le16(cq_id); + + trecv->fcp_data_receive_length = cpu_to_le32(params->xmit_len); + + if (sli->params.perf_hint) { + bptr = &trecv->first_data_bde; + + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (le32_to_cpu(sge[first_data_sge].buffer_length) & + SLI4_BDE_LEN_MASK)); + bptr->u.data.low = sge[first_data_sge].buffer_address_low; + bptr->u.data.high = sge[first_data_sge].buffer_address_high; + } + + /* The upper 7 bits of csctl is the priority */ + if (params->cs_ctl & SLI4_MASK_CCP) { + trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_CCPE; + trecv->ccp = (params->cs_ctl & SLI4_MASK_CCP); + } + + if (params->app_id && sli->wqe_size == SLI4_WQE_EXT_BYTES && + !(trecv->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) { + trecv->lloc1_appid |= SLI4_TRCV_WQE_APPID; + trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_WQES; + trecv_128->dw[31] = params->app_id; + } + return 0; +} + +int +sli_fcp_cont_treceive64_wqe(struct sli4 *sli, void *buf, + struct efc_dma *sgl, u32 first_data_sge, + u16 sec_xri, u16 cq_id, u8 dif, u8 bs, + struct sli_fcp_tgt_params *params) +{ + int rc; + + rc = sli_fcp_treceive64_wqe(sli, buf, sgl, first_data_sge, + cq_id, dif, bs, params); + if (!rc) { + struct sli4_fcp_treceive64_wqe *trecv = buf; + + trecv->command = SLI4_WQE_FCP_CONT_TRECEIVE64; + trecv->dword5.sec_xri_tag = 
cpu_to_le16(sec_xri); + } + return rc; +} + +int +sli_fcp_trsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, + u16 cq_id, u8 port_owned, struct sli_fcp_tgt_params *params) +{ + struct sli4_fcp_trsp64_wqe *trsp = buf; + struct sli4_fcp_128byte_wqe *trsp_128 = buf; + + memset(buf, 0, sli4->wqe_size); + + if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE) { + trsp->class_ag_byte |= SLI4_TRSP_WQE_AG; + } else { + struct sli4_sge *sge = sgl->virt; + struct sli4_bde *bptr; + + if (sli4->params.sgl_pre_registered || port_owned) + trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_DBDE; + else + trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_XBL; + bptr = &trsp->bde; + + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (le32_to_cpu(sge[0].buffer_length) & + SLI4_BDE_LEN_MASK)); + bptr->u.data.low = sge[0].buffer_address_low; + bptr->u.data.high = sge[0].buffer_address_high; + + trsp->fcp_response_length = cpu_to_le32(params->xmit_len); + } + + if (params->flags & SLI4_IO_CONTINUATION) + trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_XC; + + trsp->xri_tag = cpu_to_le16(params->xri); + trsp->rpi = cpu_to_le16(params->rpi); + + trsp->command = SLI4_WQE_FCP_TRSP64; + trsp->class_ag_byte |= SLI4_GENERIC_CLASS_CLASS_3; + + trsp->remote_xid = cpu_to_le16(params->ox_id); + trsp->request_tag = cpu_to_le16(params->tag); + if (params->flags & SLI4_IO_DNRX) + trsp->ct_dnrx_byte |= SLI4_TRSP_WQE_DNRX; + else + trsp->ct_dnrx_byte &= ~SLI4_TRSP_WQE_DNRX; + + trsp->lloc1_appid |= 0x1; + trsp->cq_id = cpu_to_le16(cq_id); + trsp->cmd_type_byte = SLI4_CMD_FCP_TRSP64_WQE; + + /* The upper 7 bits of csctl is the priority */ + if (params->cs_ctl & SLI4_MASK_CCP) { + trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_CCPE; + trsp->ccp = (params->cs_ctl & SLI4_MASK_CCP); + } + + if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES && + !(trsp->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) { + trsp->lloc1_appid |= SLI4_TRSP_WQE_APPID; + trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_WQES; + trsp_128->dw[31] = params->app_id; + } + return 0; +} + +int +sli_fcp_tsend64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, + u32 first_data_sge, u16 cq_id, u8 dif, u8 bs, + struct sli_fcp_tgt_params *params) +{ + struct sli4_fcp_tsend64_wqe *tsend = buf; + struct sli4_fcp_128byte_wqe *tsend_128 = buf; + struct sli4_sge *sge = NULL; + struct sli4_bde *bptr; + + memset(buf, 0, sli4->wqe_size); + + if (!sgl || !sgl->virt) { + efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n", + sgl, sgl ? sgl->virt : NULL); + return -EIO; + } + sge = sgl->virt; + + bptr = &tsend->bde; + if (sli4->params.sgl_pre_registered) { + tsend->ll_qd_xbl_hlm_iod_dbde &= ~SLI4_TSEND_WQE_XBL; + + tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE; + + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (le32_to_cpu(sge[2].buffer_length) & + SLI4_BDE_LEN_MASK)); + + /* TSEND64_WQE specifies first two SGE are skipped (3rd is + * valid) + */ + bptr->u.data.low = sge[2].buffer_address_low; + bptr->u.data.high = sge[2].buffer_address_high; + } else { + tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_XBL; + + /* if data is a single physical address, use a BDE */ + if (!dif && + params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) { + tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE; + + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (le32_to_cpu(sge[2].buffer_length) & + SLI4_BDE_LEN_MASK)); + /* + * TSEND64_WQE specifies first two SGE are skipped + * (i.e. 
3rd is valid) + */ + bptr->u.data.low = + sge[2].buffer_address_low; + bptr->u.data.high = + sge[2].buffer_address_high; + } else { + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | + (sgl->size & + SLI4_BDE_LEN_MASK)); + bptr->u.blp.low = + cpu_to_le32(lower_32_bits(sgl->phys)); + bptr->u.blp.high = + cpu_to_le32(upper_32_bits(sgl->phys)); + } + } + + tsend->relative_offset = cpu_to_le32(params->offset); + + if (params->flags & SLI4_IO_CONTINUATION) + tsend->dw10byte2 |= SLI4_TSEND_XC; + + tsend->xri_tag = cpu_to_le16(params->xri); + + tsend->rpi = cpu_to_le16(params->rpi); + /* WQE uses relative offset */ + tsend->class_pu_ar_byte |= 1 << SLI4_TSEND_WQE_PU_SHFT; + + if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE) + tsend->class_pu_ar_byte |= SLI4_TSEND_WQE_AR; + + tsend->command = SLI4_WQE_FCP_TSEND64; + tsend->class_pu_ar_byte |= SLI4_GENERIC_CLASS_CLASS_3; + tsend->ct_byte |= SLI4_GENERIC_CONTEXT_RPI << SLI4_TSEND_CT_SHFT; + tsend->ct_byte |= dif; + tsend->ct_byte |= bs << SLI4_TSEND_BS_SHFT; + + tsend->remote_xid = cpu_to_le16(params->ox_id); + + tsend->request_tag = cpu_to_le16(params->tag); + + tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_LEN_LOC_BIT2; + + tsend->cq_id = cpu_to_le16(cq_id); + + tsend->cmd_type_byte |= SLI4_CMD_FCP_TSEND64_WQE; + + tsend->fcp_data_transmit_length = cpu_to_le32(params->xmit_len); + + if (sli4->params.perf_hint) { + bptr = &tsend->first_data_bde; + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (le32_to_cpu(sge[first_data_sge].buffer_length) & + SLI4_BDE_LEN_MASK)); + bptr->u.data.low = + sge[first_data_sge].buffer_address_low; + bptr->u.data.high = + sge[first_data_sge].buffer_address_high; + } + + /* The upper 7 bits of csctl is the priority */ + if (params->cs_ctl & SLI4_MASK_CCP) { + tsend->dw10byte2 |= SLI4_TSEND_CCPE; + tsend->ccp = (params->cs_ctl & SLI4_MASK_CCP); + } + + if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES && + !(tsend->dw10byte2 & SLI4_TSEND_EAT)) { + tsend->dw10byte0 |= SLI4_TSEND_APPID_VALID; + tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQES; + tsend_128->dw[31] = params->app_id; + } + return 0; +} + +int +sli_gen_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, + struct sli_ct_params *params) +{ + struct sli4_gen_request64_wqe *gen = buf; + struct sli4_sge *sge = NULL; + struct sli4_bde *bptr; + + memset(buf, 0, sli4->wqe_size); + + if (!sgl || !sgl->virt) { + efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n", + sgl, sgl ? 
sgl->virt : NULL); + return -EIO; + } + sge = sgl->virt; + bptr = &gen->bde; + + if (sli4->params.sgl_pre_registered) { + gen->dw10flags1 &= ~SLI4_GEN_REQ64_WQE_XBL; + + gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_DBDE; + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (params->xmit_len & SLI4_BDE_LEN_MASK)); + + bptr->u.data.low = sge[0].buffer_address_low; + bptr->u.data.high = sge[0].buffer_address_high; + } else { + gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_XBL; + + bptr->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | + ((2 * sizeof(struct sli4_sge)) & + SLI4_BDE_LEN_MASK)); + + bptr->u.blp.low = + cpu_to_le32(lower_32_bits(sgl->phys)); + bptr->u.blp.high = + cpu_to_le32(upper_32_bits(sgl->phys)); + } + + gen->request_payload_length = cpu_to_le32(params->xmit_len); + gen->max_response_payload_length = cpu_to_le32(params->rsp_len); + + gen->df_ctl = params->df_ctl; + gen->type = params->type; + gen->r_ctl = params->r_ctl; + + gen->xri_tag = cpu_to_le16(params->xri); + + gen->ct_byte = SLI4_GENERIC_CONTEXT_RPI << SLI4_GEN_REQ64_CT_SHFT; + gen->context_tag = cpu_to_le16(params->rpi); + + gen->class_byte = SLI4_GENERIC_CLASS_CLASS_3; + + gen->command = SLI4_WQE_GEN_REQUEST64; + + gen->timer = params->timeout; + + gen->request_tag = cpu_to_le16(params->tag); + + gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_IOD; + + gen->dw10flags0 |= SLI4_GEN_REQ64_WQE_QOSD; + + gen->cmd_type_byte = SLI4_CMD_GEN_REQUEST64_WQE; + + gen->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT); + + return 0; +} + +int +sli_send_frame_wqe(struct sli4 *sli, void *buf, u8 sof, u8 eof, u32 *hdr, + struct efc_dma *payload, u32 req_len, u8 timeout, u16 xri, + u16 req_tag) +{ + struct sli4_send_frame_wqe *sf = buf; + + memset(buf, 0, sli->wqe_size); + + sf->dw10flags1 |= SLI4_SF_WQE_DBDE; + sf->bde.bde_type_buflen = cpu_to_le32(req_len & + SLI4_BDE_LEN_MASK); + sf->bde.u.data.low = cpu_to_le32(lower_32_bits(payload->phys)); + sf->bde.u.data.high = cpu_to_le32(upper_32_bits(payload->phys)); + + /* Copy FC header */ + sf->fc_header_0_1[0] = cpu_to_le32(hdr[0]); + sf->fc_header_0_1[1] = cpu_to_le32(hdr[1]); + sf->fc_header_2_5[0] = cpu_to_le32(hdr[2]); + sf->fc_header_2_5[1] = cpu_to_le32(hdr[3]); + sf->fc_header_2_5[2] = cpu_to_le32(hdr[4]); + sf->fc_header_2_5[3] = cpu_to_le32(hdr[5]); + + sf->frame_length = cpu_to_le32(req_len); + + sf->xri_tag = cpu_to_le16(xri); + sf->dw7flags0 &= ~SLI4_SF_PU; + sf->context_tag = 0; + + sf->ct_byte &= ~SLI4_SF_CT; + sf->command = SLI4_WQE_SEND_FRAME; + sf->dw7flags0 |= SLI4_GENERIC_CLASS_CLASS_3; + sf->timer = timeout; + + sf->request_tag = cpu_to_le16(req_tag); + sf->eof = eof; + sf->sof = sof; + + sf->dw10flags1 &= ~SLI4_SF_QOSD; + sf->dw10flags0 |= SLI4_SF_LEN_LOC_BIT1; + sf->dw10flags2 &= ~SLI4_SF_XC; + + sf->dw10flags1 |= SLI4_SF_XBL; + + sf->cmd_type_byte |= SLI4_CMD_SEND_FRAME_WQE; + sf->cq_id = cpu_to_le16(0xffff); + + return 0; +} + +int +sli_xmit_bls_rsp64_wqe(struct sli4 *sli, void *buf, + struct sli_bls_payload *payload, + struct sli_bls_params *params) +{ + struct sli4_xmit_bls_rsp_wqe *bls = buf; + u32 dw_ridflags = 0; + + /* + * Callers can either specify RPI or S_ID, but not both + */ + if (params->rpi_registered && params->s_id != U32_MAX) { + efc_log_info(sli, "S_ID specified for attached remote node %d\n", + params->rpi); + return -EIO; + } + + memset(buf, 0, sli->wqe_size); + + if (payload->type == SLI4_SLI_BLS_ACC) { + bls->payload_word0 = + cpu_to_le32((payload->u.acc.seq_id_last << 16) | + (payload->u.acc.seq_id_validity << 24)); + bls->high_seq_cnt = 
payload->u.acc.high_seq_cnt; + bls->low_seq_cnt = payload->u.acc.low_seq_cnt; + } else if (payload->type == SLI4_SLI_BLS_RJT) { + bls->payload_word0 = + cpu_to_le32(*((u32 *)&payload->u.rjt)); + dw_ridflags |= SLI4_BLS_RSP_WQE_AR; + } else { + efc_log_info(sli, "bad BLS type %#x\n", payload->type); + return -EIO; + } + + bls->ox_id = payload->ox_id; + bls->rx_id = payload->rx_id; + + if (params->rpi_registered) { + bls->dw8flags0 |= + SLI4_GENERIC_CONTEXT_RPI << SLI4_BLS_RSP_WQE_CT_SHFT; + bls->context_tag = cpu_to_le16(params->rpi); + } else { + bls->dw8flags0 |= + SLI4_GENERIC_CONTEXT_VPI << SLI4_BLS_RSP_WQE_CT_SHFT; + bls->context_tag = cpu_to_le16(params->vpi); + + bls->local_n_port_id_dword |= + cpu_to_le32(params->s_id & 0x00ffffff); + + dw_ridflags = (dw_ridflags & ~SLI4_BLS_RSP_RID) | + (params->d_id & SLI4_BLS_RSP_RID); + + bls->temporary_rpi = cpu_to_le16(params->rpi); + } + + bls->xri_tag = cpu_to_le16(params->xri); + + bls->dw8flags1 |= SLI4_GENERIC_CLASS_CLASS_3; + + bls->command = SLI4_WQE_XMIT_BLS_RSP; + + bls->request_tag = cpu_to_le16(params->tag); + + bls->dw11flags1 |= SLI4_BLS_RSP_WQE_QOSD; + + bls->remote_id_dword = cpu_to_le32(dw_ridflags); + bls->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT); + + bls->dw12flags0 |= SLI4_CMD_XMIT_BLS_RSP64_WQE; + + return 0; +} + +int +sli_xmit_els_rsp64_wqe(struct sli4 *sli, void *buf, struct efc_dma *rsp, + struct sli_els_params *params) +{ + struct sli4_xmit_els_rsp64_wqe *els = buf; + + memset(buf, 0, sli->wqe_size); + + if (sli->params.sgl_pre_registered) + els->flags2 |= SLI4_ELS_DBDE; + else + els->flags2 |= SLI4_ELS_XBL; + + els->els_response_payload.bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (params->rsp_len & SLI4_BDE_LEN_MASK)); + els->els_response_payload.u.data.low = + cpu_to_le32(lower_32_bits(rsp->phys)); + els->els_response_payload.u.data.high = + cpu_to_le32(upper_32_bits(rsp->phys)); + + els->els_response_payload_length = cpu_to_le32(params->rsp_len); + + els->xri_tag = cpu_to_le16(params->xri); + + els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3; + + els->command = SLI4_WQE_ELS_RSP64; + + els->request_tag = cpu_to_le16(params->tag); + + els->ox_id = cpu_to_le16(params->ox_id); + + els->flags2 |= SLI4_ELS_QOSD; + + els->cmd_type_wqec = SLI4_ELS_REQUEST64_CMD_GEN; + + els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT); + + if (params->rpi_registered) { + els->ct_byte |= + SLI4_GENERIC_CONTEXT_RPI << SLI4_ELS_CT_OFFSET; + els->context_tag = cpu_to_le16(params->rpi); + return 0; + } + + els->ct_byte |= SLI4_GENERIC_CONTEXT_VPI << SLI4_ELS_CT_OFFSET; + els->context_tag = cpu_to_le16(params->vpi); + els->rid_dw = cpu_to_le32(params->d_id & SLI4_ELS_RID); + els->temporary_rpi = cpu_to_le16(params->rpi); + if (params->s_id != U32_MAX) { + els->sid_dw |= + cpu_to_le32(SLI4_ELS_SP | (params->s_id & SLI4_ELS_SID)); + } + + return 0; +} + +int +sli_xmit_sequence64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *payload, + struct sli_ct_params *params) +{ + struct sli4_xmit_sequence64_wqe *xmit = buf; + + memset(buf, 0, sli4->wqe_size); + + if (!payload || !payload->virt) { + efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n", + payload, payload ? 
payload->virt : NULL); + return -EIO; + } + + if (sli4->params.sgl_pre_registered) + xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_DBDE); + else + xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_XBL); + + xmit->bde.bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (params->rsp_len & SLI4_BDE_LEN_MASK)); + xmit->bde.u.data.low = + cpu_to_le32(lower_32_bits(payload->phys)); + xmit->bde.u.data.high = + cpu_to_le32(upper_32_bits(payload->phys)); + xmit->sequence_payload_len = cpu_to_le32(params->rsp_len); + + xmit->remote_n_port_id_dword |= cpu_to_le32(params->d_id & 0x00ffffff); + + xmit->relative_offset = 0; + + /* sequence initiative - this matches what is seen from + * FC switches in response to FCGS commands + */ + xmit->dw5flags0 &= (~SLI4_SEQ_WQE_SI); + xmit->dw5flags0 &= (~SLI4_SEQ_WQE_FT);/* force transmit */ + xmit->dw5flags0 &= (~SLI4_SEQ_WQE_XO);/* exchange responder */ + xmit->dw5flags0 |= SLI4_SEQ_WQE_LS;/* last in seqence */ + xmit->df_ctl = params->df_ctl; + xmit->type = params->type; + xmit->r_ctl = params->r_ctl; + + xmit->xri_tag = cpu_to_le16(params->xri); + xmit->context_tag = cpu_to_le16(params->rpi); + + xmit->dw7flags0 &= ~SLI4_SEQ_WQE_DIF; + xmit->dw7flags0 |= + SLI4_GENERIC_CONTEXT_RPI << SLI4_SEQ_WQE_CT_SHIFT; + xmit->dw7flags0 &= ~SLI4_SEQ_WQE_BS; + + xmit->command = SLI4_WQE_XMIT_SEQUENCE64; + xmit->dw7flags1 |= SLI4_GENERIC_CLASS_CLASS_3; + xmit->dw7flags1 &= ~SLI4_SEQ_WQE_PU; + xmit->timer = params->timeout; + + xmit->abort_tag = 0; + xmit->request_tag = cpu_to_le16(params->tag); + xmit->remote_xid = cpu_to_le16(params->ox_id); + + xmit->dw10w0 |= + cpu_to_le16(SLI4_ELS_REQUEST64_DIR_READ << SLI4_SEQ_WQE_IOD_SHIFT); + + xmit->cmd_type_wqec_byte |= SLI4_CMD_XMIT_SEQUENCE64_WQE; + + xmit->dw10w0 |= cpu_to_le16(2 << SLI4_SEQ_WQE_LEN_LOC_SHIFT); + + xmit->cq_id = cpu_to_le16(0xFFFF); + + return 0; +} + +int +sli_requeue_xri_wqe(struct sli4 *sli4, void *buf, u16 xri, u16 tag, u16 cq_id) +{ + struct sli4_requeue_xri_wqe *requeue = buf; + + memset(buf, 0, sli4->wqe_size); + + requeue->command = SLI4_WQE_REQUEUE_XRI; + requeue->xri_tag = cpu_to_le16(xri); + requeue->request_tag = cpu_to_le16(tag); + requeue->flags2 |= cpu_to_le16(SLI4_REQU_XRI_WQE_XC); + requeue->flags1 |= cpu_to_le16(SLI4_REQU_XRI_WQE_QOSD); + requeue->cq_id = cpu_to_le16(cq_id); + requeue->cmd_type_wqec_byte = SLI4_CMD_REQUEUE_XRI_WQE; + return 0; +} + +int +sli_fc_process_link_attention(struct sli4 *sli4, void *acqe) +{ + struct sli4_link_attention *link_attn = acqe; + struct sli4_link_event event = { 0 }; + + efc_log_info(sli4, "link=%d attn_type=%#x top=%#x speed=%#x pfault=%#x\n", + link_attn->link_number, link_attn->attn_type, + link_attn->topology, link_attn->port_speed, + link_attn->port_fault); + efc_log_info(sli4, "shared_lnk_status=%#x logl_lnk_speed=%#x evttag=%#x\n", + link_attn->shared_link_status, + le16_to_cpu(link_attn->logical_link_speed), + le32_to_cpu(link_attn->event_tag)); + + if (!sli4->link) + return -EIO; + + event.medium = SLI4_LINK_MEDIUM_FC; + + switch (link_attn->attn_type) { + case SLI4_LNK_ATTN_TYPE_LINK_UP: + event.status = SLI4_LINK_STATUS_UP; + break; + case SLI4_LNK_ATTN_TYPE_LINK_DOWN: + event.status = SLI4_LINK_STATUS_DOWN; + break; + case SLI4_LNK_ATTN_TYPE_NO_HARD_ALPA: + efc_log_info(sli4, "attn_type: no hard alpa\n"); + event.status = SLI4_LINK_STATUS_NO_ALPA; + break; + default: + efc_log_info(sli4, "attn_type: unknown\n"); + break; + } + + switch (link_attn->event_type) { + case SLI4_EVENT_LINK_ATTENTION: + break; + case SLI4_EVENT_SHARED_LINK_ATTENTION: + 
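/* Shared-link attention is log-only; event.status still comes from the attn_type switch above. */ +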
efc_log_info(sli4, "event_type: FC shared link event\n"); + break; + default: + efc_log_info(sli4, "event_type: unknown\n"); + break; + } + + switch (link_attn->topology) { + case SLI4_LNK_ATTN_P2P: + event.topology = SLI4_LINK_TOPO_NON_FC_AL; + break; + case SLI4_LNK_ATTN_FC_AL: + event.topology = SLI4_LINK_TOPO_FC_AL; + break; + case SLI4_LNK_ATTN_INTERNAL_LOOPBACK: + efc_log_info(sli4, "topology Internal loopback\n"); + event.topology = SLI4_LINK_TOPO_LOOPBACK_INTERNAL; + break; + case SLI4_LNK_ATTN_SERDES_LOOPBACK: + efc_log_info(sli4, "topology serdes loopback\n"); + event.topology = SLI4_LINK_TOPO_LOOPBACK_EXTERNAL; + break; + default: + efc_log_info(sli4, "topology: unknown\n"); + break; + } + + event.speed = link_attn->port_speed * 1000; + + sli4->link(sli4->link_arg, (void *)&event); + + return 0; +} + +int +sli_fc_cqe_parse(struct sli4 *sli4, struct sli4_queue *cq, + u8 *cqe, enum sli4_qentry *etype, u16 *r_id) +{ + u8 code = cqe[SLI4_CQE_CODE_OFFSET]; + int rc; + + switch (code) { + case SLI4_CQE_CODE_WORK_REQUEST_COMPLETION: + { + struct sli4_fc_wcqe *wcqe = (void *)cqe; + + *etype = SLI4_QENTRY_WQ; + *r_id = le16_to_cpu(wcqe->request_tag); + rc = wcqe->status; + + /* Flag errors except for FCP_RSP_FAILURE */ + if (rc && rc != SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE) { + efc_log_info(sli4, "WCQE: status=%#x hw_status=%#x tag=%#x\n", + wcqe->status, wcqe->hw_status, + le16_to_cpu(wcqe->request_tag)); + efc_log_info(sli4, "w1=%#x w2=%#x xb=%d\n", + le32_to_cpu(wcqe->wqe_specific_1), + le32_to_cpu(wcqe->wqe_specific_2), + (wcqe->flags & SLI4_WCQE_XB)); + efc_log_info(sli4, " %08X %08X %08X %08X\n", + ((u32 *)cqe)[0], ((u32 *)cqe)[1], + ((u32 *)cqe)[2], ((u32 *)cqe)[3]); + } + + break; + } + case SLI4_CQE_CODE_RQ_ASYNC: + { + struct sli4_fc_async_rcqe *rcqe = (void *)cqe; + + *etype = SLI4_QENTRY_RQ; + *r_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID; + rc = rcqe->status; + break; + } + case SLI4_CQE_CODE_RQ_ASYNC_V1: + { + struct sli4_fc_async_rcqe_v1 *rcqe = (void *)cqe; + + *etype = SLI4_QENTRY_RQ; + *r_id = le16_to_cpu(rcqe->rq_id); + rc = rcqe->status; + break; + } + case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD: + { + struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe; + + *etype = SLI4_QENTRY_OPT_WRITE_CMD; + *r_id = le16_to_cpu(optcqe->rq_id); + rc = optcqe->status; + break; + } + case SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA: + { + struct sli4_fc_optimized_write_data_cqe *dcqe = (void *)cqe; + + *etype = SLI4_QENTRY_OPT_WRITE_DATA; + *r_id = le16_to_cpu(dcqe->xri); + rc = dcqe->status; + + /* Flag errors */ + if (rc != SLI4_FC_WCQE_STATUS_SUCCESS) { + efc_log_info(sli4, "Optimized DATA CQE: status=%#x\n", + dcqe->status); + efc_log_info(sli4, "hstat=%#x xri=%#x dpl=%#x w3=%#x xb=%d\n", + dcqe->hw_status, le16_to_cpu(dcqe->xri), + le32_to_cpu(dcqe->total_data_placed), + ((u32 *)cqe)[3], + (dcqe->flags & SLI4_OCQE_XB)); + } + break; + } + case SLI4_CQE_CODE_RQ_COALESCING: + { + struct sli4_fc_coalescing_rcqe *rcqe = (void *)cqe; + + *etype = SLI4_QENTRY_RQ; + *r_id = le16_to_cpu(rcqe->rq_id); + rc = rcqe->status; + break; + } + case SLI4_CQE_CODE_XRI_ABORTED: + { + struct sli4_fc_xri_aborted_cqe *xa = (void *)cqe; + + *etype = SLI4_QENTRY_XABT; + *r_id = le16_to_cpu(xa->xri); + rc = 0; + break; + } + case SLI4_CQE_CODE_RELEASE_WQE: + { + struct sli4_fc_wqec *wqec = (void *)cqe; + + *etype = SLI4_QENTRY_WQ_RELEASE; + *r_id = le16_to_cpu(wqec->wq_id); + rc = 0; + break; + } + default: + efc_log_info(sli4, "CQE completion code %d not handled\n", + code); + *etype = 
SLI4_QENTRY_MAX; + *r_id = U16_MAX; + rc = -EINVAL; + } + + return rc; +} + +u32 +sli_fc_response_length(struct sli4 *sli4, u8 *cqe) +{ + struct sli4_fc_wcqe *wcqe = (void *)cqe; + + return le32_to_cpu(wcqe->wqe_specific_1); +} + +u32 +sli_fc_io_length(struct sli4 *sli4, u8 *cqe) +{ + struct sli4_fc_wcqe *wcqe = (void *)cqe; + + return le32_to_cpu(wcqe->wqe_specific_1); +} + +int +sli_fc_els_did(struct sli4 *sli4, u8 *cqe, u32 *d_id) +{ + struct sli4_fc_wcqe *wcqe = (void *)cqe; + + *d_id = 0; + + if (wcqe->status) + return -EIO; + *d_id = le32_to_cpu(wcqe->wqe_specific_2) & 0x00ffffff; + return 0; +} + +u32 +sli_fc_ext_status(struct sli4 *sli4, u8 *cqe) +{ + struct sli4_fc_wcqe *wcqe = (void *)cqe; + u32 mask; + + switch (wcqe->status) { + case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE: + mask = U32_MAX; + break; + case SLI4_FC_WCQE_STATUS_LOCAL_REJECT: + case SLI4_FC_WCQE_STATUS_CMD_REJECT: + mask = 0xff; + break; + case SLI4_FC_WCQE_STATUS_NPORT_RJT: + case SLI4_FC_WCQE_STATUS_FABRIC_RJT: + case SLI4_FC_WCQE_STATUS_NPORT_BSY: + case SLI4_FC_WCQE_STATUS_FABRIC_BSY: + case SLI4_FC_WCQE_STATUS_LS_RJT: + mask = U32_MAX; + break; + case SLI4_FC_WCQE_STATUS_DI_ERROR: + mask = U32_MAX; + break; + default: + mask = 0; + } + + return le32_to_cpu(wcqe->wqe_specific_2) & mask; +} + +int +sli_fc_rqe_rqid_and_index(struct sli4 *sli4, u8 *cqe, u16 *rq_id, u32 *index) +{ + int rc = -EIO; + u8 code = 0; + u16 rq_element_index; + + *rq_id = 0; + *index = U32_MAX; + + code = cqe[SLI4_CQE_CODE_OFFSET]; + + /* Retrieve the RQ index from the completion */ + if (code == SLI4_CQE_CODE_RQ_ASYNC) { + struct sli4_fc_async_rcqe *rcqe = (void *)cqe; + + *rq_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID; + rq_element_index = + le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX; + *index = rq_element_index; + if (rcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) { + rc = 0; + } else { + rc = rcqe->status; + efc_log_info(sli4, "status=%02x (%s) rq_id=%d\n", + rcqe->status, + sli_fc_get_status_string(rcqe->status), + le16_to_cpu(rcqe->fcfi_rq_id_word) & + SLI4_RACQE_RQ_ID); + + efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n", + le16_to_cpu(rcqe->data_placement_length), + rcqe->sof_byte, rcqe->eof_byte, + rcqe->hdpl_byte & SLI4_RACQE_HDPL); + } + } else if (code == SLI4_CQE_CODE_RQ_ASYNC_V1) { + struct sli4_fc_async_rcqe_v1 *rcqe_v1 = (void *)cqe; + + *rq_id = le16_to_cpu(rcqe_v1->rq_id); + rq_element_index = + (le16_to_cpu(rcqe_v1->rq_elmt_indx_word) & + SLI4_RACQE_RQ_EL_INDX); + *index = rq_element_index; + if (rcqe_v1->status == SLI4_FC_ASYNC_RQ_SUCCESS) { + rc = 0; + } else { + rc = rcqe_v1->status; + efc_log_info(sli4, "status=%02x (%s) rq_id=%d, index=%x\n", + rcqe_v1->status, + sli_fc_get_status_string(rcqe_v1->status), + le16_to_cpu(rcqe_v1->rq_id), rq_element_index); + + efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n", + le16_to_cpu(rcqe_v1->data_placement_length), + rcqe_v1->sof_byte, rcqe_v1->eof_byte, + rcqe_v1->hdpl_byte & SLI4_RACQE_HDPL); + } + } else if (code == SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD) { + struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe; + + *rq_id = le16_to_cpu(optcqe->rq_id); + *index = le16_to_cpu(optcqe->w1) & SLI4_OCQE_RQ_EL_INDX; + if (optcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) { + rc = 0; + } else { + rc = optcqe->status; + efc_log_info(sli4, "stat=%02x (%s) rqid=%d, idx=%x pdpl=%x\n", + optcqe->status, + sli_fc_get_status_string(optcqe->status), + le16_to_cpu(optcqe->rq_id), *index, + le16_to_cpu(optcqe->data_placement_length)); + + 
efc_log_info(sli4, "hdpl=%x oox=%d agxr=%d xri=0x%x rpi=%x\n", + (optcqe->hdpl_vld & SLI4_OCQE_HDPL), + (optcqe->flags1 & SLI4_OCQE_OOX), + (optcqe->flags1 & SLI4_OCQE_AGXR), + optcqe->xri, le16_to_cpu(optcqe->rpi)); + } + } else if (code == SLI4_CQE_CODE_RQ_COALESCING) { + struct sli4_fc_coalescing_rcqe *rcqe = (void *)cqe; + + rq_element_index = (le16_to_cpu(rcqe->rq_elmt_indx_word) & + SLI4_RCQE_RQ_EL_INDX); + + *rq_id = le16_to_cpu(rcqe->rq_id); + if (rcqe->status == SLI4_FC_COALESCE_RQ_SUCCESS) { + *index = rq_element_index; + rc = 0; + } else { + *index = U32_MAX; + rc = rcqe->status; + + efc_log_info(sli4, "stat=%02x (%s) rq_id=%d, idx=%x\n", + rcqe->status, + sli_fc_get_status_string(rcqe->status), + le16_to_cpu(rcqe->rq_id), rq_element_index); + efc_log_info(sli4, "rq_id=%#x sdpl=%x\n", + le16_to_cpu(rcqe->rq_id), + le16_to_cpu(rcqe->seq_placement_length)); + } + } else { + struct sli4_fc_async_rcqe *rcqe = (void *)cqe; + + *index = U32_MAX; + rc = rcqe->status; + + efc_log_info(sli4, "status=%02x rq_id=%d, index=%x pdpl=%x\n", + rcqe->status, + le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID, + (le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX), + le16_to_cpu(rcqe->data_placement_length)); + efc_log_info(sli4, "sof=%02x eof=%02x hdpl=%x\n", + rcqe->sof_byte, rcqe->eof_byte, + rcqe->hdpl_byte & SLI4_RACQE_HDPL); + } + + return rc; +} + +static int +sli_bmbx_wait(struct sli4 *sli4, u32 msec) +{ + u32 val; + unsigned long end; + + /* Wait for the bootstrap mailbox to report "ready" */ + end = jiffies + msecs_to_jiffies(msec); + do { + val = readl(sli4->reg[0] + SLI4_BMBX_REG); + if (val & SLI4_BMBX_RDY) + return 0; + + usleep_range(1000, 2000); + } while (time_before(jiffies, end)); + + return -EIO; +} + +static int +sli_bmbx_write(struct sli4 *sli4) +{ + u32 val; + + /* write buffer location to bootstrap mailbox register */ + val = sli_bmbx_write_hi(sli4->bmbx.phys); + writel(val, (sli4->reg[0] + SLI4_BMBX_REG)); + + if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) { + efc_log_crit(sli4, "BMBX WRITE_HI failed\n"); + return -EIO; + } + val = sli_bmbx_write_lo(sli4->bmbx.phys); + writel(val, (sli4->reg[0] + SLI4_BMBX_REG)); + + /* wait for SLI Port to set ready bit */ + return sli_bmbx_wait(sli4, SLI4_BMBX_TIMEOUT_MSEC); +} + +int +sli_bmbx_command(struct sli4 *sli4) +{ + void *cqe = (u8 *)sli4->bmbx.virt + SLI4_BMBX_SIZE; + + if (sli_fw_error_status(sli4) > 0) { + efc_log_crit(sli4, "Chip is in an error state -Mailbox command rejected"); + efc_log_crit(sli4, " status=%#x error1=%#x error2=%#x\n", + sli_reg_read_status(sli4), + sli_reg_read_err1(sli4), + sli_reg_read_err2(sli4)); + return -EIO; + } + + /* Submit a command to the bootstrap mailbox and check the status */ + if (sli_bmbx_write(sli4)) { + efc_log_crit(sli4, "bmbx write fail phys=%pad reg=%#x\n", + &sli4->bmbx.phys, readl(sli4->reg[0] + SLI4_BMBX_REG)); + return -EIO; + } + + /* check completion queue entry status */ + if (le32_to_cpu(((struct sli4_mcqe *)cqe)->dw3_flags) & + SLI4_MCQE_VALID) { + return sli_cqe_mq(sli4, cqe); + } + efc_log_crit(sli4, "invalid or wrong type\n"); + return -EIO; +} + +int +sli_cmd_config_link(struct sli4 *sli4, void *buf) +{ + struct sli4_cmd_config_link *config_link = buf; + + memset(buf, 0, SLI4_BMBX_SIZE); + + config_link->hdr.command = SLI4_MBX_CMD_CONFIG_LINK; + + /* Port interprets zero in a field as "use default value" */ + + return 0; +} + +int +sli_cmd_down_link(struct sli4 *sli4, void *buf) +{ + struct sli4_mbox_command_header *hdr = buf; + + memset(buf, 0, 
SLI4_BMBX_SIZE); + + hdr->command = SLI4_MBX_CMD_DOWN_LINK; + + /* Port interprets zero in a field as "use default value" */ + + return 0; +} + +int +sli_cmd_dump_type4(struct sli4 *sli4, void *buf, u16 wki) +{ + struct sli4_cmd_dump4 *cmd = buf; + + memset(buf, 0, SLI4_BMBX_SIZE); + + cmd->hdr.command = SLI4_MBX_CMD_DUMP; + cmd->type_dword = cpu_to_le32(0x4); + cmd->wki_selection = cpu_to_le16(wki); + return 0; +} + +int +sli_cmd_common_read_transceiver_data(struct sli4 *sli4, void *buf, u32 page_num, + struct efc_dma *dma) +{ + struct sli4_rqst_cmn_read_transceiver_data *req = NULL; + u32 psize; + + if (!dma) + psize = SLI4_CFG_PYLD_LENGTH(cmn_read_transceiver_data); + else + psize = dma->size; + + req = sli_config_cmd_init(sli4, buf, psize, dma); + if (!req) + return -EIO; + + sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_READ_TRANS_DATA, + SLI4_SUBSYSTEM_COMMON, CMD_V0, + SLI4_RQST_PYLD_LEN(cmn_read_transceiver_data)); + + req->page_number = cpu_to_le32(page_num); + req->port = cpu_to_le32(sli4->port_number); + + return 0; +} + +int +sli_cmd_read_link_stats(struct sli4 *sli4, void *buf, u8 req_ext_counters, + u8 clear_overflow_flags, + u8 clear_all_counters) +{ + struct sli4_cmd_read_link_stats *cmd = buf; + u32 flags; + + memset(buf, 0, SLI4_BMBX_SIZE); + + cmd->hdr.command = SLI4_MBX_CMD_READ_LNK_STAT; + + flags = 0; + if (req_ext_counters) + flags |= SLI4_READ_LNKSTAT_REC; + if (clear_all_counters) + flags |= SLI4_READ_LNKSTAT_CLRC; + if (clear_overflow_flags) + flags |= SLI4_READ_LNKSTAT_CLOF; + + cmd->dw1_flags = cpu_to_le32(flags); + return 0; +} + +int +sli_cmd_read_status(struct sli4 *sli4, void *buf, u8 clear_counters) +{ + struct sli4_cmd_read_status *cmd = buf; + u32 flags = 0; + + memset(buf, 0, SLI4_BMBX_SIZE); + + cmd->hdr.command = SLI4_MBX_CMD_READ_STATUS; + if (clear_counters) + flags |= SLI4_READSTATUS_CLEAR_COUNTERS; + else + flags &= ~SLI4_READSTATUS_CLEAR_COUNTERS; + + cmd->dw1_flags = cpu_to_le32(flags); + return 0; +} + +int +sli_cmd_init_link(struct sli4 *sli4, void *buf, u32 speed, u8 reset_alpa) +{ + struct sli4_cmd_init_link *init_link = buf; + u32 flags = 0; + + memset(buf, 0, SLI4_BMBX_SIZE); + + init_link->hdr.command = SLI4_MBX_CMD_INIT_LINK; + + init_link->sel_reset_al_pa_dword = + cpu_to_le32(reset_alpa); + flags &= ~SLI4_INIT_LINK_F_LOOPBACK; + + init_link->link_speed_sel_code = cpu_to_le32(speed); + switch (speed) { + case SLI4_LINK_SPEED_1G: + case SLI4_LINK_SPEED_2G: + case SLI4_LINK_SPEED_4G: + case SLI4_LINK_SPEED_8G: + case SLI4_LINK_SPEED_16G: + case SLI4_LINK_SPEED_32G: + case SLI4_LINK_SPEED_64G: + flags |= SLI4_INIT_LINK_F_FIXED_SPEED; + break; + case SLI4_LINK_SPEED_10G: + efc_log_info(sli4, "unsupported FC speed %d\n", speed); + init_link->flags0 = cpu_to_le32(flags); + return -EIO; + } + + switch (sli4->topology) { + case SLI4_READ_CFG_TOPO_FC: + /* Attempt P2P but failover to FC-AL */ + flags |= SLI4_INIT_LINK_F_FAIL_OVER; + flags |= SLI4_INIT_LINK_F_P2P_FAIL_OVER; + break; + case SLI4_READ_CFG_TOPO_FC_AL: + flags |= SLI4_INIT_LINK_F_FCAL_ONLY; + if (speed == SLI4_LINK_SPEED_16G || + speed == SLI4_LINK_SPEED_32G) { + efc_log_info(sli4, "unsupported FC-AL speed %d\n", + speed); + init_link->flags0 = cpu_to_le32(flags); + return -EIO; + } + break; + case SLI4_READ_CFG_TOPO_NON_FC_AL: + flags |= SLI4_INIT_LINK_F_P2P_ONLY; + break; + default: + + efc_log_info(sli4, "unsupported topology %#x\n", sli4->topology); + + init_link->flags0 = cpu_to_le32(flags); + return -EIO; + } + + flags &= ~SLI4_INIT_LINK_F_UNFAIR; + flags &= ~SLI4_INIT_LINK_F_NO_LIRP; + 
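/* Clear the remaining optional loop-arbitration bits so only the fixed-speed and topology flags chosen above reach flags0. */ +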
flags &= ~SLI4_INIT_LINK_F_LOOP_VALID_CHK; + flags &= ~SLI4_INIT_LINK_F_NO_LISA; + flags &= ~SLI4_INIT_LINK_F_PICK_HI_ALPA; + init_link->flags0 = cpu_to_le32(flags); + + return 0; +} + +int +sli_cmd_init_vfi(struct sli4 *sli4, void *buf, u16 vfi, u16 fcfi, u16 vpi) +{ + struct sli4_cmd_init_vfi *init_vfi = buf; + u16 flags = 0; + + memset(buf, 0, SLI4_BMBX_SIZE); + + init_vfi->hdr.command = SLI4_MBX_CMD_INIT_VFI; + init_vfi->vfi = cpu_to_le16(vfi); + init_vfi->fcfi = cpu_to_le16(fcfi); + + /* + * If the VPI is valid, initialize it at the same time as + * the VFI + */ + if (vpi != U16_MAX) { + flags |= SLI4_INIT_VFI_FLAG_VP; + init_vfi->flags0_word = cpu_to_le16(flags); + init_vfi->vpi = cpu_to_le16(vpi); + } + + return 0; +} + +int +sli_cmd_init_vpi(struct sli4 *sli4, void *buf, u16 vpi, u16 vfi) +{ + struct sli4_cmd_init_vpi *init_vpi = buf; + + memset(buf, 0, SLI4_BMBX_SIZE); + + init_vpi->hdr.command = SLI4_MBX_CMD_INIT_VPI; + init_vpi->vpi = cpu_to_le16(vpi); + init_vpi->vfi = cpu_to_le16(vfi); + + return 0; +} + +int +sli_cmd_post_xri(struct sli4 *sli4, void *buf, u16 xri_base, u16 xri_count) +{ + struct sli4_cmd_post_xri *post_xri = buf; + u16 xri_count_flags = 0; + + memset(buf, 0, SLI4_BMBX_SIZE); + + post_xri->hdr.command = SLI4_MBX_CMD_POST_XRI; + post_xri->xri_base = cpu_to_le16(xri_base); + xri_count_flags = xri_count & SLI4_POST_XRI_COUNT; + xri_count_flags |= SLI4_POST_XRI_FLAG_ENX; + xri_count_flags |= SLI4_POST_XRI_FLAG_VAL; + post_xri->xri_count_flags = cpu_to_le16(xri_count_flags); + + return 0; +} + +int +sli_cmd_release_xri(struct sli4 *sli4, void *buf, u8 num_xri) +{ + struct sli4_cmd_release_xri *release_xri = buf; + + memset(buf, 0, SLI4_BMBX_SIZE); + + release_xri->hdr.command = SLI4_MBX_CMD_RELEASE_XRI; + release_xri->xri_count_word = cpu_to_le16(num_xri & + SLI4_RELEASE_XRI_COUNT); + + return 0; +} + +static int +sli_cmd_read_config(struct sli4 *sli4, void *buf) +{ + struct sli4_cmd_read_config *read_config = buf; + + memset(buf, 0, SLI4_BMBX_SIZE); + + read_config->hdr.command = SLI4_MBX_CMD_READ_CONFIG; + + return 0; +} + +int +sli_cmd_read_nvparms(struct sli4 *sli4, void *buf) +{ + struct sli4_cmd_read_nvparms *read_nvparms = buf; + + memset(buf, 0, SLI4_BMBX_SIZE); + + read_nvparms->hdr.command = SLI4_MBX_CMD_READ_NVPARMS; + + return 0; +} + +int +sli_cmd_write_nvparms(struct sli4 *sli4, void *buf, u8 *wwpn, u8 *wwnn, + u8 hard_alpa, u32 preferred_d_id) +{ + struct sli4_cmd_write_nvparms *write_nvparms = buf; + + memset(buf, 0, SLI4_BMBX_SIZE); + + write_nvparms->hdr.command = SLI4_MBX_CMD_WRITE_NVPARMS; + memcpy(write_nvparms->wwpn, wwpn, 8); + memcpy(write_nvparms->wwnn, wwnn, 8); + + write_nvparms->hard_alpa_d_id = + cpu_to_le32((preferred_d_id << 8) | hard_alpa); + return 0; +} + +static int +sli_cmd_read_rev(struct sli4 *sli4, void *buf, struct efc_dma *vpd) +{ + struct sli4_cmd_read_rev *read_rev = buf; + + memset(buf, 0, SLI4_BMBX_SIZE); + + read_rev->hdr.command = SLI4_MBX_CMD_READ_REV; + + if (vpd && vpd->size) { + read_rev->flags0_word |= cpu_to_le16(SLI4_READ_REV_FLAG_VPD); + + read_rev->available_length_dword = + cpu_to_le32(vpd->size & + SLI4_READ_REV_AVAILABLE_LENGTH); + + read_rev->hostbuf.low = + cpu_to_le32(lower_32_bits(vpd->phys)); + read_rev->hostbuf.high = + cpu_to_le32(upper_32_bits(vpd->phys)); + } + + return 0; +} + +int +sli_cmd_read_sparm64(struct sli4 *sli4, void *buf, struct efc_dma *dma, u16 vpi) +{ + struct sli4_cmd_read_sparm64 *read_sparm64 = buf; + + if (vpi == U16_MAX) { + efc_log_err(sli4, "special VPI not supported!!!\n"); + 
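/* U16_MAX marks the "special" VPI case, for which this helper does not build a command. */ +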
return -EIO; + } + + if (!dma || !dma->phys) { + efc_log_err(sli4, "bad DMA buffer\n"); + return -EIO; + } + + memset(buf, 0, SLI4_BMBX_SIZE); + + read_sparm64->hdr.command = SLI4_MBX_CMD_READ_SPARM64; + + read_sparm64->bde_64.bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (dma->size & SLI4_BDE_LEN_MASK)); + read_sparm64->bde_64.u.data.low = + cpu_to_le32(lower_32_bits(dma->phys)); + read_sparm64->bde_64.u.data.high = + cpu_to_le32(upper_32_bits(dma->phys)); + + read_sparm64->vpi = cpu_to_le16(vpi); + + return 0; +} + +int +sli_cmd_read_topology(struct sli4 *sli4, void *buf, struct efc_dma *dma) +{ + struct sli4_cmd_read_topology *read_topo = buf; + + if (!dma || !dma->size) + return -EIO; + + if (dma->size < SLI4_MIN_LOOP_MAP_BYTES) { + efc_log_err(sli4, "loop map buffer too small %zx\n", dma->size); + return -EIO; + } + + memset(buf, 0, SLI4_BMBX_SIZE); + + read_topo->hdr.command = SLI4_MBX_CMD_READ_TOPOLOGY; + + memset(dma->virt, 0, dma->size); + + read_topo->bde_loop_map.bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (dma->size & SLI4_BDE_LEN_MASK)); + read_topo->bde_loop_map.u.data.low = + cpu_to_le32(lower_32_bits(dma->phys)); + read_topo->bde_loop_map.u.data.high = + cpu_to_le32(upper_32_bits(dma->phys)); + + return 0; +} + +int +sli_cmd_reg_fcfi(struct sli4 *sli4, void *buf, u16 index, + struct sli4_cmd_rq_cfg *rq_cfg) +{ + struct sli4_cmd_reg_fcfi *reg_fcfi = buf; + u32 i; + + memset(buf, 0, SLI4_BMBX_SIZE); + + reg_fcfi->hdr.command = SLI4_MBX_CMD_REG_FCFI; + + reg_fcfi->fcf_index = cpu_to_le16(index); + + for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) { + switch (i) { + case 0: + reg_fcfi->rqid0 = rq_cfg[0].rq_id; + break; + case 1: + reg_fcfi->rqid1 = rq_cfg[1].rq_id; + break; + case 2: + reg_fcfi->rqid2 = rq_cfg[2].rq_id; + break; + case 3: + reg_fcfi->rqid3 = rq_cfg[3].rq_id; + break; + } + reg_fcfi->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask; + reg_fcfi->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match; + reg_fcfi->rq_cfg[i].type_mask = rq_cfg[i].type_mask; + reg_fcfi->rq_cfg[i].type_match = rq_cfg[i].type_match; + } + + return 0; +} + +int +sli_cmd_reg_fcfi_mrq(struct sli4 *sli4, void *buf, u8 mode, u16 fcf_index, + u8 rq_selection_policy, u8 mrq_bit_mask, u16 num_mrqs, + struct sli4_cmd_rq_cfg *rq_cfg) +{ + struct sli4_cmd_reg_fcfi_mrq *reg_fcfi_mrq = buf; + u32 i; + u32 mrq_flags = 0; + + memset(buf, 0, SLI4_BMBX_SIZE); + + reg_fcfi_mrq->hdr.command = SLI4_MBX_CMD_REG_FCFI_MRQ; + if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) { + reg_fcfi_mrq->fcf_index = cpu_to_le16(fcf_index); + goto done; + } + + reg_fcfi_mrq->dw8_vlan = cpu_to_le32(SLI4_REGFCFI_MRQ_MODE); + + for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) { + reg_fcfi_mrq->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask; + reg_fcfi_mrq->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match; + reg_fcfi_mrq->rq_cfg[i].type_mask = rq_cfg[i].type_mask; + reg_fcfi_mrq->rq_cfg[i].type_match = rq_cfg[i].type_match; + + switch (i) { + case 3: + reg_fcfi_mrq->rqid3 = rq_cfg[i].rq_id; + break; + case 2: + reg_fcfi_mrq->rqid2 = rq_cfg[i].rq_id; + break; + case 1: + reg_fcfi_mrq->rqid1 = rq_cfg[i].rq_id; + break; + case 0: + reg_fcfi_mrq->rqid0 = rq_cfg[i].rq_id; + break; + } + } + + mrq_flags = num_mrqs & SLI4_REGFCFI_MRQ_MASK_NUM_PAIRS; + mrq_flags |= (mrq_bit_mask << 8); + mrq_flags |= (rq_selection_policy << 12); + reg_fcfi_mrq->dw9_mrqflags = cpu_to_le32(mrq_flags); +done: + return 0; +} + +int +sli_cmd_reg_rpi(struct sli4 *sli4, void *buf, u32 rpi, u32 vpi, u32 fc_id, + struct efc_dma *dma, u8 update, u8 
enable_t10_pi) +{ + struct sli4_cmd_reg_rpi *reg_rpi = buf; + u32 rportid_flags = 0; + + memset(buf, 0, SLI4_BMBX_SIZE); + + reg_rpi->hdr.command = SLI4_MBX_CMD_REG_RPI; + + reg_rpi->rpi = cpu_to_le16(rpi); + + rportid_flags = fc_id & SLI4_REGRPI_REMOTE_N_PORTID; + + if (update) + rportid_flags |= SLI4_REGRPI_UPD; + else + rportid_flags &= ~SLI4_REGRPI_UPD; + + if (enable_t10_pi) + rportid_flags |= SLI4_REGRPI_ETOW; + else + rportid_flags &= ~SLI4_REGRPI_ETOW; + + reg_rpi->dw2_rportid_flags = cpu_to_le32(rportid_flags); + + reg_rpi->bde_64.bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (SLI4_REG_RPI_BUF_LEN & SLI4_BDE_LEN_MASK)); + reg_rpi->bde_64.u.data.low = + cpu_to_le32(lower_32_bits(dma->phys)); + reg_rpi->bde_64.u.data.high = + cpu_to_le32(upper_32_bits(dma->phys)); + + reg_rpi->vpi = cpu_to_le16(vpi); + + return 0; +} + +int +sli_cmd_reg_vfi(struct sli4 *sli4, void *buf, size_t size, + u16 vfi, u16 fcfi, struct efc_dma dma, + u16 vpi, __be64 sli_wwpn, u32 fc_id) +{ + struct sli4_cmd_reg_vfi *reg_vfi = buf; + + memset(buf, 0, SLI4_BMBX_SIZE); + + reg_vfi->hdr.command = SLI4_MBX_CMD_REG_VFI; + + reg_vfi->vfi = cpu_to_le16(vfi); + + reg_vfi->fcfi = cpu_to_le16(fcfi); + + reg_vfi->sparm.bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (SLI4_REG_RPI_BUF_LEN & SLI4_BDE_LEN_MASK)); + reg_vfi->sparm.u.data.low = + cpu_to_le32(lower_32_bits(dma.phys)); + reg_vfi->sparm.u.data.high = + cpu_to_le32(upper_32_bits(dma.phys)); + + reg_vfi->e_d_tov = cpu_to_le32(sli4->e_d_tov); + reg_vfi->r_a_tov = cpu_to_le32(sli4->r_a_tov); + + reg_vfi->dw0w1_flags |= cpu_to_le16(SLI4_REGVFI_VP); + reg_vfi->vpi = cpu_to_le16(vpi); + memcpy(reg_vfi->wwpn, &sli_wwpn, sizeof(reg_vfi->wwpn)); + reg_vfi->dw10_lportid_flags = cpu_to_le32(fc_id); + + return 0; +} + +int +sli_cmd_reg_vpi(struct sli4 *sli4, void *buf, u32 fc_id, __be64 sli_wwpn, + u16 vpi, u16 vfi, bool update) +{ + struct sli4_cmd_reg_vpi *reg_vpi = buf; + u32 flags = 0; + + memset(buf, 0, SLI4_BMBX_SIZE); + + reg_vpi->hdr.command = SLI4_MBX_CMD_REG_VPI; + + flags = (fc_id & SLI4_REGVPI_LOCAL_N_PORTID); + if (update) + flags |= SLI4_REGVPI_UPD; + else + flags &= ~SLI4_REGVPI_UPD; + + reg_vpi->dw2_lportid_flags = cpu_to_le32(flags); + memcpy(reg_vpi->wwpn, &sli_wwpn, sizeof(reg_vpi->wwpn)); + reg_vpi->vpi = cpu_to_le16(vpi); + reg_vpi->vfi = cpu_to_le16(vfi); + + return 0; +} + +static int +sli_cmd_request_features(struct sli4 *sli4, void *buf, u32 features_mask, + bool query) +{ + struct sli4_cmd_request_features *req_features = buf; + + memset(buf, 0, SLI4_BMBX_SIZE); + + req_features->hdr.command = SLI4_MBX_CMD_RQST_FEATURES; + + if (query) + req_features->dw1_qry = cpu_to_le32(SLI4_REQFEAT_QRY); + + req_features->cmd = cpu_to_le32(features_mask); + + return 0; +} + +int +sli_cmd_unreg_fcfi(struct sli4 *sli4, void *buf, u16 indicator) +{ + struct sli4_cmd_unreg_fcfi *unreg_fcfi = buf; + + memset(buf, 0, SLI4_BMBX_SIZE); + + unreg_fcfi->hdr.command = SLI4_MBX_CMD_UNREG_FCFI; + unreg_fcfi->fcfi = cpu_to_le16(indicator); + + return 0; +} + +int +sli_cmd_unreg_rpi(struct sli4 *sli4, void *buf, u16 indicator, + enum sli4_resource which, u32 fc_id) +{ + struct sli4_cmd_unreg_rpi *unreg_rpi = buf; + u32 flags = 0; + + memset(buf, 0, SLI4_BMBX_SIZE); + + unreg_rpi->hdr.command = SLI4_MBX_CMD_UNREG_RPI; + switch (which) { + case SLI4_RSRC_RPI: + flags |= SLI4_UNREG_RPI_II_RPI; + if (fc_id == U32_MAX) + break; + + flags |= SLI4_UNREG_RPI_DP; + unreg_rpi->dw2_dest_n_portid = + cpu_to_le32(fc_id & SLI4_UNREG_RPI_DEST_N_PORTID_MASK); + 
break; + case SLI4_RSRC_VPI: + flags |= SLI4_UNREG_RPI_II_VPI; + break; + case SLI4_RSRC_VFI: + flags |= SLI4_UNREG_RPI_II_VFI; + break; + case SLI4_RSRC_FCFI: + flags |= SLI4_UNREG_RPI_II_FCFI; + break; + default: + efc_log_info(sli4, "unknown type %#x\n", which); + return -EIO; + } + + unreg_rpi->dw1w1_flags = cpu_to_le16(flags); + unreg_rpi->index = cpu_to_le16(indicator); + + return 0; +} + +int +sli_cmd_unreg_vfi(struct sli4 *sli4, void *buf, u16 index, u32 which) +{ + struct sli4_cmd_unreg_vfi *unreg_vfi = buf; + + memset(buf, 0, SLI4_BMBX_SIZE); + + unreg_vfi->hdr.command = SLI4_MBX_CMD_UNREG_VFI; + switch (which) { + case SLI4_UNREG_TYPE_DOMAIN: + unreg_vfi->index = cpu_to_le16(index); + break; + case SLI4_UNREG_TYPE_FCF: + unreg_vfi->index = cpu_to_le16(index); + break; + case SLI4_UNREG_TYPE_ALL: + unreg_vfi->index = cpu_to_le16(U32_MAX); + break; + default: + return -EIO; + } + + if (which != SLI4_UNREG_TYPE_DOMAIN) + unreg_vfi->dw2_flags = cpu_to_le16(SLI4_UNREG_VFI_II_FCFI); + + return 0; +} + +int +sli_cmd_unreg_vpi(struct sli4 *sli4, void *buf, u16 indicator, u32 which) +{ + struct sli4_cmd_unreg_vpi *unreg_vpi = buf; + u32 flags = 0; + + memset(buf, 0, SLI4_BMBX_SIZE); + + unreg_vpi->hdr.command = SLI4_MBX_CMD_UNREG_VPI; + unreg_vpi->index = cpu_to_le16(indicator); + switch (which) { + case SLI4_UNREG_TYPE_PORT: + flags |= SLI4_UNREG_VPI_II_VPI; + break; + case SLI4_UNREG_TYPE_DOMAIN: + flags |= SLI4_UNREG_VPI_II_VFI; + break; + case SLI4_UNREG_TYPE_FCF: + flags |= SLI4_UNREG_VPI_II_FCFI; + break; + case SLI4_UNREG_TYPE_ALL: + /* override indicator */ + unreg_vpi->index = cpu_to_le16(U32_MAX); + flags |= SLI4_UNREG_VPI_II_FCFI; + break; + default: + return -EIO; + } + + unreg_vpi->dw2w0_flags = cpu_to_le16(flags); + return 0; +} + +static int +sli_cmd_common_modify_eq_delay(struct sli4 *sli4, void *buf, + struct sli4_queue *q, int num_q, u32 shift, + u32 delay_mult) +{ + struct sli4_rqst_cmn_modify_eq_delay *req = NULL; + int i; + + req = sli_config_cmd_init(sli4, buf, + SLI4_CFG_PYLD_LENGTH(cmn_modify_eq_delay), NULL); + if (!req) + return -EIO; + + sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_MODIFY_EQ_DELAY, + SLI4_SUBSYSTEM_COMMON, CMD_V0, + SLI4_RQST_PYLD_LEN(cmn_modify_eq_delay)); + req->num_eq = cpu_to_le32(num_q); + + for (i = 0; i < num_q; i++) { + req->eq_delay_record[i].eq_id = cpu_to_le32(q[i].id); + req->eq_delay_record[i].phase = cpu_to_le32(shift); + req->eq_delay_record[i].delay_multiplier = + cpu_to_le32(delay_mult); + } + + return 0; +} + +void +sli4_cmd_lowlevel_set_watchdog(struct sli4 *sli4, void *buf, + size_t size, u16 timeout) +{ + struct sli4_rqst_lowlevel_set_watchdog *req = NULL; + + req = sli_config_cmd_init(sli4, buf, + SLI4_CFG_PYLD_LENGTH(lowlevel_set_watchdog), NULL); + if (!req) + return; + + sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_LOWLEVEL_SET_WATCHDOG, + SLI4_SUBSYSTEM_LOWLEVEL, CMD_V0, + SLI4_RQST_PYLD_LEN(lowlevel_set_watchdog)); + req->watchdog_timeout = cpu_to_le16(timeout); +} + +static int +sli_cmd_common_get_cntl_attributes(struct sli4 *sli4, void *buf, + struct efc_dma *dma) +{ + struct sli4_rqst_hdr *hdr = NULL; + + hdr = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(hdr), dma); + if (!hdr) + return -EIO; + + hdr->opcode = SLI4_CMN_GET_CNTL_ATTRIBUTES; + hdr->subsystem = SLI4_SUBSYSTEM_COMMON; + hdr->request_length = cpu_to_le32(dma->size); + + return 0; +} + +static int +sli_cmd_common_get_cntl_addl_attributes(struct sli4 *sli4, void *buf, + struct efc_dma *dma) +{ + struct sli4_rqst_hdr *hdr = NULL; + + hdr = sli_config_cmd_init(sli4, 
buf, SLI4_RQST_CMDSZ(hdr), dma); + if (!hdr) + return -EIO; + + hdr->opcode = SLI4_CMN_GET_CNTL_ADDL_ATTRS; + hdr->subsystem = SLI4_SUBSYSTEM_COMMON; + hdr->request_length = cpu_to_le32(dma->size); + + return 0; +} + +int +sli_cmd_common_nop(struct sli4 *sli4, void *buf, uint64_t context) +{ + struct sli4_rqst_cmn_nop *nop = NULL; + + nop = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_nop), + NULL); + if (!nop) + return -EIO; + + sli_cmd_fill_hdr(&nop->hdr, SLI4_CMN_NOP, SLI4_SUBSYSTEM_COMMON, + CMD_V0, SLI4_RQST_PYLD_LEN(cmn_nop)); + + memcpy(&nop->context, &context, sizeof(context)); + + return 0; +} + +int +sli_cmd_common_get_resource_extent_info(struct sli4 *sli4, void *buf, u16 rtype) +{ + struct sli4_rqst_cmn_get_resource_extent_info *ext = NULL; + + ext = sli_config_cmd_init(sli4, buf, + SLI4_RQST_CMDSZ(cmn_get_resource_extent_info), NULL); + if (!ext) + return -EIO; + + sli_cmd_fill_hdr(&ext->hdr, SLI4_CMN_GET_RSC_EXTENT_INFO, + SLI4_SUBSYSTEM_COMMON, CMD_V0, + SLI4_RQST_PYLD_LEN(cmn_get_resource_extent_info)); + + ext->resource_type = cpu_to_le16(rtype); + + return 0; +} + +int +sli_cmd_common_get_sli4_parameters(struct sli4 *sli4, void *buf) +{ + struct sli4_rqst_hdr *hdr = NULL; + + hdr = sli_config_cmd_init(sli4, buf, + SLI4_CFG_PYLD_LENGTH(cmn_get_sli4_params), NULL); + if (!hdr) + return -EIO; + + hdr->opcode = SLI4_CMN_GET_SLI4_PARAMS; + hdr->subsystem = SLI4_SUBSYSTEM_COMMON; + hdr->request_length = SLI4_RQST_PYLD_LEN(cmn_get_sli4_params); + + return 0; +} + +static int +sli_cmd_common_get_port_name(struct sli4 *sli4, void *buf) +{ + struct sli4_rqst_cmn_get_port_name *pname; + + pname = sli_config_cmd_init(sli4, buf, + SLI4_CFG_PYLD_LENGTH(cmn_get_port_name), NULL); + if (!pname) + return -EIO; + + sli_cmd_fill_hdr(&pname->hdr, SLI4_CMN_GET_PORT_NAME, + SLI4_SUBSYSTEM_COMMON, CMD_V1, + SLI4_RQST_PYLD_LEN(cmn_get_port_name)); + + /* Set the port type value (ethernet=0, FC=1) for V1 commands */ + pname->port_type = SLI4_PORT_TYPE_FC; + + return 0; +} + +int +sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc, + u16 eof, u32 desired_write_length, + u32 offset, char *obj_name, + struct efc_dma *dma) +{ + struct sli4_rqst_cmn_write_object *wr_obj = NULL; + struct sli4_bde *bde; + u32 dwflags = 0; + + wr_obj = sli_config_cmd_init(sli4, buf, + SLI4_RQST_CMDSZ(cmn_write_object) + sizeof(*bde), NULL); + if (!wr_obj) + return -EIO; + + sli_cmd_fill_hdr(&wr_obj->hdr, SLI4_CMN_WRITE_OBJECT, + SLI4_SUBSYSTEM_COMMON, CMD_V0, + SLI4_RQST_PYLD_LEN_VAR(cmn_write_object, sizeof(*bde))); + + if (noc) + dwflags |= SLI4_RQ_DES_WRITE_LEN_NOC; + if (eof) + dwflags |= SLI4_RQ_DES_WRITE_LEN_EOF; + dwflags |= (desired_write_length & SLI4_RQ_DES_WRITE_LEN); + + wr_obj->desired_write_len_dword = cpu_to_le32(dwflags); + + wr_obj->write_offset = cpu_to_le32(offset); + strncpy(wr_obj->object_name, obj_name, sizeof(wr_obj->object_name) - 1); + wr_obj->host_buffer_descriptor_count = cpu_to_le32(1); + + bde = (struct sli4_bde *)wr_obj->host_buffer_descriptor; + + /* Setup to transfer xfer_size bytes to device */ + bde->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (desired_write_length & SLI4_BDE_LEN_MASK)); + bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys)); + bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys)); + + return 0; +} + +int +sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *obj_name) +{ + struct sli4_rqst_cmn_delete_object *req = NULL; + + req = sli_config_cmd_init(sli4, buf, + SLI4_RQST_CMDSZ(cmn_delete_object), NULL); + if 
(!req) + return -EIO; + + sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_DELETE_OBJECT, + SLI4_SUBSYSTEM_COMMON, CMD_V0, + SLI4_RQST_PYLD_LEN(cmn_delete_object)); + + strncpy(req->object_name, obj_name, sizeof(req->object_name) - 1); + return 0; +} + +int +sli_cmd_common_read_object(struct sli4 *sli4, void *buf, u32 desired_read_len, + u32 offset, char *obj_name, struct efc_dma *dma) +{ + struct sli4_rqst_cmn_read_object *rd_obj = NULL; + struct sli4_bde *bde; + + rd_obj = sli_config_cmd_init(sli4, buf, + SLI4_RQST_CMDSZ(cmn_read_object) + sizeof(*bde), NULL); + if (!rd_obj) + return -EIO; + + sli_cmd_fill_hdr(&rd_obj->hdr, SLI4_CMN_READ_OBJECT, + SLI4_SUBSYSTEM_COMMON, CMD_V0, + SLI4_RQST_PYLD_LEN_VAR(cmn_read_object, sizeof(*bde))); + rd_obj->desired_read_length_dword = + cpu_to_le32(desired_read_len & SLI4_REQ_DESIRE_READLEN); + + rd_obj->read_offset = cpu_to_le32(offset); + strncpy(rd_obj->object_name, obj_name, sizeof(rd_obj->object_name) - 1); + rd_obj->host_buffer_descriptor_count = cpu_to_le32(1); + + bde = (struct sli4_bde *)rd_obj->host_buffer_descriptor; + + /* Setup to transfer xfer_size bytes to device */ + bde->bde_type_buflen = + cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | + (desired_read_len & SLI4_BDE_LEN_MASK)); + if (dma) { + bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys)); + bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys)); + } else { + bde->u.data.low = 0; + bde->u.data.high = 0; + } + + return 0; +} + +int +sli_cmd_dmtf_exec_clp_cmd(struct sli4 *sli4, void *buf, struct efc_dma *cmd, + struct efc_dma *resp) +{ + struct sli4_rqst_dmtf_exec_clp_cmd *clp_cmd = NULL; + + clp_cmd = sli_config_cmd_init(sli4, buf, + SLI4_RQST_CMDSZ(dmtf_exec_clp_cmd), NULL); + if (!clp_cmd) + return -EIO; + + sli_cmd_fill_hdr(&clp_cmd->hdr, DMTF_EXEC_CLP_CMD, SLI4_SUBSYSTEM_DMTF, + CMD_V0, SLI4_RQST_PYLD_LEN(dmtf_exec_clp_cmd)); + + clp_cmd->cmd_buf_length = cpu_to_le32(cmd->size); + clp_cmd->cmd_buf_addr_low = cpu_to_le32(lower_32_bits(cmd->phys)); + clp_cmd->cmd_buf_addr_high = cpu_to_le32(upper_32_bits(cmd->phys)); + clp_cmd->resp_buf_length = cpu_to_le32(resp->size); + clp_cmd->resp_buf_addr_low = cpu_to_le32(lower_32_bits(resp->phys)); + clp_cmd->resp_buf_addr_high = cpu_to_le32(upper_32_bits(resp->phys)); + return 0; +} + +int +sli_cmd_common_set_dump_location(struct sli4 *sli4, void *buf, bool query, + bool is_buffer_list, + struct efc_dma *buffer, u8 fdb) +{ + struct sli4_rqst_cmn_set_dump_location *set_dump_loc = NULL; + u32 buffer_length_flag = 0; + + set_dump_loc = sli_config_cmd_init(sli4, buf, + SLI4_RQST_CMDSZ(cmn_set_dump_location), NULL); + if (!set_dump_loc) + return -EIO; + + sli_cmd_fill_hdr(&set_dump_loc->hdr, SLI4_CMN_SET_DUMP_LOCATION, + SLI4_SUBSYSTEM_COMMON, CMD_V0, + SLI4_RQST_PYLD_LEN(cmn_set_dump_location)); + + if (is_buffer_list) + buffer_length_flag |= SLI4_CMN_SET_DUMP_BLP; + + if (query) + buffer_length_flag |= SLI4_CMN_SET_DUMP_QRY; + + if (fdb) + buffer_length_flag |= SLI4_CMN_SET_DUMP_FDB; + + if (buffer) { + set_dump_loc->buf_addr_low = + cpu_to_le32(lower_32_bits(buffer->phys)); + set_dump_loc->buf_addr_high = + cpu_to_le32(upper_32_bits(buffer->phys)); + + buffer_length_flag |= + buffer->len & SLI4_CMN_SET_DUMP_BUFFER_LEN; + } else { + set_dump_loc->buf_addr_low = 0; + set_dump_loc->buf_addr_high = 0; + set_dump_loc->buffer_length_dword = 0; + } + set_dump_loc->buffer_length_dword = cpu_to_le32(buffer_length_flag); + return 0; +} + +int +sli_cmd_common_set_features(struct sli4 *sli4, void *buf, u32 feature, + u32 param_len, void *parameter) +{ + struct 
sli4_rqst_cmn_set_features *cmd = NULL; + + cmd = sli_config_cmd_init(sli4, buf, + SLI4_RQST_CMDSZ(cmn_set_features), NULL); + if (!cmd) + return -EIO; + + sli_cmd_fill_hdr(&cmd->hdr, SLI4_CMN_SET_FEATURES, + SLI4_SUBSYSTEM_COMMON, CMD_V0, + SLI4_RQST_PYLD_LEN(cmn_set_features)); + + cmd->feature = cpu_to_le32(feature); + cmd->param_len = cpu_to_le32(param_len); + memcpy(cmd->params, parameter, param_len); + + return 0; +} + +int +sli_cqe_mq(struct sli4 *sli4, void *buf) +{ + struct sli4_mcqe *mcqe = buf; + u32 dwflags = le32_to_cpu(mcqe->dw3_flags); + /* + * Firmware can split mbx completions into two MCQEs: first with only + * the "consumed" bit set and a second with the "complete" bit set. + * Thus, ignore MCQE unless "complete" is set. + */ + if (!(dwflags & SLI4_MCQE_COMPLETED)) + return SLI4_MCQE_STATUS_NOT_COMPLETED; + + if (le16_to_cpu(mcqe->completion_status)) { + efc_log_info(sli4, "status(st=%#x ext=%#x con=%d cmp=%d ae=%d val=%d)\n", + le16_to_cpu(mcqe->completion_status), + le16_to_cpu(mcqe->extended_status), + (dwflags & SLI4_MCQE_CONSUMED), + (dwflags & SLI4_MCQE_COMPLETED), + (dwflags & SLI4_MCQE_AE), + (dwflags & SLI4_MCQE_VALID)); + } + + return le16_to_cpu(mcqe->completion_status); +} + +int +sli_cqe_async(struct sli4 *sli4, void *buf) +{ + struct sli4_acqe *acqe = buf; + int rc = -EIO; + + if (!buf) { + efc_log_err(sli4, "bad parameter sli4=%p buf=%p\n", sli4, buf); + return -EIO; + } + + switch (acqe->event_code) { + case SLI4_ACQE_EVENT_CODE_LINK_STATE: + efc_log_info(sli4, "Unsupported by FC link, evt code:%#x\n", + acqe->event_code); + break; + case SLI4_ACQE_EVENT_CODE_GRP_5: + efc_log_info(sli4, "ACQE GRP5\n"); + break; + case SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT: + efc_log_info(sli4, "ACQE SLI Port, type=0x%x, data1,2=0x%08x,0x%08x\n", + acqe->event_type, + le32_to_cpu(acqe->event_data[0]), + le32_to_cpu(acqe->event_data[1])); + break; + case SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT: + rc = sli_fc_process_link_attention(sli4, buf); + break; + default: + efc_log_info(sli4, "ACQE unknown=%#x\n", acqe->event_code); + } + + return rc; +} + +bool +sli_fw_ready(struct sli4 *sli4) +{ + u32 val; + + /* Determine if the chip FW is in a ready state */ + val = sli_reg_read_status(sli4); + return (val & SLI4_PORT_STATUS_RDY) ? 1 : 0; +} + +static bool +sli_wait_for_fw_ready(struct sli4 *sli4, u32 timeout_ms) +{ + unsigned long end; + + end = jiffies + msecs_to_jiffies(timeout_ms); + + do { + if (sli_fw_ready(sli4)) + return true; + + usleep_range(1000, 2000); + } while (time_before(jiffies, end)); + + return false; +} + +static bool +sli_sliport_reset(struct sli4 *sli4) +{ + bool rc; + u32 val; + + val = SLI4_PORT_CTRL_IP; + /* Initialize port, endian */ + writel(val, (sli4->reg[0] + SLI4_PORT_CTRL_REG)); + + rc = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC); + if (!rc) + efc_log_crit(sli4, "port failed to become ready after initialization\n"); + + return rc; +} + +static bool +sli_fw_init(struct sli4 *sli4) +{ + /* + * Is firmware ready for operation? 
+ */ + if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) { + efc_log_crit(sli4, "FW status is NOT ready\n"); + return false; + } + + /* + * Reset port to a known state + */ + return sli_sliport_reset(sli4); +} + +static int +sli_request_features(struct sli4 *sli4, u32 *features, bool query) +{ + struct sli4_cmd_request_features *req_features = sli4->bmbx.virt; + + if (sli_cmd_request_features(sli4, sli4->bmbx.virt, *features, query)) { + efc_log_err(sli4, "bad REQUEST_FEATURES write\n"); + return -EIO; + } + + if (sli_bmbx_command(sli4)) { + efc_log_crit(sli4, "bootstrap mailbox write fail\n"); + return -EIO; + } + + if (le16_to_cpu(req_features->hdr.status)) { + efc_log_err(sli4, "REQUEST_FEATURES bad status %#x\n", + le16_to_cpu(req_features->hdr.status)); + return -EIO; + } + + *features = le32_to_cpu(req_features->resp); + return 0; +} + +void +sli_calc_max_qentries(struct sli4 *sli4) +{ + enum sli4_qtype q; + u32 qentries; + + for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) { + sli4->qinfo.max_qentries[q] = + sli_convert_mask_to_count(sli4->qinfo.count_method[q], + sli4->qinfo.count_mask[q]); + } + + /* single, contiguous DMA allocations will be called for each queue + * of size (max_qentries * queue entry size); since these can be large, + * check against the OS max DMA allocation size + */ + for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) { + qentries = sli4->qinfo.max_qentries[q]; + + efc_log_info(sli4, "[%s]: max_qentries from %d to %d\n", + SLI4_QNAME[q], + sli4->qinfo.max_qentries[q], qentries); + sli4->qinfo.max_qentries[q] = qentries; + } +} + +static int +sli_get_read_config(struct sli4 *sli4) +{ + struct sli4_rsp_read_config *conf = sli4->bmbx.virt; + u32 i, total; + u32 *base; + + if (sli_cmd_read_config(sli4, sli4->bmbx.virt)) { + efc_log_err(sli4, "bad READ_CONFIG write\n"); + return -EIO; + } + + if (sli_bmbx_command(sli4)) { + efc_log_crit(sli4, "bootstrap mailbox fail (READ_CONFIG)\n"); + return -EIO; + } + + if (le16_to_cpu(conf->hdr.status)) { + efc_log_err(sli4, "READ_CONFIG bad status %#x\n", + le16_to_cpu(conf->hdr.status)); + return -EIO; + } + + sli4->params.has_extents = + le32_to_cpu(conf->ext_dword) & SLI4_READ_CFG_RESP_RESOURCE_EXT; + if (sli4->params.has_extents) { + efc_log_err(sli4, "extents not supported\n"); + return -EIO; + } + + base = sli4->ext[0].base; + if (!base) { + int size = SLI4_RSRC_MAX * sizeof(u32); + + base = kzalloc(size, GFP_KERNEL); + if (!base) + return -EIO; + } + + for (i = 0; i < SLI4_RSRC_MAX; i++) { + sli4->ext[i].number = 1; + sli4->ext[i].n_alloc = 0; + sli4->ext[i].base = &base[i]; + } + + sli4->ext[SLI4_RSRC_VFI].base[0] = le16_to_cpu(conf->vfi_base); + sli4->ext[SLI4_RSRC_VFI].size = le16_to_cpu(conf->vfi_count); + + sli4->ext[SLI4_RSRC_VPI].base[0] = le16_to_cpu(conf->vpi_base); + sli4->ext[SLI4_RSRC_VPI].size = le16_to_cpu(conf->vpi_count); + + sli4->ext[SLI4_RSRC_RPI].base[0] = le16_to_cpu(conf->rpi_base); + sli4->ext[SLI4_RSRC_RPI].size = le16_to_cpu(conf->rpi_count); + + sli4->ext[SLI4_RSRC_XRI].base[0] = le16_to_cpu(conf->xri_base); + sli4->ext[SLI4_RSRC_XRI].size = le16_to_cpu(conf->xri_count); + + sli4->ext[SLI4_RSRC_FCFI].base[0] = 0; + sli4->ext[SLI4_RSRC_FCFI].size = le16_to_cpu(conf->fcfi_count); + + for (i = 0; i < SLI4_RSRC_MAX; i++) { + total = sli4->ext[i].number * sli4->ext[i].size; + sli4->ext[i].use_map = bitmap_zalloc(total, GFP_KERNEL); + if (!sli4->ext[i].use_map) { + efc_log_err(sli4, "bitmap memory allocation failed %d\n", + i); + return -EIO; + } + sli4->ext[i].map_size = total; + } + + 
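/* The configured topology is carried in the high byte of topology_dword (hence the shift by 24). */ +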
sli4->topology = (le32_to_cpu(conf->topology_dword) & + SLI4_READ_CFG_RESP_TOPOLOGY) >> 24; + switch (sli4->topology) { + case SLI4_READ_CFG_TOPO_FC: + efc_log_info(sli4, "FC (unknown)\n"); + break; + case SLI4_READ_CFG_TOPO_NON_FC_AL: + efc_log_info(sli4, "FC (direct attach)\n"); + break; + case SLI4_READ_CFG_TOPO_FC_AL: + efc_log_info(sli4, "FC (arbitrated loop)\n"); + break; + default: + efc_log_info(sli4, "bad topology %#x\n", sli4->topology); + } + + sli4->e_d_tov = le16_to_cpu(conf->e_d_tov); + sli4->r_a_tov = le16_to_cpu(conf->r_a_tov); + + sli4->link_module_type = le16_to_cpu(conf->lmt); + + sli4->qinfo.max_qcount[SLI4_QTYPE_EQ] = le16_to_cpu(conf->eq_count); + sli4->qinfo.max_qcount[SLI4_QTYPE_CQ] = le16_to_cpu(conf->cq_count); + sli4->qinfo.max_qcount[SLI4_QTYPE_WQ] = le16_to_cpu(conf->wq_count); + sli4->qinfo.max_qcount[SLI4_QTYPE_RQ] = le16_to_cpu(conf->rq_count); + + /* + * READ_CONFIG doesn't give the max number of MQ. Applications + * will typically want 1, but we may need another at some future + * date. Dummy up a "max" MQ count here. + */ + sli4->qinfo.max_qcount[SLI4_QTYPE_MQ] = SLI4_USER_MQ_COUNT; + return 0; +} + +static int +sli_get_sli4_parameters(struct sli4 *sli4) +{ + struct sli4_rsp_cmn_get_sli4_params *parms; + u32 dw_loopback; + u32 dw_eq_pg_cnt; + u32 dw_cq_pg_cnt; + u32 dw_mq_pg_cnt; + u32 dw_wq_pg_cnt; + u32 dw_rq_pg_cnt; + u32 dw_sgl_pg_cnt; + + if (sli_cmd_common_get_sli4_parameters(sli4, sli4->bmbx.virt)) + return -EIO; + + parms = (struct sli4_rsp_cmn_get_sli4_params *) + (((u8 *)sli4->bmbx.virt) + + offsetof(struct sli4_cmd_sli_config, payload.embed)); + + if (sli_bmbx_command(sli4)) { + efc_log_crit(sli4, "bootstrap mailbox write fail\n"); + return -EIO; + } + + if (parms->hdr.status) { + efc_log_err(sli4, "COMMON_GET_SLI4_PARAMETERS bad status %#x", + parms->hdr.status); + efc_log_err(sli4, "additional status %#x\n", + parms->hdr.additional_status); + return -EIO; + } + + dw_loopback = le32_to_cpu(parms->dw16_loopback_scope); + dw_eq_pg_cnt = le32_to_cpu(parms->dw6_eq_page_cnt); + dw_cq_pg_cnt = le32_to_cpu(parms->dw8_cq_page_cnt); + dw_mq_pg_cnt = le32_to_cpu(parms->dw10_mq_page_cnt); + dw_wq_pg_cnt = le32_to_cpu(parms->dw12_wq_page_cnt); + dw_rq_pg_cnt = le32_to_cpu(parms->dw14_rq_page_cnt); + + sli4->params.auto_reg = (dw_loopback & SLI4_PARAM_AREG); + sli4->params.auto_xfer_rdy = (dw_loopback & SLI4_PARAM_AGXF); + sli4->params.hdr_template_req = (dw_loopback & SLI4_PARAM_HDRR); + sli4->params.t10_dif_inline_capable = (dw_loopback & SLI4_PARAM_TIMM); + sli4->params.t10_dif_separate_capable = (dw_loopback & SLI4_PARAM_TSMM); + + sli4->params.mq_create_version = GET_Q_CREATE_VERSION(dw_mq_pg_cnt); + sli4->params.cq_create_version = GET_Q_CREATE_VERSION(dw_cq_pg_cnt); + + sli4->rq_min_buf_size = le16_to_cpu(parms->min_rq_buffer_size); + sli4->rq_max_buf_size = le32_to_cpu(parms->max_rq_buffer_size); + + sli4->qinfo.qpage_count[SLI4_QTYPE_EQ] = + (dw_eq_pg_cnt & SLI4_PARAM_EQ_PAGE_CNT_MASK); + sli4->qinfo.qpage_count[SLI4_QTYPE_CQ] = + (dw_cq_pg_cnt & SLI4_PARAM_CQ_PAGE_CNT_MASK); + sli4->qinfo.qpage_count[SLI4_QTYPE_MQ] = + (dw_mq_pg_cnt & SLI4_PARAM_MQ_PAGE_CNT_MASK); + sli4->qinfo.qpage_count[SLI4_QTYPE_WQ] = + (dw_wq_pg_cnt & SLI4_PARAM_WQ_PAGE_CNT_MASK); + sli4->qinfo.qpage_count[SLI4_QTYPE_RQ] = + (dw_rq_pg_cnt & SLI4_PARAM_RQ_PAGE_CNT_MASK); + + /* save count methods and masks for each queue type */ + + sli4->qinfo.count_mask[SLI4_QTYPE_EQ] = + le16_to_cpu(parms->eqe_count_mask); + sli4->qinfo.count_method[SLI4_QTYPE_EQ] = + 
GET_Q_CNT_METHOD(dw_eq_pg_cnt); + + sli4->qinfo.count_mask[SLI4_QTYPE_CQ] = + le16_to_cpu(parms->cqe_count_mask); + sli4->qinfo.count_method[SLI4_QTYPE_CQ] = + GET_Q_CNT_METHOD(dw_cq_pg_cnt); + + sli4->qinfo.count_mask[SLI4_QTYPE_MQ] = + le16_to_cpu(parms->mqe_count_mask); + sli4->qinfo.count_method[SLI4_QTYPE_MQ] = + GET_Q_CNT_METHOD(dw_mq_pg_cnt); + + sli4->qinfo.count_mask[SLI4_QTYPE_WQ] = + le16_to_cpu(parms->wqe_count_mask); + sli4->qinfo.count_method[SLI4_QTYPE_WQ] = + GET_Q_CNT_METHOD(dw_wq_pg_cnt); + + sli4->qinfo.count_mask[SLI4_QTYPE_RQ] = + le16_to_cpu(parms->rqe_count_mask); + sli4->qinfo.count_method[SLI4_QTYPE_RQ] = + GET_Q_CNT_METHOD(dw_rq_pg_cnt); + + /* now calculate max queue entries */ + sli_calc_max_qentries(sli4); + + dw_sgl_pg_cnt = le32_to_cpu(parms->dw18_sgl_page_cnt); + + /* max # of pages */ + sli4->max_sgl_pages = (dw_sgl_pg_cnt & SLI4_PARAM_SGL_PAGE_CNT_MASK); + + /* bit map of available sizes */ + sli4->sgl_page_sizes = (dw_sgl_pg_cnt & + SLI4_PARAM_SGL_PAGE_SZS_MASK) >> 8; + /* ignore HLM here. Use value from REQUEST_FEATURES */ + sli4->sge_supported_length = le32_to_cpu(parms->sge_supported_length); + sli4->params.sgl_pre_reg_required = (dw_loopback & SLI4_PARAM_SGLR); + /* default to using pre-registered SGL's */ + sli4->params.sgl_pre_registered = true; + + sli4->params.perf_hint = dw_loopback & SLI4_PARAM_PHON; + sli4->params.perf_wq_id_association = (dw_loopback & SLI4_PARAM_PHWQ); + + sli4->rq_batch = (le16_to_cpu(parms->dw15w1_rq_db_window) & + SLI4_PARAM_RQ_DB_WINDOW_MASK) >> 12; + + /* Use the highest available WQE size. */ + if (((dw_wq_pg_cnt & SLI4_PARAM_WQE_SZS_MASK) >> 8) & + SLI4_128BYTE_WQE_SUPPORT) + sli4->wqe_size = SLI4_WQE_EXT_BYTES; + else + sli4->wqe_size = SLI4_WQE_BYTES; + + return 0; +} + +static int +sli_get_ctrl_attributes(struct sli4 *sli4) +{ + struct sli4_rsp_cmn_get_cntl_attributes *attr; + struct sli4_rsp_cmn_get_cntl_addl_attributes *add_attr; + struct efc_dma data; + u32 psize; + + /* + * Issue COMMON_GET_CNTL_ATTRIBUTES to get port_number. Temporarily + * uses VPD DMA buffer as the response won't fit in the embedded + * buffer. 
+ */ + memset(sli4->vpd_data.virt, 0, sli4->vpd_data.size); + if (sli_cmd_common_get_cntl_attributes(sli4, sli4->bmbx.virt, + &sli4->vpd_data)) { + efc_log_err(sli4, "bad COMMON_GET_CNTL_ATTRIBUTES write\n"); + return -EIO; + } + + attr = sli4->vpd_data.virt; + + if (sli_bmbx_command(sli4)) { + efc_log_crit(sli4, "bootstrap mailbox write fail\n"); + return -EIO; + } + + if (attr->hdr.status) { + efc_log_err(sli4, "COMMON_GET_CNTL_ATTRIBUTES bad status %#x", + attr->hdr.status); + efc_log_err(sli4, "additional status %#x\n", + attr->hdr.additional_status); + return -EIO; + } + + sli4->port_number = attr->port_num_type_flags & SLI4_CNTL_ATTR_PORTNUM; + + memcpy(sli4->bios_version_string, attr->bios_version_str, + sizeof(sli4->bios_version_string)); + + /* get additional attributes */ + psize = sizeof(struct sli4_rsp_cmn_get_cntl_addl_attributes); + data.size = psize; + data.virt = dma_alloc_coherent(&sli4->pci->dev, data.size, + &data.phys, GFP_KERNEL); + if (!data.virt) { + memset(&data, 0, sizeof(struct efc_dma)); + efc_log_err(sli4, "Failed to allocate memory for GET_CNTL_ADDL_ATTR\n"); + return -EIO; + } + + if (sli_cmd_common_get_cntl_addl_attributes(sli4, sli4->bmbx.virt, + &data)) { + efc_log_err(sli4, "bad GET_CNTL_ADDL_ATTR write\n"); + dma_free_coherent(&sli4->pci->dev, data.size, + data.virt, data.phys); + return -EIO; + } + + if (sli_bmbx_command(sli4)) { + efc_log_crit(sli4, "mailbox fail (GET_CNTL_ADDL_ATTR)\n"); + dma_free_coherent(&sli4->pci->dev, data.size, + data.virt, data.phys); + return -EIO; + } + + add_attr = data.virt; + if (add_attr->hdr.status) { + efc_log_err(sli4, "GET_CNTL_ADDL_ATTR bad status %#x\n", + add_attr->hdr.status); + dma_free_coherent(&sli4->pci->dev, data.size, + data.virt, data.phys); + return -EIO; + } + + memcpy(sli4->ipl_name, add_attr->ipl_file_name, sizeof(sli4->ipl_name)); + + efc_log_info(sli4, "IPL:%s\n", (char *)sli4->ipl_name); + + dma_free_coherent(&sli4->pci->dev, data.size, data.virt, + data.phys); + memset(&data, 0, sizeof(struct efc_dma)); + return 0; +} + +static int +sli_get_fw_rev(struct sli4 *sli4) +{ + struct sli4_cmd_read_rev *read_rev = sli4->bmbx.virt; + + if (sli_cmd_read_rev(sli4, sli4->bmbx.virt, &sli4->vpd_data)) + return -EIO; + + if (sli_bmbx_command(sli4)) { + efc_log_crit(sli4, "bootstrap mailbox write fail (READ_REV)\n"); + return -EIO; + } + + if (le16_to_cpu(read_rev->hdr.status)) { + efc_log_err(sli4, "READ_REV bad status %#x\n", + le16_to_cpu(read_rev->hdr.status)); + return -EIO; + } + + sli4->fw_rev[0] = le32_to_cpu(read_rev->first_fw_id); + memcpy(sli4->fw_name[0], read_rev->first_fw_name, + sizeof(sli4->fw_name[0])); + + sli4->fw_rev[1] = le32_to_cpu(read_rev->second_fw_id); + memcpy(sli4->fw_name[1], read_rev->second_fw_name, + sizeof(sli4->fw_name[1])); + + sli4->hw_rev[0] = le32_to_cpu(read_rev->first_hw_rev); + sli4->hw_rev[1] = le32_to_cpu(read_rev->second_hw_rev); + sli4->hw_rev[2] = le32_to_cpu(read_rev->third_hw_rev); + + efc_log_info(sli4, "FW1:%s (%08x) / FW2:%s (%08x)\n", + read_rev->first_fw_name, le32_to_cpu(read_rev->first_fw_id), + read_rev->second_fw_name, le32_to_cpu(read_rev->second_fw_id)); + + efc_log_info(sli4, "HW1: %08x / HW2: %08x\n", + le32_to_cpu(read_rev->first_hw_rev), + le32_to_cpu(read_rev->second_hw_rev)); + + /* Check that all VPD data was returned */ + if (le32_to_cpu(read_rev->returned_vpd_length) != + le32_to_cpu(read_rev->actual_vpd_length)) { + efc_log_info(sli4, "VPD length: avail=%d return=%d actual=%d\n", + le32_to_cpu(read_rev->available_length_dword) & + 
SLI4_READ_REV_AVAILABLE_LENGTH, + le32_to_cpu(read_rev->returned_vpd_length), + le32_to_cpu(read_rev->actual_vpd_length)); + } + sli4->vpd_length = le32_to_cpu(read_rev->returned_vpd_length); + return 0; +} + +static int +sli_get_config(struct sli4 *sli4) +{ + struct sli4_rsp_cmn_get_port_name *port_name; + struct sli4_cmd_read_nvparms *read_nvparms; + + /* + * Read the device configuration + */ + if (sli_get_read_config(sli4)) + return -EIO; + + if (sli_get_sli4_parameters(sli4)) + return -EIO; + + if (sli_get_ctrl_attributes(sli4)) + return -EIO; + + if (sli_cmd_common_get_port_name(sli4, sli4->bmbx.virt)) + return -EIO; + + port_name = (struct sli4_rsp_cmn_get_port_name *) + (((u8 *)sli4->bmbx.virt) + + offsetof(struct sli4_cmd_sli_config, payload.embed)); + + if (sli_bmbx_command(sli4)) { + efc_log_crit(sli4, "bootstrap mailbox fail (GET_PORT_NAME)\n"); + return -EIO; + } + + sli4->port_name[0] = port_name->port_name[sli4->port_number]; + sli4->port_name[1] = '\0'; + + if (sli_get_fw_rev(sli4)) + return -EIO; + + if (sli_cmd_read_nvparms(sli4, sli4->bmbx.virt)) { + efc_log_err(sli4, "bad READ_NVPARMS write\n"); + return -EIO; + } + + if (sli_bmbx_command(sli4)) { + efc_log_crit(sli4, "bootstrap mailbox fail (READ_NVPARMS)\n"); + return -EIO; + } + + read_nvparms = sli4->bmbx.virt; + if (le16_to_cpu(read_nvparms->hdr.status)) { + efc_log_err(sli4, "READ_NVPARMS bad status %#x\n", + le16_to_cpu(read_nvparms->hdr.status)); + return -EIO; + } + + memcpy(sli4->wwpn, read_nvparms->wwpn, sizeof(sli4->wwpn)); + memcpy(sli4->wwnn, read_nvparms->wwnn, sizeof(sli4->wwnn)); + + efc_log_info(sli4, "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + sli4->wwpn[0], sli4->wwpn[1], sli4->wwpn[2], sli4->wwpn[3], + sli4->wwpn[4], sli4->wwpn[5], sli4->wwpn[6], sli4->wwpn[7]); + efc_log_info(sli4, "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + sli4->wwnn[0], sli4->wwnn[1], sli4->wwnn[2], sli4->wwnn[3], + sli4->wwnn[4], sli4->wwnn[5], sli4->wwnn[6], sli4->wwnn[7]); + + return 0; +} + +int +sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev, + void __iomem *reg[]) +{ + u32 intf = U32_MAX; + u32 pci_class_rev = 0; + u32 rev_id = 0; + u32 family = 0; + u32 asic_id = 0; + u32 i; + struct sli4_asic_entry_t *asic; + + memset(sli4, 0, sizeof(struct sli4)); + + sli4->os = os; + sli4->pci = pdev; + + for (i = 0; i < 6; i++) + sli4->reg[i] = reg[i]; + /* + * Read the SLI_INTF register to discover the register layout + * and other capability information + */ + if (pci_read_config_dword(pdev, SLI4_INTF_REG, &intf)) + return -EIO; + + if ((intf & SLI4_INTF_VALID_MASK) != (u32)SLI4_INTF_VALID_VALUE) { + efc_log_err(sli4, "SLI_INTF is not valid\n"); + return -EIO; + } + + /* driver only support SLI-4 */ + if ((intf & SLI4_INTF_REV_MASK) != SLI4_INTF_REV_S4) { + efc_log_err(sli4, "Unsupported SLI revision (intf=%#x)\n", intf); + return -EIO; + } + + sli4->sli_family = intf & SLI4_INTF_FAMILY_MASK; + + sli4->if_type = intf & SLI4_INTF_IF_TYPE_MASK; + efc_log_info(sli4, "status=%#x error1=%#x error2=%#x\n", + sli_reg_read_status(sli4), + sli_reg_read_err1(sli4), + sli_reg_read_err2(sli4)); + + /* + * set the ASIC type and revision + */ + if (pci_read_config_dword(pdev, PCI_CLASS_REVISION, &pci_class_rev)) + return -EIO; + + rev_id = pci_class_rev & 0xff; + family = sli4->sli_family; + if (family == SLI4_FAMILY_CHECK_ASIC_TYPE) { + if (!pci_read_config_dword(pdev, SLI4_ASIC_ID_REG, &asic_id)) + family = asic_id & SLI4_ASIC_GEN_MASK; + } + + for (i = 0, asic = sli4_asic_table; i < ARRAY_SIZE(sli4_asic_table); + 
i++, asic++) { + if (rev_id == asic->rev_id && family == asic->family) { + sli4->asic_type = family; + sli4->asic_rev = rev_id; + break; + } + } + /* Fail if no matching asic type/rev was found */ + if (!sli4->asic_type) { + efc_log_err(sli4, "no matching asic family/rev found: %02x/%02x\n", + family, rev_id); + return -EIO; + } + + /* + * The bootstrap mailbox is equivalent to a MQ with a single 256 byte + * entry, a CQ with a single 16 byte entry, and no event queue. + * Alignment must be 16 bytes as the low order address bits in the + * address register are also control / status. + */ + sli4->bmbx.size = SLI4_BMBX_SIZE + sizeof(struct sli4_mcqe); + sli4->bmbx.virt = dma_alloc_coherent(&pdev->dev, sli4->bmbx.size, + &sli4->bmbx.phys, GFP_KERNEL); + if (!sli4->bmbx.virt) { + memset(&sli4->bmbx, 0, sizeof(struct efc_dma)); + efc_log_err(sli4, "bootstrap mailbox allocation failed\n"); + return -EIO; + } + + if (sli4->bmbx.phys & SLI4_BMBX_MASK_LO) { + efc_log_err(sli4, "bad alignment for bootstrap mailbox\n"); + return -EIO; + } + + efc_log_info(sli4, "bmbx v=%p p=0x%x %08x s=%zd\n", sli4->bmbx.virt, + upper_32_bits(sli4->bmbx.phys), + lower_32_bits(sli4->bmbx.phys), sli4->bmbx.size); + + /* 4096 is arbitrary. What should this value actually be? */ + sli4->vpd_data.size = 4096; + sli4->vpd_data.virt = dma_alloc_coherent(&pdev->dev, + sli4->vpd_data.size, + &sli4->vpd_data.phys, + GFP_KERNEL); + if (!sli4->vpd_data.virt) { + memset(&sli4->vpd_data, 0, sizeof(struct efc_dma)); + /* Note that failure isn't fatal in this specific case */ + efc_log_info(sli4, "VPD buffer allocation failed\n"); + } + + if (!sli_fw_init(sli4)) { + efc_log_err(sli4, "FW initialization failed\n"); + return -EIO; + } + + /* + * Set one of fcpi(initiator), fcpt(target), fcpc(combined) to true + * in addition to any other desired features + */ + sli4->features = (SLI4_REQFEAT_IAAB | SLI4_REQFEAT_NPIV | + SLI4_REQFEAT_DIF | SLI4_REQFEAT_VF | + SLI4_REQFEAT_FCPC | SLI4_REQFEAT_IAAR | + SLI4_REQFEAT_HLM | SLI4_REQFEAT_PERFH | + SLI4_REQFEAT_RXSEQ | SLI4_REQFEAT_RXRI | + SLI4_REQFEAT_MRQP); + + /* use performance hints if available */ + if (sli4->params.perf_hint) + sli4->features |= SLI4_REQFEAT_PERFH; + + if (sli_request_features(sli4, &sli4->features, true)) + return -EIO; + + if (sli_get_config(sli4)) + return -EIO; + + return 0; +} + +int +sli_init(struct sli4 *sli4) +{ + if (sli4->params.has_extents) { + efc_log_info(sli4, "extend allocation not supported\n"); + return -EIO; + } + + sli4->features &= (~SLI4_REQFEAT_HLM); + sli4->features &= (~SLI4_REQFEAT_RXSEQ); + sli4->features &= (~SLI4_REQFEAT_RXRI); + + if (sli_request_features(sli4, &sli4->features, false)) + return -EIO; + + return 0; +} + +int +sli_reset(struct sli4 *sli4) +{ + u32 i; + + if (!sli_fw_init(sli4)) { + efc_log_crit(sli4, "FW initialization failed\n"); + return -EIO; + } + + kfree(sli4->ext[0].base); + sli4->ext[0].base = NULL; + + for (i = 0; i < SLI4_RSRC_MAX; i++) { + bitmap_free(sli4->ext[i].use_map); + sli4->ext[i].use_map = NULL; + sli4->ext[i].base = NULL; + } + + return sli_get_config(sli4); +} + +int +sli_fw_reset(struct sli4 *sli4) +{ + /* + * Firmware must be ready before issuing the reset. 
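As a worked example of why the SLI4_BMBX_MASK_LO alignment check above matters: the bootstrap doorbell reuses its two low bits for the HI and RDY flags, so sli_bmbx_write_hi()/sli_bmbx_write_lo() (defined later in sli4.h in this patch) pack the DMA address with a 2-bit shift and discard the low four address bits. The address value below is made up for illustration.

	/*
	 * Illustrative only, with a hypothetical 16-byte-aligned address
	 * phys = 0x234567f0:
	 *
	 *   hi write = (upper_32_bits(phys) & ~SLI4_BMBX_MASK_HI) | SLI4_BMBX_HI
	 *            = 0x00000000 | 0x00000002 = 0x00000002
	 *   lo write = ((upper_32_bits(phys) & SLI4_BMBX_MASK_HI) << 30) |
	 *              ((phys & ~SLI4_BMBX_MASK_LO) >> 2)
	 *            = 0x00000000 | 0x08d159fc = 0x08d159fc
	 *
	 * A buffer that is not 16-byte aligned would lose its low address
	 * bits in the ">> 2" packing, which is why sli_setup() rejects it.
	 */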
+ */ + if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) { + efc_log_crit(sli4, "FW status is NOT ready\n"); + return -EIO; + } + + /* Lancer uses PHYDEV_CONTROL */ + writel(SLI4_PHYDEV_CTRL_FRST, (sli4->reg[0] + SLI4_PHYDEV_CTRL_REG)); + + /* wait for the FW to become ready after the reset */ + if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) { + efc_log_crit(sli4, "Failed to be ready after firmware reset\n"); + return -EIO; + } + return 0; +} + +void +sli_teardown(struct sli4 *sli4) +{ + u32 i; + + kfree(sli4->ext[0].base); + sli4->ext[0].base = NULL; + + for (i = 0; i < SLI4_RSRC_MAX; i++) { + sli4->ext[i].base = NULL; + + bitmap_free(sli4->ext[i].use_map); + sli4->ext[i].use_map = NULL; + } + + if (!sli_sliport_reset(sli4)) + efc_log_err(sli4, "FW deinitialization failed\n"); + + dma_free_coherent(&sli4->pci->dev, sli4->vpd_data.size, + sli4->vpd_data.virt, sli4->vpd_data.phys); + memset(&sli4->vpd_data, 0, sizeof(struct efc_dma)); + + dma_free_coherent(&sli4->pci->dev, sli4->bmbx.size, + sli4->bmbx.virt, sli4->bmbx.phys); + memset(&sli4->bmbx, 0, sizeof(struct efc_dma)); +} + +int +sli_callback(struct sli4 *sli4, enum sli4_callback which, + void *func, void *arg) +{ + if (!func) { + efc_log_err(sli4, "bad parameter sli4=%p which=%#x func=%p\n", + sli4, which, func); + return -EIO; + } + + switch (which) { + case SLI4_CB_LINK: + sli4->link = func; + sli4->link_arg = arg; + break; + default: + efc_log_info(sli4, "unknown callback %#x\n", which); + return -EIO; + } + + return 0; +} + +int +sli_eq_modify_delay(struct sli4 *sli4, struct sli4_queue *eq, + u32 num_eq, u32 shift, u32 delay_mult) +{ + sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt, eq, num_eq, + shift, delay_mult); + + if (sli_bmbx_command(sli4)) { + efc_log_crit(sli4, "bootstrap mailbox write fail (MODIFY EQ DELAY)\n"); + return -EIO; + } + if (sli_res_sli_config(sli4, sli4->bmbx.virt)) { + efc_log_err(sli4, "bad status MODIFY EQ DELAY\n"); + return -EIO; + } + + return 0; +} + +int +sli_resource_alloc(struct sli4 *sli4, enum sli4_resource rtype, + u32 *rid, u32 *index) +{ + int rc = 0; + u32 size; + u32 ext_idx; + u32 item_idx; + u32 position; + + *rid = U32_MAX; + *index = U32_MAX; + + switch (rtype) { + case SLI4_RSRC_VFI: + case SLI4_RSRC_VPI: + case SLI4_RSRC_RPI: + case SLI4_RSRC_XRI: + position = + find_first_zero_bit(sli4->ext[rtype].use_map, + sli4->ext[rtype].map_size); + if (position >= sli4->ext[rtype].map_size) { + efc_log_err(sli4, "out of resource %d (alloc=%d)\n", + rtype, sli4->ext[rtype].n_alloc); + rc = -EIO; + break; + } + set_bit(position, sli4->ext[rtype].use_map); + *index = position; + + size = sli4->ext[rtype].size; + + ext_idx = *index / size; + item_idx = *index % size; + + *rid = sli4->ext[rtype].base[ext_idx] + item_idx; + + sli4->ext[rtype].n_alloc++; + break; + default: + rc = -EIO; + } + + return rc; +} + +int +sli_resource_free(struct sli4 *sli4, enum sli4_resource rtype, u32 rid) +{ + int rc = -EIO; + u32 x; + u32 size, *base; + + switch (rtype) { + case SLI4_RSRC_VFI: + case SLI4_RSRC_VPI: + case SLI4_RSRC_RPI: + case SLI4_RSRC_XRI: + /* + * Figure out which extent contains the resource ID. I.e. find + * the extent such that + * extent->base <= resource ID < extent->base + extent->size + */ + base = sli4->ext[rtype].base; + size = sli4->ext[rtype].size; + + /* + * In the case of FW reset, this may be cleared + * but the force_free path will still attempt to + * free the resource. Prevent a NULL pointer access. 
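A worked example of the extent arithmetic used by sli_resource_alloc() above and completed by sli_resource_free() just below, using hypothetical numbers (two RPI extents of 64 IDs each, with bases 100 and 900):

	/*
	 * Illustrative only:  ext[SLI4_RSRC_RPI].size = 64,
	 *                     ext[SLI4_RSRC_RPI].base = { 100, 900 }
	 *
	 * alloc: find_first_zero_bit() returns bit 70
	 *        ext_idx  = 70 / 64 = 1
	 *        item_idx = 70 % 64 = 6
	 *        rid      = base[1] + 6 = 906
	 *
	 * free(rid = 906): extent 1 matches since 900 <= 906 < 964
	 *        rid -= base[1]         -> 6
	 *        clear_bit(1 * 64 + 6)  -> bit 70 returned to the pool
	 */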
+ */ + if (!base) + break; + + for (x = 0; x < sli4->ext[rtype].number; x++) { + if ((rid < base[x] || (rid >= (base[x] + size)))) + continue; + + rid -= base[x]; + clear_bit((x * size) + rid, sli4->ext[rtype].use_map); + rc = 0; + break; + } + break; + default: + break; + } + + return rc; +} + +int +sli_resource_reset(struct sli4 *sli4, enum sli4_resource rtype) +{ + int rc = -EIO; + u32 i; + + switch (rtype) { + case SLI4_RSRC_VFI: + case SLI4_RSRC_VPI: + case SLI4_RSRC_RPI: + case SLI4_RSRC_XRI: + for (i = 0; i < sli4->ext[rtype].map_size; i++) + clear_bit(i, sli4->ext[rtype].use_map); + rc = 0; + break; + default: + break; + } + + return rc; +} + +int sli_raise_ue(struct sli4 *sli4, u8 dump) +{ + u32 val = 0; + + if (dump == SLI4_FUNC_DESC_DUMP) { + val = SLI4_PORT_CTRL_FDD | SLI4_PORT_CTRL_IP; + writel(val, (sli4->reg[0] + SLI4_PORT_CTRL_REG)); + } else { + val = SLI4_PHYDEV_CTRL_FRST; + + if (dump == SLI4_CHIP_LEVEL_DUMP) + val |= SLI4_PHYDEV_CTRL_DD; + writel(val, (sli4->reg[0] + SLI4_PHYDEV_CTRL_REG)); + } + + return 0; +} + +int sli_dump_is_ready(struct sli4 *sli4) +{ + int rc = SLI4_DUMP_READY_STATUS_NOT_READY; + u32 port_val; + u32 bmbx_val; + + /* + * Ensure that the port is ready AND the mailbox is + * ready before signaling that the dump is ready to go. + */ + port_val = sli_reg_read_status(sli4); + bmbx_val = readl(sli4->reg[0] + SLI4_BMBX_REG); + + if ((bmbx_val & SLI4_BMBX_RDY) && + (port_val & SLI4_PORT_STATUS_RDY)) { + if (port_val & SLI4_PORT_STATUS_DIP) + rc = SLI4_DUMP_READY_STATUS_DD_PRESENT; + else if (port_val & SLI4_PORT_STATUS_FDP) + rc = SLI4_DUMP_READY_STATUS_FDB_PRESENT; + } + + return rc; +} + +bool sli_reset_required(struct sli4 *sli4) +{ + u32 val; + + val = sli_reg_read_status(sli4); + return (val & SLI4_PORT_STATUS_RN); +} + +int +sli_cmd_post_sgl_pages(struct sli4 *sli4, void *buf, u16 xri, + u32 xri_count, struct efc_dma *page0[], + struct efc_dma *page1[], struct efc_dma *dma) +{ + struct sli4_rqst_post_sgl_pages *post = NULL; + u32 i; + __le32 req_len; + + post = sli_config_cmd_init(sli4, buf, + SLI4_CFG_PYLD_LENGTH(post_sgl_pages), dma); + if (!post) + return -EIO; + + /* payload size calculation */ + /* 4 = xri_start + xri_count */ + /* xri_count = # of XRI's registered */ + /* sizeof(uint64_t) = physical address size */ + /* 2 = # of physical addresses per page set */ + req_len = cpu_to_le32(4 + (xri_count * (sizeof(uint64_t) * 2))); + sli_cmd_fill_hdr(&post->hdr, SLI4_OPC_POST_SGL_PAGES, SLI4_SUBSYSTEM_FC, + CMD_V0, req_len); + post->xri_start = cpu_to_le16(xri); + post->xri_count = cpu_to_le16(xri_count); + + for (i = 0; i < xri_count; i++) { + post->page_set[i].page0_low = + cpu_to_le32(lower_32_bits(page0[i]->phys)); + post->page_set[i].page0_high = + cpu_to_le32(upper_32_bits(page0[i]->phys)); + } + + if (page1) { + for (i = 0; i < xri_count; i++) { + post->page_set[i].page1_low = + cpu_to_le32(lower_32_bits(page1[i]->phys)); + post->page_set[i].page1_high = + cpu_to_le32(upper_32_bits(page1[i]->phys)); + } + } + + return 0; +} + +int +sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, struct efc_dma *dma, + u16 rpi, struct efc_dma *payload_dma) +{ + struct sli4_rqst_post_hdr_templates *req = NULL; + uintptr_t phys = 0; + u32 i = 0; + u32 page_count, payload_size; + + page_count = sli_page_count(dma->size, SLI_PAGE_SIZE); + + payload_size = ((sizeof(struct sli4_rqst_post_hdr_templates) + + (page_count * SZ_DMAADDR)) - sizeof(struct sli4_rqst_hdr)); + + if (page_count > 16) { + /* + * We can't fit more than 16 descriptors into an embedded 
mbox + * command, it has to be non-embedded + */ + payload_dma->size = payload_size; + payload_dma->virt = dma_alloc_coherent(&sli4->pci->dev, + payload_dma->size, + &payload_dma->phys, GFP_KERNEL); + if (!payload_dma->virt) { + memset(payload_dma, 0, sizeof(struct efc_dma)); + efc_log_err(sli4, "mbox payload memory allocation fail\n"); + return -EIO; + } + req = sli_config_cmd_init(sli4, buf, payload_size, payload_dma); + } else { + req = sli_config_cmd_init(sli4, buf, payload_size, NULL); + } + + if (!req) + return -EIO; + + if (rpi == U16_MAX) + rpi = sli4->ext[SLI4_RSRC_RPI].base[0]; + + sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_POST_HDR_TEMPLATES, + SLI4_SUBSYSTEM_FC, CMD_V0, + SLI4_RQST_PYLD_LEN(post_hdr_templates)); + + req->rpi_offset = cpu_to_le16(rpi); + req->page_count = cpu_to_le16(page_count); + phys = dma->phys; + for (i = 0; i < page_count; i++) { + req->page_descriptor[i].low = cpu_to_le32(lower_32_bits(phys)); + req->page_descriptor[i].high = cpu_to_le32(upper_32_bits(phys)); + + phys += SLI_PAGE_SIZE; + } + + return 0; +} + +u32 +sli_fc_get_rpi_requirements(struct sli4 *sli4, u32 n_rpi) +{ + u32 bytes = 0; + + /* Check if header templates needed */ + if (sli4->params.hdr_template_req) + /* round up to a page */ + bytes = round_up(n_rpi * SLI4_HDR_TEMPLATE_SIZE, SLI_PAGE_SIZE); + + return bytes; +} + +const char * +sli_fc_get_status_string(u32 status) +{ + static struct { + u32 code; + const char *label; + } lookup[] = { + {SLI4_FC_WCQE_STATUS_SUCCESS, "SUCCESS"}, + {SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE, "FCP_RSP_FAILURE"}, + {SLI4_FC_WCQE_STATUS_REMOTE_STOP, "REMOTE_STOP"}, + {SLI4_FC_WCQE_STATUS_LOCAL_REJECT, "LOCAL_REJECT"}, + {SLI4_FC_WCQE_STATUS_NPORT_RJT, "NPORT_RJT"}, + {SLI4_FC_WCQE_STATUS_FABRIC_RJT, "FABRIC_RJT"}, + {SLI4_FC_WCQE_STATUS_NPORT_BSY, "NPORT_BSY"}, + {SLI4_FC_WCQE_STATUS_FABRIC_BSY, "FABRIC_BSY"}, + {SLI4_FC_WCQE_STATUS_LS_RJT, "LS_RJT"}, + {SLI4_FC_WCQE_STATUS_CMD_REJECT, "CMD_REJECT"}, + {SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK, "FCP_TGT_LENCHECK"}, + {SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED, "BUF_LEN_EXCEEDED"}, + {SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED, + "RQ_INSUFF_BUF_NEEDED"}, + {SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC, "RQ_INSUFF_FRM_DESC"}, + {SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE, "RQ_DMA_FAILURE"}, + {SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE, "FCP_RSP_TRUNCATE"}, + {SLI4_FC_WCQE_STATUS_DI_ERROR, "DI_ERROR"}, + {SLI4_FC_WCQE_STATUS_BA_RJT, "BA_RJT"}, + {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED, + "RQ_INSUFF_XRI_NEEDED"}, + {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC, "INSUFF_XRI_DISC"}, + {SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT, "RX_ERROR_DETECT"}, + {SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST, "RX_ABORT_REQUEST"}, + }; + u32 i; + + for (i = 0; i < ARRAY_SIZE(lookup); i++) { + if (status == lookup[i].code) + return lookup[i].label; + } + return "unknown"; +} diff --git a/drivers/scsi/elx/libefc_sli/sli4.h b/drivers/scsi/elx/libefc_sli/sli4.h new file mode 100644 index 000000000..38af166cc --- /dev/null +++ b/drivers/scsi/elx/libefc_sli/sli4.h @@ -0,0 +1,4132 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + * + */ + +/* + * All common SLI-4 structures and function prototypes. 
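A minimal usage sketch for the completion-status lookup defined above in sli4.c; the example_* wrapper is hypothetical and not part of the patch:

/* Illustrative only: translate a WCQE completion status for a log message. */
static void example_log_wcqe_status(struct sli4 *sli4, u32 status)
{
	efc_log_info(sli4, "WCQE completed: %s (%#x)\n",
		     sli_fc_get_status_string(status), status);
}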
+ */ + +#ifndef _SLI4_H +#define _SLI4_H + +#include +#include +#include "scsi/fc/fc_els.h" +#include "scsi/fc/fc_fs.h" +#include "../include/efc_common.h" + +/************************************************************************* + * Common SLI-4 register offsets and field definitions + */ + +/* SLI_INTF - SLI Interface Definition Register */ +#define SLI4_INTF_REG 0x0058 +enum sli4_intf { + SLI4_INTF_REV_SHIFT = 4, + SLI4_INTF_REV_MASK = 0xf0, + + SLI4_INTF_REV_S3 = 0x30, + SLI4_INTF_REV_S4 = 0x40, + + SLI4_INTF_FAMILY_SHIFT = 8, + SLI4_INTF_FAMILY_MASK = 0x0f00, + + SLI4_FAMILY_CHECK_ASIC_TYPE = 0x0f00, + + SLI4_INTF_IF_TYPE_SHIFT = 12, + SLI4_INTF_IF_TYPE_MASK = 0xf000, + + SLI4_INTF_IF_TYPE_2 = 0x2000, + SLI4_INTF_IF_TYPE_6 = 0x6000, + + SLI4_INTF_VALID_SHIFT = 29, + SLI4_INTF_VALID_MASK = 0xe0000000, + + SLI4_INTF_VALID_VALUE = 0xc0000000, +}; + +/* ASIC_ID - SLI ASIC Type and Revision Register */ +#define SLI4_ASIC_ID_REG 0x009c +enum sli4_asic { + SLI4_ASIC_GEN_SHIFT = 8, + SLI4_ASIC_GEN_MASK = 0xff00, + SLI4_ASIC_GEN_5 = 0x0b00, + SLI4_ASIC_GEN_6 = 0x0c00, + SLI4_ASIC_GEN_7 = 0x0d00, +}; + +enum sli4_acic_revisions { + SLI4_ASIC_REV_A0 = 0x00, + SLI4_ASIC_REV_A1 = 0x01, + SLI4_ASIC_REV_A2 = 0x02, + SLI4_ASIC_REV_A3 = 0x03, + SLI4_ASIC_REV_B0 = 0x10, + SLI4_ASIC_REV_B1 = 0x11, + SLI4_ASIC_REV_B2 = 0x12, + SLI4_ASIC_REV_C0 = 0x20, + SLI4_ASIC_REV_C1 = 0x21, + SLI4_ASIC_REV_C2 = 0x22, + SLI4_ASIC_REV_D0 = 0x30, +}; + +struct sli4_asic_entry_t { + u32 rev_id; + u32 family; +}; + +/* BMBX - Bootstrap Mailbox Register */ +#define SLI4_BMBX_REG 0x0160 +enum sli4_bmbx { + SLI4_BMBX_MASK_HI = 0x3, + SLI4_BMBX_MASK_LO = 0xf, + SLI4_BMBX_RDY = 1 << 0, + SLI4_BMBX_HI = 1 << 1, + SLI4_BMBX_SIZE = 256, +}; + +static inline u32 +sli_bmbx_write_hi(u64 addr) { + u32 val; + + val = upper_32_bits(addr) & ~SLI4_BMBX_MASK_HI; + val |= SLI4_BMBX_HI; + + return val; +} + +static inline u32 +sli_bmbx_write_lo(u64 addr) { + u32 val; + + val = (upper_32_bits(addr) & SLI4_BMBX_MASK_HI) << 30; + val |= ((addr) & ~SLI4_BMBX_MASK_LO) >> 2; + + return val; +} + +/* SLIPORT_CONTROL - SLI Port Control Register */ +#define SLI4_PORT_CTRL_REG 0x0408 +enum sli4_port_ctrl { + SLI4_PORT_CTRL_IP = 1u << 27, + SLI4_PORT_CTRL_IDIS = 1u << 22, + SLI4_PORT_CTRL_FDD = 1u << 31, +}; + +/* SLI4_SLIPORT_ERROR - SLI Port Error Register */ +#define SLI4_PORT_ERROR1 0x040c +#define SLI4_PORT_ERROR2 0x0410 + +/* EQCQ_DOORBELL - EQ and CQ Doorbell Register */ +#define SLI4_EQCQ_DB_REG 0x120 +enum sli4_eqcq_e { + SLI4_EQ_ID_LO_MASK = 0x01ff, + + SLI4_CQ_ID_LO_MASK = 0x03ff, + + SLI4_EQCQ_CI_EQ = 0x0200, + + SLI4_EQCQ_QT_EQ = 0x00000400, + SLI4_EQCQ_QT_CQ = 0x00000000, + + SLI4_EQCQ_ID_HI_SHIFT = 11, + SLI4_EQCQ_ID_HI_MASK = 0xf800, + + SLI4_EQCQ_NUM_SHIFT = 16, + SLI4_EQCQ_NUM_MASK = 0x1fff0000, + + SLI4_EQCQ_ARM = 0x20000000, + SLI4_EQCQ_UNARM = 0x00000000, +}; + +static inline u32 +sli_format_eq_db_data(u16 num_popped, u16 id, u32 arm) { + u32 reg; + + reg = (id & SLI4_EQ_ID_LO_MASK) | SLI4_EQCQ_QT_EQ; + reg |= (((id) >> 9) << SLI4_EQCQ_ID_HI_SHIFT) & SLI4_EQCQ_ID_HI_MASK; + reg |= ((num_popped) << SLI4_EQCQ_NUM_SHIFT) & SLI4_EQCQ_NUM_MASK; + reg |= arm | SLI4_EQCQ_CI_EQ; + + return reg; +} + +static inline u32 +sli_format_cq_db_data(u16 num_popped, u16 id, u32 arm) { + u32 reg; + + reg = ((id) & SLI4_CQ_ID_LO_MASK) | SLI4_EQCQ_QT_CQ; + reg |= (((id) >> 10) << SLI4_EQCQ_ID_HI_SHIFT) & SLI4_EQCQ_ID_HI_MASK; + reg |= ((num_popped) << SLI4_EQCQ_NUM_SHIFT) & SLI4_EQCQ_NUM_MASK; + reg |= arm; + + return reg; +} + +/* EQ_DOORBELL - 
EQ Doorbell Register for IF_TYPE = 6*/ +#define SLI4_IF6_EQ_DB_REG 0x120 +enum sli4_eq_e { + SLI4_IF6_EQ_ID_MASK = 0x0fff, + + SLI4_IF6_EQ_NUM_SHIFT = 16, + SLI4_IF6_EQ_NUM_MASK = 0x1fff0000, +}; + +static inline u32 +sli_format_if6_eq_db_data(u16 num_popped, u16 id, u32 arm) { + u32 reg; + + reg = id & SLI4_IF6_EQ_ID_MASK; + reg |= (num_popped << SLI4_IF6_EQ_NUM_SHIFT) & SLI4_IF6_EQ_NUM_MASK; + reg |= arm; + + return reg; +} + +/* CQ_DOORBELL - CQ Doorbell Register for IF_TYPE = 6 */ +#define SLI4_IF6_CQ_DB_REG 0xc0 +enum sli4_cq_e { + SLI4_IF6_CQ_ID_MASK = 0xffff, + + SLI4_IF6_CQ_NUM_SHIFT = 16, + SLI4_IF6_CQ_NUM_MASK = 0x1fff0000, +}; + +static inline u32 +sli_format_if6_cq_db_data(u16 num_popped, u16 id, u32 arm) { + u32 reg; + + reg = id & SLI4_IF6_CQ_ID_MASK; + reg |= ((num_popped) << SLI4_IF6_CQ_NUM_SHIFT) & SLI4_IF6_CQ_NUM_MASK; + reg |= arm; + + return reg; +} + +/* MQ_DOORBELL - MQ Doorbell Register */ +#define SLI4_MQ_DB_REG 0x0140 +#define SLI4_IF6_MQ_DB_REG 0x0160 +enum sli4_mq_e { + SLI4_MQ_ID_MASK = 0xffff, + + SLI4_MQ_NUM_SHIFT = 16, + SLI4_MQ_NUM_MASK = 0x3fff0000, +}; + +static inline u32 +sli_format_mq_db_data(u16 id) { + u32 reg; + + reg = id & SLI4_MQ_ID_MASK; + reg |= (1 << SLI4_MQ_NUM_SHIFT) & SLI4_MQ_NUM_MASK; + + return reg; +} + +/* RQ_DOORBELL - RQ Doorbell Register */ +#define SLI4_RQ_DB_REG 0x0a0 +#define SLI4_IF6_RQ_DB_REG 0x0080 +enum sli4_rq_e { + SLI4_RQ_DB_ID_MASK = 0xffff, + + SLI4_RQ_DB_NUM_SHIFT = 16, + SLI4_RQ_DB_NUM_MASK = 0x3fff0000, +}; + +static inline u32 +sli_format_rq_db_data(u16 id) { + u32 reg; + + reg = id & SLI4_RQ_DB_ID_MASK; + reg |= (1 << SLI4_RQ_DB_NUM_SHIFT) & SLI4_RQ_DB_NUM_MASK; + + return reg; +} + +/* WQ_DOORBELL - WQ Doorbell Register */ +#define SLI4_IO_WQ_DB_REG 0x040 +#define SLI4_IF6_WQ_DB_REG 0x040 +enum sli4_wq_e { + SLI4_WQ_ID_MASK = 0xffff, + + SLI4_WQ_IDX_SHIFT = 16, + SLI4_WQ_IDX_MASK = 0xff0000, + + SLI4_WQ_NUM_SHIFT = 24, + SLI4_WQ_NUM_MASK = 0x0ff00000, +}; + +static inline u32 +sli_format_wq_db_data(u16 id) { + u32 reg; + + reg = id & SLI4_WQ_ID_MASK; + reg |= (1 << SLI4_WQ_NUM_SHIFT) & SLI4_WQ_NUM_MASK; + + return reg; +} + +/* SLIPORT_STATUS - SLI Port Status Register */ +#define SLI4_PORT_STATUS_REGOFF 0x0404 +enum sli4_port_status { + SLI4_PORT_STATUS_FDP = 1u << 21, + SLI4_PORT_STATUS_RDY = 1u << 23, + SLI4_PORT_STATUS_RN = 1u << 24, + SLI4_PORT_STATUS_DIP = 1u << 25, + SLI4_PORT_STATUS_OTI = 1u << 29, + SLI4_PORT_STATUS_ERR = 1u << 31, +}; + +#define SLI4_PHYDEV_CTRL_REG 0x0414 +#define SLI4_PHYDEV_CTRL_FRST (1 << 1) +#define SLI4_PHYDEV_CTRL_DD (1 << 2) + +/* Register name enums */ +enum sli4_regname_en { + SLI4_REG_BMBX, + SLI4_REG_EQ_DOORBELL, + SLI4_REG_CQ_DOORBELL, + SLI4_REG_RQ_DOORBELL, + SLI4_REG_IO_WQ_DOORBELL, + SLI4_REG_MQ_DOORBELL, + SLI4_REG_PHYSDEV_CONTROL, + SLI4_REG_PORT_CONTROL, + SLI4_REG_PORT_ERROR1, + SLI4_REG_PORT_ERROR2, + SLI4_REG_PORT_SEMAPHORE, + SLI4_REG_PORT_STATUS, + SLI4_REG_UNKWOWN /* must be last */ +}; + +struct sli4_reg { + u32 rset; + u32 off; +}; + +struct sli4_dmaaddr { + __le32 low; + __le32 high; +}; + +/* + * a 3-word Buffer Descriptor Entry with + * address 1st 2 words, length last word + */ +struct sli4_bufptr { + struct sli4_dmaaddr addr; + __le32 length; +}; + +/* Buffer Descriptor Entry (BDE) */ +enum sli4_bde_e { + SLI4_BDE_LEN_MASK = 0x00ffffff, + SLI4_BDE_TYPE_MASK = 0xff000000, +}; + +struct sli4_bde { + __le32 bde_type_buflen; + union { + struct sli4_dmaaddr data; + struct { + __le32 offset; + __le32 rsvd2; + } imm; + struct sli4_dmaaddr blp; + } u; +}; + +/* 
Buffer Descriptors */ +enum sli4_bde_type { + SLI4_BDE_TYPE_SHIFT = 24, + SLI4_BDE_TYPE_64 = 0x00, /* Generic 64-bit data */ + SLI4_BDE_TYPE_IMM = 0x01, /* Immediate data */ + SLI4_BDE_TYPE_BLP = 0x40, /* Buffer List Pointer */ +}; + +#define SLI4_BDE_TYPE_VAL(type) \ + (SLI4_BDE_TYPE_##type << SLI4_BDE_TYPE_SHIFT) + +/* Scatter-Gather Entry (SGE) */ +#define SLI4_SGE_MAX_RESERVED 3 + +enum sli4_sge_type { + /* DW2 */ + SLI4_SGE_DATA_OFFSET_MASK = 0x07ffffff, + /*DW2W1*/ + SLI4_SGE_TYPE_SHIFT = 27, + SLI4_SGE_TYPE_MASK = 0x78000000, + /*SGE Types*/ + SLI4_SGE_TYPE_DATA = 0x00, + SLI4_SGE_TYPE_DIF = 0x04, /* Data Integrity Field */ + SLI4_SGE_TYPE_LSP = 0x05, /* List Segment Pointer */ + SLI4_SGE_TYPE_PEDIF = 0x06, /* Post Encryption Engine DIF */ + SLI4_SGE_TYPE_PESEED = 0x07, /* Post Encryption DIF Seed */ + SLI4_SGE_TYPE_DISEED = 0x08, /* DIF Seed */ + SLI4_SGE_TYPE_ENC = 0x09, /* Encryption */ + SLI4_SGE_TYPE_ATM = 0x0a, /* DIF Application Tag Mask */ + SLI4_SGE_TYPE_SKIP = 0x0c, /* SKIP */ + + SLI4_SGE_LAST = 1u << 31, +}; + +struct sli4_sge { + __le32 buffer_address_high; + __le32 buffer_address_low; + __le32 dw2_flags; + __le32 buffer_length; +}; + +/* T10 DIF Scatter-Gather Entry (SGE) */ +struct sli4_dif_sge { + __le32 buffer_address_high; + __le32 buffer_address_low; + __le32 dw2_flags; + __le32 rsvd12; +}; + +/* Data Integrity Seed (DISEED) SGE */ +enum sli4_diseed_sge_flags { + /* DW2W1 */ + SLI4_DISEED_SGE_HS = 1 << 2, + SLI4_DISEED_SGE_WS = 1 << 3, + SLI4_DISEED_SGE_IC = 1 << 4, + SLI4_DISEED_SGE_ICS = 1 << 5, + SLI4_DISEED_SGE_ATRT = 1 << 6, + SLI4_DISEED_SGE_AT = 1 << 7, + SLI4_DISEED_SGE_FAT = 1 << 8, + SLI4_DISEED_SGE_NA = 1 << 9, + SLI4_DISEED_SGE_HI = 1 << 10, + + /* DW3W1 */ + SLI4_DISEED_SGE_BS_MASK = 0x0007, + SLI4_DISEED_SGE_AI = 1 << 3, + SLI4_DISEED_SGE_ME = 1 << 4, + SLI4_DISEED_SGE_RE = 1 << 5, + SLI4_DISEED_SGE_CE = 1 << 6, + SLI4_DISEED_SGE_NR = 1 << 7, + + SLI4_DISEED_SGE_OP_RX_SHIFT = 8, + SLI4_DISEED_SGE_OP_RX_MASK = 0x0f00, + SLI4_DISEED_SGE_OP_TX_SHIFT = 12, + SLI4_DISEED_SGE_OP_TX_MASK = 0xf000, +}; + +/* Opcode values */ +enum sli4_diseed_sge_opcodes { + SLI4_DISEED_SGE_OP_IN_NODIF_OUT_CRC, + SLI4_DISEED_SGE_OP_IN_CRC_OUT_NODIF, + SLI4_DISEED_SGE_OP_IN_NODIF_OUT_CSUM, + SLI4_DISEED_SGE_OP_IN_CSUM_OUT_NODIF, + SLI4_DISEED_SGE_OP_IN_CRC_OUT_CRC, + SLI4_DISEED_SGE_OP_IN_CSUM_OUT_CSUM, + SLI4_DISEED_SGE_OP_IN_CRC_OUT_CSUM, + SLI4_DISEED_SGE_OP_IN_CSUM_OUT_CRC, + SLI4_DISEED_SGE_OP_IN_RAW_OUT_RAW, +}; + +#define SLI4_DISEED_SGE_OP_RX_VALUE(stype) \ + (SLI4_DISEED_SGE_OP_##stype << SLI4_DISEED_SGE_OP_RX_SHIFT) +#define SLI4_DISEED_SGE_OP_TX_VALUE(stype) \ + (SLI4_DISEED_SGE_OP_##stype << SLI4_DISEED_SGE_OP_TX_SHIFT) + +struct sli4_diseed_sge { + __le32 ref_tag_cmp; + __le32 ref_tag_repl; + __le16 app_tag_repl; + __le16 dw2w1_flags; + __le16 app_tag_cmp; + __le16 dw3w1_flags; +}; + +/* List Segment Pointer Scatter-Gather Entry (SGE) */ +#define SLI4_LSP_SGE_SEGLEN 0x00ffffff + +struct sli4_lsp_sge { + __le32 buffer_address_high; + __le32 buffer_address_low; + __le32 dw2_flags; + __le32 dw3_seglen; +}; + +enum sli4_eqe_e { + SLI4_EQE_VALID = 1, + SLI4_EQE_MJCODE = 0xe, + SLI4_EQE_MNCODE = 0xfff0, +}; + +struct sli4_eqe { + __le16 dw0w0_flags; + __le16 resource_id; +}; + +#define SLI4_MAJOR_CODE_STANDARD 0 +#define SLI4_MAJOR_CODE_SENTINEL 1 + +/* Sentinel EQE indicating the EQ is full */ +#define SLI4_EQE_STATUS_EQ_FULL 2 + +enum sli4_mcqe_e { + SLI4_MCQE_CONSUMED = 1u << 27, + SLI4_MCQE_COMPLETED = 1u << 28, + SLI4_MCQE_AE = 1u << 30, + SLI4_MCQE_VALID = 1u << 
31, +}; + +/* Entry was consumed but not completed */ +#define SLI4_MCQE_STATUS_NOT_COMPLETED -2 + +struct sli4_mcqe { + __le16 completion_status; + __le16 extended_status; + __le32 mqe_tag_low; + __le32 mqe_tag_high; + __le32 dw3_flags; +}; + +enum sli4_acqe_e { + SLI4_ACQE_AE = 1 << 6, /* async event - this is an ACQE */ + SLI4_ACQE_VAL = 1 << 7, /* valid - contents of CQE are valid */ +}; + +struct sli4_acqe { + __le32 event_data[3]; + u8 rsvd12; + u8 event_code; + u8 event_type; + u8 ae_val; +}; + +enum sli4_acqe_event_code { + SLI4_ACQE_EVENT_CODE_LINK_STATE = 0x01, + SLI4_ACQE_EVENT_CODE_FIP = 0x02, + SLI4_ACQE_EVENT_CODE_DCBX = 0x03, + SLI4_ACQE_EVENT_CODE_ISCSI = 0x04, + SLI4_ACQE_EVENT_CODE_GRP_5 = 0x05, + SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT = 0x10, + SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT = 0x11, + SLI4_ACQE_EVENT_CODE_VF_EVENT = 0x12, + SLI4_ACQE_EVENT_CODE_MR_EVENT = 0x13, +}; + +enum sli4_qtype { + SLI4_QTYPE_EQ, + SLI4_QTYPE_CQ, + SLI4_QTYPE_MQ, + SLI4_QTYPE_WQ, + SLI4_QTYPE_RQ, + SLI4_QTYPE_MAX, /* must be last */ +}; + +#define SLI4_USER_MQ_COUNT 1 +#define SLI4_MAX_CQ_SET_COUNT 16 +#define SLI4_MAX_RQ_SET_COUNT 16 + +enum sli4_qentry { + SLI4_QENTRY_ASYNC, + SLI4_QENTRY_MQ, + SLI4_QENTRY_RQ, + SLI4_QENTRY_WQ, + SLI4_QENTRY_WQ_RELEASE, + SLI4_QENTRY_OPT_WRITE_CMD, + SLI4_QENTRY_OPT_WRITE_DATA, + SLI4_QENTRY_XABT, + SLI4_QENTRY_MAX /* must be last */ +}; + +enum sli4_queue_flags { + SLI4_QUEUE_FLAG_MQ = 1 << 0, /* CQ has MQ/Async completion */ + SLI4_QUEUE_FLAG_HDR = 1 << 1, /* RQ for packet headers */ + SLI4_QUEUE_FLAG_RQBATCH = 1 << 2, /* RQ index increment by 8 */ +}; + +/* Generic Command Request header */ +enum sli4_cmd_version { + CMD_V0, + CMD_V1, + CMD_V2, +}; + +struct sli4_rqst_hdr { + u8 opcode; + u8 subsystem; + __le16 rsvd2; + __le32 timeout; + __le32 request_length; + __le32 dw3_version; +}; + +/* Generic Command Response header */ +struct sli4_rsp_hdr { + u8 opcode; + u8 subsystem; + __le16 rsvd2; + u8 status; + u8 additional_status; + __le16 rsvd6; + __le32 response_length; + __le32 actual_response_length; +}; + +#define SLI4_QUEUE_RQ_BATCH 8 + +#define SZ_DMAADDR sizeof(struct sli4_dmaaddr) +#define SLI4_RQST_CMDSZ(stype) sizeof(struct sli4_rqst_##stype) + +#define SLI4_RQST_PYLD_LEN(stype) \ + cpu_to_le32(sizeof(struct sli4_rqst_##stype) - \ + sizeof(struct sli4_rqst_hdr)) + +#define SLI4_RQST_PYLD_LEN_VAR(stype, varpyld) \ + cpu_to_le32((sizeof(struct sli4_rqst_##stype) + \ + varpyld) - sizeof(struct sli4_rqst_hdr)) + +#define SLI4_CFG_PYLD_LENGTH(stype) \ + max(sizeof(struct sli4_rqst_##stype), \ + sizeof(struct sli4_rsp_##stype)) + +enum sli4_create_cqv2_e { + /* DW5_flags values*/ + SLI4_CREATE_CQV2_CLSWM_MASK = 0x00003000, + SLI4_CREATE_CQV2_NODELAY = 0x00004000, + SLI4_CREATE_CQV2_AUTOVALID = 0x00008000, + SLI4_CREATE_CQV2_CQECNT_MASK = 0x18000000, + SLI4_CREATE_CQV2_VALID = 0x20000000, + SLI4_CREATE_CQV2_EVT = 0x80000000, + /* DW6W1_flags values*/ + SLI4_CREATE_CQV2_ARM = 0x8000, +}; + +struct sli4_rqst_cmn_create_cq_v2 { + struct sli4_rqst_hdr hdr; + __le16 num_pages; + u8 page_size; + u8 rsvd19; + __le32 dw5_flags; + __le16 eq_id; + __le16 dw6w1_arm; + __le16 cqe_count; + __le16 rsvd30; + __le32 rsvd32; + struct sli4_dmaaddr page_phys_addr[]; +}; + +enum sli4_create_cqset_e { + /* DW5_flags values*/ + SLI4_CREATE_CQSETV0_CLSWM_MASK = 0x00003000, + SLI4_CREATE_CQSETV0_NODELAY = 0x00004000, + SLI4_CREATE_CQSETV0_AUTOVALID = 0x00008000, + SLI4_CREATE_CQSETV0_CQECNT_MASK = 0x18000000, + SLI4_CREATE_CQSETV0_VALID = 0x20000000, + SLI4_CREATE_CQSETV0_EVT = 
0x80000000, + /* DW5W1_flags values */ + SLI4_CREATE_CQSETV0_CQE_COUNT = 0x7fff, + SLI4_CREATE_CQSETV0_ARM = 0x8000, +}; + +struct sli4_rqst_cmn_create_cq_set_v0 { + struct sli4_rqst_hdr hdr; + __le16 num_pages; + u8 page_size; + u8 rsvd19; + __le32 dw5_flags; + __le16 num_cq_req; + __le16 dw6w1_flags; + __le16 eq_id[16]; + struct sli4_dmaaddr page_phys_addr[]; +}; + +/* CQE count */ +enum sli4_cq_cnt { + SLI4_CQ_CNT_256, + SLI4_CQ_CNT_512, + SLI4_CQ_CNT_1024, + SLI4_CQ_CNT_LARGE, +}; + +#define SLI4_CQ_CNT_SHIFT 27 +#define SLI4_CQ_CNT_VAL(type) (SLI4_CQ_CNT_##type << SLI4_CQ_CNT_SHIFT) + +#define SLI4_CQE_BYTES (4 * sizeof(u32)) + +#define SLI4_CREATE_CQV2_MAX_PAGES 8 + +/* Generic Common Create EQ/CQ/MQ/WQ/RQ Queue completion */ +struct sli4_rsp_cmn_create_queue { + struct sli4_rsp_hdr hdr; + __le16 q_id; + u8 rsvd18; + u8 ulp; + __le32 db_offset; + __le16 db_rs; + __le16 db_fmt; +}; + +struct sli4_rsp_cmn_create_queue_set { + struct sli4_rsp_hdr hdr; + __le16 q_id; + __le16 num_q_allocated; +}; + +/* Common Destroy Queue */ +struct sli4_rqst_cmn_destroy_q { + struct sli4_rqst_hdr hdr; + __le16 q_id; + __le16 rsvd; +}; + +struct sli4_rsp_cmn_destroy_q { + struct sli4_rsp_hdr hdr; +}; + +/* Modify the delay multiplier for EQs */ +struct sli4_eqdelay_rec { + __le32 eq_id; + __le32 phase; + __le32 delay_multiplier; +}; + +struct sli4_rqst_cmn_modify_eq_delay { + struct sli4_rqst_hdr hdr; + __le32 num_eq; + struct sli4_eqdelay_rec eq_delay_record[8]; +}; + +struct sli4_rsp_cmn_modify_eq_delay { + struct sli4_rsp_hdr hdr; +}; + +enum sli4_create_cq_e { + /* DW5 */ + SLI4_CREATE_EQ_AUTOVALID = 1u << 28, + SLI4_CREATE_EQ_VALID = 1u << 29, + SLI4_CREATE_EQ_EQESZ = 1u << 31, + /* DW6 */ + SLI4_CREATE_EQ_COUNT = 7 << 26, + SLI4_CREATE_EQ_ARM = 1u << 31, + /* DW7 */ + SLI4_CREATE_EQ_DELAYMULTI_SHIFT = 13, + SLI4_CREATE_EQ_DELAYMULTI_MASK = 0x007fe000, + SLI4_CREATE_EQ_DELAYMULTI = 0x00040000, +}; + +struct sli4_rqst_cmn_create_eq { + struct sli4_rqst_hdr hdr; + __le16 num_pages; + __le16 rsvd18; + __le32 dw5_flags; + __le32 dw6_flags; + __le32 dw7_delaymulti; + __le32 rsvd32; + struct sli4_dmaaddr page_address[8]; +}; + +struct sli4_rsp_cmn_create_eq { + struct sli4_rsp_cmn_create_queue q_rsp; +}; + +/* EQ count */ +enum sli4_eq_cnt { + SLI4_EQ_CNT_256, + SLI4_EQ_CNT_512, + SLI4_EQ_CNT_1024, + SLI4_EQ_CNT_2048, + SLI4_EQ_CNT_4096 = 3, +}; + +#define SLI4_EQ_CNT_SHIFT 26 +#define SLI4_EQ_CNT_VAL(type) (SLI4_EQ_CNT_##type << SLI4_EQ_CNT_SHIFT) + +#define SLI4_EQE_SIZE_4 0 +#define SLI4_EQE_SIZE_16 1 + +/* Create a Mailbox Queue; accommodate v0 and v1 forms. 
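A small sketch of how the CQE/EQE count encodings above are folded into the create-request flag words. The actual wiring lives in the sli_cmd_*_create helpers outside this hunk, so the helper name and the exact flag combinations here are assumptions:

/* Illustrative only, not part of the patch. */
static void example_encode_queue_counts(struct sli4_rqst_cmn_create_cq_v2 *cq,
					struct sli4_rqst_cmn_create_eq *eq)
{
	/* CQE count occupies dw5 bits 28:27 (SLI4_CREATE_CQV2_CQECNT_MASK) */
	cq->dw5_flags |= cpu_to_le32(SLI4_CREATE_CQV2_VALID |
				     SLI4_CQ_CNT_VAL(LARGE));

	/* EQE count occupies dw6 bits 28:26 (SLI4_CREATE_EQ_COUNT) */
	eq->dw6_flags |= cpu_to_le32(SLI4_CREATE_EQ_ARM |
				     SLI4_EQ_CNT_VAL(1024));
}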
*/ +enum sli4_create_mq_flags { + /* DW6W1 */ + SLI4_CREATE_MQEXT_RINGSIZE = 0xf, + SLI4_CREATE_MQEXT_CQID_SHIFT = 6, + SLI4_CREATE_MQEXT_CQIDV0_MASK = 0xffc0, + /* DW7 */ + SLI4_CREATE_MQEXT_VAL = 1u << 31, + /* DW8 */ + SLI4_CREATE_MQEXT_ACQV = 1u << 0, + SLI4_CREATE_MQEXT_ASYNC_CQIDV0 = 0x7fe, +}; + +struct sli4_rqst_cmn_create_mq_ext { + struct sli4_rqst_hdr hdr; + __le16 num_pages; + __le16 cq_id_v1; + __le32 async_event_bitmap; + __le16 async_cq_id_v1; + __le16 dw6w1_flags; + __le32 dw7_val; + __le32 dw8_flags; + __le32 rsvd36; + struct sli4_dmaaddr page_phys_addr[]; +}; + +struct sli4_rsp_cmn_create_mq_ext { + struct sli4_rsp_cmn_create_queue q_rsp; +}; + +enum sli4_mqe_size { + SLI4_MQE_SIZE_16 = 0x05, + SLI4_MQE_SIZE_32, + SLI4_MQE_SIZE_64, + SLI4_MQE_SIZE_128, +}; + +enum sli4_async_evt { + SLI4_ASYNC_EVT_LINK_STATE = 1 << 1, + SLI4_ASYNC_EVT_FIP = 1 << 2, + SLI4_ASYNC_EVT_GRP5 = 1 << 5, + SLI4_ASYNC_EVT_FC = 1 << 16, + SLI4_ASYNC_EVT_SLI_PORT = 1 << 17, +}; + +#define SLI4_ASYNC_EVT_FC_ALL \ + (SLI4_ASYNC_EVT_LINK_STATE | \ + SLI4_ASYNC_EVT_FIP | \ + SLI4_ASYNC_EVT_GRP5 | \ + SLI4_ASYNC_EVT_FC | \ + SLI4_ASYNC_EVT_SLI_PORT) + +/* Create a Completion Queue. */ +struct sli4_rqst_cmn_create_cq_v0 { + struct sli4_rqst_hdr hdr; + __le16 num_pages; + __le16 rsvd18; + __le32 dw5_flags; + __le32 dw6_flags; + __le32 rsvd28; + __le32 rsvd32; + struct sli4_dmaaddr page_phys_addr[]; +}; + +enum sli4_create_rq_e { + SLI4_RQ_CREATE_DUA = 0x1, + SLI4_RQ_CREATE_BQU = 0x2, + + SLI4_RQE_SIZE = 8, + SLI4_RQE_SIZE_8 = 0x2, + SLI4_RQE_SIZE_16 = 0x3, + SLI4_RQE_SIZE_32 = 0x4, + SLI4_RQE_SIZE_64 = 0x5, + SLI4_RQE_SIZE_128 = 0x6, + + SLI4_RQ_PAGE_SIZE_4096 = 0x1, + SLI4_RQ_PAGE_SIZE_8192 = 0x2, + SLI4_RQ_PAGE_SIZE_16384 = 0x4, + SLI4_RQ_PAGE_SIZE_32768 = 0x8, + SLI4_RQ_PAGE_SIZE_64536 = 0x10, + + SLI4_RQ_CREATE_V0_MAX_PAGES = 8, + SLI4_RQ_CREATE_V0_MIN_BUF_SIZE = 128, + SLI4_RQ_CREATE_V0_MAX_BUF_SIZE = 2048, +}; + +struct sli4_rqst_rq_create { + struct sli4_rqst_hdr hdr; + __le16 num_pages; + u8 dua_bqu_byte; + u8 ulp; + __le16 rsvd16; + u8 rqe_count_byte; + u8 rsvd19; + __le32 rsvd20; + __le16 buffer_size; + __le16 cq_id; + __le32 rsvd28; + struct sli4_dmaaddr page_phys_addr[SLI4_RQ_CREATE_V0_MAX_PAGES]; +}; + +struct sli4_rsp_rq_create { + struct sli4_rsp_cmn_create_queue rsp; +}; + +enum sli4_create_rqv1_e { + SLI4_RQ_CREATE_V1_DNB = 0x80, + SLI4_RQ_CREATE_V1_MAX_PAGES = 8, + SLI4_RQ_CREATE_V1_MIN_BUF_SIZE = 64, + SLI4_RQ_CREATE_V1_MAX_BUF_SIZE = 2048, +}; + +struct sli4_rqst_rq_create_v1 { + struct sli4_rqst_hdr hdr; + __le16 num_pages; + u8 rsvd14; + u8 dim_dfd_dnb; + u8 page_size; + u8 rqe_size_byte; + __le16 rqe_count; + __le32 rsvd20; + __le16 rsvd24; + __le16 cq_id; + __le32 buffer_size; + struct sli4_dmaaddr page_phys_addr[SLI4_RQ_CREATE_V1_MAX_PAGES]; +}; + +struct sli4_rsp_rq_create_v1 { + struct sli4_rsp_cmn_create_queue rsp; +}; + +#define SLI4_RQCREATEV2_DNB 0x80 + +struct sli4_rqst_rq_create_v2 { + struct sli4_rqst_hdr hdr; + __le16 num_pages; + u8 rq_count; + u8 dim_dfd_dnb; + u8 page_size; + u8 rqe_size_byte; + __le16 rqe_count; + __le16 hdr_buffer_size; + __le16 payload_buffer_size; + __le16 base_cq_id; + __le16 rsvd26; + __le32 rsvd42; + struct sli4_dmaaddr page_phys_addr[]; +}; + +struct sli4_rsp_rq_create_v2 { + struct sli4_rsp_cmn_create_queue rsp; +}; + +#define SLI4_CQE_CODE_OFFSET 14 + +enum sli4_cqe_code { + SLI4_CQE_CODE_WORK_REQUEST_COMPLETION = 0x01, + SLI4_CQE_CODE_RELEASE_WQE, + SLI4_CQE_CODE_RSVD, + SLI4_CQE_CODE_RQ_ASYNC, + SLI4_CQE_CODE_XRI_ABORTED, + 
SLI4_CQE_CODE_RQ_COALESCING, + SLI4_CQE_CODE_RQ_CONSUMPTION, + SLI4_CQE_CODE_MEASUREMENT_REPORTING, + SLI4_CQE_CODE_RQ_ASYNC_V1, + SLI4_CQE_CODE_RQ_COALESCING_V1, + SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD, + SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA, +}; + +#define SLI4_WQ_CREATE_MAX_PAGES 8 + +struct sli4_rqst_wq_create { + struct sli4_rqst_hdr hdr; + __le16 num_pages; + __le16 cq_id; + u8 page_size; + u8 wqe_size_byte; + __le16 wqe_count; + __le32 rsvd; + struct sli4_dmaaddr page_phys_addr[SLI4_WQ_CREATE_MAX_PAGES]; +}; + +struct sli4_rsp_wq_create { + struct sli4_rsp_cmn_create_queue rsp; +}; + +enum sli4_link_attention_flags { + SLI4_LNK_ATTN_TYPE_LINK_UP = 0x01, + SLI4_LNK_ATTN_TYPE_LINK_DOWN = 0x02, + SLI4_LNK_ATTN_TYPE_NO_HARD_ALPA = 0x03, + + SLI4_LNK_ATTN_P2P = 0x01, + SLI4_LNK_ATTN_FC_AL = 0x02, + SLI4_LNK_ATTN_INTERNAL_LOOPBACK = 0x03, + SLI4_LNK_ATTN_SERDES_LOOPBACK = 0x04, +}; + +struct sli4_link_attention { + u8 link_number; + u8 attn_type; + u8 topology; + u8 port_speed; + u8 port_fault; + u8 shared_link_status; + __le16 logical_link_speed; + __le32 event_tag; + u8 rsvd12; + u8 event_code; + u8 event_type; + u8 flags; +}; + +enum sli4_link_event_type { + SLI4_EVENT_LINK_ATTENTION = 0x01, + SLI4_EVENT_SHARED_LINK_ATTENTION = 0x02, +}; + +enum sli4_wcqe_flags { + SLI4_WCQE_XB = 0x10, + SLI4_WCQE_QX = 0x80, +}; + +struct sli4_fc_wcqe { + u8 hw_status; + u8 status; + __le16 request_tag; + __le32 wqe_specific_1; + __le32 wqe_specific_2; + u8 rsvd12; + u8 qx_byte; + u8 code; + u8 flags; +}; + +/* FC WQ consumed CQ queue entry */ +struct sli4_fc_wqec { + __le32 rsvd0; + __le32 rsvd1; + __le16 wqe_index; + __le16 wq_id; + __le16 rsvd12; + u8 code; + u8 vld_byte; +}; + +/* FC Completion Status Codes. */ +enum sli4_wcqe_status { + SLI4_FC_WCQE_STATUS_SUCCESS, + SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE, + SLI4_FC_WCQE_STATUS_REMOTE_STOP, + SLI4_FC_WCQE_STATUS_LOCAL_REJECT, + SLI4_FC_WCQE_STATUS_NPORT_RJT, + SLI4_FC_WCQE_STATUS_FABRIC_RJT, + SLI4_FC_WCQE_STATUS_NPORT_BSY, + SLI4_FC_WCQE_STATUS_FABRIC_BSY, + SLI4_FC_WCQE_STATUS_RSVD, + SLI4_FC_WCQE_STATUS_LS_RJT, + SLI4_FC_WCQE_STATUS_RX_BUF_OVERRUN, + SLI4_FC_WCQE_STATUS_CMD_REJECT, + SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK, + SLI4_FC_WCQE_STATUS_RSVD1, + SLI4_FC_WCQE_STATUS_ELS_CMPLT_NO_AUTOREG, + SLI4_FC_WCQE_STATUS_RSVD2, + SLI4_FC_WCQE_STATUS_RQ_SUCCESS, + SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED, + SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED, + SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC, + SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE, + SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE, + SLI4_FC_WCQE_STATUS_DI_ERROR, + SLI4_FC_WCQE_STATUS_BA_RJT, + SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED, + SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC, + SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT, + SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST, + + /* driver generated status codes */ + SLI4_FC_WCQE_STATUS_DISPATCH_ERROR = 0xfd, + SLI4_FC_WCQE_STATUS_SHUTDOWN = 0xfe, + SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT = 0xff, +}; + +/* DI_ERROR Extended Status */ +enum sli4_fc_di_error_status { + SLI4_FC_DI_ERROR_GE = 1 << 0, + SLI4_FC_DI_ERROR_AE = 1 << 1, + SLI4_FC_DI_ERROR_RE = 1 << 2, + SLI4_FC_DI_ERROR_TDPV = 1 << 3, + SLI4_FC_DI_ERROR_UDB = 1 << 4, + SLI4_FC_DI_ERROR_EDIR = 1 << 5, +}; + +/* WQE DIF field contents */ +enum sli4_dif_fields { + SLI4_DIF_DISABLED, + SLI4_DIF_PASS_THROUGH, + SLI4_DIF_STRIP, + SLI4_DIF_INSERT, +}; + +/* Work Queue Entry (WQE) types */ +enum sli4_wqe_types { + SLI4_WQE_ABORT = 0x0f, + SLI4_WQE_ELS_REQUEST64 = 0x8a, + SLI4_WQE_FCP_IBIDIR64 = 0xac, + SLI4_WQE_FCP_IREAD64 = 0x9a, + 
SLI4_WQE_FCP_IWRITE64 = 0x98, + SLI4_WQE_FCP_ICMND64 = 0x9c, + SLI4_WQE_FCP_TRECEIVE64 = 0xa1, + SLI4_WQE_FCP_CONT_TRECEIVE64 = 0xe5, + SLI4_WQE_FCP_TRSP64 = 0xa3, + SLI4_WQE_FCP_TSEND64 = 0x9f, + SLI4_WQE_GEN_REQUEST64 = 0xc2, + SLI4_WQE_SEND_FRAME = 0xe1, + SLI4_WQE_XMIT_BCAST64 = 0x84, + SLI4_WQE_XMIT_BLS_RSP = 0x97, + SLI4_WQE_ELS_RSP64 = 0x95, + SLI4_WQE_XMIT_SEQUENCE64 = 0x82, + SLI4_WQE_REQUEUE_XRI = 0x93, +}; + +/* WQE command types */ +enum sli4_wqe_cmds { + SLI4_CMD_FCP_IREAD64_WQE = 0x00, + SLI4_CMD_FCP_ICMND64_WQE = 0x00, + SLI4_CMD_FCP_IWRITE64_WQE = 0x01, + SLI4_CMD_FCP_TRECEIVE64_WQE = 0x02, + SLI4_CMD_FCP_TRSP64_WQE = 0x03, + SLI4_CMD_FCP_TSEND64_WQE = 0x07, + SLI4_CMD_GEN_REQUEST64_WQE = 0x08, + SLI4_CMD_XMIT_BCAST64_WQE = 0x08, + SLI4_CMD_XMIT_BLS_RSP64_WQE = 0x08, + SLI4_CMD_ABORT_WQE = 0x08, + SLI4_CMD_XMIT_SEQUENCE64_WQE = 0x08, + SLI4_CMD_REQUEUE_XRI_WQE = 0x0a, + SLI4_CMD_SEND_FRAME_WQE = 0x0a, +}; + +#define SLI4_WQE_SIZE 0x05 +#define SLI4_WQE_EXT_SIZE 0x06 + +#define SLI4_WQE_BYTES (16 * sizeof(u32)) +#define SLI4_WQE_EXT_BYTES (32 * sizeof(u32)) + +/* Mask for ccp (CS_CTL) */ +#define SLI4_MASK_CCP 0xfe + +/* Generic WQE */ +enum sli4_gen_wqe_flags { + SLI4_GEN_WQE_EBDECNT = 0xf, + SLI4_GEN_WQE_LEN_LOC = 0x3 << 7, + SLI4_GEN_WQE_QOSD = 1 << 9, + SLI4_GEN_WQE_XBL = 1 << 11, + SLI4_GEN_WQE_HLM = 1 << 12, + SLI4_GEN_WQE_IOD = 1 << 13, + SLI4_GEN_WQE_DBDE = 1 << 14, + SLI4_GEN_WQE_WQES = 1 << 15, + + SLI4_GEN_WQE_PRI = 0x7, + SLI4_GEN_WQE_PV = 1 << 3, + SLI4_GEN_WQE_EAT = 1 << 4, + SLI4_GEN_WQE_XC = 1 << 5, + SLI4_GEN_WQE_CCPE = 1 << 7, + + SLI4_GEN_WQE_CMDTYPE = 0xf, + SLI4_GEN_WQE_WQEC = 1 << 7, +}; + +struct sli4_generic_wqe { + __le32 cmd_spec0_5[6]; + __le16 xri_tag; + __le16 context_tag; + u8 ct_byte; + u8 command; + u8 class_byte; + u8 timer; + __le32 abort_tag; + __le16 request_tag; + __le16 rsvd34; + __le16 dw10w0_flags; + u8 eat_xc_ccpe; + u8 ccp; + u8 cmdtype_wqec_byte; + u8 rsvd41; + __le16 cq_id; +}; + +/* WQE used to abort exchanges. */ +enum sli4_abort_wqe_flags { + SLI4_ABRT_WQE_IR = 0x02, + + SLI4_ABRT_WQE_EBDECNT = 0xf, + SLI4_ABRT_WQE_LEN_LOC = 0x3 << 7, + SLI4_ABRT_WQE_QOSD = 1 << 9, + SLI4_ABRT_WQE_XBL = 1 << 11, + SLI4_ABRT_WQE_IOD = 1 << 13, + SLI4_ABRT_WQE_DBDE = 1 << 14, + SLI4_ABRT_WQE_WQES = 1 << 15, + + SLI4_ABRT_WQE_PRI = 0x7, + SLI4_ABRT_WQE_PV = 1 << 3, + SLI4_ABRT_WQE_EAT = 1 << 4, + SLI4_ABRT_WQE_XC = 1 << 5, + SLI4_ABRT_WQE_CCPE = 1 << 7, + + SLI4_ABRT_WQE_CMDTYPE = 0xf, + SLI4_ABRT_WQE_WQEC = 1 << 7, +}; + +struct sli4_abort_wqe { + __le32 rsvd0; + __le32 rsvd4; + __le32 ext_t_tag; + u8 ia_ir_byte; + u8 criteria; + __le16 rsvd10; + __le32 ext_t_mask; + __le32 t_mask; + __le16 xri_tag; + __le16 context_tag; + u8 ct_byte; + u8 command; + u8 class_byte; + u8 timer; + __le32 t_tag; + __le16 request_tag; + __le16 rsvd34; + __le16 dw10w0_flags; + u8 eat_xc_ccpe; + u8 ccp; + u8 cmdtype_wqec_byte; + u8 rsvd41; + __le16 cq_id; +}; + +enum sli4_abort_criteria { + SLI4_ABORT_CRITERIA_XRI_TAG = 0x01, + SLI4_ABORT_CRITERIA_ABORT_TAG, + SLI4_ABORT_CRITERIA_REQUEST_TAG, + SLI4_ABORT_CRITERIA_EXT_ABORT_TAG, +}; + +enum sli4_abort_type { + SLI4_ABORT_XRI, + SLI4_ABORT_ABORT_ID, + SLI4_ABORT_REQUEST_ID, + SLI4_ABORT_MAX, /* must be last */ +}; + +/* WQE used to create an ELS request. 
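One plausible mapping from the driver-facing sli4_abort_type values to the abort WQE 'criteria' field defined above; the real selection is made in the WQE build code outside this hunk, so treat this as a hypothetical sketch:

/* Illustrative only: choose an abort criteria for a given abort type. */
static u8 example_abort_criteria(enum sli4_abort_type type)
{
	switch (type) {
	case SLI4_ABORT_XRI:
		return SLI4_ABORT_CRITERIA_XRI_TAG;
	case SLI4_ABORT_ABORT_ID:
		return SLI4_ABORT_CRITERIA_ABORT_TAG;
	case SLI4_ABORT_REQUEST_ID:
		return SLI4_ABORT_CRITERIA_REQUEST_TAG;
	default:
		return 0;
	}
}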
*/ +enum sli4_els_req_wqe_flags { + SLI4_REQ_WQE_QOSD = 0x2, + SLI4_REQ_WQE_DBDE = 0x40, + SLI4_REQ_WQE_XBL = 0x8, + SLI4_REQ_WQE_XC = 0x20, + SLI4_REQ_WQE_IOD = 0x20, + SLI4_REQ_WQE_HLM = 0x10, + SLI4_REQ_WQE_CCPE = 0x80, + SLI4_REQ_WQE_EAT = 0x10, + SLI4_REQ_WQE_WQES = 0x80, + SLI4_REQ_WQE_PU_SHFT = 4, + SLI4_REQ_WQE_CT_SHFT = 2, + SLI4_REQ_WQE_CT = 0xc, + SLI4_REQ_WQE_ELSID_SHFT = 4, + SLI4_REQ_WQE_SP_SHFT = 24, + SLI4_REQ_WQE_LEN_LOC_BIT1 = 0x80, + SLI4_REQ_WQE_LEN_LOC_BIT2 = 0x1, +}; + +struct sli4_els_request64_wqe { + struct sli4_bde els_request_payload; + __le32 els_request_payload_length; + __le32 sid_sp_dword; + __le32 remote_id_dword; + __le16 xri_tag; + __le16 context_tag; + u8 ct_byte; + u8 command; + u8 class_byte; + u8 timer; + __le32 abort_tag; + __le16 request_tag; + __le16 temporary_rpi; + u8 len_loc1_byte; + u8 qosd_xbl_hlm_iod_dbde_wqes; + u8 eat_xc_ccpe; + u8 ccp; + u8 cmdtype_elsid_byte; + u8 rsvd41; + __le16 cq_id; + struct sli4_bde els_response_payload_bde; + __le32 max_response_payload_length; +}; + +/* WQE used to create an FCP initiator no data command. */ +enum sli4_icmd_wqe_flags { + SLI4_ICMD_WQE_DBDE = 0x40, + SLI4_ICMD_WQE_XBL = 0x8, + SLI4_ICMD_WQE_XC = 0x20, + SLI4_ICMD_WQE_IOD = 0x20, + SLI4_ICMD_WQE_HLM = 0x10, + SLI4_ICMD_WQE_CCPE = 0x80, + SLI4_ICMD_WQE_EAT = 0x10, + SLI4_ICMD_WQE_APPID = 0x10, + SLI4_ICMD_WQE_WQES = 0x80, + SLI4_ICMD_WQE_PU_SHFT = 4, + SLI4_ICMD_WQE_CT_SHFT = 2, + SLI4_ICMD_WQE_BS_SHFT = 4, + SLI4_ICMD_WQE_LEN_LOC_BIT1 = 0x80, + SLI4_ICMD_WQE_LEN_LOC_BIT2 = 0x1, +}; + +struct sli4_fcp_icmnd64_wqe { + struct sli4_bde bde; + __le16 payload_offset_length; + __le16 fcp_cmd_buffer_length; + __le32 rsvd12; + __le32 remote_n_port_id_dword; + __le16 xri_tag; + __le16 context_tag; + u8 dif_ct_bs_byte; + u8 command; + u8 class_pu_byte; + u8 timer; + __le32 abort_tag; + __le16 request_tag; + __le16 rsvd34; + u8 len_loc1_byte; + u8 qosd_xbl_hlm_iod_dbde_wqes; + u8 eat_xc_ccpe; + u8 ccp; + u8 cmd_type_byte; + u8 rsvd41; + __le16 cq_id; + __le32 rsvd44; + __le32 rsvd48; + __le32 rsvd52; + __le32 rsvd56; +}; + +/* WQE used to create an FCP initiator read. */ +enum sli4_ir_wqe_flags { + SLI4_IR_WQE_DBDE = 0x40, + SLI4_IR_WQE_XBL = 0x8, + SLI4_IR_WQE_XC = 0x20, + SLI4_IR_WQE_IOD = 0x20, + SLI4_IR_WQE_HLM = 0x10, + SLI4_IR_WQE_CCPE = 0x80, + SLI4_IR_WQE_EAT = 0x10, + SLI4_IR_WQE_APPID = 0x10, + SLI4_IR_WQE_WQES = 0x80, + SLI4_IR_WQE_PU_SHFT = 4, + SLI4_IR_WQE_CT_SHFT = 2, + SLI4_IR_WQE_BS_SHFT = 4, + SLI4_IR_WQE_LEN_LOC_BIT1 = 0x80, + SLI4_IR_WQE_LEN_LOC_BIT2 = 0x1, +}; + +struct sli4_fcp_iread64_wqe { + struct sli4_bde bde; + __le16 payload_offset_length; + __le16 fcp_cmd_buffer_length; + + __le32 total_transfer_length; + + __le32 remote_n_port_id_dword; + + __le16 xri_tag; + __le16 context_tag; + + u8 dif_ct_bs_byte; + u8 command; + u8 class_pu_byte; + u8 timer; + + __le32 abort_tag; + + __le16 request_tag; + __le16 rsvd34; + + u8 len_loc1_byte; + u8 qosd_xbl_hlm_iod_dbde_wqes; + u8 eat_xc_ccpe; + u8 ccp; + + u8 cmd_type_byte; + u8 rsvd41; + __le16 cq_id; + + __le32 rsvd44; + struct sli4_bde first_data_bde; +}; + +/* WQE used to create an FCP initiator write. 
*/ +enum sli4_iwr_wqe_flags { + SLI4_IWR_WQE_DBDE = 0x40, + SLI4_IWR_WQE_XBL = 0x8, + SLI4_IWR_WQE_XC = 0x20, + SLI4_IWR_WQE_IOD = 0x20, + SLI4_IWR_WQE_HLM = 0x10, + SLI4_IWR_WQE_DNRX = 0x10, + SLI4_IWR_WQE_CCPE = 0x80, + SLI4_IWR_WQE_EAT = 0x10, + SLI4_IWR_WQE_APPID = 0x10, + SLI4_IWR_WQE_WQES = 0x80, + SLI4_IWR_WQE_PU_SHFT = 4, + SLI4_IWR_WQE_CT_SHFT = 2, + SLI4_IWR_WQE_BS_SHFT = 4, + SLI4_IWR_WQE_LEN_LOC_BIT1 = 0x80, + SLI4_IWR_WQE_LEN_LOC_BIT2 = 0x1, +}; + +struct sli4_fcp_iwrite64_wqe { + struct sli4_bde bde; + __le16 payload_offset_length; + __le16 fcp_cmd_buffer_length; + __le16 total_transfer_length; + __le16 initial_transfer_length; + __le16 xri_tag; + __le16 context_tag; + u8 dif_ct_bs_byte; + u8 command; + u8 class_pu_byte; + u8 timer; + __le32 abort_tag; + __le16 request_tag; + __le16 rsvd34; + u8 len_loc1_byte; + u8 qosd_xbl_hlm_iod_dbde_wqes; + u8 eat_xc_ccpe; + u8 ccp; + u8 cmd_type_byte; + u8 rsvd41; + __le16 cq_id; + __le32 remote_n_port_id_dword; + struct sli4_bde first_data_bde; +}; + +struct sli4_fcp_128byte_wqe { + u32 dw[32]; +}; + +/* WQE used to create an FCP target receive */ +enum sli4_trcv_wqe_flags { + SLI4_TRCV_WQE_DBDE = 0x40, + SLI4_TRCV_WQE_XBL = 0x8, + SLI4_TRCV_WQE_AR = 0x8, + SLI4_TRCV_WQE_XC = 0x20, + SLI4_TRCV_WQE_IOD = 0x20, + SLI4_TRCV_WQE_HLM = 0x10, + SLI4_TRCV_WQE_DNRX = 0x10, + SLI4_TRCV_WQE_CCPE = 0x80, + SLI4_TRCV_WQE_EAT = 0x10, + SLI4_TRCV_WQE_APPID = 0x10, + SLI4_TRCV_WQE_WQES = 0x80, + SLI4_TRCV_WQE_PU_SHFT = 4, + SLI4_TRCV_WQE_CT_SHFT = 2, + SLI4_TRCV_WQE_BS_SHFT = 4, + SLI4_TRCV_WQE_LEN_LOC_BIT2 = 0x1, +}; + +struct sli4_fcp_treceive64_wqe { + struct sli4_bde bde; + __le32 payload_offset_length; + __le32 relative_offset; + union { + __le16 sec_xri_tag; + __le16 rsvd; + __le32 dword; + } dword5; + __le16 xri_tag; + __le16 context_tag; + u8 dif_ct_bs_byte; + u8 command; + u8 class_ar_pu_byte; + u8 timer; + __le32 abort_tag; + __le16 request_tag; + __le16 remote_xid; + u8 lloc1_appid; + u8 qosd_xbl_hlm_iod_dbde_wqes; + u8 eat_xc_ccpe; + u8 ccp; + u8 cmd_type_byte; + u8 rsvd41; + __le16 cq_id; + __le32 fcp_data_receive_length; + struct sli4_bde first_data_bde; +}; + +/* WQE used to create an FCP target response */ +enum sli4_trsp_wqe_flags { + SLI4_TRSP_WQE_AG = 0x8, + SLI4_TRSP_WQE_DBDE = 0x40, + SLI4_TRSP_WQE_XBL = 0x8, + SLI4_TRSP_WQE_XC = 0x20, + SLI4_TRSP_WQE_HLM = 0x10, + SLI4_TRSP_WQE_DNRX = 0x10, + SLI4_TRSP_WQE_CCPE = 0x80, + SLI4_TRSP_WQE_EAT = 0x10, + SLI4_TRSP_WQE_APPID = 0x10, + SLI4_TRSP_WQE_WQES = 0x80, +}; + +struct sli4_fcp_trsp64_wqe { + struct sli4_bde bde; + __le32 fcp_response_length; + __le32 rsvd12; + __le32 dword5; + __le16 xri_tag; + __le16 rpi; + u8 ct_dnrx_byte; + u8 command; + u8 class_ag_byte; + u8 timer; + __le32 abort_tag; + __le16 request_tag; + __le16 remote_xid; + u8 lloc1_appid; + u8 qosd_xbl_hlm_dbde_wqes; + u8 eat_xc_ccpe; + u8 ccp; + u8 cmd_type_byte; + u8 rsvd41; + __le16 cq_id; + __le32 rsvd44; + __le32 rsvd48; + __le32 rsvd52; + __le32 rsvd56; +}; + +/* WQE used to create an FCP target send (DATA IN). 
*/ +enum sli4_tsend_wqe_flags { + SLI4_TSEND_WQE_XBL = 0x8, + SLI4_TSEND_WQE_DBDE = 0x40, + SLI4_TSEND_WQE_IOD = 0x20, + SLI4_TSEND_WQE_QOSD = 0x2, + SLI4_TSEND_WQE_HLM = 0x10, + SLI4_TSEND_WQE_PU_SHFT = 4, + SLI4_TSEND_WQE_AR = 0x8, + SLI4_TSEND_CT_SHFT = 2, + SLI4_TSEND_BS_SHFT = 4, + SLI4_TSEND_LEN_LOC_BIT2 = 0x1, + SLI4_TSEND_CCPE = 0x80, + SLI4_TSEND_APPID_VALID = 0x20, + SLI4_TSEND_WQES = 0x80, + SLI4_TSEND_XC = 0x20, + SLI4_TSEND_EAT = 0x10, +}; + +struct sli4_fcp_tsend64_wqe { + struct sli4_bde bde; + __le32 payload_offset_length; + __le32 relative_offset; + __le32 dword5; + __le16 xri_tag; + __le16 rpi; + u8 ct_byte; + u8 command; + u8 class_pu_ar_byte; + u8 timer; + __le32 abort_tag; + __le16 request_tag; + __le16 remote_xid; + u8 dw10byte0; + u8 ll_qd_xbl_hlm_iod_dbde; + u8 dw10byte2; + u8 ccp; + u8 cmd_type_byte; + u8 rsvd45; + __le16 cq_id; + __le32 fcp_data_transmit_length; + struct sli4_bde first_data_bde; +}; + +/* WQE used to create a general request. */ +enum sli4_gen_req_wqe_flags { + SLI4_GEN_REQ64_WQE_XBL = 0x8, + SLI4_GEN_REQ64_WQE_DBDE = 0x40, + SLI4_GEN_REQ64_WQE_IOD = 0x20, + SLI4_GEN_REQ64_WQE_QOSD = 0x2, + SLI4_GEN_REQ64_WQE_HLM = 0x10, + SLI4_GEN_REQ64_CT_SHFT = 2, +}; + +struct sli4_gen_request64_wqe { + struct sli4_bde bde; + __le32 request_payload_length; + __le32 relative_offset; + u8 rsvd17; + u8 df_ctl; + u8 type; + u8 r_ctl; + __le16 xri_tag; + __le16 context_tag; + u8 ct_byte; + u8 command; + u8 class_byte; + u8 timer; + __le32 abort_tag; + __le16 request_tag; + __le16 rsvd34; + u8 dw10flags0; + u8 dw10flags1; + u8 dw10flags2; + u8 ccp; + u8 cmd_type_byte; + u8 rsvd41; + __le16 cq_id; + __le32 remote_n_port_id_dword; + __le32 rsvd48; + __le32 rsvd52; + __le32 max_response_payload_length; +}; + +/* WQE used to create a send frame request */ +enum sli4_sf_wqe_flags { + SLI4_SF_WQE_DBDE = 0x40, + SLI4_SF_PU = 0x30, + SLI4_SF_CT = 0xc, + SLI4_SF_QOSD = 0x2, + SLI4_SF_LEN_LOC_BIT1 = 0x80, + SLI4_SF_LEN_LOC_BIT2 = 0x1, + SLI4_SF_XC = 0x20, + SLI4_SF_XBL = 0x8, +}; + +struct sli4_send_frame_wqe { + struct sli4_bde bde; + __le32 frame_length; + __le32 fc_header_0_1[2]; + __le16 xri_tag; + __le16 context_tag; + u8 ct_byte; + u8 command; + u8 dw7flags0; + u8 timer; + __le32 abort_tag; + __le16 request_tag; + u8 eof; + u8 sof; + u8 dw10flags0; + u8 dw10flags1; + u8 dw10flags2; + u8 ccp; + u8 cmd_type_byte; + u8 rsvd41; + __le16 cq_id; + __le32 fc_header_2_5[4]; +}; + +/* WQE used to create a transmit sequence */ +enum sli4_seq_wqe_flags { + SLI4_SEQ_WQE_DBDE = 0x4000, + SLI4_SEQ_WQE_XBL = 0x800, + SLI4_SEQ_WQE_SI = 0x4, + SLI4_SEQ_WQE_FT = 0x8, + SLI4_SEQ_WQE_XO = 0x40, + SLI4_SEQ_WQE_LS = 0x80, + SLI4_SEQ_WQE_DIF = 0x3, + SLI4_SEQ_WQE_BS = 0x70, + SLI4_SEQ_WQE_PU = 0x30, + SLI4_SEQ_WQE_HLM = 0x1000, + SLI4_SEQ_WQE_IOD_SHIFT = 13, + SLI4_SEQ_WQE_CT_SHIFT = 2, + SLI4_SEQ_WQE_LEN_LOC_SHIFT = 7, +}; + +struct sli4_xmit_sequence64_wqe { + struct sli4_bde bde; + __le32 remote_n_port_id_dword; + __le32 relative_offset; + u8 dw5flags0; + u8 df_ctl; + u8 type; + u8 r_ctl; + __le16 xri_tag; + __le16 context_tag; + u8 dw7flags0; + u8 command; + u8 dw7flags1; + u8 timer; + __le32 abort_tag; + __le16 request_tag; + __le16 remote_xid; + __le16 dw10w0; + u8 dw10flags0; + u8 ccp; + u8 cmd_type_wqec_byte; + u8 rsvd45; + __le16 cq_id; + __le32 sequence_payload_len; + __le32 rsvd48; + __le32 rsvd52; + __le32 rsvd56; +}; + +/* + * WQE used unblock the specified XRI and to release + * it to the SLI Port's free pool. 
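For reference, the SEND_FRAME WQE above carries the full 24-byte FC frame header split across two fields (words 0-1 and words 2-5). A hypothetical helper showing that split; the name and calling convention are assumptions, not part of the patch:

/* Illustrative only: copy a 24-byte FC header into a SEND_FRAME WQE. */
static void example_fill_sf_header(struct sli4_send_frame_wqe *sf,
				   const __le32 fc_hdr[6])
{
	memcpy(sf->fc_header_0_1, &fc_hdr[0], sizeof(sf->fc_header_0_1));
	memcpy(sf->fc_header_2_5, &fc_hdr[2], sizeof(sf->fc_header_2_5));
}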
+ */ +enum sli4_requeue_wqe_flags { + SLI4_REQU_XRI_WQE_XC = 0x20, + SLI4_REQU_XRI_WQE_QOSD = 0x2, +}; + +struct sli4_requeue_xri_wqe { + __le32 rsvd0; + __le32 rsvd4; + __le32 rsvd8; + __le32 rsvd12; + __le32 rsvd16; + __le32 rsvd20; + __le16 xri_tag; + __le16 context_tag; + u8 ct_byte; + u8 command; + u8 class_byte; + u8 timer; + __le32 rsvd32; + __le16 request_tag; + __le16 rsvd34; + __le16 flags0; + __le16 flags1; + __le16 flags2; + u8 ccp; + u8 cmd_type_wqec_byte; + u8 rsvd42; + __le16 cq_id; + __le32 rsvd44; + __le32 rsvd48; + __le32 rsvd52; + __le32 rsvd56; +}; + +/* WQE used to create a BLS response */ +enum sli4_bls_rsp_wqe_flags { + SLI4_BLS_RSP_RID = 0xffffff, + SLI4_BLS_RSP_WQE_AR = 0x40000000, + SLI4_BLS_RSP_WQE_CT_SHFT = 2, + SLI4_BLS_RSP_WQE_QOSD = 0x2, + SLI4_BLS_RSP_WQE_HLM = 0x10, +}; + +struct sli4_xmit_bls_rsp_wqe { + __le32 payload_word0; + __le16 rx_id; + __le16 ox_id; + __le16 high_seq_cnt; + __le16 low_seq_cnt; + __le32 rsvd12; + __le32 local_n_port_id_dword; + __le32 remote_id_dword; + __le16 xri_tag; + __le16 context_tag; + u8 dw8flags0; + u8 command; + u8 dw8flags1; + u8 timer; + __le32 abort_tag; + __le16 request_tag; + __le16 rsvd38; + u8 dw11flags0; + u8 dw11flags1; + u8 dw11flags2; + u8 ccp; + u8 dw12flags0; + u8 rsvd45; + __le16 cq_id; + __le16 temporary_rpi; + u8 rsvd50; + u8 rsvd51; + __le32 rsvd52; + __le32 rsvd56; + __le32 rsvd60; +}; + +enum sli_bls_type { + SLI4_SLI_BLS_ACC, + SLI4_SLI_BLS_RJT, + SLI4_SLI_BLS_MAX +}; + +struct sli_bls_payload { + enum sli_bls_type type; + __le16 ox_id; + __le16 rx_id; + union { + struct { + u8 seq_id_validity; + u8 seq_id_last; + u8 rsvd2; + u8 rsvd3; + u16 ox_id; + u16 rx_id; + __le16 low_seq_cnt; + __le16 high_seq_cnt; + } acc; + struct { + u8 vendor_unique; + u8 reason_explanation; + u8 reason_code; + u8 rsvd3; + } rjt; + } u; +}; + +/* WQE used to create an ELS response */ + +enum sli4_els_rsp_flags { + SLI4_ELS_SID = 0xffffff, + SLI4_ELS_RID = 0xffffff, + SLI4_ELS_DBDE = 0x40, + SLI4_ELS_XBL = 0x8, + SLI4_ELS_IOD = 0x20, + SLI4_ELS_QOSD = 0x2, + SLI4_ELS_XC = 0x20, + SLI4_ELS_CT_OFFSET = 0X2, + SLI4_ELS_SP = 0X1000000, + SLI4_ELS_HLM = 0X10, +}; + +struct sli4_xmit_els_rsp64_wqe { + struct sli4_bde els_response_payload; + __le32 els_response_payload_length; + __le32 sid_dw; + __le32 rid_dw; + __le16 xri_tag; + __le16 context_tag; + u8 ct_byte; + u8 command; + u8 class_byte; + u8 timer; + __le32 abort_tag; + __le16 request_tag; + __le16 ox_id; + u8 flags1; + u8 flags2; + u8 flags3; + u8 flags4; + u8 cmd_type_wqec; + u8 rsvd34; + __le16 cq_id; + __le16 temporary_rpi; + __le16 rsvd38; + u32 rsvd40; + u32 rsvd44; + u32 rsvd48; +}; + +/* Local Reject Reason Codes */ +enum sli4_fc_local_rej_codes { + SLI4_FC_LOCAL_REJECT_UNKNOWN, + SLI4_FC_LOCAL_REJECT_MISSING_CONTINUE, + SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT, + SLI4_FC_LOCAL_REJECT_INTERNAL_ERROR, + SLI4_FC_LOCAL_REJECT_INVALID_RPI, + SLI4_FC_LOCAL_REJECT_NO_XRI, + SLI4_FC_LOCAL_REJECT_ILLEGAL_COMMAND, + SLI4_FC_LOCAL_REJECT_XCHG_DROPPED, + SLI4_FC_LOCAL_REJECT_ILLEGAL_FIELD, + SLI4_FC_LOCAL_REJECT_RPI_SUSPENDED, + SLI4_FC_LOCAL_REJECT_RSVD, + SLI4_FC_LOCAL_REJECT_RSVD1, + SLI4_FC_LOCAL_REJECT_NO_ABORT_MATCH, + SLI4_FC_LOCAL_REJECT_TX_DMA_FAILED, + SLI4_FC_LOCAL_REJECT_RX_DMA_FAILED, + SLI4_FC_LOCAL_REJECT_ILLEGAL_FRAME, + SLI4_FC_LOCAL_REJECT_RSVD2, + SLI4_FC_LOCAL_REJECT_NO_RESOURCES, //0x11 + SLI4_FC_LOCAL_REJECT_FCP_CONF_FAILURE, + SLI4_FC_LOCAL_REJECT_ILLEGAL_LENGTH, + SLI4_FC_LOCAL_REJECT_UNSUPPORTED_FEATURE, + SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS, + 
SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED, + SLI4_FC_LOCAL_REJECT_RCV_BUFFER_TIMEOUT, + SLI4_FC_LOCAL_REJECT_LOOP_OPEN_FAILURE, + SLI4_FC_LOCAL_REJECT_RSVD3, + SLI4_FC_LOCAL_REJECT_LINK_DOWN, + SLI4_FC_LOCAL_REJECT_CORRUPTED_DATA, + SLI4_FC_LOCAL_REJECT_CORRUPTED_RPI, + SLI4_FC_LOCAL_REJECT_OUTOFORDER_DATA, + SLI4_FC_LOCAL_REJECT_OUTOFORDER_ACK, + SLI4_FC_LOCAL_REJECT_DUP_FRAME, + SLI4_FC_LOCAL_REJECT_LINK_CONTROL_FRAME, //0x20 + SLI4_FC_LOCAL_REJECT_BAD_HOST_ADDRESS, + SLI4_FC_LOCAL_REJECT_RSVD4, + SLI4_FC_LOCAL_REJECT_MISSING_HDR_BUFFER, + SLI4_FC_LOCAL_REJECT_MSEQ_CHAIN_CORRUPTED, + SLI4_FC_LOCAL_REJECT_ABORTMULT_REQUESTED, + SLI4_FC_LOCAL_REJECT_BUFFER_SHORTAGE = 0x28, + SLI4_FC_LOCAL_REJECT_RCV_XRIBUF_WAITING, + SLI4_FC_LOCAL_REJECT_INVALID_VPI = 0x2e, + SLI4_FC_LOCAL_REJECT_NO_FPORT_DETECTED, + SLI4_FC_LOCAL_REJECT_MISSING_XRIBUF, + SLI4_FC_LOCAL_REJECT_RSVD5, + SLI4_FC_LOCAL_REJECT_INVALID_XRI, + SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET = 0x40, + SLI4_FC_LOCAL_REJECT_MISSING_RELOFFSET, + SLI4_FC_LOCAL_REJECT_INSUFF_BUFFERSPACE, + SLI4_FC_LOCAL_REJECT_MISSING_SI, + SLI4_FC_LOCAL_REJECT_MISSING_ES, + SLI4_FC_LOCAL_REJECT_INCOMPLETE_XFER, + SLI4_FC_LOCAL_REJECT_SLER_FAILURE, + SLI4_FC_LOCAL_REJECT_SLER_CMD_RCV_FAILURE, + SLI4_FC_LOCAL_REJECT_SLER_REC_RJT_ERR, + SLI4_FC_LOCAL_REJECT_SLER_REC_SRR_RETRY_ERR, + SLI4_FC_LOCAL_REJECT_SLER_SRR_RJT_ERR, + SLI4_FC_LOCAL_REJECT_RSVD6, + SLI4_FC_LOCAL_REJECT_SLER_RRQ_RJT_ERR, + SLI4_FC_LOCAL_REJECT_SLER_RRQ_RETRY_ERR, + SLI4_FC_LOCAL_REJECT_SLER_ABTS_ERR, +}; + +enum sli4_async_rcqe_flags { + SLI4_RACQE_RQ_EL_INDX = 0xfff, + SLI4_RACQE_FCFI = 0x3f, + SLI4_RACQE_HDPL = 0x3f, + SLI4_RACQE_RQ_ID = 0xffc0, +}; + +struct sli4_fc_async_rcqe { + u8 rsvd0; + u8 status; + __le16 rq_elmt_indx_word; + __le32 rsvd4; + __le16 fcfi_rq_id_word; + __le16 data_placement_length; + u8 sof_byte; + u8 eof_byte; + u8 code; + u8 hdpl_byte; +}; + +struct sli4_fc_async_rcqe_v1 { + u8 rsvd0; + u8 status; + __le16 rq_elmt_indx_word; + u8 fcfi_byte; + u8 rsvd5; + __le16 rsvd6; + __le16 rq_id; + __le16 data_placement_length; + u8 sof_byte; + u8 eof_byte; + u8 code; + u8 hdpl_byte; +}; + +enum sli4_fc_async_rq_status { + SLI4_FC_ASYNC_RQ_SUCCESS = 0x10, + SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED, + SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED, + SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC, + SLI4_FC_ASYNC_RQ_DMA_FAILURE, +}; + +#define SLI4_RCQE_RQ_EL_INDX 0xfff + +struct sli4_fc_coalescing_rcqe { + u8 rsvd0; + u8 status; + __le16 rq_elmt_indx_word; + __le32 rsvd4; + __le16 rq_id; + __le16 seq_placement_length; + __le16 rsvd14; + u8 code; + u8 vld_byte; +}; + +#define SLI4_FC_COALESCE_RQ_SUCCESS 0x10 +#define SLI4_FC_COALESCE_RQ_INSUFF_XRI_NEEDED 0x18 + +enum sli4_optimized_write_cmd_cqe_flags { + SLI4_OCQE_RQ_EL_INDX = 0x7f, /* DW0 bits 16:30 */ + SLI4_OCQE_FCFI = 0x3f, /* DW1 bits 0:6 */ + SLI4_OCQE_OOX = 1 << 6, /* DW1 bit 15 */ + SLI4_OCQE_AGXR = 1 << 7, /* DW1 bit 16 */ + SLI4_OCQE_HDPL = 0x3f, /* DW3 bits 24:29*/ +}; + +struct sli4_fc_optimized_write_cmd_cqe { + u8 rsvd0; + u8 status; + __le16 w1; + u8 flags0; + u8 flags1; + __le16 xri; + __le16 rq_id; + __le16 data_placement_length; + __le16 rpi; + u8 code; + u8 hdpl_vld; +}; + +#define SLI4_OCQE_XB 0x10 + +struct sli4_fc_optimized_write_data_cqe { + u8 hw_status; + u8 status; + __le16 xri; + __le32 total_data_placed; + __le32 extended_status; + __le16 rsvd12; + u8 code; + u8 flags; +}; + +struct sli4_fc_xri_aborted_cqe { + u8 rsvd0; + u8 status; + __le16 rsvd2; + __le32 extended_status; + __le16 xri; + __le16 remote_xid; + __le16 rsvd12; + u8 code; 
+ u8 flags; +}; + +enum sli4_generic_ctx { + SLI4_GENERIC_CONTEXT_RPI, + SLI4_GENERIC_CONTEXT_VPI, + SLI4_GENERIC_CONTEXT_VFI, + SLI4_GENERIC_CONTEXT_FCFI, +}; + +#define SLI4_GENERIC_CLASS_CLASS_2 0x1 +#define SLI4_GENERIC_CLASS_CLASS_3 0x2 + +#define SLI4_ELS_REQUEST64_DIR_WRITE 0x0 +#define SLI4_ELS_REQUEST64_DIR_READ 0x1 + +enum sli4_els_request { + SLI4_ELS_REQUEST64_OTHER, + SLI4_ELS_REQUEST64_LOGO, + SLI4_ELS_REQUEST64_FDISC, + SLI4_ELS_REQUEST64_FLOGIN, + SLI4_ELS_REQUEST64_PLOGI, +}; + +enum sli4_els_cmd_type { + SLI4_ELS_REQUEST64_CMD_GEN = 0x08, + SLI4_ELS_REQUEST64_CMD_NON_FABRIC = 0x0c, + SLI4_ELS_REQUEST64_CMD_FABRIC = 0x0d, +}; + +#define SLI_PAGE_SIZE SZ_4K + +#define SLI4_BMBX_TIMEOUT_MSEC 30000 +#define SLI4_FW_READY_TIMEOUT_MSEC 30000 + +#define SLI4_BMBX_DELAY_US 1000 /* 1 ms */ +#define SLI4_INIT_PORT_DELAY_US 10000 /* 10 ms */ + +static inline u32 +sli_page_count(size_t bytes, u32 page_size) +{ + if (!page_size) + return 0; + + return (bytes + (page_size - 1)) >> __ffs(page_size); +} + +/************************************************************************* + * SLI-4 mailbox command formats and definitions + */ + +struct sli4_mbox_command_header { + u8 resvd0; + u8 command; + __le16 status; /* Port writes to indicate success/fail */ +}; + +enum sli4_mbx_cmd_value { + SLI4_MBX_CMD_CONFIG_LINK = 0x07, + SLI4_MBX_CMD_DUMP = 0x17, + SLI4_MBX_CMD_DOWN_LINK = 0x06, + SLI4_MBX_CMD_INIT_LINK = 0x05, + SLI4_MBX_CMD_INIT_VFI = 0xa3, + SLI4_MBX_CMD_INIT_VPI = 0xa4, + SLI4_MBX_CMD_POST_XRI = 0xa7, + SLI4_MBX_CMD_RELEASE_XRI = 0xac, + SLI4_MBX_CMD_READ_CONFIG = 0x0b, + SLI4_MBX_CMD_READ_STATUS = 0x0e, + SLI4_MBX_CMD_READ_NVPARMS = 0x02, + SLI4_MBX_CMD_READ_REV = 0x11, + SLI4_MBX_CMD_READ_LNK_STAT = 0x12, + SLI4_MBX_CMD_READ_SPARM64 = 0x8d, + SLI4_MBX_CMD_READ_TOPOLOGY = 0x95, + SLI4_MBX_CMD_REG_FCFI = 0xa0, + SLI4_MBX_CMD_REG_FCFI_MRQ = 0xaf, + SLI4_MBX_CMD_REG_RPI = 0x93, + SLI4_MBX_CMD_REG_RX_RQ = 0xa6, + SLI4_MBX_CMD_REG_VFI = 0x9f, + SLI4_MBX_CMD_REG_VPI = 0x96, + SLI4_MBX_CMD_RQST_FEATURES = 0x9d, + SLI4_MBX_CMD_SLI_CONFIG = 0x9b, + SLI4_MBX_CMD_UNREG_FCFI = 0xa2, + SLI4_MBX_CMD_UNREG_RPI = 0x14, + SLI4_MBX_CMD_UNREG_VFI = 0xa1, + SLI4_MBX_CMD_UNREG_VPI = 0x97, + SLI4_MBX_CMD_WRITE_NVPARMS = 0x03, + SLI4_MBX_CMD_CFG_AUTO_XFER_RDY = 0xad, +}; + +enum sli4_mbx_status { + SLI4_MBX_STATUS_SUCCESS = 0x0000, + SLI4_MBX_STATUS_FAILURE = 0x0001, + SLI4_MBX_STATUS_RPI_NOT_REG = 0x1400, +}; + +/* CONFIG_LINK - configure link-oriented parameters, + * such as default N_Port_ID address and various timers + */ +enum sli4_cmd_config_link_flags { + SLI4_CFG_LINK_BBSCN = 0xf00, + SLI4_CFG_LINK_CSCN = 0x1000, +}; + +struct sli4_cmd_config_link { + struct sli4_mbox_command_header hdr; + u8 maxbbc; + u8 rsvd5; + u8 rsvd6; + u8 rsvd7; + u8 alpa; + __le16 n_port_id; + u8 rsvd11; + __le32 rsvd12; + __le32 e_d_tov; + __le32 lp_tov; + __le32 r_a_tov; + __le32 r_t_tov; + __le32 al_tov; + __le32 rsvd36; + __le32 bbscn_dword; +}; + +#define SLI4_DUMP4_TYPE 0xf + +#define SLI4_WKI_TAG_SAT_TEM 0x1040 + +struct sli4_cmd_dump4 { + struct sli4_mbox_command_header hdr; + __le32 type_dword; + __le16 wki_selection; + __le16 rsvd10; + __le32 rsvd12; + __le32 returned_byte_cnt; + __le32 resp_data[59]; +}; + +/* INIT_LINK - initialize the link for a FC port */ +enum sli4_init_link_flags { + SLI4_INIT_LINK_F_LOOPBACK = 1 << 0, + + SLI4_INIT_LINK_F_P2P_ONLY = 1 << 1, + SLI4_INIT_LINK_F_FCAL_ONLY = 2 << 1, + SLI4_INIT_LINK_F_FCAL_FAIL_OVER = 0 << 1, + SLI4_INIT_LINK_F_P2P_FAIL_OVER = 1 << 1, + + 
SLI4_INIT_LINK_F_UNFAIR = 1 << 6, + SLI4_INIT_LINK_F_NO_LIRP = 1 << 7, + SLI4_INIT_LINK_F_LOOP_VALID_CHK = 1 << 8, + SLI4_INIT_LINK_F_NO_LISA = 1 << 9, + SLI4_INIT_LINK_F_FAIL_OVER = 1 << 10, + SLI4_INIT_LINK_F_FIXED_SPEED = 1 << 11, + SLI4_INIT_LINK_F_PICK_HI_ALPA = 1 << 15, + +}; + +enum sli4_fc_link_speed { + SLI4_LINK_SPEED_1G = 1, + SLI4_LINK_SPEED_2G, + SLI4_LINK_SPEED_AUTO_1_2, + SLI4_LINK_SPEED_4G, + SLI4_LINK_SPEED_AUTO_4_1, + SLI4_LINK_SPEED_AUTO_4_2, + SLI4_LINK_SPEED_AUTO_4_2_1, + SLI4_LINK_SPEED_8G, + SLI4_LINK_SPEED_AUTO_8_1, + SLI4_LINK_SPEED_AUTO_8_2, + SLI4_LINK_SPEED_AUTO_8_2_1, + SLI4_LINK_SPEED_AUTO_8_4, + SLI4_LINK_SPEED_AUTO_8_4_1, + SLI4_LINK_SPEED_AUTO_8_4_2, + SLI4_LINK_SPEED_10G, + SLI4_LINK_SPEED_16G, + SLI4_LINK_SPEED_AUTO_16_8_4, + SLI4_LINK_SPEED_AUTO_16_8, + SLI4_LINK_SPEED_32G, + SLI4_LINK_SPEED_AUTO_32_16_8, + SLI4_LINK_SPEED_AUTO_32_16, + SLI4_LINK_SPEED_64G, + SLI4_LINK_SPEED_AUTO_64_32_16, + SLI4_LINK_SPEED_AUTO_64_32, + SLI4_LINK_SPEED_128G, + SLI4_LINK_SPEED_AUTO_128_64_32, + SLI4_LINK_SPEED_AUTO_128_64, +}; + +struct sli4_cmd_init_link { + struct sli4_mbox_command_header hdr; + __le32 sel_reset_al_pa_dword; + __le32 flags0; + __le32 link_speed_sel_code; +}; + +/* INIT_VFI - initialize the VFI resource */ +enum sli4_init_vfi_flags { + SLI4_INIT_VFI_FLAG_VP = 0x1000, + SLI4_INIT_VFI_FLAG_VF = 0x2000, + SLI4_INIT_VFI_FLAG_VT = 0x4000, + SLI4_INIT_VFI_FLAG_VR = 0x8000, + + SLI4_INIT_VFI_VFID = 0x1fff, + SLI4_INIT_VFI_PRI = 0xe000, + + SLI4_INIT_VFI_HOP_COUNT = 0xff000000, +}; + +struct sli4_cmd_init_vfi { + struct sli4_mbox_command_header hdr; + __le16 vfi; + __le16 flags0_word; + __le16 fcfi; + __le16 vpi; + __le32 vf_id_pri_dword; + __le32 hop_cnt_dword; +}; + +/* INIT_VPI - initialize the VPI resource */ +struct sli4_cmd_init_vpi { + struct sli4_mbox_command_header hdr; + __le16 vpi; + __le16 vfi; +}; + +/* POST_XRI - post XRI resources to the SLI Port */ +enum sli4_post_xri_flags { + SLI4_POST_XRI_COUNT = 0xfff, + SLI4_POST_XRI_FLAG_ENX = 0x1000, + SLI4_POST_XRI_FLAG_DL = 0x2000, + SLI4_POST_XRI_FLAG_DI = 0x4000, + SLI4_POST_XRI_FLAG_VAL = 0x8000, +}; + +struct sli4_cmd_post_xri { + struct sli4_mbox_command_header hdr; + __le16 xri_base; + __le16 xri_count_flags; +}; + +/* RELEASE_XRI - Release XRI resources from the SLI Port */ +enum sli4_release_xri_flags { + SLI4_RELEASE_XRI_REL_XRI_CNT = 0x1f, + SLI4_RELEASE_XRI_COUNT = 0x1f, +}; + +struct sli4_cmd_release_xri { + struct sli4_mbox_command_header hdr; + __le16 rel_xri_count_word; + __le16 xri_count_word; + + struct { + __le16 xri_tag0; + __le16 xri_tag1; + } xri_tbl[62]; +}; + +/* READ_CONFIG - read SLI port configuration parameters */ +struct sli4_cmd_read_config { + struct sli4_mbox_command_header hdr; +}; + +enum sli4_read_cfg_resp_flags { + SLI4_READ_CFG_RESP_RESOURCE_EXT = 0x80000000, /* DW1 */ + SLI4_READ_CFG_RESP_TOPOLOGY = 0xff000000, /* DW2 */ +}; + +enum sli4_read_cfg_topo { + SLI4_READ_CFG_TOPO_FC = 0x1, /* FC topology unknown */ + SLI4_READ_CFG_TOPO_NON_FC_AL = 0x2, /* FC point-to-point or fabric */ + SLI4_READ_CFG_TOPO_FC_AL = 0x3, /* FC-AL topology */ +}; + +/* Link Module Type */ +enum sli4_read_cfg_lmt { + SLI4_LINK_MODULE_TYPE_1GB = 0x0004, + SLI4_LINK_MODULE_TYPE_2GB = 0x0008, + SLI4_LINK_MODULE_TYPE_4GB = 0x0040, + SLI4_LINK_MODULE_TYPE_8GB = 0x0080, + SLI4_LINK_MODULE_TYPE_16GB = 0x0200, + SLI4_LINK_MODULE_TYPE_32GB = 0x0400, + SLI4_LINK_MODULE_TYPE_64GB = 0x0800, + SLI4_LINK_MODULE_TYPE_128GB = 0x1000, +}; + +struct sli4_rsp_read_config { + struct sli4_mbox_command_header hdr; 
+ __le32 ext_dword; + __le32 topology_dword; + __le32 resvd8; + __le16 e_d_tov; + __le16 resvd14; + __le32 resvd16; + __le16 r_a_tov; + __le16 resvd22; + __le32 resvd24; + __le32 resvd28; + __le16 lmt; + __le16 resvd34; + __le32 resvd36; + __le32 resvd40; + __le16 xri_base; + __le16 xri_count; + __le16 rpi_base; + __le16 rpi_count; + __le16 vpi_base; + __le16 vpi_count; + __le16 vfi_base; + __le16 vfi_count; + __le16 resvd60; + __le16 fcfi_count; + __le16 rq_count; + __le16 eq_count; + __le16 wq_count; + __le16 cq_count; + __le32 pad[45]; +}; + +/* READ_NVPARMS - read SLI port configuration parameters */ +enum sli4_read_nvparms_flags { + SLI4_READ_NVPARAMS_HARD_ALPA = 0xff, + SLI4_READ_NVPARAMS_PREFERRED_D_ID = 0xffffff00, +}; + +struct sli4_cmd_read_nvparms { + struct sli4_mbox_command_header hdr; + __le32 resvd0; + __le32 resvd4; + __le32 resvd8; + __le32 resvd12; + u8 wwpn[8]; + u8 wwnn[8]; + __le32 hard_alpa_d_id; +}; + +/* WRITE_NVPARMS - write SLI port configuration parameters */ +struct sli4_cmd_write_nvparms { + struct sli4_mbox_command_header hdr; + __le32 resvd0; + __le32 resvd4; + __le32 resvd8; + __le32 resvd12; + u8 wwpn[8]; + u8 wwnn[8]; + __le32 hard_alpa_d_id; +}; + +/* READ_REV - read the Port revision levels */ +enum { + SLI4_READ_REV_FLAG_SLI_LEVEL = 0xf, + SLI4_READ_REV_FLAG_FCOEM = 0x10, + SLI4_READ_REV_FLAG_CEEV = 0x60, + SLI4_READ_REV_FLAG_VPD = 0x2000, + + SLI4_READ_REV_AVAILABLE_LENGTH = 0xffffff, +}; + +struct sli4_cmd_read_rev { + struct sli4_mbox_command_header hdr; + __le16 resvd0; + __le16 flags0_word; + __le32 first_hw_rev; + __le32 second_hw_rev; + __le32 resvd12; + __le32 third_hw_rev; + u8 fc_ph_low; + u8 fc_ph_high; + u8 feature_level_low; + u8 feature_level_high; + __le32 resvd24; + __le32 first_fw_id; + u8 first_fw_name[16]; + __le32 second_fw_id; + u8 second_fw_name[16]; + __le32 rsvd18[30]; + __le32 available_length_dword; + struct sli4_dmaaddr hostbuf; + __le32 returned_vpd_length; + __le32 actual_vpd_length; +}; + +/* READ_SPARM64 - read the Port service parameters */ +#define SLI4_READ_SPARM64_WWPN_OFFSET (4 * sizeof(u32)) +#define SLI4_READ_SPARM64_WWNN_OFFSET (6 * sizeof(u32)) + +struct sli4_cmd_read_sparm64 { + struct sli4_mbox_command_header hdr; + __le32 resvd0; + __le32 resvd4; + struct sli4_bde bde_64; + __le16 vpi; + __le16 resvd22; + __le16 port_name_start; + __le16 port_name_len; + __le16 node_name_start; + __le16 node_name_len; +}; + +/* READ_TOPOLOGY - read the link event information */ +enum sli4_read_topo_e { + SLI4_READTOPO_ATTEN_TYPE = 0xff, + SLI4_READTOPO_FLAG_IL = 0x100, + SLI4_READTOPO_FLAG_PB_RECVD = 0x200, + + SLI4_READTOPO_LINKSTATE_RECV = 0x3, + SLI4_READTOPO_LINKSTATE_TRANS = 0xc, + SLI4_READTOPO_LINKSTATE_MACHINE = 0xf0, + SLI4_READTOPO_LINKSTATE_SPEED = 0xff00, + SLI4_READTOPO_LINKSTATE_TF = 0x40000000, + SLI4_READTOPO_LINKSTATE_LU = 0x80000000, + + SLI4_READTOPO_SCN_BBSCN = 0xf, + SLI4_READTOPO_SCN_CBBSCN = 0xf0, + + SLI4_READTOPO_R_T_TOV = 0x1ff, + SLI4_READTOPO_AL_TOV = 0xf000, + + SLI4_READTOPO_PB_FLAG = 0x80, + + SLI4_READTOPO_INIT_N_PORTID = 0xffffff, +}; + +#define SLI4_MIN_LOOP_MAP_BYTES 128 + +struct sli4_cmd_read_topology { + struct sli4_mbox_command_header hdr; + __le32 event_tag; + __le32 dw2_attentype; + u8 topology; + u8 lip_type; + u8 lip_al_ps; + u8 al_pa_granted; + struct sli4_bde bde_loop_map; + __le32 linkdown_state; + __le32 currlink_state; + u8 max_bbc; + u8 init_bbc; + u8 scn_flags; + u8 rsvd39; + __le16 dw10w0_al_rt_tov; + __le16 lp_tov; + u8 acquired_al_pa; + u8 pb_flags; + __le16 specified_al_pa; 
+ __le32 dw12_init_n_port_id; +}; + +enum sli4_read_topo_link { + SLI4_READ_TOPOLOGY_LINK_UP = 0x1, + SLI4_READ_TOPOLOGY_LINK_DOWN, + SLI4_READ_TOPOLOGY_LINK_NO_ALPA, +}; + +enum sli4_read_topo { + SLI4_READ_TOPO_UNKNOWN = 0x0, + SLI4_READ_TOPO_NON_FC_AL, + SLI4_READ_TOPO_FC_AL, +}; + +enum sli4_read_topo_speed { + SLI4_READ_TOPOLOGY_SPEED_NONE = 0x00, + SLI4_READ_TOPOLOGY_SPEED_1G = 0x04, + SLI4_READ_TOPOLOGY_SPEED_2G = 0x08, + SLI4_READ_TOPOLOGY_SPEED_4G = 0x10, + SLI4_READ_TOPOLOGY_SPEED_8G = 0x20, + SLI4_READ_TOPOLOGY_SPEED_10G = 0x40, + SLI4_READ_TOPOLOGY_SPEED_16G = 0x80, + SLI4_READ_TOPOLOGY_SPEED_32G = 0x90, + SLI4_READ_TOPOLOGY_SPEED_64G = 0xa0, + SLI4_READ_TOPOLOGY_SPEED_128G = 0xb0, +}; + +/* REG_FCFI - activate a FC Forwarder */ +struct sli4_cmd_reg_fcfi_rq_cfg { + u8 r_ctl_mask; + u8 r_ctl_match; + u8 type_mask; + u8 type_match; +}; + +enum sli4_regfcfi_tag { + SLI4_REGFCFI_VLAN_TAG = 0xfff, + SLI4_REGFCFI_VLANTAG_VALID = 0x1000, +}; + +#define SLI4_CMD_REG_FCFI_NUM_RQ_CFG 4 +struct sli4_cmd_reg_fcfi { + struct sli4_mbox_command_header hdr; + __le16 fcf_index; + __le16 fcfi; + __le16 rqid1; + __le16 rqid0; + __le16 rqid3; + __le16 rqid2; + struct sli4_cmd_reg_fcfi_rq_cfg + rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG]; + __le32 dw8_vlan; +}; + +#define SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG 4 +#define SLI4_CMD_REG_FCFI_MRQ_MAX_NUM_RQ 32 +#define SLI4_CMD_REG_FCFI_SET_FCFI_MODE 0 +#define SLI4_CMD_REG_FCFI_SET_MRQ_MODE 1 + +enum sli4_reg_fcfi_mrq { + SLI4_REGFCFI_MRQ_VLAN_TAG = 0xfff, + SLI4_REGFCFI_MRQ_VLANTAG_VALID = 0x1000, + SLI4_REGFCFI_MRQ_MODE = 0x2000, + + SLI4_REGFCFI_MRQ_MASK_NUM_PAIRS = 0xff, + SLI4_REGFCFI_MRQ_FILTER_BITMASK = 0xf00, + SLI4_REGFCFI_MRQ_RQ_SEL_POLICY = 0xf000, +}; + +struct sli4_cmd_reg_fcfi_mrq { + struct sli4_mbox_command_header hdr; + __le16 fcf_index; + __le16 fcfi; + __le16 rqid1; + __le16 rqid0; + __le16 rqid3; + __le16 rqid2; + struct sli4_cmd_reg_fcfi_rq_cfg + rq_cfg[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG]; + __le32 dw8_vlan; + __le32 dw9_mrqflags; +}; + +struct sli4_cmd_rq_cfg { + __le16 rq_id; + u8 r_ctl_mask; + u8 r_ctl_match; + u8 type_mask; + u8 type_match; +}; + +/* REG_RPI - register a Remote Port Indicator */ +enum sli4_reg_rpi { + SLI4_REGRPI_REMOTE_N_PORTID = 0xffffff, /* DW2 */ + SLI4_REGRPI_UPD = 0x1000000, + SLI4_REGRPI_ETOW = 0x8000000, + SLI4_REGRPI_TERP = 0x20000000, + SLI4_REGRPI_CI = 0x80000000, +}; + +struct sli4_cmd_reg_rpi { + struct sli4_mbox_command_header hdr; + __le16 rpi; + __le16 rsvd2; + __le32 dw2_rportid_flags; + struct sli4_bde bde_64; + __le16 vpi; + __le16 rsvd26; +}; + +#define SLI4_REG_RPI_BUF_LEN 0x70 + +/* REG_VFI - register a Virtual Fabric Indicator */ +enum sli_reg_vfi { + SLI4_REGVFI_VP = 0x1000, /* DW1 */ + SLI4_REGVFI_UPD = 0x2000, + + SLI4_REGVFI_LOCAL_N_PORTID = 0xffffff, /* DW10 */ +}; + +struct sli4_cmd_reg_vfi { + struct sli4_mbox_command_header hdr; + __le16 vfi; + __le16 dw0w1_flags; + __le16 fcfi; + __le16 vpi; + u8 wwpn[8]; + struct sli4_bde sparm; + __le32 e_d_tov; + __le32 r_a_tov; + __le32 dw10_lportid_flags; +}; + +/* REG_VPI - register a Virtual Port Indicator */ +enum sli4_reg_vpi { + SLI4_REGVPI_LOCAL_N_PORTID = 0xffffff, + SLI4_REGVPI_UPD = 0x1000000, +}; + +struct sli4_cmd_reg_vpi { + struct sli4_mbox_command_header hdr; + __le32 rsvd0; + __le32 dw2_lportid_flags; + u8 wwpn[8]; + __le32 rsvd12; + __le16 vpi; + __le16 vfi; +}; + +/* REQUEST_FEATURES - request / query SLI features */ +enum sli4_req_features_flags { + SLI4_REQFEAT_QRY = 0x1, /* Dw1 */ + + SLI4_REQFEAT_IAAB = 1 << 0, /* DW2 & DW3 */ + 
SLI4_REQFEAT_NPIV = 1 << 1, + SLI4_REQFEAT_DIF = 1 << 2, + SLI4_REQFEAT_VF = 1 << 3, + SLI4_REQFEAT_FCPI = 1 << 4, + SLI4_REQFEAT_FCPT = 1 << 5, + SLI4_REQFEAT_FCPC = 1 << 6, + SLI4_REQFEAT_RSVD = 1 << 7, + SLI4_REQFEAT_RQD = 1 << 8, + SLI4_REQFEAT_IAAR = 1 << 9, + SLI4_REQFEAT_HLM = 1 << 10, + SLI4_REQFEAT_PERFH = 1 << 11, + SLI4_REQFEAT_RXSEQ = 1 << 12, + SLI4_REQFEAT_RXRI = 1 << 13, + SLI4_REQFEAT_DCL2 = 1 << 14, + SLI4_REQFEAT_RSCO = 1 << 15, + SLI4_REQFEAT_MRQP = 1 << 16, +}; + +struct sli4_cmd_request_features { + struct sli4_mbox_command_header hdr; + __le32 dw1_qry; + __le32 cmd; + __le32 resp; +}; + +/* + * SLI_CONFIG - submit a configuration command to Port + * + * Command is either embedded as part of the payload (embed) or located + * in a separate memory buffer (mem) + */ +enum sli4_sli_config { + SLI4_SLICONF_EMB = 0x1, /* DW1 */ + SLI4_SLICONF_PMDCMD_SHIFT = 3, + SLI4_SLICONF_PMDCMD_MASK = 0xf8, + SLI4_SLICONF_PMDCMD_VAL_1 = 8, + SLI4_SLICONF_PMDCNT = 0xf8, + + SLI4_SLICONF_PMD_LEN = 0x00ffffff, +}; + +struct sli4_cmd_sli_config { + struct sli4_mbox_command_header hdr; + __le32 dw1_flags; + __le32 payload_len; + __le32 rsvd12[3]; + union { + u8 embed[58 * sizeof(u32)]; + struct sli4_bufptr mem; + } payload; +}; + +/* READ_STATUS - read tx/rx status of a particular port */ +#define SLI4_READSTATUS_CLEAR_COUNTERS 0x1 + +struct sli4_cmd_read_status { + struct sli4_mbox_command_header hdr; + __le32 dw1_flags; + __le32 rsvd4; + __le32 trans_kbyte_cnt; + __le32 recv_kbyte_cnt; + __le32 trans_frame_cnt; + __le32 recv_frame_cnt; + __le32 trans_seq_cnt; + __le32 recv_seq_cnt; + __le32 tot_exchanges_orig; + __le32 tot_exchanges_resp; + __le32 recv_p_bsy_cnt; + __le32 recv_f_bsy_cnt; + __le32 no_rq_buf_dropped_frames_cnt; + __le32 empty_rq_timeout_cnt; + __le32 no_xri_dropped_frames_cnt; + __le32 empty_xri_pool_cnt; +}; + +/* READ_LNK_STAT - read link status of a particular port */ +enum sli4_read_link_stats_flags { + SLI4_READ_LNKSTAT_REC = 1u << 0, + SLI4_READ_LNKSTAT_GEC = 1u << 1, + SLI4_READ_LNKSTAT_W02OF = 1u << 2, + SLI4_READ_LNKSTAT_W03OF = 1u << 3, + SLI4_READ_LNKSTAT_W04OF = 1u << 4, + SLI4_READ_LNKSTAT_W05OF = 1u << 5, + SLI4_READ_LNKSTAT_W06OF = 1u << 6, + SLI4_READ_LNKSTAT_W07OF = 1u << 7, + SLI4_READ_LNKSTAT_W08OF = 1u << 8, + SLI4_READ_LNKSTAT_W09OF = 1u << 9, + SLI4_READ_LNKSTAT_W10OF = 1u << 10, + SLI4_READ_LNKSTAT_W11OF = 1u << 11, + SLI4_READ_LNKSTAT_W12OF = 1u << 12, + SLI4_READ_LNKSTAT_W13OF = 1u << 13, + SLI4_READ_LNKSTAT_W14OF = 1u << 14, + SLI4_READ_LNKSTAT_W15OF = 1u << 15, + SLI4_READ_LNKSTAT_W16OF = 1u << 16, + SLI4_READ_LNKSTAT_W17OF = 1u << 17, + SLI4_READ_LNKSTAT_W18OF = 1u << 18, + SLI4_READ_LNKSTAT_W19OF = 1u << 19, + SLI4_READ_LNKSTAT_W20OF = 1u << 20, + SLI4_READ_LNKSTAT_W21OF = 1u << 21, + SLI4_READ_LNKSTAT_CLRC = 1u << 30, + SLI4_READ_LNKSTAT_CLOF = 1u << 31, +}; + +struct sli4_cmd_read_link_stats { + struct sli4_mbox_command_header hdr; + __le32 dw1_flags; + __le32 linkfail_errcnt; + __le32 losssync_errcnt; + __le32 losssignal_errcnt; + __le32 primseq_errcnt; + __le32 inval_txword_errcnt; + __le32 crc_errcnt; + __le32 primseq_eventtimeout_cnt; + __le32 elastic_bufoverrun_errcnt; + __le32 arbit_fc_al_timeout_cnt; + __le32 adv_rx_buftor_to_buf_credit; + __le32 curr_rx_buf_to_buf_credit; + __le32 adv_tx_buf_to_buf_credit; + __le32 curr_tx_buf_to_buf_credit; + __le32 rx_eofa_cnt; + __le32 rx_eofdti_cnt; + __le32 rx_eofni_cnt; + __le32 rx_soff_cnt; + __le32 rx_dropped_no_aer_cnt; + __le32 rx_dropped_no_avail_rpi_rescnt; + __le32 
rx_dropped_no_avail_xri_rescnt; +}; + +/* Format a WQE with WQ_ID Association performance hint */ +static inline void +sli_set_wq_id_association(void *entry, u16 q_id) +{ + u32 *wqe = entry; + + /* + * Set Word 10, bit 0 to zero + * Set Word 10, bits 15:1 to the WQ ID + */ + wqe[10] &= ~0xffff; + wqe[10] |= q_id << 1; +} + +/* UNREG_FCFI - unregister a FCFI */ +struct sli4_cmd_unreg_fcfi { + struct sli4_mbox_command_header hdr; + __le32 rsvd0; + __le16 fcfi; + __le16 rsvd6; +}; + +/* UNREG_RPI - unregister one or more RPI */ +enum sli4_unreg_rpi { + SLI4_UNREG_RPI_DP = 0x2000, + SLI4_UNREG_RPI_II_SHIFT = 14, + SLI4_UNREG_RPI_II_MASK = 0xc000, + SLI4_UNREG_RPI_II_RPI = 0x0000, + SLI4_UNREG_RPI_II_VPI = 0x4000, + SLI4_UNREG_RPI_II_VFI = 0x8000, + SLI4_UNREG_RPI_II_FCFI = 0xc000, + + SLI4_UNREG_RPI_DEST_N_PORTID_MASK = 0x00ffffff, +}; + +struct sli4_cmd_unreg_rpi { + struct sli4_mbox_command_header hdr; + __le16 index; + __le16 dw1w1_flags; + __le32 dw2_dest_n_portid; +}; + +/* UNREG_VFI - unregister one or more VFI */ +enum sli4_unreg_vfi { + SLI4_UNREG_VFI_II_SHIFT = 14, + SLI4_UNREG_VFI_II_MASK = 0xc000, + SLI4_UNREG_VFI_II_VFI = 0x0000, + SLI4_UNREG_VFI_II_FCFI = 0xc000, +}; + +struct sli4_cmd_unreg_vfi { + struct sli4_mbox_command_header hdr; + __le32 rsvd0; + __le16 index; + __le16 dw2_flags; +}; + +enum sli4_unreg_type { + SLI4_UNREG_TYPE_PORT, + SLI4_UNREG_TYPE_DOMAIN, + SLI4_UNREG_TYPE_FCF, + SLI4_UNREG_TYPE_ALL +}; + +/* UNREG_VPI - unregister one or more VPI */ +enum sli4_unreg_vpi { + SLI4_UNREG_VPI_II_SHIFT = 14, + SLI4_UNREG_VPI_II_MASK = 0xc000, + SLI4_UNREG_VPI_II_VPI = 0x0000, + SLI4_UNREG_VPI_II_VFI = 0x8000, + SLI4_UNREG_VPI_II_FCFI = 0xc000, +}; + +struct sli4_cmd_unreg_vpi { + struct sli4_mbox_command_header hdr; + __le32 rsvd0; + __le16 index; + __le16 dw2w0_flags; +}; + +/* AUTO_XFER_RDY - Configure the auto-generate XFER-RDY feature */ +struct sli4_cmd_config_auto_xfer_rdy { + struct sli4_mbox_command_header hdr; + __le32 rsvd0; + __le32 max_burst_len; +}; + +#define SLI4_CONFIG_AUTO_XFERRDY_BLKSIZE 0xffff + +struct sli4_cmd_config_auto_xfer_rdy_hp { + struct sli4_mbox_command_header hdr; + __le32 rsvd0; + __le32 max_burst_len; + __le32 dw3_esoc_flags; + __le16 block_size; + __le16 rsvd14; +}; + +/************************************************************************* + * SLI-4 common configuration command formats and definitions + */ + +/* + * Subsystem values. + */ +enum sli4_subsystem { + SLI4_SUBSYSTEM_COMMON = 0x01, + SLI4_SUBSYSTEM_LOWLEVEL = 0x0b, + SLI4_SUBSYSTEM_FC = 0x0c, + SLI4_SUBSYSTEM_DMTF = 0x11, +}; + +#define SLI4_OPC_LOWLEVEL_SET_WATCHDOG 0X36 + +/* + * Common opcode (OPC) values. 
+ */ +enum sli4_cmn_opcode { + SLI4_CMN_FUNCTION_RESET = 0x3d, + SLI4_CMN_CREATE_CQ = 0x0c, + SLI4_CMN_CREATE_CQ_SET = 0x1d, + SLI4_CMN_DESTROY_CQ = 0x36, + SLI4_CMN_MODIFY_EQ_DELAY = 0x29, + SLI4_CMN_CREATE_EQ = 0x0d, + SLI4_CMN_DESTROY_EQ = 0x37, + SLI4_CMN_CREATE_MQ_EXT = 0x5a, + SLI4_CMN_DESTROY_MQ = 0x35, + SLI4_CMN_GET_CNTL_ATTRIBUTES = 0x20, + SLI4_CMN_NOP = 0x21, + SLI4_CMN_GET_RSC_EXTENT_INFO = 0x9a, + SLI4_CMN_GET_SLI4_PARAMS = 0xb5, + SLI4_CMN_QUERY_FW_CONFIG = 0x3a, + SLI4_CMN_GET_PORT_NAME = 0x4d, + + SLI4_CMN_WRITE_FLASHROM = 0x07, + /* TRANSCEIVER Data */ + SLI4_CMN_READ_TRANS_DATA = 0x49, + SLI4_CMN_GET_CNTL_ADDL_ATTRS = 0x79, + SLI4_CMN_GET_FUNCTION_CFG = 0xa0, + SLI4_CMN_GET_PROFILE_CFG = 0xa4, + SLI4_CMN_SET_PROFILE_CFG = 0xa5, + SLI4_CMN_GET_PROFILE_LIST = 0xa6, + SLI4_CMN_GET_ACTIVE_PROFILE = 0xa7, + SLI4_CMN_SET_ACTIVE_PROFILE = 0xa8, + SLI4_CMN_READ_OBJECT = 0xab, + SLI4_CMN_WRITE_OBJECT = 0xac, + SLI4_CMN_DELETE_OBJECT = 0xae, + SLI4_CMN_READ_OBJECT_LIST = 0xad, + SLI4_CMN_SET_DUMP_LOCATION = 0xb8, + SLI4_CMN_SET_FEATURES = 0xbf, + SLI4_CMN_GET_RECFG_LINK_INFO = 0xc9, + SLI4_CMN_SET_RECNG_LINK_ID = 0xca, +}; + +/* DMTF opcode (OPC) values */ +#define DMTF_EXEC_CLP_CMD 0x01 + +/* + * COMMON_FUNCTION_RESET + * + * Resets the Port, returning it to a power-on state. This configuration + * command does not have a payload and should set/expect the lengths to + * be zero. + */ +struct sli4_rqst_cmn_function_reset { + struct sli4_rqst_hdr hdr; +}; + +struct sli4_rsp_cmn_function_reset { + struct sli4_rsp_hdr hdr; +}; + +/* + * COMMON_GET_CNTL_ATTRIBUTES + * + * Query for information about the SLI Port + */ +enum sli4_cntrl_attr_flags { + SLI4_CNTL_ATTR_PORTNUM = 0x3f, + SLI4_CNTL_ATTR_PORTTYPE = 0xc0, +}; + +struct sli4_rsp_cmn_get_cntl_attributes { + struct sli4_rsp_hdr hdr; + u8 version_str[32]; + u8 manufacturer_name[32]; + __le32 supported_modes; + u8 eprom_version_lo; + u8 eprom_version_hi; + __le16 rsvd17; + __le32 mbx_ds_version; + __le32 ep_fw_ds_version; + u8 ncsi_version_str[12]; + __le32 def_extended_timeout; + u8 model_number[32]; + u8 description[64]; + u8 serial_number[32]; + u8 ip_version_str[32]; + u8 fw_version_str[32]; + u8 bios_version_str[32]; + u8 redboot_version_str[32]; + u8 driver_version_str[32]; + u8 fw_on_flash_version_str[32]; + __le32 functionalities_supported; + __le16 max_cdb_length; + u8 asic_revision; + u8 generational_guid0; + __le32 generational_guid1_12[3]; + __le16 generational_guid13_14; + u8 generational_guid15; + u8 hba_port_count; + __le16 default_link_down_timeout; + u8 iscsi_version_min_max; + u8 multifunctional_device; + u8 cache_valid; + u8 hba_status; + u8 max_domains_supported; + u8 port_num_type_flags; + __le32 firmware_post_status; + __le32 hba_mtu; + u8 iscsi_features; + u8 rsvd121[3]; + __le16 pci_vendor_id; + __le16 pci_device_id; + __le16 pci_sub_vendor_id; + __le16 pci_sub_system_id; + u8 pci_bus_number; + u8 pci_device_number; + u8 pci_function_number; + u8 interface_type; + __le64 unique_identifier; + u8 number_of_netfilters; + u8 rsvd122[3]; +}; + +/* + * COMMON_GET_CNTL_ATTRIBUTES + * + * This command queries the controller information from the Flash ROM. 
+ */ +struct sli4_rqst_cmn_get_cntl_addl_attributes { + struct sli4_rqst_hdr hdr; +}; + +struct sli4_rsp_cmn_get_cntl_addl_attributes { + struct sli4_rsp_hdr hdr; + __le16 ipl_file_number; + u8 ipl_file_version; + u8 rsvd4; + u8 on_die_temperature; + u8 rsvd5[3]; + __le32 driver_advanced_features_supported; + __le32 rsvd7[4]; + char universal_bios_version[32]; + char x86_bios_version[32]; + char efi_bios_version[32]; + char fcode_version[32]; + char uefi_bios_version[32]; + char uefi_nic_version[32]; + char uefi_fcode_version[32]; + char uefi_iscsi_version[32]; + char iscsi_x86_bios_version[32]; + char pxe_x86_bios_version[32]; + u8 default_wwpn[8]; + u8 ext_phy_version[32]; + u8 fc_universal_bios_version[32]; + u8 fc_x86_bios_version[32]; + u8 fc_efi_bios_version[32]; + u8 fc_fcode_version[32]; + u8 ext_phy_crc_label[8]; + u8 ipl_file_name[16]; + u8 rsvd139[72]; +}; + +/* + * COMMON_NOP + * + * This command does not do anything; it only returns + * the payload in the completion. + */ +struct sli4_rqst_cmn_nop { + struct sli4_rqst_hdr hdr; + __le32 context[2]; +}; + +struct sli4_rsp_cmn_nop { + struct sli4_rsp_hdr hdr; + __le32 context[2]; +}; + +struct sli4_rqst_cmn_get_resource_extent_info { + struct sli4_rqst_hdr hdr; + __le16 resource_type; + __le16 rsvd16; +}; + +enum sli4_rsc_type { + SLI4_RSC_TYPE_VFI = 0x20, + SLI4_RSC_TYPE_VPI = 0x21, + SLI4_RSC_TYPE_RPI = 0x22, + SLI4_RSC_TYPE_XRI = 0x23, +}; + +struct sli4_rsp_cmn_get_resource_extent_info { + struct sli4_rsp_hdr hdr; + __le16 resource_extent_count; + __le16 resource_extent_size; +}; + +#define SLI4_128BYTE_WQE_SUPPORT 0x02 + +#define GET_Q_CNT_METHOD(m) \ + (((m) & SLI4_PARAM_Q_CNT_MTHD_MASK) >> SLI4_PARAM_Q_CNT_MTHD_SHFT) +#define GET_Q_CREATE_VERSION(v) \ + (((v) & SLI4_PARAM_QV_MASK) >> SLI4_PARAM_QV_SHIFT) + +enum sli4_rsp_get_params_e { + /*GENERIC*/ + SLI4_PARAM_Q_CNT_MTHD_SHFT = 24, + SLI4_PARAM_Q_CNT_MTHD_MASK = 0xf << 24, + SLI4_PARAM_QV_SHIFT = 14, + SLI4_PARAM_QV_MASK = 3 << 14, + + /* DW4 */ + SLI4_PARAM_PROTO_TYPE_MASK = 0xff, + /* DW5 */ + SLI4_PARAM_FT = 1 << 0, + SLI4_PARAM_SLI_REV_MASK = 0xf << 4, + SLI4_PARAM_SLI_FAM_MASK = 0xf << 8, + SLI4_PARAM_IF_TYPE_MASK = 0xf << 12, + SLI4_PARAM_SLI_HINT1_MASK = 0xff << 16, + SLI4_PARAM_SLI_HINT2_MASK = 0x1f << 24, + /* DW6 */ + SLI4_PARAM_EQ_PAGE_CNT_MASK = 0xf << 0, + SLI4_PARAM_EQE_SZS_MASK = 0xf << 8, + SLI4_PARAM_EQ_PAGE_SZS_MASK = 0xff << 16, + /* DW8 */ + SLI4_PARAM_CQ_PAGE_CNT_MASK = 0xf << 0, + SLI4_PARAM_CQE_SZS_MASK = 0xf << 8, + SLI4_PARAM_CQ_PAGE_SZS_MASK = 0xff << 16, + /* DW10 */ + SLI4_PARAM_MQ_PAGE_CNT_MASK = 0xf << 0, + SLI4_PARAM_MQ_PAGE_SZS_MASK = 0xff << 16, + /* DW12 */ + SLI4_PARAM_WQ_PAGE_CNT_MASK = 0xf << 0, + SLI4_PARAM_WQE_SZS_MASK = 0xf << 8, + SLI4_PARAM_WQ_PAGE_SZS_MASK = 0xff << 16, + /* DW14 */ + SLI4_PARAM_RQ_PAGE_CNT_MASK = 0xf << 0, + SLI4_PARAM_RQE_SZS_MASK = 0xf << 8, + SLI4_PARAM_RQ_PAGE_SZS_MASK = 0xff << 16, + /* DW15W1*/ + SLI4_PARAM_RQ_DB_WINDOW_MASK = 0xf000, + /* DW16 */ + SLI4_PARAM_FC = 1 << 0, + SLI4_PARAM_EXT = 1 << 1, + SLI4_PARAM_HDRR = 1 << 2, + SLI4_PARAM_SGLR = 1 << 3, + SLI4_PARAM_FBRR = 1 << 4, + SLI4_PARAM_AREG = 1 << 5, + SLI4_PARAM_TGT = 1 << 6, + SLI4_PARAM_TERP = 1 << 7, + SLI4_PARAM_ASSI = 1 << 8, + SLI4_PARAM_WCHN = 1 << 9, + SLI4_PARAM_TCCA = 1 << 10, + SLI4_PARAM_TRTY = 1 << 11, + SLI4_PARAM_TRIR = 1 << 12, + SLI4_PARAM_PHOFF = 1 << 13, + SLI4_PARAM_PHON = 1 << 14, + SLI4_PARAM_PHWQ = 1 << 15, + SLI4_PARAM_BOUND_4GA = 1 << 16, + SLI4_PARAM_RXC = 1 << 17, + SLI4_PARAM_HLM = 1 << 18, + SLI4_PARAM_IPR = 1 << 19, 
+ SLI4_PARAM_RXRI = 1 << 20, + SLI4_PARAM_SGLC = 1 << 21, + SLI4_PARAM_TIMM = 1 << 22, + SLI4_PARAM_TSMM = 1 << 23, + SLI4_PARAM_OAS = 1 << 25, + SLI4_PARAM_LC = 1 << 26, + SLI4_PARAM_AGXF = 1 << 27, + SLI4_PARAM_LOOPBACK_MASK = 0xf << 28, + /* DW18 */ + SLI4_PARAM_SGL_PAGE_CNT_MASK = 0xf << 0, + SLI4_PARAM_SGL_PAGE_SZS_MASK = 0xff << 8, + SLI4_PARAM_SGL_PP_ALIGN_MASK = 0xff << 16, +}; + +struct sli4_rqst_cmn_get_sli4_params { + struct sli4_rqst_hdr hdr; +}; + +struct sli4_rsp_cmn_get_sli4_params { + struct sli4_rsp_hdr hdr; + __le32 dw4_protocol_type; + __le32 dw5_sli; + __le32 dw6_eq_page_cnt; + __le16 eqe_count_mask; + __le16 rsvd26; + __le32 dw8_cq_page_cnt; + __le16 cqe_count_mask; + __le16 rsvd34; + __le32 dw10_mq_page_cnt; + __le16 mqe_count_mask; + __le16 rsvd42; + __le32 dw12_wq_page_cnt; + __le16 wqe_count_mask; + __le16 rsvd50; + __le32 dw14_rq_page_cnt; + __le16 rqe_count_mask; + __le16 dw15w1_rq_db_window; + __le32 dw16_loopback_scope; + __le32 sge_supported_length; + __le32 dw18_sgl_page_cnt; + __le16 min_rq_buffer_size; + __le16 rsvd75; + __le32 max_rq_buffer_size; + __le16 physical_xri_max; + __le16 physical_rpi_max; + __le16 physical_vpi_max; + __le16 physical_vfi_max; + __le32 rsvd88; + __le16 frag_num_field_offset; + __le16 frag_num_field_size; + __le16 sgl_index_field_offset; + __le16 sgl_index_field_size; + __le32 chain_sge_initial_value_lo; + __le32 chain_sge_initial_value_hi; +}; + +/*Port Types*/ +enum sli4_port_types { + SLI4_PORT_TYPE_ETH = 0, + SLI4_PORT_TYPE_FC = 1, +}; + +struct sli4_rqst_cmn_get_port_name { + struct sli4_rqst_hdr hdr; + u8 port_type; + u8 rsvd4[3]; +}; + +struct sli4_rsp_cmn_get_port_name { + struct sli4_rsp_hdr hdr; + char port_name[4]; +}; + +struct sli4_rqst_cmn_write_flashrom { + struct sli4_rqst_hdr hdr; + __le32 flash_rom_access_opcode; + __le32 flash_rom_access_operation_type; + __le32 data_buffer_size; + __le32 offset; + u8 data_buffer[4]; +}; + +/* + * COMMON_READ_TRANSCEIVER_DATA + * + * This command reads SFF transceiver data(Format is defined + * by the SFF-8472 specification). 
+ */ +struct sli4_rqst_cmn_read_transceiver_data { + struct sli4_rqst_hdr hdr; + __le32 page_number; + __le32 port; +}; + +struct sli4_rsp_cmn_read_transceiver_data { + struct sli4_rsp_hdr hdr; + __le32 page_number; + __le32 port; + u8 page_data[128]; + u8 page_data_2[128]; +}; + +#define SLI4_REQ_DESIRE_READLEN 0xffffff + +struct sli4_rqst_cmn_read_object { + struct sli4_rqst_hdr hdr; + __le32 desired_read_length_dword; + __le32 read_offset; + u8 object_name[104]; + __le32 host_buffer_descriptor_count; + struct sli4_bde host_buffer_descriptor[]; +}; + +#define RSP_COM_READ_OBJ_EOF 0x80000000 + +struct sli4_rsp_cmn_read_object { + struct sli4_rsp_hdr hdr; + __le32 actual_read_length; + __le32 eof_dword; +}; + +enum sli4_rqst_write_object_flags { + SLI4_RQ_DES_WRITE_LEN = 0xffffff, + SLI4_RQ_DES_WRITE_LEN_NOC = 0x40000000, + SLI4_RQ_DES_WRITE_LEN_EOF = 0x80000000, +}; + +struct sli4_rqst_cmn_write_object { + struct sli4_rqst_hdr hdr; + __le32 desired_write_len_dword; + __le32 write_offset; + u8 object_name[104]; + __le32 host_buffer_descriptor_count; + struct sli4_bde host_buffer_descriptor[]; +}; + +#define RSP_CHANGE_STATUS 0xff + +struct sli4_rsp_cmn_write_object { + struct sli4_rsp_hdr hdr; + __le32 actual_write_length; + __le32 change_status_dword; +}; + +struct sli4_rqst_cmn_delete_object { + struct sli4_rqst_hdr hdr; + __le32 rsvd4; + __le32 rsvd5; + u8 object_name[104]; +}; + +#define SLI4_RQ_OBJ_LIST_READ_LEN 0xffffff + +struct sli4_rqst_cmn_read_object_list { + struct sli4_rqst_hdr hdr; + __le32 desired_read_length_dword; + __le32 read_offset; + u8 object_name[104]; + __le32 host_buffer_descriptor_count; + struct sli4_bde host_buffer_descriptor[]; +}; + +enum sli4_rqst_set_dump_flags { + SLI4_CMN_SET_DUMP_BUFFER_LEN = 0xffffff, + SLI4_CMN_SET_DUMP_FDB = 0x20000000, + SLI4_CMN_SET_DUMP_BLP = 0x40000000, + SLI4_CMN_SET_DUMP_QRY = 0x80000000, +}; + +struct sli4_rqst_cmn_set_dump_location { + struct sli4_rqst_hdr hdr; + __le32 buffer_length_dword; + __le32 buf_addr_low; + __le32 buf_addr_high; +}; + +struct sli4_rsp_cmn_set_dump_location { + struct sli4_rsp_hdr hdr; + __le32 buffer_length_dword; +}; + +enum sli4_dump_level { + SLI4_DUMP_LEVEL_NONE, + SLI4_CHIP_LEVEL_DUMP, + SLI4_FUNC_DESC_DUMP, +}; + +enum sli4_dump_state { + SLI4_DUMP_STATE_NONE, + SLI4_CHIP_DUMP_STATE_VALID, + SLI4_FUNC_DUMP_STATE_VALID, +}; + +enum sli4_dump_status { + SLI4_DUMP_READY_STATUS_NOT_READY, + SLI4_DUMP_READY_STATUS_DD_PRESENT, + SLI4_DUMP_READY_STATUS_FDB_PRESENT, + SLI4_DUMP_READY_STATUS_SKIP_DUMP, + SLI4_DUMP_READY_STATUS_FAILED = -1, +}; + +enum sli4_set_features { + SLI4_SET_FEATURES_DIF_SEED = 0x01, + SLI4_SET_FEATURES_XRI_TIMER = 0x03, + SLI4_SET_FEATURES_MAX_PCIE_SPEED = 0x04, + SLI4_SET_FEATURES_FCTL_CHECK = 0x05, + SLI4_SET_FEATURES_FEC = 0x06, + SLI4_SET_FEATURES_PCIE_RECV_DETECT = 0x07, + SLI4_SET_FEATURES_DIF_MEMORY_MODE = 0x08, + SLI4_SET_FEATURES_DISABLE_SLI_PORT_PAUSE_STATE = 0x09, + SLI4_SET_FEATURES_ENABLE_PCIE_OPTIONS = 0x0a, + SLI4_SET_FEAT_CFG_AUTO_XFER_RDY_T10PI = 0x0c, + SLI4_SET_FEATURES_ENABLE_MULTI_RECEIVE_QUEUE = 0x0d, + SLI4_SET_FEATURES_SET_FTD_XFER_HINT = 0x0f, + SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK = 0x11, +}; + +struct sli4_rqst_cmn_set_features { + struct sli4_rqst_hdr hdr; + __le32 feature; + __le32 param_len; + __le32 params[8]; +}; + +struct sli4_rqst_cmn_set_features_dif_seed { + __le16 seed; + __le16 rsvd16; +}; + +enum sli4_rqst_set_mrq_features { + SLI4_RQ_MULTIRQ_ISR = 0x1, + SLI4_RQ_MULTIRQ_AUTOGEN_XFER_RDY = 0x2, + + SLI4_RQ_MULTIRQ_NUM_RQS = 0xff, + 
SLI4_RQ_MULTIRQ_RQ_SELECT = 0xf00, +}; + +struct sli4_rqst_cmn_set_features_multirq { + __le32 auto_gen_xfer_dword; + __le32 num_rqs_dword; +}; + +enum sli4_rqst_health_check_flags { + SLI4_RQ_HEALTH_CHECK_ENABLE = 0x1, + SLI4_RQ_HEALTH_CHECK_QUERY = 0x2, +}; + +struct sli4_rqst_cmn_set_features_health_check { + __le32 health_check_dword; +}; + +struct sli4_rqst_cmn_set_features_set_fdt_xfer_hint { + __le32 fdt_xfer_hint; +}; + +struct sli4_rqst_dmtf_exec_clp_cmd { + struct sli4_rqst_hdr hdr; + __le32 cmd_buf_length; + __le32 resp_buf_length; + __le32 cmd_buf_addr_low; + __le32 cmd_buf_addr_high; + __le32 resp_buf_addr_low; + __le32 resp_buf_addr_high; +}; + +struct sli4_rsp_dmtf_exec_clp_cmd { + struct sli4_rsp_hdr hdr; + __le32 rsvd4; + __le32 resp_length; + __le32 rsvd6; + __le32 rsvd7; + __le32 rsvd8; + __le32 rsvd9; + __le32 clp_status; + __le32 clp_detailed_status; +}; + +#define SLI4_PROTOCOL_FC 0x10 +#define SLI4_PROTOCOL_DEFAULT 0xff + +struct sli4_rspource_descriptor_v1 { + u8 descriptor_type; + u8 descriptor_length; + __le16 rsvd16; + __le32 type_specific[]; +}; + +enum sli4_pcie_desc_flags { + SLI4_PCIE_DESC_IMM = 0x4000, + SLI4_PCIE_DESC_NOSV = 0x8000, + + SLI4_PCIE_DESC_PF_NO = 0x3ff0000, + + SLI4_PCIE_DESC_MISSN_ROLE = 0xff, + SLI4_PCIE_DESC_PCHG = 0x8000000, + SLI4_PCIE_DESC_SCHG = 0x10000000, + SLI4_PCIE_DESC_XCHG = 0x20000000, + SLI4_PCIE_DESC_XROM = 0xc0000000 +}; + +struct sli4_pcie_resource_descriptor_v1 { + u8 descriptor_type; + u8 descriptor_length; + __le16 imm_nosv_dword; + __le32 pf_number_dword; + __le32 rsvd3; + u8 sriov_state; + u8 pf_state; + u8 pf_type; + u8 rsvd4; + __le16 number_of_vfs; + __le16 rsvd5; + __le32 mission_roles_dword; + __le32 rsvd7[16]; +}; + +struct sli4_rqst_cmn_get_function_config { + struct sli4_rqst_hdr hdr; +}; + +struct sli4_rsp_cmn_get_function_config { + struct sli4_rsp_hdr hdr; + __le32 desc_count; + __le32 desc[54]; +}; + +/* Link Config Descriptor for link config functions */ +struct sli4_link_config_descriptor { + u8 link_config_id; + u8 rsvd1[3]; + __le32 config_description[8]; +}; + +#define MAX_LINK_DES 10 + +struct sli4_rqst_cmn_get_reconfig_link_info { + struct sli4_rqst_hdr hdr; +}; + +struct sli4_rsp_cmn_get_reconfig_link_info { + struct sli4_rsp_hdr hdr; + u8 active_link_config_id; + u8 rsvd17; + u8 next_link_config_id; + u8 rsvd19; + __le32 link_configuration_descriptor_count; + struct sli4_link_config_descriptor + desc[MAX_LINK_DES]; +}; + +enum sli4_set_reconfig_link_flags { + SLI4_SET_RECONFIG_LINKID_NEXT = 0xff, + SLI4_SET_RECONFIG_LINKID_FD = 1u << 31, +}; + +struct sli4_rqst_cmn_set_reconfig_link_id { + struct sli4_rqst_hdr hdr; + __le32 dw4_flags; +}; + +struct sli4_rsp_cmn_set_reconfig_link_id { + struct sli4_rsp_hdr hdr; +}; + +struct sli4_rqst_lowlevel_set_watchdog { + struct sli4_rqst_hdr hdr; + __le16 watchdog_timeout; + __le16 rsvd18; +}; + +struct sli4_rsp_lowlevel_set_watchdog { + struct sli4_rsp_hdr hdr; + __le32 rsvd; +}; + +/* FC opcode (OPC) values */ +enum sli4_fc_opcodes { + SLI4_OPC_WQ_CREATE = 0x1, + SLI4_OPC_WQ_DESTROY = 0x2, + SLI4_OPC_POST_SGL_PAGES = 0x3, + SLI4_OPC_RQ_CREATE = 0x5, + SLI4_OPC_RQ_DESTROY = 0x6, + SLI4_OPC_READ_FCF_TABLE = 0x8, + SLI4_OPC_POST_HDR_TEMPLATES = 0xb, + SLI4_OPC_REDISCOVER_FCF = 0x10, +}; + +/* Use the default CQ associated with the WQ */ +#define SLI4_CQ_DEFAULT 0xffff + +/* + * POST_SGL_PAGES + * + * Register the scatter gather list (SGL) memory and + * associate it with an XRI. 
+ */ +struct sli4_rqst_post_sgl_pages { + struct sli4_rqst_hdr hdr; + __le16 xri_start; + __le16 xri_count; + struct { + __le32 page0_low; + __le32 page0_high; + __le32 page1_low; + __le32 page1_high; + } page_set[10]; +}; + +struct sli4_rsp_post_sgl_pages { + struct sli4_rsp_hdr hdr; +}; + +struct sli4_rqst_post_hdr_templates { + struct sli4_rqst_hdr hdr; + __le16 rpi_offset; + __le16 page_count; + struct sli4_dmaaddr page_descriptor[]; +}; + +#define SLI4_HDR_TEMPLATE_SIZE 64 + +enum sli4_io_flags { +/* The XRI associated with this IO is already active */ + SLI4_IO_CONTINUATION = 1 << 0, +/* Automatically generate a good RSP frame */ + SLI4_IO_AUTO_GOOD_RESPONSE = 1 << 1, + SLI4_IO_NO_ABORT = 1 << 2, +/* Set the DNRX bit because no auto xref rdy buffer is posted */ + SLI4_IO_DNRX = 1 << 3, +}; + +enum sli4_callback { + SLI4_CB_LINK, + SLI4_CB_MAX, +}; + +enum sli4_link_status { + SLI4_LINK_STATUS_UP, + SLI4_LINK_STATUS_DOWN, + SLI4_LINK_STATUS_NO_ALPA, + SLI4_LINK_STATUS_MAX, +}; + +enum sli4_link_topology { + SLI4_LINK_TOPO_NON_FC_AL = 1, + SLI4_LINK_TOPO_FC_AL, + SLI4_LINK_TOPO_LOOPBACK_INTERNAL, + SLI4_LINK_TOPO_LOOPBACK_EXTERNAL, + SLI4_LINK_TOPO_NONE, + SLI4_LINK_TOPO_MAX, +}; + +enum sli4_link_medium { + SLI4_LINK_MEDIUM_ETHERNET, + SLI4_LINK_MEDIUM_FC, + SLI4_LINK_MEDIUM_MAX, +}; +/******Driver specific structures******/ + +struct sli4_queue { + /* Common to all queue types */ + struct efc_dma dma; + spinlock_t lock; /* Lock to protect the doorbell register + * writes and queue reads + */ + u32 index; /* current host entry index */ + u16 size; /* entry size */ + u16 length; /* number of entries */ + u16 n_posted; /* number entries posted for CQ, EQ */ + u16 id; /* Port assigned xQ_ID */ + u8 type; /* queue type ie EQ, CQ, ... */ + void __iomem *db_regaddr; /* register address for the doorbell */ + u16 phase; /* For if_type = 6, this value toggle + * for each iteration of the queue, + * a queue entry is valid when a cqe + * valid bit matches this value + */ + u32 proc_limit; /* limit CQE processed per iteration */ + u32 posted_limit; /* CQE/EQE process before ring db */ + u32 max_num_processed; + u64 max_process_time; + union { + u32 r_idx; /* "read" index (MQ only) */ + u32 flag; + } u; +}; + +/* Parameters used to populate WQE*/ +struct sli_bls_params { + u32 s_id; + u32 d_id; + u16 ox_id; + u16 rx_id; + u32 rpi; + u32 vpi; + bool rpi_registered; + u8 payload[12]; + u16 xri; + u16 tag; +}; + +struct sli_els_params { + u32 s_id; + u32 d_id; + u16 ox_id; + u32 rpi; + u32 vpi; + bool rpi_registered; + u32 xmit_len; + u32 rsp_len; + u8 timeout; + u8 cmd; + u16 xri; + u16 tag; +}; + +struct sli_ct_params { + u8 r_ctl; + u8 type; + u8 df_ctl; + u8 timeout; + u16 ox_id; + u32 d_id; + u32 rpi; + u32 vpi; + bool rpi_registered; + u32 xmit_len; + u32 rsp_len; + u16 xri; + u16 tag; +}; + +struct sli_fcp_tgt_params { + u32 s_id; + u32 d_id; + u32 rpi; + u32 vpi; + u32 offset; + u16 ox_id; + u16 flags; + u8 cs_ctl; + u8 timeout; + u32 app_id; + u32 xmit_len; + u16 xri; + u16 tag; +}; + +struct sli4_link_event { + enum sli4_link_status status; + enum sli4_link_topology topology; + enum sli4_link_medium medium; + u32 speed; + u8 *loop_map; + u32 fc_id; +}; + +enum sli4_resource { + SLI4_RSRC_VFI, + SLI4_RSRC_VPI, + SLI4_RSRC_RPI, + SLI4_RSRC_XRI, + SLI4_RSRC_FCFI, + SLI4_RSRC_MAX, +}; + +struct sli4_extent { + u32 number; + u32 size; + u32 n_alloc; + u32 *base; + unsigned long *use_map; + u32 map_size; +}; + +struct sli4_queue_info { + u16 max_qcount[SLI4_QTYPE_MAX]; + u32 
max_qentries[SLI4_QTYPE_MAX]; + u16 count_mask[SLI4_QTYPE_MAX]; + u16 count_method[SLI4_QTYPE_MAX]; + u32 qpage_count[SLI4_QTYPE_MAX]; +}; + +struct sli4_params { + u8 has_extents; + u8 auto_reg; + u8 auto_xfer_rdy; + u8 hdr_template_req; + u8 perf_hint; + u8 perf_wq_id_association; + u8 cq_create_version; + u8 mq_create_version; + u8 high_login_mode; + u8 sgl_pre_registered; + u8 sgl_pre_reg_required; + u8 t10_dif_inline_capable; + u8 t10_dif_separate_capable; +}; + +struct sli4 { + void *os; + struct pci_dev *pci; + void __iomem *reg[PCI_STD_NUM_BARS]; + + u32 sli_rev; + u32 sli_family; + u32 if_type; + + u16 asic_type; + u16 asic_rev; + + u16 e_d_tov; + u16 r_a_tov; + struct sli4_queue_info qinfo; + u16 link_module_type; + u8 rq_batch; + u8 port_number; + char port_name[2]; + u16 rq_min_buf_size; + u32 rq_max_buf_size; + u8 topology; + u8 wwpn[8]; + u8 wwnn[8]; + u32 fw_rev[2]; + u8 fw_name[2][16]; + char ipl_name[16]; + u32 hw_rev[3]; + char modeldesc[64]; + char bios_version_string[32]; + u32 wqe_size; + u32 vpd_length; + /* + * Tracks the port resources using extents metaphor. For + * devices that don't implement extents (i.e. + * has_extents == FALSE), the code models each resource as + * a single large extent. + */ + struct sli4_extent ext[SLI4_RSRC_MAX]; + u32 features; + struct sli4_params params; + u32 sge_supported_length; + u32 sgl_page_sizes; + u32 max_sgl_pages; + + /* + * Callback functions + */ + int (*link)(void *ctx, void *event); + void *link_arg; + + struct efc_dma bmbx; + + /* Save pointer to physical memory descriptor for non-embedded + * SLI_CONFIG commands for BMBX dumping purposes + */ + struct efc_dma *bmbx_non_emb_pmd; + + struct efc_dma vpd_data; +}; + +static inline void +sli_cmd_fill_hdr(struct sli4_rqst_hdr *hdr, u8 opc, u8 sub, u32 ver, __le32 len) +{ + hdr->opcode = opc; + hdr->subsystem = sub; + hdr->dw3_version = cpu_to_le32(ver); + hdr->request_length = len; +} + +/** + * Get / set parameter functions + */ + +static inline u32 +sli_get_max_sge(struct sli4 *sli4) +{ + return sli4->sge_supported_length; +} + +static inline u32 +sli_get_max_sgl(struct sli4 *sli4) +{ + if (sli4->sgl_page_sizes != 1) { + efc_log_err(sli4, "unsupported SGL page sizes %#x\n", + sli4->sgl_page_sizes); + return 0; + } + + return (sli4->max_sgl_pages * SLI_PAGE_SIZE) / sizeof(struct sli4_sge); +} + +static inline enum sli4_link_medium +sli_get_medium(struct sli4 *sli4) +{ + switch (sli4->topology) { + case SLI4_READ_CFG_TOPO_FC: + case SLI4_READ_CFG_TOPO_FC_AL: + case SLI4_READ_CFG_TOPO_NON_FC_AL: + return SLI4_LINK_MEDIUM_FC; + default: + return SLI4_LINK_MEDIUM_MAX; + } +} + +static inline u32 +sli_get_lmt(struct sli4 *sli4) +{ + return sli4->link_module_type; +} + +static inline int +sli_set_topology(struct sli4 *sli4, u32 value) +{ + int rc = 0; + + switch (value) { + case SLI4_READ_CFG_TOPO_FC: + case SLI4_READ_CFG_TOPO_FC_AL: + case SLI4_READ_CFG_TOPO_NON_FC_AL: + sli4->topology = value; + break; + default: + efc_log_err(sli4, "unsupported topology %#x\n", value); + rc = -1; + } + + return rc; +} + +static inline u32 +sli_convert_mask_to_count(u32 method, u32 mask) +{ + u32 count = 0; + + if (method) { + count = 1 << (31 - __builtin_clz(mask)); + count *= 16; + } else { + count = mask; + } + + return count; +} + +static inline u32 +sli_reg_read_status(struct sli4 *sli) +{ + return readl(sli->reg[0] + SLI4_PORT_STATUS_REGOFF); +} + +static inline int +sli_fw_error_status(struct sli4 *sli4) +{ + return (sli_reg_read_status(sli4) & SLI4_PORT_STATUS_ERR) ? 
1 : 0; +} + +static inline u32 +sli_reg_read_err1(struct sli4 *sli) +{ + return readl(sli->reg[0] + SLI4_PORT_ERROR1); +} + +static inline u32 +sli_reg_read_err2(struct sli4 *sli) +{ + return readl(sli->reg[0] + SLI4_PORT_ERROR2); +} + +static inline int +sli_fc_rqe_length(struct sli4 *sli4, void *cqe, u32 *len_hdr, + u32 *len_data) +{ + struct sli4_fc_async_rcqe *rcqe = cqe; + + *len_hdr = *len_data = 0; + + if (rcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) { + *len_hdr = rcqe->hdpl_byte & SLI4_RACQE_HDPL; + *len_data = le16_to_cpu(rcqe->data_placement_length); + return 0; + } else { + return -1; + } +} + +static inline u8 +sli_fc_rqe_fcfi(struct sli4 *sli4, void *cqe) +{ + u8 code = ((u8 *)cqe)[SLI4_CQE_CODE_OFFSET]; + u8 fcfi = U8_MAX; + + switch (code) { + case SLI4_CQE_CODE_RQ_ASYNC: { + struct sli4_fc_async_rcqe *rcqe = cqe; + + fcfi = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_FCFI; + break; + } + case SLI4_CQE_CODE_RQ_ASYNC_V1: { + struct sli4_fc_async_rcqe_v1 *rcqev1 = cqe; + + fcfi = rcqev1->fcfi_byte & SLI4_RACQE_FCFI; + break; + } + case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD: { + struct sli4_fc_optimized_write_cmd_cqe *opt_wr = cqe; + + fcfi = opt_wr->flags0 & SLI4_OCQE_FCFI; + break; + } + } + + return fcfi; +} + +/**************************************************************************** + * Function prototypes + */ +int +sli_cmd_config_link(struct sli4 *sli4, void *buf); +int +sli_cmd_down_link(struct sli4 *sli4, void *buf); +int +sli_cmd_dump_type4(struct sli4 *sli4, void *buf, u16 wki); +int +sli_cmd_common_read_transceiver_data(struct sli4 *sli4, void *buf, + u32 page_num, struct efc_dma *dma); +int +sli_cmd_read_link_stats(struct sli4 *sli4, void *buf, u8 req_stats, + u8 clear_overflow_flags, u8 clear_all_counters); +int +sli_cmd_read_status(struct sli4 *sli4, void *buf, u8 clear); +int +sli_cmd_init_link(struct sli4 *sli4, void *buf, u32 speed, + u8 reset_alpa); +int +sli_cmd_init_vfi(struct sli4 *sli4, void *buf, u16 vfi, u16 fcfi, + u16 vpi); +int +sli_cmd_init_vpi(struct sli4 *sli4, void *buf, u16 vpi, u16 vfi); +int +sli_cmd_post_xri(struct sli4 *sli4, void *buf, u16 base, u16 cnt); +int +sli_cmd_release_xri(struct sli4 *sli4, void *buf, u8 num_xri); +int +sli_cmd_read_sparm64(struct sli4 *sli4, void *buf, + struct efc_dma *dma, u16 vpi); +int +sli_cmd_read_topology(struct sli4 *sli4, void *buf, struct efc_dma *dma); +int +sli_cmd_read_nvparms(struct sli4 *sli4, void *buf); +int +sli_cmd_write_nvparms(struct sli4 *sli4, void *buf, u8 *wwpn, + u8 *wwnn, u8 hard_alpa, u32 preferred_d_id); +int +sli_cmd_reg_fcfi(struct sli4 *sli4, void *buf, u16 index, + struct sli4_cmd_rq_cfg *rq_cfg); +int +sli_cmd_reg_fcfi_mrq(struct sli4 *sli4, void *buf, u8 mode, u16 index, + u8 rq_selection_policy, u8 mrq_bit_mask, u16 num_mrqs, + struct sli4_cmd_rq_cfg *rq_cfg); +int +sli_cmd_reg_rpi(struct sli4 *sli4, void *buf, u32 rpi, u32 vpi, u32 fc_id, + struct efc_dma *dma, u8 update, u8 enable_t10_pi); +int +sli_cmd_unreg_fcfi(struct sli4 *sli4, void *buf, u16 indicator); +int +sli_cmd_unreg_rpi(struct sli4 *sli4, void *buf, u16 indicator, + enum sli4_resource which, u32 fc_id); +int +sli_cmd_reg_vpi(struct sli4 *sli4, void *buf, u32 fc_id, + __be64 sli_wwpn, u16 vpi, u16 vfi, bool update); +int +sli_cmd_reg_vfi(struct sli4 *sli4, void *buf, size_t size, + u16 vfi, u16 fcfi, struct efc_dma dma, + u16 vpi, __be64 sli_wwpn, u32 fc_id); +int +sli_cmd_unreg_vpi(struct sli4 *sli4, void *buf, u16 id, u32 type); +int +sli_cmd_unreg_vfi(struct sli4 *sli4, void *buf, u16 idx, u32 type); +int 
+sli_cmd_common_nop(struct sli4 *sli4, void *buf, uint64_t context); +int +sli_cmd_common_get_resource_extent_info(struct sli4 *sli4, void *buf, + u16 rtype); +int +sli_cmd_common_get_sli4_parameters(struct sli4 *sli4, void *buf); +int +sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc, + u16 eof, u32 len, u32 offset, char *name, struct efc_dma *dma); +int +sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *object_name); +int +sli_cmd_common_read_object(struct sli4 *sli4, void *buf, + u32 length, u32 offset, char *name, struct efc_dma *dma); +int +sli_cmd_dmtf_exec_clp_cmd(struct sli4 *sli4, void *buf, + struct efc_dma *cmd, struct efc_dma *resp); +int +sli_cmd_common_set_dump_location(struct sli4 *sli4, void *buf, + bool query, bool is_buffer_list, struct efc_dma *dma, u8 fdb); +int +sli_cmd_common_set_features(struct sli4 *sli4, void *buf, + u32 feature, u32 param_len, void *parameter); + +int sli_cqe_mq(struct sli4 *sli4, void *buf); +int sli_cqe_async(struct sli4 *sli4, void *buf); + +int +sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev, void __iomem *r[]); +void sli_calc_max_qentries(struct sli4 *sli4); +int sli_init(struct sli4 *sli4); +int sli_reset(struct sli4 *sli4); +int sli_fw_reset(struct sli4 *sli4); +void sli_teardown(struct sli4 *sli4); +int +sli_callback(struct sli4 *sli4, enum sli4_callback cb, void *func, void *arg); +int +sli_bmbx_command(struct sli4 *sli4); +int +__sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype, + size_t size, u32 n_entries, u32 align); +int +__sli_create_queue(struct sli4 *sli4, struct sli4_queue *q); +int +sli_eq_modify_delay(struct sli4 *sli4, struct sli4_queue *eq, u32 num_eq, + u32 shift, u32 delay_mult); +int +sli_queue_alloc(struct sli4 *sli4, u32 qtype, struct sli4_queue *q, + u32 n_entries, struct sli4_queue *assoc); +int +sli_cq_alloc_set(struct sli4 *sli4, struct sli4_queue *qs[], u32 num_cqs, + u32 n_entries, struct sli4_queue *eqs[]); +int +sli_get_queue_entry_size(struct sli4 *sli4, u32 qtype); +int +sli_queue_free(struct sli4 *sli4, struct sli4_queue *q, u32 destroy_queues, + u32 free_memory); +int +sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm); +int +sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm); + +int +sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry); +int +sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry); +int +sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry); +int +sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry); +int +sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry); +int +sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry); +int +sli_resource_alloc(struct sli4 *sli4, enum sli4_resource rtype, u32 *rid, + u32 *index); +int +sli_resource_free(struct sli4 *sli4, enum sli4_resource rtype, u32 rid); +int +sli_resource_reset(struct sli4 *sli4, enum sli4_resource rtype); +int +sli_eq_parse(struct sli4 *sli4, u8 *buf, u16 *cq_id); +int +sli_cq_parse(struct sli4 *sli4, struct sli4_queue *cq, u8 *cqe, + enum sli4_qentry *etype, u16 *q_id); + +int sli_raise_ue(struct sli4 *sli4, u8 dump); +int sli_dump_is_ready(struct sli4 *sli4); +bool sli_reset_required(struct sli4 *sli4); +bool sli_fw_ready(struct sli4 *sli4); + +int +sli_fc_process_link_attention(struct sli4 *sli4, void *acqe); +int +sli_fc_cqe_parse(struct sli4 *sli4, struct sli4_queue *cq, + u8 *cqe, enum sli4_qentry *etype, + u16 *rid); +u32 sli_fc_response_length(struct sli4 *sli4, u8 *cqe); 
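/*
 * Editor's illustrative sketch, not part of the upstream patch: a minimal
 * example of how a caller might drain one completion queue with the helpers
 * declared in this header, assuming the usual 0-on-success return
 * convention for sli_cq_read()/sli_fc_cqe_parse(). The 16-byte entry
 * buffer, the helper name and the dispatch step are assumptions for
 * illustration only.
 */
static void example_drain_cq(struct sli4 *sli4, struct sli4_queue *cq)
{
        u8 cqe[16];             /* assumed CQE size, for illustration */
        enum sli4_qentry etype;
        u16 rid;

        /* Pop CQEs until the queue reports no more valid entries. */
        while (!sli_cq_read(sli4, cq, cqe)) {
                /* Classify the entry and recover the request tag / XRI. */
                if (sli_fc_cqe_parse(sli4, cq, cqe, &etype, &rid))
                        break;
                /* ... dispatch on etype / rid here ... */
        }

        /* Re-arm the CQ so the port signals subsequent completions. */
        sli_queue_arm(sli4, cq, true);
}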
+u32 sli_fc_io_length(struct sli4 *sli4, u8 *cqe); +int sli_fc_els_did(struct sli4 *sli4, u8 *cqe, u32 *d_id); +u32 sli_fc_ext_status(struct sli4 *sli4, u8 *cqe); +int +sli_fc_rqe_rqid_and_index(struct sli4 *sli4, u8 *cqe, u16 *rq_id, u32 *index); +int +sli_cmd_wq_create(struct sli4 *sli4, void *buf, + struct efc_dma *qmem, u16 cq_id); +int sli_cmd_post_sgl_pages(struct sli4 *sli4, void *buf, u16 xri, + u32 xri_count, struct efc_dma *page0[], struct efc_dma *page1[], + struct efc_dma *dma); +int +sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, + struct efc_dma *dma, u16 rpi, struct efc_dma *payload_dma); +int +sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q, u32 n_entries, + u32 buffer_size, struct sli4_queue *cq, bool is_hdr); +int +sli_fc_rq_set_alloc(struct sli4 *sli4, u32 num_rq_pairs, struct sli4_queue *q[], + u32 base_cq_id, u32 num, u32 hdr_buf_size, u32 data_buf_size); +u32 sli_fc_get_rpi_requirements(struct sli4 *sli4, u32 n_rpi); +int +sli_abort_wqe(struct sli4 *sli4, void *buf, enum sli4_abort_type type, + bool send_abts, u32 ids, u32 mask, u16 tag, u16 cq_id); + +int +sli_send_frame_wqe(struct sli4 *sli4, void *buf, u8 sof, u8 eof, + u32 *hdr, struct efc_dma *payload, u32 req_len, u8 timeout, + u16 xri, u16 req_tag); + +int +sli_xmit_els_rsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *rsp, + struct sli_els_params *params); + +int +sli_els_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, + struct sli_els_params *params); + +int +sli_fcp_icmnd64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, u16 xri, + u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 timeout); + +int +sli_fcp_iread64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, + u32 first_data_sge, u32 xfer_len, u16 xri, + u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 dif, u8 bs, + u8 timeout); + +int +sli_fcp_iwrite64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, + u32 first_data_sge, u32 xfer_len, + u32 first_burst, u16 xri, u16 tag, u16 cq_id, u32 rpi, + u32 rnode_fcid, u8 dif, u8 bs, u8 timeout); + +int +sli_fcp_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, + u32 first_data_sge, u16 cq_id, u8 dif, u8 bs, + struct sli_fcp_tgt_params *params); +int +sli_fcp_cont_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, + u32 first_data_sge, u16 sec_xri, u16 cq_id, u8 dif, + u8 bs, struct sli_fcp_tgt_params *params); + +int +sli_fcp_trsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, + u16 cq_id, u8 port_owned, struct sli_fcp_tgt_params *params); + +int +sli_fcp_tsend64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, + u32 first_data_sge, u16 cq_id, u8 dif, u8 bs, + struct sli_fcp_tgt_params *params); +int +sli_gen_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, + struct sli_ct_params *params); + +int +sli_xmit_bls_rsp64_wqe(struct sli4 *sli4, void *buf, + struct sli_bls_payload *payload, struct sli_bls_params *params); + +int +sli_xmit_sequence64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *payload, + struct sli_ct_params *params); + +int +sli_requeue_xri_wqe(struct sli4 *sli4, void *buf, u16 xri, u16 tag, u16 cq_id); +void +sli4_cmd_lowlevel_set_watchdog(struct sli4 *sli4, void *buf, size_t size, + u16 timeout); + +const char *sli_fc_get_status_string(u32 status); + +#endif /* !_SLI4_H */ diff --git a/drivers/scsi/esas2r/Kconfig b/drivers/scsi/esas2r/Kconfig new file mode 100644 index 000000000..c9b43f7fc --- /dev/null +++ b/drivers/scsi/esas2r/Kconfig @@ -0,0 +1,6 @@ +# 
SPDX-License-Identifier: GPL-2.0-only +config SCSI_ESAS2R + tristate "ATTO Technology's ExpressSAS RAID adapter driver" + depends on PCI && SCSI + help + This driver supports the ATTO ExpressSAS R6xx SAS/SATA RAID controllers. diff --git a/drivers/scsi/esas2r/Makefile b/drivers/scsi/esas2r/Makefile new file mode 100644 index 000000000..279d9cb3c --- /dev/null +++ b/drivers/scsi/esas2r/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_SCSI_ESAS2R) += esas2r.o + +esas2r-objs := esas2r_log.o esas2r_disc.o esas2r_flash.o esas2r_init.o \ + esas2r_int.o esas2r_io.o esas2r_ioctl.o esas2r_targdb.o \ + esas2r_vda.o esas2r_main.o diff --git a/drivers/scsi/esas2r/atioctl.h b/drivers/scsi/esas2r/atioctl.h new file mode 100644 index 000000000..dd3437412 --- /dev/null +++ b/drivers/scsi/esas2r/atioctl.h @@ -0,0 +1,1255 @@ +/* linux/drivers/scsi/esas2r/atioctl.h + * ATTO IOCTL Handling + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include "atvda.h" + +#ifndef ATIOCTL_H +#define ATIOCTL_H + +#define EXPRESS_IOCTL_SIGNATURE "Express" +#define EXPRESS_IOCTL_SIGNATURE_SIZE 8 + +/* structure definitions for IOCTls */ + +struct __packed atto_express_ioctl_header { + u8 signature[EXPRESS_IOCTL_SIGNATURE_SIZE]; + u8 return_code; + +#define IOCTL_SUCCESS 0 +#define IOCTL_ERR_INVCMD 101 +#define IOCTL_INIT_FAILED 102 +#define IOCTL_NOT_IMPLEMENTED 103 +#define IOCTL_BAD_CHANNEL 104 +#define IOCTL_TARGET_OVERRUN 105 +#define IOCTL_TARGET_NOT_ENABLED 106 +#define IOCTL_BAD_FLASH_IMGTYPE 107 +#define IOCTL_OUT_OF_RESOURCES 108 +#define IOCTL_GENERAL_ERROR 109 +#define IOCTL_INVALID_PARAM 110 + + u8 channel; + u8 retries; + u8 pad[5]; +}; + +/* + * NOTE - if channel == 0xFF, the request is + * handled on the adapter it came in on. + */ +#define MAX_NODE_NAMES 256 + +struct __packed atto_firmware_rw_request { + u8 function; + #define FUNC_FW_DOWNLOAD 0x09 + #define FUNC_FW_UPLOAD 0x12 + + u8 img_type; + #define FW_IMG_FW 0x01 + #define FW_IMG_BIOS 0x02 + #define FW_IMG_NVR 0x03 + #define FW_IMG_RAW 0x04 + #define FW_IMG_FM_API 0x05 + #define FW_IMG_FS_API 0x06 + + u8 pad[2]; + u32 img_offset; + u32 img_size; + u8 image[0x80000]; +}; + +struct __packed atto_param_rw_request { + u16 code; + char data_buffer[512]; +}; + +#define MAX_CHANNEL 256 + +struct __packed atto_channel_list { + u32 num_channels; + u8 channel[MAX_CHANNEL]; +}; + +struct __packed atto_channel_info { + u8 major_rev; + u8 minor_rev; + u8 IRQ; + u8 revision_id; + u8 pci_bus; + u8 pci_dev_func; + u8 core_rev; + u8 host_no; + u16 device_id; + u16 vendor_id; + u16 ven_dev_id; + u8 pad[3]; + u32 hbaapi_rev; +}; + +/* + * CSMI control codes + * class independent + */ +#define CSMI_CC_GET_DRVR_INFO 1 +#define CSMI_CC_GET_CNTLR_CFG 2 +#define CSMI_CC_GET_CNTLR_STS 3 +#define CSMI_CC_FW_DOWNLOAD 4 + +/* RAID class */ +#define CSMI_CC_GET_RAID_INFO 10 +#define CSMI_CC_GET_RAID_CFG 11 + +/* HBA class */ +#define CSMI_CC_GET_PHY_INFO 20 +#define CSMI_CC_SET_PHY_INFO 21 +#define CSMI_CC_GET_LINK_ERRORS 22 +#define CSMI_CC_SMP_PASSTHRU 23 +#define CSMI_CC_SSP_PASSTHRU 24 +#define CSMI_CC_STP_PASSTHRU 25 +#define CSMI_CC_GET_SATA_SIG 26 +#define CSMI_CC_GET_SCSI_ADDR 27 +#define CSMI_CC_GET_DEV_ADDR 28 +#define CSMI_CC_TASK_MGT 29 +#define CSMI_CC_GET_CONN_INFO 30 + +/* PHY class */ +#define CSMI_CC_PHY_CTRL 60 + +/* + * CSMI status codes + * class independent + */ +#define CSMI_STS_SUCCESS 0 +#define CSMI_STS_FAILED 1 +#define CSMI_STS_BAD_CTRL_CODE 2 +#define CSMI_STS_INV_PARAM 3 +#define CSMI_STS_WRITE_ATTEMPTED 4 + +/* RAID class */ +#define CSMI_STS_INV_RAID_SET 1000 + +/* HBA class */ +#define CSMI_STS_PHY_CHANGED CSMI_STS_SUCCESS 
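/*
 * Illustrative sketch, not part of the upstream header: how a management
 * application might prepare the common header above before issuing one of
 * the EXPRESS_IOCTL_* codes defined at the end of this file.  Assumes
 * memset()/memcpy() from <string.h> (or <linux/string.h> in kernel code);
 * the 0xFF channel follows the note above (handled on the receiving adapter).
 */
static void example_init_express_hdr(struct atto_express_ioctl_header *hdr)
{
	memset(hdr, 0, sizeof(*hdr));
	memcpy(hdr->signature, EXPRESS_IOCTL_SIGNATURE,
	       EXPRESS_IOCTL_SIGNATURE_SIZE);
	hdr->channel = 0xFF;	/* per the note: service on the receiving adapter */
	hdr->retries = 0;
	/* return_code is filled in on completion (IOCTL_SUCCESS, etc.) */
}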
+#define CSMI_STS_PHY_UNCHANGEABLE 2000 +#define CSMI_STS_INV_LINK_RATE 2001 +#define CSMI_STS_INV_PHY 2002 +#define CSMI_STS_INV_PHY_FOR_PORT 2003 +#define CSMI_STS_PHY_UNSELECTABLE 2004 +#define CSMI_STS_SELECT_PHY_OR_PORT 2005 +#define CSMI_STS_INV_PORT 2006 +#define CSMI_STS_PORT_UNSELECTABLE 2007 +#define CSMI_STS_CONNECTION_FAILED 2008 +#define CSMI_STS_NO_SATA_DEV 2009 +#define CSMI_STS_NO_SATA_SIGNATURE 2010 +#define CSMI_STS_SCSI_EMULATION 2011 +#define CSMI_STS_NOT_AN_END_DEV 2012 +#define CSMI_STS_NO_SCSI_ADDR 2013 +#define CSMI_STS_NO_DEV_ADDR 2014 + +/* CSMI class independent structures */ +struct atto_csmi_get_driver_info { + char name[81]; + char description[81]; + u16 major_rev; + u16 minor_rev; + u16 build_rev; + u16 release_rev; + u16 csmi_major_rev; + u16 csmi_minor_rev; + #define CSMI_MAJOR_REV_0_81 0 + #define CSMI_MINOR_REV_0_81 81 + + #define CSMI_MAJOR_REV CSMI_MAJOR_REV_0_81 + #define CSMI_MINOR_REV CSMI_MINOR_REV_0_81 +}; + +struct atto_csmi_get_pci_bus_addr { + u8 bus_num; + u8 device_num; + u8 function_num; + u8 reserved; +}; + +struct atto_csmi_get_cntlr_cfg { + u32 base_io_addr; + + struct { + u32 base_memaddr_lo; + u32 base_memaddr_hi; + }; + + u32 board_id; + u16 slot_num; + #define CSMI_SLOT_NUM_UNKNOWN 0xFFFF + + u8 cntlr_class; + #define CSMI_CNTLR_CLASS_HBA 5 + + u8 io_bus_type; + #define CSMI_BUS_TYPE_PCI 3 + #define CSMI_BUS_TYPE_PCMCIA 4 + + union { + struct atto_csmi_get_pci_bus_addr pci_addr; + u8 reserved[32]; + }; + + char serial_num[81]; + u16 major_rev; + u16 minor_rev; + u16 build_rev; + u16 release_rev; + u16 bios_major_rev; + u16 bios_minor_rev; + u16 bios_build_rev; + u16 bios_release_rev; + u32 cntlr_flags; + #define CSMI_CNTLRF_SAS_HBA 0x00000001 + #define CSMI_CNTLRF_SAS_RAID 0x00000002 + #define CSMI_CNTLRF_SATA_HBA 0x00000004 + #define CSMI_CNTLRF_SATA_RAID 0x00000008 + #define CSMI_CNTLRF_FWD_SUPPORT 0x00010000 + #define CSMI_CNTLRF_FWD_ONLINE 0x00020000 + #define CSMI_CNTLRF_FWD_SRESET 0x00040000 + #define CSMI_CNTLRF_FWD_HRESET 0x00080000 + #define CSMI_CNTLRF_FWD_RROM 0x00100000 + + u16 rrom_major_rev; + u16 rrom_minor_rev; + u16 rrom_build_rev; + u16 rrom_release_rev; + u16 rrom_biosmajor_rev; + u16 rrom_biosminor_rev; + u16 rrom_biosbuild_rev; + u16 rrom_biosrelease_rev; + u8 reserved2[7]; +}; + +struct atto_csmi_get_cntlr_sts { + u32 status; + #define CSMI_CNTLR_STS_GOOD 1 + #define CSMI_CNTLR_STS_FAILED 2 + #define CSMI_CNTLR_STS_OFFLINE 3 + #define CSMI_CNTLR_STS_POWEROFF 4 + + u32 offline_reason; + #define CSMI_OFFLINE_NO_REASON 0 + #define CSMI_OFFLINE_INITIALIZING 1 + #define CSMI_OFFLINE_BUS_DEGRADED 2 + #define CSMI_OFFLINE_BUS_FAILURE 3 + + u8 reserved[28]; +}; + +struct atto_csmi_fw_download { + u32 buffer_len; + u32 download_flags; + #define CSMI_FWDF_VALIDATE 0x00000001 + #define CSMI_FWDF_SOFT_RESET 0x00000002 + #define CSMI_FWDF_HARD_RESET 0x00000004 + + u8 reserved[32]; + u16 status; + #define CSMI_FWD_STS_SUCCESS 0 + #define CSMI_FWD_STS_FAILED 1 + #define CSMI_FWD_STS_USING_RROM 2 + #define CSMI_FWD_STS_REJECT 3 + #define CSMI_FWD_STS_DOWNREV 4 + + u16 severity; + #define CSMI_FWD_SEV_INFO 0 + #define CSMI_FWD_SEV_WARNING 1 + #define CSMI_FWD_SEV_ERROR 2 + #define CSMI_FWD_SEV_FATAL 3 + +}; + +/* CSMI RAID class structures */ +struct atto_csmi_get_raid_info { + u32 num_raid_sets; + u32 max_drivesper_set; + u8 reserved[92]; +}; + +struct atto_csmi_raid_drives { + char model[40]; + char firmware[8]; + char serial_num[40]; + u8 sas_addr[8]; + u8 lun[8]; + u8 drive_sts; + #define CSMI_DRV_STS_OK 0 + #define 
CSMI_DRV_STS_REBUILDING 1 + #define CSMI_DRV_STS_FAILED 2 + #define CSMI_DRV_STS_DEGRADED 3 + + u8 drive_usage; + #define CSMI_DRV_USE_NOT_USED 0 + #define CSMI_DRV_USE_MEMBER 1 + #define CSMI_DRV_USE_SPARE 2 + + u8 reserved[30]; /* spec says 22 */ +}; + +struct atto_csmi_get_raid_cfg { + u32 raid_set_index; + u32 capacity; + u32 stripe_size; + u8 raid_type; + u8 status; + u8 information; + u8 drive_cnt; + u8 reserved[20]; + + struct atto_csmi_raid_drives drives[1]; +}; + +/* CSMI HBA class structures */ +struct atto_csmi_phy_entity { + u8 ident_frame[0x1C]; + u8 port_id; + u8 neg_link_rate; + u8 min_link_rate; + u8 max_link_rate; + u8 phy_change_cnt; + u8 auto_discover; + #define CSMI_DISC_NOT_SUPPORTED 0x00 + #define CSMI_DISC_NOT_STARTED 0x01 + #define CSMI_DISC_IN_PROGRESS 0x02 + #define CSMI_DISC_COMPLETE 0x03 + #define CSMI_DISC_ERROR 0x04 + + u8 reserved[2]; + u8 attach_ident_frame[0x1C]; +}; + +struct atto_csmi_get_phy_info { + u8 number_of_phys; + u8 reserved[3]; + struct atto_csmi_phy_entity + phy[32]; +}; + +struct atto_csmi_set_phy_info { + u8 phy_id; + u8 neg_link_rate; + #define CSMI_NEG_RATE_NEGOTIATE 0x00 + #define CSMI_NEG_RATE_PHY_DIS 0x01 + + u8 prog_minlink_rate; + u8 prog_maxlink_rate; + u8 signal_class; + #define CSMI_SIG_CLASS_UNKNOWN 0x00 + #define CSMI_SIG_CLASS_DIRECT 0x01 + #define CSMI_SIG_CLASS_SERVER 0x02 + #define CSMI_SIG_CLASS_ENCLOSURE 0x03 + + u8 reserved[3]; +}; + +struct atto_csmi_get_link_errors { + u8 phy_id; + u8 reset_cnts; + #define CSMI_RESET_CNTS_NO 0x00 + #define CSMI_RESET_CNTS_YES 0x01 + + u8 reserved[2]; + u32 inv_dw_cnt; + u32 disp_err_cnt; + u32 loss_ofdw_sync_cnt; + u32 phy_reseterr_cnt; + + /* + * The following field has been added by ATTO for ease of + * implementation of additional statistics. Drivers must validate + * the length of the IOCTL payload prior to filling them in so CSMI + * complaint applications function correctly. 
+ */ + + u32 crc_err_cnt; +}; + +struct atto_csmi_smp_passthru { + u8 phy_id; + u8 port_id; + u8 conn_rate; + u8 reserved; + u8 dest_sas_addr[8]; + u32 req_len; + u8 smp_req[1020]; + u8 conn_sts; + u8 reserved2[3]; + u32 rsp_len; + u8 smp_rsp[1020]; +}; + +struct atto_csmi_ssp_passthru_sts { + u8 conn_sts; + u8 reserved[3]; + u8 data_present; + u8 status; + u16 rsp_length; + u8 rsp[256]; + u32 data_bytes; +}; + +struct atto_csmi_ssp_passthru { + u8 phy_id; + u8 port_id; + u8 conn_rate; + u8 reserved; + u8 dest_sas_addr[8]; + u8 lun[8]; + u8 cdb_len; + u8 add_cdb_len; + u8 reserved2[2]; + u8 cdb[16]; + u32 flags; + #define CSMI_SSPF_DD_READ 0x00000001 + #define CSMI_SSPF_DD_WRITE 0x00000002 + #define CSMI_SSPF_DD_UNSPECIFIED 0x00000004 + #define CSMI_SSPF_TA_SIMPLE 0x00000000 + #define CSMI_SSPF_TA_HEAD_OF_Q 0x00000010 + #define CSMI_SSPF_TA_ORDERED 0x00000020 + #define CSMI_SSPF_TA_ACA 0x00000040 + + u8 add_cdb[24]; + u32 data_len; + + struct atto_csmi_ssp_passthru_sts sts; +}; + +struct atto_csmi_stp_passthru_sts { + u8 conn_sts; + u8 reserved[3]; + u8 sts_fis[20]; + u32 scr[16]; + u32 data_bytes; +}; + +struct atto_csmi_stp_passthru { + u8 phy_id; + u8 port_id; + u8 conn_rate; + u8 reserved; + u8 dest_sas_addr[8]; + u8 reserved2[4]; + u8 command_fis[20]; + u32 flags; + #define CSMI_STPF_DD_READ 0x00000001 + #define CSMI_STPF_DD_WRITE 0x00000002 + #define CSMI_STPF_DD_UNSPECIFIED 0x00000004 + #define CSMI_STPF_PIO 0x00000010 + #define CSMI_STPF_DMA 0x00000020 + #define CSMI_STPF_PACKET 0x00000040 + #define CSMI_STPF_DMA_QUEUED 0x00000080 + #define CSMI_STPF_EXECUTE_DIAG 0x00000100 + #define CSMI_STPF_RESET_DEVICE 0x00000200 + + u32 data_len; + + struct atto_csmi_stp_passthru_sts sts; +}; + +struct atto_csmi_get_sata_sig { + u8 phy_id; + u8 reserved[3]; + u8 reg_dth_fis[20]; +}; + +struct atto_csmi_get_scsi_addr { + u8 sas_addr[8]; + u8 sas_lun[8]; + u8 host_index; + u8 path_id; + u8 target_id; + u8 lun; +}; + +struct atto_csmi_get_dev_addr { + u8 host_index; + u8 path_id; + u8 target_id; + u8 lun; + u8 sas_addr[8]; + u8 sas_lun[8]; +}; + +struct atto_csmi_task_mgmt { + u8 host_index; + u8 path_id; + u8 target_id; + u8 lun; + u32 flags; + #define CSMI_TMF_TASK_IU 0x00000001 + #define CSMI_TMF_HARD_RST 0x00000002 + #define CSMI_TMF_SUPPRESS_RSLT 0x00000004 + + u32 queue_tag; + u32 reserved; + u8 task_mgt_func; + u8 reserved2[7]; + u32 information; + #define CSMI_TM_INFO_TEST 1 + #define CSMI_TM_INFO_EXCEEDED 2 + #define CSMI_TM_INFO_DEMAND 3 + #define CSMI_TM_INFO_TRIGGER 4 + + struct atto_csmi_ssp_passthru_sts sts; + +}; + +struct atto_csmi_get_conn_info { + u32 pinout; + #define CSMI_CON_UNKNOWN 0x00000001 + #define CSMI_CON_SFF_8482 0x00000002 + #define CSMI_CON_SFF_8470_LANE_1 0x00000100 + #define CSMI_CON_SFF_8470_LANE_2 0x00000200 + #define CSMI_CON_SFF_8470_LANE_3 0x00000400 + #define CSMI_CON_SFF_8470_LANE_4 0x00000800 + #define CSMI_CON_SFF_8484_LANE_1 0x00010000 + #define CSMI_CON_SFF_8484_LANE_2 0x00020000 + #define CSMI_CON_SFF_8484_LANE_3 0x00040000 + #define CSMI_CON_SFF_8484_LANE_4 0x00080000 + + u8 connector[16]; + u8 location; + #define CSMI_CON_INTERNAL 0x02 + #define CSMI_CON_EXTERNAL 0x04 + #define CSMI_CON_SWITCHABLE 0x08 + #define CSMI_CON_AUTO 0x10 + + u8 reserved[15]; +}; + +/* CSMI PHY class structures */ +struct atto_csmi_character { + u8 type_flags; + #define CSMI_CTF_POS_DISP 0x01 + #define CSMI_CTF_NEG_DISP 0x02 + #define CSMI_CTF_CTRL_CHAR 0x04 + + u8 value; +}; + +struct atto_csmi_pc_ctrl { + u8 type; + #define CSMI_PC_TYPE_UNDEFINED 0x00 + #define 
CSMI_PC_TYPE_SATA 0x01 + #define CSMI_PC_TYPE_SAS 0x02 + u8 rate; + u8 reserved[6]; + u32 vendor_unique[8]; + u32 tx_flags; + #define CSMI_PC_TXF_PREEMP_DIS 0x00000001 + + signed char tx_amplitude; + signed char tx_preemphasis; + signed char tx_slew_rate; + signed char tx_reserved[13]; + u8 tx_vendor_unique[64]; + u32 rx_flags; + #define CSMI_PC_RXF_EQ_DIS 0x00000001 + + signed char rx_threshold; + signed char rx_equalization_gain; + signed char rx_reserved[14]; + u8 rx_vendor_unique[64]; + u32 pattern_flags; + #define CSMI_PC_PATF_FIXED 0x00000001 + #define CSMI_PC_PATF_DIS_SCR 0x00000002 + #define CSMI_PC_PATF_DIS_ALIGN 0x00000004 + #define CSMI_PC_PATF_DIS_SSC 0x00000008 + + u8 fixed_pattern; + #define CSMI_PC_FP_CJPAT 0x00000001 + #define CSMI_PC_FP_ALIGN 0x00000002 + + u8 user_pattern_len; + u8 pattern_reserved[6]; + + struct atto_csmi_character user_pattern_buffer[16]; +}; + +struct atto_csmi_phy_ctrl { + u32 function; + #define CSMI_PC_FUNC_GET_SETUP 0x00000100 + + u8 phy_id; + u16 len_of_cntl; + u8 num_of_cntls; + u8 reserved[4]; + u32 link_flags; + #define CSMI_PHY_ACTIVATE_CTRL 0x00000001 + #define CSMI_PHY_UPD_SPINUP_RATE 0x00000002 + #define CSMI_PHY_AUTO_COMWAKE 0x00000004 + + u8 spinup_rate; + u8 link_reserved[7]; + u32 vendor_unique[8]; + + struct atto_csmi_pc_ctrl control[1]; +}; + +union atto_ioctl_csmi { + struct atto_csmi_get_driver_info drvr_info; + struct atto_csmi_get_cntlr_cfg cntlr_cfg; + struct atto_csmi_get_cntlr_sts cntlr_sts; + struct atto_csmi_fw_download fw_dwnld; + struct atto_csmi_get_raid_info raid_info; + struct atto_csmi_get_raid_cfg raid_cfg; + struct atto_csmi_get_phy_info get_phy_info; + struct atto_csmi_set_phy_info set_phy_info; + struct atto_csmi_get_link_errors link_errs; + struct atto_csmi_smp_passthru smp_pass_thru; + struct atto_csmi_ssp_passthru ssp_pass_thru; + struct atto_csmi_stp_passthru stp_pass_thru; + struct atto_csmi_task_mgmt tsk_mgt; + struct atto_csmi_get_sata_sig sata_sig; + struct atto_csmi_get_scsi_addr scsi_addr; + struct atto_csmi_get_dev_addr dev_addr; + struct atto_csmi_get_conn_info conn_info[32]; + struct atto_csmi_phy_ctrl phy_ctrl; +}; + +struct atto_csmi { + u32 control_code; + u32 status; + union atto_ioctl_csmi data; +}; + +struct atto_module_info { + void *adapter; + void *pci_dev; + void *scsi_host; + unsigned short host_no; + union { + struct { + u64 node_name; + u64 port_name; + }; + u64 sas_addr; + }; +}; + +#define ATTO_FUNC_GET_ADAP_INFO 0x00 +#define ATTO_VER_GET_ADAP_INFO0 0 +#define ATTO_VER_GET_ADAP_INFO ATTO_VER_GET_ADAP_INFO0 + +struct __packed atto_hba_get_adapter_info { + + struct { + u16 vendor_id; + u16 device_id; + u16 ss_vendor_id; + u16 ss_device_id; + u8 class_code[3]; + u8 rev_id; + u8 bus_num; + u8 dev_num; + u8 func_num; + u8 link_width_max; + u8 link_width_curr; + #define ATTO_GAI_PCILW_UNKNOWN 0x00 + + u8 link_speed_max; + u8 link_speed_curr; + #define ATTO_GAI_PCILS_UNKNOWN 0x00 + #define ATTO_GAI_PCILS_GEN1 0x01 + #define ATTO_GAI_PCILS_GEN2 0x02 + #define ATTO_GAI_PCILS_GEN3 0x03 + + u8 interrupt_mode; + #define ATTO_GAI_PCIIM_UNKNOWN 0x00 + #define ATTO_GAI_PCIIM_LEGACY 0x01 + #define ATTO_GAI_PCIIM_MSI 0x02 + #define ATTO_GAI_PCIIM_MSIX 0x03 + + u8 msi_vector_cnt; + u8 reserved[19]; + } pci; + + u8 adap_type; + #define ATTO_GAI_AT_EPCIU320 0x00 + #define ATTO_GAI_AT_ESASRAID 0x01 + #define ATTO_GAI_AT_ESASRAID2 0x02 + #define ATTO_GAI_AT_ESASHBA 0x03 + #define ATTO_GAI_AT_ESASHBA2 0x04 + #define ATTO_GAI_AT_CELERITY 0x05 + #define ATTO_GAI_AT_CELERITY8 0x06 + #define ATTO_GAI_AT_FASTFRAME 
0x07 + #define ATTO_GAI_AT_ESASHBA3 0x08 + #define ATTO_GAI_AT_CELERITY16 0x09 + #define ATTO_GAI_AT_TLSASHBA 0x0A + #define ATTO_GAI_AT_ESASHBA4 0x0B + + u8 adap_flags; + #define ATTO_GAI_AF_DEGRADED 0x01 + #define ATTO_GAI_AF_SPT_SUPP 0x02 + #define ATTO_GAI_AF_DEVADDR_SUPP 0x04 + #define ATTO_GAI_AF_PHYCTRL_SUPP 0x08 + #define ATTO_GAI_AF_TEST_SUPP 0x10 + #define ATTO_GAI_AF_DIAG_SUPP 0x20 + #define ATTO_GAI_AF_VIRT_SES 0x40 + #define ATTO_GAI_AF_CONN_CTRL 0x80 + + u8 num_ports; + u8 num_phys; + u8 drvr_rev_major; + u8 drvr_rev_minor; + u8 drvr_revsub_minor; + u8 drvr_rev_build; + char drvr_rev_ascii[16]; + char drvr_name[32]; + char firmware_rev[16]; + char flash_rev[16]; + char model_name_short[16]; + char model_name[32]; + u32 num_targets; + u32 num_targsper_bus; + u32 num_lunsper_targ; + u8 num_busses; + u8 num_connectors; + u8 adap_flags2; + #define ATTO_GAI_AF2_FCOE_SUPP 0x01 + #define ATTO_GAI_AF2_NIC_SUPP 0x02 + #define ATTO_GAI_AF2_LOCATE_SUPP 0x04 + #define ATTO_GAI_AF2_ADAP_CTRL_SUPP 0x08 + #define ATTO_GAI_AF2_DEV_INFO_SUPP 0x10 + #define ATTO_GAI_AF2_NPIV_SUPP 0x20 + #define ATTO_GAI_AF2_MP_SUPP 0x40 + + u8 num_temp_sensors; + u32 num_targets_backend; + u32 tunnel_flags; + #define ATTO_GAI_TF_MEM_RW 0x00000001 + #define ATTO_GAI_TF_TRACE 0x00000002 + #define ATTO_GAI_TF_SCSI_PASS_THRU 0x00000004 + #define ATTO_GAI_TF_GET_DEV_ADDR 0x00000008 + #define ATTO_GAI_TF_PHY_CTRL 0x00000010 + #define ATTO_GAI_TF_CONN_CTRL 0x00000020 + #define ATTO_GAI_TF_GET_DEV_INFO 0x00000040 + + u8 reserved3[0x138]; +}; + +#define ATTO_FUNC_GET_ADAP_ADDR 0x01 +#define ATTO_VER_GET_ADAP_ADDR0 0 +#define ATTO_VER_GET_ADAP_ADDR ATTO_VER_GET_ADAP_ADDR0 + +struct __packed atto_hba_get_adapter_address { + + u8 addr_type; + #define ATTO_GAA_AT_PORT 0x00 + #define ATTO_GAA_AT_NODE 0x01 + #define ATTO_GAA_AT_CURR_MAC 0x02 + #define ATTO_GAA_AT_PERM_MAC 0x03 + #define ATTO_GAA_AT_VNIC 0x04 + + u8 port_id; + u16 addr_len; + u8 address[256]; +}; + +#define ATTO_FUNC_MEM_RW 0x02 +#define ATTO_VER_MEM_RW0 0 +#define ATTO_VER_MEM_RW ATTO_VER_MEM_RW0 + +struct __packed atto_hba_memory_read_write { + u8 mem_func; + u8 mem_type; + union { + u8 pci_index; + u8 i2c_dev; + }; + u8 i2c_status; + u32 length; + u64 address; + u8 reserved[48]; + +}; + +#define ATTO_FUNC_TRACE 0x03 +#define ATTO_VER_TRACE0 0 +#define ATTO_VER_TRACE1 1 +#define ATTO_VER_TRACE ATTO_VER_TRACE1 + +struct __packed atto_hba_trace { + u8 trace_func; + #define ATTO_TRC_TF_GET_INFO 0x00 + #define ATTO_TRC_TF_ENABLE 0x01 + #define ATTO_TRC_TF_DISABLE 0x02 + #define ATTO_TRC_TF_SET_MASK 0x03 + #define ATTO_TRC_TF_UPLOAD 0x04 + #define ATTO_TRC_TF_RESET 0x05 + + u8 trace_type; + #define ATTO_TRC_TT_DRIVER 0x00 + #define ATTO_TRC_TT_FWCOREDUMP 0x01 + + u8 reserved[2]; + u32 current_offset; + u32 total_length; + u32 trace_mask; + u8 reserved2[48]; + u8 contents[]; +}; + +#define ATTO_FUNC_SCSI_PASS_THRU 0x04 +#define ATTO_VER_SCSI_PASS_THRU0 0 +#define ATTO_VER_SCSI_PASS_THRU ATTO_VER_SCSI_PASS_THRU0 + +struct __packed atto_hba_scsi_pass_thru { + u8 cdb[32]; + u8 cdb_length; + u8 req_status; + #define ATTO_SPT_RS_SUCCESS 0x00 + #define ATTO_SPT_RS_FAILED 0x01 + #define ATTO_SPT_RS_OVERRUN 0x02 + #define ATTO_SPT_RS_UNDERRUN 0x03 + #define ATTO_SPT_RS_NO_DEVICE 0x04 + #define ATTO_SPT_RS_NO_LUN 0x05 + #define ATTO_SPT_RS_TIMEOUT 0x06 + #define ATTO_SPT_RS_BUS_RESET 0x07 + #define ATTO_SPT_RS_ABORTED 0x08 + #define ATTO_SPT_RS_BUSY 0x09 + #define ATTO_SPT_RS_DEGRADED 0x0A + + u8 scsi_status; + u8 sense_length; + u32 flags; + #define ATTO_SPTF_DATA_IN 
0x00000001 + #define ATTO_SPTF_DATA_OUT 0x00000002 + #define ATTO_SPTF_SIMPLE_Q 0x00000004 + #define ATTO_SPTF_HEAD_OF_Q 0x00000008 + #define ATTO_SPTF_ORDERED_Q 0x00000010 + + u32 timeout; + u32 target_id; + u8 lun[8]; + u32 residual_length; + u8 sense_data[0xFC]; + u8 reserved[0x28]; +}; + +#define ATTO_FUNC_GET_DEV_ADDR 0x05 +#define ATTO_VER_GET_DEV_ADDR0 0 +#define ATTO_VER_GET_DEV_ADDR ATTO_VER_GET_DEV_ADDR0 + +struct __packed atto_hba_get_device_address { + u8 addr_type; + #define ATTO_GDA_AT_PORT 0x00 + #define ATTO_GDA_AT_NODE 0x01 + #define ATTO_GDA_AT_MAC 0x02 + #define ATTO_GDA_AT_PORTID 0x03 + #define ATTO_GDA_AT_UNIQUE 0x04 + + u8 reserved; + u16 addr_len; + u32 target_id; + u8 address[256]; +}; + +/* The following functions are supported by firmware but do not have any + * associated driver structures + */ +#define ATTO_FUNC_PHY_CTRL 0x06 +#define ATTO_FUNC_CONN_CTRL 0x0C +#define ATTO_FUNC_ADAP_CTRL 0x0E +#define ATTO_VER_ADAP_CTRL0 0 +#define ATTO_VER_ADAP_CTRL ATTO_VER_ADAP_CTRL0 + +struct __packed atto_hba_adap_ctrl { + u8 adap_func; + #define ATTO_AC_AF_HARD_RST 0x00 + #define ATTO_AC_AF_GET_STATE 0x01 + #define ATTO_AC_AF_GET_TEMP 0x02 + + u8 adap_state; + #define ATTO_AC_AS_UNKNOWN 0x00 + #define ATTO_AC_AS_OK 0x01 + #define ATTO_AC_AS_RST_SCHED 0x02 + #define ATTO_AC_AS_RST_IN_PROG 0x03 + #define ATTO_AC_AS_RST_DISC 0x04 + #define ATTO_AC_AS_DEGRADED 0x05 + #define ATTO_AC_AS_DISABLED 0x06 + #define ATTO_AC_AS_TEMP 0x07 + + u8 reserved[2]; + + union { + struct { + u8 temp_sensor; + u8 temp_state; + + #define ATTO_AC_TS_UNSUPP 0x00 + #define ATTO_AC_TS_UNKNOWN 0x01 + #define ATTO_AC_TS_INIT_FAILED 0x02 + #define ATTO_AC_TS_NORMAL 0x03 + #define ATTO_AC_TS_OUT_OF_RANGE 0x04 + #define ATTO_AC_TS_FAULT 0x05 + + signed short temp_value; + signed short temp_lower_lim; + signed short temp_upper_lim; + char temp_desc[32]; + u8 reserved2[20]; + }; + }; +}; + +#define ATTO_FUNC_GET_DEV_INFO 0x0F +#define ATTO_VER_GET_DEV_INFO0 0 +#define ATTO_VER_GET_DEV_INFO ATTO_VER_GET_DEV_INFO0 + +struct __packed atto_hba_sas_device_info { + + #define ATTO_SDI_MAX_PHYS_WIDE_PORT 16 + + u8 phy_id[ATTO_SDI_MAX_PHYS_WIDE_PORT]; /* IDs of parent exp/adapt */ + #define ATTO_SDI_PHY_ID_INV ATTO_SAS_PHY_ID_INV + u32 exp_target_id; + u32 sas_port_mask; + u8 sas_level; + #define ATTO_SDI_SAS_LVL_INV 0xFF + + u8 slot_num; + #define ATTO_SDI_SLOT_NUM_INV ATTO_SLOT_NUM_INV + + u8 dev_type; + #define ATTO_SDI_DT_END_DEVICE 0 + #define ATTO_SDI_DT_EXPANDER 1 + #define ATTO_SDI_DT_PORT_MULT 2 + + u8 ini_flags; + u8 tgt_flags; + u8 link_rate; /* SMP_RATE_XXX */ + u8 loc_flags; + #define ATTO_SDI_LF_DIRECT 0x01 + #define ATTO_SDI_LF_EXPANDER 0x02 + #define ATTO_SDI_LF_PORT_MULT 0x04 + u8 pm_port; + u8 reserved[0x60]; +}; + +union atto_hba_device_info { + struct atto_hba_sas_device_info sas_dev_info; +}; + +struct __packed atto_hba_get_device_info { + u32 target_id; + u8 info_type; + #define ATTO_GDI_IT_UNKNOWN 0x00 + #define ATTO_GDI_IT_SAS 0x01 + #define ATTO_GDI_IT_FC 0x02 + #define ATTO_GDI_IT_FCOE 0x03 + + u8 reserved[11]; + union atto_hba_device_info dev_info; +}; + +struct atto_ioctl { + u8 version; + u8 function; /* ATTO_FUNC_XXX */ + u8 status; +#define ATTO_STS_SUCCESS 0x00 +#define ATTO_STS_FAILED 0x01 +#define ATTO_STS_INV_VERSION 0x02 +#define ATTO_STS_OUT_OF_RSRC 0x03 +#define ATTO_STS_INV_FUNC 0x04 +#define ATTO_STS_UNSUPPORTED 0x05 +#define ATTO_STS_INV_ADAPTER 0x06 +#define ATTO_STS_INV_DRVR_VER 0x07 +#define ATTO_STS_INV_PARAM 0x08 +#define ATTO_STS_TIMEOUT 0x09 +#define ATTO_STS_NOT_APPL 
0x0A +#define ATTO_STS_DEGRADED 0x0B + + u8 flags; + #define HBAF_TUNNEL 0x01 + + u32 data_length; + u8 reserved2[56]; + + union { + u8 byte[1]; + struct atto_hba_get_adapter_info get_adap_info; + struct atto_hba_get_adapter_address get_adap_addr; + struct atto_hba_scsi_pass_thru scsi_pass_thru; + struct atto_hba_get_device_address get_dev_addr; + struct atto_hba_adap_ctrl adap_ctrl; + struct atto_hba_get_device_info get_dev_info; + struct atto_hba_trace trace; + } data; + +}; + +struct __packed atto_ioctl_vda_scsi_cmd { + + #define ATTO_VDA_SCSI_VER0 0 + #define ATTO_VDA_SCSI_VER ATTO_VDA_SCSI_VER0 + + u8 cdb[16]; + u32 flags; + u32 data_length; + u32 residual_length; + u16 target_id; + u8 sense_len; + u8 scsi_stat; + u8 reserved[8]; + u8 sense_data[80]; +}; + +struct __packed atto_ioctl_vda_flash_cmd { + + #define ATTO_VDA_FLASH_VER0 0 + #define ATTO_VDA_FLASH_VER ATTO_VDA_FLASH_VER0 + + u32 flash_addr; + u32 data_length; + u8 sub_func; + u8 reserved[15]; + + union { + struct { + u32 flash_size; + u32 page_size; + u8 prod_info[32]; + } info; + + struct { + char file_name[16]; /* 8.3 fname, NULL term, wc=* */ + u32 file_size; + } file; + } data; + +}; + +struct __packed atto_ioctl_vda_diag_cmd { + + #define ATTO_VDA_DIAG_VER0 0 + #define ATTO_VDA_DIAG_VER ATTO_VDA_DIAG_VER0 + + u64 local_addr; + u32 data_length; + u8 sub_func; + u8 flags; + u8 reserved[3]; +}; + +struct __packed atto_ioctl_vda_cli_cmd { + + #define ATTO_VDA_CLI_VER0 0 + #define ATTO_VDA_CLI_VER ATTO_VDA_CLI_VER0 + + u32 cmd_rsp_len; +}; + +struct __packed atto_ioctl_vda_smp_cmd { + + #define ATTO_VDA_SMP_VER0 0 + #define ATTO_VDA_SMP_VER ATTO_VDA_SMP_VER0 + + u64 dest; + u32 cmd_rsp_len; +}; + +struct __packed atto_ioctl_vda_cfg_cmd { + + #define ATTO_VDA_CFG_VER0 0 + #define ATTO_VDA_CFG_VER ATTO_VDA_CFG_VER0 + + u32 data_length; + u8 cfg_func; + u8 reserved[11]; + + union { + u8 bytes[112]; + struct atto_vda_cfg_init init; + } data; + +}; + +struct __packed atto_ioctl_vda_mgt_cmd { + + #define ATTO_VDA_MGT_VER0 0 + #define ATTO_VDA_MGT_VER ATTO_VDA_MGT_VER0 + + u8 mgt_func; + u8 scan_generation; + u16 dev_index; + u32 data_length; + u8 reserved[8]; + union { + u8 bytes[112]; + struct atto_vda_devinfo dev_info; + struct atto_vda_grp_info grp_info; + struct atto_vdapart_info part_info; + struct atto_vda_dh_info dh_info; + struct atto_vda_metrics_info metrics_info; + struct atto_vda_schedule_info sched_info; + struct atto_vda_n_vcache_info nvcache_info; + struct atto_vda_buzzer_info buzzer_info; + struct atto_vda_adapter_info adapter_info; + struct atto_vda_temp_info temp_info; + struct atto_vda_fan_info fan_info; + } data; +}; + +struct __packed atto_ioctl_vda_gsv_cmd { + + #define ATTO_VDA_GSV_VER0 0 + #define ATTO_VDA_GSV_VER ATTO_VDA_GSV_VER0 + + u8 rsp_len; + u8 reserved[7]; + u8 version_info[]; + #define ATTO_VDA_VER_UNSUPPORTED 0xFF + +}; + +struct __packed atto_ioctl_vda { + u8 version; + u8 function; /* VDA_FUNC_XXXX */ + u8 status; /* ATTO_STS_XXX */ + u8 vda_status; /* RS_XXX (if status == ATTO_STS_SUCCESS) */ + u32 data_length; + u8 reserved[8]; + + union { + struct atto_ioctl_vda_scsi_cmd scsi; + struct atto_ioctl_vda_flash_cmd flash; + struct atto_ioctl_vda_diag_cmd diag; + struct atto_ioctl_vda_cli_cmd cli; + struct atto_ioctl_vda_smp_cmd smp; + struct atto_ioctl_vda_cfg_cmd cfg; + struct atto_ioctl_vda_mgt_cmd mgt; + struct atto_ioctl_vda_gsv_cmd gsv; + u8 cmd_info[256]; + } cmd; + + union { + u8 data[1]; + struct atto_vda_devinfo2 dev_info2; + } data; + +}; + +struct __packed atto_ioctl_smp { + u8 version; 
+ #define ATTO_SMP_VERSION0 0 + #define ATTO_SMP_VERSION1 1 + #define ATTO_SMP_VERSION2 2 + #define ATTO_SMP_VERSION ATTO_SMP_VERSION2 + + u8 function; +#define ATTO_SMP_FUNC_DISC_SMP 0x00 +#define ATTO_SMP_FUNC_DISC_TARG 0x01 +#define ATTO_SMP_FUNC_SEND_CMD 0x02 +#define ATTO_SMP_FUNC_DISC_TARG_DIRECT 0x03 +#define ATTO_SMP_FUNC_SEND_CMD_DIRECT 0x04 +#define ATTO_SMP_FUNC_DISC_SMP_DIRECT 0x05 + + u8 status; /* ATTO_STS_XXX */ + u8 smp_status; /* if status == ATTO_STS_SUCCESS */ + #define ATTO_SMP_STS_SUCCESS 0x00 + #define ATTO_SMP_STS_FAILURE 0x01 + #define ATTO_SMP_STS_RESCAN 0x02 + #define ATTO_SMP_STS_NOT_FOUND 0x03 + + u16 target_id; + u8 phy_id; + u8 dev_index; + u64 smp_sas_addr; + u64 targ_sas_addr; + u32 req_length; + u32 rsp_length; + u8 flags; + #define ATTO_SMPF_ROOT_EXP 0x01 /* expander direct attached */ + + u8 reserved[31]; + + union { + u8 byte[1]; + u32 dword[1]; + } data; + +}; + +struct __packed atto_express_ioctl { + struct atto_express_ioctl_header header; + + union { + struct atto_firmware_rw_request fwrw; + struct atto_param_rw_request prw; + struct atto_channel_list chanlist; + struct atto_channel_info chaninfo; + struct atto_ioctl ioctl_hba; + struct atto_module_info modinfo; + struct atto_ioctl_vda ioctl_vda; + struct atto_ioctl_smp ioctl_smp; + struct atto_csmi csmi; + + } data; +}; + +/* The struct associated with the code is listed after the definition */ +#define EXPRESS_IOCTL_MIN 0x4500 +#define EXPRESS_IOCTL_RW_FIRMWARE 0x4500 /* FIRMWARERW */ +#define EXPRESS_IOCTL_READ_PARAMS 0x4501 /* PARAMRW */ +#define EXPRESS_IOCTL_WRITE_PARAMS 0x4502 /* PARAMRW */ +#define EXPRESS_IOCTL_FC_API 0x4503 /* internal */ +#define EXPRESS_IOCTL_GET_CHANNELS 0x4504 /* CHANNELLIST */ +#define EXPRESS_IOCTL_CHAN_INFO 0x4505 /* CHANNELINFO */ +#define EXPRESS_IOCTL_DEFAULT_PARAMS 0x4506 /* PARAMRW */ +#define EXPRESS_ADDR_MEMORY 0x4507 /* MEMADDR */ +#define EXPRESS_RW_MEMORY 0x4508 /* MEMRW */ +#define EXPRESS_TSDK_DUMP 0x4509 /* TSDKDUMP */ +#define EXPRESS_IOCTL_SMP 0x450A /* IOCTL_SMP */ +#define EXPRESS_CSMI 0x450B /* CSMI */ +#define EXPRESS_IOCTL_HBA 0x450C /* IOCTL_HBA */ +#define EXPRESS_IOCTL_VDA 0x450D /* IOCTL_VDA */ +#define EXPRESS_IOCTL_GET_ID 0x450E /* GET_ID */ +#define EXPRESS_IOCTL_GET_MOD_INFO 0x450F /* MODULE_INFO */ +#define EXPRESS_IOCTL_MAX 0x450F + +#endif diff --git a/drivers/scsi/esas2r/atvda.h b/drivers/scsi/esas2r/atvda.h new file mode 100644 index 000000000..5fc1f991d --- /dev/null +++ b/drivers/scsi/esas2r/atvda.h @@ -0,0 +1,1319 @@ +/* linux/drivers/scsi/esas2r/atvda.h + * ATTO VDA interface definitions + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + + +#ifndef ATVDA_H +#define ATVDA_H + +struct __packed atto_dev_addr { + u64 dev_port; + u64 hba_port; + u8 lun; + u8 flags; + #define VDA_DEVADDRF_SATA 0x01 + #define VDA_DEVADDRF_SSD 0x02 + u8 link_speed; /* VDALINKSPEED_xxx */ + u8 pad[1]; +}; + +/* dev_addr2 was added for 64-bit alignment */ + +struct __packed atto_dev_addr2 { + u64 dev_port; + u64 hba_port; + u8 lun; + u8 flags; + u8 link_speed; + u8 pad[5]; +}; + +struct __packed atto_vda_sge { + u32 length; + u64 address; +}; + + +/* VDA request function codes */ + +#define VDA_FUNC_SCSI 0x00 +#define VDA_FUNC_FLASH 0x01 +#define VDA_FUNC_DIAG 0x02 +#define VDA_FUNC_AE 0x03 +#define VDA_FUNC_CLI 0x04 +#define VDA_FUNC_IOCTL 0x05 +#define VDA_FUNC_CFG 0x06 +#define VDA_FUNC_MGT 0x07 +#define VDA_FUNC_GSV 0x08 + + +/* VDA request status values. for host driver considerations, values for + * SCSI requests start at zero. other requests may use these values as well. */ + +#define RS_SUCCESS 0x00 /*! successful completion */ +#define RS_INV_FUNC 0x01 /*! invalid command function */ +#define RS_BUSY 0x02 /*! insufficient resources */ +#define RS_SEL 0x03 /*! no target at target_id */ +#define RS_NO_LUN 0x04 /*! invalid LUN */ +#define RS_TIMEOUT 0x05 /*! request timeout */ +#define RS_OVERRUN 0x06 /*! data overrun */ +#define RS_UNDERRUN 0x07 /*! data underrun */ +#define RS_SCSI_ERROR 0x08 /*! SCSI error occurred */ +#define RS_ABORTED 0x0A /*! command aborted */ +#define RS_RESID_MISM 0x0B /*! residual length incorrect */ +#define RS_TM_FAILED 0x0C /*! task management failed */ +#define RS_RESET 0x0D /*! aborted due to bus reset */ +#define RS_ERR_DMA_SG 0x0E /*! error reading SG list */ +#define RS_ERR_DMA_DATA 0x0F /*! error transferring data */ +#define RS_UNSUPPORTED 0x10 /*! unsupported request */ +#define RS_SEL2 0x70 /*! internal generated RS_SEL */ +#define RS_VDA_BASE 0x80 /*! base of VDA-specific errors */ +#define RS_MGT_BASE 0x80 /*! 
base of VDA management errors */ +#define RS_SCAN_FAIL (RS_MGT_BASE + 0x00) +#define RS_DEV_INVALID (RS_MGT_BASE + 0x01) +#define RS_DEV_ASSIGNED (RS_MGT_BASE + 0x02) +#define RS_DEV_REMOVE (RS_MGT_BASE + 0x03) +#define RS_DEV_LOST (RS_MGT_BASE + 0x04) +#define RS_SCAN_GEN (RS_MGT_BASE + 0x05) +#define RS_GRP_INVALID (RS_MGT_BASE + 0x08) +#define RS_GRP_EXISTS (RS_MGT_BASE + 0x09) +#define RS_GRP_LIMIT (RS_MGT_BASE + 0x0A) +#define RS_GRP_INTLV (RS_MGT_BASE + 0x0B) +#define RS_GRP_SPAN (RS_MGT_BASE + 0x0C) +#define RS_GRP_TYPE (RS_MGT_BASE + 0x0D) +#define RS_GRP_MEMBERS (RS_MGT_BASE + 0x0E) +#define RS_GRP_COMMIT (RS_MGT_BASE + 0x0F) +#define RS_GRP_REBUILD (RS_MGT_BASE + 0x10) +#define RS_GRP_REBUILD_TYPE (RS_MGT_BASE + 0x11) +#define RS_GRP_BLOCK_SIZE (RS_MGT_BASE + 0x12) +#define RS_CFG_SAVE (RS_MGT_BASE + 0x14) +#define RS_PART_LAST (RS_MGT_BASE + 0x18) +#define RS_ELEM_INVALID (RS_MGT_BASE + 0x19) +#define RS_PART_MAPPED (RS_MGT_BASE + 0x1A) +#define RS_PART_TARGET (RS_MGT_BASE + 0x1B) +#define RS_PART_LUN (RS_MGT_BASE + 0x1C) +#define RS_PART_DUP (RS_MGT_BASE + 0x1D) +#define RS_PART_NOMAP (RS_MGT_BASE + 0x1E) +#define RS_PART_MAX (RS_MGT_BASE + 0x1F) +#define RS_PART_CAP (RS_MGT_BASE + 0x20) +#define RS_PART_STATE (RS_MGT_BASE + 0x21) +#define RS_TEST_IN_PROG (RS_MGT_BASE + 0x22) +#define RS_METRICS_ERROR (RS_MGT_BASE + 0x23) +#define RS_HS_ERROR (RS_MGT_BASE + 0x24) +#define RS_NO_METRICS_TEST (RS_MGT_BASE + 0x25) +#define RS_BAD_PARAM (RS_MGT_BASE + 0x26) +#define RS_GRP_MEMBER_SIZE (RS_MGT_BASE + 0x27) +#define RS_FLS_BASE 0xB0 /*! base of VDA errors */ +#define RS_FLS_ERR_AREA (RS_FLS_BASE + 0x00) +#define RS_FLS_ERR_BUSY (RS_FLS_BASE + 0x01) +#define RS_FLS_ERR_RANGE (RS_FLS_BASE + 0x02) +#define RS_FLS_ERR_BEGIN (RS_FLS_BASE + 0x03) +#define RS_FLS_ERR_CHECK (RS_FLS_BASE + 0x04) +#define RS_FLS_ERR_FAIL (RS_FLS_BASE + 0x05) +#define RS_FLS_ERR_RSRC (RS_FLS_BASE + 0x06) +#define RS_FLS_ERR_NOFILE (RS_FLS_BASE + 0x07) +#define RS_FLS_ERR_FSIZE (RS_FLS_BASE + 0x08) +#define RS_CFG_BASE 0xC0 /*! base of VDA configuration errors */ +#define RS_CFG_ERR_BUSY (RS_CFG_BASE + 0) +#define RS_CFG_ERR_SGE (RS_CFG_BASE + 1) +#define RS_CFG_ERR_DATE (RS_CFG_BASE + 2) +#define RS_CFG_ERR_TIME (RS_CFG_BASE + 3) +#define RS_DEGRADED 0xFB /*! degraded mode */ +#define RS_CLI_INTERNAL 0xFC /*! VDA CLI internal error */ +#define RS_VDA_INTERNAL 0xFD /*! catch-all */ +#define RS_PENDING 0xFE /*! pending, not started */ +#define RS_STARTED 0xFF /*! started */ + + +/* flash request subfunctions. these are used in both the IOCTL and the + * driver-firmware interface (VDA_FUNC_FLASH). */ + +#define VDA_FLASH_BEGINW 0x00 +#define VDA_FLASH_READ 0x01 +#define VDA_FLASH_WRITE 0x02 +#define VDA_FLASH_COMMIT 0x03 +#define VDA_FLASH_CANCEL 0x04 +#define VDA_FLASH_INFO 0x05 +#define VDA_FLASH_FREAD 0x06 +#define VDA_FLASH_FWRITE 0x07 +#define VDA_FLASH_FINFO 0x08 + + +/* IOCTL request subfunctions. these identify the payload type for + * VDA_FUNC_IOCTL. 
+ */ + +#define VDA_IOCTL_HBA 0x00 +#define VDA_IOCTL_CSMI 0x01 +#define VDA_IOCTL_SMP 0x02 + +struct __packed atto_vda_devinfo { + struct atto_dev_addr dev_addr; + u8 vendor_id[8]; + u8 product_id[16]; + u8 revision[4]; + u64 capacity; + u32 block_size; + u8 dev_type; + + union { + u8 dev_status; + #define VDADEVSTAT_INVALID 0x00 + #define VDADEVSTAT_CORRUPT VDADEVSTAT_INVALID + #define VDADEVSTAT_ASSIGNED 0x01 + #define VDADEVSTAT_SPARE 0x02 + #define VDADEVSTAT_UNAVAIL 0x03 + #define VDADEVSTAT_PT_MAINT 0x04 + #define VDADEVSTAT_LCLSPARE 0x05 + #define VDADEVSTAT_UNUSEABLE 0x06 + #define VDADEVSTAT_AVAIL 0xFF + + u8 op_ctrl; + #define VDA_DEV_OP_CTRL_START 0x01 + #define VDA_DEV_OP_CTRL_HALT 0x02 + #define VDA_DEV_OP_CTRL_RESUME 0x03 + #define VDA_DEV_OP_CTRL_CANCEL 0x04 + }; + + u8 member_state; + #define VDAMBRSTATE_ONLINE 0x00 + #define VDAMBRSTATE_DEGRADED 0x01 + #define VDAMBRSTATE_UNAVAIL 0x02 + #define VDAMBRSTATE_FAULTED 0x03 + #define VDAMBRSTATE_MISREAD 0x04 + #define VDAMBRSTATE_INCOMPAT 0x05 + + u8 operation; + #define VDAOP_NONE 0x00 + #define VDAOP_REBUILD 0x01 + #define VDAOP_ERASE 0x02 + #define VDAOP_PATTERN 0x03 + #define VDAOP_CONVERSION 0x04 + #define VDAOP_FULL_INIT 0x05 + #define VDAOP_QUICK_INIT 0x06 + #define VDAOP_SECT_SCAN 0x07 + #define VDAOP_SECT_SCAN_PARITY 0x08 + #define VDAOP_SECT_SCAN_PARITY_FIX 0x09 + #define VDAOP_RECOV_REBUILD 0x0A + + u8 op_status; + #define VDAOPSTAT_OK 0x00 + #define VDAOPSTAT_FAULTED 0x01 + #define VDAOPSTAT_HALTED 0x02 + #define VDAOPSTAT_INT 0x03 + + u8 progress; /* 0 - 100% */ + u16 ses_dev_index; + #define VDASESDI_INVALID 0xFFFF + + u8 serial_no[32]; + + union { + u16 target_id; + #define VDATGTID_INVALID 0xFFFF + + u16 features_mask; + }; + + u16 lun; + u16 features; + #define VDADEVFEAT_ENC_SERV 0x0001 + #define VDADEVFEAT_IDENT 0x0002 + #define VDADEVFEAT_DH_SUPP 0x0004 + #define VDADEVFEAT_PHYS_ID 0x0008 + + u8 ses_element_id; + u8 link_speed; + #define VDALINKSPEED_UNKNOWN 0x00 + #define VDALINKSPEED_1GB 0x01 + #define VDALINKSPEED_1_5GB 0x02 + #define VDALINKSPEED_2GB 0x03 + #define VDALINKSPEED_3GB 0x04 + #define VDALINKSPEED_4GB 0x05 + #define VDALINKSPEED_6GB 0x06 + #define VDALINKSPEED_8GB 0x07 + + u16 phys_target_id; + u8 reserved[2]; +}; + + +/*! struct atto_vda_devinfo2 is a replacement for atto_vda_devinfo. it + * extends beyond the 0x70 bytes allowed in atto_vda_mgmt_req; therefore, + * the entire structure is DMaed between the firmware and host buffer and + * the data will always be in little endian format. + */ + +struct __packed atto_vda_devinfo2 { + struct atto_dev_addr dev_addr; + u8 vendor_id[8]; + u8 product_id[16]; + u8 revision[4]; + u64 capacity; + u32 block_size; + u8 dev_type; + u8 dev_status; + u8 member_state; + u8 operation; + u8 op_status; + u8 progress; + u16 ses_dev_index; + u8 serial_no[32]; + union { + u16 target_id; + u16 features_mask; + }; + + u16 lun; + u16 features; + u8 ses_element_id; + u8 link_speed; + u16 phys_target_id; + u8 reserved[2]; + +/* This is where fields specific to struct atto_vda_devinfo2 begin. Note + * that the structure version started at one so applications that unionize this + * structure with atto_vda_dev_info can differentiate them if desired. 
+ */ + + u8 version; + #define VDADEVINFO_VERSION0 0x00 + #define VDADEVINFO_VERSION1 0x01 + #define VDADEVINFO_VERSION2 0x02 + #define VDADEVINFO_VERSION3 0x03 + #define VDADEVINFO_VERSION VDADEVINFO_VERSION3 + + u8 reserved2[3]; + + /* sector scanning fields */ + + u32 ss_curr_errors; + u64 ss_curr_scanned; + u32 ss_curr_recvrd; + u32 ss_scan_length; + u32 ss_total_errors; + u32 ss_total_recvrd; + u32 ss_num_scans; + + /* grp_name was added in version 2 of this structure. */ + + char grp_name[15]; + u8 reserved3[4]; + + /* dev_addr_list was added in version 3 of this structure. */ + + u8 num_dev_addr; + struct atto_dev_addr2 dev_addr_list[8]; +}; + + +struct __packed atto_vda_grp_info { + u8 grp_index; + #define VDA_MAX_RAID_GROUPS 32 + + char grp_name[15]; + u64 capacity; + u32 block_size; + u32 interleave; + u8 type; + #define VDA_GRP_TYPE_RAID0 0 + #define VDA_GRP_TYPE_RAID1 1 + #define VDA_GRP_TYPE_RAID4 4 + #define VDA_GRP_TYPE_RAID5 5 + #define VDA_GRP_TYPE_RAID6 6 + #define VDA_GRP_TYPE_RAID10 10 + #define VDA_GRP_TYPE_RAID40 40 + #define VDA_GRP_TYPE_RAID50 50 + #define VDA_GRP_TYPE_RAID60 60 + #define VDA_GRP_TYPE_DVRAID_HS 252 + #define VDA_GRP_TYPE_DVRAID_NOHS 253 + #define VDA_GRP_TYPE_JBOD 254 + #define VDA_GRP_TYPE_SPARE 255 + + union { + u8 status; + #define VDA_GRP_STAT_INVALID 0x00 + #define VDA_GRP_STAT_NEW 0x01 + #define VDA_GRP_STAT_WAITING 0x02 + #define VDA_GRP_STAT_ONLINE 0x03 + #define VDA_GRP_STAT_DEGRADED 0x04 + #define VDA_GRP_STAT_OFFLINE 0x05 + #define VDA_GRP_STAT_DELETED 0x06 + #define VDA_GRP_STAT_RECOV_BASIC 0x07 + #define VDA_GRP_STAT_RECOV_EXTREME 0x08 + + u8 op_ctrl; + #define VDA_GRP_OP_CTRL_START 0x01 + #define VDA_GRP_OP_CTRL_HALT 0x02 + #define VDA_GRP_OP_CTRL_RESUME 0x03 + #define VDA_GRP_OP_CTRL_CANCEL 0x04 + }; + + u8 rebuild_state; + #define VDA_RBLD_NONE 0x00 + #define VDA_RBLD_REBUILD 0x01 + #define VDA_RBLD_ERASE 0x02 + #define VDA_RBLD_PATTERN 0x03 + #define VDA_RBLD_CONV 0x04 + #define VDA_RBLD_FULL_INIT 0x05 + #define VDA_RBLD_QUICK_INIT 0x06 + #define VDA_RBLD_SECT_SCAN 0x07 + #define VDA_RBLD_SECT_SCAN_PARITY 0x08 + #define VDA_RBLD_SECT_SCAN_PARITY_FIX 0x09 + #define VDA_RBLD_RECOV_REBUILD 0x0A + #define VDA_RBLD_RECOV_BASIC 0x0B + #define VDA_RBLD_RECOV_EXTREME 0x0C + + u8 span_depth; + u8 progress; + u8 mirror_width; + u8 stripe_width; + u8 member_cnt; + + union { + u16 members[32]; + #define VDA_MEMBER_MISSING 0xFFFF + #define VDA_MEMBER_NEW 0xFFFE + u16 features_mask; + }; + + u16 features; + #define VDA_GRP_FEAT_HOTSWAP 0x0001 + #define VDA_GRP_FEAT_SPDRD_MASK 0x0006 + #define VDA_GRP_FEAT_SPDRD_DIS 0x0000 + #define VDA_GRP_FEAT_SPDRD_ENB 0x0002 + #define VDA_GRP_FEAT_SPDRD_AUTO 0x0004 + #define VDA_GRP_FEAT_IDENT 0x0008 + #define VDA_GRP_FEAT_RBLDPRI_MASK 0x0030 + #define VDA_GRP_FEAT_RBLDPRI_LOW 0x0010 + #define VDA_GRP_FEAT_RBLDPRI_SAME 0x0020 + #define VDA_GRP_FEAT_RBLDPRI_HIGH 0x0030 + #define VDA_GRP_FEAT_WRITE_CACHE 0x0040 + #define VDA_GRP_FEAT_RBLD_RESUME 0x0080 + #define VDA_GRP_FEAT_SECT_RESUME 0x0100 + #define VDA_GRP_FEAT_INIT_RESUME 0x0200 + #define VDA_GRP_FEAT_SSD 0x0400 + #define VDA_GRP_FEAT_BOOT_DEV 0x0800 + + /* + * for backward compatibility, a prefetch value of zero means the + * setting is ignored/unsupported. therefore, the firmware supported + * 0-6 values are incremented to 1-7. 
+ */ + + u8 prefetch; + u8 op_status; + #define VDAGRPOPSTAT_MASK 0x0F + #define VDAGRPOPSTAT_INVALID 0x00 + #define VDAGRPOPSTAT_OK 0x01 + #define VDAGRPOPSTAT_FAULTED 0x02 + #define VDAGRPOPSTAT_HALTED 0x03 + #define VDAGRPOPSTAT_INT 0x04 + #define VDAGRPOPPROC_MASK 0xF0 + #define VDAGRPOPPROC_STARTABLE 0x10 + #define VDAGRPOPPROC_CANCELABLE 0x20 + #define VDAGRPOPPROC_RESUMABLE 0x40 + #define VDAGRPOPPROC_HALTABLE 0x80 + u8 over_provision; + u8 reserved[3]; + +}; + + +struct __packed atto_vdapart_info { + u8 part_no; + #define VDA_MAX_PARTITIONS 128 + + char grp_name[15]; + u64 part_size; + u64 start_lba; + u32 block_size; + u16 target_id; + u8 LUN; + char serial_no[41]; + u8 features; + #define VDAPI_FEAT_WRITE_CACHE 0x01 + + u8 reserved[7]; +}; + + +struct __packed atto_vda_dh_info { + u8 req_type; + #define VDADH_RQTYPE_CACHE 0x01 + #define VDADH_RQTYPE_FETCH 0x02 + #define VDADH_RQTYPE_SET_STAT 0x03 + #define VDADH_RQTYPE_GET_STAT 0x04 + + u8 req_qual; + #define VDADH_RQQUAL_SMART 0x01 + #define VDADH_RQQUAL_MEDDEF 0x02 + #define VDADH_RQQUAL_INFOEXC 0x04 + + u8 num_smart_attribs; + u8 status; + #define VDADH_STAT_DISABLE 0x00 + #define VDADH_STAT_ENABLE 0x01 + + u32 med_defect_cnt; + u32 info_exc_cnt; + u8 smart_status; + #define VDADH_SMARTSTAT_OK 0x00 + #define VDADH_SMARTSTAT_ERR 0x01 + + u8 reserved[35]; + struct atto_vda_sge sge[1]; +}; + + +struct __packed atto_vda_dh_smart { + u8 attrib_id; + u8 current_val; + u8 worst; + u8 threshold; + u8 raw_data[6]; + u8 raw_attrib_status; + #define VDADHSM_RAWSTAT_PREFAIL_WARRANTY 0x01 + #define VDADHSM_RAWSTAT_ONLINE_COLLECTION 0x02 + #define VDADHSM_RAWSTAT_PERFORMANCE_ATTR 0x04 + #define VDADHSM_RAWSTAT_ERROR_RATE_ATTR 0x08 + #define VDADHSM_RAWSTAT_EVENT_COUNT_ATTR 0x10 + #define VDADHSM_RAWSTAT_SELF_PRESERVING_ATTR 0x20 + + u8 calc_attrib_status; + #define VDADHSM_CALCSTAT_UNKNOWN 0x00 + #define VDADHSM_CALCSTAT_GOOD 0x01 + #define VDADHSM_CALCSTAT_PREFAIL 0x02 + #define VDADHSM_CALCSTAT_OLDAGE 0x03 + + u8 reserved[4]; +}; + + +struct __packed atto_vda_metrics_info { + u8 data_version; + #define VDAMET_VERSION0 0x00 + #define VDAMET_VERSION VDAMET_VERSION0 + + u8 metrics_action; + #define VDAMET_METACT_NONE 0x00 + #define VDAMET_METACT_START 0x01 + #define VDAMET_METACT_STOP 0x02 + #define VDAMET_METACT_RETRIEVE 0x03 + #define VDAMET_METACT_CLEAR 0x04 + + u8 test_action; + #define VDAMET_TSTACT_NONE 0x00 + #define VDAMET_TSTACT_STRT_INIT 0x01 + #define VDAMET_TSTACT_STRT_READ 0x02 + #define VDAMET_TSTACT_STRT_VERIFY 0x03 + #define VDAMET_TSTACT_STRT_INIT_VERIFY 0x04 + #define VDAMET_TSTACT_STOP 0x05 + + u8 num_dev_indexes; + #define VDAMET_ALL_DEVICES 0xFF + + u16 dev_indexes[32]; + u8 reserved[12]; + struct atto_vda_sge sge[1]; +}; + + +struct __packed atto_vda_metrics_data { + u16 dev_index; + u16 length; + #define VDAMD_LEN_LAST 0x8000 + #define VDAMD_LEN_MASK 0x0FFF + + u32 flags; + #define VDAMDF_RUN 0x00000007 + #define VDAMDF_RUN_READ 0x00000001 + #define VDAMDF_RUN_WRITE 0x00000002 + #define VDAMDF_RUN_ALL 0x00000004 + #define VDAMDF_READ 0x00000010 + #define VDAMDF_WRITE 0x00000020 + #define VDAMDF_ALL 0x00000040 + #define VDAMDF_DRIVETEST 0x40000000 + #define VDAMDF_NEW 0x80000000 + + u64 total_read_data; + u64 total_write_data; + u64 total_read_io; + u64 total_write_io; + u64 read_start_time; + u64 read_stop_time; + u64 write_start_time; + u64 write_stop_time; + u64 read_maxio_time; + u64 wpvdadmetricsdatarite_maxio_time; + u64 read_totalio_time; + u64 write_totalio_time; + u64 read_total_errs; + u64 write_total_errs; + 
u64 read_recvrd_errs; + u64 write_recvrd_errs; + u64 miscompares; +}; + + +struct __packed atto_vda_schedule_info { + u8 schedule_type; + #define VDASI_SCHTYPE_ONETIME 0x01 + #define VDASI_SCHTYPE_DAILY 0x02 + #define VDASI_SCHTYPE_WEEKLY 0x03 + + u8 operation; + #define VDASI_OP_NONE 0x00 + #define VDASI_OP_CREATE 0x01 + #define VDASI_OP_CANCEL 0x02 + + u8 hour; + u8 minute; + u8 day; + #define VDASI_DAY_NONE 0x00 + + u8 progress; + #define VDASI_PROG_NONE 0xFF + + u8 event_type; + #define VDASI_EVTTYPE_SECT_SCAN 0x01 + #define VDASI_EVTTYPE_SECT_SCAN_PARITY 0x02 + #define VDASI_EVTTYPE_SECT_SCAN_PARITY_FIX 0x03 + + u8 recurrences; + #define VDASI_RECUR_FOREVER 0x00 + + u32 id; + #define VDASI_ID_NONE 0x00 + + char grp_name[15]; + u8 reserved[85]; +}; + + +struct __packed atto_vda_n_vcache_info { + u8 super_cap_status; + #define VDANVCI_SUPERCAP_NOT_PRESENT 0x00 + #define VDANVCI_SUPERCAP_FULLY_CHARGED 0x01 + #define VDANVCI_SUPERCAP_NOT_CHARGED 0x02 + + u8 nvcache_module_status; + #define VDANVCI_NVCACHEMODULE_NOT_PRESENT 0x00 + #define VDANVCI_NVCACHEMODULE_PRESENT 0x01 + + u8 protection_mode; + #define VDANVCI_PROTMODE_HI_PROTECT 0x00 + #define VDANVCI_PROTMODE_HI_PERFORM 0x01 + + u8 reserved[109]; +}; + + +struct __packed atto_vda_buzzer_info { + u8 status; + #define VDABUZZI_BUZZER_OFF 0x00 + #define VDABUZZI_BUZZER_ON 0x01 + #define VDABUZZI_BUZZER_LAST 0x02 + + u8 reserved[3]; + u32 duration; + #define VDABUZZI_DURATION_INDEFINITE 0xffffffff + + u8 reserved2[104]; +}; + + +struct __packed atto_vda_adapter_info { + u8 version; + #define VDAADAPINFO_VERSION0 0x00 + #define VDAADAPINFO_VERSION VDAADAPINFO_VERSION0 + + u8 reserved; + signed short utc_offset; + u32 utc_time; + u32 features; + #define VDA_ADAP_FEAT_IDENT 0x0001 + #define VDA_ADAP_FEAT_BUZZ_ERR 0x0002 + #define VDA_ADAP_FEAT_UTC_TIME 0x0004 + + u32 valid_features; + char active_config[33]; + u8 temp_count; + u8 fan_count; + u8 reserved3[61]; +}; + + +struct __packed atto_vda_temp_info { + u8 temp_index; + u8 max_op_temp; + u8 min_op_temp; + u8 op_temp_warn; + u8 temperature; + u8 type; + #define VDA_TEMP_TYPE_CPU 1 + + u8 reserved[106]; +}; + + +struct __packed atto_vda_fan_info { + u8 fan_index; + u8 status; + #define VDA_FAN_STAT_UNKNOWN 0 + #define VDA_FAN_STAT_NORMAL 1 + #define VDA_FAN_STAT_FAIL 2 + + u16 crit_pvdafaninfothreshold; + u16 warn_threshold; + u16 speed; + u8 reserved[104]; +}; + + +/* VDA management commands */ + +#define VDAMGT_DEV_SCAN 0x00 +#define VDAMGT_DEV_INFO 0x01 +#define VDAMGT_DEV_CLEAN 0x02 +#define VDAMGT_DEV_IDENTIFY 0x03 +#define VDAMGT_DEV_IDENTSTOP 0x04 +#define VDAMGT_DEV_PT_INFO 0x05 +#define VDAMGT_DEV_FEATURES 0x06 +#define VDAMGT_DEV_PT_FEATURES 0x07 +#define VDAMGT_DEV_HEALTH_REQ 0x08 +#define VDAMGT_DEV_METRICS 0x09 +#define VDAMGT_DEV_INFO2 0x0A +#define VDAMGT_DEV_OPERATION 0x0B +#define VDAMGT_DEV_INFO2_BYADDR 0x0C +#define VDAMGT_GRP_INFO 0x10 +#define VDAMGT_GRP_CREATE 0x11 +#define VDAMGT_GRP_DELETE 0x12 +#define VDAMGT_ADD_STORAGE 0x13 +#define VDAMGT_MEMBER_ADD 0x14 +#define VDAMGT_GRP_COMMIT 0x15 +#define VDAMGT_GRP_REBUILD 0x16 +#define VDAMGT_GRP_COMMIT_INIT 0x17 +#define VDAMGT_QUICK_RAID 0x18 +#define VDAMGT_GRP_FEATURES 0x19 +#define VDAMGT_GRP_COMMIT_INIT_AUTOMAP 0x1A +#define VDAMGT_QUICK_RAID_INIT_AUTOMAP 0x1B +#define VDAMGT_GRP_OPERATION 0x1C +#define VDAMGT_CFG_SAVE 0x20 +#define VDAMGT_LAST_ERROR 0x21 +#define VDAMGT_ADAP_INFO 0x22 +#define VDAMGT_ADAP_FEATURES 0x23 +#define VDAMGT_TEMP_INFO 0x24 +#define VDAMGT_FAN_INFO 0x25 +#define VDAMGT_PART_INFO 0x30 
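/*
 * Illustrative sketch, not part of the upstream header: filling a device-info
 * management request with the VDAMGT_* codes above.  It uses struct
 * atto_ioctl_vda and ATTO_VDA_MGT_VER from atioctl.h (which includes this
 * file); the field conventions are a best guess from the layouts alone and
 * the authoritative encoding is in the esas2r ioctl handlers.
 */
static void example_fill_dev_info_req(struct atto_ioctl_vda *vda, u16 dev_index)
{
	memset(vda, 0, sizeof(*vda));		/* assumes <linux/string.h> */
	vda->version = ATTO_VDA_MGT_VER;	/* payload version, assumed */
	vda->function = VDA_FUNC_MGT;
	vda->cmd.mgt.mgt_func = VDAMGT_DEV_INFO;
	vda->cmd.mgt.dev_index = dev_index;
	vda->cmd.mgt.data_length = sizeof(vda->cmd.mgt.data);
	/* status and vda_status are returned by the driver/firmware */
}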
+#define VDAMGT_PART_MAP 0x31 +#define VDAMGT_PART_UNMAP 0x32 +#define VDAMGT_PART_AUTOMAP 0x33 +#define VDAMGT_PART_SPLIT 0x34 +#define VDAMGT_PART_MERGE 0x35 +#define VDAMGT_SPARE_LIST 0x40 +#define VDAMGT_SPARE_ADD 0x41 +#define VDAMGT_SPARE_REMOVE 0x42 +#define VDAMGT_LOCAL_SPARE_ADD 0x43 +#define VDAMGT_SCHEDULE_EVENT 0x50 +#define VDAMGT_SCHEDULE_INFO 0x51 +#define VDAMGT_NVCACHE_INFO 0x60 +#define VDAMGT_NVCACHE_SET 0x61 +#define VDAMGT_BUZZER_INFO 0x70 +#define VDAMGT_BUZZER_SET 0x71 + + +struct __packed atto_vda_ae_hdr { + u8 bylength; + u8 byflags; + #define VDAAE_HDRF_EVENT_ACK 0x01 + + u8 byversion; + #define VDAAE_HDR_VER_0 0 + + u8 bytype; + #define VDAAE_HDR_TYPE_RAID 1 + #define VDAAE_HDR_TYPE_LU 2 + #define VDAAE_HDR_TYPE_DISK 3 + #define VDAAE_HDR_TYPE_RESET 4 + #define VDAAE_HDR_TYPE_LOG_INFO 5 + #define VDAAE_HDR_TYPE_LOG_WARN 6 + #define VDAAE_HDR_TYPE_LOG_CRIT 7 + #define VDAAE_HDR_TYPE_LOG_FAIL 8 + #define VDAAE_HDR_TYPE_NVC 9 + #define VDAAE_HDR_TYPE_TLG_INFO 10 + #define VDAAE_HDR_TYPE_TLG_WARN 11 + #define VDAAE_HDR_TYPE_TLG_CRIT 12 + #define VDAAE_HDR_TYPE_PWRMGT 13 + #define VDAAE_HDR_TYPE_MUTE 14 + #define VDAAE_HDR_TYPE_DEV 15 +}; + + +struct __packed atto_vda_ae_raid { + struct atto_vda_ae_hdr hdr; + u32 dwflags; + #define VDAAE_GROUP_STATE 0x00000001 + #define VDAAE_RBLD_STATE 0x00000002 + #define VDAAE_RBLD_PROG 0x00000004 + #define VDAAE_MEMBER_CHG 0x00000008 + #define VDAAE_PART_CHG 0x00000010 + #define VDAAE_MEM_STATE_CHG 0x00000020 + + u8 bygroup_state; + #define VDAAE_RAID_INVALID 0 + #define VDAAE_RAID_NEW 1 + #define VDAAE_RAID_WAITING 2 + #define VDAAE_RAID_ONLINE 3 + #define VDAAE_RAID_DEGRADED 4 + #define VDAAE_RAID_OFFLINE 5 + #define VDAAE_RAID_DELETED 6 + #define VDAAE_RAID_BASIC 7 + #define VDAAE_RAID_EXTREME 8 + #define VDAAE_RAID_UNKNOWN 9 + + u8 byrebuild_state; + #define VDAAE_RBLD_NONE 0 + #define VDAAE_RBLD_REBUILD 1 + #define VDAAE_RBLD_ERASE 2 + #define VDAAE_RBLD_PATTERN 3 + #define VDAAE_RBLD_CONV 4 + #define VDAAE_RBLD_FULL_INIT 5 + #define VDAAE_RBLD_QUICK_INIT 6 + #define VDAAE_RBLD_SECT_SCAN 7 + #define VDAAE_RBLD_SECT_SCAN_PARITY 8 + #define VDAAE_RBLD_SECT_SCAN_PARITY_FIX 9 + #define VDAAE_RBLD_RECOV_REBUILD 10 + #define VDAAE_RBLD_UNKNOWN 11 + + u8 byrebuild_progress; + u8 op_status; + #define VDAAE_GRPOPSTAT_MASK 0x0F + #define VDAAE_GRPOPSTAT_INVALID 0x00 + #define VDAAE_GRPOPSTAT_OK 0x01 + #define VDAAE_GRPOPSTAT_FAULTED 0x02 + #define VDAAE_GRPOPSTAT_HALTED 0x03 + #define VDAAE_GRPOPSTAT_INT 0x04 + #define VDAAE_GRPOPPROC_MASK 0xF0 + #define VDAAE_GRPOPPROC_STARTABLE 0x10 + #define VDAAE_GRPOPPROC_CANCELABLE 0x20 + #define VDAAE_GRPOPPROC_RESUMABLE 0x40 + #define VDAAE_GRPOPPROC_HALTABLE 0x80 + char acname[15]; + u8 byreserved; + u8 byreserved2[0x80 - 0x1C]; +}; + + +struct __packed atto_vda_ae_lu_tgt_lun { + u16 wtarget_id; + u8 bylun; + u8 byreserved; +}; + + +struct __packed atto_vda_ae_lu_tgt_lun_raid { + u16 wtarget_id; + u8 bylun; + u8 byreserved; + u32 dwinterleave; + u32 dwblock_size; +}; + + +struct __packed atto_vda_ae_lu { + struct atto_vda_ae_hdr hdr; + u32 dwevent; + #define VDAAE_LU_DISC 0x00000001 + #define VDAAE_LU_LOST 0x00000002 + #define VDAAE_LU_STATE 0x00000004 + #define VDAAE_LU_PASSTHROUGH 0x10000000 + #define VDAAE_LU_PHYS_ID 0x20000000 + + u8 bystate; + #define VDAAE_LU_UNDEFINED 0 + #define VDAAE_LU_NOT_PRESENT 1 + #define VDAAE_LU_OFFLINE 2 + #define VDAAE_LU_ONLINE 3 + #define VDAAE_LU_DEGRADED 4 + #define VDAAE_LU_FACTORY_DISABLED 5 + #define VDAAE_LU_DELETED 6 + #define VDAAE_LU_BUSSCAN 7 + 
#define VDAAE_LU_UNKNOWN 8 + + u8 byreserved; + u16 wphys_target_id; + + union { + struct atto_vda_ae_lu_tgt_lun tgtlun; + struct atto_vda_ae_lu_tgt_lun_raid tgtlun_raid; + } id; +}; + + +struct __packed atto_vda_ae_disk { + struct atto_vda_ae_hdr hdr; +}; + + +#define VDAAE_LOG_STRSZ 64 + +struct __packed atto_vda_ae_log { + struct atto_vda_ae_hdr hdr; + char aclog_ascii[VDAAE_LOG_STRSZ]; +}; + + +#define VDAAE_TLG_STRSZ 56 + +struct __packed atto_vda_ae_timestamp_log { + struct atto_vda_ae_hdr hdr; + u32 dwtimestamp; + char aclog_ascii[VDAAE_TLG_STRSZ]; +}; + + +struct __packed atto_vda_ae_nvc { + struct atto_vda_ae_hdr hdr; +}; + + +struct __packed atto_vda_ae_dev { + struct atto_vda_ae_hdr hdr; + struct atto_dev_addr devaddr; +}; + + +union atto_vda_ae { + struct atto_vda_ae_hdr hdr; + struct atto_vda_ae_disk disk; + struct atto_vda_ae_lu lu; + struct atto_vda_ae_raid raid; + struct atto_vda_ae_log log; + struct atto_vda_ae_timestamp_log tslog; + struct atto_vda_ae_nvc nvcache; + struct atto_vda_ae_dev dev; +}; + + +struct __packed atto_vda_date_and_time { + u8 flags; + #define VDA_DT_DAY_MASK 0x07 + #define VDA_DT_DAY_NONE 0x00 + #define VDA_DT_DAY_SUN 0x01 + #define VDA_DT_DAY_MON 0x02 + #define VDA_DT_DAY_TUE 0x03 + #define VDA_DT_DAY_WED 0x04 + #define VDA_DT_DAY_THU 0x05 + #define VDA_DT_DAY_FRI 0x06 + #define VDA_DT_DAY_SAT 0x07 + #define VDA_DT_PM 0x40 + #define VDA_DT_MILITARY 0x80 + + u8 seconds; + u8 minutes; + u8 hours; + u8 day; + u8 month; + u16 year; +}; + +#define SGE_LEN_LIMIT 0x003FFFFF /*! mask of segment length */ +#define SGE_LEN_MAX 0x003FF000 /*! maximum segment length */ +#define SGE_LAST 0x01000000 /*! last entry */ +#define SGE_ADDR_64 0x04000000 /*! 64-bit addressing flag */ +#define SGE_CHAIN 0x80000000 /*! chain descriptor flag */ +#define SGE_CHAIN_LEN 0x0000FFFF /*! mask of length in chain entries */ +#define SGE_CHAIN_SZ 0x00FF0000 /*! mask of size of chained buffer */ + + +struct __packed atto_vda_cfg_init { + struct atto_vda_date_and_time date_time; + u32 sgl_page_size; + u32 vda_version; + u32 fw_version; + u32 fw_build; + u32 fw_release; + u32 epoch_time; + u32 ioctl_tunnel; + #define VDA_ITF_MEM_RW 0x00000001 + #define VDA_ITF_TRACE 0x00000002 + #define VDA_ITF_SCSI_PASS_THRU 0x00000004 + #define VDA_ITF_GET_DEV_ADDR 0x00000008 + #define VDA_ITF_PHY_CTRL 0x00000010 + #define VDA_ITF_CONN_CTRL 0x00000020 + #define VDA_ITF_GET_DEV_INFO 0x00000040 + + u32 num_targets_backend; + u8 reserved[0x48]; +}; + + +/* configuration commands */ + +#define VDA_CFG_INIT 0x00 +#define VDA_CFG_GET_INIT 0x01 +#define VDA_CFG_GET_INIT2 0x02 + + +/*! physical region descriptor (PRD) aka scatter/gather entry */ + +struct __packed atto_physical_region_description { + u64 address; + u32 ctl_len; + #define PRD_LEN_LIMIT 0x003FFFFF + #define PRD_LEN_MAX 0x003FF000 + #define PRD_NXT_PRD_CNT 0x0000007F + #define PRD_CHAIN 0x01000000 + #define PRD_DATA 0x00000000 + #define PRD_INT_SEL 0xF0000000 + #define PRD_INT_SEL_F0 0x00000000 + #define PRD_INT_SEL_F1 0x40000000 + #define PRD_INT_SEL_F2 0x80000000 + #define PRD_INT_SEL_F3 0xc0000000 + #define PRD_INT_SEL_SRAM 0x10000000 + #define PRD_INT_SEL_PBSR 0x20000000 + +}; + +/* Request types. NOTE that ALL requests have the same layout for the first + * few bytes. 
+ */ +struct __packed atto_vda_req_header { + u32 length; + u8 function; + u8 variable1; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; +}; + + +#define FCP_CDB_SIZE 16 + +struct __packed atto_vda_scsi_req { + u32 length; + u8 function; /* VDA_FUNC_SCSI */ + u8 sense_len; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + u32 flags; + #define FCP_CMND_LUN_MASK 0x000000FF + #define FCP_CMND_TA_MASK 0x00000700 + #define FCP_CMND_TA_SIMPL_Q 0x00000000 + #define FCP_CMND_TA_HEAD_Q 0x00000100 + #define FCP_CMND_TA_ORDRD_Q 0x00000200 + #define FCP_CMND_TA_ACA 0x00000400 + #define FCP_CMND_PRI_MASK 0x00007800 + #define FCP_CMND_TM_MASK 0x00FF0000 + #define FCP_CMND_ATS 0x00020000 + #define FCP_CMND_CTS 0x00040000 + #define FCP_CMND_LRS 0x00100000 + #define FCP_CMND_TRS 0x00200000 + #define FCP_CMND_CLA 0x00400000 + #define FCP_CMND_TRM 0x00800000 + #define FCP_CMND_DATA_DIR 0x03000000 + #define FCP_CMND_WRD 0x01000000 + #define FCP_CMND_RDD 0x02000000 + + u8 cdb[FCP_CDB_SIZE]; + union { + struct __packed { + u64 ppsense_buf; + u16 target_id; + u8 iblk_cnt_prd; + u8 reserved; + }; + + struct atto_physical_region_description sense_buff_prd; + }; + + union { + struct atto_vda_sge sge[1]; + + u32 abort_handle; + u32 dwords[245]; + struct atto_physical_region_description prd[1]; + } u; +}; + + +struct __packed atto_vda_flash_req { + u32 length; + u8 function; /* VDA_FUNC_FLASH */ + u8 sub_func; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + u32 flash_addr; + u8 checksum; + u8 rsvd[3]; + + union { + struct { + char file_name[16]; /* 8.3 fname, NULL term, wc=* */ + struct atto_vda_sge sge[1]; + } file; + + struct atto_vda_sge sge[1]; + struct atto_physical_region_description prde[2]; + } data; +}; + + +struct __packed atto_vda_diag_req { + u32 length; + u8 function; /* VDA_FUNC_DIAG */ + u8 sub_func; + #define VDA_DIAG_STATUS 0x00 + #define VDA_DIAG_RESET 0x01 + #define VDA_DIAG_PAUSE 0x02 + #define VDA_DIAG_RESUME 0x03 + #define VDA_DIAG_READ 0x04 + #define VDA_DIAG_WRITE 0x05 + + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + u32 rsvd; + u64 local_addr; + struct atto_vda_sge sge[1]; +}; + + +struct __packed atto_vda_ae_req { + u32 length; + u8 function; /* VDA_FUNC_AE */ + u8 reserved1; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + + union { + struct atto_vda_sge sge[1]; + struct atto_physical_region_description prde[1]; + }; +}; + + +struct __packed atto_vda_cli_req { + u32 length; + u8 function; /* VDA_FUNC_CLI */ + u8 reserved1; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + u32 cmd_rsp_len; + struct atto_vda_sge sge[1]; +}; + + +struct __packed atto_vda_ioctl_req { + u32 length; + u8 function; /* VDA_FUNC_IOCTL */ + u8 sub_func; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + + union { + struct atto_vda_sge reserved_sge; + struct atto_physical_region_description reserved_prde; + }; + + union { + struct { + u32 ctrl_code; + u16 target_id; + u8 lun; + u8 reserved; + } csmi; + }; + + union { + struct atto_vda_sge sge[1]; + struct atto_physical_region_description prde[1]; + }; +}; + + +struct __packed atto_vda_cfg_req { + u32 length; + u8 function; /* VDA_FUNC_CFG */ + u8 sub_func; + u8 rsvd1; + u8 sg_list_offset; + u32 handle; + + union { + u8 bytes[116]; + struct atto_vda_cfg_init init; + struct atto_vda_sge sge; + struct atto_physical_region_description prde; + } data; +}; + + +struct __packed atto_vda_mgmt_req { + u32 length; + u8 function; /* VDA_FUNC_MGT */ + u8 mgt_func; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + u8 
scan_generation; + u8 payld_sglst_offset; + u16 dev_index; + u32 payld_length; + u32 pad; + union { + struct atto_vda_sge sge[2]; + struct atto_physical_region_description prde[2]; + }; + struct atto_vda_sge payld_sge[1]; +}; + + +union atto_vda_req { + struct atto_vda_scsi_req scsi; + struct atto_vda_flash_req flash; + struct atto_vda_diag_req diag; + struct atto_vda_ae_req ae; + struct atto_vda_cli_req cli; + struct atto_vda_ioctl_req ioctl; + struct atto_vda_cfg_req cfg; + struct atto_vda_mgmt_req mgt; + u8 bytes[1024]; +}; + +/* Outbound response structures */ + +struct __packed atto_vda_scsi_rsp { + u8 scsi_stat; + u8 sense_len; + u8 rsvd[2]; + u32 residual_length; +}; + +struct __packed atto_vda_flash_rsp { + u32 file_size; +}; + +struct __packed atto_vda_ae_rsp { + u32 length; +}; + +struct __packed atto_vda_cli_rsp { + u32 cmd_rsp_len; +}; + +struct __packed atto_vda_ioctl_rsp { + union { + struct { + u32 csmi_status; + u16 target_id; + u8 lun; + u8 reserved; + } csmi; + }; +}; + +struct __packed atto_vda_cfg_rsp { + u16 vda_version; + u16 fw_release; + u32 fw_build; +}; + +struct __packed atto_vda_mgmt_rsp { + u32 length; + u16 dev_index; + u8 scan_generation; +}; + +union atto_vda_func_rsp { + struct atto_vda_scsi_rsp scsi_rsp; + struct atto_vda_flash_rsp flash_rsp; + struct atto_vda_ae_rsp ae_rsp; + struct atto_vda_cli_rsp cli_rsp; + struct atto_vda_ioctl_rsp ioctl_rsp; + struct atto_vda_cfg_rsp cfg_rsp; + struct atto_vda_mgmt_rsp mgt_rsp; + u32 dwords[2]; +}; + +struct __packed atto_vda_ob_rsp { + u32 handle; + u8 req_stat; + u8 rsvd[3]; + + union atto_vda_func_rsp + func_rsp; +}; + +struct __packed atto_vda_ae_data { + u8 event_data[256]; +}; + +struct __packed atto_vda_mgmt_data { + union { + u8 bytes[112]; + struct atto_vda_devinfo dev_info; + struct atto_vda_grp_info grp_info; + struct atto_vdapart_info part_info; + struct atto_vda_dh_info dev_health_info; + struct atto_vda_metrics_info metrics_info; + struct atto_vda_schedule_info sched_info; + struct atto_vda_n_vcache_info nvcache_info; + struct atto_vda_buzzer_info buzzer_info; + } data; +}; + +union atto_vda_rsp_data { + struct atto_vda_ae_data ae_data; + struct atto_vda_mgmt_data mgt_data; + u8 sense_data[252]; + #define SENSE_DATA_SZ 252; + u8 bytes[256]; +}; + +#endif diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h new file mode 100644 index 000000000..ed63f7a9e --- /dev/null +++ b/drivers/scsi/esas2r/esas2r.h @@ -0,0 +1,1426 @@ +/* + * linux/drivers/scsi/esas2r/esas2r.h + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "esas2r_log.h" +#include "atioctl.h" +#include "atvda.h" + +#ifndef ESAS2R_H +#define ESAS2R_H + +/* Global Variables */ +extern struct esas2r_adapter *esas2r_adapters[]; +extern u8 *esas2r_buffered_ioctl; +extern dma_addr_t esas2r_buffered_ioctl_addr; +extern u32 esas2r_buffered_ioctl_size; +extern struct pci_dev *esas2r_buffered_ioctl_pcid; +#define SGL_PG_SZ_MIN 64 +#define SGL_PG_SZ_MAX 1024 +extern int sgl_page_size; +#define NUM_SGL_MIN 8 +#define NUM_SGL_MAX 2048 +extern int num_sg_lists; +#define NUM_REQ_MIN 4 +#define NUM_REQ_MAX 256 +extern int num_requests; +#define NUM_AE_MIN 2 +#define NUM_AE_MAX 8 +extern int num_ae_requests; +extern int cmd_per_lun; +extern int can_queue; +extern int esas2r_max_sectors; +extern int sg_tablesize; +extern int interrupt_mode; +extern int num_io_requests; + +/* Macro defintions */ +#define ESAS2R_MAX_ID 255 +#define MAX_ADAPTERS 32 +#define ESAS2R_DRVR_NAME "esas2r" +#define ESAS2R_LONGNAME "ATTO ExpressSAS 6GB RAID Adapter" +#define ESAS2R_MAX_DEVICES 32 +#define ATTONODE_NAME "ATTONode" +#define ESAS2R_MAJOR_REV 1 +#define ESAS2R_MINOR_REV 00 +#define ESAS2R_VERSION_STR DEFINED_NUM_TO_STR(ESAS2R_MAJOR_REV) "." 
\ + DEFINED_NUM_TO_STR(ESAS2R_MINOR_REV) +#define ESAS2R_COPYRIGHT_YEARS "2001-2013" +#define ESAS2R_DEFAULT_SGL_PAGE_SIZE 384 +#define ESAS2R_DEFAULT_CMD_PER_LUN 64 +#define ESAS2R_DEFAULT_NUM_SG_LISTS 1024 +#define DEFINED_NUM_TO_STR(num) NUM_TO_STR(num) +#define NUM_TO_STR(num) #num + +#define ESAS2R_SGL_ALIGN 16 +#define ESAS2R_LIST_ALIGN 16 +#define ESAS2R_LIST_EXTRA ESAS2R_NUM_EXTRA +#define ESAS2R_DATA_BUF_LEN 256 +#define ESAS2R_DEFAULT_TMO 5000 +#define ESAS2R_DISC_BUF_LEN 512 +#define ESAS2R_FWCOREDUMP_SZ 0x80000 +#define ESAS2R_NUM_PHYS 8 +#define ESAS2R_TARG_ID_INV 0xFFFF +#define ESAS2R_INT_STS_MASK MU_INTSTAT_MASK +#define ESAS2R_INT_ENB_MASK MU_INTSTAT_MASK +#define ESAS2R_INT_DIS_MASK 0 +#define ESAS2R_MAX_TARGETS 256 +#define ESAS2R_KOBJ_NAME_LEN 20 + +/* u16 (WORD) component macros */ +#define LOBYTE(w) ((u8)(u16)(w)) +#define HIBYTE(w) ((u8)(((u16)(w)) >> 8)) +#define MAKEWORD(lo, hi) ((u16)((u8)(lo) | ((u16)(u8)(hi) << 8))) + +/* u32 (DWORD) component macros */ +#define LOWORD(d) ((u16)(u32)(d)) +#define HIWORD(d) ((u16)(((u32)(d)) >> 16)) +#define MAKEDWORD(lo, hi) ((u32)((u16)(lo) | ((u32)(u16)(hi) << 16))) + +/* macro to get the lowest nonzero bit of a value */ +#define LOBIT(x) ((x) & (0 - (x))) + +/* These functions are provided to access the chip's control registers. + * The register is specified by its byte offset from the register base + * for the adapter. + */ +#define esas2r_read_register_dword(a, reg) \ + readl((void __iomem *)a->regs + (reg) + MW_REG_OFFSET_HWREG) + +#define esas2r_write_register_dword(a, reg, data) \ + writel(data, (void __iomem *)(a->regs + (reg) + MW_REG_OFFSET_HWREG)) + +#define esas2r_flush_register_dword(a, r) esas2r_read_register_dword(a, r) + +/* This function is provided to access the chip's data window. The + * register is specified by its byte offset from the window base + * for the adapter. + */ +#define esas2r_read_data_byte(a, reg) \ + readb((void __iomem *)a->data_window + (reg)) + +/* ATTO vendor and device Ids */ +#define ATTO_VENDOR_ID 0x117C +#define ATTO_DID_INTEL_IOP348 0x002C +#define ATTO_DID_MV_88RC9580 0x0049 +#define ATTO_DID_MV_88RC9580TS 0x0066 +#define ATTO_DID_MV_88RC9580TSE 0x0067 +#define ATTO_DID_MV_88RC9580TL 0x0068 + +/* ATTO subsystem device Ids */ +#define ATTO_SSDID_TBT 0x4000 +#define ATTO_TSSC_3808 0x4066 +#define ATTO_TSSC_3808E 0x4067 +#define ATTO_TLSH_1068 0x4068 +#define ATTO_ESAS_R680 0x0049 +#define ATTO_ESAS_R608 0x004A +#define ATTO_ESAS_R60F 0x004B +#define ATTO_ESAS_R6F0 0x004C +#define ATTO_ESAS_R644 0x004D +#define ATTO_ESAS_R648 0x004E + +/* + * flash definitions & structures + * define the code types + */ +#define FBT_CPYR 0xAA00 +#define FBT_SETUP 0xAA02 +#define FBT_FLASH_VER 0xAA04 + +/* offsets to various locations in flash */ +#define FLS_OFFSET_BOOT (u32)(0x00700000) +#define FLS_OFFSET_NVR (u32)(0x007C0000) +#define FLS_OFFSET_CPYR FLS_OFFSET_NVR +#define FLS_LENGTH_BOOT (FLS_OFFSET_CPYR - FLS_OFFSET_BOOT) +#define FLS_BLOCK_SIZE (u32)(0x00020000) +#define FI_NVR_2KB 0x0800 +#define FI_NVR_8KB 0x2000 +#define FM_BUF_SZ 0x800 + +/* + * marvell frey (88R9580) register definitions + * chip revision identifiers + */ +#define MVR_FREY_B2 0xB2 + +/* + * memory window definitions. window 0 is the data window with definitions + * of MW_DATA_XXX. window 1 is the register window with definitions of + * MW_REG_XXX. 
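esas2r_flush_register_dword() above is simply a read of the same register; reading back after a write is the usual way to force a posted MMIO write out to the chip before continuing. A minimal usage sketch follows. The helper name is hypothetical, and it assumes it would live somewhere after struct esas2r_adapter and the MW_REG_* offsets (both defined further down) are visible.

/* Hypothetical sketch: write a register, then read it back so the posted
 * write reaches the adapter before the caller proceeds. */
static u32 example_write_and_flush(struct esas2r_adapter *a, u32 reg, u32 val)
{
	esas2r_write_register_dword(a, reg, val);
	return esas2r_flush_register_dword(a, reg);	/* read-back flushes the write */
}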
+ */ +#define MW_REG_WINDOW_SIZE (u32)(0x00040000) +#define MW_REG_OFFSET_HWREG (u32)(0x00000000) +#define MW_REG_OFFSET_PCI (u32)(0x00008000) +#define MW_REG_PCI_HWREG_DELTA (MW_REG_OFFSET_PCI - MW_REG_OFFSET_HWREG) +#define MW_DATA_WINDOW_SIZE (u32)(0x00020000) +#define MW_DATA_ADDR_SER_FLASH (u32)(0xEC000000) +#define MW_DATA_ADDR_SRAM (u32)(0xF4000000) +#define MW_DATA_ADDR_PAR_FLASH (u32)(0xFC000000) + +/* + * the following registers are for the communication + * list interface (AKA message unit (MU)) + */ +#define MU_IN_LIST_ADDR_LO (u32)(0x00004000) +#define MU_IN_LIST_ADDR_HI (u32)(0x00004004) + +#define MU_IN_LIST_WRITE (u32)(0x00004018) + #define MU_ILW_TOGGLE (u32)(0x00004000) + +#define MU_IN_LIST_READ (u32)(0x0000401C) + #define MU_ILR_TOGGLE (u32)(0x00004000) + #define MU_ILIC_LIST (u32)(0x0000000F) + #define MU_ILIC_LIST_F0 (u32)(0x00000000) + #define MU_ILIC_DEST (u32)(0x00000F00) + #define MU_ILIC_DEST_DDR (u32)(0x00000200) +#define MU_IN_LIST_IFC_CONFIG (u32)(0x00004028) + +#define MU_IN_LIST_CONFIG (u32)(0x0000402C) + #define MU_ILC_ENABLE (u32)(0x00000001) + #define MU_ILC_ENTRY_MASK (u32)(0x000000F0) + #define MU_ILC_ENTRY_4_DW (u32)(0x00000020) + #define MU_ILC_DYNAMIC_SRC (u32)(0x00008000) + #define MU_ILC_NUMBER_MASK (u32)(0x7FFF0000) + #define MU_ILC_NUMBER_SHIFT 16 + +#define MU_OUT_LIST_ADDR_LO (u32)(0x00004050) +#define MU_OUT_LIST_ADDR_HI (u32)(0x00004054) + +#define MU_OUT_LIST_COPY_PTR_LO (u32)(0x00004058) +#define MU_OUT_LIST_COPY_PTR_HI (u32)(0x0000405C) + +#define MU_OUT_LIST_WRITE (u32)(0x00004068) + #define MU_OLW_TOGGLE (u32)(0x00004000) + +#define MU_OUT_LIST_COPY (u32)(0x0000406C) + #define MU_OLC_TOGGLE (u32)(0x00004000) + #define MU_OLC_WRT_PTR (u32)(0x00003FFF) + +#define MU_OUT_LIST_IFC_CONFIG (u32)(0x00004078) + #define MU_OLIC_LIST (u32)(0x0000000F) + #define MU_OLIC_LIST_F0 (u32)(0x00000000) + #define MU_OLIC_SOURCE (u32)(0x00000F00) + #define MU_OLIC_SOURCE_DDR (u32)(0x00000200) + +#define MU_OUT_LIST_CONFIG (u32)(0x0000407C) + #define MU_OLC_ENABLE (u32)(0x00000001) + #define MU_OLC_ENTRY_MASK (u32)(0x000000F0) + #define MU_OLC_ENTRY_4_DW (u32)(0x00000020) + #define MU_OLC_NUMBER_MASK (u32)(0x7FFF0000) + #define MU_OLC_NUMBER_SHIFT 16 + +#define MU_OUT_LIST_INT_STAT (u32)(0x00004088) + #define MU_OLIS_INT (u32)(0x00000001) + +#define MU_OUT_LIST_INT_MASK (u32)(0x0000408C) + #define MU_OLIS_MASK (u32)(0x00000001) + +/* + * the maximum size of the communication lists is two greater than the + * maximum amount of VDA requests. the extra are to prevent queue overflow. 
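The MU_ILC_* and MU_OLC_* values above describe bit fields within the list configuration registers: an enable bit, an entry-size field, and a masked, shifted entry count. Purely as an illustration of how such a word might be composed (the field meanings are inferred from the macro names, not from register documentation, and the helper is hypothetical):

/* Hypothetical sketch: build an inbound-list configuration word from the
 * MU_ILC_* fields defined above. */
static u32 example_in_list_config(u32 num_entries)
{
	u32 cfg = MU_ILC_ENABLE | MU_ILC_ENTRY_4_DW;	/* enabled, 4-dword entries */

	cfg |= (num_entries << MU_ILC_NUMBER_SHIFT) & MU_ILC_NUMBER_MASK;
	return cfg;
}

A caller would presumably pass the communication list size defined just below as num_entries.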
+ */ +#define ESAS2R_MAX_NUM_REQS 256 +#define ESAS2R_NUM_EXTRA 2 +#define ESAS2R_MAX_COMM_LIST_SIZE (ESAS2R_MAX_NUM_REQS + ESAS2R_NUM_EXTRA) + +/* + * the following registers are for the CPU interface + */ +#define MU_CTL_STATUS_IN (u32)(0x00010108) + #define MU_CTL_IN_FULL_RST (u32)(0x00000020) +#define MU_CTL_STATUS_IN_B2 (u32)(0x00010130) + #define MU_CTL_IN_FULL_RST2 (u32)(0x80000000) +#define MU_DOORBELL_IN (u32)(0x00010460) + #define DRBL_RESET_BUS (u32)(0x00000002) + #define DRBL_PAUSE_AE (u32)(0x00000004) + #define DRBL_RESUME_AE (u32)(0x00000008) + #define DRBL_MSG_IFC_DOWN (u32)(0x00000010) + #define DRBL_FLASH_REQ (u32)(0x00000020) + #define DRBL_FLASH_DONE (u32)(0x00000040) + #define DRBL_FORCE_INT (u32)(0x00000080) + #define DRBL_MSG_IFC_INIT (u32)(0x00000100) + #define DRBL_POWER_DOWN (u32)(0x00000200) + #define DRBL_DRV_VER_1 (u32)(0x00010000) + #define DRBL_DRV_VER DRBL_DRV_VER_1 +#define MU_DOORBELL_IN_ENB (u32)(0x00010464) +#define MU_DOORBELL_OUT (u32)(0x00010480) + #define DRBL_PANIC_REASON_MASK (u32)(0x00F00000) + #define DRBL_UNUSED_HANDLER (u32)(0x00100000) + #define DRBL_UNDEF_INSTR (u32)(0x00200000) + #define DRBL_PREFETCH_ABORT (u32)(0x00300000) + #define DRBL_DATA_ABORT (u32)(0x00400000) + #define DRBL_JUMP_TO_ZERO (u32)(0x00500000) + #define DRBL_FW_RESET (u32)(0x00080000) + #define DRBL_FW_VER_MSK (u32)(0x00070000) + #define DRBL_FW_VER_0 (u32)(0x00000000) + #define DRBL_FW_VER_1 (u32)(0x00010000) + #define DRBL_FW_VER DRBL_FW_VER_1 +#define MU_DOORBELL_OUT_ENB (u32)(0x00010484) + #define DRBL_ENB_MASK (u32)(0x00F803FF) +#define MU_INT_STATUS_OUT (u32)(0x00010200) + #define MU_INTSTAT_POST_OUT (u32)(0x00000010) + #define MU_INTSTAT_DRBL_IN (u32)(0x00000100) + #define MU_INTSTAT_DRBL (u32)(0x00001000) + #define MU_INTSTAT_MASK (u32)(0x00001010) +#define MU_INT_MASK_OUT (u32)(0x0001020C) + +/* PCI express registers accessed via window 1 */ +#define MVR_PCI_WIN1_REMAP (u32)(0x00008438) + #define MVRPW1R_ENABLE (u32)(0x00000001) + + +/* structures */ + +/* inbound list dynamic source entry */ +struct esas2r_inbound_list_source_entry { + u64 address; + u32 length; + #define HWILSE_INTERFACE_F0 0x00000000 + u32 reserved; +}; + +/* PCI data structure in expansion ROM images */ +struct __packed esas2r_boot_header { + char signature[4]; + u16 vendor_id; + u16 device_id; + u16 VPD; + u16 struct_length; + u8 struct_revision; + u8 class_code[3]; + u16 image_length; + u16 code_revision; + u8 code_type; + #define CODE_TYPE_PC 0 + #define CODE_TYPE_OPEN 1 + #define CODE_TYPE_EFI 3 + u8 indicator; + #define INDICATOR_LAST 0x80 + u8 reserved[2]; +}; + +struct __packed esas2r_boot_image { + u16 signature; + u8 reserved[22]; + u16 header_offset; + u16 pnp_offset; +}; + +struct __packed esas2r_pc_image { + u16 signature; + u8 length; + u8 entry_point[3]; + u8 checksum; + u16 image_end; + u16 min_size; + u8 rom_flags; + u8 reserved[12]; + u16 header_offset; + u16 pnp_offset; + struct esas2r_boot_header boot_image; +}; + +struct __packed esas2r_efi_image { + u16 signature; + u16 length; + u32 efi_signature; + #define EFI_ROM_SIG 0x00000EF1 + u16 image_type; + #define EFI_IMAGE_APP 10 + #define EFI_IMAGE_BSD 11 + #define EFI_IMAGE_RTD 12 + u16 machine_type; + #define EFI_MACHINE_IA32 0x014c + #define EFI_MACHINE_IA64 0x0200 + #define EFI_MACHINE_X64 0x8664 + #define EFI_MACHINE_EBC 0x0EBC + u16 compression; + #define EFI_UNCOMPRESSED 0x0000 + #define EFI_COMPRESSED 0x0001 + u8 reserved[8]; + u16 efi_offset; + u16 header_offset; + u16 reserved2; + struct esas2r_boot_header 
boot_image; +}; + +struct esas2r_adapter; +struct esas2r_sg_context; +struct esas2r_request; + +typedef void (*RQCALLBK) (struct esas2r_adapter *a, + struct esas2r_request *rq); +typedef bool (*RQBUILDSGL) (struct esas2r_adapter *a, + struct esas2r_sg_context *sgc); + +struct esas2r_component_header { + u8 img_type; + #define CH_IT_FW 0x00 + #define CH_IT_NVR 0x01 + #define CH_IT_BIOS 0x02 + #define CH_IT_MAC 0x03 + #define CH_IT_CFG 0x04 + #define CH_IT_EFI 0x05 + u8 status; + #define CH_STAT_PENDING 0xff + #define CH_STAT_FAILED 0x00 + #define CH_STAT_SUCCESS 0x01 + #define CH_STAT_RETRY 0x02 + #define CH_STAT_INVALID 0x03 + u8 pad[2]; + u32 version; + u32 length; + u32 image_offset; +}; + +#define FI_REL_VER_SZ 16 + +struct esas2r_flash_img_v0 { + u8 fi_version; + #define FI_VERSION_0 00 + u8 status; + u8 adap_typ; + u8 action; + u32 length; + u16 checksum; + u16 driver_error; + u16 flags; + u16 num_comps; + #define FI_NUM_COMPS_V0 5 + u8 rel_version[FI_REL_VER_SZ]; + struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V0]; + u8 scratch_buf[FM_BUF_SZ]; +}; + +struct esas2r_flash_img { + u8 fi_version; + #define FI_VERSION_1 01 + u8 status; + #define FI_STAT_SUCCESS 0x00 + #define FI_STAT_FAILED 0x01 + #define FI_STAT_REBOOT 0x02 + #define FI_STAT_ADAPTYP 0x03 + #define FI_STAT_INVALID 0x04 + #define FI_STAT_CHKSUM 0x05 + #define FI_STAT_LENGTH 0x06 + #define FI_STAT_UNKNOWN 0x07 + #define FI_STAT_IMG_VER 0x08 + #define FI_STAT_BUSY 0x09 + #define FI_STAT_DUAL 0x0A + #define FI_STAT_MISSING 0x0B + #define FI_STAT_UNSUPP 0x0C + #define FI_STAT_ERASE 0x0D + #define FI_STAT_FLASH 0x0E + #define FI_STAT_DEGRADED 0x0F + u8 adap_typ; + #define FI_AT_UNKNWN 0xFF + #define FI_AT_SUN_LAKE 0x0B + #define FI_AT_MV_9580 0x0F + u8 action; + #define FI_ACT_DOWN 0x00 + #define FI_ACT_UP 0x01 + #define FI_ACT_UPSZ 0x02 + #define FI_ACT_MAX 0x02 + #define FI_ACT_DOWN1 0x80 + u32 length; + u16 checksum; + u16 driver_error; + u16 flags; + #define FI_FLG_NVR_DEF 0x0001 + u16 num_comps; + #define FI_NUM_COMPS_V1 6 + u8 rel_version[FI_REL_VER_SZ]; + struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V1]; + u8 scratch_buf[FM_BUF_SZ]; +}; + +/* definitions for flash script (FS) commands */ +struct esas2r_ioctlfs_command { + u8 command; + #define ESAS2R_FS_CMD_ERASE 0 + #define ESAS2R_FS_CMD_READ 1 + #define ESAS2R_FS_CMD_BEGINW 2 + #define ESAS2R_FS_CMD_WRITE 3 + #define ESAS2R_FS_CMD_COMMIT 4 + #define ESAS2R_FS_CMD_CANCEL 5 + u8 checksum; + u8 reserved[2]; + u32 flash_addr; + u32 length; + u32 image_offset; +}; + +struct esas2r_ioctl_fs { + u8 version; + #define ESAS2R_FS_VER 0 + u8 status; + u8 driver_error; + u8 adap_type; + #define ESAS2R_FS_AT_ESASRAID2 3 + #define ESAS2R_FS_AT_TSSASRAID2 4 + #define ESAS2R_FS_AT_TSSASRAID2E 5 + #define ESAS2R_FS_AT_TLSASHBA 6 + u8 driver_ver; + u8 reserved[11]; + struct esas2r_ioctlfs_command command; + u8 data[1]; +}; + +struct esas2r_sas_nvram { + u8 signature[4]; + u8 version; + #define SASNVR_VERSION_0 0x00 + #define SASNVR_VERSION SASNVR_VERSION_0 + u8 checksum; + #define SASNVR_CKSUM_SEED 0x5A + u8 max_lun_for_target; + u8 pci_latency; + #define SASNVR_PCILAT_DIS 0x00 + #define SASNVR_PCILAT_MIN 0x10 + #define SASNVR_PCILAT_MAX 0xF8 + u8 options1; + #define SASNVR1_BOOT_DRVR 0x01 + #define SASNVR1_BOOT_SCAN 0x02 + #define SASNVR1_DIS_PCI_MWI 0x04 + #define SASNVR1_FORCE_ORD_Q 0x08 + #define SASNVR1_CACHELINE_0 0x10 + #define SASNVR1_DIS_DEVSORT 0x20 + #define SASNVR1_PWR_MGT_EN 0x40 + #define SASNVR1_WIDEPORT 0x80 + u8 options2; + #define SASNVR2_SINGLE_BUS 0x01 + 
#define SASNVR2_SLOT_BIND 0x02 + #define SASNVR2_EXP_PROG 0x04 + #define SASNVR2_CMDTHR_LUN 0x08 + #define SASNVR2_HEARTBEAT 0x10 + #define SASNVR2_INT_CONNECT 0x20 + #define SASNVR2_SW_MUX_CTRL 0x40 + #define SASNVR2_DISABLE_NCQ 0x80 + u8 int_coalescing; + #define SASNVR_COAL_DIS 0x00 + #define SASNVR_COAL_LOW 0x01 + #define SASNVR_COAL_MED 0x02 + #define SASNVR_COAL_HI 0x03 + u8 cmd_throttle; + #define SASNVR_CMDTHR_NONE 0x00 + u8 dev_wait_time; + u8 dev_wait_count; + u8 spin_up_delay; + #define SASNVR_SPINUP_MAX 0x14 + u8 ssp_align_rate; + u8 sas_addr[8]; + u8 phy_speed[16]; + #define SASNVR_SPEED_AUTO 0x00 + #define SASNVR_SPEED_1_5GB 0x01 + #define SASNVR_SPEED_3GB 0x02 + #define SASNVR_SPEED_6GB 0x03 + #define SASNVR_SPEED_12GB 0x04 + u8 phy_mux[16]; + #define SASNVR_MUX_DISABLED 0x00 + #define SASNVR_MUX_1_5GB 0x01 + #define SASNVR_MUX_3GB 0x02 + #define SASNVR_MUX_6GB 0x03 + u8 phy_flags[16]; + #define SASNVR_PHF_DISABLED 0x01 + #define SASNVR_PHF_RD_ONLY 0x02 + u8 sort_type; + #define SASNVR_SORT_SAS_ADDR 0x00 + #define SASNVR_SORT_H308_CONN 0x01 + #define SASNVR_SORT_PHY_ID 0x02 + #define SASNVR_SORT_SLOT_ID 0x03 + u8 dpm_reqcmd_lmt; + u8 dpm_stndby_time; + u8 dpm_active_time; + u8 phy_target_id[16]; + #define SASNVR_PTI_DISABLED 0xFF + u8 virt_ses_mode; + #define SASNVR_VSMH_DISABLED 0x00 + u8 read_write_mode; + #define SASNVR_RWM_DEFAULT 0x00 + u8 link_down_to; + u8 reserved[0xA1]; +}; + +typedef u32 (*PGETPHYSADDR) (struct esas2r_sg_context *sgc, u64 *addr); + +struct esas2r_sg_context { + struct esas2r_adapter *adapter; + struct esas2r_request *first_req; + u32 length; + u8 *cur_offset; + PGETPHYSADDR get_phys_addr; + union { + struct { + struct atto_vda_sge *curr; + struct atto_vda_sge *last; + struct atto_vda_sge *limit; + struct atto_vda_sge *chain; + } a64; + struct { + struct atto_physical_region_description *curr; + struct atto_physical_region_description *chain; + u32 sgl_max_cnt; + u32 sge_cnt; + } prd; + } sge; + struct scatterlist *cur_sgel; + u8 *exp_offset; + int num_sgel; + int sgel_count; +}; + +struct esas2r_target { + u8 flags; + #define TF_PASS_THRU 0x01 + #define TF_USED 0x02 + u8 new_target_state; + u8 target_state; + u8 buffered_target_state; +#define TS_NOT_PRESENT 0x00 +#define TS_PRESENT 0x05 +#define TS_LUN_CHANGE 0x06 +#define TS_INVALID 0xFF + u32 block_size; + u32 inter_block; + u32 inter_byte; + u16 virt_targ_id; + u16 phys_targ_id; + u8 identifier_len; + u64 sas_addr; + u8 identifier[60]; + struct atto_vda_ae_lu lu_event; +}; + +struct esas2r_request { + struct list_head comp_list; + struct list_head req_list; + union atto_vda_req *vrq; + struct esas2r_mem_desc *vrq_md; + union { + void *data_buf; + union atto_vda_rsp_data *vda_rsp_data; + }; + u8 *sense_buf; + struct list_head sg_table_head; + struct esas2r_mem_desc *sg_table; + u32 timeout; + #define RQ_TIMEOUT_S1 0xFFFFFFFF + #define RQ_TIMEOUT_S2 0xFFFFFFFE + #define RQ_MAX_TIMEOUT 0xFFFFFFFD + u16 target_id; + u8 req_type; + #define RT_INI_REQ 0x01 + #define RT_DISC_REQ 0x02 + u8 sense_len; + union atto_vda_func_rsp func_rsp; + RQCALLBK comp_cb; + RQCALLBK interrupt_cb; + void *interrupt_cx; + u8 flags; + #define RF_1ST_IBLK_BASE 0x04 + #define RF_FAILURE_OK 0x08 + u8 req_stat; + u16 vda_req_sz; + #define RQ_SIZE_DEFAULT 0 + u64 lba; + RQCALLBK aux_req_cb; + void *aux_req_cx; + u32 blk_len; + u32 max_blk_len; + union { + struct scsi_cmnd *cmd; + u8 *task_management_status_ptr; + }; +}; + +struct esas2r_flash_context { + struct esas2r_flash_img *fi; + RQCALLBK interrupt_cb; + u8 *sgc_offset; + 
u8 *scratch; + u32 fi_hdr_len; + u8 task; + #define FMTSK_ERASE_BOOT 0 + #define FMTSK_WRTBIOS 1 + #define FMTSK_READBIOS 2 + #define FMTSK_WRTMAC 3 + #define FMTSK_READMAC 4 + #define FMTSK_WRTEFI 5 + #define FMTSK_READEFI 6 + #define FMTSK_WRTCFG 7 + #define FMTSK_READCFG 8 + u8 func; + u16 num_comps; + u32 cmp_len; + u32 flsh_addr; + u32 curr_len; + u8 comp_typ; + struct esas2r_sg_context sgc; +}; + +struct esas2r_disc_context { + u8 disc_evt; + #define DCDE_DEV_CHANGE 0x01 + #define DCDE_DEV_SCAN 0x02 + u8 state; + #define DCS_DEV_RMV 0x00 + #define DCS_DEV_ADD 0x01 + #define DCS_BLOCK_DEV_SCAN 0x02 + #define DCS_RAID_GRP_INFO 0x03 + #define DCS_PART_INFO 0x04 + #define DCS_PT_DEV_INFO 0x05 + #define DCS_PT_DEV_ADDR 0x06 + #define DCS_DISC_DONE 0xFF + u16 flags; + #define DCF_DEV_CHANGE 0x0001 + #define DCF_DEV_SCAN 0x0002 + #define DCF_POLLED 0x8000 + u32 interleave; + u32 block_size; + u16 dev_ix; + u8 part_num; + u8 raid_grp_ix; + char raid_grp_name[16]; + struct esas2r_target *curr_targ; + u16 curr_virt_id; + u16 curr_phys_id; + u8 scan_gen; + u8 dev_addr_type; + u64 sas_addr; +}; + +struct esas2r_mem_desc { + struct list_head next_desc; + void *virt_addr; + u64 phys_addr; + void *pad; + void *esas2r_data; + u32 esas2r_param; + u32 size; +}; + +enum fw_event_type { + fw_event_null, + fw_event_lun_change, + fw_event_present, + fw_event_not_present, + fw_event_vda_ae +}; + +struct esas2r_vda_ae { + u32 signature; +#define ESAS2R_VDA_EVENT_SIG 0x4154544F + u8 bus_number; + u8 devfn; + u8 pad[2]; + union atto_vda_ae vda_ae; +}; + +struct esas2r_fw_event_work { + struct list_head list; + struct delayed_work work; + struct esas2r_adapter *a; + enum fw_event_type type; + u8 data[sizeof(struct esas2r_vda_ae)]; +}; + +enum state { + FW_INVALID_ST, + FW_STATUS_ST, + FW_COMMAND_ST +}; + +struct esas2r_firmware { + enum state state; + struct esas2r_flash_img header; + u8 *data; + u64 phys; + int orig_len; + void *header_buff; + u64 header_buff_phys; +}; + +struct esas2r_adapter { + struct esas2r_target targetdb[ESAS2R_MAX_TARGETS]; + struct esas2r_target *targetdb_end; + unsigned char *regs; + unsigned char *data_window; + long flags; + #define AF_PORT_CHANGE 0 + #define AF_CHPRST_NEEDED 1 + #define AF_CHPRST_PENDING 2 + #define AF_CHPRST_DETECTED 3 + #define AF_BUSRST_NEEDED 4 + #define AF_BUSRST_PENDING 5 + #define AF_BUSRST_DETECTED 6 + #define AF_DISABLED 7 + #define AF_FLASH_LOCK 8 + #define AF_OS_RESET 9 + #define AF_FLASHING 10 + #define AF_POWER_MGT 11 + #define AF_NVR_VALID 12 + #define AF_DEGRADED_MODE 13 + #define AF_DISC_PENDING 14 + #define AF_TASKLET_SCHEDULED 15 + #define AF_HEARTBEAT 16 + #define AF_HEARTBEAT_ENB 17 + #define AF_NOT_PRESENT 18 + #define AF_CHPRST_STARTED 19 + #define AF_FIRST_INIT 20 + #define AF_POWER_DOWN 21 + #define AF_DISC_IN_PROG 22 + #define AF_COMM_LIST_TOGGLE 23 + #define AF_LEGACY_SGE_MODE 24 + #define AF_DISC_POLLED 25 + long flags2; + #define AF2_SERIAL_FLASH 0 + #define AF2_DEV_SCAN 1 + #define AF2_DEV_CNT_OK 2 + #define AF2_COREDUMP_AVAIL 3 + #define AF2_COREDUMP_SAVED 4 + #define AF2_VDA_POWER_DOWN 5 + #define AF2_THUNDERLINK 6 + #define AF2_THUNDERBOLT 7 + #define AF2_INIT_DONE 8 + #define AF2_INT_PENDING 9 + #define AF2_TIMER_TICK 10 + #define AF2_IRQ_CLAIMED 11 + #define AF2_MSI_ENABLED 12 + atomic_t disable_cnt; + atomic_t dis_ints_cnt; + u32 int_stat; + u32 int_mask; + u32 volatile *outbound_copy; + struct list_head avail_request; + spinlock_t request_lock; + spinlock_t sg_list_lock; + spinlock_t queue_lock; + spinlock_t mem_lock; + struct 
list_head free_sg_list_head; + struct esas2r_mem_desc *sg_list_mds; + struct list_head active_list; + struct list_head defer_list; + struct esas2r_request **req_table; + union { + u16 prev_dev_cnt; + u32 heartbeat_time; + #define ESAS2R_HEARTBEAT_TIME (3000) + }; + u32 chip_uptime; + #define ESAS2R_CHP_UPTIME_MAX (60000) + #define ESAS2R_CHP_UPTIME_CNT (20000) + u64 uncached_phys; + u8 *uncached; + struct esas2r_sas_nvram *nvram; + struct esas2r_request general_req; + u8 init_msg; + #define ESAS2R_INIT_MSG_START 1 + #define ESAS2R_INIT_MSG_INIT 2 + #define ESAS2R_INIT_MSG_GET_INIT 3 + #define ESAS2R_INIT_MSG_REINIT 4 + u16 cmd_ref_no; + u32 fw_version; + u32 fw_build; + u32 chip_init_time; + #define ESAS2R_CHPRST_TIME (180000) + #define ESAS2R_CHPRST_WAIT_TIME (2000) + u32 last_tick_time; + u32 window_base; + RQBUILDSGL build_sgl; + struct esas2r_request *first_ae_req; + u32 list_size; + u32 last_write; + u32 last_read; + u16 max_vdareq_size; + u16 disc_wait_cnt; + struct esas2r_mem_desc inbound_list_md; + struct esas2r_mem_desc outbound_list_md; + struct esas2r_disc_context disc_ctx; + u8 *disc_buffer; + u32 disc_start_time; + u32 disc_wait_time; + u32 flash_ver; + char flash_rev[16]; + char fw_rev[16]; + char image_type[16]; + struct esas2r_flash_context flash_context; + u32 num_targets_backend; + u32 ioctl_tunnel; + struct tasklet_struct tasklet; + struct pci_dev *pcid; + struct Scsi_Host *host; + unsigned int index; + char name[32]; + struct timer_list timer; + struct esas2r_firmware firmware; + wait_queue_head_t nvram_waiter; + int nvram_command_done; + wait_queue_head_t fm_api_waiter; + int fm_api_command_done; + wait_queue_head_t vda_waiter; + int vda_command_done; + u8 *vda_buffer; + u64 ppvda_buffer; +#define VDA_BUFFER_HEADER_SZ (offsetof(struct atto_ioctl_vda, data)) +#define VDA_MAX_BUFFER_SIZE (0x40000 + VDA_BUFFER_HEADER_SZ) + wait_queue_head_t fs_api_waiter; + int fs_api_command_done; + u64 ppfs_api_buffer; + u8 *fs_api_buffer; + u32 fs_api_buffer_size; + wait_queue_head_t buffered_ioctl_waiter; + int buffered_ioctl_done; + int uncached_size; + struct workqueue_struct *fw_event_q; + struct list_head fw_event_list; + spinlock_t fw_event_lock; + u8 fw_events_off; /* if '1', then ignore events */ + char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN]; + /* + * intr_mode stores the interrupt mode currently being used by this + * adapter. it is based on the interrupt_mode module parameter, but + * can be changed based on the ability (or not) to utilize the + * mode requested by the parameter. 
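The comment above describes a fallback: the interrupt_mode module parameter only expresses a preference, and intr_mode records the mode that could actually be enabled. A sketch of that kind of downgrade logic is shown below, using the INTR_MODE_* values defined just after this comment; the helper and its arguments are illustrative only and are not part of the driver.

/* Hypothetical sketch: degrade the requested interrupt mode when the
 * preferred mechanism cannot be enabled. */
static int example_pick_intr_mode(int requested, int msix_ok, int msi_ok)
{
	if (requested == INTR_MODE_MSIX && msix_ok)
		return INTR_MODE_MSIX;
	if (requested != INTR_MODE_LEGACY && msi_ok)
		return INTR_MODE_MSI;
	return INTR_MODE_LEGACY;	/* legacy INTx as the last resort */
}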
+ */ + int intr_mode; +#define INTR_MODE_LEGACY 0 +#define INTR_MODE_MSI 1 +#define INTR_MODE_MSIX 2 + struct esas2r_sg_context fm_api_sgc; + u8 *save_offset; + struct list_head vrq_mds_head; + struct esas2r_mem_desc *vrq_mds; + int num_vrqs; + struct mutex fm_api_mutex; + struct mutex fs_api_mutex; + struct semaphore nvram_semaphore; + struct atto_ioctl *local_atto_ioctl; + u8 fw_coredump_buff[ESAS2R_FWCOREDUMP_SZ]; + unsigned int sysfs_fw_created:1; + unsigned int sysfs_fs_created:1; + unsigned int sysfs_vda_created:1; + unsigned int sysfs_hw_created:1; + unsigned int sysfs_live_nvram_created:1; + unsigned int sysfs_default_nvram_created:1; +}; + +/* + * Function Declarations + * SCSI functions + */ +const char *esas2r_info(struct Scsi_Host *); +int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq, + struct esas2r_sas_nvram *data); +int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg); +int esas2r_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *arg); +u8 handle_hba_ioctl(struct esas2r_adapter *a, + struct atto_ioctl *ioctl_hba); +int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd); +int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh); +long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); + +/* SCSI error handler (eh) functions */ +int esas2r_eh_abort(struct scsi_cmnd *cmd); +int esas2r_device_reset(struct scsi_cmnd *cmd); +int esas2r_host_reset(struct scsi_cmnd *cmd); +int esas2r_bus_reset(struct scsi_cmnd *cmd); +int esas2r_target_reset(struct scsi_cmnd *cmd); + +/* Internal functions */ +int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid, + int index); +int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count); +int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off, + int count); +int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count); +int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off, + int count); +int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count); +int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off, + int count); +void esas2r_adapter_tasklet(unsigned long context); +irqreturn_t esas2r_interrupt(int irq, void *dev_id); +irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id); +void esas2r_kickoff_timer(struct esas2r_adapter *a); + +extern const struct dev_pm_ops esas2r_pm_ops; + +void esas2r_fw_event_off(struct esas2r_adapter *a); +void esas2r_fw_event_on(struct esas2r_adapter *a); +bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq, + struct esas2r_sas_nvram *nvram); +void esas2r_nvram_get_defaults(struct esas2r_adapter *a, + struct esas2r_sas_nvram *nvram); +void esas2r_complete_request_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); +void esas2r_reset_detected(struct esas2r_adapter *a); +void esas2r_target_state_changed(struct esas2r_adapter *ha, u16 targ_id, + u8 state); +int esas2r_req_status_to_error(u8 req_stat); +void esas2r_kill_adapter(int i); +void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq); +struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a); +u32 esas2r_get_uncached_size(struct esas2r_adapter *a); +bool esas2r_init_adapter_struct(struct esas2r_adapter *a, + void **uncached_area); +bool esas2r_check_adapter(struct esas2r_adapter *a); +bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll); +void 
esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq); +bool esas2r_send_task_mgmt(struct esas2r_adapter *a, + struct esas2r_request *rqaux, u8 task_mgt_func); +void esas2r_do_tasklet_tasks(struct esas2r_adapter *a); +void esas2r_adapter_interrupt(struct esas2r_adapter *a); +void esas2r_do_deferred_processes(struct esas2r_adapter *a); +void esas2r_reset_bus(struct esas2r_adapter *a); +void esas2r_reset_adapter(struct esas2r_adapter *a); +void esas2r_timer_tick(struct esas2r_adapter *a); +const char *esas2r_get_model_name(struct esas2r_adapter *a); +const char *esas2r_get_model_name_short(struct esas2r_adapter *a); +u32 esas2r_stall_execution(struct esas2r_adapter *a, u32 start_time, + u32 *delay); +void esas2r_build_flash_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u8 cksum, + u32 addr, + u32 length); +void esas2r_build_mgt_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u8 scan_gen, + u16 dev_index, + u32 length, + void *data); +void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq); +void esas2r_build_cli_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u32 length, + u32 cmd_rsp_len); +void esas2r_build_ioctl_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u32 length, + u8 sub_func); +void esas2r_build_cfg_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u32 length, + void *data); +void esas2r_power_down(struct esas2r_adapter *a); +bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll); +void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq); +u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo); +bool esas2r_process_fs_ioctl(struct esas2r_adapter *a, + struct esas2r_ioctl_fs *fs, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc); +bool esas2r_read_flash_block(struct esas2r_adapter *a, void *to, u32 from, + u32 size); +bool esas2r_read_mem_block(struct esas2r_adapter *a, void *to, u32 from, + u32 size); +bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi, + struct esas2r_request *rq, struct esas2r_sg_context *sgc); +void esas2r_force_interrupt(struct esas2r_adapter *a); +void esas2r_local_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq); +void esas2r_process_adapter_reset(struct esas2r_adapter *a); +void esas2r_complete_request(struct esas2r_adapter *a, + struct esas2r_request *rq); +void esas2r_dummy_complete(struct esas2r_adapter *a, + struct esas2r_request *rq); +void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq); +void esas2r_start_vda_request(struct esas2r_adapter *a, + struct esas2r_request *rq); +bool esas2r_read_flash_rev(struct esas2r_adapter *a); +bool esas2r_read_image_type(struct esas2r_adapter *a); +bool esas2r_nvram_read_direct(struct esas2r_adapter *a); +bool esas2r_nvram_validate(struct esas2r_adapter *a); +void esas2r_nvram_set_defaults(struct esas2r_adapter *a); +bool esas2r_print_flash_rev(struct esas2r_adapter *a); +void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt); +bool esas2r_init_msgs(struct esas2r_adapter *a); +bool esas2r_is_adapter_present(struct esas2r_adapter *a); +void esas2r_nuxi_mgt_data(u8 function, void *data); +void esas2r_nuxi_cfg_data(u8 function, void *data); +void esas2r_nuxi_ae_data(union atto_vda_ae *ae); +void esas2r_reset_chip(struct esas2r_adapter *a); +void esas2r_log_request_failure(struct esas2r_adapter *a, + struct esas2r_request *rq); +void 
esas2r_polled_interrupt(struct esas2r_adapter *a); +bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq, + u8 status); +bool esas2r_build_sg_list_sge(struct esas2r_adapter *a, + struct esas2r_sg_context *sgc); +bool esas2r_build_sg_list_prd(struct esas2r_adapter *a, + struct esas2r_sg_context *sgc); +void esas2r_targ_db_initialize(struct esas2r_adapter *a); +void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify); +void esas2r_targ_db_report_changes(struct esas2r_adapter *a); +struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a, + struct esas2r_disc_context *dc); +struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a, + struct esas2r_disc_context *dc, + u8 *ident, + u8 ident_len); +void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t); +struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a, + u64 *sas_addr); +struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a, + void *identifier, + u8 ident_len); +u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id); +struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a, + u16 virt_id); +u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a); +void esas2r_disc_initialize(struct esas2r_adapter *a); +void esas2r_disc_start_waiting(struct esas2r_adapter *a); +void esas2r_disc_check_for_work(struct esas2r_adapter *a); +void esas2r_disc_check_complete(struct esas2r_adapter *a); +void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt); +bool esas2r_disc_start_port(struct esas2r_adapter *a); +void esas2r_disc_local_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq); +bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str); +bool esas2r_process_vda_ioctl(struct esas2r_adapter *a, + struct atto_ioctl_vda *vi, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc); +void esas2r_queue_fw_event(struct esas2r_adapter *a, + enum fw_event_type type, + void *data, + int data_sz); + +/* Inline functions */ + +/* Allocate a chip scatter/gather list entry */ +static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a) +{ + unsigned long flags; + struct list_head *sgl; + struct esas2r_mem_desc *result = NULL; + + spin_lock_irqsave(&a->sg_list_lock, flags); + if (likely(!list_empty(&a->free_sg_list_head))) { + sgl = a->free_sg_list_head.next; + result = list_entry(sgl, struct esas2r_mem_desc, next_desc); + list_del_init(sgl); + } + spin_unlock_irqrestore(&a->sg_list_lock, flags); + + return result; +} + +/* Initialize a scatter/gather context */ +static inline void esas2r_sgc_init(struct esas2r_sg_context *sgc, + struct esas2r_adapter *a, + struct esas2r_request *rq, + struct atto_vda_sge *first) +{ + sgc->adapter = a; + sgc->first_req = rq; + + /* + * set the limit pointer such that an SGE pointer above this value + * would be the first one to overflow the SGL. 
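esas2r_sgc_init() computes sge.a64.limit so that, per the comment above, any SGE pointer beyond it no longer fits in the inline list. A check built on that invariant might look like the following; this is a hypothetical helper, and whether the real SGL builder chains at exactly this point is an assumption drawn from the comment alone.

/* Hypothetical sketch: detect that the next SGE slot would overflow the
 * inline SGL prepared by esas2r_sgc_init(). */
static inline int example_sge_overflows(const struct esas2r_sg_context *sgc)
{
	/* curr is where the next SGE would be written; past the limit, the
	 * builder would have to switch to a chained SG list instead. */
	return sgc->sge.a64.curr > sgc->sge.a64.limit;
}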
+ */ + sgc->sge.a64.limit = (struct atto_vda_sge *)((u8 *)rq->vrq + + (sizeof(union + atto_vda_req) / + 8) + - sizeof(struct + atto_vda_sge)); + if (first) { + sgc->sge.a64.last = + sgc->sge.a64.curr = first; + rq->vrq->scsi.sg_list_offset = (u8) + ((u8 *)first - + (u8 *)rq->vrq); + } else { + sgc->sge.a64.last = + sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0]; + rq->vrq->scsi.sg_list_offset = + (u8)offsetof(struct atto_vda_scsi_req, u.sge); + } + sgc->sge.a64.chain = NULL; +} + +static inline void esas2r_rq_init_request(struct esas2r_request *rq, + struct esas2r_adapter *a) +{ + union atto_vda_req *vrq = rq->vrq; + + INIT_LIST_HEAD(&rq->sg_table_head); + rq->data_buf = (void *)(vrq + 1); + rq->interrupt_cb = NULL; + rq->comp_cb = esas2r_complete_request_cb; + rq->flags = 0; + rq->timeout = 0; + rq->req_stat = RS_PENDING; + rq->req_type = RT_INI_REQ; + + /* clear the outbound response */ + rq->func_rsp.dwords[0] = 0; + rq->func_rsp.dwords[1] = 0; + + /* + * clear the size of the VDA request. esas2r_build_sg_list() will + * only allow the size of the request to grow. there are some + * management requests that go through there twice and the second + * time through sets a smaller request size. if this is not modified + * at all we'll set it to the size of the entire VDA request. + */ + rq->vda_req_sz = RQ_SIZE_DEFAULT; + + /* req_table entry should be NULL at this point - if not, halt */ + + if (a->req_table[LOWORD(vrq->scsi.handle)]) { + esas2r_bugon(); + } + + /* fill in the table for this handle so we can get back to the + * request. + */ + a->req_table[LOWORD(vrq->scsi.handle)] = rq; + + /* + * add a reference number to the handle to make it unique (until it + * wraps of course) while preserving the least significant word + */ + vrq->scsi.handle = (a->cmd_ref_no++ << 16) | (u16)vrq->scsi.handle; + + /* + * the following formats a SCSI request. the caller can override as + * necessary. clear_vda_request can be called to clear the VDA + * request for another type of request. + */ + vrq->scsi.function = VDA_FUNC_SCSI; + vrq->scsi.sense_len = SENSE_DATA_SZ; + + /* clear out sg_list_offset and chain_offset */ + vrq->scsi.sg_list_offset = 0; + vrq->scsi.chain_offset = 0; + vrq->scsi.flags = 0; + vrq->scsi.reserved = 0; + + /* set the sense buffer to be the data payload buffer */ + vrq->scsi.ppsense_buf + = cpu_to_le64(rq->vrq_md->phys_addr + + sizeof(union atto_vda_req)); +} + +static inline void esas2r_rq_free_sg_lists(struct esas2r_request *rq, + struct esas2r_adapter *a) +{ + unsigned long flags; + + if (list_empty(&rq->sg_table_head)) + return; + + spin_lock_irqsave(&a->sg_list_lock, flags); + list_splice_tail_init(&rq->sg_table_head, &a->free_sg_list_head); + spin_unlock_irqrestore(&a->sg_list_lock, flags); +} + +static inline void esas2r_rq_destroy_request(struct esas2r_request *rq, + struct esas2r_adapter *a) + +{ + esas2r_rq_free_sg_lists(rq, a); + a->req_table[LOWORD(rq->vrq->scsi.handle)] = NULL; + rq->data_buf = NULL; +} + +static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a) +{ + + return test_bit(AF_BUSRST_NEEDED, &a->flags) || + test_bit(AF_BUSRST_DETECTED, &a->flags) || + test_bit(AF_CHPRST_NEEDED, &a->flags) || + test_bit(AF_CHPRST_DETECTED, &a->flags) || + test_bit(AF_PORT_CHANGE, &a->flags); + +} + +/* + * Build the scatter/gather list for an I/O request according to the + * specifications placed in the esas2r_sg_context. 
The caller must initialize + * struct esas2r_sg_context prior to the initial call by calling + * esas2r_sgc_init() + */ +static inline bool esas2r_build_sg_list(struct esas2r_adapter *a, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc) +{ + if (unlikely(le32_to_cpu(rq->vrq->scsi.length) == 0)) + return true; + + return (*a->build_sgl)(a, sgc); +} + +static inline void esas2r_disable_chip_interrupts(struct esas2r_adapter *a) +{ + if (atomic_inc_return(&a->dis_ints_cnt) == 1) + esas2r_write_register_dword(a, MU_INT_MASK_OUT, + ESAS2R_INT_DIS_MASK); +} + +static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a) +{ + if (atomic_dec_return(&a->dis_ints_cnt) == 0) + esas2r_write_register_dword(a, MU_INT_MASK_OUT, + ESAS2R_INT_ENB_MASK); +} + +/* Schedule a TASKLET to perform non-interrupt tasks that may require delays + * or long completion times. + */ +static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a) +{ + /* make sure we don't schedule twice */ + if (!test_and_set_bit(AF_TASKLET_SCHEDULED, &a->flags)) + tasklet_hi_schedule(&a->tasklet); +} + +static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a) +{ + if (!test_bit(AF_DEGRADED_MODE, &a->flags) && + !test_bit(AF_CHPRST_PENDING, &a->flags) && + (a->nvram->options2 & SASNVR2_HEARTBEAT)) + set_bit(AF_HEARTBEAT_ENB, &a->flags); + else + clear_bit(AF_HEARTBEAT_ENB, &a->flags); +} + +static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a) +{ + clear_bit(AF_HEARTBEAT_ENB, &a->flags); + clear_bit(AF_HEARTBEAT, &a->flags); +} + +/* Set the initial state for resetting the adapter on the next pass through + * esas2r_do_deferred. + */ +static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a) +{ + esas2r_disable_heartbeat(a); + + set_bit(AF_CHPRST_NEEDED, &a->flags); + set_bit(AF_CHPRST_PENDING, &a->flags); + set_bit(AF_DISC_PENDING, &a->flags); +} + +/* See if an interrupt is pending on the adapter. 
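esas2r_disable_chip_interrupts() and esas2r_enable_chip_interrupts() above use dis_ints_cnt as a nesting counter, so the mask register is written only on the first disable and the matching last enable. A hypothetical caller, shown only to make the pairing explicit (it is not taken from the driver):

/* Hypothetical sketch: nested disable/enable calls; only the outermost
 * pair actually touches MU_INT_MASK_OUT. */
static void example_nested_int_masking(struct esas2r_adapter *a)
{
	esas2r_disable_chip_interrupts(a);	/* count 0 -> 1: writes ESAS2R_INT_DIS_MASK */
	esas2r_disable_chip_interrupts(a);	/* count 1 -> 2: no register write */

	/* ... work with chip interrupts masked ... */

	esas2r_enable_chip_interrupts(a);	/* count 2 -> 1: no register write */
	esas2r_enable_chip_interrupts(a);	/* count 1 -> 0: writes ESAS2R_INT_ENB_MASK */
}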
*/ +static inline bool esas2r_adapter_interrupt_pending(struct esas2r_adapter *a) +{ + u32 intstat; + + if (a->int_mask == 0) + return false; + + intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); + + if ((intstat & a->int_mask) == 0) + return false; + + esas2r_disable_chip_interrupts(a); + + a->int_stat = intstat; + a->int_mask = 0; + + return true; +} + +static inline u16 esas2r_targ_get_id(struct esas2r_target *t, + struct esas2r_adapter *a) +{ + return (u16)(uintptr_t)(t - a->targetdb); +} + +/* Build and start an asynchronous event request */ +static inline void esas2r_start_ae_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + unsigned long flags; + + esas2r_build_ae_req(a, rq); + + spin_lock_irqsave(&a->queue_lock, flags); + esas2r_start_vda_request(a, rq); + spin_unlock_irqrestore(&a->queue_lock, flags); +} + +static inline void esas2r_comp_list_drain(struct esas2r_adapter *a, + struct list_head *comp_list) +{ + struct esas2r_request *rq; + struct list_head *element, *next; + + list_for_each_safe(element, next, comp_list) { + rq = list_entry(element, struct esas2r_request, comp_list); + list_del_init(element); + esas2r_complete_request(a, rq); + } +} + +/* sysfs handlers */ +extern struct bin_attribute bin_attr_fw; +extern struct bin_attribute bin_attr_fs; +extern struct bin_attribute bin_attr_vda; +extern struct bin_attribute bin_attr_hw; +extern struct bin_attribute bin_attr_live_nvram; +extern struct bin_attribute bin_attr_default_nvram; + +#endif /* ESAS2R_H */ diff --git a/drivers/scsi/esas2r/esas2r_disc.c b/drivers/scsi/esas2r/esas2r_disc.c new file mode 100644 index 000000000..ba42536d1 --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_disc.c @@ -0,0 +1,1185 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_disc.c + * esas2r device discovery routines + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include "esas2r.h" + +/* Miscellaneous internal discovery routines */ +static void esas2r_disc_abort(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_continue(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a); +static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr); +static bool esas2r_disc_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq); + +/* Internal discovery routines that process the states */ +static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_dev_add(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_dev_remove(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_part_info(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_part_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); + +void esas2r_disc_initialize(struct esas2r_adapter *a) +{ + struct esas2r_sas_nvram *nvr = a->nvram; + + esas2r_trace_enter(); + + clear_bit(AF_DISC_IN_PROG, &a->flags); + clear_bit(AF2_DEV_SCAN, &a->flags2); + clear_bit(AF2_DEV_CNT_OK, &a->flags2); + + a->disc_start_time = jiffies_to_msecs(jiffies); + a->disc_wait_time = nvr->dev_wait_time * 1000; + a->disc_wait_cnt = nvr->dev_wait_count; + + if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS) + a->disc_wait_cnt = ESAS2R_MAX_TARGETS; + + /* + * If we are doing chip reset or power management processing, always + * wait for devices. use the NVRAM device count if it is greater than + * previously discovered devices. + */ + + esas2r_hdebug("starting discovery..."); + + a->general_req.interrupt_cx = NULL; + + if (test_bit(AF_CHPRST_DETECTED, &a->flags) || + test_bit(AF_POWER_MGT, &a->flags)) { + if (a->prev_dev_cnt == 0) { + /* Don't bother waiting if there is nothing to wait + * for. 
+ */ + a->disc_wait_time = 0; + } else { + /* + * Set the device wait count to what was previously + * found. We don't care if the user only configured + * a time because we know the exact count to wait for. + * There is no need to honor the user's wishes to + * always wait the full time. + */ + a->disc_wait_cnt = a->prev_dev_cnt; + + /* + * bump the minimum wait time to 15 seconds since the + * default is 3 (system boot or the boot driver usually + * buys us more time). + */ + if (a->disc_wait_time < 15000) + a->disc_wait_time = 15000; + } + } + + esas2r_trace("disc wait count: %d", a->disc_wait_cnt); + esas2r_trace("disc wait time: %d", a->disc_wait_time); + + if (a->disc_wait_time == 0) + esas2r_disc_check_complete(a); + + esas2r_trace_exit(); +} + +void esas2r_disc_start_waiting(struct esas2r_adapter *a) +{ + unsigned long flags; + + spin_lock_irqsave(&a->mem_lock, flags); + + if (a->disc_ctx.disc_evt) + esas2r_disc_start_port(a); + + spin_unlock_irqrestore(&a->mem_lock, flags); +} + +void esas2r_disc_check_for_work(struct esas2r_adapter *a) +{ + struct esas2r_request *rq = &a->general_req; + + /* service any pending interrupts first */ + + esas2r_polled_interrupt(a); + + /* + * now, interrupt processing may have queued up a discovery event. go + * see if we have one to start. we couldn't start it in the ISR since + * polled discovery would cause a deadlock. + */ + + esas2r_disc_start_waiting(a); + + if (rq->interrupt_cx == NULL) + return; + + if (rq->req_stat == RS_STARTED + && rq->timeout <= RQ_MAX_TIMEOUT) { + /* wait for the current discovery request to complete. */ + esas2r_wait_request(a, rq); + + if (rq->req_stat == RS_TIMEOUT) { + esas2r_disc_abort(a, rq); + esas2r_local_reset_adapter(a); + return; + } + } + + if (rq->req_stat == RS_PENDING + || rq->req_stat == RS_STARTED) + return; + + esas2r_disc_continue(a, rq); +} + +void esas2r_disc_check_complete(struct esas2r_adapter *a) +{ + unsigned long flags; + + esas2r_trace_enter(); + + /* check to see if we should be waiting for devices */ + if (a->disc_wait_time) { + u32 currtime = jiffies_to_msecs(jiffies); + u32 time = currtime - a->disc_start_time; + + /* + * Wait until the device wait time is exhausted or the device + * wait count is satisfied. + */ + if (time < a->disc_wait_time + && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt + || a->disc_wait_cnt == 0)) { + /* After three seconds of waiting, schedule a scan. */ + if (time >= 3000 + && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) { + spin_lock_irqsave(&a->mem_lock, flags); + esas2r_disc_queue_event(a, DCDE_DEV_SCAN); + spin_unlock_irqrestore(&a->mem_lock, flags); + } + + esas2r_trace_exit(); + return; + } + + /* + * We are done waiting...we think. Adjust the wait time to + * consume events after the count is met. + */ + if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2)) + a->disc_wait_time = time + 3000; + + /* If we haven't done a full scan yet, do it now. */ + if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) { + spin_lock_irqsave(&a->mem_lock, flags); + esas2r_disc_queue_event(a, DCDE_DEV_SCAN); + spin_unlock_irqrestore(&a->mem_lock, flags); + esas2r_trace_exit(); + return; + } + + /* + * Now, if there is still time left to consume events, continue + * waiting. 
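The branch above keeps discovery waiting only while the wait window is still open and, when a device count was configured, that count has not yet been reached; once the count is met, the window is trimmed to roughly three more seconds of event consumption. Restated as a standalone predicate (a hypothetical helper, not driver code):

/* Hypothetical sketch: the wait decision made in
 * esas2r_disc_check_complete(), expressed as a pure function. */
static int example_keep_waiting(u32 elapsed_ms, u32 window_ms,
				u16 found_devs, u16 expected_devs)
{
	return elapsed_ms < window_ms &&
	       (expected_devs == 0 || found_devs < expected_devs);
}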
+ */ + if (time < a->disc_wait_time) { + esas2r_trace_exit(); + return; + } + } else { + if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) { + spin_lock_irqsave(&a->mem_lock, flags); + esas2r_disc_queue_event(a, DCDE_DEV_SCAN); + spin_unlock_irqrestore(&a->mem_lock, flags); + } + } + + /* We want to stop waiting for devices. */ + a->disc_wait_time = 0; + + if (test_bit(AF_DISC_POLLED, &a->flags) && + test_bit(AF_DISC_IN_PROG, &a->flags)) { + /* + * Polled discovery is still pending so continue the active + * discovery until it is done. At that point, we will stop + * polled discovery and transition to interrupt driven + * discovery. + */ + } else { + /* + * Done waiting for devices. Note that we get here immediately + * after deferred waiting completes because that is interrupt + * driven; i.e. There is no transition. + */ + esas2r_disc_fix_curr_requests(a); + clear_bit(AF_DISC_PENDING, &a->flags); + + /* + * We have deferred target state changes until now because we + * don't want to report any removals (due to the first arrival) + * until the device wait time expires. + */ + set_bit(AF_PORT_CHANGE, &a->flags); + } + + esas2r_trace_exit(); +} + +void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt) +{ + struct esas2r_disc_context *dc = &a->disc_ctx; + + esas2r_trace_enter(); + + esas2r_trace("disc_event: %d", disc_evt); + + /* Initialize the discovery context */ + dc->disc_evt |= disc_evt; + + /* + * Don't start discovery before or during polled discovery. if we did, + * we would have a deadlock if we are in the ISR already. + */ + if (!test_bit(AF_CHPRST_PENDING, &a->flags) && + !test_bit(AF_DISC_POLLED, &a->flags)) + esas2r_disc_start_port(a); + + esas2r_trace_exit(); +} + +bool esas2r_disc_start_port(struct esas2r_adapter *a) +{ + struct esas2r_request *rq = &a->general_req; + struct esas2r_disc_context *dc = &a->disc_ctx; + bool ret; + + esas2r_trace_enter(); + + if (test_bit(AF_DISC_IN_PROG, &a->flags)) { + esas2r_trace_exit(); + + return false; + } + + /* If there is a discovery waiting, process it. */ + if (dc->disc_evt) { + if (test_bit(AF_DISC_POLLED, &a->flags) + && a->disc_wait_time == 0) { + /* + * We are doing polled discovery, but we no longer want + * to wait for devices. Stop polled discovery and + * transition to interrupt driven discovery. + */ + + esas2r_trace_exit(); + + return false; + } + } else { + /* Discovery is complete. 
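+ * No discovery events are queued, so flag the port change and
+ * return without starting a new discovery.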
*/ + + esas2r_hdebug("disc done"); + + set_bit(AF_PORT_CHANGE, &a->flags); + + esas2r_trace_exit(); + + return false; + } + + /* Handle the discovery context */ + esas2r_trace("disc_evt: %d", dc->disc_evt); + set_bit(AF_DISC_IN_PROG, &a->flags); + dc->flags = 0; + + if (test_bit(AF_DISC_POLLED, &a->flags)) + dc->flags |= DCF_POLLED; + + rq->interrupt_cx = dc; + rq->req_stat = RS_SUCCESS; + + /* Decode the event code */ + if (dc->disc_evt & DCDE_DEV_SCAN) { + dc->disc_evt &= ~DCDE_DEV_SCAN; + + dc->flags |= DCF_DEV_SCAN; + dc->state = DCS_BLOCK_DEV_SCAN; + } else if (dc->disc_evt & DCDE_DEV_CHANGE) { + dc->disc_evt &= ~DCDE_DEV_CHANGE; + + dc->flags |= DCF_DEV_CHANGE; + dc->state = DCS_DEV_RMV; + } + + /* Continue interrupt driven discovery */ + if (!test_bit(AF_DISC_POLLED, &a->flags)) + ret = esas2r_disc_continue(a, rq); + else + ret = true; + + esas2r_trace_exit(); + + return ret; +} + +static bool esas2r_disc_continue(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + + /* Device discovery/removal */ + while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) { + rslt = false; + + switch (dc->state) { + case DCS_DEV_RMV: + + rslt = esas2r_disc_dev_remove(a, rq); + break; + + case DCS_DEV_ADD: + + rslt = esas2r_disc_dev_add(a, rq); + break; + + case DCS_BLOCK_DEV_SCAN: + + rslt = esas2r_disc_block_dev_scan(a, rq); + break; + + case DCS_RAID_GRP_INFO: + + rslt = esas2r_disc_raid_grp_info(a, rq); + break; + + case DCS_PART_INFO: + + rslt = esas2r_disc_part_info(a, rq); + break; + + case DCS_PT_DEV_INFO: + + rslt = esas2r_disc_passthru_dev_info(a, rq); + break; + case DCS_PT_DEV_ADDR: + + rslt = esas2r_disc_passthru_dev_addr(a, rq); + break; + case DCS_DISC_DONE: + + dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN); + break; + + default: + + esas2r_bugon(); + dc->state = DCS_DISC_DONE; + break; + } + + if (rslt) + return true; + } + + /* Discovery is done...for now. */ + rq->interrupt_cx = NULL; + + if (!test_bit(AF_DISC_PENDING, &a->flags)) + esas2r_disc_fix_curr_requests(a); + + clear_bit(AF_DISC_IN_PROG, &a->flags); + + /* Start the next discovery. */ + return esas2r_disc_start_port(a); +} + +static bool esas2r_disc_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + unsigned long flags; + + /* Set the timeout to a minimum value. */ + if (rq->timeout < ESAS2R_DEFAULT_TMO) + rq->timeout = ESAS2R_DEFAULT_TMO; + + /* + * Override the request type to distinguish discovery requests. If we + * end up deferring the request, esas2r_disc_local_start_request() + * will be called to restart it. 
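+ * Deferred requests are queued on a->defer_list while a chip
+ * reset or flash operation is pending.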
+ */ + rq->req_type = RT_DISC_REQ; + + spin_lock_irqsave(&a->queue_lock, flags); + + if (!test_bit(AF_CHPRST_PENDING, &a->flags) && + !test_bit(AF_FLASHING, &a->flags)) + esas2r_disc_local_start_request(a, rq); + else + list_add_tail(&rq->req_list, &a->defer_list); + + spin_unlock_irqrestore(&a->queue_lock, flags); + + return true; +} + +void esas2r_disc_local_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + esas2r_trace_enter(); + + list_add_tail(&rq->req_list, &a->active_list); + + esas2r_start_vda_request(a, rq); + + esas2r_trace_exit(); + + return; +} + +static void esas2r_disc_abort(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + + esas2r_trace_enter(); + + /* abort the current discovery */ + + dc->state = DCS_DISC_DONE; + + esas2r_trace_exit(); +} + +static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + + esas2r_trace_enter(); + + esas2r_rq_init_request(rq, a); + + esas2r_build_mgt_req(a, + rq, + VDAMGT_DEV_SCAN, + 0, + 0, + 0, + NULL); + + rq->comp_cb = esas2r_disc_block_dev_scan_cb; + + rq->timeout = 30000; + rq->interrupt_cx = dc; + + rslt = esas2r_disc_start_request(a, rq); + + esas2r_trace_exit(); + + return rslt; +} + +static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + unsigned long flags; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->mem_lock, flags); + + if (rq->req_stat == RS_SUCCESS) + dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; + + dc->state = DCS_RAID_GRP_INFO; + dc->raid_grp_ix = 0; + + esas2r_rq_destroy_request(rq, a); + + /* continue discovery if it's interrupt driven */ + + if (!(dc->flags & DCF_POLLED)) + esas2r_disc_continue(a, rq); + + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); +} + +static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + struct atto_vda_grp_info *grpinfo; + + esas2r_trace_enter(); + + esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix); + + if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) { + dc->state = DCS_DISC_DONE; + + esas2r_trace_exit(); + + return false; + } + + esas2r_rq_init_request(rq, a); + + grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info; + + memset(grpinfo, 0, sizeof(struct atto_vda_grp_info)); + + esas2r_build_mgt_req(a, + rq, + VDAMGT_GRP_INFO, + dc->scan_gen, + 0, + sizeof(struct atto_vda_grp_info), + NULL); + + grpinfo->grp_index = dc->raid_grp_ix; + + rq->comp_cb = esas2r_disc_raid_grp_info_cb; + + rq->interrupt_cx = dc; + + rslt = esas2r_disc_start_request(a, rq); + + esas2r_trace_exit(); + + return rslt; +} + +static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + unsigned long flags; + struct atto_vda_grp_info *grpinfo; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->mem_lock, flags); + + if (rq->req_stat == RS_SCAN_GEN) { + dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; + dc->raid_grp_ix = 0; + goto done; + } + + if (rq->req_stat == RS_SUCCESS) { + grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info; + + if 
(grpinfo->status != VDA_GRP_STAT_ONLINE + && grpinfo->status != VDA_GRP_STAT_DEGRADED) { + /* go to the next group. */ + + dc->raid_grp_ix++; + } else { + memcpy(&dc->raid_grp_name[0], + &grpinfo->grp_name[0], + sizeof(grpinfo->grp_name)); + + dc->interleave = le32_to_cpu(grpinfo->interleave); + dc->block_size = le32_to_cpu(grpinfo->block_size); + + dc->state = DCS_PART_INFO; + dc->part_num = 0; + } + } else { + if (!(rq->req_stat == RS_GRP_INVALID)) { + esas2r_log(ESAS2R_LOG_WARN, + "A request for RAID group info failed - " + "returned with %x", + rq->req_stat); + } + + dc->dev_ix = 0; + dc->state = DCS_PT_DEV_INFO; + } + +done: + + esas2r_rq_destroy_request(rq, a); + + /* continue discovery if it's interrupt driven */ + + if (!(dc->flags & DCF_POLLED)) + esas2r_disc_continue(a, rq); + + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); +} + +static bool esas2r_disc_part_info(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + struct atto_vdapart_info *partinfo; + + esas2r_trace_enter(); + + esas2r_trace("part_num: %d", dc->part_num); + + if (dc->part_num >= VDA_MAX_PARTITIONS) { + dc->state = DCS_RAID_GRP_INFO; + dc->raid_grp_ix++; + + esas2r_trace_exit(); + + return false; + } + + esas2r_rq_init_request(rq, a); + + partinfo = &rq->vda_rsp_data->mgt_data.data.part_info; + + memset(partinfo, 0, sizeof(struct atto_vdapart_info)); + + esas2r_build_mgt_req(a, + rq, + VDAMGT_PART_INFO, + dc->scan_gen, + 0, + sizeof(struct atto_vdapart_info), + NULL); + + partinfo->part_no = dc->part_num; + + memcpy(&partinfo->grp_name[0], + &dc->raid_grp_name[0], + sizeof(partinfo->grp_name)); + + rq->comp_cb = esas2r_disc_part_info_cb; + + rq->interrupt_cx = dc; + + rslt = esas2r_disc_start_request(a, rq); + + esas2r_trace_exit(); + + return rslt; +} + +static void esas2r_disc_part_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + unsigned long flags; + struct atto_vdapart_info *partinfo; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->mem_lock, flags); + + if (rq->req_stat == RS_SCAN_GEN) { + dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; + dc->raid_grp_ix = 0; + dc->state = DCS_RAID_GRP_INFO; + } else if (rq->req_stat == RS_SUCCESS) { + partinfo = &rq->vda_rsp_data->mgt_data.data.part_info; + + dc->part_num = partinfo->part_no; + + dc->curr_virt_id = le16_to_cpu(partinfo->target_id); + + esas2r_targ_db_add_raid(a, dc); + + dc->part_num++; + } else { + if (!(rq->req_stat == RS_PART_LAST)) { + esas2r_log(ESAS2R_LOG_WARN, + "A request for RAID group partition info " + "failed - status:%d", rq->req_stat); + } + + dc->state = DCS_RAID_GRP_INFO; + dc->raid_grp_ix++; + } + + esas2r_rq_destroy_request(rq, a); + + /* continue discovery if it's interrupt driven */ + + if (!(dc->flags & DCF_POLLED)) + esas2r_disc_continue(a, rq); + + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); +} + +static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + struct atto_vda_devinfo *devinfo; + + esas2r_trace_enter(); + + esas2r_trace("dev_ix: %d", dc->dev_ix); + + esas2r_rq_init_request(rq, a); + + devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info; + + memset(devinfo, 0, sizeof(struct atto_vda_devinfo)); + + esas2r_build_mgt_req(a, + 
rq, + VDAMGT_DEV_PT_INFO, + dc->scan_gen, + dc->dev_ix, + sizeof(struct atto_vda_devinfo), + NULL); + + rq->comp_cb = esas2r_disc_passthru_dev_info_cb; + + rq->interrupt_cx = dc; + + rslt = esas2r_disc_start_request(a, rq); + + esas2r_trace_exit(); + + return rslt; +} + +static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + unsigned long flags; + struct atto_vda_devinfo *devinfo; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->mem_lock, flags); + + if (rq->req_stat == RS_SCAN_GEN) { + dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; + dc->dev_ix = 0; + dc->state = DCS_PT_DEV_INFO; + } else if (rq->req_stat == RS_SUCCESS) { + devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info; + + dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index); + + dc->curr_virt_id = le16_to_cpu(devinfo->target_id); + + if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) { + dc->curr_phys_id = + le16_to_cpu(devinfo->phys_target_id); + dc->dev_addr_type = ATTO_GDA_AT_PORT; + dc->state = DCS_PT_DEV_ADDR; + + esas2r_trace("curr_virt_id: %d", dc->curr_virt_id); + esas2r_trace("curr_phys_id: %d", dc->curr_phys_id); + } else { + dc->dev_ix++; + } + } else { + if (!(rq->req_stat == RS_DEV_INVALID)) { + esas2r_log(ESAS2R_LOG_WARN, + "A request for device information failed - " + "status:%d", rq->req_stat); + } + + dc->state = DCS_DISC_DONE; + } + + esas2r_rq_destroy_request(rq, a); + + /* continue discovery if it's interrupt driven */ + + if (!(dc->flags & DCF_POLLED)) + esas2r_disc_continue(a, rq); + + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); +} + +static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + struct atto_ioctl *hi; + struct esas2r_sg_context sgc; + + esas2r_trace_enter(); + + esas2r_rq_init_request(rq, a); + + /* format the request. */ + + sgc.cur_offset = NULL; + sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr; + sgc.length = offsetof(struct atto_ioctl, data) + + sizeof(struct atto_hba_get_device_address); + + esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge); + + esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA); + + if (!esas2r_build_sg_list(a, rq, &sgc)) { + esas2r_rq_destroy_request(rq, a); + + esas2r_trace_exit(); + + return false; + } + + rq->comp_cb = esas2r_disc_passthru_dev_addr_cb; + + rq->interrupt_cx = dc; + + /* format the IOCTL data. */ + + hi = (struct atto_ioctl *)a->disc_buffer; + + memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN); + + hi->version = ATTO_VER_GET_DEV_ADDR0; + hi->function = ATTO_FUNC_GET_DEV_ADDR; + hi->flags = HBAF_TUNNEL; + + hi->data.get_dev_addr.target_id = le32_to_cpu(dc->curr_phys_id); + hi->data.get_dev_addr.addr_type = dc->dev_addr_type; + + /* start it up. 
*/ + + rslt = esas2r_disc_start_request(a, rq); + + esas2r_trace_exit(); + + return rslt; +} + +static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + struct esas2r_target *t = NULL; + unsigned long flags; + struct atto_ioctl *hi; + u16 addrlen; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->mem_lock, flags); + + hi = (struct atto_ioctl *)a->disc_buffer; + + if (rq->req_stat == RS_SUCCESS + && hi->status == ATTO_STS_SUCCESS) { + addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len); + + if (dc->dev_addr_type == ATTO_GDA_AT_PORT) { + if (addrlen == sizeof(u64)) + memcpy(&dc->sas_addr, + &hi->data.get_dev_addr.address[0], + addrlen); + else + memset(&dc->sas_addr, 0, sizeof(dc->sas_addr)); + + /* Get the unique identifier. */ + dc->dev_addr_type = ATTO_GDA_AT_UNIQUE; + + goto next_dev_addr; + } else { + /* Add the pass through target. */ + if (HIBYTE(addrlen) == 0) { + t = esas2r_targ_db_add_pthru(a, + dc, + &hi->data. + get_dev_addr. + address[0], + (u8)hi->data. + get_dev_addr. + addr_len); + + if (t) + memcpy(&t->sas_addr, &dc->sas_addr, + sizeof(t->sas_addr)); + } else { + /* getting the back end data failed */ + + esas2r_log(ESAS2R_LOG_WARN, + "an error occurred retrieving the " + "back end data (%s:%d)", + __func__, + __LINE__); + } + } + } else { + /* getting the back end data failed */ + + esas2r_log(ESAS2R_LOG_WARN, + "an error occurred retrieving the back end data - " + "rq->req_stat:%d hi->status:%d", + rq->req_stat, hi->status); + } + + /* proceed to the next device. */ + + if (dc->flags & DCF_DEV_SCAN) { + dc->dev_ix++; + dc->state = DCS_PT_DEV_INFO; + } else if (dc->flags & DCF_DEV_CHANGE) { + dc->curr_targ++; + dc->state = DCS_DEV_ADD; + } else { + esas2r_bugon(); + } + +next_dev_addr: + esas2r_rq_destroy_request(rq, a); + + /* continue discovery if it's interrupt driven */ + + if (!(dc->flags & DCF_POLLED)) + esas2r_disc_continue(a, rq); + + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); +} + +static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr) +{ + struct esas2r_adapter *a = sgc->adapter; + + if (sgc->length > ESAS2R_DISC_BUF_LEN) { + esas2r_bugon(); + } + + *addr = a->uncached_phys + + (u64)((u8 *)a->disc_buffer - a->uncached); + + return sgc->length; +} + +static bool esas2r_disc_dev_remove(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + struct esas2r_target *t; + struct esas2r_target *t2; + + esas2r_trace_enter(); + + /* process removals. */ + + for (t = a->targetdb; t < a->targetdb_end; t++) { + if (t->new_target_state != TS_NOT_PRESENT) + continue; + + t->new_target_state = TS_INVALID; + + /* remove the right target! */ + + t2 = + esas2r_targ_db_find_by_virt_id(a, + esas2r_targ_get_id(t, + a)); + + if (t2) + esas2r_targ_db_remove(a, t2); + } + + /* removals complete. process arrivals. */ + + dc->state = DCS_DEV_ADD; + dc->curr_targ = a->targetdb; + + esas2r_trace_exit(); + + return false; +} + +static bool esas2r_disc_dev_add(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + struct esas2r_target *t = dc->curr_targ; + + if (t >= a->targetdb_end) { + /* done processing state changes. 
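+ * Every target in the target database has been examined.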
*/ + + dc->state = DCS_DISC_DONE; + } else if (t->new_target_state == TS_PRESENT) { + struct atto_vda_ae_lu *luevt = &t->lu_event; + + esas2r_trace_enter(); + + /* clear this now in case more events come in. */ + + t->new_target_state = TS_INVALID; + + /* setup the discovery context for adding this device. */ + + dc->curr_virt_id = esas2r_targ_get_id(t, a); + + if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id) + + sizeof(struct atto_vda_ae_lu_tgt_lun_raid)) + && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) { + dc->block_size = luevt->id.tgtlun_raid.dwblock_size; + dc->interleave = luevt->id.tgtlun_raid.dwinterleave; + } else { + dc->block_size = 0; + dc->interleave = 0; + } + + /* determine the device type being added. */ + + if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) { + if (luevt->dwevent & VDAAE_LU_PHYS_ID) { + dc->state = DCS_PT_DEV_ADDR; + dc->dev_addr_type = ATTO_GDA_AT_PORT; + dc->curr_phys_id = luevt->wphys_target_id; + } else { + esas2r_log(ESAS2R_LOG_WARN, + "luevt->dwevent does not have the " + "VDAAE_LU_PHYS_ID bit set (%s:%d)", + __func__, __LINE__); + } + } else { + dc->raid_grp_name[0] = 0; + + esas2r_targ_db_add_raid(a, dc); + } + + esas2r_trace("curr_virt_id: %d", dc->curr_virt_id); + esas2r_trace("curr_phys_id: %d", dc->curr_phys_id); + esas2r_trace("dwevent: %d", luevt->dwevent); + + esas2r_trace_exit(); + } + + if (dc->state == DCS_DEV_ADD) { + /* go to the next device. */ + + dc->curr_targ++; + } + + return false; +} + +/* + * When discovery is done, find all requests on defer queue and + * test if they need to be modified. If a target is no longer present + * then complete the request with RS_SEL. Otherwise, update the + * target_id since after a hibernate it can be a different value. + * VDA does not make passthrough target IDs persistent. + */ +static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a) +{ + unsigned long flags; + struct esas2r_target *t; + struct esas2r_request *rq; + struct list_head *element; + + /* update virt_targ_id in any outstanding esas2r_requests */ + + spin_lock_irqsave(&a->queue_lock, flags); + + list_for_each(element, &a->defer_list) { + rq = list_entry(element, struct esas2r_request, req_list); + if (rq->vrq->scsi.function == VDA_FUNC_SCSI) { + t = a->targetdb + rq->target_id; + + if (t->target_state == TS_PRESENT) + rq->vrq->scsi.target_id = le16_to_cpu( + t->virt_targ_id); + else + rq->req_stat = RS_SEL; + } + + } + + spin_unlock_irqrestore(&a->queue_lock, flags); +} diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c new file mode 100644 index 000000000..f910e2553 --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_flash.c @@ -0,0 +1,1522 @@ + +/* + * linux/drivers/scsi/esas2r/esas2r_flash.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "esas2r.h" + +/* local macro defs */ +#define esas2r_nvramcalc_cksum(n) \ + (esas2r_calc_byte_cksum((u8 *)(n), sizeof(struct esas2r_sas_nvram), \ + SASNVR_CKSUM_SEED)) +#define esas2r_nvramcalc_xor_cksum(n) \ + (esas2r_calc_byte_xor_cksum((u8 *)(n), \ + sizeof(struct esas2r_sas_nvram), 0)) + +#define ESAS2R_FS_DRVR_VER 2 + +static struct esas2r_sas_nvram default_sas_nvram = { + { 'E', 'S', 'A', 'S' }, /* signature */ + SASNVR_VERSION, /* version */ + 0, /* checksum */ + 31, /* max_lun_for_target */ + SASNVR_PCILAT_MAX, /* pci_latency */ + SASNVR1_BOOT_DRVR, /* options1 */ + SASNVR2_HEARTBEAT | SASNVR2_SINGLE_BUS /* options2 */ + | SASNVR2_SW_MUX_CTRL, + SASNVR_COAL_DIS, /* int_coalescing */ + SASNVR_CMDTHR_NONE, /* cmd_throttle */ + 3, /* dev_wait_time */ + 1, /* dev_wait_count */ + 0, /* spin_up_delay */ + 0, /* ssp_align_rate */ + { 0x50, 0x01, 0x08, 0x60, /* sas_addr */ + 0x00, 0x00, 0x00, 0x00 }, + { SASNVR_SPEED_AUTO }, /* phy_speed */ + { SASNVR_MUX_DISABLED }, /* SAS multiplexing */ + { 0 }, /* phy_flags */ + SASNVR_SORT_SAS_ADDR, /* sort_type */ + 3, /* dpm_reqcmd_lmt */ + 3, /* dpm_stndby_time */ + 0, /* dpm_active_time */ + { 0 }, /* phy_target_id */ + SASNVR_VSMH_DISABLED, /* virt_ses_mode */ + SASNVR_RWM_DEFAULT, /* read_write_mode */ + 0, /* link down timeout */ + { 0 } /* reserved */ +}; + +static u8 cmd_to_fls_func[] = { + 0xFF, + VDA_FLASH_READ, + VDA_FLASH_BEGINW, + VDA_FLASH_WRITE, + VDA_FLASH_COMMIT, + VDA_FLASH_CANCEL +}; + +static u8 esas2r_calc_byte_xor_cksum(u8 *addr, u32 len, u8 seed) +{ + u32 cksum = seed; + u8 *p = (u8 *)&cksum; + + while (len) { + if (((uintptr_t)addr & 3) == 0) + break; + + cksum = cksum ^ *addr; + addr++; + len--; + } + while (len >= sizeof(u32)) { + cksum = cksum ^ *(u32 *)addr; + addr += 4; + len -= 4; + } + while (len--) { + cksum = cksum ^ *addr; + addr++; + } + return p[0] ^ p[1] ^ p[2] ^ p[3]; +} + +static u8 esas2r_calc_byte_cksum(void *addr, u32 len, u8 seed) +{ + u8 *p = (u8 *)addr; + u8 cksum = seed; + + while (len--) + cksum = cksum + p[len]; + return cksum; +} + +/* Interrupt callback to process 
FM API write requests. */ +static void esas2r_fmapi_callback(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct atto_vda_flash_req *vrq = &rq->vrq->flash; + struct esas2r_flash_context *fc = + (struct esas2r_flash_context *)rq->interrupt_cx; + + if (rq->req_stat == RS_SUCCESS) { + /* Last request was successful. See what to do now. */ + switch (vrq->sub_func) { + case VDA_FLASH_BEGINW: + if (fc->sgc.cur_offset == NULL) + goto commit; + + vrq->sub_func = VDA_FLASH_WRITE; + rq->req_stat = RS_PENDING; + break; + + case VDA_FLASH_WRITE: +commit: + vrq->sub_func = VDA_FLASH_COMMIT; + rq->req_stat = RS_PENDING; + rq->interrupt_cb = fc->interrupt_cb; + break; + + default: + break; + } + } + + if (rq->req_stat != RS_PENDING) + /* + * All done. call the real callback to complete the FM API + * request. We should only get here if a BEGINW or WRITE + * operation failed. + */ + (*fc->interrupt_cb)(a, rq); +} + +/* + * Build a flash request based on the flash context. The request status + * is filled in on an error. + */ +static void build_flash_msg(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_flash_context *fc = + (struct esas2r_flash_context *)rq->interrupt_cx; + struct esas2r_sg_context *sgc = &fc->sgc; + u8 cksum = 0; + + /* calculate the checksum */ + if (fc->func == VDA_FLASH_BEGINW) { + if (sgc->cur_offset) + cksum = esas2r_calc_byte_xor_cksum(sgc->cur_offset, + sgc->length, + 0); + rq->interrupt_cb = esas2r_fmapi_callback; + } else { + rq->interrupt_cb = fc->interrupt_cb; + } + esas2r_build_flash_req(a, + rq, + fc->func, + cksum, + fc->flsh_addr, + sgc->length); + + esas2r_rq_free_sg_lists(rq, a); + + /* + * remember the length we asked for. we have to keep track of + * the current amount done so we know how much to compare when + * doing the verification phase. + */ + fc->curr_len = fc->sgc.length; + + if (sgc->cur_offset) { + /* setup the S/G context to build the S/G table */ + esas2r_sgc_init(sgc, a, rq, &rq->vrq->flash.data.sge[0]); + + if (!esas2r_build_sg_list(a, rq, sgc)) { + rq->req_stat = RS_BUSY; + return; + } + } else { + fc->sgc.length = 0; + } + + /* update the flsh_addr to the next one to write to */ + fc->flsh_addr += fc->curr_len; +} + +/* determine the method to process the flash request */ +static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq) +{ + /* + * assume we have more to do. if we return with the status set to + * RS_PENDING, FM API tasks will continue. + */ + rq->req_stat = RS_PENDING; + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + /* not supported for now */; + else + build_flash_msg(a, rq); + + return rq->req_stat == RS_PENDING; +} + +/* boot image fixer uppers called before downloading the image. 
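+ * fix_bios() and fix_efi() patch the PCI device ID into the image headers
+ * (and, for the BIOS image, recompute its checksums) so the downloaded
+ * image matches this adapter.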
*/ +static void fix_bios(struct esas2r_adapter *a, struct esas2r_flash_img *fi) +{ + struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_BIOS]; + struct esas2r_pc_image *pi; + struct esas2r_boot_header *bh; + + pi = (struct esas2r_pc_image *)((u8 *)fi + ch->image_offset); + bh = + (struct esas2r_boot_header *)((u8 *)pi + + le16_to_cpu(pi->header_offset)); + bh->device_id = cpu_to_le16(a->pcid->device); + + /* Recalculate the checksum in the PNP header if there */ + if (pi->pnp_offset) { + u8 *pnp_header_bytes = + ((u8 *)pi + le16_to_cpu(pi->pnp_offset)); + + /* Identifier - dword that starts at byte 10 */ + *((u32 *)&pnp_header_bytes[10]) = + cpu_to_le32(MAKEDWORD(a->pcid->subsystem_vendor, + a->pcid->subsystem_device)); + + /* Checksum - byte 9 */ + pnp_header_bytes[9] -= esas2r_calc_byte_cksum(pnp_header_bytes, + 32, 0); + } + + /* Recalculate the checksum needed by the PC */ + pi->checksum = pi->checksum - + esas2r_calc_byte_cksum((u8 *)pi, ch->length, 0); +} + +static void fix_efi(struct esas2r_adapter *a, struct esas2r_flash_img *fi) +{ + struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_EFI]; + u32 len = ch->length; + u32 offset = ch->image_offset; + struct esas2r_efi_image *ei; + struct esas2r_boot_header *bh; + + while (len) { + u32 thislen; + + ei = (struct esas2r_efi_image *)((u8 *)fi + offset); + bh = (struct esas2r_boot_header *)((u8 *)ei + + le16_to_cpu( + ei->header_offset)); + bh->device_id = cpu_to_le16(a->pcid->device); + thislen = (u32)le16_to_cpu(bh->image_length) * 512; + + if (thislen > len) + break; + + len -= thislen; + offset += thislen; + } +} + +/* Complete a FM API request with the specified status. */ +static bool complete_fmapi_req(struct esas2r_adapter *a, + struct esas2r_request *rq, u8 fi_stat) +{ + struct esas2r_flash_context *fc = + (struct esas2r_flash_context *)rq->interrupt_cx; + struct esas2r_flash_img *fi = fc->fi; + + fi->status = fi_stat; + fi->driver_error = rq->req_stat; + rq->interrupt_cb = NULL; + rq->req_stat = RS_SUCCESS; + + if (fi_stat != FI_STAT_IMG_VER) + memset(fc->scratch, 0, FM_BUF_SZ); + + esas2r_enable_heartbeat(a); + clear_bit(AF_FLASH_LOCK, &a->flags); + return false; +} + +/* Process each phase of the flash download process. */ +static void fw_download_proc(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_flash_context *fc = + (struct esas2r_flash_context *)rq->interrupt_cx; + struct esas2r_flash_img *fi = fc->fi; + struct esas2r_component_header *ch; + u32 len; + u8 *p, *q; + + /* If the previous operation failed, just return. */ + if (rq->req_stat != RS_SUCCESS) + goto error; + + /* + * If an upload just completed and the compare length is non-zero, + * then we just read back part of the image we just wrote. verify the + * section and continue reading until the entire image is verified. 
+ */ + if (fc->func == VDA_FLASH_READ + && fc->cmp_len) { + ch = &fi->cmp_hdr[fc->comp_typ]; + + p = fc->scratch; + q = (u8 *)fi /* start of the whole gob */ + + ch->image_offset /* start of the current image */ + + ch->length /* end of the current image */ + - fc->cmp_len; /* where we are now */ + + /* + * NOTE - curr_len is the exact count of bytes for the read + * even when the end is read and its not a full buffer + */ + for (len = fc->curr_len; len; len--) + if (*p++ != *q++) + goto error; + + fc->cmp_len -= fc->curr_len; /* # left to compare */ + + /* Update fc and determine the length for the next upload */ + if (fc->cmp_len > FM_BUF_SZ) + fc->sgc.length = FM_BUF_SZ; + else + fc->sgc.length = fc->cmp_len; + + fc->sgc.cur_offset = fc->sgc_offset + + ((u8 *)fc->scratch - (u8 *)fi); + } + + /* + * This code uses a 'while' statement since the next component may + * have a length = zero. This can happen since some components are + * not required. At the end of this 'while' we set up the length + * for the next request and therefore sgc.length can be = 0. + */ + while (fc->sgc.length == 0) { + ch = &fi->cmp_hdr[fc->comp_typ]; + + switch (fc->task) { + case FMTSK_ERASE_BOOT: + /* the BIOS image is written next */ + ch = &fi->cmp_hdr[CH_IT_BIOS]; + if (ch->length == 0) + goto no_bios; + + fc->task = FMTSK_WRTBIOS; + fc->func = VDA_FLASH_BEGINW; + fc->comp_typ = CH_IT_BIOS; + fc->flsh_addr = FLS_OFFSET_BOOT; + fc->sgc.length = ch->length; + fc->sgc.cur_offset = fc->sgc_offset + + ch->image_offset; + break; + + case FMTSK_WRTBIOS: + /* + * The BIOS image has been written - read it and + * verify it + */ + fc->task = FMTSK_READBIOS; + fc->func = VDA_FLASH_READ; + fc->flsh_addr = FLS_OFFSET_BOOT; + fc->cmp_len = ch->length; + fc->sgc.length = FM_BUF_SZ; + fc->sgc.cur_offset = fc->sgc_offset + + ((u8 *)fc->scratch - + (u8 *)fi); + break; + + case FMTSK_READBIOS: +no_bios: + /* + * Mark the component header status for the image + * completed + */ + ch->status = CH_STAT_SUCCESS; + + /* The MAC image is written next */ + ch = &fi->cmp_hdr[CH_IT_MAC]; + if (ch->length == 0) + goto no_mac; + + fc->task = FMTSK_WRTMAC; + fc->func = VDA_FLASH_BEGINW; + fc->comp_typ = CH_IT_MAC; + fc->flsh_addr = FLS_OFFSET_BOOT + + fi->cmp_hdr[CH_IT_BIOS].length; + fc->sgc.length = ch->length; + fc->sgc.cur_offset = fc->sgc_offset + + ch->image_offset; + break; + + case FMTSK_WRTMAC: + /* The MAC image has been written - read and verify */ + fc->task = FMTSK_READMAC; + fc->func = VDA_FLASH_READ; + fc->flsh_addr -= ch->length; + fc->cmp_len = ch->length; + fc->sgc.length = FM_BUF_SZ; + fc->sgc.cur_offset = fc->sgc_offset + + ((u8 *)fc->scratch - + (u8 *)fi); + break; + + case FMTSK_READMAC: +no_mac: + /* + * Mark the component header status for the image + * completed + */ + ch->status = CH_STAT_SUCCESS; + + /* The EFI image is written next */ + ch = &fi->cmp_hdr[CH_IT_EFI]; + if (ch->length == 0) + goto no_efi; + + fc->task = FMTSK_WRTEFI; + fc->func = VDA_FLASH_BEGINW; + fc->comp_typ = CH_IT_EFI; + fc->flsh_addr = FLS_OFFSET_BOOT + + fi->cmp_hdr[CH_IT_BIOS].length + + fi->cmp_hdr[CH_IT_MAC].length; + fc->sgc.length = ch->length; + fc->sgc.cur_offset = fc->sgc_offset + + ch->image_offset; + break; + + case FMTSK_WRTEFI: + /* The EFI image has been written - read and verify */ + fc->task = FMTSK_READEFI; + fc->func = VDA_FLASH_READ; + fc->flsh_addr -= ch->length; + fc->cmp_len = ch->length; + fc->sgc.length = FM_BUF_SZ; + fc->sgc.cur_offset = fc->sgc_offset + + ((u8 *)fc->scratch - + (u8 *)fi); + break; + + case 
FMTSK_READEFI: +no_efi: + /* + * Mark the component header status for the image + * completed + */ + ch->status = CH_STAT_SUCCESS; + + /* The CFG image is written next */ + ch = &fi->cmp_hdr[CH_IT_CFG]; + + if (ch->length == 0) + goto no_cfg; + fc->task = FMTSK_WRTCFG; + fc->func = VDA_FLASH_BEGINW; + fc->comp_typ = CH_IT_CFG; + fc->flsh_addr = FLS_OFFSET_CPYR - ch->length; + fc->sgc.length = ch->length; + fc->sgc.cur_offset = fc->sgc_offset + + ch->image_offset; + break; + + case FMTSK_WRTCFG: + /* The CFG image has been written - read and verify */ + fc->task = FMTSK_READCFG; + fc->func = VDA_FLASH_READ; + fc->flsh_addr = FLS_OFFSET_CPYR - ch->length; + fc->cmp_len = ch->length; + fc->sgc.length = FM_BUF_SZ; + fc->sgc.cur_offset = fc->sgc_offset + + ((u8 *)fc->scratch - + (u8 *)fi); + break; + + case FMTSK_READCFG: +no_cfg: + /* + * Mark the component header status for the image + * completed + */ + ch->status = CH_STAT_SUCCESS; + + /* + * The download is complete. If in degraded mode, + * attempt a chip reset. + */ + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + esas2r_local_reset_adapter(a); + + a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version; + esas2r_print_flash_rev(a); + + /* Update the type of boot image on the card */ + memcpy(a->image_type, fi->rel_version, + sizeof(fi->rel_version)); + complete_fmapi_req(a, rq, FI_STAT_SUCCESS); + return; + } + + /* If verifying, don't try reading more than what's there */ + if (fc->func == VDA_FLASH_READ + && fc->sgc.length > fc->cmp_len) + fc->sgc.length = fc->cmp_len; + } + + /* Build the request to perform the next action */ + if (!load_image(a, rq)) { +error: + if (fc->comp_typ < fi->num_comps) { + ch = &fi->cmp_hdr[fc->comp_typ]; + ch->status = CH_STAT_FAILED; + } + + complete_fmapi_req(a, rq, FI_STAT_FAILED); + } +} + +/* Determine the flash image adaptyp for this adapter */ +static u8 get_fi_adap_type(struct esas2r_adapter *a) +{ + u8 type; + + /* use the device ID to get the correct adap_typ for this HBA */ + switch (a->pcid->device) { + case ATTO_DID_INTEL_IOP348: + type = FI_AT_SUN_LAKE; + break; + + case ATTO_DID_MV_88RC9580: + case ATTO_DID_MV_88RC9580TS: + case ATTO_DID_MV_88RC9580TSE: + case ATTO_DID_MV_88RC9580TL: + type = FI_AT_MV_9580; + break; + + default: + type = FI_AT_UNKNWN; + break; + } + + return type; +} + +/* Size of config + copyright + flash_ver images, 0 for failure. 
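+ * The region is walked backwards from its end; each block ends with a
+ * size word and a type word, and the walk stops at the first
+ * unrecognized block type.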
*/ +static u32 chk_cfg(u8 *cfg, u32 length, u32 *flash_ver) +{ + u16 *pw = (u16 *)cfg - 1; + u32 sz = 0; + u32 len = length; + + if (len == 0) + len = FM_BUF_SZ; + + if (flash_ver) + *flash_ver = 0; + + while (true) { + u16 type; + u16 size; + + type = le16_to_cpu(*pw--); + size = le16_to_cpu(*pw--); + + if (type != FBT_CPYR + && type != FBT_SETUP + && type != FBT_FLASH_VER) + break; + + if (type == FBT_FLASH_VER + && flash_ver) + *flash_ver = le32_to_cpu(*(u32 *)(pw - 1)); + + sz += size + (2 * sizeof(u16)); + pw -= size / sizeof(u16); + + if (sz > len - (2 * sizeof(u16))) + break; + } + + /* See if we are comparing the size to the specified length */ + if (length && sz != length) + return 0; + + return sz; +} + +/* Verify that the boot image is valid */ +static u8 chk_boot(u8 *boot_img, u32 length) +{ + struct esas2r_boot_image *bi = (struct esas2r_boot_image *)boot_img; + u16 hdroffset = le16_to_cpu(bi->header_offset); + struct esas2r_boot_header *bh; + + if (bi->signature != le16_to_cpu(0xaa55) + || (long)hdroffset > + (long)(65536L - sizeof(struct esas2r_boot_header)) + || (hdroffset & 3) + || (hdroffset < sizeof(struct esas2r_boot_image)) + || ((u32)hdroffset + sizeof(struct esas2r_boot_header) > length)) + return 0xff; + + bh = (struct esas2r_boot_header *)((char *)bi + hdroffset); + + if (bh->signature[0] != 'P' + || bh->signature[1] != 'C' + || bh->signature[2] != 'I' + || bh->signature[3] != 'R' + || le16_to_cpu(bh->struct_length) < + (u16)sizeof(struct esas2r_boot_header) + || bh->class_code[2] != 0x01 + || bh->class_code[1] != 0x04 + || bh->class_code[0] != 0x00 + || (bh->code_type != CODE_TYPE_PC + && bh->code_type != CODE_TYPE_OPEN + && bh->code_type != CODE_TYPE_EFI)) + return 0xff; + + return bh->code_type; +} + +/* The sum of all the WORDS of the image */ +static u16 calc_fi_checksum(struct esas2r_flash_context *fc) +{ + struct esas2r_flash_img *fi = fc->fi; + u16 cksum; + u32 len; + u16 *pw; + + for (len = (fi->length - fc->fi_hdr_len) / 2, + pw = (u16 *)((u8 *)fi + fc->fi_hdr_len), + cksum = 0; + len; + len--, pw++) + cksum = cksum + le16_to_cpu(*pw); + + return cksum; +} + +/* + * Verify the flash image structure. The following verifications will + * be performed: + * 1) verify the fi_version is correct + * 2) verify the checksum of the entire image. + * 3) validate the adap_typ, action and length fields. + * 4) validate each component header. check the img_type and + * length fields + * 5) validate each component image. validate signatures and + * local checksums + */ +static bool verify_fi(struct esas2r_adapter *a, + struct esas2r_flash_context *fc) +{ + struct esas2r_flash_img *fi = fc->fi; + u8 type; + bool imgerr; + u16 i; + u32 len; + struct esas2r_component_header *ch; + + /* Verify the length - length must even since we do a word checksum */ + len = fi->length; + + if ((len & 1) + || len < fc->fi_hdr_len) { + fi->status = FI_STAT_LENGTH; + return false; + } + + /* Get adapter type and verify type in flash image */ + type = get_fi_adap_type(a); + if ((type == FI_AT_UNKNWN) || (fi->adap_typ != type)) { + fi->status = FI_STAT_ADAPTYP; + return false; + } + + /* + * Loop through each component and verify the img_type and length + * fields. Keep a running count of the sizes sooze we can verify total + * size to additive size. + */ + imgerr = false; + + for (i = 0, len = 0, ch = fi->cmp_hdr; + i < fi->num_comps; + i++, ch++) { + bool cmperr = false; + + /* + * Verify that the component header has the same index as the + * image type. 
The headers must be ordered correctly + */ + if (i != ch->img_type) { + imgerr = true; + ch->status = CH_STAT_INVALID; + continue; + } + + switch (ch->img_type) { + case CH_IT_BIOS: + type = CODE_TYPE_PC; + break; + + case CH_IT_MAC: + type = CODE_TYPE_OPEN; + break; + + case CH_IT_EFI: + type = CODE_TYPE_EFI; + break; + } + + switch (ch->img_type) { + case CH_IT_FW: + case CH_IT_NVR: + break; + + case CH_IT_BIOS: + case CH_IT_MAC: + case CH_IT_EFI: + if (ch->length & 0x1ff) + cmperr = true; + + /* Test if component image is present */ + if (ch->length == 0) + break; + + /* Image is present - verify the image */ + if (chk_boot((u8 *)fi + ch->image_offset, ch->length) + != type) + cmperr = true; + + break; + + case CH_IT_CFG: + + /* Test if component image is present */ + if (ch->length == 0) { + cmperr = true; + break; + } + + /* Image is present - verify the image */ + if (!chk_cfg((u8 *)fi + ch->image_offset + ch->length, + ch->length, NULL)) + cmperr = true; + + break; + + default: + + fi->status = FI_STAT_UNKNOWN; + return false; + } + + if (cmperr) { + imgerr = true; + ch->status = CH_STAT_INVALID; + } else { + ch->status = CH_STAT_PENDING; + len += ch->length; + } + } + + if (imgerr) { + fi->status = FI_STAT_MISSING; + return false; + } + + /* Compare fi->length to the sum of ch->length fields */ + if (len != fi->length - fc->fi_hdr_len) { + fi->status = FI_STAT_LENGTH; + return false; + } + + /* Compute the checksum - it should come out zero */ + if (fi->checksum != calc_fi_checksum(fc)) { + fi->status = FI_STAT_CHKSUM; + return false; + } + + return true; +} + +/* Fill in the FS IOCTL response data from a completed request. */ +static void esas2r_complete_fs_ioctl(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_ioctl_fs *fs = + (struct esas2r_ioctl_fs *)rq->interrupt_cx; + + if (rq->vrq->flash.sub_func == VDA_FLASH_COMMIT) + esas2r_enable_heartbeat(a); + + fs->driver_error = rq->req_stat; + + if (fs->driver_error == RS_SUCCESS) + fs->status = ATTO_STS_SUCCESS; + else + fs->status = ATTO_STS_FAILED; +} + +/* Prepare an FS IOCTL request to be sent to the firmware. 
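+ * Returns false with fs->status set if the request is rejected up
+ * front, or true once the request has been started on the adapter.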
*/ +bool esas2r_process_fs_ioctl(struct esas2r_adapter *a, + struct esas2r_ioctl_fs *fs, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc) +{ + u8 cmdcnt = (u8)ARRAY_SIZE(cmd_to_fls_func); + struct esas2r_ioctlfs_command *fsc = &fs->command; + u8 func = 0; + u32 datalen; + + fs->status = ATTO_STS_FAILED; + fs->driver_error = RS_PENDING; + + if (fs->version > ESAS2R_FS_VER) { + fs->status = ATTO_STS_INV_VERSION; + return false; + } + + if (fsc->command >= cmdcnt) { + fs->status = ATTO_STS_INV_FUNC; + return false; + } + + func = cmd_to_fls_func[fsc->command]; + if (func == 0xFF) { + fs->status = ATTO_STS_INV_FUNC; + return false; + } + + if (fsc->command != ESAS2R_FS_CMD_CANCEL) { + if ((a->pcid->device != ATTO_DID_MV_88RC9580 + || fs->adap_type != ESAS2R_FS_AT_ESASRAID2) + && (a->pcid->device != ATTO_DID_MV_88RC9580TS + || fs->adap_type != ESAS2R_FS_AT_TSSASRAID2) + && (a->pcid->device != ATTO_DID_MV_88RC9580TSE + || fs->adap_type != ESAS2R_FS_AT_TSSASRAID2E) + && (a->pcid->device != ATTO_DID_MV_88RC9580TL + || fs->adap_type != ESAS2R_FS_AT_TLSASHBA)) { + fs->status = ATTO_STS_INV_ADAPTER; + return false; + } + + if (fs->driver_ver > ESAS2R_FS_DRVR_VER) { + fs->status = ATTO_STS_INV_DRVR_VER; + return false; + } + } + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) { + fs->status = ATTO_STS_DEGRADED; + return false; + } + + rq->interrupt_cb = esas2r_complete_fs_ioctl; + rq->interrupt_cx = fs; + datalen = le32_to_cpu(fsc->length); + esas2r_build_flash_req(a, + rq, + func, + fsc->checksum, + le32_to_cpu(fsc->flash_addr), + datalen); + + if (func == VDA_FLASH_WRITE + || func == VDA_FLASH_READ) { + if (datalen == 0) { + fs->status = ATTO_STS_INV_FUNC; + return false; + } + + esas2r_sgc_init(sgc, a, rq, rq->vrq->flash.data.sge); + sgc->length = datalen; + + if (!esas2r_build_sg_list(a, rq, sgc)) { + fs->status = ATTO_STS_OUT_OF_RSRC; + return false; + } + } + + if (func == VDA_FLASH_COMMIT) + esas2r_disable_heartbeat(a); + + esas2r_start_request(a, rq); + + return true; +} + +static bool esas2r_flash_access(struct esas2r_adapter *a, u32 function) +{ + u32 starttime; + u32 timeout; + u32 intstat; + u32 doorbell; + + /* Disable chip interrupts awhile */ + if (function == DRBL_FLASH_REQ) + esas2r_disable_chip_interrupts(a); + + /* Issue the request to the firmware */ + esas2r_write_register_dword(a, MU_DOORBELL_IN, function); + + /* Now wait for the firmware to process it */ + starttime = jiffies_to_msecs(jiffies); + + if (test_bit(AF_CHPRST_PENDING, &a->flags) || + test_bit(AF_DISC_PENDING, &a->flags)) + timeout = 40000; + else + timeout = 5000; + + while (true) { + intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); + + if (intstat & MU_INTSTAT_DRBL) { + /* Got a doorbell interrupt. Check for the function */ + doorbell = + esas2r_read_register_dword(a, MU_DOORBELL_OUT); + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + if (doorbell & function) + break; + } + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + if ((jiffies_to_msecs(jiffies) - starttime) > timeout) { + /* + * Iimeout. If we were requesting flash access, + * indicate we are done so the firmware knows we gave + * up. If this was a REQ, we also need to re-enable + * chip interrupts. 
+ */ + if (function == DRBL_FLASH_REQ) { + esas2r_hdebug("flash access timeout"); + esas2r_write_register_dword(a, MU_DOORBELL_IN, + DRBL_FLASH_DONE); + esas2r_enable_chip_interrupts(a); + } else { + esas2r_hdebug("flash release timeout"); + } + + return false; + } + } + + /* if we're done, re-enable chip interrupts */ + if (function == DRBL_FLASH_DONE) + esas2r_enable_chip_interrupts(a); + + return true; +} + +#define WINDOW_SIZE ((signed int)MW_DATA_WINDOW_SIZE) + +bool esas2r_read_flash_block(struct esas2r_adapter *a, + void *to, + u32 from, + u32 size) +{ + u8 *end = (u8 *)to; + + /* Try to acquire access to the flash */ + if (!esas2r_flash_access(a, DRBL_FLASH_REQ)) + return false; + + while (size) { + u32 len; + u32 offset; + u32 iatvr; + + if (test_bit(AF2_SERIAL_FLASH, &a->flags2)) + iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE); + else + iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE); + + esas2r_map_data_window(a, iatvr); + offset = from & (WINDOW_SIZE - 1); + len = size; + + if (len > WINDOW_SIZE - offset) + len = WINDOW_SIZE - offset; + + from += len; + size -= len; + + while (len--) { + *end++ = esas2r_read_data_byte(a, offset); + offset++; + } + } + + /* Release flash access */ + esas2r_flash_access(a, DRBL_FLASH_DONE); + return true; +} + +bool esas2r_read_flash_rev(struct esas2r_adapter *a) +{ + u8 bytes[256]; + u16 *pw; + u16 *pwstart; + u16 type; + u16 size; + u32 sz; + + sz = sizeof(bytes); + pw = (u16 *)(bytes + sz); + pwstart = (u16 *)bytes + 2; + + if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_CPYR - sz, sz)) + goto invalid_rev; + + while (pw >= pwstart) { + pw--; + type = le16_to_cpu(*pw); + pw--; + size = le16_to_cpu(*pw); + pw -= size / 2; + + if (type == FBT_CPYR + || type == FBT_SETUP + || pw < pwstart) + continue; + + if (type == FBT_FLASH_VER) + a->flash_ver = le32_to_cpu(*(u32 *)pw); + + break; + } + +invalid_rev: + return esas2r_print_flash_rev(a); +} + +bool esas2r_print_flash_rev(struct esas2r_adapter *a) +{ + u16 year = LOWORD(a->flash_ver); + u8 day = LOBYTE(HIWORD(a->flash_ver)); + u8 month = HIBYTE(HIWORD(a->flash_ver)); + + if (day == 0 + || month == 0 + || day > 31 + || month > 12 + || year < 2006 + || year > 9999) { + strcpy(a->flash_rev, "not found"); + a->flash_ver = 0; + return false; + } + + sprintf(a->flash_rev, "%02d/%02d/%04d", month, day, year); + esas2r_hdebug("flash version: %s", a->flash_rev); + return true; +} + +/* + * Find the type of boot image type that is currently in the flash. + * The chip only has a 64 KB PCI-e expansion ROM + * size so only one image can be flashed at a time. + */ +bool esas2r_read_image_type(struct esas2r_adapter *a) +{ + u8 bytes[256]; + struct esas2r_boot_image *bi; + struct esas2r_boot_header *bh; + u32 sz; + u32 len; + u32 offset; + + /* Start at the base of the boot images and look for a valid image */ + sz = sizeof(bytes); + len = FLS_LENGTH_BOOT; + offset = 0; + + while (true) { + if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_BOOT + + offset, + sz)) + goto invalid_rev; + + bi = (struct esas2r_boot_image *)bytes; + bh = (struct esas2r_boot_header *)((u8 *)bi + + le16_to_cpu( + bi->header_offset)); + if (bi->signature != cpu_to_le16(0xAA55)) + goto invalid_rev; + + if (bh->code_type == CODE_TYPE_PC) { + strcpy(a->image_type, "BIOS"); + + return true; + } else if (bh->code_type == CODE_TYPE_EFI) { + struct esas2r_efi_image *ei; + + /* + * So we have an EFI image. There are several types + * so see which architecture we have. 
+ */ + ei = (struct esas2r_efi_image *)bytes; + + switch (le16_to_cpu(ei->machine_type)) { + case EFI_MACHINE_IA32: + strcpy(a->image_type, "EFI 32-bit"); + return true; + + case EFI_MACHINE_IA64: + strcpy(a->image_type, "EFI itanium"); + return true; + + case EFI_MACHINE_X64: + strcpy(a->image_type, "EFI 64-bit"); + return true; + + case EFI_MACHINE_EBC: + strcpy(a->image_type, "EFI EBC"); + return true; + + default: + goto invalid_rev; + } + } else { + u32 thislen; + + /* jump to the next image */ + thislen = (u32)le16_to_cpu(bh->image_length) * 512; + if (thislen == 0 + || thislen + offset > len + || bh->indicator == INDICATOR_LAST) + break; + + offset += thislen; + } + } + +invalid_rev: + strcpy(a->image_type, "no boot images"); + return false; +} + +/* + * Read and validate current NVRAM parameters by accessing + * physical NVRAM directly. if currently stored parameters are + * invalid, use the defaults. + */ +bool esas2r_nvram_read_direct(struct esas2r_adapter *a) +{ + bool result; + + if (down_interruptible(&a->nvram_semaphore)) + return false; + + if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR, + sizeof(struct esas2r_sas_nvram))) { + esas2r_hdebug("NVRAM read failed, using defaults"); + up(&a->nvram_semaphore); + return false; + } + + result = esas2r_nvram_validate(a); + + up(&a->nvram_semaphore); + + return result; +} + +/* Interrupt callback to process NVRAM completions. */ +static void esas2r_nvram_callback(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct atto_vda_flash_req *vrq = &rq->vrq->flash; + + if (rq->req_stat == RS_SUCCESS) { + /* last request was successful. see what to do now. */ + + switch (vrq->sub_func) { + case VDA_FLASH_BEGINW: + vrq->sub_func = VDA_FLASH_WRITE; + rq->req_stat = RS_PENDING; + break; + + case VDA_FLASH_WRITE: + vrq->sub_func = VDA_FLASH_COMMIT; + rq->req_stat = RS_PENDING; + break; + + case VDA_FLASH_READ: + esas2r_nvram_validate(a); + break; + + case VDA_FLASH_COMMIT: + default: + break; + } + } + + if (rq->req_stat != RS_PENDING) { + /* update the NVRAM state */ + if (rq->req_stat == RS_SUCCESS) + set_bit(AF_NVR_VALID, &a->flags); + else + clear_bit(AF_NVR_VALID, &a->flags); + + esas2r_enable_heartbeat(a); + + up(&a->nvram_semaphore); + } +} + +/* + * Write the contents of nvram to the adapter's physical NVRAM. + * The cached copy of the NVRAM is also updated. 
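+ * The write is issued as a BEGINW/WRITE/COMMIT sequence driven by
+ * esas2r_nvram_callback(), with the adapter heartbeat disabled until
+ * the sequence finishes.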
+ */ +bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq, + struct esas2r_sas_nvram *nvram) +{ + struct esas2r_sas_nvram *n = nvram; + u8 sas_address_bytes[8]; + u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0]; + struct atto_vda_flash_req *vrq = &rq->vrq->flash; + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return false; + + if (down_interruptible(&a->nvram_semaphore)) + return false; + + if (n == NULL) + n = a->nvram; + + /* check the validity of the settings */ + if (n->version > SASNVR_VERSION) { + up(&a->nvram_semaphore); + return false; + } + + memcpy(&sas_address_bytes[0], n->sas_addr, 8); + + if (sas_address_bytes[0] != 0x50 + || sas_address_bytes[1] != 0x01 + || sas_address_bytes[2] != 0x08 + || (sas_address_bytes[3] & 0xF0) != 0x60 + || ((sas_address_bytes[3] & 0x0F) | sas_address_dwords[1]) == 0) { + up(&a->nvram_semaphore); + return false; + } + + if (n->spin_up_delay > SASNVR_SPINUP_MAX) + n->spin_up_delay = SASNVR_SPINUP_MAX; + + n->version = SASNVR_VERSION; + n->checksum = n->checksum - esas2r_nvramcalc_cksum(n); + memcpy(a->nvram, n, sizeof(struct esas2r_sas_nvram)); + + /* write the NVRAM */ + n = a->nvram; + esas2r_disable_heartbeat(a); + + esas2r_build_flash_req(a, + rq, + VDA_FLASH_BEGINW, + esas2r_nvramcalc_xor_cksum(n), + FLS_OFFSET_NVR, + sizeof(struct esas2r_sas_nvram)); + + if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) { + + vrq->data.sge[0].length = + cpu_to_le32(SGE_LAST | + sizeof(struct esas2r_sas_nvram)); + vrq->data.sge[0].address = cpu_to_le64( + a->uncached_phys + (u64)((u8 *)n - a->uncached)); + } else { + vrq->data.prde[0].ctl_len = + cpu_to_le32(sizeof(struct esas2r_sas_nvram)); + vrq->data.prde[0].address = cpu_to_le64( + a->uncached_phys + + (u64)((u8 *)n - a->uncached)); + } + rq->interrupt_cb = esas2r_nvram_callback; + esas2r_start_request(a, rq); + return true; +} + +/* Validate the cached NVRAM. if the NVRAM is invalid, load the defaults. */ +bool esas2r_nvram_validate(struct esas2r_adapter *a) +{ + struct esas2r_sas_nvram *n = a->nvram; + bool rslt = false; + + if (n->signature[0] != 'E' + || n->signature[1] != 'S' + || n->signature[2] != 'A' + || n->signature[3] != 'S') { + esas2r_hdebug("invalid NVRAM signature"); + } else if (esas2r_nvramcalc_cksum(n)) { + esas2r_hdebug("invalid NVRAM checksum"); + } else if (n->version > SASNVR_VERSION) { + esas2r_hdebug("invalid NVRAM version"); + } else { + set_bit(AF_NVR_VALID, &a->flags); + rslt = true; + } + + if (rslt == false) { + esas2r_hdebug("using defaults"); + esas2r_nvram_set_defaults(a); + } + + return rslt; +} + +/* + * Set the cached NVRAM to defaults. note that this function sets the default + * NVRAM when it has been determined that the physical NVRAM is invalid. + * In this case, the SAS address is fabricated. + */ +void esas2r_nvram_set_defaults(struct esas2r_adapter *a) +{ + struct esas2r_sas_nvram *n = a->nvram; + u32 time = jiffies_to_msecs(jiffies); + + clear_bit(AF_NVR_VALID, &a->flags); + *n = default_sas_nvram; + n->sas_addr[3] |= 0x0F; + n->sas_addr[4] = HIBYTE(LOWORD(time)); + n->sas_addr[5] = LOBYTE(LOWORD(time)); + n->sas_addr[6] = a->pcid->bus->number; + n->sas_addr[7] = a->pcid->devfn; +} + +void esas2r_nvram_get_defaults(struct esas2r_adapter *a, + struct esas2r_sas_nvram *nvram) +{ + u8 sas_addr[8]; + + /* + * in case we are copying the defaults into the adapter, copy the SAS + * address out first. 
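+ * The defaults are then written to the caller's buffer and the
+ * preserved address is copied back over the default one.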
+ */ + memcpy(&sas_addr[0], a->nvram->sas_addr, 8); + *nvram = default_sas_nvram; + memcpy(&nvram->sas_addr[0], &sas_addr[0], 8); +} + +bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi, + struct esas2r_request *rq, struct esas2r_sg_context *sgc) +{ + struct esas2r_flash_context *fc = &a->flash_context; + u8 j; + struct esas2r_component_header *ch; + + if (test_and_set_bit(AF_FLASH_LOCK, &a->flags)) { + /* flag was already set */ + fi->status = FI_STAT_BUSY; + return false; + } + + memcpy(&fc->sgc, sgc, sizeof(struct esas2r_sg_context)); + sgc = &fc->sgc; + fc->fi = fi; + fc->sgc_offset = sgc->cur_offset; + rq->req_stat = RS_SUCCESS; + rq->interrupt_cx = fc; + + switch (fi->fi_version) { + case FI_VERSION_1: + fc->scratch = ((struct esas2r_flash_img *)fi)->scratch_buf; + fc->num_comps = FI_NUM_COMPS_V1; + fc->fi_hdr_len = sizeof(struct esas2r_flash_img); + break; + + default: + return complete_fmapi_req(a, rq, FI_STAT_IMG_VER); + } + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return complete_fmapi_req(a, rq, FI_STAT_DEGRADED); + + switch (fi->action) { + case FI_ACT_DOWN: /* Download the components */ + /* Verify the format of the flash image */ + if (!verify_fi(a, fc)) + return complete_fmapi_req(a, rq, fi->status); + + /* Adjust the BIOS fields that are dependent on the HBA */ + ch = &fi->cmp_hdr[CH_IT_BIOS]; + + if (ch->length) + fix_bios(a, fi); + + /* Adjust the EFI fields that are dependent on the HBA */ + ch = &fi->cmp_hdr[CH_IT_EFI]; + + if (ch->length) + fix_efi(a, fi); + + /* + * Since the image was just modified, compute the checksum on + * the modified image. First update the CRC for the composite + * expansion ROM image. + */ + fi->checksum = calc_fi_checksum(fc); + + /* Disable the heartbeat */ + esas2r_disable_heartbeat(a); + + /* Now start up the download sequence */ + fc->task = FMTSK_ERASE_BOOT; + fc->func = VDA_FLASH_BEGINW; + fc->comp_typ = CH_IT_CFG; + fc->flsh_addr = FLS_OFFSET_BOOT; + fc->sgc.length = FLS_LENGTH_BOOT; + fc->sgc.cur_offset = NULL; + + /* Setup the callback address */ + fc->interrupt_cb = fw_download_proc; + break; + + case FI_ACT_UPSZ: /* Get upload sizes */ + fi->adap_typ = get_fi_adap_type(a); + fi->flags = 0; + fi->num_comps = fc->num_comps; + fi->length = fc->fi_hdr_len; + + /* Report the type of boot image in the rel_version string */ + memcpy(fi->rel_version, a->image_type, + sizeof(fi->rel_version)); + + /* Build the component headers */ + for (j = 0, ch = fi->cmp_hdr; + j < fi->num_comps; + j++, ch++) { + ch->img_type = j; + ch->status = CH_STAT_PENDING; + ch->length = 0; + ch->version = 0xffffffff; + ch->image_offset = 0; + ch->pad[0] = 0; + ch->pad[1] = 0; + } + + if (a->flash_ver != 0) { + fi->cmp_hdr[CH_IT_BIOS].version = + fi->cmp_hdr[CH_IT_MAC].version = + fi->cmp_hdr[CH_IT_EFI].version = + fi->cmp_hdr[CH_IT_CFG].version + = a->flash_ver; + + fi->cmp_hdr[CH_IT_BIOS].status = + fi->cmp_hdr[CH_IT_MAC].status = + fi->cmp_hdr[CH_IT_EFI].status = + fi->cmp_hdr[CH_IT_CFG].status = + CH_STAT_SUCCESS; + + return complete_fmapi_req(a, rq, FI_STAT_SUCCESS); + } + + fallthrough; + + case FI_ACT_UP: /* Upload the components */ + default: + return complete_fmapi_req(a, rq, FI_STAT_INVALID); + } + + /* + * If we make it here, fc has been setup to do the first task. Call + * load_image to format the request, start it, and get out. The + * interrupt code will call the callback when the first message is + * complete. 
+ */ + if (!load_image(a, rq)) + return complete_fmapi_req(a, rq, FI_STAT_FAILED); + + esas2r_start_request(a, rq); + + return true; +} diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c new file mode 100644 index 000000000..c1a5ab662 --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_init.c @@ -0,0 +1,1699 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_init.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "esas2r.h" + +static bool esas2r_initmem_alloc(struct esas2r_adapter *a, + struct esas2r_mem_desc *mem_desc, + u32 align) +{ + mem_desc->esas2r_param = mem_desc->size + align; + mem_desc->virt_addr = NULL; + mem_desc->phys_addr = 0; + mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev, + (size_t)mem_desc-> + esas2r_param, + (dma_addr_t *)&mem_desc-> + phys_addr, + GFP_KERNEL); + + if (mem_desc->esas2r_data == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to allocate %lu bytes of consistent memory!", + (long + unsigned + int)mem_desc->esas2r_param); + return false; + } + + mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align); + mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align); + memset(mem_desc->virt_addr, 0, mem_desc->size); + return true; +} + +static void esas2r_initmem_free(struct esas2r_adapter *a, + struct esas2r_mem_desc *mem_desc) +{ + if (mem_desc->virt_addr == NULL) + return; + + /* + * Careful!
phys_addr and virt_addr may have been adjusted from the + * original allocation in order to return the desired alignment. That + * means we have to use the original address (in esas2r_data) and size + * (esas2r_param) and calculate the original physical address based on + * the difference between the requested and actual allocation size. + */ + if (mem_desc->phys_addr) { + int unalign = ((u8 *)mem_desc->virt_addr) - + ((u8 *)mem_desc->esas2r_data); + + dma_free_coherent(&a->pcid->dev, + (size_t)mem_desc->esas2r_param, + mem_desc->esas2r_data, + (dma_addr_t)(mem_desc->phys_addr - unalign)); + } else { + kfree(mem_desc->esas2r_data); + } + + mem_desc->virt_addr = NULL; +} + +static bool alloc_vda_req(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_mem_desc *memdesc = kzalloc( + sizeof(struct esas2r_mem_desc), GFP_KERNEL); + + if (memdesc == NULL) { + esas2r_hdebug("could not alloc mem for vda request memdesc\n"); + return false; + } + + memdesc->size = sizeof(union atto_vda_req) + + ESAS2R_DATA_BUF_LEN; + + if (!esas2r_initmem_alloc(a, memdesc, 256)) { + esas2r_hdebug("could not alloc mem for vda request\n"); + kfree(memdesc); + return false; + } + + a->num_vrqs++; + list_add(&memdesc->next_desc, &a->vrq_mds_head); + + rq->vrq_md = memdesc; + rq->vrq = (union atto_vda_req *)memdesc->virt_addr; + rq->vrq->scsi.handle = a->num_vrqs; + + return true; +} + +static void esas2r_unmap_regions(struct esas2r_adapter *a) +{ + if (a->regs) + iounmap((void __iomem *)a->regs); + + a->regs = NULL; + + pci_release_region(a->pcid, 2); + + if (a->data_window) + iounmap((void __iomem *)a->data_window); + + a->data_window = NULL; + + pci_release_region(a->pcid, 0); +} + +static int esas2r_map_regions(struct esas2r_adapter *a) +{ + int error; + + a->regs = NULL; + a->data_window = NULL; + + error = pci_request_region(a->pcid, 2, a->name); + if (error != 0) { + esas2r_log(ESAS2R_LOG_CRIT, + "pci_request_region(2) failed, error %d", + error); + + return error; + } + + a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2), + pci_resource_len(a->pcid, 2)); + if (a->regs == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "ioremap failed for regs mem region\n"); + pci_release_region(a->pcid, 2); + return -EFAULT; + } + + error = pci_request_region(a->pcid, 0, a->name); + if (error != 0) { + esas2r_log(ESAS2R_LOG_CRIT, + "pci_request_region(0) failed, error %d", + error); + esas2r_unmap_regions(a); + return error; + } + + a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid, + 0), + pci_resource_len(a->pcid, 0)); + if (a->data_window == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "ioremap failed for data_window mem region\n"); + esas2r_unmap_regions(a); + return -EFAULT; + } + + return 0; +} + +static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode) +{ + int i; + + /* Set up interrupt mode based on the requested value */ + switch (intr_mode) { + case INTR_MODE_LEGACY: +use_legacy_interrupts: + a->intr_mode = INTR_MODE_LEGACY; + break; + + case INTR_MODE_MSI: + i = pci_enable_msi(a->pcid); + if (i != 0) { + esas2r_log(ESAS2R_LOG_WARN, + "failed to enable MSI for adapter %d, " + "falling back to legacy interrupts " + "(err=%d)", a->index, + i); + goto use_legacy_interrupts; + } + a->intr_mode = INTR_MODE_MSI; + set_bit(AF2_MSI_ENABLED, &a->flags2); + break; + + + default: + esas2r_log(ESAS2R_LOG_WARN, + "unknown interrupt_mode %d requested, " + "falling back to legacy interrupts", + interrupt_mode); + goto use_legacy_interrupts; + } +} + +static void
esas2r_claim_interrupts(struct esas2r_adapter *a) +{ + unsigned long flags = 0; + + if (a->intr_mode == INTR_MODE_LEGACY) + flags |= IRQF_SHARED; + + esas2r_log(ESAS2R_LOG_INFO, + "esas2r_claim_interrupts irq=%d (%p, %s, %lx)", + a->pcid->irq, a, a->name, flags); + + if (request_irq(a->pcid->irq, + (a->intr_mode == + INTR_MODE_LEGACY) ? esas2r_interrupt : + esas2r_msi_interrupt, + flags, + a->name, + a)) { + esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X", + a->pcid->irq); + return; + } + + set_bit(AF2_IRQ_CLAIMED, &a->flags2); + esas2r_log(ESAS2R_LOG_INFO, + "claimed IRQ %d flags: 0x%lx", + a->pcid->irq, flags); +} + +int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid, + int index) +{ + struct esas2r_adapter *a; + u64 bus_addr = 0; + int i; + void *next_uncached; + struct esas2r_request *first_request, *last_request; + bool dma64 = false; + + if (index >= MAX_ADAPTERS) { + esas2r_log(ESAS2R_LOG_CRIT, + "tried to init invalid adapter index %u!", + index); + return 0; + } + + if (esas2r_adapters[index]) { + esas2r_log(ESAS2R_LOG_CRIT, + "tried to init existing adapter index %u!", + index); + return 0; + } + + a = (struct esas2r_adapter *)host->hostdata; + memset(a, 0, sizeof(struct esas2r_adapter)); + a->pcid = pcid; + a->host = host; + + if (sizeof(dma_addr_t) > 4 && + dma_get_required_mask(&pcid->dev) > DMA_BIT_MASK(32) && + !dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(64))) + dma64 = true; + + if (!dma64 && dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(32))) { + esas2r_log(ESAS2R_LOG_CRIT, "failed to set DMA mask"); + esas2r_kill_adapter(index); + return 0; + } + + esas2r_log_dev(ESAS2R_LOG_INFO, &pcid->dev, + "%s-bit PCI addressing enabled\n", dma64 ? "64" : "32"); + + esas2r_adapters[index] = a; + sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index); + esas2r_debug("new adapter %p, name %s", a, a->name); + spin_lock_init(&a->request_lock); + spin_lock_init(&a->fw_event_lock); + mutex_init(&a->fm_api_mutex); + mutex_init(&a->fs_api_mutex); + sema_init(&a->nvram_semaphore, 1); + + esas2r_fw_event_off(a); + snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d", + a->index); + a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name); + + init_waitqueue_head(&a->buffered_ioctl_waiter); + init_waitqueue_head(&a->nvram_waiter); + init_waitqueue_head(&a->fm_api_waiter); + init_waitqueue_head(&a->fs_api_waiter); + init_waitqueue_head(&a->vda_waiter); + + INIT_LIST_HEAD(&a->general_req.req_list); + INIT_LIST_HEAD(&a->active_list); + INIT_LIST_HEAD(&a->defer_list); + INIT_LIST_HEAD(&a->free_sg_list_head); + INIT_LIST_HEAD(&a->avail_request); + INIT_LIST_HEAD(&a->vrq_mds_head); + INIT_LIST_HEAD(&a->fw_event_list); + + first_request = (struct esas2r_request *)((u8 *)(a + 1)); + + for (last_request = first_request, i = 1; i < num_requests; + last_request++, i++) { + INIT_LIST_HEAD(&last_request->req_list); + list_add_tail(&last_request->comp_list, &a->avail_request); + if (!alloc_vda_req(a, last_request)) { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to allocate a VDA request!"); + esas2r_kill_adapter(index); + return 0; + } + } + + esas2r_debug("requests: %p to %p (%d, %d)", first_request, + last_request, + sizeof(*first_request), + num_requests); + + if (esas2r_map_regions(a) != 0) { + esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!"); + esas2r_kill_adapter(index); + return 0; + } + + a->index = index; + + /* interrupts will be disabled until we are done with init */ + atomic_inc(&a->dis_ints_cnt); + atomic_inc(&a->disable_cnt); + 
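/* flag a pending chip reset and discovery for this first init, and assume the legacy SGE interface until esas2r_check_adapter() learns the firmware version */ +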
set_bit(AF_CHPRST_PENDING, &a->flags); + set_bit(AF_DISC_PENDING, &a->flags); + set_bit(AF_FIRST_INIT, &a->flags); + set_bit(AF_LEGACY_SGE_MODE, &a->flags); + + a->init_msg = ESAS2R_INIT_MSG_START; + a->max_vdareq_size = 128; + a->build_sgl = esas2r_build_sg_list_sge; + + esas2r_setup_interrupts(a, interrupt_mode); + + a->uncached_size = esas2r_get_uncached_size(a); + a->uncached = dma_alloc_coherent(&pcid->dev, + (size_t)a->uncached_size, + (dma_addr_t *)&bus_addr, + GFP_KERNEL); + if (a->uncached == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to allocate %d bytes of consistent memory!", + a->uncached_size); + esas2r_kill_adapter(index); + return 0; + } + + a->uncached_phys = bus_addr; + + esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)", + a->uncached_size, + a->uncached, + upper_32_bits(bus_addr), + lower_32_bits(bus_addr)); + memset(a->uncached, 0, a->uncached_size); + next_uncached = a->uncached; + + if (!esas2r_init_adapter_struct(a, + &next_uncached)) { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to initialize adapter structure (2)!"); + esas2r_kill_adapter(index); + return 0; + } + + tasklet_init(&a->tasklet, + esas2r_adapter_tasklet, + (unsigned long)a); + + /* + * Disable chip interrupts to prevent spurious interrupts + * until we claim the IRQ. + */ + esas2r_disable_chip_interrupts(a); + esas2r_check_adapter(a); + + if (!esas2r_init_adapter_hw(a, true)) { + esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!"); + } else { + esas2r_debug("esas2r_init_adapter ok"); + } + + esas2r_claim_interrupts(a); + + if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) + esas2r_enable_chip_interrupts(a); + + set_bit(AF2_INIT_DONE, &a->flags2); + if (!test_bit(AF_DEGRADED_MODE, &a->flags)) + esas2r_kickoff_timer(a); + esas2r_debug("esas2r_init_adapter done for %p (%d)", + a, a->disable_cnt); + + return 1; +} + +static void esas2r_adapter_power_down(struct esas2r_adapter *a, + int power_management) +{ + struct esas2r_mem_desc *memdesc, *next; + + if ((test_bit(AF2_INIT_DONE, &a->flags2)) + && (!test_bit(AF_DEGRADED_MODE, &a->flags))) { + if (!power_management) { + del_timer_sync(&a->timer); + tasklet_kill(&a->tasklet); + } + esas2r_power_down(a); + + /* + * There are versions of firmware that do not handle the sync + * cache command correctly. Stall here to ensure that the + * cache is lazily flushed. 
+ */ + mdelay(500); + esas2r_debug("chip halted"); + } + + /* Remove sysfs binary files */ + if (a->sysfs_fw_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw); + a->sysfs_fw_created = 0; + } + + if (a->sysfs_fs_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs); + a->sysfs_fs_created = 0; + } + + if (a->sysfs_vda_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda); + a->sysfs_vda_created = 0; + } + + if (a->sysfs_hw_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw); + a->sysfs_hw_created = 0; + } + + if (a->sysfs_live_nvram_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, + &bin_attr_live_nvram); + a->sysfs_live_nvram_created = 0; + } + + if (a->sysfs_default_nvram_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, + &bin_attr_default_nvram); + a->sysfs_default_nvram_created = 0; + } + + /* Clean up interrupts */ + if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) { + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "free_irq(%d) called", a->pcid->irq); + + free_irq(a->pcid->irq, a); + esas2r_debug("IRQ released"); + clear_bit(AF2_IRQ_CLAIMED, &a->flags2); + } + + if (test_bit(AF2_MSI_ENABLED, &a->flags2)) { + pci_disable_msi(a->pcid); + clear_bit(AF2_MSI_ENABLED, &a->flags2); + esas2r_debug("MSI disabled"); + } + + if (a->inbound_list_md.virt_addr) + esas2r_initmem_free(a, &a->inbound_list_md); + + if (a->outbound_list_md.virt_addr) + esas2r_initmem_free(a, &a->outbound_list_md); + + list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head, + next_desc) { + esas2r_initmem_free(a, memdesc); + } + + /* Following frees everything allocated via alloc_vda_req */ + list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) { + esas2r_initmem_free(a, memdesc); + list_del(&memdesc->next_desc); + kfree(memdesc); + } + + kfree(a->first_ae_req); + a->first_ae_req = NULL; + + kfree(a->sg_list_mds); + a->sg_list_mds = NULL; + + kfree(a->req_table); + a->req_table = NULL; + + if (a->regs) { + esas2r_unmap_regions(a); + a->regs = NULL; + a->data_window = NULL; + esas2r_debug("regions unmapped"); + } +} + +/* Release/free allocated resources for specified adapters. 
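It powers the adapter down, frees DMA buffers and the claimed IRQ, disables the PCI device, and removes and releases the SCSI host.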
*/ +void esas2r_kill_adapter(int i) +{ + struct esas2r_adapter *a = esas2r_adapters[i]; + + if (a) { + unsigned long flags; + struct workqueue_struct *wq; + esas2r_debug("killing adapter %p [%d] ", a, i); + esas2r_fw_event_off(a); + esas2r_adapter_power_down(a, 0); + if (esas2r_buffered_ioctl && + (a->pcid == esas2r_buffered_ioctl_pcid)) { + dma_free_coherent(&a->pcid->dev, + (size_t)esas2r_buffered_ioctl_size, + esas2r_buffered_ioctl, + esas2r_buffered_ioctl_addr); + esas2r_buffered_ioctl = NULL; + } + + if (a->vda_buffer) { + dma_free_coherent(&a->pcid->dev, + (size_t)VDA_MAX_BUFFER_SIZE, + a->vda_buffer, + (dma_addr_t)a->ppvda_buffer); + a->vda_buffer = NULL; + } + if (a->fs_api_buffer) { + dma_free_coherent(&a->pcid->dev, + (size_t)a->fs_api_buffer_size, + a->fs_api_buffer, + (dma_addr_t)a->ppfs_api_buffer); + a->fs_api_buffer = NULL; + } + + kfree(a->local_atto_ioctl); + a->local_atto_ioctl = NULL; + + spin_lock_irqsave(&a->fw_event_lock, flags); + wq = a->fw_event_q; + a->fw_event_q = NULL; + spin_unlock_irqrestore(&a->fw_event_lock, flags); + if (wq) + destroy_workqueue(wq); + + if (a->uncached) { + dma_free_coherent(&a->pcid->dev, + (size_t)a->uncached_size, + a->uncached, + (dma_addr_t)a->uncached_phys); + a->uncached = NULL; + esas2r_debug("uncached area freed"); + } + + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "pci_disable_device() called. msix_enabled: %d " + "msi_enabled: %d irq: %d pin: %d", + a->pcid->msix_enabled, + a->pcid->msi_enabled, + a->pcid->irq, + a->pcid->pin); + + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "before pci_disable_device() enable_cnt: %d", + a->pcid->enable_cnt.counter); + + pci_disable_device(a->pcid); + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "after pci_disable_device() enable_cnt: %d", + a->pcid->enable_cnt.counter); + + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "pci_set_drv_data(%p, NULL) called", + a->pcid); + + pci_set_drvdata(a->pcid, NULL); + esas2r_adapters[i] = NULL; + + if (test_bit(AF2_INIT_DONE, &a->flags2)) { + clear_bit(AF2_INIT_DONE, &a->flags2); + + set_bit(AF_DEGRADED_MODE, &a->flags); + + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->host->shost_gendev), + "scsi_remove_host() called"); + + scsi_remove_host(a->host); + + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->host->shost_gendev), + "scsi_host_put() called"); + + scsi_host_put(a->host); + } + } +} + +static int __maybe_unused esas2r_suspend(struct device *dev) +{ + struct Scsi_Host *host = dev_get_drvdata(dev); + struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; + + esas2r_log_dev(ESAS2R_LOG_INFO, dev, "suspending adapter()"); + if (!a) + return -ENODEV; + + esas2r_adapter_power_down(a, 1); + esas2r_log_dev(ESAS2R_LOG_INFO, dev, "esas2r_suspend(): 0"); + return 0; +} + +static int __maybe_unused esas2r_resume(struct device *dev) +{ + struct Scsi_Host *host = dev_get_drvdata(dev); + struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; + int rez = 0; + + esas2r_log_dev(ESAS2R_LOG_INFO, dev, "resuming adapter()"); + + if (!a) { + rez = -ENODEV; + goto error_exit; + } + + if (esas2r_map_regions(a) != 0) { + esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!"); + rez = -ENOMEM; + goto error_exit; + } + + /* Set up interupt mode */ + esas2r_setup_interrupts(a, a->intr_mode); + + /* + * Disable chip interrupts to prevent spurious interrupts until we + * claim the IRQ. 
+ */ + esas2r_disable_chip_interrupts(a); + if (!esas2r_power_up(a, true)) { + esas2r_debug("yikes, esas2r_power_up failed"); + rez = -ENOMEM; + goto error_exit; + } + + esas2r_claim_interrupts(a); + + if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) { + /* + * Now that system interrupt(s) are claimed, we can enable + * chip interrupts. + */ + esas2r_enable_chip_interrupts(a); + esas2r_kickoff_timer(a); + } else { + esas2r_debug("yikes, unable to claim IRQ"); + esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!"); + rez = -ENOMEM; + goto error_exit; + } + +error_exit: + esas2r_log_dev(ESAS2R_LOG_CRIT, dev, "esas2r_resume(): %d", + rez); + return rez; +} + +SIMPLE_DEV_PM_OPS(esas2r_pm_ops, esas2r_suspend, esas2r_resume); + +bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str) +{ + set_bit(AF_DEGRADED_MODE, &a->flags); + esas2r_log(ESAS2R_LOG_CRIT, + "setting adapter to degraded mode: %s\n", error_str); + return false; +} + +u32 esas2r_get_uncached_size(struct esas2r_adapter *a) +{ + return sizeof(struct esas2r_sas_nvram) + + ALIGN(ESAS2R_DISC_BUF_LEN, 8) + + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */ + + 8 + + (num_sg_lists * (u16)sgl_page_size) + + ALIGN((num_requests + num_ae_requests + 1 + + ESAS2R_LIST_EXTRA) * + sizeof(struct esas2r_inbound_list_source_entry), + 8) + + ALIGN((num_requests + num_ae_requests + 1 + + ESAS2R_LIST_EXTRA) * + sizeof(struct atto_vda_ob_rsp), 8) + + 256; /* VDA request and buffer align */ +} + +static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a) +{ + if (pci_is_pcie(a->pcid)) { + u16 devcontrol; + + pcie_capability_read_word(a->pcid, PCI_EXP_DEVCTL, &devcontrol); + + if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > + PCI_EXP_DEVCTL_READRQ_512B) { + esas2r_log(ESAS2R_LOG_INFO, + "max read request size > 512B"); + + devcontrol &= ~PCI_EXP_DEVCTL_READRQ; + devcontrol |= PCI_EXP_DEVCTL_READRQ_512B; + pcie_capability_write_word(a->pcid, PCI_EXP_DEVCTL, + devcontrol); + } + } +} + +/* + * Determine the organization of the uncached data area and + * finish initializing the adapter structure + */ +bool esas2r_init_adapter_struct(struct esas2r_adapter *a, + void **uncached_area) +{ + u32 i; + u8 *high; + struct esas2r_inbound_list_source_entry *element; + struct esas2r_request *rq; + struct esas2r_mem_desc *sgl; + + spin_lock_init(&a->sg_list_lock); + spin_lock_init(&a->mem_lock); + spin_lock_init(&a->queue_lock); + + a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS]; + + if (!alloc_vda_req(a, &a->general_req)) { + esas2r_hdebug( + "failed to allocate a VDA request for the general req!"); + return false; + } + + /* allocate requests for asynchronous events */ + a->first_ae_req = + kcalloc(num_ae_requests, sizeof(struct esas2r_request), + GFP_KERNEL); + + if (a->first_ae_req == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to allocate memory for asynchronous events"); + return false; + } + + /* allocate the S/G list memory descriptors */ + a->sg_list_mds = kcalloc(num_sg_lists, sizeof(struct esas2r_mem_desc), + GFP_KERNEL); + + if (a->sg_list_mds == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to allocate memory for s/g list descriptors"); + return false; + } + + /* allocate the request table */ + a->req_table = + kcalloc(num_requests + num_ae_requests + 1, + sizeof(struct esas2r_request *), + GFP_KERNEL); + + if (a->req_table == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to allocate memory for the request table"); + return false; + } + + /* initialize PCI configuration space */ + esas2r_init_pci_cfg_space(a); + + /* + * the 
thunder_stream boards all have a serial flash part that has a + * different base address on the AHB bus. + */ + if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID) + && (a->pcid->subsystem_device & ATTO_SSDID_TBT)) + a->flags2 |= AF2_THUNDERBOLT; + + if (test_bit(AF2_THUNDERBOLT, &a->flags2)) + a->flags2 |= AF2_SERIAL_FLASH; + + if (a->pcid->subsystem_device == ATTO_TLSH_1068) + a->flags2 |= AF2_THUNDERLINK; + + /* Uncached Area */ + high = (u8 *)*uncached_area; + + /* initialize the scatter/gather table pages */ + + for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) { + sgl->size = sgl_page_size; + + list_add_tail(&sgl->next_desc, &a->free_sg_list_head); + + if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) { + /* Allow the driver to load if the minimum count is met. */ + if (i < NUM_SGL_MIN) + return false; + break; + } + } + + /* compute the size of the lists */ + a->list_size = num_requests + ESAS2R_LIST_EXTRA; + + /* allocate the inbound list */ + a->inbound_list_md.size = a->list_size * + sizeof(struct + esas2r_inbound_list_source_entry); + + if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) { + esas2r_hdebug("failed to allocate IB list"); + return false; + } + + /* allocate the outbound list */ + a->outbound_list_md.size = a->list_size * + sizeof(struct atto_vda_ob_rsp); + + if (!esas2r_initmem_alloc(a, &a->outbound_list_md, + ESAS2R_LIST_ALIGN)) { + esas2r_hdebug("failed to allocate OB list"); + return false; + } + + /* allocate the NVRAM structure */ + a->nvram = (struct esas2r_sas_nvram *)high; + high += sizeof(struct esas2r_sas_nvram); + + /* allocate the discovery buffer */ + a->disc_buffer = high; + high += ESAS2R_DISC_BUF_LEN; + high = PTR_ALIGN(high, 8); + + /* allocate the outbound list copy pointer */ + a->outbound_copy = (u32 volatile *)high; + high += sizeof(u32); + + if (!test_bit(AF_NVR_VALID, &a->flags)) + esas2r_nvram_set_defaults(a); + + /* update the caller's uncached memory area pointer */ + *uncached_area = (void *)high; + + /* initialize the allocated memory */ + if (test_bit(AF_FIRST_INIT, &a->flags)) { + esas2r_targ_db_initialize(a); + + /* prime parts of the inbound list */ + element = + (struct esas2r_inbound_list_source_entry *)a-> + inbound_list_md. + virt_addr; + + for (i = 0; i < a->list_size; i++) { + element->address = 0; + element->reserved = 0; + element->length = cpu_to_le32(HWILSE_INTERFACE_F0 + | (sizeof(union + atto_vda_req) + / + sizeof(u32))); + element++; + } + + /* init the AE requests */ + for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++, + i++) { + INIT_LIST_HEAD(&rq->req_list); + if (!alloc_vda_req(a, rq)) { + esas2r_hdebug( + "failed to allocate a VDA request!"); + return false; + } + + esas2r_rq_init_request(rq, a); + + /* override the completion function */ + rq->comp_cb = esas2r_ae_complete; + } + } + + return true; +} + +/* This code will verify that the chip is operational. */ +bool esas2r_check_adapter(struct esas2r_adapter *a) +{ + u32 starttime; + u32 doorbell; + u64 ppaddr; + u32 dw; + + /* + * if the chip reset detected flag is set, we can bypass a bunch of + * stuff. + */ + if (test_bit(AF_CHPRST_DETECTED, &a->flags)) + goto skip_chip_reset; + + /* + * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver + * may have left them enabled or we may be recovering from a fault.
+ */ + esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK); + esas2r_flush_register_dword(a, MU_INT_MASK_OUT); + + /* + * wait for the firmware to become ready by forcing an interrupt and + * waiting for a response. + */ + starttime = jiffies_to_msecs(jiffies); + + while (true) { + esas2r_force_interrupt(a); + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell == 0xFFFFFFFF) { + /* + * Give the firmware up to two seconds to enable + * register access after a reset. + */ + if ((jiffies_to_msecs(jiffies) - starttime) > 2000) + return esas2r_set_degraded_mode(a, + "unable to access registers"); + } else if (doorbell & DRBL_FORCE_INT) { + u32 ver = (doorbell & DRBL_FW_VER_MSK); + + /* + * This driver supports version 0 and version 1 of + * the API + */ + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + + if (ver == DRBL_FW_VER_0) { + set_bit(AF_LEGACY_SGE_MODE, &a->flags); + + a->max_vdareq_size = 128; + a->build_sgl = esas2r_build_sg_list_sge; + } else if (ver == DRBL_FW_VER_1) { + clear_bit(AF_LEGACY_SGE_MODE, &a->flags); + + a->max_vdareq_size = 1024; + a->build_sgl = esas2r_build_sg_list_prd; + } else { + return esas2r_set_degraded_mode(a, + "unknown firmware version"); + } + break; + } + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + if ((jiffies_to_msecs(jiffies) - starttime) > 180000) { + esas2r_hdebug("FW ready TMO"); + esas2r_bugon(); + + return esas2r_set_degraded_mode(a, + "firmware start has timed out"); + } + } + + /* purge any asynchronous events since we will repost them later */ + esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN); + starttime = jiffies_to_msecs(jiffies); + + while (true) { + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell & DRBL_MSG_IFC_DOWN) { + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + break; + } + + schedule_timeout_interruptible(msecs_to_jiffies(50)); + + if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { + esas2r_hdebug("timeout waiting for interface down"); + break; + } + } +skip_chip_reset: + /* + * first things first, before we go changing any of these registers + * disable the communication lists. 
+ */ + dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG); + dw &= ~MU_ILC_ENABLE; + esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw); + dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG); + dw &= ~MU_OLC_ENABLE; + esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw); + + /* configure the communication list addresses */ + ppaddr = a->inbound_list_md.phys_addr; + esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO, + lower_32_bits(ppaddr)); + esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI, + upper_32_bits(ppaddr)); + ppaddr = a->outbound_list_md.phys_addr; + esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO, + lower_32_bits(ppaddr)); + esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI, + upper_32_bits(ppaddr)); + ppaddr = a->uncached_phys + + ((u8 *)a->outbound_copy - a->uncached); + esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO, + lower_32_bits(ppaddr)); + esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI, + upper_32_bits(ppaddr)); + + /* reset the read and write pointers */ + *a->outbound_copy = + a->last_write = + a->last_read = a->list_size - 1; + set_bit(AF_COMM_LIST_TOGGLE, &a->flags); + esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE | + a->last_write); + esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE | + a->last_write); + esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE | + a->last_write); + esas2r_write_register_dword(a, MU_OUT_LIST_WRITE, + MU_OLW_TOGGLE | a->last_write); + + /* configure the interface select fields */ + dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG); + dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST); + esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG, + (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR)); + dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG); + dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE); + esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG, + (dw | MU_OLIC_LIST_F0 | + MU_OLIC_SOURCE_DDR)); + + /* finish configuring the communication lists */ + dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG); + dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK); + dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC + | (a->list_size << MU_ILC_NUMBER_SHIFT); + esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw); + dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG); + dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK); + dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT); + esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw); + + /* + * notify the firmware that we're done setting up the communication + * list registers. wait here until the firmware is done configuring + * its lists. it will signal that it is done by enabling the lists. + */ + esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT); + starttime = jiffies_to_msecs(jiffies); + + while (true) { + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell & DRBL_MSG_IFC_INIT) { + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + break; + } + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { + esas2r_hdebug( + "timeout waiting for communication list init"); + esas2r_bugon(); + return esas2r_set_degraded_mode(a, + "timeout waiting for communication list init"); + } + } + + /* + * flag whether the firmware supports the power down doorbell. we + * determine this by reading the inbound doorbell enable mask. 
+ */ + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB); + if (doorbell & DRBL_POWER_DOWN) + set_bit(AF2_VDA_POWER_DOWN, &a->flags2); + else + clear_bit(AF2_VDA_POWER_DOWN, &a->flags2); + + /* + * enable assertion of outbound queue and doorbell interrupts in the + * main interrupt cause register. + */ + esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK); + esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK); + return true; +} + +/* Process the initialization message just completed and format the next one. */ +static bool esas2r_format_init_msg(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + u32 msg = a->init_msg; + struct atto_vda_cfg_init *ci; + + a->init_msg = 0; + + switch (msg) { + case ESAS2R_INIT_MSG_START: + case ESAS2R_INIT_MSG_REINIT: + { + esas2r_hdebug("CFG init"); + esas2r_build_cfg_req(a, + rq, + VDA_CFG_INIT, + 0, + NULL); + ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init; + ci->sgl_page_size = cpu_to_le32(sgl_page_size); + /* firmware interface overflows in y2106 */ + ci->epoch_time = cpu_to_le32(ktime_get_real_seconds()); + rq->flags |= RF_FAILURE_OK; + a->init_msg = ESAS2R_INIT_MSG_INIT; + break; + } + + case ESAS2R_INIT_MSG_INIT: + if (rq->req_stat == RS_SUCCESS) { + u32 major; + u32 minor; + u16 fw_release; + + a->fw_version = le16_to_cpu( + rq->func_rsp.cfg_rsp.vda_version); + a->fw_build = rq->func_rsp.cfg_rsp.fw_build; + fw_release = le16_to_cpu( + rq->func_rsp.cfg_rsp.fw_release); + major = LOBYTE(fw_release); + minor = HIBYTE(fw_release); + a->fw_version += (major << 16) + (minor << 24); + } else { + esas2r_hdebug("FAILED"); + } + + /* + * the 2.71 and earlier releases of R6xx firmware did not error + * unsupported config requests correctly. + */ + + if ((test_bit(AF2_THUNDERBOLT, &a->flags2)) + || (be32_to_cpu(a->fw_version) > 0x00524702)) { + esas2r_hdebug("CFG get init"); + esas2r_build_cfg_req(a, + rq, + VDA_CFG_GET_INIT2, + sizeof(struct atto_vda_cfg_init), + NULL); + + rq->vrq->cfg.sg_list_offset = offsetof( + struct atto_vda_cfg_req, + data.sge); + rq->vrq->cfg.data.prde.ctl_len = + cpu_to_le32(sizeof(struct atto_vda_cfg_init)); + rq->vrq->cfg.data.prde.address = cpu_to_le64( + rq->vrq_md->phys_addr + + sizeof(union atto_vda_req)); + rq->flags |= RF_FAILURE_OK; + a->init_msg = ESAS2R_INIT_MSG_GET_INIT; + break; + } + fallthrough; + + case ESAS2R_INIT_MSG_GET_INIT: + if (msg == ESAS2R_INIT_MSG_GET_INIT) { + ci = (struct atto_vda_cfg_init *)rq->data_buf; + if (rq->req_stat == RS_SUCCESS) { + a->num_targets_backend = + le32_to_cpu(ci->num_targets_backend); + a->ioctl_tunnel = + le32_to_cpu(ci->ioctl_tunnel); + } else { + esas2r_hdebug("FAILED"); + } + } + fallthrough; + + default: + rq->req_stat = RS_SUCCESS; + return false; + } + return true; +} + +/* + * Perform initialization messages via the request queue. Messages are + * performed with interrupts disabled. 
+ */ +bool esas2r_init_msgs(struct esas2r_adapter *a) +{ + bool success = true; + struct esas2r_request *rq = &a->general_req; + + esas2r_rq_init_request(rq, a); + rq->comp_cb = esas2r_dummy_complete; + + if (a->init_msg == 0) + a->init_msg = ESAS2R_INIT_MSG_REINIT; + + while (a->init_msg) { + if (esas2r_format_init_msg(a, rq)) { + unsigned long flags; + while (true) { + spin_lock_irqsave(&a->queue_lock, flags); + esas2r_start_vda_request(a, rq); + spin_unlock_irqrestore(&a->queue_lock, flags); + esas2r_wait_request(a, rq); + if (rq->req_stat != RS_PENDING) + break; + } + } + + if (rq->req_stat == RS_SUCCESS + || ((rq->flags & RF_FAILURE_OK) + && rq->req_stat != RS_TIMEOUT)) + continue; + + esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)", + a->init_msg, rq->req_stat, rq->flags); + a->init_msg = ESAS2R_INIT_MSG_START; + success = false; + break; + } + + esas2r_rq_destroy_request(rq, a); + return success; +} + +/* Initialize the adapter chip */ +bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll) +{ + bool rslt = false; + struct esas2r_request *rq; + u32 i; + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + goto exit; + + if (!test_bit(AF_NVR_VALID, &a->flags)) { + if (!esas2r_nvram_read_direct(a)) + esas2r_log(ESAS2R_LOG_WARN, + "invalid/missing NVRAM parameters"); + } + + if (!esas2r_init_msgs(a)) { + esas2r_set_degraded_mode(a, "init messages failed"); + goto exit; + } + + /* The firmware is ready. */ + clear_bit(AF_DEGRADED_MODE, &a->flags); + clear_bit(AF_CHPRST_PENDING, &a->flags); + + /* Post all the async event requests */ + for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++) + esas2r_start_ae_request(a, rq); + + if (!a->flash_rev[0]) + esas2r_read_flash_rev(a); + + if (!a->image_type[0]) + esas2r_read_image_type(a); + + if (a->fw_version == 0) + a->fw_rev[0] = 0; + else + sprintf(a->fw_rev, "%1d.%02d", + (int)LOBYTE(HIWORD(a->fw_version)), + (int)HIBYTE(HIWORD(a->fw_version))); + + esas2r_hdebug("firmware revision: %s", a->fw_rev); + + if (test_bit(AF_CHPRST_DETECTED, &a->flags) + && (test_bit(AF_FIRST_INIT, &a->flags))) { + esas2r_enable_chip_interrupts(a); + return true; + } + + /* initialize discovery */ + esas2r_disc_initialize(a); + + /* + * wait for the device wait time to expire here if requested. this is + * usually requested during initial driver load and possibly when + * resuming from a low power state. deferred device waiting will use + * interrupts. chip reset recovery always defers device waiting to + * avoid being in a TASKLET too long. + */ + if (init_poll) { + u32 currtime = a->disc_start_time; + u32 nexttick = 100; + u32 deltatime; + + /* + * Block Tasklets from getting scheduled and indicate this is + * polled discovery. + */ + set_bit(AF_TASKLET_SCHEDULED, &a->flags); + set_bit(AF_DISC_POLLED, &a->flags); + + /* + * Temporarily bring the disable count to zero to enable + * deferred processing. Note that the count is already zero + * after the first initialization. + */ + if (test_bit(AF_FIRST_INIT, &a->flags)) + atomic_dec(&a->disable_cnt); + + while (test_bit(AF_DISC_PENDING, &a->flags)) { + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + /* + * Determine the need for a timer tick based on the + * delta time between this and the last iteration of + * this loop. We don't use the absolute time because + * then we would have to worry about when nexttick + * wraps and currtime hasn't yet. 
+ */ + deltatime = jiffies_to_msecs(jiffies) - currtime; + currtime += deltatime; + + /* + * Process any waiting discovery as long as the chip is + * up. If a chip reset happens during initial polling, + * we have to make sure the timer tick processes the + * doorbell indicating the firmware is ready. + */ + if (!test_bit(AF_CHPRST_PENDING, &a->flags)) + esas2r_disc_check_for_work(a); + + /* Simulate a timer tick. */ + if (nexttick <= deltatime) { + + /* Time for a timer tick */ + nexttick += 100; + esas2r_timer_tick(a); + } + + if (nexttick > deltatime) + nexttick -= deltatime; + + /* Do any deferred processing */ + if (esas2r_is_tasklet_pending(a)) + esas2r_do_tasklet_tasks(a); + + } + + if (test_bit(AF_FIRST_INIT, &a->flags)) + atomic_inc(&a->disable_cnt); + + clear_bit(AF_DISC_POLLED, &a->flags); + clear_bit(AF_TASKLET_SCHEDULED, &a->flags); + } + + + esas2r_targ_db_report_changes(a); + + /* + * For cases where (a) the initialization messages processing may + * handle an interrupt for a port event and a discovery is waiting, but + * we are not waiting for devices, or (b) the device wait time has been + * exhausted but there is still discovery pending, start any leftover + * discovery in interrupt driven mode. + */ + esas2r_disc_start_waiting(a); + + /* Enable chip interrupts */ + a->int_mask = ESAS2R_INT_STS_MASK; + esas2r_enable_chip_interrupts(a); + esas2r_enable_heartbeat(a); + rslt = true; + +exit: + /* + * Regardless of whether initialization was successful, certain things + * need to get done before we exit. + */ + + if (test_bit(AF_CHPRST_DETECTED, &a->flags) && + test_bit(AF_FIRST_INIT, &a->flags)) { + /* + * Reinitialization was performed during the first + * initialization. Only clear the chip reset flag so the + * original device polling is not cancelled. + */ + if (!rslt) + clear_bit(AF_CHPRST_PENDING, &a->flags); + } else { + /* First initialization or a subsequent re-init is complete. */ + if (!rslt) { + clear_bit(AF_CHPRST_PENDING, &a->flags); + clear_bit(AF_DISC_PENDING, &a->flags); + } + + + /* Enable deferred processing after the first initialization. */ + if (test_bit(AF_FIRST_INIT, &a->flags)) { + clear_bit(AF_FIRST_INIT, &a->flags); + + if (atomic_dec_return(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); + } + } + + return rslt; +} + +void esas2r_reset_adapter(struct esas2r_adapter *a) +{ + set_bit(AF_OS_RESET, &a->flags); + esas2r_local_reset_adapter(a); + esas2r_schedule_tasklet(a); +} + +void esas2r_reset_chip(struct esas2r_adapter *a) +{ + if (!esas2r_is_adapter_present(a)) + return; + + /* + * Before we reset the chip, save off the VDA core dump. The VDA core + * dump is located in the upper 512KB of the onchip SRAM. Make sure + * to not overwrite a previous crash that was saved. 
+ */ + if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) && + !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) { + esas2r_read_mem_block(a, + a->fw_coredump_buff, + MW_DATA_ADDR_SRAM + 0x80000, + ESAS2R_FWCOREDUMP_SZ); + + set_bit(AF2_COREDUMP_SAVED, &a->flags2); + } + + clear_bit(AF2_COREDUMP_AVAIL, &a->flags2); + + /* Reset the chip */ + if (a->pcid->revision == MVR_FREY_B2) + esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2, + MU_CTL_IN_FULL_RST2); + else + esas2r_write_register_dword(a, MU_CTL_STATUS_IN, + MU_CTL_IN_FULL_RST); + + + /* Stall a little while to let the reset condition clear */ + mdelay(10); +} + +static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a) +{ + u32 starttime; + u32 doorbell; + + esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN); + starttime = jiffies_to_msecs(jiffies); + + while (true) { + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell & DRBL_POWER_DOWN) { + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + break; + } + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + if ((jiffies_to_msecs(jiffies) - starttime) > 30000) { + esas2r_hdebug("Timeout waiting for power down"); + break; + } + } +} + +/* + * Perform power management processing including managing device states, adapter + * states, interrupts, and I/O. + */ +void esas2r_power_down(struct esas2r_adapter *a) +{ + set_bit(AF_POWER_MGT, &a->flags); + set_bit(AF_POWER_DOWN, &a->flags); + + if (!test_bit(AF_DEGRADED_MODE, &a->flags)) { + u32 starttime; + u32 doorbell; + + /* + * We are currently running OK and will be reinitializing later. + * increment the disable count to coordinate with + * esas2r_init_adapter. We don't have to do this in degraded + * mode since we never enabled interrupts in the first place. + */ + esas2r_disable_chip_interrupts(a); + esas2r_disable_heartbeat(a); + + /* wait for any VDA activity to clear before continuing */ + esas2r_write_register_dword(a, MU_DOORBELL_IN, + DRBL_MSG_IFC_DOWN); + starttime = jiffies_to_msecs(jiffies); + + while (true) { + doorbell = + esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell & DRBL_MSG_IFC_DOWN) { + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + break; + } + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { + esas2r_hdebug( + "timeout waiting for interface down"); + break; + } + } + + /* + * For versions of firmware that support it tell them the driver + * is powering down. + */ + if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2)) + esas2r_power_down_notify_firmware(a); + } + + /* Suspend I/O processing. */ + set_bit(AF_OS_RESET, &a->flags); + set_bit(AF_DISC_PENDING, &a->flags); + set_bit(AF_CHPRST_PENDING, &a->flags); + + esas2r_process_adapter_reset(a); + + /* Remove devices now that I/O is cleaned up. */ + a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a); + esas2r_targ_db_remove_all(a, false); +} + +/* + * Perform power management processing including managing device states, adapter + * states, interrupts, and I/O. + */ +bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll) +{ + bool ret; + + clear_bit(AF_POWER_DOWN, &a->flags); + esas2r_init_pci_cfg_space(a); + set_bit(AF_FIRST_INIT, &a->flags); + atomic_inc(&a->disable_cnt); + + /* reinitialize the adapter */ + ret = esas2r_check_adapter(a); + if (!esas2r_init_adapter_hw(a, init_poll)) + ret = false; + + /* send the reset asynchronous event */ + esas2r_send_reset_ae(a, true); + + /* clear this flag after initialization. 
*/ + clear_bit(AF_POWER_MGT, &a->flags); + return ret; +} + +bool esas2r_is_adapter_present(struct esas2r_adapter *a) +{ + if (test_bit(AF_NOT_PRESENT, &a->flags)) + return false; + + if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) { + set_bit(AF_NOT_PRESENT, &a->flags); + + return false; + } + return true; +} + +const char *esas2r_get_model_name(struct esas2r_adapter *a) +{ + switch (a->pcid->subsystem_device) { + case ATTO_ESAS_R680: + return "ATTO ExpressSAS R680"; + + case ATTO_ESAS_R608: + return "ATTO ExpressSAS R608"; + + case ATTO_ESAS_R60F: + return "ATTO ExpressSAS R60F"; + + case ATTO_ESAS_R6F0: + return "ATTO ExpressSAS R6F0"; + + case ATTO_ESAS_R644: + return "ATTO ExpressSAS R644"; + + case ATTO_ESAS_R648: + return "ATTO ExpressSAS R648"; + + case ATTO_TSSC_3808: + return "ATTO ThunderStream SC 3808D"; + + case ATTO_TSSC_3808E: + return "ATTO ThunderStream SC 3808E"; + + case ATTO_TLSH_1068: + return "ATTO ThunderLink SH 1068"; + } + + return "ATTO SAS Controller"; +} + +const char *esas2r_get_model_name_short(struct esas2r_adapter *a) +{ + switch (a->pcid->subsystem_device) { + case ATTO_ESAS_R680: + return "R680"; + + case ATTO_ESAS_R608: + return "R608"; + + case ATTO_ESAS_R60F: + return "R60F"; + + case ATTO_ESAS_R6F0: + return "R6F0"; + + case ATTO_ESAS_R644: + return "R644"; + + case ATTO_ESAS_R648: + return "R648"; + + case ATTO_TSSC_3808: + return "SC 3808D"; + + case ATTO_TSSC_3808E: + return "SC 3808E"; + + case ATTO_TLSH_1068: + return "SH 1068"; + } + + return "unknown"; +} diff --git a/drivers/scsi/esas2r/esas2r_int.c b/drivers/scsi/esas2r/esas2r_int.c new file mode 100644 index 000000000..5281d9356 --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_int.c @@ -0,0 +1,944 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_int.c + * esas2r interrupt handling + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include "esas2r.h" + +/* Local function prototypes */ +static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell); +static void esas2r_get_outbound_responses(struct esas2r_adapter *a); +static void esas2r_process_bus_reset(struct esas2r_adapter *a); + +/* + * Poll the adapter for interrupts and service them. + * This function handles both legacy interrupts and MSI. + */ +void esas2r_polled_interrupt(struct esas2r_adapter *a) +{ + u32 intstat; + u32 doorbell; + + esas2r_disable_chip_interrupts(a); + + intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); + + if (intstat & MU_INTSTAT_POST_OUT) { + /* clear the interrupt */ + + esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT, + MU_OLIS_INT); + esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT); + + esas2r_get_outbound_responses(a); + } + + if (intstat & MU_INTSTAT_DRBL) { + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell != 0) + esas2r_doorbell_interrupt(a, doorbell); + } + + esas2r_enable_chip_interrupts(a); + + if (atomic_read(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); +} + +/* + * Legacy and MSI interrupt handlers. Note that the legacy interrupt handler + * schedules a TASKLET to process events, whereas the MSI handler just + * processes interrupt events directly. 
+ */ +irqreturn_t esas2r_interrupt(int irq, void *dev_id) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id; + + if (!esas2r_adapter_interrupt_pending(a)) + return IRQ_NONE; + + set_bit(AF2_INT_PENDING, &a->flags2); + esas2r_schedule_tasklet(a); + + return IRQ_HANDLED; +} + +void esas2r_adapter_interrupt(struct esas2r_adapter *a) +{ + u32 doorbell; + + if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) { + /* clear the interrupt */ + esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT, + MU_OLIS_INT); + esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT); + esas2r_get_outbound_responses(a); + } + + if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) { + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell != 0) + esas2r_doorbell_interrupt(a, doorbell); + } + + a->int_mask = ESAS2R_INT_STS_MASK; + + esas2r_enable_chip_interrupts(a); + + if (likely(atomic_read(&a->disable_cnt) == 0)) + esas2r_do_deferred_processes(a); +} + +irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id; + u32 intstat; + u32 doorbell; + + intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); + + if (likely(intstat & MU_INTSTAT_POST_OUT)) { + /* clear the interrupt */ + + esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT, + MU_OLIS_INT); + esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT); + + esas2r_get_outbound_responses(a); + } + + if (unlikely(intstat & MU_INTSTAT_DRBL)) { + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell != 0) + esas2r_doorbell_interrupt(a, doorbell); + } + + /* + * Work around a chip bug and force a new MSI to be sent if one is + * still pending. + */ + esas2r_disable_chip_interrupts(a); + esas2r_enable_chip_interrupts(a); + + if (likely(atomic_read(&a->disable_cnt) == 0)) + esas2r_do_deferred_processes(a); + + esas2r_do_tasklet_tasks(a); + + return 1; +} + + + +static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a, + struct esas2r_request *rq, + struct atto_vda_ob_rsp *rsp) +{ + + /* + * For I/O requests, only copy the response if an error + * occurred and setup a callback to do error processing. + */ + if (unlikely(rq->req_stat != RS_SUCCESS)) { + memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp)); + + if (rq->req_stat == RS_ABORTED) { + if (rq->timeout > RQ_MAX_TIMEOUT) + rq->req_stat = RS_TIMEOUT; + } else if (rq->req_stat == RS_SCSI_ERROR) { + u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat; + + esas2r_trace("scsistatus: %x", scsistatus); + + /* Any of these are a good result. 
*/ + if (scsistatus == SAM_STAT_GOOD || scsistatus == + SAM_STAT_CONDITION_MET || scsistatus == + SAM_STAT_INTERMEDIATE || scsistatus == + SAM_STAT_INTERMEDIATE_CONDITION_MET) { + rq->req_stat = RS_SUCCESS; + rq->func_rsp.scsi_rsp.scsi_stat = + SAM_STAT_GOOD; + } + } + } +} + +static void esas2r_get_outbound_responses(struct esas2r_adapter *a) +{ + struct atto_vda_ob_rsp *rsp; + u32 rspput_ptr; + u32 rspget_ptr; + struct esas2r_request *rq; + u32 handle; + unsigned long flags; + + LIST_HEAD(comp_list); + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->queue_lock, flags); + + /* Get the outbound limit and pointers */ + rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR; + rspget_ptr = a->last_read; + + esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr); + + /* If we don't have anything to process, get out */ + if (unlikely(rspget_ptr == rspput_ptr)) { + spin_unlock_irqrestore(&a->queue_lock, flags); + esas2r_trace_exit(); + return; + } + + /* Make sure the firmware is healthy */ + if (unlikely(rspput_ptr >= a->list_size)) { + spin_unlock_irqrestore(&a->queue_lock, flags); + esas2r_bugon(); + esas2r_local_reset_adapter(a); + esas2r_trace_exit(); + return; + } + + do { + rspget_ptr++; + + if (rspget_ptr >= a->list_size) + rspget_ptr = 0; + + rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr + + rspget_ptr; + + handle = rsp->handle; + + /* Verify the handle range */ + if (unlikely(LOWORD(handle) == 0 + || LOWORD(handle) > num_requests + + num_ae_requests + 1)) { + esas2r_bugon(); + continue; + } + + /* Get the request for this handle */ + rq = a->req_table[LOWORD(handle)]; + + if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) { + esas2r_bugon(); + continue; + } + + list_del(&rq->req_list); + + /* Get the completion status */ + rq->req_stat = rsp->req_stat; + + esas2r_trace("handle: %x", handle); + esas2r_trace("rq: %p", rq); + esas2r_trace("req_status: %x", rq->req_stat); + + if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) { + esas2r_handle_outbound_rsp_err(a, rq, rsp); + } else { + /* + * Copy the outbound completion struct for non-I/O + * requests. + */ + memcpy(&rq->func_rsp, &rsp->func_rsp, + sizeof(rsp->func_rsp)); + } + + /* Queue the request for completion. */ + list_add_tail(&rq->comp_list, &comp_list); + + } while (rspget_ptr != rspput_ptr); + + a->last_read = rspget_ptr; + spin_unlock_irqrestore(&a->queue_lock, flags); + + esas2r_comp_list_drain(a, &comp_list); + esas2r_trace_exit(); +} + +/* + * Perform all deferred processes for the adapter. Deferred + * processes can only be done while the current interrupt + * disable_cnt for the adapter is zero. + */ +void esas2r_do_deferred_processes(struct esas2r_adapter *a) +{ + int startreqs = 2; + struct esas2r_request *rq; + unsigned long flags; + + /* + * startreqs is used to control starting requests + * that are on the deferred queue + * = 0 - do not start any requests + * = 1 - can start discovery requests + * = 2 - can start any request + */ + + if (test_bit(AF_CHPRST_PENDING, &a->flags) || + test_bit(AF_FLASHING, &a->flags)) + startreqs = 0; + else if (test_bit(AF_DISC_PENDING, &a->flags)) + startreqs = 1; + + atomic_inc(&a->disable_cnt); + + /* Clear off the completed list to be processed later. 
*/ + + if (esas2r_is_tasklet_pending(a)) { + esas2r_schedule_tasklet(a); + + startreqs = 0; + } + + /* + * If we can start requests then traverse the defer queue + * looking for requests to start or complete + */ + if (startreqs && !list_empty(&a->defer_list)) { + LIST_HEAD(comp_list); + struct list_head *element, *next; + + spin_lock_irqsave(&a->queue_lock, flags); + + list_for_each_safe(element, next, &a->defer_list) { + rq = list_entry(element, struct esas2r_request, + req_list); + + if (rq->req_stat != RS_PENDING) { + list_del(element); + list_add_tail(&rq->comp_list, &comp_list); + } + /* + * Process discovery and OS requests separately. We + * can't hold up discovery requests when discovery is + * pending. In general, there may be different sets of + * conditions for starting different types of requests. + */ + else if (rq->req_type == RT_DISC_REQ) { + list_del(element); + esas2r_disc_local_start_request(a, rq); + } else if (startreqs == 2) { + list_del(element); + esas2r_local_start_request(a, rq); + + /* + * Flashing could have been set by last local + * start + */ + if (test_bit(AF_FLASHING, &a->flags)) + break; + } + } + + spin_unlock_irqrestore(&a->queue_lock, flags); + esas2r_comp_list_drain(a, &comp_list); + } + + atomic_dec(&a->disable_cnt); +} + +/* + * Process an adapter reset (or one that is about to happen) + * by making sure all outstanding requests are completed that + * haven't been already. + */ +void esas2r_process_adapter_reset(struct esas2r_adapter *a) +{ + struct esas2r_request *rq = &a->general_req; + unsigned long flags; + struct esas2r_disc_context *dc; + + LIST_HEAD(comp_list); + struct list_head *element; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->queue_lock, flags); + + /* abort the active discovery, if any. */ + + if (rq->interrupt_cx) { + dc = (struct esas2r_disc_context *)rq->interrupt_cx; + + dc->disc_evt = 0; + + clear_bit(AF_DISC_IN_PROG, &a->flags); + } + + /* + * just clear the interrupt callback for now. it will be dequeued if + * and when we find it on the active queue and we don't want the + * callback called. also set the dummy completion callback in case we + * were doing an I/O request. 
+ */ + + rq->interrupt_cx = NULL; + rq->interrupt_cb = NULL; + + rq->comp_cb = esas2r_dummy_complete; + + /* Reset the read and write pointers */ + + *a->outbound_copy = + a->last_write = + a->last_read = a->list_size - 1; + + set_bit(AF_COMM_LIST_TOGGLE, &a->flags); + + /* Kill all the requests on the active list */ + list_for_each(element, &a->defer_list) { + rq = list_entry(element, struct esas2r_request, req_list); + + if (rq->req_stat == RS_STARTED) + if (esas2r_ioreq_aborted(a, rq, RS_ABORTED)) + list_add_tail(&rq->comp_list, &comp_list); + } + + spin_unlock_irqrestore(&a->queue_lock, flags); + esas2r_comp_list_drain(a, &comp_list); + esas2r_process_bus_reset(a); + esas2r_trace_exit(); +} + +static void esas2r_process_bus_reset(struct esas2r_adapter *a) +{ + struct esas2r_request *rq; + struct list_head *element; + unsigned long flags; + + LIST_HEAD(comp_list); + + esas2r_trace_enter(); + + esas2r_hdebug("reset detected"); + + spin_lock_irqsave(&a->queue_lock, flags); + + /* kill all the requests on the deferred queue */ + list_for_each(element, &a->defer_list) { + rq = list_entry(element, struct esas2r_request, req_list); + if (esas2r_ioreq_aborted(a, rq, RS_ABORTED)) + list_add_tail(&rq->comp_list, &comp_list); + } + + spin_unlock_irqrestore(&a->queue_lock, flags); + + esas2r_comp_list_drain(a, &comp_list); + + if (atomic_read(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); + + clear_bit(AF_OS_RESET, &a->flags); + + esas2r_trace_exit(); +} + +static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a) +{ + + clear_bit(AF_CHPRST_NEEDED, &a->flags); + clear_bit(AF_BUSRST_NEEDED, &a->flags); + clear_bit(AF_BUSRST_DETECTED, &a->flags); + clear_bit(AF_BUSRST_PENDING, &a->flags); + /* + * Make sure we don't get attempt more than 3 resets + * when the uptime between resets does not exceed one + * minute. This will stop any situation where there is + * really something wrong with the hardware. The way + * this works is that we start with uptime ticks at 0. + * Each time we do a reset, we add 20 seconds worth to + * the count. Each time a timer tick occurs, as long + * as a chip reset is not pending, we decrement the + * tick count. If the uptime ticks ever gets to 60 + * seconds worth, we disable the adapter from that + * point forward. Three strikes, you're out. + */ + if (!esas2r_is_adapter_present(a) || (a->chip_uptime >= + ESAS2R_CHP_UPTIME_MAX)) { + esas2r_hdebug("*** adapter disabled ***"); + + /* + * Ok, some kind of hard failure. Make sure we + * exit this loop with chip interrupts + * permanently disabled so we don't lock up the + * entire system. Also flag degraded mode to + * prevent the heartbeat from trying to recover. + */ + + set_bit(AF_DEGRADED_MODE, &a->flags); + set_bit(AF_DISABLED, &a->flags); + clear_bit(AF_CHPRST_PENDING, &a->flags); + clear_bit(AF_DISC_PENDING, &a->flags); + + esas2r_disable_chip_interrupts(a); + a->int_mask = 0; + esas2r_process_adapter_reset(a); + + esas2r_log(ESAS2R_LOG_CRIT, + "Adapter disabled because of hardware failure"); + } else { + bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags); + + if (!alrdyrst) + /* + * Only disable interrupts if this is + * the first reset attempt. + */ + esas2r_disable_chip_interrupts(a); + + if ((test_bit(AF_POWER_MGT, &a->flags)) && + !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) { + /* + * Don't reset the chip on the first + * deferred power up attempt. 
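A small worked model, not taken from the driver, of the "three strikes" accounting the comment above describes: every chip reset credits the uptime counter with 20 seconds' worth of ticks, quiet timer ticks drain it, and the adapter is disabled once the credit reaches 60 seconds' worth. The millisecond figures mirror the comment; the driver's actual ESAS2R_CHP_UPTIME_CNT/MAX values are defined elsewhere.

#include <stdio.h>

#define UPTIME_PER_RESET_MS	(20 * 1000)	/* credited on every chip reset */
#define UPTIME_DISABLE_MS	(60 * 1000)	/* disable threshold            */

int main(void)
{
	unsigned int uptime_ms = 0;	/* drained by quiet timer ticks */
	int attempt;

	/* four reset attempts in rapid succession, with no time to drain */
	for (attempt = 1; attempt <= 4; attempt++) {
		if (uptime_ms >= UPTIME_DISABLE_MS) {
			printf("attempt %d: adapter disabled\n", attempt);
			break;
		}
		uptime_ms += UPTIME_PER_RESET_MS;
		printf("attempt %d: reset allowed, credit now %u ms\n",
		       attempt, uptime_ms);
	}
	return 0;
}

The first three attempts are allowed (20, 40, then 60 seconds of credit); the fourth trips the threshold, matching the "three strikes, you're out" rule.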
+ */ + } else { + esas2r_hdebug("*** resetting chip ***"); + esas2r_reset_chip(a); + } + + /* Kick off the reinitialization */ + a->chip_uptime += ESAS2R_CHP_UPTIME_CNT; + a->chip_init_time = jiffies_to_msecs(jiffies); + if (!test_bit(AF_POWER_MGT, &a->flags)) { + esas2r_process_adapter_reset(a); + + if (!alrdyrst) { + /* Remove devices now that I/O is cleaned up. */ + a->prev_dev_cnt = + esas2r_targ_db_get_tgt_cnt(a); + esas2r_targ_db_remove_all(a, false); + } + } + + a->int_mask = 0; + } +} + +static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a) +{ + while (test_bit(AF_CHPRST_DETECTED, &a->flags)) { + /* + * Balance the enable in esas2r_initadapter_hw. + * Esas2r_power_down already took care of it for power + * management. + */ + if (!test_bit(AF_DEGRADED_MODE, &a->flags) && + !test_bit(AF_POWER_MGT, &a->flags)) + esas2r_disable_chip_interrupts(a); + + /* Reinitialize the chip. */ + esas2r_check_adapter(a); + esas2r_init_adapter_hw(a, 0); + + if (test_bit(AF_CHPRST_NEEDED, &a->flags)) + break; + + if (test_bit(AF_POWER_MGT, &a->flags)) { + /* Recovery from power management. */ + if (test_bit(AF_FIRST_INIT, &a->flags)) { + /* Chip reset during normal power up */ + esas2r_log(ESAS2R_LOG_CRIT, + "The firmware was reset during a normal power-up sequence"); + } else { + /* Deferred power up complete. */ + clear_bit(AF_POWER_MGT, &a->flags); + esas2r_send_reset_ae(a, true); + } + } else { + /* Recovery from online chip reset. */ + if (test_bit(AF_FIRST_INIT, &a->flags)) { + /* Chip reset during driver load */ + } else { + /* Chip reset after driver load */ + esas2r_send_reset_ae(a, false); + } + + esas2r_log(ESAS2R_LOG_CRIT, + "Recovering from a chip reset while the chip was online"); + } + + clear_bit(AF_CHPRST_STARTED, &a->flags); + esas2r_enable_chip_interrupts(a); + + /* + * Clear this flag last! this indicates that the chip has been + * reset already during initialization. 
+ */ + clear_bit(AF_CHPRST_DETECTED, &a->flags); + } +} + + +/* Perform deferred tasks when chip interrupts are disabled */ +void esas2r_do_tasklet_tasks(struct esas2r_adapter *a) +{ + + if (test_bit(AF_CHPRST_NEEDED, &a->flags) || + test_bit(AF_CHPRST_DETECTED, &a->flags)) { + if (test_bit(AF_CHPRST_NEEDED, &a->flags)) + esas2r_chip_rst_needed_during_tasklet(a); + + esas2r_handle_chip_rst_during_tasklet(a); + } + + if (test_bit(AF_BUSRST_NEEDED, &a->flags)) { + esas2r_hdebug("hard resetting bus"); + + clear_bit(AF_BUSRST_NEEDED, &a->flags); + + if (test_bit(AF_FLASHING, &a->flags)) + set_bit(AF_BUSRST_DETECTED, &a->flags); + else + esas2r_write_register_dword(a, MU_DOORBELL_IN, + DRBL_RESET_BUS); + } + + if (test_bit(AF_BUSRST_DETECTED, &a->flags)) { + esas2r_process_bus_reset(a); + + esas2r_log_dev(ESAS2R_LOG_WARN, + &(a->host->shost_gendev), + "scsi_report_bus_reset() called"); + + scsi_report_bus_reset(a->host, 0); + + clear_bit(AF_BUSRST_DETECTED, &a->flags); + clear_bit(AF_BUSRST_PENDING, &a->flags); + + esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete"); + } + + if (test_bit(AF_PORT_CHANGE, &a->flags)) { + clear_bit(AF_PORT_CHANGE, &a->flags); + + esas2r_targ_db_report_changes(a); + } + + if (atomic_read(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); +} + +static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell) +{ + if (!(doorbell & DRBL_FORCE_INT)) { + esas2r_trace_enter(); + esas2r_trace("doorbell: %x", doorbell); + } + + /* First clear the doorbell bits */ + esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); + + if (doorbell & DRBL_RESET_BUS) + set_bit(AF_BUSRST_DETECTED, &a->flags); + + if (doorbell & DRBL_FORCE_INT) + clear_bit(AF_HEARTBEAT, &a->flags); + + if (doorbell & DRBL_PANIC_REASON_MASK) { + esas2r_hdebug("*** Firmware Panic ***"); + esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked"); + } + + if (doorbell & DRBL_FW_RESET) { + set_bit(AF2_COREDUMP_AVAIL, &a->flags2); + esas2r_local_reset_adapter(a); + } + + if (!(doorbell & DRBL_FORCE_INT)) { + esas2r_trace_exit(); + } +} + +void esas2r_force_interrupt(struct esas2r_adapter *a) +{ + esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT | + DRBL_DRV_VER); +} + + +static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae, + u16 target, u32 length) +{ + struct esas2r_target *t = a->targetdb + target; + u32 cplen = length; + unsigned long flags; + + if (cplen > sizeof(t->lu_event)) + cplen = sizeof(t->lu_event); + + esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent); + esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate); + + spin_lock_irqsave(&a->mem_lock, flags); + + t->new_target_state = TS_INVALID; + + if (ae->lu.dwevent & VDAAE_LU_LOST) { + t->new_target_state = TS_NOT_PRESENT; + } else { + switch (ae->lu.bystate) { + case VDAAE_LU_NOT_PRESENT: + case VDAAE_LU_OFFLINE: + case VDAAE_LU_DELETED: + case VDAAE_LU_FACTORY_DISABLED: + t->new_target_state = TS_NOT_PRESENT; + break; + + case VDAAE_LU_ONLINE: + case VDAAE_LU_DEGRADED: + t->new_target_state = TS_PRESENT; + break; + } + } + + if (t->new_target_state != TS_INVALID) { + memcpy(&t->lu_event, &ae->lu, cplen); + + esas2r_disc_queue_event(a, DCDE_DEV_CHANGE); + } + + spin_unlock_irqrestore(&a->mem_lock, flags); +} + + + +void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq) +{ + union atto_vda_ae *ae = + (union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data; + u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length); + union atto_vda_ae *last = + (union atto_vda_ae 
*)(rq->vda_rsp_data->ae_data.event_data + + length); + + esas2r_trace_enter(); + esas2r_trace("length: %d", length); + + if (length > sizeof(struct atto_vda_ae_data) + || (length & 3) != 0 + || length == 0) { + esas2r_log(ESAS2R_LOG_WARN, + "The AE request response length (%p) is too long: %d", + rq, length); + + esas2r_hdebug("aereq->length (0x%x) too long", length); + esas2r_bugon(); + + last = ae; + } + + while (ae < last) { + u16 target; + + esas2r_trace("ae: %p", ae); + esas2r_trace("ae->hdr: %p", &(ae->hdr)); + + length = ae->hdr.bylength; + + if (length > (u32)((u8 *)last - (u8 *)ae) + || (length & 3) != 0 + || length == 0) { + esas2r_log(ESAS2R_LOG_CRIT, + "the async event length is invalid (%p): %d", + ae, length); + + esas2r_hdebug("ae->hdr.length (0x%x) invalid", length); + esas2r_bugon(); + + break; + } + + esas2r_nuxi_ae_data(ae); + + esas2r_queue_fw_event(a, fw_event_vda_ae, ae, + sizeof(union atto_vda_ae)); + + switch (ae->hdr.bytype) { + case VDAAE_HDR_TYPE_RAID: + + if (ae->raid.dwflags & (VDAAE_GROUP_STATE + | VDAAE_RBLD_STATE + | VDAAE_MEMBER_CHG + | VDAAE_PART_CHG)) { + esas2r_log(ESAS2R_LOG_INFO, + "RAID event received - name:%s rebuild_state:%d group_state:%d", + ae->raid.acname, + ae->raid.byrebuild_state, + ae->raid.bygroup_state); + } + + break; + + case VDAAE_HDR_TYPE_LU: + esas2r_log(ESAS2R_LOG_INFO, + "LUN event received: event:%d target_id:%d LUN:%d state:%d", + ae->lu.dwevent, + ae->lu.id.tgtlun.wtarget_id, + ae->lu.id.tgtlun.bylun, + ae->lu.bystate); + + target = ae->lu.id.tgtlun.wtarget_id; + + if (target < ESAS2R_MAX_TARGETS) + esas2r_lun_event(a, ae, target, length); + + break; + + case VDAAE_HDR_TYPE_DISK: + esas2r_log(ESAS2R_LOG_INFO, "Disk event received"); + break; + + default: + + /* Silently ignore the rest and let the apps deal with + * them. + */ + + break; + } + + ae = (union atto_vda_ae *)((u8 *)ae + length); + } + + /* Now requeue it. */ + esas2r_start_ae_request(a, rq); + esas2r_trace_exit(); +} + +/* Send an asynchronous event for a chip reset or power management. 
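A stripped-down illustration, not from the driver, of the variable-length event walk in esas2r_ae_complete() above: each record carries its own byte length, which must be nonzero, dword-aligned and within the remaining buffer before the cursor may advance. The two-field event_hdr below is only a stand-in for the real VDA async event header.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct event_hdr {
	uint8_t bylength;	/* total record length, header included */
	uint8_t bytype;
};

/* Walk [buf, buf + total) and count the well-formed records. */
static int walk_events(const uint8_t *buf, size_t total)
{
	const uint8_t *cur = buf, *end = buf + total;
	int count = 0;

	while (cur < end) {
		const struct event_hdr *hdr = (const struct event_hdr *)cur;
		size_t len = hdr->bylength;

		/* same sanity rules as above: reject malformed lengths */
		if (len == 0 || (len & 3) != 0 || len > (size_t)(end - cur))
			break;

		count++;
		cur += len;	/* records are packed back to back */
	}
	return count;
}

int main(void)
{
	const uint8_t buf[8] = { 4, 1, 0, 0, 4, 2, 0, 0 };

	printf("%d records\n", walk_events(buf, sizeof(buf)));	/* prints 2 */
	return 0;
}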
*/ +void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt) +{ + struct atto_vda_ae_hdr ae; + + if (pwr_mgt) + ae.bytype = VDAAE_HDR_TYPE_PWRMGT; + else + ae.bytype = VDAAE_HDR_TYPE_RESET; + + ae.byversion = VDAAE_HDR_VER_0; + ae.byflags = 0; + ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr); + + if (pwr_mgt) { + esas2r_hdebug("*** sending power management AE ***"); + } else { + esas2r_hdebug("*** sending reset AE ***"); + } + + esas2r_queue_fw_event(a, fw_event_vda_ae, &ae, + sizeof(union atto_vda_ae)); +} + +void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq) +{} + +static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + u8 snslen, snslen2; + + snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len; + + if (snslen > rq->sense_len) + snslen = rq->sense_len; + + if (snslen) { + if (rq->sense_buf) + memcpy(rq->sense_buf, rq->data_buf, snslen); + else + rq->sense_buf = (u8 *)rq->data_buf; + + /* See about possible sense data */ + if (snslen2 > 0x0c) { + u8 *s = (u8 *)rq->data_buf; + + esas2r_trace_enter(); + + /* Report LUNS data has changed */ + if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) { + esas2r_trace("rq->target_id: %d", + rq->target_id); + esas2r_target_state_changed(a, rq->target_id, + TS_LUN_CHANGE); + } + + esas2r_trace("add_sense_key=%x", s[0x0c]); + esas2r_trace("add_sense_qual=%x", s[0x0d]); + esas2r_trace_exit(); + } + } + + rq->sense_len = snslen; +} + + +void esas2r_complete_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + if (rq->vrq->scsi.function == VDA_FUNC_FLASH + && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT) + clear_bit(AF_FLASHING, &a->flags); + + /* See if we setup a callback to do special processing */ + + if (rq->interrupt_cb) { + (*rq->interrupt_cb)(a, rq); + + if (rq->req_stat == RS_PENDING) { + esas2r_start_request(a, rq); + return; + } + } + + if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI) + && unlikely(rq->req_stat != RS_SUCCESS)) { + esas2r_check_req_rsp_sense(a, rq); + esas2r_log_request_failure(a, rq); + } + + (*rq->comp_cb)(a, rq); +} diff --git a/drivers/scsi/esas2r/esas2r_io.c b/drivers/scsi/esas2r/esas2r_io.c new file mode 100644 index 000000000..a8df916cd --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_io.c @@ -0,0 +1,877 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_io.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com)mpt3sas/mpt3sas_trigger_diag. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "esas2r.h" + +void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq) +{ + struct esas2r_target *t = NULL; + struct esas2r_request *startrq = rq; + unsigned long flags; + + if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags) || + test_bit(AF_POWER_DOWN, &a->flags))) { + if (rq->vrq->scsi.function == VDA_FUNC_SCSI) + rq->req_stat = RS_SEL2; + else + rq->req_stat = RS_DEGRADED; + } else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) { + t = a->targetdb + rq->target_id; + + if (unlikely(t >= a->targetdb_end + || !(t->flags & TF_USED))) { + rq->req_stat = RS_SEL; + } else { + /* copy in the target ID. */ + rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id); + + /* + * Test if we want to report RS_SEL for missing target. + * Note that if AF_DISC_PENDING is set than this will + * go on the defer queue. + */ + if (unlikely(t->target_state != TS_PRESENT && + !test_bit(AF_DISC_PENDING, &a->flags))) + rq->req_stat = RS_SEL; + } + } + + if (unlikely(rq->req_stat != RS_PENDING)) { + esas2r_complete_request(a, rq); + return; + } + + esas2r_trace("rq=%p", rq); + esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle); + + if (rq->vrq->scsi.function == VDA_FUNC_SCSI) { + esas2r_trace("rq->target_id=%d", rq->target_id); + esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags); + } + + spin_lock_irqsave(&a->queue_lock, flags); + + if (likely(list_empty(&a->defer_list) && + !test_bit(AF_CHPRST_PENDING, &a->flags) && + !test_bit(AF_FLASHING, &a->flags) && + !test_bit(AF_DISC_PENDING, &a->flags))) + esas2r_local_start_request(a, startrq); + else + list_add_tail(&startrq->req_list, &a->defer_list); + + spin_unlock_irqrestore(&a->queue_lock, flags); +} + +/* + * Starts the specified request. all requests have RS_PENDING set when this + * routine is called. The caller is usually esas2r_start_request, but + * esas2r_do_deferred_processes will start request that are deferred. + * + * The caller must ensure that requests can be started. + * + * esas2r_start_request will defer a request if there are already requests + * waiting or there is a chip reset pending. once the reset condition clears, + * esas2r_do_deferred_processes will call this function to start the request. + * + * When a request is started, it is placed on the active list and queued to + * the controller. 
+ */ +void esas2r_local_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + esas2r_trace_enter(); + esas2r_trace("rq=%p", rq); + esas2r_trace("rq->vrq:%p", rq->vrq); + esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr); + + if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH + && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)) + set_bit(AF_FLASHING, &a->flags); + + list_add_tail(&rq->req_list, &a->active_list); + esas2r_start_vda_request(a, rq); + esas2r_trace_exit(); + return; +} + +void esas2r_start_vda_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_inbound_list_source_entry *element; + u32 dw; + + rq->req_stat = RS_STARTED; + /* + * Calculate the inbound list entry location and the current state of + * toggle bit. + */ + a->last_write++; + if (a->last_write >= a->list_size) { + a->last_write = 0; + /* update the toggle bit */ + if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags)) + clear_bit(AF_COMM_LIST_TOGGLE, &a->flags); + else + set_bit(AF_COMM_LIST_TOGGLE, &a->flags); + } + + element = + (struct esas2r_inbound_list_source_entry *)a->inbound_list_md. + virt_addr + + a->last_write; + + /* Set the VDA request size if it was never modified */ + if (rq->vda_req_sz == RQ_SIZE_DEFAULT) + rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32)); + + element->address = cpu_to_le64(rq->vrq_md->phys_addr); + element->length = cpu_to_le32(rq->vda_req_sz); + + /* Update the write pointer */ + dw = a->last_write; + + if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags)) + dw |= MU_ILW_TOGGLE; + + esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle); + esas2r_trace("dw:%x", dw); + esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz); + esas2r_write_register_dword(a, MU_IN_LIST_WRITE, dw); +} + +/* + * Build the scatter/gather list for an I/O request according to the + * specifications placed in the s/g context. The caller must initialize + * context prior to the initial call by calling esas2r_sgc_init(). + */ +bool esas2r_build_sg_list_sge(struct esas2r_adapter *a, + struct esas2r_sg_context *sgc) +{ + struct esas2r_request *rq = sgc->first_req; + union atto_vda_req *vrq = rq->vrq; + + while (sgc->length) { + u32 rem = 0; + u64 addr; + u32 len; + + len = (*sgc->get_phys_addr)(sgc, &addr); + + if (unlikely(len == 0)) + return false; + + /* if current length is more than what's left, stop there */ + if (unlikely(len > sgc->length)) + len = sgc->length; + +another_entry: + /* limit to a round number less than the maximum length */ + if (len > SGE_LEN_MAX) { + /* + * Save the remainder of the split. Whenever we limit + * an entry we come back around to build entries out + * of the leftover. We do this to prevent multiple + * calls to the get_phys_addr() function for an SGE + * that is too large. + */ + rem = len - SGE_LEN_MAX; + len = SGE_LEN_MAX; + } + + /* See if we need to allocate a new SGL */ + if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) { + u8 sgelen; + struct esas2r_mem_desc *sgl; + + /* + * If no SGls are available, return failure. The + * caller can call us later with the current context + * to pick up here. + */ + sgl = esas2r_alloc_sgl(a); + + if (unlikely(sgl == NULL)) + return false; + + /* Calculate the length of the last SGE filled in */ + sgelen = (u8)((u8 *)sgc->sge.a64.curr + - (u8 *)sgc->sge.a64.last); + + /* + * Copy the last SGE filled in to the first entry of + * the new SGL to make room for the chain entry. 
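An illustrative reduction, not part of the patch, of the wrap-and-toggle bookkeeping esas2r_start_vda_request() applies to the inbound list write pointer: the toggle bit flips once per full pass over the ring so the firmware can tell a new lap from the previous one. The names and the toggle bit position are stand-ins for a->last_write, AF_COMM_LIST_TOGGLE and MU_ILW_TOGGLE.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ILW_TOGGLE_BIT	0x80000000u	/* illustrative bit position only */

struct inbound_ring {
	uint32_t write_ptr;	/* like a->last_write       */
	bool toggle;		/* like AF_COMM_LIST_TOGGLE */
	uint32_t size;		/* like a->list_size        */
};

/* Advance the write pointer and return the value to post to the chip. */
static uint32_t post_next_slot(struct inbound_ring *r)
{
	if (++r->write_ptr >= r->size) {
		r->write_ptr = 0;
		r->toggle = !r->toggle;	/* flipped once per wrap */
	}
	return r->write_ptr | (r->toggle ? ILW_TOGGLE_BIT : 0);
}

int main(void)
{
	struct inbound_ring r = { .write_ptr = 2, .toggle = false, .size = 4 };
	int i;

	for (i = 0; i < 4; i++)
		printf("post 0x%08x\n", post_next_slot(&r));
	return 0;
}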
+ */ + memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen); + + /* Figure out the new curr pointer in the new segment */ + sgc->sge.a64.curr = + (struct atto_vda_sge *)((u8 *)sgl->virt_addr + + sgelen); + + /* Set the limit pointer and build the chain entry */ + sgc->sge.a64.limit = + (struct atto_vda_sge *)((u8 *)sgl->virt_addr + + sgl_page_size + - sizeof(struct + atto_vda_sge)); + sgc->sge.a64.last->length = cpu_to_le32( + SGE_CHAIN | SGE_ADDR_64); + sgc->sge.a64.last->address = + cpu_to_le64(sgl->phys_addr); + + /* + * Now, if there was a previous chain entry, then + * update it to contain the length of this segment + * and size of this chain. otherwise this is the + * first SGL, so set the chain_offset in the request. + */ + if (sgc->sge.a64.chain) { + sgc->sge.a64.chain->length |= + cpu_to_le32( + ((u8 *)(sgc->sge.a64. + last + 1) + - (u8 *)rq->sg_table-> + virt_addr) + + sizeof(struct atto_vda_sge) * + LOBIT(SGE_CHAIN_SZ)); + } else { + vrq->scsi.chain_offset = (u8) + ((u8 *)sgc-> + sge.a64.last - + (u8 *)vrq); + + /* + * This is the first SGL, so set the + * chain_offset and the VDA request size in + * the request. + */ + rq->vda_req_sz = + (vrq->scsi.chain_offset + + sizeof(struct atto_vda_sge) + + 3) + / sizeof(u32); + } + + /* + * Remember this so when we get a new SGL filled in we + * can update the length of this chain entry. + */ + sgc->sge.a64.chain = sgc->sge.a64.last; + + /* Now link the new SGL onto the primary request. */ + list_add(&sgl->next_desc, &rq->sg_table_head); + } + + /* Update last one filled in */ + sgc->sge.a64.last = sgc->sge.a64.curr; + + /* Build the new SGE and update the S/G context */ + sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len); + sgc->sge.a64.curr->address = cpu_to_le32(addr); + sgc->sge.a64.curr++; + sgc->cur_offset += len; + sgc->length -= len; + + /* + * Check if we previously split an entry. If so we have to + * pick up where we left off. + */ + if (rem) { + addr += len; + len = rem; + rem = 0; + goto another_entry; + } + } + + /* Mark the end of the SGL */ + sgc->sge.a64.last->length |= cpu_to_le32(SGE_LAST); + + /* + * If there was a previous chain entry, update the length to indicate + * the length of this last segment. + */ + if (sgc->sge.a64.chain) { + sgc->sge.a64.chain->length |= cpu_to_le32( + ((u8 *)(sgc->sge.a64.curr) - + (u8 *)rq->sg_table->virt_addr)); + } else { + u16 reqsize; + + /* + * The entire VDA request was not used so lets + * set the size of the VDA request to be DMA'd + */ + reqsize = + ((u16)((u8 *)sgc->sge.a64.last - (u8 *)vrq) + + sizeof(struct atto_vda_sge) + 3) / sizeof(u32); + + /* + * Only update the request size if it is bigger than what is + * already there. We can come in here twice for some management + * commands. + */ + if (reqsize > rq->vda_req_sz) + rq->vda_req_sz = reqsize; + } + return true; +} + + +/* + * Create PRD list for each I-block consumed by the command. This routine + * determines how much data is required from each I-block being consumed + * by the command. The first and last I-blocks can be partials and all of + * the I-blocks in between are for a full I-block of data. + * + * The interleave size is used to determine the number of bytes in the 1st + * I-block and the remaining I-blocks are what remeains. 
+ */ +static bool esas2r_build_prd_iblk(struct esas2r_adapter *a, + struct esas2r_sg_context *sgc) +{ + struct esas2r_request *rq = sgc->first_req; + u64 addr; + u32 len; + struct esas2r_mem_desc *sgl; + u32 numchain = 1; + u32 rem = 0; + + while (sgc->length) { + /* Get the next address/length pair */ + + len = (*sgc->get_phys_addr)(sgc, &addr); + + if (unlikely(len == 0)) + return false; + + /* If current length is more than what's left, stop there */ + + if (unlikely(len > sgc->length)) + len = sgc->length; + +another_entry: + /* Limit to a round number less than the maximum length */ + + if (len > PRD_LEN_MAX) { + /* + * Save the remainder of the split. whenever we limit + * an entry we come back around to build entries out + * of the leftover. We do this to prevent multiple + * calls to the get_phys_addr() function for an SGE + * that is too large. + */ + rem = len - PRD_LEN_MAX; + len = PRD_LEN_MAX; + } + + /* See if we need to allocate a new SGL */ + if (sgc->sge.prd.sge_cnt == 0) { + if (len == sgc->length) { + /* + * We only have 1 PRD entry left. + * It can be placed where the chain + * entry would have gone + */ + + /* Build the simple SGE */ + sgc->sge.prd.curr->ctl_len = cpu_to_le32( + PRD_DATA | len); + sgc->sge.prd.curr->address = cpu_to_le64(addr); + + /* Adjust length related fields */ + sgc->cur_offset += len; + sgc->length -= len; + + /* We use the reserved chain entry for data */ + numchain = 0; + + break; + } + + if (sgc->sge.prd.chain) { + /* + * Fill # of entries of current SGL in previous + * chain the length of this current SGL may not + * full. + */ + + sgc->sge.prd.chain->ctl_len |= cpu_to_le32( + sgc->sge.prd.sgl_max_cnt); + } + + /* + * If no SGls are available, return failure. The + * caller can call us later with the current context + * to pick up here. + */ + + sgl = esas2r_alloc_sgl(a); + + if (unlikely(sgl == NULL)) + return false; + + /* + * Link the new SGL onto the chain + * They are in reverse order + */ + list_add(&sgl->next_desc, &rq->sg_table_head); + + /* + * An SGL was just filled in and we are starting + * a new SGL. Prime the chain of the ending SGL with + * info that points to the new SGL. The length gets + * filled in when the new SGL is filled or ended + */ + + sgc->sge.prd.chain = sgc->sge.prd.curr; + + sgc->sge.prd.chain->ctl_len = cpu_to_le32(PRD_CHAIN); + sgc->sge.prd.chain->address = + cpu_to_le64(sgl->phys_addr); + + /* + * Start a new segment. + * Take one away and save for chain SGE + */ + + sgc->sge.prd.curr = + (struct atto_physical_region_description *)sgl + -> + virt_addr; + sgc->sge.prd.sge_cnt = sgc->sge.prd.sgl_max_cnt - 1; + } + + sgc->sge.prd.sge_cnt--; + /* Build the simple SGE */ + sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len); + sgc->sge.prd.curr->address = cpu_to_le64(addr); + + /* Used another element. Point to the next one */ + + sgc->sge.prd.curr++; + + /* Adjust length related fields */ + + sgc->cur_offset += len; + sgc->length -= len; + + /* + * Check if we previously split an entry. If so we have to + * pick up where we left off. 
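The driver expresses the splitting of oversized mappings with a saved remainder and a goto back to another_entry; the loop below is an equivalent reduction, not driver code, with ENTRY_LEN_MAX standing in for PRD_LEN_MAX/SGE_LEN_MAX. The point is that one call to get_phys_addr() can yield several list entries without being repeated.

#include <stdint.h>
#include <stdio.h>

#define ENTRY_LEN_MAX	0x10000u	/* illustrative per-entry cap */

/* Emit one list entry per ENTRY_LEN_MAX-sized piece of a mapped region. */
static void emit_region(uint64_t addr, uint32_t len)
{
	while (len) {
		uint32_t piece = len > ENTRY_LEN_MAX ? ENTRY_LEN_MAX : len;

		printf("entry: addr=0x%llx len=0x%x\n",
		       (unsigned long long)addr, piece);

		addr += piece;	/* continue where the last piece ended */
		len -= piece;	/* the driver carries this as 'rem'    */
	}
}

int main(void)
{
	emit_region(0x100000, 0x28000);	/* 0x10000 + 0x10000 + 0x8000 */
	return 0;
}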
+ */ + + if (rem) { + addr += len; + len = rem; + rem = 0; + goto another_entry; + } + } + + if (!list_empty(&rq->sg_table_head)) { + if (sgc->sge.prd.chain) { + sgc->sge.prd.chain->ctl_len |= + cpu_to_le32(sgc->sge.prd.sgl_max_cnt + - sgc->sge.prd.sge_cnt + - numchain); + } + } + + return true; +} + +bool esas2r_build_sg_list_prd(struct esas2r_adapter *a, + struct esas2r_sg_context *sgc) +{ + struct esas2r_request *rq = sgc->first_req; + u32 len = sgc->length; + struct esas2r_target *t = a->targetdb + rq->target_id; + u8 is_i_o = 0; + u16 reqsize; + struct atto_physical_region_description *curr_iblk_chn; + u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0]; + + /* + * extract LBA from command so we can determine + * the I-Block boundary + */ + + if (rq->vrq->scsi.function == VDA_FUNC_SCSI + && t->target_state == TS_PRESENT + && !(t->flags & TF_PASS_THRU)) { + u32 lbalo = 0; + + switch (rq->vrq->scsi.cdb[0]) { + case READ_16: + case WRITE_16: + { + lbalo = + MAKEDWORD(MAKEWORD(cdb[9], + cdb[8]), + MAKEWORD(cdb[7], + cdb[6])); + is_i_o = 1; + break; + } + + case READ_12: + case WRITE_12: + case READ_10: + case WRITE_10: + { + lbalo = + MAKEDWORD(MAKEWORD(cdb[5], + cdb[4]), + MAKEWORD(cdb[3], + cdb[2])); + is_i_o = 1; + break; + } + + case READ_6: + case WRITE_6: + { + lbalo = + MAKEDWORD(MAKEWORD(cdb[3], + cdb[2]), + MAKEWORD(cdb[1] & 0x1F, + 0)); + is_i_o = 1; + break; + } + + default: + break; + } + + if (is_i_o) { + u32 startlba; + + rq->vrq->scsi.iblk_cnt_prd = 0; + + /* Determine size of 1st I-block PRD list */ + startlba = t->inter_block - (lbalo & (t->inter_block - + 1)); + sgc->length = startlba * t->block_size; + + /* Chk if the 1st iblk chain starts at base of Iblock */ + if ((lbalo & (t->inter_block - 1)) == 0) + rq->flags |= RF_1ST_IBLK_BASE; + + if (sgc->length > len) + sgc->length = len; + } else { + sgc->length = len; + } + } else { + sgc->length = len; + } + + /* get our starting chain address */ + + curr_iblk_chn = + (struct atto_physical_region_description *)sgc->sge.a64.curr; + + sgc->sge.prd.sgl_max_cnt = sgl_page_size / + sizeof(struct + atto_physical_region_description); + + /* create all of the I-block PRD lists */ + + while (len) { + sgc->sge.prd.sge_cnt = 0; + sgc->sge.prd.chain = NULL; + sgc->sge.prd.curr = curr_iblk_chn; + + /* increment to next I-Block */ + + len -= sgc->length; + + /* go build the next I-Block PRD list */ + + if (unlikely(!esas2r_build_prd_iblk(a, sgc))) + return false; + + curr_iblk_chn++; + + if (is_i_o) { + rq->vrq->scsi.iblk_cnt_prd++; + + if (len > t->inter_byte) + sgc->length = t->inter_byte; + else + sgc->length = len; + } + } + + /* figure out the size used of the VDA request */ + + reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq)) + / sizeof(u32); + + /* + * only update the request size if it is bigger than what is + * already there. we can come in here twice for some management + * commands. + */ + + if (reqsize > rq->vda_req_sz) + rq->vda_req_sz = reqsize; + + return true; +} + +static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime) +{ + u32 delta = currtime - a->chip_init_time; + + if (delta <= ESAS2R_CHPRST_WAIT_TIME) { + /* Wait before accessing registers */ + } else if (delta >= ESAS2R_CHPRST_TIME) { + /* + * The last reset failed so try again. Reset + * processing will give up after three tries. 
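A worked example, editorial and assuming inter_block is a power of two (which the mask arithmetic above requires), of how the size of the first I-block PRD list is derived: the starting LBA reduced modulo the interleave gives the offset into the current I-block, and the difference up to the interleave is what the first list must cover.

#include <stdint.h>
#include <stdio.h>

/* LBAs from 'lba' to the next I-block boundary, mirroring
 * startlba = inter_block - (lbalo & (inter_block - 1)) above. */
static uint32_t lbas_to_boundary(uint32_t lba, uint32_t inter_block)
{
	return inter_block - (lba & (inter_block - 1));
}

int main(void)
{
	uint32_t inter_block = 128;	/* LBAs per I-block (power of two) */
	uint32_t block_size = 512;	/* bytes per LBA                   */
	uint32_t lba = 1000;		/* 1000 & 127 = 104 into the block */
	uint32_t first = lbas_to_boundary(lba, inter_block);

	printf("first PRD list covers %u LBAs (%u bytes)\n",
	       first, first * block_size);	/* 24 LBAs, 12288 bytes */
	return 0;
}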
+ */ + esas2r_local_reset_adapter(a); + } else { + /* We can now see if the firmware is ready */ + u32 doorbell; + + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell == 0xFFFFFFFF || !(doorbell & DRBL_FORCE_INT)) { + esas2r_force_interrupt(a); + } else { + u32 ver = (doorbell & DRBL_FW_VER_MSK); + + /* Driver supports API version 0 and 1 */ + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + if (ver == DRBL_FW_VER_0) { + set_bit(AF_CHPRST_DETECTED, &a->flags); + set_bit(AF_LEGACY_SGE_MODE, &a->flags); + + a->max_vdareq_size = 128; + a->build_sgl = esas2r_build_sg_list_sge; + } else if (ver == DRBL_FW_VER_1) { + set_bit(AF_CHPRST_DETECTED, &a->flags); + clear_bit(AF_LEGACY_SGE_MODE, &a->flags); + + a->max_vdareq_size = 1024; + a->build_sgl = esas2r_build_sg_list_prd; + } else { + esas2r_local_reset_adapter(a); + } + } + } +} + + +/* This function must be called once per timer tick */ +void esas2r_timer_tick(struct esas2r_adapter *a) +{ + u32 currtime = jiffies_to_msecs(jiffies); + u32 deltatime = currtime - a->last_tick_time; + + a->last_tick_time = currtime; + + /* count down the uptime */ + if (a->chip_uptime && + !test_bit(AF_CHPRST_PENDING, &a->flags) && + !test_bit(AF_DISC_PENDING, &a->flags)) { + if (deltatime >= a->chip_uptime) + a->chip_uptime = 0; + else + a->chip_uptime -= deltatime; + } + + if (test_bit(AF_CHPRST_PENDING, &a->flags)) { + if (!test_bit(AF_CHPRST_NEEDED, &a->flags) && + !test_bit(AF_CHPRST_DETECTED, &a->flags)) + esas2r_handle_pending_reset(a, currtime); + } else { + if (test_bit(AF_DISC_PENDING, &a->flags)) + esas2r_disc_check_complete(a); + if (test_bit(AF_HEARTBEAT_ENB, &a->flags)) { + if (test_bit(AF_HEARTBEAT, &a->flags)) { + if ((currtime - a->heartbeat_time) >= + ESAS2R_HEARTBEAT_TIME) { + clear_bit(AF_HEARTBEAT, &a->flags); + esas2r_hdebug("heartbeat failed"); + esas2r_log(ESAS2R_LOG_CRIT, + "heartbeat failed"); + esas2r_bugon(); + esas2r_local_reset_adapter(a); + } + } else { + set_bit(AF_HEARTBEAT, &a->flags); + a->heartbeat_time = currtime; + esas2r_force_interrupt(a); + } + } + } + + if (atomic_read(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); +} + +/* + * Send the specified task management function to the target and LUN + * specified in rqaux. in addition, immediately abort any commands that + * are queued but not sent to the device according to the rules specified + * by the task management function. + */ +bool esas2r_send_task_mgmt(struct esas2r_adapter *a, + struct esas2r_request *rqaux, u8 task_mgt_func) +{ + u16 targetid = rqaux->target_id; + u8 lun = (u8)le32_to_cpu(rqaux->vrq->scsi.flags); + bool ret = false; + struct esas2r_request *rq; + struct list_head *next, *element; + unsigned long flags; + + LIST_HEAD(comp_list); + + esas2r_trace_enter(); + esas2r_trace("rqaux:%p", rqaux); + esas2r_trace("task_mgt_func:%x", task_mgt_func); + spin_lock_irqsave(&a->queue_lock, flags); + + /* search the defer queue looking for requests for the device */ + list_for_each_safe(element, next, &a->defer_list) { + rq = list_entry(element, struct esas2r_request, req_list); + + if (rq->vrq->scsi.function == VDA_FUNC_SCSI + && rq->target_id == targetid + && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun + || task_mgt_func == 0x20)) { /* target reset */ + /* Found a request affected by the task management */ + if (rq->req_stat == RS_PENDING) { + /* + * The request is pending or waiting. We can + * safelycomplete the request now. 
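An editorial model of the heartbeat handshake in esas2r_timer_tick() above, with the doorbell side reduced to a flag: a tick either arms the heartbeat and forces an interrupt, or declares failure if the previous ping was never acknowledged inside the window. The window value here is illustrative; the driver uses ESAS2R_HEARTBEAT_TIME, and the DRBL_FORCE_INT handling in the doorbell routine is what clears the armed flag in the real code.

#include <stdbool.h>
#include <stdio.h>

#define HEARTBEAT_WINDOW_MS	5000	/* illustrative window only */

struct hb_state {
	bool armed;		/* like AF_HEARTBEAT           */
	unsigned int sent_at;	/* like a->heartbeat_time (ms) */
};

/* Returns true when the heartbeat must be declared failed. */
static bool heartbeat_tick(struct hb_state *hb, unsigned int now_ms)
{
	if (hb->armed) {
		if (now_ms - hb->sent_at >= HEARTBEAT_WINDOW_MS)
			return true;	/* ping never acknowledged */
		return false;		/* still inside the window */
	}
	hb->armed = true;	/* the interrupt path clears this on ack */
	hb->sent_at = now_ms;	/* ...after a forced interrupt goes out  */
	return false;
}

int main(void)
{
	struct hb_state hb = { false, 0 };

	heartbeat_tick(&hb, 1000);	/* arms and "pings" the firmware */
	/* no acknowledgement ever arrives... */
	printf("failed: %d\n", heartbeat_tick(&hb, 7000));	/* prints 1 */
	return 0;
}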
+ */ + if (esas2r_ioreq_aborted(a, rq, RS_ABORTED)) + list_add_tail(&rq->comp_list, + &comp_list); + } + } + } + + /* Send the task management request to the firmware */ + rqaux->sense_len = 0; + rqaux->vrq->scsi.length = 0; + rqaux->target_id = targetid; + rqaux->vrq->scsi.flags |= cpu_to_le32(lun); + memset(rqaux->vrq->scsi.cdb, 0, sizeof(rqaux->vrq->scsi.cdb)); + rqaux->vrq->scsi.flags |= + cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK)); + + if (test_bit(AF_FLASHING, &a->flags)) { + /* Assume success. if there are active requests, return busy */ + rqaux->req_stat = RS_SUCCESS; + + list_for_each_safe(element, next, &a->active_list) { + rq = list_entry(element, struct esas2r_request, + req_list); + if (rq->vrq->scsi.function == VDA_FUNC_SCSI + && rq->target_id == targetid + && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun + || task_mgt_func == 0x20)) /* target reset */ + rqaux->req_stat = RS_BUSY; + } + + ret = true; + } + + spin_unlock_irqrestore(&a->queue_lock, flags); + + if (!test_bit(AF_FLASHING, &a->flags)) + esas2r_start_request(a, rqaux); + + esas2r_comp_list_drain(a, &comp_list); + + if (atomic_read(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); + + esas2r_trace_exit(); + + return ret; +} + +void esas2r_reset_bus(struct esas2r_adapter *a) +{ + esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset"); + + if (!test_bit(AF_DEGRADED_MODE, &a->flags) && + !test_bit(AF_CHPRST_PENDING, &a->flags) && + !test_bit(AF_DISC_PENDING, &a->flags)) { + set_bit(AF_BUSRST_NEEDED, &a->flags); + set_bit(AF_BUSRST_PENDING, &a->flags); + set_bit(AF_OS_RESET, &a->flags); + + esas2r_schedule_tasklet(a); + } +} + +bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq, + u8 status) +{ + esas2r_trace_enter(); + esas2r_trace("rq:%p", rq); + list_del_init(&rq->req_list); + if (rq->timeout > RQ_MAX_TIMEOUT) { + /* + * The request timed out, but we could not abort it because a + * chip reset occurred. Return busy status. + */ + rq->req_stat = RS_BUSY; + esas2r_trace_exit(); + return true; + } + + rq->req_stat = status; + esas2r_trace_exit(); + return true; +} diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c new file mode 100644 index 000000000..055d2e87a --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_ioctl.c @@ -0,0 +1,2087 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_ioctl.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "esas2r.h" + +/* + * Buffered ioctl handlers. A buffered ioctl is one which requires that we + * allocate a DMA-able memory area to communicate with the firmware. In + * order to prevent continually allocating and freeing consistent memory, + * we will allocate a global buffer the first time we need it and re-use + * it for subsequent ioctl calls that require it. + */ + +u8 *esas2r_buffered_ioctl; +dma_addr_t esas2r_buffered_ioctl_addr; +u32 esas2r_buffered_ioctl_size; +struct pci_dev *esas2r_buffered_ioctl_pcid; + +static DEFINE_SEMAPHORE(buffered_ioctl_semaphore, 1); +typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *, + struct esas2r_request *, + struct esas2r_sg_context *, + void *); +typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *, + struct esas2r_request *, void *); + +struct esas2r_buffered_ioctl { + struct esas2r_adapter *a; + void *ioctl; + u32 length; + u32 control_code; + u32 offset; + BUFFERED_IOCTL_CALLBACK + callback; + void *context; + BUFFERED_IOCTL_DONE_CALLBACK + done_callback; + void *done_context; + +}; + +static void complete_fm_api_req(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + a->fm_api_command_done = 1; + wake_up_interruptible(&a->fm_api_waiter); +} + +/* Callbacks for building scatter/gather lists for FM API requests */ +static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; + int offset = sgc->cur_offset - a->save_offset; + + (*addr) = a->firmware.phys + offset; + return a->firmware.orig_len - offset; +} + +static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; + int offset = sgc->cur_offset - a->save_offset; + + (*addr) = a->firmware.header_buff_phys + offset; + return sizeof(struct esas2r_flash_img) - offset; +} + +/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. 
*/ +static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi) +{ + struct esas2r_request *rq; + + if (mutex_lock_interruptible(&a->fm_api_mutex)) { + fi->status = FI_STAT_BUSY; + return; + } + + rq = esas2r_alloc_request(a); + if (rq == NULL) { + fi->status = FI_STAT_BUSY; + goto free_sem; + } + + if (fi == &a->firmware.header) { + a->firmware.header_buff = dma_alloc_coherent(&a->pcid->dev, + (size_t)sizeof( + struct + esas2r_flash_img), + (dma_addr_t *)&a-> + firmware. + header_buff_phys, + GFP_KERNEL); + + if (a->firmware.header_buff == NULL) { + esas2r_debug("failed to allocate header buffer!"); + fi->status = FI_STAT_BUSY; + goto free_req; + } + + memcpy(a->firmware.header_buff, fi, + sizeof(struct esas2r_flash_img)); + a->save_offset = a->firmware.header_buff; + a->fm_api_sgc.get_phys_addr = + (PGETPHYSADDR)get_physaddr_fm_api_header; + } else { + a->save_offset = (u8 *)fi; + a->fm_api_sgc.get_phys_addr = + (PGETPHYSADDR)get_physaddr_fm_api; + } + + rq->comp_cb = complete_fm_api_req; + a->fm_api_command_done = 0; + a->fm_api_sgc.cur_offset = a->save_offset; + + if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq, + &a->fm_api_sgc)) + goto all_done; + + /* Now wait around for it to complete. */ + while (!a->fm_api_command_done) + wait_event_interruptible(a->fm_api_waiter, + a->fm_api_command_done); +all_done: + if (fi == &a->firmware.header) { + memcpy(fi, a->firmware.header_buff, + sizeof(struct esas2r_flash_img)); + + dma_free_coherent(&a->pcid->dev, + (size_t)sizeof(struct esas2r_flash_img), + a->firmware.header_buff, + (dma_addr_t)a->firmware.header_buff_phys); + } +free_req: + esas2r_free_request(a, (struct esas2r_request *)rq); +free_sem: + mutex_unlock(&a->fm_api_mutex); + return; + +} + +static void complete_nvr_req(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + a->nvram_command_done = 1; + wake_up_interruptible(&a->nvram_waiter); +} + +/* Callback for building scatter/gather lists for buffered ioctls */ +static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc, + u64 *addr) +{ + int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl; + + (*addr) = esas2r_buffered_ioctl_addr + offset; + return esas2r_buffered_ioctl_size - offset; +} + +static void complete_buffered_ioctl_req(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + a->buffered_ioctl_done = 1; + wake_up_interruptible(&a->buffered_ioctl_waiter); +} + +static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi) +{ + struct esas2r_adapter *a = bi->a; + struct esas2r_request *rq; + struct esas2r_sg_context sgc; + u8 result = IOCTL_SUCCESS; + + if (down_interruptible(&buffered_ioctl_semaphore)) + return IOCTL_OUT_OF_RESOURCES; + + /* allocate a buffer or use the existing buffer. 
*/ + if (esas2r_buffered_ioctl) { + if (esas2r_buffered_ioctl_size < bi->length) { + /* free the too-small buffer and get a new one */ + dma_free_coherent(&a->pcid->dev, + (size_t)esas2r_buffered_ioctl_size, + esas2r_buffered_ioctl, + esas2r_buffered_ioctl_addr); + + goto allocate_buffer; + } + } else { +allocate_buffer: + esas2r_buffered_ioctl_size = bi->length; + esas2r_buffered_ioctl_pcid = a->pcid; + esas2r_buffered_ioctl = dma_alloc_coherent(&a->pcid->dev, + (size_t) + esas2r_buffered_ioctl_size, + & + esas2r_buffered_ioctl_addr, + GFP_KERNEL); + } + + if (!esas2r_buffered_ioctl) { + esas2r_log(ESAS2R_LOG_CRIT, + "could not allocate %d bytes of consistent memory " + "for a buffered ioctl!", + bi->length); + + esas2r_debug("buffered ioctl alloc failure"); + result = IOCTL_OUT_OF_RESOURCES; + goto exit_cleanly; + } + + memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length); + + rq = esas2r_alloc_request(a); + if (rq == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "could not allocate an internal request"); + + result = IOCTL_OUT_OF_RESOURCES; + esas2r_debug("buffered ioctl - no requests"); + goto exit_cleanly; + } + + a->buffered_ioctl_done = 0; + rq->comp_cb = complete_buffered_ioctl_req; + sgc.cur_offset = esas2r_buffered_ioctl + bi->offset; + sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl; + sgc.length = esas2r_buffered_ioctl_size; + + if (!(*bi->callback)(a, rq, &sgc, bi->context)) { + /* completed immediately, no need to wait */ + a->buffered_ioctl_done = 0; + goto free_andexit_cleanly; + } + + /* now wait around for it to complete. */ + while (!a->buffered_ioctl_done) + wait_event_interruptible(a->buffered_ioctl_waiter, + a->buffered_ioctl_done); + +free_andexit_cleanly: + if (result == IOCTL_SUCCESS && bi->done_callback) + (*bi->done_callback)(a, rq, bi->done_context); + + esas2r_free_request(a, rq); + +exit_cleanly: + if (result == IOCTL_SUCCESS) + memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length); + + up(&buffered_ioctl_semaphore); + return result; +} + +/* SMP ioctl support */ +static int smp_ioctl_callback(struct esas2r_adapter *a, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc, void *context) +{ + struct atto_ioctl_smp *si = + (struct atto_ioctl_smp *)esas2r_buffered_ioctl; + + esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); + esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP); + + if (!esas2r_build_sg_list(a, rq, sgc)) { + si->status = ATTO_STS_OUT_OF_RSRC; + return false; + } + + esas2r_start_request(a, rq); + return true; +} + +static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si) +{ + struct esas2r_buffered_ioctl bi; + + memset(&bi, 0, sizeof(bi)); + + bi.a = a; + bi.ioctl = si; + bi.length = sizeof(struct atto_ioctl_smp) + + le32_to_cpu(si->req_length) + + le32_to_cpu(si->rsp_length); + bi.offset = 0; + bi.callback = smp_ioctl_callback; + return handle_buffered_ioctl(&bi); +} + + +/* CSMI ioctl support */ +static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id); + rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun); + + /* Now call the original completion callback. */ + (*rq->aux_req_cb)(a, rq); +} + +/* Tunnel a CSMI IOCTL to the back end driver for processing. 
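A reduced illustration, with plain malloc() in place of dma_alloc_coherent() and no locking, of the grow-on-demand reuse that handle_buffered_ioctl() implements above: keep one shared buffer and replace it only when a request needs more space than is currently allocated.

#include <stdlib.h>

static void *shared_buf;
static size_t shared_buf_size;

/* Return a buffer of at least 'needed' bytes, reusing the last one if possible. */
static void *get_shared_buffer(size_t needed)
{
	if (shared_buf && shared_buf_size >= needed)
		return shared_buf;	/* existing buffer is big enough   */

	free(shared_buf);		/* too small or absent: replace it */
	shared_buf = malloc(needed);
	shared_buf_size = shared_buf ? needed : 0;
	return shared_buf;
}

int main(void)
{
	void *buf = get_shared_buffer(512);	/* first call allocates       */
	buf = get_shared_buffer(256);		/* smaller request reuses it  */
	buf = get_shared_buffer(4096);		/* larger request reallocates */
	free(buf);
	return 0;
}

The trade-off is the same as in the driver: one serialized buffer avoids constant allocate/free cycles of coherent memory, at the cost of handling a single buffered ioctl at a time.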
*/ +static bool csmi_ioctl_tunnel(struct esas2r_adapter *a, + union atto_ioctl_csmi *ci, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc, + u32 ctrl_code, + u16 target_id) +{ + struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl; + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return false; + + esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); + esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI); + ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code); + ioctl->csmi.target_id = cpu_to_le16(target_id); + ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags); + + /* + * Always usurp the completion callback since the interrupt callback + * mechanism may be used. + */ + rq->aux_req_cx = ci; + rq->aux_req_cb = rq->comp_cb; + rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb; + + if (!esas2r_build_sg_list(a, rq, sgc)) + return false; + + esas2r_start_request(a, rq); + return true; +} + +static bool check_lun(struct scsi_lun lun) +{ + bool result; + + result = ((lun.scsi_lun[7] == 0) && + (lun.scsi_lun[6] == 0) && + (lun.scsi_lun[5] == 0) && + (lun.scsi_lun[4] == 0) && + (lun.scsi_lun[3] == 0) && + (lun.scsi_lun[2] == 0) && +/* Byte 1 is intentionally skipped */ + (lun.scsi_lun[0] == 0)); + + return result; +} + +static int csmi_ioctl_callback(struct esas2r_adapter *a, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc, void *context) +{ + struct atto_csmi *ci = (struct atto_csmi *)context; + union atto_ioctl_csmi *ioctl_csmi = + (union atto_ioctl_csmi *)esas2r_buffered_ioctl; + u8 path = 0; + u8 tid = 0; + u8 lun = 0; + u32 sts = CSMI_STS_SUCCESS; + struct esas2r_target *t; + unsigned long flags; + + if (ci->control_code == CSMI_CC_GET_DEV_ADDR) { + struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr; + + path = gda->path_id; + tid = gda->target_id; + lun = gda->lun; + } else if (ci->control_code == CSMI_CC_TASK_MGT) { + struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt; + + path = tm->path_id; + tid = tm->target_id; + lun = tm->lun; + } + + if (path > 0) { + rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32( + CSMI_STS_INV_PARAM); + return false; + } + + rq->target_id = tid; + rq->vrq->scsi.flags |= cpu_to_le32(lun); + + switch (ci->control_code) { + case CSMI_CC_GET_DRVR_INFO: + { + struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info; + + strcpy(gdi->description, esas2r_get_model_name(a)); + gdi->csmi_major_rev = CSMI_MAJOR_REV; + gdi->csmi_minor_rev = CSMI_MINOR_REV; + break; + } + + case CSMI_CC_GET_CNTLR_CFG: + { + struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg; + + gcc->base_io_addr = 0; + pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2, + &gcc->base_memaddr_lo); + pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3, + &gcc->base_memaddr_hi); + gcc->board_id = MAKEDWORD(a->pcid->subsystem_device, + a->pcid->subsystem_vendor); + gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN; + gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA; + gcc->io_bus_type = CSMI_BUS_TYPE_PCI; + gcc->pci_addr.bus_num = a->pcid->bus->number; + gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn); + gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn); + + memset(gcc->serial_num, 0, sizeof(gcc->serial_num)); + + gcc->major_rev = LOBYTE(LOWORD(a->fw_version)); + gcc->minor_rev = HIBYTE(LOWORD(a->fw_version)); + gcc->build_rev = LOBYTE(HIWORD(a->fw_version)); + gcc->release_rev = HIBYTE(HIWORD(a->fw_version)); + gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver)); + gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver)); + gcc->bios_build_rev = LOWORD(a->flash_ver); + + if 
(test_bit(AF2_THUNDERLINK, &a->flags2)) + gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA + | CSMI_CNTLRF_SATA_HBA; + else + gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID + | CSMI_CNTLRF_SATA_RAID; + + gcc->rrom_major_rev = 0; + gcc->rrom_minor_rev = 0; + gcc->rrom_build_rev = 0; + gcc->rrom_release_rev = 0; + gcc->rrom_biosmajor_rev = 0; + gcc->rrom_biosminor_rev = 0; + gcc->rrom_biosbuild_rev = 0; + gcc->rrom_biosrelease_rev = 0; + break; + } + + case CSMI_CC_GET_CNTLR_STS: + { + struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts; + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + gcs->status = CSMI_CNTLR_STS_FAILED; + else + gcs->status = CSMI_CNTLR_STS_GOOD; + + gcs->offline_reason = CSMI_OFFLINE_NO_REASON; + break; + } + + case CSMI_CC_FW_DOWNLOAD: + case CSMI_CC_GET_RAID_INFO: + case CSMI_CC_GET_RAID_CFG: + + sts = CSMI_STS_BAD_CTRL_CODE; + break; + + case CSMI_CC_SMP_PASSTHRU: + case CSMI_CC_SSP_PASSTHRU: + case CSMI_CC_STP_PASSTHRU: + case CSMI_CC_GET_PHY_INFO: + case CSMI_CC_SET_PHY_INFO: + case CSMI_CC_GET_LINK_ERRORS: + case CSMI_CC_GET_SATA_SIG: + case CSMI_CC_GET_CONN_INFO: + case CSMI_CC_PHY_CTRL: + + if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc, + ci->control_code, + ESAS2R_TARG_ID_INV)) { + sts = CSMI_STS_FAILED; + break; + } + + return true; + + case CSMI_CC_GET_SCSI_ADDR: + { + struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr; + + struct scsi_lun lun; + + memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun)); + + if (!check_lun(lun)) { + sts = CSMI_STS_NO_SCSI_ADDR; + break; + } + + /* make sure the device is present */ + spin_lock_irqsave(&a->mem_lock, flags); + t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr); + spin_unlock_irqrestore(&a->mem_lock, flags); + + if (t == NULL) { + sts = CSMI_STS_NO_SCSI_ADDR; + break; + } + + gsa->host_index = 0xFF; + gsa->lun = gsa->sas_lun[1]; + rq->target_id = esas2r_targ_get_id(t, a); + break; + } + + case CSMI_CC_GET_DEV_ADDR: + { + struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr; + + /* make sure the target is present */ + t = a->targetdb + rq->target_id; + + if (t >= a->targetdb_end + || t->target_state != TS_PRESENT + || t->sas_addr == 0) { + sts = CSMI_STS_NO_DEV_ADDR; + break; + } + + /* fill in the result */ + *(u64 *)gda->sas_addr = t->sas_addr; + memset(gda->sas_lun, 0, sizeof(gda->sas_lun)); + gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags); + break; + } + + case CSMI_CC_TASK_MGT: + + /* make sure the target is present */ + t = a->targetdb + rq->target_id; + + if (t >= a->targetdb_end + || t->target_state != TS_PRESENT + || !(t->flags & TF_PASS_THRU)) { + sts = CSMI_STS_NO_DEV_ADDR; + break; + } + + if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc, + ci->control_code, + t->phys_targ_id)) { + sts = CSMI_STS_FAILED; + break; + } + + return true; + + default: + + sts = CSMI_STS_BAD_CTRL_CODE; + break; + } + + rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts); + + return false; +} + + +static void csmi_ioctl_done_callback(struct esas2r_adapter *a, + struct esas2r_request *rq, void *context) +{ + struct atto_csmi *ci = (struct atto_csmi *)context; + union atto_ioctl_csmi *ioctl_csmi = + (union atto_ioctl_csmi *)esas2r_buffered_ioctl; + + switch (ci->control_code) { + case CSMI_CC_GET_DRVR_INFO: + { + struct atto_csmi_get_driver_info *gdi = + &ioctl_csmi->drvr_info; + + strcpy(gdi->name, ESAS2R_VERSION_STR); + + gdi->major_rev = ESAS2R_MAJOR_REV; + gdi->minor_rev = ESAS2R_MINOR_REV; + gdi->build_rev = 0; + gdi->release_rev = 0; + break; + } + + case CSMI_CC_GET_SCSI_ADDR: + { + struct 
atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr; + + if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) == + CSMI_STS_SUCCESS) { + gsa->target_id = rq->target_id; + gsa->path_id = 0; + } + + break; + } + } + + ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status); +} + + +static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci) +{ + struct esas2r_buffered_ioctl bi; + + memset(&bi, 0, sizeof(bi)); + + bi.a = a; + bi.ioctl = &ci->data; + bi.length = sizeof(union atto_ioctl_csmi); + bi.offset = 0; + bi.callback = csmi_ioctl_callback; + bi.context = ci; + bi.done_callback = csmi_ioctl_done_callback; + bi.done_context = ci; + + return handle_buffered_ioctl(&bi); +} + +/* ATTO HBA ioctl support */ + +/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */ +static bool hba_ioctl_tunnel(struct esas2r_adapter *a, + struct atto_ioctl *hi, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc) +{ + esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); + + esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA); + + if (!esas2r_build_sg_list(a, rq, sgc)) { + hi->status = ATTO_STS_OUT_OF_RSRC; + + return false; + } + + esas2r_start_request(a, rq); + + return true; +} + +static void scsi_passthru_comp_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx; + struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru; + u8 sts = ATTO_SPT_RS_FAILED; + + spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat; + spt->sense_length = rq->sense_len; + spt->residual_length = + le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length); + + switch (rq->req_stat) { + case RS_SUCCESS: + case RS_SCSI_ERROR: + sts = ATTO_SPT_RS_SUCCESS; + break; + case RS_UNDERRUN: + sts = ATTO_SPT_RS_UNDERRUN; + break; + case RS_OVERRUN: + sts = ATTO_SPT_RS_OVERRUN; + break; + case RS_SEL: + case RS_SEL2: + sts = ATTO_SPT_RS_NO_DEVICE; + break; + case RS_NO_LUN: + sts = ATTO_SPT_RS_NO_LUN; + break; + case RS_TIMEOUT: + sts = ATTO_SPT_RS_TIMEOUT; + break; + case RS_DEGRADED: + sts = ATTO_SPT_RS_DEGRADED; + break; + case RS_BUSY: + sts = ATTO_SPT_RS_BUSY; + break; + case RS_ABORTED: + sts = ATTO_SPT_RS_ABORTED; + break; + case RS_RESET: + sts = ATTO_SPT_RS_BUS_RESET; + break; + } + + spt->req_status = sts; + + /* Update the target ID to the next one present. */ + spt->target_id = + esas2r_targ_db_find_next_present(a, (u16)spt->target_id); + + /* Done, call the completion callback. 
*/ + (*rq->aux_req_cb)(a, rq); +} + +static int hba_ioctl_callback(struct esas2r_adapter *a, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc, + void *context) +{ + struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl; + + hi->status = ATTO_STS_SUCCESS; + + switch (hi->function) { + case ATTO_FUNC_GET_ADAP_INFO: + { + u8 *class_code = (u8 *)&a->pcid->class; + + struct atto_hba_get_adapter_info *gai = + &hi->data.get_adap_info; + + if (hi->flags & HBAF_TUNNEL) { + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + if (hi->version > ATTO_VER_GET_ADAP_INFO0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_GET_ADAP_INFO0; + break; + } + + memset(gai, 0, sizeof(*gai)); + + gai->pci.vendor_id = a->pcid->vendor; + gai->pci.device_id = a->pcid->device; + gai->pci.ss_vendor_id = a->pcid->subsystem_vendor; + gai->pci.ss_device_id = a->pcid->subsystem_device; + gai->pci.class_code[0] = class_code[0]; + gai->pci.class_code[1] = class_code[1]; + gai->pci.class_code[2] = class_code[2]; + gai->pci.rev_id = a->pcid->revision; + gai->pci.bus_num = a->pcid->bus->number; + gai->pci.dev_num = PCI_SLOT(a->pcid->devfn); + gai->pci.func_num = PCI_FUNC(a->pcid->devfn); + + if (pci_is_pcie(a->pcid)) { + u16 stat; + u32 caps; + + pcie_capability_read_word(a->pcid, PCI_EXP_LNKSTA, + &stat); + pcie_capability_read_dword(a->pcid, PCI_EXP_LNKCAP, + &caps); + + gai->pci.link_speed_curr = + (u8)(stat & PCI_EXP_LNKSTA_CLS); + gai->pci.link_speed_max = + (u8)(caps & PCI_EXP_LNKCAP_SLS); + gai->pci.link_width_curr = + (u8)((stat & PCI_EXP_LNKSTA_NLW) + >> PCI_EXP_LNKSTA_NLW_SHIFT); + gai->pci.link_width_max = + (u8)((caps & PCI_EXP_LNKCAP_MLW) + >> 4); + } + + gai->pci.msi_vector_cnt = 1; + + if (a->pcid->msix_enabled) + gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX; + else if (a->pcid->msi_enabled) + gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI; + else + gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY; + + gai->adap_type = ATTO_GAI_AT_ESASRAID2; + + if (test_bit(AF2_THUNDERLINK, &a->flags2)) + gai->adap_type = ATTO_GAI_AT_TLSASHBA; + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + gai->adap_flags |= ATTO_GAI_AF_DEGRADED; + + gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP | + ATTO_GAI_AF_DEVADDR_SUPP; + + if (a->pcid->subsystem_device == ATTO_ESAS_R60F + || a->pcid->subsystem_device == ATTO_ESAS_R608 + || a->pcid->subsystem_device == ATTO_ESAS_R644 + || a->pcid->subsystem_device == ATTO_TSSC_3808E) + gai->adap_flags |= ATTO_GAI_AF_VIRT_SES; + + gai->num_ports = ESAS2R_NUM_PHYS; + gai->num_phys = ESAS2R_NUM_PHYS; + + strcpy(gai->firmware_rev, a->fw_rev); + strcpy(gai->flash_rev, a->flash_rev); + strcpy(gai->model_name_short, esas2r_get_model_name_short(a)); + strcpy(gai->model_name, esas2r_get_model_name(a)); + + gai->num_targets = ESAS2R_MAX_TARGETS; + + gai->num_busses = 1; + gai->num_targsper_bus = gai->num_targets; + gai->num_lunsper_targ = 256; + + if (a->pcid->subsystem_device == ATTO_ESAS_R6F0 + || a->pcid->subsystem_device == ATTO_ESAS_R60F) + gai->num_connectors = 4; + else + gai->num_connectors = 2; + + gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP; + + gai->num_targets_backend = a->num_targets_backend; + + gai->tunnel_flags = a->ioctl_tunnel + & (ATTO_GAI_TF_MEM_RW + | ATTO_GAI_TF_TRACE + | ATTO_GAI_TF_SCSI_PASS_THRU + | ATTO_GAI_TF_GET_DEV_ADDR + | ATTO_GAI_TF_PHY_CTRL + | ATTO_GAI_TF_CONN_CTRL + | ATTO_GAI_TF_GET_DEV_INFO); + break; + } + + case ATTO_FUNC_GET_ADAP_ADDR: + { + struct atto_hba_get_adapter_address *gaa = + &hi->data.get_adap_addr; + + if (hi->flags & 
HBAF_TUNNEL) { + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + if (hi->version > ATTO_VER_GET_ADAP_ADDR0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_GET_ADAP_ADDR0; + } else if (gaa->addr_type == ATTO_GAA_AT_PORT + || gaa->addr_type == ATTO_GAA_AT_NODE) { + if (gaa->addr_type == ATTO_GAA_AT_PORT + && gaa->port_id >= ESAS2R_NUM_PHYS) { + hi->status = ATTO_STS_NOT_APPL; + } else { + memcpy((u64 *)gaa->address, + &a->nvram->sas_addr[0], sizeof(u64)); + gaa->addr_len = sizeof(u64); + } + } else { + hi->status = ATTO_STS_INV_PARAM; + } + + break; + } + + case ATTO_FUNC_MEM_RW: + { + if (hi->flags & HBAF_TUNNEL) { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + hi->status = ATTO_STS_UNSUPPORTED; + + break; + } + + case ATTO_FUNC_TRACE: + { + struct atto_hba_trace *trc = &hi->data.trace; + + if (hi->flags & HBAF_TUNNEL) { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + if (hi->version > ATTO_VER_TRACE1) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_TRACE1; + break; + } + + if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP + && hi->version >= ATTO_VER_TRACE1) { + if (trc->trace_func == ATTO_TRC_TF_UPLOAD) { + u32 len = hi->data_length; + u32 offset = trc->current_offset; + u32 total_len = ESAS2R_FWCOREDUMP_SZ; + + /* Size is zero if a core dump isn't present */ + if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2)) + total_len = 0; + + if (len > total_len) + len = total_len; + + if (offset >= total_len + || offset + len > total_len + || len == 0) { + hi->status = ATTO_STS_INV_PARAM; + break; + } + + memcpy(trc->contents, + a->fw_coredump_buff + offset, + len); + hi->data_length = len; + } else if (trc->trace_func == ATTO_TRC_TF_RESET) { + memset(a->fw_coredump_buff, 0, + ESAS2R_FWCOREDUMP_SZ); + + clear_bit(AF2_COREDUMP_SAVED, &a->flags2); + } else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) { + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + /* Always return all the info we can. */ + trc->trace_mask = 0; + trc->current_offset = 0; + trc->total_length = ESAS2R_FWCOREDUMP_SZ; + + /* Return zero length buffer if core dump not present */ + if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2)) + trc->total_length = 0; + } else { + hi->status = ATTO_STS_UNSUPPORTED; + } + + break; + } + + case ATTO_FUNC_SCSI_PASS_THRU: + { + struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru; + struct scsi_lun lun; + + memcpy(&lun, spt->lun, sizeof(struct scsi_lun)); + + if (hi->flags & HBAF_TUNNEL) { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + if (hi->version > ATTO_VER_SCSI_PASS_THRU0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_SCSI_PASS_THRU0; + break; + } + + if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) { + hi->status = ATTO_STS_INV_PARAM; + break; + } + + esas2r_sgc_init(sgc, a, rq, NULL); + + sgc->length = hi->data_length; + sgc->cur_offset += offsetof(struct atto_ioctl, data.byte) + + sizeof(struct atto_hba_scsi_pass_thru); + + /* Finish request initialization */ + rq->target_id = (u16)spt->target_id; + rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]); + memcpy(rq->vrq->scsi.cdb, spt->cdb, 16); + rq->vrq->scsi.length = cpu_to_le32(hi->data_length); + rq->sense_len = spt->sense_length; + rq->sense_buf = (u8 *)spt->sense_data; + /* NOTE: we ignore spt->timeout */ + + /* + * always usurp the completion callback since the interrupt + * callback mechanism may be used. 
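+ * The original callback is saved in aux_req_cb so that scsi_passthru_comp_cb() can invoke it when the request completes.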
+ */ + + rq->aux_req_cx = hi; + rq->aux_req_cb = rq->comp_cb; + rq->comp_cb = scsi_passthru_comp_cb; + + if (spt->flags & ATTO_SPTF_DATA_IN) { + rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD); + } else if (spt->flags & ATTO_SPTF_DATA_OUT) { + rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD); + } else { + if (sgc->length) { + hi->status = ATTO_STS_INV_PARAM; + break; + } + } + + if (spt->flags & ATTO_SPTF_ORDERED_Q) + rq->vrq->scsi.flags |= + cpu_to_le32(FCP_CMND_TA_ORDRD_Q); + else if (spt->flags & ATTO_SPTF_HEAD_OF_Q) + rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q); + + + if (!esas2r_build_sg_list(a, rq, sgc)) { + hi->status = ATTO_STS_OUT_OF_RSRC; + break; + } + + esas2r_start_request(a, rq); + + return true; + } + + case ATTO_FUNC_GET_DEV_ADDR: + { + struct atto_hba_get_device_address *gda = + &hi->data.get_dev_addr; + struct esas2r_target *t; + + if (hi->flags & HBAF_TUNNEL) { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + if (hi->version > ATTO_VER_GET_DEV_ADDR0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_GET_DEV_ADDR0; + break; + } + + if (gda->target_id >= ESAS2R_MAX_TARGETS) { + hi->status = ATTO_STS_INV_PARAM; + break; + } + + t = a->targetdb + (u16)gda->target_id; + + if (t->target_state != TS_PRESENT) { + hi->status = ATTO_STS_FAILED; + } else if (gda->addr_type == ATTO_GDA_AT_PORT) { + if (t->sas_addr == 0) { + hi->status = ATTO_STS_UNSUPPORTED; + } else { + *(u64 *)gda->address = t->sas_addr; + + gda->addr_len = sizeof(u64); + } + } else if (gda->addr_type == ATTO_GDA_AT_NODE) { + hi->status = ATTO_STS_NOT_APPL; + } else { + hi->status = ATTO_STS_INV_PARAM; + } + + /* update the target ID to the next one present. */ + + gda->target_id = + esas2r_targ_db_find_next_present(a, + (u16)gda->target_id); + break; + } + + case ATTO_FUNC_PHY_CTRL: + case ATTO_FUNC_CONN_CTRL: + { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + case ATTO_FUNC_ADAP_CTRL: + { + struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl; + + if (hi->flags & HBAF_TUNNEL) { + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + if (hi->version > ATTO_VER_ADAP_CTRL0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_ADAP_CTRL0; + break; + } + + if (ac->adap_func == ATTO_AC_AF_HARD_RST) { + esas2r_reset_adapter(a); + } else if (ac->adap_func != ATTO_AC_AF_GET_STATE) { + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + if (test_bit(AF_CHPRST_NEEDED, &a->flags)) + ac->adap_state = ATTO_AC_AS_RST_SCHED; + else if (test_bit(AF_CHPRST_PENDING, &a->flags)) + ac->adap_state = ATTO_AC_AS_RST_IN_PROG; + else if (test_bit(AF_DISC_PENDING, &a->flags)) + ac->adap_state = ATTO_AC_AS_RST_DISC; + else if (test_bit(AF_DISABLED, &a->flags)) + ac->adap_state = ATTO_AC_AS_DISABLED; + else if (test_bit(AF_DEGRADED_MODE, &a->flags)) + ac->adap_state = ATTO_AC_AS_DEGRADED; + else + ac->adap_state = ATTO_AC_AS_OK; + + break; + } + + case ATTO_FUNC_GET_DEV_INFO: + { + struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info; + struct esas2r_target *t; + + if (hi->flags & HBAF_TUNNEL) { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + if (hi->version > ATTO_VER_GET_DEV_INFO0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_GET_DEV_INFO0; + break; + } + + if (gdi->target_id >= ESAS2R_MAX_TARGETS) { + hi->status = ATTO_STS_INV_PARAM; + break; + } + + t = a->targetdb + (u16)gdi->target_id; + + /* update the target ID to the next one present. 
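(presumably so a caller can enumerate present targets with successive calls)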
*/ + + gdi->target_id = + esas2r_targ_db_find_next_present(a, + (u16)gdi->target_id); + + if (t->target_state != TS_PRESENT) { + hi->status = ATTO_STS_FAILED; + break; + } + + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + default: + + hi->status = ATTO_STS_INV_FUNC; + break; + } + + return false; +} + +static void hba_ioctl_done_callback(struct esas2r_adapter *a, + struct esas2r_request *rq, void *context) +{ + struct atto_ioctl *ioctl_hba = + (struct atto_ioctl *)esas2r_buffered_ioctl; + + esas2r_debug("hba_ioctl_done_callback %d", a->index); + + if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) { + struct atto_hba_get_adapter_info *gai = + &ioctl_hba->data.get_adap_info; + + esas2r_debug("ATTO_FUNC_GET_ADAP_INFO"); + + gai->drvr_rev_major = ESAS2R_MAJOR_REV; + gai->drvr_rev_minor = ESAS2R_MINOR_REV; + + strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR); + strcpy(gai->drvr_name, ESAS2R_DRVR_NAME); + + gai->num_busses = 1; + gai->num_targsper_bus = ESAS2R_MAX_ID + 1; + gai->num_lunsper_targ = 1; + } +} + +u8 handle_hba_ioctl(struct esas2r_adapter *a, + struct atto_ioctl *ioctl_hba) +{ + struct esas2r_buffered_ioctl bi; + + memset(&bi, 0, sizeof(bi)); + + bi.a = a; + bi.ioctl = ioctl_hba; + bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length; + bi.callback = hba_ioctl_callback; + bi.context = NULL; + bi.done_callback = hba_ioctl_done_callback; + bi.done_context = NULL; + bi.offset = 0; + + return handle_buffered_ioctl(&bi); +} + + +int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq, + struct esas2r_sas_nvram *data) +{ + int result = 0; + + a->nvram_command_done = 0; + rq->comp_cb = complete_nvr_req; + + if (esas2r_nvram_write(a, rq, data)) { + /* now wait around for it to complete. */ + while (!a->nvram_command_done) + wait_event_interruptible(a->nvram_waiter, + a->nvram_command_done); + ; + + /* done, check the status. 
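A return value of 1 means the NVRAM write succeeded; 0 means it failed or was never issued.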
*/ + if (rq->req_stat == RS_SUCCESS) + result = 1; + } + return result; +} + + +/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */ +int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg) +{ + struct atto_express_ioctl *ioctl = NULL; + struct esas2r_adapter *a; + struct esas2r_request *rq; + u16 code; + int err; + + esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg); + + if ((arg == NULL) + || (cmd < EXPRESS_IOCTL_MIN) + || (cmd > EXPRESS_IOCTL_MAX)) + return -ENOTSUPP; + + ioctl = memdup_user(arg, sizeof(struct atto_express_ioctl)); + if (IS_ERR(ioctl)) { + esas2r_log(ESAS2R_LOG_WARN, + "ioctl_handler access_ok failed for cmd %u, address %p", + cmd, arg); + return PTR_ERR(ioctl); + } + + /* verify the signature */ + + if (memcmp(ioctl->header.signature, + EXPRESS_IOCTL_SIGNATURE, + EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) { + esas2r_log(ESAS2R_LOG_WARN, "invalid signature"); + kfree(ioctl); + + return -ENOTSUPP; + } + + /* assume success */ + + ioctl->header.return_code = IOCTL_SUCCESS; + err = 0; + + /* + * handle EXPRESS_IOCTL_GET_CHANNELS + * without paying attention to channel + */ + + if (cmd == EXPRESS_IOCTL_GET_CHANNELS) { + int i = 0, k = 0; + + ioctl->data.chanlist.num_channels = 0; + + while (i < MAX_ADAPTERS) { + if (esas2r_adapters[i]) { + ioctl->data.chanlist.num_channels++; + ioctl->data.chanlist.channel[k] = i; + k++; + } + i++; + } + + goto ioctl_done; + } + + /* get the channel */ + + if (ioctl->header.channel == 0xFF) { + a = (struct esas2r_adapter *)hostdata; + } else { + if (ioctl->header.channel >= MAX_ADAPTERS || + esas2r_adapters[ioctl->header.channel] == NULL) { + ioctl->header.return_code = IOCTL_BAD_CHANNEL; + esas2r_log(ESAS2R_LOG_WARN, "bad channel value"); + kfree(ioctl); + + return -ENOTSUPP; + } + a = esas2r_adapters[ioctl->header.channel]; + } + + switch (cmd) { + case EXPRESS_IOCTL_RW_FIRMWARE: + + if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) { + err = esas2r_write_fw(a, + (char *)ioctl->data.fwrw.image, + 0, + sizeof(struct + atto_express_ioctl)); + + if (err >= 0) { + err = esas2r_read_fw(a, + (char *)ioctl->data.fwrw. + image, + 0, + sizeof(struct + atto_express_ioctl)); + } + } else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) { + err = esas2r_write_fs(a, + (char *)ioctl->data.fwrw.image, + 0, + sizeof(struct + atto_express_ioctl)); + + if (err >= 0) { + err = esas2r_read_fs(a, + (char *)ioctl->data.fwrw. 
+ image, + 0, + sizeof(struct + atto_express_ioctl)); + } + } else { + ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE; + } + + break; + + case EXPRESS_IOCTL_READ_PARAMS: + + memcpy(ioctl->data.prw.data_buffer, a->nvram, + sizeof(struct esas2r_sas_nvram)); + ioctl->data.prw.code = 1; + break; + + case EXPRESS_IOCTL_WRITE_PARAMS: + + rq = esas2r_alloc_request(a); + if (rq == NULL) { + kfree(ioctl); + esas2r_log(ESAS2R_LOG_WARN, + "could not allocate an internal request"); + return -ENOMEM; + } + + code = esas2r_write_params(a, rq, + (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer); + ioctl->data.prw.code = code; + + esas2r_free_request(a, rq); + + break; + + case EXPRESS_IOCTL_DEFAULT_PARAMS: + + esas2r_nvram_get_defaults(a, + (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer); + ioctl->data.prw.code = 1; + break; + + case EXPRESS_IOCTL_CHAN_INFO: + + ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV; + ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV; + ioctl->data.chaninfo.IRQ = a->pcid->irq; + ioctl->data.chaninfo.device_id = a->pcid->device; + ioctl->data.chaninfo.vendor_id = a->pcid->vendor; + ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device; + ioctl->data.chaninfo.revision_id = a->pcid->revision; + ioctl->data.chaninfo.pci_bus = a->pcid->bus->number; + ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn; + ioctl->data.chaninfo.core_rev = 0; + ioctl->data.chaninfo.host_no = a->host->host_no; + ioctl->data.chaninfo.hbaapi_rev = 0; + break; + + case EXPRESS_IOCTL_SMP: + ioctl->header.return_code = handle_smp_ioctl(a, + &ioctl->data. + ioctl_smp); + break; + + case EXPRESS_CSMI: + ioctl->header.return_code = + handle_csmi_ioctl(a, &ioctl->data.csmi); + break; + + case EXPRESS_IOCTL_HBA: + ioctl->header.return_code = handle_hba_ioctl(a, + &ioctl->data. 
+ ioctl_hba); + break; + + case EXPRESS_IOCTL_VDA: + err = esas2r_write_vda(a, + (char *)&ioctl->data.ioctl_vda, + 0, + sizeof(struct atto_ioctl_vda) + + ioctl->data.ioctl_vda.data_length); + + if (err >= 0) { + err = esas2r_read_vda(a, + (char *)&ioctl->data.ioctl_vda, + 0, + sizeof(struct atto_ioctl_vda) + + ioctl->data.ioctl_vda.data_length); + } + + + + + break; + + case EXPRESS_IOCTL_GET_MOD_INFO: + + ioctl->data.modinfo.adapter = a; + ioctl->data.modinfo.pci_dev = a->pcid; + ioctl->data.modinfo.scsi_host = a->host; + ioctl->data.modinfo.host_no = a->host->host_no; + + break; + + default: + esas2r_debug("esas2r_ioctl invalid cmd %p!", cmd); + ioctl->header.return_code = IOCTL_ERR_INVCMD; + } + +ioctl_done: + + if (err < 0) { + esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %u", err, + cmd); + + switch (err) { + case -ENOMEM: + case -EBUSY: + ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES; + break; + + case -ENOSYS: + case -EINVAL: + ioctl->header.return_code = IOCTL_INVALID_PARAM; + break; + + default: + ioctl->header.return_code = IOCTL_GENERAL_ERROR; + break; + } + + } + + /* Always copy the buffer back, if only to pick up the status */ + err = copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl)); + if (err != 0) { + esas2r_log(ESAS2R_LOG_WARN, + "ioctl_handler copy_to_user didn't copy everything (err %d, cmd %u)", + err, cmd); + kfree(ioctl); + + return -EFAULT; + } + + kfree(ioctl); + + return 0; +} + +int esas2r_ioctl(struct scsi_device *sd, unsigned int cmd, void __user *arg) +{ + return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg); +} + +static void free_fw_buffers(struct esas2r_adapter *a) +{ + if (a->firmware.data) { + dma_free_coherent(&a->pcid->dev, + (size_t)a->firmware.orig_len, + a->firmware.data, + (dma_addr_t)a->firmware.phys); + + a->firmware.data = NULL; + } +} + +static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length) +{ + free_fw_buffers(a); + + a->firmware.orig_len = length; + + a->firmware.data = dma_alloc_coherent(&a->pcid->dev, + (size_t)length, + (dma_addr_t *)&a->firmware.phys, + GFP_KERNEL); + + if (!a->firmware.data) { + esas2r_debug("buffer alloc failed!"); + return 0; + } + + return 1; +} + +/* Handle a call to read firmware. */ +int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count) +{ + esas2r_trace_enter(); + /* if the cached header is a status, simply copy it over and return. */ + if (a->firmware.state == FW_STATUS_ST) { + int size = min_t(int, count, sizeof(a->firmware.header)); + esas2r_trace_exit(); + memcpy(buf, &a->firmware.header, size); + esas2r_debug("esas2r_read_fw: STATUS size %d", size); + return size; + } + + /* + * if the cached header is a command, do it if at + * offset 0, otherwise copy the pieces. 
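+ * The header was cached by a preceding esas2r_write_fw() call that left the firmware state at FW_COMMAND_ST.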
+ */ + + if (a->firmware.state == FW_COMMAND_ST) { + u32 length = a->firmware.header.length; + esas2r_trace_exit(); + + esas2r_debug("esas2r_read_fw: COMMAND length %d off %d", + length, + off); + + if (off == 0) { + if (a->firmware.header.action == FI_ACT_UP) { + if (!allocate_fw_buffers(a, length)) + return -ENOMEM; + + + /* copy header over */ + + memcpy(a->firmware.data, + &a->firmware.header, + sizeof(a->firmware.header)); + + do_fm_api(a, + (struct esas2r_flash_img *)a->firmware.data); + } else if (a->firmware.header.action == FI_ACT_UPSZ) { + int size = + min((int)count, + (int)sizeof(a->firmware.header)); + do_fm_api(a, &a->firmware.header); + memcpy(buf, &a->firmware.header, size); + esas2r_debug("FI_ACT_UPSZ size %d", size); + return size; + } else { + esas2r_debug("invalid action %d", + a->firmware.header.action); + return -ENOSYS; + } + } + + if (count + off > length) + count = length - off; + + if (count < 0) + return 0; + + if (!a->firmware.data) { + esas2r_debug( + "read: nonzero offset but no buffer available!"); + return -ENOMEM; + } + + esas2r_debug("esas2r_read_fw: off %d count %d length %d ", off, + count, + length); + + memcpy(buf, &a->firmware.data[off], count); + + /* when done, release the buffer */ + + if (length <= off + count) { + esas2r_debug("esas2r_read_fw: freeing buffer!"); + + free_fw_buffers(a); + } + + return count; + } + + esas2r_trace_exit(); + esas2r_debug("esas2r_read_fw: invalid firmware state %d", + a->firmware.state); + + return -EINVAL; +} + +/* Handle a call to write firmware. */ +int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off, + int count) +{ + u32 length; + + if (off == 0) { + struct esas2r_flash_img *header = + (struct esas2r_flash_img *)buf; + + /* assume version 0 flash image */ + + int min_size = sizeof(struct esas2r_flash_img_v0); + + a->firmware.state = FW_INVALID_ST; + + /* validate the version field first */ + + if (count < 4 + || header->fi_version > FI_VERSION_1) { + esas2r_debug( + "esas2r_write_fw: short header or invalid version"); + return -EINVAL; + } + + /* See if its a version 1 flash image */ + + if (header->fi_version == FI_VERSION_1) + min_size = sizeof(struct esas2r_flash_img); + + /* If this is the start, the header must be full and valid. */ + if (count < min_size) { + esas2r_debug("esas2r_write_fw: short header, aborting"); + return -EINVAL; + } + + /* Make sure the size is reasonable. */ + length = header->length; + + if (length > 1024 * 1024) { + esas2r_debug( + "esas2r_write_fw: hosed, length %d fi_version %d", + length, header->fi_version); + return -EINVAL; + } + + /* + * If this is a write command, allocate memory because + * we have to cache everything. otherwise, just cache + * the header, because the read op will do the command. + */ + + if (header->action == FI_ACT_DOWN) { + if (!allocate_fw_buffers(a, length)) + return -ENOMEM; + + /* + * Store the command, so there is context on subsequent + * calls. + */ + memcpy(&a->firmware.header, + buf, + sizeof(*header)); + } else if (header->action == FI_ACT_UP + || header->action == FI_ACT_UPSZ) { + /* Save the command, result will be picked up on read */ + memcpy(&a->firmware.header, + buf, + sizeof(*header)); + + a->firmware.state = FW_COMMAND_ST; + + esas2r_debug( + "esas2r_write_fw: COMMAND, count %d, action %d ", + count, header->action); + + /* + * Pretend we took the whole buffer, + * so we don't get bothered again. 
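+ * The cached command is executed later, when the matching read calls do_fm_api().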
+ */ + + return count; + } else { + esas2r_debug("esas2r_write_fw: invalid action %d ", + a->firmware.header.action); + return -ENOSYS; + } + } else { + length = a->firmware.header.length; + } + + /* + * We only get here on a download command, regardless of offset. + * the chunks written by the system need to be cached, and when + * the final one arrives, issue the fmapi command. + */ + + if (off + count > length) + count = length - off; + + if (count > 0) { + esas2r_debug("esas2r_write_fw: off %d count %d length %d", off, + count, + length); + + /* + * On a full upload, the system tries sending the whole buffer. + * there's nothing to do with it, so just drop it here, before + * trying to copy over into unallocated memory! + */ + if (a->firmware.header.action == FI_ACT_UP) + return count; + + if (!a->firmware.data) { + esas2r_debug( + "write: nonzero offset but no buffer available!"); + return -ENOMEM; + } + + memcpy(&a->firmware.data[off], buf, count); + + if (length == off + count) { + do_fm_api(a, + (struct esas2r_flash_img *)a->firmware.data); + + /* + * Now copy the header result to be picked up by the + * next read + */ + memcpy(&a->firmware.header, + a->firmware.data, + sizeof(a->firmware.header)); + + a->firmware.state = FW_STATUS_ST; + + esas2r_debug("write completed"); + + /* + * Since the system has the data buffered, the only way + * this can leak is if a root user writes a program + * that writes a shorter buffer than it claims, and the + * copyin fails. + */ + free_fw_buffers(a); + } + } + + return count; +} + +/* Callback for the completion of a VDA request. */ +static void vda_complete_req(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + a->vda_command_done = 1; + wake_up_interruptible(&a->vda_waiter); +} + +/* Scatter/gather callback for VDA requests */ +static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; + int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer; + + (*addr) = a->ppvda_buffer + offset; + return VDA_MAX_BUFFER_SIZE - offset; +} + +/* Handle a call to read a VDA command. */ +int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count) +{ + if (!a->vda_buffer) + return -ENOMEM; + + if (off == 0) { + struct esas2r_request *rq; + struct atto_ioctl_vda *vi = + (struct atto_ioctl_vda *)a->vda_buffer; + struct esas2r_sg_context sgc; + bool wait_for_completion; + + /* + * Presumeably, someone has already written to the vda_buffer, + * and now they are reading the node the response, so now we + * will actually issue the request to the chip and reply. + */ + + /* allocate a request */ + rq = esas2r_alloc_request(a); + if (rq == NULL) { + esas2r_debug("esas2r_read_vda: out of requests"); + return -EBUSY; + } + + rq->comp_cb = vda_complete_req; + + sgc.first_req = rq; + sgc.adapter = a; + sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ; + sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda; + + a->vda_command_done = 0; + + wait_for_completion = + esas2r_process_vda_ioctl(a, vi, rq, &sgc); + + if (wait_for_completion) { + /* now wait around for it to complete. 
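vda_complete_req() sets vda_command_done and wakes vda_waiter from the request's completion callback.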
*/ + + while (!a->vda_command_done) + wait_event_interruptible(a->vda_waiter, + a->vda_command_done); + } + + esas2r_free_request(a, (struct esas2r_request *)rq); + } + + if (off > VDA_MAX_BUFFER_SIZE) + return 0; + + if (count + off > VDA_MAX_BUFFER_SIZE) + count = VDA_MAX_BUFFER_SIZE - off; + + if (count < 0) + return 0; + + memcpy(buf, a->vda_buffer + off, count); + + return count; +} + +/* Handle a call to write a VDA command. */ +int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off, + int count) +{ + /* + * allocate memory for it, if not already done. once allocated, + * we will keep it around until the driver is unloaded. + */ + + if (!a->vda_buffer) { + dma_addr_t dma_addr; + a->vda_buffer = dma_alloc_coherent(&a->pcid->dev, + (size_t) + VDA_MAX_BUFFER_SIZE, + &dma_addr, + GFP_KERNEL); + + a->ppvda_buffer = dma_addr; + } + + if (!a->vda_buffer) + return -ENOMEM; + + if (off > VDA_MAX_BUFFER_SIZE) + return 0; + + if (count + off > VDA_MAX_BUFFER_SIZE) + count = VDA_MAX_BUFFER_SIZE - off; + + if (count < 1) + return 0; + + memcpy(a->vda_buffer + off, buf, count); + + return count; +} + +/* Callback for the completion of an FS_API request.*/ +static void fs_api_complete_req(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + a->fs_api_command_done = 1; + + wake_up_interruptible(&a->fs_api_waiter); +} + +/* Scatter/gather callback for VDA requests */ +static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; + struct esas2r_ioctl_fs *fs = + (struct esas2r_ioctl_fs *)a->fs_api_buffer; + u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs; + + (*addr) = a->ppfs_api_buffer + offset; + + return a->fs_api_buffer_size - offset; +} + +/* Handle a call to read firmware via FS_API. */ +int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count) +{ + if (!a->fs_api_buffer) + return -ENOMEM; + + if (off == 0) { + struct esas2r_request *rq; + struct esas2r_sg_context sgc; + struct esas2r_ioctl_fs *fs = + (struct esas2r_ioctl_fs *)a->fs_api_buffer; + + /* If another flash request is already in progress, return. */ + if (mutex_lock_interruptible(&a->fs_api_mutex)) { +busy: + fs->status = ATTO_STS_OUT_OF_RSRC; + return -EBUSY; + } + + /* + * Presumeably, someone has already written to the + * fs_api_buffer, and now they are reading the node the + * response, so now we will actually issue the request to the + * chip and reply. Allocate a request + */ + + rq = esas2r_alloc_request(a); + if (rq == NULL) { + esas2r_debug("esas2r_read_fs: out of requests"); + mutex_unlock(&a->fs_api_mutex); + goto busy; + } + + rq->comp_cb = fs_api_complete_req; + + /* Set up the SGCONTEXT for to build the s/g table */ + + sgc.cur_offset = fs->data; + sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api; + + a->fs_api_command_done = 0; + + if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) { + if (fs->status == ATTO_STS_OUT_OF_RSRC) + count = -EBUSY; + + goto dont_wait; + } + + /* Now wait around for it to complete. 
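fs_api_complete_req() sets fs_api_command_done and wakes fs_api_waiter when the firmware request finishes.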
*/ + + while (!a->fs_api_command_done) + wait_event_interruptible(a->fs_api_waiter, + a->fs_api_command_done); + ; +dont_wait: + /* Free the request and keep going */ + mutex_unlock(&a->fs_api_mutex); + esas2r_free_request(a, (struct esas2r_request *)rq); + + /* Pick up possible error code from above */ + if (count < 0) + return count; + } + + if (off > a->fs_api_buffer_size) + return 0; + + if (count + off > a->fs_api_buffer_size) + count = a->fs_api_buffer_size - off; + + if (count < 0) + return 0; + + memcpy(buf, a->fs_api_buffer + off, count); + + return count; +} + +/* Handle a call to write firmware via FS_API. */ +int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off, + int count) +{ + if (off == 0) { + struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf; + u32 length = fs->command.length + offsetof( + struct esas2r_ioctl_fs, + data); + + /* + * Special case, for BEGIN commands, the length field + * is lying to us, so just get enough for the header. + */ + + if (fs->command.command == ESAS2R_FS_CMD_BEGINW) + length = offsetof(struct esas2r_ioctl_fs, data); + + /* + * Beginning a command. We assume we'll get at least + * enough in the first write so we can look at the + * header and see how much we need to alloc. + */ + + if (count < offsetof(struct esas2r_ioctl_fs, data)) + return -EINVAL; + + /* Allocate a buffer or use the existing buffer. */ + if (a->fs_api_buffer) { + if (a->fs_api_buffer_size < length) { + /* Free too-small buffer and get a new one */ + dma_free_coherent(&a->pcid->dev, + (size_t)a->fs_api_buffer_size, + a->fs_api_buffer, + (dma_addr_t)a->ppfs_api_buffer); + + goto re_allocate_buffer; + } + } else { +re_allocate_buffer: + a->fs_api_buffer_size = length; + + a->fs_api_buffer = dma_alloc_coherent(&a->pcid->dev, + (size_t)a->fs_api_buffer_size, + (dma_addr_t *)&a->ppfs_api_buffer, + GFP_KERNEL); + } + } + + if (!a->fs_api_buffer) + return -ENOMEM; + + if (off > a->fs_api_buffer_size) + return 0; + + if (count + off > a->fs_api_buffer_size) + count = a->fs_api_buffer_size - off; + + if (count < 1) + return 0; + + memcpy(a->fs_api_buffer + off, buf, count); + + return count; +} diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c new file mode 100644 index 000000000..d6c87a0ba --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_log.c @@ -0,0 +1,252 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_log.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "esas2r.h" + +/* + * this module within the driver is tasked with providing logging functionality. + * the event_log_level module parameter controls the level of messages that are + * written to the system log. the default level of messages that are written + * are critical and warning messages. if other types of messages are desired, + * one simply needs to load the module with the correct value for the + * event_log_level module parameter. for example: + * + * insmod event_log_level=1 + * + * will load the module and only critical events will be written by this module + * to the system log. if critical, warning, and information-level messages are + * desired, the correct value for the event_log_level module parameter + * would be as follows: + * + * insmod event_log_level=3 + */ + +#define EVENT_LOG_BUFF_SIZE 1024 + +static long event_log_level = ESAS2R_LOG_DFLT; + +module_param(event_log_level, long, S_IRUGO | S_IRUSR); +MODULE_PARM_DESC(event_log_level, + "Specifies the level of events to report to the system log. Critical and warning level events are logged by default."); + +/* A shared buffer to use for formatting messages. */ +static char event_buffer[EVENT_LOG_BUFF_SIZE]; + +/* A lock to protect the shared buffer used for formatting messages. */ +static DEFINE_SPINLOCK(event_buffer_lock); + +/* + * translates an esas2r-defined logging event level to a kernel logging level. + * + * @param [in] level the esas2r-defined logging event level to translate + * + * @return the corresponding kernel logging level. + */ +static const char *translate_esas2r_event_level_to_kernel(const long level) +{ + switch (level) { + case ESAS2R_LOG_CRIT: + return KERN_CRIT; + + case ESAS2R_LOG_WARN: + return KERN_WARNING; + + case ESAS2R_LOG_INFO: + return KERN_INFO; + + case ESAS2R_LOG_DEBG: + case ESAS2R_LOG_TRCE: + default: + return KERN_DEBUG; + } +} + +#pragma GCC diagnostic push +#ifndef __clang__ +#pragma GCC diagnostic ignored "-Wsuggest-attribute=format" +#endif + +/* + * the master logging function. this function will format the message as + * outlined by the formatting string, the input device information and the + * substitution arguments and output the resulting string to the system log. 
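+ * Messages whose level value is numerically greater than event_log_level are dropped without being formatted; with the default level (ESAS2R_LOG_WARN), for example, ESAS2R_LOG_INFO messages are not logged.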
+ * + * @param [in] level the event log level of the message + * @param [in] dev the device information + * @param [in] format the formatting string for the message + * @param [in] args the substition arguments to the formatting string + * + * @return 0 on success, or -1 if an error occurred. + */ +static int esas2r_log_master(const long level, + const struct device *dev, + const char *format, + va_list args) +{ + if (level <= event_log_level) { + unsigned long flags = 0; + int retval = 0; + char *buffer = event_buffer; + size_t buflen = EVENT_LOG_BUFF_SIZE; + const char *fmt_nodev = "%s%s: "; + const char *fmt_dev = "%s%s [%s, %s, %s]"; + const char *slevel = + translate_esas2r_event_level_to_kernel(level); + + spin_lock_irqsave(&event_buffer_lock, flags); + + memset(buffer, 0, buflen); + + /* + * format the level onto the beginning of the string and do + * some pointer arithmetic to move the pointer to the point + * where the actual message can be inserted. + */ + + if (dev == NULL) { + snprintf(buffer, buflen, fmt_nodev, slevel, + ESAS2R_DRVR_NAME); + } else { + snprintf(buffer, buflen, fmt_dev, slevel, + ESAS2R_DRVR_NAME, + (dev->driver ? dev->driver->name : "unknown"), + (dev->bus ? dev->bus->name : "unknown"), + dev_name(dev)); + } + + buffer += strlen(event_buffer); + buflen -= strlen(event_buffer); + + retval = vsnprintf(buffer, buflen, format, args); + if (retval < 0) { + spin_unlock_irqrestore(&event_buffer_lock, flags); + return -1; + } + + /* + * Put a line break at the end of the formatted string so that + * we don't wind up with run-on messages. + */ + printk("%s\n", event_buffer); + + spin_unlock_irqrestore(&event_buffer_lock, flags); + } + + return 0; +} + +#pragma GCC diagnostic pop + +/* + * formats and logs a message to the system log. + * + * @param [in] level the event level of the message + * @param [in] format the formating string for the message + * @param [in] ... the substitution arguments to the formatting string + * + * @return 0 on success, or -1 if an error occurred. + */ +int esas2r_log(const long level, const char *format, ...) +{ + int retval = 0; + va_list args; + + va_start(args, format); + + retval = esas2r_log_master(level, NULL, format, args); + + va_end(args); + + return retval; +} + +/* + * formats and logs a message to the system log. this message will include + * device information. + * + * @param [in] level the event level of the message + * @param [in] dev the device information + * @param [in] format the formatting string for the message + * @param [in] ... the substitution arguments to the formatting string + * + * @return 0 on success, or -1 if an error occurred. + */ +int esas2r_log_dev(const long level, + const struct device *dev, + const char *format, + ...) +{ + int retval = 0; + va_list args; + + va_start(args, format); + + retval = esas2r_log_master(level, dev, format, args); + + va_end(args); + + return retval; +} + +/* + * formats and logs a message to the system log. this message will include + * device information. + * + * @param [in] level the event level of the message + * @param [in] buf + * @param [in] len + * + * @return 0 on success, or -1 if an error occurred. 
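+ * (note: as currently written, this function always returns 1)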
+ */ +int esas2r_log_hexdump(const long level, + const void *buf, + size_t len) +{ + if (level <= event_log_level) { + print_hex_dump(translate_esas2r_event_level_to_kernel(level), + "", DUMP_PREFIX_OFFSET, 16, 1, buf, + len, true); + } + + return 1; +} diff --git a/drivers/scsi/esas2r/esas2r_log.h b/drivers/scsi/esas2r/esas2r_log.h new file mode 100644 index 000000000..75b9d23cd --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_log.h @@ -0,0 +1,118 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_log.h + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#ifndef __esas2r_log_h__ +#define __esas2r_log_h__ + +struct device; + +enum { + ESAS2R_LOG_NONE = 0, /* no events logged */ + ESAS2R_LOG_CRIT = 1, /* critical events */ + ESAS2R_LOG_WARN = 2, /* warning events */ + ESAS2R_LOG_INFO = 3, /* info events */ + ESAS2R_LOG_DEBG = 4, /* debugging events */ + ESAS2R_LOG_TRCE = 5, /* tracing events */ + +#ifdef ESAS2R_TRACE + ESAS2R_LOG_DFLT = ESAS2R_LOG_TRCE +#else + ESAS2R_LOG_DFLT = ESAS2R_LOG_WARN +#endif +}; + +__printf(2, 3) int esas2r_log(const long level, const char *format, ...); +__printf(3, 4) int esas2r_log_dev(const long level, + const struct device *dev, + const char *format, + ...); +int esas2r_log_hexdump(const long level, + const void *buf, + size_t len); + +/* + * the following macros are provided specifically for debugging and tracing + * messages. esas2r_debug() is provided for generic non-hardware layer + * debugging and tracing events. 
esas2r_hdebug is provided specifically for + * hardware layer debugging and tracing events. + */ + +#ifdef ESAS2R_DEBUG +#define esas2r_debug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args) +#define esas2r_hdebug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args) +#else +#define esas2r_debug(f, args ...) +#define esas2r_hdebug(f, args ...) +#endif /* ESAS2R_DEBUG */ + +/* + * the following macros are provided in order to trace the driver and catch + * some more serious bugs. be warned, enabling these macros may *severely* + * impact performance. + */ + +#ifdef ESAS2R_TRACE +#define esas2r_bugon() \ + do { \ + esas2r_log(ESAS2R_LOG_TRCE, "esas2r_bugon() called in %s:%d" \ + " - dumping stack and stopping kernel", __func__, \ + __LINE__); \ + dump_stack(); \ + BUG(); \ + } while (0) + +#define esas2r_trace_enter() esas2r_log(ESAS2R_LOG_TRCE, "entered %s (%s:%d)", \ + __func__, __FILE__, __LINE__) +#define esas2r_trace_exit() esas2r_log(ESAS2R_LOG_TRCE, "exited %s (%s:%d)", \ + __func__, __FILE__, __LINE__) +#define esas2r_trace(f, args ...) esas2r_log(ESAS2R_LOG_TRCE, "(%s:%s:%d): " \ + f, __func__, __FILE__, __LINE__, \ + ## args) +#else +#define esas2r_bugon() +#define esas2r_trace_enter() +#define esas2r_trace_exit() +#define esas2r_trace(f, args ...) +#endif /* ESAS2R_TRACE */ + +#endif /* __esas2r_log_h__ */ diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c new file mode 100644 index 000000000..f700a16cd --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -0,0 +1,1912 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_main.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "esas2r.h" + +MODULE_DESCRIPTION(ESAS2R_DRVR_NAME ": " ESAS2R_LONGNAME " driver"); +MODULE_AUTHOR("ATTO Technology, Inc."); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ESAS2R_VERSION_STR); + +/* global definitions */ + +static int found_adapters; +struct esas2r_adapter *esas2r_adapters[MAX_ADAPTERS]; + +#define ESAS2R_VDA_EVENT_PORT1 54414 +#define ESAS2R_VDA_EVENT_PORT2 54415 +#define ESAS2R_VDA_EVENT_SOCK_COUNT 2 + +static struct esas2r_adapter *esas2r_adapter_from_kobj(struct kobject *kobj) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct Scsi_Host *host = class_to_shost(dev); + + return (struct esas2r_adapter *)host->hostdata; +} + +static ssize_t read_fw(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + return esas2r_read_fw(a, buf, off, count); +} + +static ssize_t write_fw(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + return esas2r_write_fw(a, buf, off, count); +} + +static ssize_t read_fs(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + return esas2r_read_fs(a, buf, off, count); +} + +static ssize_t write_fs(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + int length = min(sizeof(struct esas2r_ioctl_fs), count); + int result = 0; + + result = esas2r_write_fs(a, buf, off, count); + + if (result < 0) + result = 0; + + return length; +} + +static ssize_t read_vda(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + return esas2r_read_vda(a, buf, off, count); +} + +static ssize_t write_vda(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + return esas2r_write_vda(a, buf, off, count); +} + +static ssize_t read_live_nvram(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + int length = min_t(size_t, sizeof(struct esas2r_sas_nvram), PAGE_SIZE); + + memcpy(buf, a->nvram, length); + return length; +} + +static ssize_t write_live_nvram(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, 
loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + struct esas2r_request *rq; + int result = -EFAULT; + + rq = esas2r_alloc_request(a); + if (rq == NULL) + return -ENOMEM; + + if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf)) + result = count; + + esas2r_free_request(a, rq); + + return result; +} + +static ssize_t read_default_nvram(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + esas2r_nvram_get_defaults(a, (struct esas2r_sas_nvram *)buf); + + return sizeof(struct esas2r_sas_nvram); +} + +static ssize_t read_hw(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + int length = min_t(size_t, sizeof(struct atto_ioctl), PAGE_SIZE); + + if (!a->local_atto_ioctl) + return -ENOMEM; + + if (handle_hba_ioctl(a, a->local_atto_ioctl) != IOCTL_SUCCESS) + return -ENOMEM; + + memcpy(buf, a->local_atto_ioctl, length); + + return length; +} + +static ssize_t write_hw(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + int length = min(sizeof(struct atto_ioctl), count); + + if (!a->local_atto_ioctl) { + a->local_atto_ioctl = kmalloc(sizeof(struct atto_ioctl), + GFP_KERNEL); + if (a->local_atto_ioctl == NULL) { + esas2r_log(ESAS2R_LOG_WARN, + "write_hw kzalloc failed for %zu bytes", + sizeof(struct atto_ioctl)); + return -ENOMEM; + } + } + + memset(a->local_atto_ioctl, 0, sizeof(struct atto_ioctl)); + memcpy(a->local_atto_ioctl, buf, length); + + return length; +} + +#define ESAS2R_RW_BIN_ATTR(_name) \ + struct bin_attribute bin_attr_ ## _name = { \ + .attr = \ + { .name = __stringify(_name), .mode = S_IRUSR | S_IWUSR }, \ + .size = 0, \ + .read = read_ ## _name, \ + .write = write_ ## _name } + +ESAS2R_RW_BIN_ATTR(fw); +ESAS2R_RW_BIN_ATTR(fs); +ESAS2R_RW_BIN_ATTR(vda); +ESAS2R_RW_BIN_ATTR(hw); +ESAS2R_RW_BIN_ATTR(live_nvram); + +struct bin_attribute bin_attr_default_nvram = { + .attr = { .name = "default_nvram", .mode = S_IRUGO }, + .size = 0, + .read = read_default_nvram, + .write = NULL +}; + +static const struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .show_info = esas2r_show_info, + .name = ESAS2R_LONGNAME, + .info = esas2r_info, + .ioctl = esas2r_ioctl, + .queuecommand = esas2r_queuecommand, + .eh_abort_handler = esas2r_eh_abort, + .eh_device_reset_handler = esas2r_device_reset, + .eh_bus_reset_handler = esas2r_bus_reset, + .eh_host_reset_handler = esas2r_host_reset, + .eh_target_reset_handler = esas2r_target_reset, + .can_queue = 128, + .this_id = -1, + .sg_tablesize = SG_CHUNK_SIZE, + .cmd_per_lun = + ESAS2R_DEFAULT_CMD_PER_LUN, + .proc_name = ESAS2R_DRVR_NAME, + .change_queue_depth = scsi_change_queue_depth, + .max_sectors = 0xFFFF, +}; + +int sgl_page_size = 512; +module_param(sgl_page_size, int, 0); +MODULE_PARM_DESC(sgl_page_size, + "Scatter/gather list (SGL) page size in number of S/G " + "entries. If your application is doing a lot of very large " + "transfers, you may want to increase the SGL page size. " + "Default 512."); + +int num_sg_lists = 1024; +module_param(num_sg_lists, int, 0); +MODULE_PARM_DESC(num_sg_lists, + "Number of scatter/gather lists. 
Default 1024."); + +int sg_tablesize = SG_CHUNK_SIZE; +module_param(sg_tablesize, int, 0); +MODULE_PARM_DESC(sg_tablesize, + "Maximum number of entries in a scatter/gather table."); + +int num_requests = 256; +module_param(num_requests, int, 0); +MODULE_PARM_DESC(num_requests, + "Number of requests. Default 256."); + +int num_ae_requests = 4; +module_param(num_ae_requests, int, 0); +MODULE_PARM_DESC(num_ae_requests, + "Number of VDA asynchronous event requests. Default 4."); + +int cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN; +module_param(cmd_per_lun, int, 0); +MODULE_PARM_DESC(cmd_per_lun, + "Maximum number of commands per LUN. Default " + DEFINED_NUM_TO_STR(ESAS2R_DEFAULT_CMD_PER_LUN) "."); + +int can_queue = 128; +module_param(can_queue, int, 0); +MODULE_PARM_DESC(can_queue, + "Maximum number of commands per adapter. Default 128."); + +int esas2r_max_sectors = 0xFFFF; +module_param(esas2r_max_sectors, int, 0); +MODULE_PARM_DESC(esas2r_max_sectors, + "Maximum number of disk sectors in a single data transfer. " + "Default 65535 (largest possible setting)."); + +int interrupt_mode = 1; +module_param(interrupt_mode, int, 0); +MODULE_PARM_DESC(interrupt_mode, + "Defines the interrupt mode to use. 0 for legacy" + ", 1 for MSI. Default is MSI (1)."); + +static const struct pci_device_id + esas2r_pci_table[] = { + { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x0049, + 0, + 0, 0 }, + { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004A, + 0, + 0, 0 }, + { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004B, + 0, + 0, 0 }, + { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004C, + 0, + 0, 0 }, + { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004D, + 0, + 0, 0 }, + { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004E, + 0, + 0, 0 }, + { 0, 0, 0, 0, + 0, + 0, 0 } +}; + +MODULE_DEVICE_TABLE(pci, esas2r_pci_table); + +static int +esas2r_probe(struct pci_dev *pcid, const struct pci_device_id *id); + +static void +esas2r_remove(struct pci_dev *pcid); + +static struct pci_driver + esas2r_pci_driver = { + .name = ESAS2R_DRVR_NAME, + .id_table = esas2r_pci_table, + .probe = esas2r_probe, + .remove = esas2r_remove, + .driver.pm = &esas2r_pm_ops, +}; + +static int esas2r_probe(struct pci_dev *pcid, + const struct pci_device_id *id) +{ + struct Scsi_Host *host = NULL; + struct esas2r_adapter *a; + int err; + + size_t host_alloc_size = sizeof(struct esas2r_adapter) + + ((num_requests) + + 1) * sizeof(struct esas2r_request); + + esas2r_log_dev(ESAS2R_LOG_DEBG, &(pcid->dev), + "esas2r_probe() 0x%02x 0x%02x 0x%02x 0x%02x", + pcid->vendor, + pcid->device, + pcid->subsystem_vendor, + pcid->subsystem_device); + + esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev), + "before pci_enable_device() " + "enable_cnt: %d", + pcid->enable_cnt.counter); + + err = pci_enable_device(pcid); + if (err != 0) { + esas2r_log_dev(ESAS2R_LOG_CRIT, &(pcid->dev), + "pci_enable_device() FAIL (%d)", + err); + return -ENODEV; + } + + esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev), + "pci_enable_device() OK"); + esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev), + "after pci_enable_device() enable_cnt: %d", + pcid->enable_cnt.counter); + + host = scsi_host_alloc(&driver_template, host_alloc_size); + if (host == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, "scsi_host_alloc() FAIL"); + return -ENODEV; + } + + memset(host->hostdata, 0, host_alloc_size); + + a = (struct esas2r_adapter *)host->hostdata; + + esas2r_log(ESAS2R_LOG_INFO, "scsi_host_alloc() OK host: %p", host); + + /* override max LUN and max target id */ + + host->max_id = ESAS2R_MAX_ID + 1; + host->max_lun = 255; + + /* we 
can handle 16-byte CDbs */ + + host->max_cmd_len = 16; + + host->can_queue = can_queue; + host->cmd_per_lun = cmd_per_lun; + host->this_id = host->max_id + 1; + host->max_channel = 0; + host->unique_id = found_adapters; + host->sg_tablesize = sg_tablesize; + host->max_sectors = esas2r_max_sectors; + + /* set to bus master for BIOses that don't do it for us */ + + esas2r_log(ESAS2R_LOG_INFO, "pci_set_master() called"); + + pci_set_master(pcid); + + if (!esas2r_init_adapter(host, pcid, found_adapters)) { + esas2r_log(ESAS2R_LOG_CRIT, + "unable to initialize device at PCI bus %x:%x", + pcid->bus->number, + pcid->devfn); + + esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev), + "scsi_host_put() called"); + + scsi_host_put(host); + + return 0; + + } + + esas2r_log(ESAS2R_LOG_INFO, "pci_set_drvdata(%p, %p) called", pcid, + host->hostdata); + + pci_set_drvdata(pcid, host); + + esas2r_log(ESAS2R_LOG_INFO, "scsi_add_host() called"); + + err = scsi_add_host(host, &pcid->dev); + + if (err) { + esas2r_log(ESAS2R_LOG_CRIT, "scsi_add_host returned %d", err); + esas2r_log_dev(ESAS2R_LOG_CRIT, &(host->shost_gendev), + "scsi_add_host() FAIL"); + + esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev), + "scsi_host_put() called"); + + scsi_host_put(host); + + esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev), + "pci_set_drvdata(%p, NULL) called", + pcid); + + pci_set_drvdata(pcid, NULL); + + return -ENODEV; + } + + + esas2r_fw_event_on(a); + + esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev), + "scsi_scan_host() called"); + + scsi_scan_host(host); + + /* Add sysfs binary files */ + if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fw)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: fw"); + else + a->sysfs_fw_created = 1; + + if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fs)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: fs"); + else + a->sysfs_fs_created = 1; + + if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_vda)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: vda"); + else + a->sysfs_vda_created = 1; + + if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_hw)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: hw"); + else + a->sysfs_hw_created = 1; + + if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_live_nvram)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: live_nvram"); + else + a->sysfs_live_nvram_created = 1; + + if (sysfs_create_bin_file(&host->shost_dev.kobj, + &bin_attr_default_nvram)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: default_nvram"); + else + a->sysfs_default_nvram_created = 1; + + found_adapters++; + + return 0; +} + +static void esas2r_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; + + esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), + "esas2r_remove(%p) called; " + "host:%p", pdev, + host); + + esas2r_kill_adapter(a->index); + found_adapters--; +} + +static int __init esas2r_init(void) +{ + int i; + + esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__); + + /* verify valid parameters */ + + if (can_queue < 1) { + esas2r_log(ESAS2R_LOG_WARN, + "warning: can_queue must be at least 1, value " + "forced."); + can_queue = 1; + } else if 
(can_queue > 2048) { + esas2r_log(ESAS2R_LOG_WARN, + "warning: can_queue must be no larger than 2048, " + "value forced."); + can_queue = 2048; + } + + if (cmd_per_lun < 1) { + esas2r_log(ESAS2R_LOG_WARN, + "warning: cmd_per_lun must be at least 1, value " + "forced."); + cmd_per_lun = 1; + } else if (cmd_per_lun > 2048) { + esas2r_log(ESAS2R_LOG_WARN, + "warning: cmd_per_lun must be no larger than " + "2048, value forced."); + cmd_per_lun = 2048; + } + + if (sg_tablesize < 32) { + esas2r_log(ESAS2R_LOG_WARN, + "warning: sg_tablesize must be at least 32, " + "value forced."); + sg_tablesize = 32; + } + + if (esas2r_max_sectors < 1) { + esas2r_log(ESAS2R_LOG_WARN, + "warning: esas2r_max_sectors must be at least " + "1, value forced."); + esas2r_max_sectors = 1; + } else if (esas2r_max_sectors > 0xffff) { + esas2r_log(ESAS2R_LOG_WARN, + "warning: esas2r_max_sectors must be no larger " + "than 0xffff, value forced."); + esas2r_max_sectors = 0xffff; + } + + sgl_page_size &= ~(ESAS2R_SGL_ALIGN - 1); + + if (sgl_page_size < SGL_PG_SZ_MIN) + sgl_page_size = SGL_PG_SZ_MIN; + else if (sgl_page_size > SGL_PG_SZ_MAX) + sgl_page_size = SGL_PG_SZ_MAX; + + if (num_sg_lists < NUM_SGL_MIN) + num_sg_lists = NUM_SGL_MIN; + else if (num_sg_lists > NUM_SGL_MAX) + num_sg_lists = NUM_SGL_MAX; + + if (num_requests < NUM_REQ_MIN) + num_requests = NUM_REQ_MIN; + else if (num_requests > NUM_REQ_MAX) + num_requests = NUM_REQ_MAX; + + if (num_ae_requests < NUM_AE_MIN) + num_ae_requests = NUM_AE_MIN; + else if (num_ae_requests > NUM_AE_MAX) + num_ae_requests = NUM_AE_MAX; + + /* set up other globals */ + + for (i = 0; i < MAX_ADAPTERS; i++) + esas2r_adapters[i] = NULL; + + return pci_register_driver(&esas2r_pci_driver); +} + +/* Handle ioctl calls to "/proc/scsi/esas2r/ATTOnode" */ +static const struct file_operations esas2r_proc_fops = { + .compat_ioctl = compat_ptr_ioctl, + .unlocked_ioctl = esas2r_proc_ioctl, +}; + +static const struct proc_ops esas2r_proc_ops = { + .proc_lseek = default_llseek, + .proc_ioctl = esas2r_proc_ioctl, +#ifdef CONFIG_COMPAT + .proc_compat_ioctl = compat_ptr_ioctl, +#endif +}; + +static struct Scsi_Host *esas2r_proc_host; +static int esas2r_proc_major; + +long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) +{ + return esas2r_ioctl_handler(esas2r_proc_host->hostdata, + cmd, (void __user *)arg); +} + +static void __exit esas2r_exit(void) +{ + esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__); + + if (esas2r_proc_major > 0) { + struct proc_dir_entry *proc_dir; + + esas2r_log(ESAS2R_LOG_INFO, "unregister proc"); + + proc_dir = scsi_template_proc_dir(esas2r_proc_host->hostt); + if (proc_dir) + remove_proc_entry(ATTONODE_NAME, proc_dir); + unregister_chrdev(esas2r_proc_major, ESAS2R_DRVR_NAME); + + esas2r_proc_major = 0; + } + + esas2r_log(ESAS2R_LOG_INFO, "pci_unregister_driver() called"); + + pci_unregister_driver(&esas2r_pci_driver); +} + +int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata; + + struct esas2r_target *t; + int dev_count = 0; + + esas2r_log(ESAS2R_LOG_DEBG, "esas2r_show_info (%p,%d)", m, sh->host_no); + + seq_printf(m, ESAS2R_LONGNAME "\n" + "Driver version: "ESAS2R_VERSION_STR "\n" + "Flash version: %s\n" + "Firmware version: %s\n" + "Copyright "ESAS2R_COPYRIGHT_YEARS "\n" + "http://www.attotech.com\n" + "\n", + a->flash_rev, + a->fw_rev[0] ? 
a->fw_rev : "(none)"); + + + seq_printf(m, "Adapter information:\n" + "--------------------\n" + "Model: %s\n" + "SAS address: %02X%02X%02X%02X:%02X%02X%02X%02X\n", + esas2r_get_model_name(a), + a->nvram->sas_addr[0], + a->nvram->sas_addr[1], + a->nvram->sas_addr[2], + a->nvram->sas_addr[3], + a->nvram->sas_addr[4], + a->nvram->sas_addr[5], + a->nvram->sas_addr[6], + a->nvram->sas_addr[7]); + + seq_puts(m, "\n" + "Discovered devices:\n" + "\n" + " # Target ID\n" + "---------------\n"); + + for (t = a->targetdb; t < a->targetdb_end; t++) + if (t->buffered_target_state == TS_PRESENT) { + seq_printf(m, " %3d %3d\n", + ++dev_count, + (u16)(uintptr_t)(t - a->targetdb)); + } + + if (dev_count == 0) + seq_puts(m, "none\n"); + + seq_putc(m, '\n'); + return 0; + +} + +const char *esas2r_info(struct Scsi_Host *sh) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata; + static char esas2r_info_str[512]; + + esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev), + "esas2r_info() called"); + + /* + * if we haven't done so already, register as a char driver + * and stick a node under "/proc/scsi/esas2r/ATTOnode" + */ + + if (esas2r_proc_major <= 0) { + esas2r_proc_host = sh; + + esas2r_proc_major = register_chrdev(0, ESAS2R_DRVR_NAME, + &esas2r_proc_fops); + + esas2r_log_dev(ESAS2R_LOG_DEBG, &(sh->shost_gendev), + "register_chrdev (major %d)", + esas2r_proc_major); + + if (esas2r_proc_major > 0) { + struct proc_dir_entry *proc_dir; + struct proc_dir_entry *pde = NULL; + + proc_dir = scsi_template_proc_dir(sh->hostt); + if (proc_dir) + pde = proc_create(ATTONODE_NAME, 0, proc_dir, + &esas2r_proc_ops); + + if (!pde) { + esas2r_log_dev(ESAS2R_LOG_WARN, + &(sh->shost_gendev), + "failed to create_proc_entry"); + esas2r_proc_major = -1; + } + } + } + + sprintf(esas2r_info_str, + ESAS2R_LONGNAME " (bus 0x%02X, device 0x%02X, IRQ 0x%02X)" + " driver version: "ESAS2R_VERSION_STR " firmware version: " + "%s\n", + a->pcid->bus->number, a->pcid->devfn, a->pcid->irq, + a->fw_rev[0] ? a->fw_rev : "(none)"); + + return esas2r_info_str; +} + +/* Callback for building a request scatter/gather list */ +static u32 get_physaddr_from_sgc(struct esas2r_sg_context *sgc, u64 *addr) +{ + u32 len; + + if (likely(sgc->cur_offset == sgc->exp_offset)) { + /* + * the normal case: caller used all bytes from previous call, so + * expected offset is the same as the current offset. + */ + + if (sgc->sgel_count < sgc->num_sgel) { + /* retrieve next segment, except for first time */ + if (sgc->exp_offset > (u8 *)0) { + /* advance current segment */ + sgc->cur_sgel = sg_next(sgc->cur_sgel); + ++(sgc->sgel_count); + } + + + len = sg_dma_len(sgc->cur_sgel); + (*addr) = sg_dma_address(sgc->cur_sgel); + + /* save the total # bytes returned to caller so far */ + sgc->exp_offset += len; + + } else { + len = 0; + } + } else if (sgc->cur_offset < sgc->exp_offset) { + /* + * caller did not use all bytes from previous call. need to + * compute the address based on current segment. + */ + + len = sg_dma_len(sgc->cur_sgel); + (*addr) = sg_dma_address(sgc->cur_sgel); + + sgc->exp_offset -= len; + + /* calculate PA based on prev segment address and offsets */ + *addr = *addr + + (sgc->cur_offset - sgc->exp_offset); + + sgc->exp_offset += len; + + /* re-calculate length based on offset */ + len = lower_32_bits( + sgc->exp_offset - sgc->cur_offset); + } else { /* if ( sgc->cur_offset > sgc->exp_offset ) */ + /* + * we don't expect the caller to skip ahead. 
+ * cur_offset will never exceed the len we return + */ + len = 0; + } + + return len; +} + +int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) +{ + struct esas2r_adapter *a = + (struct esas2r_adapter *)cmd->device->host->hostdata; + struct esas2r_request *rq; + struct esas2r_sg_context sgc; + unsigned bufflen; + + /* Assume success, if it fails we will fix the result later. */ + cmd->result = DID_OK << 16; + + if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) { + cmd->result = DID_NO_CONNECT << 16; + scsi_done(cmd); + return 0; + } + + rq = esas2r_alloc_request(a); + if (unlikely(rq == NULL)) { + esas2r_debug("esas2r_alloc_request failed"); + return SCSI_MLQUEUE_HOST_BUSY; + } + + rq->cmd = cmd; + bufflen = scsi_bufflen(cmd); + + if (likely(bufflen != 0)) { + if (cmd->sc_data_direction == DMA_TO_DEVICE) + rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD); + else if (cmd->sc_data_direction == DMA_FROM_DEVICE) + rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD); + } + + memcpy(rq->vrq->scsi.cdb, cmd->cmnd, cmd->cmd_len); + rq->vrq->scsi.length = cpu_to_le32(bufflen); + rq->target_id = cmd->device->id; + rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun); + rq->sense_buf = cmd->sense_buffer; + rq->sense_len = SCSI_SENSE_BUFFERSIZE; + + esas2r_sgc_init(&sgc, a, rq, NULL); + + sgc.length = bufflen; + sgc.cur_offset = NULL; + + sgc.cur_sgel = scsi_sglist(cmd); + sgc.exp_offset = NULL; + sgc.num_sgel = scsi_dma_map(cmd); + sgc.sgel_count = 0; + + if (unlikely(sgc.num_sgel < 0)) { + esas2r_free_request(a, rq); + return SCSI_MLQUEUE_HOST_BUSY; + } + + sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_from_sgc; + + if (unlikely(!esas2r_build_sg_list(a, rq, &sgc))) { + scsi_dma_unmap(cmd); + esas2r_free_request(a, rq); + return SCSI_MLQUEUE_HOST_BUSY; + } + + esas2r_debug("start request %p to %d:%d\n", rq, (int)cmd->device->id, + (int)cmd->device->lun); + + esas2r_start_request(a, rq); + + return 0; +} + +static void complete_task_management_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + (*rq->task_management_status_ptr) = rq->req_stat; + esas2r_free_request(a, rq); +} + +/* + * Searches the specified queue for the specified queue for the command + * to abort. + * + * Return 0 on failure, 1 if command was not found, 2 if command was found + */ +static int esas2r_check_active_queue(struct esas2r_adapter *a, + struct esas2r_request **abort_request, + struct scsi_cmnd *cmd, + struct list_head *queue) +{ + bool found = false; + struct esas2r_request *ar = *abort_request; + struct esas2r_request *rq; + struct list_head *element, *next; + + list_for_each_safe(element, next, queue) { + + rq = list_entry(element, struct esas2r_request, req_list); + + if (rq->cmd == cmd) { + + /* Found the request. See what to do with it. */ + if (queue == &a->active_list) { + /* + * We are searching the active queue, which + * means that we need to send an abort request + * to the firmware. + */ + ar = esas2r_alloc_request(a); + if (ar == NULL) { + esas2r_log_dev(ESAS2R_LOG_WARN, + &(a->host->shost_gendev), + "unable to allocate an abort request for cmd %p", + cmd); + return 0; /* Failure */ + } + + /* + * Task management request must be formatted + * with a lock held. 
+ */ + ar->sense_len = 0; + ar->vrq->scsi.length = 0; + ar->target_id = rq->target_id; + ar->vrq->scsi.flags |= cpu_to_le32( + (u8)le32_to_cpu(rq->vrq->scsi.flags)); + + memset(ar->vrq->scsi.cdb, 0, + sizeof(ar->vrq->scsi.cdb)); + + ar->vrq->scsi.flags |= cpu_to_le32( + FCP_CMND_TRM); + ar->vrq->scsi.u.abort_handle = + rq->vrq->scsi.handle; + } else { + /* + * The request is pending but not active on + * the firmware. Just free it now and we'll + * report the successful abort below. + */ + list_del_init(&rq->req_list); + esas2r_free_request(a, rq); + } + + found = true; + break; + } + + } + + if (!found) + return 1; /* Not found */ + + return 2; /* found */ + + +} + +int esas2r_eh_abort(struct scsi_cmnd *cmd) +{ + struct esas2r_adapter *a = + (struct esas2r_adapter *)cmd->device->host->hostdata; + struct esas2r_request *abort_request = NULL; + unsigned long flags; + struct list_head *queue; + int result; + + esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd); + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) { + cmd->result = DID_ABORT << 16; + + scsi_set_resid(cmd, 0); + + scsi_done(cmd); + + return SUCCESS; + } + + spin_lock_irqsave(&a->queue_lock, flags); + + /* + * Run through the defer and active queues looking for the request + * to abort. + */ + + queue = &a->defer_list; + +check_active_queue: + + result = esas2r_check_active_queue(a, &abort_request, cmd, queue); + + if (!result) { + spin_unlock_irqrestore(&a->queue_lock, flags); + return FAILED; + } else if (result == 2 && (queue == &a->defer_list)) { + queue = &a->active_list; + goto check_active_queue; + } + + spin_unlock_irqrestore(&a->queue_lock, flags); + + if (abort_request) { + u8 task_management_status = RS_PENDING; + + /* + * the request is already active, so we need to tell + * the firmware to abort it and wait for the response. + */ + + abort_request->comp_cb = complete_task_management_request; + abort_request->task_management_status_ptr = + &task_management_status; + + esas2r_start_request(a, abort_request); + + if (atomic_read(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); + + while (task_management_status == RS_PENDING) + msleep(10); + + /* + * Once we get here, the original request will have been + * completed by the firmware and the abort request will have + * been cleaned up. we're done! + */ + + return SUCCESS; + } + + /* + * If we get here, either we found the inactive request and + * freed it, or we didn't find it at all. Either way, success! + */ + + cmd->result = DID_ABORT << 16; + + scsi_set_resid(cmd, 0); + + scsi_done(cmd); + + return SUCCESS; +} + +static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset) +{ + struct esas2r_adapter *a = + (struct esas2r_adapter *)cmd->device->host->hostdata; + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return FAILED; + + if (host_reset) + esas2r_reset_adapter(a); + else + esas2r_reset_bus(a); + + /* above call sets the AF_OS_RESET flag. wait for it to clear. 
*/ + + while (test_bit(AF_OS_RESET, &a->flags)) { + msleep(10); + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return FAILED; + } + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return FAILED; + + return SUCCESS; +} + +int esas2r_host_reset(struct scsi_cmnd *cmd) +{ + esas2r_log(ESAS2R_LOG_INFO, "host_reset (%p)", cmd); + + return esas2r_host_bus_reset(cmd, true); +} + +int esas2r_bus_reset(struct scsi_cmnd *cmd) +{ + esas2r_log(ESAS2R_LOG_INFO, "bus_reset (%p)", cmd); + + return esas2r_host_bus_reset(cmd, false); +} + +static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset) +{ + struct esas2r_adapter *a = + (struct esas2r_adapter *)cmd->device->host->hostdata; + struct esas2r_request *rq; + u8 task_management_status = RS_PENDING; + bool completed; + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return FAILED; + +retry: + rq = esas2r_alloc_request(a); + if (rq == NULL) { + if (target_reset) { + esas2r_log(ESAS2R_LOG_CRIT, + "unable to allocate a request for a " + "target reset (%d)!", + cmd->device->id); + } else { + esas2r_log(ESAS2R_LOG_CRIT, + "unable to allocate a request for a " + "device reset (%d:%llu)!", + cmd->device->id, + cmd->device->lun); + } + + + return FAILED; + } + + rq->target_id = cmd->device->id; + rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun); + rq->req_stat = RS_PENDING; + + rq->comp_cb = complete_task_management_request; + rq->task_management_status_ptr = &task_management_status; + + if (target_reset) { + esas2r_debug("issuing target reset (%p) to id %d", rq, + cmd->device->id); + completed = esas2r_send_task_mgmt(a, rq, 0x20); + } else { + esas2r_debug("issuing device reset (%p) to id %d lun %d", rq, + cmd->device->id, cmd->device->lun); + completed = esas2r_send_task_mgmt(a, rq, 0x10); + } + + if (completed) { + /* Task management cmd completed right away, need to free it. */ + + esas2r_free_request(a, rq); + } else { + /* + * Wait for firmware to complete the request. Completion + * callback will free it. + */ + while (task_management_status == RS_PENDING) + msleep(10); + } + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return FAILED; + + if (task_management_status == RS_BUSY) { + /* + * Busy, probably because we are flashing. Wait a bit and + * try again. 
+ */ + msleep(100); + goto retry; + } + + return SUCCESS; +} + +int esas2r_device_reset(struct scsi_cmnd *cmd) +{ + esas2r_log(ESAS2R_LOG_INFO, "device_reset (%p)", cmd); + + return esas2r_dev_targ_reset(cmd, false); + +} + +int esas2r_target_reset(struct scsi_cmnd *cmd) +{ + esas2r_log(ESAS2R_LOG_INFO, "target_reset (%p)", cmd); + + return esas2r_dev_targ_reset(cmd, true); +} + +void esas2r_log_request_failure(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + u8 reqstatus = rq->req_stat; + + if (reqstatus == RS_SUCCESS) + return; + + if (rq->vrq->scsi.function == VDA_FUNC_SCSI) { + if (reqstatus == RS_SCSI_ERROR) { + if (rq->func_rsp.scsi_rsp.sense_len >= 13) { + esas2r_log(ESAS2R_LOG_WARN, + "request failure - SCSI error %x ASC:%x ASCQ:%x CDB:%x", + rq->sense_buf[2], rq->sense_buf[12], + rq->sense_buf[13], + rq->vrq->scsi.cdb[0]); + } else { + esas2r_log(ESAS2R_LOG_WARN, + "request failure - SCSI error CDB:%x\n", + rq->vrq->scsi.cdb[0]); + } + } else if ((rq->vrq->scsi.cdb[0] != INQUIRY + && rq->vrq->scsi.cdb[0] != REPORT_LUNS) + || (reqstatus != RS_SEL + && reqstatus != RS_SEL2)) { + if ((reqstatus == RS_UNDERRUN) && + (rq->vrq->scsi.cdb[0] == INQUIRY)) { + /* Don't log inquiry underruns */ + } else { + esas2r_log(ESAS2R_LOG_WARN, + "request failure - cdb:%x reqstatus:%d target:%d", + rq->vrq->scsi.cdb[0], reqstatus, + rq->target_id); + } + } + } +} + +void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq) +{ + u32 starttime; + u32 timeout; + + starttime = jiffies_to_msecs(jiffies); + timeout = rq->timeout ? rq->timeout : 5000; + + while (true) { + esas2r_polled_interrupt(a); + + if (rq->req_stat != RS_STARTED) + break; + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + if ((jiffies_to_msecs(jiffies) - starttime) > timeout) { + esas2r_hdebug("request TMO"); + esas2r_bugon(); + + rq->req_stat = RS_TIMEOUT; + + esas2r_local_reset_adapter(a); + return; + } + } +} + +u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo) +{ + u32 offset = addr_lo & (MW_DATA_WINDOW_SIZE - 1); + u32 base = addr_lo & -(signed int)MW_DATA_WINDOW_SIZE; + + if (a->window_base != base) { + esas2r_write_register_dword(a, MVR_PCI_WIN1_REMAP, + base | MVRPW1R_ENABLE); + esas2r_flush_register_dword(a, MVR_PCI_WIN1_REMAP); + a->window_base = base; + } + + return offset; +} + +/* Read a block of data from chip memory */ +bool esas2r_read_mem_block(struct esas2r_adapter *a, + void *to, + u32 from, + u32 size) +{ + u8 *end = (u8 *)to; + + while (size) { + u32 len; + u32 offset; + u32 iatvr; + + iatvr = (from & -(signed int)MW_DATA_WINDOW_SIZE); + + esas2r_map_data_window(a, iatvr); + + offset = from & (MW_DATA_WINDOW_SIZE - 1); + len = size; + + if (len > MW_DATA_WINDOW_SIZE - offset) + len = MW_DATA_WINDOW_SIZE - offset; + + from += len; + size -= len; + + while (len--) { + *end++ = esas2r_read_data_byte(a, offset); + offset++; + } + } + + return true; +} + +void esas2r_nuxi_mgt_data(u8 function, void *data) +{ + struct atto_vda_grp_info *g; + struct atto_vda_devinfo *d; + struct atto_vdapart_info *p; + struct atto_vda_dh_info *h; + struct atto_vda_metrics_info *m; + struct atto_vda_schedule_info *s; + struct atto_vda_buzzer_info *b; + u8 i; + + switch (function) { + case VDAMGT_BUZZER_INFO: + case VDAMGT_BUZZER_SET: + + b = (struct atto_vda_buzzer_info *)data; + + b->duration = le32_to_cpu(b->duration); + break; + + case VDAMGT_SCHEDULE_INFO: + case VDAMGT_SCHEDULE_EVENT: + + s = (struct atto_vda_schedule_info *)data; + + s->id = le32_to_cpu(s->id); + + break; + + 
case VDAMGT_DEV_INFO: + case VDAMGT_DEV_CLEAN: + case VDAMGT_DEV_PT_INFO: + case VDAMGT_DEV_FEATURES: + case VDAMGT_DEV_PT_FEATURES: + case VDAMGT_DEV_OPERATION: + + d = (struct atto_vda_devinfo *)data; + + d->capacity = le64_to_cpu(d->capacity); + d->block_size = le32_to_cpu(d->block_size); + d->ses_dev_index = le16_to_cpu(d->ses_dev_index); + d->target_id = le16_to_cpu(d->target_id); + d->lun = le16_to_cpu(d->lun); + d->features = le16_to_cpu(d->features); + break; + + case VDAMGT_GRP_INFO: + case VDAMGT_GRP_CREATE: + case VDAMGT_GRP_DELETE: + case VDAMGT_ADD_STORAGE: + case VDAMGT_MEMBER_ADD: + case VDAMGT_GRP_COMMIT: + case VDAMGT_GRP_REBUILD: + case VDAMGT_GRP_COMMIT_INIT: + case VDAMGT_QUICK_RAID: + case VDAMGT_GRP_FEATURES: + case VDAMGT_GRP_COMMIT_INIT_AUTOMAP: + case VDAMGT_QUICK_RAID_INIT_AUTOMAP: + case VDAMGT_SPARE_LIST: + case VDAMGT_SPARE_ADD: + case VDAMGT_SPARE_REMOVE: + case VDAMGT_LOCAL_SPARE_ADD: + case VDAMGT_GRP_OPERATION: + + g = (struct atto_vda_grp_info *)data; + + g->capacity = le64_to_cpu(g->capacity); + g->block_size = le32_to_cpu(g->block_size); + g->interleave = le32_to_cpu(g->interleave); + g->features = le16_to_cpu(g->features); + + for (i = 0; i < 32; i++) + g->members[i] = le16_to_cpu(g->members[i]); + + break; + + case VDAMGT_PART_INFO: + case VDAMGT_PART_MAP: + case VDAMGT_PART_UNMAP: + case VDAMGT_PART_AUTOMAP: + case VDAMGT_PART_SPLIT: + case VDAMGT_PART_MERGE: + + p = (struct atto_vdapart_info *)data; + + p->part_size = le64_to_cpu(p->part_size); + p->start_lba = le32_to_cpu(p->start_lba); + p->block_size = le32_to_cpu(p->block_size); + p->target_id = le16_to_cpu(p->target_id); + break; + + case VDAMGT_DEV_HEALTH_REQ: + + h = (struct atto_vda_dh_info *)data; + + h->med_defect_cnt = le32_to_cpu(h->med_defect_cnt); + h->info_exc_cnt = le32_to_cpu(h->info_exc_cnt); + break; + + case VDAMGT_DEV_METRICS: + + m = (struct atto_vda_metrics_info *)data; + + for (i = 0; i < 32; i++) + m->dev_indexes[i] = le16_to_cpu(m->dev_indexes[i]); + + break; + + default: + break; + } +} + +void esas2r_nuxi_cfg_data(u8 function, void *data) +{ + struct atto_vda_cfg_init *ci; + + switch (function) { + case VDA_CFG_INIT: + case VDA_CFG_GET_INIT: + case VDA_CFG_GET_INIT2: + + ci = (struct atto_vda_cfg_init *)data; + + ci->date_time.year = le16_to_cpu(ci->date_time.year); + ci->sgl_page_size = le32_to_cpu(ci->sgl_page_size); + ci->vda_version = le32_to_cpu(ci->vda_version); + ci->epoch_time = le32_to_cpu(ci->epoch_time); + ci->ioctl_tunnel = le32_to_cpu(ci->ioctl_tunnel); + ci->num_targets_backend = le32_to_cpu(ci->num_targets_backend); + break; + + default: + break; + } +} + +void esas2r_nuxi_ae_data(union atto_vda_ae *ae) +{ + struct atto_vda_ae_raid *r = &ae->raid; + struct atto_vda_ae_lu *l = &ae->lu; + + switch (ae->hdr.bytype) { + case VDAAE_HDR_TYPE_RAID: + + r->dwflags = le32_to_cpu(r->dwflags); + break; + + case VDAAE_HDR_TYPE_LU: + + l->dwevent = le32_to_cpu(l->dwevent); + l->wphys_target_id = le16_to_cpu(l->wphys_target_id); + l->id.tgtlun.wtarget_id = le16_to_cpu(l->id.tgtlun.wtarget_id); + + if (l->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id) + + sizeof(struct atto_vda_ae_lu_tgt_lun_raid)) { + l->id.tgtlun_raid.dwinterleave + = le32_to_cpu(l->id.tgtlun_raid.dwinterleave); + l->id.tgtlun_raid.dwblock_size + = le32_to_cpu(l->id.tgtlun_raid.dwblock_size); + } + + break; + + case VDAAE_HDR_TYPE_DISK: + default: + break; + } +} + +void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq) +{ + unsigned long flags; + + 
esas2r_rq_destroy_request(rq, a); + spin_lock_irqsave(&a->request_lock, flags); + list_add(&rq->comp_list, &a->avail_request); + spin_unlock_irqrestore(&a->request_lock, flags); +} + +struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a) +{ + struct esas2r_request *rq; + unsigned long flags; + + spin_lock_irqsave(&a->request_lock, flags); + + if (unlikely(list_empty(&a->avail_request))) { + spin_unlock_irqrestore(&a->request_lock, flags); + return NULL; + } + + rq = list_first_entry(&a->avail_request, struct esas2r_request, + comp_list); + list_del(&rq->comp_list); + spin_unlock_irqrestore(&a->request_lock, flags); + esas2r_rq_init_request(rq, a); + + return rq; + +} + +void esas2r_complete_request_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + esas2r_debug("completing request %p\n", rq); + + scsi_dma_unmap(rq->cmd); + + if (unlikely(rq->req_stat != RS_SUCCESS)) { + esas2r_debug("[%x STATUS %x:%x (%x)]", rq->target_id, + rq->req_stat, + rq->func_rsp.scsi_rsp.scsi_stat, + rq->cmd); + + rq->cmd->result = + ((esas2r_req_status_to_error(rq->req_stat) << 16) + | rq->func_rsp.scsi_rsp.scsi_stat); + + if (rq->req_stat == RS_UNDERRUN) + scsi_set_resid(rq->cmd, + le32_to_cpu(rq->func_rsp.scsi_rsp. + residual_length)); + else + scsi_set_resid(rq->cmd, 0); + } + + scsi_done(rq->cmd); + + esas2r_free_request(a, rq); +} + +/* Run tasklet to handle stuff outside of interrupt context. */ +void esas2r_adapter_tasklet(unsigned long context) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)context; + + if (unlikely(test_bit(AF2_TIMER_TICK, &a->flags2))) { + clear_bit(AF2_TIMER_TICK, &a->flags2); + esas2r_timer_tick(a); + } + + if (likely(test_bit(AF2_INT_PENDING, &a->flags2))) { + clear_bit(AF2_INT_PENDING, &a->flags2); + esas2r_adapter_interrupt(a); + } + + if (esas2r_is_tasklet_pending(a)) + esas2r_do_tasklet_tasks(a); + + if (esas2r_is_tasklet_pending(a) + || (test_bit(AF2_INT_PENDING, &a->flags2)) + || (test_bit(AF2_TIMER_TICK, &a->flags2))) { + clear_bit(AF_TASKLET_SCHEDULED, &a->flags); + esas2r_schedule_tasklet(a); + } else { + clear_bit(AF_TASKLET_SCHEDULED, &a->flags); + } +} + +static void esas2r_timer_callback(struct timer_list *t); + +void esas2r_kickoff_timer(struct esas2r_adapter *a) +{ + timer_setup(&a->timer, esas2r_timer_callback, 0); + + a->timer.expires = jiffies + + msecs_to_jiffies(100); + + add_timer(&a->timer); +} + +static void esas2r_timer_callback(struct timer_list *t) +{ + struct esas2r_adapter *a = from_timer(a, t, timer); + + set_bit(AF2_TIMER_TICK, &a->flags2); + + esas2r_schedule_tasklet(a); + + esas2r_kickoff_timer(a); +} + +/* + * Firmware events need to be handled outside of interrupt context + * so we schedule a delayed_work to handle them. 
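[Editor's note] The comment above describes the standard Linux deferral pattern: work that must not run in interrupt context is packaged into a delayed_work item and queued to a workqueue, where the handler later runs in process context and may sleep. Below is a minimal, hypothetical sketch of that pattern only; the names my_event, my_event_handler and my_queue_event are illustrative and are not part of this driver, whose real implementation is the esas2r_queue_fw_event()/esas2r_firmware_event_work() pair that follows.

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

struct my_event {
	struct delayed_work work;
	int payload;
};

static void my_event_handler(struct work_struct *work)
{
	/* Runs later in process context; sleeping calls are allowed here. */
	struct my_event *ev = container_of(work, struct my_event, work.work);

	/* ... act on ev->payload ... */
	kfree(ev);
}

/* Callable from atomic (e.g. interrupt) context. */
static void my_queue_event(struct workqueue_struct *wq, int payload)
{
	struct my_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return;

	ev->payload = payload;
	INIT_DELAYED_WORK(&ev->work, my_event_handler);
	queue_delayed_work(wq, &ev->work, msecs_to_jiffies(1));
}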
+ */ + +static void +esas2r_free_fw_event(struct esas2r_fw_event_work *fw_event) +{ + unsigned long flags; + struct esas2r_adapter *a = fw_event->a; + + spin_lock_irqsave(&a->fw_event_lock, flags); + list_del(&fw_event->list); + kfree(fw_event); + spin_unlock_irqrestore(&a->fw_event_lock, flags); +} + +void +esas2r_fw_event_off(struct esas2r_adapter *a) +{ + unsigned long flags; + + spin_lock_irqsave(&a->fw_event_lock, flags); + a->fw_events_off = 1; + spin_unlock_irqrestore(&a->fw_event_lock, flags); +} + +void +esas2r_fw_event_on(struct esas2r_adapter *a) +{ + unsigned long flags; + + spin_lock_irqsave(&a->fw_event_lock, flags); + a->fw_events_off = 0; + spin_unlock_irqrestore(&a->fw_event_lock, flags); +} + +static void esas2r_add_device(struct esas2r_adapter *a, u16 target_id) +{ + int ret; + struct scsi_device *scsi_dev; + + scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0); + + if (scsi_dev) { + esas2r_log_dev( + ESAS2R_LOG_WARN, + &(scsi_dev-> + sdev_gendev), + "scsi device already exists at id %d", target_id); + + scsi_device_put(scsi_dev); + } else { + esas2r_log_dev( + ESAS2R_LOG_INFO, + &(a->host-> + shost_gendev), + "scsi_add_device() called for 0:%d:0", + target_id); + + ret = scsi_add_device(a->host, 0, target_id, 0); + if (ret) { + esas2r_log_dev( + ESAS2R_LOG_CRIT, + &(a->host-> + shost_gendev), + "scsi_add_device failed with %d for id %d", + ret, target_id); + } + } +} + +static void esas2r_remove_device(struct esas2r_adapter *a, u16 target_id) +{ + struct scsi_device *scsi_dev; + + scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0); + + if (scsi_dev) { + scsi_device_set_state(scsi_dev, SDEV_OFFLINE); + + esas2r_log_dev( + ESAS2R_LOG_INFO, + &(scsi_dev-> + sdev_gendev), + "scsi_remove_device() called for 0:%d:0", + target_id); + + scsi_remove_device(scsi_dev); + + esas2r_log_dev( + ESAS2R_LOG_INFO, + &(scsi_dev-> + sdev_gendev), + "scsi_device_put() called"); + + scsi_device_put(scsi_dev); + } else { + esas2r_log_dev( + ESAS2R_LOG_WARN, + &(a->host->shost_gendev), + "no target found at id %d", + target_id); + } +} + +/* + * Sends a firmware asynchronous event to anyone who happens to be + * listening on the defined ATTO VDA event ports. 
+ */ +static void esas2r_send_ae_event(struct esas2r_fw_event_work *fw_event) +{ + struct esas2r_vda_ae *ae = (struct esas2r_vda_ae *)fw_event->data; + char *type; + + switch (ae->vda_ae.hdr.bytype) { + case VDAAE_HDR_TYPE_RAID: + type = "RAID group state change"; + break; + + case VDAAE_HDR_TYPE_LU: + type = "Mapped destination LU change"; + break; + + case VDAAE_HDR_TYPE_DISK: + type = "Physical disk inventory change"; + break; + + case VDAAE_HDR_TYPE_RESET: + type = "Firmware reset"; + break; + + case VDAAE_HDR_TYPE_LOG_INFO: + type = "Event Log message (INFO level)"; + break; + + case VDAAE_HDR_TYPE_LOG_WARN: + type = "Event Log message (WARN level)"; + break; + + case VDAAE_HDR_TYPE_LOG_CRIT: + type = "Event Log message (CRIT level)"; + break; + + case VDAAE_HDR_TYPE_LOG_FAIL: + type = "Event Log message (FAIL level)"; + break; + + case VDAAE_HDR_TYPE_NVC: + type = "NVCache change"; + break; + + case VDAAE_HDR_TYPE_TLG_INFO: + type = "Time stamped log message (INFO level)"; + break; + + case VDAAE_HDR_TYPE_TLG_WARN: + type = "Time stamped log message (WARN level)"; + break; + + case VDAAE_HDR_TYPE_TLG_CRIT: + type = "Time stamped log message (CRIT level)"; + break; + + case VDAAE_HDR_TYPE_PWRMGT: + type = "Power management"; + break; + + case VDAAE_HDR_TYPE_MUTE: + type = "Mute button pressed"; + break; + + case VDAAE_HDR_TYPE_DEV: + type = "Device attribute change"; + break; + + default: + type = "Unknown"; + break; + } + + esas2r_log(ESAS2R_LOG_WARN, + "An async event of type \"%s\" was received from the firmware. The event contents are:", + type); + esas2r_log_hexdump(ESAS2R_LOG_WARN, &ae->vda_ae, + ae->vda_ae.hdr.bylength); + +} + +static void +esas2r_firmware_event_work(struct work_struct *work) +{ + struct esas2r_fw_event_work *fw_event = + container_of(work, struct esas2r_fw_event_work, work.work); + + struct esas2r_adapter *a = fw_event->a; + + u16 target_id = *(u16 *)&fw_event->data[0]; + + if (a->fw_events_off) + goto done; + + switch (fw_event->type) { + case fw_event_null: + break; /* do nothing */ + + case fw_event_lun_change: + esas2r_remove_device(a, target_id); + esas2r_add_device(a, target_id); + break; + + case fw_event_present: + esas2r_add_device(a, target_id); + break; + + case fw_event_not_present: + esas2r_remove_device(a, target_id); + break; + + case fw_event_vda_ae: + esas2r_send_ae_event(fw_event); + break; + } + +done: + esas2r_free_fw_event(fw_event); +} + +void esas2r_queue_fw_event(struct esas2r_adapter *a, + enum fw_event_type type, + void *data, + int data_sz) +{ + struct esas2r_fw_event_work *fw_event; + unsigned long flags; + + fw_event = kzalloc(sizeof(struct esas2r_fw_event_work), GFP_ATOMIC); + if (!fw_event) { + esas2r_log(ESAS2R_LOG_WARN, + "esas2r_queue_fw_event failed to alloc"); + return; + } + + if (type == fw_event_vda_ae) { + struct esas2r_vda_ae *ae = + (struct esas2r_vda_ae *)fw_event->data; + + ae->signature = ESAS2R_VDA_EVENT_SIG; + ae->bus_number = a->pcid->bus->number; + ae->devfn = a->pcid->devfn; + memcpy(&ae->vda_ae, data, sizeof(ae->vda_ae)); + } else { + memcpy(fw_event->data, data, data_sz); + } + + fw_event->type = type; + fw_event->a = a; + + spin_lock_irqsave(&a->fw_event_lock, flags); + list_add_tail(&fw_event->list, &a->fw_event_list); + INIT_DELAYED_WORK(&fw_event->work, esas2r_firmware_event_work); + queue_delayed_work_on( + smp_processor_id(), a->fw_event_q, &fw_event->work, + msecs_to_jiffies(1)); + spin_unlock_irqrestore(&a->fw_event_lock, flags); +} + +void esas2r_target_state_changed(struct esas2r_adapter *a, u16 
targ_id, + u8 state) +{ + if (state == TS_LUN_CHANGE) + esas2r_queue_fw_event(a, fw_event_lun_change, &targ_id, + sizeof(targ_id)); + else if (state == TS_PRESENT) + esas2r_queue_fw_event(a, fw_event_present, &targ_id, + sizeof(targ_id)); + else if (state == TS_NOT_PRESENT) + esas2r_queue_fw_event(a, fw_event_not_present, &targ_id, + sizeof(targ_id)); +} + +/* Translate status to a Linux SCSI mid-layer error code */ +int esas2r_req_status_to_error(u8 req_stat) +{ + switch (req_stat) { + case RS_OVERRUN: + case RS_UNDERRUN: + case RS_SUCCESS: + /* + * NOTE: SCSI mid-layer wants a good status for a SCSI error, because + * it will check the scsi_stat value in the completion anyway. + */ + case RS_SCSI_ERROR: + return DID_OK; + + case RS_SEL: + case RS_SEL2: + return DID_NO_CONNECT; + + case RS_RESET: + return DID_RESET; + + case RS_ABORTED: + return DID_ABORT; + + case RS_BUSY: + return DID_BUS_BUSY; + } + + /* everything else is just an error. */ + + return DID_ERROR; +} + +module_init(esas2r_init); +module_exit(esas2r_exit); diff --git a/drivers/scsi/esas2r/esas2r_targdb.c b/drivers/scsi/esas2r/esas2r_targdb.c new file mode 100644 index 000000000..bf45beaad --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_targdb.c @@ -0,0 +1,306 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_targdb.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#include "esas2r.h" + +void esas2r_targ_db_initialize(struct esas2r_adapter *a) +{ + struct esas2r_target *t; + + for (t = a->targetdb; t < a->targetdb_end; t++) { + memset(t, 0, sizeof(struct esas2r_target)); + + t->target_state = TS_NOT_PRESENT; + t->buffered_target_state = TS_NOT_PRESENT; + t->new_target_state = TS_INVALID; + } +} + +void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify) +{ + struct esas2r_target *t; + unsigned long flags; + + for (t = a->targetdb; t < a->targetdb_end; t++) { + if (t->target_state != TS_PRESENT) + continue; + + spin_lock_irqsave(&a->mem_lock, flags); + esas2r_targ_db_remove(a, t); + spin_unlock_irqrestore(&a->mem_lock, flags); + + if (notify) { + esas2r_trace("remove id:%d", esas2r_targ_get_id(t, + a)); + esas2r_target_state_changed(a, esas2r_targ_get_id(t, + a), + TS_NOT_PRESENT); + } + } +} + +void esas2r_targ_db_report_changes(struct esas2r_adapter *a) +{ + struct esas2r_target *t; + unsigned long flags; + + esas2r_trace_enter(); + + if (test_bit(AF_DISC_PENDING, &a->flags)) { + esas2r_trace_exit(); + return; + } + + for (t = a->targetdb; t < a->targetdb_end; t++) { + u8 state = TS_INVALID; + + spin_lock_irqsave(&a->mem_lock, flags); + if (t->buffered_target_state != t->target_state) + state = t->buffered_target_state = t->target_state; + + spin_unlock_irqrestore(&a->mem_lock, flags); + if (state != TS_INVALID) { + esas2r_trace("targ_db_report_changes:%d", + esas2r_targ_get_id( + t, + a)); + esas2r_trace("state:%d", state); + + esas2r_target_state_changed(a, + esas2r_targ_get_id(t, + a), + state); + } + } + + esas2r_trace_exit(); +} + +struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a, + struct esas2r_disc_context * + dc) +{ + struct esas2r_target *t; + + esas2r_trace_enter(); + + if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) { + esas2r_bugon(); + esas2r_trace_exit(); + return NULL; + } + + t = a->targetdb + dc->curr_virt_id; + + if (t->target_state == TS_PRESENT) { + esas2r_trace_exit(); + return NULL; + } + + esas2r_hdebug("add RAID %s, T:%d", dc->raid_grp_name, + esas2r_targ_get_id( + t, + a)); + + if (dc->interleave == 0 + || dc->block_size == 0) { + /* these are invalid values, don't create the target entry. */ + + esas2r_hdebug("invalid RAID group dimensions"); + + esas2r_trace_exit(); + + return NULL; + } + + t->block_size = dc->block_size; + t->inter_byte = dc->interleave; + t->inter_block = dc->interleave / dc->block_size; + t->virt_targ_id = dc->curr_virt_id; + t->phys_targ_id = ESAS2R_TARG_ID_INV; + + t->flags &= ~TF_PASS_THRU; + t->flags |= TF_USED; + + t->identifier_len = 0; + + t->target_state = TS_PRESENT; + + return t; +} + +struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a, + struct esas2r_disc_context *dc, + u8 *ident, + u8 ident_len) +{ + struct esas2r_target *t; + + esas2r_trace_enter(); + + if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) { + esas2r_bugon(); + esas2r_trace_exit(); + return NULL; + } + + /* see if we found this device before. 
*/ + + t = esas2r_targ_db_find_by_ident(a, ident, ident_len); + + if (t == NULL) { + t = a->targetdb + dc->curr_virt_id; + + if (ident_len > sizeof(t->identifier) + || t->target_state == TS_PRESENT) { + esas2r_trace_exit(); + return NULL; + } + } + + esas2r_hdebug("add PT; T:%d, V:%d, P:%d", esas2r_targ_get_id(t, a), + dc->curr_virt_id, + dc->curr_phys_id); + + t->block_size = 0; + t->inter_byte = 0; + t->inter_block = 0; + t->virt_targ_id = dc->curr_virt_id; + t->phys_targ_id = dc->curr_phys_id; + t->identifier_len = ident_len; + + memcpy(t->identifier, ident, ident_len); + + t->flags |= TF_PASS_THRU | TF_USED; + + t->target_state = TS_PRESENT; + + return t; +} + +void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t) +{ + esas2r_trace_enter(); + + t->target_state = TS_NOT_PRESENT; + + esas2r_trace("remove id:%d", esas2r_targ_get_id(t, a)); + + esas2r_trace_exit(); +} + +struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a, + u64 *sas_addr) +{ + struct esas2r_target *t; + + for (t = a->targetdb; t < a->targetdb_end; t++) + if (t->sas_addr == *sas_addr) + return t; + + return NULL; +} + +struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a, + void *identifier, + u8 ident_len) +{ + struct esas2r_target *t; + + for (t = a->targetdb; t < a->targetdb_end; t++) { + if (ident_len == t->identifier_len + && memcmp(&t->identifier[0], identifier, + ident_len) == 0) + return t; + } + + return NULL; +} + +u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id) +{ + u16 id = target_id + 1; + + while (id < ESAS2R_MAX_TARGETS) { + struct esas2r_target *t = a->targetdb + id; + + if (t->target_state == TS_PRESENT) + break; + + id++; + } + + return id; +} + +struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a, + u16 virt_id) +{ + struct esas2r_target *t; + + for (t = a->targetdb; t < a->targetdb_end; t++) { + if (t->target_state != TS_PRESENT) + continue; + + if (t->virt_targ_id == virt_id) + return t; + } + + return NULL; +} + +u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a) +{ + u16 devcnt = 0; + struct esas2r_target *t; + unsigned long flags; + + spin_lock_irqsave(&a->mem_lock, flags); + for (t = a->targetdb; t < a->targetdb_end; t++) + if (t->target_state == TS_PRESENT) + devcnt++; + + spin_unlock_irqrestore(&a->mem_lock, flags); + + return devcnt; +} diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c new file mode 100644 index 000000000..30028e56d --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_vda.c @@ -0,0 +1,524 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_vda.c + * esas2r driver VDA firmware interface functions + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include "esas2r.h" + +static u8 esas2r_vdaioctl_versions[] = { + ATTO_VDA_VER_UNSUPPORTED, + ATTO_VDA_FLASH_VER, + ATTO_VDA_VER_UNSUPPORTED, + ATTO_VDA_VER_UNSUPPORTED, + ATTO_VDA_CLI_VER, + ATTO_VDA_VER_UNSUPPORTED, + ATTO_VDA_CFG_VER, + ATTO_VDA_MGT_VER, + ATTO_VDA_GSV_VER +}; + +static void clear_vda_request(struct esas2r_request *rq); + +static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a, + struct esas2r_request *rq); + +/* Prepare a VDA IOCTL request to be sent to the firmware. 
*/ +bool esas2r_process_vda_ioctl(struct esas2r_adapter *a, + struct atto_ioctl_vda *vi, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc) +{ + u32 datalen = 0; + struct atto_vda_sge *firstsg = NULL; + u8 vercnt = (u8)ARRAY_SIZE(esas2r_vdaioctl_versions); + + vi->status = ATTO_STS_SUCCESS; + vi->vda_status = RS_PENDING; + + if (vi->function >= vercnt) { + vi->status = ATTO_STS_INV_FUNC; + return false; + } + + if (vi->version > esas2r_vdaioctl_versions[vi->function]) { + vi->status = ATTO_STS_INV_VERSION; + return false; + } + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) { + vi->status = ATTO_STS_DEGRADED; + return false; + } + + if (vi->function != VDA_FUNC_SCSI) + clear_vda_request(rq); + + rq->vrq->scsi.function = vi->function; + rq->interrupt_cb = esas2r_complete_vda_ioctl; + rq->interrupt_cx = vi; + + switch (vi->function) { + case VDA_FUNC_FLASH: + + if (vi->cmd.flash.sub_func != VDA_FLASH_FREAD + && vi->cmd.flash.sub_func != VDA_FLASH_FWRITE + && vi->cmd.flash.sub_func != VDA_FLASH_FINFO) { + vi->status = ATTO_STS_INV_FUNC; + return false; + } + + if (vi->cmd.flash.sub_func != VDA_FLASH_FINFO) + datalen = vi->data_length; + + rq->vrq->flash.length = cpu_to_le32(datalen); + rq->vrq->flash.sub_func = vi->cmd.flash.sub_func; + + memcpy(rq->vrq->flash.data.file.file_name, + vi->cmd.flash.data.file.file_name, + sizeof(vi->cmd.flash.data.file.file_name)); + + firstsg = rq->vrq->flash.data.file.sge; + break; + + case VDA_FUNC_CLI: + + datalen = vi->data_length; + + rq->vrq->cli.cmd_rsp_len = + cpu_to_le32(vi->cmd.cli.cmd_rsp_len); + rq->vrq->cli.length = cpu_to_le32(datalen); + + firstsg = rq->vrq->cli.sge; + break; + + case VDA_FUNC_MGT: + { + u8 *cmdcurr_offset = sgc->cur_offset + - offsetof(struct atto_ioctl_vda, data) + + offsetof(struct atto_ioctl_vda, cmd) + + offsetof(struct atto_ioctl_vda_mgt_cmd, + data); + /* + * build the data payload SGL here first since + * esas2r_sgc_init() will modify the S/G list offset for the + * management SGL (which is built below where the data SGL is + * usually built). + */ + + if (vi->data_length) { + u32 payldlen = 0; + + if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_HEALTH_REQ + || vi->cmd.mgt.mgt_func == VDAMGT_DEV_METRICS) { + rq->vrq->mgt.payld_sglst_offset = + (u8)offsetof(struct atto_vda_mgmt_req, + payld_sge); + + payldlen = vi->data_length; + datalen = vi->cmd.mgt.data_length; + } else if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_INFO2 + || vi->cmd.mgt.mgt_func == + VDAMGT_DEV_INFO2_BYADDR) { + datalen = vi->data_length; + cmdcurr_offset = sgc->cur_offset; + } else { + vi->status = ATTO_STS_INV_PARAM; + return false; + } + + /* Setup the length so building the payload SGL works */ + rq->vrq->mgt.length = cpu_to_le32(datalen); + + if (payldlen) { + rq->vrq->mgt.payld_length = + cpu_to_le32(payldlen); + + esas2r_sgc_init(sgc, a, rq, + rq->vrq->mgt.payld_sge); + sgc->length = payldlen; + + if (!esas2r_build_sg_list(a, rq, sgc)) { + vi->status = ATTO_STS_OUT_OF_RSRC; + return false; + } + } + } else { + datalen = vi->cmd.mgt.data_length; + + rq->vrq->mgt.length = cpu_to_le32(datalen); + } + + /* + * Now that the payload SGL is built, if any, setup to build + * the management SGL. + */ + firstsg = rq->vrq->mgt.sge; + sgc->cur_offset = cmdcurr_offset; + + /* Finish initializing the management request. 
*/ + rq->vrq->mgt.mgt_func = vi->cmd.mgt.mgt_func; + rq->vrq->mgt.scan_generation = vi->cmd.mgt.scan_generation; + rq->vrq->mgt.dev_index = + cpu_to_le32(vi->cmd.mgt.dev_index); + + esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data); + break; + } + + case VDA_FUNC_CFG: + + if (vi->data_length + || vi->cmd.cfg.data_length == 0) { + vi->status = ATTO_STS_INV_PARAM; + return false; + } + + if (vi->cmd.cfg.cfg_func == VDA_CFG_INIT) { + vi->status = ATTO_STS_INV_FUNC; + return false; + } + + rq->vrq->cfg.sub_func = vi->cmd.cfg.cfg_func; + rq->vrq->cfg.length = cpu_to_le32(vi->cmd.cfg.data_length); + + if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) { + memcpy(&rq->vrq->cfg.data, + &vi->cmd.cfg.data, + vi->cmd.cfg.data_length); + + esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func, + &rq->vrq->cfg.data); + } else { + vi->status = ATTO_STS_INV_FUNC; + + return false; + } + + break; + + case VDA_FUNC_GSV: + + vi->cmd.gsv.rsp_len = vercnt; + + memcpy(vi->cmd.gsv.version_info, esas2r_vdaioctl_versions, + vercnt); + + vi->vda_status = RS_SUCCESS; + break; + + default: + + vi->status = ATTO_STS_INV_FUNC; + return false; + } + + if (datalen) { + esas2r_sgc_init(sgc, a, rq, firstsg); + sgc->length = datalen; + + if (!esas2r_build_sg_list(a, rq, sgc)) { + vi->status = ATTO_STS_OUT_OF_RSRC; + return false; + } + } + + esas2r_start_request(a, rq); + + return true; +} + +static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct atto_ioctl_vda *vi = (struct atto_ioctl_vda *)rq->interrupt_cx; + + vi->vda_status = rq->req_stat; + + switch (vi->function) { + case VDA_FUNC_FLASH: + + if (vi->cmd.flash.sub_func == VDA_FLASH_FINFO + || vi->cmd.flash.sub_func == VDA_FLASH_FREAD) + vi->cmd.flash.data.file.file_size = + le32_to_cpu(rq->func_rsp.flash_rsp.file_size); + + break; + + case VDA_FUNC_MGT: + + vi->cmd.mgt.scan_generation = + rq->func_rsp.mgt_rsp.scan_generation; + vi->cmd.mgt.dev_index = le16_to_cpu( + rq->func_rsp.mgt_rsp.dev_index); + + if (vi->data_length == 0) + vi->cmd.mgt.data_length = + le32_to_cpu(rq->func_rsp.mgt_rsp.length); + + esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data); + break; + + case VDA_FUNC_CFG: + + if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) { + struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg; + struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp; + char buf[sizeof(cfg->data.init.fw_release) + 1]; + + cfg->data_length = + cpu_to_le32(sizeof(struct atto_vda_cfg_init)); + cfg->data.init.vda_version = + le32_to_cpu(rsp->vda_version); + cfg->data.init.fw_build = rsp->fw_build; + + snprintf(buf, sizeof(buf), "%1.1u.%2.2u", + (int)LOBYTE(le16_to_cpu(rsp->fw_release)), + (int)HIBYTE(le16_to_cpu(rsp->fw_release))); + + memcpy(&cfg->data.init.fw_release, buf, + sizeof(cfg->data.init.fw_release)); + + if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A') + cfg->data.init.fw_version = + cfg->data.init.fw_build; + else + cfg->data.init.fw_version = + cfg->data.init.fw_release; + } else { + esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func, + &vi->cmd.cfg.data); + } + + break; + + case VDA_FUNC_CLI: + + vi->cmd.cli.cmd_rsp_len = + le32_to_cpu(rq->func_rsp.cli_rsp.cmd_rsp_len); + break; + + default: + + break; + } +} + +/* Build a flash VDA request. 
*/ +void esas2r_build_flash_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u8 cksum, + u32 addr, + u32 length) +{ + struct atto_vda_flash_req *vrq = &rq->vrq->flash; + + clear_vda_request(rq); + + rq->vrq->scsi.function = VDA_FUNC_FLASH; + + if (sub_func == VDA_FLASH_BEGINW + || sub_func == VDA_FLASH_WRITE + || sub_func == VDA_FLASH_READ) + vrq->sg_list_offset = (u8)offsetof(struct atto_vda_flash_req, + data.sge); + + vrq->length = cpu_to_le32(length); + vrq->flash_addr = cpu_to_le32(addr); + vrq->checksum = cksum; + vrq->sub_func = sub_func; +} + +/* Build a VDA management request. */ +void esas2r_build_mgt_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u8 scan_gen, + u16 dev_index, + u32 length, + void *data) +{ + struct atto_vda_mgmt_req *vrq = &rq->vrq->mgt; + + clear_vda_request(rq); + + rq->vrq->scsi.function = VDA_FUNC_MGT; + + vrq->mgt_func = sub_func; + vrq->scan_generation = scan_gen; + vrq->dev_index = cpu_to_le16(dev_index); + vrq->length = cpu_to_le32(length); + + if (vrq->length) { + if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) { + vrq->sg_list_offset = (u8)offsetof( + struct atto_vda_mgmt_req, sge); + + vrq->sge[0].length = cpu_to_le32(SGE_LAST | length); + vrq->sge[0].address = cpu_to_le64( + rq->vrq_md->phys_addr + + sizeof(union atto_vda_req)); + } else { + vrq->sg_list_offset = (u8)offsetof( + struct atto_vda_mgmt_req, prde); + + vrq->prde[0].ctl_len = cpu_to_le32(length); + vrq->prde[0].address = cpu_to_le64( + rq->vrq_md->phys_addr + + sizeof(union atto_vda_req)); + } + } + + if (data) { + esas2r_nuxi_mgt_data(sub_func, data); + + memcpy(&rq->vda_rsp_data->mgt_data.data.bytes[0], data, + length); + } +} + +/* Build a VDA asyncronous event (AE) request. */ +void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq) +{ + struct atto_vda_ae_req *vrq = &rq->vrq->ae; + + clear_vda_request(rq); + + rq->vrq->scsi.function = VDA_FUNC_AE; + + vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data)); + + if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) { + vrq->sg_list_offset = + (u8)offsetof(struct atto_vda_ae_req, sge); + vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length); + vrq->sge[0].address = cpu_to_le64( + rq->vrq_md->phys_addr + + sizeof(union atto_vda_req)); + } else { + vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ae_req, + prde); + vrq->prde[0].ctl_len = cpu_to_le32(vrq->length); + vrq->prde[0].address = cpu_to_le64( + rq->vrq_md->phys_addr + + sizeof(union atto_vda_req)); + } +} + +/* Build a VDA CLI request. */ +void esas2r_build_cli_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u32 length, + u32 cmd_rsp_len) +{ + struct atto_vda_cli_req *vrq = &rq->vrq->cli; + + clear_vda_request(rq); + + rq->vrq->scsi.function = VDA_FUNC_CLI; + + vrq->length = cpu_to_le32(length); + vrq->cmd_rsp_len = cpu_to_le32(cmd_rsp_len); + vrq->sg_list_offset = (u8)offsetof(struct atto_vda_cli_req, sge); +} + +/* Build a VDA IOCTL request. */ +void esas2r_build_ioctl_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u32 length, + u8 sub_func) +{ + struct atto_vda_ioctl_req *vrq = &rq->vrq->ioctl; + + clear_vda_request(rq); + + rq->vrq->scsi.function = VDA_FUNC_IOCTL; + + vrq->length = cpu_to_le32(length); + vrq->sub_func = sub_func; + vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ioctl_req, sge); +} + +/* Build a VDA configuration request. 
*/ +void esas2r_build_cfg_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u32 length, + void *data) +{ + struct atto_vda_cfg_req *vrq = &rq->vrq->cfg; + + clear_vda_request(rq); + + rq->vrq->scsi.function = VDA_FUNC_CFG; + + vrq->sub_func = sub_func; + vrq->length = cpu_to_le32(length); + + if (data) { + esas2r_nuxi_cfg_data(sub_func, data); + + memcpy(&vrq->data, data, length); + } +} + +static void clear_vda_request(struct esas2r_request *rq) +{ + u32 handle = rq->vrq->scsi.handle; + + memset(rq->vrq, 0, sizeof(*rq->vrq)); + + rq->vrq->scsi.handle = handle; + + rq->req_stat = RS_PENDING; + + /* since the data buffer is separate clear that too */ + + memset(rq->data_buf, 0, ESAS2R_DATA_BUF_LEN); + + /* + * Setup next and prev pointer in case the request is not going through + * esas2r_start_request(). + */ + + INIT_LIST_HEAD(&rq->req_list); +} diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c new file mode 100644 index 000000000..97816a0e6 --- /dev/null +++ b/drivers/scsi/esp_scsi.c @@ -0,0 +1,2909 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* esp_scsi.c: ESP SCSI driver. + * + * Copyright (C) 2007 David S. Miller (davem@davemloft.net) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "esp_scsi.h" + +#define DRV_MODULE_NAME "esp" +#define PFX DRV_MODULE_NAME ": " +#define DRV_VERSION "2.000" +#define DRV_MODULE_RELDATE "April 19, 2007" + +/* SCSI bus reset settle time in seconds. */ +static int esp_bus_reset_settle = 3; + +static u32 esp_debug; +#define ESP_DEBUG_INTR 0x00000001 +#define ESP_DEBUG_SCSICMD 0x00000002 +#define ESP_DEBUG_RESET 0x00000004 +#define ESP_DEBUG_MSGIN 0x00000008 +#define ESP_DEBUG_MSGOUT 0x00000010 +#define ESP_DEBUG_CMDDONE 0x00000020 +#define ESP_DEBUG_DISCONNECT 0x00000040 +#define ESP_DEBUG_DATASTART 0x00000080 +#define ESP_DEBUG_DATADONE 0x00000100 +#define ESP_DEBUG_RECONNECT 0x00000200 +#define ESP_DEBUG_AUTOSENSE 0x00000400 +#define ESP_DEBUG_EVENT 0x00000800 +#define ESP_DEBUG_COMMAND 0x00001000 + +#define esp_log_intr(f, a...) \ +do { if (esp_debug & ESP_DEBUG_INTR) \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ +} while (0) + +#define esp_log_reset(f, a...) \ +do { if (esp_debug & ESP_DEBUG_RESET) \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ +} while (0) + +#define esp_log_msgin(f, a...) \ +do { if (esp_debug & ESP_DEBUG_MSGIN) \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ +} while (0) + +#define esp_log_msgout(f, a...) \ +do { if (esp_debug & ESP_DEBUG_MSGOUT) \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ +} while (0) + +#define esp_log_cmddone(f, a...) \ +do { if (esp_debug & ESP_DEBUG_CMDDONE) \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ +} while (0) + +#define esp_log_disconnect(f, a...) \ +do { if (esp_debug & ESP_DEBUG_DISCONNECT) \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ +} while (0) + +#define esp_log_datastart(f, a...) \ +do { if (esp_debug & ESP_DEBUG_DATASTART) \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ +} while (0) + +#define esp_log_datadone(f, a...) \ +do { if (esp_debug & ESP_DEBUG_DATADONE) \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ +} while (0) + +#define esp_log_reconnect(f, a...) \ +do { if (esp_debug & ESP_DEBUG_RECONNECT) \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ +} while (0) + +#define esp_log_autosense(f, a...) 
\ +do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ +} while (0) + +#define esp_log_event(f, a...) \ +do { if (esp_debug & ESP_DEBUG_EVENT) \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ +} while (0) + +#define esp_log_command(f, a...) \ +do { if (esp_debug & ESP_DEBUG_COMMAND) \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ +} while (0) + +#define esp_read8(REG) esp->ops->esp_read8(esp, REG) +#define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG) + +static void esp_log_fill_regs(struct esp *esp, + struct esp_event_ent *p) +{ + p->sreg = esp->sreg; + p->seqreg = esp->seqreg; + p->sreg2 = esp->sreg2; + p->ireg = esp->ireg; + p->select_state = esp->select_state; + p->event = esp->event; +} + +void scsi_esp_cmd(struct esp *esp, u8 val) +{ + struct esp_event_ent *p; + int idx = esp->esp_event_cur; + + p = &esp->esp_event_log[idx]; + p->type = ESP_EVENT_TYPE_CMD; + p->val = val; + esp_log_fill_regs(esp, p); + + esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); + + esp_log_command("cmd[%02x]\n", val); + esp_write8(val, ESP_CMD); +} +EXPORT_SYMBOL(scsi_esp_cmd); + +static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd) +{ + if (esp->flags & ESP_FLAG_USE_FIFO) { + int i; + + scsi_esp_cmd(esp, ESP_CMD_FLUSH); + for (i = 0; i < len; i++) + esp_write8(esp->command_block[i], ESP_FDATA); + scsi_esp_cmd(esp, cmd); + } else { + if (esp->rev == FASHME) + scsi_esp_cmd(esp, ESP_CMD_FLUSH); + cmd |= ESP_CMD_DMA; + esp->ops->send_dma_cmd(esp, esp->command_block_dma, + len, max_len, 0, cmd); + } +} + +static void esp_event(struct esp *esp, u8 val) +{ + struct esp_event_ent *p; + int idx = esp->esp_event_cur; + + p = &esp->esp_event_log[idx]; + p->type = ESP_EVENT_TYPE_EVENT; + p->val = val; + esp_log_fill_regs(esp, p); + + esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); + + esp->event = val; +} + +static void esp_dump_cmd_log(struct esp *esp) +{ + int idx = esp->esp_event_cur; + int stop = idx; + + shost_printk(KERN_INFO, esp->host, "Dumping command log\n"); + do { + struct esp_event_ent *p = &esp->esp_event_log[idx]; + + shost_printk(KERN_INFO, esp->host, + "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] " + "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n", + idx, + p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT", + p->val, p->sreg, p->seqreg, + p->sreg2, p->ireg, p->select_state, p->event); + + idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); + } while (idx != stop); +} + +static void esp_flush_fifo(struct esp *esp) +{ + scsi_esp_cmd(esp, ESP_CMD_FLUSH); + if (esp->rev == ESP236) { + int lim = 1000; + + while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) { + if (--lim == 0) { + shost_printk(KERN_ALERT, esp->host, + "ESP_FF_BYTES will not clear!\n"); + break; + } + udelay(1); + } + } +} + +static void hme_read_fifo(struct esp *esp) +{ + int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; + int idx = 0; + + while (fcnt--) { + esp->fifo[idx++] = esp_read8(ESP_FDATA); + esp->fifo[idx++] = esp_read8(ESP_FDATA); + } + if (esp->sreg2 & ESP_STAT2_F1BYTE) { + esp_write8(0, ESP_FDATA); + esp->fifo[idx++] = esp_read8(ESP_FDATA); + scsi_esp_cmd(esp, ESP_CMD_FLUSH); + } + esp->fifo_cnt = idx; +} + +static void esp_set_all_config3(struct esp *esp, u8 val) +{ + int i; + + for (i = 0; i < ESP_MAX_TARGET; i++) + esp->target[i].esp_config3 = val; +} + +/* Reset the ESP chip, _not_ the SCSI bus. 
*/ +static void esp_reset_esp(struct esp *esp) +{ + /* Now reset the ESP chip */ + scsi_esp_cmd(esp, ESP_CMD_RC); + scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); + if (esp->rev == FAST) + esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2); + scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); + + /* This is the only point at which it is reliable to read + * the ID-code for a fast ESP chip variants. + */ + esp->max_period = ((35 * esp->ccycle) / 1000); + if (esp->rev == FAST) { + u8 family_code = ESP_FAMILY(esp_read8(ESP_UID)); + + if (family_code == ESP_UID_F236) { + esp->rev = FAS236; + } else if (family_code == ESP_UID_HME) { + esp->rev = FASHME; /* Version is usually '5'. */ + } else if (family_code == ESP_UID_FSC) { + esp->rev = FSC; + /* Enable Active Negation */ + esp_write8(ESP_CONFIG4_RADE, ESP_CFG4); + } else { + esp->rev = FAS100A; + } + esp->min_period = ((4 * esp->ccycle) / 1000); + } else { + esp->min_period = ((5 * esp->ccycle) / 1000); + } + if (esp->rev == FAS236) { + /* + * The AM53c974 chip returns the same ID as FAS236; + * try to configure glitch eater. + */ + u8 config4 = ESP_CONFIG4_GE1; + esp_write8(config4, ESP_CFG4); + config4 = esp_read8(ESP_CFG4); + if (config4 & ESP_CONFIG4_GE1) { + esp->rev = PCSCSI; + esp_write8(esp->config4, ESP_CFG4); + } + } + esp->max_period = (esp->max_period + 3)>>2; + esp->min_period = (esp->min_period + 3)>>2; + + esp_write8(esp->config1, ESP_CFG1); + switch (esp->rev) { + case ESP100: + /* nothing to do */ + break; + + case ESP100A: + esp_write8(esp->config2, ESP_CFG2); + break; + + case ESP236: + /* Slow 236 */ + esp_write8(esp->config2, ESP_CFG2); + esp->prev_cfg3 = esp->target[0].esp_config3; + esp_write8(esp->prev_cfg3, ESP_CFG3); + break; + + case FASHME: + esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB); + fallthrough; + + case FAS236: + case PCSCSI: + case FSC: + esp_write8(esp->config2, ESP_CFG2); + if (esp->rev == FASHME) { + u8 cfg3 = esp->target[0].esp_config3; + + cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH; + if (esp->scsi_id >= 8) + cfg3 |= ESP_CONFIG3_IDBIT3; + esp_set_all_config3(esp, cfg3); + } else { + u32 cfg3 = esp->target[0].esp_config3; + + cfg3 |= ESP_CONFIG3_FCLK; + esp_set_all_config3(esp, cfg3); + } + esp->prev_cfg3 = esp->target[0].esp_config3; + esp_write8(esp->prev_cfg3, ESP_CFG3); + if (esp->rev == FASHME) { + esp->radelay = 80; + } else { + if (esp->flags & ESP_FLAG_DIFFERENTIAL) + esp->radelay = 0; + else + esp->radelay = 96; + } + break; + + case FAS100A: + /* Fast 100a */ + esp_write8(esp->config2, ESP_CFG2); + esp_set_all_config3(esp, + (esp->target[0].esp_config3 | + ESP_CONFIG3_FCLOCK)); + esp->prev_cfg3 = esp->target[0].esp_config3; + esp_write8(esp->prev_cfg3, ESP_CFG3); + esp->radelay = 32; + break; + + default: + break; + } + + /* Reload the configuration registers */ + esp_write8(esp->cfact, ESP_CFACT); + + esp->prev_stp = 0; + esp_write8(esp->prev_stp, ESP_STP); + + esp->prev_soff = 0; + esp_write8(esp->prev_soff, ESP_SOFF); + + esp_write8(esp->neg_defp, ESP_TIMEO); + + /* Eat any bitrot in the chip */ + esp_read8(ESP_INTRPT); + udelay(100); +} + +static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd) +{ + struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); + struct scatterlist *sg = scsi_sglist(cmd); + int total = 0, i; + struct scatterlist *s; + + if (cmd->sc_data_direction == DMA_NONE) + return; + + if (esp->flags & ESP_FLAG_NO_DMA_MAP) { + /* + * For pseudo DMA and PIO we need the virtual address instead of + * a dma address, so perform an identity mapping. 
+ */ + spriv->num_sg = scsi_sg_count(cmd); + + scsi_for_each_sg(cmd, s, spriv->num_sg, i) { + s->dma_address = (uintptr_t)sg_virt(s); + total += sg_dma_len(s); + } + } else { + spriv->num_sg = scsi_dma_map(cmd); + scsi_for_each_sg(cmd, s, spriv->num_sg, i) + total += sg_dma_len(s); + } + spriv->cur_residue = sg_dma_len(sg); + spriv->prv_sg = NULL; + spriv->cur_sg = sg; + spriv->tot_residue = total; +} + +static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent, + struct scsi_cmnd *cmd) +{ + struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); + + if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { + return ent->sense_dma + + (ent->sense_ptr - cmd->sense_buffer); + } + + return sg_dma_address(p->cur_sg) + + (sg_dma_len(p->cur_sg) - + p->cur_residue); +} + +static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent, + struct scsi_cmnd *cmd) +{ + struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); + + if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { + return SCSI_SENSE_BUFFERSIZE - + (ent->sense_ptr - cmd->sense_buffer); + } + return p->cur_residue; +} + +static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent, + struct scsi_cmnd *cmd, unsigned int len) +{ + struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); + + if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { + ent->sense_ptr += len; + return; + } + + p->cur_residue -= len; + p->tot_residue -= len; + if (p->cur_residue < 0 || p->tot_residue < 0) { + shost_printk(KERN_ERR, esp->host, + "Data transfer overflow.\n"); + shost_printk(KERN_ERR, esp->host, + "cur_residue[%d] tot_residue[%d] len[%u]\n", + p->cur_residue, p->tot_residue, len); + p->cur_residue = 0; + p->tot_residue = 0; + } + if (!p->cur_residue && p->tot_residue) { + p->prv_sg = p->cur_sg; + p->cur_sg = sg_next(p->cur_sg); + p->cur_residue = sg_dma_len(p->cur_sg); + } +} + +static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd) +{ + if (!(esp->flags & ESP_FLAG_NO_DMA_MAP)) + scsi_dma_unmap(cmd); +} + +static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent) +{ + struct scsi_cmnd *cmd = ent->cmd; + struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); + + if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { + ent->saved_sense_ptr = ent->sense_ptr; + return; + } + ent->saved_cur_residue = spriv->cur_residue; + ent->saved_prv_sg = spriv->prv_sg; + ent->saved_cur_sg = spriv->cur_sg; + ent->saved_tot_residue = spriv->tot_residue; +} + +static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent) +{ + struct scsi_cmnd *cmd = ent->cmd; + struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); + + if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { + ent->sense_ptr = ent->saved_sense_ptr; + return; + } + spriv->cur_residue = ent->saved_cur_residue; + spriv->prv_sg = ent->saved_prv_sg; + spriv->cur_sg = ent->saved_cur_sg; + spriv->tot_residue = ent->saved_tot_residue; +} + +static void esp_write_tgt_config3(struct esp *esp, int tgt) +{ + if (esp->rev > ESP100A) { + u8 val = esp->target[tgt].esp_config3; + + if (val != esp->prev_cfg3) { + esp->prev_cfg3 = val; + esp_write8(val, ESP_CFG3); + } + } +} + +static void esp_write_tgt_sync(struct esp *esp, int tgt) +{ + u8 off = esp->target[tgt].esp_offset; + u8 per = esp->target[tgt].esp_period; + + if (off != esp->prev_soff) { + esp->prev_soff = off; + esp_write8(off, ESP_SOFF); + } + if (per != esp->prev_stp) { + esp->prev_stp = per; + esp_write8(per, ESP_STP); + } +} + +static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) +{ + if (esp->rev == FASHME) { + /* Arbitrary segment boundaries, 24-bit counts. 
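		 * For FASHME this simply clamps a single transfer to 2^24
		 * bytes.  The non-FASHME path below additionally clamps to
		 * 2^16 bytes and keeps the transfer from crossing a 16MB
		 * address boundary: e.g. dma_addr 0x00fffff0 with dma_len
		 * 0x100 is cut down to 0x10 bytes.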
*/ + if (dma_len > (1U << 24)) + dma_len = (1U << 24); + } else { + u32 base, end; + + /* ESP chip limits other variants by 16-bits of transfer + * count. Actually on FAS100A and FAS236 we could get + * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB + * in the ESP_CFG2 register but that causes other unwanted + * changes so we don't use it currently. + */ + if (dma_len > (1U << 16)) + dma_len = (1U << 16); + + /* All of the DMA variants hooked up to these chips + * cannot handle crossing a 24-bit address boundary. + */ + base = dma_addr & ((1U << 24) - 1U); + end = base + dma_len; + if (end > (1U << 24)) + end = (1U <<24); + dma_len = end - base; + } + return dma_len; +} + +static int esp_need_to_nego_wide(struct esp_target_data *tp) +{ + struct scsi_target *target = tp->starget; + + return spi_width(target) != tp->nego_goal_width; +} + +static int esp_need_to_nego_sync(struct esp_target_data *tp) +{ + struct scsi_target *target = tp->starget; + + /* When offset is zero, period is "don't care". */ + if (!spi_offset(target) && !tp->nego_goal_offset) + return 0; + + if (spi_offset(target) == tp->nego_goal_offset && + spi_period(target) == tp->nego_goal_period) + return 0; + + return 1; +} + +static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, + struct esp_lun_data *lp) +{ + if (!ent->orig_tag[0]) { + /* Non-tagged, slot already taken? */ + if (lp->non_tagged_cmd) + return -EBUSY; + + if (lp->hold) { + /* We are being held by active tagged + * commands. + */ + if (lp->num_tagged) + return -EBUSY; + + /* Tagged commands completed, we can unplug + * the queue and run this untagged command. + */ + lp->hold = 0; + } else if (lp->num_tagged) { + /* Plug the queue until num_tagged decreases + * to zero in esp_free_lun_tag. + */ + lp->hold = 1; + return -EBUSY; + } + + lp->non_tagged_cmd = ent; + return 0; + } + + /* Tagged command. Check that it isn't blocked by a non-tagged one. */ + if (lp->non_tagged_cmd || lp->hold) + return -EBUSY; + + BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]); + + lp->tagged_cmds[ent->orig_tag[1]] = ent; + lp->num_tagged++; + + return 0; +} + +static void esp_free_lun_tag(struct esp_cmd_entry *ent, + struct esp_lun_data *lp) +{ + if (ent->orig_tag[0]) { + BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent); + lp->tagged_cmds[ent->orig_tag[1]] = NULL; + lp->num_tagged--; + } else { + BUG_ON(lp->non_tagged_cmd != ent); + lp->non_tagged_cmd = NULL; + } +} + +static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent) +{ + ent->sense_ptr = ent->cmd->sense_buffer; + if (esp->flags & ESP_FLAG_NO_DMA_MAP) { + ent->sense_dma = (uintptr_t)ent->sense_ptr; + return; + } + + ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); +} + +static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent) +{ + if (!(esp->flags & ESP_FLAG_NO_DMA_MAP)) + dma_unmap_single(esp->dev, ent->sense_dma, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + ent->sense_ptr = NULL; +} + +/* When a contingent allegiance condition is created, we force feed a + * REQUEST_SENSE command to the device to fetch the sense data. I + * tried many other schemes, relying on the scsi error handling layer + * to send out the REQUEST_SENSE automatically, but this was difficult + * to get right especially in the presence of applications like smartd + * which use SG_IO to send out their own REQUEST_SENSE commands. 
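+ *
+ * esp_autosense() below hand-builds an IDENTIFY message byte plus a
+ * 6-byte REQUEST SENSE CDB (allocation length SCSI_SENSE_BUFFERSIZE) in
+ * the command block and issues it with a plain ESP_CMD_SELA selection.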
+ */ +static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent) +{ + struct scsi_cmnd *cmd = ent->cmd; + struct scsi_device *dev = cmd->device; + int tgt, lun; + u8 *p, val; + + tgt = dev->id; + lun = dev->lun; + + + if (!ent->sense_ptr) { + esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n", + tgt, lun); + esp_map_sense(esp, ent); + } + ent->saved_sense_ptr = ent->sense_ptr; + + esp->active_cmd = ent; + + p = esp->command_block; + esp->msg_out_len = 0; + + *p++ = IDENTIFY(0, lun); + *p++ = REQUEST_SENSE; + *p++ = ((dev->scsi_level <= SCSI_2) ? + (lun << 5) : 0); + *p++ = 0; + *p++ = 0; + *p++ = SCSI_SENSE_BUFFERSIZE; + *p++ = 0; + + esp->select_state = ESP_SELECT_BASIC; + + val = tgt; + if (esp->rev == FASHME) + val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT; + esp_write8(val, ESP_BUSID); + + esp_write_tgt_sync(esp, tgt); + esp_write_tgt_config3(esp, tgt); + + val = (p - esp->command_block); + + esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA); +} + +static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp) +{ + struct esp_cmd_entry *ent; + + list_for_each_entry(ent, &esp->queued_cmds, list) { + struct scsi_cmnd *cmd = ent->cmd; + struct scsi_device *dev = cmd->device; + struct esp_lun_data *lp = dev->hostdata; + + if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { + ent->tag[0] = 0; + ent->tag[1] = 0; + return ent; + } + + if (!spi_populate_tag_msg(&ent->tag[0], cmd)) { + ent->tag[0] = 0; + ent->tag[1] = 0; + } + ent->orig_tag[0] = ent->tag[0]; + ent->orig_tag[1] = ent->tag[1]; + + if (esp_alloc_lun_tag(ent, lp) < 0) + continue; + + return ent; + } + + return NULL; +} + +static void esp_maybe_execute_command(struct esp *esp) +{ + struct esp_target_data *tp; + struct scsi_device *dev; + struct scsi_cmnd *cmd; + struct esp_cmd_entry *ent; + bool select_and_stop = false; + int tgt, lun, i; + u32 val, start_cmd; + u8 *p; + + if (esp->active_cmd || + (esp->flags & ESP_FLAG_RESETTING)) + return; + + ent = find_and_prep_issuable_command(esp); + if (!ent) + return; + + if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { + esp_autosense(esp, ent); + return; + } + + cmd = ent->cmd; + dev = cmd->device; + tgt = dev->id; + lun = dev->lun; + tp = &esp->target[tgt]; + + list_move(&ent->list, &esp->active_cmds); + + esp->active_cmd = ent; + + esp_map_dma(esp, cmd); + esp_save_pointers(esp, ent); + + if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12)) + select_and_stop = true; + + p = esp->command_block; + + esp->msg_out_len = 0; + if (tp->flags & ESP_TGT_CHECK_NEGO) { + /* Need to negotiate. If the target is broken + * go for synchronous transfers and non-wide. + */ + if (tp->flags & ESP_TGT_BROKEN) { + tp->flags &= ~ESP_TGT_DISCONNECT; + tp->nego_goal_period = 0; + tp->nego_goal_offset = 0; + tp->nego_goal_width = 0; + tp->nego_goal_tags = 0; + } + + /* If the settings are not changing, skip this. */ + if (spi_width(tp->starget) == tp->nego_goal_width && + spi_period(tp->starget) == tp->nego_goal_period && + spi_offset(tp->starget) == tp->nego_goal_offset) { + tp->flags &= ~ESP_TGT_CHECK_NEGO; + goto build_identify; + } + + if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) { + esp->msg_out_len = + spi_populate_width_msg(&esp->msg_out[0], + (tp->nego_goal_width ? 
+ 1 : 0)); + tp->flags |= ESP_TGT_NEGO_WIDE; + } else if (esp_need_to_nego_sync(tp)) { + esp->msg_out_len = + spi_populate_sync_msg(&esp->msg_out[0], + tp->nego_goal_period, + tp->nego_goal_offset); + tp->flags |= ESP_TGT_NEGO_SYNC; + } else { + tp->flags &= ~ESP_TGT_CHECK_NEGO; + } + + /* If there are multiple message bytes, use Select and Stop */ + if (esp->msg_out_len) + select_and_stop = true; + } + +build_identify: + *p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun); + + if (ent->tag[0] && esp->rev == ESP100) { + /* ESP100 lacks select w/atn3 command, use select + * and stop instead. + */ + select_and_stop = true; + } + + if (select_and_stop) { + esp->cmd_bytes_left = cmd->cmd_len; + esp->cmd_bytes_ptr = &cmd->cmnd[0]; + + if (ent->tag[0]) { + for (i = esp->msg_out_len - 1; + i >= 0; i--) + esp->msg_out[i + 2] = esp->msg_out[i]; + esp->msg_out[0] = ent->tag[0]; + esp->msg_out[1] = ent->tag[1]; + esp->msg_out_len += 2; + } + + start_cmd = ESP_CMD_SELAS; + esp->select_state = ESP_SELECT_MSGOUT; + } else { + start_cmd = ESP_CMD_SELA; + if (ent->tag[0]) { + *p++ = ent->tag[0]; + *p++ = ent->tag[1]; + + start_cmd = ESP_CMD_SA3; + } + + for (i = 0; i < cmd->cmd_len; i++) + *p++ = cmd->cmnd[i]; + + esp->select_state = ESP_SELECT_BASIC; + } + val = tgt; + if (esp->rev == FASHME) + val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT; + esp_write8(val, ESP_BUSID); + + esp_write_tgt_sync(esp, tgt); + esp_write_tgt_config3(esp, tgt); + + val = (p - esp->command_block); + + if (esp_debug & ESP_DEBUG_SCSICMD) { + printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun); + for (i = 0; i < cmd->cmd_len; i++) + printk("%02x ", cmd->cmnd[i]); + printk("]\n"); + } + + esp_send_dma_cmd(esp, val, 16, start_cmd); +} + +static struct esp_cmd_entry *esp_get_ent(struct esp *esp) +{ + struct list_head *head = &esp->esp_cmd_pool; + struct esp_cmd_entry *ret; + + if (list_empty(head)) { + ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC); + } else { + ret = list_entry(head->next, struct esp_cmd_entry, list); + list_del(&ret->list); + memset(ret, 0, sizeof(*ret)); + } + return ret; +} + +static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent) +{ + list_add(&ent->list, &esp->esp_cmd_pool); +} + +static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent, + struct scsi_cmnd *cmd, unsigned char host_byte) +{ + struct scsi_device *dev = cmd->device; + int tgt = dev->id; + int lun = dev->lun; + + esp->active_cmd = NULL; + esp_unmap_dma(esp, cmd); + esp_free_lun_tag(ent, dev->hostdata); + cmd->result = 0; + set_host_byte(cmd, host_byte); + if (host_byte == DID_OK) + set_status_byte(cmd, ent->status); + + if (ent->eh_done) { + complete(ent->eh_done); + ent->eh_done = NULL; + } + + if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { + esp_unmap_sense(esp, ent); + + /* Restore the message/status bytes to what we actually + * saw originally. Also, report that we are providing + * the sense data. 
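+		 *
+		 * The command then completes with CHECK CONDITION in the
+		 * status byte while the sense data already sits in
+		 * cmd->sense_buffer, so the midlayer does not need to issue
+		 * its own REQUEST SENSE.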
+ */ + cmd->result = SAM_STAT_CHECK_CONDITION; + + ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE; + if (esp_debug & ESP_DEBUG_AUTOSENSE) { + int i; + + printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ", + esp->host->unique_id, tgt, lun); + for (i = 0; i < 18; i++) + printk("%02x ", cmd->sense_buffer[i]); + printk("]\n"); + } + } + + scsi_done(cmd); + + list_del(&ent->list); + esp_put_ent(esp, ent); + + esp_maybe_execute_command(esp); +} + +static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent) +{ + struct scsi_device *dev = ent->cmd->device; + struct esp_lun_data *lp = dev->hostdata; + + scsi_track_queue_full(dev, lp->num_tagged - 1); +} + +static int esp_queuecommand_lck(struct scsi_cmnd *cmd) +{ + struct scsi_device *dev = cmd->device; + struct esp *esp = shost_priv(dev->host); + struct esp_cmd_priv *spriv; + struct esp_cmd_entry *ent; + + ent = esp_get_ent(esp); + if (!ent) + return SCSI_MLQUEUE_HOST_BUSY; + + ent->cmd = cmd; + + spriv = ESP_CMD_PRIV(cmd); + spriv->num_sg = 0; + + list_add_tail(&ent->list, &esp->queued_cmds); + + esp_maybe_execute_command(esp); + + return 0; +} + +static DEF_SCSI_QCMD(esp_queuecommand) + +static int esp_check_gross_error(struct esp *esp) +{ + if (esp->sreg & ESP_STAT_SPAM) { + /* Gross Error, could be one of: + * - top of fifo overwritten + * - top of command register overwritten + * - DMA programmed with wrong direction + * - improper phase change + */ + shost_printk(KERN_ERR, esp->host, + "Gross error sreg[%02x]\n", esp->sreg); + /* XXX Reset the chip. XXX */ + return 1; + } + return 0; +} + +static int esp_check_spur_intr(struct esp *esp) +{ + switch (esp->rev) { + case ESP100: + case ESP100A: + /* The interrupt pending bit of the status register cannot + * be trusted on these revisions. + */ + esp->sreg &= ~ESP_STAT_INTR; + break; + + default: + if (!(esp->sreg & ESP_STAT_INTR)) { + if (esp->ireg & ESP_INTR_SR) + return 1; + + /* If the DMA is indicating interrupt pending and the + * ESP is not, the only possibility is a DMA error. + */ + if (!esp->ops->dma_error(esp)) { + shost_printk(KERN_ERR, esp->host, + "Spurious irq, sreg=%02x.\n", + esp->sreg); + return -1; + } + + shost_printk(KERN_ERR, esp->host, "DMA error\n"); + + /* XXX Reset the chip. XXX */ + return -1; + } + break; + } + + return 0; +} + +static void esp_schedule_reset(struct esp *esp) +{ + esp_log_reset("esp_schedule_reset() from %ps\n", + __builtin_return_address(0)); + esp->flags |= ESP_FLAG_RESETTING; + esp_event(esp, ESP_EVENT_RESET); +} + +/* In order to avoid having to add a special half-reconnected state + * into the driver we just sit here and poll through the rest of + * the reselection process to get the tag message bytes. 
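+ *
+ * Concretely: poll for the next interrupt, DMA the two tag message bytes
+ * (tag type plus tag number) into the command block, ACK the message and
+ * then look the tag number up in lp->tagged_cmds[] to recover the
+ * command entry.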
+ */ +static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp, + struct esp_lun_data *lp) +{ + struct esp_cmd_entry *ent; + int i; + + if (!lp->num_tagged) { + shost_printk(KERN_ERR, esp->host, + "Reconnect w/num_tagged==0\n"); + return NULL; + } + + esp_log_reconnect("reconnect tag, "); + + for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { + if (esp->ops->irq_pending(esp)) + break; + } + if (i == ESP_QUICKIRQ_LIMIT) { + shost_printk(KERN_ERR, esp->host, + "Reconnect IRQ1 timeout\n"); + return NULL; + } + + esp->sreg = esp_read8(ESP_STATUS); + esp->ireg = esp_read8(ESP_INTRPT); + + esp_log_reconnect("IRQ(%d:%x:%x), ", + i, esp->ireg, esp->sreg); + + if (esp->ireg & ESP_INTR_DC) { + shost_printk(KERN_ERR, esp->host, + "Reconnect, got disconnect.\n"); + return NULL; + } + + if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) { + shost_printk(KERN_ERR, esp->host, + "Reconnect, not MIP sreg[%02x].\n", esp->sreg); + return NULL; + } + + /* DMA in the tag bytes... */ + esp->command_block[0] = 0xff; + esp->command_block[1] = 0xff; + esp->ops->send_dma_cmd(esp, esp->command_block_dma, + 2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI); + + /* ACK the message. */ + scsi_esp_cmd(esp, ESP_CMD_MOK); + + for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) { + if (esp->ops->irq_pending(esp)) { + esp->sreg = esp_read8(ESP_STATUS); + esp->ireg = esp_read8(ESP_INTRPT); + if (esp->ireg & ESP_INTR_FDONE) + break; + } + udelay(1); + } + if (i == ESP_RESELECT_TAG_LIMIT) { + shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n"); + return NULL; + } + esp->ops->dma_drain(esp); + esp->ops->dma_invalidate(esp); + + esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n", + i, esp->ireg, esp->sreg, + esp->command_block[0], + esp->command_block[1]); + + if (esp->command_block[0] < SIMPLE_QUEUE_TAG || + esp->command_block[0] > ORDERED_QUEUE_TAG) { + shost_printk(KERN_ERR, esp->host, + "Reconnect, bad tag type %02x.\n", + esp->command_block[0]); + return NULL; + } + + ent = lp->tagged_cmds[esp->command_block[1]]; + if (!ent) { + shost_printk(KERN_ERR, esp->host, + "Reconnect, no entry for tag %02x.\n", + esp->command_block[1]); + return NULL; + } + + return ent; +} + +static int esp_reconnect(struct esp *esp) +{ + struct esp_cmd_entry *ent; + struct esp_target_data *tp; + struct esp_lun_data *lp; + struct scsi_device *dev; + int target, lun; + + BUG_ON(esp->active_cmd); + if (esp->rev == FASHME) { + /* FASHME puts the target and lun numbers directly + * into the fifo. + */ + target = esp->fifo[0]; + lun = esp->fifo[1] & 0x7; + } else { + u8 bits = esp_read8(ESP_FDATA); + + /* Older chips put the lun directly into the fifo, but + * the target is given as a sample of the arbitration + * lines on the bus at reselection time. So we should + * see the ID of the ESP and the one reconnecting target + * set in the bitmap. + */ + if (!(bits & esp->scsi_id_mask)) + goto do_reset; + bits &= ~esp->scsi_id_mask; + if (!bits || (bits & (bits - 1))) + goto do_reset; + + target = ffs(bits) - 1; + lun = (esp_read8(ESP_FDATA) & 0x7); + + scsi_esp_cmd(esp, ESP_CMD_FLUSH); + if (esp->rev == ESP100) { + u8 ireg = esp_read8(ESP_INTRPT); + /* This chip has a bug during reselection that can + * cause a spurious illegal-command interrupt, which + * we simply ACK here. Another possibility is a bus + * reset so we must check for that. 
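+			 *
+			 * Reading ESP_INTRPT here is what ACKs the spurious
+			 * interrupt; only a latched bus-reset indication
+			 * (ESP_INTR_SR) forces the do_reset path.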
+ */ + if (ireg & ESP_INTR_SR) + goto do_reset; + } + scsi_esp_cmd(esp, ESP_CMD_NULL); + } + + esp_write_tgt_sync(esp, target); + esp_write_tgt_config3(esp, target); + + scsi_esp_cmd(esp, ESP_CMD_MOK); + + if (esp->rev == FASHME) + esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT, + ESP_BUSID); + + tp = &esp->target[target]; + dev = __scsi_device_lookup_by_target(tp->starget, lun); + if (!dev) { + shost_printk(KERN_ERR, esp->host, + "Reconnect, no lp tgt[%u] lun[%u]\n", + target, lun); + goto do_reset; + } + lp = dev->hostdata; + + ent = lp->non_tagged_cmd; + if (!ent) { + ent = esp_reconnect_with_tag(esp, lp); + if (!ent) + goto do_reset; + } + + esp->active_cmd = ent; + + esp_event(esp, ESP_EVENT_CHECK_PHASE); + esp_restore_pointers(esp, ent); + esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; + return 1; + +do_reset: + esp_schedule_reset(esp); + return 0; +} + +static int esp_finish_select(struct esp *esp) +{ + struct esp_cmd_entry *ent; + struct scsi_cmnd *cmd; + + /* No longer selecting. */ + esp->select_state = ESP_SELECT_NONE; + + esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS; + ent = esp->active_cmd; + cmd = ent->cmd; + + if (esp->ops->dma_error(esp)) { + /* If we see a DMA error during or as a result of selection, + * all bets are off. + */ + esp_schedule_reset(esp); + esp_cmd_is_done(esp, ent, cmd, DID_ERROR); + return 0; + } + + esp->ops->dma_invalidate(esp); + + if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) { + struct esp_target_data *tp = &esp->target[cmd->device->id]; + + /* Carefully back out of the selection attempt. Release + * resources (such as DMA mapping & TAG) and reset state (such + * as message out and command delivery variables). + */ + if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { + esp_unmap_dma(esp, cmd); + esp_free_lun_tag(ent, cmd->device->hostdata); + tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE); + esp->cmd_bytes_ptr = NULL; + esp->cmd_bytes_left = 0; + } else { + esp_unmap_sense(esp, ent); + } + + /* Now that the state is unwound properly, put back onto + * the issue queue. This command is no longer active. + */ + list_move(&ent->list, &esp->queued_cmds); + esp->active_cmd = NULL; + + /* Return value ignored by caller, it directly invokes + * esp_reconnect(). + */ + return 0; + } + + if (esp->ireg == ESP_INTR_DC) { + struct scsi_device *dev = cmd->device; + + /* Disconnect. Make sure we re-negotiate sync and + * wide parameters if this target starts responding + * again in the future. + */ + esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO; + + scsi_esp_cmd(esp, ESP_CMD_ESEL); + esp_cmd_is_done(esp, ent, cmd, DID_BAD_TARGET); + return 1; + } + + if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) { + /* Selection successful. On pre-FAST chips we have + * to do a NOP and possibly clean out the FIFO. + */ + if (esp->rev <= ESP236) { + int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; + + scsi_esp_cmd(esp, ESP_CMD_NULL); + + if (!fcnt && + (!esp->prev_soff || + ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP))) + esp_flush_fifo(esp); + } + + /* If we are doing a Select And Stop command, negotiation, etc. + * we'll do the right thing as we transition to the next phase. 
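+		 *
+		 * In the select-and-stop case (ESP_CMD_SELAS) the CDB was not
+		 * sent during selection; it is delivered later, in
+		 * ESP_EVENT_CMD_START, from esp->cmd_bytes_ptr once the
+		 * target reaches COMMAND phase.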
+ */ + esp_event(esp, ESP_EVENT_CHECK_PHASE); + return 0; + } + + shost_printk(KERN_INFO, esp->host, + "Unexpected selection completion ireg[%x]\n", esp->ireg); + esp_schedule_reset(esp); + return 0; +} + +static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent, + struct scsi_cmnd *cmd) +{ + int fifo_cnt, ecount, bytes_sent, flush_fifo; + + fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; + if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE) + fifo_cnt <<= 1; + + ecount = 0; + if (!(esp->sreg & ESP_STAT_TCNT)) { + ecount = ((unsigned int)esp_read8(ESP_TCLOW) | + (((unsigned int)esp_read8(ESP_TCMED)) << 8)); + if (esp->rev == FASHME) + ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16; + if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB)) + ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16; + } + + bytes_sent = esp->data_dma_len; + bytes_sent -= ecount; + bytes_sent -= esp->send_cmd_residual; + + /* + * The am53c974 has a DMA 'peculiarity'. The doc states: + * In some odd byte conditions, one residual byte will + * be left in the SCSI FIFO, and the FIFO Flags will + * never count to '0 '. When this happens, the residual + * byte should be retrieved via PIO following completion + * of the BLAST operation. + */ + if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) { + size_t count = 1; + size_t offset = bytes_sent; + u8 bval = esp_read8(ESP_FDATA); + + if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) + ent->sense_ptr[bytes_sent] = bval; + else { + struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); + u8 *ptr; + + ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg, + &offset, &count); + if (likely(ptr)) { + *(ptr + offset) = bval; + scsi_kunmap_atomic_sg(ptr); + } + } + bytes_sent += fifo_cnt; + ent->flags &= ~ESP_CMD_FLAG_RESIDUAL; + } + if (!(ent->flags & ESP_CMD_FLAG_WRITE)) + bytes_sent -= fifo_cnt; + + flush_fifo = 0; + if (!esp->prev_soff) { + /* Synchronous data transfer, always flush fifo. */ + flush_fifo = 1; + } else { + if (esp->rev == ESP100) { + u32 fflags, phase; + + /* ESP100 has a chip bug where in the synchronous data + * phase it can mistake a final long REQ pulse from the + * target as an extra data byte. Fun. + * + * To detect this case we resample the status register + * and fifo flags. If we're still in a data phase and + * we see spurious chunks in the fifo, we return error + * to the caller which should reset and set things up + * such that we only try future transfers to this + * target in synchronous mode. + */ + esp->sreg = esp_read8(ESP_STATUS); + phase = esp->sreg & ESP_STAT_PMASK; + fflags = esp_read8(ESP_FFLAGS); + + if ((phase == ESP_DOP && + (fflags & ESP_FF_ONOTZERO)) || + (phase == ESP_DIP && + (fflags & ESP_FF_FBYTES))) + return -1; + } + if (!(ent->flags & ESP_CMD_FLAG_WRITE)) + flush_fifo = 1; + } + + if (flush_fifo) + esp_flush_fifo(esp); + + return bytes_sent; +} + +static void esp_setsync(struct esp *esp, struct esp_target_data *tp, + u8 scsi_period, u8 scsi_offset, + u8 esp_stp, u8 esp_soff) +{ + spi_period(tp->starget) = scsi_period; + spi_offset(tp->starget) = scsi_offset; + spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 
1 : 0; + + if (esp_soff) { + esp_stp &= 0x1f; + esp_soff |= esp->radelay; + if (esp->rev >= FAS236) { + u8 bit = ESP_CONFIG3_FSCSI; + if (esp->rev >= FAS100A) + bit = ESP_CONFIG3_FAST; + + if (scsi_period < 50) { + if (esp->rev == FASHME) + esp_soff &= ~esp->radelay; + tp->esp_config3 |= bit; + } else { + tp->esp_config3 &= ~bit; + } + esp->prev_cfg3 = tp->esp_config3; + esp_write8(esp->prev_cfg3, ESP_CFG3); + } + } + + tp->esp_period = esp->prev_stp = esp_stp; + tp->esp_offset = esp->prev_soff = esp_soff; + + esp_write8(esp_soff, ESP_SOFF); + esp_write8(esp_stp, ESP_STP); + + tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); + + spi_display_xfer_agreement(tp->starget); +} + +static void esp_msgin_reject(struct esp *esp) +{ + struct esp_cmd_entry *ent = esp->active_cmd; + struct scsi_cmnd *cmd = ent->cmd; + struct esp_target_data *tp; + int tgt; + + tgt = cmd->device->id; + tp = &esp->target[tgt]; + + if (tp->flags & ESP_TGT_NEGO_WIDE) { + tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE); + + if (!esp_need_to_nego_sync(tp)) { + tp->flags &= ~ESP_TGT_CHECK_NEGO; + scsi_esp_cmd(esp, ESP_CMD_RATN); + } else { + esp->msg_out_len = + spi_populate_sync_msg(&esp->msg_out[0], + tp->nego_goal_period, + tp->nego_goal_offset); + tp->flags |= ESP_TGT_NEGO_SYNC; + scsi_esp_cmd(esp, ESP_CMD_SATN); + } + return; + } + + if (tp->flags & ESP_TGT_NEGO_SYNC) { + tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); + tp->esp_period = 0; + tp->esp_offset = 0; + esp_setsync(esp, tp, 0, 0, 0, 0); + scsi_esp_cmd(esp, ESP_CMD_RATN); + return; + } + + shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n"); + esp_schedule_reset(esp); +} + +static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp) +{ + u8 period = esp->msg_in[3]; + u8 offset = esp->msg_in[4]; + u8 stp; + + if (!(tp->flags & ESP_TGT_NEGO_SYNC)) + goto do_reject; + + if (offset > 15) + goto do_reject; + + if (offset) { + int one_clock; + + if (period > esp->max_period) { + period = offset = 0; + goto do_sdtr; + } + if (period < esp->min_period) + goto do_reject; + + one_clock = esp->ccycle / 1000; + stp = DIV_ROUND_UP(period << 2, one_clock); + if (stp && esp->rev >= FAS236) { + if (stp >= 50) + stp--; + } + } else { + stp = 0; + } + + esp_setsync(esp, tp, period, offset, stp, offset); + return; + +do_reject: + esp->msg_out[0] = MESSAGE_REJECT; + esp->msg_out_len = 1; + scsi_esp_cmd(esp, ESP_CMD_SATN); + return; + +do_sdtr: + tp->nego_goal_period = period; + tp->nego_goal_offset = offset; + esp->msg_out_len = + spi_populate_sync_msg(&esp->msg_out[0], + tp->nego_goal_period, + tp->nego_goal_offset); + scsi_esp_cmd(esp, ESP_CMD_SATN); +} + +static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp) +{ + int size = 8 << esp->msg_in[3]; + u8 cfg3; + + if (esp->rev != FASHME) + goto do_reject; + + if (size != 8 && size != 16) + goto do_reject; + + if (!(tp->flags & ESP_TGT_NEGO_WIDE)) + goto do_reject; + + cfg3 = tp->esp_config3; + if (size == 16) { + tp->flags |= ESP_TGT_WIDE; + cfg3 |= ESP_CONFIG3_EWIDE; + } else { + tp->flags &= ~ESP_TGT_WIDE; + cfg3 &= ~ESP_CONFIG3_EWIDE; + } + tp->esp_config3 = cfg3; + esp->prev_cfg3 = cfg3; + esp_write8(cfg3, ESP_CFG3); + + tp->flags &= ~ESP_TGT_NEGO_WIDE; + + spi_period(tp->starget) = 0; + spi_offset(tp->starget) = 0; + if (!esp_need_to_nego_sync(tp)) { + tp->flags &= ~ESP_TGT_CHECK_NEGO; + scsi_esp_cmd(esp, ESP_CMD_RATN); + } else { + esp->msg_out_len = + spi_populate_sync_msg(&esp->msg_out[0], + tp->nego_goal_period, + tp->nego_goal_offset); + tp->flags |= 
ESP_TGT_NEGO_SYNC; + scsi_esp_cmd(esp, ESP_CMD_SATN); + } + return; + +do_reject: + esp->msg_out[0] = MESSAGE_REJECT; + esp->msg_out_len = 1; + scsi_esp_cmd(esp, ESP_CMD_SATN); +} + +static void esp_msgin_extended(struct esp *esp) +{ + struct esp_cmd_entry *ent = esp->active_cmd; + struct scsi_cmnd *cmd = ent->cmd; + struct esp_target_data *tp; + int tgt = cmd->device->id; + + tp = &esp->target[tgt]; + if (esp->msg_in[2] == EXTENDED_SDTR) { + esp_msgin_sdtr(esp, tp); + return; + } + if (esp->msg_in[2] == EXTENDED_WDTR) { + esp_msgin_wdtr(esp, tp); + return; + } + + shost_printk(KERN_INFO, esp->host, + "Unexpected extended msg type %x\n", esp->msg_in[2]); + + esp->msg_out[0] = MESSAGE_REJECT; + esp->msg_out_len = 1; + scsi_esp_cmd(esp, ESP_CMD_SATN); +} + +/* Analyze msgin bytes received from target so far. Return non-zero + * if there are more bytes needed to complete the message. + */ +static int esp_msgin_process(struct esp *esp) +{ + u8 msg0 = esp->msg_in[0]; + int len = esp->msg_in_len; + + if (msg0 & 0x80) { + /* Identify */ + shost_printk(KERN_INFO, esp->host, + "Unexpected msgin identify\n"); + return 0; + } + + switch (msg0) { + case EXTENDED_MESSAGE: + if (len == 1) + return 1; + if (len < esp->msg_in[1] + 2) + return 1; + esp_msgin_extended(esp); + return 0; + + case IGNORE_WIDE_RESIDUE: { + struct esp_cmd_entry *ent; + struct esp_cmd_priv *spriv; + if (len == 1) + return 1; + + if (esp->msg_in[1] != 1) + goto do_reject; + + ent = esp->active_cmd; + spriv = ESP_CMD_PRIV(ent->cmd); + + if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) { + spriv->cur_sg = spriv->prv_sg; + spriv->cur_residue = 1; + } else + spriv->cur_residue++; + spriv->tot_residue++; + return 0; + } + case NOP: + return 0; + case RESTORE_POINTERS: + esp_restore_pointers(esp, esp->active_cmd); + return 0; + case SAVE_POINTERS: + esp_save_pointers(esp, esp->active_cmd); + return 0; + + case COMMAND_COMPLETE: + case DISCONNECT: { + struct esp_cmd_entry *ent = esp->active_cmd; + + ent->message = msg0; + esp_event(esp, ESP_EVENT_FREE_BUS); + esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; + return 0; + } + case MESSAGE_REJECT: + esp_msgin_reject(esp); + return 0; + + default: + do_reject: + esp->msg_out[0] = MESSAGE_REJECT; + esp->msg_out_len = 1; + scsi_esp_cmd(esp, ESP_CMD_SATN); + return 0; + } +} + +static int esp_process_event(struct esp *esp) +{ + int write, i; + +again: + write = 0; + esp_log_event("process event %d phase %x\n", + esp->event, esp->sreg & ESP_STAT_PMASK); + switch (esp->event) { + case ESP_EVENT_CHECK_PHASE: + switch (esp->sreg & ESP_STAT_PMASK) { + case ESP_DOP: + esp_event(esp, ESP_EVENT_DATA_OUT); + break; + case ESP_DIP: + esp_event(esp, ESP_EVENT_DATA_IN); + break; + case ESP_STATP: + esp_flush_fifo(esp); + scsi_esp_cmd(esp, ESP_CMD_ICCSEQ); + esp_event(esp, ESP_EVENT_STATUS); + esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; + return 1; + + case ESP_MOP: + esp_event(esp, ESP_EVENT_MSGOUT); + break; + + case ESP_MIP: + esp_event(esp, ESP_EVENT_MSGIN); + break; + + case ESP_CMDP: + esp_event(esp, ESP_EVENT_CMD_START); + break; + + default: + shost_printk(KERN_INFO, esp->host, + "Unexpected phase, sreg=%02x\n", + esp->sreg); + esp_schedule_reset(esp); + return 0; + } + goto again; + + case ESP_EVENT_DATA_IN: + write = 1; + fallthrough; + + case ESP_EVENT_DATA_OUT: { + struct esp_cmd_entry *ent = esp->active_cmd; + struct scsi_cmnd *cmd = ent->cmd; + dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd); + unsigned int dma_len = esp_cur_dma_len(ent, cmd); + + if (esp->rev == ESP100) + scsi_esp_cmd(esp, 
ESP_CMD_NULL); + + if (write) + ent->flags |= ESP_CMD_FLAG_WRITE; + else + ent->flags &= ~ESP_CMD_FLAG_WRITE; + + if (esp->ops->dma_length_limit) + dma_len = esp->ops->dma_length_limit(esp, dma_addr, + dma_len); + else + dma_len = esp_dma_length_limit(esp, dma_addr, dma_len); + + esp->data_dma_len = dma_len; + + if (!dma_len) { + shost_printk(KERN_ERR, esp->host, + "DMA length is zero!\n"); + shost_printk(KERN_ERR, esp->host, + "cur adr[%08llx] len[%08x]\n", + (unsigned long long)esp_cur_dma_addr(ent, cmd), + esp_cur_dma_len(ent, cmd)); + esp_schedule_reset(esp); + return 0; + } + + esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n", + (unsigned long long)dma_addr, dma_len, write); + + esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len, + write, ESP_CMD_DMA | ESP_CMD_TI); + esp_event(esp, ESP_EVENT_DATA_DONE); + break; + } + case ESP_EVENT_DATA_DONE: { + struct esp_cmd_entry *ent = esp->active_cmd; + struct scsi_cmnd *cmd = ent->cmd; + int bytes_sent; + + if (esp->ops->dma_error(esp)) { + shost_printk(KERN_INFO, esp->host, + "data done, DMA error, resetting\n"); + esp_schedule_reset(esp); + return 0; + } + + if (ent->flags & ESP_CMD_FLAG_WRITE) { + /* XXX parity errors, etc. XXX */ + + esp->ops->dma_drain(esp); + } + esp->ops->dma_invalidate(esp); + + if (esp->ireg != ESP_INTR_BSERV) { + /* We should always see exactly a bus-service + * interrupt at the end of a successful transfer. + */ + shost_printk(KERN_INFO, esp->host, + "data done, not BSERV, resetting\n"); + esp_schedule_reset(esp); + return 0; + } + + bytes_sent = esp_data_bytes_sent(esp, ent, cmd); + + esp_log_datadone("data done flgs[%x] sent[%d]\n", + ent->flags, bytes_sent); + + if (bytes_sent < 0) { + /* XXX force sync mode for this target XXX */ + esp_schedule_reset(esp); + return 0; + } + + esp_advance_dma(esp, ent, cmd, bytes_sent); + esp_event(esp, ESP_EVENT_CHECK_PHASE); + goto again; + } + + case ESP_EVENT_STATUS: { + struct esp_cmd_entry *ent = esp->active_cmd; + + if (esp->ireg & ESP_INTR_FDONE) { + ent->status = esp_read8(ESP_FDATA); + ent->message = esp_read8(ESP_FDATA); + scsi_esp_cmd(esp, ESP_CMD_MOK); + } else if (esp->ireg == ESP_INTR_BSERV) { + ent->status = esp_read8(ESP_FDATA); + ent->message = 0xff; + esp_event(esp, ESP_EVENT_MSGIN); + return 0; + } + + if (ent->message != COMMAND_COMPLETE) { + shost_printk(KERN_INFO, esp->host, + "Unexpected message %x in status\n", + ent->message); + esp_schedule_reset(esp); + return 0; + } + + esp_event(esp, ESP_EVENT_FREE_BUS); + esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; + break; + } + case ESP_EVENT_FREE_BUS: { + struct esp_cmd_entry *ent = esp->active_cmd; + struct scsi_cmnd *cmd = ent->cmd; + + if (ent->message == COMMAND_COMPLETE || + ent->message == DISCONNECT) + scsi_esp_cmd(esp, ESP_CMD_ESEL); + + if (ent->message == COMMAND_COMPLETE) { + esp_log_cmddone("Command done status[%x] message[%x]\n", + ent->status, ent->message); + if (ent->status == SAM_STAT_TASK_SET_FULL) + esp_event_queue_full(esp, ent); + + if (ent->status == SAM_STAT_CHECK_CONDITION && + !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { + ent->flags |= ESP_CMD_FLAG_AUTOSENSE; + esp_autosense(esp, ent); + } else { + esp_cmd_is_done(esp, ent, cmd, DID_OK); + } + } else if (ent->message == DISCONNECT) { + esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n", + cmd->device->id, + ent->tag[0], ent->tag[1]); + + esp->active_cmd = NULL; + esp_maybe_execute_command(esp); + } else { + shost_printk(KERN_INFO, esp->host, + "Unexpected message %x in freebus\n", + ent->message); + 
esp_schedule_reset(esp); + return 0; + } + if (esp->active_cmd) + esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; + break; + } + case ESP_EVENT_MSGOUT: { + scsi_esp_cmd(esp, ESP_CMD_FLUSH); + + if (esp_debug & ESP_DEBUG_MSGOUT) { + int i; + printk("ESP: Sending message [ "); + for (i = 0; i < esp->msg_out_len; i++) + printk("%02x ", esp->msg_out[i]); + printk("]\n"); + } + + if (esp->rev == FASHME) { + int i; + + /* Always use the fifo. */ + for (i = 0; i < esp->msg_out_len; i++) { + esp_write8(esp->msg_out[i], ESP_FDATA); + esp_write8(0, ESP_FDATA); + } + scsi_esp_cmd(esp, ESP_CMD_TI); + } else { + if (esp->msg_out_len == 1) { + esp_write8(esp->msg_out[0], ESP_FDATA); + scsi_esp_cmd(esp, ESP_CMD_TI); + } else if (esp->flags & ESP_FLAG_USE_FIFO) { + for (i = 0; i < esp->msg_out_len; i++) + esp_write8(esp->msg_out[i], ESP_FDATA); + scsi_esp_cmd(esp, ESP_CMD_TI); + } else { + /* Use DMA. */ + memcpy(esp->command_block, + esp->msg_out, + esp->msg_out_len); + + esp->ops->send_dma_cmd(esp, + esp->command_block_dma, + esp->msg_out_len, + esp->msg_out_len, + 0, + ESP_CMD_DMA|ESP_CMD_TI); + } + } + esp_event(esp, ESP_EVENT_MSGOUT_DONE); + break; + } + case ESP_EVENT_MSGOUT_DONE: + if (esp->rev == FASHME) { + scsi_esp_cmd(esp, ESP_CMD_FLUSH); + } else { + if (esp->msg_out_len > 1) + esp->ops->dma_invalidate(esp); + + /* XXX if the chip went into disconnected mode, + * we can't run the phase state machine anyway. + */ + if (!(esp->ireg & ESP_INTR_DC)) + scsi_esp_cmd(esp, ESP_CMD_NULL); + } + + esp->msg_out_len = 0; + + esp_event(esp, ESP_EVENT_CHECK_PHASE); + goto again; + case ESP_EVENT_MSGIN: + if (esp->ireg & ESP_INTR_BSERV) { + if (esp->rev == FASHME) { + if (!(esp_read8(ESP_STATUS2) & + ESP_STAT2_FEMPTY)) + scsi_esp_cmd(esp, ESP_CMD_FLUSH); + } else { + scsi_esp_cmd(esp, ESP_CMD_FLUSH); + if (esp->rev == ESP100) + scsi_esp_cmd(esp, ESP_CMD_NULL); + } + scsi_esp_cmd(esp, ESP_CMD_TI); + esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; + return 1; + } + if (esp->ireg & ESP_INTR_FDONE) { + u8 val; + + if (esp->rev == FASHME) + val = esp->fifo[0]; + else + val = esp_read8(ESP_FDATA); + esp->msg_in[esp->msg_in_len++] = val; + + esp_log_msgin("Got msgin byte %x\n", val); + + if (!esp_msgin_process(esp)) + esp->msg_in_len = 0; + + if (esp->rev == FASHME) + scsi_esp_cmd(esp, ESP_CMD_FLUSH); + + scsi_esp_cmd(esp, ESP_CMD_MOK); + + /* Check whether a bus reset is to be done next */ + if (esp->event == ESP_EVENT_RESET) + return 0; + + if (esp->event != ESP_EVENT_FREE_BUS) + esp_event(esp, ESP_EVENT_CHECK_PHASE); + } else { + shost_printk(KERN_INFO, esp->host, + "MSGIN neither BSERV not FDON, resetting"); + esp_schedule_reset(esp); + return 0; + } + break; + case ESP_EVENT_CMD_START: + memcpy(esp->command_block, esp->cmd_bytes_ptr, + esp->cmd_bytes_left); + esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI); + esp_event(esp, ESP_EVENT_CMD_DONE); + esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; + break; + case ESP_EVENT_CMD_DONE: + esp->ops->dma_invalidate(esp); + if (esp->ireg & ESP_INTR_BSERV) { + esp_event(esp, ESP_EVENT_CHECK_PHASE); + goto again; + } + esp_schedule_reset(esp); + return 0; + + case ESP_EVENT_RESET: + scsi_esp_cmd(esp, ESP_CMD_RS); + break; + + default: + shost_printk(KERN_INFO, esp->host, + "Unexpected event %x, resetting\n", esp->event); + esp_schedule_reset(esp); + return 0; + } + return 1; +} + +static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent) +{ + struct scsi_cmnd *cmd = ent->cmd; + + esp_unmap_dma(esp, cmd); + esp_free_lun_tag(ent, cmd->device->hostdata); + cmd->result = 
DID_RESET << 16; + + if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) + esp_unmap_sense(esp, ent); + + scsi_done(cmd); + list_del(&ent->list); + esp_put_ent(esp, ent); +} + +static void esp_clear_hold(struct scsi_device *dev, void *data) +{ + struct esp_lun_data *lp = dev->hostdata; + + BUG_ON(lp->num_tagged); + lp->hold = 0; +} + +static void esp_reset_cleanup(struct esp *esp) +{ + struct esp_cmd_entry *ent, *tmp; + int i; + + list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) { + struct scsi_cmnd *cmd = ent->cmd; + + list_del(&ent->list); + cmd->result = DID_RESET << 16; + scsi_done(cmd); + esp_put_ent(esp, ent); + } + + list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) { + if (ent == esp->active_cmd) + esp->active_cmd = NULL; + esp_reset_cleanup_one(esp, ent); + } + + BUG_ON(esp->active_cmd != NULL); + + /* Force renegotiation of sync/wide transfers. */ + for (i = 0; i < ESP_MAX_TARGET; i++) { + struct esp_target_data *tp = &esp->target[i]; + + tp->esp_period = 0; + tp->esp_offset = 0; + tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE | + ESP_CONFIG3_FSCSI | + ESP_CONFIG3_FAST); + tp->flags &= ~ESP_TGT_WIDE; + tp->flags |= ESP_TGT_CHECK_NEGO; + + if (tp->starget) + __starget_for_each_device(tp->starget, NULL, + esp_clear_hold); + } + esp->flags &= ~ESP_FLAG_RESETTING; +} + +/* Runs under host->lock */ +static void __esp_interrupt(struct esp *esp) +{ + int finish_reset, intr_done; + u8 phase; + + /* + * Once INTRPT is read STATUS and SSTEP are cleared. + */ + esp->sreg = esp_read8(ESP_STATUS); + esp->seqreg = esp_read8(ESP_SSTEP); + esp->ireg = esp_read8(ESP_INTRPT); + + if (esp->flags & ESP_FLAG_RESETTING) { + finish_reset = 1; + } else { + if (esp_check_gross_error(esp)) + return; + + finish_reset = esp_check_spur_intr(esp); + if (finish_reset < 0) + return; + } + + if (esp->ireg & ESP_INTR_SR) + finish_reset = 1; + + if (finish_reset) { + esp_reset_cleanup(esp); + if (esp->eh_reset) { + complete(esp->eh_reset); + esp->eh_reset = NULL; + } + return; + } + + phase = (esp->sreg & ESP_STAT_PMASK); + if (esp->rev == FASHME) { + if (((phase != ESP_DIP && phase != ESP_DOP) && + esp->select_state == ESP_SELECT_NONE && + esp->event != ESP_EVENT_STATUS && + esp->event != ESP_EVENT_DATA_DONE) || + (esp->ireg & ESP_INTR_RSEL)) { + esp->sreg2 = esp_read8(ESP_STATUS2); + if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || + (esp->sreg2 & ESP_STAT2_F1BYTE)) + hme_read_fifo(esp); + } + } + + esp_log_intr("intr sreg[%02x] seqreg[%02x] " + "sreg2[%02x] ireg[%02x]\n", + esp->sreg, esp->seqreg, esp->sreg2, esp->ireg); + + intr_done = 0; + + if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) { + shost_printk(KERN_INFO, esp->host, + "unexpected IREG %02x\n", esp->ireg); + if (esp->ireg & ESP_INTR_IC) + esp_dump_cmd_log(esp); + + esp_schedule_reset(esp); + } else { + if (esp->ireg & ESP_INTR_RSEL) { + if (esp->active_cmd) + (void) esp_finish_select(esp); + intr_done = esp_reconnect(esp); + } else { + /* Some combination of FDONE, BSERV, DC. 
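+			 * esp_finish_select() then inspects esp->ireg to tell
+			 * a successful selection, a selection
+			 * time-out/disconnect and a lost race against a
+			 * reselecting target apart.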
*/ + if (esp->select_state != ESP_SELECT_NONE) + intr_done = esp_finish_select(esp); + } + } + while (!intr_done) + intr_done = esp_process_event(esp); +} + +irqreturn_t scsi_esp_intr(int irq, void *dev_id) +{ + struct esp *esp = dev_id; + unsigned long flags; + irqreturn_t ret; + + spin_lock_irqsave(esp->host->host_lock, flags); + ret = IRQ_NONE; + if (esp->ops->irq_pending(esp)) { + ret = IRQ_HANDLED; + for (;;) { + int i; + + __esp_interrupt(esp); + if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK)) + break; + esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK; + + for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { + if (esp->ops->irq_pending(esp)) + break; + } + if (i == ESP_QUICKIRQ_LIMIT) + break; + } + } + spin_unlock_irqrestore(esp->host->host_lock, flags); + + return ret; +} +EXPORT_SYMBOL(scsi_esp_intr); + +static void esp_get_revision(struct esp *esp) +{ + u8 val; + + esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); + if (esp->config2 == 0) { + esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); + esp_write8(esp->config2, ESP_CFG2); + + val = esp_read8(ESP_CFG2); + val &= ~ESP_CONFIG2_MAGIC; + + esp->config2 = 0; + if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { + /* + * If what we write to cfg2 does not come back, + * cfg2 is not implemented. + * Therefore this must be a plain esp100. + */ + esp->rev = ESP100; + return; + } + } + + esp_set_all_config3(esp, 5); + esp->prev_cfg3 = 5; + esp_write8(esp->config2, ESP_CFG2); + esp_write8(0, ESP_CFG3); + esp_write8(esp->prev_cfg3, ESP_CFG3); + + val = esp_read8(ESP_CFG3); + if (val != 5) { + /* The cfg2 register is implemented, however + * cfg3 is not, must be esp100a. + */ + esp->rev = ESP100A; + } else { + esp_set_all_config3(esp, 0); + esp->prev_cfg3 = 0; + esp_write8(esp->prev_cfg3, ESP_CFG3); + + /* All of cfg{1,2,3} implemented, must be one of + * the fas variants, figure out which one. + */ + if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) { + esp->rev = FAST; + esp->sync_defp = SYNC_DEFP_FAST; + } else { + esp->rev = ESP236; + } + } +} + +static void esp_init_swstate(struct esp *esp) +{ + int i; + + INIT_LIST_HEAD(&esp->queued_cmds); + INIT_LIST_HEAD(&esp->active_cmds); + INIT_LIST_HEAD(&esp->esp_cmd_pool); + + /* Start with a clear state, domain validation (via ->slave_configure, + * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged + * commands. + */ + for (i = 0 ; i < ESP_MAX_TARGET; i++) { + esp->target[i].flags = 0; + esp->target[i].nego_goal_period = 0; + esp->target[i].nego_goal_offset = 0; + esp->target[i].nego_goal_width = 0; + esp->target[i].nego_goal_tags = 0; + } +} + +/* This places the ESP into a known state at boot time. */ +static void esp_bootup_reset(struct esp *esp) +{ + u8 val; + + /* Reset the DMA */ + esp->ops->reset_dma(esp); + + /* Reset the ESP */ + esp_reset_esp(esp); + + /* Reset the SCSI bus, but tell ESP not to generate an irq */ + val = esp_read8(ESP_CFG1); + val |= ESP_CONFIG1_SRRDISAB; + esp_write8(val, ESP_CFG1); + + scsi_esp_cmd(esp, ESP_CMD_RS); + udelay(400); + + esp_write8(esp->config1, ESP_CFG1); + + /* Eat any bitrot in the chip and we are done... */ + esp_read8(ESP_INTRPT); +} + +static void esp_set_clock_params(struct esp *esp) +{ + int fhz; + u8 ccf; + + /* This is getting messy but it has to be done correctly or else + * you get weird behavior all over the place. We are trying to + * basically figure out three pieces of information. 
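+	 *
+	 * (As a concrete check of the formulas in (a)-(c) below: with the
+	 * 20MHz/CCF=4 fallback chosen further down, the 250ms selection
+	 * time-out of (b) works out to (0.25 * 20000000) / (8192 * 4),
+	 * i.e. roughly 153.)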
+ * + * a) Clock Conversion Factor + * + * This is a representation of the input crystal clock frequency + * going into the ESP on this machine. Any operation whose timing + * is longer than 400ns depends on this value being correct. For + * example, you'll get blips for arbitration/selection during high + * load or with multiple targets if this is not set correctly. + * + * b) Selection Time-Out + * + * The ESP isn't very bright and will arbitrate for the bus and try + * to select a target forever if you let it. This value tells the + * ESP when it has taken too long to negotiate and that it should + * interrupt the CPU so we can see what happened. The value is + * computed as follows (from NCR/Symbios chip docs). + * + * (Time Out Period) * (Input Clock) + * STO = ---------------------------------- + * (8192) * (Clock Conversion Factor) + * + * We use a time out period of 250ms (ESP_BUS_TIMEOUT). + * + * c) Imperical constants for synchronous offset and transfer period + * register values + * + * This entails the smallest and largest sync period we could ever + * handle on this ESP. + */ + fhz = esp->cfreq; + + ccf = ((fhz / 1000000) + 4) / 5; + if (ccf == 1) + ccf = 2; + + /* If we can't find anything reasonable, just assume 20MHZ. + * This is the clock frequency of the older sun4c's where I've + * been unable to find the clock-frequency PROM property. All + * other machines provide useful values it seems. + */ + if (fhz <= 5000000 || ccf < 1 || ccf > 8) { + fhz = 20000000; + ccf = 4; + } + + esp->cfact = (ccf == 8 ? 0 : ccf); + esp->cfreq = fhz; + esp->ccycle = ESP_HZ_TO_CYCLE(fhz); + esp->ctick = ESP_TICK(ccf, esp->ccycle); + esp->neg_defp = ESP_NEG_DEFP(fhz, ccf); + esp->sync_defp = SYNC_DEFP_SLOW; +} + +static const char *esp_chip_names[] = { + "ESP100", + "ESP100A", + "ESP236", + "FAS236", + "AM53C974", + "53CF9x-2", + "FAS100A", + "FAST", + "FASHME", +}; + +static struct scsi_transport_template *esp_transport_template; + +int scsi_esp_register(struct esp *esp) +{ + static int instance; + int err; + + if (!esp->num_tags) + esp->num_tags = ESP_DEFAULT_TAGS; + esp->host->transportt = esp_transport_template; + esp->host->max_lun = ESP_MAX_LUN; + esp->host->cmd_per_lun = 2; + esp->host->unique_id = instance; + + esp_set_clock_params(esp); + + esp_get_revision(esp); + + esp_init_swstate(esp); + + esp_bootup_reset(esp); + + dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n", + esp->host->unique_id, esp->regs, esp->dma_regs, + esp->host->irq); + dev_printk(KERN_INFO, esp->dev, + "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n", + esp->host->unique_id, esp_chip_names[esp->rev], + esp->cfreq / 1000000, esp->cfact, esp->scsi_id); + + /* Let the SCSI bus reset settle. 
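+	 * esp_bus_reset_settle is a module parameter given in seconds,
+	 * declared with module_param() further down in this file.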
*/ + ssleep(esp_bus_reset_settle); + + err = scsi_add_host(esp->host, esp->dev); + if (err) + return err; + + instance++; + + scsi_scan_host(esp->host); + + return 0; +} +EXPORT_SYMBOL(scsi_esp_register); + +void scsi_esp_unregister(struct esp *esp) +{ + scsi_remove_host(esp->host); +} +EXPORT_SYMBOL(scsi_esp_unregister); + +static int esp_target_alloc(struct scsi_target *starget) +{ + struct esp *esp = shost_priv(dev_to_shost(&starget->dev)); + struct esp_target_data *tp = &esp->target[starget->id]; + + tp->starget = starget; + + return 0; +} + +static void esp_target_destroy(struct scsi_target *starget) +{ + struct esp *esp = shost_priv(dev_to_shost(&starget->dev)); + struct esp_target_data *tp = &esp->target[starget->id]; + + tp->starget = NULL; +} + +static int esp_slave_alloc(struct scsi_device *dev) +{ + struct esp *esp = shost_priv(dev->host); + struct esp_target_data *tp = &esp->target[dev->id]; + struct esp_lun_data *lp; + + lp = kzalloc(sizeof(*lp), GFP_KERNEL); + if (!lp) + return -ENOMEM; + dev->hostdata = lp; + + spi_min_period(tp->starget) = esp->min_period; + spi_max_offset(tp->starget) = 15; + + if (esp->flags & ESP_FLAG_WIDE_CAPABLE) + spi_max_width(tp->starget) = 1; + else + spi_max_width(tp->starget) = 0; + + return 0; +} + +static int esp_slave_configure(struct scsi_device *dev) +{ + struct esp *esp = shost_priv(dev->host); + struct esp_target_data *tp = &esp->target[dev->id]; + + if (dev->tagged_supported) + scsi_change_queue_depth(dev, esp->num_tags); + + tp->flags |= ESP_TGT_DISCONNECT; + + if (!spi_initial_dv(dev->sdev_target)) + spi_dv_device(dev); + + return 0; +} + +static void esp_slave_destroy(struct scsi_device *dev) +{ + struct esp_lun_data *lp = dev->hostdata; + + kfree(lp); + dev->hostdata = NULL; +} + +static int esp_eh_abort_handler(struct scsi_cmnd *cmd) +{ + struct esp *esp = shost_priv(cmd->device->host); + struct esp_cmd_entry *ent, *tmp; + struct completion eh_done; + unsigned long flags; + + /* XXX This helps a lot with debugging but might be a bit + * XXX much for the final driver. + */ + spin_lock_irqsave(esp->host->host_lock, flags); + shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n", + cmd, cmd->cmnd[0]); + ent = esp->active_cmd; + if (ent) + shost_printk(KERN_ERR, esp->host, + "Current command [%p:%02x]\n", + ent->cmd, ent->cmd->cmnd[0]); + list_for_each_entry(ent, &esp->queued_cmds, list) { + shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n", + ent->cmd, ent->cmd->cmnd[0]); + } + list_for_each_entry(ent, &esp->active_cmds, list) { + shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n", + ent->cmd, ent->cmd->cmnd[0]); + } + esp_dump_cmd_log(esp); + spin_unlock_irqrestore(esp->host->host_lock, flags); + + spin_lock_irqsave(esp->host->host_lock, flags); + + ent = NULL; + list_for_each_entry(tmp, &esp->queued_cmds, list) { + if (tmp->cmd == cmd) { + ent = tmp; + break; + } + } + + if (ent) { + /* Easiest case, we didn't even issue the command + * yet so it is trivial to abort. + */ + list_del(&ent->list); + + cmd->result = DID_ABORT << 16; + scsi_done(cmd); + + esp_put_ent(esp, ent); + + goto out_success; + } + + init_completion(&eh_done); + + ent = esp->active_cmd; + if (ent && ent->cmd == cmd) { + /* Command is the currently active command on + * the bus. If we already have an output message + * pending, no dice. + */ + if (esp->msg_out_len) + goto out_failure; + + /* Send out an abort, encouraging the target to + * go to MSGOUT phase by asserting ATN. 
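+		 *
+		 * If the target never enters MESSAGE OUT phase the eh_done
+		 * completion is never signalled and the 5 second wait below
+		 * reports the abort as FAILED.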
+ */ + esp->msg_out[0] = ABORT_TASK_SET; + esp->msg_out_len = 1; + ent->eh_done = &eh_done; + + scsi_esp_cmd(esp, ESP_CMD_SATN); + } else { + /* The command is disconnected. This is not easy to + * abort. For now we fail and let the scsi error + * handling layer go try a scsi bus reset or host + * reset. + * + * What we could do is put together a scsi command + * solely for the purpose of sending an abort message + * to the target. Coming up with all the code to + * cook up scsi commands, special case them everywhere, + * etc. is for questionable gain and it would be better + * if the generic scsi error handling layer could do at + * least some of that for us. + * + * Anyways this is an area for potential future improvement + * in this driver. + */ + goto out_failure; + } + + spin_unlock_irqrestore(esp->host->host_lock, flags); + + if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) { + spin_lock_irqsave(esp->host->host_lock, flags); + ent->eh_done = NULL; + spin_unlock_irqrestore(esp->host->host_lock, flags); + + return FAILED; + } + + return SUCCESS; + +out_success: + spin_unlock_irqrestore(esp->host->host_lock, flags); + return SUCCESS; + +out_failure: + /* XXX This might be a good location to set ESP_TGT_BROKEN + * XXX since we know which target/lun in particular is + * XXX causing trouble. + */ + spin_unlock_irqrestore(esp->host->host_lock, flags); + return FAILED; +} + +static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd) +{ + struct esp *esp = shost_priv(cmd->device->host); + struct completion eh_reset; + unsigned long flags; + + init_completion(&eh_reset); + + spin_lock_irqsave(esp->host->host_lock, flags); + + esp->eh_reset = &eh_reset; + + /* XXX This is too simple... We should add lots of + * XXX checks here so that if we find that the chip is + * XXX very wedged we return failure immediately so + * XXX that we can perform a full chip reset. + */ + esp->flags |= ESP_FLAG_RESETTING; + scsi_esp_cmd(esp, ESP_CMD_RS); + + spin_unlock_irqrestore(esp->host->host_lock, flags); + + ssleep(esp_bus_reset_settle); + + if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) { + spin_lock_irqsave(esp->host->host_lock, flags); + esp->eh_reset = NULL; + spin_unlock_irqrestore(esp->host->host_lock, flags); + + return FAILED; + } + + return SUCCESS; +} + +/* All bets are off, reset the entire device. 
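+ * This is a full chip and bus reset via esp_bootup_reset() plus software
+ * state cleanup via esp_reset_cleanup(), followed by the same bus-settle
+ * delay as the bus-reset handler.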
*/ +static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd) +{ + struct esp *esp = shost_priv(cmd->device->host); + unsigned long flags; + + spin_lock_irqsave(esp->host->host_lock, flags); + esp_bootup_reset(esp); + esp_reset_cleanup(esp); + spin_unlock_irqrestore(esp->host->host_lock, flags); + + ssleep(esp_bus_reset_settle); + + return SUCCESS; +} + +static const char *esp_info(struct Scsi_Host *host) +{ + return "esp"; +} + +const struct scsi_host_template scsi_esp_template = { + .module = THIS_MODULE, + .name = "esp", + .info = esp_info, + .queuecommand = esp_queuecommand, + .target_alloc = esp_target_alloc, + .target_destroy = esp_target_destroy, + .slave_alloc = esp_slave_alloc, + .slave_configure = esp_slave_configure, + .slave_destroy = esp_slave_destroy, + .eh_abort_handler = esp_eh_abort_handler, + .eh_bus_reset_handler = esp_eh_bus_reset_handler, + .eh_host_reset_handler = esp_eh_host_reset_handler, + .can_queue = 7, + .this_id = 7, + .sg_tablesize = SG_ALL, + .max_sectors = 0xffff, + .skip_settle_delay = 1, + .cmd_size = sizeof(struct esp_cmd_priv), +}; +EXPORT_SYMBOL(scsi_esp_template); + +static void esp_get_signalling(struct Scsi_Host *host) +{ + struct esp *esp = shost_priv(host); + enum spi_signal_type type; + + if (esp->flags & ESP_FLAG_DIFFERENTIAL) + type = SPI_SIGNAL_HVD; + else + type = SPI_SIGNAL_SE; + + spi_signalling(host) = type; +} + +static void esp_set_offset(struct scsi_target *target, int offset) +{ + struct Scsi_Host *host = dev_to_shost(target->dev.parent); + struct esp *esp = shost_priv(host); + struct esp_target_data *tp = &esp->target[target->id]; + + if (esp->flags & ESP_FLAG_DISABLE_SYNC) + tp->nego_goal_offset = 0; + else + tp->nego_goal_offset = offset; + tp->flags |= ESP_TGT_CHECK_NEGO; +} + +static void esp_set_period(struct scsi_target *target, int period) +{ + struct Scsi_Host *host = dev_to_shost(target->dev.parent); + struct esp *esp = shost_priv(host); + struct esp_target_data *tp = &esp->target[target->id]; + + tp->nego_goal_period = period; + tp->flags |= ESP_TGT_CHECK_NEGO; +} + +static void esp_set_width(struct scsi_target *target, int width) +{ + struct Scsi_Host *host = dev_to_shost(target->dev.parent); + struct esp *esp = shost_priv(host); + struct esp_target_data *tp = &esp->target[target->id]; + + tp->nego_goal_width = (width ? 1 : 0); + tp->flags |= ESP_TGT_CHECK_NEGO; +} + +static struct spi_function_template esp_transport_ops = { + .set_offset = esp_set_offset, + .show_offset = 1, + .set_period = esp_set_period, + .show_period = 1, + .set_width = esp_set_width, + .show_width = 1, + .get_signalling = esp_get_signalling, +}; + +static int __init esp_init(void) +{ + esp_transport_template = spi_attach_transport(&esp_transport_ops); + if (!esp_transport_template) + return -ENODEV; + + return 0; +} + +static void __exit esp_exit(void) +{ + spi_release_transport(esp_transport_template); +} + +MODULE_DESCRIPTION("ESP SCSI driver core"); +MODULE_AUTHOR("David S. 
Miller (davem@davemloft.net)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +module_param(esp_bus_reset_settle, int, 0); +MODULE_PARM_DESC(esp_bus_reset_settle, + "ESP scsi bus reset delay in seconds"); + +module_param(esp_debug, int, 0); +MODULE_PARM_DESC(esp_debug, +"ESP bitmapped debugging message enable value:\n" +" 0x00000001 Log interrupt events\n" +" 0x00000002 Log scsi commands\n" +" 0x00000004 Log resets\n" +" 0x00000008 Log message in events\n" +" 0x00000010 Log message out events\n" +" 0x00000020 Log command completion\n" +" 0x00000040 Log disconnects\n" +" 0x00000080 Log data start\n" +" 0x00000100 Log data done\n" +" 0x00000200 Log reconnects\n" +" 0x00000400 Log auto-sense data\n" +); + +module_init(esp_init); +module_exit(esp_exit); + +#ifdef CONFIG_SCSI_ESP_PIO +static inline unsigned int esp_wait_for_fifo(struct esp *esp) +{ + int i = 500000; + + do { + unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; + + if (fbytes) + return fbytes; + + udelay(1); + } while (--i); + + shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n", + esp_read8(ESP_STATUS)); + return 0; +} + +static inline int esp_wait_for_intr(struct esp *esp) +{ + int i = 500000; + + do { + esp->sreg = esp_read8(ESP_STATUS); + if (esp->sreg & ESP_STAT_INTR) + return 0; + + udelay(1); + } while (--i); + + shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n", + esp->sreg); + return 1; +} + +#define ESP_FIFO_SIZE 16 + +void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, + u32 dma_count, int write, u8 cmd) +{ + u8 phase = esp->sreg & ESP_STAT_PMASK; + + cmd &= ~ESP_CMD_DMA; + esp->send_cmd_error = 0; + + if (write) { + u8 *dst = (u8 *)addr; + u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV); + + scsi_esp_cmd(esp, cmd); + + while (1) { + if (!esp_wait_for_fifo(esp)) + break; + + *dst++ = readb(esp->fifo_reg); + --esp_count; + + if (!esp_count) + break; + + if (esp_wait_for_intr(esp)) { + esp->send_cmd_error = 1; + break; + } + + if ((esp->sreg & ESP_STAT_PMASK) != phase) + break; + + esp->ireg = esp_read8(ESP_INTRPT); + if (esp->ireg & mask) { + esp->send_cmd_error = 1; + break; + } + + if (phase == ESP_MIP) + esp_write8(ESP_CMD_MOK, ESP_CMD); + + esp_write8(ESP_CMD_TI, ESP_CMD); + } + } else { + unsigned int n = ESP_FIFO_SIZE; + u8 *src = (u8 *)addr; + + scsi_esp_cmd(esp, ESP_CMD_FLUSH); + + if (n > esp_count) + n = esp_count; + writesb(esp->fifo_reg, src, n); + src += n; + esp_count -= n; + + scsi_esp_cmd(esp, cmd); + + while (esp_count) { + if (esp_wait_for_intr(esp)) { + esp->send_cmd_error = 1; + break; + } + + if ((esp->sreg & ESP_STAT_PMASK) != phase) + break; + + esp->ireg = esp_read8(ESP_INTRPT); + if (esp->ireg & ~ESP_INTR_BSERV) { + esp->send_cmd_error = 1; + break; + } + + n = ESP_FIFO_SIZE - + (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES); + + if (n > esp_count) + n = esp_count; + writesb(esp->fifo_reg, src, n); + src += n; + esp_count -= n; + + esp_write8(ESP_CMD_TI, ESP_CMD); + } + } + + esp->send_cmd_residual = esp_count; +} +EXPORT_SYMBOL(esp_send_pio_cmd); +#endif diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h new file mode 100644 index 000000000..00cd7c0cc --- /dev/null +++ b/drivers/scsi/esp_scsi.h @@ -0,0 +1,585 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* esp_scsi.h: Defines and structures for the ESP driver. + * + * Copyright (C) 2007 David S. 
Miller (davem@davemloft.net) + */ + +#ifndef _ESP_SCSI_H +#define _ESP_SCSI_H + + /* Access Description Offset */ +#define ESP_TCLOW 0x00UL /* rw Low bits transfer count 0x00 */ +#define ESP_TCMED 0x01UL /* rw Mid bits transfer count 0x04 */ +#define ESP_FDATA 0x02UL /* rw FIFO data bits 0x08 */ +#define ESP_CMD 0x03UL /* rw SCSI command bits 0x0c */ +#define ESP_STATUS 0x04UL /* ro ESP status register 0x10 */ +#define ESP_BUSID ESP_STATUS /* wo BusID for sel/resel 0x10 */ +#define ESP_INTRPT 0x05UL /* ro Kind of interrupt 0x14 */ +#define ESP_TIMEO ESP_INTRPT /* wo Timeout for sel/resel 0x14 */ +#define ESP_SSTEP 0x06UL /* ro Sequence step register 0x18 */ +#define ESP_STP ESP_SSTEP /* wo Transfer period/sync 0x18 */ +#define ESP_FFLAGS 0x07UL /* ro Bits current FIFO info 0x1c */ +#define ESP_SOFF ESP_FFLAGS /* wo Sync offset 0x1c */ +#define ESP_CFG1 0x08UL /* rw First cfg register 0x20 */ +#define ESP_CFACT 0x09UL /* wo Clock conv factor 0x24 */ +#define ESP_STATUS2 ESP_CFACT /* ro HME status2 register 0x24 */ +#define ESP_CTEST 0x0aUL /* wo Chip test register 0x28 */ +#define ESP_CFG2 0x0bUL /* rw Second cfg register 0x2c */ +#define ESP_CFG3 0x0cUL /* rw Third cfg register 0x30 */ +#define ESP_CFG4 0x0dUL /* rw Fourth cfg register 0x34 */ +#define ESP_TCHI 0x0eUL /* rw High bits transf count 0x38 */ +#define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */ +#define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */ +#define ESP_FGRND 0x0fUL /* rw Data base for fifo 0x3c */ +#define FAS_RHI ESP_FGRND /* rw HME extended counter 0x3c */ + +#define SBUS_ESP_REG_SIZE 0x40UL + +/* Bitfield meanings for the above registers. */ + +/* ESP config reg 1, read-write, found on all ESP chips */ +#define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */ +#define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */ +#define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */ +#define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */ +#define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */ +#define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */ + +/* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */ +#define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236) */ +#define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236) */ +#define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */ +#define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tgtmode) */ +#define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */ +#define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */ +#define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236) */ +#define ESP_CONFIG2_DISPINT 0x20 /* Disable pause irq (hme) */ +#define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,216) */ +#define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (236) */ +#define ESP_CONFIG2_MKDONE 0x40 /* HME magic feature */ +#define ESP_CONFIG2_HME32 0x80 /* HME 32 extended */ +#define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... 
*/ + +/* ESP config register 3 read-write, found only esp236+fas236+fas100a+hme chips */ +#define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/hme) */ +#define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236) */ +#define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a/hme) */ +#define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236) */ +#define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a/hme) */ +#define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236) */ +#define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a/hme) */ +#define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236) */ +#define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a/hme) */ +#define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236) */ +#define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236) */ +#define ESP_CONFIG3_IDBIT3 0x20 /* Bit 3 of HME SCSI-ID (hme) */ +#define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236) */ +#define ESP_CONFIG3_EWIDE 0x40 /* Enable Wide-SCSI (hme) */ +#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */ +#define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */ + +/* ESP config register 4 read-write */ +#define ESP_CONFIG4_BBTE 0x01 /* Back-to-back transfers (fsc) */ +#define ESP_CONGIG4_TEST 0x02 /* Transfer counter test mode (fsc) */ +#define ESP_CONFIG4_RADE 0x04 /* Active negation (am53c974/fsc) */ +#define ESP_CONFIG4_RAE 0x08 /* Act. negation REQ/ACK (am53c974) */ +#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature (am53c974) */ +#define ESP_CONFIG4_GE0 0x40 /* Glitch eater bit 0 (am53c974) */ +#define ESP_CONFIG4_GE1 0x80 /* Glitch eater bit 1 (am53c974) */ + +#define ESP_CONFIG_GE_12NS (0) +#define ESP_CONFIG_GE_25NS (ESP_CONFIG_GE1) +#define ESP_CONFIG_GE_35NS (ESP_CONFIG_GE0) +#define ESP_CONFIG_GE_0NS (ESP_CONFIG_GE0 | ESP_CONFIG_GE1) + +/* ESP command register read-write */ +/* Group 1 commands: These may be sent at any point in time to the ESP + * chip. None of them can generate interrupts 'cept + * the "SCSI bus reset" command if you have not disabled + * SCSI reset interrupts in the config1 ESP register. + */ +#define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */ +#define ESP_CMD_FLUSH 0x01 /* FIFO Flush */ +#define ESP_CMD_RC 0x02 /* Chip reset */ +#define ESP_CMD_RS 0x03 /* SCSI bus reset */ + +/* Group 2 commands: ESP must be an initiator and connected to a target + * for these commands to work. + */ +#define ESP_CMD_TI 0x10 /* Transfer Information */ +#define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */ +#define ESP_CMD_MOK 0x12 /* Message okie-dokie */ +#define ESP_CMD_TPAD 0x18 /* Transfer Pad */ +#define ESP_CMD_SATN 0x1a /* Set ATN */ +#define ESP_CMD_RATN 0x1b /* De-assert ATN */ + +/* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected + * to a target as the initiator for these commands to work. 
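+ *
+ * Whichever group a command belongs to, it is started by writing its code
+ * to the ESP_CMD register, optionally ORed with ESP_CMD_DMA when the
+ * transfer is driven by the external DMA engine.  The driver core issues
+ * them through scsi_esp_cmd(), for instance:
+ *
+ *	scsi_esp_cmd(esp, ESP_CMD_SATN);
+ *	scsi_esp_cmd(esp, ESP_CMD_TI | ESP_CMD_DMA);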
+ */ +#define ESP_CMD_SMSG 0x20 /* Send message */ +#define ESP_CMD_SSTAT 0x21 /* Send status */ +#define ESP_CMD_SDATA 0x22 /* Send data */ +#define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */ +#define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */ +#define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */ +#define ESP_CMD_DCNCT 0x27 /* Disconnect */ +#define ESP_CMD_RMSG 0x28 /* Receive Message */ +#define ESP_CMD_RCMD 0x29 /* Receive Command */ +#define ESP_CMD_RDATA 0x2a /* Receive Data */ +#define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */ + +/* Group 4 commands: The ESP must be in the disconnected state and must + * not be connected to any targets as initiator for + * these commands to work. + */ +#define ESP_CMD_RSEL 0x40 /* Reselect */ +#define ESP_CMD_SEL 0x41 /* Select w/o ATN */ +#define ESP_CMD_SELA 0x42 /* Select w/ATN */ +#define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */ +#define ESP_CMD_ESEL 0x44 /* Enable selection */ +#define ESP_CMD_DSEL 0x45 /* Disable selections */ +#define ESP_CMD_SA3 0x46 /* Select w/ATN3 */ +#define ESP_CMD_RSEL3 0x47 /* Reselect3 */ + +/* This bit enables the ESP's DMA on the SBus */ +#define ESP_CMD_DMA 0x80 /* Do DMA? */ + +/* ESP status register read-only */ +#define ESP_STAT_PIO 0x01 /* IO phase bit */ +#define ESP_STAT_PCD 0x02 /* CD phase bit */ +#define ESP_STAT_PMSG 0x04 /* MSG phase bit */ +#define ESP_STAT_PMASK 0x07 /* Mask of phase bits */ +#define ESP_STAT_TDONE 0x08 /* Transfer Completed */ +#define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */ +#define ESP_STAT_PERR 0x20 /* Parity error */ +#define ESP_STAT_SPAM 0x40 /* Real bad error */ +/* This indicates the 'interrupt pending' condition on esp236, it is a reserved + * bit on other revs of the ESP. + */ +#define ESP_STAT_INTR 0x80 /* Interrupt */ + +/* The status register can be masked with ESP_STAT_PMASK and compared + * with the following values to determine the current phase the ESP + * (at least thinks it) is in. For our purposes we also add our own + * software 'done' bit for our phase management engine. 
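+ *
+ * For example, the PIO path (esp_send_pio_cmd()) snapshots and re-checks
+ * the phase like this:
+ *
+ *	u8 phase = esp->sreg & ESP_STAT_PMASK;
+ *
+ *	if (phase == ESP_MIP)
+ *		... message-in specific handling ...
+ *	if ((esp->sreg & ESP_STAT_PMASK) != phase)
+ *		break;			-- target changed phase, stop transferring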
+ */ +#define ESP_DOP (0) /* Data Out */ +#define ESP_DIP (ESP_STAT_PIO) /* Data In */ +#define ESP_CMDP (ESP_STAT_PCD) /* Command */ +#define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */ +#define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */ +#define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */ + +/* HME only: status 2 register */ +#define ESP_STAT2_SCHBIT 0x01 /* Upper bits 3-7 of sstep enabled */ +#define ESP_STAT2_FFLAGS 0x02 /* The fifo flags are now latched */ +#define ESP_STAT2_XCNT 0x04 /* The transfer counter is latched */ +#define ESP_STAT2_CREGA 0x08 /* The command reg is active now */ +#define ESP_STAT2_WIDE 0x10 /* Interface on this adapter is wide */ +#define ESP_STAT2_F1BYTE 0x20 /* There is one byte at top of fifo */ +#define ESP_STAT2_FMSB 0x40 /* Next byte in fifo is most significant */ +#define ESP_STAT2_FEMPTY 0x80 /* FIFO is empty */ + +/* ESP interrupt register read-only */ +#define ESP_INTR_S 0x01 /* Select w/o ATN */ +#define ESP_INTR_SATN 0x02 /* Select w/ATN */ +#define ESP_INTR_RSEL 0x04 /* Reselected */ +#define ESP_INTR_FDONE 0x08 /* Function done */ +#define ESP_INTR_BSERV 0x10 /* Bus service */ +#define ESP_INTR_DC 0x20 /* Disconnect */ +#define ESP_INTR_IC 0x40 /* Illegal command given */ +#define ESP_INTR_SR 0x80 /* SCSI bus reset detected */ + +/* ESP sequence step register read-only */ +#define ESP_STEP_VBITS 0x07 /* Valid bits */ +#define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */ +#define ESP_STEP_SID 0x01 /* One msg byte sent */ +#define ESP_STEP_NCMD 0x02 /* Was not in command phase */ +#define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd + * bytes to be lost + */ +#define ESP_STEP_FINI4 0x04 /* Command was sent ok */ + +/* Ho hum, some ESP's set the step register to this as well... */ +#define ESP_STEP_FINI5 0x05 +#define ESP_STEP_FINI6 0x06 +#define ESP_STEP_FINI7 0x07 + +/* ESP chip-test register read-write */ +#define ESP_TEST_TARG 0x01 /* Target test mode */ +#define ESP_TEST_INI 0x02 /* Initiator test mode */ +#define ESP_TEST_TS 0x04 /* Tristate test mode */ + +/* ESP unique ID register read-only, found on fas236+fas100a only */ +#define ESP_UID_FAM 0xf8 /* ESP family bitmask */ + +#define ESP_FAMILY(uid) (((uid) & ESP_UID_FAM) >> 3) + +/* Values for the ESP family bits */ +#define ESP_UID_F100A 0x00 /* ESP FAS100A */ +#define ESP_UID_F236 0x02 /* ESP FAS236 */ +#define ESP_UID_HME 0x0a /* FAS HME */ +#define ESP_UID_FSC 0x14 /* NCR/Symbios Logic 53CF9x-2 */ + +/* ESP fifo flags register read-only */ +/* Note that the following implies a 16 byte FIFO on the ESP. */ +#define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */ +#define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100) */ +#define ESP_FF_SSTEP 0xe0 /* Sequence step */ + +/* ESP clock conversion factor register write-only */ +#define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */ +#define ESP_CCF_NEVER 0x01 /* Set it to this and die */ +#define ESP_CCF_F2 0x02 /* 10MHz */ +#define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */ +#define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */ +#define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */ +#define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */ +#define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */ + +/* HME only... 
*/ +#define ESP_BUSID_RESELID 0x10 +#define ESP_BUSID_CTR32BIT 0x40 + +#define ESP_BUS_TIMEOUT 250 /* In milli-seconds */ +#define ESP_TIMEO_CONST 8192 +#define ESP_NEG_DEFP(mhz, cfact) \ + ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact))) +#define ESP_HZ_TO_CYCLE(hertz) ((1000000000) / ((hertz) / 1000)) +#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000)) + +/* For slow to medium speed input clock rates we shoot for 5mb/s, but for high + * input clock rates we try to do 10mb/s although I don't think a transfer can + * even run that fast with an ESP even with DMA2 scatter gather pipelining. + */ +#define SYNC_DEFP_SLOW 0x32 /* 5mb/s */ +#define SYNC_DEFP_FAST 0x19 /* 10mb/s */ + +struct esp_cmd_priv { + int num_sg; + int cur_residue; + struct scatterlist *prv_sg; + struct scatterlist *cur_sg; + int tot_residue; +}; + +#define ESP_CMD_PRIV(cmd) ((struct esp_cmd_priv *)scsi_cmd_priv(cmd)) + +/* NOTE: this enum is ordered based on chip features! */ +enum esp_rev { + ESP100, /* NCR53C90 - very broken */ + ESP100A, /* NCR53C90A */ + ESP236, + FAS236, + PCSCSI, /* AM53c974 */ + FSC, /* NCR/Symbios Logic 53CF9x-2 */ + FAS100A, + FAST, + FASHME, +}; + +struct esp_cmd_entry { + struct list_head list; + + struct scsi_cmnd *cmd; + + unsigned int saved_cur_residue; + struct scatterlist *saved_prv_sg; + struct scatterlist *saved_cur_sg; + unsigned int saved_tot_residue; + + u8 flags; +#define ESP_CMD_FLAG_WRITE 0x01 /* DMA is a write */ +#define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */ +#define ESP_CMD_FLAG_RESIDUAL 0x08 /* AM53c974 BLAST residual */ + + u8 tag[2]; + u8 orig_tag[2]; + + u8 status; + u8 message; + + unsigned char *sense_ptr; + unsigned char *saved_sense_ptr; + dma_addr_t sense_dma; + + struct completion *eh_done; +}; + +#define ESP_DEFAULT_TAGS 16 + +#define ESP_MAX_TARGET 16 +#define ESP_MAX_LUN 8 +#define ESP_MAX_TAG 256 + +struct esp_lun_data { + struct esp_cmd_entry *non_tagged_cmd; + int num_tagged; + int hold; + struct esp_cmd_entry *tagged_cmds[ESP_MAX_TAG]; +}; + +struct esp_target_data { + /* These are the ESP_STP, ESP_SOFF, and ESP_CFG3 register values which + * match the currently negotiated settings for this target. The SCSI + * protocol values are maintained in spi_{offset,period,wide}(starget). + */ + u8 esp_period; + u8 esp_offset; + u8 esp_config3; + + u8 flags; +#define ESP_TGT_WIDE 0x01 +#define ESP_TGT_DISCONNECT 0x02 +#define ESP_TGT_NEGO_WIDE 0x04 +#define ESP_TGT_NEGO_SYNC 0x08 +#define ESP_TGT_CHECK_NEGO 0x40 +#define ESP_TGT_BROKEN 0x80 + + /* When ESP_TGT_CHECK_NEGO is set, on the next scsi command to this + * device we will try to negotiate the following parameters. + */ + u8 nego_goal_period; + u8 nego_goal_offset; + u8 nego_goal_width; + u8 nego_goal_tags; + + struct scsi_target *starget; +}; + +struct esp_event_ent { + u8 type; +#define ESP_EVENT_TYPE_EVENT 0x01 +#define ESP_EVENT_TYPE_CMD 0x02 + u8 val; + + u8 sreg; + u8 seqreg; + u8 sreg2; + u8 ireg; + u8 select_state; + u8 event; + u8 __pad; +}; + +struct esp; +struct esp_driver_ops { + /* Read and write the ESP 8-bit registers. On some + * applications of the ESP chip the registers are at 4-byte + * instead of 1-byte intervals. + */ + void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg); + u8 (*esp_read8)(struct esp *esp, unsigned long reg); + + /* Return non-zero if there is an IRQ pending. Usually this + * status bit lives in the DMA controller sitting in front of + * the ESP. This has to be accurate or else the ESP interrupt + * handler will not run. 
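+ *
+ * A minimal front-end sketch, with the DMA register and bit names made up
+ * purely for illustration (every real front-end uses its own):
+ *
+ *	static int my_dma_irq_pending(struct esp *esp)
+ *	{
+ *		return my_dma_read32(esp, MY_DMA_CSR) & MY_DMA_CSR_INT_PENDING;
+ *	}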
+ */ + int (*irq_pending)(struct esp *esp); + + /* Return the maximum allowable size of a DMA transfer for a + * given buffer. + */ + u32 (*dma_length_limit)(struct esp *esp, u32 dma_addr, + u32 dma_len); + + /* Reset the DMA engine entirely. On return, ESP interrupts + * should be enabled. Often the interrupt enabling is + * controlled in the DMA engine. + */ + void (*reset_dma)(struct esp *esp); + + /* Drain any pending DMA in the DMA engine after a transfer. + * This is for writes to memory. + */ + void (*dma_drain)(struct esp *esp); + + /* Invalidate the DMA engine after a DMA transfer. */ + void (*dma_invalidate)(struct esp *esp); + + /* Setup an ESP command that will use a DMA transfer. + * The 'esp_count' specifies what transfer length should be + * programmed into the ESP transfer counter registers, whereas + * the 'dma_count' is the length that should be programmed into + * the DMA controller. Usually they are the same. If 'write' + * is non-zero, this transfer is a write into memory. 'cmd' + * holds the ESP command that should be issued by calling + * scsi_esp_cmd() at the appropriate time while programming + * the DMA hardware. + */ + void (*send_dma_cmd)(struct esp *esp, u32 dma_addr, u32 esp_count, + u32 dma_count, int write, u8 cmd); + + /* Return non-zero if the DMA engine is reporting an error + * currently. + */ + int (*dma_error)(struct esp *esp); +}; + +#define ESP_MAX_MSG_SZ 8 +#define ESP_EVENT_LOG_SZ 32 + +#define ESP_QUICKIRQ_LIMIT 100 +#define ESP_RESELECT_TAG_LIMIT 2500 + +struct esp { + void __iomem *regs; + void __iomem *dma_regs; + + const struct esp_driver_ops *ops; + + struct Scsi_Host *host; + struct device *dev; + + struct esp_cmd_entry *active_cmd; + + struct list_head queued_cmds; + struct list_head active_cmds; + + u8 *command_block; + dma_addr_t command_block_dma; + + unsigned int data_dma_len; + + /* The following are used to determine the cause of an IRQ. Upon every + * IRQ entry we synchronize these with the hardware registers. + */ + u8 sreg; + u8 seqreg; + u8 sreg2; + u8 ireg; + + u32 prev_hme_dmacsr; + u8 prev_soff; + u8 prev_stp; + u8 prev_cfg3; + u8 num_tags; + + struct list_head esp_cmd_pool; + + struct esp_target_data target[ESP_MAX_TARGET]; + + int fifo_cnt; + u8 fifo[16]; + + struct esp_event_ent esp_event_log[ESP_EVENT_LOG_SZ]; + int esp_event_cur; + + u8 msg_out[ESP_MAX_MSG_SZ]; + int msg_out_len; + + u8 msg_in[ESP_MAX_MSG_SZ]; + int msg_in_len; + + u8 bursts; + u8 config1; + u8 config2; + u8 config4; + + u8 scsi_id; + u32 scsi_id_mask; + + enum esp_rev rev; + + u32 flags; +#define ESP_FLAG_DIFFERENTIAL 0x00000001 +#define ESP_FLAG_RESETTING 0x00000002 +#define ESP_FLAG_WIDE_CAPABLE 0x00000008 +#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010 +#define ESP_FLAG_DISABLE_SYNC 0x00000020 +#define ESP_FLAG_USE_FIFO 0x00000040 +#define ESP_FLAG_NO_DMA_MAP 0x00000080 + + u8 select_state; +#define ESP_SELECT_NONE 0x00 /* Not selecting */ +#define ESP_SELECT_BASIC 0x01 /* Select w/o MSGOUT phase */ +#define ESP_SELECT_MSGOUT 0x02 /* Select with MSGOUT */ + + /* When we are not selecting, we are expecting an event. 
*/ + u8 event; +#define ESP_EVENT_NONE 0x00 +#define ESP_EVENT_CMD_START 0x01 +#define ESP_EVENT_CMD_DONE 0x02 +#define ESP_EVENT_DATA_IN 0x03 +#define ESP_EVENT_DATA_OUT 0x04 +#define ESP_EVENT_DATA_DONE 0x05 +#define ESP_EVENT_MSGIN 0x06 +#define ESP_EVENT_MSGIN_MORE 0x07 +#define ESP_EVENT_MSGIN_DONE 0x08 +#define ESP_EVENT_MSGOUT 0x09 +#define ESP_EVENT_MSGOUT_DONE 0x0a +#define ESP_EVENT_STATUS 0x0b +#define ESP_EVENT_FREE_BUS 0x0c +#define ESP_EVENT_CHECK_PHASE 0x0d +#define ESP_EVENT_RESET 0x10 + + /* Probed in esp_get_clock_params() */ + u32 cfact; + u32 cfreq; + u32 ccycle; + u32 ctick; + u32 neg_defp; + u32 sync_defp; + + /* Computed in esp_reset_esp() */ + u32 max_period; + u32 min_period; + u32 radelay; + + /* ESP_CMD_SELAS command state */ + u8 *cmd_bytes_ptr; + int cmd_bytes_left; + + struct completion *eh_reset; + + void *dma; + int dmarev; + + /* These are used by esp_send_pio_cmd() */ + u8 __iomem *fifo_reg; + int send_cmd_error; + u32 send_cmd_residual; +}; + +/* A front-end driver for the ESP chip should do the following in + * it's device probe routine: + * 1) Allocate the host and private area using scsi_host_alloc() + * with size 'sizeof(struct esp)'. The first argument to + * scsi_host_alloc() should be &scsi_esp_template. + * 2) Set host->max_id as appropriate. + * 3) Set esp->host to the scsi_host itself, and esp->dev + * to the device object pointer. + * 4) Hook up esp->ops to the front-end implementation. + * 5) If the ESP chip supports wide transfers, set ESP_FLAG_WIDE_CAPABLE + * in esp->flags. + * 6) Map the DMA and ESP chip registers. + * 7) DMA map the ESP command block, store the DMA address + * in esp->command_block_dma. + * 8) Register the scsi_esp_intr() interrupt handler. + * 9) Probe for and provide the following chip properties: + * esp->scsi_id (assign to esp->host->this_id too) + * esp->scsi_id_mask + * If ESP bus is differential, set ESP_FLAG_DIFFERENTIAL + * esp->cfreq + * DMA burst bit mask in esp->bursts, if necessary + * 10) Perform any actions necessary before the ESP device can + * be programmed for the first time. On some configs, for + * example, the DMA engine has to be reset before ESP can + * be programmed. + * 11) If necessary, call dev_set_drvdata() as needed. + * 12) Call scsi_esp_register() with prepared 'esp' structure. + * 13) Check scsi_esp_register() return value, release all resources + * if an error was returned. + */ +extern const struct scsi_host_template scsi_esp_template; +extern int scsi_esp_register(struct esp *); + +extern void scsi_esp_unregister(struct esp *); +extern irqreturn_t scsi_esp_intr(int, void *); +extern void scsi_esp_cmd(struct esp *, u8); + +extern void esp_send_pio_cmd(struct esp *esp, u32 dma_addr, u32 esp_count, + u32 dma_count, int write, u8 cmd); + +#endif /* !(_ESP_SCSI_H) */ diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile new file mode 100644 index 000000000..1183e80a0 --- /dev/null +++ b/drivers/scsi/fcoe/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_FCOE) += fcoe.o +obj-$(CONFIG_LIBFCOE) += libfcoe.o + +libfcoe-objs := fcoe_ctlr.o fcoe_transport.o fcoe_sysfs.o diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c new file mode 100644 index 000000000..f1429f270 --- /dev/null +++ b/drivers/scsi/fcoe/fcoe.c @@ -0,0 +1,2824 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. 
+ * + * Maintained at www.Open-FCoE.org + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "fcoe.h" + +MODULE_AUTHOR("Open-FCoE.org"); +MODULE_DESCRIPTION("FCoE"); +MODULE_LICENSE("GPL v2"); + +/* Performance tuning parameters for fcoe */ +static unsigned int fcoe_ddp_min = 4096; +module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \ + "Direct Data Placement (DDP)."); + +unsigned int fcoe_debug_logging; +module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); + +static unsigned int fcoe_e_d_tov = 2 * 1000; +module_param_named(e_d_tov, fcoe_e_d_tov, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(e_d_tov, "E_D_TOV in ms, default 2000"); + +static unsigned int fcoe_r_a_tov = 2 * 2 * 1000; +module_param_named(r_a_tov, fcoe_r_a_tov, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(r_a_tov, "R_A_TOV in ms, default 4000"); + +static DEFINE_MUTEX(fcoe_config_mutex); + +static struct workqueue_struct *fcoe_wq; + +/* fcoe host list */ +/* must only by accessed under the RTNL mutex */ +static LIST_HEAD(fcoe_hostlist); +static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); + +/* Function Prototypes */ +static int fcoe_reset(struct Scsi_Host *); +static int fcoe_xmit(struct fc_lport *, struct fc_frame *); +static int fcoe_rcv(struct sk_buff *, struct net_device *, + struct packet_type *, struct net_device *); +static void fcoe_percpu_clean(struct fc_lport *); +static int fcoe_link_ok(struct fc_lport *); + +static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); +static int fcoe_hostlist_add(const struct fc_lport *); +static void fcoe_hostlist_del(const struct fc_lport *); + +static int fcoe_device_notification(struct notifier_block *, ulong, void *); +static void fcoe_dev_setup(void); +static void fcoe_dev_cleanup(void); +static struct fcoe_interface +*fcoe_hostlist_lookup_port(const struct net_device *); + +static int fcoe_fip_recv(struct sk_buff *, struct net_device *, + struct packet_type *, struct net_device *); +static int fcoe_fip_vlan_recv(struct sk_buff *, struct net_device *, + struct packet_type *, struct net_device *); + +static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *); +static void fcoe_update_src_mac(struct fc_lport *, u8 *); +static u8 *fcoe_get_src_mac(struct fc_lport *); +static void fcoe_destroy_work(struct work_struct *); + +static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *, + unsigned int); +static int fcoe_ddp_done(struct fc_lport *, u16); +static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *, + unsigned int); +static int fcoe_dcb_app_notification(struct notifier_block *notifier, + ulong event, void *ptr); + +static bool fcoe_match(struct net_device *netdev); +static int fcoe_create(struct net_device *netdev, enum fip_mode fip_mode); +static int fcoe_destroy(struct net_device *netdev); +static int fcoe_enable(struct net_device *netdev); +static int fcoe_disable(struct net_device *netdev); + +/* fcoe_syfs control interface handlers */ +static int fcoe_ctlr_alloc(struct net_device *netdev); +static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev); +static void fcoe_ctlr_mode(struct fcoe_ctlr_device *ctlr_dev); + +static 
struct fc_seq *fcoe_elsct_send(struct fc_lport *, + u32 did, struct fc_frame *, + unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *, u32 timeout); +static void fcoe_recv_frame(struct sk_buff *skb); + +/* notification function for packets from net device */ +static struct notifier_block fcoe_notifier = { + .notifier_call = fcoe_device_notification, +}; + +/* notification function for DCB events */ +static struct notifier_block dcb_notifier = { + .notifier_call = fcoe_dcb_app_notification, +}; + +static struct scsi_transport_template *fcoe_nport_scsi_transport; +static struct scsi_transport_template *fcoe_vport_scsi_transport; + +static int fcoe_vport_destroy(struct fc_vport *); +static int fcoe_vport_create(struct fc_vport *, bool disabled); +static int fcoe_vport_disable(struct fc_vport *, bool disable); +static void fcoe_set_vport_symbolic_name(struct fc_vport *); +static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); +static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *); +static void fcoe_vport_remove(struct fc_lport *); + +static struct fcoe_sysfs_function_template fcoe_sysfs_templ = { + .set_fcoe_ctlr_mode = fcoe_ctlr_mode, + .set_fcoe_ctlr_enabled = fcoe_ctlr_enabled, + .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb, + .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb, + .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb, + .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb, + .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb, + .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb, + + .get_fcoe_fcf_selected = fcoe_fcf_get_selected, + .get_fcoe_fcf_vlan_id = fcoe_fcf_get_vlan_id, +}; + +static struct libfc_function_template fcoe_libfc_fcn_templ = { + .frame_send = fcoe_xmit, + .ddp_setup = fcoe_ddp_setup, + .ddp_done = fcoe_ddp_done, + .ddp_target = fcoe_ddp_target, + .elsct_send = fcoe_elsct_send, + .get_lesb = fcoe_get_lesb, + .lport_set_port_id = fcoe_set_port_id, +}; + +static struct fc_function_template fcoe_nport_fc_functions = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + .show_host_serial_number = 1, + .show_host_manufacturer = 1, + .show_host_model = 1, + .show_host_model_description = 1, + .show_host_hardware_version = 1, + .show_host_driver_version = 1, + .show_host_firmware_version = 1, + .show_host_optionrom_version = 1, + + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + + .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = fc_get_host_stats, + .issue_fc_host_lip = fcoe_reset, + + .terminate_rport_io = fc_rport_terminate_io, + + .vport_create = fcoe_vport_create, + .vport_delete = fcoe_vport_destroy, + .vport_disable = fcoe_vport_disable, + .set_vport_symbolic_name = fcoe_set_vport_symbolic_name, + + .bsg_request = fc_lport_bsg_request, +}; + +static struct fc_function_template fcoe_vport_fc_functions = { + .show_host_node_name = 1, + .show_host_port_name = 1, + 
.show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + .show_host_serial_number = 1, + .show_host_manufacturer = 1, + .show_host_model = 1, + .show_host_model_description = 1, + .show_host_hardware_version = 1, + .show_host_driver_version = 1, + .show_host_firmware_version = 1, + .show_host_optionrom_version = 1, + + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + + .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = fc_get_host_stats, + .issue_fc_host_lip = fcoe_reset, + + .terminate_rport_io = fc_rport_terminate_io, + + .bsg_request = fc_lport_bsg_request, +}; + +static const struct scsi_host_template fcoe_shost_template = { + .module = THIS_MODULE, + .name = "FCoE Driver", + .proc_name = FCOE_NAME, + .queuecommand = fc_queuecommand, + .eh_timed_out = fc_eh_timed_out, + .eh_abort_handler = fc_eh_abort, + .eh_device_reset_handler = fc_eh_device_reset, + .eh_host_reset_handler = fc_eh_host_reset, + .slave_alloc = fc_slave_alloc, + .change_queue_depth = scsi_change_queue_depth, + .this_id = -1, + .cmd_per_lun = 3, + .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS, + .sg_tablesize = SG_ALL, + .max_sectors = 0xffff, + .track_queue_depth = 1, + .cmd_size = sizeof(struct libfc_cmd_priv), +}; + +/** + * fcoe_interface_setup() - Setup a FCoE interface + * @fcoe: The new FCoE interface + * @netdev: The net device that the fcoe interface is on + * + * Returns : 0 for success + * Locking: must be called with the RTNL mutex held + */ +static int fcoe_interface_setup(struct fcoe_interface *fcoe, + struct net_device *netdev) +{ + struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); + struct netdev_hw_addr *ha; + struct net_device *real_dev; + static const u8 flogi_maddr[ETH_ALEN] = FC_FCOE_FLOGI_MAC; + const struct net_device_ops *ops; + + fcoe->netdev = netdev; + + /* Let LLD initialize for FCoE */ + ops = netdev->netdev_ops; + if (ops->ndo_fcoe_enable) { + if (ops->ndo_fcoe_enable(netdev)) + FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE" + " specific feature for LLD.\n"); + } + + /* Do not support for bonding device */ + if (netif_is_bond_master(netdev)) { + FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n"); + return -EOPNOTSUPP; + } + + /* look for SAN MAC address, if multiple SAN MACs exist, only + * use the first one for SPMA */ + real_dev = is_vlan_dev(netdev) ? vlan_dev_real_dev(netdev) : netdev; + fcoe->realdev = real_dev; + rcu_read_lock(); + for_each_dev_addr(real_dev, ha) { + if ((ha->type == NETDEV_HW_ADDR_T_SAN) && + (is_valid_ether_addr(ha->addr))) { + memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN); + fip->spma = 1; + break; + } + } + rcu_read_unlock(); + + /* setup Source Mac Address */ + if (!fip->spma) + memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len); + + /* + * Add FCoE MAC address as second unicast MAC address + * or enter promiscuous mode if not capable of listening + * for multiple unicast MACs. 
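+ * Every address added below has a matching dev_uc_del()/dev_mc_del() in
+ * fcoe_interface_remove(), keeping setup and teardown symmetric.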
+ */ + dev_uc_add(netdev, flogi_maddr); + if (fip->spma) + dev_uc_add(netdev, fip->ctl_src_addr); + if (fip->mode == FIP_MODE_VN2VN) { + dev_mc_add(netdev, FIP_ALL_VN2VN_MACS); + dev_mc_add(netdev, FIP_ALL_P2P_MACS); + } else + dev_mc_add(netdev, FIP_ALL_ENODE_MACS); + + /* + * setup the receive function from ethernet driver + * on the ethertype for the given device + */ + fcoe->fcoe_packet_type.func = fcoe_rcv; + fcoe->fcoe_packet_type.type = htons(ETH_P_FCOE); + fcoe->fcoe_packet_type.dev = netdev; + dev_add_pack(&fcoe->fcoe_packet_type); + + fcoe->fip_packet_type.func = fcoe_fip_recv; + fcoe->fip_packet_type.type = htons(ETH_P_FIP); + fcoe->fip_packet_type.dev = netdev; + dev_add_pack(&fcoe->fip_packet_type); + + if (netdev != real_dev) { + fcoe->fip_vlan_packet_type.func = fcoe_fip_vlan_recv; + fcoe->fip_vlan_packet_type.type = htons(ETH_P_FIP); + fcoe->fip_vlan_packet_type.dev = real_dev; + dev_add_pack(&fcoe->fip_vlan_packet_type); + } + return 0; +} + +/** + * fcoe_interface_create() - Create a FCoE interface on a net device + * @netdev: The net device to create the FCoE interface on + * @fip_mode: The mode to use for FIP + * + * Returns: pointer to a struct fcoe_interface or NULL on error + */ +static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev, + enum fip_mode fip_mode) +{ + struct fcoe_ctlr_device *ctlr_dev; + struct fcoe_ctlr *ctlr; + struct fcoe_interface *fcoe; + int size; + int err; + + if (!try_module_get(THIS_MODULE)) { + FCOE_NETDEV_DBG(netdev, + "Could not get a reference to the module\n"); + fcoe = ERR_PTR(-EBUSY); + goto out; + } + + size = sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface); + ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ, + size); + if (!ctlr_dev) { + FCOE_DBG("Failed to add fcoe_ctlr_device\n"); + fcoe = ERR_PTR(-ENOMEM); + goto out_putmod; + } + + ctlr = fcoe_ctlr_device_priv(ctlr_dev); + ctlr->cdev = ctlr_dev; + fcoe = fcoe_ctlr_priv(ctlr); + + dev_hold(netdev); + + /* + * Initialize FIP. + */ + fcoe_ctlr_init(ctlr, fip_mode); + ctlr->send = fcoe_fip_send; + ctlr->update_mac = fcoe_update_src_mac; + ctlr->get_src_addr = fcoe_get_src_mac; + + err = fcoe_interface_setup(fcoe, netdev); + if (err) { + fcoe_ctlr_destroy(ctlr); + fcoe_ctlr_device_delete(ctlr_dev); + dev_put(netdev); + fcoe = ERR_PTR(err); + goto out_putmod; + } + + goto out; + +out_putmod: + module_put(THIS_MODULE); +out: + return fcoe; +} + +/** + * fcoe_interface_remove() - remove FCoE interface from netdev + * @fcoe: The FCoE interface to be cleaned up + * + * Caller must be holding the RTNL mutex + */ +static void fcoe_interface_remove(struct fcoe_interface *fcoe) +{ + struct net_device *netdev = fcoe->netdev; + struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); + static const u8 flogi_maddr[ETH_ALEN] = FC_FCOE_FLOGI_MAC; + const struct net_device_ops *ops; + + /* + * Don't listen for Ethernet packets anymore. + * synchronize_net() ensures that the packet handlers are not running + * on another CPU. dev_remove_pack() would do that, this calls the + * unsyncronized version __dev_remove_pack() to avoid multiple delays. 
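+ * The ordering below matters: every packet_type hook is removed first,
+ * then one synchronize_net() guarantees no receive handler is still
+ * running before the unicast/multicast filters are deleted.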
+ */ + __dev_remove_pack(&fcoe->fcoe_packet_type); + __dev_remove_pack(&fcoe->fip_packet_type); + if (netdev != fcoe->realdev) + __dev_remove_pack(&fcoe->fip_vlan_packet_type); + synchronize_net(); + + /* Delete secondary MAC addresses */ + dev_uc_del(netdev, flogi_maddr); + if (fip->spma) + dev_uc_del(netdev, fip->ctl_src_addr); + if (fip->mode == FIP_MODE_VN2VN) { + dev_mc_del(netdev, FIP_ALL_VN2VN_MACS); + dev_mc_del(netdev, FIP_ALL_P2P_MACS); + } else + dev_mc_del(netdev, FIP_ALL_ENODE_MACS); + + /* Tell the LLD we are done w/ FCoE */ + ops = netdev->netdev_ops; + if (ops->ndo_fcoe_disable) { + if (ops->ndo_fcoe_disable(netdev)) + FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE" + " specific feature for LLD.\n"); + } + fcoe->removed = 1; +} + + +/** + * fcoe_interface_cleanup() - Clean up a FCoE interface + * @fcoe: The FCoE interface to be cleaned up + */ +static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) +{ + struct net_device *netdev = fcoe->netdev; + struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); + + /* Release the self-reference taken during fcoe_interface_create() */ + /* tear-down the FCoE controller */ + fcoe_ctlr_destroy(fip); + scsi_host_put(fip->lp->host); + dev_put(netdev); + module_put(THIS_MODULE); +} + +/** + * fcoe_fip_recv() - Handler for received FIP frames + * @skb: The receive skb + * @netdev: The associated net device + * @ptype: The packet_type structure which was used to register this handler + * @orig_dev: The original net_device the skb was received on. + * (in case dev is a bond) + * + * Returns: 0 for success + */ +static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev, + struct packet_type *ptype, + struct net_device *orig_dev) +{ + struct fcoe_interface *fcoe; + struct fcoe_ctlr *ctlr; + + fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type); + ctlr = fcoe_to_ctlr(fcoe); + fcoe_ctlr_recv(ctlr, skb); + return 0; +} + +/** + * fcoe_fip_vlan_recv() - Handler for received FIP VLAN discovery frames + * @skb: The receive skb + * @netdev: The associated net device + * @ptype: The packet_type structure which was used to register this handler + * @orig_dev: The original net_device the skb was received on. 
+ * (in case dev is a bond) + * + * Returns: 0 for success + */ +static int fcoe_fip_vlan_recv(struct sk_buff *skb, struct net_device *netdev, + struct packet_type *ptype, + struct net_device *orig_dev) +{ + struct fcoe_interface *fcoe; + struct fcoe_ctlr *ctlr; + + fcoe = container_of(ptype, struct fcoe_interface, fip_vlan_packet_type); + ctlr = fcoe_to_ctlr(fcoe); + fcoe_ctlr_recv(ctlr, skb); + return 0; +} + +/** + * fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame + * @port: The FCoE port + * @skb: The FIP/FCoE packet to be sent + */ +static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb) +{ + if (port->fcoe_pending_queue.qlen) + fcoe_check_wait_queue(port->lport, skb); + else if (fcoe_start_io(skb)) + fcoe_check_wait_queue(port->lport, skb); +} + +/** + * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame + * @fip: The FCoE controller + * @skb: The FIP packet to be sent + */ +static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fcoe_interface *fcoe = fcoe_from_ctlr(fip); + struct fip_frame { + struct ethhdr eth; + struct fip_header fip; + } __packed *frame; + + /* + * Use default VLAN for FIP VLAN discovery protocol + */ + frame = (struct fip_frame *)skb->data; + if (ntohs(frame->eth.h_proto) == ETH_P_FIP && + ntohs(frame->fip.fip_op) == FIP_OP_VLAN && + fcoe->realdev != fcoe->netdev) + skb->dev = fcoe->realdev; + else + skb->dev = fcoe->netdev; + fcoe_port_send(lport_priv(fip->lp), skb); +} + +/** + * fcoe_update_src_mac() - Update the Ethernet MAC filters + * @lport: The local port to update the source MAC on + * @addr: Unicast MAC address to add + * + * Remove any previously-set unicast MAC filter. + * Add secondary FCoE MAC address filter for our OUI. + */ +static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr) +{ + struct fcoe_port *port = lport_priv(lport); + struct fcoe_interface *fcoe = port->priv; + + if (!is_zero_ether_addr(port->data_src_addr)) + dev_uc_del(fcoe->netdev, port->data_src_addr); + if (!is_zero_ether_addr(addr)) + dev_uc_add(fcoe->netdev, addr); + memcpy(port->data_src_addr, addr, ETH_ALEN); +} + +/** + * fcoe_get_src_mac() - return the Ethernet source address for an lport + * @lport: libfc lport + */ +static u8 *fcoe_get_src_mac(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + + return port->data_src_addr; +} + +/** + * fcoe_lport_config() - Set up a local port + * @lport: The local port to be setup + * + * Returns: 0 for success + */ +static int fcoe_lport_config(struct fc_lport *lport) +{ + lport->link_up = 0; + lport->qfull = 0; + lport->max_retry_count = 3; + lport->max_rport_retry_count = 3; + lport->e_d_tov = fcoe_e_d_tov; + lport->r_a_tov = fcoe_r_a_tov; + lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); + lport->does_npiv = 1; + + fc_lport_init_stats(lport); + + /* lport fc_lport related configuration */ + fc_lport_config(lport); + + /* offload related configuration */ + lport->crc_offload = 0; + lport->seq_offload = 0; + lport->lro_enabled = 0; + lport->lro_xid = 0; + lport->lso_max = 0; + + return 0; +} + +/* + * fcoe_netdev_features_change - Updates the lport's offload flags based + * on the LLD netdev's FCoE feature flags + */ +static void fcoe_netdev_features_change(struct fc_lport *lport, + struct net_device *netdev) +{ + mutex_lock(&lport->lp_mutex); + + if (netdev->features & NETIF_F_SG) + lport->sg_supp = 1; + else + lport->sg_supp = 0; + + if (netdev->features & NETIF_F_FCOE_CRC) 
{ + lport->crc_offload = 1; + FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n"); + } else { + lport->crc_offload = 0; + } + + if (netdev->features & NETIF_F_FSO) { + lport->seq_offload = 1; + lport->lso_max = min(netdev->gso_max_size, GSO_LEGACY_MAX_SIZE); + FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n", + lport->lso_max); + } else { + lport->seq_offload = 0; + lport->lso_max = 0; + } + + if (netdev->fcoe_ddp_xid) { + lport->lro_enabled = 1; + lport->lro_xid = netdev->fcoe_ddp_xid; + FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n", + lport->lro_xid); + } else { + lport->lro_enabled = 0; + lport->lro_xid = 0; + } + + mutex_unlock(&lport->lp_mutex); +} + +/** + * fcoe_netdev_config() - Set up net devive for SW FCoE + * @lport: The local port that is associated with the net device + * @netdev: The associated net device + * + * Must be called after fcoe_lport_config() as it will use local port mutex + * + * Returns: 0 for success + */ +static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) +{ + u32 mfs; + u64 wwnn, wwpn; + struct fcoe_interface *fcoe; + struct fcoe_ctlr *ctlr; + struct fcoe_port *port; + + /* Setup lport private data to point to fcoe softc */ + port = lport_priv(lport); + fcoe = port->priv; + ctlr = fcoe_to_ctlr(fcoe); + + /* Figure out the VLAN ID, if any */ + if (is_vlan_dev(netdev)) + lport->vlan = vlan_dev_vlan_id(netdev); + else + lport->vlan = 0; + + /* + * Determine max frame size based on underlying device and optional + * user-configured limit. If the MFS is too low, fcoe_link_ok() + * will return 0, so do this first. + */ + mfs = netdev->mtu; + if (netdev->features & NETIF_F_FCOE_MTU) { + mfs = FCOE_MTU; + FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs); + } + mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof)); + if (fc_set_mfs(lport, mfs)) + return -EINVAL; + + /* offload features support */ + fcoe_netdev_features_change(lport, netdev); + + skb_queue_head_init(&port->fcoe_pending_queue); + port->fcoe_pending_queue_active = 0; + timer_setup(&port->timer, fcoe_queue_timer, 0); + + fcoe_link_speed_update(lport); + + if (!lport->vport) { + if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) + wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0); + fc_set_wwnn(lport, wwnn); + if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) + wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, + 2, 0); + fc_set_wwpn(lport, wwpn); + } + + return 0; +} + +/** + * fcoe_shost_config() - Set up the SCSI host associated with a local port + * @lport: The local port + * @dev: The device associated with the SCSI host + * + * Must be called after fcoe_lport_config() and fcoe_netdev_config() + * + * Returns: 0 for success + */ +static int fcoe_shost_config(struct fc_lport *lport, struct device *dev) +{ + int rc = 0; + + /* lport scsi host config */ + lport->host->max_lun = FCOE_MAX_LUN; + lport->host->max_id = FCOE_MAX_FCP_TARGET; + lport->host->max_channel = 0; + lport->host->max_cmd_len = FCOE_MAX_CMD_LEN; + + if (lport->vport) + lport->host->transportt = fcoe_vport_scsi_transport; + else + lport->host->transportt = fcoe_nport_scsi_transport; + + /* add the new host to the SCSI-ml */ + rc = scsi_add_host(lport->host, dev); + if (rc) { + FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: " + "error on scsi_add_host\n"); + return rc; + } + + if (!lport->vport) + fc_host_max_npiv_vports(lport->host) = USHRT_MAX; + + snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE, + "%s v%s over %s", FCOE_NAME, 
FCOE_VERSION, + fcoe_netdev(lport)->name); + + return 0; +} + + +/** + * fcoe_fdmi_info() - Get FDMI related info from net devive for SW FCoE + * @lport: The local port that is associated with the net device + * @netdev: The associated net device + * + * Must be called after fcoe_shost_config() as it will use local port mutex + * + */ +static void fcoe_fdmi_info(struct fc_lport *lport, struct net_device *netdev) +{ + struct fcoe_interface *fcoe; + struct fcoe_port *port; + struct net_device *realdev; + int rc; + + port = lport_priv(lport); + fcoe = port->priv; + realdev = fcoe->realdev; + + /* No FDMI state m/c for NPIV ports */ + if (lport->vport) + return; + + if (realdev->netdev_ops->ndo_fcoe_get_hbainfo) { + struct netdev_fcoe_hbainfo *fdmi; + fdmi = kzalloc(sizeof(*fdmi), GFP_KERNEL); + if (!fdmi) + return; + + rc = realdev->netdev_ops->ndo_fcoe_get_hbainfo(realdev, + fdmi); + if (rc) { + printk(KERN_INFO "fcoe: Failed to retrieve FDMI " + "information from netdev.\n"); + return; + } + + snprintf(fc_host_serial_number(lport->host), + FC_SERIAL_NUMBER_SIZE, + "%s", + fdmi->serial_number); + snprintf(fc_host_manufacturer(lport->host), + FC_SERIAL_NUMBER_SIZE, + "%s", + fdmi->manufacturer); + snprintf(fc_host_model(lport->host), + FC_SYMBOLIC_NAME_SIZE, + "%s", + fdmi->model); + snprintf(fc_host_model_description(lport->host), + FC_SYMBOLIC_NAME_SIZE, + "%s", + fdmi->model_description); + snprintf(fc_host_hardware_version(lport->host), + FC_VERSION_STRING_SIZE, + "%s", + fdmi->hardware_version); + snprintf(fc_host_driver_version(lport->host), + FC_VERSION_STRING_SIZE, + "%s", + fdmi->driver_version); + snprintf(fc_host_optionrom_version(lport->host), + FC_VERSION_STRING_SIZE, + "%s", + fdmi->optionrom_version); + snprintf(fc_host_firmware_version(lport->host), + FC_VERSION_STRING_SIZE, + "%s", + fdmi->firmware_version); + + /* Enable FDMI lport states */ + lport->fdmi_enabled = 1; + kfree(fdmi); + } else { + lport->fdmi_enabled = 0; + printk(KERN_INFO "fcoe: No FDMI support.\n"); + } +} + +/** + * fcoe_oem_match() - The match routine for the offloaded exchange manager + * @fp: The I/O frame + * + * This routine will be associated with an exchange manager (EM). When + * the libfc exchange handling code is looking for an EM to use it will + * call this routine and pass it the frame that it wishes to send. This + * routine will return True if the associated EM is to be used and False + * if the echange code should continue looking for an EM. + * + * The offload EM that this routine is associated with will handle any + * packets that are for SCSI read requests. + * + * This has been enhanced to work when FCoE stack is operating in target + * mode. + * + * Returns: True for read types I/O, otherwise returns false. 
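+ *
+ * For example, with the default ddp_min of 4096 bytes, a SCSI READ whose
+ * data length exceeds 4096 is steered to the offload EM, while smaller
+ * reads fall through to the normal EM; in target mode the same size check
+ * is applied to unsolicited write commands (FCP_CFL_WRDATA).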
+ */ +static bool fcoe_oem_match(struct fc_frame *fp) +{ + struct fc_frame_header *fh = fc_frame_header_get(fp); + struct fcp_cmnd *fcp; + + if (fc_fcp_is_read(fr_fsp(fp)) && + (fr_fsp(fp)->data_len > fcoe_ddp_min)) + return true; + else if ((fr_fsp(fp) == NULL) && + (fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) && + (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) { + fcp = fc_frame_payload_get(fp, sizeof(*fcp)); + if ((fcp->fc_flags & FCP_CFL_WRDATA) && + (ntohl(fcp->fc_dl) > fcoe_ddp_min)) + return true; + } + return false; +} + +/** + * fcoe_em_config() - Allocate and configure an exchange manager + * @lport: The local port that the new EM will be associated with + * + * Returns: 0 on success + */ +static inline int fcoe_em_config(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct fcoe_interface *fcoe = port->priv; + struct fcoe_interface *oldfcoe = NULL; + struct net_device *old_real_dev, *cur_real_dev; + u16 min_xid = FCOE_MIN_XID; + u16 max_xid = FCOE_MAX_XID; + + /* + * Check if need to allocate an em instance for + * offload exchange ids to be shared across all VN_PORTs/lport. + */ + if (!lport->lro_enabled || !lport->lro_xid || + (lport->lro_xid >= max_xid)) { + lport->lro_xid = 0; + goto skip_oem; + } + + /* + * Reuse existing offload em instance in case + * it is already allocated on real eth device + */ + if (is_vlan_dev(fcoe->netdev)) + cur_real_dev = vlan_dev_real_dev(fcoe->netdev); + else + cur_real_dev = fcoe->netdev; + + list_for_each_entry(oldfcoe, &fcoe_hostlist, list) { + if (is_vlan_dev(oldfcoe->netdev)) + old_real_dev = vlan_dev_real_dev(oldfcoe->netdev); + else + old_real_dev = oldfcoe->netdev; + + if (cur_real_dev == old_real_dev) { + fcoe->oem = oldfcoe->oem; + break; + } + } + + if (fcoe->oem) { + if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) { + printk(KERN_ERR "fcoe_em_config: failed to add " + "offload em:%p on interface:%s\n", + fcoe->oem, fcoe->netdev->name); + return -ENOMEM; + } + } else { + fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3, + FCOE_MIN_XID, lport->lro_xid, + fcoe_oem_match); + if (!fcoe->oem) { + printk(KERN_ERR "fcoe_em_config: failed to allocate " + "em for offload exches on interface:%s\n", + fcoe->netdev->name); + return -ENOMEM; + } + } + + /* + * Exclude offload EM xid range from next EM xid range. + */ + min_xid += lport->lro_xid + 1; + +skip_oem: + if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) { + printk(KERN_ERR "fcoe_em_config: failed to " + "allocate em on interface %s\n", fcoe->netdev->name); + return -ENOMEM; + } + + return 0; +} + +/** + * fcoe_if_destroy() - Tear down a SW FCoE instance + * @lport: The local port to be destroyed + * + * Locking: Must be called with the RTNL mutex held. 
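+ *
+ * Tear-down mirrors fcoe_if_create(): log out of the fabric, destroy the
+ * lport, stop the queue timer, drop pending and per-CPU queued skbs,
+ * remove the SCSI host, then free the exchange manager(s) and statistics.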
+ * + */ +static void fcoe_if_destroy(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct fcoe_interface *fcoe = port->priv; + struct net_device *netdev = fcoe->netdev; + + FCOE_NETDEV_DBG(netdev, "Destroying interface\n"); + + /* Logout of the fabric */ + fc_fabric_logoff(lport); + + /* Cleanup the fc_lport */ + fc_lport_destroy(lport); + + /* Stop the transmit retry timer */ + del_timer_sync(&port->timer); + + /* Free existing transmit skbs */ + fcoe_clean_pending_queue(lport); + + if (!is_zero_ether_addr(port->data_src_addr)) + dev_uc_del(netdev, port->data_src_addr); + if (lport->vport) + synchronize_net(); + else + fcoe_interface_remove(fcoe); + + /* Free queued packets for the per-CPU receive threads */ + fcoe_percpu_clean(lport); + + /* Detach from the scsi-ml */ + fc_remove_host(lport->host); + scsi_remove_host(lport->host); + + /* Destroy lport scsi_priv */ + fc_fcp_destroy(lport); + + /* There are no more rports or I/O, free the EM */ + fc_exch_mgr_free(lport); + + /* Free memory used by statistical counters */ + fc_lport_free_stats(lport); + + /* + * Release the Scsi_Host for vport but hold on to + * master lport until it fcoe interface fully cleaned-up. + */ + if (lport->vport) + scsi_host_put(lport->host); +} + +/** + * fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device + * @lport: The local port to setup DDP for + * @xid: The exchange ID for this DDP transfer + * @sgl: The scatterlist describing this transfer + * @sgc: The number of sg items + * + * Returns: 0 if the DDP context was not configured + */ +static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + struct net_device *netdev = fcoe_netdev(lport); + + if (netdev->netdev_ops->ndo_fcoe_ddp_setup) + return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev, + xid, sgl, + sgc); + + return 0; +} + +/** + * fcoe_ddp_target() - Call a LLD's ddp_target through the net device + * @lport: The local port to setup DDP for + * @xid: The exchange ID for this DDP transfer + * @sgl: The scatterlist describing this transfer + * @sgc: The number of sg items + * + * Returns: 0 if the DDP context was not configured + */ +static int fcoe_ddp_target(struct fc_lport *lport, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + struct net_device *netdev = fcoe_netdev(lport); + + if (netdev->netdev_ops->ndo_fcoe_ddp_target) + return netdev->netdev_ops->ndo_fcoe_ddp_target(netdev, xid, + sgl, sgc); + + return 0; +} + + +/** + * fcoe_ddp_done() - Call a LLD's ddp_done through the net device + * @lport: The local port to complete DDP on + * @xid: The exchange ID for this DDP transfer + * + * Returns: the length of data that have been completed by DDP + */ +static int fcoe_ddp_done(struct fc_lport *lport, u16 xid) +{ + struct net_device *netdev = fcoe_netdev(lport); + + if (netdev->netdev_ops->ndo_fcoe_ddp_done) + return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid); + return 0; +} + +/** + * fcoe_if_create() - Create a FCoE instance on an interface + * @fcoe: The FCoE interface to create a local port on + * @parent: The device pointer to be the parent in sysfs for the SCSI host + * @npiv: Indicates if the port is a vport or not + * + * Creates a fc_lport instance and a Scsi_Host instance and configure them. 
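+ * Configuration runs in order: fcoe_lport_config(), fcoe_netdev_config(),
+ * fcoe_shost_config(), fcoe_libfc_config(), then exchange-manager setup
+ * (fcoe_em_config() for the N_Port case, fc_exch_mgr_list_clone() for an
+ * NPIV vport); any failure unwinds through the out_* labels below.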
+ * + * Returns: The allocated fc_lport or an error pointer + */ +static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, + struct device *parent, int npiv) +{ + struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); + struct net_device *netdev = fcoe->netdev; + struct fc_lport *lport, *n_port; + struct fcoe_port *port; + struct Scsi_Host *shost; + int rc; + /* + * parent is only a vport if npiv is 1, + * but we'll only use vport in that case so go ahead and set it + */ + struct fc_vport *vport = dev_to_vport(parent); + + FCOE_NETDEV_DBG(netdev, "Create Interface\n"); + + if (!npiv) + lport = libfc_host_alloc(&fcoe_shost_template, sizeof(*port)); + else + lport = libfc_vport_create(vport, sizeof(*port)); + + if (!lport) { + FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); + rc = -ENOMEM; + goto out; + } + port = lport_priv(lport); + port->lport = lport; + port->priv = fcoe; + port->get_netdev = fcoe_netdev; + port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH; + port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH; + INIT_WORK(&port->destroy_work, fcoe_destroy_work); + + /* + * Need to add the lport to the hostlist + * so we catch NETDEV_CHANGE events. + */ + fcoe_hostlist_add(lport); + + /* configure a fc_lport including the exchange manager */ + rc = fcoe_lport_config(lport); + if (rc) { + FCOE_NETDEV_DBG(netdev, "Could not configure lport for the " + "interface\n"); + goto out_host_put; + } + + if (npiv) { + FCOE_NETDEV_DBG(netdev, "Setting vport names, " + "%16.16llx %16.16llx\n", + vport->node_name, vport->port_name); + fc_set_wwnn(lport, vport->node_name); + fc_set_wwpn(lport, vport->port_name); + } + + /* configure lport network properties */ + rc = fcoe_netdev_config(lport, netdev); + if (rc) { + FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the " + "interface\n"); + goto out_lp_destroy; + } + + /* configure lport scsi host properties */ + rc = fcoe_shost_config(lport, parent); + if (rc) { + FCOE_NETDEV_DBG(netdev, "Could not configure shost for the " + "interface\n"); + goto out_lp_destroy; + } + + /* Initialize the library */ + rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1); + if (rc) { + FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the " + "interface\n"); + goto out_lp_destroy; + } + + /* Initialized FDMI information */ + fcoe_fdmi_info(lport, netdev); + + /* + * fcoe_em_alloc() and fcoe_hostlist_add() both + * need to be atomic with respect to other changes to the + * hostlist since fcoe_em_alloc() looks for an existing EM + * instance on host list updated by fcoe_hostlist_add(). + * + * This is currently handled through the fcoe_config_mutex + * begin held. 
+ */ + if (!npiv) + /* lport exch manager allocation */ + rc = fcoe_em_config(lport); + else { + shost = vport_to_shost(vport); + n_port = shost_priv(shost); + rc = fc_exch_mgr_list_clone(n_port, lport); + } + + if (rc) { + FCOE_NETDEV_DBG(netdev, "Could not configure the EM\n"); + goto out_lp_destroy; + } + + return lport; + +out_lp_destroy: + fc_exch_mgr_free(lport); +out_host_put: + fcoe_hostlist_del(lport); + scsi_host_put(lport->host); +out: + return ERR_PTR(rc); +} + +/** + * fcoe_if_init() - Initialization routine for fcoe.ko + * + * Attaches the SW FCoE transport to the FC transport + * + * Returns: 0 on success + */ +static int __init fcoe_if_init(void) +{ + /* attach to scsi transport */ + fcoe_nport_scsi_transport = + fc_attach_transport(&fcoe_nport_fc_functions); + if (!fcoe_nport_scsi_transport) + goto err; + + fcoe_vport_scsi_transport = + fc_attach_transport(&fcoe_vport_fc_functions); + if (!fcoe_vport_scsi_transport) + goto err_vport; + + return 0; + +err_vport: + fc_release_transport(fcoe_nport_scsi_transport); +err: + printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n"); + return -ENODEV; +} + +/** + * fcoe_if_exit() - Tear down fcoe.ko + * + * Detaches the SW FCoE transport from the FC transport + * + * Returns: 0 on success + */ +static int __exit fcoe_if_exit(void) +{ + fc_release_transport(fcoe_nport_scsi_transport); + fc_release_transport(fcoe_vport_scsi_transport); + fcoe_nport_scsi_transport = NULL; + fcoe_vport_scsi_transport = NULL; + return 0; +} + +static void fcoe_thread_cleanup_local(unsigned int cpu) +{ + struct page *crc_eof; + struct fcoe_percpu_s *p; + + p = per_cpu_ptr(&fcoe_percpu, cpu); + spin_lock_bh(&p->fcoe_rx_list.lock); + crc_eof = p->crc_eof_page; + p->crc_eof_page = NULL; + p->crc_eof_offset = 0; + spin_unlock_bh(&p->fcoe_rx_list.lock); + + if (crc_eof) + put_page(crc_eof); + flush_work(&p->work); +} + +/** + * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming + * command. + * + * This routine selects next CPU based on cpumask to distribute + * incoming requests in round robin. + * + * Returns: int CPU number + */ +static inline unsigned int fcoe_select_cpu(void) +{ + static unsigned int selected_cpu; + + selected_cpu = cpumask_next(selected_cpu, cpu_online_mask); + if (selected_cpu >= nr_cpu_ids) + selected_cpu = cpumask_first(cpu_online_mask); + + return selected_cpu; +} + +/** + * fcoe_rcv() - Receive packets from a net device + * @skb: The received packet + * @netdev: The net device that the packet was received on + * @ptype: The packet type context + * @olddev: The last device net device + * + * This routine is called by NET_RX_SOFTIRQ. It receives a packet, builds a + * FC frame and passes the frame to libfc. 
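+ * The frame is not processed in softirq context; it is queued to a per-CPU + * receive list and handed to libfc later by fcoe_receive_work().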
+ * + * Returns: 0 for success + */ +static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, + struct packet_type *ptype, struct net_device *olddev) +{ + struct fc_lport *lport; + struct fcoe_rcv_info *fr; + struct fcoe_ctlr *ctlr; + struct fcoe_interface *fcoe; + struct fc_frame_header *fh; + struct fcoe_percpu_s *fps; + struct ethhdr *eh; + unsigned int cpu; + + fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type); + ctlr = fcoe_to_ctlr(fcoe); + lport = ctlr->lp; + if (unlikely(!lport)) { + FCOE_NETDEV_DBG(netdev, "Cannot find hba structure\n"); + goto err2; + } + if (!lport->link_up) + goto err2; + + FCOE_NETDEV_DBG(netdev, + "skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n", + skb->len, skb->data_len, skb->head, skb->data, + skb_tail_pointer(skb), skb_end_pointer(skb), + skb->csum, skb->dev ? skb->dev->name : ""); + + + skb = skb_share_check(skb, GFP_ATOMIC); + + if (skb == NULL) + return NET_RX_DROP; + + eh = eth_hdr(skb); + + if (is_fip_mode(ctlr) && + !ether_addr_equal(eh->h_source, ctlr->dest_addr)) { + FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n", + eh->h_source); + goto err; + } + + /* + * Check for minimum frame length, and make sure required FCoE + * and FC headers are pulled into the linear data area. + */ + if (unlikely((skb->len < FCOE_MIN_FRAME) || + !pskb_may_pull(skb, FCOE_HEADER_LEN))) + goto err; + + skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); + fh = (struct fc_frame_header *) skb_transport_header(skb); + + if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) { + FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n", + eh->h_dest); + goto err; + } + + fr = fcoe_dev_from_skb(skb); + fr->fr_dev = lport; + + /* + * In case the incoming frame's exchange is originated from + * the initiator, then received frame's exchange id is ANDed + * with fc_cpu_mask bits to get the same cpu on which exchange + * was originated, otherwise select cpu using rx exchange id + * or fcoe_select_cpu(). + */ + if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX) + cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask; + else { + if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN) + cpu = fcoe_select_cpu(); + else + cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask; + } + + if (cpu >= nr_cpu_ids) + goto err; + + fps = &per_cpu(fcoe_percpu, cpu); + spin_lock(&fps->fcoe_rx_list.lock); + /* + * We now have a valid CPU that we're targeting for + * this skb. We also have this receive thread locked, + * so we're free to queue skbs into it's queue. + */ + + /* + * Note: We used to have a set of conditions under which we would + * call fcoe_recv_frame directly, rather than queuing to the rx list + * as it could save a few cycles, but doing so is prohibited, as + * fcoe_recv_frame has several paths that may sleep, which is forbidden + * in softirq context. 
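+ * Queueing to the per-CPU list also keeps all frames of a given exchange on + * the CPU selected above.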
+ */ + __skb_queue_tail(&fps->fcoe_rx_list, skb); + schedule_work_on(cpu, &fps->work); + spin_unlock(&fps->fcoe_rx_list.lock); + + return NET_RX_SUCCESS; +err: + this_cpu_inc(lport->stats->ErrorFrames); +err2: + kfree_skb(skb); + return NET_RX_DROP; +} + +/** + * fcoe_alloc_paged_crc_eof() - Allocate a page to be used for the trailer CRC + * @skb: The packet to be transmitted + * @tlen: The total length of the trailer + * + * Returns: 0 for success + */ +static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) +{ + struct fcoe_percpu_s *fps; + int rc; + + local_lock(&fcoe_percpu.lock); + fps = this_cpu_ptr(&fcoe_percpu); + rc = fcoe_get_paged_crc_eof(skb, tlen, fps); + local_unlock(&fcoe_percpu.lock); + + return rc; +} + +/** + * fcoe_xmit() - Transmit a FCoE frame + * @lport: The local port that the frame is to be transmitted for + * @fp: The frame to be transmitted + * + * Return: 0 for success + */ +static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) +{ + int wlen; + u32 crc; + struct ethhdr *eh; + struct fcoe_crc_eof *cp; + struct sk_buff *skb; + struct fc_frame_header *fh; + unsigned int hlen; /* header length implies the version */ + unsigned int tlen; /* trailer length */ + unsigned int elen; /* eth header, may include vlan */ + struct fcoe_port *port = lport_priv(lport); + struct fcoe_interface *fcoe = port->priv; + struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); + u8 sof, eof; + struct fcoe_hdr *hp; + + WARN_ON((fr_len(fp) % sizeof(u32)) != 0); + + fh = fc_frame_header_get(fp); + skb = fp_skb(fp); + + if (!lport->link_up) { + kfree_skb(skb); + return 0; + } + + if (unlikely(fh->fh_type == FC_TYPE_ELS) && + fcoe_ctlr_els_send(ctlr, lport, skb)) + return 0; + + sof = fr_sof(fp); + eof = fr_eof(fp); + + elen = sizeof(struct ethhdr); + hlen = sizeof(struct fcoe_hdr); + tlen = sizeof(struct fcoe_crc_eof); + wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; + + /* crc offload */ + if (likely(lport->crc_offload)) { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = skb_headroom(skb); + skb->csum_offset = skb->len; + crc = 0; + } else { + skb->ip_summed = CHECKSUM_NONE; + crc = fcoe_fc_crc(fp); + } + + /* copy port crc and eof to the skb buff */ + if (skb_is_nonlinear(skb)) { + skb_frag_t *frag; + if (fcoe_alloc_paged_crc_eof(skb, tlen)) { + kfree_skb(skb); + return -ENOMEM; + } + frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; + cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); + } else { + cp = skb_put(skb, tlen); + } + + memset(cp, 0, sizeof(*cp)); + cp->fcoe_eof = eof; + cp->fcoe_crc32 = cpu_to_le32(~crc); + + if (skb_is_nonlinear(skb)) { + kunmap_atomic(cp); + cp = NULL; + } + + /* adjust skb network/transport offsets to match mac/fcoe/port */ + skb_push(skb, elen + hlen); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb->mac_len = elen; + skb->protocol = htons(ETH_P_FCOE); + skb->priority = fcoe->priority; + + if (is_vlan_dev(fcoe->netdev) && + fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) { + /* must set skb->dev before calling vlan_put_tag */ + skb->dev = fcoe->realdev; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vlan_dev_vlan_id(fcoe->netdev)); + } else + skb->dev = fcoe->netdev; + + /* fill up mac and fcoe headers */ + eh = eth_hdr(skb); + eh->h_proto = htons(ETH_P_FCOE); + memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN); + if (ctlr->map_dest) + memcpy(eh->h_dest + 3, fh->fh_d_id, 3); + + if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN)) + memcpy(eh->h_source, ctlr->ctl_src_addr, 
ETH_ALEN); + else + memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); + + hp = (struct fcoe_hdr *)(eh + 1); + memset(hp, 0, sizeof(*hp)); + if (FC_FCOE_VER) + FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); + hp->fcoe_sof = sof; + + /* fcoe lso, mss is in max_payload which is non-zero for FCP data */ + if (lport->seq_offload && fr_max_payload(fp)) { + skb_shinfo(skb)->gso_type = SKB_GSO_FCOE; + skb_shinfo(skb)->gso_size = fr_max_payload(fp); + } else { + skb_shinfo(skb)->gso_type = 0; + skb_shinfo(skb)->gso_size = 0; + } + /* update tx stats: regardless if LLD fails */ + this_cpu_inc(lport->stats->TxFrames); + this_cpu_add(lport->stats->TxWords, wlen); + + /* send down to lld */ + fr_dev(fp) = lport; + fcoe_port_send(port, skb); + return 0; +} + +/** + * fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC + * @lport: The local port the frame was received on + * @fp: The received frame + * + * Return: 0 on passing filtering checks + */ +static inline int fcoe_filter_frames(struct fc_lport *lport, + struct fc_frame *fp) +{ + struct fcoe_ctlr *ctlr; + struct fcoe_interface *fcoe; + struct fc_frame_header *fh; + struct sk_buff *skb = (struct sk_buff *)fp; + + /* + * We only check CRC if no offload is available and if it is + * it's solicited data, in which case, the FCP layer would + * check it during the copy. + */ + if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY) + fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; + else + fr_flags(fp) |= FCPHF_CRC_UNCHECKED; + + fh = fc_frame_header_get(fp); + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) + return 0; + + fcoe = ((struct fcoe_port *)lport_priv(lport))->priv; + ctlr = fcoe_to_ctlr(fcoe); + if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO && + ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { + FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n"); + return -EINVAL; + } + + if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED) || + le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) { + fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; + return 0; + } + + if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < 5) + printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); + return -EINVAL; +} + +/** + * fcoe_recv_frame() - process a single received frame + * @skb: frame to process + */ +static void fcoe_recv_frame(struct sk_buff *skb) +{ + u32 fr_len; + struct fc_lport *lport; + struct fcoe_rcv_info *fr; + struct fcoe_crc_eof crc_eof; + struct fc_frame *fp; + struct fcoe_hdr *hp; + + fr = fcoe_dev_from_skb(skb); + lport = fr->fr_dev; + if (unlikely(!lport)) { + FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n"); + kfree_skb(skb); + return; + } + + FCOE_NETDEV_DBG(skb->dev, + "skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n", + skb->len, skb->data_len, + skb->head, skb->data, skb_tail_pointer(skb), + skb_end_pointer(skb), skb->csum, + skb->dev ? skb->dev->name : ""); + + skb_linearize(skb); /* check for skb_is_nonlinear is within skb_linearize */ + + /* + * Frame length checks and setting up the header pointers + * was done in fcoe_rcv already. 
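+ * (fcoe_rcv() rejected frames shorter than FCOE_MIN_FRAME and pulled + * FCOE_HEADER_LEN bytes into the linear data area.)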
+ */ + hp = (struct fcoe_hdr *) skb_network_header(skb); + + if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { + struct fc_stats *stats; + + stats = per_cpu_ptr(lport->stats, raw_smp_processor_id()); + if (READ_ONCE(stats->ErrorFrames) < 5) + printk(KERN_WARNING "fcoe: FCoE version " + "mismatch: The frame has " + "version %x, but the " + "initiator supports version " + "%x\n", FC_FCOE_DECAPS_VER(hp), + FC_FCOE_VER); + goto drop; + } + + skb_pull(skb, sizeof(struct fcoe_hdr)); + fr_len = skb->len - sizeof(struct fcoe_crc_eof); + + this_cpu_inc(lport->stats->RxFrames); + this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE); + + fp = (struct fc_frame *)skb; + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = hp->fcoe_sof; + + /* Copy out the CRC and EOF trailer for access */ + if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) + goto drop; + fr_eof(fp) = crc_eof.fcoe_eof; + fr_crc(fp) = crc_eof.fcoe_crc32; + if (pskb_trim(skb, fr_len)) + goto drop; + + if (!fcoe_filter_frames(lport, fp)) { + fc_exch_recv(lport, fp); + return; + } +drop: + this_cpu_inc(lport->stats->ErrorFrames); + kfree_skb(skb); +} + +/** + * fcoe_receive_work() - The per-CPU worker + * @work: The work struct + * + */ +static void fcoe_receive_work(struct work_struct *work) +{ + struct fcoe_percpu_s *p; + struct sk_buff *skb; + struct sk_buff_head tmp; + + p = container_of(work, struct fcoe_percpu_s, work); + skb_queue_head_init(&tmp); + + spin_lock_bh(&p->fcoe_rx_list.lock); + skb_queue_splice_init(&p->fcoe_rx_list, &tmp); + spin_unlock_bh(&p->fcoe_rx_list.lock); + + if (!skb_queue_len(&tmp)) + return; + + while ((skb = __skb_dequeue(&tmp))) + fcoe_recv_frame(skb); +} + +/** + * fcoe_dev_setup() - Setup the link change notification interface + */ +static void fcoe_dev_setup(void) +{ + register_dcbevent_notifier(&dcb_notifier); + register_netdevice_notifier(&fcoe_notifier); +} + +/** + * fcoe_dev_cleanup() - Cleanup the link change notification interface + */ +static void fcoe_dev_cleanup(void) +{ + unregister_dcbevent_notifier(&dcb_notifier); + unregister_netdevice_notifier(&fcoe_notifier); +} + +static struct fcoe_interface * +fcoe_hostlist_lookup_realdev_port(struct net_device *netdev) +{ + struct fcoe_interface *fcoe; + struct net_device *real_dev; + + list_for_each_entry(fcoe, &fcoe_hostlist, list) { + if (is_vlan_dev(fcoe->netdev)) + real_dev = vlan_dev_real_dev(fcoe->netdev); + else + real_dev = fcoe->netdev; + + if (netdev == real_dev) + return fcoe; + } + return NULL; +} + +static int fcoe_dcb_app_notification(struct notifier_block *notifier, + ulong event, void *ptr) +{ + struct dcb_app_type *entry = ptr; + struct fcoe_ctlr *ctlr; + struct fcoe_interface *fcoe; + struct net_device *netdev; + int prio; + + if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE) + return NOTIFY_OK; + + netdev = dev_get_by_index(&init_net, entry->ifindex); + if (!netdev) + return NOTIFY_OK; + + fcoe = fcoe_hostlist_lookup_realdev_port(netdev); + dev_put(netdev); + if (!fcoe) + return NOTIFY_OK; + + ctlr = fcoe_to_ctlr(fcoe); + + if (entry->dcbx & DCB_CAP_DCBX_VER_CEE) + prio = ffs(entry->app.priority) - 1; + else + prio = entry->app.priority; + + if (prio < 0) + return NOTIFY_OK; + + if (entry->app.protocol == ETH_P_FIP || + entry->app.protocol == ETH_P_FCOE) + ctlr->priority = prio; + + if (entry->app.protocol == ETH_P_FCOE) + fcoe->priority = prio; + + return NOTIFY_OK; +} + +/** + * fcoe_device_notification() - Handler for net device events + * @notifier: The context of the notification + * @event: The 
type of event + * @ptr: The net device that the event was on + * + * This function is called by the Ethernet driver in case of link change event. + * + * Returns: 0 for success + */ +static int fcoe_device_notification(struct notifier_block *notifier, + ulong event, void *ptr) +{ + struct fcoe_ctlr_device *cdev; + struct fc_lport *lport = NULL; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct fcoe_ctlr *ctlr; + struct fcoe_interface *fcoe; + u32 link_possible = 1; + u32 mfs; + int rc = NOTIFY_OK; + + list_for_each_entry(fcoe, &fcoe_hostlist, list) { + if (fcoe->netdev == netdev) { + ctlr = fcoe_to_ctlr(fcoe); + lport = ctlr->lp; + break; + } + } + if (!lport) { + rc = NOTIFY_DONE; + goto out; + } + + switch (event) { + case NETDEV_DOWN: + case NETDEV_GOING_DOWN: + link_possible = 0; + break; + case NETDEV_UP: + case NETDEV_CHANGE: + break; + case NETDEV_CHANGEMTU: + if (netdev->features & NETIF_F_FCOE_MTU) + break; + mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + + sizeof(struct fcoe_crc_eof)); + if (mfs >= FC_MIN_MAX_FRAME) + fc_set_mfs(lport, mfs); + break; + case NETDEV_REGISTER: + break; + case NETDEV_UNREGISTER: + list_del(&fcoe->list); + fcoe_vport_remove(lport); + mutex_lock(&fcoe_config_mutex); + fcoe_if_destroy(lport); + if (!fcoe->removed) + fcoe_interface_remove(fcoe); + fcoe_interface_cleanup(fcoe); + mutex_unlock(&fcoe_config_mutex); + fcoe_ctlr_device_delete(fcoe_ctlr_to_ctlr_dev(ctlr)); + goto out; + case NETDEV_FEAT_CHANGE: + fcoe_netdev_features_change(lport, netdev); + break; + default: + FCOE_NETDEV_DBG(netdev, "Unknown event %ld " + "from netdev netlink\n", event); + } + + fcoe_link_speed_update(lport); + + cdev = fcoe_ctlr_to_ctlr_dev(ctlr); + + if (link_possible && !fcoe_link_ok(lport)) { + switch (cdev->enabled) { + case FCOE_CTLR_DISABLED: + pr_info("Link up while interface is disabled.\n"); + break; + case FCOE_CTLR_ENABLED: + case FCOE_CTLR_UNUSED: + fcoe_ctlr_link_up(ctlr); + } + } else if (fcoe_ctlr_link_down(ctlr)) { + switch (cdev->enabled) { + case FCOE_CTLR_DISABLED: + pr_info("Link down while interface is disabled.\n"); + break; + case FCOE_CTLR_ENABLED: + case FCOE_CTLR_UNUSED: + this_cpu_inc(lport->stats->LinkFailureCount); + fcoe_clean_pending_queue(lport); + } + } +out: + return rc; +} + +/** + * fcoe_disable() - Disables a FCoE interface + * @netdev : The net_device object the Ethernet interface to create on + * + * Called from fcoe transport. + * + * Returns: 0 for success + * + * Deprecated: use fcoe_ctlr_enabled() + */ +static int fcoe_disable(struct net_device *netdev) +{ + struct fcoe_ctlr *ctlr; + struct fcoe_interface *fcoe; + int rc = 0; + + mutex_lock(&fcoe_config_mutex); + + rtnl_lock(); + fcoe = fcoe_hostlist_lookup_port(netdev); + rtnl_unlock(); + + if (fcoe) { + ctlr = fcoe_to_ctlr(fcoe); + fcoe_ctlr_link_down(ctlr); + fcoe_clean_pending_queue(ctlr->lp); + } else + rc = -ENODEV; + + mutex_unlock(&fcoe_config_mutex); + return rc; +} + +/** + * fcoe_enable() - Enables a FCoE interface + * @netdev : The net_device object the Ethernet interface to create on + * + * Called from fcoe transport. 
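+ * + * Deprecated: use fcoe_ctlr_enabled()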
+ * + * Returns: 0 for success + */ +static int fcoe_enable(struct net_device *netdev) +{ + struct fcoe_ctlr *ctlr; + struct fcoe_interface *fcoe; + int rc = 0; + + mutex_lock(&fcoe_config_mutex); + rtnl_lock(); + fcoe = fcoe_hostlist_lookup_port(netdev); + rtnl_unlock(); + + if (!fcoe) { + rc = -ENODEV; + goto out; + } + + ctlr = fcoe_to_ctlr(fcoe); + + if (!fcoe_link_ok(ctlr->lp)) + fcoe_ctlr_link_up(ctlr); + +out: + mutex_unlock(&fcoe_config_mutex); + return rc; +} + +/** + * fcoe_ctlr_enabled() - Enable or disable an FCoE Controller + * @cdev: The FCoE Controller that is being enabled or disabled + * + * fcoe_sysfs will ensure that the state of 'enabled' has + * changed, so no checking is necessary here. This routine simply + * calls fcoe_enable or fcoe_disable, both of which are deprecated. + * When those routines are removed the functionality can be merged + * here. + */ +static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev) +{ + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev); + struct fc_lport *lport = ctlr->lp; + struct net_device *netdev = fcoe_netdev(lport); + + switch (cdev->enabled) { + case FCOE_CTLR_ENABLED: + return fcoe_enable(netdev); + case FCOE_CTLR_DISABLED: + return fcoe_disable(netdev); + case FCOE_CTLR_UNUSED: + default: + return -ENOTSUPP; + } +} + +/** + * fcoe_ctlr_mode() - Switch FIP mode + * @ctlr_dev: The FCoE Controller that is being modified + * + * When the FIP mode has been changed we need to update + * the multicast addresses to ensure we get the correct + * frames. + */ +static void fcoe_ctlr_mode(struct fcoe_ctlr_device *ctlr_dev) +{ + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr); + + if (ctlr_dev->mode == FIP_CONN_TYPE_VN2VN && + ctlr->mode != FIP_MODE_VN2VN) { + dev_mc_del(fcoe->netdev, FIP_ALL_ENODE_MACS); + dev_mc_add(fcoe->netdev, FIP_ALL_VN2VN_MACS); + dev_mc_add(fcoe->netdev, FIP_ALL_P2P_MACS); + } else if (ctlr->mode != FIP_MODE_FABRIC) { + dev_mc_del(fcoe->netdev, FIP_ALL_VN2VN_MACS); + dev_mc_del(fcoe->netdev, FIP_ALL_P2P_MACS); + dev_mc_add(fcoe->netdev, FIP_ALL_ENODE_MACS); + } + fcoe_ctlr_set_fip_mode(ctlr_dev); +} + +/** + * fcoe_destroy() - Destroy a FCoE interface + * @netdev : The net_device object the Ethernet interface to create on + * + * Called from fcoe transport + * + * Returns: 0 for success + */ +static int fcoe_destroy(struct net_device *netdev) +{ + struct fcoe_ctlr *ctlr; + struct fcoe_interface *fcoe; + struct fc_lport *lport; + struct fcoe_port *port; + int rc = 0; + + mutex_lock(&fcoe_config_mutex); + rtnl_lock(); + fcoe = fcoe_hostlist_lookup_port(netdev); + if (!fcoe) { + rc = -ENODEV; + goto out_nodev; + } + ctlr = fcoe_to_ctlr(fcoe); + lport = ctlr->lp; + port = lport_priv(lport); + list_del(&fcoe->list); + queue_work(fcoe_wq, &port->destroy_work); +out_nodev: + rtnl_unlock(); + mutex_unlock(&fcoe_config_mutex); + return rc; +} + +/** + * fcoe_destroy_work() - Destroy a FCoE port in a deferred work context + * @work: Handle to the FCoE port to be destroyed + */ +static void fcoe_destroy_work(struct work_struct *work) +{ + struct fcoe_ctlr_device *cdev; + struct fcoe_ctlr *ctlr; + struct fcoe_port *port; + struct fcoe_interface *fcoe; + + port = container_of(work, struct fcoe_port, destroy_work); + + fcoe_vport_remove(port->lport); + + mutex_lock(&fcoe_config_mutex); + + fcoe = port->priv; + ctlr = fcoe_to_ctlr(fcoe); + cdev = fcoe_ctlr_to_ctlr_dev(ctlr); + + rtnl_lock(); + fcoe_if_destroy(port->lport); + if (!fcoe->removed) + 
fcoe_interface_remove(fcoe); + rtnl_unlock(); + fcoe_interface_cleanup(fcoe); + + mutex_unlock(&fcoe_config_mutex); + + fcoe_ctlr_device_delete(cdev); +} + +/** + * fcoe_match() - Check if the FCoE is supported on the given netdevice + * @netdev : The net_device object the Ethernet interface to create on + * + * Called from fcoe transport. + * + * Returns: always returns true as this is the default FCoE transport, + * i.e., support all netdevs. + */ +static bool fcoe_match(struct net_device *netdev) +{ + return true; +} + +/** + * fcoe_dcb_create() - Initialize DCB attributes and hooks + * @fcoe: The new FCoE interface + */ +static void fcoe_dcb_create(struct fcoe_interface *fcoe) +{ + int ctlr_prio = TC_PRIO_BESTEFFORT; + int fcoe_prio = TC_PRIO_INTERACTIVE; + struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); +#ifdef CONFIG_DCB + int dcbx; + u8 fup, up; + struct net_device *netdev = fcoe->realdev; + struct dcb_app app = { + .priority = 0, + .protocol = ETH_P_FCOE + }; + + /* setup DCB priority attributes. */ + if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) { + dcbx = netdev->dcbnl_ops->getdcbx(netdev); + + if (dcbx & DCB_CAP_DCBX_VER_IEEE) { + app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE; + up = dcb_ieee_getapp_mask(netdev, &app); + app.protocol = ETH_P_FIP; + fup = dcb_ieee_getapp_mask(netdev, &app); + } else { + app.selector = DCB_APP_IDTYPE_ETHTYPE; + up = dcb_getapp(netdev, &app); + app.protocol = ETH_P_FIP; + fup = dcb_getapp(netdev, &app); + } + + fcoe_prio = ffs(up) ? ffs(up) - 1 : 0; + ctlr_prio = ffs(fup) ? ffs(fup) - 1 : fcoe_prio; + } +#endif + fcoe->priority = fcoe_prio; + ctlr->priority = ctlr_prio; +} + +enum fcoe_create_link_state { + FCOE_CREATE_LINK_DOWN, + FCOE_CREATE_LINK_UP, +}; + +/** + * _fcoe_create() - (internal) Create a fcoe interface + * @netdev : The net_device object the Ethernet interface to create on + * @fip_mode: The FIP mode for this creation + * @link_state: The ctlr link state on creation + * + * Called from either the libfcoe 'create' module parameter + * via fcoe_create or from fcoe_syfs's ctlr_create file. + * + * libfcoe's 'create' module parameter is deprecated so some + * consolidation of code can be done when that interface is + * removed. + */ +static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode, + enum fcoe_create_link_state link_state) +{ + int rc = 0; + struct fcoe_ctlr_device *ctlr_dev; + struct fcoe_ctlr *ctlr; + struct fcoe_interface *fcoe; + struct fc_lport *lport; + + mutex_lock(&fcoe_config_mutex); + rtnl_lock(); + + /* look for existing lport */ + if (fcoe_hostlist_lookup(netdev)) { + rc = -EEXIST; + goto out_nodev; + } + + fcoe = fcoe_interface_create(netdev, fip_mode); + if (IS_ERR(fcoe)) { + rc = PTR_ERR(fcoe); + goto out_nodev; + } + + ctlr = fcoe_to_ctlr(fcoe); + ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr); + lport = fcoe_if_create(fcoe, &ctlr_dev->dev, 0); + if (IS_ERR(lport)) { + printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", + netdev->name); + rc = -EIO; + if (!fcoe->removed) + fcoe_interface_remove(fcoe); + rtnl_unlock(); + fcoe_interface_cleanup(fcoe); + mutex_unlock(&fcoe_config_mutex); + fcoe_ctlr_device_delete(ctlr_dev); + return rc; + } + + /* Make this the "master" N_Port */ + ctlr->lp = lport; + + /* setup DCB priority attributes. 
*/ + fcoe_dcb_create(fcoe); + + /* start FIP Discovery and FLOGI */ + lport->boot_time = jiffies; + fc_fabric_login(lport); + + /* + * If the fcoe_ctlr_device is to be set to DISABLED + * it must be done after the lport is added to the + * hostlist, but before the rtnl_lock is released. + * This is because the rtnl_lock protects the + * hostlist that fcoe_device_notification uses. If + * the FCoE Controller is intended to be created + * DISABLED then 'enabled' needs to be considered + * handling link events. 'enabled' must be set + * before the lport can be found in the hostlist + * when a link up event is received. + */ + if (link_state == FCOE_CREATE_LINK_UP) + ctlr_dev->enabled = FCOE_CTLR_ENABLED; + else + ctlr_dev->enabled = FCOE_CTLR_DISABLED; + + if (link_state == FCOE_CREATE_LINK_UP && + !fcoe_link_ok(lport)) { + rtnl_unlock(); + fcoe_ctlr_link_up(ctlr); + mutex_unlock(&fcoe_config_mutex); + return rc; + } + +out_nodev: + rtnl_unlock(); + mutex_unlock(&fcoe_config_mutex); + + return rc; +} + +/** + * fcoe_create() - Create a fcoe interface + * @netdev : The net_device object the Ethernet interface to create on + * @fip_mode: The FIP mode for this creation + * + * Called from fcoe transport + * + * Returns: 0 for success + */ +static int fcoe_create(struct net_device *netdev, enum fip_mode fip_mode) +{ + return _fcoe_create(netdev, fip_mode, FCOE_CREATE_LINK_UP); +} + +/** + * fcoe_ctlr_alloc() - Allocate a fcoe interface from fcoe_sysfs + * @netdev: The net_device to be used by the allocated FCoE Controller + * + * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr + * in a link_down state. The allows the user an opportunity to configure + * the FCoE Controller from sysfs before enabling the FCoE Controller. + * + * Creating in with this routine starts the FCoE Controller in Fabric + * mode. The user can change to VN2VN or another mode before enabling. + */ +static int fcoe_ctlr_alloc(struct net_device *netdev) +{ + return _fcoe_create(netdev, FIP_MODE_FABRIC, + FCOE_CREATE_LINK_DOWN); +} + +/** + * fcoe_link_ok() - Check if the link is OK for a local port + * @lport: The local port to check link on + * + * Returns: 0 if link is UP and OK, -1 if not + * + */ +static int fcoe_link_ok(struct fc_lport *lport) +{ + struct net_device *netdev = fcoe_netdev(lport); + + if (netif_oper_up(netdev)) + return 0; + return -1; +} + +/** + * fcoe_percpu_clean() - Clear all pending skbs for an local port + * @lport: The local port whose skbs are to be cleared + * + * Must be called with fcoe_create_mutex held to single-thread completion. + * + * This flushes the pending skbs by flush the work item for each CPU. The work + * item on each possible CPU is flushed because we may have used the per-CPU + * struct of an offline CPU. 
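+ * Flushing the work item guarantees that fcoe_receive_work() has drained + * any skbs queued for this lport.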
+ */ +static void fcoe_percpu_clean(struct fc_lport *lport) +{ + struct fcoe_percpu_s *pp; + unsigned int cpu; + + for_each_possible_cpu(cpu) { + pp = &per_cpu(fcoe_percpu, cpu); + + flush_work(&pp->work); + } +} + +/** + * fcoe_reset() - Reset a local port + * @shost: The SCSI host associated with the local port to be reset + * + * Returns: Always 0 (return value required by FC transport template) + */ +static int fcoe_reset(struct Scsi_Host *shost) +{ + struct fc_lport *lport = shost_priv(shost); + struct fcoe_port *port = lport_priv(lport); + struct fcoe_interface *fcoe = port->priv; + struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); + struct fcoe_ctlr_device *cdev = fcoe_ctlr_to_ctlr_dev(ctlr); + + fcoe_ctlr_link_down(ctlr); + fcoe_clean_pending_queue(ctlr->lp); + + if (cdev->enabled != FCOE_CTLR_DISABLED && + !fcoe_link_ok(ctlr->lp)) + fcoe_ctlr_link_up(ctlr); + return 0; +} + +/** + * fcoe_hostlist_lookup_port() - Find the FCoE interface associated with a net device + * @netdev: The net device used as a key + * + * Locking: Must be called with the RNL mutex held. + * + * Returns: NULL or the FCoE interface + */ +static struct fcoe_interface * +fcoe_hostlist_lookup_port(const struct net_device *netdev) +{ + struct fcoe_interface *fcoe; + + list_for_each_entry(fcoe, &fcoe_hostlist, list) { + if (fcoe->netdev == netdev) + return fcoe; + } + return NULL; +} + +/** + * fcoe_hostlist_lookup() - Find the local port associated with a + * given net device + * @netdev: The netdevice used as a key + * + * Locking: Must be called with the RTNL mutex held + * + * Returns: NULL or the local port + */ +static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) +{ + struct fcoe_ctlr *ctlr; + struct fcoe_interface *fcoe; + + fcoe = fcoe_hostlist_lookup_port(netdev); + ctlr = fcoe_to_ctlr(fcoe); + return (fcoe) ? 
ctlr->lp : NULL; +} + +/** + * fcoe_hostlist_add() - Add the FCoE interface identified by a local + * port to the hostlist + * @lport: The local port that identifies the FCoE interface to be added + * + * Locking: must be called with the RTNL mutex held + * + * Returns: 0 for success + */ +static int fcoe_hostlist_add(const struct fc_lport *lport) +{ + struct fcoe_interface *fcoe; + struct fcoe_port *port; + + fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport)); + if (!fcoe) { + port = lport_priv(lport); + fcoe = port->priv; + list_add_tail(&fcoe->list, &fcoe_hostlist); + } + return 0; +} + +/** + * fcoe_hostlist_del() - Remove the FCoE interface identified by a local + * port to the hostlist + * @lport: The local port that identifies the FCoE interface to be added + * + * Locking: must be called with the RTNL mutex held + * + */ +static void fcoe_hostlist_del(const struct fc_lport *lport) +{ + struct fcoe_interface *fcoe; + struct fcoe_port *port; + + port = lport_priv(lport); + fcoe = port->priv; + list_del(&fcoe->list); + return; +} + +static struct fcoe_transport fcoe_sw_transport = { + .name = {FCOE_TRANSPORT_DEFAULT}, + .attached = false, + .list = LIST_HEAD_INIT(fcoe_sw_transport.list), + .match = fcoe_match, + .alloc = fcoe_ctlr_alloc, + .create = fcoe_create, + .destroy = fcoe_destroy, + .enable = fcoe_enable, + .disable = fcoe_disable, +}; + +/** + * fcoe_init() - Initialize fcoe.ko + * + * Returns: 0 on success, or a negative value on failure + */ +static int __init fcoe_init(void) +{ + struct fcoe_percpu_s *p; + unsigned int cpu; + int rc = 0; + + fcoe_wq = alloc_workqueue("fcoe", 0, 0); + if (!fcoe_wq) + return -ENOMEM; + + /* register as a fcoe transport */ + rc = fcoe_transport_attach(&fcoe_sw_transport); + if (rc) { + printk(KERN_ERR "failed to register an fcoe transport, check " + "if libfcoe is loaded\n"); + goto out_destroy; + } + + mutex_lock(&fcoe_config_mutex); + + for_each_possible_cpu(cpu) { + p = per_cpu_ptr(&fcoe_percpu, cpu); + INIT_WORK(&p->work, fcoe_receive_work); + skb_queue_head_init(&p->fcoe_rx_list); + local_lock_init(&p->lock); + } + + /* Setup link change notification */ + fcoe_dev_setup(); + + rc = fcoe_if_init(); + if (rc) + goto out_free; + + mutex_unlock(&fcoe_config_mutex); + return 0; + +out_free: + mutex_unlock(&fcoe_config_mutex); + fcoe_transport_detach(&fcoe_sw_transport); +out_destroy: + destroy_workqueue(fcoe_wq); + return rc; +} +module_init(fcoe_init); + +/** + * fcoe_exit() - Clean up fcoe.ko + * + * Returns: 0 on success or a negative value on failure + */ +static void __exit fcoe_exit(void) +{ + struct fcoe_interface *fcoe, *tmp; + struct fcoe_ctlr *ctlr; + struct fcoe_port *port; + unsigned int cpu; + + mutex_lock(&fcoe_config_mutex); + + fcoe_dev_cleanup(); + + /* releases the associated fcoe hosts */ + rtnl_lock(); + list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) { + ctlr = fcoe_to_ctlr(fcoe); + port = lport_priv(ctlr->lp); + fcoe_hostlist_del(port->lport); + queue_work(fcoe_wq, &port->destroy_work); + } + rtnl_unlock(); + + for_each_possible_cpu(cpu) + fcoe_thread_cleanup_local(cpu); + + mutex_unlock(&fcoe_config_mutex); + + /* + * destroy_work's may be chained but destroy_workqueue() + * can take care of them. Just kill the fcoe_wq. + */ + destroy_workqueue(fcoe_wq); + + /* + * Detaching from the scsi transport must happen after all + * destroys are done on the fcoe_wq. destroy_workqueue will + * enusre the fcoe_wq is flushed. 
+ */ + fcoe_if_exit(); + + /* detach from fcoe transport */ + fcoe_transport_detach(&fcoe_sw_transport); +} +module_exit(fcoe_exit); + +/** + * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler + * @seq: active sequence in the FLOGI or FDISC exchange + * @fp: response frame, or error encoded in a pointer (timeout) + * @arg: pointer to the fcoe_ctlr structure + * + * This handles MAC address management for FCoE, then passes control on to + * the libfc FLOGI response handler. + */ +static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) +{ + struct fcoe_ctlr *fip = arg; + struct fc_exch *exch = fc_seq_exch(seq); + struct fc_lport *lport = exch->lp; + u8 *mac; + + if (IS_ERR(fp)) + goto done; + + mac = fr_cb(fp)->granted_mac; + /* pre-FIP */ + if (is_zero_ether_addr(mac)) + fcoe_ctlr_recv_flogi(fip, lport, fp); + else + fcoe_update_src_mac(lport, mac); +done: + fc_lport_flogi_resp(seq, fp, lport); +} + +/** + * fcoe_logo_resp() - FCoE specific LOGO response handler + * @seq: active sequence in the LOGO exchange + * @fp: response frame, or error encoded in a pointer (timeout) + * @arg: pointer to the fcoe_ctlr structure + * + * This handles MAC address management for FCoE, then passes control on to + * the libfc LOGO response handler. + */ +static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) +{ + struct fc_lport *lport = arg; + static u8 zero_mac[ETH_ALEN] = { 0 }; + + if (!IS_ERR(fp)) + fcoe_update_src_mac(lport, zero_mac); + fc_lport_logo_resp(seq, fp, lport); +} + +/* + * fcoe_elsct_send - FCoE specific ELS handler + * + * This does special case handling of FIP encapsualted ELS exchanges for FCoE, + * using FCoE specific response handlers and passing the FIP controller as + * the argument (the lport is still available from the exchange). + * + * Most of the work here is just handed off to the libfc routine. + */ +static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timeout) +{ + struct fcoe_port *port = lport_priv(lport); + struct fcoe_interface *fcoe = port->priv; + struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); + struct fc_frame_header *fh = fc_frame_header_get(fp); + + switch (op) { + case ELS_FLOGI: + case ELS_FDISC: + if (lport->point_to_multipoint) + break; + return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp, + fip, timeout); + case ELS_LOGO: + /* only hook onto fabric logouts, not port logouts */ + if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) + break; + return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp, + lport, timeout); + } + return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); +} + +/** + * fcoe_vport_create() - create an fc_host/scsi_host for a vport + * @vport: fc_vport object to create a new fc_host for + * @disabled: start the new fc_host in a disabled state by default? 
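+ * + * The vport's lport is allocated by fcoe_if_create() with npiv set, using + * the fc_vport's device as the sysfs parent.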
+ * + * Returns: 0 for success + */ +static int fcoe_vport_create(struct fc_vport *vport, bool disabled) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fcoe_port *port = lport_priv(n_port); + struct fcoe_interface *fcoe = port->priv; + struct net_device *netdev = fcoe->netdev; + struct fc_lport *vn_port; + int rc; + char buf[32]; + + rc = fcoe_validate_vport_create(vport); + if (rc) { + fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); + printk(KERN_ERR "fcoe: Failed to create vport, " + "WWPN (0x%s) already exists\n", + buf); + return rc; + } + + mutex_lock(&fcoe_config_mutex); + rtnl_lock(); + vn_port = fcoe_if_create(fcoe, &vport->dev, 1); + rtnl_unlock(); + mutex_unlock(&fcoe_config_mutex); + + if (IS_ERR(vn_port)) { + printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n", + netdev->name); + return -EIO; + } + + if (disabled) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + } else { + vn_port->boot_time = jiffies; + fc_fabric_login(vn_port); + fc_vport_setlink(vn_port); + } + return 0; +} + +/** + * fcoe_vport_destroy() - destroy the fc_host/scsi_host for a vport + * @vport: fc_vport object that is being destroyed + * + * Returns: 0 for success + */ +static int fcoe_vport_destroy(struct fc_vport *vport) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port = vport->dd_data; + + mutex_lock(&n_port->lp_mutex); + list_del(&vn_port->list); + mutex_unlock(&n_port->lp_mutex); + + mutex_lock(&fcoe_config_mutex); + rtnl_lock(); + fcoe_if_destroy(vn_port); + rtnl_unlock(); + mutex_unlock(&fcoe_config_mutex); + + return 0; +} + +/** + * fcoe_vport_remove() - remove attached vports + * @lport: lport for which the vports should be removed + */ +static void fcoe_vport_remove(struct fc_lport *lport) +{ + struct Scsi_Host *shost; + struct fc_host_attrs *fc_host; + unsigned long flags; + struct fc_vport *vport; + struct fc_vport *next_vport; + + shost = lport->host; + fc_host = shost_to_fc_host(shost); + + /* Loop through all the vports and mark them for deletion */ + spin_lock_irqsave(shost->host_lock, flags); + list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) { + if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) { + continue; + } else { + vport->flags |= FC_VPORT_DELETING; + queue_work(fc_host_work_q(shost), + &vport->vport_delete_work); + } + } + spin_unlock_irqrestore(shost->host_lock, flags); + + flush_workqueue(fc_host_work_q(shost)); +} + +/** + * fcoe_vport_disable() - change vport state + * @vport: vport to bring online/offline + * @disable: should the vport be disabled? + */ +static int fcoe_vport_disable(struct fc_vport *vport, bool disable) +{ + struct fc_lport *lport = vport->dd_data; + + if (disable) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + fc_fabric_logoff(lport); + } else { + lport->boot_time = jiffies; + fc_fabric_login(lport); + fc_vport_setlink(lport); + } + + return 0; +} + +/** + * fcoe_set_vport_symbolic_name() - append vport string to symbolic name + * @vport: fc_vport with a new symbolic name string + * + * After generating a new symbolic name string, a new RSPN_ID request is + * sent to the name server. There is no response handler, so if it fails + * for some reason it will not be retried. 
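+ * The new name has the form "fcoe v<FCOE_VERSION> over <netdev name> : <vport + * symbolic name>".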
+ */ +static void fcoe_set_vport_symbolic_name(struct fc_vport *vport) +{ + struct fc_lport *lport = vport->dd_data; + struct fc_frame *fp; + size_t len; + + snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE, + "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION, + fcoe_netdev(lport)->name, vport->symbolic_name); + + if (lport->state != LPORT_ST_READY) + return; + + len = strnlen(fc_host_symbolic_name(lport->host), 255); + fp = fc_frame_alloc(lport, + sizeof(struct fc_ct_hdr) + + sizeof(struct fc_ns_rspn) + len); + if (!fp) + return; + lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID, + NULL, NULL, 3 * lport->r_a_tov); +} + +static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev) +{ + struct fcoe_ctlr_device *ctlr_dev = + fcoe_fcf_dev_to_ctlr_dev(fcf_dev); + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr); + + fcf_dev->vlan_id = vlan_dev_vlan_id(fcoe->netdev); +} + +/** + * fcoe_set_port_id() - Callback from libfc when Port_ID is set. + * @lport: the local port + * @port_id: the port ID + * @fp: the received frame, if any, that caused the port_id to be set. + * + * This routine handles the case where we received a FLOGI and are + * entering point-to-point mode. We need to call fcoe_ctlr_recv_flogi() + * so it can set the non-mapped mode and gateway address. + * + * The FLOGI LS_ACC is handled by fcoe_flogi_resp(). + */ +static void fcoe_set_port_id(struct fc_lport *lport, + u32 port_id, struct fc_frame *fp) +{ + struct fcoe_port *port = lport_priv(lport); + struct fcoe_interface *fcoe = port->priv; + struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); + + if (fp && fc_frame_payload_op(fp) == ELS_FLOGI) + fcoe_ctlr_recv_flogi(ctlr, lport, fp); +} diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h new file mode 100644 index 000000000..520c53512 --- /dev/null +++ b/drivers/scsi/fcoe/fcoe.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright(c) 2009 Intel Corporation. All rights reserved. + * + * Maintained at www.Open-FCoE.org + */ + +#ifndef _FCOE_H_ +#define _FCOE_H_ + +#include +#include + +#define FCOE_MAX_QUEUE_DEPTH 256 +#define FCOE_MIN_QUEUE_DEPTH 32 + +#define FCOE_WORD_TO_BYTE 4 + +#define FCOE_VERSION "0.1" +#define FCOE_NAME "fcoe" +#define FCOE_VENDOR "Open-FCoE.org" + +#define FCOE_MAX_LUN 0xFFFF +#define FCOE_MAX_FCP_TARGET 256 + +#define FCOE_MAX_OUTSTANDING_COMMANDS 1024 + +#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */ +#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */ + +extern unsigned int fcoe_debug_logging; + +#define FCOE_LOGGING 0x01 /* General logging, not categorized */ +#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */ + +#define FCOE_CHECK_LOGGING(LEVEL, CMD) \ +do { \ + if (unlikely(fcoe_debug_logging & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ +} while (0) + +#define FCOE_DBG(fmt, args...) \ + FCOE_CHECK_LOGGING(FCOE_LOGGING, \ + pr_info("fcoe: " fmt, ##args);) + +#define FCOE_NETDEV_DBG(netdev, fmt, args...) 
\ + FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \ + pr_info("fcoe: %s: " fmt, \ + netdev->name, ##args);) + +/** + * struct fcoe_interface - A FCoE interface + * @list: Handle for a list of FCoE interfaces + * @netdev: The associated net device + * @fcoe_packet_type: FCoE packet type + * @fip_packet_type: FIP packet type + * @oem: The offload exchange manager for all local port + * instances associated with this port + * @removed: Indicates fcoe interface removed from net device + * @priority: Priority for the FCoE packet (DCB) + * This structure is 1:1 with a net device. + */ +struct fcoe_interface { + struct list_head list; + struct net_device *netdev; + struct net_device *realdev; + struct packet_type fcoe_packet_type; + struct packet_type fip_packet_type; + struct packet_type fip_vlan_packet_type; + struct fc_exch_mgr *oem; + u8 removed; + u8 priority; +}; + +#define fcoe_to_ctlr(x) \ + (struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1) + +#define fcoe_from_ctlr(x) \ + ((struct fcoe_interface *)((x) + 1)) + +/** + * fcoe_netdev() - Return the net device associated with a local port + * @lport: The local port to get the net device from + */ +static inline struct net_device *fcoe_netdev(const struct fc_lport *lport) +{ + return ((struct fcoe_interface *) + ((struct fcoe_port *)lport_priv(lport))->priv)->netdev; +} + +#endif /* _FCOE_H_ */ diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c new file mode 100644 index 000000000..19eee108d --- /dev/null +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -0,0 +1,3258 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2008-2009 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2009 Intel Corporation. All rights reserved. + * + * Maintained at www.Open-FCoE.org + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "libfcoe.h" + +#define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */ +#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */ + +static void fcoe_ctlr_timeout(struct timer_list *); +static void fcoe_ctlr_timer_work(struct work_struct *); +static void fcoe_ctlr_recv_work(struct work_struct *); +static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *); + +static void fcoe_ctlr_vn_start(struct fcoe_ctlr *); +static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *); +static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *); +static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *, u32, u8 *); + +static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *, struct sk_buff *); + +static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; +static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS; +static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS; +static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS; + +static const char * const fcoe_ctlr_states[] = { + [FIP_ST_DISABLED] = "DISABLED", + [FIP_ST_LINK_WAIT] = "LINK_WAIT", + [FIP_ST_AUTO] = "AUTO", + [FIP_ST_NON_FIP] = "NON_FIP", + [FIP_ST_ENABLED] = "ENABLED", + [FIP_ST_VNMP_START] = "VNMP_START", + [FIP_ST_VNMP_PROBE1] = "VNMP_PROBE1", + [FIP_ST_VNMP_PROBE2] = "VNMP_PROBE2", + [FIP_ST_VNMP_CLAIM] = "VNMP_CLAIM", + [FIP_ST_VNMP_UP] = "VNMP_UP", +}; + +static const char *fcoe_ctlr_state(enum fip_state state) +{ + const char *cp = "unknown"; + + if (state < ARRAY_SIZE(fcoe_ctlr_states)) + cp = fcoe_ctlr_states[state]; + if (!cp) + cp = "unknown"; + return cp; +} + 
+/** + * fcoe_ctlr_set_state() - Set and do debug printing for the new FIP state. + * @fip: The FCoE controller + * @state: The new state + */ +static void fcoe_ctlr_set_state(struct fcoe_ctlr *fip, enum fip_state state) +{ + if (state == fip->state) + return; + if (fip->lp) + LIBFCOE_FIP_DBG(fip, "state %s -> %s\n", + fcoe_ctlr_state(fip->state), fcoe_ctlr_state(state)); + fip->state = state; +} + +/** + * fcoe_ctlr_mtu_valid() - Check if a FCF's MTU is valid + * @fcf: The FCF to check + * + * Return non-zero if FCF fcoe_size has been validated. + */ +static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf) +{ + return (fcf->flags & FIP_FL_SOL) != 0; +} + +/** + * fcoe_ctlr_fcf_usable() - Check if a FCF is usable + * @fcf: The FCF to check + * + * Return non-zero if the FCF is usable. + */ +static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf) +{ + u16 flags = FIP_FL_SOL | FIP_FL_AVAIL; + + return (fcf->flags & flags) == flags; +} + +/** + * fcoe_ctlr_map_dest() - Set flag and OUI for mapping destination addresses + * @fip: The FCoE controller + */ +static void fcoe_ctlr_map_dest(struct fcoe_ctlr *fip) +{ + if (fip->mode == FIP_MODE_VN2VN) + hton24(fip->dest_addr, FIP_VN_FC_MAP); + else + hton24(fip->dest_addr, FIP_DEF_FC_MAP); + hton24(fip->dest_addr + 3, 0); + fip->map_dest = 1; +} + +/** + * fcoe_ctlr_init() - Initialize the FCoE Controller instance + * @fip: The FCoE controller to initialize + * @mode: FIP mode to set + */ +void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_mode mode) +{ + fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT); + fip->mode = mode; + fip->fip_resp = false; + INIT_LIST_HEAD(&fip->fcfs); + mutex_init(&fip->ctlr_mutex); + spin_lock_init(&fip->ctlr_lock); + fip->flogi_oxid = FC_XID_UNKNOWN; + timer_setup(&fip->timer, fcoe_ctlr_timeout, 0); + INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work); + INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work); + skb_queue_head_init(&fip->fip_recv_list); +} +EXPORT_SYMBOL(fcoe_ctlr_init); + +/** + * fcoe_sysfs_fcf_add() - Add a fcoe_fcf{,_device} to a fcoe_ctlr{,_device} + * @new: The newly discovered FCF + * + * Called with fip->ctlr_mutex held + */ +static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new) +{ + struct fcoe_ctlr *fip = new->fip; + struct fcoe_ctlr_device *ctlr_dev; + struct fcoe_fcf_device *temp, *fcf_dev; + int rc = -ENOMEM; + + LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n", + new->fabric_name, new->fcf_mac); + + temp = kzalloc(sizeof(*temp), GFP_KERNEL); + if (!temp) + goto out; + + temp->fabric_name = new->fabric_name; + temp->switch_name = new->switch_name; + temp->fc_map = new->fc_map; + temp->vfid = new->vfid; + memcpy(temp->mac, new->fcf_mac, ETH_ALEN); + temp->priority = new->pri; + temp->fka_period = new->fka_period; + temp->selected = 0; /* default to unselected */ + + /* + * If ctlr_dev doesn't exist then it means we're a libfcoe user + * who doesn't use fcoe_syfs and didn't allocate a fcoe_ctlr_device. + * fnic would be an example of a driver with this behavior. In this + * case we want to add the fcoe_fcf to the fcoe_ctlr list, but we + * don't want to make sysfs changes. + */ + + ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip); + if (ctlr_dev) { + mutex_lock(&ctlr_dev->lock); + fcf_dev = fcoe_fcf_device_add(ctlr_dev, temp); + if (unlikely(!fcf_dev)) { + rc = -ENOMEM; + mutex_unlock(&ctlr_dev->lock); + goto out; + } + + /* + * The fcoe_sysfs layer can return a CONNECTED fcf that + * has a priv (fcf was never deleted) or a CONNECTED fcf + * that doesn't have a priv (fcf was deleted). 
However, + * libfcoe will always delete FCFs before trying to add + * them. This is ensured because both recv_adv and + * age_fcfs are protected by the the fcoe_ctlr's mutex. + * This means that we should never get a FCF with a + * non-NULL priv pointer. + */ + BUG_ON(fcf_dev->priv); + + fcf_dev->priv = new; + new->fcf_dev = fcf_dev; + mutex_unlock(&ctlr_dev->lock); + } + + list_add(&new->list, &fip->fcfs); + fip->fcf_count++; + rc = 0; + +out: + kfree(temp); + return rc; +} + +/** + * fcoe_sysfs_fcf_del() - Remove a fcoe_fcf{,_device} to a fcoe_ctlr{,_device} + * @new: The FCF to be removed + * + * Called with fip->ctlr_mutex held + */ +static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new) +{ + struct fcoe_ctlr *fip = new->fip; + struct fcoe_ctlr_device *cdev; + struct fcoe_fcf_device *fcf_dev; + + list_del(&new->list); + fip->fcf_count--; + + /* + * If ctlr_dev doesn't exist then it means we're a libfcoe user + * who doesn't use fcoe_syfs and didn't allocate a fcoe_ctlr_device + * or a fcoe_fcf_device. + * + * fnic would be an example of a driver with this behavior. In this + * case we want to remove the fcoe_fcf from the fcoe_ctlr list (above), + * but we don't want to make sysfs changes. + */ + cdev = fcoe_ctlr_to_ctlr_dev(fip); + if (cdev) { + mutex_lock(&cdev->lock); + fcf_dev = fcoe_fcf_to_fcf_dev(new); + WARN_ON(!fcf_dev); + new->fcf_dev = NULL; + fcoe_fcf_device_delete(fcf_dev); + mutex_unlock(&cdev->lock); + } + kfree(new); +} + +/** + * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller + * @fip: The FCoE controller whose FCFs are to be reset + * + * Called with &fcoe_ctlr lock held. + */ +static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip) +{ + struct fcoe_fcf *fcf; + struct fcoe_fcf *next; + + fip->sel_fcf = NULL; + list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { + fcoe_sysfs_fcf_del(fcf); + } + WARN_ON(fip->fcf_count); + + fip->sel_time = 0; +} + +/** + * fcoe_ctlr_destroy() - Disable and tear down a FCoE controller + * @fip: The FCoE controller to tear down + * + * This is called by FCoE drivers before freeing the &fcoe_ctlr. + * + * The receive handler will have been deleted before this to guarantee + * that no more recv_work will be scheduled. + * + * The timer routine will simply return once we set FIP_ST_DISABLED. + * This guarantees that no further timeouts or work will be scheduled. + */ +void fcoe_ctlr_destroy(struct fcoe_ctlr *fip) +{ + cancel_work_sync(&fip->recv_work); + skb_queue_purge(&fip->fip_recv_list); + + mutex_lock(&fip->ctlr_mutex); + fcoe_ctlr_set_state(fip, FIP_ST_DISABLED); + fcoe_ctlr_reset_fcfs(fip); + mutex_unlock(&fip->ctlr_mutex); + del_timer_sync(&fip->timer); + cancel_work_sync(&fip->timer_work); +} +EXPORT_SYMBOL(fcoe_ctlr_destroy); + +/** + * fcoe_ctlr_announce() - announce new FCF selection + * @fip: The FCoE controller + * + * Also sets the destination MAC for FCoE and control packets + * + * Called with neither ctlr_mutex nor ctlr_lock held. 
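+ * Both locks are taken internally: ctlr_lock around the flogi_req reset and + * ctlr_mutex around the destination MAC update.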
+ */ +static void fcoe_ctlr_announce(struct fcoe_ctlr *fip) +{ + struct fcoe_fcf *sel; + struct fcoe_fcf *fcf; + unsigned long flags; + + mutex_lock(&fip->ctlr_mutex); + spin_lock_irqsave(&fip->ctlr_lock, flags); + + kfree_skb(fip->flogi_req); + fip->flogi_req = NULL; + list_for_each_entry(fcf, &fip->fcfs, list) + fcf->flogi_sent = 0; + + spin_unlock_irqrestore(&fip->ctlr_lock, flags); + sel = fip->sel_fcf; + + if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr)) + goto unlock; + if (!is_zero_ether_addr(fip->dest_addr)) { + printk(KERN_NOTICE "libfcoe: host%d: " + "FIP Fibre-Channel Forwarder MAC %pM deselected\n", + fip->lp->host->host_no, fip->dest_addr); + eth_zero_addr(fip->dest_addr); + } + if (sel) { + printk(KERN_INFO "libfcoe: host%d: FIP selected " + "Fibre-Channel Forwarder MAC %pM\n", + fip->lp->host->host_no, sel->fcf_mac); + memcpy(fip->dest_addr, sel->fcoe_mac, ETH_ALEN); + fip->map_dest = 0; + } +unlock: + mutex_unlock(&fip->ctlr_mutex); +} + +/** + * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port + * @fip: The FCoE controller to get the maximum FCoE size from + * + * Returns the maximum packet size including the FCoE header and trailer, + * but not including any Ethernet or VLAN headers. + */ +static inline u32 fcoe_ctlr_fcoe_size(struct fcoe_ctlr *fip) +{ + /* + * Determine the max FCoE frame size allowed, including + * FCoE header and trailer. + * Note: lp->mfs is currently the payload size, not the frame size. + */ + return fip->lp->mfs + sizeof(struct fc_frame_header) + + sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof); +} + +/** + * fcoe_ctlr_solicit() - Send a FIP solicitation + * @fip: The FCoE controller to send the solicitation on + * @fcf: The destination FCF (if NULL, a multicast solicitation is sent) + */ +static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf) +{ + struct sk_buff *skb; + struct fip_sol { + struct ethhdr eth; + struct fip_header fip; + struct { + struct fip_mac_desc mac; + struct fip_wwn_desc wwnn; + struct fip_size_desc size; + } __packed desc; + } __packed * sol; + u32 fcoe_size; + + skb = dev_alloc_skb(sizeof(*sol)); + if (!skb) + return; + + sol = (struct fip_sol *)skb->data; + + memset(sol, 0, sizeof(*sol)); + memcpy(sol->eth.h_dest, fcf ? 
fcf->fcf_mac : fcoe_all_fcfs, ETH_ALEN); + memcpy(sol->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + sol->eth.h_proto = htons(ETH_P_FIP); + + sol->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + sol->fip.fip_op = htons(FIP_OP_DISC); + sol->fip.fip_subcode = FIP_SC_SOL; + sol->fip.fip_dl_len = htons(sizeof(sol->desc) / FIP_BPW); + sol->fip.fip_flags = htons(FIP_FL_FPMA); + if (fip->spma) + sol->fip.fip_flags |= htons(FIP_FL_SPMA); + + sol->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; + sol->desc.mac.fd_desc.fip_dlen = sizeof(sol->desc.mac) / FIP_BPW; + memcpy(sol->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + sol->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; + sol->desc.wwnn.fd_desc.fip_dlen = sizeof(sol->desc.wwnn) / FIP_BPW; + put_unaligned_be64(fip->lp->wwnn, &sol->desc.wwnn.fd_wwn); + + fcoe_size = fcoe_ctlr_fcoe_size(fip); + sol->desc.size.fd_desc.fip_dtype = FIP_DT_FCOE_SIZE; + sol->desc.size.fd_desc.fip_dlen = sizeof(sol->desc.size) / FIP_BPW; + sol->desc.size.fd_size = htons(fcoe_size); + + skb_put(skb, sizeof(*sol)); + skb->protocol = htons(ETH_P_FIP); + skb->priority = fip->priority; + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + fip->send(fip, skb); + + if (!fcf) + fip->sol_time = jiffies; +} + +/** + * fcoe_ctlr_link_up() - Start FCoE controller + * @fip: The FCoE controller to start + * + * Called from the LLD when the network link is ready. + */ +void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) +{ + mutex_lock(&fip->ctlr_mutex); + if (fip->state == FIP_ST_NON_FIP || fip->state == FIP_ST_AUTO) { + mutex_unlock(&fip->ctlr_mutex); + fc_linkup(fip->lp); + } else if (fip->state == FIP_ST_LINK_WAIT) { + if (fip->mode == FIP_MODE_NON_FIP) + fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP); + else + fcoe_ctlr_set_state(fip, FIP_ST_AUTO); + switch (fip->mode) { + default: + LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode); + fallthrough; + case FIP_MODE_AUTO: + LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n"); + fallthrough; + case FIP_MODE_FABRIC: + case FIP_MODE_NON_FIP: + mutex_unlock(&fip->ctlr_mutex); + fc_linkup(fip->lp); + fcoe_ctlr_solicit(fip, NULL); + break; + case FIP_MODE_VN2VN: + fcoe_ctlr_vn_start(fip); + mutex_unlock(&fip->ctlr_mutex); + fc_linkup(fip->lp); + break; + } + } else + mutex_unlock(&fip->ctlr_mutex); +} +EXPORT_SYMBOL(fcoe_ctlr_link_up); + +/** + * fcoe_ctlr_reset() - Reset a FCoE controller + * @fip: The FCoE controller to reset + */ +static void fcoe_ctlr_reset(struct fcoe_ctlr *fip) +{ + fcoe_ctlr_reset_fcfs(fip); + del_timer(&fip->timer); + fip->ctlr_ka_time = 0; + fip->port_ka_time = 0; + fip->sol_time = 0; + fip->flogi_oxid = FC_XID_UNKNOWN; + fcoe_ctlr_map_dest(fip); +} + +/** + * fcoe_ctlr_link_down() - Stop a FCoE controller + * @fip: The FCoE controller to be stopped + * + * Returns non-zero if the link was up and now isn't. + * + * Called from the LLD when the network link is not ready. + * There may be multiple calls while the link is down. 
+ */ +int fcoe_ctlr_link_down(struct fcoe_ctlr *fip) +{ + int link_dropped; + + LIBFCOE_FIP_DBG(fip, "link down.\n"); + mutex_lock(&fip->ctlr_mutex); + fcoe_ctlr_reset(fip); + link_dropped = fip->state != FIP_ST_LINK_WAIT; + fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT); + mutex_unlock(&fip->ctlr_mutex); + + if (link_dropped) + fc_linkdown(fip->lp); + return link_dropped; +} +EXPORT_SYMBOL(fcoe_ctlr_link_down); + +/** + * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF + * @fip: The FCoE controller to send the FKA on + * @lport: libfc fc_lport to send from + * @ports: 0 for controller keep-alive, 1 for port keep-alive + * @sa: The source MAC address + * + * A controller keep-alive is sent every fka_period (typically 8 seconds). + * The source MAC is the native MAC address. + * + * A port keep-alive is sent every 90 seconds while logged in. + * The source MAC is the assigned mapped source address. + * The destination is the FCF's F-port. + */ +static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, + struct fc_lport *lport, + int ports, u8 *sa) +{ + struct sk_buff *skb; + struct fip_kal { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac; + } __packed * kal; + struct fip_vn_desc *vn; + u32 len; + struct fc_lport *lp; + struct fcoe_fcf *fcf; + + fcf = fip->sel_fcf; + lp = fip->lp; + if (!fcf || (ports && !lp->port_id)) + return; + + len = sizeof(*kal) + ports * sizeof(*vn); + skb = dev_alloc_skb(len); + if (!skb) + return; + + kal = (struct fip_kal *)skb->data; + memset(kal, 0, len); + memcpy(kal->eth.h_dest, fcf->fcf_mac, ETH_ALEN); + memcpy(kal->eth.h_source, sa, ETH_ALEN); + kal->eth.h_proto = htons(ETH_P_FIP); + + kal->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + kal->fip.fip_op = htons(FIP_OP_CTRL); + kal->fip.fip_subcode = FIP_SC_KEEP_ALIVE; + kal->fip.fip_dl_len = htons((sizeof(kal->mac) + + ports * sizeof(*vn)) / FIP_BPW); + kal->fip.fip_flags = htons(FIP_FL_FPMA); + if (fip->spma) + kal->fip.fip_flags |= htons(FIP_FL_SPMA); + + kal->mac.fd_desc.fip_dtype = FIP_DT_MAC; + kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW; + memcpy(kal->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + if (ports) { + vn = (struct fip_vn_desc *)(kal + 1); + vn->fd_desc.fip_dtype = FIP_DT_VN_ID; + vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW; + memcpy(vn->fd_mac, fip->get_src_addr(lport), ETH_ALEN); + hton24(vn->fd_fc_id, lport->port_id); + put_unaligned_be64(lport->wwpn, &vn->fd_wwpn); + } + skb_put(skb, len); + skb->protocol = htons(ETH_P_FIP); + skb->priority = fip->priority; + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + fip->send(fip, skb); +} + +/** + * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it + * @fip: The FCoE controller for the ELS frame + * @lport: The local port + * @dtype: The FIP descriptor type for the frame + * @skb: The FCoE ELS frame including FC header but no FCoE headers + * @d_id: The destination port ID. + * + * Returns non-zero error code on failure. + * + * The caller must check that the length is a multiple of 4. + * + * The @skb must have enough headroom (28 bytes) and tailroom (8 bytes). + * Headroom includes the FIP encapsulation description, FIP header, and + * Ethernet header. The tailroom is for the FIP MAC descriptor. 
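+ *
+ * For reference, the 28 bytes of headroom break down as a 14-byte
+ * Ethernet header, a 10-byte FIP header and a 4-byte FIP encapsulation
+ * descriptor; the 8-byte tailroom holds the MAC address descriptor
+ * appended below (2-byte descriptor header plus a 6-byte MAC).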
+ */ +static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport, + u8 dtype, struct sk_buff *skb, u32 d_id) +{ + struct fip_encaps_head { + struct ethhdr eth; + struct fip_header fip; + struct fip_encaps encaps; + } __packed * cap; + struct fc_frame_header *fh; + struct fip_mac_desc *mac; + struct fcoe_fcf *fcf; + size_t dlen; + u16 fip_flags; + u8 op; + + fh = (struct fc_frame_header *)skb->data; + op = *(u8 *)(fh + 1); + dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */ + cap = skb_push(skb, sizeof(*cap)); + memset(cap, 0, sizeof(*cap)); + + if (lport->point_to_multipoint) { + if (fcoe_ctlr_vn_lookup(fip, d_id, cap->eth.h_dest)) + return -ENODEV; + fip_flags = 0; + } else { + fcf = fip->sel_fcf; + if (!fcf) + return -ENODEV; + fip_flags = fcf->flags; + fip_flags &= fip->spma ? FIP_FL_SPMA | FIP_FL_FPMA : + FIP_FL_FPMA; + if (!fip_flags) + return -ENODEV; + memcpy(cap->eth.h_dest, fcf->fcf_mac, ETH_ALEN); + } + memcpy(cap->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + cap->eth.h_proto = htons(ETH_P_FIP); + + cap->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + cap->fip.fip_op = htons(FIP_OP_LS); + if (op == ELS_LS_ACC || op == ELS_LS_RJT) + cap->fip.fip_subcode = FIP_SC_REP; + else + cap->fip.fip_subcode = FIP_SC_REQ; + cap->fip.fip_flags = htons(fip_flags); + + cap->encaps.fd_desc.fip_dtype = dtype; + cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW; + + if (op != ELS_LS_RJT) { + dlen += sizeof(*mac); + mac = skb_put_zero(skb, sizeof(*mac)); + mac->fd_desc.fip_dtype = FIP_DT_MAC; + mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW; + if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) { + memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN); + } else if (fip->mode == FIP_MODE_VN2VN) { + hton24(mac->fd_mac, FIP_VN_FC_MAP); + hton24(mac->fd_mac + 3, fip->port_id); + } else if (fip_flags & FIP_FL_SPMA) { + LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with SPMA\n"); + memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN); + } else { + LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with FPMA\n"); + /* FPMA only FLOGI. Must leave the MAC desc zeroed. */ + } + } + cap->fip.fip_dl_len = htons(dlen / FIP_BPW); + + skb->protocol = htons(ETH_P_FIP); + skb->priority = fip->priority; + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + return 0; +} + +/** + * fcoe_ctlr_els_send() - Send an ELS frame encapsulated by FIP if appropriate. + * @fip: FCoE controller. + * @lport: libfc fc_lport to send from + * @skb: FCoE ELS frame including FC header but no FCoE headers. + * + * Returns a non-zero error code if the frame should not be sent. + * Returns zero if the caller should send the frame with FCoE encapsulation. + * + * The caller must check that the length is a multiple of 4. + * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes). + * The skb must also be an fc_frame. + * + * This is called from the lower-level driver with spinlocks held, + * so we must not take a mutex here.
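+ *
+ * A rough caller sketch (illustrative only, not any specific LLD):
+ *
+ *	if (fcoe_ctlr_els_send(fip, lport, skb))
+ *		return;	/* FIP sent, deferred or dropped the frame */
+ *	/* otherwise encapsulate and transmit as ordinary FCoE */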
+ */ +int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, + struct sk_buff *skb) +{ + struct fc_frame *fp; + struct fc_frame_header *fh; + unsigned long flags; + u16 old_xid; + u8 op; + u8 mac[ETH_ALEN]; + + fp = container_of(skb, struct fc_frame, skb); + fh = (struct fc_frame_header *)skb->data; + op = *(u8 *)(fh + 1); + + if (op == ELS_FLOGI && fip->mode != FIP_MODE_VN2VN) { + old_xid = fip->flogi_oxid; + fip->flogi_oxid = ntohs(fh->fh_ox_id); + if (fip->state == FIP_ST_AUTO) { + if (old_xid == FC_XID_UNKNOWN) + fip->flogi_count = 0; + fip->flogi_count++; + if (fip->flogi_count < 3) + goto drop; + fcoe_ctlr_map_dest(fip); + return 0; + } + if (fip->state == FIP_ST_NON_FIP) + fcoe_ctlr_map_dest(fip); + } + + if (fip->state == FIP_ST_NON_FIP) + return 0; + if (!fip->sel_fcf && fip->mode != FIP_MODE_VN2VN) + goto drop; + switch (op) { + case ELS_FLOGI: + op = FIP_DT_FLOGI; + if (fip->mode == FIP_MODE_VN2VN) + break; + spin_lock_irqsave(&fip->ctlr_lock, flags); + kfree_skb(fip->flogi_req); + fip->flogi_req = skb; + fip->flogi_req_send = 1; + spin_unlock_irqrestore(&fip->ctlr_lock, flags); + schedule_work(&fip->timer_work); + return -EINPROGRESS; + case ELS_FDISC: + if (ntoh24(fh->fh_s_id)) + return 0; + op = FIP_DT_FDISC; + break; + case ELS_LOGO: + if (fip->mode == FIP_MODE_VN2VN) { + if (fip->state != FIP_ST_VNMP_UP) + goto drop; + if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI) + goto drop; + } else { + if (fip->state != FIP_ST_ENABLED) + return 0; + if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) + return 0; + } + op = FIP_DT_LOGO; + break; + case ELS_LS_ACC: + /* + * If non-FIP, we may have gotten an SID by accepting an FLOGI + * from a point-to-point connection. Switch to using + * the source mac based on the SID. The destination + * MAC in this case would have been set by receiving the + * FLOGI. + */ + if (fip->state == FIP_ST_NON_FIP) { + if (fip->flogi_oxid == FC_XID_UNKNOWN) + return 0; + fip->flogi_oxid = FC_XID_UNKNOWN; + fc_fcoe_set_mac(mac, fh->fh_d_id); + fip->update_mac(lport, mac); + } + fallthrough; + case ELS_LS_RJT: + op = fr_encaps(fp); + if (op) + break; + return 0; + default: + if (fip->state != FIP_ST_ENABLED && + fip->state != FIP_ST_VNMP_UP) + goto drop; + return 0; + } + LIBFCOE_FIP_DBG(fip, "els_send op %u d_id %x\n", + op, ntoh24(fh->fh_d_id)); + if (fcoe_ctlr_encaps(fip, lport, op, skb, ntoh24(fh->fh_d_id))) + goto drop; + fip->send(fip, skb); + return -EINPROGRESS; +drop: + LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n", + op, ntoh24(fh->fh_d_id)); + kfree_skb(skb); + return -EINVAL; +} +EXPORT_SYMBOL(fcoe_ctlr_els_send); + +/** + * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller + * @fip: The FCoE controller to free FCFs on + * + * Called with lock held and preemption disabled. + * + * An FCF is considered old if we have missed two advertisements. + * That is, there have been no valid advertisement from it for 2.5 + * times its keep-alive period. + * + * In addition, determine the time when an FCF selection can occur. + * + * Also, increment the MissDiscAdvCount when no advertisement is received + * for the corresponding FCF for 1.5 * FKA_ADV_PERIOD (FC-BB-5 LESB). + * + * Returns the time in jiffies for the next call. 
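+ *
+ * For example, with an 8-second FKA_ADV_Period the selected FCF starts
+ * counting missed advertisements once 12 seconds (1.5 periods) pass
+ * without one, and any FCF is aged out after 20 seconds (2.5 periods).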
+ */ +static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) +{ + struct fcoe_fcf *fcf; + struct fcoe_fcf *next; + unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); + unsigned long deadline; + unsigned long sel_time = 0; + struct list_head del_list; + + INIT_LIST_HEAD(&del_list); + + list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { + deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; + if (fip->sel_fcf == fcf) { + if (time_after(jiffies, deadline)) { + u64 miss_cnt; + + miss_cnt = this_cpu_inc_return(fip->lp->stats->MissDiscAdvCount); + printk(KERN_INFO "libfcoe: host%d: " + "Missing Discovery Advertisement " + "for fab %16.16llx count %lld\n", + fip->lp->host->host_no, fcf->fabric_name, + miss_cnt); + } else if (time_after(next_timer, deadline)) + next_timer = deadline; + } + + deadline += fcf->fka_period; + if (time_after_eq(jiffies, deadline)) { + if (fip->sel_fcf == fcf) + fip->sel_fcf = NULL; + /* + * Move to delete list so we can call + * fcoe_sysfs_fcf_del (which can sleep) + * after the put_cpu(). + */ + list_del(&fcf->list); + list_add(&fcf->list, &del_list); + this_cpu_inc(fip->lp->stats->VLinkFailureCount); + } else { + if (time_after(next_timer, deadline)) + next_timer = deadline; + if (fcoe_ctlr_mtu_valid(fcf) && + (!sel_time || time_before(sel_time, fcf->time))) + sel_time = fcf->time; + } + } + + list_for_each_entry_safe(fcf, next, &del_list, list) { + /* Removes fcf from current list */ + fcoe_sysfs_fcf_del(fcf); + } + + if (sel_time && !fip->sel_fcf && !fip->sel_time) { + sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY); + fip->sel_time = sel_time; + } + + return next_timer; +} + +/** + * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry + * @fip: The FCoE controller receiving the advertisement + * @skb: The received FIP advertisement frame + * @fcf: The resulting FCF entry + * + * Returns zero on a valid parsed advertisement, + * otherwise returns non zero value. + */ +static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip, + struct sk_buff *skb, struct fcoe_fcf *fcf) +{ + struct fip_header *fiph; + struct fip_desc *desc = NULL; + struct fip_wwn_desc *wwn; + struct fip_fab_desc *fab; + struct fip_fka_desc *fka; + unsigned long t; + size_t rlen; + size_t dlen; + u32 desc_mask; + + memset(fcf, 0, sizeof(*fcf)); + fcf->fka_period = msecs_to_jiffies(FCOE_CTLR_DEF_FKA); + + fiph = (struct fip_header *)skb->data; + fcf->flags = ntohs(fiph->fip_flags); + + /* + * mask of required descriptors. validating each one clears its bit. 
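+ * For example, an advertisement that omits the FKA_ADV_Period
+ * descriptor leaves BIT(FIP_DT_FKA) set here, so the advertisement is
+ * rejected after the loop below.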
+ */ + desc_mask = BIT(FIP_DT_PRI) | BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | + BIT(FIP_DT_FAB) | BIT(FIP_DT_FKA); + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + return -EINVAL; + + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + if (dlen < sizeof(*desc) || dlen > rlen) + return -EINVAL; + /* Drop Adv if there are duplicate critical descriptors */ + if ((desc->fip_dtype < 32) && + !(desc_mask & 1U << desc->fip_dtype)) { + LIBFCOE_FIP_DBG(fip, "Duplicate Critical " + "Descriptors in FIP adv\n"); + return -EINVAL; + } + switch (desc->fip_dtype) { + case FIP_DT_PRI: + if (dlen != sizeof(struct fip_pri_desc)) + goto len_err; + fcf->pri = ((struct fip_pri_desc *)desc)->fd_pri; + desc_mask &= ~BIT(FIP_DT_PRI); + break; + case FIP_DT_MAC: + if (dlen != sizeof(struct fip_mac_desc)) + goto len_err; + memcpy(fcf->fcf_mac, + ((struct fip_mac_desc *)desc)->fd_mac, + ETH_ALEN); + memcpy(fcf->fcoe_mac, fcf->fcf_mac, ETH_ALEN); + if (!is_valid_ether_addr(fcf->fcf_mac)) { + LIBFCOE_FIP_DBG(fip, + "Invalid MAC addr %pM in FIP adv\n", + fcf->fcf_mac); + return -EINVAL; + } + desc_mask &= ~BIT(FIP_DT_MAC); + break; + case FIP_DT_NAME: + if (dlen != sizeof(struct fip_wwn_desc)) + goto len_err; + wwn = (struct fip_wwn_desc *)desc; + fcf->switch_name = get_unaligned_be64(&wwn->fd_wwn); + desc_mask &= ~BIT(FIP_DT_NAME); + break; + case FIP_DT_FAB: + if (dlen != sizeof(struct fip_fab_desc)) + goto len_err; + fab = (struct fip_fab_desc *)desc; + fcf->fabric_name = get_unaligned_be64(&fab->fd_wwn); + fcf->vfid = ntohs(fab->fd_vfid); + fcf->fc_map = ntoh24(fab->fd_map); + desc_mask &= ~BIT(FIP_DT_FAB); + break; + case FIP_DT_FKA: + if (dlen != sizeof(struct fip_fka_desc)) + goto len_err; + fka = (struct fip_fka_desc *)desc; + if (fka->fd_flags & FIP_FKA_ADV_D) + fcf->fd_flags = 1; + t = ntohl(fka->fd_fka_period); + if (t >= FCOE_CTLR_MIN_FKA) + fcf->fka_period = msecs_to_jiffies(t); + desc_mask &= ~BIT(FIP_DT_FKA); + break; + case FIP_DT_MAP_OUI: + case FIP_DT_FCOE_SIZE: + case FIP_DT_FLOGI: + case FIP_DT_FDISC: + case FIP_DT_LOGO: + case FIP_DT_ELP: + default: + LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " + "in FIP adv\n", desc->fip_dtype); + /* standard says ignore unknown descriptors >= 128 */ + if (desc->fip_dtype < FIP_DT_NON_CRITICAL) + return -EINVAL; + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + if (!fcf->fc_map || (fcf->fc_map & 0x10000)) + return -EINVAL; + if (!fcf->switch_name) + return -EINVAL; + if (desc_mask) { + LIBFCOE_FIP_DBG(fip, "adv missing descriptors mask %x\n", + desc_mask); + return -EINVAL; + } + return 0; + +len_err: + LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", + desc->fip_dtype, dlen); + return -EINVAL; +} + +/** + * fcoe_ctlr_recv_adv() - Handle an incoming advertisement + * @fip: The FCoE controller receiving the advertisement + * @skb: The received FIP packet + */ +static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fcoe_fcf *fcf; + struct fcoe_fcf new; + unsigned long sol_tov = msecs_to_jiffies(FCOE_CTLR_SOL_TOV); + int first = 0; + int mtu_valid; + int found = 0; + int rc = 0; + + if (fcoe_ctlr_parse_adv(fip, skb, &new)) + return; + + mutex_lock(&fip->ctlr_mutex); + first = list_empty(&fip->fcfs); + list_for_each_entry(fcf, &fip->fcfs, list) { + if (fcf->switch_name == new.switch_name && + fcf->fabric_name == new.fabric_name && + fcf->fc_map == new.fc_map && + ether_addr_equal(fcf->fcf_mac, 
new.fcf_mac)) { + found = 1; + break; + } + } + if (!found) { + if (fip->fcf_count >= FCOE_CTLR_FCF_LIMIT) + goto out; + + fcf = kmalloc(sizeof(*fcf), GFP_ATOMIC); + if (!fcf) + goto out; + + memcpy(fcf, &new, sizeof(new)); + fcf->fip = fip; + rc = fcoe_sysfs_fcf_add(fcf); + if (rc) { + printk(KERN_ERR "Failed to allocate sysfs instance " + "for FCF, fab %16.16llx mac %pM\n", + new.fabric_name, new.fcf_mac); + kfree(fcf); + goto out; + } + } else { + /* + * Update the FCF's keep-alive descriptor flags. + * Other flag changes from new advertisements are + * ignored after a solicited advertisement is + * received and the FCF is selectable (usable). + */ + fcf->fd_flags = new.fd_flags; + if (!fcoe_ctlr_fcf_usable(fcf)) + fcf->flags = new.flags; + + if (fcf == fip->sel_fcf && !fcf->fd_flags) { + fip->ctlr_ka_time -= fcf->fka_period; + fip->ctlr_ka_time += new.fka_period; + if (time_before(fip->ctlr_ka_time, fip->timer.expires)) + mod_timer(&fip->timer, fip->ctlr_ka_time); + } + fcf->fka_period = new.fka_period; + memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN); + } + + mtu_valid = fcoe_ctlr_mtu_valid(fcf); + fcf->time = jiffies; + if (!found) + LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n", + fcf->fabric_name, fcf->fcf_mac); + + /* + * If this advertisement is not solicited and our max receive size + * hasn't been verified, send a solicited advertisement. + */ + if (!mtu_valid) + fcoe_ctlr_solicit(fip, fcf); + + /* + * If its been a while since we did a solicit, and this is + * the first advertisement we've received, do a multicast + * solicitation to gather as many advertisements as we can + * before selection occurs. + */ + if (first && time_after(jiffies, fip->sol_time + sol_tov)) + fcoe_ctlr_solicit(fip, NULL); + + /* + * Put this FCF at the head of the list for priority among equals. + * This helps in the case of an NPV switch which insists we use + * the FCF that answers multicast solicitations, not the others that + * are sending periodic multicast advertisements. + */ + if (mtu_valid) + list_move(&fcf->list, &fip->fcfs); + + /* + * If this is the first validated FCF, note the time and + * set a timer to trigger selection. 
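+ * The FCOE_CTLR_START_DELAY grace period below gives other FCFs that
+ * answer the solicitations above a chance to be considered before one
+ * is chosen.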
+ */ + if (mtu_valid && !fip->sel_fcf && !fip->sel_time && + fcoe_ctlr_fcf_usable(fcf)) { + fip->sel_time = jiffies + + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + if (!timer_pending(&fip->timer) || + time_before(fip->sel_time, fip->timer.expires)) + mod_timer(&fip->timer, fip->sel_time); + } + +out: + mutex_unlock(&fip->ctlr_mutex); +} + +/** + * fcoe_ctlr_recv_els() - Handle an incoming FIP encapsulated ELS frame + * @fip: The FCoE controller which received the packet + * @skb: The received FIP packet + */ +static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fc_lport *lport = fip->lp; + struct fip_header *fiph; + struct fc_frame *fp = (struct fc_frame *)skb; + struct fc_frame_header *fh = NULL; + struct fip_desc *desc; + struct fip_encaps *els; + struct fcoe_fcf *sel; + enum fip_desc_type els_dtype = 0; + u8 els_op; + u8 sub; + u8 granted_mac[ETH_ALEN] = { 0 }; + size_t els_len = 0; + size_t rlen; + size_t dlen; + u32 desc_mask = 0; + u32 desc_cnt = 0; + + fiph = (struct fip_header *)skb->data; + sub = fiph->fip_subcode; + if (sub != FIP_SC_REQ && sub != FIP_SC_REP) + goto drop; + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + goto drop; + + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + desc_cnt++; + dlen = desc->fip_dlen * FIP_BPW; + if (dlen < sizeof(*desc) || dlen > rlen) + goto drop; + /* Drop ELS if there are duplicate critical descriptors */ + if (desc->fip_dtype < 32) { + if ((desc->fip_dtype != FIP_DT_MAC) && + (desc_mask & 1U << desc->fip_dtype)) { + LIBFCOE_FIP_DBG(fip, "Duplicate Critical " + "Descriptors in FIP ELS\n"); + goto drop; + } + desc_mask |= (1 << desc->fip_dtype); + } + switch (desc->fip_dtype) { + case FIP_DT_MAC: + sel = fip->sel_fcf; + if (desc_cnt == 1) { + LIBFCOE_FIP_DBG(fip, "FIP descriptors " + "received out of order\n"); + goto drop; + } + /* + * Some switch implementations send two MAC descriptors, + * with first MAC(granted_mac) being the FPMA, and the + * second one(fcoe_mac) is used as destination address + * for sending/receiving FCoE packets. FIP traffic is + * sent using fip_mac. For regular switches, both + * fip_mac and fcoe_mac would be the same. 
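+ * With that layout, descriptor 1 is the encapsulated ELS, descriptor 2
+ * carries the granted FPMA (granted_mac), and an optional descriptor 3
+ * supplies the FCoE MAC copied into the selected FCF below.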
+ */ + if (desc_cnt == 2) + memcpy(granted_mac, + ((struct fip_mac_desc *)desc)->fd_mac, + ETH_ALEN); + + if (dlen != sizeof(struct fip_mac_desc)) + goto len_err; + + if ((desc_cnt == 3) && (sel)) + memcpy(sel->fcoe_mac, + ((struct fip_mac_desc *)desc)->fd_mac, + ETH_ALEN); + break; + case FIP_DT_FLOGI: + case FIP_DT_FDISC: + case FIP_DT_LOGO: + case FIP_DT_ELP: + if (desc_cnt != 1) { + LIBFCOE_FIP_DBG(fip, "FIP descriptors " + "received out of order\n"); + goto drop; + } + if (fh) + goto drop; + if (dlen < sizeof(*els) + sizeof(*fh) + 1) + goto len_err; + els_len = dlen - sizeof(*els); + els = (struct fip_encaps *)desc; + fh = (struct fc_frame_header *)(els + 1); + els_dtype = desc->fip_dtype; + break; + default: + LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " + "in FIP adv\n", desc->fip_dtype); + /* standard says ignore unknown descriptors >= 128 */ + if (desc->fip_dtype < FIP_DT_NON_CRITICAL) + goto drop; + if (desc_cnt <= 2) { + LIBFCOE_FIP_DBG(fip, "FIP descriptors " + "received out of order\n"); + goto drop; + } + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + + if (!fh) + goto drop; + els_op = *(u8 *)(fh + 1); + + if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) && + sub == FIP_SC_REP && fip->mode != FIP_MODE_VN2VN) { + if (els_op == ELS_LS_ACC) { + if (!is_valid_ether_addr(granted_mac)) { + LIBFCOE_FIP_DBG(fip, + "Invalid MAC address %pM in FIP ELS\n", + granted_mac); + goto drop; + } + memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN); + + if (fip->flogi_oxid == ntohs(fh->fh_ox_id)) { + fip->flogi_oxid = FC_XID_UNKNOWN; + if (els_dtype == FIP_DT_FLOGI) + fcoe_ctlr_announce(fip); + } + } else if (els_dtype == FIP_DT_FLOGI && + !fcoe_ctlr_flogi_retry(fip)) + goto drop; /* retrying FLOGI so drop reject */ + } + + if ((desc_cnt == 0) || ((els_op != ELS_LS_RJT) && + (!(1U << FIP_DT_MAC & desc_mask)))) { + LIBFCOE_FIP_DBG(fip, "Missing critical descriptors " + "in FIP ELS\n"); + goto drop; + } + + /* + * Convert skb into an fc_frame containing only the ELS. + */ + skb_pull(skb, (u8 *)fh - skb->data); + skb_trim(skb, els_len); + fp = (struct fc_frame *)skb; + fc_frame_init(fp); + fr_sof(fp) = FC_SOF_I3; + fr_eof(fp) = FC_EOF_T; + fr_dev(fp) = lport; + fr_encaps(fp) = els_dtype; + + this_cpu_inc(lport->stats->RxFrames); + this_cpu_add(lport->stats->RxWords, skb->len / FIP_BPW); + + fc_exch_recv(lport, fp); + return; + +len_err: + LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", + desc->fip_dtype, dlen); +drop: + kfree_skb(skb); +} + +/** + * fcoe_ctlr_recv_clr_vlink() - Handle an incoming link reset frame + * @fip: The FCoE controller that received the frame + * @skb: The received FIP packet + * + * There may be multiple VN_Port descriptors. + * The overall length has already been checked. + */ +static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, + struct sk_buff *skb) +{ + struct fip_desc *desc; + struct fip_mac_desc *mp; + struct fip_wwn_desc *wp; + struct fip_vn_desc *vp; + size_t rlen; + size_t dlen; + struct fcoe_fcf *fcf = fip->sel_fcf; + struct fc_lport *lport = fip->lp; + struct fc_lport *vn_port = NULL; + u32 desc_mask; + int num_vlink_desc; + int reset_phys_port = 0; + struct fip_vn_desc **vlink_desc_arr = NULL; + struct fip_header *fh = (struct fip_header *)skb->data; + struct ethhdr *eh = eth_hdr(skb); + + LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n"); + + if (!fcf) { + /* + * We are yet to select best FCF, but we got CVL in the + * meantime. 
reset the ctlr and let it rediscover the FCF + */ + LIBFCOE_FIP_DBG(fip, "Resetting fcoe_ctlr as FCF has not been " + "selected yet\n"); + mutex_lock(&fip->ctlr_mutex); + fcoe_ctlr_reset(fip); + mutex_unlock(&fip->ctlr_mutex); + return; + } + + /* + * If we've selected an FCF check that the CVL is from there to avoid + * processing CVLs from an unexpected source. If it is from an + * unexpected source drop it on the floor. + */ + if (!ether_addr_equal(eh->h_source, fcf->fcf_mac)) { + LIBFCOE_FIP_DBG(fip, "Dropping CVL due to source address " + "mismatch with FCF src=%pM\n", eh->h_source); + return; + } + + /* + * If we haven't logged into the fabric but receive a CVL we should + * reset everything and go back to solicitation. + */ + if (!lport->port_id) { + LIBFCOE_FIP_DBG(fip, "lport not logged in, resoliciting\n"); + mutex_lock(&fip->ctlr_mutex); + fcoe_ctlr_reset(fip); + mutex_unlock(&fip->ctlr_mutex); + fc_lport_reset(fip->lp); + fcoe_ctlr_solicit(fip, NULL); + return; + } + + /* + * mask of required descriptors. Validating each one clears its bit. + */ + desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME); + + rlen = ntohs(fh->fip_dl_len) * FIP_BPW; + desc = (struct fip_desc *)(fh + 1); + + /* + * Actually need to subtract 'sizeof(*mp) - sizeof(*wp)' from 'rlen' + * before determining max Vx_Port descriptor but a buggy FCF could have + * omitted either or both MAC Address and Name Identifier descriptors + */ + num_vlink_desc = rlen / sizeof(*vp); + if (num_vlink_desc) + vlink_desc_arr = kmalloc_array(num_vlink_desc, sizeof(vp), + GFP_ATOMIC); + if (!vlink_desc_arr) + return; + num_vlink_desc = 0; + + while (rlen >= sizeof(*desc)) { + dlen = desc->fip_dlen * FIP_BPW; + if (dlen > rlen) + goto err; + /* Drop CVL if there are duplicate critical descriptors */ + if ((desc->fip_dtype < 32) && + (desc->fip_dtype != FIP_DT_VN_ID) && + !(desc_mask & 1U << desc->fip_dtype)) { + LIBFCOE_FIP_DBG(fip, "Duplicate Critical " + "Descriptors in FIP CVL\n"); + goto err; + } + switch (desc->fip_dtype) { + case FIP_DT_MAC: + mp = (struct fip_mac_desc *)desc; + if (dlen < sizeof(*mp)) + goto err; + if (!ether_addr_equal(mp->fd_mac, fcf->fcf_mac)) + goto err; + desc_mask &= ~BIT(FIP_DT_MAC); + break; + case FIP_DT_NAME: + wp = (struct fip_wwn_desc *)desc; + if (dlen < sizeof(*wp)) + goto err; + if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name) + goto err; + desc_mask &= ~BIT(FIP_DT_NAME); + break; + case FIP_DT_VN_ID: + vp = (struct fip_vn_desc *)desc; + if (dlen < sizeof(*vp)) + goto err; + vlink_desc_arr[num_vlink_desc++] = vp; + vn_port = fc_vport_id_lookup(lport, + ntoh24(vp->fd_fc_id)); + if (vn_port && (vn_port == lport)) { + mutex_lock(&fip->ctlr_mutex); + this_cpu_inc(lport->stats->VLinkFailureCount); + fcoe_ctlr_reset(fip); + mutex_unlock(&fip->ctlr_mutex); + } + break; + default: + /* standard says ignore unknown descriptors >= 128 */ + if (desc->fip_dtype < FIP_DT_NON_CRITICAL) + goto err; + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + + /* + * reset only if all required descriptors were present and valid. + */ + if (desc_mask) + LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n", + desc_mask); + else if (!num_vlink_desc) { + LIBFCOE_FIP_DBG(fip, "CVL: no Vx_Port descriptor found\n"); + /* + * No Vx_Port description. 
Clear all NPIV ports, + * followed by physical port + */ + mutex_lock(&fip->ctlr_mutex); + this_cpu_inc(lport->stats->VLinkFailureCount); + fcoe_ctlr_reset(fip); + mutex_unlock(&fip->ctlr_mutex); + + mutex_lock(&lport->lp_mutex); + list_for_each_entry(vn_port, &lport->vports, list) + fc_lport_reset(vn_port); + mutex_unlock(&lport->lp_mutex); + + fc_lport_reset(fip->lp); + fcoe_ctlr_solicit(fip, NULL); + } else { + int i; + + LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n"); + for (i = 0; i < num_vlink_desc; i++) { + vp = vlink_desc_arr[i]; + vn_port = fc_vport_id_lookup(lport, + ntoh24(vp->fd_fc_id)); + if (!vn_port) + continue; + + /* + * 'port_id' is already validated, check MAC address and + * wwpn + */ + if (!ether_addr_equal(fip->get_src_addr(vn_port), + vp->fd_mac) || + get_unaligned_be64(&vp->fd_wwpn) != + vn_port->wwpn) + continue; + + if (vn_port == lport) + /* + * Physical port, defer processing till all + * listed NPIV ports are cleared + */ + reset_phys_port = 1; + else /* NPIV port */ + fc_lport_reset(vn_port); + } + + if (reset_phys_port) { + fc_lport_reset(fip->lp); + fcoe_ctlr_solicit(fip, NULL); + } + } + +err: + kfree(vlink_desc_arr); +} + +/** + * fcoe_ctlr_recv() - Receive a FIP packet + * @fip: The FCoE controller that received the packet + * @skb: The received FIP packet + * + * This may be called from either NET_RX_SOFTIRQ or IRQ. + */ +void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return; + skb_queue_tail(&fip->fip_recv_list, skb); + schedule_work(&fip->recv_work); +} +EXPORT_SYMBOL(fcoe_ctlr_recv); + +/** + * fcoe_ctlr_recv_handler() - Receive a FIP frame + * @fip: The FCoE controller that received the frame + * @skb: The received FIP frame + * + * Returns non-zero if the frame is dropped. 
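+ *
+ * Unlike fcoe_ctlr_recv(), which only queues the frame from atomic
+ * context, this handler runs from the recv_work worker and may
+ * therefore take ctlr_mutex and call into libfc.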
+ */ +static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fip_header *fiph; + struct ethhdr *eh; + enum fip_state state; + bool fip_vlan_resp = false; + u16 op; + u8 sub; + + if (skb_linearize(skb)) + goto drop; + if (skb->len < sizeof(*fiph)) + goto drop; + eh = eth_hdr(skb); + if (fip->mode == FIP_MODE_VN2VN) { + if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) && + !ether_addr_equal(eh->h_dest, fcoe_all_vn2vn) && + !ether_addr_equal(eh->h_dest, fcoe_all_p2p)) + goto drop; + } else if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) && + !ether_addr_equal(eh->h_dest, fcoe_all_enode)) + goto drop; + fiph = (struct fip_header *)skb->data; + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) + goto drop; + if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) + goto drop; + + mutex_lock(&fip->ctlr_mutex); + state = fip->state; + if (state == FIP_ST_AUTO) { + fip->map_dest = 0; + fcoe_ctlr_set_state(fip, FIP_ST_ENABLED); + state = FIP_ST_ENABLED; + LIBFCOE_FIP_DBG(fip, "Using FIP mode\n"); + } + fip_vlan_resp = fip->fip_resp; + mutex_unlock(&fip->ctlr_mutex); + + if (fip->mode == FIP_MODE_VN2VN && op == FIP_OP_VN2VN) + return fcoe_ctlr_vn_recv(fip, skb); + + if (fip_vlan_resp && op == FIP_OP_VLAN) { + LIBFCOE_FIP_DBG(fip, "fip vlan discovery\n"); + return fcoe_ctlr_vlan_recv(fip, skb); + } + + if (state != FIP_ST_ENABLED && state != FIP_ST_VNMP_UP && + state != FIP_ST_VNMP_CLAIM) + goto drop; + + if (op == FIP_OP_LS) { + fcoe_ctlr_recv_els(fip, skb); /* consumes skb */ + return 0; + } + + if (state != FIP_ST_ENABLED) + goto drop; + + if (op == FIP_OP_DISC && sub == FIP_SC_ADV) + fcoe_ctlr_recv_adv(fip, skb); + else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) + fcoe_ctlr_recv_clr_vlink(fip, skb); + kfree_skb(skb); + return 0; +drop: + kfree_skb(skb); + return -1; +} + +/** + * fcoe_ctlr_select() - Select the best FCF (if possible) + * @fip: The FCoE controller + * + * Returns the selected FCF, or NULL if none are usable. + * + * If there are conflicting advertisements, no FCF can be chosen. + * + * If there is already a selected FCF, this will choose a better one or + * an equivalent one that hasn't already been sent a FLOGI. + * + * Called with lock held. + */ +static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip) +{ + struct fcoe_fcf *fcf; + struct fcoe_fcf *best = fip->sel_fcf; + + list_for_each_entry(fcf, &fip->fcfs, list) { + LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx " + "VFID %d mac %pM map %x val %d " + "sent %u pri %u\n", + fcf->fabric_name, fcf->vfid, fcf->fcf_mac, + fcf->fc_map, fcoe_ctlr_mtu_valid(fcf), + fcf->flogi_sent, fcf->pri); + if (!fcoe_ctlr_fcf_usable(fcf)) { + LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx " + "map %x %svalid %savailable\n", + fcf->fabric_name, fcf->fc_map, + (fcf->flags & FIP_FL_SOL) ? "" : "in", + (fcf->flags & FIP_FL_AVAIL) ? 
+ "" : "un"); + continue; + } + if (!best || fcf->pri < best->pri || best->flogi_sent) + best = fcf; + if (fcf->fabric_name != best->fabric_name || + fcf->vfid != best->vfid || + fcf->fc_map != best->fc_map) { + LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " + "or FC-MAP\n"); + return NULL; + } + } + fip->sel_fcf = best; + if (best) { + LIBFCOE_FIP_DBG(fip, "using FCF mac %pM\n", best->fcf_mac); + fip->port_ka_time = jiffies + + msecs_to_jiffies(FIP_VN_KA_PERIOD); + fip->ctlr_ka_time = jiffies + best->fka_period; + if (time_before(fip->ctlr_ka_time, fip->timer.expires)) + mod_timer(&fip->timer, fip->ctlr_ka_time); + } + return best; +} + +/** + * fcoe_ctlr_flogi_send_locked() - send FIP-encapsulated FLOGI to current FCF + * @fip: The FCoE controller + * + * Returns non-zero error if it could not be sent. + * + * Called with ctlr_mutex and ctlr_lock held. + * Caller must verify that fip->sel_fcf is not NULL. + */ +static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip) +{ + struct sk_buff *skb; + struct sk_buff *skb_orig; + struct fc_frame_header *fh; + int error; + + skb_orig = fip->flogi_req; + if (!skb_orig) + return -EINVAL; + + /* + * Clone and send the FLOGI request. If clone fails, use original. + */ + skb = skb_clone(skb_orig, GFP_ATOMIC); + if (!skb) { + skb = skb_orig; + fip->flogi_req = NULL; + } + fh = (struct fc_frame_header *)skb->data; + error = fcoe_ctlr_encaps(fip, fip->lp, FIP_DT_FLOGI, skb, + ntoh24(fh->fh_d_id)); + if (error) { + kfree_skb(skb); + return error; + } + fip->send(fip, skb); + fip->sel_fcf->flogi_sent = 1; + return 0; +} + +/** + * fcoe_ctlr_flogi_retry() - resend FLOGI request to a new FCF if possible + * @fip: The FCoE controller + * + * Returns non-zero error code if there's no FLOGI request to retry or + * no alternate FCF available. + */ +static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) +{ + struct fcoe_fcf *fcf; + unsigned long flags; + int error; + + mutex_lock(&fip->ctlr_mutex); + spin_lock_irqsave(&fip->ctlr_lock, flags); + LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n"); + fcf = fcoe_ctlr_select(fip); + if (!fcf || fcf->flogi_sent) { + kfree_skb(fip->flogi_req); + fip->flogi_req = NULL; + error = -ENOENT; + } else { + fcoe_ctlr_solicit(fip, NULL); + error = fcoe_ctlr_flogi_send_locked(fip); + } + spin_unlock_irqrestore(&fip->ctlr_lock, flags); + mutex_unlock(&fip->ctlr_mutex); + return error; +} + + +/** + * fcoe_ctlr_flogi_send() - Handle sending of FIP FLOGI. + * @fip: The FCoE controller that timed out + * + * Done here because fcoe_ctlr_els_send() can't get mutex. + * + * Called with ctlr_mutex held. The caller must not hold ctlr_lock. + */ +static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip) +{ + struct fcoe_fcf *fcf; + unsigned long flags; + + spin_lock_irqsave(&fip->ctlr_lock, flags); + fcf = fip->sel_fcf; + if (!fcf || !fip->flogi_req_send) + goto unlock; + + LIBFCOE_FIP_DBG(fip, "sending FLOGI\n"); + + /* + * If this FLOGI is being sent due to a timeout retry + * to the same FCF as before, select a different FCF if possible. 
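+ * If every known FCF has already been tried (flogi_sent is set on all
+ * of them), the flags are cleared and selection is repeated so the
+ * retry is not starved.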
+ */ + if (fcf->flogi_sent) { + LIBFCOE_FIP_DBG(fip, "sending FLOGI - reselect\n"); + fcf = fcoe_ctlr_select(fip); + if (!fcf || fcf->flogi_sent) { + LIBFCOE_FIP_DBG(fip, "sending FLOGI - clearing\n"); + list_for_each_entry(fcf, &fip->fcfs, list) + fcf->flogi_sent = 0; + fcf = fcoe_ctlr_select(fip); + } + } + if (fcf) { + fcoe_ctlr_flogi_send_locked(fip); + fip->flogi_req_send = 0; + } else /* XXX */ + LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n"); +unlock: + spin_unlock_irqrestore(&fip->ctlr_lock, flags); +} + +/** + * fcoe_ctlr_timeout() - FIP timeout handler + * @t: Timer context use to obtain the controller reference + */ +static void fcoe_ctlr_timeout(struct timer_list *t) +{ + struct fcoe_ctlr *fip = from_timer(fip, t, timer); + + schedule_work(&fip->timer_work); +} + +/** + * fcoe_ctlr_timer_work() - Worker thread function for timer work + * @work: Handle to a FCoE controller + * + * Ages FCFs. Triggers FCF selection if possible. + * Sends keep-alives and resets. + */ +static void fcoe_ctlr_timer_work(struct work_struct *work) +{ + struct fcoe_ctlr *fip; + struct fc_lport *vport; + u8 *mac; + u8 reset = 0; + u8 send_ctlr_ka = 0; + u8 send_port_ka = 0; + struct fcoe_fcf *sel; + struct fcoe_fcf *fcf; + unsigned long next_timer; + + fip = container_of(work, struct fcoe_ctlr, timer_work); + if (fip->mode == FIP_MODE_VN2VN) + return fcoe_ctlr_vn_timeout(fip); + mutex_lock(&fip->ctlr_mutex); + if (fip->state == FIP_ST_DISABLED) { + mutex_unlock(&fip->ctlr_mutex); + return; + } + + fcf = fip->sel_fcf; + next_timer = fcoe_ctlr_age_fcfs(fip); + + sel = fip->sel_fcf; + if (!sel && fip->sel_time) { + if (time_after_eq(jiffies, fip->sel_time)) { + sel = fcoe_ctlr_select(fip); + fip->sel_time = 0; + } else if (time_after(next_timer, fip->sel_time)) + next_timer = fip->sel_time; + } + + if (sel && fip->flogi_req_send) + fcoe_ctlr_flogi_send(fip); + else if (!sel && fcf) + reset = 1; + + if (sel && !sel->fd_flags) { + if (time_after_eq(jiffies, fip->ctlr_ka_time)) { + fip->ctlr_ka_time = jiffies + sel->fka_period; + send_ctlr_ka = 1; + } + if (time_after(next_timer, fip->ctlr_ka_time)) + next_timer = fip->ctlr_ka_time; + + if (time_after_eq(jiffies, fip->port_ka_time)) { + fip->port_ka_time = jiffies + + msecs_to_jiffies(FIP_VN_KA_PERIOD); + send_port_ka = 1; + } + if (time_after(next_timer, fip->port_ka_time)) + next_timer = fip->port_ka_time; + } + if (!list_empty(&fip->fcfs)) + mod_timer(&fip->timer, next_timer); + mutex_unlock(&fip->ctlr_mutex); + + if (reset) { + fc_lport_reset(fip->lp); + /* restart things with a solicitation */ + fcoe_ctlr_solicit(fip, NULL); + } + + if (send_ctlr_ka) + fcoe_ctlr_send_keep_alive(fip, NULL, 0, fip->ctl_src_addr); + + if (send_port_ka) { + mutex_lock(&fip->lp->lp_mutex); + mac = fip->get_src_addr(fip->lp); + fcoe_ctlr_send_keep_alive(fip, fip->lp, 1, mac); + list_for_each_entry(vport, &fip->lp->vports, list) { + mac = fip->get_src_addr(vport); + fcoe_ctlr_send_keep_alive(fip, vport, 1, mac); + } + mutex_unlock(&fip->lp->lp_mutex); + } +} + +/** + * fcoe_ctlr_recv_work() - Worker thread function for receiving FIP frames + * @recv_work: Handle to a FCoE controller + */ +static void fcoe_ctlr_recv_work(struct work_struct *recv_work) +{ + struct fcoe_ctlr *fip; + struct sk_buff *skb; + + fip = container_of(recv_work, struct fcoe_ctlr, recv_work); + while ((skb = skb_dequeue(&fip->fip_recv_list))) + fcoe_ctlr_recv_handler(fip, skb); +} + +/** + * fcoe_ctlr_recv_flogi() - Snoop pre-FIP receipt of FLOGI response + * @fip: The FCoE controller + * @lport: The 
local port + * @fp: The FC frame to snoop + * + * Snoop potential response to FLOGI or even incoming FLOGI. + * + * The caller has checked that we are waiting for login as indicated + * by fip->flogi_oxid != FC_XID_UNKNOWN. + * + * The caller is responsible for freeing the frame. + * Fill in the granted_mac address. + * + * Return non-zero if the frame should not be delivered to libfc. + */ +int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport, + struct fc_frame *fp) +{ + struct fc_frame_header *fh; + u8 op; + u8 *sa; + + sa = eth_hdr(&fp->skb)->h_source; + fh = fc_frame_header_get(fp); + if (fh->fh_type != FC_TYPE_ELS) + return 0; + + op = fc_frame_payload_op(fp); + if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP && + fip->flogi_oxid == ntohs(fh->fh_ox_id)) { + + mutex_lock(&fip->ctlr_mutex); + if (fip->state != FIP_ST_AUTO && fip->state != FIP_ST_NON_FIP) { + mutex_unlock(&fip->ctlr_mutex); + return -EINVAL; + } + fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP); + LIBFCOE_FIP_DBG(fip, + "received FLOGI LS_ACC using non-FIP mode\n"); + + /* + * FLOGI accepted. + * If the src mac addr is FC_OUI-based, then we mark the + * address_mode flag to use FC_OUI-based Ethernet DA. + * Otherwise we use the FCoE gateway address. + */ + if (ether_addr_equal(sa, (u8[6])FC_FCOE_FLOGI_MAC)) { + fcoe_ctlr_map_dest(fip); + } else { + memcpy(fip->dest_addr, sa, ETH_ALEN); + fip->map_dest = 0; + } + fip->flogi_oxid = FC_XID_UNKNOWN; + mutex_unlock(&fip->ctlr_mutex); + fc_fcoe_set_mac(fr_cb(fp)->granted_mac, fh->fh_d_id); + } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) { + /* + * Save source MAC for point-to-point responses. + */ + mutex_lock(&fip->ctlr_mutex); + if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) { + memcpy(fip->dest_addr, sa, ETH_ALEN); + fip->map_dest = 0; + if (fip->state == FIP_ST_AUTO) + LIBFCOE_FIP_DBG(fip, "received non-FIP FLOGI. " + "Setting non-FIP mode\n"); + fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP); + } + mutex_unlock(&fip->ctlr_mutex); + } + return 0; +} +EXPORT_SYMBOL(fcoe_ctlr_recv_flogi); + +/** + * fcoe_wwn_from_mac() - Converts a 48-bit IEEE MAC address to a 64-bit FC WWN + * @mac: The MAC address to convert + * @scheme: The scheme to use when converting + * @port: The port indicator for converting + * + * Returns: u64 FC World Wide Name + */ +u64 fcoe_wwn_from_mac(unsigned char mac[ETH_ALEN], + unsigned int scheme, unsigned int port) +{ + u64 wwn; + u64 host_mac; + + /* The MAC is in network byte order, so it supplies the low 48 bits of the WWN */ + host_mac = ((u64) mac[0] << 40) | + ((u64) mac[1] << 32) | + ((u64) mac[2] << 24) | + ((u64) mac[3] << 16) | + ((u64) mac[4] << 8) | + (u64) mac[5]; + + WARN_ON(host_mac >= (1ULL << 48)); + wwn = host_mac | ((u64) scheme << 60); + switch (scheme) { + case 1: + WARN_ON(port != 0); + break; + case 2: + WARN_ON(port >= 0xfff); + wwn |= (u64) port << 48; + break; + default: + WARN_ON(1); + break; + } + + return wwn; +} +EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); + +/** + * fcoe_ctlr_rport() - return the fcoe_rport for a given fc_rport_priv + * @rdata: libfc remote port + */ +static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata) +{ + return container_of(rdata, struct fcoe_rport, rdata); +} + +/** + * fcoe_ctlr_vn_send() - Send a FIP VN2VN Probe Request or Reply. + * @fip: The FCoE controller + * @sub: sub-opcode for probe request, reply, or advertisement.
+ * @dest: The destination Ethernet MAC address + * @min_len: minimum size of the Ethernet payload to be sent + */ +static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip, + enum fip_vn2vn_subcode sub, + const u8 *dest, size_t min_len) +{ + struct sk_buff *skb; + struct fip_vn2vn_probe_frame { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac; + struct fip_wwn_desc wwnn; + struct fip_vn_desc vn; + } __packed * frame; + struct fip_fc4_feat *ff; + struct fip_size_desc *size; + u32 fcp_feat; + size_t len; + size_t dlen; + + len = sizeof(*frame); + dlen = 0; + if (sub == FIP_SC_VN_CLAIM_NOTIFY || sub == FIP_SC_VN_CLAIM_REP) { + dlen = sizeof(struct fip_fc4_feat) + + sizeof(struct fip_size_desc); + len += dlen; + } + dlen += sizeof(frame->mac) + sizeof(frame->wwnn) + sizeof(frame->vn); + len = max(len, min_len + sizeof(struct ethhdr)); + + skb = dev_alloc_skb(len); + if (!skb) + return; + + frame = (struct fip_vn2vn_probe_frame *)skb->data; + memset(frame, 0, len); + memcpy(frame->eth.h_dest, dest, ETH_ALEN); + + if (sub == FIP_SC_VN_BEACON) { + hton24(frame->eth.h_source, FIP_VN_FC_MAP); + hton24(frame->eth.h_source + 3, fip->port_id); + } else { + memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + } + frame->eth.h_proto = htons(ETH_P_FIP); + + frame->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + frame->fip.fip_op = htons(FIP_OP_VN2VN); + frame->fip.fip_subcode = sub; + frame->fip.fip_dl_len = htons(dlen / FIP_BPW); + + frame->mac.fd_desc.fip_dtype = FIP_DT_MAC; + frame->mac.fd_desc.fip_dlen = sizeof(frame->mac) / FIP_BPW; + memcpy(frame->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + frame->wwnn.fd_desc.fip_dtype = FIP_DT_NAME; + frame->wwnn.fd_desc.fip_dlen = sizeof(frame->wwnn) / FIP_BPW; + put_unaligned_be64(fip->lp->wwnn, &frame->wwnn.fd_wwn); + + frame->vn.fd_desc.fip_dtype = FIP_DT_VN_ID; + frame->vn.fd_desc.fip_dlen = sizeof(frame->vn) / FIP_BPW; + hton24(frame->vn.fd_mac, FIP_VN_FC_MAP); + hton24(frame->vn.fd_mac + 3, fip->port_id); + hton24(frame->vn.fd_fc_id, fip->port_id); + put_unaligned_be64(fip->lp->wwpn, &frame->vn.fd_wwpn); + + /* + * For claims, add FC-4 features. + * TBD: Add interface to get fc-4 types and features from libfc. + */ + if (sub == FIP_SC_VN_CLAIM_NOTIFY || sub == FIP_SC_VN_CLAIM_REP) { + ff = (struct fip_fc4_feat *)(frame + 1); + ff->fd_desc.fip_dtype = FIP_DT_FC4F; + ff->fd_desc.fip_dlen = sizeof(*ff) / FIP_BPW; + ff->fd_fts = fip->lp->fcts; + + fcp_feat = 0; + if (fip->lp->service_params & FCP_SPPF_INIT_FCN) + fcp_feat |= FCP_FEAT_INIT; + if (fip->lp->service_params & FCP_SPPF_TARG_FCN) + fcp_feat |= FCP_FEAT_TARG; + fcp_feat <<= (FC_TYPE_FCP * 4) % 32; + ff->fd_ff.fd_feat[FC_TYPE_FCP * 4 / 32] = htonl(fcp_feat); + + size = (struct fip_size_desc *)(ff + 1); + size->fd_desc.fip_dtype = FIP_DT_FCOE_SIZE; + size->fd_desc.fip_dlen = sizeof(*size) / FIP_BPW; + size->fd_size = htons(fcoe_ctlr_fcoe_size(fip)); + } + + skb_put(skb, len); + skb->protocol = htons(ETH_P_FIP); + skb->priority = fip->priority; + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + + fip->send(fip, skb); +} + +/** + * fcoe_ctlr_vn_rport_callback - Event handler for rport events. + * @lport: The lport which is receiving the event + * @rdata: remote port private data + * @event: The event that occurred + * + * Locking Note: The rport lock must not be held when calling this function. 
+ */ +static void fcoe_ctlr_vn_rport_callback(struct fc_lport *lport, + struct fc_rport_priv *rdata, + enum fc_rport_event event) +{ + struct fcoe_ctlr *fip = lport->disc.priv; + struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); + + LIBFCOE_FIP_DBG(fip, "vn_rport_callback %x event %d\n", + rdata->ids.port_id, event); + + mutex_lock(&fip->ctlr_mutex); + switch (event) { + case RPORT_EV_READY: + frport->login_count = 0; + break; + case RPORT_EV_LOGO: + case RPORT_EV_FAILED: + case RPORT_EV_STOP: + frport->login_count++; + if (frport->login_count > FCOE_CTLR_VN2VN_LOGIN_LIMIT) { + LIBFCOE_FIP_DBG(fip, + "rport FLOGI limited port_id %6.6x\n", + rdata->ids.port_id); + fc_rport_logoff(rdata); + } + break; + default: + break; + } + mutex_unlock(&fip->ctlr_mutex); +} + +static struct fc_rport_operations fcoe_ctlr_vn_rport_ops = { + .event_callback = fcoe_ctlr_vn_rport_callback, +}; + +/** + * fcoe_ctlr_disc_stop_locked() - stop discovery in VN2VN mode + * @lport: The local port + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport) +{ + struct fc_rport_priv *rdata; + + mutex_lock(&lport->disc.disc_mutex); + list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { + if (kref_get_unless_zero(&rdata->kref)) { + fc_rport_logoff(rdata); + kref_put(&rdata->kref, fc_rport_destroy); + } + } + lport->disc.disc_callback = NULL; + mutex_unlock(&lport->disc.disc_mutex); +} + +/** + * fcoe_ctlr_disc_stop() - stop discovery in VN2VN mode + * @lport: The local port + * + * Called through the local port template for discovery. + * Called without the ctlr_mutex held. + */ +static void fcoe_ctlr_disc_stop(struct fc_lport *lport) +{ + struct fcoe_ctlr *fip = lport->disc.priv; + + mutex_lock(&fip->ctlr_mutex); + fcoe_ctlr_disc_stop_locked(lport); + mutex_unlock(&fip->ctlr_mutex); +} + +/** + * fcoe_ctlr_disc_stop_final() - stop discovery for shutdown in VN2VN mode + * @lport: The local port + * + * Called through the local port template for discovery. + * Called without the ctlr_mutex held. + */ +static void fcoe_ctlr_disc_stop_final(struct fc_lport *lport) +{ + fcoe_ctlr_disc_stop(lport); + fc_rport_flush_queue(); + synchronize_rcu(); +} + +/** + * fcoe_ctlr_vn_restart() - VN2VN probe restart with new port_id + * @fip: The FCoE controller + * + * Called with fcoe_ctlr lock held. + */ +static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip) +{ + unsigned long wait; + u32 port_id; + + fcoe_ctlr_disc_stop_locked(fip->lp); + + /* + * Get proposed port ID. + * If this is the first try after link up, use any previous port_id. + * If there was none, use the low bits of the port_name. + * On subsequent tries, get the next random one. + * Don't use reserved IDs, use another non-zero value, just as random. + */ + port_id = fip->port_id; + if (fip->probe_tries) + port_id = prandom_u32_state(&fip->rnd_state) & 0xffff; + else if (!port_id) + port_id = fip->lp->wwpn & 0xffff; + if (!port_id || port_id == 0xffff) + port_id = 1; + fip->port_id = port_id; + + if (fip->probe_tries < FIP_VN_RLIM_COUNT) { + fip->probe_tries++; + wait = get_random_u32_below(FIP_VN_PROBE_WAIT); + } else + wait = FIP_VN_RLIM_INT; + mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait)); + fcoe_ctlr_set_state(fip, FIP_ST_VNMP_START); +} + +/** + * fcoe_ctlr_vn_start() - Start in VN2VN mode + * @fip: The FCoE controller + * + * Called with fcoe_ctlr lock held. 
+ */ +static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip) +{ + fip->probe_tries = 0; + prandom_seed_state(&fip->rnd_state, fip->lp->wwpn); + fcoe_ctlr_vn_restart(fip); +} + +/** + * fcoe_ctlr_vn_parse - parse probe request or response + * @fip: The FCoE controller + * @skb: incoming packet + * @frport: parsed FCoE rport from the probe request + * + * Returns non-zero error number on error. + * Does not consume the packet. + */ +static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, + struct sk_buff *skb, + struct fcoe_rport *frport) +{ + struct fip_header *fiph; + struct fip_desc *desc = NULL; + struct fip_mac_desc *macd = NULL; + struct fip_wwn_desc *wwn = NULL; + struct fip_vn_desc *vn = NULL; + struct fip_size_desc *size = NULL; + size_t rlen; + size_t dlen; + u32 desc_mask = 0; + u32 dtype; + u8 sub; + + fiph = (struct fip_header *)skb->data; + frport->flags = ntohs(fiph->fip_flags); + + sub = fiph->fip_subcode; + switch (sub) { + case FIP_SC_VN_PROBE_REQ: + case FIP_SC_VN_PROBE_REP: + case FIP_SC_VN_BEACON: + desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | + BIT(FIP_DT_VN_ID); + break; + case FIP_SC_VN_CLAIM_NOTIFY: + case FIP_SC_VN_CLAIM_REP: + desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | + BIT(FIP_DT_VN_ID) | BIT(FIP_DT_FC4F) | + BIT(FIP_DT_FCOE_SIZE); + break; + default: + LIBFCOE_FIP_DBG(fip, "vn_parse unknown subcode %u\n", sub); + return -EINVAL; + } + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + return -EINVAL; + + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + if (dlen < sizeof(*desc) || dlen > rlen) + return -EINVAL; + + dtype = desc->fip_dtype; + if (dtype < 32) { + if (!(desc_mask & BIT(dtype))) { + LIBFCOE_FIP_DBG(fip, + "unexpected or duplicated desc " + "desc type %u in " + "FIP VN2VN subtype %u\n", + dtype, sub); + return -EINVAL; + } + desc_mask &= ~BIT(dtype); + } + + switch (dtype) { + case FIP_DT_MAC: + if (dlen != sizeof(struct fip_mac_desc)) + goto len_err; + macd = (struct fip_mac_desc *)desc; + if (!is_valid_ether_addr(macd->fd_mac)) { + LIBFCOE_FIP_DBG(fip, + "Invalid MAC addr %pM in FIP VN2VN\n", + macd->fd_mac); + return -EINVAL; + } + memcpy(frport->enode_mac, macd->fd_mac, ETH_ALEN); + break; + case FIP_DT_NAME: + if (dlen != sizeof(struct fip_wwn_desc)) + goto len_err; + wwn = (struct fip_wwn_desc *)desc; + frport->rdata.ids.node_name = + get_unaligned_be64(&wwn->fd_wwn); + break; + case FIP_DT_VN_ID: + if (dlen != sizeof(struct fip_vn_desc)) + goto len_err; + vn = (struct fip_vn_desc *)desc; + memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN); + frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id); + frport->rdata.ids.port_name = + get_unaligned_be64(&vn->fd_wwpn); + break; + case FIP_DT_FC4F: + if (dlen != sizeof(struct fip_fc4_feat)) + goto len_err; + break; + case FIP_DT_FCOE_SIZE: + if (dlen != sizeof(struct fip_size_desc)) + goto len_err; + size = (struct fip_size_desc *)desc; + frport->fcoe_len = ntohs(size->fd_size); + break; + default: + LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " + "in FIP probe\n", dtype); + /* standard says ignore unknown descriptors >= 128 */ + if (dtype < FIP_DT_NON_CRITICAL) + return -EINVAL; + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + return 0; + +len_err: + LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", + dtype, dlen); + return -EINVAL; +} + +/** + * fcoe_ctlr_vn_send_claim() - send multicast FIP VN2VN Claim Notification. 
+ * @fip: The FCoE controller + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_send_claim(struct fcoe_ctlr *fip) +{ + fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_NOTIFY, fcoe_all_vn2vn, 0); + fip->sol_time = jiffies; +} + +/** + * fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request. + * @fip: The FCoE controller + * @frport: parsed FCoE rport from the probe request + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, + struct fcoe_rport *frport) +{ + if (frport->rdata.ids.port_id != fip->port_id) + return; + + switch (fip->state) { + case FIP_ST_VNMP_CLAIM: + case FIP_ST_VNMP_UP: + LIBFCOE_FIP_DBG(fip, "vn_probe_req: send reply, state %x\n", + fip->state); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP, + frport->enode_mac, 0); + break; + case FIP_ST_VNMP_PROBE1: + case FIP_ST_VNMP_PROBE2: + /* + * Decide whether to reply to the Probe. + * Our selected address is never a "recorded" one, so + * only reply if our WWPN is greater and the + * Probe's REC bit is not set. + * If we don't reply, we will change our address. + */ + if (fip->lp->wwpn > frport->rdata.ids.port_name && + !(frport->flags & FIP_FL_REC_OR_P2P)) { + LIBFCOE_FIP_DBG(fip, "vn_probe_req: " + "port_id collision\n"); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP, + frport->enode_mac, 0); + break; + } + fallthrough; + case FIP_ST_VNMP_START: + LIBFCOE_FIP_DBG(fip, "vn_probe_req: " + "restart VN2VN negotiation\n"); + fcoe_ctlr_vn_restart(fip); + break; + default: + LIBFCOE_FIP_DBG(fip, "vn_probe_req: ignore state %x\n", + fip->state); + break; + } +} + +/** + * fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply. + * @fip: The FCoE controller + * @frport: parsed FCoE rport from the probe request + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip, + struct fcoe_rport *frport) +{ + if (frport->rdata.ids.port_id != fip->port_id) + return; + switch (fip->state) { + case FIP_ST_VNMP_START: + case FIP_ST_VNMP_PROBE1: + case FIP_ST_VNMP_PROBE2: + case FIP_ST_VNMP_CLAIM: + LIBFCOE_FIP_DBG(fip, "vn_probe_reply: restart state %x\n", + fip->state); + fcoe_ctlr_vn_restart(fip); + break; + case FIP_ST_VNMP_UP: + LIBFCOE_FIP_DBG(fip, "vn_probe_reply: send claim notify\n"); + fcoe_ctlr_vn_send_claim(fip); + break; + default: + break; + } +} + +/** + * fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply. + * @fip: The FCoE controller + * @new: newly-parsed FCoE rport as a template for new rdata + * + * Called with ctlr_mutex held. 
+ */ +static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fcoe_rport *new) +{ + struct fc_lport *lport = fip->lp; + struct fc_rport_priv *rdata; + struct fc_rport_identifiers *ids; + struct fcoe_rport *frport; + u32 port_id; + + port_id = new->rdata.ids.port_id; + if (port_id == fip->port_id) + return; + + mutex_lock(&lport->disc.disc_mutex); + rdata = fc_rport_create(lport, port_id); + if (!rdata) { + mutex_unlock(&lport->disc.disc_mutex); + return; + } + mutex_lock(&rdata->rp_mutex); + mutex_unlock(&lport->disc.disc_mutex); + + rdata->ops = &fcoe_ctlr_vn_rport_ops; + rdata->disc_id = lport->disc.disc_id; + + ids = &rdata->ids; + if ((ids->port_name != -1 && + ids->port_name != new->rdata.ids.port_name) || + (ids->node_name != -1 && + ids->node_name != new->rdata.ids.node_name)) { + mutex_unlock(&rdata->rp_mutex); + LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id); + fc_rport_logoff(rdata); + mutex_lock(&rdata->rp_mutex); + } + ids->port_name = new->rdata.ids.port_name; + ids->node_name = new->rdata.ids.node_name; + mutex_unlock(&rdata->rp_mutex); + + frport = fcoe_ctlr_rport(rdata); + LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n", + port_id, frport->fcoe_len ? "old" : "new", + rdata->rp_state); + frport->fcoe_len = new->fcoe_len; + frport->flags = new->flags; + frport->login_count = new->login_count; + memcpy(frport->enode_mac, new->enode_mac, ETH_ALEN); + memcpy(frport->vn_mac, new->vn_mac, ETH_ALEN); + frport->time = 0; +} + +/** + * fcoe_ctlr_vn_lookup() - Find VN remote port's MAC address + * @fip: The FCoE controller + * @port_id: The port_id of the remote VN_node + * @mac: buffer which will hold the VN_NODE destination MAC address, if found. + * + * Returns non-zero error if no remote port found. + */ +static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac) +{ + struct fc_lport *lport = fip->lp; + struct fc_rport_priv *rdata; + struct fcoe_rport *frport; + int ret = -1; + + rdata = fc_rport_lookup(lport, port_id); + if (rdata) { + frport = fcoe_ctlr_rport(rdata); + memcpy(mac, frport->enode_mac, ETH_ALEN); + ret = 0; + kref_put(&rdata->kref, fc_rport_destroy); + } + return ret; +} + +/** + * fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification + * @fip: The FCoE controller + * @new: newly-parsed FCoE rport as a template for new rdata + * + * Called with ctlr_mutex held. 
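+ *
+ * If a claim carrying our own port_id arrives while we are in the
+ * CLAIM or UP state, the collision is resolved by WWPN: the side with
+ * the larger WWPN keeps the ID and re-asserts its claim, while the
+ * other side restarts probing for a new ID.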
+ */ +static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, + struct fcoe_rport *new) +{ + if (new->flags & FIP_FL_REC_OR_P2P) { + LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n"); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); + return; + } + switch (fip->state) { + case FIP_ST_VNMP_START: + case FIP_ST_VNMP_PROBE1: + case FIP_ST_VNMP_PROBE2: + if (new->rdata.ids.port_id == fip->port_id) { + LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " + "restart, state %d\n", + fip->state); + fcoe_ctlr_vn_restart(fip); + } + break; + case FIP_ST_VNMP_CLAIM: + case FIP_ST_VNMP_UP: + if (new->rdata.ids.port_id == fip->port_id) { + if (new->rdata.ids.port_name > fip->lp->wwpn) { + LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " + "restart, port_id collision\n"); + fcoe_ctlr_vn_restart(fip); + break; + } + LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " + "send claim notify\n"); + fcoe_ctlr_vn_send_claim(fip); + break; + } + LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n", + new->rdata.ids.port_id); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, new->enode_mac, + min((u32)new->fcoe_len, + fcoe_ctlr_fcoe_size(fip))); + fcoe_ctlr_vn_add(fip, new); + break; + default: + LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " + "ignoring claim from %x\n", + new->rdata.ids.port_id); + break; + } +} + +/** + * fcoe_ctlr_vn_claim_resp() - handle received Claim Response + * @fip: The FCoE controller that received the frame + * @new: newly-parsed FCoE rport from the Claim Response + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip, + struct fcoe_rport *new) +{ + LIBFCOE_FIP_DBG(fip, "claim resp from from rport %x - state %s\n", + new->rdata.ids.port_id, fcoe_ctlr_state(fip->state)); + if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM) + fcoe_ctlr_vn_add(fip, new); +} + +/** + * fcoe_ctlr_vn_beacon() - handle received beacon. + * @fip: The FCoE controller that received the frame + * @new: newly-parsed FCoE rport from the Beacon + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, + struct fcoe_rport *new) +{ + struct fc_lport *lport = fip->lp; + struct fc_rport_priv *rdata; + struct fcoe_rport *frport; + + if (new->flags & FIP_FL_REC_OR_P2P) { + LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n"); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); + return; + } + rdata = fc_rport_lookup(lport, new->rdata.ids.port_id); + if (rdata) { + if (rdata->ids.node_name == new->rdata.ids.node_name && + rdata->ids.port_name == new->rdata.ids.port_name) { + frport = fcoe_ctlr_rport(rdata); + + LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n", + rdata->ids.port_id); + if (!frport->time && fip->state == FIP_ST_VNMP_UP) { + LIBFCOE_FIP_DBG(fip, "beacon expired " + "for rport %x\n", + rdata->ids.port_id); + fc_rport_login(rdata); + } + frport->time = jiffies; + } + kref_put(&rdata->kref, fc_rport_destroy); + return; + } + if (fip->state != FIP_ST_VNMP_UP) + return; + + /* + * Beacon from a new neighbor. + * Send a claim notify if one hasn't been sent recently. + * Don't add the neighbor yet. + */ + LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n", + new->rdata.ids.port_id); + if (time_after(jiffies, + fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT))) + fcoe_ctlr_vn_send_claim(fip); +} + +/** + * fcoe_ctlr_vn_age() - Check for VN_ports without recent beacons + * @fip: The FCoE controller + * + * Called with ctlr_mutex held. + * Called only in state FIP_ST_VNMP_UP. 
+ * Returns the soonest time for next age-out or a time far in the future. + */ +static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip) +{ + struct fc_lport *lport = fip->lp; + struct fc_rport_priv *rdata; + struct fcoe_rport *frport; + unsigned long next_time; + unsigned long deadline; + + next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10); + mutex_lock(&lport->disc.disc_mutex); + list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { + if (!kref_get_unless_zero(&rdata->kref)) + continue; + frport = fcoe_ctlr_rport(rdata); + if (!frport->time) { + kref_put(&rdata->kref, fc_rport_destroy); + continue; + } + deadline = frport->time + + msecs_to_jiffies(FIP_VN_BEACON_INT * 25 / 10); + if (time_after_eq(jiffies, deadline)) { + frport->time = 0; + LIBFCOE_FIP_DBG(fip, + "port %16.16llx fc_id %6.6x beacon expired\n", + rdata->ids.port_name, rdata->ids.port_id); + fc_rport_logoff(rdata); + } else if (time_before(deadline, next_time)) + next_time = deadline; + kref_put(&rdata->kref, fc_rport_destroy); + } + mutex_unlock(&lport->disc.disc_mutex); + return next_time; +} + +/** + * fcoe_ctlr_vn_recv() - Receive a FIP frame + * @fip: The FCoE controller that received the frame + * @skb: The received FIP frame + * + * Returns non-zero if the frame is dropped. + * Always consumes the frame. + */ +static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fip_header *fiph; + enum fip_vn2vn_subcode sub; + struct fcoe_rport frport = { }; + int rc, vlan_id = 0; + + fiph = (struct fip_header *)skb->data; + sub = fiph->fip_subcode; + + if (fip->lp->vlan) + vlan_id = skb_vlan_tag_get_id(skb); + + if (vlan_id && vlan_id != fip->lp->vlan) { + LIBFCOE_FIP_DBG(fip, "vn_recv drop frame sub %x vlan %d\n", + sub, vlan_id); + rc = -EAGAIN; + goto drop; + } + + rc = fcoe_ctlr_vn_parse(fip, skb, &frport); + if (rc) { + LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc); + goto drop; + } + + mutex_lock(&fip->ctlr_mutex); + switch (sub) { + case FIP_SC_VN_PROBE_REQ: + fcoe_ctlr_vn_probe_req(fip, &frport); + break; + case FIP_SC_VN_PROBE_REP: + fcoe_ctlr_vn_probe_reply(fip, &frport); + break; + case FIP_SC_VN_CLAIM_NOTIFY: + fcoe_ctlr_vn_claim_notify(fip, &frport); + break; + case FIP_SC_VN_CLAIM_REP: + fcoe_ctlr_vn_claim_resp(fip, &frport); + break; + case FIP_SC_VN_BEACON: + fcoe_ctlr_vn_beacon(fip, &frport); + break; + default: + LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub); + rc = -1; + break; + } + mutex_unlock(&fip->ctlr_mutex); +drop: + kfree_skb(skb); + return rc; +} + +/** + * fcoe_ctlr_vlan_parse - parse vlan discovery request or response + * @fip: The FCoE controller + * @skb: incoming packet + * @frport: parsed FCoE rport from the probe request + * + * Returns non-zero error number on error. + * Does not consume the packet. 
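+ * Only the MAC and name descriptors are accepted for a VLAN request;
+ * any other critical descriptor type causes the frame to be rejected.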
+ */ +static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip, + struct sk_buff *skb, + struct fcoe_rport *frport) +{ + struct fip_header *fiph; + struct fip_desc *desc = NULL; + struct fip_mac_desc *macd = NULL; + struct fip_wwn_desc *wwn = NULL; + size_t rlen; + size_t dlen; + u32 desc_mask = 0; + u32 dtype; + u8 sub; + + fiph = (struct fip_header *)skb->data; + frport->flags = ntohs(fiph->fip_flags); + + sub = fiph->fip_subcode; + switch (sub) { + case FIP_SC_VL_REQ: + desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME); + break; + default: + LIBFCOE_FIP_DBG(fip, "vn_parse unknown subcode %u\n", sub); + return -EINVAL; + } + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + return -EINVAL; + + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + if (dlen < sizeof(*desc) || dlen > rlen) + return -EINVAL; + + dtype = desc->fip_dtype; + if (dtype < 32) { + if (!(desc_mask & BIT(dtype))) { + LIBFCOE_FIP_DBG(fip, + "unexpected or duplicated desc " + "desc type %u in " + "FIP VN2VN subtype %u\n", + dtype, sub); + return -EINVAL; + } + desc_mask &= ~BIT(dtype); + } + + switch (dtype) { + case FIP_DT_MAC: + if (dlen != sizeof(struct fip_mac_desc)) + goto len_err; + macd = (struct fip_mac_desc *)desc; + if (!is_valid_ether_addr(macd->fd_mac)) { + LIBFCOE_FIP_DBG(fip, + "Invalid MAC addr %pM in FIP VN2VN\n", + macd->fd_mac); + return -EINVAL; + } + memcpy(frport->enode_mac, macd->fd_mac, ETH_ALEN); + break; + case FIP_DT_NAME: + if (dlen != sizeof(struct fip_wwn_desc)) + goto len_err; + wwn = (struct fip_wwn_desc *)desc; + frport->rdata.ids.node_name = + get_unaligned_be64(&wwn->fd_wwn); + break; + default: + LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " + "in FIP probe\n", dtype); + /* standard says ignore unknown descriptors >= 128 */ + if (dtype < FIP_DT_NON_CRITICAL) + return -EINVAL; + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + return 0; + +len_err: + LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", + dtype, dlen); + return -EINVAL; +} + +/** + * fcoe_ctlr_vlan_send() - Send a FIP VLAN Notification + * @fip: The FCoE controller + * @sub: sub-opcode for vlan notification or vn2vn vlan notification + * @dest: The destination Ethernet MAC address + */ +static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip, + enum fip_vlan_subcode sub, + const u8 *dest) +{ + struct sk_buff *skb; + struct fip_vlan_notify_frame { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac; + struct fip_vlan_desc vlan; + } __packed * frame; + size_t len; + size_t dlen; + + len = sizeof(*frame); + dlen = sizeof(frame->mac) + sizeof(frame->vlan); + len = max(len, sizeof(struct ethhdr)); + + skb = dev_alloc_skb(len); + if (!skb) + return; + + LIBFCOE_FIP_DBG(fip, "fip %s vlan notification, vlan %d\n", + fip->mode == FIP_MODE_VN2VN ? 
"vn2vn" : "fcf", + fip->lp->vlan); + + frame = (struct fip_vlan_notify_frame *)skb->data; + memset(frame, 0, len); + memcpy(frame->eth.h_dest, dest, ETH_ALEN); + + memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + frame->eth.h_proto = htons(ETH_P_FIP); + + frame->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + frame->fip.fip_op = htons(FIP_OP_VLAN); + frame->fip.fip_subcode = sub; + frame->fip.fip_dl_len = htons(dlen / FIP_BPW); + + frame->mac.fd_desc.fip_dtype = FIP_DT_MAC; + frame->mac.fd_desc.fip_dlen = sizeof(frame->mac) / FIP_BPW; + memcpy(frame->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + frame->vlan.fd_desc.fip_dtype = FIP_DT_VLAN; + frame->vlan.fd_desc.fip_dlen = sizeof(frame->vlan) / FIP_BPW; + put_unaligned_be16(fip->lp->vlan, &frame->vlan.fd_vlan); + + skb_put(skb, len); + skb->protocol = htons(ETH_P_FIP); + skb->priority = fip->priority; + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + + fip->send(fip, skb); +} + +/** + * fcoe_ctlr_vlan_disc_reply() - send FIP VLAN Discovery Notification. + * @fip: The FCoE controller + * @frport: The newly-parsed FCoE rport from the Discovery Request + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip, + struct fcoe_rport *frport) +{ + enum fip_vlan_subcode sub = FIP_SC_VL_NOTE; + + if (fip->mode == FIP_MODE_VN2VN) + sub = FIP_SC_VL_VN2VN_NOTE; + + fcoe_ctlr_vlan_send(fip, sub, frport->enode_mac); +} + +/** + * fcoe_ctlr_vlan_recv - vlan request receive handler for VN2VN mode. + * @fip: The FCoE controller + * @skb: The received FIP packet + */ +static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fip_header *fiph; + enum fip_vlan_subcode sub; + struct fcoe_rport frport = { }; + int rc; + + fiph = (struct fip_header *)skb->data; + sub = fiph->fip_subcode; + rc = fcoe_ctlr_vlan_parse(fip, skb, &frport); + if (rc) { + LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc); + goto drop; + } + mutex_lock(&fip->ctlr_mutex); + if (sub == FIP_SC_VL_REQ) + fcoe_ctlr_vlan_disc_reply(fip, &frport); + mutex_unlock(&fip->ctlr_mutex); + +drop: + kfree_skb(skb); + return rc; +} + +/** + * fcoe_ctlr_disc_recv - discovery receive handler for VN2VN mode. + * @lport: The local port + * @fp: The received frame + * + * This should never be called since we don't see RSCNs or other + * fabric-generated ELSes. + */ +static void fcoe_ctlr_disc_recv(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_seq_els_data rjt_data; + + rjt_data.reason = ELS_RJT_UNSUP; + rjt_data.explan = ELS_EXPL_NONE; + fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); + fc_frame_free(fp); +} + +/* + * fcoe_ctlr_disc_start - start discovery for VN2VN mode. + * + * This sets a flag indicating that remote ports should be created + * and started for the peers we discover. We use the disc_callback + * pointer as that flag. Peers already discovered are created here. + * + * The lport lock is held during this call. The callback must be done + * later, without holding either the lport or discovery locks. + * The fcoe_ctlr lock may also be held during this call. 
+ */ +static void fcoe_ctlr_disc_start(void (*callback)(struct fc_lport *, + enum fc_disc_event), + struct fc_lport *lport) +{ + struct fc_disc *disc = &lport->disc; + struct fcoe_ctlr *fip = disc->priv; + + mutex_lock(&disc->disc_mutex); + disc->disc_callback = callback; + disc->disc_id = (disc->disc_id + 2) | 1; + disc->pending = 1; + schedule_work(&fip->timer_work); + mutex_unlock(&disc->disc_mutex); +} + +/** + * fcoe_ctlr_vn_disc() - report FIP VN_port discovery results after claim state. + * @fip: The FCoE controller + * + * Starts the FLOGI and PLOGI login process to each discovered rport for which + * we've received at least one beacon. + * Performs the discovery complete callback. + */ +static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip) +{ + struct fc_lport *lport = fip->lp; + struct fc_disc *disc = &lport->disc; + struct fc_rport_priv *rdata; + struct fcoe_rport *frport; + void (*callback)(struct fc_lport *, enum fc_disc_event); + + mutex_lock(&disc->disc_mutex); + callback = disc->pending ? disc->disc_callback : NULL; + disc->pending = 0; + list_for_each_entry_rcu(rdata, &disc->rports, peers) { + if (!kref_get_unless_zero(&rdata->kref)) + continue; + frport = fcoe_ctlr_rport(rdata); + if (frport->time) + fc_rport_login(rdata); + kref_put(&rdata->kref, fc_rport_destroy); + } + mutex_unlock(&disc->disc_mutex); + if (callback) + callback(lport, DISC_EV_SUCCESS); +} + +/** + * fcoe_ctlr_vn_timeout - timer work function for VN2VN mode. + * @fip: The FCoE controller + */ +static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip) +{ + unsigned long next_time; + u8 mac[ETH_ALEN]; + u32 new_port_id = 0; + + mutex_lock(&fip->ctlr_mutex); + switch (fip->state) { + case FIP_ST_VNMP_START: + fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE1); + LIBFCOE_FIP_DBG(fip, "vn_timeout: send 1st probe request\n"); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); + next_time = jiffies + msecs_to_jiffies(FIP_VN_PROBE_WAIT); + break; + case FIP_ST_VNMP_PROBE1: + fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE2); + LIBFCOE_FIP_DBG(fip, "vn_timeout: send 2nd probe request\n"); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); + next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); + break; + case FIP_ST_VNMP_PROBE2: + fcoe_ctlr_set_state(fip, FIP_ST_VNMP_CLAIM); + new_port_id = fip->port_id; + hton24(mac, FIP_VN_FC_MAP); + hton24(mac + 3, new_port_id); + fcoe_ctlr_map_dest(fip); + fip->update_mac(fip->lp, mac); + LIBFCOE_FIP_DBG(fip, "vn_timeout: send claim notify\n"); + fcoe_ctlr_vn_send_claim(fip); + next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); + break; + case FIP_ST_VNMP_CLAIM: + /* + * This may be invoked either by starting discovery so don't + * go to the next state unless it's been long enough. 
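+		 * FIP_VN_ANN_WAIT must have elapsed since the last
+		 * solicitation before we move to FIP_ST_VNMP_UP and
+		 * start sending beacons.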
+ */ + next_time = fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT); + if (time_after_eq(jiffies, next_time)) { + fcoe_ctlr_set_state(fip, FIP_ST_VNMP_UP); + LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n"); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON, + fcoe_all_vn2vn, 0); + next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); + fip->port_ka_time = next_time; + } + fcoe_ctlr_vn_disc(fip); + break; + case FIP_ST_VNMP_UP: + next_time = fcoe_ctlr_vn_age(fip); + if (time_after_eq(jiffies, fip->port_ka_time)) { + LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n"); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON, + fcoe_all_vn2vn, 0); + fip->port_ka_time = jiffies + + msecs_to_jiffies(FIP_VN_BEACON_INT + + get_random_u32_below(FIP_VN_BEACON_FUZZ)); + } + if (time_before(fip->port_ka_time, next_time)) + next_time = fip->port_ka_time; + break; + case FIP_ST_LINK_WAIT: + goto unlock; + default: + WARN(1, "unexpected state %d\n", fip->state); + goto unlock; + } + mod_timer(&fip->timer, next_time); +unlock: + mutex_unlock(&fip->ctlr_mutex); + + /* If port ID is new, notify local port after dropping ctlr_mutex */ + if (new_port_id) + fc_lport_set_local_id(fip->lp, new_port_id); +} + +/** + * fcoe_ctlr_mode_set() - Set or reset the ctlr's mode + * @lport: The local port to be (re)configured + * @fip: The FCoE controller whose mode is changing + * @fip_mode: The new fip mode + * + * Note that the we shouldn't be changing the libfc discovery settings + * (fc_disc_config) while an lport is going through the libfc state + * machine. The mode can only be changed when a fcoe_ctlr device is + * disabled, so that should ensure that this routine is only called + * when nothing is happening. + */ +static void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, + enum fip_mode fip_mode) +{ + void *priv; + + WARN_ON(lport->state != LPORT_ST_RESET && + lport->state != LPORT_ST_DISABLED); + + if (fip_mode == FIP_MODE_VN2VN) { + lport->rport_priv_size = sizeof(struct fcoe_rport); + lport->point_to_multipoint = 1; + lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; + lport->tt.disc_start = fcoe_ctlr_disc_start; + lport->tt.disc_stop = fcoe_ctlr_disc_stop; + lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; + priv = fip; + } else { + lport->rport_priv_size = 0; + lport->point_to_multipoint = 0; + lport->tt.disc_recv_req = NULL; + lport->tt.disc_start = NULL; + lport->tt.disc_stop = NULL; + lport->tt.disc_stop_final = NULL; + priv = lport; + } + + fc_disc_config(lport, priv); +} + +/** + * fcoe_libfc_config() - Sets up libfc related properties for local port + * @lport: The local port to configure libfc for + * @fip: The FCoE controller in use by the local port + * @tt: The libfc function template + * @init_fcp: If non-zero, the FCP portion of libfc should be initialized + * + * Returns : 0 for success + */ +int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip, + const struct libfc_function_template *tt, int init_fcp) +{ + /* Set the function pointers set by the LLDD */ + memcpy(&lport->tt, tt, sizeof(*tt)); + if (init_fcp && fc_fcp_init(lport)) + return -ENOMEM; + fc_exch_init(lport); + fc_elsct_init(lport); + fc_lport_init(lport); + fc_disc_init(lport); + fcoe_ctlr_mode_set(lport, fip, fip->mode); + return 0; +} +EXPORT_SYMBOL_GPL(fcoe_libfc_config); + +void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev) +{ + struct fcoe_ctlr_device *ctlr_dev = fcoe_fcf_dev_to_ctlr_dev(fcf_dev); + struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev); + struct fcoe_fcf *fcf; + + 
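+	/*
+	 * Take both the ctlr_mutex and the sysfs device lock so that
+	 * the selected flag reported to sysfs matches the currently
+	 * selected FCF.
+	 */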
mutex_lock(&fip->ctlr_mutex); + mutex_lock(&ctlr_dev->lock); + + fcf = fcoe_fcf_device_priv(fcf_dev); + if (fcf) + fcf_dev->selected = (fcf == fip->sel_fcf) ? 1 : 0; + else + fcf_dev->selected = 0; + + mutex_unlock(&ctlr_dev->lock); + mutex_unlock(&fip->ctlr_mutex); +} +EXPORT_SYMBOL(fcoe_fcf_get_selected); + +void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) +{ + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + struct fc_lport *lport = ctlr->lp; + + mutex_lock(&ctlr->ctlr_mutex); + switch (ctlr_dev->mode) { + case FIP_CONN_TYPE_VN2VN: + ctlr->mode = FIP_MODE_VN2VN; + break; + case FIP_CONN_TYPE_FABRIC: + default: + ctlr->mode = FIP_MODE_FABRIC; + break; + } + + mutex_unlock(&ctlr->ctlr_mutex); + + fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode); +} +EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode); diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c new file mode 100644 index 000000000..e17957f80 --- /dev/null +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -0,0 +1,1066 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved. + * + * Maintained at www.Open-FCoE.org + */ + +#include +#include +#include +#include +#include + +#include +#include + +/* + * OK to include local libfcoe.h for debug_logging, but cannot include + * otherwise non-netdev based fcoe solutions would have + * have to include more than fcoe_sysfs.h. + */ +#include "libfcoe.h" + +static atomic_t ctlr_num; +static atomic_t fcf_num; + +/* + * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs + * should insulate the loss of a fcf. + */ +static unsigned int fcoe_fcf_dev_loss_tmo = 1800; /* seconds */ + +module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo, + uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(fcf_dev_loss_tmo, + "Maximum number of seconds that libfcoe should" + " insulate the loss of a fcf. Once this value is" + " exceeded, the fcf is removed."); + +/* + * These are used by the fcoe_*_show_function routines, they + * are intentionally placed in the .c file as they're not intended + * for use throughout the code. 
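+ * They are thin accessors over struct fcoe_ctlr_device and
+ * struct fcoe_fcf_device fields, used by the attribute macros below.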
+ */ +#define fcoe_ctlr_id(x) \ + ((x)->id) +#define fcoe_ctlr_work_q_name(x) \ + ((x)->work_q_name) +#define fcoe_ctlr_work_q(x) \ + ((x)->work_q) +#define fcoe_ctlr_devloss_work_q_name(x) \ + ((x)->devloss_work_q_name) +#define fcoe_ctlr_devloss_work_q(x) \ + ((x)->devloss_work_q) +#define fcoe_ctlr_mode(x) \ + ((x)->mode) +#define fcoe_ctlr_fcf_dev_loss_tmo(x) \ + ((x)->fcf_dev_loss_tmo) +#define fcoe_ctlr_link_fail(x) \ + ((x)->lesb.lesb_link_fail) +#define fcoe_ctlr_vlink_fail(x) \ + ((x)->lesb.lesb_vlink_fail) +#define fcoe_ctlr_miss_fka(x) \ + ((x)->lesb.lesb_miss_fka) +#define fcoe_ctlr_symb_err(x) \ + ((x)->lesb.lesb_symb_err) +#define fcoe_ctlr_err_block(x) \ + ((x)->lesb.lesb_err_block) +#define fcoe_ctlr_fcs_error(x) \ + ((x)->lesb.lesb_fcs_error) +#define fcoe_ctlr_enabled(x) \ + ((x)->enabled) +#define fcoe_fcf_state(x) \ + ((x)->state) +#define fcoe_fcf_fabric_name(x) \ + ((x)->fabric_name) +#define fcoe_fcf_switch_name(x) \ + ((x)->switch_name) +#define fcoe_fcf_fc_map(x) \ + ((x)->fc_map) +#define fcoe_fcf_vfid(x) \ + ((x)->vfid) +#define fcoe_fcf_mac(x) \ + ((x)->mac) +#define fcoe_fcf_priority(x) \ + ((x)->priority) +#define fcoe_fcf_fka_period(x) \ + ((x)->fka_period) +#define fcoe_fcf_dev_loss_tmo(x) \ + ((x)->dev_loss_tmo) +#define fcoe_fcf_selected(x) \ + ((x)->selected) +#define fcoe_fcf_vlan_id(x) \ + ((x)->vlan_id) + +/* + * dev_loss_tmo attribute + */ +static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val) +{ + int ret; + + ret = kstrtoul(buf, 0, val); + if (ret) + return -EINVAL; + /* + * Check for overflow; dev_loss_tmo is u32 + */ + if (*val > UINT_MAX) + return -EINVAL; + + return 0; +} + +static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf, + unsigned long val) +{ + if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) || + (fcf->state == FCOE_FCF_STATE_DISCONNECTED) || + (fcf->state == FCOE_FCF_STATE_DELETED)) + return -EBUSY; + /* + * Check for overflow; dev_loss_tmo is u32 + */ + if (val > UINT_MAX) + return -EINVAL; + + fcoe_fcf_dev_loss_tmo(fcf) = val; + return 0; +} + +#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store) \ +struct device_attribute device_attr_fcoe_##_prefix##_##_name = \ + __ATTR(_name, _mode, _show, _store) + +#define fcoe_ctlr_show_function(field, format_string, sz, cast) \ +static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \ + if (ctlr->f->get_fcoe_ctlr_##field) \ + ctlr->f->get_fcoe_ctlr_##field(ctlr); \ + return snprintf(buf, sz, format_string, \ + cast fcoe_ctlr_##field(ctlr)); \ +} + +#define fcoe_fcf_show_function(field, format_string, sz, cast) \ +static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \ + struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); \ + if (ctlr->f->get_fcoe_fcf_##field) \ + ctlr->f->get_fcoe_fcf_##field(fcf); \ + return snprintf(buf, sz, format_string, \ + cast fcoe_fcf_##field(fcf)); \ +} + +#define fcoe_ctlr_private_show_function(field, format_string, sz, cast) \ +static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \ + return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \ +} + +#define fcoe_fcf_private_show_function(field, format_string, sz, cast) \ +static ssize_t 
show_fcoe_fcf_device_##field(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \ + return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \ +} + +#define fcoe_ctlr_private_rd_attr(field, format_string, sz) \ + fcoe_ctlr_private_show_function(field, format_string, sz, ) \ + static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \ + show_fcoe_ctlr_device_##field, NULL) + +#define fcoe_ctlr_rd_attr(field, format_string, sz) \ + fcoe_ctlr_show_function(field, format_string, sz, ) \ + static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \ + show_fcoe_ctlr_device_##field, NULL) + +#define fcoe_fcf_rd_attr(field, format_string, sz) \ + fcoe_fcf_show_function(field, format_string, sz, ) \ + static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \ + show_fcoe_fcf_device_##field, NULL) + +#define fcoe_fcf_private_rd_attr(field, format_string, sz) \ + fcoe_fcf_private_show_function(field, format_string, sz, ) \ + static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \ + show_fcoe_fcf_device_##field, NULL) + +#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast) \ + fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \ + static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \ + show_fcoe_ctlr_device_##field, NULL) + +#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast) \ + fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \ + static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \ + show_fcoe_fcf_device_##field, NULL) + +#define fcoe_enum_name_search(title, table_type, table) \ +static const char *get_fcoe_##title##_name(enum table_type table_key) \ +{ \ + if (table_key < 0 || table_key >= ARRAY_SIZE(table)) \ + return NULL; \ + return table[table_key]; \ +} + +static char *fip_conn_type_names[] = { + [ FIP_CONN_TYPE_UNKNOWN ] = "Unknown", + [ FIP_CONN_TYPE_FABRIC ] = "Fabric", + [ FIP_CONN_TYPE_VN2VN ] = "VN2VN", +}; +fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names) + +static enum fip_conn_type fcoe_parse_mode(const char *buf) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(fip_conn_type_names); i++) { + if (strcasecmp(buf, fip_conn_type_names[i]) == 0) + return i; + } + + return FIP_CONN_TYPE_UNKNOWN; +} + +static char *fcf_state_names[] = { + [ FCOE_FCF_STATE_UNKNOWN ] = "Unknown", + [ FCOE_FCF_STATE_DISCONNECTED ] = "Disconnected", + [ FCOE_FCF_STATE_CONNECTED ] = "Connected", +}; +fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names) +#define FCOE_FCF_STATE_MAX_NAMELEN 50 + +static ssize_t show_fcf_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fcoe_fcf_device *fcf = dev_to_fcf(dev); + const char *name; + name = get_fcoe_fcf_state_name(fcf->state); + if (!name) + return -EINVAL; + return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name); +} +static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL); + +#define FCOE_MAX_MODENAME_LEN 20 +static ssize_t show_ctlr_mode(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + const char *name; + + name = get_fcoe_ctlr_mode_name(ctlr->mode); + if (!name) + return -EINVAL; + return snprintf(buf, FCOE_MAX_MODENAME_LEN, + "%s\n", name); +} + +static ssize_t store_ctlr_mode(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + char mode[FCOE_MAX_MODENAME_LEN + 1]; + + if (count > FCOE_MAX_MODENAME_LEN) + return -EINVAL; + + 
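+	/*
+	 * Copy the user buffer and strip a trailing newline before
+	 * parsing the requested mode name.
+	 */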
strncpy(mode, buf, count); + + if (mode[count - 1] == '\n') + mode[count - 1] = '\0'; + else + mode[count] = '\0'; + + switch (ctlr->enabled) { + case FCOE_CTLR_ENABLED: + LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.\n"); + return -EBUSY; + case FCOE_CTLR_DISABLED: + if (!ctlr->f->set_fcoe_ctlr_mode) { + LIBFCOE_SYSFS_DBG(ctlr, + "Mode change not supported by LLD.\n"); + return -ENOTSUPP; + } + + ctlr->mode = fcoe_parse_mode(mode); + if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) { + LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n", + buf); + return -EINVAL; + } + + ctlr->f->set_fcoe_ctlr_mode(ctlr); + LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf); + + return count; + case FCOE_CTLR_UNUSED: + default: + LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n"); + return -ENOTSUPP; + } +} + +static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR, + show_ctlr_mode, store_ctlr_mode); + +static ssize_t store_ctlr_enabled(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + bool enabled; + int rc; + + if (*buf == '1') + enabled = true; + else if (*buf == '0') + enabled = false; + else + return -EINVAL; + + switch (ctlr->enabled) { + case FCOE_CTLR_ENABLED: + if (enabled) + return count; + ctlr->enabled = FCOE_CTLR_DISABLED; + break; + case FCOE_CTLR_DISABLED: + if (!enabled) + return count; + ctlr->enabled = FCOE_CTLR_ENABLED; + break; + case FCOE_CTLR_UNUSED: + return -ENOTSUPP; + } + + rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr); + if (rc) + return rc; + + return count; +} + +static char *ctlr_enabled_state_names[] = { + [ FCOE_CTLR_ENABLED ] = "1", + [ FCOE_CTLR_DISABLED ] = "0", +}; +fcoe_enum_name_search(ctlr_enabled_state, ctlr_enabled_state, + ctlr_enabled_state_names) +#define FCOE_CTLR_ENABLED_MAX_NAMELEN 50 + +static ssize_t show_ctlr_enabled_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + const char *name; + + name = get_fcoe_ctlr_enabled_state_name(ctlr->enabled); + if (!name) + return -EINVAL; + return snprintf(buf, FCOE_CTLR_ENABLED_MAX_NAMELEN, + "%s\n", name); +} + +static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR, + show_ctlr_enabled_state, + store_ctlr_enabled); + +static ssize_t store_ctlr_fip_resp(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr); + + mutex_lock(&fip->ctlr_mutex); + if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) { + if (buf[0] == '1') { + fip->fip_resp = 1; + mutex_unlock(&fip->ctlr_mutex); + return count; + } + if (buf[0] == '0') { + fip->fip_resp = 0; + mutex_unlock(&fip->ctlr_mutex); + return count; + } + } + mutex_unlock(&fip->ctlr_mutex); + return -EINVAL; +} + +static ssize_t show_ctlr_fip_resp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr); + + return sprintf(buf, "%d\n", fip->fip_resp ? 
1 : 0); +} + +static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR, + show_ctlr_fip_resp, + store_ctlr_fip_resp); + +static ssize_t +fcoe_ctlr_var_store(u32 *var, const char *buf, size_t count) +{ + int err; + unsigned long v; + + err = kstrtoul(buf, 10, &v); + if (err || v > UINT_MAX) + return -EINVAL; + + *var = v; + + return count; +} + +static ssize_t store_ctlr_r_a_tov(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev); + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + + if (ctlr_dev->enabled == FCOE_CTLR_ENABLED) + return -EBUSY; + if (ctlr_dev->enabled == FCOE_CTLR_DISABLED) + return fcoe_ctlr_var_store(&ctlr->lp->r_a_tov, buf, count); + return -ENOTSUPP; +} + +static ssize_t show_ctlr_r_a_tov(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev); + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + + return sprintf(buf, "%d\n", ctlr->lp->r_a_tov); +} + +static FCOE_DEVICE_ATTR(ctlr, r_a_tov, S_IRUGO | S_IWUSR, + show_ctlr_r_a_tov, store_ctlr_r_a_tov); + +static ssize_t store_ctlr_e_d_tov(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev); + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + + if (ctlr_dev->enabled == FCOE_CTLR_ENABLED) + return -EBUSY; + if (ctlr_dev->enabled == FCOE_CTLR_DISABLED) + return fcoe_ctlr_var_store(&ctlr->lp->e_d_tov, buf, count); + return -ENOTSUPP; +} + +static ssize_t show_ctlr_e_d_tov(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev); + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + + return sprintf(buf, "%d\n", ctlr->lp->e_d_tov); +} + +static FCOE_DEVICE_ATTR(ctlr, e_d_tov, S_IRUGO | S_IWUSR, + show_ctlr_e_d_tov, store_ctlr_e_d_tov); + +static ssize_t +store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + struct fcoe_fcf_device *fcf; + unsigned long val; + int rc; + + rc = fcoe_str_to_dev_loss(buf, &val); + if (rc) + return rc; + + fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val; + mutex_lock(&ctlr->lock); + list_for_each_entry(fcf, &ctlr->fcfs, peers) + fcoe_fcf_set_dev_loss_tmo(fcf, val); + mutex_unlock(&ctlr->lock); + return count; +} +fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, ); +static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR, + show_fcoe_ctlr_device_fcf_dev_loss_tmo, + store_private_fcoe_ctlr_fcf_dev_loss_tmo); + +/* Link Error Status Block (LESB) */ +fcoe_ctlr_rd_attr(link_fail, "%u\n", 20); +fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20); +fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20); +fcoe_ctlr_rd_attr(symb_err, "%u\n", 20); +fcoe_ctlr_rd_attr(err_block, "%u\n", 20); +fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20); + +fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long); +fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long); +fcoe_fcf_private_rd_attr(priority, "%u\n", 20); +fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20); +fcoe_fcf_private_rd_attr(vfid, "%u\n", 20); +fcoe_fcf_private_rd_attr(mac, "%pM\n", 20); +fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20); +fcoe_fcf_rd_attr(selected, "%u\n", 20); +fcoe_fcf_rd_attr(vlan_id, "%u\n", 20); + 
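+/*
+ * For illustration: fcoe_fcf_rd_attr(selected, "%u\n", 20) above expands
+ * into a show_fcoe_fcf_device_selected() routine that calls the LLD's
+ * get_fcoe_fcf_selected() callback (when provided) before printing
+ * fcf->selected, plus a read-only device_attr_fcoe_fcf_selected entry
+ * that is hooked into fcoe_fcf_attrs[] below.
+ */
+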
+fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, ) +static ssize_t +store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcoe_fcf_device *fcf = dev_to_fcf(dev); + unsigned long val; + int rc; + + rc = fcoe_str_to_dev_loss(buf, &val); + if (rc) + return rc; + + rc = fcoe_fcf_set_dev_loss_tmo(fcf, val); + if (rc) + return rc; + return count; +} +static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR, + show_fcoe_fcf_device_dev_loss_tmo, + store_fcoe_fcf_dev_loss_tmo); + +static struct attribute *fcoe_ctlr_lesb_attrs[] = { + &device_attr_fcoe_ctlr_link_fail.attr, + &device_attr_fcoe_ctlr_vlink_fail.attr, + &device_attr_fcoe_ctlr_miss_fka.attr, + &device_attr_fcoe_ctlr_symb_err.attr, + &device_attr_fcoe_ctlr_err_block.attr, + &device_attr_fcoe_ctlr_fcs_error.attr, + NULL, +}; + +static struct attribute_group fcoe_ctlr_lesb_attr_group = { + .name = "lesb", + .attrs = fcoe_ctlr_lesb_attrs, +}; + +static struct attribute *fcoe_ctlr_attrs[] = { + &device_attr_fcoe_ctlr_fip_vlan_responder.attr, + &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr, + &device_attr_fcoe_ctlr_r_a_tov.attr, + &device_attr_fcoe_ctlr_e_d_tov.attr, + &device_attr_fcoe_ctlr_enabled.attr, + &device_attr_fcoe_ctlr_mode.attr, + NULL, +}; + +static struct attribute_group fcoe_ctlr_attr_group = { + .attrs = fcoe_ctlr_attrs, +}; + +static const struct attribute_group *fcoe_ctlr_attr_groups[] = { + &fcoe_ctlr_attr_group, + &fcoe_ctlr_lesb_attr_group, + NULL, +}; + +static struct attribute *fcoe_fcf_attrs[] = { + &device_attr_fcoe_fcf_fabric_name.attr, + &device_attr_fcoe_fcf_switch_name.attr, + &device_attr_fcoe_fcf_dev_loss_tmo.attr, + &device_attr_fcoe_fcf_fc_map.attr, + &device_attr_fcoe_fcf_vfid.attr, + &device_attr_fcoe_fcf_mac.attr, + &device_attr_fcoe_fcf_priority.attr, + &device_attr_fcoe_fcf_fka_period.attr, + &device_attr_fcoe_fcf_state.attr, + &device_attr_fcoe_fcf_selected.attr, + &device_attr_fcoe_fcf_vlan_id.attr, + NULL +}; + +static struct attribute_group fcoe_fcf_attr_group = { + .attrs = fcoe_fcf_attrs, +}; + +static const struct attribute_group *fcoe_fcf_attr_groups[] = { + &fcoe_fcf_attr_group, + NULL, +}; + +static struct bus_type fcoe_bus_type; + +static int fcoe_bus_match(struct device *dev, + struct device_driver *drv) +{ + if (dev->bus == &fcoe_bus_type) + return 1; + return 0; +} + +/** + * fcoe_ctlr_device_release() - Release the FIP ctlr memory + * @dev: Pointer to the FIP ctlr's embedded device + * + * Called when the last FIP ctlr reference is released. + */ +static void fcoe_ctlr_device_release(struct device *dev) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + kfree(ctlr); +} + +/** + * fcoe_fcf_device_release() - Release the FIP fcf memory + * @dev: Pointer to the fcf's embedded device + * + * Called when the last FIP fcf reference is released. 
+ */ +static void fcoe_fcf_device_release(struct device *dev) +{ + struct fcoe_fcf_device *fcf = dev_to_fcf(dev); + kfree(fcf); +} + +static const struct device_type fcoe_ctlr_device_type = { + .name = "fcoe_ctlr", + .groups = fcoe_ctlr_attr_groups, + .release = fcoe_ctlr_device_release, +}; + +static const struct device_type fcoe_fcf_device_type = { + .name = "fcoe_fcf", + .groups = fcoe_fcf_attr_groups, + .release = fcoe_fcf_device_release, +}; + +static ssize_t ctlr_create_store(const struct bus_type *bus, const char *buf, + size_t count) +{ + return fcoe_ctlr_create_store(buf, count); +} +static BUS_ATTR_WO(ctlr_create); + +static ssize_t ctlr_destroy_store(const struct bus_type *bus, const char *buf, + size_t count) +{ + return fcoe_ctlr_destroy_store(buf, count); +} +static BUS_ATTR_WO(ctlr_destroy); + +static struct attribute *fcoe_bus_attrs[] = { + &bus_attr_ctlr_create.attr, + &bus_attr_ctlr_destroy.attr, + NULL, +}; +ATTRIBUTE_GROUPS(fcoe_bus); + +static struct bus_type fcoe_bus_type = { + .name = "fcoe", + .match = &fcoe_bus_match, + .bus_groups = fcoe_bus_groups, +}; + +/** + * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue + * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed + */ +static void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr) +{ + if (!fcoe_ctlr_work_q(ctlr)) { + printk(KERN_ERR + "ERROR: FIP Ctlr '%d' attempted to flush work, " + "when no workqueue created.\n", ctlr->id); + dump_stack(); + return; + } + + flush_workqueue(fcoe_ctlr_work_q(ctlr)); +} + +/** + * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue + * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue + * @work: Work to queue for execution + * + * Return value: + * 1 on success / 0 already queued / < 0 for error + */ +static int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr, + struct work_struct *work) +{ + if (unlikely(!fcoe_ctlr_work_q(ctlr))) { + printk(KERN_ERR + "ERROR: FIP Ctlr '%d' attempted to queue work, " + "when no workqueue created.\n", ctlr->id); + dump_stack(); + + return -EINVAL; + } + + return queue_work(fcoe_ctlr_work_q(ctlr), work); +} + +/** + * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue + * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed + */ +static void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr) +{ + if (!fcoe_ctlr_devloss_work_q(ctlr)) { + printk(KERN_ERR + "ERROR: FIP Ctlr '%d' attempted to flush work, " + "when no workqueue created.\n", ctlr->id); + dump_stack(); + return; + } + + flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr)); +} + +/** + * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue + * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue + * @work: Work to queue for execution + * @delay: jiffies to delay the work queuing + * + * Return value: + * 1 on success / 0 already queued / < 0 for error + */ +static int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr, + struct delayed_work *work, + unsigned long delay) +{ + if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) { + printk(KERN_ERR + "ERROR: FIP Ctlr '%d' attempted to queue work, " + "when no workqueue created.\n", ctlr->id); + dump_stack(); + + return -EINVAL; + } + + return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay); +} + +static int fcoe_fcf_device_match(struct fcoe_fcf_device *new, + struct fcoe_fcf_device *old) +{ + if (new->switch_name == old->switch_name && + new->fabric_name == 
old->fabric_name &&
+	    new->fc_map == old->fc_map &&
+	    ether_addr_equal(new->mac, old->mac))
+		return 1;
+	return 0;
+}
+
+/**
+ * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs
+ * @parent:    The parent device to which the fcoe_ctlr instance
+ *             should be attached
+ * @f:         The LLD's FCoE sysfs function template pointer
+ * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD
+ *
+ * This routine allocates a FIP ctlr object with some additional memory
+ * for the LLD. The FIP ctlr is initialized, added to sysfs and then
+ * attributes are added to it.
+ */
+struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+			    struct fcoe_sysfs_function_template *f,
+			    int priv_size)
+{
+	struct fcoe_ctlr_device *ctlr;
+	int error = 0;
+
+	ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size,
+		       GFP_KERNEL);
+	if (!ctlr)
+		goto out;
+
+	ctlr->id = atomic_inc_return(&ctlr_num) - 1;
+	ctlr->f = f;
+	ctlr->mode = FIP_CONN_TYPE_FABRIC;
+	INIT_LIST_HEAD(&ctlr->fcfs);
+	mutex_init(&ctlr->lock);
+	ctlr->dev.parent = parent;
+	ctlr->dev.bus = &fcoe_bus_type;
+	ctlr->dev.type = &fcoe_ctlr_device_type;
+
+	ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;
+
+	snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
+		 "ctlr_wq_%d", ctlr->id);
+	ctlr->work_q = create_singlethread_workqueue(
+		ctlr->work_q_name);
+	if (!ctlr->work_q)
+		goto out_del;
+
+	snprintf(ctlr->devloss_work_q_name,
+		 sizeof(ctlr->devloss_work_q_name),
+		 "ctlr_dl_wq_%d", ctlr->id);
+	ctlr->devloss_work_q = create_singlethread_workqueue(
+		ctlr->devloss_work_q_name);
+	if (!ctlr->devloss_work_q)
+		goto out_del_q;
+
+	dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
+	error = device_register(&ctlr->dev);
+	if (error) {
+		destroy_workqueue(ctlr->devloss_work_q);
+		destroy_workqueue(ctlr->work_q);
+		put_device(&ctlr->dev);
+		return NULL;
+	}
+
+	return ctlr;
+
+out_del_q:
+	destroy_workqueue(ctlr->work_q);
+	ctlr->work_q = NULL;
+out_del:
+	kfree(ctlr);
+out:
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add);
+
+/**
+ * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs
+ * @ctlr: A pointer to the ctlr to be deleted
+ *
+ * Deletes a FIP ctlr and any fcfs attached
+ * to it. Deleting fcfs will cause their children
+ * to be deleted as well.
+ *
+ * The ctlr is detached from sysfs and its resources
+ * are freed (work q), but the memory is not freed
+ * until its last reference is released.
+ *
+ * This routine expects no locks to be held before
+ * calling.
+ *
+ * TODO: Currently there are no callbacks to clean up LLD data
+ * for a fcoe_fcf_device. LLDs must keep this in mind as they need
+ * to clean up each of their LLD data for all fcoe_fcf_device before
+ * calling fcoe_ctlr_device_delete.
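+ *
+ * Teardown order: the fcfs are detached under ctlr->lock and their
+ * final deletes queued, the ctlr workqueue is flushed so those deletes
+ * complete, and only then are both workqueues destroyed and the device
+ * unregistered.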
+ */ +void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr) +{ + struct fcoe_fcf_device *fcf, *next; + /* Remove any attached fcfs */ + mutex_lock(&ctlr->lock); + list_for_each_entry_safe(fcf, next, + &ctlr->fcfs, peers) { + list_del(&fcf->peers); + fcf->state = FCOE_FCF_STATE_DELETED; + fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work); + } + mutex_unlock(&ctlr->lock); + + fcoe_ctlr_device_flush_work(ctlr); + + destroy_workqueue(ctlr->devloss_work_q); + ctlr->devloss_work_q = NULL; + destroy_workqueue(ctlr->work_q); + ctlr->work_q = NULL; + + device_unregister(&ctlr->dev); +} +EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete); + +/** + * fcoe_fcf_device_final_delete() - Final delete routine + * @work: The FIP fcf's embedded work struct + * + * It is expected that the fcf has been removed from + * the FIP ctlr's list before calling this routine. + */ +static void fcoe_fcf_device_final_delete(struct work_struct *work) +{ + struct fcoe_fcf_device *fcf = + container_of(work, struct fcoe_fcf_device, delete_work); + struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); + + /* + * Cancel any outstanding timers. These should really exist + * only when rmmod'ing the LLDD and we're asking for + * immediate termination of the rports + */ + if (!cancel_delayed_work(&fcf->dev_loss_work)) + fcoe_ctlr_device_flush_devloss(ctlr); + + device_unregister(&fcf->dev); +} + +/** + * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires + * @work: The FIP fcf's embedded work struct + * + * Removes the fcf from the FIP ctlr's list of fcfs and + * queues the final deletion. + */ +static void fip_timeout_deleted_fcf(struct work_struct *work) +{ + struct fcoe_fcf_device *fcf = + container_of(work, struct fcoe_fcf_device, dev_loss_work.work); + struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); + + mutex_lock(&ctlr->lock); + + /* + * If the fcf is deleted or reconnected before the timer + * fires the devloss queue will be flushed, but the state will + * either be CONNECTED or DELETED. If that is the case we + * cancel deleting the fcf. + */ + if (fcf->state != FCOE_FCF_STATE_DISCONNECTED) + goto out; + + dev_printk(KERN_ERR, &fcf->dev, + "FIP fcf connection time out: removing fcf\n"); + + list_del(&fcf->peers); + fcf->state = FCOE_FCF_STATE_DELETED; + fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work); + +out: + mutex_unlock(&ctlr->lock); +} + +/** + * fcoe_fcf_device_delete() - Delete a FIP fcf + * @fcf: Pointer to the fcf which is to be deleted + * + * Queues the FIP fcf on the devloss workqueue + * + * Expects the ctlr_attrs mutex to be held for fcf + * state change. + */ +void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf) +{ + struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); + int timeout = fcf->dev_loss_tmo; + + if (fcf->state != FCOE_FCF_STATE_CONNECTED) + return; + + fcf->state = FCOE_FCF_STATE_DISCONNECTED; + + /* + * FCF will only be re-connected by the LLD calling + * fcoe_fcf_device_add, and it should be setting up + * priv then. 
+ */ + fcf->priv = NULL; + + fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work, + timeout * HZ); +} +EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete); + +/** + * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system + * @ctlr: The fcoe_ctlr_device that will be the fcoe_fcf_device parent + * @new_fcf: A temporary FCF used for lookups on the current list of fcfs + * + * Expects to be called with the ctlr->lock held + */ +struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr, + struct fcoe_fcf_device *new_fcf) +{ + struct fcoe_fcf_device *fcf; + int error = 0; + + list_for_each_entry(fcf, &ctlr->fcfs, peers) { + if (fcoe_fcf_device_match(new_fcf, fcf)) { + if (fcf->state == FCOE_FCF_STATE_CONNECTED) + return fcf; + + fcf->state = FCOE_FCF_STATE_CONNECTED; + + if (!cancel_delayed_work(&fcf->dev_loss_work)) + fcoe_ctlr_device_flush_devloss(ctlr); + + return fcf; + } + } + + fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC); + if (unlikely(!fcf)) + goto out; + + INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete); + INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf); + + fcf->dev.parent = &ctlr->dev; + fcf->dev.bus = &fcoe_bus_type; + fcf->dev.type = &fcoe_fcf_device_type; + fcf->id = atomic_inc_return(&fcf_num) - 1; + fcf->state = FCOE_FCF_STATE_UNKNOWN; + + fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo; + + dev_set_name(&fcf->dev, "fcf_%d", fcf->id); + + fcf->fabric_name = new_fcf->fabric_name; + fcf->switch_name = new_fcf->switch_name; + fcf->fc_map = new_fcf->fc_map; + fcf->vfid = new_fcf->vfid; + memcpy(fcf->mac, new_fcf->mac, ETH_ALEN); + fcf->priority = new_fcf->priority; + fcf->fka_period = new_fcf->fka_period; + fcf->selected = new_fcf->selected; + + error = device_register(&fcf->dev); + if (error) { + put_device(&fcf->dev); + goto out; + } + + fcf->state = FCOE_FCF_STATE_CONNECTED; + list_add_tail(&fcf->peers, &ctlr->fcfs); + + return fcf; + +out: + return NULL; +} +EXPORT_SYMBOL_GPL(fcoe_fcf_device_add); + +int __init fcoe_sysfs_setup(void) +{ + atomic_set(&ctlr_num, 0); + atomic_set(&fcf_num, 0); + + return bus_register(&fcoe_bus_type); +} + +void __exit fcoe_sysfs_teardown(void) +{ + bus_unregister(&fcoe_bus_type); +} diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c new file mode 100644 index 000000000..a48d24af9 --- /dev/null +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -0,0 +1,1057 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 
+ * + * Maintained at www.Open-FCoE.org + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libfcoe.h" + +MODULE_AUTHOR("Open-FCoE.org"); +MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs"); +MODULE_LICENSE("GPL v2"); + +static int fcoe_transport_create(const char *, const struct kernel_param *); +static int fcoe_transport_destroy(const char *, const struct kernel_param *); +static int fcoe_transport_show(char *buffer, const struct kernel_param *kp); +static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device); +static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device); +static int fcoe_transport_enable(const char *, const struct kernel_param *); +static int fcoe_transport_disable(const char *, const struct kernel_param *); +static int libfcoe_device_notification(struct notifier_block *notifier, + ulong event, void *ptr); + +static LIST_HEAD(fcoe_transports); +static DEFINE_MUTEX(ft_mutex); +static LIST_HEAD(fcoe_netdevs); +static DEFINE_MUTEX(fn_mutex); + +unsigned int libfcoe_debug_logging; +module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); + +module_param_call(show, NULL, fcoe_transport_show, NULL, S_IRUSR); +__MODULE_PARM_TYPE(show, "string"); +MODULE_PARM_DESC(show, " Show attached FCoE transports"); + +module_param_call(create, fcoe_transport_create, NULL, + (void *)FIP_MODE_FABRIC, S_IWUSR); +__MODULE_PARM_TYPE(create, "string"); +MODULE_PARM_DESC(create, " Creates fcoe instance on an ethernet interface"); + +module_param_call(create_vn2vn, fcoe_transport_create, NULL, + (void *)FIP_MODE_VN2VN, S_IWUSR); +__MODULE_PARM_TYPE(create_vn2vn, "string"); +MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance " + "on an Ethernet interface"); + +module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR); +__MODULE_PARM_TYPE(destroy, "string"); +MODULE_PARM_DESC(destroy, " Destroys fcoe instance on an ethernet interface"); + +module_param_call(enable, fcoe_transport_enable, NULL, NULL, S_IWUSR); +__MODULE_PARM_TYPE(enable, "string"); +MODULE_PARM_DESC(enable, " Enables fcoe on an ethernet interface."); + +module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR); +__MODULE_PARM_TYPE(disable, "string"); +MODULE_PARM_DESC(disable, " Disables fcoe on an ethernet interface."); + +/* notification function for packets from net device */ +static struct notifier_block libfcoe_notifier = { + .notifier_call = libfcoe_device_notification, +}; + +static const struct { + u32 fc_port_speed; +#define SPEED_2000 2000 +#define SPEED_4000 4000 +#define SPEED_8000 8000 +#define SPEED_16000 16000 +#define SPEED_32000 32000 + u32 eth_port_speed; +} fcoe_port_speed_mapping[] = { + { FC_PORTSPEED_1GBIT, SPEED_1000 }, + { FC_PORTSPEED_2GBIT, SPEED_2000 }, + { FC_PORTSPEED_4GBIT, SPEED_4000 }, + { FC_PORTSPEED_8GBIT, SPEED_8000 }, + { FC_PORTSPEED_10GBIT, SPEED_10000 }, + { FC_PORTSPEED_16GBIT, SPEED_16000 }, + { FC_PORTSPEED_20GBIT, SPEED_20000 }, + { FC_PORTSPEED_25GBIT, SPEED_25000 }, + { FC_PORTSPEED_32GBIT, SPEED_32000 }, + { FC_PORTSPEED_40GBIT, SPEED_40000 }, + { FC_PORTSPEED_50GBIT, SPEED_50000 }, + { FC_PORTSPEED_100GBIT, SPEED_100000 }, +}; + +static inline u32 eth2fc_speed(u32 eth_port_speed) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(fcoe_port_speed_mapping); i++) { + if (fcoe_port_speed_mapping[i].eth_port_speed == eth_port_speed) + 
return fcoe_port_speed_mapping[i].fc_port_speed; + } + + return FC_PORTSPEED_UNKNOWN; +} + +/** + * fcoe_link_speed_update() - Update the supported and actual link speeds + * @lport: The local port to update speeds for + * + * Returns: 0 if the ethtool query was successful + * -1 if the ethtool query failed + */ +int fcoe_link_speed_update(struct fc_lport *lport) +{ + struct net_device *netdev = fcoe_get_netdev(lport); + struct ethtool_link_ksettings ecmd; + + if (!__ethtool_get_link_ksettings(netdev, &ecmd)) { + lport->link_supported_speeds &= ~(FC_PORTSPEED_1GBIT | + FC_PORTSPEED_10GBIT | + FC_PORTSPEED_20GBIT | + FC_PORTSPEED_40GBIT); + + if (ecmd.link_modes.supported[0] & ( + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_1000baseKX_Full)) + lport->link_supported_speeds |= FC_PORTSPEED_1GBIT; + + if (ecmd.link_modes.supported[0] & ( + SUPPORTED_10000baseT_Full | + SUPPORTED_10000baseKX4_Full | + SUPPORTED_10000baseKR_Full | + SUPPORTED_10000baseR_FEC)) + lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; + + if (ecmd.link_modes.supported[0] & ( + SUPPORTED_20000baseMLD2_Full | + SUPPORTED_20000baseKR2_Full)) + lport->link_supported_speeds |= FC_PORTSPEED_20GBIT; + + if (ecmd.link_modes.supported[0] & ( + SUPPORTED_40000baseKR4_Full | + SUPPORTED_40000baseCR4_Full | + SUPPORTED_40000baseSR4_Full | + SUPPORTED_40000baseLR4_Full)) + lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; + + lport->link_speed = eth2fc_speed(ecmd.base.speed); + return 0; + } + return -1; +} +EXPORT_SYMBOL_GPL(fcoe_link_speed_update); + +/** + * __fcoe_get_lesb() - Get the Link Error Status Block (LESB) for a given lport + * @lport: The local port to update speeds for + * @fc_lesb: Pointer to the LESB to be filled up + * @netdev: Pointer to the netdev that is associated with the lport + * + * Note, the Link Error Status Block (LESB) for FCoE is defined in FC-BB-6 + * Clause 7.11 in v1.04. 
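+ *
+ * The per-CPU LinkFailureCount, VLinkFailureCount and MissDiscAdvCount
+ * statistics are summed below, and the FCS error count is taken from
+ * the netdev's rx_crc_errors.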
+ */ +void __fcoe_get_lesb(struct fc_lport *lport, + struct fc_els_lesb *fc_lesb, + struct net_device *netdev) +{ + unsigned int cpu; + u32 lfc, vlfc, mdac; + struct fc_stats *stats; + struct fcoe_fc_els_lesb *lesb; + struct rtnl_link_stats64 temp; + + lfc = 0; + vlfc = 0; + mdac = 0; + lesb = (struct fcoe_fc_els_lesb *)fc_lesb; + memset(lesb, 0, sizeof(*lesb)); + for_each_possible_cpu(cpu) { + stats = per_cpu_ptr(lport->stats, cpu); + lfc += READ_ONCE(stats->LinkFailureCount); + vlfc += READ_ONCE(stats->VLinkFailureCount); + mdac += READ_ONCE(stats->MissDiscAdvCount); + } + lesb->lesb_link_fail = htonl(lfc); + lesb->lesb_vlink_fail = htonl(vlfc); + lesb->lesb_miss_fka = htonl(mdac); + lesb->lesb_fcs_error = + htonl(dev_get_stats(netdev, &temp)->rx_crc_errors); +} +EXPORT_SYMBOL_GPL(__fcoe_get_lesb); + +/** + * fcoe_get_lesb() - Fill the FCoE Link Error Status Block + * @lport: the local port + * @fc_lesb: the link error status block + */ +void fcoe_get_lesb(struct fc_lport *lport, + struct fc_els_lesb *fc_lesb) +{ + struct net_device *netdev = fcoe_get_netdev(lport); + + __fcoe_get_lesb(lport, fc_lesb, netdev); +} +EXPORT_SYMBOL_GPL(fcoe_get_lesb); + +/** + * fcoe_ctlr_get_lesb() - Get the Link Error Status Block (LESB) for a given + * fcoe controller device + * @ctlr_dev: The given fcoe controller device + * + */ +void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev) +{ + struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev); + struct net_device *netdev = fcoe_get_netdev(fip->lp); + struct fc_els_lesb *fc_lesb; + + fc_lesb = (struct fc_els_lesb *)(&ctlr_dev->lesb); + __fcoe_get_lesb(fip->lp, fc_lesb, netdev); +} +EXPORT_SYMBOL_GPL(fcoe_ctlr_get_lesb); + +void fcoe_wwn_to_str(u64 wwn, char *buf, int len) +{ + u8 wwpn[8]; + + u64_to_wwn(wwn, wwpn); + snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x", + wwpn[0], wwpn[1], wwpn[2], wwpn[3], + wwpn[4], wwpn[5], wwpn[6], wwpn[7]); +} +EXPORT_SYMBOL_GPL(fcoe_wwn_to_str); + +/** + * fcoe_validate_vport_create() - Validate a vport before creating it + * @vport: NPIV port to be created + * + * This routine is meant to add validation for a vport before creating it + * via fcoe_vport_create(). 
+ * Current validations are:
+ *	- WWPN supplied is unique for given lport
+ */
+int fcoe_validate_vport_create(struct fc_vport *vport)
+{
+	struct Scsi_Host *shost = vport_to_shost(vport);
+	struct fc_lport *n_port = shost_priv(shost);
+	struct fc_lport *vn_port;
+	int rc = 0;
+	char buf[32];
+
+	mutex_lock(&n_port->lp_mutex);
+
+	fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
+	/* Check that the wwpn is not the same as that of the lport */
+	if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
+		LIBFCOE_TRANSPORT_DBG("vport WWPN 0x%s is same as that of the "
+				      "base port WWPN\n", buf);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Check if there is any existing vport with same wwpn */
+	list_for_each_entry(vn_port, &n_port->vports, list) {
+		if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
+			LIBFCOE_TRANSPORT_DBG("vport with given WWPN 0x%s "
+					      "already exists\n", buf);
+			rc = -EINVAL;
+			break;
+		}
+	}
+out:
+	mutex_unlock(&n_port->lp_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(fcoe_validate_vport_create);
+
+/**
+ * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
+ * @netdev: the associated net device
+ * @wwn: the output WWN
+ * @type: the type of WWN (WWPN or WWNN)
+ *
+ * Returns: 0 for success
+ */
+int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
+{
+	const struct net_device_ops *ops = netdev->netdev_ops;
+
+	if (ops->ndo_fcoe_get_wwn)
+		return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(fcoe_get_wwn);
+
+/**
+ * fcoe_fc_crc() - Calculates the CRC for a given frame
+ * @fp: The frame to be checksummed
+ *
+ * This uses the crc32() routine to calculate the CRC for a frame
+ *
+ * Return: The 32 bit CRC value
+ */
+u32 fcoe_fc_crc(struct fc_frame *fp)
+{
+	struct sk_buff *skb = fp_skb(fp);
+	skb_frag_t *frag;
+	unsigned char *data;
+	unsigned long off, len, clen;
+	u32 crc;
+	unsigned i;
+
+	crc = crc32(~0, skb->data, skb_headlen(skb));
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		frag = &skb_shinfo(skb)->frags[i];
+		off = skb_frag_off(frag);
+		len = skb_frag_size(frag);
+		while (len > 0) {
+			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
+			data = kmap_atomic(
+				skb_frag_page(frag) + (off >> PAGE_SHIFT));
+			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
+			kunmap_atomic(data);
+			off += clen;
+			len -= clen;
+		}
+	}
+	return crc;
+}
+EXPORT_SYMBOL_GPL(fcoe_fc_crc);
+
+/**
+ * fcoe_start_io() - Start FCoE I/O
+ * @skb: The packet to be transmitted
+ *
+ * This routine is called from the net device to start transmitting
+ * FCoE packets.
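+ * The skb is cloned so that, if the transmit fails, the caller still
+ * owns the original and can queue it for a later retry.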
+ * + * Returns: 0 for success + */ +int fcoe_start_io(struct sk_buff *skb) +{ + struct sk_buff *nskb; + int rc; + + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + return -ENOMEM; + rc = dev_queue_xmit(nskb); + if (rc != 0) + return rc; + kfree_skb(skb); + return 0; +} +EXPORT_SYMBOL_GPL(fcoe_start_io); + + +/** + * fcoe_clean_pending_queue() - Dequeue a skb and free it + * @lport: The local port to dequeue a skb on + */ +void fcoe_clean_pending_queue(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct sk_buff *skb; + + spin_lock_bh(&port->fcoe_pending_queue.lock); + while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) { + spin_unlock_bh(&port->fcoe_pending_queue.lock); + kfree_skb(skb); + spin_lock_bh(&port->fcoe_pending_queue.lock); + } + spin_unlock_bh(&port->fcoe_pending_queue.lock); +} +EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue); + +/** + * fcoe_check_wait_queue() - Attempt to clear the transmit backlog + * @lport: The local port whose backlog is to be cleared + * @skb: The received FIP packet + * + * This empties the wait_queue, dequeues the head of the wait_queue queue + * and calls fcoe_start_io() for each packet. If all skb have been + * transmitted it returns the qlen. If an error occurs it restores + * wait_queue (to try again later) and returns -1. + * + * The wait_queue is used when the skb transmit fails. The failed skb + * will go in the wait_queue which will be emptied by the timer function or + * by the next skb transmit. + */ +void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb) +{ + struct fcoe_port *port = lport_priv(lport); + int rc; + + spin_lock_bh(&port->fcoe_pending_queue.lock); + + if (skb) + __skb_queue_tail(&port->fcoe_pending_queue, skb); + + if (port->fcoe_pending_queue_active) + goto out; + port->fcoe_pending_queue_active = 1; + + while (port->fcoe_pending_queue.qlen) { + /* keep qlen > 0 until fcoe_start_io succeeds */ + port->fcoe_pending_queue.qlen++; + skb = __skb_dequeue(&port->fcoe_pending_queue); + + spin_unlock_bh(&port->fcoe_pending_queue.lock); + rc = fcoe_start_io(skb); + spin_lock_bh(&port->fcoe_pending_queue.lock); + + if (rc) { + __skb_queue_head(&port->fcoe_pending_queue, skb); + /* undo temporary increment above */ + port->fcoe_pending_queue.qlen--; + break; + } + /* undo temporary increment above */ + port->fcoe_pending_queue.qlen--; + } + + if (port->fcoe_pending_queue.qlen < port->min_queue_depth) + lport->qfull = 0; + if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer)) + mod_timer(&port->timer, jiffies + 2); + port->fcoe_pending_queue_active = 0; +out: + if (port->fcoe_pending_queue.qlen > port->max_queue_depth) + lport->qfull = 1; + spin_unlock_bh(&port->fcoe_pending_queue.lock); +} +EXPORT_SYMBOL_GPL(fcoe_check_wait_queue); + +/** + * fcoe_queue_timer() - The fcoe queue timer + * @t: Timer context use to obtain the FCoE port + * + * Calls fcoe_check_wait_queue on timeout + */ +void fcoe_queue_timer(struct timer_list *t) +{ + struct fcoe_port *port = from_timer(port, t, timer); + + fcoe_check_wait_queue(port->lport, NULL); +} +EXPORT_SYMBOL_GPL(fcoe_queue_timer); + +/** + * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC + * @skb: The packet to be transmitted + * @tlen: The total length of the trailer + * @fps: The fcoe context + * + * This routine allocates a page for frame trailers. The page is re-used if + * there is enough room left on it for the current trailer. 
If there isn't + * enough buffer left a new page is allocated for the trailer. Reference to + * the page from this function as well as the skbs using the page fragments + * ensure that the page is freed at the appropriate time. + * + * Returns: 0 for success + */ +int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen, + struct fcoe_percpu_s *fps) +{ + struct page *page; + + page = fps->crc_eof_page; + if (!page) { + page = alloc_page(GFP_ATOMIC); + if (!page) + return -ENOMEM; + + fps->crc_eof_page = page; + fps->crc_eof_offset = 0; + } + + get_page(page); + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, + fps->crc_eof_offset, tlen); + skb->len += tlen; + skb->data_len += tlen; + skb->truesize += tlen; + fps->crc_eof_offset += sizeof(struct fcoe_crc_eof); + + if (fps->crc_eof_offset >= PAGE_SIZE) { + fps->crc_eof_page = NULL; + fps->crc_eof_offset = 0; + put_page(page); + } + + return 0; +} +EXPORT_SYMBOL_GPL(fcoe_get_paged_crc_eof); + +/** + * fcoe_transport_lookup - find an fcoe transport that matches a netdev + * @netdev: The netdev to look for from all attached transports + * + * Returns : ptr to the fcoe transport that supports this netdev or NULL + * if not found. + * + * The ft_mutex should be held when this is called + */ +static struct fcoe_transport *fcoe_transport_lookup(struct net_device *netdev) +{ + struct fcoe_transport *ft = NULL; + + list_for_each_entry(ft, &fcoe_transports, list) + if (ft->match && ft->match(netdev)) + return ft; + return NULL; +} + +/** + * fcoe_transport_attach - Attaches an FCoE transport + * @ft: The fcoe transport to be attached + * + * Returns : 0 for success + */ +int fcoe_transport_attach(struct fcoe_transport *ft) +{ + int rc = 0; + + mutex_lock(&ft_mutex); + if (ft->attached) { + LIBFCOE_TRANSPORT_DBG("transport %s already attached\n", + ft->name); + rc = -EEXIST; + goto out_attach; + } + + /* Add default transport to the tail */ + if (strcmp(ft->name, FCOE_TRANSPORT_DEFAULT)) + list_add(&ft->list, &fcoe_transports); + else + list_add_tail(&ft->list, &fcoe_transports); + + ft->attached = true; + LIBFCOE_TRANSPORT_DBG("attaching transport %s\n", ft->name); + +out_attach: + mutex_unlock(&ft_mutex); + return rc; +} +EXPORT_SYMBOL(fcoe_transport_attach); + +/** + * fcoe_transport_detach - Detaches an FCoE transport + * @ft: The fcoe transport to be attached + * + * Returns : 0 for success + */ +int fcoe_transport_detach(struct fcoe_transport *ft) +{ + int rc = 0; + struct fcoe_netdev_mapping *nm = NULL, *tmp; + + mutex_lock(&ft_mutex); + if (!ft->attached) { + LIBFCOE_TRANSPORT_DBG("transport %s already detached\n", + ft->name); + rc = -ENODEV; + goto out_attach; + } + + /* remove netdev mapping for this transport as it is going away */ + mutex_lock(&fn_mutex); + list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) { + if (nm->ft == ft) { + LIBFCOE_TRANSPORT_DBG("transport %s going away, " + "remove its netdev mapping for %s\n", + ft->name, nm->netdev->name); + list_del(&nm->list); + kfree(nm); + } + } + mutex_unlock(&fn_mutex); + + list_del(&ft->list); + ft->attached = false; + LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name); + +out_attach: + mutex_unlock(&ft_mutex); + return rc; + +} +EXPORT_SYMBOL(fcoe_transport_detach); + +static int fcoe_transport_show(char *buffer, const struct kernel_param *kp) +{ + int i, j; + struct fcoe_transport *ft = NULL; + + i = j = sprintf(buffer, "Attached FCoE transports:"); + mutex_lock(&ft_mutex); + list_for_each_entry(ft, &fcoe_transports, list) { + if (i >= PAGE_SIZE - IFNAMSIZ) + 
break; + i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name); + } + mutex_unlock(&ft_mutex); + if (i == j) + i += snprintf(&buffer[i], IFNAMSIZ, "none"); + return i; +} + +static int __init fcoe_transport_init(void) +{ + register_netdevice_notifier(&libfcoe_notifier); + return 0; +} + +static int fcoe_transport_exit(void) +{ + struct fcoe_transport *ft; + + unregister_netdevice_notifier(&libfcoe_notifier); + mutex_lock(&ft_mutex); + list_for_each_entry(ft, &fcoe_transports, list) + printk(KERN_ERR "FCoE transport %s is still attached!\n", + ft->name); + mutex_unlock(&ft_mutex); + return 0; +} + + +static int fcoe_add_netdev_mapping(struct net_device *netdev, + struct fcoe_transport *ft) +{ + struct fcoe_netdev_mapping *nm; + + nm = kmalloc(sizeof(*nm), GFP_KERNEL); + if (!nm) { + printk(KERN_ERR "Unable to allocate netdev_mapping"); + return -ENOMEM; + } + + nm->netdev = netdev; + nm->ft = ft; + + mutex_lock(&fn_mutex); + list_add(&nm->list, &fcoe_netdevs); + mutex_unlock(&fn_mutex); + return 0; +} + + +static void fcoe_del_netdev_mapping(struct net_device *netdev) +{ + struct fcoe_netdev_mapping *nm = NULL, *tmp; + + mutex_lock(&fn_mutex); + list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) { + if (nm->netdev == netdev) { + list_del(&nm->list); + kfree(nm); + mutex_unlock(&fn_mutex); + return; + } + } + mutex_unlock(&fn_mutex); +} + + +/** + * fcoe_netdev_map_lookup - find the fcoe transport that matches the netdev on which + * it was created + * @netdev: The net device that the FCoE interface is on + * + * Returns : ptr to the fcoe transport that supports this netdev or NULL + * if not found. + * + * The ft_mutex should be held when this is called + */ +static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *netdev) +{ + struct fcoe_transport *ft = NULL; + struct fcoe_netdev_mapping *nm; + + mutex_lock(&fn_mutex); + list_for_each_entry(nm, &fcoe_netdevs, list) { + if (netdev == nm->netdev) { + ft = nm->ft; + mutex_unlock(&fn_mutex); + return ft; + } + } + + mutex_unlock(&fn_mutex); + return NULL; +} + +/** + * fcoe_if_to_netdev() - Parse a name buffer to get a net device + * @buffer: The name of the net device + * + * Returns: NULL or a ptr to net_device + */ +static struct net_device *fcoe_if_to_netdev(const char *buffer) +{ + char *cp; + char ifname[IFNAMSIZ + 2]; + + if (buffer) { + strscpy(ifname, buffer, IFNAMSIZ); + cp = ifname + strlen(ifname); + while (--cp >= ifname && *cp == '\n') + *cp = '\0'; + return dev_get_by_name(&init_net, ifname); + } + return NULL; +} + +/** + * libfcoe_device_notification() - Handler for net device events + * @notifier: The context of the notification + * @event: The type of event + * @ptr: The net device that the event was on + * + * This function is called by the Ethernet driver in case of link change event. 
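fcoe_transport_init() above hooks libfcoe_notifier into the netdev notifier chain so that stale netdev-to-transport mappings are dropped when an interface goes away; the handler itself follows below. The notifier_block is defined elsewhere in this file and is not visible in this hunk, so the sketch here only shows the conventional shape of such wiring, with an illustrative name.

/* Illustrative sketch: a notifier_block bound to the handler defined below. */
static struct notifier_block example_libfcoe_notifier = {
	.notifier_call = libfcoe_device_notification,
};

/* register_netdevice_notifier(&example_libfcoe_notifier) at init time,
 * unregister_netdevice_notifier(&example_libfcoe_notifier) at exit time.
 */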
+ * + * Returns: 0 for success + */ +static int libfcoe_device_notification(struct notifier_block *notifier, + ulong event, void *ptr) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + + switch (event) { + case NETDEV_UNREGISTER: + LIBFCOE_TRANSPORT_DBG("NETDEV_UNREGISTER %s\n", + netdev->name); + fcoe_del_netdev_mapping(netdev); + break; + } + return NOTIFY_OK; +} + +ssize_t fcoe_ctlr_create_store(const char *buf, size_t count) +{ + struct net_device *netdev = NULL; + struct fcoe_transport *ft = NULL; + int rc = 0; + int err; + + mutex_lock(&ft_mutex); + + netdev = fcoe_if_to_netdev(buf); + if (!netdev) { + LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buf); + rc = -ENODEV; + goto out_nodev; + } + + ft = fcoe_netdev_map_lookup(netdev); + if (ft) { + LIBFCOE_TRANSPORT_DBG("transport %s already has existing " + "FCoE instance on %s.\n", + ft->name, netdev->name); + rc = -EEXIST; + goto out_putdev; + } + + ft = fcoe_transport_lookup(netdev); + if (!ft) { + LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", + netdev->name); + rc = -ENODEV; + goto out_putdev; + } + + /* pass to transport create */ + err = ft->alloc ? ft->alloc(netdev) : -ENODEV; + if (err) { + fcoe_del_netdev_mapping(netdev); + rc = -ENOMEM; + goto out_putdev; + } + + err = fcoe_add_netdev_mapping(netdev, ft); + if (err) { + LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping " + "for FCoE transport %s for %s.\n", + ft->name, netdev->name); + rc = -ENODEV; + goto out_putdev; + } + + LIBFCOE_TRANSPORT_DBG("transport %s succeeded to create fcoe on %s.\n", + ft->name, netdev->name); + +out_putdev: + dev_put(netdev); +out_nodev: + mutex_unlock(&ft_mutex); + if (rc) + return rc; + return count; +} + +ssize_t fcoe_ctlr_destroy_store(const char *buf, size_t count) +{ + int rc = -ENODEV; + struct net_device *netdev = NULL; + struct fcoe_transport *ft = NULL; + + mutex_lock(&ft_mutex); + + netdev = fcoe_if_to_netdev(buf); + if (!netdev) { + LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buf); + goto out_nodev; + } + + ft = fcoe_netdev_map_lookup(netdev); + if (!ft) { + LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", + netdev->name); + goto out_putdev; + } + + /* pass to transport destroy */ + rc = ft->destroy(netdev); + if (rc) + goto out_putdev; + + fcoe_del_netdev_mapping(netdev); + LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n", + ft->name, (rc) ? "failed" : "succeeded", + netdev->name); + rc = count; /* required for successful return */ +out_putdev: + dev_put(netdev); +out_nodev: + mutex_unlock(&ft_mutex); + return rc; +} + +/** + * fcoe_transport_create() - Create a fcoe interface + * @buffer: The name of the Ethernet interface to create on + * @kp: The associated kernel param + * + * Called from sysfs. This holds the ft_mutex while calling the + * registered fcoe transport's create function. 
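fcoe_ctlr_create_store() above (and fcoe_transport_create() below) pick a transport by walking fcoe_transports and asking each transport's match() hook whether it wants the netdev. For context, a lower-level driver plugs into that list roughly as sketched here; this is illustrative only, the callback set is inferred from how the ops are invoked in this file, and every example_* name is hypothetical.

/* Illustrative sketch: registering a transport with the list used above. */
static bool example_match(struct net_device *netdev)
{
	/* claim only netdevs whose driver exposes FCoE WWNs */
	return netdev->netdev_ops && netdev->netdev_ops->ndo_fcoe_get_wwn;
}

static int example_create(struct net_device *netdev, enum fip_mode fip_mode)
{
	/* set up the lport/ctlr instance for this netdev */
	return 0;
}

static int example_destroy(struct net_device *netdev)
{
	/* tear the instance back down */
	return 0;
}

static struct fcoe_transport example_transport = {
	.name	 = "example_fcoe",
	.match	 = example_match,
	.create	 = example_create,
	.destroy = example_destroy,
};

/* fcoe_transport_attach(&example_transport) at module init,
 * fcoe_transport_detach(&example_transport) at module exit.
 */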
+ * + * Returns: 0 for success + */ +static int fcoe_transport_create(const char *buffer, + const struct kernel_param *kp) +{ + int rc = -ENODEV; + struct net_device *netdev = NULL; + struct fcoe_transport *ft = NULL; + enum fip_mode fip_mode = (enum fip_mode)(uintptr_t)kp->arg; + + mutex_lock(&ft_mutex); + + netdev = fcoe_if_to_netdev(buffer); + if (!netdev) { + LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer); + goto out_nodev; + } + + ft = fcoe_netdev_map_lookup(netdev); + if (ft) { + LIBFCOE_TRANSPORT_DBG("transport %s already has existing " + "FCoE instance on %s.\n", + ft->name, netdev->name); + rc = -EEXIST; + goto out_putdev; + } + + ft = fcoe_transport_lookup(netdev); + if (!ft) { + LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", + netdev->name); + goto out_putdev; + } + + rc = fcoe_add_netdev_mapping(netdev, ft); + if (rc) { + LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping " + "for FCoE transport %s for %s.\n", + ft->name, netdev->name); + goto out_putdev; + } + + /* pass to transport create */ + rc = ft->create ? ft->create(netdev, fip_mode) : -ENODEV; + if (rc) + fcoe_del_netdev_mapping(netdev); + + LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n", + ft->name, (rc) ? "failed" : "succeeded", + netdev->name); + +out_putdev: + dev_put(netdev); +out_nodev: + mutex_unlock(&ft_mutex); + return rc; +} + +/** + * fcoe_transport_destroy() - Destroy a FCoE interface + * @buffer: The name of the Ethernet interface to be destroyed + * @kp: The associated kernel parameter + * + * Called from sysfs. This holds the ft_mutex while calling the + * registered fcoe transport's destroy function. + * + * Returns: 0 for success + */ +static int fcoe_transport_destroy(const char *buffer, + const struct kernel_param *kp) +{ + int rc = -ENODEV; + struct net_device *netdev = NULL; + struct fcoe_transport *ft = NULL; + + mutex_lock(&ft_mutex); + + netdev = fcoe_if_to_netdev(buffer); + if (!netdev) { + LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer); + goto out_nodev; + } + + ft = fcoe_netdev_map_lookup(netdev); + if (!ft) { + LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", + netdev->name); + goto out_putdev; + } + + /* pass to transport destroy */ + rc = ft->destroy ? ft->destroy(netdev) : -ENODEV; + fcoe_del_netdev_mapping(netdev); + LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n", + ft->name, (rc) ? "failed" : "succeeded", + netdev->name); + +out_putdev: + dev_put(netdev); +out_nodev: + mutex_unlock(&ft_mutex); + return rc; +} + +/** + * fcoe_transport_disable() - Disables a FCoE interface + * @buffer: The name of the Ethernet interface to be disabled + * @kp: The associated kernel parameter + * + * Called from sysfs. + * + * Returns: 0 for success + */ +static int fcoe_transport_disable(const char *buffer, + const struct kernel_param *kp) +{ + int rc = -ENODEV; + struct net_device *netdev = NULL; + struct fcoe_transport *ft = NULL; + + mutex_lock(&ft_mutex); + + netdev = fcoe_if_to_netdev(buffer); + if (!netdev) + goto out_nodev; + + ft = fcoe_netdev_map_lookup(netdev); + if (!ft) + goto out_putdev; + + rc = ft->disable ? ft->disable(netdev) : -ENODEV; + +out_putdev: + dev_put(netdev); +out_nodev: + mutex_unlock(&ft_mutex); + return rc; +} + +/** + * fcoe_transport_enable() - Enables a FCoE interface + * @buffer: The name of the Ethernet interface to be enabled + * @kp: The associated kernel parameter + * + * Called from sysfs. 
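fcoe_transport_create(), fcoe_transport_destroy(), fcoe_transport_disable() and fcoe_transport_enable() are all written as module-parameter 'set' handlers (they take a const struct kernel_param *), and the create handler recovers its FIP mode from kp->arg. The hookup itself is not in this hunk; it is conventionally done with module_param_call(), roughly as in this sketch, where the parameter names, the chosen FIP mode and the description strings are assumptions.

/* Illustrative sketch: exposing the handlers above as writable module parameters. */
module_param_call(create, fcoe_transport_create, NULL,
		  (void *)FIP_MODE_FABRIC, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
MODULE_PARM_DESC(create, "Create an FCoE instance on an Ethernet interface");

module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(destroy, "string");
MODULE_PARM_DESC(destroy, "Destroy the FCoE instance on an Ethernet interface");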
+ * + * Returns: 0 for success + */ +static int fcoe_transport_enable(const char *buffer, + const struct kernel_param *kp) +{ + int rc = -ENODEV; + struct net_device *netdev = NULL; + struct fcoe_transport *ft = NULL; + + mutex_lock(&ft_mutex); + + netdev = fcoe_if_to_netdev(buffer); + if (!netdev) + goto out_nodev; + + ft = fcoe_netdev_map_lookup(netdev); + if (!ft) + goto out_putdev; + + rc = ft->enable ? ft->enable(netdev) : -ENODEV; + +out_putdev: + dev_put(netdev); +out_nodev: + mutex_unlock(&ft_mutex); + return rc; +} + +/** + * libfcoe_init() - Initialization routine for libfcoe.ko + */ +static int __init libfcoe_init(void) +{ + int rc = 0; + + rc = fcoe_transport_init(); + if (rc) + return rc; + + rc = fcoe_sysfs_setup(); + if (rc) + fcoe_transport_exit(); + + return rc; +} +module_init(libfcoe_init); + +/** + * libfcoe_exit() - Tear down libfcoe.ko + */ +static void __exit libfcoe_exit(void) +{ + fcoe_sysfs_teardown(); + fcoe_transport_exit(); +} +module_exit(libfcoe_exit); diff --git a/drivers/scsi/fcoe/libfcoe.h b/drivers/scsi/fcoe/libfcoe.h new file mode 100644 index 000000000..b8bdfab51 --- /dev/null +++ b/drivers/scsi/fcoe/libfcoe.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FCOE_LIBFCOE_H_ +#define _FCOE_LIBFCOE_H_ + +extern unsigned int libfcoe_debug_logging; +#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ +#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */ +#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */ +#define LIBFCOE_SYSFS_LOGGING 0x08 /* fcoe_sysfs logging */ + +#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ +do { \ + if (unlikely(libfcoe_debug_logging & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ +} while (0) + +#define LIBFCOE_DBG(fmt, args...) \ + LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ + pr_info("libfcoe: " fmt, ##args);) + +#define LIBFCOE_FIP_DBG(fip, fmt, args...) \ + LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \ + pr_info("host%d: fip: " fmt, \ + (fip)->lp->host->host_no, ##args);) + +#define LIBFCOE_TRANSPORT_DBG(fmt, args...) \ + LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \ + pr_info("%s: " fmt, __func__, ##args);) + +#define LIBFCOE_SYSFS_DBG(cdev, fmt, args...) \ + LIBFCOE_CHECK_LOGGING(LIBFCOE_SYSFS_LOGGING, \ + pr_info("ctlr_%d: " fmt, cdev->id, ##args);) + +#endif /* _FCOE_LIBFCOE_H_ */ diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c new file mode 100644 index 000000000..504c4e0c5 --- /dev/null +++ b/drivers/scsi/fdomain.c @@ -0,0 +1,607 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Future Domain TMC-16x0 and TMC-3260 SCSI host adapters + * Copyright 2019 Ondrej Zary + * + * Original driver by + * Rickard E. Faith, faith@cs.unc.edu + * + * Future Domain BIOS versions supported for autodetect: + * 2.0, 3.0, 3.2, 3.4 (1.0), 3.5 (2.0), 3.6, 3.61 + * Chips supported: + * TMC-1800, TMC-18C50, TMC-18C30, TMC-36C70 + * Boards supported: + * Future Domain TMC-1650, TMC-1660, TMC-1670, TMC-1680, TMC-1610M/MER/MEX + * Future Domain TMC-3260 (PCI) + * Quantum ISA-200S, ISA-250MG + * Adaptec AHA-2920A (PCI) [BUT *NOT* AHA-2920C -- use aic7xxx instead] + * IBM ? + * + * NOTE: + * + * The Adaptec AHA-2920C has an Adaptec AIC-7850 chip on it. + * Use the aic7xxx driver for this board. + * + * The Adaptec AHA-2920A has a Future Domain chip on it, so this is the right + * driver for that card. Unfortunately, the boxes will probably just say + * "2920", so you'll have to look on the card for a Future Domain logo, or a + * letter after the 2920. 
+ * + * If you have a TMC-8xx or TMC-9xx board, then this is not the driver for + * your board. + * + * DESCRIPTION: + * + * This is the Linux low-level SCSI driver for Future Domain TMC-1660/1680 + * TMC-1650/1670, and TMC-3260 SCSI host adapters. The 1650 and 1670 have a + * 25-pin external connector, whereas the 1660 and 1680 have a SCSI-2 50-pin + * high-density external connector. The 1670 and 1680 have floppy disk + * controllers built in. The TMC-3260 is a PCI bus card. + * + * Future Domain's older boards are based on the TMC-1800 chip, and this + * driver was originally written for a TMC-1680 board with the TMC-1800 chip. + * More recently, boards are being produced with the TMC-18C50 and TMC-18C30 + * chips. + * + * Please note that the drive ordering that Future Domain implemented in BIOS + * versions 3.4 and 3.5 is the opposite of the order (currently) used by the + * rest of the SCSI industry. + * + * + * REFERENCES USED: + * + * "TMC-1800 SCSI Chip Specification (FDC-1800T)", Future Domain Corporation, + * 1990. + * + * "Technical Reference Manual: 18C50 SCSI Host Adapter Chip", Future Domain + * Corporation, January 1992. + * + * "LXT SCSI Products: Specifications and OEM Technical Manual (Revision + * B/September 1991)", Maxtor Corporation, 1991. + * + * "7213S product Manual (Revision P3)", Maxtor Corporation, 1992. + * + * "Draft Proposed American National Standard: Small Computer System + * Interface - 2 (SCSI-2)", Global Engineering Documents. (X3T9.2/86-109, + * revision 10h, October 17, 1991) + * + * Private communications, Drew Eckhardt (drew@cs.colorado.edu) and Eric + * Youngdale (ericy@cais.com), 1992. + * + * Private communication, Tuong Le (Future Domain Engineering department), + * 1994. (Disk geometry computations for Future Domain BIOS version 3.4, and + * TMC-18C30 detection.) + * + * Hogan, Thom. The Programmer's PC Sourcebook. Microsoft Press, 1988. Page + * 60 (2.39: Disk Partition Table Layout). + * + * "18C30 Technical Reference Manual", Future Domain Corporation, 1993, page + * 6-1. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fdomain.h" + +/* + * FIFO_COUNT: The host adapter has an 8K cache (host adapters based on the + * 18C30 chip have a 2k cache). When this many 512 byte blocks are filled by + * the SCSI device, an interrupt will be raised. Therefore, this could be as + * low as 0, or as high as 16. Note, however, that values which are too high + * or too low seem to prevent any interrupts from occurring, and thereby lock + * up the machine. + */ +#define FIFO_COUNT 2 /* Number of 512 byte blocks before INTR */ +#define PARITY_MASK ACTL_PAREN /* Parity enabled, 0 = disabled */ + +enum chip_type { + unknown = 0x00, + tmc1800 = 0x01, + tmc18c50 = 0x02, + tmc18c30 = 0x03, +}; + +struct fdomain { + int base; + struct scsi_cmnd *cur_cmd; + enum chip_type chip; + struct work_struct work; +}; + +static struct scsi_pointer *fdomain_scsi_pointer(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +static inline void fdomain_make_bus_idle(struct fdomain *fd) +{ + outb(0, fd->base + REG_BCTL); + outb(0, fd->base + REG_MCTL); + if (fd->chip == tmc18c50 || fd->chip == tmc18c30) + /* Clear forced intr. 
*/ + outb(ACTL_RESET | ACTL_CLRFIRQ | PARITY_MASK, + fd->base + REG_ACTL); + else + outb(ACTL_RESET | PARITY_MASK, fd->base + REG_ACTL); +} + +static enum chip_type fdomain_identify(int port) +{ + u16 id = inb(port + REG_ID_LSB) | inb(port + REG_ID_MSB) << 8; + + switch (id) { + case 0x6127: + return tmc1800; + case 0x60e9: /* 18c50 or 18c30 */ + break; + default: + return unknown; + } + + /* Try to toggle 32-bit mode. This only works on an 18c30 chip. */ + outb(CFG2_32BIT, port + REG_CFG2); + if ((inb(port + REG_CFG2) & CFG2_32BIT)) { + outb(0, port + REG_CFG2); + if ((inb(port + REG_CFG2) & CFG2_32BIT) == 0) + return tmc18c30; + } + /* If that failed, we are an 18c50. */ + return tmc18c50; +} + +static int fdomain_test_loopback(int base) +{ + int i; + + for (i = 0; i < 255; i++) { + outb(i, base + REG_LOOPBACK); + if (inb(base + REG_LOOPBACK) != i) + return 1; + } + + return 0; +} + +static void fdomain_reset(int base) +{ + outb(BCTL_RST, base + REG_BCTL); + mdelay(20); + outb(0, base + REG_BCTL); + mdelay(1150); + outb(0, base + REG_MCTL); + outb(PARITY_MASK, base + REG_ACTL); +} + +static int fdomain_select(struct Scsi_Host *sh, int target) +{ + int status; + unsigned long timeout; + struct fdomain *fd = shost_priv(sh); + + outb(BCTL_BUSEN | BCTL_SEL, fd->base + REG_BCTL); + outb(BIT(sh->this_id) | BIT(target), fd->base + REG_SCSI_DATA_NOACK); + + /* Stop arbitration and enable parity */ + outb(PARITY_MASK, fd->base + REG_ACTL); + + timeout = 350; /* 350 msec */ + + do { + status = inb(fd->base + REG_BSTAT); + if (status & BSTAT_BSY) { + /* Enable SCSI Bus */ + /* (on error, should make bus idle with 0) */ + outb(BCTL_BUSEN, fd->base + REG_BCTL); + return 0; + } + mdelay(1); + } while (--timeout); + fdomain_make_bus_idle(fd); + return 1; +} + +static void fdomain_finish_cmd(struct fdomain *fd) +{ + outb(0, fd->base + REG_ICTL); + fdomain_make_bus_idle(fd); + scsi_done(fd->cur_cmd); + fd->cur_cmd = NULL; +} + +static void fdomain_read_data(struct scsi_cmnd *cmd) +{ + struct fdomain *fd = shost_priv(cmd->device->host); + unsigned char *virt, *ptr; + size_t offset, len; + + while ((len = inw(fd->base + REG_FIFO_COUNT)) > 0) { + offset = scsi_bufflen(cmd) - scsi_get_resid(cmd); + virt = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd), + &offset, &len); + ptr = virt + offset; + if (len & 1) + *ptr++ = inb(fd->base + REG_FIFO); + if (len > 1) + insw(fd->base + REG_FIFO, ptr, len >> 1); + scsi_set_resid(cmd, scsi_get_resid(cmd) - len); + scsi_kunmap_atomic_sg(virt); + } +} + +static void fdomain_write_data(struct scsi_cmnd *cmd) +{ + struct fdomain *fd = shost_priv(cmd->device->host); + /* 8k FIFO for pre-tmc18c30 chips, 2k FIFO for tmc18c30 */ + int FIFO_Size = fd->chip == tmc18c30 ? 
0x800 : 0x2000; + unsigned char *virt, *ptr; + size_t offset, len; + + while ((len = FIFO_Size - inw(fd->base + REG_FIFO_COUNT)) > 512) { + offset = scsi_bufflen(cmd) - scsi_get_resid(cmd); + if (len + offset > scsi_bufflen(cmd)) { + len = scsi_bufflen(cmd) - offset; + if (len == 0) + break; + } + virt = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd), + &offset, &len); + ptr = virt + offset; + if (len & 1) + outb(*ptr++, fd->base + REG_FIFO); + if (len > 1) + outsw(fd->base + REG_FIFO, ptr, len >> 1); + scsi_set_resid(cmd, scsi_get_resid(cmd) - len); + scsi_kunmap_atomic_sg(virt); + } +} + +static void fdomain_work(struct work_struct *work) +{ + struct fdomain *fd = container_of(work, struct fdomain, work); + struct Scsi_Host *sh = container_of((void *)fd, struct Scsi_Host, + hostdata); + struct scsi_cmnd *cmd = fd->cur_cmd; + struct scsi_pointer *scsi_pointer = fdomain_scsi_pointer(cmd); + unsigned long flags; + int status; + int done = 0; + + spin_lock_irqsave(sh->host_lock, flags); + + if (scsi_pointer->phase & in_arbitration) { + status = inb(fd->base + REG_ASTAT); + if (!(status & ASTAT_ARB)) { + set_host_byte(cmd, DID_BUS_BUSY); + fdomain_finish_cmd(fd); + goto out; + } + scsi_pointer->phase = in_selection; + + outb(ICTL_SEL | FIFO_COUNT, fd->base + REG_ICTL); + outb(BCTL_BUSEN | BCTL_SEL, fd->base + REG_BCTL); + outb(BIT(cmd->device->host->this_id) | BIT(scmd_id(cmd)), + fd->base + REG_SCSI_DATA_NOACK); + /* Stop arbitration and enable parity */ + outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL); + goto out; + } else if (scsi_pointer->phase & in_selection) { + status = inb(fd->base + REG_BSTAT); + if (!(status & BSTAT_BSY)) { + /* Try again, for slow devices */ + if (fdomain_select(cmd->device->host, scmd_id(cmd))) { + set_host_byte(cmd, DID_NO_CONNECT); + fdomain_finish_cmd(fd); + goto out; + } + /* Stop arbitration and enable parity */ + outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL); + } + scsi_pointer->phase = in_other; + outb(ICTL_FIFO | ICTL_REQ | FIFO_COUNT, fd->base + REG_ICTL); + outb(BCTL_BUSEN, fd->base + REG_BCTL); + goto out; + } + + /* fdomain_scsi_pointer(cur_cmd)->phase == in_other: this is the body of the routine */ + status = inb(fd->base + REG_BSTAT); + + if (status & BSTAT_REQ) { + switch (status & (BSTAT_MSG | BSTAT_CMD | BSTAT_IO)) { + case BSTAT_CMD: /* COMMAND OUT */ + outb(cmd->cmnd[scsi_pointer->sent_command++], + fd->base + REG_SCSI_DATA); + break; + case 0: /* DATA OUT -- tmc18c50/tmc18c30 only */ + if (fd->chip != tmc1800 && !scsi_pointer->have_data_in) { + scsi_pointer->have_data_in = -1; + outb(ACTL_IRQEN | ACTL_FIFOWR | ACTL_FIFOEN | + PARITY_MASK, fd->base + REG_ACTL); + } + break; + case BSTAT_IO: /* DATA IN -- tmc18c50/tmc18c30 only */ + if (fd->chip != tmc1800 && !scsi_pointer->have_data_in) { + scsi_pointer->have_data_in = 1; + outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK, + fd->base + REG_ACTL); + } + break; + case BSTAT_CMD | BSTAT_IO: /* STATUS IN */ + scsi_pointer->Status = inb(fd->base + REG_SCSI_DATA); + break; + case BSTAT_MSG | BSTAT_CMD: /* MESSAGE OUT */ + outb(MESSAGE_REJECT, fd->base + REG_SCSI_DATA); + break; + case BSTAT_MSG | BSTAT_CMD | BSTAT_IO: /* MESSAGE IN */ + scsi_pointer->Message = inb(fd->base + REG_SCSI_DATA); + if (scsi_pointer->Message == COMMAND_COMPLETE) + ++done; + break; + } + } + + if (fd->chip == tmc1800 && !scsi_pointer->have_data_in && + scsi_pointer->sent_command >= cmd->cmd_len) { + if (cmd->sc_data_direction == DMA_TO_DEVICE) { + scsi_pointer->have_data_in = -1; + outb(ACTL_IRQEN | 
ACTL_FIFOWR | ACTL_FIFOEN | + PARITY_MASK, fd->base + REG_ACTL); + } else { + scsi_pointer->have_data_in = 1; + outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK, + fd->base + REG_ACTL); + } + } + + if (scsi_pointer->have_data_in == -1) /* DATA OUT */ + fdomain_write_data(cmd); + + if (scsi_pointer->have_data_in == 1) /* DATA IN */ + fdomain_read_data(cmd); + + if (done) { + set_status_byte(cmd, scsi_pointer->Status); + set_host_byte(cmd, DID_OK); + scsi_msg_to_host_byte(cmd, scsi_pointer->Message); + fdomain_finish_cmd(fd); + } else { + if (scsi_pointer->phase & disconnect) { + outb(ICTL_FIFO | ICTL_SEL | ICTL_REQ | FIFO_COUNT, + fd->base + REG_ICTL); + outb(0, fd->base + REG_BCTL); + } else + outb(ICTL_FIFO | ICTL_REQ | FIFO_COUNT, + fd->base + REG_ICTL); + } +out: + spin_unlock_irqrestore(sh->host_lock, flags); +} + +static irqreturn_t fdomain_irq(int irq, void *dev_id) +{ + struct fdomain *fd = dev_id; + + /* Is it our IRQ? */ + if ((inb(fd->base + REG_ASTAT) & ASTAT_IRQ) == 0) + return IRQ_NONE; + + outb(0, fd->base + REG_ICTL); + + /* We usually have one spurious interrupt after each command. */ + if (!fd->cur_cmd) /* Spurious interrupt */ + return IRQ_NONE; + + schedule_work(&fd->work); + + return IRQ_HANDLED; +} + +static int fdomain_queue(struct Scsi_Host *sh, struct scsi_cmnd *cmd) +{ + struct scsi_pointer *scsi_pointer = fdomain_scsi_pointer(cmd); + struct fdomain *fd = shost_priv(cmd->device->host); + unsigned long flags; + + scsi_pointer->Status = 0; + scsi_pointer->Message = 0; + scsi_pointer->have_data_in = 0; + scsi_pointer->sent_command = 0; + scsi_pointer->phase = in_arbitration; + scsi_set_resid(cmd, scsi_bufflen(cmd)); + + spin_lock_irqsave(sh->host_lock, flags); + + fd->cur_cmd = cmd; + + fdomain_make_bus_idle(fd); + + /* Start arbitration */ + outb(0, fd->base + REG_ICTL); + outb(0, fd->base + REG_BCTL); /* Disable data drivers */ + /* Set our id bit */ + outb(BIT(cmd->device->host->this_id), fd->base + REG_SCSI_DATA_NOACK); + outb(ICTL_ARB, fd->base + REG_ICTL); + /* Start arbitration */ + outb(ACTL_ARB | ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL); + + spin_unlock_irqrestore(sh->host_lock, flags); + + return 0; +} + +static int fdomain_abort(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *sh = cmd->device->host; + struct fdomain *fd = shost_priv(sh); + unsigned long flags; + + if (!fd->cur_cmd) + return FAILED; + + spin_lock_irqsave(sh->host_lock, flags); + + fdomain_make_bus_idle(fd); + fdomain_scsi_pointer(fd->cur_cmd)->phase |= aborted; + + /* Aborts are not done well. . . 
*/ + set_host_byte(fd->cur_cmd, DID_ABORT); + fdomain_finish_cmd(fd); + spin_unlock_irqrestore(sh->host_lock, flags); + return SUCCESS; +} + +static int fdomain_host_reset(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *sh = cmd->device->host; + struct fdomain *fd = shost_priv(sh); + unsigned long flags; + + spin_lock_irqsave(sh->host_lock, flags); + fdomain_reset(fd->base); + spin_unlock_irqrestore(sh->host_lock, flags); + return SUCCESS; +} + +static int fdomain_biosparam(struct scsi_device *sdev, + struct block_device *bdev, sector_t capacity, + int geom[]) +{ + unsigned char *p = scsi_bios_ptable(bdev); + + if (p && p[65] == 0xaa && p[64] == 0x55 /* Partition table valid */ + && p[4]) { /* Partition type */ + geom[0] = p[5] + 1; /* heads */ + geom[1] = p[6] & 0x3f; /* sectors */ + } else { + if (capacity >= 0x7e0000) { + geom[0] = 255; /* heads */ + geom[1] = 63; /* sectors */ + } else if (capacity >= 0x200000) { + geom[0] = 128; /* heads */ + geom[1] = 63; /* sectors */ + } else { + geom[0] = 64; /* heads */ + geom[1] = 32; /* sectors */ + } + } + geom[2] = sector_div(capacity, geom[0] * geom[1]); + kfree(p); + + return 0; +} + +static const struct scsi_host_template fdomain_template = { + .module = THIS_MODULE, + .name = "Future Domain TMC-16x0", + .proc_name = "fdomain", + .queuecommand = fdomain_queue, + .eh_abort_handler = fdomain_abort, + .eh_host_reset_handler = fdomain_host_reset, + .bios_param = fdomain_biosparam, + .can_queue = 1, + .this_id = 7, + .sg_tablesize = 64, + .dma_boundary = PAGE_SIZE - 1, + .cmd_size = sizeof(struct scsi_pointer), +}; + +struct Scsi_Host *fdomain_create(int base, int irq, int this_id, + struct device *dev) +{ + struct Scsi_Host *sh; + struct fdomain *fd; + enum chip_type chip; + static const char * const chip_names[] = { + "Unknown", "TMC-1800", "TMC-18C50", "TMC-18C30" + }; + unsigned long irq_flags = 0; + + chip = fdomain_identify(base); + if (!chip) + return NULL; + + fdomain_reset(base); + + if (fdomain_test_loopback(base)) + return NULL; + + if (!irq) { + dev_err(dev, "card has no IRQ assigned"); + return NULL; + } + + sh = scsi_host_alloc(&fdomain_template, sizeof(struct fdomain)); + if (!sh) + return NULL; + + if (this_id) + sh->this_id = this_id & 0x07; + + sh->irq = irq; + sh->io_port = base; + sh->n_io_port = FDOMAIN_REGION_SIZE; + + fd = shost_priv(sh); + fd->base = base; + fd->chip = chip; + INIT_WORK(&fd->work, fdomain_work); + + if (dev_is_pci(dev) || !strcmp(dev->bus->name, "pcmcia")) + irq_flags = IRQF_SHARED; + + if (request_irq(irq, fdomain_irq, irq_flags, "fdomain", fd)) + goto fail_put; + + shost_printk(KERN_INFO, sh, "%s chip at 0x%x irq %d SCSI ID %d\n", + dev_is_pci(dev) ? "TMC-36C70 (PCI bus)" : chip_names[chip], + base, irq, sh->this_id); + + if (scsi_add_host(sh, dev)) + goto fail_free_irq; + + scsi_scan_host(sh); + + return sh; + +fail_free_irq: + free_irq(irq, fd); +fail_put: + scsi_host_put(sh); + return NULL; +} +EXPORT_SYMBOL_GPL(fdomain_create); + +int fdomain_destroy(struct Scsi_Host *sh) +{ + struct fdomain *fd = shost_priv(sh); + + cancel_work_sync(&fd->work); + scsi_remove_host(sh); + if (sh->irq) + free_irq(sh->irq, fd); + scsi_host_put(sh); + return 0; +} +EXPORT_SYMBOL_GPL(fdomain_destroy); + +#ifdef CONFIG_PM_SLEEP +static int fdomain_resume(struct device *dev) +{ + struct fdomain *fd = shost_priv(dev_get_drvdata(dev)); + + fdomain_reset(fd->base); + return 0; +} + +static SIMPLE_DEV_PM_OPS(fdomain_pm_ops, NULL, fdomain_resume); +#endif /* CONFIG_PM_SLEEP */ + +MODULE_AUTHOR("Ondrej Zary, Rickard E. 
Faith"); +MODULE_DESCRIPTION("Future Domain TMC-16x0/TMC-3260 SCSI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/fdomain.h b/drivers/scsi/fdomain.h new file mode 100644 index 000000000..93afcee20 --- /dev/null +++ b/drivers/scsi/fdomain.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#define FDOMAIN_REGION_SIZE 0x10 +#define FDOMAIN_BIOS_SIZE 0x2000 + +enum { + in_arbitration = 0x02, + in_selection = 0x04, + in_other = 0x08, + disconnect = 0x10, + aborted = 0x20, + sent_ident = 0x40, +}; + +/* (@) = not present on TMC1800, (#) = not present on TMC1800 and TMC18C50 */ +#define REG_SCSI_DATA 0 /* R/W: SCSI Data (with ACK) */ +#define REG_BSTAT 1 /* R: SCSI Bus Status */ +#define BSTAT_BSY BIT(0) /* Busy */ +#define BSTAT_MSG BIT(1) /* Message */ +#define BSTAT_IO BIT(2) /* Input/Output */ +#define BSTAT_CMD BIT(3) /* Command/Data */ +#define BSTAT_REQ BIT(4) /* Request and Not Ack */ +#define BSTAT_SEL BIT(5) /* Select */ +#define BSTAT_ACK BIT(6) /* Acknowledge and Request */ +#define BSTAT_ATN BIT(7) /* Attention */ +#define REG_BCTL 1 /* W: SCSI Bus Control */ +#define BCTL_RST BIT(0) /* Bus Reset */ +#define BCTL_SEL BIT(1) /* Select */ +#define BCTL_BSY BIT(2) /* Busy */ +#define BCTL_ATN BIT(3) /* Attention */ +#define BCTL_IO BIT(4) /* Input/Output */ +#define BCTL_CMD BIT(5) /* Command/Data */ +#define BCTL_MSG BIT(6) /* Message */ +#define BCTL_BUSEN BIT(7) /* Enable bus drivers */ +#define REG_ASTAT 2 /* R: Adapter Status 1 */ +#define ASTAT_IRQ BIT(0) /* Interrupt active */ +#define ASTAT_ARB BIT(1) /* Arbitration complete */ +#define ASTAT_PARERR BIT(2) /* Parity error */ +#define ASTAT_RST BIT(3) /* SCSI reset occurred */ +#define ASTAT_FIFODIR BIT(4) /* FIFO direction */ +#define ASTAT_FIFOEN BIT(5) /* FIFO enabled */ +#define ASTAT_PAREN BIT(6) /* Parity enabled */ +#define ASTAT_BUSEN BIT(7) /* Bus drivers enabled */ +#define REG_ICTL 2 /* W: Interrupt Control */ +#define ICTL_FIFO_MASK 0x0f /* FIFO threshold, 1/16 FIFO size */ +#define ICTL_FIFO BIT(4) /* Int. on FIFO count */ +#define ICTL_ARB BIT(5) /* Int. on Arbitration complete */ +#define ICTL_SEL BIT(6) /* Int. on SCSI Select */ +#define ICTL_REQ BIT(7) /* Int. on SCSI Request */ +#define REG_FSTAT 3 /* R: Adapter Status 2 (FIFO) - (@) */ +#define FSTAT_ONOTEMPTY BIT(0) /* Output FIFO not empty */ +#define FSTAT_INOTEMPTY BIT(1) /* Input FIFO not empty */ +#define FSTAT_NOTEMPTY BIT(2) /* Main FIFO not empty */ +#define FSTAT_NOTFULL BIT(3) /* Main FIFO not full */ +#define REG_MCTL 3 /* W: SCSI Data Mode Control */ +#define MCTL_ACK_MASK 0x0f /* Acknowledge period */ +#define MCTL_ACTDEASS BIT(4) /* Active deassert of REQ and ACK */ +#define MCTL_TARGET BIT(5) /* Enable target mode */ +#define MCTL_FASTSYNC BIT(6) /* Enable Fast Synchronous */ +#define MCTL_SYNC BIT(7) /* Enable Synchronous */ +#define REG_INTCOND 4 /* R: Interrupt Condition - (@) */ +#define IRQ_FIFO BIT(1) /* FIFO interrupt */ +#define IRQ_REQ BIT(2) /* SCSI Request interrupt */ +#define IRQ_SEL BIT(3) /* SCSI Select interrupt */ +#define IRQ_ARB BIT(4) /* SCSI Arbitration interrupt */ +#define IRQ_RST BIT(5) /* SCSI Reset interrupt */ +#define IRQ_FORCED BIT(6) /* Forced interrupt */ +#define IRQ_TIMEOUT BIT(7) /* Bus timeout */ +#define REG_ACTL 4 /* W: Adapter Control 1 */ +#define ACTL_RESET BIT(0) /* Reset FIFO, parity, reset int. 
*/ +#define ACTL_FIRQ BIT(1) /* Set Forced interrupt */ +#define ACTL_ARB BIT(2) /* Initiate Bus Arbitration */ +#define ACTL_PAREN BIT(3) /* Enable SCSI Parity */ +#define ACTL_IRQEN BIT(4) /* Enable interrupts */ +#define ACTL_CLRFIRQ BIT(5) /* Clear Forced interrupt */ +#define ACTL_FIFOWR BIT(6) /* FIFO Direction (1=write) */ +#define ACTL_FIFOEN BIT(7) /* Enable FIFO */ +#define REG_ID_LSB 5 /* R: ID Code (LSB) */ +#define REG_ACTL2 5 /* Adapter Control 2 - (@) */ +#define ACTL2_RAMOVRLY BIT(0) /* Enable RAM overlay */ +#define ACTL2_SLEEP BIT(7) /* Sleep mode */ +#define REG_ID_MSB 6 /* R: ID Code (MSB) */ +#define REG_LOOPBACK 7 /* R/W: Loopback */ +#define REG_SCSI_DATA_NOACK 8 /* R/W: SCSI Data (no ACK) */ +#define REG_ASTAT3 9 /* R: Adapter Status 3 */ +#define ASTAT3_ACTDEASS BIT(0) /* Active deassert enabled */ +#define ASTAT3_RAMOVRLY BIT(1) /* RAM overlay enabled */ +#define ASTAT3_TARGERR BIT(2) /* Target error */ +#define ASTAT3_IRQEN BIT(3) /* Interrupts enabled */ +#define ASTAT3_IRQMASK 0xf0 /* Enabled interrupts mask */ +#define REG_CFG1 10 /* R: Configuration Register 1 */ +#define CFG1_BUS BIT(0) /* 0 = ISA */ +#define CFG1_IRQ_MASK 0x0e /* IRQ jumpers */ +#define CFG1_IO_MASK 0x30 /* I/O base jumpers */ +#define CFG1_BIOS_MASK 0xc0 /* BIOS base jumpers */ +#define REG_CFG2 11 /* R/W: Configuration Register 2 (@) */ +#define CFG2_ROMDIS BIT(0) /* ROM disabled */ +#define CFG2_RAMDIS BIT(1) /* RAM disabled */ +#define CFG2_IRQEDGE BIT(2) /* Edge-triggered interrupts */ +#define CFG2_NOWS BIT(3) /* No wait states */ +#define CFG2_32BIT BIT(7) /* 32-bit mode */ +#define REG_FIFO 12 /* R/W: FIFO */ +#define REG_FIFO_COUNT 14 /* R: FIFO Data Count */ + +#ifdef CONFIG_PM_SLEEP +static const struct dev_pm_ops __maybe_unused fdomain_pm_ops; +#define FDOMAIN_PM_OPS (&fdomain_pm_ops) +#else +#define FDOMAIN_PM_OPS NULL +#endif /* CONFIG_PM_SLEEP */ + +struct Scsi_Host *fdomain_create(int base, int irq, int this_id, + struct device *dev); +int fdomain_destroy(struct Scsi_Host *sh); diff --git a/drivers/scsi/fdomain_isa.c b/drivers/scsi/fdomain_isa.c new file mode 100644 index 000000000..2b4280a43 --- /dev/null +++ b/drivers/scsi/fdomain_isa.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include "fdomain.h" + +#define MAXBOARDS_PARAM 4 +static int io[MAXBOARDS_PARAM] = { 0, 0, 0, 0 }; +module_param_hw_array(io, int, ioport, NULL, 0); +MODULE_PARM_DESC(io, "base I/O address of controller (0x140, 0x150, 0x160, 0x170)"); + +static int irq[MAXBOARDS_PARAM] = { 0, 0, 0, 0 }; +module_param_hw_array(irq, int, irq, NULL, 0); +MODULE_PARM_DESC(irq, "IRQ of controller (0=auto [default])"); + +static int scsi_id[MAXBOARDS_PARAM] = { 0, 0, 0, 0 }; +module_param_hw_array(scsi_id, int, other, NULL, 0); +MODULE_PARM_DESC(scsi_id, "SCSI ID of controller (default = 7)"); + +static unsigned long addresses[] = { + 0xc8000, + 0xca000, + 0xce000, + 0xde000, +}; +#define ADDRESS_COUNT ARRAY_SIZE(addresses) + +static unsigned short ports[] = { 0x140, 0x150, 0x160, 0x170 }; +#define PORT_COUNT ARRAY_SIZE(ports) + +static unsigned short irqs[] = { 3, 5, 10, 11, 12, 14, 15, 0 }; + +/* This driver works *ONLY* for Future Domain cards using the TMC-1800, + * TMC-18C50, or TMC-18C30 chip. This includes models TMC-1650, 1660, 1670, + * and 1680. These are all 16-bit cards. + * BIOS versions prior to 3.2 assigned SCSI ID 6 to SCSI adapter. 
+ * + * The following BIOS signature signatures are for boards which do *NOT* + * work with this driver (these TMC-8xx and TMC-9xx boards may work with the + * Seagate driver): + * + * FUTURE DOMAIN CORP. (C) 1986-1988 V4.0I 03/16/88 + * FUTURE DOMAIN CORP. (C) 1986-1989 V5.0C2/14/89 + * FUTURE DOMAIN CORP. (C) 1986-1989 V6.0A7/28/89 + * FUTURE DOMAIN CORP. (C) 1986-1990 V6.0105/31/90 + * FUTURE DOMAIN CORP. (C) 1986-1990 V6.0209/18/90 + * FUTURE DOMAIN CORP. (C) 1986-1990 V7.009/18/90 + * FUTURE DOMAIN CORP. (C) 1992 V8.00.004/02/92 + * + * (The cards which do *NOT* work are all 8-bit cards -- although some of + * them have a 16-bit form-factor, the upper 8-bits are used only for IRQs + * and are *NOT* used for data. You can tell the difference by following + * the tracings on the circuit board -- if only the IRQ lines are involved, + * you have a "8-bit" card, and should *NOT* use this driver.) + */ + +static struct signature { + const char *signature; + int offset; + int length; + int this_id; + int base_offset; +} signatures[] = { +/* 1 2 3 4 5 6 */ +/* 123456789012345678901234567890123456789012345678901234567890 */ +{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 5, 50, 6, 0x1fcc }, +{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V1.07/28/89", 5, 50, 6, 0x1fcc }, +{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 72, 50, 6, 0x1fa2 }, +{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.0", 73, 43, 6, 0x1fa2 }, +{ "FUTURE DOMAIN CORP. (C) 1991 1800-V2.0.", 72, 39, 6, 0x1fa3 }, +{ "FUTURE DOMAIN CORP. (C) 1992 V3.00.004/02/92", 5, 44, 6, 0 }, +{ "FUTURE DOMAIN TMC-18XX (C) 1993 V3.203/12/93", 5, 44, 7, 0 }, +{ "IBM F1 P2 BIOS v1.0011/09/92", 5, 28, 7, 0x1ff3 }, +{ "IBM F1 P2 BIOS v1.0104/29/93", 5, 28, 7, 0 }, +{ "Future Domain Corp. V1.0008/18/93", 5, 33, 7, 0 }, +{ "Future Domain Corp. V2.0108/18/93", 5, 33, 7, 0 }, +{ "FUTURE DOMAIN CORP. V3.5008/18/93", 5, 34, 7, 0 }, +{ "FUTURE DOMAIN 18c30/18c50/1800 (C) 1994 V3.5", 5, 44, 7, 0 }, +{ "FUTURE DOMAIN CORP. V3.6008/18/93", 5, 34, 7, 0 }, +{ "FUTURE DOMAIN CORP. 
V3.6108/18/93", 5, 34, 7, 0 }, +}; +#define SIGNATURE_COUNT ARRAY_SIZE(signatures) + +static int fdomain_isa_match(struct device *dev, unsigned int ndev) +{ + struct Scsi_Host *sh; + int i, base = 0, irq = 0; + unsigned long bios_base = 0; + struct signature *sig = NULL; + void __iomem *p; + static struct signature *saved_sig; + int this_id = 7; + + if (ndev < ADDRESS_COUNT) { /* scan supported ISA BIOS addresses */ + p = ioremap(addresses[ndev], FDOMAIN_BIOS_SIZE); + if (!p) + return 0; + for (i = 0; i < SIGNATURE_COUNT; i++) + if (check_signature(p + signatures[i].offset, + signatures[i].signature, + signatures[i].length)) + break; + if (i == SIGNATURE_COUNT) /* no signature found */ + goto fail_unmap; + sig = &signatures[i]; + bios_base = addresses[ndev]; + /* read I/O base from BIOS area */ + if (sig->base_offset) + base = readb(p + sig->base_offset) + + (readb(p + sig->base_offset + 1) << 8); + iounmap(p); + if (base) { + dev_info(dev, "BIOS at 0x%lx specifies I/O base 0x%x\n", + bios_base, base); + } else { /* no I/O base in BIOS area */ + dev_info(dev, "BIOS at 0x%lx\n", bios_base); + /* save BIOS signature for later use in port probing */ + saved_sig = sig; + return 0; + } + } else /* scan supported I/O ports */ + base = ports[ndev - ADDRESS_COUNT]; + + /* use saved BIOS signature if present */ + if (!sig && saved_sig) + sig = saved_sig; + + if (!request_region(base, FDOMAIN_REGION_SIZE, "fdomain_isa")) + return 0; + + irq = irqs[(inb(base + REG_CFG1) & CFG1_IRQ_MASK) >> 1]; + + if (sig) + this_id = sig->this_id; + + sh = fdomain_create(base, irq, this_id, dev); + if (!sh) { + release_region(base, FDOMAIN_REGION_SIZE); + return 0; + } + + dev_set_drvdata(dev, sh); + return 1; +fail_unmap: + iounmap(p); + return 0; +} + +static int fdomain_isa_param_match(struct device *dev, unsigned int ndev) +{ + struct Scsi_Host *sh; + int irq_ = irq[ndev]; + + if (!io[ndev]) + return 0; + + if (!request_region(io[ndev], FDOMAIN_REGION_SIZE, "fdomain_isa")) { + dev_err(dev, "base 0x%x already in use", io[ndev]); + return 0; + } + + if (irq_ <= 0) + irq_ = irqs[(inb(io[ndev] + REG_CFG1) & CFG1_IRQ_MASK) >> 1]; + + sh = fdomain_create(io[ndev], irq_, scsi_id[ndev], dev); + if (!sh) { + dev_err(dev, "controller not found at base 0x%x", io[ndev]); + release_region(io[ndev], FDOMAIN_REGION_SIZE); + return 0; + } + + dev_set_drvdata(dev, sh); + return 1; +} + +static void fdomain_isa_remove(struct device *dev, unsigned int ndev) +{ + struct Scsi_Host *sh = dev_get_drvdata(dev); + int base = sh->io_port; + + fdomain_destroy(sh); + release_region(base, FDOMAIN_REGION_SIZE); + dev_set_drvdata(dev, NULL); +} + +static struct isa_driver fdomain_isa_driver = { + .match = fdomain_isa_match, + .remove = fdomain_isa_remove, + .driver = { + .name = "fdomain_isa", + .pm = FDOMAIN_PM_OPS, + }, +}; + +static int __init fdomain_isa_init(void) +{ + int isa_probe_count = ADDRESS_COUNT + PORT_COUNT; + + if (io[0]) { /* use module parameters if present */ + fdomain_isa_driver.match = fdomain_isa_param_match; + isa_probe_count = MAXBOARDS_PARAM; + } + + return isa_register_driver(&fdomain_isa_driver, isa_probe_count); +} + +static void __exit fdomain_isa_exit(void) +{ + isa_unregister_driver(&fdomain_isa_driver); +} + +module_init(fdomain_isa_init); +module_exit(fdomain_isa_exit); + +MODULE_AUTHOR("Ondrej Zary, Rickard E. 
Faith"); +MODULE_DESCRIPTION("Future Domain TMC-16x0 ISA SCSI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/fdomain_pci.c b/drivers/scsi/fdomain_pci.c new file mode 100644 index 000000000..3e05ce7b8 --- /dev/null +++ b/drivers/scsi/fdomain_pci.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "fdomain.h" + +static int fdomain_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *d) +{ + int err; + struct Scsi_Host *sh; + + err = pci_enable_device(pdev); + if (err) + goto fail; + + err = pci_request_regions(pdev, "fdomain_pci"); + if (err) + goto disable_device; + + err = -ENODEV; + if (pci_resource_len(pdev, 0) == 0) + goto release_region; + + sh = fdomain_create(pci_resource_start(pdev, 0), pdev->irq, 7, + &pdev->dev); + if (!sh) + goto release_region; + + pci_set_drvdata(pdev, sh); + return 0; + +release_region: + pci_release_regions(pdev); +disable_device: + pci_disable_device(pdev); +fail: + return err; +} + +static void fdomain_pci_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *sh = pci_get_drvdata(pdev); + + fdomain_destroy(sh); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_device_id fdomain_pci_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70) }, + {} +}; +MODULE_DEVICE_TABLE(pci, fdomain_pci_table); + +static struct pci_driver fdomain_pci_driver = { + .name = "fdomain_pci", + .id_table = fdomain_pci_table, + .probe = fdomain_pci_probe, + .remove = fdomain_pci_remove, + .driver.pm = FDOMAIN_PM_OPS, +}; + +module_pci_driver(fdomain_pci_driver); + +MODULE_AUTHOR("Ondrej Zary, Rickard E. Faith"); +MODULE_DESCRIPTION("Future Domain TMC-3260 PCI SCSI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile new file mode 100644 index 000000000..6214a6b2e --- /dev/null +++ b/drivers/scsi/fnic/Makefile @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_FCOE_FNIC) += fnic.o + +fnic-y := \ + fnic_attrs.o \ + fnic_isr.o \ + fnic_main.o \ + fnic_res.o \ + fnic_fcs.o \ + fnic_scsi.o \ + fnic_trace.o \ + fnic_debugfs.o \ + vnic_cq.o \ + vnic_dev.o \ + vnic_intr.o \ + vnic_rq.o \ + vnic_wq_copy.o \ + vnic_wq.o diff --git a/drivers/scsi/fnic/cq_desc.h b/drivers/scsi/fnic/cq_desc.h new file mode 100644 index 000000000..0eb4ba277 --- /dev/null +++ b/drivers/scsi/fnic/cq_desc.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ +#ifndef _CQ_DESC_H_ +#define _CQ_DESC_H_ + +/* + * Completion queue descriptor types + */ +enum cq_desc_types { + CQ_DESC_TYPE_WQ_ENET = 0, + CQ_DESC_TYPE_DESC_COPY = 1, + CQ_DESC_TYPE_WQ_EXCH = 2, + CQ_DESC_TYPE_RQ_ENET = 3, + CQ_DESC_TYPE_RQ_FCP = 4, +}; + +/* Completion queue descriptor: 16B + * + * All completion queues have this basic layout. The + * type_specfic area is unique for each completion + * queue type. 
+ */ +struct cq_desc { + __le16 completed_index; + __le16 q_number; + u8 type_specfic[11]; + u8 type_color; +}; + +#define CQ_DESC_TYPE_BITS 4 +#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1) +#define CQ_DESC_COLOR_MASK 1 +#define CQ_DESC_COLOR_SHIFT 7 +#define CQ_DESC_Q_NUM_BITS 10 +#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1) +#define CQ_DESC_COMP_NDX_BITS 12 +#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1) + +static inline void cq_desc_dec(const struct cq_desc *desc_arg, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index) +{ + const struct cq_desc *desc = desc_arg; + const u8 type_color = desc->type_color; + + *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; + + /* + * Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. + */ + + rmb(); + + *type = type_color & CQ_DESC_TYPE_MASK; + *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK; + *completed_index = le16_to_cpu(desc->completed_index) & + CQ_DESC_COMP_NDX_MASK; +} + +#endif /* _CQ_DESC_H_ */ diff --git a/drivers/scsi/fnic/cq_enet_desc.h b/drivers/scsi/fnic/cq_enet_desc.h new file mode 100644 index 000000000..b6113291c --- /dev/null +++ b/drivers/scsi/fnic/cq_enet_desc.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ +#ifndef _CQ_ENET_DESC_H_ +#define _CQ_ENET_DESC_H_ + +#include "cq_desc.h" + +/* Ethernet completion queue descriptor: 16B */ +struct cq_enet_wq_desc { + __le16 completed_index; + __le16 q_number; + u8 reserved[11]; + u8 type_color; +}; + +static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index) +{ + cq_desc_dec((struct cq_desc *)desc, type, + color, q_number, completed_index); +} + +/* Completion queue descriptor: Ethernet receive queue, 16B */ +struct cq_enet_rq_desc { + __le16 completed_index_flags; + __le16 q_number_rss_type_flags; + __le32 rss_hash; + __le16 bytes_written_flags; + __le16 vlan; + __le16 checksum_fcoe; + u8 flags; + u8 type_color; +}; + +#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12) +#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13) +#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14) +#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15) + +#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4 +#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \ + ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1) +#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0 +#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1 +#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2 +#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3 +#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4 +#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5 +#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6 + +#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14) + +#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14 +#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \ + ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1) +#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14) +#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15) + +#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4 +#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \ + ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1) +#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8 +#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \ + ((1 << 
CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1) +#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8 + +#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0) +#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0) +#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1) +#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1) +#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2) +#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3) +#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4) +#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5) +#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6) +#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7) + +static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index, + u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type, + u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error, + u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof, + u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof, + u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok, + u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok) +{ + u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags); + u16 q_number_rss_type_flags = + le16_to_cpu(desc->q_number_rss_type_flags); + u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); + + cq_desc_dec((struct cq_desc *)desc, type, + color, q_number, completed_index); + + *ingress_port = (completed_index_flags & + CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0; + *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ? + 1 : 0; + *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ? + 1 : 0; + *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ? + 1 : 0; + + *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) & + CQ_ENET_RQ_DESC_RSS_TYPE_MASK); + *csum_not_calc = (q_number_rss_type_flags & + CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0; + + *rss_hash = le32_to_cpu(desc->rss_hash); + + *bytes_written = bytes_written_flags & + CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; + *packet_error = (bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0; + *vlan_stripped = (bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0; + + *vlan = le16_to_cpu(desc->vlan); + + if (*fcoe) { + *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & + CQ_ENET_RQ_DESC_FCOE_SOF_MASK); + *fcoe_fc_crc_ok = (desc->flags & + CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0; + *fcoe_enc_error = (desc->flags & + CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0; + *fcoe_eof = (u8)((desc->checksum_fcoe >> + CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) & + CQ_ENET_RQ_DESC_FCOE_EOF_MASK); + *checksum = 0; + } else { + *fcoe_sof = 0; + *fcoe_fc_crc_ok = 0; + *fcoe_enc_error = 0; + *fcoe_eof = 0; + *checksum = le16_to_cpu(desc->checksum_fcoe); + } + + *tcp_udp_csum_ok = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0; + *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0; + *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0; + *ipv4_csum_ok = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0; + *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0; + *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0; + *ipv4_fragment = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0; + *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 
1 : 0; +} + +#endif /* _CQ_ENET_DESC_H_ */ diff --git a/drivers/scsi/fnic/cq_exch_desc.h b/drivers/scsi/fnic/cq_exch_desc.h new file mode 100644 index 000000000..4d94329c8 --- /dev/null +++ b/drivers/scsi/fnic/cq_exch_desc.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ +#ifndef _CQ_EXCH_DESC_H_ +#define _CQ_EXCH_DESC_H_ + +#include "cq_desc.h" + +/* Exchange completion queue descriptor: 16B */ +struct cq_exch_wq_desc { + u16 completed_index; + u16 q_number; + u16 exchange_id; + u8 tmpl; + u8 reserved0; + u32 reserved1; + u8 exch_status; + u8 reserved2[2]; + u8 type_color; +}; + +#define CQ_EXCH_WQ_STATUS_BITS 2 +#define CQ_EXCH_WQ_STATUS_MASK ((1 << CQ_EXCH_WQ_STATUS_BITS) - 1) + +enum cq_exch_status_types { + CQ_EXCH_WQ_STATUS_TYPE_COMPLETE = 0, + CQ_EXCH_WQ_STATUS_TYPE_ABORT = 1, + CQ_EXCH_WQ_STATUS_TYPE_SGL_EOF = 2, + CQ_EXCH_WQ_STATUS_TYPE_TMPL_ERR = 3, +}; + +static inline void cq_exch_wq_desc_dec(struct cq_exch_wq_desc *desc_ptr, + u8 *type, + u8 *color, + u16 *q_number, + u16 *completed_index, + u8 *exch_status) +{ + cq_desc_dec((struct cq_desc *)desc_ptr, type, + color, q_number, completed_index); + *exch_status = desc_ptr->exch_status & CQ_EXCH_WQ_STATUS_MASK; +} + +struct cq_fcp_rq_desc { + u16 completed_index_eop_sop_prt; + u16 q_number; + u16 exchange_id; + u16 tmpl; + u16 bytes_written; + u16 vlan; + u8 sof; + u8 eof; + u8 fcs_fer_fck; + u8 type_color; +}; + +#define CQ_FCP_RQ_DESC_FLAGS_SOP (1 << 15) +#define CQ_FCP_RQ_DESC_FLAGS_EOP (1 << 14) +#define CQ_FCP_RQ_DESC_FLAGS_PRT (1 << 12) +#define CQ_FCP_RQ_DESC_TMPL_MASK 0x1f +#define CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK 0x3fff +#define CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT 14 +#define CQ_FCP_RQ_DESC_PACKET_ERR_MASK (1 << CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT) +#define CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT 15 +#define CQ_FCP_RQ_DESC_VS_STRIPPED_MASK (1 << CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT) +#define CQ_FCP_RQ_DESC_FC_CRC_OK_MASK 0x1 +#define CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT 1 +#define CQ_FCP_RQ_DESC_FCOE_ERR_MASK (1 << CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT) +#define CQ_FCP_RQ_DESC_FCS_OK_SHIFT 7 +#define CQ_FCP_RQ_DESC_FCS_OK_MASK (1 << CQ_FCP_RQ_DESC_FCS_OK_SHIFT) + +static inline void cq_fcp_rq_desc_dec(struct cq_fcp_rq_desc *desc_ptr, + u8 *type, + u8 *color, + u16 *q_number, + u16 *completed_index, + u8 *eop, + u8 *sop, + u8 *fck, + u16 *exchange_id, + u16 *tmpl, + u32 *bytes_written, + u8 *sof, + u8 *eof, + u8 *ingress_port, + u8 *packet_err, + u8 *fcoe_err, + u8 *fcs_ok, + u8 *vlan_stripped, + u16 *vlan) +{ + cq_desc_dec((struct cq_desc *)desc_ptr, type, + color, q_number, completed_index); + *eop = (desc_ptr->completed_index_eop_sop_prt & + CQ_FCP_RQ_DESC_FLAGS_EOP) ? 1 : 0; + *sop = (desc_ptr->completed_index_eop_sop_prt & + CQ_FCP_RQ_DESC_FLAGS_SOP) ? 1 : 0; + *ingress_port = + (desc_ptr->completed_index_eop_sop_prt & + CQ_FCP_RQ_DESC_FLAGS_PRT) ? 
1 : 0; + *exchange_id = desc_ptr->exchange_id; + *tmpl = desc_ptr->tmpl & CQ_FCP_RQ_DESC_TMPL_MASK; + *bytes_written = + desc_ptr->bytes_written & CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK; + *packet_err = + (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_PACKET_ERR_MASK) >> + CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT; + *vlan_stripped = + (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_VS_STRIPPED_MASK) >> + CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT; + *vlan = desc_ptr->vlan; + *sof = desc_ptr->sof; + *fck = desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FC_CRC_OK_MASK; + *fcoe_err = (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCOE_ERR_MASK) >> + CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT; + *eof = desc_ptr->eof; + *fcs_ok = + (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCS_OK_MASK) >> + CQ_FCP_RQ_DESC_FCS_OK_SHIFT; +} + +struct cq_sgl_desc { + u16 exchange_id; + u16 q_number; + u32 active_burst_offset; + u32 tot_data_bytes; + u16 tmpl; + u8 sgl_err; + u8 type_color; +}; + +enum cq_sgl_err_types { + CQ_SGL_ERR_NO_ERROR = 0, + CQ_SGL_ERR_OVERFLOW, /* data ran beyond end of SGL */ + CQ_SGL_ERR_SGL_LCL_ADDR_ERR, /* sgl access to local vnic addr illegal*/ + CQ_SGL_ERR_ADDR_RSP_ERR, /* sgl address error */ + CQ_SGL_ERR_DATA_RSP_ERR, /* sgl data rsp error */ + CQ_SGL_ERR_CNT_ZERO_ERR, /* SGL count is 0 */ + CQ_SGL_ERR_CNT_MAX_ERR, /* SGL count is larger than supported */ + CQ_SGL_ERR_ORDER_ERR, /* frames recv on both ports, order err */ + CQ_SGL_ERR_DATA_LCL_ADDR_ERR,/* sgl data buf to local vnic addr ill */ + CQ_SGL_ERR_HOST_CQ_ERR, /* host cq entry to local vnic addr ill */ +}; + +#define CQ_SGL_SGL_ERR_MASK 0x1f +#define CQ_SGL_TMPL_MASK 0x1f + +static inline void cq_sgl_desc_dec(struct cq_sgl_desc *desc_ptr, + u8 *type, + u8 *color, + u16 *q_number, + u16 *exchange_id, + u32 *active_burst_offset, + u32 *tot_data_bytes, + u16 *tmpl, + u8 *sgl_err) +{ + /* Cheat a little by assuming exchange_id is the same as completed + index */ + cq_desc_dec((struct cq_desc *)desc_ptr, type, color, q_number, + exchange_id); + *active_burst_offset = desc_ptr->active_burst_offset; + *tot_data_bytes = desc_ptr->tot_data_bytes; + *tmpl = desc_ptr->tmpl & CQ_SGL_TMPL_MASK; + *sgl_err = desc_ptr->sgl_err & CQ_SGL_SGL_ERR_MASK; +} + +#endif /* _CQ_EXCH_DESC_H_ */ diff --git a/drivers/scsi/fnic/fcpio.h b/drivers/scsi/fnic/fcpio.h new file mode 100644 index 000000000..54a0b2ba8 --- /dev/null +++ b/drivers/scsi/fnic/fcpio.h @@ -0,0 +1,768 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ +#ifndef _FCPIO_H_ +#define _FCPIO_H_ + +#include + +/* + * This header file includes all of the data structures used for + * communication by the host driver to the fcp firmware. 
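+ *
+ * Host -> firmware messages are carried in the fixed-size envelope
+ * struct fcpio_host_req (128 bytes) and firmware -> host messages in
+ * struct fcpio_fw_req (64 bytes), both defined later in this file.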
+ */ + +/* + * Exchange and sequence id space allocated to the host driver + */ +#define FCPIO_HOST_EXCH_RANGE_START 0x1000 +#define FCPIO_HOST_EXCH_RANGE_END 0x1fff +#define FCPIO_HOST_SEQ_ID_RANGE_START 0x80 +#define FCPIO_HOST_SEQ_ID_RANGE_END 0xff + +/* + * Command entry type + */ +enum fcpio_type { + /* + * Initiator request types + */ + FCPIO_ICMND_16 = 0x1, + FCPIO_ICMND_32, + FCPIO_ICMND_CMPL, + FCPIO_ITMF, + FCPIO_ITMF_CMPL, + + /* + * Target request types + */ + FCPIO_TCMND_16 = 0x11, + FCPIO_TCMND_32, + FCPIO_TDATA, + FCPIO_TXRDY, + FCPIO_TRSP, + FCPIO_TDRSP_CMPL, + FCPIO_TTMF, + FCPIO_TTMF_ACK, + FCPIO_TABORT, + FCPIO_TABORT_CMPL, + + /* + * Misc request types + */ + FCPIO_ACK = 0x20, + FCPIO_RESET, + FCPIO_RESET_CMPL, + FCPIO_FLOGI_REG, + FCPIO_FLOGI_REG_CMPL, + FCPIO_ECHO, + FCPIO_ECHO_CMPL, + FCPIO_LUNMAP_CHNG, + FCPIO_LUNMAP_REQ, + FCPIO_LUNMAP_REQ_CMPL, + FCPIO_FLOGI_FIP_REG, + FCPIO_FLOGI_FIP_REG_CMPL, +}; + +/* + * Header status codes from the firmware + */ +enum fcpio_status { + FCPIO_SUCCESS = 0, /* request was successful */ + + /* + * If a request to the firmware is rejected, the original request + * header will be returned with the status set to one of the following: + */ + FCPIO_INVALID_HEADER, /* header contains invalid data */ + FCPIO_OUT_OF_RESOURCE, /* out of resources to complete request */ + FCPIO_INVALID_PARAM, /* some parameter in request is invalid */ + FCPIO_REQ_NOT_SUPPORTED, /* request type is not supported */ + FCPIO_IO_NOT_FOUND, /* requested I/O was not found */ + + /* + * Once a request is processed, the firmware will usually return + * a cmpl message type. In cases where errors occurred, + * the header status field would be filled in with one of the following: + */ + FCPIO_ABORTED = 0x41, /* request was aborted */ + FCPIO_TIMEOUT, /* request was timed out */ + FCPIO_SGL_INVALID, /* request was aborted due to sgl error */ + FCPIO_MSS_INVALID, /* request was aborted due to mss error */ + FCPIO_DATA_CNT_MISMATCH, /* recv/sent more/less data than exp. */ + FCPIO_FW_ERR, /* request was terminated due to fw error */ + FCPIO_ITMF_REJECTED, /* itmf req was rejected by remote node */ + FCPIO_ITMF_FAILED, /* itmf req was failed by remote node */ + FCPIO_ITMF_INCORRECT_LUN, /* itmf req targeted incorrect LUN */ + FCPIO_CMND_REJECTED, /* request was invalid and rejected */ + FCPIO_NO_PATH_AVAIL, /* no paths to the lun was available */ + FCPIO_PATH_FAILED, /* i/o sent to current path failed */ + FCPIO_LUNMAP_CHNG_PEND, /* i/o rejected due to lunmap change */ +}; + +/* + * The header command tag. All host requests will use the "tag" field + * to mark commands with a unique tag. When the firmware responds to + * a host request, it will copy the tag field into the response. + * + * The only firmware requests that will use the rx_id/ox_id fields instead + * of the tag field will be the target command and target task management + * requests. These two requests do not have corresponding host requests + * since they come directly from the FC initiator on the network. 
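+ *
+ * A minimal encoding sketch (illustrative only; "id", "ox_id" and "rx_id"
+ * are placeholder values supplied by the caller):
+ *
+ *	struct fcpio_tag tag;
+ *
+ *	fcpio_tag_id_enc(&tag, id);              - host request tag
+ *	fcpio_tag_exid_enc(&tag, ox_id, rx_id);  - target request tag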
+ */ +struct fcpio_tag { + union { + u32 req_id; + struct { + u16 rx_id; + u16 ox_id; + } ex_id; + } u; +}; + +static inline void +fcpio_tag_id_enc(struct fcpio_tag *tag, u32 id) +{ + tag->u.req_id = id; +} + +static inline void +fcpio_tag_id_dec(struct fcpio_tag *tag, u32 *id) +{ + *id = tag->u.req_id; +} + +static inline void +fcpio_tag_exid_enc(struct fcpio_tag *tag, u16 ox_id, u16 rx_id) +{ + tag->u.ex_id.rx_id = rx_id; + tag->u.ex_id.ox_id = ox_id; +} + +static inline void +fcpio_tag_exid_dec(struct fcpio_tag *tag, u16 *ox_id, u16 *rx_id) +{ + *rx_id = tag->u.ex_id.rx_id; + *ox_id = tag->u.ex_id.ox_id; +} + +/* + * The header for an fcpio request, whether from the firmware or from the + * host driver + */ +struct fcpio_header { + u8 type; /* enum fcpio_type */ + u8 status; /* header status entry */ + u16 _resvd; /* reserved */ + struct fcpio_tag tag; /* header tag */ +}; + +static inline void +fcpio_header_enc(struct fcpio_header *hdr, + u8 type, u8 status, + struct fcpio_tag tag) +{ + hdr->type = type; + hdr->status = status; + hdr->_resvd = 0; + hdr->tag = tag; +} + +static inline void +fcpio_header_dec(struct fcpio_header *hdr, + u8 *type, u8 *status, + struct fcpio_tag *tag) +{ + *type = hdr->type; + *status = hdr->status; + *tag = hdr->tag; +} + +#define CDB_16 16 +#define CDB_32 32 +#define LUN_ADDRESS 8 + +/* + * fcpio_icmnd_16: host -> firmware request + * + * used for sending out an initiator SCSI 16-byte command + */ +struct fcpio_icmnd_16 { + u32 lunmap_id; /* index into lunmap table */ + u8 special_req_flags; /* special exchange request flags */ + u8 _resvd0[3]; /* reserved */ + u32 sgl_cnt; /* scatter-gather list count */ + u32 sense_len; /* sense buffer length */ + u64 sgl_addr; /* scatter-gather list addr */ + u64 sense_addr; /* sense buffer address */ + u8 crn; /* SCSI Command Reference No. */ + u8 pri_ta; /* SCSI Priority and Task attribute */ + u8 _resvd1; /* reserved: should be 0 */ + u8 flags; /* command flags */ + u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */ + u32 data_len; /* length of data expected */ + u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ + u8 _resvd2; /* reserved */ + u8 d_id[3]; /* FC vNIC only: Target D_ID */ + u16 mss; /* FC vNIC only: max burst */ + u16 _resvd3; /* reserved */ + u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */ + u32 e_d_tov; /* FC vNIC only: Err Detect Timeout */ +}; + +/* + * Special request flags + */ +#define FCPIO_ICMND_SRFLAG_RETRY 0x01 /* Enable Retry handling on exchange */ + +/* + * Priority/Task Attribute settings + */ +#define FCPIO_ICMND_PTA_SIMPLE 0 /* simple task attribute */ +#define FCPIO_ICMND_PTA_HEADQ 1 /* head of queue task attribute */ +#define FCPIO_ICMND_PTA_ORDERED 2 /* ordered task attribute */ +#define FCPIO_ICMND_PTA_ACA 4 /* auto contingent allegiance */ +#define FCPIO_ICMND_PRI_SHIFT 3 /* priority field starts in bit 3 */ + +/* + * Command flags + */ +#define FCPIO_ICMND_RDDATA 0x02 /* read data */ +#define FCPIO_ICMND_WRDATA 0x01 /* write data */ + +/* + * fcpio_icmnd_32: host -> firmware request + * + * used for sending out an initiator SCSI 32-byte command + */ +struct fcpio_icmnd_32 { + u32 lunmap_id; /* index into lunmap table */ + u8 special_req_flags; /* special exchange request flags */ + u8 _resvd0[3]; /* reserved */ + u32 sgl_cnt; /* scatter-gather list count */ + u32 sense_len; /* sense buffer length */ + u64 sgl_addr; /* scatter-gather list addr */ + u64 sense_addr; /* sense buffer address */ + u8 crn; /* SCSI Command Reference No. 
*/ + u8 pri_ta; /* SCSI Priority and Task attribute */ + u8 _resvd1; /* reserved: should be 0 */ + u8 flags; /* command flags */ + u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */ + u32 data_len; /* length of data expected */ + u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ + u8 _resvd2; /* reserved */ + u8 d_id[3]; /* FC vNIC only: Target D_ID */ + u16 mss; /* FC vNIC only: max burst */ + u16 _resvd3; /* reserved */ + u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */ + u32 e_d_tov; /* FC vNIC only: Error Detect Timeout */ +}; + +/* + * fcpio_itmf: host -> firmware request + * + * used for requesting the firmware to abort a request and/or send out + * a task management function + * + * The t_tag field is only needed when the request type is ABT_TASK. + */ +struct fcpio_itmf { + u32 lunmap_id; /* index into lunmap table */ + u32 tm_req; /* SCSI Task Management request */ + u32 t_tag; /* header tag of fcpio to be aborted */ + u32 _resvd; /* _reserved */ + u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ + u8 _resvd1; /* reserved */ + u8 d_id[3]; /* FC vNIC only: Target D_ID */ + u32 r_a_tov; /* FC vNIC only: R_A_TOV in msec */ + u32 e_d_tov; /* FC vNIC only: E_D_TOV in msec */ +}; + +/* + * Task Management request + */ +enum fcpio_itmf_tm_req_type { + FCPIO_ITMF_ABT_TASK_TERM = 0x01, /* abort task and terminate */ + FCPIO_ITMF_ABT_TASK, /* abort task and issue abts */ + FCPIO_ITMF_ABT_TASK_SET, /* abort task set */ + FCPIO_ITMF_CLR_TASK_SET, /* clear task set */ + FCPIO_ITMF_LUN_RESET, /* logical unit reset task mgmt */ + FCPIO_ITMF_CLR_ACA, /* Clear ACA condition */ +}; + +/* + * fcpio_tdata: host -> firmware request + * + * used for requesting the firmware to send out a read data transfer for a + * target command + */ +struct fcpio_tdata { + u16 rx_id; /* FC rx_id of target command */ + u16 flags; /* command flags */ + u32 rel_offset; /* data sequence relative offset */ + u32 sgl_cnt; /* scatter-gather list count */ + u32 data_len; /* length of data expected to send */ + u64 sgl_addr; /* scatter-gather list address */ +}; + +/* + * Command flags + */ +#define FCPIO_TDATA_SCSI_RSP 0x01 /* send a scsi resp. 
after last frame */ + +/* + * fcpio_txrdy: host -> firmware request + * + * used for requesting the firmware to send out a write data transfer for a + * target command + */ +struct fcpio_txrdy { + u16 rx_id; /* FC rx_id of target command */ + u16 _resvd0; /* reserved */ + u32 rel_offset; /* data sequence relative offset */ + u32 sgl_cnt; /* scatter-gather list count */ + u32 data_len; /* length of data expected to send */ + u64 sgl_addr; /* scatter-gather list address */ +}; + +/* + * fcpio_trsp: host -> firmware request + * + * used for requesting the firmware to send out a response for a target + * command + */ +struct fcpio_trsp { + u16 rx_id; /* FC rx_id of target command */ + u16 _resvd0; /* reserved */ + u32 sense_len; /* sense data buffer length */ + u64 sense_addr; /* sense data buffer address */ + u16 _resvd1; /* reserved */ + u8 flags; /* response request flags */ + u8 scsi_status; /* SCSI status */ + u32 residual; /* SCSI data residual value of I/O */ +}; + +/* + * resposnse request flags + */ +#define FCPIO_TRSP_RESID_UNDER 0x08 /* residual is valid and is underflow */ +#define FCPIO_TRSP_RESID_OVER 0x04 /* residual is valid and is overflow */ + +/* + * fcpio_ttmf_ack: host -> firmware response + * + * used by the host to indicate to the firmware it has received and processed + * the target tmf request + */ +struct fcpio_ttmf_ack { + u16 rx_id; /* FC rx_id of target command */ + u16 _resvd0; /* reserved */ + u32 tmf_status; /* SCSI task management status */ +}; + +/* + * fcpio_tabort: host -> firmware request + * + * used by the host to request the firmware to abort a target request that was + * received by the firmware + */ +struct fcpio_tabort { + u16 rx_id; /* rx_id of the target request */ +}; + +/* + * fcpio_reset: host -> firmware request + * + * used by the host to signal a reset of the driver to the firmware + * and to request firmware to clean up all outstanding I/O + */ +struct fcpio_reset { + u32 _resvd; +}; + +enum fcpio_flogi_reg_format_type { + FCPIO_FLOGI_REG_DEF_DEST = 0, /* Use the oui | s_id mac format */ + FCPIO_FLOGI_REG_GW_DEST, /* Use the fixed gateway mac */ +}; + +/* + * fcpio_flogi_reg: host -> firmware request + * + * fc vnic only + * used by the host to notify the firmware of the lif's s_id + * and destination mac address format + */ +struct fcpio_flogi_reg { + u8 format; + u8 s_id[3]; /* FC vNIC only: Source S_ID */ + u8 gateway_mac[ETH_ALEN]; /* Destination gateway mac */ + u16 _resvd; + u32 r_a_tov; /* R_A_TOV in msec */ + u32 e_d_tov; /* E_D_TOV in msec */ +}; + +/* + * fcpio_echo: host -> firmware request + * + * sends a heartbeat echo request to the firmware + */ +struct fcpio_echo { + u32 _resvd; +}; + +/* + * fcpio_lunmap_req: host -> firmware request + * + * scsi vnic only + * sends a request to retrieve the lunmap table for scsi vnics + */ +struct fcpio_lunmap_req { + u64 addr; /* address of the buffer */ + u32 len; /* len of the buffer */ +}; + +/* + * fcpio_flogi_fip_reg: host -> firmware request + * + * fc vnic only + * used by the host to notify the firmware of the lif's s_id + * and destination mac address format + */ +struct fcpio_flogi_fip_reg { + u8 _resvd0; + u8 s_id[3]; /* FC vNIC only: Source S_ID */ + u8 fcf_mac[ETH_ALEN]; /* FCF Target destination mac */ + u16 _resvd1; + u32 r_a_tov; /* R_A_TOV in msec */ + u32 e_d_tov; /* E_D_TOV in msec */ + u8 ha_mac[ETH_ALEN]; /* Host adapter source mac */ + u16 _resvd2; +}; + +/* + * Basic structure for all fcpio structures that are sent from the host to the + * firmware. 
They are 128 bytes per structure. + */ +#define FCPIO_HOST_REQ_LEN 128 /* expected length of host requests */ + +struct fcpio_host_req { + struct fcpio_header hdr; + + union { + /* + * Defines space needed for request + */ + u8 buf[FCPIO_HOST_REQ_LEN - sizeof(struct fcpio_header)]; + + /* + * Initiator host requests + */ + struct fcpio_icmnd_16 icmnd_16; + struct fcpio_icmnd_32 icmnd_32; + struct fcpio_itmf itmf; + + /* + * Target host requests + */ + struct fcpio_tdata tdata; + struct fcpio_txrdy txrdy; + struct fcpio_trsp trsp; + struct fcpio_ttmf_ack ttmf_ack; + struct fcpio_tabort tabort; + + /* + * Misc requests + */ + struct fcpio_reset reset; + struct fcpio_flogi_reg flogi_reg; + struct fcpio_echo echo; + struct fcpio_lunmap_req lunmap_req; + struct fcpio_flogi_fip_reg flogi_fip_reg; + } u; +}; + +/* + * fcpio_icmnd_cmpl: firmware -> host response + * + * used for sending the host a response to an initiator command + */ +struct fcpio_icmnd_cmpl { + u8 _resvd0[6]; /* reserved */ + u8 flags; /* response flags */ + u8 scsi_status; /* SCSI status */ + u32 residual; /* SCSI data residual length */ + u32 sense_len; /* SCSI sense length */ +}; + +/* + * response flags + */ +#define FCPIO_ICMND_CMPL_RESID_UNDER 0x08 /* resid under and valid */ +#define FCPIO_ICMND_CMPL_RESID_OVER 0x04 /* resid over and valid */ + +/* + * fcpio_itmf_cmpl: firmware -> host response + * + * used for sending the host a response for a itmf request + */ +struct fcpio_itmf_cmpl { + u32 _resvd; /* reserved */ +}; + +/* + * fcpio_tcmnd_16: firmware -> host request + * + * used by the firmware to notify the host of an incoming target SCSI 16-Byte + * request + */ +struct fcpio_tcmnd_16 { + u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ + u8 crn; /* SCSI Command Reference No. */ + u8 pri_ta; /* SCSI Priority and Task attribute */ + u8 _resvd2; /* reserved: should be 0 */ + u8 flags; /* command flags */ + u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */ + u32 data_len; /* length of data expected */ + u8 _resvd1; /* reserved */ + u8 s_id[3]; /* FC vNIC only: Source S_ID */ +}; + +/* + * Priority/Task Attribute settings + */ +#define FCPIO_TCMND_PTA_SIMPLE 0 /* simple task attribute */ +#define FCPIO_TCMND_PTA_HEADQ 1 /* head of queue task attribute */ +#define FCPIO_TCMND_PTA_ORDERED 2 /* ordered task attribute */ +#define FCPIO_TCMND_PTA_ACA 4 /* auto contingent allegiance */ +#define FCPIO_TCMND_PRI_SHIFT 3 /* priority field starts in bit 3 */ + +/* + * Command flags + */ +#define FCPIO_TCMND_RDDATA 0x02 /* read data */ +#define FCPIO_TCMND_WRDATA 0x01 /* write data */ + +/* + * fcpio_tcmnd_32: firmware -> host request + * + * used by the firmware to notify the host of an incoming target SCSI 32-Byte + * request + */ +struct fcpio_tcmnd_32 { + u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ + u8 crn; /* SCSI Command Reference No. 
*/ + u8 pri_ta; /* SCSI Priority and Task attribute */ + u8 _resvd2; /* reserved: should be 0 */ + u8 flags; /* command flags */ + u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */ + u32 data_len; /* length of data expected */ + u8 _resvd0; /* reserved */ + u8 s_id[3]; /* FC vNIC only: Source S_ID */ +}; + +/* + * fcpio_tdrsp_cmpl: firmware -> host response + * + * used by the firmware to notify the host of a response to a host target + * command + */ +struct fcpio_tdrsp_cmpl { + u16 rx_id; /* rx_id of the target request */ + u16 _resvd0; /* reserved */ +}; + +/* + * fcpio_ttmf: firmware -> host request + * + * used by the firmware to notify the host of an incoming task management + * function request + */ +struct fcpio_ttmf { + u8 _resvd0; /* reserved */ + u8 s_id[3]; /* FC vNIC only: Source S_ID */ + u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ + u8 crn; /* SCSI Command Reference No. */ + u8 _resvd2[3]; /* reserved */ + u32 tmf_type; /* task management request type */ +}; + +/* + * Task Management request + */ +#define FCPIO_TTMF_CLR_ACA 0x40 /* Clear ACA condition */ +#define FCPIO_TTMF_LUN_RESET 0x10 /* logical unit reset task mgmt */ +#define FCPIO_TTMF_CLR_TASK_SET 0x04 /* clear task set */ +#define FCPIO_TTMF_ABT_TASK_SET 0x02 /* abort task set */ +#define FCPIO_TTMF_ABT_TASK 0x01 /* abort task */ + +/* + * fcpio_tabort_cmpl: firmware -> host response + * + * used by the firmware to respond to a host's tabort request + */ +struct fcpio_tabort_cmpl { + u16 rx_id; /* rx_id of the target request */ + u16 _resvd0; /* reserved */ +}; + +/* + * fcpio_ack: firmware -> host response + * + * used by firmware to notify the host of the last work request received + */ +struct fcpio_ack { + u16 request_out; /* last host entry received */ + u16 _resvd; +}; + +/* + * fcpio_reset_cmpl: firmware -> host response + * + * use by firmware to respond to the host's reset request + */ +struct fcpio_reset_cmpl { + u16 vnic_id; +}; + +/* + * fcpio_flogi_reg_cmpl: firmware -> host response + * + * fc vnic only + * response to the fcpio_flogi_reg request + */ +struct fcpio_flogi_reg_cmpl { + u32 _resvd; +}; + +/* + * fcpio_echo_cmpl: firmware -> host response + * + * response to the fcpio_echo request + */ +struct fcpio_echo_cmpl { + u32 _resvd; +}; + +/* + * fcpio_lunmap_chng: firmware -> host notification + * + * scsi vnic only + * notifies the host that the lunmap tables have changed + */ +struct fcpio_lunmap_chng { + u32 _resvd; +}; + +/* + * fcpio_lunmap_req_cmpl: firmware -> host response + * + * scsi vnic only + * response for lunmap table request from the host + */ +struct fcpio_lunmap_req_cmpl { + u32 _resvd; +}; + +/* + * Basic structure for all fcpio structures that are sent from the firmware to + * the host. They are 64 bytes per structure. 
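+ * The most significant bit of the last byte is the color bit; it is
+ * encoded and decoded with fcpio_color_enc()/fcpio_color_dec() below.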
+ */ +#define FCPIO_FW_REQ_LEN 64 /* expected length of fw requests */ +struct fcpio_fw_req { + struct fcpio_header hdr; + + union { + /* + * Defines space needed for request + */ + u8 buf[FCPIO_FW_REQ_LEN - sizeof(struct fcpio_header)]; + + /* + * Initiator firmware responses + */ + struct fcpio_icmnd_cmpl icmnd_cmpl; + struct fcpio_itmf_cmpl itmf_cmpl; + + /* + * Target firmware new requests + */ + struct fcpio_tcmnd_16 tcmnd_16; + struct fcpio_tcmnd_32 tcmnd_32; + + /* + * Target firmware responses + */ + struct fcpio_tdrsp_cmpl tdrsp_cmpl; + struct fcpio_ttmf ttmf; + struct fcpio_tabort_cmpl tabort_cmpl; + + /* + * Firmware response to work received + */ + struct fcpio_ack ack; + + /* + * Misc requests + */ + struct fcpio_reset_cmpl reset_cmpl; + struct fcpio_flogi_reg_cmpl flogi_reg_cmpl; + struct fcpio_echo_cmpl echo_cmpl; + struct fcpio_lunmap_chng lunmap_chng; + struct fcpio_lunmap_req_cmpl lunmap_req_cmpl; + } u; +}; + +/* + * Access routines to encode and decode the color bit, which is the most + * significant bit of the MSB of the structure + */ +static inline void fcpio_color_enc(struct fcpio_fw_req *fw_req, u8 color) +{ + u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1; + + if (color) + *c |= 0x80; + else + *c &= ~0x80; +} + +static inline void fcpio_color_dec(struct fcpio_fw_req *fw_req, u8 *color) +{ + u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1; + + *color = *c >> 7; + + /* + * Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. + */ + + rmb(); + +} + +/* + * Lunmap table entry for scsi vnics + */ +#define FCPIO_LUNMAP_TABLE_SIZE 256 +#define FCPIO_FLAGS_LUNMAP_VALID 0x80 +#define FCPIO_FLAGS_BOOT 0x01 +struct fcpio_lunmap_entry { + u8 bus; + u8 target; + u8 lun; + u8 path_cnt; + u16 flags; + u16 update_cnt; +}; + +struct fcpio_lunmap_tbl { + u32 update_cnt; + struct fcpio_lunmap_entry lunmaps[FCPIO_LUNMAP_TABLE_SIZE]; +}; + +#endif /* _FCPIO_H_ */ diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h new file mode 100644 index 000000000..22cef283b --- /dev/null +++ b/drivers/scsi/fnic/fnic.h @@ -0,0 +1,386 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ +#ifndef _FNIC_H_ +#define _FNIC_H_ + +#include +#include +#include +#include +#include +#include +#include "fnic_io.h" +#include "fnic_res.h" +#include "fnic_trace.h" +#include "fnic_stats.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_cq.h" +#include "vnic_wq_copy.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "vnic_scsi.h" + +#define DRV_NAME "fnic" +#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" +#define DRV_VERSION "1.6.0.57" +#define PFX DRV_NAME ": " +#define DFX DRV_NAME "%d: " + +#define DESC_CLEAN_LOW_WATERMARK 8 +#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */ +#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */ +#define FNIC_MAX_IO_REQ 1024 /* scsi_cmnd tag map entries */ +#define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */ +#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ +#define FNIC_DFLT_QUEUE_DEPTH 256 +#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ + +/* + * Tag bits used for special requests. + */ +#define FNIC_TAG_ABORT BIT(30) /* tag bit indicating abort */ +#define FNIC_TAG_DEV_RST BIT(29) /* indicates device reset */ +#define FNIC_TAG_MASK (BIT(24) - 1) /* mask for lookup */ +#define FNIC_NO_TAG -1 + +/* + * Command flags to identify the type of command and for other future + * use. + */ +#define FNIC_NO_FLAGS 0 +#define FNIC_IO_INITIALIZED BIT(0) +#define FNIC_IO_ISSUED BIT(1) +#define FNIC_IO_DONE BIT(2) +#define FNIC_IO_REQ_NULL BIT(3) +#define FNIC_IO_ABTS_PENDING BIT(4) +#define FNIC_IO_ABORTED BIT(5) +#define FNIC_IO_ABTS_ISSUED BIT(6) +#define FNIC_IO_TERM_ISSUED BIT(7) +#define FNIC_IO_INTERNAL_TERM_ISSUED BIT(8) +#define FNIC_IO_ABT_TERM_DONE BIT(9) +#define FNIC_IO_ABT_TERM_REQ_NULL BIT(10) +#define FNIC_IO_ABT_TERM_TIMED_OUT BIT(11) +#define FNIC_DEVICE_RESET BIT(12) /* Device reset request */ +#define FNIC_DEV_RST_ISSUED BIT(13) +#define FNIC_DEV_RST_TIMED_OUT BIT(14) +#define FNIC_DEV_RST_ABTS_ISSUED BIT(15) +#define FNIC_DEV_RST_TERM_ISSUED BIT(16) +#define FNIC_DEV_RST_DONE BIT(17) +#define FNIC_DEV_RST_REQ_NULL BIT(18) +#define FNIC_DEV_RST_ABTS_DONE BIT(19) +#define FNIC_DEV_RST_TERM_DONE BIT(20) +#define FNIC_DEV_RST_ABTS_PENDING BIT(21) + +/* + * fnic private data per SCSI command. + * These fields are locked by the hashed io_req_lock. 
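+ *
+ * A minimal access sketch (illustrative only; "sc" is a placeholder
+ * struct scsi_cmnd pointer):
+ *
+ *	struct fnic_cmd_priv *priv = fnic_priv(sc);
+ *	u64 snapshot = fnic_flags_and_state(sc);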
+ */ +struct fnic_cmd_priv { + struct fnic_io_req *io_req; + enum fnic_ioreq_state state; + u32 flags; + u16 abts_status; + u16 lr_status; +}; + +static inline struct fnic_cmd_priv *fnic_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd) +{ + struct fnic_cmd_priv *fcmd = fnic_priv(cmd); + + return ((u64)fcmd->flags << 32) | fcmd->state; +} + +#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */ + +#define FNIC_LUN_RESET_TIMEOUT 10000 /* mSec */ +#define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */ +#define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */ +#define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */ +#define FNIC_ABT_TERM_DELAY_TIMEOUT 500 /* mSec */ + +#define FNIC_MAX_FCP_TARGET 256 + +/** + * state_flags to identify host state along along with fnic's state + **/ +#define __FNIC_FLAGS_FWRESET BIT(0) /* fwreset in progress */ +#define __FNIC_FLAGS_BLOCK_IO BIT(1) /* IOs are blocked */ + +#define FNIC_FLAGS_NONE (0) +#define FNIC_FLAGS_FWRESET (__FNIC_FLAGS_FWRESET | \ + __FNIC_FLAGS_BLOCK_IO) + +#define FNIC_FLAGS_IO_BLOCKED (__FNIC_FLAGS_BLOCK_IO) + +#define fnic_set_state_flags(fnicp, st_flags) \ + __fnic_set_state_flags(fnicp, st_flags, 0) + +#define fnic_clear_state_flags(fnicp, st_flags) \ + __fnic_set_state_flags(fnicp, st_flags, 1) + +extern unsigned int fnic_log_level; +extern unsigned int io_completions; + +#define FNIC_MAIN_LOGGING 0x01 +#define FNIC_FCS_LOGGING 0x02 +#define FNIC_SCSI_LOGGING 0x04 +#define FNIC_ISR_LOGGING 0x08 + +#define FNIC_CHECK_LOGGING(LEVEL, CMD) \ +do { \ + if (unlikely(fnic_log_level & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ +} while (0) + +#define FNIC_MAIN_DBG(kern_level, host, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING, \ + shost_printk(kern_level, host, fmt, ##args);) + +#define FNIC_FCS_DBG(kern_level, host, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \ + shost_printk(kern_level, host, fmt, ##args);) + +#define FNIC_SCSI_DBG(kern_level, host, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \ + shost_printk(kern_level, host, fmt, ##args);) + +#define FNIC_ISR_DBG(kern_level, host, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \ + shost_printk(kern_level, host, fmt, ##args);) + +#define FNIC_MAIN_NOTE(kern_level, host, fmt, args...) 
\ + shost_printk(kern_level, host, fmt, ##args) + +extern const char *fnic_state_str[]; + +enum fnic_intx_intr_index { + FNIC_INTX_WQ_RQ_COPYWQ, + FNIC_INTX_ERR, + FNIC_INTX_NOTIFY, + FNIC_INTX_INTR_MAX, +}; + +enum fnic_msix_intr_index { + FNIC_MSIX_RQ, + FNIC_MSIX_WQ, + FNIC_MSIX_WQ_COPY, + FNIC_MSIX_ERR_NOTIFY, + FNIC_MSIX_INTR_MAX, +}; + +struct fnic_msix_entry { + int requested; + char devname[IFNAMSIZ + 11]; + irqreturn_t (*isr)(int, void *); + void *devid; +}; + +enum fnic_state { + FNIC_IN_FC_MODE = 0, + FNIC_IN_FC_TRANS_ETH_MODE, + FNIC_IN_ETH_MODE, + FNIC_IN_ETH_TRANS_FC_MODE, +}; + +#define FNIC_WQ_COPY_MAX 1 +#define FNIC_WQ_MAX 1 +#define FNIC_RQ_MAX 1 +#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX) +#define FNIC_DFLT_IO_COMPLETIONS 256 + +struct mempool; + +enum fnic_evt { + FNIC_EVT_START_VLAN_DISC = 1, + FNIC_EVT_START_FCF_DISC = 2, + FNIC_EVT_MAX, +}; + +struct fnic_event { + struct list_head list; + struct fnic *fnic; + enum fnic_evt event; +}; + +/* Per-instance private data structure */ +struct fnic { + struct fc_lport *lport; + struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */ + struct vnic_dev_bar bar0; + + struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX]; + + struct vnic_stats *stats; + unsigned long stats_time; /* time of stats update */ + unsigned long stats_reset_time; /* time of stats reset */ + struct vnic_nic_cfg *nic_cfg; + char name[IFNAMSIZ]; + struct timer_list notify_timer; /* used for MSI interrupts */ + + unsigned int fnic_max_tag_id; + unsigned int err_intr_offset; + unsigned int link_intr_offset; + + unsigned int wq_count; + unsigned int cq_count; + + struct mutex sgreset_mutex; + spinlock_t sgreset_lock; /* lock for sgreset */ + struct scsi_cmnd *sgreset_sc; + struct dentry *fnic_stats_debugfs_host; + struct dentry *fnic_stats_debugfs_file; + struct dentry *fnic_reset_debugfs_file; + unsigned int reset_stats; + atomic64_t io_cmpl_skip; + struct fnic_stats fnic_stats; + + u32 vlan_hw_insert:1; /* let hw insert the tag */ + u32 in_remove:1; /* fnic device in removal */ + u32 stop_rx_link_events:1; /* stop proc. 
rx frames, link events */ + u32 link_events:1; /* set when we get any link event*/ + + struct completion *remove_wait; /* device remove thread blocks */ + + atomic_t in_flight; /* io counter */ + bool internal_reset_inprogress; + u32 _reserved; /* fill hole */ + unsigned long state_flags; /* protected by host lock */ + enum fnic_state state; + spinlock_t fnic_lock; + + u16 vlan_id; /* VLAN tag including priority */ + u8 data_src_addr[ETH_ALEN]; + u64 fcp_input_bytes; /* internal statistic */ + u64 fcp_output_bytes; /* internal statistic */ + u32 link_down_cnt; + int link_status; + + struct list_head list; + struct pci_dev *pdev; + struct vnic_fc_config config; + struct vnic_dev *vdev; + unsigned int raw_wq_count; + unsigned int wq_copy_count; + unsigned int rq_count; + int fw_ack_index[FNIC_WQ_COPY_MAX]; + unsigned short fw_ack_recd[FNIC_WQ_COPY_MAX]; + unsigned short wq_copy_desc_low[FNIC_WQ_COPY_MAX]; + unsigned int intr_count; + u32 __iomem *legacy_pba; + struct fnic_host_tag *tags; + mempool_t *io_req_pool; + mempool_t *io_sgl_pool[FNIC_SGL_NUM_CACHES]; + spinlock_t io_req_lock[FNIC_IO_LOCKS]; /* locks for scsi cmnds */ + + struct work_struct link_work; + struct work_struct frame_work; + struct sk_buff_head frame_queue; + struct sk_buff_head tx_queue; + + /*** FIP related data members -- start ***/ + void (*set_vlan)(struct fnic *, u16 vlan); + struct work_struct fip_frame_work; + struct sk_buff_head fip_frame_queue; + struct timer_list fip_timer; + struct list_head vlans; + spinlock_t vlans_lock; + + struct work_struct event_work; + struct list_head evlist; + /*** FIP related data members -- end ***/ + + /* copy work queue cache line section */ + ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; + /* completion queue cache line section */ + ____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX]; + + spinlock_t wq_copy_lock[FNIC_WQ_COPY_MAX]; + + /* work queue cache line section */ + ____cacheline_aligned struct vnic_wq wq[FNIC_WQ_MAX]; + spinlock_t wq_lock[FNIC_WQ_MAX]; + + /* receive queue cache line section */ + ____cacheline_aligned struct vnic_rq rq[FNIC_RQ_MAX]; + + /* interrupt resource cache line section */ + ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX]; +}; + +static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip) +{ + return container_of(fip, struct fnic, ctlr); +} + +extern struct workqueue_struct *fnic_event_queue; +extern struct workqueue_struct *fnic_fip_queue; +extern const struct attribute_group *fnic_host_groups[]; + +void fnic_clear_intr_mode(struct fnic *fnic); +int fnic_set_intr_mode(struct fnic *fnic); +void fnic_free_intr(struct fnic *fnic); +int fnic_request_intr(struct fnic *fnic); + +int fnic_send(struct fc_lport *, struct fc_frame *); +void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); +void fnic_handle_frame(struct work_struct *work); +void fnic_handle_link(struct work_struct *work); +void fnic_handle_event(struct work_struct *work); +int fnic_rq_cmpl_handler(struct fnic *fnic, int); +int fnic_alloc_rq_frame(struct vnic_rq *rq); +void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); +void fnic_flush_tx(struct fnic *); +void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb); +void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *); +void fnic_update_mac(struct fc_lport *, u8 *new); +void fnic_update_mac_locked(struct fnic *, u8 *new); + +int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); +int fnic_abort_cmd(struct scsi_cmnd *); +int fnic_device_reset(struct 
scsi_cmnd *); +int fnic_host_reset(struct scsi_cmnd *); +int fnic_reset(struct Scsi_Host *); +void fnic_scsi_cleanup(struct fc_lport *); +void fnic_scsi_abort_io(struct fc_lport *); +void fnic_empty_scsi_cleanup(struct fc_lport *); +void fnic_exch_mgr_reset(struct fc_lport *, u32, u32); +int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int); +int fnic_wq_cmpl_handler(struct fnic *fnic, int); +int fnic_flogi_reg_handler(struct fnic *fnic, u32); +void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, + struct fcpio_host_req *desc); +int fnic_fw_reset_handler(struct fnic *fnic); +void fnic_terminate_rport_io(struct fc_rport *); +const char *fnic_state_to_str(unsigned int state); + +void fnic_log_q_error(struct fnic *fnic); +void fnic_handle_link_event(struct fnic *fnic); + +int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *); + +void fnic_handle_fip_frame(struct work_struct *work); +void fnic_handle_fip_event(struct fnic *fnic); +void fnic_fcoe_reset_vlans(struct fnic *fnic); +void fnic_fcoe_evlist_free(struct fnic *fnic); +extern void fnic_handle_fip_timer(struct fnic *fnic); + +static inline int +fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) +{ + return ((fnic->state_flags & st_flags) == st_flags); +} +void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); +void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *); +#endif /* _FNIC_H_ */ diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c new file mode 100644 index 000000000..a61e0c5e6 --- /dev/null +++ b/drivers/scsi/fnic/fnic_attrs.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ +#include +#include +#include +#include "fnic.h" + +static ssize_t fnic_show_state(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fc_lport *lp = shost_priv(class_to_shost(dev)); + struct fnic *fnic = lport_priv(lp); + + return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]); +} + +static ssize_t fnic_show_drv_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION); +} + +static ssize_t fnic_show_link_state(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fc_lport *lp = shost_priv(class_to_shost(dev)); + + return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up) + ? "Link Up" : "Link Down"); +} + +static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL); +static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL); +static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL); + +static struct attribute *fnic_host_attrs[] = { + &dev_attr_fnic_state.attr, + &dev_attr_drv_version.attr, + &dev_attr_link_state.attr, + NULL, +}; + +static const struct attribute_group fnic_host_attr_group = { + .attrs = fnic_host_attrs +}; + +const struct attribute_group *fnic_host_groups[] = { + &fnic_host_attr_group, + NULL +}; diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c new file mode 100644 index 000000000..2619a2d4f --- /dev/null +++ b/drivers/scsi/fnic/fnic_debugfs.c @@ -0,0 +1,718 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2012 Cisco Systems, Inc. All rights reserved. 
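+
+/*
+ * Debugfs layout created by this file, relative to the debugfs mount
+ * point (typically /sys/kernel/debug):
+ *
+ *	fnic/tracing_enable
+ *	fnic/trace
+ *	fnic/fc_trace_enable
+ *	fnic/fc_trace_clear
+ *	fnic/fc_trace
+ *	fnic/fc_trace_rdata
+ *	fnic/statistics/host<n>/stats
+ *	fnic/statistics/host<n>/reset_stats
+ */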
+ +#include +#include +#include +#include +#include "fnic.h" + +static struct dentry *fnic_trace_debugfs_root; +static struct dentry *fnic_trace_debugfs_file; +static struct dentry *fnic_trace_enable; +static struct dentry *fnic_stats_debugfs_root; + +static struct dentry *fnic_fc_trace_debugfs_file; +static struct dentry *fnic_fc_rdata_trace_debugfs_file; +static struct dentry *fnic_fc_trace_enable; +static struct dentry *fnic_fc_trace_clear; + +struct fc_trace_flag_type { + u8 fc_row_file; + u8 fc_normal_file; + u8 fnic_trace; + u8 fc_trace; + u8 fc_clear; +}; + +static struct fc_trace_flag_type *fc_trc_flag; + +/* + * fnic_debugfs_init - Initialize debugfs for fnic debug logging + * + * Description: + * When Debugfs is configured this routine sets up the fnic debugfs + * file system. If not already created, this routine will create the + * fnic directory and statistics directory for trace buffer and + * stats logging. + */ +int fnic_debugfs_init(void) +{ + fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL); + + fnic_stats_debugfs_root = debugfs_create_dir("statistics", + fnic_trace_debugfs_root); + + /* Allocate memory to structure */ + fc_trc_flag = vmalloc(sizeof(struct fc_trace_flag_type)); + + if (fc_trc_flag) { + fc_trc_flag->fc_row_file = 0; + fc_trc_flag->fc_normal_file = 1; + fc_trc_flag->fnic_trace = 2; + fc_trc_flag->fc_trace = 3; + fc_trc_flag->fc_clear = 4; + return 0; + } + + return -ENOMEM; +} + +/* + * fnic_debugfs_terminate - Tear down debugfs infrastructure + * + * Description: + * When Debugfs is configured this routine removes debugfs file system + * elements that are specific to fnic. + */ +void fnic_debugfs_terminate(void) +{ + debugfs_remove(fnic_stats_debugfs_root); + fnic_stats_debugfs_root = NULL; + + debugfs_remove(fnic_trace_debugfs_root); + fnic_trace_debugfs_root = NULL; + + vfree(fc_trc_flag); +} + +/* + * fnic_trace_ctrl_read - + * Read trace_enable ,fc_trace_enable + * or fc_trace_clear debugfs file + * @filp: The file pointer to read from. + * @ubuf: The buffer to copy the data to. + * @cnt: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads value of variable fnic_tracing_enabled or + * fnic_fc_tracing_enabled or fnic_fc_trace_cleared + * and stores into local @buf. + * It will start reading file at @ppos and + * copy up to @cnt of data to @ubuf from @buf. + * + * Returns: + * This function returns the amount of data that was read. + */ +static ssize_t fnic_trace_ctrl_read(struct file *filp, + char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char buf[64]; + int len; + u8 *trace_type; + len = 0; + trace_type = (u8 *)filp->private_data; + if (*trace_type == fc_trc_flag->fnic_trace) + len = sprintf(buf, "%d\n", fnic_tracing_enabled); + else if (*trace_type == fc_trc_flag->fc_trace) + len = sprintf(buf, "%d\n", fnic_fc_tracing_enabled); + else if (*trace_type == fc_trc_flag->fc_clear) + len = sprintf(buf, "%d\n", fnic_fc_trace_cleared); + else + pr_err("fnic: Cannot read to any debugfs file\n"); + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); +} + +/* + * fnic_trace_ctrl_write - + * Write to trace_enable, fc_trace_enable or + * fc_trace_clear debugfs file + * @filp: The file pointer to write from. + * @ubuf: The buffer to copy the data from. + * @cnt: The number of bytes to write. + * @ppos: The position in the file to start writing to. 
+ * + * Description: + * This routine writes data from user buffer @ubuf to buffer @buf and + * sets fc_trace_enable ,tracing_enable or fnic_fc_trace_cleared + * value as per user input. + * + * Returns: + * This function returns the amount of data that was written. + */ +static ssize_t fnic_trace_ctrl_write(struct file *filp, + const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char buf[64]; + unsigned long val; + int ret; + u8 *trace_type; + trace_type = (u8 *)filp->private_data; + + if (cnt >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, cnt)) + return -EFAULT; + + buf[cnt] = 0; + + ret = kstrtoul(buf, 10, &val); + if (ret < 0) + return ret; + + if (*trace_type == fc_trc_flag->fnic_trace) + fnic_tracing_enabled = val; + else if (*trace_type == fc_trc_flag->fc_trace) + fnic_fc_tracing_enabled = val; + else if (*trace_type == fc_trc_flag->fc_clear) + fnic_fc_trace_cleared = val; + else + pr_err("fnic: cannot write to any debugfs file\n"); + + (*ppos)++; + + return cnt; +} + +static const struct file_operations fnic_trace_ctrl_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = fnic_trace_ctrl_read, + .write = fnic_trace_ctrl_write, +}; + +/* + * fnic_trace_debugfs_open - Open the fnic trace log + * @inode: The inode pointer + * @file: The file pointer to attach the log output + * + * Description: + * This routine is the entry point for the debugfs open file operation. + * It allocates the necessary buffer for the log, fills the buffer from + * the in-memory log and then returns a pointer to that log in + * the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return + * a negative error value. + */ +static int fnic_trace_debugfs_open(struct inode *inode, + struct file *file) +{ + fnic_dbgfs_t *fnic_dbg_prt; + u8 *rdata_ptr; + rdata_ptr = (u8 *)inode->i_private; + fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL); + if (!fnic_dbg_prt) + return -ENOMEM; + + if (*rdata_ptr == fc_trc_flag->fnic_trace) { + fnic_dbg_prt->buffer = vzalloc(array3_size(3, trace_max_pages, + PAGE_SIZE)); + if (!fnic_dbg_prt->buffer) { + kfree(fnic_dbg_prt); + return -ENOMEM; + } + fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt); + } else { + fnic_dbg_prt->buffer = + vzalloc(array3_size(3, fnic_fc_trace_max_pages, + PAGE_SIZE)); + if (!fnic_dbg_prt->buffer) { + kfree(fnic_dbg_prt); + return -ENOMEM; + } + fnic_dbg_prt->buffer_len = + fnic_fc_trace_get_data(fnic_dbg_prt, *rdata_ptr); + } + file->private_data = fnic_dbg_prt; + + return 0; +} + +/* + * fnic_trace_debugfs_lseek - Seek through a debugfs file + * @file: The file pointer to seek through. + * @offset: The offset to seek to or the amount to seek by. + * @howto: Indicates how to seek. + * + * Description: + * This routine is the entry point for the debugfs lseek file operation. + * The @howto parameter indicates whether @offset is the offset to directly + * seek to, or if it is a value to seek forward or reverse by. This function + * figures out what the new offset of the debugfs file will be and assigns + * that value to the f_pos field of @file. + * + * Returns: + * This function returns the new offset if successful and returns a negative + * error if unable to process the seek. 
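+ *
+ * Internally this is a thin wrapper around fixed_size_llseek(), bounded
+ * by the length of the trace snapshot captured when the file was opened.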
+ */ +static loff_t fnic_trace_debugfs_lseek(struct file *file, + loff_t offset, + int howto) +{ + fnic_dbgfs_t *fnic_dbg_prt = file->private_data; + return fixed_size_llseek(file, offset, howto, + fnic_dbg_prt->buffer_len); +} + +/* + * fnic_trace_debugfs_read - Read a debugfs file + * @file: The file pointer to read from. + * @ubuf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @pos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the buffer indicated in the private_data + * field of @file. It will start reading at @pos and copy up to @nbytes of + * data to @ubuf. + * + * Returns: + * This function returns the amount of data that was read (this could be + * less than @nbytes if the end of the file was reached). + */ +static ssize_t fnic_trace_debugfs_read(struct file *file, + char __user *ubuf, + size_t nbytes, + loff_t *pos) +{ + fnic_dbgfs_t *fnic_dbg_prt = file->private_data; + int rc = 0; + rc = simple_read_from_buffer(ubuf, nbytes, pos, + fnic_dbg_prt->buffer, + fnic_dbg_prt->buffer_len); + return rc; +} + +/* + * fnic_trace_debugfs_release - Release the buffer used to store + * debugfs file data + * @inode: The inode pointer + * @file: The file pointer that contains the buffer to release + * + * Description: + * This routine frees the buffer that was allocated when the debugfs + * file was opened. + * + * Returns: + * This function returns zero. + */ +static int fnic_trace_debugfs_release(struct inode *inode, + struct file *file) +{ + fnic_dbgfs_t *fnic_dbg_prt = file->private_data; + + vfree(fnic_dbg_prt->buffer); + kfree(fnic_dbg_prt); + return 0; +} + +static const struct file_operations fnic_trace_debugfs_fops = { + .owner = THIS_MODULE, + .open = fnic_trace_debugfs_open, + .llseek = fnic_trace_debugfs_lseek, + .read = fnic_trace_debugfs_read, + .release = fnic_trace_debugfs_release, +}; + +/* + * fnic_trace_debugfs_init - Initialize debugfs for fnic trace logging + * + * Description: + * When Debugfs is configured this routine sets up the fnic debugfs + * file system. If not already created, this routine will create the + * create file trace to log fnic trace buffer output into debugfs and + * it will also create file trace_enable to control enable/disable of + * trace logging into trace buffer. + */ +void fnic_trace_debugfs_init(void) +{ + fnic_trace_enable = debugfs_create_file("tracing_enable", + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fnic_trace), + &fnic_trace_ctrl_fops); + + fnic_trace_debugfs_file = debugfs_create_file("trace", + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fnic_trace), + &fnic_trace_debugfs_fops); +} + +/* + * fnic_trace_debugfs_terminate - Tear down debugfs infrastructure + * + * Description: + * When Debugfs is configured this routine removes debugfs file system + * elements that are specific to fnic trace logging. + */ +void fnic_trace_debugfs_terminate(void) +{ + debugfs_remove(fnic_trace_debugfs_file); + fnic_trace_debugfs_file = NULL; + + debugfs_remove(fnic_trace_enable); + fnic_trace_enable = NULL; +} + +/* + * fnic_fc_trace_debugfs_init - + * Initialize debugfs for fnic control frame trace logging + * + * Description: + * When Debugfs is configured this routine sets up the fnic_fc debugfs + * file system. 
If not already created, this routine will create the + * create file trace to log fnic fc trace buffer output into debugfs and + * it will also create file fc_trace_enable to control enable/disable of + * trace logging into trace buffer. + */ + +void fnic_fc_trace_debugfs_init(void) +{ + fnic_fc_trace_enable = debugfs_create_file("fc_trace_enable", + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fc_trace), + &fnic_trace_ctrl_fops); + + fnic_fc_trace_clear = debugfs_create_file("fc_trace_clear", + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fc_clear), + &fnic_trace_ctrl_fops); + + fnic_fc_rdata_trace_debugfs_file = + debugfs_create_file("fc_trace_rdata", + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fc_normal_file), + &fnic_trace_debugfs_fops); + + fnic_fc_trace_debugfs_file = + debugfs_create_file("fc_trace", + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fc_row_file), + &fnic_trace_debugfs_fops); +} + +/* + * fnic_fc_trace_debugfs_terminate - Tear down debugfs infrastructure + * + * Description: + * When Debugfs is configured this routine removes debugfs file system + * elements that are specific to fnic_fc trace logging. + */ + +void fnic_fc_trace_debugfs_terminate(void) +{ + debugfs_remove(fnic_fc_trace_debugfs_file); + fnic_fc_trace_debugfs_file = NULL; + + debugfs_remove(fnic_fc_rdata_trace_debugfs_file); + fnic_fc_rdata_trace_debugfs_file = NULL; + + debugfs_remove(fnic_fc_trace_enable); + fnic_fc_trace_enable = NULL; + + debugfs_remove(fnic_fc_trace_clear); + fnic_fc_trace_clear = NULL; +} + +/* + * fnic_reset_stats_open - Open the reset_stats file + * @inode: The inode pointer. + * @file: The file pointer to attach the stats reset flag. + * + * Description: + * This routine opens a debugsfs file reset_stats and stores i_private data + * to debug structure to retrieve later for while performing other + * file oprations. + * + * Returns: + * This function returns zero if successful. + */ +static int fnic_reset_stats_open(struct inode *inode, struct file *file) +{ + struct stats_debug_info *debug; + + debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL); + if (!debug) + return -ENOMEM; + + debug->i_private = inode->i_private; + + file->private_data = debug; + + return 0; +} + +/* + * fnic_reset_stats_read - Read a reset_stats debugfs file + * @filp: The file pointer to read from. + * @ubuf: The buffer to copy the data to. + * @cnt: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads value of variable reset_stats + * and stores into local @buf. It will start reading file at @ppos and + * copy up to @cnt of data to @ubuf from @buf. + * + * Returns: + * This function returns the amount of data that was read. + */ +static ssize_t fnic_reset_stats_read(struct file *file, + char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + struct stats_debug_info *debug = file->private_data; + struct fnic *fnic = (struct fnic *)debug->i_private; + char buf[64]; + int len; + + len = sprintf(buf, "%u\n", fnic->reset_stats); + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); +} + +/* + * fnic_reset_stats_write - Write to reset_stats debugfs file + * @filp: The file pointer to write from. + * @ubuf: The buffer to copy the data from. + * @cnt: The number of bytes to write. + * @ppos: The position in the file to start writing to. 
+ * + * Description: + * This routine writes data from user buffer @ubuf to buffer @buf and + * resets cumulative stats of fnic. + * + * Returns: + * This function returns the amount of data that was written. + */ +static ssize_t fnic_reset_stats_write(struct file *file, + const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + struct stats_debug_info *debug = file->private_data; + struct fnic *fnic = (struct fnic *)debug->i_private; + struct fnic_stats *stats = &fnic->fnic_stats; + u64 *io_stats_p = (u64 *)&stats->io_stats; + u64 *fw_stats_p = (u64 *)&stats->fw_stats; + char buf[64]; + unsigned long val; + int ret; + + if (cnt >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, cnt)) + return -EFAULT; + + buf[cnt] = 0; + + ret = kstrtoul(buf, 10, &val); + if (ret < 0) + return ret; + + fnic->reset_stats = val; + + if (fnic->reset_stats) { + /* Skip variable is used to avoid descrepancies to Num IOs + * and IO Completions stats. Skip incrementing No IO Compls + * for pending active IOs after reset stats + */ + atomic64_set(&fnic->io_cmpl_skip, + atomic64_read(&stats->io_stats.active_ios)); + memset(&stats->abts_stats, 0, sizeof(struct abort_stats)); + memset(&stats->term_stats, 0, + sizeof(struct terminate_stats)); + memset(&stats->reset_stats, 0, sizeof(struct reset_stats)); + memset(&stats->misc_stats, 0, sizeof(struct misc_stats)); + memset(&stats->vlan_stats, 0, sizeof(struct vlan_stats)); + memset(io_stats_p+1, 0, + sizeof(struct io_path_stats) - sizeof(u64)); + memset(fw_stats_p+1, 0, + sizeof(struct fw_stats) - sizeof(u64)); + ktime_get_real_ts64(&stats->stats_timestamps.last_reset_time); + } + + (*ppos)++; + return cnt; +} + +/* + * fnic_reset_stats_release - Release the buffer used to store + * debugfs file data + * @inode: The inode pointer + * @file: The file pointer that contains the buffer to release + * + * Description: + * This routine frees the buffer that was allocated when the debugfs + * file was opened. + * + * Returns: + * This function returns zero. + */ +static int fnic_reset_stats_release(struct inode *inode, + struct file *file) +{ + struct stats_debug_info *debug = file->private_data; + kfree(debug); + return 0; +} + +/* + * fnic_stats_debugfs_open - Open the stats file for specific host + * and get fnic stats. + * @inode: The inode pointer. + * @file: The file pointer to attach the specific host statistics. + * + * Description: + * This routine opens a debugsfs file stats of specific host and print + * fnic stats. + * + * Returns: + * This function returns zero if successful. + */ +static int fnic_stats_debugfs_open(struct inode *inode, + struct file *file) +{ + struct fnic *fnic = inode->i_private; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + struct stats_debug_info *debug; + int buf_size = 2 * PAGE_SIZE; + + debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL); + if (!debug) + return -ENOMEM; + + debug->debug_buffer = vmalloc(buf_size); + if (!debug->debug_buffer) { + kfree(debug); + return -ENOMEM; + } + + debug->buf_size = buf_size; + memset((void *)debug->debug_buffer, 0, buf_size); + debug->buffer_len = fnic_get_stats_data(debug, fnic_stats); + + file->private_data = debug; + + return 0; +} + +/* + * fnic_stats_debugfs_read - Read a debugfs file + * @file: The file pointer to read from. + * @ubuf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @pos: The position in the file to start reading from. 
+ * + * Description: + * This routine reads data from the buffer indicated in the private_data + * field of @file. It will start reading at @pos and copy up to @nbytes of + * data to @ubuf. + * + * Returns: + * This function returns the amount of data that was read (this could be + * less than @nbytes if the end of the file was reached). + */ +static ssize_t fnic_stats_debugfs_read(struct file *file, + char __user *ubuf, + size_t nbytes, + loff_t *pos) +{ + struct stats_debug_info *debug = file->private_data; + int rc = 0; + rc = simple_read_from_buffer(ubuf, nbytes, pos, + debug->debug_buffer, + debug->buffer_len); + return rc; +} + +/* + * fnic_stats_stats_release - Release the buffer used to store + * debugfs file data + * @inode: The inode pointer + * @file: The file pointer that contains the buffer to release + * + * Description: + * This routine frees the buffer that was allocated when the debugfs + * file was opened. + * + * Returns: + * This function returns zero. + */ +static int fnic_stats_debugfs_release(struct inode *inode, + struct file *file) +{ + struct stats_debug_info *debug = file->private_data; + vfree(debug->debug_buffer); + kfree(debug); + return 0; +} + +static const struct file_operations fnic_stats_debugfs_fops = { + .owner = THIS_MODULE, + .open = fnic_stats_debugfs_open, + .read = fnic_stats_debugfs_read, + .release = fnic_stats_debugfs_release, +}; + +static const struct file_operations fnic_reset_debugfs_fops = { + .owner = THIS_MODULE, + .open = fnic_reset_stats_open, + .read = fnic_reset_stats_read, + .write = fnic_reset_stats_write, + .release = fnic_reset_stats_release, +}; + +/* + * fnic_stats_init - Initialize stats struct and create stats file per fnic + * + * Description: + * When Debugfs is configured this routine sets up the stats file per fnic + * It will create file stats and reset_stats under statistics/host# directory + * to log per fnic stats. + */ +void fnic_stats_debugfs_init(struct fnic *fnic) +{ + char name[16]; + + snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no); + + fnic->fnic_stats_debugfs_host = debugfs_create_dir(name, + fnic_stats_debugfs_root); + + fnic->fnic_stats_debugfs_file = debugfs_create_file("stats", + S_IFREG|S_IRUGO|S_IWUSR, + fnic->fnic_stats_debugfs_host, + fnic, + &fnic_stats_debugfs_fops); + + fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats", + S_IFREG|S_IRUGO|S_IWUSR, + fnic->fnic_stats_debugfs_host, + fnic, + &fnic_reset_debugfs_fops); +} + +/* + * fnic_stats_debugfs_remove - Tear down debugfs infrastructure of stats + * + * Description: + * When Debugfs is configured this routine removes debugfs file system + * elements that are specific to fnic stats. + */ +void fnic_stats_debugfs_remove(struct fnic *fnic) +{ + if (!fnic) + return; + + debugfs_remove(fnic->fnic_stats_debugfs_file); + fnic->fnic_stats_debugfs_file = NULL; + + debugfs_remove(fnic->fnic_reset_debugfs_file); + fnic->fnic_reset_debugfs_file = NULL; + + debugfs_remove(fnic->fnic_stats_debugfs_host); + fnic->fnic_stats_debugfs_host = NULL; +} diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c new file mode 100644 index 000000000..79ddfaaf7 --- /dev/null +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -0,0 +1,1397 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fnic_io.h" +#include "fnic.h" +#include "fnic_fip.h" +#include "cq_enet_desc.h" +#include "cq_exch_desc.h" + +static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; +struct workqueue_struct *fnic_fip_queue; +struct workqueue_struct *fnic_event_queue; + +static void fnic_set_eth_mode(struct fnic *); +static void fnic_fcoe_send_vlan_req(struct fnic *fnic); +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic); +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *); +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag); +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb); + +void fnic_handle_link(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, link_work); + unsigned long flags; + int old_link_status; + u32 old_link_down_cnt; + u64 old_port_speed, new_port_speed; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + fnic->link_events = 1; /* less work to just set everytime*/ + + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + old_link_down_cnt = fnic->link_down_cnt; + old_link_status = fnic->link_status; + old_port_speed = atomic64_read( + &fnic->fnic_stats.misc_stats.current_port_speed); + + fnic->link_status = vnic_dev_link_status(fnic->vdev); + fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); + + new_port_speed = vnic_dev_port_speed(fnic->vdev); + atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed, + new_port_speed); + if (old_port_speed != new_port_speed) + FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, + "Current vnic speed set to : %llu\n", + new_port_speed); + + switch (vnic_dev_port_speed(fnic->vdev)) { + case DCEM_PORTSPEED_10G: + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT; + fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT; + break; + case DCEM_PORTSPEED_20G: + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT; + fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT; + break; + case DCEM_PORTSPEED_25G: + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT; + fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT; + break; + case DCEM_PORTSPEED_40G: + case DCEM_PORTSPEED_4x10G: + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT; + fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT; + break; + case DCEM_PORTSPEED_100G: + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT; + fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT; + break; + default: + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN; + fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN; + break; + } + + if (old_link_status == fnic->link_status) { + if (!fnic->link_status) { + /* DOWN -> DOWN */ + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fc_trace_set_data(fnic->lport->host->host_no, + FNIC_FC_LE, "Link Status: DOWN->DOWN", + strlen("Link Status: DOWN->DOWN")); + } else { + if (old_link_down_cnt != fnic->link_down_cnt) { + /* UP -> DOWN -> UP */ + fnic->lport->host_stats.link_failure_count++; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fc_trace_set_data( + fnic->lport->host->host_no, + FNIC_FC_LE, + "Link Status:UP_DOWN_UP", + strlen("Link_Status:UP_DOWN_UP") + ); + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "link down\n"); + fcoe_ctlr_link_down(&fnic->ctlr); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + /* 
start FCoE VLAN discovery */ + fnic_fc_trace_set_data( + fnic->lport->host->host_no, + FNIC_FC_LE, + "Link Status: UP_DOWN_UP_VLAN", + strlen( + "Link Status: UP_DOWN_UP_VLAN") + ); + fnic_fcoe_send_vlan_req(fnic); + return; + } + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "link up\n"); + fcoe_ctlr_link_up(&fnic->ctlr); + } else { + /* UP -> UP */ + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fc_trace_set_data( + fnic->lport->host->host_no, FNIC_FC_LE, + "Link Status: UP_UP", + strlen("Link Status: UP_UP")); + } + } + } else if (fnic->link_status) { + /* DOWN -> UP */ + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + /* start FCoE VLAN discovery */ + fnic_fc_trace_set_data( + fnic->lport->host->host_no, + FNIC_FC_LE, "Link Status: DOWN_UP_VLAN", + strlen("Link Status: DOWN_UP_VLAN")); + fnic_fcoe_send_vlan_req(fnic); + return; + } + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); + fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE, + "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP")); + fcoe_ctlr_link_up(&fnic->ctlr); + } else { + /* UP -> DOWN */ + fnic->lport->host_stats.link_failure_count++; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); + fnic_fc_trace_set_data( + fnic->lport->host->host_no, FNIC_FC_LE, + "Link Status: UP_DOWN", + strlen("Link Status: UP_DOWN")); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "deleting fip-timer during link-down\n"); + del_timer_sync(&fnic->fip_timer); + } + fcoe_ctlr_link_down(&fnic->ctlr); + } + +} + +/* + * This function passes incoming fabric frames to libFC + */ +void fnic_handle_frame(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, frame_work); + struct fc_lport *lp = fnic->lport; + unsigned long flags; + struct sk_buff *skb; + struct fc_frame *fp; + + while ((skb = skb_dequeue(&fnic->frame_queue))) { + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + dev_kfree_skb(skb); + return; + } + fp = (struct fc_frame *)skb; + + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. 
+ */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + skb_queue_head(&fnic->frame_queue, skb); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fc_exch_recv(lp, fp); + } +} + +void fnic_fcoe_evlist_free(struct fnic *fnic) +{ + struct fnic_event *fevt = NULL; + struct fnic_event *next = NULL; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (list_empty(&fnic->evlist)) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + list_del(&fevt->list); + kfree(fevt); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +void fnic_handle_event(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, event_work); + struct fnic_event *fevt = NULL; + struct fnic_event *next = NULL; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (list_empty(&fnic->evlist)) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + if (fnic->stop_rx_link_events) { + list_del(&fevt->list); + kfree(fevt); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. + */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_del(&fevt->list); + switch (fevt->event) { + case FNIC_EVT_START_VLAN_DISC: + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fcoe_send_vlan_req(fnic); + spin_lock_irqsave(&fnic->fnic_lock, flags); + break; + case FNIC_EVT_START_FCF_DISC: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start FCF Discovery\n"); + fnic_fcoe_start_fcf_disc(fnic); + break; + default: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Unknown event 0x%x\n", fevt->event); + break; + } + kfree(fevt); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +/** + * is_fnic_fip_flogi_reject() - Check if the Received FIP FLOGI frame is rejected + * @fip: The FCoE controller that received the frame + * @skb: The received FIP frame + * + * Returns non-zero if the frame is rejected with unsupported cmd with + * insufficient resource els explanation. 
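+ * Zero is returned in all other cases, including FLOGI accepts and frames
+ * that cannot be parsed.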
+ */ +static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip, + struct sk_buff *skb) +{ + struct fc_lport *lport = fip->lp; + struct fip_header *fiph; + struct fc_frame_header *fh = NULL; + struct fip_desc *desc; + struct fip_encaps *els; + u16 op; + u8 els_op; + u8 sub; + + size_t rlen; + size_t dlen = 0; + + if (skb_linearize(skb)) + return 0; + + if (skb->len < sizeof(*fiph)) + return 0; + + fiph = (struct fip_header *)skb->data; + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (op != FIP_OP_LS) + return 0; + + if (sub != FIP_SC_REP) + return 0; + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + return 0; + + desc = (struct fip_desc *)(fiph + 1); + dlen = desc->fip_dlen * FIP_BPW; + + if (desc->fip_dtype == FIP_DT_FLOGI) { + + if (dlen < sizeof(*els) + sizeof(*fh) + 1) + return 0; + + els = (struct fip_encaps *)desc; + fh = (struct fc_frame_header *)(els + 1); + + if (!fh) + return 0; + + /* + * ELS command code, reason and explanation should be = Reject, + * unsupported command and insufficient resource + */ + els_op = *(u8 *)(fh + 1); + if (els_op == ELS_LS_RJT) { + shost_printk(KERN_INFO, lport->host, + "Flogi Request Rejected by Switch\n"); + return 1; + } + shost_printk(KERN_INFO, lport->host, + "Flogi Request Accepted by Switch\n"); + } + return 0; +} + +static void fnic_fcoe_send_vlan_req(struct fnic *fnic) +{ + struct fcoe_ctlr *fip = &fnic->ctlr; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + struct sk_buff *skb; + char *eth_fr; + struct fip_vlan *vlan; + u64 vlan_tov; + + fnic_fcoe_reset_vlans(fnic); + fnic->set_vlan(fnic, 0); + + if (printk_ratelimit()) + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Sending VLAN request...\n"); + + skb = dev_alloc_skb(sizeof(struct fip_vlan)); + if (!skb) + return; + + eth_fr = (char *)skb->data; + vlan = (struct fip_vlan *)eth_fr; + + memset(vlan, 0, sizeof(*vlan)); + memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN); + vlan->eth.h_proto = htons(ETH_P_FIP); + + vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + vlan->fip.fip_op = htons(FIP_OP_VLAN); + vlan->fip.fip_subcode = FIP_SC_VL_REQ; + vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); + + vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; + vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW; + memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; + vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; + put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn); + atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs); + + skb_put(skb, sizeof(*vlan)); + skb->protocol = htons(ETH_P_FIP); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + fip->send(fip, skb); + + /* set a timer so that we can retry if there no response */ + vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV); + mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov)); +} + +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb) +{ + struct fcoe_ctlr *fip = &fnic->ctlr; + struct fip_header *fiph; + struct fip_desc *desc; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + u16 vid; + size_t rlen; + size_t dlen; + struct fcoe_vlan *vlan; + u64 sol_time; + unsigned long flags; + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Received VLAN response...\n"); + + fiph = (struct fip_header *) skb->data; + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Received VLAN 
response... OP 0x%x SUB_OP 0x%x\n", + ntohs(fiph->fip_op), fiph->fip_subcode); + + rlen = ntohs(fiph->fip_dl_len) * 4; + fnic_fcoe_reset_vlans(fnic); + spin_lock_irqsave(&fnic->vlans_lock, flags); + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + switch (desc->fip_dtype) { + case FIP_DT_VLAN: + vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan); + shost_printk(KERN_INFO, fnic->lport->host, + "process_vlan_resp: FIP VLAN %d\n", vid); + vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); + if (!vlan) { + /* retry from timer */ + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + goto out; + } + vlan->vid = vid & 0x0fff; + vlan->state = FIP_VLAN_AVAIL; + list_add_tail(&vlan->list, &fnic->vlans); + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + + /* any VLAN descriptors present ? */ + if (list_empty(&fnic->vlans)) { + /* retry from timer */ + atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID); + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "No VLAN descriptors in FIP VLAN response\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + goto out; + } + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + vlan->sol_count++; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* start the solicitation */ + fcoe_ctlr_link_up(fip); + + sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); +out: + return; +} + +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + u64 sol_time; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + vlan->sol_count = 1; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* start the solicitation */ + fcoe_ctlr_link_up(&fnic->ctlr); + + sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); +} + +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag) +{ + unsigned long flags; + struct fcoe_vlan *fvlan; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (list_empty(&fnic->vlans)) { + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return -EINVAL; + } + + fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + if (fvlan->state == FIP_VLAN_USED) { + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return 0; + } + + if (fvlan->state == FIP_VLAN_SENT) { + fvlan->state = FIP_VLAN_USED; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return 0; + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return -EINVAL; +} + +static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev) +{ + struct fnic_event *fevt; + unsigned long flags; + + fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC); + if (!fevt) + return; + + fevt->fnic = fnic; + fevt->event = ev; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_add_tail(&fevt->list, &fnic->evlist); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + schedule_work(&fnic->event_work); +} + +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb) +{ + struct fip_header *fiph; + int ret = 1; + u16 op; + u8 sub; + + if (!skb || !(skb->data)) + return -1; + + if (skb_linearize(skb)) + goto drop; + + fiph = (struct fip_header *)skb->data; + op = ntohs(fiph->fip_op); + sub = 
fiph->fip_subcode; + + if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) + goto drop; + + if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) + goto drop; + + if (op == FIP_OP_DISC && sub == FIP_SC_ADV) { + if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags))) + goto drop; + /* pass it on to fcoe */ + ret = 1; + } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) { + /* set the vlan as used */ + fnic_fcoe_process_vlan_resp(fnic, skb); + ret = 0; + } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) { + /* received CVL request, restart vlan disc */ + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + /* pass it on to fcoe */ + ret = 1; + } +drop: + return ret; +} + +void fnic_handle_fip_frame(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + unsigned long flags; + struct sk_buff *skb; + struct ethhdr *eh; + + while ((skb = skb_dequeue(&fnic->fip_frame_queue))) { + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + dev_kfree_skb(skb); + return; + } + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. + */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + skb_queue_head(&fnic->fip_frame_queue, skb); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + eh = (struct ethhdr *)skb->data; + if (eh->h_proto == htons(ETH_P_FIP)) { + skb_pull(skb, sizeof(*eh)); + if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) { + dev_kfree_skb(skb); + continue; + } + /* + * If there's FLOGI rejects - clear all + * fcf's & restart from scratch + */ + if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { + atomic64_inc( + &fnic_stats->vlan_stats.flogi_rejects); + shost_printk(KERN_INFO, fnic->lport->host, + "Trigger a Link down - VLAN Disc\n"); + fcoe_ctlr_link_down(&fnic->ctlr); + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + dev_kfree_skb(skb); + continue; + } + fcoe_ctlr_recv(&fnic->ctlr, skb); + continue; + } + } +} + +/** + * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame. + * @fnic: fnic instance. + * @skb: Ethernet Frame. + */ +static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) +{ + struct fc_frame *fp; + struct ethhdr *eh; + struct fcoe_hdr *fcoe_hdr; + struct fcoe_crc_eof *ft; + + /* + * Undo VLAN encapsulation if present. 
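+ * The 802.1Q tag, if any, is stripped so the FIP/FCoE checks below see a
+ * plain Ethernet header.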
+ */ + eh = (struct ethhdr *)skb->data; + if (eh->h_proto == htons(ETH_P_8021Q)) { + memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); + eh = skb_pull(skb, VLAN_HLEN); + skb_reset_mac_header(skb); + } + if (eh->h_proto == htons(ETH_P_FIP)) { + if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) { + printk(KERN_ERR "Dropped FIP frame, as firmware " + "uses non-FIP mode, Enable FIP " + "using UCSM\n"); + goto drop; + } + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, + FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) { + printk(KERN_ERR "fnic ctlr frame trace error!!!"); + } + skb_queue_tail(&fnic->fip_frame_queue, skb); + queue_work(fnic_fip_queue, &fnic->fip_frame_work); + return 1; /* let caller know packet was used */ + } + if (eh->h_proto != htons(ETH_P_FCOE)) + goto drop; + skb_set_network_header(skb, sizeof(*eh)); + skb_pull(skb, sizeof(*eh)); + + fcoe_hdr = (struct fcoe_hdr *)skb->data; + if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER) + goto drop; + + fp = (struct fc_frame *)skb; + fc_frame_init(fp); + fr_sof(fp) = fcoe_hdr->fcoe_sof; + skb_pull(skb, sizeof(struct fcoe_hdr)); + skb_reset_transport_header(skb); + + ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft)); + fr_eof(fp) = ft->fcoe_eof; + skb_trim(skb, skb->len - sizeof(*ft)); + return 0; +drop: + dev_kfree_skb_irq(skb); + return -1; +} + +/** + * fnic_update_mac_locked() - set data MAC address and filters. + * @fnic: fnic instance. + * @new: newly-assigned FCoE MAC address. + * + * Called with the fnic lock held. + */ +void fnic_update_mac_locked(struct fnic *fnic, u8 *new) +{ + u8 *ctl = fnic->ctlr.ctl_src_addr; + u8 *data = fnic->data_src_addr; + + if (is_zero_ether_addr(new)) + new = ctl; + if (ether_addr_equal(data, new)) + return; + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new); + if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl)) + vnic_dev_del_addr(fnic->vdev, data); + memcpy(data, new, ETH_ALEN); + if (!ether_addr_equal(new, ctl)) + vnic_dev_add_addr(fnic->vdev, new); +} + +/** + * fnic_update_mac() - set data MAC address and filters. + * @lport: local port. + * @new: newly-assigned FCoE MAC address. + */ +void fnic_update_mac(struct fc_lport *lport, u8 *new) +{ + struct fnic *fnic = lport_priv(lport); + + spin_lock_irq(&fnic->fnic_lock); + fnic_update_mac_locked(fnic, new); + spin_unlock_irq(&fnic->fnic_lock); +} + +/** + * fnic_set_port_id() - set the port_ID after successful FLOGI. + * @lport: local port. + * @port_id: assigned FC_ID. + * @fp: received frame containing the FLOGI accept or NULL. + * + * This is called from libfc when a new FC_ID has been assigned. + * This causes us to reset the firmware to FC_MODE and setup the new MAC + * address and FC_ID. + * + * It is also called with FC_ID 0 when we're logged off. + * + * If the FC_ID is due to point-to-point, fp may be NULL. + */ +void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp) +{ + struct fnic *fnic = lport_priv(lport); + u8 *mac; + int ret; + + FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n", + port_id, fp); + + /* + * If we're clearing the FC_ID, change to use the ctl_src_addr. + * Set ethernet mode to send FLOGI. 
+ */ + if (!port_id) { + fnic_update_mac(lport, fnic->ctlr.ctl_src_addr); + fnic_set_eth_mode(fnic); + return; + } + + if (fp) { + mac = fr_cb(fp)->granted_mac; + if (is_zero_ether_addr(mac)) { + /* non-FIP - FLOGI already accepted - ignore return */ + fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp); + } + fnic_update_mac(lport, mac); + } + + /* Change state to reflect transition to FC mode */ + spin_lock_irq(&fnic->fnic_lock); + if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) + fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; + else { + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Unexpected fnic state %s while" + " processing flogi resp\n", + fnic_state_to_str(fnic->state)); + spin_unlock_irq(&fnic->fnic_lock); + return; + } + spin_unlock_irq(&fnic->fnic_lock); + + /* + * Send FLOGI registration to firmware to set up FC mode. + * The new address will be set up when registration completes. + */ + ret = fnic_flogi_reg_handler(fnic, port_id); + + if (ret < 0) { + spin_lock_irq(&fnic->fnic_lock); + if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) + fnic->state = FNIC_IN_ETH_MODE; + spin_unlock_irq(&fnic->fnic_lock); + } +} + +static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc + *cq_desc, struct vnic_rq_buf *buf, + int skipped __attribute__((unused)), + void *opaque) +{ + struct fnic *fnic = vnic_dev_priv(rq->vdev); + struct sk_buff *skb; + struct fc_frame *fp; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + u8 type, color, eop, sop, ingress_port, vlan_stripped; + u8 fcoe = 0, fcoe_sof, fcoe_eof; + u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0; + u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; + u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc; + u8 fcs_ok = 1, packet_error = 0; + u16 q_number, completed_index, bytes_written = 0, vlan, checksum; + u32 rss_hash; + u16 exchange_id, tmpl; + u8 sof = 0; + u8 eof = 0; + u32 fcp_bytes_written = 0; + unsigned long flags; + + dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, + DMA_FROM_DEVICE); + skb = buf->os_buf; + fp = (struct fc_frame *)skb; + buf->os_buf = NULL; + + cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); + if (type == CQ_DESC_TYPE_RQ_FCP) { + cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc, + &type, &color, &q_number, &completed_index, + &eop, &sop, &fcoe_fc_crc_ok, &exchange_id, + &tmpl, &fcp_bytes_written, &sof, &eof, + &ingress_port, &packet_error, + &fcoe_enc_error, &fcs_ok, &vlan_stripped, + &vlan); + skb_trim(skb, fcp_bytes_written); + fr_sof(fp) = sof; + fr_eof(fp) = eof; + + } else if (type == CQ_DESC_TYPE_RQ_ENET) { + cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, + &type, &color, &q_number, &completed_index, + &ingress_port, &fcoe, &eop, &sop, + &rss_type, &csum_not_calc, &rss_hash, + &bytes_written, &packet_error, + &vlan_stripped, &vlan, &checksum, + &fcoe_sof, &fcoe_fc_crc_ok, + &fcoe_enc_error, &fcoe_eof, + &tcp_udp_csum_ok, &udp, &tcp, + &ipv4_csum_ok, &ipv6, &ipv4, + &ipv4_fragment, &fcs_ok); + skb_trim(skb, bytes_written); + if (!fcs_ok) { + atomic64_inc(&fnic_stats->misc_stats.frame_errors); + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "fcs error. 
dropping packet.\n"); + goto drop; + } + if (fnic_import_rq_eth_pkt(fnic, skb)) + return; + + } else { + /* wrong CQ type*/ + shost_printk(KERN_ERR, fnic->lport->host, + "fnic rq_cmpl wrong cq type x%x\n", type); + goto drop; + } + + if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) { + atomic64_inc(&fnic_stats->misc_stats.frame_errors); + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "fnic rq_cmpl fcoe x%x fcsok x%x" + " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err" + " x%x\n", + fcoe, fcs_ok, packet_error, + fcoe_fc_crc_ok, fcoe_enc_error); + goto drop; + } + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto drop; + } + fr_dev(fp) = fnic->lport; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV, + (char *)skb->data, skb->len)) != 0) { + printk(KERN_ERR "fnic ctlr frame trace error!!!"); + } + + skb_queue_tail(&fnic->frame_queue, skb); + queue_work(fnic_event_queue, &fnic->frame_work); + + return; +drop: + dev_kfree_skb_irq(skb); +} + +static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev, + struct cq_desc *cq_desc, u8 type, + u16 q_number, u16 completed_index, + void *opaque) +{ + struct fnic *fnic = vnic_dev_priv(vdev); + + vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index, + VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv, + NULL); + return 0; +} + +int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do) +{ + unsigned int tot_rq_work_done = 0, cur_work_done; + unsigned int i; + int err; + + for (i = 0; i < fnic->rq_count; i++) { + cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do, + fnic_rq_cmpl_handler_cont, + NULL); + if (cur_work_done) { + err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); + if (err) + shost_printk(KERN_ERR, fnic->lport->host, + "fnic_alloc_rq_frame can't alloc" + " frame\n"); + } + tot_rq_work_done += cur_work_done; + } + + return tot_rq_work_done; +} + +/* + * This function is called once at init time to allocate and fill RQ + * buffers. Subsequently, it is called in the interrupt context after RQ + * buffer processing to replenish the buffers in the RQ + */ +int fnic_alloc_rq_frame(struct vnic_rq *rq) +{ + struct fnic *fnic = vnic_dev_priv(rq->vdev); + struct sk_buff *skb; + u16 len; + dma_addr_t pa; + int r; + + len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM; + skb = dev_alloc_skb(len); + if (!skb) { + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Unable to allocate RQ sk_buff\n"); + return -ENOMEM; + } + skb_reset_mac_header(skb); + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + skb_put(skb, len); + pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE); + if (dma_mapping_error(&fnic->pdev->dev, pa)) { + r = -ENOMEM; + printk(KERN_ERR "PCI mapping failed with error %d\n", r); + goto free_skb; + } + + fnic_queue_rq_desc(rq, skb, pa, len); + return 0; + +free_skb: + kfree_skb(skb); + return r; +} + +void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) +{ + struct fc_frame *fp = buf->os_buf; + struct fnic *fnic = vnic_dev_priv(rq->vdev); + + dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, + DMA_FROM_DEVICE); + + dev_kfree_skb(fp_skb(fp)); + buf->os_buf = NULL; +} + +/** + * fnic_eth_send() - Send Ethernet frame. + * @fip: fcoe_ctlr instance. + * @skb: Ethernet Frame, FIP, without VLAN encapsulation. 
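+ * A VLAN header is inserted in software here when the hardware does not
+ * insert it.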
+ */ +void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fnic *fnic = fnic_from_ctlr(fip); + struct vnic_wq *wq = &fnic->wq[0]; + dma_addr_t pa; + struct ethhdr *eth_hdr; + struct vlan_ethhdr *vlan_hdr; + unsigned long flags; + + if (!fnic->vlan_hw_insert) { + eth_hdr = (struct ethhdr *)skb_mac_header(skb); + vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr)); + memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN); + vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); + vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; + vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, + FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) { + printk(KERN_ERR "fnic ctlr frame trace error!!!"); + } + } else { + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, + FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) { + printk(KERN_ERR "fnic ctlr frame trace error!!!"); + } + } + + pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&fnic->pdev->dev, pa)) { + printk(KERN_ERR "DMA mapping failed\n"); + goto free_skb; + } + + spin_lock_irqsave(&fnic->wq_lock[0], flags); + if (!vnic_wq_desc_avail(wq)) + goto irq_restore; + + fnic_queue_wq_eth_desc(wq, skb, pa, skb->len, + 0 /* hw inserts cos value */, + fnic->vlan_id, 1); + spin_unlock_irqrestore(&fnic->wq_lock[0], flags); + return; + +irq_restore: + spin_unlock_irqrestore(&fnic->wq_lock[0], flags); + dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE); +free_skb: + kfree_skb(skb); +} + +/* + * Send FC frame. + */ +static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) +{ + struct vnic_wq *wq = &fnic->wq[0]; + struct sk_buff *skb; + dma_addr_t pa; + struct ethhdr *eth_hdr; + struct vlan_ethhdr *vlan_hdr; + struct fcoe_hdr *fcoe_hdr; + struct fc_frame_header *fh; + u32 tot_len, eth_hdr_len; + int ret = 0; + unsigned long flags; + + fh = fc_frame_header_get(fp); + skb = fp_skb(fp); + + if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && + fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb)) + return 0; + + if (!fnic->vlan_hw_insert) { + eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); + vlan_hdr = skb_push(skb, eth_hdr_len); + eth_hdr = (struct ethhdr *)vlan_hdr; + vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); + vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE); + vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); + fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1); + } else { + eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr); + eth_hdr = skb_push(skb, eth_hdr_len); + eth_hdr->h_proto = htons(ETH_P_FCOE); + fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); + } + + if (fnic->ctlr.map_dest) + fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); + else + memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN); + memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); + + tot_len = skb->len; + BUG_ON(tot_len % 4); + + memset(fcoe_hdr, 0, sizeof(*fcoe_hdr)); + fcoe_hdr->fcoe_sof = fr_sof(fp); + if (FC_FCOE_VER) + FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER); + + pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE); + if (dma_mapping_error(&fnic->pdev->dev, pa)) { + ret = -ENOMEM; + printk(KERN_ERR "DMA map failed with error %d\n", ret); + goto free_skb_on_err; + } + + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND, + (char *)eth_hdr, tot_len)) != 0) { + printk(KERN_ERR "fnic ctlr frame trace error!!!"); + } + + spin_lock_irqsave(&fnic->wq_lock[0], flags); + + if (!vnic_wq_desc_avail(wq)) { + 
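/* out of WQ descriptors: undo the DMA mapping and drop the frame */ +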
dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE); + ret = -1; + goto irq_restore; + } + + fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp), + 0 /* hw inserts cos value */, + fnic->vlan_id, 1, 1, 1); + +irq_restore: + spin_unlock_irqrestore(&fnic->wq_lock[0], flags); + +free_skb_on_err: + if (ret) + dev_kfree_skb_any(fp_skb(fp)); + + return ret; +} + +/* + * fnic_send + * Routine to send a raw frame + */ +int fnic_send(struct fc_lport *lp, struct fc_frame *fp) +{ + struct fnic *fnic = lport_priv(lp); + unsigned long flags; + + if (fnic->in_remove) { + dev_kfree_skb(fp_skb(fp)); + return -1; + } + + /* + * Queue frame if in a transitional state. + * This occurs while registering the Port_ID / MAC address after FLOGI. + */ + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) { + skb_queue_tail(&fnic->tx_queue, fp_skb(fp)); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return 0; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return fnic_send_frame(fnic, fp); +} + +/** + * fnic_flush_tx() - send queued frames. + * @fnic: fnic device + * + * Send frames that were waiting to go out in FC or Ethernet mode. + * Whenever changing modes we purge queued frames, so these frames should + * be queued for the stable mode that we're in, either FC or Ethernet. + * + * Called without fnic_lock held. + */ +void fnic_flush_tx(struct fnic *fnic) +{ + struct sk_buff *skb; + struct fc_frame *fp; + + while ((skb = skb_dequeue(&fnic->tx_queue))) { + fp = (struct fc_frame *)skb; + fnic_send_frame(fnic, fp); + } +} + +/** + * fnic_set_eth_mode() - put fnic into ethernet mode. + * @fnic: fnic device + * + * Called without fnic lock held. + */ +static void fnic_set_eth_mode(struct fnic *fnic) +{ + unsigned long flags; + enum fnic_state old_state; + int ret; + + spin_lock_irqsave(&fnic->fnic_lock, flags); +again: + old_state = fnic->state; + switch (old_state) { + case FNIC_IN_FC_MODE: + case FNIC_IN_ETH_TRANS_FC_MODE: + default: + fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + ret = fnic_fw_reset_handler(fnic); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) + goto again; + if (ret) + fnic->state = old_state; + break; + + case FNIC_IN_FC_TRANS_ETH_MODE: + case FNIC_IN_ETH_MODE: + break; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +static void fnic_wq_complete_frame_send(struct vnic_wq *wq, + struct cq_desc *cq_desc, + struct vnic_wq_buf *buf, void *opaque) +{ + struct sk_buff *skb = buf->os_buf; + struct fc_frame *fp = (struct fc_frame *)skb; + struct fnic *fnic = vnic_dev_priv(wq->vdev); + + dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, + DMA_TO_DEVICE); + dev_kfree_skb_irq(fp_skb(fp)); + buf->os_buf = NULL; +} + +static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev, + struct cq_desc *cq_desc, u8 type, + u16 q_number, u16 completed_index, + void *opaque) +{ + struct fnic *fnic = vnic_dev_priv(vdev); + unsigned long flags; + + spin_lock_irqsave(&fnic->wq_lock[q_number], flags); + vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index, + fnic_wq_complete_frame_send, NULL); + spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags); + + return 0; +} + +int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do) +{ + unsigned int wq_work_done = 0; + unsigned int i; + + for (i = 0; i < fnic->raw_wq_count; i++) { + wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i], + 
work_to_do, + fnic_wq_cmpl_handler_cont, + NULL); + } + + return wq_work_done; +} + + +void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) +{ + struct fc_frame *fp = buf->os_buf; + struct fnic *fnic = vnic_dev_priv(wq->vdev); + + dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, + DMA_TO_DEVICE); + + dev_kfree_skb(fp_skb(fp)); + buf->os_buf = NULL; +} + +void fnic_fcoe_reset_vlans(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + struct fcoe_vlan *next; + + /* + * indicate a link down to fcoe so that all fcf's are free'd + * might not be required since we did this before sending vlan + * discovery request + */ + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (!list_empty(&fnic->vlans)) { + list_for_each_entry_safe(vlan, next, &fnic->vlans, list) { + list_del(&vlan->list); + kfree(vlan); + } + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); +} + +void fnic_handle_fip_timer(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + u64 sol_time; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fnic->ctlr.mode == FIP_MODE_NON_FIP) + return; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (list_empty(&fnic->vlans)) { + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + /* no vlans available, try again */ + if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) + if (printk_ratelimit()) + shost_printk(KERN_DEBUG, fnic->lport->host, + "Start VLAN Discovery\n"); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + return; + } + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "fip_timer: vlan %d state %d sol_count %d\n", + vlan->vid, vlan->state, vlan->sol_count); + switch (vlan->state) { + case FIP_VLAN_USED: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "FIP VLAN is selected for FC transaction\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + break; + case FIP_VLAN_FAILED: + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + /* if all vlans are in failed state, restart vlan disc */ + if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) + if (printk_ratelimit()) + shost_printk(KERN_DEBUG, fnic->lport->host, + "Start VLAN Discovery\n"); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + break; + case FIP_VLAN_SENT: + if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { + /* + * no response on this vlan, remove from the list. 
+ * Try the next vlan + */ + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Dequeue this VLAN ID %d from list\n", + vlan->vid); + list_del(&vlan->list); + kfree(vlan); + vlan = NULL; + if (list_empty(&fnic->vlans)) { + /* we exhausted all vlans, restart vlan disc */ + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "fip_timer: vlan list empty, " + "trigger vlan disc\n"); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + return; + } + /* check the next vlan */ + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, + list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count); + vlan->sol_count++; + sol_time = jiffies + msecs_to_jiffies + (FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); + break; + } +} diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h new file mode 100644 index 000000000..79f530297 --- /dev/null +++ b/drivers/scsi/fnic/fnic_fip.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _FNIC_FIP_H_ +#define _FNIC_FIP_H_ + + +#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. to choose FCF */ +#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */ +#define FCOE_CTLR_MAX_SOL 8 + +#define FINC_MAX_FLOGI_REJECTS 8 + +struct vlan { + __be16 vid; + __be16 type; +}; + +/* + * VLAN entry. + */ +struct fcoe_vlan { + struct list_head list; + u16 vid; /* vlan ID */ + u16 sol_count; /* no. of sols sent */ + u16 state; /* state */ +}; + +enum fip_vlan_state { + FIP_VLAN_AVAIL = 0, /* don't do anything */ + FIP_VLAN_SENT = 1, /* sent */ + FIP_VLAN_USED = 2, /* succeed */ + FIP_VLAN_FAILED = 3, /* failed to response */ +}; + +struct fip_vlan { + struct ethhdr eth; + struct fip_header fip; + struct { + struct fip_mac_desc mac; + struct fip_wwn_desc wwnn; + } desc; +}; + +#endif /* __FINC_FIP_H_ */ diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h new file mode 100644 index 000000000..5895ead20 --- /dev/null +++ b/drivers/scsi/fnic/fnic_io.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ +#ifndef _FNIC_IO_H_ +#define _FNIC_IO_H_ + +#include + +#define FNIC_DFLT_SG_DESC_CNT 32 +#define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */ +#define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */ + +struct host_sg_desc { + __le64 addr; + __le32 len; + u32 _resvd; +}; + +struct fnic_dflt_sgl_list { + struct host_sg_desc sg_desc[FNIC_DFLT_SG_DESC_CNT]; +}; + +struct fnic_sgl_list { + struct host_sg_desc sg_desc[FNIC_MAX_SG_DESC_CNT]; +}; + +enum fnic_sgl_list_type { + FNIC_SGL_CACHE_DFLT = 0, /* cache with default size sgl */ + FNIC_SGL_CACHE_MAX, /* cache with max size sgl */ + FNIC_SGL_NUM_CACHES /* number of sgl caches */ +}; + +enum fnic_ioreq_state { + FNIC_IOREQ_NOT_INITED = 0, + FNIC_IOREQ_CMD_PENDING, + FNIC_IOREQ_ABTS_PENDING, + FNIC_IOREQ_ABTS_COMPLETE, + FNIC_IOREQ_CMD_COMPLETE, +}; + +struct fnic_io_req { + struct host_sg_desc *sgl_list; /* sgl list */ + void *sgl_list_alloc; /* sgl list address used for free */ + dma_addr_t sense_buf_pa; /* dma address for sense buffer*/ + dma_addr_t sgl_list_pa; /* dma address for sgl list */ + u16 sgl_cnt; + u8 sgl_type; /* device DMA descriptor list type */ + u8 io_completed:1; /* set to 1 when fw completes IO */ + u32 port_id; /* remote port DID */ + unsigned long start_time; /* in jiffies */ + struct completion *abts_done; /* completion for abts */ + struct completion *dr_done; /* completion for device reset */ + unsigned int tag; + struct scsi_cmnd *sc; /* midlayer's cmd pointer */ +}; + +enum fnic_port_speeds { + DCEM_PORTSPEED_NONE = 0, + DCEM_PORTSPEED_1G = 1000, + DCEM_PORTSPEED_10G = 10000, + DCEM_PORTSPEED_20G = 20000, + DCEM_PORTSPEED_25G = 25000, + DCEM_PORTSPEED_40G = 40000, + DCEM_PORTSPEED_4x10G = 41000, + DCEM_PORTSPEED_100G = 100000, +}; +#endif /* _FNIC_IO_H_ */ diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c new file mode 100644 index 000000000..8896758fe --- /dev/null +++ b/drivers/scsi/fnic/fnic_isr.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ +#include +#include +#include +#include +#include +#include +#include "vnic_dev.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "fnic_io.h" +#include "fnic.h" + +static irqreturn_t fnic_isr_legacy(int irq, void *data) +{ + struct fnic *fnic = data; + u32 pba; + unsigned long work_done = 0; + + pba = vnic_intr_legacy_pba(fnic->legacy_pba); + if (!pba) + return IRQ_NONE; + + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + + if (pba & (1 << FNIC_INTX_NOTIFY)) { + vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]); + fnic_handle_link_event(fnic); + } + + if (pba & (1 << FNIC_INTX_ERR)) { + vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]); + fnic_log_q_error(fnic); + } + + if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) { + work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions); + work_done += fnic_wq_cmpl_handler(fnic, -1); + work_done += fnic_rq_cmpl_handler(fnic, -1); + + vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ], + work_done, + 1 /* unmask intr */, + 1 /* reset intr timer */); + } + + return IRQ_HANDLED; +} + +static irqreturn_t fnic_isr_msi(int irq, void *data) +{ + struct fnic *fnic = data; + unsigned long work_done = 0; + + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + + work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions); + work_done += fnic_wq_cmpl_handler(fnic, -1); + work_done += fnic_rq_cmpl_handler(fnic, -1); + + vnic_intr_return_credits(&fnic->intr[0], + work_done, + 1 /* unmask intr */, + 1 /* reset intr timer */); + + return IRQ_HANDLED; +} + +static irqreturn_t fnic_isr_msix_rq(int irq, void *data) +{ + struct fnic *fnic = data; + unsigned long rq_work_done = 0; + + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + + rq_work_done = fnic_rq_cmpl_handler(fnic, -1); + vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ], + rq_work_done, + 1 /* unmask intr */, + 1 /* reset intr timer */); + + return IRQ_HANDLED; +} + +static irqreturn_t fnic_isr_msix_wq(int irq, void *data) +{ + struct fnic *fnic = data; + unsigned long wq_work_done = 0; + + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + + wq_work_done = fnic_wq_cmpl_handler(fnic, -1); + vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ], + wq_work_done, + 1 /* unmask intr */, + 1 /* reset intr timer */); + return IRQ_HANDLED; +} + +static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data) +{ + struct fnic *fnic = data; + unsigned long wq_copy_work_done = 0; + + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + + wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, io_completions); + vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY], + wq_copy_work_done, + 1 /* unmask intr */, + 1 /* reset intr timer */); + return IRQ_HANDLED; +} + +static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data) +{ + struct fnic *fnic = data; + + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + + vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]); + fnic_log_q_error(fnic); + fnic_handle_link_event(fnic); + + return IRQ_HANDLED; +} + +void fnic_free_intr(struct fnic *fnic) +{ + int i; + + switch (vnic_dev_get_intr_mode(fnic->vdev)) { + case VNIC_DEV_INTR_MODE_INTX: + case VNIC_DEV_INTR_MODE_MSI: + 
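/* INTx and MSI both use a single vector, registered at index 0 */ +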
free_irq(pci_irq_vector(fnic->pdev, 0), fnic); + break; + + case VNIC_DEV_INTR_MODE_MSIX: + for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) + if (fnic->msix[i].requested) + free_irq(pci_irq_vector(fnic->pdev, i), + fnic->msix[i].devid); + break; + + default: + break; + } +} + +int fnic_request_intr(struct fnic *fnic) +{ + int err = 0; + int i; + + switch (vnic_dev_get_intr_mode(fnic->vdev)) { + + case VNIC_DEV_INTR_MODE_INTX: + err = request_irq(pci_irq_vector(fnic->pdev, 0), + &fnic_isr_legacy, IRQF_SHARED, DRV_NAME, fnic); + break; + + case VNIC_DEV_INTR_MODE_MSI: + err = request_irq(pci_irq_vector(fnic->pdev, 0), &fnic_isr_msi, + 0, fnic->name, fnic); + break; + + case VNIC_DEV_INTR_MODE_MSIX: + + sprintf(fnic->msix[FNIC_MSIX_RQ].devname, + "%.11s-fcs-rq", fnic->name); + fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq; + fnic->msix[FNIC_MSIX_RQ].devid = fnic; + + sprintf(fnic->msix[FNIC_MSIX_WQ].devname, + "%.11s-fcs-wq", fnic->name); + fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq; + fnic->msix[FNIC_MSIX_WQ].devid = fnic; + + sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname, + "%.11s-scsi-wq", fnic->name); + fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy; + fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic; + + sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname, + "%.11s-err-notify", fnic->name); + fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr = + fnic_isr_msix_err_notify; + fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic; + + for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) { + err = request_irq(pci_irq_vector(fnic->pdev, i), + fnic->msix[i].isr, 0, + fnic->msix[i].devname, + fnic->msix[i].devid); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "MSIX: request_irq" + " failed %d\n", err); + fnic_free_intr(fnic); + break; + } + fnic->msix[i].requested = 1; + } + break; + + default: + break; + } + + return err; +} + +int fnic_set_intr_mode(struct fnic *fnic) +{ + unsigned int n = ARRAY_SIZE(fnic->rq); + unsigned int m = ARRAY_SIZE(fnic->wq); + unsigned int o = ARRAY_SIZE(fnic->wq_copy); + + /* + * Set interrupt mode (INTx, MSI, MSI-X) depending + * system capabilities. 
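+ * The rq/wq/cq counts checked below reflect the vNIC resources discovered
+ * at probe time.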
+ * + * Try MSI-X first + * + * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs + * (last INTR is used for WQ/RQ errors and notification area) + */ + if (fnic->rq_count >= n && + fnic->raw_wq_count >= m && + fnic->wq_copy_count >= o && + fnic->cq_count >= n + m + o) { + int vecs = n + m + o + 1; + + if (pci_alloc_irq_vectors(fnic->pdev, vecs, vecs, + PCI_IRQ_MSIX) == vecs) { + fnic->rq_count = n; + fnic->raw_wq_count = m; + fnic->wq_copy_count = o; + fnic->wq_count = m + o; + fnic->cq_count = n + m + o; + fnic->intr_count = vecs; + fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY; + + FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, + "Using MSI-X Interrupts\n"); + vnic_dev_set_intr_mode(fnic->vdev, + VNIC_DEV_INTR_MODE_MSIX); + return 0; + } + } + + /* + * Next try MSI + * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR + */ + if (fnic->rq_count >= 1 && + fnic->raw_wq_count >= 1 && + fnic->wq_copy_count >= 1 && + fnic->cq_count >= 3 && + fnic->intr_count >= 1 && + pci_alloc_irq_vectors(fnic->pdev, 1, 1, PCI_IRQ_MSI) == 1) { + fnic->rq_count = 1; + fnic->raw_wq_count = 1; + fnic->wq_copy_count = 1; + fnic->wq_count = 2; + fnic->cq_count = 3; + fnic->intr_count = 1; + fnic->err_intr_offset = 0; + + FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, + "Using MSI Interrupts\n"); + vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI); + + return 0; + } + + /* + * Next try INTx + * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs + * 1 INTR is used for all 3 queues, 1 INTR for queue errors + * 1 INTR for notification area + */ + + if (fnic->rq_count >= 1 && + fnic->raw_wq_count >= 1 && + fnic->wq_copy_count >= 1 && + fnic->cq_count >= 3 && + fnic->intr_count >= 3) { + + fnic->rq_count = 1; + fnic->raw_wq_count = 1; + fnic->wq_copy_count = 1; + fnic->cq_count = 3; + fnic->intr_count = 3; + + FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, + "Using Legacy Interrupts\n"); + vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX); + + return 0; + } + + vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); + + return -EINVAL; +} + +void fnic_clear_intr_mode(struct fnic *fnic) +{ + pci_free_irq_vectors(fnic->pdev); + vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX); +} diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c new file mode 100644 index 000000000..f27f9319e --- /dev/null +++ b/drivers/scsi/fnic/fnic_main.c @@ -0,0 +1,1174 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vnic_dev.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "fnic_io.h" +#include "fnic_fip.h" +#include "fnic.h" + +#define PCI_DEVICE_ID_CISCO_FNIC 0x0045 + +/* Timer to poll notification area for events. Used for MSI interrupts */ +#define FNIC_NOTIFY_TIMER_PERIOD (2 * HZ) + +static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES]; +static struct kmem_cache *fnic_io_req_cache; +static LIST_HEAD(fnic_list); +static DEFINE_SPINLOCK(fnic_list_lock); + +/* Supported devices by fnic module */ +static struct pci_device_id fnic_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) }, + { 0, } +}; + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR("Abhijeet Joglekar , " + "Joseph R. 
Eykholt "); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, fnic_id_table); + +unsigned int fnic_log_level; +module_param(fnic_log_level, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels"); + + +unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS; +module_param(io_completions, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(io_completions, "Max CQ entries to process at a time"); + +unsigned int fnic_trace_max_pages = 16; +module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages " + "for fnic trace buffer"); + +unsigned int fnic_fc_trace_max_pages = 64; +module_param(fnic_fc_trace_max_pages, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(fnic_fc_trace_max_pages, + "Total allocated memory pages for fc trace buffer"); + +static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH; +module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); + +static struct libfc_function_template fnic_transport_template = { + .frame_send = fnic_send, + .lport_set_port_id = fnic_set_port_id, + .fcp_abort_io = fnic_empty_scsi_cleanup, + .fcp_cleanup = fnic_empty_scsi_cleanup, + .exch_mgr_reset = fnic_exch_mgr_reset +}; + +static int fnic_slave_alloc(struct scsi_device *sdev) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + + if (!rport || fc_remote_port_chkready(rport)) + return -ENXIO; + + scsi_change_queue_depth(sdev, fnic_max_qdepth); + return 0; +} + +static const struct scsi_host_template fnic_host_template = { + .module = THIS_MODULE, + .name = DRV_NAME, + .queuecommand = fnic_queuecommand, + .eh_timed_out = fc_eh_timed_out, + .eh_abort_handler = fnic_abort_cmd, + .eh_device_reset_handler = fnic_device_reset, + .eh_host_reset_handler = fnic_host_reset, + .slave_alloc = fnic_slave_alloc, + .change_queue_depth = scsi_change_queue_depth, + .this_id = -1, + .cmd_per_lun = 3, + .can_queue = FNIC_DFLT_IO_REQ, + .sg_tablesize = FNIC_MAX_SG_DESC_CNT, + .max_sectors = 0xffff, + .shost_groups = fnic_host_groups, + .track_queue_depth = 1, + .cmd_size = sizeof(struct fnic_cmd_priv), +}; + +static void +fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) +{ + if (timeout) + rport->dev_loss_tmo = timeout; + else + rport->dev_loss_tmo = 1; +} + +static void fnic_get_host_speed(struct Scsi_Host *shost); +static struct scsi_transport_template *fnic_fc_transport; +static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *); +static void fnic_reset_host_stats(struct Scsi_Host *); + +static struct fc_function_template fnic_fc_functions = { + + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fnic_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .show_rport_dev_loss_tmo = 1, + .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo, + .issue_fc_host_lip = fnic_reset, + .get_fc_host_stats = fnic_get_stats, + .reset_fc_host_stats = fnic_reset_host_stats, + 
.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), + .terminate_rport_io = fnic_terminate_rport_io, + .bsg_request = fc_lport_bsg_request, +}; + +static void fnic_get_host_speed(struct Scsi_Host *shost) +{ + struct fc_lport *lp = shost_priv(shost); + struct fnic *fnic = lport_priv(lp); + u32 port_speed = vnic_dev_port_speed(fnic->vdev); + + /* Add in other values as they get defined in fw */ + switch (port_speed) { + case DCEM_PORTSPEED_10G: + fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + break; + case DCEM_PORTSPEED_20G: + fc_host_speed(shost) = FC_PORTSPEED_20GBIT; + break; + case DCEM_PORTSPEED_25G: + fc_host_speed(shost) = FC_PORTSPEED_25GBIT; + break; + case DCEM_PORTSPEED_40G: + case DCEM_PORTSPEED_4x10G: + fc_host_speed(shost) = FC_PORTSPEED_40GBIT; + break; + case DCEM_PORTSPEED_100G: + fc_host_speed(shost) = FC_PORTSPEED_100GBIT; + break; + default: + fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; + break; + } +} + +static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) +{ + int ret; + struct fc_lport *lp = shost_priv(host); + struct fnic *fnic = lport_priv(lp); + struct fc_host_statistics *stats = &lp->host_stats; + struct vnic_stats *vs; + unsigned long flags; + + if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT)) + return stats; + fnic->stats_time = jiffies; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (ret) { + FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, + "fnic: Get vnic stats failed" + " 0x%x", ret); + return stats; + } + vs = fnic->stats; + stats->tx_frames = vs->tx.tx_unicast_frames_ok; + stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4; + stats->rx_frames = vs->rx.rx_unicast_frames_ok; + stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4; + stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors; + stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop; + stats->invalid_crc_count = vs->rx.rx_crc_errors; + stats->seconds_since_last_reset = + (jiffies - fnic->stats_reset_time) / HZ; + stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000); + stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000); + + return stats; +} + +/* + * fnic_dump_fchost_stats + * note : dumps fc_statistics into system logs + */ +void fnic_dump_fchost_stats(struct Scsi_Host *host, + struct fc_host_statistics *stats) +{ + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: seconds since last reset = %llu\n", + stats->seconds_since_last_reset); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: tx frames = %llu\n", + stats->tx_frames); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: tx words = %llu\n", + stats->tx_words); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: rx frames = %llu\n", + stats->rx_frames); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: rx words = %llu\n", + stats->rx_words); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: lip count = %llu\n", + stats->lip_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: nos count = %llu\n", + stats->nos_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: error frames = %llu\n", + stats->error_frames); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: dumped frames = %llu\n", + stats->dumped_frames); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: link failure count = %llu\n", + stats->link_failure_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: loss of sync count = %llu\n", + stats->loss_of_sync_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: loss of signal count = %llu\n", + 
stats->loss_of_signal_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: prim seq protocol err count = %llu\n", + stats->prim_seq_protocol_err_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: invalid tx word count= %llu\n", + stats->invalid_tx_word_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: invalid crc count = %llu\n", + stats->invalid_crc_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: fcp input requests = %llu\n", + stats->fcp_input_requests); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: fcp output requests = %llu\n", + stats->fcp_output_requests); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: fcp control requests = %llu\n", + stats->fcp_control_requests); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: fcp input megabytes = %llu\n", + stats->fcp_input_megabytes); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: fcp output megabytes = %llu\n", + stats->fcp_output_megabytes); + return; +} + +/* + * fnic_reset_host_stats : clears host stats + * note : called when reset_statistics set under sysfs dir + */ +static void fnic_reset_host_stats(struct Scsi_Host *host) +{ + int ret; + struct fc_lport *lp = shost_priv(host); + struct fnic *fnic = lport_priv(lp); + struct fc_host_statistics *stats; + unsigned long flags; + + /* dump current stats, before clearing them */ + stats = fnic_get_stats(host); + fnic_dump_fchost_stats(host, stats); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + ret = vnic_dev_stats_clear(fnic->vdev); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (ret) { + FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, + "fnic: Reset vnic stats failed" + " 0x%x", ret); + return; + } + fnic->stats_reset_time = jiffies; + memset(stats, 0, sizeof(*stats)); + + return; +} + +void fnic_log_q_error(struct fnic *fnic) +{ + unsigned int i; + u32 error_status; + + for (i = 0; i < fnic->raw_wq_count; i++) { + error_status = ioread32(&fnic->wq[i].ctrl->error_status); + if (error_status) + shost_printk(KERN_ERR, fnic->lport->host, + "WQ[%d] error_status" + " %d\n", i, error_status); + } + + for (i = 0; i < fnic->rq_count; i++) { + error_status = ioread32(&fnic->rq[i].ctrl->error_status); + if (error_status) + shost_printk(KERN_ERR, fnic->lport->host, + "RQ[%d] error_status" + " %d\n", i, error_status); + } + + for (i = 0; i < fnic->wq_copy_count; i++) { + error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status); + if (error_status) + shost_printk(KERN_ERR, fnic->lport->host, + "CWQ[%d] error_status" + " %d\n", i, error_status); + } +} + +void fnic_handle_link_event(struct fnic *fnic) +{ + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + queue_work(fnic_event_queue, &fnic->link_work); + +} + +static int fnic_notify_set(struct fnic *fnic) +{ + int err; + + switch (vnic_dev_get_intr_mode(fnic->vdev)) { + case VNIC_DEV_INTR_MODE_INTX: + err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY); + break; + case VNIC_DEV_INTR_MODE_MSI: + err = vnic_dev_notify_set(fnic->vdev, -1); + break; + case VNIC_DEV_INTR_MODE_MSIX: + err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY); + break; + default: + shost_printk(KERN_ERR, fnic->lport->host, + "Interrupt mode should be set up" + " before devcmd notify set %d\n", + vnic_dev_get_intr_mode(fnic->vdev)); + err = -1; + break; + } + + return err; +} + +static void fnic_notify_timer(struct timer_list *t) +{ + struct fnic *fnic = from_timer(fnic, t, 
notify_timer); + + fnic_handle_link_event(fnic); + mod_timer(&fnic->notify_timer, + round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD)); +} + +static void fnic_fip_notify_timer(struct timer_list *t) +{ + struct fnic *fnic = from_timer(fnic, t, fip_timer); + + fnic_handle_fip_timer(fnic); +} + +static void fnic_notify_timer_start(struct fnic *fnic) +{ + switch (vnic_dev_get_intr_mode(fnic->vdev)) { + case VNIC_DEV_INTR_MODE_MSI: + /* + * Schedule first timeout immediately. The driver is + * initialized and ready to look for link up notification + */ + mod_timer(&fnic->notify_timer, jiffies); + break; + default: + /* Using intr for notification for INTx/MSI-X */ + break; + } +} + +static int fnic_dev_wait(struct vnic_dev *vdev, + int (*start)(struct vnic_dev *, int), + int (*finished)(struct vnic_dev *, int *), + int arg) +{ + unsigned long time; + int done; + int err; + int count; + + count = 0; + + err = start(vdev, arg); + if (err) + return err; + + /* Wait for func to complete. + * Sometimes schedule_timeout_uninterruptible takes a long time + * to wake up, so we do not retry as we are only waiting for + * 2 seconds in the while loop. By adding count, we make sure + * we try at least three times before returning -ETIMEDOUT + */ + time = jiffies + (HZ * 2); + do { + err = finished(vdev, &done); + count++; + if (err) + return err; + if (done) + return 0; + schedule_timeout_uninterruptible(HZ / 10); + } while (time_after(time, jiffies) || (count < 3)); + + return -ETIMEDOUT; +} + +static int fnic_cleanup(struct fnic *fnic) +{ + unsigned int i; + int err; + + vnic_dev_disable(fnic->vdev); + for (i = 0; i < fnic->intr_count; i++) + vnic_intr_mask(&fnic->intr[i]); + + for (i = 0; i < fnic->rq_count; i++) { + err = vnic_rq_disable(&fnic->rq[i]); + if (err) + return err; + } + for (i = 0; i < fnic->raw_wq_count; i++) { + err = vnic_wq_disable(&fnic->wq[i]); + if (err) + return err; + } + for (i = 0; i < fnic->wq_copy_count; i++) { + err = vnic_wq_copy_disable(&fnic->wq_copy[i]); + if (err) + return err; + } + + /* Clean up completed IOs and FCS frames */ + fnic_wq_copy_cmpl_handler(fnic, io_completions); + fnic_wq_cmpl_handler(fnic, -1); + fnic_rq_cmpl_handler(fnic, -1); + + /* Clean up the IOs and FCS frames that have not completed */ + for (i = 0; i < fnic->raw_wq_count; i++) + vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf); + for (i = 0; i < fnic->rq_count; i++) + vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); + for (i = 0; i < fnic->wq_copy_count; i++) + vnic_wq_copy_clean(&fnic->wq_copy[i], + fnic_wq_copy_cleanup_handler); + + for (i = 0; i < fnic->cq_count; i++) + vnic_cq_clean(&fnic->cq[i]); + for (i = 0; i < fnic->intr_count; i++) + vnic_intr_clean(&fnic->intr[i]); + + mempool_destroy(fnic->io_req_pool); + for (i = 0; i < FNIC_SGL_NUM_CACHES; i++) + mempool_destroy(fnic->io_sgl_pool[i]); + + return 0; +} + +static void fnic_iounmap(struct fnic *fnic) +{ + if (fnic->bar0.vaddr) + iounmap(fnic->bar0.vaddr); +} + +/** + * fnic_get_mac() - get assigned data MAC address for FIP code. + * @lport: local port.
+ */ +static u8 *fnic_get_mac(struct fc_lport *lport) +{ + struct fnic *fnic = lport_priv(lport); + + return fnic->data_src_addr; +} + +static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id) +{ + vnic_dev_set_default_vlan(fnic->vdev, vlan_id); +} + +static int fnic_scsi_drv_init(struct fnic *fnic) +{ + struct Scsi_Host *host = fnic->lport->host; + + /* Configure maximum outstanding IO reqs*/ + if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) + host->can_queue = min_t(u32, FNIC_MAX_IO_REQ, + max_t(u32, FNIC_MIN_IO_REQ, + fnic->config.io_throttle_count)); + + fnic->fnic_max_tag_id = host->can_queue; + host->max_lun = fnic->config.luns_per_tgt; + host->max_id = FNIC_MAX_FCP_TARGET; + host->max_cmd_len = FCOE_MAX_CMD_LEN; + + host->nr_hw_queues = fnic->wq_copy_count; + if (host->nr_hw_queues > 1) + shost_printk(KERN_ERR, host, + "fnic: blk-mq is not supported"); + + host->nr_hw_queues = fnic->wq_copy_count = 1; + + shost_printk(KERN_INFO, host, + "fnic: can_queue: %d max_lun: %llu", + host->can_queue, host->max_lun); + + shost_printk(KERN_INFO, host, + "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", + host->max_id, host->max_cmd_len, host->nr_hw_queues); + + return 0; +} + +static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct Scsi_Host *host; + struct fc_lport *lp; + struct fnic *fnic; + mempool_t *pool; + int err; + int i; + unsigned long flags; + + /* + * Allocate SCSI Host and set up association between host, + * local port, and fnic + */ + lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic)); + if (!lp) { + printk(KERN_ERR PFX "Unable to alloc libfc local port\n"); + err = -ENOMEM; + goto err_out; + } + host = lp->host; + fnic = lport_priv(lp); + fnic->lport = lp; + fnic->ctlr.lp = lp; + + fnic->link_events = 0; + + snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, + host->host_no); + + host->transportt = fnic_fc_transport; + + fnic_stats_debugfs_init(fnic); + + /* Setup PCI resources */ + pci_set_drvdata(pdev, fnic); + + fnic->pdev = pdev; + + err = pci_enable_device(pdev); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Cannot enable PCI device, aborting.\n"); + goto err_out_free_hba; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Cannot enable PCI resources, aborting\n"); + goto err_out_disable_device; + } + + pci_set_master(pdev); + + /* Query PCI controller on system for DMA addressing + * limitation for the device. Try 47-bit first, and + * fail to 32-bit. Cisco VIC supports 47 bits only. 
+ */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "No usable DMA configuration " + "aborting\n"); + goto err_out_release_regions; + } + } + + /* Map vNIC resources from BAR0 */ + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + shost_printk(KERN_ERR, fnic->lport->host, + "BAR0 not memory-map'able, aborting.\n"); + err = -ENODEV; + goto err_out_release_regions; + } + + fnic->bar0.vaddr = pci_iomap(pdev, 0, 0); + fnic->bar0.bus_addr = pci_resource_start(pdev, 0); + fnic->bar0.len = pci_resource_len(pdev, 0); + + if (!fnic->bar0.vaddr) { + shost_printk(KERN_ERR, fnic->lport->host, + "Cannot memory-map BAR0 res hdr, " + "aborting.\n"); + err = -ENODEV; + goto err_out_release_regions; + } + + fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0); + if (!fnic->vdev) { + shost_printk(KERN_ERR, fnic->lport->host, + "vNIC registration failed, " + "aborting.\n"); + err = -ENODEV; + goto err_out_iounmap; + } + + err = vnic_dev_cmd_init(fnic->vdev); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "vnic_dev_cmd_init() returns %d, aborting\n", + err); + goto err_out_vnic_unregister; + } + + err = fnic_dev_wait(fnic->vdev, vnic_dev_open, + vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "vNIC dev open failed, aborting.\n"); + goto err_out_dev_cmd_deinit; + } + + err = vnic_dev_init(fnic->vdev, 0); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "vNIC dev init failed, aborting.\n"); + goto err_out_dev_close; + } + + err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "vNIC get MAC addr failed \n"); + goto err_out_dev_close; + } + /* set data_src for point-to-point mode and to keep it non-zero */ + memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN); + + /* Get vNIC configuration */ + err = fnic_get_vnic_config(fnic); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Get vNIC configuration failed, " + "aborting.\n"); + goto err_out_dev_close; + } + + fnic_scsi_drv_init(fnic); + + fnic_get_res_counts(fnic); + + err = fnic_set_intr_mode(fnic); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Failed to set intr mode, " + "aborting.\n"); + goto err_out_dev_close; + } + + err = fnic_alloc_vnic_resources(fnic); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Failed to alloc vNIC resources, " + "aborting.\n"); + goto err_out_clear_intr; + } + + + /* initialize all fnic locks */ + spin_lock_init(&fnic->fnic_lock); + + for (i = 0; i < FNIC_WQ_MAX; i++) + spin_lock_init(&fnic->wq_lock[i]); + + for (i = 0; i < FNIC_WQ_COPY_MAX; i++) { + spin_lock_init(&fnic->wq_copy_lock[i]); + fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK; + fnic->fw_ack_recd[i] = 0; + fnic->fw_ack_index[i] = -1; + } + + for (i = 0; i < FNIC_IO_LOCKS; i++) + spin_lock_init(&fnic->io_req_lock[i]); + + spin_lock_init(&fnic->sgreset_lock); + + err = -ENOMEM; + fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); + if (!fnic->io_req_pool) + goto err_out_free_resources; + + pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); + if (!pool) + goto err_out_free_ioreq_pool; + fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool; + + pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); + if (!pool) + goto err_out_free_dflt_pool; + 
fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool; + + /* setup vlan config, hw inserts vlan header */ + fnic->vlan_hw_insert = 1; + fnic->vlan_id = 0; + + /* Initialize the FIP fcoe_ctrl struct */ + fnic->ctlr.send = fnic_eth_send; + fnic->ctlr.update_mac = fnic_update_mac; + fnic->ctlr.get_src_addr = fnic_get_mac; + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + shost_printk(KERN_INFO, fnic->lport->host, + "firmware supports FIP\n"); + /* enable directed and multicast */ + vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); + vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); + vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); + fnic->set_vlan = fnic_set_vlan; + fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); + timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0); + spin_lock_init(&fnic->vlans_lock); + INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); + INIT_WORK(&fnic->event_work, fnic_handle_event); + skb_queue_head_init(&fnic->fip_frame_queue); + INIT_LIST_HEAD(&fnic->evlist); + INIT_LIST_HEAD(&fnic->vlans); + } else { + shost_printk(KERN_INFO, fnic->lport->host, + "firmware uses non-FIP mode\n"); + fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP); + fnic->ctlr.state = FIP_ST_NON_FIP; + } + fnic->state = FNIC_IN_FC_MODE; + + atomic_set(&fnic->in_flight, 0); + fnic->state_flags = FNIC_FLAGS_NONE; + + /* Enable hardware stripping of vlan header on ingress */ + fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1); + + /* Setup notification buffer area */ + err = fnic_notify_set(fnic); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Failed to alloc notify buffer, aborting.\n"); + goto err_out_free_max_pool; + } + + /* Setup notify timer when using MSI interrupts */ + if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) + timer_setup(&fnic->notify_timer, fnic_notify_timer, 0); + + /* allocate RQ buffers and post them to RQ*/ + for (i = 0; i < fnic->rq_count; i++) { + vnic_rq_enable(&fnic->rq[i]); + err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "fnic_alloc_rq_frame can't alloc " + "frame\n"); + goto err_out_free_rq_buf; + } + } + + /* + * Initialization done with PCI system, hardware, firmware. 
+ * Add host to SCSI + */ + err = scsi_add_host(lp->host, &pdev->dev); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "fnic: scsi_add_host failed...exiting\n"); + goto err_out_free_rq_buf; + } + + /* Start local port initiatialization */ + + lp->link_up = 0; + + lp->max_retry_count = fnic->config.flogi_retries; + lp->max_rport_retry_count = fnic->config.plogi_retries; + lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + FCP_SPPF_CONF_COMPL); + if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) + lp->service_params |= FCP_SPPF_RETRY; + + lp->boot_time = jiffies; + lp->e_d_tov = fnic->config.ed_tov; + lp->r_a_tov = fnic->config.ra_tov; + lp->link_supported_speeds = FC_PORTSPEED_10GBIT; + fc_set_wwnn(lp, fnic->config.node_wwn); + fc_set_wwpn(lp, fnic->config.port_wwn); + + fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0); + + if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START, + FCPIO_HOST_EXCH_RANGE_END, NULL)) { + err = -ENOMEM; + goto err_out_remove_scsi_host; + } + + fc_lport_init_stats(lp); + fnic->stats_reset_time = jiffies; + + fc_lport_config(lp); + + if (fc_set_mfs(lp, fnic->config.maxdatafieldsize + + sizeof(struct fc_frame_header))) { + err = -EINVAL; + goto err_out_free_exch_mgr; + } + fc_host_maxframe_size(lp->host) = lp->mfs; + fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000; + + sprintf(fc_host_symbolic_name(lp->host), + DRV_NAME " v" DRV_VERSION " over %s", fnic->name); + + spin_lock_irqsave(&fnic_list_lock, flags); + list_add_tail(&fnic->list, &fnic_list); + spin_unlock_irqrestore(&fnic_list_lock, flags); + + INIT_WORK(&fnic->link_work, fnic_handle_link); + INIT_WORK(&fnic->frame_work, fnic_handle_frame); + skb_queue_head_init(&fnic->frame_queue); + skb_queue_head_init(&fnic->tx_queue); + + /* Enable all queues */ + for (i = 0; i < fnic->raw_wq_count; i++) + vnic_wq_enable(&fnic->wq[i]); + for (i = 0; i < fnic->wq_copy_count; i++) + vnic_wq_copy_enable(&fnic->wq_copy[i]); + + fc_fabric_login(lp); + + err = fnic_request_intr(fnic); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Unable to request irq.\n"); + goto err_out_free_exch_mgr; + } + + vnic_dev_enable(fnic->vdev); + + for (i = 0; i < fnic->intr_count; i++) + vnic_intr_unmask(&fnic->intr[i]); + + fnic_notify_timer_start(fnic); + + return 0; + +err_out_free_exch_mgr: + fc_exch_mgr_free(lp); +err_out_remove_scsi_host: + fc_remove_host(lp->host); + scsi_remove_host(lp->host); +err_out_free_rq_buf: + for (i = 0; i < fnic->rq_count; i++) + vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); + vnic_dev_notify_unset(fnic->vdev); +err_out_free_max_pool: + mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]); +err_out_free_dflt_pool: + mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]); +err_out_free_ioreq_pool: + mempool_destroy(fnic->io_req_pool); +err_out_free_resources: + fnic_free_vnic_resources(fnic); +err_out_clear_intr: + fnic_clear_intr_mode(fnic); +err_out_dev_close: + vnic_dev_close(fnic->vdev); +err_out_dev_cmd_deinit: +err_out_vnic_unregister: + vnic_dev_unregister(fnic->vdev); +err_out_iounmap: + fnic_iounmap(fnic); +err_out_release_regions: + pci_release_regions(pdev); +err_out_disable_device: + pci_disable_device(pdev); +err_out_free_hba: + fnic_stats_debugfs_remove(fnic); + scsi_host_put(lp->host); +err_out: + return err; +} + +static void fnic_remove(struct pci_dev *pdev) +{ + struct fnic *fnic = pci_get_drvdata(pdev); + struct fc_lport *lp = fnic->lport; + unsigned long flags; + + /* + * Mark state so that the workqueue 
thread stops forwarding + * received frames and link events to the local port. ISR and + * other threads that can queue work items will also stop + * creating work items on the fnic workqueue + */ + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->stop_rx_link_events = 1; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) + del_timer_sync(&fnic->notify_timer); + + /* + * Flush the fnic event queue. After this call, there should + * be no event queued for this fnic device in the workqueue + */ + flush_workqueue(fnic_event_queue); + skb_queue_purge(&fnic->frame_queue); + skb_queue_purge(&fnic->tx_queue); + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + del_timer_sync(&fnic->fip_timer); + skb_queue_purge(&fnic->fip_frame_queue); + fnic_fcoe_reset_vlans(fnic); + fnic_fcoe_evlist_free(fnic); + } + + /* + * Log off the fabric. This stops all remote ports, dns port, + * logs off the fabric. This flushes all rport, disc, lport work + * before returning + */ + fc_fabric_logoff(fnic->lport); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->in_remove = 1; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fcoe_ctlr_destroy(&fnic->ctlr); + fc_lport_destroy(lp); + fnic_stats_debugfs_remove(fnic); + + /* + * This stops the fnic device, masks all interrupts. Completed + * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are + * cleaned up + */ + fnic_cleanup(fnic); + + BUG_ON(!skb_queue_empty(&fnic->frame_queue)); + BUG_ON(!skb_queue_empty(&fnic->tx_queue)); + + spin_lock_irqsave(&fnic_list_lock, flags); + list_del(&fnic->list); + spin_unlock_irqrestore(&fnic_list_lock, flags); + + fc_remove_host(fnic->lport->host); + scsi_remove_host(fnic->lport->host); + fc_exch_mgr_free(fnic->lport); + vnic_dev_notify_unset(fnic->vdev); + fnic_free_intr(fnic); + fnic_free_vnic_resources(fnic); + fnic_clear_intr_mode(fnic); + vnic_dev_close(fnic->vdev); + vnic_dev_unregister(fnic->vdev); + fnic_iounmap(fnic); + pci_release_regions(pdev); + pci_disable_device(pdev); + scsi_host_put(lp->host); +} + +static struct pci_driver fnic_driver = { + .name = DRV_NAME, + .id_table = fnic_id_table, + .probe = fnic_probe, + .remove = fnic_remove, +}; + +static int __init fnic_init_module(void) +{ + size_t len; + int err = 0; + + printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); + + /* Create debugfs entries for fnic */ + err = fnic_debugfs_init(); + if (err < 0) { + printk(KERN_ERR PFX "Failed to create fnic directory " + "for tracing and stats logging\n"); + fnic_debugfs_terminate(); + } + + /* Allocate memory for trace buffer */ + err = fnic_trace_buf_init(); + if (err < 0) { + printk(KERN_ERR PFX + "Trace buffer initialization Failed. 
" + "Fnic Tracing utility is disabled\n"); + fnic_trace_free(); + } + + /* Allocate memory for fc trace buffer */ + err = fnic_fc_trace_init(); + if (err < 0) { + printk(KERN_ERR PFX "FC trace buffer initialization Failed " + "FC frame tracing utility is disabled\n"); + fnic_fc_trace_free(); + } + + /* Create a cache for allocation of default size sgls */ + len = sizeof(struct fnic_dflt_sgl_list); + fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create + ("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, + SLAB_HWCACHE_ALIGN, + NULL); + if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) { + printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n"); + err = -ENOMEM; + goto err_create_fnic_sgl_slab_dflt; + } + + /* Create a cache for allocation of max size sgls*/ + len = sizeof(struct fnic_sgl_list); + fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create + ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, + SLAB_HWCACHE_ALIGN, + NULL); + if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) { + printk(KERN_ERR PFX "failed to create fnic max sgl slab\n"); + err = -ENOMEM; + goto err_create_fnic_sgl_slab_max; + } + + /* Create a cache of io_req structs for use via mempool */ + fnic_io_req_cache = kmem_cache_create("fnic_io_req", + sizeof(struct fnic_io_req), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!fnic_io_req_cache) { + printk(KERN_ERR PFX "failed to create fnic io_req slab\n"); + err = -ENOMEM; + goto err_create_fnic_ioreq_slab; + } + + fnic_event_queue = create_singlethread_workqueue("fnic_event_wq"); + if (!fnic_event_queue) { + printk(KERN_ERR PFX "fnic work queue create failed\n"); + err = -ENOMEM; + goto err_create_fnic_workq; + } + + fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q"); + if (!fnic_fip_queue) { + printk(KERN_ERR PFX "fnic FIP work queue create failed\n"); + err = -ENOMEM; + goto err_create_fip_workq; + } + + fnic_fc_transport = fc_attach_transport(&fnic_fc_functions); + if (!fnic_fc_transport) { + printk(KERN_ERR PFX "fc_attach_transport error\n"); + err = -ENOMEM; + goto err_fc_transport; + } + + /* register the driver with PCI system */ + err = pci_register_driver(&fnic_driver); + if (err < 0) { + printk(KERN_ERR PFX "pci register error\n"); + goto err_pci_register; + } + return err; + +err_pci_register: + fc_release_transport(fnic_fc_transport); +err_fc_transport: + destroy_workqueue(fnic_fip_queue); +err_create_fip_workq: + destroy_workqueue(fnic_event_queue); +err_create_fnic_workq: + kmem_cache_destroy(fnic_io_req_cache); +err_create_fnic_ioreq_slab: + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); +err_create_fnic_sgl_slab_max: + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); +err_create_fnic_sgl_slab_dflt: + fnic_trace_free(); + fnic_fc_trace_free(); + fnic_debugfs_terminate(); + return err; +} + +static void __exit fnic_cleanup_module(void) +{ + pci_unregister_driver(&fnic_driver); + destroy_workqueue(fnic_event_queue); + if (fnic_fip_queue) + destroy_workqueue(fnic_fip_queue); + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); + kmem_cache_destroy(fnic_io_req_cache); + fc_release_transport(fnic_fc_transport); + fnic_trace_free(); + fnic_fc_trace_free(); + fnic_debugfs_terminate(); +} + +module_init(fnic_init_module); +module_exit(fnic_cleanup_module); diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c new file mode 100644 index 000000000..a1c9cfcac --- /dev/null +++ b/drivers/scsi/fnic/fnic_res.c @@ -0,0 +1,431 @@ +// 
SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ +#include +#include +#include +#include "wq_enet_desc.h" +#include "rq_enet_desc.h" +#include "cq_enet_desc.h" +#include "vnic_resource.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "vnic_nic.h" +#include "fnic.h" + +int fnic_get_vnic_config(struct fnic *fnic) +{ + struct vnic_fc_config *c = &fnic->config; + int err; + +#define GET_CONFIG(m) \ + do { \ + err = vnic_dev_spec(fnic->vdev, \ + offsetof(struct vnic_fc_config, m), \ + sizeof(c->m), &c->m); \ + if (err) { \ + shost_printk(KERN_ERR, fnic->lport->host, \ + "Error getting %s, %d\n", #m, \ + err); \ + return err; \ + } \ + } while (0); + + GET_CONFIG(node_wwn); + GET_CONFIG(port_wwn); + GET_CONFIG(wq_enet_desc_count); + GET_CONFIG(wq_copy_desc_count); + GET_CONFIG(rq_desc_count); + GET_CONFIG(maxdatafieldsize); + GET_CONFIG(ed_tov); + GET_CONFIG(ra_tov); + GET_CONFIG(intr_timer); + GET_CONFIG(intr_timer_type); + GET_CONFIG(flags); + GET_CONFIG(flogi_retries); + GET_CONFIG(flogi_timeout); + GET_CONFIG(plogi_retries); + GET_CONFIG(plogi_timeout); + GET_CONFIG(io_throttle_count); + GET_CONFIG(link_down_timeout); + GET_CONFIG(port_down_timeout); + GET_CONFIG(port_down_io_retries); + GET_CONFIG(luns_per_tgt); + + c->wq_enet_desc_count = + min_t(u32, VNIC_FNIC_WQ_DESCS_MAX, + max_t(u32, VNIC_FNIC_WQ_DESCS_MIN, + c->wq_enet_desc_count)); + c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16); + + c->wq_copy_desc_count = + min_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MAX, + max_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MIN, + c->wq_copy_desc_count)); + c->wq_copy_desc_count = ALIGN(c->wq_copy_desc_count, 16); + + c->rq_desc_count = + min_t(u32, VNIC_FNIC_RQ_DESCS_MAX, + max_t(u32, VNIC_FNIC_RQ_DESCS_MIN, + c->rq_desc_count)); + c->rq_desc_count = ALIGN(c->rq_desc_count, 16); + + c->maxdatafieldsize = + min_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MAX, + max_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MIN, + c->maxdatafieldsize)); + c->ed_tov = + min_t(u32, VNIC_FNIC_EDTOV_MAX, + max_t(u32, VNIC_FNIC_EDTOV_MIN, + c->ed_tov)); + + c->ra_tov = + min_t(u32, VNIC_FNIC_RATOV_MAX, + max_t(u32, VNIC_FNIC_RATOV_MIN, + c->ra_tov)); + + c->flogi_retries = + min_t(u32, VNIC_FNIC_FLOGI_RETRIES_MAX, c->flogi_retries); + + c->flogi_timeout = + min_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MAX, + max_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MIN, + c->flogi_timeout)); + + c->plogi_retries = + min_t(u32, VNIC_FNIC_PLOGI_RETRIES_MAX, c->plogi_retries); + + c->plogi_timeout = + min_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MAX, + max_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MIN, + c->plogi_timeout)); + + c->io_throttle_count = + min_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MAX, + max_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MIN, + c->io_throttle_count)); + + c->link_down_timeout = + min_t(u32, VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX, + c->link_down_timeout); + + c->port_down_timeout = + min_t(u32, VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX, + c->port_down_timeout); + + c->port_down_io_retries = + min_t(u32, VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX, + c->port_down_io_retries); + + c->luns_per_tgt = + min_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MAX, + max_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MIN, + c->luns_per_tgt)); + + c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer); + c->intr_timer_type = c->intr_timer_type; + + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC MAC addr %pM " + "wq/wq_copy/rq 
%d/%d/%d\n", + fnic->ctlr.ctl_src_addr, + c->wq_enet_desc_count, c->wq_copy_desc_count, + c->rq_desc_count); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC node wwn %llx port wwn %llx\n", + c->node_wwn, c->port_wwn); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC ed_tov %d ra_tov %d\n", + c->ed_tov, c->ra_tov); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC mtu %d intr timer %d\n", + c->maxdatafieldsize, c->intr_timer); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC flags 0x%x luns per tgt %d\n", + c->flags, c->luns_per_tgt); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC flogi_retries %d flogi timeout %d\n", + c->flogi_retries, c->flogi_timeout); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC plogi retries %d plogi timeout %d\n", + c->plogi_retries, c->plogi_timeout); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC io throttle count %d link dn timeout %d\n", + c->io_throttle_count, c->link_down_timeout); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC port dn io retries %d port dn timeout %d\n", + c->port_down_io_retries, c->port_down_timeout); + + return 0; +} + +int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu, + u8 rss_hash_type, + u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, + u8 tso_ipid_split_en, u8 ig_vlan_strip_en) +{ + u64 a0, a1; + u32 nic_cfg; + int wait = 1000; + + vnic_set_nic_cfg(&nic_cfg, rss_default_cpu, + rss_hash_type, rss_hash_bits, rss_base_cpu, + rss_enable, tso_ipid_split_en, ig_vlan_strip_en); + + a0 = nic_cfg; + a1 = 0; + + return vnic_dev_cmd(fnic->vdev, CMD_NIC_CFG, &a0, &a1, wait); +} + +void fnic_get_res_counts(struct fnic *fnic) +{ + fnic->wq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_WQ); + fnic->raw_wq_count = fnic->wq_count - 1; + fnic->wq_copy_count = fnic->wq_count - fnic->raw_wq_count; + fnic->rq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_RQ); + fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ); + fnic->intr_count = vnic_dev_get_res_count(fnic->vdev, + RES_TYPE_INTR_CTRL); +} + +void fnic_free_vnic_resources(struct fnic *fnic) +{ + unsigned int i; + + for (i = 0; i < fnic->raw_wq_count; i++) + vnic_wq_free(&fnic->wq[i]); + + for (i = 0; i < fnic->wq_copy_count; i++) + vnic_wq_copy_free(&fnic->wq_copy[i]); + + for (i = 0; i < fnic->rq_count; i++) + vnic_rq_free(&fnic->rq[i]); + + for (i = 0; i < fnic->cq_count; i++) + vnic_cq_free(&fnic->cq[i]); + + for (i = 0; i < fnic->intr_count; i++) + vnic_intr_free(&fnic->intr[i]); +} + +int fnic_alloc_vnic_resources(struct fnic *fnic) +{ + enum vnic_dev_intr_mode intr_mode; + unsigned int mask_on_assertion; + unsigned int interrupt_offset; + unsigned int error_interrupt_enable; + unsigned int error_interrupt_offset; + unsigned int i, cq_index; + unsigned int wq_copy_cq_desc_count; + int err; + + intr_mode = vnic_dev_get_intr_mode(fnic->vdev); + + shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n", + intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" : + intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" : + intr_mode == VNIC_DEV_INTR_MODE_MSIX ? 
+ "MSI-X" : "unknown"); + + shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: " + "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n", + fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count, + fnic->rq_count, fnic->cq_count, fnic->intr_count); + + /* Allocate Raw WQ used for FCS frames */ + for (i = 0; i < fnic->raw_wq_count; i++) { + err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i, + fnic->config.wq_enet_desc_count, + sizeof(struct wq_enet_desc)); + if (err) + goto err_out_cleanup; + } + + /* Allocate Copy WQs used for SCSI IOs */ + for (i = 0; i < fnic->wq_copy_count; i++) { + err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i], + (fnic->raw_wq_count + i), + fnic->config.wq_copy_desc_count, + sizeof(struct fcpio_host_req)); + if (err) + goto err_out_cleanup; + } + + /* RQ for receiving FCS frames */ + for (i = 0; i < fnic->rq_count; i++) { + err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i, + fnic->config.rq_desc_count, + sizeof(struct rq_enet_desc)); + if (err) + goto err_out_cleanup; + } + + /* CQ for each RQ */ + for (i = 0; i < fnic->rq_count; i++) { + cq_index = i; + err = vnic_cq_alloc(fnic->vdev, + &fnic->cq[cq_index], cq_index, + fnic->config.rq_desc_count, + sizeof(struct cq_enet_rq_desc)); + if (err) + goto err_out_cleanup; + } + + /* CQ for each WQ */ + for (i = 0; i < fnic->raw_wq_count; i++) { + cq_index = fnic->rq_count + i; + err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index, + fnic->config.wq_enet_desc_count, + sizeof(struct cq_enet_wq_desc)); + if (err) + goto err_out_cleanup; + } + + /* CQ for each COPY WQ */ + wq_copy_cq_desc_count = (fnic->config.wq_copy_desc_count * 3); + for (i = 0; i < fnic->wq_copy_count; i++) { + cq_index = fnic->raw_wq_count + fnic->rq_count + i; + err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], + cq_index, + wq_copy_cq_desc_count, + sizeof(struct fcpio_fw_req)); + if (err) + goto err_out_cleanup; + } + + for (i = 0; i < fnic->intr_count; i++) { + err = vnic_intr_alloc(fnic->vdev, &fnic->intr[i], i); + if (err) + goto err_out_cleanup; + } + + fnic->legacy_pba = vnic_dev_get_res(fnic->vdev, + RES_TYPE_INTR_PBA_LEGACY, 0); + + if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) { + shost_printk(KERN_ERR, fnic->lport->host, + "Failed to hook legacy pba resource\n"); + err = -ENODEV; + goto err_out_cleanup; + } + + /* + * Init RQ/WQ resources. + * + * RQ[0 to n-1] point to CQ[0 to n-1] + * WQ[0 to m-1] point to CQ[n to n+m-1] + * WQ_COPY[0 to k-1] points to CQ[n+m to n+m+k-1] + * + * Note for copy wq we always initialize with cq_index = 0 + * + * Error interrupt is not enabled for MSI. 
+ */ + + switch (intr_mode) { + case VNIC_DEV_INTR_MODE_INTX: + case VNIC_DEV_INTR_MODE_MSIX: + error_interrupt_enable = 1; + error_interrupt_offset = fnic->err_intr_offset; + break; + default: + error_interrupt_enable = 0; + error_interrupt_offset = 0; + break; + } + + for (i = 0; i < fnic->rq_count; i++) { + cq_index = i; + vnic_rq_init(&fnic->rq[i], + cq_index, + error_interrupt_enable, + error_interrupt_offset); + } + + for (i = 0; i < fnic->raw_wq_count; i++) { + cq_index = i + fnic->rq_count; + vnic_wq_init(&fnic->wq[i], + cq_index, + error_interrupt_enable, + error_interrupt_offset); + } + + for (i = 0; i < fnic->wq_copy_count; i++) { + vnic_wq_copy_init(&fnic->wq_copy[i], + 0 /* cq_index 0 - always */, + error_interrupt_enable, + error_interrupt_offset); + } + + for (i = 0; i < fnic->cq_count; i++) { + + switch (intr_mode) { + case VNIC_DEV_INTR_MODE_MSIX: + interrupt_offset = i; + break; + default: + interrupt_offset = 0; + break; + } + + vnic_cq_init(&fnic->cq[i], + 0 /* flow_control_enable */, + 1 /* color_enable */, + 0 /* cq_head */, + 0 /* cq_tail */, + 1 /* cq_tail_color */, + 1 /* interrupt_enable */, + 1 /* cq_entry_enable */, + 0 /* cq_message_enable */, + interrupt_offset, + 0 /* cq_message_addr */); + } + + /* + * Init INTR resources + * + * mask_on_assertion is not used for INTx due to the level- + * triggered nature of INTx + */ + + switch (intr_mode) { + case VNIC_DEV_INTR_MODE_MSI: + case VNIC_DEV_INTR_MODE_MSIX: + mask_on_assertion = 1; + break; + default: + mask_on_assertion = 0; + break; + } + + for (i = 0; i < fnic->intr_count; i++) { + vnic_intr_init(&fnic->intr[i], + fnic->config.intr_timer, + fnic->config.intr_timer_type, + mask_on_assertion); + } + + /* init the stats memory by making the first call here */ + err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "vnic_dev_stats_dump failed - x%x\n", err); + goto err_out_cleanup; + } + + /* Clear LIF stats */ + vnic_dev_stats_clear(fnic->vdev); + + return 0; + +err_out_cleanup: + fnic_free_vnic_resources(fnic); + + return err; +} diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h new file mode 100644 index 000000000..92a2fcfd3 --- /dev/null +++ b/drivers/scsi/fnic/fnic_res.h @@ -0,0 +1,237 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ +#ifndef _FNIC_RES_H_ +#define _FNIC_RES_H_ + +#include "wq_enet_desc.h" +#include "rq_enet_desc.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "fnic_io.h" +#include "fcpio.h" +#include "vnic_wq_copy.h" +#include "vnic_cq_copy.h" + +static inline void fnic_queue_wq_desc(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, + unsigned int len, unsigned int fc_eof, + int vlan_tag_insert, + unsigned int vlan_tag, + int cq_entry, int sop, int eop) +{ + struct wq_enet_desc *desc = vnic_wq_next_desc(wq); + + wq_enet_desc_enc(desc, + (u64)dma_addr | VNIC_PADDR_TARGET, + (u16)len, + 0, /* mss_or_csum_offset */ + (u16)fc_eof, + 0, /* offload_mode */ + (u8)eop, (u8)cq_entry, + 1, /* fcoe_encap */ + (u8)vlan_tag_insert, + (u16)vlan_tag, + 0 /* loopback */); + + vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); +} + +static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, + unsigned int len, + int vlan_tag_insert, + unsigned int vlan_tag, + int cq_entry) +{ + struct wq_enet_desc *desc = vnic_wq_next_desc(wq); + + wq_enet_desc_enc(desc, + (u64)dma_addr | VNIC_PADDR_TARGET, + (u16)len, + 0, /* mss_or_csum_offset */ + 0, /* fc_eof */ + 0, /* offload_mode */ + 1, /* eop */ + (u8)cq_entry, + 0, /* fcoe_encap */ + (u8)vlan_tag_insert, + (u16)vlan_tag, + 0 /* loopback */); + + vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1); +} + +static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, + u32 req_id, + u32 lunmap_id, u8 spl_flags, + u32 sgl_cnt, u32 sense_len, + u64 sgl_addr, u64 sns_addr, + u8 crn, u8 pri_ta, + u8 flags, u8 *scsi_cdb, + u8 cdb_len, + u32 data_len, u8 *lun, + u32 d_id, u16 mss, + u32 ratov, u32 edtov) +{ + struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); + + desc->hdr.type = FCPIO_ICMND_16; /* enum fcpio_type */ + desc->hdr.status = 0; /* header status entry */ + desc->hdr._resvd = 0; /* reserved */ + desc->hdr.tag.u.req_id = req_id; /* id for this request */ + + desc->u.icmnd_16.lunmap_id = lunmap_id; /* index into lunmap table */ + desc->u.icmnd_16.special_req_flags = spl_flags; /* exch req flags */ + desc->u.icmnd_16._resvd0[0] = 0; /* reserved */ + desc->u.icmnd_16._resvd0[1] = 0; /* reserved */ + desc->u.icmnd_16._resvd0[2] = 0; /* reserved */ + desc->u.icmnd_16.sgl_cnt = sgl_cnt; /* scatter-gather list count */ + desc->u.icmnd_16.sense_len = sense_len; /* sense buffer length */ + desc->u.icmnd_16.sgl_addr = sgl_addr; /* scatter-gather list addr */ + desc->u.icmnd_16.sense_addr = sns_addr; /* sense buffer address */ + desc->u.icmnd_16.crn = crn; /* SCSI Command Reference No.*/ + desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */ + desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */ + desc->u.icmnd_16.flags = flags; /* command flags */ + memset(desc->u.icmnd_16.scsi_cdb, 0, CDB_16); + memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, cdb_len); /* SCSI CDB */ + desc->u.icmnd_16.data_len = data_len; /* length of data expected */ + memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */ + desc->u.icmnd_16._resvd2 = 0; /* reserved */ + hton24(desc->u.icmnd_16.d_id, d_id); /* FC vNIC only: Target D_ID */ + desc->u.icmnd_16.mss = mss; /* FC vNIC only: max burst */ + desc->u.icmnd_16.r_a_tov = ratov; /*FC vNIC only: Res. 
Alloc Timeout */ + desc->u.icmnd_16.e_d_tov = edtov; /*FC vNIC only: Err Detect Timeout */ + + vnic_wq_copy_post(wq); +} + +static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq, + u32 req_id, u32 lunmap_id, + u32 tm_req, u32 tm_id, u8 *lun, + u32 d_id, u32 r_a_tov, + u32 e_d_tov) +{ + struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); + + desc->hdr.type = FCPIO_ITMF; /* enum fcpio_type */ + desc->hdr.status = 0; /* header status entry */ + desc->hdr._resvd = 0; /* reserved */ + desc->hdr.tag.u.req_id = req_id; /* id for this request */ + + desc->u.itmf.lunmap_id = lunmap_id; /* index into lunmap table */ + desc->u.itmf.tm_req = tm_req; /* SCSI Task Management request */ + desc->u.itmf.t_tag = tm_id; /* tag of fcpio to be aborted */ + desc->u.itmf._resvd = 0; + memcpy(desc->u.itmf.lun, lun, LUN_ADDRESS); /* LUN address */ + desc->u.itmf._resvd1 = 0; + hton24(desc->u.itmf.d_id, d_id); /* FC vNIC only: Target D_ID */ + desc->u.itmf.r_a_tov = r_a_tov; /* FC vNIC only: R_A_TOV in msec */ + desc->u.itmf.e_d_tov = e_d_tov; /* FC vNIC only: E_D_TOV in msec */ + + vnic_wq_copy_post(wq); +} + +static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq, + u32 req_id, u8 format, + u32 s_id, u8 *gw_mac) +{ + struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); + + desc->hdr.type = FCPIO_FLOGI_REG; /* enum fcpio_type */ + desc->hdr.status = 0; /* header status entry */ + desc->hdr._resvd = 0; /* reserved */ + desc->hdr.tag.u.req_id = req_id; /* id for this request */ + + desc->u.flogi_reg.format = format; + desc->u.flogi_reg._resvd = 0; + hton24(desc->u.flogi_reg.s_id, s_id); + memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN); + + vnic_wq_copy_post(wq); +} + +static inline void fnic_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq, + u32 req_id, u32 s_id, + u8 *fcf_mac, u8 *ha_mac, + u32 r_a_tov, u32 e_d_tov) +{ + struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); + + desc->hdr.type = FCPIO_FLOGI_FIP_REG; /* enum fcpio_type */ + desc->hdr.status = 0; /* header status entry */ + desc->hdr._resvd = 0; /* reserved */ + desc->hdr.tag.u.req_id = req_id; /* id for this request */ + + desc->u.flogi_fip_reg._resvd0 = 0; + hton24(desc->u.flogi_fip_reg.s_id, s_id); + memcpy(desc->u.flogi_fip_reg.fcf_mac, fcf_mac, ETH_ALEN); + desc->u.flogi_fip_reg._resvd1 = 0; + desc->u.flogi_fip_reg.r_a_tov = r_a_tov; + desc->u.flogi_fip_reg.e_d_tov = e_d_tov; + memcpy(desc->u.flogi_fip_reg.ha_mac, ha_mac, ETH_ALEN); + desc->u.flogi_fip_reg._resvd2 = 0; + + vnic_wq_copy_post(wq); +} + +static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq, + u32 req_id) +{ + struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); + + desc->hdr.type = FCPIO_RESET; /* enum fcpio_type */ + desc->hdr.status = 0; /* header status entry */ + desc->hdr._resvd = 0; /* reserved */ + desc->hdr.tag.u.req_id = req_id; /* id for this request */ + + vnic_wq_copy_post(wq); +} + +static inline void fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq, + u32 req_id, u64 lunmap_addr, + u32 lunmap_len) +{ + struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); + + desc->hdr.type = FCPIO_LUNMAP_REQ; /* enum fcpio_type */ + desc->hdr.status = 0; /* header status entry */ + desc->hdr._resvd = 0; /* reserved */ + desc->hdr.tag.u.req_id = req_id; /* id for this request */ + + desc->u.lunmap_req.addr = lunmap_addr; /* address of the buffer */ + desc->u.lunmap_req.len = lunmap_len; /* len of the buffer */ + + vnic_wq_copy_post(wq); +} + +static inline void 
fnic_queue_rq_desc(struct vnic_rq *rq, + void *os_buf, dma_addr_t dma_addr, + u16 len) +{ + struct rq_enet_desc *desc = vnic_rq_next_desc(rq); + + rq_enet_desc_enc(desc, + (u64)dma_addr | VNIC_PADDR_TARGET, + RQ_ENET_TYPE_ONLY_SOP, + (u16)len); + + vnic_rq_post(rq, os_buf, 0, dma_addr, len); +} + + +struct fnic; + +int fnic_get_vnic_config(struct fnic *); +int fnic_alloc_vnic_resources(struct fnic *); +void fnic_free_vnic_resources(struct fnic *); +void fnic_get_res_counts(struct fnic *); +int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu, + u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, + u8 rss_enable, u8 tso_ipid_split_en, + u8 ig_vlan_strip_en); + +#endif /* _FNIC_RES_H_ */ diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c new file mode 100644 index 000000000..416d81954 --- /dev/null +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -0,0 +1,2720 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fnic_io.h" +#include "fnic.h" + +const char *fnic_state_str[] = { + [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE", + [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE", + [FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE", + [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE", +}; + +static const char *fnic_ioreq_state_str[] = { + [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED", + [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING", + [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING", + [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE", + [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE", +}; + +static const char *fcpio_status_str[] = { + [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/ + [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER", + [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE", + [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM]", + [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED", + [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND", + [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/ + [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT", + [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID", + [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID", + [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH", + [FCPIO_FW_ERR] = "FCPIO_FW_ERR", + [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED", + [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED", + [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN", + [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED", + [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL", + [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED", + [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNHMAP_CHNG_PEND", +}; + +const char *fnic_state_to_str(unsigned int state) +{ + if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state]) + return "unknown"; + + return fnic_state_str[state]; +} + +static const char *fnic_ioreq_state_to_str(unsigned int state) +{ + if (state >= ARRAY_SIZE(fnic_ioreq_state_str) || + !fnic_ioreq_state_str[state]) + return "unknown"; + + return fnic_ioreq_state_str[state]; +} + +static const char *fnic_fcpio_status_to_str(unsigned int status) +{ + if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status]) + return "unknown"; + + return fcpio_status_str[status]; +} + +static void fnic_cleanup_io(struct fnic *fnic); + +static inline 
spinlock_t *fnic_io_lock_hash(struct fnic *fnic, + struct scsi_cmnd *sc) +{ + u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1); + + return &fnic->io_req_lock[hash]; +} + +static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic, + int tag) +{ + return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)]; +} + +/* + * Unmap the data buffer and sense buffer for an io_req, + * also unmap and free the device-private scatter/gather list. + */ +static void fnic_release_ioreq_buf(struct fnic *fnic, + struct fnic_io_req *io_req, + struct scsi_cmnd *sc) +{ + if (io_req->sgl_list_pa) + dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa, + sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt, + DMA_TO_DEVICE); + scsi_dma_unmap(sc); + + if (io_req->sgl_cnt) + mempool_free(io_req->sgl_list_alloc, + fnic->io_sgl_pool[io_req->sgl_type]); + if (io_req->sense_buf_pa) + dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); +} + +/* Free up Copy Wq descriptors. Called with copy_wq lock held */ +static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq) +{ + /* if no Ack received from firmware, then nothing to clean */ + if (!fnic->fw_ack_recd[0]) + return 1; + + /* + * Update desc_available count based on number of freed descriptors + * Account for wraparound + */ + if (wq->to_clean_index <= fnic->fw_ack_index[0]) + wq->ring.desc_avail += (fnic->fw_ack_index[0] + - wq->to_clean_index + 1); + else + wq->ring.desc_avail += (wq->ring.desc_count + - wq->to_clean_index + + fnic->fw_ack_index[0] + 1); + + /* + * just bump clean index to ack_index+1 accounting for wraparound + * this will essentially free up all descriptors between + * to_clean_index and fw_ack_index, both inclusive + */ + wq->to_clean_index = + (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count; + + /* we have processed the acks received so far */ + fnic->fw_ack_recd[0] = 0; + return 0; +} + + +/* + * __fnic_set_state_flags + * Sets/Clears bits in fnic's state_flags + **/ +void +__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags, + unsigned long clearbits) +{ + unsigned long flags = 0; + unsigned long host_lock_flags = 0; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags); + + if (clearbits) + fnic->state_flags &= ~st_flags; + else + fnic->state_flags |= st_flags; + + spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; +} + + +/* + * fnic_fw_reset_handler + * Routine to send reset msg to fw + */ +int fnic_fw_reset_handler(struct fnic *fnic) +{ + struct vnic_wq_copy *wq = &fnic->wq_copy[0]; + int ret = 0; + unsigned long flags; + + /* indicate fwreset to io path */ + fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET); + + skb_queue_purge(&fnic->frame_queue); + skb_queue_purge(&fnic->tx_queue); + + /* wait for io cmpl */ + while (atomic_read(&fnic->in_flight)) + schedule_timeout(msecs_to_jiffies(1)); + + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + free_wq_copy_descs(fnic, wq); + + if (!vnic_wq_copy_desc_avail(wq)) + ret = -EAGAIN; + else { + fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG); + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, + atomic64_read( + 
&fnic->fnic_stats.fw_stats.active_fw_reqs)); + } + + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + + if (!ret) { + atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Issued fw reset\n"); + } else { + fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Failed to issue fw reset\n"); + } + + return ret; +} + + +/* + * fnic_flogi_reg_handler + * Routine to send flogi register msg to fw + */ +int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id) +{ + struct vnic_wq_copy *wq = &fnic->wq_copy[0]; + enum fcpio_flogi_reg_format_type format; + struct fc_lport *lp = fnic->lport; + u8 gw_mac[ETH_ALEN]; + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + free_wq_copy_descs(fnic, wq); + + if (!vnic_wq_copy_desc_avail(wq)) { + ret = -EAGAIN; + goto flogi_reg_ioreq_end; + } + + if (fnic->ctlr.map_dest) { + eth_broadcast_addr(gw_mac); + format = FCPIO_FLOGI_REG_DEF_DEST; + } else { + memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN); + format = FCPIO_FLOGI_REG_GW_DEST; + } + + if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) { + fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG, + fc_id, gw_mac, + fnic->data_src_addr, + lp->r_a_tov, lp->e_d_tov); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "FLOGI FIP reg issued fcid %x src %pM dest %pM\n", + fc_id, fnic->data_src_addr, gw_mac); + } else { + fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, + format, fc_id, gw_mac); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "FLOGI reg issued fcid %x map %d dest %pM\n", + fc_id, fnic->ctlr.map_dest, gw_mac); + } + + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); + +flogi_reg_ioreq_end: + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + return ret; +} + +/* + * fnic_queue_wq_copy_desc + * Routine to enqueue a wq copy desc + */ +static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, + struct vnic_wq_copy *wq, + struct fnic_io_req *io_req, + struct scsi_cmnd *sc, + int sg_count) +{ + struct scatterlist *sg; + struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct host_sg_desc *desc; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; + unsigned int i; + unsigned long intr_flags; + int flags; + u8 exch_flags; + struct scsi_lun fc_lun; + + if (sg_count) { + /* For each SGE, create a device desc entry */ + desc = io_req->sgl_list; + for_each_sg(scsi_sglist(sc), sg, sg_count, i) { + desc->addr = cpu_to_le64(sg_dma_address(sg)); + desc->len = cpu_to_le32(sg_dma_len(sg)); + desc->_resvd = 0; + desc++; + } + + io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev, + io_req->sgl_list, + sizeof(io_req->sgl_list[0]) * sg_count, + DMA_TO_DEVICE); + if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) { + printk(KERN_ERR "DMA mapping failed\n"); + return SCSI_MLQUEUE_HOST_BUSY; + } + } + + io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev, + sc->sense_buffer, + SCSI_SENSE_BUFFERSIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) { + dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa, + 
sizeof(io_req->sgl_list[0]) * sg_count, + DMA_TO_DEVICE); + printk(KERN_ERR "DMA mapping failed\n"); + return SCSI_MLQUEUE_HOST_BUSY; + } + + int_to_scsilun(sc->device->lun, &fc_lun); + + /* Enqueue the descriptor in the Copy WQ */ + spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + free_wq_copy_descs(fnic, wq); + + if (unlikely(!vnic_wq_copy_desc_avail(wq))) { + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "fnic_queue_wq_copy_desc failure - no descriptors\n"); + atomic64_inc(&misc_stats->io_cpwq_alloc_failures); + return SCSI_MLQUEUE_HOST_BUSY; + } + + flags = 0; + if (sc->sc_data_direction == DMA_FROM_DEVICE) + flags = FCPIO_ICMND_RDDATA; + else if (sc->sc_data_direction == DMA_TO_DEVICE) + flags = FCPIO_ICMND_WRDATA; + + exch_flags = 0; + if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) && + (rp->flags & FC_RP_FLAGS_RETRY)) + exch_flags |= FCPIO_ICMND_SRFLAG_RETRY; + + fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag, + 0, exch_flags, io_req->sgl_cnt, + SCSI_SENSE_BUFFERSIZE, + io_req->sgl_list_pa, + io_req->sense_buf_pa, + 0, /* scsi cmd ref, always 0 */ + FCPIO_ICMND_PTA_SIMPLE, + /* scsi pri and tag */ + flags, /* command flags */ + sc->cmnd, sc->cmd_len, + scsi_bufflen(sc), + fc_lun.scsi_lun, io_req->port_id, + rport->maxframe_size, rp->r_a_tov, + rp->e_d_tov); + + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); + + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); + return 0; +} + +/* + * fnic_queuecommand + * Routine to send a scsi cdb + * Called with host_lock held and interrupts disabled. 
+ */ +static int fnic_queuecommand_lck(struct scsi_cmnd *sc) +{ + void (*done)(struct scsi_cmnd *) = scsi_done; + const int tag = scsi_cmd_to_rq(sc)->tag; + struct fc_lport *lp = shost_priv(sc->device->host); + struct fc_rport *rport; + struct fnic_io_req *io_req = NULL; + struct fnic *fnic = lport_priv(lp); + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + struct vnic_wq_copy *wq; + int ret; + u64 cmd_trace; + int sg_count = 0; + unsigned long flags = 0; + unsigned long ptr; + spinlock_t *io_lock = NULL; + int io_lock_acquired = 0; + struct fc_rport_libfc_priv *rp; + + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) + return SCSI_MLQUEUE_HOST_BUSY; + + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) + return SCSI_MLQUEUE_HOST_BUSY; + + rport = starget_to_rport(scsi_target(sc->device)); + if (!rport) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "returning DID_NO_CONNECT for IO as rport is NULL\n"); + sc->result = DID_NO_CONNECT << 16; + done(sc); + return 0; + } + + ret = fc_remote_port_chkready(rport); + if (ret) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "rport is not ready\n"); + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + sc->result = ret; + done(sc); + return 0; + } + + rp = rport->dd_data; + if (!rp || rp->rp_state == RPORT_ST_DELETE) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "rport 0x%x removed, returning DID_NO_CONNECT\n", + rport->port_id); + + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + sc->result = DID_NO_CONNECT<<16; + done(sc); + return 0; + } + + if (rp->rp_state != RPORT_ST_READY) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n", + rport->port_id, rp->rp_state); + + sc->result = DID_IMM_RETRY << 16; + done(sc); + return 0; + } + + if (lp->state != LPORT_ST_READY || !(lp->link_up)) + return SCSI_MLQUEUE_HOST_BUSY; + + atomic_inc(&fnic->in_flight); + + /* + * Release host lock, use driver resource specific locks from here. + * Don't re-enable interrupts in case they were disabled prior to the + * caller disabling them. 
+ */ + spin_unlock(lp->host->host_lock); + fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED; + fnic_priv(sc)->flags = FNIC_NO_FLAGS; + + /* Get a new io_req for this SCSI IO */ + io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); + if (!io_req) { + atomic64_inc(&fnic_stats->io_stats.alloc_failures); + ret = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + memset(io_req, 0, sizeof(*io_req)); + + /* Map the data buffer */ + sg_count = scsi_dma_map(sc); + if (sg_count < 0) { + FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, + tag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state); + mempool_free(io_req, fnic->io_req_pool); + goto out; + } + + /* Determine the type of scatter/gather list we need */ + io_req->sgl_cnt = sg_count; + io_req->sgl_type = FNIC_SGL_CACHE_DFLT; + if (sg_count > FNIC_DFLT_SG_DESC_CNT) + io_req->sgl_type = FNIC_SGL_CACHE_MAX; + + if (sg_count) { + io_req->sgl_list = + mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type], + GFP_ATOMIC); + if (!io_req->sgl_list) { + atomic64_inc(&fnic_stats->io_stats.alloc_failures); + ret = SCSI_MLQUEUE_HOST_BUSY; + scsi_dma_unmap(sc); + mempool_free(io_req, fnic->io_req_pool); + goto out; + } + + /* Cache sgl list allocated address before alignment */ + io_req->sgl_list_alloc = io_req->sgl_list; + ptr = (unsigned long) io_req->sgl_list; + if (ptr % FNIC_SG_DESC_ALIGN) { + io_req->sgl_list = (struct host_sg_desc *) + (((unsigned long) ptr + + FNIC_SG_DESC_ALIGN - 1) + & ~(FNIC_SG_DESC_ALIGN - 1)); + } + } + + /* + * Will acquire lock before setting to IO initialized. + */ + + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); + + /* initialize rest of io_req */ + io_lock_acquired = 1; + io_req->port_id = rport->port_id; + io_req->start_time = jiffies; + fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; + fnic_priv(sc)->io_req = io_req; + fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED; + + /* create copy wq desc and enqueue it */ + wq = &fnic->wq_copy[0]; + ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count); + if (ret) { + /* + * In case another thread cancelled the request, + * refetch the pointer under the lock.
+ */ + FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, + tag, sc, 0, 0, 0, fnic_flags_and_state(sc)); + io_req = fnic_priv(sc)->io_req; + fnic_priv(sc)->io_req = NULL; + fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; + spin_unlock_irqrestore(io_lock, flags); + if (io_req) { + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + } + atomic_dec(&fnic->in_flight); + /* acquire host lock before returning to SCSI */ + spin_lock(lp->host->host_lock); + return ret; + } else { + atomic64_inc(&fnic_stats->io_stats.active_ios); + atomic64_inc(&fnic_stats->io_stats.num_ios); + if (atomic64_read(&fnic_stats->io_stats.active_ios) > + atomic64_read(&fnic_stats->io_stats.max_active_ios)) + atomic64_set(&fnic_stats->io_stats.max_active_ios, + atomic64_read(&fnic_stats->io_stats.active_ios)); + + /* REVISIT: Use per IO lock in the final code */ + fnic_priv(sc)->flags |= FNIC_IO_ISSUED; + } +out: + cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 | + (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 | + (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 | + sc->cmnd[5]); + + FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, + tag, sc, io_req, sg_count, cmd_trace, + fnic_flags_and_state(sc)); + + /* if only we issued IO, will we have the io lock */ + if (io_lock_acquired) + spin_unlock_irqrestore(io_lock, flags); + + atomic_dec(&fnic->in_flight); + /* acquire host lock before returning to SCSI */ + spin_lock(lp->host->host_lock); + return ret; +} + +DEF_SCSI_QCMD(fnic_queuecommand) + +/* + * fnic_fcpio_fw_reset_cmpl_handler + * Routine to handle fw reset completion + */ +static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, + struct fcpio_fw_req *desc) +{ + u8 type; + u8 hdr_status; + struct fcpio_tag tag; + int ret = 0; + unsigned long flags; + struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; + + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); + + atomic64_inc(&reset_stats->fw_reset_completions); + + /* Clean up all outstanding io requests */ + fnic_cleanup_io(fnic); + + atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0); + atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0); + atomic64_set(&fnic->io_cmpl_skip, 0); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + /* fnic should be in FC_TRANS_ETH_MODE */ + if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { + /* Check status of reset completion */ + if (!hdr_status) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "reset cmpl success\n"); + /* Ready to send flogi out */ + fnic->state = FNIC_IN_ETH_MODE; + } else { + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, + "fnic fw_reset : failed %s\n", + fnic_fcpio_status_to_str(hdr_status)); + + /* + * Unable to change to eth mode, cannot send out flogi + * Change state to fc mode, so that subsequent Flogi + * requests from libFC will cause more attempts to + * reset the firmware. Free the cached flogi + */ + fnic->state = FNIC_IN_FC_MODE; + atomic64_inc(&reset_stats->fw_reset_failures); + ret = -1; + } + } else { + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, + "Unexpected state %s while processing" + " reset cmpl\n", fnic_state_to_str(fnic->state)); + atomic64_inc(&reset_stats->fw_reset_failures); + ret = -1; + } + + /* Thread removing device blocks till firmware reset is complete */ + if (fnic->remove_wait) + complete(fnic->remove_wait); + + /* + * If fnic is being removed, or fw reset failed + * free the flogi frame. 
Else, send it out + */ + if (fnic->remove_wait || ret) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + skb_queue_purge(&fnic->tx_queue); + goto reset_cmpl_handler_end; + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_flush_tx(fnic); + + reset_cmpl_handler_end: + fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); + + return ret; +} + +/* + * fnic_fcpio_flogi_reg_cmpl_handler + * Routine to handle flogi register completion + */ +static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, + struct fcpio_fw_req *desc) +{ + u8 type; + u8 hdr_status; + struct fcpio_tag tag; + int ret = 0; + unsigned long flags; + + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); + + /* Update fnic state based on status of flogi reg completion */ + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) { + + /* Check flogi registration completion status */ + if (!hdr_status) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "flogi reg succeeded\n"); + fnic->state = FNIC_IN_FC_MODE; + } else { + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, + "fnic flogi reg: failed %s\n", + fnic_fcpio_status_to_str(hdr_status)); + fnic->state = FNIC_IN_ETH_MODE; + ret = -1; + } + } else { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Unexpected fnic state %s while" + " processing flogi reg completion\n", + fnic_state_to_str(fnic->state)); + ret = -1; + } + + if (!ret) { + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto reg_cmpl_handler_end; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_flush_tx(fnic); + queue_work(fnic_event_queue, &fnic->frame_work); + } else { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + } + +reg_cmpl_handler_end: + return ret; +} + +static inline int is_ack_index_in_range(struct vnic_wq_copy *wq, + u16 request_out) +{ + if (wq->to_clean_index <= wq->to_use_index) { + /* out of range, stale request_out index */ + if (request_out < wq->to_clean_index || + request_out >= wq->to_use_index) + return 0; + } else { + /* out of range, stale request_out index */ + if (request_out < wq->to_clean_index && + request_out >= wq->to_use_index) + return 0; + } + /* request_out index is in range */ + return 1; +} + + +/* + * Mark that ack received and store the Ack index. If there are multiple + * acks received before Tx thread cleans it up, the latest value will be + * used which is correct behavior.
This state should be in the copy Wq + * instead of in the fnic + */ +static inline void fnic_fcpio_ack_handler(struct fnic *fnic, + unsigned int cq_index, + struct fcpio_fw_req *desc) +{ + struct vnic_wq_copy *wq; + u16 request_out = desc->u.ack.request_out; + unsigned long flags; + u64 *ox_id_tag = (u64 *)(void *)desc; + + /* mark the ack state */ + wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count]; + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + + fnic->fnic_stats.misc_stats.last_ack_time = jiffies; + if (is_ack_index_in_range(wq, request_out)) { + fnic->fw_ack_index[0] = request_out; + fnic->fw_ack_recd[0] = 1; + } else + atomic64_inc( + &fnic->fnic_stats.misc_stats.ack_index_out_of_range); + + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + FNIC_TRACE(fnic_fcpio_ack_handler, + fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], + ox_id_tag[4], ox_id_tag[5]); +} + +/* + * fnic_fcpio_icmnd_cmpl_handler + * Routine to handle icmnd completions + */ +static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, + struct fcpio_fw_req *desc) +{ + u8 type; + u8 hdr_status; + struct fcpio_tag tag; + u32 id; + u64 xfer_len = 0; + struct fcpio_icmnd_cmpl *icmnd_cmpl; + struct fnic_io_req *io_req; + struct scsi_cmnd *sc; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + unsigned long flags; + spinlock_t *io_lock; + u64 cmd_trace; + unsigned long start_time; + unsigned long io_duration_time; + + /* Decode the cmpl description to get the io_req id */ + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); + fcpio_tag_id_dec(&tag, &id); + icmnd_cmpl = &desc->u.icmnd_cmpl; + + if (id >= fnic->fnic_max_tag_id) { + shost_printk(KERN_ERR, fnic->lport->host, + "Tag out of range tag %x hdr status = %s\n", + id, fnic_fcpio_status_to_str(hdr_status)); + return; + } + + sc = scsi_host_find_tag(fnic->lport->host, id); + WARN_ON_ONCE(!sc); + if (!sc) { + atomic64_inc(&fnic_stats->io_stats.sc_null); + shost_printk(KERN_ERR, fnic->lport->host, + "icmnd_cmpl sc is null - " + "hdr status = %s tag = 0x%x desc = 0x%p\n", + fnic_fcpio_status_to_str(hdr_status), id, desc); + FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler, + fnic->lport->host->host_no, id, + ((u64)icmnd_cmpl->_resvd0[1] << 16 | + (u64)icmnd_cmpl->_resvd0[0]), + ((u64)hdr_status << 16 | + (u64)icmnd_cmpl->scsi_status << 8 | + (u64)icmnd_cmpl->flags), desc, + (u64)icmnd_cmpl->residual, 0); + return; + } + + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); + io_req = fnic_priv(sc)->io_req; + WARN_ON_ONCE(!io_req); + if (!io_req) { + atomic64_inc(&fnic_stats->io_stats.ioreq_null); + fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL; + spin_unlock_irqrestore(io_lock, flags); + shost_printk(KERN_ERR, fnic->lport->host, + "icmnd_cmpl io_req is null - " + "hdr status = %s tag = 0x%x sc 0x%p\n", + fnic_fcpio_status_to_str(hdr_status), id, sc); + return; + } + start_time = io_req->start_time; + + /* firmware completed the io */ + io_req->io_completed = 1; + + /* + * if SCSI-ML has already issued abort on this command, + * set completion of the IO. 
The abts path will clean it up + */ + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { + + /* + * set the FNIC_IO_DONE so that this doesn't get + * flagged as 'out of order' if it was not aborted + */ + fnic_priv(sc)->flags |= FNIC_IO_DONE; + fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING; + spin_unlock_irqrestore(io_lock, flags); + if(FCPIO_ABORTED == hdr_status) + fnic_priv(sc)->flags |= FNIC_IO_ABORTED; + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "icmnd_cmpl abts pending " + "hdr status = %s tag = 0x%x sc = 0x%p " + "scsi_status = %x residual = %d\n", + fnic_fcpio_status_to_str(hdr_status), + id, sc, + icmnd_cmpl->scsi_status, + icmnd_cmpl->residual); + return; + } + + /* Mark the IO as complete */ + fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; + + icmnd_cmpl = &desc->u.icmnd_cmpl; + + switch (hdr_status) { + case FCPIO_SUCCESS: + sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status; + xfer_len = scsi_bufflen(sc); + + if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) { + xfer_len -= icmnd_cmpl->residual; + scsi_set_resid(sc, icmnd_cmpl->residual); + } + + if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION) + atomic64_inc(&fnic_stats->misc_stats.check_condition); + + if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL) + atomic64_inc(&fnic_stats->misc_stats.queue_fulls); + break; + + case FCPIO_TIMEOUT: /* request was timed out */ + atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout); + sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status; + break; + + case FCPIO_ABORTED: /* request was aborted */ + atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + + case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */ + atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch); + scsi_set_resid(sc, icmnd_cmpl->residual); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + + case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */ + atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources); + sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status; + break; + + case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */ + atomic64_inc(&fnic_stats->io_stats.io_not_found); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + + case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */ + atomic64_inc(&fnic_stats->misc_stats.sgl_invalid); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + + case FCPIO_FW_ERR: /* request was terminated due fw error */ + atomic64_inc(&fnic_stats->fw_stats.io_fw_errs); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + + case FCPIO_MSS_INVALID: /* request was aborted due to mss error */ + atomic64_inc(&fnic_stats->misc_stats.mss_invalid); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + + case FCPIO_INVALID_HEADER: /* header contains invalid data */ + case FCPIO_INVALID_PARAM: /* some parameter in request invalid */ + case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */ + default: + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + } + + /* Break link with the SCSI command */ + fnic_priv(sc)->io_req = NULL; + fnic_priv(sc)->flags |= FNIC_IO_DONE; + + if (hdr_status != FCPIO_SUCCESS) { + atomic64_inc(&fnic_stats->io_stats.io_failures); + shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", + fnic_fcpio_status_to_str(hdr_status)); + } + + fnic_release_ioreq_buf(fnic, io_req, sc); + + cmd_trace = 
((u64)hdr_status << 56) | + (u64)icmnd_cmpl->scsi_status << 48 | + (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 | + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]; + + FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler, + sc->device->host->host_no, id, sc, + ((u64)icmnd_cmpl->_resvd0[1] << 56 | + (u64)icmnd_cmpl->_resvd0[0] << 48 | + jiffies_to_msecs(jiffies - start_time)), + desc, cmd_trace, fnic_flags_and_state(sc)); + + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + fnic->lport->host_stats.fcp_input_requests++; + fnic->fcp_input_bytes += xfer_len; + } else if (sc->sc_data_direction == DMA_TO_DEVICE) { + fnic->lport->host_stats.fcp_output_requests++; + fnic->fcp_output_bytes += xfer_len; + } else + fnic->lport->host_stats.fcp_control_requests++; + + /* Call SCSI completion function to complete the IO */ + scsi_done(sc); + spin_unlock_irqrestore(io_lock, flags); + + mempool_free(io_req, fnic->io_req_pool); + + atomic64_dec(&fnic_stats->io_stats.active_ios); + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); + else + atomic64_inc(&fnic_stats->io_stats.io_completions); + + + io_duration_time = jiffies_to_msecs(jiffies) - + jiffies_to_msecs(start_time); + + if(io_duration_time <= 10) + atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec); + else if(io_duration_time <= 100) + atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec); + else if(io_duration_time <= 500) + atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec); + else if(io_duration_time <= 5000) + atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec); + else if(io_duration_time <= 10000) + atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec); + else if(io_duration_time <= 30000) + atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec); + else { + atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec); + + if(io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time)) + atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time); + } +} + +/* fnic_fcpio_itmf_cmpl_handler + * Routine to handle itmf completions + */ +static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, + struct fcpio_fw_req *desc) +{ + u8 type; + u8 hdr_status; + struct fcpio_tag ftag; + u32 id; + struct scsi_cmnd *sc = NULL; + struct fnic_io_req *io_req; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats; + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; + unsigned long flags; + spinlock_t *io_lock; + unsigned long start_time; + unsigned int tag; + + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag); + fcpio_tag_id_dec(&ftag, &id); + + tag = id & FNIC_TAG_MASK; + if (tag == fnic->fnic_max_tag_id) { + if (!(id & FNIC_TAG_DEV_RST)) { + shost_printk(KERN_ERR, fnic->lport->host, + "Tag out of range id 0x%x hdr status = %s\n", + id, fnic_fcpio_status_to_str(hdr_status)); + return; + } + } else if (tag > fnic->fnic_max_tag_id) { + shost_printk(KERN_ERR, fnic->lport->host, + "Tag out of range tag 0x%x hdr status = %s\n", + tag, fnic_fcpio_status_to_str(hdr_status)); + return; + } + + if ((tag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) { + sc = fnic->sgreset_sc; + io_lock = &fnic->sgreset_lock; + } else { + sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); + io_lock = fnic_io_lock_hash(fnic, sc); + } + + WARN_ON_ONCE(!sc); + if (!sc) { + 
atomic64_inc(&fnic_stats->io_stats.sc_null); + shost_printk(KERN_ERR, fnic->lport->host, + "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", + fnic_fcpio_status_to_str(hdr_status), tag); + return; + } + + spin_lock_irqsave(io_lock, flags); + io_req = fnic_priv(sc)->io_req; + WARN_ON_ONCE(!io_req); + if (!io_req) { + atomic64_inc(&fnic_stats->io_stats.ioreq_null); + spin_unlock_irqrestore(io_lock, flags); + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; + shost_printk(KERN_ERR, fnic->lport->host, + "itmf_cmpl io_req is null - " + "hdr status = %s tag = 0x%x sc 0x%p\n", + fnic_fcpio_status_to_str(hdr_status), tag, sc); + return; + } + start_time = io_req->start_time; + + if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) { + /* Abort and terminate completion of device reset req */ + /* REVISIT : Add asserts about various flags */ + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "dev reset abts cmpl recd. id %x status %s\n", + id, fnic_fcpio_status_to_str(hdr_status)); + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; + fnic_priv(sc)->abts_status = hdr_status; + fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; + if (io_req->abts_done) + complete(io_req->abts_done); + spin_unlock_irqrestore(io_lock, flags); + } else if (id & FNIC_TAG_ABORT) { + /* Completion of abort cmd */ + switch (hdr_status) { + case FCPIO_SUCCESS: + break; + case FCPIO_TIMEOUT: + if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED) + atomic64_inc(&abts_stats->abort_fw_timeouts); + else + atomic64_inc( + &term_stats->terminate_fw_timeouts); + break; + case FCPIO_ITMF_REJECTED: + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "abort reject recd. id %d\n", + (int)(id & FNIC_TAG_MASK)); + break; + case FCPIO_IO_NOT_FOUND: + if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED) + atomic64_inc(&abts_stats->abort_io_not_found); + else + atomic64_inc( + &term_stats->terminate_io_not_found); + break; + default: + if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED) + atomic64_inc(&abts_stats->abort_failures); + else + atomic64_inc( + &term_stats->terminate_failures); + break; + } + if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) { + /* This is a late completion. Ignore it */ + spin_unlock_irqrestore(io_lock, flags); + return; + } + + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE; + fnic_priv(sc)->abts_status = hdr_status; + + /* If the status is IO not found consider it as success */ + if (hdr_status == FCPIO_IO_NOT_FOUND) + fnic_priv(sc)->abts_status = FCPIO_SUCCESS; + + if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) + atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "abts cmpl recd. id %d status %s\n", + (int)(id & FNIC_TAG_MASK), + fnic_fcpio_status_to_str(hdr_status)); + + /* + * If scsi_eh thread is blocked waiting for abts to complete, + * signal completion to it. 
IO will be cleaned in the thread + * else clean it in this context + */ + if (io_req->abts_done) { + complete(io_req->abts_done); + spin_unlock_irqrestore(io_lock, flags); + } else { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "abts cmpl, completing IO\n"); + fnic_priv(sc)->io_req = NULL; + sc->result = (DID_ERROR << 16); + + spin_unlock_irqrestore(io_lock, flags); + + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + sc->device->host->host_no, id, + sc, + jiffies_to_msecs(jiffies - start_time), + desc, + (((u64)hdr_status << 40) | + (u64)sc->cmnd[0] << 32 | + (u64)sc->cmnd[2] << 24 | + (u64)sc->cmnd[3] << 16 | + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + fnic_flags_and_state(sc)); + scsi_done(sc); + atomic64_dec(&fnic_stats->io_stats.active_ios); + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); + else + atomic64_inc(&fnic_stats->io_stats.io_completions); + } + } else if (id & FNIC_TAG_DEV_RST) { + /* Completion of device reset */ + fnic_priv(sc)->lr_status = hdr_status; + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(io_lock, flags); + fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING; + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + sc->device->host->host_no, id, sc, + jiffies_to_msecs(jiffies - start_time), + desc, 0, fnic_flags_and_state(sc)); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Terminate pending " + "dev reset cmpl recd. id %d status %s\n", + (int)(id & FNIC_TAG_MASK), + fnic_fcpio_status_to_str(hdr_status)); + return; + } + if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) { + /* Need to wait for terminate completion */ + spin_unlock_irqrestore(io_lock, flags); + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + sc->device->host->host_no, id, sc, + jiffies_to_msecs(jiffies - start_time), + desc, 0, fnic_flags_and_state(sc)); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "dev reset cmpl recd after time out. " + "id %d status %s\n", + (int)(id & FNIC_TAG_MASK), + fnic_fcpio_status_to_str(hdr_status)); + return; + } + fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; + fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "dev reset cmpl recd. 
id %d status %s\n", + (int)(id & FNIC_TAG_MASK), + fnic_fcpio_status_to_str(hdr_status)); + if (io_req->dr_done) + complete(io_req->dr_done); + spin_unlock_irqrestore(io_lock, flags); + + } else { + shost_printk(KERN_ERR, fnic->lport->host, + "Unexpected itmf io state %s tag %x\n", + fnic_ioreq_state_to_str(fnic_priv(sc)->state), id); + spin_unlock_irqrestore(io_lock, flags); + } + +} + +/* + * fnic_fcpio_cmpl_handler + * Routine to service the cq for wq_copy + */ +static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, + unsigned int cq_index, + struct fcpio_fw_req *desc) +{ + struct fnic *fnic = vnic_dev_priv(vdev); + + switch (desc->hdr.type) { + case FCPIO_ICMND_CMPL: /* fw completed a command */ + case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ + case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ + case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */ + case FCPIO_RESET_CMPL: /* fw completed reset */ + atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs); + break; + default: + break; + } + + switch (desc->hdr.type) { + case FCPIO_ACK: /* fw copied copy wq desc to its queue */ + fnic_fcpio_ack_handler(fnic, cq_index, desc); + break; + + case FCPIO_ICMND_CMPL: /* fw completed a command */ + fnic_fcpio_icmnd_cmpl_handler(fnic, desc); + break; + + case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ + fnic_fcpio_itmf_cmpl_handler(fnic, desc); + break; + + case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ + case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */ + fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc); + break; + + case FCPIO_RESET_CMPL: /* fw completed reset */ + fnic_fcpio_fw_reset_cmpl_handler(fnic, desc); + break; + + default: + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "firmware completion type %d\n", + desc->hdr.type); + break; + } + + return 0; +} + +/* + * fnic_wq_copy_cmpl_handler + * Routine to process wq copy + */ +int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do) +{ + unsigned int wq_work_done = 0; + unsigned int i, cq_index; + unsigned int cur_work_done; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; + u64 start_jiffies = 0; + u64 end_jiffies = 0; + u64 delta_jiffies = 0; + u64 delta_ms = 0; + + for (i = 0; i < fnic->wq_copy_count; i++) { + cq_index = i + fnic->raw_wq_count + fnic->rq_count; + + start_jiffies = jiffies; + cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index], + fnic_fcpio_cmpl_handler, + copy_work_to_do); + end_jiffies = jiffies; + + wq_work_done += cur_work_done; + delta_jiffies = end_jiffies - start_jiffies; + if (delta_jiffies > + (u64) atomic64_read(&misc_stats->max_isr_jiffies)) { + atomic64_set(&misc_stats->max_isr_jiffies, + delta_jiffies); + delta_ms = jiffies_to_msecs(delta_jiffies); + atomic64_set(&misc_stats->max_isr_time_ms, delta_ms); + atomic64_set(&misc_stats->corr_work_done, + cur_work_done); + } + } + return wq_work_done; +} + +static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) +{ + const int tag = scsi_cmd_to_rq(sc)->tag; + struct fnic *fnic = data; + struct fnic_io_req *io_req; + unsigned long flags = 0; + spinlock_t *io_lock; + unsigned long start_time = 0; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + + io_lock = fnic_io_lock_tag(fnic, tag); + spin_lock_irqsave(io_lock, flags); + + io_req = fnic_priv(sc)->io_req; + if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && + !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { + /* + * We will be here only when FW completes reset + * without sending 
completions for outstanding ios. + */ + fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; + if (io_req && io_req->dr_done) + complete(io_req->dr_done); + else if (io_req && io_req->abts_done) + complete(io_req->abts_done); + spin_unlock_irqrestore(io_lock, flags); + return true; + } else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { + spin_unlock_irqrestore(io_lock, flags); + return true; + } + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + goto cleanup_scsi_cmd; + } + + fnic_priv(sc)->io_req = NULL; + + spin_unlock_irqrestore(io_lock, flags); + + /* + * If there is a scsi_cmnd associated with this io_req, then + * free the corresponding state + */ + start_time = io_req->start_time; + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + +cleanup_scsi_cmd: + sc->result = DID_TRANSPORT_DISRUPTED << 16; + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", + tag, sc, jiffies - start_time); + + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); + else + atomic64_inc(&fnic_stats->io_stats.io_completions); + + /* Complete the command to SCSI */ + if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) + shost_printk(KERN_ERR, fnic->lport->host, + "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n", + tag, sc); + + FNIC_TRACE(fnic_cleanup_io, + sc->device->host->host_no, tag, sc, + jiffies_to_msecs(jiffies - start_time), + 0, ((u64)sc->cmnd[0] << 32 | + (u64)sc->cmnd[2] << 24 | + (u64)sc->cmnd[3] << 16 | + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + fnic_flags_and_state(sc)); + + scsi_done(sc); + + return true; +} + +static void fnic_cleanup_io(struct fnic *fnic) +{ + scsi_host_busy_iter(fnic->lport->host, + fnic_cleanup_io_iter, fnic); +} + +void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, + struct fcpio_host_req *desc) +{ + u32 id; + struct fnic *fnic = vnic_dev_priv(wq->vdev); + struct fnic_io_req *io_req; + struct scsi_cmnd *sc; + unsigned long flags; + spinlock_t *io_lock; + unsigned long start_time = 0; + + /* get the tag reference */ + fcpio_tag_id_dec(&desc->hdr.tag, &id); + id &= FNIC_TAG_MASK; + + if (id >= fnic->fnic_max_tag_id) + return; + + sc = scsi_host_find_tag(fnic->lport->host, id); + if (!sc) + return; + + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); + + /* Get the IO context which this desc refers to */ + io_req = fnic_priv(sc)->io_req; + + /* fnic interrupts are turned off by now */ + + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + goto wq_copy_cleanup_scsi_cmd; + } + + fnic_priv(sc)->io_req = NULL; + + spin_unlock_irqrestore(io_lock, flags); + + start_time = io_req->start_time; + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + +wq_copy_cleanup_scsi_cmd: + sc->result = DID_NO_CONNECT << 16; + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:" + " DID_NO_CONNECT\n"); + + FNIC_TRACE(fnic_wq_copy_cleanup_handler, + sc->device->host->host_no, id, sc, + jiffies_to_msecs(jiffies - start_time), + 0, ((u64)sc->cmnd[0] << 32 | + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + fnic_flags_and_state(sc)); + + scsi_done(sc); +} + +static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, + u32 task_req, u8 *fc_lun, + struct fnic_io_req *io_req) +{ + struct vnic_wq_copy *wq = &fnic->wq_copy[0]; + struct Scsi_Host *host = fnic->lport->host; + struct misc_stats *misc_stats = 
&fnic->fnic_stats.misc_stats; + unsigned long flags; + + spin_lock_irqsave(host->host_lock, flags); + if (unlikely(fnic_chk_state_flags_locked(fnic, + FNIC_FLAGS_IO_BLOCKED))) { + spin_unlock_irqrestore(host->host_lock, flags); + return 1; + } else + atomic_inc(&fnic->in_flight); + spin_unlock_irqrestore(host->host_lock, flags); + + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + free_wq_copy_descs(fnic, wq); + + if (!vnic_wq_copy_desc_avail(wq)) { + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + atomic_dec(&fnic->in_flight); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "fnic_queue_abort_io_req: failure: no descriptors\n"); + atomic64_inc(&misc_stats->abts_cpwq_alloc_failures); + return 1; + } + fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT, + 0, task_req, tag, fc_lun, io_req->port_id, + fnic->config.ra_tov, fnic->config.ed_tov); + + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); + + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + atomic_dec(&fnic->in_flight); + + return 0; +} + +struct fnic_rport_abort_io_iter_data { + struct fnic *fnic; + u32 port_id; + int term_cnt; +}; + +static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) +{ + struct fnic_rport_abort_io_iter_data *iter_data = data; + struct fnic *fnic = iter_data->fnic; + int abt_tag = scsi_cmd_to_rq(sc)->tag; + struct fnic_io_req *io_req; + spinlock_t *io_lock; + unsigned long flags; + struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; + struct scsi_lun fc_lun; + enum fnic_ioreq_state old_ioreq_state; + + io_lock = fnic_io_lock_tag(fnic, abt_tag); + spin_lock_irqsave(io_lock, flags); + + io_req = fnic_priv(sc)->io_req; + + if (!io_req || io_req->port_id != iter_data->port_id) { + spin_unlock_irqrestore(io_lock, flags); + return true; + } + + if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && + !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "fnic_rport_exch_reset dev rst not pending sc 0x%p\n", + sc); + spin_unlock_irqrestore(io_lock, flags); + return true; + } + + /* + * Found IO that is still pending with firmware and + * belongs to rport that went away + */ + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(io_lock, flags); + return true; + } + if (io_req->abts_done) { + shost_printk(KERN_ERR, fnic->lport->host, + "fnic_rport_exch_reset: io_req->abts_done is set " + "state is %s\n", + fnic_ioreq_state_to_str(fnic_priv(sc)->state)); + } + + if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) { + shost_printk(KERN_ERR, fnic->lport->host, + "rport_exch_reset " + "IO not yet issued %p tag 0x%x flags " + "%x state %d\n", + sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state); + } + old_ioreq_state = fnic_priv(sc)->state; + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; + fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; + if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { + atomic64_inc(&reset_stats->device_reset_terminates); + abt_tag |= FNIC_TAG_DEV_RST; + } + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "fnic_rport_exch_reset dev rst sc 0x%p\n", sc); + BUG_ON(io_req->abts_done); + + FNIC_SCSI_DBG(KERN_DEBUG, 
fnic->lport->host, + "fnic_rport_reset_exch: Issuing abts\n"); + + spin_unlock_irqrestore(io_lock, flags); + + /* Now queue the abort command to firmware */ + int_to_scsilun(sc->device->lun, &fc_lun); + + if (fnic_queue_abort_io_req(fnic, abt_tag, + FCPIO_ITMF_ABT_TASK_TERM, + fc_lun.scsi_lun, io_req)) { + /* + * Revert the cmd state back to old state, if + * it hasn't changed in between. This cmd will get + * aborted later by scsi_eh, or cleaned up during + * lun reset + */ + spin_lock_irqsave(io_lock, flags); + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) + fnic_priv(sc)->state = old_ioreq_state; + spin_unlock_irqrestore(io_lock, flags); + } else { + spin_lock_irqsave(io_lock, flags); + if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) + fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; + else + fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; + spin_unlock_irqrestore(io_lock, flags); + atomic64_inc(&term_stats->terminates); + iter_data->term_cnt++; + } + return true; +} + +static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) +{ + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; + struct fnic_rport_abort_io_iter_data iter_data = { + .fnic = fnic, + .port_id = port_id, + .term_cnt = 0, + }; + + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, + "fnic_rport_exch_reset called portid 0x%06x\n", + port_id); + + if (fnic->in_remove) + return; + + scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter, + &iter_data); + if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates)) + atomic64_set(&term_stats->max_terminates, iter_data.term_cnt); + +} + +void fnic_terminate_rport_io(struct fc_rport *rport) +{ + struct fc_rport_libfc_priv *rdata; + struct fc_lport *lport; + struct fnic *fnic; + + if (!rport) { + printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n"); + return; + } + rdata = rport->dd_data; + + if (!rdata) { + printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n"); + return; + } + lport = rdata->local_port; + + if (!lport) { + printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n"); + return; + } + fnic = lport_priv(lport); + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, "fnic_terminate_rport_io called" + " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n", + rport->port_name, rport->node_name, rport, + rport->port_id); + + if (fnic->in_remove) + return; + + fnic_rport_exch_reset(fnic, rport->port_id); +} + +/* + * This function is exported to SCSI for sending abort cmnds. + * A SCSI IO is represented by a io_req in the driver. + * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO. 
+ */ +int fnic_abort_cmd(struct scsi_cmnd *sc) +{ + struct request *const rq = scsi_cmd_to_rq(sc); + struct fc_lport *lp; + struct fnic *fnic; + struct fnic_io_req *io_req = NULL; + struct fc_rport *rport; + spinlock_t *io_lock; + unsigned long flags; + unsigned long start_time = 0; + int ret = SUCCESS; + u32 task_req = 0; + struct scsi_lun fc_lun; + struct fnic_stats *fnic_stats; + struct abort_stats *abts_stats; + struct terminate_stats *term_stats; + enum fnic_ioreq_state old_ioreq_state; + const int tag = rq->tag; + unsigned long abt_issued_time; + DECLARE_COMPLETION_ONSTACK(tm_done); + + /* Wait for rport to unblock */ + fc_block_scsi_eh(sc); + + /* Get local-port, check ready and link up */ + lp = shost_priv(sc->device->host); + + fnic = lport_priv(lp); + fnic_stats = &fnic->fnic_stats; + abts_stats = &fnic->fnic_stats.abts_stats; + term_stats = &fnic->fnic_stats.term_stats; + + rport = starget_to_rport(scsi_target(sc->device)); + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, + "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n", + rport->port_id, sc->device->lun, tag, fnic_priv(sc)->flags); + + fnic_priv(sc)->flags = FNIC_NO_FLAGS; + + if (lp->state != LPORT_ST_READY || !(lp->link_up)) { + ret = FAILED; + goto fnic_abort_cmd_end; + } + + /* + * Avoid a race between SCSI issuing the abort and the device + * completing the command. + * + * If the command is already completed by the fw cmpl code, + * we just return SUCCESS from here. This means that the abort + * succeeded. In the SCSI ML, since the timeout for command has + * happened, the completion won't actually complete the command + * and it will be considered as an aborted command + * + * .io_req will not be cleared except while holding io_req_lock. + */ + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); + io_req = fnic_priv(sc)->io_req; + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + goto fnic_abort_cmd_end; + } + + io_req->abts_done = &tm_done; + + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(io_lock, flags); + goto wait_pending; + } + + abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time); + if (abt_issued_time <= 6000) + atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec); + else if (abt_issued_time > 6000 && abt_issued_time <= 20000) + atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec); + else if (abt_issued_time > 20000 && abt_issued_time <= 30000) + atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec); + else if (abt_issued_time > 30000 && abt_issued_time <= 40000) + atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec); + else if (abt_issued_time > 40000 && abt_issued_time <= 50000) + atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec); + else if (abt_issued_time > 50000 && abt_issued_time <= 60000) + atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec); + else + atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec); + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "CDB Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time); + /* + * Command is still pending, need to abort it + * If the firmware completes the command after this point, + * the completion won't be done till mid-layer, since abort + * has already started. + */ + old_ioreq_state = fnic_priv(sc)->state; + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; + fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; + + spin_unlock_irqrestore(io_lock, flags); + + /* + * Check readiness of the remote port.
If the path to remote + * port is up, then send abts to the remote port to terminate + * the IO. Else, just locally terminate the IO in the firmware + */ + if (fc_remote_port_chkready(rport) == 0) + task_req = FCPIO_ITMF_ABT_TASK; + else { + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + task_req = FCPIO_ITMF_ABT_TASK_TERM; + } + + /* Now queue the abort command to firmware */ + int_to_scsilun(sc->device->lun, &fc_lun); + + if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun, + io_req)) { + spin_lock_irqsave(io_lock, flags); + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) + fnic_priv(sc)->state = old_ioreq_state; + io_req = fnic_priv(sc)->io_req; + if (io_req) + io_req->abts_done = NULL; + spin_unlock_irqrestore(io_lock, flags); + ret = FAILED; + goto fnic_abort_cmd_end; + } + if (task_req == FCPIO_ITMF_ABT_TASK) { + fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED; + atomic64_inc(&fnic_stats->abts_stats.aborts); + } else { + fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED; + atomic64_inc(&fnic_stats->term_stats.terminates); + } + + /* + * We queued an abort IO, wait for its completion. + * Once the firmware completes the abort command, it will + * wake up this thread. + */ + wait_pending: + wait_for_completion_timeout(&tm_done, + msecs_to_jiffies + (2 * fnic->config.ra_tov + + fnic->config.ed_tov)); + + /* Check the abort status */ + spin_lock_irqsave(io_lock, flags); + + io_req = fnic_priv(sc)->io_req; + if (!io_req) { + atomic64_inc(&fnic_stats->io_stats.ioreq_null); + spin_unlock_irqrestore(io_lock, flags); + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; + ret = FAILED; + goto fnic_abort_cmd_end; + } + io_req->abts_done = NULL; + + /* fw did not complete abort, timed out */ + if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) { + spin_unlock_irqrestore(io_lock, flags); + if (task_req == FCPIO_ITMF_ABT_TASK) { + atomic64_inc(&abts_stats->abort_drv_timeouts); + } else { + atomic64_inc(&term_stats->terminate_drv_timeouts); + } + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT; + ret = FAILED; + goto fnic_abort_cmd_end; + } + + /* IO out of order */ + + if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { + spin_unlock_irqrestore(io_lock, flags); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Issuing Host reset due to out of order IO\n"); + + ret = FAILED; + goto fnic_abort_cmd_end; + } + + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; + + start_time = io_req->start_time; + /* + * firmware completed the abort, check the status, + * free the io_req if successful. If abort fails, + * Device reset will clean the I/O. 
+ */ + if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS) { + fnic_priv(sc)->io_req = NULL; + } else { + ret = FAILED; + spin_unlock_irqrestore(io_lock, flags); + goto fnic_abort_cmd_end; + } + + spin_unlock_irqrestore(io_lock, flags); + + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + + /* Call SCSI completion function to complete the IO */ + sc->result = DID_ABORT << 16; + scsi_done(sc); + atomic64_dec(&fnic_stats->io_stats.active_ios); + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); + else + atomic64_inc(&fnic_stats->io_stats.io_completions); + +fnic_abort_cmd_end: + FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc, + jiffies_to_msecs(jiffies - start_time), + 0, ((u64)sc->cmnd[0] << 32 | + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + fnic_flags_and_state(sc)); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Returning from abort cmd type %x %s\n", task_req, + (ret == SUCCESS) ? + "SUCCESS" : "FAILED"); + return ret; +} + +static inline int fnic_queue_dr_io_req(struct fnic *fnic, + struct scsi_cmnd *sc, + struct fnic_io_req *io_req) +{ + struct vnic_wq_copy *wq = &fnic->wq_copy[0]; + struct Scsi_Host *host = fnic->lport->host; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; + struct scsi_lun fc_lun; + int ret = 0; + unsigned long intr_flags; + unsigned int tag = scsi_cmd_to_rq(sc)->tag; + + if (tag == SCSI_NO_TAG) + tag = io_req->tag; + + spin_lock_irqsave(host->host_lock, intr_flags); + if (unlikely(fnic_chk_state_flags_locked(fnic, + FNIC_FLAGS_IO_BLOCKED))) { + spin_unlock_irqrestore(host->host_lock, intr_flags); + return FAILED; + } else + atomic_inc(&fnic->in_flight); + spin_unlock_irqrestore(host->host_lock, intr_flags); + + spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + free_wq_copy_descs(fnic, wq); + + if (!vnic_wq_copy_desc_avail(wq)) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "queue_dr_io_req failure - no descriptors\n"); + atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures); + ret = -EAGAIN; + goto lr_io_req_end; + } + + /* fill in the lun info */ + int_to_scsilun(sc->device->lun, &fc_lun); + + tag |= FNIC_TAG_DEV_RST; + fnic_queue_wq_copy_desc_itmf(wq, tag, + 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG, + fc_lun.scsi_lun, io_req->port_id, + fnic->config.ra_tov, fnic->config.ed_tov); + + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); + +lr_io_req_end: + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); + atomic_dec(&fnic->in_flight); + + return ret; +} + +struct fnic_pending_aborts_iter_data { + struct fnic *fnic; + struct scsi_cmnd *lr_sc; + struct scsi_device *lun_dev; + int ret; +}; + +static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) +{ + struct fnic_pending_aborts_iter_data *iter_data = data; + struct fnic *fnic = iter_data->fnic; + struct scsi_device *lun_dev = iter_data->lun_dev; + int abt_tag = scsi_cmd_to_rq(sc)->tag; + struct fnic_io_req *io_req; + spinlock_t *io_lock; + unsigned long flags; + struct scsi_lun fc_lun; + DECLARE_COMPLETION_ONSTACK(tm_done); + enum fnic_ioreq_state old_ioreq_state; + + if (sc == iter_data->lr_sc || sc->device != lun_dev) + return true; + + 
io_lock = fnic_io_lock_tag(fnic, abt_tag); + spin_lock_irqsave(io_lock, flags); + io_req = fnic_priv(sc)->io_req; + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + return true; + } + + /* + * Found IO that is still pending with firmware and + * belongs to the LUN that we are resetting + */ + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Found IO in %s on lun\n", + fnic_ioreq_state_to_str(fnic_priv(sc)->state)); + + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(io_lock, flags); + return true; + } + if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && + (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) { + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "%s dev rst not pending sc 0x%p\n", __func__, + sc); + spin_unlock_irqrestore(io_lock, flags); + return true; + } + + if (io_req->abts_done) + shost_printk(KERN_ERR, fnic->lport->host, + "%s: io_req->abts_done is set state is %s\n", + __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); + old_ioreq_state = fnic_priv(sc)->state; + /* + * Any pending IO issued prior to reset is expected to be + * in abts pending state, if not we need to set + * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending. + * When IO is completed, the IO will be handed over and + * handled in this function. + */ + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; + + BUG_ON(io_req->abts_done); + + if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { + abt_tag |= FNIC_TAG_DEV_RST; + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "%s: dev rst sc 0x%p\n", __func__, sc); + } + + fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; + io_req->abts_done = &tm_done; + spin_unlock_irqrestore(io_lock, flags); + + /* Now queue the abort command to firmware */ + int_to_scsilun(sc->device->lun, &fc_lun); + + if (fnic_queue_abort_io_req(fnic, abt_tag, + FCPIO_ITMF_ABT_TASK_TERM, + fc_lun.scsi_lun, io_req)) { + spin_lock_irqsave(io_lock, flags); + io_req = fnic_priv(sc)->io_req; + if (io_req) + io_req->abts_done = NULL; + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) + fnic_priv(sc)->state = old_ioreq_state; + spin_unlock_irqrestore(io_lock, flags); + iter_data->ret = FAILED; + return false; + } else { + spin_lock_irqsave(io_lock, flags); + if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) + fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; + spin_unlock_irqrestore(io_lock, flags); + } + fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; + + wait_for_completion_timeout(&tm_done, msecs_to_jiffies + (fnic->config.ed_tov)); + + /* Recheck cmd state to check if it is now aborted */ + spin_lock_irqsave(io_lock, flags); + io_req = fnic_priv(sc)->io_req; + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; + return true; + } + + io_req->abts_done = NULL; + + /* if abort is still pending with fw, fail */ + if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) { + spin_unlock_irqrestore(io_lock, flags); + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE; + iter_data->ret = FAILED; + return false; + } + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; + + /* original sc used for lr is handled by dev reset code */ + if (sc != iter_data->lr_sc) + fnic_priv(sc)->io_req = NULL; + spin_unlock_irqrestore(io_lock, flags); + + /* original sc used for lr is handled by dev reset code */ + if (sc != iter_data->lr_sc) { + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + } + + /* + * Any IO is returned during reset, it needs to call scsi_done + * to return the 
scsi_cmnd to upper layer. + */ + /* Set result to let upper SCSI layer retry */ + sc->result = DID_RESET << 16; + scsi_done(sc); + + return true; +} + +/* + * Clean up any pending aborts on the lun + * For each outstanding IO on this lun, whose abort is not completed by fw, + * issue a local abort. Wait for abort to complete. Return 0 if all commands + * successfully aborted, 1 otherwise + */ +static int fnic_clean_pending_aborts(struct fnic *fnic, + struct scsi_cmnd *lr_sc, + bool new_sc) + +{ + int ret = 0; + struct fnic_pending_aborts_iter_data iter_data = { + .fnic = fnic, + .lun_dev = lr_sc->device, + .ret = SUCCESS, + }; + + iter_data.lr_sc = lr_sc; + + scsi_host_busy_iter(fnic->lport->host, + fnic_pending_aborts_iter, &iter_data); + if (iter_data.ret == FAILED) { + ret = iter_data.ret; + goto clean_pending_aborts_end; + } + schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov)); + + /* walk again to check, if IOs are still pending in fw */ + if (fnic_is_abts_pending(fnic, lr_sc)) + ret = 1; + +clean_pending_aborts_end: + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "%s: exit status: %d\n", __func__, ret); + return ret; +} + +/* + * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN + * fail to get aborted. It calls driver's eh_device_reset with a SCSI command + * on the LUN. + */ +int fnic_device_reset(struct scsi_cmnd *sc) +{ + struct request *rq = scsi_cmd_to_rq(sc); + struct fc_lport *lp; + struct fnic *fnic; + struct fnic_io_req *io_req = NULL; + struct fc_rport *rport; + int status; + int ret = FAILED; + spinlock_t *io_lock; + unsigned long flags; + unsigned long start_time = 0; + struct scsi_lun fc_lun; + struct fnic_stats *fnic_stats; + struct reset_stats *reset_stats; + int tag = rq->tag; + DECLARE_COMPLETION_ONSTACK(tm_done); + bool new_sc = 0; + + /* Wait for rport to unblock */ + fc_block_scsi_eh(sc); + + /* Get local-port, check ready and link up */ + lp = shost_priv(sc->device->host); + + fnic = lport_priv(lp); + fnic_stats = &fnic->fnic_stats; + reset_stats = &fnic->fnic_stats.reset_stats; + + atomic64_inc(&reset_stats->device_resets); + + rport = starget_to_rport(scsi_target(sc->device)); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n", + rport->port_id, sc->device->lun, sc); + + if (lp->state != LPORT_ST_READY || !(lp->link_up)) + goto fnic_device_reset_end; + + /* Check if remote port up */ + if (fc_remote_port_chkready(rport)) { + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + goto fnic_device_reset_end; + } + + fnic_priv(sc)->flags = FNIC_DEVICE_RESET; + + if (unlikely(tag < 0)) { + /* + * For device reset issued through sg3utils, we let + * only one LUN_RESET to go through and use a special + * tag equal to max_tag_id so that we don't have to allocate + * or free it. It won't interact with tags + * allocated by mid layer. + */ + mutex_lock(&fnic->sgreset_mutex); + tag = fnic->fnic_max_tag_id; + new_sc = 1; + fnic->sgreset_sc = sc; + io_lock = &fnic->sgreset_lock; + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "fcid: 0x%x lun: 0x%llx flags: 0x%x tag: 0x%x Issuing sgreset\n", + rport->port_id, sc->device->lun, fnic_priv(sc)->flags, tag); + } else + io_lock = fnic_io_lock_hash(fnic, sc); + + spin_lock_irqsave(io_lock, flags); + io_req = fnic_priv(sc)->io_req; + + /* + * If there is a io_req attached to this command, then use it, + * else allocate a new one. 
+ */ + if (!io_req) { + io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + goto fnic_device_reset_end; + } + memset(io_req, 0, sizeof(*io_req)); + io_req->port_id = rport->port_id; + io_req->tag = tag; + io_req->sc = sc; + fnic_priv(sc)->io_req = io_req; + } + io_req->dr_done = &tm_done; + fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; + fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE; + spin_unlock_irqrestore(io_lock, flags); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag); + + /* + * issue the device reset, if enqueue failed, clean up the ioreq + * and break assoc with scsi cmd + */ + if (fnic_queue_dr_io_req(fnic, sc, io_req)) { + spin_lock_irqsave(io_lock, flags); + io_req = fnic_priv(sc)->io_req; + if (io_req) + io_req->dr_done = NULL; + goto fnic_device_reset_clean; + } + spin_lock_irqsave(io_lock, flags); + fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED; + spin_unlock_irqrestore(io_lock, flags); + + /* + * Wait on the local completion for LUN reset. The io_req may be + * freed while we wait since we hold no lock. + */ + wait_for_completion_timeout(&tm_done, + msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); + + spin_lock_irqsave(io_lock, flags); + io_req = fnic_priv(sc)->io_req; + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "io_req is null tag 0x%x sc 0x%p\n", tag, sc); + goto fnic_device_reset_end; + } + io_req->dr_done = NULL; + + status = fnic_priv(sc)->lr_status; + + /* + * If lun reset not completed, bail out with failed. io_req + * gets cleaned up during higher levels of EH + */ + if (status == FCPIO_INVALID_CODE) { + atomic64_inc(&reset_stats->device_reset_timeouts); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Device reset timed out\n"); + fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT; + spin_unlock_irqrestore(io_lock, flags); + int_to_scsilun(sc->device->lun, &fc_lun); + /* + * Issue abort and terminate on device reset request. + * If q'ing of terminate fails, retry it after a delay. 
+ */ + while (1) { + spin_lock_irqsave(io_lock, flags); + if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) { + spin_unlock_irqrestore(io_lock, flags); + break; + } + spin_unlock_irqrestore(io_lock, flags); + if (fnic_queue_abort_io_req(fnic, + tag | FNIC_TAG_DEV_RST, + FCPIO_ITMF_ABT_TASK_TERM, + fc_lun.scsi_lun, io_req)) { + wait_for_completion_timeout(&tm_done, + msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT)); + } else { + spin_lock_irqsave(io_lock, flags); + fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; + io_req->abts_done = &tm_done; + spin_unlock_irqrestore(io_lock, flags); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Abort and terminate issued on Device reset " + "tag 0x%x sc 0x%p\n", tag, sc); + break; + } + } + while (1) { + spin_lock_irqsave(io_lock, flags); + if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { + spin_unlock_irqrestore(io_lock, flags); + wait_for_completion_timeout(&tm_done, + msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); + break; + } else { + io_req = fnic_priv(sc)->io_req; + io_req->abts_done = NULL; + goto fnic_device_reset_clean; + } + } + } else { + spin_unlock_irqrestore(io_lock, flags); + } + + /* Completed, but not successful, clean up the io_req, return fail */ + if (status != FCPIO_SUCCESS) { + spin_lock_irqsave(io_lock, flags); + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, + "Device reset completed - failed\n"); + io_req = fnic_priv(sc)->io_req; + goto fnic_device_reset_clean; + } + + /* + * Clean up any aborts on this lun that have still not + * completed. If any of these fail, then LUN reset fails. + * clean_pending_aborts cleans all cmds on this lun except + * the lun reset cmd. If all cmds get cleaned, the lun reset + * succeeds + */ + if (fnic_clean_pending_aborts(fnic, sc, new_sc)) { + spin_lock_irqsave(io_lock, flags); + io_req = fnic_priv(sc)->io_req; + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Device reset failed" + " since could not abort all IOs\n"); + goto fnic_device_reset_clean; + } + + /* Clean lun reset command */ + spin_lock_irqsave(io_lock, flags); + io_req = fnic_priv(sc)->io_req; + if (io_req) + /* Completed, and successful */ + ret = SUCCESS; + +fnic_device_reset_clean: + if (io_req) + fnic_priv(sc)->io_req = NULL; + + spin_unlock_irqrestore(io_lock, flags); + + if (io_req) { + start_time = io_req->start_time; + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + } + +fnic_device_reset_end: + FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc, + jiffies_to_msecs(jiffies - start_time), + 0, ((u64)sc->cmnd[0] << 32 | + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + fnic_flags_and_state(sc)); + + if (new_sc) { + fnic->sgreset_sc = NULL; + mutex_unlock(&fnic->sgreset_mutex); + } + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Returning from device reset %s\n", + (ret == SUCCESS) ? 
+ "SUCCESS" : "FAILED"); + + if (ret == FAILED) + atomic64_inc(&reset_stats->device_reset_failures); + + return ret; +} + +/* Clean up all IOs, clean up libFC local port */ +int fnic_reset(struct Scsi_Host *shost) +{ + struct fc_lport *lp; + struct fnic *fnic; + int ret = 0; + struct reset_stats *reset_stats; + + lp = shost_priv(shost); + fnic = lport_priv(lp); + reset_stats = &fnic->fnic_stats.reset_stats; + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "fnic_reset called\n"); + + atomic64_inc(&reset_stats->fnic_resets); + + /* + * Reset local port, this will clean up libFC exchanges, + * reset remote port sessions, and if link is up, begin flogi + */ + ret = fc_lport_reset(lp); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Returning from fnic reset %s\n", + (ret == 0) ? + "SUCCESS" : "FAILED"); + + if (ret == 0) + atomic64_inc(&reset_stats->fnic_reset_completions); + else + atomic64_inc(&reset_stats->fnic_reset_failures); + + return ret; +} + +/* + * SCSI Error handling calls driver's eh_host_reset if all prior + * error handling levels return FAILED. If host reset completes + * successfully, and if link is up, then Fabric login begins. + * + * Host Reset is the highest level of error recovery. If this fails, then + * host is offlined by SCSI. + * + */ +int fnic_host_reset(struct scsi_cmnd *sc) +{ + int ret; + unsigned long wait_host_tmo; + struct Scsi_Host *shost = sc->device->host; + struct fc_lport *lp = shost_priv(shost); + struct fnic *fnic = lport_priv(lp); + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (!fnic->internal_reset_inprogress) { + fnic->internal_reset_inprogress = true; + } else { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "host reset in progress skipping another host reset\n"); + return SUCCESS; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + /* + * If fnic_reset is successful, wait for fabric login to complete + * scsi-ml tries to send a TUR to every device if host reset is + * successful, so before returning to scsi, fabric should be up + */ + ret = (fnic_reset(shost) == 0) ? 
SUCCESS : FAILED; + if (ret == SUCCESS) { + wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ; + ret = FAILED; + while (time_before(jiffies, wait_host_tmo)) { + if ((lp->state == LPORT_ST_READY) && + (lp->link_up)) { + ret = SUCCESS; + break; + } + ssleep(1); + } + } + + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->internal_reset_inprogress = false; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return ret; +} + +/* + * This fxn is called from libFC when host is removed + */ +void fnic_scsi_abort_io(struct fc_lport *lp) +{ + int err = 0; + unsigned long flags; + enum fnic_state old_state; + struct fnic *fnic = lport_priv(lp); + DECLARE_COMPLETION_ONSTACK(remove_wait); + + /* Issue firmware reset for fnic, wait for reset to complete */ +retry_fw_reset: + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) && + fnic->link_events) { + /* fw reset is in progress, poll for its completion */ + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + schedule_timeout(msecs_to_jiffies(100)); + goto retry_fw_reset; + } + + fnic->remove_wait = &remove_wait; + old_state = fnic->state; + fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; + fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + err = fnic_fw_reset_handler(fnic); + if (err) { + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) + fnic->state = old_state; + fnic->remove_wait = NULL; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + /* Wait for firmware reset to complete */ + wait_for_completion_timeout(&remove_wait, + msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT)); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->remove_wait = NULL; + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "fnic_scsi_abort_io %s\n", + (fnic->state == FNIC_IN_ETH_MODE) ? 
+ "SUCCESS" : "FAILED"); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + +} + +/* + * This fxn called from libFC to clean up driver IO state on link down + */ +void fnic_scsi_cleanup(struct fc_lport *lp) +{ + unsigned long flags; + enum fnic_state old_state; + struct fnic *fnic = lport_priv(lp); + + /* issue fw reset */ +retry_fw_reset: + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { + /* fw reset is in progress, poll for its completion */ + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + schedule_timeout(msecs_to_jiffies(100)); + goto retry_fw_reset; + } + old_state = fnic->state; + fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; + fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fnic_fw_reset_handler(fnic)) { + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) + fnic->state = old_state; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + } + +} + +void fnic_empty_scsi_cleanup(struct fc_lport *lp) +{ +} + +void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) +{ + struct fnic *fnic = lport_priv(lp); + + /* Non-zero sid, nothing to do */ + if (sid) + goto call_fc_exch_mgr_reset; + + if (did) { + fnic_rport_exch_reset(fnic, did); + goto call_fc_exch_mgr_reset; + } + + /* + * sid = 0, did = 0 + * link down or device being removed + */ + if (!fnic->in_remove) + fnic_scsi_cleanup(lp); + else + fnic_scsi_abort_io(lp); + + /* call libFC exch mgr reset to reset its exchanges */ +call_fc_exch_mgr_reset: + fc_exch_mgr_reset(lp, sid, did); + +} + +static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data) +{ + struct fnic_pending_aborts_iter_data *iter_data = data; + struct fnic *fnic = iter_data->fnic; + int cmd_state; + struct fnic_io_req *io_req; + spinlock_t *io_lock; + unsigned long flags; + + /* + * ignore this lun reset cmd or cmds that do not belong to + * this lun + */ + if (iter_data->lr_sc && sc == iter_data->lr_sc) + return true; + if (iter_data->lun_dev && sc->device != iter_data->lun_dev) + return true; + + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); + + io_req = fnic_priv(sc)->io_req; + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + return true; + } + + /* + * Found IO that is still pending with firmware and + * belongs to the LUN that we are resetting + */ + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "Found IO in %s on lun\n", + fnic_ioreq_state_to_str(fnic_priv(sc)->state)); + cmd_state = fnic_priv(sc)->state; + spin_unlock_irqrestore(io_lock, flags); + if (cmd_state == FNIC_IOREQ_ABTS_PENDING) + iter_data->ret = 1; + + return iter_data->ret ? false : true; +} + +/* + * fnic_is_abts_pending() is a helper function that + * walks through tag map to check if there is any IOs pending,if there is one, + * then it returns 1 (true), otherwise 0 (false) + * if @lr_sc is non NULL, then it checks IOs specific to particular LUN, + * otherwise, it checks for all IOs. 
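+ *
+ * Illustrative usage (not taken from this file): a reset path can poll
+ * this helper with a short delay between calls and treat the LUN reset
+ * as complete only once it returns 0 for that LUN.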
+ */ +int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc) +{ + struct fnic_pending_aborts_iter_data iter_data = { + .fnic = fnic, + .lun_dev = NULL, + .ret = 0, + }; + + if (lr_sc) { + iter_data.lun_dev = lr_sc->device; + iter_data.lr_sc = lr_sc; + } + + /* walk again to check, if IOs are still pending in fw */ + scsi_host_busy_iter(fnic->lport->host, + fnic_abts_pending_iter, &iter_data); + + return iter_data.ret; +} diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h new file mode 100644 index 000000000..bdf639eef --- /dev/null +++ b/drivers/scsi/fnic/fnic_stats.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2013 Cisco Systems, Inc. All rights reserved. */ +#ifndef _FNIC_STATS_H_ +#define _FNIC_STATS_H_ + +struct stats_timestamps { + struct timespec64 last_reset_time; + struct timespec64 last_read_time; +}; + +struct io_path_stats { + atomic64_t active_ios; + atomic64_t max_active_ios; + atomic64_t io_completions; + atomic64_t io_failures; + atomic64_t ioreq_null; + atomic64_t alloc_failures; + atomic64_t sc_null; + atomic64_t io_not_found; + atomic64_t num_ios; + atomic64_t io_btw_0_to_10_msec; + atomic64_t io_btw_10_to_100_msec; + atomic64_t io_btw_100_to_500_msec; + atomic64_t io_btw_500_to_5000_msec; + atomic64_t io_btw_5000_to_10000_msec; + atomic64_t io_btw_10000_to_30000_msec; + atomic64_t io_greater_than_30000_msec; + atomic64_t current_max_io_time; +}; + +struct abort_stats { + atomic64_t aborts; + atomic64_t abort_failures; + atomic64_t abort_drv_timeouts; + atomic64_t abort_fw_timeouts; + atomic64_t abort_io_not_found; + atomic64_t abort_issued_btw_0_to_6_sec; + atomic64_t abort_issued_btw_6_to_20_sec; + atomic64_t abort_issued_btw_20_to_30_sec; + atomic64_t abort_issued_btw_30_to_40_sec; + atomic64_t abort_issued_btw_40_to_50_sec; + atomic64_t abort_issued_btw_50_to_60_sec; + atomic64_t abort_issued_greater_than_60_sec; +}; + +struct terminate_stats { + atomic64_t terminates; + atomic64_t max_terminates; + atomic64_t terminate_drv_timeouts; + atomic64_t terminate_fw_timeouts; + atomic64_t terminate_io_not_found; + atomic64_t terminate_failures; +}; + +struct reset_stats { + atomic64_t device_resets; + atomic64_t device_reset_failures; + atomic64_t device_reset_aborts; + atomic64_t device_reset_timeouts; + atomic64_t device_reset_terminates; + atomic64_t fw_resets; + atomic64_t fw_reset_completions; + atomic64_t fw_reset_failures; + atomic64_t fnic_resets; + atomic64_t fnic_reset_completions; + atomic64_t fnic_reset_failures; +}; + +struct fw_stats { + atomic64_t active_fw_reqs; + atomic64_t max_fw_reqs; + atomic64_t fw_out_of_resources; + atomic64_t io_fw_errs; +}; + +struct vlan_stats { + atomic64_t vlan_disc_reqs; + atomic64_t resp_withno_vlanID; + atomic64_t sol_expiry_count; + atomic64_t flogi_rejects; +}; + +struct misc_stats { + u64 last_isr_time; + u64 last_ack_time; + atomic64_t max_isr_jiffies; + atomic64_t max_isr_time_ms; + atomic64_t corr_work_done; + atomic64_t isr_count; + atomic64_t max_cq_entries; + atomic64_t ack_index_out_of_range; + atomic64_t data_count_mismatch; + atomic64_t fcpio_timeout; + atomic64_t fcpio_aborted; + atomic64_t sgl_invalid; + atomic64_t mss_invalid; + atomic64_t abts_cpwq_alloc_failures; + atomic64_t devrst_cpwq_alloc_failures; + atomic64_t io_cpwq_alloc_failures; + atomic64_t no_icmnd_itmf_cmpls; + atomic64_t check_condition; + atomic64_t queue_fulls; + atomic64_t rport_not_ready; + atomic64_t frame_errors; + atomic64_t current_port_speed; +}; + +struct 
fnic_stats { + struct stats_timestamps stats_timestamps; + struct io_path_stats io_stats; + struct abort_stats abts_stats; + struct terminate_stats term_stats; + struct reset_stats reset_stats; + struct fw_stats fw_stats; + struct vlan_stats vlan_stats; + struct misc_stats misc_stats; +}; + +struct stats_debug_info { + char *debug_buffer; + void *i_private; + int buf_size; + int buffer_len; +}; + +int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *); +void fnic_stats_debugfs_init(struct fnic *); +void fnic_stats_debugfs_remove(struct fnic *); +#endif /* _FNIC_STATS_H_ */ diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c new file mode 100644 index 000000000..be0d7c57b --- /dev/null +++ b/drivers/scsi/fnic/fnic_trace.c @@ -0,0 +1,826 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2012 Cisco Systems, Inc. All rights reserved. + +#include +#include +#include +#include +#include +#include +#include +#include "fnic_io.h" +#include "fnic.h" + +unsigned int trace_max_pages; +static int fnic_max_trace_entries; + +static unsigned long fnic_trace_buf_p; +static DEFINE_SPINLOCK(fnic_trace_lock); + +static fnic_trace_dbg_t fnic_trace_entries; +int fnic_tracing_enabled = 1; + +/* static char *fnic_fc_ctlr_trace_buf_p; */ + +static int fc_trace_max_entries; +static unsigned long fnic_fc_ctlr_trace_buf_p; +static fnic_trace_dbg_t fc_trace_entries; +int fnic_fc_tracing_enabled = 1; +int fnic_fc_trace_cleared = 1; +static DEFINE_SPINLOCK(fnic_fc_trace_lock); + + +/* + * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information + * + * Description: + * This routine gets next available trace buffer entry location @wr_idx + * from allocated trace buffer pages and give that memory location + * to user to store the trace information. + * + * Return Value: + * This routine returns pointer to next available trace entry + * @fnic_buf_head for user to fill trace information. + */ +fnic_trace_data_t *fnic_trace_get_buf(void) +{ + unsigned long fnic_buf_head; + unsigned long flags; + + spin_lock_irqsave(&fnic_trace_lock, flags); + + /* + * Get next available memory location for writing trace information + * at @wr_idx and increment @wr_idx + */ + fnic_buf_head = + fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx]; + fnic_trace_entries.wr_idx++; + + /* + * Verify if trace buffer is full then change wd_idx to + * start from zero + */ + if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries) + fnic_trace_entries.wr_idx = 0; + + /* + * Verify if write index @wr_idx and read index @rd_idx are same then + * increment @rd_idx to move to next entry in trace buffer + */ + if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) { + fnic_trace_entries.rd_idx++; + if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries) + fnic_trace_entries.rd_idx = 0; + } + spin_unlock_irqrestore(&fnic_trace_lock, flags); + return (fnic_trace_data_t *)fnic_buf_head; +} + +/* + * fnic_get_trace_data - Copy trace buffer to a memory file + * @fnic_dbgfs_t: pointer to debugfs trace buffer + * + * Description: + * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t + * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in + * the log and process the log until the end of the buffer. Then it will gather + * from the beginning of the log and process until the current entry @wr_idx. 
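+ *
+ * Worked example (illustrative values only): with 8 trace entries,
+ * rd_idx = 5 and wr_idx = 3, entries are dumped in the order
+ * 5, 6, 7, 0, 1, 2, i.e. oldest first, stopping just before the entry
+ * that wr_idx points at.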
+ * + * Return Value: + * This routine returns the amount of bytes that were dumped into fnic_dbgfs_t + */ +int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt) +{ + int rd_idx; + int wr_idx; + int len = 0; + unsigned long flags; + char str[KSYM_SYMBOL_LEN]; + struct timespec64 val; + fnic_trace_data_t *tbp; + + spin_lock_irqsave(&fnic_trace_lock, flags); + rd_idx = fnic_trace_entries.rd_idx; + wr_idx = fnic_trace_entries.wr_idx; + if (wr_idx < rd_idx) { + while (1) { + /* Start from read index @rd_idx */ + tbp = (fnic_trace_data_t *) + fnic_trace_entries.page_offset[rd_idx]; + if (!tbp) { + spin_unlock_irqrestore(&fnic_trace_lock, flags); + return 0; + } + /* Convert function pointer to function name */ + if (sizeof(unsigned long) < 8) { + sprint_symbol(str, tbp->fnaddr.low); + jiffies_to_timespec64(tbp->timestamp.low, &val); + } else { + sprint_symbol(str, tbp->fnaddr.val); + jiffies_to_timespec64(tbp->timestamp.val, &val); + } + /* + * Dump trace buffer entry to memory file + * and increment read index @rd_idx + */ + len += scnprintf(fnic_dbgfs_prt->buffer + len, + (trace_max_pages * PAGE_SIZE * 3) - len, + "%16llu.%09lu %-50s %8x %8x %16llx %16llx " + "%16llx %16llx %16llx\n", (u64)val.tv_sec, + val.tv_nsec, str, tbp->host_no, tbp->tag, + tbp->data[0], tbp->data[1], tbp->data[2], + tbp->data[3], tbp->data[4]); + rd_idx++; + /* + * If rd_idx is reached to maximum trace entries + * then move rd_idx to zero + */ + if (rd_idx > (fnic_max_trace_entries-1)) + rd_idx = 0; + /* + * Continue dumping trace buffer entries into + * memory file till rd_idx reaches write index + */ + if (rd_idx == wr_idx) + break; + } + } else if (wr_idx > rd_idx) { + while (1) { + /* Start from read index @rd_idx */ + tbp = (fnic_trace_data_t *) + fnic_trace_entries.page_offset[rd_idx]; + if (!tbp) { + spin_unlock_irqrestore(&fnic_trace_lock, flags); + return 0; + } + /* Convert function pointer to function name */ + if (sizeof(unsigned long) < 8) { + sprint_symbol(str, tbp->fnaddr.low); + jiffies_to_timespec64(tbp->timestamp.low, &val); + } else { + sprint_symbol(str, tbp->fnaddr.val); + jiffies_to_timespec64(tbp->timestamp.val, &val); + } + /* + * Dump trace buffer entry to memory file + * and increment read index @rd_idx + */ + len += scnprintf(fnic_dbgfs_prt->buffer + len, + (trace_max_pages * PAGE_SIZE * 3) - len, + "%16llu.%09lu %-50s %8x %8x %16llx %16llx " + "%16llx %16llx %16llx\n", (u64)val.tv_sec, + val.tv_nsec, str, tbp->host_no, tbp->tag, + tbp->data[0], tbp->data[1], tbp->data[2], + tbp->data[3], tbp->data[4]); + rd_idx++; + /* + * Continue dumping trace buffer entries into + * memory file till rd_idx reaches write index + */ + if (rd_idx == wr_idx) + break; + } + } + spin_unlock_irqrestore(&fnic_trace_lock, flags); + return len; +} + +/* + * fnic_get_stats_data - Copy fnic stats buffer to a memory file + * @fnic_dbgfs_t: pointer to debugfs fnic stats buffer + * + * Description: + * This routine gathers the fnic stats debugfs data from the fnic_stats struct + * and dumps it to stats_debug_info. 
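+ * As a side effect it also refreshes stats_timestamps.last_read_time with
+ * the current time, so the "delta since last read" reported by the next
+ * read is measured from this call.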
+ * + * Return Value: + * This routine returns the amount of bytes that were dumped into + * stats_debug_info + */ +int fnic_get_stats_data(struct stats_debug_info *debug, + struct fnic_stats *stats) +{ + int len = 0; + int buf_size = debug->buf_size; + struct timespec64 val1, val2; + + ktime_get_real_ts64(&val1); + len = scnprintf(debug->debug_buffer + len, buf_size - len, + "------------------------------------------\n" + "\t\tTime\n" + "------------------------------------------\n"); + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "Current time : [%lld:%ld]\n" + "Last stats reset time: [%lld:%09ld]\n" + "Last stats read time: [%lld:%ld]\n" + "delta since last reset: [%lld:%ld]\n" + "delta since last read: [%lld:%ld]\n", + (s64)val1.tv_sec, val1.tv_nsec, + (s64)stats->stats_timestamps.last_reset_time.tv_sec, + stats->stats_timestamps.last_reset_time.tv_nsec, + (s64)stats->stats_timestamps.last_read_time.tv_sec, + stats->stats_timestamps.last_read_time.tv_nsec, + (s64)timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec, + timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec, + (s64)timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_sec, + timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec); + + stats->stats_timestamps.last_read_time = val1; + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "------------------------------------------\n" + "\t\tIO Statistics\n" + "------------------------------------------\n"); + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "Number of Active IOs: %lld\nMaximum Active IOs: %lld\n" + "Number of IOs: %lld\nNumber of IO Completions: %lld\n" + "Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n" + "Number of Memory alloc Failures: %lld\n" + "Number of IOREQ Null: %lld\n" + "Number of SCSI cmd pointer Null: %lld\n" + + "\nIO completion times: \n" + " < 10 ms : %lld\n" + " 10 ms - 100 ms : %lld\n" + " 100 ms - 500 ms : %lld\n" + " 500 ms - 5 sec: %lld\n" + " 5 sec - 10 sec: %lld\n" + " 10 sec - 30 sec: %lld\n" + " > 30 sec: %lld\n", + (u64)atomic64_read(&stats->io_stats.active_ios), + (u64)atomic64_read(&stats->io_stats.max_active_ios), + (u64)atomic64_read(&stats->io_stats.num_ios), + (u64)atomic64_read(&stats->io_stats.io_completions), + (u64)atomic64_read(&stats->io_stats.io_failures), + (u64)atomic64_read(&stats->io_stats.io_not_found), + (u64)atomic64_read(&stats->io_stats.alloc_failures), + (u64)atomic64_read(&stats->io_stats.ioreq_null), + (u64)atomic64_read(&stats->io_stats.sc_null), + (u64)atomic64_read(&stats->io_stats.io_btw_0_to_10_msec), + (u64)atomic64_read(&stats->io_stats.io_btw_10_to_100_msec), + (u64)atomic64_read(&stats->io_stats.io_btw_100_to_500_msec), + (u64)atomic64_read(&stats->io_stats.io_btw_500_to_5000_msec), + (u64)atomic64_read(&stats->io_stats.io_btw_5000_to_10000_msec), + (u64)atomic64_read(&stats->io_stats.io_btw_10000_to_30000_msec), + (u64)atomic64_read(&stats->io_stats.io_greater_than_30000_msec)); + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "\nCurrent Max IO time : %lld\n", + (u64)atomic64_read(&stats->io_stats.current_max_io_time)); + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tAbort Statistics\n" + "------------------------------------------\n"); + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "Number of Aborts: %lld\n" + "Number of Abort Failures: %lld\n" + "Number of Abort Driver 
Timeouts: %lld\n" + "Number of Abort FW Timeouts: %lld\n" + "Number of Abort IO NOT Found: %lld\n" + + "Abort issued times: \n" + " < 6 sec : %lld\n" + " 6 sec - 20 sec : %lld\n" + " 20 sec - 30 sec : %lld\n" + " 30 sec - 40 sec : %lld\n" + " 40 sec - 50 sec : %lld\n" + " 50 sec - 60 sec : %lld\n" + " > 60 sec: %lld\n", + + (u64)atomic64_read(&stats->abts_stats.aborts), + (u64)atomic64_read(&stats->abts_stats.abort_failures), + (u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts), + (u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts), + (u64)atomic64_read(&stats->abts_stats.abort_io_not_found), + (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_0_to_6_sec), + (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_6_to_20_sec), + (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_20_to_30_sec), + (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_30_to_40_sec), + (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_40_to_50_sec), + (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_50_to_60_sec), + (u64)atomic64_read(&stats->abts_stats.abort_issued_greater_than_60_sec)); + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tTerminate Statistics\n" + "------------------------------------------\n"); + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "Number of Terminates: %lld\n" + "Maximum Terminates: %lld\n" + "Number of Terminate Driver Timeouts: %lld\n" + "Number of Terminate FW Timeouts: %lld\n" + "Number of Terminate IO NOT Found: %lld\n" + "Number of Terminate Failures: %lld\n", + (u64)atomic64_read(&stats->term_stats.terminates), + (u64)atomic64_read(&stats->term_stats.max_terminates), + (u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts), + (u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts), + (u64)atomic64_read(&stats->term_stats.terminate_io_not_found), + (u64)atomic64_read(&stats->term_stats.terminate_failures)); + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tReset Statistics\n" + "------------------------------------------\n"); + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "Number of Device Resets: %lld\n" + "Number of Device Reset Failures: %lld\n" + "Number of Device Reset Aborts: %lld\n" + "Number of Device Reset Timeouts: %lld\n" + "Number of Device Reset Terminates: %lld\n" + "Number of FW Resets: %lld\n" + "Number of FW Reset Completions: %lld\n" + "Number of FW Reset Failures: %lld\n" + "Number of Fnic Reset: %lld\n" + "Number of Fnic Reset Completions: %lld\n" + "Number of Fnic Reset Failures: %lld\n", + (u64)atomic64_read(&stats->reset_stats.device_resets), + (u64)atomic64_read(&stats->reset_stats.device_reset_failures), + (u64)atomic64_read(&stats->reset_stats.device_reset_aborts), + (u64)atomic64_read(&stats->reset_stats.device_reset_timeouts), + (u64)atomic64_read( + &stats->reset_stats.device_reset_terminates), + (u64)atomic64_read(&stats->reset_stats.fw_resets), + (u64)atomic64_read(&stats->reset_stats.fw_reset_completions), + (u64)atomic64_read(&stats->reset_stats.fw_reset_failures), + (u64)atomic64_read(&stats->reset_stats.fnic_resets), + (u64)atomic64_read( + &stats->reset_stats.fnic_reset_completions), + (u64)atomic64_read(&stats->reset_stats.fnic_reset_failures)); + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tFirmware Statistics\n" + 
"------------------------------------------\n"); + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "Number of Active FW Requests %lld\n" + "Maximum FW Requests: %lld\n" + "Number of FW out of resources: %lld\n" + "Number of FW IO errors: %lld\n", + (u64)atomic64_read(&stats->fw_stats.active_fw_reqs), + (u64)atomic64_read(&stats->fw_stats.max_fw_reqs), + (u64)atomic64_read(&stats->fw_stats.fw_out_of_resources), + (u64)atomic64_read(&stats->fw_stats.io_fw_errs)); + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tVlan Discovery Statistics\n" + "------------------------------------------\n"); + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "Number of Vlan Discovery Requests Sent %lld\n" + "Vlan Response Received with no FCF VLAN ID: %lld\n" + "No solicitations recvd after vlan set, expiry count: %lld\n" + "Flogi rejects count: %lld\n", + (u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs), + (u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID), + (u64)atomic64_read(&stats->vlan_stats.sol_expiry_count), + (u64)atomic64_read(&stats->vlan_stats.flogi_rejects)); + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tOther Important Statistics\n" + "------------------------------------------\n"); + + jiffies_to_timespec64(stats->misc_stats.last_isr_time, &val1); + jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2); + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "Last ISR time: %llu (%8llu.%09lu)\n" + "Last ACK time: %llu (%8llu.%09lu)\n" + "Max ISR jiffies: %llu\n" + "Max ISR time (ms) (0 denotes < 1 ms): %llu\n" + "Corr. work done: %llu\n" + "Number of ISRs: %lld\n" + "Maximum CQ Entries: %lld\n" + "Number of ACK index out of range: %lld\n" + "Number of data count mismatch: %lld\n" + "Number of FCPIO Timeouts: %lld\n" + "Number of FCPIO Aborted: %lld\n" + "Number of SGL Invalid: %lld\n" + "Number of Copy WQ Alloc Failures for ABTs: %lld\n" + "Number of Copy WQ Alloc Failures for Device Reset: %lld\n" + "Number of Copy WQ Alloc Failures for IOs: %lld\n" + "Number of no icmnd itmf Completions: %lld\n" + "Number of Check Conditions encountered: %lld\n" + "Number of QUEUE Fulls: %lld\n" + "Number of rport not ready: %lld\n" + "Number of receive frame errors: %lld\n", + (u64)stats->misc_stats.last_isr_time, + (s64)val1.tv_sec, val1.tv_nsec, + (u64)stats->misc_stats.last_ack_time, + (s64)val2.tv_sec, val2.tv_nsec, + (u64)atomic64_read(&stats->misc_stats.max_isr_jiffies), + (u64)atomic64_read(&stats->misc_stats.max_isr_time_ms), + (u64)atomic64_read(&stats->misc_stats.corr_work_done), + (u64)atomic64_read(&stats->misc_stats.isr_count), + (u64)atomic64_read(&stats->misc_stats.max_cq_entries), + (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range), + (u64)atomic64_read(&stats->misc_stats.data_count_mismatch), + (u64)atomic64_read(&stats->misc_stats.fcpio_timeout), + (u64)atomic64_read(&stats->misc_stats.fcpio_aborted), + (u64)atomic64_read(&stats->misc_stats.sgl_invalid), + (u64)atomic64_read( + &stats->misc_stats.abts_cpwq_alloc_failures), + (u64)atomic64_read( + &stats->misc_stats.devrst_cpwq_alloc_failures), + (u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures), + (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls), + (u64)atomic64_read(&stats->misc_stats.check_condition), + (u64)atomic64_read(&stats->misc_stats.queue_fulls), + 
(u64)atomic64_read(&stats->misc_stats.rport_not_ready), + (u64)atomic64_read(&stats->misc_stats.frame_errors)); + + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "Firmware reported port speed: %llu\n", + (u64)atomic64_read( + &stats->misc_stats.current_port_speed)); + + return len; + +} + +/* + * fnic_trace_buf_init - Initialize fnic trace buffer logging facility + * + * Description: + * Initialize trace buffer data structure by allocating required memory and + * setting page_offset information for every trace entry by adding trace entry + * length to previous page_offset value. + */ +int fnic_trace_buf_init(void) +{ + unsigned long fnic_buf_head; + int i; + int err = 0; + + trace_max_pages = fnic_trace_max_pages; + fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/ + FNIC_ENTRY_SIZE_BYTES; + + fnic_trace_buf_p = (unsigned long)vcalloc(trace_max_pages, PAGE_SIZE); + if (!fnic_trace_buf_p) { + printk(KERN_ERR PFX "Failed to allocate memory " + "for fnic_trace_buf_p\n"); + err = -ENOMEM; + goto err_fnic_trace_buf_init; + } + + fnic_trace_entries.page_offset = + vmalloc(array_size(fnic_max_trace_entries, + sizeof(unsigned long))); + if (!fnic_trace_entries.page_offset) { + printk(KERN_ERR PFX "Failed to allocate memory for" + " page_offset\n"); + if (fnic_trace_buf_p) { + vfree((void *)fnic_trace_buf_p); + fnic_trace_buf_p = 0; + } + err = -ENOMEM; + goto err_fnic_trace_buf_init; + } + memset((void *)fnic_trace_entries.page_offset, 0, + (fnic_max_trace_entries * sizeof(unsigned long))); + fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0; + fnic_buf_head = fnic_trace_buf_p; + + /* + * Set page_offset field of fnic_trace_entries struct by + * calculating memory location for every trace entry using + * length of each trace entry + */ + for (i = 0; i < fnic_max_trace_entries; i++) { + fnic_trace_entries.page_offset[i] = fnic_buf_head; + fnic_buf_head += FNIC_ENTRY_SIZE_BYTES; + } + fnic_trace_debugfs_init(); + pr_info("fnic: Successfully Initialized Trace Buffer\n"); + return err; + +err_fnic_trace_buf_init: + return err; +} + +/* + * fnic_trace_free - Free memory of fnic trace data structures. + */ +void fnic_trace_free(void) +{ + fnic_tracing_enabled = 0; + fnic_trace_debugfs_terminate(); + if (fnic_trace_entries.page_offset) { + vfree((void *)fnic_trace_entries.page_offset); + fnic_trace_entries.page_offset = NULL; + } + if (fnic_trace_buf_p) { + vfree((void *)fnic_trace_buf_p); + fnic_trace_buf_p = 0; + } + printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n"); +} + +/* + * fnic_fc_ctlr_trace_buf_init - + * Initialize trace buffer to log fnic control frames + * Description: + * Initialize trace buffer data structure by allocating + * required memory for trace data as well as for Indexes. + * Frame size is 256 bytes and + * memory is allocated for 1024 entries of 256 bytes. + * Page_offset(Index) is set to the address of trace entry + * and page_offset is initialized by adding frame size + * to the previous page_offset entry. 
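+ *
+ * Equivalently (FC_TRC_SIZE_BYTES is 256, see fnic_trace.h):
+ * page_offset[i] = fnic_fc_ctlr_trace_buf_p + i * FC_TRC_SIZE_BYTES,
+ * so any entry can be addressed directly without walking the buffer.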
+ */ + +int fnic_fc_trace_init(void) +{ + unsigned long fc_trace_buf_head; + int err = 0; + int i; + + fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/ + FC_TRC_SIZE_BYTES; + fnic_fc_ctlr_trace_buf_p = + (unsigned long)vmalloc(array_size(PAGE_SIZE, + fnic_fc_trace_max_pages)); + if (!fnic_fc_ctlr_trace_buf_p) { + pr_err("fnic: Failed to allocate memory for " + "FC Control Trace Buf\n"); + err = -ENOMEM; + goto err_fnic_fc_ctlr_trace_buf_init; + } + + memset((void *)fnic_fc_ctlr_trace_buf_p, 0, + fnic_fc_trace_max_pages * PAGE_SIZE); + + /* Allocate memory for page offset */ + fc_trace_entries.page_offset = + vmalloc(array_size(fc_trace_max_entries, + sizeof(unsigned long))); + if (!fc_trace_entries.page_offset) { + pr_err("fnic:Failed to allocate memory for page_offset\n"); + if (fnic_fc_ctlr_trace_buf_p) { + pr_err("fnic: Freeing FC Control Trace Buf\n"); + vfree((void *)fnic_fc_ctlr_trace_buf_p); + fnic_fc_ctlr_trace_buf_p = 0; + } + err = -ENOMEM; + goto err_fnic_fc_ctlr_trace_buf_init; + } + memset((void *)fc_trace_entries.page_offset, 0, + (fc_trace_max_entries * sizeof(unsigned long))); + + fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0; + fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p; + + /* + * Set up fc_trace_entries.page_offset field with memory location + * for every trace entry + */ + for (i = 0; i < fc_trace_max_entries; i++) { + fc_trace_entries.page_offset[i] = fc_trace_buf_head; + fc_trace_buf_head += FC_TRC_SIZE_BYTES; + } + fnic_fc_trace_debugfs_init(); + pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n"); + return err; + +err_fnic_fc_ctlr_trace_buf_init: + return err; +} + +/* + * Fnic_fc_ctlr_trace_free - Free memory of fnic_fc_ctlr trace data structures. + */ +void fnic_fc_trace_free(void) +{ + fnic_fc_tracing_enabled = 0; + fnic_fc_trace_debugfs_terminate(); + if (fc_trace_entries.page_offset) { + vfree((void *)fc_trace_entries.page_offset); + fc_trace_entries.page_offset = NULL; + } + if (fnic_fc_ctlr_trace_buf_p) { + vfree((void *)fnic_fc_ctlr_trace_buf_p); + fnic_fc_ctlr_trace_buf_p = 0; + } + pr_info("fnic:Successfully FC_CTLR Freed Trace Buffer\n"); +} + +/* + * fnic_fc_ctlr_set_trace_data: + * Maintain rd & wr idx accordingly and set data + * Passed parameters: + * host_no: host number associated with fnic + * frame_type: send_frame, rece_frame or link event + * fc_frame: pointer to fc_frame + * frame_len: Length of the fc_frame + * Description: + * This routine will get next available wr_idx and + * copy all passed trace data to the buffer pointed by wr_idx + * and increment wr_idx. It will also make sure that we dont + * overwrite the entry which we are reading and also + * wrap around if we reach the maximum entries. 
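+ *
+ * Example (illustrative values only): with fc_trace_max_entries == 4,
+ * wr_idx == 3 and rd_idx == 0, the new entry is written at index 3,
+ * wr_idx wraps to 0, and since it now equals rd_idx, rd_idx is advanced
+ * to 1 so the reader skips the oldest entry instead of colliding with
+ * the writer.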
+ * Returned Value: + * It will return 0 for success or -1 for failure + */ +int fnic_fc_trace_set_data(u32 host_no, u8 frame_type, + char *frame, u32 fc_trc_frame_len) +{ + unsigned long flags; + struct fc_trace_hdr *fc_buf; + unsigned long eth_fcoe_hdr_len; + char *fc_trace; + + if (fnic_fc_tracing_enabled == 0) + return 0; + + spin_lock_irqsave(&fnic_fc_trace_lock, flags); + + if (fnic_fc_trace_cleared == 1) { + fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0; + pr_info("fnic: Resetting the read idx\n"); + memset((void *)fnic_fc_ctlr_trace_buf_p, 0, + fnic_fc_trace_max_pages * PAGE_SIZE); + fnic_fc_trace_cleared = 0; + } + + fc_buf = (struct fc_trace_hdr *) + fc_trace_entries.page_offset[fc_trace_entries.wr_idx]; + + fc_trace_entries.wr_idx++; + + if (fc_trace_entries.wr_idx >= fc_trace_max_entries) + fc_trace_entries.wr_idx = 0; + + if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) { + fc_trace_entries.rd_idx++; + if (fc_trace_entries.rd_idx >= fc_trace_max_entries) + fc_trace_entries.rd_idx = 0; + } + + ktime_get_real_ts64(&fc_buf->time_stamp); + fc_buf->host_no = host_no; + fc_buf->frame_type = frame_type; + + fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf); + + /* During the receive path, we do not have eth hdr as well as fcoe hdr + * at trace entry point so we will stuff 0xff just to make it generic. + */ + if (frame_type == FNIC_FC_RECV) { + eth_fcoe_hdr_len = sizeof(struct ethhdr) + + sizeof(struct fcoe_hdr); + memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len); + /* Copy the rest of data frame */ + memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame, + min_t(u8, fc_trc_frame_len, + (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE + - eth_fcoe_hdr_len))); + } else { + memcpy((char *)fc_trace, (void *)frame, + min_t(u8, fc_trc_frame_len, + (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE))); + } + + /* Store the actual received length */ + fc_buf->frame_len = fc_trc_frame_len; + + spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); + return 0; +} + +/* + * fnic_fc_ctlr_get_trace_data: Copy trace buffer to a memory file + * Passed parameter: + * @fnic_dbgfs_t: pointer to debugfs trace buffer + * rdata_flag: 1 => Unformatted file + * 0 => formatted file + * Description: + * This routine will copy the trace data to memory file with + * proper formatting and also copy to another memory + * file without formatting for further processing. 
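+ *
+ * As implemented below, rdata_flag == 0 produces the human-readable
+ * listing via copy_and_format_trace_data(), while rdata_flag == 1 dumps
+ * each entry as a raw hex byte stream, one entry per line.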
+ * Return Value: + * Number of bytes that were dumped into fnic_dbgfs_t + */ + +int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag) +{ + int rd_idx, wr_idx; + unsigned long flags; + int len = 0, j; + struct fc_trace_hdr *tdata; + char *fc_trace; + + spin_lock_irqsave(&fnic_fc_trace_lock, flags); + if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) { + spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); + pr_info("fnic: Buffer is empty\n"); + return 0; + } + rd_idx = fc_trace_entries.rd_idx; + wr_idx = fc_trace_entries.wr_idx; + if (rdata_flag == 0) { + len += scnprintf(fnic_dbgfs_prt->buffer + len, + (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, + "Time Stamp (UTC)\t\t" + "Host No: F Type: len: FCoE_FRAME:\n"); + } + + while (rd_idx != wr_idx) { + tdata = (struct fc_trace_hdr *) + fc_trace_entries.page_offset[rd_idx]; + if (!tdata) { + pr_info("fnic: Rd data is NULL\n"); + spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); + return 0; + } + if (rdata_flag == 0) { + copy_and_format_trace_data(tdata, + fnic_dbgfs_prt, &len, rdata_flag); + } else { + fc_trace = (char *)tdata; + for (j = 0; j < FC_TRC_SIZE_BYTES; j++) { + len += scnprintf(fnic_dbgfs_prt->buffer + len, + (fnic_fc_trace_max_pages * PAGE_SIZE * 3) + - len, "%02x", fc_trace[j] & 0xff); + } /* for loop */ + len += scnprintf(fnic_dbgfs_prt->buffer + len, + (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, + "\n"); + } + rd_idx++; + if (rd_idx > (fc_trace_max_entries - 1)) + rd_idx = 0; + } + + spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); + return len; +} + +/* + * copy_and_format_trace_data: Copy formatted data to char * buffer + * Passed Parameter: + * @fc_trace_hdr_t: pointer to trace data + * @fnic_dbgfs_t: pointer to debugfs trace buffer + * @orig_len: pointer to len + * rdata_flag: 0 => Formatted file, 1 => Unformatted file + * Description: + * This routine will format and copy the passed trace data + * for formatted file or unformatted file accordingly. 
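+ *
+ * For FC frames the formatted hex dump starts a new line after the
+ * Ethernet, FCoE and FC headers and then every
+ * sizeof(struct fc_frame_header) bytes; FNIC_FC_LE (link event) entries
+ * are printed as plain characters instead.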
+ */ + +void copy_and_format_trace_data(struct fc_trace_hdr *tdata, + fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len, + u8 rdata_flag) +{ + int j, i = 1, len; + int ethhdr_len = sizeof(struct ethhdr) - 1; + int fcoehdr_len = sizeof(struct fcoe_hdr); + int fchdr_len = sizeof(struct fc_frame_header); + int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3; + char *fc_trace; + + tdata->frame_type = tdata->frame_type & 0x7F; + + len = *orig_len; + + len += scnprintf(fnic_dbgfs_prt->buffer + len, max_size - len, + "%ptTs.%09lu ns%8x %c%8x\t", + &tdata->time_stamp.tv_sec, tdata->time_stamp.tv_nsec, + tdata->host_no, tdata->frame_type, tdata->frame_len); + + fc_trace = (char *)FC_TRACE_ADDRESS(tdata); + + for (j = 0; j < min_t(u8, tdata->frame_len, + (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) { + if (tdata->frame_type == FNIC_FC_LE) { + len += scnprintf(fnic_dbgfs_prt->buffer + len, + max_size - len, "%c", fc_trace[j]); + } else { + len += scnprintf(fnic_dbgfs_prt->buffer + len, + max_size - len, "%02x", fc_trace[j] & 0xff); + len += scnprintf(fnic_dbgfs_prt->buffer + len, + max_size - len, " "); + if (j == ethhdr_len || + j == ethhdr_len + fcoehdr_len || + j == ethhdr_len + fcoehdr_len + fchdr_len || + (i > 3 && j%fchdr_len == 0)) { + len += scnprintf(fnic_dbgfs_prt->buffer + + len, max_size - len, + "\n\t\t\t\t\t\t\t\t"); + i++; + } + } /* end of else*/ + } /* End of for loop*/ + len += scnprintf(fnic_dbgfs_prt->buffer + len, + max_size - len, "\n"); + *orig_len = len; +} diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h new file mode 100644 index 000000000..d1c301bf3 --- /dev/null +++ b/drivers/scsi/fnic/fnic_trace.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2012 Cisco Systems, Inc. All rights reserved. 
*/ + +#ifndef __FNIC_TRACE_H__ +#define __FNIC_TRACE_H__ + +#define FNIC_ENTRY_SIZE_BYTES 64 +#define FC_TRC_SIZE_BYTES 256 +#define FC_TRC_HEADER_SIZE sizeof(struct fc_trace_hdr) + +/* + * Fisrt bit of FNIC_FC_RECV and FNIC_FC_SEND is used to represent the type + * of frame 1 => Eth frame, 0=> FC frame + */ + +#define FNIC_FC_RECV 0x52 /* Character R */ +#define FNIC_FC_SEND 0x54 /* Character T */ +#define FNIC_FC_LE 0x4C /* Character L */ + +extern ssize_t simple_read_from_buffer(void __user *to, + size_t count, + loff_t *ppos, + const void *from, + size_t available); + +extern unsigned int fnic_trace_max_pages; +extern int fnic_tracing_enabled; +extern unsigned int trace_max_pages; + +extern unsigned int fnic_fc_trace_max_pages; +extern int fnic_fc_tracing_enabled; +extern int fnic_fc_trace_cleared; + +typedef struct fnic_trace_dbg { + int wr_idx; + int rd_idx; + unsigned long *page_offset; +} fnic_trace_dbg_t; + +typedef struct fnic_dbgfs { + int buffer_len; + char *buffer; +} fnic_dbgfs_t; + +struct fnic_trace_data { + union { + struct { + u32 low; + u32 high; + }; + u64 val; + } timestamp, fnaddr; + u32 host_no; + u32 tag; + u64 data[5]; +} __attribute__((__packed__)); + +typedef struct fnic_trace_data fnic_trace_data_t; + +struct fc_trace_hdr { + struct timespec64 time_stamp; + u32 host_no; + u8 frame_type; + u8 frame_len; +} __attribute__((__packed__)); + +#define FC_TRACE_ADDRESS(a) \ + ((unsigned long)(a) + sizeof(struct fc_trace_hdr)) + +#define FNIC_TRACE_ENTRY_SIZE \ + (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t)) + +#define FNIC_TRACE(_fn, _hn, _t, _a, _b, _c, _d, _e) \ + if (unlikely(fnic_tracing_enabled)) { \ + fnic_trace_data_t *trace_buf = fnic_trace_get_buf(); \ + if (trace_buf) { \ + if (sizeof(unsigned long) < 8) { \ + trace_buf->timestamp.low = jiffies; \ + trace_buf->fnaddr.low = (u32)(unsigned long)_fn; \ + } else { \ + trace_buf->timestamp.val = jiffies; \ + trace_buf->fnaddr.val = (u64)(unsigned long)_fn; \ + } \ + trace_buf->host_no = _hn; \ + trace_buf->tag = _t; \ + trace_buf->data[0] = (u64)(unsigned long)_a; \ + trace_buf->data[1] = (u64)(unsigned long)_b; \ + trace_buf->data[2] = (u64)(unsigned long)_c; \ + trace_buf->data[3] = (u64)(unsigned long)_d; \ + trace_buf->data[4] = (u64)(unsigned long)_e; \ + } \ + } + +fnic_trace_data_t *fnic_trace_get_buf(void); +int fnic_get_trace_data(fnic_dbgfs_t *); +int fnic_trace_buf_init(void); +void fnic_trace_free(void); +int fnic_debugfs_init(void); +void fnic_debugfs_terminate(void); +void fnic_trace_debugfs_init(void); +void fnic_trace_debugfs_terminate(void); + +/* Fnic FC CTLR Trace releated function */ +int fnic_fc_trace_init(void); +void fnic_fc_trace_free(void); +int fnic_fc_trace_set_data(u32 host_no, u8 frame_type, + char *frame, u32 fc_frame_len); +int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag); +void copy_and_format_trace_data(struct fc_trace_hdr *tdata, + fnic_dbgfs_t *fnic_dbgfs_prt, + int *len, u8 rdata_flag); +void fnic_fc_trace_debugfs_init(void); +void fnic_fc_trace_debugfs_terminate(void); + +#endif diff --git a/drivers/scsi/fnic/rq_enet_desc.h b/drivers/scsi/fnic/rq_enet_desc.h new file mode 100644 index 000000000..9bc509d35 --- /dev/null +++ b/drivers/scsi/fnic/rq_enet_desc.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ +#ifndef _RQ_ENET_DESC_H_ +#define _RQ_ENET_DESC_H_ + +/* Ethernet receive queue descriptor: 16B */ +struct rq_enet_desc { + __le64 address; + __le16 length_type; + u8 reserved[6]; +}; + +enum rq_enet_type_types { + RQ_ENET_TYPE_ONLY_SOP = 0, + RQ_ENET_TYPE_NOT_SOP = 1, + RQ_ENET_TYPE_RESV2 = 2, + RQ_ENET_TYPE_RESV3 = 3, +}; + +#define RQ_ENET_ADDR_BITS 64 +#define RQ_ENET_LEN_BITS 14 +#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1) +#define RQ_ENET_TYPE_BITS 2 +#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1) + +static inline void rq_enet_desc_enc(struct rq_enet_desc *desc, + u64 address, u8 type, u16 length) +{ + desc->address = cpu_to_le64(address); + desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) | + ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS)); +} + +static inline void rq_enet_desc_dec(struct rq_enet_desc *desc, + u64 *address, u8 *type, u16 *length) +{ + *address = le64_to_cpu(desc->address); + *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK; + *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) & + RQ_ENET_TYPE_MASK); +} + +#endif /* _RQ_ENET_DESC_H_ */ diff --git a/drivers/scsi/fnic/vnic_cq.c b/drivers/scsi/fnic/vnic_cq.c new file mode 100644 index 000000000..ed3dd443f --- /dev/null +++ b/drivers/scsi/fnic/vnic_cq.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ +#include +#include +#include +#include "vnic_dev.h" +#include "vnic_cq.h" + +void vnic_cq_free(struct vnic_cq *cq) +{ + vnic_dev_free_desc_ring(cq->vdev, &cq->ring); + + cq->ctrl = NULL; +} + +int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + cq->index = index; + cq->vdev = vdev; + + cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); + if (!cq->ctrl) { + printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index); + return -EINVAL; + } + + err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); + if (err) + return err; + + return 0; +} + +void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, + unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, + unsigned int cq_tail_color, unsigned int interrupt_enable, + unsigned int cq_entry_enable, unsigned int cq_message_enable, + unsigned int interrupt_offset, u64 cq_message_addr) +{ + u64 paddr; + + paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &cq->ctrl->ring_base); + iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); + iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable); + iowrite32(color_enable, &cq->ctrl->color_enable); + iowrite32(cq_head, &cq->ctrl->cq_head); + iowrite32(cq_tail, &cq->ctrl->cq_tail); + iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color); + iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable); + iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable); + iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable); + iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset); + writeq(cq_message_addr, &cq->ctrl->cq_message_addr); +} + +void vnic_cq_clean(struct vnic_cq *cq) +{ + cq->to_clean = 0; + cq->last_color = 0; + + iowrite32(0, &cq->ctrl->cq_head); + iowrite32(0, &cq->ctrl->cq_tail); + iowrite32(1, &cq->ctrl->cq_tail_color); + + vnic_dev_clear_desc_ring(&cq->ring); +} diff --git a/drivers/scsi/fnic/vnic_cq.h b/drivers/scsi/fnic/vnic_cq.h new file mode 100644 index 
000000000..e7cc1f165 --- /dev/null +++ b/drivers/scsi/fnic/vnic_cq.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ +#ifndef _VNIC_CQ_H_ +#define _VNIC_CQ_H_ + +#include "cq_desc.h" +#include "vnic_dev.h" + +/* + * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth + * Driver) when both are built with CONFIG options =y + */ +#define vnic_cq_service fnic_cq_service +#define vnic_cq_free fnic_cq_free +#define vnic_cq_alloc fnic_cq_alloc +#define vnic_cq_init fnic_cq_init +#define vnic_cq_clean fnic_cq_clean + +/* Completion queue control */ +struct vnic_cq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 flow_control_enable; /* 0x10 */ + u32 pad1; + u32 color_enable; /* 0x18 */ + u32 pad2; + u32 cq_head; /* 0x20 */ + u32 pad3; + u32 cq_tail; /* 0x28 */ + u32 pad4; + u32 cq_tail_color; /* 0x30 */ + u32 pad5; + u32 interrupt_enable; /* 0x38 */ + u32 pad6; + u32 cq_entry_enable; /* 0x40 */ + u32 pad7; + u32 cq_message_enable; /* 0x48 */ + u32 pad8; + u32 interrupt_offset; /* 0x50 */ + u32 pad9; + u64 cq_message_addr; /* 0x58 */ + u32 pad10; +}; + +struct vnic_cq { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + unsigned int to_clean; + unsigned int last_color; +}; + +static inline unsigned int vnic_cq_service(struct vnic_cq *cq, + unsigned int work_to_do, + int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc, + u8 type, u16 q_number, u16 completed_index, void *opaque), + void *opaque) +{ + struct cq_desc *cq_desc; + unsigned int work_done = 0; + u16 q_number, completed_index; + u8 type, color; + + cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + cq_desc_dec(cq_desc, &type, &color, + &q_number, &completed_index); + + while (color != cq->last_color) { + + if ((*q_service)(cq->vdev, cq_desc, type, + q_number, completed_index, opaque)) + break; + + cq->to_clean++; + if (cq->to_clean == cq->ring.desc_count) { + cq->to_clean = 0; + cq->last_color = cq->last_color ? 0 : 1; + } + + cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + cq_desc_dec(cq_desc, &type, &color, + &q_number, &completed_index); + + work_done++; + if (work_done >= work_to_do) + break; + } + + return work_done; +} + +void vnic_cq_free(struct vnic_cq *cq); +int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, + unsigned int desc_count, unsigned int desc_size); +void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, + unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, + unsigned int cq_tail_color, unsigned int interrupt_enable, + unsigned int cq_entry_enable, unsigned int message_enable, + unsigned int interrupt_offset, u64 message_addr); +void vnic_cq_clean(struct vnic_cq *cq); + +#endif /* _VNIC_CQ_H_ */ diff --git a/drivers/scsi/fnic/vnic_cq_copy.h b/drivers/scsi/fnic/vnic_cq_copy.h new file mode 100644 index 000000000..1b198ee59 --- /dev/null +++ b/drivers/scsi/fnic/vnic_cq_copy.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ +#ifndef _VNIC_CQ_COPY_H_ +#define _VNIC_CQ_COPY_H_ + +#include "fcpio.h" + +static inline unsigned int vnic_cq_copy_service( + struct vnic_cq *cq, + int (*q_service)(struct vnic_dev *vdev, + unsigned int index, + struct fcpio_fw_req *desc), + unsigned int work_to_do) + +{ + struct fcpio_fw_req *desc; + unsigned int work_done = 0; + u8 color; + + desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + fcpio_color_dec(desc, &color); + + while (color != cq->last_color) { + + if ((*q_service)(cq->vdev, cq->index, desc)) + break; + + cq->to_clean++; + if (cq->to_clean == cq->ring.desc_count) { + cq->to_clean = 0; + cq->last_color = cq->last_color ? 0 : 1; + } + + desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + fcpio_color_dec(desc, &color); + + work_done++; + if (work_done >= work_to_do) + break; + } + + return work_done; +} + +#endif /* _VNIC_CQ_COPY_H_ */ diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c new file mode 100644 index 000000000..3e5b437c0 --- /dev/null +++ b/drivers/scsi/fnic/vnic_dev.c @@ -0,0 +1,943 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "vnic_resource.h" +#include "vnic_devcmd.h" +#include "vnic_dev.h" +#include "vnic_stats.h" +#include "vnic_wq.h" + +struct devcmd2_controller { + struct vnic_wq_ctrl *wq_ctrl; + struct vnic_dev_ring results_ring; + struct vnic_wq wq; + struct vnic_devcmd2 *cmd_ring; + struct devcmd2_result *result; + u16 next_result; + u16 result_size; + int color; +}; + +enum vnic_proxy_type { + PROXY_NONE, + PROXY_BY_BDF, + PROXY_BY_INDEX, +}; + +struct vnic_res { + void __iomem *vaddr; + unsigned int count; +}; + +struct vnic_dev { + void *priv; + struct pci_dev *pdev; + struct vnic_res res[RES_TYPE_MAX]; + enum vnic_dev_intr_mode intr_mode; + struct vnic_devcmd __iomem *devcmd; + struct vnic_devcmd_notify *notify; + struct vnic_devcmd_notify notify_copy; + dma_addr_t notify_pa; + u32 *linkstatus; + dma_addr_t linkstatus_pa; + struct vnic_stats *stats; + dma_addr_t stats_pa; + struct vnic_devcmd_fw_info *fw_info; + dma_addr_t fw_info_pa; + enum vnic_proxy_type proxy; + u32 proxy_index; + u64 args[VNIC_DEVCMD_NARGS]; + struct devcmd2_controller *devcmd2; + int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait); +}; + +#define VNIC_MAX_RES_HDR_SIZE \ + (sizeof(struct vnic_resource_header) + \ + sizeof(struct vnic_resource) * RES_TYPE_MAX) +#define VNIC_RES_STRIDE 128 + +void *vnic_dev_priv(struct vnic_dev *vdev) +{ + return vdev->priv; +} + +static int vnic_dev_discover_res(struct vnic_dev *vdev, + struct vnic_dev_bar *bar) +{ + struct vnic_resource_header __iomem *rh; + struct vnic_resource __iomem *r; + u8 type; + + if (bar->len < VNIC_MAX_RES_HDR_SIZE) { + printk(KERN_ERR "vNIC BAR0 res hdr length error\n"); + return -EINVAL; + } + + rh = bar->vaddr; + if (!rh) { + printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n"); + return -EINVAL; + } + + if (ioread32(&rh->magic) != VNIC_RES_MAGIC || + ioread32(&rh->version) != VNIC_RES_VERSION) { + printk(KERN_ERR "vNIC BAR0 res magic/version error " + "exp (%lx/%lx) curr (%x/%x)\n", + VNIC_RES_MAGIC, VNIC_RES_VERSION, + ioread32(&rh->magic), ioread32(&rh->version)); + return -EINVAL; + } + + r = (struct vnic_resource __iomem *)(rh + 1); + + while ((type = 
ioread8(&r->type)) != RES_TYPE_EOL) { + + u8 bar_num = ioread8(&r->bar); + u32 bar_offset = ioread32(&r->bar_offset); + u32 count = ioread32(&r->count); + u32 len; + + r++; + + if (bar_num != 0) /* only mapping in BAR0 resources */ + continue; + + switch (type) { + case RES_TYPE_WQ: + case RES_TYPE_RQ: + case RES_TYPE_CQ: + case RES_TYPE_INTR_CTRL: + /* each count is stride bytes long */ + len = count * VNIC_RES_STRIDE; + if (len + bar_offset > bar->len) { + printk(KERN_ERR "vNIC BAR0 resource %d " + "out-of-bounds, offset 0x%x + " + "size 0x%x > bar len 0x%lx\n", + type, bar_offset, + len, + bar->len); + return -EINVAL; + } + break; + case RES_TYPE_INTR_PBA_LEGACY: + case RES_TYPE_DEVCMD2: + case RES_TYPE_DEVCMD: + len = count; + break; + default: + continue; + } + + vdev->res[type].count = count; + vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset; + } + + return 0; +} + +unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, + enum vnic_res_type type) +{ + return vdev->res[type].count; +} + +void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, + unsigned int index) +{ + if (!vdev->res[type].vaddr) + return NULL; + + switch (type) { + case RES_TYPE_WQ: + case RES_TYPE_RQ: + case RES_TYPE_CQ: + case RES_TYPE_INTR_CTRL: + return (char __iomem *)vdev->res[type].vaddr + + index * VNIC_RES_STRIDE; + default: + return (char __iomem *)vdev->res[type].vaddr; + } +} + +unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, + unsigned int desc_count, + unsigned int desc_size) +{ + /* The base address of the desc rings must be 512 byte aligned. + * Descriptor count is aligned to groups of 32 descriptors. A + * count of 0 means the maximum 4096 descriptors. Descriptor + * size is aligned to 16 bytes. + */ + + unsigned int count_align = 32; + unsigned int desc_align = 16; + + ring->base_align = 512; + + if (desc_count == 0) + desc_count = 4096; + + ring->desc_count = ALIGN(desc_count, count_align); + + ring->desc_size = ALIGN(desc_size, desc_align); + + ring->size = ring->desc_count * ring->desc_size; + ring->size_unaligned = ring->size + ring->base_align; + + return ring->size_unaligned; +} + +void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) +{ + memset(ring->descs, 0, ring->size); +} + +int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size) +{ + vnic_dev_desc_ring_size(ring, desc_count, desc_size); + + ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev, + ring->size_unaligned, + &ring->base_addr_unaligned, GFP_KERNEL); + + if (!ring->descs_unaligned) { + printk(KERN_ERR + "Failed to allocate ring (size=%d), aborting\n", + (int)ring->size); + return -ENOMEM; + } + + ring->base_addr = ALIGN(ring->base_addr_unaligned, + ring->base_align); + ring->descs = (u8 *)ring->descs_unaligned + + (ring->base_addr - ring->base_addr_unaligned); + + vnic_dev_clear_desc_ring(ring); + + ring->desc_avail = ring->desc_count - 1; + + return 0; +} + +void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring) +{ + if (ring->descs) { + dma_free_coherent(&vdev->pdev->dev, + ring->size_unaligned, + ring->descs_unaligned, + ring->base_addr_unaligned); + ring->descs = NULL; + } +} + +static int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait) +{ + struct vnic_devcmd __iomem *devcmd = vdev->devcmd; + int delay; + u32 status; + static const int dev_cmd_err[] = { + /* convert from fw's version of error.h to host's version */ + 0, /* 
ERR_SUCCESS */ + EINVAL, /* ERR_EINVAL */ + EFAULT, /* ERR_EFAULT */ + EPERM, /* ERR_EPERM */ + EBUSY, /* ERR_EBUSY */ + }; + int err; + u64 *a0 = &vdev->args[0]; + u64 *a1 = &vdev->args[1]; + + status = ioread32(&devcmd->status); + if (status & STAT_BUSY) { + printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd)); + return -EBUSY; + } + + if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { + writeq(*a0, &devcmd->args[0]); + writeq(*a1, &devcmd->args[1]); + wmb(); + } + + iowrite32(cmd, &devcmd->cmd); + + if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) + return 0; + + for (delay = 0; delay < wait; delay++) { + + udelay(100); + + status = ioread32(&devcmd->status); + if (!(status & STAT_BUSY)) { + + if (status & STAT_ERROR) { + err = dev_cmd_err[(int)readq(&devcmd->args[0])]; + printk(KERN_ERR "Error %d devcmd %d\n", + err, _CMD_N(cmd)); + return -err; + } + + if (_CMD_DIR(cmd) & _CMD_DIR_READ) { + rmb(); + *a0 = readq(&devcmd->args[0]); + *a1 = readq(&devcmd->args[1]); + } + + return 0; + } + } + + printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd)); + return -ETIMEDOUT; +} + +static int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait) +{ + struct devcmd2_controller *dc2c = vdev->devcmd2; + struct devcmd2_result *result; + u8 color; + unsigned int i; + int delay; + int err; + u32 fetch_index; + u32 posted; + u32 new_posted; + + posted = ioread32(&dc2c->wq_ctrl->posted_index); + fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index); + + if (posted == 0xFFFFFFFF || fetch_index == 0xFFFFFFFF) { + /* Hardware surprise removal: return error */ + pr_err("%s: devcmd2 invalid posted or fetch index on cmd %d\n", + pci_name(vdev->pdev), _CMD_N(cmd)); + pr_err("%s: fetch index: %u, posted index: %u\n", + pci_name(vdev->pdev), fetch_index, posted); + + return -ENODEV; + + } + + new_posted = (posted + 1) % DEVCMD2_RING_SIZE; + + if (new_posted == fetch_index) { + pr_err("%s: devcmd2 wq full while issuing cmd %d\n", + pci_name(vdev->pdev), _CMD_N(cmd)); + pr_err("%s: fetch index: %u, posted index: %u\n", + pci_name(vdev->pdev), fetch_index, posted); + return -EBUSY; + + } + dc2c->cmd_ring[posted].cmd = cmd; + dc2c->cmd_ring[posted].flags = 0; + + if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) + dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT; + if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + dc2c->cmd_ring[posted].args[i] = vdev->args[i]; + + } + + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. Otherwise, hardware can read + * stale descriptor fields. + */ + wmb(); + iowrite32(new_posted, &dc2c->wq_ctrl->posted_index); + + if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) + return 0; + + result = dc2c->result + dc2c->next_result; + color = dc2c->color; + + dc2c->next_result++; + if (dc2c->next_result == dc2c->result_size) { + dc2c->next_result = 0; + dc2c->color = dc2c->color ? 
0 : 1; + } + + for (delay = 0; delay < wait; delay++) { + udelay(100); + if (result->color == color) { + if (result->error) { + err = -(int) result->error; + if (err != ERR_ECMDUNKNOWN || + cmd != CMD_CAPABILITY) + pr_err("%s:Error %d devcmd %d\n", + pci_name(vdev->pdev), + err, _CMD_N(cmd)); + return err; + } + if (_CMD_DIR(cmd) & _CMD_DIR_READ) { + rmb(); /*prevent reorder while reding result*/ + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + vdev->args[i] = result->results[i]; + } + return 0; + } + } + + pr_err("%s:Timed out devcmd %d\n", pci_name(vdev->pdev), _CMD_N(cmd)); + + return -ETIMEDOUT; +} + + +static int vnic_dev_init_devcmd1(struct vnic_dev *vdev) +{ + vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); + if (!vdev->devcmd) + return -ENODEV; + + vdev->devcmd_rtn = &vnic_dev_cmd1; + return 0; +} + + +static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) +{ + int err; + unsigned int fetch_index; + + if (vdev->devcmd2) + return 0; + + vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_ATOMIC); + if (!vdev->devcmd2) + return -ENOMEM; + + vdev->devcmd2->color = 1; + vdev->devcmd2->result_size = DEVCMD2_RING_SIZE; + err = vnic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, + DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); + if (err) + goto err_free_devcmd2; + + fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); + if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ + pr_err("error in devcmd2 init"); + err = -ENODEV; + goto err_free_wq; + } + + /* + * Don't change fetch_index ever and + * set posted_index same as fetch_index + * when setting up the WQ for devcmd2. + */ + vnic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, + fetch_index, 0, 0); + + vnic_wq_enable(&vdev->devcmd2->wq); + + err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring, + DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); + if (err) + goto err_disable_wq; + + vdev->devcmd2->result = + (struct devcmd2_result *) vdev->devcmd2->results_ring.descs; + vdev->devcmd2->cmd_ring = + (struct vnic_devcmd2 *) vdev->devcmd2->wq.ring.descs; + vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl; + vdev->args[0] = (u64) vdev->devcmd2->results_ring.base_addr | + VNIC_PADDR_TARGET; + vdev->args[1] = DEVCMD2_RING_SIZE; + + err = vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000); + if (err) + goto err_free_desc_ring; + + vdev->devcmd_rtn = &vnic_dev_cmd2; + + return 0; + +err_free_desc_ring: + vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); +err_disable_wq: + vnic_wq_disable(&vdev->devcmd2->wq); +err_free_wq: + vnic_wq_free(&vdev->devcmd2->wq); +err_free_devcmd2: + kfree(vdev->devcmd2); + vdev->devcmd2 = NULL; + + return err; +} + + +static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev) +{ + vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); + vnic_wq_disable(&vdev->devcmd2->wq); + vnic_wq_free(&vdev->devcmd2->wq); + kfree(vdev->devcmd2); + vdev->devcmd2 = NULL; + vdev->devcmd_rtn = &vnic_dev_cmd1; +} + + +static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev, + enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) +{ + int err; + + vdev->args[0] = *a0; + vdev->args[1] = *a1; + + err = (*vdev->devcmd_rtn)(vdev, cmd, wait); + + *a0 = vdev->args[0]; + *a1 = vdev->args[1]; + + return err; +} + + +int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + u64 *a0, u64 *a1, int wait) +{ + memset(vdev->args, 0, sizeof(vdev->args)); + + switch (vdev->proxy) { + case PROXY_NONE: + default: + return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait); + } +} + + +int vnic_dev_fw_info(struct 
vnic_dev *vdev, + struct vnic_devcmd_fw_info **fw_info) +{ + u64 a0, a1 = 0; + int wait = 1000; + int err = 0; + + if (!vdev->fw_info) { + vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev, + sizeof(struct vnic_devcmd_fw_info), + &vdev->fw_info_pa, GFP_KERNEL); + if (!vdev->fw_info) + return -ENOMEM; + + a0 = vdev->fw_info_pa; + + /* only get fw_info once and cache it */ + err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait); + } + + *fw_info = vdev->fw_info; + + return err; +} + +int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, + void *value) +{ + u64 a0, a1; + int wait = 1000; + int err; + + a0 = offset; + a1 = size; + + err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait); + + switch (size) { + case 1: + *(u8 *)value = (u8)a0; + break; + case 2: + *(u16 *)value = (u16)a0; + break; + case 4: + *(u32 *)value = (u32)a0; + break; + case 8: + *(u64 *)value = a0; + break; + default: + BUG(); + break; + } + + return err; +} + +int vnic_dev_stats_clear(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait); +} + +int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) +{ + u64 a0, a1; + int wait = 1000; + + if (!vdev->stats) { + vdev->stats = dma_alloc_coherent(&vdev->pdev->dev, + sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL); + if (!vdev->stats) + return -ENOMEM; + } + + *stats = vdev->stats; + a0 = vdev->stats_pa; + a1 = sizeof(struct vnic_stats); + + return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait); +} + +int vnic_dev_close(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait); +} + +int vnic_dev_enable(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); +} + +int vnic_dev_disable(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait); +} + +int vnic_dev_open(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait); +} + +int vnic_dev_open_done(struct vnic_dev *vdev, int *done) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + + *done = 0; + + err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait); + if (err) + return err; + + *done = (a0 == 0); + + return 0; +} + +int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait); +} + +int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + + *done = 0; + + err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait); + if (err) + return err; + + *done = (a0 == 0); + + return 0; +} + +int vnic_dev_hang_notify(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait); +} + +int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) +{ + u64 a[2] = {}; + int wait = 1000; + int err, i; + + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = 0; + + err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait); + if (err) + return err; + + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = ((u8 *)&a)[i]; + + return 0; +} + +void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, + int broadcast, int promisc, int allmulti) +{ + u64 a0, a1 = 0; + int wait = 
1000; + int err; + + a0 = (directed ? CMD_PFILTER_DIRECTED : 0) | + (multicast ? CMD_PFILTER_MULTICAST : 0) | + (broadcast ? CMD_PFILTER_BROADCAST : 0) | + (promisc ? CMD_PFILTER_PROMISCUOUS : 0) | + (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0); + + err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); + if (err) + printk(KERN_ERR "Can't set packet filter\n"); +} + +void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) +{ + u64 a[2] = {}; + int wait = 1000; + int err; + int i; + + for (i = 0; i < ETH_ALEN; i++) + ((u8 *)&a)[i] = addr[i]; + + err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait); + if (err) + pr_err("Can't add addr [%pM], %d\n", addr, err); +} + +void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) +{ + u64 a[2] = {}; + int wait = 1000; + int err; + int i; + + for (i = 0; i < ETH_ALEN; i++) + ((u8 *)&a)[i] = addr[i]; + + err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait); + if (err) + pr_err("Can't del addr [%pM], %d\n", addr, err); +} + +int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) +{ + u64 a0, a1; + int wait = 1000; + + if (!vdev->notify) { + vdev->notify = dma_alloc_coherent(&vdev->pdev->dev, + sizeof(struct vnic_devcmd_notify), + &vdev->notify_pa, GFP_KERNEL); + if (!vdev->notify) + return -ENOMEM; + } + + a0 = vdev->notify_pa; + a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL; + a1 += sizeof(struct vnic_devcmd_notify); + + return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); +} + +void vnic_dev_notify_unset(struct vnic_dev *vdev) +{ + u64 a0, a1; + int wait = 1000; + + a0 = 0; /* paddr = 0 to unset notify buffer */ + a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */ + a1 += sizeof(struct vnic_devcmd_notify); + + vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); +} + +static int vnic_dev_notify_ready(struct vnic_dev *vdev) +{ + u32 *words; + unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4; + unsigned int i; + u32 csum; + + if (!vdev->notify) + return 0; + + do { + csum = 0; + memcpy(&vdev->notify_copy, vdev->notify, + sizeof(struct vnic_devcmd_notify)); + words = (u32 *)&vdev->notify_copy; + for (i = 1; i < nwords; i++) + csum += words[i]; + } while (csum != words[0]); + + return 1; +} + +int vnic_dev_init(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); +} + +u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan) +{ + u64 a0 = new_default_vlan, a1 = 0; + int wait = 1000; + int old_vlan = 0; + + old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait); + return (u16)old_vlan; +} + +int vnic_dev_link_status(struct vnic_dev *vdev) +{ + if (vdev->linkstatus) + return *vdev->linkstatus; + + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.link_state; +} + +u32 vnic_dev_port_speed(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.port_speed; +} + +u32 vnic_dev_msg_lvl(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.msglvl; +} + +u32 vnic_dev_mtu(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.mtu; +} + +u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.link_down_cnt; +} + +void vnic_dev_set_intr_mode(struct vnic_dev *vdev, + enum vnic_dev_intr_mode intr_mode) +{ + vdev->intr_mode = intr_mode; +} + +enum vnic_dev_intr_mode 
vnic_dev_get_intr_mode( + struct vnic_dev *vdev) +{ + return vdev->intr_mode; +} + +void vnic_dev_unregister(struct vnic_dev *vdev) +{ + if (vdev) { + if (vdev->notify) + dma_free_coherent(&vdev->pdev->dev, + sizeof(struct vnic_devcmd_notify), + vdev->notify, + vdev->notify_pa); + if (vdev->linkstatus) + dma_free_coherent(&vdev->pdev->dev, + sizeof(u32), + vdev->linkstatus, + vdev->linkstatus_pa); + if (vdev->stats) + dma_free_coherent(&vdev->pdev->dev, + sizeof(struct vnic_stats), + vdev->stats, vdev->stats_pa); + if (vdev->fw_info) + dma_free_coherent(&vdev->pdev->dev, + sizeof(struct vnic_devcmd_fw_info), + vdev->fw_info, vdev->fw_info_pa); + if (vdev->devcmd2) + vnic_dev_deinit_devcmd2(vdev); + kfree(vdev); + } +} + +struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, + void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar) +{ + if (!vdev) { + vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL); + if (!vdev) + return NULL; + } + + vdev->priv = priv; + vdev->pdev = pdev; + + if (vnic_dev_discover_res(vdev, bar)) + goto err_out; + + return vdev; + +err_out: + vnic_dev_unregister(vdev); + return NULL; +} + +int vnic_dev_cmd_init(struct vnic_dev *vdev) +{ + int err; + void *p; + + p = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); + if (p) { + pr_err("fnic: DEVCMD2 resource found!\n"); + err = vnic_dev_init_devcmd2(vdev); + } else { + pr_err("fnic: DEVCMD2 not found, fall back to Devcmd\n"); + err = vnic_dev_init_devcmd1(vdev); + } + + return err; +} diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h new file mode 100644 index 000000000..7a568d141 --- /dev/null +++ b/drivers/scsi/fnic/vnic_dev.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
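To make the call ordering concrete, here is a minimal, hypothetical bring-up sketch using only the helpers defined in vnic_dev.c above and declared in this header (and the usual kernel headers): pick a command transport, query firmware info, open the vNIC, poll for open completion, then initialize. It is illustrative only; the real fnic probe path also sets up interrupts, queues and error handling and may order things differently.

static int example_vnic_bringup(struct vnic_dev *vdev)
{
        struct vnic_devcmd_fw_info *fw_info;
        int done = 0;
        int i, err;

        err = vnic_dev_cmd_init(vdev);          /* prefer devcmd2, else devcmd1 */
        if (err)
                return err;

        err = vnic_dev_fw_info(vdev, &fw_info); /* cached after the first call */
        if (err)
                return err;

        err = vnic_dev_open(vdev, 0);           /* CMD_OPEN ... */
        if (err)
                return err;

        for (i = 0; i < 1000; i++) {            /* ... then poll CMD_OPEN_STATUS */
                err = vnic_dev_open_done(vdev, &done);
                if (err)
                        return err;
                if (done)
                        return vnic_dev_init(vdev, 0);  /* CMD_INIT */
                udelay(100);                    /* hypothetical poll interval */
        }

        return -ETIMEDOUT;
}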
+ */ +#ifndef _VNIC_DEV_H_ +#define _VNIC_DEV_H_ + +#include "vnic_resource.h" +#include "vnic_devcmd.h" + +/* + * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth + * Driver) when both are built with CONFIG options =y + */ +#define vnic_dev_priv fnic_dev_priv +#define vnic_dev_get_res_count fnic_dev_get_res_count +#define vnic_dev_get_res fnic_dev_get_res +#define vnic_dev_desc_ring_size fnic_dev_desc_ring_siz +#define vnic_dev_clear_desc_ring fnic_dev_clear_desc_ring +#define vnic_dev_alloc_desc_ring fnic_dev_alloc_desc_ring +#define vnic_dev_free_desc_ring fnic_dev_free_desc_ring +#define vnic_dev_cmd fnic_dev_cmd +#define vnic_dev_fw_info fnic_dev_fw_info +#define vnic_dev_spec fnic_dev_spec +#define vnic_dev_stats_clear fnic_dev_stats_clear +#define vnic_dev_cmd_init fnic_dev_cmd_init +#define vnic_dev_stats_dump fnic_dev_stats_dump +#define vnic_dev_hang_notify fnic_dev_hang_notify +#define vnic_dev_packet_filter fnic_dev_packet_filter +#define vnic_dev_add_addr fnic_dev_add_addr +#define vnic_dev_del_addr fnic_dev_del_addr +#define vnic_dev_mac_addr fnic_dev_mac_addr +#define vnic_dev_notify_set fnic_dev_notify_set +#define vnic_dev_notify_unset fnic_dev_notify_unset +#define vnic_dev_link_status fnic_dev_link_status +#define vnic_dev_port_speed fnic_dev_port_speed +#define vnic_dev_msg_lvl fnic_dev_msg_lvl +#define vnic_dev_mtu fnic_dev_mtu +#define vnic_dev_link_down_cnt fnic_dev_link_down_cnt +#define vnic_dev_close fnic_dev_close +#define vnic_dev_enable fnic_dev_enable +#define vnic_dev_disable fnic_dev_disable +#define vnic_dev_open fnic_dev_open +#define vnic_dev_open_done fnic_dev_open_done +#define vnic_dev_init fnic_dev_init +#define vnic_dev_soft_reset fnic_dev_soft_reset +#define vnic_dev_soft_reset_done fnic_dev_soft_reset_done +#define vnic_dev_set_intr_mode fnic_dev_set_intr_mode +#define vnic_dev_get_intr_mode fnic_dev_get_intr_mode +#define vnic_dev_unregister fnic_dev_unregister +#define vnic_dev_register fnic_dev_register + +#ifndef VNIC_PADDR_TARGET +#define VNIC_PADDR_TARGET 0x0000000000000000ULL +#endif + +#ifndef readq +static inline u64 readq(void __iomem *reg) +{ + return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg); +} + +static inline void writeq(u64 val, void __iomem *reg) +{ + writel(val & 0xffffffff, reg); + writel(val >> 32, reg + 0x4UL); +} +#endif + +enum vnic_dev_intr_mode { + VNIC_DEV_INTR_MODE_UNKNOWN, + VNIC_DEV_INTR_MODE_INTX, + VNIC_DEV_INTR_MODE_MSI, + VNIC_DEV_INTR_MODE_MSIX, +}; + +struct vnic_dev_bar { + void __iomem *vaddr; + dma_addr_t bus_addr; + unsigned long len; +}; + +struct vnic_dev_ring { + void *descs; + size_t size; + dma_addr_t base_addr; + size_t base_align; + void *descs_unaligned; + size_t size_unaligned; + dma_addr_t base_addr_unaligned; + unsigned int desc_size; + unsigned int desc_count; + unsigned int desc_avail; +}; + +struct vnic_dev; +struct vnic_stats; + +void *vnic_dev_priv(struct vnic_dev *vdev); +unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, + enum vnic_res_type type); +void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, + unsigned int index); +unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, + unsigned int desc_count, + unsigned int desc_size); +void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring); +int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size); +void vnic_dev_free_desc_ring(struct vnic_dev *vdev, + struct vnic_dev_ring *ring); +int 
vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + u64 *a0, u64 *a1, int wait); +int vnic_dev_fw_info(struct vnic_dev *vdev, + struct vnic_devcmd_fw_info **fw_info); +int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, + unsigned int size, void *value); +int vnic_dev_stats_clear(struct vnic_dev *vdev); +int vnic_dev_cmd_init(struct vnic_dev *vdev); +int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); +int vnic_dev_hang_notify(struct vnic_dev *vdev); +void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, + int broadcast, int promisc, int allmulti); +void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); +void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); +int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); +int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); +void vnic_dev_notify_unset(struct vnic_dev *vdev); +int vnic_dev_link_status(struct vnic_dev *vdev); +u32 vnic_dev_port_speed(struct vnic_dev *vdev); +u32 vnic_dev_msg_lvl(struct vnic_dev *vdev); +u32 vnic_dev_mtu(struct vnic_dev *vdev); +u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev); +int vnic_dev_close(struct vnic_dev *vdev); +int vnic_dev_enable(struct vnic_dev *vdev); +int vnic_dev_disable(struct vnic_dev *vdev); +int vnic_dev_open(struct vnic_dev *vdev, int arg); +int vnic_dev_open_done(struct vnic_dev *vdev, int *done); +int vnic_dev_init(struct vnic_dev *vdev, int arg); +u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, + u16 new_default_vlan); +int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); +int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); +void vnic_dev_set_intr_mode(struct vnic_dev *vdev, + enum vnic_dev_intr_mode intr_mode); +enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev); +void vnic_dev_unregister(struct vnic_dev *vdev); +struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, + void *priv, struct pci_dev *pdev, + struct vnic_dev_bar *bar); + +#endif /* _VNIC_DEV_H_ */ diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h new file mode 100644 index 000000000..f876d223b --- /dev/null +++ b/drivers/scsi/fnic/vnic_devcmd.h @@ -0,0 +1,492 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ +#ifndef _VNIC_DEVCMD_H_ +#define _VNIC_DEVCMD_H_ + +#define _CMD_NBITS 14 +#define _CMD_VTYPEBITS 10 +#define _CMD_FLAGSBITS 6 +#define _CMD_DIRBITS 2 + +#define _CMD_NMASK ((1 << _CMD_NBITS)-1) +#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1) +#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1) +#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1) + +#define _CMD_NSHIFT 0 +#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS) +#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS) +#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS) + +/* + * Direction bits (from host perspective). + */ +#define _CMD_DIR_NONE 0U +#define _CMD_DIR_WRITE 1U +#define _CMD_DIR_READ 2U +#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ) + +/* + * Flag bits. + */ +#define _CMD_FLAGS_NONE 0U +#define _CMD_FLAGS_NOWAIT 1U + +/* + * vNIC type bits. + */ +#define _CMD_VTYPE_NONE 0U +#define _CMD_VTYPE_ENET 1U +#define _CMD_VTYPE_FC 2U +#define _CMD_VTYPE_SCSI 4U +#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI) + +/* + * Used to create cmds.. 
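The packed opcode layout defined above (command number in bits 13:0, vNIC type in bits 23:14, flags in bits 29:24, direction in bits 31:30) is easiest to see with one worked value. The small standalone userspace check below re-derives the encoding of CMD_MCPU_FW_INFO from those shifts and masks; the local variable names are specific to the example.

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1) */
        uint32_t dir = 1;       /* _CMD_DIR_WRITE */
        uint32_t vtype = 7;     /* _CMD_VTYPE_ALL = ENET | FC | SCSI */
        uint32_t nr = 1;
        uint32_t cmd = (dir << 30) | (0u << 24) | (vtype << 14) | nr;

        assert(cmd == 0x4001c001);              /* the packed opcode */
        assert((cmd & 0x3fff) == 1);            /* _CMD_N(): command number */
        assert(((cmd >> 14) & 0x3ff) == 7);     /* _CMD_VTYPE(): vNIC types */
        assert(((cmd >> 24) & 0x3f) == 0);      /* _CMD_FLAGS(): NOWAIT not set */
        assert(((cmd >> 30) & 0x3) == 1);       /* _CMD_DIR(): host-to-device write */
        return 0;
}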
+*/ +#define _CMDCF(dir, flags, vtype, nr) \ + (((dir) << _CMD_DIRSHIFT) | \ + ((flags) << _CMD_FLAGSSHIFT) | \ + ((vtype) << _CMD_VTYPESHIFT) | \ + ((nr) << _CMD_NSHIFT)) +#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr) +#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr) + +/* + * Used to decode cmds.. +*/ +#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK) +#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK) +#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK) +#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK) + +enum vnic_devcmd_cmd { + CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0), + + /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */ + CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1), + + /* dev-specific block member: + * in: (u16)a0=offset,(u8)a1=size + * out: a0=value */ + CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2), + + /* stats clear */ + CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3), + + /* stats dump in mem: (u64)a0=paddr to stats area, + * (u16)a1=sizeof stats area */ + CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4), + + /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */ + CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7), + + /* hang detection notification */ + CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8), + + /* MAC address in (u48)a0 */ + CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ, + _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9), + + /* disable/enable promisc mode: (u8)a0=0/1 */ +/***** XXX DEPRECATED *****/ + CMD_PROMISC_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10), + + /* disable/enable all-multi mode: (u8)a0=0/1 */ +/***** XXX DEPRECATED *****/ + CMD_ALLMULTI_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11), + + /* add addr from (u48)a0 */ + CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE, + _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12), + + /* del addr from (u48)a0 */ + CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE, + _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13), + + /* add VLAN id in (u16)a0 */ + CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14), + + /* del VLAN id in (u16)a0 */ + CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15), + + /* nic_cfg in (u32)a0 */ + CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16), + + /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */ + CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17), + + /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */ + CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18), + + /* initiate softreset */ + CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19), + + /* softreset status: + * out: a0=0 reset complete, a0=1 reset in progress */ + CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20), + + /* set struct vnic_devcmd_notify buffer in mem: + * in: + * (u64)a0=paddr to notify (set paddr=0 to unset) + * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify) + * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr) + * out: + * (u32)a1 = effective size + */ + CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21), + + /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct, + * (u8)a1=PXENV_UNDI_xxx */ + CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22), + + /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */ + CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23), + + /* open status: + * out: a0=0 open complete, a0=1 open in progress */ + CMD_OPEN_STATUS = 
_CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24), + + /* close vnic */ + CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25), + + /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */ + CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26), + + /* variant of CMD_INIT, with provisioning info + * (u64)a0=paddr of vnic_devcmd_provinfo + * (u32)a1=sizeof provision info + */ + CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27), + + /* enable virtual link */ + CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), + + /* disable virtual link */ + CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29), + + /* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */ + CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30), + + /* init status: + * out: a0=0 init complete, a0=1 init in progress + * if a0=0, a1=errno */ + CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31), + + /* INT13 API: (u64)a0=paddr to vnic_int13_params struct + * (u8)a1=INT13_CMD_xxx */ + CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32), + + /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */ + CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33), + + /* undo initialize of virtual link */ + CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), + + /* check fw capability of a cmd: + * in: (u32)a0=cmd + * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */ + CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36), + + /* persistent binding info + * in: (u64)a0=paddr of arg + * (u32)a1=CMD_PERBI_XXX */ + CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37), + + /* Interrupt Assert Register functionality + * in: (u16)a0=interrupt number to assert + */ + CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38), + + /* initiate hangreset, like softreset after hang detected */ + CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39), + + /* hangreset status: + * out: a0=0 reset complete, a0=1 reset in progress */ + CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40), + + /* + * Set hw ingress packet vlan rewrite mode: + * in: (u32)a0=new vlan rewrite mode + * out: (u32)a0=old vlan rewrite mode */ + CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41), + + /* + * in: (u16)a0=bdf of target vnic + * (u32)a1=cmd to proxy + * a2-a15=args to cmd in a1 + * out: (u32)a0=status of proxied cmd + * a1-a15=out args of proxied cmd */ + CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42), + + /* + * As for BY_BDF except a0 is index of hvnlink subordinate vnic + * or SR-IOV virtual vnic + */ + CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43), + + /* + * For HPP toggle: + * adapter-info-get + * in: (u64)a0=physical address of buffer passed in from caller. + * (u16)a1=size of buffer specified in a0. + * out: (u64)a0=physical address of buffer passed in from caller. + * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or + * 0 if no VIF-CONFIG-INFO TLV was ever received. 
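The CMD_CAPABILITY contract above (opcode in via a0, errno back out) is what lets a driver probe optional firmware commands before relying on them; vnic_dev_cmd2() earlier in this patch already special-cases the resulting ERR_ECMDUNKNOWN so capability probes do not spam the log. A minimal sketch, assuming only the helpers declared in vnic_dev.h; the function name is illustrative, not part of the driver:

/* Returns true if the firmware reports the opcode as implemented. */
static bool example_fw_has_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
        u64 a0 = cmd;
        u64 a1 = 0;
        int wait = 1000;        /* same wait count the driver uses elsewhere */

        return vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait) == 0;
}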
*/ + CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44), + + /* + * INT13 API: (u64)a0=paddr to vnic_int13_params struct + * (u32)a1=INT13_CMD_xxx + */ + CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45), + + /* + * Set default vlan: + * in: (u16)a0=new default vlan + * (u16)a1=zero for overriding vlan with param a0, + * non-zero for resetting vlan to the default + * out: (u16)a0=old default vlan + */ + CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46), + + /* init_prov_info2: + * Variant of CMD_INIT_PROV_INFO, where it will not try to enable + * the vnic until CMD_ENABLE2 is issued. + * (u64)a0=paddr of vnic_devcmd_provinfo + * (u32)a1=sizeof provision info + */ + CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47), + + /* enable2: + * (u32)a0=0 ==> standby + * =CMD_ENABLE2_ACTIVE ==> active + */ + CMD_ENABLE2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 48), + + /* + * cmd_status: + * Returns the status of the specified command + * Input: + * a0 = command for which status is being queried. + * Possible values are: + * CMD_SOFT_RESET + * CMD_HANG_RESET + * CMD_OPEN + * CMD_INIT + * CMD_INIT_PROV_INFO + * CMD_DEINIT + * CMD_INIT_PROV_INFO2 + * CMD_ENABLE2 + * Output: + * if status == STAT_ERROR + * a0 = ERR_ENOTSUPPORTED - status for command in a0 is + * not supported + * if status == STAT_NONE + * a0 = status of the devcmd specified in a0 as follows. + * ERR_SUCCESS - command in a0 completed successfully + * ERR_EINPROGRESS - command in a0 is still in progress + */ + CMD_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 49), + + /* + * Returns interrupt coalescing timer conversion factors. + * After calling this devcmd, ENIC driver can convert + * interrupt coalescing timer in usec into CPU cycles as follows: + * + * intr_timer_cycles = intr_timer_usec * multiplier / divisor + * + * Interrupt coalescing timer in usecs can be be converted/obtained + * from CPU cycles as follows: + * + * intr_timer_usec = intr_timer_cycles * divisor / multiplier + * + * in: none + * out: (u32)a0 = multiplier + * (u32)a1 = divisor + * (u32)a2 = maximum timer value in usec + */ + CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50), + + /* + * ISCSI DUMP API: + * in: (u64)a0=paddr of the param or param itself + * (u32)a1=ISCSI_CMD_xxx + */ + CMD_ISCSI_DUMP_REQ = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 51), + + /* + * ISCSI DUMP STATUS API: + * in: (u32)a0=cmd tag + * in: (u32)a1=ISCSI_CMD_xxx + * out: (u32)a0=cmd status + */ + CMD_ISCSI_DUMP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 52), + + /* + * Subvnic migration from MQ <--> VF. + * Enable the LIF migration from MQ to VF and vice versa. MQ and VF + * indexes are statically bound at the time of initialization. + * Based on the + * direction of migration, the resources of either MQ or the VF shall + * be attached to the LIF. 
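The CMD_INTR_COAL_CONVERT description above boils down to two integer conversions. The sketch below spells them out; the multiplier, divisor and maximum are values the adapter reports, and the sample numbers in the trailing comment are invented purely to show the round trip.

static u32 example_intr_usec_to_cycles(u32 usec, u32 mul, u32 div, u32 max_usec)
{
        if (usec > max_usec)
                usec = max_usec;        /* clamp to the advertised maximum */
        return usec * mul / div;
}

static u32 example_intr_cycles_to_usec(u32 cycles, u32 mul, u32 div)
{
        return cycles * div / mul;
}

/* e.g. with mul=4, div=3: 30 usec -> 40 timer cycles, and 40 cycles -> 30 usec */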
+ * in: (u32)a0=Direction of Migration + * 0=> Migrate to VF + * 1=> Migrate to MQ + * (u32)a1=VF index (MQ index) + */ + CMD_MIGRATE_SUBVNIC = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 53), + + /* + * Register / Deregister the notification block for MQ subvnics + * in: + * (u64)a0=paddr to notify (set paddr=0 to unset) + * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify) + * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr) + * out: + * (u32)a1 = effective size + */ + CMD_SUBVNIC_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 54), + + /* + * Set the predefined mac address as default + * in: + * (u48)a0=mac addr + */ + CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55), + + /* Update the provisioning info of the given VIF + * (u64)a0=paddr of vnic_devcmd_provinfo + * (u32)a1=sizeof provision info + */ + CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56), + + /* + * Initialization for the devcmd2 interface. + * in: (u64) a0=host result buffer physical address + * in: (u16) a1=number of entries in result buffer + */ + CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57) +}; + +/* flags for CMD_OPEN */ +#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */ + +#define CMD_OPENF_RQ_ENABLE_THEN_POST 0x2 + +/* flags for CMD_INIT */ +#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */ + +/* flags for CMD_PACKET_FILTER */ +#define CMD_PFILTER_DIRECTED 0x01 +#define CMD_PFILTER_MULTICAST 0x02 +#define CMD_PFILTER_BROADCAST 0x04 +#define CMD_PFILTER_PROMISCUOUS 0x08 +#define CMD_PFILTER_ALL_MULTICAST 0x10 + +enum vnic_devcmd_status { + STAT_NONE = 0, + STAT_BUSY = 1 << 0, /* cmd in progress */ + STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */ +}; + +enum vnic_devcmd_error { + ERR_SUCCESS = 0, + ERR_EINVAL = 1, + ERR_EFAULT = 2, + ERR_EPERM = 3, + ERR_EBUSY = 4, + ERR_ECMDUNKNOWN = 5, + ERR_EBADSTATE = 6, + ERR_ENOMEM = 7, + ERR_ETIMEDOUT = 8, + ERR_ELINKDOWN = 9, +}; + +struct vnic_devcmd_fw_info { + char fw_version[32]; + char fw_build[32]; + char hw_version[32]; + char hw_serial_number[32]; +}; + +struct vnic_devcmd_notify { + u32 csum; /* checksum over following words */ + + u32 link_state; /* link up == 1 */ + u32 port_speed; /* effective port speed (rate limit) */ + u32 mtu; /* MTU */ + u32 msglvl; /* requested driver msg lvl */ + u32 uif; /* uplink interface */ + u32 status; /* status bits (see VNIC_STF_*) */ + u32 error; /* error code (see ERR_*) for first ERR */ + u32 link_down_cnt; /* running count of link down transitions */ +}; +#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */ + +struct vnic_devcmd_provinfo { + u8 oui[3]; + u8 type; + u8 data[]; +}; + +/* + * Writing cmd register causes STAT_BUSY to get set in status register. + * When cmd completes, STAT_BUSY will be cleared. + * + * If cmd completed successfully STAT_ERROR will be clear + * and args registers contain cmd-specific results. + * + * If cmd error, STAT_ERROR will be set and args[0] contains error code. + * + * status register is read-only. While STAT_BUSY is set, + * all other register contents are read-only. + */ + +/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */ +#define VNIC_DEVCMD_NARGS 15 +struct vnic_devcmd { + u32 status; /* RO */ + u32 cmd; /* RW */ + u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */ +}; + +/* + * Version 2 of the interface. + * + * Some things are carried over, notably the vnic_devcmd_cmd enum. 
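Both the devcmd2 result ring (see vnic_dev_cmd2() earlier) and the completion queues rely on a one-bit "color" stamped into every entry instead of a shared producer index: the writer flips the color each time it wraps the ring, so an entry is new exactly when its color matches what the reader currently expects. A generic reader-side sketch with invented names follows; the CQ code tracks the previous lap's color and looks for a mismatch instead, which is the same idea with inverted bookkeeping.

struct example_entry {
        u64 payload;
        u8 color;       /* flipped by the producer on every ring wrap */
};

static bool example_poll_one(struct example_entry *ring, unsigned int ring_size,
                             unsigned int *next, u8 *expected_color)
{
        struct example_entry *e = &ring[*next];

        if (e->color != *expected_color)
                return false;           /* still last lap's entry: nothing new */

        /*
         * Entry is valid: consume it, advance, and flip the expected color on
         * wrap. A real reader also orders the payload read after the color
         * check, as the rmb() in vnic_dev_cmd2() does.
         */
        if (++(*next) == ring_size) {
                *next = 0;
                *expected_color ^= 1;
        }
        return true;
}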
+ */ + +/* + * Flags for vnic_devcmd2.flags + */ + +#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */ + +#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS + +struct vnic_devcmd2 { + u16 pad; + u16 flags; + u32 cmd; /* same command #defines as original */ + u64 args[VNIC_DEVCMD2_NARGS]; +}; + +#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS +struct devcmd2_result { + u64 results[VNIC_DEVCMD2_NRESULTS]; + u32 pad; + u16 completed_index; /* into copy WQ */ + u8 error; /* same error codes as original */ + u8 color; /* 0 or 1 as with completion queues */ +}; + +#define DEVCMD2_RING_SIZE 32 +#define DEVCMD2_DESC_SIZE 128 + +#define DEVCMD2_RESULTS_SIZE_MAX ((1 << 16) - 1) + +#endif /* _VNIC_DEVCMD_H_ */ diff --git a/drivers/scsi/fnic/vnic_intr.c b/drivers/scsi/fnic/vnic_intr.c new file mode 100644 index 000000000..df7f63acd --- /dev/null +++ b/drivers/scsi/fnic/vnic_intr.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "vnic_dev.h" +#include "vnic_intr.h" + +void vnic_intr_free(struct vnic_intr *intr) +{ + intr->ctrl = NULL; +} + +int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, + unsigned int index) +{ + intr->index = index; + intr->vdev = vdev; + + intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); + if (!intr->ctrl) { + printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n", + index); + return -EINVAL; + } + + return 0; +} + +void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, + unsigned int coalescing_type, unsigned int mask_on_assertion) +{ + iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer); + iowrite32(coalescing_type, &intr->ctrl->coalescing_type); + iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion); + iowrite32(0, &intr->ctrl->int_credits); +} + +void vnic_intr_clean(struct vnic_intr *intr) +{ + iowrite32(0, &intr->ctrl->int_credits); +} diff --git a/drivers/scsi/fnic/vnic_intr.h b/drivers/scsi/fnic/vnic_intr.h new file mode 100644 index 000000000..acc194c0f --- /dev/null +++ b/drivers/scsi/fnic/vnic_intr.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ +#ifndef _VNIC_INTR_H_ +#define _VNIC_INTR_H_ + +#include +#include "vnic_dev.h" + +/* + * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth + * Driver) when both are built with CONFIG options =y + */ +#define vnic_intr_unmask fnic_intr_unmask +#define vnic_intr_mask fnic_intr_mask +#define vnic_intr_return_credits fnic_intr_return_credits +#define vnic_intr_credits fnic_intr_credits +#define vnic_intr_return_all_credits fnic_intr_return_all_credits +#define vnic_intr_legacy_pba fnic_intr_legacy_pba +#define vnic_intr_free fnic_intr_free +#define vnic_intr_alloc fnic_intr_alloc +#define vnic_intr_init fnic_intr_init +#define vnic_intr_clean fnic_intr_clean + +#define VNIC_INTR_TIMER_MAX 0xffff + +#define VNIC_INTR_TIMER_TYPE_ABS 0 +#define VNIC_INTR_TIMER_TYPE_QUIET 1 + +/* Interrupt control */ +struct vnic_intr_ctrl { + u32 coalescing_timer; /* 0x00 */ + u32 pad0; + u32 coalescing_value; /* 0x08 */ + u32 pad1; + u32 coalescing_type; /* 0x10 */ + u32 pad2; + u32 mask_on_assertion; /* 0x18 */ + u32 pad3; + u32 mask; /* 0x20 */ + u32 pad4; + u32 int_credits; /* 0x28 */ + u32 pad5; + u32 int_credit_return; /* 0x30 */ + u32 pad6; +}; + +struct vnic_intr { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */ +}; + +static inline void vnic_intr_unmask(struct vnic_intr *intr) +{ + iowrite32(0, &intr->ctrl->mask); +} + +static inline void vnic_intr_mask(struct vnic_intr *intr) +{ + iowrite32(1, &intr->ctrl->mask); +} + +static inline void vnic_intr_return_credits(struct vnic_intr *intr, + unsigned int credits, int unmask, int reset_timer) +{ +#define VNIC_INTR_UNMASK_SHIFT 16 +#define VNIC_INTR_RESET_TIMER_SHIFT 17 + + u32 int_credit_return = (credits & 0xffff) | + (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) | + (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0); + + iowrite32(int_credit_return, &intr->ctrl->int_credit_return); +} + +static inline unsigned int vnic_intr_credits(struct vnic_intr *intr) +{ + return ioread32(&intr->ctrl->int_credits); +} + +static inline void vnic_intr_return_all_credits(struct vnic_intr *intr) +{ + unsigned int credits = vnic_intr_credits(intr); + int unmask = 1; + int reset_timer = 1; + + vnic_intr_return_credits(intr, credits, unmask, reset_timer); +} + +static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba) +{ + /* read PBA without clearing */ + return ioread32(legacy_pba); +} + +void vnic_intr_free(struct vnic_intr *intr); +int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, + unsigned int index); +void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, + unsigned int coalescing_type, unsigned int mask_on_assertion); +void vnic_intr_clean(struct vnic_intr *intr); + +#endif /* _VNIC_INTR_H_ */ diff --git a/drivers/scsi/fnic/vnic_nic.h b/drivers/scsi/fnic/vnic_nic.h new file mode 100644 index 000000000..6896f16d5 --- /dev/null +++ b/drivers/scsi/fnic/vnic_nic.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ +#ifndef _VNIC_NIC_H_ +#define _VNIC_NIC_H_ + +/* + * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth + * Driver) when both are built with CONFIG options =y + */ +#define vnic_set_nic_cfg fnic_set_nic_cfg + +#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL +#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0 +#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8) +#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL +#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8 +#define NIC_CFG_RSS_HASH_BITS (7UL << 16) +#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL +#define NIC_CFG_RSS_HASH_BITS_SHIFT 16 +#define NIC_CFG_RSS_BASE_CPU (7UL << 19) +#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL +#define NIC_CFG_RSS_BASE_CPU_SHIFT 19 +#define NIC_CFG_RSS_ENABLE (1UL << 22) +#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL +#define NIC_CFG_RSS_ENABLE_SHIFT 22 +#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23) +#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL +#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23 +#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24) +#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL +#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24 + +static inline void vnic_set_nic_cfg(u32 *nic_cfg, + u8 rss_default_cpu, u8 rss_hash_type, + u8 rss_hash_bits, u8 rss_base_cpu, + u8 rss_enable, u8 tso_ipid_split_en, + u8 ig_vlan_strip_en) +{ + *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) | + ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD) + << NIC_CFG_RSS_HASH_TYPE_SHIFT) | + ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD) + << NIC_CFG_RSS_HASH_BITS_SHIFT) | + ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD) + << NIC_CFG_RSS_BASE_CPU_SHIFT) | + ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD) + << NIC_CFG_RSS_ENABLE_SHIFT) | + ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD) + << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) | + ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD) + << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT); +} + +#endif /* _VNIC_NIC_H_ */ diff --git a/drivers/scsi/fnic/vnic_resource.h b/drivers/scsi/fnic/vnic_resource.h new file mode 100644 index 000000000..3d260b831 --- /dev/null +++ b/drivers/scsi/fnic/vnic_resource.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ +#ifndef _VNIC_RESOURCE_H_ +#define _VNIC_RESOURCE_H_ + +#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */ +#define VNIC_RES_VERSION 0x00000000L + +/* vNIC resource types */ +enum vnic_res_type { + RES_TYPE_EOL, /* End-of-list */ + RES_TYPE_WQ, /* Work queues */ + RES_TYPE_RQ, /* Receive queues */ + RES_TYPE_CQ, /* Completion queues */ + RES_TYPE_RSVD1, + RES_TYPE_NIC_CFG, /* Enet NIC config registers */ + RES_TYPE_RSVD2, + RES_TYPE_RSVD3, + RES_TYPE_RSVD4, + RES_TYPE_RSVD5, + RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */ + RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */ + RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */ + RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */ + RES_TYPE_RSVD6, + RES_TYPE_RSVD7, + RES_TYPE_DEVCMD, /* Device command region */ + RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */ + RES_TYPE_SUBVNIC, /* subvnic resource type */ + RES_TYPE_MQ_WQ, /* MQ Work queues */ + RES_TYPE_MQ_RQ, /* MQ Receive queues */ + RES_TYPE_MQ_CQ, /* MQ Completion queues */ + RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */ + RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */ + RES_TYPE_DEVCMD2, /* Device control region */ + + RES_TYPE_MAX, /* Count of resource types */ +}; + +struct vnic_resource_header { + u32 magic; + u32 version; +}; + +struct vnic_resource { + u8 type; + u8 bar; + u8 pad[2]; + u32 bar_offset; + u32 count; +}; + +#endif /* _VNIC_RESOURCE_H_ */ diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c new file mode 100644 index 000000000..350607d13 --- /dev/null +++ b/drivers/scsi/fnic/vnic_rq.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "vnic_dev.h" +#include "vnic_rq.h" + +static int vnic_rq_alloc_bufs(struct vnic_rq *rq) +{ + struct vnic_rq_buf *buf; + unsigned int i, j, count = rq->ring.desc_count; + unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count); + + for (i = 0; i < blks; i++) { + rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); + if (!rq->bufs[i]) { + printk(KERN_ERR "Failed to alloc rq_bufs\n"); + return -ENOMEM; + } + } + + for (i = 0; i < blks; i++) { + buf = rq->bufs[i]; + for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) { + buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j; + buf->desc = (u8 *)rq->ring.descs + + rq->ring.desc_size * buf->index; + if (buf->index + 1 == count) { + buf->next = rq->bufs[0]; + break; + } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) { + buf->next = rq->bufs[i + 1]; + } else { + buf->next = buf + 1; + buf++; + } + } + } + + rq->to_use = rq->to_clean = rq->bufs[0]; + rq->buf_index = 0; + + return 0; +} + +void vnic_rq_free(struct vnic_rq *rq) +{ + struct vnic_dev *vdev; + unsigned int i; + + vdev = rq->vdev; + + vnic_dev_free_desc_ring(vdev, &rq->ring); + + for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) { + kfree(rq->bufs[i]); + rq->bufs[i] = NULL; + } + + rq->ctrl = NULL; +} + +int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + rq->index = index; + rq->vdev = vdev; + + rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index); + if (!rq->ctrl) { + printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index); + return -EINVAL; + } + + vnic_rq_disable(rq); + + err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size); + if (err) + return err; + + err = vnic_rq_alloc_bufs(rq); + if (err) { + vnic_rq_free(rq); + 
return err; + } + + return 0; +} + +void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u64 paddr; + u32 fetch_index; + + paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &rq->ctrl->ring_base); + iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size); + iowrite32(cq_index, &rq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset); + iowrite32(0, &rq->ctrl->dropped_packet_count); + iowrite32(0, &rq->ctrl->error_status); + + /* Use current fetch_index as the ring starting point */ + fetch_index = ioread32(&rq->ctrl->fetch_index); + rq->to_use = rq->to_clean = + &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES] + [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES]; + iowrite32(fetch_index, &rq->ctrl->posted_index); + + rq->buf_index = 0; +} + +unsigned int vnic_rq_error_status(struct vnic_rq *rq) +{ + return ioread32(&rq->ctrl->error_status); +} + +void vnic_rq_enable(struct vnic_rq *rq) +{ + iowrite32(1, &rq->ctrl->enable); +} + +int vnic_rq_disable(struct vnic_rq *rq) +{ + unsigned int wait; + + iowrite32(0, &rq->ctrl->enable); + + /* Wait for HW to ACK disable request */ + for (wait = 0; wait < 100; wait++) { + if (!(ioread32(&rq->ctrl->running))) + return 0; + udelay(1); + } + + printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index); + + return -ETIMEDOUT; +} + +void vnic_rq_clean(struct vnic_rq *rq, + void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)) +{ + struct vnic_rq_buf *buf; + u32 fetch_index; + + WARN_ON(ioread32(&rq->ctrl->enable)); + + buf = rq->to_clean; + + while (vnic_rq_desc_used(rq) > 0) { + + (*buf_clean)(rq, buf); + + buf = rq->to_clean = buf->next; + rq->ring.desc_avail++; + } + + /* Use current fetch_index as the ring starting point */ + fetch_index = ioread32(&rq->ctrl->fetch_index); + rq->to_use = rq->to_clean = + &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES] + [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES]; + iowrite32(fetch_index, &rq->ctrl->posted_index); + + rq->buf_index = 0; + + vnic_dev_clear_desc_ring(&rq->ring); +} diff --git a/drivers/scsi/fnic/vnic_rq.h b/drivers/scsi/fnic/vnic_rq.h new file mode 100644 index 000000000..1066255de --- /dev/null +++ b/drivers/scsi/fnic/vnic_rq.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ +#ifndef _VNIC_RQ_H_ +#define _VNIC_RQ_H_ + +#include +#include "vnic_dev.h" +#include "vnic_cq.h" + +/* + * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth + * Driver) when both are built with CONFIG options =y + */ +#define vnic_rq_desc_avail fnic_rq_desc_avail +#define vnic_rq_desc_used fnic_rq_desc_used +#define vnic_rq_next_desc fnic_rq_next_desc +#define vnic_rq_next_index fnic_rq_next_index +#define vnic_rq_next_buf_index fnic_rq_next_buf_index +#define vnic_rq_post fnic_rq_post +#define vnic_rq_posting_soon fnic_rq_posting_soon +#define vnic_rq_return_descs fnic_rq_return_descs +#define vnic_rq_service fnic_rq_service +#define vnic_rq_fill fnic_rq_fill +#define vnic_rq_free fnic_rq_free +#define vnic_rq_alloc fnic_rq_alloc +#define vnic_rq_init fnic_rq_init +#define vnic_rq_error_status fnic_rq_error_status +#define vnic_rq_enable fnic_rq_enable +#define vnic_rq_disable fnic_rq_disable +#define vnic_rq_clean fnic_rq_clean + +/* Receive queue control */ +struct vnic_rq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 posted_index; /* 0x10 */ + u32 pad1; + u32 cq_index; /* 0x18 */ + u32 pad2; + u32 enable; /* 0x20 */ + u32 pad3; + u32 running; /* 0x28 */ + u32 pad4; + u32 fetch_index; /* 0x30 */ + u32 pad5; + u32 error_interrupt_enable; /* 0x38 */ + u32 pad6; + u32 error_interrupt_offset; /* 0x40 */ + u32 pad7; + u32 error_status; /* 0x48 */ + u32 pad8; + u32 dropped_packet_count; /* 0x50 */ + u32 pad9; + u32 dropped_packet_count_rc; /* 0x58 */ + u32 pad10; +}; + +/* Break the vnic_rq_buf allocations into blocks of 64 entries */ +#define VNIC_RQ_BUF_BLK_ENTRIES 64 +#define VNIC_RQ_BUF_BLK_SZ \ + (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf)) +#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \ + DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES) +#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096) + +struct vnic_rq_buf { + struct vnic_rq_buf *next; + dma_addr_t dma_addr; + void *os_buf; + unsigned int os_buf_index; + unsigned int len; + unsigned int index; + void *desc; +}; + +struct vnic_rq { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX]; + struct vnic_rq_buf *to_use; + struct vnic_rq_buf *to_clean; + void *os_buf_head; + unsigned int buf_index; + unsigned int pkts_outstanding; +}; + +static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) +{ + /* how many does SW own? */ + return rq->ring.desc_avail; +} + +static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) +{ + /* how many does HW own? 
*/ + return rq->ring.desc_count - rq->ring.desc_avail - 1; +} + +static inline void *vnic_rq_next_desc(struct vnic_rq *rq) +{ + return rq->to_use->desc; +} + +static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) +{ + return rq->to_use->index; +} + +static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) +{ + return rq->buf_index++; +} + +static inline void vnic_rq_post(struct vnic_rq *rq, + void *os_buf, unsigned int os_buf_index, + dma_addr_t dma_addr, unsigned int len) +{ + struct vnic_rq_buf *buf = rq->to_use; + + buf->os_buf = os_buf; + buf->os_buf_index = os_buf_index; + buf->dma_addr = dma_addr; + buf->len = len; + + buf = buf->next; + rq->to_use = buf; + rq->ring.desc_avail--; + + /* Move the posted_index every nth descriptor + */ + +#ifndef VNIC_RQ_RETURN_RATE +#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */ +#endif + + if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) { + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. Otherwise, hardware can read + * stale descriptor fields. + */ + wmb(); + iowrite32(buf->index, &rq->ctrl->posted_index); + } +} + +static inline int vnic_rq_posting_soon(struct vnic_rq *rq) +{ + return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0; +} + +static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) +{ + rq->ring.desc_avail += count; +} + +enum desc_return_options { + VNIC_RQ_RETURN_DESC, + VNIC_RQ_DEFER_RETURN_DESC, +}; + +static inline void vnic_rq_service(struct vnic_rq *rq, + struct cq_desc *cq_desc, u16 completed_index, + int desc_return, void (*buf_service)(struct vnic_rq *rq, + struct cq_desc *cq_desc, struct vnic_rq_buf *buf, + int skipped, void *opaque), void *opaque) +{ + struct vnic_rq_buf *buf; + int skipped; + + buf = rq->to_clean; + while (1) { + + skipped = (buf->index != completed_index); + + (*buf_service)(rq, cq_desc, buf, skipped, opaque); + + if (desc_return == VNIC_RQ_RETURN_DESC) + rq->ring.desc_avail++; + + rq->to_clean = buf->next; + + if (!skipped) + break; + + buf = rq->to_clean; + } +} + +static inline int vnic_rq_fill(struct vnic_rq *rq, + int (*buf_fill)(struct vnic_rq *rq)) +{ + int err; + + while (vnic_rq_desc_avail(rq) > 1) { + + err = (*buf_fill)(rq); + if (err) + return err; + } + + return 0; +} + +void vnic_rq_free(struct vnic_rq *rq); +int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, + unsigned int desc_count, unsigned int desc_size); +void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +unsigned int vnic_rq_error_status(struct vnic_rq *rq); +void vnic_rq_enable(struct vnic_rq *rq); +int vnic_rq_disable(struct vnic_rq *rq); +void vnic_rq_clean(struct vnic_rq *rq, + void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)); + +#endif /* _VNIC_RQ_H_ */ diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h new file mode 100644 index 000000000..4e12f7b32 --- /dev/null +++ b/drivers/scsi/fnic/vnic_scsi.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
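vnic_rq_fill() above drives a caller-supplied callback until only the single reserved descriptor is left, and vnic_rq_post() batches the posted_index doorbell so it is written roughly once every VNIC_RQ_RETURN_RATE + 1 (16) buffers rather than once per buffer. A sketch of such a callback follows; example_alloc_buffer() and example_map_buffer() are hypothetical stand-ins for the driver's real frame allocation and DMA mapping.

static void *example_alloc_buffer(unsigned int len);              /* hypothetical */
static dma_addr_t example_map_buffer(void *buf, unsigned int len); /* hypothetical */

static int example_rq_buf_fill(struct vnic_rq *rq)
{
        unsigned int len = 2048;        /* hypothetical receive buffer size */
        void *buf = example_alloc_buffer(len);
        dma_addr_t pa;

        if (!buf)
                return -ENOMEM;         /* stops the vnic_rq_fill() loop */

        pa = example_map_buffer(buf, len);
        vnic_rq_post(rq, buf, 0, pa, len);      /* doorbell only every 16th post */
        return 0;
}

/* Refill after init, or after completions are reclaimed: */
/*      err = vnic_rq_fill(rq, example_rq_buf_fill);            */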
+ */ +#ifndef _VNIC_SCSI_H_ +#define _VNIC_SCSI_H_ + +#define VNIC_FNIC_WQ_COPY_COUNT_MIN 1 +#define VNIC_FNIC_WQ_COPY_COUNT_MAX 1 + +#define VNIC_FNIC_WQ_DESCS_MIN 64 +#define VNIC_FNIC_WQ_DESCS_MAX 128 + +#define VNIC_FNIC_WQ_COPY_DESCS_MIN 64 +#define VNIC_FNIC_WQ_COPY_DESCS_MAX 512 + +#define VNIC_FNIC_RQ_DESCS_MIN 64 +#define VNIC_FNIC_RQ_DESCS_MAX 128 + +#define VNIC_FNIC_EDTOV_MIN 1000 +#define VNIC_FNIC_EDTOV_MAX 255000 +#define VNIC_FNIC_EDTOV_DEF 2000 + +#define VNIC_FNIC_RATOV_MIN 1000 +#define VNIC_FNIC_RATOV_MAX 255000 + +#define VNIC_FNIC_MAXDATAFIELDSIZE_MIN 256 +#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2112 + +#define VNIC_FNIC_FLOGI_RETRIES_MIN 0 +#define VNIC_FNIC_FLOGI_RETRIES_MAX 0xffffffff +#define VNIC_FNIC_FLOGI_RETRIES_DEF 0xffffffff + +#define VNIC_FNIC_FLOGI_TIMEOUT_MIN 1000 +#define VNIC_FNIC_FLOGI_TIMEOUT_MAX 255000 + +#define VNIC_FNIC_PLOGI_RETRIES_MIN 0 +#define VNIC_FNIC_PLOGI_RETRIES_MAX 255 +#define VNIC_FNIC_PLOGI_RETRIES_DEF 8 + +#define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000 +#define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000 + +#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 1 +#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 2048 + +#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0 +#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000 + +#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MIN 0 +#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX 240000 + +#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MIN 0 +#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX 255 + +#define VNIC_FNIC_LUNS_PER_TARGET_MIN 1 +#define VNIC_FNIC_LUNS_PER_TARGET_MAX 1024 + +/* Device-specific region: scsi configuration */ +struct vnic_fc_config { + u64 node_wwn; + u64 port_wwn; + u32 flags; + u32 wq_enet_desc_count; + u32 wq_copy_desc_count; + u32 rq_desc_count; + u32 flogi_retries; + u32 flogi_timeout; + u32 plogi_retries; + u32 plogi_timeout; + u32 io_throttle_count; + u32 link_down_timeout; + u32 port_down_timeout; + u32 port_down_io_retries; + u32 luns_per_tgt; + u16 maxdatafieldsize; + u16 ed_tov; + u16 ra_tov; + u16 intr_timer; + u8 intr_timer_type; +}; + +#define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */ +#define VFCF_PERBI 0x2 /* persistent binding info available */ +#define VFCF_FIP_CAPABLE 0x4 /* firmware can handle FIP */ + +#endif /* _VNIC_SCSI_H_ */ diff --git a/drivers/scsi/fnic/vnic_stats.h b/drivers/scsi/fnic/vnic_stats.h new file mode 100644 index 000000000..4396397b0 --- /dev/null +++ b/drivers/scsi/fnic/vnic_stats.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ +#ifndef _VNIC_STATS_H_ +#define _VNIC_STATS_H_ + +/* Tx statistics */ +struct vnic_tx_stats { + u64 tx_frames_ok; + u64 tx_unicast_frames_ok; + u64 tx_multicast_frames_ok; + u64 tx_broadcast_frames_ok; + u64 tx_bytes_ok; + u64 tx_unicast_bytes_ok; + u64 tx_multicast_bytes_ok; + u64 tx_broadcast_bytes_ok; + u64 tx_drops; + u64 tx_errors; + u64 tx_tso; + u64 rsvd[16]; +}; + +/* Rx statistics */ +struct vnic_rx_stats { + u64 rx_frames_ok; + u64 rx_frames_total; + u64 rx_unicast_frames_ok; + u64 rx_multicast_frames_ok; + u64 rx_broadcast_frames_ok; + u64 rx_bytes_ok; + u64 rx_unicast_bytes_ok; + u64 rx_multicast_bytes_ok; + u64 rx_broadcast_bytes_ok; + u64 rx_drop; + u64 rx_no_bufs; + u64 rx_errors; + u64 rx_rss; + u64 rx_crc_errors; + u64 rx_frames_64; + u64 rx_frames_127; + u64 rx_frames_255; + u64 rx_frames_511; + u64 rx_frames_1023; + u64 rx_frames_1518; + u64 rx_frames_to_max; + u64 rsvd[16]; +}; + +struct vnic_stats { + struct vnic_tx_stats tx; + struct vnic_rx_stats rx; +}; + +#endif /* _VNIC_STATS_H_ */ diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c new file mode 100644 index 000000000..426b901c8 --- /dev/null +++ b/drivers/scsi/fnic/vnic_wq.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "vnic_dev.h" +#include "vnic_wq.h" + + +static int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int index, enum vnic_res_type res_type) +{ + wq->ctrl = vnic_dev_get_res(vdev, res_type, index); + + if (!wq->ctrl) + return -EINVAL; + + return 0; +} + + +static int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size) +{ + return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); +} + + +static int vnic_wq_alloc_bufs(struct vnic_wq *wq) +{ + struct vnic_wq_buf *buf; + unsigned int i, j, count = wq->ring.desc_count; + unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); + + for (i = 0; i < blks; i++) { + wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); + if (!wq->bufs[i]) { + printk(KERN_ERR "Failed to alloc wq_bufs\n"); + return -ENOMEM; + } + } + + for (i = 0; i < blks; i++) { + buf = wq->bufs[i]; + for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) { + buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j; + buf->desc = (u8 *)wq->ring.descs + + wq->ring.desc_size * buf->index; + if (buf->index + 1 == count) { + buf->next = wq->bufs[0]; + break; + } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) { + buf->next = wq->bufs[i + 1]; + } else { + buf->next = buf + 1; + buf++; + } + } + } + + wq->to_use = wq->to_clean = wq->bufs[0]; + + return 0; +} + +void vnic_wq_free(struct vnic_wq *wq) +{ + struct vnic_dev *vdev; + unsigned int i; + + vdev = wq->vdev; + + vnic_dev_free_desc_ring(vdev, &wq->ring); + + for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) { + kfree(wq->bufs[i]); + wq->bufs[i] = NULL; + } + + wq->ctrl = NULL; + +} + +int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + wq->index = index; + wq->vdev = vdev; + + wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); + if (!wq->ctrl) { + printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index); + return -EINVAL; + } + + vnic_wq_disable(wq); + + err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); + if (err) + return err; + + 
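The rx_frames_64 .. rx_frames_to_max counters above form a frame-length histogram. The bucket edges below are inferred from the field names only; the hardware maintains the real counters, and this is just a model of the binning:

#include <stdio.h>

static const char *rx_len_bucket(unsigned int len)
{
	if (len <= 64)
		return "rx_frames_64";
	if (len <= 127)
		return "rx_frames_127";
	if (len <= 255)
		return "rx_frames_255";
	if (len <= 511)
		return "rx_frames_511";
	if (len <= 1023)
		return "rx_frames_1023";
	if (len <= 1518)
		return "rx_frames_1518";
	return "rx_frames_to_max";
}

int main(void)
{
	printf("%u -> %s\n", 60u, rx_len_bucket(60));
	printf("%u -> %s\n", 1500u, rx_len_bucket(1500));
	printf("%u -> %s\n", 9000u, rx_len_bucket(9000));
	return 0;
}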
err = vnic_wq_alloc_bufs(wq); + if (err) { + vnic_wq_free(wq); + return err; + } + + return 0; +} + + +int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + wq->index = 0; + wq->vdev = vdev; + + err = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2); + if (err) { + pr_err("Failed to get devcmd2 resource\n"); + return err; + } + vnic_wq_disable(wq); + + err = vnic_wq_alloc_ring(vdev, wq, desc_count, desc_size); + if (err) + return err; + return 0; +} + +void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u64 paddr; + unsigned int count = wq->ring.desc_count; + + paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &wq->ctrl->ring_base); + iowrite32(count, &wq->ctrl->ring_size); + iowrite32(fetch_index, &wq->ctrl->fetch_index); + iowrite32(posted_index, &wq->ctrl->posted_index); + iowrite32(cq_index, &wq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); + iowrite32(0, &wq->ctrl->error_status); + + wq->to_use = wq->to_clean = + &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES] + [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES]; +} + + +void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u64 paddr; + + paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &wq->ctrl->ring_base); + iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); + iowrite32(0, &wq->ctrl->fetch_index); + iowrite32(0, &wq->ctrl->posted_index); + iowrite32(cq_index, &wq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); + iowrite32(0, &wq->ctrl->error_status); +} + +unsigned int vnic_wq_error_status(struct vnic_wq *wq) +{ + return ioread32(&wq->ctrl->error_status); +} + +void vnic_wq_enable(struct vnic_wq *wq) +{ + iowrite32(1, &wq->ctrl->enable); +} + +int vnic_wq_disable(struct vnic_wq *wq) +{ + unsigned int wait; + + iowrite32(0, &wq->ctrl->enable); + + /* Wait for HW to ACK disable request */ + for (wait = 0; wait < 100; wait++) { + if (!(ioread32(&wq->ctrl->running))) + return 0; + udelay(1); + } + + printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index); + + return -ETIMEDOUT; +} + +void vnic_wq_clean(struct vnic_wq *wq, + void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) +{ + struct vnic_wq_buf *buf; + + BUG_ON(ioread32(&wq->ctrl->enable)); + + buf = wq->to_clean; + + while (vnic_wq_desc_used(wq) > 0) { + + (*buf_clean)(wq, buf); + + buf = wq->to_clean = buf->next; + wq->ring.desc_avail++; + } + + wq->to_use = wq->to_clean = wq->bufs[0]; + + iowrite32(0, &wq->ctrl->fetch_index); + iowrite32(0, &wq->ctrl->posted_index); + iowrite32(0, &wq->ctrl->error_status); + + vnic_dev_clear_desc_ring(&wq->ring); +} diff --git a/drivers/scsi/fnic/vnic_wq.h b/drivers/scsi/fnic/vnic_wq.h new file mode 100644 index 000000000..041618e13 --- /dev/null +++ b/drivers/scsi/fnic/vnic_wq.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
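vnic_wq_alloc_bufs() above builds one circular list of work-queue buffers out of several fixed-size allocation blocks. A scaled-down, stand-alone model of that construction, checking that the list wraps after desc_count entries (block size and count are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define BLK_ENTRIES 4			/* stands in for VNIC_WQ_BUF_BLK_ENTRIES */

struct buf {
	struct buf *next;
	unsigned int index;
};

int main(void)
{
	unsigned int count = 10;	/* ring size, not a multiple of BLK_ENTRIES */
	unsigned int blks = (count + BLK_ENTRIES - 1) / BLK_ENTRIES;
	struct buf *bufs[8] = { NULL };
	unsigned int i, j;

	for (i = 0; i < blks; i++)
		bufs[i] = calloc(BLK_ENTRIES, sizeof(struct buf));

	for (i = 0; i < blks; i++) {
		struct buf *b = bufs[i];

		for (j = 0; j < BLK_ENTRIES; j++) {
			b->index = i * BLK_ENTRIES + j;
			if (b->index + 1 == count) {
				b->next = bufs[0];	/* last buffer wraps to the first */
				break;
			} else if (j + 1 == BLK_ENTRIES) {
				b->next = bufs[i + 1];	/* jump to the next block */
			} else {
				b->next = b + 1;	/* next entry within this block */
				b++;
			}
		}
	}

	/* walking count links from bufs[0] must land back on bufs[0] */
	struct buf *b = bufs[0];
	for (i = 0; i < count; i++)
		b = b->next;
	printf("wrapped back to start: %s\n", b == bufs[0] ? "yes" : "no");

	for (i = 0; i < blks; i++)
		free(bufs[i]);
	return 0;
}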
+ */ +#ifndef _VNIC_WQ_H_ +#define _VNIC_WQ_H_ + +#include +#include "vnic_dev.h" +#include "vnic_cq.h" + +/* + * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth + * Driver) when both are built with CONFIG options =y + */ +#define vnic_wq_desc_avail fnic_wq_desc_avail +#define vnic_wq_desc_used fnic_wq_desc_used +#define vnic_wq_next_desc fni_cwq_next_desc +#define vnic_wq_post fnic_wq_post +#define vnic_wq_service fnic_wq_service +#define vnic_wq_free fnic_wq_free +#define vnic_wq_alloc fnic_wq_alloc +#define vnic_wq_devcmd2_alloc fnic_wq_devcmd2_alloc +#define vnic_wq_init_start fnic_wq_init_start +#define vnic_wq_init fnic_wq_init +#define vnic_wq_error_status fnic_wq_error_status +#define vnic_wq_enable fnic_wq_enable +#define vnic_wq_disable fnic_wq_disable +#define vnic_wq_clean fnic_wq_clean + +/* Work queue control */ +struct vnic_wq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 posted_index; /* 0x10 */ + u32 pad1; + u32 cq_index; /* 0x18 */ + u32 pad2; + u32 enable; /* 0x20 */ + u32 pad3; + u32 running; /* 0x28 */ + u32 pad4; + u32 fetch_index; /* 0x30 */ + u32 pad5; + u32 dca_value; /* 0x38 */ + u32 pad6; + u32 error_interrupt_enable; /* 0x40 */ + u32 pad7; + u32 error_interrupt_offset; /* 0x48 */ + u32 pad8; + u32 error_status; /* 0x50 */ + u32 pad9; +}; + +struct vnic_wq_buf { + struct vnic_wq_buf *next; + dma_addr_t dma_addr; + void *os_buf; + unsigned int len; + unsigned int index; + int sop; + void *desc; +}; + +/* Break the vnic_wq_buf allocations into blocks of 64 entries */ +#define VNIC_WQ_BUF_BLK_ENTRIES 64 +#define VNIC_WQ_BUF_BLK_SZ \ + (VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf)) +#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ + DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES) +#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096) + +struct vnic_wq { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX]; + struct vnic_wq_buf *to_use; + struct vnic_wq_buf *to_clean; + unsigned int pkts_outstanding; +}; + +static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) +{ + /* how many does SW own? */ + return wq->ring.desc_avail; +} + +static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) +{ + /* how many does HW own? */ + return wq->ring.desc_count - wq->ring.desc_avail - 1; +} + +static inline void *vnic_wq_next_desc(struct vnic_wq *wq) +{ + return wq->to_use->desc; +} + +static inline void vnic_wq_post(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, + unsigned int len, int sop, int eop) +{ + struct vnic_wq_buf *buf = wq->to_use; + + buf->sop = sop; + buf->os_buf = eop ? os_buf : NULL; + buf->dma_addr = dma_addr; + buf->len = len; + + buf = buf->next; + if (eop) { + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. Otherwise, hardware can read + * stale descriptor fields. 
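vnic_wq_desc_used() above reports desc_count - desc_avail - 1 because one descriptor is always held back, which is what lets a completely full ring be told apart from an empty one. A small model of that accounting, assuming the ring starts out with desc_count - 1 descriptors available (that starting value matches the "- 1" in the formula, but is an assumption here):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int desc_count = 16;
	unsigned int desc_avail = desc_count - 1;	/* assumed state after ring init */
	unsigned int posted;

	for (posted = 0; desc_avail > 0; posted++)
		desc_avail--;				/* one vnic_wq_post() */

	/* hardware now owns desc_count - desc_avail - 1 descriptors */
	assert(desc_count - desc_avail - 1 == posted);
	printf("posted %u of %u descriptors, %u still available\n",
	       posted, desc_count, desc_avail);
	return 0;
}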
+ */ + wmb(); + iowrite32(buf->index, &wq->ctrl->posted_index); + } + wq->to_use = buf; + + wq->ring.desc_avail--; +} + +static inline void vnic_wq_service(struct vnic_wq *wq, + struct cq_desc *cq_desc, u16 completed_index, + void (*buf_service)(struct vnic_wq *wq, + struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), + void *opaque) +{ + struct vnic_wq_buf *buf; + + buf = wq->to_clean; + while (1) { + + (*buf_service)(wq, cq_desc, buf, opaque); + + wq->ring.desc_avail++; + + wq->to_clean = buf->next; + + if (buf->index == completed_index) + break; + + buf = wq->to_clean; + } +} + +void vnic_wq_free(struct vnic_wq *wq); +int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, + unsigned int desc_count, unsigned int desc_size); +int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size); +void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +unsigned int vnic_wq_error_status(struct vnic_wq *wq); +void vnic_wq_enable(struct vnic_wq *wq); +int vnic_wq_disable(struct vnic_wq *wq); +void vnic_wq_clean(struct vnic_wq *wq, + void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)); + +#endif /* _VNIC_WQ_H_ */ diff --git a/drivers/scsi/fnic/vnic_wq_copy.c b/drivers/scsi/fnic/vnic_wq_copy.c new file mode 100644 index 000000000..96569d4cc --- /dev/null +++ b/drivers/scsi/fnic/vnic_wq_copy.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "vnic_wq_copy.h" + +void vnic_wq_copy_enable(struct vnic_wq_copy *wq) +{ + iowrite32(1, &wq->ctrl->enable); +} + +int vnic_wq_copy_disable(struct vnic_wq_copy *wq) +{ + unsigned int wait; + + iowrite32(0, &wq->ctrl->enable); + + /* Wait for HW to ACK disable request */ + for (wait = 0; wait < 100; wait++) { + if (!(ioread32(&wq->ctrl->running))) + return 0; + udelay(1); + } + + printk(KERN_ERR "Failed to disable Copy WQ[%d]," + " fetch index=%d, posted_index=%d\n", + wq->index, ioread32(&wq->ctrl->fetch_index), + ioread32(&wq->ctrl->posted_index)); + + return -ENODEV; +} + +void vnic_wq_copy_clean(struct vnic_wq_copy *wq, + void (*q_clean)(struct vnic_wq_copy *wq, + struct fcpio_host_req *wq_desc)) +{ + BUG_ON(ioread32(&wq->ctrl->enable)); + + if (vnic_wq_copy_desc_in_use(wq)) + vnic_wq_copy_service(wq, -1, q_clean); + + wq->to_use_index = wq->to_clean_index = 0; + + iowrite32(0, &wq->ctrl->fetch_index); + iowrite32(0, &wq->ctrl->posted_index); + iowrite32(0, &wq->ctrl->error_status); + + vnic_dev_clear_desc_ring(&wq->ring); +} + +void vnic_wq_copy_free(struct vnic_wq_copy *wq) +{ + struct vnic_dev *vdev; + + vdev = wq->vdev; + vnic_dev_free_desc_ring(vdev, &wq->ring); + wq->ctrl = NULL; +} + +int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq, + unsigned int index, unsigned int desc_count, + unsigned int desc_size) +{ + wq->index = index; + wq->vdev = vdev; + wq->to_use_index = wq->to_clean_index = 0; + wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); + if (!wq->ctrl) { + printk(KERN_ERR "Failed to hook COPY WQ[%d] resource\n", index); + return -EINVAL; + } + + vnic_wq_copy_disable(wq); + + return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); +} + +void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u64 paddr; + + paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &wq->ctrl->ring_base); + iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); + iowrite32(0, &wq->ctrl->fetch_index); + iowrite32(0, &wq->ctrl->posted_index); + iowrite32(cq_index, &wq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); +} diff --git a/drivers/scsi/fnic/vnic_wq_copy.h b/drivers/scsi/fnic/vnic_wq_copy.h new file mode 100644 index 000000000..2f8340144 --- /dev/null +++ b/drivers/scsi/fnic/vnic_wq_copy.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ +#ifndef _VNIC_WQ_COPY_H_ +#define _VNIC_WQ_COPY_H_ + +#include +#include "vnic_wq.h" +#include "fcpio.h" + +#define VNIC_WQ_COPY_MAX 1 + +struct vnic_wq_copy { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + unsigned to_use_index; + unsigned to_clean_index; +}; + +static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq) +{ + return wq->ring.desc_avail; +} + +static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq) +{ + return wq->ring.desc_count - 1 - wq->ring.desc_avail; +} + +static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq) +{ + struct fcpio_host_req *desc = wq->ring.descs; + return &desc[wq->to_use_index]; +} + +static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq) +{ + + ((wq->to_use_index + 1) == wq->ring.desc_count) ? + (wq->to_use_index = 0) : (wq->to_use_index++); + wq->ring.desc_avail--; + + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. Otherwise, hardware can read + * stale descriptor fields. + */ + wmb(); + + iowrite32(wq->to_use_index, &wq->ctrl->posted_index); +} + +static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index) +{ + unsigned int cnt; + + if (wq->to_clean_index <= index) + cnt = (index - wq->to_clean_index) + 1; + else + cnt = wq->ring.desc_count - wq->to_clean_index + index + 1; + + wq->to_clean_index = ((index + 1) % wq->ring.desc_count); + wq->ring.desc_avail += cnt; + +} + +static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq, + u16 completed_index, + void (*q_service)(struct vnic_wq_copy *wq, + struct fcpio_host_req *wq_desc)) +{ + struct fcpio_host_req *wq_desc = wq->ring.descs; + unsigned int curr_index; + + while (1) { + + if (q_service) + (*q_service)(wq, &wq_desc[wq->to_clean_index]); + + wq->ring.desc_avail++; + + curr_index = wq->to_clean_index; + + /* increment the to-clean index so that we start + * with an unprocessed index next time we enter the loop + */ + ((wq->to_clean_index + 1) == wq->ring.desc_count) ? + (wq->to_clean_index = 0) : (wq->to_clean_index++); + + if (curr_index == completed_index) + break; + + /* we have cleaned all the entries */ + if ((completed_index == (u16)-1) && + (wq->to_clean_index == wq->to_use_index)) + break; + } +} + +void vnic_wq_copy_enable(struct vnic_wq_copy *wq); +int vnic_wq_copy_disable(struct vnic_wq_copy *wq); +void vnic_wq_copy_free(struct vnic_wq_copy *wq); +int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq, + unsigned int index, unsigned int desc_count, unsigned int desc_size); +void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +void vnic_wq_copy_clean(struct vnic_wq_copy *wq, + void (*q_clean)(struct vnic_wq_copy *wq, + struct fcpio_host_req *wq_desc)); + +#endif /* _VNIC_WQ_COPY_H_ */ diff --git a/drivers/scsi/fnic/wq_enet_desc.h b/drivers/scsi/fnic/wq_enet_desc.h new file mode 100644 index 000000000..9a933a5de --- /dev/null +++ b/drivers/scsi/fnic/wq_enet_desc.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
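vnic_wq_copy_desc_process() above turns the ring index of the last completed descriptor into a count of finished descriptors, including the wrap-around case. The same arithmetic as a stand-alone function with two checked examples (ring size is illustrative):

#include <assert.h>
#include <stdio.h>

static unsigned int completed_count(unsigned int desc_count,
				    unsigned int to_clean_index,
				    unsigned int completed_index)
{
	if (to_clean_index <= completed_index)
		return completed_index - to_clean_index + 1;
	return desc_count - to_clean_index + completed_index + 1;
}

int main(void)
{
	/* no wrap: descriptors 2, 3, 4, 5 are cleaned */
	assert(completed_count(8, 2, 5) == 4);
	/* wrap: descriptors 6, 7, 0, 1, 2 are cleaned */
	assert(completed_count(8, 6, 2) == 5);
	printf("completion counts ok\n");
	return 0;
}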
+ */ +#ifndef _WQ_ENET_DESC_H_ +#define _WQ_ENET_DESC_H_ + +/* Ethernet work queue descriptor: 16B */ +struct wq_enet_desc { + __le64 address; + __le16 length; + __le16 mss_loopback; + __le16 header_length_flags; + __le16 vlan_tag; +}; + +#define WQ_ENET_ADDR_BITS 64 +#define WQ_ENET_LEN_BITS 14 +#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1) +#define WQ_ENET_MSS_BITS 14 +#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1) +#define WQ_ENET_MSS_SHIFT 2 +#define WQ_ENET_LOOPBACK_SHIFT 1 +#define WQ_ENET_HDRLEN_BITS 10 +#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1) +#define WQ_ENET_FLAGS_OM_BITS 2 +#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1) +#define WQ_ENET_FLAGS_EOP_SHIFT 12 +#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13 +#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14 +#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15 + +#define WQ_ENET_OFFLOAD_MODE_CSUM 0 +#define WQ_ENET_OFFLOAD_MODE_RESERVED 1 +#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2 +#define WQ_ENET_OFFLOAD_MODE_TSO 3 + +static inline void wq_enet_desc_enc(struct wq_enet_desc *desc, + u64 address, u16 length, u16 mss, u16 header_length, + u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap, + u8 vlan_tag_insert, u16 vlan_tag, u8 loopback) +{ + desc->address = cpu_to_le64(address); + desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK); + desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) << + WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT); + desc->header_length_flags = cpu_to_le16( + (header_length & WQ_ENET_HDRLEN_MASK) | + (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS | + (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT | + (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT | + (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT | + (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT); + desc->vlan_tag = cpu_to_le16(vlan_tag); +} + +static inline void wq_enet_desc_dec(struct wq_enet_desc *desc, + u64 *address, u16 *length, u16 *mss, u16 *header_length, + u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap, + u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback) +{ + *address = le64_to_cpu(desc->address); + *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK; + *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) & + WQ_ENET_MSS_MASK; + *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >> + WQ_ENET_LOOPBACK_SHIFT) & 1); + *header_length = le16_to_cpu(desc->header_length_flags) & + WQ_ENET_HDRLEN_MASK; + *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK); + *eop = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_EOP_SHIFT) & 1); + *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1); + *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1); + *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1); + *vlan_tag = le16_to_cpu(desc->vlan_tag); +} + +#endif /* _WQ_ENET_DESC_H_ */ diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c new file mode 100644 index 000000000..f6305e3e6 --- /dev/null +++ b/drivers/scsi/g_NCR5380.c @@ -0,0 +1,818 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic Generic NCR5380 driver + * + * Copyright 1993, Drew Eckhardt + * Visionary Computing + * (Unix and Linux consulting and custom programming) + * drew@colorado.edu + * +1 (303) 440-4894 + * + * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin 
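wq_enet_desc_enc()/_dec() above pack several fields into the 16-bit header_length_flags word. A stand-alone check of that bit layout (byte-order conversion omitted; the sample field values are illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HDRLEN_BITS	10
#define HDRLEN_MASK	((1 << HDRLEN_BITS) - 1)
#define OM_MASK		0x3
#define EOP_SHIFT	12
#define CQ_SHIFT	13
#define FCOE_SHIFT	14
#define VLAN_SHIFT	15

static uint16_t pack(uint16_t hdrlen, uint8_t om, uint8_t eop, uint8_t cq,
		     uint8_t fcoe, uint8_t vlan_ins)
{
	return (hdrlen & HDRLEN_MASK) |
	       (om & OM_MASK) << HDRLEN_BITS |
	       (eop & 1) << EOP_SHIFT |
	       (cq & 1) << CQ_SHIFT |
	       (fcoe & 1) << FCOE_SHIFT |
	       (vlan_ins & 1) << VLAN_SHIFT;
}

int main(void)
{
	uint16_t v = pack(54, 2, 1, 1, 1, 0);

	assert((v & HDRLEN_MASK) == 54);
	assert(((v >> HDRLEN_BITS) & OM_MASK) == 2);	/* OFFLOAD_MODE_CSUM_L4 */
	assert(((v >> EOP_SHIFT) & 1) == 1);
	assert(((v >> FCOE_SHIFT) & 1) == 1);
	assert(((v >> VLAN_SHIFT) & 1) == 0);
	printf("flags word: 0x%04x\n", v);
	return 0;
}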
+ * K.Lentin@cs.monash.edu.au + * + * NCR53C400A extensions (c) 1996, Ingmar Baumgart + * ingmar@gonzo.schwaben.de + * + * DTC3181E extensions (c) 1997, Ronald van Cuijlenborg + * ronald.van.cuijlenborg@tip.nl or nutty@dds.nl + * + * Added ISAPNP support for DTC436 adapters, + * Thomas Sailer, sailer@ife.ee.ethz.ch + * + * See Documentation/scsi/g_NCR5380.rst for more info. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Definitions for the core NCR5380 driver. */ + +#define NCR5380_read(reg) \ + ioread8(hostdata->io + hostdata->offset + (reg)) +#define NCR5380_write(reg, value) \ + iowrite8(value, hostdata->io + hostdata->offset + (reg)) + +#define NCR5380_implementation_fields \ + int offset; \ + int c400_ctl_status; \ + int c400_blk_cnt; \ + int c400_host_buf; \ + int io_width; \ + int pdma_residual; \ + int board + +#define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len +#define NCR5380_dma_recv_setup generic_NCR5380_precv +#define NCR5380_dma_send_setup generic_NCR5380_psend +#define NCR5380_dma_residual generic_NCR5380_dma_residual + +#define NCR5380_intr generic_NCR5380_intr +#define NCR5380_queue_command generic_NCR5380_queue_command +#define NCR5380_abort generic_NCR5380_abort +#define NCR5380_host_reset generic_NCR5380_host_reset +#define NCR5380_info generic_NCR5380_info + +#define NCR5380_io_delay(x) udelay(x) + +#include "NCR5380.h" + +#define DRV_MODULE_NAME "g_NCR5380" + +#define NCR53C400_mem_base 0x3880 +#define NCR53C400_host_buffer 0x3900 +#define NCR53C400_region_size 0x3a00 + +#define BOARD_NCR5380 0 +#define BOARD_NCR53C400 1 +#define BOARD_NCR53C400A 2 +#define BOARD_DTC3181E 3 +#define BOARD_HP_C2502 4 + +#define IRQ_AUTO 254 + +#define MAX_CARDS 8 +#define DMA_MAX_SIZE 32768 + +/* old-style parameters for compatibility */ +static int ncr_irq = -1; +static int ncr_addr; +static int ncr_5380; +static int ncr_53c400; +static int ncr_53c400a; +static int dtc_3181e; +static int hp_c2502; +module_param_hw(ncr_irq, int, irq, 0); +module_param_hw(ncr_addr, int, ioport, 0); +module_param(ncr_5380, int, 0); +module_param(ncr_53c400, int, 0); +module_param(ncr_53c400a, int, 0); +module_param(dtc_3181e, int, 0); +module_param(hp_c2502, int, 0); + +static int irq[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; +module_param_hw_array(irq, int, irq, NULL, 0); +MODULE_PARM_DESC(irq, "IRQ number(s) (0=none, 254=auto [default])"); + +static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; +module_param_hw_array(base, int, ioport, NULL, 0); +MODULE_PARM_DESC(base, "base address(es)"); + +static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; +module_param_array(card, int, NULL, 0); +MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)"); + +MODULE_ALIAS("g_NCR5380_mmio"); +MODULE_LICENSE("GPL"); + +static void g_NCR5380_trigger_irq(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + /* + * An interrupt is triggered whenever BSY = false, SEL = true + * and a bit set in the SELECT_ENABLE_REG is asserted on the + * SCSI bus. + * + * Note that the bus is only driven when the phase control signals + * (I/O, C/D, and MSG) match those in the TCR. 
+ */ + NCR5380_write(TARGET_COMMAND_REG, + PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK)); + NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); + NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); + NCR5380_write(INITIATOR_COMMAND_REG, + ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL); + + msleep(1); + + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + NCR5380_write(SELECT_ENABLE_REG, 0); + NCR5380_write(TARGET_COMMAND_REG, 0); +} + +/** + * g_NCR5380_probe_irq - find the IRQ of a NCR5380 or equivalent + * @instance: SCSI host instance + * + * Autoprobe for the IRQ line used by the card by triggering an IRQ + * and then looking to see what interrupt actually turned up. + */ + +static int g_NCR5380_probe_irq(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int irq_mask, irq; + + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + irq_mask = probe_irq_on(); + g_NCR5380_trigger_irq(instance); + irq = probe_irq_off(irq_mask); + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + + if (irq <= 0) + return NO_IRQ; + return irq; +} + +/* + * Configure I/O address of 53C400A or DTC436 by writing magic numbers + * to ports 0x779 and 0x379. + */ +static void magic_configure(int idx, u8 irq, u8 magic[]) +{ + u8 cfg = 0; + + outb(magic[0], 0x779); + outb(magic[1], 0x379); + outb(magic[2], 0x379); + outb(magic[3], 0x379); + outb(magic[4], 0x379); + + if (irq == 9) + irq = 2; + + if (idx >= 0 && idx <= 7) + cfg = 0x80 | idx | (irq << 4); + outb(cfg, 0x379); +} + +static irqreturn_t legacy_empty_irq_handler(int irq, void *dev_id) +{ + return IRQ_HANDLED; +} + +static int legacy_find_free_irq(int *irq_table) +{ + while (*irq_table != -1) { + if (!request_irq(*irq_table, legacy_empty_irq_handler, + IRQF_PROBE_SHARED, "Test IRQ", + (void *)irq_table)) { + free_irq(*irq_table, (void *) irq_table); + return *irq_table; + } + irq_table++; + } + return -1; +} + +static unsigned int ncr_53c400a_ports[] = { + 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0 +}; +static unsigned int dtc_3181e_ports[] = { + 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0 +}; +static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */ + 0x59, 0xb9, 0xc5, 0xae, 0xa6 +}; +static u8 hp_c2502_magic[] = { /* HP C2502 */ + 0x0f, 0x22, 0xf0, 0x20, 0x80 +}; +static int hp_c2502_irqs[] = { + 9, 5, 7, 3, 4, -1 +}; + +static int generic_NCR5380_init_one(const struct scsi_host_template *tpnt, + struct device *pdev, int base, int irq, int board) +{ + bool is_pmio = base <= 0xffff; + int ret; + int flags = 0; + unsigned int *ports = NULL; + u8 *magic = NULL; + int i; + int port_idx = -1; + unsigned long region_size; + struct Scsi_Host *instance; + struct NCR5380_hostdata *hostdata; + u8 __iomem *iomem; + + switch (board) { + case BOARD_NCR5380: + flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP; + break; + case BOARD_NCR53C400A: + ports = ncr_53c400a_ports; + magic = ncr_53c400a_magic; + break; + case BOARD_HP_C2502: + ports = ncr_53c400a_ports; + magic = hp_c2502_magic; + break; + case BOARD_DTC3181E: + ports = dtc_3181e_ports; + magic = ncr_53c400a_magic; + break; + } + + if (is_pmio && ports && magic) { + /* wakeup sequence for the NCR53C400A and DTC3181E */ + + /* Disable the adapter and look for a free io port */ + magic_configure(-1, 0, magic); + + region_size = 16; + if (base) + for (i = 0; ports[i]; i++) { + if (base == ports[i]) { /* index found */ + if (!request_region(ports[i], + region_size, + "ncr53c80")) + return -EBUSY; + break; + } + } + else + for (i = 0; ports[i]; i++) { + if 
(!request_region(ports[i], region_size, + "ncr53c80")) + continue; + if (inb(ports[i]) == 0xff) + break; + release_region(ports[i], region_size); + } + if (ports[i]) { + /* At this point we have our region reserved */ + magic_configure(i, 0, magic); /* no IRQ yet */ + base = ports[i]; + outb(0xc0, base + 9); + if (inb(base + 9) != 0x80) { + ret = -ENODEV; + goto out_release; + } + port_idx = i; + } else + return -EINVAL; + } else if (is_pmio) { + /* NCR5380 - no configuration, just grab */ + region_size = 8; + if (!base || !request_region(base, region_size, "ncr5380")) + return -EBUSY; + } else { /* MMIO */ + region_size = NCR53C400_region_size; + if (!request_mem_region(base, region_size, "ncr5380")) + return -EBUSY; + } + + if (is_pmio) + iomem = ioport_map(base, region_size); + else + iomem = ioremap(base, region_size); + + if (!iomem) { + ret = -ENOMEM; + goto out_release; + } + + instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata)); + if (instance == NULL) { + ret = -ENOMEM; + goto out_unmap; + } + hostdata = shost_priv(instance); + + hostdata->board = board; + hostdata->io = iomem; + hostdata->region_size = region_size; + + if (is_pmio) { + hostdata->io_port = base; + hostdata->io_width = 1; /* 8-bit PDMA by default */ + hostdata->offset = 0; + + /* + * On NCR53C400 boards, NCR5380 registers are mapped 8 past + * the base address. + */ + switch (board) { + case BOARD_NCR53C400: + hostdata->io_port += 8; + hostdata->c400_ctl_status = 0; + hostdata->c400_blk_cnt = 1; + hostdata->c400_host_buf = 4; + break; + case BOARD_DTC3181E: + hostdata->io_width = 2; /* 16-bit PDMA */ + fallthrough; + case BOARD_NCR53C400A: + case BOARD_HP_C2502: + hostdata->c400_ctl_status = 9; + hostdata->c400_blk_cnt = 10; + hostdata->c400_host_buf = 8; + break; + } + } else { + hostdata->base = base; + hostdata->offset = NCR53C400_mem_base; + switch (board) { + case BOARD_NCR53C400: + hostdata->c400_ctl_status = 0x100; + hostdata->c400_blk_cnt = 0x101; + hostdata->c400_host_buf = 0x104; + break; + case BOARD_DTC3181E: + case BOARD_NCR53C400A: + case BOARD_HP_C2502: + pr_err(DRV_MODULE_NAME ": unknown register offsets\n"); + ret = -EINVAL; + goto out_unregister; + } + } + + /* Check for vacant slot */ + NCR5380_write(MODE_REG, 0); + if (NCR5380_read(MODE_REG) != 0) { + ret = -ENODEV; + goto out_unregister; + } + + ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP); + if (ret) + goto out_unregister; + + switch (board) { + case BOARD_NCR53C400: + case BOARD_DTC3181E: + case BOARD_NCR53C400A: + case BOARD_HP_C2502: + NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); + } + + NCR5380_maybe_reset_bus(instance); + + /* Compatibility with documented NCR5380 kernel parameters */ + if (irq == 255 || irq == 0) + irq = NO_IRQ; + else if (irq == -1) + irq = IRQ_AUTO; + + if (board == BOARD_HP_C2502) { + int *irq_table = hp_c2502_irqs; + int board_irq = -1; + + switch (irq) { + case NO_IRQ: + board_irq = 0; + break; + case IRQ_AUTO: + board_irq = legacy_find_free_irq(irq_table); + break; + default: + while (*irq_table != -1) + if (*irq_table++ == irq) + board_irq = irq; + } + + if (board_irq <= 0) { + board_irq = 0; + irq = NO_IRQ; + } + + magic_configure(port_idx, board_irq, magic); + } + + if (irq == IRQ_AUTO) { + instance->irq = g_NCR5380_probe_irq(instance); + if (instance->irq == NO_IRQ) + shost_printk(KERN_INFO, instance, "no irq detected\n"); + } else { + instance->irq = irq; + if (instance->irq == NO_IRQ) + shost_printk(KERN_INFO, instance, "no irq provided\n"); + } + + if (instance->irq != 
NO_IRQ) { + if (request_irq(instance->irq, generic_NCR5380_intr, + 0, "NCR5380", instance)) { + instance->irq = NO_IRQ; + shost_printk(KERN_INFO, instance, + "irq %d denied\n", instance->irq); + } else { + shost_printk(KERN_INFO, instance, + "irq %d acquired\n", instance->irq); + } + } + + ret = scsi_add_host(instance, pdev); + if (ret) + goto out_free_irq; + scsi_scan_host(instance); + dev_set_drvdata(pdev, instance); + return 0; + +out_free_irq: + if (instance->irq != NO_IRQ) + free_irq(instance->irq, instance); + NCR5380_exit(instance); +out_unregister: + scsi_host_put(instance); +out_unmap: + iounmap(iomem); +out_release: + if (is_pmio) + release_region(base, region_size); + else + release_mem_region(base, region_size); + return ret; +} + +static void generic_NCR5380_release_resources(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + void __iomem *iomem = hostdata->io; + unsigned long io_port = hostdata->io_port; + unsigned long base = hostdata->base; + unsigned long region_size = hostdata->region_size; + + scsi_remove_host(instance); + if (instance->irq != NO_IRQ) + free_irq(instance->irq, instance); + NCR5380_exit(instance); + scsi_host_put(instance); + iounmap(iomem); + if (io_port) + release_region(io_port, region_size); + else + release_mem_region(base, region_size); +} + +/* wait_for_53c80_access - wait for 53C80 registers to become accessible + * @hostdata: scsi host private data + * + * The registers within the 53C80 logic block are inaccessible until + * bit 7 in the 53C400 control status register gets asserted. + */ + +static void wait_for_53c80_access(struct NCR5380_hostdata *hostdata) +{ + int count = 10000; + + do { + if (hostdata->board == BOARD_DTC3181E) + udelay(4); /* DTC436 chip hangs without this */ + if (NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG) + return; + } while (--count > 0); + + scmd_printk(KERN_ERR, hostdata->connected, + "53c80 registers not accessible, device will be reset\n"); + NCR5380_write(hostdata->c400_ctl_status, CSR_RESET); + NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); +} + +/** + * generic_NCR5380_precv - pseudo DMA receive + * @hostdata: scsi host private data + * @dst: buffer to write into + * @len: transfer size + * + * Perform a pseudo DMA mode receive from a 53C400 or equivalent device. + */ + +static inline int generic_NCR5380_precv(struct NCR5380_hostdata *hostdata, + unsigned char *dst, int len) +{ + int residual; + int start = 0; + + NCR5380_write(hostdata->c400_ctl_status, CSR_BASE | CSR_TRANS_DIR); + NCR5380_write(hostdata->c400_blk_cnt, len / 128); + + do { + if (start == len - 128) { + /* Ignore End of DMA interrupt for the final buffer */ + if (NCR5380_poll_politely(hostdata, hostdata->c400_ctl_status, + CSR_HOST_BUF_NOT_RDY, 0, 0) < 0) + break; + } else { + if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status, + CSR_HOST_BUF_NOT_RDY, 0, + hostdata->c400_ctl_status, + CSR_GATED_53C80_IRQ, + CSR_GATED_53C80_IRQ, 0) < 0 || + NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) + break; + } + + if (hostdata->io_port && hostdata->io_width == 2) + insw(hostdata->io_port + hostdata->c400_host_buf, + dst + start, 64); + else if (hostdata->io_port) + insb(hostdata->io_port + hostdata->c400_host_buf, + dst + start, 128); + else + memcpy_fromio(dst + start, + hostdata->io + NCR53C400_host_buffer, 128); + start += 128; + } while (start < len); + + residual = len - start; + + if (residual != 0) { + /* 53c80 interrupt or transfer timeout. Reset 53c400 logic. 
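generic_NCR5380_precv() above moves data through the 53C400's 128-byte host buffer, so progress and the final pdma_residual are always whole 128-byte blocks. A stand-alone model of that bookkeeping, with an early 53c80 interrupt simulated by a block count (lengths are illustrative):

#include <stdio.h>

int main(void)
{
	int len = 4096;			/* requested transfer, a multiple of 128 */
	int interrupt_after = 10;	/* simulate an early 53c80 interrupt */
	int start = 0;
	int blocks = 0;

	while (start < len) {
		if (blocks == interrupt_after)
			break;		/* transfer aborted mid-way */
		/* one insb()/insw()/memcpy_fromio() of a 128-byte host buffer */
		start += 128;
		blocks++;
	}

	printf("transferred %d bytes, pdma_residual = %d\n", start, len - start);
	return 0;
}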
*/ + NCR5380_write(hostdata->c400_ctl_status, CSR_RESET); + NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); + } + wait_for_53c80_access(hostdata); + + if (residual == 0 && NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, + BASR_END_DMA_TRANSFER, + BASR_END_DMA_TRANSFER, + 0) < 0) + scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n", + __func__); + + hostdata->pdma_residual = residual; + + return 0; +} + +/** + * generic_NCR5380_psend - pseudo DMA send + * @hostdata: scsi host private data + * @src: buffer to read from + * @len: transfer size + * + * Perform a pseudo DMA mode send to a 53C400 or equivalent device. + */ + +static inline int generic_NCR5380_psend(struct NCR5380_hostdata *hostdata, + unsigned char *src, int len) +{ + int residual; + int start = 0; + + NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); + NCR5380_write(hostdata->c400_blk_cnt, len / 128); + + do { + if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status, + CSR_HOST_BUF_NOT_RDY, 0, + hostdata->c400_ctl_status, + CSR_GATED_53C80_IRQ, + CSR_GATED_53C80_IRQ, 0) < 0 || + NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) { + /* Both 128 B buffers are in use */ + if (start >= 128) + start -= 128; + if (start >= 128) + start -= 128; + break; + } + + if (start >= len && NCR5380_read(hostdata->c400_blk_cnt) == 0) + break; + + if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ) { + /* Host buffer is empty, other one is in use */ + if (start >= 128) + start -= 128; + break; + } + + if (start >= len) + continue; + + if (hostdata->io_port && hostdata->io_width == 2) + outsw(hostdata->io_port + hostdata->c400_host_buf, + src + start, 64); + else if (hostdata->io_port) + outsb(hostdata->io_port + hostdata->c400_host_buf, + src + start, 128); + else + memcpy_toio(hostdata->io + NCR53C400_host_buffer, + src + start, 128); + start += 128; + } while (1); + + residual = len - start; + + if (residual != 0) { + /* 53c80 interrupt or transfer timeout. Reset 53c400 logic. */ + NCR5380_write(hostdata->c400_ctl_status, CSR_RESET); + NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); + } + wait_for_53c80_access(hostdata); + + if (residual == 0) { + if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG, + TCR_LAST_BYTE_SENT, TCR_LAST_BYTE_SENT, + 0) < 0) + scmd_printk(KERN_ERR, hostdata->connected, + "%s: Last Byte Sent timeout\n", __func__); + + if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, + BASR_END_DMA_TRANSFER, BASR_END_DMA_TRANSFER, + 0) < 0) + scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n", + __func__); + } + + hostdata->pdma_residual = residual; + + return 0; +} + +static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata, + struct scsi_cmnd *cmd) +{ + int transfersize = NCR5380_to_ncmd(cmd)->this_residual; + + if (hostdata->flags & FLAG_NO_PSEUDO_DMA) + return 0; + + /* 53C400 datasheet: non-modulo-128-byte transfers should use PIO */ + if (transfersize % 128) + return 0; + + /* Limit PDMA send to 512 B to avoid random corruption on DTC3181E */ + if (hostdata->board == BOARD_DTC3181E && + cmd->sc_data_direction == DMA_TO_DEVICE) + transfersize = min(transfersize, 512); + + return min(transfersize, DMA_MAX_SIZE); +} + +static int generic_NCR5380_dma_residual(struct NCR5380_hostdata *hostdata) +{ + return hostdata->pdma_residual; +} + +/* Include the core driver code. 
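generic_NCR5380_dma_xfer_len() above decides between PIO and pseudo-DMA. The same decision restated as a pure function so the three cases are easy to check in isolation:

#include <stdio.h>

#define DMA_MAX_SIZE 32768

static int pdma_xfer_len(int residual, int no_pseudo_dma, int is_dtc3181e,
			 int is_write)
{
	if (no_pseudo_dma)
		return 0;
	if (residual % 128)
		return 0;			/* datasheet: use PIO instead */
	if (is_dtc3181e && is_write && residual > 512)
		residual = 512;			/* avoid DTC3181E send corruption */
	return residual < DMA_MAX_SIZE ? residual : DMA_MAX_SIZE;
}

int main(void)
{
	printf("%d\n", pdma_xfer_len(512, 0, 0, 0));	/* 512 */
	printf("%d\n", pdma_xfer_len(510, 0, 0, 0));	/* 0 -> PIO */
	printf("%d\n", pdma_xfer_len(65536, 0, 0, 1));	/* capped at 32768 */
	printf("%d\n", pdma_xfer_len(4096, 0, 1, 1));	/* 512 on DTC3181E write */
	return 0;
}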
*/ + +#include "NCR5380.c" + +static const struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .proc_name = DRV_MODULE_NAME, + .name = "Generic NCR5380/NCR53C400 SCSI", + .info = generic_NCR5380_info, + .queuecommand = generic_NCR5380_queue_command, + .eh_abort_handler = generic_NCR5380_abort, + .eh_host_reset_handler = generic_NCR5380_host_reset, + .can_queue = 16, + .this_id = 7, + .sg_tablesize = SG_ALL, + .cmd_per_lun = 2, + .dma_boundary = PAGE_SIZE - 1, + .cmd_size = sizeof(struct NCR5380_cmd), + .max_sectors = 128, +}; + +static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev) +{ + int ret = generic_NCR5380_init_one(&driver_template, pdev, base[ndev], + irq[ndev], card[ndev]); + if (ret) { + if (base[ndev]) + printk(KERN_WARNING "Card not found at address 0x%03x\n", + base[ndev]); + return 0; + } + + return 1; +} + +static void generic_NCR5380_isa_remove(struct device *pdev, + unsigned int ndev) +{ + generic_NCR5380_release_resources(dev_get_drvdata(pdev)); + dev_set_drvdata(pdev, NULL); +} + +static struct isa_driver generic_NCR5380_isa_driver = { + .match = generic_NCR5380_isa_match, + .remove = generic_NCR5380_isa_remove, + .driver = { + .name = DRV_MODULE_NAME + }, +}; + +#ifdef CONFIG_PNP +static const struct pnp_device_id generic_NCR5380_pnp_ids[] = { + { .id = "DTC436e", .driver_data = BOARD_DTC3181E }, + { .id = "" } +}; +MODULE_DEVICE_TABLE(pnp, generic_NCR5380_pnp_ids); + +static int generic_NCR5380_pnp_probe(struct pnp_dev *pdev, + const struct pnp_device_id *id) +{ + int base, irq; + + if (pnp_activate_dev(pdev) < 0) + return -EBUSY; + + base = pnp_port_start(pdev, 0); + irq = pnp_irq(pdev, 0); + + return generic_NCR5380_init_one(&driver_template, &pdev->dev, base, irq, + id->driver_data); +} + +static void generic_NCR5380_pnp_remove(struct pnp_dev *pdev) +{ + generic_NCR5380_release_resources(pnp_get_drvdata(pdev)); + pnp_set_drvdata(pdev, NULL); +} + +static struct pnp_driver generic_NCR5380_pnp_driver = { + .name = DRV_MODULE_NAME, + .id_table = generic_NCR5380_pnp_ids, + .probe = generic_NCR5380_pnp_probe, + .remove = generic_NCR5380_pnp_remove, +}; +#endif /* defined(CONFIG_PNP) */ + +static int pnp_registered, isa_registered; + +static int __init generic_NCR5380_init(void) +{ + int ret = 0; + + /* compatibility with old-style parameters */ + if (irq[0] == -1 && base[0] == 0 && card[0] == -1) { + irq[0] = ncr_irq; + base[0] = ncr_addr; + if (ncr_5380) + card[0] = BOARD_NCR5380; + if (ncr_53c400) + card[0] = BOARD_NCR53C400; + if (ncr_53c400a) + card[0] = BOARD_NCR53C400A; + if (dtc_3181e) + card[0] = BOARD_DTC3181E; + if (hp_c2502) + card[0] = BOARD_HP_C2502; + } + +#ifdef CONFIG_PNP + if (!pnp_register_driver(&generic_NCR5380_pnp_driver)) + pnp_registered = 1; +#endif + ret = isa_register_driver(&generic_NCR5380_isa_driver, MAX_CARDS); + if (!ret) + isa_registered = 1; + + return (pnp_registered || isa_registered) ? 
0 : ret; +} + +static void __exit generic_NCR5380_exit(void) +{ +#ifdef CONFIG_PNP + if (pnp_registered) + pnp_unregister_driver(&generic_NCR5380_pnp_driver); +#endif + if (isa_registered) + isa_unregister_driver(&generic_NCR5380_isa_driver); +} + +module_init(generic_NCR5380_init); +module_exit(generic_NCR5380_exit); diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c new file mode 100644 index 000000000..0420bfe9b --- /dev/null +++ b/drivers/scsi/gvp11.c @@ -0,0 +1,476 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include "wd33c93.h" +#include "gvp11.h" + + +#define CHECK_WD33C93 + +struct gvp11_hostdata { + struct WD33C93_hostdata wh; + struct gvp11_scsiregs *regs; + struct device *dev; +}; + +#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE) +#define TO_DMA_MASK(m) (~((unsigned long long)m & 0xffffffff)) + +static irqreturn_t gvp11_intr(int irq, void *data) +{ + struct Scsi_Host *instance = data; + struct gvp11_hostdata *hdata = shost_priv(instance); + unsigned int status = hdata->regs->CNTR; + unsigned long flags; + + if (!(status & GVP11_DMAC_INT_PENDING)) + return IRQ_NONE; + + spin_lock_irqsave(instance->host_lock, flags); + wd33c93_intr(instance); + spin_unlock_irqrestore(instance->host_lock, flags); + return IRQ_HANDLED; +} + +static int gvp11_xfer_mask = 0; + +static int dma_setup(struct scsi_cmnd *cmd, int dir_in) +{ + struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); + unsigned long len = scsi_pointer->this_residual; + struct Scsi_Host *instance = cmd->device->host; + struct gvp11_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct gvp11_scsiregs *regs = hdata->regs; + unsigned short cntr = GVP11_DMAC_INT_ENABLE; + dma_addr_t addr; + int bank_mask; + static int scsi_alloc_out_of_range = 0; + + addr = dma_map_single(hdata->dev, scsi_pointer->ptr, + len, DMA_DIR(dir_in)); + if (dma_mapping_error(hdata->dev, addr)) { + dev_warn(hdata->dev, "cannot map SCSI data block %p\n", + scsi_pointer->ptr); + return 1; + } + scsi_pointer->dma_handle = addr; + + /* use bounce buffer if the physical address is bad */ + if (addr & wh->dma_xfer_mask) { + /* drop useless mapping */ + dma_unmap_single(hdata->dev, scsi_pointer->dma_handle, + scsi_pointer->this_residual, + DMA_DIR(dir_in)); + scsi_pointer->dma_handle = (dma_addr_t) NULL; + + wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff; + + if (!scsi_alloc_out_of_range) { + wh->dma_bounce_buffer = + kmalloc(wh->dma_bounce_len, GFP_KERNEL); + wh->dma_buffer_pool = BUF_SCSI_ALLOCED; + } + + if (scsi_alloc_out_of_range || + !wh->dma_bounce_buffer) { + wh->dma_bounce_buffer = + amiga_chip_alloc(wh->dma_bounce_len, + "GVP II SCSI Bounce Buffer"); + + if (!wh->dma_bounce_buffer) { + wh->dma_bounce_len = 0; + return 1; + } + + wh->dma_buffer_pool = BUF_CHIP_ALLOCED; + } + + if (!dir_in) { + /* copy to bounce buffer for a write */ + memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr, + scsi_pointer->this_residual); + } + + if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) { + /* will flush/invalidate cache for us */ + addr = dma_map_single(hdata->dev, + wh->dma_bounce_buffer, + wh->dma_bounce_len, + DMA_DIR(dir_in)); + /* can't map buffer; use PIO */ + if (dma_mapping_error(hdata->dev, addr)) { + dev_warn(hdata->dev, + "cannot map bounce buffer %p\n", + wh->dma_bounce_buffer); + return 1; + } + } + + 
if (addr & wh->dma_xfer_mask) { + /* drop useless mapping */ + dma_unmap_single(hdata->dev, scsi_pointer->dma_handle, + scsi_pointer->this_residual, + DMA_DIR(dir_in)); + /* fall back to Chip RAM if address out of range */ + if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) { + kfree(wh->dma_bounce_buffer); + scsi_alloc_out_of_range = 1; + } else { + amiga_chip_free(wh->dma_bounce_buffer); + } + + wh->dma_bounce_buffer = + amiga_chip_alloc(wh->dma_bounce_len, + "GVP II SCSI Bounce Buffer"); + + if (!wh->dma_bounce_buffer) { + wh->dma_bounce_len = 0; + return 1; + } + + if (!dir_in) { + /* copy to bounce buffer for a write */ + memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr, + scsi_pointer->this_residual); + } + /* chip RAM can be mapped to phys. address directly */ + addr = virt_to_phys(wh->dma_bounce_buffer); + /* no need to flush/invalidate cache */ + wh->dma_buffer_pool = BUF_CHIP_ALLOCED; + } + /* finally, have OK mapping (punted for PIO else) */ + scsi_pointer->dma_handle = addr; + + } + + /* setup dma direction */ + if (!dir_in) + cntr |= GVP11_DMAC_DIR_WRITE; + + wh->dma_dir = dir_in; + regs->CNTR = cntr; + + /* setup DMA *physical* address */ + regs->ACR = addr; + + /* no more cache flush here - dma_map_single() takes care */ + + bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0; + if (bank_mask) + regs->BANK = bank_mask & (addr >> 18); + + /* start DMA */ + regs->ST_DMA = 1; + + /* return success */ + return 0; +} + +static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, + int status) +{ + struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt); + struct gvp11_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct gvp11_scsiregs *regs = hdata->regs; + + /* stop DMA */ + regs->SP_DMA = 1; + /* remove write bit from CONTROL bits */ + regs->CNTR = GVP11_DMAC_INT_ENABLE; + + if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) + dma_unmap_single(hdata->dev, scsi_pointer->dma_handle, + scsi_pointer->this_residual, + DMA_DIR(wh->dma_dir)); + + /* copy from a bounce buffer, if necessary */ + if (status && wh->dma_bounce_buffer) { + if (wh->dma_dir && SCpnt) + memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer, + scsi_pointer->this_residual); + + if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) + kfree(wh->dma_bounce_buffer); + else + amiga_chip_free(wh->dma_bounce_buffer); + + wh->dma_bounce_buffer = NULL; + wh->dma_bounce_len = 0; + } +} + +static const struct scsi_host_template gvp11_scsi_template = { + .module = THIS_MODULE, + .name = "GVP Series II SCSI", + .show_info = wd33c93_show_info, + .write_info = wd33c93_write_info, + .proc_name = "GVP11", + .queuecommand = wd33c93_queuecommand, + .eh_abort_handler = wd33c93_abort, + .eh_host_reset_handler = wd33c93_host_reset, + .can_queue = CAN_QUEUE, + .this_id = 7, + .sg_tablesize = SG_ALL, + .cmd_per_lun = CMD_PER_LUN, + .dma_boundary = PAGE_SIZE - 1, + .cmd_size = sizeof(struct scsi_pointer), +}; + +static int check_wd33c93(struct gvp11_scsiregs *regs) +{ +#ifdef CHECK_WD33C93 + volatile unsigned char *sasr_3393, *scmd_3393; + unsigned char save_sasr; + unsigned char q, qq; + + /* + * These darn GVP boards are a problem - it can be tough to tell + * whether or not they include a SCSI controller. This is the + * ultimate Yet-Another-GVP-Detection-Hack in that it actually + * probes for a WD33c93 chip: If we find one, it's extremely + * likely that this card supports SCSI, regardless of Product_ + * Code, Board_Size, etc. 
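The bank handling in dma_setup() above derives which address bits the BANK register must carry from the board's dma_xfer_mask: after the >> 18 shift, the 0x01c0 mask selects address bits 24..26. The arithmetic in isolation (masks and address are illustrative):

#include <stdio.h>

int main(void)
{
	/* illustrative per-board DMA transfer masks */
	unsigned int xfer_masks[] = { 0xff000001, 0xfe000000, 0xf8000000 };
	unsigned int addr = 0x05200000;		/* illustrative DMA address */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int bank_mask = (~xfer_masks[i] >> 18) & 0x01c0;
		unsigned int bank = bank_mask & (addr >> 18);

		printf("xfer_mask %08x: bank_mask %03x -> BANK %03x\n",
		       xfer_masks[i], bank_mask, bank);
	}
	return 0;
}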
+ */ + + /* Get pointers to the presumed register locations and save contents */ + + sasr_3393 = ®s->SASR; + scmd_3393 = ®s->SCMD; + save_sasr = *sasr_3393; + + /* First test the AuxStatus Reg */ + + q = *sasr_3393; /* read it */ + if (q & 0x08) /* bit 3 should always be clear */ + return -ENODEV; + *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */ + if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */ + *sasr_3393 = save_sasr; /* Oops - restore this byte */ + return -ENODEV; + } + if (*sasr_3393 != q) { /* should still read the same */ + *sasr_3393 = save_sasr; /* Oops - restore this byte */ + return -ENODEV; + } + if (*scmd_3393 != q) /* and so should the image at 0x1f */ + return -ENODEV; + + /* + * Ok, we probably have a wd33c93, but let's check a few other places + * for good measure. Make sure that this works for both 'A and 'B + * chip versions. + */ + + *sasr_3393 = WD_SCSI_STATUS; + q = *scmd_3393; + *sasr_3393 = WD_SCSI_STATUS; + *scmd_3393 = ~q; + *sasr_3393 = WD_SCSI_STATUS; + qq = *scmd_3393; + *sasr_3393 = WD_SCSI_STATUS; + *scmd_3393 = q; + if (qq != q) /* should be read only */ + return -ENODEV; + *sasr_3393 = 0x1e; /* this register is unimplemented */ + q = *scmd_3393; + *sasr_3393 = 0x1e; + *scmd_3393 = ~q; + *sasr_3393 = 0x1e; + qq = *scmd_3393; + *sasr_3393 = 0x1e; + *scmd_3393 = q; + if (qq != q || qq != 0xff) /* should be read only, all 1's */ + return -ENODEV; + *sasr_3393 = WD_TIMEOUT_PERIOD; + q = *scmd_3393; + *sasr_3393 = WD_TIMEOUT_PERIOD; + *scmd_3393 = ~q; + *sasr_3393 = WD_TIMEOUT_PERIOD; + qq = *scmd_3393; + *sasr_3393 = WD_TIMEOUT_PERIOD; + *scmd_3393 = q; + if (qq != (~q & 0xff)) /* should be read/write */ + return -ENODEV; +#endif /* CHECK_WD33C93 */ + + return 0; +} + +static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent) +{ + struct Scsi_Host *instance; + unsigned long address; + int error; + unsigned int epc; + unsigned int default_dma_xfer_mask; + struct gvp11_hostdata *hdata; + struct gvp11_scsiregs *regs; + wd33c93_regs wdregs; + + default_dma_xfer_mask = ent->driver_data; + + if (dma_set_mask_and_coherent(&z->dev, + TO_DMA_MASK(default_dma_xfer_mask))) { + dev_warn(&z->dev, "cannot use DMA mask %llx\n", + TO_DMA_MASK(default_dma_xfer_mask)); + return -ENODEV; + } + + /* + * Rumors state that some GVP ram boards use the same product + * code as the SCSI controllers. Therefore if the board-size + * is not 64KB we assume it is a ram board and bail out. 
+ */ + if (zorro_resource_len(z) != 0x10000) + return -ENODEV; + + address = z->resource.start; + if (!request_mem_region(address, 256, "wd33c93")) + return -EBUSY; + + regs = ZTWO_VADDR(address); + + error = check_wd33c93(regs); + if (error) + goto fail_check_or_alloc; + + instance = scsi_host_alloc(&gvp11_scsi_template, + sizeof(struct gvp11_hostdata)); + if (!instance) { + error = -ENOMEM; + goto fail_check_or_alloc; + } + + instance->irq = IRQ_AMIGA_PORTS; + instance->unique_id = z->slotaddr; + + regs->secret2 = 1; + regs->secret1 = 0; + regs->secret3 = 15; + while (regs->CNTR & GVP11_DMAC_BUSY) + ; + regs->CNTR = 0; + regs->BANK = 0; + + wdregs.SASR = ®s->SASR; + wdregs.SCMD = ®s->SCMD; + + hdata = shost_priv(instance); + if (gvp11_xfer_mask) { + hdata->wh.dma_xfer_mask = gvp11_xfer_mask; + if (dma_set_mask_and_coherent(&z->dev, + TO_DMA_MASK(gvp11_xfer_mask))) { + dev_warn(&z->dev, "cannot use DMA mask %llx\n", + TO_DMA_MASK(gvp11_xfer_mask)); + error = -ENODEV; + goto fail_check_or_alloc; + } + } else + hdata->wh.dma_xfer_mask = default_dma_xfer_mask; + + hdata->wh.no_sync = 0xff; + hdata->wh.fast = 0; + hdata->wh.dma_mode = CTRL_DMA; + hdata->regs = regs; + + /* + * Check for 14MHz SCSI clock + */ + epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000); + wd33c93_init(instance, wdregs, dma_setup, dma_stop, + (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10 + : WD33C93_FS_12_15); + + error = request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, + "GVP11 SCSI", instance); + if (error) + goto fail_irq; + + regs->CNTR = GVP11_DMAC_INT_ENABLE; + + error = scsi_add_host(instance, NULL); + if (error) + goto fail_host; + + zorro_set_drvdata(z, instance); + scsi_scan_host(instance); + return 0; + +fail_host: + free_irq(IRQ_AMIGA_PORTS, instance); +fail_irq: + scsi_host_put(instance); +fail_check_or_alloc: + release_mem_region(address, 256); + return error; +} + +static void gvp11_remove(struct zorro_dev *z) +{ + struct Scsi_Host *instance = zorro_get_drvdata(z); + struct gvp11_hostdata *hdata = shost_priv(instance); + + hdata->regs->CNTR = 0; + scsi_remove_host(instance); + free_irq(IRQ_AMIGA_PORTS, instance); + scsi_host_put(instance); + release_mem_region(z->resource.start, 256); +} + + /* + * This should (hopefully) be the correct way to identify + * all the different GVP SCSI controllers (except for the + * SERIES I though). 
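The driver_data value in each zorro table entry below ends up as the board's dma_xfer_mask in gvp11_probe(), and dma_setup() falls back to a bounce buffer whenever an address has any of those bits set. A quick sketch of the direct-DMA reach each mask implies (the highest address whose masked bits are all zero):

#include <stdio.h>

int main(void)
{
	unsigned int masks[] = { 0xff000001, 0xff000000, 0xfe000000, 0xf8000000 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		/* highest address satisfying (addr & mask) == 0 */
		unsigned int reach = ~masks[i];

		printf("xfer_mask %08x -> direct DMA up to %08x\n",
		       masks[i], reach);
	}
	return 0;
}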
+ */ + +static struct zorro_device_id gvp11_zorro_tbl[] = { + { ZORRO_PROD_GVP_COMBO_030_R3_SCSI, ~0x00ffffff }, + { ZORRO_PROD_GVP_SERIES_II, ~0x00ffffff }, + { ZORRO_PROD_GVP_GFORCE_030_SCSI, ~0x01ffffff }, + { ZORRO_PROD_GVP_A530_SCSI, ~0x01ffffff }, + { ZORRO_PROD_GVP_COMBO_030_R4_SCSI, ~0x01ffffff }, + { ZORRO_PROD_GVP_A1291, ~0x07ffffff }, + { ZORRO_PROD_GVP_GFORCE_040_SCSI_1, ~0x07ffffff }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, gvp11_zorro_tbl); + +static struct zorro_driver gvp11_driver = { + .name = "gvp11", + .id_table = gvp11_zorro_tbl, + .probe = gvp11_probe, + .remove = gvp11_remove, +}; + +static int __init gvp11_init(void) +{ + return zorro_register_driver(&gvp11_driver); +} +module_init(gvp11_init); + +static void __exit gvp11_exit(void) +{ + zorro_unregister_driver(&gvp11_driver); +} +module_exit(gvp11_exit); + +MODULE_DESCRIPTION("GVP Series II SCSI"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h new file mode 100644 index 000000000..61c1a3584 --- /dev/null +++ b/drivers/scsi/gvp11.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef GVP11_H + +/* $Id: gvp11.h,v 1.4 1997/01/19 23:07:12 davem Exp $ + * + * Header file for the GVP Series II SCSI controller for Linux + * + * Written and (C) 1993, Ralf Baechle, see gvp11.c for more info + * based on a2091.h (C) 1993 by Hamish Macdonald + * + */ + +#include + +#ifndef CMD_PER_LUN +#define CMD_PER_LUN 2 +#endif + +#ifndef CAN_QUEUE +#define CAN_QUEUE 16 +#endif + +/* + * if the transfer address ANDed with this results in a non-zero + * result, then we can't use DMA. + */ +#define GVP11_XFER_MASK (0xff000001) + +struct gvp11_scsiregs { + unsigned char pad1[64]; + volatile unsigned short CNTR; + unsigned char pad2[31]; + volatile unsigned char SASR; + unsigned char pad3; + volatile unsigned char SCMD; + unsigned char pad4[4]; + volatile unsigned short BANK; + unsigned char pad5[6]; + volatile unsigned long ACR; + volatile unsigned short secret1; /* store 0 here */ + volatile unsigned short ST_DMA; + volatile unsigned short SP_DMA; + volatile unsigned short secret2; /* store 1 here */ + volatile unsigned short secret3; /* store 15 here */ +}; + +/* bits in CNTR */ +#define GVP11_DMAC_BUSY (1<<0) +#define GVP11_DMAC_INT_PENDING (1<<1) +#define GVP11_DMAC_INT_ENABLE (1<<3) +#define GVP11_DMAC_DIR_WRITE (1<<4) + +#endif /* GVP11_H */ diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig new file mode 100644 index 000000000..4ba3a8ead --- /dev/null +++ b/drivers/scsi/hisi_sas/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0-only +config SCSI_HISI_SAS + tristate "HiSilicon SAS" + depends on HAS_IOMEM + depends on ARM64 || COMPILE_TEST + select SCSI_SAS_LIBSAS + select BLK_DEV_INTEGRITY + depends on ATA + select SATA_HOST + help + This driver supports HiSilicon's SAS HBA, including support based + on platform device + +config SCSI_HISI_SAS_PCI + tristate "HiSilicon SAS on PCI bus" + depends on SCSI_HISI_SAS + depends on PCI + depends on ACPI + help + This driver supports HiSilicon's SAS HBA based on PCI device + +config SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE + bool "HiSilicon SAS debugging default enable" + depends on SCSI_HISI_SAS + help + Set Y to default enable DEBUGFS for SCSI_HISI_SAS diff --git a/drivers/scsi/hisi_sas/Makefile b/drivers/scsi/hisi_sas/Makefile new file mode 100644 index 000000000..742e732cd --- /dev/null +++ b/drivers/scsi/hisi_sas/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_SCSI_HISI_SAS) += 
hisi_sas_main.o +obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_v1_hw.o hisi_sas_v2_hw.o +obj-$(CONFIG_SCSI_HISI_SAS_PCI) += hisi_sas_v3_hw.o diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h new file mode 100644 index 000000000..9e73e9cbb --- /dev/null +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -0,0 +1,673 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2015 Linaro Ltd. + * Copyright (c) 2015 Hisilicon Limited. + */ + +#ifndef _HISI_SAS_H_ +#define _HISI_SAS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HISI_SAS_MAX_PHYS 9 +#define HISI_SAS_MAX_QUEUES 32 +#define HISI_SAS_QUEUE_SLOTS 4096 +#define HISI_SAS_MAX_ITCT_ENTRIES 1024 +#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES +#define HISI_SAS_RESETTING_BIT 0 +#define HISI_SAS_REJECT_CMD_BIT 1 +#define HISI_SAS_PM_BIT 2 +#define HISI_SAS_HW_FAULT_BIT 3 +#define HISI_SAS_MAX_COMMANDS (HISI_SAS_QUEUE_SLOTS) +#define HISI_SAS_RESERVED_IPTT 96 +#define HISI_SAS_UNRESERVED_IPTT \ + (HISI_SAS_MAX_COMMANDS - HISI_SAS_RESERVED_IPTT) + +#define HISI_SAS_IOST_ITCT_CACHE_NUM 64 +#define HISI_SAS_IOST_ITCT_CACHE_DW_SZ 10 +#define HISI_SAS_FIFO_DATA_DW_SIZE 32 + +#define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer)) +#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table)) + +#define hisi_sas_status_buf_addr(buf) \ + ((buf) + offsetof(struct hisi_sas_slot_buf_table, status_buffer)) +#define hisi_sas_status_buf_addr_mem(slot) hisi_sas_status_buf_addr((slot)->buf) +#define hisi_sas_status_buf_addr_dma(slot) \ + hisi_sas_status_buf_addr((slot)->buf_dma) + +#define hisi_sas_cmd_hdr_addr(buf) \ + ((buf) + offsetof(struct hisi_sas_slot_buf_table, command_header)) +#define hisi_sas_cmd_hdr_addr_mem(slot) hisi_sas_cmd_hdr_addr((slot)->buf) +#define hisi_sas_cmd_hdr_addr_dma(slot) hisi_sas_cmd_hdr_addr((slot)->buf_dma) + +#define hisi_sas_sge_addr(buf) \ + ((buf) + offsetof(struct hisi_sas_slot_buf_table, sge_page)) +#define hisi_sas_sge_addr_mem(slot) hisi_sas_sge_addr((slot)->buf) +#define hisi_sas_sge_addr_dma(slot) hisi_sas_sge_addr((slot)->buf_dma) + +#define hisi_sas_sge_dif_addr(buf) \ + ((buf) + offsetof(struct hisi_sas_slot_dif_buf_table, sge_dif_page)) +#define hisi_sas_sge_dif_addr_mem(slot) hisi_sas_sge_dif_addr((slot)->buf) +#define hisi_sas_sge_dif_addr_dma(slot) hisi_sas_sge_dif_addr((slot)->buf_dma) + +#define HISI_SAS_MAX_SSP_RESP_SZ (sizeof(struct ssp_frame_hdr) + 1024) +#define HISI_SAS_MAX_SMP_RESP_SZ 1028 +#define HISI_SAS_MAX_STP_RESP_SZ 28 + +#define HISI_SAS_SATA_PROTOCOL_NONDATA 0x1 +#define HISI_SAS_SATA_PROTOCOL_PIO 0x2 +#define HISI_SAS_SATA_PROTOCOL_DMA 0x4 +#define HISI_SAS_SATA_PROTOCOL_FPDMA 0x8 +#define HISI_SAS_SATA_PROTOCOL_ATAPI 0x10 + +#define HISI_SAS_DIF_PROT_MASK (SHOST_DIF_TYPE1_PROTECTION | \ + SHOST_DIF_TYPE2_PROTECTION | \ + SHOST_DIF_TYPE3_PROTECTION) + +#define HISI_SAS_DIX_PROT_MASK (SHOST_DIX_TYPE1_PROTECTION | \ + SHOST_DIX_TYPE2_PROTECTION | \ + SHOST_DIX_TYPE3_PROTECTION) + +#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK) + +#define HISI_SAS_WAIT_PHYUP_TIMEOUT (30 * HZ) +#define HISI_SAS_CLEAR_ITCT_TIMEOUT (20 * HZ) + +struct hisi_hba; + +enum { + PORT_TYPE_SAS = (1U << 1), + PORT_TYPE_SATA = (1U << 0), +}; + +enum dev_status { + HISI_SAS_DEV_INIT, + HISI_SAS_DEV_NORMAL, + HISI_SAS_DEV_NCQ_ERR, +}; + +enum { + 
HISI_SAS_INT_ABT_CMD = 0, + HISI_SAS_INT_ABT_DEV = 1, +}; + +enum hisi_sas_dev_type { + HISI_SAS_DEV_TYPE_STP = 0, + HISI_SAS_DEV_TYPE_SSP, + HISI_SAS_DEV_TYPE_SATA, +}; + +struct hisi_sas_hw_error { + u32 irq_msk; + u32 msk; + int shift; + const char *msg; + int reg; + const struct hisi_sas_hw_error *sub; +}; + +struct hisi_sas_rst { + struct hisi_hba *hisi_hba; + struct completion *completion; + struct work_struct work; + bool done; +}; + +#define HISI_SAS_RST_WORK_INIT(r, c) \ + { .hisi_hba = hisi_hba, \ + .completion = &c, \ + .work = __WORK_INITIALIZER(r.work, \ + hisi_sas_sync_rst_work_handler), \ + .done = false, \ + } + +#define HISI_SAS_DECLARE_RST_WORK_ON_STACK(r) \ + DECLARE_COMPLETION_ONSTACK(c); \ + struct hisi_sas_rst r = HISI_SAS_RST_WORK_INIT(r, c) + +enum hisi_sas_bit_err_type { + HISI_SAS_ERR_SINGLE_BIT_ECC = 0x0, + HISI_SAS_ERR_MULTI_BIT_ECC = 0x1, +}; + +enum hisi_sas_phy_event { + HISI_PHYE_PHY_UP = 0U, + HISI_PHYE_LINK_RESET, + HISI_PHYE_PHY_UP_PM, + HISI_PHYES_NUM, +}; + +struct hisi_sas_debugfs_fifo { + u32 signal_sel; + u32 dump_msk; + u32 dump_mode; + u32 trigger; + u32 trigger_msk; + u32 trigger_mode; + u32 rd_data[HISI_SAS_FIFO_DATA_DW_SIZE]; +}; + +struct hisi_sas_phy { + struct work_struct works[HISI_PHYES_NUM]; + struct hisi_hba *hisi_hba; + struct hisi_sas_port *port; + struct asd_sas_phy sas_phy; + struct sas_identify identify; + struct completion *reset_completion; + struct timer_list timer; + spinlock_t lock; + u64 port_id; /* from hw */ + u64 frame_rcvd_size; + u8 frame_rcvd[32]; + u8 phy_attached; + u8 in_reset; + u8 reserved[2]; + u32 phy_type; + u32 code_violation_err_count; + enum sas_linkrate minimum_linkrate; + enum sas_linkrate maximum_linkrate; + int enable; + int wait_phyup_cnt; + atomic_t down_cnt; + + /* Trace FIFO */ + struct hisi_sas_debugfs_fifo fifo; +}; + +struct hisi_sas_port { + struct asd_sas_port sas_port; + u8 port_attached; + u8 id; /* from hw */ +}; + +struct hisi_sas_cq { + struct hisi_hba *hisi_hba; + const struct cpumask *irq_mask; + int rd_point; + int id; + int irq_no; + spinlock_t poll_lock; +}; + +struct hisi_sas_dq { + struct hisi_hba *hisi_hba; + struct list_head list; + spinlock_t lock; + int wr_point; + int id; +}; + +struct hisi_sas_device { + struct hisi_hba *hisi_hba; + struct domain_device *sas_device; + struct completion *completion; + struct hisi_sas_dq *dq; + struct list_head list; + enum sas_device_type dev_type; + enum dev_status dev_status; + int device_id; + int sata_idx; + spinlock_t lock; /* For protecting slots */ +}; + +struct hisi_sas_slot { + struct list_head entry; + struct list_head delivery; + struct sas_task *task; + struct hisi_sas_port *port; + u64 n_elem; + u64 n_elem_dif; + int dlvry_queue; + int dlvry_queue_slot; + int cmplt_queue; + int cmplt_queue_slot; + int abort; + int ready; + int device_id; + void *cmd_hdr; + dma_addr_t cmd_hdr_dma; + struct timer_list internal_abort_timer; + bool is_internal; + struct sas_tmf_task *tmf; + /* Do not reorder/change members after here */ + void *buf; + dma_addr_t buf_dma; + u16 idx; +}; + +struct hisi_sas_iost_itct_cache { + u32 data[HISI_SAS_IOST_ITCT_CACHE_DW_SZ]; +}; + +enum hisi_sas_debugfs_reg_array_member { + DEBUGFS_GLOBAL = 0, + DEBUGFS_AXI, + DEBUGFS_RAS, + DEBUGFS_REGS_NUM +}; + +enum hisi_sas_debugfs_cache_type { + HISI_SAS_ITCT_CACHE, + HISI_SAS_IOST_CACHE, +}; + +enum hisi_sas_debugfs_bist_ffe_cfg { + FFE_SAS_1_5_GBPS, + FFE_SAS_3_0_GBPS, + FFE_SAS_6_0_GBPS, + FFE_SAS_12_0_GBPS, + FFE_RESV, + FFE_SATA_1_5_GBPS, + FFE_SATA_3_0_GBPS, + 
FFE_SATA_6_0_GBPS, + FFE_CFG_MAX +}; + +enum hisi_sas_debugfs_bist_fixed_code { + FIXED_CODE, + FIXED_CODE_1, + FIXED_CODE_MAX +}; + +enum { + HISI_SAS_BIST_CODE_MODE_PRBS7, + HISI_SAS_BIST_CODE_MODE_PRBS23, + HISI_SAS_BIST_CODE_MODE_PRBS31, + HISI_SAS_BIST_CODE_MODE_JTPAT, + HISI_SAS_BIST_CODE_MODE_CJTPAT, + HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, + HISI_SAS_BIST_CODE_MODE_TRAIN, + HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, + HISI_SAS_BIST_CODE_MODE_HFTP, + HISI_SAS_BIST_CODE_MODE_MFTP, + HISI_SAS_BIST_CODE_MODE_LFTP, + HISI_SAS_BIST_CODE_MODE_FIXED_DATA, +}; + +struct hisi_sas_hw { + int (*hw_init)(struct hisi_hba *hisi_hba); + int (*interrupt_preinit)(struct hisi_hba *hisi_hba); + void (*setup_itct)(struct hisi_hba *hisi_hba, + struct hisi_sas_device *device); + int (*slot_index_alloc)(struct hisi_hba *hisi_hba, + struct domain_device *device); + struct hisi_sas_device *(*alloc_dev)(struct domain_device *device); + void (*sl_notify_ssp)(struct hisi_hba *hisi_hba, int phy_no); + void (*start_delivery)(struct hisi_sas_dq *dq); + void (*prep_ssp)(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot); + void (*prep_smp)(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot); + void (*prep_stp)(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot); + void (*prep_abort)(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot); + void (*phys_init)(struct hisi_hba *hisi_hba); + void (*phy_start)(struct hisi_hba *hisi_hba, int phy_no); + void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no); + void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no); + void (*get_events)(struct hisi_hba *hisi_hba, int phy_no); + void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no, + struct sas_phy_linkrates *linkrates); + enum sas_linkrate (*phy_get_max_linkrate)(void); + int (*clear_itct)(struct hisi_hba *hisi_hba, + struct hisi_sas_device *dev); + void (*free_device)(struct hisi_sas_device *sas_dev); + int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id); + void (*dereg_device)(struct hisi_hba *hisi_hba, + struct domain_device *device); + int (*soft_reset)(struct hisi_hba *hisi_hba); + u32 (*get_phys_state)(struct hisi_hba *hisi_hba); + int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type, + u8 reg_index, u8 reg_count, u8 *write_data); + void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba, + int delay_ms, int timeout_ms); + void (*debugfs_snapshot_regs)(struct hisi_hba *hisi_hba); + int complete_hdr_size; + const struct scsi_host_template *sht; +}; + +#define HISI_SAS_MAX_DEBUGFS_DUMP (50) + +struct hisi_sas_debugfs_cq { + struct hisi_sas_cq *cq; + void *complete_hdr; +}; + +struct hisi_sas_debugfs_dq { + struct hisi_sas_dq *dq; + struct hisi_sas_cmd_hdr *hdr; +}; + +struct hisi_sas_debugfs_regs { + struct hisi_hba *hisi_hba; + u32 *data; +}; + +struct hisi_sas_debugfs_port { + struct hisi_sas_phy *phy; + u32 *data; +}; + +struct hisi_sas_debugfs_iost { + struct hisi_sas_iost *iost; +}; + +struct hisi_sas_debugfs_itct { + struct hisi_sas_itct *itct; +}; + +struct hisi_sas_debugfs_iost_cache { + struct hisi_sas_iost_itct_cache *cache; +}; + +struct hisi_sas_debugfs_itct_cache { + struct hisi_sas_iost_itct_cache *cache; +}; + +struct hisi_hba { + /* This must be the first element, used by SHOST_TO_SAS_HA */ + struct sas_ha_struct *p; + + struct platform_device *platform_dev; + struct pci_dev *pci_dev; + struct device *dev; + + int prot_mask; + + void __iomem *regs; + void __iomem *sgpio_regs; + struct regmap *ctrl; + u32 ctrl_reset_reg; + u32 ctrl_reset_sts_reg; + u32 
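struct hisi_sas_hw above is the per-hardware-revision operations table: the common code in hisi_sas_main.c drives everything through these hooks, while the v1/v2/v3 hw modules listed in the Makefile supply the register-level implementations. A hedged sketch of how such a backend plugs in; the my_vN_* names and the complete_hdr_size value are invented for illustration, the real tables live in the hisi_sas_vN_hw.c files added later in this patch.

/* Illustrative only: the shape of a hardware-revision backend. */
static int my_vN_hw_init(struct hisi_hba *hisi_hba)
{
	/* program global, PHY and queue registers for this silicon */
	return 0;
}

static const struct hisi_sas_hw my_vN_hw_ops = {
	.hw_init	   = my_vN_hw_init,
	/* .setup_itct, .prep_ssp, .prep_stp, .start_delivery, ... */
	.complete_hdr_size = sizeof(u32) * 4,	/* example value only */
};

/* A platform backend would then register through the common layer:
 *	hisi_sas_probe(pdev, &my_vN_hw_ops);
 */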
ctrl_clock_ena_reg; + u32 refclk_frequency_mhz; + u8 sas_addr[SAS_ADDR_SIZE]; + + int *irq_map; /* v2 hw */ + + int n_phy; + spinlock_t lock; + struct semaphore sem; + + struct timer_list timer; + struct workqueue_struct *wq; + + int slot_index_count; + int last_slot_index; + int last_dev_id; + unsigned long *slot_index_tags; + unsigned long reject_stp_links_msk; + + /* SCSI/SAS glue */ + struct sas_ha_struct sha; + struct Scsi_Host *shost; + + struct hisi_sas_cq cq[HISI_SAS_MAX_QUEUES]; + struct hisi_sas_dq dq[HISI_SAS_MAX_QUEUES]; + struct hisi_sas_phy phy[HISI_SAS_MAX_PHYS]; + struct hisi_sas_port port[HISI_SAS_MAX_PHYS]; + + int queue_count; + + struct hisi_sas_device devices[HISI_SAS_MAX_DEVICES]; + struct hisi_sas_cmd_hdr *cmd_hdr[HISI_SAS_MAX_QUEUES]; + dma_addr_t cmd_hdr_dma[HISI_SAS_MAX_QUEUES]; + void *complete_hdr[HISI_SAS_MAX_QUEUES]; + dma_addr_t complete_hdr_dma[HISI_SAS_MAX_QUEUES]; + struct hisi_sas_initial_fis *initial_fis; + dma_addr_t initial_fis_dma; + struct hisi_sas_itct *itct; + dma_addr_t itct_dma; + struct hisi_sas_iost *iost; + dma_addr_t iost_dma; + struct hisi_sas_breakpoint *breakpoint; + dma_addr_t breakpoint_dma; + struct hisi_sas_breakpoint *sata_breakpoint; + dma_addr_t sata_breakpoint_dma; + struct hisi_sas_slot *slot_info; + unsigned long flags; + const struct hisi_sas_hw *hw; /* Low level hw interface */ + unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)]; + struct work_struct rst_work; + struct work_struct debugfs_work; + u32 phy_state; + u32 intr_coal_ticks; /* Time of interrupt coalesce in us */ + u32 intr_coal_count; /* Interrupt count to coalesce */ + + int cq_nvecs; + + /* bist */ + enum sas_linkrate debugfs_bist_linkrate; + int debugfs_bist_code_mode; + int debugfs_bist_phy_no; + int debugfs_bist_mode; + u32 debugfs_bist_cnt; + int debugfs_bist_enable; + u32 debugfs_bist_ffe[HISI_SAS_MAX_PHYS][FFE_CFG_MAX]; + u32 debugfs_bist_fixed_code[FIXED_CODE_MAX]; + + /* debugfs memories */ + /* Put Global AXI and RAS Register into register array */ + struct hisi_sas_debugfs_regs debugfs_regs[HISI_SAS_MAX_DEBUGFS_DUMP][DEBUGFS_REGS_NUM]; + struct hisi_sas_debugfs_port debugfs_port_reg[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_PHYS]; + struct hisi_sas_debugfs_cq debugfs_cq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES]; + struct hisi_sas_debugfs_dq debugfs_dq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES]; + struct hisi_sas_debugfs_iost debugfs_iost[HISI_SAS_MAX_DEBUGFS_DUMP]; + struct hisi_sas_debugfs_itct debugfs_itct[HISI_SAS_MAX_DEBUGFS_DUMP]; + struct hisi_sas_debugfs_iost_cache debugfs_iost_cache[HISI_SAS_MAX_DEBUGFS_DUMP]; + struct hisi_sas_debugfs_itct_cache debugfs_itct_cache[HISI_SAS_MAX_DEBUGFS_DUMP]; + + u64 debugfs_timestamp[HISI_SAS_MAX_DEBUGFS_DUMP]; + int debugfs_dump_index; + struct dentry *debugfs_dir; + struct dentry *debugfs_dump_dentry; + struct dentry *debugfs_bist_dentry; + struct dentry *debugfs_fifo_dentry; + + int iopoll_q_cnt; +}; + +/* Generic HW DMA host memory structures */ +/* Delivery queue header */ +struct hisi_sas_cmd_hdr { + /* dw0 */ + __le32 dw0; + + /* dw1 */ + __le32 dw1; + + /* dw2 */ + __le32 dw2; + + /* dw3 */ + __le32 transfer_tags; + + /* dw4 */ + __le32 data_transfer_len; + + /* dw5 */ + __le32 first_burst_num; + + /* dw6 */ + __le32 sg_len; + + /* dw7 */ + __le32 dw7; + + /* dw8-9 */ + __le64 cmd_table_addr; + + /* dw10-11 */ + __le64 sts_buffer_addr; + + /* dw12-13 */ + __le64 prd_table_addr; + + /* dw14-15 */ + __le64 dif_prd_table_addr; +}; + +struct hisi_sas_itct { + __le64 qw0; + __le64 
sas_addr; + __le64 qw2; + __le64 qw3; + __le64 qw4_15[12]; +}; + +struct hisi_sas_iost { + __le64 qw0; + __le64 qw1; + __le64 qw2; + __le64 qw3; +}; + +struct hisi_sas_err_record { + u32 data[4]; +}; + +struct hisi_sas_initial_fis { + struct hisi_sas_err_record err_record; + struct dev_to_host_fis fis; + u32 rsvd[3]; +}; + +struct hisi_sas_breakpoint { + u8 data[128]; +}; + +struct hisi_sas_sata_breakpoint { + struct hisi_sas_breakpoint tag[32]; +}; + +struct hisi_sas_sge { + __le64 addr; + __le32 page_ctrl_0; + __le32 page_ctrl_1; + __le32 data_len; + __le32 data_off; +}; + +struct hisi_sas_command_table_smp { + u8 bytes[44]; +}; + +struct hisi_sas_command_table_stp { + struct host_to_dev_fis command_fis; + u8 dummy[12]; + u8 atapi_cdb[ATAPI_CDB_LEN]; +}; + +#define HISI_SAS_SGE_PAGE_CNT (124) +struct hisi_sas_sge_page { + struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT]; +} __aligned(16); + +#define HISI_SAS_SGE_DIF_PAGE_CNT HISI_SAS_SGE_PAGE_CNT +struct hisi_sas_sge_dif_page { + struct hisi_sas_sge sge[HISI_SAS_SGE_DIF_PAGE_CNT]; +} __aligned(16); + +struct hisi_sas_command_table_ssp { + struct ssp_frame_hdr hdr; + union { + struct { + struct ssp_command_iu task; + u32 prot[7]; + }; + struct ssp_tmf_iu ssp_task; + struct xfer_rdy_iu xfer_rdy; + struct ssp_response_iu ssp_res; + } u; +}; + +union hisi_sas_command_table { + struct hisi_sas_command_table_ssp ssp; + struct hisi_sas_command_table_smp smp; + struct hisi_sas_command_table_stp stp; +} __aligned(16); + +struct hisi_sas_status_buffer { + struct hisi_sas_err_record err; + u8 iu[1024]; +} __aligned(16); + +struct hisi_sas_slot_buf_table { + struct hisi_sas_status_buffer status_buffer; + union hisi_sas_command_table command_header; + struct hisi_sas_sge_page sge_page; +}; + +struct hisi_sas_slot_dif_buf_table { + struct hisi_sas_slot_buf_table slot_buf; + struct hisi_sas_sge_dif_page sge_dif_page; +}; + +extern struct scsi_transport_template *hisi_sas_stt; + +extern bool hisi_sas_debugfs_enable; +extern u32 hisi_sas_debugfs_dump_count; +extern struct dentry *hisi_sas_debugfs_dir; + +extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba); +extern int hisi_sas_alloc(struct hisi_hba *hisi_hba); +extern void hisi_sas_free(struct hisi_hba *hisi_hba); +extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, + int direction); +extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port); +extern void hisi_sas_sata_done(struct sas_task *task, + struct hisi_sas_slot *slot); +extern int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba); +extern int hisi_sas_probe(struct platform_device *pdev, + const struct hisi_sas_hw *ops); +extern void hisi_sas_remove(struct platform_device *pdev); + +extern int hisi_sas_slave_configure(struct scsi_device *sdev); +extern int hisi_sas_slave_alloc(struct scsi_device *sdev); +extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time); +extern void hisi_sas_scan_start(struct Scsi_Host *shost); +extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type); +extern void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, + int enable); +extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy, + gfp_t gfp_flags); +extern void hisi_sas_phy_bcast(struct hisi_sas_phy *phy); +extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, + struct sas_task *task, + struct hisi_sas_slot *slot, + bool need_lock); +extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba); +extern void hisi_sas_rst_work_handler(struct work_struct *work); 
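Each slot owns one contiguous DMA-coherent buffer (slot->buf with its bus address slot->buf_dma), and the hisi_sas_*_addr_mem()/_dma() macros defined earlier in this header carve it into the status buffer, the command table and the SGE page at the fixed offsets of struct hisi_sas_slot_buf_table above. A small sketch of that arithmetic; the helper below is illustrative, not part of the driver.

/* Sketch: the same offsetof() offset applied to either base, so the CPU
 * view (slot->buf) and the device view (slot->buf_dma) always name the
 * same bytes of the per-slot buffer. */
static dma_addr_t example_sge_bus_addr(struct hisi_sas_slot *slot)
{
	/* equivalent to hisi_sas_sge_addr_dma(slot) */
	return slot->buf_dma +
	       offsetof(struct hisi_sas_slot_buf_table, sge_page);
}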
+extern void hisi_sas_sync_rst_work_handler(struct work_struct *work); +extern void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no); +extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, + enum hisi_sas_phy_event event); +extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba); +extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max); +extern void hisi_sas_sync_cqs(struct hisi_hba *hisi_hba); +extern void hisi_sas_sync_poll_cqs(struct hisi_hba *hisi_hba); +extern void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba); +extern void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba); +#endif diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c new file mode 100644 index 000000000..b155ac800 --- /dev/null +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -0,0 +1,2626 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2015 Linaro Ltd. + * Copyright (c) 2015 Hisilicon Limited. + */ + +#include "hisi_sas.h" +#define DRV_NAME "hisi_sas" + +#define DEV_IS_GONE(dev) \ + ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) + +static int hisi_sas_softreset_ata_disk(struct domain_device *device); +static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata); +static void hisi_sas_release_task(struct hisi_hba *hisi_hba, + struct domain_device *device); +static void hisi_sas_dev_gone(struct domain_device *device); + +struct hisi_sas_internal_abort_data { + bool rst_ha_timeout; /* reset the HA for timeout */ +}; + +u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) +{ + switch (fis->command) { + case ATA_CMD_FPDMA_WRITE: + case ATA_CMD_FPDMA_READ: + case ATA_CMD_FPDMA_RECV: + case ATA_CMD_FPDMA_SEND: + case ATA_CMD_NCQ_NON_DATA: + return HISI_SAS_SATA_PROTOCOL_FPDMA; + + case ATA_CMD_DOWNLOAD_MICRO: + case ATA_CMD_ID_ATA: + case ATA_CMD_PMP_READ: + case ATA_CMD_READ_LOG_EXT: + case ATA_CMD_PIO_READ: + case ATA_CMD_PIO_READ_EXT: + case ATA_CMD_PMP_WRITE: + case ATA_CMD_WRITE_LOG_EXT: + case ATA_CMD_PIO_WRITE: + case ATA_CMD_PIO_WRITE_EXT: + return HISI_SAS_SATA_PROTOCOL_PIO; + + case ATA_CMD_DSM: + case ATA_CMD_DOWNLOAD_MICRO_DMA: + case ATA_CMD_PMP_READ_DMA: + case ATA_CMD_PMP_WRITE_DMA: + case ATA_CMD_READ: + case ATA_CMD_READ_EXT: + case ATA_CMD_READ_LOG_DMA_EXT: + case ATA_CMD_READ_STREAM_DMA_EXT: + case ATA_CMD_TRUSTED_RCV_DMA: + case ATA_CMD_TRUSTED_SND_DMA: + case ATA_CMD_WRITE: + case ATA_CMD_WRITE_EXT: + case ATA_CMD_WRITE_FUA_EXT: + case ATA_CMD_WRITE_QUEUED: + case ATA_CMD_WRITE_LOG_DMA_EXT: + case ATA_CMD_WRITE_STREAM_DMA_EXT: + case ATA_CMD_ZAC_MGMT_IN: + return HISI_SAS_SATA_PROTOCOL_DMA; + + case ATA_CMD_CHK_POWER: + case ATA_CMD_DEV_RESET: + case ATA_CMD_EDD: + case ATA_CMD_FLUSH: + case ATA_CMD_FLUSH_EXT: + case ATA_CMD_VERIFY: + case ATA_CMD_VERIFY_EXT: + case ATA_CMD_SET_FEATURES: + case ATA_CMD_STANDBY: + case ATA_CMD_STANDBYNOW1: + case ATA_CMD_ZAC_MGMT_OUT: + return HISI_SAS_SATA_PROTOCOL_NONDATA; + + case ATA_CMD_SET_MAX: + switch (fis->features) { + case ATA_SET_MAX_PASSWD: + case ATA_SET_MAX_LOCK: + return HISI_SAS_SATA_PROTOCOL_PIO; + + case ATA_SET_MAX_PASSWD_DMA: + case ATA_SET_MAX_UNLOCK_DMA: + return HISI_SAS_SATA_PROTOCOL_DMA; + + default: + return HISI_SAS_SATA_PROTOCOL_NONDATA; + } + + default: + { + if (direction == DMA_NONE) + return HISI_SAS_SATA_PROTOCOL_NONDATA; + return HISI_SAS_SATA_PROTOCOL_PIO; + } + } +} +EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); + +void hisi_sas_sata_done(struct sas_task 
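hisi_sas_get_ata_protocol() above reduces the ATA command carried in a host-to-device FIS to one of the HISI_SAS_SATA_PROTOCOL_* classes, which the hardware-revision code later encodes into the STP command header. A usage sketch; the values follow directly from the switch statement above.

/* Sketch: classifying a queued (NCQ) write.  ATA_CMD_FPDMA_WRITE is in
 * the first group of cases, so the FPDMA class is returned. */
static u8 example_classify_ncq_write(void)
{
	struct host_to_dev_fis fis = { .command = ATA_CMD_FPDMA_WRITE };

	return hisi_sas_get_ata_protocol(&fis, DMA_TO_DEVICE);
	/* == HISI_SAS_SATA_PROTOCOL_FPDMA (0x8) */
}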
*task, + struct hisi_sas_slot *slot) +{ + struct task_status_struct *ts = &task->task_status; + struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; + struct hisi_sas_status_buffer *status_buf = + hisi_sas_status_buf_addr_mem(slot); + u8 *iu = &status_buf->iu[0]; + struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; + + resp->frame_len = sizeof(struct dev_to_host_fis); + memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); + + ts->buf_valid_size = sizeof(*resp); +} +EXPORT_SYMBOL_GPL(hisi_sas_sata_done); + +/* + * This function assumes linkrate mask fits in 8 bits, which it + * does for all HW versions supported. + */ +u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) +{ + u8 rate = 0; + int i; + + max -= SAS_LINK_RATE_1_5_GBPS; + for (i = 0; i <= max; i++) + rate |= 1 << (i * 2); + return rate; +} +EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); + +static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) +{ + return device->port->ha->lldd_ha; +} + +struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) +{ + return container_of(sas_port, struct hisi_sas_port, sas_port); +} +EXPORT_SYMBOL_GPL(to_hisi_sas_port); + +void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) +{ + int phy_no; + + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) + hisi_sas_phy_enable(hisi_hba, phy_no, 0); +} +EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); + +static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) +{ + void *bitmap = hisi_hba->slot_index_tags; + + __clear_bit(slot_idx, bitmap); +} + +static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) +{ + if (hisi_hba->hw->slot_index_alloc || + slot_idx < HISI_SAS_RESERVED_IPTT) { + spin_lock(&hisi_hba->lock); + hisi_sas_slot_index_clear(hisi_hba, slot_idx); + spin_unlock(&hisi_hba->lock); + } +} + +static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) +{ + void *bitmap = hisi_hba->slot_index_tags; + + __set_bit(slot_idx, bitmap); +} + +static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, + struct request *rq) +{ + int index; + void *bitmap = hisi_hba->slot_index_tags; + + if (rq) + return rq->tag + HISI_SAS_RESERVED_IPTT; + + spin_lock(&hisi_hba->lock); + index = find_next_zero_bit(bitmap, HISI_SAS_RESERVED_IPTT, + hisi_hba->last_slot_index + 1); + if (index >= HISI_SAS_RESERVED_IPTT) { + index = find_next_zero_bit(bitmap, + HISI_SAS_RESERVED_IPTT, + 0); + if (index >= HISI_SAS_RESERVED_IPTT) { + spin_unlock(&hisi_hba->lock); + return -SAS_QUEUE_FULL; + } + } + hisi_sas_slot_index_set(hisi_hba, index); + hisi_hba->last_slot_index = index; + spin_unlock(&hisi_hba->lock); + + return index; +} + +void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, + struct hisi_sas_slot *slot, bool need_lock) +{ + int device_id = slot->device_id; + struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; + + if (task) { + struct device *dev = hisi_hba->dev; + + if (!task->lldd_task) + return; + + task->lldd_task = NULL; + + if (!sas_protocol_ata(task->task_proto)) { + if (slot->n_elem) { + if (task->task_proto & SAS_PROTOCOL_SSP) + dma_unmap_sg(dev, task->scatter, + task->num_scatter, + task->data_dir); + else + dma_unmap_sg(dev, &task->smp_task.smp_req, + 1, DMA_TO_DEVICE); + } + if (slot->n_elem_dif) { + struct sas_ssp_task *ssp_task = &task->ssp_task; + struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; + + dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + 
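A worked example of hisi_sas_get_prog_phy_linkrate_mask() above, which sets one bit per supported rate in every second bit position; this is why the comment notes that the mask fits in 8 bits for the rates these controllers support.

/* Sketch: max = SAS_LINK_RATE_6_0_GBPS.
 * After max -= SAS_LINK_RATE_1_5_GBPS the loop runs for i = 0, 1, 2:
 *	i = 0 -> rate |= 1 << 0   (1.5 Gbps)
 *	i = 1 -> rate |= 1 << 2   (3.0 Gbps)
 *	i = 2 -> rate |= 1 << 4   (6.0 Gbps)
 */
u8 mask = hisi_sas_get_prog_phy_linkrate_mask(SAS_LINK_RATE_6_0_GBPS);
/* mask == 0x15 */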
task->data_dir); + } + } + } + + if (need_lock) { + spin_lock(&sas_dev->lock); + list_del_init(&slot->entry); + spin_unlock(&sas_dev->lock); + } else { + list_del_init(&slot->entry); + } + + memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); + + hisi_sas_slot_index_free(hisi_hba, slot->idx); +} +EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); + +static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + hisi_hba->hw->prep_smp(hisi_hba, slot); +} + +static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + hisi_hba->hw->prep_ssp(hisi_hba, slot); +} + +static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + hisi_hba->hw->prep_stp(hisi_hba, slot); +} + +static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + hisi_hba->hw->prep_abort(hisi_hba, slot); +} + +static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, + struct sas_task *task, int n_elem) +{ + struct device *dev = hisi_hba->dev; + + if (!sas_protocol_ata(task->task_proto) && n_elem) { + if (task->num_scatter) { + dma_unmap_sg(dev, task->scatter, task->num_scatter, + task->data_dir); + } else if (task->task_proto & SAS_PROTOCOL_SMP) { + dma_unmap_sg(dev, &task->smp_task.smp_req, + 1, DMA_TO_DEVICE); + } + } +} + +static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, + struct sas_task *task, int *n_elem) +{ + struct device *dev = hisi_hba->dev; + int rc; + + if (sas_protocol_ata(task->task_proto)) { + *n_elem = task->num_scatter; + } else { + unsigned int req_len; + + if (task->num_scatter) { + *n_elem = dma_map_sg(dev, task->scatter, + task->num_scatter, task->data_dir); + if (!*n_elem) { + rc = -ENOMEM; + goto prep_out; + } + } else if (task->task_proto & SAS_PROTOCOL_SMP) { + *n_elem = dma_map_sg(dev, &task->smp_task.smp_req, + 1, DMA_TO_DEVICE); + if (!*n_elem) { + rc = -ENOMEM; + goto prep_out; + } + req_len = sg_dma_len(&task->smp_task.smp_req); + if (req_len & 0x3) { + rc = -EINVAL; + goto err_out_dma_unmap; + } + } + } + + if (*n_elem > HISI_SAS_SGE_PAGE_CNT) { + dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n", + *n_elem); + rc = -EINVAL; + goto err_out_dma_unmap; + } + return 0; + +err_out_dma_unmap: + /* It would be better to call dma_unmap_sg() here, but it's messy */ + hisi_sas_dma_unmap(hisi_hba, task, *n_elem); +prep_out: + return rc; +} + +static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba, + struct sas_task *task, int n_elem_dif) +{ + struct device *dev = hisi_hba->dev; + + if (n_elem_dif) { + struct sas_ssp_task *ssp_task = &task->ssp_task; + struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; + + dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + task->data_dir); + } +} + +static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba, + int *n_elem_dif, struct sas_task *task) +{ + struct device *dev = hisi_hba->dev; + struct sas_ssp_task *ssp_task; + struct scsi_cmnd *scsi_cmnd; + int rc; + + if (task->num_scatter) { + ssp_task = &task->ssp_task; + scsi_cmnd = ssp_task->cmd; + + if (scsi_prot_sg_count(scsi_cmnd)) { + *n_elem_dif = dma_map_sg(dev, + scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + task->data_dir); + + if (!*n_elem_dif) + return -ENOMEM; + + if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) { + dev_err(dev, "task prep: n_elem_dif(%d) too large\n", + *n_elem_dif); + rc = -EINVAL; + goto err_out_dif_dma_unmap; + } + } + } + + return 0; + +err_out_dif_dma_unmap: + dma_unmap_sg(dev, 
scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), task->data_dir); + return rc; +} + +static +void hisi_sas_task_deliver(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, + struct hisi_sas_dq *dq, + struct hisi_sas_device *sas_dev) +{ + struct hisi_sas_cmd_hdr *cmd_hdr_base; + int dlvry_queue_slot, dlvry_queue; + struct sas_task *task = slot->task; + int wr_q_index; + + spin_lock(&dq->lock); + wr_q_index = dq->wr_point; + dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; + list_add_tail(&slot->delivery, &dq->list); + spin_unlock(&dq->lock); + spin_lock(&sas_dev->lock); + list_add_tail(&slot->entry, &sas_dev->list); + spin_unlock(&sas_dev->lock); + + dlvry_queue = dq->id; + dlvry_queue_slot = wr_q_index; + + slot->device_id = sas_dev->device_id; + slot->dlvry_queue = dlvry_queue; + slot->dlvry_queue_slot = dlvry_queue_slot; + cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; + slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; + + task->lldd_task = slot; + + memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); + memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); + memset(hisi_sas_status_buf_addr_mem(slot), 0, + sizeof(struct hisi_sas_err_record)); + + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + hisi_sas_task_prep_smp(hisi_hba, slot); + break; + case SAS_PROTOCOL_SSP: + hisi_sas_task_prep_ssp(hisi_hba, slot); + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_STP_ALL: + hisi_sas_task_prep_ata(hisi_hba, slot); + break; + case SAS_PROTOCOL_INTERNAL_ABORT: + hisi_sas_task_prep_abort(hisi_hba, slot); + break; + default: + return; + } + + /* Make slot memories observable before marking as ready */ + smp_wmb(); + WRITE_ONCE(slot->ready, 1); + + spin_lock(&dq->lock); + hisi_hba->hw->start_delivery(dq); + spin_unlock(&dq->lock); +} + +static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) +{ + int n_elem = 0, n_elem_dif = 0; + struct domain_device *device = task->dev; + struct asd_sas_port *sas_port = device->port; + struct hisi_sas_device *sas_dev = device->lldd_dev; + bool internal_abort = sas_is_internal_abort(task); + struct hisi_sas_dq *dq = NULL; + struct hisi_sas_port *port; + struct hisi_hba *hisi_hba; + struct hisi_sas_slot *slot; + struct request *rq = NULL; + struct device *dev; + int rc; + + if (!sas_port) { + struct task_status_struct *ts = &task->task_status; + + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + /* + * libsas will use dev->port, should + * not call task_done for sata + */ + if (device->dev_type != SAS_SATA_DEV && !internal_abort) + task->task_done(task); + return -ECOMM; + } + + hisi_hba = dev_to_hisi_hba(device); + dev = hisi_hba->dev; + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + case SAS_PROTOCOL_SMP: + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_STP_ALL: + if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { + if (!gfpflags_allow_blocking(gfp_flags)) + return -EINVAL; + + down(&hisi_hba->sem); + up(&hisi_hba->sem); + } + + if (DEV_IS_GONE(sas_dev)) { + if (sas_dev) + dev_info(dev, "task prep: device %d not ready\n", + sas_dev->device_id); + else + dev_info(dev, "task prep: device %016llx not ready\n", + SAS_ADDR(device->sas_addr)); + + return -ECOMM; + } + + port = to_hisi_sas_port(sas_port); + if (!port->port_attached) { + dev_info(dev, "task prep: %s port%d not attach device\n", + dev_is_sata(device) ? 
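The smp_wmb()/WRITE_ONCE() pair in hisi_sas_task_deliver() below is a publish pattern: the command header, command table and status buffer must all be visible before the completion side may treat the slot as live. A minimal sketch of the consumer half, written generically because the matching read side lives in the per-revision completion code, not in this file; the helper name is hypothetical.

/* Producer (hisi_sas_task_deliver): fill slot memories, smp_wmb(),
 * then WRITE_ONCE(slot->ready, 1).
 * Consumer (sketch of the expected counterpart in the completion path): */
static bool example_slot_is_live(struct hisi_sas_slot *slot)
{
	if (!READ_ONCE(slot->ready))
		return false;	/* completion raced with delivery */
	/* Pairs with the producer's smp_wmb(): once ready reads as 1, the
	 * slot memories written before the barrier are visible here. */
	smp_rmb();
	return true;
}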
"SATA/STP" : "SAS", + device->port->id); + + return -ECOMM; + } + + rq = sas_task_find_rq(task); + if (rq) { + unsigned int dq_index; + u32 blk_tag; + + blk_tag = blk_mq_unique_tag(rq); + dq_index = blk_mq_unique_tag_to_hwq(blk_tag); + dq = &hisi_hba->dq[dq_index]; + } else { + int queue; + + if (hisi_hba->iopoll_q_cnt) { + /* + * Use interrupt queue (queue 0) to deliver and complete + * internal IOs of libsas or libata when there is at least + * one iopoll queue + */ + queue = 0; + } else { + struct Scsi_Host *shost = hisi_hba->shost; + struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; + + queue = qmap->mq_map[raw_smp_processor_id()]; + } + dq = &hisi_hba->dq[queue]; + } + break; + case SAS_PROTOCOL_INTERNAL_ABORT: + if (!hisi_hba->hw->prep_abort) + return TMF_RESP_FUNC_FAILED; + + if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags)) + return -EIO; + + hisi_hba = dev_to_hisi_hba(device); + + if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) + return -EINVAL; + + port = to_hisi_sas_port(sas_port); + dq = &hisi_hba->dq[task->abort_task.qid]; + break; + default: + dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n", + task->task_proto); + return -EINVAL; + } + + rc = hisi_sas_dma_map(hisi_hba, task, &n_elem); + if (rc < 0) + goto prep_out; + + if (!sas_protocol_ata(task->task_proto)) { + rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); + if (rc < 0) + goto err_out_dma_unmap; + } + + if (!internal_abort && hisi_hba->hw->slot_index_alloc) + rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); + else + rc = hisi_sas_slot_index_alloc(hisi_hba, rq); + + if (rc < 0) + goto err_out_dif_dma_unmap; + + slot = &hisi_hba->slot_info[rc]; + slot->n_elem = n_elem; + slot->n_elem_dif = n_elem_dif; + slot->task = task; + slot->port = port; + + slot->tmf = task->tmf; + slot->is_internal = !!task->tmf || internal_abort; + + /* protect task_prep and start_delivery sequence */ + hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev); + + return 0; + +err_out_dif_dma_unmap: + if (!sas_protocol_ata(task->task_proto)) + hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); +err_out_dma_unmap: + hisi_sas_dma_unmap(hisi_hba, task, n_elem); +prep_out: + dev_err(dev, "task exec: failed[%d]!\n", rc); + return rc; +} + +static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no, + gfp_t gfp_flags) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + if (!phy->phy_attached) + return; + + sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags); + + if (sas_phy->phy) { + struct sas_phy *sphy = sas_phy->phy; + + sphy->negotiated_linkrate = sas_phy->linkrate; + sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; + sphy->maximum_linkrate_hw = + hisi_hba->hw->phy_get_max_linkrate(); + if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) + sphy->minimum_linkrate = phy->minimum_linkrate; + + if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) + sphy->maximum_linkrate = phy->maximum_linkrate; + } + + if (phy->phy_type & PORT_TYPE_SAS) { + struct sas_identify_frame *id; + + id = (struct sas_identify_frame *)phy->frame_rcvd; + id->dev_type = phy->identify.device_type; + id->initiator_bits = SAS_PROTOCOL_ALL; + id->target_bits = phy->identify.target_port_protocols; + } else if (phy->phy_type & PORT_TYPE_SATA) { + /* Nothing */ + } + + sas_phy->frame_rcvd_size = phy->frame_rcvd_size; + sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags); +} + +static struct hisi_sas_device 
*hisi_sas_alloc_dev(struct domain_device *device) +{ + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct hisi_sas_device *sas_dev = NULL; + int last = hisi_hba->last_dev_id; + int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; + int i; + + spin_lock(&hisi_hba->lock); + for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { + if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { + int queue = i % hisi_hba->queue_count; + struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; + + hisi_hba->devices[i].device_id = i; + sas_dev = &hisi_hba->devices[i]; + sas_dev->dev_status = HISI_SAS_DEV_INIT; + sas_dev->dev_type = device->dev_type; + sas_dev->hisi_hba = hisi_hba; + sas_dev->sas_device = device; + sas_dev->dq = dq; + spin_lock_init(&sas_dev->lock); + INIT_LIST_HEAD(&hisi_hba->devices[i].list); + break; + } + i++; + } + hisi_hba->last_dev_id = i; + spin_unlock(&hisi_hba->lock); + + return sas_dev; +} + +static void hisi_sas_sync_poll_cq(struct hisi_sas_cq *cq) +{ + /* make sure CQ entries being processed are processed to completion */ + spin_lock(&cq->poll_lock); + spin_unlock(&cq->poll_lock); +} + +static bool hisi_sas_queue_is_poll(struct hisi_sas_cq *cq) +{ + struct hisi_hba *hisi_hba = cq->hisi_hba; + + if (cq->id < hisi_hba->queue_count - hisi_hba->iopoll_q_cnt) + return false; + return true; +} + +static void hisi_sas_sync_cq(struct hisi_sas_cq *cq) +{ + if (hisi_sas_queue_is_poll(cq)) + hisi_sas_sync_poll_cq(cq); + else + synchronize_irq(cq->irq_no); +} + +void hisi_sas_sync_poll_cqs(struct hisi_hba *hisi_hba) +{ + int i; + + for (i = 0; i < hisi_hba->queue_count; i++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + + if (hisi_sas_queue_is_poll(cq)) + hisi_sas_sync_poll_cq(cq); + } +} +EXPORT_SYMBOL_GPL(hisi_sas_sync_poll_cqs); + +void hisi_sas_sync_cqs(struct hisi_hba *hisi_hba) +{ + int i; + + for (i = 0; i < hisi_hba->queue_count; i++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + + hisi_sas_sync_cq(cq); + } +} +EXPORT_SYMBOL_GPL(hisi_sas_sync_cqs); + +static void hisi_sas_tmf_aborted(struct sas_task *task) +{ + struct hisi_sas_slot *slot = task->lldd_task; + struct domain_device *device = task->dev; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_hba *hisi_hba = sas_dev->hisi_hba; + + if (slot) { + struct hisi_sas_cq *cq = + &hisi_hba->cq[slot->dlvry_queue]; + /* + * sync irq or poll queue to avoid free'ing task + * before using task in IO completion + */ + hisi_sas_sync_cq(cq); + slot->task = NULL; + } +} + +#define HISI_SAS_DISK_RECOVER_CNT 3 +static int hisi_sas_init_device(struct domain_device *device) +{ + int rc = TMF_RESP_FUNC_COMPLETE; + struct scsi_lun lun; + int retry = HISI_SAS_DISK_RECOVER_CNT; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + + switch (device->dev_type) { + case SAS_END_DEVICE: + int_to_scsilun(0, &lun); + + while (retry-- > 0) { + rc = sas_abort_task_set(device, lun.scsi_lun); + if (rc == TMF_RESP_FUNC_COMPLETE) { + hisi_sas_release_task(hisi_hba, device); + break; + } + } + break; + case SAS_SATA_DEV: + case SAS_SATA_PM: + case SAS_SATA_PM_PORT: + case SAS_SATA_PENDING: + /* + * If an expander is swapped when a SATA disk is attached then + * we should issue a hard reset to clear previous affiliation + * of STP target port, see SPL (chapter 6.19.4). + * + * However we don't need to issue a hard reset here for these + * reasons: + * a. When probing the device, libsas/libata already issues a + * hard reset in sas_probe_sata() -> ata_port_probe(). 
+ * Note that in hisi_sas_debug_I_T_nexus_reset() we take care + * to issue a hard reset by checking the dev status (== INIT). + * b. When resetting the controller, this is simply unnecessary. + */ + while (retry-- > 0) { + rc = hisi_sas_softreset_ata_disk(device); + if (!rc) + break; + } + break; + default: + break; + } + + return rc; +} + +int hisi_sas_slave_alloc(struct scsi_device *sdev) +{ + struct domain_device *ddev = sdev_to_domain_dev(sdev); + struct hisi_sas_device *sas_dev = ddev->lldd_dev; + int rc; + + rc = sas_slave_alloc(sdev); + if (rc) + return rc; + + rc = hisi_sas_init_device(ddev); + if (rc) + return rc; + sas_dev->dev_status = HISI_SAS_DEV_NORMAL; + return 0; +} +EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc); + +static int hisi_sas_dev_found(struct domain_device *device) +{ + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct domain_device *parent_dev = device->parent; + struct hisi_sas_device *sas_dev; + struct device *dev = hisi_hba->dev; + int rc; + + if (hisi_hba->hw->alloc_dev) + sas_dev = hisi_hba->hw->alloc_dev(device); + else + sas_dev = hisi_sas_alloc_dev(device); + if (!sas_dev) { + dev_err(dev, "fail alloc dev: max support %d devices\n", + HISI_SAS_MAX_DEVICES); + return -EINVAL; + } + + device->lldd_dev = sas_dev; + hisi_hba->hw->setup_itct(hisi_hba, sas_dev); + + if (parent_dev && dev_is_expander(parent_dev->dev_type)) { + int phy_no; + + phy_no = sas_find_attached_phy_id(&parent_dev->ex_dev, device); + if (phy_no < 0) { + dev_info(dev, "dev found: no attached " + "dev:%016llx at ex:%016llx\n", + SAS_ADDR(device->sas_addr), + SAS_ADDR(parent_dev->sas_addr)); + rc = phy_no; + goto err_out; + } + } + + dev_info(dev, "dev[%d:%x] found\n", + sas_dev->device_id, sas_dev->dev_type); + + return 0; + +err_out: + hisi_sas_dev_gone(device); + return rc; +} + +int hisi_sas_slave_configure(struct scsi_device *sdev) +{ + struct domain_device *dev = sdev_to_domain_dev(sdev); + int ret = sas_slave_configure(sdev); + + if (ret) + return ret; + if (!dev_is_sata(dev)) + sas_change_queue_depth(sdev, 64); + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_sas_slave_configure); + +void hisi_sas_scan_start(struct Scsi_Host *shost) +{ + struct hisi_hba *hisi_hba = shost_priv(shost); + + hisi_hba->hw->phys_init(hisi_hba); +} +EXPORT_SYMBOL_GPL(hisi_sas_scan_start); + +int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct hisi_hba *hisi_hba = shost_priv(shost); + struct sas_ha_struct *sha = &hisi_hba->sha; + + /* Wait for PHY up interrupt to occur */ + if (time < HZ) + return 0; + + sas_drain_work(sha); + return 1; +} +EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); + +static void hisi_sas_phyup_work_common(struct work_struct *work, + enum hisi_sas_phy_event event) +{ + struct hisi_sas_phy *phy = + container_of(work, typeof(*phy), works[event]); + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + int phy_no = sas_phy->id; + + phy->wait_phyup_cnt = 0; + if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) + hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); + hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL); +} + +static void hisi_sas_phyup_work(struct work_struct *work) +{ + hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP); +} + +static void hisi_sas_linkreset_work(struct work_struct *work) +{ + struct hisi_sas_phy *phy = + container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); +} + +static void 
hisi_sas_phyup_pm_work(struct work_struct *work) +{ + struct hisi_sas_phy *phy = + container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]); + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct device *dev = hisi_hba->dev; + + hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM); + pm_runtime_put_sync(dev); +} + +static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { + [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, + [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, + [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work, +}; + +bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, + enum hisi_sas_phy_event event) +{ + struct hisi_hba *hisi_hba = phy->hisi_hba; + + if (WARN_ON(event >= HISI_PHYES_NUM)) + return false; + + return queue_work(hisi_hba->wq, &phy->works[event]); +} +EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); + +static void hisi_sas_wait_phyup_timedout(struct timer_list *t) +{ + struct hisi_sas_phy *phy = from_timer(phy, t, timer); + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct device *dev = hisi_hba->dev; + int phy_no = phy->sas_phy.id; + + dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); + hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); +} + +#define HISI_SAS_WAIT_PHYUP_RETRIES 10 + +void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct device *dev = hisi_hba->dev; + unsigned long flags; + + dev_dbg(dev, "phy%d OOB ready\n", phy_no); + spin_lock_irqsave(&phy->lock, flags); + if (phy->phy_attached) { + spin_unlock_irqrestore(&phy->lock, flags); + return; + } + + if (!timer_pending(&phy->timer)) { + if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) { + phy->wait_phyup_cnt++; + phy->timer.expires = jiffies + + HISI_SAS_WAIT_PHYUP_TIMEOUT; + add_timer(&phy->timer); + spin_unlock_irqrestore(&phy->lock, flags); + return; + } + + dev_warn(dev, "phy%d failed to come up %d times, giving up\n", + phy_no, phy->wait_phyup_cnt); + phy->wait_phyup_cnt = 0; + } + spin_unlock_irqrestore(&phy->lock, flags); +} + +EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready); + +static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + int i; + + phy->hisi_hba = hisi_hba; + phy->port = NULL; + phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; + phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate(); + sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 
1 : 0; + sas_phy->iproto = SAS_PROTOCOL_ALL; + sas_phy->tproto = 0; + sas_phy->role = PHY_ROLE_INITIATOR; + sas_phy->oob_mode = OOB_NOT_CONNECTED; + sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; + sas_phy->id = phy_no; + sas_phy->sas_addr = &hisi_hba->sas_addr[0]; + sas_phy->frame_rcvd = &phy->frame_rcvd[0]; + sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; + sas_phy->lldd_phy = phy; + + for (i = 0; i < HISI_PHYES_NUM; i++) + INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); + + spin_lock_init(&phy->lock); + + timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0); +} + +/* Wrapper to ensure we track hisi_sas_phy.enable properly */ +void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *aphy = &phy->sas_phy; + struct sas_phy *sphy = aphy->phy; + unsigned long flags; + + spin_lock_irqsave(&phy->lock, flags); + + if (enable) { + /* We may have been enabled already; if so, don't touch */ + if (!phy->enable) + sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; + hisi_hba->hw->phy_start(hisi_hba, phy_no); + } else { + sphy->negotiated_linkrate = SAS_PHY_DISABLED; + hisi_hba->hw->phy_disable(hisi_hba, phy_no); + } + phy->enable = enable; + spin_unlock_irqrestore(&phy->lock, flags); +} +EXPORT_SYMBOL_GPL(hisi_sas_phy_enable); + +static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) +{ + struct hisi_sas_phy *phy = sas_phy->lldd_phy; + struct asd_sas_port *sas_port = sas_phy->port; + struct hisi_sas_port *port; + + if (!sas_port) + return; + + port = to_hisi_sas_port(sas_port); + port->port_attached = 1; + port->id = phy->port_id; + phy->port = port; + sas_port->lldd_port = port; +} + +static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, + struct hisi_sas_slot *slot, bool need_lock) +{ + if (task) { + unsigned long flags; + struct task_status_struct *ts; + + ts = &task->task_status; + + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP) + task->task_state_flags |= SAS_TASK_STATE_DONE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + } + + hisi_sas_slot_task_free(hisi_hba, task, slot, need_lock); +} + +static void hisi_sas_release_task(struct hisi_hba *hisi_hba, + struct domain_device *device) +{ + struct hisi_sas_slot *slot, *slot2; + struct hisi_sas_device *sas_dev = device->lldd_dev; + + spin_lock(&sas_dev->lock); + list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) + hisi_sas_do_release_task(hisi_hba, slot->task, slot, false); + + spin_unlock(&sas_dev->lock); +} + +void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) +{ + struct hisi_sas_device *sas_dev; + struct domain_device *device; + int i; + + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { + sas_dev = &hisi_hba->devices[i]; + device = sas_dev->sas_device; + + if ((sas_dev->dev_type == SAS_PHY_UNUSED) || + !device) + continue; + + hisi_sas_release_task(hisi_hba, device); + } +} +EXPORT_SYMBOL_GPL(hisi_sas_release_tasks); + +static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, + struct domain_device *device) +{ + if (hisi_hba->hw->dereg_device) + hisi_hba->hw->dereg_device(hisi_hba, device); +} + +static int +hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev, + bool rst_ha_timeout) +{ + struct hisi_sas_internal_abort_data data = { rst_ha_timeout }; + struct 
domain_device *device = sas_dev->sas_device; + struct hisi_hba *hisi_hba = sas_dev->hisi_hba; + int i, rc; + + for (i = 0; i < hisi_hba->cq_nvecs; i++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + const struct cpumask *mask = cq->irq_mask; + + if (mask && !cpumask_intersects(cpu_online_mask, mask)) + continue; + rc = sas_execute_internal_abort_dev(device, i, &data); + if (rc) + return rc; + } + + return 0; +} + +static void hisi_sas_dev_gone(struct domain_device *device) +{ + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct device *dev = hisi_hba->dev; + int ret = 0; + + dev_info(dev, "dev[%d:%x] is gone\n", + sas_dev->device_id, sas_dev->dev_type); + + down(&hisi_hba->sem); + if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { + hisi_sas_internal_task_abort_dev(sas_dev, true); + + hisi_sas_dereg_device(hisi_hba, device); + + ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev); + device->lldd_dev = NULL; + } + + if (hisi_hba->hw->free_device) + hisi_hba->hw->free_device(sas_dev); + + /* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */ + if (!ret) + sas_dev->dev_type = SAS_PHY_UNUSED; + sas_dev->sas_device = NULL; + up(&hisi_hba->sem); +} + +static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, + struct sas_phy_linkrates *r) +{ + struct sas_phy_linkrates _r; + + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + enum sas_linkrate min, max; + + if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS) + return -EINVAL; + + if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { + max = sas_phy->phy->maximum_linkrate; + min = r->minimum_linkrate; + } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { + max = r->maximum_linkrate; + min = sas_phy->phy->minimum_linkrate; + } else + return -EINVAL; + + _r.maximum_linkrate = max; + _r.minimum_linkrate = min; + + sas_phy->phy->maximum_linkrate = max; + sas_phy->phy->minimum_linkrate = min; + + hisi_sas_phy_enable(hisi_hba, phy_no, 0); + msleep(100); + hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); + hisi_sas_phy_enable(hisi_hba, phy_no, 1); + + return 0; +} + +static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata) +{ + struct hisi_sas_phy *phy = container_of(sas_phy, + struct hisi_sas_phy, sas_phy); + struct sas_ha_struct *sas_ha = sas_phy->ha; + struct hisi_hba *hisi_hba = sas_ha->lldd_ha; + struct device *dev = hisi_hba->dev; + DECLARE_COMPLETION_ONSTACK(completion); + int phy_no = sas_phy->id; + u8 sts = phy->phy_attached; + int ret = 0; + + down(&hisi_hba->sem); + phy->reset_completion = &completion; + + switch (func) { + case PHY_FUNC_HARD_RESET: + hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); + break; + + case PHY_FUNC_LINK_RESET: + hisi_sas_phy_enable(hisi_hba, phy_no, 0); + msleep(100); + hisi_sas_phy_enable(hisi_hba, phy_no, 1); + break; + + case PHY_FUNC_DISABLE: + hisi_sas_phy_enable(hisi_hba, phy_no, 0); + goto out; + + case PHY_FUNC_SET_LINK_RATE: + ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); + break; + + case PHY_FUNC_GET_EVENTS: + if (hisi_hba->hw->get_events) { + hisi_hba->hw->get_events(hisi_hba, phy_no); + goto out; + } + fallthrough; + case PHY_FUNC_RELEASE_SPINUP_HOLD: + default: + ret = -EOPNOTSUPP; + goto out; + } + + if (sts && !wait_for_completion_timeout(&completion, + HISI_SAS_WAIT_PHYUP_TIMEOUT)) { + dev_warn(dev, "phy%d wait phyup timed out for func %d\n", + phy_no, func); + if (phy->in_reset) + ret = 
-ETIMEDOUT; + } + +out: + phy->reset_completion = NULL; + + up(&hisi_hba->sem); + return ret; +} + +static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, + bool reset, int pmp, u8 *fis) +{ + struct ata_taskfile tf; + + ata_tf_init(dev, &tf); + if (reset) + tf.ctl |= ATA_SRST; + else + tf.ctl &= ~ATA_SRST; + tf.command = ATA_CMD_DEV_RESET; + ata_tf_to_fis(&tf, pmp, 0, fis); +} + +static int hisi_sas_softreset_ata_disk(struct domain_device *device) +{ + u8 fis[20] = {0}; + struct ata_port *ap = device->sata_dev.ap; + struct ata_link *link; + int rc = TMF_RESP_FUNC_FAILED; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct device *dev = hisi_hba->dev; + + ata_for_each_link(link, ap, EDGE) { + int pmp = sata_srst_pmp(link); + + hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); + rc = sas_execute_ata_cmd(device, fis, -1); + if (rc != TMF_RESP_FUNC_COMPLETE) + break; + } + + if (rc == TMF_RESP_FUNC_COMPLETE) { + ata_for_each_link(link, ap, EDGE) { + int pmp = sata_srst_pmp(link); + + hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); + rc = sas_execute_ata_cmd(device, fis, -1); + if (rc != TMF_RESP_FUNC_COMPLETE) + dev_err(dev, "ata disk %016llx de-reset failed\n", + SAS_ADDR(device->sas_addr)); + } + } else { + dev_err(dev, "ata disk %016llx reset failed\n", + SAS_ADDR(device->sas_addr)); + } + + if (rc == TMF_RESP_FUNC_COMPLETE) + hisi_sas_release_task(hisi_hba, device); + + return rc; +} + +static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) +{ + u32 state = hisi_hba->hw->get_phys_state(hisi_hba); + int i; + + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { + struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; + struct domain_device *device = sas_dev->sas_device; + struct asd_sas_port *sas_port; + struct hisi_sas_port *port; + struct hisi_sas_phy *phy = NULL; + struct asd_sas_phy *sas_phy; + + if ((sas_dev->dev_type == SAS_PHY_UNUSED) + || !device || !device->port) + continue; + + sas_port = device->port; + port = to_hisi_sas_port(sas_port); + + spin_lock(&sas_port->phy_list_lock); + list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) + if (state & BIT(sas_phy->id)) { + phy = sas_phy->lldd_phy; + break; + } + spin_unlock(&sas_port->phy_list_lock); + + if (phy) { + port->id = phy->port_id; + + /* Update linkrate of directly attached device. 
*/ + if (!device->parent) + device->linkrate = phy->sas_phy.linkrate; + + hisi_hba->hw->setup_itct(hisi_hba, sas_dev); + } else if (!port->port_attached) + port->id = 0xff; + } +} + +static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) +{ + struct asd_sas_port *_sas_port = NULL; + int phy_no; + + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct asd_sas_port *sas_port = sas_phy->port; + bool do_port_check = _sas_port != sas_port; + + if (!sas_phy->phy->enabled) + continue; + + /* Report PHY state change to libsas */ + if (state & BIT(phy_no)) { + if (do_port_check && sas_port && sas_port->port_dev) { + struct domain_device *dev = sas_port->port_dev; + + _sas_port = sas_port; + + if (dev_is_expander(dev->dev_type)) + sas_notify_port_event(sas_phy, + PORTE_BROADCAST_RCVD, + GFP_KERNEL); + } + } else { + hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); + } + } +} + +static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) +{ + struct hisi_sas_device *sas_dev; + struct domain_device *device; + int i; + + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { + sas_dev = &hisi_hba->devices[i]; + device = sas_dev->sas_device; + + if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) + continue; + + hisi_sas_init_device(device); + } +} + +static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, + struct asd_sas_port *sas_port, + struct domain_device *device) +{ + struct ata_port *ap = device->sata_dev.ap; + struct device *dev = hisi_hba->dev; + int rc = TMF_RESP_FUNC_FAILED; + struct ata_link *link; + u8 fis[20] = {0}; + int i; + + for (i = 0; i < hisi_hba->n_phy; i++) { + if (!(sas_port->phy_mask & BIT(i))) + continue; + + ata_for_each_link(link, ap, EDGE) { + int pmp = sata_srst_pmp(link); + + hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); + rc = sas_execute_ata_cmd(device, fis, i); + if (rc != TMF_RESP_FUNC_COMPLETE) { + dev_err(dev, "phy%d ata reset failed rc=%d\n", + i, rc); + break; + } + } + } +} + +static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + int port_no, rc, i; + + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { + struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; + struct domain_device *device = sas_dev->sas_device; + + if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) + continue; + + rc = hisi_sas_internal_task_abort_dev(sas_dev, false); + if (rc < 0) + dev_err(dev, "STP reject: abort dev failed %d\n", rc); + } + + for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) { + struct hisi_sas_port *port = &hisi_hba->port[port_no]; + struct asd_sas_port *sas_port = &port->sas_port; + struct domain_device *port_dev = sas_port->port_dev; + struct domain_device *device; + + if (!port_dev || !dev_is_expander(port_dev->dev_type)) + continue; + + /* Try to find a SATA device */ + list_for_each_entry(device, &sas_port->dev_list, + dev_list_node) { + if (dev_is_sata(device)) { + hisi_sas_send_ata_reset_each_phy(hisi_hba, + sas_port, + device); + break; + } + } + } +} + +void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) +{ + struct Scsi_Host *shost = hisi_hba->shost; + + hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); + + scsi_block_requests(shost); + hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); + + del_timer_sync(&hisi_hba->timer); + + set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); +} 
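hisi_sas_controller_reset_prepare() above ends by setting HISI_SAS_REJECT_CMD_BIT; together with the down(&hisi_hba->sem)/up(&hisi_hba->sem) pair in hisi_sas_queue_command(), this makes new submitters sleep on the semaphore that the reset path holds until hisi_sas_controller_reset_done() releases it. A compressed sketch of that handshake; the function below is illustrative, the real check sits inline in hisi_sas_queue_command().

/* Reset side:
 *	down(&hisi_hba->sem);                      -- taken in prereset
 *	set_bit(HISI_SAS_REJECT_CMD_BIT, &flags);  -- prepare (above)
 *	... soft_reset(), re-init PHYs ...
 *	clear_bit(HISI_SAS_REJECT_CMD_BIT, &flags);
 *	up(&hisi_hba->sem);                        -- done (below)
 *
 * Submission side: */
static int example_wait_out_reset(struct hisi_hba *hisi_hba, gfp_t gfp_flags)
{
	if (!test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))
		return 0;
	/* Cannot sleep from atomic submission contexts. */
	if (!gfpflags_allow_blocking(gfp_flags))
		return -EINVAL;
	/* Block until the reset path drops the semaphore, then continue. */
	down(&hisi_hba->sem);
	up(&hisi_hba->sem);
	return 0;
}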
+EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare); + +static void hisi_sas_async_init_wait_phyup(void *data, async_cookie_t cookie) +{ + struct hisi_sas_phy *phy = data; + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct device *dev = hisi_hba->dev; + DECLARE_COMPLETION_ONSTACK(completion); + int phy_no = phy->sas_phy.id; + + phy->reset_completion = &completion; + hisi_sas_phy_enable(hisi_hba, phy_no, 1); + if (!wait_for_completion_timeout(&completion, + HISI_SAS_WAIT_PHYUP_TIMEOUT)) + dev_warn(dev, "phy%d wait phyup timed out\n", phy_no); + + phy->reset_completion = NULL; +} + +void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) +{ + struct Scsi_Host *shost = hisi_hba->shost; + ASYNC_DOMAIN_EXCLUSIVE(async); + int phy_no; + + /* Init and wait for PHYs to come up and all libsas event finished. */ + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + + if (!(hisi_hba->phy_state & BIT(phy_no))) + continue; + + async_schedule_domain(hisi_sas_async_init_wait_phyup, + phy, &async); + } + + async_synchronize_full_domain(&async); + hisi_sas_refresh_port_id(hisi_hba); + clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + + if (hisi_hba->reject_stp_links_msk) + hisi_sas_terminate_stp_reject(hisi_hba); + hisi_sas_reset_init_all_devices(hisi_hba); + scsi_unblock_requests(shost); + clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); + up(&hisi_hba->sem); + + hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state); +} +EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); + +static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba) +{ + if (!hisi_hba->hw->soft_reset) + return -ENOENT; + + down(&hisi_hba->sem); + if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { + up(&hisi_hba->sem); + return -EPERM; + } + + if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) + hisi_hba->hw->debugfs_snapshot_regs(hisi_hba); + + return 0; +} + +static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + struct Scsi_Host *shost = hisi_hba->shost; + int rc; + + dev_info(dev, "controller resetting...\n"); + hisi_sas_controller_reset_prepare(hisi_hba); + + rc = hisi_hba->hw->soft_reset(hisi_hba); + if (rc) { + dev_warn(dev, "controller reset failed (%d)\n", rc); + clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + up(&hisi_hba->sem); + scsi_unblock_requests(shost); + clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); + return rc; + } + clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); + + hisi_sas_controller_reset_done(hisi_hba); + dev_info(dev, "controller reset complete\n"); + + return 0; +} + +static int hisi_sas_abort_task(struct sas_task *task) +{ + struct hisi_sas_internal_abort_data internal_abort_data = { false }; + struct domain_device *device = task->dev; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_slot *slot = task->lldd_task; + struct hisi_hba *hisi_hba; + struct device *dev; + int rc = TMF_RESP_FUNC_FAILED; + unsigned long flags; + + if (!sas_dev) + return TMF_RESP_FUNC_FAILED; + + hisi_hba = dev_to_hisi_hba(task->dev); + dev = hisi_hba->dev; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + struct hisi_sas_cq *cq; + + if (slot) { + /* + * sync irq or poll queue to avoid free'ing task + * before using task in IO completion + */ + cq = &hisi_hba->cq[slot->dlvry_queue]; + hisi_sas_sync_cq(cq); + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + rc = 
TMF_RESP_FUNC_COMPLETE; + goto out; + } + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + if (!slot) + goto out; + + if (task->task_proto & SAS_PROTOCOL_SSP) { + u16 tag = slot->idx; + int rc2; + + rc = sas_abort_task(task, tag); + rc2 = sas_execute_internal_abort_single(device, tag, + slot->dlvry_queue, &internal_abort_data); + if (rc2 < 0) { + dev_err(dev, "abort task: internal abort (%d)\n", rc2); + return TMF_RESP_FUNC_FAILED; + } + + /* + * If the TMF finds that the IO is not in the device and also + * the internal abort does not succeed, then it is safe to + * free the slot. + * Note: if the internal abort succeeds then the slot + * will have already been completed + */ + if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { + if (task->lldd_task) + hisi_sas_do_release_task(hisi_hba, task, slot, true); + } + } else if (task->task_proto & SAS_PROTOCOL_SATA || + task->task_proto & SAS_PROTOCOL_STP) { + if (task->dev->dev_type == SAS_SATA_DEV) { + struct ata_queued_cmd *qc = task->uldd_task; + + rc = hisi_sas_internal_task_abort_dev(sas_dev, false); + if (rc < 0) { + dev_err(dev, "abort task: internal abort failed\n"); + goto out; + } + hisi_sas_dereg_device(hisi_hba, device); + + /* + * If an ATA internal command times out in ATA EH, it + * need to execute soft reset, so check the scsicmd + */ + if ((sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) && + qc && qc->scsicmd) { + hisi_sas_do_release_task(hisi_hba, task, slot, true); + rc = TMF_RESP_FUNC_COMPLETE; + } else { + rc = hisi_sas_softreset_ata_disk(device); + } + } + } else if (task->task_proto & SAS_PROTOCOL_SMP) { + /* SMP */ + u32 tag = slot->idx; + struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; + + rc = sas_execute_internal_abort_single(device, + tag, slot->dlvry_queue, + &internal_abort_data); + if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && + task->lldd_task) { + /* + * sync irq or poll queue to avoid free'ing task + * before using task in IO completion + */ + hisi_sas_sync_cq(cq); + slot->task = NULL; + } + } + +out: + if (rc != TMF_RESP_FUNC_COMPLETE) + dev_notice(dev, "abort task: rc=%d\n", rc); + return rc; +} + +static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) +{ + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct device *dev = hisi_hba->dev; + int rc; + + rc = hisi_sas_internal_task_abort_dev(sas_dev, false); + if (rc < 0) { + dev_err(dev, "abort task set: internal abort rc=%d\n", rc); + return TMF_RESP_FUNC_FAILED; + } + hisi_sas_dereg_device(hisi_hba, device); + + rc = sas_abort_task_set(device, lun); + if (rc == TMF_RESP_FUNC_COMPLETE) + hisi_sas_release_task(hisi_hba, device); + + return rc; +} + +static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) +{ + struct sas_phy *local_phy = sas_get_local_phy(device); + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct sas_ha_struct *sas_ha = &hisi_hba->sha; + int rc, reset_type; + + if (!local_phy->enabled) { + sas_put_local_phy(local_phy); + return -ENODEV; + } + + if (scsi_is_sas_phy_local(local_phy)) { + struct asd_sas_phy *sas_phy = + sas_ha->sas_phy[local_phy->number]; + struct hisi_sas_phy *phy = + container_of(sas_phy, struct hisi_sas_phy, sas_phy); + unsigned long flags; + + spin_lock_irqsave(&phy->lock, flags); + phy->in_reset = 1; + spin_unlock_irqrestore(&phy->lock, flags); + } + + reset_type = 
(sas_dev->dev_status == HISI_SAS_DEV_INIT || + !dev_is_sata(device)) ? true : false; + + rc = sas_phy_reset(local_phy, reset_type); + sas_put_local_phy(local_phy); + + if (scsi_is_sas_phy_local(local_phy)) { + struct asd_sas_phy *sas_phy = + sas_ha->sas_phy[local_phy->number]; + struct hisi_sas_phy *phy = + container_of(sas_phy, struct hisi_sas_phy, sas_phy); + unsigned long flags; + + spin_lock_irqsave(&phy->lock, flags); + phy->in_reset = 0; + spin_unlock_irqrestore(&phy->lock, flags); + + /* report PHY down if timed out */ + if (rc == -ETIMEDOUT) + hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL); + return rc; + } + + /* Remote phy */ + if (rc) + return rc; + + if (dev_is_sata(device)) { + struct ata_link *link = &device->sata_dev.ap->link; + + rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT, + smp_ata_check_ready_type); + } else { + msleep(2000); + } + + return rc; +} + +static int hisi_sas_I_T_nexus_reset(struct domain_device *device) +{ + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct device *dev = hisi_hba->dev; + int rc; + + if (sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) + sas_dev->dev_status = HISI_SAS_DEV_NORMAL; + + rc = hisi_sas_internal_task_abort_dev(sas_dev, false); + if (rc < 0) { + dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); + return TMF_RESP_FUNC_FAILED; + } + hisi_sas_dereg_device(hisi_hba, device); + + rc = hisi_sas_debug_I_T_nexus_reset(device); + if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) { + struct sas_phy *local_phy; + + rc = hisi_sas_softreset_ata_disk(device); + switch (rc) { + case -ECOMM: + rc = -ENODEV; + break; + case TMF_RESP_FUNC_FAILED: + case -EMSGSIZE: + case -EIO: + local_phy = sas_get_local_phy(device); + rc = sas_phy_enable(local_phy, 0); + if (!rc) { + local_phy->enabled = 0; + dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n", + SAS_ADDR(device->sas_addr), rc); + rc = -ENODEV; + } + sas_put_local_phy(local_phy); + break; + default: + break; + } + } + + if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) + hisi_sas_release_task(hisi_hba, device); + + return rc; +} + +static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) +{ + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct device *dev = hisi_hba->dev; + int rc = TMF_RESP_FUNC_FAILED; + + /* Clear internal IO and then lu reset */ + rc = hisi_sas_internal_task_abort_dev(sas_dev, false); + if (rc < 0) { + dev_err(dev, "lu_reset: internal abort failed\n"); + goto out; + } + hisi_sas_dereg_device(hisi_hba, device); + + if (dev_is_sata(device)) { + struct sas_phy *phy; + + phy = sas_get_local_phy(device); + + rc = sas_phy_reset(phy, true); + + if (rc == 0) + hisi_sas_release_task(hisi_hba, device); + sas_put_local_phy(phy); + } else { + rc = sas_lu_reset(device, lun); + if (rc == TMF_RESP_FUNC_COMPLETE) + hisi_sas_release_task(hisi_hba, device); + } +out: + if (rc != TMF_RESP_FUNC_COMPLETE) + dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", + sas_dev->device_id, rc); + return rc; +} + +static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie) +{ + struct domain_device *device = data; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + int rc; + + rc = hisi_sas_debug_I_T_nexus_reset(device); + if (rc != TMF_RESP_FUNC_COMPLETE) + dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n", + SAS_ADDR(device->sas_addr), rc); +} + +static int 
hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) +{ + struct hisi_hba *hisi_hba = sas_ha->lldd_ha; + HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); + ASYNC_DOMAIN_EXCLUSIVE(async); + int i; + + queue_work(hisi_hba->wq, &r.work); + wait_for_completion(r.completion); + if (!r.done) + return TMF_RESP_FUNC_FAILED; + + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { + struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; + struct domain_device *device = sas_dev->sas_device; + + if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device || + dev_is_expander(device->dev_type)) + continue; + + async_schedule_domain(hisi_sas_async_I_T_nexus_reset, + device, &async); + } + + async_synchronize_full_domain(&async); + hisi_sas_release_tasks(hisi_hba); + + return TMF_RESP_FUNC_COMPLETE; +} + +static int hisi_sas_query_task(struct sas_task *task) +{ + int rc = TMF_RESP_FUNC_FAILED; + + if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { + struct hisi_sas_slot *slot = task->lldd_task; + u32 tag = slot->idx; + + rc = sas_query_task(task, tag); + switch (rc) { + /* The task is still in Lun, release it then */ + case TMF_RESP_FUNC_SUCC: + /* The task is not in Lun or failed, reset the phy */ + case TMF_RESP_FUNC_FAILED: + case TMF_RESP_FUNC_COMPLETE: + break; + default: + rc = TMF_RESP_FUNC_FAILED; + break; + } + } + return rc; +} + +static bool hisi_sas_internal_abort_timeout(struct sas_task *task, + void *data) +{ + struct domain_device *device = task->dev; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct hisi_sas_internal_abort_data *timeout = data; + + if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) + queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); + + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + pr_err("Internal abort: timeout %016llx\n", + SAS_ADDR(device->sas_addr)); + } else { + struct hisi_sas_slot *slot = task->lldd_task; + + set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); + + if (slot) { + struct hisi_sas_cq *cq = + &hisi_hba->cq[slot->dlvry_queue]; + /* + * sync irq or poll queue to avoid free'ing task + * before using task in IO completion + */ + hisi_sas_sync_cq(cq); + slot->task = NULL; + } + + if (timeout->rst_ha_timeout) { + pr_err("Internal abort: timeout and not done %016llx. 
Queuing reset.\n", + SAS_ADDR(device->sas_addr)); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } else { + pr_err("Internal abort: timeout and not done %016llx.\n", + SAS_ADDR(device->sas_addr)); + } + + return true; + } + + return false; +} + +static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) +{ + hisi_sas_port_notify_formed(sas_phy); +} + +static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, + u8 reg_index, u8 reg_count, u8 *write_data) +{ + struct hisi_hba *hisi_hba = sha->lldd_ha; + + if (!hisi_hba->hw->write_gpio) + return -EOPNOTSUPP; + + return hisi_hba->hw->write_gpio(hisi_hba, reg_type, + reg_index, reg_count, write_data); +} + +static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) +{ + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_phy *sphy = sas_phy->phy; + unsigned long flags; + + phy->phy_attached = 0; + phy->phy_type = 0; + phy->port = NULL; + + spin_lock_irqsave(&phy->lock, flags); + if (phy->enable) + sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; + else + sphy->negotiated_linkrate = SAS_PHY_DISABLED; + spin_unlock_irqrestore(&phy->lock, flags); +} + +void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy, + gfp_t gfp_flags) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct device *dev = hisi_hba->dev; + + if (rdy) { + /* Phy down but ready */ + hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags); + hisi_sas_port_notify_formed(sas_phy); + } else { + struct hisi_sas_port *port = phy->port; + + if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) || + phy->in_reset) { + dev_info(dev, "ignore flutter phy%d down\n", phy_no); + return; + } + /* Phy down and not ready */ + sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags); + sas_phy_disconnected(sas_phy); + + if (port) { + if (phy->phy_type & PORT_TYPE_SAS) { + int port_id = port->id; + + if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, + port_id)) + port->port_attached = 0; + } else if (phy->phy_type & PORT_TYPE_SATA) + port->port_attached = 0; + } + hisi_sas_phy_disconnected(phy); + } +} +EXPORT_SYMBOL_GPL(hisi_sas_phy_down); + +void hisi_sas_phy_bcast(struct hisi_sas_phy *phy) +{ + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct hisi_hba *hisi_hba = phy->hisi_hba; + + if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) + return; + + sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC); +} +EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast); + +int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type) +{ + struct hisi_hba *hisi_hba = shost_priv(shost); + + if (reset_type != SCSI_ADAPTER_RESET) + return -EOPNOTSUPP; + + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_sas_host_reset); + +struct scsi_transport_template *hisi_sas_stt; +EXPORT_SYMBOL_GPL(hisi_sas_stt); + +static struct sas_domain_function_template hisi_sas_transport_ops = { + .lldd_dev_found = hisi_sas_dev_found, + .lldd_dev_gone = hisi_sas_dev_gone, + .lldd_execute_task = hisi_sas_queue_command, + .lldd_control_phy = hisi_sas_control_phy, + .lldd_abort_task = hisi_sas_abort_task, + .lldd_abort_task_set = hisi_sas_abort_task_set, + .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, + .lldd_lu_reset = hisi_sas_lu_reset, + .lldd_query_task = hisi_sas_query_task, + .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, + .lldd_port_formed = hisi_sas_port_formed, + .lldd_write_gpio = hisi_sas_write_gpio, + .lldd_tmf_aborted = hisi_sas_tmf_aborted, + .lldd_abort_timeout = 
hisi_sas_internal_abort_timeout, +}; + +void hisi_sas_init_mem(struct hisi_hba *hisi_hba) +{ + int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS; + struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; + + for (i = 0; i < hisi_hba->queue_count; i++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + struct hisi_sas_dq *dq = &hisi_hba->dq[i]; + struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i]; + + s = sizeof(struct hisi_sas_cmd_hdr); + for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) + memset(&cmd_hdr[j], 0, s); + + dq->wr_point = 0; + + s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; + memset(hisi_hba->complete_hdr[i], 0, s); + cq->rd_point = 0; + } + + s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; + memset(hisi_hba->initial_fis, 0, s); + + s = max_command_entries * sizeof(struct hisi_sas_iost); + memset(hisi_hba->iost, 0, s); + + s = max_command_entries * sizeof(struct hisi_sas_breakpoint); + memset(hisi_hba->breakpoint, 0, s); + + s = sizeof(struct hisi_sas_sata_breakpoint); + for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++) + memset(&sata_breakpoint[j], 0, s); +} +EXPORT_SYMBOL_GPL(hisi_sas_init_mem); + +int hisi_sas_alloc(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS; + int max_command_entries_ru, sz_slot_buf_ru; + int blk_cnt, slots_per_blk; + + sema_init(&hisi_hba->sem, 1); + spin_lock_init(&hisi_hba->lock); + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_init(hisi_hba, i); + hisi_hba->port[i].port_attached = 0; + hisi_hba->port[i].id = -1; + } + + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { + hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; + hisi_hba->devices[i].device_id = i; + hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT; + } + + for (i = 0; i < hisi_hba->queue_count; i++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + struct hisi_sas_dq *dq = &hisi_hba->dq[i]; + + /* Completion queue structure */ + cq->id = i; + cq->hisi_hba = hisi_hba; + spin_lock_init(&cq->poll_lock); + + /* Delivery queue structure */ + spin_lock_init(&dq->lock); + INIT_LIST_HEAD(&dq->list); + dq->id = i; + dq->hisi_hba = hisi_hba; + + /* Delivery queue */ + s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; + hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s, + &hisi_hba->cmd_hdr_dma[i], + GFP_KERNEL); + if (!hisi_hba->cmd_hdr[i]) + goto err_out; + + /* Completion queue */ + s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; + hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s, + &hisi_hba->complete_hdr_dma[i], + GFP_KERNEL); + if (!hisi_hba->complete_hdr[i]) + goto err_out; + } + + s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); + hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, + GFP_KERNEL); + if (!hisi_hba->itct) + goto err_out; + + hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, + sizeof(struct hisi_sas_slot), + GFP_KERNEL); + if (!hisi_hba->slot_info) + goto err_out; + + /* roundup to avoid overly large block size */ + max_command_entries_ru = roundup(max_command_entries, 64); + if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) + sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table); + else + sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); + sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); + s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE); + blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; + slots_per_blk = s / sz_slot_buf_ru; + + for (i = 0; i < blk_cnt; 
i++) { + int slot_index = i * slots_per_blk; + dma_addr_t buf_dma; + void *buf; + + buf = dmam_alloc_coherent(dev, s, &buf_dma, + GFP_KERNEL); + if (!buf) + goto err_out; + + for (j = 0; j < slots_per_blk; j++, slot_index++) { + struct hisi_sas_slot *slot; + + slot = &hisi_hba->slot_info[slot_index]; + slot->buf = buf; + slot->buf_dma = buf_dma; + slot->idx = slot_index; + + buf += sz_slot_buf_ru; + buf_dma += sz_slot_buf_ru; + } + } + + s = max_command_entries * sizeof(struct hisi_sas_iost); + hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma, + GFP_KERNEL); + if (!hisi_hba->iost) + goto err_out; + + s = max_command_entries * sizeof(struct hisi_sas_breakpoint); + hisi_hba->breakpoint = dmam_alloc_coherent(dev, s, + &hisi_hba->breakpoint_dma, + GFP_KERNEL); + if (!hisi_hba->breakpoint) + goto err_out; + + s = hisi_hba->slot_index_count = max_command_entries; + hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL); + if (!hisi_hba->slot_index_tags) + goto err_out; + + s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; + hisi_hba->initial_fis = dmam_alloc_coherent(dev, s, + &hisi_hba->initial_fis_dma, + GFP_KERNEL); + if (!hisi_hba->initial_fis) + goto err_out; + + s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); + hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s, + &hisi_hba->sata_breakpoint_dma, + GFP_KERNEL); + if (!hisi_hba->sata_breakpoint) + goto err_out; + + hisi_hba->last_slot_index = 0; + + hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); + if (!hisi_hba->wq) { + dev_err(dev, "sas_alloc: failed to create workqueue\n"); + goto err_out; + } + + return 0; +err_out: + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(hisi_sas_alloc); + +void hisi_sas_free(struct hisi_hba *hisi_hba) +{ + int i; + + for (i = 0; i < hisi_hba->n_phy; i++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[i]; + + del_timer_sync(&phy->timer); + } + + if (hisi_hba->wq) + destroy_workqueue(hisi_hba->wq); +} +EXPORT_SYMBOL_GPL(hisi_sas_free); + +void hisi_sas_rst_work_handler(struct work_struct *work) +{ + struct hisi_hba *hisi_hba = + container_of(work, struct hisi_hba, rst_work); + + if (hisi_sas_controller_prereset(hisi_hba)) + return; + + hisi_sas_controller_reset(hisi_hba); +} +EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler); + +void hisi_sas_sync_rst_work_handler(struct work_struct *work) +{ + struct hisi_sas_rst *rst = + container_of(work, struct hisi_sas_rst, work); + + if (hisi_sas_controller_prereset(rst->hisi_hba)) + goto rst_complete; + + if (!hisi_sas_controller_reset(rst->hisi_hba)) + rst->done = true; +rst_complete: + complete(rst->completion); +} +EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler); + +int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + struct platform_device *pdev = hisi_hba->platform_dev; + struct device_node *np = pdev ? pdev->dev.of_node : NULL; + struct clk *refclk; + + if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, + SAS_ADDR_SIZE)) { + dev_err(dev, "could not get property sas-addr\n"); + return -ENOENT; + } + + if (np) { + /* + * These properties are only required for platform device-based + * controller with DT firmware. 
+ */ + hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, + "hisilicon,sas-syscon"); + if (IS_ERR(hisi_hba->ctrl)) { + dev_err(dev, "could not get syscon\n"); + return -ENOENT; + } + + if (device_property_read_u32(dev, "ctrl-reset-reg", + &hisi_hba->ctrl_reset_reg)) { + dev_err(dev, "could not get property ctrl-reset-reg\n"); + return -ENOENT; + } + + if (device_property_read_u32(dev, "ctrl-reset-sts-reg", + &hisi_hba->ctrl_reset_sts_reg)) { + dev_err(dev, "could not get property ctrl-reset-sts-reg\n"); + return -ENOENT; + } + + if (device_property_read_u32(dev, "ctrl-clock-ena-reg", + &hisi_hba->ctrl_clock_ena_reg)) { + dev_err(dev, "could not get property ctrl-clock-ena-reg\n"); + return -ENOENT; + } + } + + refclk = devm_clk_get(dev, NULL); + if (IS_ERR(refclk)) + dev_dbg(dev, "no ref clk property\n"); + else + hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; + + if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { + dev_err(dev, "could not get property phy-count\n"); + return -ENOENT; + } + + if (device_property_read_u32(dev, "queue-count", + &hisi_hba->queue_count)) { + dev_err(dev, "could not get property queue-count\n"); + return -ENOENT; + } + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); + +static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, + const struct hisi_sas_hw *hw) +{ + struct resource *res; + struct Scsi_Host *shost; + struct hisi_hba *hisi_hba; + struct device *dev = &pdev->dev; + int error; + + shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); + if (!shost) { + dev_err(dev, "scsi host alloc failed\n"); + return NULL; + } + hisi_hba = shost_priv(shost); + + INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); + hisi_hba->hw = hw; + hisi_hba->dev = dev; + hisi_hba->platform_dev = pdev; + hisi_hba->shost = shost; + SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; + + timer_setup(&hisi_hba->timer, NULL, 0); + + if (hisi_sas_get_fw_info(hisi_hba) < 0) + goto err_out; + + error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (error) { + dev_err(dev, "No usable DMA addressing method\n"); + goto err_out; + } + + hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(hisi_hba->regs)) + goto err_out; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res); + if (IS_ERR(hisi_hba->sgpio_regs)) + goto err_out; + } + + if (hisi_sas_alloc(hisi_hba)) { + hisi_sas_free(hisi_hba); + goto err_out; + } + + return shost; +err_out: + scsi_host_put(shost); + dev_err(dev, "shost alloc failed\n"); + return NULL; +} + +static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba) +{ + if (hisi_hba->hw->interrupt_preinit) + return hisi_hba->hw->interrupt_preinit(hisi_hba); + return 0; +} + +int hisi_sas_probe(struct platform_device *pdev, + const struct hisi_sas_hw *hw) +{ + struct Scsi_Host *shost; + struct hisi_hba *hisi_hba; + struct device *dev = &pdev->dev; + struct asd_sas_phy **arr_phy; + struct asd_sas_port **arr_port; + struct sas_ha_struct *sha; + int rc, phy_nr, port_nr, i; + + shost = hisi_sas_shost_alloc(pdev, hw); + if (!shost) + return -ENOMEM; + + sha = SHOST_TO_SAS_HA(shost); + hisi_hba = shost_priv(shost); + platform_set_drvdata(pdev, sha); + + phy_nr = port_nr = hisi_hba->n_phy; + + arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); + arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); + if (!arr_phy || !arr_port) { + rc = -ENOMEM; + goto err_out_ha; + } + + sha->sas_phy = arr_phy; + 
sha->sas_port = arr_port; + sha->lldd_ha = hisi_hba; + + shost->transportt = hisi_sas_stt; + shost->max_id = HISI_SAS_MAX_DEVICES; + shost->max_lun = ~0; + shost->max_channel = 1; + shost->max_cmd_len = 16; + if (hisi_hba->hw->slot_index_alloc) { + shost->can_queue = HISI_SAS_MAX_COMMANDS; + shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS; + } else { + shost->can_queue = HISI_SAS_UNRESERVED_IPTT; + shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; + } + + sha->sas_ha_name = DRV_NAME; + sha->dev = hisi_hba->dev; + sha->sas_addr = &hisi_hba->sas_addr[0]; + sha->num_phys = hisi_hba->n_phy; + sha->shost = hisi_hba->shost; + + for (i = 0; i < hisi_hba->n_phy; i++) { + sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; + sha->sas_port[i] = &hisi_hba->port[i].sas_port; + } + + rc = hisi_sas_interrupt_preinit(hisi_hba); + if (rc) + goto err_out_ha; + + rc = scsi_add_host(shost, &pdev->dev); + if (rc) + goto err_out_ha; + + rc = sas_register_ha(sha); + if (rc) + goto err_out_register_ha; + + rc = hisi_hba->hw->hw_init(hisi_hba); + if (rc) + goto err_out_hw_init; + + scsi_scan_host(shost); + + return 0; + +err_out_hw_init: + sas_unregister_ha(sha); +err_out_register_ha: + scsi_remove_host(shost); +err_out_ha: + hisi_sas_free(hisi_hba); + scsi_host_put(shost); + return rc; +} +EXPORT_SYMBOL_GPL(hisi_sas_probe); + +void hisi_sas_remove(struct platform_device *pdev) +{ + struct sas_ha_struct *sha = platform_get_drvdata(pdev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + struct Scsi_Host *shost = sha->shost; + + del_timer_sync(&hisi_hba->timer); + + sas_unregister_ha(sha); + sas_remove_host(shost); + + hisi_sas_free(hisi_hba); + scsi_host_put(shost); +} +EXPORT_SYMBOL_GPL(hisi_sas_remove); + +#if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE) +#define DEBUGFS_ENABLE_DEFAULT "enabled" +bool hisi_sas_debugfs_enable = true; +u32 hisi_sas_debugfs_dump_count = 50; +#else +#define DEBUGFS_ENABLE_DEFAULT "disabled" +bool hisi_sas_debugfs_enable; +u32 hisi_sas_debugfs_dump_count = 1; +#endif + +EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); +module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); +MODULE_PARM_DESC(hisi_sas_debugfs_enable, + "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")"); + +EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count); +module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444); +MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow"); + +struct dentry *hisi_sas_debugfs_dir; +EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir); + +static __init int hisi_sas_init(void) +{ + hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); + if (!hisi_sas_stt) + return -ENOMEM; + + if (hisi_sas_debugfs_enable) { + hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); + if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) { + pr_info("hisi_sas: Limiting debugfs dump count\n"); + hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP; + } + } + + return 0; +} + +static __exit void hisi_sas_exit(void) +{ + sas_release_transport(hisi_sas_stt); + + debugfs_remove(hisi_sas_debugfs_dir); +} + +module_init(hisi_sas_init); +module_exit(hisi_sas_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Garry "); +MODULE_DESCRIPTION("HISILICON SAS controller driver"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c new file mode 100644 index 000000000..3c555579f --- /dev/null +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -0,0 +1,1816 @@ +// 
SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2015 Linaro Ltd. + * Copyright (c) 2015 Hisilicon Limited. + */ + +#include "hisi_sas.h" +#define DRV_NAME "hisi_sas_v1_hw" + +/* global registers need init*/ +#define DLVRY_QUEUE_ENABLE 0x0 +#define IOST_BASE_ADDR_LO 0x8 +#define IOST_BASE_ADDR_HI 0xc +#define ITCT_BASE_ADDR_LO 0x10 +#define ITCT_BASE_ADDR_HI 0x14 +#define BROKEN_MSG_ADDR_LO 0x18 +#define BROKEN_MSG_ADDR_HI 0x1c +#define PHY_CONTEXT 0x20 +#define PHY_STATE 0x24 +#define PHY_PORT_NUM_MA 0x28 +#define PORT_STATE 0x2c +#define PHY_CONN_RATE 0x30 +#define HGC_TRANS_TASK_CNT_LIMIT 0x38 +#define AXI_AHB_CLK_CFG 0x3c +#define HGC_SAS_TXFAIL_RETRY_CTRL 0x84 +#define HGC_GET_ITV_TIME 0x90 +#define DEVICE_MSG_WORK_MODE 0x94 +#define I_T_NEXUS_LOSS_TIME 0xa0 +#define BUS_INACTIVE_LIMIT_TIME 0xa8 +#define REJECT_TO_OPEN_LIMIT_TIME 0xac +#define CFG_AGING_TIME 0xbc +#define CFG_AGING_TIME_ITCT_REL_OFF 0 +#define CFG_AGING_TIME_ITCT_REL_MSK (0x1 << CFG_AGING_TIME_ITCT_REL_OFF) +#define HGC_DFX_CFG2 0xc0 +#define FIS_LIST_BADDR_L 0xc4 +#define CFG_1US_TIMER_TRSH 0xcc +#define CFG_SAS_CONFIG 0xd4 +#define HGC_IOST_ECC_ADDR 0x140 +#define HGC_IOST_ECC_ADDR_BAD_OFF 16 +#define HGC_IOST_ECC_ADDR_BAD_MSK (0x3ff << HGC_IOST_ECC_ADDR_BAD_OFF) +#define HGC_DQ_ECC_ADDR 0x144 +#define HGC_DQ_ECC_ADDR_BAD_OFF 16 +#define HGC_DQ_ECC_ADDR_BAD_MSK (0xfff << HGC_DQ_ECC_ADDR_BAD_OFF) +#define HGC_INVLD_DQE_INFO 0x148 +#define HGC_INVLD_DQE_INFO_DQ_OFF 0 +#define HGC_INVLD_DQE_INFO_DQ_MSK (0xffff << HGC_INVLD_DQE_INFO_DQ_OFF) +#define HGC_INVLD_DQE_INFO_TYPE_OFF 16 +#define HGC_INVLD_DQE_INFO_TYPE_MSK (0x1 << HGC_INVLD_DQE_INFO_TYPE_OFF) +#define HGC_INVLD_DQE_INFO_FORCE_OFF 17 +#define HGC_INVLD_DQE_INFO_FORCE_MSK (0x1 << HGC_INVLD_DQE_INFO_FORCE_OFF) +#define HGC_INVLD_DQE_INFO_PHY_OFF 18 +#define HGC_INVLD_DQE_INFO_PHY_MSK (0x1 << HGC_INVLD_DQE_INFO_PHY_OFF) +#define HGC_INVLD_DQE_INFO_ABORT_OFF 19 +#define HGC_INVLD_DQE_INFO_ABORT_MSK (0x1 << HGC_INVLD_DQE_INFO_ABORT_OFF) +#define HGC_INVLD_DQE_INFO_IPTT_OF_OFF 20 +#define HGC_INVLD_DQE_INFO_IPTT_OF_MSK (0x1 << HGC_INVLD_DQE_INFO_IPTT_OF_OFF) +#define HGC_INVLD_DQE_INFO_SSP_ERR_OFF 21 +#define HGC_INVLD_DQE_INFO_SSP_ERR_MSK (0x1 << HGC_INVLD_DQE_INFO_SSP_ERR_OFF) +#define HGC_INVLD_DQE_INFO_OFL_OFF 22 +#define HGC_INVLD_DQE_INFO_OFL_MSK (0x1 << HGC_INVLD_DQE_INFO_OFL_OFF) +#define HGC_ITCT_ECC_ADDR 0x150 +#define HGC_ITCT_ECC_ADDR_BAD_OFF 16 +#define HGC_ITCT_ECC_ADDR_BAD_MSK (0x3ff << HGC_ITCT_ECC_ADDR_BAD_OFF) +#define HGC_AXI_FIFO_ERR_INFO 0x154 +#define INT_COAL_EN 0x1bc +#define OQ_INT_COAL_TIME 0x1c0 +#define OQ_INT_COAL_CNT 0x1c4 +#define ENT_INT_COAL_TIME 0x1c8 +#define ENT_INT_COAL_CNT 0x1cc +#define OQ_INT_SRC 0x1d0 +#define OQ_INT_SRC_MSK 0x1d4 +#define ENT_INT_SRC1 0x1d8 +#define ENT_INT_SRC2 0x1dc +#define ENT_INT_SRC2_DQ_CFG_ERR_OFF 25 +#define ENT_INT_SRC2_DQ_CFG_ERR_MSK (0x1 << ENT_INT_SRC2_DQ_CFG_ERR_OFF) +#define ENT_INT_SRC2_CQ_CFG_ERR_OFF 27 +#define ENT_INT_SRC2_CQ_CFG_ERR_MSK (0x1 << ENT_INT_SRC2_CQ_CFG_ERR_OFF) +#define ENT_INT_SRC2_AXI_WRONG_INT_OFF 28 +#define ENT_INT_SRC2_AXI_WRONG_INT_MSK (0x1 << ENT_INT_SRC2_AXI_WRONG_INT_OFF) +#define ENT_INT_SRC2_AXI_OVERLF_INT_OFF 29 +#define ENT_INT_SRC2_AXI_OVERLF_INT_MSK (0x1 << ENT_INT_SRC2_AXI_OVERLF_INT_OFF) +#define ENT_INT_SRC_MSK1 0x1e0 +#define ENT_INT_SRC_MSK2 0x1e4 +#define SAS_ECC_INTR 0x1e8 +#define SAS_ECC_INTR_DQ_ECC1B_OFF 0 +#define SAS_ECC_INTR_DQ_ECC1B_MSK (0x1 << SAS_ECC_INTR_DQ_ECC1B_OFF) +#define SAS_ECC_INTR_DQ_ECCBAD_OFF 1 +#define 
SAS_ECC_INTR_DQ_ECCBAD_MSK (0x1 << SAS_ECC_INTR_DQ_ECCBAD_OFF) +#define SAS_ECC_INTR_IOST_ECC1B_OFF 2 +#define SAS_ECC_INTR_IOST_ECC1B_MSK (0x1 << SAS_ECC_INTR_IOST_ECC1B_OFF) +#define SAS_ECC_INTR_IOST_ECCBAD_OFF 3 +#define SAS_ECC_INTR_IOST_ECCBAD_MSK (0x1 << SAS_ECC_INTR_IOST_ECCBAD_OFF) +#define SAS_ECC_INTR_ITCT_ECC1B_OFF 4 +#define SAS_ECC_INTR_ITCT_ECC1B_MSK (0x1 << SAS_ECC_INTR_ITCT_ECC1B_OFF) +#define SAS_ECC_INTR_ITCT_ECCBAD_OFF 5 +#define SAS_ECC_INTR_ITCT_ECCBAD_MSK (0x1 << SAS_ECC_INTR_ITCT_ECCBAD_OFF) +#define SAS_ECC_INTR_MSK 0x1ec +#define HGC_ERR_STAT_EN 0x238 +#define DLVRY_Q_0_BASE_ADDR_LO 0x260 +#define DLVRY_Q_0_BASE_ADDR_HI 0x264 +#define DLVRY_Q_0_DEPTH 0x268 +#define DLVRY_Q_0_WR_PTR 0x26c +#define DLVRY_Q_0_RD_PTR 0x270 +#define COMPL_Q_0_BASE_ADDR_LO 0x4e0 +#define COMPL_Q_0_BASE_ADDR_HI 0x4e4 +#define COMPL_Q_0_DEPTH 0x4e8 +#define COMPL_Q_0_WR_PTR 0x4ec +#define COMPL_Q_0_RD_PTR 0x4f0 +#define HGC_ECC_ERR 0x7d0 + +/* phy registers need init */ +#define PORT_BASE (0x800) + +#define PHY_CFG (PORT_BASE + 0x0) +#define PHY_CFG_ENA_OFF 0 +#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF) +#define PHY_CFG_DC_OPT_OFF 2 +#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF) +#define PROG_PHY_LINK_RATE (PORT_BASE + 0xc) +#define PROG_PHY_LINK_RATE_MAX_OFF 0 +#define PROG_PHY_LINK_RATE_MAX_MSK (0xf << PROG_PHY_LINK_RATE_MAX_OFF) +#define PROG_PHY_LINK_RATE_MIN_OFF 4 +#define PROG_PHY_LINK_RATE_MIN_MSK (0xf << PROG_PHY_LINK_RATE_MIN_OFF) +#define PROG_PHY_LINK_RATE_OOB_OFF 8 +#define PROG_PHY_LINK_RATE_OOB_MSK (0xf << PROG_PHY_LINK_RATE_OOB_OFF) +#define PHY_CTRL (PORT_BASE + 0x14) +#define PHY_CTRL_RESET_OFF 0 +#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) +#define PHY_RATE_NEGO (PORT_BASE + 0x30) +#define PHY_PCN (PORT_BASE + 0x44) +#define SL_TOUT_CFG (PORT_BASE + 0x8c) +#define SL_CONTROL (PORT_BASE + 0x94) +#define SL_CONTROL_NOTIFY_EN_OFF 0 +#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) +#define TX_ID_DWORD0 (PORT_BASE + 0x9c) +#define TX_ID_DWORD1 (PORT_BASE + 0xa0) +#define TX_ID_DWORD2 (PORT_BASE + 0xa4) +#define TX_ID_DWORD3 (PORT_BASE + 0xa8) +#define TX_ID_DWORD4 (PORT_BASE + 0xaC) +#define TX_ID_DWORD5 (PORT_BASE + 0xb0) +#define TX_ID_DWORD6 (PORT_BASE + 0xb4) +#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) +#define RX_IDAF_DWORD1 (PORT_BASE + 0xc8) +#define RX_IDAF_DWORD2 (PORT_BASE + 0xcc) +#define RX_IDAF_DWORD3 (PORT_BASE + 0xd0) +#define RX_IDAF_DWORD4 (PORT_BASE + 0xd4) +#define RX_IDAF_DWORD5 (PORT_BASE + 0xd8) +#define RX_IDAF_DWORD6 (PORT_BASE + 0xdc) +#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) +#define DONE_RECEIVED_TIME (PORT_BASE + 0x12c) +#define CON_CFG_DRIVER (PORT_BASE + 0x130) +#define PHY_CONFIG2 (PORT_BASE + 0x1a8) +#define PHY_CONFIG2_FORCE_TXDEEMPH_OFF 3 +#define PHY_CONFIG2_FORCE_TXDEEMPH_MSK (0x1 << PHY_CONFIG2_FORCE_TXDEEMPH_OFF) +#define PHY_CONFIG2_TX_TRAIN_COMP_OFF 24 +#define PHY_CONFIG2_TX_TRAIN_COMP_MSK (0x1 << PHY_CONFIG2_TX_TRAIN_COMP_OFF) +#define CHL_INT0 (PORT_BASE + 0x1b0) +#define CHL_INT0_PHYCTRL_NOTRDY_OFF 0 +#define CHL_INT0_PHYCTRL_NOTRDY_MSK (0x1 << CHL_INT0_PHYCTRL_NOTRDY_OFF) +#define CHL_INT0_SN_FAIL_NGR_OFF 2 +#define CHL_INT0_SN_FAIL_NGR_MSK (0x1 << CHL_INT0_SN_FAIL_NGR_OFF) +#define CHL_INT0_DWS_LOST_OFF 4 +#define CHL_INT0_DWS_LOST_MSK (0x1 << CHL_INT0_DWS_LOST_OFF) +#define CHL_INT0_SL_IDAF_FAIL_OFF 10 +#define CHL_INT0_SL_IDAF_FAIL_MSK (0x1 << CHL_INT0_SL_IDAF_FAIL_OFF) +#define CHL_INT0_ID_TIMEOUT_OFF 11 +#define CHL_INT0_ID_TIMEOUT_MSK (0x1 << CHL_INT0_ID_TIMEOUT_OFF) +#define 
CHL_INT0_SL_OPAF_FAIL_OFF 12 +#define CHL_INT0_SL_OPAF_FAIL_MSK (0x1 << CHL_INT0_SL_OPAF_FAIL_OFF) +#define CHL_INT0_SL_PS_FAIL_OFF 21 +#define CHL_INT0_SL_PS_FAIL_MSK (0x1 << CHL_INT0_SL_PS_FAIL_OFF) +#define CHL_INT1 (PORT_BASE + 0x1b4) +#define CHL_INT2 (PORT_BASE + 0x1b8) +#define CHL_INT2_SL_RX_BC_ACK_OFF 2 +#define CHL_INT2_SL_RX_BC_ACK_MSK (0x1 << CHL_INT2_SL_RX_BC_ACK_OFF) +#define CHL_INT2_SL_PHY_ENA_OFF 6 +#define CHL_INT2_SL_PHY_ENA_MSK (0x1 << CHL_INT2_SL_PHY_ENA_OFF) +#define CHL_INT0_MSK (PORT_BASE + 0x1bc) +#define CHL_INT0_MSK_PHYCTRL_NOTRDY_OFF 0 +#define CHL_INT0_MSK_PHYCTRL_NOTRDY_MSK (0x1 << CHL_INT0_MSK_PHYCTRL_NOTRDY_OFF) +#define CHL_INT1_MSK (PORT_BASE + 0x1c0) +#define CHL_INT2_MSK (PORT_BASE + 0x1c4) +#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) +#define DMA_TX_STATUS (PORT_BASE + 0x2d0) +#define DMA_TX_STATUS_BUSY_OFF 0 +#define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF) +#define DMA_RX_STATUS (PORT_BASE + 0x2e8) +#define DMA_RX_STATUS_BUSY_OFF 0 +#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) + +#define AXI_CFG 0x5100 +#define RESET_VALUE 0x7ffff + +/* HW dma structures */ +/* Delivery queue header */ +/* dw0 */ +#define CMD_HDR_RESP_REPORT_OFF 5 +#define CMD_HDR_RESP_REPORT_MSK 0x20 +#define CMD_HDR_TLR_CTRL_OFF 6 +#define CMD_HDR_TLR_CTRL_MSK 0xc0 +#define CMD_HDR_PORT_OFF 17 +#define CMD_HDR_PORT_MSK 0xe0000 +#define CMD_HDR_PRIORITY_OFF 27 +#define CMD_HDR_PRIORITY_MSK 0x8000000 +#define CMD_HDR_MODE_OFF 28 +#define CMD_HDR_MODE_MSK 0x10000000 +#define CMD_HDR_CMD_OFF 29 +#define CMD_HDR_CMD_MSK 0xe0000000 +/* dw1 */ +#define CMD_HDR_VERIFY_DTL_OFF 10 +#define CMD_HDR_VERIFY_DTL_MSK 0x400 +#define CMD_HDR_SSP_FRAME_TYPE_OFF 13 +#define CMD_HDR_SSP_FRAME_TYPE_MSK 0xe000 +#define CMD_HDR_DEVICE_ID_OFF 16 +#define CMD_HDR_DEVICE_ID_MSK 0xffff0000 +/* dw2 */ +#define CMD_HDR_CFL_OFF 0 +#define CMD_HDR_CFL_MSK 0x1ff +#define CMD_HDR_MRFL_OFF 15 +#define CMD_HDR_MRFL_MSK 0xff8000 +#define CMD_HDR_FIRST_BURST_OFF 25 +#define CMD_HDR_FIRST_BURST_MSK 0x2000000 +/* dw3 */ +#define CMD_HDR_IPTT_OFF 0 +#define CMD_HDR_IPTT_MSK 0xffff +/* dw6 */ +#define CMD_HDR_DATA_SGL_LEN_OFF 16 +#define CMD_HDR_DATA_SGL_LEN_MSK 0xffff0000 + +/* Completion header */ +#define CMPLT_HDR_IPTT_OFF 0 +#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF) +#define CMPLT_HDR_CMD_CMPLT_OFF 17 +#define CMPLT_HDR_CMD_CMPLT_MSK (0x1 << CMPLT_HDR_CMD_CMPLT_OFF) +#define CMPLT_HDR_ERR_RCRD_XFRD_OFF 18 +#define CMPLT_HDR_ERR_RCRD_XFRD_MSK (0x1 << CMPLT_HDR_ERR_RCRD_XFRD_OFF) +#define CMPLT_HDR_RSPNS_XFRD_OFF 19 +#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) +#define CMPLT_HDR_IO_CFG_ERR_OFF 27 +#define CMPLT_HDR_IO_CFG_ERR_MSK (0x1 << CMPLT_HDR_IO_CFG_ERR_OFF) + +/* ITCT header */ +/* qw0 */ +#define ITCT_HDR_DEV_TYPE_OFF 0 +#define ITCT_HDR_DEV_TYPE_MSK (0x3ULL << ITCT_HDR_DEV_TYPE_OFF) +#define ITCT_HDR_VALID_OFF 2 +#define ITCT_HDR_VALID_MSK (0x1ULL << ITCT_HDR_VALID_OFF) +#define ITCT_HDR_AWT_CONTROL_OFF 4 +#define ITCT_HDR_AWT_CONTROL_MSK (0x1ULL << ITCT_HDR_AWT_CONTROL_OFF) +#define ITCT_HDR_MAX_CONN_RATE_OFF 5 +#define ITCT_HDR_MAX_CONN_RATE_MSK (0xfULL << ITCT_HDR_MAX_CONN_RATE_OFF) +#define ITCT_HDR_VALID_LINK_NUM_OFF 9 +#define ITCT_HDR_VALID_LINK_NUM_MSK (0xfULL << ITCT_HDR_VALID_LINK_NUM_OFF) +#define ITCT_HDR_PORT_ID_OFF 13 +#define ITCT_HDR_PORT_ID_MSK (0x7ULL << ITCT_HDR_PORT_ID_OFF) +#define ITCT_HDR_SMP_TIMEOUT_OFF 16 +#define ITCT_HDR_SMP_TIMEOUT_MSK (0xffffULL << ITCT_HDR_SMP_TIMEOUT_OFF) +/* qw1 */ +#define 
ITCT_HDR_MAX_SAS_ADDR_OFF 0 +#define ITCT_HDR_MAX_SAS_ADDR_MSK (0xffffffffffffffff << \ + ITCT_HDR_MAX_SAS_ADDR_OFF) +/* qw2 */ +#define ITCT_HDR_IT_NEXUS_LOSS_TL_OFF 0 +#define ITCT_HDR_IT_NEXUS_LOSS_TL_MSK (0xffffULL << \ + ITCT_HDR_IT_NEXUS_LOSS_TL_OFF) +#define ITCT_HDR_BUS_INACTIVE_TL_OFF 16 +#define ITCT_HDR_BUS_INACTIVE_TL_MSK (0xffffULL << \ + ITCT_HDR_BUS_INACTIVE_TL_OFF) +#define ITCT_HDR_MAX_CONN_TL_OFF 32 +#define ITCT_HDR_MAX_CONN_TL_MSK (0xffffULL << \ + ITCT_HDR_MAX_CONN_TL_OFF) +#define ITCT_HDR_REJ_OPEN_TL_OFF 48 +#define ITCT_HDR_REJ_OPEN_TL_MSK (0xffffULL << \ + ITCT_HDR_REJ_OPEN_TL_OFF) + +/* Err record header */ +#define ERR_HDR_DMA_TX_ERR_TYPE_OFF 0 +#define ERR_HDR_DMA_TX_ERR_TYPE_MSK (0xffff << ERR_HDR_DMA_TX_ERR_TYPE_OFF) +#define ERR_HDR_DMA_RX_ERR_TYPE_OFF 16 +#define ERR_HDR_DMA_RX_ERR_TYPE_MSK (0xffff << ERR_HDR_DMA_RX_ERR_TYPE_OFF) + +struct hisi_sas_complete_v1_hdr { + __le32 data; +}; + +struct hisi_sas_err_record_v1 { + /* dw0 */ + __le32 dma_err_type; + + /* dw1 */ + __le32 trans_tx_fail_type; + + /* dw2 */ + __le32 trans_rx_fail_type; + + /* dw3 */ + u32 rsvd; +}; + +enum { + HISI_SAS_PHY_BCAST_ACK = 0, + HISI_SAS_PHY_SL_PHY_ENABLED, + HISI_SAS_PHY_INT_ABNORMAL, + HISI_SAS_PHY_INT_NR +}; + +enum { + DMA_TX_ERR_BASE = 0x0, + DMA_RX_ERR_BASE = 0x100, + TRANS_TX_FAIL_BASE = 0x200, + TRANS_RX_FAIL_BASE = 0x300, + + /* dma tx */ + DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x0 */ + DMA_TX_DIF_APP_ERR, /* 0x1 */ + DMA_TX_DIF_RPP_ERR, /* 0x2 */ + DMA_TX_AXI_BUS_ERR, /* 0x3 */ + DMA_TX_DATA_SGL_OVERFLOW_ERR, /* 0x4 */ + DMA_TX_DIF_SGL_OVERFLOW_ERR, /* 0x5 */ + DMA_TX_UNEXP_XFER_RDY_ERR, /* 0x6 */ + DMA_TX_XFER_RDY_OFFSET_ERR, /* 0x7 */ + DMA_TX_DATA_UNDERFLOW_ERR, /* 0x8 */ + DMA_TX_XFER_RDY_LENGTH_OVERFLOW_ERR, /* 0x9 */ + + /* dma rx */ + DMA_RX_BUFFER_ECC_ERR = DMA_RX_ERR_BASE, /* 0x100 */ + DMA_RX_DIF_CRC_ERR, /* 0x101 */ + DMA_RX_DIF_APP_ERR, /* 0x102 */ + DMA_RX_DIF_RPP_ERR, /* 0x103 */ + DMA_RX_RESP_BUFFER_OVERFLOW_ERR, /* 0x104 */ + DMA_RX_AXI_BUS_ERR, /* 0x105 */ + DMA_RX_DATA_SGL_OVERFLOW_ERR, /* 0x106 */ + DMA_RX_DIF_SGL_OVERFLOW_ERR, /* 0x107 */ + DMA_RX_DATA_OFFSET_ERR, /* 0x108 */ + DMA_RX_UNEXP_RX_DATA_ERR, /* 0x109 */ + DMA_RX_DATA_OVERFLOW_ERR, /* 0x10a */ + DMA_RX_DATA_UNDERFLOW_ERR, /* 0x10b */ + DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x10c */ + + /* trans tx */ + TRANS_TX_RSVD0_ERR = TRANS_TX_FAIL_BASE, /* 0x200 */ + TRANS_TX_PHY_NOT_ENABLE_ERR, /* 0x201 */ + TRANS_TX_OPEN_REJCT_WRONG_DEST_ERR, /* 0x202 */ + TRANS_TX_OPEN_REJCT_ZONE_VIOLATION_ERR, /* 0x203 */ + TRANS_TX_OPEN_REJCT_BY_OTHER_ERR, /* 0x204 */ + TRANS_TX_RSVD1_ERR, /* 0x205 */ + TRANS_TX_OPEN_REJCT_AIP_TIMEOUT_ERR, /* 0x206 */ + TRANS_TX_OPEN_REJCT_STP_BUSY_ERR, /* 0x207 */ + TRANS_TX_OPEN_REJCT_PROTOCOL_NOT_SUPPORT_ERR, /* 0x208 */ + TRANS_TX_OPEN_REJCT_RATE_NOT_SUPPORT_ERR, /* 0x209 */ + TRANS_TX_OPEN_REJCT_BAD_DEST_ERR, /* 0x20a */ + TRANS_TX_OPEN_BREAK_RECEIVE_ERR, /* 0x20b */ + TRANS_TX_LOW_PHY_POWER_ERR, /* 0x20c */ + TRANS_TX_OPEN_REJCT_PATHWAY_BLOCKED_ERR, /* 0x20d */ + TRANS_TX_OPEN_TIMEOUT_ERR, /* 0x20e */ + TRANS_TX_OPEN_REJCT_NO_DEST_ERR, /* 0x20f */ + TRANS_TX_OPEN_RETRY_ERR, /* 0x210 */ + TRANS_TX_RSVD2_ERR, /* 0x211 */ + TRANS_TX_BREAK_TIMEOUT_ERR, /* 0x212 */ + TRANS_TX_BREAK_REQUEST_ERR, /* 0x213 */ + TRANS_TX_BREAK_RECEIVE_ERR, /* 0x214 */ + TRANS_TX_CLOSE_TIMEOUT_ERR, /* 0x215 */ + TRANS_TX_CLOSE_NORMAL_ERR, /* 0x216 */ + TRANS_TX_CLOSE_PHYRESET_ERR, /* 0x217 */ + TRANS_TX_WITH_CLOSE_DWS_TIMEOUT_ERR, /* 0x218 */ + TRANS_TX_WITH_CLOSE_COMINIT_ERR, /* 0x219 */ + 
TRANS_TX_NAK_RECEIVE_ERR, /* 0x21a */ + TRANS_TX_ACK_NAK_TIMEOUT_ERR, /* 0x21b */ + TRANS_TX_CREDIT_TIMEOUT_ERR, /* 0x21c */ + TRANS_TX_IPTT_CONFLICT_ERR, /* 0x21d */ + TRANS_TX_TXFRM_TYPE_ERR, /* 0x21e */ + TRANS_TX_TXSMP_LENGTH_ERR, /* 0x21f */ + + /* trans rx */ + TRANS_RX_FRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x300 */ + TRANS_RX_FRAME_DONE_ERR, /* 0x301 */ + TRANS_RX_FRAME_ERRPRM_ERR, /* 0x302 */ + TRANS_RX_FRAME_NO_CREDIT_ERR, /* 0x303 */ + TRANS_RX_RSVD0_ERR, /* 0x304 */ + TRANS_RX_FRAME_OVERRUN_ERR, /* 0x305 */ + TRANS_RX_FRAME_NO_EOF_ERR, /* 0x306 */ + TRANS_RX_LINK_BUF_OVERRUN_ERR, /* 0x307 */ + TRANS_RX_BREAK_TIMEOUT_ERR, /* 0x308 */ + TRANS_RX_BREAK_REQUEST_ERR, /* 0x309 */ + TRANS_RX_BREAK_RECEIVE_ERR, /* 0x30a */ + TRANS_RX_CLOSE_TIMEOUT_ERR, /* 0x30b */ + TRANS_RX_CLOSE_NORMAL_ERR, /* 0x30c */ + TRANS_RX_CLOSE_PHYRESET_ERR, /* 0x30d */ + TRANS_RX_WITH_CLOSE_DWS_TIMEOUT_ERR, /* 0x30e */ + TRANS_RX_WITH_CLOSE_COMINIT_ERR, /* 0x30f */ + TRANS_RX_DATA_LENGTH0_ERR, /* 0x310 */ + TRANS_RX_BAD_HASH_ERR, /* 0x311 */ + TRANS_RX_XRDY_ZERO_ERR, /* 0x312 */ + TRANS_RX_SSP_FRAME_LEN_ERR, /* 0x313 */ + TRANS_RX_TRANS_RX_RSVD1_ERR, /* 0x314 */ + TRANS_RX_NO_BALANCE_ERR, /* 0x315 */ + TRANS_RX_TRANS_RX_RSVD2_ERR, /* 0x316 */ + TRANS_RX_TRANS_RX_RSVD3_ERR, /* 0x317 */ + TRANS_RX_BAD_FRAME_TYPE_ERR, /* 0x318 */ + TRANS_RX_SMP_FRAME_LEN_ERR, /* 0x319 */ + TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x31a */ +}; + +#define HISI_SAS_PHY_MAX_INT_NR (HISI_SAS_PHY_INT_NR * HISI_SAS_MAX_PHYS) +#define HISI_SAS_CQ_MAX_INT_NR (HISI_SAS_MAX_QUEUES) +#define HISI_SAS_FATAL_INT_NR (2) + +#define HISI_SAS_MAX_INT_NR \ + (HISI_SAS_PHY_MAX_INT_NR + HISI_SAS_CQ_MAX_INT_NR +\ + HISI_SAS_FATAL_INT_NR) + +static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) +{ + void __iomem *regs = hisi_hba->regs + off; + + return readl(regs); +} + +static void hisi_sas_write32(struct hisi_hba *hisi_hba, + u32 off, u32 val) +{ + void __iomem *regs = hisi_hba->regs + off; + + writel(val, regs); +} + +static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, + int phy_no, u32 off, u32 val) +{ + void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; + + writel(val, regs); +} + +static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, + int phy_no, u32 off) +{ + void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; + + return readl(regs); +} + +static void config_phy_opt_mode_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg &= ~PHY_CFG_DC_OPT_MSK; + cfg |= 1 << PHY_CFG_DC_OPT_OFF; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + +static void config_tx_tfe_autoneg_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CONFIG2); + + cfg &= ~PHY_CONFIG2_FORCE_TXDEEMPH_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CONFIG2, cfg); +} + +static void config_id_frame_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + struct sas_identify_frame identify_frame; + u32 *identify_buffer; + + memset(&identify_frame, 0, sizeof(identify_frame)); + identify_frame.dev_type = SAS_END_DEVICE; + identify_frame.frame_type = 0; + identify_frame._un1 = 1; + identify_frame.initiator_bits = SAS_PROTOCOL_ALL; + identify_frame.target_bits = SAS_PROTOCOL_NONE; + memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); + memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); + identify_frame.phy_id = phy_no; + identify_buffer = (u32 *)(&identify_frame); + + hisi_sas_phy_write32(hisi_hba, 
phy_no, TX_ID_DWORD0, + __swab32(identify_buffer[0])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, + __swab32(identify_buffer[1])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, + __swab32(identify_buffer[2])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, + __swab32(identify_buffer[3])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, + __swab32(identify_buffer[4])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, + __swab32(identify_buffer[5])); +} + +static void setup_itct_v1_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_device *sas_dev) +{ + struct domain_device *device = sas_dev->sas_device; + struct device *dev = hisi_hba->dev; + u64 qw0, device_id = sas_dev->device_id; + struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; + struct asd_sas_port *sas_port = device->port; + struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + u64 sas_addr; + + memset(itct, 0, sizeof(*itct)); + + /* qw0 */ + qw0 = 0; + switch (sas_dev->dev_type) { + case SAS_END_DEVICE: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: + qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF; + break; + default: + dev_warn(dev, "setup itct: unsupported dev type (%d)\n", + sas_dev->dev_type); + } + + qw0 |= ((1 << ITCT_HDR_VALID_OFF) | + (1 << ITCT_HDR_AWT_CONTROL_OFF) | + (device->max_linkrate << ITCT_HDR_MAX_CONN_RATE_OFF) | + (1 << ITCT_HDR_VALID_LINK_NUM_OFF) | + (port->id << ITCT_HDR_PORT_ID_OFF)); + itct->qw0 = cpu_to_le64(qw0); + + /* qw1 */ + memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE); + itct->sas_addr = cpu_to_le64(__swab64(sas_addr)); + + /* qw2 */ + itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_IT_NEXUS_LOSS_TL_OFF) | + (0xff00ULL << ITCT_HDR_BUS_INACTIVE_TL_OFF) | + (0xff00ULL << ITCT_HDR_MAX_CONN_TL_OFF) | + (0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF)); +} + +static int clear_itct_v1_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_device *sas_dev) +{ + u64 dev_id = sas_dev->device_id; + struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; + u64 qw0; + u32 reg_val = hisi_sas_read32(hisi_hba, CFG_AGING_TIME); + + reg_val |= CFG_AGING_TIME_ITCT_REL_MSK; + hisi_sas_write32(hisi_hba, CFG_AGING_TIME, reg_val); + + /* free itct */ + udelay(1); + reg_val = hisi_sas_read32(hisi_hba, CFG_AGING_TIME); + reg_val &= ~CFG_AGING_TIME_ITCT_REL_MSK; + hisi_sas_write32(hisi_hba, CFG_AGING_TIME, reg_val); + + qw0 = le64_to_cpu(itct->qw0); + qw0 &= ~ITCT_HDR_VALID_MSK; + itct->qw0 = cpu_to_le64(qw0); + + return 0; +} + +static int reset_hw_v1_hw(struct hisi_hba *hisi_hba) +{ + int i; + unsigned long end_time; + u32 val; + struct device *dev = hisi_hba->dev; + + for (i = 0; i < hisi_hba->n_phy; i++) { + u32 phy_ctrl = hisi_sas_phy_read32(hisi_hba, i, PHY_CTRL); + + phy_ctrl |= PHY_CTRL_RESET_MSK; + hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, phy_ctrl); + } + msleep(1); /* It is safe to wait for 50us */ + + /* Ensure DMA tx & rx idle */ + for (i = 0; i < hisi_hba->n_phy; i++) { + u32 dma_tx_status, dma_rx_status; + + end_time = jiffies + msecs_to_jiffies(1000); + + while (1) { + dma_tx_status = hisi_sas_phy_read32(hisi_hba, i, + DMA_TX_STATUS); + dma_rx_status = hisi_sas_phy_read32(hisi_hba, i, + DMA_RX_STATUS); + + if (!(dma_tx_status & DMA_TX_STATUS_BUSY_MSK) && + !(dma_rx_status & DMA_RX_STATUS_BUSY_MSK)) + break; + + msleep(20); + if (time_after(jiffies, end_time)) + return -EIO; + } + } + + /* Ensure axi bus idle */ + end_time = jiffies + msecs_to_jiffies(1000); + while (1) { + u32 axi_status = + hisi_sas_read32(hisi_hba, AXI_CFG); + + if (axi_status 
== 0) + break; + + msleep(20); + if (time_after(jiffies, end_time)) + return -EIO; + } + + if (ACPI_HANDLE(dev)) { + acpi_status s; + + s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL); + if (ACPI_FAILURE(s)) { + dev_err(dev, "Reset failed\n"); + return -EIO; + } + } else if (hisi_hba->ctrl) { + /* Apply reset and disable clock */ + /* clk disable reg is offset by +4 bytes from clk enable reg */ + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg, + RESET_VALUE); + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4, + RESET_VALUE); + msleep(1); + regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); + if (RESET_VALUE != (val & RESET_VALUE)) { + dev_err(dev, "Reset failed\n"); + return -EIO; + } + + /* De-reset and enable clock */ + /* deassert rst reg is offset by +4 bytes from assert reg */ + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4, + RESET_VALUE); + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg, + RESET_VALUE); + msleep(1); + regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); + if (val & RESET_VALUE) { + dev_err(dev, "De-reset failed\n"); + return -EIO; + } + } else { + dev_warn(dev, "no reset method\n"); + return -EINVAL; + } + + return 0; +} + +static void init_reg_v1_hw(struct hisi_hba *hisi_hba) +{ + int i; + + /* Global registers init*/ + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, + (u32)((1ULL << hisi_hba->queue_count) - 1)); + hisi_sas_write32(hisi_hba, HGC_TRANS_TASK_CNT_LIMIT, 0x11); + hisi_sas_write32(hisi_hba, DEVICE_MSG_WORK_MODE, 0x1); + hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x1ff); + hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x401); + hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0x64); + hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1); + hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x64); + hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x2710); + hisi_sas_write32(hisi_hba, REJECT_TO_OPEN_LIMIT_TIME, 0x1); + hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x7a12); + hisi_sas_write32(hisi_hba, HGC_DFX_CFG2, 0x9c40); + hisi_sas_write32(hisi_hba, FIS_LIST_BADDR_L, 0x2); + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0xc); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x186a0); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 1); + hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1); + hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1); + hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffffffff); + hisi_sas_write32(hisi_hba, OQ_INT_SRC_MSK, 0); + hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0); + hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0); + hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 0x2); + hisi_sas_write32(hisi_hba, CFG_SAS_CONFIG, 0x22000000); + + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x88a); + hisi_sas_phy_write32(hisi_hba, i, PHY_CONFIG2, 0x7c080); + hisi_sas_phy_write32(hisi_hba, i, PHY_RATE_NEGO, 0x415ee00); + hisi_sas_phy_write32(hisi_hba, i, PHY_PCN, 0x80a80000); + hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d); + hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x0); + hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); + hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0); + hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x13f0a); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 3); + 
hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 8); + } + + for (i = 0; i < hisi_hba->queue_count; i++) { + /* Delivery queue */ + hisi_sas_write32(hisi_hba, + DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14), + upper_32_bits(hisi_hba->cmd_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, + DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14), + lower_32_bits(hisi_hba->cmd_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, + DLVRY_Q_0_DEPTH + (i * 0x14), + HISI_SAS_QUEUE_SLOTS); + + /* Completion queue */ + hisi_sas_write32(hisi_hba, + COMPL_Q_0_BASE_ADDR_HI + (i * 0x14), + upper_32_bits(hisi_hba->complete_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, + COMPL_Q_0_BASE_ADDR_LO + (i * 0x14), + lower_32_bits(hisi_hba->complete_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14), + HISI_SAS_QUEUE_SLOTS); + } + + /* itct */ + hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO, + lower_32_bits(hisi_hba->itct_dma)); + + hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI, + upper_32_bits(hisi_hba->itct_dma)); + + /* iost */ + hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO, + lower_32_bits(hisi_hba->iost_dma)); + + hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI, + upper_32_bits(hisi_hba->iost_dma)); + + /* breakpoint */ + hisi_sas_write32(hisi_hba, BROKEN_MSG_ADDR_LO, + lower_32_bits(hisi_hba->breakpoint_dma)); + + hisi_sas_write32(hisi_hba, BROKEN_MSG_ADDR_HI, + upper_32_bits(hisi_hba->breakpoint_dma)); +} + +static int hw_init_v1_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + int rc; + + rc = reset_hw_v1_hw(hisi_hba); + if (rc) { + dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc); + return rc; + } + + msleep(100); + init_reg_v1_hw(hisi_hba); + + return 0; +} + +static void enable_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg |= PHY_CFG_ENA_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + +static void disable_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg &= ~PHY_CFG_ENA_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + +static void start_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + config_id_frame_v1_hw(hisi_hba, phy_no); + config_phy_opt_mode_v1_hw(hisi_hba, phy_no); + config_tx_tfe_autoneg_v1_hw(hisi_hba, phy_no); + enable_phy_v1_hw(hisi_hba, phy_no); +} + +static void phy_hard_reset_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + hisi_sas_phy_enable(hisi_hba, phy_no, 0); + msleep(100); + hisi_sas_phy_enable(hisi_hba, phy_no, 1); +} + +static void start_phys_v1_hw(struct timer_list *t) +{ + struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer); + int i; + + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x12a); + hisi_sas_phy_enable(hisi_hba, i, 1); + } +} + +static void phys_init_v1_hw(struct hisi_hba *hisi_hba) +{ + int i; + struct timer_list *timer = &hisi_hba->timer; + + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x6a); + hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK); + } + + timer_setup(timer, start_phys_v1_hw, 0); + mod_timer(timer, jiffies + HZ); +} + +static void sl_notify_ssp_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 sl_control; + + sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); + sl_control |= SL_CONTROL_NOTIFY_EN_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); + msleep(1); + sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); + sl_control 
&= ~SL_CONTROL_NOTIFY_EN_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); +} + +static enum sas_linkrate phy_get_max_linkrate_v1_hw(void) +{ + return SAS_LINK_RATE_6_0_GBPS; +} + +static void phy_set_linkrate_v1_hw(struct hisi_hba *hisi_hba, int phy_no, + struct sas_phy_linkrates *r) +{ + enum sas_linkrate max = r->maximum_linkrate; + u32 prog_phy_link_rate = 0x800; + + prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max); + hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, + prog_phy_link_rate); +} + +static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id) +{ + int i, bitmap = 0; + u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); + + for (i = 0; i < hisi_hba->n_phy; i++) + if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) + bitmap |= 1 << i; + + return bitmap; +} + +/* DQ lock must be taken here */ +static void start_delivery_v1_hw(struct hisi_sas_dq *dq) +{ + struct hisi_hba *hisi_hba = dq->hisi_hba; + struct hisi_sas_slot *s, *s1, *s2 = NULL; + int dlvry_queue = dq->id; + int wp; + + list_for_each_entry_safe(s, s1, &dq->list, delivery) { + if (!s->ready) + break; + s2 = s; + list_del(&s->delivery); + } + + if (!s2) + return; + + /* + * Ensure that memories for slots built on other CPUs is observed. + */ + smp_rmb(); + wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; + + hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); +} + +static void prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, + struct hisi_sas_cmd_hdr *hdr, + struct scatterlist *scatter, + int n_elem) +{ + struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); + struct scatterlist *sg; + int i; + + for_each_sg(scatter, sg, n_elem, i) { + struct hisi_sas_sge *entry = &sge_page->sge[i]; + + entry->addr = cpu_to_le64(sg_dma_address(sg)); + entry->page_ctrl_0 = entry->page_ctrl_1 = 0; + entry->data_len = cpu_to_le32(sg_dma_len(sg)); + entry->data_off = 0; + } + + hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); + + hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); +} + +static void prep_smp_v1_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct domain_device *device = task->dev; + struct hisi_sas_port *port = slot->port; + struct scatterlist *sg_req; + struct hisi_sas_device *sas_dev = device->lldd_dev; + dma_addr_t req_dma_addr; + unsigned int req_len; + + /* req */ + sg_req = &task->smp_task.smp_req; + req_len = sg_dma_len(sg_req); + req_dma_addr = sg_dma_address(sg_req); + + /* create header */ + /* dw0 */ + hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | + (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */ + (1 << CMD_HDR_MODE_OFF) | /* ini mode */ + (2 << CMD_HDR_CMD_OFF)); /* smp */ + + /* map itct entry */ + hdr->dw1 = cpu_to_le32(sas_dev->device_id << CMD_HDR_DEVICE_ID_OFF); + + /* dw2 */ + hdr->dw2 = cpu_to_le32((((req_len-4)/4) << CMD_HDR_CFL_OFF) | + (HISI_SAS_MAX_SMP_RESP_SZ/4 << + CMD_HDR_MRFL_OFF)); + + hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); + + hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); +} + +static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct domain_device *device = task->dev; + struct hisi_sas_device 
*sas_dev = device->lldd_dev; + struct hisi_sas_port *port = slot->port; + struct sas_ssp_task *ssp_task = &task->ssp_task; + struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; + struct sas_tmf_task *tmf = slot->tmf; + int has_data = 0, priority = !!tmf; + u8 *buf_cmd; + u32 dw1, dw2; + + /* create header */ + hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | + (0x2 << CMD_HDR_TLR_CTRL_OFF) | + (port->id << CMD_HDR_PORT_OFF) | + (priority << CMD_HDR_PRIORITY_OFF) | + (1 << CMD_HDR_MODE_OFF) | /* ini mode */ + (1 << CMD_HDR_CMD_OFF)); /* ssp */ + + dw1 = 1 << CMD_HDR_VERIFY_DTL_OFF; + + if (tmf) { + dw1 |= 3 << CMD_HDR_SSP_FRAME_TYPE_OFF; + } else { + switch (scsi_cmnd->sc_data_direction) { + case DMA_TO_DEVICE: + dw1 |= 2 << CMD_HDR_SSP_FRAME_TYPE_OFF; + has_data = 1; + break; + case DMA_FROM_DEVICE: + dw1 |= 1 << CMD_HDR_SSP_FRAME_TYPE_OFF; + has_data = 1; + break; + default: + dw1 |= 0 << CMD_HDR_SSP_FRAME_TYPE_OFF; + } + } + + /* map itct entry */ + dw1 |= sas_dev->device_id << CMD_HDR_DEVICE_ID_OFF; + hdr->dw1 = cpu_to_le32(dw1); + + if (tmf) { + dw2 = ((sizeof(struct ssp_tmf_iu) + + sizeof(struct ssp_frame_hdr)+3)/4) << + CMD_HDR_CFL_OFF; + } else { + dw2 = ((sizeof(struct ssp_command_iu) + + sizeof(struct ssp_frame_hdr)+3)/4) << + CMD_HDR_CFL_OFF; + } + + dw2 |= (HISI_SAS_MAX_SSP_RESP_SZ/4) << CMD_HDR_MRFL_OFF; + + hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); + + if (has_data) + prep_prd_sge_v1_hw(hisi_hba, slot, hdr, task->scatter, + slot->n_elem); + + hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); + hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); + + buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) + + sizeof(struct ssp_frame_hdr); + hdr->dw2 = cpu_to_le32(dw2); + + memcpy(buf_cmd, &task->ssp_task.LUN, 8); + if (!tmf) { + buf_cmd[9] = task->ssp_task.task_attr; + memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, + task->ssp_task.cmd->cmd_len); + } else { + buf_cmd[10] = tmf->tmf; + switch (tmf->tmf) { + case TMF_ABORT_TASK: + case TMF_QUERY_TASK: + buf_cmd[12] = + (tmf->tag_of_task_to_be_managed >> 8) & 0xff; + buf_cmd[13] = + tmf->tag_of_task_to_be_managed & 0xff; + break; + default: + break; + } + } +} + +/* by default, task resp is complete */ +static void slot_err_v1_hw(struct hisi_hba *hisi_hba, + struct sas_task *task, + struct hisi_sas_slot *slot) +{ + struct task_status_struct *ts = &task->task_status; + struct hisi_sas_err_record_v1 *err_record = + hisi_sas_status_buf_addr_mem(slot); + struct device *dev = hisi_hba->dev; + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + { + int error = -1; + u32 dma_err_type = le32_to_cpu(err_record->dma_err_type); + u32 dma_tx_err_type = ((dma_err_type & + ERR_HDR_DMA_TX_ERR_TYPE_MSK)) >> + ERR_HDR_DMA_TX_ERR_TYPE_OFF; + u32 dma_rx_err_type = ((dma_err_type & + ERR_HDR_DMA_RX_ERR_TYPE_MSK)) >> + ERR_HDR_DMA_RX_ERR_TYPE_OFF; + u32 trans_tx_fail_type = + le32_to_cpu(err_record->trans_tx_fail_type); + u32 trans_rx_fail_type = + le32_to_cpu(err_record->trans_rx_fail_type); + + if (dma_tx_err_type) { + /* dma tx err */ + error = ffs(dma_tx_err_type) + - 1 + DMA_TX_ERR_BASE; + } else if (dma_rx_err_type) { + /* dma rx err */ + error = ffs(dma_rx_err_type) + - 1 + DMA_RX_ERR_BASE; + } else if (trans_tx_fail_type) { + /* trans tx err */ + error = ffs(trans_tx_fail_type) + - 1 + TRANS_TX_FAIL_BASE; + } else if (trans_rx_fail_type) { + /* trans rx err */ + error = ffs(trans_rx_fail_type) + - 1 + TRANS_RX_FAIL_BASE; + } + + 
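[Editorial aside, not part of the patch: the error classification just above folds the four per-class error words into one flat error-code space by taking the lowest set bit of the first non-zero word and adding that class's base offset; the switch that follows decodes that flat code. A minimal standalone sketch of the same idea, with hypothetical base values standing in for DMA_TX_ERR_BASE / DMA_RX_ERR_BASE / TRANS_TX_FAIL_BASE / TRANS_RX_FAIL_BASE:]

/* Illustrative sketch only -- mirrors the ffs()-plus-base mapping used by
 * slot_err_v1_hw(); base values here are made-up stand-ins. */
#include <strings.h>	/* ffs() */

enum { EX_DMA_TX_BASE = 0x00, EX_DMA_RX_BASE = 0x20,
       EX_TRANS_TX_BASE = 0x40, EX_TRANS_RX_BASE = 0x60 };

static int classify_error(unsigned int dma_tx, unsigned int dma_rx,
			  unsigned int trans_tx, unsigned int trans_rx)
{
	/* First non-zero word wins; bit N becomes code BASE + N. */
	if (dma_tx)
		return ffs(dma_tx) - 1 + EX_DMA_TX_BASE;
	if (dma_rx)
		return ffs(dma_rx) - 1 + EX_DMA_RX_BASE;
	if (trans_tx)
		return ffs(trans_tx) - 1 + EX_TRANS_TX_BASE;
	if (trans_rx)
		return ffs(trans_rx) - 1 + EX_TRANS_RX_BASE;
	return -1;	/* no error recorded */
}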
switch (error) { + case DMA_TX_DATA_UNDERFLOW_ERR: + case DMA_RX_DATA_UNDERFLOW_ERR: + { + ts->residual = 0; + ts->stat = SAS_DATA_UNDERRUN; + break; + } + case DMA_TX_DATA_SGL_OVERFLOW_ERR: + case DMA_TX_DIF_SGL_OVERFLOW_ERR: + case DMA_TX_XFER_RDY_LENGTH_OVERFLOW_ERR: + case DMA_RX_DATA_OVERFLOW_ERR: + case TRANS_RX_FRAME_OVERRUN_ERR: + case TRANS_RX_LINK_BUF_OVERRUN_ERR: + { + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + break; + } + case TRANS_TX_PHY_NOT_ENABLE_ERR: + { + ts->stat = SAS_PHY_DOWN; + break; + } + case TRANS_TX_OPEN_REJCT_WRONG_DEST_ERR: + case TRANS_TX_OPEN_REJCT_ZONE_VIOLATION_ERR: + case TRANS_TX_OPEN_REJCT_BY_OTHER_ERR: + case TRANS_TX_OPEN_REJCT_AIP_TIMEOUT_ERR: + case TRANS_TX_OPEN_REJCT_STP_BUSY_ERR: + case TRANS_TX_OPEN_REJCT_PROTOCOL_NOT_SUPPORT_ERR: + case TRANS_TX_OPEN_REJCT_RATE_NOT_SUPPORT_ERR: + case TRANS_TX_OPEN_REJCT_BAD_DEST_ERR: + case TRANS_TX_OPEN_BREAK_RECEIVE_ERR: + case TRANS_TX_OPEN_REJCT_PATHWAY_BLOCKED_ERR: + case TRANS_TX_OPEN_REJCT_NO_DEST_ERR: + case TRANS_TX_OPEN_RETRY_ERR: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + } + case TRANS_TX_OPEN_TIMEOUT_ERR: + { + ts->stat = SAS_OPEN_TO; + break; + } + case TRANS_TX_NAK_RECEIVE_ERR: + case TRANS_TX_ACK_NAK_TIMEOUT_ERR: + { + ts->stat = SAS_NAK_R_ERR; + break; + } + case TRANS_TX_CREDIT_TIMEOUT_ERR: + case TRANS_TX_CLOSE_NORMAL_ERR: + { + /* This will request a retry */ + ts->stat = SAS_QUEUE_FULL; + slot->abort = 1; + break; + } + default: + { + ts->stat = SAS_SAM_STAT_CHECK_CONDITION; + break; + } + } + } + break; + case SAS_PROTOCOL_SMP: + ts->stat = SAS_SAM_STAT_CHECK_CONDITION; + break; + + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + { + dev_err(dev, "slot err: SATA/STP not supported\n"); + } + break; + default: + break; + } + +} + +static void slot_complete_v1_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct hisi_sas_device *sas_dev; + struct device *dev = hisi_hba->dev; + struct task_status_struct *ts; + struct domain_device *device; + struct hisi_sas_complete_v1_hdr *complete_queue = + hisi_hba->complete_hdr[slot->cmplt_queue]; + struct hisi_sas_complete_v1_hdr *complete_hdr; + unsigned long flags; + u32 cmplt_hdr_data; + + complete_hdr = &complete_queue[slot->cmplt_queue_slot]; + cmplt_hdr_data = le32_to_cpu(complete_hdr->data); + + if (unlikely(!task || !task->lldd_task || !task->dev)) + return; + + ts = &task->task_status; + device = task->dev; + sas_dev = device->lldd_dev; + + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags |= SAS_TASK_STATE_DONE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + memset(ts, 0, sizeof(*ts)); + ts->resp = SAS_TASK_COMPLETE; + + if (unlikely(!sas_dev)) { + dev_dbg(dev, "slot complete: port has no device\n"); + ts->stat = SAS_PHY_DOWN; + goto out; + } + + if (cmplt_hdr_data & CMPLT_HDR_IO_CFG_ERR_MSK) { + u32 info_reg = hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO); + + if (info_reg & HGC_INVLD_DQE_INFO_DQ_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq IPTT err\n", + slot->cmplt_queue, slot->cmplt_queue_slot); + + if (info_reg & HGC_INVLD_DQE_INFO_TYPE_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq type err\n", + slot->cmplt_queue, slot->cmplt_queue_slot); + + if (info_reg & HGC_INVLD_DQE_INFO_FORCE_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq force phy err\n", + slot->cmplt_queue, 
slot->cmplt_queue_slot); + + if (info_reg & HGC_INVLD_DQE_INFO_PHY_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq phy id err\n", + slot->cmplt_queue, slot->cmplt_queue_slot); + + if (info_reg & HGC_INVLD_DQE_INFO_ABORT_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq abort flag err\n", + slot->cmplt_queue, slot->cmplt_queue_slot); + + if (info_reg & HGC_INVLD_DQE_INFO_IPTT_OF_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq IPTT or ICT err\n", + slot->cmplt_queue, slot->cmplt_queue_slot); + + if (info_reg & HGC_INVLD_DQE_INFO_SSP_ERR_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq SSP frame type err\n", + slot->cmplt_queue, slot->cmplt_queue_slot); + + if (info_reg & HGC_INVLD_DQE_INFO_OFL_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq order frame len err\n", + slot->cmplt_queue, slot->cmplt_queue_slot); + + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + goto out; + } + + if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK && + !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) { + + slot_err_v1_hw(hisi_hba, task, slot); + if (unlikely(slot->abort)) { + if (dev_is_sata(device) && task->ata_task.use_ncq) + sas_ata_device_link_abort(device, true); + else + sas_task_abort(task); + + return; + } + goto out; + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + { + struct hisi_sas_status_buffer *status_buffer = + hisi_sas_status_buf_addr_mem(slot); + struct ssp_response_iu *iu = (struct ssp_response_iu *) + &status_buffer->iu[0]; + + sas_ssp_task_response(dev, task, iu); + break; + } + case SAS_PROTOCOL_SMP: + { + struct scatterlist *sg_resp = &task->smp_task.smp_resp; + void *to = page_address(sg_page(sg_resp)); + + ts->stat = SAS_SAM_STAT_GOOD; + + memcpy(to + sg_resp->offset, + hisi_sas_status_buf_addr_mem(slot) + + sizeof(struct hisi_sas_err_record), + sg_resp->length); + break; + } + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + dev_err(dev, "slot complete: SATA/STP not supported\n"); + break; + + default: + ts->stat = SAS_SAM_STAT_CHECK_CONDITION; + break; + } + + if (!slot->port->port_attached) { + dev_err(dev, "slot complete: port %d has removed\n", + slot->port->sas_port.id); + ts->stat = SAS_PHY_DOWN; + } + +out: + hisi_sas_slot_task_free(hisi_hba, task, slot, true); + + if (task->task_done) + task->task_done(task); +} + +/* Interrupts */ +static irqreturn_t int_phyup_v1_hw(int irq_no, void *p) +{ + struct hisi_sas_phy *phy = p; + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct device *dev = hisi_hba->dev; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + int i, phy_no = sas_phy->id; + u32 irq_value, context, port_id, link_rate; + u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; + struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; + irqreturn_t res = IRQ_HANDLED; + + irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2); + if (!(irq_value & CHL_INT2_SL_PHY_ENA_MSK)) { + dev_dbg(dev, "phyup: irq_value = %x not set enable bit\n", + irq_value); + res = IRQ_NONE; + goto end; + } + + context = hisi_sas_read32(hisi_hba, PHY_CONTEXT); + if (context & 1 << phy_no) { + dev_err(dev, "phyup: phy%d SATA attached equipment\n", + phy_no); + goto end; + } + + port_id = (hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA) >> (4 * phy_no)) + & 0xf; + if (port_id == 0xf) { + dev_err(dev, "phyup: phy%d invalid portid\n", phy_no); + res = IRQ_NONE; + goto end; + } + + for (i = 0; i < 6; i++) { + u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no, + RX_IDAF_DWORD0 + (i * 4)); + frame_rcvd[i] = 
__swab32(idaf); + } + + /* Get the linkrate */ + link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); + link_rate = (link_rate >> (phy_no * 4)) & 0xf; + sas_phy->linkrate = link_rate; + sas_phy->oob_mode = SAS_OOB_MODE; + memcpy(sas_phy->attached_sas_addr, + &id->sas_addr, SAS_ADDR_SIZE); + dev_info(dev, "phyup: phy%d link_rate=%d\n", + phy_no, link_rate); + phy->port_id = port_id; + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + phy->phy_type |= PORT_TYPE_SAS; + phy->phy_attached = 1; + phy->identify.device_type = id->dev_type; + phy->frame_rcvd_size = sizeof(struct sas_identify_frame); + if (phy->identify.device_type == SAS_END_DEVICE) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != SAS_PHY_UNUSED) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SMP; + hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); +end: + if (phy->reset_completion) + complete(phy->reset_completion); + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, + CHL_INT2_SL_PHY_ENA_MSK); + + if (irq_value & CHL_INT2_SL_PHY_ENA_MSK) { + u32 chl_int0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); + + chl_int0 &= ~CHL_INT0_PHYCTRL_NOTRDY_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, chl_int0); + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0_MSK, 0x3ce3ee); + } + + return res; +} + +static irqreturn_t int_bcast_v1_hw(int irq, void *p) +{ + struct hisi_sas_phy *phy = p; + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct device *dev = hisi_hba->dev; + int phy_no = sas_phy->id; + u32 irq_value; + irqreturn_t res = IRQ_HANDLED; + + irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2); + + if (!(irq_value & CHL_INT2_SL_RX_BC_ACK_MSK)) { + dev_err(dev, "bcast: irq_value = %x not set enable bit\n", + irq_value); + res = IRQ_NONE; + goto end; + } + + hisi_sas_phy_bcast(phy); + +end: + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, + CHL_INT2_SL_RX_BC_ACK_MSK); + + return res; +} + +static irqreturn_t int_abnormal_v1_hw(int irq, void *p) +{ + struct hisi_sas_phy *phy = p; + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct device *dev = hisi_hba->dev; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + u32 irq_value, irq_mask_old; + int phy_no = sas_phy->id; + + /* mask_int0 */ + irq_mask_old = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0_MSK, 0x3fffff); + + /* read int0 */ + irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); + + if (irq_value & CHL_INT0_PHYCTRL_NOTRDY_MSK) { + u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); + + hisi_sas_phy_down(hisi_hba, phy_no, + (phy_state & 1 << phy_no) ? 
1 : 0, + GFP_ATOMIC); + } + + if (irq_value & CHL_INT0_ID_TIMEOUT_MSK) + dev_dbg(dev, "abnormal: ID_TIMEOUT phy%d identify timeout\n", + phy_no); + + if (irq_value & CHL_INT0_DWS_LOST_MSK) + dev_dbg(dev, "abnormal: DWS_LOST phy%d dws lost\n", phy_no); + + if (irq_value & CHL_INT0_SN_FAIL_NGR_MSK) + dev_dbg(dev, "abnormal: SN_FAIL_NGR phy%d sn fail ngr\n", + phy_no); + + if (irq_value & CHL_INT0_SL_IDAF_FAIL_MSK || + irq_value & CHL_INT0_SL_OPAF_FAIL_MSK) + dev_dbg(dev, "abnormal: SL_ID/OPAF_FAIL phy%d check adr frm err\n", + phy_no); + + if (irq_value & CHL_INT0_SL_PS_FAIL_OFF) + dev_dbg(dev, "abnormal: SL_PS_FAIL phy%d fail\n", phy_no); + + /* write to zero */ + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, irq_value); + + if (irq_value & CHL_INT0_PHYCTRL_NOTRDY_MSK) + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0_MSK, + 0x3fffff & ~CHL_INT0_MSK_PHYCTRL_NOTRDY_MSK); + else + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0_MSK, + irq_mask_old); + + return IRQ_HANDLED; +} + +static irqreturn_t cq_interrupt_v1_hw(int irq, void *p) +{ + struct hisi_sas_cq *cq = p; + struct hisi_hba *hisi_hba = cq->hisi_hba; + struct hisi_sas_slot *slot; + int queue = cq->id; + struct hisi_sas_complete_v1_hdr *complete_queue = + (struct hisi_sas_complete_v1_hdr *) + hisi_hba->complete_hdr[queue]; + u32 rd_point = cq->rd_point, wr_point; + + spin_lock(&hisi_hba->lock); + hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); + wr_point = hisi_sas_read32(hisi_hba, + COMPL_Q_0_WR_PTR + (0x14 * queue)); + + while (rd_point != wr_point) { + struct hisi_sas_complete_v1_hdr *complete_hdr; + int idx; + u32 cmplt_hdr_data; + + complete_hdr = &complete_queue[rd_point]; + cmplt_hdr_data = le32_to_cpu(complete_hdr->data); + idx = (cmplt_hdr_data & CMPLT_HDR_IPTT_MSK) >> + CMPLT_HDR_IPTT_OFF; + slot = &hisi_hba->slot_info[idx]; + + /* The completion queue and queue slot index are not + * necessarily the same as the delivery queue and + * queue slot index. 
+ */ + slot->cmplt_queue_slot = rd_point; + slot->cmplt_queue = queue; + slot_complete_v1_hw(hisi_hba, slot); + + if (++rd_point >= HISI_SAS_QUEUE_SLOTS) + rd_point = 0; + } + + /* update rd_point */ + cq->rd_point = rd_point; + hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + spin_unlock(&hisi_hba->lock); + + return IRQ_HANDLED; +} + +static irqreturn_t fatal_ecc_int_v1_hw(int irq, void *p) +{ + struct hisi_hba *hisi_hba = p; + struct device *dev = hisi_hba->dev; + u32 ecc_int = hisi_sas_read32(hisi_hba, SAS_ECC_INTR); + + if (ecc_int & SAS_ECC_INTR_DQ_ECC1B_MSK) { + u32 ecc_err = hisi_sas_read32(hisi_hba, HGC_ECC_ERR); + + panic("%s: Fatal DQ 1b ECC interrupt (0x%x)\n", + dev_name(dev), ecc_err); + } + + if (ecc_int & SAS_ECC_INTR_DQ_ECCBAD_MSK) { + u32 addr = (hisi_sas_read32(hisi_hba, HGC_DQ_ECC_ADDR) & + HGC_DQ_ECC_ADDR_BAD_MSK) >> + HGC_DQ_ECC_ADDR_BAD_OFF; + + panic("%s: Fatal DQ RAM ECC interrupt @ 0x%08x\n", + dev_name(dev), addr); + } + + if (ecc_int & SAS_ECC_INTR_IOST_ECC1B_MSK) { + u32 ecc_err = hisi_sas_read32(hisi_hba, HGC_ECC_ERR); + + panic("%s: Fatal IOST 1b ECC interrupt (0x%x)\n", + dev_name(dev), ecc_err); + } + + if (ecc_int & SAS_ECC_INTR_IOST_ECCBAD_MSK) { + u32 addr = (hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR) & + HGC_IOST_ECC_ADDR_BAD_MSK) >> + HGC_IOST_ECC_ADDR_BAD_OFF; + + panic("%s: Fatal IOST RAM ECC interrupt @ 0x%08x\n", + dev_name(dev), addr); + } + + if (ecc_int & SAS_ECC_INTR_ITCT_ECCBAD_MSK) { + u32 addr = (hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR) & + HGC_ITCT_ECC_ADDR_BAD_MSK) >> + HGC_ITCT_ECC_ADDR_BAD_OFF; + + panic("%s: Fatal TCT RAM ECC interrupt @ 0x%08x\n", + dev_name(dev), addr); + } + + if (ecc_int & SAS_ECC_INTR_ITCT_ECC1B_MSK) { + u32 ecc_err = hisi_sas_read32(hisi_hba, HGC_ECC_ERR); + + panic("%s: Fatal ITCT 1b ECC interrupt (0x%x)\n", + dev_name(dev), ecc_err); + } + + hisi_sas_write32(hisi_hba, SAS_ECC_INTR, ecc_int | 0x3f); + + return IRQ_HANDLED; +} + +static irqreturn_t fatal_axi_int_v1_hw(int irq, void *p) +{ + struct hisi_hba *hisi_hba = p; + struct device *dev = hisi_hba->dev; + u32 axi_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC2); + u32 axi_info = hisi_sas_read32(hisi_hba, HGC_AXI_FIFO_ERR_INFO); + + if (axi_int & ENT_INT_SRC2_DQ_CFG_ERR_MSK) + panic("%s: Fatal DQ_CFG_ERR interrupt (0x%x)\n", + dev_name(dev), axi_info); + + if (axi_int & ENT_INT_SRC2_CQ_CFG_ERR_MSK) + panic("%s: Fatal CQ_CFG_ERR interrupt (0x%x)\n", + dev_name(dev), axi_info); + + if (axi_int & ENT_INT_SRC2_AXI_WRONG_INT_MSK) + panic("%s: Fatal AXI_WRONG_INT interrupt (0x%x)\n", + dev_name(dev), axi_info); + + if (axi_int & ENT_INT_SRC2_AXI_OVERLF_INT_MSK) + panic("%s: Fatal AXI_OVERLF_INT incorrect interrupt (0x%x)\n", + dev_name(dev), axi_info); + + hisi_sas_write32(hisi_hba, ENT_INT_SRC2, axi_int | 0x30000000); + + return IRQ_HANDLED; +} + +static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = { + int_bcast_v1_hw, + int_phyup_v1_hw, + int_abnormal_v1_hw +}; + +static irq_handler_t fatal_interrupts[HISI_SAS_MAX_QUEUES] = { + fatal_ecc_int_v1_hw, + fatal_axi_int_v1_hw +}; + +static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba) +{ + struct platform_device *pdev = hisi_hba->platform_dev; + struct device *dev = &pdev->dev; + int i, j, irq, rc, idx; + + for (i = 0; i < hisi_hba->n_phy; i++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[i]; + + idx = i * HISI_SAS_PHY_INT_NR; + for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) { + irq = platform_get_irq(pdev, idx); + if (irq < 0) + return irq; + + rc = devm_request_irq(dev, irq, 
phy_interrupts[j], 0, + DRV_NAME " phy", phy); + if (rc) { + dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n", + irq, rc); + return rc; + } + } + } + + idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR; + for (i = 0; i < hisi_hba->queue_count; i++, idx++) { + irq = platform_get_irq(pdev, idx); + if (irq < 0) + return irq; + + rc = devm_request_irq(dev, irq, cq_interrupt_v1_hw, 0, + DRV_NAME " cq", &hisi_hba->cq[i]); + if (rc) { + dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n", + irq, rc); + return rc; + } + } + + idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count; + for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) { + irq = platform_get_irq(pdev, idx); + if (irq < 0) + return irq; + + rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0, + DRV_NAME " fatal", hisi_hba); + if (rc) { + dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n", + irq, rc); + return rc; + } + } + + hisi_hba->cq_nvecs = hisi_hba->queue_count; + + return 0; +} + +static int interrupt_openall_v1_hw(struct hisi_hba *hisi_hba) +{ + int i; + u32 val; + + for (i = 0; i < hisi_hba->n_phy; i++) { + /* Clear interrupt status */ + val = hisi_sas_phy_read32(hisi_hba, i, CHL_INT0); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, val); + val = hisi_sas_phy_read32(hisi_hba, i, CHL_INT1); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, val); + val = hisi_sas_phy_read32(hisi_hba, i, CHL_INT2); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, val); + + /* Unmask interrupt */ + hisi_sas_phy_write32(hisi_hba, i, CHL_INT0_MSK, 0x3ce3ee); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0x17fff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8000012a); + + /* bypass chip bug mask abnormal intr */ + hisi_sas_phy_write32(hisi_hba, i, CHL_INT0_MSK, + 0x3fffff & ~CHL_INT0_MSK_PHYCTRL_NOTRDY_MSK); + } + + return 0; +} + +static int hisi_sas_v1_init(struct hisi_hba *hisi_hba) +{ + int rc; + + rc = hw_init_v1_hw(hisi_hba); + if (rc) + return rc; + + rc = interrupt_init_v1_hw(hisi_hba); + if (rc) + return rc; + + rc = interrupt_openall_v1_hw(hisi_hba); + if (rc) + return rc; + + return 0; +} + +static struct attribute *host_v1_hw_attrs[] = { + &dev_attr_phy_event_threshold.attr, + NULL +}; + +ATTRIBUTE_GROUPS(host_v1_hw); + +static const struct scsi_host_template sht_v1_hw = { + .name = DRV_NAME, + .proc_name = DRV_NAME, + .module = THIS_MODULE, + .queuecommand = sas_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, + .target_alloc = sas_target_alloc, + .slave_configure = hisi_sas_slave_configure, + .scan_finished = hisi_sas_scan_finished, + .scan_start = hisi_sas_scan_start, + .change_queue_depth = sas_change_queue_depth, + .bios_param = sas_bios_param, + .this_id = -1, + .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, + .slave_alloc = hisi_sas_slave_alloc, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif + .shost_groups = host_v1_hw_groups, + .host_reset = hisi_sas_host_reset, +}; + +static const struct hisi_sas_hw hisi_sas_v1_hw = { + .hw_init = hisi_sas_v1_init, + .setup_itct = setup_itct_v1_hw, + .sl_notify_ssp = sl_notify_ssp_v1_hw, + .clear_itct = clear_itct_v1_hw, + .prep_smp = prep_smp_v1_hw, + .prep_ssp = prep_ssp_v1_hw, + .start_delivery = start_delivery_v1_hw, + .phys_init = phys_init_v1_hw, + .phy_start = start_phy_v1_hw, + 
.phy_disable = disable_phy_v1_hw, + .phy_hard_reset = phy_hard_reset_v1_hw, + .phy_set_linkrate = phy_set_linkrate_v1_hw, + .phy_get_max_linkrate = phy_get_max_linkrate_v1_hw, + .get_wideport_bitmap = get_wideport_bitmap_v1_hw, + .complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr), + .sht = &sht_v1_hw, +}; + +static int hisi_sas_v1_probe(struct platform_device *pdev) +{ + return hisi_sas_probe(pdev, &hisi_sas_v1_hw); +} + +static const struct of_device_id sas_v1_of_match[] = { + { .compatible = "hisilicon,hip05-sas-v1",}, + {}, +}; +MODULE_DEVICE_TABLE(of, sas_v1_of_match); + +static const struct acpi_device_id sas_v1_acpi_match[] = { + { "HISI0161", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, sas_v1_acpi_match); + +static struct platform_driver hisi_sas_v1_driver = { + .probe = hisi_sas_v1_probe, + .remove_new = hisi_sas_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = sas_v1_of_match, + .acpi_match_table = ACPI_PTR(sas_v1_acpi_match), + }, +}; + +module_platform_driver(hisi_sas_v1_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Garry "); +MODULE_DESCRIPTION("HISILICON SAS controller v1 hw driver"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c new file mode 100644 index 000000000..73b378837 --- /dev/null +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -0,0 +1,3657 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2016 Linaro Ltd. + * Copyright (c) 2016 Hisilicon Limited. + */ + +#include "hisi_sas.h" +#define DRV_NAME "hisi_sas_v2_hw" + +/* global registers need init*/ +#define DLVRY_QUEUE_ENABLE 0x0 +#define IOST_BASE_ADDR_LO 0x8 +#define IOST_BASE_ADDR_HI 0xc +#define ITCT_BASE_ADDR_LO 0x10 +#define ITCT_BASE_ADDR_HI 0x14 +#define IO_BROKEN_MSG_ADDR_LO 0x18 +#define IO_BROKEN_MSG_ADDR_HI 0x1c +#define PHY_CONTEXT 0x20 +#define PHY_STATE 0x24 +#define PHY_PORT_NUM_MA 0x28 +#define PORT_STATE 0x2c +#define PORT_STATE_PHY8_PORT_NUM_OFF 16 +#define PORT_STATE_PHY8_PORT_NUM_MSK (0xf << PORT_STATE_PHY8_PORT_NUM_OFF) +#define PORT_STATE_PHY8_CONN_RATE_OFF 20 +#define PORT_STATE_PHY8_CONN_RATE_MSK (0xf << PORT_STATE_PHY8_CONN_RATE_OFF) +#define PHY_CONN_RATE 0x30 +#define HGC_TRANS_TASK_CNT_LIMIT 0x38 +#define AXI_AHB_CLK_CFG 0x3c +#define ITCT_CLR 0x44 +#define ITCT_CLR_EN_OFF 16 +#define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF) +#define ITCT_DEV_OFF 0 +#define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF) +#define AXI_USER1 0x48 +#define AXI_USER2 0x4c +#define IO_SATA_BROKEN_MSG_ADDR_LO 0x58 +#define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c +#define SATA_INITI_D2H_STORE_ADDR_LO 0x60 +#define SATA_INITI_D2H_STORE_ADDR_HI 0x64 +#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84 +#define HGC_SAS_TXFAIL_RETRY_CTRL 0x88 +#define HGC_GET_ITV_TIME 0x90 +#define DEVICE_MSG_WORK_MODE 0x94 +#define OPENA_WT_CONTI_TIME 0x9c +#define I_T_NEXUS_LOSS_TIME 0xa0 +#define MAX_CON_TIME_LIMIT_TIME 0xa4 +#define BUS_INACTIVE_LIMIT_TIME 0xa8 +#define REJECT_TO_OPEN_LIMIT_TIME 0xac +#define CFG_AGING_TIME 0xbc +#define HGC_DFX_CFG2 0xc0 +#define HGC_IOMB_PROC1_STATUS 0x104 +#define CFG_1US_TIMER_TRSH 0xcc +#define HGC_LM_DFX_STATUS2 0x128 +#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0 +#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \ + HGC_LM_DFX_STATUS2_IOSTLIST_OFF) +#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12 +#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \ + HGC_LM_DFX_STATUS2_ITCTLIST_OFF) +#define HGC_CQE_ECC_ADDR 0x13c +#define HGC_CQE_ECC_1B_ADDR_OFF 0 +#define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << 
HGC_CQE_ECC_1B_ADDR_OFF) +#define HGC_CQE_ECC_MB_ADDR_OFF 8 +#define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF) +#define HGC_IOST_ECC_ADDR 0x140 +#define HGC_IOST_ECC_1B_ADDR_OFF 0 +#define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF) +#define HGC_IOST_ECC_MB_ADDR_OFF 16 +#define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF) +#define HGC_DQE_ECC_ADDR 0x144 +#define HGC_DQE_ECC_1B_ADDR_OFF 0 +#define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF) +#define HGC_DQE_ECC_MB_ADDR_OFF 16 +#define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF) +#define HGC_INVLD_DQE_INFO 0x148 +#define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9 +#define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF) +#define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18 +#define HGC_ITCT_ECC_ADDR 0x150 +#define HGC_ITCT_ECC_1B_ADDR_OFF 0 +#define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \ + HGC_ITCT_ECC_1B_ADDR_OFF) +#define HGC_ITCT_ECC_MB_ADDR_OFF 16 +#define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \ + HGC_ITCT_ECC_MB_ADDR_OFF) +#define HGC_AXI_FIFO_ERR_INFO 0x154 +#define AXI_ERR_INFO_OFF 0 +#define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF) +#define FIFO_ERR_INFO_OFF 8 +#define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF) +#define INT_COAL_EN 0x19c +#define OQ_INT_COAL_TIME 0x1a0 +#define OQ_INT_COAL_CNT 0x1a4 +#define ENT_INT_COAL_TIME 0x1a8 +#define ENT_INT_COAL_CNT 0x1ac +#define OQ_INT_SRC 0x1b0 +#define OQ_INT_SRC_MSK 0x1b4 +#define ENT_INT_SRC1 0x1b8 +#define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0 +#define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF) +#define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8 +#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF) +#define ENT_INT_SRC2 0x1bc +#define ENT_INT_SRC3 0x1c0 +#define ENT_INT_SRC3_WP_DEPTH_OFF 8 +#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9 +#define ENT_INT_SRC3_RP_DEPTH_OFF 10 +#define ENT_INT_SRC3_AXI_OFF 11 +#define ENT_INT_SRC3_FIFO_OFF 12 +#define ENT_INT_SRC3_LM_OFF 14 +#define ENT_INT_SRC3_ITC_INT_OFF 15 +#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF) +#define ENT_INT_SRC3_ABT_OFF 16 +#define ENT_INT_SRC_MSK1 0x1c4 +#define ENT_INT_SRC_MSK2 0x1c8 +#define ENT_INT_SRC_MSK3 0x1cc +#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31 +#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF) +#define SAS_ECC_INTR 0x1e8 +#define SAS_ECC_INTR_DQE_ECC_1B_OFF 0 +#define SAS_ECC_INTR_DQE_ECC_MB_OFF 1 +#define SAS_ECC_INTR_IOST_ECC_1B_OFF 2 +#define SAS_ECC_INTR_IOST_ECC_MB_OFF 3 +#define SAS_ECC_INTR_ITCT_ECC_MB_OFF 4 +#define SAS_ECC_INTR_ITCT_ECC_1B_OFF 5 +#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 6 +#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 7 +#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 8 +#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 9 +#define SAS_ECC_INTR_CQE_ECC_1B_OFF 10 +#define SAS_ECC_INTR_CQE_ECC_MB_OFF 11 +#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 12 +#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 13 +#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 14 +#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 15 +#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 16 +#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 17 +#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 18 +#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 19 +#define SAS_ECC_INTR_MSK 0x1ec +#define HGC_ERR_STAT_EN 0x238 +#define CQE_SEND_CNT 0x248 +#define DLVRY_Q_0_BASE_ADDR_LO 0x260 +#define DLVRY_Q_0_BASE_ADDR_HI 0x264 +#define DLVRY_Q_0_DEPTH 0x268 +#define DLVRY_Q_0_WR_PTR 0x26c +#define DLVRY_Q_0_RD_PTR 0x270 +#define HYPER_STREAM_ID_EN_CFG 0xc80 
+#define OQ0_INT_SRC_MSK 0xc90 +#define COMPL_Q_0_BASE_ADDR_LO 0x4e0 +#define COMPL_Q_0_BASE_ADDR_HI 0x4e4 +#define COMPL_Q_0_DEPTH 0x4e8 +#define COMPL_Q_0_WR_PTR 0x4ec +#define COMPL_Q_0_RD_PTR 0x4f0 +#define HGC_RXM_DFX_STATUS14 0xae8 +#define HGC_RXM_DFX_STATUS14_MEM0_OFF 0 +#define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS14_MEM0_OFF) +#define HGC_RXM_DFX_STATUS14_MEM1_OFF 9 +#define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS14_MEM1_OFF) +#define HGC_RXM_DFX_STATUS14_MEM2_OFF 18 +#define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS14_MEM2_OFF) +#define HGC_RXM_DFX_STATUS15 0xaec +#define HGC_RXM_DFX_STATUS15_MEM3_OFF 0 +#define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS15_MEM3_OFF) +/* phy registers need init */ +#define PORT_BASE (0x2000) + +#define PHY_CFG (PORT_BASE + 0x0) +#define HARD_PHY_LINKRATE (PORT_BASE + 0x4) +#define PHY_CFG_ENA_OFF 0 +#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF) +#define PHY_CFG_DC_OPT_OFF 2 +#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF) +#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8) +#define PROG_PHY_LINK_RATE_MAX_OFF 0 +#define PROG_PHY_LINK_RATE_MAX_MSK (0xff << PROG_PHY_LINK_RATE_MAX_OFF) +#define PHY_CTRL (PORT_BASE + 0x14) +#define PHY_CTRL_RESET_OFF 0 +#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) +#define SAS_PHY_CTRL (PORT_BASE + 0x20) +#define SL_CFG (PORT_BASE + 0x84) +#define PHY_PCN (PORT_BASE + 0x44) +#define SL_TOUT_CFG (PORT_BASE + 0x8c) +#define SL_CONTROL (PORT_BASE + 0x94) +#define SL_CONTROL_NOTIFY_EN_OFF 0 +#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) +#define SL_CONTROL_CTA_OFF 17 +#define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF) +#define RX_PRIMS_STATUS (PORT_BASE + 0x98) +#define RX_BCAST_CHG_OFF 1 +#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF) +#define TX_ID_DWORD0 (PORT_BASE + 0x9c) +#define TX_ID_DWORD1 (PORT_BASE + 0xa0) +#define TX_ID_DWORD2 (PORT_BASE + 0xa4) +#define TX_ID_DWORD3 (PORT_BASE + 0xa8) +#define TX_ID_DWORD4 (PORT_BASE + 0xaC) +#define TX_ID_DWORD5 (PORT_BASE + 0xb0) +#define TX_ID_DWORD6 (PORT_BASE + 0xb4) +#define TXID_AUTO (PORT_BASE + 0xb8) +#define TXID_AUTO_CT3_OFF 1 +#define TXID_AUTO_CT3_MSK (0x1 << TXID_AUTO_CT3_OFF) +#define TXID_AUTO_CTB_OFF 11 +#define TXID_AUTO_CTB_MSK (0x1 << TXID_AUTO_CTB_OFF) +#define TX_HARDRST_OFF 2 +#define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF) +#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) +#define RX_IDAF_DWORD1 (PORT_BASE + 0xc8) +#define RX_IDAF_DWORD2 (PORT_BASE + 0xcc) +#define RX_IDAF_DWORD3 (PORT_BASE + 0xd0) +#define RX_IDAF_DWORD4 (PORT_BASE + 0xd4) +#define RX_IDAF_DWORD5 (PORT_BASE + 0xd8) +#define RX_IDAF_DWORD6 (PORT_BASE + 0xdc) +#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) +#define CON_CONTROL (PORT_BASE + 0x118) +#define CON_CONTROL_CFG_OPEN_ACC_STP_OFF 0 +#define CON_CONTROL_CFG_OPEN_ACC_STP_MSK \ + (0x01 << CON_CONTROL_CFG_OPEN_ACC_STP_OFF) +#define DONE_RECEIVED_TIME (PORT_BASE + 0x11c) +#define CHL_INT0 (PORT_BASE + 0x1b4) +#define CHL_INT0_HOTPLUG_TOUT_OFF 0 +#define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF) +#define CHL_INT0_SL_RX_BCST_ACK_OFF 1 +#define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF) +#define CHL_INT0_SL_PHY_ENABLE_OFF 2 +#define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF) +#define CHL_INT0_NOT_RDY_OFF 4 +#define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF) +#define CHL_INT0_PHY_RDY_OFF 5 +#define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF) 
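[Editorial aside, not part of the patch: every *_OFF/*_MSK pair in the register map above is consumed with the usual mask-and-shift field idiom via the driver's read/write helpers. A minimal generic sketch under that assumption; the helper names below are made up for illustration:]

/* Illustrative sketch only: extract and replace a packed register field. */
static unsigned int get_field(unsigned int reg_val, unsigned int msk,
			      unsigned int off)
{
	return (reg_val & msk) >> off;		/* pull the field out */
}

static unsigned int set_field(unsigned int reg_val, unsigned int msk,
			      unsigned int off, unsigned int val)
{
	return (reg_val & ~msk) | ((val << off) & msk);	/* write it back */
}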
+#define CHL_INT1 (PORT_BASE + 0x1b8) +#define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15 +#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF) +#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17 +#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF) +#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19 +#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20 +#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21 +#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22 +#define CHL_INT2 (PORT_BASE + 0x1bc) +#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0 +#define CHL_INT0_MSK (PORT_BASE + 0x1c0) +#define CHL_INT1_MSK (PORT_BASE + 0x1c4) +#define CHL_INT2_MSK (PORT_BASE + 0x1c8) +#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) +#define DMA_TX_DFX0 (PORT_BASE + 0x200) +#define DMA_TX_DFX1 (PORT_BASE + 0x204) +#define DMA_TX_DFX1_IPTT_OFF 0 +#define DMA_TX_DFX1_IPTT_MSK (0xffff << DMA_TX_DFX1_IPTT_OFF) +#define DMA_TX_FIFO_DFX0 (PORT_BASE + 0x240) +#define PORT_DFX0 (PORT_BASE + 0x258) +#define LINK_DFX2 (PORT_BASE + 0X264) +#define LINK_DFX2_RCVR_HOLD_STS_OFF 9 +#define LINK_DFX2_RCVR_HOLD_STS_MSK (0x1 << LINK_DFX2_RCVR_HOLD_STS_OFF) +#define LINK_DFX2_SEND_HOLD_STS_OFF 10 +#define LINK_DFX2_SEND_HOLD_STS_MSK (0x1 << LINK_DFX2_SEND_HOLD_STS_OFF) +#define SAS_ERR_CNT4_REG (PORT_BASE + 0x290) +#define SAS_ERR_CNT6_REG (PORT_BASE + 0x298) +#define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) +#define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4) +#define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8) +#define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc) +#define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0) +#define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4) +#define DMA_TX_STATUS (PORT_BASE + 0x2d0) +#define DMA_TX_STATUS_BUSY_OFF 0 +#define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF) +#define DMA_RX_STATUS (PORT_BASE + 0x2e8) +#define DMA_RX_STATUS_BUSY_OFF 0 +#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) + +#define AXI_CFG (0x5100) +#define AM_CFG_MAX_TRANS (0x5010) +#define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014) + +#define AXI_MASTER_CFG_BASE (0x5000) +#define AM_CTRL_GLOBAL (0x0) +#define AM_CURR_TRANS_RETURN (0x150) + +/* HW dma structures */ +/* Delivery queue header */ +/* dw0 */ +#define CMD_HDR_ABORT_FLAG_OFF 0 +#define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF) +#define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2 +#define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF) +#define CMD_HDR_RESP_REPORT_OFF 5 +#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) +#define CMD_HDR_TLR_CTRL_OFF 6 +#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) +#define CMD_HDR_PHY_ID_OFF 8 +#define CMD_HDR_PHY_ID_MSK (0x1ff << CMD_HDR_PHY_ID_OFF) +#define CMD_HDR_FORCE_PHY_OFF 17 +#define CMD_HDR_FORCE_PHY_MSK (0x1 << CMD_HDR_FORCE_PHY_OFF) +#define CMD_HDR_PORT_OFF 18 +#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) +#define CMD_HDR_PRIORITY_OFF 27 +#define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF) +#define CMD_HDR_CMD_OFF 29 +#define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF) +/* dw1 */ +#define CMD_HDR_DIR_OFF 5 +#define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF) +#define CMD_HDR_RESET_OFF 7 +#define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF) +#define CMD_HDR_VDTL_OFF 10 +#define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF) +#define CMD_HDR_FRAME_TYPE_OFF 11 +#define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF) +#define CMD_HDR_DEV_ID_OFF 16 +#define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF) +/* dw2 */ +#define CMD_HDR_CFL_OFF 0 +#define CMD_HDR_CFL_MSK (0x1ff << 
CMD_HDR_CFL_OFF) +#define CMD_HDR_NCQ_TAG_OFF 10 +#define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF) +#define CMD_HDR_MRFL_OFF 15 +#define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF) +#define CMD_HDR_SG_MOD_OFF 24 +#define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF) +#define CMD_HDR_FIRST_BURST_OFF 26 +#define CMD_HDR_FIRST_BURST_MSK (0x1 << CMD_HDR_SG_MOD_OFF) +/* dw3 */ +#define CMD_HDR_IPTT_OFF 0 +#define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF) +/* dw6 */ +#define CMD_HDR_DIF_SGL_LEN_OFF 0 +#define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF) +#define CMD_HDR_DATA_SGL_LEN_OFF 16 +#define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF) +#define CMD_HDR_ABORT_IPTT_OFF 16 +#define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF) + +/* Completion header */ +/* dw0 */ +#define CMPLT_HDR_ERR_PHASE_OFF 2 +#define CMPLT_HDR_ERR_PHASE_MSK (0xff << CMPLT_HDR_ERR_PHASE_OFF) +#define CMPLT_HDR_RSPNS_XFRD_OFF 10 +#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) +#define CMPLT_HDR_ERX_OFF 12 +#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF) +#define CMPLT_HDR_ABORT_STAT_OFF 13 +#define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF) +/* abort_stat */ +#define STAT_IO_NOT_VALID 0x1 +#define STAT_IO_NO_DEVICE 0x2 +#define STAT_IO_COMPLETE 0x3 +#define STAT_IO_ABORTED 0x4 +/* dw1 */ +#define CMPLT_HDR_IPTT_OFF 0 +#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF) +#define CMPLT_HDR_DEV_ID_OFF 16 +#define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF) + +/* ITCT header */ +/* qw0 */ +#define ITCT_HDR_DEV_TYPE_OFF 0 +#define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF) +#define ITCT_HDR_VALID_OFF 2 +#define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF) +#define ITCT_HDR_MCR_OFF 5 +#define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF) +#define ITCT_HDR_VLN_OFF 9 +#define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF) +#define ITCT_HDR_SMP_TIMEOUT_OFF 16 +#define ITCT_HDR_SMP_TIMEOUT_8US 1 +#define ITCT_HDR_SMP_TIMEOUT (ITCT_HDR_SMP_TIMEOUT_8US * \ + 250) /* 2ms */ +#define ITCT_HDR_AWT_CONTINUE_OFF 25 +#define ITCT_HDR_PORT_ID_OFF 28 +#define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF) +/* qw2 */ +#define ITCT_HDR_INLT_OFF 0 +#define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF) +#define ITCT_HDR_BITLT_OFF 16 +#define ITCT_HDR_BITLT_MSK (0xffffULL << ITCT_HDR_BITLT_OFF) +#define ITCT_HDR_MCTLT_OFF 32 +#define ITCT_HDR_MCTLT_MSK (0xffffULL << ITCT_HDR_MCTLT_OFF) +#define ITCT_HDR_RTOLT_OFF 48 +#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) + +#define HISI_SAS_FATAL_INT_NR 2 + +struct hisi_sas_complete_v2_hdr { + __le32 dw0; + __le32 dw1; + __le32 act; + __le32 dw3; +}; + +struct hisi_sas_err_record_v2 { + /* dw0 */ + __le32 trans_tx_fail_type; + + /* dw1 */ + __le32 trans_rx_fail_type; + + /* dw2 */ + __le16 dma_tx_err_type; + __le16 sipc_rx_err_type; + + /* dw3 */ + __le32 dma_rx_err_type; +}; + +struct signal_attenuation_s { + u32 de_emphasis; + u32 preshoot; + u32 boost; +}; + +struct sig_atten_lu_s { + const struct signal_attenuation_s *att; + u32 sas_phy_ctrl; +}; + +static const struct hisi_sas_hw_error one_bit_ecc_errors[] = { + { + .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF), + .msk = HGC_DQE_ECC_1B_ADDR_MSK, + .shift = HGC_DQE_ECC_1B_ADDR_OFF, + .msg = "hgc_dqe_ecc1b_intr", + .reg = HGC_DQE_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF), + .msk = HGC_IOST_ECC_1B_ADDR_MSK, + .shift = HGC_IOST_ECC_1B_ADDR_OFF, + .msg = "hgc_iost_ecc1b_intr", + .reg = 
HGC_IOST_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF), + .msk = HGC_ITCT_ECC_1B_ADDR_MSK, + .shift = HGC_ITCT_ECC_1B_ADDR_OFF, + .msg = "hgc_itct_ecc1b_intr", + .reg = HGC_ITCT_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF), + .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK, + .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF, + .msg = "hgc_iostl_ecc1b_intr", + .reg = HGC_LM_DFX_STATUS2, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF), + .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK, + .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF, + .msg = "hgc_itctl_ecc1b_intr", + .reg = HGC_LM_DFX_STATUS2, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF), + .msk = HGC_CQE_ECC_1B_ADDR_MSK, + .shift = HGC_CQE_ECC_1B_ADDR_OFF, + .msg = "hgc_cqe_ecc1b_intr", + .reg = HGC_CQE_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF, + .msg = "rxm_mem0_ecc1b_intr", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF, + .msg = "rxm_mem1_ecc1b_intr", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF, + .msg = "rxm_mem2_ecc1b_intr", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF), + .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK, + .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF, + .msg = "rxm_mem3_ecc1b_intr", + .reg = HGC_RXM_DFX_STATUS15, + }, +}; + +static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = { + { + .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF), + .msk = HGC_DQE_ECC_MB_ADDR_MSK, + .shift = HGC_DQE_ECC_MB_ADDR_OFF, + .msg = "hgc_dqe_eccbad_intr", + .reg = HGC_DQE_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF), + .msk = HGC_IOST_ECC_MB_ADDR_MSK, + .shift = HGC_IOST_ECC_MB_ADDR_OFF, + .msg = "hgc_iost_eccbad_intr", + .reg = HGC_IOST_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF), + .msk = HGC_ITCT_ECC_MB_ADDR_MSK, + .shift = HGC_ITCT_ECC_MB_ADDR_OFF, + .msg = "hgc_itct_eccbad_intr", + .reg = HGC_ITCT_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF), + .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK, + .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF, + .msg = "hgc_iostl_eccbad_intr", + .reg = HGC_LM_DFX_STATUS2, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF), + .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK, + .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF, + .msg = "hgc_itctl_eccbad_intr", + .reg = HGC_LM_DFX_STATUS2, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF), + .msk = HGC_CQE_ECC_MB_ADDR_MSK, + .shift = HGC_CQE_ECC_MB_ADDR_OFF, + .msg = "hgc_cqe_eccbad_intr", + .reg = HGC_CQE_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF, + .msg = "rxm_mem0_eccbad_intr", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF, + .msg = "rxm_mem1_eccbad_intr", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF, + .msg = "rxm_mem2_eccbad_intr", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = 
BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK, + .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF, + .msg = "rxm_mem3_eccbad_intr", + .reg = HGC_RXM_DFX_STATUS15, + }, +}; + +enum { + HISI_SAS_PHY_PHY_UPDOWN, + HISI_SAS_PHY_CHNL_INT, + HISI_SAS_PHY_INT_NR +}; + +enum { + TRANS_TX_FAIL_BASE = 0x0, /* dw0 */ + TRANS_RX_FAIL_BASE = 0x20, /* dw1 */ + DMA_TX_ERR_BASE = 0x40, /* dw2 bit 15-0 */ + SIPC_RX_ERR_BASE = 0x50, /* dw2 bit 31-16*/ + DMA_RX_ERR_BASE = 0x60, /* dw3 */ + + /* trans tx*/ + TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS = TRANS_TX_FAIL_BASE, /* 0x0 */ + TRANS_TX_ERR_PHY_NOT_ENABLE, /* 0x1 */ + TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, /* 0x2 */ + TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, /* 0x3 */ + TRANS_TX_OPEN_CNX_ERR_BY_OTHER, /* 0x4 */ + RESERVED0, /* 0x5 */ + TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, /* 0x6 */ + TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, /* 0x7 */ + TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, /* 0x8 */ + TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, /* 0x9 */ + TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, /* 0xa */ + TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, /* 0xb */ + TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, /* 0xc */ + TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, /* 0xd */ + TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, /* 0xe */ + TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, /* 0xf */ + TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, /* 0x10 */ + TRANS_TX_ERR_FRAME_TXED, /* 0x11 */ + TRANS_TX_ERR_WITH_BREAK_TIMEOUT, /* 0x12 */ + TRANS_TX_ERR_WITH_BREAK_REQUEST, /* 0x13 */ + TRANS_TX_ERR_WITH_BREAK_RECEVIED, /* 0x14 */ + TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, /* 0x15 */ + TRANS_TX_ERR_WITH_CLOSE_NORMAL, /* 0x16 for ssp*/ + TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, /* 0x17 */ + TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x18 */ + TRANS_TX_ERR_WITH_CLOSE_COMINIT, /* 0x19 */ + TRANS_TX_ERR_WITH_NAK_RECEVIED, /* 0x1a for ssp*/ + TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, /* 0x1b for ssp*/ + /*IO_TX_ERR_WITH_R_ERR_RECEVIED, [> 0x1b for sata/stp<] */ + TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, /* 0x1c for ssp */ + /*IO_RX_ERR_WITH_SATA_DEVICE_LOST 0x1c for sata/stp */ + TRANS_TX_ERR_WITH_IPTT_CONFLICT, /* 0x1d for ssp/smp */ + TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, /* 0x1e */ + /*IO_TX_ERR_WITH_SYNC_RXD, [> 0x1e <] for sata/stp */ + TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, /* 0x1f for sata/stp */ + + /* trans rx */ + TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x20 */ + TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x21 for sata/stp */ + TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x22 for ssp/smp */ + /*IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, [> 0x22 <] for sata/stp */ + TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x23 for sata/stp */ + TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x24 for sata/stp */ + TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x25 for smp */ + /*IO_ERR_WITH_RXFIS_TX SYNCP, [> 0x25 <] for sata/stp */ + TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x26 for sata/stp*/ + TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x27 */ + TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x28 */ + TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x29 */ + TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x2a */ + RESERVED1, /* 0x2b */ + TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x2c */ + TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x2d */ + TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x2e */ + TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x2f */ + TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x30 for ssp/smp */ + TRANS_RX_ERR_WITH_BAD_HASH, /* 0x31 for ssp */ + /*IO_RX_ERR_WITH_FIS_TOO_SHORT, [> 0x31 <] for sata/stp */ + TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x32 for ssp*/ + /*IO_RX_ERR_WITH_FIS_TOO_LONG, [> 0x32 <] for 
sata/stp */ + TRANS_RX_SSP_FRM_LEN_ERR, /* 0x33 for ssp */ + /*IO_RX_ERR_WITH_SATA_DEVICE_LOST, [> 0x33 <] for sata */ + RESERVED2, /* 0x34 */ + RESERVED3, /* 0x35 */ + RESERVED4, /* 0x36 */ + RESERVED5, /* 0x37 */ + TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x38 */ + TRANS_RX_SMP_FRM_LEN_ERR, /* 0x39 */ + TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x3a */ + RESERVED6, /* 0x3b */ + RESERVED7, /* 0x3c */ + RESERVED8, /* 0x3d */ + RESERVED9, /* 0x3e */ + TRANS_RX_R_ERR, /* 0x3f */ + + /* dma tx */ + DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x40 */ + DMA_TX_DIF_APP_ERR, /* 0x41 */ + DMA_TX_DIF_RPP_ERR, /* 0x42 */ + DMA_TX_DATA_SGL_OVERFLOW, /* 0x43 */ + DMA_TX_DIF_SGL_OVERFLOW, /* 0x44 */ + DMA_TX_UNEXP_XFER_ERR, /* 0x45 */ + DMA_TX_UNEXP_RETRANS_ERR, /* 0x46 */ + DMA_TX_XFER_LEN_OVERFLOW, /* 0x47 */ + DMA_TX_XFER_OFFSET_ERR, /* 0x48 */ + DMA_TX_RAM_ECC_ERR, /* 0x49 */ + DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x4a */ + DMA_TX_MAX_ERR_CODE, + + /* sipc rx */ + SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x50 */ + SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x51 */ + SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x52 */ + SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x53 */ + SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x54 */ + SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x55 */ + SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x56 */ + SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x57 */ + SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x58 */ + SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x59 */ + SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x5a */ + SIPC_RX_MAX_ERR_CODE, + + /* dma rx */ + DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x60 */ + DMA_RX_DIF_APP_ERR, /* 0x61 */ + DMA_RX_DIF_RPP_ERR, /* 0x62 */ + DMA_RX_DATA_SGL_OVERFLOW, /* 0x63 */ + DMA_RX_DIF_SGL_OVERFLOW, /* 0x64 */ + DMA_RX_DATA_LEN_OVERFLOW, /* 0x65 */ + DMA_RX_DATA_LEN_UNDERFLOW, /* 0x66 */ + DMA_RX_DATA_OFFSET_ERR, /* 0x67 */ + RESERVED10, /* 0x68 */ + DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x69 */ + DMA_RX_RESP_BUF_OVERFLOW, /* 0x6a */ + DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x6b */ + DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x6c */ + DMA_RX_UNEXP_RDFRAME_ERR, /* 0x6d */ + DMA_RX_PIO_DATA_LEN_ERR, /* 0x6e */ + DMA_RX_RDSETUP_STATUS_ERR, /* 0x6f */ + DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x70 */ + DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x71 */ + DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x72 */ + DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x73 */ + DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x74 */ + DMA_RX_RDSETUP_OFFSET_ERR, /* 0x75 */ + DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x76 */ + DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x77 */ + DMA_RX_RAM_ECC_ERR, /* 0x78 */ + DMA_RX_UNKNOWN_FRM_ERR, /* 0x79 */ + DMA_RX_MAX_ERR_CODE, +}; + +#define HISI_SAS_COMMAND_ENTRIES_V2_HW 4096 +#define HISI_MAX_SATA_SUPPORT_V2_HW (HISI_SAS_COMMAND_ENTRIES_V2_HW/64 - 1) + +#define DIR_NO_DATA 0 +#define DIR_TO_INI 1 +#define DIR_TO_DEVICE 2 +#define DIR_RESERVED 3 + +#define ERR_ON_TX_PHASE(err_phase) (err_phase == 0x2 || \ + err_phase == 0x4 || err_phase == 0x8 ||\ + err_phase == 0x6 || err_phase == 0xa) +#define ERR_ON_RX_PHASE(err_phase) (err_phase == 0x10 || \ + err_phase == 0x20 || err_phase == 0x40) + +static void link_timeout_disable_link(struct timer_list *t); + +static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) +{ + void __iomem *regs = hisi_hba->regs + off; + + return readl(regs); +} + +static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off) +{ + void __iomem *regs = hisi_hba->regs + off; + + return readl_relaxed(regs); +} + +static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) +{ + void __iomem *regs = hisi_hba->regs + off; + + writel(val, regs); +} + +static void 
hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, + u32 off, u32 val) +{ + void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; + + writel(val, regs); +} + +static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, + int phy_no, u32 off) +{ + void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; + + return readl(regs); +} + +/* This function needs to be protected from pre-emption. */ +static int +slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, + struct domain_device *device) +{ + int sata_dev = dev_is_sata(device); + void *bitmap = hisi_hba->slot_index_tags; + struct hisi_sas_device *sas_dev = device->lldd_dev; + int sata_idx = sas_dev->sata_idx; + int start, end; + + if (!sata_dev) { + /* + * STP link SoC bug workaround: index starts from 1. + * additionally, we can only allocate odd IPTT(1~4095) + * for SAS/SMP device. + */ + start = 1; + end = hisi_hba->slot_index_count; + } else { + if (sata_idx >= HISI_MAX_SATA_SUPPORT_V2_HW) + return -EINVAL; + + /* + * For SATA device: allocate even IPTT in this interval + * [64*(sata_idx+1), 64*(sata_idx+2)], then each SATA device + * own 32 IPTTs. IPTT 0 shall not be used duing to STP link + * SoC bug workaround. So we ignore the first 32 even IPTTs. + */ + start = 64 * (sata_idx + 1); + end = 64 * (sata_idx + 2); + } + + spin_lock(&hisi_hba->lock); + while (1) { + start = find_next_zero_bit(bitmap, + hisi_hba->slot_index_count, start); + if (start >= end) { + spin_unlock(&hisi_hba->lock); + return -SAS_QUEUE_FULL; + } + /* + * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0. + */ + if (sata_dev ^ (start & 1)) + break; + start++; + } + + set_bit(start, bitmap); + spin_unlock(&hisi_hba->lock); + return start; +} + +static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx) +{ + unsigned int index; + struct device *dev = hisi_hba->dev; + void *bitmap = hisi_hba->sata_dev_bitmap; + + index = find_first_zero_bit(bitmap, HISI_MAX_SATA_SUPPORT_V2_HW); + if (index >= HISI_MAX_SATA_SUPPORT_V2_HW) { + dev_warn(dev, "alloc sata index failed, index=%d\n", index); + return false; + } + + set_bit(index, bitmap); + *idx = index; + return true; +} + + +static struct +hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) +{ + struct hisi_hba *hisi_hba = device->port->ha->lldd_ha; + struct hisi_sas_device *sas_dev = NULL; + int i, sata_dev = dev_is_sata(device); + int sata_idx = -1; + + spin_lock(&hisi_hba->lock); + + if (sata_dev) + if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx)) + goto out; + + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { + /* + * SATA device id bit0 should be 0 + */ + if (sata_dev && (i & 1)) + continue; + if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { + int queue = i % hisi_hba->queue_count; + struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; + + hisi_hba->devices[i].device_id = i; + sas_dev = &hisi_hba->devices[i]; + sas_dev->dev_status = HISI_SAS_DEV_INIT; + sas_dev->dev_type = device->dev_type; + sas_dev->hisi_hba = hisi_hba; + sas_dev->sas_device = device; + sas_dev->sata_idx = sata_idx; + sas_dev->dq = dq; + spin_lock_init(&sas_dev->lock); + INIT_LIST_HEAD(&hisi_hba->devices[i].list); + break; + } + } + +out: + spin_unlock(&hisi_hba->lock); + + return sas_dev; +} + +static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg &= ~PHY_CFG_DC_OPT_MSK; + cfg |= 1 << PHY_CFG_DC_OPT_OFF; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + +static void 
config_id_frame_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + struct sas_identify_frame identify_frame; + u32 *identify_buffer; + + memset(&identify_frame, 0, sizeof(identify_frame)); + identify_frame.dev_type = SAS_END_DEVICE; + identify_frame.frame_type = 0; + identify_frame._un1 = 1; + identify_frame.initiator_bits = SAS_PROTOCOL_ALL; + identify_frame.target_bits = SAS_PROTOCOL_NONE; + memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); + memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); + identify_frame.phy_id = phy_no; + identify_buffer = (u32 *)(&identify_frame); + + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, + __swab32(identify_buffer[0])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, + __swab32(identify_buffer[1])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, + __swab32(identify_buffer[2])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, + __swab32(identify_buffer[3])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, + __swab32(identify_buffer[4])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, + __swab32(identify_buffer[5])); +} + +static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_device *sas_dev) +{ + struct domain_device *device = sas_dev->sas_device; + struct device *dev = hisi_hba->dev; + u64 qw0, device_id = sas_dev->device_id; + struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; + struct domain_device *parent_dev = device->parent; + struct asd_sas_port *sas_port = device->port; + struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + u64 sas_addr; + + memset(itct, 0, sizeof(*itct)); + + /* qw0 */ + qw0 = 0; + switch (sas_dev->dev_type) { + case SAS_END_DEVICE: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: + qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF; + break; + case SAS_SATA_DEV: + case SAS_SATA_PENDING: + if (parent_dev && dev_is_expander(parent_dev->dev_type)) + qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF; + else + qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF; + break; + default: + dev_warn(dev, "setup itct: unsupported dev type (%d)\n", + sas_dev->dev_type); + } + + qw0 |= ((1 << ITCT_HDR_VALID_OFF) | + (device->linkrate << ITCT_HDR_MCR_OFF) | + (1 << ITCT_HDR_VLN_OFF) | + (ITCT_HDR_SMP_TIMEOUT << ITCT_HDR_SMP_TIMEOUT_OFF) | + (1 << ITCT_HDR_AWT_CONTINUE_OFF) | + (port->id << ITCT_HDR_PORT_ID_OFF)); + itct->qw0 = cpu_to_le64(qw0); + + /* qw1 */ + memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE); + itct->sas_addr = cpu_to_le64(__swab64(sas_addr)); + + /* qw2 */ + if (!dev_is_sata(device)) + itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) | + (0x1ULL << ITCT_HDR_BITLT_OFF) | + (0x32ULL << ITCT_HDR_MCTLT_OFF) | + (0x1ULL << ITCT_HDR_RTOLT_OFF)); +} + +static int clear_itct_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_device *sas_dev) +{ + DECLARE_COMPLETION_ONSTACK(completion); + u64 dev_id = sas_dev->device_id; + struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; + u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); + struct device *dev = hisi_hba->dev; + int i; + + sas_dev->completion = &completion; + + /* clear the itct interrupt state */ + if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, + ENT_INT_SRC3_ITC_INT_MSK); + + /* need to set register twice to clear ITCT for v2 hw */ + for (i = 0; i < 2; i++) { + reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); + hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); + if 
(!wait_for_completion_timeout(sas_dev->completion, + HISI_SAS_CLEAR_ITCT_TIMEOUT)) { + dev_warn(dev, "failed to clear ITCT\n"); + return -ETIMEDOUT; + } + + memset(itct, 0, sizeof(struct hisi_sas_itct)); + } + return 0; +} + +static void free_device_v2_hw(struct hisi_sas_device *sas_dev) +{ + struct hisi_hba *hisi_hba = sas_dev->hisi_hba; + + /* SoC bug workaround */ + if (dev_is_sata(sas_dev->sas_device)) + clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap); +} + +static int reset_hw_v2_hw(struct hisi_hba *hisi_hba) +{ + int i, reset_val; + u32 val; + unsigned long end_time; + struct device *dev = hisi_hba->dev; + + /* The mask needs to be set depending on the number of phys */ + if (hisi_hba->n_phy == 9) + reset_val = 0x1fffff; + else + reset_val = 0x7ffff; + + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); + + /* Disable all of the PHYs */ + for (i = 0; i < hisi_hba->n_phy; i++) { + u32 phy_cfg = hisi_sas_phy_read32(hisi_hba, i, PHY_CFG); + + phy_cfg &= ~PHY_CTRL_RESET_MSK; + hisi_sas_phy_write32(hisi_hba, i, PHY_CFG, phy_cfg); + } + udelay(50); + + /* Ensure DMA tx & rx idle */ + for (i = 0; i < hisi_hba->n_phy; i++) { + u32 dma_tx_status, dma_rx_status; + + end_time = jiffies + msecs_to_jiffies(1000); + + while (1) { + dma_tx_status = hisi_sas_phy_read32(hisi_hba, i, + DMA_TX_STATUS); + dma_rx_status = hisi_sas_phy_read32(hisi_hba, i, + DMA_RX_STATUS); + + if (!(dma_tx_status & DMA_TX_STATUS_BUSY_MSK) && + !(dma_rx_status & DMA_RX_STATUS_BUSY_MSK)) + break; + + msleep(20); + if (time_after(jiffies, end_time)) + return -EIO; + } + } + + /* Ensure axi bus idle */ + end_time = jiffies + msecs_to_jiffies(1000); + while (1) { + u32 axi_status = + hisi_sas_read32(hisi_hba, AXI_CFG); + + if (axi_status == 0) + break; + + msleep(20); + if (time_after(jiffies, end_time)) + return -EIO; + } + + if (ACPI_HANDLE(dev)) { + acpi_status s; + + s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL); + if (ACPI_FAILURE(s)) { + dev_err(dev, "Reset failed\n"); + return -EIO; + } + } else if (hisi_hba->ctrl) { + /* reset and disable clock*/ + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg, + reset_val); + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4, + reset_val); + msleep(1); + regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); + if (reset_val != (val & reset_val)) { + dev_err(dev, "SAS reset fail.\n"); + return -EIO; + } + + /* De-reset and enable clock*/ + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4, + reset_val); + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg, + reset_val); + msleep(1); + regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, + &val); + if (val & reset_val) { + dev_err(dev, "SAS de-reset fail.\n"); + return -EIO; + } + } else { + dev_err(dev, "no reset method\n"); + return -EINVAL; + } + + return 0; +} + +/* This function needs to be called after resetting SAS controller. 
*/ +static void phys_reject_stp_links_v2_hw(struct hisi_hba *hisi_hba) +{ + u32 cfg; + int phy_no; + + hisi_hba->reject_stp_links_msk = (1 << hisi_hba->n_phy) - 1; + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { + cfg = hisi_sas_phy_read32(hisi_hba, phy_no, CON_CONTROL); + if (!(cfg & CON_CONTROL_CFG_OPEN_ACC_STP_MSK)) + continue; + + cfg &= ~CON_CONTROL_CFG_OPEN_ACC_STP_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, CON_CONTROL, cfg); + } +} + +static void phys_try_accept_stp_links_v2_hw(struct hisi_hba *hisi_hba) +{ + int phy_no; + u32 dma_tx_dfx1; + + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { + if (!(hisi_hba->reject_stp_links_msk & BIT(phy_no))) + continue; + + dma_tx_dfx1 = hisi_sas_phy_read32(hisi_hba, phy_no, + DMA_TX_DFX1); + if (dma_tx_dfx1 & DMA_TX_DFX1_IPTT_MSK) { + u32 cfg = hisi_sas_phy_read32(hisi_hba, + phy_no, CON_CONTROL); + + cfg |= CON_CONTROL_CFG_OPEN_ACC_STP_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, + CON_CONTROL, cfg); + clear_bit(phy_no, &hisi_hba->reject_stp_links_msk); + } + } +} + +static const struct signal_attenuation_s x6000 = {9200, 0, 10476}; +static const struct sig_atten_lu_s sig_atten_lu[] = { + { &x6000, 0x3016a68 }, +}; + +static void init_reg_v2_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + u32 sas_phy_ctrl = 0x30b9908; + u32 signal[3]; + int i; + + /* Global registers init */ + + /* Deal with am-max-transmissions quirk */ + if (device_property_present(dev, "hip06-sas-v2-quirk-amt")) { + hisi_sas_write32(hisi_hba, AM_CFG_MAX_TRANS, 0x2020); + hisi_sas_write32(hisi_hba, AM_CFG_SINGLE_PORT_MAX_TRANS, + 0x2020); + } /* Else, use defaults -> do nothing */ + + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, + (u32)((1ULL << hisi_hba->queue_count) - 1)); + hisi_sas_write32(hisi_hba, AXI_USER1, 0xc0000000); + hisi_sas_write32(hisi_hba, AXI_USER2, 0x10000); + hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x0); + hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF); + hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1); + hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4); + hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x32); + hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1); + hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); + hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1); + hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1); + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0xc); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x60); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x3); + hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1); + hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1); + hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0x0); + hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffe20fe); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30); + for (i = 0; i < hisi_hba->queue_count; i++) + hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0); + + hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1); + hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); + + /* Get sas_phy_ctrl value to deal with TX FFE issue. 
*/ + if (!device_property_read_u32_array(dev, "hisilicon,signal-attenuation", + signal, ARRAY_SIZE(signal))) { + for (i = 0; i < ARRAY_SIZE(sig_atten_lu); i++) { + const struct sig_atten_lu_s *lookup = &sig_atten_lu[i]; + const struct signal_attenuation_s *att = lookup->att; + + if ((signal[0] == att->de_emphasis) && + (signal[1] == att->preshoot) && + (signal[2] == att->boost)) { + sas_phy_ctrl = lookup->sas_phy_ctrl; + break; + } + } + + if (i == ARRAY_SIZE(sig_atten_lu)) + dev_warn(dev, "unknown signal attenuation values, using default PHY ctrl config\n"); + } + + for (i = 0; i < hisi_hba->n_phy; i++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + u32 prog_phy_link_rate = 0x800; + + if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate < + SAS_LINK_RATE_1_5_GBPS)) { + prog_phy_link_rate = 0x855; + } else { + enum sas_linkrate max = sas_phy->phy->maximum_linkrate; + + prog_phy_link_rate = + hisi_sas_get_prog_phy_linkrate_mask(max) | + 0x800; + } + hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, + prog_phy_link_rate); + hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, sas_phy_ctrl); + hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d); + hisi_sas_phy_write32(hisi_hba, i, SL_CONTROL, 0x0); + hisi_sas_phy_write32(hisi_hba, i, TXID_AUTO, 0x2); + hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x8); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff); + hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff857fff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbfe); + hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc); + hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0); + if (hisi_hba->refclk_frequency_mhz == 66) + hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694); + /* else, do nothing -> leave it how you found it */ + } + + for (i = 0; i < hisi_hba->queue_count; i++) { + /* Delivery queue */ + hisi_sas_write32(hisi_hba, + DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14), + upper_32_bits(hisi_hba->cmd_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14), + lower_32_bits(hisi_hba->cmd_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14), + HISI_SAS_QUEUE_SLOTS); + + /* Completion queue */ + hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14), + upper_32_bits(hisi_hba->complete_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14), + lower_32_bits(hisi_hba->complete_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14), + HISI_SAS_QUEUE_SLOTS); + } + + /* itct */ + hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO, + lower_32_bits(hisi_hba->itct_dma)); + + hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI, + upper_32_bits(hisi_hba->itct_dma)); + + /* iost */ + hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO, + lower_32_bits(hisi_hba->iost_dma)); + + hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI, + upper_32_bits(hisi_hba->iost_dma)); + + /* breakpoint */ + 
hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO, + lower_32_bits(hisi_hba->breakpoint_dma)); + + hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI, + upper_32_bits(hisi_hba->breakpoint_dma)); + + /* SATA broken msg */ + hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO, + lower_32_bits(hisi_hba->sata_breakpoint_dma)); + + hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI, + upper_32_bits(hisi_hba->sata_breakpoint_dma)); + + /* SATA initial fis */ + hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO, + lower_32_bits(hisi_hba->initial_fis_dma)); + + hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI, + upper_32_bits(hisi_hba->initial_fis_dma)); +} + +static void link_timeout_enable_link(struct timer_list *t) +{ + struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer); + int i, reg_val; + + for (i = 0; i < hisi_hba->n_phy; i++) { + if (hisi_hba->reject_stp_links_msk & BIT(i)) + continue; + + reg_val = hisi_sas_phy_read32(hisi_hba, i, CON_CONTROL); + if (!(reg_val & BIT(0))) { + hisi_sas_phy_write32(hisi_hba, i, + CON_CONTROL, 0x7); + break; + } + } + + hisi_hba->timer.function = link_timeout_disable_link; + mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900)); +} + +static void link_timeout_disable_link(struct timer_list *t) +{ + struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer); + int i, reg_val; + + reg_val = hisi_sas_read32(hisi_hba, PHY_STATE); + for (i = 0; i < hisi_hba->n_phy && reg_val; i++) { + if (hisi_hba->reject_stp_links_msk & BIT(i)) + continue; + + if (reg_val & BIT(i)) { + hisi_sas_phy_write32(hisi_hba, i, + CON_CONTROL, 0x6); + break; + } + } + + hisi_hba->timer.function = link_timeout_enable_link; + mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100)); +} + +static void set_link_timer_quirk(struct hisi_hba *hisi_hba) +{ + hisi_hba->timer.function = link_timeout_disable_link; + hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000); + add_timer(&hisi_hba->timer); +} + +static int hw_init_v2_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + int rc; + + rc = reset_hw_v2_hw(hisi_hba); + if (rc) { + dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc); + return rc; + } + + msleep(100); + init_reg_v2_hw(hisi_hba); + + return 0; +} + +static void enable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg |= PHY_CFG_ENA_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + +static bool is_sata_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 context; + + context = hisi_sas_read32(hisi_hba, PHY_CONTEXT); + if (context & (1 << phy_no)) + return true; + + return false; +} + +static bool tx_fifo_is_empty_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 dfx_val; + + dfx_val = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX1); + + if (dfx_val & BIT(16)) + return false; + + return true; +} + +static bool axi_bus_is_idle_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + int i, max_loop = 1000; + struct device *dev = hisi_hba->dev; + u32 status, axi_status, dfx_val, dfx_tx_val; + + for (i = 0; i < max_loop; i++) { + status = hisi_sas_read32_relaxed(hisi_hba, + AXI_MASTER_CFG_BASE + AM_CURR_TRANS_RETURN); + + axi_status = hisi_sas_read32(hisi_hba, AXI_CFG); + dfx_val = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX1); + dfx_tx_val = hisi_sas_phy_read32(hisi_hba, + phy_no, DMA_TX_FIFO_DFX0); + + if ((status == 0x3) && (axi_status == 0x0) && + (dfx_val & BIT(20)) && (dfx_tx_val & BIT(10))) + return true; + udelay(10); + } + 
dev_err(dev, "bus is not idle phy%d, axi150:0x%x axi100:0x%x port204:0x%x port240:0x%x\n", + phy_no, status, axi_status, + dfx_val, dfx_tx_val); + return false; +} + +static bool wait_io_done_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + int i, max_loop = 1000; + struct device *dev = hisi_hba->dev; + u32 status, tx_dfx0; + + for (i = 0; i < max_loop; i++) { + status = hisi_sas_phy_read32(hisi_hba, phy_no, LINK_DFX2); + status = (status & 0x3fc0) >> 6; + + if (status != 0x1) + return true; + + tx_dfx0 = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX0); + if ((tx_dfx0 & 0x1ff) == 0x2) + return true; + udelay(10); + } + dev_err(dev, "IO not done phy%d, port264:0x%x port200:0x%x\n", + phy_no, status, tx_dfx0); + return false; +} + +static bool allowed_disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + if (tx_fifo_is_empty_v2_hw(hisi_hba, phy_no)) + return true; + + if (!axi_bus_is_idle_v2_hw(hisi_hba, phy_no)) + return false; + + if (!wait_io_done_v2_hw(hisi_hba, phy_no)) + return false; + + return true; +} + + +static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg, axi_val, dfx0_val, txid_auto; + struct device *dev = hisi_hba->dev; + + /* Close axi bus. */ + axi_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL); + axi_val |= 0x1; + hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL, axi_val); + + if (is_sata_phy_v2_hw(hisi_hba, phy_no)) { + if (allowed_disable_phy_v2_hw(hisi_hba, phy_no)) + goto do_disable; + + /* Reset host controller. */ + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + return; + } + + dfx0_val = hisi_sas_phy_read32(hisi_hba, phy_no, PORT_DFX0); + dfx0_val = (dfx0_val & 0x1fc0) >> 6; + if (dfx0_val != 0x4) + goto do_disable; + + if (!tx_fifo_is_empty_v2_hw(hisi_hba, phy_no)) { + dev_warn(dev, "phy%d, wait tx fifo need send break\n", + phy_no); + txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, + TXID_AUTO); + txid_auto |= TXID_AUTO_CTB_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, + txid_auto); + } + +do_disable: + cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + cfg &= ~PHY_CFG_ENA_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); + + /* Open axi bus. 
*/ + axi_val &= ~0x1; + hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL, axi_val); +} + +static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + config_id_frame_v2_hw(hisi_hba, phy_no); + config_phy_opt_mode_v2_hw(hisi_hba, phy_no); + enable_phy_v2_hw(hisi_hba, phy_no); +} + +static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + u32 txid_auto; + + hisi_sas_phy_enable(hisi_hba, phy_no, 0); + if (phy->identify.device_type == SAS_END_DEVICE) { + txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); + hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, + txid_auto | TX_HARDRST_MSK); + } + msleep(100); + hisi_sas_phy_enable(hisi_hba, phy_no, 1); +} + +static void phy_get_events_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_phy *sphy = sas_phy->phy; + u32 err4_reg_val, err6_reg_val; + + /* loss dword syn, phy reset problem */ + err4_reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_ERR_CNT4_REG); + + /* disparity err, invalid dword */ + err6_reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_ERR_CNT6_REG); + + sphy->loss_of_dword_sync_count += (err4_reg_val >> 16) & 0xFFFF; + sphy->phy_reset_problem_count += err4_reg_val & 0xFFFF; + sphy->invalid_dword_count += (err6_reg_val & 0xFF0000) >> 16; + sphy->running_disparity_error_count += err6_reg_val & 0xFF; +} + +static void phys_init_v2_hw(struct hisi_hba *hisi_hba) +{ + int i; + + for (i = 0; i < hisi_hba->n_phy; i++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + if (!sas_phy->phy->enabled) + continue; + + hisi_sas_phy_enable(hisi_hba, i, 1); + } +} + +static void sl_notify_ssp_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 sl_control; + + sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); + sl_control |= SL_CONTROL_NOTIFY_EN_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); + msleep(1); + sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); + sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); +} + +static enum sas_linkrate phy_get_max_linkrate_v2_hw(void) +{ + return SAS_LINK_RATE_12_0_GBPS; +} + +static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no, + struct sas_phy_linkrates *r) +{ + enum sas_linkrate max = r->maximum_linkrate; + u32 prog_phy_link_rate = 0x800; + + prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max); + hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, + prog_phy_link_rate); +} + +static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id) +{ + int i, bitmap = 0; + u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); + u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); + + for (i = 0; i < (hisi_hba->n_phy < 9 ? 
hisi_hba->n_phy : 8); i++) + if (phy_state & 1 << i) + if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) + bitmap |= 1 << i; + + if (hisi_hba->n_phy == 9) { + u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); + + if (phy_state & 1 << 8) + if (((port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >> + PORT_STATE_PHY8_PORT_NUM_OFF) == port_id) + bitmap |= 1 << 9; + } + + return bitmap; +} + +/* DQ lock must be taken here */ +static void start_delivery_v2_hw(struct hisi_sas_dq *dq) +{ + struct hisi_hba *hisi_hba = dq->hisi_hba; + struct hisi_sas_slot *s, *s1, *s2 = NULL; + int dlvry_queue = dq->id; + int wp; + + list_for_each_entry_safe(s, s1, &dq->list, delivery) { + if (!s->ready) + break; + s2 = s; + list_del(&s->delivery); + } + + if (!s2) + return; + + /* + * Ensure that memories for slots built on other CPUs is observed. + */ + smp_rmb(); + wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; + + hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); +} + +static void prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, + struct hisi_sas_cmd_hdr *hdr, + struct scatterlist *scatter, + int n_elem) +{ + struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); + struct scatterlist *sg; + int i; + + for_each_sg(scatter, sg, n_elem, i) { + struct hisi_sas_sge *entry = &sge_page->sge[i]; + + entry->addr = cpu_to_le64(sg_dma_address(sg)); + entry->page_ctrl_0 = entry->page_ctrl_1 = 0; + entry->data_len = cpu_to_le32(sg_dma_len(sg)); + entry->data_off = 0; + } + + hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); + + hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); +} + +static void prep_smp_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct domain_device *device = task->dev; + struct hisi_sas_port *port = slot->port; + struct scatterlist *sg_req; + struct hisi_sas_device *sas_dev = device->lldd_dev; + dma_addr_t req_dma_addr; + unsigned int req_len; + + /* req */ + sg_req = &task->smp_task.smp_req; + req_dma_addr = sg_dma_address(sg_req); + req_len = sg_dma_len(&task->smp_task.smp_req); + + /* create header */ + /* dw0 */ + hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | + (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */ + (2 << CMD_HDR_CMD_OFF)); /* smp */ + + /* map itct entry */ + hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) | + (1 << CMD_HDR_FRAME_TYPE_OFF) | + (DIR_NO_DATA << CMD_HDR_DIR_OFF)); + + /* dw2 */ + hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) | + (HISI_SAS_MAX_SMP_RESP_SZ / 4 << + CMD_HDR_MRFL_OFF)); + + hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); + + hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); +} + +static void prep_ssp_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct domain_device *device = task->dev; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_port *port = slot->port; + struct sas_ssp_task *ssp_task = &task->ssp_task; + struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; + struct sas_tmf_task *tmf = slot->tmf; + int has_data = 0, priority = !!tmf; + u8 *buf_cmd; + u32 dw1 = 0, dw2 = 0; + + hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | + (2 << CMD_HDR_TLR_CTRL_OFF) | + (port->id << CMD_HDR_PORT_OFF) | + 
(priority << CMD_HDR_PRIORITY_OFF) | + (1 << CMD_HDR_CMD_OFF)); /* ssp */ + + dw1 = 1 << CMD_HDR_VDTL_OFF; + if (tmf) { + dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF; + dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF; + } else { + dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF; + switch (scsi_cmnd->sc_data_direction) { + case DMA_TO_DEVICE: + has_data = 1; + dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; + break; + case DMA_FROM_DEVICE: + has_data = 1; + dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; + break; + default: + dw1 &= ~CMD_HDR_DIR_MSK; + } + } + + /* map itct entry */ + dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; + hdr->dw1 = cpu_to_le32(dw1); + + dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) + + 3) / 4) << CMD_HDR_CFL_OFF) | + ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) | + (2 << CMD_HDR_SG_MOD_OFF); + hdr->dw2 = cpu_to_le32(dw2); + + hdr->transfer_tags = cpu_to_le32(slot->idx); + + if (has_data) + prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, + slot->n_elem); + + hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); + hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); + + buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) + + sizeof(struct ssp_frame_hdr); + + memcpy(buf_cmd, &task->ssp_task.LUN, 8); + if (!tmf) { + buf_cmd[9] = task->ssp_task.task_attr; + memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, + task->ssp_task.cmd->cmd_len); + } else { + buf_cmd[10] = tmf->tmf; + switch (tmf->tmf) { + case TMF_ABORT_TASK: + case TMF_QUERY_TASK: + buf_cmd[12] = + (tmf->tag_of_task_to_be_managed >> 8) & 0xff; + buf_cmd[13] = + tmf->tag_of_task_to_be_managed & 0xff; + break; + default: + break; + } + } +} + +#define TRANS_TX_ERR 0 +#define TRANS_RX_ERR 1 +#define DMA_TX_ERR 2 +#define SIPC_RX_ERR 3 +#define DMA_RX_ERR 4 + +#define DMA_TX_ERR_OFF 0 +#define DMA_TX_ERR_MSK (0xffff << DMA_TX_ERR_OFF) +#define SIPC_RX_ERR_OFF 16 +#define SIPC_RX_ERR_MSK (0xffff << SIPC_RX_ERR_OFF) + +static int parse_trans_tx_err_code_v2_hw(u32 err_msk) +{ + static const u8 trans_tx_err_code_prio[] = { + TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS, + TRANS_TX_ERR_PHY_NOT_ENABLE, + TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, + TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, + TRANS_TX_OPEN_CNX_ERR_BY_OTHER, + RESERVED0, + TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, + TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, + TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, + TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, + TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, + TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, + TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, + TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, + TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, + TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, + TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, + TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, + TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, + TRANS_TX_ERR_WITH_CLOSE_COMINIT, + TRANS_TX_ERR_WITH_BREAK_TIMEOUT, + TRANS_TX_ERR_WITH_BREAK_REQUEST, + TRANS_TX_ERR_WITH_BREAK_RECEVIED, + TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, + TRANS_TX_ERR_WITH_CLOSE_NORMAL, + TRANS_TX_ERR_WITH_NAK_RECEVIED, + TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, + TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, + TRANS_TX_ERR_WITH_IPTT_CONFLICT, + TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, + TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, + }; + int index, i; + + for (i = 0; i < ARRAY_SIZE(trans_tx_err_code_prio); i++) { + index = trans_tx_err_code_prio[i] - TRANS_TX_FAIL_BASE; + if (err_msk & (1 << index)) + return trans_tx_err_code_prio[i]; + } + return -1; +} + +static int 
parse_trans_rx_err_code_v2_hw(u32 err_msk) +{ + static const u8 trans_rx_err_code_prio[] = { + TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR, + TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, + TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, + TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, + TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, + TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, + TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, + TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, + TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, + TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, + TRANS_RX_ERR_WITH_CLOSE_COMINIT, + TRANS_RX_ERR_WITH_BREAK_TIMEOUT, + TRANS_RX_ERR_WITH_BREAK_REQUEST, + TRANS_RX_ERR_WITH_BREAK_RECEVIED, + RESERVED1, + TRANS_RX_ERR_WITH_CLOSE_NORMAL, + TRANS_RX_ERR_WITH_DATA_LEN0, + TRANS_RX_ERR_WITH_BAD_HASH, + TRANS_RX_XRDY_WLEN_ZERO_ERR, + TRANS_RX_SSP_FRM_LEN_ERR, + RESERVED2, + RESERVED3, + RESERVED4, + RESERVED5, + TRANS_RX_ERR_WITH_BAD_FRM_TYPE, + TRANS_RX_SMP_FRM_LEN_ERR, + TRANS_RX_SMP_RESP_TIMEOUT_ERR, + RESERVED6, + RESERVED7, + RESERVED8, + RESERVED9, + TRANS_RX_R_ERR, + }; + int index, i; + + for (i = 0; i < ARRAY_SIZE(trans_rx_err_code_prio); i++) { + index = trans_rx_err_code_prio[i] - TRANS_RX_FAIL_BASE; + if (err_msk & (1 << index)) + return trans_rx_err_code_prio[i]; + } + return -1; +} + +static int parse_dma_tx_err_code_v2_hw(u32 err_msk) +{ + static const u8 dma_tx_err_code_prio[] = { + DMA_TX_UNEXP_XFER_ERR, + DMA_TX_UNEXP_RETRANS_ERR, + DMA_TX_XFER_LEN_OVERFLOW, + DMA_TX_XFER_OFFSET_ERR, + DMA_TX_RAM_ECC_ERR, + DMA_TX_DIF_LEN_ALIGN_ERR, + DMA_TX_DIF_CRC_ERR, + DMA_TX_DIF_APP_ERR, + DMA_TX_DIF_RPP_ERR, + DMA_TX_DATA_SGL_OVERFLOW, + DMA_TX_DIF_SGL_OVERFLOW, + }; + int index, i; + + for (i = 0; i < ARRAY_SIZE(dma_tx_err_code_prio); i++) { + index = dma_tx_err_code_prio[i] - DMA_TX_ERR_BASE; + err_msk = err_msk & DMA_TX_ERR_MSK; + if (err_msk & (1 << index)) + return dma_tx_err_code_prio[i]; + } + return -1; +} + +static int parse_sipc_rx_err_code_v2_hw(u32 err_msk) +{ + static const u8 sipc_rx_err_code_prio[] = { + SIPC_RX_FIS_STATUS_ERR_BIT_VLD, + SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, + SIPC_RX_FIS_STATUS_BSY_BIT_ERR, + SIPC_RX_WRSETUP_LEN_ODD_ERR, + SIPC_RX_WRSETUP_LEN_ZERO_ERR, + SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, + SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, + SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, + SIPC_RX_SATA_UNEXP_FIS_ERR, + SIPC_RX_WRSETUP_ESTATUS_ERR, + SIPC_RX_DATA_UNDERFLOW_ERR, + }; + int index, i; + + for (i = 0; i < ARRAY_SIZE(sipc_rx_err_code_prio); i++) { + index = sipc_rx_err_code_prio[i] - SIPC_RX_ERR_BASE; + err_msk = err_msk & SIPC_RX_ERR_MSK; + if (err_msk & (1 << (index + 0x10))) + return sipc_rx_err_code_prio[i]; + } + return -1; +} + +static int parse_dma_rx_err_code_v2_hw(u32 err_msk) +{ + static const u8 dma_rx_err_code_prio[] = { + DMA_RX_UNKNOWN_FRM_ERR, + DMA_RX_DATA_LEN_OVERFLOW, + DMA_RX_DATA_LEN_UNDERFLOW, + DMA_RX_DATA_OFFSET_ERR, + RESERVED10, + DMA_RX_SATA_FRAME_TYPE_ERR, + DMA_RX_RESP_BUF_OVERFLOW, + DMA_RX_UNEXP_RETRANS_RESP_ERR, + DMA_RX_UNEXP_NORM_RESP_ERR, + DMA_RX_UNEXP_RDFRAME_ERR, + DMA_RX_PIO_DATA_LEN_ERR, + DMA_RX_RDSETUP_STATUS_ERR, + DMA_RX_RDSETUP_STATUS_DRQ_ERR, + DMA_RX_RDSETUP_STATUS_BSY_ERR, + DMA_RX_RDSETUP_LEN_ODD_ERR, + DMA_RX_RDSETUP_LEN_ZERO_ERR, + DMA_RX_RDSETUP_LEN_OVER_ERR, + DMA_RX_RDSETUP_OFFSET_ERR, + DMA_RX_RDSETUP_ACTIVE_ERR, + DMA_RX_RDSETUP_ESTATUS_ERR, + DMA_RX_RAM_ECC_ERR, + DMA_RX_DIF_CRC_ERR, + DMA_RX_DIF_APP_ERR, + DMA_RX_DIF_RPP_ERR, + DMA_RX_DATA_SGL_OVERFLOW, + DMA_RX_DIF_SGL_OVERFLOW, + }; + int index, i; + + for (i = 0; i < ARRAY_SIZE(dma_rx_err_code_prio); i++) { + index = 
dma_rx_err_code_prio[i] - DMA_RX_ERR_BASE; + if (err_msk & (1 << index)) + return dma_rx_err_code_prio[i]; + } + return -1; +} + +/* by default, task resp is complete */ +static void slot_err_v2_hw(struct hisi_hba *hisi_hba, + struct sas_task *task, + struct hisi_sas_slot *slot, + int err_phase) +{ + struct task_status_struct *ts = &task->task_status; + struct hisi_sas_err_record_v2 *err_record = + hisi_sas_status_buf_addr_mem(slot); + u32 trans_tx_fail_type = le32_to_cpu(err_record->trans_tx_fail_type); + u32 trans_rx_fail_type = le32_to_cpu(err_record->trans_rx_fail_type); + u16 dma_tx_err_type = le16_to_cpu(err_record->dma_tx_err_type); + u16 sipc_rx_err_type = le16_to_cpu(err_record->sipc_rx_err_type); + u32 dma_rx_err_type = le32_to_cpu(err_record->dma_rx_err_type); + struct hisi_sas_complete_v2_hdr *complete_queue = + hisi_hba->complete_hdr[slot->cmplt_queue]; + struct hisi_sas_complete_v2_hdr *complete_hdr = + &complete_queue[slot->cmplt_queue_slot]; + u32 dw0 = le32_to_cpu(complete_hdr->dw0); + int error = -1; + + if (err_phase == 1) { + /* error in TX phase, the priority of error is: DW2 > DW0 */ + error = parse_dma_tx_err_code_v2_hw(dma_tx_err_type); + if (error == -1) + error = parse_trans_tx_err_code_v2_hw( + trans_tx_fail_type); + } else if (err_phase == 2) { + /* error in RX phase, the priority is: DW1 > DW3 > DW2 */ + error = parse_trans_rx_err_code_v2_hw(trans_rx_fail_type); + if (error == -1) { + error = parse_dma_rx_err_code_v2_hw( + dma_rx_err_type); + if (error == -1) + error = parse_sipc_rx_err_code_v2_hw( + sipc_rx_err_type); + } + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + { + switch (error) { + case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_NO_DEST; + break; + } + case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + } + case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + } + case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + } + case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + } + case DMA_RX_UNEXP_NORM_RESP_ERR: + case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION: + case DMA_RX_RESP_BUF_OVERFLOW: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + } + case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER: + { + /* not sure */ + ts->stat = SAS_DEV_NO_RESPONSE; + break; + } + case DMA_RX_DATA_LEN_OVERFLOW: + { + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + break; + } + case DMA_RX_DATA_LEN_UNDERFLOW: + { + ts->residual = trans_tx_fail_type; + ts->stat = SAS_DATA_UNDERRUN; + break; + } + case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS: + case TRANS_TX_ERR_PHY_NOT_ENABLE: + case TRANS_TX_OPEN_CNX_ERR_BY_OTHER: + case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT: + case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: + case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: + case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: + case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED: + case TRANS_TX_ERR_WITH_BREAK_TIMEOUT: + case TRANS_TX_ERR_WITH_BREAK_REQUEST: + case TRANS_TX_ERR_WITH_BREAK_RECEVIED: + case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT: + case TRANS_TX_ERR_WITH_CLOSE_NORMAL: + case TRANS_TX_ERR_WITH_CLOSE_PHYDISALE: + case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT: + case 
TRANS_TX_ERR_WITH_CLOSE_COMINIT: + case TRANS_TX_ERR_WITH_NAK_RECEVIED: + case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT: + case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT: + case TRANS_TX_ERR_WITH_IPTT_CONFLICT: + case TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR: + case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR: + case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM: + case TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN: + case TRANS_RX_ERR_WITH_BREAK_TIMEOUT: + case TRANS_RX_ERR_WITH_BREAK_REQUEST: + case TRANS_RX_ERR_WITH_BREAK_RECEVIED: + case TRANS_RX_ERR_WITH_CLOSE_NORMAL: + case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT: + case TRANS_RX_ERR_WITH_CLOSE_COMINIT: + case TRANS_TX_ERR_FRAME_TXED: + case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE: + case TRANS_RX_ERR_WITH_DATA_LEN0: + case TRANS_RX_ERR_WITH_BAD_HASH: + case TRANS_RX_XRDY_WLEN_ZERO_ERR: + case TRANS_RX_SSP_FRM_LEN_ERR: + case TRANS_RX_ERR_WITH_BAD_FRM_TYPE: + case DMA_TX_DATA_SGL_OVERFLOW: + case DMA_TX_UNEXP_XFER_ERR: + case DMA_TX_UNEXP_RETRANS_ERR: + case DMA_TX_XFER_LEN_OVERFLOW: + case DMA_TX_XFER_OFFSET_ERR: + case SIPC_RX_DATA_UNDERFLOW_ERR: + case DMA_RX_DATA_SGL_OVERFLOW: + case DMA_RX_DATA_OFFSET_ERR: + case DMA_RX_RDSETUP_LEN_ODD_ERR: + case DMA_RX_RDSETUP_LEN_ZERO_ERR: + case DMA_RX_RDSETUP_LEN_OVER_ERR: + case DMA_RX_SATA_FRAME_TYPE_ERR: + case DMA_RX_UNKNOWN_FRM_ERR: + { + /* This will request a retry */ + ts->stat = SAS_QUEUE_FULL; + slot->abort = 1; + break; + } + default: + break; + } + } + break; + case SAS_PROTOCOL_SMP: + ts->stat = SAS_SAM_STAT_CHECK_CONDITION; + break; + + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + { + switch (error) { + case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_NO_DEST; + break; + } + case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER: + { + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + } + case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + } + case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + } + case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + } + case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + } + case DMA_RX_RESP_BUF_OVERFLOW: + case DMA_RX_UNEXP_NORM_RESP_ERR: + case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + } + case DMA_RX_DATA_LEN_OVERFLOW: + { + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + break; + } + case DMA_RX_DATA_LEN_UNDERFLOW: + { + ts->residual = trans_tx_fail_type; + ts->stat = SAS_DATA_UNDERRUN; + break; + } + case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS: + case TRANS_TX_ERR_PHY_NOT_ENABLE: + case TRANS_TX_OPEN_CNX_ERR_BY_OTHER: + case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT: + case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: + case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: + case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: + case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED: + case TRANS_TX_ERR_WITH_BREAK_TIMEOUT: + case TRANS_TX_ERR_WITH_BREAK_REQUEST: + case TRANS_TX_ERR_WITH_BREAK_RECEVIED: + case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT: + case TRANS_TX_ERR_WITH_CLOSE_NORMAL: + case TRANS_TX_ERR_WITH_CLOSE_PHYDISALE: + case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT: + case 
TRANS_TX_ERR_WITH_CLOSE_COMINIT: + case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT: + case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT: + case TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS: + case TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT: + case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM: + case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR: + case TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR: + case TRANS_RX_ERR_WITH_RXFIS_CRC_ERR: + case TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN: + case TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP: + case TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN: + case TRANS_RX_ERR_WITH_BREAK_TIMEOUT: + case TRANS_RX_ERR_WITH_BREAK_REQUEST: + case TRANS_RX_ERR_WITH_BREAK_RECEVIED: + case TRANS_RX_ERR_WITH_CLOSE_NORMAL: + case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE: + case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT: + case TRANS_RX_ERR_WITH_CLOSE_COMINIT: + case TRANS_RX_ERR_WITH_DATA_LEN0: + case TRANS_RX_ERR_WITH_BAD_HASH: + case TRANS_RX_XRDY_WLEN_ZERO_ERR: + case TRANS_RX_ERR_WITH_BAD_FRM_TYPE: + case DMA_TX_DATA_SGL_OVERFLOW: + case DMA_TX_UNEXP_XFER_ERR: + case DMA_TX_UNEXP_RETRANS_ERR: + case DMA_TX_XFER_LEN_OVERFLOW: + case DMA_TX_XFER_OFFSET_ERR: + case SIPC_RX_FIS_STATUS_ERR_BIT_VLD: + case SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR: + case SIPC_RX_FIS_STATUS_BSY_BIT_ERR: + case SIPC_RX_WRSETUP_LEN_ODD_ERR: + case SIPC_RX_WRSETUP_LEN_ZERO_ERR: + case SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR: + case SIPC_RX_SATA_UNEXP_FIS_ERR: + case DMA_RX_DATA_SGL_OVERFLOW: + case DMA_RX_DATA_OFFSET_ERR: + case DMA_RX_SATA_FRAME_TYPE_ERR: + case DMA_RX_UNEXP_RDFRAME_ERR: + case DMA_RX_PIO_DATA_LEN_ERR: + case DMA_RX_RDSETUP_STATUS_ERR: + case DMA_RX_RDSETUP_STATUS_DRQ_ERR: + case DMA_RX_RDSETUP_STATUS_BSY_ERR: + case DMA_RX_RDSETUP_LEN_ODD_ERR: + case DMA_RX_RDSETUP_LEN_ZERO_ERR: + case DMA_RX_RDSETUP_LEN_OVER_ERR: + case DMA_RX_RDSETUP_OFFSET_ERR: + case DMA_RX_RDSETUP_ACTIVE_ERR: + case DMA_RX_RDSETUP_ESTATUS_ERR: + case DMA_RX_UNKNOWN_FRM_ERR: + case TRANS_RX_SSP_FRM_LEN_ERR: + case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY: + { + slot->abort = 1; + ts->stat = SAS_PHY_DOWN; + break; + } + default: + { + ts->stat = SAS_PROTO_RESPONSE; + break; + } + } + if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) + hisi_sas_sata_done(task, slot); + } + break; + default: + break; + } +} + +static void slot_complete_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct hisi_sas_device *sas_dev; + struct device *dev = hisi_hba->dev; + struct task_status_struct *ts; + struct domain_device *device; + struct sas_ha_struct *ha; + struct hisi_sas_complete_v2_hdr *complete_queue = + hisi_hba->complete_hdr[slot->cmplt_queue]; + struct hisi_sas_complete_v2_hdr *complete_hdr = + &complete_queue[slot->cmplt_queue_slot]; + unsigned long flags; + bool is_internal = slot->is_internal; + u32 dw0; + + if (unlikely(!task || !task->lldd_task || !task->dev)) + return; + + ts = &task->task_status; + device = task->dev; + ha = device->port->ha; + sas_dev = device->lldd_dev; + + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + memset(ts, 0, sizeof(*ts)); + ts->resp = SAS_TASK_COMPLETE; + + if (unlikely(!sas_dev)) { + dev_dbg(dev, "slot complete: port has no device\n"); + ts->stat = SAS_PHY_DOWN; + goto out; + } + + /* Use SAS+TMF status codes */ + dw0 = le32_to_cpu(complete_hdr->dw0); + switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> + CMPLT_HDR_ABORT_STAT_OFF) { + case STAT_IO_ABORTED: + /* this io has been aborted by abort command */ + ts->stat = 
SAS_ABORTED_TASK; + goto out; + case STAT_IO_COMPLETE: + /* internal abort command complete */ + ts->stat = TMF_RESP_FUNC_SUCC; + del_timer_sync(&slot->internal_abort_timer); + goto out; + case STAT_IO_NO_DEVICE: + ts->stat = TMF_RESP_FUNC_COMPLETE; + del_timer_sync(&slot->internal_abort_timer); + goto out; + case STAT_IO_NOT_VALID: + /* abort single io, controller don't find + * the io need to abort + */ + ts->stat = TMF_RESP_FUNC_FAILED; + del_timer_sync(&slot->internal_abort_timer); + goto out; + default: + break; + } + + if ((dw0 & CMPLT_HDR_ERX_MSK) && (!(dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) { + u32 err_phase = (dw0 & CMPLT_HDR_ERR_PHASE_MSK) + >> CMPLT_HDR_ERR_PHASE_OFF; + u32 *error_info = hisi_sas_status_buf_addr_mem(slot); + + /* Analyse error happens on which phase TX or RX */ + if (ERR_ON_TX_PHASE(err_phase)) + slot_err_v2_hw(hisi_hba, task, slot, 1); + else if (ERR_ON_RX_PHASE(err_phase)) + slot_err_v2_hw(hisi_hba, task, slot, 2); + + if (ts->stat != SAS_DATA_UNDERRUN) + dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n", + slot->idx, task, sas_dev->device_id, + complete_hdr->dw0, complete_hdr->dw1, + complete_hdr->act, complete_hdr->dw3, + error_info[0], error_info[1], + error_info[2], error_info[3]); + + if (unlikely(slot->abort)) { + if (dev_is_sata(device) && task->ata_task.use_ncq) + sas_ata_device_link_abort(device, true); + else + sas_task_abort(task); + + return; + } + goto out; + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + { + struct hisi_sas_status_buffer *status_buffer = + hisi_sas_status_buf_addr_mem(slot); + struct ssp_response_iu *iu = (struct ssp_response_iu *) + &status_buffer->iu[0]; + + sas_ssp_task_response(dev, task, iu); + break; + } + case SAS_PROTOCOL_SMP: + { + struct scatterlist *sg_resp = &task->smp_task.smp_resp; + void *to = page_address(sg_page(sg_resp)); + + ts->stat = SAS_SAM_STAT_GOOD; + + memcpy(to + sg_resp->offset, + hisi_sas_status_buf_addr_mem(slot) + + sizeof(struct hisi_sas_err_record), + sg_resp->length); + break; + } + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + { + ts->stat = SAS_SAM_STAT_GOOD; + if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) + hisi_sas_sata_done(task, slot); + break; + } + default: + ts->stat = SAS_SAM_STAT_CHECK_CONDITION; + break; + } + + if (!slot->port->port_attached) { + dev_warn(dev, "slot complete: port %d has removed\n", + slot->port->sas_port.id); + ts->stat = SAS_PHY_DOWN; + } + +out: + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + dev_info(dev, "slot complete: task(%pK) aborted\n", task); + return; + } + task->task_state_flags |= SAS_TASK_STATE_DONE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + hisi_sas_slot_task_free(hisi_hba, task, slot, true); + + if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) { + spin_lock_irqsave(&device->done_lock, flags); + if (test_bit(SAS_HA_FROZEN, &ha->state)) { + spin_unlock_irqrestore(&device->done_lock, flags); + dev_info(dev, "slot complete: task(%pK) ignored\n", + task); + return; + } + spin_unlock_irqrestore(&device->done_lock, flags); + } + + if (task->task_done) + task->task_done(task); +} + +static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct domain_device *device = task->dev; + struct domain_device *parent_dev = 
device->parent; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct asd_sas_port *sas_port = device->port; + struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + struct sas_ata_task *ata_task = &task->ata_task; + struct sas_tmf_task *tmf = slot->tmf; + u8 *buf_cmd; + int has_data = 0, hdr_tag = 0; + u32 dw0, dw1 = 0, dw2 = 0; + + /* create header */ + /* dw0 */ + dw0 = port->id << CMD_HDR_PORT_OFF; + if (parent_dev && dev_is_expander(parent_dev->dev_type)) + dw0 |= 3 << CMD_HDR_CMD_OFF; + else + dw0 |= 4 << CMD_HDR_CMD_OFF; + + if (tmf && ata_task->force_phy) { + dw0 |= CMD_HDR_FORCE_PHY_MSK; + dw0 |= (1 << ata_task->force_phy_id) << CMD_HDR_PHY_ID_OFF; + } + + hdr->dw0 = cpu_to_le32(dw0); + + /* dw1 */ + switch (task->data_dir) { + case DMA_TO_DEVICE: + has_data = 1; + dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; + break; + case DMA_FROM_DEVICE: + has_data = 1; + dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; + break; + default: + dw1 &= ~CMD_HDR_DIR_MSK; + } + + if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) && + (task->ata_task.fis.control & ATA_SRST)) + dw1 |= 1 << CMD_HDR_RESET_OFF; + + dw1 |= (hisi_sas_get_ata_protocol( + &task->ata_task.fis, task->data_dir)) + << CMD_HDR_FRAME_TYPE_OFF; + dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; + hdr->dw1 = cpu_to_le32(dw1); + + /* dw2 */ + if (task->ata_task.use_ncq) { + struct ata_queued_cmd *qc = task->uldd_task; + + hdr_tag = qc->tag; + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; + } + + dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF | + 2 << CMD_HDR_SG_MOD_OFF; + hdr->dw2 = cpu_to_le32(dw2); + + /* dw3 */ + hdr->transfer_tags = cpu_to_le32(slot->idx); + + if (has_data) + prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, + slot->n_elem); + + hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); + hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); + + buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot); + + if (likely(!task->ata_task.device_control_reg_update)) + task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ + /* fill in command FIS */ + memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); +} + +static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t) +{ + struct hisi_sas_slot *slot = from_timer(slot, t, internal_abort_timer); + struct hisi_sas_port *port = slot->port; + struct asd_sas_port *asd_sas_port; + struct asd_sas_phy *sas_phy; + + if (!port) + return; + + asd_sas_port = &port->sas_port; + + /* Kick the hardware - send break command */ + list_for_each_entry(sas_phy, &asd_sas_port->phy_list, port_phy_el) { + struct hisi_sas_phy *phy = sas_phy->lldd_phy; + struct hisi_hba *hisi_hba = phy->hisi_hba; + int phy_no = sas_phy->id; + u32 link_dfx2; + + link_dfx2 = hisi_sas_phy_read32(hisi_hba, phy_no, LINK_DFX2); + if ((link_dfx2 == LINK_DFX2_RCVR_HOLD_STS_MSK) || + (link_dfx2 & LINK_DFX2_SEND_HOLD_STS_MSK)) { + u32 txid_auto; + + txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, + TXID_AUTO); + txid_auto |= TXID_AUTO_CTB_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, + txid_auto); + return; + } + } +} + +static void prep_abort_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct sas_internal_abort_task *abort = &task->abort_task; + struct domain_device *dev = task->dev; + struct hisi_sas_cmd_hdr *hdr = 
slot->cmd_hdr; + struct hisi_sas_port *port = slot->port; + struct timer_list *timer = &slot->internal_abort_timer; + struct hisi_sas_device *sas_dev = dev->lldd_dev; + + /* setup the quirk timer */ + timer_setup(timer, hisi_sas_internal_abort_quirk_timeout, 0); + /* Set the timeout to 10ms less than internal abort timeout */ + mod_timer(timer, jiffies + msecs_to_jiffies(100)); + + /* dw0 */ + hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/ + (port->id << CMD_HDR_PORT_OFF) | + (dev_is_sata(dev) << + CMD_HDR_ABORT_DEVICE_TYPE_OFF) | + (abort->type << CMD_HDR_ABORT_FLAG_OFF)); + + /* dw1 */ + hdr->dw1 = cpu_to_le32(sas_dev->device_id << CMD_HDR_DEV_ID_OFF); + + /* dw7 */ + hdr->dw7 = cpu_to_le32(abort->tag << CMD_HDR_ABORT_IPTT_OFF); + hdr->transfer_tags = cpu_to_le32(slot->idx); +} + +static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) +{ + int i, res = IRQ_HANDLED; + u32 port_id, link_rate; + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct device *dev = hisi_hba->dev; + u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; + struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; + + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); + + if (is_sata_phy_v2_hw(hisi_hba, phy_no)) + goto end; + + del_timer(&phy->timer); + + if (phy_no == 8) { + u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); + + port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >> + PORT_STATE_PHY8_PORT_NUM_OFF; + link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >> + PORT_STATE_PHY8_CONN_RATE_OFF; + } else { + port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); + port_id = (port_id >> (4 * phy_no)) & 0xf; + link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); + link_rate = (link_rate >> (phy_no * 4)) & 0xf; + } + + if (port_id == 0xf) { + dev_err(dev, "phyup: phy%d invalid portid\n", phy_no); + res = IRQ_NONE; + goto end; + } + + for (i = 0; i < 6; i++) { + u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no, + RX_IDAF_DWORD0 + (i * 4)); + frame_rcvd[i] = __swab32(idaf); + } + + sas_phy->linkrate = link_rate; + sas_phy->oob_mode = SAS_OOB_MODE; + memcpy(sas_phy->attached_sas_addr, &id->sas_addr, SAS_ADDR_SIZE); + dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate); + phy->port_id = port_id; + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + phy->phy_type |= PORT_TYPE_SAS; + phy->phy_attached = 1; + phy->identify.device_type = id->dev_type; + phy->frame_rcvd_size = sizeof(struct sas_identify_frame); + if (phy->identify.device_type == SAS_END_DEVICE) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != SAS_PHY_UNUSED) { + phy->identify.target_port_protocols = + SAS_PROTOCOL_SMP; + if (!timer_pending(&hisi_hba->timer)) + set_link_timer_quirk(hisi_hba); + } + hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); +end: + if (phy->reset_completion) + complete(phy->reset_completion); + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, + CHL_INT0_SL_PHY_ENABLE_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0); + + return res; +} + +static bool check_any_wideports_v2_hw(struct hisi_hba *hisi_hba) +{ + u32 port_state; + + port_state = hisi_sas_read32(hisi_hba, PORT_STATE); + if (port_state & 0x1ff) + return true; + + return false; +} + +static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba) +{ + u32 phy_state, sl_ctrl, txid_auto; + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct hisi_sas_port *port = phy->port; + 
struct device *dev = hisi_hba->dev; + + del_timer(&phy->timer); + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); + + phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); + dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state); + hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0, + GFP_ATOMIC); + + sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); + hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, + sl_ctrl & ~SL_CONTROL_CTA_MSK); + if (port && !get_wideport_bitmap_v2_hw(hisi_hba, port->id)) + if (!check_any_wideports_v2_hw(hisi_hba) && + timer_pending(&hisi_hba->timer)) + del_timer(&hisi_hba->timer); + + txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); + hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, + txid_auto | TXID_AUTO_CT3_MSK); + + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0); + + return IRQ_HANDLED; +} + +static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p) +{ + struct hisi_hba *hisi_hba = p; + u32 irq_msk; + int phy_no = 0; + irqreturn_t res = IRQ_NONE; + + irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) + >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff; + while (irq_msk) { + if (irq_msk & 1) { + u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT0); + + switch (reg_value & (CHL_INT0_NOT_RDY_MSK | + CHL_INT0_SL_PHY_ENABLE_MSK)) { + + case CHL_INT0_SL_PHY_ENABLE_MSK: + /* phy up */ + if (phy_up_v2_hw(phy_no, hisi_hba) == + IRQ_HANDLED) + res = IRQ_HANDLED; + break; + + case CHL_INT0_NOT_RDY_MSK: + /* phy down */ + if (phy_down_v2_hw(phy_no, hisi_hba) == + IRQ_HANDLED) + res = IRQ_HANDLED; + break; + + case (CHL_INT0_NOT_RDY_MSK | + CHL_INT0_SL_PHY_ENABLE_MSK): + reg_value = hisi_sas_read32(hisi_hba, + PHY_STATE); + if (reg_value & BIT(phy_no)) { + /* phy up */ + if (phy_up_v2_hw(phy_no, hisi_hba) == + IRQ_HANDLED) + res = IRQ_HANDLED; + } else { + /* phy down */ + if (phy_down_v2_hw(phy_no, hisi_hba) == + IRQ_HANDLED) + res = IRQ_HANDLED; + } + break; + + default: + break; + } + + } + irq_msk >>= 1; + phy_no++; + } + + return res; +} + +static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + u32 bcast_status; + + hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); + bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS); + if (bcast_status & RX_BCAST_CHG_MSK) + hisi_sas_phy_bcast(phy); + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, + CHL_INT0_SL_RX_BCST_ACK_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); +} + +static const struct hisi_sas_hw_error port_ecc_axi_error[] = { + { + .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_ERR_OFF), + .msg = "dmac_tx_ecc_bad_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_ERR_OFF), + .msg = "dmac_rx_ecc_bad_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF), + .msg = "dma_tx_axi_wr_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF), + .msg = "dma_tx_axi_rd_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF), + .msg = "dma_rx_axi_wr_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF), + .msg = "dma_rx_axi_rd_err", + }, +}; + +static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p) +{ + struct hisi_hba *hisi_hba = p; + struct device *dev = hisi_hba->dev; + u32 ent_msk, ent_tmp, irq_msk; + int phy_no = 0; + + ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); + ent_tmp = ent_msk; + 
ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK; + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk); + + irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >> + HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff; + + while (irq_msk) { + u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT0); + u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT1); + u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT2); + + if ((irq_msk & (1 << phy_no)) && irq_value1) { + int i; + + for (i = 0; i < ARRAY_SIZE(port_ecc_axi_error); i++) { + const struct hisi_sas_hw_error *error = + &port_ecc_axi_error[i]; + + if (!(irq_value1 & error->irq_msk)) + continue; + + dev_warn(dev, "%s error (phy%d 0x%x) found!\n", + error->msg, phy_no, irq_value1); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } + + hisi_sas_phy_write32(hisi_hba, phy_no, + CHL_INT1, irq_value1); + } + + if ((irq_msk & (1 << phy_no)) && irq_value2) { + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + + if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) { + dev_warn(dev, "phy%d identify timeout\n", + phy_no); + hisi_sas_notify_phy_event(phy, + HISI_PHYE_LINK_RESET); + } + + hisi_sas_phy_write32(hisi_hba, phy_no, + CHL_INT2, irq_value2); + } + + if ((irq_msk & (1 << phy_no)) && irq_value0) { + if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK) + phy_bcast_v2_hw(phy_no, hisi_hba); + + if (irq_value0 & CHL_INT0_PHY_RDY_MSK) + hisi_sas_phy_oob_ready(hisi_hba, phy_no); + + hisi_sas_phy_write32(hisi_hba, phy_no, + CHL_INT0, irq_value0 + & (~CHL_INT0_HOTPLUG_TOUT_MSK) + & (~CHL_INT0_SL_PHY_ENABLE_MSK) + & (~CHL_INT0_NOT_RDY_MSK)); + } + irq_msk &= ~(1 << phy_no); + phy_no++; + } + + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp); + + return IRQ_HANDLED; +} + +static void +one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value) +{ + struct device *dev = hisi_hba->dev; + const struct hisi_sas_hw_error *ecc_error; + u32 val; + int i; + + for (i = 0; i < ARRAY_SIZE(one_bit_ecc_errors); i++) { + ecc_error = &one_bit_ecc_errors[i]; + if (irq_value & ecc_error->irq_msk) { + val = hisi_sas_read32(hisi_hba, ecc_error->reg); + val &= ecc_error->msk; + val >>= ecc_error->shift; + dev_warn(dev, "%s found: mem addr is 0x%08X\n", + ecc_error->msg, val); + } + } +} + +static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, + u32 irq_value) +{ + struct device *dev = hisi_hba->dev; + const struct hisi_sas_hw_error *ecc_error; + u32 val; + int i; + + for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) { + ecc_error = &multi_bit_ecc_errors[i]; + if (irq_value & ecc_error->irq_msk) { + val = hisi_sas_read32(hisi_hba, ecc_error->reg); + val &= ecc_error->msk; + val >>= ecc_error->shift; + dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n", + ecc_error->msg, irq_value, val); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } + } + + return; +} + +static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p) +{ + struct hisi_hba *hisi_hba = p; + u32 irq_value, irq_msk; + + irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff); + + irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR); + if (irq_value) { + one_bit_ecc_error_process_v2_hw(hisi_hba, irq_value); + multi_bit_ecc_error_process_v2_hw(hisi_hba, irq_value); + } + + hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk); + + return IRQ_HANDLED; +} + +static const struct hisi_sas_hw_error axi_error[] = { + { .msk = 
BIT(0), .msg = "IOST_AXI_W_ERR" }, + { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" }, + { .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" }, + { .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" }, + { .msk = BIT(4), .msg = "SATA_AXI_W_ERR" }, + { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" }, + { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" }, + { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" }, + {} +}; + +static const struct hisi_sas_hw_error fifo_error[] = { + { .msk = BIT(8), .msg = "CQE_WINFO_FIFO" }, + { .msk = BIT(9), .msg = "CQE_MSG_FIFIO" }, + { .msk = BIT(10), .msg = "GETDQE_FIFO" }, + { .msk = BIT(11), .msg = "CMDP_FIFO" }, + { .msk = BIT(12), .msg = "AWTCTRL_FIFO" }, + {} +}; + +static const struct hisi_sas_hw_error fatal_axi_errors[] = { + { + .irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF), + .msg = "write pointer and depth", + }, + { + .irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF), + .msg = "iptt no match slot", + }, + { + .irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF), + .msg = "read pointer and depth", + }, + { + .irq_msk = BIT(ENT_INT_SRC3_AXI_OFF), + .reg = HGC_AXI_FIFO_ERR_INFO, + .sub = axi_error, + }, + { + .irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF), + .reg = HGC_AXI_FIFO_ERR_INFO, + .sub = fifo_error, + }, + { + .irq_msk = BIT(ENT_INT_SRC3_LM_OFF), + .msg = "LM add/fetch list", + }, + { + .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF), + .msg = "SAS_HGC_ABT fetch LM list", + }, +}; + +static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) +{ + struct hisi_hba *hisi_hba = p; + u32 irq_value, irq_msk, err_value; + struct device *dev = hisi_hba->dev; + const struct hisi_sas_hw_error *axi_error; + int i; + + irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe); + + irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); + + for (i = 0; i < ARRAY_SIZE(fatal_axi_errors); i++) { + axi_error = &fatal_axi_errors[i]; + if (!(irq_value & axi_error->irq_msk)) + continue; + + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, + 1 << axi_error->shift); + if (axi_error->sub) { + const struct hisi_sas_hw_error *sub = axi_error->sub; + + err_value = hisi_sas_read32(hisi_hba, axi_error->reg); + for (; sub->msk || sub->msg; sub++) { + if (!(err_value & sub->msk)) + continue; + dev_err(dev, "%s (0x%x) found!\n", + sub->msg, irq_value); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } + } else { + dev_err(dev, "%s (0x%x) found!\n", + axi_error->msg, irq_value); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } + } + + if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) { + u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR); + u32 dev_id = reg_val & ITCT_DEV_MSK; + struct hisi_sas_device *sas_dev = &hisi_hba->devices[dev_id]; + + hisi_sas_write32(hisi_hba, ITCT_CLR, 0); + dev_dbg(dev, "clear ITCT ok\n"); + complete(sas_dev->completion); + } + + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk); + + return IRQ_HANDLED; +} + +static irqreturn_t cq_thread_v2_hw(int irq_no, void *p) +{ + struct hisi_sas_cq *cq = p; + struct hisi_hba *hisi_hba = cq->hisi_hba; + struct hisi_sas_slot *slot; + struct hisi_sas_itct *itct; + struct hisi_sas_complete_v2_hdr *complete_queue; + u32 rd_point = cq->rd_point, wr_point, dev_id; + int queue = cq->id; + + if (unlikely(hisi_hba->reject_stp_links_msk)) + phys_try_accept_stp_links_v2_hw(hisi_hba); + + complete_queue = hisi_hba->complete_hdr[queue]; + + wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + + (0x14 * queue)); + + while (rd_point != wr_point) { + struct hisi_sas_complete_v2_hdr 
*complete_hdr; + int iptt; + + complete_hdr = &complete_queue[rd_point]; + + /* Check for NCQ completion */ + if (complete_hdr->act) { + u32 act_tmp = le32_to_cpu(complete_hdr->act); + int ncq_tag_count = ffs(act_tmp); + u32 dw1 = le32_to_cpu(complete_hdr->dw1); + + dev_id = (dw1 & CMPLT_HDR_DEV_ID_MSK) >> + CMPLT_HDR_DEV_ID_OFF; + itct = &hisi_hba->itct[dev_id]; + + /* The NCQ tags are held in the itct header */ + while (ncq_tag_count) { + __le64 *_ncq_tag = &itct->qw4_15[0], __ncq_tag; + u64 ncq_tag; + + ncq_tag_count--; + __ncq_tag = _ncq_tag[ncq_tag_count / 5]; + ncq_tag = le64_to_cpu(__ncq_tag); + iptt = (ncq_tag >> (ncq_tag_count % 5) * 12) & + 0xfff; + + slot = &hisi_hba->slot_info[iptt]; + slot->cmplt_queue_slot = rd_point; + slot->cmplt_queue = queue; + slot_complete_v2_hw(hisi_hba, slot); + + act_tmp &= ~(1 << ncq_tag_count); + ncq_tag_count = ffs(act_tmp); + } + } else { + u32 dw1 = le32_to_cpu(complete_hdr->dw1); + + iptt = dw1 & CMPLT_HDR_IPTT_MSK; + slot = &hisi_hba->slot_info[iptt]; + slot->cmplt_queue_slot = rd_point; + slot->cmplt_queue = queue; + slot_complete_v2_hw(hisi_hba, slot); + } + + if (++rd_point >= HISI_SAS_QUEUE_SLOTS) + rd_point = 0; + } + + /* update rd_point */ + cq->rd_point = rd_point; + hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + + return IRQ_HANDLED; +} + +static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p) +{ + struct hisi_sas_cq *cq = p; + struct hisi_hba *hisi_hba = cq->hisi_hba; + int queue = cq->id; + + hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t sata_int_v2_hw(int irq_no, void *p) +{ + struct hisi_sas_phy *phy = p; + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct device *dev = hisi_hba->dev; + struct hisi_sas_initial_fis *initial_fis; + struct dev_to_host_fis *fis; + u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate; + irqreturn_t res = IRQ_HANDLED; + u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; + int phy_no, offset; + + del_timer(&phy->timer); + + phy_no = sas_phy->id; + initial_fis = &hisi_hba->initial_fis[phy_no]; + fis = &initial_fis->fis; + + offset = 4 * (phy_no / 4); + ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1 + offset); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, + ent_msk | 1 << ((phy_no % 4) * 8)); + + ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1 + offset); + ent_tmp = ent_int & (1 << (ENT_INT_SRC1_D2H_FIS_CH1_OFF * + (phy_no % 4))); + ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4); + if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) { + dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no); + res = IRQ_NONE; + goto end; + } + + /* check ERR bit of Status Register */ + if (fis->status & ATA_ERR) { + dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no, + fis->status); + hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); + res = IRQ_NONE; + goto end; + } + + if (unlikely(phy_no == 8)) { + u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); + + port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >> + PORT_STATE_PHY8_PORT_NUM_OFF; + link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >> + PORT_STATE_PHY8_CONN_RATE_OFF; + } else { + port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); + port_id = (port_id >> (4 * phy_no)) & 0xf; + link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); + link_rate = (link_rate >> (phy_no * 4)) & 0xf; + } + + if (port_id == 0xf) { + dev_err(dev, "sata int: phy%d invalid portid\n", 
phy_no); + res = IRQ_NONE; + goto end; + } + + sas_phy->linkrate = link_rate; + hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no, + HARD_PHY_LINKRATE); + phy->maximum_linkrate = hard_phy_linkrate & 0xf; + phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf; + + sas_phy->oob_mode = SATA_OOB_MODE; + /* Make up some unique SAS address */ + attached_sas_addr[0] = 0x50; + attached_sas_addr[6] = hisi_hba->shost->host_no; + attached_sas_addr[7] = phy_no; + memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE); + memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis)); + dev_info(dev, "sata int phyup: phy%d link_rate=%d\n", phy_no, link_rate); + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + phy->port_id = port_id; + phy->phy_type |= PORT_TYPE_SATA; + phy->phy_attached = 1; + phy->identify.device_type = SAS_SATA_DEV; + phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); + phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; + hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); + + if (phy->reset_completion) + complete(phy->reset_completion); +end: + hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk); + + return res; +} + +static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = { + int_phy_updown_v2_hw, + int_chnl_int_v2_hw, +}; + +static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = { + fatal_ecc_int_v2_hw, + fatal_axi_int_v2_hw +}; + +#define CQ0_IRQ_INDEX (96) + +static int hisi_sas_v2_interrupt_preinit(struct hisi_hba *hisi_hba) +{ + struct platform_device *pdev = hisi_hba->platform_dev; + struct Scsi_Host *shost = hisi_hba->shost; + struct irq_affinity desc = { + .pre_vectors = CQ0_IRQ_INDEX, + .post_vectors = 16, + }; + int resv = desc.pre_vectors + desc.post_vectors, minvec = resv + 1, nvec; + + nvec = devm_platform_get_irqs_affinity(pdev, &desc, minvec, 128, + &hisi_hba->irq_map); + if (nvec < 0) + return nvec; + + shost->nr_hw_queues = hisi_hba->cq_nvecs = nvec - resv; + + return 0; +} + +/* + * There is a limitation in the hip06 chipset that we need + * to map in all mbigen interrupts, even if they are not used. 
+ */ +static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) +{ + struct platform_device *pdev = hisi_hba->platform_dev; + struct device *dev = &pdev->dev; + int irq, rc = 0; + int i, phy_no, fatal_no, queue_no; + + for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) { + irq = hisi_hba->irq_map[i + 1]; /* Phy up/down is irq1 */ + rc = devm_request_irq(dev, irq, phy_interrupts[i], 0, + DRV_NAME " phy", hisi_hba); + if (rc) { + dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n", + irq, rc); + rc = -ENOENT; + goto err_out; + } + } + + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + + irq = hisi_hba->irq_map[phy_no + 72]; + rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0, + DRV_NAME " sata", phy); + if (rc) { + dev_err(dev, "irq init: could not request sata interrupt %d, rc=%d\n", + irq, rc); + rc = -ENOENT; + goto err_out; + } + } + + for (fatal_no = 0; fatal_no < HISI_SAS_FATAL_INT_NR; fatal_no++) { + irq = hisi_hba->irq_map[fatal_no + 81]; + rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0, + DRV_NAME " fatal", hisi_hba); + if (rc) { + dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n", + irq, rc); + rc = -ENOENT; + goto err_out; + } + } + + for (queue_no = 0; queue_no < hisi_hba->cq_nvecs; queue_no++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no]; + + cq->irq_no = hisi_hba->irq_map[queue_no + 96]; + rc = devm_request_threaded_irq(dev, cq->irq_no, + cq_interrupt_v2_hw, + cq_thread_v2_hw, IRQF_ONESHOT, + DRV_NAME " cq", cq); + if (rc) { + dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n", + cq->irq_no, rc); + rc = -ENOENT; + goto err_out; + } + cq->irq_mask = irq_get_affinity_mask(cq->irq_no); + } +err_out: + return rc; +} + +static int hisi_sas_v2_init(struct hisi_hba *hisi_hba) +{ + int rc; + + memset(hisi_hba->sata_dev_bitmap, 0, sizeof(hisi_hba->sata_dev_bitmap)); + + rc = hw_init_v2_hw(hisi_hba); + if (rc) + return rc; + + rc = interrupt_init_v2_hw(hisi_hba); + if (rc) + return rc; + + return 0; +} + +static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba) +{ + struct platform_device *pdev = hisi_hba->platform_dev; + int i; + + for (i = 0; i < hisi_hba->queue_count; i++) + hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1); + + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff); + + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff); + } + + for (i = 0; i < 128; i++) + synchronize_irq(platform_get_irq(pdev, i)); +} + + +static u32 get_phys_state_v2_hw(struct hisi_hba *hisi_hba) +{ + return hisi_sas_read32(hisi_hba, PHY_STATE); +} + +static int soft_reset_v2_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + int rc, cnt; + + interrupt_disable_v2_hw(hisi_hba); + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); + + hisi_sas_stop_phys(hisi_hba); + + mdelay(10); + + hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1); + + /* wait until bus idle */ + cnt = 0; + while (1) { + u32 status = hisi_sas_read32_relaxed(hisi_hba, + AXI_MASTER_CFG_BASE + AM_CURR_TRANS_RETURN); + + if (status == 0x3) + break; + + udelay(10); + if (cnt++ > 10) { + dev_err(dev, "wait axi bus state to idle timeout!\n"); + return -1; + } + 
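interrupt_init_v2_hw() above depends on a fixed mbigen vector layout for hip06: the two phy interrupt handlers are requested from index 1, the per-phy SATA D2H FIS interrupts from index 72, the two fatal handlers from index 81, and the completion queues from CQ0_IRQ_INDEX (96), with 128 vectors mapped in total. The sketch below only prints that layout for reference; the nine-phy count is an assumption taken from the phy 8 special cases elsewhere in this file.

#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i < 2; i++)
		printf("vector %3d: phy interrupt %d (up/down, channel)\n",
		       1 + i, i);
	for (i = 0; i < 9; i++)	/* assumed nine-phy hip06 configuration */
		printf("vector %3d: SATA D2H FIS for phy %d\n", 72 + i, i);
	for (i = 0; i < 2; i++)
		printf("vector %3d: fatal interrupt %d (ECC, AXI)\n", 81 + i, i);
	for (i = 0; i < 4; i++)	/* first few completion queues only */
		printf("vector %3d: completion queue %d\n", 96 + i, i);
	return 0;
}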
} + + hisi_sas_init_mem(hisi_hba); + + rc = hw_init_v2_hw(hisi_hba); + if (rc) + return rc; + + phys_reject_stp_links_v2_hw(hisi_hba); + + return 0; +} + +static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type, + u8 reg_index, u8 reg_count, u8 *write_data) +{ + struct device *dev = hisi_hba->dev; + int phy_no, count; + + if (!hisi_hba->sgpio_regs) + return -EOPNOTSUPP; + + switch (reg_type) { + case SAS_GPIO_REG_TX: + count = reg_count * 4; + count = min(count, hisi_hba->n_phy); + + for (phy_no = 0; phy_no < count; phy_no++) { + /* + * GPIO_TX[n] register has the highest numbered drive + * of the four in the first byte and the lowest + * numbered drive in the fourth byte. + * See SFF-8485 Rev. 0.7 Table 24. + */ + void __iomem *reg_addr = hisi_hba->sgpio_regs + + reg_index * 4 + phy_no; + int data_idx = phy_no + 3 - (phy_no % 4) * 2; + + writeb(write_data[data_idx], reg_addr); + } + + break; + default: + dev_err(dev, "write gpio: unsupported or bad reg type %d\n", + reg_type); + return -EINVAL; + } + + return 0; +} + +static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba, + int delay_ms, int timeout_ms) +{ + struct device *dev = hisi_hba->dev; + int entries, entries_old = 0, time; + + for (time = 0; time < timeout_ms; time += delay_ms) { + entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT); + if (entries == entries_old) + break; + + entries_old = entries; + msleep(delay_ms); + } + + if (time >= timeout_ms) { + dev_dbg(dev, "Wait commands complete timeout!\n"); + return; + } + + dev_dbg(dev, "wait commands complete %dms\n", time); + +} + +static struct attribute *host_v2_hw_attrs[] = { + &dev_attr_phy_event_threshold.attr, + NULL +}; + +ATTRIBUTE_GROUPS(host_v2_hw); + +static void map_queues_v2_hw(struct Scsi_Host *shost) +{ + struct hisi_hba *hisi_hba = shost_priv(shost); + struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; + const struct cpumask *mask; + unsigned int queue, cpu; + + for (queue = 0; queue < qmap->nr_queues; queue++) { + mask = irq_get_affinity_mask(hisi_hba->irq_map[96 + queue]); + if (!mask) + continue; + + for_each_cpu(cpu, mask) + qmap->mq_map[cpu] = qmap->queue_offset + queue; + } +} + +static const struct scsi_host_template sht_v2_hw = { + .name = DRV_NAME, + .proc_name = DRV_NAME, + .module = THIS_MODULE, + .queuecommand = sas_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, + .target_alloc = sas_target_alloc, + .slave_configure = hisi_sas_slave_configure, + .scan_finished = hisi_sas_scan_finished, + .scan_start = hisi_sas_scan_start, + .change_queue_depth = sas_change_queue_depth, + .bios_param = sas_bios_param, + .this_id = -1, + .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, + .slave_alloc = hisi_sas_slave_alloc, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif + .shost_groups = host_v2_hw_groups, + .host_reset = hisi_sas_host_reset, + .map_queues = map_queues_v2_hw, + .host_tagset = 1, +}; + +static const struct hisi_sas_hw hisi_sas_v2_hw = { + .hw_init = hisi_sas_v2_init, + .interrupt_preinit = hisi_sas_v2_interrupt_preinit, + .setup_itct = setup_itct_v2_hw, + .slot_index_alloc = slot_index_alloc_quirk_v2_hw, + .alloc_dev = alloc_dev_quirk_v2_hw, + .sl_notify_ssp = sl_notify_ssp_v2_hw, + .get_wideport_bitmap = get_wideport_bitmap_v2_hw, + .clear_itct = clear_itct_v2_hw, + .free_device 
= free_device_v2_hw, + .prep_smp = prep_smp_v2_hw, + .prep_ssp = prep_ssp_v2_hw, + .prep_stp = prep_ata_v2_hw, + .prep_abort = prep_abort_v2_hw, + .start_delivery = start_delivery_v2_hw, + .phys_init = phys_init_v2_hw, + .phy_start = start_phy_v2_hw, + .phy_disable = disable_phy_v2_hw, + .phy_hard_reset = phy_hard_reset_v2_hw, + .get_events = phy_get_events_v2_hw, + .phy_set_linkrate = phy_set_linkrate_v2_hw, + .phy_get_max_linkrate = phy_get_max_linkrate_v2_hw, + .complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr), + .soft_reset = soft_reset_v2_hw, + .get_phys_state = get_phys_state_v2_hw, + .write_gpio = write_gpio_v2_hw, + .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v2_hw, + .sht = &sht_v2_hw, +}; + +static int hisi_sas_v2_probe(struct platform_device *pdev) +{ + return hisi_sas_probe(pdev, &hisi_sas_v2_hw); +} + +static const struct of_device_id sas_v2_of_match[] = { + { .compatible = "hisilicon,hip06-sas-v2",}, + { .compatible = "hisilicon,hip07-sas-v2",}, + {}, +}; +MODULE_DEVICE_TABLE(of, sas_v2_of_match); + +static const struct acpi_device_id sas_v2_acpi_match[] = { + { "HISI0162", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, sas_v2_acpi_match); + +static struct platform_driver hisi_sas_v2_driver = { + .probe = hisi_sas_v2_probe, + .remove_new = hisi_sas_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = sas_v2_of_match, + .acpi_match_table = ACPI_PTR(sas_v2_acpi_match), + }, +}; + +module_platform_driver(hisi_sas_v2_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Garry "); +MODULE_DESCRIPTION("HISILICON SAS controller v2 hw driver"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c new file mode 100644 index 000000000..520fffc14 --- /dev/null +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -0,0 +1,5298 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2017 Hisilicon Limited. 
+ */ + +#include +#include "hisi_sas.h" +#define DRV_NAME "hisi_sas_v3_hw" + +/* global registers need init */ +#define DLVRY_QUEUE_ENABLE 0x0 +#define IOST_BASE_ADDR_LO 0x8 +#define IOST_BASE_ADDR_HI 0xc +#define ITCT_BASE_ADDR_LO 0x10 +#define ITCT_BASE_ADDR_HI 0x14 +#define IO_BROKEN_MSG_ADDR_LO 0x18 +#define IO_BROKEN_MSG_ADDR_HI 0x1c +#define PHY_CONTEXT 0x20 +#define PHY_STATE 0x24 +#define PHY_PORT_NUM_MA 0x28 +#define PHY_CONN_RATE 0x30 +#define ITCT_CLR 0x44 +#define ITCT_CLR_EN_OFF 16 +#define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF) +#define ITCT_DEV_OFF 0 +#define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF) +#define SAS_AXI_USER3 0x50 +#define IO_SATA_BROKEN_MSG_ADDR_LO 0x58 +#define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c +#define SATA_INITI_D2H_STORE_ADDR_LO 0x60 +#define SATA_INITI_D2H_STORE_ADDR_HI 0x64 +#define CFG_MAX_TAG 0x68 +#define TRANS_LOCK_ICT_TIME 0X70 +#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84 +#define HGC_SAS_TXFAIL_RETRY_CTRL 0x88 +#define HGC_GET_ITV_TIME 0x90 +#define DEVICE_MSG_WORK_MODE 0x94 +#define OPENA_WT_CONTI_TIME 0x9c +#define I_T_NEXUS_LOSS_TIME 0xa0 +#define MAX_CON_TIME_LIMIT_TIME 0xa4 +#define BUS_INACTIVE_LIMIT_TIME 0xa8 +#define REJECT_TO_OPEN_LIMIT_TIME 0xac +#define CQ_INT_CONVERGE_EN 0xb0 +#define CFG_AGING_TIME 0xbc +#define HGC_DFX_CFG2 0xc0 +#define CFG_ABT_SET_QUERY_IPTT 0xd4 +#define CFG_SET_ABORTED_IPTT_OFF 0 +#define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF) +#define CFG_SET_ABORTED_EN_OFF 12 +#define CFG_ABT_SET_IPTT_DONE 0xd8 +#define CFG_ABT_SET_IPTT_DONE_OFF 0 +#define HGC_IOMB_PROC1_STATUS 0x104 +#define HGC_LM_DFX_STATUS2 0x128 +#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0 +#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \ + HGC_LM_DFX_STATUS2_IOSTLIST_OFF) +#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12 +#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \ + HGC_LM_DFX_STATUS2_ITCTLIST_OFF) +#define HGC_CQE_ECC_ADDR 0x13c +#define HGC_CQE_ECC_1B_ADDR_OFF 0 +#define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF) +#define HGC_CQE_ECC_MB_ADDR_OFF 8 +#define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF) +#define HGC_IOST_ECC_ADDR 0x140 +#define HGC_IOST_ECC_1B_ADDR_OFF 0 +#define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF) +#define HGC_IOST_ECC_MB_ADDR_OFF 16 +#define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF) +#define HGC_DQE_ECC_ADDR 0x144 +#define HGC_DQE_ECC_1B_ADDR_OFF 0 +#define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF) +#define HGC_DQE_ECC_MB_ADDR_OFF 16 +#define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF) +#define CHNL_INT_STATUS 0x148 +#define TAB_DFX 0x14c +#define HGC_ITCT_ECC_ADDR 0x150 +#define HGC_ITCT_ECC_1B_ADDR_OFF 0 +#define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \ + HGC_ITCT_ECC_1B_ADDR_OFF) +#define HGC_ITCT_ECC_MB_ADDR_OFF 16 +#define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \ + HGC_ITCT_ECC_MB_ADDR_OFF) +#define HGC_AXI_FIFO_ERR_INFO 0x154 +#define AXI_ERR_INFO_OFF 0 +#define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF) +#define FIFO_ERR_INFO_OFF 8 +#define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF) +#define TAB_RD_TYPE 0x15c +#define INT_COAL_EN 0x19c +#define OQ_INT_COAL_TIME 0x1a0 +#define OQ_INT_COAL_CNT 0x1a4 +#define ENT_INT_COAL_TIME 0x1a8 +#define ENT_INT_COAL_CNT 0x1ac +#define OQ_INT_SRC 0x1b0 +#define OQ_INT_SRC_MSK 0x1b4 +#define ENT_INT_SRC1 0x1b8 +#define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0 +#define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF) +#define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8 +#define 
ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF) +#define ENT_INT_SRC2 0x1bc +#define ENT_INT_SRC3 0x1c0 +#define ENT_INT_SRC3_WP_DEPTH_OFF 8 +#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9 +#define ENT_INT_SRC3_RP_DEPTH_OFF 10 +#define ENT_INT_SRC3_AXI_OFF 11 +#define ENT_INT_SRC3_FIFO_OFF 12 +#define ENT_INT_SRC3_LM_OFF 14 +#define ENT_INT_SRC3_ITC_INT_OFF 15 +#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF) +#define ENT_INT_SRC3_ABT_OFF 16 +#define ENT_INT_SRC3_DQE_POISON_OFF 18 +#define ENT_INT_SRC3_IOST_POISON_OFF 19 +#define ENT_INT_SRC3_ITCT_POISON_OFF 20 +#define ENT_INT_SRC3_ITCT_NCQ_POISON_OFF 21 +#define ENT_INT_SRC_MSK1 0x1c4 +#define ENT_INT_SRC_MSK2 0x1c8 +#define ENT_INT_SRC_MSK3 0x1cc +#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31 +#define CHNL_PHYUPDOWN_INT_MSK 0x1d0 +#define CHNL_ENT_INT_MSK 0x1d4 +#define HGC_COM_INT_MSK 0x1d8 +#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF) +#define SAS_ECC_INTR 0x1e8 +#define SAS_ECC_INTR_DQE_ECC_1B_OFF 0 +#define SAS_ECC_INTR_DQE_ECC_MB_OFF 1 +#define SAS_ECC_INTR_IOST_ECC_1B_OFF 2 +#define SAS_ECC_INTR_IOST_ECC_MB_OFF 3 +#define SAS_ECC_INTR_ITCT_ECC_1B_OFF 4 +#define SAS_ECC_INTR_ITCT_ECC_MB_OFF 5 +#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 6 +#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 7 +#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 8 +#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 9 +#define SAS_ECC_INTR_CQE_ECC_1B_OFF 10 +#define SAS_ECC_INTR_CQE_ECC_MB_OFF 11 +#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 12 +#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 13 +#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 14 +#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 15 +#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 16 +#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 17 +#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 18 +#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 19 +#define SAS_ECC_INTR_OOO_RAM_ECC_1B_OFF 20 +#define SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF 21 +#define SAS_ECC_INTR_MSK 0x1ec +#define HGC_ERR_STAT_EN 0x238 +#define CQE_SEND_CNT 0x248 +#define DLVRY_Q_0_BASE_ADDR_LO 0x260 +#define DLVRY_Q_0_BASE_ADDR_HI 0x264 +#define DLVRY_Q_0_DEPTH 0x268 +#define DLVRY_Q_0_WR_PTR 0x26c +#define DLVRY_Q_0_RD_PTR 0x270 +#define HYPER_STREAM_ID_EN_CFG 0xc80 +#define OQ0_INT_SRC_MSK 0xc90 +#define COMPL_Q_0_BASE_ADDR_LO 0x4e0 +#define COMPL_Q_0_BASE_ADDR_HI 0x4e4 +#define COMPL_Q_0_DEPTH 0x4e8 +#define COMPL_Q_0_WR_PTR 0x4ec +#define COMPL_Q_0_RD_PTR 0x4f0 +#define HGC_RXM_DFX_STATUS14 0xae8 +#define HGC_RXM_DFX_STATUS14_MEM0_OFF 0 +#define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS14_MEM0_OFF) +#define HGC_RXM_DFX_STATUS14_MEM1_OFF 9 +#define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS14_MEM1_OFF) +#define HGC_RXM_DFX_STATUS14_MEM2_OFF 18 +#define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS14_MEM2_OFF) +#define HGC_RXM_DFX_STATUS15 0xaec +#define HGC_RXM_DFX_STATUS15_MEM3_OFF 0 +#define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS15_MEM3_OFF) +#define AWQOS_AWCACHE_CFG 0xc84 +#define ARQOS_ARCACHE_CFG 0xc88 +#define HILINK_ERR_DFX 0xe04 +#define SAS_GPIO_CFG_0 0x1000 +#define SAS_GPIO_CFG_1 0x1004 +#define SAS_GPIO_TX_0_1 0x1040 +#define SAS_CFG_DRIVE_VLD 0x1070 + +/* phy registers requiring init */ +#define PORT_BASE (0x2000) +#define PHY_CFG (PORT_BASE + 0x0) +#define HARD_PHY_LINKRATE (PORT_BASE + 0x4) +#define PHY_CFG_ENA_OFF 0 +#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF) +#define PHY_CFG_DC_OPT_OFF 2 +#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF) +#define 
PHY_CFG_PHY_RST_OFF 3 +#define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF) +#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8) +#define CFG_PROG_PHY_LINK_RATE_OFF 0 +#define CFG_PROG_PHY_LINK_RATE_MSK (0xff << CFG_PROG_PHY_LINK_RATE_OFF) +#define CFG_PROG_OOB_PHY_LINK_RATE_OFF 8 +#define CFG_PROG_OOB_PHY_LINK_RATE_MSK (0xf << CFG_PROG_OOB_PHY_LINK_RATE_OFF) +#define PHY_CTRL (PORT_BASE + 0x14) +#define PHY_CTRL_RESET_OFF 0 +#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) +#define CMD_HDR_PIR_OFF 8 +#define CMD_HDR_PIR_MSK (0x1 << CMD_HDR_PIR_OFF) +#define SERDES_CFG (PORT_BASE + 0x1c) +#define CFG_ALOS_CHK_DISABLE_OFF 9 +#define CFG_ALOS_CHK_DISABLE_MSK (0x1 << CFG_ALOS_CHK_DISABLE_OFF) +#define SAS_PHY_BIST_CTRL (PORT_BASE + 0x2c) +#define CFG_BIST_MODE_SEL_OFF 0 +#define CFG_BIST_MODE_SEL_MSK (0xf << CFG_BIST_MODE_SEL_OFF) +#define CFG_LOOP_TEST_MODE_OFF 14 +#define CFG_LOOP_TEST_MODE_MSK (0x3 << CFG_LOOP_TEST_MODE_OFF) +#define CFG_RX_BIST_EN_OFF 16 +#define CFG_RX_BIST_EN_MSK (0x1 << CFG_RX_BIST_EN_OFF) +#define CFG_TX_BIST_EN_OFF 17 +#define CFG_TX_BIST_EN_MSK (0x1 << CFG_TX_BIST_EN_OFF) +#define CFG_BIST_TEST_OFF 18 +#define CFG_BIST_TEST_MSK (0x1 << CFG_BIST_TEST_OFF) +#define SAS_PHY_BIST_CODE (PORT_BASE + 0x30) +#define SAS_PHY_BIST_CODE1 (PORT_BASE + 0x34) +#define SAS_BIST_ERR_CNT (PORT_BASE + 0x38) +#define SL_CFG (PORT_BASE + 0x84) +#define AIP_LIMIT (PORT_BASE + 0x90) +#define SL_CONTROL (PORT_BASE + 0x94) +#define SL_CONTROL_NOTIFY_EN_OFF 0 +#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) +#define SL_CTA_OFF 17 +#define SL_CTA_MSK (0x1 << SL_CTA_OFF) +#define RX_PRIMS_STATUS (PORT_BASE + 0x98) +#define RX_BCAST_CHG_OFF 1 +#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF) +#define TX_ID_DWORD0 (PORT_BASE + 0x9c) +#define TX_ID_DWORD1 (PORT_BASE + 0xa0) +#define TX_ID_DWORD2 (PORT_BASE + 0xa4) +#define TX_ID_DWORD3 (PORT_BASE + 0xa8) +#define TX_ID_DWORD4 (PORT_BASE + 0xaC) +#define TX_ID_DWORD5 (PORT_BASE + 0xb0) +#define TX_ID_DWORD6 (PORT_BASE + 0xb4) +#define TXID_AUTO (PORT_BASE + 0xb8) +#define CT3_OFF 1 +#define CT3_MSK (0x1 << CT3_OFF) +#define TX_HARDRST_OFF 2 +#define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF) +#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) +#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) +#define STP_LINK_TIMER (PORT_BASE + 0x120) +#define STP_LINK_TIMEOUT_STATE (PORT_BASE + 0x124) +#define CON_CFG_DRIVER (PORT_BASE + 0x130) +#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134) +#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138) +#define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c) +#define CHL_INT0 (PORT_BASE + 0x1b4) +#define CHL_INT0_HOTPLUG_TOUT_OFF 0 +#define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF) +#define CHL_INT0_SL_RX_BCST_ACK_OFF 1 +#define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF) +#define CHL_INT0_SL_PHY_ENABLE_OFF 2 +#define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF) +#define CHL_INT0_NOT_RDY_OFF 4 +#define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF) +#define CHL_INT0_PHY_RDY_OFF 5 +#define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF) +#define CHL_INT1 (PORT_BASE + 0x1b8) +#define CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF 15 +#define CHL_INT1_DMAC_TX_ECC_1B_ERR_OFF 16 +#define CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF 17 +#define CHL_INT1_DMAC_RX_ECC_1B_ERR_OFF 18 +#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19 +#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20 +#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21 +#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22 +#define CHL_INT1_DMAC_TX_FIFO_ERR_OFF 
23 +#define CHL_INT1_DMAC_RX_FIFO_ERR_OFF 24 +#define CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF 26 +#define CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF 27 +#define CHL_INT2 (PORT_BASE + 0x1bc) +#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0 +#define CHL_INT2_RX_DISP_ERR_OFF 28 +#define CHL_INT2_RX_CODE_ERR_OFF 29 +#define CHL_INT2_RX_INVLD_DW_OFF 30 +#define CHL_INT2_STP_LINK_TIMEOUT_OFF 31 +#define CHL_INT0_MSK (PORT_BASE + 0x1c0) +#define CHL_INT1_MSK (PORT_BASE + 0x1c4) +#define CHL_INT2_MSK (PORT_BASE + 0x1c8) +#define SAS_EC_INT_COAL_TIME (PORT_BASE + 0x1cc) +#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) +#define SAS_RX_TRAIN_TIMER (PORT_BASE + 0x2a4) +#define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) +#define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4) +#define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8) +#define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc) +#define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0) +#define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4) +#define DMA_TX_STATUS (PORT_BASE + 0x2d0) +#define DMA_TX_STATUS_BUSY_OFF 0 +#define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF) +#define DMA_RX_STATUS (PORT_BASE + 0x2e8) +#define DMA_RX_STATUS_BUSY_OFF 0 +#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) + +#define COARSETUNE_TIME (PORT_BASE + 0x304) +#define TXDEEMPH_G1 (PORT_BASE + 0x350) +#define ERR_CNT_DWS_LOST (PORT_BASE + 0x380) +#define ERR_CNT_RESET_PROB (PORT_BASE + 0x384) +#define ERR_CNT_INVLD_DW (PORT_BASE + 0x390) +#define ERR_CNT_CODE_ERR (PORT_BASE + 0x394) +#define ERR_CNT_DISP_ERR (PORT_BASE + 0x398) +#define DFX_FIFO_CTRL (PORT_BASE + 0x3a0) +#define DFX_FIFO_CTRL_TRIGGER_MODE_OFF 0 +#define DFX_FIFO_CTRL_TRIGGER_MODE_MSK (0x7 << DFX_FIFO_CTRL_TRIGGER_MODE_OFF) +#define DFX_FIFO_CTRL_DUMP_MODE_OFF 3 +#define DFX_FIFO_CTRL_DUMP_MODE_MSK (0x7 << DFX_FIFO_CTRL_DUMP_MODE_OFF) +#define DFX_FIFO_CTRL_SIGNAL_SEL_OFF 6 +#define DFX_FIFO_CTRL_SIGNAL_SEL_MSK (0xF << DFX_FIFO_CTRL_SIGNAL_SEL_OFF) +#define DFX_FIFO_CTRL_DUMP_DISABLE_OFF 10 +#define DFX_FIFO_CTRL_DUMP_DISABLE_MSK (0x1 << DFX_FIFO_CTRL_DUMP_DISABLE_OFF) +#define DFX_FIFO_TRIGGER (PORT_BASE + 0x3a4) +#define DFX_FIFO_TRIGGER_MSK (PORT_BASE + 0x3a8) +#define DFX_FIFO_DUMP_MSK (PORT_BASE + 0x3aC) +#define DFX_FIFO_RD_DATA (PORT_BASE + 0x3b0) + +#define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */ +#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW) +#error Max ITCT exceeded +#endif + +#define AXI_MASTER_CFG_BASE (0x5000) +#define AM_CTRL_GLOBAL (0x0) +#define AM_CTRL_SHUTDOWN_REQ_OFF 0 +#define AM_CTRL_SHUTDOWN_REQ_MSK (0x1 << AM_CTRL_SHUTDOWN_REQ_OFF) +#define AM_CURR_TRANS_RETURN (0x150) + +#define AM_CFG_MAX_TRANS (0x5010) +#define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014) +#define AXI_CFG (0x5100) +#define AM_ROB_ECC_ERR_ADDR (0x510c) +#define AM_ROB_ECC_ERR_ADDR_OFF 0 +#define AM_ROB_ECC_ERR_ADDR_MSK 0xffffffff + +/* RAS registers need init */ +#define RAS_BASE (0x6000) +#define SAS_RAS_INTR0 (RAS_BASE) +#define SAS_RAS_INTR1 (RAS_BASE + 0x04) +#define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08) +#define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c) +#define CFG_SAS_RAS_INTR_MASK (RAS_BASE + 0x1c) +#define SAS_RAS_INTR2 (RAS_BASE + 0x20) +#define SAS_RAS_INTR2_MASK (RAS_BASE + 0x24) + +/* HW dma structures */ +/* Delivery queue header */ +/* dw0 */ +#define CMD_HDR_ABORT_FLAG_OFF 0 +#define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF) +#define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2 +#define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF) +#define CMD_HDR_RESP_REPORT_OFF 5 +#define CMD_HDR_RESP_REPORT_MSK (0x1 
<< CMD_HDR_RESP_REPORT_OFF) +#define CMD_HDR_TLR_CTRL_OFF 6 +#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) +#define CMD_HDR_PORT_OFF 18 +#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) +#define CMD_HDR_PRIORITY_OFF 27 +#define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF) +#define CMD_HDR_CMD_OFF 29 +#define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF) +/* dw1 */ +#define CMD_HDR_UNCON_CMD_OFF 3 +#define CMD_HDR_DIR_OFF 5 +#define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF) +#define CMD_HDR_RESET_OFF 7 +#define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF) +#define CMD_HDR_VDTL_OFF 10 +#define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF) +#define CMD_HDR_FRAME_TYPE_OFF 11 +#define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF) +#define CMD_HDR_DEV_ID_OFF 16 +#define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF) +/* dw2 */ +#define CMD_HDR_CFL_OFF 0 +#define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF) +#define CMD_HDR_NCQ_TAG_OFF 10 +#define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF) +#define CMD_HDR_MRFL_OFF 15 +#define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF) +#define CMD_HDR_SG_MOD_OFF 24 +#define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF) +/* dw3 */ +#define CMD_HDR_IPTT_OFF 0 +#define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF) +/* dw6 */ +#define CMD_HDR_DIF_SGL_LEN_OFF 0 +#define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF) +#define CMD_HDR_DATA_SGL_LEN_OFF 16 +#define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF) +/* dw7 */ +#define CMD_HDR_ADDR_MODE_SEL_OFF 15 +#define CMD_HDR_ADDR_MODE_SEL_MSK (1 << CMD_HDR_ADDR_MODE_SEL_OFF) +#define CMD_HDR_ABORT_IPTT_OFF 16 +#define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF) + +/* Completion header */ +/* dw0 */ +#define CMPLT_HDR_CMPLT_OFF 0 +#define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF) +#define CMPLT_HDR_ERROR_PHASE_OFF 2 +#define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF) +/* bit[9:2] Error Phase */ +#define ERR_PHASE_RESPONSE_FRAME_REV_STAGE_OFF \ + 8 +#define ERR_PHASE_RESPONSE_FRAME_REV_STAGE_MSK \ + (0x1 << ERR_PHASE_RESPONSE_FRAME_REV_STAGE_OFF) +#define CMPLT_HDR_RSPNS_XFRD_OFF 10 +#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) +#define CMPLT_HDR_RSPNS_GOOD_OFF 11 +#define CMPLT_HDR_RSPNS_GOOD_MSK (0x1 << CMPLT_HDR_RSPNS_GOOD_OFF) +#define CMPLT_HDR_ERX_OFF 12 +#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF) +#define CMPLT_HDR_ABORT_STAT_OFF 13 +#define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF) +/* abort_stat */ +#define STAT_IO_NOT_VALID 0x1 +#define STAT_IO_NO_DEVICE 0x2 +#define STAT_IO_COMPLETE 0x3 +#define STAT_IO_ABORTED 0x4 +/* dw1 */ +#define CMPLT_HDR_IPTT_OFF 0 +#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF) +#define CMPLT_HDR_DEV_ID_OFF 16 +#define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF) +/* dw3 */ +#define SATA_DISK_IN_ERROR_STATUS_OFF 8 +#define SATA_DISK_IN_ERROR_STATUS_MSK (0x1 << SATA_DISK_IN_ERROR_STATUS_OFF) +#define CMPLT_HDR_SATA_DISK_ERR_OFF 16 +#define CMPLT_HDR_SATA_DISK_ERR_MSK (0x1 << CMPLT_HDR_SATA_DISK_ERR_OFF) +#define CMPLT_HDR_IO_IN_TARGET_OFF 17 +#define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF) +/* bit[23:18] ERR_FIS_ATA_STATUS */ +#define FIS_ATA_STATUS_ERR_OFF 18 +#define FIS_ATA_STATUS_ERR_MSK (0x1 << FIS_ATA_STATUS_ERR_OFF) +#define FIS_TYPE_SDB_OFF 31 +#define FIS_TYPE_SDB_MSK (0x1 << FIS_TYPE_SDB_OFF) + +/* ITCT header */ +/* qw0 */ +#define ITCT_HDR_DEV_TYPE_OFF 0 +#define ITCT_HDR_DEV_TYPE_MSK 
(0x3 << ITCT_HDR_DEV_TYPE_OFF) +#define ITCT_HDR_VALID_OFF 2 +#define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF) +#define ITCT_HDR_MCR_OFF 5 +#define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF) +#define ITCT_HDR_VLN_OFF 9 +#define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF) +#define ITCT_HDR_SMP_TIMEOUT_OFF 16 +#define ITCT_HDR_AWT_CONTINUE_OFF 25 +#define ITCT_HDR_PORT_ID_OFF 28 +#define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF) +/* qw2 */ +#define ITCT_HDR_INLT_OFF 0 +#define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF) +#define ITCT_HDR_RTOLT_OFF 48 +#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) + +struct hisi_sas_protect_iu_v3_hw { + u32 dw0; + u32 lbrtcv; + u32 lbrtgv; + u32 dw3; + u32 dw4; + u32 dw5; + u32 rsv; +}; + +struct hisi_sas_complete_v3_hdr { + __le32 dw0; + __le32 dw1; + __le32 act; + __le32 dw3; +}; + +struct hisi_sas_err_record_v3 { + /* dw0 */ + __le32 trans_tx_fail_type; + + /* dw1 */ + __le32 trans_rx_fail_type; + + /* dw2 */ + __le16 dma_tx_err_type; + __le16 sipc_rx_err_type; + + /* dw3 */ + __le32 dma_rx_err_type; +}; + +#define RX_DATA_LEN_UNDERFLOW_OFF 6 +#define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF) + +#define RX_FIS_STATUS_ERR_OFF 0 +#define RX_FIS_STATUS_ERR_MSK (1 << RX_FIS_STATUS_ERR_OFF) + +#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096 +#define HISI_SAS_MSI_COUNT_V3_HW 32 + +#define DIR_NO_DATA 0 +#define DIR_TO_INI 1 +#define DIR_TO_DEVICE 2 +#define DIR_RESERVED 3 + +#define FIS_CMD_IS_UNCONSTRAINED(fis) \ + ((fis.command == ATA_CMD_READ_LOG_EXT) || \ + (fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \ + ((fis.command == ATA_CMD_DEV_RESET) && \ + ((fis.control & ATA_SRST) != 0))) + +#define T10_INSRT_EN_OFF 0 +#define T10_INSRT_EN_MSK (1 << T10_INSRT_EN_OFF) +#define T10_RMV_EN_OFF 1 +#define T10_RMV_EN_MSK (1 << T10_RMV_EN_OFF) +#define T10_RPLC_EN_OFF 2 +#define T10_RPLC_EN_MSK (1 << T10_RPLC_EN_OFF) +#define T10_CHK_EN_OFF 3 +#define T10_CHK_EN_MSK (1 << T10_CHK_EN_OFF) +#define INCR_LBRT_OFF 5 +#define INCR_LBRT_MSK (1 << INCR_LBRT_OFF) +#define USR_DATA_BLOCK_SZ_OFF 20 +#define USR_DATA_BLOCK_SZ_MSK (0x3 << USR_DATA_BLOCK_SZ_OFF) +#define T10_CHK_MSK_OFF 16 +#define T10_CHK_REF_TAG_MSK (0xf0 << T10_CHK_MSK_OFF) +#define T10_CHK_APP_TAG_MSK (0xc << T10_CHK_MSK_OFF) + +#define BASE_VECTORS_V3_HW 16 +#define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1) + +#define CHNL_INT_STS_MSK 0xeeeeeeee +#define CHNL_INT_STS_PHY_MSK 0xe +#define CHNL_INT_STS_INT0_MSK BIT(1) +#define CHNL_INT_STS_INT1_MSK BIT(2) +#define CHNL_INT_STS_INT2_MSK BIT(3) +#define CHNL_WIDTH 4 + +#define BAR_NO_V3_HW 5 + +enum { + DSM_FUNC_ERR_HANDLE_MSI = 0, +}; + +static bool hisi_sas_intr_conv; +MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)"); + +/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */ +static int prot_mask; +module_param(prot_mask, int, 0444); +MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 "); + +/* the index of iopoll queues are bigger than interrupt queues' */ +static int experimental_iopoll_q_cnt; +module_param(experimental_iopoll_q_cnt, int, 0444); +MODULE_PARM_DESC(experimental_iopoll_q_cnt, "number of queues to be used as poll mode, def=0"); + +static void debugfs_work_handler_v3_hw(struct work_struct *work); +static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba); + +static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) +{ + void __iomem *regs = hisi_hba->regs + off; + + return readl(regs); +} + +static void 
hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) +{ + void __iomem *regs = hisi_hba->regs + off; + + writel(val, regs); +} + +static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, + u32 off, u32 val) +{ + void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; + + writel(val, regs); +} + +static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, + int phy_no, u32 off) +{ + void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; + + return readl(regs); +} + +#define hisi_sas_read32_poll_timeout(off, val, cond, delay_us, \ + timeout_us) \ +({ \ + void __iomem *regs = hisi_hba->regs + off; \ + readl_poll_timeout(regs, val, cond, delay_us, timeout_us); \ +}) + +#define hisi_sas_read32_poll_timeout_atomic(off, val, cond, delay_us, \ + timeout_us) \ +({ \ + void __iomem *regs = hisi_hba->regs + off; \ + readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us);\ +}) + +static void interrupt_enable_v3_hw(struct hisi_hba *hisi_hba) +{ + int i; + + for (i = 0; i < hisi_hba->queue_count; i++) + hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0); + + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555); + + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); + } +} + +static void init_reg_v3_hw(struct hisi_hba *hisi_hba) +{ + struct pci_dev *pdev = hisi_hba->pci_dev; + int i, j; + + /* Global registers init */ + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, + (u32)((1ULL << hisi_hba->queue_count) - 1)); + hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400); + /* time / CLK_AHB = 2.5s / 2ns = 0x4A817C80 */ + hisi_sas_write32(hisi_hba, TRANS_LOCK_ICT_TIME, 0x4A817C80); + hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); + hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); + hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN, + hisi_sas_intr_conv); + hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff); + hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0); + hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0); + hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0); + hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0); + hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0); + hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); + + if (pdev->revision < 0x30) + hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0); + + interrupt_enable_v3_hw(hisi_hba); + for (i = 0; i < hisi_hba->n_phy; i++) { + enum sas_linkrate max; + struct hisi_sas_phy *phy = &hisi_hba->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, i, + PROG_PHY_LINK_RATE); + + prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK; + if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate < + SAS_LINK_RATE_1_5_GBPS)) + max = SAS_LINK_RATE_12_0_GBPS; + 
else + max = sas_phy->phy->maximum_linkrate; + prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max); + hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, + prog_phy_link_rate); + hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); + hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1); + hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); + hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01); + hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME, + 0x30f4240); + hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff); + + /* set value through firmware for 920B and later version */ + if (pdev->revision < 0x30) { + hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32); + hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00); + /* used for 12G negotiate */ + hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e); + } + + /* get default FFE configuration for BIST */ + for (j = 0; j < FFE_CFG_MAX; j++) { + u32 val = hisi_sas_phy_read32(hisi_hba, i, + TXDEEMPH_G1 + (j * 0x4)); + hisi_hba->debugfs_bist_ffe[i][j] = val; + } + } + + for (i = 0; i < hisi_hba->queue_count; i++) { + /* Delivery queue */ + hisi_sas_write32(hisi_hba, + DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14), + upper_32_bits(hisi_hba->cmd_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14), + lower_32_bits(hisi_hba->cmd_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14), + HISI_SAS_QUEUE_SLOTS); + + /* Completion queue */ + hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14), + upper_32_bits(hisi_hba->complete_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14), + lower_32_bits(hisi_hba->complete_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14), + HISI_SAS_QUEUE_SLOTS); + } + + /* itct */ + hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO, + lower_32_bits(hisi_hba->itct_dma)); + + hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI, + upper_32_bits(hisi_hba->itct_dma)); + + /* iost */ + hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO, + lower_32_bits(hisi_hba->iost_dma)); + + hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI, + upper_32_bits(hisi_hba->iost_dma)); + + /* breakpoint */ + hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO, + lower_32_bits(hisi_hba->breakpoint_dma)); + + hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI, + upper_32_bits(hisi_hba->breakpoint_dma)); + + /* SATA broken msg */ + hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO, + lower_32_bits(hisi_hba->sata_breakpoint_dma)); + + hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI, + upper_32_bits(hisi_hba->sata_breakpoint_dma)); + + /* SATA initial fis */ + hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO, + lower_32_bits(hisi_hba->initial_fis_dma)); + + hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI, + upper_32_bits(hisi_hba->initial_fis_dma)); + + /* RAS registers init */ + hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0); + hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0); + hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0); + hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0); + + /* LED registers init */ + 
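Earlier in init_reg_v3_hw(), each delivery and completion queue base is programmed by splitting a 64-bit DMA address into two 32-bit writes, with the per-queue register block repeating every 0x14 bytes. A self-contained sketch of that split, using invented queue addresses and small helpers standing in for lower_32_bits()/upper_32_bits():

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's lower_32_bits()/upper_32_bits(); the queue
 * base addresses below are invented for the example. */
static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t cmd_hdr_dma[2] = { 0x000012345678c000ULL, 0x000012345678d000ULL };
	int i;

	for (i = 0; i < 2; i++) {
		/* Per-queue register block: DLVRY_Q_0_* plus an 0x14 stride. */
		unsigned int lo_off = 0x260 + i * 0x14;	/* ..._BASE_ADDR_LO */
		unsigned int hi_off = 0x264 + i * 0x14;	/* ..._BASE_ADDR_HI */

		printf("queue %d: 0x%08x -> reg 0x%x, 0x%08x -> reg 0x%x\n",
		       i, lower_32(cmd_hdr_dma[i]), lo_off,
		       upper_32(cmd_hdr_dma[i]), hi_off);
	}
	return 0;
}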
hisi_sas_write32(hisi_hba, SAS_CFG_DRIVE_VLD, 0x80000ff); + hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1, 0x80808080); + hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1 + 0x4, 0x80808080); + /* Configure blink generator rate A to 1Hz and B to 4Hz */ + hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_1, 0x121700); + hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_0, 0x800000); +} + +static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg &= ~PHY_CFG_DC_OPT_MSK; + cfg |= 1 << PHY_CFG_DC_OPT_OFF; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + +static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + struct sas_identify_frame identify_frame; + u32 *identify_buffer; + + memset(&identify_frame, 0, sizeof(identify_frame)); + identify_frame.dev_type = SAS_END_DEVICE; + identify_frame.frame_type = 0; + identify_frame._un1 = 1; + identify_frame.initiator_bits = SAS_PROTOCOL_ALL; + identify_frame.target_bits = SAS_PROTOCOL_NONE; + memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); + memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); + identify_frame.phy_id = phy_no; + identify_buffer = (u32 *)(&identify_frame); + + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, + __swab32(identify_buffer[0])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, + __swab32(identify_buffer[1])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, + __swab32(identify_buffer[2])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, + __swab32(identify_buffer[3])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, + __swab32(identify_buffer[4])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, + __swab32(identify_buffer[5])); +} + +static void setup_itct_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_device *sas_dev) +{ + struct domain_device *device = sas_dev->sas_device; + struct device *dev = hisi_hba->dev; + u64 qw0, device_id = sas_dev->device_id; + struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; + struct domain_device *parent_dev = device->parent; + struct asd_sas_port *sas_port = device->port; + struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + u64 sas_addr; + + memset(itct, 0, sizeof(*itct)); + + /* qw0 */ + qw0 = 0; + switch (sas_dev->dev_type) { + case SAS_END_DEVICE: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: + qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF; + break; + case SAS_SATA_DEV: + case SAS_SATA_PENDING: + if (parent_dev && dev_is_expander(parent_dev->dev_type)) + qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF; + else + qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF; + break; + default: + dev_warn(dev, "setup itct: unsupported dev type (%d)\n", + sas_dev->dev_type); + } + + qw0 |= ((1 << ITCT_HDR_VALID_OFF) | + (device->linkrate << ITCT_HDR_MCR_OFF) | + (1 << ITCT_HDR_VLN_OFF) | + (0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) | + (1 << ITCT_HDR_AWT_CONTINUE_OFF) | + (port->id << ITCT_HDR_PORT_ID_OFF)); + itct->qw0 = cpu_to_le64(qw0); + + /* qw1 */ + memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE); + itct->sas_addr = cpu_to_le64(__swab64(sas_addr)); + + /* qw2 */ + if (!dev_is_sata(device)) + itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) | + (0x1ULL << ITCT_HDR_RTOLT_OFF)); +} + +static int clear_itct_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_device *sas_dev) +{ + DECLARE_COMPLETION_ONSTACK(completion); + u64 dev_id = sas_dev->device_id; + struct hisi_sas_itct 
*itct = &hisi_hba->itct[dev_id]; + u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); + struct device *dev = hisi_hba->dev; + + sas_dev->completion = &completion; + + /* clear the itct interrupt state */ + if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, + ENT_INT_SRC3_ITC_INT_MSK); + + /* clear the itct table */ + reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); + hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); + + if (!wait_for_completion_timeout(sas_dev->completion, + HISI_SAS_CLEAR_ITCT_TIMEOUT)) { + dev_warn(dev, "failed to clear ITCT\n"); + return -ETIMEDOUT; + } + + memset(itct, 0, sizeof(struct hisi_sas_itct)); + return 0; +} + +static void dereg_device_v3_hw(struct hisi_hba *hisi_hba, + struct domain_device *device) +{ + struct hisi_sas_slot *slot, *slot2; + struct hisi_sas_device *sas_dev = device->lldd_dev; + u32 cfg_abt_set_query_iptt; + + cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba, + CFG_ABT_SET_QUERY_IPTT); + spin_lock(&sas_dev->lock); + list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) { + cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK; + cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) | + (slot->idx << CFG_SET_ABORTED_IPTT_OFF); + hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT, + cfg_abt_set_query_iptt); + } + spin_unlock(&sas_dev->lock); + cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF); + hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT, + cfg_abt_set_query_iptt); + hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE, + 1 << CFG_ABT_SET_IPTT_DONE_OFF); +} + +static int reset_hw_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + int ret; + u32 val; + + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); + + /* Disable all of the PHYs */ + hisi_sas_stop_phys(hisi_hba); + udelay(50); + + /* Ensure axi bus idle */ + ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val, + 20000, 1000000); + if (ret) { + dev_err(dev, "axi bus is not idle, ret = %d!\n", ret); + return -EIO; + } + + if (ACPI_HANDLE(dev)) { + acpi_status s; + + s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL); + if (ACPI_FAILURE(s)) { + dev_err(dev, "Reset failed\n"); + return -EIO; + } + } else { + dev_err(dev, "no reset method!\n"); + return -EINVAL; + } + + return 0; +} + +static int hw_init_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + struct acpi_device *acpi_dev; + union acpi_object *obj; + guid_t guid; + int rc; + + rc = reset_hw_v3_hw(hisi_hba); + if (rc) { + dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc); + return rc; + } + + msleep(100); + init_reg_v3_hw(hisi_hba); + + if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) { + dev_err(dev, "Parse GUID failed\n"); + return -EINVAL; + } + + /* + * This DSM handles some hardware-related configurations: + * 1. Switch over to MSI error handling in kernel + * 2. 
BIOS *may* reset some register values through this method + */ + obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0, + DSM_FUNC_ERR_HANDLE_MSI, NULL); + if (!obj) + dev_warn(dev, "can not find DSM method, ignore\n"); + else + ACPI_FREE(obj); + + acpi_dev = ACPI_COMPANION(dev); + if (!acpi_device_power_manageable(acpi_dev)) + dev_notice(dev, "neither _PS0 nor _PR0 is defined\n"); + return 0; +} + +static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg |= PHY_CFG_ENA_MSK; + cfg &= ~PHY_CFG_PHY_RST_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + +static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK); + static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) | + BIT(CHL_INT2_RX_CODE_ERR_OFF) | + BIT(CHL_INT2_RX_INVLD_DW_OFF); + u32 state; + + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, msk | irq_msk); + + cfg &= ~PHY_CFG_ENA_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); + + mdelay(50); + + state = hisi_sas_read32(hisi_hba, PHY_STATE); + if (state & BIT(phy_no)) { + cfg |= PHY_CFG_PHY_RST_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); + } + + udelay(1); + + hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW); + hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR); + hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR); + + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, msk); + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, irq_msk); +} + +static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + config_id_frame_v3_hw(hisi_hba, phy_no); + config_phy_opt_mode_v3_hw(hisi_hba, phy_no); + enable_phy_v3_hw(hisi_hba, phy_no); +} + +static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + u32 txid_auto; + + hisi_sas_phy_enable(hisi_hba, phy_no, 0); + if (phy->identify.device_type == SAS_END_DEVICE) { + txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); + hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, + txid_auto | TX_HARDRST_MSK); + } + msleep(100); + hisi_sas_phy_enable(hisi_hba, phy_no, 1); +} + +static enum sas_linkrate phy_get_max_linkrate_v3_hw(void) +{ + return SAS_LINK_RATE_12_0_GBPS; +} + +static void phys_init_v3_hw(struct hisi_hba *hisi_hba) +{ + int i; + + for (i = 0; i < hisi_hba->n_phy; i++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + if (!sas_phy->phy->enabled) + continue; + + hisi_sas_phy_enable(hisi_hba, i, 1); + } +} + +static void sl_notify_ssp_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 sl_control; + + sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); + sl_control |= SL_CONTROL_NOTIFY_EN_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); + msleep(1); + sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); + sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); +} + +static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id) +{ + int i, bitmap = 0; + u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); + u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); + + for (i = 0; i < hisi_hba->n_phy; i++) + if (phy_state & BIT(i)) + if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) + bitmap |= BIT(i); + 
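The wide-port lookup just above combines two register snapshots: a phy counts toward the port's bitmap only when its ready bit is set in PHY_STATE and its 4-bit entry in PHY_PORT_NUM_MA matches the requested port id. A user-space sketch of that test, with invented snapshots (the packing is the same per-phy nibble layout illustrated earlier for the v2 hw):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t phy_state = 0x0f;		/* hypothetical: phys 0..3 ready */
	uint32_t phy_port_num_ma = 0x00001100;	/* hypothetical: phys 2,3 on port 1 */
	unsigned int port_id = 1;
	int i, bitmap = 0;

	for (i = 0; i < 8; i++)
		if (phy_state & (1u << i))
			if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
				bitmap |= 1 << i;

	printf("wide-port bitmap for port %u: 0x%x\n", port_id, bitmap);
	return 0;
}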
+ return bitmap; +} + +static void start_delivery_v3_hw(struct hisi_sas_dq *dq) +{ + struct hisi_hba *hisi_hba = dq->hisi_hba; + struct hisi_sas_slot *s, *s1, *s2 = NULL; + int dlvry_queue = dq->id; + int wp; + + list_for_each_entry_safe(s, s1, &dq->list, delivery) { + if (!s->ready) + break; + s2 = s; + list_del(&s->delivery); + } + + if (!s2) + return; + + /* + * Ensure that memories for slots built on other CPUs is observed. + */ + smp_rmb(); + wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; + + hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); +} + +static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, + struct hisi_sas_cmd_hdr *hdr, + struct scatterlist *scatter, + int n_elem) +{ + struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); + struct scatterlist *sg; + int i; + + for_each_sg(scatter, sg, n_elem, i) { + struct hisi_sas_sge *entry = &sge_page->sge[i]; + + entry->addr = cpu_to_le64(sg_dma_address(sg)); + entry->page_ctrl_0 = entry->page_ctrl_1 = 0; + entry->data_len = cpu_to_le32(sg_dma_len(sg)); + entry->data_off = 0; + } + + hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); + + hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); +} + +static void prep_prd_sge_dif_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, + struct hisi_sas_cmd_hdr *hdr, + struct scatterlist *scatter, + int n_elem) +{ + struct hisi_sas_sge_dif_page *sge_dif_page; + struct scatterlist *sg; + int i; + + sge_dif_page = hisi_sas_sge_dif_addr_mem(slot); + + for_each_sg(scatter, sg, n_elem, i) { + struct hisi_sas_sge *entry = &sge_dif_page->sge[i]; + + entry->addr = cpu_to_le64(sg_dma_address(sg)); + entry->page_ctrl_0 = 0; + entry->page_ctrl_1 = 0; + entry->data_len = cpu_to_le32(sg_dma_len(sg)); + entry->data_off = 0; + } + + hdr->dif_prd_table_addr = + cpu_to_le64(hisi_sas_sge_dif_addr_dma(slot)); + + hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DIF_SGL_LEN_OFF); +} + +static u32 get_prot_chk_msk_v3_hw(struct scsi_cmnd *scsi_cmnd) +{ + unsigned char prot_flags = scsi_cmnd->prot_flags; + + if (prot_flags & SCSI_PROT_REF_CHECK) + return T10_CHK_APP_TAG_MSK; + return T10_CHK_REF_TAG_MSK | T10_CHK_APP_TAG_MSK; +} + +static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd, + struct hisi_sas_protect_iu_v3_hw *prot) +{ + unsigned char prot_op = scsi_get_prot_op(scsi_cmnd); + unsigned int interval = scsi_prot_interval(scsi_cmnd); + u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmd_to_rq(scsi_cmnd)); + + switch (prot_op) { + case SCSI_PROT_READ_INSERT: + prot->dw0 |= T10_INSRT_EN_MSK; + prot->lbrtgv = lbrt_chk_val; + break; + case SCSI_PROT_READ_STRIP: + prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK); + prot->lbrtcv = lbrt_chk_val; + prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd); + break; + case SCSI_PROT_READ_PASS: + prot->dw0 |= T10_CHK_EN_MSK; + prot->lbrtcv = lbrt_chk_val; + prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd); + break; + case SCSI_PROT_WRITE_INSERT: + prot->dw0 |= T10_INSRT_EN_MSK; + prot->lbrtgv = lbrt_chk_val; + break; + case SCSI_PROT_WRITE_STRIP: + prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK); + prot->lbrtcv = lbrt_chk_val; + break; + case SCSI_PROT_WRITE_PASS: + prot->dw0 |= T10_CHK_EN_MSK; + prot->lbrtcv = lbrt_chk_val; + prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd); + break; + default: + WARN(1, "prot_op(0x%x) is not valid\n", prot_op); + break; + } + + switch (interval) { + case 512: + break; + case 4096: + prot->dw0 |= (0x1 << USR_DATA_BLOCK_SZ_OFF); + break; + case 520: + 
prot->dw0 |= (0x2 << USR_DATA_BLOCK_SZ_OFF); + break; + default: + WARN(1, "protection interval (0x%x) invalid\n", + interval); + break; + } + + prot->dw0 |= INCR_LBRT_MSK; +} + +static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct domain_device *device = task->dev; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_port *port = slot->port; + struct sas_ssp_task *ssp_task = &task->ssp_task; + struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; + struct sas_tmf_task *tmf = slot->tmf; + int has_data = 0, priority = !!tmf; + unsigned char prot_op; + u8 *buf_cmd; + u32 dw1 = 0, dw2 = 0, len = 0; + + hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | + (2 << CMD_HDR_TLR_CTRL_OFF) | + (port->id << CMD_HDR_PORT_OFF) | + (priority << CMD_HDR_PRIORITY_OFF) | + (1 << CMD_HDR_CMD_OFF)); /* ssp */ + + dw1 = 1 << CMD_HDR_VDTL_OFF; + if (tmf) { + dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF; + dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF; + } else { + prot_op = scsi_get_prot_op(scsi_cmnd); + dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF; + switch (scsi_cmnd->sc_data_direction) { + case DMA_TO_DEVICE: + has_data = 1; + dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; + break; + case DMA_FROM_DEVICE: + has_data = 1; + dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; + break; + default: + dw1 &= ~CMD_HDR_DIR_MSK; + } + } + + /* map itct entry */ + dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; + + dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) + + 3) / 4) << CMD_HDR_CFL_OFF) | + ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) | + (2 << CMD_HDR_SG_MOD_OFF); + hdr->dw2 = cpu_to_le32(dw2); + hdr->transfer_tags = cpu_to_le32(slot->idx); + + if (has_data) { + prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, + slot->n_elem); + + if (scsi_prot_sg_count(scsi_cmnd)) + prep_prd_sge_dif_v3_hw(hisi_hba, slot, hdr, + scsi_prot_sglist(scsi_cmnd), + slot->n_elem_dif); + } + + hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); + + buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) + + sizeof(struct ssp_frame_hdr); + + memcpy(buf_cmd, &task->ssp_task.LUN, 8); + if (!tmf) { + buf_cmd[9] = ssp_task->task_attr; + memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); + } else { + buf_cmd[10] = tmf->tmf; + switch (tmf->tmf) { + case TMF_ABORT_TASK: + case TMF_QUERY_TASK: + buf_cmd[12] = + (tmf->tag_of_task_to_be_managed >> 8) & 0xff; + buf_cmd[13] = + tmf->tag_of_task_to_be_managed & 0xff; + break; + default: + break; + } + } + + if (has_data && (prot_op != SCSI_PROT_NORMAL)) { + struct hisi_sas_protect_iu_v3_hw prot; + u8 *buf_cmd_prot; + + hdr->dw7 |= cpu_to_le32(1 << CMD_HDR_ADDR_MODE_SEL_OFF); + dw1 |= CMD_HDR_PIR_MSK; + buf_cmd_prot = hisi_sas_cmd_hdr_addr_mem(slot) + + sizeof(struct ssp_frame_hdr) + + sizeof(struct ssp_command_iu); + + memset(&prot, 0, sizeof(struct hisi_sas_protect_iu_v3_hw)); + fill_prot_v3_hw(scsi_cmnd, &prot); + memcpy(buf_cmd_prot, &prot, + sizeof(struct hisi_sas_protect_iu_v3_hw)); + /* + * For READ, we need length of info read to memory, while for + * WRITE we need length of data written to the disk. 
+ */ + if (prot_op == SCSI_PROT_WRITE_INSERT || + prot_op == SCSI_PROT_READ_INSERT || + prot_op == SCSI_PROT_WRITE_PASS || + prot_op == SCSI_PROT_READ_PASS) { + unsigned int interval = scsi_prot_interval(scsi_cmnd); + unsigned int ilog2_interval = ilog2(interval); + + len = (task->total_xfer_len >> ilog2_interval) * 8; + } + } + + hdr->dw1 = cpu_to_le32(dw1); + + hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len); +} + +static void prep_smp_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct domain_device *device = task->dev; + struct hisi_sas_port *port = slot->port; + struct scatterlist *sg_req; + struct hisi_sas_device *sas_dev = device->lldd_dev; + dma_addr_t req_dma_addr; + unsigned int req_len; + + /* req */ + sg_req = &task->smp_task.smp_req; + req_len = sg_dma_len(sg_req); + req_dma_addr = sg_dma_address(sg_req); + + /* create header */ + /* dw0 */ + hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | + (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */ + (2 << CMD_HDR_CMD_OFF)); /* smp */ + + /* map itct entry */ + hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) | + (1 << CMD_HDR_FRAME_TYPE_OFF) | + (DIR_NO_DATA << CMD_HDR_DIR_OFF)); + + /* dw2 */ + hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) | + (HISI_SAS_MAX_SMP_RESP_SZ / 4 << + CMD_HDR_MRFL_OFF)); + + hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); + + hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); +} + +static void prep_ata_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct domain_device *device = task->dev; + struct domain_device *parent_dev = device->parent; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct asd_sas_port *sas_port = device->port; + struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + u8 *buf_cmd; + int has_data = 0, hdr_tag = 0; + u32 dw1 = 0, dw2 = 0; + + hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); + if (parent_dev && dev_is_expander(parent_dev->dev_type)) + hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF); + else + hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF); + + switch (task->data_dir) { + case DMA_TO_DEVICE: + has_data = 1; + dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; + break; + case DMA_FROM_DEVICE: + has_data = 1; + dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; + break; + default: + dw1 &= ~CMD_HDR_DIR_MSK; + } + + if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) && + (task->ata_task.fis.control & ATA_SRST)) + dw1 |= 1 << CMD_HDR_RESET_OFF; + + dw1 |= (hisi_sas_get_ata_protocol( + &task->ata_task.fis, task->data_dir)) + << CMD_HDR_FRAME_TYPE_OFF; + dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; + + if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis)) + dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF; + + hdr->dw1 = cpu_to_le32(dw1); + + /* dw2 */ + if (task->ata_task.use_ncq) { + struct ata_queued_cmd *qc = task->uldd_task; + + hdr_tag = qc->tag; + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; + } + + dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF | + 2 << CMD_HDR_SG_MOD_OFF; + hdr->dw2 = cpu_to_le32(dw2); + + /* dw3 */ + hdr->transfer_tags = cpu_to_le32(slot->idx); + + if (has_data) + prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, + slot->n_elem); + + 
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); + hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); + hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); + + buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot); + + if (likely(!task->ata_task.device_control_reg_update)) + task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ + /* fill in command FIS */ + memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); +} + +static void prep_abort_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct sas_internal_abort_task *abort = &task->abort_task; + struct domain_device *dev = task->dev; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct hisi_sas_port *port = slot->port; + struct hisi_sas_device *sas_dev = dev->lldd_dev; + bool sata = dev_is_sata(dev); + + /* dw0 */ + hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /* abort */ + (port->id << CMD_HDR_PORT_OFF) | + (sata << CMD_HDR_ABORT_DEVICE_TYPE_OFF) | + (abort->type << CMD_HDR_ABORT_FLAG_OFF)); + + /* dw1 */ + hdr->dw1 = cpu_to_le32(sas_dev->device_id + << CMD_HDR_DEV_ID_OFF); + + /* dw7 */ + hdr->dw7 = cpu_to_le32(abort->tag << CMD_HDR_ABORT_IPTT_OFF); + hdr->transfer_tags = cpu_to_le32(slot->idx); +} + +static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) +{ + int i; + irqreturn_t res; + u32 context, port_id, link_rate; + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct device *dev = hisi_hba->dev; + + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); + + port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); + port_id = (port_id >> (4 * phy_no)) & 0xf; + link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); + link_rate = (link_rate >> (phy_no * 4)) & 0xf; + + if (port_id == 0xf) { + dev_err(dev, "phyup: phy%d invalid portid\n", phy_no); + res = IRQ_NONE; + goto end; + } + sas_phy->linkrate = link_rate; + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + + /* Check for SATA dev */ + context = hisi_sas_read32(hisi_hba, PHY_CONTEXT); + if (context & (1 << phy_no)) { + struct hisi_sas_initial_fis *initial_fis; + struct dev_to_host_fis *fis; + u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; + struct Scsi_Host *shost = hisi_hba->shost; + + dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate); + initial_fis = &hisi_hba->initial_fis[phy_no]; + fis = &initial_fis->fis; + + /* check ERR bit of Status Register */ + if (fis->status & ATA_ERR) { + dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", + phy_no, fis->status); + hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); + res = IRQ_NONE; + goto end; + } + + sas_phy->oob_mode = SATA_OOB_MODE; + attached_sas_addr[0] = 0x50; + attached_sas_addr[6] = shost->host_no; + attached_sas_addr[7] = phy_no; + memcpy(sas_phy->attached_sas_addr, + attached_sas_addr, + SAS_ADDR_SIZE); + memcpy(sas_phy->frame_rcvd, fis, + sizeof(struct dev_to_host_fis)); + phy->phy_type |= PORT_TYPE_SATA; + phy->identify.device_type = SAS_SATA_DEV; + phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); + phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; + } else { + u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; + struct sas_identify_frame *id = + (struct sas_identify_frame *)frame_rcvd; + + dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate); + for (i = 0; i < 6; i++) { + u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no, + RX_IDAF_DWORD0 + (i * 4)); + frame_rcvd[i] = 
__swab32(idaf); + } + sas_phy->oob_mode = SAS_OOB_MODE; + memcpy(sas_phy->attached_sas_addr, + &id->sas_addr, + SAS_ADDR_SIZE); + phy->phy_type |= PORT_TYPE_SAS; + phy->identify.device_type = id->dev_type; + phy->frame_rcvd_size = sizeof(struct sas_identify_frame); + if (phy->identify.device_type == SAS_END_DEVICE) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != SAS_PHY_UNUSED) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SMP; + } + + phy->port_id = port_id; + + /* + * Call pm_runtime_get_noresume() which pairs with + * hisi_sas_phyup_pm_work() -> pm_runtime_put_sync(). + * For failure call pm_runtime_put() as we are in a hardirq context. + */ + pm_runtime_get_noresume(dev); + res = hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM); + if (!res) + pm_runtime_put(dev); + + res = IRQ_HANDLED; + + spin_lock(&phy->lock); + /* Delete timer and set phy_attached atomically */ + del_timer(&phy->timer); + phy->phy_attached = 1; + spin_unlock(&phy->lock); +end: + if (phy->reset_completion) + complete(phy->reset_completion); + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, + CHL_INT0_SL_PHY_ENABLE_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0); + + return res; +} + +static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + u32 phy_state, sl_ctrl, txid_auto; + struct device *dev = hisi_hba->dev; + + atomic_inc(&phy->down_cnt); + + del_timer(&phy->timer); + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); + + phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); + dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state); + hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 
1 : 0, + GFP_ATOMIC); + + sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); + hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, + sl_ctrl&(~SL_CTA_MSK)); + + txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); + hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, + txid_auto | CT3_MSK); + + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0); + + return IRQ_HANDLED; +} + +static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + u32 bcast_status; + + hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); + bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS); + if (bcast_status & RX_BCAST_CHG_MSK) + hisi_sas_phy_bcast(phy); + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, + CHL_INT0_SL_RX_BCST_ACK_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); + + return IRQ_HANDLED; +} + +static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p) +{ + struct hisi_hba *hisi_hba = p; + u32 irq_msk; + int phy_no = 0; + irqreturn_t res = IRQ_NONE; + + irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) + & 0x11111111; + while (irq_msk) { + if (irq_msk & 1) { + u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT0); + u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); + int rdy = phy_state & (1 << phy_no); + + if (rdy) { + if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK) + /* phy up */ + if (phy_up_v3_hw(phy_no, hisi_hba) + == IRQ_HANDLED) + res = IRQ_HANDLED; + if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK) + /* phy bcast */ + if (phy_bcast_v3_hw(phy_no, hisi_hba) + == IRQ_HANDLED) + res = IRQ_HANDLED; + } else { + if (irq_value & CHL_INT0_NOT_RDY_MSK) + /* phy down */ + if (phy_down_v3_hw(phy_no, hisi_hba) + == IRQ_HANDLED) + res = IRQ_HANDLED; + } + } + irq_msk >>= 4; + phy_no++; + } + + return res; +} + +static const struct hisi_sas_hw_error port_axi_error[] = { + { + .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF), + .msg = "dmac_tx_ecc_bad_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF), + .msg = "dmac_rx_ecc_bad_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF), + .msg = "dma_tx_axi_wr_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF), + .msg = "dma_tx_axi_rd_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF), + .msg = "dma_rx_axi_wr_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF), + .msg = "dma_rx_axi_rd_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_TX_FIFO_ERR_OFF), + .msg = "dma_tx_fifo_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_RX_FIFO_ERR_OFF), + .msg = "dma_rx_fifo_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF), + .msg = "dma_tx_axi_ruser_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF), + .msg = "dma_rx_axi_ruser_err", + }, +}; + +static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1); + u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1_MSK); + struct device *dev = hisi_hba->dev; + int i; + + irq_value &= ~irq_msk; + if (!irq_value) { + dev_warn(dev, "phy%d channel int 1 received with status bits cleared\n", + phy_no); + return; + } + + for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) { + const struct hisi_sas_hw_error *error = &port_axi_error[i]; + + if (!(irq_value & error->irq_msk)) + continue; + + dev_err(dev, "%s error (phy%d 0x%x) 
found!\n", + error->msg, phy_no, irq_value); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } + + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value); +} + +static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_phy *sphy = sas_phy->phy; + unsigned long flags; + u32 reg_value; + + spin_lock_irqsave(&phy->lock, flags); + + /* loss dword sync */ + reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST); + sphy->loss_of_dword_sync_count += reg_value; + + /* phy reset problem */ + reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB); + sphy->phy_reset_problem_count += reg_value; + + /* invalid dword */ + reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW); + sphy->invalid_dword_count += reg_value; + + /* disparity err */ + reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR); + sphy->running_disparity_error_count += reg_value; + + /* code violation error */ + reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR); + phy->code_violation_err_count += reg_value; + + spin_unlock_irqrestore(&phy->lock, flags); +} + +static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK); + u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2); + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct pci_dev *pci_dev = hisi_hba->pci_dev; + struct device *dev = hisi_hba->dev; + static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) | + BIT(CHL_INT2_RX_CODE_ERR_OFF) | + BIT(CHL_INT2_RX_INVLD_DW_OFF); + + irq_value &= ~irq_msk; + if (!irq_value) { + dev_warn(dev, "phy%d channel int 2 received with status bits cleared\n", + phy_no); + return; + } + + if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) { + dev_warn(dev, "phy%d identify timeout\n", phy_no); + hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); + } + + if (irq_value & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) { + u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, + STP_LINK_TIMEOUT_STATE); + + dev_warn(dev, "phy%d stp link timeout (0x%x)\n", + phy_no, reg_value); + if (reg_value & BIT(4)) + hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); + } + + if (pci_dev->revision > 0x20 && (irq_value & msk)) { + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_phy *sphy = sas_phy->phy; + + phy_get_events_v3_hw(hisi_hba, phy_no); + + if (irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) + dev_info(dev, "phy%d invalid dword cnt: %u\n", phy_no, + sphy->invalid_dword_count); + + if (irq_value & BIT(CHL_INT2_RX_CODE_ERR_OFF)) + dev_info(dev, "phy%d code violation cnt: %u\n", phy_no, + phy->code_violation_err_count); + + if (irq_value & BIT(CHL_INT2_RX_DISP_ERR_OFF)) + dev_info(dev, "phy%d disparity error cnt: %u\n", phy_no, + sphy->running_disparity_error_count); + } + + if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) && + (pci_dev->revision == 0x20)) { + u32 reg_value; + int rc; + + rc = hisi_sas_read32_poll_timeout_atomic( + HILINK_ERR_DFX, reg_value, + !((reg_value >> 8) & BIT(phy_no)), + 1000, 10000); + if (rc) + hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); + } + + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value); +} + +static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); + + if (irq_value0 & CHL_INT0_PHY_RDY_MSK) + 
hisi_sas_phy_oob_ready(hisi_hba, phy_no); + + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, + irq_value0 & (~CHL_INT0_SL_RX_BCST_ACK_MSK) + & (~CHL_INT0_SL_PHY_ENABLE_MSK) + & (~CHL_INT0_NOT_RDY_MSK)); +} + +static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) +{ + struct hisi_hba *hisi_hba = p; + u32 irq_msk; + int phy_no = 0; + + irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) + & CHNL_INT_STS_MSK; + + while (irq_msk) { + if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH))) + handle_chl_int0_v3_hw(hisi_hba, phy_no); + + if (irq_msk & (CHNL_INT_STS_INT1_MSK << (phy_no * CHNL_WIDTH))) + handle_chl_int1_v3_hw(hisi_hba, phy_no); + + if (irq_msk & (CHNL_INT_STS_INT2_MSK << (phy_no * CHNL_WIDTH))) + handle_chl_int2_v3_hw(hisi_hba, phy_no); + + irq_msk &= ~(CHNL_INT_STS_PHY_MSK << (phy_no * CHNL_WIDTH)); + phy_no++; + } + + return IRQ_HANDLED; +} + +static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = { + { + .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF), + .msk = HGC_DQE_ECC_MB_ADDR_MSK, + .shift = HGC_DQE_ECC_MB_ADDR_OFF, + .msg = "hgc_dqe_eccbad_intr", + .reg = HGC_DQE_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF), + .msk = HGC_IOST_ECC_MB_ADDR_MSK, + .shift = HGC_IOST_ECC_MB_ADDR_OFF, + .msg = "hgc_iost_eccbad_intr", + .reg = HGC_IOST_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF), + .msk = HGC_ITCT_ECC_MB_ADDR_MSK, + .shift = HGC_ITCT_ECC_MB_ADDR_OFF, + .msg = "hgc_itct_eccbad_intr", + .reg = HGC_ITCT_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF), + .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK, + .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF, + .msg = "hgc_iostl_eccbad_intr", + .reg = HGC_LM_DFX_STATUS2, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF), + .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK, + .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF, + .msg = "hgc_itctl_eccbad_intr", + .reg = HGC_LM_DFX_STATUS2, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF), + .msk = HGC_CQE_ECC_MB_ADDR_MSK, + .shift = HGC_CQE_ECC_MB_ADDR_OFF, + .msg = "hgc_cqe_eccbad_intr", + .reg = HGC_CQE_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF, + .msg = "rxm_mem0_eccbad_intr", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF, + .msg = "rxm_mem1_eccbad_intr", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF, + .msg = "rxm_mem2_eccbad_intr", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK, + .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF, + .msg = "rxm_mem3_eccbad_intr", + .reg = HGC_RXM_DFX_STATUS15, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF), + .msk = AM_ROB_ECC_ERR_ADDR_MSK, + .shift = AM_ROB_ECC_ERR_ADDR_OFF, + .msg = "ooo_ram_eccbad_intr", + .reg = AM_ROB_ECC_ERR_ADDR, + }, +}; + +static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba *hisi_hba, + u32 irq_value) +{ + struct device *dev = hisi_hba->dev; + const struct hisi_sas_hw_error *ecc_error; + u32 val; + int i; + + for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) { + ecc_error = &multi_bit_ecc_errors[i]; + if (irq_value & ecc_error->irq_msk) { + val = hisi_sas_read32(hisi_hba, ecc_error->reg); + val &= 
ecc_error->msk; + val >>= ecc_error->shift; + dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n", + ecc_error->msg, irq_value, val); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } + } +} + +static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba) +{ + u32 irq_value, irq_msk; + + irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff); + + irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR); + if (irq_value) + multi_bit_ecc_error_process_v3_hw(hisi_hba, irq_value); + + hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk); +} + +static const struct hisi_sas_hw_error axi_error[] = { + { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" }, + { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" }, + { .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" }, + { .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" }, + { .msk = BIT(4), .msg = "SATA_AXI_W_ERR" }, + { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" }, + { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" }, + { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" }, + {} +}; + +static const struct hisi_sas_hw_error fifo_error[] = { + { .msk = BIT(8), .msg = "CQE_WINFO_FIFO" }, + { .msk = BIT(9), .msg = "CQE_MSG_FIFIO" }, + { .msk = BIT(10), .msg = "GETDQE_FIFO" }, + { .msk = BIT(11), .msg = "CMDP_FIFO" }, + { .msk = BIT(12), .msg = "AWTCTRL_FIFO" }, + {} +}; + +static const struct hisi_sas_hw_error fatal_axi_error[] = { + { + .irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF), + .msg = "write pointer and depth", + }, + { + .irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF), + .msg = "iptt no match slot", + }, + { + .irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF), + .msg = "read pointer and depth", + }, + { + .irq_msk = BIT(ENT_INT_SRC3_AXI_OFF), + .reg = HGC_AXI_FIFO_ERR_INFO, + .sub = axi_error, + }, + { + .irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF), + .reg = HGC_AXI_FIFO_ERR_INFO, + .sub = fifo_error, + }, + { + .irq_msk = BIT(ENT_INT_SRC3_LM_OFF), + .msg = "LM add/fetch list", + }, + { + .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF), + .msg = "SAS_HGC_ABT fetch LM list", + }, + { + .irq_msk = BIT(ENT_INT_SRC3_DQE_POISON_OFF), + .msg = "read dqe poison", + }, + { + .irq_msk = BIT(ENT_INT_SRC3_IOST_POISON_OFF), + .msg = "read iost poison", + }, + { + .irq_msk = BIT(ENT_INT_SRC3_ITCT_POISON_OFF), + .msg = "read itct poison", + }, + { + .irq_msk = BIT(ENT_INT_SRC3_ITCT_NCQ_POISON_OFF), + .msg = "read itct ncq poison", + }, + +}; + +static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p) +{ + u32 irq_value, irq_msk; + struct hisi_hba *hisi_hba = p; + struct device *dev = hisi_hba->dev; + struct pci_dev *pdev = hisi_hba->pci_dev; + int i; + + irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00); + + irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); + irq_value &= ~irq_msk; + + for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) { + const struct hisi_sas_hw_error *error = &fatal_axi_error[i]; + + if (!(irq_value & error->irq_msk)) + continue; + + if (error->sub) { + const struct hisi_sas_hw_error *sub = error->sub; + u32 err_value = hisi_sas_read32(hisi_hba, error->reg); + + for (; sub->msk || sub->msg; sub++) { + if (!(err_value & sub->msk)) + continue; + + dev_err(dev, "%s error (0x%x) found!\n", + sub->msg, irq_value); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } + } else { + dev_err(dev, "%s error (0x%x) found!\n", + error->msg, irq_value); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } + + if (pdev->revision < 0x21) { + u32 reg_val; + + 
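+			/* On controller revisions before 0x21, additionally request an AXI master shutdown (AM_CTRL_SHUTDOWN_REQ_MSK in AM_CTRL_GLOBAL) once a fatal error has been reported. */ +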
reg_val = hisi_sas_read32(hisi_hba, + AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL); + reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK; + hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL, reg_val); + } + } + + fatal_ecc_int_v3_hw(hisi_hba); + + if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) { + u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR); + u32 dev_id = reg_val & ITCT_DEV_MSK; + struct hisi_sas_device *sas_dev = + &hisi_hba->devices[dev_id]; + + hisi_sas_write32(hisi_hba, ITCT_CLR, 0); + dev_dbg(dev, "clear ITCT ok\n"); + complete(sas_dev->completion); + } + + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk); + + return IRQ_HANDLED; +} + +static bool is_ncq_err_v3_hw(struct hisi_sas_complete_v3_hdr *complete_hdr) +{ + u32 dw0, dw3; + + dw0 = le32_to_cpu(complete_hdr->dw0); + dw3 = le32_to_cpu(complete_hdr->dw3); + + return (dw0 & ERR_PHASE_RESPONSE_FRAME_REV_STAGE_MSK) && + (dw3 & FIS_TYPE_SDB_MSK) && + (dw3 & FIS_ATA_STATUS_ERR_MSK); +} + +static bool +slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, + struct hisi_sas_slot *slot) +{ + struct task_status_struct *ts = &task->task_status; + struct hisi_sas_complete_v3_hdr *complete_queue = + hisi_hba->complete_hdr[slot->cmplt_queue]; + struct hisi_sas_complete_v3_hdr *complete_hdr = + &complete_queue[slot->cmplt_queue_slot]; + struct hisi_sas_err_record_v3 *record = + hisi_sas_status_buf_addr_mem(slot); + u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type); + u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type); + u16 sipc_rx_err_type = le16_to_cpu(record->sipc_rx_err_type); + u32 dw3 = le32_to_cpu(complete_hdr->dw3); + u32 dw0 = le32_to_cpu(complete_hdr->dw0); + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { + /* + * If returned response frame is incorrect because of data underflow, + * but I/O information has been written to the host memory, we examine + * response IU. 
+ */ + if (!(dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) && + (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)) + return false; + + ts->residual = trans_tx_fail_type; + ts->stat = SAS_DATA_UNDERRUN; + } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) { + ts->stat = SAS_QUEUE_FULL; + slot->abort = 1; + } else { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + } + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + if ((dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) && + (sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) { + ts->stat = SAS_PROTO_RESPONSE; + } else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { + ts->residual = trans_tx_fail_type; + ts->stat = SAS_DATA_UNDERRUN; + } else if ((dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) || + (dw3 & SATA_DISK_IN_ERROR_STATUS_MSK)) { + ts->stat = SAS_PHY_DOWN; + slot->abort = 1; + } else { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + } + if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) + hisi_sas_sata_done(task, slot); + break; + case SAS_PROTOCOL_SMP: + ts->stat = SAS_SAM_STAT_CHECK_CONDITION; + break; + default: + break; + } + return true; +} + +static void slot_complete_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct hisi_sas_device *sas_dev; + struct device *dev = hisi_hba->dev; + struct task_status_struct *ts; + struct domain_device *device; + struct sas_ha_struct *ha; + struct hisi_sas_complete_v3_hdr *complete_queue = + hisi_hba->complete_hdr[slot->cmplt_queue]; + struct hisi_sas_complete_v3_hdr *complete_hdr = + &complete_queue[slot->cmplt_queue_slot]; + unsigned long flags; + bool is_internal = slot->is_internal; + u32 dw0, dw1, dw3; + + if (unlikely(!task || !task->lldd_task || !task->dev)) + return; + + ts = &task->task_status; + device = task->dev; + ha = device->port->ha; + sas_dev = device->lldd_dev; + + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + memset(ts, 0, sizeof(*ts)); + ts->resp = SAS_TASK_COMPLETE; + + if (unlikely(!sas_dev)) { + dev_dbg(dev, "slot complete: port has not device\n"); + ts->stat = SAS_PHY_DOWN; + goto out; + } + + dw0 = le32_to_cpu(complete_hdr->dw0); + dw1 = le32_to_cpu(complete_hdr->dw1); + dw3 = le32_to_cpu(complete_hdr->dw3); + + /* + * Use SAS+TMF status codes + */ + switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) { + case STAT_IO_ABORTED: + /* this IO has been aborted by abort command */ + ts->stat = SAS_ABORTED_TASK; + goto out; + case STAT_IO_COMPLETE: + /* internal abort command complete */ + ts->stat = TMF_RESP_FUNC_SUCC; + goto out; + case STAT_IO_NO_DEVICE: + ts->stat = TMF_RESP_FUNC_COMPLETE; + goto out; + case STAT_IO_NOT_VALID: + /* + * abort single IO, the controller can't find the IO + */ + ts->stat = TMF_RESP_FUNC_FAILED; + goto out; + default: + break; + } + + /* check for erroneous completion */ + if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) { + u32 *error_info = hisi_sas_status_buf_addr_mem(slot); + + if (slot_err_v3_hw(hisi_hba, task, slot)) { + if (ts->stat != SAS_DATA_UNDERRUN) + dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d addr=%016llx CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n", + slot->idx, task, sas_dev->device_id, + SAS_ADDR(device->sas_addr), + dw0, dw1, complete_hdr->act, dw3, + error_info[0], error_info[1], + error_info[2], error_info[3]); + if (unlikely(slot->abort)) { + if (dev_is_sata(device) && 
task->ata_task.use_ncq) + sas_ata_device_link_abort(device, true); + else + sas_task_abort(task); + + return; + } + goto out; + } + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: { + struct ssp_response_iu *iu = + hisi_sas_status_buf_addr_mem(slot) + + sizeof(struct hisi_sas_err_record); + + sas_ssp_task_response(dev, task, iu); + break; + } + case SAS_PROTOCOL_SMP: { + struct scatterlist *sg_resp = &task->smp_task.smp_resp; + void *to = page_address(sg_page(sg_resp)); + + ts->stat = SAS_SAM_STAT_GOOD; + + memcpy(to + sg_resp->offset, + hisi_sas_status_buf_addr_mem(slot) + + sizeof(struct hisi_sas_err_record), + sg_resp->length); + break; + } + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + ts->stat = SAS_SAM_STAT_GOOD; + if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) + hisi_sas_sata_done(task, slot); + break; + default: + ts->stat = SAS_SAM_STAT_CHECK_CONDITION; + break; + } + + if (!slot->port->port_attached) { + dev_warn(dev, "slot complete: port %d has removed\n", + slot->port->sas_port.id); + ts->stat = SAS_PHY_DOWN; + } + +out: + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + dev_info(dev, "slot complete: task(%pK) aborted\n", task); + return; + } + task->task_state_flags |= SAS_TASK_STATE_DONE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + hisi_sas_slot_task_free(hisi_hba, task, slot, true); + + if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) { + spin_lock_irqsave(&device->done_lock, flags); + if (test_bit(SAS_HA_FROZEN, &ha->state)) { + spin_unlock_irqrestore(&device->done_lock, flags); + dev_info(dev, "slot complete: task(%pK) ignored\n ", + task); + return; + } + spin_unlock_irqrestore(&device->done_lock, flags); + } + + if (task->task_done) + task->task_done(task); +} + +static int complete_v3_hw(struct hisi_sas_cq *cq) +{ + struct hisi_sas_complete_v3_hdr *complete_queue; + struct hisi_hba *hisi_hba = cq->hisi_hba; + u32 rd_point, wr_point; + int queue = cq->id; + int completed; + + rd_point = cq->rd_point; + complete_queue = hisi_hba->complete_hdr[queue]; + + wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + + (0x14 * queue)); + completed = (wr_point + HISI_SAS_QUEUE_SLOTS - rd_point) % HISI_SAS_QUEUE_SLOTS; + + while (rd_point != wr_point) { + struct hisi_sas_complete_v3_hdr *complete_hdr; + struct device *dev = hisi_hba->dev; + struct hisi_sas_slot *slot; + u32 dw0, dw1, dw3; + int iptt; + + complete_hdr = &complete_queue[rd_point]; + dw0 = le32_to_cpu(complete_hdr->dw0); + dw1 = le32_to_cpu(complete_hdr->dw1); + dw3 = le32_to_cpu(complete_hdr->dw3); + + iptt = dw1 & CMPLT_HDR_IPTT_MSK; + if (unlikely((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) && + (dw3 & CMPLT_HDR_SATA_DISK_ERR_MSK)) { + int device_id = (dw1 & CMPLT_HDR_DEV_ID_MSK) >> + CMPLT_HDR_DEV_ID_OFF; + struct hisi_sas_itct *itct = + &hisi_hba->itct[device_id]; + struct hisi_sas_device *sas_dev = + &hisi_hba->devices[device_id]; + struct domain_device *device = sas_dev->sas_device; + + dev_err(dev, "erroneous completion disk err dev id=%d sas_addr=0x%llx CQ hdr: 0x%x 0x%x 0x%x 0x%x\n", + device_id, itct->sas_addr, dw0, dw1, + complete_hdr->act, dw3); + + if (is_ncq_err_v3_hw(complete_hdr)) + sas_dev->dev_status = HISI_SAS_DEV_NCQ_ERR; + + sas_ata_device_link_abort(device, true); + } else if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) { + slot = &hisi_hba->slot_info[iptt]; + slot->cmplt_queue_slot = rd_point; + 
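+			/* Record which completion queue and entry this slot completed on, so slot_complete_v3_hw() can locate its completion header. */ +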
slot->cmplt_queue = queue; + slot_complete_v3_hw(hisi_hba, slot); + } else + dev_err(dev, "IPTT %d is invalid, discard it.\n", iptt); + + if (++rd_point >= HISI_SAS_QUEUE_SLOTS) + rd_point = 0; + } + + /* update rd_point */ + cq->rd_point = rd_point; + hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + + return completed; +} + +static int queue_complete_v3_hw(struct Scsi_Host *shost, unsigned int queue) +{ + struct hisi_hba *hisi_hba = shost_priv(shost); + struct hisi_sas_cq *cq = &hisi_hba->cq[queue]; + int completed; + + spin_lock(&cq->poll_lock); + completed = complete_v3_hw(cq); + spin_unlock(&cq->poll_lock); + + return completed; +} + +static irqreturn_t cq_thread_v3_hw(int irq_no, void *p) +{ + struct hisi_sas_cq *cq = p; + + complete_v3_hw(cq); + + return IRQ_HANDLED; +} + +static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) +{ + struct hisi_sas_cq *cq = p; + struct hisi_hba *hisi_hba = cq->hisi_hba; + int queue = cq->id; + + hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); + + return IRQ_WAKE_THREAD; +} + +static void hisi_sas_v3_free_vectors(void *data) +{ + struct pci_dev *pdev = data; + + pci_free_irq_vectors(pdev); +} + +static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba) +{ + /* Allocate all MSI vectors to avoid re-insertion issue */ + int max_msi = HISI_SAS_MSI_COUNT_V3_HW; + int vectors, min_msi; + struct Scsi_Host *shost = hisi_hba->shost; + struct pci_dev *pdev = hisi_hba->pci_dev; + struct irq_affinity desc = { + .pre_vectors = BASE_VECTORS_V3_HW, + }; + + min_msi = MIN_AFFINE_VECTORS_V3_HW; + vectors = pci_alloc_irq_vectors_affinity(pdev, + min_msi, max_msi, + PCI_IRQ_MSI | + PCI_IRQ_AFFINITY, + &desc); + if (vectors < 0) + return -ENOENT; + + + hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW - hisi_hba->iopoll_q_cnt; + shost->nr_hw_queues = hisi_hba->cq_nvecs + hisi_hba->iopoll_q_cnt; + + return devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev); +} + +static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + struct pci_dev *pdev = hisi_hba->pci_dev; + int rc, i; + + rc = devm_request_irq(dev, pci_irq_vector(pdev, 1), + int_phy_up_down_bcast_v3_hw, 0, + DRV_NAME " phy", hisi_hba); + if (rc) { + dev_err(dev, "could not request phy interrupt, rc=%d\n", rc); + return -ENOENT; + } + + rc = devm_request_irq(dev, pci_irq_vector(pdev, 2), + int_chnl_int_v3_hw, 0, + DRV_NAME " channel", hisi_hba); + if (rc) { + dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc); + return -ENOENT; + } + + rc = devm_request_irq(dev, pci_irq_vector(pdev, 11), + fatal_axi_int_v3_hw, 0, + DRV_NAME " fatal", hisi_hba); + if (rc) { + dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc); + return -ENOENT; + } + + if (hisi_sas_intr_conv) + dev_info(dev, "Enable interrupt converge\n"); + + for (i = 0; i < hisi_hba->cq_nvecs; i++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + int nr = hisi_sas_intr_conv ? 16 : 16 + i; + unsigned long irqflags = hisi_sas_intr_conv ? 
IRQF_SHARED : + IRQF_ONESHOT; + + cq->irq_no = pci_irq_vector(pdev, nr); + rc = devm_request_threaded_irq(dev, cq->irq_no, + cq_interrupt_v3_hw, + cq_thread_v3_hw, + irqflags, + DRV_NAME " cq", cq); + if (rc) { + dev_err(dev, "could not request cq%d interrupt, rc=%d\n", + i, rc); + return -ENOENT; + } + cq->irq_mask = pci_irq_get_affinity(pdev, i + BASE_VECTORS_V3_HW); + if (!cq->irq_mask) { + dev_err(dev, "could not get cq%d irq affinity!\n", i); + return -ENOENT; + } + } + + return 0; +} + +static int hisi_sas_v3_init(struct hisi_hba *hisi_hba) +{ + int rc; + + rc = hw_init_v3_hw(hisi_hba); + if (rc) + return rc; + + rc = interrupt_init_v3_hw(hisi_hba); + if (rc) + return rc; + + return 0; +} + +static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no, + struct sas_phy_linkrates *r) +{ + enum sas_linkrate max = r->maximum_linkrate; + u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, phy_no, + PROG_PHY_LINK_RATE); + + prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK; + prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max); + hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, + prog_phy_link_rate); +} + +static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba) +{ + struct pci_dev *pdev = hisi_hba->pci_dev; + int i; + + synchronize_irq(pci_irq_vector(pdev, 1)); + synchronize_irq(pci_irq_vector(pdev, 2)); + synchronize_irq(pci_irq_vector(pdev, 11)); + for (i = 0; i < hisi_hba->queue_count; i++) + hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1); + + for (i = 0; i < hisi_hba->cq_nvecs; i++) + synchronize_irq(pci_irq_vector(pdev, i + 16)); + + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff); + + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x1); + hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1); + } +} + +static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba) +{ + return hisi_sas_read32(hisi_hba, PHY_STATE); +} + +static int disable_host_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + u32 status, reg_val; + int rc; + + hisi_sas_sync_poll_cqs(hisi_hba); + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); + + hisi_sas_stop_phys(hisi_hba); + + mdelay(10); + + reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL); + reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK; + hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL, reg_val); + + /* wait until bus idle */ + rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE + + AM_CURR_TRANS_RETURN, status, + status == 0x3, 10, 100); + if (rc) { + dev_err(dev, "axi bus is not idle, rc=%d\n", rc); + return rc; + } + + return 0; +} + +static int soft_reset_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + int rc; + + interrupt_disable_v3_hw(hisi_hba); + rc = disable_host_v3_hw(hisi_hba); + if (rc) { + dev_err(dev, "soft reset: disable host failed rc=%d\n", rc); + return rc; + } + + hisi_sas_init_mem(hisi_hba); + + return hw_init_v3_hw(hisi_hba); +} + +static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type, + u8 reg_index, u8 reg_count, u8 *write_data) +{ + struct device *dev = 
hisi_hba->dev; + u32 *data = (u32 *)write_data; + int i; + + switch (reg_type) { + case SAS_GPIO_REG_TX: + if ((reg_index + reg_count) > ((hisi_hba->n_phy + 3) / 4)) { + dev_err(dev, "write gpio: invalid reg range[%d, %d]\n", + reg_index, reg_index + reg_count - 1); + return -EINVAL; + } + + for (i = 0; i < reg_count; i++) + hisi_sas_write32(hisi_hba, + SAS_GPIO_TX_0_1 + (reg_index + i) * 4, + data[i]); + break; + default: + dev_err(dev, "write gpio: unsupported or bad reg type %d\n", + reg_type); + return -EINVAL; + } + + return 0; +} + +static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba, + int delay_ms, int timeout_ms) +{ + struct device *dev = hisi_hba->dev; + int entries, entries_old = 0, time; + + for (time = 0; time < timeout_ms; time += delay_ms) { + entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT); + if (entries == entries_old) + break; + + entries_old = entries; + msleep(delay_ms); + } + + if (time >= timeout_ms) { + dev_dbg(dev, "Wait commands complete timeout!\n"); + return; + } + + dev_dbg(dev, "wait commands complete %dms\n", time); +} + +static ssize_t intr_conv_v3_hw_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv); +} +static DEVICE_ATTR_RO(intr_conv_v3_hw); + +static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba) +{ + /* config those registers between enable and disable PHYs */ + hisi_sas_stop_phys(hisi_hba); + + if (hisi_hba->intr_coal_ticks == 0 || + hisi_hba->intr_coal_count == 0) { + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); + } else { + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, + hisi_hba->intr_coal_ticks); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, + hisi_hba->intr_coal_count); + } + phys_init_v3_hw(hisi_hba); +} + +static ssize_t intr_coal_ticks_v3_hw_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct hisi_hba *hisi_hba = shost_priv(shost); + + return scnprintf(buf, PAGE_SIZE, "%u\n", + hisi_hba->intr_coal_ticks); +} + +static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct hisi_hba *hisi_hba = shost_priv(shost); + u32 intr_coal_ticks; + int ret; + + ret = kstrtou32(buf, 10, &intr_coal_ticks); + if (ret) { + dev_err(dev, "Input data of interrupt coalesce unmatch\n"); + return -EINVAL; + } + + if (intr_coal_ticks >= BIT(24)) { + dev_err(dev, "intr_coal_ticks must be less than 2^24!\n"); + return -EINVAL; + } + + hisi_hba->intr_coal_ticks = intr_coal_ticks; + + config_intr_coal_v3_hw(hisi_hba); + + return count; +} +static DEVICE_ATTR_RW(intr_coal_ticks_v3_hw); + +static ssize_t intr_coal_count_v3_hw_show(struct device *dev, + struct device_attribute + *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct hisi_hba *hisi_hba = shost_priv(shost); + + return scnprintf(buf, PAGE_SIZE, "%u\n", + hisi_hba->intr_coal_count); +} + +static ssize_t intr_coal_count_v3_hw_store(struct device *dev, + struct device_attribute + *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct hisi_hba *hisi_hba = shost_priv(shost); + u32 intr_coal_count; + int ret; + + ret = kstrtou32(buf, 10, &intr_coal_count); + if (ret) { + dev_err(dev, 
"Input data of interrupt coalesce unmatch\n"); + return -EINVAL; + } + + if (intr_coal_count >= BIT(8)) { + dev_err(dev, "intr_coal_count must be less than 2^8!\n"); + return -EINVAL; + } + + hisi_hba->intr_coal_count = intr_coal_count; + + config_intr_coal_v3_hw(hisi_hba); + + return count; +} +static DEVICE_ATTR_RW(intr_coal_count_v3_hw); + +static ssize_t iopoll_q_cnt_v3_hw_show(struct device *dev, + struct device_attribute + *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct hisi_hba *hisi_hba = shost_priv(shost); + + return scnprintf(buf, PAGE_SIZE, "%u\n", + hisi_hba->iopoll_q_cnt); +} +static DEVICE_ATTR_RO(iopoll_q_cnt_v3_hw); + +static int slave_configure_v3_hw(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev); + struct hisi_hba *hisi_hba = shost_priv(shost); + int ret = hisi_sas_slave_configure(sdev); + struct device *dev = hisi_hba->dev; + + if (ret) + return ret; + + if (sdev->type == TYPE_ENCLOSURE) + return 0; + + if (!device_link_add(&sdev->sdev_gendev, dev, + DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)) { + if (pm_runtime_enabled(dev)) { + dev_info(dev, "add device link failed, disable runtime PM for the host\n"); + pm_runtime_disable(dev); + } + } + + return 0; +} + +static struct attribute *host_v3_hw_attrs[] = { + &dev_attr_phy_event_threshold.attr, + &dev_attr_intr_conv_v3_hw.attr, + &dev_attr_intr_coal_ticks_v3_hw.attr, + &dev_attr_intr_coal_count_v3_hw.attr, + &dev_attr_iopoll_q_cnt_v3_hw.attr, + NULL +}; + +ATTRIBUTE_GROUPS(host_v3_hw); + +#define HISI_SAS_DEBUGFS_REG(x) {#x, x} + +struct hisi_sas_debugfs_reg_lu { + char *name; + int off; +}; + +struct hisi_sas_debugfs_reg { + const struct hisi_sas_debugfs_reg_lu *lu; + int count; + int base_off; +}; + +static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = { + HISI_SAS_DEBUGFS_REG(PHY_CFG), + HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE), + HISI_SAS_DEBUGFS_REG(PROG_PHY_LINK_RATE), + HISI_SAS_DEBUGFS_REG(PHY_CTRL), + HISI_SAS_DEBUGFS_REG(SL_CFG), + HISI_SAS_DEBUGFS_REG(AIP_LIMIT), + HISI_SAS_DEBUGFS_REG(SL_CONTROL), + HISI_SAS_DEBUGFS_REG(RX_PRIMS_STATUS), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD0), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD1), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD2), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD3), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD4), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD5), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD6), + HISI_SAS_DEBUGFS_REG(TXID_AUTO), + HISI_SAS_DEBUGFS_REG(RX_IDAF_DWORD0), + HISI_SAS_DEBUGFS_REG(RXOP_CHECK_CFG_H), + HISI_SAS_DEBUGFS_REG(STP_LINK_TIMER), + HISI_SAS_DEBUGFS_REG(STP_LINK_TIMEOUT_STATE), + HISI_SAS_DEBUGFS_REG(CON_CFG_DRIVER), + HISI_SAS_DEBUGFS_REG(SAS_SSP_CON_TIMER_CFG), + HISI_SAS_DEBUGFS_REG(SAS_SMP_CON_TIMER_CFG), + HISI_SAS_DEBUGFS_REG(SAS_STP_CON_TIMER_CFG), + HISI_SAS_DEBUGFS_REG(CHL_INT0), + HISI_SAS_DEBUGFS_REG(CHL_INT1), + HISI_SAS_DEBUGFS_REG(CHL_INT2), + HISI_SAS_DEBUGFS_REG(CHL_INT0_MSK), + HISI_SAS_DEBUGFS_REG(CHL_INT1_MSK), + HISI_SAS_DEBUGFS_REG(CHL_INT2_MSK), + HISI_SAS_DEBUGFS_REG(SAS_EC_INT_COAL_TIME), + HISI_SAS_DEBUGFS_REG(CHL_INT_COAL_EN), + HISI_SAS_DEBUGFS_REG(SAS_RX_TRAIN_TIMER), + HISI_SAS_DEBUGFS_REG(PHY_CTRL_RDY_MSK), + HISI_SAS_DEBUGFS_REG(PHYCTRL_NOT_RDY_MSK), + HISI_SAS_DEBUGFS_REG(PHYCTRL_DWS_RESET_MSK), + HISI_SAS_DEBUGFS_REG(PHYCTRL_PHY_ENA_MSK), + HISI_SAS_DEBUGFS_REG(SL_RX_BCAST_CHK_MSK), + HISI_SAS_DEBUGFS_REG(PHYCTRL_OOB_RESTART_MSK), + HISI_SAS_DEBUGFS_REG(DMA_TX_STATUS), + HISI_SAS_DEBUGFS_REG(DMA_RX_STATUS), + HISI_SAS_DEBUGFS_REG(COARSETUNE_TIME), + 
HISI_SAS_DEBUGFS_REG(ERR_CNT_DWS_LOST), + HISI_SAS_DEBUGFS_REG(ERR_CNT_RESET_PROB), + HISI_SAS_DEBUGFS_REG(ERR_CNT_INVLD_DW), + HISI_SAS_DEBUGFS_REG(ERR_CNT_CODE_ERR), + HISI_SAS_DEBUGFS_REG(ERR_CNT_DISP_ERR), + {} +}; + +static const struct hisi_sas_debugfs_reg debugfs_port_reg = { + .lu = debugfs_port_reg_lu, + .count = 0x100, + .base_off = PORT_BASE, +}; + +static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = { + HISI_SAS_DEBUGFS_REG(DLVRY_QUEUE_ENABLE), + HISI_SAS_DEBUGFS_REG(PHY_CONTEXT), + HISI_SAS_DEBUGFS_REG(PHY_STATE), + HISI_SAS_DEBUGFS_REG(PHY_PORT_NUM_MA), + HISI_SAS_DEBUGFS_REG(PHY_CONN_RATE), + HISI_SAS_DEBUGFS_REG(ITCT_CLR), + HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_LO), + HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_HI), + HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO), + HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI), + HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG), + HISI_SAS_DEBUGFS_REG(TRANS_LOCK_ICT_TIME), + HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL), + HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL), + HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME), + HISI_SAS_DEBUGFS_REG(DEVICE_MSG_WORK_MODE), + HISI_SAS_DEBUGFS_REG(OPENA_WT_CONTI_TIME), + HISI_SAS_DEBUGFS_REG(I_T_NEXUS_LOSS_TIME), + HISI_SAS_DEBUGFS_REG(MAX_CON_TIME_LIMIT_TIME), + HISI_SAS_DEBUGFS_REG(BUS_INACTIVE_LIMIT_TIME), + HISI_SAS_DEBUGFS_REG(REJECT_TO_OPEN_LIMIT_TIME), + HISI_SAS_DEBUGFS_REG(CQ_INT_CONVERGE_EN), + HISI_SAS_DEBUGFS_REG(CFG_AGING_TIME), + HISI_SAS_DEBUGFS_REG(HGC_DFX_CFG2), + HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_QUERY_IPTT), + HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_IPTT_DONE), + HISI_SAS_DEBUGFS_REG(HGC_IOMB_PROC1_STATUS), + HISI_SAS_DEBUGFS_REG(CHNL_INT_STATUS), + HISI_SAS_DEBUGFS_REG(HGC_AXI_FIFO_ERR_INFO), + HISI_SAS_DEBUGFS_REG(INT_COAL_EN), + HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_TIME), + HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_CNT), + HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_TIME), + HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_CNT), + HISI_SAS_DEBUGFS_REG(OQ_INT_SRC), + HISI_SAS_DEBUGFS_REG(OQ_INT_SRC_MSK), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC1), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC2), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC3), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK1), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK2), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK3), + HISI_SAS_DEBUGFS_REG(CHNL_PHYUPDOWN_INT_MSK), + HISI_SAS_DEBUGFS_REG(CHNL_ENT_INT_MSK), + HISI_SAS_DEBUGFS_REG(HGC_COM_INT_MSK), + HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR), + HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR_MSK), + HISI_SAS_DEBUGFS_REG(HGC_ERR_STAT_EN), + HISI_SAS_DEBUGFS_REG(CQE_SEND_CNT), + HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_DEPTH), + HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_WR_PTR), + HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_RD_PTR), + HISI_SAS_DEBUGFS_REG(HYPER_STREAM_ID_EN_CFG), + HISI_SAS_DEBUGFS_REG(OQ0_INT_SRC_MSK), + HISI_SAS_DEBUGFS_REG(COMPL_Q_0_DEPTH), + HISI_SAS_DEBUGFS_REG(COMPL_Q_0_WR_PTR), + HISI_SAS_DEBUGFS_REG(COMPL_Q_0_RD_PTR), + HISI_SAS_DEBUGFS_REG(AWQOS_AWCACHE_CFG), + HISI_SAS_DEBUGFS_REG(ARQOS_ARCACHE_CFG), + HISI_SAS_DEBUGFS_REG(HILINK_ERR_DFX), + HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_0), + HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_1), + HISI_SAS_DEBUGFS_REG(SAS_GPIO_TX_0_1), + HISI_SAS_DEBUGFS_REG(SAS_CFG_DRIVE_VLD), + {} +}; + +static const struct hisi_sas_debugfs_reg debugfs_global_reg = { + .lu = debugfs_global_reg_lu, + .count = 0x800, +}; + +static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = { + HISI_SAS_DEBUGFS_REG(AM_CFG_MAX_TRANS), + HISI_SAS_DEBUGFS_REG(AM_CFG_SINGLE_PORT_MAX_TRANS), + HISI_SAS_DEBUGFS_REG(AXI_CFG), + 
HISI_SAS_DEBUGFS_REG(AM_ROB_ECC_ERR_ADDR), + {} +}; + +static const struct hisi_sas_debugfs_reg debugfs_axi_reg = { + .lu = debugfs_axi_reg_lu, + .count = 0x61, + .base_off = AXI_MASTER_CFG_BASE, +}; + +static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = { + HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0), + HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1), + HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK), + HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK), + HISI_SAS_DEBUGFS_REG(CFG_SAS_RAS_INTR_MASK), + HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2), + HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2_MASK), + {} +}; + +static const struct hisi_sas_debugfs_reg debugfs_ras_reg = { + .lu = debugfs_ras_reg_lu, + .count = 0x10, + .base_off = RAS_BASE, +}; + +static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba) +{ + struct Scsi_Host *shost = hisi_hba->shost; + + scsi_block_requests(shost); + wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000); + + set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + hisi_sas_sync_cqs(hisi_hba); + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); +} + +static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba) +{ + struct Scsi_Host *shost = hisi_hba->shost; + + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, + (u32)((1ULL << hisi_hba->queue_count) - 1)); + + clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + scsi_unblock_requests(shost); +} + +static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba, + enum hisi_sas_debugfs_cache_type type, + u32 *cache) +{ + u32 cache_dw_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * + HISI_SAS_IOST_ITCT_CACHE_NUM; + struct device *dev = hisi_hba->dev; + u32 *buf = cache; + u32 i, val; + + hisi_sas_write32(hisi_hba, TAB_RD_TYPE, type); + + for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_DW_SZ; i++) { + val = hisi_sas_read32(hisi_hba, TAB_DFX); + if (val == 0xffffffff) + break; + } + + if (val != 0xffffffff) { + dev_err(dev, "Issue occurred in reading IOST/ITCT cache!\n"); + return; + } + + memset(buf, 0, cache_dw_size * 4); + buf[0] = val; + + for (i = 1; i < cache_dw_size; i++) + buf[i] = hisi_sas_read32(hisi_hba, TAB_DFX); +} + +static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba) +{ + u32 reg_val; + int phy_no = hisi_hba->debugfs_bist_phy_no; + int i; + + /* disable PHY */ + hisi_sas_phy_enable(hisi_hba, phy_no, 0); + + /* update FFE */ + for (i = 0; i < FFE_CFG_MAX; i++) + hisi_sas_phy_write32(hisi_hba, phy_no, TXDEEMPH_G1 + (i * 0x4), + hisi_hba->debugfs_bist_ffe[phy_no][i]); + + /* disable ALOS */ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG); + reg_val |= CFG_ALOS_CHK_DISABLE_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val); +} + +static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba) +{ + u32 reg_val; + int phy_no = hisi_hba->debugfs_bist_phy_no; + + /* disable loopback */ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL); + reg_val &= ~(CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK | + CFG_BIST_TEST_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, reg_val); + + /* enable ALOS */ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG); + reg_val &= ~CFG_ALOS_CHK_DISABLE_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val); + + /* restore the linkrate */ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); + /* init OOB link rate as 1.5 Gbits */ + reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK; + reg_val |= (0x8 << CFG_PROG_OOB_PHY_LINK_RATE_OFF); + hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, 
reg_val); + + /* enable PHY */ + hisi_sas_phy_enable(hisi_hba, phy_no, 1); +} + +#define SAS_PHY_BIST_CODE_INIT 0x1 +#define SAS_PHY_BIST_CODE1_INIT 0X80 +static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable) +{ + u32 reg_val, mode_tmp; + u32 linkrate = hisi_hba->debugfs_bist_linkrate; + u32 phy_no = hisi_hba->debugfs_bist_phy_no; + u32 *ffe = hisi_hba->debugfs_bist_ffe[phy_no]; + u32 code_mode = hisi_hba->debugfs_bist_code_mode; + u32 path_mode = hisi_hba->debugfs_bist_mode; + u32 *fix_code = &hisi_hba->debugfs_bist_fixed_code[0]; + struct device *dev = hisi_hba->dev; + + dev_info(dev, "BIST info:phy%d link_rate=%d code_mode=%d path_mode=%d ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n", + phy_no, linkrate, code_mode, path_mode, + ffe[FFE_SAS_1_5_GBPS], ffe[FFE_SAS_3_0_GBPS], + ffe[FFE_SAS_6_0_GBPS], ffe[FFE_SAS_12_0_GBPS], + ffe[FFE_SATA_1_5_GBPS], ffe[FFE_SATA_3_0_GBPS], + ffe[FFE_SATA_6_0_GBPS], fix_code[FIXED_CODE], + fix_code[FIXED_CODE_1]); + mode_tmp = path_mode ? 2 : 1; + if (enable) { + /* some preparations before bist test */ + hisi_sas_bist_test_prep_v3_hw(hisi_hba); + + /* set linkrate of bit test*/ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, + PROG_PHY_LINK_RATE); + reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK; + reg_val |= (linkrate << CFG_PROG_OOB_PHY_LINK_RATE_OFF); + hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, + reg_val); + + /* set code mode of bit test */ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, + SAS_PHY_BIST_CTRL); + reg_val &= ~(CFG_BIST_MODE_SEL_MSK | CFG_LOOP_TEST_MODE_MSK | + CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK | + CFG_BIST_TEST_MSK); + reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) | + (mode_tmp << CFG_LOOP_TEST_MODE_OFF) | + CFG_BIST_TEST_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, + reg_val); + + /* set the bist init value */ + if (code_mode == HISI_SAS_BIST_CODE_MODE_FIXED_DATA) { + reg_val = hisi_hba->debugfs_bist_fixed_code[0]; + hisi_sas_phy_write32(hisi_hba, phy_no, + SAS_PHY_BIST_CODE, reg_val); + + reg_val = hisi_hba->debugfs_bist_fixed_code[1]; + hisi_sas_phy_write32(hisi_hba, phy_no, + SAS_PHY_BIST_CODE1, reg_val); + } else { + hisi_sas_phy_write32(hisi_hba, phy_no, + SAS_PHY_BIST_CODE, + SAS_PHY_BIST_CODE_INIT); + hisi_sas_phy_write32(hisi_hba, phy_no, + SAS_PHY_BIST_CODE1, + SAS_PHY_BIST_CODE1_INIT); + } + + mdelay(100); + reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, + reg_val); + + /* clear error bit */ + mdelay(100); + hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT); + } else { + /* disable bist test and recover it */ + hisi_hba->debugfs_bist_cnt += hisi_sas_phy_read32(hisi_hba, + phy_no, SAS_BIST_ERR_CNT); + hisi_sas_bist_test_restore_v3_hw(hisi_hba); + } + + return 0; +} + +static void hisi_sas_map_queues(struct Scsi_Host *shost) +{ + struct hisi_hba *hisi_hba = shost_priv(shost); + struct blk_mq_queue_map *qmap; + int i, qoff; + + for (i = 0, qoff = 0; i < shost->nr_maps; i++) { + qmap = &shost->tag_set.map[i]; + if (i == HCTX_TYPE_DEFAULT) { + qmap->nr_queues = hisi_hba->cq_nvecs; + } else if (i == HCTX_TYPE_POLL) { + qmap->nr_queues = hisi_hba->iopoll_q_cnt; + } else { + qmap->nr_queues = 0; + continue; + } + + /* At least one interrupt hardware queue */ + if (!qmap->nr_queues) + WARN_ON(i == HCTX_TYPE_DEFAULT); + qmap->queue_offset = qoff; + if (i == HCTX_TYPE_POLL) + blk_mq_map_queues(qmap); + else + blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev, + 
BASE_VECTORS_V3_HW); + qoff += qmap->nr_queues; + } +} + +static const struct scsi_host_template sht_v3_hw = { + .name = DRV_NAME, + .proc_name = DRV_NAME, + .module = THIS_MODULE, + .queuecommand = sas_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, + .target_alloc = sas_target_alloc, + .slave_configure = slave_configure_v3_hw, + .scan_finished = hisi_sas_scan_finished, + .scan_start = hisi_sas_scan_start, + .map_queues = hisi_sas_map_queues, + .change_queue_depth = sas_change_queue_depth, + .bios_param = sas_bios_param, + .this_id = -1, + .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, + .sg_prot_tablesize = HISI_SAS_SGE_PAGE_CNT, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, + .slave_alloc = hisi_sas_slave_alloc, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif + .shost_groups = host_v3_hw_groups, + .tag_alloc_policy = BLK_TAG_ALLOC_RR, + .host_reset = hisi_sas_host_reset, + .host_tagset = 1, + .mq_poll = queue_complete_v3_hw, +}; + +static const struct hisi_sas_hw hisi_sas_v3_hw = { + .setup_itct = setup_itct_v3_hw, + .get_wideport_bitmap = get_wideport_bitmap_v3_hw, + .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr), + .clear_itct = clear_itct_v3_hw, + .sl_notify_ssp = sl_notify_ssp_v3_hw, + .prep_ssp = prep_ssp_v3_hw, + .prep_smp = prep_smp_v3_hw, + .prep_stp = prep_ata_v3_hw, + .prep_abort = prep_abort_v3_hw, + .start_delivery = start_delivery_v3_hw, + .phys_init = phys_init_v3_hw, + .phy_start = start_phy_v3_hw, + .phy_disable = disable_phy_v3_hw, + .phy_hard_reset = phy_hard_reset_v3_hw, + .phy_get_max_linkrate = phy_get_max_linkrate_v3_hw, + .phy_set_linkrate = phy_set_linkrate_v3_hw, + .dereg_device = dereg_device_v3_hw, + .soft_reset = soft_reset_v3_hw, + .get_phys_state = get_phys_state_v3_hw, + .get_events = phy_get_events_v3_hw, + .write_gpio = write_gpio_v3_hw, + .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw, + .debugfs_snapshot_regs = debugfs_snapshot_regs_v3_hw, +}; + +static struct Scsi_Host * +hisi_sas_shost_alloc_pci(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct hisi_hba *hisi_hba; + struct device *dev = &pdev->dev; + + shost = scsi_host_alloc(&sht_v3_hw, sizeof(*hisi_hba)); + if (!shost) { + dev_err(dev, "shost alloc failed\n"); + return NULL; + } + hisi_hba = shost_priv(shost); + + INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); + INIT_WORK(&hisi_hba->debugfs_work, debugfs_work_handler_v3_hw); + hisi_hba->hw = &hisi_sas_v3_hw; + hisi_hba->pci_dev = pdev; + hisi_hba->dev = dev; + hisi_hba->shost = shost; + SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; + + if (prot_mask & ~HISI_SAS_PROT_MASK) + dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n", + prot_mask); + else + hisi_hba->prot_mask = prot_mask; + + if (hisi_sas_get_fw_info(hisi_hba) < 0) + goto err_out; + + if (experimental_iopoll_q_cnt < 0 || + experimental_iopoll_q_cnt >= hisi_hba->queue_count) + dev_err(dev, "iopoll queue count %d cannot exceed or equal 16, using default 0\n", + experimental_iopoll_q_cnt); + else + hisi_hba->iopoll_q_cnt = experimental_iopoll_q_cnt; + + if (hisi_sas_alloc(hisi_hba)) { + hisi_sas_free(hisi_hba); + goto err_out; + } + + return shost; +err_out: + scsi_host_put(shost); + dev_err(dev, "shost alloc failed\n"); + return NULL; +} + +static void debugfs_snapshot_cq_reg_v3_hw(struct hisi_hba *hisi_hba) +{ + int queue_entry_size = 
hisi_hba->hw->complete_hdr_size; + int dump_index = hisi_hba->debugfs_dump_index; + int i; + + for (i = 0; i < hisi_hba->queue_count; i++) + memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr, + hisi_hba->complete_hdr[i], + HISI_SAS_QUEUE_SLOTS * queue_entry_size); +} + +static void debugfs_snapshot_dq_reg_v3_hw(struct hisi_hba *hisi_hba) +{ + int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr); + int dump_index = hisi_hba->debugfs_dump_index; + int i; + + for (i = 0; i < hisi_hba->queue_count; i++) { + struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr; + int j; + + debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr; + cmd_hdr = hisi_hba->cmd_hdr[i]; + + for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) + memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j], + queue_entry_size); + } +} + +static void debugfs_snapshot_port_reg_v3_hw(struct hisi_hba *hisi_hba) +{ + int dump_index = hisi_hba->debugfs_dump_index; + const struct hisi_sas_debugfs_reg *port = &debugfs_port_reg; + int i, phy_cnt; + u32 offset; + u32 *databuf; + + for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) { + databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data; + for (i = 0; i < port->count; i++, databuf++) { + offset = port->base_off + 4 * i; + *databuf = hisi_sas_phy_read32(hisi_hba, phy_cnt, + offset); + } + } +} + +static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba) +{ + int dump_index = hisi_hba->debugfs_dump_index; + u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data; + int i; + + for (i = 0; i < debugfs_global_reg.count; i++, databuf++) + *databuf = hisi_sas_read32(hisi_hba, 4 * i); +} + +static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba) +{ + int dump_index = hisi_hba->debugfs_dump_index; + u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data; + const struct hisi_sas_debugfs_reg *axi = &debugfs_axi_reg; + int i; + + for (i = 0; i < axi->count; i++, databuf++) + *databuf = hisi_sas_read32(hisi_hba, 4 * i + axi->base_off); +} + +static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba) +{ + int dump_index = hisi_hba->debugfs_dump_index; + u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data; + const struct hisi_sas_debugfs_reg *ras = &debugfs_ras_reg; + int i; + + for (i = 0; i < ras->count; i++, databuf++) + *databuf = hisi_sas_read32(hisi_hba, 4 * i + ras->base_off); +} + +static void debugfs_snapshot_itct_reg_v3_hw(struct hisi_hba *hisi_hba) +{ + int dump_index = hisi_hba->debugfs_dump_index; + void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache; + void *databuf = hisi_hba->debugfs_itct[dump_index].itct; + struct hisi_sas_itct *itct; + int i; + + read_iost_itct_cache_v3_hw(hisi_hba, HISI_SAS_ITCT_CACHE, cachebuf); + + itct = hisi_hba->itct; + + for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { + memcpy(databuf, itct, sizeof(struct hisi_sas_itct)); + databuf += sizeof(struct hisi_sas_itct); + } +} + +static void debugfs_snapshot_iost_reg_v3_hw(struct hisi_hba *hisi_hba) +{ + int dump_index = hisi_hba->debugfs_dump_index; + int max_command_entries = HISI_SAS_MAX_COMMANDS; + void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache; + void *databuf = hisi_hba->debugfs_iost[dump_index].iost; + struct hisi_sas_iost *iost; + int i; + + read_iost_itct_cache_v3_hw(hisi_hba, HISI_SAS_IOST_CACHE, cachebuf); + + iost = hisi_hba->iost; + + for (i = 0; i < max_command_entries; i++, iost++) { + memcpy(databuf, iost, sizeof(struct hisi_sas_iost)); + databuf += sizeof(struct hisi_sas_iost); + 
} +} + +static const char * +debugfs_to_reg_name_v3_hw(int off, int base_off, + const struct hisi_sas_debugfs_reg_lu *lu) +{ + for (; lu->name; lu++) { + if (off == lu->off - base_off) + return lu->name; + } + + return NULL; +} + +static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s, + const struct hisi_sas_debugfs_reg *reg) +{ + int i; + + for (i = 0; i < reg->count; i++) { + int off = i * 4; + const char *name; + + name = debugfs_to_reg_name_v3_hw(off, reg->base_off, + reg->lu); + + if (name) + seq_printf(s, "0x%08x 0x%08x %s\n", off, + regs_val[i], name); + else + seq_printf(s, "0x%08x 0x%08x\n", off, + regs_val[i]); + } +} + +static int debugfs_global_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_sas_debugfs_regs *global = s->private; + + debugfs_print_reg_v3_hw(global->data, s, + &debugfs_global_reg); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_global_v3_hw); + +static int debugfs_axi_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_sas_debugfs_regs *axi = s->private; + + debugfs_print_reg_v3_hw(axi->data, s, + &debugfs_axi_reg); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_axi_v3_hw); + +static int debugfs_ras_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_sas_debugfs_regs *ras = s->private; + + debugfs_print_reg_v3_hw(ras->data, s, + &debugfs_ras_reg); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_ras_v3_hw); + +static int debugfs_port_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_sas_debugfs_port *port = s->private; + const struct hisi_sas_debugfs_reg *reg_port = &debugfs_port_reg; + + debugfs_print_reg_v3_hw(port->data, s, reg_port); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_port_v3_hw); + +static void debugfs_show_row_64_v3_hw(struct seq_file *s, int index, + int sz, __le64 *ptr) +{ + int i; + + /* completion header size not fixed per HW version */ + seq_printf(s, "index %04d:\n\t", index); + for (i = 1; i <= sz / 8; i++, ptr++) { + seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr)); + if (!(i % 2)) + seq_puts(s, "\n\t"); + } + + seq_puts(s, "\n"); +} + +static void debugfs_show_row_32_v3_hw(struct seq_file *s, int index, + int sz, __le32 *ptr) +{ + int i; + + /* completion header size not fixed per HW version */ + seq_printf(s, "index %04d:\n\t", index); + for (i = 1; i <= sz / 4; i++, ptr++) { + seq_printf(s, " 0x%08x", le32_to_cpu(*ptr)); + if (!(i % 4)) + seq_puts(s, "\n\t"); + } + seq_puts(s, "\n"); +} + +static void debugfs_cq_show_slot_v3_hw(struct seq_file *s, int slot, + struct hisi_sas_debugfs_cq *debugfs_cq) +{ + struct hisi_sas_cq *cq = debugfs_cq->cq; + struct hisi_hba *hisi_hba = cq->hisi_hba; + __le32 *complete_hdr = debugfs_cq->complete_hdr + + (hisi_hba->hw->complete_hdr_size * slot); + + debugfs_show_row_32_v3_hw(s, slot, + hisi_hba->hw->complete_hdr_size, + complete_hdr); +} + +static int debugfs_cq_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_sas_debugfs_cq *debugfs_cq = s->private; + int slot; + + for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) + debugfs_cq_show_slot_v3_hw(s, slot, debugfs_cq); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_cq_v3_hw); + +static void debugfs_dq_show_slot_v3_hw(struct seq_file *s, int slot, + void *dq_ptr) +{ + struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr; + void *cmd_queue = debugfs_dq->hdr; + __le32 *cmd_hdr = cmd_queue + + sizeof(struct hisi_sas_cmd_hdr) * slot; + + debugfs_show_row_32_v3_hw(s, slot, sizeof(struct hisi_sas_cmd_hdr), + cmd_hdr); +} + +static int debugfs_dq_v3_hw_show(struct seq_file *s, void *p) +{ + int slot; + + for 
(slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) + debugfs_dq_show_slot_v3_hw(s, slot, s->private); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_dq_v3_hw); + +static int debugfs_iost_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_sas_debugfs_iost *debugfs_iost = s->private; + struct hisi_sas_iost *iost = debugfs_iost->iost; + int i, max_command_entries = HISI_SAS_MAX_COMMANDS; + + for (i = 0; i < max_command_entries; i++, iost++) { + __le64 *data = &iost->qw0; + + debugfs_show_row_64_v3_hw(s, i, sizeof(*iost), data); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_iost_v3_hw); + +static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private; + struct hisi_sas_iost_itct_cache *iost_cache = + debugfs_iost_cache->cache; + u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4; + int i, tab_idx; + __le64 *iost; + + for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) { + /* + * Data struct of IOST cache: + * Data[1]: BIT0~15: Table index + * Bit16: Valid mask + * Data[2]~[9]: IOST table + */ + tab_idx = (iost_cache->data[1] & 0xffff); + iost = (__le64 *)iost_cache; + + debugfs_show_row_64_v3_hw(s, tab_idx, cache_size, iost); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_iost_cache_v3_hw); + +static int debugfs_itct_v3_hw_show(struct seq_file *s, void *p) +{ + int i; + struct hisi_sas_debugfs_itct *debugfs_itct = s->private; + struct hisi_sas_itct *itct = debugfs_itct->itct; + + for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { + __le64 *data = &itct->qw0; + + debugfs_show_row_64_v3_hw(s, i, sizeof(*itct), data); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_itct_v3_hw); + +static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private; + struct hisi_sas_iost_itct_cache *itct_cache = + debugfs_itct_cache->cache; + u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4; + int i, tab_idx; + __le64 *itct; + + for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) { + /* + * Data struct of ITCT cache: + * Data[1]: BIT0~15: Table index + * Bit16: Valid mask + * Data[2]~[9]: ITCT table + */ + tab_idx = itct_cache->data[1] & 0xffff; + itct = (__le64 *)itct_cache; + + debugfs_show_row_64_v3_hw(s, tab_idx, cache_size, itct); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_itct_cache_v3_hw); + +static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) +{ + u64 *debugfs_timestamp; + int dump_index = hisi_hba->debugfs_dump_index; + struct dentry *dump_dentry; + struct dentry *dentry; + char name[256]; + int p; + int c; + int d; + + snprintf(name, 256, "%d", dump_index); + + dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry); + + debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index]; + + debugfs_create_u64("timestamp", 0400, dump_dentry, + debugfs_timestamp); + + debugfs_create_file("global", 0400, dump_dentry, + &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL], + &debugfs_global_v3_hw_fops); + + /* Create port dir and files */ + dentry = debugfs_create_dir("port", dump_dentry); + for (p = 0; p < hisi_hba->n_phy; p++) { + snprintf(name, 256, "%d", p); + + debugfs_create_file(name, 0400, dentry, + &hisi_hba->debugfs_port_reg[dump_index][p], + &debugfs_port_v3_hw_fops); + } + + /* Create CQ dir and files */ + dentry = debugfs_create_dir("cq", dump_dentry); + for (c = 0; c < hisi_hba->queue_count; c++) { + snprintf(name, 256, "%d", c); + + 
debugfs_create_file(name, 0400, dentry, + &hisi_hba->debugfs_cq[dump_index][c], + &debugfs_cq_v3_hw_fops); + } + + /* Create DQ dir and files */ + dentry = debugfs_create_dir("dq", dump_dentry); + for (d = 0; d < hisi_hba->queue_count; d++) { + snprintf(name, 256, "%d", d); + + debugfs_create_file(name, 0400, dentry, + &hisi_hba->debugfs_dq[dump_index][d], + &debugfs_dq_v3_hw_fops); + } + + debugfs_create_file("iost", 0400, dump_dentry, + &hisi_hba->debugfs_iost[dump_index], + &debugfs_iost_v3_hw_fops); + + debugfs_create_file("iost_cache", 0400, dump_dentry, + &hisi_hba->debugfs_iost_cache[dump_index], + &debugfs_iost_cache_v3_hw_fops); + + debugfs_create_file("itct", 0400, dump_dentry, + &hisi_hba->debugfs_itct[dump_index], + &debugfs_itct_v3_hw_fops); + + debugfs_create_file("itct_cache", 0400, dump_dentry, + &hisi_hba->debugfs_itct_cache[dump_index], + &debugfs_itct_cache_v3_hw_fops); + + debugfs_create_file("axi", 0400, dump_dentry, + &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI], + &debugfs_axi_v3_hw_fops); + + debugfs_create_file("ras", 0400, dump_dentry, + &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS], + &debugfs_ras_v3_hw_fops); +} + +static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba) +{ + int debugfs_dump_index = hisi_hba->debugfs_dump_index; + struct device *dev = hisi_hba->dev; + u64 timestamp = local_clock(); + + if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) { + dev_warn(dev, "dump count exceeded!\n"); + return; + } + + do_div(timestamp, NSEC_PER_MSEC); + hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp; + + debugfs_snapshot_prepare_v3_hw(hisi_hba); + + debugfs_snapshot_global_reg_v3_hw(hisi_hba); + debugfs_snapshot_port_reg_v3_hw(hisi_hba); + debugfs_snapshot_axi_reg_v3_hw(hisi_hba); + debugfs_snapshot_ras_reg_v3_hw(hisi_hba); + debugfs_snapshot_cq_reg_v3_hw(hisi_hba); + debugfs_snapshot_dq_reg_v3_hw(hisi_hba); + debugfs_snapshot_itct_reg_v3_hw(hisi_hba); + debugfs_snapshot_iost_reg_v3_hw(hisi_hba); + + debugfs_create_files_v3_hw(hisi_hba); + + debugfs_snapshot_restore_v3_hw(hisi_hba); + hisi_hba->debugfs_dump_index++; +} + +static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hisi_hba *hisi_hba = file->f_inode->i_private; + char buf[8]; + + if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count) + return -EFAULT; + + if (count > 8) + return -EFAULT; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + if (buf[0] != '1') + return -EFAULT; + + queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); + + return count; +} + +static const struct file_operations debugfs_trigger_dump_v3_hw_fops = { + .write = &debugfs_trigger_dump_v3_hw_write, + .owner = THIS_MODULE, +}; + +enum { + HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0, + HISI_SAS_BIST_LOOPBACK_MODE_SERDES, + HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, +}; + +static const struct { + int value; + char *name; +} debugfs_loop_linkrate_v3_hw[] = { + { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" }, + { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" }, + { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" }, + { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" }, +}; + +static int debugfs_bist_linkrate_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + int i; + + for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) { + int match = (hisi_hba->debugfs_bist_linkrate == + debugfs_loop_linkrate_v3_hw[i].value); + + seq_printf(s, "%s%s%s ", match ? "[" : "", + debugfs_loop_linkrate_v3_hw[i].name, + match ? 
"]" : ""); + } + seq_puts(s, "\n"); + + return 0; +} + +static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + char kbuf[16] = {}, *pkbuf; + bool found = false; + int i; + + if (hisi_hba->debugfs_bist_enable) + return -EPERM; + + if (count >= sizeof(kbuf)) + return -EOVERFLOW; + + if (copy_from_user(kbuf, buf, count)) + return -EINVAL; + + pkbuf = strstrip(kbuf); + + for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) { + if (!strncmp(debugfs_loop_linkrate_v3_hw[i].name, + pkbuf, 16)) { + hisi_hba->debugfs_bist_linkrate = + debugfs_loop_linkrate_v3_hw[i].value; + found = true; + break; + } + } + + if (!found) + return -EINVAL; + + return count; +} + +static int debugfs_bist_linkrate_v3_hw_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, debugfs_bist_linkrate_v3_hw_show, + inode->i_private); +} + +static const struct file_operations debugfs_bist_linkrate_v3_hw_fops = { + .open = debugfs_bist_linkrate_v3_hw_open, + .read = seq_read, + .write = debugfs_bist_linkrate_v3_hw_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static const struct { + int value; + char *name; +} debugfs_loop_code_mode_v3_hw[] = { + { HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" }, + { HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" }, + { HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" }, + { HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" }, + { HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" }, + { HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" }, + { HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" }, + { HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" }, + { HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" }, + { HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" }, + { HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" }, + { HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" }, +}; + +static int debugfs_bist_code_mode_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + int i; + + for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) { + int match = (hisi_hba->debugfs_bist_code_mode == + debugfs_loop_code_mode_v3_hw[i].value); + + seq_printf(s, "%s%s%s ", match ? "[" : "", + debugfs_loop_code_mode_v3_hw[i].name, + match ? 
"]" : ""); + } + seq_puts(s, "\n"); + + return 0; +} + +static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp, + const char __user *buf, + size_t count, + loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + char kbuf[16] = {}, *pkbuf; + bool found = false; + int i; + + if (hisi_hba->debugfs_bist_enable) + return -EPERM; + + if (count >= sizeof(kbuf)) + return -EINVAL; + + if (copy_from_user(kbuf, buf, count)) + return -EOVERFLOW; + + pkbuf = strstrip(kbuf); + + for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) { + if (!strncmp(debugfs_loop_code_mode_v3_hw[i].name, + pkbuf, 16)) { + hisi_hba->debugfs_bist_code_mode = + debugfs_loop_code_mode_v3_hw[i].value; + found = true; + break; + } + } + + if (!found) + return -EINVAL; + + return count; +} + +static int debugfs_bist_code_mode_v3_hw_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, debugfs_bist_code_mode_v3_hw_show, + inode->i_private); +} + +static const struct file_operations debugfs_bist_code_mode_v3_hw_fops = { + .open = debugfs_bist_code_mode_v3_hw_open, + .read = seq_read, + .write = debugfs_bist_code_mode_v3_hw_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static ssize_t debugfs_bist_phy_v3_hw_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + unsigned int phy_no; + int val; + + if (hisi_hba->debugfs_bist_enable) + return -EPERM; + + val = kstrtouint_from_user(buf, count, 0, &phy_no); + if (val) + return val; + + if (phy_no >= hisi_hba->n_phy) + return -EINVAL; + + hisi_hba->debugfs_bist_phy_no = phy_no; + + return count; +} + +static int debugfs_bist_phy_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + + seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no); + + return 0; +} + +static int debugfs_bist_phy_v3_hw_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, debugfs_bist_phy_v3_hw_show, + inode->i_private); +} + +static const struct file_operations debugfs_bist_phy_v3_hw_fops = { + .open = debugfs_bist_phy_v3_hw_open, + .read = seq_read, + .write = debugfs_bist_phy_v3_hw_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static ssize_t debugfs_bist_cnt_v3_hw_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + unsigned int cnt; + int val; + + if (hisi_hba->debugfs_bist_enable) + return -EPERM; + + val = kstrtouint_from_user(buf, count, 0, &cnt); + if (val) + return val; + + if (cnt) + return -EINVAL; + + hisi_hba->debugfs_bist_cnt = 0; + return count; +} + +static int debugfs_bist_cnt_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + + seq_printf(s, "%u\n", hisi_hba->debugfs_bist_cnt); + + return 0; +} + +static int debugfs_bist_cnt_v3_hw_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, debugfs_bist_cnt_v3_hw_show, + inode->i_private); +} + +static const struct file_operations debugfs_bist_cnt_v3_hw_ops = { + .open = debugfs_bist_cnt_v3_hw_open, + .read = seq_read, + .write = debugfs_bist_cnt_v3_hw_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static const struct { + int value; + char *name; +} debugfs_loop_modes_v3_hw[] = { + { 
HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" }, + { HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" }, + { HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" }, +}; + +static int debugfs_bist_mode_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + int i; + + for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) { + int match = (hisi_hba->debugfs_bist_mode == + debugfs_loop_modes_v3_hw[i].value); + + seq_printf(s, "%s%s%s ", match ? "[" : "", + debugfs_loop_modes_v3_hw[i].name, + match ? "]" : ""); + } + seq_puts(s, "\n"); + + return 0; +} + +static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + char kbuf[16] = {}, *pkbuf; + bool found = false; + int i; + + if (hisi_hba->debugfs_bist_enable) + return -EPERM; + + if (count >= sizeof(kbuf)) + return -EINVAL; + + if (copy_from_user(kbuf, buf, count)) + return -EOVERFLOW; + + pkbuf = strstrip(kbuf); + + for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) { + if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf, 16)) { + hisi_hba->debugfs_bist_mode = + debugfs_loop_modes_v3_hw[i].value; + found = true; + break; + } + } + + if (!found) + return -EINVAL; + + return count; +} + +static int debugfs_bist_mode_v3_hw_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, debugfs_bist_mode_v3_hw_show, + inode->i_private); +} + +static const struct file_operations debugfs_bist_mode_v3_hw_fops = { + .open = debugfs_bist_mode_v3_hw_open, + .read = seq_read, + .write = debugfs_bist_mode_v3_hw_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static ssize_t debugfs_bist_enable_v3_hw_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + unsigned int enable; + int val; + + val = kstrtouint_from_user(buf, count, 0, &enable); + if (val) + return val; + + if (enable > 1) + return -EINVAL; + + if (enable == hisi_hba->debugfs_bist_enable) + return count; + + val = debugfs_set_bist_v3_hw(hisi_hba, enable); + if (val < 0) + return val; + + hisi_hba->debugfs_bist_enable = enable; + + return count; +} + +static int debugfs_bist_enable_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + + seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable); + + return 0; +} + +static int debugfs_bist_enable_v3_hw_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, debugfs_bist_enable_v3_hw_show, + inode->i_private); +} + +static const struct file_operations debugfs_bist_enable_v3_hw_fops = { + .open = debugfs_bist_enable_v3_hw_open, + .read = seq_read, + .write = debugfs_bist_enable_v3_hw_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static const struct { + char *name; +} debugfs_ffe_name_v3_hw[FFE_CFG_MAX] = { + { "SAS_1_5_GBPS" }, + { "SAS_3_0_GBPS" }, + { "SAS_6_0_GBPS" }, + { "SAS_12_0_GBPS" }, + { "FFE_RESV" }, + { "SATA_1_5_GBPS" }, + { "SATA_3_0_GBPS" }, + { "SATA_6_0_GBPS" }, +}; + +static ssize_t debugfs_v3_hw_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + u32 *val = m->private; + int res; + + res = kstrtouint_from_user(buf, count, 0, val); + if (res) + return res; + + return count; +} + +static int debugfs_v3_hw_show(struct 
seq_file *s, void *p) +{ + u32 *val = s->private; + + seq_printf(s, "0x%x\n", *val); + + return 0; +} + +static int debugfs_v3_hw_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, debugfs_v3_hw_show, + inode->i_private); +} + +static const struct file_operations debugfs_v3_hw_fops = { + .open = debugfs_v3_hw_open, + .read = seq_read, + .write = debugfs_v3_hw_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static ssize_t debugfs_phy_down_cnt_v3_hw_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = filp->private_data; + struct hisi_sas_phy *phy = s->private; + unsigned int set_val; + int res; + + res = kstrtouint_from_user(buf, count, 0, &set_val); + if (res) + return res; + + if (set_val > 0) + return -EINVAL; + + atomic_set(&phy->down_cnt, 0); + + return count; +} + +static int debugfs_phy_down_cnt_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_sas_phy *phy = s->private; + + seq_printf(s, "%d\n", atomic_read(&phy->down_cnt)); + + return 0; +} + +static int debugfs_phy_down_cnt_v3_hw_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, debugfs_phy_down_cnt_v3_hw_show, + inode->i_private); +} + +static const struct file_operations debugfs_phy_down_cnt_v3_hw_fops = { + .open = debugfs_phy_down_cnt_v3_hw_open, + .read = seq_read, + .write = debugfs_phy_down_cnt_v3_hw_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +enum fifo_dump_mode_v3_hw { + FIFO_DUMP_FORVER = (1U << 0), + FIFO_DUMP_AFTER_TRIGGER = (1U << 1), + FIFO_DUMP_UNTILL_TRIGGER = (1U << 2), +}; + +enum fifo_trigger_mode_v3_hw { + FIFO_TRIGGER_EDGE = (1U << 0), + FIFO_TRIGGER_SAME_LEVEL = (1U << 1), + FIFO_TRIGGER_DIFF_LEVEL = (1U << 2), +}; + +static int debugfs_is_fifo_config_valid_v3_hw(struct hisi_sas_phy *phy) +{ + struct hisi_hba *hisi_hba = phy->hisi_hba; + + if (phy->fifo.signal_sel > 0xf) { + dev_info(hisi_hba->dev, "Invalid signal select: %u\n", + phy->fifo.signal_sel); + return -EINVAL; + } + + switch (phy->fifo.dump_mode) { + case FIFO_DUMP_FORVER: + case FIFO_DUMP_AFTER_TRIGGER: + case FIFO_DUMP_UNTILL_TRIGGER: + break; + default: + dev_info(hisi_hba->dev, "Invalid dump mode: %u\n", + phy->fifo.dump_mode); + return -EINVAL; + } + + /* when FIFO_DUMP_FORVER, no need to check trigger_mode */ + if (phy->fifo.dump_mode == FIFO_DUMP_FORVER) + return 0; + + switch (phy->fifo.trigger_mode) { + case FIFO_TRIGGER_EDGE: + case FIFO_TRIGGER_SAME_LEVEL: + case FIFO_TRIGGER_DIFF_LEVEL: + break; + default: + dev_info(hisi_hba->dev, "Invalid trigger mode: %u\n", + phy->fifo.trigger_mode); + return -EINVAL; + } + return 0; +} + +static int debugfs_update_fifo_config_v3_hw(struct hisi_sas_phy *phy) +{ + u32 trigger_mode = phy->fifo.trigger_mode; + u32 signal_sel = phy->fifo.signal_sel; + u32 dump_mode = phy->fifo.dump_mode; + struct hisi_hba *hisi_hba = phy->hisi_hba; + int phy_no = phy->sas_phy.id; + u32 reg_val; + int res; + + /* Check the validity of trace FIFO configuration */ + res = debugfs_is_fifo_config_valid_v3_hw(phy); + if (res) + return res; + + reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL); + /* Disable trace FIFO before update configuration */ + reg_val |= DFX_FIFO_CTRL_DUMP_DISABLE_MSK; + + /* Update trace FIFO configuration */ + reg_val &= ~(DFX_FIFO_CTRL_DUMP_MODE_MSK | + DFX_FIFO_CTRL_SIGNAL_SEL_MSK | + DFX_FIFO_CTRL_TRIGGER_MODE_MSK); + + reg_val |= ((trigger_mode << DFX_FIFO_CTRL_TRIGGER_MODE_OFF) | + 
(dump_mode << DFX_FIFO_CTRL_DUMP_MODE_OFF) | + (signal_sel << DFX_FIFO_CTRL_SIGNAL_SEL_OFF)); + hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, reg_val); + + hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_DUMP_MSK, + phy->fifo.dump_msk); + + hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_TRIGGER, + phy->fifo.trigger); + + hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_TRIGGER_MSK, + phy->fifo.trigger_msk); + + /* Enable trace FIFO after updated configuration */ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL); + reg_val &= ~DFX_FIFO_CTRL_DUMP_DISABLE_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, reg_val); + + return 0; +} + +static ssize_t debugfs_fifo_update_cfg_v3_hw_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hisi_sas_phy *phy = filp->private_data; + bool update; + int val; + + val = kstrtobool_from_user(buf, count, &update); + if (val) + return val; + + if (update != 1) + return -EINVAL; + + val = debugfs_update_fifo_config_v3_hw(phy); + if (val) + return val; + + return count; +} + +static const struct file_operations debugfs_fifo_update_cfg_v3_hw_fops = { + .open = simple_open, + .write = debugfs_fifo_update_cfg_v3_hw_write, + .owner = THIS_MODULE, +}; + +static void debugfs_read_fifo_data_v3_hw(struct hisi_sas_phy *phy) +{ + struct hisi_hba *hisi_hba = phy->hisi_hba; + u32 *buf = phy->fifo.rd_data; + int phy_no = phy->sas_phy.id; + u32 val; + int i; + + memset(buf, 0, sizeof(phy->fifo.rd_data)); + + /* Disable trace FIFO before read data */ + val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL); + val |= DFX_FIFO_CTRL_DUMP_DISABLE_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, val); + + for (i = 0; i < HISI_SAS_FIFO_DATA_DW_SIZE; i++) { + val = hisi_sas_phy_read32(hisi_hba, phy_no, + DFX_FIFO_RD_DATA); + buf[i] = val; + } + + /* Enable trace FIFO after read data */ + val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL); + val &= ~DFX_FIFO_CTRL_DUMP_DISABLE_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, val); +} + +static int debugfs_fifo_data_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_sas_phy *phy = s->private; + + debugfs_read_fifo_data_v3_hw(phy); + + debugfs_show_row_32_v3_hw(s, 0, HISI_SAS_FIFO_DATA_DW_SIZE * 4, + (__le32 *)phy->fifo.rd_data); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_fifo_data_v3_hw); + +static void debugfs_fifo_init_v3_hw(struct hisi_hba *hisi_hba) +{ + int phy_no; + + hisi_hba->debugfs_fifo_dentry = + debugfs_create_dir("fifo", hisi_hba->debugfs_dir); + + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct dentry *port_dentry; + char name[256]; + u32 val; + + /* get default configuration for trace FIFO */ + val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL); + val &= DFX_FIFO_CTRL_DUMP_MODE_MSK; + val >>= DFX_FIFO_CTRL_DUMP_MODE_OFF; + phy->fifo.dump_mode = val; + + val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL); + val &= DFX_FIFO_CTRL_TRIGGER_MODE_MSK; + val >>= DFX_FIFO_CTRL_TRIGGER_MODE_OFF; + phy->fifo.trigger_mode = val; + + val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL); + val &= DFX_FIFO_CTRL_SIGNAL_SEL_MSK; + val >>= DFX_FIFO_CTRL_SIGNAL_SEL_OFF; + phy->fifo.signal_sel = val; + + val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_DUMP_MSK); + phy->fifo.dump_msk = val; + + val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_TRIGGER); + phy->fifo.trigger = val; + val = hisi_sas_phy_read32(hisi_hba, phy_no, 
DFX_FIFO_TRIGGER_MSK); + phy->fifo.trigger_msk = val; + + snprintf(name, 256, "%d", phy_no); + port_dentry = debugfs_create_dir(name, + hisi_hba->debugfs_fifo_dentry); + + debugfs_create_file("update_config", 0200, port_dentry, phy, + &debugfs_fifo_update_cfg_v3_hw_fops); + + debugfs_create_file("signal_sel", 0600, port_dentry, + &phy->fifo.signal_sel, + &debugfs_v3_hw_fops); + + debugfs_create_file("dump_msk", 0600, port_dentry, + &phy->fifo.dump_msk, + &debugfs_v3_hw_fops); + + debugfs_create_file("dump_mode", 0600, port_dentry, + &phy->fifo.dump_mode, + &debugfs_v3_hw_fops); + + debugfs_create_file("trigger_mode", 0600, port_dentry, + &phy->fifo.trigger_mode, + &debugfs_v3_hw_fops); + + debugfs_create_file("trigger", 0600, port_dentry, + &phy->fifo.trigger, + &debugfs_v3_hw_fops); + + debugfs_create_file("trigger_msk", 0600, port_dentry, + &phy->fifo.trigger_msk, + &debugfs_v3_hw_fops); + + debugfs_create_file("fifo_data", 0400, port_dentry, phy, + &debugfs_fifo_data_v3_hw_fops); + } +} + +static void debugfs_work_handler_v3_hw(struct work_struct *work) +{ + struct hisi_hba *hisi_hba = + container_of(work, struct hisi_hba, debugfs_work); + + debugfs_snapshot_regs_v3_hw(hisi_hba); +} + +static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index) +{ + struct device *dev = hisi_hba->dev; + int i; + + devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache); + devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache); + devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost); + devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct); + + for (i = 0; i < hisi_hba->queue_count; i++) + devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr); + + for (i = 0; i < hisi_hba->queue_count; i++) + devm_kfree(dev, + hisi_hba->debugfs_cq[dump_index][i].complete_hdr); + + for (i = 0; i < DEBUGFS_REGS_NUM; i++) + devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data); + + for (i = 0; i < hisi_hba->n_phy; i++) + devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data); +} + +static const struct hisi_sas_debugfs_reg *debugfs_reg_array_v3_hw[DEBUGFS_REGS_NUM] = { + [DEBUGFS_GLOBAL] = &debugfs_global_reg, + [DEBUGFS_AXI] = &debugfs_axi_reg, + [DEBUGFS_RAS] = &debugfs_ras_reg, +}; + +static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index) +{ + const struct hisi_sas_hw *hw = hisi_hba->hw; + struct device *dev = hisi_hba->dev; + int p, c, d, r, i; + size_t sz; + + for (r = 0; r < DEBUGFS_REGS_NUM; r++) { + struct hisi_sas_debugfs_regs *regs = + &hisi_hba->debugfs_regs[dump_index][r]; + + sz = debugfs_reg_array_v3_hw[r]->count * 4; + regs->data = devm_kmalloc(dev, sz, GFP_KERNEL); + if (!regs->data) + goto fail; + regs->hisi_hba = hisi_hba; + } + + sz = debugfs_port_reg.count * 4; + for (p = 0; p < hisi_hba->n_phy; p++) { + struct hisi_sas_debugfs_port *port = + &hisi_hba->debugfs_port_reg[dump_index][p]; + + port->data = devm_kmalloc(dev, sz, GFP_KERNEL); + if (!port->data) + goto fail; + port->phy = &hisi_hba->phy[p]; + } + + sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; + for (c = 0; c < hisi_hba->queue_count; c++) { + struct hisi_sas_debugfs_cq *cq = + &hisi_hba->debugfs_cq[dump_index][c]; + + cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL); + if (!cq->complete_hdr) + goto fail; + cq->cq = &hisi_hba->cq[c]; + } + + sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; + for (d = 0; d < hisi_hba->queue_count; d++) { + struct hisi_sas_debugfs_dq *dq = + &hisi_hba->debugfs_dq[dump_index][d]; + + dq->hdr = devm_kmalloc(dev, sz, 
GFP_KERNEL); + if (!dq->hdr) + goto fail; + dq->dq = &hisi_hba->dq[d]; + } + + sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost); + + hisi_hba->debugfs_iost[dump_index].iost = + devm_kmalloc(dev, sz, GFP_KERNEL); + if (!hisi_hba->debugfs_iost[dump_index].iost) + goto fail; + + sz = HISI_SAS_IOST_ITCT_CACHE_NUM * + sizeof(struct hisi_sas_iost_itct_cache); + + hisi_hba->debugfs_iost_cache[dump_index].cache = + devm_kmalloc(dev, sz, GFP_KERNEL); + if (!hisi_hba->debugfs_iost_cache[dump_index].cache) + goto fail; + + sz = HISI_SAS_IOST_ITCT_CACHE_NUM * + sizeof(struct hisi_sas_iost_itct_cache); + + hisi_hba->debugfs_itct_cache[dump_index].cache = + devm_kmalloc(dev, sz, GFP_KERNEL); + if (!hisi_hba->debugfs_itct_cache[dump_index].cache) + goto fail; + + /* New memory allocation must be locate before itct */ + sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); + + hisi_hba->debugfs_itct[dump_index].itct = + devm_kmalloc(dev, sz, GFP_KERNEL); + if (!hisi_hba->debugfs_itct[dump_index].itct) + goto fail; + + return 0; +fail: + for (i = 0; i < hisi_sas_debugfs_dump_count; i++) + debugfs_release_v3_hw(hisi_hba, i); + return -ENOMEM; +} + +static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba *hisi_hba) +{ + struct dentry *dir = debugfs_create_dir("phy_down_cnt", + hisi_hba->debugfs_dir); + char name[16]; + int phy_no; + + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { + snprintf(name, 16, "%d", phy_no); + debugfs_create_file(name, 0600, dir, + &hisi_hba->phy[phy_no], + &debugfs_phy_down_cnt_v3_hw_fops); + } +} + +static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba) +{ + struct dentry *ports_dentry; + int phy_no; + + hisi_hba->debugfs_bist_dentry = + debugfs_create_dir("bist", hisi_hba->debugfs_dir); + debugfs_create_file("link_rate", 0600, + hisi_hba->debugfs_bist_dentry, hisi_hba, + &debugfs_bist_linkrate_v3_hw_fops); + + debugfs_create_file("code_mode", 0600, + hisi_hba->debugfs_bist_dentry, hisi_hba, + &debugfs_bist_code_mode_v3_hw_fops); + + debugfs_create_file("fixed_code", 0600, + hisi_hba->debugfs_bist_dentry, + &hisi_hba->debugfs_bist_fixed_code[0], + &debugfs_v3_hw_fops); + + debugfs_create_file("fixed_code_1", 0600, + hisi_hba->debugfs_bist_dentry, + &hisi_hba->debugfs_bist_fixed_code[1], + &debugfs_v3_hw_fops); + + debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry, + hisi_hba, &debugfs_bist_phy_v3_hw_fops); + + debugfs_create_file("cnt", 0600, hisi_hba->debugfs_bist_dentry, + hisi_hba, &debugfs_bist_cnt_v3_hw_ops); + + debugfs_create_file("loopback_mode", 0600, + hisi_hba->debugfs_bist_dentry, + hisi_hba, &debugfs_bist_mode_v3_hw_fops); + + debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry, + hisi_hba, &debugfs_bist_enable_v3_hw_fops); + + ports_dentry = debugfs_create_dir("port", hisi_hba->debugfs_bist_dentry); + + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { + struct dentry *port_dentry; + struct dentry *ffe_dentry; + char name[256]; + int i; + + snprintf(name, 256, "%d", phy_no); + port_dentry = debugfs_create_dir(name, ports_dentry); + ffe_dentry = debugfs_create_dir("ffe", port_dentry); + for (i = 0; i < FFE_CFG_MAX; i++) { + if (i == FFE_RESV) + continue; + debugfs_create_file(debugfs_ffe_name_v3_hw[i].name, + 0600, ffe_dentry, + &hisi_hba->debugfs_bist_ffe[phy_no][i], + &debugfs_v3_hw_fops); + } + } + + hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS; +} + +static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba) +{ + debugfs_remove_recursive(hisi_hba->debugfs_dir); + 
hisi_hba->debugfs_dir = NULL; +} + +static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + int i; + + hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), + hisi_sas_debugfs_dir); + debugfs_create_file("trigger_dump", 0200, + hisi_hba->debugfs_dir, + hisi_hba, + &debugfs_trigger_dump_v3_hw_fops); + + /* create bist structures */ + debugfs_bist_init_v3_hw(hisi_hba); + + hisi_hba->debugfs_dump_dentry = + debugfs_create_dir("dump", hisi_hba->debugfs_dir); + + debugfs_phy_down_cnt_init_v3_hw(hisi_hba); + debugfs_fifo_init_v3_hw(hisi_hba); + + for (i = 0; i < hisi_sas_debugfs_dump_count; i++) { + if (debugfs_alloc_v3_hw(hisi_hba, i)) { + debugfs_exit_v3_hw(hisi_hba); + dev_dbg(dev, "failed to init debugfs!\n"); + break; + } + } +} + +static int +hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct Scsi_Host *shost; + struct hisi_hba *hisi_hba; + struct device *dev = &pdev->dev; + struct asd_sas_phy **arr_phy; + struct asd_sas_port **arr_port; + struct sas_ha_struct *sha; + int rc, phy_nr, port_nr, i; + + rc = pcim_enable_device(pdev); + if (rc) + goto err_out; + + pci_set_master(pdev); + + rc = pcim_iomap_regions(pdev, 1 << BAR_NO_V3_HW, DRV_NAME); + if (rc) + goto err_out; + + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) { + dev_err(dev, "No usable DMA addressing method\n"); + rc = -ENODEV; + goto err_out; + } + + shost = hisi_sas_shost_alloc_pci(pdev); + if (!shost) { + rc = -ENOMEM; + goto err_out; + } + + sha = SHOST_TO_SAS_HA(shost); + hisi_hba = shost_priv(shost); + dev_set_drvdata(dev, sha); + + hisi_hba->regs = pcim_iomap_table(pdev)[BAR_NO_V3_HW]; + if (!hisi_hba->regs) { + dev_err(dev, "cannot map register\n"); + rc = -ENOMEM; + goto err_out_free_host; + } + + phy_nr = port_nr = hisi_hba->n_phy; + + arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); + arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); + if (!arr_phy || !arr_port) { + rc = -ENOMEM; + goto err_out_free_host; + } + + sha->sas_phy = arr_phy; + sha->sas_port = arr_port; + sha->shost = shost; + sha->lldd_ha = hisi_hba; + + shost->transportt = hisi_sas_stt; + shost->max_id = HISI_SAS_MAX_DEVICES; + shost->max_lun = ~0; + shost->max_channel = 1; + shost->max_cmd_len = 16; + shost->can_queue = HISI_SAS_UNRESERVED_IPTT; + shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; + if (hisi_hba->iopoll_q_cnt) + shost->nr_maps = 3; + else + shost->nr_maps = 1; + + sha->sas_ha_name = DRV_NAME; + sha->dev = dev; + sha->sas_addr = &hisi_hba->sas_addr[0]; + sha->num_phys = hisi_hba->n_phy; + + for (i = 0; i < hisi_hba->n_phy; i++) { + sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; + sha->sas_port[i] = &hisi_hba->port[i].sas_port; + } + + if (hisi_hba->prot_mask) { + dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", + prot_mask); + scsi_host_set_prot(hisi_hba->shost, prot_mask); + if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) + scsi_host_set_guard(hisi_hba->shost, + SHOST_DIX_GUARD_CRC); + } + + if (hisi_sas_debugfs_enable) + debugfs_init_v3_hw(hisi_hba); + + rc = interrupt_preinit_v3_hw(hisi_hba); + if (rc) + goto err_out_undo_debugfs; + + rc = scsi_add_host(shost, dev); + if (rc) + goto err_out_undo_debugfs; + + rc = sas_register_ha(sha); + if (rc) + goto err_out_remove_host; + + rc = hisi_sas_v3_init(hisi_hba); + if (rc) + goto err_out_unregister_ha; + + scsi_scan_host(shost); + + pm_runtime_set_autosuspend_delay(dev, 5000); + pm_runtime_use_autosuspend(dev); + /* + * For the situation that there 
are ATA disks connected with SAS + * controller, it additionally creates ata_port which will affect the + * child_count of hisi_hba->dev. Even if suspended all the disks, + * ata_port is still and the child_count of hisi_hba->dev is not 0. + * So use pm_suspend_ignore_children() to ignore the effect to + * hisi_hba->dev. + */ + pm_suspend_ignore_children(dev, true); + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +err_out_unregister_ha: + sas_unregister_ha(sha); +err_out_remove_host: + scsi_remove_host(shost); +err_out_undo_debugfs: + debugfs_exit_v3_hw(hisi_hba); +err_out_free_host: + hisi_sas_free(hisi_hba); + scsi_host_put(shost); +err_out: + return rc; +} + +static void +hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba) +{ + int i; + + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 1), hisi_hba); + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 2), hisi_hba); + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 11), hisi_hba); + for (i = 0; i < hisi_hba->cq_nvecs; i++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + int nr = hisi_sas_intr_conv ? 16 : 16 + i; + + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq); + } +} + +static void hisi_sas_v3_remove(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct sas_ha_struct *sha = dev_get_drvdata(dev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + struct Scsi_Host *shost = sha->shost; + + pm_runtime_get_noresume(dev); + del_timer_sync(&hisi_hba->timer); + + sas_unregister_ha(sha); + flush_workqueue(hisi_hba->wq); + sas_remove_host(shost); + + hisi_sas_v3_destroy_irqs(pdev, hisi_hba); + hisi_sas_free(hisi_hba); + debugfs_exit_v3_hw(hisi_hba); + scsi_host_put(shost); +} + +static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev) +{ + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + struct device *dev = hisi_hba->dev; + int rc; + + dev_info(dev, "FLR prepare\n"); + down(&hisi_hba->sem); + set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); + hisi_sas_controller_reset_prepare(hisi_hba); + + interrupt_disable_v3_hw(hisi_hba); + rc = disable_host_v3_hw(hisi_hba); + if (rc) + dev_err(dev, "FLR: disable host failed rc=%d\n", rc); +} + +static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev) +{ + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + struct Scsi_Host *shost = hisi_hba->shost; + struct device *dev = hisi_hba->dev; + int rc; + + hisi_sas_init_mem(hisi_hba); + + rc = hw_init_v3_hw(hisi_hba); + if (rc) { + dev_err(dev, "FLR: hw init failed rc=%d\n", rc); + clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + scsi_unblock_requests(shost); + clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); + up(&hisi_hba->sem); + return; + } + + hisi_sas_controller_reset_done(hisi_hba); + dev_info(dev, "FLR done\n"); +} + +enum { + /* instances of the controller */ + hip08, +}; + +static void enable_host_v3_hw(struct hisi_hba *hisi_hba) +{ + u32 reg_val; + + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, + (u32)((1ULL << hisi_hba->queue_count) - 1)); + + phys_init_v3_hw(hisi_hba); + reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL); + reg_val &= ~AM_CTRL_SHUTDOWN_REQ_MSK; + hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL, reg_val); +} + +static int _suspend_v3_hw(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + struct device *dev = 
hisi_hba->dev; + struct Scsi_Host *shost = hisi_hba->shost; + int rc; + + if (!pdev->pm_cap) { + dev_err(dev, "PCI PM not supported\n"); + return -ENODEV; + } + + if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) + return -EPERM; + + dev_warn(dev, "entering suspend state\n"); + + scsi_block_requests(shost); + set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + flush_workqueue(hisi_hba->wq); + interrupt_disable_v3_hw(hisi_hba); + +#ifdef CONFIG_PM + if (atomic_read(&device->power.usage_count)) { + dev_err(dev, "PM suspend: host status cannot be suspended\n"); + rc = -EBUSY; + goto err_out; + } +#endif + + rc = disable_host_v3_hw(hisi_hba); + if (rc) { + dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc); + goto err_out_recover_host; + } + + hisi_sas_init_mem(hisi_hba); + + hisi_sas_release_tasks(hisi_hba); + + sas_suspend_ha(sha); + + dev_warn(dev, "end of suspending controller\n"); + return 0; + +err_out_recover_host: + enable_host_v3_hw(hisi_hba); +#ifdef CONFIG_PM +err_out: +#endif + interrupt_enable_v3_hw(hisi_hba); + clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); + scsi_unblock_requests(shost); + return rc; +} + +static int _resume_v3_hw(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + struct Scsi_Host *shost = hisi_hba->shost; + struct device *dev = hisi_hba->dev; + unsigned int rc; + pci_power_t device_state = pdev->current_state; + + dev_warn(dev, "resuming from operating state [D%d]\n", + device_state); + + scsi_unblock_requests(shost); + clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + + sas_prep_resume_ha(sha); + rc = hw_init_v3_hw(hisi_hba); + if (rc) { + scsi_remove_host(shost); + return rc; + } + phys_init_v3_hw(hisi_hba); + + /* + * If a directly-attached disk is removed during suspend, a deadlock + * may occur, as the PHYE_RESUME_TIMEOUT processing will require the + * hisi_hba->device to be active, which can only happen when resume + * completes. So don't wait for the HA event workqueue to drain upon + * resume. 
+ */ + sas_resume_ha_no_sync(sha); + clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); + + dev_warn(dev, "end of resuming controller\n"); + + return 0; +} + +static int __maybe_unused suspend_v3_hw(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + int rc; + + set_bit(HISI_SAS_PM_BIT, &hisi_hba->flags); + + rc = _suspend_v3_hw(device); + if (rc) + clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags); + + return rc; +} + +static int __maybe_unused resume_v3_hw(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + int rc = _resume_v3_hw(device); + + clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags); + + return rc; +} + +static const struct pci_device_id sas_v3_pci_table[] = { + { PCI_VDEVICE(HUAWEI, 0xa230), hip08 }, + {} +}; +MODULE_DEVICE_TABLE(pci, sas_v3_pci_table); + +static const struct pci_error_handlers hisi_sas_err_handler = { + .reset_prepare = hisi_sas_reset_prepare_v3_hw, + .reset_done = hisi_sas_reset_done_v3_hw, +}; + +static UNIVERSAL_DEV_PM_OPS(hisi_sas_v3_pm_ops, + suspend_v3_hw, + resume_v3_hw, + NULL); + +static struct pci_driver sas_v3_pci_driver = { + .name = DRV_NAME, + .id_table = sas_v3_pci_table, + .probe = hisi_sas_v3_probe, + .remove = hisi_sas_v3_remove, + .err_handler = &hisi_sas_err_handler, + .driver.pm = &hisi_sas_v3_pm_ops, +}; + +module_pci_driver(sas_v3_pci_driver); +module_param_named(intr_conv, hisi_sas_intr_conv, bool, 0444); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Garry "); +MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device"); +MODULE_ALIAS("pci:" DRV_NAME); diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c new file mode 100644 index 000000000..d7f51b84f --- /dev/null +++ b/drivers/scsi/hosts.c @@ -0,0 +1,744 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * hosts.c Copyright (C) 1992 Drew Eckhardt + * Copyright (C) 1993, 1994, 1995 Eric Youngdale + * Copyright (C) 2002-2003 Christoph Hellwig + * + * mid to lowlevel SCSI driver interface + * Initial versions: Drew Eckhardt + * Subsequent revisions: Eric Youngdale + * + * + * + * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli + * Added QLOGIC QLA1280 SCSI controller kernel host support. + * August 4, 1999 Fred Lewis, Intel DuPont + * + * Updated to reflect the new initialization scheme for the higher + * level of scsi drivers (sd/sr/st) + * September 17, 2000 Torben Mathiasen + * + * Restructured scsi_host lists and associated functions. + * September 04, 2002 Mike Anderson (andmike@us.ibm.com) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "scsi_priv.h" +#include "scsi_logging.h" + + +static int shost_eh_deadline = -1; + +module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(eh_deadline, + "SCSI EH timeout in seconds (should be between 0 and 2^31-1)"); + +static DEFINE_IDA(host_index_ida); + + +static void scsi_host_cls_release(struct device *dev) +{ + put_device(&class_to_shost(dev)->shost_gendev); +} + +static struct class shost_class = { + .name = "scsi_host", + .dev_release = scsi_host_cls_release, + .dev_groups = scsi_shost_groups, +}; + +/** + * scsi_host_set_state - Take the given host through the host state model. 
+ * @shost: scsi host to change the state of. + * @state: state to change to. + * + * Returns zero if successful or an error if the requested + * transition is illegal. + **/ +int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state) +{ + enum scsi_host_state oldstate = shost->shost_state; + + if (state == oldstate) + return 0; + + switch (state) { + case SHOST_CREATED: + /* There are no legal states that come back to + * created. This is the manually initialised start + * state */ + goto illegal; + + case SHOST_RUNNING: + switch (oldstate) { + case SHOST_CREATED: + case SHOST_RECOVERY: + break; + default: + goto illegal; + } + break; + + case SHOST_RECOVERY: + switch (oldstate) { + case SHOST_RUNNING: + break; + default: + goto illegal; + } + break; + + case SHOST_CANCEL: + switch (oldstate) { + case SHOST_CREATED: + case SHOST_RUNNING: + case SHOST_CANCEL_RECOVERY: + break; + default: + goto illegal; + } + break; + + case SHOST_DEL: + switch (oldstate) { + case SHOST_CANCEL: + case SHOST_DEL_RECOVERY: + break; + default: + goto illegal; + } + break; + + case SHOST_CANCEL_RECOVERY: + switch (oldstate) { + case SHOST_CANCEL: + case SHOST_RECOVERY: + break; + default: + goto illegal; + } + break; + + case SHOST_DEL_RECOVERY: + switch (oldstate) { + case SHOST_CANCEL_RECOVERY: + break; + default: + goto illegal; + } + break; + } + shost->shost_state = state; + return 0; + + illegal: + SCSI_LOG_ERROR_RECOVERY(1, + shost_printk(KERN_ERR, shost, + "Illegal host state transition " + "%s->%s\n", + scsi_host_state_name(oldstate), + scsi_host_state_name(state))); + return -EINVAL; +} + +/** + * scsi_remove_host - remove a scsi host + * @shost: a pointer to a scsi host to remove + **/ +void scsi_remove_host(struct Scsi_Host *shost) +{ + unsigned long flags; + + mutex_lock(&shost->scan_mutex); + spin_lock_irqsave(shost->host_lock, flags); + if (scsi_host_set_state(shost, SHOST_CANCEL)) + if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) { + spin_unlock_irqrestore(shost->host_lock, flags); + mutex_unlock(&shost->scan_mutex); + return; + } + spin_unlock_irqrestore(shost->host_lock, flags); + + scsi_autopm_get_host(shost); + flush_workqueue(shost->tmf_work_q); + scsi_forget_host(shost); + mutex_unlock(&shost->scan_mutex); + scsi_proc_host_rm(shost); + scsi_proc_hostdir_rm(shost->hostt); + + /* + * New SCSI devices cannot be attached anymore because of the SCSI host + * state so drop the tag set refcnt. Wait until the tag set refcnt drops + * to zero because .exit_cmd_priv implementations may need the host + * pointer. + */ + kref_put(&shost->tagset_refcnt, scsi_mq_free_tags); + wait_for_completion(&shost->tagset_freed); + + spin_lock_irqsave(shost->host_lock, flags); + if (scsi_host_set_state(shost, SHOST_DEL)) + BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY)); + spin_unlock_irqrestore(shost->host_lock, flags); + + transport_unregister_device(&shost->shost_gendev); + device_unregister(&shost->shost_dev); + device_del(&shost->shost_gendev); +} +EXPORT_SYMBOL(scsi_remove_host); + +/** + * scsi_add_host_with_dma - add a scsi host with dma device + * @shost: scsi host pointer to add + * @dev: a struct device of type scsi class + * @dma_dev: dma device for the host + * + * Note: You rarely need to worry about this unless you're in a + * virtualised host environment, so use the simpler scsi_add_host() + * function instead.
+ * + * Return value: + * 0 on success / != 0 for error + **/ +int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, + struct device *dma_dev) +{ + const struct scsi_host_template *sht = shost->hostt; + int error = -EINVAL; + + shost_printk(KERN_INFO, shost, "%s\n", + sht->info ? sht->info(shost) : sht->name); + + if (!shost->can_queue) { + shost_printk(KERN_ERR, shost, + "can_queue = 0 no longer supported\n"); + goto fail; + } + + /* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */ + shost->cmd_per_lun = min_t(int, shost->cmd_per_lun, + shost->can_queue); + + error = scsi_init_sense_cache(shost); + if (error) + goto fail; + + if (!shost->shost_gendev.parent) + shost->shost_gendev.parent = dev ? dev : &platform_bus; + if (!dma_dev) + dma_dev = shost->shost_gendev.parent; + + shost->dma_dev = dma_dev; + + if (dma_dev->dma_mask) { + shost->max_sectors = min_t(unsigned int, shost->max_sectors, + dma_max_mapping_size(dma_dev) >> SECTOR_SHIFT); + } + + error = scsi_mq_setup_tags(shost); + if (error) + goto fail; + + kref_init(&shost->tagset_refcnt); + init_completion(&shost->tagset_freed); + + /* + * Increase usage count temporarily here so that calling + * scsi_autopm_put_host() will trigger runtime idle if there is + * nothing else preventing suspending the device. + */ + pm_runtime_get_noresume(&shost->shost_gendev); + pm_runtime_set_active(&shost->shost_gendev); + pm_runtime_enable(&shost->shost_gendev); + device_enable_async_suspend(&shost->shost_gendev); + + error = device_add(&shost->shost_gendev); + if (error) + goto out_disable_runtime_pm; + + scsi_host_set_state(shost, SHOST_RUNNING); + get_device(shost->shost_gendev.parent); + + device_enable_async_suspend(&shost->shost_dev); + + get_device(&shost->shost_gendev); + error = device_add(&shost->shost_dev); + if (error) + goto out_del_gendev; + + if (shost->transportt->host_size) { + shost->shost_data = kzalloc(shost->transportt->host_size, + GFP_KERNEL); + if (shost->shost_data == NULL) { + error = -ENOMEM; + goto out_del_dev; + } + } + + if (shost->transportt->create_work_queue) { + snprintf(shost->work_q_name, sizeof(shost->work_q_name), + "scsi_wq_%d", shost->host_no); + shost->work_q = alloc_workqueue("%s", + WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, + 1, shost->work_q_name); + + if (!shost->work_q) { + error = -EINVAL; + goto out_del_dev; + } + } + + error = scsi_sysfs_add_host(shost); + if (error) + goto out_del_dev; + + scsi_proc_host_add(shost); + scsi_autopm_put_host(shost); + return error; + + /* + * Any host allocation in this function will be freed in + * scsi_host_dev_release(). + */ + out_del_dev: + device_del(&shost->shost_dev); + out_del_gendev: + /* + * Host state is SHOST_RUNNING so we have to explicitly release + * ->shost_dev. + */ + put_device(&shost->shost_dev); + device_del(&shost->shost_gendev); + out_disable_runtime_pm: + device_disable_async_suspend(&shost->shost_gendev); + pm_runtime_disable(&shost->shost_gendev); + pm_runtime_set_suspended(&shost->shost_gendev); + pm_runtime_put_noidle(&shost->shost_gendev); + kref_put(&shost->tagset_refcnt, scsi_mq_free_tags); + fail: + return error; +} +EXPORT_SYMBOL(scsi_add_host_with_dma); + +static void scsi_host_dev_release(struct device *dev) +{ + struct Scsi_Host *shost = dev_to_shost(dev); + struct device *parent = dev->parent; + + /* Wait for functions invoked through call_rcu(&scmd->rcu, ...) 
*/ + rcu_barrier(); + + if (shost->tmf_work_q) + destroy_workqueue(shost->tmf_work_q); + if (shost->ehandler) + kthread_stop(shost->ehandler); + if (shost->work_q) + destroy_workqueue(shost->work_q); + + if (shost->shost_state == SHOST_CREATED) { + /* + * Free the shost_dev device name here if scsi_host_alloc() + * and scsi_host_put() have been called but neither + * scsi_host_add() nor scsi_remove_host() has been called. + * This avoids that the memory allocated for the shost_dev + * name is leaked. + */ + kfree(dev_name(&shost->shost_dev)); + } + + kfree(shost->shost_data); + + ida_free(&host_index_ida, shost->host_no); + + if (shost->shost_state != SHOST_CREATED) + put_device(parent); + kfree(shost); +} + +static struct device_type scsi_host_type = { + .name = "scsi_host", + .release = scsi_host_dev_release, +}; + +/** + * scsi_host_alloc - register a scsi host adapter instance. + * @sht: pointer to scsi host template + * @privsize: extra bytes to allocate for driver + * + * Note: + * Allocate a new Scsi_Host and perform basic initialization. + * The host is not published to the scsi midlayer until scsi_add_host + * is called. + * + * Return value: + * Pointer to a new Scsi_Host + **/ +struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int privsize) +{ + struct Scsi_Host *shost; + int index; + + shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL); + if (!shost) + return NULL; + + shost->host_lock = &shost->default_lock; + spin_lock_init(shost->host_lock); + shost->shost_state = SHOST_CREATED; + INIT_LIST_HEAD(&shost->__devices); + INIT_LIST_HEAD(&shost->__targets); + INIT_LIST_HEAD(&shost->eh_abort_list); + INIT_LIST_HEAD(&shost->eh_cmd_q); + INIT_LIST_HEAD(&shost->starved_list); + init_waitqueue_head(&shost->host_wait); + mutex_init(&shost->scan_mutex); + + index = ida_alloc(&host_index_ida, GFP_KERNEL); + if (index < 0) { + kfree(shost); + return NULL; + } + shost->host_no = index; + + shost->dma_channel = 0xff; + + /* These three are default values which can be overridden */ + shost->max_channel = 0; + shost->max_id = 8; + shost->max_lun = 8; + + /* Give each shost a default transportt */ + shost->transportt = &blank_transport_template; + + /* + * All drivers right now should be able to handle 12 byte + * commands. Every so often there are requests for 16 byte + * commands, but individual low-level drivers need to certify that + * they actually do something sensible with such commands. + */ + shost->max_cmd_len = 12; + shost->hostt = sht; + shost->this_id = sht->this_id; + shost->can_queue = sht->can_queue; + shost->sg_tablesize = sht->sg_tablesize; + shost->sg_prot_tablesize = sht->sg_prot_tablesize; + shost->cmd_per_lun = sht->cmd_per_lun; + shost->no_write_same = sht->no_write_same; + shost->host_tagset = sht->host_tagset; + shost->queuecommand_may_block = sht->queuecommand_may_block; + + if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler) + shost->eh_deadline = -1; + else if ((ulong) shost_eh_deadline * HZ > INT_MAX) { + shost_printk(KERN_WARNING, shost, + "eh_deadline %u too large, setting to %u\n", + shost_eh_deadline, INT_MAX / HZ); + shost->eh_deadline = INT_MAX; + } else + shost->eh_deadline = shost_eh_deadline * HZ; + + if (sht->supported_mode == MODE_UNKNOWN) + /* means we didn't set it ... 
default to INITIATOR */ + shost->active_mode = MODE_INITIATOR; + else + shost->active_mode = sht->supported_mode; + + if (sht->max_host_blocked) + shost->max_host_blocked = sht->max_host_blocked; + else + shost->max_host_blocked = SCSI_DEFAULT_HOST_BLOCKED; + + /* + * If the driver imposes no hard sector transfer limit, start at + * machine infinity initially. + */ + if (sht->max_sectors) + shost->max_sectors = sht->max_sectors; + else + shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS; + + if (sht->max_segment_size) + shost->max_segment_size = sht->max_segment_size; + else + shost->max_segment_size = BLK_MAX_SEGMENT_SIZE; + + /* + * assume a 4GB boundary, if not set + */ + if (sht->dma_boundary) + shost->dma_boundary = sht->dma_boundary; + else + shost->dma_boundary = 0xffffffff; + + if (sht->virt_boundary_mask) + shost->virt_boundary_mask = sht->virt_boundary_mask; + + device_initialize(&shost->shost_gendev); + dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); + shost->shost_gendev.bus = &scsi_bus_type; + shost->shost_gendev.type = &scsi_host_type; + scsi_enable_async_suspend(&shost->shost_gendev); + + device_initialize(&shost->shost_dev); + shost->shost_dev.parent = &shost->shost_gendev; + shost->shost_dev.class = &shost_class; + dev_set_name(&shost->shost_dev, "host%d", shost->host_no); + shost->shost_dev.groups = sht->shost_groups; + + shost->ehandler = kthread_run(scsi_error_handler, shost, + "scsi_eh_%d", shost->host_no); + if (IS_ERR(shost->ehandler)) { + shost_printk(KERN_WARNING, shost, + "error handler thread failed to spawn, error = %ld\n", + PTR_ERR(shost->ehandler)); + shost->ehandler = NULL; + goto fail; + } + + shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d", + WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, + 1, shost->host_no); + if (!shost->tmf_work_q) { + shost_printk(KERN_WARNING, shost, + "failed to create tmf workq\n"); + goto fail; + } + if (scsi_proc_hostdir_add(shost->hostt) < 0) + goto fail; + return shost; + fail: + /* + * Host state is still SHOST_CREATED and that is enough to release + * ->shost_gendev. scsi_host_dev_release() will free + * dev_name(&shost->shost_dev). + */ + put_device(&shost->shost_gendev); + + return NULL; +} +EXPORT_SYMBOL(scsi_host_alloc); + +static int __scsi_host_match(struct device *dev, const void *data) +{ + struct Scsi_Host *p; + const unsigned int *hostnum = data; + + p = class_to_shost(dev); + return p->host_no == *hostnum; +} + +/** + * scsi_host_lookup - get a reference to a Scsi_Host by host no + * @hostnum: host number to locate + * + * Return value: + * A pointer to located Scsi_Host or NULL. + * + * The caller must do a scsi_host_put() to drop the reference + * that scsi_host_get() took. The put_device() below dropped + * the reference from class_find_device(). + **/ +struct Scsi_Host *scsi_host_lookup(unsigned int hostnum) +{ + struct device *cdev; + struct Scsi_Host *shost = NULL; + + cdev = class_find_device(&shost_class, NULL, &hostnum, + __scsi_host_match); + if (cdev) { + shost = scsi_host_get(class_to_shost(cdev)); + put_device(cdev); + } + return shost; +} +EXPORT_SYMBOL(scsi_host_lookup); + +/** + * scsi_host_get - inc a Scsi_Host ref count + * @shost: Pointer to Scsi_Host to inc. 
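+ * + * Returns @shost on success, or NULL if the host is already in the + * SHOST_DEL state or its device reference could not be taken.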
+ **/ +struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost) +{ + if ((shost->shost_state == SHOST_DEL) || + !get_device(&shost->shost_gendev)) + return NULL; + return shost; +} +EXPORT_SYMBOL(scsi_host_get); + +static bool scsi_host_check_in_flight(struct request *rq, void *data) +{ + int *count = data; + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); + + if (test_bit(SCMD_STATE_INFLIGHT, &cmd->state)) + (*count)++; + + return true; +} + +/** + * scsi_host_busy - Return the host busy counter + * @shost: Pointer to Scsi_Host to inc. + **/ +int scsi_host_busy(struct Scsi_Host *shost) +{ + int cnt = 0; + + blk_mq_tagset_busy_iter(&shost->tag_set, + scsi_host_check_in_flight, &cnt); + return cnt; +} +EXPORT_SYMBOL(scsi_host_busy); + +/** + * scsi_host_put - dec a Scsi_Host ref count + * @shost: Pointer to Scsi_Host to dec. + **/ +void scsi_host_put(struct Scsi_Host *shost) +{ + put_device(&shost->shost_gendev); +} +EXPORT_SYMBOL(scsi_host_put); + +int scsi_init_hosts(void) +{ + return class_register(&shost_class); +} + +void scsi_exit_hosts(void) +{ + class_unregister(&shost_class); + ida_destroy(&host_index_ida); +} + +int scsi_is_host_device(const struct device *dev) +{ + return dev->type == &scsi_host_type; +} +EXPORT_SYMBOL(scsi_is_host_device); + +/** + * scsi_queue_work - Queue work to the Scsi_Host workqueue. + * @shost: Pointer to Scsi_Host. + * @work: Work to queue for execution. + * + * Return value: + * 1 - work queued for execution + * 0 - work is already queued + * -EINVAL - work queue doesn't exist + **/ +int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work) +{ + if (unlikely(!shost->work_q)) { + shost_printk(KERN_ERR, shost, + "ERROR: Scsi host '%s' attempted to queue scsi-work, " + "when no workqueue created.\n", shost->hostt->name); + dump_stack(); + + return -EINVAL; + } + + return queue_work(shost->work_q, work); +} +EXPORT_SYMBOL_GPL(scsi_queue_work); + +/** + * scsi_flush_work - Flush a Scsi_Host's workqueue. + * @shost: Pointer to Scsi_Host. + **/ +void scsi_flush_work(struct Scsi_Host *shost) +{ + if (!shost->work_q) { + shost_printk(KERN_ERR, shost, + "ERROR: Scsi host '%s' attempted to flush scsi-work, " + "when no workqueue created.\n", shost->hostt->name); + dump_stack(); + return; + } + + flush_workqueue(shost->work_q); +} +EXPORT_SYMBOL_GPL(scsi_flush_work); + +static bool complete_all_cmds_iter(struct request *rq, void *data) +{ + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); + enum scsi_host_status status = *(enum scsi_host_status *)data; + + scsi_dma_unmap(scmd); + scmd->result = 0; + set_host_byte(scmd, status); + scsi_done(scmd); + return true; +} + +/** + * scsi_host_complete_all_commands - Terminate all running commands + * @shost: Scsi Host on which commands should be terminated + * @status: Status to be set for the terminated commands + * + * There is no protection against modification of the number + * of outstanding commands. It is the responsibility of the + * caller to ensure that concurrent I/O submission and/or + * completion is stopped when calling this function. 
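+ * + * Each outstanding command is completed through scsi_done() with its host + * byte set to @status (see complete_all_cmds_iter() above).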
+ */ +void scsi_host_complete_all_commands(struct Scsi_Host *shost, + enum scsi_host_status status) +{ + blk_mq_tagset_busy_iter(&shost->tag_set, complete_all_cmds_iter, + &status); +} +EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands); + +struct scsi_host_busy_iter_data { + bool (*fn)(struct scsi_cmnd *, void *); + void *priv; +}; + +static bool __scsi_host_busy_iter_fn(struct request *req, void *priv) +{ + struct scsi_host_busy_iter_data *iter_data = priv; + struct scsi_cmnd *sc = blk_mq_rq_to_pdu(req); + + return iter_data->fn(sc, iter_data->priv); +} + +/** + * scsi_host_busy_iter - Iterate over all busy commands + * @shost: Pointer to Scsi_Host. + * @fn: Function to call on each busy command + * @priv: Data pointer passed to @fn + * + * If locking against concurrent command completions is required + * ithas to be provided by the caller + **/ +void scsi_host_busy_iter(struct Scsi_Host *shost, + bool (*fn)(struct scsi_cmnd *, void *), + void *priv) +{ + struct scsi_host_busy_iter_data iter_data = { + .fn = fn, + .priv = priv, + }; + + blk_mq_tagset_busy_iter(&shost->tag_set, __scsi_host_busy_iter_fn, + &iter_data); +} +EXPORT_SYMBOL_GPL(scsi_host_busy_iter); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c new file mode 100644 index 000000000..af18d20f3 --- /dev/null +++ b/drivers/scsi/hpsa.c @@ -0,0 +1,10022 @@ +/* + * Disk Array driver for HP Smart Array SAS controllers + * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries + * Copyright 2016 Microsemi Corporation + * Copyright 2014-2015 PMC-Sierra, Inc. + * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more details. + * + * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hpsa_cmd.h" +#include "hpsa.h" + +/* + * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' + * with an optional trailing '-' followed by a byte value (0-255). 
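+ * For example, the version defined below, "3.4.20-200", follows this format.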
+ */ +#define HPSA_DRIVER_VERSION "3.4.20-200" +#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" +#define HPSA "hpsa" + +/* How long to wait for CISS doorbell communication */ +#define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */ +#define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */ +#define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */ +#define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */ +#define MAX_IOCTL_CONFIG_WAIT 1000 + +/*define how many times we will try a command because of bus resets */ +#define MAX_CMD_RETRIES 3 +/* How long to wait before giving up on a command */ +#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ) + +/* Embedded module documentation macros - see modules.h */ +MODULE_AUTHOR("Hewlett-Packard Company"); +MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \ + HPSA_DRIVER_VERSION); +MODULE_VERSION(HPSA_DRIVER_VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("cciss"); + +static int hpsa_simple_mode; +module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(hpsa_simple_mode, + "Use 'simple mode' rather than 'performant mode'"); + +/* define the PCI info for the cards we can control */ +static const struct pci_device_id hpsa_pci_device_id[] = { + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 
0x21C9}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE}, + {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580}, + {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581}, + {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582}, + {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583}, + {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584}, + {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585}, + {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, + {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, + {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, + {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088}, + {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f}, + {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, + {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, + {0,} +}; + +MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id); + +/* board_id = Subsystem Device ID & Vendor ID + * product = Marketing Name for the board + * access = Address of the struct of function pointers + */ +static struct board_type products[] = { + {0x40700E11, "Smart Array 5300", &SA5A_access}, + {0x40800E11, "Smart Array 5i", &SA5B_access}, + {0x40820E11, "Smart Array 532", &SA5B_access}, + {0x40830E11, "Smart Array 5312", &SA5B_access}, + {0x409A0E11, "Smart Array 641", &SA5A_access}, + {0x409B0E11, "Smart Array 642", &SA5A_access}, + {0x409C0E11, "Smart Array 6400", &SA5A_access}, + {0x409D0E11, "Smart Array 6400 EM", &SA5A_access}, + {0x40910E11, "Smart Array 6i", &SA5A_access}, + {0x3225103C, "Smart Array P600", &SA5A_access}, + {0x3223103C, "Smart Array P800", &SA5A_access}, + {0x3234103C, "Smart Array P400", &SA5A_access}, + {0x3235103C, "Smart Array P400i", &SA5A_access}, + {0x3211103C, "Smart Array E200i", &SA5A_access}, + {0x3212103C, "Smart Array E200", &SA5A_access}, + {0x3213103C, "Smart Array E200i", &SA5A_access}, + {0x3214103C, "Smart Array E200i", &SA5A_access}, + {0x3215103C, "Smart Array E200i", &SA5A_access}, + {0x3237103C, "Smart Array E500", &SA5A_access}, + {0x323D103C, "Smart Array P700m", &SA5A_access}, + {0x3241103C, "Smart Array P212", &SA5_access}, + {0x3243103C, "Smart Array P410", &SA5_access}, + {0x3245103C, "Smart Array P410i", &SA5_access}, + {0x3247103C, "Smart Array P411", &SA5_access}, + {0x3249103C, "Smart Array P812", &SA5_access}, + {0x324A103C, "Smart Array P712m", &SA5_access}, + {0x324B103C, "Smart Array P711m", &SA5_access}, + {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */ + {0x3350103C, "Smart Array P222", &SA5_access}, + {0x3351103C, "Smart Array P420", &SA5_access}, + {0x3352103C, "Smart Array P421", &SA5_access}, + {0x3353103C, "Smart Array P822", &SA5_access}, + {0x3354103C, "Smart Array P420i", &SA5_access}, + {0x3355103C, "Smart Array P220i", &SA5_access}, + {0x3356103C, "Smart Array P721m", &SA5_access}, + {0x1920103C, "Smart Array P430i", &SA5_access}, + {0x1921103C, "Smart Array P830i", &SA5_access}, + {0x1922103C, "Smart Array P430", &SA5_access}, + {0x1923103C, "Smart Array P431", &SA5_access}, + {0x1924103C, "Smart Array P830", &SA5_access}, + {0x1925103C, "Smart Array P831", &SA5_access}, + {0x1926103C, "Smart Array P731m", &SA5_access}, + {0x1928103C, "Smart Array P230i", &SA5_access}, + {0x1929103C, "Smart Array P530", 
&SA5_access}, + {0x21BD103C, "Smart Array P244br", &SA5_access}, + {0x21BE103C, "Smart Array P741m", &SA5_access}, + {0x21BF103C, "Smart HBA H240ar", &SA5_access}, + {0x21C0103C, "Smart Array P440ar", &SA5_access}, + {0x21C1103C, "Smart Array P840ar", &SA5_access}, + {0x21C2103C, "Smart Array P440", &SA5_access}, + {0x21C3103C, "Smart Array P441", &SA5_access}, + {0x21C4103C, "Smart Array", &SA5_access}, + {0x21C5103C, "Smart Array P841", &SA5_access}, + {0x21C6103C, "Smart HBA H244br", &SA5_access}, + {0x21C7103C, "Smart HBA H240", &SA5_access}, + {0x21C8103C, "Smart HBA H241", &SA5_access}, + {0x21C9103C, "Smart Array", &SA5_access}, + {0x21CA103C, "Smart Array P246br", &SA5_access}, + {0x21CB103C, "Smart Array P840", &SA5_access}, + {0x21CC103C, "Smart Array", &SA5_access}, + {0x21CD103C, "Smart Array", &SA5_access}, + {0x21CE103C, "Smart HBA", &SA5_access}, + {0x05809005, "SmartHBA-SA", &SA5_access}, + {0x05819005, "SmartHBA-SA 8i", &SA5_access}, + {0x05829005, "SmartHBA-SA 8i8e", &SA5_access}, + {0x05839005, "SmartHBA-SA 8e", &SA5_access}, + {0x05849005, "SmartHBA-SA 16i", &SA5_access}, + {0x05859005, "SmartHBA-SA 4i4e", &SA5_access}, + {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, + {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, + {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, + {0x00881590, "HP Storage P1228e Array Controller", &SA5_access}, + {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access}, + {0xFFFF103C, "Unknown Smart Array", &SA5_access}, +}; + +static struct scsi_transport_template *hpsa_sas_transport_template; +static int hpsa_add_sas_host(struct ctlr_info *h); +static void hpsa_delete_sas_host(struct ctlr_info *h); +static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node, + struct hpsa_scsi_dev_t *device); +static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device); +static struct hpsa_scsi_dev_t + *hpsa_find_device_by_sas_rphy(struct ctlr_info *h, + struct sas_rphy *rphy); + +#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy) +static const struct scsi_cmnd hpsa_cmd_busy; +#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle) +static const struct scsi_cmnd hpsa_cmd_idle; +static int number_of_controllers; + +static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); +static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); +static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *arg); +static int hpsa_passthru_ioctl(struct ctlr_info *h, + IOCTL_Command_struct *iocommand); +static int hpsa_big_passthru_ioctl(struct ctlr_info *h, + BIG_IOCTL_Command_struct *ioc); + +#ifdef CONFIG_COMPAT +static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *arg); +#endif + +static void cmd_free(struct ctlr_info *h, struct CommandList *c); +static struct CommandList *cmd_alloc(struct ctlr_info *h); +static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c); +static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, + struct scsi_cmnd *scmd); +static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, + void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, + int cmd_type); +static void hpsa_free_cmd_pool(struct ctlr_info *h); +#define VPD_PAGE (1 << 8) +#define HPSA_SIMPLE_ERROR_BITS 0x03 + +static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); +static void hpsa_scan_start(struct Scsi_Host *); +static int hpsa_scan_finished(struct Scsi_Host *sh, + unsigned long 
elapsed_time); +static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth); + +static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); +static int hpsa_slave_alloc(struct scsi_device *sdev); +static int hpsa_slave_configure(struct scsi_device *sdev); +static void hpsa_slave_destroy(struct scsi_device *sdev); + +static void hpsa_update_scsi_devices(struct ctlr_info *h); +static int check_for_unit_attention(struct ctlr_info *h, + struct CommandList *c); +static void check_ioctl_unit_attention(struct ctlr_info *h, + struct CommandList *c); +/* performant mode helper functions */ +static void calc_bucket_map(int *bucket, int num_buckets, + int nsgs, int min_blocks, u32 *bucket_map); +static void hpsa_free_performant_mode(struct ctlr_info *h); +static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); +static inline u32 next_command(struct ctlr_info *h, u8 q); +static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, + u32 *cfg_base_addr, u64 *cfg_base_addr_index, + u64 *cfg_offset); +static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, + unsigned long *memory_bar); +static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id, + bool *legacy_board); +static int wait_for_device_to_become_ready(struct ctlr_info *h, + unsigned char lunaddr[], + int reply_queue); +static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, + int wait_for_ready); +static inline void finish_cmd(struct CommandList *c); +static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h); +#define BOARD_NOT_READY 0 +#define BOARD_READY 1 +static void hpsa_drain_accel_commands(struct ctlr_info *h); +static void hpsa_flush_cache(struct ctlr_info *h); +static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, + struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, + u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk); +static void hpsa_command_resubmit_worker(struct work_struct *work); +static u32 lockup_detected(struct ctlr_info *h); +static int detect_controller_lockup(struct ctlr_info *h); +static void hpsa_disable_rld_caching(struct ctlr_info *h); +static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, + struct ReportExtendedLUNdata *buf, int bufsize); +static bool hpsa_vpd_page_supported(struct ctlr_info *h, + unsigned char scsi3addr[], u8 page); +static int hpsa_luns_changed(struct ctlr_info *h); +static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, + struct hpsa_scsi_dev_t *dev, + unsigned char *scsi3addr); + +static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) +{ + unsigned long *priv = shost_priv(sdev->host); + return (struct ctlr_info *) *priv; +} + +static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh) +{ + unsigned long *priv = shost_priv(sh); + return (struct ctlr_info *) *priv; +} + +static inline bool hpsa_is_cmd_idle(struct CommandList *c) +{ + return c->scsi_cmd == SCSI_CMD_IDLE; +} + +/* extract sense key, asc, and ascq from sense data. -1 means invalid. 
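+ * (The out parameters are u8, so the "-1" is stored as 0xff, which is what + * callers such as check_for_unit_attention() test against.)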
*/ +static void decode_sense_data(const u8 *sense_data, int sense_data_len, + u8 *sense_key, u8 *asc, u8 *ascq) +{ + struct scsi_sense_hdr sshdr; + bool rc; + + *sense_key = -1; + *asc = -1; + *ascq = -1; + + if (sense_data_len < 1) + return; + + rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr); + if (rc) { + *sense_key = sshdr.sense_key; + *asc = sshdr.asc; + *ascq = sshdr.ascq; + } +} + +static int check_for_unit_attention(struct ctlr_info *h, + struct CommandList *c) +{ + u8 sense_key, asc, ascq; + int sense_len; + + if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) + sense_len = sizeof(c->err_info->SenseInfo); + else + sense_len = c->err_info->SenseLen; + + decode_sense_data(c->err_info->SenseInfo, sense_len, + &sense_key, &asc, &ascq); + if (sense_key != UNIT_ATTENTION || asc == 0xff) + return 0; + + switch (asc) { + case STATE_CHANGED: + dev_warn(&h->pdev->dev, + "%s: a state change detected, command retried\n", + h->devname); + break; + case LUN_FAILED: + dev_warn(&h->pdev->dev, + "%s: LUN failure detected\n", h->devname); + break; + case REPORT_LUNS_CHANGED: + dev_warn(&h->pdev->dev, + "%s: report LUN data changed\n", h->devname); + /* + * Note: this REPORT_LUNS_CHANGED condition only occurs on the external + * target (array) devices. + */ + break; + case POWER_OR_RESET: + dev_warn(&h->pdev->dev, + "%s: a power on or device reset detected\n", + h->devname); + break; + case UNIT_ATTENTION_CLEARED: + dev_warn(&h->pdev->dev, + "%s: unit attention cleared by another initiator\n", + h->devname); + break; + default: + dev_warn(&h->pdev->dev, + "%s: unknown unit attention detected\n", + h->devname); + break; + } + return 1; +} + +static int check_for_busy(struct ctlr_info *h, struct CommandList *c) +{ + if (c->err_info->CommandStatus != CMD_TARGET_STATUS || + (c->err_info->ScsiStatus != SAM_STAT_BUSY && + c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL)) + return 0; + dev_warn(&h->pdev->dev, HPSA "device busy"); + return 1; +} + +static u32 lockup_detected(struct ctlr_info *h); +static ssize_t host_show_lockup_detected(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ld; + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + ld = lockup_detected(h); + + return sprintf(buf, "ld=%d\n", ld); +} + +static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int status, len; + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + char tmpbuf[10]; + + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) + return -EACCES; + len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; + strncpy(tmpbuf, buf, len); + tmpbuf[len] = '\0'; + if (sscanf(tmpbuf, "%d", &status) != 1) + return -EINVAL; + h = shost_to_hba(shost); + h->acciopath_status = !!status; + dev_warn(&h->pdev->dev, + "hpsa: HP SSD Smart Path %s via sysfs update.\n", + h->acciopath_status ? "enabled" : "disabled"); + return count; +} + +static ssize_t host_store_raid_offload_debug(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int debug_level, len; + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + char tmpbuf[10]; + + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) + return -EACCES; + len = count > sizeof(tmpbuf) - 1 ? 
sizeof(tmpbuf) - 1 : count; + strncpy(tmpbuf, buf, len); + tmpbuf[len] = '\0'; + if (sscanf(tmpbuf, "%d", &debug_level) != 1) + return -EINVAL; + if (debug_level < 0) + debug_level = 0; + h = shost_to_hba(shost); + h->raid_offload_debug = debug_level; + dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n", + h->raid_offload_debug); + return count; +} + +static ssize_t host_store_rescan(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + h = shost_to_hba(shost); + hpsa_scan_start(h->scsi_host); + return count; +} + +static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device) +{ + device->offload_enabled = 0; + device->offload_to_be_enabled = 0; +} + +static ssize_t host_show_firmware_revision(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + unsigned char *fwrev; + + h = shost_to_hba(shost); + if (!h->hba_inquiry_data) + return 0; + fwrev = &h->hba_inquiry_data[32]; + return snprintf(buf, 20, "%c%c%c%c\n", + fwrev[0], fwrev[1], fwrev[2], fwrev[3]); +} + +static ssize_t host_show_commands_outstanding(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ctlr_info *h = shost_to_hba(shost); + + return snprintf(buf, 20, "%d\n", + atomic_read(&h->commands_outstanding)); +} + +static ssize_t host_show_transport_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + return snprintf(buf, 20, "%s\n", + h->transMethod & CFGTBL_Trans_Performant ? + "performant" : "simple"); +} + +static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + return snprintf(buf, 30, "HP SSD Smart Path %s\n", + (h->acciopath_status == 1) ? "enabled" : "disabled"); +} + +/* List of controllers which cannot be hard reset on kexec with reset_devices */ +static u32 unresettable_controller[] = { + 0x324a103C, /* Smart Array P712m */ + 0x324b103C, /* Smart Array P711m */ + 0x3223103C, /* Smart Array P800 */ + 0x3234103C, /* Smart Array P400 */ + 0x3235103C, /* Smart Array P400i */ + 0x3211103C, /* Smart Array E200i */ + 0x3212103C, /* Smart Array E200 */ + 0x3213103C, /* Smart Array E200i */ + 0x3214103C, /* Smart Array E200i */ + 0x3215103C, /* Smart Array E200i */ + 0x3237103C, /* Smart Array E500 */ + 0x323D103C, /* Smart Array P700m */ + 0x40800E11, /* Smart Array 5i */ + 0x409C0E11, /* Smart Array 6400 */ + 0x409D0E11, /* Smart Array 6400 EM */ + 0x40700E11, /* Smart Array 5300 */ + 0x40820E11, /* Smart Array 532 */ + 0x40830E11, /* Smart Array 5312 */ + 0x409A0E11, /* Smart Array 641 */ + 0x409B0E11, /* Smart Array 642 */ + 0x40910E11, /* Smart Array 6i */ +}; + +/* List of controllers which cannot even be soft reset */ +static u32 soft_unresettable_controller[] = { + 0x40800E11, /* Smart Array 5i */ + 0x40700E11, /* Smart Array 5300 */ + 0x40820E11, /* Smart Array 532 */ + 0x40830E11, /* Smart Array 5312 */ + 0x409A0E11, /* Smart Array 641 */ + 0x409B0E11, /* Smart Array 642 */ + 0x40910E11, /* Smart Array 6i */ + /* Exclude 640x boards. These are two pci devices in one slot + * which share a battery backed cache module. 
One controls the + * cache, the other accesses the cache through the one that controls + * it. If we reset the one controlling the cache, the other will + * likely not be happy. Just forbid resetting this conjoined mess. + * The 640x isn't really supported by hpsa anyway. + */ + 0x409C0E11, /* Smart Array 6400 */ + 0x409D0E11, /* Smart Array 6400 EM */ +}; + +static int board_id_in_array(u32 a[], int nelems, u32 board_id) +{ + int i; + + for (i = 0; i < nelems; i++) + if (a[i] == board_id) + return 1; + return 0; +} + +static int ctlr_is_hard_resettable(u32 board_id) +{ + return !board_id_in_array(unresettable_controller, + ARRAY_SIZE(unresettable_controller), board_id); +} + +static int ctlr_is_soft_resettable(u32 board_id) +{ + return !board_id_in_array(soft_unresettable_controller, + ARRAY_SIZE(soft_unresettable_controller), board_id); +} + +static int ctlr_is_resettable(u32 board_id) +{ + return ctlr_is_hard_resettable(board_id) || + ctlr_is_soft_resettable(board_id); +} + +static ssize_t host_show_resettable(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id)); +} + +static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) +{ + return (scsi3addr[3] & 0xC0) == 0x40; +} + +static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6", + "1(+0)ADM", "UNKNOWN", "PHYS DRV" +}; +#define HPSA_RAID_0 0 +#define HPSA_RAID_4 1 +#define HPSA_RAID_1 2 /* also used for RAID 10 */ +#define HPSA_RAID_5 3 /* also used for RAID 50 */ +#define HPSA_RAID_51 4 +#define HPSA_RAID_6 5 /* also used for RAID 60 */ +#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ +#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2) +#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1) + +static inline bool is_logical_device(struct hpsa_scsi_dev_t *device) +{ + return !device->physical_device; +} + +static ssize_t raid_level_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t l = 0; + unsigned char rlevel; + struct ctlr_info *h; + struct scsi_device *sdev; + struct hpsa_scsi_dev_t *hdev; + unsigned long flags; + + sdev = to_scsi_device(dev); + h = sdev_to_hba(sdev); + spin_lock_irqsave(&h->lock, flags); + hdev = sdev->hostdata; + if (!hdev) { + spin_unlock_irqrestore(&h->lock, flags); + return -ENODEV; + } + + /* Is this even a logical drive? 
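+ * Physical drives and other non-logical devices have no RAID level, so + * they report "N/A" below.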
*/ + if (!is_logical_device(hdev)) { + spin_unlock_irqrestore(&h->lock, flags); + l = snprintf(buf, PAGE_SIZE, "N/A\n"); + return l; + } + + rlevel = hdev->raid_level; + spin_unlock_irqrestore(&h->lock, flags); + if (rlevel > RAID_UNKNOWN) + rlevel = RAID_UNKNOWN; + l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]); + return l; +} + +static ssize_t lunid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct scsi_device *sdev; + struct hpsa_scsi_dev_t *hdev; + unsigned long flags; + unsigned char lunid[8]; + + sdev = to_scsi_device(dev); + h = sdev_to_hba(sdev); + spin_lock_irqsave(&h->lock, flags); + hdev = sdev->hostdata; + if (!hdev) { + spin_unlock_irqrestore(&h->lock, flags); + return -ENODEV; + } + memcpy(lunid, hdev->scsi3addr, sizeof(lunid)); + spin_unlock_irqrestore(&h->lock, flags); + return snprintf(buf, 20, "0x%8phN\n", lunid); +} + +static ssize_t unique_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct scsi_device *sdev; + struct hpsa_scsi_dev_t *hdev; + unsigned long flags; + unsigned char sn[16]; + + sdev = to_scsi_device(dev); + h = sdev_to_hba(sdev); + spin_lock_irqsave(&h->lock, flags); + hdev = sdev->hostdata; + if (!hdev) { + spin_unlock_irqrestore(&h->lock, flags); + return -ENODEV; + } + memcpy(sn, hdev->device_id, sizeof(sn)); + spin_unlock_irqrestore(&h->lock, flags); + return snprintf(buf, 16 * 2 + 2, + "%02X%02X%02X%02X%02X%02X%02X%02X" + "%02X%02X%02X%02X%02X%02X%02X%02X\n", + sn[0], sn[1], sn[2], sn[3], + sn[4], sn[5], sn[6], sn[7], + sn[8], sn[9], sn[10], sn[11], + sn[12], sn[13], sn[14], sn[15]); +} + +static ssize_t sas_address_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct scsi_device *sdev; + struct hpsa_scsi_dev_t *hdev; + unsigned long flags; + u64 sas_address; + + sdev = to_scsi_device(dev); + h = sdev_to_hba(sdev); + spin_lock_irqsave(&h->lock, flags); + hdev = sdev->hostdata; + if (!hdev || is_logical_device(hdev) || !hdev->expose_device) { + spin_unlock_irqrestore(&h->lock, flags); + return -ENODEV; + } + sas_address = hdev->sas_address; + spin_unlock_irqrestore(&h->lock, flags); + + return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address); +} + +static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct scsi_device *sdev; + struct hpsa_scsi_dev_t *hdev; + unsigned long flags; + int offload_enabled; + + sdev = to_scsi_device(dev); + h = sdev_to_hba(sdev); + spin_lock_irqsave(&h->lock, flags); + hdev = sdev->hostdata; + if (!hdev) { + spin_unlock_irqrestore(&h->lock, flags); + return -ENODEV; + } + offload_enabled = hdev->offload_enabled; + spin_unlock_irqrestore(&h->lock, flags); + + if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) + return snprintf(buf, 20, "%d\n", offload_enabled); + else + return snprintf(buf, 40, "%s\n", + "Not applicable for a controller"); +} + +#define MAX_PATHS 8 +static ssize_t path_info_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct scsi_device *sdev; + struct hpsa_scsi_dev_t *hdev; + unsigned long flags; + int i; + int output_len = 0; + u8 box; + u8 bay; + u8 path_map_index = 0; + char *active; + unsigned char phys_connector[2]; + + sdev = to_scsi_device(dev); + h = sdev_to_hba(sdev); + spin_lock_irqsave(&h->devlock, flags); + hdev = sdev->hostdata; + if (!hdev) { + 
spin_unlock_irqrestore(&h->devlock, flags); + return -ENODEV; + } + + bay = hdev->bay; + for (i = 0; i < MAX_PATHS; i++) { + path_map_index = 1<<i; + if (i == hdev->active_path_index) + active = "Active"; + else if (hdev->path_map & path_map_index) + active = "Inactive"; + else + continue; + + output_len += scnprintf(buf + output_len, + PAGE_SIZE - output_len, + "[%d:%d:%d:%d] %20.20s ", + h->scsi_host->host_no, + hdev->bus, hdev->target, hdev->lun, + scsi_device_type(hdev->devtype)); + + if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) { + output_len += scnprintf(buf + output_len, + PAGE_SIZE - output_len, + "%s\n", active); + continue; + } + + box = hdev->box[i]; + memcpy(&phys_connector, &hdev->phys_connector[i], + sizeof(phys_connector)); + if (phys_connector[0] < '0') + phys_connector[0] = '0'; + if (phys_connector[1] < '0') + phys_connector[1] = '0'; + output_len += scnprintf(buf + output_len, + PAGE_SIZE - output_len, + "PORT: %.2s ", + phys_connector); + if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) && + hdev->expose_device) { + if (box == 0 || box == 0xFF) { + output_len += scnprintf(buf + output_len, + PAGE_SIZE - output_len, + "BAY: %hhu %s\n", + bay, active); + } else { + output_len += scnprintf(buf + output_len, + PAGE_SIZE - output_len, + "BOX: %hhu BAY: %hhu %s\n", + box, bay, active); + } + } else if (box != 0 && box != 0xFF) { + output_len += scnprintf(buf + output_len, + PAGE_SIZE - output_len, "BOX: %hhu %s\n", + box, active); + } else + output_len += scnprintf(buf + output_len, + PAGE_SIZE - output_len, "%s\n", active); + } + + spin_unlock_irqrestore(&h->devlock, flags); + return output_len; +} + +static ssize_t host_show_ctlr_num(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + return snprintf(buf, 20, "%d\n", h->ctlr); +} + +static ssize_t host_show_legacy_board(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + return snprintf(buf, 20, "%d\n", h->legacy_board ?
1 : 0); +} + +static DEVICE_ATTR_RO(raid_level); +static DEVICE_ATTR_RO(lunid); +static DEVICE_ATTR_RO(unique_id); +static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); +static DEVICE_ATTR_RO(sas_address); +static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO, + host_show_hp_ssd_smart_path_enabled, NULL); +static DEVICE_ATTR_RO(path_info); +static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH, + host_show_hp_ssd_smart_path_status, + host_store_hp_ssd_smart_path_status); +static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL, + host_store_raid_offload_debug); +static DEVICE_ATTR(firmware_revision, S_IRUGO, + host_show_firmware_revision, NULL); +static DEVICE_ATTR(commands_outstanding, S_IRUGO, + host_show_commands_outstanding, NULL); +static DEVICE_ATTR(transport_mode, S_IRUGO, + host_show_transport_mode, NULL); +static DEVICE_ATTR(resettable, S_IRUGO, + host_show_resettable, NULL); +static DEVICE_ATTR(lockup_detected, S_IRUGO, + host_show_lockup_detected, NULL); +static DEVICE_ATTR(ctlr_num, S_IRUGO, + host_show_ctlr_num, NULL); +static DEVICE_ATTR(legacy_board, S_IRUGO, + host_show_legacy_board, NULL); + +static struct attribute *hpsa_sdev_attrs[] = { + &dev_attr_raid_level.attr, + &dev_attr_lunid.attr, + &dev_attr_unique_id.attr, + &dev_attr_hp_ssd_smart_path_enabled.attr, + &dev_attr_path_info.attr, + &dev_attr_sas_address.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(hpsa_sdev); + +static struct attribute *hpsa_shost_attrs[] = { + &dev_attr_rescan.attr, + &dev_attr_firmware_revision.attr, + &dev_attr_commands_outstanding.attr, + &dev_attr_transport_mode.attr, + &dev_attr_resettable.attr, + &dev_attr_hp_ssd_smart_path_status.attr, + &dev_attr_raid_offload_debug.attr, + &dev_attr_lockup_detected.attr, + &dev_attr_ctlr_num.attr, + &dev_attr_legacy_board.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(hpsa_shost); + +#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\ + HPSA_MAX_CONCURRENT_PASSTHRUS) + +static const struct scsi_host_template hpsa_driver_template = { + .module = THIS_MODULE, + .name = HPSA, + .proc_name = HPSA, + .queuecommand = hpsa_scsi_queue_command, + .scan_start = hpsa_scan_start, + .scan_finished = hpsa_scan_finished, + .change_queue_depth = hpsa_change_queue_depth, + .this_id = -1, + .eh_device_reset_handler = hpsa_eh_device_reset_handler, + .ioctl = hpsa_ioctl, + .slave_alloc = hpsa_slave_alloc, + .slave_configure = hpsa_slave_configure, + .slave_destroy = hpsa_slave_destroy, +#ifdef CONFIG_COMPAT + .compat_ioctl = hpsa_compat_ioctl, +#endif + .sdev_groups = hpsa_sdev_groups, + .shost_groups = hpsa_shost_groups, + .max_sectors = 2048, + .no_write_same = 1, +}; + +static inline u32 next_command(struct ctlr_info *h, u8 q) +{ + u32 a; + struct reply_queue_buffer *rq = &h->reply_queue[q]; + + if (h->transMethod & CFGTBL_Trans_io_accel1) + return h->access.command_completed(h, q); + + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) + return h->access.command_completed(h, q); + + if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { + a = rq->head[rq->current_entry]; + rq->current_entry++; + atomic_dec(&h->commands_outstanding); + } else { + a = FIFO_EMPTY; + } + /* Check for wraparound */ + if (rq->current_entry == h->max_commands) { + rq->current_entry = 0; + rq->wraparound ^= 1; + } + return a; +} + +/* + * There are some special bits in the bus address of the + * command that we have to set for the controller to know + * how to process the command: + * + * Normal performant mode: + * bit 0: 1 means performant mode, 0 means simple mode. 
+ * bits 1-3 = block fetch table entry + * bits 4-6 = command type (== 0) + * + * ioaccel1 mode: + * bit 0 = "performant mode" bit. + * bits 1-3 = block fetch table entry + * bits 4-6 = command type (== 110) + * (command type is needed because ioaccel1 mode + * commands are submitted through the same register as normal + * mode commands, so this is how the controller knows whether + * the command is normal mode or ioaccel1 mode.) + * + * ioaccel2 mode: + * bit 0 = "performant mode" bit. + * bits 1-4 = block fetch table entry (note extra bit) + * bits 4-6 = not needed, because ioaccel2 mode has + * a separate special register for submitting commands. + */ + +/* + * set_performant_mode: Modify the tag for cciss performant + * set bit 0 for pull model, bits 3-1 for block fetch + * register number + */ +#define DEFAULT_REPLY_QUEUE (-1) +static void set_performant_mode(struct ctlr_info *h, struct CommandList *c, + int reply_queue) +{ + if (likely(h->transMethod & CFGTBL_Trans_Performant)) { + c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); + if (unlikely(!h->msix_vectors)) + return; + c->Header.ReplyQueue = reply_queue; + } +} + +static void set_ioaccel1_performant_mode(struct ctlr_info *h, + struct CommandList *c, + int reply_queue) +{ + struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; + + /* + * Tell the controller to post the reply to the queue for this + * processor. This seems to give the best I/O throughput. + */ + cp->ReplyQueue = reply_queue; + /* + * Set the bits in the address sent down to include: + * - performant mode bit (bit 0) + * - pull count (bits 1-3) + * - command type (bits 4-6) + */ + c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) | + IOACCEL1_BUSADDR_CMDTYPE; +} + +static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h, + struct CommandList *c, + int reply_queue) +{ + struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *) + &h->ioaccel2_cmd_pool[c->cmdindex]; + + /* Tell the controller to post the reply to the queue for this + * processor. This seems to give the best I/O throughput. + */ + cp->reply_queue = reply_queue; + /* Set the bits in the address sent down to include: + * - performant mode bit not used in ioaccel mode 2 + * - pull count (bits 0-3) + * - command type isn't needed for ioaccel2 + */ + c->busaddr |= h->ioaccel2_blockFetchTable[0]; +} + +static void set_ioaccel2_performant_mode(struct ctlr_info *h, + struct CommandList *c, + int reply_queue) +{ + struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; + + /* + * Tell the controller to post the reply to the queue for this + * processor. This seems to give the best I/O throughput. + */ + cp->reply_queue = reply_queue; + /* + * Set the bits in the address sent down to include: + * - performant mode bit not used in ioaccel mode 2 + * - pull count (bits 0-3) + * - command type isn't needed for ioaccel2 + */ + c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]); +} + +static int is_firmware_flash_cmd(u8 *cdb) +{ + return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE; +} + +/* + * During firmware flash, the heartbeat register may not update as frequently + * as it should. So we dial down lockup detection during firmware flash. and + * dial it back up when firmware flash completes. 
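+ * While a flash is in progress, the sample interval defined below stretches + * from the normal 30 seconds to 240 seconds.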
+ */ +#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ) +#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ) +#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ) +static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h, + struct CommandList *c) +{ + if (!is_firmware_flash_cmd(c->Request.CDB)) + return; + atomic_inc(&h->firmware_flash_in_progress); + h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH; +} + +static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h, + struct CommandList *c) +{ + if (is_firmware_flash_cmd(c->Request.CDB) && + atomic_dec_and_test(&h->firmware_flash_in_progress)) + h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; +} + +static void __enqueue_cmd_and_start_io(struct ctlr_info *h, + struct CommandList *c, int reply_queue) +{ + dial_down_lockup_detection_during_fw_flash(h, c); + atomic_inc(&h->commands_outstanding); + /* + * Check to see if the command is being retried. + */ + if (c->device && !c->retry_pending) + atomic_inc(&c->device->commands_outstanding); + + reply_queue = h->reply_map[raw_smp_processor_id()]; + switch (c->cmd_type) { + case CMD_IOACCEL1: + set_ioaccel1_performant_mode(h, c, reply_queue); + writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); + break; + case CMD_IOACCEL2: + set_ioaccel2_performant_mode(h, c, reply_queue); + writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); + break; + case IOACCEL2_TMF: + set_ioaccel2_tmf_performant_mode(h, c, reply_queue); + writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); + break; + default: + set_performant_mode(h, c, reply_queue); + h->access.submit_command(h, c); + } +} + +static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c) +{ + __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE); +} + +static inline int is_hba_lunid(unsigned char scsi3addr[]) +{ + return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; +} + +static inline int is_scsi_rev_5(struct ctlr_info *h) +{ + if (!h->hba_inquiry_data) + return 0; + if ((h->hba_inquiry_data[2] & 0x07) == 5) + return 1; + return 0; +} + +static int hpsa_find_target_lun(struct ctlr_info *h, + unsigned char scsi3addr[], int bus, int *target, int *lun) +{ + /* finds an unused bus, target, lun for a new physical device + * assumes h->devlock is held + */ + int i, found = 0; + DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES); + + bitmap_zero(lun_taken, HPSA_MAX_DEVICES); + + for (i = 0; i < h->ndevices; i++) { + if (h->dev[i]->bus == bus && h->dev[i]->target != -1) + __set_bit(h->dev[i]->target, lun_taken); + } + + i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES); + if (i < HPSA_MAX_DEVICES) { + /* *bus = 1; */ + *target = i; + *lun = 0; + found = 1; + } + return !found; +} + +static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h, + struct hpsa_scsi_dev_t *dev, char *description) +{ +#define LABEL_SIZE 25 + char label[LABEL_SIZE]; + + if (h == NULL || h->pdev == NULL || h->scsi_host == NULL) + return; + + switch (dev->devtype) { + case TYPE_RAID: + snprintf(label, LABEL_SIZE, "controller"); + break; + case TYPE_ENCLOSURE: + snprintf(label, LABEL_SIZE, "enclosure"); + break; + case TYPE_DISK: + case TYPE_ZBC: + if (dev->external) + snprintf(label, LABEL_SIZE, "external"); + else if (!is_logical_dev_addr_mode(dev->scsi3addr)) + snprintf(label, LABEL_SIZE, "%s", + raid_label[PHYSICAL_DRIVE]); + else + snprintf(label, LABEL_SIZE, "RAID-%s", + dev->raid_level > RAID_UNKNOWN ? "?" 
: + raid_label[dev->raid_level]); + break; + case TYPE_ROM: + snprintf(label, LABEL_SIZE, "rom"); + break; + case TYPE_TAPE: + snprintf(label, LABEL_SIZE, "tape"); + break; + case TYPE_MEDIUM_CHANGER: + snprintf(label, LABEL_SIZE, "changer"); + break; + default: + snprintf(label, LABEL_SIZE, "UNKNOWN"); + break; + } + + dev_printk(level, &h->pdev->dev, + "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n", + h->scsi_host->host_no, dev->bus, dev->target, dev->lun, + description, + scsi_device_type(dev->devtype), + dev->vendor, + dev->model, + label, + dev->offload_config ? '+' : '-', + dev->offload_to_be_enabled ? '+' : '-', + dev->expose_device); +} + +/* Add an entry into h->dev[] array. */ +static int hpsa_scsi_add_entry(struct ctlr_info *h, + struct hpsa_scsi_dev_t *device, + struct hpsa_scsi_dev_t *added[], int *nadded) +{ + /* assumes h->devlock is held */ + int n = h->ndevices; + int i; + unsigned char addr1[8], addr2[8]; + struct hpsa_scsi_dev_t *sd; + + if (n >= HPSA_MAX_DEVICES) { + dev_err(&h->pdev->dev, "too many devices, some will be " + "inaccessible.\n"); + return -1; + } + + /* physical devices do not have lun or target assigned until now. */ + if (device->lun != -1) + /* Logical device, lun is already assigned. */ + goto lun_assigned; + + /* If this device a non-zero lun of a multi-lun device + * byte 4 of the 8-byte LUN addr will contain the logical + * unit no, zero otherwise. + */ + if (device->scsi3addr[4] == 0) { + /* This is not a non-zero lun of a multi-lun device */ + if (hpsa_find_target_lun(h, device->scsi3addr, + device->bus, &device->target, &device->lun) != 0) + return -1; + goto lun_assigned; + } + + /* This is a non-zero lun of a multi-lun device. + * Search through our list and find the device which + * has the same 8 byte LUN address, excepting byte 4 and 5. + * Assign the same bus and target for this new LUN. + * Use the logical unit number from the firmware. + */ + memcpy(addr1, device->scsi3addr, 8); + addr1[4] = 0; + addr1[5] = 0; + for (i = 0; i < n; i++) { + sd = h->dev[i]; + memcpy(addr2, sd->scsi3addr, 8); + addr2[4] = 0; + addr2[5] = 0; + /* differ only in byte 4 and 5? */ + if (memcmp(addr1, addr2, 8) == 0) { + device->bus = sd->bus; + device->target = sd->target; + device->lun = device->scsi3addr[4]; + break; + } + } + if (device->lun == -1) { + dev_warn(&h->pdev->dev, "physical device with no LUN=0," + " suspect firmware bug or unsupported hardware " + "configuration.\n"); + return -1; + } + +lun_assigned: + + h->dev[n] = device; + h->ndevices++; + added[*nadded] = device; + (*nadded)++; + hpsa_show_dev_msg(KERN_INFO, h, device, + device->expose_device ? "added" : "masked"); + return 0; +} + +/* + * Called during a scan operation. + * + * Update an entry in h->dev[] array. + */ +static void hpsa_scsi_update_entry(struct ctlr_info *h, + int entry, struct hpsa_scsi_dev_t *new_entry) +{ + /* assumes h->devlock is held */ + BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); + + /* Raid level changed. */ + h->dev[entry]->raid_level = new_entry->raid_level; + + /* + * ioacccel_handle may have changed for a dual domain disk + */ + h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; + + /* Raid offload parameters changed. Careful about the ordering. */ + if (new_entry->offload_config && new_entry->offload_to_be_enabled) { + /* + * if drive is newly offload_enabled, we want to copy the + * raid map data first. If previously offload_enabled and + * offload_config were set, raid map data had better be + * the same as it was before. 
If raid map data has changed + * then it had better be the case that + * h->dev[entry]->offload_enabled is currently 0. + */ + h->dev[entry]->raid_map = new_entry->raid_map; + h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; + } + if (new_entry->offload_to_be_enabled) { + h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; + wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */ + } + h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled; + h->dev[entry]->offload_config = new_entry->offload_config; + h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; + h->dev[entry]->queue_depth = new_entry->queue_depth; + + /* + * We can turn off ioaccel offload now, but need to delay turning + * ioaccel on until we can update h->dev[entry]->phys_disk[], but we + * can't do that until all the devices are updated. + */ + h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled; + + /* + * turn ioaccel off immediately if told to do so. + */ + if (!new_entry->offload_to_be_enabled) + h->dev[entry]->offload_enabled = 0; + + hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated"); +} + +/* Replace an entry from h->dev[] array. */ +static void hpsa_scsi_replace_entry(struct ctlr_info *h, + int entry, struct hpsa_scsi_dev_t *new_entry, + struct hpsa_scsi_dev_t *added[], int *nadded, + struct hpsa_scsi_dev_t *removed[], int *nremoved) +{ + /* assumes h->devlock is held */ + BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); + removed[*nremoved] = h->dev[entry]; + (*nremoved)++; + + /* + * New physical devices won't have target/lun assigned yet + * so we need to preserve the values in the slot we are replacing. + */ + if (new_entry->target == -1) { + new_entry->target = h->dev[entry]->target; + new_entry->lun = h->dev[entry]->lun; + } + + h->dev[entry] = new_entry; + added[*nadded] = new_entry; + (*nadded)++; + + hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced"); +} + +/* Remove an entry from h->dev[] array. */ +static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry, + struct hpsa_scsi_dev_t *removed[], int *nremoved) +{ + /* assumes h->devlock is held */ + int i; + struct hpsa_scsi_dev_t *sd; + + BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); + + sd = h->dev[entry]; + removed[*nremoved] = h->dev[entry]; + (*nremoved)++; + + for (i = entry; i < h->ndevices-1; i++) + h->dev[i] = h->dev[i+1]; + h->ndevices--; + hpsa_show_dev_msg(KERN_INFO, h, sd, "removed"); +} + +#define SCSI3ADDR_EQ(a, b) ( \ + (a)[7] == (b)[7] && \ + (a)[6] == (b)[6] && \ + (a)[5] == (b)[5] && \ + (a)[4] == (b)[4] && \ + (a)[3] == (b)[3] && \ + (a)[2] == (b)[2] && \ + (a)[1] == (b)[1] && \ + (a)[0] == (b)[0]) + +static void fixup_botched_add(struct ctlr_info *h, + struct hpsa_scsi_dev_t *added) +{ + /* called when scsi_add_device fails in order to re-adjust + * h->dev[] to match the mid layer's view. + */ + unsigned long flags; + int i, j; + + spin_lock_irqsave(&h->lock, flags); + for (i = 0; i < h->ndevices; i++) { + if (h->dev[i] == added) { + for (j = i; j < h->ndevices-1; j++) + h->dev[j] = h->dev[j+1]; + h->ndevices--; + break; + } + } + spin_unlock_irqrestore(&h->lock, flags); + kfree(added); +} + +static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1, + struct hpsa_scsi_dev_t *dev2) +{ + /* we compare everything except lun and target as these + * are not yet assigned. 
Compare parts likely + * to differ first + */ + if (memcmp(dev1->scsi3addr, dev2->scsi3addr, + sizeof(dev1->scsi3addr)) != 0) + return 0; + if (memcmp(dev1->device_id, dev2->device_id, + sizeof(dev1->device_id)) != 0) + return 0; + if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0) + return 0; + if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0) + return 0; + if (dev1->devtype != dev2->devtype) + return 0; + if (dev1->bus != dev2->bus) + return 0; + return 1; +} + +static inline int device_updated(struct hpsa_scsi_dev_t *dev1, + struct hpsa_scsi_dev_t *dev2) +{ + /* Device attributes that can change, but don't mean + * that the device is a different device, nor that the OS + * needs to be told anything about the change. + */ + if (dev1->raid_level != dev2->raid_level) + return 1; + if (dev1->offload_config != dev2->offload_config) + return 1; + if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled) + return 1; + if (!is_logical_dev_addr_mode(dev1->scsi3addr)) + if (dev1->queue_depth != dev2->queue_depth) + return 1; + /* + * This can happen for dual domain devices. An active + * path change causes the ioaccel handle to change + * + * for example note the handle differences between p0 and p1 + * Device WWN ,WWN hash,Handle + * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003 + * p1 0x5000C5005FC4DAC9,0x6798C0,0x00040004 + */ + if (dev1->ioaccel_handle != dev2->ioaccel_handle) + return 1; + return 0; +} + +/* Find needle in haystack. If exact match found, return DEVICE_SAME, + * and return needle location in *index. If scsi3addr matches, but not + * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle + * location in *index. + * In the case of a minor device attribute change, such as RAID level, just + * return DEVICE_UPDATED, along with the updated device's location in index. + * If needle not found, return DEVICE_NOT_FOUND. + */ +static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, + struct hpsa_scsi_dev_t *haystack[], int haystack_size, + int *index) +{ + int i; +#define DEVICE_NOT_FOUND 0 +#define DEVICE_CHANGED 1 +#define DEVICE_SAME 2 +#define DEVICE_UPDATED 3 + if (needle == NULL) + return DEVICE_NOT_FOUND; + + for (i = 0; i < haystack_size; i++) { + if (haystack[i] == NULL) /* previously removed. */ + continue; + if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { + *index = i; + if (device_is_the_same(needle, haystack[i])) { + if (device_updated(needle, haystack[i])) + return DEVICE_UPDATED; + return DEVICE_SAME; + } else { + /* Keep offline devices offline */ + if (needle->volume_offline) + return DEVICE_NOT_FOUND; + return DEVICE_CHANGED; + } + } + } + *index = -1; + return DEVICE_NOT_FOUND; +} + +static void hpsa_monitor_offline_device(struct ctlr_info *h, + unsigned char scsi3addr[]) +{ + struct offline_device_entry *device; + unsigned long flags; + + /* Check to see if device is already on the list */ + spin_lock_irqsave(&h->offline_device_lock, flags); + list_for_each_entry(device, &h->offline_device_list, offline_list) { + if (memcmp(device->scsi3addr, scsi3addr, + sizeof(device->scsi3addr)) == 0) { + spin_unlock_irqrestore(&h->offline_device_lock, flags); + return; + } + } + spin_unlock_irqrestore(&h->offline_device_lock, flags); + + /* Device is not on the list, add it. 
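hpsa_scsi_find_entry() above classifies a scanned device against the existing table as not found, changed, the same, or merely updated. A user-space sketch of that classification over a simplified device record (the struct, fields, and helper name here are invented for illustration):

#include <stdio.h>
#include <string.h>

enum { DEV_NOT_FOUND, DEV_CHANGED, DEV_SAME, DEV_UPDATED };

struct example_dev {
	unsigned char addr[8];	/* stands in for the 8-byte scsi3addr */
	char model[16];		/* identity attribute */
	int raid_level;		/* minor attribute that may change in place */
};

static int example_find_entry(const struct example_dev *needle,
			      const struct example_dev *table, int n,
			      int *index)
{
	int i;

	for (i = 0; i < n; i++) {
		if (memcmp(needle->addr, table[i].addr, 8) != 0)
			continue;
		*index = i;
		if (strcmp(needle->model, table[i].model) != 0)
			return DEV_CHANGED;	/* same address, different device */
		if (needle->raid_level != table[i].raid_level)
			return DEV_UPDATED;	/* same device, minor attribute moved */
		return DEV_SAME;
	}
	*index = -1;
	return DEV_NOT_FOUND;
}

int main(void)
{
	struct example_dev table[1] = { { { 1, 2, 3, 4, 5, 6, 7, 8 }, "VOL0", 5 } };
	struct example_dev scan = { { 1, 2, 3, 4, 5, 6, 7, 8 }, "VOL0", 6 };
	int idx, res;

	res = example_find_entry(&scan, table, 1, &idx);
	printf("result=%d index=%d\n", res, idx);	/* DEV_UPDATED, 0 */
	return 0;
}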
*/ + device = kmalloc(sizeof(*device), GFP_KERNEL); + if (!device) + return; + + memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); + spin_lock_irqsave(&h->offline_device_lock, flags); + list_add_tail(&device->offline_list, &h->offline_device_list); + spin_unlock_irqrestore(&h->offline_device_lock, flags); +} + +/* Print a message explaining various offline volume states */ +static void hpsa_show_volume_status(struct ctlr_info *h, + struct hpsa_scsi_dev_t *sd) +{ + if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED) + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + switch (sd->volume_offline) { + case HPSA_LV_OK: + break; + case HPSA_LV_UNDERGOING_ERASE: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_NOT_AVAILABLE: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_UNDERGOING_RPI: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_PENDING_RPI: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_ENCRYPTED_NO_KEY: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_UNDERGOING_ENCRYPTION: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_PENDING_ENCRYPTION: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_PENDING_ENCRYPTION_REKEYING: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + } +} + +/* + * Figure the list of physical drive pointers for a logical drive with + * raid offload configured. 
+ */ +static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, + struct hpsa_scsi_dev_t *dev[], int ndevices, + struct hpsa_scsi_dev_t *logical_drive) +{ + struct raid_map_data *map = &logical_drive->raid_map; + struct raid_map_disk_data *dd = &map->data[0]; + int i, j; + int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + + le16_to_cpu(map->metadata_disks_per_row); + int nraid_map_entries = le16_to_cpu(map->row_cnt) * + le16_to_cpu(map->layout_map_count) * + total_disks_per_row; + int nphys_disk = le16_to_cpu(map->layout_map_count) * + total_disks_per_row; + int qdepth; + + if (nraid_map_entries > RAID_MAP_MAX_ENTRIES) + nraid_map_entries = RAID_MAP_MAX_ENTRIES; + + logical_drive->nphysical_disks = nraid_map_entries; + + qdepth = 0; + for (i = 0; i < nraid_map_entries; i++) { + logical_drive->phys_disk[i] = NULL; + if (!logical_drive->offload_config) + continue; + for (j = 0; j < ndevices; j++) { + if (dev[j] == NULL) + continue; + if (dev[j]->devtype != TYPE_DISK && + dev[j]->devtype != TYPE_ZBC) + continue; + if (is_logical_device(dev[j])) + continue; + if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle) + continue; + + logical_drive->phys_disk[i] = dev[j]; + if (i < nphys_disk) + qdepth = min(h->nr_cmds, qdepth + + logical_drive->phys_disk[i]->queue_depth); + break; + } + + /* + * This can happen if a physical drive is removed and + * the logical drive is degraded. In that case, the RAID + * map data will refer to a physical disk which isn't actually + * present. And in that case offload_enabled should already + * be 0, but we'll turn it off here just in case + */ + if (!logical_drive->phys_disk[i]) { + dev_warn(&h->pdev->dev, + "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n", + __func__, + h->scsi_host->host_no, logical_drive->bus, + logical_drive->target, logical_drive->lun); + hpsa_turn_off_ioaccel_for_device(logical_drive); + logical_drive->queue_depth = 8; + } + } + if (nraid_map_entries) + /* + * This is correct for reads, too high for full stripe writes, + * way too high for partial stripe writes + */ + logical_drive->queue_depth = qdepth; + else { + if (logical_drive->external) + logical_drive->queue_depth = EXTERNAL_QD; + else + logical_drive->queue_depth = h->nr_cmds; + } +} + +static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, + struct hpsa_scsi_dev_t *dev[], int ndevices) +{ + int i; + + for (i = 0; i < ndevices; i++) { + if (dev[i] == NULL) + continue; + if (dev[i]->devtype != TYPE_DISK && + dev[i]->devtype != TYPE_ZBC) + continue; + if (!is_logical_device(dev[i])) + continue; + + /* + * If offload is currently enabled, the RAID map and + * phys_disk[] assignment *better* not be changing + * because we would be changing ioaccel phsy_disk[] pointers + * on a ioaccel volume processing I/O requests. + * + * If an ioaccel volume status changed, initially because it was + * re-configured and thus underwent a transformation, or + * a drive failed, we would have received a state change + * request and ioaccel should have been turned off. When the + * transformation completes, we get another state change + * request to turn ioaccel back on. In this case, we need + * to update the ioaccel information. + * + * Thus: If it is not currently enabled, but will be after + * the scan completes, make sure the ioaccel pointers + * are up to date. 
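The sizing arithmetic at the top of hpsa_figure_phys_disk_ptrs() above reduces to a few integer expressions: total disks per row, number of raid-map entries, number of physical disks, and a clamp to the map's maximum. A stand-alone sketch with made-up geometry and an assumed 256-entry cap standing in for RAID_MAP_MAX_ENTRIES:

#include <stdio.h>

#define EXAMPLE_MAP_MAX_ENTRIES 256	/* assumed stand-in for RAID_MAP_MAX_ENTRIES */

struct example_raid_map {
	unsigned short data_disks_per_row;
	unsigned short metadata_disks_per_row;
	unsigned short row_cnt;
	unsigned short layout_map_count;
};

int main(void)
{
	struct example_raid_map map = { 4, 1, 2, 2 };	/* made-up geometry */
	int total_disks_per_row = map.data_disks_per_row +
				  map.metadata_disks_per_row;
	int nraid_map_entries = map.row_cnt * map.layout_map_count *
				total_disks_per_row;
	int nphys_disk = map.layout_map_count * total_disks_per_row;

	if (nraid_map_entries > EXAMPLE_MAP_MAX_ENTRIES)
		nraid_map_entries = EXAMPLE_MAP_MAX_ENTRIES;

	printf("entries=%d phys_disks=%d\n", nraid_map_entries, nphys_disk);
	return 0;
}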
+ */ + + if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled) + hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); + } +} + +static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) +{ + int rc = 0; + + if (!h->scsi_host) + return 1; + + if (is_logical_device(device)) /* RAID */ + rc = scsi_add_device(h->scsi_host, device->bus, + device->target, device->lun); + else /* HBA */ + rc = hpsa_add_sas_device(h->sas_host, device); + + return rc; +} + +static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h, + struct hpsa_scsi_dev_t *dev) +{ + int i; + int count = 0; + + for (i = 0; i < h->nr_cmds; i++) { + struct CommandList *c = h->cmd_pool + i; + int refcount = atomic_inc_return(&c->refcount); + + if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, + dev->scsi3addr)) { + unsigned long flags; + + spin_lock_irqsave(&h->lock, flags); /* Implied MB */ + if (!hpsa_is_cmd_idle(c)) + ++count; + spin_unlock_irqrestore(&h->lock, flags); + } + + cmd_free(h, c); + } + + return count; +} + +#define NUM_WAIT 20 +static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h, + struct hpsa_scsi_dev_t *device) +{ + int cmds = 0; + int waits = 0; + int num_wait = NUM_WAIT; + + if (device->external) + num_wait = HPSA_EH_PTRAID_TIMEOUT; + + while (1) { + cmds = hpsa_find_outstanding_commands_for_dev(h, device); + if (cmds == 0) + break; + if (++waits > num_wait) + break; + msleep(1000); + } + + if (waits > num_wait) { + dev_warn(&h->pdev->dev, + "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n", + __func__, + h->scsi_host->host_no, + device->bus, device->target, device->lun, cmds); + } +} + +static void hpsa_remove_device(struct ctlr_info *h, + struct hpsa_scsi_dev_t *device) +{ + struct scsi_device *sdev = NULL; + + if (!h->scsi_host) + return; + + /* + * Allow for commands to drain + */ + device->removed = 1; + hpsa_wait_for_outstanding_commands_for_dev(h, device); + + if (is_logical_device(device)) { /* RAID */ + sdev = scsi_device_lookup(h->scsi_host, device->bus, + device->target, device->lun); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } else { + /* + * We don't expect to get here. Future commands + * to this device will get a selection timeout as + * if the device were gone. + */ + hpsa_show_dev_msg(KERN_WARNING, h, device, + "didn't find device for removal."); + } + } else { /* HBA */ + + hpsa_remove_sas_device(device); + } +} + +static void adjust_hpsa_scsi_table(struct ctlr_info *h, + struct hpsa_scsi_dev_t *sd[], int nsds) +{ + /* sd contains scsi3 addresses and devtypes, and inquiry + * data. This function takes what's in sd to be the current + * reality and updates h->dev[] to reflect that reality. + */ + int i, entry, device_change, changes = 0; + struct hpsa_scsi_dev_t *csd; + unsigned long flags; + struct hpsa_scsi_dev_t **added, **removed; + int nadded, nremoved; + + /* + * A reset can cause a device status to change + * re-schedule the scan to see what happened. 
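hpsa_wait_for_outstanding_commands_for_dev() above polls the per-device outstanding count and gives up after a bounded number of one-second waits. A portable sketch of that drain-or-timeout loop (the counter is simulated and the sleep is left out):

#include <stdio.h>

/* Simulated "commands outstanding" counter that drains by one per poll. */
static int example_outstanding = 3;

static int example_count_outstanding(void)
{
	if (example_outstanding > 0)
		example_outstanding--;
	return example_outstanding;
}

static int example_wait_for_drain(int max_waits)
{
	int waits = 0;
	int cmds;

	while (1) {
		cmds = example_count_outstanding();
		if (cmds == 0)
			break;		/* drained: safe to proceed */
		if (++waits > max_waits)
			break;		/* give up and report what is left */
		/* the driver sleeps here (msleep(1000)); omitted in the sketch */
	}
	return cmds;
}

int main(void)
{
	printf("commands left after waiting: %d\n", example_wait_for_drain(20));
	return 0;
}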
+ */ + spin_lock_irqsave(&h->reset_lock, flags); + if (h->reset_in_progress) { + h->drv_req_rescan = 1; + spin_unlock_irqrestore(&h->reset_lock, flags); + return; + } + spin_unlock_irqrestore(&h->reset_lock, flags); + + added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL); + removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL); + + if (!added || !removed) { + dev_warn(&h->pdev->dev, "out of memory in " + "adjust_hpsa_scsi_table\n"); + goto free_and_out; + } + + spin_lock_irqsave(&h->devlock, flags); + + /* find any devices in h->dev[] that are not in + * sd[] and remove them from h->dev[], and for any + * devices which have changed, remove the old device + * info and add the new device info. + * If minor device attributes change, just update + * the existing device structure. + */ + i = 0; + nremoved = 0; + nadded = 0; + while (i < h->ndevices) { + csd = h->dev[i]; + device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry); + if (device_change == DEVICE_NOT_FOUND) { + changes++; + hpsa_scsi_remove_entry(h, i, removed, &nremoved); + continue; /* remove ^^^, hence i not incremented */ + } else if (device_change == DEVICE_CHANGED) { + changes++; + hpsa_scsi_replace_entry(h, i, sd[entry], + added, &nadded, removed, &nremoved); + /* Set it to NULL to prevent it from being freed + * at the bottom of hpsa_update_scsi_devices() + */ + sd[entry] = NULL; + } else if (device_change == DEVICE_UPDATED) { + hpsa_scsi_update_entry(h, i, sd[entry]); + } + i++; + } + + /* Now, make sure every device listed in sd[] is also + * listed in h->dev[], adding them if they aren't found + */ + + for (i = 0; i < nsds; i++) { + if (!sd[i]) /* if already added above. */ + continue; + + /* Don't add devices which are NOT READY, FORMAT IN PROGRESS + * as the SCSI mid-layer does not handle such devices well. + * It relentlessly loops sending TUR at 3Hz, then READ(10) + * at 160Hz, and prevents the system from coming up. + */ + if (sd[i]->volume_offline) { + hpsa_show_volume_status(h, sd[i]); + hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline"); + continue; + } + + device_change = hpsa_scsi_find_entry(sd[i], h->dev, + h->ndevices, &entry); + if (device_change == DEVICE_NOT_FOUND) { + changes++; + if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0) + break; + sd[i] = NULL; /* prevent from being freed later. */ + } else if (device_change == DEVICE_CHANGED) { + /* should never happen... */ + changes++; + dev_warn(&h->pdev->dev, + "device unexpectedly changed.\n"); + /* but if it does happen, we just ignore that device */ + } + } + hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices); + + /* + * Now that h->dev[]->phys_disk[] is coherent, we can enable + * any logical drives that need it enabled. + * + * The raid map should be current by now. + * + * We are updating the device list used for I/O requests. + */ + for (i = 0; i < h->ndevices; i++) { + if (h->dev[i] == NULL) + continue; + h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled; + } + + spin_unlock_irqrestore(&h->devlock, flags); + + /* Monitor devices which are in one of several NOT READY states to be + * brought online later. This must be done without holding h->devlock, + * so don't touch h->dev[] + */ + for (i = 0; i < nsds; i++) { + if (!sd[i]) /* if already added above. 
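adjust_hpsa_scsi_table() above reconciles the driver's table with the fresh scan in two passes: drop or replace entries that disappeared or changed, then add entries that are new. A compact sketch of the same two-pass idea over plain integer IDs (the changed/updated cases and the notification step are left out):

#include <stdio.h>

static int example_contains(const int *list, int n, int id)
{
	int i;

	for (i = 0; i < n; i++)
		if (list[i] == id)
			return 1;
	return 0;
}

int main(void)
{
	int current[8] = { 1, 2, 3 };	/* room left for additions */
	int ncurrent = 3;
	int scanned[3] = { 2, 3, 4 };
	int i = 0, j;

	while (i < ncurrent) {		/* pass 1: remove vanished entries */
		if (!example_contains(scanned, 3, current[i])) {
			for (j = i; j < ncurrent - 1; j++)
				current[j] = current[j + 1];
			ncurrent--;
			continue;	/* the same index now holds the next entry */
		}
		i++;
	}
	for (i = 0; i < 3; i++)		/* pass 2: add newly seen entries */
		if (!example_contains(current, ncurrent, scanned[i]))
			current[ncurrent++] = scanned[i];

	for (i = 0; i < ncurrent; i++)
		printf("%d ", current[i]);	/* prints: 2 3 4 */
	printf("\n");
	return 0;
}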
*/ + continue; + if (sd[i]->volume_offline) + hpsa_monitor_offline_device(h, sd[i]->scsi3addr); + } + + /* Don't notify scsi mid layer of any changes the first time through + * (or if there are no changes) scsi_scan_host will do it later the + * first time through. + */ + if (!changes) + goto free_and_out; + + /* Notify scsi mid layer of any removed devices */ + for (i = 0; i < nremoved; i++) { + if (removed[i] == NULL) + continue; + if (removed[i]->expose_device) + hpsa_remove_device(h, removed[i]); + kfree(removed[i]); + removed[i] = NULL; + } + + /* Notify scsi mid layer of any added devices */ + for (i = 0; i < nadded; i++) { + int rc = 0; + + if (added[i] == NULL) + continue; + if (!(added[i]->expose_device)) + continue; + rc = hpsa_add_device(h, added[i]); + if (!rc) + continue; + dev_warn(&h->pdev->dev, + "addition failed %d, device not added.", rc); + /* now we have to remove it from h->dev, + * since it didn't get added to scsi mid layer + */ + fixup_botched_add(h, added[i]); + h->drv_req_rescan = 1; + } + +free_and_out: + kfree(added); + kfree(removed); +} + +/* + * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t * + * Assume's h->devlock is held. + */ +static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, + int bus, int target, int lun) +{ + int i; + struct hpsa_scsi_dev_t *sd; + + for (i = 0; i < h->ndevices; i++) { + sd = h->dev[i]; + if (sd->bus == bus && sd->target == target && sd->lun == lun) + return sd; + } + return NULL; +} + +static int hpsa_slave_alloc(struct scsi_device *sdev) +{ + struct hpsa_scsi_dev_t *sd = NULL; + unsigned long flags; + struct ctlr_info *h; + + h = sdev_to_hba(sdev); + spin_lock_irqsave(&h->devlock, flags); + if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) { + struct scsi_target *starget; + struct sas_rphy *rphy; + + starget = scsi_target(sdev); + rphy = target_to_rphy(starget); + sd = hpsa_find_device_by_sas_rphy(h, rphy); + if (sd) { + sd->target = sdev_id(sdev); + sd->lun = sdev->lun; + } + } + if (!sd) + sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), + sdev_id(sdev), sdev->lun); + + if (sd && sd->expose_device) { + atomic_set(&sd->ioaccel_cmds_out, 0); + sdev->hostdata = sd; + } else + sdev->hostdata = NULL; + spin_unlock_irqrestore(&h->devlock, flags); + return 0; +} + +/* configure scsi device based on internal per-device structure */ +#define CTLR_TIMEOUT (120 * HZ) +static int hpsa_slave_configure(struct scsi_device *sdev) +{ + struct hpsa_scsi_dev_t *sd; + int queue_depth; + + sd = sdev->hostdata; + sdev->no_uld_attach = !sd || !sd->expose_device; + + if (sd) { + sd->was_removed = 0; + queue_depth = sd->queue_depth != 0 ? 
+ sd->queue_depth : sdev->host->can_queue; + if (sd->external) { + queue_depth = EXTERNAL_QD; + sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT; + blk_queue_rq_timeout(sdev->request_queue, + HPSA_EH_PTRAID_TIMEOUT); + } + if (is_hba_lunid(sd->scsi3addr)) { + sdev->eh_timeout = CTLR_TIMEOUT; + blk_queue_rq_timeout(sdev->request_queue, CTLR_TIMEOUT); + } + } else { + queue_depth = sdev->host->can_queue; + } + + scsi_change_queue_depth(sdev, queue_depth); + + return 0; +} + +static void hpsa_slave_destroy(struct scsi_device *sdev) +{ + struct hpsa_scsi_dev_t *hdev = NULL; + + hdev = sdev->hostdata; + + if (hdev) + hdev->was_removed = 1; +} + +static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h) +{ + int i; + + if (!h->ioaccel2_cmd_sg_list) + return; + for (i = 0; i < h->nr_cmds; i++) { + kfree(h->ioaccel2_cmd_sg_list[i]); + h->ioaccel2_cmd_sg_list[i] = NULL; + } + kfree(h->ioaccel2_cmd_sg_list); + h->ioaccel2_cmd_sg_list = NULL; +} + +static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h) +{ + int i; + + if (h->chainsize <= 0) + return 0; + + h->ioaccel2_cmd_sg_list = + kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list), + GFP_KERNEL); + if (!h->ioaccel2_cmd_sg_list) + return -ENOMEM; + for (i = 0; i < h->nr_cmds; i++) { + h->ioaccel2_cmd_sg_list[i] = + kmalloc_array(h->maxsgentries, + sizeof(*h->ioaccel2_cmd_sg_list[i]), + GFP_KERNEL); + if (!h->ioaccel2_cmd_sg_list[i]) + goto clean; + } + return 0; + +clean: + hpsa_free_ioaccel2_sg_chain_blocks(h); + return -ENOMEM; +} + +static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) +{ + int i; + + if (!h->cmd_sg_list) + return; + for (i = 0; i < h->nr_cmds; i++) { + kfree(h->cmd_sg_list[i]); + h->cmd_sg_list[i] = NULL; + } + kfree(h->cmd_sg_list); + h->cmd_sg_list = NULL; +} + +static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h) +{ + int i; + + if (h->chainsize <= 0) + return 0; + + h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list), + GFP_KERNEL); + if (!h->cmd_sg_list) + return -ENOMEM; + + for (i = 0; i < h->nr_cmds; i++) { + h->cmd_sg_list[i] = kmalloc_array(h->chainsize, + sizeof(*h->cmd_sg_list[i]), + GFP_KERNEL); + if (!h->cmd_sg_list[i]) + goto clean; + + } + return 0; + +clean: + hpsa_free_sg_chain_blocks(h); + return -ENOMEM; +} + +static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h, + struct io_accel2_cmd *cp, struct CommandList *c) +{ + struct ioaccel2_sg_element *chain_block; + u64 temp64; + u32 chain_size; + + chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex]; + chain_size = le32_to_cpu(cp->sg[0].length); + temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size, + DMA_TO_DEVICE); + if (dma_mapping_error(&h->pdev->dev, temp64)) { + /* prevent subsequent unmapping */ + cp->sg->address = 0; + return -1; + } + cp->sg->address = cpu_to_le64(temp64); + return 0; +} + +static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h, + struct io_accel2_cmd *cp) +{ + struct ioaccel2_sg_element *chain_sg; + u64 temp64; + u32 chain_size; + + chain_sg = cp->sg; + temp64 = le64_to_cpu(chain_sg->address); + chain_size = le32_to_cpu(cp->sg[0].length); + dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE); +} + +static int hpsa_map_sg_chain_block(struct ctlr_info *h, + struct CommandList *c) +{ + struct SGDescriptor *chain_sg, *chain_block; + u64 temp64; + u32 chain_len; + + chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; + chain_block = h->cmd_sg_list[c->cmdindex]; + chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN); + chain_len = sizeof(*chain_sg) * + 
(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); + chain_sg->Len = cpu_to_le32(chain_len); + temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len, + DMA_TO_DEVICE); + if (dma_mapping_error(&h->pdev->dev, temp64)) { + /* prevent subsequent unmapping */ + chain_sg->Addr = cpu_to_le64(0); + return -1; + } + chain_sg->Addr = cpu_to_le64(temp64); + return 0; +} + +static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, + struct CommandList *c) +{ + struct SGDescriptor *chain_sg; + + if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries) + return; + + chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; + dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr), + le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE); +} + + +/* Decode the various types of errors on ioaccel2 path. + * Return 1 for any error that should generate a RAID path retry. + * Return 0 for errors that don't require a RAID path retry. + */ +static int handle_ioaccel_mode2_error(struct ctlr_info *h, + struct CommandList *c, + struct scsi_cmnd *cmd, + struct io_accel2_cmd *c2, + struct hpsa_scsi_dev_t *dev) +{ + int data_len; + int retry = 0; + u32 ioaccel2_resid = 0; + + switch (c2->error_data.serv_response) { + case IOACCEL2_SERV_RESPONSE_COMPLETE: + switch (c2->error_data.status) { + case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: + if (cmd) + cmd->result = 0; + break; + case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: + cmd->result |= SAM_STAT_CHECK_CONDITION; + if (c2->error_data.data_present != + IOACCEL2_SENSE_DATA_PRESENT) { + memset(cmd->sense_buffer, 0, + SCSI_SENSE_BUFFERSIZE); + break; + } + /* copy the sense data */ + data_len = c2->error_data.sense_data_len; + if (data_len > SCSI_SENSE_BUFFERSIZE) + data_len = SCSI_SENSE_BUFFERSIZE; + if (data_len > sizeof(c2->error_data.sense_data_buff)) + data_len = + sizeof(c2->error_data.sense_data_buff); + memcpy(cmd->sense_buffer, + c2->error_data.sense_data_buff, data_len); + retry = 1; + break; + case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: + retry = 1; + break; + case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: + retry = 1; + break; + case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: + retry = 1; + break; + case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: + retry = 1; + break; + default: + retry = 1; + break; + } + break; + case IOACCEL2_SERV_RESPONSE_FAILURE: + switch (c2->error_data.status) { + case IOACCEL2_STATUS_SR_IO_ERROR: + case IOACCEL2_STATUS_SR_IO_ABORTED: + case IOACCEL2_STATUS_SR_OVERRUN: + retry = 1; + break; + case IOACCEL2_STATUS_SR_UNDERRUN: + cmd->result = (DID_OK << 16); /* host byte */ + ioaccel2_resid = get_unaligned_le32( + &c2->error_data.resid_cnt[0]); + scsi_set_resid(cmd, ioaccel2_resid); + break; + case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE: + case IOACCEL2_STATUS_SR_INVALID_DEVICE: + case IOACCEL2_STATUS_SR_IOACCEL_DISABLED: + /* + * Did an HBA disk disappear? We will eventually + * get a state change event from the controller but + * in the meantime, we need to tell the OS that the + * HBA disk is no longer there and stop I/O + * from going down. This allows the potential re-insert + * of the disk to get the same device node. + */ + if (dev->physical_device && dev->expose_device) { + cmd->result = DID_NO_CONNECT << 16; + dev->removed = 1; + h->drv_req_rescan = 1; + dev_warn(&h->pdev->dev, + "%s: device is gone!\n", __func__); + } else + /* + * Retry by sending down the RAID path. + * We will get an event from ctlr to + * trigger rescan regardless. 
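handle_ioaccel_mode2_error() reduces each ioaccel2 completion to one question: resubmit on the normal RAID path or not. A reduced sketch of that decision, with invented values standing in for the IOACCEL2_* constants and the driver's finer-grained cases collapsed:

#include <stdio.h>

/* Invented stand-ins for the ioaccel2 service responses and statuses. */
enum example_serv_response { EX_COMPLETE, EX_FAILURE, EX_TMF_COMPLETE };
enum example_status { EX_GOOD, EX_CHECK_COND, EX_BUSY, EX_UNDERRUN, EX_NO_PATH };

/* Return 1 when the command should be retried on the RAID path. */
static int example_should_retry(int serv_response, int status, int in_reset)
{
	int retry;

	switch (serv_response) {
	case EX_COMPLETE:
		retry = (status != EX_GOOD);	/* most abnormal statuses retry */
		break;
	case EX_FAILURE:
		/* an underrun only reports the residual; a vanished path is
		 * handled specially -- the sketch treats both as non-retry */
		retry = (status != EX_UNDERRUN && status != EX_NO_PATH);
		break;
	default:
		retry = 0;			/* e.g. TMF completions */
		break;
	}
	if (in_reset)
		retry = 0;	/* never retry while the device is being reset */
	return retry;
}

int main(void)
{
	printf("%d %d %d\n",
	       example_should_retry(EX_COMPLETE, EX_BUSY, 0),
	       example_should_retry(EX_FAILURE, EX_UNDERRUN, 0),
	       example_should_retry(EX_COMPLETE, EX_BUSY, 1));
	return 0;
}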
+ */ + retry = 1; + break; + default: + retry = 1; + } + break; + case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: + break; + case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: + break; + case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: + retry = 1; + break; + case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: + break; + default: + retry = 1; + break; + } + + if (dev->in_reset) + retry = 0; + + return retry; /* retry on raid path? */ +} + +static void hpsa_cmd_resolve_events(struct ctlr_info *h, + struct CommandList *c) +{ + struct hpsa_scsi_dev_t *dev = c->device; + + /* + * Reset c->scsi_cmd here so that the reset handler will know + * this command has completed. Then, check to see if the handler is + * waiting for this command, and, if so, wake it. + */ + c->scsi_cmd = SCSI_CMD_IDLE; + mb(); /* Declare command idle before checking for pending events. */ + if (dev) { + atomic_dec(&dev->commands_outstanding); + if (dev->in_reset && + atomic_read(&dev->commands_outstanding) <= 0) + wake_up_all(&h->event_sync_wait_queue); + } +} + +static void hpsa_cmd_resolve_and_free(struct ctlr_info *h, + struct CommandList *c) +{ + hpsa_cmd_resolve_events(h, c); + cmd_tagged_free(h, c); +} + +static void hpsa_cmd_free_and_done(struct ctlr_info *h, + struct CommandList *c, struct scsi_cmnd *cmd) +{ + hpsa_cmd_resolve_and_free(h, c); + if (cmd) + scsi_done(cmd); +} + +static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) +{ + INIT_WORK(&c->work, hpsa_command_resubmit_worker); + queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); +} + +static void process_ioaccel2_completion(struct ctlr_info *h, + struct CommandList *c, struct scsi_cmnd *cmd, + struct hpsa_scsi_dev_t *dev) +{ + struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + + /* check for good status */ + if (likely(c2->error_data.serv_response == 0 && + c2->error_data.status == 0)) { + cmd->result = 0; + return hpsa_cmd_free_and_done(h, c, cmd); + } + + /* + * Any RAID offload error results in retry which will use + * the normal I/O path so the controller can handle whatever is + * wrong. + */ + if (is_logical_device(dev) && + c2->error_data.serv_response == + IOACCEL2_SERV_RESPONSE_FAILURE) { + if (c2->error_data.status == + IOACCEL2_STATUS_SR_IOACCEL_DISABLED) { + hpsa_turn_off_ioaccel_for_device(dev); + } + + if (dev->in_reset) { + cmd->result = DID_RESET << 16; + return hpsa_cmd_free_and_done(h, c, cmd); + } + + return hpsa_retry_cmd(h, c); + } + + if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev)) + return hpsa_retry_cmd(h, c); + + return hpsa_cmd_free_and_done(h, c, cmd); +} + +/* Returns 0 on success, < 0 otherwise. */ +static int hpsa_evaluate_tmf_status(struct ctlr_info *h, + struct CommandList *cp) +{ + u8 tmf_status = cp->err_info->ScsiStatus; + + switch (tmf_status) { + case CISS_TMF_COMPLETE: + /* + * CISS_TMF_COMPLETE never happens, instead, + * ei->CommandStatus == 0 for this case. 
+ */ + case CISS_TMF_SUCCESS: + return 0; + case CISS_TMF_INVALID_FRAME: + case CISS_TMF_NOT_SUPPORTED: + case CISS_TMF_FAILED: + case CISS_TMF_WRONG_LUN: + case CISS_TMF_OVERLAPPED_TAG: + break; + default: + dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n", + tmf_status); + break; + } + return -tmf_status; +} + +static void complete_scsi_command(struct CommandList *cp) +{ + struct scsi_cmnd *cmd; + struct ctlr_info *h; + struct ErrorInfo *ei; + struct hpsa_scsi_dev_t *dev; + struct io_accel2_cmd *c2; + + u8 sense_key; + u8 asc; /* additional sense code */ + u8 ascq; /* additional sense code qualifier */ + unsigned long sense_data_size; + + ei = cp->err_info; + cmd = cp->scsi_cmd; + h = cp->h; + + if (!cmd->device) { + cmd->result = DID_NO_CONNECT << 16; + return hpsa_cmd_free_and_done(h, cp, cmd); + } + + dev = cmd->device->hostdata; + if (!dev) { + cmd->result = DID_NO_CONNECT << 16; + return hpsa_cmd_free_and_done(h, cp, cmd); + } + c2 = &h->ioaccel2_cmd_pool[cp->cmdindex]; + + scsi_dma_unmap(cmd); /* undo the DMA mappings */ + if ((cp->cmd_type == CMD_SCSI) && + (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) + hpsa_unmap_sg_chain_block(h, cp); + + if ((cp->cmd_type == CMD_IOACCEL2) && + (c2->sg[0].chain_indicator == IOACCEL2_CHAIN)) + hpsa_unmap_ioaccel2_sg_chain_block(h, c2); + + cmd->result = (DID_OK << 16); /* host byte */ + + /* SCSI command has already been cleaned up in SML */ + if (dev->was_removed) { + hpsa_cmd_resolve_and_free(h, cp); + return; + } + + if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) { + if (dev->physical_device && dev->expose_device && + dev->removed) { + cmd->result = DID_NO_CONNECT << 16; + return hpsa_cmd_free_and_done(h, cp, cmd); + } + if (likely(cp->phys_disk != NULL)) + atomic_dec(&cp->phys_disk->ioaccel_cmds_out); + } + + /* + * We check for lockup status here as it may be set for + * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by + * fail_all_oustanding_cmds() + */ + if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) { + /* DID_NO_CONNECT will prevent a retry */ + cmd->result = DID_NO_CONNECT << 16; + return hpsa_cmd_free_and_done(h, cp, cmd); + } + + if (cp->cmd_type == CMD_IOACCEL2) + return process_ioaccel2_completion(h, cp, cmd, dev); + + scsi_set_resid(cmd, ei->ResidualCnt); + if (ei->CommandStatus == 0) + return hpsa_cmd_free_and_done(h, cp, cmd); + + /* For I/O accelerator commands, copy over some fields to the normal + * CISS header used below for error handling. + */ + if (cp->cmd_type == CMD_IOACCEL1) { + struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; + cp->Header.SGList = scsi_sg_count(cmd); + cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList); + cp->Request.CDBLen = le16_to_cpu(c->io_flags) & + IOACCEL1_IOFLAGS_CDBLEN_MASK; + cp->Header.tag = c->tag; + memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); + memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); + + /* Any RAID offload error results in retry which will use + * the normal I/O path so the controller can handle whatever's + * wrong. 
+ */ + if (is_logical_device(dev)) { + if (ei->CommandStatus == CMD_IOACCEL_DISABLED) + dev->offload_enabled = 0; + return hpsa_retry_cmd(h, cp); + } + } + + /* an error has occurred */ + switch (ei->CommandStatus) { + + case CMD_TARGET_STATUS: + cmd->result |= ei->ScsiStatus; + /* copy the sense data */ + if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) + sense_data_size = SCSI_SENSE_BUFFERSIZE; + else + sense_data_size = sizeof(ei->SenseInfo); + if (ei->SenseLen < sense_data_size) + sense_data_size = ei->SenseLen; + memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); + if (ei->ScsiStatus) + decode_sense_data(ei->SenseInfo, sense_data_size, + &sense_key, &asc, &ascq); + if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { + switch (sense_key) { + case ABORTED_COMMAND: + cmd->result |= DID_SOFT_ERROR << 16; + break; + case UNIT_ATTENTION: + if (asc == 0x3F && ascq == 0x0E) + h->drv_req_rescan = 1; + break; + case ILLEGAL_REQUEST: + if (asc == 0x25 && ascq == 0x00) { + dev->removed = 1; + cmd->result = DID_NO_CONNECT << 16; + } + break; + } + break; + } + /* Problem was not a check condition + * Pass it up to the upper layers... + */ + if (ei->ScsiStatus) { + dev_warn(&h->pdev->dev, "cp %p has status 0x%x " + "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " + "Returning result: 0x%x\n", + cp, ei->ScsiStatus, + sense_key, asc, ascq, + cmd->result); + } else { /* scsi status is zero??? How??? */ + dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " + "Returning no connection.\n", cp), + + /* Ordinarily, this case should never happen, + * but there is a bug in some released firmware + * revisions that allows it to happen if, for + * example, a 4100 backplane loses power and + * the tape drive is in it. We assume that + * it's a fatal error of some kind because we + * can't show that it wasn't. We will make it + * look like selection timeout since that is + * the most common reason for this to occur, + * and it's severe enough. + */ + + cmd->result = DID_NO_CONNECT << 16; + } + break; + + case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ + break; + case CMD_DATA_OVERRUN: + dev_warn(&h->pdev->dev, + "CDB %16phN data overrun\n", cp->Request.CDB); + break; + case CMD_INVALID: { + /* print_bytes(cp, sizeof(*cp), 1, 0); + print_cmd(cp); */ + /* We get CMD_INVALID if you address a non-existent device + * instead of a selection timeout (no response). You will + * see this if you yank out a drive, then try to access it. + * This is kind of a shame because it means that any other + * CMD_INVALID (e.g. driver bug) will get interpreted as a + * missing target. 
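The CMD_TARGET_STATUS branch above clamps the sense copy three ways (destination buffer size, source field size, and the length the controller actually reported) before the memcpy. A stand-alone sketch of that clamping with made-up buffer sizes:

#include <stdio.h>
#include <string.h>

#define EXAMPLE_DST_SIZE 96	/* stands in for SCSI_SENSE_BUFFERSIZE */
#define EXAMPLE_SRC_SIZE 32	/* stands in for sizeof(ei->SenseInfo) */

static size_t example_copy_sense(unsigned char *dst, const unsigned char *src,
				 size_t reported_len)
{
	size_t n = EXAMPLE_DST_SIZE < EXAMPLE_SRC_SIZE ?
		   EXAMPLE_DST_SIZE : EXAMPLE_SRC_SIZE;

	if (reported_len < n)
		n = reported_len;	/* never copy more than was reported */
	memcpy(dst, src, n);
	return n;
}

int main(void)
{
	unsigned char src[EXAMPLE_SRC_SIZE] = { 0x70, 0x00, 0x05 };	/* key 5 */
	unsigned char dst[EXAMPLE_DST_SIZE];
	size_t n = example_copy_sense(dst, src, 18);

	printf("copied %zu bytes, sense key 0x%02x\n", n, dst[2] & 0x0f);
	return 0;
}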
*/ + cmd->result = DID_NO_CONNECT << 16; + } + break; + case CMD_PROTOCOL_ERR: + cmd->result = DID_ERROR << 16; + dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n", + cp->Request.CDB); + break; + case CMD_HARDWARE_ERR: + cmd->result = DID_ERROR << 16; + dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n", + cp->Request.CDB); + break; + case CMD_CONNECTION_LOST: + cmd->result = DID_ERROR << 16; + dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n", + cp->Request.CDB); + break; + case CMD_ABORTED: + cmd->result = DID_ABORT << 16; + break; + case CMD_ABORT_FAILED: + cmd->result = DID_ERROR << 16; + dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", + cp->Request.CDB); + break; + case CMD_UNSOLICITED_ABORT: + cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ + dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n", + cp->Request.CDB); + break; + case CMD_TIMEOUT: + cmd->result = DID_TIME_OUT << 16; + dev_warn(&h->pdev->dev, "CDB %16phN timed out\n", + cp->Request.CDB); + break; + case CMD_UNABORTABLE: + cmd->result = DID_ERROR << 16; + dev_warn(&h->pdev->dev, "Command unabortable\n"); + break; + case CMD_TMF_STATUS: + if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */ + cmd->result = DID_ERROR << 16; + break; + case CMD_IOACCEL_DISABLED: + /* This only handles the direct pass-through case since RAID + * offload is handled above. Just attempt a retry. + */ + cmd->result = DID_SOFT_ERROR << 16; + dev_warn(&h->pdev->dev, + "cp %p had HP SSD Smart Path error\n", cp); + break; + default: + cmd->result = DID_ERROR << 16; + dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", + cp, ei->CommandStatus); + } + + return hpsa_cmd_free_and_done(h, cp, cmd); +} + +static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c, + int sg_used, enum dma_data_direction data_direction) +{ + int i; + + for (i = 0; i < sg_used; i++) + dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr), + le32_to_cpu(c->SG[i].Len), + data_direction); +} + +static int hpsa_map_one(struct pci_dev *pdev, + struct CommandList *cp, + unsigned char *buf, + size_t buflen, + enum dma_data_direction data_direction) +{ + u64 addr64; + + if (buflen == 0 || data_direction == DMA_NONE) { + cp->Header.SGList = 0; + cp->Header.SGTotal = cpu_to_le16(0); + return 0; + } + + addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction); + if (dma_mapping_error(&pdev->dev, addr64)) { + /* Prevent subsequent unmap of something never mapped */ + cp->Header.SGList = 0; + cp->Header.SGTotal = cpu_to_le16(0); + return -1; + } + cp->SG[0].Addr = cpu_to_le64(addr64); + cp->SG[0].Len = cpu_to_le32(buflen); + cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */ + cp->Header.SGList = 1; /* no. 
SGs contig in this cmd */ + cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */ + return 0; +} + +#define NO_TIMEOUT ((unsigned long) -1) +#define DEFAULT_TIMEOUT 30000 /* milliseconds */ +static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, + struct CommandList *c, int reply_queue, unsigned long timeout_msecs) +{ + DECLARE_COMPLETION_ONSTACK(wait); + + c->waiting = &wait; + __enqueue_cmd_and_start_io(h, c, reply_queue); + if (timeout_msecs == NO_TIMEOUT) { + /* TODO: get rid of this no-timeout thing */ + wait_for_completion_io(&wait); + return IO_OK; + } + if (!wait_for_completion_io_timeout(&wait, + msecs_to_jiffies(timeout_msecs))) { + dev_warn(&h->pdev->dev, "Command timed out.\n"); + return -ETIMEDOUT; + } + return IO_OK; +} + +static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c, + int reply_queue, unsigned long timeout_msecs) +{ + if (unlikely(lockup_detected(h))) { + c->err_info->CommandStatus = CMD_CTLR_LOCKUP; + return IO_OK; + } + return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs); +} + +static u32 lockup_detected(struct ctlr_info *h) +{ + int cpu; + u32 rc, *lockup_detected; + + cpu = get_cpu(); + lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); + rc = *lockup_detected; + put_cpu(); + return rc; +} + +#define MAX_DRIVER_CMD_RETRIES 25 +static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, + struct CommandList *c, enum dma_data_direction data_direction, + unsigned long timeout_msecs) +{ + int backoff_time = 10, retry_count = 0; + int rc; + + do { + memset(c->err_info, 0, sizeof(*c->err_info)); + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, + timeout_msecs); + if (rc) + break; + retry_count++; + if (retry_count > 3) { + msleep(backoff_time); + if (backoff_time < 1000) + backoff_time *= 2; + } + } while ((check_for_unit_attention(h, c) || + check_for_busy(h, c)) && + retry_count <= MAX_DRIVER_CMD_RETRIES); + hpsa_pci_unmap(h->pdev, c, 1, data_direction); + if (retry_count > MAX_DRIVER_CMD_RETRIES) + rc = -EIO; + return rc; +} + +static void hpsa_print_cmd(struct ctlr_info *h, char *txt, + struct CommandList *c) +{ + const u8 *cdb = c->Request.CDB; + const u8 *lun = c->Header.LUN.LunAddrBytes; + + dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n", + txt, lun, cdb); +} + +static void hpsa_scsi_interpret_error(struct ctlr_info *h, + struct CommandList *cp) +{ + const struct ErrorInfo *ei = cp->err_info; + struct device *d = &cp->h->pdev->dev; + u8 sense_key, asc, ascq; + int sense_len; + + switch (ei->CommandStatus) { + case CMD_TARGET_STATUS: + if (ei->SenseLen > sizeof(ei->SenseInfo)) + sense_len = sizeof(ei->SenseInfo); + else + sense_len = ei->SenseLen; + decode_sense_data(ei->SenseInfo, sense_len, + &sense_key, &asc, &ascq); + hpsa_print_cmd(h, "SCSI status", cp); + if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) + dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n", + sense_key, asc, ascq); + else + dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus); + if (ei->ScsiStatus == 0) + dev_warn(d, "SCSI status is abnormally zero. " + "(probably indicates selection timeout " + "reported incorrectly due to a known " + "firmware bug, circa July, 2001.)\n"); + break; + case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ + break; + case CMD_DATA_OVERRUN: + hpsa_print_cmd(h, "overrun condition", cp); + break; + case CMD_INVALID: { + /* controller unfortunately reports SCSI passthru's + * to non-existent targets as invalid commands. 
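hpsa_scsi_do_simple_cmd_with_retry() above retries on unit attention or busy, sleeping between the later attempts with a delay that starts at 10 ms and doubles until it passes one second, for a bounded number of tries. A sketch of just that backoff schedule (the command issue and the sleep itself are left out):

#include <stdio.h>

#define EXAMPLE_MAX_RETRIES 25	/* mirrors MAX_DRIVER_CMD_RETRIES above */

int main(void)
{
	int backoff_ms = 10;
	int retry;

	for (retry = 1; retry <= EXAMPLE_MAX_RETRIES; retry++) {
		/* the first few retries are immediate; later ones back off */
		if (retry > 3) {
			printf("retry %2d: sleep %4d ms\n", retry, backoff_ms);
			if (backoff_ms < 1000)
				backoff_ms *= 2;
		} else {
			printf("retry %2d: no sleep\n", retry);
		}
	}
	return 0;
}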
+ */ + hpsa_print_cmd(h, "invalid command", cp); + dev_warn(d, "probably means device no longer present\n"); + } + break; + case CMD_PROTOCOL_ERR: + hpsa_print_cmd(h, "protocol error", cp); + break; + case CMD_HARDWARE_ERR: + hpsa_print_cmd(h, "hardware error", cp); + break; + case CMD_CONNECTION_LOST: + hpsa_print_cmd(h, "connection lost", cp); + break; + case CMD_ABORTED: + hpsa_print_cmd(h, "aborted", cp); + break; + case CMD_ABORT_FAILED: + hpsa_print_cmd(h, "abort failed", cp); + break; + case CMD_UNSOLICITED_ABORT: + hpsa_print_cmd(h, "unsolicited abort", cp); + break; + case CMD_TIMEOUT: + hpsa_print_cmd(h, "timed out", cp); + break; + case CMD_UNABORTABLE: + hpsa_print_cmd(h, "unabortable", cp); + break; + case CMD_CTLR_LOCKUP: + hpsa_print_cmd(h, "controller lockup detected", cp); + break; + default: + hpsa_print_cmd(h, "unknown status", cp); + dev_warn(d, "Unknown command status %x\n", + ei->CommandStatus); + } +} + +static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr, + u8 page, u8 *buf, size_t bufsize) +{ + int rc = IO_OK; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_alloc(h); + if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize, + page, scsi3addr, TYPE_CMD)) { + rc = -1; + goto out; + } + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); + if (rc) + goto out; + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(h, c); + rc = -1; + } +out: + cmd_free(h, c); + return rc; +} + +static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h, + u8 *scsi3addr) +{ + u8 *buf; + u64 sa = 0; + int rc = 0; + + buf = kzalloc(1024, GFP_KERNEL); + if (!buf) + return 0; + + rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC, + buf, 1024); + + if (rc) + goto out; + + sa = get_unaligned_be64(buf+12); + +out: + kfree(buf); + return sa; +} + +static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, + u16 page, unsigned char *buf, + unsigned char bufsize) +{ + int rc = IO_OK; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_alloc(h); + + if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, + page, scsi3addr, TYPE_CMD)) { + rc = -1; + goto out; + } + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); + if (rc) + goto out; + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(h, c); + rc = -1; + } +out: + cmd_free(h, c); + return rc; +} + +static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, + u8 reset_type, int reply_queue) +{ + int rc = IO_OK; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_alloc(h); + c->device = dev; + + /* fill_cmd can't fail here, no data buffer to map. */ + (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG); + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + if (rc) { + dev_warn(&h->pdev->dev, "Failed to send reset command\n"); + goto out; + } + /* no unmap needed here because no data xfer. 
*/ + + ei = c->err_info; + if (ei->CommandStatus != 0) { + hpsa_scsi_interpret_error(h, c); + rc = -1; + } +out: + cmd_free(h, c); + return rc; +} + +static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, + struct hpsa_scsi_dev_t *dev, + unsigned char *scsi3addr) +{ + int i; + bool match = false; + struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2; + + if (hpsa_is_cmd_idle(c)) + return false; + + switch (c->cmd_type) { + case CMD_SCSI: + case CMD_IOCTL_PEND: + match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes, + sizeof(c->Header.LUN.LunAddrBytes)); + break; + + case CMD_IOACCEL1: + case CMD_IOACCEL2: + if (c->phys_disk == dev) { + /* HBA mode match */ + match = true; + } else { + /* Possible RAID mode -- check each phys dev. */ + /* FIXME: Do we need to take out a lock here? If + * so, we could just call hpsa_get_pdisk_of_ioaccel2() + * instead. */ + for (i = 0; i < dev->nphysical_disks && !match; i++) { + /* FIXME: an alternate test might be + * + * match = dev->phys_disk[i]->ioaccel_handle + * == c2->scsi_nexus; */ + match = dev->phys_disk[i] == c->phys_disk; + } + } + break; + + case IOACCEL2_TMF: + for (i = 0; i < dev->nphysical_disks && !match; i++) { + match = dev->phys_disk[i]->ioaccel_handle == + le32_to_cpu(ac->it_nexus); + } + break; + + case 0: /* The command is in the middle of being initialized. */ + match = false; + break; + + default: + dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n", + c->cmd_type); + BUG(); + } + + return match; +} + +static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, + u8 reset_type, int reply_queue) +{ + int rc = 0; + + /* We can really only handle one reset at a time */ + if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) { + dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n"); + return -EINTR; + } + + rc = hpsa_send_reset(h, dev, reset_type, reply_queue); + if (!rc) { + /* incremented by sending the reset request */ + atomic_dec(&dev->commands_outstanding); + wait_event(h->event_sync_wait_queue, + atomic_read(&dev->commands_outstanding) <= 0 || + lockup_detected(h)); + } + + if (unlikely(lockup_detected(h))) { + dev_warn(&h->pdev->dev, + "Controller lockup detected during reset wait\n"); + rc = -ENODEV; + } + + if (!rc) + rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0); + + mutex_unlock(&h->reset_mutex); + return rc; +} + +static void hpsa_get_raid_level(struct ctlr_info *h, + unsigned char *scsi3addr, unsigned char *raid_level) +{ + int rc; + unsigned char *buf; + + *raid_level = RAID_UNKNOWN; + buf = kzalloc(64, GFP_KERNEL); + if (!buf) + return; + + if (!hpsa_vpd_page_supported(h, scsi3addr, + HPSA_VPD_LV_DEVICE_GEOMETRY)) + goto exit; + + rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | + HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64); + + if (rc == 0) + *raid_level = buf[8]; + if (*raid_level > RAID_UNKNOWN) + *raid_level = RAID_UNKNOWN; +exit: + kfree(buf); + return; +} + +#define HPSA_MAP_DEBUG +#ifdef HPSA_MAP_DEBUG +static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, + struct raid_map_data *map_buff) +{ + struct raid_map_disk_data *dd = &map_buff->data[0]; + int map, row, col; + u16 map_cnt, row_cnt, disks_per_row; + + if (rc != 0) + return; + + /* Show details only if debugging has been activated. 
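hpsa_debug_map_buff() walks the raid map's flat disk-data array in layout-map, row, column order, printing the data disks first and the metadata disks after them within each row. A small sketch of that iteration over made-up dimensions, showing the flat entry index for each position:

#include <stdio.h>

int main(void)
{
	/* made-up geometry: 2 layout maps, 2 rows, 3 data + 1 metadata disk per row */
	int map_cnt = 2, row_cnt = 2, data_per_row = 3, meta_per_row = 1;
	int map, row, col, idx = 0;

	for (map = 0; map < map_cnt; map++) {
		for (row = 0; row < row_cnt; row++) {
			for (col = 0; col < data_per_row; col++)
				printf("map %d row %d D%02d -> entry %d\n",
				       map, row, col, idx++);
			for (col = 0; col < meta_per_row; col++)
				printf("map %d row %d M%02d -> entry %d\n",
				       map, row, col, idx++);
		}
	}
	return 0;
}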
*/ + if (h->raid_offload_debug < 2) + return; + + dev_info(&h->pdev->dev, "structure_size = %u\n", + le32_to_cpu(map_buff->structure_size)); + dev_info(&h->pdev->dev, "volume_blk_size = %u\n", + le32_to_cpu(map_buff->volume_blk_size)); + dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n", + le64_to_cpu(map_buff->volume_blk_cnt)); + dev_info(&h->pdev->dev, "physicalBlockShift = %u\n", + map_buff->phys_blk_shift); + dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n", + map_buff->parity_rotation_shift); + dev_info(&h->pdev->dev, "strip_size = %u\n", + le16_to_cpu(map_buff->strip_size)); + dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n", + le64_to_cpu(map_buff->disk_starting_blk)); + dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n", + le64_to_cpu(map_buff->disk_blk_cnt)); + dev_info(&h->pdev->dev, "data_disks_per_row = %u\n", + le16_to_cpu(map_buff->data_disks_per_row)); + dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n", + le16_to_cpu(map_buff->metadata_disks_per_row)); + dev_info(&h->pdev->dev, "row_cnt = %u\n", + le16_to_cpu(map_buff->row_cnt)); + dev_info(&h->pdev->dev, "layout_map_count = %u\n", + le16_to_cpu(map_buff->layout_map_count)); + dev_info(&h->pdev->dev, "flags = 0x%x\n", + le16_to_cpu(map_buff->flags)); + dev_info(&h->pdev->dev, "encryption = %s\n", + le16_to_cpu(map_buff->flags) & + RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF"); + dev_info(&h->pdev->dev, "dekindex = %u\n", + le16_to_cpu(map_buff->dekindex)); + map_cnt = le16_to_cpu(map_buff->layout_map_count); + for (map = 0; map < map_cnt; map++) { + dev_info(&h->pdev->dev, "Map%u:\n", map); + row_cnt = le16_to_cpu(map_buff->row_cnt); + for (row = 0; row < row_cnt; row++) { + dev_info(&h->pdev->dev, " Row%u:\n", row); + disks_per_row = + le16_to_cpu(map_buff->data_disks_per_row); + for (col = 0; col < disks_per_row; col++, dd++) + dev_info(&h->pdev->dev, + " D%02u: h=0x%04x xor=%u,%u\n", + col, dd->ioaccel_handle, + dd->xor_mult[0], dd->xor_mult[1]); + disks_per_row = + le16_to_cpu(map_buff->metadata_disks_per_row); + for (col = 0; col < disks_per_row; col++, dd++) + dev_info(&h->pdev->dev, + " M%02u: h=0x%04x xor=%u,%u\n", + col, dd->ioaccel_handle, + dd->xor_mult[0], dd->xor_mult[1]); + } + } +} +#else +static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, + __attribute__((unused)) int rc, + __attribute__((unused)) struct raid_map_data *map_buff) +{ +} +#endif + +static int hpsa_get_raid_map(struct ctlr_info *h, + unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) +{ + int rc = 0; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_alloc(h); + + if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, + sizeof(this_device->raid_map), 0, + scsi3addr, TYPE_CMD)) { + dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n"); + cmd_free(h, c); + return -1; + } + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); + if (rc) + goto out; + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(h, c); + rc = -1; + goto out; + } + cmd_free(h, c); + + /* @todo in the future, dynamically allocate RAID map memory */ + if (le32_to_cpu(this_device->raid_map.structure_size) > + sizeof(this_device->raid_map)) { + dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); + rc = -1; + } + hpsa_debug_map_buff(h, rc, &this_device->raid_map); + return rc; +out: + cmd_free(h, c); + return rc; +} + +static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h, + unsigned char 
scsi3addr[], u16 bmic_device_index, + struct bmic_sense_subsystem_info *buf, size_t bufsize) +{ + int rc = IO_OK; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_alloc(h); + + rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize, + 0, RAID_CTLR_LUNID, TYPE_CMD); + if (rc) + goto out; + + c->Request.CDB[2] = bmic_device_index & 0xff; + c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; + + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); + if (rc) + goto out; + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(h, c); + rc = -1; + } +out: + cmd_free(h, c); + return rc; +} + +static int hpsa_bmic_id_controller(struct ctlr_info *h, + struct bmic_identify_controller *buf, size_t bufsize) +{ + int rc = IO_OK; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_alloc(h); + + rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize, + 0, RAID_CTLR_LUNID, TYPE_CMD); + if (rc) + goto out; + + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); + if (rc) + goto out; + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(h, c); + rc = -1; + } +out: + cmd_free(h, c); + return rc; +} + +static int hpsa_bmic_id_physical_device(struct ctlr_info *h, + unsigned char scsi3addr[], u16 bmic_device_index, + struct bmic_identify_physical_device *buf, size_t bufsize) +{ + int rc = IO_OK; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_alloc(h); + rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize, + 0, RAID_CTLR_LUNID, TYPE_CMD); + if (rc) + goto out; + + c->Request.CDB[2] = bmic_device_index & 0xff; + c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; + + hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(h, c); + rc = -1; + } +out: + cmd_free(h, c); + + return rc; +} + +/* + * get enclosure information + * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number + * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure + * Uses id_physical_device to determine the box_index. 
+ */ +static void hpsa_get_enclosure_info(struct ctlr_info *h, + unsigned char *scsi3addr, + struct ReportExtendedLUNdata *rlep, int rle_index, + struct hpsa_scsi_dev_t *encl_dev) +{ + int rc = -1; + struct CommandList *c = NULL; + struct ErrorInfo *ei = NULL; + struct bmic_sense_storage_box_params *bssbp = NULL; + struct bmic_identify_physical_device *id_phys = NULL; + struct ext_report_lun_entry *rle; + u16 bmic_device_index = 0; + + if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN) + return; + + rle = &rlep->LUN[rle_index]; + + encl_dev->eli = + hpsa_get_enclosure_logical_identifier(h, scsi3addr); + + bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]); + + if (encl_dev->target == -1 || encl_dev->lun == -1) { + rc = IO_OK; + goto out; + } + + if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) { + rc = IO_OK; + goto out; + } + + bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL); + if (!bssbp) + goto out; + + id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); + if (!id_phys) + goto out; + + rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index, + id_phys, sizeof(*id_phys)); + if (rc) { + dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n", + __func__, encl_dev->external, bmic_device_index); + goto out; + } + + c = cmd_alloc(h); + + rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp, + sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD); + + if (rc) + goto out; + + if (id_phys->phys_connector[1] == 'E') + c->Request.CDB[5] = id_phys->box_index; + else + c->Request.CDB[5] = 0; + + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); + if (rc) + goto out; + + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + rc = -1; + goto out; + } + + encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port; + memcpy(&encl_dev->phys_connector[id_phys->active_path_number], + bssbp->phys_connector, sizeof(bssbp->phys_connector)); + + rc = IO_OK; +out: + kfree(bssbp); + kfree(id_phys); + + if (c) + cmd_free(h, c); + + if (rc != IO_OK) + hpsa_show_dev_msg(KERN_INFO, h, encl_dev, + "Error, could not get enclosure information"); +} + +static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h, + unsigned char *scsi3addr) +{ + struct ReportExtendedLUNdata *physdev; + u32 nphysicals; + u64 sa = 0; + int i; + + physdev = kzalloc(sizeof(*physdev), GFP_KERNEL); + if (!physdev) + return 0; + + if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { + dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); + kfree(physdev); + return 0; + } + nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24; + + for (i = 0; i < nphysicals; i++) + if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) { + sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]); + break; + } + + kfree(physdev); + + return sa; +} + +static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr, + struct hpsa_scsi_dev_t *dev) +{ + int rc; + u64 sa = 0; + + if (is_hba_lunid(scsi3addr)) { + struct bmic_sense_subsystem_info *ssi; + + ssi = kzalloc(sizeof(*ssi), GFP_KERNEL); + if (!ssi) + return; + + rc = hpsa_bmic_sense_subsystem_information(h, + scsi3addr, 0, ssi, sizeof(*ssi)); + if (rc == 0) { + sa = get_unaligned_be64(ssi->primary_world_wide_id); + h->sas_address = sa; + } + + kfree(ssi); + } else + sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr); + + dev->sas_address = sa; +} + +static void hpsa_ext_ctrl_present(struct ctlr_info *h, + struct ReportExtendedLUNdata *physdev) +{ + 
u32 nphysicals; + int i; + + if (h->discovery_polling) + return; + + nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1; + + for (i = 0; i < nphysicals; i++) { + if (physdev->LUN[i].device_type == + BMIC_DEVICE_TYPE_CONTROLLER + && !is_hba_lunid(physdev->LUN[i].lunid)) { + dev_info(&h->pdev->dev, + "External controller present, activate discovery polling and disable rld caching\n"); + hpsa_disable_rld_caching(h); + h->discovery_polling = 1; + break; + } + } +} + +/* Get a device id from inquiry page 0x83 */ +static bool hpsa_vpd_page_supported(struct ctlr_info *h, + unsigned char scsi3addr[], u8 page) +{ + int rc; + int i; + int pages; + unsigned char *buf, bufsize; + + buf = kzalloc(256, GFP_KERNEL); + if (!buf) + return false; + + /* Get the size of the page list first */ + rc = hpsa_scsi_do_inquiry(h, scsi3addr, + VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, + buf, HPSA_VPD_HEADER_SZ); + if (rc != 0) + goto exit_unsupported; + pages = buf[3]; + if ((pages + HPSA_VPD_HEADER_SZ) <= 255) + bufsize = pages + HPSA_VPD_HEADER_SZ; + else + bufsize = 255; + + /* Get the whole VPD page list */ + rc = hpsa_scsi_do_inquiry(h, scsi3addr, + VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, + buf, bufsize); + if (rc != 0) + goto exit_unsupported; + + pages = buf[3]; + for (i = 1; i <= pages; i++) + if (buf[3 + i] == page) + goto exit_supported; +exit_unsupported: + kfree(buf); + return false; +exit_supported: + kfree(buf); + return true; +} + +/* + * Called during a scan operation. + * Sets ioaccel status on the new device list, not the existing device list + * + * The device list used during I/O will be updated later in + * adjust_hpsa_scsi_table. + */ +static void hpsa_get_ioaccel_status(struct ctlr_info *h, + unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) +{ + int rc; + unsigned char *buf; + u8 ioaccel_status; + + this_device->offload_config = 0; + this_device->offload_enabled = 0; + this_device->offload_to_be_enabled = 0; + + buf = kzalloc(64, GFP_KERNEL); + if (!buf) + return; + if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) + goto out; + rc = hpsa_scsi_do_inquiry(h, scsi3addr, + VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64); + if (rc != 0) + goto out; + +#define IOACCEL_STATUS_BYTE 4 +#define OFFLOAD_CONFIGURED_BIT 0x01 +#define OFFLOAD_ENABLED_BIT 0x02 + ioaccel_status = buf[IOACCEL_STATUS_BYTE]; + this_device->offload_config = + !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); + if (this_device->offload_config) { + bool offload_enabled = + !!(ioaccel_status & OFFLOAD_ENABLED_BIT); + /* + * Check to see if offload can be enabled. + */ + if (offload_enabled) { + rc = hpsa_get_raid_map(h, scsi3addr, this_device); + if (rc) /* could not load raid_map */ + goto out; + this_device->offload_to_be_enabled = 1; + } + } + +out: + kfree(buf); + return; +} + +/* Get the device id from inquiry page 0x83 */ +static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, + unsigned char *device_id, int index, int buflen) +{ + int rc; + unsigned char *buf; + + /* Does controller have VPD for device id? 
*/ + if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID)) + return 1; /* not supported */ + + buf = kzalloc(64, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | + HPSA_VPD_LV_DEVICE_ID, buf, 64); + if (rc == 0) { + if (buflen > 16) + buflen = 16; + memcpy(device_id, &buf[8], buflen); + } + + kfree(buf); + + return rc; /*0 - got id, otherwise, didn't */ +} + +static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, + void *buf, int bufsize, + int extended_response) +{ + int rc = IO_OK; + struct CommandList *c; + unsigned char scsi3addr[8]; + struct ErrorInfo *ei; + + c = cmd_alloc(h); + + /* address the controller */ + memset(scsi3addr, 0, sizeof(scsi3addr)); + if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, + buf, bufsize, 0, scsi3addr, TYPE_CMD)) { + rc = -EAGAIN; + goto out; + } + if (extended_response) + c->Request.CDB[1] = extended_response; + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); + if (rc) + goto out; + ei = c->err_info; + if (ei->CommandStatus != 0 && + ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(h, c); + rc = -EIO; + } else { + struct ReportLUNdata *rld = buf; + + if (rld->extended_response_flag != extended_response) { + if (!h->legacy_board) { + dev_err(&h->pdev->dev, + "report luns requested format %u, got %u\n", + extended_response, + rld->extended_response_flag); + rc = -EINVAL; + } else + rc = -EOPNOTSUPP; + } + } +out: + cmd_free(h, c); + return rc; +} + +static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, + struct ReportExtendedLUNdata *buf, int bufsize) +{ + int rc; + struct ReportLUNdata *lbuf; + + rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize, + HPSA_REPORT_PHYS_EXTENDED); + if (!rc || rc != -EOPNOTSUPP) + return rc; + + /* REPORT PHYS EXTENDED is not supported */ + lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL); + if (!lbuf) + return -ENOMEM; + + rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0); + if (!rc) { + int i; + u32 nphys; + + /* Copy ReportLUNdata header */ + memcpy(buf, lbuf, 8); + nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8; + for (i = 0; i < nphys; i++) + memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8); + } + kfree(lbuf); + return rc; +} + +static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, + struct ReportLUNdata *buf, int bufsize) +{ + return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); +} + +static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, + int bus, int target, int lun) +{ + device->bus = bus; + device->target = target; + device->lun = lun; +} + +/* Use VPD inquiry to get details of volume status */ +static int hpsa_get_volume_status(struct ctlr_info *h, + unsigned char scsi3addr[]) +{ + int rc; + int status; + int size; + unsigned char *buf; + + buf = kzalloc(64, GFP_KERNEL); + if (!buf) + return HPSA_VPD_LV_STATUS_UNSUPPORTED; + + /* Does controller have VPD for logical volume status? 
*/ + if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) + goto exit_failed; + + /* Get the size of the VPD return buffer */ + rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, + buf, HPSA_VPD_HEADER_SZ); + if (rc != 0) + goto exit_failed; + size = buf[3]; + + /* Now get the whole VPD buffer */ + rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, + buf, size + HPSA_VPD_HEADER_SZ); + if (rc != 0) + goto exit_failed; + status = buf[4]; /* status byte */ + + kfree(buf); + return status; +exit_failed: + kfree(buf); + return HPSA_VPD_LV_STATUS_UNSUPPORTED; +} + +/* Determine offline status of a volume. + * Return either: + * 0 (not offline) + * 0xff (offline for unknown reasons) + * # (integer code indicating one of several NOT READY states + * describing why a volume is to be kept offline) + */ +static unsigned char hpsa_volume_offline(struct ctlr_info *h, + unsigned char scsi3addr[]) +{ + struct CommandList *c; + unsigned char *sense; + u8 sense_key, asc, ascq; + int sense_len; + int rc, ldstat = 0; +#define ASC_LUN_NOT_READY 0x04 +#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04 +#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 + + c = cmd_alloc(h); + + (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, + NO_TIMEOUT); + if (rc) { + cmd_free(h, c); + return HPSA_VPD_LV_STATUS_UNSUPPORTED; + } + sense = c->err_info->SenseInfo; + if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) + sense_len = sizeof(c->err_info->SenseInfo); + else + sense_len = c->err_info->SenseLen; + decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq); + cmd_free(h, c); + + /* Determine the reason for not ready state */ + ldstat = hpsa_get_volume_status(h, scsi3addr); + + /* Keep volume offline in certain cases: */ + switch (ldstat) { + case HPSA_LV_FAILED: + case HPSA_LV_UNDERGOING_ERASE: + case HPSA_LV_NOT_AVAILABLE: + case HPSA_LV_UNDERGOING_RPI: + case HPSA_LV_PENDING_RPI: + case HPSA_LV_ENCRYPTED_NO_KEY: + case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: + case HPSA_LV_UNDERGOING_ENCRYPTION: + case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: + case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: + return ldstat; + case HPSA_VPD_LV_STATUS_UNSUPPORTED: + /* If VPD status page isn't available, + * use ASC/ASCQ to determine state + */ + if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) || + (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)) + return ldstat; + break; + default: + break; + } + return HPSA_LV_OK; +} + +static int hpsa_update_device_info(struct ctlr_info *h, + unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, + unsigned char *is_OBDR_device) +{ + +#define OBDR_SIG_OFFSET 43 +#define OBDR_TAPE_SIG "$DR-10" +#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) +#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) + + unsigned char *inq_buff; + unsigned char *obdr_sig; + int rc = 0; + + inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); + if (!inq_buff) { + rc = -ENOMEM; + goto bail_out; + } + + /* Do an inquiry to the device to see what it is. 
*/ + if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, + (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { + dev_err(&h->pdev->dev, + "%s: inquiry failed, device will be skipped.\n", + __func__); + rc = HPSA_INQUIRY_FAILED; + goto bail_out; + } + + scsi_sanitize_inquiry_string(&inq_buff[8], 8); + scsi_sanitize_inquiry_string(&inq_buff[16], 16); + + this_device->devtype = (inq_buff[0] & 0x1f); + memcpy(this_device->scsi3addr, scsi3addr, 8); + memcpy(this_device->vendor, &inq_buff[8], + sizeof(this_device->vendor)); + memcpy(this_device->model, &inq_buff[16], + sizeof(this_device->model)); + this_device->rev = inq_buff[2]; + memset(this_device->device_id, 0, + sizeof(this_device->device_id)); + if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, + sizeof(this_device->device_id)) < 0) { + dev_err(&h->pdev->dev, + "hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n", + h->ctlr, __func__, + h->scsi_host->host_no, + this_device->bus, this_device->target, + this_device->lun, + scsi_device_type(this_device->devtype), + this_device->model); + rc = HPSA_LV_FAILED; + goto bail_out; + } + + if ((this_device->devtype == TYPE_DISK || + this_device->devtype == TYPE_ZBC) && + is_logical_dev_addr_mode(scsi3addr)) { + unsigned char volume_offline; + + hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); + if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) + hpsa_get_ioaccel_status(h, scsi3addr, this_device); + volume_offline = hpsa_volume_offline(h, scsi3addr); + if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED && + h->legacy_board) { + /* + * Legacy boards might not support volume status + */ + dev_info(&h->pdev->dev, + "C0:T%d:L%d Volume status not available, assuming online.\n", + this_device->target, this_device->lun); + volume_offline = 0; + } + this_device->volume_offline = volume_offline; + if (volume_offline == HPSA_LV_FAILED) { + rc = HPSA_LV_FAILED; + dev_err(&h->pdev->dev, + "%s: LV failed, device will be skipped.\n", + __func__); + goto bail_out; + } + } else { + this_device->raid_level = RAID_UNKNOWN; + this_device->offload_config = 0; + hpsa_turn_off_ioaccel_for_device(this_device); + this_device->hba_ioaccel_enabled = 0; + this_device->volume_offline = 0; + this_device->queue_depth = h->nr_cmds; + } + + if (this_device->external) + this_device->queue_depth = EXTERNAL_QD; + + if (is_OBDR_device) { + /* See if this is a One-Button-Disaster-Recovery device + * by looking for "$DR-10" at offset 43 in inquiry data. + */ + obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; + *is_OBDR_device = (this_device->devtype == TYPE_ROM && + strncmp(obdr_sig, OBDR_TAPE_SIG, + OBDR_SIG_LEN) == 0); + } + kfree(inq_buff); + return 0; + +bail_out: + kfree(inq_buff); + return rc; +} + +/* + * Helper function to assign bus, target, lun mapping of devices. + * Logical drive target and lun are assigned at this time, but + * physical device lun and target assignment are deferred (assigned + * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) 
+*/ +static void figure_bus_target_lun(struct ctlr_info *h, + u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device) +{ + u32 lunid = get_unaligned_le32(lunaddrbytes); + + if (!is_logical_dev_addr_mode(lunaddrbytes)) { + /* physical device, target and lun filled in later */ + if (is_hba_lunid(lunaddrbytes)) { + int bus = HPSA_HBA_BUS; + + if (!device->rev) + bus = HPSA_LEGACY_HBA_BUS; + hpsa_set_bus_target_lun(device, + bus, 0, lunid & 0x3fff); + } else + /* defer target, lun assignment for physical devices */ + hpsa_set_bus_target_lun(device, + HPSA_PHYSICAL_DEVICE_BUS, -1, -1); + return; + } + /* It's a logical device */ + if (device->external) { + hpsa_set_bus_target_lun(device, + HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff, + lunid & 0x00ff); + return; + } + hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS, + 0, lunid & 0x3fff); +} + +static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position, + int i, int nphysicals, int nlocal_logicals) +{ + /* In report logicals, local logicals are listed first, + * then any externals. + */ + int logicals_start = nphysicals + (raid_ctlr_position == 0); + + if (i == raid_ctlr_position) + return 0; + + if (i < logicals_start) + return 0; + + /* i is in logicals range, but still within local logicals */ + if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals) + return 0; + + return 1; /* it's an external lun */ +} + +/* + * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, + * logdev. The number of luns in physdev and logdev are returned in + * *nphysicals and *nlogicals, respectively. + * Returns 0 on success, -1 otherwise. + */ +static int hpsa_gather_lun_info(struct ctlr_info *h, + struct ReportExtendedLUNdata *physdev, u32 *nphysicals, + struct ReportLUNdata *logdev, u32 *nlogicals) +{ + if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { + dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); + return -1; + } + *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24; + if (*nphysicals > HPSA_MAX_PHYS_LUN) { + dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n", + HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN); + *nphysicals = HPSA_MAX_PHYS_LUN; + } + if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) { + dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); + return -1; + } + *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; + /* Reject Logicals in excess of our max capability. */ + if (*nlogicals > HPSA_MAX_LUN) { + dev_warn(&h->pdev->dev, + "maximum logical LUNs (%d) exceeded. " + "%d LUNs ignored.\n", HPSA_MAX_LUN, + *nlogicals - HPSA_MAX_LUN); + *nlogicals = HPSA_MAX_LUN; + } + if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { + dev_warn(&h->pdev->dev, + "maximum logical + physical LUNs (%d) exceeded. 
" + "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, + *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); + *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; + } + return 0; +} + +static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, + int i, int nphysicals, int nlogicals, + struct ReportExtendedLUNdata *physdev_list, + struct ReportLUNdata *logdev_list) +{ + /* Helper function, figure out where the LUN ID info is coming from + * given index i, lists of physical and logical devices, where in + * the list the raid controller is supposed to appear (first or last) + */ + + int logicals_start = nphysicals + (raid_ctlr_position == 0); + int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); + + if (i == raid_ctlr_position) + return RAID_CTLR_LUNID; + + if (i < logicals_start) + return &physdev_list->LUN[i - + (raid_ctlr_position == 0)].lunid[0]; + + if (i < last_device) + return &logdev_list->LUN[i - nphysicals - + (raid_ctlr_position == 0)][0]; + BUG(); + return NULL; +} + +/* get physical drive ioaccel handle and queue depth */ +static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, + struct hpsa_scsi_dev_t *dev, + struct ReportExtendedLUNdata *rlep, int rle_index, + struct bmic_identify_physical_device *id_phys) +{ + int rc; + struct ext_report_lun_entry *rle; + + if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN) + return; + + rle = &rlep->LUN[rle_index]; + + dev->ioaccel_handle = rle->ioaccel_handle; + if ((rle->device_flags & 0x08) && dev->ioaccel_handle) + dev->hba_ioaccel_enabled = 1; + memset(id_phys, 0, sizeof(*id_phys)); + rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0], + GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys, + sizeof(*id_phys)); + if (!rc) + /* Reserve space for FW operations */ +#define DRIVE_CMDS_RESERVED_FOR_FW 2 +#define DRIVE_QUEUE_DEPTH 7 + dev->queue_depth = + le16_to_cpu(id_phys->current_queue_depth_limit) - + DRIVE_CMDS_RESERVED_FOR_FW; + else + dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */ +} + +static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device, + struct ReportExtendedLUNdata *rlep, int rle_index, + struct bmic_identify_physical_device *id_phys) +{ + struct ext_report_lun_entry *rle; + + if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN) + return; + + rle = &rlep->LUN[rle_index]; + + if ((rle->device_flags & 0x08) && this_device->ioaccel_handle) + this_device->hba_ioaccel_enabled = 1; + + memcpy(&this_device->active_path_index, + &id_phys->active_path_number, + sizeof(this_device->active_path_index)); + memcpy(&this_device->path_map, + &id_phys->redundant_path_present_map, + sizeof(this_device->path_map)); + memcpy(&this_device->box, + &id_phys->alternate_paths_phys_box_on_port, + sizeof(this_device->box)); + memcpy(&this_device->phys_connector, + &id_phys->alternate_paths_phys_connector, + sizeof(this_device->phys_connector)); + memcpy(&this_device->bay, + &id_phys->phys_bay_in_box, + sizeof(this_device->bay)); +} + +/* get number of local logical disks. 
*/ +static int hpsa_set_local_logical_count(struct ctlr_info *h, + struct bmic_identify_controller *id_ctlr, + u32 *nlocals) +{ + int rc; + + if (!id_ctlr) { + dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n", + __func__); + return -ENOMEM; + } + memset(id_ctlr, 0, sizeof(*id_ctlr)); + rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr)); + if (!rc) + if (id_ctlr->configured_logical_drive_count < 255) + *nlocals = id_ctlr->configured_logical_drive_count; + else + *nlocals = le16_to_cpu( + id_ctlr->extended_logical_unit_count); + else + *nlocals = -1; + return rc; +} + +static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes) +{ + struct bmic_identify_physical_device *id_phys; + bool is_spare = false; + int rc; + + id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); + if (!id_phys) + return false; + + rc = hpsa_bmic_id_physical_device(h, + lunaddrbytes, + GET_BMIC_DRIVE_NUMBER(lunaddrbytes), + id_phys, sizeof(*id_phys)); + if (rc == 0) + is_spare = (id_phys->more_flags >> 6) & 0x01; + + kfree(id_phys); + return is_spare; +} + +#define RPL_DEV_FLAG_NON_DISK 0x1 +#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2 +#define RPL_DEV_FLAG_UNCONFIG_DISK 0x4 + +#define BMIC_DEVICE_TYPE_ENCLOSURE 6 + +static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes, + struct ext_report_lun_entry *rle) +{ + u8 device_flags; + u8 device_type; + + if (!MASKED_DEVICE(lunaddrbytes)) + return false; + + device_flags = rle->device_flags; + device_type = rle->device_type; + + if (device_flags & RPL_DEV_FLAG_NON_DISK) { + if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE) + return false; + return true; + } + + if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED)) + return false; + + if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK) + return false; + + /* + * Spares may be spun down, we do not want to + * do an Inquiry to a RAID set spare drive as + * that would have them spun up, that is a + * performance hit because I/O to the RAID device + * stops while the spin up occurs which can take + * over 50 seconds. + */ + if (hpsa_is_disk_spare(h, lunaddrbytes)) + return true; + + return false; +} + +static void hpsa_update_scsi_devices(struct ctlr_info *h) +{ + /* the idea here is we could get notified + * that some devices have changed, so we do a report + * physical luns and report logical luns cmd, and adjust + * our list of devices accordingly. + * + * The scsi3addr's of devices won't change so long as the + * adapter is not reset. That means we can rescan and + * tell which devices we already know about, vs. new + * devices, vs. disappearing devices. 
+ */ + struct ReportExtendedLUNdata *physdev_list = NULL; + struct ReportLUNdata *logdev_list = NULL; + struct bmic_identify_physical_device *id_phys = NULL; + struct bmic_identify_controller *id_ctlr = NULL; + u32 nphysicals = 0; + u32 nlogicals = 0; + u32 nlocal_logicals = 0; + u32 ndev_allocated = 0; + struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; + int ncurrent = 0; + int i, ndevs_to_allocate; + int raid_ctlr_position; + bool physical_device; + + currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL); + physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL); + logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL); + tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); + id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); + id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL); + + if (!currentsd || !physdev_list || !logdev_list || + !tmpdevice || !id_phys || !id_ctlr) { + dev_err(&h->pdev->dev, "out of memory\n"); + goto out; + } + + h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */ + + if (hpsa_gather_lun_info(h, physdev_list, &nphysicals, + logdev_list, &nlogicals)) { + h->drv_req_rescan = 1; + goto out; + } + + /* Set number of local logicals (non PTRAID) */ + if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) { + dev_warn(&h->pdev->dev, + "%s: Can't determine number of local logical devices.\n", + __func__); + } + + /* We might see up to the maximum number of logical and physical disks + * plus external target devices, and a device for the local RAID + * controller. + */ + ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1; + + hpsa_ext_ctrl_present(h, physdev_list); + + /* Allocate the per device structures */ + for (i = 0; i < ndevs_to_allocate; i++) { + if (i >= HPSA_MAX_DEVICES) { + dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded." + " %d devices ignored.\n", HPSA_MAX_DEVICES, + ndevs_to_allocate - HPSA_MAX_DEVICES); + break; + } + + currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); + if (!currentsd[i]) { + h->drv_req_rescan = 1; + goto out; + } + ndev_allocated++; + } + + if (is_scsi_rev_5(h)) + raid_ctlr_position = 0; + else + raid_ctlr_position = nphysicals + nlogicals; + + /* adjust our table of devices */ + for (i = 0; i < nphysicals + nlogicals + 1; i++) { + u8 *lunaddrbytes, is_OBDR = 0; + int rc = 0; + int phys_dev_index = i - (raid_ctlr_position == 0); + bool skip_device = false; + + memset(tmpdevice, 0, sizeof(*tmpdevice)); + + physical_device = i < nphysicals + (raid_ctlr_position == 0); + + /* Figure out where the LUN ID info is coming from */ + lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, + i, nphysicals, nlogicals, physdev_list, logdev_list); + + /* Determine if this is a lun from an external target array */ + tmpdevice->external = + figure_external_status(h, raid_ctlr_position, i, + nphysicals, nlocal_logicals); + + /* + * Skip over some devices such as a spare. 
+ */ + if (phys_dev_index >= 0 && !tmpdevice->external && + physical_device) { + skip_device = hpsa_skip_device(h, lunaddrbytes, + &physdev_list->LUN[phys_dev_index]); + if (skip_device) + continue; + } + + /* Get device type, vendor, model, device id, raid_map */ + rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice, + &is_OBDR); + if (rc == -ENOMEM) { + dev_warn(&h->pdev->dev, + "Out of memory, rescan deferred.\n"); + h->drv_req_rescan = 1; + goto out; + } + if (rc) { + h->drv_req_rescan = 1; + continue; + } + + figure_bus_target_lun(h, lunaddrbytes, tmpdevice); + this_device = currentsd[ncurrent]; + + *this_device = *tmpdevice; + this_device->physical_device = physical_device; + + /* + * Expose all devices except for physical devices that + * are masked. + */ + if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device) + this_device->expose_device = 0; + else + this_device->expose_device = 1; + + + /* + * Get the SAS address for physical devices that are exposed. + */ + if (this_device->physical_device && this_device->expose_device) + hpsa_get_sas_address(h, lunaddrbytes, this_device); + + switch (this_device->devtype) { + case TYPE_ROM: + /* We don't *really* support actual CD-ROM devices, + * just "One Button Disaster Recovery" tape drive + * which temporarily pretends to be a CD-ROM drive. + * So we check that the device is really an OBDR tape + * device by checking for "$DR-10" in bytes 43-48 of + * the inquiry data. + */ + if (is_OBDR) + ncurrent++; + break; + case TYPE_DISK: + case TYPE_ZBC: + if (this_device->physical_device) { + /* The disk is in HBA mode. */ + /* Never use RAID mapper in HBA mode. */ + this_device->offload_enabled = 0; + hpsa_get_ioaccel_drive_info(h, this_device, + physdev_list, phys_dev_index, id_phys); + hpsa_get_path_info(this_device, + physdev_list, phys_dev_index, id_phys); + } + ncurrent++; + break; + case TYPE_TAPE: + case TYPE_MEDIUM_CHANGER: + ncurrent++; + break; + case TYPE_ENCLOSURE: + if (!this_device->external) + hpsa_get_enclosure_info(h, lunaddrbytes, + physdev_list, phys_dev_index, + this_device); + ncurrent++; + break; + case TYPE_RAID: + /* Only present the Smartarray HBA as a RAID controller. + * If it's a RAID controller other than the HBA itself + * (an external RAID controller, MSA500 or similar) + * don't present it. + */ + if (!is_hba_lunid(lunaddrbytes)) + break; + ncurrent++; + break; + default: + break; + } + if (ncurrent >= HPSA_MAX_DEVICES) + break; + } + + if (h->sas_host == NULL) { + int rc = 0; + + rc = hpsa_add_sas_host(h); + if (rc) { + dev_warn(&h->pdev->dev, + "Could not add sas host %d\n", rc); + goto out; + } + } + + adjust_hpsa_scsi_table(h, currentsd, ncurrent); +out: + kfree(tmpdevice); + for (i = 0; i < ndev_allocated; i++) + kfree(currentsd[i]); + kfree(currentsd); + kfree(physdev_list); + kfree(logdev_list); + kfree(id_ctlr); + kfree(id_phys); +} + +static void hpsa_set_sg_descriptor(struct SGDescriptor *desc, + struct scatterlist *sg) +{ + u64 addr64 = (u64) sg_dma_address(sg); + unsigned int len = sg_dma_len(sg); + + desc->Addr = cpu_to_le64(addr64); + desc->Len = cpu_to_le32(len); + desc->Ext = 0; +} + +/* + * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci + * dma mapping and fills in the scatter gather entries of the + * hpsa command, cp. 
+ */ +static int hpsa_scatter_gather(struct ctlr_info *h, + struct CommandList *cp, + struct scsi_cmnd *cmd) +{ + struct scatterlist *sg; + int use_sg, i, sg_limit, chained; + struct SGDescriptor *curr_sg; + + BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); + + use_sg = scsi_dma_map(cmd); + if (use_sg < 0) + return use_sg; + + if (!use_sg) + goto sglist_finished; + + /* + * If the number of entries is greater than the max for a single list, + * then we have a chained list; we will set up all but one entry in the + * first list (the last entry is saved for link information); + * otherwise, we don't have a chained list and we'll set up at each of + * the entries in the one list. + */ + curr_sg = cp->SG; + chained = use_sg > h->max_cmd_sg_entries; + sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg; + scsi_for_each_sg(cmd, sg, sg_limit, i) { + hpsa_set_sg_descriptor(curr_sg, sg); + curr_sg++; + } + + if (chained) { + /* + * Continue with the chained list. Set curr_sg to the chained + * list. Modify the limit to the total count less the entries + * we've already set up. Resume the scan at the list entry + * where the previous loop left off. + */ + curr_sg = h->cmd_sg_list[cp->cmdindex]; + sg_limit = use_sg - sg_limit; + for_each_sg(sg, sg, sg_limit, i) { + hpsa_set_sg_descriptor(curr_sg, sg); + curr_sg++; + } + } + + /* Back the pointer up to the last entry and mark it as "last". */ + (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST); + + if (use_sg + chained > h->maxSG) + h->maxSG = use_sg + chained; + + if (chained) { + cp->Header.SGList = h->max_cmd_sg_entries; + cp->Header.SGTotal = cpu_to_le16(use_sg + 1); + if (hpsa_map_sg_chain_block(h, cp)) { + scsi_dma_unmap(cmd); + return -1; + } + return 0; + } + +sglist_finished: + + cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ + cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */ + return 0; +} + +static inline void warn_zero_length_transfer(struct ctlr_info *h, + u8 *cdb, int cdb_len, + const char *func) +{ + dev_warn(&h->pdev->dev, + "%s: Blocking zero-length request: CDB:%*phN\n", + func, cdb_len, cdb); +} + +#define IO_ACCEL_INELIGIBLE 1 +/* zero-length transfers trigger hardware errors. */ +static bool is_zero_length_transfer(u8 *cdb) +{ + u32 block_cnt; + + /* Block zero-length transfer sizes on certain commands. */ + switch (cdb[0]) { + case READ_10: + case WRITE_10: + case VERIFY: /* 0x2F */ + case WRITE_VERIFY: /* 0x2E */ + block_cnt = get_unaligned_be16(&cdb[7]); + break; + case READ_12: + case WRITE_12: + case VERIFY_12: /* 0xAF */ + case WRITE_VERIFY_12: /* 0xAE */ + block_cnt = get_unaligned_be32(&cdb[6]); + break; + case READ_16: + case WRITE_16: + case VERIFY_16: /* 0x8F */ + block_cnt = get_unaligned_be32(&cdb[10]); + break; + default: + return false; + } + + return block_cnt == 0; +} + +static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) +{ + int is_write = 0; + u32 block; + u32 block_cnt; + + /* Perform some CDB fixups if needed using 10 byte reads/writes only */ + switch (cdb[0]) { + case WRITE_6: + case WRITE_12: + is_write = 1; + fallthrough; + case READ_6: + case READ_12: + if (*cdb_len == 6) { + block = (((cdb[1] & 0x1F) << 16) | + (cdb[2] << 8) | + cdb[3]); + block_cnt = cdb[4]; + if (block_cnt == 0) + block_cnt = 256; + } else { + BUG_ON(*cdb_len != 12); + block = get_unaligned_be32(&cdb[2]); + block_cnt = get_unaligned_be32(&cdb[6]); + } + if (block_cnt > 0xffff) + return IO_ACCEL_INELIGIBLE; + + cdb[0] = is_write ? 
WRITE_10 : READ_10; + cdb[1] = 0; + cdb[2] = (u8) (block >> 24); + cdb[3] = (u8) (block >> 16); + cdb[4] = (u8) (block >> 8); + cdb[5] = (u8) (block); + cdb[6] = 0; + cdb[7] = (u8) (block_cnt >> 8); + cdb[8] = (u8) (block_cnt); + cdb[9] = 0; + *cdb_len = 10; + break; + } + return 0; +} + +static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, + struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, + u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) +{ + struct scsi_cmnd *cmd = c->scsi_cmd; + struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; + unsigned int len; + unsigned int total_len = 0; + struct scatterlist *sg; + u64 addr64; + int use_sg, i; + struct SGDescriptor *curr_sg; + u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; + + /* TODO: implement chaining support */ + if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { + atomic_dec(&phys_disk->ioaccel_cmds_out); + return IO_ACCEL_INELIGIBLE; + } + + BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); + + if (is_zero_length_transfer(cdb)) { + warn_zero_length_transfer(h, cdb, cdb_len, __func__); + atomic_dec(&phys_disk->ioaccel_cmds_out); + return IO_ACCEL_INELIGIBLE; + } + + if (fixup_ioaccel_cdb(cdb, &cdb_len)) { + atomic_dec(&phys_disk->ioaccel_cmds_out); + return IO_ACCEL_INELIGIBLE; + } + + c->cmd_type = CMD_IOACCEL1; + + /* Adjust the DMA address to point to the accelerated command buffer */ + c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + + (c->cmdindex * sizeof(*cp)); + BUG_ON(c->busaddr & 0x0000007F); + + use_sg = scsi_dma_map(cmd); + if (use_sg < 0) { + atomic_dec(&phys_disk->ioaccel_cmds_out); + return use_sg; + } + + if (use_sg) { + curr_sg = cp->SG; + scsi_for_each_sg(cmd, sg, use_sg, i) { + addr64 = (u64) sg_dma_address(sg); + len = sg_dma_len(sg); + total_len += len; + curr_sg->Addr = cpu_to_le64(addr64); + curr_sg->Len = cpu_to_le32(len); + curr_sg->Ext = cpu_to_le32(0); + curr_sg++; + } + (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); + + switch (cmd->sc_data_direction) { + case DMA_TO_DEVICE: + control |= IOACCEL1_CONTROL_DATA_OUT; + break; + case DMA_FROM_DEVICE: + control |= IOACCEL1_CONTROL_DATA_IN; + break; + case DMA_NONE: + control |= IOACCEL1_CONTROL_NODATAXFER; + break; + default: + dev_err(&h->pdev->dev, "unknown data direction: %d\n", + cmd->sc_data_direction); + BUG(); + break; + } + } else { + control |= IOACCEL1_CONTROL_NODATAXFER; + } + + c->Header.SGList = use_sg; + /* Fill out the command structure to submit */ + cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF); + cp->transfer_len = cpu_to_le32(total_len); + cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ | + (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK)); + cp->control = cpu_to_le32(control); + memcpy(cp->CDB, cdb, cdb_len); + memcpy(cp->CISS_LUN, scsi3addr, 8); + /* Tag was already set at init time. */ + enqueue_cmd_and_start_io(h, c); + return 0; +} + +/* + * Queue a command directly to a device behind the controller using the + * I/O accelerator path. 
+ */ +static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, + struct CommandList *c) +{ + struct scsi_cmnd *cmd = c->scsi_cmd; + struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; + + if (!dev) + return -1; + + c->phys_disk = dev; + + if (dev->in_reset) + return -1; + + return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, + cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev); +} + +/* + * Set encryption parameters for the ioaccel2 request + */ +static void set_encrypt_ioaccel2(struct ctlr_info *h, + struct CommandList *c, struct io_accel2_cmd *cp) +{ + struct scsi_cmnd *cmd = c->scsi_cmd; + struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; + struct raid_map_data *map = &dev->raid_map; + u64 first_block; + + /* Are we doing encryption on this device */ + if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON)) + return; + /* Set the data encryption key index. */ + cp->dekindex = map->dekindex; + + /* Set the encryption enable flag, encoded into direction field. */ + cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK; + + /* Set encryption tweak values based on logical block address + * If block size is 512, tweak value is LBA. + * For other block sizes, tweak is (LBA * block size)/ 512) + */ + switch (cmd->cmnd[0]) { + /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ + case READ_6: + case WRITE_6: + first_block = (((cmd->cmnd[1] & 0x1F) << 16) | + (cmd->cmnd[2] << 8) | + cmd->cmnd[3]); + break; + case WRITE_10: + case READ_10: + /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */ + case WRITE_12: + case READ_12: + first_block = get_unaligned_be32(&cmd->cmnd[2]); + break; + case WRITE_16: + case READ_16: + first_block = get_unaligned_be64(&cmd->cmnd[2]); + break; + default: + dev_err(&h->pdev->dev, + "ERROR: %s: size (0x%x) not supported for encryption\n", + __func__, cmd->cmnd[0]); + BUG(); + break; + } + + if (le32_to_cpu(map->volume_blk_size) != 512) + first_block = first_block * + le32_to_cpu(map->volume_blk_size)/512; + + cp->tweak_lower = cpu_to_le32(first_block); + cp->tweak_upper = cpu_to_le32(first_block >> 32); +} + +static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, + struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, + u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) +{ + struct scsi_cmnd *cmd = c->scsi_cmd; + struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; + struct ioaccel2_sg_element *curr_sg; + int use_sg, i; + struct scatterlist *sg; + u64 addr64; + u32 len; + u32 total_len = 0; + + if (!cmd->device) + return -1; + + if (!cmd->device->hostdata) + return -1; + + BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); + + if (is_zero_length_transfer(cdb)) { + warn_zero_length_transfer(h, cdb, cdb_len, __func__); + atomic_dec(&phys_disk->ioaccel_cmds_out); + return IO_ACCEL_INELIGIBLE; + } + + if (fixup_ioaccel_cdb(cdb, &cdb_len)) { + atomic_dec(&phys_disk->ioaccel_cmds_out); + return IO_ACCEL_INELIGIBLE; + } + + c->cmd_type = CMD_IOACCEL2; + /* Adjust the DMA address to point to the accelerated command buffer */ + c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + + (c->cmdindex * sizeof(*cp)); + BUG_ON(c->busaddr & 0x0000007F); + + memset(cp, 0, sizeof(*cp)); + cp->IU_type = IOACCEL2_IU_TYPE; + + use_sg = scsi_dma_map(cmd); + if (use_sg < 0) { + atomic_dec(&phys_disk->ioaccel_cmds_out); + return use_sg; + } + + if (use_sg) { + curr_sg = cp->sg; + if (use_sg > h->ioaccel_maxsg) { + addr64 = le64_to_cpu( + h->ioaccel2_cmd_sg_list[c->cmdindex]->address); + curr_sg->address = cpu_to_le64(addr64); + 
curr_sg->length = 0; + curr_sg->reserved[0] = 0; + curr_sg->reserved[1] = 0; + curr_sg->reserved[2] = 0; + curr_sg->chain_indicator = IOACCEL2_CHAIN; + + curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex]; + } + scsi_for_each_sg(cmd, sg, use_sg, i) { + addr64 = (u64) sg_dma_address(sg); + len = sg_dma_len(sg); + total_len += len; + curr_sg->address = cpu_to_le64(addr64); + curr_sg->length = cpu_to_le32(len); + curr_sg->reserved[0] = 0; + curr_sg->reserved[1] = 0; + curr_sg->reserved[2] = 0; + curr_sg->chain_indicator = 0; + curr_sg++; + } + + /* + * Set the last s/g element bit + */ + (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG; + + switch (cmd->sc_data_direction) { + case DMA_TO_DEVICE: + cp->direction &= ~IOACCEL2_DIRECTION_MASK; + cp->direction |= IOACCEL2_DIR_DATA_OUT; + break; + case DMA_FROM_DEVICE: + cp->direction &= ~IOACCEL2_DIRECTION_MASK; + cp->direction |= IOACCEL2_DIR_DATA_IN; + break; + case DMA_NONE: + cp->direction &= ~IOACCEL2_DIRECTION_MASK; + cp->direction |= IOACCEL2_DIR_NO_DATA; + break; + default: + dev_err(&h->pdev->dev, "unknown data direction: %d\n", + cmd->sc_data_direction); + BUG(); + break; + } + } else { + cp->direction &= ~IOACCEL2_DIRECTION_MASK; + cp->direction |= IOACCEL2_DIR_NO_DATA; + } + + /* Set encryption parameters, if necessary */ + set_encrypt_ioaccel2(h, c, cp); + + cp->scsi_nexus = cpu_to_le32(ioaccel_handle); + cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT); + memcpy(cp->cdb, cdb, sizeof(cp->cdb)); + + cp->data_len = cpu_to_le32(total_len); + cp->err_ptr = cpu_to_le64(c->busaddr + + offsetof(struct io_accel2_cmd, error_data)); + cp->err_len = cpu_to_le32(sizeof(cp->error_data)); + + /* fill in sg elements */ + if (use_sg > h->ioaccel_maxsg) { + cp->sg_count = 1; + cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0])); + if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) { + atomic_dec(&phys_disk->ioaccel_cmds_out); + scsi_dma_unmap(cmd); + return -1; + } + } else + cp->sg_count = (u8) use_sg; + + if (phys_disk->in_reset) { + cmd->result = DID_RESET << 16; + return -1; + } + + enqueue_cmd_and_start_io(h, c); + return 0; +} + +/* + * Queue a command to the correct I/O accelerator path. + */ +static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, + struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, + u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) +{ + if (!c->scsi_cmd->device) + return -1; + + if (!c->scsi_cmd->device->hostdata) + return -1; + + if (phys_disk->in_reset) + return -1; + + /* Try to honor the device's queue depth */ + if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) > + phys_disk->queue_depth) { + atomic_dec(&phys_disk->ioaccel_cmds_out); + return IO_ACCEL_INELIGIBLE; + } + if (h->transMethod & CFGTBL_Trans_io_accel1) + return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, + cdb, cdb_len, scsi3addr, + phys_disk); + else + return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, + cdb, cdb_len, scsi3addr, + phys_disk); +} + +static void raid_map_helper(struct raid_map_data *map, + int offload_to_mirror, u32 *map_index, u32 *current_group) +{ + if (offload_to_mirror == 0) { + /* use physical disk in the first mirrored group. 
*/ + *map_index %= le16_to_cpu(map->data_disks_per_row); + return; + } + do { + /* determine mirror group that *map_index indicates */ + *current_group = *map_index / + le16_to_cpu(map->data_disks_per_row); + if (offload_to_mirror == *current_group) + continue; + if (*current_group < le16_to_cpu(map->layout_map_count) - 1) { + /* select map index from next group */ + *map_index += le16_to_cpu(map->data_disks_per_row); + (*current_group)++; + } else { + /* select map index from first group */ + *map_index %= le16_to_cpu(map->data_disks_per_row); + *current_group = 0; + } + } while (offload_to_mirror != *current_group); +} + +/* + * Attempt to perform offload RAID mapping for a logical volume I/O. + */ +static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, + struct CommandList *c) +{ + struct scsi_cmnd *cmd = c->scsi_cmd; + struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; + struct raid_map_data *map = &dev->raid_map; + struct raid_map_disk_data *dd = &map->data[0]; + int is_write = 0; + u32 map_index; + u64 first_block, last_block; + u32 block_cnt; + u32 blocks_per_row; + u64 first_row, last_row; + u32 first_row_offset, last_row_offset; + u32 first_column, last_column; + u64 r0_first_row, r0_last_row; + u32 r5or6_blocks_per_row; + u64 r5or6_first_row, r5or6_last_row; + u32 r5or6_first_row_offset, r5or6_last_row_offset; + u32 r5or6_first_column, r5or6_last_column; + u32 total_disks_per_row; + u32 stripesize; + u32 first_group, last_group, current_group; + u32 map_row; + u32 disk_handle; + u64 disk_block; + u32 disk_block_cnt; + u8 cdb[16]; + u8 cdb_len; + u16 strip_size; +#if BITS_PER_LONG == 32 + u64 tmpdiv; +#endif + int offload_to_mirror; + + if (!dev) + return -1; + + if (dev->in_reset) + return -1; + + /* check for valid opcode, get LBA and block count */ + switch (cmd->cmnd[0]) { + case WRITE_6: + is_write = 1; + fallthrough; + case READ_6: + first_block = (((cmd->cmnd[1] & 0x1F) << 16) | + (cmd->cmnd[2] << 8) | + cmd->cmnd[3]); + block_cnt = cmd->cmnd[4]; + if (block_cnt == 0) + block_cnt = 256; + break; + case WRITE_10: + is_write = 1; + fallthrough; + case READ_10: + first_block = + (((u64) cmd->cmnd[2]) << 24) | + (((u64) cmd->cmnd[3]) << 16) | + (((u64) cmd->cmnd[4]) << 8) | + cmd->cmnd[5]; + block_cnt = + (((u32) cmd->cmnd[7]) << 8) | + cmd->cmnd[8]; + break; + case WRITE_12: + is_write = 1; + fallthrough; + case READ_12: + first_block = + (((u64) cmd->cmnd[2]) << 24) | + (((u64) cmd->cmnd[3]) << 16) | + (((u64) cmd->cmnd[4]) << 8) | + cmd->cmnd[5]; + block_cnt = + (((u32) cmd->cmnd[6]) << 24) | + (((u32) cmd->cmnd[7]) << 16) | + (((u32) cmd->cmnd[8]) << 8) | + cmd->cmnd[9]; + break; + case WRITE_16: + is_write = 1; + fallthrough; + case READ_16: + first_block = + (((u64) cmd->cmnd[2]) << 56) | + (((u64) cmd->cmnd[3]) << 48) | + (((u64) cmd->cmnd[4]) << 40) | + (((u64) cmd->cmnd[5]) << 32) | + (((u64) cmd->cmnd[6]) << 24) | + (((u64) cmd->cmnd[7]) << 16) | + (((u64) cmd->cmnd[8]) << 8) | + cmd->cmnd[9]; + block_cnt = + (((u32) cmd->cmnd[10]) << 24) | + (((u32) cmd->cmnd[11]) << 16) | + (((u32) cmd->cmnd[12]) << 8) | + cmd->cmnd[13]; + break; + default: + return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ + } + last_block = first_block + block_cnt - 1; + + /* check for write to non-RAID-0 */ + if (is_write && dev->raid_level != 0) + return IO_ACCEL_INELIGIBLE; + + /* check for invalid block or wraparound */ + if (last_block >= le64_to_cpu(map->volume_blk_cnt) || + last_block < first_block) + return IO_ACCEL_INELIGIBLE; + + /* calculate stripe information for the 
request */ + blocks_per_row = le16_to_cpu(map->data_disks_per_row) * + le16_to_cpu(map->strip_size); + strip_size = le16_to_cpu(map->strip_size); +#if BITS_PER_LONG == 32 + tmpdiv = first_block; + (void) do_div(tmpdiv, blocks_per_row); + first_row = tmpdiv; + tmpdiv = last_block; + (void) do_div(tmpdiv, blocks_per_row); + last_row = tmpdiv; + first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); + last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); + tmpdiv = first_row_offset; + (void) do_div(tmpdiv, strip_size); + first_column = tmpdiv; + tmpdiv = last_row_offset; + (void) do_div(tmpdiv, strip_size); + last_column = tmpdiv; +#else + first_row = first_block / blocks_per_row; + last_row = last_block / blocks_per_row; + first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); + last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); + first_column = first_row_offset / strip_size; + last_column = last_row_offset / strip_size; +#endif + + /* if this isn't a single row/column then give to the controller */ + if ((first_row != last_row) || (first_column != last_column)) + return IO_ACCEL_INELIGIBLE; + + /* proceeding with driver mapping */ + total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + + le16_to_cpu(map->metadata_disks_per_row); + map_row = ((u32)(first_row >> map->parity_rotation_shift)) % + le16_to_cpu(map->row_cnt); + map_index = (map_row * total_disks_per_row) + first_column; + + switch (dev->raid_level) { + case HPSA_RAID_0: + break; /* nothing special to do */ + case HPSA_RAID_1: + /* Handles load balance across RAID 1 members. + * (2-drive R1 and R10 with even # of drives.) + * Appropriate for SSDs, not optimal for HDDs + * Ensure we have the correct raid_map. + */ + if (le16_to_cpu(map->layout_map_count) != 2) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } + if (dev->offload_to_mirror) + map_index += le16_to_cpu(map->data_disks_per_row); + dev->offload_to_mirror = !dev->offload_to_mirror; + break; + case HPSA_RAID_ADM: + /* Handles N-way mirrors (R1-ADM) + * and R10 with # of drives divisible by 3.) + * Ensure we have the correct raid_map. + */ + if (le16_to_cpu(map->layout_map_count) != 3) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } + + offload_to_mirror = dev->offload_to_mirror; + raid_map_helper(map, offload_to_mirror, + &map_index, ¤t_group); + /* set mirror group to use next time */ + offload_to_mirror = + (offload_to_mirror >= + le16_to_cpu(map->layout_map_count) - 1) + ? 0 : offload_to_mirror + 1; + dev->offload_to_mirror = offload_to_mirror; + /* Avoid direct use of dev->offload_to_mirror within this + * function since multiple threads might simultaneously + * increment it beyond the range of dev->layout_map_count -1. 
+ */ + break; + case HPSA_RAID_5: + case HPSA_RAID_6: + if (le16_to_cpu(map->layout_map_count) <= 1) + break; + + /* Verify first and last block are in same RAID group */ + r5or6_blocks_per_row = + le16_to_cpu(map->strip_size) * + le16_to_cpu(map->data_disks_per_row); + if (r5or6_blocks_per_row == 0) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } + stripesize = r5or6_blocks_per_row * + le16_to_cpu(map->layout_map_count); +#if BITS_PER_LONG == 32 + tmpdiv = first_block; + first_group = do_div(tmpdiv, stripesize); + tmpdiv = first_group; + (void) do_div(tmpdiv, r5or6_blocks_per_row); + first_group = tmpdiv; + tmpdiv = last_block; + last_group = do_div(tmpdiv, stripesize); + tmpdiv = last_group; + (void) do_div(tmpdiv, r5or6_blocks_per_row); + last_group = tmpdiv; +#else + first_group = (first_block % stripesize) / r5or6_blocks_per_row; + last_group = (last_block % stripesize) / r5or6_blocks_per_row; +#endif + if (first_group != last_group) + return IO_ACCEL_INELIGIBLE; + + /* Verify request is in a single row of RAID 5/6 */ +#if BITS_PER_LONG == 32 + tmpdiv = first_block; + (void) do_div(tmpdiv, stripesize); + first_row = r5or6_first_row = r0_first_row = tmpdiv; + tmpdiv = last_block; + (void) do_div(tmpdiv, stripesize); + r5or6_last_row = r0_last_row = tmpdiv; +#else + first_row = r5or6_first_row = r0_first_row = + first_block / stripesize; + r5or6_last_row = r0_last_row = last_block / stripesize; +#endif + if (r5or6_first_row != r5or6_last_row) + return IO_ACCEL_INELIGIBLE; + + + /* Verify request is in a single column */ +#if BITS_PER_LONG == 32 + tmpdiv = first_block; + first_row_offset = do_div(tmpdiv, stripesize); + tmpdiv = first_row_offset; + first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row); + r5or6_first_row_offset = first_row_offset; + tmpdiv = last_block; + r5or6_last_row_offset = do_div(tmpdiv, stripesize); + tmpdiv = r5or6_last_row_offset; + r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); + tmpdiv = r5or6_first_row_offset; + (void) do_div(tmpdiv, map->strip_size); + first_column = r5or6_first_column = tmpdiv; + tmpdiv = r5or6_last_row_offset; + (void) do_div(tmpdiv, map->strip_size); + r5or6_last_column = tmpdiv; +#else + first_row_offset = r5or6_first_row_offset = + (u32)((first_block % stripesize) % + r5or6_blocks_per_row); + + r5or6_last_row_offset = + (u32)((last_block % stripesize) % + r5or6_blocks_per_row); + + first_column = r5or6_first_column = + r5or6_first_row_offset / le16_to_cpu(map->strip_size); + r5or6_last_column = + r5or6_last_row_offset / le16_to_cpu(map->strip_size); +#endif + if (r5or6_first_column != r5or6_last_column) + return IO_ACCEL_INELIGIBLE; + + /* Request is eligible */ + map_row = ((u32)(first_row >> map->parity_rotation_shift)) % + le16_to_cpu(map->row_cnt); + + map_index = (first_group * + (le16_to_cpu(map->row_cnt) * total_disks_per_row)) + + (map_row * total_disks_per_row) + first_column; + break; + default: + return IO_ACCEL_INELIGIBLE; + } + + if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES)) + return IO_ACCEL_INELIGIBLE; + + c->phys_disk = dev->phys_disk[map_index]; + if (!c->phys_disk) + return IO_ACCEL_INELIGIBLE; + + disk_handle = dd[map_index].ioaccel_handle; + disk_block = le64_to_cpu(map->disk_starting_blk) + + first_row * le16_to_cpu(map->strip_size) + + (first_row_offset - first_column * + le16_to_cpu(map->strip_size)); + disk_block_cnt = block_cnt; + + /* handle differing logical/physical block sizes */ + if (map->phys_blk_shift) { + disk_block <<= map->phys_blk_shift; + 
disk_block_cnt <<= map->phys_blk_shift; + } + BUG_ON(disk_block_cnt > 0xffff); + + /* build the new CDB for the physical disk I/O */ + if (disk_block > 0xffffffff) { + cdb[0] = is_write ? WRITE_16 : READ_16; + cdb[1] = 0; + cdb[2] = (u8) (disk_block >> 56); + cdb[3] = (u8) (disk_block >> 48); + cdb[4] = (u8) (disk_block >> 40); + cdb[5] = (u8) (disk_block >> 32); + cdb[6] = (u8) (disk_block >> 24); + cdb[7] = (u8) (disk_block >> 16); + cdb[8] = (u8) (disk_block >> 8); + cdb[9] = (u8) (disk_block); + cdb[10] = (u8) (disk_block_cnt >> 24); + cdb[11] = (u8) (disk_block_cnt >> 16); + cdb[12] = (u8) (disk_block_cnt >> 8); + cdb[13] = (u8) (disk_block_cnt); + cdb[14] = 0; + cdb[15] = 0; + cdb_len = 16; + } else { + cdb[0] = is_write ? WRITE_10 : READ_10; + cdb[1] = 0; + cdb[2] = (u8) (disk_block >> 24); + cdb[3] = (u8) (disk_block >> 16); + cdb[4] = (u8) (disk_block >> 8); + cdb[5] = (u8) (disk_block); + cdb[6] = 0; + cdb[7] = (u8) (disk_block_cnt >> 8); + cdb[8] = (u8) (disk_block_cnt); + cdb[9] = 0; + cdb_len = 10; + } + return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, + dev->scsi3addr, + dev->phys_disk[map_index]); +} + +/* + * Submit commands down the "normal" RAID stack path + * All callers to hpsa_ciss_submit must check lockup_detected + * beforehand, before (opt.) and after calling cmd_alloc + */ +static int hpsa_ciss_submit(struct ctlr_info *h, + struct CommandList *c, struct scsi_cmnd *cmd, + struct hpsa_scsi_dev_t *dev) +{ + cmd->host_scribble = (unsigned char *) c; + c->cmd_type = CMD_SCSI; + c->scsi_cmd = cmd; + c->Header.ReplyQueue = 0; /* unused in simple mode */ + memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8); + c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT)); + + /* Fill in the request block... */ + + c->Request.Timeout = 0; + BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); + c->Request.CDBLen = cmd->cmd_len; + memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); + switch (cmd->sc_data_direction) { + case DMA_TO_DEVICE: + c->Request.type_attr_dir = + TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE); + break; + case DMA_FROM_DEVICE: + c->Request.type_attr_dir = + TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ); + break; + case DMA_NONE: + c->Request.type_attr_dir = + TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE); + break; + case DMA_BIDIRECTIONAL: + /* This can happen if a buggy application does a scsi passthru + * and sets both inlen and outlen to non-zero. ( see + * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) + */ + + c->Request.type_attr_dir = + TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD); + /* This is technically wrong, and hpsa controllers should + * reject it with CMD_INVALID, which is the most correct + * response, but non-fibre backends appear to let it + * slide by, and give the same results as if this field + * were set correctly. Either way is acceptable for + * our purposes here. 
+ */ + + break; + + default: + dev_err(&h->pdev->dev, "unknown data direction: %d\n", + cmd->sc_data_direction); + BUG(); + break; + } + + if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ + hpsa_cmd_resolve_and_free(h, c); + return SCSI_MLQUEUE_HOST_BUSY; + } + + if (dev->in_reset) { + hpsa_cmd_resolve_and_free(h, c); + return SCSI_MLQUEUE_HOST_BUSY; + } + + c->device = dev; + + enqueue_cmd_and_start_io(h, c); + /* the cmd'll come back via intr handler in complete_scsi_command() */ + return 0; +} + +static void hpsa_cmd_init(struct ctlr_info *h, int index, + struct CommandList *c) +{ + dma_addr_t cmd_dma_handle, err_dma_handle; + + /* Zero out all of commandlist except the last field, refcount */ + memset(c, 0, offsetof(struct CommandList, refcount)); + c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT)); + cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); + c->err_info = h->errinfo_pool + index; + memset(c->err_info, 0, sizeof(*c->err_info)); + err_dma_handle = h->errinfo_pool_dhandle + + index * sizeof(*c->err_info); + c->cmdindex = index; + c->busaddr = (u32) cmd_dma_handle; + c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle); + c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info)); + c->h = h; + c->scsi_cmd = SCSI_CMD_IDLE; +} + +static void hpsa_preinitialize_commands(struct ctlr_info *h) +{ + int i; + + for (i = 0; i < h->nr_cmds; i++) { + struct CommandList *c = h->cmd_pool + i; + + hpsa_cmd_init(h, i, c); + atomic_set(&c->refcount, 0); + } +} + +static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index, + struct CommandList *c) +{ + dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); + + BUG_ON(c->cmdindex != index); + + memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); + memset(c->err_info, 0, sizeof(*c->err_info)); + c->busaddr = (u32) cmd_dma_handle; +} + +static int hpsa_ioaccel_submit(struct ctlr_info *h, + struct CommandList *c, struct scsi_cmnd *cmd, + bool retry) +{ + struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; + int rc = IO_ACCEL_INELIGIBLE; + + if (!dev) + return SCSI_MLQUEUE_HOST_BUSY; + + if (dev->in_reset) + return SCSI_MLQUEUE_HOST_BUSY; + + if (hpsa_simple_mode) + return IO_ACCEL_INELIGIBLE; + + cmd->host_scribble = (unsigned char *) c; + + if (dev->offload_enabled) { + hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */ + c->cmd_type = CMD_SCSI; + c->scsi_cmd = cmd; + c->device = dev; + if (retry) /* Resubmit but do not increment device->commands_outstanding. */ + c->retry_pending = true; + rc = hpsa_scsi_ioaccel_raid_map(h, c); + if (rc < 0) /* scsi_dma_map failed. */ + rc = SCSI_MLQUEUE_HOST_BUSY; + } else if (dev->hba_ioaccel_enabled) { + hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */ + c->cmd_type = CMD_SCSI; + c->scsi_cmd = cmd; + c->device = dev; + if (retry) /* Resubmit but do not increment device->commands_outstanding. */ + c->retry_pending = true; + rc = hpsa_scsi_ioaccel_direct_map(h, c); + if (rc < 0) /* scsi_dma_map failed. 
*/ + rc = SCSI_MLQUEUE_HOST_BUSY; + } + return rc; +} + +static void hpsa_command_resubmit_worker(struct work_struct *work) +{ + struct scsi_cmnd *cmd; + struct hpsa_scsi_dev_t *dev; + struct CommandList *c = container_of(work, struct CommandList, work); + + cmd = c->scsi_cmd; + dev = cmd->device->hostdata; + if (!dev) { + cmd->result = DID_NO_CONNECT << 16; + return hpsa_cmd_free_and_done(c->h, c, cmd); + } + + if (dev->in_reset) { + cmd->result = DID_RESET << 16; + return hpsa_cmd_free_and_done(c->h, c, cmd); + } + + if (c->cmd_type == CMD_IOACCEL2) { + struct ctlr_info *h = c->h; + struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + int rc; + + if (c2->error_data.serv_response == + IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) { + /* Resubmit with the retry_pending flag set. */ + rc = hpsa_ioaccel_submit(h, c, cmd, true); + if (rc == 0) + return; + if (rc == SCSI_MLQUEUE_HOST_BUSY) { + /* + * If we get here, it means dma mapping failed. + * Try again via scsi mid layer, which will + * then get SCSI_MLQUEUE_HOST_BUSY. + */ + cmd->result = DID_IMM_RETRY << 16; + return hpsa_cmd_free_and_done(h, c, cmd); + } + /* else, fall thru and resubmit down CISS path */ + } + } + hpsa_cmd_partial_init(c->h, c->cmdindex, c); + /* + * Here we have not come in though queue_command, so we + * can set the retry_pending flag to true for a driver initiated + * retry attempt (I.E. not a SML retry). + * I.E. We are submitting a driver initiated retry. + * Note: hpsa_ciss_submit does not zero out the command fields like + * ioaccel submit does. + */ + c->retry_pending = true; + if (hpsa_ciss_submit(c->h, c, cmd, dev)) { + /* + * If we get here, it means dma mapping failed. Try + * again via scsi mid layer, which will then get + * SCSI_MLQUEUE_HOST_BUSY. + * + * hpsa_ciss_submit will have already freed c + * if it encountered a dma mapping failure. + */ + cmd->result = DID_IMM_RETRY << 16; + scsi_done(cmd); + } +} + +/* Running in struct Scsi_Host->host_lock less mode */ +static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) +{ + struct ctlr_info *h; + struct hpsa_scsi_dev_t *dev; + struct CommandList *c; + int rc = 0; + + /* Get the ptr to our adapter structure out of cmd->host. */ + h = sdev_to_hba(cmd->device); + + BUG_ON(scsi_cmd_to_rq(cmd)->tag < 0); + + dev = cmd->device->hostdata; + if (!dev) { + cmd->result = DID_NO_CONNECT << 16; + scsi_done(cmd); + return 0; + } + + if (dev->removed) { + cmd->result = DID_NO_CONNECT << 16; + scsi_done(cmd); + return 0; + } + + if (unlikely(lockup_detected(h))) { + cmd->result = DID_NO_CONNECT << 16; + scsi_done(cmd); + return 0; + } + + if (dev->in_reset) + return SCSI_MLQUEUE_DEVICE_BUSY; + + c = cmd_tagged_alloc(h, cmd); + if (c == NULL) + return SCSI_MLQUEUE_DEVICE_BUSY; + + /* + * This is necessary because the SML doesn't zero out this field during + * error recovery. + */ + cmd->result = 0; + + /* + * Call alternate submit routine for I/O accelerated commands. + * Retries always go down the normal I/O path. + * Note: If cmd->retries is non-zero, then this is a SML + * initiated retry and not a driver initiated retry. + * This command has been obtained from cmd_tagged_alloc + * and is therefore a brand-new command. + */ + if (likely(cmd->retries == 0 && + !blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) && + h->acciopath_status)) { + /* Submit with the retry_pending flag unset. 
*/ + rc = hpsa_ioaccel_submit(h, c, cmd, false); + if (rc == 0) + return 0; + if (rc == SCSI_MLQUEUE_HOST_BUSY) { + hpsa_cmd_resolve_and_free(h, c); + return SCSI_MLQUEUE_HOST_BUSY; + } + } + return hpsa_ciss_submit(h, c, cmd, dev); +} + +static void hpsa_scan_complete(struct ctlr_info *h) +{ + unsigned long flags; + + spin_lock_irqsave(&h->scan_lock, flags); + h->scan_finished = 1; + wake_up(&h->scan_wait_queue); + spin_unlock_irqrestore(&h->scan_lock, flags); +} + +static void hpsa_scan_start(struct Scsi_Host *sh) +{ + struct ctlr_info *h = shost_to_hba(sh); + unsigned long flags; + + /* + * Don't let rescans be initiated on a controller known to be locked + * up. If the controller locks up *during* a rescan, that thread is + * probably hosed, but at least we can prevent new rescan threads from + * piling up on a locked up controller. + */ + if (unlikely(lockup_detected(h))) + return hpsa_scan_complete(h); + + /* + * If a scan is already waiting to run, no need to add another + */ + spin_lock_irqsave(&h->scan_lock, flags); + if (h->scan_waiting) { + spin_unlock_irqrestore(&h->scan_lock, flags); + return; + } + + spin_unlock_irqrestore(&h->scan_lock, flags); + + /* wait until any scan already in progress is finished. */ + while (1) { + spin_lock_irqsave(&h->scan_lock, flags); + if (h->scan_finished) + break; + h->scan_waiting = 1; + spin_unlock_irqrestore(&h->scan_lock, flags); + wait_event(h->scan_wait_queue, h->scan_finished); + /* Note: We don't need to worry about a race between this + * thread and driver unload because the midlayer will + * have incremented the reference count, so unload won't + * happen if we're in here. + */ + } + h->scan_finished = 0; /* mark scan as in progress */ + h->scan_waiting = 0; + spin_unlock_irqrestore(&h->scan_lock, flags); + + if (unlikely(lockup_detected(h))) + return hpsa_scan_complete(h); + + /* + * Do the scan after a reset completion + */ + spin_lock_irqsave(&h->reset_lock, flags); + if (h->reset_in_progress) { + h->drv_req_rescan = 1; + spin_unlock_irqrestore(&h->reset_lock, flags); + hpsa_scan_complete(h); + return; + } + spin_unlock_irqrestore(&h->reset_lock, flags); + + hpsa_update_scsi_devices(h); + + hpsa_scan_complete(h); +} + +static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata; + + if (!logical_drive) + return -ENODEV; + + if (qdepth < 1) + qdepth = 1; + else if (qdepth > logical_drive->queue_depth) + qdepth = logical_drive->queue_depth; + + return scsi_change_queue_depth(sdev, qdepth); +} + +static int hpsa_scan_finished(struct Scsi_Host *sh, + unsigned long elapsed_time) +{ + struct ctlr_info *h = shost_to_hba(sh); + unsigned long flags; + int finished; + + spin_lock_irqsave(&h->scan_lock, flags); + finished = h->scan_finished; + spin_unlock_irqrestore(&h->scan_lock, flags); + return finished; +} + +static int hpsa_scsi_host_alloc(struct ctlr_info *h) +{ + struct Scsi_Host *sh; + + sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info)); + if (sh == NULL) { + dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); + return -ENOMEM; + } + + sh->io_port = 0; + sh->n_io_port = 0; + sh->this_id = -1; + sh->max_channel = 3; + sh->max_cmd_len = MAX_COMMAND_SIZE; + sh->max_lun = HPSA_MAX_LUN; + sh->max_id = HPSA_MAX_LUN; + sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS; + sh->cmd_per_lun = sh->can_queue; + sh->sg_tablesize = h->maxsgentries; + sh->transportt = hpsa_sas_transport_template; + sh->hostdata[0] = (unsigned long) h; + sh->irq = 
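/*
 * A minimal userspace sketch of the scan serialization implemented by
 * hpsa_scan_start() above; illustration only. The struct and function
 * names are hypothetical, and the driver uses scan_lock,
 * scan_finished/scan_waiting and wait_event() rather than a condvar.
 *
 *	#include <pthread.h>
 *	#include <stdbool.h>
 *
 *	struct scan_state {
 *		pthread_mutex_t lock;
 *		pthread_cond_t done;
 *		bool finished;	// true when no scan is running
 *		bool waiting;	// true when a follow-up scan is already queued
 *	};
 *
 *	// Returns false when the caller may skip: a scan is already queued.
 *	static bool scan_begin(struct scan_state *s)
 *	{
 *		pthread_mutex_lock(&s->lock);
 *		if (s->waiting) {
 *			pthread_mutex_unlock(&s->lock);
 *			return false;
 *		}
 *		while (!s->finished) {		// wait out the scan in progress
 *			s->waiting = true;
 *			pthread_cond_wait(&s->done, &s->lock);
 *		}
 *		s->finished = false;		// our scan is now in progress
 *		s->waiting = false;
 *		pthread_mutex_unlock(&s->lock);
 *		return true;
 *	}
 *
 *	static void scan_end(struct scan_state *s)
 *	{
 *		pthread_mutex_lock(&s->lock);
 *		s->finished = true;
 *		pthread_cond_broadcast(&s->done);
 *		pthread_mutex_unlock(&s->lock);
 *	}
 *
 * 'finished' must start out true (no scan running); back-to-back rescan
 * requests then collapse into a single queued scan, matching the driver.
 */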
pci_irq_vector(h->pdev, 0); + sh->unique_id = sh->irq; + + h->scsi_host = sh; + return 0; +} + +static int hpsa_scsi_add_host(struct ctlr_info *h) +{ + int rv; + + rv = scsi_add_host(h->scsi_host, &h->pdev->dev); + if (rv) { + dev_err(&h->pdev->dev, "scsi_add_host failed\n"); + return rv; + } + scsi_scan_host(h->scsi_host); + return 0; +} + +/* + * The block layer has already gone to the trouble of picking out a unique, + * small-integer tag for this request. We use an offset from that value as + * an index to select our command block. (The offset allows us to reserve the + * low-numbered entries for our own uses.) + */ +static int hpsa_get_cmd_index(struct scsi_cmnd *scmd) +{ + int idx = scsi_cmd_to_rq(scmd)->tag; + + if (idx < 0) + return idx; + + /* Offset to leave space for internal cmds. */ + return idx += HPSA_NRESERVED_CMDS; +} + +/* + * Send a TEST_UNIT_READY command to the specified LUN using the specified + * reply queue; returns zero if the unit is ready, and non-zero otherwise. + */ +static int hpsa_send_test_unit_ready(struct ctlr_info *h, + struct CommandList *c, unsigned char lunaddr[], + int reply_queue) +{ + int rc; + + /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ + (void) fill_cmd(c, TEST_UNIT_READY, h, + NULL, 0, 0, lunaddr, TYPE_CMD); + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + if (rc) + return rc; + /* no unmap needed here because no data xfer. */ + + /* Check if the unit is already ready. */ + if (c->err_info->CommandStatus == CMD_SUCCESS) + return 0; + + /* + * The first command sent after reset will receive "unit attention" to + * indicate that the LUN has been reset...this is actually what we're + * looking for (but, success is good too). + */ + if (c->err_info->CommandStatus == CMD_TARGET_STATUS && + c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && + (c->err_info->SenseInfo[2] == NO_SENSE || + c->err_info->SenseInfo[2] == UNIT_ATTENTION)) + return 0; + + return 1; +} + +/* + * Wait for a TEST_UNIT_READY command to complete, retrying as necessary; + * returns zero when the unit is ready, and non-zero when giving up. + */ +static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h, + struct CommandList *c, + unsigned char lunaddr[], int reply_queue) +{ + int rc; + int count = 0; + int waittime = 1; /* seconds */ + + /* Send test unit ready until device ready, or give up. */ + for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) { + + /* + * Wait for a bit. do this first, because if we send + * the TUR right away, the reset will just abort it. + */ + msleep(1000 * waittime); + + rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue); + if (!rc) + break; + + /* Increase wait time with each try, up to a point. */ + if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) + waittime *= 2; + + dev_warn(&h->pdev->dev, + "waiting %d secs for device to become ready.\n", + waittime); + } + + return rc; +} + +static int wait_for_device_to_become_ready(struct ctlr_info *h, + unsigned char lunaddr[], + int reply_queue) +{ + int first_queue; + int last_queue; + int rq; + int rc = 0; + struct CommandList *c; + + c = cmd_alloc(h); + + /* + * If no specific reply queue was requested, then send the TUR + * repeatedly, requesting a reply on each reply queue; otherwise execute + * the loop exactly once using only the specified queue. 
+ */ + if (reply_queue == DEFAULT_REPLY_QUEUE) { + first_queue = 0; + last_queue = h->nreply_queues - 1; + } else { + first_queue = reply_queue; + last_queue = reply_queue; + } + + for (rq = first_queue; rq <= last_queue; rq++) { + rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq); + if (rc) + break; + } + + if (rc) + dev_warn(&h->pdev->dev, "giving up on device.\n"); + else + dev_warn(&h->pdev->dev, "device is ready.\n"); + + cmd_free(h, c); + return rc; +} + +/* Need at least one of these error handlers to keep ../scsi/hosts.c from + * complaining. Doing a host- or bus-reset can't do anything good here. + */ +static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) +{ + int rc = SUCCESS; + int i; + struct ctlr_info *h; + struct hpsa_scsi_dev_t *dev = NULL; + u8 reset_type; + char msg[48]; + unsigned long flags; + + /* find the controller to which the command to be aborted was sent */ + h = sdev_to_hba(scsicmd->device); + if (h == NULL) /* paranoia */ + return FAILED; + + spin_lock_irqsave(&h->reset_lock, flags); + h->reset_in_progress = 1; + spin_unlock_irqrestore(&h->reset_lock, flags); + + if (lockup_detected(h)) { + rc = FAILED; + goto return_reset_status; + } + + dev = scsicmd->device->hostdata; + if (!dev) { + dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__); + rc = FAILED; + goto return_reset_status; + } + + if (dev->devtype == TYPE_ENCLOSURE) { + rc = SUCCESS; + goto return_reset_status; + } + + /* if controller locked up, we can guarantee command won't complete */ + if (lockup_detected(h)) { + snprintf(msg, sizeof(msg), + "cmd %d RESET FAILED, lockup detected", + hpsa_get_cmd_index(scsicmd)); + hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + rc = FAILED; + goto return_reset_status; + } + + /* this reset request might be the result of a lockup; check */ + if (detect_controller_lockup(h)) { + snprintf(msg, sizeof(msg), + "cmd %d RESET FAILED, new lockup detected", + hpsa_get_cmd_index(scsicmd)); + hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + rc = FAILED; + goto return_reset_status; + } + + /* Do not attempt on controller */ + if (is_hba_lunid(dev->scsi3addr)) { + rc = SUCCESS; + goto return_reset_status; + } + + if (is_logical_dev_addr_mode(dev->scsi3addr)) + reset_type = HPSA_DEVICE_RESET_MSG; + else + reset_type = HPSA_PHYS_TARGET_RESET; + + sprintf(msg, "resetting %s", + reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical "); + hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + + /* + * wait to see if any commands will complete before sending reset + */ + dev->in_reset = true; /* block any new cmds from OS for this device */ + for (i = 0; i < 10; i++) { + if (atomic_read(&dev->commands_outstanding) > 0) + msleep(1000); + else + break; + } + + /* send a reset to the SCSI LUN which the command was sent to */ + rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE); + if (rc == 0) + rc = SUCCESS; + else + rc = FAILED; + + sprintf(msg, "reset %s %s", + reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ", + rc == SUCCESS ? "completed successfully" : "failed"); + hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + +return_reset_status: + spin_lock_irqsave(&h->reset_lock, flags); + h->reset_in_progress = 0; + if (dev) + dev->in_reset = false; + spin_unlock_irqrestore(&h->reset_lock, flags); + return rc; +} + +/* + * For operations with an associated SCSI command, a command block is allocated + * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the + * block request tag as an index into a table of entries. 
cmd_tagged_free() is + * the complement, although cmd_free() may be called instead. + * This function is only called for new requests from queue_command. + */ +static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, + struct scsi_cmnd *scmd) +{ + int idx = hpsa_get_cmd_index(scmd); + struct CommandList *c = h->cmd_pool + idx; + + if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) { + dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n", + idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1); + /* The index value comes from the block layer, so if it's out of + * bounds, it's probably not our bug. + */ + BUG(); + } + + if (unlikely(!hpsa_is_cmd_idle(c))) { + /* + * We expect that the SCSI layer will hand us a unique tag + * value. Thus, there should never be a collision here between + * two requests...because if the selected command isn't idle + * then someone is going to be very disappointed. + */ + if (idx != h->last_collision_tag) { /* Print once per tag */ + dev_warn(&h->pdev->dev, + "%s: tag collision (tag=%d)\n", __func__, idx); + if (scmd) + scsi_print_command(scmd); + h->last_collision_tag = idx; + } + return NULL; + } + + atomic_inc(&c->refcount); + hpsa_cmd_partial_init(h, idx, c); + + /* + * This is a new command obtained from queue_command so + * there have not been any driver initiated retry attempts. + */ + c->retry_pending = false; + + return c; +} + +static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c) +{ + /* + * Release our reference to the block. We don't need to do anything + * else to free it, because it is accessed by index. + */ + (void)atomic_dec(&c->refcount); +} + +/* + * For operations that cannot sleep, a command block is allocated at init, + * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track + * which ones are free or in use. Lock must be held when calling this. + * cmd_free() is the complement. + * This function never gives up and returns NULL. If it hangs, + * another thread must call cmd_free() to free some tags. + */ + +static struct CommandList *cmd_alloc(struct ctlr_info *h) +{ + struct CommandList *c; + int refcount, i; + int offset = 0; + + /* + * There is some *extremely* small but non-zero chance that that + * multiple threads could get in here, and one thread could + * be scanning through the list of bits looking for a free + * one, but the free ones are always behind him, and other + * threads sneak in behind him and eat them before he can + * get to them, so that while there is always a free one, a + * very unlucky thread might be starved anyway, never able to + * beat the other threads. In reality, this happens so + * infrequently as to be indistinguishable from never. + * + * Note that we start allocating commands before the SCSI host structure + * is initialized. Since the search starts at bit zero, this + * all works, since we have at least one command structure available; + * however, it means that the structures with the low indexes have to be + * reserved for driver-initiated requests, while requests from the block + * layer will use the higher indexes. + */ + + for (;;) { + i = find_next_zero_bit(h->cmd_pool_bits, + HPSA_NRESERVED_CMDS, + offset); + if (unlikely(i >= HPSA_NRESERVED_CMDS)) { + offset = 0; + continue; + } + c = h->cmd_pool + i; + refcount = atomic_inc_return(&c->refcount); + if (unlikely(refcount > 1)) { + cmd_free(h, c); /* already in use */ + offset = (i + 1) % HPSA_NRESERVED_CMDS; + continue; + } + set_bit(i, h->cmd_pool_bits); + break; /* it's ours now. 
*/ + } + hpsa_cmd_partial_init(h, i, c); + c->device = NULL; + + /* + * cmd_alloc is for "internal" commands and they are never + * retried. + */ + c->retry_pending = false; + + return c; +} + +/* + * This is the complementary operation to cmd_alloc(). Note, however, in some + * corner cases it may also be used to free blocks allocated by + * cmd_tagged_alloc() in which case the ref-count decrement does the trick and + * the clear-bit is harmless. + */ +static void cmd_free(struct ctlr_info *h, struct CommandList *c) +{ + if (atomic_dec_and_test(&c->refcount)) { + int i; + + i = c - h->cmd_pool; + clear_bit(i, h->cmd_pool_bits); + } +} + +#ifdef CONFIG_COMPAT + +static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd, + void __user *arg) +{ + struct ctlr_info *h = sdev_to_hba(dev); + IOCTL32_Command_struct __user *arg32 = arg; + IOCTL_Command_struct arg64; + int err; + u32 cp; + + if (!arg) + return -EINVAL; + + memset(&arg64, 0, sizeof(arg64)); + if (copy_from_user(&arg64, arg32, offsetof(IOCTL_Command_struct, buf))) + return -EFAULT; + if (get_user(cp, &arg32->buf)) + return -EFAULT; + arg64.buf = compat_ptr(cp); + + if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) + return -EAGAIN; + err = hpsa_passthru_ioctl(h, &arg64); + atomic_inc(&h->passthru_cmds_avail); + if (err) + return err; + if (copy_to_user(&arg32->error_info, &arg64.error_info, + sizeof(arg32->error_info))) + return -EFAULT; + return 0; +} + +static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, + unsigned int cmd, void __user *arg) +{ + struct ctlr_info *h = sdev_to_hba(dev); + BIG_IOCTL32_Command_struct __user *arg32 = arg; + BIG_IOCTL_Command_struct arg64; + int err; + u32 cp; + + if (!arg) + return -EINVAL; + memset(&arg64, 0, sizeof(arg64)); + if (copy_from_user(&arg64, arg32, + offsetof(BIG_IOCTL32_Command_struct, buf))) + return -EFAULT; + if (get_user(cp, &arg32->buf)) + return -EFAULT; + arg64.buf = compat_ptr(cp); + + if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) + return -EAGAIN; + err = hpsa_big_passthru_ioctl(h, &arg64); + atomic_inc(&h->passthru_cmds_avail); + if (err) + return err; + if (copy_to_user(&arg32->error_info, &arg64.error_info, + sizeof(arg32->error_info))) + return -EFAULT; + return 0; +} + +static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *arg) +{ + switch (cmd) { + case CCISS_GETPCIINFO: + case CCISS_GETINTINFO: + case CCISS_SETINTINFO: + case CCISS_GETNODENAME: + case CCISS_SETNODENAME: + case CCISS_GETHEARTBEAT: + case CCISS_GETBUSTYPES: + case CCISS_GETFIRMVER: + case CCISS_GETDRIVVER: + case CCISS_REVALIDVOLS: + case CCISS_DEREGDISK: + case CCISS_REGNEWDISK: + case CCISS_REGNEWD: + case CCISS_RESCANDISK: + case CCISS_GETLUNINFO: + return hpsa_ioctl(dev, cmd, arg); + + case CCISS_PASSTHRU32: + return hpsa_ioctl32_passthru(dev, cmd, arg); + case CCISS_BIG_PASSTHRU32: + return hpsa_ioctl32_big_passthru(dev, cmd, arg); + + default: + return -ENOIOCTLCMD; + } +} +#endif + +static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) +{ + struct hpsa_pci_info pciinfo; + + if (!argp) + return -EINVAL; + pciinfo.domain = pci_domain_nr(h->pdev->bus); + pciinfo.bus = h->pdev->bus->number; + pciinfo.dev_fn = h->pdev->devfn; + pciinfo.board_id = h->board_id; + if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) + return -EFAULT; + return 0; +} + +static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) +{ + DriverVer_type DriverVer; + unsigned char vmaj, vmin, vsubmin; + int rc; + + rc = 
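/*
 * A simplified sketch of the 32-bit compat conversion done by
 * hpsa_ioctl32_passthru() above: copy the fixed fields up to the
 * user-buffer pointer verbatim, then widen the 32-bit pointer value
 * separately. The structs and names here are hypothetical; the driver
 * fetches the pointer with get_user() and widens it with compat_ptr().
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	struct pt_req32 { uint32_t buf_size; uint32_t buf; };	// 32-bit ABI
 *	struct pt_req   { uint32_t buf_size; void *buf; };	// native layout
 *
 *	static void widen_pt_req(struct pt_req *dst, const struct pt_req32 *src)
 *	{
 *		memset(dst, 0, sizeof(*dst));
 *		// everything before 'buf' has an identical layout
 *		memcpy(dst, src, offsetof(struct pt_req32, buf));
 *		dst->buf = (void *)(uintptr_t)src->buf;	// compat_ptr() step
 *	}
 *
 * Afterwards only error_info needs copying back into the 32-bit structure,
 * since the data itself is written directly through the widened user
 * pointer.
 */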
sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", + &vmaj, &vmin, &vsubmin); + if (rc != 3) { + dev_info(&h->pdev->dev, "driver version string '%s' " + "unrecognized.", HPSA_DRIVER_VERSION); + vmaj = 0; + vmin = 0; + vsubmin = 0; + } + DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; + if (!argp) + return -EINVAL; + if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) + return -EFAULT; + return 0; +} + +static int hpsa_passthru_ioctl(struct ctlr_info *h, + IOCTL_Command_struct *iocommand) +{ + struct CommandList *c; + char *buff = NULL; + u64 temp64; + int rc = 0; + + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + if ((iocommand->buf_size < 1) && + (iocommand->Request.Type.Direction != XFER_NONE)) { + return -EINVAL; + } + if (iocommand->buf_size > 0) { + buff = kmalloc(iocommand->buf_size, GFP_KERNEL); + if (buff == NULL) + return -ENOMEM; + if (iocommand->Request.Type.Direction & XFER_WRITE) { + /* Copy the data into the buffer we created */ + if (copy_from_user(buff, iocommand->buf, + iocommand->buf_size)) { + rc = -EFAULT; + goto out_kfree; + } + } else { + memset(buff, 0, iocommand->buf_size); + } + } + c = cmd_alloc(h); + + /* Fill in the command type */ + c->cmd_type = CMD_IOCTL_PEND; + c->scsi_cmd = SCSI_CMD_BUSY; + /* Fill in Command Header */ + c->Header.ReplyQueue = 0; /* unused in simple mode */ + if (iocommand->buf_size > 0) { /* buffer to fill */ + c->Header.SGList = 1; + c->Header.SGTotal = cpu_to_le16(1); + } else { /* no buffers to fill */ + c->Header.SGList = 0; + c->Header.SGTotal = cpu_to_le16(0); + } + memcpy(&c->Header.LUN, &iocommand->LUN_info, sizeof(c->Header.LUN)); + + /* Fill in Request block */ + memcpy(&c->Request, &iocommand->Request, + sizeof(c->Request)); + + /* Fill in the scatter gather information */ + if (iocommand->buf_size > 0) { + temp64 = dma_map_single(&h->pdev->dev, buff, + iocommand->buf_size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { + c->SG[0].Addr = cpu_to_le64(0); + c->SG[0].Len = cpu_to_le32(0); + rc = -ENOMEM; + goto out; + } + c->SG[0].Addr = cpu_to_le64(temp64); + c->SG[0].Len = cpu_to_le32(iocommand->buf_size); + c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ + } + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, + NO_TIMEOUT); + if (iocommand->buf_size > 0) + hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL); + check_ioctl_unit_attention(h, c); + if (rc) { + rc = -EIO; + goto out; + } + + /* Copy the error information out */ + memcpy(&iocommand->error_info, c->err_info, + sizeof(iocommand->error_info)); + if ((iocommand->Request.Type.Direction & XFER_READ) && + iocommand->buf_size > 0) { + /* Copy the data out of the buffer we created */ + if (copy_to_user(iocommand->buf, buff, iocommand->buf_size)) { + rc = -EFAULT; + goto out; + } + } +out: + cmd_free(h, c); +out_kfree: + kfree(buff); + return rc; +} + +static int hpsa_big_passthru_ioctl(struct ctlr_info *h, + BIG_IOCTL_Command_struct *ioc) +{ + struct CommandList *c; + unsigned char **buff = NULL; + int *buff_size = NULL; + u64 temp64; + BYTE sg_used = 0; + int status = 0; + u32 left; + u32 sz; + BYTE __user *data_ptr; + + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + if ((ioc->buf_size < 1) && + (ioc->Request.Type.Direction != XFER_NONE)) + return -EINVAL; + /* Check kmalloc limits using all SGs */ + if (ioc->malloc_size > MAX_KMALLOC_SIZE) + return -EINVAL; + if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) + return -EINVAL; + buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL); + if (!buff) { + 
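/*
 * A standalone sketch of the version parsing and packing done in
 * hpsa_getdrivver_ioctl() above; the helper name is hypothetical.
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	// Parse "major.minor.subminor" and pack it into the 24-bit value
 *	// reported to userspace: (major << 16) | (minor << 8) | subminor.
 *	static uint32_t pack_driver_version(const char *ver)
 *	{
 *		unsigned char maj = 0, min = 0, sub = 0;
 *
 *		if (sscanf(ver, "%hhu.%hhu.%hhu", &maj, &min, &sub) != 3)
 *			return 0;	// unrecognized version string
 *		return ((uint32_t)maj << 16) | ((uint32_t)min << 8) | sub;
 *	}
 *
 * For example, a version string such as "3.4.20-200" parses as 3/4/20 and
 * packs to 0x030414; any suffix after the third number is ignored.
 */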
status = -ENOMEM; + goto cleanup1; + } + buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL); + if (!buff_size) { + status = -ENOMEM; + goto cleanup1; + } + left = ioc->buf_size; + data_ptr = ioc->buf; + while (left) { + sz = (left > ioc->malloc_size) ? ioc->malloc_size : left; + buff_size[sg_used] = sz; + buff[sg_used] = kmalloc(sz, GFP_KERNEL); + if (buff[sg_used] == NULL) { + status = -ENOMEM; + goto cleanup1; + } + if (ioc->Request.Type.Direction & XFER_WRITE) { + if (copy_from_user(buff[sg_used], data_ptr, sz)) { + status = -EFAULT; + goto cleanup1; + } + } else + memset(buff[sg_used], 0, sz); + left -= sz; + data_ptr += sz; + sg_used++; + } + c = cmd_alloc(h); + + c->cmd_type = CMD_IOCTL_PEND; + c->scsi_cmd = SCSI_CMD_BUSY; + c->Header.ReplyQueue = 0; + c->Header.SGList = (u8) sg_used; + c->Header.SGTotal = cpu_to_le16(sg_used); + memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); + memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); + if (ioc->buf_size > 0) { + int i; + for (i = 0; i < sg_used; i++) { + temp64 = dma_map_single(&h->pdev->dev, buff[i], + buff_size[i], DMA_BIDIRECTIONAL); + if (dma_mapping_error(&h->pdev->dev, + (dma_addr_t) temp64)) { + c->SG[i].Addr = cpu_to_le64(0); + c->SG[i].Len = cpu_to_le32(0); + hpsa_pci_unmap(h->pdev, c, i, + DMA_BIDIRECTIONAL); + status = -ENOMEM; + goto cleanup0; + } + c->SG[i].Addr = cpu_to_le64(temp64); + c->SG[i].Len = cpu_to_le32(buff_size[i]); + c->SG[i].Ext = cpu_to_le32(0); + } + c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); + } + status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, + NO_TIMEOUT); + if (sg_used) + hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL); + check_ioctl_unit_attention(h, c); + if (status) { + status = -EIO; + goto cleanup0; + } + + /* Copy the error information out */ + memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); + if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { + int i; + + /* Copy the data out of the buffer we created */ + BYTE __user *ptr = ioc->buf; + for (i = 0; i < sg_used; i++) { + if (copy_to_user(ptr, buff[i], buff_size[i])) { + status = -EFAULT; + goto cleanup0; + } + ptr += buff_size[i]; + } + } + status = 0; +cleanup0: + cmd_free(h, c); +cleanup1: + if (buff) { + int i; + + for (i = 0; i < sg_used; i++) + kfree(buff[i]); + kfree(buff); + } + kfree(buff_size); + return status; +} + +static void check_ioctl_unit_attention(struct ctlr_info *h, + struct CommandList *c) +{ + if (c->err_info->CommandStatus == CMD_TARGET_STATUS && + c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) + (void) check_for_unit_attention(h, c); +} + +/* + * ioctl + */ +static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *argp) +{ + struct ctlr_info *h = sdev_to_hba(dev); + int rc; + + switch (cmd) { + case CCISS_DEREGDISK: + case CCISS_REGNEWDISK: + case CCISS_REGNEWD: + hpsa_scan_start(h->scsi_host); + return 0; + case CCISS_GETPCIINFO: + return hpsa_getpciinfo_ioctl(h, argp); + case CCISS_GETDRIVVER: + return hpsa_getdrivver_ioctl(h, argp); + case CCISS_PASSTHRU: { + IOCTL_Command_struct iocommand; + + if (!argp) + return -EINVAL; + if (copy_from_user(&iocommand, argp, sizeof(iocommand))) + return -EFAULT; + if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) + return -EAGAIN; + rc = hpsa_passthru_ioctl(h, &iocommand); + atomic_inc(&h->passthru_cmds_avail); + if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand))) + rc = -EFAULT; + return rc; + } + case CCISS_BIG_PASSTHRU: { + BIG_IOCTL_Command_struct 
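/*
 * A sketch of the chunking done by the while (left) loop in
 * hpsa_big_passthru_ioctl() above; the helper is hypothetical, with
 * chunk_size standing in for ioc->malloc_size and max_chunks for
 * SG_ENTRIES_IN_CMD.
 *
 *	#include <stddef.h>
 *
 *	// Split a transfer of buf_size bytes into chunks of at most
 *	// chunk_size bytes, returning how many chunks were used.
 *	static size_t split_into_chunks(size_t buf_size, size_t chunk_size,
 *			size_t *chunk_len, size_t max_chunks)
 *	{
 *		size_t left = buf_size, n = 0;
 *
 *		while (left && n < max_chunks) {
 *			chunk_len[n] = left > chunk_size ? chunk_size : left;
 *			left -= chunk_len[n];
 *			n++;
 *		}
 *		return n;
 *	}
 *
 * The bounds checked before the loop (buf_size no larger than malloc_size
 * times SG_ENTRIES_IN_CMD) guarantee that every byte fits within
 * max_chunks scatter-gather entries, so the loop never leaves data behind.
 */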
ioc; + if (!argp) + return -EINVAL; + if (copy_from_user(&ioc, argp, sizeof(ioc))) + return -EFAULT; + if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) + return -EAGAIN; + rc = hpsa_big_passthru_ioctl(h, &ioc); + atomic_inc(&h->passthru_cmds_avail); + if (!rc && copy_to_user(argp, &ioc, sizeof(ioc))) + rc = -EFAULT; + return rc; + } + default: + return -ENOTTY; + } +} + +static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type) +{ + struct CommandList *c; + + c = cmd_alloc(h); + + /* fill_cmd can't fail here, no data buffer to map */ + (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, + RAID_CTLR_LUNID, TYPE_MSG); + c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ + c->waiting = NULL; + enqueue_cmd_and_start_io(h, c); + /* Don't wait for completion, the reset won't complete. Don't free + * the command either. This is the last command we will send before + * re-initializing everything, so it doesn't matter and won't leak. + */ + return; +} + +static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, + void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, + int cmd_type) +{ + enum dma_data_direction dir = DMA_NONE; + + c->cmd_type = CMD_IOCTL_PEND; + c->scsi_cmd = SCSI_CMD_BUSY; + c->Header.ReplyQueue = 0; + if (buff != NULL && size > 0) { + c->Header.SGList = 1; + c->Header.SGTotal = cpu_to_le16(1); + } else { + c->Header.SGList = 0; + c->Header.SGTotal = cpu_to_le16(0); + } + memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); + + if (cmd_type == TYPE_CMD) { + switch (cmd) { + case HPSA_INQUIRY: + /* are we trying to read a vital product page */ + if (page_code & VPD_PAGE) { + c->Request.CDB[1] = 0x01; + c->Request.CDB[2] = (page_code & 0xff); + } + c->Request.CDBLen = 6; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + c->Request.CDB[0] = HPSA_INQUIRY; + c->Request.CDB[4] = size & 0xFF; + break; + case RECEIVE_DIAGNOSTIC: + c->Request.CDBLen = 6; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + c->Request.CDB[0] = cmd; + c->Request.CDB[1] = 1; + c->Request.CDB[2] = 1; + c->Request.CDB[3] = (size >> 8) & 0xFF; + c->Request.CDB[4] = size & 0xFF; + break; + case HPSA_REPORT_LOG: + case HPSA_REPORT_PHYS: + /* Talking to controller so It's a physical command + mode = 00 target = 0. Nothing to write. 
+ */ + c->Request.CDBLen = 12; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + c->Request.CDB[0] = cmd; + c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ + c->Request.CDB[7] = (size >> 16) & 0xFF; + c->Request.CDB[8] = (size >> 8) & 0xFF; + c->Request.CDB[9] = size & 0xFF; + break; + case BMIC_SENSE_DIAG_OPTIONS: + c->Request.CDBLen = 16; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + /* Spec says this should be BMIC_WRITE */ + c->Request.CDB[0] = BMIC_READ; + c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS; + break; + case BMIC_SET_DIAG_OPTIONS: + c->Request.CDBLen = 16; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, + ATTR_SIMPLE, XFER_WRITE); + c->Request.Timeout = 0; + c->Request.CDB[0] = BMIC_WRITE; + c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS; + break; + case HPSA_CACHE_FLUSH: + c->Request.CDBLen = 12; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, + ATTR_SIMPLE, XFER_WRITE); + c->Request.Timeout = 0; + c->Request.CDB[0] = BMIC_WRITE; + c->Request.CDB[6] = BMIC_CACHE_FLUSH; + c->Request.CDB[7] = (size >> 8) & 0xFF; + c->Request.CDB[8] = size & 0xFF; + break; + case TEST_UNIT_READY: + c->Request.CDBLen = 6; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); + c->Request.Timeout = 0; + break; + case HPSA_GET_RAID_MAP: + c->Request.CDBLen = 12; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + c->Request.CDB[0] = HPSA_CISS_READ; + c->Request.CDB[1] = cmd; + c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ + c->Request.CDB[7] = (size >> 16) & 0xFF; + c->Request.CDB[8] = (size >> 8) & 0xFF; + c->Request.CDB[9] = size & 0xFF; + break; + case BMIC_SENSE_CONTROLLER_PARAMETERS: + c->Request.CDBLen = 10; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + c->Request.CDB[0] = BMIC_READ; + c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; + c->Request.CDB[7] = (size >> 16) & 0xFF; + c->Request.CDB[8] = (size >> 8) & 0xFF; + break; + case BMIC_IDENTIFY_PHYSICAL_DEVICE: + c->Request.CDBLen = 10; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + c->Request.CDB[0] = BMIC_READ; + c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE; + c->Request.CDB[7] = (size >> 16) & 0xFF; + c->Request.CDB[8] = (size >> 8) & 0XFF; + break; + case BMIC_SENSE_SUBSYSTEM_INFORMATION: + c->Request.CDBLen = 10; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + c->Request.CDB[0] = BMIC_READ; + c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION; + c->Request.CDB[7] = (size >> 16) & 0xFF; + c->Request.CDB[8] = (size >> 8) & 0XFF; + break; + case BMIC_SENSE_STORAGE_BOX_PARAMS: + c->Request.CDBLen = 10; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + c->Request.CDB[0] = BMIC_READ; + c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS; + c->Request.CDB[7] = (size >> 16) & 0xFF; + c->Request.CDB[8] = (size >> 8) & 0XFF; + break; + case BMIC_IDENTIFY_CONTROLLER: + c->Request.CDBLen = 10; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + c->Request.CDB[0] = BMIC_READ; + c->Request.CDB[1] = 0; + c->Request.CDB[2] = 0; + c->Request.CDB[3] = 0; + c->Request.CDB[4] = 0; + c->Request.CDB[5] = 0; + c->Request.CDB[6] = 
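/*
 * A sketch of the MSB-first length encoding that the TYPE_CMD cases above
 * hand-roll byte by byte; the helper names are hypothetical (in-kernel
 * code would normally reach for put_unaligned_be32() and friends).
 *
 *	#include <stdint.h>
 *
 *	// Store a 32-bit allocation length big-endian at cdb[off..off+3],
 *	// e.g. bytes 6-9 of the 12-byte HPSA_REPORT_LOG / HPSA_GET_RAID_MAP
 *	// CDBs built above.
 *	static void cdb_put_be32(uint8_t *cdb, unsigned int off, uint32_t len)
 *	{
 *		cdb[off + 0] = (len >> 24) & 0xFF;	// MSB
 *		cdb[off + 1] = (len >> 16) & 0xFF;
 *		cdb[off + 2] = (len >> 8) & 0xFF;
 *		cdb[off + 3] = len & 0xFF;		// LSB
 *	}
 *
 *	// 16-bit variant, e.g. bytes 7-8 of the HPSA_CACHE_FLUSH CDB.
 *	static void cdb_put_be16(uint8_t *cdb, unsigned int off, uint16_t len)
 *	{
 *		cdb[off + 0] = (len >> 8) & 0xFF;
 *		cdb[off + 1] = len & 0xFF;
 *	}
 *
 * The READ_10/READ_16 CDBs built for ioaccel requests earlier in this file
 * follow the same big-endian convention for both the LBA and the block
 * count.
 */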
BMIC_IDENTIFY_CONTROLLER; + c->Request.CDB[7] = (size >> 16) & 0xFF; + c->Request.CDB[8] = (size >> 8) & 0XFF; + c->Request.CDB[9] = 0; + break; + default: + dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); + BUG(); + } + } else if (cmd_type == TYPE_MSG) { + switch (cmd) { + + case HPSA_PHYS_TARGET_RESET: + c->Request.CDBLen = 16; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); + c->Request.Timeout = 0; /* Don't time out */ + memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); + c->Request.CDB[0] = HPSA_RESET; + c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE; + /* Physical target reset needs no control bytes 4-7*/ + c->Request.CDB[4] = 0x00; + c->Request.CDB[5] = 0x00; + c->Request.CDB[6] = 0x00; + c->Request.CDB[7] = 0x00; + break; + case HPSA_DEVICE_RESET_MSG: + c->Request.CDBLen = 16; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); + c->Request.Timeout = 0; /* Don't time out */ + memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); + c->Request.CDB[0] = cmd; + c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; + /* If bytes 4-7 are zero, it means reset the */ + /* LunID device */ + c->Request.CDB[4] = 0x00; + c->Request.CDB[5] = 0x00; + c->Request.CDB[6] = 0x00; + c->Request.CDB[7] = 0x00; + break; + default: + dev_warn(&h->pdev->dev, "unknown message type %d\n", + cmd); + BUG(); + } + } else { + dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); + BUG(); + } + + switch (GET_DIR(c->Request.type_attr_dir)) { + case XFER_READ: + dir = DMA_FROM_DEVICE; + break; + case XFER_WRITE: + dir = DMA_TO_DEVICE; + break; + case XFER_NONE: + dir = DMA_NONE; + break; + default: + dir = DMA_BIDIRECTIONAL; + } + if (hpsa_map_one(h->pdev, c, buff, size, dir)) + return -1; + return 0; +} + +/* + * Map (physical) PCI mem into (virtual) kernel space + */ +static void __iomem *remap_pci_mem(ulong base, ulong size) +{ + ulong page_base = ((ulong) base) & PAGE_MASK; + ulong page_offs = ((ulong) base) - page_base; + void __iomem *page_remapped = ioremap(page_base, + page_offs + size); + + return page_remapped ? (page_remapped + page_offs) : NULL; +} + +static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) +{ + return h->access.command_completed(h, q); +} + +static inline bool interrupt_pending(struct ctlr_info *h) +{ + return h->access.intr_pending(h); +} + +static inline long interrupt_not_for_us(struct ctlr_info *h) +{ + return (h->access.intr_pending(h) == 0) || + (h->interrupts_enabled == 0); +} + +static inline int bad_tag(struct ctlr_info *h, u32 tag_index, + u32 raw_tag) +{ + if (unlikely(tag_index >= h->nr_cmds)) { + dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); + return 1; + } + return 0; +} + +static inline void finish_cmd(struct CommandList *c) +{ + dial_up_lockup_detection_on_fw_flash_complete(c->h, c); + if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI + || c->cmd_type == CMD_IOACCEL2)) + complete_scsi_command(c); + else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF) + complete(c->waiting); +} + +/* process completion of an indexed ("direct lookup") command */ +static inline void process_indexed_cmd(struct ctlr_info *h, + u32 raw_tag) +{ + u32 tag_index; + struct CommandList *c; + + tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT; + if (!bad_tag(h, tag_index, raw_tag)) { + c = h->cmd_pool + tag_index; + finish_cmd(c); + } +} + +/* Some controllers, like p400, will give us one interrupt + * after a soft reset, even if we turned interrupts off. 
+ * Only need to check for this in the hpsa_xxx_discard_completions + * functions. + */ +static int ignore_bogus_interrupt(struct ctlr_info *h) +{ + if (likely(!reset_devices)) + return 0; + + if (likely(h->interrupts_enabled)) + return 0; + + dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " + "(known firmware bug.) Ignoring.\n"); + + return 1; +} + +/* + * Convert &h->q[x] (passed to interrupt handlers) back to h. + * Relies on (h-q[x] == x) being true for x such that + * 0 <= x < MAX_REPLY_QUEUES. + */ +static struct ctlr_info *queue_to_hba(u8 *queue) +{ + return container_of((queue - *queue), struct ctlr_info, q[0]); +} + +static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue) +{ + struct ctlr_info *h = queue_to_hba(queue); + u8 q = *(u8 *) queue; + u32 raw_tag; + + if (ignore_bogus_interrupt(h)) + return IRQ_NONE; + + if (interrupt_not_for_us(h)) + return IRQ_NONE; + h->last_intr_timestamp = get_jiffies_64(); + while (interrupt_pending(h)) { + raw_tag = get_next_completion(h, q); + while (raw_tag != FIFO_EMPTY) + raw_tag = next_command(h, q); + } + return IRQ_HANDLED; +} + +static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue) +{ + struct ctlr_info *h = queue_to_hba(queue); + u32 raw_tag; + u8 q = *(u8 *) queue; + + if (ignore_bogus_interrupt(h)) + return IRQ_NONE; + + h->last_intr_timestamp = get_jiffies_64(); + raw_tag = get_next_completion(h, q); + while (raw_tag != FIFO_EMPTY) + raw_tag = next_command(h, q); + return IRQ_HANDLED; +} + +static irqreturn_t do_hpsa_intr_intx(int irq, void *queue) +{ + struct ctlr_info *h = queue_to_hba((u8 *) queue); + u32 raw_tag; + u8 q = *(u8 *) queue; + + if (interrupt_not_for_us(h)) + return IRQ_NONE; + h->last_intr_timestamp = get_jiffies_64(); + while (interrupt_pending(h)) { + raw_tag = get_next_completion(h, q); + while (raw_tag != FIFO_EMPTY) { + process_indexed_cmd(h, raw_tag); + raw_tag = next_command(h, q); + } + } + return IRQ_HANDLED; +} + +static irqreturn_t do_hpsa_intr_msi(int irq, void *queue) +{ + struct ctlr_info *h = queue_to_hba(queue); + u32 raw_tag; + u8 q = *(u8 *) queue; + + h->last_intr_timestamp = get_jiffies_64(); + raw_tag = get_next_completion(h, q); + while (raw_tag != FIFO_EMPTY) { + process_indexed_cmd(h, raw_tag); + raw_tag = next_command(h, q); + } + return IRQ_HANDLED; +} + +/* Send a message CDB to the firmware. Careful, this only works + * in simple mode, not performant mode due to the tag lookup. + * We only ever use this immediately after a controller reset. + */ +static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, + unsigned char type) +{ + struct Command { + struct CommandListHeader CommandHeader; + struct RequestBlock Request; + struct ErrDescriptor ErrorDescriptor; + }; + struct Command *cmd; + static const size_t cmd_sz = sizeof(*cmd) + + sizeof(cmd->ErrorDescriptor); + dma_addr_t paddr64; + __le32 paddr32; + u32 tag; + void __iomem *vaddr; + int i, err; + + vaddr = pci_ioremap_bar(pdev, 0); + if (vaddr == NULL) + return -ENOMEM; + + /* The Inbound Post Queue only accepts 32-bit physical addresses for the + * CCISS commands, so they must be allocated from the lower 4GiB of + * memory. + */ + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + iounmap(vaddr); + return err; + } + + cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL); + if (cmd == NULL) { + iounmap(vaddr); + return -ENOMEM; + } + + /* This must fit, because of the 32-bit consistent DMA mask. 
Also, + * although there's no guarantee, we assume that the address is at + * least 4-byte aligned (most likely, it's page-aligned). + */ + paddr32 = cpu_to_le32(paddr64); + + cmd->CommandHeader.ReplyQueue = 0; + cmd->CommandHeader.SGList = 0; + cmd->CommandHeader.SGTotal = cpu_to_le16(0); + cmd->CommandHeader.tag = cpu_to_le64(paddr64); + memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); + + cmd->Request.CDBLen = 16; + cmd->Request.type_attr_dir = + TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE); + cmd->Request.Timeout = 0; /* Don't time out */ + cmd->Request.CDB[0] = opcode; + cmd->Request.CDB[1] = type; + memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ + cmd->ErrorDescriptor.Addr = + cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd))); + cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo)); + + writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET); + + for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { + tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); + if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64) + break; + msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); + } + + iounmap(vaddr); + + /* we leak the DMA buffer here ... no choice since the controller could + * still complete the command. + */ + if (i == HPSA_MSG_SEND_RETRY_LIMIT) { + dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", + opcode, type); + return -ETIMEDOUT; + } + + dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64); + + if (tag & HPSA_ERROR_BIT) { + dev_err(&pdev->dev, "controller message %02x:%02x failed\n", + opcode, type); + return -EIO; + } + + dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", + opcode, type); + return 0; +} + +#define hpsa_noop(p) hpsa_message(p, 3, 0) + +static int hpsa_controller_hard_reset(struct pci_dev *pdev, + void __iomem *vaddr, u32 use_doorbell) +{ + + if (use_doorbell) { + /* For everything after the P600, the PCI power state method + * of resetting the controller doesn't work, so we have this + * other way using the doorbell register. + */ + dev_info(&pdev->dev, "using doorbell to reset controller\n"); + writel(use_doorbell, vaddr + SA5_DOORBELL); + + /* PMC hardware guys tell us we need a 10 second delay after + * doorbell reset and before any attempt to talk to the board + * at all to ensure that this actually works and doesn't fall + * over in some weird corner cases. + */ + msleep(10000); + } else { /* Try to do it the PCI power state way */ + + /* Quoting from the Open CISS Specification: "The Power + * Management Control/Status Register (CSR) controls the power + * state of the device. The normal operating state is D0, + * CSR=00h. The software off state is D3, CSR=03h. To reset + * the controller, place the interface device in D3 then to D0, + * this causes a secondary PCI reset which will reset the + * controller." */ + + int rc = 0; + + dev_info(&pdev->dev, "using PCI PM to reset controller\n"); + + /* enter the D3hot power management state */ + rc = pci_set_power_state(pdev, PCI_D3hot); + if (rc) + return rc; + + msleep(500); + + /* enter the D0 power management state */ + rc = pci_set_power_state(pdev, PCI_D0); + if (rc) + return rc; + + /* + * The P600 requires a small delay when changing states. + * Otherwise we may think the board did not reset and we bail. + * This for kdump only and is particular to the P600. 
+ */ + msleep(500); + } + return 0; +} + +static void init_driver_version(char *driver_version, int len) +{ + memset(driver_version, 0, len); + strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1); +} + +static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable) +{ + char *driver_version; + int i, size = sizeof(cfgtable->driver_version); + + driver_version = kmalloc(size, GFP_KERNEL); + if (!driver_version) + return -ENOMEM; + + init_driver_version(driver_version, size); + for (i = 0; i < size; i++) + writeb(driver_version[i], &cfgtable->driver_version[i]); + kfree(driver_version); + return 0; +} + +static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable, + unsigned char *driver_ver) +{ + int i; + + for (i = 0; i < sizeof(cfgtable->driver_version); i++) + driver_ver[i] = readb(&cfgtable->driver_version[i]); +} + +static int controller_reset_failed(struct CfgTable __iomem *cfgtable) +{ + + char *driver_ver, *old_driver_ver; + int rc, size = sizeof(cfgtable->driver_version); + + old_driver_ver = kmalloc_array(2, size, GFP_KERNEL); + if (!old_driver_ver) + return -ENOMEM; + driver_ver = old_driver_ver + size; + + /* After a reset, the 32 bytes of "driver version" in the cfgtable + * should have been changed, otherwise we know the reset failed. + */ + init_driver_version(old_driver_ver, size); + read_driver_ver_from_cfgtable(cfgtable, driver_ver); + rc = !memcmp(driver_ver, old_driver_ver, size); + kfree(old_driver_ver); + return rc; +} +/* This does a hard reset of the controller using PCI power management + * states or the using the doorbell register. + */ +static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id) +{ + u64 cfg_offset; + u32 cfg_base_addr; + u64 cfg_base_addr_index; + void __iomem *vaddr; + unsigned long paddr; + u32 misc_fw_support; + int rc; + struct CfgTable __iomem *cfgtable; + u32 use_doorbell; + u16 command_register; + + /* For controllers as old as the P600, this is very nearly + * the same thing as + * + * pci_save_state(pci_dev); + * pci_set_power_state(pci_dev, PCI_D3hot); + * pci_set_power_state(pci_dev, PCI_D0); + * pci_restore_state(pci_dev); + * + * For controllers newer than the P600, the pci power state + * method of resetting doesn't work so we have another way + * using the doorbell register. + */ + + if (!ctlr_is_resettable(board_id)) { + dev_warn(&pdev->dev, "Controller not resettable\n"); + return -ENODEV; + } + + /* if controller is soft- but not hard resettable... */ + if (!ctlr_is_hard_resettable(board_id)) + return -ENOTSUPP; /* try soft reset later. */ + + /* Save the PCI command register */ + pci_read_config_word(pdev, 4, &command_register); + pci_save_state(pdev); + + /* find the first memory BAR, so we can find the cfg table */ + rc = hpsa_pci_find_memory_BAR(pdev, &paddr); + if (rc) + return rc; + vaddr = remap_pci_mem(paddr, 0x250); + if (!vaddr) + return -ENOMEM; + + /* find cfgtable in order to check if reset via doorbell is supported */ + rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, + &cfg_base_addr_index, &cfg_offset); + if (rc) + goto unmap_vaddr; + cfgtable = remap_pci_mem(pci_resource_start(pdev, + cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); + if (!cfgtable) { + rc = -ENOMEM; + goto unmap_vaddr; + } + rc = write_driver_ver_to_cfgtable(cfgtable); + if (rc) + goto unmap_cfgtable; + + /* If reset via doorbell register is supported, use that. + * There are two such methods. Favor the newest method. 
+ */ + misc_fw_support = readl(&cfgtable->misc_fw_support); + use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; + if (use_doorbell) { + use_doorbell = DOORBELL_CTLR_RESET2; + } else { + use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; + if (use_doorbell) { + dev_warn(&pdev->dev, + "Soft reset not supported. Firmware update is required.\n"); + rc = -ENOTSUPP; /* try soft reset */ + goto unmap_cfgtable; + } + } + + rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); + if (rc) + goto unmap_cfgtable; + + pci_restore_state(pdev); + pci_write_config_word(pdev, 4, command_register); + + /* Some devices (notably the HP Smart Array 5i Controller) + need a little pause here */ + msleep(HPSA_POST_RESET_PAUSE_MSECS); + + rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); + if (rc) { + dev_warn(&pdev->dev, + "Failed waiting for board to become ready after hard reset\n"); + goto unmap_cfgtable; + } + + rc = controller_reset_failed(vaddr); + if (rc < 0) + goto unmap_cfgtable; + if (rc) { + dev_warn(&pdev->dev, "Unable to successfully reset " + "controller. Will try soft reset.\n"); + rc = -ENOTSUPP; + } else { + dev_info(&pdev->dev, "board ready after hard reset.\n"); + } + +unmap_cfgtable: + iounmap(cfgtable); + +unmap_vaddr: + iounmap(vaddr); + return rc; +} + +/* + * We cannot read the structure directly, for portability we must use + * the io functions. + * This is for debug only. + */ +static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb) +{ +#ifdef HPSA_DEBUG + int i; + char temp_name[17]; + + dev_info(dev, "Controller Configuration information\n"); + dev_info(dev, "------------------------------------\n"); + for (i = 0; i < 4; i++) + temp_name[i] = readb(&(tb->Signature[i])); + temp_name[4] = '\0'; + dev_info(dev, " Signature = %s\n", temp_name); + dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); + dev_info(dev, " Transport methods supported = 0x%x\n", + readl(&(tb->TransportSupport))); + dev_info(dev, " Transport methods active = 0x%x\n", + readl(&(tb->TransportActive))); + dev_info(dev, " Requested transport Method = 0x%x\n", + readl(&(tb->HostWrite.TransportRequest))); + dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", + readl(&(tb->HostWrite.CoalIntDelay))); + dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", + readl(&(tb->HostWrite.CoalIntCount))); + dev_info(dev, " Max outstanding commands = %d\n", + readl(&(tb->CmdsOutMax))); + dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); + for (i = 0; i < 16; i++) + temp_name[i] = readb(&(tb->ServerName[i])); + temp_name[16] = '\0'; + dev_info(dev, " Server Name = %s\n", temp_name); + dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", + readl(&(tb->HeartBeat))); +#endif /* HPSA_DEBUG */ +} + +static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) +{ + int i, offset, mem_type, bar_type; + + if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? 
*/ + return 0; + offset = 0; + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; + if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) + offset += 4; + else { + mem_type = pci_resource_flags(pdev, i) & + PCI_BASE_ADDRESS_MEM_TYPE_MASK; + switch (mem_type) { + case PCI_BASE_ADDRESS_MEM_TYPE_32: + case PCI_BASE_ADDRESS_MEM_TYPE_1M: + offset += 4; /* 32 bit */ + break; + case PCI_BASE_ADDRESS_MEM_TYPE_64: + offset += 8; + break; + default: /* reserved in PCI 2.2 */ + dev_warn(&pdev->dev, + "base address is invalid\n"); + return -1; + } + } + if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) + return i + 1; + } + return -1; +} + +static void hpsa_disable_interrupt_mode(struct ctlr_info *h) +{ + pci_free_irq_vectors(h->pdev); + h->msix_vectors = 0; +} + +static void hpsa_setup_reply_map(struct ctlr_info *h) +{ + const struct cpumask *mask; + unsigned int queue, cpu; + + for (queue = 0; queue < h->msix_vectors; queue++) { + mask = pci_irq_get_affinity(h->pdev, queue); + if (!mask) + goto fallback; + + for_each_cpu(cpu, mask) + h->reply_map[cpu] = queue; + } + return; + +fallback: + for_each_possible_cpu(cpu) + h->reply_map[cpu] = 0; +} + +/* If MSI/MSI-X is supported by the kernel we will try to enable it on + * controllers that are capable. If not, we use legacy INTx mode. + */ +static int hpsa_interrupt_mode(struct ctlr_info *h) +{ + unsigned int flags = PCI_IRQ_LEGACY; + int ret; + + /* Some boards advertise MSI but don't really support it */ + switch (h->board_id) { + case 0x40700E11: + case 0x40800E11: + case 0x40820E11: + case 0x40830E11: + break; + default: + ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); + if (ret > 0) { + h->msix_vectors = ret; + return 0; + } + + flags |= PCI_IRQ_MSI; + break; + } + + ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags); + if (ret < 0) + return ret; + return 0; +} + +static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id, + bool *legacy_board) +{ + int i; + u32 subsystem_vendor_id, subsystem_device_id; + + subsystem_vendor_id = pdev->subsystem_vendor; + subsystem_device_id = pdev->subsystem_device; + *board_id = ((subsystem_device_id << 16) & 0xffff0000) | + subsystem_vendor_id; + + if (legacy_board) + *legacy_board = false; + for (i = 0; i < ARRAY_SIZE(products); i++) + if (*board_id == products[i].board_id) { + if (products[i].access != &SA5A_access && + products[i].access != &SA5B_access) + return i; + dev_warn(&pdev->dev, + "legacy board ID: 0x%08x\n", + *board_id); + if (legacy_board) + *legacy_board = true; + return i; + } + + dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id); + if (legacy_board) + *legacy_board = true; + return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ +} + +static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, + unsigned long *memory_bar) +{ + int i; + + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { + /* addressing mode bits already removed */ + *memory_bar = pci_resource_start(pdev, i); + dev_dbg(&pdev->dev, "memory BAR = %lx\n", + *memory_bar); + return 0; + } + dev_warn(&pdev->dev, "no memory BAR found\n"); + return -ENODEV; +} + +static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, + int wait_for_ready) +{ + int i, iterations; + u32 scratchpad; + if (wait_for_ready) + iterations = HPSA_BOARD_READY_ITERATIONS; + else + iterations = HPSA_BOARD_NOT_READY_ITERATIONS; + + for (i = 0; i < iterations; i++) 
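/*
 * A simplified sketch of the offset walk performed by find_PCI_BAR_index()
 * above. Each I/O or 32-bit memory BAR occupies 4 bytes of config space
 * starting at PCI_BASE_ADDRESS_0, while a 64-bit memory BAR occupies 8, so
 * the slot for a given config-space offset is found by accumulating those
 * widths. Names are hypothetical and the return convention is simplified
 * relative to the driver.
 *
 *	#include <stdbool.h>
 *	#include <stddef.h>
 *
 *	// is_64bit[i] is true when BAR slot i is a 64-bit memory BAR.
 *	// Returns the slot whose offset (relative to PCI_BASE_ADDRESS_0)
 *	// equals target, or -1 if no slot starts at that offset.
 *	static int bar_slot_for_offset(const bool *is_64bit, size_t nslots,
 *			unsigned int target)
 *	{
 *		unsigned int offset = 0;
 *
 *		for (size_t i = 0; i < nslots; i++) {
 *			if (offset == target)
 *				return (int)i;
 *			offset += is_64bit[i] ? 8 : 4;
 *		}
 *		return -1;
 *	}
 *
 * hpsa_find_cfg_addrs() uses this kind of lookup to turn the BAR address
 * reported by the controller into a PCI resource index before mapping the
 * config and transfer tables.
 */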
{ + scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); + if (wait_for_ready) { + if (scratchpad == HPSA_FIRMWARE_READY) + return 0; + } else { + if (scratchpad != HPSA_FIRMWARE_READY) + return 0; + } + msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); + } + dev_warn(&pdev->dev, "board not ready, timed out.\n"); + return -ENODEV; +} + +static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, + u32 *cfg_base_addr, u64 *cfg_base_addr_index, + u64 *cfg_offset) +{ + *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); + *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); + *cfg_base_addr &= (u32) 0x0000ffff; + *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); + if (*cfg_base_addr_index == -1) { + dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); + return -ENODEV; + } + return 0; +} + +static void hpsa_free_cfgtables(struct ctlr_info *h) +{ + if (h->transtable) { + iounmap(h->transtable); + h->transtable = NULL; + } + if (h->cfgtable) { + iounmap(h->cfgtable); + h->cfgtable = NULL; + } +} + +/* Find and map CISS config table and transfer table ++ * several items must be unmapped (freed) later ++ * */ +static int hpsa_find_cfgtables(struct ctlr_info *h) +{ + u64 cfg_offset; + u32 cfg_base_addr; + u64 cfg_base_addr_index; + u32 trans_offset; + int rc; + + rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, + &cfg_base_addr_index, &cfg_offset); + if (rc) + return rc; + h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, + cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); + if (!h->cfgtable) { + dev_err(&h->pdev->dev, "Failed mapping cfgtable\n"); + return -ENOMEM; + } + rc = write_driver_ver_to_cfgtable(h->cfgtable); + if (rc) + return rc; + /* Find performant mode table. */ + trans_offset = readl(&h->cfgtable->TransMethodOffset); + h->transtable = remap_pci_mem(pci_resource_start(h->pdev, + cfg_base_addr_index)+cfg_offset+trans_offset, + sizeof(*h->transtable)); + if (!h->transtable) { + dev_err(&h->pdev->dev, "Failed mapping transfer table\n"); + hpsa_free_cfgtables(h); + return -ENOMEM; + } + return 0; +} + +static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) +{ +#define MIN_MAX_COMMANDS 16 + BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS); + + h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands); + + /* Limit commands in memory limited kdump scenario. */ + if (reset_devices && h->max_commands > 32) + h->max_commands = 32; + + if (h->max_commands < MIN_MAX_COMMANDS) { + dev_warn(&h->pdev->dev, + "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n", + h->max_commands, + MIN_MAX_COMMANDS); + h->max_commands = MIN_MAX_COMMANDS; + } +} + +/* If the controller reports that the total max sg entries is greater than 512, + * then we know that chained SG blocks work. (Original smart arrays did not + * support chained SG blocks and would return zero for max sg entries.) + */ +static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h) +{ + return h->maxsgentries > 512; +} + +/* Interrogate the hardware for some limits: + * max commands, max SG elements without chaining, and with chaining, + * SG chain block size, etc. 
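+ * The limits come straight out of the controller's config table:
+ * MaxPerformantModeCommands, MaxScatterGatherElements and
+ * misc_fw_support are read below into h->nr_cmds, h->maxsgentries and
+ * h->fw_support, and h->chainsize and h->max_cmd_sg_entries are then
+ * derived from them.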
+ */ +static void hpsa_find_board_params(struct ctlr_info *h) +{ + hpsa_get_max_perf_mode_cmds(h); + h->nr_cmds = h->max_commands; + h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); + h->fw_support = readl(&(h->cfgtable->misc_fw_support)); + if (hpsa_supports_chained_sg_blocks(h)) { + /* Limit in-command s/g elements to 32 save dma'able memory. */ + h->max_cmd_sg_entries = 32; + h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; + h->maxsgentries--; /* save one for chain pointer */ + } else { + /* + * Original smart arrays supported at most 31 s/g entries + * embedded inline in the command (trying to use more + * would lock up the controller) + */ + h->max_cmd_sg_entries = 31; + h->maxsgentries = 31; /* default to traditional values */ + h->chainsize = 0; + } + + /* Find out what task management functions are supported and cache */ + h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); + if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) + dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); + if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) + dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); + if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)) + dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n"); +} + +static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) +{ + if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { + dev_err(&h->pdev->dev, "not a valid CISS config table\n"); + return false; + } + return true; +} + +static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) +{ + u32 driver_support; + + driver_support = readl(&(h->cfgtable->driver_support)); + /* Need to enable prefetch in the SCSI core for 6400 in x86 */ +#ifdef CONFIG_X86 + driver_support |= ENABLE_SCSI_PREFETCH; +#endif + driver_support |= ENABLE_UNIT_ATTN; + writel(driver_support, &(h->cfgtable->driver_support)); +} + +/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result + * in a prefetch beyond physical memory. + */ +static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) +{ + u32 dma_prefetch; + + if (h->board_id != 0x3225103C) + return; + dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); + dma_prefetch |= 0x8000; + writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); +} + +static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) +{ + int i; + u32 doorbell_value; + unsigned long flags; + /* wait until the clear_event_notify bit 6 is cleared by controller. */ + for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) { + spin_lock_irqsave(&h->lock, flags); + doorbell_value = readl(h->vaddr + SA5_DOORBELL); + spin_unlock_irqrestore(&h->lock, flags); + if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) + goto done; + /* delay and try again */ + msleep(CLEAR_EVENT_WAIT_INTERVAL); + } + return -ENODEV; +done: + return 0; +} + +static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h) +{ + int i; + u32 doorbell_value; + unsigned long flags; + + /* under certain very rare conditions, this can take awhile. + * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right + * as we enter this code.) 
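+ * The loop below returns success right away if a controller remove is
+ * in progress; otherwise it watches for CFGTBL_ChangeReq to clear in
+ * the SA5_DOORBELL register, sleeping MODE_CHANGE_WAIT_INTERVAL between
+ * reads for at most MAX_MODE_CHANGE_WAIT tries.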
+ */ + for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) { + if (h->remove_in_progress) + goto done; + spin_lock_irqsave(&h->lock, flags); + doorbell_value = readl(h->vaddr + SA5_DOORBELL); + spin_unlock_irqrestore(&h->lock, flags); + if (!(doorbell_value & CFGTBL_ChangeReq)) + goto done; + /* delay and try again */ + msleep(MODE_CHANGE_WAIT_INTERVAL); + } + return -ENODEV; +done: + return 0; +} + +/* return -ENODEV or other reason on error, 0 on success */ +static int hpsa_enter_simple_mode(struct ctlr_info *h) +{ + u32 trans_support; + + trans_support = readl(&(h->cfgtable->TransportSupport)); + if (!(trans_support & SIMPLE_MODE)) + return -ENOTSUPP; + + h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); + + /* Update the field, and then ring the doorbell */ + writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); + writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); + writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); + if (hpsa_wait_for_mode_change_ack(h)) + goto error; + print_cfg_table(&h->pdev->dev, h->cfgtable); + if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) + goto error; + h->transMethod = CFGTBL_Trans_Simple; + return 0; +error: + dev_err(&h->pdev->dev, "failed to enter simple mode\n"); + return -ENODEV; +} + +/* free items allocated or mapped by hpsa_pci_init */ +static void hpsa_free_pci_init(struct ctlr_info *h) +{ + hpsa_free_cfgtables(h); /* pci_init 4 */ + iounmap(h->vaddr); /* pci_init 3 */ + h->vaddr = NULL; + hpsa_disable_interrupt_mode(h); /* pci_init 2 */ + /* + * call pci_disable_device before pci_release_regions per + * Documentation/driver-api/pci/pci.rst + */ + pci_disable_device(h->pdev); /* pci_init 1 */ + pci_release_regions(h->pdev); /* pci_init 2 */ +} + +/* several items must be freed later */ +static int hpsa_pci_init(struct ctlr_info *h) +{ + int prod_index, err; + bool legacy_board; + + prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board); + if (prod_index < 0) + return prod_index; + h->product_name = products[prod_index].product_name; + h->access = *(products[prod_index].access); + h->legacy_board = legacy_board; + pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); + + err = pci_enable_device(h->pdev); + if (err) { + dev_err(&h->pdev->dev, "failed to enable PCI device\n"); + pci_disable_device(h->pdev); + return err; + } + + err = pci_request_regions(h->pdev, HPSA); + if (err) { + dev_err(&h->pdev->dev, + "failed to obtain PCI resources\n"); + pci_disable_device(h->pdev); + return err; + } + + pci_set_master(h->pdev); + + err = hpsa_interrupt_mode(h); + if (err) + goto clean1; + + /* setup mapping between CPU and reply queue */ + hpsa_setup_reply_map(h); + + err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); + if (err) + goto clean2; /* intmode+region, pci */ + h->vaddr = remap_pci_mem(h->paddr, 0x250); + if (!h->vaddr) { + dev_err(&h->pdev->dev, "failed to remap PCI mem\n"); + err = -ENOMEM; + goto clean2; /* intmode+region, pci */ + } + err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); + if (err) + goto clean3; /* vaddr, intmode+region, pci */ + err = hpsa_find_cfgtables(h); + if (err) + goto clean3; /* vaddr, intmode+region, pci */ + hpsa_find_board_params(h); + + if (!hpsa_CISS_signature_present(h)) { + err = -ENODEV; + goto clean4; /* cfgtables, vaddr, intmode+region, pci */ + } + hpsa_set_driver_support_bits(h); + hpsa_p600_dma_prefetch_quirk(h); + err = hpsa_enter_simple_mode(h); + if (err) + goto clean4; /* cfgtables, 
vaddr, intmode+region, pci */ + return 0; + +clean4: /* cfgtables, vaddr, intmode+region, pci */ + hpsa_free_cfgtables(h); +clean3: /* vaddr, intmode+region, pci */ + iounmap(h->vaddr); + h->vaddr = NULL; +clean2: /* intmode+region, pci */ + hpsa_disable_interrupt_mode(h); +clean1: + /* + * call pci_disable_device before pci_release_regions per + * Documentation/driver-api/pci/pci.rst + */ + pci_disable_device(h->pdev); + pci_release_regions(h->pdev); + return err; +} + +static void hpsa_hba_inquiry(struct ctlr_info *h) +{ + int rc; + +#define HBA_INQUIRY_BYTE_COUNT 64 + h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); + if (!h->hba_inquiry_data) + return; + rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, + h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); + if (rc != 0) { + kfree(h->hba_inquiry_data); + h->hba_inquiry_data = NULL; + } +} + +static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id) +{ + int rc, i; + void __iomem *vaddr; + + if (!reset_devices) + return 0; + + /* kdump kernel is loading, we don't know in which state is + * the pci interface. The dev->enable_cnt is equal zero + * so we call enable+disable, wait a while and switch it on. + */ + rc = pci_enable_device(pdev); + if (rc) { + dev_warn(&pdev->dev, "Failed to enable PCI device\n"); + return -ENODEV; + } + pci_disable_device(pdev); + msleep(260); /* a randomly chosen number */ + rc = pci_enable_device(pdev); + if (rc) { + dev_warn(&pdev->dev, "failed to enable device.\n"); + return -ENODEV; + } + + pci_set_master(pdev); + + vaddr = pci_ioremap_bar(pdev, 0); + if (vaddr == NULL) { + rc = -ENOMEM; + goto out_disable; + } + writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET); + iounmap(vaddr); + + /* Reset the controller with a PCI power-cycle or via doorbell */ + rc = hpsa_kdump_hard_reset_controller(pdev, board_id); + + /* -ENOTSUPP here means we cannot reset the controller + * but it's already (and still) up and running in + * "performant mode". Or, it might be 640x, which can't reset + * due to concerns about shared bbwc between 6402/6404 pair. + */ + if (rc) + goto out_disable; + + /* Now try to get the controller to respond to a no-op */ + dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n"); + for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { + if (hpsa_noop(pdev) == 0) + break; + else + dev_warn(&pdev->dev, "no-op failed%s\n", + (i < 11 ? 
"; re-trying" : "")); + } + +out_disable: + + pci_disable_device(pdev); + return rc; +} + +static void hpsa_free_cmd_pool(struct ctlr_info *h) +{ + bitmap_free(h->cmd_pool_bits); + h->cmd_pool_bits = NULL; + if (h->cmd_pool) { + dma_free_coherent(&h->pdev->dev, + h->nr_cmds * sizeof(struct CommandList), + h->cmd_pool, + h->cmd_pool_dhandle); + h->cmd_pool = NULL; + h->cmd_pool_dhandle = 0; + } + if (h->errinfo_pool) { + dma_free_coherent(&h->pdev->dev, + h->nr_cmds * sizeof(struct ErrorInfo), + h->errinfo_pool, + h->errinfo_pool_dhandle); + h->errinfo_pool = NULL; + h->errinfo_pool_dhandle = 0; + } +} + +static int hpsa_alloc_cmd_pool(struct ctlr_info *h) +{ + h->cmd_pool_bits = bitmap_zalloc(h->nr_cmds, GFP_KERNEL); + h->cmd_pool = dma_alloc_coherent(&h->pdev->dev, + h->nr_cmds * sizeof(*h->cmd_pool), + &h->cmd_pool_dhandle, GFP_KERNEL); + h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev, + h->nr_cmds * sizeof(*h->errinfo_pool), + &h->errinfo_pool_dhandle, GFP_KERNEL); + if ((h->cmd_pool_bits == NULL) + || (h->cmd_pool == NULL) + || (h->errinfo_pool == NULL)) { + dev_err(&h->pdev->dev, "out of memory in %s", __func__); + goto clean_up; + } + hpsa_preinitialize_commands(h); + return 0; +clean_up: + hpsa_free_cmd_pool(h); + return -ENOMEM; +} + +/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */ +static void hpsa_free_irqs(struct ctlr_info *h) +{ + int i; + int irq_vector = 0; + + if (hpsa_simple_mode) + irq_vector = h->intr_mode; + + if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) { + /* Single reply queue, only one irq to free */ + free_irq(pci_irq_vector(h->pdev, irq_vector), + &h->q[h->intr_mode]); + h->q[h->intr_mode] = 0; + return; + } + + for (i = 0; i < h->msix_vectors; i++) { + free_irq(pci_irq_vector(h->pdev, i), &h->q[i]); + h->q[i] = 0; + } + for (; i < MAX_REPLY_QUEUES; i++) + h->q[i] = 0; +} + +/* returns 0 on success; cleans up and returns -Enn on error */ +static int hpsa_request_irqs(struct ctlr_info *h, + irqreturn_t (*msixhandler)(int, void *), + irqreturn_t (*intxhandler)(int, void *)) +{ + int rc, i; + int irq_vector = 0; + + if (hpsa_simple_mode) + irq_vector = h->intr_mode; + + /* + * initialize h->q[x] = x so that interrupt handlers know which + * queue to process. + */ + for (i = 0; i < MAX_REPLY_QUEUES; i++) + h->q[i] = (u8) i; + + if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) { + /* If performant mode and MSI-X, use multiple reply queues */ + for (i = 0; i < h->msix_vectors; i++) { + sprintf(h->intrname[i], "%s-msix%d", h->devname, i); + rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler, + 0, h->intrname[i], + &h->q[i]); + if (rc) { + int j; + + dev_err(&h->pdev->dev, + "failed to get irq %d for %s\n", + pci_irq_vector(h->pdev, i), h->devname); + for (j = 0; j < i; j++) { + free_irq(pci_irq_vector(h->pdev, j), &h->q[j]); + h->q[j] = 0; + } + for (; j < MAX_REPLY_QUEUES; j++) + h->q[j] = 0; + return rc; + } + } + } else { + /* Use single reply pool */ + if (h->msix_vectors > 0 || h->pdev->msi_enabled) { + sprintf(h->intrname[0], "%s-msi%s", h->devname, + h->msix_vectors ? 
"x" : ""); + rc = request_irq(pci_irq_vector(h->pdev, irq_vector), + msixhandler, 0, + h->intrname[0], + &h->q[h->intr_mode]); + } else { + sprintf(h->intrname[h->intr_mode], + "%s-intx", h->devname); + rc = request_irq(pci_irq_vector(h->pdev, irq_vector), + intxhandler, IRQF_SHARED, + h->intrname[0], + &h->q[h->intr_mode]); + } + } + if (rc) { + dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", + pci_irq_vector(h->pdev, irq_vector), h->devname); + hpsa_free_irqs(h); + return -ENODEV; + } + return 0; +} + +static int hpsa_kdump_soft_reset(struct ctlr_info *h) +{ + int rc; + hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER); + + dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); + rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY); + if (rc) { + dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); + return rc; + } + + dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); + rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); + if (rc) { + dev_warn(&h->pdev->dev, "Board failed to become ready " + "after soft reset.\n"); + return rc; + } + + return 0; +} + +static void hpsa_free_reply_queues(struct ctlr_info *h) +{ + int i; + + for (i = 0; i < h->nreply_queues; i++) { + if (!h->reply_queue[i].head) + continue; + dma_free_coherent(&h->pdev->dev, + h->reply_queue_size, + h->reply_queue[i].head, + h->reply_queue[i].busaddr); + h->reply_queue[i].head = NULL; + h->reply_queue[i].busaddr = 0; + } + h->reply_queue_size = 0; +} + +static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) +{ + hpsa_free_performant_mode(h); /* init_one 7 */ + hpsa_free_sg_chain_blocks(h); /* init_one 6 */ + hpsa_free_cmd_pool(h); /* init_one 5 */ + hpsa_free_irqs(h); /* init_one 4 */ + scsi_host_put(h->scsi_host); /* init_one 3 */ + h->scsi_host = NULL; /* init_one 3 */ + hpsa_free_pci_init(h); /* init_one 2_5 */ + free_percpu(h->lockup_detected); /* init_one 2 */ + h->lockup_detected = NULL; /* init_one 2 */ + if (h->resubmit_wq) { + destroy_workqueue(h->resubmit_wq); /* init_one 1 */ + h->resubmit_wq = NULL; + } + if (h->rescan_ctlr_wq) { + destroy_workqueue(h->rescan_ctlr_wq); + h->rescan_ctlr_wq = NULL; + } + if (h->monitor_ctlr_wq) { + destroy_workqueue(h->monitor_ctlr_wq); + h->monitor_ctlr_wq = NULL; + } + + kfree(h); /* init_one 1 */ +} + +/* Called when controller lockup detected. 
*/ +static void fail_all_outstanding_cmds(struct ctlr_info *h) +{ + int i, refcount; + struct CommandList *c; + int failcount = 0; + + flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */ + for (i = 0; i < h->nr_cmds; i++) { + c = h->cmd_pool + i; + refcount = atomic_inc_return(&c->refcount); + if (refcount > 1) { + c->err_info->CommandStatus = CMD_CTLR_LOCKUP; + finish_cmd(c); + atomic_dec(&h->commands_outstanding); + failcount++; + } + cmd_free(h, c); + } + dev_warn(&h->pdev->dev, + "failed %d commands in fail_all\n", failcount); +} + +static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) +{ + int cpu; + + for_each_online_cpu(cpu) { + u32 *lockup_detected; + lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); + *lockup_detected = value; + } + wmb(); /* be sure the per-cpu variables are out to memory */ +} + +static void controller_lockup_detected(struct ctlr_info *h) +{ + unsigned long flags; + u32 lockup_detected; + + h->access.set_intr_mask(h, HPSA_INTR_OFF); + spin_lock_irqsave(&h->lock, flags); + lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); + if (!lockup_detected) { + /* no heartbeat, but controller gave us a zero. */ + dev_warn(&h->pdev->dev, + "lockup detected after %d but scratchpad register is zero\n", + h->heartbeat_sample_interval / HZ); + lockup_detected = 0xffffffff; + } + set_lockup_detected_for_all_cpus(h, lockup_detected); + spin_unlock_irqrestore(&h->lock, flags); + dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n", + lockup_detected, h->heartbeat_sample_interval / HZ); + if (lockup_detected == 0xffff0000) { + dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n"); + writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL); + } + pci_disable_device(h->pdev); + fail_all_outstanding_cmds(h); +} + +static int detect_controller_lockup(struct ctlr_info *h) +{ + u64 now; + u32 heartbeat; + unsigned long flags; + + now = get_jiffies_64(); + /* If we've received an interrupt recently, we're ok. */ + if (time_after64(h->last_intr_timestamp + + (h->heartbeat_sample_interval), now)) + return false; + + /* + * If we've already checked the heartbeat recently, we're ok. + * This could happen if someone sends us a signal. We + * otherwise don't care about signals in this thread. + */ + if (time_after64(h->last_heartbeat_timestamp + + (h->heartbeat_sample_interval), now)) + return false; + + /* If heartbeat has not changed since we last looked, we're not ok. */ + spin_lock_irqsave(&h->lock, flags); + heartbeat = readl(&h->cfgtable->HeartBeat); + spin_unlock_irqrestore(&h->lock, flags); + if (h->last_heartbeat == heartbeat) { + controller_lockup_detected(h); + return true; + } + + /* We're ok. */ + h->last_heartbeat = heartbeat; + h->last_heartbeat_timestamp = now; + return false; +} + +/* + * Set ioaccel status for all ioaccel volumes. + * + * Called from monitor controller worker (hpsa_event_monitor_worker) + * + * A Volume (or Volumes that comprise an Array set) may be undergoing a + * transformation, so we will be turning off ioaccel for all volumes that + * make up the Array. + */ +static void hpsa_set_ioaccel_status(struct ctlr_info *h) +{ + int rc; + int i; + u8 ioaccel_status; + unsigned char *buf; + struct hpsa_scsi_dev_t *device; + + if (!h) + return; + + buf = kmalloc(64, GFP_KERNEL); + if (!buf) + return; + + /* + * Run through current device list used during I/O requests. 
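+ * For each volume the LV ioaccel status VPD page is re-read: ioaccel is
+ * switched off immediately unless the controller reports the volume
+ * both configured and enabled for offload, while re-enabling is
+ * deferred to the next rescan so a fresh raid map is fetched before the
+ * accelerated path is used again.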
+ */ + for (i = 0; i < h->ndevices; i++) { + int offload_to_be_enabled = 0; + int offload_config = 0; + + device = h->dev[i]; + + if (!device) + continue; + if (!hpsa_vpd_page_supported(h, device->scsi3addr, + HPSA_VPD_LV_IOACCEL_STATUS)) + continue; + + memset(buf, 0, 64); + + rc = hpsa_scsi_do_inquiry(h, device->scsi3addr, + VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, + buf, 64); + if (rc != 0) + continue; + + ioaccel_status = buf[IOACCEL_STATUS_BYTE]; + + /* + * Check if offload is still configured on + */ + offload_config = + !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); + /* + * If offload is configured on, check to see if ioaccel + * needs to be enabled. + */ + if (offload_config) + offload_to_be_enabled = + !!(ioaccel_status & OFFLOAD_ENABLED_BIT); + + /* + * If ioaccel is to be re-enabled, re-enable later during the + * scan operation so the driver can get a fresh raidmap + * before turning ioaccel back on. + */ + if (offload_to_be_enabled) + continue; + + /* + * Immediately turn off ioaccel for any volume the + * controller tells us to. Some of the reasons could be: + * transformation - change to the LVs of an Array. + * degraded volume - component failure + */ + hpsa_turn_off_ioaccel_for_device(device); + } + + kfree(buf); +} + +static void hpsa_ack_ctlr_events(struct ctlr_info *h) +{ + char *event_type; + + if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) + return; + + /* Ask the controller to clear the events we're handling. */ + if ((h->transMethod & (CFGTBL_Trans_io_accel1 + | CFGTBL_Trans_io_accel2)) && + (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || + h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { + + if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) + event_type = "state change"; + if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) + event_type = "configuration change"; + /* Stop sending new RAID offload reqs via the IO accelerator */ + scsi_block_requests(h->scsi_host); + hpsa_set_ioaccel_status(h); + hpsa_drain_accel_commands(h); + /* Set 'accelerator path config change' bit */ + dev_warn(&h->pdev->dev, + "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n", + h->events, event_type); + writel(h->events, &(h->cfgtable->clear_event_notify)); + /* Set the "clear event notify field update" bit 6 */ + writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); + /* Wait until ctlr clears 'clear event notify field', bit 6 */ + hpsa_wait_for_clear_event_notify_ack(h); + scsi_unblock_requests(h->scsi_host); + } else { + /* Acknowledge controller notification events. */ + writel(h->events, &(h->cfgtable->clear_event_notify)); + writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); + hpsa_wait_for_clear_event_notify_ack(h); + } + return; +} + +/* Check a register on the controller to see if there are configuration + * changes (added/changed/removed logical drives, etc.) which mean that + * we should rescan the controller for devices. + * Also check flag for driver-initiated rescan. 
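+ * Returns 1 (and clears the flag) when a driver-initiated rescan was
+ * requested via drv_req_rescan; otherwise the event_notify word is read
+ * from the config table and any RESCAN_REQUIRED_EVENT_BITS trigger a
+ * rescan.  Controllers without MISC_FW_EVENT_NOTIFY support never
+ * request one this way.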
+ */ +static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) +{ + if (h->drv_req_rescan) { + h->drv_req_rescan = 0; + return 1; + } + + if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) + return 0; + + h->events = readl(&(h->cfgtable->event_notify)); + return h->events & RESCAN_REQUIRED_EVENT_BITS; +} + +/* + * Check if any of the offline devices have become ready + */ +static int hpsa_offline_devices_ready(struct ctlr_info *h) +{ + unsigned long flags; + struct offline_device_entry *d; + struct list_head *this, *tmp; + + spin_lock_irqsave(&h->offline_device_lock, flags); + list_for_each_safe(this, tmp, &h->offline_device_list) { + d = list_entry(this, struct offline_device_entry, + offline_list); + spin_unlock_irqrestore(&h->offline_device_lock, flags); + if (!hpsa_volume_offline(h, d->scsi3addr)) { + spin_lock_irqsave(&h->offline_device_lock, flags); + list_del(&d->offline_list); + spin_unlock_irqrestore(&h->offline_device_lock, flags); + return 1; + } + spin_lock_irqsave(&h->offline_device_lock, flags); + } + spin_unlock_irqrestore(&h->offline_device_lock, flags); + return 0; +} + +static int hpsa_luns_changed(struct ctlr_info *h) +{ + int rc = 1; /* assume there are changes */ + struct ReportLUNdata *logdev = NULL; + + /* if we can't find out if lun data has changed, + * assume that it has. + */ + + if (!h->lastlogicals) + return rc; + + logdev = kzalloc(sizeof(*logdev), GFP_KERNEL); + if (!logdev) + return rc; + + if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) { + dev_warn(&h->pdev->dev, + "report luns failed, can't track lun changes.\n"); + goto out; + } + if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) { + dev_info(&h->pdev->dev, + "Lun changes detected.\n"); + memcpy(h->lastlogicals, logdev, sizeof(*logdev)); + goto out; + } else + rc = 0; /* no changes detected. 
*/ +out: + kfree(logdev); + return rc; +} + +static void hpsa_perform_rescan(struct ctlr_info *h) +{ + struct Scsi_Host *sh = NULL; + unsigned long flags; + + /* + * Do the scan after the reset + */ + spin_lock_irqsave(&h->reset_lock, flags); + if (h->reset_in_progress) { + h->drv_req_rescan = 1; + spin_unlock_irqrestore(&h->reset_lock, flags); + return; + } + spin_unlock_irqrestore(&h->reset_lock, flags); + + sh = scsi_host_get(h->scsi_host); + if (sh != NULL) { + hpsa_scan_start(sh); + scsi_host_put(sh); + h->drv_req_rescan = 0; + } +} + +/* + * watch for controller events + */ +static void hpsa_event_monitor_worker(struct work_struct *work) +{ + struct ctlr_info *h = container_of(to_delayed_work(work), + struct ctlr_info, event_monitor_work); + unsigned long flags; + + spin_lock_irqsave(&h->lock, flags); + if (h->remove_in_progress) { + spin_unlock_irqrestore(&h->lock, flags); + return; + } + spin_unlock_irqrestore(&h->lock, flags); + + if (hpsa_ctlr_needs_rescan(h)) { + hpsa_ack_ctlr_events(h); + hpsa_perform_rescan(h); + } + + spin_lock_irqsave(&h->lock, flags); + if (!h->remove_in_progress) + queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work, + HPSA_EVENT_MONITOR_INTERVAL); + spin_unlock_irqrestore(&h->lock, flags); +} + +static void hpsa_rescan_ctlr_worker(struct work_struct *work) +{ + unsigned long flags; + struct ctlr_info *h = container_of(to_delayed_work(work), + struct ctlr_info, rescan_ctlr_work); + + spin_lock_irqsave(&h->lock, flags); + if (h->remove_in_progress) { + spin_unlock_irqrestore(&h->lock, flags); + return; + } + spin_unlock_irqrestore(&h->lock, flags); + + if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) { + hpsa_perform_rescan(h); + } else if (h->discovery_polling) { + if (hpsa_luns_changed(h)) { + dev_info(&h->pdev->dev, + "driver discovery polling rescan.\n"); + hpsa_perform_rescan(h); + } + } + spin_lock_irqsave(&h->lock, flags); + if (!h->remove_in_progress) + queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, + h->heartbeat_sample_interval); + spin_unlock_irqrestore(&h->lock, flags); +} + +static void hpsa_monitor_ctlr_worker(struct work_struct *work) +{ + unsigned long flags; + struct ctlr_info *h = container_of(to_delayed_work(work), + struct ctlr_info, monitor_ctlr_work); + + detect_controller_lockup(h); + if (lockup_detected(h)) + return; + + spin_lock_irqsave(&h->lock, flags); + if (!h->remove_in_progress) + queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work, + h->heartbeat_sample_interval); + spin_unlock_irqrestore(&h->lock, flags); +} + +static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, + char *name) +{ + struct workqueue_struct *wq = NULL; + + wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr); + if (!wq) + dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name); + + return wq; +} + +static void hpda_free_ctlr_info(struct ctlr_info *h) +{ + kfree(h->reply_map); + kfree(h); +} + +static struct ctlr_info *hpda_alloc_ctlr_info(void) +{ + struct ctlr_info *h; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return NULL; + + h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL); + if (!h->reply_map) { + kfree(h); + return NULL; + } + return h; +} + +static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int rc; + struct ctlr_info *h; + int try_soft_reset = 0; + unsigned long flags; + u32 board_id; + + if (number_of_controllers == 0) + printk(KERN_INFO DRIVER_NAME "\n"); + + rc = hpsa_lookup_board_id(pdev, &board_id, 
NULL); + if (rc < 0) { + dev_warn(&pdev->dev, "Board ID not found\n"); + return rc; + } + + rc = hpsa_init_reset_devices(pdev, board_id); + if (rc) { + if (rc != -ENOTSUPP) + return rc; + /* If the reset fails in a particular way (it has no way to do + * a proper hard reset, so returns -ENOTSUPP) we can try to do + * a soft reset once we get the controller configured up to the + * point that it can accept a command. + */ + try_soft_reset = 1; + rc = 0; + } + +reinit_after_soft_reset: + + /* Command structures must be aligned on a 32-byte boundary because + * the 5 lower bits of the address are used by the hardware. and by + * the driver. See comments in hpsa.h for more info. + */ + BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); + h = hpda_alloc_ctlr_info(); + if (!h) { + dev_err(&pdev->dev, "Failed to allocate controller head\n"); + return -ENOMEM; + } + + h->pdev = pdev; + + h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; + INIT_LIST_HEAD(&h->offline_device_list); + spin_lock_init(&h->lock); + spin_lock_init(&h->offline_device_lock); + spin_lock_init(&h->scan_lock); + spin_lock_init(&h->reset_lock); + atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); + + /* Allocate and clear per-cpu variable lockup_detected */ + h->lockup_detected = alloc_percpu(u32); + if (!h->lockup_detected) { + dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n"); + rc = -ENOMEM; + goto clean1; /* aer/h */ + } + set_lockup_detected_for_all_cpus(h, 0); + + rc = hpsa_pci_init(h); + if (rc) + goto clean2; /* lu, aer/h */ + + /* relies on h-> settings made by hpsa_pci_init, including + * interrupt_mode h->intr */ + rc = hpsa_scsi_host_alloc(h); + if (rc) + goto clean2_5; /* pci, lu, aer/h */ + + sprintf(h->devname, HPSA "%d", h->scsi_host->host_no); + h->ctlr = number_of_controllers; + number_of_controllers++; + + /* configure PCI DMA stuff */ + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (rc != 0) { + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (rc != 0) { + dev_err(&pdev->dev, "no suitable DMA available\n"); + goto clean3; /* shost, pci, lu, aer/h */ + } + } + + /* make sure the board interrupts are off */ + h->access.set_intr_mask(h, HPSA_INTR_OFF); + + rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx); + if (rc) + goto clean3; /* shost, pci, lu, aer/h */ + rc = hpsa_alloc_cmd_pool(h); + if (rc) + goto clean4; /* irq, shost, pci, lu, aer/h */ + rc = hpsa_alloc_sg_chain_blocks(h); + if (rc) + goto clean5; /* cmd, irq, shost, pci, lu, aer/h */ + init_waitqueue_head(&h->scan_wait_queue); + init_waitqueue_head(&h->event_sync_wait_queue); + mutex_init(&h->reset_mutex); + h->scan_finished = 1; /* no scan currently in progress */ + h->scan_waiting = 0; + + pci_set_drvdata(pdev, h); + h->ndevices = 0; + + spin_lock_init(&h->devlock); + rc = hpsa_put_ctlr_into_performant_mode(h); + if (rc) + goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */ + + /* create the resubmit workqueue */ + h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); + if (!h->rescan_ctlr_wq) { + rc = -ENOMEM; + goto clean7; + } + + h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit"); + if (!h->resubmit_wq) { + rc = -ENOMEM; + goto clean7; /* aer/h */ + } + + h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor"); + if (!h->monitor_ctlr_wq) { + rc = -ENOMEM; + goto clean7; + } + + /* + * At this point, the controller is ready to take commands. 
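+ * (PCI init, the command and SG-chain pools, the IRQ handlers and
+ * performant mode have all been set up by now.)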
+ * Now, if reset_devices and the hard reset didn't work, try + * the soft reset and see if that works. + */ + if (try_soft_reset) { + + /* This is kind of gross. We may or may not get a completion + * from the soft reset command, and if we do, then the value + * from the fifo may or may not be valid. So, we wait 10 secs + * after the reset throwing away any completions we get during + * that time. Unregister the interrupt handler and register + * fake ones to scoop up any residual completions. + */ + spin_lock_irqsave(&h->lock, flags); + h->access.set_intr_mask(h, HPSA_INTR_OFF); + spin_unlock_irqrestore(&h->lock, flags); + hpsa_free_irqs(h); + rc = hpsa_request_irqs(h, hpsa_msix_discard_completions, + hpsa_intx_discard_completions); + if (rc) { + dev_warn(&h->pdev->dev, + "Failed to request_irq after soft reset.\n"); + /* + * cannot goto clean7 or free_irqs will be called + * again. Instead, do its work + */ + hpsa_free_performant_mode(h); /* clean7 */ + hpsa_free_sg_chain_blocks(h); /* clean6 */ + hpsa_free_cmd_pool(h); /* clean5 */ + /* + * skip hpsa_free_irqs(h) clean4 since that + * was just called before request_irqs failed + */ + goto clean3; + } + + rc = hpsa_kdump_soft_reset(h); + if (rc) + /* Neither hard nor soft reset worked, we're hosed. */ + goto clean7; + + dev_info(&h->pdev->dev, "Board READY.\n"); + dev_info(&h->pdev->dev, + "Waiting for stale completions to drain.\n"); + h->access.set_intr_mask(h, HPSA_INTR_ON); + msleep(10000); + h->access.set_intr_mask(h, HPSA_INTR_OFF); + + rc = controller_reset_failed(h->cfgtable); + if (rc) + dev_info(&h->pdev->dev, + "Soft reset appears to have failed.\n"); + + /* since the controller's reset, we have to go back and re-init + * everything. Easiest to just forget what we've done and do it + * all over again. 
+ */ + hpsa_undo_allocations_after_kdump_soft_reset(h); + try_soft_reset = 0; + if (rc) + /* don't goto clean, we already unallocated */ + return -ENODEV; + + goto reinit_after_soft_reset; + } + + /* Enable Accelerated IO path at driver layer */ + h->acciopath_status = 1; + /* Disable discovery polling.*/ + h->discovery_polling = 0; + + + /* Turn the interrupts on so we can service requests */ + h->access.set_intr_mask(h, HPSA_INTR_ON); + + hpsa_hba_inquiry(h); + + h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL); + if (!h->lastlogicals) + dev_info(&h->pdev->dev, + "Can't track change to report lun data\n"); + + /* hook into SCSI subsystem */ + rc = hpsa_scsi_add_host(h); + if (rc) + goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ + + /* Monitor the controller for firmware lockups */ + h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; + INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); + schedule_delayed_work(&h->monitor_ctlr_work, + h->heartbeat_sample_interval); + INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker); + queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, + h->heartbeat_sample_interval); + INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker); + schedule_delayed_work(&h->event_monitor_work, + HPSA_EVENT_MONITOR_INTERVAL); + return 0; + +clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ + kfree(h->lastlogicals); +clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ + hpsa_free_performant_mode(h); + h->access.set_intr_mask(h, HPSA_INTR_OFF); +clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */ + hpsa_free_sg_chain_blocks(h); +clean5: /* cmd, irq, shost, pci, lu, aer/h */ + hpsa_free_cmd_pool(h); +clean4: /* irq, shost, pci, lu, aer/h */ + hpsa_free_irqs(h); +clean3: /* shost, pci, lu, aer/h */ + scsi_host_put(h->scsi_host); + h->scsi_host = NULL; +clean2_5: /* pci, lu, aer/h */ + hpsa_free_pci_init(h); +clean2: /* lu, aer/h */ + if (h->lockup_detected) { + free_percpu(h->lockup_detected); + h->lockup_detected = NULL; + } +clean1: /* wq/aer/h */ + if (h->resubmit_wq) { + destroy_workqueue(h->resubmit_wq); + h->resubmit_wq = NULL; + } + if (h->rescan_ctlr_wq) { + destroy_workqueue(h->rescan_ctlr_wq); + h->rescan_ctlr_wq = NULL; + } + if (h->monitor_ctlr_wq) { + destroy_workqueue(h->monitor_ctlr_wq); + h->monitor_ctlr_wq = NULL; + } + hpda_free_ctlr_info(h); + return rc; +} + +static void hpsa_flush_cache(struct ctlr_info *h) +{ + char *flush_buf; + struct CommandList *c; + int rc; + + if (unlikely(lockup_detected(h))) + return; + flush_buf = kzalloc(4, GFP_KERNEL); + if (!flush_buf) + return; + + c = cmd_alloc(h); + + if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, + RAID_CTLR_LUNID, TYPE_CMD)) { + goto out; + } + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE, + DEFAULT_TIMEOUT); + if (rc) + goto out; + if (c->err_info->CommandStatus != 0) +out: + dev_warn(&h->pdev->dev, + "error flushing cache on controller\n"); + cmd_free(h, c); + kfree(flush_buf); +} + +/* Make controller gather fresh report lun data each time we + * send down a report luns request + */ +static void hpsa_disable_rld_caching(struct ctlr_info *h) +{ + u32 *options; + struct CommandList *c; + int rc; + + /* Don't bother trying to set diag options if locked up */ + if (unlikely(h->lockup_detected)) + return; + + options = kzalloc(sizeof(*options), GFP_KERNEL); + if (!options) + return; + + c = cmd_alloc(h); + + /* first, get the current diag options settings */ + if 
(fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, + RAID_CTLR_LUNID, TYPE_CMD)) + goto errout; + + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); + if ((rc != 0) || (c->err_info->CommandStatus != 0)) + goto errout; + + /* Now, set the bit for disabling the RLD caching */ + *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING; + + if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0, + RAID_CTLR_LUNID, TYPE_CMD)) + goto errout; + + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE, + NO_TIMEOUT); + if ((rc != 0) || (c->err_info->CommandStatus != 0)) + goto errout; + + /* Now verify that it got set: */ + if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, + RAID_CTLR_LUNID, TYPE_CMD)) + goto errout; + + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); + if ((rc != 0) || (c->err_info->CommandStatus != 0)) + goto errout; + + if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING) + goto out; + +errout: + dev_err(&h->pdev->dev, + "Error: failed to disable report lun data caching.\n"); +out: + cmd_free(h, c); + kfree(options); +} + +static void __hpsa_shutdown(struct pci_dev *pdev) +{ + struct ctlr_info *h; + + h = pci_get_drvdata(pdev); + /* Turn board interrupts off and send the flush cache command + * sendcmd will turn off interrupt, and send the flush... + * To write all data in the battery backed cache to disks + */ + hpsa_flush_cache(h); + h->access.set_intr_mask(h, HPSA_INTR_OFF); + hpsa_free_irqs(h); /* init_one 4 */ + hpsa_disable_interrupt_mode(h); /* pci_init 2 */ +} + +static void hpsa_shutdown(struct pci_dev *pdev) +{ + __hpsa_shutdown(pdev); + pci_disable_device(pdev); +} + +static void hpsa_free_device_info(struct ctlr_info *h) +{ + int i; + + for (i = 0; i < h->ndevices; i++) { + kfree(h->dev[i]); + h->dev[i] = NULL; + } +} + +static void hpsa_remove_one(struct pci_dev *pdev) +{ + struct ctlr_info *h; + unsigned long flags; + + if (pci_get_drvdata(pdev) == NULL) { + dev_err(&pdev->dev, "unable to remove device\n"); + return; + } + h = pci_get_drvdata(pdev); + + /* Get rid of any controller monitoring work items */ + spin_lock_irqsave(&h->lock, flags); + h->remove_in_progress = 1; + spin_unlock_irqrestore(&h->lock, flags); + cancel_delayed_work_sync(&h->monitor_ctlr_work); + cancel_delayed_work_sync(&h->rescan_ctlr_work); + cancel_delayed_work_sync(&h->event_monitor_work); + destroy_workqueue(h->rescan_ctlr_wq); + destroy_workqueue(h->resubmit_wq); + destroy_workqueue(h->monitor_ctlr_wq); + + hpsa_delete_sas_host(h); + + /* + * Call before disabling interrupts. + * scsi_remove_host can trigger I/O operations especially + * when multipath is enabled. There can be SYNCHRONIZE CACHE + * operations which cannot complete and will hang the system. 
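+ * That is why scsi_remove_host() runs here, while the controller can
+ * still complete commands; __hpsa_shutdown() (which flushes the cache
+ * and frees the IRQs) only runs afterwards.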
+ */ + if (h->scsi_host) + scsi_remove_host(h->scsi_host); /* init_one 8 */ + /* includes hpsa_free_irqs - init_one 4 */ + /* includes hpsa_disable_interrupt_mode - pci_init 2 */ + __hpsa_shutdown(pdev); + + hpsa_free_device_info(h); /* scan */ + + kfree(h->hba_inquiry_data); /* init_one 10 */ + h->hba_inquiry_data = NULL; /* init_one 10 */ + hpsa_free_ioaccel2_sg_chain_blocks(h); + hpsa_free_performant_mode(h); /* init_one 7 */ + hpsa_free_sg_chain_blocks(h); /* init_one 6 */ + hpsa_free_cmd_pool(h); /* init_one 5 */ + kfree(h->lastlogicals); + + /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */ + + scsi_host_put(h->scsi_host); /* init_one 3 */ + h->scsi_host = NULL; /* init_one 3 */ + + /* includes hpsa_disable_interrupt_mode - pci_init 2 */ + hpsa_free_pci_init(h); /* init_one 2.5 */ + + free_percpu(h->lockup_detected); /* init_one 2 */ + h->lockup_detected = NULL; /* init_one 2 */ + + hpda_free_ctlr_info(h); /* init_one 1 */ +} + +static int __maybe_unused hpsa_suspend( + __attribute__((unused)) struct device *dev) +{ + return -ENOSYS; +} + +static int __maybe_unused hpsa_resume + (__attribute__((unused)) struct device *dev) +{ + return -ENOSYS; +} + +static SIMPLE_DEV_PM_OPS(hpsa_pm_ops, hpsa_suspend, hpsa_resume); + +static struct pci_driver hpsa_pci_driver = { + .name = HPSA, + .probe = hpsa_init_one, + .remove = hpsa_remove_one, + .id_table = hpsa_pci_device_id, /* id_table */ + .shutdown = hpsa_shutdown, + .driver.pm = &hpsa_pm_ops, +}; + +/* Fill in bucket_map[], given nsgs (the max number of + * scatter gather elements supported) and bucket[], + * which is an array of 8 integers. The bucket[] array + * contains 8 different DMA transfer sizes (in 16 + * byte increments) which the controller uses to fetch + * commands. This function fills in bucket_map[], which + * maps a given number of scatter gather elements to one of + * the 8 DMA transfer sizes. The point of it is to allow the + * controller to only do as much DMA as needed to fetch the + * command, with the DMA transfer size encoded in the lower + * bits of the command address. + */ +static void calc_bucket_map(int bucket[], int num_buckets, + int nsgs, int min_blocks, u32 *bucket_map) +{ + int i, j, b, size; + + /* Note, bucket_map must have nsgs+1 entries. */ + for (i = 0; i <= nsgs; i++) { + /* Compute size of a command with i SG entries */ + size = i + min_blocks; + b = num_buckets; /* Assume the biggest bucket */ + /* Find the bucket that is just big enough */ + for (j = 0; j < num_buckets; j++) { + if (bucket[j] >= size) { + b = j; + break; + } + } + /* for a command with i SG entries, use bucket b. */ + bucket_map[i] = b; + } +} + +/* + * return -ENODEV on err, 0 on success (or no action) + * allocates numerous items that must be freed later + */ +static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) +{ + int i; + unsigned long register_value; + unsigned long transMethod = CFGTBL_Trans_Performant | + (trans_support & CFGTBL_Trans_use_short_tags) | + CFGTBL_Trans_enable_directed_msix | + (trans_support & (CFGTBL_Trans_io_accel1 | + CFGTBL_Trans_io_accel2)); + struct access_method access = SA5_performant_access; + + /* This is a bit complicated. There are 8 registers on + * the controller which we write to to tell it 8 different + * sizes of commands which there may be. It's a way of + * reducing the DMA done to fetch each command. Encoded into + * each command's tag are 3 bits which communicate to the controller + * which of the eight sizes that command fits within. 
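+ * (calc_bucket_map() builds that mapping: h->blockFetchTable[n] ends up
+ * holding the index of the smallest bft[] entry big enough for a
+ * command with n embedded SG entries, e.g. n = 7 needs 7 + 4 = 11
+ * sixteen-byte blocks and lands on index 4 because bft[4] == 12.)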
The size of + * each command depends on how many scatter gather entries there are. + * Each SG entry requires 16 bytes. The eight registers are programmed + * with the number of 16-byte blocks a command of that size requires. + * The smallest command possible requires 5 such 16 byte blocks. + * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte + * blocks. Note, this only extends to the SG entries contained + * within the command block, and does not extend to chained blocks + * of SG elements. bft[] contains the eight values we write to + * the registers. They are not evenly distributed, but have more + * sizes for small commands, and fewer sizes for larger commands. + */ + int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}; +#define MIN_IOACCEL2_BFT_ENTRY 5 +#define HPSA_IOACCEL2_HEADER_SZ 4 + int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, + HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES}; + BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16); + BUILD_BUG_ON(ARRAY_SIZE(bft) != 8); + BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) > + 16 * MIN_IOACCEL2_BFT_ENTRY); + BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16); + BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4); + /* 5 = 1 s/g entry or 4k + * 6 = 2 s/g entry or 8k + * 8 = 4 s/g entry or 16k + * 10 = 6 s/g entry or 24k + */ + + /* If the controller supports either ioaccel method then + * we can also use the RAID stack submit path that does not + * perform the superfluous readl() after each command submission. + */ + if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2)) + access = SA5_performant_access_no_read; + + /* Controller spec: zero out this buffer. */ + for (i = 0; i < h->nreply_queues; i++) + memset(h->reply_queue[i].head, 0, h->reply_queue_size); + + bft[7] = SG_ENTRIES_IN_CMD + 4; + calc_bucket_map(bft, ARRAY_SIZE(bft), + SG_ENTRIES_IN_CMD, 4, h->blockFetchTable); + for (i = 0; i < 8; i++) + writel(bft[i], &h->transtable->BlockFetch[i]); + + /* size of controller ring buffer */ + writel(h->max_commands, &h->transtable->RepQSize); + writel(h->nreply_queues, &h->transtable->RepQCount); + writel(0, &h->transtable->RepQCtrAddrLow32); + writel(0, &h->transtable->RepQCtrAddrHigh32); + + for (i = 0; i < h->nreply_queues; i++) { + writel(0, &h->transtable->RepQAddr[i].upper); + writel(h->reply_queue[i].busaddr, + &h->transtable->RepQAddr[i].lower); + } + + writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); + writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest)); + /* + * enable outbound interrupt coalescing in accelerator mode; + */ + if (trans_support & CFGTBL_Trans_io_accel1) { + access = SA5_ioaccel_mode1_access; + writel(10, &h->cfgtable->HostWrite.CoalIntDelay); + writel(4, &h->cfgtable->HostWrite.CoalIntCount); + } else + if (trans_support & CFGTBL_Trans_io_accel2) + access = SA5_ioaccel_mode2_access; + writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); + if (hpsa_wait_for_mode_change_ack(h)) { + dev_err(&h->pdev->dev, + "performant mode problem - doorbell timeout\n"); + return -ENODEV; + } + register_value = readl(&(h->cfgtable->TransportActive)); + if (!(register_value & CFGTBL_Trans_Performant)) { + dev_err(&h->pdev->dev, + "performant mode problem - transport not active\n"); + return -ENODEV; + } + /* Change the access methods to the performant access methods */ + h->access = access; + h->transMethod = transMethod; + + if (!((trans_support & CFGTBL_Trans_io_accel1) || + (trans_support & CFGTBL_Trans_io_accel2))) + return 0; + + if 
(trans_support & CFGTBL_Trans_io_accel1) { + /* Set up I/O accelerator mode */ + for (i = 0; i < h->nreply_queues; i++) { + writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); + h->reply_queue[i].current_entry = + readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); + } + bft[7] = h->ioaccel_maxsg + 8; + calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8, + h->ioaccel1_blockFetchTable); + + /* initialize all reply queue entries to unused */ + for (i = 0; i < h->nreply_queues; i++) + memset(h->reply_queue[i].head, + (u8) IOACCEL_MODE1_REPLY_UNUSED, + h->reply_queue_size); + + /* set all the constant fields in the accelerator command + * frames once at init time to save CPU cycles later. + */ + for (i = 0; i < h->nr_cmds; i++) { + struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; + + cp->function = IOACCEL1_FUNCTION_SCSIIO; + cp->err_info = (u32) (h->errinfo_pool_dhandle + + (i * sizeof(struct ErrorInfo))); + cp->err_info_len = sizeof(struct ErrorInfo); + cp->sgl_offset = IOACCEL1_SGLOFFSET; + cp->host_context_flags = + cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT); + cp->timeout_sec = 0; + cp->ReplyQueue = 0; + cp->tag = + cpu_to_le64((i << DIRECT_LOOKUP_SHIFT)); + cp->host_addr = + cpu_to_le64(h->ioaccel_cmd_pool_dhandle + + (i * sizeof(struct io_accel1_cmd))); + } + } else if (trans_support & CFGTBL_Trans_io_accel2) { + u64 cfg_offset, cfg_base_addr_index; + u32 bft2_offset, cfg_base_addr; + + hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, + &cfg_base_addr_index, &cfg_offset); + BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64); + bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; + calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, + 4, h->ioaccel2_blockFetchTable); + bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset); + BUILD_BUG_ON(offsetof(struct CfgTable, + io_accel_request_size_offset) != 0xb8); + h->ioaccel2_bft2_regs = + remap_pci_mem(pci_resource_start(h->pdev, + cfg_base_addr_index) + + cfg_offset + bft2_offset, + ARRAY_SIZE(bft2) * + sizeof(*h->ioaccel2_bft2_regs)); + for (i = 0; i < ARRAY_SIZE(bft2); i++) + writel(bft2[i], &h->ioaccel2_bft2_regs[i]); + } + writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); + if (hpsa_wait_for_mode_change_ack(h)) { + dev_err(&h->pdev->dev, + "performant mode problem - enabling ioaccel mode\n"); + return -ENODEV; + } + return 0; +} + +/* Free ioaccel1 mode command blocks and block fetch table */ +static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h) +{ + if (h->ioaccel_cmd_pool) { + dma_free_coherent(&h->pdev->dev, + h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), + h->ioaccel_cmd_pool, + h->ioaccel_cmd_pool_dhandle); + h->ioaccel_cmd_pool = NULL; + h->ioaccel_cmd_pool_dhandle = 0; + } + kfree(h->ioaccel1_blockFetchTable); + h->ioaccel1_blockFetchTable = NULL; +} + +/* Allocate ioaccel1 mode command blocks and block fetch table */ +static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h) +{ + h->ioaccel_maxsg = + readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); + if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES) + h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES; + + /* Command structures must be aligned on a 128-byte boundary + * because the 7 lower bits of the address are used by the + * hardware. 
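+ * The BUILD_BUG_ON() below enforces this at compile time: the build
+ * breaks if sizeof(struct io_accel1_cmd) ever stops being a multiple of
+ * IOACCEL1_COMMANDLIST_ALIGNMENT.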
+ */ + BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % + IOACCEL1_COMMANDLIST_ALIGNMENT); + h->ioaccel_cmd_pool = + dma_alloc_coherent(&h->pdev->dev, + h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), + &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL); + + h->ioaccel1_blockFetchTable = + kmalloc(((h->ioaccel_maxsg + 1) * + sizeof(u32)), GFP_KERNEL); + + if ((h->ioaccel_cmd_pool == NULL) || + (h->ioaccel1_blockFetchTable == NULL)) + goto clean_up; + + memset(h->ioaccel_cmd_pool, 0, + h->nr_cmds * sizeof(*h->ioaccel_cmd_pool)); + return 0; + +clean_up: + hpsa_free_ioaccel1_cmd_and_bft(h); + return -ENOMEM; +} + +/* Free ioaccel2 mode command blocks and block fetch table */ +static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h) +{ + hpsa_free_ioaccel2_sg_chain_blocks(h); + + if (h->ioaccel2_cmd_pool) { + dma_free_coherent(&h->pdev->dev, + h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), + h->ioaccel2_cmd_pool, + h->ioaccel2_cmd_pool_dhandle); + h->ioaccel2_cmd_pool = NULL; + h->ioaccel2_cmd_pool_dhandle = 0; + } + kfree(h->ioaccel2_blockFetchTable); + h->ioaccel2_blockFetchTable = NULL; +} + +/* Allocate ioaccel2 mode command blocks and block fetch table */ +static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h) +{ + int rc; + + /* Allocate ioaccel2 mode command blocks and block fetch table */ + + h->ioaccel_maxsg = + readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); + if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) + h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; + + BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % + IOACCEL2_COMMANDLIST_ALIGNMENT); + h->ioaccel2_cmd_pool = + dma_alloc_coherent(&h->pdev->dev, + h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), + &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL); + + h->ioaccel2_blockFetchTable = + kmalloc(((h->ioaccel_maxsg + 1) * + sizeof(u32)), GFP_KERNEL); + + if ((h->ioaccel2_cmd_pool == NULL) || + (h->ioaccel2_blockFetchTable == NULL)) { + rc = -ENOMEM; + goto clean_up; + } + + rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h); + if (rc) + goto clean_up; + + memset(h->ioaccel2_cmd_pool, 0, + h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool)); + return 0; + +clean_up: + hpsa_free_ioaccel2_cmd_and_bft(h); + return rc; +} + +/* Free items allocated by hpsa_put_ctlr_into_performant_mode */ +static void hpsa_free_performant_mode(struct ctlr_info *h) +{ + kfree(h->blockFetchTable); + h->blockFetchTable = NULL; + hpsa_free_reply_queues(h); + hpsa_free_ioaccel1_cmd_and_bft(h); + hpsa_free_ioaccel2_cmd_and_bft(h); +} + +/* return -ENODEV on error, 0 on success (or no action) + * allocates numerous items that must be freed later + */ +static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) +{ + u32 trans_support; + int i, rc; + + if (hpsa_simple_mode) + return 0; + + trans_support = readl(&(h->cfgtable->TransportSupport)); + if (!(trans_support & PERFORMANT_MODE)) + return 0; + + /* Check for I/O accelerator mode support */ + if (trans_support & CFGTBL_Trans_io_accel1) { + rc = hpsa_alloc_ioaccel1_cmd_and_bft(h); + if (rc) + return rc; + } else if (trans_support & CFGTBL_Trans_io_accel2) { + rc = hpsa_alloc_ioaccel2_cmd_and_bft(h); + if (rc) + return rc; + } + + h->nreply_queues = h->msix_vectors > 0 ? 
h->msix_vectors : 1; + hpsa_get_max_perf_mode_cmds(h); + /* Performant mode ring buffer and supporting data structures */ + h->reply_queue_size = h->max_commands * sizeof(u64); + + for (i = 0; i < h->nreply_queues; i++) { + h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev, + h->reply_queue_size, + &h->reply_queue[i].busaddr, + GFP_KERNEL); + if (!h->reply_queue[i].head) { + rc = -ENOMEM; + goto clean1; /* rq, ioaccel */ + } + h->reply_queue[i].size = h->max_commands; + h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ + h->reply_queue[i].current_entry = 0; + } + + /* Need a block fetch table for performant mode */ + h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * + sizeof(u32)), GFP_KERNEL); + if (!h->blockFetchTable) { + rc = -ENOMEM; + goto clean1; /* rq, ioaccel */ + } + + rc = hpsa_enter_performant_mode(h, trans_support); + if (rc) + goto clean2; /* bft, rq, ioaccel */ + return 0; + +clean2: /* bft, rq, ioaccel */ + kfree(h->blockFetchTable); + h->blockFetchTable = NULL; +clean1: /* rq, ioaccel */ + hpsa_free_reply_queues(h); + hpsa_free_ioaccel1_cmd_and_bft(h); + hpsa_free_ioaccel2_cmd_and_bft(h); + return rc; +} + +static int is_accelerated_cmd(struct CommandList *c) +{ + return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2; +} + +static void hpsa_drain_accel_commands(struct ctlr_info *h) +{ + struct CommandList *c = NULL; + int i, accel_cmds_out; + int refcount; + + do { /* wait for all outstanding ioaccel commands to drain out */ + accel_cmds_out = 0; + for (i = 0; i < h->nr_cmds; i++) { + c = h->cmd_pool + i; + refcount = atomic_inc_return(&c->refcount); + if (refcount > 1) /* Command is allocated */ + accel_cmds_out += is_accelerated_cmd(c); + cmd_free(h, c); + } + if (accel_cmds_out <= 0) + break; + msleep(100); + } while (1); +} + +static struct hpsa_sas_phy *hpsa_alloc_sas_phy( + struct hpsa_sas_port *hpsa_sas_port) +{ + struct hpsa_sas_phy *hpsa_sas_phy; + struct sas_phy *phy; + + hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL); + if (!hpsa_sas_phy) + return NULL; + + phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev, + hpsa_sas_port->next_phy_index); + if (!phy) { + kfree(hpsa_sas_phy); + return NULL; + } + + hpsa_sas_port->next_phy_index++; + hpsa_sas_phy->phy = phy; + hpsa_sas_phy->parent_port = hpsa_sas_port; + + return hpsa_sas_phy; +} + +static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy) +{ + struct sas_phy *phy = hpsa_sas_phy->phy; + + sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy); + if (hpsa_sas_phy->added_to_port) + list_del(&hpsa_sas_phy->phy_list_entry); + sas_phy_delete(phy); + kfree(hpsa_sas_phy); +} + +static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy) +{ + int rc; + struct hpsa_sas_port *hpsa_sas_port; + struct sas_phy *phy; + struct sas_identify *identify; + + hpsa_sas_port = hpsa_sas_phy->parent_port; + phy = hpsa_sas_phy->phy; + + identify = &phy->identify; + memset(identify, 0, sizeof(*identify)); + identify->sas_address = hpsa_sas_port->sas_address; + identify->device_type = SAS_END_DEVICE; + identify->initiator_port_protocols = SAS_PROTOCOL_STP; + identify->target_port_protocols = SAS_PROTOCOL_STP; + phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; + phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; + phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN; + phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN; + phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; + + rc = sas_phy_add(hpsa_sas_phy->phy); + if (rc) + return rc; + + sas_port_add_phy(hpsa_sas_port->port, 
hpsa_sas_phy->phy); + list_add_tail(&hpsa_sas_phy->phy_list_entry, + &hpsa_sas_port->phy_list_head); + hpsa_sas_phy->added_to_port = true; + + return 0; +} + +static int + hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port, + struct sas_rphy *rphy) +{ + struct sas_identify *identify; + + identify = &rphy->identify; + identify->sas_address = hpsa_sas_port->sas_address; + identify->initiator_port_protocols = SAS_PROTOCOL_STP; + identify->target_port_protocols = SAS_PROTOCOL_STP; + + return sas_rphy_add(rphy); +} + +static struct hpsa_sas_port + *hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node, + u64 sas_address) +{ + int rc; + struct hpsa_sas_port *hpsa_sas_port; + struct sas_port *port; + + hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL); + if (!hpsa_sas_port) + return NULL; + + INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head); + hpsa_sas_port->parent_node = hpsa_sas_node; + + port = sas_port_alloc_num(hpsa_sas_node->parent_dev); + if (!port) + goto free_hpsa_port; + + rc = sas_port_add(port); + if (rc) + goto free_sas_port; + + hpsa_sas_port->port = port; + hpsa_sas_port->sas_address = sas_address; + list_add_tail(&hpsa_sas_port->port_list_entry, + &hpsa_sas_node->port_list_head); + + return hpsa_sas_port; + +free_sas_port: + sas_port_free(port); +free_hpsa_port: + kfree(hpsa_sas_port); + + return NULL; +} + +static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port) +{ + struct hpsa_sas_phy *hpsa_sas_phy; + struct hpsa_sas_phy *next; + + list_for_each_entry_safe(hpsa_sas_phy, next, + &hpsa_sas_port->phy_list_head, phy_list_entry) + hpsa_free_sas_phy(hpsa_sas_phy); + + sas_port_delete(hpsa_sas_port->port); + list_del(&hpsa_sas_port->port_list_entry); + kfree(hpsa_sas_port); +} + +static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev) +{ + struct hpsa_sas_node *hpsa_sas_node; + + hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL); + if (hpsa_sas_node) { + hpsa_sas_node->parent_dev = parent_dev; + INIT_LIST_HEAD(&hpsa_sas_node->port_list_head); + } + + return hpsa_sas_node; +} + +static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node) +{ + struct hpsa_sas_port *hpsa_sas_port; + struct hpsa_sas_port *next; + + if (!hpsa_sas_node) + return; + + list_for_each_entry_safe(hpsa_sas_port, next, + &hpsa_sas_node->port_list_head, port_list_entry) + hpsa_free_sas_port(hpsa_sas_port); + + kfree(hpsa_sas_node); +} + +static struct hpsa_scsi_dev_t + *hpsa_find_device_by_sas_rphy(struct ctlr_info *h, + struct sas_rphy *rphy) +{ + int i; + struct hpsa_scsi_dev_t *device; + + for (i = 0; i < h->ndevices; i++) { + device = h->dev[i]; + if (!device->sas_port) + continue; + if (device->sas_port->rphy == rphy) + return device; + } + + return NULL; +} + +static int hpsa_add_sas_host(struct ctlr_info *h) +{ + int rc; + struct device *parent_dev; + struct hpsa_sas_node *hpsa_sas_node; + struct hpsa_sas_port *hpsa_sas_port; + struct hpsa_sas_phy *hpsa_sas_phy; + + parent_dev = &h->scsi_host->shost_dev; + + hpsa_sas_node = hpsa_alloc_sas_node(parent_dev); + if (!hpsa_sas_node) + return -ENOMEM; + + hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address); + if (!hpsa_sas_port) { + rc = -ENODEV; + goto free_sas_node; + } + + hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port); + if (!hpsa_sas_phy) { + rc = -ENODEV; + goto free_sas_port; + } + + rc = hpsa_sas_port_add_phy(hpsa_sas_phy); + if (rc) + goto free_sas_phy; + + h->sas_host = hpsa_sas_node; + + return 0; + +free_sas_phy: + sas_phy_free(hpsa_sas_phy->phy); + kfree(hpsa_sas_phy); 
+free_sas_port: + hpsa_free_sas_port(hpsa_sas_port); +free_sas_node: + hpsa_free_sas_node(hpsa_sas_node); + + return rc; +} + +static void hpsa_delete_sas_host(struct ctlr_info *h) +{ + hpsa_free_sas_node(h->sas_host); +} + +static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node, + struct hpsa_scsi_dev_t *device) +{ + int rc; + struct hpsa_sas_port *hpsa_sas_port; + struct sas_rphy *rphy; + + hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address); + if (!hpsa_sas_port) + return -ENOMEM; + + rphy = sas_end_device_alloc(hpsa_sas_port->port); + if (!rphy) { + rc = -ENODEV; + goto free_sas_port; + } + + hpsa_sas_port->rphy = rphy; + device->sas_port = hpsa_sas_port; + + rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy); + if (rc) + goto free_sas_rphy; + + return 0; + +free_sas_rphy: + sas_rphy_free(rphy); +free_sas_port: + hpsa_free_sas_port(hpsa_sas_port); + device->sas_port = NULL; + + return rc; +} + +static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device) +{ + if (device->sas_port) { + hpsa_free_sas_port(device->sas_port); + device->sas_port = NULL; + } +} + +static int +hpsa_sas_get_linkerrors(struct sas_phy *phy) +{ + return 0; +} + +static int +hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) +{ + struct Scsi_Host *shost = phy_to_shost(rphy); + struct ctlr_info *h; + struct hpsa_scsi_dev_t *sd; + + if (!shost) + return -ENXIO; + + h = shost_to_hba(shost); + + if (!h) + return -ENXIO; + + sd = hpsa_find_device_by_sas_rphy(h, rphy); + if (!sd) + return -ENXIO; + + *identifier = sd->eli; + + return 0; +} + +static int +hpsa_sas_get_bay_identifier(struct sas_rphy *rphy) +{ + return -ENXIO; +} + +static int +hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset) +{ + return 0; +} + +static int +hpsa_sas_phy_enable(struct sas_phy *phy, int enable) +{ + return 0; +} + +static int +hpsa_sas_phy_setup(struct sas_phy *phy) +{ + return 0; +} + +static void +hpsa_sas_phy_release(struct sas_phy *phy) +{ +} + +static int +hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) +{ + return -EINVAL; +} + +static struct sas_function_template hpsa_sas_transport_functions = { + .get_linkerrors = hpsa_sas_get_linkerrors, + .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier, + .get_bay_identifier = hpsa_sas_get_bay_identifier, + .phy_reset = hpsa_sas_phy_reset, + .phy_enable = hpsa_sas_phy_enable, + .phy_setup = hpsa_sas_phy_setup, + .phy_release = hpsa_sas_phy_release, + .set_phy_speed = hpsa_sas_phy_speed, +}; + +/* + * This is it. Register the PCI driver information for the cards we control + * the OS will call our registered routines when it finds one of our cards. 
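+ * (Note: hpsa_init() below attaches the SAS transport template before
+ * registering the PCI driver, and releases it again if pci_register_driver()
+ * fails; hpsa_cleanup() tears both down in the reverse order.)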
+ */ +static int __init hpsa_init(void) +{ + int rc; + + hpsa_sas_transport_template = + sas_attach_transport(&hpsa_sas_transport_functions); + if (!hpsa_sas_transport_template) + return -ENODEV; + + rc = pci_register_driver(&hpsa_pci_driver); + + if (rc) + sas_release_transport(hpsa_sas_transport_template); + + return rc; +} + +static void __exit hpsa_cleanup(void) +{ + pci_unregister_driver(&hpsa_pci_driver); + sas_release_transport(hpsa_sas_transport_template); +} + +static void __attribute__((unused)) verify_offsets(void) +{ +#define VERIFY_OFFSET(member, offset) \ + BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset) + + VERIFY_OFFSET(structure_size, 0); + VERIFY_OFFSET(volume_blk_size, 4); + VERIFY_OFFSET(volume_blk_cnt, 8); + VERIFY_OFFSET(phys_blk_shift, 16); + VERIFY_OFFSET(parity_rotation_shift, 17); + VERIFY_OFFSET(strip_size, 18); + VERIFY_OFFSET(disk_starting_blk, 20); + VERIFY_OFFSET(disk_blk_cnt, 28); + VERIFY_OFFSET(data_disks_per_row, 36); + VERIFY_OFFSET(metadata_disks_per_row, 38); + VERIFY_OFFSET(row_cnt, 40); + VERIFY_OFFSET(layout_map_count, 42); + VERIFY_OFFSET(flags, 44); + VERIFY_OFFSET(dekindex, 46); + /* VERIFY_OFFSET(reserved, 48 */ + VERIFY_OFFSET(data, 64); + +#undef VERIFY_OFFSET + +#define VERIFY_OFFSET(member, offset) \ + BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset) + + VERIFY_OFFSET(IU_type, 0); + VERIFY_OFFSET(direction, 1); + VERIFY_OFFSET(reply_queue, 2); + /* VERIFY_OFFSET(reserved1, 3); */ + VERIFY_OFFSET(scsi_nexus, 4); + VERIFY_OFFSET(Tag, 8); + VERIFY_OFFSET(cdb, 16); + VERIFY_OFFSET(cciss_lun, 32); + VERIFY_OFFSET(data_len, 40); + VERIFY_OFFSET(cmd_priority_task_attr, 44); + VERIFY_OFFSET(sg_count, 45); + /* VERIFY_OFFSET(reserved3 */ + VERIFY_OFFSET(err_ptr, 48); + VERIFY_OFFSET(err_len, 56); + /* VERIFY_OFFSET(reserved4 */ + VERIFY_OFFSET(sg, 64); + +#undef VERIFY_OFFSET + +#define VERIFY_OFFSET(member, offset) \ + BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset) + + VERIFY_OFFSET(dev_handle, 0x00); + VERIFY_OFFSET(reserved1, 0x02); + VERIFY_OFFSET(function, 0x03); + VERIFY_OFFSET(reserved2, 0x04); + VERIFY_OFFSET(err_info, 0x0C); + VERIFY_OFFSET(reserved3, 0x10); + VERIFY_OFFSET(err_info_len, 0x12); + VERIFY_OFFSET(reserved4, 0x13); + VERIFY_OFFSET(sgl_offset, 0x14); + VERIFY_OFFSET(reserved5, 0x15); + VERIFY_OFFSET(transfer_len, 0x1C); + VERIFY_OFFSET(reserved6, 0x20); + VERIFY_OFFSET(io_flags, 0x24); + VERIFY_OFFSET(reserved7, 0x26); + VERIFY_OFFSET(LUN, 0x34); + VERIFY_OFFSET(control, 0x3C); + VERIFY_OFFSET(CDB, 0x40); + VERIFY_OFFSET(reserved8, 0x50); + VERIFY_OFFSET(host_context_flags, 0x60); + VERIFY_OFFSET(timeout_sec, 0x62); + VERIFY_OFFSET(ReplyQueue, 0x64); + VERIFY_OFFSET(reserved9, 0x65); + VERIFY_OFFSET(tag, 0x68); + VERIFY_OFFSET(host_addr, 0x70); + VERIFY_OFFSET(CISS_LUN, 0x78); + VERIFY_OFFSET(SG, 0x78 + 8); +#undef VERIFY_OFFSET +} + +module_init(hpsa_init); +module_exit(hpsa_cleanup); diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h new file mode 100644 index 000000000..99b075085 --- /dev/null +++ b/drivers/scsi/hpsa.h @@ -0,0 +1,673 @@ +/* + * Disk Array driver for HP Smart Array SAS controllers + * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries + * Copyright 2016 Microsemi Corporation + * Copyright 2014-2015 PMC-Sierra, Inc. + * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more details. + * + * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com + * + */ +#ifndef HPSA_H +#define HPSA_H + +#include + +#define IO_OK 0 +#define IO_ERROR 1 + +struct ctlr_info; + +struct access_method { + void (*submit_command)(struct ctlr_info *h, + struct CommandList *c); + void (*set_intr_mask)(struct ctlr_info *h, unsigned long val); + bool (*intr_pending)(struct ctlr_info *h); + unsigned long (*command_completed)(struct ctlr_info *h, u8 q); +}; + +/* for SAS hosts and SAS expanders */ +struct hpsa_sas_node { + struct device *parent_dev; + struct list_head port_list_head; +}; + +struct hpsa_sas_port { + struct list_head port_list_entry; + u64 sas_address; + struct sas_port *port; + int next_phy_index; + struct list_head phy_list_head; + struct hpsa_sas_node *parent_node; + struct sas_rphy *rphy; +}; + +struct hpsa_sas_phy { + struct list_head phy_list_entry; + struct sas_phy *phy; + struct hpsa_sas_port *parent_port; + bool added_to_port; +}; + +#define EXTERNAL_QD 128 +struct hpsa_scsi_dev_t { + unsigned int devtype; + int bus, target, lun; /* as presented to the OS */ + unsigned char scsi3addr[8]; /* as presented to the HW */ + u8 physical_device : 1; + u8 expose_device; + u8 removed : 1; /* device is marked for death */ + u8 was_removed : 1; /* device actually removed */ +#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0" + unsigned char device_id[16]; /* from inquiry pg. 0x83 */ + u64 sas_address; + u64 eli; /* from report diags. */ + unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ + unsigned char model[16]; /* bytes 16-31 of inquiry data */ + unsigned char rev; /* byte 2 of inquiry data */ + unsigned char raid_level; /* from inquiry page 0xC1 */ + unsigned char volume_offline; /* discovered via TUR or VPD */ + u16 queue_depth; /* max queue_depth for this device */ + atomic_t commands_outstanding; /* track commands sent to device */ + atomic_t ioaccel_cmds_out; /* Only used for physical devices + * counts commands sent to physical + * device via "ioaccel" path. + */ + bool in_reset; + u32 ioaccel_handle; + u8 active_path_index; + u8 path_map; + u8 bay; + u8 box[8]; + u16 phys_connector[8]; + int offload_config; /* I/O accel RAID offload configured */ + int offload_enabled; /* I/O accel RAID offload enabled */ + int offload_to_be_enabled; + int hba_ioaccel_enabled; + int offload_to_mirror; /* Send next I/O accelerator RAID + * offload request to mirror drive + */ + struct raid_map_data raid_map; /* I/O accelerator RAID map */ + + /* + * Pointers from logical drive map indices to the phys drives that + * make those logical drives. Note, multiple logical drives may + * share physical drives. You can have for instance 5 physical + * drives with 3 logical drives each using those same 5 physical + * disks. We need these pointers for counting i/o's out to physical + * devices in order to honor physical device queue depth limits. 
+ */ + struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES]; + int nphysical_disks; + int supports_aborts; + struct hpsa_sas_port *sas_port; + int external; /* 1-from external array 0-not <0-unknown */ +}; + +struct reply_queue_buffer { + u64 *head; + size_t size; + u8 wraparound; + u32 current_entry; + dma_addr_t busaddr; +}; + +#pragma pack(1) +struct bmic_controller_parameters { + u8 led_flags; + u8 enable_command_list_verification; + u8 backed_out_write_drives; + u16 stripes_for_parity; + u8 parity_distribution_mode_flags; + u16 max_driver_requests; + u16 elevator_trend_count; + u8 disable_elevator; + u8 force_scan_complete; + u8 scsi_transfer_mode; + u8 force_narrow; + u8 rebuild_priority; + u8 expand_priority; + u8 host_sdb_asic_fix; + u8 pdpi_burst_from_host_disabled; + char software_name[64]; + char hardware_name[32]; + u8 bridge_revision; + u8 snapshot_priority; + u32 os_specific; + u8 post_prompt_timeout; + u8 automatic_drive_slamming; + u8 reserved1; + u8 nvram_flags; + u8 cache_nvram_flags; + u8 drive_config_flags; + u16 reserved2; + u8 temp_warning_level; + u8 temp_shutdown_level; + u8 temp_condition_reset; + u8 max_coalesce_commands; + u32 max_coalesce_delay; + u8 orca_password[4]; + u8 access_id[16]; + u8 reserved[356]; +}; +#pragma pack() + +struct ctlr_info { + unsigned int *reply_map; + int ctlr; + char devname[8]; + char *product_name; + struct pci_dev *pdev; + u32 board_id; + u64 sas_address; + void __iomem *vaddr; + unsigned long paddr; + int nr_cmds; /* Number of commands allowed on this controller */ +#define HPSA_CMDS_RESERVED_FOR_ABORTS 2 +#define HPSA_CMDS_RESERVED_FOR_DRIVER 1 + struct CfgTable __iomem *cfgtable; + int interrupts_enabled; + int max_commands; + int last_collision_tag; /* tags are global */ + atomic_t commands_outstanding; +# define PERF_MODE_INT 0 +# define DOORBELL_INT 1 +# define SIMPLE_MODE_INT 2 +# define MEMQ_MODE_INT 3 + unsigned int msix_vectors; + int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */ + struct access_method access; + + /* queue and queue Info */ + unsigned int Qdepth; + unsigned int maxSG; + spinlock_t lock; + int maxsgentries; + u8 max_cmd_sg_entries; + int chainsize; + struct SGDescriptor **cmd_sg_list; + struct ioaccel2_sg_element **ioaccel2_cmd_sg_list; + + /* pointers to command and error info pool */ + struct CommandList *cmd_pool; + dma_addr_t cmd_pool_dhandle; + struct io_accel1_cmd *ioaccel_cmd_pool; + dma_addr_t ioaccel_cmd_pool_dhandle; + struct io_accel2_cmd *ioaccel2_cmd_pool; + dma_addr_t ioaccel2_cmd_pool_dhandle; + struct ErrorInfo *errinfo_pool; + dma_addr_t errinfo_pool_dhandle; + unsigned long *cmd_pool_bits; + int scan_finished; + u8 scan_waiting : 1; + spinlock_t scan_lock; + wait_queue_head_t scan_wait_queue; + + struct Scsi_Host *scsi_host; + spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */ + int ndevices; /* number of used elements in .dev[] array. */ + struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES]; + /* + * Performant mode tables. 
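+ * (Performant-mode and ioaccel transport setup is performed by
+ * hpsa_put_ctlr_into_performant_mode() in hpsa.c.)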
+ */ + u32 trans_support; + u32 trans_offset; + struct TransTable_struct __iomem *transtable; + unsigned long transMethod; + + /* cap concurrent passthrus at some reasonable maximum */ +#define HPSA_MAX_CONCURRENT_PASSTHRUS (10) + atomic_t passthru_cmds_avail; + + /* + * Performant mode completion buffers + */ + size_t reply_queue_size; + struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES]; + u8 nreply_queues; + u32 *blockFetchTable; + u32 *ioaccel1_blockFetchTable; + u32 *ioaccel2_blockFetchTable; + u32 __iomem *ioaccel2_bft2_regs; + unsigned char *hba_inquiry_data; + u32 driver_support; + u32 fw_support; + int ioaccel_support; + int ioaccel_maxsg; + u64 last_intr_timestamp; + u32 last_heartbeat; + u64 last_heartbeat_timestamp; + u32 heartbeat_sample_interval; + atomic_t firmware_flash_in_progress; + u32 __percpu *lockup_detected; + struct delayed_work monitor_ctlr_work; + struct delayed_work rescan_ctlr_work; + struct delayed_work event_monitor_work; + int remove_in_progress; + /* Address of h->q[x] is passed to intr handler to know which queue */ + u8 q[MAX_REPLY_QUEUES]; + char intrname[MAX_REPLY_QUEUES][16]; /* "hpsa0-msix00" names */ + u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */ +#define HPSATMF_BITS_SUPPORTED (1 << 0) +#define HPSATMF_PHYS_LUN_RESET (1 << 1) +#define HPSATMF_PHYS_NEX_RESET (1 << 2) +#define HPSATMF_PHYS_TASK_ABORT (1 << 3) +#define HPSATMF_PHYS_TSET_ABORT (1 << 4) +#define HPSATMF_PHYS_CLEAR_ACA (1 << 5) +#define HPSATMF_PHYS_CLEAR_TSET (1 << 6) +#define HPSATMF_PHYS_QRY_TASK (1 << 7) +#define HPSATMF_PHYS_QRY_TSET (1 << 8) +#define HPSATMF_PHYS_QRY_ASYNC (1 << 9) +#define HPSATMF_IOACCEL_ENABLED (1 << 15) +#define HPSATMF_MASK_SUPPORTED (1 << 16) +#define HPSATMF_LOG_LUN_RESET (1 << 17) +#define HPSATMF_LOG_NEX_RESET (1 << 18) +#define HPSATMF_LOG_TASK_ABORT (1 << 19) +#define HPSATMF_LOG_TSET_ABORT (1 << 20) +#define HPSATMF_LOG_CLEAR_ACA (1 << 21) +#define HPSATMF_LOG_CLEAR_TSET (1 << 22) +#define HPSATMF_LOG_QRY_TASK (1 << 23) +#define HPSATMF_LOG_QRY_TSET (1 << 24) +#define HPSATMF_LOG_QRY_ASYNC (1 << 25) + u32 events; +#define CTLR_STATE_CHANGE_EVENT (1 << 0) +#define CTLR_ENCLOSURE_HOT_PLUG_EVENT (1 << 1) +#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV (1 << 4) +#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV (1 << 5) +#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL (1 << 6) +#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED (1 << 30) +#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31) + +#define RESCAN_REQUIRED_EVENT_BITS \ + (CTLR_ENCLOSURE_HOT_PLUG_EVENT | \ + CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \ + CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \ + CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \ + CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE) + spinlock_t offline_device_lock; + struct list_head offline_device_list; + int acciopath_status; + int drv_req_rescan; + int raid_offload_debug; + int discovery_polling; + int legacy_board; + struct ReportLUNdata *lastlogicals; + int needs_abort_tags_swizzled; + struct workqueue_struct *resubmit_wq; + struct workqueue_struct *rescan_ctlr_wq; + struct workqueue_struct *monitor_ctlr_wq; + atomic_t abort_cmds_available; + wait_queue_head_t event_sync_wait_queue; + struct mutex reset_mutex; + u8 reset_in_progress; + struct hpsa_sas_node *sas_host; + spinlock_t reset_lock; +}; + +struct offline_device_entry { + unsigned char scsi3addr[8]; + struct list_head offline_list; +}; + +#define HPSA_ABORT_MSG 0 +#define HPSA_DEVICE_RESET_MSG 1 +#define HPSA_RESET_TYPE_CONTROLLER 0x00 +#define 
HPSA_RESET_TYPE_BUS 0x01 +#define HPSA_RESET_TYPE_LUN 0x04 +#define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */ +#define HPSA_MSG_SEND_RETRY_LIMIT 10 +#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000) + +/* Maximum time in seconds driver will wait for command completions + * when polling before giving up. + */ +#define HPSA_MAX_POLL_TIME_SECS (20) + +/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines + * how many times to retry TEST UNIT READY on a device + * while waiting for it to become ready before giving up. + * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval + * between sending TURs while waiting for a device + * to become ready. + */ +#define HPSA_TUR_RETRY_LIMIT (20) +#define HPSA_MAX_WAIT_INTERVAL_SECS (30) + +/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board + * to become ready, in seconds, before giving up on it. + * HPSA_BOARD_READY_POLL_INTERVAL_MSECS * is how long to wait + * between polling the board to see if it is ready, in + * milliseconds. HPSA_BOARD_READY_POLL_INTERVAL and + * HPSA_BOARD_READY_ITERATIONS are derived from those. + */ +#define HPSA_BOARD_READY_WAIT_SECS (120) +#define HPSA_BOARD_NOT_READY_WAIT_SECS (100) +#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100) +#define HPSA_BOARD_READY_POLL_INTERVAL \ + ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000) +#define HPSA_BOARD_READY_ITERATIONS \ + ((HPSA_BOARD_READY_WAIT_SECS * 1000) / \ + HPSA_BOARD_READY_POLL_INTERVAL_MSECS) +#define HPSA_BOARD_NOT_READY_ITERATIONS \ + ((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \ + HPSA_BOARD_READY_POLL_INTERVAL_MSECS) +#define HPSA_POST_RESET_PAUSE_MSECS (3000) +#define HPSA_POST_RESET_NOOP_RETRIES (12) + +/* Defining the diffent access_menthods */ +/* + * Memory mapped FIFO interface (SMART 53xx cards) + */ +#define SA5_DOORBELL 0x20 +#define SA5_REQUEST_PORT_OFFSET 0x40 +#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0 +#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4 +#define SA5_REPLY_INTR_MASK_OFFSET 0x34 +#define SA5_REPLY_PORT_OFFSET 0x44 +#define SA5_INTR_STATUS 0x30 +#define SA5_SCRATCHPAD_OFFSET 0xB0 + +#define SA5_CTCFG_OFFSET 0xB4 +#define SA5_CTMEM_OFFSET 0xB8 + +#define SA5_INTR_OFF 0x08 +#define SA5B_INTR_OFF 0x04 +#define SA5_INTR_PENDING 0x08 +#define SA5B_INTR_PENDING 0x04 +#define FIFO_EMPTY 0xffffffff +#define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */ + +#define HPSA_ERROR_BIT 0x02 + +/* Performant mode flags */ +#define SA5_PERF_INTR_PENDING 0x04 +#define SA5_PERF_INTR_OFF 0x05 +#define SA5_OUTDB_STATUS_PERF_BIT 0x01 +#define SA5_OUTDB_CLEAR_PERF_BIT 0x01 +#define SA5_OUTDB_CLEAR 0xA0 +#define SA5_OUTDB_CLEAR_PERF_BIT 0x01 +#define SA5_OUTDB_STATUS 0x9C + + +#define HPSA_INTR_ON 1 +#define HPSA_INTR_OFF 0 + +/* + * Inbound Post Queue offsets for IO Accelerator Mode 2 + */ +#define IOACCEL2_INBOUND_POSTQ_32 0x48 +#define IOACCEL2_INBOUND_POSTQ_64_LOW 0xd0 +#define IOACCEL2_INBOUND_POSTQ_64_HI 0xd4 + +#define HPSA_PHYSICAL_DEVICE_BUS 0 +#define HPSA_RAID_VOLUME_BUS 1 +#define HPSA_EXTERNAL_RAID_VOLUME_BUS 2 +#define HPSA_HBA_BUS 0 +#define HPSA_LEGACY_HBA_BUS 3 + +/* + Send the command to the hardware +*/ +static void SA5_submit_command(struct ctlr_info *h, + struct CommandList *c) +{ + writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); + (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); +} + +static void SA5_submit_command_no_read(struct ctlr_info *h, + struct CommandList *c) +{ + writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); +} + +static void SA5_submit_command_ioaccel2(struct ctlr_info *h, 
+ struct CommandList *c) +{ + writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); +} + +/* + * This card is the opposite of the other cards. + * 0 turns interrupts on... + * 0x08 turns them off... + */ +static void SA5_intr_mask(struct ctlr_info *h, unsigned long val) +{ + if (val) { /* Turn interrupts on */ + h->interrupts_enabled = 1; + writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + } else { /* Turn them off */ + h->interrupts_enabled = 0; + writel(SA5_INTR_OFF, + h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + } +} + +/* + * Variant of the above; 0x04 turns interrupts off... + */ +static void SA5B_intr_mask(struct ctlr_info *h, unsigned long val) +{ + if (val) { /* Turn interrupts on */ + h->interrupts_enabled = 1; + writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + } else { /* Turn them off */ + h->interrupts_enabled = 0; + writel(SA5B_INTR_OFF, + h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + } +} + +static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val) +{ + if (val) { /* turn on interrupts */ + h->interrupts_enabled = 1; + writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + } else { + h->interrupts_enabled = 0; + writel(SA5_PERF_INTR_OFF, + h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + } +} + +static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) +{ + struct reply_queue_buffer *rq = &h->reply_queue[q]; + unsigned long register_value = FIFO_EMPTY; + + /* msi auto clears the interrupt pending bit. */ + if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) { + /* flush the controller write of the reply queue by reading + * outbound doorbell status register. + */ + (void) readl(h->vaddr + SA5_OUTDB_STATUS); + writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); + /* Do a read in order to flush the write to the controller + * (as per spec.) + */ + (void) readl(h->vaddr + SA5_OUTDB_STATUS); + } + + if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) { + register_value = rq->head[rq->current_entry]; + rq->current_entry++; + atomic_dec(&h->commands_outstanding); + } else { + register_value = FIFO_EMPTY; + } + /* Check for wraparound */ + if (rq->current_entry == h->max_commands) { + rq->current_entry = 0; + rq->wraparound ^= 1; + } + return register_value; +} + +/* + * returns value read from hardware. + * returns FIFO_EMPTY if there is nothing to read + */ +static unsigned long SA5_completed(struct ctlr_info *h, + __attribute__((unused)) u8 q) +{ + unsigned long register_value + = readl(h->vaddr + SA5_REPLY_PORT_OFFSET); + + if (register_value != FIFO_EMPTY) + atomic_dec(&h->commands_outstanding); + +#ifdef HPSA_DEBUG + if (register_value != FIFO_EMPTY) + dev_dbg(&h->pdev->dev, "Read %lx back from board\n", + register_value); + else + dev_dbg(&h->pdev->dev, "FIFO Empty read\n"); +#endif + + return register_value; +} +/* + * Returns true if an interrupt is pending.. 
+ */ +static bool SA5_intr_pending(struct ctlr_info *h) +{ + unsigned long register_value = + readl(h->vaddr + SA5_INTR_STATUS); + return register_value & SA5_INTR_PENDING; +} + +static bool SA5_performant_intr_pending(struct ctlr_info *h) +{ + unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); + + if (!register_value) + return false; + + /* Read outbound doorbell to flush */ + register_value = readl(h->vaddr + SA5_OUTDB_STATUS); + return register_value & SA5_OUTDB_STATUS_PERF_BIT; +} + +#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100 + +static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h) +{ + unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); + + return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ? + true : false; +} + +/* + * Returns true if an interrupt is pending.. + */ +static bool SA5B_intr_pending(struct ctlr_info *h) +{ + return readl(h->vaddr + SA5_INTR_STATUS) & SA5B_INTR_PENDING; +} + +#define IOACCEL_MODE1_REPLY_QUEUE_INDEX 0x1A0 +#define IOACCEL_MODE1_PRODUCER_INDEX 0x1B8 +#define IOACCEL_MODE1_CONSUMER_INDEX 0x1BC +#define IOACCEL_MODE1_REPLY_UNUSED 0xFFFFFFFFFFFFFFFFULL + +static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) +{ + u64 register_value; + struct reply_queue_buffer *rq = &h->reply_queue[q]; + + BUG_ON(q >= h->nreply_queues); + + register_value = rq->head[rq->current_entry]; + if (register_value != IOACCEL_MODE1_REPLY_UNUSED) { + rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED; + if (++rq->current_entry == rq->size) + rq->current_entry = 0; + /* + * @todo + * + * Don't really need to write the new index after each command, + * but with current driver design this is easiest. + */ + wmb(); + writel((q << 24) | rq->current_entry, h->vaddr + + IOACCEL_MODE1_CONSUMER_INDEX); + atomic_dec(&h->commands_outstanding); + } + return (unsigned long) register_value; +} + +static struct access_method SA5_access = { + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_intr_mask, + .intr_pending = SA5_intr_pending, + .command_completed = SA5_completed, +}; + +/* Duplicate entry of the above to mark unsupported boards */ +static struct access_method SA5A_access = { + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_intr_mask, + .intr_pending = SA5_intr_pending, + .command_completed = SA5_completed, +}; + +static struct access_method SA5B_access = { + .submit_command = SA5_submit_command, + .set_intr_mask = SA5B_intr_mask, + .intr_pending = SA5B_intr_pending, + .command_completed = SA5_completed, +}; + +static struct access_method SA5_ioaccel_mode1_access = { + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = SA5_ioaccel_mode1_intr_pending, + .command_completed = SA5_ioaccel_mode1_completed, +}; + +static struct access_method SA5_ioaccel_mode2_access = { + .submit_command = SA5_submit_command_ioaccel2, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = SA5_performant_intr_pending, + .command_completed = SA5_performant_completed, +}; + +static struct access_method SA5_performant_access = { + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = SA5_performant_intr_pending, + .command_completed = SA5_performant_completed, +}; + +static struct access_method SA5_performant_access_no_read = { + .submit_command = SA5_submit_command_no_read, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = SA5_performant_intr_pending, + .command_completed = 
SA5_performant_completed, +}; + +struct board_type { + u32 board_id; + char *product_name; + struct access_method *access; +}; + +#endif /* HPSA_H */ + diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h new file mode 100644 index 000000000..ba6a3aa8d --- /dev/null +++ b/drivers/scsi/hpsa_cmd.h @@ -0,0 +1,888 @@ +/* + * Disk Array driver for HP Smart Array SAS controllers + * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries + * Copyright 2016 Microsemi Corporation + * Copyright 2014-2015 PMC-Sierra, Inc. + * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more details. + * + * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com + * + */ +#ifndef HPSA_CMD_H +#define HPSA_CMD_H + +#include + +#include /* static_assert */ +#include /* offsetof */ + +/* general boundary defintions */ +#define SENSEINFOBYTES 32 /* may vary between hbas */ +#define SG_ENTRIES_IN_CMD 32 /* Max SG entries excluding chain blocks */ +#define HPSA_SG_CHAIN 0x80000000 +#define HPSA_SG_LAST 0x40000000 +#define MAXREPLYQS 256 + +/* Command Status value */ +#define CMD_SUCCESS 0x0000 +#define CMD_TARGET_STATUS 0x0001 +#define CMD_DATA_UNDERRUN 0x0002 +#define CMD_DATA_OVERRUN 0x0003 +#define CMD_INVALID 0x0004 +#define CMD_PROTOCOL_ERR 0x0005 +#define CMD_HARDWARE_ERR 0x0006 +#define CMD_CONNECTION_LOST 0x0007 +#define CMD_ABORTED 0x0008 +#define CMD_ABORT_FAILED 0x0009 +#define CMD_UNSOLICITED_ABORT 0x000A +#define CMD_TIMEOUT 0x000B +#define CMD_UNABORTABLE 0x000C +#define CMD_TMF_STATUS 0x000D +#define CMD_IOACCEL_DISABLED 0x000E +#define CMD_CTLR_LOCKUP 0xffff +/* Note: CMD_CTLR_LOCKUP is not a value defined by the CISS spec + * it is a value defined by the driver that commands can be marked + * with when a controller lockup has been detected by the driver + */ + +/* TMF function status values */ +#define CISS_TMF_COMPLETE 0x00 +#define CISS_TMF_INVALID_FRAME 0x02 +#define CISS_TMF_NOT_SUPPORTED 0x04 +#define CISS_TMF_FAILED 0x05 +#define CISS_TMF_SUCCESS 0x08 +#define CISS_TMF_WRONG_LUN 0x09 +#define CISS_TMF_OVERLAPPED_TAG 0x0a + +/* Unit Attentions ASC's as defined for the MSA2012sa */ +#define POWER_OR_RESET 0x29 +#define STATE_CHANGED 0x2a +#define UNIT_ATTENTION_CLEARED 0x2f +#define LUN_FAILED 0x3e +#define REPORT_LUNS_CHANGED 0x3f + +/* Unit Attentions ASCQ's as defined for the MSA2012sa */ + + /* These ASCQ's defined for ASC = POWER_OR_RESET */ +#define POWER_ON_RESET 0x00 +#define POWER_ON_REBOOT 0x01 +#define SCSI_BUS_RESET 0x02 +#define MSA_TARGET_RESET 0x03 +#define CONTROLLER_FAILOVER 0x04 +#define TRANSCEIVER_SE 0x05 +#define TRANSCEIVER_LVD 0x06 + + /* These ASCQ's defined for ASC = STATE_CHANGED */ +#define RESERVATION_PREEMPTED 0x03 +#define ASYM_ACCESS_CHANGED 0x06 +#define LUN_CAPACITY_CHANGED 0x09 + +/* transfer direction */ +#define XFER_NONE 0x00 +#define XFER_WRITE 0x01 +#define XFER_READ 0x02 +#define XFER_RSVD 0x03 + +/* task attribute */ +#define ATTR_UNTAGGED 0x00 +#define ATTR_SIMPLE 0x04 +#define ATTR_HEADOFQUEUE 0x05 +#define ATTR_ORDERED 0x06 +#define 
ATTR_ACA 0x07 + +/* cdb type */ +#define TYPE_CMD 0x00 +#define TYPE_MSG 0x01 +#define TYPE_IOACCEL2_CMD 0x81 /* 0x81 is not used by hardware */ + +/* Message Types */ +#define HPSA_TASK_MANAGEMENT 0x00 +#define HPSA_RESET 0x01 +#define HPSA_SCAN 0x02 +#define HPSA_NOOP 0x03 + +#define HPSA_CTLR_RESET_TYPE 0x00 +#define HPSA_BUS_RESET_TYPE 0x01 +#define HPSA_TARGET_RESET_TYPE 0x03 +#define HPSA_LUN_RESET_TYPE 0x04 +#define HPSA_NEXUS_RESET_TYPE 0x05 + +/* Task Management Functions */ +#define HPSA_TMF_ABORT_TASK 0x00 +#define HPSA_TMF_ABORT_TASK_SET 0x01 +#define HPSA_TMF_CLEAR_ACA 0x02 +#define HPSA_TMF_CLEAR_TASK_SET 0x03 +#define HPSA_TMF_QUERY_TASK 0x04 +#define HPSA_TMF_QUERY_TASK_SET 0x05 +#define HPSA_TMF_QUERY_ASYNCEVENT 0x06 + + + +/* config space register offsets */ +#define CFG_VENDORID 0x00 +#define CFG_DEVICEID 0x02 +#define CFG_I2OBAR 0x10 +#define CFG_MEM1BAR 0x14 + +/* i2o space register offsets */ +#define I2O_IBDB_SET 0x20 +#define I2O_IBDB_CLEAR 0x70 +#define I2O_INT_STATUS 0x30 +#define I2O_INT_MASK 0x34 +#define I2O_IBPOST_Q 0x40 +#define I2O_OBPOST_Q 0x44 +#define I2O_DMA1_CFG 0x214 + +/* Configuration Table */ +#define CFGTBL_ChangeReq 0x00000001l +#define CFGTBL_AccCmds 0x00000001l +#define DOORBELL_CTLR_RESET 0x00000004l +#define DOORBELL_CTLR_RESET2 0x00000020l +#define DOORBELL_CLEAR_EVENTS 0x00000040l +#define DOORBELL_GENERATE_CHKPT 0x00000080l + +#define CFGTBL_Trans_Simple 0x00000002l +#define CFGTBL_Trans_Performant 0x00000004l +#define CFGTBL_Trans_io_accel1 0x00000080l +#define CFGTBL_Trans_io_accel2 0x00000100l +#define CFGTBL_Trans_use_short_tags 0x20000000l +#define CFGTBL_Trans_enable_directed_msix (1 << 30) + +#define CFGTBL_BusType_Ultra2 0x00000001l +#define CFGTBL_BusType_Ultra3 0x00000002l +#define CFGTBL_BusType_Fibre1G 0x00000100l +#define CFGTBL_BusType_Fibre2G 0x00000200l + +/* VPD Inquiry types */ +#define HPSA_INQUIRY_FAILED 0x02 +#define HPSA_VPD_SUPPORTED_PAGES 0x00 +#define HPSA_VPD_LV_DEVICE_ID 0x83 +#define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1 +#define HPSA_VPD_LV_IOACCEL_STATUS 0xC2 +#define HPSA_VPD_LV_STATUS 0xC3 +#define HPSA_VPD_HEADER_SZ 4 + +/* Logical volume states */ +#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff +#define HPSA_LV_OK 0x0 +#define HPSA_LV_FAILED 0x01 +#define HPSA_LV_NOT_AVAILABLE 0x0b +#define HPSA_LV_UNDERGOING_ERASE 0x0F +#define HPSA_LV_UNDERGOING_RPI 0x12 +#define HPSA_LV_PENDING_RPI 0x13 +#define HPSA_LV_ENCRYPTED_NO_KEY 0x14 +#define HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER 0x15 +#define HPSA_LV_UNDERGOING_ENCRYPTION 0x16 +#define HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING 0x17 +#define HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 0x18 +#define HPSA_LV_PENDING_ENCRYPTION 0x19 +#define HPSA_LV_PENDING_ENCRYPTION_REKEYING 0x1A + +struct vals32 { + u32 lower; + u32 upper; +}; + +union u64bit { + struct vals32 val32; + u64 val; +}; + +/* FIXME this is a per controller value (barf!) 
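+ * Together with MAX_EXT_TARGETS these limits determine HPSA_MAX_DEVICES,
+ * which sizes the ctlr_info->dev[] array in hpsa.h.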
*/ +#define HPSA_MAX_LUN 1024 +#define HPSA_MAX_PHYS_LUN 1024 +#define MAX_EXT_TARGETS 32 +#define HPSA_MAX_DEVICES (HPSA_MAX_PHYS_LUN + HPSA_MAX_LUN + \ + MAX_EXT_TARGETS + 1) /* + 1 is for the controller itself */ + +/* SCSI-3 Commands */ +#define HPSA_INQUIRY 0x12 +struct InquiryData { + u8 data_byte[36]; +} __packed; + +#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */ +#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */ +#define HPSA_REPORT_PHYS_EXTENDED 0x02 +#define HPSA_CISS_READ 0xc0 /* CISS Read */ +#define HPSA_GET_RAID_MAP 0xc8 /* CISS Get RAID Layout Map */ + +#define RAID_MAP_MAX_ENTRIES 256 + +struct raid_map_disk_data { + u32 ioaccel_handle; /**< Handle to access this disk via the + * I/O accelerator */ + u8 xor_mult[2]; /**< XOR multipliers for this position, + * valid for data disks only */ + u8 reserved[2]; +} __packed; + +struct raid_map_data { + __le32 structure_size; /* Size of entire structure in bytes */ + __le32 volume_blk_size; /* bytes / block in the volume */ + __le64 volume_blk_cnt; /* logical blocks on the volume */ + u8 phys_blk_shift; /* Shift factor to convert between + * units of logical blocks and physical + * disk blocks */ + u8 parity_rotation_shift; /* Shift factor to convert between units + * of logical stripes and physical + * stripes */ + __le16 strip_size; /* blocks used on each disk / stripe */ + __le64 disk_starting_blk; /* First disk block used in volume */ + __le64 disk_blk_cnt; /* disk blocks used by volume / disk */ + __le16 data_disks_per_row; /* data disk entries / row in the map */ + __le16 metadata_disks_per_row;/* mirror/parity disk entries / row + * in the map */ + __le16 row_cnt; /* rows in each layout map */ + __le16 layout_map_count; /* layout maps (1 map per mirror/parity + * group) */ + __le16 flags; /* Bit 0 set if encryption enabled */ +#define RAID_MAP_FLAG_ENCRYPT_ON 0x01 + __le16 dekindex; /* Data encryption key index. 
*/ + u8 reserved[16]; + struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES]; +} __packed; + +struct ReportLUNdata { + u8 LUNListLength[4]; + u8 extended_response_flag; + u8 reserved[3]; + u8 LUN[HPSA_MAX_LUN][8]; +} __packed; + +struct ext_report_lun_entry { + u8 lunid[8]; +#define MASKED_DEVICE(x) ((x)[3] & 0xC0) +#define GET_BMIC_BUS(lunid) ((lunid)[7] & 0x3F) +#define GET_BMIC_LEVEL_TWO_TARGET(lunid) ((lunid)[6]) +#define GET_BMIC_DRIVE_NUMBER(lunid) (((GET_BMIC_BUS((lunid)) - 1) << 8) + \ + GET_BMIC_LEVEL_TWO_TARGET((lunid))) + u8 wwid[8]; + u8 device_type; + u8 device_flags; + u8 lun_count; /* multi-lun device, how many luns */ + u8 redundant_paths; + u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */ +} __packed; + +struct ReportExtendedLUNdata { + u8 LUNListLength[4]; + u8 extended_response_flag; + u8 reserved[3]; + struct ext_report_lun_entry LUN[HPSA_MAX_PHYS_LUN]; +} __packed; + +struct SenseSubsystem_info { + u8 reserved[36]; + u8 portname[8]; + u8 reserved1[1108]; +} __packed; + +/* BMIC commands */ +#define BMIC_READ 0x26 +#define BMIC_WRITE 0x27 +#define BMIC_CACHE_FLUSH 0xc2 +#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */ +#define BMIC_FLASH_FIRMWARE 0xF7 +#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64 +#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15 +#define BMIC_IDENTIFY_CONTROLLER 0x11 +#define BMIC_SET_DIAG_OPTIONS 0xF4 +#define BMIC_SENSE_DIAG_OPTIONS 0xF5 +#define HPSA_DIAG_OPTS_DISABLE_RLD_CACHING 0x80000000 +#define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66 +#define BMIC_SENSE_STORAGE_BOX_PARAMS 0x65 + +/* Command List Structure */ +union SCSI3Addr { + struct { + u8 Dev; + u8 Bus:6; + u8 Mode:2; /* b00 */ + } PeripDev; + struct { + u8 DevLSB; + u8 DevMSB:6; + u8 Mode:2; /* b01 */ + } LogDev; + struct { + u8 Dev:5; + u8 Bus:3; + u8 Targ:6; + u8 Mode:2; /* b10 */ + } LogUnit; +} __packed; + +struct PhysDevAddr { + u32 TargetId:24; + u32 Bus:6; + u32 Mode:2; + /* 2 level target device addr */ + union SCSI3Addr Target[2]; +} __packed; + +struct LogDevAddr { + u32 VolId:30; + u32 Mode:2; + u8 reserved[4]; +} __packed; + +union LUNAddr { + u8 LunAddrBytes[8]; + union SCSI3Addr SCSI3Lun[4]; + struct PhysDevAddr PhysDev; + struct LogDevAddr LogDev; +} __packed; + +struct CommandListHeader { + u8 ReplyQueue; + u8 SGList; + __le16 SGTotal; + __le64 tag; + union LUNAddr LUN; +} __packed; + +struct RequestBlock { + u8 CDBLen; + /* + * type_attr_dir: + * type: low 3 bits + * attr: middle 3 bits + * dir: high 2 bits + */ + u8 type_attr_dir; +#define TYPE_ATTR_DIR(t, a, d) ((((d) & 0x03) << 6) |\ + (((a) & 0x07) << 3) |\ + ((t) & 0x07)) +#define GET_TYPE(tad) ((tad) & 0x07) +#define GET_ATTR(tad) (((tad) >> 3) & 0x07) +#define GET_DIR(tad) (((tad) >> 6) & 0x03) + u16 Timeout; + u8 CDB[16]; +} __packed; + +struct ErrDescriptor { + __le64 Addr; + __le32 Len; +} __packed; + +struct SGDescriptor { + __le64 Addr; + __le32 Len; + __le32 Ext; +} __packed; + +union MoreErrInfo { + struct { + u8 Reserved[3]; + u8 Type; + u32 ErrorInfo; + } Common_Info; + struct { + u8 Reserved[2]; + u8 offense_size; /* size of offending entry */ + u8 offense_num; /* byte # of offense 0-base */ + u32 offense_value; + } Invalid_Cmd; +} __packed; + +struct ErrorInfo { + u8 ScsiStatus; + u8 SenseLen; + u16 CommandStatus; + u32 ResidualCnt; + union MoreErrInfo MoreErrInfo; + u8 SenseInfo[SENSEINFOBYTES]; +} __packed; +/* Command types */ +#define CMD_IOCTL_PEND 0x01 +#define CMD_SCSI 0x03 +#define CMD_IOACCEL1 0x04 +#define CMD_IOACCEL2 0x05 +#define IOACCEL2_TMF 0x06 + +#define 
DIRECT_LOOKUP_SHIFT 4 +#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1)) + +#define HPSA_ERROR_BIT 0x02 +struct ctlr_info; /* defined in hpsa.h */ +/* The size of this structure needs to be divisible by 128 + * on all architectures. The low 4 bits of the addresses + * are used as follows: + * + * bit 0: to device, used to indicate "performant mode" command + * from device, indidcates error status. + * bit 1-3: to device, indicates block fetch table entry for + * reducing DMA in fetching commands from host memory. + */ + +#define COMMANDLIST_ALIGNMENT 128 +struct CommandList { + struct CommandListHeader Header; + struct RequestBlock Request; + struct ErrDescriptor ErrDesc; + struct SGDescriptor SG[SG_ENTRIES_IN_CMD]; + /* information associated with the command */ + u32 busaddr; /* physical addr of this record */ + struct ErrorInfo *err_info; /* pointer to the allocated mem */ + struct ctlr_info *h; + int cmd_type; + long cmdindex; + struct completion *waiting; + struct scsi_cmnd *scsi_cmd; + struct work_struct work; + + /* + * For commands using either of the two "ioaccel" paths to + * bypass the RAID stack and go directly to the physical disk + * phys_disk is a pointer to the hpsa_scsi_dev_t to which the + * i/o is destined. We need to store that here because the command + * may potentially encounter TASK SET FULL and need to be resubmitted + * For "normal" i/o's not using the "ioaccel" paths, phys_disk is + * not used. + */ + struct hpsa_scsi_dev_t *phys_disk; + + bool retry_pending; + struct hpsa_scsi_dev_t *device; + atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */ +} __aligned(COMMANDLIST_ALIGNMENT); + +/* + * Make sure our embedded atomic variable is aligned. Otherwise we break atomic + * operations on architectures that don't support unaligned atomics like IA64. + * + * The assert guards against reintroductin against unwanted __packed to + * the struct CommandList. + */ +static_assert(offsetof(struct CommandList, refcount) % __alignof__(atomic_t) == 0); + +/* Max S/G elements in I/O accelerator command */ +#define IOACCEL1_MAXSGENTRIES 24 +#define IOACCEL2_MAXSGENTRIES 28 + +/* + * Structure for I/O accelerator (mode 1) commands. + * Note that this structure must be 128-byte aligned in size. 
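+ * The 128-byte multiple size is checked by the BUILD_BUG_ON() in
+ * hpsa_alloc_ioaccel1_cmd_and_bft(); as with struct CommandList above, the
+ * low bits of the command's bus address are reused for performant-mode
+ * information, so the alignment must be preserved.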
+ */ +#define IOACCEL1_COMMANDLIST_ALIGNMENT 128 +struct io_accel1_cmd { + __le16 dev_handle; /* 0x00 - 0x01 */ + u8 reserved1; /* 0x02 */ + u8 function; /* 0x03 */ + u8 reserved2[8]; /* 0x04 - 0x0B */ + u32 err_info; /* 0x0C - 0x0F */ + u8 reserved3[2]; /* 0x10 - 0x11 */ + u8 err_info_len; /* 0x12 */ + u8 reserved4; /* 0x13 */ + u8 sgl_offset; /* 0x14 */ + u8 reserved5[7]; /* 0x15 - 0x1B */ + __le32 transfer_len; /* 0x1C - 0x1F */ + u8 reserved6[4]; /* 0x20 - 0x23 */ + __le16 io_flags; /* 0x24 - 0x25 */ + u8 reserved7[14]; /* 0x26 - 0x33 */ + u8 LUN[8]; /* 0x34 - 0x3B */ + __le32 control; /* 0x3C - 0x3F */ + u8 CDB[16]; /* 0x40 - 0x4F */ + u8 reserved8[16]; /* 0x50 - 0x5F */ + __le16 host_context_flags; /* 0x60 - 0x61 */ + __le16 timeout_sec; /* 0x62 - 0x63 */ + u8 ReplyQueue; /* 0x64 */ + u8 reserved9[3]; /* 0x65 - 0x67 */ + __le64 tag; /* 0x68 - 0x6F */ + __le64 host_addr; /* 0x70 - 0x77 */ + u8 CISS_LUN[8]; /* 0x78 - 0x7F */ + struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES]; +} __packed __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); + +#define IOACCEL1_FUNCTION_SCSIIO 0x00 +#define IOACCEL1_SGLOFFSET 32 + +#define IOACCEL1_IOFLAGS_IO_REQ 0x4000 +#define IOACCEL1_IOFLAGS_CDBLEN_MASK 0x001F +#define IOACCEL1_IOFLAGS_CDBLEN_MAX 16 + +#define IOACCEL1_CONTROL_NODATAXFER 0x00000000 +#define IOACCEL1_CONTROL_DATA_OUT 0x01000000 +#define IOACCEL1_CONTROL_DATA_IN 0x02000000 +#define IOACCEL1_CONTROL_TASKPRIO_MASK 0x00007800 +#define IOACCEL1_CONTROL_TASKPRIO_SHIFT 11 +#define IOACCEL1_CONTROL_SIMPLEQUEUE 0x00000000 +#define IOACCEL1_CONTROL_HEADOFQUEUE 0x00000100 +#define IOACCEL1_CONTROL_ORDEREDQUEUE 0x00000200 +#define IOACCEL1_CONTROL_ACA 0x00000400 + +#define IOACCEL1_HCFLAGS_CISS_FORMAT 0x0013 + +#define IOACCEL1_BUSADDR_CMDTYPE 0x00000060 + +struct ioaccel2_sg_element { + __le64 address; + __le32 length; + u8 reserved[3]; + u8 chain_indicator; +#define IOACCEL2_CHAIN 0x80 +#define IOACCEL2_LAST_SG 0x40 +} __packed; + +/* + * SCSI Response Format structure for IO Accelerator Mode 2 + */ +struct io_accel2_scsi_response { + u8 IU_type; +#define IOACCEL2_IU_TYPE_SRF 0x60 + u8 reserved1[3]; + u8 req_id[4]; /* request identifier */ + u8 reserved2[4]; + u8 serv_response; /* service response */ +#define IOACCEL2_SERV_RESPONSE_COMPLETE 0x000 +#define IOACCEL2_SERV_RESPONSE_FAILURE 0x001 +#define IOACCEL2_SERV_RESPONSE_TMF_COMPLETE 0x002 +#define IOACCEL2_SERV_RESPONSE_TMF_SUCCESS 0x003 +#define IOACCEL2_SERV_RESPONSE_TMF_REJECTED 0x004 +#define IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN 0x005 + u8 status; /* status */ +#define IOACCEL2_STATUS_SR_TASK_COMP_GOOD 0x00 +#define IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND 0x02 +#define IOACCEL2_STATUS_SR_TASK_COMP_BUSY 0x08 +#define IOACCEL2_STATUS_SR_TASK_COMP_RES_CON 0x18 +#define IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL 0x28 +#define IOACCEL2_STATUS_SR_TASK_COMP_ABORTED 0x40 +#define IOACCEL2_STATUS_SR_IOACCEL_DISABLED 0x0E +#define IOACCEL2_STATUS_SR_IO_ERROR 0x01 +#define IOACCEL2_STATUS_SR_IO_ABORTED 0x02 +#define IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE 0x03 +#define IOACCEL2_STATUS_SR_INVALID_DEVICE 0x04 +#define IOACCEL2_STATUS_SR_UNDERRUN 0x51 +#define IOACCEL2_STATUS_SR_OVERRUN 0x75 + u8 data_present; /* low 2 bits */ +#define IOACCEL2_NO_DATAPRESENT 0x000 +#define IOACCEL2_RESPONSE_DATAPRESENT 0x001 +#define IOACCEL2_SENSE_DATA_PRESENT 0x002 +#define IOACCEL2_RESERVED 0x003 + u8 sense_data_len; /* sense/response data length */ + u8 resid_cnt[4]; /* residual count */ + u8 sense_data_buff[32]; /* sense/response data buffer */ +} __packed; + +/* + * Structure for I/O 
accelerator (mode 2 or m2) commands. + * Note that this structure must be 128-byte aligned in size. + */ +#define IOACCEL2_COMMANDLIST_ALIGNMENT 128 +struct io_accel2_cmd { + u8 IU_type; /* IU Type */ + u8 direction; /* direction, memtype, and encryption */ +#define IOACCEL2_DIRECTION_MASK 0x03 /* bits 0,1: direction */ +#define IOACCEL2_DIRECTION_MEMTYPE_MASK 0x04 /* bit 2: memtype source/dest */ + /* 0b=PCIe, 1b=DDR */ +#define IOACCEL2_DIRECTION_ENCRYPT_MASK 0x08 /* bit 3: encryption flag */ + /* 0=off, 1=on */ + u8 reply_queue; /* Reply Queue ID */ + u8 reserved1; /* Reserved */ + __le32 scsi_nexus; /* Device Handle */ + __le32 Tag; /* cciss tag, lower 4 bytes only */ + __le32 tweak_lower; /* Encryption tweak, lower 4 bytes */ + u8 cdb[16]; /* SCSI Command Descriptor Block */ + u8 cciss_lun[8]; /* 8 byte SCSI address */ + __le32 data_len; /* Total bytes to transfer */ + u8 cmd_priority_task_attr; /* priority and task attrs */ +#define IOACCEL2_PRIORITY_MASK 0x78 +#define IOACCEL2_ATTR_MASK 0x07 + u8 sg_count; /* Number of sg elements */ + __le16 dekindex; /* Data encryption key index */ + __le64 err_ptr; /* Error Pointer */ + __le32 err_len; /* Error Length*/ + __le32 tweak_upper; /* Encryption tweak, upper 4 bytes */ + struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES]; + struct io_accel2_scsi_response error_data; +} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); + +/* + * defines for Mode 2 command struct + * FIXME: this can't be all I need mfm + */ +#define IOACCEL2_IU_TYPE 0x40 +#define IOACCEL2_IU_TMF_TYPE 0x41 +#define IOACCEL2_DIR_NO_DATA 0x00 +#define IOACCEL2_DIR_DATA_IN 0x01 +#define IOACCEL2_DIR_DATA_OUT 0x02 +#define IOACCEL2_TMF_ABORT 0x01 +/* + * SCSI Task Management Request format for Accelerator Mode 2 + */ +struct hpsa_tmf_struct { + u8 iu_type; /* Information Unit Type */ + u8 reply_queue; /* Reply Queue ID */ + u8 tmf; /* Task Management Function */ + u8 reserved1; /* byte 3 Reserved */ + __le32 it_nexus; /* SCSI I-T Nexus */ + u8 lun_id[8]; /* LUN ID for TMF request */ + __le64 tag; /* cciss tag associated w/ request */ + __le64 abort_tag; /* cciss tag of SCSI cmd or TMF to abort */ + __le64 error_ptr; /* Error Pointer */ + __le32 error_len; /* Error Length */ +} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); + +/* Configuration Table Structure */ +struct HostWrite { + __le32 TransportRequest; + __le32 command_pool_addr_hi; + __le32 CoalIntDelay; + __le32 CoalIntCount; +} __packed; + +#define SIMPLE_MODE 0x02 +#define PERFORMANT_MODE 0x04 +#define MEMQ_MODE 0x08 +#define IOACCEL_MODE_1 0x80 + +#define DRIVER_SUPPORT_UA_ENABLE 0x00000001 + +struct CfgTable { + u8 Signature[4]; + __le32 SpecValence; + __le32 TransportSupport; + __le32 TransportActive; + struct HostWrite HostWrite; + __le32 CmdsOutMax; + __le32 BusTypes; + __le32 TransMethodOffset; + u8 ServerName[16]; + __le32 HeartBeat; + __le32 driver_support; +#define ENABLE_SCSI_PREFETCH 0x100 +#define ENABLE_UNIT_ATTN 0x01 + __le32 MaxScatterGatherElements; + __le32 MaxLogicalUnits; + __le32 MaxPhysicalDevices; + __le32 MaxPhysicalDrivesPerLogicalUnit; + __le32 MaxPerformantModeCommands; + __le32 MaxBlockFetch; + __le32 PowerConservationSupport; + __le32 PowerConservationEnable; + __le32 TMFSupportFlags; + u8 TMFTagMask[8]; + u8 reserved[0x78 - 0x70]; + __le32 misc_fw_support; /* offset 0x78 */ +#define MISC_FW_DOORBELL_RESET 0x02 +#define MISC_FW_DOORBELL_RESET2 0x010 +#define MISC_FW_RAID_OFFLOAD_BASIC 0x020 +#define MISC_FW_EVENT_NOTIFY 0x080 + u8 driver_version[32]; + __le32 max_cached_write_size; 
+ u8 driver_scratchpad[16]; + __le32 max_error_info_length; + __le32 io_accel_max_embedded_sg_count; + __le32 io_accel_request_size_offset; + __le32 event_notify; +#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30) +#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31) + __le32 clear_event_notify; +} __packed; + +#define NUM_BLOCKFETCH_ENTRIES 8 +struct TransTable_struct { + __le32 BlockFetch[NUM_BLOCKFETCH_ENTRIES]; + __le32 RepQSize; + __le32 RepQCount; + __le32 RepQCtrAddrLow32; + __le32 RepQCtrAddrHigh32; +#define MAX_REPLY_QUEUES 64 + struct vals32 RepQAddr[MAX_REPLY_QUEUES]; +} __packed; + +struct hpsa_pci_info { + unsigned char bus; + unsigned char dev_fn; + unsigned short domain; + u32 board_id; +} __packed; + +struct bmic_identify_controller { + u8 configured_logical_drive_count; /* offset 0 */ + u8 pad1[153]; + __le16 extended_logical_unit_count; /* offset 154 */ + u8 pad2[136]; + u8 controller_mode; /* offset 292 */ + u8 pad3[32]; +} __packed; + + +struct bmic_identify_physical_device { + u8 scsi_bus; /* SCSI Bus number on controller */ + u8 scsi_id; /* SCSI ID on this bus */ + __le16 block_size; /* sector size in bytes */ + __le32 total_blocks; /* number for sectors on drive */ + __le32 reserved_blocks; /* controller reserved (RIS) */ + u8 model[40]; /* Physical Drive Model */ + u8 serial_number[40]; /* Drive Serial Number */ + u8 firmware_revision[8]; /* drive firmware revision */ + u8 scsi_inquiry_bits; /* inquiry byte 7 bits */ + u8 compaq_drive_stamp; /* 0 means drive not stamped */ + u8 last_failure_reason; +#define BMIC_LAST_FAILURE_TOO_SMALL_IN_LOAD_CONFIG 0x01 +#define BMIC_LAST_FAILURE_ERROR_ERASING_RIS 0x02 +#define BMIC_LAST_FAILURE_ERROR_SAVING_RIS 0x03 +#define BMIC_LAST_FAILURE_FAIL_DRIVE_COMMAND 0x04 +#define BMIC_LAST_FAILURE_MARK_BAD_FAILED 0x05 +#define BMIC_LAST_FAILURE_MARK_BAD_FAILED_IN_FINISH_REMAP 0x06 +#define BMIC_LAST_FAILURE_TIMEOUT 0x07 +#define BMIC_LAST_FAILURE_AUTOSENSE_FAILED 0x08 +#define BMIC_LAST_FAILURE_MEDIUM_ERROR_1 0x09 +#define BMIC_LAST_FAILURE_MEDIUM_ERROR_2 0x0a +#define BMIC_LAST_FAILURE_NOT_READY_BAD_SENSE 0x0b +#define BMIC_LAST_FAILURE_NOT_READY 0x0c +#define BMIC_LAST_FAILURE_HARDWARE_ERROR 0x0d +#define BMIC_LAST_FAILURE_ABORTED_COMMAND 0x0e +#define BMIC_LAST_FAILURE_WRITE_PROTECTED 0x0f +#define BMIC_LAST_FAILURE_SPIN_UP_FAILURE_IN_RECOVER 0x10 +#define BMIC_LAST_FAILURE_REBUILD_WRITE_ERROR 0x11 +#define BMIC_LAST_FAILURE_TOO_SMALL_IN_HOT_PLUG 0x12 +#define BMIC_LAST_FAILURE_BUS_RESET_RECOVERY_ABORTED 0x13 +#define BMIC_LAST_FAILURE_REMOVED_IN_HOT_PLUG 0x14 +#define BMIC_LAST_FAILURE_INIT_REQUEST_SENSE_FAILED 0x15 +#define BMIC_LAST_FAILURE_INIT_START_UNIT_FAILED 0x16 +#define BMIC_LAST_FAILURE_INQUIRY_FAILED 0x17 +#define BMIC_LAST_FAILURE_NON_DISK_DEVICE 0x18 +#define BMIC_LAST_FAILURE_READ_CAPACITY_FAILED 0x19 +#define BMIC_LAST_FAILURE_INVALID_BLOCK_SIZE 0x1a +#define BMIC_LAST_FAILURE_HOT_PLUG_REQUEST_SENSE_FAILED 0x1b +#define BMIC_LAST_FAILURE_HOT_PLUG_START_UNIT_FAILED 0x1c +#define BMIC_LAST_FAILURE_WRITE_ERROR_AFTER_REMAP 0x1d +#define BMIC_LAST_FAILURE_INIT_RESET_RECOVERY_ABORTED 0x1e +#define BMIC_LAST_FAILURE_DEFERRED_WRITE_ERROR 0x1f +#define BMIC_LAST_FAILURE_MISSING_IN_SAVE_RIS 0x20 +#define BMIC_LAST_FAILURE_WRONG_REPLACE 0x21 +#define BMIC_LAST_FAILURE_GDP_VPD_INQUIRY_FAILED 0x22 +#define BMIC_LAST_FAILURE_GDP_MODE_SENSE_FAILED 0x23 +#define BMIC_LAST_FAILURE_DRIVE_NOT_IN_48BIT_MODE 0x24 +#define BMIC_LAST_FAILURE_DRIVE_TYPE_MIX_IN_HOT_PLUG 0x25 +#define 
BMIC_LAST_FAILURE_DRIVE_TYPE_MIX_IN_LOAD_CFG 0x26 +#define BMIC_LAST_FAILURE_PROTOCOL_ADAPTER_FAILED 0x27 +#define BMIC_LAST_FAILURE_FAULTY_ID_BAY_EMPTY 0x28 +#define BMIC_LAST_FAILURE_FAULTY_ID_BAY_OCCUPIED 0x29 +#define BMIC_LAST_FAILURE_FAULTY_ID_INVALID_BAY 0x2a +#define BMIC_LAST_FAILURE_WRITE_RETRIES_FAILED 0x2b + +#define BMIC_LAST_FAILURE_SMART_ERROR_REPORTED 0x37 +#define BMIC_LAST_FAILURE_PHY_RESET_FAILED 0x38 +#define BMIC_LAST_FAILURE_ONLY_ONE_CTLR_CAN_SEE_DRIVE 0x40 +#define BMIC_LAST_FAILURE_KC_VOLUME_FAILED 0x41 +#define BMIC_LAST_FAILURE_UNEXPECTED_REPLACEMENT 0x42 +#define BMIC_LAST_FAILURE_OFFLINE_ERASE 0x80 +#define BMIC_LAST_FAILURE_OFFLINE_TOO_SMALL 0x81 +#define BMIC_LAST_FAILURE_OFFLINE_DRIVE_TYPE_MIX 0x82 +#define BMIC_LAST_FAILURE_OFFLINE_ERASE_COMPLETE 0x83 + + u8 flags; + u8 more_flags; + u8 scsi_lun; /* SCSI LUN for phys drive */ + u8 yet_more_flags; + u8 even_more_flags; + __le32 spi_speed_rules;/* SPI Speed data:Ultra disable diagnose */ + u8 phys_connector[2]; /* connector number on controller */ + u8 phys_box_on_bus; /* phys enclosure this drive resides */ + u8 phys_bay_in_box; /* phys drv bay this drive resides */ + __le32 rpm; /* Drive rotational speed in rpm */ + u8 device_type; /* type of drive */ +#define BMIC_DEVICE_TYPE_CONTROLLER 0x07 + + u8 sata_version; /* only valid when drive_type is SATA */ + __le64 big_total_block_count; + __le64 ris_starting_lba; + __le32 ris_size; + u8 wwid[20]; + u8 controller_phy_map[32]; + __le16 phy_count; + u8 phy_connected_dev_type[256]; + u8 phy_to_drive_bay_num[256]; + __le16 phy_to_attached_dev_index[256]; + u8 box_index; + u8 reserved; + __le16 extra_physical_drive_flags; +#define BMIC_PHYS_DRIVE_SUPPORTS_GAS_GAUGE(idphydrv) \ + (idphydrv->extra_physical_drive_flags & (1 << 10)) + u8 negotiated_link_rate[256]; + u8 phy_to_phy_map[256]; + u8 redundant_path_present_map; + u8 redundant_path_failure_map; + u8 active_path_number; + __le16 alternate_paths_phys_connector[8]; + u8 alternate_paths_phys_box_on_port[8]; + u8 multi_lun_device_lun_count; + u8 minimum_good_fw_revision[8]; + u8 unique_inquiry_bytes[20]; + u8 current_temperature_degreesC; + u8 temperature_threshold_degreesC; + u8 max_temperature_degreesC; + u8 logical_blocks_per_phys_block_exp; /* phyblocksize = 512*2^exp */ + __le16 current_queue_depth_limit; + u8 reserved_switch_stuff[60]; + __le16 power_on_hours; /* valid only if gas gauge supported */ + __le16 percent_endurance_used; /* valid only if gas gauge supported. 
*/ +#define BMIC_PHYS_DRIVE_SSD_WEAROUT(idphydrv) \ + ((idphydrv->percent_endurance_used & 0x80) || \ + (idphydrv->percent_endurance_used > 10000)) + u8 drive_authentication; +#define BMIC_PHYS_DRIVE_AUTHENTICATED(idphydrv) \ + (idphydrv->drive_authentication == 0x80) + u8 smart_carrier_authentication; +#define BMIC_SMART_CARRIER_AUTHENTICATION_SUPPORTED(idphydrv) \ + (idphydrv->smart_carrier_authentication != 0x0) +#define BMIC_SMART_CARRIER_AUTHENTICATED(idphydrv) \ + (idphydrv->smart_carrier_authentication == 0x01) + u8 smart_carrier_app_fw_version; + u8 smart_carrier_bootloader_fw_version; + u8 sanitize_support_flags; + u8 drive_key_flags; + u8 encryption_key_name[64]; + __le32 misc_drive_flags; + __le16 dek_index; + __le16 hba_drive_encryption_flags; + __le16 max_overwrite_time; + __le16 max_block_erase_time; + __le16 max_crypto_erase_time; + u8 device_connector_info[5]; + u8 connector_name[8][8]; + u8 page_83_id[16]; + u8 max_link_rate[256]; + u8 neg_phys_link_rate[256]; + u8 box_conn_name[8]; +} __packed __attribute((aligned(512))); + +struct bmic_sense_subsystem_info { + u8 primary_slot_number; + u8 reserved[3]; + u8 chasis_serial_number[32]; + u8 primary_world_wide_id[8]; + u8 primary_array_serial_number[32]; /* NULL terminated */ + u8 primary_cache_serial_number[32]; /* NULL terminated */ + u8 reserved_2[8]; + u8 secondary_array_serial_number[32]; + u8 secondary_cache_serial_number[32]; + u8 pad[332]; +} __packed; + +struct bmic_sense_storage_box_params { + u8 reserved[36]; + u8 inquiry_valid; + u8 reserved_1[68]; + u8 phys_box_on_port; + u8 reserved_2[22]; + u16 connection_info; + u8 reserver_3[84]; + u8 phys_connector[2]; + u8 reserved_4[296]; +} __packed; + +#endif /* HPSA_CMD_H */ diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c new file mode 100644 index 000000000..f5334ccbf --- /dev/null +++ b/drivers/scsi/hptiop.c @@ -0,0 +1,1699 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HighPoint RR3xxx/4xxx controller driver for Linux + * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved. 
+ * + * Please report bugs/comments/suggestions to linux@highpoint-tech.com + * + * For more information, visit http://www.highpoint-tech.com + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hptiop.h" + +MODULE_AUTHOR("HighPoint Technologies, Inc."); +MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver"); + +static char driver_name[] = "hptiop"; +static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver"; +static const char driver_ver[] = "v1.10.0"; + +static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec); +static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag, + struct hpt_iop_request_scsi_command *req); +static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag); +static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag); +static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg); + +static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec) +{ + u32 req = 0; + int i; + + for (i = 0; i < millisec; i++) { + req = readl(&hba->u.itl.iop->inbound_queue); + if (req != IOPMU_QUEUE_EMPTY) + break; + msleep(1); + } + + if (req != IOPMU_QUEUE_EMPTY) { + writel(req, &hba->u.itl.iop->outbound_queue); + readl(&hba->u.itl.iop->outbound_intstatus); + return 0; + } + + return -1; +} + +static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec) +{ + return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec); +} + +static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec) +{ + return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec); +} + +static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag) +{ + if (tag & IOPMU_QUEUE_ADDR_HOST_BIT) + hptiop_host_request_callback_itl(hba, + tag & ~IOPMU_QUEUE_ADDR_HOST_BIT); + else + hptiop_iop_request_callback_itl(hba, tag); +} + +static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba) +{ + u32 req; + + while ((req = readl(&hba->u.itl.iop->outbound_queue)) != + IOPMU_QUEUE_EMPTY) { + + if (req & IOPMU_QUEUE_MASK_HOST_BITS) + hptiop_request_callback_itl(hba, req); + else { + struct hpt_iop_request_header __iomem * p; + + p = (struct hpt_iop_request_header __iomem *) + ((char __iomem *)hba->u.itl.iop + req); + + if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) { + if (readl(&p->context)) + hptiop_request_callback_itl(hba, req); + else + writel(1, &p->context); + } + else + hptiop_request_callback_itl(hba, req); + } + } +} + +static int iop_intr_itl(struct hptiop_hba *hba) +{ + struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop; + void __iomem *plx = hba->u.itl.plx; + u32 status; + int ret = 0; + + if (plx && readl(plx + 0x11C5C) & 0xf) + writel(1, plx + 0x11C60); + + status = readl(&iop->outbound_intstatus); + + if (status & IOPMU_OUTBOUND_INT_MSG0) { + u32 msg = readl(&iop->outbound_msgaddr0); + + dprintk("received outbound msg %x\n", msg); + writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus); + hptiop_message_callback(hba, msg); + ret = 1; + } + + if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) { + hptiop_drain_outbound_queue_itl(hba); + ret = 1; + } + + return ret; +} + +static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu) +{ + u32 outbound_tail = readl(&mu->outbound_tail); + u32 outbound_head = readl(&mu->outbound_head); + + if (outbound_tail != outbound_head) { + u64 p; + + memcpy_fromio(&p, 
&mu->outbound_q[mu->outbound_tail], 8); + outbound_tail++; + + if (outbound_tail == MVIOP_QUEUE_LEN) + outbound_tail = 0; + writel(outbound_tail, &mu->outbound_tail); + return p; + } else + return 0; +} + +static void mv_inbound_write(u64 p, struct hptiop_hba *hba) +{ + u32 inbound_head = readl(&hba->u.mv.mu->inbound_head); + u32 head = inbound_head + 1; + + if (head == MVIOP_QUEUE_LEN) + head = 0; + + memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8); + writel(head, &hba->u.mv.mu->inbound_head); + writel(MVIOP_MU_INBOUND_INT_POSTQUEUE, + &hba->u.mv.regs->inbound_doorbell); +} + +static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag) +{ + u32 req_type = (tag >> 5) & 0x7; + struct hpt_iop_request_scsi_command *req; + + dprintk("hptiop_request_callback_mv: tag=%llx\n", tag); + + BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0); + + switch (req_type) { + case IOP_REQUEST_TYPE_GET_CONFIG: + case IOP_REQUEST_TYPE_SET_CONFIG: + hba->msg_done = 1; + break; + + case IOP_REQUEST_TYPE_SCSI_COMMAND: + req = hba->reqs[tag >> 8].req_virt; + if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)) + req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS); + + hptiop_finish_scsi_req(hba, tag>>8, req); + break; + + default: + break; + } +} + +static int iop_intr_mv(struct hptiop_hba *hba) +{ + u32 status; + int ret = 0; + + status = readl(&hba->u.mv.regs->outbound_doorbell); + writel(~status, &hba->u.mv.regs->outbound_doorbell); + + if (status & MVIOP_MU_OUTBOUND_INT_MSG) { + u32 msg; + msg = readl(&hba->u.mv.mu->outbound_msg); + dprintk("received outbound msg %x\n", msg); + hptiop_message_callback(hba, msg); + ret = 1; + } + + if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) { + u64 tag; + + while ((tag = mv_outbound_read(hba->u.mv.mu))) + hptiop_request_callback_mv(hba, tag); + ret = 1; + } + + return ret; +} + +static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag) +{ + u32 req_type = _tag & 0xf; + struct hpt_iop_request_scsi_command *req; + + switch (req_type) { + case IOP_REQUEST_TYPE_GET_CONFIG: + case IOP_REQUEST_TYPE_SET_CONFIG: + hba->msg_done = 1; + break; + + case IOP_REQUEST_TYPE_SCSI_COMMAND: + req = hba->reqs[(_tag >> 4) & 0xff].req_virt; + if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT)) + req->header.result = IOP_RESULT_SUCCESS; + hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req); + break; + + default: + break; + } +} + +static int iop_intr_mvfrey(struct hptiop_hba *hba) +{ + u32 _tag, status, cptr, cur_rptr; + int ret = 0; + + if (hba->initialized) + writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable)); + + status = readl(&(hba->u.mvfrey.mu->f0_doorbell)); + if (status) { + writel(status, &(hba->u.mvfrey.mu->f0_doorbell)); + if (status & CPU_TO_F0_DRBL_MSG_BIT) { + u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a)); + dprintk("received outbound msg %x\n", msg); + hptiop_message_callback(hba, msg); + } + ret = 1; + } + + status = readl(&(hba->u.mvfrey.mu->isr_cause)); + if (status) { + writel(status, &(hba->u.mvfrey.mu->isr_cause)); + do { + cptr = *hba->u.mvfrey.outlist_cptr & 0xff; + cur_rptr = hba->u.mvfrey.outlist_rptr; + while (cur_rptr != cptr) { + cur_rptr++; + if (cur_rptr == hba->u.mvfrey.list_count) + cur_rptr = 0; + + _tag = hba->u.mvfrey.outlist[cur_rptr].val; + BUG_ON(!(_tag & IOPMU_QUEUE_MASK_HOST_BITS)); + hptiop_request_callback_mvfrey(hba, _tag); + ret = 1; + } + hba->u.mvfrey.outlist_rptr = cur_rptr; + } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff)); + } + + if (hba->initialized) + writel(0x1010, 
&(hba->u.mvfrey.mu->pcie_f0_int_enable)); + + return ret; +} + +static int iop_send_sync_request_itl(struct hptiop_hba *hba, + void __iomem *_req, u32 millisec) +{ + struct hpt_iop_request_header __iomem *req = _req; + u32 i; + + writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags); + writel(0, &req->context); + writel((unsigned long)req - (unsigned long)hba->u.itl.iop, + &hba->u.itl.iop->inbound_queue); + readl(&hba->u.itl.iop->outbound_intstatus); + + for (i = 0; i < millisec; i++) { + iop_intr_itl(hba); + if (readl(&req->context)) + return 0; + msleep(1); + } + + return -1; +} + +static int iop_send_sync_request_mv(struct hptiop_hba *hba, + u32 size_bits, u32 millisec) +{ + struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req; + u32 i; + + hba->msg_done = 0; + reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST); + mv_inbound_write(hba->u.mv.internal_req_phy | + MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba); + + for (i = 0; i < millisec; i++) { + iop_intr_mv(hba); + if (hba->msg_done) + return 0; + msleep(1); + } + return -1; +} + +static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba, + u32 size_bits, u32 millisec) +{ + struct hpt_iop_request_header *reqhdr = + hba->u.mvfrey.internal_req.req_virt; + u32 i; + + hba->msg_done = 0; + reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST); + hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req)); + + for (i = 0; i < millisec; i++) { + iop_intr_mvfrey(hba); + if (hba->msg_done) + break; + msleep(1); + } + return hba->msg_done ? 0 : -1; +} + +static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg) +{ + writel(msg, &hba->u.itl.iop->inbound_msgaddr0); + readl(&hba->u.itl.iop->outbound_intstatus); +} + +static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg) +{ + writel(msg, &hba->u.mv.mu->inbound_msg); + writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell); + readl(&hba->u.mv.regs->inbound_doorbell); +} + +static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg) +{ + writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a)); + readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a)); +} + +static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec) +{ + u32 i; + + hba->msg_done = 0; + hba->ops->disable_intr(hba); + hba->ops->post_msg(hba, msg); + + for (i = 0; i < millisec; i++) { + spin_lock_irq(hba->host->host_lock); + hba->ops->iop_intr(hba); + spin_unlock_irq(hba->host->host_lock); + if (hba->msg_done) + break; + msleep(1); + } + + hba->ops->enable_intr(hba); + return hba->msg_done? 
0 : -1; +} + +static int iop_get_config_itl(struct hptiop_hba *hba, + struct hpt_iop_request_get_config *config) +{ + u32 req32; + struct hpt_iop_request_get_config __iomem *req; + + req32 = readl(&hba->u.itl.iop->inbound_queue); + if (req32 == IOPMU_QUEUE_EMPTY) + return -1; + + req = (struct hpt_iop_request_get_config __iomem *) + ((unsigned long)hba->u.itl.iop + req32); + + writel(0, &req->header.flags); + writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type); + writel(sizeof(struct hpt_iop_request_get_config), &req->header.size); + writel(IOP_RESULT_PENDING, &req->header.result); + + if (iop_send_sync_request_itl(hba, req, 20000)) { + dprintk("Get config send cmd failed\n"); + return -1; + } + + memcpy_fromio(config, req, sizeof(*config)); + writel(req32, &hba->u.itl.iop->outbound_queue); + return 0; +} + +static int iop_get_config_mv(struct hptiop_hba *hba, + struct hpt_iop_request_get_config *config) +{ + struct hpt_iop_request_get_config *req = hba->u.mv.internal_req; + + req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); + req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG); + req->header.size = + cpu_to_le32(sizeof(struct hpt_iop_request_get_config)); + req->header.result = cpu_to_le32(IOP_RESULT_PENDING); + req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5); + req->header.context_hi32 = 0; + + if (iop_send_sync_request_mv(hba, 0, 20000)) { + dprintk("Get config send cmd failed\n"); + return -1; + } + + memcpy(config, req, sizeof(struct hpt_iop_request_get_config)); + return 0; +} + +static int iop_get_config_mvfrey(struct hptiop_hba *hba, + struct hpt_iop_request_get_config *config) +{ + struct hpt_iop_request_get_config *info = hba->u.mvfrey.config; + + if (info->header.size != sizeof(struct hpt_iop_request_get_config) || + info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) + return -1; + + config->interface_version = info->interface_version; + config->firmware_version = info->firmware_version; + config->max_requests = info->max_requests; + config->request_size = info->request_size; + config->max_sg_count = info->max_sg_count; + config->data_transfer_length = info->data_transfer_length; + config->alignment_mask = info->alignment_mask; + config->max_devices = info->max_devices; + config->sdram_size = info->sdram_size; + + return 0; +} + +static int iop_set_config_itl(struct hptiop_hba *hba, + struct hpt_iop_request_set_config *config) +{ + u32 req32; + struct hpt_iop_request_set_config __iomem *req; + + req32 = readl(&hba->u.itl.iop->inbound_queue); + if (req32 == IOPMU_QUEUE_EMPTY) + return -1; + + req = (struct hpt_iop_request_set_config __iomem *) + ((unsigned long)hba->u.itl.iop + req32); + + memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header), + (u8 *)config + sizeof(struct hpt_iop_request_header), + sizeof(struct hpt_iop_request_set_config) - + sizeof(struct hpt_iop_request_header)); + + writel(0, &req->header.flags); + writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type); + writel(sizeof(struct hpt_iop_request_set_config), &req->header.size); + writel(IOP_RESULT_PENDING, &req->header.result); + + if (iop_send_sync_request_itl(hba, req, 20000)) { + dprintk("Set config send cmd failed\n"); + return -1; + } + + writel(req32, &hba->u.itl.iop->outbound_queue); + return 0; +} + +static int iop_set_config_mv(struct hptiop_hba *hba, + struct hpt_iop_request_set_config *config) +{ + struct hpt_iop_request_set_config *req = hba->u.mv.internal_req; + + memcpy(req, config, sizeof(struct hpt_iop_request_set_config)); + 
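/*
+ * The memcpy above copies the caller's buffer, header included; the
+ * header fields are rewritten below so the request posted to the IOP
+ * carries a freshly initialized type, size, result and context.
+ */
+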
req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); + req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG); + req->header.size = + cpu_to_le32(sizeof(struct hpt_iop_request_set_config)); + req->header.result = cpu_to_le32(IOP_RESULT_PENDING); + req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5); + req->header.context_hi32 = 0; + + if (iop_send_sync_request_mv(hba, 0, 20000)) { + dprintk("Set config send cmd failed\n"); + return -1; + } + + return 0; +} + +static int iop_set_config_mvfrey(struct hptiop_hba *hba, + struct hpt_iop_request_set_config *config) +{ + struct hpt_iop_request_set_config *req = + hba->u.mvfrey.internal_req.req_virt; + + memcpy(req, config, sizeof(struct hpt_iop_request_set_config)); + req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); + req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG); + req->header.size = + cpu_to_le32(sizeof(struct hpt_iop_request_set_config)); + req->header.result = cpu_to_le32(IOP_RESULT_PENDING); + req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5); + req->header.context_hi32 = 0; + + if (iop_send_sync_request_mvfrey(hba, 0, 20000)) { + dprintk("Set config send cmd failed\n"); + return -1; + } + + return 0; +} + +static void hptiop_enable_intr_itl(struct hptiop_hba *hba) +{ + writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0), + &hba->u.itl.iop->outbound_intmask); +} + +static void hptiop_enable_intr_mv(struct hptiop_hba *hba) +{ + writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG, + &hba->u.mv.regs->outbound_intmask); +} + +static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba) +{ + writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable)); + writel(0x1, &(hba->u.mvfrey.mu->isr_enable)); + writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable)); +} + +static int hptiop_initialize_iop(struct hptiop_hba *hba) +{ + /* enable interrupts */ + hba->ops->enable_intr(hba); + + hba->initialized = 1; + + /* start background tasks */ + if (iop_send_sync_msg(hba, + IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) { + printk(KERN_ERR "scsi%d: fail to start background task\n", + hba->host->host_no); + return -1; + } + return 0; +} + +static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index) +{ + u32 mem_base_phy, length; + void __iomem *mem_base_virt; + + struct pci_dev *pcidev = hba->pcidev; + + + if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) { + printk(KERN_ERR "scsi%d: pci resource invalid\n", + hba->host->host_no); + return NULL; + } + + mem_base_phy = pci_resource_start(pcidev, index); + length = pci_resource_len(pcidev, index); + mem_base_virt = ioremap(mem_base_phy, length); + + if (!mem_base_virt) { + printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n", + hba->host->host_no); + return NULL; + } + return mem_base_virt; +} + +static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba) +{ + struct pci_dev *pcidev = hba->pcidev; + hba->u.itl.iop = hptiop_map_pci_bar(hba, 0); + if (hba->u.itl.iop == NULL) + return -1; + if ((pcidev->device & 0xff00) == 0x4400) { + hba->u.itl.plx = hba->u.itl.iop; + hba->u.itl.iop = hptiop_map_pci_bar(hba, 2); + if (hba->u.itl.iop == NULL) { + iounmap(hba->u.itl.plx); + return -1; + } + } + return 0; +} + +static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba) +{ + if (hba->u.itl.plx) + iounmap(hba->u.itl.plx); + iounmap(hba->u.itl.iop); +} + +static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba) +{ + hba->u.mv.regs = hptiop_map_pci_bar(hba, 0); + if 
(hba->u.mv.regs == NULL) + return -1; + + hba->u.mv.mu = hptiop_map_pci_bar(hba, 2); + if (hba->u.mv.mu == NULL) { + iounmap(hba->u.mv.regs); + return -1; + } + + return 0; +} + +static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba) +{ + hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0); + if (hba->u.mvfrey.config == NULL) + return -1; + + hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2); + if (hba->u.mvfrey.mu == NULL) { + iounmap(hba->u.mvfrey.config); + return -1; + } + + return 0; +} + +static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba) +{ + iounmap(hba->u.mv.regs); + iounmap(hba->u.mv.mu); +} + +static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba) +{ + iounmap(hba->u.mvfrey.config); + iounmap(hba->u.mvfrey.mu); +} + +static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg) +{ + dprintk("iop message 0x%x\n", msg); + + if (msg == IOPMU_INBOUND_MSG0_NOP || + msg == IOPMU_INBOUND_MSG0_RESET_COMM) + hba->msg_done = 1; + + if (!hba->initialized) + return; + + if (msg == IOPMU_INBOUND_MSG0_RESET) { + atomic_set(&hba->resetting, 0); + wake_up(&hba->reset_wq); + } + else if (msg <= IOPMU_INBOUND_MSG0_MAX) + hba->msg_done = 1; +} + +static struct hptiop_request *get_req(struct hptiop_hba *hba) +{ + struct hptiop_request *ret; + + dprintk("get_req : req=%p\n", hba->req_list); + + ret = hba->req_list; + if (ret) + hba->req_list = ret->next; + + return ret; +} + +static void free_req(struct hptiop_hba *hba, struct hptiop_request *req) +{ + dprintk("free_req(%d, %p)\n", req->index, req); + req->next = hba->req_list; + hba->req_list = req; +} + +static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag, + struct hpt_iop_request_scsi_command *req) +{ + struct scsi_cmnd *scp; + + dprintk("hptiop_finish_scsi_req: req=%p, type=%d, " + "result=%d, context=0x%x tag=%d\n", + req, req->header.type, req->header.result, + req->header.context, tag); + + BUG_ON(!req->header.result); + BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND)); + + scp = hba->reqs[tag].scp; + + if (HPT_SCP(scp)->mapped) + scsi_dma_unmap(scp); + + switch (le32_to_cpu(req->header.result)) { + case IOP_RESULT_SUCCESS: + scsi_set_resid(scp, + scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length)); + scp->result = (DID_OK<<16); + break; + case IOP_RESULT_BAD_TARGET: + scp->result = (DID_BAD_TARGET<<16); + break; + case IOP_RESULT_BUSY: + scp->result = (DID_BUS_BUSY<<16); + break; + case IOP_RESULT_RESET: + scp->result = (DID_RESET<<16); + break; + case IOP_RESULT_FAIL: + scp->result = (DID_ERROR<<16); + break; + case IOP_RESULT_INVALID_REQUEST: + scp->result = (DID_ABORT<<16); + break; + case IOP_RESULT_CHECK_CONDITION: + scsi_set_resid(scp, + scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length)); + scp->result = SAM_STAT_CHECK_CONDITION; + memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE); + goto skip_resid; + + default: + scp->result = DID_ABORT << 16; + break; + } + + scsi_set_resid(scp, + scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length)); + +skip_resid: + dprintk("scsi_done(%p)\n", scp); + scsi_done(scp); + free_req(hba, &hba->reqs[tag]); +} + +static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag) +{ + struct hpt_iop_request_scsi_command *req; + u32 tag; + + if (hba->iopintf_v2) { + tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT; + req = hba->reqs[tag].req_virt; + if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT)) + req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS); + } else { + tag = _tag; + req = hba->reqs[tag].req_virt; + } + 
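+ /*
+ * With the v2 IOP interface the completion tag encodes both the request
+ * index and a result bit (IOPMU_QUEUE_REQUEST_RESULT_BIT); when that bit
+ * is set the request is known to have succeeded, so header.result is
+ * filled in here instead of being read back from the request buffer.
+ * Pre-v2 firmware simply returns the bare request index.
+ */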
+ hptiop_finish_scsi_req(hba, tag, req); +} + +static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag) +{ + struct hpt_iop_request_header __iomem *req; + struct hpt_iop_request_ioctl_command __iomem *p; + struct hpt_ioctl_k *arg; + + req = (struct hpt_iop_request_header __iomem *) + ((unsigned long)hba->u.itl.iop + tag); + dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, " + "result=%d, context=0x%x tag=%d\n", + req, readl(&req->type), readl(&req->result), + readl(&req->context), tag); + + BUG_ON(!readl(&req->result)); + BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND); + + p = (struct hpt_iop_request_ioctl_command __iomem *)req; + arg = (struct hpt_ioctl_k *)(unsigned long) + (readl(&req->context) | + ((u64)readl(&req->context_hi32)<<32)); + + if (readl(&req->result) == IOP_RESULT_SUCCESS) { + arg->result = HPT_IOCTL_RESULT_OK; + + if (arg->outbuf_size) + memcpy_fromio(arg->outbuf, + &p->buf[(readl(&p->inbuf_size) + 3)& ~3], + arg->outbuf_size); + + if (arg->bytes_returned) + *arg->bytes_returned = arg->outbuf_size; + } + else + arg->result = HPT_IOCTL_RESULT_FAILED; + + arg->done(arg); + writel(tag, &hba->u.itl.iop->outbound_queue); +} + +static irqreturn_t hptiop_intr(int irq, void *dev_id) +{ + struct hptiop_hba *hba = dev_id; + int handled; + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + handled = hba->ops->iop_intr(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return handled; +} + +static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg) +{ + struct Scsi_Host *host = scp->device->host; + struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; + struct scatterlist *sg; + int idx, nseg; + + nseg = scsi_dma_map(scp); + BUG_ON(nseg < 0); + if (!nseg) + return 0; + + HPT_SCP(scp)->sgcnt = nseg; + HPT_SCP(scp)->mapped = 1; + + BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors); + + scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) { + psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)) | + hba->ops->host_phy_flag; + psg[idx].size = cpu_to_le32(sg_dma_len(sg)); + psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ? 
+ cpu_to_le32(1) : 0; + } + return HPT_SCP(scp)->sgcnt; +} + +static void hptiop_post_req_itl(struct hptiop_hba *hba, + struct hptiop_request *_req) +{ + struct hpt_iop_request_header *reqhdr = _req->req_virt; + + reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT | + (u32)_req->index); + reqhdr->context_hi32 = 0; + + if (hba->iopintf_v2) { + u32 size, size_bits; + + size = le32_to_cpu(reqhdr->size); + if (size < 256) + size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT; + else if (size < 512) + size_bits = IOPMU_QUEUE_ADDR_HOST_BIT; + else + size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT | + IOPMU_QUEUE_ADDR_HOST_BIT; + writel(_req->req_shifted_phy | size_bits, + &hba->u.itl.iop->inbound_queue); + } else + writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT, + &hba->u.itl.iop->inbound_queue); +} + +static void hptiop_post_req_mv(struct hptiop_hba *hba, + struct hptiop_request *_req) +{ + struct hpt_iop_request_header *reqhdr = _req->req_virt; + u32 size, size_bit; + + reqhdr->context = cpu_to_le32(_req->index<<8 | + IOP_REQUEST_TYPE_SCSI_COMMAND<<5); + reqhdr->context_hi32 = 0; + size = le32_to_cpu(reqhdr->size); + + if (size <= 256) + size_bit = 0; + else if (size <= 256*2) + size_bit = 1; + else if (size <= 256*3) + size_bit = 2; + else + size_bit = 3; + + mv_inbound_write((_req->req_shifted_phy << 5) | + MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba); +} + +static void hptiop_post_req_mvfrey(struct hptiop_hba *hba, + struct hptiop_request *_req) +{ + struct hpt_iop_request_header *reqhdr = _req->req_virt; + u32 index; + + reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT | + IOP_REQUEST_FLAG_ADDR_BITS | + ((_req->req_shifted_phy >> 11) & 0xffff0000)); + reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT | + (_req->index << 4) | reqhdr->type); + reqhdr->context_hi32 = cpu_to_le32((_req->req_shifted_phy << 5) & + 0xffffffff); + + hba->u.mvfrey.inlist_wptr++; + index = hba->u.mvfrey.inlist_wptr & 0x3fff; + + if (index == hba->u.mvfrey.list_count) { + index = 0; + hba->u.mvfrey.inlist_wptr &= ~0x3fff; + hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; + } + + hba->u.mvfrey.inlist[index].addr = + (dma_addr_t)_req->req_shifted_phy << 5; + hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4; + writel(hba->u.mvfrey.inlist_wptr, + &(hba->u.mvfrey.mu->inbound_write_ptr)); + readl(&(hba->u.mvfrey.mu->inbound_write_ptr)); +} + +static int hptiop_reset_comm_itl(struct hptiop_hba *hba) +{ + return 0; +} + +static int hptiop_reset_comm_mv(struct hptiop_hba *hba) +{ + return 0; +} + +static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba) +{ + u32 list_count = hba->u.mvfrey.list_count; + + if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000)) + return -1; + + /* wait 100ms for MCU ready */ + msleep(100); + + writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff), + &(hba->u.mvfrey.mu->inbound_base)); + writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16), + &(hba->u.mvfrey.mu->inbound_base_high)); + + writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff), + &(hba->u.mvfrey.mu->outbound_base)); + writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16), + &(hba->u.mvfrey.mu->outbound_base_high)); + + writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff), + &(hba->u.mvfrey.mu->outbound_shadow_base)); + writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16), + &(hba->u.mvfrey.mu->outbound_shadow_base_high)); + + hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE; + *hba->u.mvfrey.outlist_cptr = (list_count - 1) | 
CL_POINTER_TOGGLE; + hba->u.mvfrey.outlist_rptr = list_count - 1; + return 0; +} + +static int hptiop_queuecommand_lck(struct scsi_cmnd *scp) +{ + struct Scsi_Host *host = scp->device->host; + struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; + struct hpt_iop_request_scsi_command *req; + int sg_count = 0; + struct hptiop_request *_req; + + _req = get_req(hba); + if (_req == NULL) { + dprintk("hptiop_queuecmd : no free req\n"); + return SCSI_MLQUEUE_HOST_BUSY; + } + + _req->scp = scp; + + dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%llu cdb=(%08x-%08x-%08x-%08x) " + "req_index=%d, req=%p\n", + scp, + host->host_no, scp->device->channel, + scp->device->id, scp->device->lun, + cpu_to_be32(((u32 *)scp->cmnd)[0]), + cpu_to_be32(((u32 *)scp->cmnd)[1]), + cpu_to_be32(((u32 *)scp->cmnd)[2]), + cpu_to_be32(((u32 *)scp->cmnd)[3]), + _req->index, _req->req_virt); + + scp->result = 0; + + if (scp->device->channel || + (scp->device->id > hba->max_devices) || + ((scp->device->id == (hba->max_devices-1)) && scp->device->lun)) { + scp->result = DID_BAD_TARGET << 16; + free_req(hba, _req); + goto cmd_done; + } + + req = _req->req_virt; + + /* build S/G table */ + sg_count = hptiop_buildsgl(scp, req->sg_list); + if (!sg_count) + HPT_SCP(scp)->mapped = 0; + + req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); + req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND); + req->header.result = cpu_to_le32(IOP_RESULT_PENDING); + req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp)); + req->channel = scp->device->channel; + req->target = scp->device->id; + req->lun = scp->device->lun; + req->header.size = cpu_to_le32(struct_size(req, sg_list, sg_count)); + + memcpy(req->cdb, scp->cmnd, sizeof(req->cdb)); + hba->ops->post_req(hba, _req); + return 0; + +cmd_done: + dprintk("scsi_done(scp=%p)\n", scp); + scsi_done(scp); + return 0; +} + +static DEF_SCSI_QCMD(hptiop_queuecommand) + +static const char *hptiop_info(struct Scsi_Host *host) +{ + return driver_name_long; +} + +static int hptiop_reset_hba(struct hptiop_hba *hba) +{ + if (atomic_xchg(&hba->resetting, 1) == 0) { + atomic_inc(&hba->reset_count); + hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET); + } + + wait_event_timeout(hba->reset_wq, + atomic_read(&hba->resetting) == 0, 60 * HZ); + + if (atomic_read(&hba->resetting)) { + /* IOP is in unknown state, abort reset */ + printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no); + return -1; + } + + if (iop_send_sync_msg(hba, + IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) { + dprintk("scsi%d: fail to start background task\n", + hba->host->host_no); + } + + return 0; +} + +static int hptiop_reset(struct scsi_cmnd *scp) +{ + struct hptiop_hba * hba = (struct hptiop_hba *)scp->device->host->hostdata; + + printk(KERN_WARNING "hptiop_reset(%d/%d/%d)\n", + scp->device->host->host_no, -1, -1); + + return hptiop_reset_hba(hba)? 
FAILED : SUCCESS; +} + +static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, + int queue_depth) +{ + struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata; + + if (queue_depth > hba->max_requests) + queue_depth = hba->max_requests; + return scsi_change_queue_depth(sdev, queue_depth); +} + +static ssize_t hptiop_show_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver); +} + +static ssize_t hptiop_show_fw_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; + + return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", + hba->firmware_version >> 24, + (hba->firmware_version >> 16) & 0xff, + (hba->firmware_version >> 8) & 0xff, + hba->firmware_version & 0xff); +} + +static struct device_attribute hptiop_attr_version = { + .attr = { + .name = "driver-version", + .mode = S_IRUGO, + }, + .show = hptiop_show_version, +}; + +static struct device_attribute hptiop_attr_fw_version = { + .attr = { + .name = "firmware-version", + .mode = S_IRUGO, + }, + .show = hptiop_show_fw_version, +}; + +static struct attribute *hptiop_host_attrs[] = { + &hptiop_attr_version.attr, + &hptiop_attr_fw_version.attr, + NULL +}; + +ATTRIBUTE_GROUPS(hptiop_host); + +static int hptiop_slave_config(struct scsi_device *sdev) +{ + if (sdev->type == TYPE_TAPE) + blk_queue_max_hw_sectors(sdev->request_queue, 8192); + + return 0; +} + +static const struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .name = driver_name, + .queuecommand = hptiop_queuecommand, + .eh_host_reset_handler = hptiop_reset, + .info = hptiop_info, + .emulated = 0, + .proc_name = driver_name, + .shost_groups = hptiop_host_groups, + .slave_configure = hptiop_slave_config, + .this_id = -1, + .change_queue_depth = hptiop_adjust_disk_queue_depth, + .cmd_size = sizeof(struct hpt_cmd_priv), +}; + +static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba) +{ + return 0; +} + +static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba) +{ + hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev, + 0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL); + if (hba->u.mv.internal_req) + return 0; + else + return -1; +} + +static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba) +{ + u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl); + char *p; + dma_addr_t phy; + + BUG_ON(hba->max_request_size == 0); + + if (list_count == 0) { + BUG_ON(1); + return -1; + } + + list_count >>= 16; + + hba->u.mvfrey.list_count = list_count; + hba->u.mvfrey.internal_mem_size = 0x800 + + list_count * sizeof(struct mvfrey_inlist_entry) + + list_count * sizeof(struct mvfrey_outlist_entry) + + sizeof(int); + + p = dma_alloc_coherent(&hba->pcidev->dev, + hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL); + if (!p) + return -1; + + hba->u.mvfrey.internal_req.req_virt = p; + hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5; + hba->u.mvfrey.internal_req.scp = NULL; + hba->u.mvfrey.internal_req.next = NULL; + + p += 0x800; + phy += 0x800; + + hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p; + hba->u.mvfrey.inlist_phy = phy; + + p += list_count * sizeof(struct mvfrey_inlist_entry); + phy += list_count * sizeof(struct mvfrey_inlist_entry); + + hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p; + hba->u.mvfrey.outlist_phy = phy; + + p += list_count * sizeof(struct mvfrey_outlist_entry); + phy += list_count 
* sizeof(struct mvfrey_outlist_entry); + + hba->u.mvfrey.outlist_cptr = (__le32 *)p; + hba->u.mvfrey.outlist_cptr_phy = phy; + + return 0; +} + +static int hptiop_internal_memfree_itl(struct hptiop_hba *hba) +{ + return 0; +} + +static int hptiop_internal_memfree_mv(struct hptiop_hba *hba) +{ + if (hba->u.mv.internal_req) { + dma_free_coherent(&hba->pcidev->dev, 0x800, + hba->u.mv.internal_req, hba->u.mv.internal_req_phy); + return 0; + } else + return -1; +} + +static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba) +{ + if (hba->u.mvfrey.internal_req.req_virt) { + dma_free_coherent(&hba->pcidev->dev, + hba->u.mvfrey.internal_mem_size, + hba->u.mvfrey.internal_req.req_virt, + (dma_addr_t) + hba->u.mvfrey.internal_req.req_shifted_phy << 5); + return 0; + } else + return -1; +} + +static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id) +{ + struct Scsi_Host *host = NULL; + struct hptiop_hba *hba; + struct hptiop_adapter_ops *iop_ops; + struct hpt_iop_request_get_config iop_config; + struct hpt_iop_request_set_config set_config; + dma_addr_t start_phy; + void *start_virt; + u32 offset, i, req_size; + int rc; + + dprintk("hptiop_probe(%p)\n", pcidev); + + if (pci_enable_device(pcidev)) { + printk(KERN_ERR "hptiop: fail to enable pci device\n"); + return -ENODEV; + } + + printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n", + pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7, + pcidev->irq); + + pci_set_master(pcidev); + + /* Enable 64bit DMA if possible */ + iop_ops = (struct hptiop_adapter_ops *)id->driver_data; + rc = dma_set_mask(&pcidev->dev, + DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)); + if (rc) + rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)); + + if (rc) { + printk(KERN_ERR "hptiop: fail to set dma_mask\n"); + goto disable_pci_device; + } + + if (pci_request_regions(pcidev, driver_name)) { + printk(KERN_ERR "hptiop: pci_request_regions failed\n"); + goto disable_pci_device; + } + + host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba)); + if (!host) { + printk(KERN_ERR "hptiop: fail to alloc scsi host\n"); + goto free_pci_regions; + } + + hba = (struct hptiop_hba *)host->hostdata; + memset(hba, 0, sizeof(struct hptiop_hba)); + + hba->ops = iop_ops; + hba->pcidev = pcidev; + hba->host = host; + hba->initialized = 0; + hba->iopintf_v2 = 0; + + atomic_set(&hba->resetting, 0); + atomic_set(&hba->reset_count, 0); + + init_waitqueue_head(&hba->reset_wq); + init_waitqueue_head(&hba->ioctl_wq); + + host->max_lun = 128; + host->max_channel = 0; + host->io_port = 0; + host->n_io_port = 0; + host->irq = pcidev->irq; + + if (hba->ops->map_pci_bar(hba)) + goto free_scsi_host; + + if (hba->ops->iop_wait_ready(hba, 20000)) { + printk(KERN_ERR "scsi%d: firmware not ready\n", + hba->host->host_no); + goto unmap_pci_bar; + } + + if (hba->ops->family == MV_BASED_IOP) { + if (hba->ops->internal_memalloc(hba)) { + printk(KERN_ERR "scsi%d: internal_memalloc failed\n", + hba->host->host_no); + goto unmap_pci_bar; + } + } + + if (hba->ops->get_config(hba, &iop_config)) { + printk(KERN_ERR "scsi%d: get config failed\n", + hba->host->host_no); + goto unmap_pci_bar; + } + + hba->max_requests = min(le32_to_cpu(iop_config.max_requests), + HPTIOP_MAX_REQUESTS); + hba->max_devices = le32_to_cpu(iop_config.max_devices); + hba->max_request_size = le32_to_cpu(iop_config.request_size); + hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count); + hba->firmware_version = le32_to_cpu(iop_config.firmware_version); + hba->interface_version = 
le32_to_cpu(iop_config.interface_version); + hba->sdram_size = le32_to_cpu(iop_config.sdram_size); + + if (hba->ops->family == MVFREY_BASED_IOP) { + if (hba->ops->internal_memalloc(hba)) { + printk(KERN_ERR "scsi%d: internal_memalloc failed\n", + hba->host->host_no); + goto unmap_pci_bar; + } + if (hba->ops->reset_comm(hba)) { + printk(KERN_ERR "scsi%d: reset comm failed\n", + hba->host->host_no); + goto unmap_pci_bar; + } + } + + if (hba->firmware_version > 0x01020000 || + hba->interface_version > 0x01020000) + hba->iopintf_v2 = 1; + + host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9; + host->max_id = le32_to_cpu(iop_config.max_devices); + host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count); + host->can_queue = le32_to_cpu(iop_config.max_requests); + host->cmd_per_lun = le32_to_cpu(iop_config.max_requests); + host->max_cmd_len = 16; + + req_size = struct_size_t(struct hpt_iop_request_scsi_command, + sg_list, hba->max_sg_descriptors); + if ((req_size & 0x1f) != 0) + req_size = (req_size + 0x1f) & ~0x1f; + + memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config)); + set_config.iop_id = cpu_to_le32(host->host_no); + set_config.vbus_id = cpu_to_le16(host->host_no); + set_config.max_host_request_size = cpu_to_le16(req_size); + + if (hba->ops->set_config(hba, &set_config)) { + printk(KERN_ERR "scsi%d: set config failed\n", + hba->host->host_no); + goto unmap_pci_bar; + } + + pci_set_drvdata(pcidev, host); + + if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED, + driver_name, hba)) { + printk(KERN_ERR "scsi%d: request irq %d failed\n", + hba->host->host_no, pcidev->irq); + goto unmap_pci_bar; + } + + /* Allocate request mem */ + + dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests); + + hba->req_size = req_size; + hba->req_list = NULL; + + for (i = 0; i < hba->max_requests; i++) { + start_virt = dma_alloc_coherent(&pcidev->dev, + hba->req_size + 0x20, + &start_phy, GFP_KERNEL); + + if (!start_virt) { + printk(KERN_ERR "scsi%d: fail to alloc request mem\n", + hba->host->host_no); + goto free_request_mem; + } + + hba->dma_coherent[i] = start_virt; + hba->dma_coherent_handle[i] = start_phy; + + if ((start_phy & 0x1f) != 0) { + offset = ((start_phy + 0x1f) & ~0x1f) - start_phy; + start_phy += offset; + start_virt += offset; + } + + hba->reqs[i].next = NULL; + hba->reqs[i].req_virt = start_virt; + hba->reqs[i].req_shifted_phy = start_phy >> 5; + hba->reqs[i].index = i; + free_req(hba, &hba->reqs[i]); + } + + /* Enable Interrupt and start background task */ + if (hptiop_initialize_iop(hba)) + goto free_request_mem; + + if (scsi_add_host(host, &pcidev->dev)) { + printk(KERN_ERR "scsi%d: scsi_add_host failed\n", + hba->host->host_no); + goto free_request_mem; + } + + scsi_scan_host(host); + + dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no); + return 0; + +free_request_mem: + for (i = 0; i < hba->max_requests; i++) { + if (hba->dma_coherent[i] && hba->dma_coherent_handle[i]) + dma_free_coherent(&hba->pcidev->dev, + hba->req_size + 0x20, + hba->dma_coherent[i], + hba->dma_coherent_handle[i]); + else + break; + } + + free_irq(hba->pcidev->irq, hba); + +unmap_pci_bar: + hba->ops->internal_memfree(hba); + + hba->ops->unmap_pci_bar(hba); + +free_scsi_host: + scsi_host_put(host); + +free_pci_regions: + pci_release_regions(pcidev); + +disable_pci_device: + pci_disable_device(pcidev); + + dprintk("scsi%d: hptiop_probe fail\n", host ? 
host->host_no : 0); + return -ENODEV; +} + +static void hptiop_shutdown(struct pci_dev *pcidev) +{ + struct Scsi_Host *host = pci_get_drvdata(pcidev); + struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; + + dprintk("hptiop_shutdown(%p)\n", hba); + + /* stop the iop */ + if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000)) + printk(KERN_ERR "scsi%d: shutdown the iop timeout\n", + hba->host->host_no); + + /* disable all outbound interrupts */ + hba->ops->disable_intr(hba); +} + +static void hptiop_disable_intr_itl(struct hptiop_hba *hba) +{ + u32 int_mask; + + int_mask = readl(&hba->u.itl.iop->outbound_intmask); + writel(int_mask | + IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE, + &hba->u.itl.iop->outbound_intmask); + readl(&hba->u.itl.iop->outbound_intmask); +} + +static void hptiop_disable_intr_mv(struct hptiop_hba *hba) +{ + writel(0, &hba->u.mv.regs->outbound_intmask); + readl(&hba->u.mv.regs->outbound_intmask); +} + +static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba) +{ + writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable)); + readl(&(hba->u.mvfrey.mu->f0_doorbell_enable)); + writel(0, &(hba->u.mvfrey.mu->isr_enable)); + readl(&(hba->u.mvfrey.mu->isr_enable)); + writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable)); + readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable)); +} + +static void hptiop_remove(struct pci_dev *pcidev) +{ + struct Scsi_Host *host = pci_get_drvdata(pcidev); + struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; + u32 i; + + dprintk("scsi%d: hptiop_remove\n", hba->host->host_no); + + scsi_remove_host(host); + + hptiop_shutdown(pcidev); + + free_irq(hba->pcidev->irq, hba); + + for (i = 0; i < hba->max_requests; i++) { + if (hba->dma_coherent[i] && hba->dma_coherent_handle[i]) + dma_free_coherent(&hba->pcidev->dev, + hba->req_size + 0x20, + hba->dma_coherent[i], + hba->dma_coherent_handle[i]); + else + break; + } + + hba->ops->internal_memfree(hba); + + hba->ops->unmap_pci_bar(hba); + + pci_release_regions(hba->pcidev); + pci_set_drvdata(hba->pcidev, NULL); + pci_disable_device(hba->pcidev); + + scsi_host_put(host); +} + +static struct hptiop_adapter_ops hptiop_itl_ops = { + .family = INTEL_BASED_IOP, + .iop_wait_ready = iop_wait_ready_itl, + .internal_memalloc = hptiop_internal_memalloc_itl, + .internal_memfree = hptiop_internal_memfree_itl, + .map_pci_bar = hptiop_map_pci_bar_itl, + .unmap_pci_bar = hptiop_unmap_pci_bar_itl, + .enable_intr = hptiop_enable_intr_itl, + .disable_intr = hptiop_disable_intr_itl, + .get_config = iop_get_config_itl, + .set_config = iop_set_config_itl, + .iop_intr = iop_intr_itl, + .post_msg = hptiop_post_msg_itl, + .post_req = hptiop_post_req_itl, + .hw_dma_bit_mask = 64, + .reset_comm = hptiop_reset_comm_itl, + .host_phy_flag = cpu_to_le64(0), +}; + +static struct hptiop_adapter_ops hptiop_mv_ops = { + .family = MV_BASED_IOP, + .iop_wait_ready = iop_wait_ready_mv, + .internal_memalloc = hptiop_internal_memalloc_mv, + .internal_memfree = hptiop_internal_memfree_mv, + .map_pci_bar = hptiop_map_pci_bar_mv, + .unmap_pci_bar = hptiop_unmap_pci_bar_mv, + .enable_intr = hptiop_enable_intr_mv, + .disable_intr = hptiop_disable_intr_mv, + .get_config = iop_get_config_mv, + .set_config = iop_set_config_mv, + .iop_intr = iop_intr_mv, + .post_msg = hptiop_post_msg_mv, + .post_req = hptiop_post_req_mv, + .hw_dma_bit_mask = 33, + .reset_comm = hptiop_reset_comm_mv, + .host_phy_flag = cpu_to_le64(0), +}; + +static struct hptiop_adapter_ops hptiop_mvfrey_ops = { + .family = MVFREY_BASED_IOP, + .iop_wait_ready = 
iop_wait_ready_mvfrey, + .internal_memalloc = hptiop_internal_memalloc_mvfrey, + .internal_memfree = hptiop_internal_memfree_mvfrey, + .map_pci_bar = hptiop_map_pci_bar_mvfrey, + .unmap_pci_bar = hptiop_unmap_pci_bar_mvfrey, + .enable_intr = hptiop_enable_intr_mvfrey, + .disable_intr = hptiop_disable_intr_mvfrey, + .get_config = iop_get_config_mvfrey, + .set_config = iop_set_config_mvfrey, + .iop_intr = iop_intr_mvfrey, + .post_msg = hptiop_post_msg_mvfrey, + .post_req = hptiop_post_req_mvfrey, + .hw_dma_bit_mask = 64, + .reset_comm = hptiop_reset_comm_mvfrey, + .host_phy_flag = cpu_to_le64(1), +}; + +static struct pci_device_id hptiop_id_table[] = { + { PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops }, + { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops }, + { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops }, + { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops }, + { PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops }, + { PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops }, + { PCI_VDEVICE(TTI, 0x3610), (kernel_ulong_t)&hptiop_mvfrey_ops }, + { PCI_VDEVICE(TTI, 0x3611), (kernel_ulong_t)&hptiop_mvfrey_ops }, + { PCI_VDEVICE(TTI, 0x3620), (kernel_ulong_t)&hptiop_mvfrey_ops }, + { PCI_VDEVICE(TTI, 0x3622), (kernel_ulong_t)&hptiop_mvfrey_ops }, + { PCI_VDEVICE(TTI, 0x3640), (kernel_ulong_t)&hptiop_mvfrey_ops }, + { PCI_VDEVICE(TTI, 0x3660), (kernel_ulong_t)&hptiop_mvfrey_ops }, + { PCI_VDEVICE(TTI, 0x3680), (kernel_ulong_t)&hptiop_mvfrey_ops }, + { PCI_VDEVICE(TTI, 0x3690), (kernel_ulong_t)&hptiop_mvfrey_ops }, + {}, +}; + +MODULE_DEVICE_TABLE(pci, hptiop_id_table); + +static struct pci_driver hptiop_pci_driver = { + .name = driver_name, + .id_table = hptiop_id_table, + .probe = hptiop_probe, + .remove = hptiop_remove, + .shutdown = hptiop_shutdown, +}; + +static int __init hptiop_module_init(void) +{ + printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver); + return pci_register_driver(&hptiop_pci_driver); +} + +static void __exit hptiop_module_exit(void) +{ + pci_unregister_driver(&hptiop_pci_driver); +} + + +module_init(hptiop_module_init); +module_exit(hptiop_module_exit); + +MODULE_LICENSE("GPL"); + diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h new file mode 100644 index 000000000..394ef6aa4 --- /dev/null +++ b/drivers/scsi/hptiop.h @@ -0,0 +1,374 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* + * HighPoint RR3xxx/4xxx controller driver for Linux + * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved. + * + * Please report bugs/comments/suggestions to linux@highpoint-tech.com + * + * For more information, visit http://www.highpoint-tech.com + */ +#ifndef _HPTIOP_H_ +#define _HPTIOP_H_ + +struct hpt_iopmu_itl { + __le32 resrved0[4]; + __le32 inbound_msgaddr0; + __le32 inbound_msgaddr1; + __le32 outbound_msgaddr0; + __le32 outbound_msgaddr1; + __le32 inbound_doorbell; + __le32 inbound_intstatus; + __le32 inbound_intmask; + __le32 outbound_doorbell; + __le32 outbound_intstatus; + __le32 outbound_intmask; + __le32 reserved1[2]; + __le32 inbound_queue; + __le32 outbound_queue; +}; + +#define IOPMU_QUEUE_EMPTY 0xffffffff +#define IOPMU_QUEUE_MASK_HOST_BITS 0xf0000000 +#define IOPMU_QUEUE_ADDR_HOST_BIT 0x80000000 +#define IOPMU_QUEUE_REQUEST_SIZE_BIT 0x40000000 +#define IOPMU_QUEUE_REQUEST_RESULT_BIT 0x40000000 + +#define IOPMU_OUTBOUND_INT_MSG0 1 +#define IOPMU_OUTBOUND_INT_MSG1 2 +#define IOPMU_OUTBOUND_INT_DOORBELL 4 +#define IOPMU_OUTBOUND_INT_POSTQUEUE 8 +#define IOPMU_OUTBOUND_INT_PCI 0x10 + +#define IOPMU_INBOUND_INT_MSG0 1 +#define IOPMU_INBOUND_INT_MSG1 2 +#define IOPMU_INBOUND_INT_DOORBELL 4 +#define IOPMU_INBOUND_INT_ERROR 8 +#define IOPMU_INBOUND_INT_POSTQUEUE 0x10 + +#define MVIOP_QUEUE_LEN 512 + +struct hpt_iopmu_mv { + __le32 inbound_head; + __le32 inbound_tail; + __le32 outbound_head; + __le32 outbound_tail; + __le32 inbound_msg; + __le32 outbound_msg; + __le32 reserve[10]; + __le64 inbound_q[MVIOP_QUEUE_LEN]; + __le64 outbound_q[MVIOP_QUEUE_LEN]; +}; + +struct hpt_iopmv_regs { + __le32 reserved[0x20400 / 4]; + __le32 inbound_doorbell; + __le32 inbound_intmask; + __le32 outbound_doorbell; + __le32 outbound_intmask; +}; + +#pragma pack(1) +struct hpt_iopmu_mvfrey { + __le32 reserved0[(0x4000 - 0) / 4]; + __le32 inbound_base; + __le32 inbound_base_high; + __le32 reserved1[(0x4018 - 0x4008) / 4]; + __le32 inbound_write_ptr; + __le32 reserved2[(0x402c - 0x401c) / 4]; + __le32 inbound_conf_ctl; + __le32 reserved3[(0x4050 - 0x4030) / 4]; + __le32 outbound_base; + __le32 outbound_base_high; + __le32 outbound_shadow_base; + __le32 outbound_shadow_base_high; + __le32 reserved4[(0x4088 - 0x4060) / 4]; + __le32 isr_cause; + __le32 isr_enable; + __le32 reserved5[(0x1020c - 0x4090) / 4]; + __le32 pcie_f0_int_enable; + __le32 reserved6[(0x10400 - 0x10210) / 4]; + __le32 f0_to_cpu_msg_a; + __le32 reserved7[(0x10420 - 0x10404) / 4]; + __le32 cpu_to_f0_msg_a; + __le32 reserved8[(0x10480 - 0x10424) / 4]; + __le32 f0_doorbell; + __le32 f0_doorbell_enable; +}; + +struct mvfrey_inlist_entry { + dma_addr_t addr; + __le32 intrfc_len; + __le32 reserved; +}; + +struct mvfrey_outlist_entry { + __le32 val; +}; +#pragma pack() + +#define MVIOP_MU_QUEUE_ADDR_HOST_MASK (~(0x1full)) +#define MVIOP_MU_QUEUE_ADDR_HOST_BIT 4 + +#define MVIOP_MU_QUEUE_ADDR_IOP_HIGH32 0xffffffff +#define MVIOP_MU_QUEUE_REQUEST_RESULT_BIT 1 +#define MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT 2 + +#define MVIOP_MU_INBOUND_INT_MSG 1 +#define MVIOP_MU_INBOUND_INT_POSTQUEUE 2 +#define MVIOP_MU_OUTBOUND_INT_MSG 1 +#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE 2 + +#define CL_POINTER_TOGGLE 0x00004000 +#define CPU_TO_F0_DRBL_MSG_BIT 0x02000000 + +enum hpt_iopmu_message { + /* host-to-iop messages */ + IOPMU_INBOUND_MSG0_NOP = 0, + IOPMU_INBOUND_MSG0_RESET, + IOPMU_INBOUND_MSG0_FLUSH, + IOPMU_INBOUND_MSG0_SHUTDOWN, + IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, + 
IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, + IOPMU_INBOUND_MSG0_RESET_COMM, + IOPMU_INBOUND_MSG0_MAX = 0xff, + /* iop-to-host messages */ + IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_0 = 0x100, + IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_MAX = 0x1ff, + IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_0 = 0x200, + IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_MAX = 0x2ff, + IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_0 = 0x300, + IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_MAX = 0x3ff, +}; + +struct hpt_iop_request_header { + __le32 size; + __le32 type; + __le32 flags; + __le32 result; + __le32 context; /* host context */ + __le32 context_hi32; +}; + +#define IOP_REQUEST_FLAG_SYNC_REQUEST 1 +#define IOP_REQUEST_FLAG_BIST_REQUEST 2 +#define IOP_REQUEST_FLAG_REMAPPED 4 +#define IOP_REQUEST_FLAG_OUTPUT_CONTEXT 8 +#define IOP_REQUEST_FLAG_ADDR_BITS 0x40 /* flags[31:16] is phy_addr[47:32] */ + +enum hpt_iop_request_type { + IOP_REQUEST_TYPE_GET_CONFIG = 0, + IOP_REQUEST_TYPE_SET_CONFIG, + IOP_REQUEST_TYPE_BLOCK_COMMAND, + IOP_REQUEST_TYPE_SCSI_COMMAND, + IOP_REQUEST_TYPE_IOCTL_COMMAND, + IOP_REQUEST_TYPE_MAX +}; + +enum hpt_iop_result_type { + IOP_RESULT_PENDING = 0, + IOP_RESULT_SUCCESS, + IOP_RESULT_FAIL, + IOP_RESULT_BUSY, + IOP_RESULT_RESET, + IOP_RESULT_INVALID_REQUEST, + IOP_RESULT_BAD_TARGET, + IOP_RESULT_CHECK_CONDITION, +}; + +struct hpt_iop_request_get_config { + struct hpt_iop_request_header header; + __le32 interface_version; + __le32 firmware_version; + __le32 max_requests; + __le32 request_size; + __le32 max_sg_count; + __le32 data_transfer_length; + __le32 alignment_mask; + __le32 max_devices; + __le32 sdram_size; +}; + +struct hpt_iop_request_set_config { + struct hpt_iop_request_header header; + __le32 iop_id; + __le16 vbus_id; + __le16 max_host_request_size; + __le32 reserve[6]; +}; + +struct hpt_iopsg { + __le32 size; + __le32 eot; /* non-zero: end of table */ + __le64 pci_address; +}; + +struct hpt_iop_request_block_command { + struct hpt_iop_request_header header; + u8 channel; + u8 target; + u8 lun; + u8 pad1; + __le16 command; /* IOP_BLOCK_COMMAND_{READ,WRITE} */ + __le16 sectors; + __le64 lba; + struct hpt_iopsg sg_list[1]; +}; + +#define IOP_BLOCK_COMMAND_READ 1 +#define IOP_BLOCK_COMMAND_WRITE 2 +#define IOP_BLOCK_COMMAND_VERIFY 3 +#define IOP_BLOCK_COMMAND_FLUSH 4 +#define IOP_BLOCK_COMMAND_SHUTDOWN 5 + +struct hpt_iop_request_scsi_command { + struct hpt_iop_request_header header; + u8 channel; + u8 target; + u8 lun; + u8 pad1; + u8 cdb[16]; + __le32 dataxfer_length; + struct hpt_iopsg sg_list[]; +}; + +struct hpt_iop_request_ioctl_command { + struct hpt_iop_request_header header; + __le32 ioctl_code; + __le32 inbuf_size; + __le32 outbuf_size; + __le32 bytes_returned; + u8 buf[]; + /* out data should be put at buf[(inbuf_size+3)&~3] */ +}; + +#define HPTIOP_MAX_REQUESTS 256u + +struct hptiop_request { + struct hptiop_request *next; + void *req_virt; + u32 req_shifted_phy; + struct scsi_cmnd *scp; + int index; +}; + +struct hpt_cmd_priv { + int mapped; + int sgcnt; + dma_addr_t dma_handle; +}; + +#define HPT_SCP(scp) ((struct hpt_cmd_priv *)scsi_cmd_priv(scp)) + +enum hptiop_family { + UNKNOWN_BASED_IOP, + INTEL_BASED_IOP, + MV_BASED_IOP, + MVFREY_BASED_IOP +} ; + +struct hptiop_hba { + struct hptiop_adapter_ops *ops; + union { + struct { + struct hpt_iopmu_itl __iomem *iop; + void __iomem *plx; + } itl; + struct { + struct hpt_iopmv_regs *regs; + struct hpt_iopmu_mv __iomem *mu; + void *internal_req; + dma_addr_t internal_req_phy; + } mv; + struct { + struct hpt_iop_request_get_config __iomem *config; + struct 
hpt_iopmu_mvfrey __iomem *mu; + + int internal_mem_size; + struct hptiop_request internal_req; + int list_count; + struct mvfrey_inlist_entry *inlist; + dma_addr_t inlist_phy; + __le32 inlist_wptr; + struct mvfrey_outlist_entry *outlist; + dma_addr_t outlist_phy; + __le32 *outlist_cptr; /* copy pointer shadow */ + dma_addr_t outlist_cptr_phy; + __le32 outlist_rptr; + } mvfrey; + } u; + + struct Scsi_Host *host; + struct pci_dev *pcidev; + + /* IOP config info */ + u32 interface_version; + u32 firmware_version; + u32 sdram_size; + u32 max_devices; + u32 max_requests; + u32 max_request_size; + u32 max_sg_descriptors; + + u32 req_size; /* host-allocated request buffer size */ + + u32 iopintf_v2: 1; + u32 initialized: 1; + u32 msg_done: 1; + + struct hptiop_request * req_list; + struct hptiop_request reqs[HPTIOP_MAX_REQUESTS]; + + /* used to free allocated dma area */ + void *dma_coherent[HPTIOP_MAX_REQUESTS]; + dma_addr_t dma_coherent_handle[HPTIOP_MAX_REQUESTS]; + + atomic_t reset_count; + atomic_t resetting; + + wait_queue_head_t reset_wq; + wait_queue_head_t ioctl_wq; +}; + +struct hpt_ioctl_k { + struct hptiop_hba * hba; + u32 ioctl_code; + u32 inbuf_size; + u32 outbuf_size; + void *inbuf; + void *outbuf; + u32 *bytes_returned; + void (*done)(struct hpt_ioctl_k *); + int result; /* HPT_IOCTL_RESULT_ */ +}; + +struct hptiop_adapter_ops { + enum hptiop_family family; + int (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec); + int (*internal_memalloc)(struct hptiop_hba *hba); + int (*internal_memfree)(struct hptiop_hba *hba); + int (*map_pci_bar)(struct hptiop_hba *hba); + void (*unmap_pci_bar)(struct hptiop_hba *hba); + void (*enable_intr)(struct hptiop_hba *hba); + void (*disable_intr)(struct hptiop_hba *hba); + int (*get_config)(struct hptiop_hba *hba, + struct hpt_iop_request_get_config *config); + int (*set_config)(struct hptiop_hba *hba, + struct hpt_iop_request_set_config *config); + int (*iop_intr)(struct hptiop_hba *hba); + void (*post_msg)(struct hptiop_hba *hba, u32 msg); + void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req); + int hw_dma_bit_mask; + int (*reset_comm)(struct hptiop_hba *hba); + __le64 host_phy_flag; +}; + +#define HPT_IOCTL_RESULT_OK 0 +#define HPT_IOCTL_RESULT_FAILED (-1) + +#if 0 +#define dprintk(fmt, args...) do { printk(fmt, ##args); } while(0) +#else +#define dprintk(fmt, args...) 
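+/*
+ * dprintk() expands to nothing in normal builds; flipping the "#if 0"
+ * above to "#if 1" turns every dprintk() in this driver into a plain
+ * printk() for debugging.
+ */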
+#endif + +#endif diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile new file mode 100644 index 000000000..5eb1cb1a0 --- /dev/null +++ b/drivers/scsi/ibmvscsi/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi.o +obj-$(CONFIG_SCSI_IBMVFC) += ibmvfc.o diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c new file mode 100644 index 000000000..c98346e46 --- /dev/null +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -0,0 +1,6503 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter + * + * Written By: Brian King , IBM Corporation + * + * Copyright (C) IBM Corporation, 2008 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ibmvfc.h" + +static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT; +static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT; +static u64 max_lun = IBMVFC_MAX_LUN; +static unsigned int max_targets = IBMVFC_MAX_TARGETS; +static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT; +static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS; +static unsigned int ibmvfc_debug = IBMVFC_DEBUG; +static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL; +static unsigned int cls3_error = IBMVFC_CLS3_ERROR; +static unsigned int mq_enabled = IBMVFC_MQ; +static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES; +static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS; +static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ; +static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M; + +static LIST_HEAD(ibmvfc_head); +static DEFINE_SPINLOCK(ibmvfc_driver_lock); +static struct scsi_transport_template *ibmvfc_transport_template; + +MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver"); +MODULE_AUTHOR("Brian King "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(IBMVFC_DRIVER_VERSION); + +module_param_named(mq, mq_enabled, uint, S_IRUGO); +MODULE_PARM_DESC(mq, "Enable multiqueue support. " + "[Default=" __stringify(IBMVFC_MQ) "]"); +module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO); +MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. " + "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]"); +module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO); +MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. " + "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]"); +module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO); +MODULE_PARM_DESC(mig_channels_only, "Prevent migration to non-channelized system. " + "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]"); +module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO); +MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to system with less channels. " + "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]"); + +module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. " + "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]"); +module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(default_timeout, + "Default timeout in seconds for initialization and EH commands. 
" + "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]"); +module_param_named(max_requests, max_requests, uint, S_IRUGO); +MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. " + "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]"); +module_param_named(max_lun, max_lun, ullong, S_IRUGO); +MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. " + "[Default=" __stringify(IBMVFC_MAX_LUN) "]"); +module_param_named(max_targets, max_targets, uint, S_IRUGO); +MODULE_PARM_DESC(max_targets, "Maximum allowed targets. " + "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]"); +module_param_named(disc_threads, disc_threads, uint, S_IRUGO); +MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. " + "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]"); +module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "Enable driver debug information. " + "[Default=" __stringify(IBMVFC_DEBUG) "]"); +module_param_named(log_level, log_level, uint, 0); +MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. " + "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]"); +module_param_named(cls3_error, cls3_error, uint, 0); +MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. " + "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]"); + +static const struct { + u16 status; + u16 error; + u8 result; + u8 retry; + int log; + char *name; +} cmd_status [] = { + { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" }, + + { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, 
DID_ERROR, 1, 1, "command failed" }, + + { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" }, + { IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" }, + { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" }, + { IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" }, + { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" }, + { IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" }, + { IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" }, + { IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" }, + { IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" }, + { IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" }, + { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" }, + + { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" }, + { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." }, +}; + +static void ibmvfc_npiv_login(struct ibmvfc_host *); +static void ibmvfc_tgt_send_prli(struct ibmvfc_target *); +static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *); +static void ibmvfc_tgt_query_target(struct ibmvfc_target *); +static void ibmvfc_npiv_logout(struct ibmvfc_host *); +static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *); +static void ibmvfc_tgt_move_login(struct ibmvfc_target *); + +static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *); +static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *); + +static const char *unknown_error = "unknown error"; + +static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba, + unsigned long length, unsigned long *cookie, + unsigned long *irq) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length); + *cookie = retbuf[0]; + *irq = retbuf[1]; + + return rc; +} + +static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags) +{ + u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities); + + return (host_caps & cap_flags) ? 
1 : 0; +} + +static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost, + struct ibmvfc_cmd *vfc_cmd) +{ + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) + return &vfc_cmd->v2.iu; + else + return &vfc_cmd->v1.iu; +} + +static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost, + struct ibmvfc_cmd *vfc_cmd) +{ + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) + return &vfc_cmd->v2.rsp; + else + return &vfc_cmd->v1.rsp; +} + +#ifdef CONFIG_SCSI_IBMVFC_TRACE +/** + * ibmvfc_trc_start - Log a start trace entry + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_trc_start(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd; + struct ibmvfc_mad_common *mad = &evt->iu.mad_common; + struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd); + struct ibmvfc_trace_entry *entry; + int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK; + + entry = &vhost->trace[index]; + entry->evt = evt; + entry->time = jiffies; + entry->fmt = evt->crq.format; + entry->type = IBMVFC_TRC_START; + + switch (entry->fmt) { + case IBMVFC_CMD_FORMAT: + entry->op_code = iu->cdb[0]; + entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id); + entry->lun = scsilun_to_int(&iu->lun); + entry->tmf_flags = iu->tmf_flags; + entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len); + break; + case IBMVFC_MAD_FORMAT: + entry->op_code = be32_to_cpu(mad->opcode); + break; + default: + break; + } +} + +/** + * ibmvfc_trc_end - Log an end trace entry + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_trc_end(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd; + struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common; + struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd); + struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd); + struct ibmvfc_trace_entry *entry; + int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK; + + entry = &vhost->trace[index]; + entry->evt = evt; + entry->time = jiffies; + entry->fmt = evt->crq.format; + entry->type = IBMVFC_TRC_END; + + switch (entry->fmt) { + case IBMVFC_CMD_FORMAT: + entry->op_code = iu->cdb[0]; + entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id); + entry->lun = scsilun_to_int(&iu->lun); + entry->tmf_flags = iu->tmf_flags; + entry->u.end.status = be16_to_cpu(vfc_cmd->status); + entry->u.end.error = be16_to_cpu(vfc_cmd->error); + entry->u.end.fcp_rsp_flags = rsp->flags; + entry->u.end.rsp_code = rsp->data.info.rsp_code; + entry->u.end.scsi_status = rsp->scsi_status; + break; + case IBMVFC_MAD_FORMAT: + entry->op_code = be32_to_cpu(mad->opcode); + entry->u.end.status = be16_to_cpu(mad->status); + break; + default: + break; + + } +} + +#else +#define ibmvfc_trc_start(evt) do { } while (0) +#define ibmvfc_trc_end(evt) do { } while (0) +#endif + +/** + * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response + * @status: status / error class + * @error: error + * + * Return value: + * index into cmd_status / -EINVAL on failure + **/ +static int ibmvfc_get_err_index(u16 status, u16 error) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(cmd_status); i++) + if ((cmd_status[i].status & status) == cmd_status[i].status && + cmd_status[i].error == error) + return i; + + return -EINVAL; +} + +/** + * ibmvfc_get_cmd_error - Find the error description for the fcp response + * @status: status / error class + * @error: error + * + * 
Return value: + * error description string + **/ +static const char *ibmvfc_get_cmd_error(u16 status, u16 error) +{ + int rc = ibmvfc_get_err_index(status, error); + if (rc >= 0) + return cmd_status[rc].name; + return unknown_error; +} + +/** + * ibmvfc_get_err_result - Find the scsi status to return for the fcp response + * @vhost: ibmvfc host struct + * @vfc_cmd: ibmvfc command struct + * + * Return value: + * SCSI result value to return for completed command + **/ +static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd) +{ + int err; + struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd); + int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len); + + if ((rsp->flags & FCP_RSP_LEN_VALID) && + ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || + rsp->data.info.rsp_code)) + return DID_ERROR << 16; + + err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error)); + if (err >= 0) + return rsp->scsi_status | (cmd_status[err].result << 16); + return rsp->scsi_status | (DID_ERROR << 16); +} + +/** + * ibmvfc_retry_cmd - Determine if error status is retryable + * @status: status / error class + * @error: error + * + * Return value: + * 1 if error should be retried / 0 if it should not + **/ +static int ibmvfc_retry_cmd(u16 status, u16 error) +{ + int rc = ibmvfc_get_err_index(status, error); + + if (rc >= 0) + return cmd_status[rc].retry; + return 1; +} + +static const char *unknown_fc_explain = "unknown fc explain"; + +static const struct { + u16 fc_explain; + char *name; +} ls_explain [] = { + { 0x00, "no additional explanation" }, + { 0x01, "service parameter error - options" }, + { 0x03, "service parameter error - initiator control" }, + { 0x05, "service parameter error - recipient control" }, + { 0x07, "service parameter error - received data field size" }, + { 0x09, "service parameter error - concurrent seq" }, + { 0x0B, "service parameter error - credit" }, + { 0x0D, "invalid N_Port/F_Port_Name" }, + { 0x0E, "invalid node/Fabric Name" }, + { 0x0F, "invalid common service parameters" }, + { 0x11, "invalid association header" }, + { 0x13, "association header required" }, + { 0x15, "invalid originator S_ID" }, + { 0x17, "invalid OX_ID-RX-ID combination" }, + { 0x19, "command (request) already in progress" }, + { 0x1E, "N_Port Login requested" }, + { 0x1F, "Invalid N_Port_ID" }, +}; + +static const struct { + u16 fc_explain; + char *name; +} gs_explain [] = { + { 0x00, "no additional explanation" }, + { 0x01, "port identifier not registered" }, + { 0x02, "port name not registered" }, + { 0x03, "node name not registered" }, + { 0x04, "class of service not registered" }, + { 0x06, "initial process associator not registered" }, + { 0x07, "FC-4 TYPEs not registered" }, + { 0x08, "symbolic port name not registered" }, + { 0x09, "symbolic node name not registered" }, + { 0x0A, "port type not registered" }, + { 0xF0, "authorization exception" }, + { 0xF1, "authentication exception" }, + { 0xF2, "data base full" }, + { 0xF3, "data base empty" }, + { 0xF4, "processing request" }, + { 0xF5, "unable to verify connection" }, + { 0xF6, "devices not in a common zone" }, +}; + +/** + * ibmvfc_get_ls_explain - Return the FC Explain description text + * @status: FC Explain status + * + * Returns: + * error string + **/ +static const char *ibmvfc_get_ls_explain(u16 status) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ls_explain); i++) + if (ls_explain[i].fc_explain == status) + return ls_explain[i].name; + + return unknown_fc_explain; +} + +/** + * 
ibmvfc_get_gs_explain - Return the FC Explain description text + * @status: FC Explain status + * + * Returns: + * error string + **/ +static const char *ibmvfc_get_gs_explain(u16 status) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(gs_explain); i++) + if (gs_explain[i].fc_explain == status) + return gs_explain[i].name; + + return unknown_fc_explain; +} + +static const struct { + enum ibmvfc_fc_type fc_type; + char *name; +} fc_type [] = { + { IBMVFC_FABRIC_REJECT, "fabric reject" }, + { IBMVFC_PORT_REJECT, "port reject" }, + { IBMVFC_LS_REJECT, "ELS reject" }, + { IBMVFC_FABRIC_BUSY, "fabric busy" }, + { IBMVFC_PORT_BUSY, "port busy" }, + { IBMVFC_BASIC_REJECT, "basic reject" }, +}; + +static const char *unknown_fc_type = "unknown fc type"; + +/** + * ibmvfc_get_fc_type - Return the FC Type description text + * @status: FC Type error status + * + * Returns: + * error string + **/ +static const char *ibmvfc_get_fc_type(u16 status) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(fc_type); i++) + if (fc_type[i].fc_type == status) + return fc_type[i].name; + + return unknown_fc_type; +} + +/** + * ibmvfc_set_tgt_action - Set the next init action for the target + * @tgt: ibmvfc target struct + * @action: action to perform + * + * Returns: + * 0 if action changed / non-zero if not changed + **/ +static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt, + enum ibmvfc_target_action action) +{ + int rc = -EINVAL; + + switch (tgt->action) { + case IBMVFC_TGT_ACTION_LOGOUT_RPORT: + if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT || + action == IBMVFC_TGT_ACTION_DEL_RPORT) { + tgt->action = action; + rc = 0; + } + break; + case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT: + if (action == IBMVFC_TGT_ACTION_DEL_RPORT || + action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) { + tgt->action = action; + rc = 0; + } + break; + case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT: + if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) { + tgt->action = action; + rc = 0; + } + break; + case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT: + if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) { + tgt->action = action; + rc = 0; + } + break; + case IBMVFC_TGT_ACTION_DEL_RPORT: + if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) { + tgt->action = action; + rc = 0; + } + break; + case IBMVFC_TGT_ACTION_DELETED_RPORT: + break; + default: + tgt->action = action; + rc = 0; + break; + } + + if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT) + tgt->add_rport = 0; + + return rc; +} + +/** + * ibmvfc_set_host_state - Set the state for the host + * @vhost: ibmvfc host struct + * @state: state to set host to + * + * Returns: + * 0 if state changed / non-zero if not changed + **/ +static int ibmvfc_set_host_state(struct ibmvfc_host *vhost, + enum ibmvfc_host_state state) +{ + int rc = 0; + + switch (vhost->state) { + case IBMVFC_HOST_OFFLINE: + rc = -EINVAL; + break; + default: + vhost->state = state; + break; + } + + return rc; +} + +/** + * ibmvfc_set_host_action - Set the next init action for the host + * @vhost: ibmvfc host struct + * @action: action to perform + * + **/ +static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, + enum ibmvfc_host_action action) +{ + switch (action) { + case IBMVFC_HOST_ACTION_ALLOC_TGTS: + if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) + vhost->action = action; + break; + case IBMVFC_HOST_ACTION_LOGO_WAIT: + if (vhost->action == IBMVFC_HOST_ACTION_LOGO) + vhost->action = action; + break; + case IBMVFC_HOST_ACTION_INIT_WAIT: + if (vhost->action == IBMVFC_HOST_ACTION_INIT) + vhost->action = action; + break; + case 
IBMVFC_HOST_ACTION_QUERY: + switch (vhost->action) { + case IBMVFC_HOST_ACTION_INIT_WAIT: + case IBMVFC_HOST_ACTION_NONE: + case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: + vhost->action = action; + break; + default: + break; + } + break; + case IBMVFC_HOST_ACTION_TGT_INIT: + if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) + vhost->action = action; + break; + case IBMVFC_HOST_ACTION_REENABLE: + case IBMVFC_HOST_ACTION_RESET: + vhost->action = action; + break; + case IBMVFC_HOST_ACTION_INIT: + case IBMVFC_HOST_ACTION_TGT_DEL: + case IBMVFC_HOST_ACTION_LOGO: + case IBMVFC_HOST_ACTION_QUERY_TGTS: + case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: + case IBMVFC_HOST_ACTION_NONE: + default: + switch (vhost->action) { + case IBMVFC_HOST_ACTION_RESET: + case IBMVFC_HOST_ACTION_REENABLE: + break; + default: + vhost->action = action; + break; + } + break; + } +} + +/** + * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login) + * @vhost: ibmvfc host struct + * + * Return value: + * nothing + **/ +static void ibmvfc_reinit_host(struct ibmvfc_host *vhost) +{ + if (vhost->action == IBMVFC_HOST_ACTION_NONE && + vhost->state == IBMVFC_ACTIVE) { + if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { + scsi_block_requests(vhost->host); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); + } + } else + vhost->reinit = 1; + + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_del_tgt - Schedule cleanup and removal of the target + * @tgt: ibmvfc target struct + **/ +static void ibmvfc_del_tgt(struct ibmvfc_target *tgt) +{ + if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) { + tgt->job_step = ibmvfc_tgt_implicit_logout_and_del; + tgt->init_retries = 0; + } + wake_up(&tgt->vhost->work_wait_q); +} + +/** + * ibmvfc_link_down - Handle a link down event from the adapter + * @vhost: ibmvfc host struct + * @state: ibmvfc host state to enter + * + **/ +static void ibmvfc_link_down(struct ibmvfc_host *vhost, + enum ibmvfc_host_state state) +{ + struct ibmvfc_target *tgt; + + ENTER; + scsi_block_requests(vhost->host); + list_for_each_entry(tgt, &vhost->targets, queue) + ibmvfc_del_tgt(tgt); + ibmvfc_set_host_state(vhost, state); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL); + vhost->events_to_log |= IBMVFC_AE_LINKDOWN; + wake_up(&vhost->work_wait_q); + LEAVE; +} + +/** + * ibmvfc_init_host - Start host initialization + * @vhost: ibmvfc host struct + * + * Return value: + * nothing + **/ +static void ibmvfc_init_host(struct ibmvfc_host *vhost) +{ + struct ibmvfc_target *tgt; + + if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { + if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) { + dev_err(vhost->dev, + "Host initialization retries exceeded. 
Taking adapter offline\n"); + ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); + return; + } + } + + if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { + memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE); + vhost->async_crq.cur = 0; + + list_for_each_entry(tgt, &vhost->targets, queue) { + if (vhost->client_migrated) + tgt->need_login = 1; + else + ibmvfc_del_tgt(tgt); + } + + scsi_block_requests(vhost->host); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); + vhost->job_step = ibmvfc_npiv_login; + wake_up(&vhost->work_wait_q); + } +} + +/** + * ibmvfc_send_crq - Send a CRQ + * @vhost: ibmvfc host struct + * @word1: the first 64 bits of the data + * @word2: the second 64 bits of the data + * + * Return value: + * 0 on success / other on failure + **/ +static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2) +{ + struct vio_dev *vdev = to_vio_dev(vhost->dev); + return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); +} + +static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1, + u64 word2, u64 word3, u64 word4) +{ + struct vio_dev *vdev = to_vio_dev(vhost->dev); + + return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie, + word1, word2, word3, word4); +} + +/** + * ibmvfc_send_crq_init - Send a CRQ init message + * @vhost: ibmvfc host struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost) +{ + ibmvfc_dbg(vhost, "Sending CRQ init\n"); + return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0); +} + +/** + * ibmvfc_send_crq_init_complete - Send a CRQ init complete message + * @vhost: ibmvfc host struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost) +{ + ibmvfc_dbg(vhost, "Sending CRQ init complete\n"); + return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0); +} + +/** + * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host + * @vhost: ibmvfc host who owns the event pool + * @queue: ibmvfc queue struct + * @size: pool size + * + * Returns zero on success. 
+ **/ +static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost, + struct ibmvfc_queue *queue, + unsigned int size) +{ + int i; + struct ibmvfc_event_pool *pool = &queue->evt_pool; + + ENTER; + if (!size) + return 0; + + pool->size = size; + pool->events = kcalloc(size, sizeof(*pool->events), GFP_KERNEL); + if (!pool->events) + return -ENOMEM; + + pool->iu_storage = dma_alloc_coherent(vhost->dev, + size * sizeof(*pool->iu_storage), + &pool->iu_token, 0); + + if (!pool->iu_storage) { + kfree(pool->events); + return -ENOMEM; + } + + INIT_LIST_HEAD(&queue->sent); + INIT_LIST_HEAD(&queue->free); + spin_lock_init(&queue->l_lock); + + for (i = 0; i < size; ++i) { + struct ibmvfc_event *evt = &pool->events[i]; + + /* + * evt->active states + * 1 = in flight + * 0 = being completed + * -1 = free/freed + */ + atomic_set(&evt->active, -1); + atomic_set(&evt->free, 1); + evt->crq.valid = 0x80; + evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i)); + evt->xfer_iu = pool->iu_storage + i; + evt->vhost = vhost; + evt->queue = queue; + evt->ext_list = NULL; + list_add_tail(&evt->queue_list, &queue->free); + } + + LEAVE; + return 0; +} + +/** + * ibmvfc_free_event_pool - Frees memory of the event pool of a host + * @vhost: ibmvfc host who owns the event pool + * @queue: ibmvfc queue struct + * + **/ +static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost, + struct ibmvfc_queue *queue) +{ + int i; + struct ibmvfc_event_pool *pool = &queue->evt_pool; + + ENTER; + for (i = 0; i < pool->size; ++i) { + list_del(&pool->events[i].queue_list); + BUG_ON(atomic_read(&pool->events[i].free) != 1); + if (pool->events[i].ext_list) + dma_pool_free(vhost->sg_pool, + pool->events[i].ext_list, + pool->events[i].ext_list_token); + } + + kfree(pool->events); + dma_free_coherent(vhost->dev, + pool->size * sizeof(*pool->iu_storage), + pool->iu_storage, pool->iu_token); + LEAVE; +} + +/** + * ibmvfc_free_queue - Deallocate queue + * @vhost: ibmvfc host struct + * @queue: ibmvfc queue struct + * + * Unmaps dma and deallocates page for messages + **/ +static void ibmvfc_free_queue(struct ibmvfc_host *vhost, + struct ibmvfc_queue *queue) +{ + struct device *dev = vhost->dev; + + dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); + free_page((unsigned long)queue->msgs.handle); + queue->msgs.handle = NULL; + + ibmvfc_free_event_pool(vhost, queue); +} + +/** + * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ + * @vhost: ibmvfc host struct + * + * Frees irq, deallocates a page for messages, unmaps dma, and unregisters + * the crq with the hypervisor. 
+ **/ +static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost) +{ + long rc = 0; + struct vio_dev *vdev = to_vio_dev(vhost->dev); + struct ibmvfc_queue *crq = &vhost->crq; + + ibmvfc_dbg(vhost, "Releasing CRQ\n"); + free_irq(vdev->irq, vhost); + tasklet_kill(&vhost->tasklet); + do { + if (rc) + msleep(100); + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + vhost->state = IBMVFC_NO_CRQ; + vhost->logged_in = 0; + + ibmvfc_free_queue(vhost, crq); +} + +/** + * ibmvfc_reenable_crq_queue - reenables the CRQ + * @vhost: ibmvfc host struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) +{ + int rc = 0; + struct vio_dev *vdev = to_vio_dev(vhost->dev); + unsigned long flags; + + ibmvfc_dereg_sub_crqs(vhost); + + /* Re-enable the CRQ */ + do { + if (rc) + msleep(100); + rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); + } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + if (rc) + dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc); + + spin_lock_irqsave(vhost->host->host_lock, flags); + spin_lock(vhost->crq.q_lock); + vhost->do_enquiry = 1; + vhost->using_channels = 0; + spin_unlock(vhost->crq.q_lock); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + ibmvfc_reg_sub_crqs(vhost); + + return rc; +} + +/** + * ibmvfc_reset_crq - resets a crq after a failure + * @vhost: ibmvfc host struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) +{ + int rc = 0; + unsigned long flags; + struct vio_dev *vdev = to_vio_dev(vhost->dev); + struct ibmvfc_queue *crq = &vhost->crq; + + ibmvfc_dereg_sub_crqs(vhost); + + /* Close the CRQ */ + do { + if (rc) + msleep(100); + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + spin_lock_irqsave(vhost->host->host_lock, flags); + spin_lock(vhost->crq.q_lock); + vhost->state = IBMVFC_NO_CRQ; + vhost->logged_in = 0; + vhost->do_enquiry = 1; + vhost->using_channels = 0; + + /* Clean out the queue */ + memset(crq->msgs.crq, 0, PAGE_SIZE); + crq->cur = 0; + + /* And re-open it again */ + rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, + crq->msg_token, PAGE_SIZE); + + if (rc == H_CLOSED) + /* Adapter is good, but other end is not ready */ + dev_warn(vhost->dev, "Partner adapter not ready\n"); + else if (rc != 0) + dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc); + + spin_unlock(vhost->crq.q_lock); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + ibmvfc_reg_sub_crqs(vhost); + + return rc; +} + +/** + * ibmvfc_valid_event - Determines if event is valid. 
+ * @pool: event_pool that contains the event + * @evt: ibmvfc event to be checked for validity + * + * Return value: + * 1 if event is valid / 0 if event is not valid + **/ +static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool, + struct ibmvfc_event *evt) +{ + int index = evt - pool->events; + if (index < 0 || index >= pool->size) /* outside of bounds */ + return 0; + if (evt != pool->events + index) /* unaligned */ + return 0; + return 1; +} + +/** + * ibmvfc_free_event - Free the specified event + * @evt: ibmvfc_event to be freed + * + **/ +static void ibmvfc_free_event(struct ibmvfc_event *evt) +{ + struct ibmvfc_event_pool *pool = &evt->queue->evt_pool; + unsigned long flags; + + BUG_ON(!ibmvfc_valid_event(pool, evt)); + BUG_ON(atomic_inc_return(&evt->free) != 1); + BUG_ON(atomic_dec_and_test(&evt->active)); + + spin_lock_irqsave(&evt->queue->l_lock, flags); + list_add_tail(&evt->queue_list, &evt->queue->free); + if (evt->eh_comp) + complete(evt->eh_comp); + spin_unlock_irqrestore(&evt->queue->l_lock, flags); +} + +/** + * ibmvfc_scsi_eh_done - EH done function for queuecommand commands + * @evt: ibmvfc event struct + * + * This function does not setup any error status, that must be done + * before this function gets called. + **/ +static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt) +{ + struct scsi_cmnd *cmnd = evt->cmnd; + + if (cmnd) { + scsi_dma_unmap(cmnd); + scsi_done(cmnd); + } + + ibmvfc_free_event(evt); +} + +/** + * ibmvfc_complete_purge - Complete failed command list + * @purge_list: list head of failed commands + * + * This function runs completions on commands to fail as a result of a + * host reset or platform migration. + **/ +static void ibmvfc_complete_purge(struct list_head *purge_list) +{ + struct ibmvfc_event *evt, *pos; + + list_for_each_entry_safe(evt, pos, purge_list, queue_list) { + list_del(&evt->queue_list); + ibmvfc_trc_end(evt); + evt->done(evt); + } +} + +/** + * ibmvfc_fail_request - Fail request with specified error code + * @evt: ibmvfc event struct + * @error_code: error code to fail request with + * + * Return value: + * none + **/ +static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code) +{ + /* + * Anything we are failing should still be active. Otherwise, it + * implies we already got a response for the command and are doing + * something bad like double completing it. + */ + BUG_ON(!atomic_dec_and_test(&evt->active)); + if (evt->cmnd) { + evt->cmnd->result = (error_code << 16); + evt->done = ibmvfc_scsi_eh_done; + } else + evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED); + + del_timer(&evt->timer); +} + +/** + * ibmvfc_purge_requests - Our virtual adapter just shut down. 
Purge any sent requests + * @vhost: ibmvfc host struct + * @error_code: error code to fail requests with + * + * Return value: + * none + **/ +static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code) +{ + struct ibmvfc_event *evt, *pos; + struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs; + unsigned long flags; + int hwqs = 0; + int i; + + if (vhost->using_channels) + hwqs = vhost->scsi_scrqs.active_queues; + + ibmvfc_dbg(vhost, "Purging all requests\n"); + spin_lock_irqsave(&vhost->crq.l_lock, flags); + list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list) + ibmvfc_fail_request(evt, error_code); + list_splice_init(&vhost->crq.sent, &vhost->purge); + spin_unlock_irqrestore(&vhost->crq.l_lock, flags); + + for (i = 0; i < hwqs; i++) { + spin_lock_irqsave(queues[i].q_lock, flags); + spin_lock(&queues[i].l_lock); + list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list) + ibmvfc_fail_request(evt, error_code); + list_splice_init(&queues[i].sent, &vhost->purge); + spin_unlock(&queues[i].l_lock); + spin_unlock_irqrestore(queues[i].q_lock, flags); + } +} + +/** + * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ + * @vhost: struct ibmvfc host to reset + **/ +static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost) +{ + ibmvfc_purge_requests(vhost, DID_ERROR); + ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET); +} + +/** + * __ibmvfc_reset_host - Reset the connection to the server (no locking) + * @vhost: struct ibmvfc host to reset + **/ +static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) +{ + if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT && + !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { + scsi_block_requests(vhost->host); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO); + vhost->job_step = ibmvfc_npiv_logout; + wake_up(&vhost->work_wait_q); + } else + ibmvfc_hard_reset_host(vhost); +} + +/** + * ibmvfc_reset_host - Reset the connection to the server + * @vhost: ibmvfc host struct + **/ +static void ibmvfc_reset_host(struct ibmvfc_host *vhost) +{ + unsigned long flags; + + spin_lock_irqsave(vhost->host->host_lock, flags); + __ibmvfc_reset_host(vhost); + spin_unlock_irqrestore(vhost->host->host_lock, flags); +} + +/** + * ibmvfc_retry_host_init - Retry host initialization if allowed + * @vhost: ibmvfc host struct + * + * Returns: 1 if init will be retried / 0 if not + * + **/ +static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost) +{ + int retry = 0; + + if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { + vhost->delay_init = 1; + if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) { + dev_err(vhost->dev, + "Host initialization retries exceeded. 
Taking adapter offline\n"); + ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); + } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES) + __ibmvfc_reset_host(vhost); + else { + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); + retry = 1; + } + } + + wake_up(&vhost->work_wait_q); + return retry; +} + +/** + * __ibmvfc_get_target - Find the specified scsi_target (no locking) + * @starget: scsi target struct + * + * Return value: + * ibmvfc_target struct / NULL if not found + **/ +static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ibmvfc_host *vhost = shost_priv(shost); + struct ibmvfc_target *tgt; + + list_for_each_entry(tgt, &vhost->targets, queue) + if (tgt->target_id == starget->id) { + kref_get(&tgt->kref); + return tgt; + } + return NULL; +} + +/** + * ibmvfc_get_target - Find the specified scsi_target + * @starget: scsi target struct + * + * Return value: + * ibmvfc_target struct / NULL if not found + **/ +static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ibmvfc_target *tgt; + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + tgt = __ibmvfc_get_target(starget); + spin_unlock_irqrestore(shost->host_lock, flags); + return tgt; +} + +/** + * ibmvfc_get_host_speed - Get host port speed + * @shost: scsi host struct + * + * Return value: + * none + **/ +static void ibmvfc_get_host_speed(struct Scsi_Host *shost) +{ + struct ibmvfc_host *vhost = shost_priv(shost); + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + if (vhost->state == IBMVFC_ACTIVE) { + switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) { + case 1: + fc_host_speed(shost) = FC_PORTSPEED_1GBIT; + break; + case 2: + fc_host_speed(shost) = FC_PORTSPEED_2GBIT; + break; + case 4: + fc_host_speed(shost) = FC_PORTSPEED_4GBIT; + break; + case 8: + fc_host_speed(shost) = FC_PORTSPEED_8GBIT; + break; + case 10: + fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + break; + case 16: + fc_host_speed(shost) = FC_PORTSPEED_16GBIT; + break; + default: + ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n", + be64_to_cpu(vhost->login_buf->resp.link_speed) / 100); + fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; + break; + } + } else + fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; + spin_unlock_irqrestore(shost->host_lock, flags); +} + +/** + * ibmvfc_get_host_port_state - Get host port state + * @shost: scsi host struct + * + * Return value: + * none + **/ +static void ibmvfc_get_host_port_state(struct Scsi_Host *shost) +{ + struct ibmvfc_host *vhost = shost_priv(shost); + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + switch (vhost->state) { + case IBMVFC_INITIALIZING: + case IBMVFC_ACTIVE: + fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + break; + case IBMVFC_LINK_DOWN: + fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; + break; + case IBMVFC_LINK_DEAD: + case IBMVFC_HOST_OFFLINE: + fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; + break; + case IBMVFC_HALTED: + fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED; + break; + case IBMVFC_NO_CRQ: + fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; + break; + default: + ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state); + fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; + break; + } + spin_unlock_irqrestore(shost->host_lock, flags); +} + +/** + * ibmvfc_set_rport_dev_loss_tmo - Set rport's 
device loss timeout + * @rport: rport struct + * @timeout: timeout value + * + * Return value: + * none + **/ +static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) +{ + if (timeout) + rport->dev_loss_tmo = timeout; + else + rport->dev_loss_tmo = 1; +} + +/** + * ibmvfc_release_tgt - Free memory allocated for a target + * @kref: kref struct + * + **/ +static void ibmvfc_release_tgt(struct kref *kref) +{ + struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref); + kfree(tgt); +} + +/** + * ibmvfc_get_starget_node_name - Get SCSI target's node name + * @starget: scsi target struct + * + * Return value: + * none + **/ +static void ibmvfc_get_starget_node_name(struct scsi_target *starget) +{ + struct ibmvfc_target *tgt = ibmvfc_get_target(starget); + fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0; + if (tgt) + kref_put(&tgt->kref, ibmvfc_release_tgt); +} + +/** + * ibmvfc_get_starget_port_name - Get SCSI target's port name + * @starget: scsi target struct + * + * Return value: + * none + **/ +static void ibmvfc_get_starget_port_name(struct scsi_target *starget) +{ + struct ibmvfc_target *tgt = ibmvfc_get_target(starget); + fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0; + if (tgt) + kref_put(&tgt->kref, ibmvfc_release_tgt); +} + +/** + * ibmvfc_get_starget_port_id - Get SCSI target's port ID + * @starget: scsi target struct + * + * Return value: + * none + **/ +static void ibmvfc_get_starget_port_id(struct scsi_target *starget) +{ + struct ibmvfc_target *tgt = ibmvfc_get_target(starget); + fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1; + if (tgt) + kref_put(&tgt->kref, ibmvfc_release_tgt); +} + +/** + * ibmvfc_wait_while_resetting - Wait while the host resets + * @vhost: ibmvfc host struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost) +{ + long timeout = wait_event_timeout(vhost->init_wait_q, + ((vhost->state == IBMVFC_ACTIVE || + vhost->state == IBMVFC_HOST_OFFLINE || + vhost->state == IBMVFC_LINK_DEAD) && + vhost->action == IBMVFC_HOST_ACTION_NONE), + (init_timeout * HZ)); + + return timeout ? 0 : -EIO; +} + +/** + * ibmvfc_issue_fc_host_lip - Re-initiate link initialization + * @shost: scsi host struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost) +{ + struct ibmvfc_host *vhost = shost_priv(shost); + + dev_err(vhost->dev, "Initiating host LIP. 
Resetting connection\n"); + ibmvfc_reset_host(vhost); + return ibmvfc_wait_while_resetting(vhost); +} + +/** + * ibmvfc_gather_partition_info - Gather info about the LPAR + * @vhost: ibmvfc host struct + * + * Return value: + * none + **/ +static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost) +{ + struct device_node *rootdn; + const char *name; + const unsigned int *num; + + rootdn = of_find_node_by_path("/"); + if (!rootdn) + return; + + name = of_get_property(rootdn, "ibm,partition-name", NULL); + if (name) + strncpy(vhost->partition_name, name, sizeof(vhost->partition_name)); + num = of_get_property(rootdn, "ibm,partition-no", NULL); + if (num) + vhost->partition_number = *num; + of_node_put(rootdn); +} + +/** + * ibmvfc_set_login_info - Setup info for NPIV login + * @vhost: ibmvfc host struct + * + * Return value: + * none + **/ +static void ibmvfc_set_login_info(struct ibmvfc_host *vhost) +{ + struct ibmvfc_npiv_login *login_info = &vhost->login_info; + struct ibmvfc_queue *async_crq = &vhost->async_crq; + struct device_node *of_node = vhost->dev->of_node; + const char *location; + + memset(login_info, 0, sizeof(*login_info)); + + login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX); + login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9); + login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu)); + login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp)); + login_info->partition_num = cpu_to_be32(vhost->partition_number); + login_info->vfc_frame_version = cpu_to_be32(1); + login_info->fcp_version = cpu_to_be16(3); + login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT); + if (vhost->client_migrated) + login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED); + + login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ); + login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN); + + if (vhost->mq_enabled || vhost->using_channels) + login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS); + + login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token); + login_info->async.len = cpu_to_be32(async_crq->size * + sizeof(*async_crq->msgs.async)); + strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME); + strncpy(login_info->device_name, + dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME); + + location = of_get_property(of_node, "ibm,loc-code", NULL); + location = location ? location : dev_name(vhost->dev); + strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME); +} + +/** + * ibmvfc_get_event - Gets the next free event in pool + * @queue: ibmvfc queue struct + * + * Returns a free event from the pool. + **/ +static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue) +{ + struct ibmvfc_event *evt; + unsigned long flags; + + spin_lock_irqsave(&queue->l_lock, flags); + if (list_empty(&queue->free)) { + ibmvfc_log(queue->vhost, 4, "empty event pool on queue:%ld\n", queue->hwq_id); + spin_unlock_irqrestore(&queue->l_lock, flags); + return NULL; + } + evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list); + atomic_set(&evt->free, 0); + list_del(&evt->queue_list); + spin_unlock_irqrestore(&queue->l_lock, flags); + return evt; +} + +/** + * ibmvfc_locked_done - Calls evt completion with host_lock held + * @evt: ibmvfc evt to complete + * + * All non-scsi command completion callbacks have the expectation that the + * host_lock is held. This callback is used by ibmvfc_init_event to wrap a + * MAD evt with the host_lock. 
+ **/ +static void ibmvfc_locked_done(struct ibmvfc_event *evt) +{ + unsigned long flags; + + spin_lock_irqsave(evt->vhost->host->host_lock, flags); + evt->_done(evt); + spin_unlock_irqrestore(evt->vhost->host->host_lock, flags); +} + +/** + * ibmvfc_init_event - Initialize fields in an event struct that are always + * required. + * @evt: The event + * @done: Routine to call when the event is responded to + * @format: SRP or MAD format + **/ +static void ibmvfc_init_event(struct ibmvfc_event *evt, + void (*done) (struct ibmvfc_event *), u8 format) +{ + evt->cmnd = NULL; + evt->sync_iu = NULL; + evt->eh_comp = NULL; + evt->crq.format = format; + if (format == IBMVFC_CMD_FORMAT) + evt->done = done; + else { + evt->_done = done; + evt->done = ibmvfc_locked_done; + } + evt->hwq = 0; +} + +/** + * ibmvfc_map_sg_list - Initialize scatterlist + * @scmd: scsi command struct + * @nseg: number of scatterlist segments + * @md: memory descriptor list to initialize + **/ +static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg, + struct srp_direct_buf *md) +{ + int i; + struct scatterlist *sg; + + scsi_for_each_sg(scmd, sg, nseg, i) { + md[i].va = cpu_to_be64(sg_dma_address(sg)); + md[i].len = cpu_to_be32(sg_dma_len(sg)); + md[i].key = 0; + } +} + +/** + * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields + * @scmd: struct scsi_cmnd with the scatterlist + * @evt: ibmvfc event struct + * @vfc_cmd: vfc_cmd that contains the memory descriptor + * @dev: device for which to map dma memory + * + * Returns: + * 0 on success / non-zero on failure + **/ +static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd, + struct ibmvfc_event *evt, + struct ibmvfc_cmd *vfc_cmd, struct device *dev) +{ + + int sg_mapped; + struct srp_direct_buf *data = &vfc_cmd->ioba; + struct ibmvfc_host *vhost = dev_get_drvdata(dev); + struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd); + + if (cls3_error) + vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR); + + sg_mapped = scsi_dma_map(scmd); + if (!sg_mapped) { + vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC); + return 0; + } else if (unlikely(sg_mapped < 0)) { + if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) + scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n"); + return sg_mapped; + } + + if (scmd->sc_data_direction == DMA_TO_DEVICE) { + vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE); + iu->add_cdb_len |= IBMVFC_WRDATA; + } else { + vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ); + iu->add_cdb_len |= IBMVFC_RDDATA; + } + + if (sg_mapped == 1) { + ibmvfc_map_sg_list(scmd, sg_mapped, data); + return 0; + } + + vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST); + + if (!evt->ext_list) { + evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC, + &evt->ext_list_token); + + if (!evt->ext_list) { + scsi_dma_unmap(scmd); + if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) + scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n"); + return -ENOMEM; + } + } + + ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list); + + data->va = cpu_to_be64(evt->ext_list_token); + data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf)); + data->key = 0; + return 0; +} + +/** + * ibmvfc_timeout - Internal command timeout handler + * @t: struct ibmvfc_event that timed out + * + * Called when an internally generated command times out + **/ +static void ibmvfc_timeout(struct timer_list *t) +{ + struct ibmvfc_event *evt = from_timer(evt, t, timer); + struct ibmvfc_host *vhost = evt->vhost; + dev_err(vhost->dev, 
"Command timed out (%p). Resetting connection\n", evt); + ibmvfc_reset_host(vhost); +} + +/** + * ibmvfc_send_event - Transforms event to u64 array and calls send_crq() + * @evt: event to be sent + * @vhost: ibmvfc host struct + * @timeout: timeout in seconds - 0 means do not time command + * + * Returns the value returned from ibmvfc_send_crq(). (Zero for success) + **/ +static int ibmvfc_send_event(struct ibmvfc_event *evt, + struct ibmvfc_host *vhost, unsigned long timeout) +{ + __be64 *crq_as_u64 = (__be64 *) &evt->crq; + unsigned long flags; + int rc; + + /* Copy the IU into the transfer area */ + *evt->xfer_iu = evt->iu; + if (evt->crq.format == IBMVFC_CMD_FORMAT) + evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt); + else if (evt->crq.format == IBMVFC_MAD_FORMAT) + evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt); + else + BUG(); + + timer_setup(&evt->timer, ibmvfc_timeout, 0); + + if (timeout) { + evt->timer.expires = jiffies + (timeout * HZ); + add_timer(&evt->timer); + } + + spin_lock_irqsave(&evt->queue->l_lock, flags); + list_add_tail(&evt->queue_list, &evt->queue->sent); + atomic_set(&evt->active, 1); + + mb(); + + if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT) + rc = ibmvfc_send_sub_crq(vhost, + evt->queue->vios_cookie, + be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1]), + 0, 0); + else + rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + if (rc) { + atomic_set(&evt->active, 0); + list_del(&evt->queue_list); + spin_unlock_irqrestore(&evt->queue->l_lock, flags); + del_timer(&evt->timer); + + /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY. + * Firmware will send a CRQ with a transport event (0xFF) to + * tell this client what has happened to the transport. This + * will be handled in ibmvfc_handle_crq() + */ + if (rc == H_CLOSED) { + if (printk_ratelimit()) + dev_warn(vhost->dev, "Send warning. 
Receive queue closed, will retry.\n"); + if (evt->cmnd) + scsi_dma_unmap(evt->cmnd); + ibmvfc_free_event(evt); + return SCSI_MLQUEUE_HOST_BUSY; + } + + dev_err(vhost->dev, "Send error (rc=%d)\n", rc); + if (evt->cmnd) { + evt->cmnd->result = DID_ERROR << 16; + evt->done = ibmvfc_scsi_eh_done; + } else + evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR); + + evt->done(evt); + } else { + spin_unlock_irqrestore(&evt->queue->l_lock, flags); + ibmvfc_trc_start(evt); + } + + return 0; +} + +/** + * ibmvfc_log_error - Log an error for the failed command if appropriate + * @evt: ibmvfc event to log + * + **/ +static void ibmvfc_log_error(struct ibmvfc_event *evt) +{ + struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd; + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd); + struct scsi_cmnd *cmnd = evt->cmnd; + const char *err = unknown_error; + int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error)); + int logerr = 0; + int rsp_code = 0; + + if (index >= 0) { + logerr = cmd_status[index].log; + err = cmd_status[index].name; + } + + if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1))) + return; + + if (rsp->flags & FCP_RSP_LEN_VALID) + rsp_code = rsp->data.info.rsp_code; + + scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) " + "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n", + cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error), + rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status); +} + +/** + * ibmvfc_relogin - Log back into the specified device + * @sdev: scsi device struct + * + **/ +static void ibmvfc_relogin(struct scsi_device *sdev) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct ibmvfc_target *tgt; + unsigned long flags; + + spin_lock_irqsave(vhost->host->host_lock, flags); + list_for_each_entry(tgt, &vhost->targets, queue) { + if (rport == tgt->rport) { + ibmvfc_del_tgt(tgt); + break; + } + } + + ibmvfc_reinit_host(vhost); + spin_unlock_irqrestore(vhost->host->host_lock, flags); +} + +/** + * ibmvfc_scsi_done - Handle responses from commands + * @evt: ibmvfc event to be handled + * + * Used as a callback when sending scsi cmds. 
+ **/ +static void ibmvfc_scsi_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd; + struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd); + struct scsi_cmnd *cmnd = evt->cmnd; + u32 rsp_len = 0; + u32 sense_len = be32_to_cpu(rsp->fcp_sense_len); + + if (cmnd) { + if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID) + scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid)); + else if (rsp->flags & FCP_RESID_UNDER) + scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid)); + else + scsi_set_resid(cmnd, 0); + + if (vfc_cmd->status) { + cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd); + + if (rsp->flags & FCP_RSP_LEN_VALID) + rsp_len = be32_to_cpu(rsp->fcp_rsp_len); + if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE) + sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len; + if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) + memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); + if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) && + (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED)) + ibmvfc_relogin(cmnd->device); + + if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) + cmnd->result = (DID_ERROR << 16); + + ibmvfc_log_error(evt); + } + + if (!cmnd->result && + (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow)) + cmnd->result = (DID_ERROR << 16); + + scsi_dma_unmap(cmnd); + scsi_done(cmnd); + } + + ibmvfc_free_event(evt); +} + +/** + * ibmvfc_host_chkready - Check if the host can accept commands + * @vhost: struct ibmvfc host + * + * Returns: + * 1 if host can accept command / 0 if not + **/ +static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost) +{ + int result = 0; + + switch (vhost->state) { + case IBMVFC_LINK_DEAD: + case IBMVFC_HOST_OFFLINE: + result = DID_NO_CONNECT << 16; + break; + case IBMVFC_NO_CRQ: + case IBMVFC_INITIALIZING: + case IBMVFC_HALTED: + case IBMVFC_LINK_DOWN: + result = DID_REQUEUE << 16; + break; + case IBMVFC_ACTIVE: + result = 0; + break; + } + + return result; +} + +static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd; + struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd); + struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd); + size_t offset; + + memset(vfc_cmd, 0, sizeof(*vfc_cmd)); + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) { + offset = offsetof(struct ibmvfc_cmd, v2.rsp); + vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name); + } else + offset = offsetof(struct ibmvfc_cmd, v1.rsp); + vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset); + vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp)); + vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE); + vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu)); + vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp)); + vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata); + vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id); + int_to_scsilun(sdev->lun, &iu->lun); + + return vfc_cmd; +} + +/** + * ibmvfc_queuecommand - The queuecommand function of the scsi template + * @shost: scsi host struct + * @cmnd: struct scsi_cmnd to be executed + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) +{ + struct ibmvfc_host *vhost = 
shost_priv(shost); + struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + struct ibmvfc_cmd *vfc_cmd; + struct ibmvfc_fcp_cmd_iu *iu; + struct ibmvfc_event *evt; + u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)); + u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq); + u16 scsi_channel; + int rc; + + if (unlikely((rc = fc_remote_port_chkready(rport))) || + unlikely((rc = ibmvfc_host_chkready(vhost)))) { + cmnd->result = rc; + scsi_done(cmnd); + return 0; + } + + cmnd->result = (DID_OK << 16); + if (vhost->using_channels) { + scsi_channel = hwq % vhost->scsi_scrqs.active_queues; + evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]); + if (!evt) + return SCSI_MLQUEUE_HOST_BUSY; + + evt->hwq = hwq % vhost->scsi_scrqs.active_queues; + } else { + evt = ibmvfc_get_event(&vhost->crq); + if (!evt) + return SCSI_MLQUEUE_HOST_BUSY; + } + + ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT); + evt->cmnd = cmnd; + + vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device); + iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd); + + iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd)); + memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len); + + if (cmnd->flags & SCMD_TAGGED) { + vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag); + iu->pri_task_attr = IBMVFC_SIMPLE_TASK; + } + + vfc_cmd->correlation = cpu_to_be64((u64)evt); + + if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev)))) + return ibmvfc_send_event(evt, vhost, 0); + + ibmvfc_free_event(evt); + if (rc == -ENOMEM) + return SCSI_MLQUEUE_HOST_BUSY; + + if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) + scmd_printk(KERN_ERR, cmnd, + "Failed to map DMA buffer for command. rc=%d\n", rc); + + cmnd->result = DID_ERROR << 16; + scsi_done(cmnd); + return 0; +} + +/** + * ibmvfc_sync_completion - Signal that a synchronous command has completed + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_sync_completion(struct ibmvfc_event *evt) +{ + /* copy the response back */ + if (evt->sync_iu) + *evt->sync_iu = *evt->xfer_iu; + + complete(&evt->comp); +} + +/** + * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands + * @evt: struct ibmvfc_event + * + **/ +static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + + ibmvfc_free_event(evt); + vhost->aborting_passthru = 0; + dev_info(vhost->dev, "Passthru command cancelled\n"); +} + +/** + * ibmvfc_bsg_timeout - Handle a BSG timeout + * @job: struct bsg_job that timed out + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_bsg_timeout(struct bsg_job *job) +{ + struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job)); + unsigned long port_id = (unsigned long)job->dd_data; + struct ibmvfc_event *evt; + struct ibmvfc_tmf *tmf; + unsigned long flags; + int rc; + + ENTER; + spin_lock_irqsave(vhost->host->host_lock, flags); + if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) { + __ibmvfc_reset_host(vhost); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return 0; + } + + vhost->aborting_passthru = 1; + evt = ibmvfc_get_event(&vhost->crq); + if (!evt) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return -ENOMEM; + } + + ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT); + + tmf = &evt->iu.tmf; + memset(tmf, 0, sizeof(*tmf)); + tmf->common.version = cpu_to_be32(1); + tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD); + tmf->common.length = cpu_to_be16(sizeof(*tmf)); + tmf->scsi_id = cpu_to_be64(port_id); + tmf->cancel_key = 
cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY); + tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY); + rc = ibmvfc_send_event(evt, vhost, default_timeout); + + if (rc != 0) { + vhost->aborting_passthru = 0; + dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc); + rc = -EIO; + } else + dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n", + port_id); + + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + LEAVE; + return rc; +} + +/** + * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command + * @vhost: struct ibmvfc_host to send command + * @port_id: port ID to send command + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id) +{ + struct ibmvfc_port_login *plogi; + struct ibmvfc_target *tgt; + struct ibmvfc_event *evt; + union ibmvfc_iu rsp_iu; + unsigned long flags; + int rc = 0, issue_login = 1; + + ENTER; + spin_lock_irqsave(vhost->host->host_lock, flags); + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->scsi_id == port_id) { + issue_login = 0; + break; + } + } + + if (!issue_login) + goto unlock_out; + if (unlikely((rc = ibmvfc_host_chkready(vhost)))) + goto unlock_out; + + evt = ibmvfc_get_event(&vhost->crq); + if (!evt) { + rc = -ENOMEM; + goto unlock_out; + } + ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); + plogi = &evt->iu.plogi; + memset(plogi, 0, sizeof(*plogi)); + plogi->common.version = cpu_to_be32(1); + plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN); + plogi->common.length = cpu_to_be16(sizeof(*plogi)); + plogi->scsi_id = cpu_to_be64(port_id); + evt->sync_iu = &rsp_iu; + init_completion(&evt->comp); + + rc = ibmvfc_send_event(evt, vhost, default_timeout); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (rc) + return -EIO; + + wait_for_completion(&evt->comp); + + if (rsp_iu.plogi.common.status) + rc = -EIO; + + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_free_event(evt); +unlock_out: + spin_unlock_irqrestore(vhost->host->host_lock, flags); + LEAVE; + return rc; +} + +/** + * ibmvfc_bsg_request - Handle a BSG request + * @job: struct bsg_job to be executed + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_bsg_request(struct bsg_job *job) +{ + struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job)); + struct fc_rport *rport = fc_bsg_to_rport(job); + struct ibmvfc_passthru_mad *mad; + struct ibmvfc_event *evt; + union ibmvfc_iu rsp_iu; + unsigned long flags, port_id = -1; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + unsigned int code = bsg_request->msgcode; + int rc = 0, req_seg, rsp_seg, issue_login = 0; + u32 fc_flags, rsp_len; + + ENTER; + bsg_reply->reply_payload_rcv_len = 0; + if (rport) + port_id = rport->port_id; + + switch (code) { + case FC_BSG_HST_ELS_NOLOGIN: + port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) | + (bsg_request->rqst_data.h_els.port_id[1] << 8) | + bsg_request->rqst_data.h_els.port_id[2]; + fallthrough; + case FC_BSG_RPT_ELS: + fc_flags = IBMVFC_FC_ELS; + break; + case FC_BSG_HST_CT: + issue_login = 1; + port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) | + (bsg_request->rqst_data.h_ct.port_id[1] << 8) | + bsg_request->rqst_data.h_ct.port_id[2]; + fallthrough; + case FC_BSG_RPT_CT: + fc_flags = IBMVFC_FC_CT_IU; + break; + default: + return -ENOTSUPP; + } + + if (port_id == -1) + return -EINVAL; + if (!mutex_trylock(&vhost->passthru_mutex)) + 
return -EBUSY; + + job->dd_data = (void *)port_id; + req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + + if (!req_seg) { + mutex_unlock(&vhost->passthru_mutex); + return -ENOMEM; + } + + rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + + if (!rsp_seg) { + dma_unmap_sg(vhost->dev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + mutex_unlock(&vhost->passthru_mutex); + return -ENOMEM; + } + + if (req_seg > 1 || rsp_seg > 1) { + rc = -EINVAL; + goto out; + } + + if (issue_login) + rc = ibmvfc_bsg_plogi(vhost, port_id); + + spin_lock_irqsave(vhost->host->host_lock, flags); + + if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) || + unlikely((rc = ibmvfc_host_chkready(vhost)))) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + goto out; + } + + evt = ibmvfc_get_event(&vhost->crq); + if (!evt) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + rc = -ENOMEM; + goto out; + } + ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); + mad = &evt->iu.passthru; + + memset(mad, 0, sizeof(*mad)); + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU); + mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu)); + + mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + + offsetof(struct ibmvfc_passthru_mad, iu)); + mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu)); + + mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len); + mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len); + mad->iu.flags = cpu_to_be32(fc_flags); + mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY); + + mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list)); + mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list)); + mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list)); + mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list)); + mad->iu.scsi_id = cpu_to_be64(port_id); + mad->iu.tag = cpu_to_be64((u64)evt); + rsp_len = be32_to_cpu(mad->iu.rsp.len); + + evt->sync_iu = &rsp_iu; + init_completion(&evt->comp); + rc = ibmvfc_send_event(evt, vhost, 0); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (rc) { + rc = -EIO; + goto out; + } + + wait_for_completion(&evt->comp); + + if (rsp_iu.passthru.common.status) + rc = -EIO; + else + bsg_reply->reply_payload_rcv_len = rsp_len; + + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_free_event(evt); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + bsg_reply->result = rc; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + rc = 0; +out: + dma_unmap_sg(vhost->dev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + dma_unmap_sg(vhost->dev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + mutex_unlock(&vhost->passthru_mutex); + LEAVE; + return rc; +} + +/** + * ibmvfc_reset_device - Reset the device with the specified reset type + * @sdev: scsi device to reset + * @type: reset type + * @desc: reset type description for log messages + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct ibmvfc_cmd *tmf; + 
struct ibmvfc_event *evt = NULL; + union ibmvfc_iu rsp_iu; + struct ibmvfc_fcp_cmd_iu *iu; + struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd); + int rsp_rc = -EBUSY; + unsigned long flags; + int rsp_code = 0; + + spin_lock_irqsave(vhost->host->host_lock, flags); + if (vhost->state == IBMVFC_ACTIVE) { + if (vhost->using_channels) + evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]); + else + evt = ibmvfc_get_event(&vhost->crq); + + if (!evt) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return -ENOMEM; + } + + ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT); + tmf = ibmvfc_init_vfc_cmd(evt, sdev); + iu = ibmvfc_get_fcp_iu(vhost, tmf); + + tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF)); + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) + tmf->target_wwpn = cpu_to_be64(rport->port_name); + iu->tmf_flags = type; + evt->sync_iu = &rsp_iu; + + init_completion(&evt->comp); + rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout); + } + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (rsp_rc != 0) { + sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n", + desc, rsp_rc); + return -EIO; + } + + sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc); + wait_for_completion(&evt->comp); + + if (rsp_iu.cmd.status) + rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd); + + if (rsp_code) { + if (fc_rsp->flags & FCP_RSP_LEN_VALID) + rsp_code = fc_rsp->data.info.rsp_code; + + sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) " + "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc, + ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), + be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code, + fc_rsp->scsi_status); + rsp_rc = -EIO; + } else + sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc); + + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_free_event(evt); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return rsp_rc; +} + +/** + * ibmvfc_match_rport - Match function for specified remote port + * @evt: ibmvfc event struct + * @rport: device to match + * + * Returns: + * 1 if event matches rport / 0 if event does not match rport + **/ +static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport) +{ + struct fc_rport *cmd_rport; + + if (evt->cmnd) { + cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device)); + if (cmd_rport == rport) + return 1; + } + return 0; +} + +/** + * ibmvfc_match_target - Match function for specified target + * @evt: ibmvfc event struct + * @device: device to match (starget) + * + * Returns: + * 1 if event matches starget / 0 if event does not match starget + **/ +static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device) +{ + if (evt->cmnd && scsi_target(evt->cmnd->device) == device) + return 1; + return 0; +} + +/** + * ibmvfc_match_lun - Match function for specified LUN + * @evt: ibmvfc event struct + * @device: device to match (sdev) + * + * Returns: + * 1 if event matches sdev / 0 if event does not match sdev + **/ +static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device) +{ + if (evt->cmnd && evt->cmnd->device == device) + return 1; + return 0; +} + +/** + * ibmvfc_event_is_free - Check if event is free or not + * @evt: ibmvfc event struct + * + * Returns: + * true / false + **/ +static bool ibmvfc_event_is_free(struct ibmvfc_event *evt) +{ + struct ibmvfc_event *loop_evt; + + list_for_each_entry(loop_evt, &evt->queue->free, queue_list) + 
if (loop_evt == evt) + return true; + + return false; +} + +/** + * ibmvfc_wait_for_ops - Wait for ops to complete + * @vhost: ibmvfc host struct + * @device: device to match (starget or sdev) + * @match: match function + * + * Returns: + * SUCCESS / FAILED + **/ +static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device, + int (*match) (struct ibmvfc_event *, void *)) +{ + struct ibmvfc_event *evt; + DECLARE_COMPLETION_ONSTACK(comp); + int wait, i, q_index, q_size; + unsigned long flags; + signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ; + struct ibmvfc_queue *queues; + + ENTER; + if (vhost->mq_enabled && vhost->using_channels) { + queues = vhost->scsi_scrqs.scrqs; + q_size = vhost->scsi_scrqs.active_queues; + } else { + queues = &vhost->crq; + q_size = 1; + } + + do { + wait = 0; + spin_lock_irqsave(vhost->host->host_lock, flags); + for (q_index = 0; q_index < q_size; q_index++) { + spin_lock(&queues[q_index].l_lock); + for (i = 0; i < queues[q_index].evt_pool.size; i++) { + evt = &queues[q_index].evt_pool.events[i]; + if (!ibmvfc_event_is_free(evt)) { + if (match(evt, device)) { + evt->eh_comp = ∁ + wait++; + } + } + } + spin_unlock(&queues[q_index].l_lock); + } + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (wait) { + timeout = wait_for_completion_timeout(&comp, timeout); + + if (!timeout) { + wait = 0; + spin_lock_irqsave(vhost->host->host_lock, flags); + for (q_index = 0; q_index < q_size; q_index++) { + spin_lock(&queues[q_index].l_lock); + for (i = 0; i < queues[q_index].evt_pool.size; i++) { + evt = &queues[q_index].evt_pool.events[i]; + if (!ibmvfc_event_is_free(evt)) { + if (match(evt, device)) { + evt->eh_comp = NULL; + wait++; + } + } + } + spin_unlock(&queues[q_index].l_lock); + } + spin_unlock_irqrestore(vhost->host->host_lock, flags); + if (wait) + dev_err(vhost->dev, "Timed out waiting for aborted commands\n"); + LEAVE; + return wait ? 
FAILED : SUCCESS; + } + } + } while (wait); + + LEAVE; + return SUCCESS; +} + +static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue, + struct scsi_device *sdev, + int type) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); + struct scsi_target *starget = scsi_target(sdev); + struct fc_rport *rport = starget_to_rport(starget); + struct ibmvfc_event *evt; + struct ibmvfc_tmf *tmf; + + evt = ibmvfc_get_event(queue); + if (!evt) + return NULL; + ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); + + tmf = &evt->iu.tmf; + memset(tmf, 0, sizeof(*tmf)); + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) { + tmf->common.version = cpu_to_be32(2); + tmf->target_wwpn = cpu_to_be64(rport->port_name); + } else { + tmf->common.version = cpu_to_be32(1); + } + tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD); + tmf->common.length = cpu_to_be16(sizeof(*tmf)); + tmf->scsi_id = cpu_to_be64(rport->port_id); + int_to_scsilun(sdev->lun, &tmf->lun); + if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS)) + type &= ~IBMVFC_TMF_SUPPRESS_ABTS; + if (vhost->state == IBMVFC_ACTIVE) + tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID)); + else + tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID)); + tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata); + tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata); + + init_completion(&evt->comp); + + return evt; +} + +static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); + struct ibmvfc_event *evt, *found_evt, *temp; + struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs; + unsigned long flags; + int num_hwq, i; + int fail = 0; + LIST_HEAD(cancelq); + u16 status; + + ENTER; + spin_lock_irqsave(vhost->host->host_lock, flags); + num_hwq = vhost->scsi_scrqs.active_queues; + for (i = 0; i < num_hwq; i++) { + spin_lock(queues[i].q_lock); + spin_lock(&queues[i].l_lock); + found_evt = NULL; + list_for_each_entry(evt, &queues[i].sent, queue_list) { + if (evt->cmnd && evt->cmnd->device == sdev) { + found_evt = evt; + break; + } + } + spin_unlock(&queues[i].l_lock); + + if (found_evt && vhost->logged_in) { + evt = ibmvfc_init_tmf(&queues[i], sdev, type); + if (!evt) { + spin_unlock(queues[i].q_lock); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return -ENOMEM; + } + evt->sync_iu = &queues[i].cancel_rsp; + ibmvfc_send_event(evt, vhost, default_timeout); + list_add_tail(&evt->cancel, &cancelq); + } + + spin_unlock(queues[i].q_lock); + } + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (list_empty(&cancelq)) { + if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) + sdev_printk(KERN_INFO, sdev, "No events found to cancel\n"); + return 0; + } + + sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n"); + + list_for_each_entry_safe(evt, temp, &cancelq, cancel) { + wait_for_completion(&evt->comp); + status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status); + list_del(&evt->cancel); + ibmvfc_free_event(evt); + + if (status != IBMVFC_MAD_SUCCESS) { + sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status); + switch (status) { + case IBMVFC_MAD_DRIVER_FAILED: + case IBMVFC_MAD_CRQ_ERROR: + /* Host adapter most likely going through reset, return success so + * the caller will wait for the command being cancelled to get returned + */ + break; + default: + fail = 1; + break; + } + } + } + + if (fail) + return -EIO; + + sdev_printk(KERN_INFO, sdev, "Successfully 
cancelled outstanding commands\n"); + LEAVE; + return 0; +} + +static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); + struct ibmvfc_event *evt, *found_evt; + union ibmvfc_iu rsp; + int rsp_rc = -EBUSY; + unsigned long flags; + u16 status; + + ENTER; + found_evt = NULL; + spin_lock_irqsave(vhost->host->host_lock, flags); + spin_lock(&vhost->crq.l_lock); + list_for_each_entry(evt, &vhost->crq.sent, queue_list) { + if (evt->cmnd && evt->cmnd->device == sdev) { + found_evt = evt; + break; + } + } + spin_unlock(&vhost->crq.l_lock); + + if (!found_evt) { + if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) + sdev_printk(KERN_INFO, sdev, "No events found to cancel\n"); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return 0; + } + + if (vhost->logged_in) { + evt = ibmvfc_init_tmf(&vhost->crq, sdev, type); + evt->sync_iu = &rsp; + rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout); + } + + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (rsp_rc != 0) { + sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc); + /* If failure is received, the host adapter is most likely going + through reset, return success so the caller will wait for the command + being cancelled to get returned */ + return 0; + } + + sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n"); + + wait_for_completion(&evt->comp); + status = be16_to_cpu(rsp.mad_common.status); + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_free_event(evt); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (status != IBMVFC_MAD_SUCCESS) { + sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status); + switch (status) { + case IBMVFC_MAD_DRIVER_FAILED: + case IBMVFC_MAD_CRQ_ERROR: + /* Host adapter most likely going through reset, return success so + the caller will wait for the command being cancelled to get returned */ + return 0; + default: + return -EIO; + } + } + + sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n"); + return 0; +} + +/** + * ibmvfc_cancel_all - Cancel all outstanding commands to the device + * @sdev: scsi device to cancel commands + * @type: type of error recovery being performed + * + * This sends a cancel to the VIOS for the specified device. This does + * NOT send any abort to the actual device. That must be done separately. 
+ * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); + + if (vhost->mq_enabled && vhost->using_channels) + return ibmvfc_cancel_all_mq(sdev, type); + else + return ibmvfc_cancel_all_sq(sdev, type); +} + +/** + * ibmvfc_match_key - Match function for specified cancel key + * @evt: ibmvfc event struct + * @key: cancel key to match + * + * Returns: + * 1 if event matches key / 0 if event does not match key + **/ +static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key) +{ + unsigned long cancel_key = (unsigned long)key; + + if (evt->crq.format == IBMVFC_CMD_FORMAT && + be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key) + return 1; + return 0; +} + +/** + * ibmvfc_match_evt - Match function for specified event + * @evt: ibmvfc event struct + * @match: event to match + * + * Returns: + * 1 if event matches key / 0 if event does not match key + **/ +static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match) +{ + if (evt == match) + return 1; + return 0; +} + +/** + * ibmvfc_abort_task_set - Abort outstanding commands to the device + * @sdev: scsi device to abort commands + * + * This sends an Abort Task Set to the VIOS for the specified device. This does + * NOT send any cancel to the VIOS. That must be done separately. + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_abort_task_set(struct scsi_device *sdev) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct ibmvfc_cmd *tmf; + struct ibmvfc_event *evt, *found_evt; + union ibmvfc_iu rsp_iu; + struct ibmvfc_fcp_cmd_iu *iu; + struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd); + int rc, rsp_rc = -EBUSY; + unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT; + int rsp_code = 0; + + found_evt = NULL; + spin_lock_irqsave(vhost->host->host_lock, flags); + spin_lock(&vhost->crq.l_lock); + list_for_each_entry(evt, &vhost->crq.sent, queue_list) { + if (evt->cmnd && evt->cmnd->device == sdev) { + found_evt = evt; + break; + } + } + spin_unlock(&vhost->crq.l_lock); + + if (!found_evt) { + if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) + sdev_printk(KERN_INFO, sdev, "No events found to abort\n"); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return 0; + } + + if (vhost->state == IBMVFC_ACTIVE) { + evt = ibmvfc_get_event(&vhost->crq); + if (!evt) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return -ENOMEM; + } + ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT); + tmf = ibmvfc_init_vfc_cmd(evt, sdev); + iu = ibmvfc_get_fcp_iu(vhost, tmf); + + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) + tmf->target_wwpn = cpu_to_be64(rport->port_name); + iu->tmf_flags = IBMVFC_ABORT_TASK_SET; + tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF)); + evt->sync_iu = &rsp_iu; + + tmf->correlation = cpu_to_be64((u64)evt); + + init_completion(&evt->comp); + rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout); + } + + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (rsp_rc != 0) { + sdev_printk(KERN_ERR, sdev, "Failed to send abort. 
rc=%d\n", rsp_rc); + return -EIO; + } + + sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n"); + timeout = wait_for_completion_timeout(&evt->comp, timeout); + + if (!timeout) { + rc = ibmvfc_cancel_all(sdev, 0); + if (!rc) { + rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key); + if (rc == SUCCESS) + rc = 0; + } + + if (rc) { + sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n"); + ibmvfc_reset_host(vhost); + rsp_rc = -EIO; + rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key); + + if (rc == SUCCESS) + rsp_rc = 0; + + rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt); + if (rc != SUCCESS) { + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_hard_reset_host(vhost); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + rsp_rc = 0; + } + + goto out; + } + } + + if (rsp_iu.cmd.status) + rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd); + + if (rsp_code) { + if (fc_rsp->flags & FCP_RSP_LEN_VALID) + rsp_code = fc_rsp->data.info.rsp_code; + + sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) " + "flags: %x fcp_rsp: %x, scsi_status: %x\n", + ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), + be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code, + fc_rsp->scsi_status); + rsp_rc = -EIO; + } else + sdev_printk(KERN_INFO, sdev, "Abort successful\n"); + +out: + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_free_event(evt); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return rsp_rc; +} + +/** + * ibmvfc_eh_abort_handler - Abort a command + * @cmd: scsi command to abort + * + * Returns: + * SUCCESS / FAST_IO_FAIL / FAILED + **/ +static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) +{ + struct scsi_device *sdev = cmd->device; + struct ibmvfc_host *vhost = shost_priv(sdev->host); + int cancel_rc, block_rc; + int rc = FAILED; + + ENTER; + block_rc = fc_block_scsi_eh(cmd); + ibmvfc_wait_while_resetting(vhost); + if (block_rc != FAST_IO_FAIL) { + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); + ibmvfc_abort_task_set(sdev); + } else + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); + + if (!cancel_rc) + rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); + + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + + LEAVE; + return rc; +} + +/** + * ibmvfc_eh_device_reset_handler - Reset a single LUN + * @cmd: scsi command struct + * + * Returns: + * SUCCESS / FAST_IO_FAIL / FAILED + **/ +static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd) +{ + struct scsi_device *sdev = cmd->device; + struct ibmvfc_host *vhost = shost_priv(sdev->host); + int cancel_rc, block_rc, reset_rc = 0; + int rc = FAILED; + + ENTER; + block_rc = fc_block_scsi_eh(cmd); + ibmvfc_wait_while_resetting(vhost); + if (block_rc != FAST_IO_FAIL) { + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET); + reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN"); + } else + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); + + if (!cancel_rc && !reset_rc) + rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); + + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + + LEAVE; + return rc; +} + +/** + * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function + * @sdev: scsi device struct + * @data: return code + * + **/ +static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data) +{ + unsigned long *rc = data; + *rc |= 
ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); +} + +/** + * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function + * @sdev: scsi device struct + * @data: return code + * + **/ +static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data) +{ + unsigned long *rc = data; + *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET); +} + +/** + * ibmvfc_eh_target_reset_handler - Reset the target + * @cmd: scsi command struct + * + * Returns: + * SUCCESS / FAST_IO_FAIL / FAILED + **/ +static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) +{ + struct scsi_device *sdev = cmd->device; + struct ibmvfc_host *vhost = shost_priv(sdev->host); + struct scsi_target *starget = scsi_target(sdev); + int block_rc; + int reset_rc = 0; + int rc = FAILED; + unsigned long cancel_rc = 0; + + ENTER; + block_rc = fc_block_scsi_eh(cmd); + ibmvfc_wait_while_resetting(vhost); + if (block_rc != FAST_IO_FAIL) { + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); + reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); + } else + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset); + + if (!cancel_rc && !reset_rc) + rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target); + + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + + LEAVE; + return rc; +} + +/** + * ibmvfc_eh_host_reset_handler - Reset the connection to the server + * @cmd: struct scsi_cmnd having problems + * + **/ +static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd) +{ + int rc; + struct ibmvfc_host *vhost = shost_priv(cmd->device->host); + + dev_err(vhost->dev, "Resetting connection due to error recovery\n"); + rc = ibmvfc_issue_fc_host_lip(vhost->host); + + return rc ? FAILED : SUCCESS; +} + +/** + * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport. 
+ * @rport: rport struct + * + * Return value: + * none + **/ +static void ibmvfc_terminate_rport_io(struct fc_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + struct ibmvfc_host *vhost = shost_priv(shost); + struct fc_rport *dev_rport; + struct scsi_device *sdev; + struct ibmvfc_target *tgt; + unsigned long rc, flags; + unsigned int found; + + ENTER; + shost_for_each_device(sdev, shost) { + dev_rport = starget_to_rport(scsi_target(sdev)); + if (dev_rport != rport) + continue; + ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); + } + + rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport); + + if (rc == FAILED) + ibmvfc_issue_fc_host_lip(shost); + + spin_lock_irqsave(shost->host_lock, flags); + found = 0; + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->scsi_id == rport->port_id) { + found++; + break; + } + } + + if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) { + /* + * If we get here, that means we previously attempted to send + * an implicit logout to the target but it failed, most likely + * due to I/O being pending, so we need to send it again + */ + ibmvfc_del_tgt(tgt); + ibmvfc_reinit_host(vhost); + } + + spin_unlock_irqrestore(shost->host_lock, flags); + LEAVE; +} + +static const struct ibmvfc_async_desc ae_desc [] = { + { "PLOGI", IBMVFC_AE_ELS_PLOGI, IBMVFC_DEFAULT_LOG_LEVEL + 1 }, + { "LOGO", IBMVFC_AE_ELS_LOGO, IBMVFC_DEFAULT_LOG_LEVEL + 1 }, + { "PRLO", IBMVFC_AE_ELS_PRLO, IBMVFC_DEFAULT_LOG_LEVEL + 1 }, + { "N-Port SCN", IBMVFC_AE_SCN_NPORT, IBMVFC_DEFAULT_LOG_LEVEL + 1 }, + { "Group SCN", IBMVFC_AE_SCN_GROUP, IBMVFC_DEFAULT_LOG_LEVEL + 1 }, + { "Domain SCN", IBMVFC_AE_SCN_DOMAIN, IBMVFC_DEFAULT_LOG_LEVEL }, + { "Fabric SCN", IBMVFC_AE_SCN_FABRIC, IBMVFC_DEFAULT_LOG_LEVEL }, + { "Link Up", IBMVFC_AE_LINK_UP, IBMVFC_DEFAULT_LOG_LEVEL }, + { "Link Down", IBMVFC_AE_LINK_DOWN, IBMVFC_DEFAULT_LOG_LEVEL }, + { "Link Dead", IBMVFC_AE_LINK_DEAD, IBMVFC_DEFAULT_LOG_LEVEL }, + { "Halt", IBMVFC_AE_HALT, IBMVFC_DEFAULT_LOG_LEVEL }, + { "Resume", IBMVFC_AE_RESUME, IBMVFC_DEFAULT_LOG_LEVEL }, + { "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL }, +}; + +static const struct ibmvfc_async_desc unknown_ae = { + "Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL +}; + +/** + * ibmvfc_get_ae_desc - Get text description for async event + * @ae: async event + * + **/ +static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ae_desc); i++) + if (ae_desc[i].ae == ae) + return &ae_desc[i]; + + return &unknown_ae; +} + +static const struct { + enum ibmvfc_ae_link_state state; + const char *desc; +} link_desc [] = { + { IBMVFC_AE_LS_LINK_UP, " link up" }, + { IBMVFC_AE_LS_LINK_BOUNCED, " link bounced" }, + { IBMVFC_AE_LS_LINK_DOWN, " link down" }, + { IBMVFC_AE_LS_LINK_DEAD, " link dead" }, +}; + +/** + * ibmvfc_get_link_state - Get text description for link state + * @state: link state + * + **/ +static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(link_desc); i++) + if (link_desc[i].state == state) + return link_desc[i].desc; + + return ""; +} + +/** + * ibmvfc_handle_async - Handle an async event from the adapter + * @crq: crq to process + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, + struct ibmvfc_host *vhost) +{ + const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event)); + struct ibmvfc_target *tgt; + + ibmvfc_log(vhost, 
desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx," + " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id), + be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name), + ibmvfc_get_link_state(crq->link_state)); + + switch (be64_to_cpu(crq->event)) { + case IBMVFC_AE_RESUME: + switch (crq->link_state) { + case IBMVFC_AE_LS_LINK_DOWN: + ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); + break; + case IBMVFC_AE_LS_LINK_DEAD: + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + break; + case IBMVFC_AE_LS_LINK_UP: + case IBMVFC_AE_LS_LINK_BOUNCED: + default: + vhost->events_to_log |= IBMVFC_AE_LINKUP; + vhost->delay_init = 1; + __ibmvfc_reset_host(vhost); + break; + } + + break; + case IBMVFC_AE_LINK_UP: + vhost->events_to_log |= IBMVFC_AE_LINKUP; + vhost->delay_init = 1; + __ibmvfc_reset_host(vhost); + break; + case IBMVFC_AE_SCN_FABRIC: + case IBMVFC_AE_SCN_DOMAIN: + vhost->events_to_log |= IBMVFC_AE_RSCN; + if (vhost->state < IBMVFC_HALTED) { + vhost->delay_init = 1; + __ibmvfc_reset_host(vhost); + } + break; + case IBMVFC_AE_SCN_NPORT: + case IBMVFC_AE_SCN_GROUP: + vhost->events_to_log |= IBMVFC_AE_RSCN; + ibmvfc_reinit_host(vhost); + break; + case IBMVFC_AE_ELS_LOGO: + case IBMVFC_AE_ELS_PRLO: + case IBMVFC_AE_ELS_PLOGI: + list_for_each_entry(tgt, &vhost->targets, queue) { + if (!crq->scsi_id && !crq->wwpn && !crq->node_name) + break; + if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id) + continue; + if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn) + continue; + if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name) + continue; + if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO) + tgt->logo_rcvd = 1; + if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) { + ibmvfc_del_tgt(tgt); + ibmvfc_reinit_host(vhost); + } + } + break; + case IBMVFC_AE_LINK_DOWN: + case IBMVFC_AE_ADAPTER_FAILED: + ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); + break; + case IBMVFC_AE_LINK_DEAD: + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + break; + case IBMVFC_AE_HALT: + ibmvfc_link_down(vhost, IBMVFC_HALTED); + break; + default: + dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event); + break; + } +} + +/** + * ibmvfc_handle_crq - Handles and frees received events in the CRQ + * @crq: Command/Response queue + * @vhost: ibmvfc host struct + * @evt_doneq: Event done queue + * +**/ +static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost, + struct list_head *evt_doneq) +{ + long rc; + struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba); + + switch (crq->valid) { + case IBMVFC_CRQ_INIT_RSP: + switch (crq->format) { + case IBMVFC_CRQ_INIT: + dev_info(vhost->dev, "Partner initialized\n"); + /* Send back a response */ + rc = ibmvfc_send_crq_init_complete(vhost); + if (rc == 0) + ibmvfc_init_host(vhost); + else + dev_err(vhost->dev, "Unable to send init rsp. 
rc=%ld\n", rc); + break; + case IBMVFC_CRQ_INIT_COMPLETE: + dev_info(vhost->dev, "Partner initialization complete\n"); + ibmvfc_init_host(vhost); + break; + default: + dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format); + } + return; + case IBMVFC_CRQ_XPORT_EVENT: + vhost->state = IBMVFC_NO_CRQ; + vhost->logged_in = 0; + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); + if (crq->format == IBMVFC_PARTITION_MIGRATED) { + /* We need to re-setup the interpartition connection */ + dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n"); + vhost->client_migrated = 1; + + scsi_block_requests(vhost->host); + ibmvfc_purge_requests(vhost, DID_REQUEUE); + ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE); + wake_up(&vhost->work_wait_q); + } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) { + dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format); + ibmvfc_purge_requests(vhost, DID_ERROR); + ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET); + } else { + dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format); + } + return; + case IBMVFC_CRQ_CMD_RSP: + break; + default: + dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid); + return; + } + + if (crq->format == IBMVFC_ASYNC_EVENT) + return; + + /* The only kind of payload CRQs we should get are responses to + * things we send. Make sure this response is to something we + * actually sent + */ + if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) { + dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n", + crq->ioba); + return; + } + + if (unlikely(atomic_dec_if_positive(&evt->active))) { + dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n", + crq->ioba); + return; + } + + spin_lock(&evt->queue->l_lock); + list_move_tail(&evt->queue_list, evt_doneq); + spin_unlock(&evt->queue->l_lock); +} + +/** + * ibmvfc_scan_finished - Check if the device scan is done. + * @shost: scsi host struct + * @time: current elapsed time + * + * Returns: + * 0 if scan is not done / 1 if scan is done + **/ +static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + unsigned long flags; + struct ibmvfc_host *vhost = shost_priv(shost); + int done = 0; + + spin_lock_irqsave(shost->host_lock, flags); + if (!vhost->scan_timeout) + done = 1; + else if (time >= (vhost->scan_timeout * HZ)) { + dev_info(vhost->dev, "Scan taking longer than %d seconds, " + "continuing initialization\n", vhost->scan_timeout); + done = 1; + } + + if (vhost->scan_complete) { + vhost->scan_timeout = init_timeout; + done = 1; + } + spin_unlock_irqrestore(shost->host_lock, flags); + return done; +} + +/** + * ibmvfc_slave_alloc - Setup the device's task set value + * @sdev: struct scsi_device device to configure + * + * Set the device's task set value so that error handling works as + * expected. 
+ * + * Returns: + * 0 on success / -ENXIO if device does not exist + **/ +static int ibmvfc_slave_alloc(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct ibmvfc_host *vhost = shost_priv(shost); + unsigned long flags = 0; + + if (!rport || fc_remote_port_chkready(rport)) + return -ENXIO; + + spin_lock_irqsave(shost->host_lock, flags); + sdev->hostdata = (void *)(unsigned long)vhost->task_set++; + spin_unlock_irqrestore(shost->host_lock, flags); + return 0; +} + +/** + * ibmvfc_target_alloc - Setup the target's task set value + * @starget: struct scsi_target + * + * Set the target's task set value so that error handling works as + * expected. + * + * Returns: + * 0 on success / -ENXIO if device does not exist + **/ +static int ibmvfc_target_alloc(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ibmvfc_host *vhost = shost_priv(shost); + unsigned long flags = 0; + + spin_lock_irqsave(shost->host_lock, flags); + starget->hostdata = (void *)(unsigned long)vhost->task_set++; + spin_unlock_irqrestore(shost->host_lock, flags); + return 0; +} + +/** + * ibmvfc_slave_configure - Configure the device + * @sdev: struct scsi_device device to configure + * + * Enable allow_restart for a device if it is a disk. Adjust the + * queue_depth here also. + * + * Returns: + * 0 + **/ +static int ibmvfc_slave_configure(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + unsigned long flags = 0; + + spin_lock_irqsave(shost->host_lock, flags); + if (sdev->type == TYPE_DISK) { + sdev->allow_restart = 1; + blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); + } + spin_unlock_irqrestore(shost->host_lock, flags); + return 0; +} + +/** + * ibmvfc_change_queue_depth - Change the device's queue depth + * @sdev: scsi device struct + * @qdepth: depth to set + * + * Return value: + * actual depth set + **/ +static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + if (qdepth > IBMVFC_MAX_CMDS_PER_LUN) + qdepth = IBMVFC_MAX_CMDS_PER_LUN; + + return scsi_change_queue_depth(sdev, qdepth); +} + +static ssize_t ibmvfc_show_host_partition_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", + vhost->login_buf->resp.partition_name); +} + +static ssize_t ibmvfc_show_host_device_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", + vhost->login_buf->resp.device_name); +} + +static ssize_t ibmvfc_show_host_loc_code(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", + vhost->login_buf->resp.port_loc_code); +} + +static ssize_t ibmvfc_show_host_drc_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", + vhost->login_buf->resp.drc_name); +} + +static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host 
*vhost = shost_priv(shost); + return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version)); +} + +static ssize_t ibmvfc_show_host_capabilities(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities)); +} + +/** + * ibmvfc_show_log_level - Show the adapter's error logging level + * @dev: class device struct + * @attr: unused + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ibmvfc_show_log_level(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + unsigned long flags = 0; + int len; + + spin_lock_irqsave(shost->host_lock, flags); + len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level); + spin_unlock_irqrestore(shost->host_lock, flags); + return len; +} + +/** + * ibmvfc_store_log_level - Change the adapter's error logging level + * @dev: class device struct + * @attr: unused + * @buf: buffer + * @count: buffer size + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ibmvfc_store_log_level(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + unsigned long flags = 0; + + spin_lock_irqsave(shost->host_lock, flags); + vhost->log_level = simple_strtoul(buf, NULL, 10); + spin_unlock_irqrestore(shost->host_lock, flags); + return strlen(buf); +} + +static ssize_t ibmvfc_show_scsi_channels(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + unsigned long flags = 0; + int len; + + spin_lock_irqsave(shost->host_lock, flags); + len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->client_scsi_channels); + spin_unlock_irqrestore(shost->host_lock, flags); + return len; +} + +static ssize_t ibmvfc_store_scsi_channels(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + unsigned long flags = 0; + unsigned int channels; + + spin_lock_irqsave(shost->host_lock, flags); + channels = simple_strtoul(buf, NULL, 10); + vhost->client_scsi_channels = min(channels, nr_scsi_hw_queues); + ibmvfc_hard_reset_host(vhost); + spin_unlock_irqrestore(shost->host_lock, flags); + return strlen(buf); +} + +static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL); +static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL); +static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL); +static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL); +static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL); +static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL); +static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR, + ibmvfc_show_log_level, ibmvfc_store_log_level); +static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR, + ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels); + +#ifdef CONFIG_SCSI_IBMVFC_TRACE +/** + * ibmvfc_read_trace - Dump the adapter trace + * @filp: open sysfs file + * @kobj: kobject struct + * @bin_attr: 
bin_attribute struct + * @buf: buffer + * @off: offset + * @count: buffer size + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + unsigned long flags = 0; + int size = IBMVFC_TRACE_SIZE; + char *src = (char *)vhost->trace; + + if (off > size) + return 0; + if (off + count > size) { + size -= off; + count = size; + } + + spin_lock_irqsave(shost->host_lock, flags); + memcpy(buf, &src[off], count); + spin_unlock_irqrestore(shost->host_lock, flags); + return count; +} + +static struct bin_attribute ibmvfc_trace_attr = { + .attr = { + .name = "trace", + .mode = S_IRUGO, + }, + .size = 0, + .read = ibmvfc_read_trace, +}; +#endif + +static struct attribute *ibmvfc_host_attrs[] = { + &dev_attr_partition_name.attr, + &dev_attr_device_name.attr, + &dev_attr_port_loc_code.attr, + &dev_attr_drc_name.attr, + &dev_attr_npiv_version.attr, + &dev_attr_capabilities.attr, + &dev_attr_log_level.attr, + &dev_attr_nr_scsi_channels.attr, + NULL +}; + +ATTRIBUTE_GROUPS(ibmvfc_host); + +static const struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .name = "IBM POWER Virtual FC Adapter", + .proc_name = IBMVFC_NAME, + .queuecommand = ibmvfc_queuecommand, + .eh_timed_out = fc_eh_timed_out, + .eh_abort_handler = ibmvfc_eh_abort_handler, + .eh_device_reset_handler = ibmvfc_eh_device_reset_handler, + .eh_target_reset_handler = ibmvfc_eh_target_reset_handler, + .eh_host_reset_handler = ibmvfc_eh_host_reset_handler, + .slave_alloc = ibmvfc_slave_alloc, + .slave_configure = ibmvfc_slave_configure, + .target_alloc = ibmvfc_target_alloc, + .scan_finished = ibmvfc_scan_finished, + .change_queue_depth = ibmvfc_change_queue_depth, + .cmd_per_lun = 16, + .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = IBMVFC_MAX_SECTORS, + .shost_groups = ibmvfc_host_groups, + .track_queue_depth = 1, + .host_tagset = 1, +}; + +/** + * ibmvfc_next_async_crq - Returns the next entry in async queue + * @vhost: ibmvfc host struct + * + * Returns: + * Pointer to next entry in queue / NULL if empty + **/ +static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost) +{ + struct ibmvfc_queue *async_crq = &vhost->async_crq; + struct ibmvfc_async_crq *crq; + + crq = &async_crq->msgs.async[async_crq->cur]; + if (crq->valid & 0x80) { + if (++async_crq->cur == async_crq->size) + async_crq->cur = 0; + rmb(); + } else + crq = NULL; + + return crq; +} + +/** + * ibmvfc_next_crq - Returns the next entry in message queue + * @vhost: ibmvfc host struct + * + * Returns: + * Pointer to next entry in queue / NULL if empty + **/ +static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost) +{ + struct ibmvfc_queue *queue = &vhost->crq; + struct ibmvfc_crq *crq; + + crq = &queue->msgs.crq[queue->cur]; + if (crq->valid & 0x80) { + if (++queue->cur == queue->size) + queue->cur = 0; + rmb(); + } else + crq = NULL; + + return crq; +} + +/** + * ibmvfc_interrupt - Interrupt handler + * @irq: number of irq to handle, not used + * @dev_instance: ibmvfc_host that received interrupt + * + * Returns: + * IRQ_HANDLED + **/ +static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance) +{ + struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance; + unsigned long 
flags; + + spin_lock_irqsave(vhost->host->host_lock, flags); + vio_disable_interrupts(to_vio_dev(vhost->dev)); + tasklet_schedule(&vhost->tasklet); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return IRQ_HANDLED; +} + +/** + * ibmvfc_tasklet - Interrupt handler tasklet + * @data: ibmvfc host struct + * + * Returns: + * Nothing + **/ +static void ibmvfc_tasklet(void *data) +{ + struct ibmvfc_host *vhost = data; + struct vio_dev *vdev = to_vio_dev(vhost->dev); + struct ibmvfc_crq *crq; + struct ibmvfc_async_crq *async; + struct ibmvfc_event *evt, *temp; + unsigned long flags; + int done = 0; + LIST_HEAD(evt_doneq); + + spin_lock_irqsave(vhost->host->host_lock, flags); + spin_lock(vhost->crq.q_lock); + while (!done) { + /* Pull all the valid messages off the async CRQ */ + while ((async = ibmvfc_next_async_crq(vhost)) != NULL) { + ibmvfc_handle_async(async, vhost); + async->valid = 0; + wmb(); + } + + /* Pull all the valid messages off the CRQ */ + while ((crq = ibmvfc_next_crq(vhost)) != NULL) { + ibmvfc_handle_crq(crq, vhost, &evt_doneq); + crq->valid = 0; + wmb(); + } + + vio_enable_interrupts(vdev); + if ((async = ibmvfc_next_async_crq(vhost)) != NULL) { + vio_disable_interrupts(vdev); + ibmvfc_handle_async(async, vhost); + async->valid = 0; + wmb(); + } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) { + vio_disable_interrupts(vdev); + ibmvfc_handle_crq(crq, vhost, &evt_doneq); + crq->valid = 0; + wmb(); + } else + done = 1; + } + + spin_unlock(vhost->crq.q_lock); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) { + del_timer(&evt->timer); + list_del(&evt->queue_list); + ibmvfc_trc_end(evt); + evt->done(evt); + } +} + +static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable) +{ + struct device *dev = scrq->vhost->dev; + struct vio_dev *vdev = to_vio_dev(dev); + unsigned long rc; + int irq_action = H_ENABLE_VIO_INTERRUPT; + + if (!enable) + irq_action = H_DISABLE_VIO_INTERRUPT; + + rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action, + scrq->hw_irq, 0, 0); + + if (rc) + dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n", + enable ? "enable" : "disable", scrq->hwq_id, rc); + + return rc; +} + +static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost, + struct list_head *evt_doneq) +{ + struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba); + + switch (crq->valid) { + case IBMVFC_CRQ_CMD_RSP: + break; + case IBMVFC_CRQ_XPORT_EVENT: + return; + default: + dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid); + return; + } + + /* The only kind of payload CRQs we should get are responses to + * things we send. 
Make sure this response is to something we + * actually sent + */ + if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) { + dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n", + crq->ioba); + return; + } + + if (unlikely(atomic_dec_if_positive(&evt->active))) { + dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n", + crq->ioba); + return; + } + + spin_lock(&evt->queue->l_lock); + list_move_tail(&evt->queue_list, evt_doneq); + spin_unlock(&evt->queue->l_lock); +} + +static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq) +{ + struct ibmvfc_crq *crq; + + crq = &scrq->msgs.scrq[scrq->cur].crq; + if (crq->valid & 0x80) { + if (++scrq->cur == scrq->size) + scrq->cur = 0; + rmb(); + } else + crq = NULL; + + return crq; +} + +static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq) +{ + struct ibmvfc_crq *crq; + struct ibmvfc_event *evt, *temp; + unsigned long flags; + int done = 0; + LIST_HEAD(evt_doneq); + + spin_lock_irqsave(scrq->q_lock, flags); + while (!done) { + while ((crq = ibmvfc_next_scrq(scrq)) != NULL) { + ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq); + crq->valid = 0; + wmb(); + } + + ibmvfc_toggle_scrq_irq(scrq, 1); + if ((crq = ibmvfc_next_scrq(scrq)) != NULL) { + ibmvfc_toggle_scrq_irq(scrq, 0); + ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq); + crq->valid = 0; + wmb(); + } else + done = 1; + } + spin_unlock_irqrestore(scrq->q_lock, flags); + + list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) { + del_timer(&evt->timer); + list_del(&evt->queue_list); + ibmvfc_trc_end(evt); + evt->done(evt); + } +} + +static irqreturn_t ibmvfc_interrupt_scsi(int irq, void *scrq_instance) +{ + struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance; + + ibmvfc_toggle_scrq_irq(scrq, 0); + ibmvfc_drain_sub_crq(scrq); + + return IRQ_HANDLED; +} + +/** + * ibmvfc_init_tgt - Set the next init job step for the target + * @tgt: ibmvfc target struct + * @job_step: job step to perform + * + **/ +static void ibmvfc_init_tgt(struct ibmvfc_target *tgt, + void (*job_step) (struct ibmvfc_target *)) +{ + if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT)) + tgt->job_step = job_step; + wake_up(&tgt->vhost->work_wait_q); +} + +/** + * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization + * @tgt: ibmvfc target struct + * @job_step: initialization job step + * + * Returns: 1 if step will be retried / 0 if not + * + **/ +static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, + void (*job_step) (struct ibmvfc_target *)) +{ + if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) { + ibmvfc_del_tgt(tgt); + wake_up(&tgt->vhost->work_wait_q); + return 0; + } else + ibmvfc_init_tgt(tgt, job_step); + return 1; +} + +/* Defined in FC-LS */ +static const struct { + int code; + int retry; + int logged_in; +} prli_rsp [] = { + { 0, 1, 0 }, + { 1, 0, 1 }, + { 2, 1, 0 }, + { 3, 1, 0 }, + { 4, 0, 0 }, + { 5, 0, 0 }, + { 6, 0, 1 }, + { 7, 0, 0 }, + { 8, 1, 0 }, +}; + +/** + * ibmvfc_get_prli_rsp - Find PRLI response index + * @flags: PRLI response flags + * + **/ +static int ibmvfc_get_prli_rsp(u16 flags) +{ + int i; + int code = (flags & 0x0f00) >> 8; + + for (i = 0; i < ARRAY_SIZE(prli_rsp); i++) + if (prli_rsp[i].code == code) + return i; + + return 0; +} + +/** + * ibmvfc_tgt_prli_done - Completion handler for Process Login + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_target *tgt = evt->tgt; + struct ibmvfc_host *vhost = evt->vhost; 
+ struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; + struct ibmvfc_prli_svc_parms *parms = &rsp->parms; + u32 status = be16_to_cpu(rsp->common.status); + int index, level = IBMVFC_DEFAULT_LOG_LEVEL; + + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + switch (status) { + case IBMVFC_MAD_SUCCESS: + tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n", + parms->type, parms->flags, parms->service_parms); + + if (parms->type == IBMVFC_SCSI_FCP_TYPE) { + index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags)); + if (prli_rsp[index].logged_in) { + if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) { + tgt->need_login = 0; + tgt->ids.roles = 0; + if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC) + tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET; + if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC) + tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; + tgt->add_rport = 1; + } else + ibmvfc_del_tgt(tgt); + } else if (prli_rsp[index].retry) + ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); + else + ibmvfc_del_tgt(tgt); + } else + ibmvfc_del_tgt(tgt); + break; + case IBMVFC_MAD_DRIVER_FAILED: + break; + case IBMVFC_MAD_CRQ_ERROR: + ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); + break; + case IBMVFC_MAD_FAILED: + default: + if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) && + be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED) + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); + else if (tgt->logo_rcvd) + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); + else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); + else + ibmvfc_del_tgt(tgt); + + tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), + be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status); + break; + } + + kref_put(&tgt->kref, ibmvfc_release_tgt); + ibmvfc_free_event(evt); + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_tgt_send_prli - Send a process login + * @tgt: ibmvfc target struct + * + **/ +static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt) +{ + struct ibmvfc_process_login *prli; + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_event *evt; + + if (vhost->discovery_threads >= disc_threads) + return; + + kref_get(&tgt->kref); + evt = ibmvfc_get_event(&vhost->crq); + if (!evt) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + __ibmvfc_reset_host(vhost); + return; + } + vhost->discovery_threads++; + ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT); + evt->tgt = tgt; + prli = &evt->iu.prli; + memset(prli, 0, sizeof(*prli)); + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) { + prli->common.version = cpu_to_be32(2); + prli->target_wwpn = cpu_to_be64(tgt->wwpn); + } else { + prli->common.version = cpu_to_be32(1); + } + prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN); + prli->common.length = cpu_to_be16(sizeof(*prli)); + prli->scsi_id = cpu_to_be64(tgt->scsi_id); + + prli->parms.type = IBMVFC_SCSI_FCP_TYPE; + prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR); + prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC); + prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED); + + if (cls3_error) + prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY); + + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); + 
if (ibmvfc_send_event(evt, vhost, default_timeout)) { + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + } else + tgt_dbg(tgt, "Sent process login\n"); +} + +/** + * ibmvfc_tgt_plogi_done - Completion handler for Port Login + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_target *tgt = evt->tgt; + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; + u32 status = be16_to_cpu(rsp->common.status); + int level = IBMVFC_DEFAULT_LOG_LEVEL; + + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + switch (status) { + case IBMVFC_MAD_SUCCESS: + tgt_dbg(tgt, "Port Login succeeded\n"); + if (tgt->ids.port_name && + tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) { + vhost->reinit = 1; + tgt_dbg(tgt, "Port re-init required\n"); + break; + } + tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name); + tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name); + tgt->ids.port_id = tgt->scsi_id; + memcpy(&tgt->service_parms, &rsp->service_parms, + sizeof(tgt->service_parms)); + memcpy(&tgt->service_parms_change, &rsp->service_parms_change, + sizeof(tgt->service_parms_change)); + ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli); + break; + case IBMVFC_MAD_DRIVER_FAILED: + break; + case IBMVFC_MAD_CRQ_ERROR: + ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); + break; + case IBMVFC_MAD_FAILED: + default: + if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); + else + ibmvfc_del_tgt(tgt); + + tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), + be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), + ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type), + ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status); + break; + } + + kref_put(&tgt->kref, ibmvfc_release_tgt); + ibmvfc_free_event(evt); + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target + * @tgt: ibmvfc target struct + * + **/ +static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt) +{ + struct ibmvfc_port_login *plogi; + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_event *evt; + + if (vhost->discovery_threads >= disc_threads) + return; + + kref_get(&tgt->kref); + tgt->logo_rcvd = 0; + evt = ibmvfc_get_event(&vhost->crq); + if (!evt) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + __ibmvfc_reset_host(vhost); + return; + } + vhost->discovery_threads++; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); + ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT); + evt->tgt = tgt; + plogi = &evt->iu.plogi; + memset(plogi, 0, sizeof(*plogi)); + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) { + plogi->common.version = cpu_to_be32(2); + plogi->target_wwpn = cpu_to_be64(tgt->wwpn); + } else { + plogi->common.version = cpu_to_be32(1); + } + plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN); + plogi->common.length = cpu_to_be16(sizeof(*plogi)); + plogi->scsi_id = cpu_to_be64(tgt->scsi_id); + + if (ibmvfc_send_event(evt, vhost, default_timeout)) { + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + 
kref_put(&tgt->kref, ibmvfc_release_tgt); + } else + tgt_dbg(tgt, "Sent port login\n"); +} + +/** + * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_target *tgt = evt->tgt; + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout; + u32 status = be16_to_cpu(rsp->common.status); + + vhost->discovery_threads--; + ibmvfc_free_event(evt); + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + + switch (status) { + case IBMVFC_MAD_SUCCESS: + tgt_dbg(tgt, "Implicit Logout succeeded\n"); + break; + case IBMVFC_MAD_DRIVER_FAILED: + kref_put(&tgt->kref, ibmvfc_release_tgt); + wake_up(&vhost->work_wait_q); + return; + case IBMVFC_MAD_FAILED: + default: + tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status); + break; + } + + ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi); + kref_put(&tgt->kref, ibmvfc_release_tgt); + wake_up(&vhost->work_wait_q); +} + +/** + * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout + * @tgt: ibmvfc target struct + * @done: Routine to call when the event is responded to + * + * Returns: + * Allocated and initialized ibmvfc_event struct + **/ +static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt, + void (*done) (struct ibmvfc_event *)) +{ + struct ibmvfc_implicit_logout *mad; + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_event *evt; + + kref_get(&tgt->kref); + evt = ibmvfc_get_event(&vhost->crq); + if (!evt) + return NULL; + ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT); + evt->tgt = tgt; + mad = &evt->iu.implicit_logout; + memset(mad, 0, sizeof(*mad)); + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT); + mad->common.length = cpu_to_be16(sizeof(*mad)); + mad->old_scsi_id = cpu_to_be64(tgt->scsi_id); + return evt; +} + +/** + * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target + * @tgt: ibmvfc target struct + * + **/ +static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt) +{ + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_event *evt; + + if (vhost->discovery_threads >= disc_threads) + return; + + vhost->discovery_threads++; + evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt, + ibmvfc_tgt_implicit_logout_done); + if (!evt) { + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + __ibmvfc_reset_host(vhost); + return; + } + + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); + if (ibmvfc_send_event(evt, vhost, default_timeout)) { + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + } else + tgt_dbg(tgt, "Sent Implicit Logout\n"); +} + +/** + * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_target *tgt = evt->tgt; + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru; + u32 status = be16_to_cpu(mad->common.status); + + vhost->discovery_threads--; + ibmvfc_free_event(evt); + + /* + * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the + * driver in which case we need to free up all the targets. 
If we are + * not unloading, we will still go through a hard reset to get out of + * offline state, so there is no need to track the old targets in that + * case. + */ + if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE) + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + else + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT); + + tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed"); + kref_put(&tgt->kref, ibmvfc_release_tgt); + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target + * @tgt: ibmvfc target struct + * + **/ +static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt) +{ + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_event *evt; + + if (!vhost->logged_in) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + return; + } + + if (vhost->discovery_threads >= disc_threads) + return; + + vhost->discovery_threads++; + evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt, + ibmvfc_tgt_implicit_logout_and_del_done); + + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT); + if (ibmvfc_send_event(evt, vhost, default_timeout)) { + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + kref_put(&tgt->kref, ibmvfc_release_tgt); + } else + tgt_dbg(tgt, "Sent Implicit Logout\n"); +} + +/** + * ibmvfc_tgt_move_login_done - Completion handler for Move Login + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_target *tgt = evt->tgt; + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login; + u32 status = be16_to_cpu(rsp->common.status); + int level = IBMVFC_DEFAULT_LOG_LEVEL; + + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + switch (status) { + case IBMVFC_MAD_SUCCESS: + tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id); + tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name); + tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name); + tgt->scsi_id = tgt->new_scsi_id; + tgt->ids.port_id = tgt->scsi_id; + memcpy(&tgt->service_parms, &rsp->service_parms, + sizeof(tgt->service_parms)); + memcpy(&tgt->service_parms_change, &rsp->service_parms_change, + sizeof(tgt->service_parms_change)); + ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli); + break; + case IBMVFC_MAD_DRIVER_FAILED: + break; + case IBMVFC_MAD_CRQ_ERROR: + ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login); + break; + case IBMVFC_MAD_FAILED: + default: + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login); + + tgt_log(tgt, level, + "Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n", + tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags), + status); + break; + } + + kref_put(&tgt->kref, ibmvfc_release_tgt); + ibmvfc_free_event(evt); + wake_up(&vhost->work_wait_q); +} + + +/** + * ibmvfc_tgt_move_login - Initiate a move login for specified target + * @tgt: ibmvfc target struct + * + **/ +static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt) +{ + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_move_login *move; + struct ibmvfc_event *evt; + + if (vhost->discovery_threads >= disc_threads) + return; + + kref_get(&tgt->kref); + evt = ibmvfc_get_event(&vhost->crq); + if (!evt) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + 
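+		/*
+		 * No free event available: mark the target for deletion, drop
+		 * the reference taken above and reset the host to recover.
+		 */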
kref_put(&tgt->kref, ibmvfc_release_tgt); + __ibmvfc_reset_host(vhost); + return; + } + vhost->discovery_threads++; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); + ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT); + evt->tgt = tgt; + move = &evt->iu.move_login; + memset(move, 0, sizeof(*move)); + move->common.version = cpu_to_be32(1); + move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN); + move->common.length = cpu_to_be16(sizeof(*move)); + + move->old_scsi_id = cpu_to_be64(tgt->scsi_id); + move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id); + move->wwpn = cpu_to_be64(tgt->wwpn); + move->node_name = cpu_to_be64(tgt->ids.node_name); + + if (ibmvfc_send_event(evt, vhost, default_timeout)) { + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + kref_put(&tgt->kref, ibmvfc_release_tgt); + } else + tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id); +} + +/** + * ibmvfc_adisc_needs_plogi - Does device need PLOGI? + * @mad: ibmvfc passthru mad struct + * @tgt: ibmvfc target struct + * + * Returns: + * 1 if PLOGI needed / 0 if PLOGI not needed + **/ +static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad, + struct ibmvfc_target *tgt) +{ + if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name) + return 1; + if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name) + return 1; + if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id) + return 1; + return 0; +} + +/** + * ibmvfc_tgt_adisc_done - Completion handler for ADISC + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_target *tgt = evt->tgt; + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru; + u32 status = be16_to_cpu(mad->common.status); + u8 fc_reason, fc_explain; + + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + del_timer(&tgt->timer); + + switch (status) { + case IBMVFC_MAD_SUCCESS: + tgt_dbg(tgt, "ADISC succeeded\n"); + if (ibmvfc_adisc_needs_plogi(mad, tgt)) + ibmvfc_del_tgt(tgt); + break; + case IBMVFC_MAD_DRIVER_FAILED: + break; + case IBMVFC_MAD_FAILED: + default: + ibmvfc_del_tgt(tgt); + fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16; + fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8; + tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)), + be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error), + ibmvfc_get_fc_type(fc_reason), fc_reason, + ibmvfc_get_ls_explain(fc_explain), fc_explain, status); + break; + } + + kref_put(&tgt->kref, ibmvfc_release_tgt); + ibmvfc_free_event(evt); + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_init_passthru - Initialize an event struct for FC passthru + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_init_passthru(struct ibmvfc_event *evt) +{ + struct ibmvfc_passthru_mad *mad = &evt->iu.passthru; + + memset(mad, 0, sizeof(*mad)); + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU); + mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu)); + mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + + offsetof(struct ibmvfc_passthru_mad, iu)); + mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu)); + mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload)); + mad->iu.rsp_len = 
cpu_to_be32(sizeof(mad->fc_iu.response)); + mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + + offsetof(struct ibmvfc_passthru_mad, fc_iu) + + offsetof(struct ibmvfc_passthru_fc_iu, payload)); + mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload)); + mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + + offsetof(struct ibmvfc_passthru_mad, fc_iu) + + offsetof(struct ibmvfc_passthru_fc_iu, response)); + mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response)); +} + +/** + * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC + * @evt: ibmvfc event struct + * + * Just cleanup this event struct. Everything else is handled by + * the ADISC completion handler. If the ADISC never actually comes + * back, we still have the timer running on the ADISC event struct + * which will fire and cause the CRQ to get reset. + * + **/ +static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_target *tgt = evt->tgt; + + tgt_dbg(tgt, "ADISC cancel complete\n"); + vhost->abort_threads--; + ibmvfc_free_event(evt); + kref_put(&tgt->kref, ibmvfc_release_tgt); + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_adisc_timeout - Handle an ADISC timeout + * @t: ibmvfc target struct + * + * If an ADISC times out, send a cancel. If the cancel times + * out, reset the CRQ. When the ADISC comes back as cancelled, + * log back into the target. + **/ +static void ibmvfc_adisc_timeout(struct timer_list *t) +{ + struct ibmvfc_target *tgt = from_timer(tgt, t, timer); + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_event *evt; + struct ibmvfc_tmf *tmf; + unsigned long flags; + int rc; + + tgt_dbg(tgt, "ADISC timeout\n"); + spin_lock_irqsave(vhost->host->host_lock, flags); + if (vhost->abort_threads >= disc_threads || + tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT || + vhost->state != IBMVFC_INITIALIZING || + vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return; + } + + vhost->abort_threads++; + kref_get(&tgt->kref); + evt = ibmvfc_get_event(&vhost->crq); + if (!evt) { + tgt_err(tgt, "Failed to get cancel event for ADISC.\n"); + vhost->abort_threads--; + kref_put(&tgt->kref, ibmvfc_release_tgt); + __ibmvfc_reset_host(vhost); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return; + } + ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT); + + evt->tgt = tgt; + tmf = &evt->iu.tmf; + memset(tmf, 0, sizeof(*tmf)); + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) { + tmf->common.version = cpu_to_be32(2); + tmf->target_wwpn = cpu_to_be64(tgt->wwpn); + } else { + tmf->common.version = cpu_to_be32(1); + } + tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD); + tmf->common.length = cpu_to_be16(sizeof(*tmf)); + tmf->scsi_id = cpu_to_be64(tgt->scsi_id); + tmf->cancel_key = cpu_to_be32(tgt->cancel_key); + + rc = ibmvfc_send_event(evt, vhost, default_timeout); + + if (rc) { + tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc); + vhost->abort_threads--; + kref_put(&tgt->kref, ibmvfc_release_tgt); + __ibmvfc_reset_host(vhost); + } else + tgt_dbg(tgt, "Attempting to cancel ADISC\n"); + spin_unlock_irqrestore(vhost->host->host_lock, flags); +} + +/** + * ibmvfc_tgt_adisc - Initiate an ADISC for specified target + * @tgt: ibmvfc target struct + * + * When sending an ADISC we end up with two timers running. The + * first timer is the timer in the ibmvfc target struct. 
If this + * fires, we send a cancel to the target. The second timer is the + * timer on the ibmvfc event for the ADISC, which is longer. If that + * fires, it means the ADISC timed out and our attempt to cancel it + * also failed, so we need to reset the CRQ. + **/ +static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt) +{ + struct ibmvfc_passthru_mad *mad; + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_event *evt; + + if (vhost->discovery_threads >= disc_threads) + return; + + kref_get(&tgt->kref); + evt = ibmvfc_get_event(&vhost->crq); + if (!evt) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + __ibmvfc_reset_host(vhost); + return; + } + vhost->discovery_threads++; + ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT); + evt->tgt = tgt; + + ibmvfc_init_passthru(evt); + mad = &evt->iu.passthru; + mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS); + mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id); + mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key); + + mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC); + memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name, + sizeof(vhost->login_buf->resp.port_name)); + memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name, + sizeof(vhost->login_buf->resp.node_name)); + mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff); + + if (timer_pending(&tgt->timer)) + mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ)); + else { + tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ); + add_timer(&tgt->timer); + } + + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); + if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) { + vhost->discovery_threads--; + del_timer(&tgt->timer); + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + } else + tgt_dbg(tgt, "Sent ADISC\n"); +} + +/** + * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_target *tgt = evt->tgt; + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; + u32 status = be16_to_cpu(rsp->common.status); + int level = IBMVFC_DEFAULT_LOG_LEVEL; + + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + switch (status) { + case IBMVFC_MAD_SUCCESS: + tgt_dbg(tgt, "Query Target succeeded\n"); + if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id) + ibmvfc_del_tgt(tgt); + else + ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc); + break; + case IBMVFC_MAD_DRIVER_FAILED: + break; + case IBMVFC_MAD_CRQ_ERROR: + ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); + break; + case IBMVFC_MAD_FAILED: + default: + if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && + be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ && + be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG) + ibmvfc_del_tgt(tgt); + else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); + else + ibmvfc_del_tgt(tgt); + + tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), + be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), + ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type), + 
ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), + status); + break; + } + + kref_put(&tgt->kref, ibmvfc_release_tgt); + ibmvfc_free_event(evt); + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_tgt_query_target - Initiate a Query Target for specified target + * @tgt: ibmvfc target struct + * + **/ +static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt) +{ + struct ibmvfc_query_tgt *query_tgt; + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_event *evt; + + if (vhost->discovery_threads >= disc_threads) + return; + + kref_get(&tgt->kref); + evt = ibmvfc_get_event(&vhost->crq); + if (!evt) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + __ibmvfc_reset_host(vhost); + return; + } + vhost->discovery_threads++; + evt->tgt = tgt; + ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT); + query_tgt = &evt->iu.query_tgt; + memset(query_tgt, 0, sizeof(*query_tgt)); + query_tgt->common.version = cpu_to_be32(1); + query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET); + query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt)); + query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name); + + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); + if (ibmvfc_send_event(evt, vhost, default_timeout)) { + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + } else + tgt_dbg(tgt, "Sent Query Target\n"); +} + +/** + * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target + * @vhost: ibmvfc host struct + * @target: Holds SCSI ID to allocate target forand the WWPN + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, + struct ibmvfc_discover_targets_entry *target) +{ + struct ibmvfc_target *stgt = NULL; + struct ibmvfc_target *wtgt = NULL; + struct ibmvfc_target *tgt; + unsigned long flags; + u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK; + u64 wwpn = be64_to_cpu(target->wwpn); + + /* Look to see if we already have a target allocated for this SCSI ID or WWPN */ + spin_lock_irqsave(vhost->host->host_lock, flags); + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->wwpn == wwpn) { + wtgt = tgt; + break; + } + } + + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->scsi_id == scsi_id) { + stgt = tgt; + break; + } + } + + if (wtgt && !stgt) { + /* + * A WWPN target has moved and we still are tracking the old + * SCSI ID. The only way we should be able to get here is if + * we attempted to send an implicit logout for the old SCSI ID + * and it failed for some reason, such as there being I/O + * pending to the target. In this case, we will have already + * deleted the rport from the FC transport so we do a move + * login, which works even with I/O pending, however, if + * there is still I/O pending, it will stay outstanding, so + * we only do this if fast fail is disabled for the rport, + * otherwise we let terminate_rport_io clean up the port + * before we login at the new location. + */ + if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) { + if (wtgt->move_login) { + /* + * Do a move login here. The old target is no longer + * known to the transport layer We don't use the + * normal ibmvfc_set_tgt_action to set this, as we + * don't normally want to allow this state change. 
+ */ + wtgt->new_scsi_id = scsi_id; + wtgt->action = IBMVFC_TGT_ACTION_INIT; + wtgt->init_retries = 0; + ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login); + } + goto unlock_out; + } else { + tgt_err(wtgt, "Unexpected target state: %d, %p\n", + wtgt->action, wtgt->rport); + } + } else if (stgt) { + if (tgt->need_login) + ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); + goto unlock_out; + } + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO); + memset(tgt, 0, sizeof(*tgt)); + tgt->scsi_id = scsi_id; + tgt->wwpn = wwpn; + tgt->vhost = vhost; + tgt->need_login = 1; + timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0); + kref_init(&tgt->kref); + ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); + spin_lock_irqsave(vhost->host->host_lock, flags); + tgt->cancel_key = vhost->task_set++; + list_add_tail(&tgt->queue, &vhost->targets); + +unlock_out: + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return 0; +} + +/** + * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets + * @vhost: ibmvfc host struct + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost) +{ + int i, rc; + + for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++) + rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]); + + return rc; +} + +/** + * ibmvfc_discover_targets_done - Completion handler for discover targets MAD + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; + u32 mad_status = be16_to_cpu(rsp->common.status); + int level = IBMVFC_DEFAULT_LOG_LEVEL; + + switch (mad_status) { + case IBMVFC_MAD_SUCCESS: + ibmvfc_dbg(vhost, "Discover Targets succeeded\n"); + vhost->num_targets = be32_to_cpu(rsp->num_written); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); + break; + case IBMVFC_MAD_FAILED: + level += ibmvfc_retry_host_init(vhost); + ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", + ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), + be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)); + break; + case IBMVFC_MAD_DRIVER_FAILED: + break; + default: + dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status); + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + break; + } + + ibmvfc_free_event(evt); + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_discover_targets - Send Discover Targets MAD + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_discover_targets(struct ibmvfc_host *vhost) +{ + struct ibmvfc_discover_targets *mad; + struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); + int level = IBMVFC_DEFAULT_LOG_LEVEL; + + if (!evt) { + ibmvfc_log(vhost, level, "Discover Targets failed: no available events\n"); + ibmvfc_hard_reset_host(vhost); + return; + } + + ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT); + mad = &evt->iu.discover_targets; + memset(mad, 0, sizeof(*mad)); + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS); + mad->common.length = cpu_to_be16(sizeof(*mad)); + mad->bufflen = cpu_to_be32(vhost->disc_buf_sz); + mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma); + mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz); + mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); + + if 
(!ibmvfc_send_event(evt, vhost, default_timeout)) + ibmvfc_dbg(vhost, "Sent discover targets\n"); + else + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); +} + +static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf; + struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs; + u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status); + int level = IBMVFC_DEFAULT_LOG_LEVEL; + int flags, active_queues, i; + + ibmvfc_free_event(evt); + + switch (mad_status) { + case IBMVFC_MAD_SUCCESS: + ibmvfc_dbg(vhost, "Channel Setup succeeded\n"); + flags = be32_to_cpu(setup->flags); + vhost->do_enquiry = 0; + active_queues = be32_to_cpu(setup->num_scsi_subq_channels); + scrqs->active_queues = active_queues; + + if (flags & IBMVFC_CHANNELS_CANCELED) { + ibmvfc_dbg(vhost, "Channels Canceled\n"); + vhost->using_channels = 0; + } else { + if (active_queues) + vhost->using_channels = 1; + for (i = 0; i < active_queues; i++) + scrqs->scrqs[i].vios_cookie = + be64_to_cpu(setup->channel_handles[i]); + + ibmvfc_dbg(vhost, "Using %u channels\n", + vhost->scsi_scrqs.active_queues); + } + break; + case IBMVFC_MAD_FAILED: + level += ibmvfc_retry_host_init(vhost); + ibmvfc_log(vhost, level, "Channel Setup failed\n"); + fallthrough; + case IBMVFC_MAD_DRIVER_FAILED: + return; + default: + dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n", + mad_status); + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + return; + } + + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); + wake_up(&vhost->work_wait_q); +} + +static void ibmvfc_channel_setup(struct ibmvfc_host *vhost) +{ + struct ibmvfc_channel_setup_mad *mad; + struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf; + struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); + struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs; + unsigned int num_channels = + min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels); + int level = IBMVFC_DEFAULT_LOG_LEVEL; + int i; + + if (!evt) { + ibmvfc_log(vhost, level, "Channel Setup failed: no available events\n"); + ibmvfc_hard_reset_host(vhost); + return; + } + + memset(setup_buf, 0, sizeof(*setup_buf)); + if (num_channels == 0) + setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS); + else { + setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels); + for (i = 0; i < num_channels; i++) + setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie); + } + + ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT); + mad = &evt->iu.channel_setup; + memset(mad, 0, sizeof(*mad)); + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP); + mad->common.length = cpu_to_be16(sizeof(*mad)); + mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma); + mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf)); + + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); + + if (!ibmvfc_send_event(evt, vhost, default_timeout)) + ibmvfc_dbg(vhost, "Sent channel setup\n"); + else + ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); +} + +static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry; + u32 mad_status = be16_to_cpu(rsp->common.status); + int level = IBMVFC_DEFAULT_LOG_LEVEL; + + switch (mad_status) { + case IBMVFC_MAD_SUCCESS: + ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n"); + 
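+		/*
+		 * Record how many SCSI sub-queue channels the VIOS supports;
+		 * ibmvfc_channel_setup() takes the min of this and the client
+		 * request when building the Channel Setup MAD.
+		 */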
vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels); + ibmvfc_free_event(evt); + break; + case IBMVFC_MAD_FAILED: + level += ibmvfc_retry_host_init(vhost); + ibmvfc_log(vhost, level, "Channel Enquiry failed\n"); + fallthrough; + case IBMVFC_MAD_DRIVER_FAILED: + ibmvfc_free_event(evt); + return; + default: + dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n", + mad_status); + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + ibmvfc_free_event(evt); + return; + } + + ibmvfc_channel_setup(vhost); +} + +static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost) +{ + struct ibmvfc_channel_enquiry *mad; + struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); + int level = IBMVFC_DEFAULT_LOG_LEVEL; + + if (!evt) { + ibmvfc_log(vhost, level, "Channel Enquiry failed: no available events\n"); + ibmvfc_hard_reset_host(vhost); + return; + } + + ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT); + mad = &evt->iu.channel_enquiry; + memset(mad, 0, sizeof(*mad)); + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY); + mad->common.length = cpu_to_be16(sizeof(*mad)); + + if (mig_channels_only) + mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT); + if (mig_no_less_channels) + mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT); + + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); + + if (!ibmvfc_send_event(evt, vhost, default_timeout)) + ibmvfc_dbg(vhost, "Send channel enquiry\n"); + else + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); +} + +/** + * ibmvfc_npiv_login_done - Completion handler for NPIV Login + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status); + struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; + unsigned int npiv_max_sectors; + int level = IBMVFC_DEFAULT_LOG_LEVEL; + + switch (mad_status) { + case IBMVFC_MAD_SUCCESS: + ibmvfc_free_event(evt); + break; + case IBMVFC_MAD_FAILED: + if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) + level += ibmvfc_retry_host_init(vhost); + else + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", + ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), + be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)); + ibmvfc_free_event(evt); + return; + case IBMVFC_MAD_CRQ_ERROR: + ibmvfc_retry_host_init(vhost); + fallthrough; + case IBMVFC_MAD_DRIVER_FAILED: + ibmvfc_free_event(evt); + return; + default: + dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status); + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + ibmvfc_free_event(evt); + return; + } + + vhost->client_migrated = 0; + + if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) { + dev_err(vhost->dev, "Virtual adapter does not support FC. 
%x\n", + rsp->flags); + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + wake_up(&vhost->work_wait_q); + return; + } + + if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) { + dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n", + rsp->max_cmds); + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + wake_up(&vhost->work_wait_q); + return; + } + + vhost->logged_in = 1; + npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS); + dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", + rsp->partition_name, rsp->device_name, rsp->port_loc_code, + rsp->drc_name, npiv_max_sectors); + + fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name); + fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name); + fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name); + fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id); + fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV; + fc_host_supported_classes(vhost->host) = 0; + if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000) + fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1; + if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000) + fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2; + if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000) + fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3; + fc_host_maxframe_size(vhost->host) = + be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff; + + vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ; + vhost->host->max_sectors = npiv_max_sectors; + + if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) { + ibmvfc_channel_enquiry(vhost); + } else { + vhost->do_enquiry = 0; + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); + wake_up(&vhost->work_wait_q); + } +} + +/** + * ibmvfc_npiv_login - Sends NPIV login + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_npiv_login(struct ibmvfc_host *vhost) +{ + struct ibmvfc_npiv_login_mad *mad; + struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); + + if (!evt) { + ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n"); + ibmvfc_hard_reset_host(vhost); + return; + } + + ibmvfc_gather_partition_info(vhost); + ibmvfc_set_login_info(vhost); + ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT); + + memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info)); + mad = &evt->iu.npiv_login; + memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad)); + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN); + mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad)); + mad->buffer.va = cpu_to_be64(vhost->login_buf_dma); + mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf)); + + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); + + if (!ibmvfc_send_event(evt, vhost, default_timeout)) + ibmvfc_dbg(vhost, "Sent NPIV login\n"); + else + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); +} + +/** + * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status); + + ibmvfc_free_event(evt); + + switch (mad_status) { + case IBMVFC_MAD_SUCCESS: + if (list_empty(&vhost->crq.sent) && + vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) { + ibmvfc_init_host(vhost); + 
return; + } + break; + case IBMVFC_MAD_FAILED: + case IBMVFC_MAD_NOT_SUPPORTED: + case IBMVFC_MAD_CRQ_ERROR: + case IBMVFC_MAD_DRIVER_FAILED: + default: + ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status); + break; + } + + ibmvfc_hard_reset_host(vhost); +} + +/** + * ibmvfc_npiv_logout - Issue an NPIV Logout + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost) +{ + struct ibmvfc_npiv_logout_mad *mad; + struct ibmvfc_event *evt; + + evt = ibmvfc_get_event(&vhost->crq); + if (!evt) { + ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n"); + ibmvfc_hard_reset_host(vhost); + return; + } + + ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT); + + mad = &evt->iu.npiv_logout; + memset(mad, 0, sizeof(*mad)); + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT); + mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad)); + + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT); + + if (!ibmvfc_send_event(evt, vhost, default_timeout)) + ibmvfc_dbg(vhost, "Sent NPIV logout\n"); + else + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); +} + +/** + * ibmvfc_dev_init_to_do - Is there target initialization work to do? + * @vhost: ibmvfc host struct + * + * Returns: + * 1 if work to do / 0 if not + **/ +static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost) +{ + struct ibmvfc_target *tgt; + + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->action == IBMVFC_TGT_ACTION_INIT || + tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) + return 1; + } + + return 0; +} + +/** + * ibmvfc_dev_logo_to_do - Is there target logout work to do? + * @vhost: ibmvfc host struct + * + * Returns: + * 1 if work to do / 0 if not + **/ +static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost) +{ + struct ibmvfc_target *tgt; + + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT || + tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT) + return 1; + } + return 0; +} + +/** + * __ibmvfc_work_to_do - Is there task level work to do? 
(no locking) + * @vhost: ibmvfc host struct + * + * Returns: + * 1 if work to do / 0 if not + **/ +static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost) +{ + struct ibmvfc_target *tgt; + + if (kthread_should_stop()) + return 1; + switch (vhost->action) { + case IBMVFC_HOST_ACTION_NONE: + case IBMVFC_HOST_ACTION_INIT_WAIT: + case IBMVFC_HOST_ACTION_LOGO_WAIT: + return 0; + case IBMVFC_HOST_ACTION_TGT_INIT: + case IBMVFC_HOST_ACTION_QUERY_TGTS: + if (vhost->discovery_threads == disc_threads) + return 0; + list_for_each_entry(tgt, &vhost->targets, queue) + if (tgt->action == IBMVFC_TGT_ACTION_INIT) + return 1; + list_for_each_entry(tgt, &vhost->targets, queue) + if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) + return 0; + return 1; + case IBMVFC_HOST_ACTION_TGT_DEL: + case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: + if (vhost->discovery_threads == disc_threads) + return 0; + list_for_each_entry(tgt, &vhost->targets, queue) + if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) + return 1; + list_for_each_entry(tgt, &vhost->targets, queue) + if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT) + return 0; + return 1; + case IBMVFC_HOST_ACTION_LOGO: + case IBMVFC_HOST_ACTION_INIT: + case IBMVFC_HOST_ACTION_ALLOC_TGTS: + case IBMVFC_HOST_ACTION_QUERY: + case IBMVFC_HOST_ACTION_RESET: + case IBMVFC_HOST_ACTION_REENABLE: + default: + break; + } + + return 1; +} + +/** + * ibmvfc_work_to_do - Is there task level work to do? + * @vhost: ibmvfc host struct + * + * Returns: + * 1 if work to do / 0 if not + **/ +static int ibmvfc_work_to_do(struct ibmvfc_host *vhost) +{ + unsigned long flags; + int rc; + + spin_lock_irqsave(vhost->host->host_lock, flags); + rc = __ibmvfc_work_to_do(vhost); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return rc; +} + +/** + * ibmvfc_log_ae - Log async events if necessary + * @vhost: ibmvfc host struct + * @events: events to log + * + **/ +static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events) +{ + if (events & IBMVFC_AE_RSCN) + fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0); + if ((events & IBMVFC_AE_LINKDOWN) && + vhost->state >= IBMVFC_HALTED) + fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); + if ((events & IBMVFC_AE_LINKUP) && + vhost->state == IBMVFC_INITIALIZING) + fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0); +} + +/** + * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port + * @tgt: ibmvfc target struct + * + **/ +static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) +{ + struct ibmvfc_host *vhost = tgt->vhost; + struct fc_rport *rport; + unsigned long flags; + + tgt_dbg(tgt, "Adding rport\n"); + rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); + spin_lock_irqsave(vhost->host->host_lock, flags); + + if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) { + tgt_dbg(tgt, "Deleting rport\n"); + list_del(&tgt->queue); + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + fc_remote_port_delete(rport); + del_timer_sync(&tgt->timer); + kref_put(&tgt->kref, ibmvfc_release_tgt); + return; + } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) { + tgt_dbg(tgt, "Deleting rport with outstanding I/O\n"); + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT); + tgt->rport = NULL; + tgt->init_retries = 0; + spin_unlock_irqrestore(vhost->host->host_lock, flags); + fc_remote_port_delete(rport); + return; + } else if (rport && tgt->action == 
IBMVFC_TGT_ACTION_DELETED_RPORT) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return; + } + + if (rport) { + tgt_dbg(tgt, "rport add succeeded\n"); + tgt->rport = rport; + rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff; + rport->supported_classes = 0; + tgt->target_id = rport->scsi_target_id; + if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000) + rport->supported_classes |= FC_COS_CLASS1; + if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000) + rport->supported_classes |= FC_COS_CLASS2; + if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000) + rport->supported_classes |= FC_COS_CLASS3; + if (rport->rqst_q) + blk_queue_max_segments(rport->rqst_q, 1); + } else + tgt_dbg(tgt, "rport add failed\n"); + spin_unlock_irqrestore(vhost->host->host_lock, flags); +} + +/** + * ibmvfc_do_work - Do task level work + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_do_work(struct ibmvfc_host *vhost) +{ + struct ibmvfc_target *tgt; + unsigned long flags; + struct fc_rport *rport; + LIST_HEAD(purge); + int rc; + + ibmvfc_log_ae(vhost, vhost->events_to_log); + spin_lock_irqsave(vhost->host->host_lock, flags); + vhost->events_to_log = 0; + switch (vhost->action) { + case IBMVFC_HOST_ACTION_NONE: + case IBMVFC_HOST_ACTION_LOGO_WAIT: + case IBMVFC_HOST_ACTION_INIT_WAIT: + break; + case IBMVFC_HOST_ACTION_RESET: + list_splice_init(&vhost->purge, &purge); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_complete_purge(&purge); + rc = ibmvfc_reset_crq(vhost); + + spin_lock_irqsave(vhost->host->host_lock, flags); + if (!rc || rc == H_CLOSED) + vio_enable_interrupts(to_vio_dev(vhost->dev)); + if (vhost->action == IBMVFC_HOST_ACTION_RESET) { + /* + * The only action we could have changed to would have + * been reenable, in which case, we skip the rest of + * this path and wait until we've done the re-enable + * before sending the crq init. + */ + vhost->action = IBMVFC_HOST_ACTION_TGT_DEL; + + if (rc || (rc = ibmvfc_send_crq_init(vhost)) || + (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) { + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc); + } + } + break; + case IBMVFC_HOST_ACTION_REENABLE: + list_splice_init(&vhost->purge, &purge); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_complete_purge(&purge); + rc = ibmvfc_reenable_crq_queue(vhost); + + spin_lock_irqsave(vhost->host->host_lock, flags); + if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) { + /* + * The only action we could have changed to would have + * been reset, in which case, we skip the rest of this + * path and wait until we've done the reset before + * sending the crq init. 
+ */ + vhost->action = IBMVFC_HOST_ACTION_TGT_DEL; + if (rc || (rc = ibmvfc_send_crq_init(vhost))) { + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc); + } + } + break; + case IBMVFC_HOST_ACTION_LOGO: + vhost->job_step(vhost); + break; + case IBMVFC_HOST_ACTION_INIT: + BUG_ON(vhost->state != IBMVFC_INITIALIZING); + if (vhost->delay_init) { + vhost->delay_init = 0; + spin_unlock_irqrestore(vhost->host->host_lock, flags); + ssleep(15); + return; + } else + vhost->job_step(vhost); + break; + case IBMVFC_HOST_ACTION_QUERY: + list_for_each_entry(tgt, &vhost->targets, queue) + ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS); + break; + case IBMVFC_HOST_ACTION_QUERY_TGTS: + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->action == IBMVFC_TGT_ACTION_INIT) { + tgt->job_step(tgt); + break; + } + } + + if (!ibmvfc_dev_init_to_do(vhost)) + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL); + break; + case IBMVFC_HOST_ACTION_TGT_DEL: + case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) { + tgt->job_step(tgt); + break; + } + } + + if (ibmvfc_dev_logo_to_do(vhost)) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return; + } + + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) { + tgt_dbg(tgt, "Deleting rport\n"); + rport = tgt->rport; + tgt->rport = NULL; + list_del(&tgt->queue); + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + if (rport) + fc_remote_port_delete(rport); + del_timer_sync(&tgt->timer); + kref_put(&tgt->kref, ibmvfc_release_tgt); + return; + } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) { + tgt_dbg(tgt, "Deleting rport with I/O outstanding\n"); + rport = tgt->rport; + tgt->rport = NULL; + tgt->init_retries = 0; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT); + + /* + * If fast fail is enabled, we wait for it to fire and then clean up + * the old port, since we expect the fast fail timer to clean up the + * outstanding I/O faster than waiting for normal command timeouts. + * However, if fast fail is disabled, any I/O outstanding to the + * rport LUNs will stay outstanding indefinitely, since the EH handlers + * won't get invoked for I/O's timing out. If this is a NPIV failover + * scenario, the better alternative is to use the move login. 
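+				 * The move_login flag set below is consumed by
+				 * ibmvfc_alloc_target() if the same WWPN is later
+				 * rediscovered under a new SCSI ID.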
+ */ + if (rport && rport->fast_io_fail_tmo == -1) + tgt->move_login = 1; + spin_unlock_irqrestore(vhost->host->host_lock, flags); + if (rport) + fc_remote_port_delete(rport); + return; + } + } + + if (vhost->state == IBMVFC_INITIALIZING) { + if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) { + if (vhost->reinit) { + vhost->reinit = 0; + scsi_block_requests(vhost->host); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + } else { + ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); + wake_up(&vhost->init_wait_q); + schedule_work(&vhost->rport_add_work_q); + vhost->init_retries = 0; + spin_unlock_irqrestore(vhost->host->host_lock, flags); + scsi_unblock_requests(vhost->host); + } + + return; + } else { + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); + vhost->job_step = ibmvfc_discover_targets; + } + } else { + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + scsi_unblock_requests(vhost->host); + wake_up(&vhost->init_wait_q); + return; + } + break; + case IBMVFC_HOST_ACTION_ALLOC_TGTS: + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_alloc_targets(vhost); + spin_lock_irqsave(vhost->host->host_lock, flags); + break; + case IBMVFC_HOST_ACTION_TGT_INIT: + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->action == IBMVFC_TGT_ACTION_INIT) { + tgt->job_step(tgt); + break; + } + } + + if (!ibmvfc_dev_init_to_do(vhost)) + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED); + break; + default: + break; + } + + spin_unlock_irqrestore(vhost->host->host_lock, flags); +} + +/** + * ibmvfc_work - Do task level work + * @data: ibmvfc host struct + * + * Returns: + * zero + **/ +static int ibmvfc_work(void *data) +{ + struct ibmvfc_host *vhost = data; + int rc; + + set_user_nice(current, MIN_NICE); + + while (1) { + rc = wait_event_interruptible(vhost->work_wait_q, + ibmvfc_work_to_do(vhost)); + + BUG_ON(rc); + + if (kthread_should_stop()) + break; + + ibmvfc_do_work(vhost); + } + + ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n"); + return 0; +} + +/** + * ibmvfc_alloc_queue - Allocate queue + * @vhost: ibmvfc host struct + * @queue: ibmvfc queue to allocate + * @fmt: queue format to allocate + * + * Returns: + * 0 on success / non-zero on failure + **/ +static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost, + struct ibmvfc_queue *queue, + enum ibmvfc_msg_fmt fmt) +{ + struct device *dev = vhost->dev; + size_t fmt_size; + unsigned int pool_size = 0; + + ENTER; + spin_lock_init(&queue->_lock); + queue->q_lock = &queue->_lock; + + switch (fmt) { + case IBMVFC_CRQ_FMT: + fmt_size = sizeof(*queue->msgs.crq); + pool_size = max_requests + IBMVFC_NUM_INTERNAL_REQ; + break; + case IBMVFC_ASYNC_FMT: + fmt_size = sizeof(*queue->msgs.async); + break; + case IBMVFC_SUB_CRQ_FMT: + fmt_size = sizeof(*queue->msgs.scrq); + /* We need one extra event for Cancel Commands */ + pool_size = max_requests + 1; + break; + default: + dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt); + return -EINVAL; + } + + if (ibmvfc_init_event_pool(vhost, queue, pool_size)) { + dev_err(dev, "Couldn't initialize event pool.\n"); + return -ENOMEM; + } + + queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL); + if (!queue->msgs.handle) + return -ENOMEM; + + queue->msg_token = dma_map_single(dev, queue->msgs.handle, 
PAGE_SIZE, + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(dev, queue->msg_token)) { + free_page((unsigned long)queue->msgs.handle); + queue->msgs.handle = NULL; + return -ENOMEM; + } + + queue->cur = 0; + queue->fmt = fmt; + queue->size = PAGE_SIZE / fmt_size; + + queue->vhost = vhost; + return 0; +} + +/** + * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor + * @vhost: ibmvfc host struct + * + * Allocates a page for messages, maps it for dma, and registers + * the crq with the hypervisor. + * + * Return value: + * zero on success / other on failure + **/ +static int ibmvfc_init_crq(struct ibmvfc_host *vhost) +{ + int rc, retrc = -ENOMEM; + struct device *dev = vhost->dev; + struct vio_dev *vdev = to_vio_dev(dev); + struct ibmvfc_queue *crq = &vhost->crq; + + ENTER; + if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT)) + return -ENOMEM; + + retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, + crq->msg_token, PAGE_SIZE); + + if (rc == H_RESOURCE) + /* maybe kexecing and resource is busy. try a reset */ + retrc = rc = ibmvfc_reset_crq(vhost); + + if (rc == H_CLOSED) + dev_warn(dev, "Partner adapter not ready\n"); + else if (rc) { + dev_warn(dev, "Error %d opening adapter\n", rc); + goto reg_crq_failed; + } + + retrc = 0; + + tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost); + + if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) { + dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc); + goto req_irq_failed; + } + + if ((rc = vio_enable_interrupts(vdev))) { + dev_err(dev, "Error %d enabling interrupts\n", rc); + goto req_irq_failed; + } + + LEAVE; + return retrc; + +req_irq_failed: + tasklet_kill(&vhost->tasklet); + do { + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); +reg_crq_failed: + ibmvfc_free_queue(vhost, crq); + return retrc; +} + +static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost, + int index) +{ + struct device *dev = vhost->dev; + struct vio_dev *vdev = to_vio_dev(dev); + struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index]; + int rc = -ENOMEM; + + ENTER; + + rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE, + &scrq->cookie, &scrq->hw_irq); + + /* H_CLOSED indicates successful register, but no CRQ partner */ + if (rc && rc != H_CLOSED) { + dev_warn(dev, "Error registering sub-crq: %d\n", rc); + if (rc == H_PARAMETER) + dev_warn_once(dev, "Firmware may not support MQ\n"); + goto reg_failed; + } + + scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); + + if (!scrq->irq) { + rc = -EINVAL; + dev_err(dev, "Error mapping sub-crq[%d] irq\n", index); + goto irq_failed; + } + + snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d", + vdev->unit_address, index); + rc = request_irq(scrq->irq, ibmvfc_interrupt_scsi, 0, scrq->name, scrq); + + if (rc) { + dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index); + irq_dispose_mapping(scrq->irq); + goto irq_failed; + } + + scrq->hwq_id = index; + + LEAVE; + return 0; + +irq_failed: + do { + rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); +reg_failed: + LEAVE; + return rc; +} + +static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index) +{ + struct device *dev = vhost->dev; + struct vio_dev *vdev = to_vio_dev(dev); + struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index]; + long rc; + + ENTER; + + free_irq(scrq->irq, scrq); + 
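+	/*
+	 * Teardown mirrors registration in reverse: the Linux IRQ has been
+	 * released above, the interrupt mapping is disposed of next, and the
+	 * sub-CRQ is then freed with the hypervisor, retrying while busy.
+	 */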
irq_dispose_mapping(scrq->irq); + scrq->irq = 0; + + do { + rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, + scrq->cookie); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + if (rc) + dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc); + + /* Clean out the queue */ + memset(scrq->msgs.crq, 0, PAGE_SIZE); + scrq->cur = 0; + + LEAVE; +} + +static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost) +{ + int i, j; + + ENTER; + if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs) + return; + + for (i = 0; i < nr_scsi_hw_queues; i++) { + if (ibmvfc_register_scsi_channel(vhost, i)) { + for (j = i; j > 0; j--) + ibmvfc_deregister_scsi_channel(vhost, j - 1); + vhost->do_enquiry = 0; + return; + } + } + + LEAVE; +} + +static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost) +{ + int i; + + ENTER; + if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs) + return; + + for (i = 0; i < nr_scsi_hw_queues; i++) + ibmvfc_deregister_scsi_channel(vhost, i); + + LEAVE; +} + +static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) +{ + struct ibmvfc_queue *scrq; + int i, j; + + ENTER; + if (!vhost->mq_enabled) + return; + + vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues, + sizeof(*vhost->scsi_scrqs.scrqs), + GFP_KERNEL); + if (!vhost->scsi_scrqs.scrqs) { + vhost->do_enquiry = 0; + return; + } + + for (i = 0; i < nr_scsi_hw_queues; i++) { + scrq = &vhost->scsi_scrqs.scrqs[i]; + if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) { + for (j = i; j > 0; j--) { + scrq = &vhost->scsi_scrqs.scrqs[j - 1]; + ibmvfc_free_queue(vhost, scrq); + } + kfree(vhost->scsi_scrqs.scrqs); + vhost->scsi_scrqs.scrqs = NULL; + vhost->scsi_scrqs.active_queues = 0; + vhost->do_enquiry = 0; + vhost->mq_enabled = 0; + return; + } + } + + ibmvfc_reg_sub_crqs(vhost); + + LEAVE; +} + +static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost) +{ + struct ibmvfc_queue *scrq; + int i; + + ENTER; + if (!vhost->scsi_scrqs.scrqs) + return; + + ibmvfc_dereg_sub_crqs(vhost); + + for (i = 0; i < nr_scsi_hw_queues; i++) { + scrq = &vhost->scsi_scrqs.scrqs[i]; + ibmvfc_free_queue(vhost, scrq); + } + + kfree(vhost->scsi_scrqs.scrqs); + vhost->scsi_scrqs.scrqs = NULL; + vhost->scsi_scrqs.active_queues = 0; + LEAVE; +} + +/** + * ibmvfc_free_mem - Free memory for vhost + * @vhost: ibmvfc host struct + * + * Return value: + * none + **/ +static void ibmvfc_free_mem(struct ibmvfc_host *vhost) +{ + struct ibmvfc_queue *async_q = &vhost->async_crq; + + ENTER; + mempool_destroy(vhost->tgt_pool); + kfree(vhost->trace); + dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf, + vhost->disc_buf_dma); + dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf), + vhost->login_buf, vhost->login_buf_dma); + dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf), + vhost->channel_setup_buf, vhost->channel_setup_dma); + dma_pool_destroy(vhost->sg_pool); + ibmvfc_free_queue(vhost, async_q); + LEAVE; +} + +/** + * ibmvfc_alloc_mem - Allocate memory for vhost + * @vhost: ibmvfc host struct + * + * Return value: + * 0 on success / non-zero on failure + **/ +static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost) +{ + struct ibmvfc_queue *async_q = &vhost->async_crq; + struct device *dev = vhost->dev; + + ENTER; + if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) { + dev_err(dev, "Couldn't allocate/map async queue.\n"); + goto nomem; + } + + vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev, + SG_ALL * sizeof(struct srp_direct_buf), + sizeof(struct srp_direct_buf), 0); + + if (!vhost->sg_pool) { + 
dev_err(dev, "Failed to allocate sg pool\n"); + goto unmap_async_crq; + } + + vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf), + &vhost->login_buf_dma, GFP_KERNEL); + + if (!vhost->login_buf) { + dev_err(dev, "Couldn't allocate NPIV login buffer\n"); + goto free_sg_pool; + } + + vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets; + vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz, + &vhost->disc_buf_dma, GFP_KERNEL); + + if (!vhost->disc_buf) { + dev_err(dev, "Couldn't allocate Discover Targets buffer\n"); + goto free_login_buffer; + } + + vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES, + sizeof(struct ibmvfc_trace_entry), GFP_KERNEL); + atomic_set(&vhost->trace_index, -1); + + if (!vhost->trace) + goto free_disc_buffer; + + vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ, + sizeof(struct ibmvfc_target)); + + if (!vhost->tgt_pool) { + dev_err(dev, "Couldn't allocate target memory pool\n"); + goto free_trace; + } + + vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf), + &vhost->channel_setup_dma, + GFP_KERNEL); + + if (!vhost->channel_setup_buf) { + dev_err(dev, "Couldn't allocate Channel Setup buffer\n"); + goto free_tgt_pool; + } + + LEAVE; + return 0; + +free_tgt_pool: + mempool_destroy(vhost->tgt_pool); +free_trace: + kfree(vhost->trace); +free_disc_buffer: + dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf, + vhost->disc_buf_dma); +free_login_buffer: + dma_free_coherent(dev, sizeof(*vhost->login_buf), + vhost->login_buf, vhost->login_buf_dma); +free_sg_pool: + dma_pool_destroy(vhost->sg_pool); +unmap_async_crq: + ibmvfc_free_queue(vhost, async_q); +nomem: + LEAVE; + return -ENOMEM; +} + +/** + * ibmvfc_rport_add_thread - Worker thread for rport adds + * @work: work struct + * + **/ +static void ibmvfc_rport_add_thread(struct work_struct *work) +{ + struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host, + rport_add_work_q); + struct ibmvfc_target *tgt; + struct fc_rport *rport; + unsigned long flags; + int did_work; + + ENTER; + spin_lock_irqsave(vhost->host->host_lock, flags); + do { + did_work = 0; + if (vhost->state != IBMVFC_ACTIVE) + break; + + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->add_rport) { + did_work = 1; + tgt->add_rport = 0; + kref_get(&tgt->kref); + rport = tgt->rport; + if (!rport) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_tgt_add_rport(tgt); + } else if (get_device(&rport->dev)) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + tgt_dbg(tgt, "Setting rport roles\n"); + fc_remote_port_rolechg(rport, tgt->ids.roles); + put_device(&rport->dev); + } else { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + } + + kref_put(&tgt->kref, ibmvfc_release_tgt); + spin_lock_irqsave(vhost->host->host_lock, flags); + break; + } + } + } while(did_work); + + if (vhost->state == IBMVFC_ACTIVE) + vhost->scan_complete = 1; + spin_unlock_irqrestore(vhost->host->host_lock, flags); + LEAVE; +} + +/** + * ibmvfc_probe - Adapter hot plug add entry point + * @vdev: vio device struct + * @id: vio device id struct + * + * Return value: + * 0 on success / non-zero on failure + **/ +static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) +{ + struct ibmvfc_host *vhost; + struct Scsi_Host *shost; + struct device *dev = &vdev->dev; + int rc = -ENOMEM; + unsigned int max_scsi_queues = IBMVFC_MAX_SCSI_QUEUES; + + ENTER; + shost = scsi_host_alloc(&driver_template, sizeof(*vhost)); + if (!shost) { + 
dev_err(dev, "Couldn't allocate host data\n"); + goto out; + } + + shost->transportt = ibmvfc_transport_template; + shost->can_queue = max_requests; + shost->max_lun = max_lun; + shost->max_id = max_targets; + shost->max_sectors = IBMVFC_MAX_SECTORS; + shost->max_cmd_len = IBMVFC_MAX_CDB_LEN; + shost->unique_id = shost->host_no; + shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1; + + vhost = shost_priv(shost); + INIT_LIST_HEAD(&vhost->targets); + INIT_LIST_HEAD(&vhost->purge); + sprintf(vhost->name, IBMVFC_NAME); + vhost->host = shost; + vhost->dev = dev; + vhost->partition_number = -1; + vhost->log_level = log_level; + vhost->task_set = 1; + + vhost->mq_enabled = mq_enabled; + vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels); + vhost->using_channels = 0; + vhost->do_enquiry = 1; + vhost->scan_timeout = 0; + + strcpy(vhost->partition_name, "UNKNOWN"); + init_waitqueue_head(&vhost->work_wait_q); + init_waitqueue_head(&vhost->init_wait_q); + INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread); + mutex_init(&vhost->passthru_mutex); + + if ((rc = ibmvfc_alloc_mem(vhost))) + goto free_scsi_host; + + vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME, + shost->host_no); + + if (IS_ERR(vhost->work_thread)) { + dev_err(dev, "Couldn't create kernel thread: %ld\n", + PTR_ERR(vhost->work_thread)); + rc = PTR_ERR(vhost->work_thread); + goto free_host_mem; + } + + if ((rc = ibmvfc_init_crq(vhost))) { + dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc); + goto kill_kthread; + } + + if ((rc = scsi_add_host(shost, dev))) + goto release_crq; + + fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO; + + if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj, + &ibmvfc_trace_attr))) { + dev_err(dev, "Failed to create trace file. 
rc=%d\n", rc); + goto remove_shost; + } + + ibmvfc_init_sub_crqs(vhost); + + if (shost_to_fc_host(shost)->rqst_q) + blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1); + dev_set_drvdata(dev, vhost); + spin_lock(&ibmvfc_driver_lock); + list_add_tail(&vhost->queue, &ibmvfc_head); + spin_unlock(&ibmvfc_driver_lock); + + ibmvfc_send_crq_init(vhost); + scsi_scan_host(shost); + return 0; + +remove_shost: + scsi_remove_host(shost); +release_crq: + ibmvfc_release_crq_queue(vhost); +kill_kthread: + kthread_stop(vhost->work_thread); +free_host_mem: + ibmvfc_free_mem(vhost); +free_scsi_host: + scsi_host_put(shost); +out: + LEAVE; + return rc; +} + +/** + * ibmvfc_remove - Adapter hot plug remove entry point + * @vdev: vio device struct + * + * Return value: + * 0 + **/ +static void ibmvfc_remove(struct vio_dev *vdev) +{ + struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev); + LIST_HEAD(purge); + unsigned long flags; + + ENTER; + ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr); + + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + ibmvfc_wait_while_resetting(vhost); + kthread_stop(vhost->work_thread); + fc_remove_host(vhost->host); + scsi_remove_host(vhost->host); + + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_purge_requests(vhost, DID_ERROR); + list_splice_init(&vhost->purge, &purge); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_complete_purge(&purge); + ibmvfc_release_sub_crqs(vhost); + ibmvfc_release_crq_queue(vhost); + + ibmvfc_free_mem(vhost); + spin_lock(&ibmvfc_driver_lock); + list_del(&vhost->queue); + spin_unlock(&ibmvfc_driver_lock); + scsi_host_put(vhost->host); + LEAVE; +} + +/** + * ibmvfc_resume - Resume from suspend + * @dev: device struct + * + * We may have lost an interrupt across suspend/resume, so kick the + * interrupt handler + * + */ +static int ibmvfc_resume(struct device *dev) +{ + unsigned long flags; + struct ibmvfc_host *vhost = dev_get_drvdata(dev); + struct vio_dev *vdev = to_vio_dev(dev); + + spin_lock_irqsave(vhost->host->host_lock, flags); + vio_disable_interrupts(vdev); + tasklet_schedule(&vhost->tasklet); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return 0; +} + +/** + * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver + * @vdev: vio device struct + * + * Return value: + * Number of bytes the driver will need to DMA map at the same time in + * order to perform well. 
+ */ +static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev) +{ + unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu); + return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun); +} + +static const struct vio_device_id ibmvfc_device_table[] = { + {"fcp", "IBM,vfc-client"}, + { "", "" } +}; +MODULE_DEVICE_TABLE(vio, ibmvfc_device_table); + +static const struct dev_pm_ops ibmvfc_pm_ops = { + .resume = ibmvfc_resume +}; + +static struct vio_driver ibmvfc_driver = { + .id_table = ibmvfc_device_table, + .probe = ibmvfc_probe, + .remove = ibmvfc_remove, + .get_desired_dma = ibmvfc_get_desired_dma, + .name = IBMVFC_NAME, + .pm = &ibmvfc_pm_ops, +}; + +static struct fc_function_template ibmvfc_transport_functions = { + .show_host_fabric_name = 1, + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_port_type = 1, + .show_host_port_id = 1, + .show_host_maxframe_size = 1, + + .get_host_port_state = ibmvfc_get_host_port_state, + .show_host_port_state = 1, + + .get_host_speed = ibmvfc_get_host_speed, + .show_host_speed = 1, + + .issue_fc_host_lip = ibmvfc_issue_fc_host_lip, + .terminate_rport_io = ibmvfc_terminate_rport_io, + + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo, + .show_rport_dev_loss_tmo = 1, + + .get_starget_node_name = ibmvfc_get_starget_node_name, + .show_starget_node_name = 1, + + .get_starget_port_name = ibmvfc_get_starget_port_name, + .show_starget_port_name = 1, + + .get_starget_port_id = ibmvfc_get_starget_port_id, + .show_starget_port_id = 1, + + .bsg_request = ibmvfc_bsg_request, + .bsg_timeout = ibmvfc_bsg_timeout, +}; + +/** + * ibmvfc_module_init - Initialize the ibmvfc module + * + * Return value: + * 0 on success / other on failure + **/ +static int __init ibmvfc_module_init(void) +{ + int rc; + + if (!firmware_has_feature(FW_FEATURE_VIO)) + return -ENODEV; + + printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n", + IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE); + + ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions); + if (!ibmvfc_transport_template) + return -ENOMEM; + + rc = vio_register_driver(&ibmvfc_driver); + if (rc) + fc_release_transport(ibmvfc_transport_template); + return rc; +} + +/** + * ibmvfc_module_exit - Teardown the ibmvfc module + * + * Return value: + * nothing + **/ +static void __exit ibmvfc_module_exit(void) +{ + vio_unregister_driver(&ibmvfc_driver); + fc_release_transport(ibmvfc_transport_template); +} + +module_init(ibmvfc_module_init); +module_exit(ibmvfc_module_exit); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h new file mode 100644 index 000000000..c39a245f4 --- /dev/null +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -0,0 +1,938 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * ibmvfc.h -- driver for IBM Power Virtual Fibre Channel Adapter + * + * Written By: Brian King , IBM Corporation + * + * Copyright (C) IBM Corporation, 2008 + */ + +#ifndef _IBMVFC_H +#define _IBMVFC_H + +#include +#include +#include + +#define IBMVFC_NAME "ibmvfc" +#define IBMVFC_DRIVER_VERSION "1.0.11" +#define IBMVFC_DRIVER_DATE "(April 12, 2013)" + +#define IBMVFC_DEFAULT_TIMEOUT 60 +#define IBMVFC_ADISC_CANCEL_TIMEOUT 45 +#define IBMVFC_ADISC_TIMEOUT 15 +#define IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT \ + (IBMVFC_ADISC_TIMEOUT + IBMVFC_ADISC_CANCEL_TIMEOUT) +#define IBMVFC_INIT_TIMEOUT 120 +#define IBMVFC_ABORT_TIMEOUT 8 +#define 
IBMVFC_ABORT_WAIT_TIMEOUT 40 +#define IBMVFC_MAX_REQUESTS_DEFAULT 100 + +#define IBMVFC_DEBUG 0 +#define IBMVFC_MAX_TARGETS 1024 +#define IBMVFC_MAX_LUN 0xffffffff +#define IBMVFC_MAX_SECTORS 0xffffu +#define IBMVFC_MAX_DISC_THREADS 4 +#define IBMVFC_TGT_MEMPOOL_SZ 64 +#define IBMVFC_MAX_CMDS_PER_LUN 64 +#define IBMVFC_MAX_HOST_INIT_RETRIES 6 +#define IBMVFC_MAX_TGT_INIT_RETRIES 3 +#define IBMVFC_DEV_LOSS_TMO (5 * 60) +#define IBMVFC_DEFAULT_LOG_LEVEL 2 +#define IBMVFC_MAX_CDB_LEN 16 +#define IBMVFC_CLS3_ERROR 0 +#define IBMVFC_MQ 1 +#define IBMVFC_SCSI_CHANNELS 8 +#define IBMVFC_MAX_SCSI_QUEUES 16 +#define IBMVFC_SCSI_HW_QUEUES 8 +#define IBMVFC_MIG_NO_SUB_TO_CRQ 0 +#define IBMVFC_MIG_NO_N_TO_M 0 + +/* + * Ensure we have resources for ERP and initialization: + * 1 for ERP + * 1 for initialization + * 1 for NPIV Logout + * 2 for BSG passthru + * 2 for each discovery thread + */ +#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + 2 + (disc_threads * 2)) + +#define IBMVFC_MAD_SUCCESS 0x00 +#define IBMVFC_MAD_NOT_SUPPORTED 0xF1 +#define IBMVFC_MAD_VERSION_NOT_SUPP 0xF2 +#define IBMVFC_MAD_FAILED 0xF7 +#define IBMVFC_MAD_DRIVER_FAILED 0xEE +#define IBMVFC_MAD_CRQ_ERROR 0xEF + +enum ibmvfc_crq_valid { + IBMVFC_CRQ_CMD_RSP = 0x80, + IBMVFC_CRQ_INIT_RSP = 0xC0, + IBMVFC_CRQ_XPORT_EVENT = 0xFF, +}; + +enum ibmvfc_crq_init_msg { + IBMVFC_CRQ_INIT = 0x01, + IBMVFC_CRQ_INIT_COMPLETE = 0x02, +}; + +enum ibmvfc_crq_xport_evts { + IBMVFC_PARTNER_FAILED = 0x01, + IBMVFC_PARTNER_DEREGISTER = 0x02, + IBMVFC_PARTITION_MIGRATED = 0x06, +}; + +enum ibmvfc_cmd_status_flags { + IBMVFC_FABRIC_MAPPED = 0x0001, + IBMVFC_VIOS_FAILURE = 0x0002, + IBMVFC_FC_FAILURE = 0x0004, + IBMVFC_FC_SCSI_ERROR = 0x0008, + IBMVFC_HW_EVENT_LOGGED = 0x0010, + IBMVFC_VIOS_LOGGED = 0x0020, +}; + +enum ibmvfc_fabric_mapped_errors { + IBMVFC_UNABLE_TO_ESTABLISH = 0x0001, + IBMVFC_XPORT_FAULT = 0x0002, + IBMVFC_CMD_TIMEOUT = 0x0003, + IBMVFC_ENETDOWN = 0x0004, + IBMVFC_HW_FAILURE = 0x0005, + IBMVFC_LINK_DOWN_ERR = 0x0006, + IBMVFC_LINK_DEAD_ERR = 0x0007, + IBMVFC_UNABLE_TO_REGISTER = 0x0008, + IBMVFC_XPORT_BUSY = 0x000A, + IBMVFC_XPORT_DEAD = 0x000B, + IBMVFC_CONFIG_ERROR = 0x000C, + IBMVFC_NAME_SERVER_FAIL = 0x000D, + IBMVFC_LINK_HALTED = 0x000E, + IBMVFC_XPORT_GENERAL = 0x8000, +}; + +enum ibmvfc_vios_errors { + IBMVFC_CRQ_FAILURE = 0x0001, + IBMVFC_SW_FAILURE = 0x0002, + IBMVFC_INVALID_PARAMETER = 0x0003, + IBMVFC_MISSING_PARAMETER = 0x0004, + IBMVFC_HOST_IO_BUS = 0x0005, + IBMVFC_TRANS_CANCELLED = 0x0006, + IBMVFC_TRANS_CANCELLED_IMPLICIT = 0x0007, + IBMVFC_INSUFFICIENT_RESOURCE = 0x0008, + IBMVFC_PLOGI_REQUIRED = 0x0010, + IBMVFC_COMMAND_FAILED = 0x8000, +}; + +enum ibmvfc_mad_types { + IBMVFC_NPIV_LOGIN = 0x0001, + IBMVFC_DISC_TARGETS = 0x0002, + IBMVFC_PORT_LOGIN = 0x0004, + IBMVFC_PROCESS_LOGIN = 0x0008, + IBMVFC_QUERY_TARGET = 0x0010, + IBMVFC_MOVE_LOGIN = 0x0020, + IBMVFC_IMPLICIT_LOGOUT = 0x0040, + IBMVFC_PASSTHRU = 0x0200, + IBMVFC_TMF_MAD = 0x0100, + IBMVFC_NPIV_LOGOUT = 0x0800, + IBMVFC_CHANNEL_ENQUIRY = 0x1000, + IBMVFC_CHANNEL_SETUP = 0x2000, + IBMVFC_CONNECTION_INFO = 0x4000, +}; + +struct ibmvfc_mad_common { + __be32 version; + __be32 reserved; + __be32 opcode; + __be16 status; + __be16 length; + __be64 tag; +} __packed __aligned(8); + +struct ibmvfc_npiv_login_mad { + struct ibmvfc_mad_common common; + struct srp_direct_buf buffer; +} __packed __aligned(8); + +struct ibmvfc_npiv_logout_mad { + struct ibmvfc_mad_common common; +} __packed __aligned(8); + +#define IBMVFC_MAX_NAME 256 + +struct ibmvfc_npiv_login { + __be32 
ostype; +#define IBMVFC_OS_LINUX 0x02 + __be32 pad; + __be64 max_dma_len; + __be32 max_payload; + __be32 max_response; + __be32 partition_num; + __be32 vfc_frame_version; + __be16 fcp_version; + __be16 flags; +#define IBMVFC_CLIENT_MIGRATED 0x01 +#define IBMVFC_FLUSH_ON_HALT 0x02 + __be32 max_cmds; + __be64 capabilities; +#define IBMVFC_CAN_MIGRATE 0x01 +#define IBMVFC_CAN_USE_CHANNELS 0x02 +#define IBMVFC_CAN_HANDLE_FPIN 0x04 +#define IBMVFC_CAN_USE_MAD_VERSION 0x08 +#define IBMVFC_CAN_SEND_VF_WWPN 0x10 + __be64 node_name; + struct srp_direct_buf async; + u8 partition_name[IBMVFC_MAX_NAME]; + u8 device_name[IBMVFC_MAX_NAME]; + u8 drc_name[IBMVFC_MAX_NAME]; + __be64 reserved2[2]; +} __packed __aligned(8); + +struct ibmvfc_common_svc_parms { + __be16 fcph_version; + __be16 b2b_credit; + __be16 features; + __be16 bb_rcv_sz; /* upper nibble is BB_SC_N */ + __be32 ratov; + __be32 edtov; +} __packed __aligned(4); + +struct ibmvfc_service_parms { + struct ibmvfc_common_svc_parms common; + u8 port_name[8]; + u8 node_name[8]; + __be32 class1_parms[4]; + __be32 class2_parms[4]; + __be32 class3_parms[4]; + __be32 obsolete[4]; + __be32 vendor_version[4]; + __be32 services_avail[2]; + __be32 ext_len; + __be32 reserved[30]; + __be32 clk_sync_qos[2]; + __be32 reserved2; +} __packed __aligned(4); + +struct ibmvfc_npiv_login_resp { + __be32 version; + __be16 status; + __be16 error; + __be32 flags; +#define IBMVFC_NATIVE_FC 0x01 + __be32 reserved; + __be64 capabilities; +#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 +#define IBMVFC_CAN_SUPPRESS_ABTS 0x10 +#define IBMVFC_MAD_VERSION_CAP 0x20 +#define IBMVFC_HANDLE_VF_WWPN 0x40 +#define IBMVFC_CAN_SUPPORT_CHANNELS 0x80 + __be32 max_cmds; + __be32 scsi_id_sz; + __be64 max_dma_len; + __be64 scsi_id; + __be64 port_name; + __be64 node_name; + __be64 link_speed; + u8 partition_name[IBMVFC_MAX_NAME]; + u8 device_name[IBMVFC_MAX_NAME]; + u8 port_loc_code[IBMVFC_MAX_NAME]; + u8 drc_name[IBMVFC_MAX_NAME]; + struct ibmvfc_service_parms service_parms; + __be64 reserved2; +} __packed __aligned(8); + +union ibmvfc_npiv_login_data { + struct ibmvfc_npiv_login login; + struct ibmvfc_npiv_login_resp resp; +} __packed __aligned(8); + +struct ibmvfc_discover_targets_entry { + __be32 scsi_id; + __be32 pad; + __be64 wwpn; +#define IBMVFC_DISC_TGT_SCSI_ID_MASK 0x00ffffff +} __packed __aligned(8); + +struct ibmvfc_discover_targets { + struct ibmvfc_mad_common common; + struct srp_direct_buf buffer; + __be32 flags; +#define IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST 0x02 + __be16 status; + __be16 error; + __be32 bufflen; + __be32 num_avail; + __be32 num_written; + __be64 reserved[2]; +} __packed __aligned(8); + +enum ibmvfc_fc_reason { + IBMVFC_INVALID_ELS_CMD_CODE = 0x01, + IBMVFC_INVALID_VERSION = 0x02, + IBMVFC_LOGICAL_ERROR = 0x03, + IBMVFC_INVALID_CT_IU_SIZE = 0x04, + IBMVFC_LOGICAL_BUSY = 0x05, + IBMVFC_PROTOCOL_ERROR = 0x07, + IBMVFC_UNABLE_TO_PERFORM_REQ = 0x09, + IBMVFC_CMD_NOT_SUPPORTED = 0x0B, + IBMVFC_SERVER_NOT_AVAIL = 0x0D, + IBMVFC_CMD_IN_PROGRESS = 0x0E, + IBMVFC_VENDOR_SPECIFIC = 0xFF, +}; + +enum ibmvfc_fc_type { + IBMVFC_FABRIC_REJECT = 0x01, + IBMVFC_PORT_REJECT = 0x02, + IBMVFC_LS_REJECT = 0x03, + IBMVFC_FABRIC_BUSY = 0x04, + IBMVFC_PORT_BUSY = 0x05, + IBMVFC_BASIC_REJECT = 0x06, +}; + +enum ibmvfc_gs_explain { + IBMVFC_PORT_NAME_NOT_REG = 0x02, +}; + +struct ibmvfc_port_login { + struct ibmvfc_mad_common common; + __be64 scsi_id; + __be16 reserved; + __be16 fc_service_class; + __be32 blksz; + __be32 hdr_per_blk; + __be16 status; + __be16 error; /* also fc_reason */ + __be16 
fc_explain; + __be16 fc_type; + __be32 reserved2; + struct ibmvfc_service_parms service_parms; + struct ibmvfc_service_parms service_parms_change; + __be64 target_wwpn; + __be64 reserved3[2]; +} __packed __aligned(8); + +struct ibmvfc_move_login { + struct ibmvfc_mad_common common; + __be64 old_scsi_id; + __be64 new_scsi_id; + __be64 wwpn; + __be64 node_name; + __be32 flags; +#define IBMVFC_MOVE_LOGIN_IMPLICIT_OLD_FAILED 0x01 +#define IBMVFC_MOVE_LOGIN_IMPLICIT_NEW_FAILED 0x02 +#define IBMVFC_MOVE_LOGIN_PORT_LOGIN_FAILED 0x04 + __be32 reserved; + struct ibmvfc_service_parms service_parms; + struct ibmvfc_service_parms service_parms_change; + __be32 reserved2; + __be16 service_class; + __be16 vios_flags; +#define IBMVFC_MOVE_LOGIN_VF_NOT_SENT_ADAPTER 0x01 + __be64 reserved3; +} __packed __aligned(8); + +struct ibmvfc_prli_svc_parms { + u8 type; +#define IBMVFC_SCSI_FCP_TYPE 0x08 + u8 type_ext; + __be16 flags; +#define IBMVFC_PRLI_ORIG_PA_VALID 0x8000 +#define IBMVFC_PRLI_RESP_PA_VALID 0x4000 +#define IBMVFC_PRLI_EST_IMG_PAIR 0x2000 + __be32 orig_pa; + __be32 resp_pa; + __be32 service_parms; +#define IBMVFC_PRLI_TASK_RETRY 0x00000200 +#define IBMVFC_PRLI_RETRY 0x00000100 +#define IBMVFC_PRLI_DATA_OVERLAY 0x00000040 +#define IBMVFC_PRLI_INITIATOR_FUNC 0x00000020 +#define IBMVFC_PRLI_TARGET_FUNC 0x00000010 +#define IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED 0x00000002 +#define IBMVFC_PRLI_WR_FCP_XFER_RDY_DISABLED 0x00000001 +} __packed __aligned(4); + +struct ibmvfc_process_login { + struct ibmvfc_mad_common common; + __be64 scsi_id; + struct ibmvfc_prli_svc_parms parms; + u8 reserved[48]; + __be16 status; + __be16 error; /* also fc_reason */ + __be32 reserved2; + __be64 target_wwpn; + __be64 reserved3[2]; +} __packed __aligned(8); + +struct ibmvfc_query_tgt { + struct ibmvfc_mad_common common; + __be64 wwpn; + __be64 scsi_id; + __be16 status; + __be16 error; + __be16 fc_explain; + __be16 fc_type; + __be64 reserved[2]; +} __packed __aligned(8); + +struct ibmvfc_implicit_logout { + struct ibmvfc_mad_common common; + __be64 old_scsi_id; + __be64 reserved[2]; +} __packed __aligned(8); + +struct ibmvfc_tmf { + struct ibmvfc_mad_common common; + __be64 scsi_id; + struct scsi_lun lun; + __be32 flags; +#define IBMVFC_TMF_ABORT_TASK 0x02 +#define IBMVFC_TMF_ABORT_TASK_SET 0x04 +#define IBMVFC_TMF_LUN_RESET 0x10 +#define IBMVFC_TMF_TGT_RESET 0x20 +#define IBMVFC_TMF_LUA_VALID 0x40 +#define IBMVFC_TMF_SUPPRESS_ABTS 0x80 + __be32 cancel_key; + __be32 my_cancel_key; + __be32 pad; + __be64 target_wwpn; + __be64 task_tag; + __be64 reserved[2]; +} __packed __aligned(8); + +enum ibmvfc_fcp_rsp_info_codes { + RSP_NO_FAILURE = 0x00, + RSP_TMF_REJECTED = 0x04, + RSP_TMF_FAILED = 0x05, + RSP_TMF_INVALID_LUN = 0x09, +}; + +struct ibmvfc_fcp_rsp_info { + u8 reserved[3]; + u8 rsp_code; + u8 reserved2[4]; +} __packed __aligned(2); + +enum ibmvfc_fcp_rsp_flags { + FCP_BIDI_RSP = 0x80, + FCP_BIDI_READ_RESID_UNDER = 0x40, + FCP_BIDI_READ_RESID_OVER = 0x20, + FCP_CONF_REQ = 0x10, + FCP_RESID_UNDER = 0x08, + FCP_RESID_OVER = 0x04, + FCP_SNS_LEN_VALID = 0x02, + FCP_RSP_LEN_VALID = 0x01, +}; + +union ibmvfc_fcp_rsp_data { + struct ibmvfc_fcp_rsp_info info; + u8 sense[SCSI_SENSE_BUFFERSIZE + sizeof(struct ibmvfc_fcp_rsp_info)]; +} __packed __aligned(8); + +struct ibmvfc_fcp_rsp { + __be64 reserved; + __be16 retry_delay_timer; + u8 flags; + u8 scsi_status; + __be32 fcp_resid; + __be32 fcp_sense_len; + __be32 fcp_rsp_len; + union ibmvfc_fcp_rsp_data data; +} __packed __aligned(8); + +enum ibmvfc_cmd_flags { + IBMVFC_SCATTERLIST = 
0x0001, + IBMVFC_NO_MEM_DESC = 0x0002, + IBMVFC_READ = 0x0004, + IBMVFC_WRITE = 0x0008, + IBMVFC_TMF = 0x0080, + IBMVFC_CLASS_3_ERR = 0x0100, +}; + +enum ibmvfc_fc_task_attr { + IBMVFC_SIMPLE_TASK = 0x00, + IBMVFC_HEAD_OF_QUEUE = 0x01, + IBMVFC_ORDERED_TASK = 0x02, + IBMVFC_ACA_TASK = 0x04, +}; + +enum ibmvfc_fc_tmf_flags { + IBMVFC_ABORT_TASK_SET = 0x02, + IBMVFC_LUN_RESET = 0x10, + IBMVFC_TARGET_RESET = 0x20, +}; + +struct ibmvfc_fcp_cmd_iu { + struct scsi_lun lun; + u8 crn; + u8 pri_task_attr; + u8 tmf_flags; + u8 add_cdb_len; +#define IBMVFC_RDDATA 0x02 +#define IBMVFC_WRDATA 0x01 + u8 cdb[IBMVFC_MAX_CDB_LEN]; + __be32 xfer_len; +} __packed __aligned(4); + +struct ibmvfc_cmd { + __be64 task_tag; + __be32 frame_type; + __be32 payload_len; + __be32 resp_len; + __be32 adapter_resid; + __be16 status; + __be16 error; + __be16 flags; + __be16 response_flags; +#define IBMVFC_ADAPTER_RESID_VALID 0x01 + __be32 cancel_key; + __be32 exchange_id; + struct srp_direct_buf ext_func; + struct srp_direct_buf ioba; + struct srp_direct_buf resp; + __be64 correlation; + __be64 tgt_scsi_id; + __be64 tag; + __be64 target_wwpn; + __be64 reserved3; + union { + struct { + struct ibmvfc_fcp_cmd_iu iu; + struct ibmvfc_fcp_rsp rsp; + } v1; + struct { + __be64 reserved4; + struct ibmvfc_fcp_cmd_iu iu; + struct ibmvfc_fcp_rsp rsp; + } v2; + }; +} __packed __aligned(8); + +struct ibmvfc_passthru_fc_iu { + __be32 payload[7]; +#define IBMVFC_ADISC 0x52000000 + __be32 response[7]; +}; + +struct ibmvfc_passthru_iu { + __be64 task_tag; + __be32 cmd_len; + __be32 rsp_len; + __be16 status; + __be16 error; + __be32 flags; +#define IBMVFC_FC_ELS 0x01 +#define IBMVFC_FC_CT_IU 0x02 + __be32 cancel_key; +#define IBMVFC_PASSTHRU_CANCEL_KEY 0x80000000 +#define IBMVFC_INTERNAL_CANCEL_KEY 0x80000001 + __be32 reserved; + struct srp_direct_buf cmd; + struct srp_direct_buf rsp; + __be64 correlation; + __be64 scsi_id; + __be64 tag; + __be64 target_wwpn; + __be64 reserved2[2]; +} __packed __aligned(8); + +struct ibmvfc_passthru_mad { + struct ibmvfc_mad_common common; + struct srp_direct_buf cmd_ioba; + struct ibmvfc_passthru_iu iu; + struct ibmvfc_passthru_fc_iu fc_iu; +} __packed __aligned(8); + +struct ibmvfc_channel_enquiry { + struct ibmvfc_mad_common common; + __be32 flags; +#define IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT 0x01 +#define IBMVFC_SUPPORT_VARIABLE_SUBQ_MSG 0x02 +#define IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT 0x04 + __be32 num_scsi_subq_channels; + __be32 num_nvmeof_subq_channels; + __be32 num_scsi_vas_channels; + __be32 num_nvmeof_vas_channels; +} __packed __aligned(8); + +struct ibmvfc_channel_setup_mad { + struct ibmvfc_mad_common common; + struct srp_direct_buf buffer; +} __packed __aligned(8); + +#define IBMVFC_MAX_CHANNELS 502 + +struct ibmvfc_channel_setup { + __be32 flags; +#define IBMVFC_CANCEL_CHANNELS 0x01 +#define IBMVFC_USE_BUFFER 0x02 +#define IBMVFC_CHANNELS_CANCELED 0x04 + __be32 reserved; + __be32 num_scsi_subq_channels; + __be32 num_nvmeof_subq_channels; + __be32 num_scsi_vas_channels; + __be32 num_nvmeof_vas_channels; + struct srp_direct_buf buffer; + __be64 reserved2[5]; + __be64 channel_handles[IBMVFC_MAX_CHANNELS]; +} __packed __aligned(8); + +struct ibmvfc_connection_info { + struct ibmvfc_mad_common common; + __be64 information_bits; +#define IBMVFC_NO_FC_IO_CHANNEL 0x01 +#define IBMVFC_NO_PHYP_VAS 0x02 +#define IBMVFC_NO_PHYP_SUBQ 0x04 +#define IBMVFC_PHYP_DEPRECATED_SUBQ 0x08 +#define IBMVFC_PHYP_PRESERVED_SUBQ 0x10 +#define IBMVFC_PHYP_FULL_SUBQ 0x20 + __be64 reserved[16]; +} __packed __aligned(8); + 
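+/*
+ * Illustrative sketch, not part of the upstream driver: the MAD
+ * structures above keep their flag words big-endian on the wire, so a
+ * caller checking a channel enquiry response for a bit such as
+ * IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT would byte-swap first.  A minimal
+ * (hypothetical) helper could look like this:
+ */
+static inline bool ibmvfc_example_chan_enq_flag(const struct ibmvfc_channel_enquiry *enq,
+						u32 flag)
+{
+	/* flags is __be32; convert to CPU order before masking */
+	return (be32_to_cpu(enq->flags) & flag) != 0;
+}
+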
+struct ibmvfc_trace_start_entry { + u32 xfer_len; +} __packed; + +struct ibmvfc_trace_end_entry { + u16 status; + u16 error; + u8 fcp_rsp_flags; + u8 rsp_code; + u8 scsi_status; + u8 reserved; +} __packed; + +struct ibmvfc_trace_entry { + struct ibmvfc_event *evt; + u32 time; + u32 scsi_id; + u32 lun; + u8 fmt; + u8 op_code; + u8 tmf_flags; + u8 type; +#define IBMVFC_TRC_START 0x00 +#define IBMVFC_TRC_END 0xff + union { + struct ibmvfc_trace_start_entry start; + struct ibmvfc_trace_end_entry end; + } u; +} __packed __aligned(8); + +enum ibmvfc_crq_formats { + IBMVFC_CMD_FORMAT = 0x01, + IBMVFC_ASYNC_EVENT = 0x02, + IBMVFC_MAD_FORMAT = 0x04, +}; + +enum ibmvfc_async_event { + IBMVFC_AE_ELS_PLOGI = 0x0001, + IBMVFC_AE_ELS_LOGO = 0x0002, + IBMVFC_AE_ELS_PRLO = 0x0004, + IBMVFC_AE_SCN_NPORT = 0x0008, + IBMVFC_AE_SCN_GROUP = 0x0010, + IBMVFC_AE_SCN_DOMAIN = 0x0020, + IBMVFC_AE_SCN_FABRIC = 0x0040, + IBMVFC_AE_LINK_UP = 0x0080, + IBMVFC_AE_LINK_DOWN = 0x0100, + IBMVFC_AE_LINK_DEAD = 0x0200, + IBMVFC_AE_HALT = 0x0400, + IBMVFC_AE_RESUME = 0x0800, + IBMVFC_AE_ADAPTER_FAILED = 0x1000, + IBMVFC_AE_FPIN = 0x2000, +}; + +struct ibmvfc_async_desc { + const char *desc; + enum ibmvfc_async_event ae; + int log_level; +}; + +struct ibmvfc_crq { + volatile u8 valid; + volatile u8 format; + u8 reserved[6]; + volatile __be64 ioba; +} __packed __aligned(8); + +struct ibmvfc_sub_crq { + struct ibmvfc_crq crq; + __be64 reserved[2]; +} __packed __aligned(8); + +enum ibmvfc_ae_link_state { + IBMVFC_AE_LS_LINK_UP = 0x01, + IBMVFC_AE_LS_LINK_BOUNCED = 0x02, + IBMVFC_AE_LS_LINK_DOWN = 0x04, + IBMVFC_AE_LS_LINK_DEAD = 0x08, +}; + +enum ibmvfc_ae_fpin_status { + IBMVFC_AE_FPIN_LINK_CONGESTED = 0x1, + IBMVFC_AE_FPIN_PORT_CONGESTED = 0x2, + IBMVFC_AE_FPIN_PORT_CLEARED = 0x3, + IBMVFC_AE_FPIN_PORT_DEGRADED = 0x4, +}; + +struct ibmvfc_async_crq { + volatile u8 valid; + u8 link_state; + u8 fpin_status; + u8 pad; + __be32 pad2; + volatile __be64 event; + volatile __be64 scsi_id; + volatile __be64 wwpn; + volatile __be64 node_name; + __be64 reserved; +} __packed __aligned(8); + +union ibmvfc_iu { + struct ibmvfc_mad_common mad_common; + struct ibmvfc_npiv_login_mad npiv_login; + struct ibmvfc_npiv_logout_mad npiv_logout; + struct ibmvfc_discover_targets discover_targets; + struct ibmvfc_port_login plogi; + struct ibmvfc_process_login prli; + struct ibmvfc_move_login move_login; + struct ibmvfc_query_tgt query_tgt; + struct ibmvfc_implicit_logout implicit_logout; + struct ibmvfc_tmf tmf; + struct ibmvfc_cmd cmd; + struct ibmvfc_passthru_mad passthru; + struct ibmvfc_channel_enquiry channel_enquiry; + struct ibmvfc_channel_setup_mad channel_setup; + struct ibmvfc_connection_info connection_info; +} __packed __aligned(8); + +enum ibmvfc_target_action { + IBMVFC_TGT_ACTION_NONE = 0, + IBMVFC_TGT_ACTION_INIT, + IBMVFC_TGT_ACTION_INIT_WAIT, + IBMVFC_TGT_ACTION_LOGOUT_RPORT, + IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT, + IBMVFC_TGT_ACTION_DEL_RPORT, + IBMVFC_TGT_ACTION_DELETED_RPORT, + IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT, + IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT, +}; + +struct ibmvfc_target { + struct list_head queue; + struct ibmvfc_host *vhost; + u64 scsi_id; + u64 wwpn; + u64 new_scsi_id; + struct fc_rport *rport; + int target_id; + enum ibmvfc_target_action action; + int need_login; + int add_rport; + int init_retries; + int logo_rcvd; + int move_login; + u32 cancel_key; + struct ibmvfc_service_parms service_parms; + struct ibmvfc_service_parms service_parms_change; + struct fc_rport_identifiers ids; + void (*job_step) (struct 
ibmvfc_target *); + struct timer_list timer; + struct kref kref; +}; + +/* a unit of work for the hosting partition */ +struct ibmvfc_event { + struct list_head queue_list; + struct list_head cancel; + struct ibmvfc_host *vhost; + struct ibmvfc_queue *queue; + struct ibmvfc_target *tgt; + struct scsi_cmnd *cmnd; + atomic_t free; + atomic_t active; + union ibmvfc_iu *xfer_iu; + void (*done)(struct ibmvfc_event *evt); + void (*_done)(struct ibmvfc_event *evt); + struct ibmvfc_crq crq; + union ibmvfc_iu iu; + union ibmvfc_iu *sync_iu; + struct srp_direct_buf *ext_list; + dma_addr_t ext_list_token; + struct completion comp; + struct completion *eh_comp; + struct timer_list timer; + u16 hwq; +}; + +/* a pool of event structs for use */ +struct ibmvfc_event_pool { + struct ibmvfc_event *events; + u32 size; + union ibmvfc_iu *iu_storage; + dma_addr_t iu_token; +}; + +enum ibmvfc_msg_fmt { + IBMVFC_CRQ_FMT = 0, + IBMVFC_ASYNC_FMT, + IBMVFC_SUB_CRQ_FMT, +}; + +union ibmvfc_msgs { + void *handle; + struct ibmvfc_crq *crq; + struct ibmvfc_async_crq *async; + struct ibmvfc_sub_crq *scrq; +}; + +struct ibmvfc_queue { + union ibmvfc_msgs msgs; + dma_addr_t msg_token; + enum ibmvfc_msg_fmt fmt; + int size, cur; + spinlock_t _lock; + spinlock_t *q_lock; + + struct ibmvfc_host *vhost; + struct ibmvfc_event_pool evt_pool; + struct list_head sent; + struct list_head free; + spinlock_t l_lock; + + union ibmvfc_iu cancel_rsp; + + /* Sub-CRQ fields */ + unsigned long cookie; + unsigned long vios_cookie; + unsigned long hw_irq; + unsigned long irq; + unsigned long hwq_id; + char name[32]; +}; + +struct ibmvfc_scsi_channels { + struct ibmvfc_queue *scrqs; + unsigned int active_queues; +}; + +enum ibmvfc_host_action { + IBMVFC_HOST_ACTION_NONE = 0, + IBMVFC_HOST_ACTION_RESET, + IBMVFC_HOST_ACTION_REENABLE, + IBMVFC_HOST_ACTION_LOGO, + IBMVFC_HOST_ACTION_LOGO_WAIT, + IBMVFC_HOST_ACTION_INIT, + IBMVFC_HOST_ACTION_INIT_WAIT, + IBMVFC_HOST_ACTION_QUERY, + IBMVFC_HOST_ACTION_QUERY_TGTS, + IBMVFC_HOST_ACTION_TGT_DEL, + IBMVFC_HOST_ACTION_ALLOC_TGTS, + IBMVFC_HOST_ACTION_TGT_INIT, + IBMVFC_HOST_ACTION_TGT_DEL_FAILED, +}; + +enum ibmvfc_host_state { + IBMVFC_NO_CRQ = 0, + IBMVFC_INITIALIZING, + IBMVFC_ACTIVE, + IBMVFC_HALTED, + IBMVFC_LINK_DOWN, + IBMVFC_LINK_DEAD, + IBMVFC_HOST_OFFLINE, +}; + +struct ibmvfc_host { + char name[8]; + struct list_head queue; + struct Scsi_Host *host; + enum ibmvfc_host_state state; + enum ibmvfc_host_action action; +#define IBMVFC_NUM_TRACE_INDEX_BITS 8 +#define IBMVFC_NUM_TRACE_ENTRIES (1 << IBMVFC_NUM_TRACE_INDEX_BITS) +#define IBMVFC_TRACE_INDEX_MASK (IBMVFC_NUM_TRACE_ENTRIES - 1) +#define IBMVFC_TRACE_SIZE (sizeof(struct ibmvfc_trace_entry) * IBMVFC_NUM_TRACE_ENTRIES) + struct ibmvfc_trace_entry *trace; + atomic_t trace_index; + int num_targets; + struct list_head targets; + struct list_head purge; + struct device *dev; + struct dma_pool *sg_pool; + mempool_t *tgt_pool; + struct ibmvfc_queue crq; + struct ibmvfc_queue async_crq; + struct ibmvfc_scsi_channels scsi_scrqs; + struct ibmvfc_npiv_login login_info; + union ibmvfc_npiv_login_data *login_buf; + dma_addr_t login_buf_dma; + struct ibmvfc_channel_setup *channel_setup_buf; + dma_addr_t channel_setup_dma; + int disc_buf_sz; + int log_level; + struct ibmvfc_discover_targets_entry *disc_buf; + struct mutex passthru_mutex; + int max_vios_scsi_channels; + int task_set; + int init_retries; + int discovery_threads; + int abort_threads; + int client_migrated; + int reinit; + int delay_init; + int scan_complete; + int scan_timeout; + int 
logged_in; + int mq_enabled; + int using_channels; + int do_enquiry; + int client_scsi_channels; + int aborting_passthru; + int events_to_log; +#define IBMVFC_AE_LINKUP 0x0001 +#define IBMVFC_AE_LINKDOWN 0x0002 +#define IBMVFC_AE_RSCN 0x0004 + dma_addr_t disc_buf_dma; + unsigned int partition_number; + char partition_name[97]; + void (*job_step) (struct ibmvfc_host *); + struct task_struct *work_thread; + struct tasklet_struct tasklet; + struct work_struct rport_add_work_q; + wait_queue_head_t init_wait_q; + wait_queue_head_t work_wait_q; +}; + +#define DBG_CMD(CMD) do { if (ibmvfc_debug) CMD; } while (0) + +#define tgt_dbg(t, fmt, ...) \ + DBG_CMD(dev_info((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)) + +#define tgt_info(t, fmt, ...) \ + dev_info((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__) + +#define tgt_err(t, fmt, ...) \ + dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__) + +#define tgt_log(t, level, fmt, ...) \ + do { \ + if ((t)->vhost->log_level >= level) \ + tgt_err(t, fmt, ##__VA_ARGS__); \ + } while (0) + +#define ibmvfc_dbg(vhost, ...) \ + DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) + +#define ibmvfc_log(vhost, level, ...) \ + do { \ + if ((vhost)->log_level >= level) \ + dev_err((vhost)->dev, ##__VA_ARGS__); \ + } while (0) + +#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __func__)) +#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __func__)) + +#ifdef CONFIG_SCSI_IBMVFC_TRACE +#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr) +#define ibmvfc_remove_trace_file(kobj, attr) sysfs_remove_bin_file(kobj, attr) +#else +#define ibmvfc_create_trace_file(kobj, attr) 0 +#define ibmvfc_remove_trace_file(kobj, attr) do { } while (0) +#endif + +#endif diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c new file mode 100644 index 000000000..595992996 --- /dev/null +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -0,0 +1,2434 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* ------------------------------------------------------------ + * ibmvscsi.c + * (C) Copyright IBM Corporation 1994, 2004 + * Authors: Colin DeVilbiss (devilbis@us.ibm.com) + * Santiago Leon (santil@us.ibm.com) + * Dave Boutcher (sleddog@us.ibm.com) + * + * ------------------------------------------------------------ + * Emulation of a SCSI host adapter for Virtual I/O devices + * + * This driver supports the SCSI adapter implemented by the IBM + * Power5 firmware. That SCSI adapter is not a physical adapter, + * but allows Linux SCSI peripheral drivers to directly + * access devices in another logical partition on the physical system. + * + * The virtual adapter(s) are present in the open firmware device + * tree just like real adapters. + * + * One of the capabilities provided on these systems is the ability + * to DMA between partitions. The architecture states that for VSCSI, + * the server side is allowed to DMA to and from the client. The client + * is never trusted to DMA to or from the server directly. + * + * Messages are sent between partitions on a "Command/Response Queue" + * (CRQ), which is just a buffer of 16 byte entries in the receiver's + * Senders cannot access the buffer directly, but send messages by + * making a hypervisor call and passing in the 16 bytes. 
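+ * (On the Linux side each 16 byte entry is represented by a
+ * struct viosrp_crq, carrying among other fields the valid byte, a
+ * format byte, a timeout, the IU length, and the DMA address of the
+ * SRP IU; see initialize_event_pool() and init_event_struct() later
+ * in this file.)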
The hypervisor + * puts the message in the next 16 byte space in round-robin fashion, + * turns on the high order bit of the message (the valid bit), and + * generates an interrupt to the receiver (if interrupts are turned on.) + * The receiver just turns off the valid bit when they have copied out + * the message. + * + * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit + * (IU) (as defined in the T10 standard available at www.t10.org), gets + * a DMA address for the message, and sends it to the server as the + * payload of a CRQ message. The server DMAs the SRP IU and processes it, + * including doing any additional data transfers. When it is done, it + * DMAs the SRP response back to the same address as the request came from, + * and sends a CRQ message back to inform the client that the request has + * completed. + * + * TODO: This is currently pretty tied to the IBM pSeries hypervisor + * interfaces. It would be really nice to abstract this above an RDMA + * layer. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ibmvscsi.h" + +/* The values below are somewhat arbitrary default values, but + * OS/400 will use 3 busses (disks, CDs, tapes, I think.) + * Note that there are 3 bits of channel value, 6 bits of id, and + * 5 bits of LUN. + */ +static int max_id = 64; +static int max_channel = 3; +static int init_timeout = 300; +static int login_timeout = 60; +static int info_timeout = 30; +static int abort_timeout = 60; +static int reset_timeout = 60; +static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; +static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; +static int fast_fail = 1; +static int client_reserve = 1; +static char partition_name[96] = "UNKNOWN"; +static unsigned int partition_number = -1; +static LIST_HEAD(ibmvscsi_head); +static DEFINE_SPINLOCK(ibmvscsi_driver_lock); + +static struct scsi_transport_template *ibmvscsi_transport_template; + +#define IBMVSCSI_VERSION "1.5.9" + +MODULE_DESCRIPTION("IBM Virtual SCSI"); +MODULE_AUTHOR("Dave Boutcher"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(IBMVSCSI_VERSION); + +module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(max_id, "Largest ID value for each channel [Default=64]"); +module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(max_channel, "Largest channel value [Default=3]"); +module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); +module_param_named(max_requests, max_requests, int, S_IRUGO); +MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); +module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(fast_fail, "Enable fast fail. 
[Default=1]"); +module_param_named(client_reserve, client_reserve, int, S_IRUGO ); +MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release"); + +static void ibmvscsi_handle_crq(struct viosrp_crq *crq, + struct ibmvscsi_host_data *hostdata); + +/* ------------------------------------------------------------ + * Routines for managing the command/response queue + */ +/** + * ibmvscsi_handle_event: - Interrupt handler for crq events + * @irq: number of irq to handle, not used + * @dev_instance: ibmvscsi_host_data of host that received interrupt + * + * Disables interrupts and schedules srp_task + * Always returns IRQ_HANDLED + */ +static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance) +{ + struct ibmvscsi_host_data *hostdata = + (struct ibmvscsi_host_data *)dev_instance; + vio_disable_interrupts(to_vio_dev(hostdata->dev)); + tasklet_schedule(&hostdata->srp_task); + return IRQ_HANDLED; +} + +/** + * ibmvscsi_release_crq_queue() - Deallocates data and unregisters CRQ + * @queue: crq_queue to initialize and register + * @hostdata: ibmvscsi_host_data of host + * @max_requests: maximum requests (unused) + * + * Frees irq, deallocates a page for messages, unmaps dma, and unregisters + * the crq with the hypervisor. + */ +static void ibmvscsi_release_crq_queue(struct crq_queue *queue, + struct ibmvscsi_host_data *hostdata, + int max_requests) +{ + long rc = 0; + struct vio_dev *vdev = to_vio_dev(hostdata->dev); + free_irq(vdev->irq, (void *)hostdata); + tasklet_kill(&hostdata->srp_task); + do { + if (rc) + msleep(100); + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); + dma_unmap_single(hostdata->dev, + queue->msg_token, + queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); + free_page((unsigned long)queue->msgs); +} + +/** + * crq_queue_next_crq: - Returns the next entry in message queue + * @queue: crq_queue to use + * + * Returns pointer to next entry in queue, or NULL if there are no new + * entried in the CRQ. + */ +static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue) +{ + struct viosrp_crq *crq; + unsigned long flags; + + spin_lock_irqsave(&queue->lock, flags); + crq = &queue->msgs[queue->cur]; + if (crq->valid != VIOSRP_CRQ_FREE) { + if (++queue->cur == queue->size) + queue->cur = 0; + + /* Ensure the read of the valid bit occurs before reading any + * other bits of the CRQ entry + */ + rmb(); + } else + crq = NULL; + spin_unlock_irqrestore(&queue->lock, flags); + + return crq; +} + +/** + * ibmvscsi_send_crq: - Send a CRQ + * @hostdata: the adapter + * @word1: the first 64 bits of the data + * @word2: the second 64 bits of the data + */ +static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, + u64 word1, u64 word2) +{ + struct vio_dev *vdev = to_vio_dev(hostdata->dev); + + /* + * Ensure the command buffer is flushed to memory before handing it + * over to the VIOS to prevent it from fetching any stale data. 
+ */ + mb(); + return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); +} + +/** + * ibmvscsi_task: - Process srps asynchronously + * @data: ibmvscsi_host_data of host + */ +static void ibmvscsi_task(void *data) +{ + struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data; + struct vio_dev *vdev = to_vio_dev(hostdata->dev); + struct viosrp_crq *crq; + int done = 0; + + while (!done) { + /* Pull all the valid messages off the CRQ */ + while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) { + ibmvscsi_handle_crq(crq, hostdata); + crq->valid = VIOSRP_CRQ_FREE; + wmb(); + } + + vio_enable_interrupts(vdev); + crq = crq_queue_next_crq(&hostdata->queue); + if (crq != NULL) { + vio_disable_interrupts(vdev); + ibmvscsi_handle_crq(crq, hostdata); + crq->valid = VIOSRP_CRQ_FREE; + wmb(); + } else { + done = 1; + } + } +} + +static void gather_partition_info(void) +{ + const char *ppartition_name; + const __be32 *p_number_ptr; + + /* Retrieve information about this partition */ + if (!of_root) + return; + + of_node_get(of_root); + + ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL); + if (ppartition_name) + strscpy(partition_name, ppartition_name, + sizeof(partition_name)); + p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL); + if (p_number_ptr) + partition_number = of_read_number(p_number_ptr, 1); + of_node_put(of_root); +} + +static void set_adapter_info(struct ibmvscsi_host_data *hostdata) +{ + memset(&hostdata->madapter_info, 0x00, + sizeof(hostdata->madapter_info)); + + dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION); + strcpy(hostdata->madapter_info.srp_version, SRP_VERSION); + + strncpy(hostdata->madapter_info.partition_name, partition_name, + sizeof(hostdata->madapter_info.partition_name)); + + hostdata->madapter_info.partition_number = + cpu_to_be32(partition_number); + + hostdata->madapter_info.mad_version = cpu_to_be32(SRP_MAD_VERSION_1); + hostdata->madapter_info.os_type = cpu_to_be32(SRP_MAD_OS_LINUX); +} + +/** + * ibmvscsi_reset_crq_queue() - resets a crq after a failure + * @queue: crq_queue to initialize and register + * @hostdata: ibmvscsi_host_data of host + */ +static int ibmvscsi_reset_crq_queue(struct crq_queue *queue, + struct ibmvscsi_host_data *hostdata) +{ + int rc = 0; + struct vio_dev *vdev = to_vio_dev(hostdata->dev); + + /* Close the CRQ */ + do { + if (rc) + msleep(100); + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); + + /* Clean out the queue */ + memset(queue->msgs, 0x00, PAGE_SIZE); + queue->cur = 0; + + set_adapter_info(hostdata); + + /* And re-open it again */ + rc = plpar_hcall_norets(H_REG_CRQ, + vdev->unit_address, + queue->msg_token, PAGE_SIZE); + if (rc == H_CLOSED) { + /* Adapter is good, but other end is not ready */ + dev_warn(hostdata->dev, "Partner adapter not ready\n"); + } else if (rc != 0) { + dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc); + } + return rc; +} + +/** + * ibmvscsi_init_crq_queue() - Initializes and registers CRQ with hypervisor + * @queue: crq_queue to initialize and register + * @hostdata: ibmvscsi_host_data of host + * @max_requests: maximum requests (unused) + * + * Allocates a page for messages, maps it for dma, and registers + * the crq with the hypervisor. + * Returns zero on success. 
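+ *
+ * (Two hypervisor return codes get special handling below: H_RESOURCE
+ * usually means a previous kernel, e.g. across a kexec, still holds the
+ * CRQ, so the queue is reset and re-registered; H_CLOSED only means the
+ * partner adapter is not ready yet and is still treated as success.)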
+ */ +static int ibmvscsi_init_crq_queue(struct crq_queue *queue, + struct ibmvscsi_host_data *hostdata, + int max_requests) +{ + int rc; + int retrc; + struct vio_dev *vdev = to_vio_dev(hostdata->dev); + + queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL); + + if (!queue->msgs) + goto malloc_failed; + queue->size = PAGE_SIZE / sizeof(*queue->msgs); + + queue->msg_token = dma_map_single(hostdata->dev, queue->msgs, + queue->size * sizeof(*queue->msgs), + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(hostdata->dev, queue->msg_token)) + goto map_failed; + + gather_partition_info(); + set_adapter_info(hostdata); + + retrc = rc = plpar_hcall_norets(H_REG_CRQ, + vdev->unit_address, + queue->msg_token, PAGE_SIZE); + if (rc == H_RESOURCE) + /* maybe kexecing and resource is busy. try a reset */ + rc = ibmvscsi_reset_crq_queue(queue, + hostdata); + + if (rc == H_CLOSED) { + /* Adapter is good, but other end is not ready */ + dev_warn(hostdata->dev, "Partner adapter not ready\n"); + retrc = 0; + } else if (rc != 0) { + dev_warn(hostdata->dev, "Error %d opening adapter\n", rc); + goto reg_crq_failed; + } + + queue->cur = 0; + spin_lock_init(&queue->lock); + + tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task, + (unsigned long)hostdata); + + if (request_irq(vdev->irq, + ibmvscsi_handle_event, + 0, "ibmvscsi", (void *)hostdata) != 0) { + dev_err(hostdata->dev, "couldn't register irq 0x%x\n", + vdev->irq); + goto req_irq_failed; + } + + rc = vio_enable_interrupts(vdev); + if (rc != 0) { + dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc); + goto req_irq_failed; + } + + return retrc; + + req_irq_failed: + tasklet_kill(&hostdata->srp_task); + rc = 0; + do { + if (rc) + msleep(100); + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); + reg_crq_failed: + dma_unmap_single(hostdata->dev, + queue->msg_token, + queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); + map_failed: + free_page((unsigned long)queue->msgs); + malloc_failed: + return -1; +} + +/** + * ibmvscsi_reenable_crq_queue() - reenables a crq after + * @queue: crq_queue to initialize and register + * @hostdata: ibmvscsi_host_data of host + */ +static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue, + struct ibmvscsi_host_data *hostdata) +{ + int rc = 0; + struct vio_dev *vdev = to_vio_dev(hostdata->dev); + + set_adapter_info(hostdata); + + /* Re-enable the CRQ */ + do { + if (rc) + msleep(100); + rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); + } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); + + if (rc) + dev_err(hostdata->dev, "Error %d enabling adapter\n", rc); + return rc; +} + +/* ------------------------------------------------------------ + * Routines for the event pool and event structs + */ +/** + * initialize_event_pool: - Allocates and initializes the event pool for a host + * @pool: event_pool to be initialized + * @size: Number of events in pool + * @hostdata: ibmvscsi_host_data who owns the event pool + * + * Returns zero on success. 
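+ *
+ * (Layout: pool->events is a kcalloc'd array of @size entries and
+ * pool->iu_storage is a single dma_alloc_coherent() block holding the
+ * matching SRP IUs; event i is pre-initialized so that its CRQ
+ * descriptor's IU_data_ptr points at iu_token + i * sizeof(*evt->xfer_iu).)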
+ */ +static int initialize_event_pool(struct event_pool *pool, + int size, struct ibmvscsi_host_data *hostdata) +{ + int i; + + pool->size = size; + pool->next = 0; + pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL); + if (!pool->events) + return -ENOMEM; + + pool->iu_storage = + dma_alloc_coherent(hostdata->dev, + pool->size * sizeof(*pool->iu_storage), + &pool->iu_token, GFP_KERNEL); + if (!pool->iu_storage) { + kfree(pool->events); + return -ENOMEM; + } + + for (i = 0; i < pool->size; ++i) { + struct srp_event_struct *evt = &pool->events[i]; + memset(&evt->crq, 0x00, sizeof(evt->crq)); + atomic_set(&evt->free, 1); + evt->crq.valid = VIOSRP_CRQ_CMD_RSP; + evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu)); + evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token + + sizeof(*evt->xfer_iu) * i); + evt->xfer_iu = pool->iu_storage + i; + evt->hostdata = hostdata; + evt->ext_list = NULL; + evt->ext_list_token = 0; + } + + return 0; +} + +/** + * release_event_pool() - Frees memory of an event pool of a host + * @pool: event_pool to be released + * @hostdata: ibmvscsi_host_data who owns the even pool + * + * Returns zero on success. + */ +static void release_event_pool(struct event_pool *pool, + struct ibmvscsi_host_data *hostdata) +{ + int i, in_use = 0; + for (i = 0; i < pool->size; ++i) { + if (atomic_read(&pool->events[i].free) != 1) + ++in_use; + if (pool->events[i].ext_list) { + dma_free_coherent(hostdata->dev, + SG_ALL * sizeof(struct srp_direct_buf), + pool->events[i].ext_list, + pool->events[i].ext_list_token); + } + } + if (in_use) + dev_warn(hostdata->dev, "releasing event pool with %d " + "events still in use?\n", in_use); + kfree(pool->events); + dma_free_coherent(hostdata->dev, + pool->size * sizeof(*pool->iu_storage), + pool->iu_storage, pool->iu_token); +} + +/** + * valid_event_struct: - Determines if event is valid. + * @pool: event_pool that contains the event + * @evt: srp_event_struct to be checked for validity + * + * Returns zero if event is invalid, one otherwise. +*/ +static int valid_event_struct(struct event_pool *pool, + struct srp_event_struct *evt) +{ + int index = evt - pool->events; + if (index < 0 || index >= pool->size) /* outside of bounds */ + return 0; + if (evt != pool->events + index) /* unaligned */ + return 0; + return 1; +} + +/** + * free_event_struct() - Changes status of event to "free" + * @pool: event_pool that contains the event + * @evt: srp_event_struct to be modified + */ +static void free_event_struct(struct event_pool *pool, + struct srp_event_struct *evt) +{ + if (!valid_event_struct(pool, evt)) { + dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p " + "(not in pool %p)\n", evt, pool->events); + return; + } + if (atomic_inc_return(&evt->free) != 1) { + dev_err(evt->hostdata->dev, "Freeing event_struct %p " + "which is not in use!\n", evt); + return; + } +} + +/** + * get_event_struct() - Gets the next free event in pool + * @pool: event_pool that contains the events to be searched + * + * Returns the next event in "free" state, and NULL if none are free. + * Note that no synchronization is done here, we assume the host_lock + * will syncrhonze things. 
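+ *
+ * (The "free" protocol: initialize_event_pool() sets each event's
+ * atomic free flag to 1; atomic_dec_if_positive() below claims an
+ * event only if the flag is still 1, dropping it to 0, and
+ * free_event_struct() releases it by incrementing the flag back to 1.)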
+*/ +static struct srp_event_struct *get_event_struct(struct event_pool *pool) +{ + int i; + int poolsize = pool->size; + int offset = pool->next; + + for (i = 0; i < poolsize; i++) { + offset = (offset + 1) % poolsize; + if (!atomic_dec_if_positive(&pool->events[offset].free)) { + pool->next = offset; + return &pool->events[offset]; + } + } + + printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n"); + return NULL; +} + +/** + * init_event_struct: Initialize fields in an event struct that are always + * required. + * @evt_struct: The event + * @done: Routine to call when the event is responded to + * @format: SRP or MAD format + * @timeout: timeout value set in the CRQ + */ +static void init_event_struct(struct srp_event_struct *evt_struct, + void (*done) (struct srp_event_struct *), + u8 format, + int timeout) +{ + evt_struct->cmnd = NULL; + evt_struct->cmnd_done = NULL; + evt_struct->sync_srp = NULL; + evt_struct->crq.format = format; + evt_struct->crq.timeout = cpu_to_be16(timeout); + evt_struct->done = done; +} + +/* ------------------------------------------------------------ + * Routines for receiving SCSI responses from the hosting partition + */ + +/* + * set_srp_direction: Set the fields in the srp related to data + * direction and number of buffers based on the direction in + * the scsi_cmnd and the number of buffers + */ +static void set_srp_direction(struct scsi_cmnd *cmd, + struct srp_cmd *srp_cmd, + int numbuf) +{ + u8 fmt; + + if (numbuf == 0) + return; + + if (numbuf == 1) + fmt = SRP_DATA_DESC_DIRECT; + else { + fmt = SRP_DATA_DESC_INDIRECT; + numbuf = min(numbuf, MAX_INDIRECT_BUFS); + + if (cmd->sc_data_direction == DMA_TO_DEVICE) + srp_cmd->data_out_desc_cnt = numbuf; + else + srp_cmd->data_in_desc_cnt = numbuf; + } + + if (cmd->sc_data_direction == DMA_TO_DEVICE) + srp_cmd->buf_fmt = fmt << 4; + else + srp_cmd->buf_fmt = fmt; +} + +/** + * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format + * @cmd: srp_cmd whose additional_data member will be unmapped + * @evt_struct: the event + * @dev: device for which the memory is mapped + */ +static void unmap_cmd_data(struct srp_cmd *cmd, + struct srp_event_struct *evt_struct, + struct device *dev) +{ + u8 out_fmt, in_fmt; + + out_fmt = cmd->buf_fmt >> 4; + in_fmt = cmd->buf_fmt & ((1U << 4) - 1); + + if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC) + return; + + if (evt_struct->cmnd) + scsi_dma_unmap(evt_struct->cmnd); +} + +static int map_sg_list(struct scsi_cmnd *cmd, int nseg, + struct srp_direct_buf *md) +{ + int i; + struct scatterlist *sg; + u64 total_length = 0; + + scsi_for_each_sg(cmd, sg, nseg, i) { + struct srp_direct_buf *descr = md + i; + descr->va = cpu_to_be64(sg_dma_address(sg)); + descr->len = cpu_to_be32(sg_dma_len(sg)); + descr->key = 0; + total_length += sg_dma_len(sg); + } + return total_length; +} + +/** + * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields + * @cmd: struct scsi_cmnd with the scatterlist + * @evt_struct: struct srp_event_struct to map + * @srp_cmd: srp_cmd that contains the memory descriptor + * @dev: device for which to map dma memory + * + * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd. + * Returns 1 on success. 
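+ *
+ * (Three cases are handled below: a single mapped segment uses one
+ * direct descriptor; up to MAX_INDIRECT_BUFS segments fit in the
+ * indirect descriptor list embedded in the SRP command itself; larger
+ * scatterlists spill into the separately allocated ext_list table
+ * referenced through table_desc.)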
+*/ +static int map_sg_data(struct scsi_cmnd *cmd, + struct srp_event_struct *evt_struct, + struct srp_cmd *srp_cmd, struct device *dev) +{ + + int sg_mapped; + u64 total_length = 0; + struct srp_direct_buf *data = + (struct srp_direct_buf *) srp_cmd->add_data; + struct srp_indirect_buf *indirect = + (struct srp_indirect_buf *) data; + + sg_mapped = scsi_dma_map(cmd); + if (!sg_mapped) + return 1; + else if (sg_mapped < 0) + return 0; + + set_srp_direction(cmd, srp_cmd, sg_mapped); + + /* special case; we can use a single direct descriptor */ + if (sg_mapped == 1) { + map_sg_list(cmd, sg_mapped, data); + return 1; + } + + indirect->table_desc.va = 0; + indirect->table_desc.len = cpu_to_be32(sg_mapped * + sizeof(struct srp_direct_buf)); + indirect->table_desc.key = 0; + + if (sg_mapped <= MAX_INDIRECT_BUFS) { + total_length = map_sg_list(cmd, sg_mapped, + &indirect->desc_list[0]); + indirect->len = cpu_to_be32(total_length); + return 1; + } + + /* get indirect table */ + if (!evt_struct->ext_list) { + evt_struct->ext_list = dma_alloc_coherent(dev, + SG_ALL * sizeof(struct srp_direct_buf), + &evt_struct->ext_list_token, 0); + if (!evt_struct->ext_list) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + sdev_printk(KERN_ERR, cmd->device, + "Can't allocate memory " + "for indirect table\n"); + scsi_dma_unmap(cmd); + return 0; + } + } + + total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list); + + indirect->len = cpu_to_be32(total_length); + indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token); + indirect->table_desc.len = cpu_to_be32(sg_mapped * + sizeof(indirect->desc_list[0])); + memcpy(indirect->desc_list, evt_struct->ext_list, + MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf)); + return 1; +} + +/** + * map_data_for_srp_cmd: - Calls functions to map data for srp cmds + * @cmd: struct scsi_cmnd with the memory to be mapped + * @evt_struct: struct srp_event_struct to map + * @srp_cmd: srp_cmd that contains the memory descriptor + * @dev: dma device for which to map dma memory + * + * Called by scsi_cmd_to_srp_cmd() when converting scsi cmds to srp cmds + * Returns 1 on success. +*/ +static int map_data_for_srp_cmd(struct scsi_cmnd *cmd, + struct srp_event_struct *evt_struct, + struct srp_cmd *srp_cmd, struct device *dev) +{ + switch (cmd->sc_data_direction) { + case DMA_FROM_DEVICE: + case DMA_TO_DEVICE: + break; + case DMA_NONE: + return 1; + case DMA_BIDIRECTIONAL: + sdev_printk(KERN_ERR, cmd->device, + "Can't map DMA_BIDIRECTIONAL to read/write\n"); + return 0; + default: + sdev_printk(KERN_ERR, cmd->device, + "Unknown data direction 0x%02x; can't map!\n", + cmd->sc_data_direction); + return 0; + } + + return map_sg_data(cmd, evt_struct, srp_cmd, dev); +} + +/** + * purge_requests: Our virtual adapter just shut down. 
purge any sent requests + * @hostdata: the adapter + * @error_code: error code to return as the 'result' + */ +static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code) +{ + struct srp_event_struct *evt; + unsigned long flags; + + spin_lock_irqsave(hostdata->host->host_lock, flags); + while (!list_empty(&hostdata->sent)) { + evt = list_first_entry(&hostdata->sent, struct srp_event_struct, list); + list_del(&evt->list); + del_timer(&evt->timer); + + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + if (evt->cmnd) { + evt->cmnd->result = (error_code << 16); + unmap_cmd_data(&evt->iu.srp.cmd, evt, + evt->hostdata->dev); + if (evt->cmnd_done) + evt->cmnd_done(evt->cmnd); + } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT && + evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ) + evt->done(evt); + free_event_struct(&evt->hostdata->pool, evt); + spin_lock_irqsave(hostdata->host->host_lock, flags); + } + spin_unlock_irqrestore(hostdata->host->host_lock, flags); +} + +/** + * ibmvscsi_set_request_limit - Set the adapter request_limit in response to + * an adapter failure, reset, or SRP Login. Done under host lock to prevent + * race with SCSI command submission. + * @hostdata: adapter to adjust + * @limit: new request limit + */ +static void ibmvscsi_set_request_limit(struct ibmvscsi_host_data *hostdata, int limit) +{ + unsigned long flags; + + spin_lock_irqsave(hostdata->host->host_lock, flags); + atomic_set(&hostdata->request_limit, limit); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); +} + +/** + * ibmvscsi_reset_host - Reset the connection to the server + * @hostdata: struct ibmvscsi_host_data to reset +*/ +static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata) +{ + scsi_block_requests(hostdata->host); + ibmvscsi_set_request_limit(hostdata, 0); + + purge_requests(hostdata, DID_ERROR); + hostdata->action = IBMVSCSI_HOST_ACTION_RESET; + wake_up(&hostdata->work_wait_q); +} + +/** + * ibmvscsi_timeout - Internal command timeout handler + * @t: struct srp_event_struct that timed out + * + * Called when an internally generated command times out +*/ +static void ibmvscsi_timeout(struct timer_list *t) +{ + struct srp_event_struct *evt_struct = from_timer(evt_struct, t, timer); + struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; + + dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n", + evt_struct->iu.srp.cmd.opcode); + + ibmvscsi_reset_host(hostdata); +} + + +/* ------------------------------------------------------------ + * Routines for sending and receiving SRPs + */ +/** + * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq() + * @evt_struct: evt_struct to be sent + * @hostdata: ibmvscsi_host_data of host + * @timeout: timeout in seconds - 0 means do not time command + * + * Returns the value returned from ibmvscsi_send_crq(). (Zero for success) + * Note that this routine assumes that host_lock is held for synchronization +*/ +static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, + struct ibmvscsi_host_data *hostdata, + unsigned long timeout) +{ + __be64 *crq_as_u64 = (__be64 *)&evt_struct->crq; + int request_status = 0; + int rc; + int srp_req = 0; + + /* If we have exhausted our request limit, just fail this request, + * unless it is for a reset or abort. 
+ * Note that there are rare cases involving driver generated requests + * (such as task management requests) that the mid layer may think we + * can handle more requests (can_queue) when we actually can't + */ + if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) { + srp_req = 1; + request_status = + atomic_dec_if_positive(&hostdata->request_limit); + /* If request limit was -1 when we started, it is now even + * less than that + */ + if (request_status < -1) + goto send_error; + /* Otherwise, we may have run out of requests. */ + /* If request limit was 0 when we started the adapter is in the + * process of performing a login with the server adapter, or + * we may have run out of requests. + */ + else if (request_status == -1 && + evt_struct->iu.srp.login_req.opcode != SRP_LOGIN_REQ) + goto send_busy; + /* Abort and reset calls should make it through. + * Nothing except abort and reset should use the last two + * slots unless we had two or less to begin with. + */ + else if (request_status < 2 && + evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) { + /* In the case that we have less than two requests + * available, check the server limit as a combination + * of the request limit and the number of requests + * in-flight (the size of the send list). If the + * server limit is greater than 2, return busy so + * that the last two are reserved for reset and abort. + */ + int server_limit = request_status; + struct srp_event_struct *tmp_evt; + + list_for_each_entry(tmp_evt, &hostdata->sent, list) { + server_limit++; + } + + if (server_limit > 2) + goto send_busy; + } + } + + /* Copy the IU into the transfer area */ + *evt_struct->xfer_iu = evt_struct->iu; + evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct; + + /* Add this to the sent list. We need to do this + * before we actually send + * in case it comes back REALLY fast + */ + list_add_tail(&evt_struct->list, &hostdata->sent); + + timer_setup(&evt_struct->timer, ibmvscsi_timeout, 0); + if (timeout) { + evt_struct->timer.expires = jiffies + (timeout * HZ); + add_timer(&evt_struct->timer); + } + + rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + if (rc != 0) { + list_del(&evt_struct->list); + del_timer(&evt_struct->timer); + + /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY. + * Firmware will send a CRQ with a transport event (0xFF) to + * tell this client what has happened to the transport. This + * will be handled in ibmvscsi_handle_crq() + */ + if (rc == H_CLOSED) { + dev_warn(hostdata->dev, "send warning. " + "Receive queue closed, will retry.\n"); + goto send_busy; + } + dev_err(hostdata->dev, "send error %d\n", rc); + if (srp_req) + atomic_inc(&hostdata->request_limit); + goto send_error; + } + + return 0; + + send_busy: + unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev); + + free_event_struct(&hostdata->pool, evt_struct); + if (srp_req && request_status != -1) + atomic_inc(&hostdata->request_limit); + return SCSI_MLQUEUE_HOST_BUSY; + + send_error: + unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev); + + if (evt_struct->cmnd != NULL) { + evt_struct->cmnd->result = DID_ERROR << 16; + evt_struct->cmnd_done(evt_struct->cmnd); + } else if (evt_struct->done) + evt_struct->done(evt_struct); + + free_event_struct(&hostdata->pool, evt_struct); + return 0; +} + +/** + * handle_cmd_rsp: - Handle responses from commands + * @evt_struct: srp_event_struct to be handled + * + * Used as a callback by when sending scsi cmds. 
+ * Gets called by ibmvscsi_handle_crq() +*/ +static void handle_cmd_rsp(struct srp_event_struct *evt_struct) +{ + struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp; + struct scsi_cmnd *cmnd = evt_struct->cmnd; + + if (unlikely(rsp->opcode != SRP_RSP)) { + if (printk_ratelimit()) + dev_warn(evt_struct->hostdata->dev, + "bad SRP RSP type %#02x\n", rsp->opcode); + } + + if (cmnd) { + cmnd->result |= rsp->status; + if (scsi_status_is_check_condition(cmnd->result)) + memcpy(cmnd->sense_buffer, + rsp->data, + be32_to_cpu(rsp->sense_data_len)); + unmap_cmd_data(&evt_struct->iu.srp.cmd, + evt_struct, + evt_struct->hostdata->dev); + + if (rsp->flags & SRP_RSP_FLAG_DOOVER) + scsi_set_resid(cmnd, + be32_to_cpu(rsp->data_out_res_cnt)); + else if (rsp->flags & SRP_RSP_FLAG_DIOVER) + scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt)); + } + + if (evt_struct->cmnd_done) + evt_struct->cmnd_done(cmnd); +} + +/** + * lun_from_dev: - Returns the lun of the scsi device + * @dev: struct scsi_device + * +*/ +static inline u16 lun_from_dev(struct scsi_device *dev) +{ + return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun; +} + +/** + * ibmvscsi_queuecommand_lck() - The queuecommand function of the scsi template + * @cmnd: struct scsi_cmnd to be executed + * @done: Callback function to be called when cmd is completed +*/ +static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd) +{ + void (*done)(struct scsi_cmnd *) = scsi_done; + struct srp_cmd *srp_cmd; + struct srp_event_struct *evt_struct; + struct srp_indirect_buf *indirect; + struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host); + u16 lun = lun_from_dev(cmnd->device); + u8 out_fmt, in_fmt; + + cmnd->result = (DID_OK << 16); + evt_struct = get_event_struct(&hostdata->pool); + if (!evt_struct) + return SCSI_MLQUEUE_HOST_BUSY; + + /* Set up the actual SRP IU */ + BUILD_BUG_ON(sizeof(evt_struct->iu.srp) != SRP_MAX_IU_LEN); + memset(&evt_struct->iu.srp, 0x00, sizeof(evt_struct->iu.srp)); + srp_cmd = &evt_struct->iu.srp.cmd; + srp_cmd->opcode = SRP_CMD; + memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb)); + int_to_scsilun(lun, &srp_cmd->lun); + + if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + sdev_printk(KERN_ERR, cmnd->device, + "couldn't convert cmd to srp_cmd\n"); + free_event_struct(&hostdata->pool, evt_struct); + return SCSI_MLQUEUE_HOST_BUSY; + } + + init_event_struct(evt_struct, + handle_cmd_rsp, + VIOSRP_SRP_FORMAT, + scsi_cmd_to_rq(cmnd)->timeout / HZ); + + evt_struct->cmnd = cmnd; + evt_struct->cmnd_done = done; + + /* Fix up dma address of the buffer itself */ + indirect = (struct srp_indirect_buf *) srp_cmd->add_data; + out_fmt = srp_cmd->buf_fmt >> 4; + in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1); + if ((in_fmt == SRP_DATA_DESC_INDIRECT || + out_fmt == SRP_DATA_DESC_INDIRECT) && + indirect->table_desc.va == 0) { + indirect->table_desc.va = + cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) + + offsetof(struct srp_cmd, add_data) + + offsetof(struct srp_indirect_buf, desc_list)); + } + + return ibmvscsi_send_srp_event(evt_struct, hostdata, 0); +} + +static DEF_SCSI_QCMD(ibmvscsi_queuecommand) + +/* ------------------------------------------------------------ + * Routines for driver initialization + */ + +/** + * map_persist_bufs: - Pre-map persistent data for adapter logins + * @hostdata: ibmvscsi_host_data of host + * + * Map the capabilities and adapter info DMA buffers to avoid runtime failures. 
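lun_from_dev() above packs the target id, channel and LUN of a scsi_device into the 16-bit value carried in the SRP command. The standalone sketch below (hypothetical helper pack_flat_lun) shows the same bit layout with one worked value:

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as lun_from_dev(): 0b10 in the top two bits,
 * then target id, channel and LUN packed below it. */
static uint16_t pack_flat_lun(unsigned int id, unsigned int channel,
                              unsigned int lun)
{
        return (0x2 << 14) | (id << 8) | (channel << 5) | lun;
}

int main(void)
{
        /* id 1, channel 0, lun 3 -> 0x8000 | 0x0100 | 0x0003 = 0x8103 */
        printf("0x%04x\n", pack_flat_lun(1, 0, 3));
        return 0;
}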
+ * Return 1 on error, 0 on success. + */ +static int map_persist_bufs(struct ibmvscsi_host_data *hostdata) +{ + + hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps, + sizeof(hostdata->caps), DMA_BIDIRECTIONAL); + + if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) { + dev_err(hostdata->dev, "Unable to map capabilities buffer!\n"); + return 1; + } + + hostdata->adapter_info_addr = dma_map_single(hostdata->dev, + &hostdata->madapter_info, + sizeof(hostdata->madapter_info), + DMA_BIDIRECTIONAL); + if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) { + dev_err(hostdata->dev, "Unable to map adapter info buffer!\n"); + dma_unmap_single(hostdata->dev, hostdata->caps_addr, + sizeof(hostdata->caps), DMA_BIDIRECTIONAL); + return 1; + } + + return 0; +} + +/** + * unmap_persist_bufs: - Unmap persistent data needed for adapter logins + * @hostdata: ibmvscsi_host_data of host + * + * Unmap the capabilities and adapter info DMA buffers + */ +static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata) +{ + dma_unmap_single(hostdata->dev, hostdata->caps_addr, + sizeof(hostdata->caps), DMA_BIDIRECTIONAL); + + dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr, + sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL); +} + +/** + * login_rsp: - Handle response to SRP login request + * @evt_struct: srp_event_struct with the response + * + * Used as a "done" callback by when sending srp_login. Gets called + * by ibmvscsi_handle_crq() +*/ +static void login_rsp(struct srp_event_struct *evt_struct) +{ + struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; + switch (evt_struct->xfer_iu->srp.login_rsp.opcode) { + case SRP_LOGIN_RSP: /* it worked! */ + break; + case SRP_LOGIN_REJ: /* refused! */ + dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n", + evt_struct->xfer_iu->srp.login_rej.reason); + /* Login failed. */ + ibmvscsi_set_request_limit(hostdata, -1); + return; + default: + dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n", + evt_struct->xfer_iu->srp.login_rsp.opcode); + /* Login failed. */ + ibmvscsi_set_request_limit(hostdata, -1); + return; + } + + dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); + hostdata->client_migrated = 0; + + /* Now we know what the real request-limit is. + * This value is set rather than added to request_limit because + * request_limit could have been set to -1 by this client. + */ + ibmvscsi_set_request_limit(hostdata, + be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta)); + + /* If we had any pending I/Os, kick them */ + hostdata->action = IBMVSCSI_HOST_ACTION_UNBLOCK; + wake_up(&hostdata->work_wait_q); +} + +/** + * send_srp_login: - Sends the srp login + * @hostdata: ibmvscsi_host_data of host + * + * Returns zero if successful. 
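The long comment above ibmvscsi_send_srp_event() explains that ordinary commands must not consume the last two request-limit credits, which are kept back for abort and reset, while login_rsp() above is what later installs the real limit from the server's req_lim_delta. A simplified, hypothetical model of just that reservation rule (the driver additionally counts in-flight requests before deciding):

#include <stdbool.h>
#include <stdio.h>

/* may_send: credits_left is what remains after this request grabbed
 * its credit; task-management requests may dip into the reserve. */
static bool may_send(int credits_left, bool is_task_mgmt)
{
        if (credits_left < 0)
                return false;           /* limit was 0 or -1: busy or dead */
        if (is_task_mgmt)
                return true;            /* abort/reset may use the reserve */
        return credits_left >= 2;       /* keep two credits for error recovery */
}

int main(void)
{
        printf("command, 1 credit left: %d\n", may_send(1, false));     /* 0 */
        printf("reset,   1 credit left: %d\n", may_send(1, true));      /* 1 */
        return 0;
}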
+*/ +static int send_srp_login(struct ibmvscsi_host_data *hostdata) +{ + int rc; + unsigned long flags; + struct srp_login_req *login; + struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); + + BUG_ON(!evt_struct); + init_event_struct(evt_struct, login_rsp, + VIOSRP_SRP_FORMAT, login_timeout); + + login = &evt_struct->iu.srp.login_req; + memset(login, 0, sizeof(*login)); + login->opcode = SRP_LOGIN_REQ; + login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu)); + login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | + SRP_BUF_FORMAT_INDIRECT); + + /* Start out with a request limit of 0, since this is negotiated in + * the login request we are just sending and login requests always + * get sent by the driver regardless of request_limit. + */ + ibmvscsi_set_request_limit(hostdata, 0); + + spin_lock_irqsave(hostdata->host->host_lock, flags); + rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + dev_info(hostdata->dev, "sent SRP login\n"); + return rc; +}; + +/** + * capabilities_rsp: - Handle response to MAD adapter capabilities request + * @evt_struct: srp_event_struct with the response + * + * Used as a "done" callback by when sending adapter_info. + */ +static void capabilities_rsp(struct srp_event_struct *evt_struct) +{ + struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; + + if (evt_struct->xfer_iu->mad.capabilities.common.status) { + dev_err(hostdata->dev, "error 0x%X getting capabilities info\n", + evt_struct->xfer_iu->mad.capabilities.common.status); + } else { + if (hostdata->caps.migration.common.server_support != + cpu_to_be16(SERVER_SUPPORTS_CAP)) + dev_info(hostdata->dev, "Partition migration not supported\n"); + + if (client_reserve) { + if (hostdata->caps.reserve.common.server_support == + cpu_to_be16(SERVER_SUPPORTS_CAP)) + dev_info(hostdata->dev, "Client reserve enabled\n"); + else + dev_info(hostdata->dev, "Client reserve not supported\n"); + } + } + + send_srp_login(hostdata); +} + +/** + * send_mad_capabilities: - Sends the mad capabilities request + * and stores the result so it can be retrieved with + * @hostdata: ibmvscsi_host_data of host + */ +static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) +{ + struct viosrp_capabilities *req; + struct srp_event_struct *evt_struct; + unsigned long flags; + struct device_node *of_node = hostdata->dev->of_node; + const char *location; + + evt_struct = get_event_struct(&hostdata->pool); + BUG_ON(!evt_struct); + + init_event_struct(evt_struct, capabilities_rsp, + VIOSRP_MAD_FORMAT, info_timeout); + + req = &evt_struct->iu.mad.capabilities; + memset(req, 0, sizeof(*req)); + + hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED); + if (hostdata->client_migrated) + hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED); + + strscpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), + sizeof(hostdata->caps.name)); + + location = of_get_property(of_node, "ibm,loc-code", NULL); + location = location ? 
location : dev_name(hostdata->dev); + strscpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); + + req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE); + req->buffer = cpu_to_be64(hostdata->caps_addr); + + hostdata->caps.migration.common.cap_type = + cpu_to_be32(MIGRATION_CAPABILITIES); + hostdata->caps.migration.common.length = + cpu_to_be16(sizeof(hostdata->caps.migration)); + hostdata->caps.migration.common.server_support = + cpu_to_be16(SERVER_SUPPORTS_CAP); + hostdata->caps.migration.ecl = cpu_to_be32(1); + + if (client_reserve) { + hostdata->caps.reserve.common.cap_type = + cpu_to_be32(RESERVATION_CAPABILITIES); + hostdata->caps.reserve.common.length = + cpu_to_be16(sizeof(hostdata->caps.reserve)); + hostdata->caps.reserve.common.server_support = + cpu_to_be16(SERVER_SUPPORTS_CAP); + hostdata->caps.reserve.type = + cpu_to_be32(CLIENT_RESERVE_SCSI_2); + req->common.length = + cpu_to_be16(sizeof(hostdata->caps)); + } else + req->common.length = cpu_to_be16(sizeof(hostdata->caps) - + sizeof(hostdata->caps.reserve)); + + spin_lock_irqsave(hostdata->host->host_lock, flags); + if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) + dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n"); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); +}; + +/** + * fast_fail_rsp: - Handle response to MAD enable fast fail + * @evt_struct: srp_event_struct with the response + * + * Used as a "done" callback by when sending enable fast fail. Gets called + * by ibmvscsi_handle_crq() + */ +static void fast_fail_rsp(struct srp_event_struct *evt_struct) +{ + struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; + u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status); + + if (status == VIOSRP_MAD_NOT_SUPPORTED) + dev_err(hostdata->dev, "fast_fail not supported in server\n"); + else if (status == VIOSRP_MAD_FAILED) + dev_err(hostdata->dev, "fast_fail request failed\n"); + else if (status != VIOSRP_MAD_SUCCESS) + dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status); + + send_mad_capabilities(hostdata); +} + +/** + * enable_fast_fail() - Start host initialization + * @hostdata: ibmvscsi_host_data of host + * + * Returns zero if successful. + */ +static int enable_fast_fail(struct ibmvscsi_host_data *hostdata) +{ + int rc; + unsigned long flags; + struct viosrp_fast_fail *fast_fail_mad; + struct srp_event_struct *evt_struct; + + if (!fast_fail) { + send_mad_capabilities(hostdata); + return 0; + } + + evt_struct = get_event_struct(&hostdata->pool); + BUG_ON(!evt_struct); + + init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout); + + fast_fail_mad = &evt_struct->iu.mad.fast_fail; + memset(fast_fail_mad, 0, sizeof(*fast_fail_mad)); + fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL); + fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad)); + + spin_lock_irqsave(hostdata->host->host_lock, flags); + rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + return rc; +} + +/** + * adapter_info_rsp: - Handle response to MAD adapter info request + * @evt_struct: srp_event_struct with the response + * + * Used as a "done" callback by when sending adapter_info. 
Gets called + * by ibmvscsi_handle_crq() +*/ +static void adapter_info_rsp(struct srp_event_struct *evt_struct) +{ + struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; + + if (evt_struct->xfer_iu->mad.adapter_info.common.status) { + dev_err(hostdata->dev, "error %d getting adapter info\n", + evt_struct->xfer_iu->mad.adapter_info.common.status); + } else { + dev_info(hostdata->dev, "host srp version: %s, " + "host partition %s (%d), OS %d, max io %u\n", + hostdata->madapter_info.srp_version, + hostdata->madapter_info.partition_name, + be32_to_cpu(hostdata->madapter_info.partition_number), + be32_to_cpu(hostdata->madapter_info.os_type), + be32_to_cpu(hostdata->madapter_info.port_max_txu[0])); + + if (hostdata->madapter_info.port_max_txu[0]) + hostdata->host->max_sectors = + be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9; + + if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX && + strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { + dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n", + hostdata->madapter_info.srp_version); + dev_err(hostdata->dev, "limiting scatterlists to %d\n", + MAX_INDIRECT_BUFS); + hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; + } + + if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX) { + enable_fast_fail(hostdata); + return; + } + } + + send_srp_login(hostdata); +} + +/** + * send_mad_adapter_info: - Sends the mad adapter info request + * and stores the result so it can be retrieved with + * sysfs. We COULD consider causing a failure if the + * returned SRP version doesn't match ours. + * @hostdata: ibmvscsi_host_data of host + * + * Returns zero if successful. +*/ +static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) +{ + struct viosrp_adapter_info *req; + struct srp_event_struct *evt_struct; + unsigned long flags; + + evt_struct = get_event_struct(&hostdata->pool); + BUG_ON(!evt_struct); + + init_event_struct(evt_struct, + adapter_info_rsp, + VIOSRP_MAD_FORMAT, + info_timeout); + + req = &evt_struct->iu.mad.adapter_info; + memset(req, 0x00, sizeof(*req)); + + req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE); + req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info)); + req->buffer = cpu_to_be64(hostdata->adapter_info_addr); + + spin_lock_irqsave(hostdata->host->host_lock, flags); + if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) + dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n"); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); +}; + +/* + * init_adapter() - Start virtual adapter initialization sequence + */ +static void init_adapter(struct ibmvscsi_host_data *hostdata) +{ + send_mad_adapter_info(hostdata); +} + +/* + * sync_completion: Signal that a synchronous command has completed + * Note that after returning from this call, the evt_struct is freed. + * the caller waiting on this completion shouldn't touch the evt_struct + * again. 
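adapter_info_rsp() earlier in this hunk converts the server's reported maximum transfer size (port_max_txu, in bytes) into the 512-byte sectors the SCSI midlayer uses by shifting right by 9. A worked example with an assumed 1 MiB report:

#include <stdio.h>

int main(void)
{
        unsigned int port_max_txu = 1u << 20;           /* example: server reports 1 MiB max I/O */
        unsigned int max_sectors = port_max_txu >> 9;   /* bytes -> 512-byte sectors */

        printf("%u bytes -> %u sectors\n", port_max_txu, max_sectors);  /* 2048 */
        return 0;
}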
+ */ +static void sync_completion(struct srp_event_struct *evt_struct) +{ + /* copy the response back */ + if (evt_struct->sync_srp) + *evt_struct->sync_srp = *evt_struct->xfer_iu; + + complete(&evt_struct->comp); +} + +/* + * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template + * send this over to the server and wait synchronously for the response + */ +static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) +{ + struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host); + struct srp_tsk_mgmt *tsk_mgmt; + struct srp_event_struct *evt; + struct srp_event_struct *tmp_evt, *found_evt; + union viosrp_iu srp_rsp; + int rsp_rc; + unsigned long flags; + u16 lun = lun_from_dev(cmd->device); + unsigned long wait_switch = 0; + + /* First, find this command in our sent list so we can figure + * out the correct tag + */ + spin_lock_irqsave(hostdata->host->host_lock, flags); + wait_switch = jiffies + (init_timeout * HZ); + do { + found_evt = NULL; + list_for_each_entry(tmp_evt, &hostdata->sent, list) { + if (tmp_evt->cmnd == cmd) { + found_evt = tmp_evt; + break; + } + } + + if (!found_evt) { + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + return SUCCESS; + } + + evt = get_event_struct(&hostdata->pool); + if (evt == NULL) { + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + sdev_printk(KERN_ERR, cmd->device, + "failed to allocate abort event\n"); + return FAILED; + } + + init_event_struct(evt, + sync_completion, + VIOSRP_SRP_FORMAT, + abort_timeout); + + tsk_mgmt = &evt->iu.srp.tsk_mgmt; + + /* Set up an abort SRP command */ + memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); + tsk_mgmt->opcode = SRP_TSK_MGMT; + int_to_scsilun(lun, &tsk_mgmt->lun); + tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK; + tsk_mgmt->task_tag = (u64) found_evt; + + evt->sync_srp = &srp_rsp; + + init_completion(&evt->comp); + rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2); + + if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) + break; + + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + msleep(10); + spin_lock_irqsave(hostdata->host->host_lock, flags); + } while (time_before(jiffies, wait_switch)); + + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + + if (rsp_rc != 0) { + sdev_printk(KERN_ERR, cmd->device, + "failed to send abort() event. rc=%d\n", rsp_rc); + return FAILED; + } + + sdev_printk(KERN_INFO, cmd->device, + "aborting command. lun 0x%llx, tag 0x%llx\n", + (((u64) lun) << 48), (u64) found_evt); + + wait_for_completion(&evt->comp); + + /* make sure we got a good response */ + if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) { + if (printk_ratelimit()) + sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n", + srp_rsp.srp.rsp.opcode); + return FAILED; + } + + if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID) + rsp_rc = *((int *)srp_rsp.srp.rsp.data); + else + rsp_rc = srp_rsp.srp.rsp.status; + + if (rsp_rc) { + if (printk_ratelimit()) + sdev_printk(KERN_WARNING, cmd->device, + "abort code %d for task tag 0x%llx\n", + rsp_rc, tsk_mgmt->task_tag); + return FAILED; + } + + /* Because we dropped the spinlock above, it's possible + * The event is no longer in our list. 
Make sure it didn't + * complete while we were aborting + */ + spin_lock_irqsave(hostdata->host->host_lock, flags); + found_evt = NULL; + list_for_each_entry(tmp_evt, &hostdata->sent, list) { + if (tmp_evt->cmnd == cmd) { + found_evt = tmp_evt; + break; + } + } + + if (found_evt == NULL) { + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n", + tsk_mgmt->task_tag); + return SUCCESS; + } + + sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n", + tsk_mgmt->task_tag); + + cmd->result = (DID_ABORT << 16); + list_del(&found_evt->list); + unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt, + found_evt->hostdata->dev); + free_event_struct(&found_evt->hostdata->pool, found_evt); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + atomic_inc(&hostdata->request_limit); + return SUCCESS; +} + +/* + * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host + * template send this over to the server and wait synchronously for the + * response + */ +static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) +{ + struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host); + struct srp_tsk_mgmt *tsk_mgmt; + struct srp_event_struct *evt; + struct srp_event_struct *tmp_evt, *pos; + union viosrp_iu srp_rsp; + int rsp_rc; + unsigned long flags; + u16 lun = lun_from_dev(cmd->device); + unsigned long wait_switch = 0; + + spin_lock_irqsave(hostdata->host->host_lock, flags); + wait_switch = jiffies + (init_timeout * HZ); + do { + evt = get_event_struct(&hostdata->pool); + if (evt == NULL) { + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + sdev_printk(KERN_ERR, cmd->device, + "failed to allocate reset event\n"); + return FAILED; + } + + init_event_struct(evt, + sync_completion, + VIOSRP_SRP_FORMAT, + reset_timeout); + + tsk_mgmt = &evt->iu.srp.tsk_mgmt; + + /* Set up a lun reset SRP command */ + memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); + tsk_mgmt->opcode = SRP_TSK_MGMT; + int_to_scsilun(lun, &tsk_mgmt->lun); + tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET; + + evt->sync_srp = &srp_rsp; + + init_completion(&evt->comp); + rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2); + + if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) + break; + + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + msleep(10); + spin_lock_irqsave(hostdata->host->host_lock, flags); + } while (time_before(jiffies, wait_switch)); + + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + + if (rsp_rc != 0) { + sdev_printk(KERN_ERR, cmd->device, + "failed to send reset event. rc=%d\n", rsp_rc); + return FAILED; + } + + sdev_printk(KERN_INFO, cmd->device, "resetting device. 
lun 0x%llx\n", + (((u64) lun) << 48)); + + wait_for_completion(&evt->comp); + + /* make sure we got a good response */ + if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) { + if (printk_ratelimit()) + sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n", + srp_rsp.srp.rsp.opcode); + return FAILED; + } + + if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID) + rsp_rc = *((int *)srp_rsp.srp.rsp.data); + else + rsp_rc = srp_rsp.srp.rsp.status; + + if (rsp_rc) { + if (printk_ratelimit()) + sdev_printk(KERN_WARNING, cmd->device, + "reset code %d for task tag 0x%llx\n", + rsp_rc, tsk_mgmt->task_tag); + return FAILED; + } + + /* We need to find all commands for this LUN that have not yet been + * responded to, and fail them with DID_RESET + */ + spin_lock_irqsave(hostdata->host->host_lock, flags); + list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) { + if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) { + if (tmp_evt->cmnd) + tmp_evt->cmnd->result = (DID_RESET << 16); + list_del(&tmp_evt->list); + unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt, + tmp_evt->hostdata->dev); + free_event_struct(&tmp_evt->hostdata->pool, + tmp_evt); + atomic_inc(&hostdata->request_limit); + if (tmp_evt->cmnd_done) + tmp_evt->cmnd_done(tmp_evt->cmnd); + else if (tmp_evt->done) + tmp_evt->done(tmp_evt); + } + } + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + return SUCCESS; +} + +/** + * ibmvscsi_eh_host_reset_handler - Reset the connection to the server + * @cmd: struct scsi_cmnd having problems +*/ +static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd) +{ + unsigned long wait_switch = 0; + struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host); + + dev_err(hostdata->dev, "Resetting connection due to error recovery\n"); + + ibmvscsi_reset_host(hostdata); + + for (wait_switch = jiffies + (init_timeout * HZ); + time_before(jiffies, wait_switch) && + atomic_read(&hostdata->request_limit) < 2;) { + + msleep(10); + } + + if (atomic_read(&hostdata->request_limit) <= 0) + return FAILED; + + return SUCCESS; +} + +/** + * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ + * @crq: Command/Response queue + * @hostdata: ibmvscsi_host_data of host + * +*/ +static void ibmvscsi_handle_crq(struct viosrp_crq *crq, + struct ibmvscsi_host_data *hostdata) +{ + long rc; + unsigned long flags; + /* The hypervisor copies our tag value here so no byteswapping */ + struct srp_event_struct *evt_struct = + (__force struct srp_event_struct *)crq->IU_data_ptr; + switch (crq->valid) { + case VIOSRP_CRQ_INIT_RSP: /* initialization */ + switch (crq->format) { + case VIOSRP_CRQ_INIT: /* Initialization message */ + dev_info(hostdata->dev, "partner initialized\n"); + /* Send back a response */ + rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0); + if (rc == 0) { + /* Now login */ + init_adapter(hostdata); + } else { + dev_err(hostdata->dev, "Unable to send init rsp. 
rc=%ld\n", rc); + } + + break; + case VIOSRP_CRQ_INIT_COMPLETE: /* Initialization response */ + dev_info(hostdata->dev, "partner initialization complete\n"); + + /* Now login */ + init_adapter(hostdata); + break; + default: + dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format); + } + return; + case VIOSRP_CRQ_XPORT_EVENT: /* Hypervisor telling us the connection is closed */ + scsi_block_requests(hostdata->host); + ibmvscsi_set_request_limit(hostdata, 0); + if (crq->format == 0x06) { + /* We need to re-setup the interpartition connection */ + dev_info(hostdata->dev, "Re-enabling adapter!\n"); + hostdata->client_migrated = 1; + hostdata->action = IBMVSCSI_HOST_ACTION_REENABLE; + purge_requests(hostdata, DID_REQUEUE); + wake_up(&hostdata->work_wait_q); + } else { + dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n", + crq->format); + ibmvscsi_reset_host(hostdata); + } + return; + case VIOSRP_CRQ_CMD_RSP: /* real payload */ + break; + default: + dev_err(hostdata->dev, "got an invalid message type 0x%02x\n", + crq->valid); + return; + } + + /* The only kind of payload CRQs we should get are responses to + * things we send. Make sure this response is to something we + * actually sent + */ + if (!valid_event_struct(&hostdata->pool, evt_struct)) { + dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n", + evt_struct); + return; + } + + if (atomic_read(&evt_struct->free)) { + dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n", + evt_struct); + return; + } + + if (crq->format == VIOSRP_SRP_FORMAT) + atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta), + &hostdata->request_limit); + + del_timer(&evt_struct->timer); + + if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd) + evt_struct->cmnd->result = DID_ERROR << 16; + if (evt_struct->done) + evt_struct->done(evt_struct); + else + dev_err(hostdata->dev, "returned done() is NULL; not running it!\n"); + + /* + * Lock the host_lock before messing with these structures, since we + * are running in a task context + */ + spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags); + list_del(&evt_struct->list); + free_event_struct(&evt_struct->hostdata->pool, evt_struct); + spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags); +} + +/** + * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk. + * @sdev: struct scsi_device device to configure + * + * Enable allow_restart for a device if it is a disk. Adjust the + * queue_depth here also as is required by the documentation for + * struct scsi_host_template. 
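ibmvscsi_handle_crq() above trusts the CRQ tag only after valid_event_struct() (defined earlier in the file, not shown in this hunk) confirms it really points into the event pool. One plausible user-space sketch of such a bounds-and-alignment check, with hypothetical names; the driver's actual check may differ in detail:

#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

struct evt { int payload; };

struct pool {
        struct evt *events;
        unsigned int size;
};

/* A token that is really a pointer into the event array must lie
 * inside the array and sit on an element boundary. */
static bool token_is_valid(const struct pool *p, const struct evt *evt)
{
        ptrdiff_t off = (const char *)evt - (const char *)p->events;

        return evt >= p->events && evt < p->events + p->size &&
               off % (ptrdiff_t)sizeof(struct evt) == 0;
}

int main(void)
{
        struct evt events[8];
        struct pool p = { events, 8 };

        printf("element 3 valid: %d\n", token_is_valid(&p, &events[3]));  /* 1 */
        printf("out of range:    %d\n", token_is_valid(&p, events + 8));  /* 0 */
        return 0;
}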
+ */ +static int ibmvscsi_slave_configure(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + unsigned long lock_flags = 0; + + spin_lock_irqsave(shost->host_lock, lock_flags); + if (sdev->type == TYPE_DISK) { + sdev->allow_restart = 1; + blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); + } + spin_unlock_irqrestore(shost->host_lock, lock_flags); + return 0; +} + +/** + * ibmvscsi_change_queue_depth - Change the device's queue depth + * @sdev: scsi device struct + * @qdepth: depth to set + * + * Return value: + * actual depth set + **/ +static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN) + qdepth = IBMVSCSI_MAX_CMDS_PER_LUN; + return scsi_change_queue_depth(sdev, qdepth); +} + +/* ------------------------------------------------------------ + * sysfs attributes + */ +static ssize_t show_host_vhost_loc(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvscsi_host_data *hostdata = shost_priv(shost); + int len; + + len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n", + hostdata->caps.loc); + return len; +} + +static struct device_attribute ibmvscsi_host_vhost_loc = { + .attr = { + .name = "vhost_loc", + .mode = S_IRUGO, + }, + .show = show_host_vhost_loc, +}; + +static ssize_t show_host_vhost_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvscsi_host_data *hostdata = shost_priv(shost); + int len; + + len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n", + hostdata->caps.name); + return len; +} + +static struct device_attribute ibmvscsi_host_vhost_name = { + .attr = { + .name = "vhost_name", + .mode = S_IRUGO, + }, + .show = show_host_vhost_name, +}; + +static ssize_t show_host_srp_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvscsi_host_data *hostdata = shost_priv(shost); + int len; + + len = snprintf(buf, PAGE_SIZE, "%s\n", + hostdata->madapter_info.srp_version); + return len; +} + +static struct device_attribute ibmvscsi_host_srp_version = { + .attr = { + .name = "srp_version", + .mode = S_IRUGO, + }, + .show = show_host_srp_version, +}; + +static ssize_t show_host_partition_name(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvscsi_host_data *hostdata = shost_priv(shost); + int len; + + len = snprintf(buf, PAGE_SIZE, "%s\n", + hostdata->madapter_info.partition_name); + return len; +} + +static struct device_attribute ibmvscsi_host_partition_name = { + .attr = { + .name = "partition_name", + .mode = S_IRUGO, + }, + .show = show_host_partition_name, +}; + +static ssize_t show_host_partition_number(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvscsi_host_data *hostdata = shost_priv(shost); + int len; + + len = snprintf(buf, PAGE_SIZE, "%d\n", + be32_to_cpu(hostdata->madapter_info.partition_number)); + return len; +} + +static struct device_attribute ibmvscsi_host_partition_number = { + .attr = { + .name = "partition_number", + .mode = S_IRUGO, + }, + .show = show_host_partition_number, +}; + +static ssize_t show_host_mad_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvscsi_host_data *hostdata = 
shost_priv(shost); + int len; + + len = snprintf(buf, PAGE_SIZE, "%d\n", + be32_to_cpu(hostdata->madapter_info.mad_version)); + return len; +} + +static struct device_attribute ibmvscsi_host_mad_version = { + .attr = { + .name = "mad_version", + .mode = S_IRUGO, + }, + .show = show_host_mad_version, +}; + +static ssize_t show_host_os_type(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvscsi_host_data *hostdata = shost_priv(shost); + int len; + + len = snprintf(buf, PAGE_SIZE, "%d\n", + be32_to_cpu(hostdata->madapter_info.os_type)); + return len; +} + +static struct device_attribute ibmvscsi_host_os_type = { + .attr = { + .name = "os_type", + .mode = S_IRUGO, + }, + .show = show_host_os_type, +}; + +static ssize_t show_host_config(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return 0; +} + +static struct device_attribute ibmvscsi_host_config = { + .attr = { + .name = "config", + .mode = S_IRUGO, + }, + .show = show_host_config, +}; + +static int ibmvscsi_host_reset(struct Scsi_Host *shost, int reset_type) +{ + struct ibmvscsi_host_data *hostdata = shost_priv(shost); + + dev_info(hostdata->dev, "Initiating adapter reset!\n"); + ibmvscsi_reset_host(hostdata); + + return 0; +} + +static struct attribute *ibmvscsi_host_attrs[] = { + &ibmvscsi_host_vhost_loc.attr, + &ibmvscsi_host_vhost_name.attr, + &ibmvscsi_host_srp_version.attr, + &ibmvscsi_host_partition_name.attr, + &ibmvscsi_host_partition_number.attr, + &ibmvscsi_host_mad_version.attr, + &ibmvscsi_host_os_type.attr, + &ibmvscsi_host_config.attr, + NULL +}; + +ATTRIBUTE_GROUPS(ibmvscsi_host); + +/* ------------------------------------------------------------ + * SCSI driver registration + */ +static struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION, + .proc_name = "ibmvscsi", + .queuecommand = ibmvscsi_queuecommand, + .eh_timed_out = srp_timed_out, + .eh_abort_handler = ibmvscsi_eh_abort_handler, + .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler, + .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler, + .slave_configure = ibmvscsi_slave_configure, + .change_queue_depth = ibmvscsi_change_queue_depth, + .host_reset = ibmvscsi_host_reset, + .cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT, + .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT, + .this_id = -1, + .sg_tablesize = SG_ALL, + .shost_groups = ibmvscsi_host_groups, +}; + +/** + * ibmvscsi_get_desired_dma - Calculate IO memory desired by the driver + * + * @vdev: struct vio_dev for the device whose desired IO mem is to be returned + * + * Return value: + * Number of bytes of IO data the driver will need to perform well. 
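The calculation in ibmvscsi_get_desired_dma() below is one IU per event plus worst-case scatter/gather payload. A worked example using the defaults from ibmvscsi.h later in this patch, with sizeof(union viosrp_iu) assumed to be 256 bytes purely for illustration:

#include <stdio.h>

int main(void)
{
        unsigned long max_requests = 100;               /* IBMVSCSI_MAX_REQUESTS_DEFAULT */
        unsigned long max_events   = max_requests + 2;  /* two reserved for error recovery */
        unsigned long iu_size      = 256;               /* assumed sizeof(union viosrp_iu) */
        unsigned long max_sectors  = 256;               /* IBMVSCSI_MAX_SECTORS_DEFAULT */
        unsigned long cmds_per_lun = 16;                /* IBMVSCSI_CMDS_PER_LUN_DEFAULT */

        unsigned long desired = max_events * iu_size +
                                max_sectors * 512 * cmds_per_lun;

        /* 102 * 256 + 256 * 512 * 16 = 26112 + 2097152 = 2123264 bytes */
        printf("desired IO space: %lu bytes\n", desired);
        return 0;
}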
+ */ +static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev) +{ + /* iu_storage data allocated in initialize_event_pool */ + unsigned long desired_io = max_events * sizeof(union viosrp_iu); + + /* add io space for sg data */ + desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 * + IBMVSCSI_CMDS_PER_LUN_DEFAULT); + + return desired_io; +} + +static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata) +{ + unsigned long flags; + int rc; + char *action = "reset"; + + spin_lock_irqsave(hostdata->host->host_lock, flags); + switch (hostdata->action) { + case IBMVSCSI_HOST_ACTION_UNBLOCK: + rc = 0; + break; + case IBMVSCSI_HOST_ACTION_RESET: + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata); + spin_lock_irqsave(hostdata->host->host_lock, flags); + if (!rc) + rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0); + vio_enable_interrupts(to_vio_dev(hostdata->dev)); + break; + case IBMVSCSI_HOST_ACTION_REENABLE: + action = "enable"; + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata); + spin_lock_irqsave(hostdata->host->host_lock, flags); + if (!rc) + rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0); + break; + case IBMVSCSI_HOST_ACTION_NONE: + default: + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + return; + } + + hostdata->action = IBMVSCSI_HOST_ACTION_NONE; + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + + if (rc) { + ibmvscsi_set_request_limit(hostdata, -1); + dev_err(hostdata->dev, "error after %s\n", action); + } + + scsi_unblock_requests(hostdata->host); +} + +static int __ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata) +{ + if (kthread_should_stop()) + return 1; + switch (hostdata->action) { + case IBMVSCSI_HOST_ACTION_NONE: + return 0; + case IBMVSCSI_HOST_ACTION_RESET: + case IBMVSCSI_HOST_ACTION_REENABLE: + case IBMVSCSI_HOST_ACTION_UNBLOCK: + default: + break; + } + + return 1; +} + +static int ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata) +{ + unsigned long flags; + int rc; + + spin_lock_irqsave(hostdata->host->host_lock, flags); + rc = __ibmvscsi_work_to_do(hostdata); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + + return rc; +} + +static int ibmvscsi_work(void *data) +{ + struct ibmvscsi_host_data *hostdata = data; + int rc; + + set_user_nice(current, MIN_NICE); + + while (1) { + rc = wait_event_interruptible(hostdata->work_wait_q, + ibmvscsi_work_to_do(hostdata)); + + BUG_ON(rc); + + if (kthread_should_stop()) + break; + + ibmvscsi_do_work(hostdata); + } + + return 0; +} + +/* + * Called by bus code for each adapter + */ +static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) +{ + struct ibmvscsi_host_data *hostdata; + struct Scsi_Host *host; + struct device *dev = &vdev->dev; + struct srp_rport_identifiers ids; + struct srp_rport *rport; + unsigned long wait_switch = 0; + int rc; + + dev_set_drvdata(&vdev->dev, NULL); + + host = scsi_host_alloc(&driver_template, sizeof(*hostdata)); + if (!host) { + dev_err(&vdev->dev, "couldn't allocate host data\n"); + goto scsi_host_alloc_failed; + } + + host->transportt = ibmvscsi_transport_template; + hostdata = shost_priv(host); + memset(hostdata, 0x00, sizeof(*hostdata)); + INIT_LIST_HEAD(&hostdata->sent); + init_waitqueue_head(&hostdata->work_wait_q); + hostdata->host = host; + hostdata->dev = dev; + ibmvscsi_set_request_limit(hostdata, -1); + hostdata->host->max_sectors = 
IBMVSCSI_MAX_SECTORS_DEFAULT;
+
+	if (map_persist_bufs(hostdata)) {
+		dev_err(&vdev->dev, "couldn't map persistent buffers\n");
+		goto persist_bufs_failed;
+	}
+
+	hostdata->work_thread = kthread_run(ibmvscsi_work, hostdata, "%s_%d",
+					    "ibmvscsi", host->host_no);
+
+	if (IS_ERR(hostdata->work_thread)) {
+		dev_err(&vdev->dev, "couldn't initialize kthread. rc=%ld\n",
+			PTR_ERR(hostdata->work_thread));
+		goto init_crq_failed;
+	}
+
+	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
+	if (rc != 0 && rc != H_RESOURCE) {
+		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
+		goto kill_kthread;
+	}
+	if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
+		dev_err(&vdev->dev, "couldn't initialize event pool\n");
+		goto init_pool_failed;
+	}
+
+	host->max_lun = IBMVSCSI_MAX_LUN;
+	host->max_id = max_id;
+	host->max_channel = max_channel;
+	host->max_cmd_len = 16;
+
+	dev_info(dev,
+		 "Maximum ID: %d Maximum LUN: %llu Maximum Channel: %d\n",
+		 host->max_id, host->max_lun, host->max_channel);
+
+	if (scsi_add_host(hostdata->host, hostdata->dev))
+		goto add_host_failed;
+
+	/* we don't have a proper target_port_id so let's use the fake one */
+	memcpy(ids.port_id, hostdata->madapter_info.partition_name,
+	       sizeof(ids.port_id));
+	ids.roles = SRP_RPORT_ROLE_TARGET;
+	rport = srp_rport_add(host, &ids);
+	if (IS_ERR(rport))
+		goto add_srp_port_failed;
+
+	/* Try to send an initialization message.  Note that this is allowed
+	 * to fail if the other end is not active.  In that case we don't
+	 * want to scan.
+	 */
+	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
+	    || rc == H_RESOURCE) {
+		/*
+		 * Wait around max init_timeout secs for the adapter to finish
+		 * initializing. When we are done initializing, we will have a
+		 * valid request_limit.  We don't want Linux scanning before
+		 * we are ready.
+ */ + for (wait_switch = jiffies + (init_timeout * HZ); + time_before(jiffies, wait_switch) && + atomic_read(&hostdata->request_limit) < 2;) { + + msleep(10); + } + + /* if we now have a valid request_limit, initiate a scan */ + if (atomic_read(&hostdata->request_limit) > 0) + scsi_scan_host(host); + } + + dev_set_drvdata(&vdev->dev, hostdata); + spin_lock(&ibmvscsi_driver_lock); + list_add_tail(&hostdata->host_list, &ibmvscsi_head); + spin_unlock(&ibmvscsi_driver_lock); + return 0; + + add_srp_port_failed: + scsi_remove_host(hostdata->host); + add_host_failed: + release_event_pool(&hostdata->pool, hostdata); + init_pool_failed: + ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events); + kill_kthread: + kthread_stop(hostdata->work_thread); + init_crq_failed: + unmap_persist_bufs(hostdata); + persist_bufs_failed: + scsi_host_put(host); + scsi_host_alloc_failed: + return -1; +} + +static void ibmvscsi_remove(struct vio_dev *vdev) +{ + struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); + + srp_remove_host(hostdata->host); + scsi_remove_host(hostdata->host); + + purge_requests(hostdata, DID_ERROR); + release_event_pool(&hostdata->pool, hostdata); + + ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, + max_events); + + kthread_stop(hostdata->work_thread); + unmap_persist_bufs(hostdata); + + spin_lock(&ibmvscsi_driver_lock); + list_del(&hostdata->host_list); + spin_unlock(&ibmvscsi_driver_lock); + + scsi_host_put(hostdata->host); +} + +/** + * ibmvscsi_resume: Resume from suspend + * @dev: device struct + * + * We may have lost an interrupt across suspend/resume, so kick the + * interrupt handler + */ +static int ibmvscsi_resume(struct device *dev) +{ + struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev); + vio_disable_interrupts(to_vio_dev(hostdata->dev)); + tasklet_schedule(&hostdata->srp_task); + + return 0; +} + +/* + * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we + * support. 
+ */ +static const struct vio_device_id ibmvscsi_device_table[] = { + {"vscsi", "IBM,v-scsi"}, + { "", "" } +}; +MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table); + +static const struct dev_pm_ops ibmvscsi_pm_ops = { + .resume = ibmvscsi_resume +}; + +static struct vio_driver ibmvscsi_driver = { + .id_table = ibmvscsi_device_table, + .probe = ibmvscsi_probe, + .remove = ibmvscsi_remove, + .get_desired_dma = ibmvscsi_get_desired_dma, + .name = "ibmvscsi", + .pm = &ibmvscsi_pm_ops, +}; + +static struct srp_function_template ibmvscsi_transport_functions = { +}; + +static int __init ibmvscsi_module_init(void) +{ + int ret; + + /* Ensure we have two requests to do error recovery */ + driver_template.can_queue = max_requests; + max_events = max_requests + 2; + + if (!firmware_has_feature(FW_FEATURE_VIO)) + return -ENODEV; + + ibmvscsi_transport_template = + srp_attach_transport(&ibmvscsi_transport_functions); + if (!ibmvscsi_transport_template) + return -ENOMEM; + + ret = vio_register_driver(&ibmvscsi_driver); + if (ret) + srp_release_transport(ibmvscsi_transport_template); + return ret; +} + +static void __exit ibmvscsi_module_exit(void) +{ + vio_unregister_driver(&ibmvscsi_driver); + srp_release_transport(ibmvscsi_transport_template); +} + +module_init(ibmvscsi_module_init); +module_exit(ibmvscsi_module_exit); diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h new file mode 100644 index 000000000..e60916ef7 --- /dev/null +++ b/drivers/scsi/ibmvscsi/ibmvscsi.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* ------------------------------------------------------------ + * ibmvscsi.h + * (C) Copyright IBM Corporation 1994, 2003 + * Authors: Colin DeVilbiss (devilbis@us.ibm.com) + * Santiago Leon (santil@us.ibm.com) + * Dave Boutcher (sleddog@us.ibm.com) + * + * ------------------------------------------------------------ + * Emulation of a SCSI host adapter for Virtual I/O devices + * + * This driver allows the Linux SCSI peripheral drivers to directly + * access devices in the hosting partition, either on an iSeries + * hypervisor system or a converged hypervisor system. 
+ */ +#ifndef IBMVSCSI_H +#define IBMVSCSI_H +#include +#include +#include +#include +#include + +struct scsi_cmnd; +struct Scsi_Host; + +/* Number of indirect bufs...the list of these has to fit in the + * additional data of the srp_cmd struct along with the indirect + * descriptor + */ +#define MAX_INDIRECT_BUFS 10 + +#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100 +#define IBMVSCSI_CMDS_PER_LUN_DEFAULT 16 +#define IBMVSCSI_MAX_SECTORS_DEFAULT 256 /* 32 * 8 = default max I/O 32 pages */ +#define IBMVSCSI_MAX_CMDS_PER_LUN 64 +#define IBMVSCSI_MAX_LUN 32 + +/* ------------------------------------------------------------ + * Data Structures + */ +/* an RPA command/response transport queue */ +struct crq_queue { + struct viosrp_crq *msgs; + int size, cur; + dma_addr_t msg_token; + spinlock_t lock; +}; + +/* a unit of work for the hosting partition */ +struct srp_event_struct { + union viosrp_iu *xfer_iu; + struct scsi_cmnd *cmnd; + struct list_head list; + void (*done) (struct srp_event_struct *); + struct viosrp_crq crq; + struct ibmvscsi_host_data *hostdata; + atomic_t free; + union viosrp_iu iu; + void (*cmnd_done) (struct scsi_cmnd *); + struct completion comp; + struct timer_list timer; + union viosrp_iu *sync_srp; + struct srp_direct_buf *ext_list; + dma_addr_t ext_list_token; +}; + +/* a pool of event structs for use */ +struct event_pool { + struct srp_event_struct *events; + u32 size; + int next; + union viosrp_iu *iu_storage; + dma_addr_t iu_token; +}; + +enum ibmvscsi_host_action { + IBMVSCSI_HOST_ACTION_NONE = 0, + IBMVSCSI_HOST_ACTION_RESET, + IBMVSCSI_HOST_ACTION_REENABLE, + IBMVSCSI_HOST_ACTION_UNBLOCK, +}; + +/* all driver data associated with a host adapter */ +struct ibmvscsi_host_data { + struct list_head host_list; + atomic_t request_limit; + int client_migrated; + enum ibmvscsi_host_action action; + struct device *dev; + struct event_pool pool; + struct crq_queue queue; + struct tasklet_struct srp_task; + struct list_head sent; + struct Scsi_Host *host; + struct task_struct *work_thread; + wait_queue_head_t work_wait_q; + struct mad_adapter_info_data madapter_info; + struct capabilities caps; + dma_addr_t caps_addr; + dma_addr_t adapter_info_addr; +}; + +#endif /* IBMVSCSI_H */ diff --git a/drivers/scsi/ibmvscsi_tgt/Makefile b/drivers/scsi/ibmvscsi_tgt/Makefile new file mode 100644 index 000000000..cc7a8256d --- /dev/null +++ b/drivers/scsi/ibmvscsi_tgt/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsis.o + +ibmvscsis-y := libsrp.o ibmvscsi_tgt.o diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c new file mode 100644 index 000000000..385f812b8 --- /dev/null +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -0,0 +1,4076 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * IBM Virtual SCSI Target Driver + * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp. + * Santiago Leon (santil@us.ibm.com) IBM Corp. + * Linda Xie (lxie@us.ibm.com) IBM Corp. + * + * Copyright (C) 2005-2011 FUJITA Tomonori + * Copyright (C) 2010 Nicholas A. Bellinger + * + * Authors: Bryant G. 
Ly + * Authors: Michael Cyr + * + ****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include + +#include "ibmvscsi_tgt.h" + +#define IBMVSCSIS_VERSION "v0.2" + +#define INITIAL_SRP_LIMIT 1024 +#define DEFAULT_MAX_SECTORS 256 +#define MAX_TXU 1024 * 1024 + +static uint max_vdma_size = MAX_H_COPY_RDMA; + +static char system_id[SYS_ID_NAME_LEN] = ""; +static char partition_name[PARTITION_NAMELEN] = "UNKNOWN"; +static uint partition_number = -1; + +/* Adapter list and lock to control it */ +static DEFINE_SPINLOCK(ibmvscsis_dev_lock); +static LIST_HEAD(ibmvscsis_dev_list); + +static long ibmvscsis_parse_command(struct scsi_info *vscsi, + struct viosrp_crq *crq); + +static void ibmvscsis_adapter_idle(struct scsi_info *vscsi); + +static void ibmvscsis_determine_resid(struct se_cmd *se_cmd, + struct srp_rsp *rsp) +{ + u32 residual_count = se_cmd->residual_count; + + if (!residual_count) + return; + + if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + if (se_cmd->data_direction == DMA_TO_DEVICE) { + /* residual data from an underflow write */ + rsp->flags = SRP_RSP_FLAG_DOUNDER; + rsp->data_out_res_cnt = cpu_to_be32(residual_count); + } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { + /* residual data from an underflow read */ + rsp->flags = SRP_RSP_FLAG_DIUNDER; + rsp->data_in_res_cnt = cpu_to_be32(residual_count); + } + } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + if (se_cmd->data_direction == DMA_TO_DEVICE) { + /* residual data from an overflow write */ + rsp->flags = SRP_RSP_FLAG_DOOVER; + rsp->data_out_res_cnt = cpu_to_be32(residual_count); + } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { + /* residual data from an overflow read */ + rsp->flags = SRP_RSP_FLAG_DIOVER; + rsp->data_in_res_cnt = cpu_to_be32(residual_count); + } + } +} + +/** + * connection_broken() - Determine if the connection to the client is good + * @vscsi: Pointer to our adapter structure + * + * This function attempts to send a ping MAD to the client. If the call to + * queue the request returns H_CLOSED then the connection has been broken + * and the function returns TRUE. + * + * EXECUTION ENVIRONMENT: + * Interrupt or Process environment + */ +static bool connection_broken(struct scsi_info *vscsi) +{ + struct viosrp_crq *crq; + u64 buffer[2] = { 0, 0 }; + long h_return_code; + bool rc = false; + + /* create a PING crq */ + crq = (struct viosrp_crq *)&buffer; + crq->valid = VALID_CMD_RESP_EL; + crq->format = MESSAGE_IN_CRQ; + crq->status = PING; + + h_return_code = h_send_crq(vscsi->dds.unit_id, + cpu_to_be64(buffer[MSG_HI]), + cpu_to_be64(buffer[MSG_LOW])); + + dev_dbg(&vscsi->dev, "Connection_broken: rc %ld\n", h_return_code); + + if (h_return_code == H_CLOSED) + rc = true; + + return rc; +} + +/** + * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue + * @vscsi: Pointer to our adapter structure + * + * This function calls h_free_q then frees the interrupt bit etc. + * It must release the lock before doing so because of the time it can take + * for h_free_crq in PHYP + * NOTE: * the caller must make sure that state and or flags will prevent + * interrupt handler from scheduling work. 
+ * * anyone calling this function may need to set the CRQ_CLOSED flag + * we can't do it here, because we don't have the lock + * + * EXECUTION ENVIRONMENT: + * Process level + */ +static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi) +{ + long qrc; + long rc = ADAPT_SUCCESS; + int ticks = 0; + + do { + qrc = h_free_crq(vscsi->dds.unit_id); + switch (qrc) { + case H_SUCCESS: + spin_lock_bh(&vscsi->intr_lock); + vscsi->flags &= ~PREP_FOR_SUSPEND_FLAGS; + spin_unlock_bh(&vscsi->intr_lock); + break; + + case H_HARDWARE: + case H_PARAMETER: + dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n", + qrc); + rc = ERROR; + break; + + case H_BUSY: + case H_LONG_BUSY_ORDER_1_MSEC: + /* msleep not good for small values */ + usleep_range(1000, 2000); + ticks += 1; + break; + case H_LONG_BUSY_ORDER_10_MSEC: + usleep_range(10000, 20000); + ticks += 10; + break; + case H_LONG_BUSY_ORDER_100_MSEC: + msleep(100); + ticks += 100; + break; + case H_LONG_BUSY_ORDER_1_SEC: + ssleep(1); + ticks += 1000; + break; + case H_LONG_BUSY_ORDER_10_SEC: + ssleep(10); + ticks += 10000; + break; + case H_LONG_BUSY_ORDER_100_SEC: + ssleep(100); + ticks += 100000; + break; + default: + dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n", + qrc); + rc = ERROR; + break; + } + + /* + * dont wait more then 300 seconds + * ticks are in milliseconds more or less + */ + if (ticks > 300000 && qrc != H_SUCCESS) { + rc = ERROR; + dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n"); + } + } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS); + + dev_dbg(&vscsi->dev, "Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc); + + return rc; +} + +/** + * ibmvscsis_delete_client_info() - Helper function to Delete Client Info + * @vscsi: Pointer to our adapter structure + * @client_closed: True if client closed its queue + * + * Deletes information specific to the client when the client goes away + * + * EXECUTION ENVIRONMENT: + * Interrupt or Process + */ +static void ibmvscsis_delete_client_info(struct scsi_info *vscsi, + bool client_closed) +{ + vscsi->client_cap = 0; + + /* + * Some things we don't want to clear if we're closing the queue, + * because some clients don't resend the host handshake when they + * get a transport event. + */ + if (client_closed) + vscsi->client_data.os_type = 0; +} + +/** + * ibmvscsis_free_command_q() - Free Command Queue + * @vscsi: Pointer to our adapter structure + * + * This function calls unregister_command_q, then clears interrupts and + * any pending interrupt acknowledgments associated with the command q. + * It also clears memory if there is no error. + * + * PHYP did not meet the PAPR architecture so that we must give up the + * lock. This causes a timing hole regarding state change. To close the + * hole this routine does accounting on any change that occurred during + * the time the lock is not held. + * NOTE: must give up and then acquire the interrupt lock, the caller must + * make sure that state and or flags will prevent interrupt handler from + * scheduling work. 
+ * + * EXECUTION ENVIRONMENT: + * Process level, interrupt lock is held + */ +static long ibmvscsis_free_command_q(struct scsi_info *vscsi) +{ + int bytes; + u32 flags_under_lock; + u16 state_under_lock; + long rc = ADAPT_SUCCESS; + + if (!(vscsi->flags & CRQ_CLOSED)) { + vio_disable_interrupts(vscsi->dma_dev); + + state_under_lock = vscsi->new_state; + flags_under_lock = vscsi->flags; + vscsi->phyp_acr_state = 0; + vscsi->phyp_acr_flags = 0; + + spin_unlock_bh(&vscsi->intr_lock); + rc = ibmvscsis_unregister_command_q(vscsi); + spin_lock_bh(&vscsi->intr_lock); + + if (state_under_lock != vscsi->new_state) + vscsi->phyp_acr_state = vscsi->new_state; + + vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags); + + if (rc == ADAPT_SUCCESS) { + bytes = vscsi->cmd_q.size * PAGE_SIZE; + memset(vscsi->cmd_q.base_addr, 0, bytes); + vscsi->cmd_q.index = 0; + vscsi->flags |= CRQ_CLOSED; + + ibmvscsis_delete_client_info(vscsi, false); + } + + dev_dbg(&vscsi->dev, "free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n", + vscsi->flags, vscsi->state, vscsi->phyp_acr_flags, + vscsi->phyp_acr_state); + } + return rc; +} + +/** + * ibmvscsis_cmd_q_dequeue() - Get valid Command element + * @mask: Mask to use in case index wraps + * @current_index: Current index into command queue + * @base_addr: Pointer to start of command queue + * + * Returns a pointer to a valid command element or NULL, if the command + * queue is empty + * + * EXECUTION ENVIRONMENT: + * Interrupt environment, interrupt lock held + */ +static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask, + uint *current_index, + struct viosrp_crq *base_addr) +{ + struct viosrp_crq *ptr; + + ptr = base_addr + *current_index; + + if (ptr->valid) { + *current_index = (*current_index + 1) & mask; + dma_rmb(); + } else { + ptr = NULL; + } + + return ptr; +} + +/** + * ibmvscsis_send_init_message() - send initialize message to the client + * @vscsi: Pointer to our adapter structure + * @format: Which Init Message format to send + * + * EXECUTION ENVIRONMENT: + * Interrupt environment interrupt lock held + */ +static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format) +{ + struct viosrp_crq *crq; + u64 buffer[2] = { 0, 0 }; + long rc; + + crq = (struct viosrp_crq *)&buffer; + crq->valid = VALID_INIT_MSG; + crq->format = format; + rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]), + cpu_to_be64(buffer[MSG_LOW])); + + return rc; +} + +/** + * ibmvscsis_check_init_msg() - Check init message valid + * @vscsi: Pointer to our adapter structure + * @format: Pointer to return format of Init Message, if any. + * Set to UNUSED_FORMAT if no Init Message in queue. + * + * Checks if an initialize message was queued by the initiatior + * after the queue was created and before the interrupt was enabled. + * + * EXECUTION ENVIRONMENT: + * Process level only, interrupt lock held + */ +static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format) +{ + struct viosrp_crq *crq; + long rc = ADAPT_SUCCESS; + + crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index, + vscsi->cmd_q.base_addr); + if (!crq) { + *format = (uint)UNUSED_FORMAT; + } else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) { + *format = (uint)INIT_MSG; + crq->valid = INVALIDATE_CMD_RESP_EL; + dma_rmb(); + + /* + * the caller has ensured no initialize message was + * sent after the queue was + * created so there should be no other message on the queue. 
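+ * If a second element is found anyway, its format is reported back
+ * through *format and ERROR is returned, so the caller can treat the
+ * queue as out of step rather than guessing at the client's intent.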
+ */ + crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, + &vscsi->cmd_q.index, + vscsi->cmd_q.base_addr); + if (crq) { + *format = (uint)(crq->format); + rc = ERROR; + crq->valid = INVALIDATE_CMD_RESP_EL; + dma_rmb(); + } + } else { + *format = (uint)(crq->format); + rc = ERROR; + crq->valid = INVALIDATE_CMD_RESP_EL; + dma_rmb(); + } + + return rc; +} + +/** + * ibmvscsis_disconnect() - Helper function to disconnect + * @work: Pointer to work_struct, gives access to our adapter structure + * + * An error has occurred or the driver received a Transport event, + * and the driver is requesting that the command queue be de-registered + * in a safe manner. If there is no outstanding I/O then we can stop the + * queue. If we are restarting the queue it will be reflected in the + * the state of the adapter. + * + * EXECUTION ENVIRONMENT: + * Process environment + */ +static void ibmvscsis_disconnect(struct work_struct *work) +{ + struct scsi_info *vscsi = container_of(work, struct scsi_info, + proc_work); + u16 new_state; + bool wait_idle = false; + + spin_lock_bh(&vscsi->intr_lock); + new_state = vscsi->new_state; + vscsi->new_state = 0; + + vscsi->flags |= DISCONNECT_SCHEDULED; + vscsi->flags &= ~SCHEDULE_DISCONNECT; + + dev_dbg(&vscsi->dev, "disconnect: flags 0x%x, state 0x%hx\n", + vscsi->flags, vscsi->state); + + /* + * check which state we are in and see if we + * should transitition to the new state + */ + switch (vscsi->state) { + /* Should never be called while in this state. */ + case NO_QUEUE: + /* + * Can never transition from this state; + * igonore errors and logout. + */ + case UNCONFIGURING: + break; + + /* can transition from this state to UNCONFIGURING */ + case ERR_DISCONNECT: + if (new_state == UNCONFIGURING) + vscsi->state = new_state; + break; + + /* + * Can transition from this state to unconfiguring + * or err disconnect. + */ + case ERR_DISCONNECT_RECONNECT: + switch (new_state) { + case UNCONFIGURING: + case ERR_DISCONNECT: + vscsi->state = new_state; + break; + + case WAIT_IDLE: + break; + default: + break; + } + break; + + /* can transition from this state to UNCONFIGURING */ + case ERR_DISCONNECTED: + if (new_state == UNCONFIGURING) + vscsi->state = new_state; + break; + + case WAIT_ENABLED: + switch (new_state) { + case UNCONFIGURING: + vscsi->state = new_state; + vscsi->flags |= RESPONSE_Q_DOWN; + vscsi->flags &= ~(SCHEDULE_DISCONNECT | + DISCONNECT_SCHEDULED); + dma_rmb(); + if (vscsi->flags & CFG_SLEEPING) { + vscsi->flags &= ~CFG_SLEEPING; + complete(&vscsi->unconfig); + } + break; + + /* should never happen */ + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + case WAIT_IDLE: + dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n", + vscsi->state); + break; + } + break; + + case WAIT_IDLE: + switch (new_state) { + case UNCONFIGURING: + vscsi->flags |= RESPONSE_Q_DOWN; + vscsi->state = new_state; + vscsi->flags &= ~(SCHEDULE_DISCONNECT | + DISCONNECT_SCHEDULED); + ibmvscsis_free_command_q(vscsi); + break; + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + vscsi->state = new_state; + break; + } + break; + + /* + * Initiator has not done a successful srp login + * or has done a successful srp logout ( adapter was not + * busy). In the first case there can be responses queued + * waiting for space on the initiators response queue (MAD) + * The second case the adapter is idle. Assume the worse case, + * i.e. the second case. 
+ */ + case WAIT_CONNECTION: + case CONNECTED: + case SRP_PROCESSING: + wait_idle = true; + vscsi->state = new_state; + break; + + /* can transition from this state to UNCONFIGURING */ + case UNDEFINED: + if (new_state == UNCONFIGURING) + vscsi->state = new_state; + break; + default: + break; + } + + if (wait_idle) { + dev_dbg(&vscsi->dev, "disconnect start wait, active %d, sched %d\n", + (int)list_empty(&vscsi->active_q), + (int)list_empty(&vscsi->schedule_q)); + if (!list_empty(&vscsi->active_q) || + !list_empty(&vscsi->schedule_q)) { + vscsi->flags |= WAIT_FOR_IDLE; + dev_dbg(&vscsi->dev, "disconnect flags 0x%x\n", + vscsi->flags); + /* + * This routine is can not be called with the interrupt + * lock held. + */ + spin_unlock_bh(&vscsi->intr_lock); + wait_for_completion(&vscsi->wait_idle); + spin_lock_bh(&vscsi->intr_lock); + } + dev_dbg(&vscsi->dev, "disconnect stop wait\n"); + + ibmvscsis_adapter_idle(vscsi); + } + + spin_unlock_bh(&vscsi->intr_lock); +} + +/** + * ibmvscsis_post_disconnect() - Schedule the disconnect + * @vscsi: Pointer to our adapter structure + * @new_state: State to move to after disconnecting + * @flag_bits: Flags to turn on in adapter structure + * + * If it's already been scheduled, then see if we need to "upgrade" + * the new state (if the one passed in is more "severe" than the + * previous one). + * + * PRECONDITION: + * interrupt lock is held + */ +static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state, + uint flag_bits) +{ + uint state; + + /* check the validity of the new state */ + switch (new_state) { + case UNCONFIGURING: + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + case WAIT_IDLE: + break; + + default: + dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n", + new_state); + return; + } + + vscsi->flags |= flag_bits; + + dev_dbg(&vscsi->dev, "post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n", + new_state, flag_bits, vscsi->flags, vscsi->state); + + if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) { + vscsi->flags |= SCHEDULE_DISCONNECT; + vscsi->new_state = new_state; + + INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect); + (void)queue_work(vscsi->work_q, &vscsi->proc_work); + } else { + if (vscsi->new_state) + state = vscsi->new_state; + else + state = vscsi->state; + + switch (state) { + case NO_QUEUE: + case UNCONFIGURING: + break; + + case ERR_DISCONNECTED: + case ERR_DISCONNECT: + case UNDEFINED: + if (new_state == UNCONFIGURING) + vscsi->new_state = new_state; + break; + + case ERR_DISCONNECT_RECONNECT: + switch (new_state) { + case UNCONFIGURING: + case ERR_DISCONNECT: + vscsi->new_state = new_state; + break; + default: + break; + } + break; + + case WAIT_ENABLED: + case WAIT_IDLE: + case WAIT_CONNECTION: + case CONNECTED: + case SRP_PROCESSING: + vscsi->new_state = new_state; + break; + + default: + break; + } + } + + dev_dbg(&vscsi->dev, "Leaving post_disconnect: flags 0x%x, new_state 0x%x\n", + vscsi->flags, vscsi->new_state); +} + +/** + * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message + * @vscsi: Pointer to our adapter structure + * + * Must be called with interrupt lock held. 
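+ *
+ * For context, a minimal sketch of the CRQ handshake this handler
+ * completes (either side may send the first INIT_MSG; the exchange
+ * below shows the case where this target sent it):
+ *
+ *	target --- INIT_MSG ---------------> client
+ *	target <-- INIT_COMPLETE_MSG ------- client
+ *	state:  WAIT_CONNECTION -> CONNECTED
+ *
+ * The mirror case, where the client sends INIT_MSG first, is handled
+ * by ibmvscsis_handle_init_msg().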
+ */ +static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi) +{ + long rc = ADAPT_SUCCESS; + + switch (vscsi->state) { + case NO_QUEUE: + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + case ERR_DISCONNECTED: + case UNCONFIGURING: + case UNDEFINED: + rc = ERROR; + break; + + case WAIT_CONNECTION: + vscsi->state = CONNECTED; + break; + + case WAIT_IDLE: + case SRP_PROCESSING: + case CONNECTED: + case WAIT_ENABLED: + default: + rc = ERROR; + dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n", + vscsi->state); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + } + + return rc; +} + +/** + * ibmvscsis_handle_init_msg() - Respond to an Init Message + * @vscsi: Pointer to our adapter structure + * + * Must be called with interrupt lock held. + */ +static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi) +{ + long rc = ADAPT_SUCCESS; + + switch (vscsi->state) { + case WAIT_CONNECTION: + rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); + switch (rc) { + case H_SUCCESS: + vscsi->state = CONNECTED; + break; + + case H_PARAMETER: + dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); + break; + + case H_DROPPED: + dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", + rc); + rc = ERROR; + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, 0); + break; + + case H_CLOSED: + dev_warn(&vscsi->dev, "init_msg: failed to send, rc %ld\n", + rc); + rc = 0; + break; + } + break; + + case UNDEFINED: + rc = ERROR; + break; + + case UNCONFIGURING: + break; + + case WAIT_ENABLED: + case CONNECTED: + case SRP_PROCESSING: + case WAIT_IDLE: + case NO_QUEUE: + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + case ERR_DISCONNECTED: + default: + rc = ERROR; + dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n", + vscsi->state); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + } + + return rc; +} + +/** + * ibmvscsis_init_msg() - Respond to an init message + * @vscsi: Pointer to our adapter structure + * @crq: Pointer to CRQ element containing the Init Message + * + * EXECUTION ENVIRONMENT: + * Interrupt, interrupt lock held + */ +static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq) +{ + long rc = ADAPT_SUCCESS; + + dev_dbg(&vscsi->dev, "init_msg: state 0x%hx\n", vscsi->state); + + rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, + (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, + 0); + if (rc == H_SUCCESS) { + vscsi->client_data.partition_number = + be64_to_cpu(*(u64 *)vscsi->map_buf); + dev_dbg(&vscsi->dev, "init_msg, part num %d\n", + vscsi->client_data.partition_number); + } else { + dev_dbg(&vscsi->dev, "init_msg h_vioctl rc %ld\n", rc); + rc = ADAPT_SUCCESS; + } + + if (crq->format == INIT_MSG) { + rc = ibmvscsis_handle_init_msg(vscsi); + } else if (crq->format == INIT_COMPLETE_MSG) { + rc = ibmvscsis_handle_init_compl_msg(vscsi); + } else { + rc = ERROR; + dev_err(&vscsi->dev, "init_msg: invalid format %d\n", + (uint)crq->format); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + } + + return rc; +} + +/** + * ibmvscsis_establish_new_q() - Establish new CRQ queue + * @vscsi: Pointer to our adapter structure + * + * Must be called with interrupt lock held. 
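+ *
+ * Re-arms the adapter after the CRQ has been (re)registered: it asks
+ * the hypervisor for Prepare-for-Suspend notification when that is
+ * available, re-enables interrupts, and then either answers an
+ * INIT_MSG the client has already queued or sends one of its own to
+ * start the handshake.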
+ */ +static long ibmvscsis_establish_new_q(struct scsi_info *vscsi) +{ + long rc = ADAPT_SUCCESS; + uint format; + + rc = h_vioctl(vscsi->dds.unit_id, H_ENABLE_PREPARE_FOR_SUSPEND, 30000, + 0, 0, 0, 0); + if (rc == H_SUCCESS) + vscsi->flags |= PREP_FOR_SUSPEND_ENABLED; + else if (rc != H_NOT_FOUND) + dev_err(&vscsi->dev, "Error from Enable Prepare for Suspend: %ld\n", + rc); + + vscsi->flags &= PRESERVE_FLAG_FIELDS; + vscsi->rsp_q_timer.timer_pops = 0; + vscsi->debit = 0; + vscsi->credit = 0; + + rc = vio_enable_interrupts(vscsi->dma_dev); + if (rc) { + dev_warn(&vscsi->dev, "establish_new_q: failed to enable interrupts, rc %ld\n", + rc); + return rc; + } + + rc = ibmvscsis_check_init_msg(vscsi, &format); + if (rc) { + dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n", + rc); + return rc; + } + + if (format == UNUSED_FORMAT) { + rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); + switch (rc) { + case H_SUCCESS: + case H_DROPPED: + case H_CLOSED: + rc = ADAPT_SUCCESS; + break; + + case H_PARAMETER: + case H_HARDWARE: + break; + + default: + vscsi->state = UNDEFINED; + rc = H_HARDWARE; + break; + } + } else if (format == INIT_MSG) { + rc = ibmvscsis_handle_init_msg(vscsi); + } + + return rc; +} + +/** + * ibmvscsis_reset_queue() - Reset CRQ Queue + * @vscsi: Pointer to our adapter structure + * + * This function calls h_free_q and then calls h_reg_q and does all + * of the bookkeeping to get us back to where we can communicate. + * + * Actually, we don't always call h_free_crq. A problem was discovered + * where one partition would close and reopen his queue, which would + * cause his partner to get a transport event, which would cause him to + * close and reopen his queue, which would cause the original partition + * to get a transport event, etc., etc. To prevent this, we don't + * actually close our queue if the client initiated the reset, (i.e. + * either we got a transport event or we have detected that the client's + * queue is gone) + * + * EXECUTION ENVIRONMENT: + * Process environment, called with interrupt lock held + */ +static void ibmvscsis_reset_queue(struct scsi_info *vscsi) +{ + int bytes; + long rc = ADAPT_SUCCESS; + + dev_dbg(&vscsi->dev, "reset_queue: flags 0x%x\n", vscsi->flags); + + /* don't reset, the client did it for us */ + if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) { + vscsi->flags &= PRESERVE_FLAG_FIELDS; + vscsi->rsp_q_timer.timer_pops = 0; + vscsi->debit = 0; + vscsi->credit = 0; + vscsi->state = WAIT_CONNECTION; + vio_enable_interrupts(vscsi->dma_dev); + } else { + rc = ibmvscsis_free_command_q(vscsi); + if (rc == ADAPT_SUCCESS) { + vscsi->state = WAIT_CONNECTION; + + bytes = vscsi->cmd_q.size * PAGE_SIZE; + rc = h_reg_crq(vscsi->dds.unit_id, + vscsi->cmd_q.crq_token, bytes); + if (rc == H_CLOSED || rc == H_SUCCESS) { + rc = ibmvscsis_establish_new_q(vscsi); + } + + if (rc != ADAPT_SUCCESS) { + dev_dbg(&vscsi->dev, "reset_queue: reg_crq rc %ld\n", + rc); + + vscsi->state = ERR_DISCONNECTED; + vscsi->flags |= RESPONSE_Q_DOWN; + ibmvscsis_free_command_q(vscsi); + } + } else { + vscsi->state = ERR_DISCONNECTED; + vscsi->flags |= RESPONSE_Q_DOWN; + } + } +} + +/** + * ibmvscsis_free_cmd_resources() - Free command resources + * @vscsi: Pointer to our adapter structure + * @cmd: Command which is not longer in use + * + * Must be called with interrupt lock held. 
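+ *
+ * Besides returning the command element to the free list, this is
+ * where the SRP request-limit accounting is unwound (the debit taken
+ * in ibmvscsis_srp_cmd() is given back) and, once the active, schedule
+ * and waiting-response lists are all empty, where the wait_idle
+ * completion is fired so that ibmvscsis_disconnect() can make
+ * progress.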
+ */ +static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd) +{ + struct iu_entry *iue = cmd->iue; + + switch (cmd->type) { + case TASK_MANAGEMENT: + case SCSI_CDB: + /* + * When the queue goes down this value is cleared, so it + * cannot be cleared in this general purpose function. + */ + if (vscsi->debit) + vscsi->debit -= 1; + break; + case ADAPTER_MAD: + vscsi->flags &= ~PROCESSING_MAD; + break; + case UNSET_TYPE: + break; + default: + dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n", + cmd->type); + break; + } + + cmd->iue = NULL; + list_add_tail(&cmd->list, &vscsi->free_cmd); + srp_iu_put(iue); + + if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) && + list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) { + vscsi->flags &= ~WAIT_FOR_IDLE; + complete(&vscsi->wait_idle); + } +} + +/** + * ibmvscsis_ready_for_suspend() - Helper function to call VIOCTL + * @vscsi: Pointer to our adapter structure + * @idle: Indicates whether we were called from adapter_idle. This + * is important to know if we need to do a disconnect, since if + * we're called from adapter_idle, we're still processing the + * current disconnect, so we can't just call post_disconnect. + * + * This function is called when the adapter is idle when phyp has sent + * us a Prepare for Suspend Transport Event. + * + * EXECUTION ENVIRONMENT: + * Process or interrupt environment called with interrupt lock held + */ +static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle) +{ + long rc = 0; + struct viosrp_crq *crq; + + /* See if there is a Resume event in the queue */ + crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; + + dev_dbg(&vscsi->dev, "ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n", + vscsi->flags, vscsi->state, (int)crq->valid); + + if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) { + rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0, + 0, 0); + if (rc) { + dev_err(&vscsi->dev, "Ready for Suspend Vioctl failed: %ld\n", + rc); + rc = 0; + } + } else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) && + (vscsi->flags & PREP_FOR_SUSPEND_ABORTED)) || + ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) || + (crq->format != RESUME_FROM_SUSP)))) { + if (idle) { + vscsi->state = ERR_DISCONNECT_RECONNECT; + ibmvscsis_reset_queue(vscsi); + rc = -1; + } else if (vscsi->state == CONNECTED) { + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, 0); + } + + vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE; + + if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) || + (crq->format != RESUME_FROM_SUSP))) + dev_err(&vscsi->dev, "Invalid element in CRQ after Prepare for Suspend"); + } + + vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED); + + return rc; +} + +/** + * ibmvscsis_trans_event() - Handle a Transport Event + * @vscsi: Pointer to our adapter structure + * @crq: Pointer to CRQ entry containing the Transport Event + * + * Do the logic to close the I_T nexus. This function may not + * behave to specification. 
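+ *
+ * The formats handled here are MIGRATED, PARTNER_FAILED,
+ * PARTNER_DEREGISTER, PREPARE_FOR_SUSPEND and RESUME_FROM_SUSP; what
+ * is done for each depends on the adapter state when the event
+ * arrives. The return value is simply the SCHEDULE_DISCONNECT bit of
+ * vscsi->flags, which the caller uses to decide whether later queue
+ * elements should still be parsed.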
+ * + * EXECUTION ENVIRONMENT: + * Interrupt, interrupt lock held + */ +static long ibmvscsis_trans_event(struct scsi_info *vscsi, + struct viosrp_crq *crq) +{ + long rc = ADAPT_SUCCESS; + + dev_dbg(&vscsi->dev, "trans_event: format %d, flags 0x%x, state 0x%hx\n", + (int)crq->format, vscsi->flags, vscsi->state); + + switch (crq->format) { + case MIGRATED: + case PARTNER_FAILED: + case PARTNER_DEREGISTER: + ibmvscsis_delete_client_info(vscsi, true); + if (crq->format == MIGRATED) + vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE; + switch (vscsi->state) { + case NO_QUEUE: + case ERR_DISCONNECTED: + case UNDEFINED: + break; + + case UNCONFIGURING: + vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT); + break; + + case WAIT_ENABLED: + break; + + case WAIT_CONNECTION: + break; + + case CONNECTED: + ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, + (RESPONSE_Q_DOWN | + TRANS_EVENT)); + break; + + case SRP_PROCESSING: + if ((vscsi->debit > 0) || + !list_empty(&vscsi->schedule_q) || + !list_empty(&vscsi->waiting_rsp) || + !list_empty(&vscsi->active_q)) { + dev_dbg(&vscsi->dev, "debit %d, sched %d, wait %d, active %d\n", + vscsi->debit, + (int)list_empty(&vscsi->schedule_q), + (int)list_empty(&vscsi->waiting_rsp), + (int)list_empty(&vscsi->active_q)); + dev_warn(&vscsi->dev, "connection lost with outstanding work\n"); + } else { + dev_dbg(&vscsi->dev, "trans_event: SRP Processing, but no outstanding work\n"); + } + + ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, + (RESPONSE_Q_DOWN | + TRANS_EVENT)); + break; + + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + case WAIT_IDLE: + vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT); + break; + } + break; + + case PREPARE_FOR_SUSPEND: + dev_dbg(&vscsi->dev, "Prep for Suspend, crq status = 0x%x\n", + (int)crq->status); + switch (vscsi->state) { + case ERR_DISCONNECTED: + case WAIT_CONNECTION: + case CONNECTED: + ibmvscsis_ready_for_suspend(vscsi, false); + break; + case SRP_PROCESSING: + vscsi->resume_state = vscsi->state; + vscsi->flags |= PREP_FOR_SUSPEND_PENDING; + if (crq->status == CRQ_ENTRY_OVERWRITTEN) + vscsi->flags |= PREP_FOR_SUSPEND_OVERWRITE; + ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0); + break; + case NO_QUEUE: + case UNDEFINED: + case UNCONFIGURING: + case WAIT_ENABLED: + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + case WAIT_IDLE: + dev_err(&vscsi->dev, "Invalid state for Prepare for Suspend Trans Event: 0x%x\n", + vscsi->state); + break; + } + break; + + case RESUME_FROM_SUSP: + dev_dbg(&vscsi->dev, "Resume from Suspend, crq status = 0x%x\n", + (int)crq->status); + if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) { + vscsi->flags |= PREP_FOR_SUSPEND_ABORTED; + } else { + if ((crq->status == CRQ_ENTRY_OVERWRITTEN) || + (vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE)) { + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, + 0); + vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE; + } + } + break; + + default: + rc = ERROR; + dev_err(&vscsi->dev, "trans_event: invalid format %d\n", + (uint)crq->format); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, + RESPONSE_Q_DOWN); + break; + } + + rc = vscsi->flags & SCHEDULE_DISCONNECT; + + dev_dbg(&vscsi->dev, "Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n", + vscsi->flags, vscsi->state, rc); + + return rc; +} + +/** + * ibmvscsis_poll_cmd_q() - Poll Command Queue + * @vscsi: Pointer to our adapter structure + * + * Called to handle command elements that may have arrived while + * interrupts were disabled. 
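+ *
+ * After the queue has been drained, interrupts are re-enabled and the
+ * current element is checked one more time; this closes the window in
+ * which a new element could arrive between the last dequeue and the
+ * call to vio_enable_interrupts() and otherwise sit unnoticed until
+ * the next interrupt. The index arithmetic assumes the queue length is
+ * a power of two, so that (index + 1) & mask wraps cleanly; for
+ * example, with a mask of 0x3f, an index of 63 wraps back to 0.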
+ * + * EXECUTION ENVIRONMENT: + * intr_lock must be held + */ +static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi) +{ + struct viosrp_crq *crq; + long rc; + bool ack = true; + volatile u8 valid; + + dev_dbg(&vscsi->dev, "poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n", + vscsi->flags, vscsi->state, vscsi->cmd_q.index); + + rc = vscsi->flags & SCHEDULE_DISCONNECT; + crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; + valid = crq->valid; + dma_rmb(); + + while (valid) { +poll_work: + vscsi->cmd_q.index = + (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask; + + if (!rc) { + rc = ibmvscsis_parse_command(vscsi, crq); + } else { + if ((uint)crq->valid == VALID_TRANS_EVENT) { + /* + * must service the transport layer events even + * in an error state, dont break out until all + * the consecutive transport events have been + * processed + */ + rc = ibmvscsis_trans_event(vscsi, crq); + } else if (vscsi->flags & TRANS_EVENT) { + /* + * if a tranport event has occurred leave + * everything but transport events on the queue + */ + dev_dbg(&vscsi->dev, "poll_cmd_q, ignoring\n"); + + /* + * need to decrement the queue index so we can + * look at the elment again + */ + if (vscsi->cmd_q.index) + vscsi->cmd_q.index -= 1; + else + /* + * index is at 0 it just wrapped. + * have it index last element in q + */ + vscsi->cmd_q.index = vscsi->cmd_q.mask; + break; + } + } + + crq->valid = INVALIDATE_CMD_RESP_EL; + + crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; + valid = crq->valid; + dma_rmb(); + } + + if (!rc) { + if (ack) { + vio_enable_interrupts(vscsi->dma_dev); + ack = false; + dev_dbg(&vscsi->dev, "poll_cmd_q, reenabling interrupts\n"); + } + valid = crq->valid; + dma_rmb(); + if (valid) + goto poll_work; + } + + dev_dbg(&vscsi->dev, "Leaving poll_cmd_q: rc %ld\n", rc); +} + +/** + * ibmvscsis_free_cmd_qs() - Free elements in queue + * @vscsi: Pointer to our adapter structure + * + * Free all of the elements on all queues that are waiting for + * whatever reason. + * + * PRECONDITION: + * Called with interrupt lock held + */ +static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi) +{ + struct ibmvscsis_cmd *cmd, *nxt; + + dev_dbg(&vscsi->dev, "free_cmd_qs: waiting_rsp empty %d, timer starter %d\n", + (int)list_empty(&vscsi->waiting_rsp), + vscsi->rsp_q_timer.started); + + list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) { + list_del(&cmd->list); + ibmvscsis_free_cmd_resources(vscsi, cmd); + } +} + +/** + * ibmvscsis_get_free_cmd() - Get free command from list + * @vscsi: Pointer to our adapter structure + * + * Must be called with interrupt lock held. + */ +static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi) +{ + struct ibmvscsis_cmd *cmd = NULL; + struct iu_entry *iue; + + iue = srp_iu_get(&vscsi->target); + if (iue) { + cmd = list_first_entry_or_null(&vscsi->free_cmd, + struct ibmvscsis_cmd, list); + if (cmd) { + if (cmd->abort_cmd) + cmd->abort_cmd = NULL; + cmd->flags &= ~(DELAY_SEND); + list_del(&cmd->list); + cmd->iue = iue; + cmd->type = UNSET_TYPE; + memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd)); + } else { + srp_iu_put(iue); + } + } + + return cmd; +} + +/** + * ibmvscsis_adapter_idle() - Helper function to handle idle adapter + * @vscsi: Pointer to our adapter structure + * + * This function is called when the adapter is idle when the driver + * is attempting to clear an error condition. + * The adapter is considered busy if any of its cmd queues + * are non-empty. This function can be invoked + * from the off level disconnect function. 
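+ *
+ * Depending on the state that scheduled the disconnect, this either
+ * completes an unconfigure, resets and re-registers the command queue,
+ * or settles back into WAIT_CONNECTION/CONNECTED; when the client
+ * connection is being torn down, any responses still queued are freed
+ * rather than sent.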
+ * + * EXECUTION ENVIRONMENT: + * Process environment called with interrupt lock held + */ +static void ibmvscsis_adapter_idle(struct scsi_info *vscsi) +{ + int free_qs = false; + long rc = 0; + + dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx\n", + vscsi->flags, vscsi->state); + + /* Only need to free qs if we're disconnecting from client */ + if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT) + free_qs = true; + + switch (vscsi->state) { + case UNCONFIGURING: + ibmvscsis_free_command_q(vscsi); + dma_rmb(); + isync(); + if (vscsi->flags & CFG_SLEEPING) { + vscsi->flags &= ~CFG_SLEEPING; + complete(&vscsi->unconfig); + } + break; + case ERR_DISCONNECT_RECONNECT: + ibmvscsis_reset_queue(vscsi); + dev_dbg(&vscsi->dev, "adapter_idle, disc_rec: flags 0x%x\n", + vscsi->flags); + break; + + case ERR_DISCONNECT: + ibmvscsis_free_command_q(vscsi); + vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED); + vscsi->flags |= RESPONSE_Q_DOWN; + if (vscsi->tport.enabled) + vscsi->state = ERR_DISCONNECTED; + else + vscsi->state = WAIT_ENABLED; + dev_dbg(&vscsi->dev, "adapter_idle, disc: flags 0x%x, state 0x%hx\n", + vscsi->flags, vscsi->state); + break; + + case WAIT_IDLE: + vscsi->rsp_q_timer.timer_pops = 0; + vscsi->debit = 0; + vscsi->credit = 0; + if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) { + vscsi->state = vscsi->resume_state; + vscsi->resume_state = 0; + rc = ibmvscsis_ready_for_suspend(vscsi, true); + vscsi->flags &= ~DISCONNECT_SCHEDULED; + if (rc) + break; + } else if (vscsi->flags & TRANS_EVENT) { + vscsi->state = WAIT_CONNECTION; + vscsi->flags &= PRESERVE_FLAG_FIELDS; + } else { + vscsi->state = CONNECTED; + vscsi->flags &= ~DISCONNECT_SCHEDULED; + } + + dev_dbg(&vscsi->dev, "adapter_idle, wait: flags 0x%x, state 0x%hx\n", + vscsi->flags, vscsi->state); + ibmvscsis_poll_cmd_q(vscsi); + break; + + case ERR_DISCONNECTED: + vscsi->flags &= ~DISCONNECT_SCHEDULED; + dev_dbg(&vscsi->dev, "adapter_idle, disconnected: flags 0x%x, state 0x%hx\n", + vscsi->flags, vscsi->state); + break; + + default: + dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n", + vscsi->state); + break; + } + + if (free_qs) + ibmvscsis_free_cmd_qs(vscsi); + + /* + * There is a timing window where we could lose a disconnect request. + * The known path to this window occurs during the DISCONNECT_RECONNECT + * case above: reset_queue calls free_command_q, which will release the + * interrupt lock. During that time, a new post_disconnect call can be + * made with a "more severe" state (DISCONNECT or UNCONFIGURING). + * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect + * will only set the new_state. Now free_command_q reacquires the intr + * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_ + * FIELDS), and the disconnect is lost. This is particularly bad when + * the new disconnect was for UNCONFIGURING, since the unconfigure hangs + * forever. 
+ * Fix is that free command queue sets acr state and acr flags if there + * is a change under the lock + * note free command queue writes to this state it clears it + * before releasing the lock, different drivers call the free command + * queue different times so dont initialize above + */ + if (vscsi->phyp_acr_state != 0) { + /* + * set any bits in flags that may have been cleared by + * a call to free command queue in switch statement + * or reset queue + */ + vscsi->flags |= vscsi->phyp_acr_flags; + ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0); + vscsi->phyp_acr_state = 0; + vscsi->phyp_acr_flags = 0; + + dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n", + vscsi->flags, vscsi->state, vscsi->phyp_acr_flags, + vscsi->phyp_acr_state); + } + + dev_dbg(&vscsi->dev, "Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n", + vscsi->flags, vscsi->state, vscsi->new_state); +} + +/** + * ibmvscsis_copy_crq_packet() - Copy CRQ Packet + * @vscsi: Pointer to our adapter structure + * @cmd: Pointer to command element to use to process the request + * @crq: Pointer to CRQ entry containing the request + * + * Copy the srp information unit from the hosted + * partition using remote dma + * + * EXECUTION ENVIRONMENT: + * Interrupt, interrupt lock held + */ +static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd, + struct viosrp_crq *crq) +{ + struct iu_entry *iue = cmd->iue; + long rc = 0; + u16 len; + + len = be16_to_cpu(crq->IU_length); + if ((len > SRP_MAX_IU_LEN) || (len == 0)) { + dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed", len); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + return SRP_VIOLATION; + } + + rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(crq->IU_data_ptr), + vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma); + + switch (rc) { + case H_SUCCESS: + cmd->init_time = mftb(); + iue->remote_token = crq->IU_data_ptr; + iue->iu_len = len; + dev_dbg(&vscsi->dev, "copy_crq: ioba 0x%llx, init_time 0x%llx\n", + be64_to_cpu(crq->IU_data_ptr), cmd->init_time); + break; + case H_PERMISSION: + if (connection_broken(vscsi)) + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, + (RESPONSE_Q_DOWN | + CLIENT_FAILED)); + else + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, 0); + + dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n", + rc); + break; + case H_DEST_PARM: + case H_SOURCE_PARM: + default: + dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + } + + return rc; +} + +/** + * ibmvscsis_adapter_info - Service an Adapter Info MAnagement Data gram + * @vscsi: Pointer to our adapter structure + * @iue: Information Unit containing the Adapter Info MAD request + * + * EXECUTION ENVIRONMENT: + * Interrupt adapter lock is held + */ +static long ibmvscsis_adapter_info(struct scsi_info *vscsi, + struct iu_entry *iue) +{ + struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info; + struct mad_adapter_info_data *info; + uint flag_bits = 0; + dma_addr_t token; + long rc; + + mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS); + + if (be16_to_cpu(mad->common.length) > sizeof(*info)) { + mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED); + return 0; + } + + info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token, + GFP_ATOMIC); + if (!info) { + dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", + 
iue->target); + mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED); + return 0; + } + + /* Get remote info */ + rc = h_copy_rdma(be16_to_cpu(mad->common.length), + vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(mad->buffer), + vscsi->dds.window[LOCAL].liobn, token); + + if (rc != H_SUCCESS) { + if (rc == H_PERMISSION) { + if (connection_broken(vscsi)) + flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); + } + dev_warn(&vscsi->dev, "adapter_info: h_copy_rdma from client failed, rc %ld\n", + rc); + dev_dbg(&vscsi->dev, "adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n", + be64_to_cpu(mad->buffer), vscsi->flags, flag_bits); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, + flag_bits); + goto free_dma; + } + + /* + * Copy client info, but ignore partition number, which we + * already got from phyp - unless we failed to get it from + * phyp (e.g. if we're running on a p5 system). + */ + if (vscsi->client_data.partition_number == 0) + vscsi->client_data.partition_number = + be32_to_cpu(info->partition_number); + strncpy(vscsi->client_data.srp_version, info->srp_version, + sizeof(vscsi->client_data.srp_version)); + strncpy(vscsi->client_data.partition_name, info->partition_name, + sizeof(vscsi->client_data.partition_name)); + vscsi->client_data.mad_version = be32_to_cpu(info->mad_version); + vscsi->client_data.os_type = be32_to_cpu(info->os_type); + + /* Copy our info */ + strncpy(info->srp_version, SRP_VERSION, + sizeof(info->srp_version)); + strncpy(info->partition_name, vscsi->dds.partition_name, + sizeof(info->partition_name)); + info->partition_number = cpu_to_be32(vscsi->dds.partition_num); + info->mad_version = cpu_to_be32(MAD_VERSION_1); + info->os_type = cpu_to_be32(LINUX); + memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu)); + info->port_max_txu[0] = cpu_to_be32(MAX_TXU); + + dma_wmb(); + rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn, + token, vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(mad->buffer)); + switch (rc) { + case H_SUCCESS: + break; + + case H_SOURCE_PARM: + case H_DEST_PARM: + case H_PERMISSION: + if (connection_broken(vscsi)) + flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); + fallthrough; + default: + dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, + flag_bits); + break; + } + +free_dma: + dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token); + dev_dbg(&vscsi->dev, "Leaving adapter_info, rc %ld\n", rc); + + return rc; +} + +/** + * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Data gram + * @vscsi: Pointer to our adapter structure + * @iue: Information Unit containing the Capabilities MAD request + * + * NOTE: if you return an error from this routine you must be + * disconnecting or you will cause a hang + * + * EXECUTION ENVIRONMENT: + * Interrupt called with adapter lock held + */ +static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue) +{ + struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities; + struct capabilities *cap; + struct mad_capability_common *common; + dma_addr_t token; + u16 olen, len, status, min_len, cap_len; + u32 flag; + uint flag_bits = 0; + long rc = 0; + + olen = be16_to_cpu(mad->common.length); + /* + * struct capabilities hardcodes a couple capabilities after the + * header, but the capabilities can actually be in any order. 
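+ * That is why only the fixed header is length-checked up front
+ * (min_len below is offsetof(struct capabilities, migration)) and the
+ * individual entries are then walked using the length field of each
+ * struct mad_capability_common rather than by indexing the hardcoded
+ * members directly.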
+ */ + min_len = offsetof(struct capabilities, migration); + if ((olen < min_len) || (olen > PAGE_SIZE)) { + dev_warn(&vscsi->dev, "cap_mad: invalid len %d\n", olen); + mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED); + return 0; + } + + cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token, + GFP_ATOMIC); + if (!cap) { + dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", + iue->target); + mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED); + return 0; + } + rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(mad->buffer), + vscsi->dds.window[LOCAL].liobn, token); + if (rc == H_SUCCESS) { + strncpy(cap->name, dev_name(&vscsi->dma_dev->dev), + SRP_MAX_LOC_LEN); + + len = olen - min_len; + status = VIOSRP_MAD_SUCCESS; + common = (struct mad_capability_common *)&cap->migration; + + while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) { + dev_dbg(&vscsi->dev, "cap_mad: len left %hd, cap type %d, cap len %hd\n", + len, be32_to_cpu(common->cap_type), + be16_to_cpu(common->length)); + + cap_len = be16_to_cpu(common->length); + if (cap_len > len) { + dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n"); + status = VIOSRP_MAD_FAILED; + break; + } + + if (cap_len == 0) { + dev_err(&vscsi->dev, "cap_mad: cap len is 0\n"); + status = VIOSRP_MAD_FAILED; + break; + } + + switch (common->cap_type) { + default: + dev_dbg(&vscsi->dev, "cap_mad: unsupported capability\n"); + common->server_support = 0; + flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED); + cap->flags &= ~flag; + break; + } + + len = len - cap_len; + common = (struct mad_capability_common *) + ((char *)common + cap_len); + } + + mad->common.status = cpu_to_be16(status); + + dma_wmb(); + rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token, + vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(mad->buffer)); + + if (rc != H_SUCCESS) { + dev_dbg(&vscsi->dev, "cap_mad: failed to copy to client, rc %ld\n", + rc); + + if (rc == H_PERMISSION) { + if (connection_broken(vscsi)) + flag_bits = (RESPONSE_Q_DOWN | + CLIENT_FAILED); + } + + dev_warn(&vscsi->dev, "cap_mad: error copying data to client, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, + flag_bits); + } + } + + dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token); + + dev_dbg(&vscsi->dev, "Leaving cap_mad, rc %ld, client_cap 0x%x\n", + rc, vscsi->client_cap); + + return rc; +} + +/** + * ibmvscsis_process_mad() - Service a MAnagement Data gram + * @vscsi: Pointer to our adapter structure + * @iue: Information Unit containing the MAD request + * + * Must be called with interrupt lock held. 
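+ *
+ * The supported MAD types are the empty IU, adapter info, capabilities
+ * and enable-fast-fail requests; any other type is answered with
+ * VIOSRP_MAD_NOT_SUPPORTED in the MAD status rather than being treated
+ * as an adapter error.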
+ */ +static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue) +{ + struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad; + struct viosrp_empty_iu *empty; + long rc = ADAPT_SUCCESS; + + switch (be32_to_cpu(mad->type)) { + case VIOSRP_EMPTY_IU_TYPE: + empty = &vio_iu(iue)->mad.empty_iu; + vscsi->empty_iu_id = be64_to_cpu(empty->buffer); + vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag); + mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS); + break; + case VIOSRP_ADAPTER_INFO_TYPE: + rc = ibmvscsis_adapter_info(vscsi, iue); + break; + case VIOSRP_CAPABILITIES_TYPE: + rc = ibmvscsis_cap_mad(vscsi, iue); + break; + case VIOSRP_ENABLE_FAST_FAIL: + if (vscsi->state == CONNECTED) { + vscsi->fast_fail = true; + mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS); + } else { + dev_warn(&vscsi->dev, "fast fail mad sent after login\n"); + mad->status = cpu_to_be16(VIOSRP_MAD_FAILED); + } + break; + default: + mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED); + break; + } + + return rc; +} + +/** + * srp_snd_msg_failed() - Handle an error when sending a response + * @vscsi: Pointer to our adapter structure + * @rc: The return code from the h_send_crq command + * + * Must be called with interrupt lock held. + */ +static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc) +{ + ktime_t kt; + + if (rc != H_DROPPED) { + ibmvscsis_free_cmd_qs(vscsi); + + if (rc == H_CLOSED) + vscsi->flags |= CLIENT_FAILED; + + /* don't flag the same problem multiple times */ + if (!(vscsi->flags & RESPONSE_Q_DOWN)) { + vscsi->flags |= RESPONSE_Q_DOWN; + if (!(vscsi->state & (ERR_DISCONNECT | + ERR_DISCONNECT_RECONNECT | + ERR_DISCONNECTED | UNDEFINED))) { + dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n", + vscsi->state, vscsi->flags, rc); + } + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, 0); + } + return; + } + + /* + * The response queue is full. + * If the server is processing SRP requests, i.e. + * the client has successfully done an + * SRP_LOGIN, then it will wait forever for room in + * the queue. However if the system admin + * is attempting to unconfigure the server then one + * or more children will be in a state where + * they are being removed. So if there is even one + * child being removed then the driver assumes + * the system admin is attempting to break the + * connection with the client and MAX_TIMER_POPS + * is honored. + */ + if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) || + (vscsi->state == SRP_PROCESSING)) { + dev_dbg(&vscsi->dev, "snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n", + vscsi->flags, (int)vscsi->rsp_q_timer.started, + vscsi->rsp_q_timer.timer_pops); + + /* + * Check if the timer is running; if it + * is not then start it up. + */ + if (!vscsi->rsp_q_timer.started) { + if (vscsi->rsp_q_timer.timer_pops < + MAX_TIMER_POPS) { + kt = WAIT_NANO_SECONDS; + } else { + /* + * slide the timeslice if the maximum + * timer pops have already happened + */ + kt = ktime_set(WAIT_SECONDS, 0); + } + + vscsi->rsp_q_timer.started = true; + hrtimer_start(&vscsi->rsp_q_timer.timer, kt, + HRTIMER_MODE_REL); + } + } else { + /* + * TBD: Do we need to worry about this? Need to get + * remove working. 
+ */ + /* + * waited a long time and it appears the system admin + * is bring this driver down + */ + vscsi->flags |= RESPONSE_Q_DOWN; + ibmvscsis_free_cmd_qs(vscsi); + /* + * if the driver is already attempting to disconnect + * from the client and has already logged an error + * trace this event but don't put it in the error log + */ + if (!(vscsi->state & (ERR_DISCONNECT | + ERR_DISCONNECT_RECONNECT | + ERR_DISCONNECTED | UNDEFINED))) { + dev_err(&vscsi->dev, "client crq full too long\n"); + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, + 0); + } + } +} + +/** + * ibmvscsis_send_messages() - Send a Response + * @vscsi: Pointer to our adapter structure + * + * Send a response, first checking the waiting queue. Responses are + * sent in order they are received. If the response cannot be sent, + * because the client queue is full, it stays on the waiting queue. + * + * PRECONDITION: + * Called with interrupt lock held + */ +static void ibmvscsis_send_messages(struct scsi_info *vscsi) +{ + struct viosrp_crq empty_crq = { }; + struct viosrp_crq *crq = &empty_crq; + struct ibmvscsis_cmd *cmd, *nxt; + long rc = ADAPT_SUCCESS; + bool retry = false; + + if (!(vscsi->flags & RESPONSE_Q_DOWN)) { + do { + retry = false; + list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, + list) { + /* + * Check to make sure abort cmd gets processed + * prior to the abort tmr cmd + */ + if (cmd->flags & DELAY_SEND) + continue; + + if (cmd->abort_cmd) { + retry = true; + cmd->abort_cmd->flags &= ~(DELAY_SEND); + cmd->abort_cmd = NULL; + } + + /* + * If CMD_T_ABORTED w/o CMD_T_TAS scenarios and + * the case where LIO issued a + * ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST + * case then we dont send a response, since it + * was already done. + */ + if (cmd->se_cmd.transport_state & CMD_T_ABORTED && + !(cmd->se_cmd.transport_state & CMD_T_TAS)) { + list_del(&cmd->list); + ibmvscsis_free_cmd_resources(vscsi, + cmd); + /* + * With a successfully aborted op + * through LIO we want to increment the + * the vscsi credit so that when we dont + * send a rsp to the original scsi abort + * op (h_send_crq), but the tm rsp to + * the abort is sent, the credit is + * correctly sent with the abort tm rsp. + * We would need 1 for the abort tm rsp + * and 1 credit for the aborted scsi op. + * Thus we need to increment here. + * Also we want to increment the credit + * here because we want to make sure + * cmd is actually released first + * otherwise the client will think it + * it can send a new cmd, and we could + * find ourselves short of cmd elements. + */ + vscsi->credit += 1; + } else { + crq->valid = VALID_CMD_RESP_EL; + crq->format = cmd->rsp.format; + + if (cmd->flags & CMD_FAST_FAIL) + crq->status = VIOSRP_ADAPTER_FAIL; + + crq->IU_length = cpu_to_be16(cmd->rsp.len); + + rc = h_send_crq(vscsi->dma_dev->unit_address, + be64_to_cpu(crq->high), + be64_to_cpu(cmd->rsp.tag)); + + dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n", + cmd, be64_to_cpu(cmd->rsp.tag), + rc); + + /* if all ok free up the command + * element resources + */ + if (rc == H_SUCCESS) { + /* some movement has occurred */ + vscsi->rsp_q_timer.timer_pops = 0; + list_del(&cmd->list); + + ibmvscsis_free_cmd_resources(vscsi, + cmd); + } else { + srp_snd_msg_failed(vscsi, rc); + break; + } + } + } + } while (retry); + + if (!rc) { + /* + * The timer could pop with the queue empty. If + * this happens, rc will always indicate a + * success; clear the pop count. 
+ */ + vscsi->rsp_q_timer.timer_pops = 0; + } + } else { + ibmvscsis_free_cmd_qs(vscsi); + } +} + +/* Called with intr lock held */ +static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd, + struct viosrp_crq *crq) +{ + struct iu_entry *iue = cmd->iue; + struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad; + uint flag_bits = 0; + long rc; + + dma_wmb(); + rc = h_copy_rdma(sizeof(struct mad_common), + vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma, + vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(crq->IU_data_ptr)); + if (!rc) { + cmd->rsp.format = VIOSRP_MAD_FORMAT; + cmd->rsp.len = sizeof(struct mad_common); + cmd->rsp.tag = mad->tag; + list_add_tail(&cmd->list, &vscsi->waiting_rsp); + ibmvscsis_send_messages(vscsi); + } else { + dev_dbg(&vscsi->dev, "Error sending mad response, rc %ld\n", + rc); + if (rc == H_PERMISSION) { + if (connection_broken(vscsi)) + flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); + } + dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n", + rc); + + ibmvscsis_free_cmd_resources(vscsi, cmd); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, + flag_bits); + } +} + +/** + * ibmvscsis_mad() - Service a MAnagement Data gram. + * @vscsi: Pointer to our adapter structure + * @crq: Pointer to the CRQ entry containing the MAD request + * + * EXECUTION ENVIRONMENT: + * Interrupt, called with adapter lock held + */ +static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq) +{ + struct iu_entry *iue; + struct ibmvscsis_cmd *cmd; + struct mad_common *mad; + long rc = ADAPT_SUCCESS; + + switch (vscsi->state) { + /* + * We have not exchanged Init Msgs yet, so this MAD was sent + * before the last Transport Event; client will not be + * expecting a response. + */ + case WAIT_CONNECTION: + dev_dbg(&vscsi->dev, "mad: in Wait Connection state, ignoring MAD, flags %d\n", + vscsi->flags); + return ADAPT_SUCCESS; + + case SRP_PROCESSING: + case CONNECTED: + break; + + /* + * We should never get here while we're in these states. + * Just log an error and get out. 
+ */ + case UNCONFIGURING: + case WAIT_IDLE: + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + default: + dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n", + vscsi->state); + return ADAPT_SUCCESS; + } + + cmd = ibmvscsis_get_free_cmd(vscsi); + if (!cmd) { + dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n", + vscsi->debit); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + return ERROR; + } + iue = cmd->iue; + cmd->type = ADAPTER_MAD; + + rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq); + if (!rc) { + mad = (struct mad_common *)&vio_iu(iue)->mad; + + dev_dbg(&vscsi->dev, "mad: type %d\n", be32_to_cpu(mad->type)); + + rc = ibmvscsis_process_mad(vscsi, iue); + + dev_dbg(&vscsi->dev, "mad: status %hd, rc %ld\n", + be16_to_cpu(mad->status), rc); + + if (!rc) + ibmvscsis_send_mad_resp(vscsi, cmd, crq); + } else { + ibmvscsis_free_cmd_resources(vscsi, cmd); + } + + dev_dbg(&vscsi->dev, "Leaving mad, rc %ld\n", rc); + return rc; +} + +/** + * ibmvscsis_login_rsp() - Create/copy a login response notice to the client + * @vscsi: Pointer to our adapter structure + * @cmd: Pointer to the command for the SRP Login request + * + * EXECUTION ENVIRONMENT: + * Interrupt, interrupt lock held + */ +static long ibmvscsis_login_rsp(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd) +{ + struct iu_entry *iue = cmd->iue; + struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp; + struct format_code *fmt; + uint flag_bits = 0; + long rc = ADAPT_SUCCESS; + + memset(rsp, 0, sizeof(struct srp_login_rsp)); + + rsp->opcode = SRP_LOGIN_RSP; + rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit); + rsp->tag = cmd->rsp.tag; + rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN); + rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN); + fmt = (struct format_code *)&rsp->buf_fmt; + fmt->buffers = SUPPORTED_FORMATS; + vscsi->credit = 0; + + cmd->rsp.len = sizeof(struct srp_login_rsp); + + dma_wmb(); + rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn, + iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(iue->remote_token)); + + switch (rc) { + case H_SUCCESS: + break; + + case H_PERMISSION: + if (connection_broken(vscsi)) + flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED; + dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, + flag_bits); + break; + case H_SOURCE_PARM: + case H_DEST_PARM: + default: + dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + } + + return rc; +} + +/** + * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client + * @vscsi: Pointer to our adapter structure + * @cmd: Pointer to the command for the SRP Login request + * @reason: The reason the SRP Login is being rejected, per SRP protocol + * + * EXECUTION ENVIRONMENT: + * Interrupt, interrupt lock held + */ +static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd, u32 reason) +{ + struct iu_entry *iue = cmd->iue; + struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej; + struct format_code *fmt; + uint flag_bits = 0; + long rc = ADAPT_SUCCESS; + + memset(rej, 0, sizeof(*rej)); + + rej->opcode = SRP_LOGIN_REJ; + rej->reason = cpu_to_be32(reason); + rej->tag = cmd->rsp.tag; + fmt = (struct format_code *)&rej->buf_fmt; + fmt->buffers = SUPPORTED_FORMATS; + + cmd->rsp.len = sizeof(*rej); + + dma_wmb(); + rc = h_copy_rdma(cmd->rsp.len, 
vscsi->dds.window[LOCAL].liobn, + iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(iue->remote_token)); + + switch (rc) { + case H_SUCCESS: + break; + case H_PERMISSION: + if (connection_broken(vscsi)) + flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED; + dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, + flag_bits); + break; + case H_SOURCE_PARM: + case H_DEST_PARM: + default: + dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + } + + return rc; +} + +static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport) +{ + char *name = tport->tport_name; + struct ibmvscsis_nexus *nexus; + struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport); + int rc; + + if (tport->ibmv_nexus) { + dev_dbg(&vscsi->dev, "tport->ibmv_nexus already exists\n"); + return 0; + } + + nexus = kzalloc(sizeof(*nexus), GFP_KERNEL); + if (!nexus) { + dev_err(&vscsi->dev, "Unable to allocate struct ibmvscsis_nexus\n"); + return -ENOMEM; + } + + nexus->se_sess = target_setup_session(&tport->se_tpg, 0, 0, + TARGET_PROT_NORMAL, name, nexus, + NULL); + if (IS_ERR(nexus->se_sess)) { + rc = PTR_ERR(nexus->se_sess); + goto transport_init_fail; + } + + tport->ibmv_nexus = nexus; + + return 0; + +transport_init_fail: + kfree(nexus); + return rc; +} + +static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport) +{ + struct se_session *se_sess; + struct ibmvscsis_nexus *nexus; + + nexus = tport->ibmv_nexus; + if (!nexus) + return -ENODEV; + + se_sess = nexus->se_sess; + if (!se_sess) + return -ENODEV; + + /* + * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port + */ + target_remove_session(se_sess); + tport->ibmv_nexus = NULL; + kfree(nexus); + + return 0; +} + +/** + * ibmvscsis_srp_login() - Process an SRP Login Request + * @vscsi: Pointer to our adapter structure + * @cmd: Command element to use to process the SRP Login request + * @crq: Pointer to CRQ entry containing the SRP Login request + * + * EXECUTION ENVIRONMENT: + * Interrupt, called with interrupt lock held + */ +static long ibmvscsis_srp_login(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd, + struct viosrp_crq *crq) +{ + struct iu_entry *iue = cmd->iue; + struct srp_login_req *req = &vio_iu(iue)->srp.login_req; + struct port_id { + __be64 id_extension; + __be64 io_guid; + } *iport, *tport; + struct format_code *fmt; + u32 reason = 0x0; + long rc = ADAPT_SUCCESS; + + iport = (struct port_id *)req->initiator_port_id; + tport = (struct port_id *)req->target_port_id; + fmt = (struct format_code *)&req->req_buf_fmt; + if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN) + reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE; + else if (be32_to_cpu(req->req_it_iu_len) < 64) + reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL; + else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) || + (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1))) + reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL; + else if (req->req_flags & SRP_MULTICHAN_MULTI) + reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED; + else if (fmt->buffers & (~SUPPORTED_FORMATS)) + reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT; + else if ((fmt->buffers & SUPPORTED_FORMATS) == 0) + reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT; + + if (vscsi->state == SRP_PROCESSING) + reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED; + + rc = ibmvscsis_make_nexus(&vscsi->tport); + if 
(rc) + reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL; + + cmd->rsp.format = VIOSRP_SRP_FORMAT; + cmd->rsp.tag = req->tag; + + dev_dbg(&vscsi->dev, "srp_login: reason 0x%x\n", reason); + + if (reason) + rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason); + else + rc = ibmvscsis_login_rsp(vscsi, cmd); + + if (!rc) { + if (!reason) + vscsi->state = SRP_PROCESSING; + + list_add_tail(&cmd->list, &vscsi->waiting_rsp); + ibmvscsis_send_messages(vscsi); + } else { + ibmvscsis_free_cmd_resources(vscsi, cmd); + } + + dev_dbg(&vscsi->dev, "Leaving srp_login, rc %ld\n", rc); + return rc; +} + +/** + * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus + * @vscsi: Pointer to our adapter structure + * @cmd: Command element to use to process the Implicit Logout request + * @crq: Pointer to CRQ entry containing the Implicit Logout request + * + * Do the logic to close the I_T nexus. This function may not + * behave to specification. + * + * EXECUTION ENVIRONMENT: + * Interrupt, interrupt lock held + */ +static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd, + struct viosrp_crq *crq) +{ + struct iu_entry *iue = cmd->iue; + struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout; + + if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) || + !list_empty(&vscsi->waiting_rsp)) { + dev_err(&vscsi->dev, "i_logout: outstanding work\n"); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); + } else { + cmd->rsp.format = SRP_FORMAT; + cmd->rsp.tag = log_out->tag; + cmd->rsp.len = sizeof(struct mad_common); + list_add_tail(&cmd->list, &vscsi->waiting_rsp); + ibmvscsis_send_messages(vscsi); + + ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0); + } + + return ADAPT_SUCCESS; +} + +/* Called with intr lock held */ +static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq) +{ + struct ibmvscsis_cmd *cmd; + struct iu_entry *iue; + struct srp_cmd *srp; + struct srp_tsk_mgmt *tsk; + long rc; + + if (vscsi->request_limit - vscsi->debit <= 0) { + /* Client has exceeded request limit */ + dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n", + vscsi->request_limit, vscsi->debit); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + return; + } + + cmd = ibmvscsis_get_free_cmd(vscsi); + if (!cmd) { + dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n", + vscsi->debit); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + return; + } + iue = cmd->iue; + srp = &vio_iu(iue)->srp.cmd; + + rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq); + if (rc) { + ibmvscsis_free_cmd_resources(vscsi, cmd); + return; + } + + if (vscsi->state == SRP_PROCESSING) { + switch (srp->opcode) { + case SRP_LOGIN_REQ: + rc = ibmvscsis_srp_login(vscsi, cmd, crq); + break; + + case SRP_TSK_MGMT: + tsk = &vio_iu(iue)->srp.tsk_mgmt; + dev_dbg(&vscsi->dev, "tsk_mgmt tag: %llu (0x%llx)\n", + tsk->tag, tsk->tag); + cmd->rsp.tag = tsk->tag; + vscsi->debit += 1; + cmd->type = TASK_MANAGEMENT; + list_add_tail(&cmd->list, &vscsi->schedule_q); + queue_work(vscsi->work_q, &cmd->work); + break; + + case SRP_CMD: + dev_dbg(&vscsi->dev, "srp_cmd tag: %llu (0x%llx)\n", + srp->tag, srp->tag); + cmd->rsp.tag = srp->tag; + vscsi->debit += 1; + cmd->type = SCSI_CDB; + /* + * We want to keep track of work waiting for + * the workqueue. 
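+ * Each SRP_CMD and SRP_TSK_MGMT charges one unit of debit against the
+ * request limit advertised to the client at login (req_lim_delta in
+ * the login response); as an illustrative example, with a limit of
+ * 1024, a client that already has 1024 commands outstanding and sends
+ * one more is disconnected as misbehaving.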
+ */ + list_add_tail(&cmd->list, &vscsi->schedule_q); + queue_work(vscsi->work_q, &cmd->work); + break; + + case SRP_I_LOGOUT: + rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq); + break; + + case SRP_CRED_RSP: + case SRP_AER_RSP: + default: + ibmvscsis_free_cmd_resources(vscsi, cmd); + dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n", + (uint)srp->opcode); + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, 0); + break; + } + } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) { + rc = ibmvscsis_srp_login(vscsi, cmd, crq); + } else { + ibmvscsis_free_cmd_resources(vscsi, cmd); + dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n", + vscsi->state); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + } +} + +/** + * ibmvscsis_ping_response() - Respond to a ping request + * @vscsi: Pointer to our adapter structure + * + * Let the client know that the server is alive and waiting on + * its native I/O stack. + * If any type of error occurs from the call to queue a ping + * response then the client is either not accepting or receiving + * interrupts. Disconnect with an error. + * + * EXECUTION ENVIRONMENT: + * Interrupt, interrupt lock held + */ +static long ibmvscsis_ping_response(struct scsi_info *vscsi) +{ + struct viosrp_crq *crq; + u64 buffer[2] = { 0, 0 }; + long rc; + + crq = (struct viosrp_crq *)&buffer; + crq->valid = VALID_CMD_RESP_EL; + crq->format = (u8)MESSAGE_IN_CRQ; + crq->status = PING_RESPONSE; + + rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]), + cpu_to_be64(buffer[MSG_LOW])); + + switch (rc) { + case H_SUCCESS: + break; + case H_CLOSED: + vscsi->flags |= CLIENT_FAILED; + fallthrough; + case H_DROPPED: + vscsi->flags |= RESPONSE_Q_DOWN; + fallthrough; + case H_REMOTE_PARM: + dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + default: + dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); + break; + } + + return rc; +} + +/** + * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue. + * @vscsi: Pointer to our adapter structure + * @crq: Pointer to CRQ element containing the SRP request + * + * This function will return success if the command queue element is valid + * and the srp iu or MAD request it pointed to was also valid. That does + * not mean that an error was not returned to the client. 
+ * + * EXECUTION ENVIRONMENT: + * Interrupt, intr lock held + */ +static long ibmvscsis_parse_command(struct scsi_info *vscsi, + struct viosrp_crq *crq) +{ + long rc = ADAPT_SUCCESS; + + switch (crq->valid) { + case VALID_CMD_RESP_EL: + switch (crq->format) { + case OS400_FORMAT: + case AIX_FORMAT: + case LINUX_FORMAT: + case MAD_FORMAT: + if (vscsi->flags & PROCESSING_MAD) { + rc = ERROR; + dev_err(&vscsi->dev, "parse_command: already processing mad\n"); + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, + 0); + } else { + vscsi->flags |= PROCESSING_MAD; + rc = ibmvscsis_mad(vscsi, crq); + } + break; + + case SRP_FORMAT: + ibmvscsis_srp_cmd(vscsi, crq); + break; + + case MESSAGE_IN_CRQ: + if (crq->status == PING) + ibmvscsis_ping_response(vscsi); + break; + + default: + dev_err(&vscsi->dev, "parse_command: invalid format %d\n", + (uint)crq->format); + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, 0); + break; + } + break; + + case VALID_TRANS_EVENT: + rc = ibmvscsis_trans_event(vscsi, crq); + break; + + case VALID_INIT_MSG: + rc = ibmvscsis_init_msg(vscsi, crq); + break; + + default: + dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n", + (uint)crq->valid); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + } + + /* + * Return only what the interrupt handler cares + * about. Most errors we keep right on trucking. + */ + rc = vscsi->flags & SCHEDULE_DISCONNECT; + + return rc; +} + +static int read_dma_window(struct scsi_info *vscsi) +{ + struct vio_dev *vdev = vscsi->dma_dev; + const __be32 *dma_window; + const __be32 *prop; + + /* TODO Using of_parse_dma_window would be better, but it doesn't give + * a way to read multiple windows without already knowing the size of + * a window or the number of windows. + */ + dma_window = (const __be32 *)vio_get_attribute(vdev, + "ibm,my-dma-window", + NULL); + if (!dma_window) { + dev_err(&vscsi->dev, "Couldn't find ibm,my-dma-window property\n"); + return -1; + } + + vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window); + dma_window++; + + prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells", + NULL); + if (!prop) { + dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-address-cells property\n"); + dma_window++; + } else { + dma_window += be32_to_cpu(*prop); + } + + prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells", + NULL); + if (!prop) { + dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-size-cells property\n"); + dma_window++; + } else { + dma_window += be32_to_cpu(*prop); + } + + /* dma_window should point to the second window now */ + vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window); + + return 0; +} + +static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name) +{ + struct ibmvscsis_tport *tport = NULL; + struct vio_dev *vdev; + struct scsi_info *vscsi; + + spin_lock_bh(&ibmvscsis_dev_lock); + list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) { + vdev = vscsi->dma_dev; + if (!strcmp(dev_name(&vdev->dev), name)) { + tport = &vscsi->tport; + break; + } + } + spin_unlock_bh(&ibmvscsis_dev_lock); + + return tport; +} + +/** + * ibmvscsis_parse_cmd() - Parse SRP Command + * @vscsi: Pointer to our adapter structure + * @cmd: Pointer to command element with SRP command + * + * Parse the srp command; if it is valid then submit it to tcm. + * Note: The return code does not reflect the status of the SCSI CDB. 
+ * + * EXECUTION ENVIRONMENT: + * Process level + */ +static void ibmvscsis_parse_cmd(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd) +{ + struct iu_entry *iue = cmd->iue; + struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf; + struct ibmvscsis_nexus *nexus; + u64 data_len = 0; + enum dma_data_direction dir; + int attr = 0; + + nexus = vscsi->tport.ibmv_nexus; + /* + * additional length in bytes. Note that the SRP spec says that + * additional length is in 4-byte words, but technically the + * additional length field is only the upper 6 bits of the byte. + * The lower 2 bits are reserved. If the lower 2 bits are 0 (as + * all reserved fields should be), then interpreting the byte as + * an int will yield the length in bytes. + */ + if (srp->add_cdb_len & 0x03) { + dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n"); + spin_lock_bh(&vscsi->intr_lock); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + ibmvscsis_free_cmd_resources(vscsi, cmd); + spin_unlock_bh(&vscsi->intr_lock); + return; + } + + if (srp_get_desc_table(srp, &dir, &data_len)) { + dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n", + srp->tag); + goto fail; + } + + cmd->rsp.sol_not = srp->sol_not; + + switch (srp->task_attr) { + case SRP_SIMPLE_TASK: + attr = TCM_SIMPLE_TAG; + break; + case SRP_ORDERED_TASK: + attr = TCM_ORDERED_TAG; + break; + case SRP_HEAD_TASK: + attr = TCM_HEAD_TAG; + break; + case SRP_ACA_TASK: + attr = TCM_ACA_TAG; + break; + default: + dev_err(&vscsi->dev, "Invalid task attribute %d\n", + srp->task_attr); + goto fail; + } + + cmd->se_cmd.tag = be64_to_cpu(srp->tag); + + spin_lock_bh(&vscsi->intr_lock); + list_add_tail(&cmd->list, &vscsi->active_q); + spin_unlock_bh(&vscsi->intr_lock); + + srp->lun.scsi_lun[0] &= 0x3f; + + target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb, + cmd->sense_buf, scsilun_to_int(&srp->lun), + data_len, attr, dir, 0); + return; + +fail: + spin_lock_bh(&vscsi->intr_lock); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + spin_unlock_bh(&vscsi->intr_lock); +} + +/** + * ibmvscsis_parse_task() - Parse SRP Task Management Request + * @vscsi: Pointer to our adapter structure + * @cmd: Pointer to command element with SRP task management request + * + * Parse the srp task management request; if it is valid then submit it to tcm. + * Note: The return code does not reflect the status of the task management + * request. 
+ * + * EXECUTION ENVIRONMENT: + * Processor level + */ +static void ibmvscsis_parse_task(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd) +{ + struct iu_entry *iue = cmd->iue; + struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt; + int tcm_type; + u64 tag_to_abort = 0; + int rc = 0; + struct ibmvscsis_nexus *nexus; + + nexus = vscsi->tport.ibmv_nexus; + + cmd->rsp.sol_not = srp_tsk->sol_not; + + switch (srp_tsk->tsk_mgmt_func) { + case SRP_TSK_ABORT_TASK: + tcm_type = TMR_ABORT_TASK; + tag_to_abort = be64_to_cpu(srp_tsk->task_tag); + break; + case SRP_TSK_ABORT_TASK_SET: + tcm_type = TMR_ABORT_TASK_SET; + break; + case SRP_TSK_CLEAR_TASK_SET: + tcm_type = TMR_CLEAR_TASK_SET; + break; + case SRP_TSK_LUN_RESET: + tcm_type = TMR_LUN_RESET; + break; + case SRP_TSK_CLEAR_ACA: + tcm_type = TMR_CLEAR_ACA; + break; + default: + dev_err(&vscsi->dev, "unknown task mgmt func %d\n", + srp_tsk->tsk_mgmt_func); + cmd->se_cmd.se_tmr_req->response = + TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; + rc = -1; + break; + } + + if (!rc) { + cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag); + + spin_lock_bh(&vscsi->intr_lock); + list_add_tail(&cmd->list, &vscsi->active_q); + spin_unlock_bh(&vscsi->intr_lock); + + srp_tsk->lun.scsi_lun[0] &= 0x3f; + + dev_dbg(&vscsi->dev, "calling submit_tmr, func %d\n", + srp_tsk->tsk_mgmt_func); + rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL, + scsilun_to_int(&srp_tsk->lun), srp_tsk, + tcm_type, GFP_KERNEL, tag_to_abort, 0); + if (rc) { + dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n", + rc); + spin_lock_bh(&vscsi->intr_lock); + list_del(&cmd->list); + spin_unlock_bh(&vscsi->intr_lock); + cmd->se_cmd.se_tmr_req->response = + TMR_FUNCTION_REJECTED; + } + } + + if (rc) + transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0); +} + +static void ibmvscsis_scheduler(struct work_struct *work) +{ + struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd, + work); + struct scsi_info *vscsi = cmd->adapter; + + spin_lock_bh(&vscsi->intr_lock); + + /* Remove from schedule_q */ + list_del(&cmd->list); + + /* Don't submit cmd if we're disconnecting */ + if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) { + ibmvscsis_free_cmd_resources(vscsi, cmd); + + /* ibmvscsis_disconnect might be waiting for us */ + if (list_empty(&vscsi->active_q) && + list_empty(&vscsi->schedule_q) && + (vscsi->flags & WAIT_FOR_IDLE)) { + vscsi->flags &= ~WAIT_FOR_IDLE; + complete(&vscsi->wait_idle); + } + + spin_unlock_bh(&vscsi->intr_lock); + return; + } + + spin_unlock_bh(&vscsi->intr_lock); + + switch (cmd->type) { + case SCSI_CDB: + ibmvscsis_parse_cmd(vscsi, cmd); + break; + case TASK_MANAGEMENT: + ibmvscsis_parse_task(vscsi, cmd); + break; + default: + dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n", + cmd->type); + spin_lock_bh(&vscsi->intr_lock); + ibmvscsis_free_cmd_resources(vscsi, cmd); + spin_unlock_bh(&vscsi->intr_lock); + break; + } +} + +static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num) +{ + struct ibmvscsis_cmd *cmd; + int i; + + INIT_LIST_HEAD(&vscsi->free_cmd); + vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd), + GFP_KERNEL); + if (!vscsi->cmd_pool) + return -ENOMEM; + + for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num; + i++, cmd++) { + cmd->abort_cmd = NULL; + cmd->adapter = vscsi; + INIT_WORK(&cmd->work, ibmvscsis_scheduler); + list_add_tail(&cmd->list, &vscsi->free_cmd); + } + + return 0; +} + +static void ibmvscsis_free_cmds(struct scsi_info *vscsi) +{ + kfree(vscsi->cmd_pool); + 
vscsi->cmd_pool = NULL; + INIT_LIST_HEAD(&vscsi->free_cmd); +} + +/** + * ibmvscsis_service_wait_q() - Service Waiting Queue + * @timer: Pointer to timer which has expired + * + * This routine is called when the timer pops to service the waiting + * queue. Elements on the queue have completed, their responses have been + * copied to the client, but the client's response queue was full so + * the queue message could not be sent. The routine grabs the proper locks + * and calls send messages. + * + * EXECUTION ENVIRONMENT: + * called at interrupt level + */ +static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer) +{ + struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer); + struct scsi_info *vscsi = container_of(p_timer, struct scsi_info, + rsp_q_timer); + + spin_lock_bh(&vscsi->intr_lock); + p_timer->timer_pops += 1; + p_timer->started = false; + ibmvscsis_send_messages(vscsi); + spin_unlock_bh(&vscsi->intr_lock); + + return HRTIMER_NORESTART; +} + +static long ibmvscsis_alloctimer(struct scsi_info *vscsi) +{ + struct timer_cb *p_timer; + + p_timer = &vscsi->rsp_q_timer; + hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + + p_timer->timer.function = ibmvscsis_service_wait_q; + p_timer->started = false; + p_timer->timer_pops = 0; + + return ADAPT_SUCCESS; +} + +static void ibmvscsis_freetimer(struct scsi_info *vscsi) +{ + struct timer_cb *p_timer; + + p_timer = &vscsi->rsp_q_timer; + + (void)hrtimer_cancel(&p_timer->timer); + + p_timer->started = false; + p_timer->timer_pops = 0; +} + +static irqreturn_t ibmvscsis_interrupt(int dummy, void *data) +{ + struct scsi_info *vscsi = data; + + vio_disable_interrupts(vscsi->dma_dev); + tasklet_schedule(&vscsi->work_task); + + return IRQ_HANDLED; +} + +/** + * ibmvscsis_enable_change_state() - Set new state based on enabled status + * @vscsi: Pointer to our adapter structure + * + * This function determines our new state now that we are enabled. This + * may involve sending an Init Complete message to the client. + * + * Must be called with interrupt lock held. + */ +static long ibmvscsis_enable_change_state(struct scsi_info *vscsi) +{ + int bytes; + long rc = ADAPT_SUCCESS; + + bytes = vscsi->cmd_q.size * PAGE_SIZE; + rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes); + if (rc == H_CLOSED || rc == H_SUCCESS) { + vscsi->state = WAIT_CONNECTION; + rc = ibmvscsis_establish_new_q(vscsi); + } + + if (rc != ADAPT_SUCCESS) { + vscsi->state = ERR_DISCONNECTED; + vscsi->flags |= RESPONSE_Q_DOWN; + } + + return rc; +} + +/** + * ibmvscsis_create_command_q() - Create Command Queue + * @vscsi: Pointer to our adapter structure + * @num_cmds: Currently unused. In the future, may be used to determine + * the size of the CRQ. 
+ * + * Allocates memory for command queue maps remote memory into an ioba + * initializes the command response queue + * + * EXECUTION ENVIRONMENT: + * Process level only + */ +static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds) +{ + int pages; + struct vio_dev *vdev = vscsi->dma_dev; + + /* We might support multiple pages in the future, but just 1 for now */ + pages = 1; + + vscsi->cmd_q.size = pages; + + vscsi->cmd_q.base_addr = + (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL); + if (!vscsi->cmd_q.base_addr) + return -ENOMEM; + + vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1; + + vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev, + vscsi->cmd_q.base_addr, + PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) { + free_page((unsigned long)vscsi->cmd_q.base_addr); + return -ENOMEM; + } + + return 0; +} + +/** + * ibmvscsis_destroy_command_q - Destroy Command Queue + * @vscsi: Pointer to our adapter structure + * + * Releases memory for command queue and unmaps mapped remote memory. + * + * EXECUTION ENVIRONMENT: + * Process level only + */ +static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi) +{ + dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token, + PAGE_SIZE, DMA_BIDIRECTIONAL); + free_page((unsigned long)vscsi->cmd_q.base_addr); + vscsi->cmd_q.base_addr = NULL; + vscsi->state = NO_QUEUE; +} + +static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd) +{ + struct iu_entry *iue = cmd->iue; + struct se_cmd *se_cmd = &cmd->se_cmd; + struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf; + struct scsi_sense_hdr sshdr; + u8 rc = se_cmd->scsi_status; + + if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb))) + if (scsi_normalize_sense(se_cmd->sense_buffer, + se_cmd->scsi_sense_length, &sshdr)) + if (sshdr.sense_key == HARDWARE_ERROR && + (se_cmd->residual_count == 0 || + se_cmd->residual_count == se_cmd->data_length)) { + rc = NO_SENSE; + cmd->flags |= CMD_FAST_FAIL; + } + + return rc; +} + +/** + * srp_build_response() - Build an SRP response buffer + * @vscsi: Pointer to our adapter structure + * @cmd: Pointer to command for which to send the response + * @len_p: Where to return the length of the IU response sent. This + * is needed to construct the CRQ response. + * + * Build the SRP response buffer and copy it to the client's memory space. 
+ */ +static long srp_build_response(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd, uint *len_p) +{ + struct iu_entry *iue = cmd->iue; + struct se_cmd *se_cmd = &cmd->se_cmd; + struct srp_rsp *rsp; + uint len; + u32 rsp_code; + char *data; + u32 *tsk_status; + long rc = ADAPT_SUCCESS; + + spin_lock_bh(&vscsi->intr_lock); + + rsp = &vio_iu(iue)->srp.rsp; + len = sizeof(*rsp); + memset(rsp, 0, len); + data = rsp->data; + + rsp->opcode = SRP_RSP; + + rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit); + rsp->tag = cmd->rsp.tag; + rsp->flags = 0; + + if (cmd->type == SCSI_CDB) { + rsp->status = ibmvscsis_fast_fail(vscsi, cmd); + if (rsp->status) { + dev_dbg(&vscsi->dev, "build_resp: cmd %p, scsi status %d\n", + cmd, (int)rsp->status); + ibmvscsis_determine_resid(se_cmd, rsp); + if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) { + rsp->sense_data_len = + cpu_to_be32(se_cmd->scsi_sense_length); + rsp->flags |= SRP_RSP_FLAG_SNSVALID; + len += se_cmd->scsi_sense_length; + memcpy(data, se_cmd->sense_buffer, + se_cmd->scsi_sense_length); + } + rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> + UCSOLNT_RESP_SHIFT; + } else if (cmd->flags & CMD_FAST_FAIL) { + dev_dbg(&vscsi->dev, "build_resp: cmd %p, fast fail\n", + cmd); + rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> + UCSOLNT_RESP_SHIFT; + } else { + rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >> + SCSOLNT_RESP_SHIFT; + } + } else { + /* this is task management */ + rsp->status = 0; + rsp->resp_data_len = cpu_to_be32(4); + rsp->flags |= SRP_RSP_FLAG_RSPVALID; + + switch (se_cmd->se_tmr_req->response) { + case TMR_FUNCTION_COMPLETE: + case TMR_TASK_DOES_NOT_EXIST: + rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE; + rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >> + SCSOLNT_RESP_SHIFT; + break; + case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: + case TMR_LUN_DOES_NOT_EXIST: + rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED; + rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> + UCSOLNT_RESP_SHIFT; + break; + case TMR_FUNCTION_FAILED: + case TMR_FUNCTION_REJECTED: + default: + rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED; + rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> + UCSOLNT_RESP_SHIFT; + break; + } + + tsk_status = (u32 *)data; + *tsk_status = cpu_to_be32(rsp_code); + data = (char *)(tsk_status + 1); + len += 4; + } + + dma_wmb(); + rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma, + vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(iue->remote_token)); + + switch (rc) { + case H_SUCCESS: + vscsi->credit = 0; + *len_p = len; + break; + case H_PERMISSION: + if (connection_broken(vscsi)) + vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED; + + dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n", + rc, vscsi->flags, vscsi->state); + break; + case H_SOURCE_PARM: + case H_DEST_PARM: + default: + dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n", + rc); + break; + } + + spin_unlock_bh(&vscsi->intr_lock); + + return rc; +} + +static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg, + int nsg, struct srp_direct_buf *md, int nmd, + enum dma_data_direction dir, unsigned int bytes) +{ + struct iu_entry *iue = cmd->iue; + struct srp_target *target = iue->target; + struct scsi_info *vscsi = target->ldata; + struct scatterlist *sgp; + dma_addr_t client_ioba, server_ioba; + ulong buf_len; + ulong client_len, server_len; + int md_idx; + long tx_len; + long rc = 0; + + if (bytes == 0) + return 0; + + sgp = sg; + client_len = 0; + server_len = 0; + 
md_idx = 0; + tx_len = bytes; + + do { + if (client_len == 0) { + if (md_idx >= nmd) { + dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n"); + rc = -EIO; + break; + } + client_ioba = be64_to_cpu(md[md_idx].va); + client_len = be32_to_cpu(md[md_idx].len); + } + if (server_len == 0) { + if (!sgp) { + dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n"); + rc = -EIO; + break; + } + server_ioba = sg_dma_address(sgp); + server_len = sg_dma_len(sgp); + } + + buf_len = tx_len; + + if (buf_len > client_len) + buf_len = client_len; + + if (buf_len > server_len) + buf_len = server_len; + + if (buf_len > max_vdma_size) + buf_len = max_vdma_size; + + if (dir == DMA_TO_DEVICE) { + /* read from client */ + rc = h_copy_rdma(buf_len, + vscsi->dds.window[REMOTE].liobn, + client_ioba, + vscsi->dds.window[LOCAL].liobn, + server_ioba); + } else { + /* The h_copy_rdma will cause phyp, running in another + * partition, to read memory, so we need to make sure + * the data has been written out, hence these syncs. + */ + /* ensure that everything is in memory */ + isync(); + /* ensure that memory has been made visible */ + dma_wmb(); + rc = h_copy_rdma(buf_len, + vscsi->dds.window[LOCAL].liobn, + server_ioba, + vscsi->dds.window[REMOTE].liobn, + client_ioba); + } + switch (rc) { + case H_SUCCESS: + break; + case H_PERMISSION: + case H_SOURCE_PARM: + case H_DEST_PARM: + if (connection_broken(vscsi)) { + spin_lock_bh(&vscsi->intr_lock); + vscsi->flags |= + (RESPONSE_Q_DOWN | CLIENT_FAILED); + spin_unlock_bh(&vscsi->intr_lock); + } + dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n", + rc); + break; + + default: + dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n", + rc); + break; + } + + if (!rc) { + tx_len -= buf_len; + if (tx_len) { + client_len -= buf_len; + if (client_len == 0) + md_idx++; + else + client_ioba += buf_len; + + server_len -= buf_len; + if (server_len == 0) + sgp = sg_next(sgp); + else + server_ioba += buf_len; + } else { + break; + } + } + } while (!rc); + + return rc; +} + +/** + * ibmvscsis_handle_crq() - Handle CRQ + * @data: Pointer to our adapter structure + * + * Read the command elements from the command queue and copy the payloads + * associated with the command elements to local memory and execute the + * SRP requests. + * + * Note: this is an edge triggered interrupt. It can not be shared. + */ +static void ibmvscsis_handle_crq(unsigned long data) +{ + struct scsi_info *vscsi = (struct scsi_info *)data; + struct viosrp_crq *crq; + long rc; + bool ack = true; + volatile u8 valid; + + spin_lock_bh(&vscsi->intr_lock); + + dev_dbg(&vscsi->dev, "got interrupt\n"); + + /* + * if we are in a path where we are waiting for all pending commands + * to complete because we received a transport event and anything in + * the command queue is for a new connection, do nothing + */ + if (TARGET_STOP(vscsi)) { + vio_enable_interrupts(vscsi->dma_dev); + + dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n", + vscsi->flags, vscsi->state); + spin_unlock_bh(&vscsi->intr_lock); + return; + } + + rc = vscsi->flags & SCHEDULE_DISCONNECT; + crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; + valid = crq->valid; + dma_rmb(); + + while (valid) { + /* + * These are edege triggered interrupts. After dropping out of + * the while loop, the code must check for work since an + * interrupt could be lost, and an elment be left on the queue, + * hence the label. 
+ */ +cmd_work: + vscsi->cmd_q.index = + (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask; + + if (!rc) { + rc = ibmvscsis_parse_command(vscsi, crq); + } else { + if ((uint)crq->valid == VALID_TRANS_EVENT) { + /* + * must service the transport layer events even + * in an error state, dont break out until all + * the consecutive transport events have been + * processed + */ + rc = ibmvscsis_trans_event(vscsi, crq); + } else if (vscsi->flags & TRANS_EVENT) { + /* + * if a transport event has occurred leave + * everything but transport events on the queue + * + * need to decrement the queue index so we can + * look at the element again + */ + if (vscsi->cmd_q.index) + vscsi->cmd_q.index -= 1; + else + /* + * index is at 0 it just wrapped. + * have it index last element in q + */ + vscsi->cmd_q.index = vscsi->cmd_q.mask; + break; + } + } + + crq->valid = INVALIDATE_CMD_RESP_EL; + + crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; + valid = crq->valid; + dma_rmb(); + } + + if (!rc) { + if (ack) { + vio_enable_interrupts(vscsi->dma_dev); + ack = false; + dev_dbg(&vscsi->dev, "handle_crq, reenabling interrupts\n"); + } + valid = crq->valid; + dma_rmb(); + if (valid) + goto cmd_work; + } else { + dev_dbg(&vscsi->dev, "handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n", + vscsi->flags, vscsi->state, vscsi->cmd_q.index); + } + + dev_dbg(&vscsi->dev, "Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n", + (int)list_empty(&vscsi->schedule_q), vscsi->flags, + vscsi->state); + + spin_unlock_bh(&vscsi->intr_lock); +} + +static int ibmvscsis_probe(struct vio_dev *vdev, + const struct vio_device_id *id) +{ + struct scsi_info *vscsi; + int rc = 0; + long hrc = 0; + char wq_name[24]; + + vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL); + if (!vscsi) { + rc = -ENOMEM; + dev_err(&vdev->dev, "probe: allocation of adapter failed\n"); + return rc; + } + + vscsi->dma_dev = vdev; + vscsi->dev = vdev->dev; + INIT_LIST_HEAD(&vscsi->schedule_q); + INIT_LIST_HEAD(&vscsi->waiting_rsp); + INIT_LIST_HEAD(&vscsi->active_q); + + snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s", + dev_name(&vdev->dev)); + + dev_dbg(&vscsi->dev, "probe tport_name: %s\n", vscsi->tport.tport_name); + + rc = read_dma_window(vscsi); + if (rc) + goto free_adapter; + dev_dbg(&vscsi->dev, "Probe: liobn 0x%x, riobn 0x%x\n", + vscsi->dds.window[LOCAL].liobn, + vscsi->dds.window[REMOTE].liobn); + + snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name); + + vscsi->dds.unit_id = vdev->unit_address; + strscpy(vscsi->dds.partition_name, partition_name, + sizeof(vscsi->dds.partition_name)); + vscsi->dds.partition_num = partition_number; + + spin_lock_bh(&ibmvscsis_dev_lock); + list_add_tail(&vscsi->list, &ibmvscsis_dev_list); + spin_unlock_bh(&ibmvscsis_dev_lock); + + /* + * TBD: How do we determine # of cmds to request? Do we know how + * many "children" we have? + */ + vscsi->request_limit = INITIAL_SRP_LIMIT; + rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit, + SRP_MAX_IU_LEN); + if (rc) + goto rem_list; + + vscsi->target.ldata = vscsi; + + rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit); + if (rc) { + dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n", + rc, vscsi->request_limit); + goto free_target; + } + + /* + * Note: the lock is used in freeing timers, so must initialize + * first so that ordering in case of error is correct. 
+ */ + spin_lock_init(&vscsi->intr_lock); + + rc = ibmvscsis_alloctimer(vscsi); + if (rc) { + dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc); + goto free_cmds; + } + + rc = ibmvscsis_create_command_q(vscsi, 256); + if (rc) { + dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n", + rc); + goto free_timer; + } + + vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!vscsi->map_buf) { + rc = -ENOMEM; + dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n"); + goto destroy_queue; + } + + vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) { + rc = -ENOMEM; + dev_err(&vscsi->dev, "probe: error mapping command buffer\n"); + goto free_buf; + } + + hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, + (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, + 0); + if (hrc == H_SUCCESS) + vscsi->client_data.partition_number = + be64_to_cpu(*(u64 *)vscsi->map_buf); + /* + * We expect the VIOCTL to fail if we're configured as "any + * client can connect" and the client isn't activated yet. + * We'll make the call again when he sends an init msg. + */ + dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n", + hrc, vscsi->client_data.partition_number); + + tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq, + (unsigned long)vscsi); + + init_completion(&vscsi->wait_idle); + init_completion(&vscsi->unconfig); + + snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev)); + vscsi->work_q = create_workqueue(wq_name); + if (!vscsi->work_q) { + rc = -ENOMEM; + dev_err(&vscsi->dev, "create_workqueue failed\n"); + goto unmap_buf; + } + + rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi); + if (rc) { + rc = -EPERM; + dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc); + goto destroy_WQ; + } + + vscsi->state = WAIT_ENABLED; + + dev_set_drvdata(&vdev->dev, vscsi); + + return 0; + +destroy_WQ: + destroy_workqueue(vscsi->work_q); +unmap_buf: + dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE, + DMA_BIDIRECTIONAL); +free_buf: + kfree(vscsi->map_buf); +destroy_queue: + tasklet_kill(&vscsi->work_task); + ibmvscsis_unregister_command_q(vscsi); + ibmvscsis_destroy_command_q(vscsi); +free_timer: + ibmvscsis_freetimer(vscsi); +free_cmds: + ibmvscsis_free_cmds(vscsi); +free_target: + srp_target_free(&vscsi->target); +rem_list: + spin_lock_bh(&ibmvscsis_dev_lock); + list_del(&vscsi->list); + spin_unlock_bh(&ibmvscsis_dev_lock); +free_adapter: + kfree(vscsi); + + return rc; +} + +static void ibmvscsis_remove(struct vio_dev *vdev) +{ + struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev); + + dev_dbg(&vscsi->dev, "remove (%s)\n", dev_name(&vscsi->dma_dev->dev)); + + spin_lock_bh(&vscsi->intr_lock); + ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0); + vscsi->flags |= CFG_SLEEPING; + spin_unlock_bh(&vscsi->intr_lock); + wait_for_completion(&vscsi->unconfig); + + vio_disable_interrupts(vdev); + free_irq(vdev->irq, vscsi); + destroy_workqueue(vscsi->work_q); + dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE, + DMA_BIDIRECTIONAL); + kfree(vscsi->map_buf); + tasklet_kill(&vscsi->work_task); + ibmvscsis_destroy_command_q(vscsi); + ibmvscsis_freetimer(vscsi); + ibmvscsis_free_cmds(vscsi); + srp_target_free(&vscsi->target); + spin_lock_bh(&ibmvscsis_dev_lock); + list_del(&vscsi->list); + spin_unlock_bh(&ibmvscsis_dev_lock); + kfree(vscsi); +} + +static ssize_t system_id_show(struct device *dev, + struct device_attribute *attr, char *buf) 
+{ + return snprintf(buf, PAGE_SIZE, "%s\n", system_id); +} + +static ssize_t partition_number_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%x\n", partition_number); +} + +static ssize_t unit_address_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev); + + return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address); +} + +static int ibmvscsis_get_system_info(void) +{ + struct device_node *rootdn, *vdevdn; + const char *id, *model, *name; + const uint *num; + + rootdn = of_find_node_by_path("/"); + if (!rootdn) + return -ENOENT; + + model = of_get_property(rootdn, "model", NULL); + id = of_get_property(rootdn, "system-id", NULL); + if (model && id) + snprintf(system_id, sizeof(system_id), "%s-%s", model, id); + + name = of_get_property(rootdn, "ibm,partition-name", NULL); + if (name) + strncpy(partition_name, name, sizeof(partition_name)); + + num = of_get_property(rootdn, "ibm,partition-no", NULL); + if (num) + partition_number = of_read_number(num, 1); + + of_node_put(rootdn); + + vdevdn = of_find_node_by_path("/vdevice"); + if (vdevdn) { + const uint *mvds; + + mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size", + NULL); + if (mvds) + max_vdma_size = *mvds; + of_node_put(vdevdn); + } + + return 0; +} + +static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg) +{ + struct ibmvscsis_tport *tport = + container_of(se_tpg, struct ibmvscsis_tport, se_tpg); + + return tport->tport_name; +} + +static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg) +{ + struct ibmvscsis_tport *tport = + container_of(se_tpg, struct ibmvscsis_tport, se_tpg); + + return tport->tport_tpgt; +} + +static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int ibmvscsis_check_true(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd) +{ + return target_put_sess_cmd(se_cmd); +} + +static void ibmvscsis_release_cmd(struct se_cmd *se_cmd) +{ + struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, + se_cmd); + struct scsi_info *vscsi = cmd->adapter; + + spin_lock_bh(&vscsi->intr_lock); + /* Remove from active_q */ + list_move_tail(&cmd->list, &vscsi->waiting_rsp); + ibmvscsis_send_messages(vscsi); + spin_unlock_bh(&vscsi->intr_lock); +} + +static int ibmvscsis_write_pending(struct se_cmd *se_cmd) +{ + struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, + se_cmd); + struct scsi_info *vscsi = cmd->adapter; + struct iu_entry *iue = cmd->iue; + int rc; + + /* + * If CLIENT_FAILED OR RESPONSE_Q_DOWN, then just return success + * since LIO can't do anything about it, and we dont want to + * attempt an srp_transfer_data. + */ + if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) { + dev_err(&vscsi->dev, "write_pending failed since: %d\n", + vscsi->flags); + return -EIO; + + } + + rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, + 1, 1); + if (rc) { + dev_err(&vscsi->dev, "srp_transfer_data() failed: %d\n", rc); + return -EIO; + } + /* + * We now tell TCM to add this WRITE CDB directly into the TCM storage + * object execution queue. 
+ */ + target_execute_cmd(se_cmd); + return 0; +} + +static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd) +{ + struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, + se_cmd); + struct iu_entry *iue = cmd->iue; + struct scsi_info *vscsi = cmd->adapter; + uint len = 0; + int rc; + + rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1, + 1); + if (rc) { + dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc); + se_cmd->scsi_sense_length = 18; + memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length); + /* Logical Unit Communication Time-out asc/ascq = 0x0801 */ + scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR, + 0x08, 0x01); + } + + srp_build_response(vscsi, cmd, &len); + cmd->rsp.format = SRP_FORMAT; + cmd->rsp.len = len; + + return 0; +} + +static int ibmvscsis_queue_status(struct se_cmd *se_cmd) +{ + struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, + se_cmd); + struct scsi_info *vscsi = cmd->adapter; + uint len; + + dev_dbg(&vscsi->dev, "queue_status %p\n", se_cmd); + + srp_build_response(vscsi, cmd, &len); + cmd->rsp.format = SRP_FORMAT; + cmd->rsp.len = len; + + return 0; +} + +static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd) +{ + struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, + se_cmd); + struct scsi_info *vscsi = cmd->adapter; + struct ibmvscsis_cmd *cmd_itr; + struct iu_entry *iue = iue = cmd->iue; + struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt; + u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag); + uint len; + + dev_dbg(&vscsi->dev, "queue_tm_rsp %p, status %d\n", + se_cmd, (int)se_cmd->se_tmr_req->response); + + if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK && + cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) { + spin_lock_bh(&vscsi->intr_lock); + list_for_each_entry(cmd_itr, &vscsi->active_q, list) { + if (tag_to_abort == cmd_itr->se_cmd.tag) { + cmd_itr->abort_cmd = cmd; + cmd->flags |= DELAY_SEND; + break; + } + } + spin_unlock_bh(&vscsi->intr_lock); + } + + srp_build_response(vscsi, cmd, &len); + cmd->rsp.format = SRP_FORMAT; + cmd->rsp.len = len; +} + +static void ibmvscsis_aborted_task(struct se_cmd *se_cmd) +{ + struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, + se_cmd); + struct scsi_info *vscsi = cmd->adapter; + + dev_dbg(&vscsi->dev, "ibmvscsis_aborted_task %p task_tag: %llu\n", + se_cmd, se_cmd->tag); +} + +static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct ibmvscsis_tport *tport; + struct scsi_info *vscsi; + + tport = ibmvscsis_lookup_port(name); + if (tport) { + vscsi = container_of(tport, struct scsi_info, tport); + tport->tport_proto_id = SCSI_PROTOCOL_SRP; + dev_dbg(&vscsi->dev, "make_tport(%s), pointer:%p, tport_id:%x\n", + name, tport, tport->tport_proto_id); + return &tport->tport_wwn; + } + + return ERR_PTR(-EINVAL); +} + +static void ibmvscsis_drop_tport(struct se_wwn *wwn) +{ + struct ibmvscsis_tport *tport = container_of(wwn, + struct ibmvscsis_tport, + tport_wwn); + struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport); + + dev_dbg(&vscsi->dev, "drop_tport(%s)\n", + config_item_name(&tport->tport_wwn.wwn_group.cg_item)); +} + +static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn, + const char *name) +{ + struct ibmvscsis_tport *tport = + container_of(wwn, struct ibmvscsis_tport, tport_wwn); + u16 tpgt; + int rc; + + if (strstr(name, "tpgt_") != name) + return 
ERR_PTR(-EINVAL); + rc = kstrtou16(name + 5, 0, &tpgt); + if (rc) + return ERR_PTR(rc); + tport->tport_tpgt = tpgt; + + tport->releasing = false; + + rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg, + tport->tport_proto_id); + if (rc) + return ERR_PTR(rc); + + return &tport->se_tpg; +} + +static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg) +{ + struct ibmvscsis_tport *tport = container_of(se_tpg, + struct ibmvscsis_tport, + se_tpg); + + tport->releasing = true; + tport->enabled = false; + + /* + * Release the virtual I_T Nexus for this ibmvscsis TPG + */ + ibmvscsis_drop_nexus(tport); + /* + * Deregister the se_tpg from TCM.. + */ + core_tpg_deregister(se_tpg); +} + +static ssize_t ibmvscsis_wwn_version_show(struct config_item *item, + char *page) +{ + return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION); +} +CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version); + +static struct configfs_attribute *ibmvscsis_wwn_attrs[] = { + &ibmvscsis_wwn_attr_version, + NULL, +}; + + +static int ibmvscsis_enable_tpg(struct se_portal_group *se_tpg, bool enable) +{ + struct ibmvscsis_tport *tport = container_of(se_tpg, + struct ibmvscsis_tport, + se_tpg); + struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport); + long lrc; + + if (enable) { + spin_lock_bh(&vscsi->intr_lock); + tport->enabled = true; + lrc = ibmvscsis_enable_change_state(vscsi); + if (lrc) + dev_err(&vscsi->dev, "enable_change_state failed, rc %ld state %d\n", + lrc, vscsi->state); + spin_unlock_bh(&vscsi->intr_lock); + } else { + spin_lock_bh(&vscsi->intr_lock); + tport->enabled = false; + /* This simulates the server going down */ + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); + spin_unlock_bh(&vscsi->intr_lock); + } + + return 0; +} + +static const struct target_core_fabric_ops ibmvscsis_ops = { + .module = THIS_MODULE, + .fabric_name = "ibmvscsis", + .max_data_sg_nents = MAX_TXU / PAGE_SIZE, + .tpg_get_wwn = ibmvscsis_get_fabric_wwn, + .tpg_get_tag = ibmvscsis_get_tag, + .tpg_get_default_depth = ibmvscsis_get_default_depth, + .tpg_check_demo_mode = ibmvscsis_check_true, + .tpg_check_demo_mode_cache = ibmvscsis_check_true, + .check_stop_free = ibmvscsis_check_stop_free, + .release_cmd = ibmvscsis_release_cmd, + .write_pending = ibmvscsis_write_pending, + .queue_data_in = ibmvscsis_queue_data_in, + .queue_status = ibmvscsis_queue_status, + .queue_tm_rsp = ibmvscsis_queue_tm_rsp, + .aborted_task = ibmvscsis_aborted_task, + /* + * Setup function pointers for logic in target_core_fabric_configfs.c + */ + .fabric_make_wwn = ibmvscsis_make_tport, + .fabric_drop_wwn = ibmvscsis_drop_tport, + .fabric_make_tpg = ibmvscsis_make_tpg, + .fabric_enable_tpg = ibmvscsis_enable_tpg, + .fabric_drop_tpg = ibmvscsis_drop_tpg, + + .tfc_wwn_attrs = ibmvscsis_wwn_attrs, +}; + +static void ibmvscsis_dev_release(struct device *dev) {}; + +static struct device_attribute dev_attr_system_id = + __ATTR(system_id, S_IRUGO, system_id_show, NULL); + +static struct device_attribute dev_attr_partition_number = + __ATTR(partition_number, S_IRUGO, partition_number_show, NULL); + +static struct device_attribute dev_attr_unit_address = + __ATTR(unit_address, S_IRUGO, unit_address_show, NULL); + +static struct attribute *ibmvscsis_dev_attrs[] = { + &dev_attr_system_id.attr, + &dev_attr_partition_number.attr, + &dev_attr_unit_address.attr, +}; +ATTRIBUTE_GROUPS(ibmvscsis_dev); + +static struct class ibmvscsis_class = { + .name = "ibmvscsis", + .dev_release = ibmvscsis_dev_release, + .dev_groups = ibmvscsis_dev_groups, +}; + +static 
const struct vio_device_id ibmvscsis_device_table[] = { + { "v-scsi-host", "IBM,v-scsi-host" }, + { "", "" } +}; +MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table); + +static struct vio_driver ibmvscsis_driver = { + .name = "ibmvscsis", + .id_table = ibmvscsis_device_table, + .probe = ibmvscsis_probe, + .remove = ibmvscsis_remove, +}; + +/* + * ibmvscsis_init() - Kernel Module initialization + * + * Note: vio_register_driver() registers callback functions, and at least one + * of those callback functions calls TCM - Linux IO Target Subsystem, thus + * the SCSI Target template must be registered before vio_register_driver() + * is called. + */ +static int __init ibmvscsis_init(void) +{ + int rc = 0; + + rc = ibmvscsis_get_system_info(); + if (rc) { + pr_err("rc %d from get_system_info\n", rc); + goto out; + } + + rc = class_register(&ibmvscsis_class); + if (rc) { + pr_err("failed class register\n"); + goto out; + } + + rc = target_register_template(&ibmvscsis_ops); + if (rc) { + pr_err("rc %d from target_register_template\n", rc); + goto unregister_class; + } + + rc = vio_register_driver(&ibmvscsis_driver); + if (rc) { + pr_err("rc %d from vio_register_driver\n", rc); + goto unregister_target; + } + + return 0; + +unregister_target: + target_unregister_template(&ibmvscsis_ops); +unregister_class: + class_unregister(&ibmvscsis_class); +out: + return rc; +} + +static void __exit ibmvscsis_exit(void) +{ + pr_info("Unregister IBM virtual SCSI host driver\n"); + vio_unregister_driver(&ibmvscsis_driver); + target_unregister_template(&ibmvscsis_ops); + class_unregister(&ibmvscsis_class); +} + +MODULE_DESCRIPTION("IBMVSCSIS fabric driver"); +MODULE_AUTHOR("Bryant G. Ly and Michael Cyr"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(IBMVSCSIS_VERSION); +module_init(ibmvscsis_init); +module_exit(ibmvscsis_exit); diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h new file mode 100644 index 000000000..7ae074e5d --- /dev/null +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h @@ -0,0 +1,362 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/******************************************************************************* + * IBM Virtual SCSI Target Driver + * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp. + * Santiago Leon (santil@us.ibm.com) IBM Corp. + * Linda Xie (lxie@us.ibm.com) IBM Corp. + * + * Copyright (C) 2005-2011 FUJITA Tomonori + * Copyright (C) 2010 Nicholas A. Bellinger + * Copyright (C) 2016 Bryant G. Ly IBM Corp. + * + * Authors: Bryant G. Ly + * Authors: Michael Cyr + * + ****************************************************************************/ + +#ifndef __H_IBMVSCSI_TGT +#define __H_IBMVSCSI_TGT + +#include +#include "libsrp.h" + +#define SYS_ID_NAME_LEN 64 +#define PARTITION_NAMELEN 96 +#define IBMVSCSIS_NAMELEN 32 + +#define MSG_HI 0 +#define MSG_LOW 1 + +#define MAX_CMD_Q_PAGES 4 +#define CRQ_PER_PAGE (PAGE_SIZE / sizeof(struct viosrp_crq)) +/* in terms of number of elements */ +#define DEFAULT_CMD_Q_SIZE CRQ_PER_PAGE +#define MAX_CMD_Q_SIZE (DEFAULT_CMD_Q_SIZE * MAX_CMD_Q_PAGES) + +#define SRP_VIOLATION 0x102 /* general error code */ + +/* + * SRP buffer formats defined as of 16.a supported by this driver. 
+ */ +#define SUPPORTED_FORMATS ((SRP_DATA_DESC_DIRECT << 1) | \ + (SRP_DATA_DESC_INDIRECT << 1)) + +#define SCSI_LUN_ADDR_METHOD_FLAT 1 + +struct dma_window { + u32 liobn; /* Unique per vdevice */ + u64 tce_base; /* Physical location of the TCE table */ + u64 tce_size; /* Size of the TCE table in bytes */ +}; + +struct target_dds { + u64 unit_id; /* 64 bit will force alignment */ +#define NUM_DMA_WINDOWS 2 +#define LOCAL 0 +#define REMOTE 1 + struct dma_window window[NUM_DMA_WINDOWS]; + + /* root node property "ibm,partition-no" */ + uint partition_num; + char partition_name[PARTITION_NAMELEN]; +}; + +#define MAX_NUM_PORTS 1 +#define MAX_H_COPY_RDMA (128 * 1024) + +#define MAX_EYE 64 + +/* Return codes */ +#define ADAPT_SUCCESS 0L +/* choose error codes that do not conflict with PHYP */ +#define ERROR -40L + +struct format_code { + u8 reserved; + u8 buffers; +}; + +struct client_info { +#define SRP_VERSION "16.a" + char srp_version[8]; + /* root node property ibm,partition-name */ + char partition_name[PARTITION_NAMELEN]; + /* root node property ibm,partition-no */ + u32 partition_number; + /* initially 1 */ + u32 mad_version; + u32 os_type; +}; + +/* + * Changing this constant changes the number of seconds to wait before + * considering the client will never service its queue again. + */ +#define SECONDS_TO_CONSIDER_FAILED 30 +/* + * These constants set the polling period used to determine if the client + * has freed at least one element in the response queue. + */ +#define WAIT_SECONDS 1 +#define WAIT_NANO_SECONDS 5000 +#define MAX_TIMER_POPS ((1000000 / WAIT_NANO_SECONDS) * \ + SECONDS_TO_CONSIDER_FAILED) +/* + * general purpose timer control block + * which can be used for multiple functions + */ +struct timer_cb { + struct hrtimer timer; + /* + * how long has it been since the client + * serviced the queue. 
The variable is incremented + * in the service_wait_q routine and cleared + * in send messages + */ + int timer_pops; + /* the timer is started */ + bool started; +}; + +struct cmd_queue { + /* kva */ + struct viosrp_crq *base_addr; + dma_addr_t crq_token; + /* used to maintain index */ + uint mask; + /* current element */ + uint index; + int size; +}; + +#define SCSOLNT_RESP_SHIFT 1 +#define UCSOLNT_RESP_SHIFT 2 + +#define SCSOLNT BIT(SCSOLNT_RESP_SHIFT) +#define UCSOLNT BIT(UCSOLNT_RESP_SHIFT) + +enum cmd_type { + SCSI_CDB = 0x01, + TASK_MANAGEMENT = 0x02, + /* MAD or addressed to port 0 */ + ADAPTER_MAD = 0x04, + UNSET_TYPE = 0x08, +}; + +struct iu_rsp { + u8 format; + u8 sol_not; + u16 len; + /* tag is just to help client identify cmd, so don't translate be/le */ + u64 tag; +}; + +struct ibmvscsis_cmd { + struct list_head list; + /* Used for TCM Core operations */ + struct se_cmd se_cmd; + struct iu_entry *iue; + struct iu_rsp rsp; + struct work_struct work; + struct scsi_info *adapter; + struct ibmvscsis_cmd *abort_cmd; + /* Sense buffer that will be mapped into outgoing status */ + unsigned char sense_buf[TRANSPORT_SENSE_BUFFER]; + u64 init_time; +#define CMD_FAST_FAIL BIT(0) +#define DELAY_SEND BIT(1) + u32 flags; + char type; +}; + +struct ibmvscsis_nexus { + struct se_session *se_sess; +}; + +struct ibmvscsis_tport { + /* SCSI protocol the tport is providing */ + u8 tport_proto_id; + /* ASCII formatted WWPN for SRP Target port */ + char tport_name[IBMVSCSIS_NAMELEN]; + /* Returned by ibmvscsis_make_tport() */ + struct se_wwn tport_wwn; + /* Returned by ibmvscsis_make_tpg() */ + struct se_portal_group se_tpg; + /* ibmvscsis port target portal group tag for TCM */ + u16 tport_tpgt; + /* Pointer to TCM session for I_T Nexus */ + struct ibmvscsis_nexus *ibmv_nexus; + bool enabled; + bool releasing; +}; + +struct scsi_info { + struct list_head list; + char eye[MAX_EYE]; + + /* commands waiting for space on response queue */ + struct list_head waiting_rsp; +#define NO_QUEUE 0x00 +#define WAIT_ENABLED 0X01 +#define WAIT_CONNECTION 0x04 + /* have established a connection */ +#define CONNECTED 0x08 + /* at least one port is processing SRP IU */ +#define SRP_PROCESSING 0x10 + /* remove request received */ +#define UNCONFIGURING 0x20 + /* disconnect by letting adapter go idle, no error */ +#define WAIT_IDLE 0x40 + /* disconnecting to clear an error */ +#define ERR_DISCONNECT 0x80 + /* disconnect to clear error state, then come back up */ +#define ERR_DISCONNECT_RECONNECT 0x100 + /* disconnected after clearing an error */ +#define ERR_DISCONNECTED 0x200 + /* A series of errors caused unexpected errors */ +#define UNDEFINED 0x400 + u16 state; + int fast_fail; + struct target_dds dds; + char *cmd_pool; + /* list of free commands */ + struct list_head free_cmd; + /* command elements ready for scheduler */ + struct list_head schedule_q; + /* commands sent to TCM */ + struct list_head active_q; + caddr_t *map_buf; + /* ioba of map buffer */ + dma_addr_t map_ioba; + /* allowable number of outstanding SRP requests */ + int request_limit; + /* extra credit */ + int credit; + /* outstanding transactions against credit limit */ + int debit; + + /* allow only one outstanding mad request */ +#define PROCESSING_MAD 0x00002 + /* Waiting to go idle */ +#define WAIT_FOR_IDLE 0x00004 + /* H_REG_CRQ called */ +#define CRQ_CLOSED 0x00010 + /* detected that client has failed */ +#define CLIENT_FAILED 0x00040 + /* detected that transport event occurred */ +#define TRANS_EVENT 0x00080 + /* don't attempt to send
anything to the client */ +#define RESPONSE_Q_DOWN 0x00100 + /* request made to schedule disconnect handler */ +#define SCHEDULE_DISCONNECT 0x00400 + /* disconnect handler is scheduled */ +#define DISCONNECT_SCHEDULED 0x00800 + /* remove function is sleeping */ +#define CFG_SLEEPING 0x01000 + /* Register for Prepare for Suspend Transport Events */ +#define PREP_FOR_SUSPEND_ENABLED 0x02000 + /* Prepare for Suspend event sent */ +#define PREP_FOR_SUSPEND_PENDING 0x04000 + /* Resume from Suspend event sent */ +#define PREP_FOR_SUSPEND_ABORTED 0x08000 + /* Prepare for Suspend event overwrote another CRQ entry */ +#define PREP_FOR_SUSPEND_OVERWRITE 0x10000 + u32 flags; + /* adapter lock */ + spinlock_t intr_lock; + /* information needed to manage command queue */ + struct cmd_queue cmd_q; + /* used in hcall to copy response back into srp buffer */ + u64 empty_iu_id; + /* used in crq, to tag what iu the response is for */ + u64 empty_iu_tag; + uint new_state; + uint resume_state; + /* control block for the response queue timer */ + struct timer_cb rsp_q_timer; + /* keep last client to enable proper accounting */ + struct client_info client_data; + /* what can this client do */ + u32 client_cap; + /* + * The following two fields capture state and flag changes that + * can occur when the lock is given up. In the original design, + * the lock was held during calls into phyp; + * however, phyp did not meet PAPR architecture. This is + * a workaround. + */ + u16 phyp_acr_state; + u32 phyp_acr_flags; + + struct workqueue_struct *work_q; + struct completion wait_idle; + struct completion unconfig; + struct device dev; + struct vio_dev *dma_dev; + struct srp_target target; + struct ibmvscsis_tport tport; + struct tasklet_struct work_task; + struct work_struct proc_work; +}; + +/* + * Provide a constant that allows software to detect the adapter is + * disconnecting from the client from one of several states. + */ +#define IS_DISCONNECTING (UNCONFIGURING | ERR_DISCONNECT_RECONNECT | \ + ERR_DISCONNECT) + +/* + * Provide a constant that can be used with interrupt handling that + * essentially lets the interrupt handler know that all requests should + * be thrown out. + */ +#define DONT_PROCESS_STATE (IS_DISCONNECTING | UNDEFINED | \ + ERR_DISCONNECTED | WAIT_IDLE) + +/* + * If any of these flag bits are set then do not allow the interrupt + * handler to schedule the off level handler.
+ */ +#define BLOCK (DISCONNECT_SCHEDULED) + +/* State and transition events that stop the interrupt handler */ +#define TARGET_STOP(VSCSI) (long)(((VSCSI)->state & DONT_PROCESS_STATE) | \ + ((VSCSI)->flags & BLOCK)) + +#define PREP_FOR_SUSPEND_FLAGS (PREP_FOR_SUSPEND_ENABLED | \ + PREP_FOR_SUSPEND_PENDING | \ + PREP_FOR_SUSPEND_ABORTED | \ + PREP_FOR_SUSPEND_OVERWRITE) + +/* flag bits that are not reset during disconnect */ +#define PRESERVE_FLAG_FIELDS (PREP_FOR_SUSPEND_FLAGS) + +#define vio_iu(IUE) ((union viosrp_iu *)((IUE)->sbuf->buf)) + +#define READ_CMD(cdb) (((cdb)[0] & 0x1F) == 8) +#define WRITE_CMD(cdb) (((cdb)[0] & 0x1F) == 0xA) + +#ifndef H_GET_PARTNER_INFO +#define H_GET_PARTNER_INFO 0x0000000000000008LL +#endif +#ifndef H_ENABLE_PREPARE_FOR_SUSPEND +#define H_ENABLE_PREPARE_FOR_SUSPEND 0x000000000000001DLL +#endif +#ifndef H_READY_FOR_SUSPEND +#define H_READY_FOR_SUSPEND 0x000000000000001ELL +#endif + + +#define h_copy_rdma(l, sa, sb, da, db) \ + plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db) +#define h_vioctl(u, o, a, u1, u2, u3, u4) \ + plpar_hcall_norets(H_VIOCTL, u, o, a, u1, u2) +#define h_reg_crq(ua, tok, sz) \ + plpar_hcall_norets(H_REG_CRQ, ua, tok, sz) +#define h_free_crq(ua) \ + plpar_hcall_norets(H_FREE_CRQ, ua) +#define h_send_crq(ua, d1, d2) \ + plpar_hcall_norets(H_SEND_CRQ, ua, d1, d2) + +#endif diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.c b/drivers/scsi/ibmvscsi_tgt/libsrp.c new file mode 100644 index 000000000..8a0e28aec --- /dev/null +++ b/drivers/scsi/ibmvscsi_tgt/libsrp.c @@ -0,0 +1,418 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * SCSI RDMA Protocol lib functions + * + * Copyright (C) 2006 FUJITA Tomonori + * Copyright (C) 2016 Bryant G. Ly IBM Corp.
+ * + ***********************************************************************/ + +#define pr_fmt(fmt) "libsrp: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "libsrp.h" +#include "ibmvscsi_tgt.h" + +static int srp_iu_pool_alloc(struct srp_queue *q, size_t max, + struct srp_buf **ring) +{ + struct iu_entry *iue; + int i; + + q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL); + if (!q->pool) + return -ENOMEM; + q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL); + if (!q->items) + goto free_pool; + + spin_lock_init(&q->lock); + kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *)); + + for (i = 0, iue = q->items; i < max; i++) { + kfifo_in(&q->queue, (void *)&iue, sizeof(void *)); + iue->sbuf = ring[i]; + iue++; + } + return 0; + +free_pool: + kfree(q->pool); + return -ENOMEM; +} + +static void srp_iu_pool_free(struct srp_queue *q) +{ + kfree(q->items); + kfree(q->pool); +} + +static struct srp_buf **srp_ring_alloc(struct device *dev, + size_t max, size_t size) +{ + struct srp_buf **ring; + int i; + + ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL); + if (!ring) + return NULL; + + for (i = 0; i < max; i++) { + ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL); + if (!ring[i]) + goto out; + ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma, + GFP_KERNEL); + if (!ring[i]->buf) + goto out; + } + return ring; + +out: + for (i = 0; i < max && ring[i]; i++) { + if (ring[i]->buf) { + dma_free_coherent(dev, size, ring[i]->buf, + ring[i]->dma); + } + kfree(ring[i]); + } + kfree(ring); + + return NULL; +} + +static void srp_ring_free(struct device *dev, struct srp_buf **ring, + size_t max, size_t size) +{ + int i; + + for (i = 0; i < max; i++) { + dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma); + kfree(ring[i]); + } + kfree(ring); +} + +int srp_target_alloc(struct srp_target *target, struct device *dev, + size_t nr, size_t iu_size) +{ + int err; + + spin_lock_init(&target->lock); + + target->dev = dev; + + target->srp_iu_size = iu_size; + target->rx_ring_size = nr; + target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size); + if (!target->rx_ring) + return -ENOMEM; + err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring); + if (err) + goto free_ring; + + dev_set_drvdata(target->dev, target); + return 0; + +free_ring: + srp_ring_free(target->dev, target->rx_ring, nr, iu_size); + return -ENOMEM; +} + +void srp_target_free(struct srp_target *target) +{ + dev_set_drvdata(target->dev, NULL); + srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size, + target->srp_iu_size); + srp_iu_pool_free(&target->iu_queue); +} + +struct iu_entry *srp_iu_get(struct srp_target *target) +{ + struct iu_entry *iue = NULL; + + if (kfifo_out_locked(&target->iu_queue.queue, (void *)&iue, + sizeof(void *), + &target->iu_queue.lock) != sizeof(void *)) { + WARN_ONCE(1, "unexpected fifo state"); + return NULL; + } + if (!iue) + return iue; + iue->target = target; + iue->flags = 0; + return iue; +} + +void srp_iu_put(struct iu_entry *iue) +{ + kfifo_in_locked(&iue->target->iu_queue.queue, (void *)&iue, + sizeof(void *), &iue->target->iu_queue.lock); +} + +static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md, + enum dma_data_direction dir, srp_rdma_t rdma_io, + int dma_map, int ext_desc) +{ + struct iu_entry *iue = NULL; + struct scatterlist *sg = NULL; + int err, nsg = 0, len; + + if (dma_map) { + iue = cmd->iue; + sg = cmd->se_cmd.t_data_sg; + nsg = 
dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents, + DMA_BIDIRECTIONAL); + if (!nsg) { + pr_err("fail to map %p %d\n", iue, + cmd->se_cmd.t_data_nents); + return 0; + } + len = min(cmd->se_cmd.data_length, be32_to_cpu(md->len)); + } else { + len = be32_to_cpu(md->len); + } + + err = rdma_io(cmd, sg, nsg, md, 1, dir, len); + + if (dma_map) + dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL); + + return err; +} + +static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd, + struct srp_indirect_buf *id, + enum dma_data_direction dir, srp_rdma_t rdma_io, + int dma_map, int ext_desc) +{ + struct iu_entry *iue = NULL; + struct srp_direct_buf *md = NULL; + struct scatterlist dummy, *sg = NULL; + dma_addr_t token = 0; + int err = 0; + int nmd, nsg = 0, len; + + if (dma_map || ext_desc) { + iue = cmd->iue; + sg = cmd->se_cmd.t_data_sg; + } + + nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf); + + if ((dir == DMA_FROM_DEVICE && nmd == srp_cmd->data_in_desc_cnt) || + (dir == DMA_TO_DEVICE && nmd == srp_cmd->data_out_desc_cnt)) { + md = &id->desc_list[0]; + goto rdma; + } + + if (ext_desc && dma_map) { + md = dma_alloc_coherent(iue->target->dev, + be32_to_cpu(id->table_desc.len), + &token, GFP_KERNEL); + if (!md) { + pr_err("Can't get dma memory %u\n", + be32_to_cpu(id->table_desc.len)); + return -ENOMEM; + } + + sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len)); + sg_dma_address(&dummy) = token; + sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len); + err = rdma_io(cmd, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE, + be32_to_cpu(id->table_desc.len)); + if (err) { + pr_err("Error copying indirect table %d\n", err); + goto free_mem; + } + } else { + pr_err("This command uses external indirect buffer\n"); + return -EINVAL; + } + +rdma: + if (dma_map) { + nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents, + DMA_BIDIRECTIONAL); + if (!nsg) { + pr_err("fail to map %p %d\n", iue, + cmd->se_cmd.t_data_nents); + err = -EIO; + goto free_mem; + } + len = min(cmd->se_cmd.data_length, be32_to_cpu(id->len)); + } else { + len = be32_to_cpu(id->len); + } + + err = rdma_io(cmd, sg, nsg, md, nmd, dir, len); + + if (dma_map) + dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL); + +free_mem: + if (token && dma_map) { + dma_free_coherent(iue->target->dev, + be32_to_cpu(id->table_desc.len), md, token); + } + return err; +} + +static int data_out_desc_size(struct srp_cmd *cmd) +{ + int size = 0; + u8 fmt = cmd->buf_fmt >> 4; + + switch (fmt) { + case SRP_NO_DATA_DESC: + break; + case SRP_DATA_DESC_DIRECT: + size = sizeof(struct srp_direct_buf); + break; + case SRP_DATA_DESC_INDIRECT: + size = sizeof(struct srp_indirect_buf) + + sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt; + break; + default: + pr_err("client error. Invalid data_out_format %x\n", fmt); + break; + } + return size; +} + +/* + * TODO: this can be called multiple times for a single command if it + * has very long data. 
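 *
 * Illustrative use only (not part of this driver): a target front end
 * would typically discover the transfer direction and length first and
 * only then ask this helper to move the data; "my_rdma_io" below stands
 * for whatever srp_rdma_t callback the caller supplies:
 *
 *	enum dma_data_direction dir;
 *	u64 len;
 *	int err;
 *
 *	if (!srp_get_desc_table(srp_cmd, &dir, &len) && len)
 *		err = srp_transfer_data(cmd, srp_cmd, my_rdma_io, 1, 1);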
+ */ +int srp_transfer_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd, + srp_rdma_t rdma_io, int dma_map, int ext_desc) +{ + struct srp_direct_buf *md; + struct srp_indirect_buf *id; + enum dma_data_direction dir; + int offset, err = 0; + u8 format; + + if (!cmd->se_cmd.t_data_nents) + return 0; + + offset = srp_cmd->add_cdb_len & ~3; + + dir = srp_cmd_direction(srp_cmd); + if (dir == DMA_FROM_DEVICE) + offset += data_out_desc_size(srp_cmd); + + if (dir == DMA_TO_DEVICE) + format = srp_cmd->buf_fmt >> 4; + else + format = srp_cmd->buf_fmt & ((1U << 4) - 1); + + switch (format) { + case SRP_NO_DATA_DESC: + break; + case SRP_DATA_DESC_DIRECT: + md = (struct srp_direct_buf *)(srp_cmd->add_data + offset); + err = srp_direct_data(cmd, md, dir, rdma_io, dma_map, ext_desc); + break; + case SRP_DATA_DESC_INDIRECT: + id = (struct srp_indirect_buf *)(srp_cmd->add_data + offset); + err = srp_indirect_data(cmd, srp_cmd, id, dir, rdma_io, dma_map, + ext_desc); + break; + default: + pr_err("Unknown format %d %x\n", dir, format); + err = -EINVAL; + } + + return err; +} + +u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir) +{ + struct srp_direct_buf *md; + struct srp_indirect_buf *id; + u64 len = 0; + uint offset = cmd->add_cdb_len & ~3; + u8 fmt; + + if (dir == DMA_TO_DEVICE) { + fmt = cmd->buf_fmt >> 4; + } else { + fmt = cmd->buf_fmt & ((1U << 4) - 1); + offset += data_out_desc_size(cmd); + } + + switch (fmt) { + case SRP_NO_DATA_DESC: + break; + case SRP_DATA_DESC_DIRECT: + md = (struct srp_direct_buf *)(cmd->add_data + offset); + len = be32_to_cpu(md->len); + break; + case SRP_DATA_DESC_INDIRECT: + id = (struct srp_indirect_buf *)(cmd->add_data + offset); + len = be32_to_cpu(id->len); + break; + default: + pr_err("invalid data format %x\n", fmt); + break; + } + return len; +} + +int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir, + u64 *data_len) +{ + struct srp_indirect_buf *idb; + struct srp_direct_buf *db; + uint add_cdb_offset; + int rc; + + /* + * The pointer computations below will only be compiled correctly + * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check + * whether srp_cmd::add_data has been declared as a byte pointer. 
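 *
 * As a reminder of the encoding used throughout this file (see
 * srp_transfer_data() and srp_data_length() above): the upper nibble of
 * srp_cmd->buf_fmt selects the data-out buffer descriptor format and the
 * lower nibble the data-in format, each one of SRP_NO_DATA_DESC,
 * SRP_DATA_DESC_DIRECT or SRP_DATA_DESC_INDIRECT:
 *
 *	u8 out_fmt = srp_cmd->buf_fmt >> 4;
 *	u8 in_fmt  = srp_cmd->buf_fmt & 0xf;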
+ */ + BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) + && !__same_type(srp_cmd->add_data[0], (u8)0)); + + BUG_ON(!dir); + BUG_ON(!data_len); + + rc = 0; + *data_len = 0; + + *dir = DMA_NONE; + + if (srp_cmd->buf_fmt & 0xf) + *dir = DMA_FROM_DEVICE; + else if (srp_cmd->buf_fmt >> 4) + *dir = DMA_TO_DEVICE; + + add_cdb_offset = srp_cmd->add_cdb_len & ~3; + if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) || + ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) { + db = (struct srp_direct_buf *)(srp_cmd->add_data + + add_cdb_offset); + *data_len = be32_to_cpu(db->len); + } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) || + ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) { + idb = (struct srp_indirect_buf *)(srp_cmd->add_data + + add_cdb_offset); + + *data_len = be32_to_cpu(idb->len); + } + return rc; +} + +MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions"); +MODULE_AUTHOR("FUJITA Tomonori"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.h b/drivers/scsi/ibmvscsi_tgt/libsrp.h new file mode 100644 index 000000000..832606ae2 --- /dev/null +++ b/drivers/scsi/ibmvscsi_tgt/libsrp.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LIBSRP_H__ +#define __LIBSRP_H__ + +#include +#include +#include + +enum srp_valid { + INVALIDATE_CMD_RESP_EL = 0, + VALID_CMD_RESP_EL = 0x80, + VALID_INIT_MSG = 0xC0, + VALID_TRANS_EVENT = 0xFF +}; + +enum srp_format { + SRP_FORMAT = 1, + MAD_FORMAT = 2, + OS400_FORMAT = 3, + AIX_FORMAT = 4, + LINUX_FORMAT = 5, + MESSAGE_IN_CRQ = 6 +}; + +enum srp_init_msg { + INIT_MSG = 1, + INIT_COMPLETE_MSG = 2 +}; + +enum srp_trans_event { + UNUSED_FORMAT = 0, + PARTNER_FAILED = 1, + PARTNER_DEREGISTER = 2, + MIGRATED = 6, + PREPARE_FOR_SUSPEND = 9, + RESUME_FROM_SUSP = 0xA +}; + +enum srp_status { + CRQ_ENTRY_OVERWRITTEN = 0x20, + HEADER_DESCRIPTOR = 0xF1, + PING = 0xF5, + PING_RESPONSE = 0xF6 +}; + +enum srp_mad_version { + MAD_VERSION_1 = 1 +}; + +enum srp_os_type { + OS400 = 1, + LINUX = 2, + AIX = 3, + OFW = 4 +}; + +enum srp_task_attributes { + SRP_SIMPLE_TASK = 0, + SRP_HEAD_TASK = 1, + SRP_ORDERED_TASK = 2, + SRP_ACA_TASK = 4 +}; + +enum { + SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE = 0, + SRP_REQUEST_FIELDS_INVALID = 2, + SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED = 4, + SRP_TASK_MANAGEMENT_FUNCTION_FAILED = 5 +}; + +struct srp_buf { + dma_addr_t dma; + void *buf; +}; + +struct srp_queue { + void *pool; + void *items; + struct kfifo queue; + spinlock_t lock; +}; + +struct srp_target { + struct device *dev; + + spinlock_t lock; + struct list_head cmd_queue; + + size_t srp_iu_size; + struct srp_queue iu_queue; + size_t rx_ring_size; + struct srp_buf **rx_ring; + + void *ldata; +}; + +struct iu_entry { + struct srp_target *target; + + struct list_head ilist; + dma_addr_t remote_token; + unsigned long flags; + + struct srp_buf *sbuf; + u16 iu_len; +}; + +struct ibmvscsis_cmd; + +typedef int (srp_rdma_t)(struct ibmvscsis_cmd *, struct scatterlist *, int, + struct srp_direct_buf *, int, + enum dma_data_direction, unsigned int); +int srp_target_alloc(struct srp_target *, struct device *, size_t, size_t); +void srp_target_free(struct srp_target *); +struct iu_entry *srp_iu_get(struct srp_target *); +void srp_iu_put(struct iu_entry *); +int srp_transfer_data(struct ibmvscsis_cmd *, struct srp_cmd *, + srp_rdma_t, int, int); +u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir); +int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir, + u64 *data_len); +static inline 
int srp_cmd_direction(struct srp_cmd *cmd) +{ + return (cmd->buf_fmt >> 4) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; +} + +#endif diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c new file mode 100644 index 000000000..07db98161 --- /dev/null +++ b/drivers/scsi/imm.c @@ -0,0 +1,1288 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* imm.c -- low level driver for the IOMEGA MatchMaker + * parallel port SCSI host adapter. + * + * (The IMM is the embedded controller in the ZIP Plus drive.) + * + * My unofficial company acronym list is 21 pages long: + * FLA: Four letter acronym with built in facility for + * future expansion to five letters. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* The following #define is to avoid a clash with hosts.c */ +#define IMM_PROBE_SPP 0x0001 +#define IMM_PROBE_PS2 0x0002 +#define IMM_PROBE_ECR 0x0010 +#define IMM_PROBE_EPP17 0x0100 +#define IMM_PROBE_EPP19 0x0200 + + +typedef struct { + struct pardevice *dev; /* Parport device entry */ + int base; /* Actual port address */ + int base_hi; /* Hi Base address for ECP-ISA chipset */ + int mode; /* Transfer mode */ + struct scsi_cmnd *cur_cmd; /* Current queued command */ + struct delayed_work imm_tq; /* Polling interrupt stuff */ + unsigned long jstart; /* Jiffies at start */ + unsigned failed:1; /* Failure flag */ + unsigned dp:1; /* Data phase present */ + unsigned rd:1; /* Read data in data phase */ + unsigned wanted:1; /* Parport sharing busy flag */ + unsigned int dev_no; /* Device number */ + wait_queue_head_t *waiting; + struct Scsi_Host *host; + struct list_head list; +} imm_struct; + +static void imm_reset_pulse(unsigned int base); +static int device_check(imm_struct *dev); + +#include "imm.h" + +static inline imm_struct *imm_dev(struct Scsi_Host *host) +{ + return *(imm_struct **)&host->hostdata; +} + +static DEFINE_SPINLOCK(arbitration_lock); + +static void got_it(imm_struct *dev) +{ + dev->base = dev->dev->port->base; + if (dev->cur_cmd) + imm_scsi_pointer(dev->cur_cmd)->phase = 1; + else + wake_up(dev->waiting); +} + +static void imm_wakeup(void *ref) +{ + imm_struct *dev = (imm_struct *) ref; + unsigned long flags; + + spin_lock_irqsave(&arbitration_lock, flags); + if (dev->wanted) { + if (parport_claim(dev->dev) == 0) { + got_it(dev); + dev->wanted = 0; + } + } + spin_unlock_irqrestore(&arbitration_lock, flags); +} + +static int imm_pb_claim(imm_struct *dev) +{ + unsigned long flags; + int res = 1; + spin_lock_irqsave(&arbitration_lock, flags); + if (parport_claim(dev->dev) == 0) { + got_it(dev); + res = 0; + } + dev->wanted = res; + spin_unlock_irqrestore(&arbitration_lock, flags); + return res; +} + +static void imm_pb_dismiss(imm_struct *dev) +{ + unsigned long flags; + int wanted; + spin_lock_irqsave(&arbitration_lock, flags); + wanted = dev->wanted; + dev->wanted = 0; + spin_unlock_irqrestore(&arbitration_lock, flags); + if (!wanted) + parport_release(dev->dev); +} + +static inline void imm_pb_release(imm_struct *dev) +{ + parport_release(dev->dev); +} + +/* This is to give the imm driver a way to modify the timings (and other + * parameters) by writing to the /proc/scsi/imm/0 file. + * Very simple method really... (Too simple, no error checking :( ) + * Reason: Kernel hackers HATE having to unload and reload modules for + * testing... 
+ * Also gives a method to use a script to obtain optimum timings (TODO) + */ +static int imm_write_info(struct Scsi_Host *host, char *buffer, int length) +{ + imm_struct *dev = imm_dev(host); + + if ((length > 5) && (strncmp(buffer, "mode=", 5) == 0)) { + dev->mode = simple_strtoul(buffer + 5, NULL, 0); + return length; + } + printk("imm /proc: invalid variable\n"); + return -EINVAL; +} + +static int imm_show_info(struct seq_file *m, struct Scsi_Host *host) +{ + imm_struct *dev = imm_dev(host); + + seq_printf(m, "Version : %s\n", IMM_VERSION); + seq_printf(m, "Parport : %s\n", dev->dev->port->name); + seq_printf(m, "Mode : %s\n", IMM_MODE_STRING[dev->mode]); + return 0; +} + +#if IMM_DEBUG > 0 +#define imm_fail(x,y) printk("imm: imm_fail(%i) from %s at line %d\n",\ + y, __func__, __LINE__); imm_fail_func(x,y); +static inline void +imm_fail_func(imm_struct *dev, int error_code) +#else +static inline void +imm_fail(imm_struct *dev, int error_code) +#endif +{ + /* If we fail a device then we trash status / message bytes */ + if (dev->cur_cmd) { + dev->cur_cmd->result = error_code << 16; + dev->failed = 1; + } +} + +/* + * Wait for the high bit to be set. + * + * In principle, this could be tied to an interrupt, but the adapter + * doesn't appear to be designed to support interrupts. We spin on + * the 0x80 ready bit. + */ +static unsigned char imm_wait(imm_struct *dev) +{ + int k; + unsigned short ppb = dev->base; + unsigned char r; + + w_ctr(ppb, 0x0c); + + k = IMM_SPIN_TMO; + do { + r = r_str(ppb); + k--; + udelay(1); + } + while (!(r & 0x80) && (k)); + + /* + * STR register (LPT base+1) to SCSI mapping: + * + * STR imm imm + * =================================== + * 0x80 S_REQ S_REQ + * 0x40 !S_BSY (????) + * 0x20 !S_CD !S_CD + * 0x10 !S_IO !S_IO + * 0x08 (????) !S_BSY + * + * imm imm meaning + * ================================== + * 0xf0 0xb8 Bit mask + * 0xc0 0x88 ZIP wants more data + * 0xd0 0x98 ZIP wants to send more data + * 0xe0 0xa8 ZIP is expecting SCSI command data + * 0xf0 0xb8 end of transfer, ZIP is sending status + */ + w_ctr(ppb, 0x04); + if (k) + return (r & 0xb8); + + /* Counter expired - Time out occurred */ + imm_fail(dev, DID_TIME_OUT); + printk("imm timeout in imm_wait\n"); + return 0; /* command timed out */ +} + +static int imm_negotiate(imm_struct * tmp) +{ + /* + * The following is supposedly the IEEE 1284-1994 negotiate + * sequence. I have yet to obtain a copy of the above standard + * so this is a bit of a guess... + * + * A fair chunk of this is based on the Linux parport implementation + * of IEEE 1284. + * + * Return 0 if data available + * 1 if no data available + */ + + unsigned short base = tmp->base; + unsigned char a, mode; + + switch (tmp->mode) { + case IMM_NIBBLE: + mode = 0x00; + break; + case IMM_PS2: + mode = 0x01; + break; + default: + return 0; + } + + w_ctr(base, 0x04); + udelay(5); + w_dtr(base, mode); + udelay(100); + w_ctr(base, 0x06); + udelay(5); + a = (r_str(base) & 0x20) ? 0 : 1; + udelay(5); + w_ctr(base, 0x07); + udelay(5); + w_ctr(base, 0x06); + + if (a) { + printk + ("IMM: IEEE1284 negotiate indicates no data available.\n"); + imm_fail(tmp, DID_ERROR); + } + return a; +} + +/* + * Clear EPP timeout bit. 
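 *
 * (Assumed background, not stated in the original source: the EPP timeout
 * flag lives in bit 0 of the parallel-port status register, and which
 * write clears it differs between chipsets, which is why the routine
 * below writes the bit back first as read and then forced to zero.)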
+ */ +static inline void epp_reset(unsigned short ppb) +{ + int i; + + i = r_str(ppb); + w_str(ppb, i); + w_str(ppb, i & 0xfe); +} + +/* + * Wait for empty ECP fifo (if we are in ECP fifo mode only) + */ +static inline void ecp_sync(imm_struct *dev) +{ + int i, ppb_hi = dev->base_hi; + + if (ppb_hi == 0) + return; + + if ((r_ecr(ppb_hi) & 0xe0) == 0x60) { /* mode 011 == ECP fifo mode */ + for (i = 0; i < 100; i++) { + if (r_ecr(ppb_hi) & 0x01) + return; + udelay(5); + } + printk("imm: ECP sync failed as data still present in FIFO.\n"); + } +} + +static int imm_byte_out(unsigned short base, const char *buffer, int len) +{ + int i; + + w_ctr(base, 0x4); /* apparently a sane mode */ + for (i = len >> 1; i; i--) { + w_dtr(base, *buffer++); + w_ctr(base, 0x5); /* Drop STROBE low */ + w_dtr(base, *buffer++); + w_ctr(base, 0x0); /* STROBE high + INIT low */ + } + w_ctr(base, 0x4); /* apparently a sane mode */ + return 1; /* All went well - we hope! */ +} + +static int imm_nibble_in(unsigned short base, char *buffer, int len) +{ + unsigned char l; + int i; + + /* + * The following is based on documented timing signals + */ + w_ctr(base, 0x4); + for (i = len; i; i--) { + w_ctr(base, 0x6); + l = (r_str(base) & 0xf0) >> 4; + w_ctr(base, 0x5); + *buffer++ = (r_str(base) & 0xf0) | l; + w_ctr(base, 0x4); + } + return 1; /* All went well - we hope! */ +} + +static int imm_byte_in(unsigned short base, char *buffer, int len) +{ + int i; + + /* + * The following is based on documented timing signals + */ + w_ctr(base, 0x4); + for (i = len; i; i--) { + w_ctr(base, 0x26); + *buffer++ = r_dtr(base); + w_ctr(base, 0x25); + } + return 1; /* All went well - we hope! */ +} + +static int imm_out(imm_struct *dev, char *buffer, int len) +{ + unsigned short ppb = dev->base; + int r = imm_wait(dev); + + /* + * Make sure that: + * a) the SCSI bus is BUSY (device still listening) + * b) the device is listening + */ + if ((r & 0x18) != 0x08) { + imm_fail(dev, DID_ERROR); + printk("IMM: returned SCSI status %2x\n", r); + return 0; + } + switch (dev->mode) { + case IMM_EPP_32: + case IMM_EPP_16: + case IMM_EPP_8: + epp_reset(ppb); + w_ctr(ppb, 0x4); +#ifdef CONFIG_SCSI_IZIP_EPP16 + if (!(((long) buffer | len) & 0x01)) + outsw(ppb + 4, buffer, len >> 1); +#else + if (!(((long) buffer | len) & 0x03)) + outsl(ppb + 4, buffer, len >> 2); +#endif + else + outsb(ppb + 4, buffer, len); + w_ctr(ppb, 0xc); + r = !(r_str(ppb) & 0x01); + w_ctr(ppb, 0xc); + ecp_sync(dev); + break; + + case IMM_NIBBLE: + case IMM_PS2: + /* 8 bit output, with a loop */ + r = imm_byte_out(ppb, buffer, len); + break; + + default: + printk("IMM: bug in imm_out()\n"); + r = 0; + } + return r; +} + +static int imm_in(imm_struct *dev, char *buffer, int len) +{ + unsigned short ppb = dev->base; + int r = imm_wait(dev); + + /* + * Make sure that: + * a) the SCSI bus is BUSY (device still listening) + * b) the device is sending data + */ + if ((r & 0x18) != 0x18) { + imm_fail(dev, DID_ERROR); + return 0; + } + switch (dev->mode) { + case IMM_NIBBLE: + /* 4 bit input, with a loop */ + r = imm_nibble_in(ppb, buffer, len); + w_ctr(ppb, 0xc); + break; + + case IMM_PS2: + /* 8 bit input, with a loop */ + r = imm_byte_in(ppb, buffer, len); + w_ctr(ppb, 0xc); + break; + + case IMM_EPP_32: + case IMM_EPP_16: + case IMM_EPP_8: + epp_reset(ppb); + w_ctr(ppb, 0x24); +#ifdef CONFIG_SCSI_IZIP_EPP16 + if (!(((long) buffer | len) & 0x01)) + insw(ppb + 4, buffer, len >> 1); +#else + if (!(((long) buffer | len) & 0x03)) + insl(ppb + 4, buffer, len >> 2); +#endif + else + insb(ppb 
+ 4, buffer, len); + w_ctr(ppb, 0x2c); + r = !(r_str(ppb) & 0x01); + w_ctr(ppb, 0x2c); + ecp_sync(dev); + break; + + default: + printk("IMM: bug in imm_ins()\n"); + r = 0; + break; + } + return r; +} + +static int imm_cpp(unsigned short ppb, unsigned char b) +{ + /* + * Comments on udelay values refer to the + * Command Packet Protocol (CPP) timing diagram. + */ + + unsigned char s1, s2, s3; + w_ctr(ppb, 0x0c); + udelay(2); /* 1 usec - infinite */ + w_dtr(ppb, 0xaa); + udelay(10); /* 7 usec - infinite */ + w_dtr(ppb, 0x55); + udelay(10); /* 7 usec - infinite */ + w_dtr(ppb, 0x00); + udelay(10); /* 7 usec - infinite */ + w_dtr(ppb, 0xff); + udelay(10); /* 7 usec - infinite */ + s1 = r_str(ppb) & 0xb8; + w_dtr(ppb, 0x87); + udelay(10); /* 7 usec - infinite */ + s2 = r_str(ppb) & 0xb8; + w_dtr(ppb, 0x78); + udelay(10); /* 7 usec - infinite */ + s3 = r_str(ppb) & 0x38; + /* + * Values for b are: + * 0000 00aa Assign address aa to current device + * 0010 00aa Select device aa in EPP Winbond mode + * 0010 10aa Select device aa in EPP mode + * 0011 xxxx Deselect all devices + * 0110 00aa Test device aa + * 1101 00aa Select device aa in ECP mode + * 1110 00aa Select device aa in Compatible mode + */ + w_dtr(ppb, b); + udelay(2); /* 1 usec - infinite */ + w_ctr(ppb, 0x0c); + udelay(10); /* 7 usec - infinite */ + w_ctr(ppb, 0x0d); + udelay(2); /* 1 usec - infinite */ + w_ctr(ppb, 0x0c); + udelay(10); /* 7 usec - infinite */ + w_dtr(ppb, 0xff); + udelay(10); /* 7 usec - infinite */ + + /* + * The following table is electrical pin values. + * (BSY is inverted at the CTR register) + * + * BSY ACK POut SEL Fault + * S1 0 X 1 1 1 + * S2 1 X 0 1 1 + * S3 L X 1 1 S + * + * L => Last device in chain + * S => Selected + * + * Observered values for S1,S2,S3 are: + * Disconnect => f8/58/78 + * Connect => f8/58/70 + */ + if ((s1 == 0xb8) && (s2 == 0x18) && (s3 == 0x30)) + return 1; /* Connected */ + if ((s1 == 0xb8) && (s2 == 0x18) && (s3 == 0x38)) + return 0; /* Disconnected */ + + return -1; /* No device present */ +} + +static inline int imm_connect(imm_struct *dev, int flag) +{ + unsigned short ppb = dev->base; + + imm_cpp(ppb, 0xe0); /* Select device 0 in compatible mode */ + imm_cpp(ppb, 0x30); /* Disconnect all devices */ + + if ((dev->mode == IMM_EPP_8) || + (dev->mode == IMM_EPP_16) || + (dev->mode == IMM_EPP_32)) + return imm_cpp(ppb, 0x28); /* Select device 0 in EPP mode */ + return imm_cpp(ppb, 0xe0); /* Select device 0 in compatible mode */ +} + +static void imm_disconnect(imm_struct *dev) +{ + imm_cpp(dev->base, 0x30); /* Disconnect all devices */ +} + +static int imm_select(imm_struct *dev, int target) +{ + int k; + unsigned short ppb = dev->base; + + /* + * Firstly we want to make sure there is nothing + * holding onto the SCSI bus. + */ + w_ctr(ppb, 0xc); + + k = IMM_SELECT_TMO; + do { + k--; + } while ((r_str(ppb) & 0x08) && (k)); + + if (!k) + return 0; + + /* + * Now assert the SCSI ID (HOST and TARGET) on the data bus + */ + w_ctr(ppb, 0x4); + w_dtr(ppb, 0x80 | (1 << target)); + udelay(1); + + /* + * Deassert SELIN first followed by STROBE + */ + w_ctr(ppb, 0xc); + w_ctr(ppb, 0xd); + + /* + * ACK should drop low while SELIN is deasserted. + * FAULT should drop low when the SCSI device latches the bus. + */ + k = IMM_SELECT_TMO; + do { + k--; + } + while (!(r_str(ppb) & 0x08) && (k)); + + /* + * Place the interface back into a sane state (status mode) + */ + w_ctr(ppb, 0xc); + return (k) ? 
1 : 0; +} + +static int imm_init(imm_struct *dev) +{ + if (imm_connect(dev, 0) != 1) + return -EIO; + imm_reset_pulse(dev->base); + mdelay(1); /* Delay to allow devices to settle */ + imm_disconnect(dev); + mdelay(1); /* Another delay to allow devices to settle */ + return device_check(dev); +} + +static inline int imm_send_command(struct scsi_cmnd *cmd) +{ + imm_struct *dev = imm_dev(cmd->device->host); + int k; + + /* NOTE: IMM uses byte pairs */ + for (k = 0; k < cmd->cmd_len; k += 2) + if (!imm_out(dev, &cmd->cmnd[k], 2)) + return 0; + return 1; +} + +/* + * The bulk flag enables some optimisations in the data transfer loops, + * it should be true for any command that transfers data in integral + * numbers of sectors. + * + * The driver appears to remain stable if we speed up the parallel port + * i/o in this function, but not elsewhere. + */ +static int imm_completion(struct scsi_cmnd *const cmd) +{ + /* Return codes: + * -1 Error + * 0 Told to schedule + * 1 Finished data transfer + */ + struct scsi_pointer *scsi_pointer = imm_scsi_pointer(cmd); + imm_struct *dev = imm_dev(cmd->device->host); + unsigned short ppb = dev->base; + unsigned long start_jiffies = jiffies; + + unsigned char r, v; + int fast, bulk, status; + + v = cmd->cmnd[0]; + bulk = ((v == READ_6) || + (v == READ_10) || (v == WRITE_6) || (v == WRITE_10)); + + /* + * We only get here if the drive is ready to comunicate, + * hence no need for a full imm_wait. + */ + w_ctr(ppb, 0x0c); + r = (r_str(ppb) & 0xb8); + + /* + * while (device is not ready to send status byte) + * loop; + */ + while (r != (unsigned char) 0xb8) { + /* + * If we have been running for more than a full timer tick + * then take a rest. + */ + if (time_after(jiffies, start_jiffies + 1)) + return 0; + + /* + * FAIL if: + * a) Drive status is screwy (!ready && !present) + * b) Drive is requesting/sending more data than expected + */ + if ((r & 0x88) != 0x88 || scsi_pointer->this_residual <= 0) { + imm_fail(dev, DID_ERROR); + return -1; /* ERROR_RETURN */ + } + /* determine if we should use burst I/O */ + if (dev->rd == 0) { + fast = bulk && scsi_pointer->this_residual >= + IMM_BURST_SIZE ? IMM_BURST_SIZE : 2; + status = imm_out(dev, scsi_pointer->ptr, fast); + } else { + fast = bulk && scsi_pointer->this_residual >= + IMM_BURST_SIZE ? IMM_BURST_SIZE : 1; + status = imm_in(dev, scsi_pointer->ptr, fast); + } + + scsi_pointer->ptr += fast; + scsi_pointer->this_residual -= fast; + + if (!status) { + imm_fail(dev, DID_BUS_BUSY); + return -1; /* ERROR_RETURN */ + } + if (scsi_pointer->buffer && !scsi_pointer->this_residual) { + /* if scatter/gather, advance to the next segment */ + if (scsi_pointer->buffers_residual--) { + scsi_pointer->buffer = + sg_next(scsi_pointer->buffer); + scsi_pointer->this_residual = + scsi_pointer->buffer->length; + scsi_pointer->ptr = sg_virt(scsi_pointer->buffer); + + /* + * Make sure that we transfer even number of bytes + * otherwise it makes imm_byte_out() messy. + */ + if (scsi_pointer->this_residual & 0x01) + scsi_pointer->this_residual++; + } + } + /* Now check to see if the drive is ready to comunicate */ + w_ctr(ppb, 0x0c); + r = (r_str(ppb) & 0xb8); + + /* If not, drop back down to the scheduler and wait a timer tick */ + if (!(r & 0x80)) + return 0; + } + return 1; /* FINISH_RETURN */ +} + +/* + * Since the IMM itself doesn't generate interrupts, we use + * the scheduler's task queue to generate a stream of call-backs and + * complete the request when the drive is ready. 
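 *
 * In outline: imm_queuecommand_lck() stores the command in dev->cur_cmd
 * and schedules dev->imm_tq with no delay; each run of imm_interrupt()
 * below then re-arms the work one timer tick later for as long as
 * imm_engine() reports the command as still in progress, and finally
 * completes the command via scsi_done().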
+ */ +static void imm_interrupt(struct work_struct *work) +{ + imm_struct *dev = container_of(work, imm_struct, imm_tq.work); + struct scsi_cmnd *cmd = dev->cur_cmd; + struct Scsi_Host *host = cmd->device->host; + unsigned long flags; + + if (imm_engine(dev, cmd)) { + schedule_delayed_work(&dev->imm_tq, 1); + return; + } + /* Command must of completed hence it is safe to let go... */ +#if IMM_DEBUG > 0 + switch ((cmd->result >> 16) & 0xff) { + case DID_OK: + break; + case DID_NO_CONNECT: + printk("imm: no device at SCSI ID %i\n", cmd->device->id); + break; + case DID_BUS_BUSY: + printk("imm: BUS BUSY - EPP timeout detected\n"); + break; + case DID_TIME_OUT: + printk("imm: unknown timeout\n"); + break; + case DID_ABORT: + printk("imm: told to abort\n"); + break; + case DID_PARITY: + printk("imm: parity error (???)\n"); + break; + case DID_ERROR: + printk("imm: internal driver error\n"); + break; + case DID_RESET: + printk("imm: told to reset device\n"); + break; + case DID_BAD_INTR: + printk("imm: bad interrupt (???)\n"); + break; + default: + printk("imm: bad return code (%02x)\n", + (cmd->result >> 16) & 0xff); + } +#endif + + if (imm_scsi_pointer(cmd)->phase > 1) + imm_disconnect(dev); + + imm_pb_dismiss(dev); + + spin_lock_irqsave(host->host_lock, flags); + dev->cur_cmd = NULL; + scsi_done(cmd); + spin_unlock_irqrestore(host->host_lock, flags); + return; +} + +static int imm_engine(imm_struct *dev, struct scsi_cmnd *const cmd) +{ + struct scsi_pointer *scsi_pointer = imm_scsi_pointer(cmd); + unsigned short ppb = dev->base; + unsigned char l = 0, h = 0; + int retv, x; + + /* First check for any errors that may have occurred + * Here we check for internal errors + */ + if (dev->failed) + return 0; + + switch (scsi_pointer->phase) { + case 0: /* Phase 0 - Waiting for parport */ + if (time_after(jiffies, dev->jstart + HZ)) { + /* + * We waited more than a second + * for parport to call us + */ + imm_fail(dev, DID_BUS_BUSY); + return 0; + } + return 1; /* wait until imm_wakeup claims parport */ + + case 1: /* Phase 1 - Connected */ + imm_connect(dev, CONNECT_EPP_MAYBE); + scsi_pointer->phase++; + fallthrough; + + case 2: /* Phase 2 - We are now talking to the scsi bus */ + if (!imm_select(dev, scmd_id(cmd))) { + imm_fail(dev, DID_NO_CONNECT); + return 0; + } + scsi_pointer->phase++; + fallthrough; + + case 3: /* Phase 3 - Ready to accept a command */ + w_ctr(ppb, 0x0c); + if (!(r_str(ppb) & 0x80)) + return 1; + + if (!imm_send_command(cmd)) + return 0; + scsi_pointer->phase++; + fallthrough; + + case 4: /* Phase 4 - Setup scatter/gather buffers */ + if (scsi_bufflen(cmd)) { + scsi_pointer->buffer = scsi_sglist(cmd); + scsi_pointer->this_residual = scsi_pointer->buffer->length; + scsi_pointer->ptr = sg_virt(scsi_pointer->buffer); + } else { + scsi_pointer->buffer = NULL; + scsi_pointer->this_residual = 0; + scsi_pointer->ptr = NULL; + } + scsi_pointer->buffers_residual = scsi_sg_count(cmd) - 1; + scsi_pointer->phase++; + if (scsi_pointer->this_residual & 0x01) + scsi_pointer->this_residual++; + fallthrough; + + case 5: /* Phase 5 - Pre-Data transfer stage */ + /* Spin lock for BUSY */ + w_ctr(ppb, 0x0c); + if (!(r_str(ppb) & 0x80)) + return 1; + + /* Require negotiation for read requests */ + x = (r_str(ppb) & 0xb8); + dev->rd = (x & 0x10) ? 1 : 0; + dev->dp = (x & 0x20) ? 
0 : 1; + + if ((dev->dp) && (dev->rd)) + if (imm_negotiate(dev)) + return 0; + scsi_pointer->phase++; + fallthrough; + + case 6: /* Phase 6 - Data transfer stage */ + /* Spin lock for BUSY */ + w_ctr(ppb, 0x0c); + if (!(r_str(ppb) & 0x80)) + return 1; + + if (dev->dp) { + retv = imm_completion(cmd); + if (retv == -1) + return 0; + if (retv == 0) + return 1; + } + scsi_pointer->phase++; + fallthrough; + + case 7: /* Phase 7 - Post data transfer stage */ + if ((dev->dp) && (dev->rd)) { + if ((dev->mode == IMM_NIBBLE) || (dev->mode == IMM_PS2)) { + w_ctr(ppb, 0x4); + w_ctr(ppb, 0xc); + w_ctr(ppb, 0xe); + w_ctr(ppb, 0x4); + } + } + scsi_pointer->phase++; + fallthrough; + + case 8: /* Phase 8 - Read status/message */ + /* Check for data overrun */ + if (imm_wait(dev) != (unsigned char) 0xb8) { + imm_fail(dev, DID_ERROR); + return 0; + } + if (imm_negotiate(dev)) + return 0; + if (imm_in(dev, &l, 1)) { /* read status byte */ + /* Check for optional message byte */ + if (imm_wait(dev) == (unsigned char) 0xb8) + imm_in(dev, &h, 1); + cmd->result = (DID_OK << 16) | (l & STATUS_MASK); + } + if ((dev->mode == IMM_NIBBLE) || (dev->mode == IMM_PS2)) { + w_ctr(ppb, 0x4); + w_ctr(ppb, 0xc); + w_ctr(ppb, 0xe); + w_ctr(ppb, 0x4); + } + return 0; /* Finished */ + + default: + printk("imm: Invalid scsi phase\n"); + } + return 0; +} + +static int imm_queuecommand_lck(struct scsi_cmnd *cmd) +{ + imm_struct *dev = imm_dev(cmd->device->host); + + if (dev->cur_cmd) { + printk("IMM: bug in imm_queuecommand\n"); + return 0; + } + dev->failed = 0; + dev->jstart = jiffies; + dev->cur_cmd = cmd; + cmd->result = DID_ERROR << 16; /* default return code */ + imm_scsi_pointer(cmd)->phase = 0; /* bus free */ + + schedule_delayed_work(&dev->imm_tq, 0); + + imm_pb_claim(dev); + + return 0; +} + +static DEF_SCSI_QCMD(imm_queuecommand) + +/* + * Apparently the disk->capacity attribute is off by 1 sector + * for all disk drives. We add the one here, but it should really + * be done in sd.c. Even if it gets fixed there, this will still + * work. 
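 *
 * Worked example of the mapping below: with the default 64 head /
 * 32 sector translation, a reported capacity of 196607 yields
 * (196607 + 1) / (64 * 32) = 96 cylinders; only when the cylinder count
 * would exceed 1024 does the code fall back to 255 heads and 63 sectors.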
+ */ +static int imm_biosparam(struct scsi_device *sdev, struct block_device *dev, + sector_t capacity, int ip[]) +{ + ip[0] = 0x40; + ip[1] = 0x20; + ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]); + if (ip[2] > 1024) { + ip[0] = 0xff; + ip[1] = 0x3f; + ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]); + } + return 0; +} + +static int imm_abort(struct scsi_cmnd *cmd) +{ + imm_struct *dev = imm_dev(cmd->device->host); + /* + * There is no method for aborting commands since Iomega + * have tied the SCSI_MESSAGE line high in the interface + */ + + switch (imm_scsi_pointer(cmd)->phase) { + case 0: /* Do not have access to parport */ + case 1: /* Have not connected to interface */ + dev->cur_cmd = NULL; /* Forget the problem */ + return SUCCESS; + default: /* SCSI command sent, can not abort */ + return FAILED; + } +} + +static void imm_reset_pulse(unsigned int base) +{ + w_ctr(base, 0x04); + w_dtr(base, 0x40); + udelay(1); + w_ctr(base, 0x0c); + w_ctr(base, 0x0d); + udelay(50); + w_ctr(base, 0x0c); + w_ctr(base, 0x04); +} + +static int imm_reset(struct scsi_cmnd *cmd) +{ + imm_struct *dev = imm_dev(cmd->device->host); + + if (imm_scsi_pointer(cmd)->phase) + imm_disconnect(dev); + dev->cur_cmd = NULL; /* Forget the problem */ + + imm_connect(dev, CONNECT_NORMAL); + imm_reset_pulse(dev->base); + mdelay(1); /* device settle delay */ + imm_disconnect(dev); + mdelay(1); /* device settle delay */ + return SUCCESS; +} + +static int device_check(imm_struct *dev) +{ + /* This routine looks for a device and then attempts to use EPP + to send a command. If all goes as planned then EPP is available. */ + + static char cmd[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + int loop, old_mode, status, k, ppb = dev->base; + unsigned char l; + + old_mode = dev->mode; + for (loop = 0; loop < 8; loop++) { + /* Attempt to use EPP for Test Unit Ready */ + if ((ppb & 0x0007) == 0x0000) + dev->mode = IMM_EPP_32; + + second_pass: + imm_connect(dev, CONNECT_EPP_MAYBE); + /* Select SCSI device */ + if (!imm_select(dev, loop)) { + imm_disconnect(dev); + continue; + } + printk("imm: Found device at ID %i, Attempting to use %s\n", + loop, IMM_MODE_STRING[dev->mode]); + + /* Send SCSI command */ + status = 1; + w_ctr(ppb, 0x0c); + for (l = 0; (l < 3) && (status); l++) + status = imm_out(dev, &cmd[l << 1], 2); + + if (!status) { + imm_disconnect(dev); + imm_connect(dev, CONNECT_EPP_MAYBE); + imm_reset_pulse(dev->base); + udelay(1000); + imm_disconnect(dev); + udelay(1000); + if (dev->mode == IMM_EPP_32) { + dev->mode = old_mode; + goto second_pass; + } + printk("imm: Unable to establish communication\n"); + return -EIO; + } + w_ctr(ppb, 0x0c); + + k = 1000000; /* 1 Second */ + do { + l = r_str(ppb); + k--; + udelay(1); + } while (!(l & 0x80) && (k)); + + l &= 0xb8; + + if (l != 0xb8) { + imm_disconnect(dev); + imm_connect(dev, CONNECT_EPP_MAYBE); + imm_reset_pulse(dev->base); + udelay(1000); + imm_disconnect(dev); + udelay(1000); + if (dev->mode == IMM_EPP_32) { + dev->mode = old_mode; + goto second_pass; + } + printk + ("imm: Unable to establish communication\n"); + return -EIO; + } + imm_disconnect(dev); + printk + ("imm: Communication established at 0x%x with ID %i using %s\n", + ppb, loop, IMM_MODE_STRING[dev->mode]); + imm_connect(dev, CONNECT_EPP_MAYBE); + imm_reset_pulse(dev->base); + udelay(1000); + imm_disconnect(dev); + udelay(1000); + return 0; + } + printk("imm: No devices found\n"); + return -ENODEV; +} + +/* + * imm cannot deal with highmem, so this causes all IO pages for this host + * to 
reside in low memory (hence mapped) + */ +static int imm_adjust_queue(struct scsi_device *device) +{ + blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH); + return 0; +} + +static const struct scsi_host_template imm_template = { + .module = THIS_MODULE, + .proc_name = "imm", + .show_info = imm_show_info, + .write_info = imm_write_info, + .name = "Iomega VPI2 (imm) interface", + .queuecommand = imm_queuecommand, + .eh_abort_handler = imm_abort, + .eh_host_reset_handler = imm_reset, + .bios_param = imm_biosparam, + .this_id = 7, + .sg_tablesize = SG_ALL, + .can_queue = 1, + .slave_alloc = imm_adjust_queue, + .cmd_size = sizeof(struct scsi_pointer), +}; + +/*************************************************************************** + * Parallel port probing routines * + ***************************************************************************/ + +static LIST_HEAD(imm_hosts); + +/* + * Finds the first available device number that can be alloted to the + * new imm device and returns the address of the previous node so that + * we can add to the tail and have a list in the ascending order. + */ + +static inline imm_struct *find_parent(void) +{ + imm_struct *dev, *par = NULL; + unsigned int cnt = 0; + + if (list_empty(&imm_hosts)) + return NULL; + + list_for_each_entry(dev, &imm_hosts, list) { + if (dev->dev_no != cnt) + return par; + cnt++; + par = dev; + } + + return par; +} + +static int __imm_attach(struct parport *pb) +{ + struct Scsi_Host *host; + imm_struct *dev, *temp; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waiting); + DEFINE_WAIT(wait); + int ports; + int modes, ppb; + int err = -ENOMEM; + struct pardev_cb imm_cb; + + init_waitqueue_head(&waiting); + + dev = kzalloc(sizeof(imm_struct), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + + dev->base = -1; + dev->mode = IMM_AUTODETECT; + INIT_LIST_HEAD(&dev->list); + + temp = find_parent(); + if (temp) + dev->dev_no = temp->dev_no + 1; + + memset(&imm_cb, 0, sizeof(imm_cb)); + imm_cb.private = dev; + imm_cb.wakeup = imm_wakeup; + + dev->dev = parport_register_dev_model(pb, "imm", &imm_cb, dev->dev_no); + if (!dev->dev) + goto out; + + + /* Claim the bus so it remembers what we do to the control + * registers. [ CTR and ECP ] + */ + err = -EBUSY; + dev->waiting = &waiting; + prepare_to_wait(&waiting, &wait, TASK_UNINTERRUPTIBLE); + if (imm_pb_claim(dev)) + schedule_timeout(3 * HZ); + if (dev->wanted) { + printk(KERN_ERR "imm%d: failed to claim parport because " + "a pardevice is owning the port for too long " + "time!\n", pb->number); + imm_pb_dismiss(dev); + dev->waiting = NULL; + finish_wait(&waiting, &wait); + goto out1; + } + dev->waiting = NULL; + finish_wait(&waiting, &wait); + ppb = dev->base = dev->dev->port->base; + dev->base_hi = dev->dev->port->base_hi; + w_ctr(ppb, 0x0c); + modes = dev->dev->port->modes; + + /* Mode detection works up the chain of speed + * This avoids a nasty if-then-else-if-... tree + */ + dev->mode = IMM_NIBBLE; + + if (modes & PARPORT_MODE_TRISTATE) + dev->mode = IMM_PS2; + + /* Done configuration */ + + err = imm_init(dev); + + imm_pb_release(dev); + + if (err) + goto out1; + + /* now the glue ... 
*/ + if (dev->mode == IMM_NIBBLE || dev->mode == IMM_PS2) + ports = 3; + else + ports = 8; + + INIT_DELAYED_WORK(&dev->imm_tq, imm_interrupt); + + err = -ENOMEM; + host = scsi_host_alloc(&imm_template, sizeof(imm_struct *)); + if (!host) + goto out1; + host->io_port = pb->base; + host->n_io_port = ports; + host->dma_channel = -1; + host->unique_id = pb->number; + *(imm_struct **)&host->hostdata = dev; + dev->host = host; + if (!temp) + list_add_tail(&dev->list, &imm_hosts); + else + list_add_tail(&dev->list, &temp->list); + err = scsi_add_host(host, NULL); + if (err) + goto out2; + scsi_scan_host(host); + return 0; + +out2: + list_del_init(&dev->list); + scsi_host_put(host); +out1: + parport_unregister_device(dev->dev); +out: + kfree(dev); + return err; +} + +static void imm_attach(struct parport *pb) +{ + __imm_attach(pb); +} + +static void imm_detach(struct parport *pb) +{ + imm_struct *dev; + list_for_each_entry(dev, &imm_hosts, list) { + if (dev->dev->port == pb) { + list_del_init(&dev->list); + scsi_remove_host(dev->host); + scsi_host_put(dev->host); + parport_unregister_device(dev->dev); + kfree(dev); + break; + } + } +} + +static struct parport_driver imm_driver = { + .name = "imm", + .match_port = imm_attach, + .detach = imm_detach, + .devmodel = true, +}; +module_parport_driver(imm_driver); + +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/imm.h b/drivers/scsi/imm.h new file mode 100644 index 000000000..411cf94af --- /dev/null +++ b/drivers/scsi/imm.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Driver for the Iomega MatchMaker parallel port SCSI HBA embedded in + * the Iomega ZIP Plus drive + * + * (c) 1998 David Campbell + * + * Please note that I live in Perth, Western Australia. GMT+0800 + */ + +#ifndef _IMM_H +#define _IMM_H + +#define IMM_VERSION "2.05 (for Linux 2.4.0)" + +/* + * 10 Apr 1998 (Good Friday) - Received EN144302 by email from Iomega. + * Scarry thing is the level of support from one of their managers. + * The onus is now on us (the developers) to shut up and start coding. + * 11Apr98 [ 0.10 ] + * + * --- SNIP --- + * + * It manages to find the drive which is a good start. Writing data during + * data phase is known to be broken (due to requirements of two byte writes). + * Removing "Phase" debug messages. + * + * PS: Took four hours of coding after I bought a drive. + * ANZAC Day (Aus "War Veterans Holiday") 25Apr98 [ 0.14 ] + * + * Ten minutes later after a few fixes.... (LITERALLY!!!) + * Have mounted disk, copied file, dismounted disk, remount disk, diff file + * ----- It actually works!!! ----- + * 25Apr98 [ 0.15 ] + * + * Twenty minutes of mucking around, rearanged the IEEE negotiate mechanism. + * Now have byte mode working (only EPP and ECP to go now... :=) + * 26Apr98 [ 0.16 ] + * + * Thirty minutes of further coding results in EPP working on my machine. + * 27Apr98 [ 0.17 ] + * + * Due to work commitments and inability to get a "true" ECP mode functioning + * I have decided to code the parport support into imm. + * 09Jun98 [ 0.18 ] + * + * Driver is now out of beta testing. + * Support for parport has been added. + * Now distributed with the ppa driver. + * 12Jun98 [ 2.00 ] + * + * Err.. It appears that imm-2.00 was broken.... 
+ * 18Jun98 [ 2.01 ] + * + * Patch applied to sync this against the Linux 2.1.x kernel code + * Included qboot_zip.sh + * 21Jun98 [ 2.02 ] + * + * Other clean ups include the follow changes: + * CONFIG_SCSI_PPA_HAVE_PEDANTIC => CONFIG_SCSI_IZIP_EPP16 + * added CONFIG_SCSI_IZIP_SLOW_CTR option + * [2.03] + * Fix kernel panic on scsi timeout. 20Aug00 [2.04] + * + * Avoid io_request_lock problems. + * John Cavan 16Nov00 [2.05] + */ +/* ------ END OF USER CONFIGURABLE PARAMETERS ----- */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +/* batteries not included :-) */ + +/* + * modes in which the driver can operate + */ +#define IMM_AUTODETECT 0 /* Autodetect mode */ +#define IMM_NIBBLE 1 /* work in standard 4 bit mode */ +#define IMM_PS2 2 /* PS/2 byte mode */ +#define IMM_EPP_8 3 /* EPP mode, 8 bit */ +#define IMM_EPP_16 4 /* EPP mode, 16 bit */ +#define IMM_EPP_32 5 /* EPP mode, 32 bit */ +#define IMM_UNKNOWN 6 /* Just in case... */ + +static char *IMM_MODE_STRING[] = +{ + [IMM_AUTODETECT] = "Autodetect", + [IMM_NIBBLE] = "SPP", + [IMM_PS2] = "PS/2", + [IMM_EPP_8] = "EPP 8 bit", + [IMM_EPP_16] = "EPP 16 bit", +#ifdef CONFIG_SCSI_IZIP_EPP16 + [IMM_EPP_32] = "EPP 16 bit", +#else + [IMM_EPP_32] = "EPP 32 bit", +#endif + [IMM_UNKNOWN] = "Unknown", +}; + +/* other options */ +#define IMM_BURST_SIZE 512 /* data burst size */ +#define IMM_SELECT_TMO 500 /* 500 how long to wait for target ? */ +#define IMM_SPIN_TMO 5000 /* 50000 imm_wait loop limiter */ +#define IMM_DEBUG 0 /* debugging option */ +#define IN_EPP_MODE(x) (x == IMM_EPP_8 || x == IMM_EPP_16 || x == IMM_EPP_32) + +/* args to imm_connect */ +#define CONNECT_EPP_MAYBE 1 +#define CONNECT_NORMAL 0 + +#define r_dtr(x) (unsigned char)inb((x)) +#define r_str(x) (unsigned char)inb((x)+1) +#define r_ctr(x) (unsigned char)inb((x)+2) +#define r_epp(x) (unsigned char)inb((x)+4) +#define r_fifo(x) (unsigned char)inb((x)) /* x must be base_hi */ + /* On PCI is: base+0x400 != base_hi */ +#define r_ecr(x) (unsigned char)inb((x)+2) /* x must be base_hi */ + +#define w_dtr(x,y) outb(y, (x)) +#define w_str(x,y) outb(y, (x)+1) +#define w_epp(x,y) outb(y, (x)+4) +#define w_fifo(x,y) outb(y, (x)) /* x must be base_hi */ +#define w_ecr(x,y) outb(y, (x)+0x2) /* x must be base_hi */ + +#ifdef CONFIG_SCSI_IZIP_SLOW_CTR +#define w_ctr(x,y) outb_p(y, (x)+2) +#else +#define w_ctr(x,y) outb(y, (x)+2) +#endif + +static inline struct scsi_pointer *imm_scsi_pointer(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +static int imm_engine(imm_struct *, struct scsi_cmnd *); + +#endif /* _IMM_H */ diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c new file mode 100644 index 000000000..2a50fda3a --- /dev/null +++ b/drivers/scsi/initio.c @@ -0,0 +1,2965 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/************************************************************************** + * Initio 9100 device driver for Linux. + * + * Copyright (c) 1994-1998 Initio Corporation + * Copyright (c) 1998 Bas Vermeulen + * Copyright (c) 2004 Christoph Hellwig + * Copyright (c) 2007 Red Hat + * + ************************************************************************* + * + * DESCRIPTION: + * + * This is the Linux low-level SCSI driver for Initio INI-9X00U/UW SCSI host + * adapters + * + * 08/06/97 hc - v1.01h + * - Support inic-940 and inic-935 + * 09/26/97 hc - v1.01i + * - Make correction from J.W. 
Schultz suggestion + * 10/13/97 hc - Support reset function + * 10/21/97 hc - v1.01j + * - Support 32 LUN (SCSI 3) + * 01/14/98 hc - v1.01k + * - Fix memory allocation problem + * 03/04/98 hc - v1.01l + * - Fix tape rewind which will hang the system problem + * - Set can_queue to initio_num_scb + * 06/25/98 hc - v1.01m + * - Get it work for kernel version >= 2.1.75 + * - Dynamic assign SCSI bus reset holding time in initio_init() + * 07/02/98 hc - v1.01n + * - Support 0002134A + * 08/07/98 hc - v1.01o + * - Change the initio_abort_srb routine to use scsi_done. <01> + * 09/07/98 hl - v1.02 + * - Change the INI9100U define and proc_dir_entry to + * reflect the newer Kernel 2.1.118, but the v1.o1o + * should work with Kernel 2.1.118. + * 09/20/98 wh - v1.02a + * - Support Abort command. + * - Handle reset routine. + * 09/21/98 hl - v1.03 + * - remove comments. + * 12/09/98 bv - v1.03a + * - Removed unused code + * 12/13/98 bv - v1.03b + * - Remove cli() locking for kernels >= 2.1.95. This uses + * spinlocks to serialize access to the pSRB_head and + * pSRB_tail members of the HCS structure. + * 09/01/99 bv - v1.03d + * - Fixed a deadlock problem in SMP. + * 21/01/99 bv - v1.03e + * - Add support for the Domex 3192U PCI SCSI + * This is a slightly modified patch by + * Brian Macy + * 22/02/99 bv - v1.03f + * - Didn't detect the INIC-950 in 2.0.x correctly. + * Now fixed. + * 05/07/99 bv - v1.03g + * - Changed the assumption that HZ = 100 + * 10/17/03 mc - v1.04 + * - added new DMA API support + * 06/01/04 jmd - v1.04a + * - Re-add reset_bus support + **************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "initio.h" + +#define SENSE_SIZE 14 + +#define i91u_MAXQUEUE 2 +#define i91u_REVID "Initio INI-9X00U/UW SCSI device driver; Revision: 1.04a" + +#ifdef DEBUG_i91u +static unsigned int i91u_debug = DEBUG_DEFAULT; +#endif + +static int initio_tag_enable = 1; + +#ifdef DEBUG_i91u +static int setup_debug = 0; +#endif + +static void i91uSCBPost(u8 * pHcb, u8 * pScb); + +#define DEBUG_INTERRUPT 0 +#define DEBUG_QUEUE 0 +#define DEBUG_STATE 0 +#define INT_DISC 0 + +/*--- forward references ---*/ +static struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun); +static struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host); + +static int tulip_main(struct initio_host * host); + +static int initio_next_state(struct initio_host * host); +static int initio_state_1(struct initio_host * host); +static int initio_state_2(struct initio_host * host); +static int initio_state_3(struct initio_host * host); +static int initio_state_4(struct initio_host * host); +static int initio_state_5(struct initio_host * host); +static int initio_state_6(struct initio_host * host); +static int initio_state_7(struct initio_host * host); +static int initio_xfer_data_in(struct initio_host * host); +static int initio_xfer_data_out(struct initio_host * host); +static int initio_xpad_in(struct initio_host * host); +static int initio_xpad_out(struct initio_host * host); +static int initio_status_msg(struct initio_host * host); + +static int initio_msgin(struct initio_host * host); +static int initio_msgin_sync(struct initio_host * host); +static int initio_msgin_accept(struct initio_host * host); +static int initio_msgout_reject(struct 
initio_host * host); +static int initio_msgin_extend(struct initio_host * host); + +static int initio_msgout_ide(struct initio_host * host); +static int initio_msgout_abort_targ(struct initio_host * host); +static int initio_msgout_abort_tag(struct initio_host * host); + +static int initio_bus_device_reset(struct initio_host * host); +static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb); +static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb); +static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb); +static int int_initio_busfree(struct initio_host * host); +static int int_initio_scsi_rst(struct initio_host * host); +static int int_initio_bad_seq(struct initio_host * host); +static int int_initio_resel(struct initio_host * host); +static int initio_sync_done(struct initio_host * host); +static int wdtr_done(struct initio_host * host); +static int wait_tulip(struct initio_host * host); +static int initio_wait_done_disc(struct initio_host * host); +static int initio_wait_disc(struct initio_host * host); +static void tulip_scsi(struct initio_host * host); +static int initio_post_scsi_rst(struct initio_host * host); + +static void initio_se2_ew_en(unsigned long base); +static void initio_se2_ew_ds(unsigned long base); +static int initio_se2_rd_all(unsigned long base); +static void initio_se2_update_all(unsigned long base); /* setup default pattern */ +static void initio_read_eeprom(unsigned long base); + +/* ---- INTERNAL VARIABLES ---- */ + +static NVRAM i91unvram; +static NVRAM *i91unvramp; + +static u8 i91udftNvRam[64] = +{ + /*----------- header -----------*/ + 0x25, 0xc9, /* Signature */ + 0x40, /* Size */ + 0x01, /* Revision */ + /* -- Host Adapter Structure -- */ + 0x95, /* ModelByte0 */ + 0x00, /* ModelByte1 */ + 0x00, /* ModelInfo */ + 0x01, /* NumOfCh */ + NBC1_DEFAULT, /* BIOSConfig1 */ + 0, /* BIOSConfig2 */ + 0, /* HAConfig1 */ + 0, /* HAConfig2 */ + /* SCSI channel 0 and target Structure */ + 7, /* SCSIid */ + NCC1_DEFAULT, /* SCSIconfig1 */ + 0, /* SCSIconfig2 */ + 0x10, /* NumSCSItarget */ + + NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, + NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, + NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, + NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, + + /* SCSI channel 1 and target Structure */ + 7, /* SCSIid */ + NCC1_DEFAULT, /* SCSIconfig1 */ + 0, /* SCSIconfig2 */ + 0x10, /* NumSCSItarget */ + + NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, + NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, + NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, + NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0}; /* - CheckSum - */ + + +static u8 initio_rate_tbl[8] = /* fast 20 */ +{ + /* nanosecond divide by 4 */ + 12, /* 50ns, 20M */ + 18, /* 75ns, 13.3M */ + 25, /* 100ns, 10M */ + 31, /* 125ns, 8M */ + 37, /* 150ns, 6.6M */ + 43, /* 175ns, 5.7M */ + 50, /* 200ns, 5M */ + 62 /* 250ns, 4M */ +}; + +static void initio_do_pause(unsigned amount) +{ + /* Pause for amount jiffies */ + unsigned long the_time = jiffies + amount; + + while (time_before_eq(jiffies, the_time)) + cpu_relax(); +} + +/*-- forward reference --*/ + +/****************************************************************** + Input: instruction for Serial E2PROM + + EX: se2_rd(0 call se2_instr() to send address and read command + + StartBit OP_Code Address Data + --------- -------- ------------------ ------- + 1 1 , 0 
A5,A4,A3,A2,A1,A0 D15-D0 + + +----------------------------------------------------- + | + CS -----+ + +--+ +--+ +--+ +--+ +--+ + ^ | ^ | ^ | ^ | ^ | + | | | | | | | | | | + CLK -------+ +--+ +--+ +--+ +--+ +-- + (leading edge trigger) + + +--1-----1--+ + | SB OP | OP A5 A4 + DI ----+ +--0------------------ + (address and cmd sent to nvram) + + -------------------------------------------+ + | + DO +--- + (data sent from nvram) + + +******************************************************************/ + +/** + * initio_se2_instr - bitbang an instruction + * @base: Base of InitIO controller + * @instr: Instruction for serial E2PROM + * + * Bitbang an instruction out to the serial E2Prom + */ + +static void initio_se2_instr(unsigned long base, u8 instr) +{ + int i; + u8 b; + + outb(SE2CS | SE2DO, base + TUL_NVRAM); /* cs+start bit */ + udelay(30); + outb(SE2CS | SE2CLK | SE2DO, base + TUL_NVRAM); /* +CLK */ + udelay(30); + + for (i = 0; i < 8; i++) { + if (instr & 0x80) + b = SE2CS | SE2DO; /* -CLK+dataBit */ + else + b = SE2CS; /* -CLK */ + outb(b, base + TUL_NVRAM); + udelay(30); + outb(b | SE2CLK, base + TUL_NVRAM); /* +CLK */ + udelay(30); + instr <<= 1; + } + outb(SE2CS, base + TUL_NVRAM); /* -CLK */ + udelay(30); +} + + +/** + * initio_se2_ew_en - Enable erase/write + * @base: Base address of InitIO controller + * + * Enable erase/write state of serial EEPROM + */ +void initio_se2_ew_en(unsigned long base) +{ + initio_se2_instr(base, 0x30); /* EWEN */ + outb(0, base + TUL_NVRAM); /* -CS */ + udelay(30); +} + + +/** + * initio_se2_ew_ds - Disable erase/write + * @base: Base address of InitIO controller + * + * Disable erase/write state of serial EEPROM + */ +void initio_se2_ew_ds(unsigned long base) +{ + initio_se2_instr(base, 0); /* EWDS */ + outb(0, base + TUL_NVRAM); /* -CS */ + udelay(30); +} + + +/** + * initio_se2_rd - read E2PROM word + * @base: Base of InitIO controller + * @addr: Address of word in E2PROM + * + * Read a word from the NV E2PROM device + */ +static u16 initio_se2_rd(unsigned long base, u8 addr) +{ + u8 instr, rb; + u16 val = 0; + int i; + + instr = (u8) (addr | 0x80); + initio_se2_instr(base, instr); /* READ INSTR */ + + for (i = 15; i >= 0; i--) { + outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */ + udelay(30); + outb(SE2CS, base + TUL_NVRAM); /* -CLK */ + + /* sample data after the following edge of clock */ + rb = inb(base + TUL_NVRAM); + rb &= SE2DI; + val += (rb << i); + udelay(30); /* 6/20/95 */ + } + + outb(0, base + TUL_NVRAM); /* no chip select */ + udelay(30); + return val; +} + +/** + * initio_se2_wr - read E2PROM word + * @base: Base of InitIO controller + * @addr: Address of word in E2PROM + * @val: Value to write + * + * Write a word to the NV E2PROM device. Used when recovering from + * a problem with the NV. 
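 *
 * For reference, the instruction bytes used by this file are (matching
 * initio_se2_rd(), initio_se2_ew_en() and initio_se2_ew_ds() above and
 * the write path below):
 *
 *	0x80 | addr	read the 16-bit word at addr
 *	0x40 | addr	write the 16-bit word at addr (needs a prior EWEN)
 *	0x30		EWEN - enable erase/write
 *	0x00		EWDS - disable erase/write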
+ */ +static void initio_se2_wr(unsigned long base, u8 addr, u16 val) +{ + u8 rb; + u8 instr; + int i; + + instr = (u8) (addr | 0x40); + initio_se2_instr(base, instr); /* WRITE INSTR */ + for (i = 15; i >= 0; i--) { + if (val & 0x8000) + outb(SE2CS | SE2DO, base + TUL_NVRAM); /* -CLK+dataBit 1 */ + else + outb(SE2CS, base + TUL_NVRAM); /* -CLK+dataBit 0 */ + udelay(30); + outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */ + udelay(30); + val <<= 1; + } + outb(SE2CS, base + TUL_NVRAM); /* -CLK */ + udelay(30); + outb(0, base + TUL_NVRAM); /* -CS */ + udelay(30); + + outb(SE2CS, base + TUL_NVRAM); /* +CS */ + udelay(30); + + for (;;) { + outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */ + udelay(30); + outb(SE2CS, base + TUL_NVRAM); /* -CLK */ + udelay(30); + if ((rb = inb(base + TUL_NVRAM)) & SE2DI) + break; /* write complete */ + } + outb(0, base + TUL_NVRAM); /* -CS */ +} + +/** + * initio_se2_rd_all - read hostadapter NV configuration + * @base: Base address of InitIO controller + * + * Reads the E2PROM data into main memory. Ensures that the checksum + * and header marker are valid. Returns 1 on success -1 on error. + */ + +static int initio_se2_rd_all(unsigned long base) +{ + int i; + u16 chksum = 0; + u16 *np; + + i91unvramp = &i91unvram; + np = (u16 *) i91unvramp; + for (i = 0; i < 32; i++) + *np++ = initio_se2_rd(base, i); + + /* Is signature "ini" ok ? */ + if (i91unvramp->NVM_Signature != INI_SIGNATURE) + return -1; + /* Is ckecksum ok ? */ + np = (u16 *) i91unvramp; + for (i = 0; i < 31; i++) + chksum += *np++; + if (i91unvramp->NVM_CheckSum != chksum) + return -1; + return 1; +} + +/** + * initio_se2_update_all - Update E2PROM + * @base: Base of InitIO controller + * + * Update the E2PROM by wrting any changes into the E2PROM + * chip, rewriting the checksum. + */ +static void initio_se2_update_all(unsigned long base) +{ /* setup default pattern */ + int i; + u16 chksum = 0; + u16 *np, *np1; + + i91unvramp = &i91unvram; + /* Calculate checksum first */ + np = (u16 *) i91udftNvRam; + for (i = 0; i < 31; i++) + chksum += *np++; + *np = chksum; + initio_se2_ew_en(base); /* Enable write */ + + np = (u16 *) i91udftNvRam; + np1 = (u16 *) i91unvramp; + for (i = 0; i < 32; i++, np++, np1++) { + if (*np != *np1) + initio_se2_wr(base, i, *np); + } + initio_se2_ew_ds(base); /* Disable write */ +} + +/** + * initio_read_eeprom - Retrieve configuration + * @base: Base of InitIO Host Adapter + * + * Retrieve the host adapter configuration data from E2Prom. If the + * data is invalid then the defaults are used and are also restored + * into the E2PROM. This forms the access point for the SCSI driver + * into the E2PROM layer, the other functions for the E2PROM are all + * internal use. + * + * Must be called single threaded, uses a shared global area. 
+ */ + +static void initio_read_eeprom(unsigned long base) +{ + u8 gctrl; + + i91unvramp = &i91unvram; + /* Enable EEProm programming */ + gctrl = inb(base + TUL_GCTRL); + outb(gctrl | TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL); + if (initio_se2_rd_all(base) != 1) { + initio_se2_update_all(base); /* setup default pattern */ + initio_se2_rd_all(base); /* load again */ + } + /* Disable EEProm programming */ + gctrl = inb(base + TUL_GCTRL); + outb(gctrl & ~TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL); +} + +/** + * initio_stop_bm - stop bus master + * @host: InitIO we are stopping + * + * Stop any pending DMA operation, aborting the DMA if necessary + */ + +static void initio_stop_bm(struct initio_host * host) +{ + + if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */ + outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd); + /* wait Abort DMA xfer done */ + while ((inb(host->addr + TUL_Int) & XABT) == 0) + cpu_relax(); + } + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); +} + +/** + * initio_reset_scsi - Reset SCSI host controller + * @host: InitIO host to reset + * @seconds: Recovery time + * + * Perform a full reset of the SCSI subsystem. + */ + +static int initio_reset_scsi(struct initio_host * host, int seconds) +{ + outb(TSC_RST_BUS, host->addr + TUL_SCtrl0); + + while (!((host->jsint = inb(host->addr + TUL_SInt)) & TSS_SCSIRST_INT)) + cpu_relax(); + + /* reset tulip chip */ + outb(0, host->addr + TUL_SSignal); + + /* Stall for a while, wait for target's firmware ready,make it 2 sec ! */ + /* SONY 5200 tape drive won't work if only stall for 1 sec */ + /* FIXME: this is a very long busy wait right now */ + initio_do_pause(seconds * HZ); + + inb(host->addr + TUL_SInt); + return SCSI_RESET_SUCCESS; +} + +/** + * initio_init - set up an InitIO host adapter + * @host: InitIO host adapter + * @bios_addr: BIOS address + * + * Set up the host adapter and devices according to the configuration + * retrieved from the E2PROM. + * + * Locking: Calls E2PROM layer code which is not re-enterable so must + * run single threaded for now. 
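+ *
+ * Per-target flags and the BIOS drive geometry (heads/sectors) are taken
+ * from the NVRAM image and from the BIOS data area at bios_addr + 0x180.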
+ */ + +static void initio_init(struct initio_host * host, u8 *bios_addr) +{ + int i; + u8 *flags; + u8 *heads; + + /* Get E2Prom configuration */ + initio_read_eeprom(host->addr); + if (i91unvramp->NVM_SCSIInfo[0].NVM_NumOfTarg == 8) + host->max_tar = 8; + else + host->max_tar = 16; + + host->config = i91unvramp->NVM_SCSIInfo[0].NVM_ChConfig1; + + host->scsi_id = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID; + host->idmask = ~(1 << host->scsi_id); + +#ifdef CHK_PARITY + /* Enable parity error response */ + outb(inb(host->addr + TUL_PCMD) | 0x40, host->addr + TUL_PCMD); +#endif + + /* Mask all the interrupt */ + outb(0x1F, host->addr + TUL_Mask); + + initio_stop_bm(host); + /* --- Initialize the tulip --- */ + outb(TSC_RST_CHIP, host->addr + TUL_SCtrl0); + + /* program HBA's SCSI ID */ + outb(host->scsi_id << 4, host->addr + TUL_SScsiId); + + /* Enable Initiator Mode ,phase latch,alternate sync period mode, + disable SCSI reset */ + if (host->config & HCC_EN_PAR) + host->sconf1 = (TSC_INITDEFAULT | TSC_EN_SCSI_PAR); + else + host->sconf1 = (TSC_INITDEFAULT); + outb(host->sconf1, host->addr + TUL_SConfig); + + /* Enable HW reselect */ + outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); + + outb(0, host->addr + TUL_SPeriod); + + /* selection time out = 250 ms */ + outb(153, host->addr + TUL_STimeOut); + + /* Enable SCSI terminator */ + outb((host->config & (HCC_ACT_TERM1 | HCC_ACT_TERM2)), + host->addr + TUL_XCtrl); + outb(((host->config & HCC_AUTO_TERM) >> 4) | + (inb(host->addr + TUL_GCTRL1) & 0xFE), + host->addr + TUL_GCTRL1); + + for (i = 0, + flags = & (i91unvramp->NVM_SCSIInfo[0].NVM_Targ0Config), + heads = bios_addr + 0x180; + i < host->max_tar; + i++, flags++) { + host->targets[i].flags = *flags & ~(TCF_SYNC_DONE | TCF_WDTR_DONE); + if (host->targets[i].flags & TCF_EN_255) + host->targets[i].drv_flags = TCF_DRV_255_63; + else + host->targets[i].drv_flags = 0; + host->targets[i].js_period = 0; + host->targets[i].sconfig0 = host->sconf1; + host->targets[i].heads = *heads++; + if (host->targets[i].heads == 255) + host->targets[i].drv_flags = TCF_DRV_255_63; + else + host->targets[i].drv_flags = 0; + host->targets[i].sectors = *heads++; + host->targets[i].flags &= ~TCF_BUSY; + host->act_tags[i] = 0; + host->max_tags[i] = 0xFF; + } /* for */ + printk("i91u: PCI Base=0x%04X, IRQ=%d, BIOS=0x%04X0, SCSI ID=%d\n", + host->addr, host->pci_dev->irq, + host->bios_addr, host->scsi_id); + /* Reset SCSI Bus */ + if (host->config & HCC_SCSI_RESET) { + printk(KERN_INFO "i91u: Reset SCSI Bus ... \n"); + initio_reset_scsi(host, 10); + } + outb(0x17, host->addr + TUL_SCFG1); + outb(0xE9, host->addr + TUL_SIntEnable); +} + +/** + * initio_alloc_scb - Allocate an SCB + * @host: InitIO host we are allocating for + * + * Walk the SCB list for the controller and allocate a free SCB if + * one exists. 
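+ * Returns NULL when the free list is empty, in which case the caller
+ * reports a host busy condition back to the midlayer.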
+ */ +static struct scsi_ctrl_blk *initio_alloc_scb(struct initio_host *host) +{ + struct scsi_ctrl_blk *scb; + unsigned long flags; + + spin_lock_irqsave(&host->avail_lock, flags); + if ((scb = host->first_avail) != NULL) { +#if DEBUG_QUEUE + printk("find scb at %p\n", scb); +#endif + if ((host->first_avail = scb->next) == NULL) + host->last_avail = NULL; + scb->next = NULL; + scb->status = SCB_RENT; + } + spin_unlock_irqrestore(&host->avail_lock, flags); + return scb; +} + +/** + * initio_release_scb - Release an SCB + * @host: InitIO host that owns the SCB + * @cmnd: SCB command block being returned + * + * Return an allocated SCB to the host free list + */ + +static void initio_release_scb(struct initio_host * host, struct scsi_ctrl_blk * cmnd) +{ + unsigned long flags; + +#if DEBUG_QUEUE + printk("Release SCB %p; ", cmnd); +#endif + spin_lock_irqsave(&(host->avail_lock), flags); + cmnd->srb = NULL; + cmnd->status = 0; + cmnd->next = NULL; + if (host->last_avail != NULL) { + host->last_avail->next = cmnd; + host->last_avail = cmnd; + } else { + host->first_avail = cmnd; + host->last_avail = cmnd; + } + spin_unlock_irqrestore(&(host->avail_lock), flags); +} + +/***************************************************************************/ +static void initio_append_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp) +{ + +#if DEBUG_QUEUE + printk("Append pend SCB %p; ", scbp); +#endif + scbp->status = SCB_PEND; + scbp->next = NULL; + if (host->last_pending != NULL) { + host->last_pending->next = scbp; + host->last_pending = scbp; + } else { + host->first_pending = scbp; + host->last_pending = scbp; + } +} + +/***************************************************************************/ +static void initio_push_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp) +{ + +#if DEBUG_QUEUE + printk("Push pend SCB %p; ", scbp); +#endif + scbp->status = SCB_PEND; + if ((scbp->next = host->first_pending) != NULL) { + host->first_pending = scbp; + } else { + host->first_pending = scbp; + host->last_pending = scbp; + } +} + +static struct scsi_ctrl_blk *initio_find_first_pend_scb(struct initio_host * host) +{ + struct scsi_ctrl_blk *first; + + + first = host->first_pending; + while (first != NULL) { + if (first->opcode != ExecSCSI) + return first; + if (first->tagmsg == 0) { + if ((host->act_tags[first->target] == 0) && + !(host->targets[first->target].flags & TCF_BUSY)) + return first; + } else { + if ((host->act_tags[first->target] >= + host->max_tags[first->target]) | + (host->targets[first->target].flags & TCF_BUSY)) { + first = first->next; + continue; + } + return first; + } + first = first->next; + } + return first; +} + +static void initio_unlink_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scb) +{ + struct scsi_ctrl_blk *tmp, *prev; + +#if DEBUG_QUEUE + printk("unlink pend SCB %p; ", scb); +#endif + + prev = tmp = host->first_pending; + while (tmp != NULL) { + if (scb == tmp) { /* Unlink this SCB */ + if (tmp == host->first_pending) { + if ((host->first_pending = tmp->next) == NULL) + host->last_pending = NULL; + } else { + prev->next = tmp->next; + if (tmp == host->last_pending) + host->last_pending = prev; + } + tmp->next = NULL; + break; + } + prev = tmp; + tmp = tmp->next; + } +} + +static void initio_append_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp) +{ + +#if DEBUG_QUEUE + printk("append busy SCB %p; ", scbp); +#endif + if (scbp->tagmsg) + host->act_tags[scbp->target]++; + else + host->targets[scbp->target].flags |= TCF_BUSY; 
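+	/* Mark the SCB busy and add it to the tail of the busy queue */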
+ scbp->status = SCB_BUSY; + scbp->next = NULL; + if (host->last_busy != NULL) { + host->last_busy->next = scbp; + host->last_busy = scbp; + } else { + host->first_busy = scbp; + host->last_busy = scbp; + } +} + +/***************************************************************************/ +static struct scsi_ctrl_blk *initio_pop_busy_scb(struct initio_host * host) +{ + struct scsi_ctrl_blk *tmp; + + + if ((tmp = host->first_busy) != NULL) { + if ((host->first_busy = tmp->next) == NULL) + host->last_busy = NULL; + tmp->next = NULL; + if (tmp->tagmsg) + host->act_tags[tmp->target]--; + else + host->targets[tmp->target].flags &= ~TCF_BUSY; + } +#if DEBUG_QUEUE + printk("Pop busy SCB %p; ", tmp); +#endif + return tmp; +} + +/***************************************************************************/ +static void initio_unlink_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scb) +{ + struct scsi_ctrl_blk *tmp, *prev; + +#if DEBUG_QUEUE + printk("unlink busy SCB %p; ", scb); +#endif + + prev = tmp = host->first_busy; + while (tmp != NULL) { + if (scb == tmp) { /* Unlink this SCB */ + if (tmp == host->first_busy) { + if ((host->first_busy = tmp->next) == NULL) + host->last_busy = NULL; + } else { + prev->next = tmp->next; + if (tmp == host->last_busy) + host->last_busy = prev; + } + tmp->next = NULL; + if (tmp->tagmsg) + host->act_tags[tmp->target]--; + else + host->targets[tmp->target].flags &= ~TCF_BUSY; + break; + } + prev = tmp; + tmp = tmp->next; + } + return; +} + +struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun) +{ + struct scsi_ctrl_blk *tmp; + u16 scbp_tarlun; + + + tmp = host->first_busy; + while (tmp != NULL) { + scbp_tarlun = (tmp->lun << 8) | (tmp->target); + if (scbp_tarlun == tarlun) { /* Unlink this SCB */ + break; + } + tmp = tmp->next; + } +#if DEBUG_QUEUE + printk("find busy SCB %p; ", tmp); +#endif + return tmp; +} + +static void initio_append_done_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp) +{ +#if DEBUG_QUEUE + printk("append done SCB %p; ", scbp); +#endif + + scbp->status = SCB_DONE; + scbp->next = NULL; + if (host->last_done != NULL) { + host->last_done->next = scbp; + host->last_done = scbp; + } else { + host->first_done = scbp; + host->last_done = scbp; + } +} + +struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host) +{ + struct scsi_ctrl_blk *tmp; + + if ((tmp = host->first_done) != NULL) { + if ((host->first_done = tmp->next) == NULL) + host->last_done = NULL; + tmp->next = NULL; + } +#if DEBUG_QUEUE + printk("find done SCB %p; ",tmp); +#endif + return tmp; +} + +static int initio_abort_srb(struct initio_host * host, struct scsi_cmnd *srbp) +{ + unsigned long flags; + struct scsi_ctrl_blk *tmp, *prev; + + spin_lock_irqsave(&host->semaph_lock, flags); + + if ((host->semaph == 0) && (host->active == NULL)) { + /* disable Jasmin SCSI Int */ + outb(0x1F, host->addr + TUL_Mask); + spin_unlock_irqrestore(&host->semaph_lock, flags); + /* FIXME: synchronize_irq needed ? 
*/ + tulip_main(host); + spin_lock_irqsave(&host->semaph_lock, flags); + host->semaph = 1; + outb(0x0F, host->addr + TUL_Mask); + spin_unlock_irqrestore(&host->semaph_lock, flags); + return SCSI_ABORT_SNOOZE; + } + prev = tmp = host->first_pending; /* Check Pend queue */ + while (tmp != NULL) { + /* 07/27/98 */ + if (tmp->srb == srbp) { + if (tmp == host->active) { + spin_unlock_irqrestore(&host->semaph_lock, flags); + return SCSI_ABORT_BUSY; + } else if (tmp == host->first_pending) { + if ((host->first_pending = tmp->next) == NULL) + host->last_pending = NULL; + } else { + prev->next = tmp->next; + if (tmp == host->last_pending) + host->last_pending = prev; + } + tmp->hastat = HOST_ABORTED; + tmp->flags |= SCF_DONE; + if (tmp->flags & SCF_POST) + (*tmp->post) ((u8 *) host, (u8 *) tmp); + spin_unlock_irqrestore(&host->semaph_lock, flags); + return SCSI_ABORT_SUCCESS; + } + prev = tmp; + tmp = tmp->next; + } + + prev = tmp = host->first_busy; /* Check Busy queue */ + while (tmp != NULL) { + if (tmp->srb == srbp) { + if (tmp == host->active) { + spin_unlock_irqrestore(&host->semaph_lock, flags); + return SCSI_ABORT_BUSY; + } else if (tmp->tagmsg == 0) { + spin_unlock_irqrestore(&host->semaph_lock, flags); + return SCSI_ABORT_BUSY; + } else { + host->act_tags[tmp->target]--; + if (tmp == host->first_busy) { + if ((host->first_busy = tmp->next) == NULL) + host->last_busy = NULL; + } else { + prev->next = tmp->next; + if (tmp == host->last_busy) + host->last_busy = prev; + } + tmp->next = NULL; + + + tmp->hastat = HOST_ABORTED; + tmp->flags |= SCF_DONE; + if (tmp->flags & SCF_POST) + (*tmp->post) ((u8 *) host, (u8 *) tmp); + spin_unlock_irqrestore(&host->semaph_lock, flags); + return SCSI_ABORT_SUCCESS; + } + } + prev = tmp; + tmp = tmp->next; + } + spin_unlock_irqrestore(&host->semaph_lock, flags); + return SCSI_ABORT_NOT_RUNNING; +} + +/***************************************************************************/ +static int initio_bad_seq(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb; + + printk("initio_bad_seg c=%d\n", host->index); + + if ((scb = host->active) != NULL) { + initio_unlink_busy_scb(host, scb); + scb->hastat = HOST_BAD_PHAS; + scb->tastat = 0; + initio_append_done_scb(host, scb); + } + initio_stop_bm(host); + initio_reset_scsi(host, 8); /* 7/29/98 */ + return initio_post_scsi_rst(host); +} + + +/************************************************************************/ +static void initio_exec_scb(struct initio_host * host, struct scsi_ctrl_blk * scb) +{ + unsigned long flags; + + scb->mode = 0; + + scb->sgidx = 0; + scb->sgmax = scb->sglen; + + spin_lock_irqsave(&host->semaph_lock, flags); + + initio_append_pend_scb(host, scb); /* Append this SCB to Pending queue */ + +/* VVVVV 07/21/98 */ + if (host->semaph == 1) { + /* Disable Jasmin SCSI Int */ + outb(0x1F, host->addr + TUL_Mask); + host->semaph = 0; + spin_unlock_irqrestore(&host->semaph_lock, flags); + + tulip_main(host); + + spin_lock_irqsave(&host->semaph_lock, flags); + host->semaph = 1; + outb(0x0F, host->addr + TUL_Mask); + } + spin_unlock_irqrestore(&host->semaph_lock, flags); + return; +} + +/***************************************************************************/ +static int initio_isr(struct initio_host * host) +{ + if (inb(host->addr + TUL_Int) & TSS_INT_PENDING) { + if (host->semaph == 1) { + outb(0x1F, host->addr + TUL_Mask); + /* Disable Tulip SCSI Int */ + host->semaph = 0; + + tulip_main(host); + + host->semaph = 1; + outb(0x0F, host->addr + TUL_Mask); + return 1; + } + } + return 0; +} + 
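+/*
+ * Note on the chip handshake used by initio_exec_scb(), initio_abort_srb()
+ * and initio_isr() above: each caller claims the state machine by clearing
+ * host->semaph and masking the interrupt sources (0x1F -> TUL_Mask) before
+ * calling tulip_main(), then sets host->semaph back to 1 and re-enables the
+ * sources (0x0F -> TUL_Mask) when it is done.  This keeps the polled and
+ * interrupt driven paths from driving the chip at the same time.
+ */
+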
+static int tulip_main(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb; + + for (;;) { + tulip_scsi(host); /* Call tulip_scsi */ + + /* Walk the list of completed SCBs */ + while ((scb = initio_find_done_scb(host)) != NULL) { /* find done entry */ + if (scb->tastat == INI_QUEUE_FULL) { + host->max_tags[scb->target] = + host->act_tags[scb->target] - 1; + scb->tastat = 0; + initio_append_pend_scb(host, scb); + continue; + } + if (!(scb->mode & SCM_RSENS)) { /* not in auto req. sense mode */ + if (scb->tastat == 2) { + + /* clr sync. nego flag */ + + if (scb->flags & SCF_SENSE) { + u8 len; + len = scb->senselen; + if (len == 0) + len = 1; + scb->buflen = scb->senselen; + scb->bufptr = scb->senseptr; + scb->flags &= ~(SCF_SG | SCF_DIR); /* for xfer_data_in */ + /* so, we won't report wrong direction in xfer_data_in, + and won't report HOST_DO_DU in state_6 */ + scb->mode = SCM_RSENS; + scb->ident &= 0xBF; /* Disable Disconnect */ + scb->tagmsg = 0; + scb->tastat = 0; + scb->cdblen = 6; + scb->cdb[0] = SCSICMD_RequestSense; + scb->cdb[1] = 0; + scb->cdb[2] = 0; + scb->cdb[3] = 0; + scb->cdb[4] = len; + scb->cdb[5] = 0; + initio_push_pend_scb(host, scb); + break; + } + } + } else { /* in request sense mode */ + + if (scb->tastat == 2) { /* check contition status again after sending + requset sense cmd 0x3 */ + scb->hastat = HOST_BAD_PHAS; + } + scb->tastat = 2; + } + scb->flags |= SCF_DONE; + if (scb->flags & SCF_POST) { + /* FIXME: only one post method and lose casts */ + (*scb->post) ((u8 *) host, (u8 *) scb); + } + } /* while */ + /* find_active: */ + if (inb(host->addr + TUL_SStatus0) & TSS_INT_PENDING) + continue; + if (host->active) /* return to OS and wait for xfer_done_ISR/Selected_ISR */ + return 1; /* return to OS, enable interrupt */ + /* Check pending SCB */ + if (initio_find_first_pend_scb(host) == NULL) + return 1; /* return to OS, enable interrupt */ + } /* End of for loop */ + /* statement won't reach here */ +} + +static void tulip_scsi(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb; + struct target_control *active_tc; + + /* make sure to service interrupt asap */ + if ((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING) { + host->phase = host->jsstatus0 & TSS_PH_MASK; + host->jsstatus1 = inb(host->addr + TUL_SStatus1); + host->jsint = inb(host->addr + TUL_SInt); + if (host->jsint & TSS_SCSIRST_INT) { /* SCSI bus reset detected */ + int_initio_scsi_rst(host); + return; + } + if (host->jsint & TSS_RESEL_INT) { /* if selected/reselected interrupt */ + if (int_initio_resel(host) == 0) + initio_next_state(host); + return; + } + if (host->jsint & TSS_SEL_TIMEOUT) { + int_initio_busfree(host); + return; + } + if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */ + int_initio_busfree(host); /* unexpected bus free or sel timeout */ + return; + } + if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */ + if (host->active) + initio_next_state(host); + return; + } + } + if (host->active != NULL) + return; + + if ((scb = initio_find_first_pend_scb(host)) == NULL) + return; + + /* program HBA's SCSI ID & target SCSI ID */ + outb((host->scsi_id << 4) | (scb->target & 0x0F), + host->addr + TUL_SScsiId); + if (scb->opcode == ExecSCSI) { + active_tc = &host->targets[scb->target]; + + if (scb->tagmsg) + active_tc->drv_flags |= TCF_DRV_EN_TAG; + else + active_tc->drv_flags &= ~TCF_DRV_EN_TAG; + + outb(active_tc->js_period, host->addr + TUL_SPeriod); + if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) { /* do wdtr 
negotiation */ + initio_select_atn_stop(host, scb); + } else { + if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { /* do sync negotiation */ + initio_select_atn_stop(host, scb); + } else { + if (scb->tagmsg) + initio_select_atn3(host, scb); + else + initio_select_atn(host, scb); + } + } + if (scb->flags & SCF_POLL) { + while (wait_tulip(host) != -1) { + if (initio_next_state(host) == -1) + break; + } + } + } else if (scb->opcode == BusDevRst) { + initio_select_atn_stop(host, scb); + scb->next_state = 8; + if (scb->flags & SCF_POLL) { + while (wait_tulip(host) != -1) { + if (initio_next_state(host) == -1) + break; + } + } + } else if (scb->opcode == AbortCmd) { + if (initio_abort_srb(host, scb->srb) != 0) { + initio_unlink_pend_scb(host, scb); + initio_release_scb(host, scb); + } else { + scb->opcode = BusDevRst; + initio_select_atn_stop(host, scb); + scb->next_state = 8; + } + } else { + initio_unlink_pend_scb(host, scb); + scb->hastat = 0x16; /* bad command */ + initio_append_done_scb(host, scb); + } + return; +} + +/** + * initio_next_state - Next SCSI state + * @host: InitIO host we are processing + * + * Progress the active command block along the state machine + * until we hit a state which we must wait for activity to occur. + * + * Returns zero or a negative code. + */ + +static int initio_next_state(struct initio_host * host) +{ + int next; + + next = host->active->next_state; + for (;;) { + switch (next) { + case 1: + next = initio_state_1(host); + break; + case 2: + next = initio_state_2(host); + break; + case 3: + next = initio_state_3(host); + break; + case 4: + next = initio_state_4(host); + break; + case 5: + next = initio_state_5(host); + break; + case 6: + next = initio_state_6(host); + break; + case 7: + next = initio_state_7(host); + break; + case 8: + return initio_bus_device_reset(host); + default: + return initio_bad_seq(host); + } + if (next <= 0) + return next; + } +} + + +/** + * initio_state_1 - SCSI state machine + * @host: InitIO host we are controlling + * + * Perform SCSI state processing for Select/Attention/Stop + */ + +static int initio_state_1(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb = host->active; + struct target_control *active_tc = host->active_tc; +#if DEBUG_STATE + printk("-s1-"); +#endif + + /* Move the SCB from pending to busy */ + initio_unlink_pend_scb(host, scb); + initio_append_busy_scb(host, scb); + + outb(active_tc->sconfig0, host->addr + TUL_SConfig ); + /* ATN on */ + if (host->phase == MSG_OUT) { + outb(TSC_EN_BUS_IN | TSC_HW_RESELECT, host->addr + TUL_SCtrl1); + outb(scb->ident, host->addr + TUL_SFifo); + + if (scb->tagmsg) { + outb(scb->tagmsg, host->addr + TUL_SFifo); + outb(scb->tagid, host->addr + TUL_SFifo); + } + if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) { + active_tc->flags |= TCF_WDTR_DONE; + outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo); + outb(2, host->addr + TUL_SFifo); /* Extended msg length */ + outb(EXTENDED_SDTR, host->addr + TUL_SFifo); /* Sync request */ + outb(1, host->addr + TUL_SFifo); /* Start from 16 bits */ + } else if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { + active_tc->flags |= TCF_SYNC_DONE; + outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo); + outb(3, host->addr + TUL_SFifo); /* extended msg length */ + outb(EXTENDED_SDTR, host->addr + TUL_SFifo); /* sync request */ + outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo); + outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */ + } + 
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + if (wait_tulip(host) == -1) + return -1; + } + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); + outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal); + /* Into before CDB xfer */ + return 3; +} + + +/** + * initio_state_2 - SCSI state machine + * @host: InitIO host we are controlling + * + * state after selection with attention + * state after selection with attention3 + */ + +static int initio_state_2(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb = host->active; + struct target_control *active_tc = host->active_tc; +#if DEBUG_STATE + printk("-s2-"); +#endif + + initio_unlink_pend_scb(host, scb); + initio_append_busy_scb(host, scb); + + outb(active_tc->sconfig0, host->addr + TUL_SConfig); + + if (host->jsstatus1 & TSS_CMD_PH_CMP) + return 4; + + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); + outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal); + /* Into before CDB xfer */ + return 3; +} + +/** + * initio_state_3 - SCSI state machine + * @host: InitIO host we are controlling + * + * state before CDB xfer is done + */ + +static int initio_state_3(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb = host->active; + struct target_control *active_tc = host->active_tc; + int i; + +#if DEBUG_STATE + printk("-s3-"); +#endif + for (;;) { + switch (host->phase) { + case CMD_OUT: /* Command out phase */ + for (i = 0; i < (int) scb->cdblen; i++) + outb(scb->cdb[i], host->addr + TUL_SFifo); + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + if (wait_tulip(host) == -1) + return -1; + if (host->phase == CMD_OUT) + return initio_bad_seq(host); + return 4; + + case MSG_IN: /* Message in phase */ + scb->next_state = 3; + if (initio_msgin(host) == -1) + return -1; + break; + + case STATUS_IN: /* Status phase */ + if (initio_status_msg(host) == -1) + return -1; + break; + + case MSG_OUT: /* Message out phase */ + if (active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) { + outb(NOP, host->addr + TUL_SFifo); /* msg nop */ + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + if (wait_tulip(host) == -1) + return -1; + } else { + active_tc->flags |= TCF_SYNC_DONE; + + outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo); + outb(3, host->addr + TUL_SFifo); /* ext. msg len */ + outb(EXTENDED_SDTR, host->addr + TUL_SFifo); /* sync request */ + outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo); + outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */ + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + if (wait_tulip(host) == -1) + return -1; + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); + outb(inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7), host->addr + TUL_SSignal); + + } + break; + default: + return initio_bad_seq(host); + } + } +} + +/** + * initio_state_4 - SCSI state machine + * @host: InitIO host we are controlling + * + * SCSI state machine. 
State 4 + */ + +static int initio_state_4(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb = host->active; + +#if DEBUG_STATE + printk("-s4-"); +#endif + if ((scb->flags & SCF_DIR) == SCF_NO_XF) { + return 6; /* Go to state 6 (After data) */ + } + for (;;) { + if (scb->buflen == 0) + return 6; + + switch (host->phase) { + + case STATUS_IN: /* Status phase */ + if ((scb->flags & SCF_DIR) != 0) /* if direction bit set then report data underrun */ + scb->hastat = HOST_DO_DU; + if ((initio_status_msg(host)) == -1) + return -1; + break; + + case MSG_IN: /* Message in phase */ + scb->next_state = 0x4; + if (initio_msgin(host) == -1) + return -1; + break; + + case MSG_OUT: /* Message out phase */ + if (host->jsstatus0 & TSS_PAR_ERROR) { + scb->buflen = 0; + scb->hastat = HOST_DO_DU; + if (initio_msgout_ide(host) == -1) + return -1; + return 6; + } else { + outb(NOP, host->addr + TUL_SFifo); /* msg nop */ + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + if (wait_tulip(host) == -1) + return -1; + } + break; + + case DATA_IN: /* Data in phase */ + return initio_xfer_data_in(host); + + case DATA_OUT: /* Data out phase */ + return initio_xfer_data_out(host); + + default: + return initio_bad_seq(host); + } + } +} + + +/** + * initio_state_5 - SCSI state machine + * @host: InitIO host we are controlling + * + * State after dma xfer done or phase change before xfer done + */ + +static int initio_state_5(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb = host->active; + long cnt, xcnt; /* cannot use unsigned !! code: if (xcnt < 0) */ + +#if DEBUG_STATE + printk("-s5-"); +#endif + /*------ get remaining count -------*/ + cnt = inl(host->addr + TUL_SCnt0) & 0x0FFFFFF; + + if (inb(host->addr + TUL_XCmd) & 0x20) { + /* ----------------------- DATA_IN ----------------------------- */ + /* check scsi parity error */ + if (host->jsstatus0 & TSS_PAR_ERROR) + scb->hastat = HOST_DO_DU; + if (inb(host->addr + TUL_XStatus) & XPEND) { /* DMA xfer pending, Send STOP */ + /* tell Hardware scsi xfer has been terminated */ + outb(inb(host->addr + TUL_XCtrl) | 0x80, host->addr + TUL_XCtrl); + /* wait until DMA xfer not pending */ + while (inb(host->addr + TUL_XStatus) & XPEND) + cpu_relax(); + } + } else { + /*-------- DATA OUT -----------*/ + if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0) { + if (host->active_tc->js_period & TSC_WIDE_SCSI) + cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F) << 1; + else + cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F); + } + if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */ + outb(TAX_X_ABT, host->addr + TUL_XCmd); + /* wait Abort DMA xfer done */ + while ((inb(host->addr + TUL_Int) & XABT) == 0) + cpu_relax(); + } + if ((cnt == 1) && (host->phase == DATA_OUT)) { + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + if (wait_tulip(host) == -1) + return -1; + cnt = 0; + } else { + if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0) + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); + } + } + if (cnt == 0) { + scb->buflen = 0; + return 6; /* After Data */ + } + /* Update active data pointer */ + xcnt = (long) scb->buflen - cnt; /* xcnt== bytes already xferred */ + scb->buflen = (u32) cnt; /* cnt == bytes left to be xferred */ + if (scb->flags & SCF_SG) { + struct sg_entry *sgp; + unsigned long i; + + sgp = &scb->sglist[scb->sgidx]; + for (i = scb->sgidx; i < scb->sgmax; sgp++, i++) { + xcnt -= (long) sgp->len; + if (xcnt < 0) { /* this sgp xfer half done */ + xcnt += (long) sgp->len; /* xcnt == bytes xferred in this sgp */ + 
sgp->data += (u32) xcnt; /* new ptr to be xfer */ + sgp->len -= (u32) xcnt; /* new len to be xfer */ + scb->bufptr += ((u32) (i - scb->sgidx) << 3); + /* new SG table ptr */ + scb->sglen = (u8) (scb->sgmax - i); + /* new SG table len */ + scb->sgidx = (u16) i; + /* for next disc and come in this loop */ + return 4; /* Go to state 4 */ + } + /* else (xcnt >= 0 , i.e. this sgp already xferred */ + } /* for */ + return 6; /* Go to state 6 */ + } else { + scb->bufptr += (u32) xcnt; + } + return 4; /* Go to state 4 */ +} + +/** + * initio_state_6 - SCSI state machine + * @host: InitIO host we are controlling + * + * State after Data phase + */ + +static int initio_state_6(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb = host->active; + +#if DEBUG_STATE + printk("-s6-"); +#endif + for (;;) { + switch (host->phase) { + case STATUS_IN: /* Status phase */ + if ((initio_status_msg(host)) == -1) + return -1; + break; + + case MSG_IN: /* Message in phase */ + scb->next_state = 6; + if ((initio_msgin(host)) == -1) + return -1; + break; + + case MSG_OUT: /* Message out phase */ + outb(NOP, host->addr + TUL_SFifo); /* msg nop */ + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + if (wait_tulip(host) == -1) + return -1; + break; + + case DATA_IN: /* Data in phase */ + return initio_xpad_in(host); + + case DATA_OUT: /* Data out phase */ + return initio_xpad_out(host); + + default: + return initio_bad_seq(host); + } + } +} + +/** + * initio_state_7 - SCSI state machine + * @host: InitIO host we are controlling + * + */ + +static int initio_state_7(struct initio_host * host) +{ + int cnt, i; + +#if DEBUG_STATE + printk("-s7-"); +#endif + /* flush SCSI FIFO */ + cnt = inb(host->addr + TUL_SFifoCnt) & 0x1F; + if (cnt) { + for (i = 0; i < cnt; i++) + inb(host->addr + TUL_SFifo); + } + switch (host->phase) { + case DATA_IN: /* Data in phase */ + case DATA_OUT: /* Data out phase */ + return initio_bad_seq(host); + default: + return 6; /* Go to state 6 */ + } +} + +/** + * initio_xfer_data_in - Commence data input + * @host: InitIO host in use + * + * Commence a block of data transfer. The transfer itself will + * be managed by the controller and we will get a completion (or + * failure) interrupt. + */ +static int initio_xfer_data_in(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb = host->active; + + if ((scb->flags & SCF_DIR) == SCF_DOUT) + return 6; /* wrong direction */ + + outl(scb->buflen, host->addr + TUL_SCnt0); + outb(TSC_XF_DMA_IN, host->addr + TUL_SCmd); /* 7/25/95 */ + + if (scb->flags & SCF_SG) { /* S/G xfer */ + outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH); + outl(scb->bufptr, host->addr + TUL_XAddH); + outb(TAX_SG_IN, host->addr + TUL_XCmd); + } else { + outl(scb->buflen, host->addr + TUL_XCntH); + outl(scb->bufptr, host->addr + TUL_XAddH); + outb(TAX_X_IN, host->addr + TUL_XCmd); + } + scb->next_state = 0x5; + return 0; /* return to OS, wait xfer done , let jas_isr come in */ +} + +/** + * initio_xfer_data_out - Commence data output + * @host: InitIO host in use + * + * Commence a block of data transfer. The transfer itself will + * be managed by the controller and we will get a completion (or + * failure) interrupt. 
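+ * The byte count and the buffer (or scatter/gather table) address are
+ * programmed into the chip and the SCB is left in state 5 so that the
+ * completion interrupt resumes processing there.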
+ */ + +static int initio_xfer_data_out(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb = host->active; + + if ((scb->flags & SCF_DIR) == SCF_DIN) + return 6; /* wrong direction */ + + outl(scb->buflen, host->addr + TUL_SCnt0); + outb(TSC_XF_DMA_OUT, host->addr + TUL_SCmd); + + if (scb->flags & SCF_SG) { /* S/G xfer */ + outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH); + outl(scb->bufptr, host->addr + TUL_XAddH); + outb(TAX_SG_OUT, host->addr + TUL_XCmd); + } else { + outl(scb->buflen, host->addr + TUL_XCntH); + outl(scb->bufptr, host->addr + TUL_XAddH); + outb(TAX_X_OUT, host->addr + TUL_XCmd); + } + + scb->next_state = 0x5; + return 0; /* return to OS, wait xfer done , let jas_isr come in */ +} + +int initio_xpad_in(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb = host->active; + struct target_control *active_tc = host->active_tc; + + if ((scb->flags & SCF_DIR) != SCF_NO_DCHK) + scb->hastat = HOST_DO_DU; /* over run */ + for (;;) { + if (active_tc->js_period & TSC_WIDE_SCSI) + outl(2, host->addr + TUL_SCnt0); + else + outl(1, host->addr + TUL_SCnt0); + + outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); + if (wait_tulip(host) == -1) + return -1; + if (host->phase != DATA_IN) { + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); + return 6; + } + inb(host->addr + TUL_SFifo); + } +} + +int initio_xpad_out(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb = host->active; + struct target_control *active_tc = host->active_tc; + + if ((scb->flags & SCF_DIR) != SCF_NO_DCHK) + scb->hastat = HOST_DO_DU; /* over run */ + for (;;) { + if (active_tc->js_period & TSC_WIDE_SCSI) + outl(2, host->addr + TUL_SCnt0); + else + outl(1, host->addr + TUL_SCnt0); + + outb(0, host->addr + TUL_SFifo); + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + if ((wait_tulip(host)) == -1) + return -1; + if (host->phase != DATA_OUT) { /* Disable wide CPU to allow read 16 bits */ + outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); + return 6; + } + } +} + +int initio_status_msg(struct initio_host * host) +{ /* status & MSG_IN */ + struct scsi_ctrl_blk *scb = host->active; + u8 msg; + + outb(TSC_CMD_COMP, host->addr + TUL_SCmd); + if (wait_tulip(host) == -1) + return -1; + + /* get status */ + scb->tastat = inb(host->addr + TUL_SFifo); + + if (host->phase == MSG_OUT) { + if (host->jsstatus0 & TSS_PAR_ERROR) + outb(MSG_PARITY_ERROR, host->addr + TUL_SFifo); + else + outb(NOP, host->addr + TUL_SFifo); + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + return wait_tulip(host); + } + if (host->phase == MSG_IN) { + msg = inb(host->addr + TUL_SFifo); + if (host->jsstatus0 & TSS_PAR_ERROR) { /* Parity error */ + if ((initio_msgin_accept(host)) == -1) + return -1; + if (host->phase != MSG_OUT) + return initio_bad_seq(host); + outb(MSG_PARITY_ERROR, host->addr + TUL_SFifo); + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + return wait_tulip(host); + } + if (msg == 0) { /* Command complete */ + + if ((scb->tastat & 0x18) == 0x10) /* No link support */ + return initio_bad_seq(host); + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); + outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd); + return initio_wait_done_disc(host); + + } + if (msg == LINKED_CMD_COMPLETE || + msg == LINKED_FLG_CMD_COMPLETE) { + if ((scb->tastat & 0x18) == 0x10) + return initio_msgin_accept(host); + } + } + return initio_bad_seq(host); +} + + +/* scsi bus free */ +int int_initio_busfree(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb = host->active; + + if (scb != NULL) { + if (scb->status 
& SCB_SELECT) { /* selection timeout */ + initio_unlink_pend_scb(host, scb); + scb->hastat = HOST_SEL_TOUT; + initio_append_done_scb(host, scb); + } else { /* Unexpected bus free */ + initio_unlink_busy_scb(host, scb); + scb->hastat = HOST_BUS_FREE; + initio_append_done_scb(host, scb); + } + host->active = NULL; + host->active_tc = NULL; + } + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */ + outb(TSC_INITDEFAULT, host->addr + TUL_SConfig); + outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */ + return -1; +} + + +/** + * int_initio_scsi_rst - SCSI reset occurred + * @host: Host seeing the reset + * + * A SCSI bus reset has occurred. Clean up any pending transfer + * the hardware is doing by DMA and then abort all active and + * disconnected commands. The mid layer should sort the rest out + * for us + */ + +static int int_initio_scsi_rst(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb; + int i; + + /* if DMA xfer is pending, abort DMA xfer */ + if (inb(host->addr + TUL_XStatus) & 0x01) { + outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd); + /* wait Abort DMA xfer done */ + while ((inb(host->addr + TUL_Int) & 0x04) == 0) + cpu_relax(); + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); + } + /* Abort all active & disconnected scb */ + while ((scb = initio_pop_busy_scb(host)) != NULL) { + scb->hastat = HOST_BAD_PHAS; + initio_append_done_scb(host, scb); + } + host->active = NULL; + host->active_tc = NULL; + + /* clr sync nego. done flag */ + for (i = 0; i < host->max_tar; i++) + host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE); + return -1; +} + +/** + * int_initio_resel - Reselection occurred + * @host: InitIO host adapter + * + * A SCSI reselection event has been signalled and the interrupt + * is now being processed. Work out which command block needs attention + * and continue processing that command. + */ + +int int_initio_resel(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb; + struct target_control *active_tc; + u8 tag, msg = 0; + u8 tar, lun; + + if ((scb = host->active) != NULL) { + /* FIXME: Why check and not just clear ? */ + if (scb->status & SCB_SELECT) /* if waiting for selection complete */ + scb->status &= ~SCB_SELECT; + host->active = NULL; + } + /* --------- get target id---------------------- */ + tar = inb(host->addr + TUL_SBusId); + /* ------ get LUN from Identify message----------- */ + lun = inb(host->addr + TUL_SIdent) & 0x0F; + /* 07/22/98 from 0x1F -> 0x0F */ + active_tc = &host->targets[tar]; + host->active_tc = active_tc; + outb(active_tc->sconfig0, host->addr + TUL_SConfig); + outb(active_tc->js_period, host->addr + TUL_SPeriod); + + /* ------------- tag queueing ? 
------------------- */ + if (active_tc->drv_flags & TCF_DRV_EN_TAG) { + if ((initio_msgin_accept(host)) == -1) + return -1; + if (host->phase != MSG_IN) + goto no_tag; + outl(1, host->addr + TUL_SCnt0); + outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); + if (wait_tulip(host) == -1) + return -1; + msg = inb(host->addr + TUL_SFifo); /* Read Tag Message */ + + if (msg < SIMPLE_QUEUE_TAG || msg > ORDERED_QUEUE_TAG) + /* Is simple Tag */ + goto no_tag; + + if (initio_msgin_accept(host) == -1) + return -1; + + if (host->phase != MSG_IN) + goto no_tag; + + outl(1, host->addr + TUL_SCnt0); + outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); + if (wait_tulip(host) == -1) + return -1; + tag = inb(host->addr + TUL_SFifo); /* Read Tag ID */ + scb = host->scb + tag; + if (scb->target != tar || scb->lun != lun) { + return initio_msgout_abort_tag(host); + } + if (scb->status != SCB_BUSY) { /* 03/24/95 */ + return initio_msgout_abort_tag(host); + } + host->active = scb; + if ((initio_msgin_accept(host)) == -1) + return -1; + } else { /* No tag */ + no_tag: + if ((scb = initio_find_busy_scb(host, tar | (lun << 8))) == NULL) { + return initio_msgout_abort_targ(host); + } + host->active = scb; + if (!(active_tc->drv_flags & TCF_DRV_EN_TAG)) { + if ((initio_msgin_accept(host)) == -1) + return -1; + } + } + return 0; +} + +/** + * int_initio_bad_seq - out of phase + * @host: InitIO host flagging event + * + * We have ended up out of phase somehow. Reset the host controller + * and throw all our toys out of the pram. Let the midlayer clean up + */ + +static int int_initio_bad_seq(struct initio_host * host) +{ /* target wrong phase */ + struct scsi_ctrl_blk *scb; + int i; + + initio_reset_scsi(host, 10); + + while ((scb = initio_pop_busy_scb(host)) != NULL) { + scb->hastat = HOST_BAD_PHAS; + initio_append_done_scb(host, scb); + } + for (i = 0; i < host->max_tar; i++) + host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE); + return -1; +} + + +/** + * initio_msgout_abort_targ - abort a tag + * @host: InitIO host + * + * Abort when the target/lun does not match or when our SCB is not + * busy. Used by untagged commands. + */ + +static int initio_msgout_abort_targ(struct initio_host * host) +{ + + outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); + if (initio_msgin_accept(host) == -1) + return -1; + if (host->phase != MSG_OUT) + return initio_bad_seq(host); + + outb(ABORT_TASK_SET, host->addr + TUL_SFifo); + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + + return initio_wait_disc(host); +} + +/** + * initio_msgout_abort_tag - abort a tag + * @host: InitIO host + * + * Abort when the target/lun does not match or when our SCB is not + * busy. Used for tagged commands. 
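+ * Unlike initio_msgout_abort_targ(), this sends ABORT_TASK so that only
+ * the offending tagged command is aborted rather than the whole task set.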
+ */ + +static int initio_msgout_abort_tag(struct initio_host * host) +{ + + outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); + if (initio_msgin_accept(host) == -1) + return -1; + if (host->phase != MSG_OUT) + return initio_bad_seq(host); + + outb(ABORT_TASK, host->addr + TUL_SFifo); + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + + return initio_wait_disc(host); + +} + +/** + * initio_msgin - Message in + * @host: InitIO Host + * + * Process incoming message + */ +static int initio_msgin(struct initio_host * host) +{ + struct target_control *active_tc; + + for (;;) { + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); + + outl(1, host->addr + TUL_SCnt0); + outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); + if (wait_tulip(host) == -1) + return -1; + + switch (inb(host->addr + TUL_SFifo)) { + case DISCONNECT: /* Disconnect msg */ + outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd); + return initio_wait_disc(host); + case SAVE_POINTERS: + case RESTORE_POINTERS: + case NOP: + initio_msgin_accept(host); + break; + case MESSAGE_REJECT: /* Clear ATN first */ + outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), + host->addr + TUL_SSignal); + active_tc = host->active_tc; + if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) /* do sync nego */ + outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), + host->addr + TUL_SSignal); + initio_msgin_accept(host); + break; + case EXTENDED_MESSAGE: /* extended msg */ + initio_msgin_extend(host); + break; + case IGNORE_WIDE_RESIDUE: + initio_msgin_accept(host); + break; + case COMMAND_COMPLETE: + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); + outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd); + return initio_wait_done_disc(host); + default: + initio_msgout_reject(host); + break; + } + if (host->phase != MSG_IN) + return host->phase; + } + /* statement won't reach here */ +} + +static int initio_msgout_reject(struct initio_host * host) +{ + outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); + + if (initio_msgin_accept(host) == -1) + return -1; + + if (host->phase == MSG_OUT) { + outb(MESSAGE_REJECT, host->addr + TUL_SFifo); /* Msg reject */ + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + return wait_tulip(host); + } + return host->phase; +} + +static int initio_msgout_ide(struct initio_host * host) +{ + outb(INITIATOR_ERROR, host->addr + TUL_SFifo); /* Initiator Detected Error */ + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + return wait_tulip(host); +} + +static int initio_msgin_extend(struct initio_host * host) +{ + u8 len, idx; + + if (initio_msgin_accept(host) != MSG_IN) + return host->phase; + + /* Get extended msg length */ + outl(1, host->addr + TUL_SCnt0); + outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); + if (wait_tulip(host) == -1) + return -1; + + len = inb(host->addr + TUL_SFifo); + host->msg[0] = len; + for (idx = 1; len != 0; len--) { + + if ((initio_msgin_accept(host)) != MSG_IN) + return host->phase; + outl(1, host->addr + TUL_SCnt0); + outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); + if (wait_tulip(host) == -1) + return -1; + host->msg[idx++] = inb(host->addr + TUL_SFifo); + } + if (host->msg[1] == 1) { /* if it's synchronous data transfer request */ + u8 r; + if (host->msg[0] != 3) /* if length is not right */ + return initio_msgout_reject(host); + if (host->active_tc->flags & TCF_NO_SYNC_NEGO) { /* Set OFFSET=0 to do async, nego back */ + host->msg[3] = 0; + } else { + if (initio_msgin_sync(host) == 0 && + 
(host->active_tc->flags & TCF_SYNC_DONE)) { + initio_sync_done(host); + return initio_msgin_accept(host); + } + } + + r = inb(host->addr + TUL_SSignal); + outb((r & (TSC_SET_ACK | 7)) | TSC_SET_ATN, + host->addr + TUL_SSignal); + if (initio_msgin_accept(host) != MSG_OUT) + return host->phase; + /* sync msg out */ + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); + + initio_sync_done(host); + + outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo); + outb(3, host->addr + TUL_SFifo); + outb(EXTENDED_SDTR, host->addr + TUL_SFifo); + outb(host->msg[2], host->addr + TUL_SFifo); + outb(host->msg[3], host->addr + TUL_SFifo); + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + return wait_tulip(host); + } + if (host->msg[0] != 2 || host->msg[1] != 3) + return initio_msgout_reject(host); + /* if it's WIDE DATA XFER REQ */ + if (host->active_tc->flags & TCF_NO_WDTR) { + host->msg[2] = 0; + } else { + if (host->msg[2] > 2) /* > 32 bits */ + return initio_msgout_reject(host); + if (host->msg[2] == 2) { /* == 32 */ + host->msg[2] = 1; + } else { + if ((host->active_tc->flags & TCF_NO_WDTR) == 0) { + wdtr_done(host); + if ((host->active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) + outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); + return initio_msgin_accept(host); + } + } + } + outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); + + if (initio_msgin_accept(host) != MSG_OUT) + return host->phase; + /* WDTR msg out */ + outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo); + outb(2, host->addr + TUL_SFifo); + outb(EXTENDED_WDTR, host->addr + TUL_SFifo); + outb(host->msg[2], host->addr + TUL_SFifo); + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + return wait_tulip(host); +} + +static int initio_msgin_sync(struct initio_host * host) +{ + char default_period; + + default_period = initio_rate_tbl[host->active_tc->flags & TCF_SCSI_RATE]; + if (host->msg[3] > MAX_OFFSET) { + host->msg[3] = MAX_OFFSET; + if (host->msg[2] < default_period) { + host->msg[2] = default_period; + return 1; + } + if (host->msg[2] >= 59) /* Change to async */ + host->msg[3] = 0; + return 1; + } + /* offset requests asynchronous transfers ? 
*/ + if (host->msg[3] == 0) { + return 0; + } + if (host->msg[2] < default_period) { + host->msg[2] = default_period; + return 1; + } + if (host->msg[2] >= 59) { + host->msg[3] = 0; + return 1; + } + return 0; +} + +static int wdtr_done(struct initio_host * host) +{ + host->active_tc->flags &= ~TCF_SYNC_DONE; + host->active_tc->flags |= TCF_WDTR_DONE; + + host->active_tc->js_period = 0; + if (host->msg[2]) /* if 16 bit */ + host->active_tc->js_period |= TSC_WIDE_SCSI; + host->active_tc->sconfig0 &= ~TSC_ALT_PERIOD; + outb(host->active_tc->sconfig0, host->addr + TUL_SConfig); + outb(host->active_tc->js_period, host->addr + TUL_SPeriod); + + return 1; +} + +static int initio_sync_done(struct initio_host * host) +{ + int i; + + host->active_tc->flags |= TCF_SYNC_DONE; + + if (host->msg[3]) { + host->active_tc->js_period |= host->msg[3]; + for (i = 0; i < 8; i++) { + if (initio_rate_tbl[i] >= host->msg[2]) /* pick the big one */ + break; + } + host->active_tc->js_period |= (i << 4); + host->active_tc->sconfig0 |= TSC_ALT_PERIOD; + } + outb(host->active_tc->sconfig0, host->addr + TUL_SConfig); + outb(host->active_tc->js_period, host->addr + TUL_SPeriod); + + return -1; +} + + +static int initio_post_scsi_rst(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb; + struct target_control *active_tc; + int i; + + host->active = NULL; + host->active_tc = NULL; + host->flags = 0; + + while ((scb = initio_pop_busy_scb(host)) != NULL) { + scb->hastat = HOST_BAD_PHAS; + initio_append_done_scb(host, scb); + } + /* clear sync done flag */ + active_tc = &host->targets[0]; + for (i = 0; i < host->max_tar; active_tc++, i++) { + active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE); + /* Initialize the sync. xfer register values to an asyn xfer */ + active_tc->js_period = 0; + active_tc->sconfig0 = host->sconf1; + host->act_tags[0] = 0; /* 07/22/98 */ + host->targets[i].flags &= ~TCF_BUSY; /* 07/22/98 */ + } /* for */ + + return -1; +} + +static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb) +{ + scb->status |= SCB_SELECT; + scb->next_state = 0x1; + host->active = scb; + host->active_tc = &host->targets[scb->target]; + outb(TSC_SELATNSTOP, host->addr + TUL_SCmd); +} + + +static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb) +{ + int i; + + scb->status |= SCB_SELECT; + scb->next_state = 0x2; + + outb(scb->ident, host->addr + TUL_SFifo); + for (i = 0; i < (int) scb->cdblen; i++) + outb(scb->cdb[i], host->addr + TUL_SFifo); + host->active_tc = &host->targets[scb->target]; + host->active = scb; + outb(TSC_SEL_ATN, host->addr + TUL_SCmd); +} + +static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb) +{ + int i; + + scb->status |= SCB_SELECT; + scb->next_state = 0x2; + + outb(scb->ident, host->addr + TUL_SFifo); + outb(scb->tagmsg, host->addr + TUL_SFifo); + outb(scb->tagid, host->addr + TUL_SFifo); + for (i = 0; i < scb->cdblen; i++) + outb(scb->cdb[i], host->addr + TUL_SFifo); + host->active_tc = &host->targets[scb->target]; + host->active = scb; + outb(TSC_SEL_ATN3, host->addr + TUL_SCmd); +} + +/** + * initio_bus_device_reset - SCSI Bus Device Reset + * @host: InitIO host to reset + * + * Perform a device reset and abort all pending SCBs for the + * victim device + */ +int initio_bus_device_reset(struct initio_host * host) +{ + struct scsi_ctrl_blk *scb = host->active; + struct target_control *active_tc = host->active_tc; + struct scsi_ctrl_blk *tmp, *prev; + u8 tar; + + if (host->phase != MSG_OUT) + return 
int_initio_bad_seq(host); /* Unexpected phase */ + + initio_unlink_pend_scb(host, scb); + initio_release_scb(host, scb); + + + tar = scb->target; /* target */ + active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE | TCF_BUSY); + /* clr sync. nego & WDTR flags 07/22/98 */ + + /* abort all SCB with same target */ + prev = tmp = host->first_busy; /* Check Busy queue */ + while (tmp != NULL) { + if (tmp->target == tar) { + /* unlink it */ + if (tmp == host->first_busy) { + if ((host->first_busy = tmp->next) == NULL) + host->last_busy = NULL; + } else { + prev->next = tmp->next; + if (tmp == host->last_busy) + host->last_busy = prev; + } + tmp->hastat = HOST_ABORTED; + initio_append_done_scb(host, tmp); + } + /* Previous haven't change */ + else { + prev = tmp; + } + tmp = tmp->next; + } + outb(TARGET_RESET, host->addr + TUL_SFifo); + outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); + return initio_wait_disc(host); + +} + +static int initio_msgin_accept(struct initio_host * host) +{ + outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd); + return wait_tulip(host); +} + +static int wait_tulip(struct initio_host * host) +{ + + while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) + & TSS_INT_PENDING)) + cpu_relax(); + + host->jsint = inb(host->addr + TUL_SInt); + host->phase = host->jsstatus0 & TSS_PH_MASK; + host->jsstatus1 = inb(host->addr + TUL_SStatus1); + + if (host->jsint & TSS_RESEL_INT) /* if SCSI bus reset detected */ + return int_initio_resel(host); + if (host->jsint & TSS_SEL_TIMEOUT) /* if selected/reselected timeout interrupt */ + return int_initio_busfree(host); + if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */ + return int_initio_scsi_rst(host); + + if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */ + if (host->flags & HCF_EXPECT_DONE_DISC) { + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */ + initio_unlink_busy_scb(host, host->active); + host->active->hastat = 0; + initio_append_done_scb(host, host->active); + host->active = NULL; + host->active_tc = NULL; + host->flags &= ~HCF_EXPECT_DONE_DISC; + outb(TSC_INITDEFAULT, host->addr + TUL_SConfig); + outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */ + return -1; + } + if (host->flags & HCF_EXPECT_DISC) { + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */ + host->active = NULL; + host->active_tc = NULL; + host->flags &= ~HCF_EXPECT_DISC; + outb(TSC_INITDEFAULT, host->addr + TUL_SConfig); + outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */ + return -1; + } + return int_initio_busfree(host); + } + /* The old code really does the below. 
Can probably be removed */ + if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) + return host->phase; + return host->phase; +} + +static int initio_wait_disc(struct initio_host * host) +{ + while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING)) + cpu_relax(); + + host->jsint = inb(host->addr + TUL_SInt); + + if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */ + return int_initio_scsi_rst(host); + if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */ + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */ + outb(TSC_INITDEFAULT, host->addr + TUL_SConfig); + outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */ + host->active = NULL; + return -1; + } + return initio_bad_seq(host); +} + +static int initio_wait_done_disc(struct initio_host * host) +{ + while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) + & TSS_INT_PENDING)) + cpu_relax(); + + host->jsint = inb(host->addr + TUL_SInt); + + if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */ + return int_initio_scsi_rst(host); + if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */ + outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */ + outb(TSC_INITDEFAULT, host->addr + TUL_SConfig); + outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */ + initio_unlink_busy_scb(host, host->active); + + initio_append_done_scb(host, host->active); + host->active = NULL; + return -1; + } + return initio_bad_seq(host); +} + +/** + * i91u_intr - IRQ handler + * @irqno: IRQ number + * @dev_id: IRQ identifier + * + * Take the relevant locks and then invoke the actual isr processing + * code under the lock. + */ + +static irqreturn_t i91u_intr(int irqno, void *dev_id) +{ + struct Scsi_Host *dev = dev_id; + unsigned long flags; + int r; + + spin_lock_irqsave(dev->host_lock, flags); + r = initio_isr((struct initio_host *)dev->hostdata); + spin_unlock_irqrestore(dev->host_lock, flags); + if (r) + return IRQ_HANDLED; + else + return IRQ_NONE; +} + + +/** + * initio_build_scb - Build the mappings and SCB + * @host: InitIO host taking the command + * @cblk: Firmware command block + * @cmnd: SCSI midlayer command block + * + * Translate the abstract SCSI command into a firmware command block + * suitable for feeding to the InitIO host controller. This also requires + * we build the scatter gather lists and ensure they are mapped properly. 
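+ *
+ * The sense buffer and, for scatter/gather commands, the sg_entry table
+ * are mapped with dma_map_single() here and unmapped again by
+ * i91u_unmap_scb() when the command completes.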
+ */ + +static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * cblk, struct scsi_cmnd * cmnd) +{ /* Create corresponding SCB */ + struct scatterlist *sglist; + struct sg_entry *sg; /* Pointer to SG list */ + int i, nseg; + long total_len; + dma_addr_t dma_addr; + + /* Fill in the command headers */ + cblk->post = i91uSCBPost; /* i91u's callback routine */ + cblk->srb = cmnd; + cblk->opcode = ExecSCSI; + cblk->flags = SCF_POST; /* After SCSI done, call post routine */ + cblk->target = cmnd->device->id; + cblk->lun = cmnd->device->lun; + cblk->ident = cmnd->device->lun | DISC_ALLOW; + + cblk->flags |= SCF_SENSE; /* Turn on auto request sense */ + + /* Map the sense buffer into bus memory */ + dma_addr = dma_map_single(&host->pci_dev->dev, cmnd->sense_buffer, + SENSE_SIZE, DMA_FROM_DEVICE); + cblk->senseptr = (u32)dma_addr; + cblk->senselen = SENSE_SIZE; + initio_priv(cmnd)->sense_dma_addr = dma_addr; + cblk->cdblen = cmnd->cmd_len; + + /* Clear the returned status */ + cblk->hastat = 0; + cblk->tastat = 0; + /* Command the command */ + memcpy(cblk->cdb, cmnd->cmnd, cmnd->cmd_len); + + /* Set up tags */ + if (cmnd->device->tagged_supported) { /* Tag Support */ + cblk->tagmsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */ + } else { + cblk->tagmsg = 0; /* No tag support */ + } + + /* todo handle map_sg error */ + nseg = scsi_dma_map(cmnd); + BUG_ON(nseg < 0); + if (nseg) { + dma_addr = dma_map_single(&host->pci_dev->dev, &cblk->sglist[0], + sizeof(struct sg_entry) * TOTAL_SG_ENTRY, + DMA_BIDIRECTIONAL); + cblk->bufptr = (u32)dma_addr; + initio_priv(cmnd)->sglist_dma_addr = dma_addr; + + cblk->sglen = nseg; + + cblk->flags |= SCF_SG; /* Turn on SG list flag */ + total_len = 0; + sg = &cblk->sglist[0]; + scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) { + sg->data = cpu_to_le32((u32)sg_dma_address(sglist)); + sg->len = cpu_to_le32((u32)sg_dma_len(sglist)); + total_len += sg_dma_len(sglist); + ++sg; + } + + cblk->buflen = (scsi_bufflen(cmnd) > total_len) ? + total_len : scsi_bufflen(cmnd); + } else { /* No data transfer required */ + cblk->buflen = 0; + cblk->sglen = 0; + } +} + +/** + * i91u_queuecommand_lck - Queue a new command if possible + * @cmd: SCSI command block from the mid layer + * + * Attempts to queue a new command with the host adapter. 
Will return + * zero if successful or indicate a host busy condition if not (which + * will cause the mid layer to call us again later with the command) + */ +static int i91u_queuecommand_lck(struct scsi_cmnd *cmd) +{ + struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata; + struct scsi_ctrl_blk *cmnd; + + cmnd = initio_alloc_scb(host); + if (!cmnd) + return SCSI_MLQUEUE_HOST_BUSY; + + initio_build_scb(host, cmnd, cmd); + initio_exec_scb(host, cmnd); + return 0; +} + +static DEF_SCSI_QCMD(i91u_queuecommand) + +/** + * i91u_bus_reset - reset the SCSI bus + * @cmnd: Command block we want to trigger the reset for + * + * Initiate a SCSI bus reset sequence + */ + +static int i91u_bus_reset(struct scsi_cmnd * cmnd) +{ + struct initio_host *host; + + host = (struct initio_host *) cmnd->device->host->hostdata; + + spin_lock_irq(cmnd->device->host->host_lock); + initio_reset_scsi(host, 0); + spin_unlock_irq(cmnd->device->host->host_lock); + + return SUCCESS; +} + +/** + * i91u_biosparam - return the "logical geometry + * @sdev: SCSI device + * @dev: Matching block device + * @capacity: Sector size of drive + * @info_array: Return space for BIOS geometry + * + * Map the device geometry in a manner compatible with the host + * controller BIOS behaviour. + * + * FIXME: limited to 2^32 sector devices. + */ + +static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev, + sector_t capacity, int *info_array) +{ + struct initio_host *host; /* Point to Host adapter control block */ + struct target_control *tc; + + host = (struct initio_host *) sdev->host->hostdata; + tc = &host->targets[sdev->id]; + + if (tc->heads) { + info_array[0] = tc->heads; + info_array[1] = tc->sectors; + info_array[2] = (unsigned long)capacity / tc->heads / tc->sectors; + } else { + if (tc->drv_flags & TCF_DRV_255_63) { + info_array[0] = 255; + info_array[1] = 63; + info_array[2] = (unsigned long)capacity / 255 / 63; + } else { + info_array[0] = 64; + info_array[1] = 32; + info_array[2] = (unsigned long)capacity >> 11; + } + } + +#if defined(DEBUG_BIOSPARAM) + if (i91u_debug & debug_biosparam) { + printk("bios geometry: head=%d, sec=%d, cyl=%d\n", + info_array[0], info_array[1], info_array[2]); + printk("WARNING: check, if the bios geometry is correct.\n"); + } +#endif + + return 0; +} + +/** + * i91u_unmap_scb - Unmap a command + * @pci_dev: PCI device the command is for + * @cmnd: The command itself + * + * Unmap any PCI mapping/IOMMU resources allocated when the command + * was mapped originally as part of initio_build_scb + */ + +static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd) +{ + /* auto sense buffer */ + if (initio_priv(cmnd)->sense_dma_addr) { + dma_unmap_single(&pci_dev->dev, + initio_priv(cmnd)->sense_dma_addr, + SENSE_SIZE, DMA_FROM_DEVICE); + initio_priv(cmnd)->sense_dma_addr = 0; + } + + /* request buffer */ + if (scsi_sg_count(cmnd)) { + dma_unmap_single(&pci_dev->dev, + initio_priv(cmnd)->sglist_dma_addr, + sizeof(struct sg_entry) * TOTAL_SG_ENTRY, + DMA_BIDIRECTIONAL); + + scsi_dma_unmap(cmnd); + } +} + +/* + * i91uSCBPost - SCSI callback + * + * This is callback routine be called when tulip finish one + * SCSI command. 
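+ *
+ * The firmware completion code in hastat is translated to a mid layer
+ * DID_* value and packed into the result with the SCSI status byte in
+ * bits 0-7 and the host byte in bits 16-23:
+ *
+ *	cmnd->result = cblk->tastat | (cblk->hastat << 16);
+ *
+ * after which scsi_done() is called and the SCB is released.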
+ */ + +static void i91uSCBPost(u8 * host_mem, u8 * cblk_mem) +{ + struct scsi_cmnd *cmnd; /* Pointer to SCSI request block */ + struct initio_host *host; + struct scsi_ctrl_blk *cblk; + + host = (struct initio_host *) host_mem; + cblk = (struct scsi_ctrl_blk *) cblk_mem; + if ((cmnd = cblk->srb) == NULL) { + printk(KERN_ERR "i91uSCBPost: SRB pointer is empty\n"); + WARN_ON(1); + initio_release_scb(host, cblk); /* Release SCB for current channel */ + return; + } + + /* + * Remap the firmware error status into a mid layer one + */ + switch (cblk->hastat) { + case 0x0: + case 0xa: /* Linked command complete without error and linked normally */ + case 0xb: /* Linked command complete without error interrupt generated */ + cblk->hastat = 0; + break; + + case 0x11: /* Selection time out-The initiator selection or target + reselection was not complete within the SCSI Time out period */ + cblk->hastat = DID_TIME_OUT; + break; + + case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus + phase sequence was requested by the target. The host adapter + will generate a SCSI Reset Condition, notifying the host with + a SCRD interrupt */ + cblk->hastat = DID_RESET; + break; + + case 0x1a: /* SCB Aborted. 07/21/98 */ + cblk->hastat = DID_ABORT; + break; + + case 0x12: /* Data overrun/underrun-The target attempted to transfer more data + than was allocated by the Data Length field or the sum of the + Scatter / Gather Data Length fields. */ + case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */ + case 0x16: /* Invalid SCB Operation Code. */ + + default: + printk("ini9100u: %x %x\n", cblk->hastat, cblk->tastat); + cblk->hastat = DID_ERROR; /* Couldn't find any better */ + break; + } + + cmnd->result = cblk->tastat | (cblk->hastat << 16); + i91u_unmap_scb(host->pci_dev, cmnd); + scsi_done(cmnd); /* Notify system DONE */ + initio_release_scb(host, cblk); /* Release SCB for current channel */ +} + +static const struct scsi_host_template initio_template = { + .proc_name = "INI9100U", + .name = "Initio INI-9X00U/UW SCSI device driver", + .queuecommand = i91u_queuecommand, + .eh_bus_reset_handler = i91u_bus_reset, + .bios_param = i91u_biosparam, + .can_queue = MAX_TARGETS * i91u_MAXQUEUE, + .this_id = 1, + .sg_tablesize = SG_ALL, + .cmd_size = sizeof(struct initio_cmd_priv), +}; + +static int initio_probe_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct Scsi_Host *shost; + struct initio_host *host; + u32 reg; + u16 bios_seg; + struct scsi_ctrl_blk *scb, *tmp, *prev = NULL /* silence gcc */; + int num_scb, i, error; + + error = pci_enable_device(pdev); + if (error) + return error; + + pci_read_config_dword(pdev, 0x44, (u32 *) & reg); + bios_seg = (u16) (reg & 0xFF); + if (((reg & 0xFF00) >> 8) == 0xFF) + reg = 0; + bios_seg = (bios_seg << 8) + ((u16) ((reg & 0xFF00) >> 8)); + + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { + printk(KERN_WARNING "i91u: Could not set 32 bit DMA mask\n"); + error = -ENODEV; + goto out_disable_device; + } + shost = scsi_host_alloc(&initio_template, sizeof(struct initio_host)); + if (!shost) { + printk(KERN_WARNING "initio: Could not allocate host structure.\n"); + error = -ENOMEM; + goto out_disable_device; + } + host = (struct initio_host *)shost->hostdata; + memset(host, 0, sizeof(struct initio_host)); + host->addr = pci_resource_start(pdev, 0); + host->bios_addr = bios_seg; + + if (!request_region(host->addr, 256, "i91u")) { + printk(KERN_WARNING "initio: I/O port range 0x%x is busy.\n", 
host->addr); + error = -ENODEV; + goto out_host_put; + } + + if (initio_tag_enable) /* 1.01i */ + num_scb = MAX_TARGETS * i91u_MAXQUEUE; + else + num_scb = MAX_TARGETS + 3; /* 1-tape, 1-CD_ROM, 1- extra */ + + for (; num_scb >= MAX_TARGETS + 3; num_scb--) { + i = num_scb * sizeof(struct scsi_ctrl_blk); + scb = kzalloc(i, GFP_KERNEL); + if (scb) + break; + } + + if (!scb) { + printk(KERN_WARNING "initio: Cannot allocate SCB array.\n"); + error = -ENOMEM; + goto out_release_region; + } + + host->pci_dev = pdev; + + host->semaph = 1; + spin_lock_init(&host->semaph_lock); + host->num_scbs = num_scb; + host->scb = scb; + host->next_pending = scb; + host->next_avail = scb; + for (i = 0, tmp = scb; i < num_scb; i++, tmp++) { + tmp->tagid = i; + if (i != 0) + prev->next = tmp; + prev = tmp; + } + prev->next = NULL; + host->scb_end = tmp; + host->first_avail = scb; + host->last_avail = prev; + spin_lock_init(&host->avail_lock); + + initio_init(host, phys_to_virt(((u32)bios_seg << 4))); + + host->jsstatus0 = 0; + + shost->io_port = host->addr; + shost->n_io_port = 0xff; + shost->can_queue = num_scb; /* 03/05/98 */ + shost->unique_id = host->addr; + shost->max_id = host->max_tar; + shost->max_lun = 32; /* 10/21/97 */ + shost->irq = pdev->irq; + shost->this_id = host->scsi_id; /* Assign HCS index */ + shost->base = host->addr; + shost->sg_tablesize = TOTAL_SG_ENTRY; + + error = request_irq(pdev->irq, i91u_intr, IRQF_SHARED, "i91u", shost); + if (error < 0) { + printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq); + goto out_free_scbs; + } + + pci_set_drvdata(pdev, shost); + + error = scsi_add_host(shost, &pdev->dev); + if (error) + goto out_free_irq; + scsi_scan_host(shost); + return 0; +out_free_irq: + free_irq(pdev->irq, shost); +out_free_scbs: + kfree(host->scb); +out_release_region: + release_region(host->addr, 256); +out_host_put: + scsi_host_put(shost); +out_disable_device: + pci_disable_device(pdev); + return error; +} + +/** + * initio_remove_one - control shutdown + * @pdev: PCI device being released + * + * Release the resources assigned to this adapter after it has + * finished being used. + */ + +static void initio_remove_one(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + struct initio_host *s = (struct initio_host *)host->hostdata; + scsi_remove_host(host); + free_irq(pdev->irq, host); + release_region(s->addr, 256); + scsi_host_put(host); + pci_disable_device(pdev); +} + +MODULE_LICENSE("GPL"); + +static struct pci_device_id initio_pci_tbl[] = { + {PCI_VENDOR_ID_INIT, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_INIT, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_INIT, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_INIT, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_DOMEX, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, initio_pci_tbl); + +static struct pci_driver initio_pci_driver = { + .name = "initio", + .id_table = initio_pci_tbl, + .probe = initio_probe_one, + .remove = initio_remove_one, +}; +module_pci_driver(initio_pci_driver); + +MODULE_DESCRIPTION("Initio INI-9X00U/UW SCSI device driver"); +MODULE_AUTHOR("Initio Corporation"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/initio.h b/drivers/scsi/initio.h new file mode 100644 index 000000000..7c9741552 --- /dev/null +++ b/drivers/scsi/initio.h @@ -0,0 +1,651 @@ +/************************************************************************** + * Initio 9100 device driver for Linux. 
+ * + * Copyright (c) 1994-1998 Initio Corporation + * All rights reserved. + * + * Cleanups (c) Copyright 2007 Red Hat + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + **************************************************************************/ + + +#include + +#define TOTAL_SG_ENTRY 32 +#define MAX_SUPPORTED_ADAPTERS 8 +#define MAX_OFFSET 15 +#define MAX_TARGETS 16 + +typedef struct { + unsigned short base; + unsigned short vec; +} i91u_config; + +/***************************************/ +/* Tulip Configuration Register Set */ +/***************************************/ +#define TUL_PVID 0x00 /* Vendor ID */ +#define TUL_PDID 0x02 /* Device ID */ +#define TUL_PCMD 0x04 /* Command */ +#define TUL_PSTUS 0x06 /* Status */ +#define TUL_PRID 0x08 /* Revision number */ +#define TUL_PPI 0x09 /* Programming interface */ +#define TUL_PSC 0x0A /* Sub Class */ +#define TUL_PBC 0x0B /* Base Class */ +#define TUL_PCLS 0x0C /* Cache line size */ +#define TUL_PLTR 0x0D /* Latency timer */ +#define TUL_PHDT 0x0E /* Header type */ +#define TUL_PBIST 0x0F /* BIST */ +#define TUL_PBAD 0x10 /* Base address */ +#define TUL_PBAD1 0x14 /* Base address */ +#define TUL_PBAD2 0x18 /* Base address */ +#define TUL_PBAD3 0x1C /* Base address */ +#define TUL_PBAD4 0x20 /* Base address */ +#define TUL_PBAD5 0x24 /* Base address */ +#define TUL_PRSVD 0x28 /* Reserved */ +#define TUL_PRSVD1 0x2C /* Reserved */ +#define TUL_PRAD 0x30 /* Expansion ROM base address */ +#define TUL_PRSVD2 0x34 /* Reserved */ +#define TUL_PRSVD3 0x38 /* Reserved */ +#define TUL_PINTL 0x3C /* Interrupt line */ +#define TUL_PINTP 0x3D /* Interrupt pin */ +#define TUL_PIGNT 0x3E /* MIN_GNT */ +#define TUL_PMGNT 0x3F /* MAX_GNT */ + +/************************/ +/* Jasmin Register Set */ +/************************/ +#define TUL_HACFG0 0x40 /* H/A Configuration Register 0 */ +#define TUL_HACFG1 0x41 /* H/A Configuration Register 1 */ +#define TUL_HACFG2 0x42 /* H/A Configuration Register 2 */ + +#define TUL_SDCFG0 0x44 /* SCSI Device Configuration 0 */ +#define TUL_SDCFG1 0x45 /* SCSI Device Configuration 1 */ +#define 
TUL_SDCFG2 0x46 /* SCSI Device Configuration 2 */ +#define TUL_SDCFG3 0x47 /* SCSI Device Configuration 3 */ + +#define TUL_GINTS 0x50 /* Global Interrupt Status Register */ +#define TUL_GIMSK 0x52 /* Global Interrupt MASK Register */ +#define TUL_GCTRL 0x54 /* Global Control Register */ +#define TUL_GCTRL_EEPROM_BIT 0x04 +#define TUL_GCTRL1 0x55 /* Global Control Register */ +#define TUL_DMACFG 0x5B /* DMA configuration */ +#define TUL_NVRAM 0x5D /* Non-volatile RAM port */ + +#define TUL_SCnt0 0x80 /* 00 R/W Transfer Counter Low */ +#define TUL_SCnt1 0x81 /* 01 R/W Transfer Counter Mid */ +#define TUL_SCnt2 0x82 /* 02 R/W Transfer Count High */ +#define TUL_SFifoCnt 0x83 /* 03 R FIFO counter */ +#define TUL_SIntEnable 0x84 /* 03 W Interrupt enble */ +#define TUL_SInt 0x84 /* 04 R Interrupt Register */ +#define TUL_SCtrl0 0x85 /* 05 W Control 0 */ +#define TUL_SStatus0 0x85 /* 05 R Status 0 */ +#define TUL_SCtrl1 0x86 /* 06 W Control 1 */ +#define TUL_SStatus1 0x86 /* 06 R Status 1 */ +#define TUL_SConfig 0x87 /* 07 W Configuration */ +#define TUL_SStatus2 0x87 /* 07 R Status 2 */ +#define TUL_SPeriod 0x88 /* 08 W Sync. Transfer Period & Offset */ +#define TUL_SOffset 0x88 /* 08 R Offset */ +#define TUL_SScsiId 0x89 /* 09 W SCSI ID */ +#define TUL_SBusId 0x89 /* 09 R SCSI BUS ID */ +#define TUL_STimeOut 0x8A /* 0A W Sel/Resel Time Out Register */ +#define TUL_SIdent 0x8A /* 0A R Identify Message Register */ +#define TUL_SAvail 0x8A /* 0A R Available Counter Register */ +#define TUL_SData 0x8B /* 0B R/W SCSI data in/out */ +#define TUL_SFifo 0x8C /* 0C R/W FIFO */ +#define TUL_SSignal 0x90 /* 10 R/W SCSI signal in/out */ +#define TUL_SCmd 0x91 /* 11 R/W Command */ +#define TUL_STest0 0x92 /* 12 R/W Test0 */ +#define TUL_STest1 0x93 /* 13 R/W Test1 */ +#define TUL_SCFG1 0x94 /* 14 R/W Configuration */ + +#define TUL_XAddH 0xC0 /*DMA Transfer Physical Address */ +#define TUL_XAddW 0xC8 /*DMA Current Transfer Physical Address */ +#define TUL_XCntH 0xD0 /*DMA Transfer Counter */ +#define TUL_XCntW 0xD4 /*DMA Current Transfer Counter */ +#define TUL_XCmd 0xD8 /*DMA Command Register */ +#define TUL_Int 0xDC /*Interrupt Register */ +#define TUL_XStatus 0xDD /*DMA status Register */ +#define TUL_Mask 0xE0 /*Interrupt Mask Register */ +#define TUL_XCtrl 0xE4 /*DMA Control Register */ +#define TUL_XCtrl1 0xE5 /*DMA Control Register 1 */ +#define TUL_XFifo 0xE8 /*DMA FIFO */ + +#define TUL_WCtrl 0xF7 /*Bus master wait state control */ +#define TUL_DCtrl 0xFB /*DMA delay control */ + +/*----------------------------------------------------------------------*/ +/* bit definition for Command register of Configuration Space Header */ +/*----------------------------------------------------------------------*/ +#define BUSMS 0x04 /* BUS MASTER Enable */ +#define IOSPA 0x01 /* IO Space Enable */ + +/*----------------------------------------------------------------------*/ +/* Command Codes of Tulip SCSI Command register */ +/*----------------------------------------------------------------------*/ +#define TSC_EN_RESEL 0x80 /* Enable Reselection */ +#define TSC_CMD_COMP 0x84 /* Command Complete Sequence */ +#define TSC_SEL 0x01 /* Select Without ATN Sequence */ +#define TSC_SEL_ATN 0x11 /* Select With ATN Sequence */ +#define TSC_SEL_ATN_DMA 0x51 /* Select With ATN Sequence with DMA */ +#define TSC_SEL_ATN3 0x31 /* Select With ATN3 Sequence */ +#define TSC_SEL_ATNSTOP 0x12 /* Select With ATN and Stop Sequence */ +#define TSC_SELATNSTOP 0x1E /* Select With ATN and Stop Sequence */ + +#define 
TSC_SEL_ATN_DIRECT_IN 0x95 /* Select With ATN Sequence */ +#define TSC_SEL_ATN_DIRECT_OUT 0x15 /* Select With ATN Sequence */ +#define TSC_SEL_ATN3_DIRECT_IN 0xB5 /* Select With ATN3 Sequence */ +#define TSC_SEL_ATN3_DIRECT_OUT 0x35 /* Select With ATN3 Sequence */ +#define TSC_XF_DMA_OUT_DIRECT 0x06 /* DMA Xfer Information out */ +#define TSC_XF_DMA_IN_DIRECT 0x86 /* DMA Xfer Information in */ + +#define TSC_XF_DMA_OUT 0x43 /* DMA Xfer Information out */ +#define TSC_XF_DMA_IN 0xC3 /* DMA Xfer Information in */ +#define TSC_XF_FIFO_OUT 0x03 /* FIFO Xfer Information out */ +#define TSC_XF_FIFO_IN 0x83 /* FIFO Xfer Information in */ + +#define TSC_MSG_ACCEPT 0x0F /* Message Accept */ + +/*----------------------------------------------------------------------*/ +/* bit definition for Tulip SCSI Control 0 Register */ +/*----------------------------------------------------------------------*/ +#define TSC_RST_SEQ 0x20 /* Reset sequence counter */ +#define TSC_FLUSH_FIFO 0x10 /* Flush FIFO */ +#define TSC_ABT_CMD 0x04 /* Abort command (sequence) */ +#define TSC_RST_CHIP 0x02 /* Reset SCSI Chip */ +#define TSC_RST_BUS 0x01 /* Reset SCSI Bus */ + +/*----------------------------------------------------------------------*/ +/* bit definition for Tulip SCSI Control 1 Register */ +/*----------------------------------------------------------------------*/ +#define TSC_EN_SCAM 0x80 /* Enable SCAM */ +#define TSC_TIMER 0x40 /* Select timeout unit */ +#define TSC_EN_SCSI2 0x20 /* SCSI-2 mode */ +#define TSC_PWDN 0x10 /* Power down mode */ +#define TSC_WIDE_CPU 0x08 /* Wide CPU */ +#define TSC_HW_RESELECT 0x04 /* Enable HW reselect */ +#define TSC_EN_BUS_OUT 0x02 /* Enable SCSI data bus out latch */ +#define TSC_EN_BUS_IN 0x01 /* Enable SCSI data bus in latch */ + +/*----------------------------------------------------------------------*/ +/* bit definition for Tulip SCSI Configuration Register */ +/*----------------------------------------------------------------------*/ +#define TSC_EN_LATCH 0x80 /* Enable phase latch */ +#define TSC_INITIATOR 0x40 /* Initiator mode */ +#define TSC_EN_SCSI_PAR 0x20 /* Enable SCSI parity */ +#define TSC_DMA_8BIT 0x10 /* Alternate dma 8-bits mode */ +#define TSC_DMA_16BIT 0x08 /* Alternate dma 16-bits mode */ +#define TSC_EN_WDACK 0x04 /* Enable DACK while wide SCSI xfer */ +#define TSC_ALT_PERIOD 0x02 /* Alternate sync period mode */ +#define TSC_DIS_SCSIRST 0x01 /* Disable SCSI bus reset us */ + +#define TSC_INITDEFAULT (TSC_INITIATOR | TSC_EN_LATCH | TSC_ALT_PERIOD | TSC_DIS_SCSIRST) + +#define TSC_WIDE_SCSI 0x80 /* Enable Wide SCSI */ + +/*----------------------------------------------------------------------*/ +/* bit definition for Tulip SCSI signal Register */ +/*----------------------------------------------------------------------*/ +#define TSC_RST_ACK 0x00 /* Release ACK signal */ +#define TSC_RST_ATN 0x00 /* Release ATN signal */ +#define TSC_RST_BSY 0x00 /* Release BSY signal */ + +#define TSC_SET_ACK 0x40 /* ACK signal */ +#define TSC_SET_ATN 0x08 /* ATN signal */ + +#define TSC_REQI 0x80 /* REQ signal */ +#define TSC_ACKI 0x40 /* ACK signal */ +#define TSC_BSYI 0x20 /* BSY signal */ +#define TSC_SELI 0x10 /* SEL signal */ +#define TSC_ATNI 0x08 /* ATN signal */ +#define TSC_MSGI 0x04 /* MSG signal */ +#define TSC_CDI 0x02 /* C/D signal */ +#define TSC_IOI 0x01 /* I/O signal */ + + +/*----------------------------------------------------------------------*/ +/* bit definition for Tulip SCSI Status 0 Register */ 
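+/*
+ * The driver busy-waits on this register until TSS_INT_PENDING is set
+ * before reading the interrupt register, e.g. (sketch of the wait loops
+ * in initio.c, where host->addr is the adapter's I/O base):
+ *
+ *	while (!(inb(host->addr + TUL_SStatus0) & TSS_INT_PENDING))
+ *		cpu_relax();
+ */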
+/*----------------------------------------------------------------------*/ +#define TSS_INT_PENDING 0x80 /* Interrupt pending */ +#define TSS_SEQ_ACTIVE 0x40 /* Sequencer active */ +#define TSS_XFER_CNT 0x20 /* Transfer counter zero */ +#define TSS_FIFO_EMPTY 0x10 /* FIFO empty */ +#define TSS_PAR_ERROR 0x08 /* SCSI parity error */ +#define TSS_PH_MASK 0x07 /* SCSI phase mask */ + +/*----------------------------------------------------------------------*/ +/* bit definition for Tulip SCSI Status 1 Register */ +/*----------------------------------------------------------------------*/ +#define TSS_STATUS_RCV 0x08 /* Status received */ +#define TSS_MSG_SEND 0x40 /* Message sent */ +#define TSS_CMD_PH_CMP 0x20 /* command phase done */ +#define TSS_DATA_PH_CMP 0x10 /* Data phase done */ +#define TSS_STATUS_SEND 0x08 /* Status sent */ +#define TSS_XFER_CMP 0x04 /* Transfer completed */ +#define TSS_SEL_CMP 0x02 /* Selection completed */ +#define TSS_ARB_CMP 0x01 /* Arbitration completed */ + +/*----------------------------------------------------------------------*/ +/* bit definition for Tulip SCSI Status 2 Register */ +/*----------------------------------------------------------------------*/ +#define TSS_CMD_ABTED 0x80 /* Command aborted */ +#define TSS_OFFSET_0 0x40 /* Offset counter zero */ +#define TSS_FIFO_FULL 0x20 /* FIFO full */ +#define TSS_TIMEOUT_0 0x10 /* Timeout counter zero */ +#define TSS_BUSY_RLS 0x08 /* Busy release */ +#define TSS_PH_MISMATCH 0x04 /* Phase mismatch */ +#define TSS_SCSI_BUS_EN 0x02 /* SCSI data bus enable */ +#define TSS_SCSIRST 0x01 /* SCSI bus reset in progress */ + +/*----------------------------------------------------------------------*/ +/* bit definition for Tulip SCSI Interrupt Register */ +/*----------------------------------------------------------------------*/ +#define TSS_RESEL_INT 0x80 /* Reselected interrupt */ +#define TSS_SEL_TIMEOUT 0x40 /* Selected/reselected timeout */ +#define TSS_BUS_SERV 0x20 +#define TSS_SCSIRST_INT 0x10 /* SCSI bus reset detected */ +#define TSS_DISC_INT 0x08 /* Disconnected interrupt */ +#define TSS_SEL_INT 0x04 /* Select interrupt */ +#define TSS_SCAM_SEL 0x02 /* SCAM selected */ +#define TSS_FUNC_COMP 0x01 + +/*----------------------------------------------------------------------*/ +/* SCSI Phase Codes. 
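+ *	The value is the 3-bit phase code sampled from the SCSI control
+ *	signals (bit 2 = MSG, bit 1 = C/D, bit 0 = I/O; compare
+ *	TSS_PH_MASK and the TSC_MSGI/TSC_CDI/TSC_IOI bits above), which
+ *	is why the data phases are 0/1 and the message phases are 6/7.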
*/ +/*----------------------------------------------------------------------*/ +#define DATA_OUT 0 +#define DATA_IN 1 /* 4 */ +#define CMD_OUT 2 +#define STATUS_IN 3 /* 6 */ +#define MSG_OUT 6 /* 3 */ +#define MSG_IN 7 + + + +/*----------------------------------------------------------------------*/ +/* Command Codes of Tulip xfer Command register */ +/*----------------------------------------------------------------------*/ +#define TAX_X_FORC 0x02 +#define TAX_X_ABT 0x04 +#define TAX_X_CLR_FIFO 0x08 + +#define TAX_X_IN 0x21 +#define TAX_X_OUT 0x01 +#define TAX_SG_IN 0xA1 +#define TAX_SG_OUT 0x81 + +/*----------------------------------------------------------------------*/ +/* Tulip Interrupt Register */ +/*----------------------------------------------------------------------*/ +#define XCMP 0x01 +#define FCMP 0x02 +#define XABT 0x04 +#define XERR 0x08 +#define SCMP 0x10 +#define IPEND 0x80 + +/*----------------------------------------------------------------------*/ +/* Tulip DMA Status Register */ +/*----------------------------------------------------------------------*/ +#define XPEND 0x01 /* Transfer pending */ +#define FEMPTY 0x02 /* FIFO empty */ + + + +/*----------------------------------------------------------------------*/ +/* bit definition for TUL_GCTRL */ +/*----------------------------------------------------------------------*/ +#define EXTSG 0x80 +#define EXTAD 0x60 +#define SEG4K 0x08 +#define EEPRG 0x04 +#define MRMUL 0x02 + +/*----------------------------------------------------------------------*/ +/* bit definition for TUL_NVRAM */ +/*----------------------------------------------------------------------*/ +#define SE2CS 0x08 +#define SE2CLK 0x04 +#define SE2DO 0x02 +#define SE2DI 0x01 + + +/************************************************************************/ +/* Scatter-Gather Element Structure */ +/************************************************************************/ +struct sg_entry { + u32 data; /* Data Pointer */ + u32 len; /* Data Length */ +}; + +/*********************************************************************** + SCSI Control Block +************************************************************************/ +struct scsi_ctrl_blk { + struct scsi_ctrl_blk *next; + u8 status; /*4 */ + u8 next_state; /*5 */ + u8 mode; /*6 */ + u8 msgin; /*7 SCB_Res0 */ + u16 sgidx; /*8 */ + u16 sgmax; /*A */ +#ifdef ALPHA + u32 reserved[2]; /*C */ +#else + u32 reserved[3]; /*C */ +#endif + + u32 xferlen; /*18 Current xfer len */ + u32 totxlen; /*1C Total xfer len */ + u32 paddr; /*20 SCB phy. Addr. */ + + u8 opcode; /*24 SCB command code */ + u8 flags; /*25 SCB Flags */ + u8 target; /*26 Target Id */ + u8 lun; /*27 Lun */ + u32 bufptr; /*28 Data Buffer Pointer */ + u32 buflen; /*2C Data Allocation Length */ + u8 sglen; /*30 SG list # */ + u8 senselen; /*31 Sense Allocation Length */ + u8 hastat; /*32 */ + u8 tastat; /*33 */ + u8 cdblen; /*34 CDB Length */ + u8 ident; /*35 Identify */ + u8 tagmsg; /*36 Tag Message */ + u8 tagid; /*37 Queue Tag */ + u8 cdb[12]; /*38 */ + u32 sgpaddr; /*44 SG List/Sense Buf phy. Addr. 
*/ + u32 senseptr; /*48 Sense data pointer */ + void (*post) (u8 *, u8 *); /*4C POST routine */ + struct scsi_cmnd *srb; /*50 SRB Pointer */ + struct sg_entry sglist[TOTAL_SG_ENTRY]; /*54 Start of SG list */ +}; + +/* Bit Definition for status */ +#define SCB_RENT 0x01 +#define SCB_PEND 0x02 +#define SCB_CONTIG 0x04 /* Contingent Allegiance */ +#define SCB_SELECT 0x08 +#define SCB_BUSY 0x10 +#define SCB_DONE 0x20 + + +/* Opcodes for opcode */ +#define ExecSCSI 0x1 +#define BusDevRst 0x2 +#define AbortCmd 0x3 + + +/* Bit Definition for mode */ +#define SCM_RSENS 0x01 /* request sense mode */ + + +/* Bit Definition for flags */ +#define SCF_DONE 0x01 +#define SCF_POST 0x02 +#define SCF_SENSE 0x04 +#define SCF_DIR 0x18 +#define SCF_NO_DCHK 0x00 +#define SCF_DIN 0x08 +#define SCF_DOUT 0x10 +#define SCF_NO_XF 0x18 +#define SCF_WR_VF 0x20 /* Write verify turn on */ +#define SCF_POLL 0x40 +#define SCF_SG 0x80 + +/* Error Codes for SCB_HaStat */ +#define HOST_SEL_TOUT 0x11 +#define HOST_DO_DU 0x12 +#define HOST_BUS_FREE 0x13 +#define HOST_BAD_PHAS 0x14 +#define HOST_INV_CMD 0x16 +#define HOST_ABORTED 0x1A /* 07/21/98 */ +#define HOST_SCSI_RST 0x1B +#define HOST_DEV_RST 0x1C + +/* Error Codes for SCB_TaStat */ +#define TARGET_CHKCOND 0x02 +#define TARGET_BUSY 0x08 +#define INI_QUEUE_FULL 0x28 + +/*********************************************************************** + Target Device Control Structure +**********************************************************************/ + +struct target_control { + u16 flags; + u8 js_period; + u8 sconfig0; + u16 drv_flags; + u8 heads; + u8 sectors; +}; + +/*********************************************************************** + Target Device Control Structure +**********************************************************************/ + +/* Bit Definition for TCF_Flags */ +#define TCF_SCSI_RATE 0x0007 +#define TCF_EN_DISC 0x0008 +#define TCF_NO_SYNC_NEGO 0x0010 +#define TCF_NO_WDTR 0x0020 +#define TCF_EN_255 0x0040 +#define TCF_EN_START 0x0080 +#define TCF_WDTR_DONE 0x0100 +#define TCF_SYNC_DONE 0x0200 +#define TCF_BUSY 0x0400 + + +/* Bit Definition for TCF_DrvFlags */ +#define TCF_DRV_BUSY 0x01 /* Indicate target busy(driver) */ +#define TCF_DRV_EN_TAG 0x0800 +#define TCF_DRV_255_63 0x0400 + +/*********************************************************************** + Host Adapter Control Structure +************************************************************************/ +struct initio_host { + u16 addr; /* 00 */ + u16 bios_addr; /* 02 */ + u8 irq; /* 04 */ + u8 scsi_id; /* 05 */ + u8 max_tar; /* 06 */ + u8 num_scbs; /* 07 */ + + u8 flags; /* 08 */ + u8 index; /* 09 */ + u8 ha_id; /* 0A */ + u8 config; /* 0B */ + u16 idmask; /* 0C */ + u8 semaph; /* 0E */ + u8 phase; /* 0F */ + u8 jsstatus0; /* 10 */ + u8 jsint; /* 11 */ + u8 jsstatus1; /* 12 */ + u8 sconf1; /* 13 */ + + u8 msg[8]; /* 14 */ + struct scsi_ctrl_blk *next_avail; /* 1C */ + struct scsi_ctrl_blk *scb; /* 20 */ + struct scsi_ctrl_blk *scb_end; /* 24 */ /*UNUSED*/ + struct scsi_ctrl_blk *next_pending; /* 28 */ + struct scsi_ctrl_blk *next_contig; /* 2C */ /*UNUSED*/ + struct scsi_ctrl_blk *active; /* 30 */ + struct target_control *active_tc; /* 34 */ + + struct scsi_ctrl_blk *first_avail; /* 38 */ + struct scsi_ctrl_blk *last_avail; /* 3C */ + struct scsi_ctrl_blk *first_pending; /* 40 */ + struct scsi_ctrl_blk *last_pending; /* 44 */ + struct scsi_ctrl_blk *first_busy; /* 48 */ + struct scsi_ctrl_blk *last_busy; /* 4C */ + struct scsi_ctrl_blk *first_done; /* 50 */ + struct scsi_ctrl_blk *last_done; /* 54 
*/ + u8 max_tags[16]; /* 58 */ + u8 act_tags[16]; /* 68 */ + struct target_control targets[MAX_TARGETS]; /* 78 */ + spinlock_t avail_lock; + spinlock_t semaph_lock; + struct pci_dev *pci_dev; +}; + +/* Bit Definition for HCB_Config */ +#define HCC_SCSI_RESET 0x01 +#define HCC_EN_PAR 0x02 +#define HCC_ACT_TERM1 0x04 +#define HCC_ACT_TERM2 0x08 +#define HCC_AUTO_TERM 0x10 +#define HCC_EN_PWR 0x80 + +/* Bit Definition for HCB_Flags */ +#define HCF_EXPECT_DISC 0x01 +#define HCF_EXPECT_SELECT 0x02 +#define HCF_EXPECT_RESET 0x10 +#define HCF_EXPECT_DONE_DISC 0x20 + +/****************************************************************** + Serial EEProm +*******************************************************************/ + +typedef struct _NVRAM_SCSI { /* SCSI channel configuration */ + u8 NVM_ChSCSIID; /* 0Ch -> Channel SCSI ID */ + u8 NVM_ChConfig1; /* 0Dh -> Channel config 1 */ + u8 NVM_ChConfig2; /* 0Eh -> Channel config 2 */ + u8 NVM_NumOfTarg; /* 0Fh -> Number of SCSI target */ + /* SCSI target configuration */ + u8 NVM_Targ0Config; /* 10h -> Target 0 configuration */ + u8 NVM_Targ1Config; /* 11h -> Target 1 configuration */ + u8 NVM_Targ2Config; /* 12h -> Target 2 configuration */ + u8 NVM_Targ3Config; /* 13h -> Target 3 configuration */ + u8 NVM_Targ4Config; /* 14h -> Target 4 configuration */ + u8 NVM_Targ5Config; /* 15h -> Target 5 configuration */ + u8 NVM_Targ6Config; /* 16h -> Target 6 configuration */ + u8 NVM_Targ7Config; /* 17h -> Target 7 configuration */ + u8 NVM_Targ8Config; /* 18h -> Target 8 configuration */ + u8 NVM_Targ9Config; /* 19h -> Target 9 configuration */ + u8 NVM_TargAConfig; /* 1Ah -> Target A configuration */ + u8 NVM_TargBConfig; /* 1Bh -> Target B configuration */ + u8 NVM_TargCConfig; /* 1Ch -> Target C configuration */ + u8 NVM_TargDConfig; /* 1Dh -> Target D configuration */ + u8 NVM_TargEConfig; /* 1Eh -> Target E configuration */ + u8 NVM_TargFConfig; /* 1Fh -> Target F configuration */ +} NVRAM_SCSI; + +typedef struct _NVRAM { +/*----------header ---------------*/ + u16 NVM_Signature; /* 0,1: Signature */ + u8 NVM_Size; /* 2: Size of data structure */ + u8 NVM_Revision; /* 3: Revision of data structure */ + /* ----Host Adapter Structure ---- */ + u8 NVM_ModelByte0; /* 4: Model number (byte 0) */ + u8 NVM_ModelByte1; /* 5: Model number (byte 1) */ + u8 NVM_ModelInfo; /* 6: Model information */ + u8 NVM_NumOfCh; /* 7: Number of SCSI channel */ + u8 NVM_BIOSConfig1; /* 8: BIOS configuration 1 */ + u8 NVM_BIOSConfig2; /* 9: BIOS configuration 2 */ + u8 NVM_HAConfig1; /* A: Hoat adapter configuration 1 */ + u8 NVM_HAConfig2; /* B: Hoat adapter configuration 2 */ + NVRAM_SCSI NVM_SCSIInfo[2]; + u8 NVM_reserved[10]; + /* ---------- CheckSum ---------- */ + u16 NVM_CheckSum; /* 0x3E, 0x3F: Checksum of NVRam */ +} NVRAM, *PNVRAM; + +/* Bios Configuration for nvram->BIOSConfig1 */ +#define NBC1_ENABLE 0x01 /* BIOS enable */ +#define NBC1_8DRIVE 0x02 /* Support more than 2 drives */ +#define NBC1_REMOVABLE 0x04 /* Support removable drive */ +#define NBC1_INT19 0x08 /* Intercept int 19h */ +#define NBC1_BIOSSCAN 0x10 /* Dynamic BIOS scan */ +#define NBC1_LUNSUPPORT 0x40 /* Support LUN */ + +/* HA Configuration Byte 1 */ +#define NHC1_BOOTIDMASK 0x0F /* Boot ID number */ +#define NHC1_LUNMASK 0x70 /* Boot LUN number */ +#define NHC1_CHANMASK 0x80 /* Boot Channel number */ + +/* Bit definition for nvram->SCSIconfig1 */ +#define NCC1_BUSRESET 0x01 /* Reset SCSI bus at power up */ +#define NCC1_PARITYCHK 0x02 /* SCSI parity enable */ +#define NCC1_ACTTERM1 0x04 /* Enable 
active terminator 1 */ +#define NCC1_ACTTERM2 0x08 /* Enable active terminator 2 */ +#define NCC1_AUTOTERM 0x10 /* Enable auto terminator */ +#define NCC1_PWRMGR 0x80 /* Enable power management */ + +/* Bit definition for SCSI Target configuration byte */ +#define NTC_DISCONNECT 0x08 /* Enable SCSI disconnect */ +#define NTC_SYNC 0x10 /* SYNC_NEGO */ +#define NTC_NO_WDTR 0x20 /* SYNC_NEGO */ +#define NTC_1GIGA 0x40 /* 255 head / 63 sectors (64/32) */ +#define NTC_SPINUP 0x80 /* Start disk drive */ + +/* Default NVRam values */ +#define INI_SIGNATURE 0xC925 +#define NBC1_DEFAULT (NBC1_ENABLE) +#define NCC1_DEFAULT (NCC1_BUSRESET | NCC1_AUTOTERM | NCC1_PARITYCHK) +#define NTC_DEFAULT (NTC_NO_WDTR | NTC_1GIGA | NTC_DISCONNECT) + +/* SCSI related definition */ +#define DISC_NOT_ALLOW 0x80 /* Disconnect is not allowed */ +#define DISC_ALLOW 0xC0 /* Disconnect is allowed */ +#define SCSICMD_RequestSense 0x03 + +#define SCSI_ABORT_SNOOZE 0 +#define SCSI_ABORT_SUCCESS 1 +#define SCSI_ABORT_PENDING 2 +#define SCSI_ABORT_BUSY 3 +#define SCSI_ABORT_NOT_RUNNING 4 +#define SCSI_ABORT_ERROR 5 + +#define SCSI_RESET_SNOOZE 0 +#define SCSI_RESET_PUNT 1 +#define SCSI_RESET_SUCCESS 2 +#define SCSI_RESET_PENDING 3 +#define SCSI_RESET_WAKEUP 4 +#define SCSI_RESET_NOT_RUNNING 5 +#define SCSI_RESET_ERROR 6 + +#define SCSI_RESET_SYNCHRONOUS 0x01 +#define SCSI_RESET_ASYNCHRONOUS 0x02 +#define SCSI_RESET_SUGGEST_BUS_RESET 0x04 +#define SCSI_RESET_SUGGEST_HOST_RESET 0x08 + +#define SCSI_RESET_BUS_RESET 0x100 +#define SCSI_RESET_HOST_RESET 0x200 +#define SCSI_RESET_ACTION 0xff + +struct initio_cmd_priv { + dma_addr_t sense_dma_addr; + dma_addr_t sglist_dma_addr; +}; + +static inline struct initio_cmd_priv *initio_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c new file mode 100644 index 000000000..4e13797b2 --- /dev/null +++ b/drivers/scsi/ipr.c @@ -0,0 +1,10121 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * ipr.c -- driver for IBM Power Linux RAID adapters + * + * Written By: Brian King , IBM Corporation + * + * Copyright (C) 2003, 2004 IBM Corporation + */ + +/* + * Notes: + * + * This driver is used to control the following SCSI adapters: + * + * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B + * + * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter + * PCI-X Dual Channel Ultra 320 SCSI Adapter + * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card + * Embedded SCSI adapter on p615 and p655 systems + * + * Supported Hardware Features: + * - Ultra 320 SCSI controller + * - PCI-X host interface + * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine + * - Non-Volatile Write Cache + * - Supports attachment of non-RAID disks, tape, and optical devices + * - RAID Levels 0, 5, 10 + * - Hot spare + * - Background Parity Checking + * - Background Data Scrubbing + * - Ability to increase the capacity of an existing RAID 5 disk array + * by adding disks + * + * Driver Features: + * - Tagged command queuing + * - Adapter microcode download + * - PCI hot plug + * - SCSI device hot plug + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipr.h" + +/* + * Global Data + */ +static LIST_HEAD(ipr_ioa_head); +static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL; +static 
unsigned int ipr_max_speed = 1; +static int ipr_testmode = 0; +static unsigned int ipr_fastfail = 0; +static unsigned int ipr_transop_timeout = 0; +static unsigned int ipr_debug = 0; +static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS; +static unsigned int ipr_dual_ioa_raid = 1; +static unsigned int ipr_number_of_msix = 16; +static unsigned int ipr_fast_reboot; +static DEFINE_SPINLOCK(ipr_driver_lock); + +/* This table describes the differences between DMA controller chips */ +static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { + { /* Gemstone, Citrine, Obsidian, and Obsidian-E */ + .mailbox = 0x0042C, + .max_cmds = 100, + .cache_line_size = 0x20, + .clear_isr = 1, + .iopoll_weight = 0, + { + .set_interrupt_mask_reg = 0x0022C, + .clr_interrupt_mask_reg = 0x00230, + .clr_interrupt_mask_reg32 = 0x00230, + .sense_interrupt_mask_reg = 0x0022C, + .sense_interrupt_mask_reg32 = 0x0022C, + .clr_interrupt_reg = 0x00228, + .clr_interrupt_reg32 = 0x00228, + .sense_interrupt_reg = 0x00224, + .sense_interrupt_reg32 = 0x00224, + .ioarrin_reg = 0x00404, + .sense_uproc_interrupt_reg = 0x00214, + .sense_uproc_interrupt_reg32 = 0x00214, + .set_uproc_interrupt_reg = 0x00214, + .set_uproc_interrupt_reg32 = 0x00214, + .clr_uproc_interrupt_reg = 0x00218, + .clr_uproc_interrupt_reg32 = 0x00218 + } + }, + { /* Snipe and Scamp */ + .mailbox = 0x0052C, + .max_cmds = 100, + .cache_line_size = 0x20, + .clear_isr = 1, + .iopoll_weight = 0, + { + .set_interrupt_mask_reg = 0x00288, + .clr_interrupt_mask_reg = 0x0028C, + .clr_interrupt_mask_reg32 = 0x0028C, + .sense_interrupt_mask_reg = 0x00288, + .sense_interrupt_mask_reg32 = 0x00288, + .clr_interrupt_reg = 0x00284, + .clr_interrupt_reg32 = 0x00284, + .sense_interrupt_reg = 0x00280, + .sense_interrupt_reg32 = 0x00280, + .ioarrin_reg = 0x00504, + .sense_uproc_interrupt_reg = 0x00290, + .sense_uproc_interrupt_reg32 = 0x00290, + .set_uproc_interrupt_reg = 0x00290, + .set_uproc_interrupt_reg32 = 0x00290, + .clr_uproc_interrupt_reg = 0x00294, + .clr_uproc_interrupt_reg32 = 0x00294 + } + }, + { /* CRoC */ + .mailbox = 0x00044, + .max_cmds = 1000, + .cache_line_size = 0x20, + .clear_isr = 0, + .iopoll_weight = 64, + { + .set_interrupt_mask_reg = 0x00010, + .clr_interrupt_mask_reg = 0x00018, + .clr_interrupt_mask_reg32 = 0x0001C, + .sense_interrupt_mask_reg = 0x00010, + .sense_interrupt_mask_reg32 = 0x00014, + .clr_interrupt_reg = 0x00008, + .clr_interrupt_reg32 = 0x0000C, + .sense_interrupt_reg = 0x00000, + .sense_interrupt_reg32 = 0x00004, + .ioarrin_reg = 0x00070, + .sense_uproc_interrupt_reg = 0x00020, + .sense_uproc_interrupt_reg32 = 0x00024, + .set_uproc_interrupt_reg = 0x00020, + .set_uproc_interrupt_reg32 = 0x00024, + .clr_uproc_interrupt_reg = 0x00028, + .clr_uproc_interrupt_reg32 = 0x0002C, + .init_feedback_reg = 0x0005C, + .dump_addr_reg = 0x00064, + .dump_data_reg = 0x00068, + .endian_swap_reg = 0x00084 + } + }, +}; + +static const struct ipr_chip_t ipr_chip[] = { + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, 
&ipr_chip_cfg[1] }, + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] } +}; + +static int ipr_max_bus_speeds[] = { + IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE +}; + +MODULE_AUTHOR("Brian King "); +MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver"); +module_param_named(max_speed, ipr_max_speed, uint, 0); +MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320"); +module_param_named(log_level, ipr_log_level, uint, 0); +MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver"); +module_param_named(testmode, ipr_testmode, int, 0); +MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations"); +module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries"); +module_param_named(transop_timeout, ipr_transop_timeout, int, 0); +MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)"); +module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)"); +module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0); +MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)"); +module_param_named(max_devs, ipr_max_devs, int, 0); +MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. " + "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]"); +module_param_named(number_of_msix, ipr_number_of_msix, int, 0); +MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)"); +module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. 
(default: 0)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(IPR_DRIVER_VERSION); + +/* A constant array of IOASCs/URCs/Error Messages */ +static const +struct ipr_error_table_t ipr_error_table[] = { + {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL, + "8155: An unknown error was received"}, + {0x00330000, 0, 0, + "Soft underlength error"}, + {0x005A0000, 0, 0, + "Command to be cancelled not found"}, + {0x00808000, 0, 0, + "Qualified success"}, + {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL, + "FFFE: Soft device bus error recovered by the IOA"}, + {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL, + "4101: Soft device bus fabric error"}, + {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL, + "FFFC: Logical block guard error recovered by the device"}, + {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL, + "FFFC: Logical block reference tag error recovered by the device"}, + {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL, + "4171: Recovered scatter list tag / sequence number error"}, + {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL, + "FF3D: Recovered logical block CRC error on IOA to Host transfer"}, + {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL, + "4171: Recovered logical block sequence number error on IOA to Host transfer"}, + {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL, + "FFFD: Recovered logical block reference tag error detected by the IOA"}, + {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL, + "FFFD: Logical block guard error recovered by the IOA"}, + {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL, + "FFF9: Device sector reassign successful"}, + {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL, + "FFF7: Media error recovered by device rewrite procedures"}, + {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL, + "7001: IOA sector reassignment successful"}, + {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL, + "FFF9: Soft media error. Sector reassignment recommended"}, + {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL, + "FFF7: Media error recovered by IOA rewrite procedures"}, + {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL, + "FF3D: Soft PCI bus error recovered by the IOA"}, + {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL, + "FFF6: Device hardware error recovered by the IOA"}, + {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL, + "FFF6: Device hardware error recovered by the device"}, + {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL, + "FF3D: Soft IOA error recovered by the IOA"}, + {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL, + "FFFA: Undefined device response recovered by the IOA"}, + {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL, + "FFF6: Device bus error, message or command phase"}, + {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL, + "FFFE: Task Management Function failed"}, + {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL, + "FFF6: Failure prediction threshold exceeded"}, + {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL, + "8009: Impending cache battery pack failure"}, + {0x02040100, 0, 0, + "Logical Unit in process of becoming ready"}, + {0x02040200, 0, 0, + "Initializing command required"}, + {0x02040400, 0, 0, + "34FF: Disk device format in progress"}, + {0x02040C00, 0, 0, + "Logical unit not accessible, target port in unavailable state"}, + {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL, + "9070: IOA requested reset"}, + {0x023F0000, 0, 0, + "Synchronization required"}, + {0x02408500, 0, 0, + "IOA microcode download required"}, + {0x02408600, 0, 0, + "Device bus connection is prohibited by host"}, + {0x024E0000, 0, 0, + "No ready, IOA shutdown"}, + {0x025A0000, 0, 0, + "Not ready, IOA has been shutdown"}, + {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL, + "3020: Storage subsystem configuration error"}, + {0x03110B00, 0, 0, + "FFF5: Medium error, data unreadable, recommend reassign"}, + {0x03110C00, 0, 0, + "7000: 
Medium error, data unreadable, do not reassign"}, + {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL, + "FFF3: Disk media format bad"}, + {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL, + "3002: Addressed device failed to respond to selection"}, + {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL, + "3100: Device bus error"}, + {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL, + "3109: IOA timed out a device command"}, + {0x04088000, 0, 0, + "3120: SCSI bus is not operational"}, + {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL, + "4100: Hard device bus fabric error"}, + {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL, + "310C: Logical block guard error detected by the device"}, + {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL, + "310C: Logical block reference tag error detected by the device"}, + {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL, + "4170: Scatter list tag / sequence number error"}, + {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL, + "8150: Logical block CRC error on IOA to Host transfer"}, + {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL, + "4170: Logical block sequence number error on IOA to Host transfer"}, + {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL, + "310D: Logical block reference tag error detected by the IOA"}, + {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL, + "310D: Logical block guard error detected by the IOA"}, + {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL, + "9000: IOA reserved area data check"}, + {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL, + "9001: IOA reserved area invalid data pattern"}, + {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL, + "9002: IOA reserved area LRC error"}, + {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL, + "Hardware Error, IOA metadata access error"}, + {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL, + "102E: Out of alternate sectors for disk storage"}, + {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL, + "FFF4: Data transfer underlength error"}, + {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL, + "FFF4: Data transfer overlength error"}, + {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL, + "3400: Logical unit failure"}, + {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL, + "FFF4: Device microcode is corrupt"}, + {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL, + "8150: PCI bus error"}, + {0x04430000, 1, 0, + "Unsupported device bus message received"}, + {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL, + "FFF4: Disk device problem"}, + {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL, + "8150: Permanent IOA failure"}, + {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL, + "3010: Disk device returned wrong response to IOA"}, + {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL, + "8151: IOA microcode error"}, + {0x04448500, 0, 0, + "Device bus status error"}, + {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL, + "8157: IOA error requiring IOA reset to recover"}, + {0x04448700, 0, 0, + "ATA device status error"}, + {0x04490000, 0, 0, + "Message reject received from the device"}, + {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL, + "8008: A permanent cache battery pack failure occurred"}, + {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL, + "9090: Disk unit has been modified after the last known status"}, + {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL, + "9081: IOA detected device error"}, + {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL, + "9082: IOA detected device error"}, + {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL, + "3110: Device bus error, message or command phase"}, + {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL, + "3110: SAS Command / Task Management Function failed"}, + {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL, + "9091: Incorrect hardware configuration change has been detected"}, + {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL, + "9073: Invalid multi-adapter configuration"}, + {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL, + "4010: Incorrect 
connection between cascaded expanders"}, + {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL, + "4020: Connections exceed IOA design limits"}, + {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL, + "4030: Incorrect multipath connection"}, + {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL, + "4110: Unsupported enclosure function"}, + {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL, + "4120: SAS cable VPD cannot be read"}, + {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL, + "FFF4: Command to logical unit failed"}, + {0x05240000, 1, 0, + "Illegal request, invalid request type or request packet"}, + {0x05250000, 0, 0, + "Illegal request, invalid resource handle"}, + {0x05258000, 0, 0, + "Illegal request, commands not allowed to this device"}, + {0x05258100, 0, 0, + "Illegal request, command not allowed to a secondary adapter"}, + {0x05258200, 0, 0, + "Illegal request, command not allowed to a non-optimized resource"}, + {0x05260000, 0, 0, + "Illegal request, invalid field in parameter list"}, + {0x05260100, 0, 0, + "Illegal request, parameter not supported"}, + {0x05260200, 0, 0, + "Illegal request, parameter value invalid"}, + {0x052C0000, 0, 0, + "Illegal request, command sequence error"}, + {0x052C8000, 1, 0, + "Illegal request, dual adapter support not enabled"}, + {0x052C8100, 1, 0, + "Illegal request, another cable connector was physically disabled"}, + {0x054E8000, 1, 0, + "Illegal request, inconsistent group id/group count"}, + {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL, + "9031: Array protection temporarily suspended, protection resuming"}, + {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL, + "9040: Array protection temporarily suspended, protection resuming"}, + {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL, + "4080: IOA exceeded maximum operating temperature"}, + {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL, + "4085: Service required"}, + {0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL, + "4086: SAS Adapter Hardware Configuration Error"}, + {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL, + "3140: Device bus not ready to ready transition"}, + {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL, + "FFFB: SCSI bus was reset"}, + {0x06290500, 0, 0, + "FFFE: SCSI bus transition to single ended"}, + {0x06290600, 0, 0, + "FFFE: SCSI bus transition to LVD"}, + {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL, + "FFFB: SCSI bus was reset by another initiator"}, + {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL, + "3029: A device replacement has occurred"}, + {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL, + "4102: Device bus fabric performance degradation"}, + {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL, + "9051: IOA cache data exists for a missing or failed device"}, + {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL, + "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"}, + {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL, + "9025: Disk unit is not supported at its physical location"}, + {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL, + "3020: IOA detected a SCSI bus configuration error"}, + {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL, + "3150: SCSI bus configuration error"}, + {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL, + "9074: Asymmetric advanced function disk configuration"}, + {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL, + "4040: Incomplete multipath connection between IOA and enclosure"}, + {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL, + "4041: Incomplete multipath connection between enclosure and device"}, + {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL, + "9075: Incomplete multipath connection between IOA and remote IOA"}, + {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL, + "9076: Configuration error, missing remote IOA"}, + {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL, + "4050: 
Enclosure does not support a required multipath function"}, + {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL, + "4121: Configuration error, required cable is missing"}, + {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL, + "4122: Cable is not plugged into the correct location on remote IOA"}, + {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL, + "4123: Configuration error, invalid cable vital product data"}, + {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL, + "4124: Configuration error, both cable ends are plugged into the same IOA"}, + {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL, + "4070: Logically bad block written on device"}, + {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL, + "9041: Array protection temporarily suspended"}, + {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL, + "9042: Corrupt array parity detected on specified device"}, + {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL, + "9030: Array no longer protected due to missing or failed disk unit"}, + {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL, + "9071: Link operational transition"}, + {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL, + "9072: Link not operational transition"}, + {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL, + "9032: Array exposed but still protected"}, + {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL, + "70DD: Device forced failed by disrupt device command"}, + {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL, + "4061: Multipath redundancy level got better"}, + {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL, + "4060: Multipath redundancy level got worse"}, + {0x06808100, 0, IPR_DEBUG_LOG_LEVEL, + "9083: Device raw mode enabled"}, + {0x06808200, 0, IPR_DEBUG_LOG_LEVEL, + "9084: Device raw mode disabled"}, + {0x07270000, 0, 0, + "Failure due to other device"}, + {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL, + "9008: IOA does not support functions expected by devices"}, + {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL, + "9010: Cache data associated with attached devices cannot be found"}, + {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL, + "9011: Cache data belongs to devices other than those attached"}, + {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL, + "9020: Array missing 2 or more devices with only 1 device present"}, + {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL, + "9021: Array missing 2 or more devices with 2 or more devices present"}, + {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL, + "9022: Exposed array is missing a required device"}, + {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL, + "9023: Array member(s) not at required physical locations"}, + {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL, + "9024: Array not functional due to present hardware configuration"}, + {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL, + "9026: Array not functional due to present hardware configuration"}, + {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL, + "9027: Array is missing a device and parity is out of sync"}, + {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL, + "9028: Maximum number of arrays already exist"}, + {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL, + "9050: Required cache data cannot be located for a disk unit"}, + {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL, + "9052: Cache data exists for a device that has been modified"}, + {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL, + "9054: IOA resources not available due to previous problems"}, + {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL, + "9092: Disk unit requires initialization before use"}, + {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL, + "9029: Incorrect hardware configuration change has been detected"}, + {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL, + "9060: One or more disk pairs are missing from an array"}, + {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL, + "9061: One or more disks are missing from an array"}, + 
{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL, + "9062: One or more disks are missing from an array"}, + {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL, + "9063: Maximum number of functional arrays has been exceeded"}, + {0x07279A00, 0, 0, + "Data protect, other volume set problem"}, + {0x0B260000, 0, 0, + "Aborted command, invalid descriptor"}, + {0x0B3F9000, 0, 0, + "Target operating conditions have changed, dual adapter takeover"}, + {0x0B530200, 0, 0, + "Aborted command, medium removal prevented"}, + {0x0B5A0000, 0, 0, + "Command terminated by host"}, + {0x0B5B8000, 0, 0, + "Aborted command, command terminated by host"} +}; + +static const struct ipr_ses_table_entry ipr_ses_table[] = { + { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 }, + { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 }, + { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */ + { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */ + { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */ + { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */ + { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 }, + { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 }, + { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 }, + { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 }, + { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 }, + { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 }, + { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 } +}; + +/* + * Function Prototypes + */ +static int ipr_reset_alert(struct ipr_cmnd *); +static void ipr_process_ccn(struct ipr_cmnd *); +static void ipr_process_error(struct ipr_cmnd *); +static void ipr_reset_ioa_job(struct ipr_cmnd *); +static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *, + enum ipr_shutdown_type); + +#ifdef CONFIG_SCSI_IPR_TRACE +/** + * ipr_trc_hook - Add a trace entry to the driver trace + * @ipr_cmd: ipr command struct + * @type: trace type + * @add_data: additional data + * + * Return value: + * none + **/ +static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, + u8 type, u32 add_data) +{ + struct ipr_trace_entry *trace_entry; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + unsigned int trace_index; + + trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK; + trace_entry = &ioa_cfg->trace[trace_index]; + trace_entry->time = jiffies; + trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; + trace_entry->type = type; + trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; + trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; + trace_entry->u.add_data = add_data; + wmb(); +} +#else +#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0) +#endif + +/** + * ipr_lock_and_done - Acquire lock and complete command + * @ipr_cmd: ipr command struct + * + * Return value: + * none + **/ +static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd) +{ + unsigned long lock_flags; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ipr_cmd->done(ipr_cmd); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); +} + +/** + * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse + * @ipr_cmd: ipr command struct + * + * Return value: + * none + **/ +static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; + dma_addr_t dma_addr = ipr_cmd->dma_addr; + int hrrq_id; + + hrrq_id = ioarcb->cmd_pkt.hrrq_id; + memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); + ioarcb->cmd_pkt.hrrq_id = hrrq_id; + ioarcb->data_transfer_length = 0; 
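+ /*
+  * The remaining length fields are cleared and the IOADL address fields are
+  * re-pointed at the scatter/gather list embedded in the ipr_cmnd itself:
+  * SIS-64 adapters take a single 64-bit descriptor list address, while
+  * legacy SIS adapters keep separate 32-bit read and write list addresses.
+  */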
+ ioarcb->read_data_transfer_length = 0; + ioarcb->ioadl_len = 0; + ioarcb->read_ioadl_len = 0; + + if (ipr_cmd->ioa_cfg->sis64) { + ioarcb->u.sis64_addr_data.data_ioadl_addr = + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); + } else { + ioarcb->write_ioadl_addr = + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); + ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; + } + + ioasa->hdr.ioasc = 0; + ioasa->hdr.residual_data_len = 0; + ipr_cmd->scsi_cmd = NULL; + ipr_cmd->sense_buffer[0] = 0; + ipr_cmd->dma_use_sg = 0; +} + +/** + * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block + * @ipr_cmd: ipr command struct + * @fast_done: fast done function call-back + * + * Return value: + * none + **/ +static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd, + void (*fast_done) (struct ipr_cmnd *)) +{ + ipr_reinit_ipr_cmnd(ipr_cmd); + ipr_cmd->u.scratch = 0; + ipr_cmd->sibling = NULL; + ipr_cmd->eh_comp = NULL; + ipr_cmd->fast_done = fast_done; + timer_setup(&ipr_cmd->timer, NULL, 0); +} + +/** + * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block + * @hrrq: hrr queue + * + * Return value: + * pointer to ipr command struct + **/ +static +struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq) +{ + struct ipr_cmnd *ipr_cmd = NULL; + + if (likely(!list_empty(&hrrq->hrrq_free_q))) { + ipr_cmd = list_entry(hrrq->hrrq_free_q.next, + struct ipr_cmnd, queue); + list_del(&ipr_cmd->queue); + } + + + return ipr_cmd; +} + +/** + * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it + * @ioa_cfg: ioa config struct + * + * Return value: + * pointer to ipr command struct + **/ +static +struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_cmnd *ipr_cmd = + __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]); + ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done); + return ipr_cmd; +} + +/** + * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts + * @ioa_cfg: ioa config struct + * @clr_ints: interrupts to clear + * + * This function masks all interrupts on the adapter, then clears the + * interrupts specified in the mask + * + * Return value: + * none + **/ +static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg, + u32 clr_ints) +{ + int i; + + /* Stop new interrupts */ + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + spin_lock(&ioa_cfg->hrrq[i]._lock); + ioa_cfg->hrrq[i].allow_interrupts = 0; + spin_unlock(&ioa_cfg->hrrq[i]._lock); + } + + /* Set interrupt mask to stop all new interrupts */ + if (ioa_cfg->sis64) + writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg); + else + writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); + + /* Clear any pending interrupts */ + if (ioa_cfg->sis64) + writel(~0, ioa_cfg->regs.clr_interrupt_reg); + writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32); + readl(ioa_cfg->regs.sense_interrupt_reg); +} + +/** + * ipr_save_pcix_cmd_reg - Save PCI-X command register + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / -EIO on failure + **/ +static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) +{ + int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); + + if (pcix_cmd_reg == 0) + return 0; + + if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, + &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { + dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); + return -EIO; + } + + ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO; + return 0; +} + +/** + * ipr_set_pcix_cmd_reg - Setup 
PCI-X command register + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / -EIO on failure + **/ +static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) +{ + int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); + + if (pcix_cmd_reg) { + if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, + ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { + dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); + return -EIO; + } + } + + return 0; +} + + +/** + * __ipr_scsi_eh_done - mid-layer done function for aborted ops + * @ipr_cmd: ipr command struct + * + * This function is invoked by the interrupt handler for + * ops generated by the SCSI mid-layer which are being aborted. + * + * Return value: + * none + **/ +static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd) +{ + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + + scsi_cmd->result |= (DID_ERROR << 16); + + scsi_dma_unmap(ipr_cmd->scsi_cmd); + scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); +} + +/** + * ipr_scsi_eh_done - mid-layer done function for aborted ops + * @ipr_cmd: ipr command struct + * + * This function is invoked by the interrupt handler for + * ops generated by the SCSI mid-layer which are being aborted. + * + * Return value: + * none + **/ +static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd) +{ + unsigned long hrrq_flags; + struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; + + spin_lock_irqsave(&hrrq->_lock, hrrq_flags); + __ipr_scsi_eh_done(ipr_cmd); + spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); +} + +/** + * ipr_fail_all_ops - Fails all outstanding ops. + * @ioa_cfg: ioa config struct + * + * This function fails all outstanding ops. + * + * Return value: + * none + **/ +static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_cmnd *ipr_cmd, *temp; + struct ipr_hrr_queue *hrrq; + + ENTER; + for_each_hrrq(hrrq, ioa_cfg) { + spin_lock(&hrrq->_lock); + list_for_each_entry_safe(ipr_cmd, + temp, &hrrq->hrrq_pending_q, queue) { + list_del(&ipr_cmd->queue); + + ipr_cmd->s.ioasa.hdr.ioasc = + cpu_to_be32(IPR_IOASC_IOA_WAS_RESET); + ipr_cmd->s.ioasa.hdr.ilid = + cpu_to_be32(IPR_DRIVER_ILID); + + if (ipr_cmd->scsi_cmd) + ipr_cmd->done = __ipr_scsi_eh_done; + + ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, + IPR_IOASC_IOA_WAS_RESET); + del_timer(&ipr_cmd->timer); + ipr_cmd->done(ipr_cmd); + } + spin_unlock(&hrrq->_lock); + } + LEAVE; +} + +/** + * ipr_send_command - Send driver initiated requests. + * @ipr_cmd: ipr command struct + * + * This function sends a command to the adapter using the correct write call. + * In the case of sis64, calculate the ioarcb size required. Then or in the + * appropriate bits. + * + * Return value: + * none + **/ +static void ipr_send_command(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + dma_addr_t send_dma_addr = ipr_cmd->dma_addr; + + if (ioa_cfg->sis64) { + /* The default size is 256 bytes */ + send_dma_addr |= 0x1; + + /* If the number of ioadls * size of ioadl > 128 bytes, + then use a 512 byte ioarcb */ + if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 ) + send_dma_addr |= 0x4; + writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg); + } else + writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg); +} + +/** + * ipr_do_req - Send driver initiated requests. 
+ * @ipr_cmd: ipr command struct + * @done: done function + * @timeout_func: timeout function + * @timeout: timeout value + * + * This function sends the specified command to the adapter with the + * timeout given. The done function is invoked on command completion. + * + * Return value: + * none + **/ +static void ipr_do_req(struct ipr_cmnd *ipr_cmd, + void (*done) (struct ipr_cmnd *), + void (*timeout_func) (struct timer_list *), u32 timeout) +{ + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); + + ipr_cmd->done = done; + + ipr_cmd->timer.expires = jiffies + timeout; + ipr_cmd->timer.function = timeout_func; + + add_timer(&ipr_cmd->timer); + + ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0); + + ipr_send_command(ipr_cmd); +} + +/** + * ipr_internal_cmd_done - Op done function for an internally generated op. + * @ipr_cmd: ipr command struct + * + * This function is the op done function for an internally generated, + * blocking op. It simply wakes the sleeping thread. + * + * Return value: + * none + **/ +static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd) +{ + if (ipr_cmd->sibling) + ipr_cmd->sibling = NULL; + else + complete(&ipr_cmd->completion); +} + +/** + * ipr_init_ioadl - initialize the ioadl for the correct SIS type + * @ipr_cmd: ipr command struct + * @dma_addr: dma address + * @len: transfer length + * @flags: ioadl flag value + * + * This function initializes an ioadl in the case where there is only a single + * descriptor. + * + * Return value: + * nothing + **/ +static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr, + u32 len, int flags) +{ + struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; + + ipr_cmd->dma_use_sg = 1; + + if (ipr_cmd->ioa_cfg->sis64) { + ioadl64->flags = cpu_to_be32(flags); + ioadl64->data_len = cpu_to_be32(len); + ioadl64->address = cpu_to_be64(dma_addr); + + ipr_cmd->ioarcb.ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl64_desc)); + ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); + } else { + ioadl->flags_and_data_len = cpu_to_be32(flags | len); + ioadl->address = cpu_to_be32(dma_addr); + + if (flags == IPR_IOADL_FLAGS_READ_LAST) { + ipr_cmd->ioarcb.read_ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len); + } else { + ipr_cmd->ioarcb.ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); + } + } +} + +/** + * ipr_send_blocking_cmd - Send command and sleep on its completion. + * @ipr_cmd: ipr command struct + * @timeout_func: function to invoke if command times out + * @timeout: timeout + * + * Return value: + * none + **/ +static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd, + void (*timeout_func) (struct timer_list *), + u32 timeout) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + init_completion(&ipr_cmd->completion); + ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout); + + spin_unlock_irq(ioa_cfg->host->host_lock); + wait_for_completion(&ipr_cmd->completion); + spin_lock_irq(ioa_cfg->host->host_lock); +} + +static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) +{ + unsigned int hrrq; + + if (ioa_cfg->hrrq_num == 1) + hrrq = 0; + else { + hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); + hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; + } + return hrrq; +} + +/** + * ipr_send_hcam - Send an HCAM to the adapter. 
+ * @ioa_cfg: ioa config struct + * @type: HCAM type + * @hostrcb: hostrcb struct + * + * This function will send a Host Controlled Async command to the adapter. + * If HCAMs are currently not allowed to be issued to the adapter, it will + * place the hostrcb on the free queue. + * + * Return value: + * none + **/ +static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_cmnd *ipr_cmd; + struct ipr_ioarcb *ioarcb; + + if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); + list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q); + + ipr_cmd->u.hostrcb = hostrcb; + ioarcb = &ipr_cmd->ioarcb; + + ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM; + ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC; + ioarcb->cmd_pkt.cdb[1] = type; + ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff; + + ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma, + sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST); + + if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE) + ipr_cmd->done = ipr_process_ccn; + else + ipr_cmd->done = ipr_process_error; + + ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR); + + ipr_send_command(ipr_cmd); + } else { + list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); + } +} + +/** + * ipr_init_res_entry - Initialize a resource entry struct. + * @res: resource entry struct + * @cfgtew: config table entry wrapper struct + * + * Return value: + * none + **/ +static void ipr_init_res_entry(struct ipr_resource_entry *res, + struct ipr_config_table_entry_wrapper *cfgtew) +{ + int found = 0; + struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; + struct ipr_resource_entry *gscsi_res = NULL; + + res->needs_sync_complete = 0; + res->in_erp = 0; + res->add_to_ml = 0; + res->del_from_ml = 0; + res->resetting_device = 0; + res->reset_occurred = 0; + res->sdev = NULL; + + if (ioa_cfg->sis64) { + res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags); + res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags); + res->qmodel = IPR_QUEUEING_MODEL64(res); + res->type = cfgtew->u.cfgte64->res_type; + + memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, + sizeof(res->res_path)); + + res->bus = 0; + memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, + sizeof(res->dev_lun.scsi_lun)); + res->lun = scsilun_to_int(&res->dev_lun); + + if (res->type == IPR_RES_TYPE_GENERIC_SCSI) { + list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) { + if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) { + found = 1; + res->target = gscsi_res->target; + break; + } + } + if (!found) { + res->target = find_first_zero_bit(ioa_cfg->target_ids, + ioa_cfg->max_devs_supported); + set_bit(res->target, ioa_cfg->target_ids); + } + } else if (res->type == IPR_RES_TYPE_IOAFP) { + res->bus = IPR_IOAFP_VIRTUAL_BUS; + res->target = 0; + } else if (res->type == IPR_RES_TYPE_ARRAY) { + res->bus = IPR_ARRAY_VIRTUAL_BUS; + res->target = find_first_zero_bit(ioa_cfg->array_ids, + ioa_cfg->max_devs_supported); + set_bit(res->target, ioa_cfg->array_ids); + } else if (res->type == IPR_RES_TYPE_VOLUME_SET) { + res->bus = IPR_VSET_VIRTUAL_BUS; + res->target = find_first_zero_bit(ioa_cfg->vset_ids, + ioa_cfg->max_devs_supported); + set_bit(res->target, ioa_cfg->vset_ids); + } else { + res->target = find_first_zero_bit(ioa_cfg->target_ids, + ioa_cfg->max_devs_supported); 
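+ /*
+  * The target_ids bit map doubles as an allocator: the first clear bit is
+  * used as the resource's target number and is reserved below with set_bit().
+  */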
+ set_bit(res->target, ioa_cfg->target_ids); + } + } else { + res->qmodel = IPR_QUEUEING_MODEL(res); + res->flags = cfgtew->u.cfgte->flags; + if (res->flags & IPR_IS_IOA_RESOURCE) + res->type = IPR_RES_TYPE_IOAFP; + else + res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; + + res->bus = cfgtew->u.cfgte->res_addr.bus; + res->target = cfgtew->u.cfgte->res_addr.target; + res->lun = cfgtew->u.cfgte->res_addr.lun; + res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn); + } +} + +/** + * ipr_is_same_device - Determine if two devices are the same. + * @res: resource entry struct + * @cfgtew: config table entry wrapper struct + * + * Return value: + * 1 if the devices are the same / 0 otherwise + **/ +static int ipr_is_same_device(struct ipr_resource_entry *res, + struct ipr_config_table_entry_wrapper *cfgtew) +{ + if (res->ioa_cfg->sis64) { + if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id, + sizeof(cfgtew->u.cfgte64->dev_id)) && + !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, + sizeof(cfgtew->u.cfgte64->lun))) { + return 1; + } + } else { + if (res->bus == cfgtew->u.cfgte->res_addr.bus && + res->target == cfgtew->u.cfgte->res_addr.target && + res->lun == cfgtew->u.cfgte->res_addr.lun) + return 1; + } + + return 0; +} + +/** + * __ipr_format_res_path - Format the resource path for printing. + * @res_path: resource path + * @buffer: buffer + * @len: length of buffer provided + * + * Return value: + * pointer to buffer + **/ +static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len) +{ + int i; + char *p = buffer; + + *p = '\0'; + p += scnprintf(p, buffer + len - p, "%02X", res_path[0]); + for (i = 1; res_path[i] != 0xff && i < IPR_RES_PATH_BYTES; i++) + p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]); + + return buffer; +} + +/** + * ipr_format_res_path - Format the resource path for printing. + * @ioa_cfg: ioa config struct + * @res_path: resource path + * @buffer: buffer + * @len: length of buffer provided + * + * Return value: + * pointer to buffer + **/ +static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg, + u8 *res_path, char *buffer, int len) +{ + char *p = buffer; + + *p = '\0'; + p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no); + __ipr_format_res_path(res_path, p, len - (p - buffer)); + return buffer; +} + +/** + * ipr_update_res_entry - Update the resource entry. 
+ * @res: resource entry struct + * @cfgtew: config table entry wrapper struct + * + * Return value: + * none + **/ +static void ipr_update_res_entry(struct ipr_resource_entry *res, + struct ipr_config_table_entry_wrapper *cfgtew) +{ + char buffer[IPR_MAX_RES_PATH_LENGTH]; + int new_path = 0; + + if (res->ioa_cfg->sis64) { + res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags); + res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags); + res->type = cfgtew->u.cfgte64->res_type; + + memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data, + sizeof(struct ipr_std_inq_data)); + + res->qmodel = IPR_QUEUEING_MODEL64(res); + res->res_handle = cfgtew->u.cfgte64->res_handle; + res->dev_id = cfgtew->u.cfgte64->dev_id; + + memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, + sizeof(res->dev_lun.scsi_lun)); + + if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path, + sizeof(res->res_path))) { + memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, + sizeof(res->res_path)); + new_path = 1; + } + + if (res->sdev && new_path) + sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n", + ipr_format_res_path(res->ioa_cfg, + res->res_path, buffer, sizeof(buffer))); + } else { + res->flags = cfgtew->u.cfgte->flags; + if (res->flags & IPR_IS_IOA_RESOURCE) + res->type = IPR_RES_TYPE_IOAFP; + else + res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; + + memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data, + sizeof(struct ipr_std_inq_data)); + + res->qmodel = IPR_QUEUEING_MODEL(res); + res->res_handle = cfgtew->u.cfgte->res_handle; + } +} + +/** + * ipr_clear_res_target - Clear the bit in the bit map representing the target + * for the resource. + * @res: resource entry struct + * + * Return value: + * none + **/ +static void ipr_clear_res_target(struct ipr_resource_entry *res) +{ + struct ipr_resource_entry *gscsi_res = NULL; + struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; + + if (!ioa_cfg->sis64) + return; + + if (res->bus == IPR_ARRAY_VIRTUAL_BUS) + clear_bit(res->target, ioa_cfg->array_ids); + else if (res->bus == IPR_VSET_VIRTUAL_BUS) + clear_bit(res->target, ioa_cfg->vset_ids); + else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) { + list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) + if (gscsi_res->dev_id == res->dev_id && gscsi_res != res) + return; + clear_bit(res->target, ioa_cfg->target_ids); + + } else if (res->bus == 0) + clear_bit(res->target, ioa_cfg->target_ids); +} + +/** + * ipr_handle_config_change - Handle a config change from the adapter + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb + * + * Return value: + * none + **/ +static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_resource_entry *res = NULL; + struct ipr_config_table_entry_wrapper cfgtew; + __be32 cc_res_handle; + + u32 is_ndn = 1; + + if (ioa_cfg->sis64) { + cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64; + cc_res_handle = cfgtew.u.cfgte64->res_handle; + } else { + cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte; + cc_res_handle = cfgtew.u.cfgte->res_handle; + } + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (res->res_handle == cc_res_handle) { + is_ndn = 0; + break; + } + } + + if (is_ndn) { + if (list_empty(&ioa_cfg->free_res_q)) { + ipr_send_hcam(ioa_cfg, + IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, + hostrcb); + return; + } + + res = list_entry(ioa_cfg->free_res_q.next, + struct ipr_resource_entry, queue); + + list_del(&res->queue); + ipr_init_res_entry(res, &cfgtew); + list_add_tail(&res->queue, 
&ioa_cfg->used_res_q); + } + + ipr_update_res_entry(res, &cfgtew); + + if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) { + if (res->sdev) { + res->del_from_ml = 1; + res->res_handle = IPR_INVALID_RES_HANDLE; + schedule_work(&ioa_cfg->work_q); + } else { + ipr_clear_res_target(res); + list_move_tail(&res->queue, &ioa_cfg->free_res_q); + } + } else if (!res->sdev || res->del_from_ml) { + res->add_to_ml = 1; + schedule_work(&ioa_cfg->work_q); + } + + ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); +} + +/** + * ipr_process_ccn - Op done function for a CCN. + * @ipr_cmd: ipr command struct + * + * This function is the op done function for a configuration + * change notification host controlled async from the adapter. + * + * Return value: + * none + **/ +static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); + + list_del_init(&hostrcb->queue); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + + if (ioasc) { + if (ioasc != IPR_IOASC_IOA_WAS_RESET && + ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) + dev_err(&ioa_cfg->pdev->dev, + "Host RCB failed with IOASC: 0x%08X\n", ioasc); + + ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); + } else { + ipr_handle_config_change(ioa_cfg, hostrcb); + } +} + +/** + * strip_whitespace - Strip and pad trailing whitespace. + * @i: size of buffer + * @buf: string to modify + * + * This function will strip all trailing whitespace and + * NUL terminate the string. + * + **/ +static void strip_whitespace(int i, char *buf) +{ + if (i < 1) + return; + i--; + while (i && buf[i] == ' ') + i--; + buf[i+1] = '\0'; +} + +/** + * ipr_log_vpd_compact - Log the passed extended VPD compactly. + * @prefix: string to print at start of printk + * @hostrcb: hostrcb pointer + * @vpd: vendor/product id/sn struct + * + * Return value: + * none + **/ +static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb, + struct ipr_vpd *vpd) +{ + char vendor_id[IPR_VENDOR_ID_LEN + 1]; + char product_id[IPR_PROD_ID_LEN + 1]; + char sn[IPR_SERIAL_NUM_LEN + 1]; + + memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); + strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id); + + memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN); + strip_whitespace(IPR_PROD_ID_LEN, product_id); + + memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN); + strip_whitespace(IPR_SERIAL_NUM_LEN, sn); + + ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix, + vendor_id, product_id, sn); +} + +/** + * ipr_log_vpd - Log the passed VPD to the error log. + * @vpd: vendor/product id/sn struct + * + * Return value: + * none + **/ +static void ipr_log_vpd(struct ipr_vpd *vpd) +{ + char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + + IPR_SERIAL_NUM_LEN]; + + memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); + memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id, + IPR_PROD_ID_LEN); + buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0'; + ipr_err("Vendor/Product ID: %s\n", buffer); + + memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN); + buffer[IPR_SERIAL_NUM_LEN] = '\0'; + ipr_err(" Serial Number: %s\n", buffer); +} + +/** + * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly. 
+ * @prefix: string to print at start of printk + * @hostrcb: hostrcb pointer + * @vpd: vendor/product id/sn/wwn struct + * + * Return value: + * none + **/ +static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb, + struct ipr_ext_vpd *vpd) +{ + ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd); + ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix, + be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1])); +} + +/** + * ipr_log_ext_vpd - Log the passed extended VPD to the error log. + * @vpd: vendor/product id/sn/wwn struct + * + * Return value: + * none + **/ +static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd) +{ + ipr_log_vpd(&vpd->vpd); + ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]), + be32_to_cpu(vpd->wwid[1])); +} + +/** + * ipr_log_enhanced_cache_error - Log a cache error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_12_error *error; + + if (ioa_cfg->sis64) + error = &hostrcb->hcam.u.error64.u.type_12_error; + else + error = &hostrcb->hcam.u.error.u.type_12_error; + + ipr_err("-----Current Configuration-----\n"); + ipr_err("Cache Directory Card Information:\n"); + ipr_log_ext_vpd(&error->ioa_vpd); + ipr_err("Adapter Card Information:\n"); + ipr_log_ext_vpd(&error->cfc_vpd); + + ipr_err("-----Expected Configuration-----\n"); + ipr_err("Cache Directory Card Information:\n"); + ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd); + ipr_err("Adapter Card Information:\n"); + ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd); + + ipr_err("Additional IOA Data: %08X %08X %08X\n", + be32_to_cpu(error->ioa_data[0]), + be32_to_cpu(error->ioa_data[1]), + be32_to_cpu(error->ioa_data[2])); +} + +/** + * ipr_log_cache_error - Log a cache error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_02_error *error = + &hostrcb->hcam.u.error.u.type_02_error; + + ipr_err("-----Current Configuration-----\n"); + ipr_err("Cache Directory Card Information:\n"); + ipr_log_vpd(&error->ioa_vpd); + ipr_err("Adapter Card Information:\n"); + ipr_log_vpd(&error->cfc_vpd); + + ipr_err("-----Expected Configuration-----\n"); + ipr_err("Cache Directory Card Information:\n"); + ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd); + ipr_err("Adapter Card Information:\n"); + ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd); + + ipr_err("Additional IOA Data: %08X %08X %08X\n", + be32_to_cpu(error->ioa_data[0]), + be32_to_cpu(error->ioa_data[1]), + be32_to_cpu(error->ioa_data[2])); +} + +/** + * ipr_log_enhanced_config_error - Log a configuration error. 
+ * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int errors_logged, i; + struct ipr_hostrcb_device_data_entry_enhanced *dev_entry; + struct ipr_hostrcb_type_13_error *error; + + error = &hostrcb->hcam.u.error.u.type_13_error; + errors_logged = be32_to_cpu(error->errors_logged); + + ipr_err("Device Errors Detected/Logged: %d/%d\n", + be32_to_cpu(error->errors_detected), errors_logged); + + dev_entry = error->dev; + + for (i = 0; i < errors_logged; i++, dev_entry++) { + ipr_err_separator; + + ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); + ipr_log_ext_vpd(&dev_entry->vpd); + + ipr_err("-----New Device Information-----\n"); + ipr_log_ext_vpd(&dev_entry->new_vpd); + + ipr_err("Cache Directory Card Information:\n"); + ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); + + ipr_err("Adapter Card Information:\n"); + ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); + } +} + +/** + * ipr_log_sis64_config_error - Log a device error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int errors_logged, i; + struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry; + struct ipr_hostrcb_type_23_error *error; + char buffer[IPR_MAX_RES_PATH_LENGTH]; + + error = &hostrcb->hcam.u.error64.u.type_23_error; + errors_logged = be32_to_cpu(error->errors_logged); + + ipr_err("Device Errors Detected/Logged: %d/%d\n", + be32_to_cpu(error->errors_detected), errors_logged); + + dev_entry = error->dev; + + for (i = 0; i < errors_logged; i++, dev_entry++) { + ipr_err_separator; + + ipr_err("Device %d : %s", i + 1, + __ipr_format_res_path(dev_entry->res_path, + buffer, sizeof(buffer))); + ipr_log_ext_vpd(&dev_entry->vpd); + + ipr_err("-----New Device Information-----\n"); + ipr_log_ext_vpd(&dev_entry->new_vpd); + + ipr_err("Cache Directory Card Information:\n"); + ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); + + ipr_err("Adapter Card Information:\n"); + ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); + } +} + +/** + * ipr_log_config_error - Log a configuration error. 
+ * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int errors_logged, i; + struct ipr_hostrcb_device_data_entry *dev_entry; + struct ipr_hostrcb_type_03_error *error; + + error = &hostrcb->hcam.u.error.u.type_03_error; + errors_logged = be32_to_cpu(error->errors_logged); + + ipr_err("Device Errors Detected/Logged: %d/%d\n", + be32_to_cpu(error->errors_detected), errors_logged); + + dev_entry = error->dev; + + for (i = 0; i < errors_logged; i++, dev_entry++) { + ipr_err_separator; + + ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); + ipr_log_vpd(&dev_entry->vpd); + + ipr_err("-----New Device Information-----\n"); + ipr_log_vpd(&dev_entry->new_vpd); + + ipr_err("Cache Directory Card Information:\n"); + ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd); + + ipr_err("Adapter Card Information:\n"); + ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd); + + ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n", + be32_to_cpu(dev_entry->ioa_data[0]), + be32_to_cpu(dev_entry->ioa_data[1]), + be32_to_cpu(dev_entry->ioa_data[2]), + be32_to_cpu(dev_entry->ioa_data[3]), + be32_to_cpu(dev_entry->ioa_data[4])); + } +} + +/** + * ipr_log_enhanced_array_error - Log an array configuration error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int i, num_entries; + struct ipr_hostrcb_type_14_error *error; + struct ipr_hostrcb_array_data_entry_enhanced *array_entry; + const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; + + error = &hostrcb->hcam.u.error.u.type_14_error; + + ipr_err_separator; + + ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", + error->protection_level, + ioa_cfg->host->host_no, + error->last_func_vset_res_addr.bus, + error->last_func_vset_res_addr.target, + error->last_func_vset_res_addr.lun); + + ipr_err_separator; + + array_entry = error->array_member; + num_entries = min_t(u32, be32_to_cpu(error->num_entries), + ARRAY_SIZE(error->array_member)); + + for (i = 0; i < num_entries; i++, array_entry++) { + if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) + continue; + + if (be32_to_cpu(error->exposed_mode_adn) == i) + ipr_err("Exposed Array Member %d:\n", i); + else + ipr_err("Array Member %d:\n", i); + + ipr_log_ext_vpd(&array_entry->vpd); + ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); + ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, + "Expected Location"); + + ipr_err_separator; + } +} + +/** + * ipr_log_array_error - Log an array configuration error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int i; + struct ipr_hostrcb_type_04_error *error; + struct ipr_hostrcb_array_data_entry *array_entry; + const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... 
IPR_SERIAL_NUM_LEN-1] = '0' }; + + error = &hostrcb->hcam.u.error.u.type_04_error; + + ipr_err_separator; + + ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", + error->protection_level, + ioa_cfg->host->host_no, + error->last_func_vset_res_addr.bus, + error->last_func_vset_res_addr.target, + error->last_func_vset_res_addr.lun); + + ipr_err_separator; + + array_entry = error->array_member; + + for (i = 0; i < 18; i++) { + if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) + continue; + + if (be32_to_cpu(error->exposed_mode_adn) == i) + ipr_err("Exposed Array Member %d:\n", i); + else + ipr_err("Array Member %d:\n", i); + + ipr_log_vpd(&array_entry->vpd); + + ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); + ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, + "Expected Location"); + + ipr_err_separator; + + if (i == 9) + array_entry = error->array_member2; + else + array_entry++; + } +} + +/** + * ipr_log_hex_data - Log additional hex IOA error data. + * @ioa_cfg: ioa config struct + * @data: IOA error data + * @len: data length + * + * Return value: + * none + **/ +static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len) +{ + int i; + + if (len == 0) + return; + + if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) + len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP); + + for (i = 0; i < len / 4; i += 4) { + ipr_err("%08X: %08X %08X %08X %08X\n", i*4, + be32_to_cpu(data[i]), + be32_to_cpu(data[i+1]), + be32_to_cpu(data[i+2]), + be32_to_cpu(data[i+3])); + } +} + +/** + * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_17_error *error; + + if (ioa_cfg->sis64) + error = &hostrcb->hcam.u.error64.u.type_17_error; + else + error = &hostrcb->hcam.u.error.u.type_17_error; + + error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; + strim(error->failure_reason); + + ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, + be32_to_cpu(hostrcb->hcam.u.error.prc)); + ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd); + ipr_log_hex_data(ioa_cfg, error->data, + be32_to_cpu(hostrcb->hcam.length) - + (offsetof(struct ipr_hostrcb_error, u) + + offsetof(struct ipr_hostrcb_type_17_error, data))); +} + +/** + * ipr_log_dual_ioa_error - Log a dual adapter error. 
+ * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_07_error *error; + + error = &hostrcb->hcam.u.error.u.type_07_error; + error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; + strim(error->failure_reason); + + ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, + be32_to_cpu(hostrcb->hcam.u.error.prc)); + ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd); + ipr_log_hex_data(ioa_cfg, error->data, + be32_to_cpu(hostrcb->hcam.length) - + (offsetof(struct ipr_hostrcb_error, u) + + offsetof(struct ipr_hostrcb_type_07_error, data))); +} + +static const struct { + u8 active; + char *desc; +} path_active_desc[] = { + { IPR_PATH_NO_INFO, "Path" }, + { IPR_PATH_ACTIVE, "Active path" }, + { IPR_PATH_NOT_ACTIVE, "Inactive path" } +}; + +static const struct { + u8 state; + char *desc; +} path_state_desc[] = { + { IPR_PATH_STATE_NO_INFO, "has no path state information available" }, + { IPR_PATH_HEALTHY, "is healthy" }, + { IPR_PATH_DEGRADED, "is degraded" }, + { IPR_PATH_FAILED, "is failed" } +}; + +/** + * ipr_log_fabric_path - Log a fabric path error + * @hostrcb: hostrcb struct + * @fabric: fabric descriptor + * + * Return value: + * none + **/ +static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb, + struct ipr_hostrcb_fabric_desc *fabric) +{ + int i, j; + u8 path_state = fabric->path_state; + u8 active = path_state & IPR_PATH_ACTIVE_MASK; + u8 state = path_state & IPR_PATH_STATE_MASK; + + for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) { + if (path_active_desc[i].active != active) + continue; + + for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) { + if (path_state_desc[j].state != state) + continue; + + if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) { + ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n", + path_active_desc[i].desc, path_state_desc[j].desc, + fabric->ioa_port); + } else if (fabric->cascaded_expander == 0xff) { + ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n", + path_active_desc[i].desc, path_state_desc[j].desc, + fabric->ioa_port, fabric->phy); + } else if (fabric->phy == 0xff) { + ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n", + path_active_desc[i].desc, path_state_desc[j].desc, + fabric->ioa_port, fabric->cascaded_expander); + } else { + ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n", + path_active_desc[i].desc, path_state_desc[j].desc, + fabric->ioa_port, fabric->cascaded_expander, fabric->phy); + } + return; + } + } + + ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state, + fabric->ioa_port, fabric->cascaded_expander, fabric->phy); +} + +/** + * ipr_log64_fabric_path - Log a fabric path error + * @hostrcb: hostrcb struct + * @fabric: fabric descriptor + * + * Return value: + * none + **/ +static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb, + struct ipr_hostrcb64_fabric_desc *fabric) +{ + int i, j; + u8 path_state = fabric->path_state; + u8 active = path_state & IPR_PATH_ACTIVE_MASK; + u8 state = path_state & IPR_PATH_STATE_MASK; + char buffer[IPR_MAX_RES_PATH_LENGTH]; + + for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) { + if (path_active_desc[i].active != active) + continue; + + for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) { + if (path_state_desc[j].state != state) + continue; + + ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n", + path_active_desc[i].desc, path_state_desc[j].desc, + 
ipr_format_res_path(hostrcb->ioa_cfg, + fabric->res_path, + buffer, sizeof(buffer))); + return; + } + } + + ipr_err("Path state=%02X Resource Path=%s\n", path_state, + ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path, + buffer, sizeof(buffer))); +} + +static const struct { + u8 type; + char *desc; +} path_type_desc[] = { + { IPR_PATH_CFG_IOA_PORT, "IOA port" }, + { IPR_PATH_CFG_EXP_PORT, "Expander port" }, + { IPR_PATH_CFG_DEVICE_PORT, "Device port" }, + { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" } +}; + +static const struct { + u8 status; + char *desc; +} path_status_desc[] = { + { IPR_PATH_CFG_NO_PROB, "Functional" }, + { IPR_PATH_CFG_DEGRADED, "Degraded" }, + { IPR_PATH_CFG_FAILED, "Failed" }, + { IPR_PATH_CFG_SUSPECT, "Suspect" }, + { IPR_PATH_NOT_DETECTED, "Missing" }, + { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" } +}; + +static const char *link_rate[] = { + "unknown", + "disabled", + "phy reset problem", + "spinup hold", + "port selector", + "unknown", + "unknown", + "unknown", + "1.5Gbps", + "3.0Gbps", + "unknown", + "unknown", + "unknown", + "unknown", + "unknown", + "unknown" +}; + +/** + * ipr_log_path_elem - Log a fabric path element. + * @hostrcb: hostrcb struct + * @cfg: fabric path element struct + * + * Return value: + * none + **/ +static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb, + struct ipr_hostrcb_config_element *cfg) +{ + int i, j; + u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; + u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; + + if (type == IPR_PATH_CFG_NOT_EXIST) + return; + + for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) { + if (path_type_desc[i].type != type) + continue; + + for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) { + if (path_status_desc[j].status != status) + continue; + + if (type == IPR_PATH_CFG_IOA_PORT) { + ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n", + path_status_desc[j].desc, path_type_desc[i].desc, + cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); + } else { + if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) { + ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n", + path_status_desc[j].desc, path_type_desc[i].desc, + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); + } else if (cfg->cascaded_expander == 0xff) { + ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, " + "WWN=%08X%08X\n", path_status_desc[j].desc, + path_type_desc[i].desc, cfg->phy, + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); + } else if (cfg->phy == 0xff) { + ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, " + "WWN=%08X%08X\n", path_status_desc[j].desc, + path_type_desc[i].desc, cfg->cascaded_expander, + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); + } else { + ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s " + "WWN=%08X%08X\n", path_status_desc[j].desc, + path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy, + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); + } + } + return; + } + } + + ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s " + "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy, + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); +} + +/** + * 
ipr_log64_path_elem - Log a fabric path element. + * @hostrcb: hostrcb struct + * @cfg: fabric path element struct + * + * Return value: + * none + **/ +static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb, + struct ipr_hostrcb64_config_element *cfg) +{ + int i, j; + u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK; + u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; + u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; + char buffer[IPR_MAX_RES_PATH_LENGTH]; + + if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64) + return; + + for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) { + if (path_type_desc[i].type != type) + continue; + + for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) { + if (path_status_desc[j].status != status) + continue; + + ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n", + path_status_desc[j].desc, path_type_desc[i].desc, + ipr_format_res_path(hostrcb->ioa_cfg, + cfg->res_path, buffer, sizeof(buffer)), + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), + be32_to_cpu(cfg->wwid[1])); + return; + } + } + ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s " + "WWN=%08X%08X\n", cfg->type_status, + ipr_format_res_path(hostrcb->ioa_cfg, + cfg->res_path, buffer, sizeof(buffer)), + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); +} + +/** + * ipr_log_fabric_error - Log a fabric error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_20_error *error; + struct ipr_hostrcb_fabric_desc *fabric; + struct ipr_hostrcb_config_element *cfg; + int i, add_len; + + error = &hostrcb->hcam.u.error.u.type_20_error; + error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; + ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); + + add_len = be32_to_cpu(hostrcb->hcam.length) - + (offsetof(struct ipr_hostrcb_error, u) + + offsetof(struct ipr_hostrcb_type_20_error, desc)); + + for (i = 0, fabric = error->desc; i < error->num_entries; i++) { + ipr_log_fabric_path(hostrcb, fabric); + for_each_fabric_cfg(fabric, cfg) + ipr_log_path_elem(hostrcb, cfg); + + add_len -= be16_to_cpu(fabric->length); + fabric = (struct ipr_hostrcb_fabric_desc *) + ((unsigned long)fabric + be16_to_cpu(fabric->length)); + } + + ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len); +} + +/** + * ipr_log_sis64_array_error - Log a sis64 array error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int i, num_entries; + struct ipr_hostrcb_type_24_error *error; + struct ipr_hostrcb64_array_data_entry *array_entry; + char buffer[IPR_MAX_RES_PATH_LENGTH]; + const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... 
IPR_SERIAL_NUM_LEN-1] = '0' }; + + error = &hostrcb->hcam.u.error64.u.type_24_error; + + ipr_err_separator; + + ipr_err("RAID %s Array Configuration: %s\n", + error->protection_level, + ipr_format_res_path(ioa_cfg, error->last_res_path, + buffer, sizeof(buffer))); + + ipr_err_separator; + + array_entry = error->array_member; + num_entries = min_t(u32, error->num_entries, + ARRAY_SIZE(error->array_member)); + + for (i = 0; i < num_entries; i++, array_entry++) { + + if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) + continue; + + if (error->exposed_mode_adn == i) + ipr_err("Exposed Array Member %d:\n", i); + else + ipr_err("Array Member %d:\n", i); + + ipr_err("Array Member %d:\n", i); + ipr_log_ext_vpd(&array_entry->vpd); + ipr_err("Current Location: %s\n", + ipr_format_res_path(ioa_cfg, array_entry->res_path, + buffer, sizeof(buffer))); + ipr_err("Expected Location: %s\n", + ipr_format_res_path(ioa_cfg, + array_entry->expected_res_path, + buffer, sizeof(buffer))); + + ipr_err_separator; + } +} + +/** + * ipr_log_sis64_fabric_error - Log a sis64 fabric error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_30_error *error; + struct ipr_hostrcb64_fabric_desc *fabric; + struct ipr_hostrcb64_config_element *cfg; + int i, add_len; + + error = &hostrcb->hcam.u.error64.u.type_30_error; + + error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; + ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); + + add_len = be32_to_cpu(hostrcb->hcam.length) - + (offsetof(struct ipr_hostrcb64_error, u) + + offsetof(struct ipr_hostrcb_type_30_error, desc)); + + for (i = 0, fabric = error->desc; i < error->num_entries; i++) { + ipr_log64_fabric_path(hostrcb, fabric); + for_each_fabric_cfg(fabric, cfg) + ipr_log64_path_elem(hostrcb, cfg); + + add_len -= be16_to_cpu(fabric->length); + fabric = (struct ipr_hostrcb64_fabric_desc *) + ((unsigned long)fabric + be16_to_cpu(fabric->length)); + } + + ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len); +} + +/** + * ipr_log_sis64_service_required_error - Log a sis64 service required error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_41_error *error; + + error = &hostrcb->hcam.u.error64.u.type_41_error; + + error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; + ipr_err("Primary Failure Reason: %s\n", error->failure_reason); + ipr_log_hex_data(ioa_cfg, error->data, + be32_to_cpu(hostrcb->hcam.length) - + (offsetof(struct ipr_hostrcb_error, u) + + offsetof(struct ipr_hostrcb_type_41_error, data))); +} +/** + * ipr_log_generic_error - Log an adapter error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data, + be32_to_cpu(hostrcb->hcam.length)); +} + +/** + * ipr_log_sis64_device_error - Log a cache error. 
+ * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_21_error *error; + char buffer[IPR_MAX_RES_PATH_LENGTH]; + + error = &hostrcb->hcam.u.error64.u.type_21_error; + + ipr_err("-----Failing Device Information-----\n"); + ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n", + be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]), + be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3])); + ipr_err("Device Resource Path: %s\n", + __ipr_format_res_path(error->res_path, + buffer, sizeof(buffer))); + error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0'; + error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0'; + ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc); + ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc); + ipr_err("SCSI Sense Data:\n"); + ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data)); + ipr_err("SCSI Command Descriptor Block: \n"); + ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb)); + + ipr_err("Additional IOA Data:\n"); + ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error)); +} + +/** + * ipr_get_error - Find the specified IOASC in the ipr_error_table. + * @ioasc: IOASC + * + * This function will return the index into the ipr_error_table + * for the specified IOASC. If the IOASC is not in the table, + * 0 will be returned, which points to the entry used for unknown errors. + * + * Return value: + * index into the ipr_error_table + **/ +static u32 ipr_get_error(u32 ioasc) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++) + if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK)) + return i; + + return 0; +} + +/** + * ipr_handle_log_data - Log an adapter error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * This function logs an adapter error to the system.
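+ * For SIS-32 bus-reset IOASCs the SCSI midlayer is notified of the reset so
+ * it can handle the resulting unit attentions; the IOASC is then matched
+ * against ipr_error_table and the hostrcb is dispatched to the logging
+ * routine for its overlay id.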
+ * + * Return value: + * none + **/ +static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + u32 ioasc; + int error_index; + struct ipr_hostrcb_type_21_error *error; + + if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY) + return; + + if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) + dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); + + if (ioa_cfg->sis64) + ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); + else + ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); + + if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET || + ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) { + /* Tell the midlayer we had a bus reset so it will handle the UA properly */ + scsi_report_bus_reset(ioa_cfg->host, + hostrcb->hcam.u.error.fd_res_addr.bus); + } + + error_index = ipr_get_error(ioasc); + + if (!ipr_error_table[error_index].log_hcam) + return; + + if (ioasc == IPR_IOASC_HW_CMD_FAILED && + hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) { + error = &hostrcb->hcam.u.error64.u.type_21_error; + + if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST && + ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) + return; + } + + ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error); + + /* Set indication we have logged an error */ + ioa_cfg->errors_logged++; + + if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam) + return; + if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw)) + hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw)); + + switch (hostrcb->hcam.overlay_id) { + case IPR_HOST_RCB_OVERLAY_ID_2: + ipr_log_cache_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_3: + ipr_log_config_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_4: + case IPR_HOST_RCB_OVERLAY_ID_6: + ipr_log_array_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_7: + ipr_log_dual_ioa_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_12: + ipr_log_enhanced_cache_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_13: + ipr_log_enhanced_config_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_14: + case IPR_HOST_RCB_OVERLAY_ID_16: + ipr_log_enhanced_array_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_17: + ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_20: + ipr_log_fabric_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_21: + ipr_log_sis64_device_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_23: + ipr_log_sis64_config_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_24: + case IPR_HOST_RCB_OVERLAY_ID_26: + ipr_log_sis64_array_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_30: + ipr_log_sis64_fabric_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_41: + ipr_log_sis64_service_required_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_1: + case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: + default: + ipr_log_generic_error(ioa_cfg, hostrcb); + break; + } +} + +static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa) +{ + struct ipr_hostrcb *hostrcb; + + hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q, + struct ipr_hostrcb, queue); + + if (unlikely(!hostrcb)) { + dev_info(&ioa->pdev->dev, "Reclaiming async error buffers."); + hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q, + struct 
ipr_hostrcb, queue); + } + + list_del_init(&hostrcb->queue); + return hostrcb; +} + +/** + * ipr_process_error - Op done function for an adapter error log. + * @ipr_cmd: ipr command struct + * + * This function is the op done function for an error log host + * controlled async from the adapter. It will log the error and + * send the HCAM back to the adapter. + * + * Return value: + * none + **/ +static void ipr_process_error(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); + u32 fd_ioasc; + + if (ioa_cfg->sis64) + fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); + else + fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); + + list_del_init(&hostrcb->queue); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + + if (!ioasc) { + ipr_handle_log_data(ioa_cfg, hostrcb); + if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED) + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); + } else if (ioasc != IPR_IOASC_IOA_WAS_RESET && + ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) { + dev_err(&ioa_cfg->pdev->dev, + "Host RCB failed with IOASC: 0x%08X\n", ioasc); + } + + list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q); + schedule_work(&ioa_cfg->work_q); + hostrcb = ipr_get_free_hostrcb(ioa_cfg); + + ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); +} + +/** + * ipr_timeout - An internally generated op has timed out. + * @t: Timer context used to fetch ipr command struct + * + * This function blocks host requests and initiates an + * adapter reset. + * + * Return value: + * none + **/ +static void ipr_timeout(struct timer_list *t) +{ + struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); + unsigned long lock_flags = 0; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + ioa_cfg->errors_logged++; + dev_err(&ioa_cfg->pdev->dev, + "Adapter being reset due to command timeout.\n"); + + if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) + ioa_cfg->sdt_state = GET_DUMP; + + if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + LEAVE; +} + +/** + * ipr_oper_timeout - Adapter timed out transitioning to operational + * @t: Timer context used to fetch ipr command struct + * + * This function blocks host requests and initiates an + * adapter reset. 
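+ * When the ipr_fastfail module parameter is set, reset_retries is advanced
+ * by IPR_NUM_RESET_RELOAD_RETRIES before the reset is initiated, effectively
+ * consuming the remaining reset/reload retry budget.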
+ * + * Return value: + * none + **/ +static void ipr_oper_timeout(struct timer_list *t) +{ + struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); + unsigned long lock_flags = 0; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + ioa_cfg->errors_logged++; + dev_err(&ioa_cfg->pdev->dev, + "Adapter timed out transitioning to operational.\n"); + + if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) + ioa_cfg->sdt_state = GET_DUMP; + + if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) { + if (ipr_fastfail) + ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + LEAVE; +} + +/** + * ipr_find_ses_entry - Find matching SES in SES table + * @res: resource entry struct of SES + * + * Return value: + * pointer to SES table entry / NULL on failure + **/ +static const struct ipr_ses_table_entry * +ipr_find_ses_entry(struct ipr_resource_entry *res) +{ + int i, j, matches; + struct ipr_std_inq_vpids *vpids; + const struct ipr_ses_table_entry *ste = ipr_ses_table; + + for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) { + for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) { + if (ste->compare_product_id_byte[j] == 'X') { + vpids = &res->std_inq_data.vpids; + if (vpids->product_id[j] == ste->product_id[j]) + matches++; + else + break; + } else + matches++; + } + + if (matches == IPR_PROD_ID_LEN) + return ste; + } + + return NULL; +} + +/** + * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus + * @ioa_cfg: ioa config struct + * @bus: SCSI bus + * @bus_width: bus width + * + * Return value: + * SCSI bus speed in units of 100KHz, 1600 is 160 MHz + * For a 2-byte wide SCSI bus, the maximum transfer speed is + * twice the maximum transfer rate (e.g. for a wide enabled bus, + * max 160MHz = max 320MB/sec). + **/ +static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width) +{ + struct ipr_resource_entry *res; + const struct ipr_ses_table_entry *ste; + u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width); + + /* Loop through each config table entry in the config table buffer */ + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (!(IPR_IS_SES_DEVICE(res->std_inq_data))) + continue; + + if (bus != res->bus) + continue; + + if (!(ste = ipr_find_ses_entry(res))) + continue; + + max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8); + } + + return max_xfer_rate; +} + +/** + * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA + * @ioa_cfg: ioa config struct + * @max_delay: max delay in micro-seconds to wait + * + * Waits for an IODEBUG ACK from the IOA, doing busy looping. 
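+ * The polling interval doubles on every iteration (1, 2, 4, ...
+ * microseconds) and switches from udelay() to mdelay() once a
+ * single interval would exceed MAX_UDELAY_MS milliseconds.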
+ * + * Return value: + * 0 on success / other on failure + **/ +static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay) +{ + volatile u32 pcii_reg; + int delay = 1; + + /* Read interrupt reg until IOA signals IO Debug Acknowledge */ + while (delay < max_delay) { + pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg); + + if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE) + return 0; + + /* udelay cannot be used if delay is more than a few milliseconds */ + if ((delay / 1000) > MAX_UDELAY_MS) + mdelay(delay / 1000); + else + udelay(delay); + + delay += delay; + } + return -EIO; +} + +/** + * ipr_get_sis64_dump_data_section - Dump IOA memory + * @ioa_cfg: ioa config struct + * @start_addr: adapter address to dump + * @dest: destination kernel buffer + * @length_in_words: length to dump in 4 byte words + * + * Return value: + * 0 on success + **/ +static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg, + u32 start_addr, + __be32 *dest, u32 length_in_words) +{ + int i; + + for (i = 0; i < length_in_words; i++) { + writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg); + *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg)); + dest++; + } + + return 0; +} + +/** + * ipr_get_ldump_data_section - Dump IOA memory + * @ioa_cfg: ioa config struct + * @start_addr: adapter address to dump + * @dest: destination kernel buffer + * @length_in_words: length to dump in 4 byte words + * + * Return value: + * 0 on success / -EIO on failure + **/ +static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, + u32 start_addr, + __be32 *dest, u32 length_in_words) +{ + volatile u32 temp_pcii_reg; + int i, delay = 0; + + if (ioa_cfg->sis64) + return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr, + dest, length_in_words); + + /* Write IOA interrupt reg starting LDUMP state */ + writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT), + ioa_cfg->regs.set_uproc_interrupt_reg32); + + /* Wait for IO debug acknowledge */ + if (ipr_wait_iodbg_ack(ioa_cfg, + IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) { + dev_err(&ioa_cfg->pdev->dev, + "IOA dump long data transfer timeout\n"); + return -EIO; + } + + /* Signal LDUMP interlocked - clear IO debug ack */ + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, + ioa_cfg->regs.clr_interrupt_reg); + + /* Write Mailbox with starting address */ + writel(start_addr, ioa_cfg->ioa_mailbox); + + /* Signal address valid - clear IOA Reset alert */ + writel(IPR_UPROCI_RESET_ALERT, + ioa_cfg->regs.clr_uproc_interrupt_reg32); + + for (i = 0; i < length_in_words; i++) { + /* Wait for IO debug acknowledge */ + if (ipr_wait_iodbg_ack(ioa_cfg, + IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) { + dev_err(&ioa_cfg->pdev->dev, + "IOA dump short data transfer timeout\n"); + return -EIO; + } + + /* Read data from mailbox and increment destination pointer */ + *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox)); + dest++; + + /* For all but the last word of data, signal data received */ + if (i < (length_in_words - 1)) { + /* Signal dump data received - Clear IO debug Ack */ + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, + ioa_cfg->regs.clr_interrupt_reg); + } + } + + /* Signal end of block transfer. 
Set reset alert then clear IO debug ack */ + writel(IPR_UPROCI_RESET_ALERT, + ioa_cfg->regs.set_uproc_interrupt_reg32); + + writel(IPR_UPROCI_IO_DEBUG_ALERT, + ioa_cfg->regs.clr_uproc_interrupt_reg32); + + /* Signal dump data received - Clear IO debug Ack */ + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, + ioa_cfg->regs.clr_interrupt_reg); + + /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ + while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) { + temp_pcii_reg = + readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); + + if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT)) + return 0; + + udelay(10); + delay += 10; + } + + return 0; +} + +#ifdef CONFIG_SCSI_IPR_DUMP +/** + * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer + * @ioa_cfg: ioa config struct + * @pci_address: adapter address + * @length: length of data to copy + * + * Copy data from PCI adapter to kernel buffer. + * Note: length MUST be a 4 byte multiple + * Return value: + * 0 on success / other on failure + **/ +static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg, + unsigned long pci_address, u32 length) +{ + int bytes_copied = 0; + int cur_len, rc, rem_len, rem_page_len, max_dump_size; + __be32 *page; + unsigned long lock_flags = 0; + struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; + + if (ioa_cfg->sis64) + max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; + else + max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; + + while (bytes_copied < length && + (ioa_dump->hdr.len + bytes_copied) < max_dump_size) { + if (ioa_dump->page_offset >= PAGE_SIZE || + ioa_dump->page_offset == 0) { + page = (__be32 *)__get_free_page(GFP_ATOMIC); + + if (!page) { + ipr_trace; + return bytes_copied; + } + + ioa_dump->page_offset = 0; + ioa_dump->ioa_data[ioa_dump->next_page_index] = page; + ioa_dump->next_page_index++; + } else + page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1]; + + rem_len = length - bytes_copied; + rem_page_len = PAGE_SIZE - ioa_dump->page_offset; + cur_len = min(rem_len, rem_page_len); + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->sdt_state == ABORT_DUMP) { + rc = -EIO; + } else { + rc = ipr_get_ldump_data_section(ioa_cfg, + pci_address + bytes_copied, + &page[ioa_dump->page_offset / 4], + (cur_len / sizeof(u32))); + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + if (!rc) { + ioa_dump->page_offset += cur_len; + bytes_copied += cur_len; + } else { + ipr_trace; + break; + } + schedule(); + } + + return bytes_copied; +} + +/** + * ipr_init_dump_entry_hdr - Initialize a dump entry header. + * @hdr: dump entry header struct + * + * Return value: + * nothing + **/ +static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr) +{ + hdr->eye_catcher = IPR_DUMP_EYE_CATCHER; + hdr->num_elems = 1; + hdr->offset = sizeof(*hdr); + hdr->status = IPR_DUMP_STATUS_SUCCESS; +} + +/** + * ipr_dump_ioa_type_data - Fill in the adapter type in the dump. 
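+ * The fw_version field is packed from the microcode VPD, most
+ * significant byte first: major release, card type, then the two
+ * minor release bytes.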
+ * @ioa_cfg: ioa config struct + * @driver_dump: driver dump struct + * + * Return value: + * nothing + **/ +static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_driver_dump *driver_dump) +{ + struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; + + ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr); + driver_dump->ioa_type_entry.hdr.len = + sizeof(struct ipr_dump_ioa_type_entry) - + sizeof(struct ipr_dump_entry_header); + driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; + driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID; + driver_dump->ioa_type_entry.type = ioa_cfg->type; + driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) | + (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) | + ucode_vpd->minor_release[1]; + driver_dump->hdr.num_entries++; +} + +/** + * ipr_dump_version_data - Fill in the driver version in the dump. + * @ioa_cfg: ioa config struct + * @driver_dump: driver dump struct + * + * Return value: + * nothing + **/ +static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_driver_dump *driver_dump) +{ + ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr); + driver_dump->version_entry.hdr.len = + sizeof(struct ipr_dump_version_entry) - + sizeof(struct ipr_dump_entry_header); + driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; + driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID; + strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION); + driver_dump->hdr.num_entries++; +} + +/** + * ipr_dump_trace_data - Fill in the IOA trace in the dump. + * @ioa_cfg: ioa config struct + * @driver_dump: driver dump struct + * + * Return value: + * nothing + **/ +static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_driver_dump *driver_dump) +{ + ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr); + driver_dump->trace_entry.hdr.len = + sizeof(struct ipr_dump_trace_entry) - + sizeof(struct ipr_dump_entry_header); + driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; + driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID; + memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); + driver_dump->hdr.num_entries++; +} + +/** + * ipr_dump_location_data - Fill in the IOA location in the dump. + * @ioa_cfg: ioa config struct + * @driver_dump: driver dump struct + * + * Return value: + * nothing + **/ +static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_driver_dump *driver_dump) +{ + ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr); + driver_dump->location_entry.hdr.len = + sizeof(struct ipr_dump_location_entry) - + sizeof(struct ipr_dump_entry_header); + driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; + driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID; + strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev)); + driver_dump->hdr.num_entries++; +} + +/** + * ipr_get_ioa_dump - Perform a dump of the driver and adapter. 
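+ * The driver portion of the dump (version, location, adapter type
+ * and trace entries) is filled in first; the IOA data is then
+ * fetched section by section using the adapter's Smart Dump Table.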
+ * @ioa_cfg: ioa config struct + * @dump: dump struct + * + * Return value: + * nothing + **/ +static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) +{ + unsigned long start_addr, sdt_word; + unsigned long lock_flags = 0; + struct ipr_driver_dump *driver_dump = &dump->driver_dump; + struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; + u32 num_entries, max_num_entries, start_off, end_off; + u32 max_dump_size, bytes_to_copy, bytes_copied, rc; + struct ipr_sdt *sdt; + int valid = 1; + int i; + + ENTER; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (ioa_cfg->sdt_state != READ_DUMP) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + if (ioa_cfg->sis64) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + ssleep(IPR_DUMP_DELAY_SECONDS); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + } + + start_addr = readl(ioa_cfg->ioa_mailbox); + + if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { + dev_err(&ioa_cfg->pdev->dev, + "Invalid dump table format: %lx\n", start_addr); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n"); + + driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER; + + /* Initialize the overall dump header */ + driver_dump->hdr.len = sizeof(struct ipr_driver_dump); + driver_dump->hdr.num_entries = 1; + driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header); + driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS; + driver_dump->hdr.os = IPR_DUMP_OS_LINUX; + driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME; + + ipr_dump_version_data(ioa_cfg, driver_dump); + ipr_dump_location_data(ioa_cfg, driver_dump); + ipr_dump_ioa_type_data(ioa_cfg, driver_dump); + ipr_dump_trace_data(ioa_cfg, driver_dump); + + /* Update dump_header */ + driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header); + + /* IOA Dump entry */ + ipr_init_dump_entry_hdr(&ioa_dump->hdr); + ioa_dump->hdr.len = 0; + ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; + ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; + + /* First entries in sdt are actually a list of dump addresses and + lengths to gather the real dump data. sdt represents the pointer + to the ioa generated dump table. Dump data will be extracted based + on entries in this table */ + sdt = &ioa_dump->sdt; + + if (ioa_cfg->sis64) { + max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES; + max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; + } else { + max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES; + max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; + } + + bytes_to_copy = offsetof(struct ipr_sdt, entry) + + (max_num_entries * sizeof(struct ipr_sdt_entry)); + rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt, + bytes_to_copy / sizeof(__be32)); + + /* Smart Dump table is ready to use and the first entry is valid */ + if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && + (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { + dev_err(&ioa_cfg->pdev->dev, + "Dump of IOA failed. 
Dump table not valid: %d, %X.\n", + rc, be32_to_cpu(sdt->hdr.state)); + driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED; + ioa_cfg->sdt_state = DUMP_OBTAINED; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + num_entries = be32_to_cpu(sdt->hdr.num_entries_used); + + if (num_entries > max_num_entries) + num_entries = max_num_entries; + + /* Update dump length to the actual data to be copied */ + dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header); + if (ioa_cfg->sis64) + dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry); + else + dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + for (i = 0; i < num_entries; i++) { + if (ioa_dump->hdr.len > max_dump_size) { + driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; + break; + } + + if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { + sdt_word = be32_to_cpu(sdt->entry[i].start_token); + if (ioa_cfg->sis64) + bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token); + else { + start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK; + end_off = be32_to_cpu(sdt->entry[i].end_token); + + if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) + bytes_to_copy = end_off - start_off; + else + valid = 0; + } + if (valid) { + if (bytes_to_copy > max_dump_size) { + sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; + continue; + } + + /* Copy data from adapter to driver buffers */ + bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word, + bytes_to_copy); + + ioa_dump->hdr.len += bytes_copied; + + if (bytes_copied != bytes_to_copy) { + driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; + break; + } + } + } + } + + dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n"); + + /* Update dump_header */ + driver_dump->hdr.len += ioa_dump->hdr.len; + wmb(); + ioa_cfg->sdt_state = DUMP_OBTAINED; + LEAVE; +} + +#else +#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0) +#endif + +/** + * ipr_release_dump - Free adapter dump memory + * @kref: kref struct + * + * Return value: + * nothing + **/ +static void ipr_release_dump(struct kref *kref) +{ + struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref); + struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; + unsigned long lock_flags = 0; + int i; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ioa_cfg->dump = NULL; + ioa_cfg->sdt_state = INACTIVE; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + for (i = 0; i < dump->ioa_dump.next_page_index; i++) + free_page((unsigned long) dump->ioa_dump.ioa_data[i]); + + vfree(dump->ioa_dump.ioa_data); + kfree(dump); + LEAVE; +} + +static void ipr_add_remove_thread(struct work_struct *work) +{ + unsigned long lock_flags; + struct ipr_resource_entry *res; + struct scsi_device *sdev; + struct ipr_ioa_cfg *ioa_cfg = + container_of(work, struct ipr_ioa_cfg, scsi_add_work_q); + u8 bus, target, lun; + int did_work; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + +restart: + do { + did_work = 0; + if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (res->del_from_ml && res->sdev) { + did_work = 1; + sdev = res->sdev; + if (!scsi_device_get(sdev)) { + if (!res->add_to_ml) + list_move_tail(&res->queue, &ioa_cfg->free_res_q); + else + res->del_from_ml = 0; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + scsi_remove_device(sdev); + 
scsi_device_put(sdev); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + } + break; + } + } + } while (did_work); + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (res->add_to_ml) { + bus = res->bus; + target = res->target; + lun = res->lun; + res->add_to_ml = 0; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + scsi_add_device(ioa_cfg->host, bus, target, lun); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + goto restart; + } + } + + ioa_cfg->scan_done = 1; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); + LEAVE; +} + +/** + * ipr_worker_thread - Worker thread + * @work: ioa config struct + * + * Called at task level from a work thread. This function takes care + * of adding and removing device from the mid-layer as configuration + * changes are detected by the adapter. + * + * Return value: + * nothing + **/ +static void ipr_worker_thread(struct work_struct *work) +{ + unsigned long lock_flags; + struct ipr_dump *dump; + struct ipr_ioa_cfg *ioa_cfg = + container_of(work, struct ipr_ioa_cfg, work_q); + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (ioa_cfg->sdt_state == READ_DUMP) { + dump = ioa_cfg->dump; + if (!dump) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + kref_get(&dump->kref); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + ipr_get_ioa_dump(ioa_cfg, dump); + kref_put(&dump->kref, ipr_release_dump); + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout) + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + if (ioa_cfg->scsi_unblock) { + ioa_cfg->scsi_unblock = 0; + ioa_cfg->scsi_blocked = 0; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + scsi_unblock_requests(ioa_cfg->host); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->scsi_blocked) + scsi_block_requests(ioa_cfg->host); + } + + if (!ioa_cfg->scan_enabled) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + schedule_work(&ioa_cfg->scsi_add_work_q); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + LEAVE; +} + +#ifdef CONFIG_SCSI_IPR_TRACE +/** + * ipr_read_trace - Dump the adapter trace + * @filp: open sysfs file + * @kobj: kobject struct + * @bin_attr: bin_attribute struct + * @buf: buffer + * @off: offset + * @count: buffer size + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct Scsi_Host *shost = class_to_shost(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + ssize_t ret; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace, + IPR_TRACE_SIZE); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + return ret; +} + +static struct bin_attribute ipr_trace_attr = { + .attr = { + .name = "trace", + .mode = S_IRUGO, + }, + .size = 0, + .read = ipr_read_trace, +}; +#endif + +/** + * ipr_show_fw_version - Show the firmware version + * @dev: class device struct + * @attr: device attribute (unused) + * @buf: buffer + * 
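+ * The version is formatted as four hex bytes: major release, card
+ * type and the two minor release bytes. It is typically read via
+ * sysfs, e.g. /sys/class/scsi_host/hostN/fw_version (the host
+ * number N is illustrative only).
+ *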
+ * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_fw_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; + unsigned long lock_flags = 0; + int len; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n", + ucode_vpd->major_release, ucode_vpd->card_type, + ucode_vpd->minor_release[0], + ucode_vpd->minor_release[1]); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +static struct device_attribute ipr_fw_version_attr = { + .attr = { + .name = "fw_version", + .mode = S_IRUGO, + }, + .show = ipr_show_fw_version, +}; + +/** + * ipr_show_log_level - Show the adapter's error logging level + * @dev: class device struct + * @attr: device attribute (unused) + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_log_level(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + int len; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +/** + * ipr_store_log_level - Change the adapter's error logging level + * @dev: class device struct + * @attr: device attribute (unused) + * @buf: buffer + * @count: buffer size + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_store_log_level(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return strlen(buf); +} + +static struct device_attribute ipr_log_level_attr = { + .attr = { + .name = "log_level", + .mode = S_IRUGO | S_IWUSR, + }, + .show = ipr_show_log_level, + .store = ipr_store_log_level +}; + +/** + * ipr_store_diagnostics - IOA Diagnostics interface + * @dev: device struct + * @attr: device attribute (unused) + * @buf: buffer + * @count: buffer size + * + * This function will reset the adapter and wait a reasonable + * amount of time for any errors that the adapter might log. 
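+ * The buffer contents are not parsed; any write starts the reset.
+ * The write fails with -EIO if the reset could not be performed or
+ * if the adapter logged any errors while the diagnostics ran.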
+ * + * Return value: + * count on success / other on failure + **/ +static ssize_t ipr_store_diagnostics(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + int rc = count; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + while (ioa_cfg->in_reset_reload) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + } + + ioa_cfg->errors_logged = 0; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); + + if (ioa_cfg->in_reset_reload) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + + /* Wait for a second for any errors to be logged */ + msleep(1000); + } else { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return -EIO; + } + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) + rc = -EIO; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + return rc; +} + +static struct device_attribute ipr_diagnostics_attr = { + .attr = { + .name = "run_diagnostics", + .mode = S_IWUSR, + }, + .store = ipr_store_diagnostics +}; + +/** + * ipr_show_adapter_state - Show the adapter's state + * @dev: device struct + * @attr: device attribute (unused) + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_adapter_state(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + int len; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) + len = snprintf(buf, PAGE_SIZE, "offline\n"); + else + len = snprintf(buf, PAGE_SIZE, "online\n"); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +/** + * ipr_store_adapter_state - Change adapter state + * @dev: device struct + * @attr: device attribute (unused) + * @buf: buffer + * @count: buffer size + * + * This function will change the adapter's state. 
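+ * Writing "online" to an adapter that is marked dead clears the
+ * per-queue ioa_is_dead flags and starts a fresh adapter reset;
+ * any other input is ignored.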
+ * + * Return value: + * count on success / other on failure + **/ +static ssize_t ipr_store_adapter_state(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags; + int result = count, i; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && + !strncmp(buf, "online", 6)) { + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + spin_lock(&ioa_cfg->hrrq[i]._lock); + ioa_cfg->hrrq[i].ioa_is_dead = 0; + spin_unlock(&ioa_cfg->hrrq[i]._lock); + } + wmb(); + ioa_cfg->reset_retries = 0; + ioa_cfg->in_ioa_bringdown = 0; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + + return result; +} + +static struct device_attribute ipr_ioa_state_attr = { + .attr = { + .name = "online_state", + .mode = S_IRUGO | S_IWUSR, + }, + .show = ipr_show_adapter_state, + .store = ipr_store_adapter_state +}; + +/** + * ipr_store_reset_adapter - Reset the adapter + * @dev: device struct + * @attr: device attribute (unused) + * @buf: buffer + * @count: buffer size + * + * This function will reset the adapter. + * + * Return value: + * count on success / other on failure + **/ +static ssize_t ipr_store_reset_adapter(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags; + int result = count; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (!ioa_cfg->in_reset_reload) + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + + return result; +} + +static struct device_attribute ipr_ioa_reset_attr = { + .attr = { + .name = "reset_host", + .mode = S_IWUSR, + }, + .store = ipr_store_reset_adapter +}; + +static int ipr_iopoll(struct irq_poll *iop, int budget); + /** + * ipr_show_iopoll_weight - Show ipr polling mode + * @dev: class device struct + * @attr: device attribute (unused) + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_iopoll_weight(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + int len; + + spin_lock_irqsave(shost->host_lock, lock_flags); + len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight); + spin_unlock_irqrestore(shost->host_lock, lock_flags); + + return len; +} + +/** + * ipr_store_iopoll_weight - Change the adapter's polling mode + * @dev: class device struct + * @attr: device attribute (unused) + * @buf: buffer + * @count: buffer size + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_store_iopoll_weight(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long user_iopoll_weight; + 
unsigned long lock_flags = 0; + int i; + + if (!ioa_cfg->sis64) { + dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n"); + return -EINVAL; + } + if (kstrtoul(buf, 10, &user_iopoll_weight)) + return -EINVAL; + + if (user_iopoll_weight > 256) { + dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n"); + return -EINVAL; + } + + if (user_iopoll_weight == ioa_cfg->iopoll_weight) { + dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n"); + return strlen(buf); + } + + if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { + for (i = 1; i < ioa_cfg->hrrq_num; i++) + irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); + } + + spin_lock_irqsave(shost->host_lock, lock_flags); + ioa_cfg->iopoll_weight = user_iopoll_weight; + if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { + for (i = 1; i < ioa_cfg->hrrq_num; i++) { + irq_poll_init(&ioa_cfg->hrrq[i].iopoll, + ioa_cfg->iopoll_weight, ipr_iopoll); + } + } + spin_unlock_irqrestore(shost->host_lock, lock_flags); + + return strlen(buf); +} + +static struct device_attribute ipr_iopoll_weight_attr = { + .attr = { + .name = "iopoll_weight", + .mode = S_IRUGO | S_IWUSR, + }, + .show = ipr_show_iopoll_weight, + .store = ipr_store_iopoll_weight +}; + +/** + * ipr_alloc_ucode_buffer - Allocates a microcode download buffer + * @buf_len: buffer length + * + * Allocates a DMA'able buffer in chunks and assembles a scatter/gather + * list to use for microcode download + * + * Return value: + * pointer to sglist / NULL on failure + **/ +static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len) +{ + int sg_size, order; + struct ipr_sglist *sglist; + + /* Get the minimum size per scatter/gather element */ + sg_size = buf_len / (IPR_MAX_SGLIST - 1); + + /* Get the actual size per element */ + order = get_order(sg_size); + + /* Allocate a scatter/gather list for the DMA */ + sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL); + if (sglist == NULL) { + ipr_trace; + return NULL; + } + sglist->order = order; + sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL, + &sglist->num_sg); + if (!sglist->scatterlist) { + kfree(sglist); + return NULL; + } + + return sglist; +} + +/** + * ipr_free_ucode_buffer - Frees a microcode download buffer + * @sglist: scatter/gather list pointer + * + * Free a DMA'able ucode download buffer previously allocated with + * ipr_alloc_ucode_buffer + * + * Return value: + * nothing + **/ +static void ipr_free_ucode_buffer(struct ipr_sglist *sglist) +{ + sgl_free_order(sglist->scatterlist, sglist->order); + kfree(sglist); +} + +/** + * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer + * @sglist: scatter/gather list pointer + * @buffer: buffer pointer + * @len: buffer length + * + * Copy a microcode image from a user buffer into a buffer allocated by + * ipr_alloc_ucode_buffer + * + * Return value: + * 0 on success / other on failure + **/ +static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist, + u8 *buffer, u32 len) +{ + int bsize_elem, i, result = 0; + struct scatterlist *sg; + + /* Determine the actual number of bytes per element */ + bsize_elem = PAGE_SIZE * (1 << sglist->order); + + sg = sglist->scatterlist; + + for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), + buffer += bsize_elem) { + struct page *page = sg_page(sg); + + memcpy_to_page(page, 0, buffer, bsize_elem); + + sg->length = bsize_elem; + + if (result != 0) { + ipr_trace; + return result; + } + } + + if (len % bsize_elem) { + 
struct page *page = sg_page(sg); + + memcpy_to_page(page, 0, buffer, len % bsize_elem); + + sg->length = len % bsize_elem; + } + + sglist->buffer_len = len; + return result; +} + +/** + * ipr_build_ucode_ioadl64 - Build a microcode download IOADL + * @ipr_cmd: ipr command struct + * @sglist: scatter/gather list + * + * Builds a microcode download IOA data list (IOADL). + * + **/ +static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd, + struct ipr_sglist *sglist) +{ + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; + struct scatterlist *scatterlist = sglist->scatterlist; + struct scatterlist *sg; + int i; + + ipr_cmd->dma_use_sg = sglist->num_dma_sg; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); + + ioarcb->ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); + for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) { + ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE); + ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg)); + ioadl64[i].address = cpu_to_be64(sg_dma_address(sg)); + } + + ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); +} + +/** + * ipr_build_ucode_ioadl - Build a microcode download IOADL + * @ipr_cmd: ipr command struct + * @sglist: scatter/gather list + * + * Builds a microcode download IOA data list (IOADL). + * + **/ +static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd, + struct ipr_sglist *sglist) +{ + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; + struct scatterlist *scatterlist = sglist->scatterlist; + struct scatterlist *sg; + int i; + + ipr_cmd->dma_use_sg = sglist->num_dma_sg; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); + + ioarcb->ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); + + for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) { + ioadl[i].flags_and_data_len = + cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg)); + ioadl[i].address = + cpu_to_be32(sg_dma_address(sg)); + } + + ioadl[i-1].flags_and_data_len |= + cpu_to_be32(IPR_IOADL_FLAGS_LAST); +} + +/** + * ipr_update_ioa_ucode - Update IOA's microcode + * @ioa_cfg: ioa config struct + * @sglist: scatter/gather list + * + * Initiate an adapter reset to update the IOA's microcode + * + * Return value: + * 0 on success / -EIO on failure + **/ +static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_sglist *sglist) +{ + unsigned long lock_flags; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + while (ioa_cfg->in_reset_reload) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + } + + if (ioa_cfg->ucode_sglist) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + dev_err(&ioa_cfg->pdev->dev, + "Microcode download already in progress\n"); + return -EIO; + } + + sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, + sglist->scatterlist, sglist->num_sg, + DMA_TO_DEVICE); + + if (!sglist->num_dma_sg) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + dev_err(&ioa_cfg->pdev->dev, + "Failed to map microcode download buffer!\n"); + return -EIO; + } + + ioa_cfg->ucode_sglist = sglist; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); + 
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ioa_cfg->ucode_sglist = NULL; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return 0; +} + +/** + * ipr_store_update_fw - Update the firmware on the adapter + * @dev: device struct + * @attr: device attribute (unused) + * @buf: buffer + * @count: buffer size + * + * This function will update the firmware on the adapter. + * + * Return value: + * count on success / other on failure + **/ +static ssize_t ipr_store_update_fw(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + struct ipr_ucode_image_header *image_hdr; + const struct firmware *fw_entry; + struct ipr_sglist *sglist; + char fname[100]; + char *src; + char *endline; + int result, dnld_size; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + snprintf(fname, sizeof(fname), "%s", buf); + + endline = strchr(fname, '\n'); + if (endline) + *endline = '\0'; + + if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { + dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); + return -EIO; + } + + image_hdr = (struct ipr_ucode_image_header *)fw_entry->data; + + src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length); + dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length); + sglist = ipr_alloc_ucode_buffer(dnld_size); + + if (!sglist) { + dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n"); + release_firmware(fw_entry); + return -ENOMEM; + } + + result = ipr_copy_ucode_buffer(sglist, src, dnld_size); + + if (result) { + dev_err(&ioa_cfg->pdev->dev, + "Microcode buffer copy to DMA buffer failed\n"); + goto out; + } + + ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n"); + + result = ipr_update_ioa_ucode(ioa_cfg, sglist); + + if (!result) + result = count; +out: + ipr_free_ucode_buffer(sglist); + release_firmware(fw_entry); + return result; +} + +static struct device_attribute ipr_update_fw_attr = { + .attr = { + .name = "update_fw", + .mode = S_IWUSR, + }, + .store = ipr_store_update_fw +}; + +/** + * ipr_show_fw_type - Show the adapter's firmware type. 
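+ * Prints the sis64 flag: non-zero for 64-bit SIS adapters and 0
+ * for legacy 32-bit SIS adapters.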
+ * @dev: class device struct + * @attr: device attribute (unused) + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_fw_type(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + int len; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +static struct device_attribute ipr_ioa_fw_type_attr = { + .attr = { + .name = "fw_type", + .mode = S_IRUGO, + }, + .show = ipr_show_fw_type +}; + +static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct device *cdev = kobj_to_dev(kobj); + struct Scsi_Host *shost = class_to_shost(cdev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + struct ipr_hostrcb *hostrcb; + unsigned long lock_flags = 0; + int ret; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, + struct ipr_hostrcb, queue); + if (!hostrcb) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return 0; + } + ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam, + sizeof(hostrcb->hcam)); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return ret; +} + +static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct device *cdev = kobj_to_dev(kobj); + struct Scsi_Host *shost = class_to_shost(cdev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + struct ipr_hostrcb *hostrcb; + unsigned long lock_flags = 0; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, + struct ipr_hostrcb, queue); + if (!hostrcb) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return count; + } + + /* Reclaim hostrcb before exit */ + list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return count; +} + +static struct bin_attribute ipr_ioa_async_err_log = { + .attr = { + .name = "async_err_log", + .mode = S_IRUGO | S_IWUSR, + }, + .size = 0, + .read = ipr_read_async_err_log, + .write = ipr_next_async_err_log +}; + +static struct attribute *ipr_ioa_attrs[] = { + &ipr_fw_version_attr.attr, + &ipr_log_level_attr.attr, + &ipr_diagnostics_attr.attr, + &ipr_ioa_state_attr.attr, + &ipr_ioa_reset_attr.attr, + &ipr_update_fw_attr.attr, + &ipr_ioa_fw_type_attr.attr, + &ipr_iopoll_weight_attr.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(ipr_ioa); + +#ifdef CONFIG_SCSI_IPR_DUMP +/** + * ipr_read_dump - Dump the adapter + * @filp: open sysfs file + * @kobj: kobject struct + * @bin_attr: bin_attribute struct + * @buf: buffer + * @off: offset + * @count: buffer size + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct device *cdev = kobj_to_dev(kobj); + struct Scsi_Host *shost = class_to_shost(cdev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + struct 
ipr_dump *dump; + unsigned long lock_flags = 0; + char *src; + int len, sdt_end; + size_t rc = count; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + dump = ioa_cfg->dump; + + if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return 0; + } + kref_get(&dump->kref); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + if (off > dump->driver_dump.hdr.len) { + kref_put(&dump->kref, ipr_release_dump); + return 0; + } + + if (off + count > dump->driver_dump.hdr.len) { + count = dump->driver_dump.hdr.len - off; + rc = count; + } + + if (count && off < sizeof(dump->driver_dump)) { + if (off + count > sizeof(dump->driver_dump)) + len = sizeof(dump->driver_dump) - off; + else + len = count; + src = (u8 *)&dump->driver_dump + off; + memcpy(buf, src, len); + buf += len; + off += len; + count -= len; + } + + off -= sizeof(dump->driver_dump); + + if (ioa_cfg->sis64) + sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + + (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) * + sizeof(struct ipr_sdt_entry)); + else + sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + + (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry)); + + if (count && off < sdt_end) { + if (off + count > sdt_end) + len = sdt_end - off; + else + len = count; + src = (u8 *)&dump->ioa_dump + off; + memcpy(buf, src, len); + buf += len; + off += len; + count -= len; + } + + off -= sdt_end; + + while (count) { + if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK)) + len = PAGE_ALIGN(off) - off; + else + len = count; + src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT]; + src += off & ~PAGE_MASK; + memcpy(buf, src, len); + buf += len; + off += len; + count -= len; + } + + kref_put(&dump->kref, ipr_release_dump); + return rc; +} + +/** + * ipr_alloc_dump - Prepare for adapter dump + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_dump *dump; + __be32 **ioa_data; + unsigned long lock_flags = 0; + + dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL); + + if (!dump) { + ipr_err("Dump memory allocation failed\n"); + return -ENOMEM; + } + + if (ioa_cfg->sis64) + ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES, + sizeof(__be32 *))); + else + ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES, + sizeof(__be32 *))); + + if (!ioa_data) { + ipr_err("Dump memory allocation failed\n"); + kfree(dump); + return -ENOMEM; + } + + dump->ioa_dump.ioa_data = ioa_data; + + kref_init(&dump->kref); + dump->ioa_cfg = ioa_cfg; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (INACTIVE != ioa_cfg->sdt_state) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + vfree(dump->ioa_dump.ioa_data); + kfree(dump); + return 0; + } + + ioa_cfg->dump = dump; + ioa_cfg->sdt_state = WAIT_FOR_DUMP; + if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) { + ioa_cfg->dump_taken = 1; + schedule_work(&ioa_cfg->work_q); + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + return 0; +} + +/** + * ipr_free_dump - Free adapter dump memory + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_dump *dump; + unsigned long lock_flags = 0; + + ENTER; + + spin_lock_irqsave(ioa_cfg->host->host_lock, 
lock_flags); + dump = ioa_cfg->dump; + if (!dump) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return 0; + } + + ioa_cfg->dump = NULL; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + kref_put(&dump->kref, ipr_release_dump); + + LEAVE; + return 0; +} + +/** + * ipr_write_dump - Setup dump state of adapter + * @filp: open sysfs file + * @kobj: kobject struct + * @bin_attr: bin_attribute struct + * @buf: buffer + * @off: offset + * @count: buffer size + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct device *cdev = kobj_to_dev(kobj); + struct Scsi_Host *shost = class_to_shost(cdev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + int rc; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (buf[0] == '1') + rc = ipr_alloc_dump(ioa_cfg); + else if (buf[0] == '0') + rc = ipr_free_dump(ioa_cfg); + else + return -EINVAL; + + if (rc) + return rc; + else + return count; +} + +static struct bin_attribute ipr_dump_attr = { + .attr = { + .name = "dump", + .mode = S_IRUSR | S_IWUSR, + }, + .size = 0, + .read = ipr_read_dump, + .write = ipr_write_dump +}; +#else +static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; +#endif + +/** + * ipr_change_queue_depth - Change the device's queue depth + * @sdev: scsi device struct + * @qdepth: depth to set + * + * Return value: + * actual depth set + **/ +static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + scsi_change_queue_depth(sdev, qdepth); + return sdev->queue_depth; +} + +/** + * ipr_show_adapter_handle - Show the adapter's resource handle for this device + * @dev: device struct + * @attr: device attribute structure + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + ssize_t len = -ENXIO; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res) + len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +static struct device_attribute ipr_adapter_handle_attr = { + .attr = { + .name = "adapter_handle", + .mode = S_IRUSR, + }, + .show = ipr_show_adapter_handle +}; + +/** + * ipr_show_resource_path - Show the resource path or the resource address for + * this device. 
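+ * On SIS-64 adapters the formatted resource path is shown; on
+ * older adapters the host:bus:target:lun address is printed
+ * instead.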
+ * @dev: device struct + * @attr: device attribute structure + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + ssize_t len = -ENXIO; + char buffer[IPR_MAX_RES_PATH_LENGTH]; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res && ioa_cfg->sis64) + len = snprintf(buf, PAGE_SIZE, "%s\n", + __ipr_format_res_path(res->res_path, buffer, + sizeof(buffer))); + else if (res) + len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no, + res->bus, res->target, res->lun); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +static struct device_attribute ipr_resource_path_attr = { + .attr = { + .name = "resource_path", + .mode = S_IRUGO, + }, + .show = ipr_show_resource_path +}; + +/** + * ipr_show_device_id - Show the device_id for this device. + * @dev: device struct + * @attr: device attribute structure + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + ssize_t len = -ENXIO; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res && ioa_cfg->sis64) + len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id)); + else if (res) + len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +static struct device_attribute ipr_device_id_attr = { + .attr = { + .name = "device_id", + .mode = S_IRUGO, + }, + .show = ipr_show_device_id +}; + +/** + * ipr_show_resource_type - Show the resource type for this device. 
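+ * The type is printed in hex, taken from the device's entry in the
+ * adapter's config table.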
+ * @dev: device struct + * @attr: device attribute structure + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + ssize_t len = -ENXIO; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + + if (res) + len = snprintf(buf, PAGE_SIZE, "%x\n", res->type); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +static struct device_attribute ipr_resource_type_attr = { + .attr = { + .name = "resource_type", + .mode = S_IRUGO, + }, + .show = ipr_show_resource_type +}; + +/** + * ipr_show_raw_mode - Show the adapter's raw mode + * @dev: class device struct + * @attr: device attribute (unused) + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_raw_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + ssize_t len; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res) + len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode); + else + len = -ENXIO; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +/** + * ipr_store_raw_mode - Change the adapter's raw mode + * @dev: class device struct + * @attr: device attribute (unused) + * @buf: buffer + * @count: buffer size + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_store_raw_mode(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + ssize_t len; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res) { + if (ipr_is_af_dasd_device(res)) { + res->raw_mode = simple_strtoul(buf, NULL, 10); + len = strlen(buf); + if (res->sdev) + sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n", + res->raw_mode ? "enabled" : "disabled"); + } else + len = -EINVAL; + } else + len = -ENXIO; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +static struct device_attribute ipr_raw_mode_attr = { + .attr = { + .name = "raw_mode", + .mode = S_IRUGO | S_IWUSR, + }, + .show = ipr_show_raw_mode, + .store = ipr_store_raw_mode +}; + +static struct attribute *ipr_dev_attrs[] = { + &ipr_adapter_handle_attr.attr, + &ipr_resource_path_attr.attr, + &ipr_device_id_attr.attr, + &ipr_resource_type_attr.attr, + &ipr_raw_mode_attr.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(ipr_dev); + +/** + * ipr_biosparam - Return the HSC mapping + * @sdev: scsi device struct + * @block_device: block device pointer + * @capacity: capacity of the device + * @parm: Array containing returned HSC values. + * + * This function generates the HSC parms that fdisk uses. 
+ * We want to make sure we return something that places partitions + * on 4k boundaries for best performance with the IOA. + * + * Return value: + * 0 on success + **/ +static int ipr_biosparam(struct scsi_device *sdev, + struct block_device *block_device, + sector_t capacity, int *parm) +{ + int heads, sectors; + sector_t cylinders; + + heads = 128; + sectors = 32; + + cylinders = capacity; + sector_div(cylinders, (128 * 32)); + + /* return result */ + parm[0] = heads; + parm[1] = sectors; + parm[2] = cylinders; + + return 0; +} + +/** + * ipr_find_starget - Find target based on bus/target. + * @starget: scsi target struct + * + * Return value: + * resource entry pointer if found / NULL if not found + **/ +static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; + struct ipr_resource_entry *res; + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if ((res->bus == starget->channel) && + (res->target == starget->id)) { + return res; + } + } + + return NULL; +} + +/** + * ipr_target_destroy - Destroy a SCSI target + * @starget: scsi target struct + * + **/ +static void ipr_target_destroy(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; + + if (ioa_cfg->sis64) { + if (!ipr_find_starget(starget)) { + if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) + clear_bit(starget->id, ioa_cfg->array_ids); + else if (starget->channel == IPR_VSET_VIRTUAL_BUS) + clear_bit(starget->id, ioa_cfg->vset_ids); + else if (starget->channel == 0) + clear_bit(starget->id, ioa_cfg->target_ids); + } + } +} + +/** + * ipr_find_sdev - Find device based on bus/target/lun. + * @sdev: scsi device struct + * + * Return value: + * resource entry pointer if found / NULL if not found + **/ +static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev) +{ + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; + struct ipr_resource_entry *res; + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if ((res->bus == sdev->channel) && + (res->target == sdev->id) && + (res->lun == sdev->lun)) + return res; + } + + return NULL; +} + +/** + * ipr_slave_destroy - Unconfigure a SCSI device + * @sdev: scsi device struct + * + * Return value: + * nothing + **/ +static void ipr_slave_destroy(struct scsi_device *sdev) +{ + struct ipr_resource_entry *res; + struct ipr_ioa_cfg *ioa_cfg; + unsigned long lock_flags = 0; + + ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *) sdev->hostdata; + if (res) { + sdev->hostdata = NULL; + res->sdev = NULL; + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); +} + +/** + * ipr_slave_configure - Configure a SCSI device + * @sdev: scsi device struct + * + * This function configures the specified scsi device. 
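+ * AF DASD and IOA resources are hidden from upper layer drivers
+ * (no_uld_attach); volume set resources get a larger request
+ * timeout and a max_hw_sectors limit suited to the IOA.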
+ * + * Return value: + * 0 on success + **/ +static int ipr_slave_configure(struct scsi_device *sdev) +{ + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + char buffer[IPR_MAX_RES_PATH_LENGTH]; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = sdev->hostdata; + if (res) { + if (ipr_is_af_dasd_device(res)) + sdev->type = TYPE_RAID; + if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) { + sdev->scsi_level = 4; + sdev->no_uld_attach = 1; + } + if (ipr_is_vset_device(res)) { + sdev->scsi_level = SCSI_SPC_3; + sdev->no_report_opcodes = 1; + blk_queue_rq_timeout(sdev->request_queue, + IPR_VSET_RW_TIMEOUT); + blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + if (ioa_cfg->sis64) + sdev_printk(KERN_INFO, sdev, "Resource path: %s\n", + ipr_format_res_path(ioa_cfg, + res->res_path, buffer, sizeof(buffer))); + return 0; + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return 0; +} + +/** + * ipr_slave_alloc - Prepare for commands to a device. + * @sdev: scsi device struct + * + * This function saves a pointer to the resource entry + * in the scsi device struct if the device exists. We + * can then use this pointer in ipr_queuecommand when + * handling new commands. + * + * Return value: + * 0 on success / -ENXIO if device does not exist + **/ +static int ipr_slave_alloc(struct scsi_device *sdev) +{ + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags; + int rc = -ENXIO; + + sdev->hostdata = NULL; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + res = ipr_find_sdev(sdev); + if (res) { + res->sdev = sdev; + res->add_to_ml = 0; + res->in_erp = 0; + sdev->hostdata = res; + if (!ipr_is_naca_model(res)) + res->needs_sync_complete = 1; + rc = 0; + if (ipr_is_gata(res)) { + sdev_printk(KERN_ERR, sdev, "SATA devices are no longer " + "supported by this driver. 
Skipping device.\n"); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return -ENXIO; + } + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + return rc; +} + +/** + * ipr_match_lun - Match function for specified LUN + * @ipr_cmd: ipr command struct + * @device: device to match (sdev) + * + * Returns: + * 1 if command matches sdev / 0 if command does not match sdev + **/ +static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device) +{ + if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device) + return 1; + return 0; +} + +/** + * ipr_cmnd_is_free - Check if a command is free or not + * @ipr_cmd: ipr command struct + * + * Returns: + * true / false + **/ +static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_cmnd *loop_cmd; + + list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) { + if (loop_cmd == ipr_cmd) + return true; + } + + return false; +} + +/** + * ipr_wait_for_ops - Wait for matching commands to complete + * @ioa_cfg: ioa config struct + * @device: device to match (sdev) + * @match: match function to use + * + * Returns: + * SUCCESS / FAILED + **/ +static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, + int (*match)(struct ipr_cmnd *, void *)) +{ + struct ipr_cmnd *ipr_cmd; + int wait, i; + unsigned long flags; + struct ipr_hrr_queue *hrrq; + signed long timeout = IPR_ABORT_TASK_TIMEOUT; + DECLARE_COMPLETION_ONSTACK(comp); + + ENTER; + do { + wait = 0; + + for_each_hrrq(hrrq, ioa_cfg) { + spin_lock_irqsave(hrrq->lock, flags); + for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { + ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; + if (!ipr_cmnd_is_free(ipr_cmd)) { + if (match(ipr_cmd, device)) { + ipr_cmd->eh_comp = ∁ + wait++; + } + } + } + spin_unlock_irqrestore(hrrq->lock, flags); + } + + if (wait) { + timeout = wait_for_completion_timeout(&comp, timeout); + + if (!timeout) { + wait = 0; + + for_each_hrrq(hrrq, ioa_cfg) { + spin_lock_irqsave(hrrq->lock, flags); + for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { + ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; + if (!ipr_cmnd_is_free(ipr_cmd)) { + if (match(ipr_cmd, device)) { + ipr_cmd->eh_comp = NULL; + wait++; + } + } + } + spin_unlock_irqrestore(hrrq->lock, flags); + } + + if (wait) + dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n"); + LEAVE; + return wait ? FAILED : SUCCESS; + } + } + } while (wait); + + LEAVE; + return SUCCESS; +} + +static int ipr_eh_host_reset(struct scsi_cmnd *cmd) +{ + struct ipr_ioa_cfg *ioa_cfg; + unsigned long lock_flags = 0; + int rc = SUCCESS; + + ENTER; + ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); + dev_err(&ioa_cfg->pdev->dev, + "Adapter being reset as a result of error recovery.\n"); + + if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) + ioa_cfg->sdt_state = GET_DUMP; + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + /* If we got hit with a host reset while we were already resetting + the adapter for some reason, and the reset failed. 
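+ In that case ioa_is_dead will still be set and FAILED is returned.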
*/ + if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { + ipr_trace; + rc = FAILED; + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + LEAVE; + return rc; +} + +/** + * ipr_device_reset - Reset the device + * @ioa_cfg: ioa config struct + * @res: resource entry struct + * + * This function issues a device reset to the affected device. + * If the device is a SCSI device, a LUN reset will be sent + * to the device first. If that does not work, a target reset + * will be sent. + * + * Return value: + * 0 on success / non-zero on failure + **/ +static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_resource_entry *res) +{ + struct ipr_cmnd *ipr_cmd; + struct ipr_ioarcb *ioarcb; + struct ipr_cmd_pkt *cmd_pkt; + u32 ioasc; + + ENTER; + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ioarcb = &ipr_cmd->ioarcb; + cmd_pkt = &ioarcb->cmd_pkt; + + if (ipr_cmd->ioa_cfg->sis64) + ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); + + ioarcb->res_handle = res->res_handle; + cmd_pkt->request_type = IPR_RQTYPE_IOACMD; + cmd_pkt->cdb[0] = IPR_RESET_DEVICE; + + ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); + ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + + LEAVE; + return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0; +} + +/** + * __ipr_eh_dev_reset - Reset the device + * @scsi_cmd: scsi command struct + * + * This function issues a device reset to the affected device. + * A LUN reset will be sent to the device first. If that does + * not work, a target reset will be sent. + * + * Return value: + * SUCCESS / FAILED + **/ +static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg; + struct ipr_resource_entry *res; + int rc = 0; + + ENTER; + ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; + res = scsi_cmd->device->hostdata; + + /* + * If we are currently going through reset/reload, return failed. This will force the + * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the + * reset to complete + */ + if (ioa_cfg->in_reset_reload) + return FAILED; + if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) + return FAILED; + + res->resetting_device = 1; + scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n"); + + rc = ipr_device_reset(ioa_cfg, res); + res->resetting_device = 0; + res->reset_occurred = 1; + + LEAVE; + return rc ? FAILED : SUCCESS; +} + +static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) +{ + int rc; + struct ipr_ioa_cfg *ioa_cfg; + struct ipr_resource_entry *res; + + ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; + res = cmd->device->hostdata; + + if (!res) + return FAILED; + + spin_lock_irq(cmd->device->host->host_lock); + rc = __ipr_eh_dev_reset(cmd); + spin_unlock_irq(cmd->device->host->host_lock); + + if (rc == SUCCESS) + rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); + + return rc; +} + +/** + * ipr_bus_reset_done - Op done function for bus reset. 
+ * @ipr_cmd: ipr command struct + * + * This function is the op done function for a bus reset + * + * Return value: + * none + **/ +static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_resource_entry *res; + + ENTER; + if (!ioa_cfg->sis64) + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (res->res_handle == ipr_cmd->ioarcb.res_handle) { + scsi_report_bus_reset(ioa_cfg->host, res->bus); + break; + } + } + + /* + * If abort has not completed, indicate the reset has, else call the + * abort's done function to wake the sleeping eh thread + */ + if (ipr_cmd->sibling->sibling) + ipr_cmd->sibling->sibling = NULL; + else + ipr_cmd->sibling->done(ipr_cmd->sibling); + + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + LEAVE; +} + +/** + * ipr_abort_timeout - An abort task has timed out + * @t: Timer context used to fetch ipr command struct + * + * This function handles when an abort task times out. If this + * happens we issue a bus reset since we have resources tied + * up that must be freed before returning to the midlayer. + * + * Return value: + * none + **/ +static void ipr_abort_timeout(struct timer_list *t) +{ + struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); + struct ipr_cmnd *reset_cmd; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_cmd_pkt *cmd_pkt; + unsigned long lock_flags = 0; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n"); + reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ipr_cmd->sibling = reset_cmd; + reset_cmd->sibling = ipr_cmd; + reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle; + cmd_pkt = &reset_cmd->ioarcb.cmd_pkt; + cmd_pkt->request_type = IPR_RQTYPE_IOACMD; + cmd_pkt->cdb[0] = IPR_RESET_DEVICE; + cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET; + + ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + LEAVE; +} + +/** + * ipr_cancel_op - Cancel specified op + * @scsi_cmd: scsi command struct + * + * This function cancels specified op. + * + * Return value: + * SUCCESS / FAILED + **/ +static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd) +{ + struct ipr_cmnd *ipr_cmd; + struct ipr_ioa_cfg *ioa_cfg; + struct ipr_resource_entry *res; + struct ipr_cmd_pkt *cmd_pkt; + u32 ioasc; + int i, op_found = 0; + struct ipr_hrr_queue *hrrq; + + ENTER; + ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; + res = scsi_cmd->device->hostdata; + + /* If we are currently going through reset/reload, return failed. + * This will force the mid-layer to call ipr_eh_host_reset, + * which will then go to sleep and wait for the reset to complete + */ + if (ioa_cfg->in_reset_reload || + ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) + return FAILED; + if (!res) + return FAILED; + + /* + * If we are aborting a timed out op, chances are that the timeout was caused + * by a still not detected EEH error. In such cases, reading a register will + * trigger the EEH recovery infrastructure. 
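+ * The value returned by the read below is deliberately discarded; only
+ * the MMIO access itself matters here.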
+ */ + readl(ioa_cfg->regs.sense_interrupt_reg); + + if (!ipr_is_gscsi(res)) + return FAILED; + + for_each_hrrq(hrrq, ioa_cfg) { + spin_lock(&hrrq->_lock); + for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { + if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) { + if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) { + op_found = 1; + break; + } + } + } + spin_unlock(&hrrq->_lock); + } + + if (!op_found) + return SUCCESS; + + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ipr_cmd->ioarcb.res_handle = res->res_handle; + cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; + cmd_pkt->request_type = IPR_RQTYPE_IOACMD; + cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; + ipr_cmd->u.sdev = scsi_cmd->device; + + scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n", + scsi_cmd->cmnd[0]); + ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT); + ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); + + /* + * If the abort task timed out and we sent a bus reset, we will get + * one the following responses to the abort + */ + if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) { + ioasc = 0; + ipr_trace; + } + + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + if (!ipr_is_naca_model(res)) + res->needs_sync_complete = 1; + + LEAVE; + return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS; +} + +/** + * ipr_scan_finished - Report whether scan is done + * @shost: scsi host struct + * @elapsed_time: elapsed time + * + * Return value: + * 0 if scan in progress / 1 if scan is complete + **/ +static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time) +{ + unsigned long lock_flags; + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; + int rc = 0; + + spin_lock_irqsave(shost->host_lock, lock_flags); + if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done) + rc = 1; + if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2)) + rc = 1; + spin_unlock_irqrestore(shost->host_lock, lock_flags); + return rc; +} + +/** + * ipr_eh_abort - Reset the host adapter + * @scsi_cmd: scsi command struct + * + * Return value: + * SUCCESS / FAILED + **/ +static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd) +{ + unsigned long flags; + int rc; + struct ipr_ioa_cfg *ioa_cfg; + + ENTER; + + ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; + + spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); + rc = ipr_cancel_op(scsi_cmd); + spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); + + if (rc == SUCCESS) + rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); + LEAVE; + return rc; +} + +/** + * ipr_handle_other_interrupt - Handle "other" interrupts + * @ioa_cfg: ioa config struct + * @int_reg: interrupt register + * + * Return value: + * IRQ_NONE / IRQ_HANDLED + **/ +static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, + u32 int_reg) +{ + irqreturn_t rc = IRQ_HANDLED; + u32 int_mask_reg; + + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); + int_reg &= ~int_mask_reg; + + /* If an interrupt on the adapter did not occur, ignore it. + * Or in the case of SIS 64, check for a stage change interrupt. 
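+ * A stage change interrupt advances the in-progress adapter reset job.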
+ */ + if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) { + if (ioa_cfg->sis64) { + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { + + /* clear stage change */ + writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + list_del(&ioa_cfg->reset_cmd->queue); + del_timer(&ioa_cfg->reset_cmd->timer); + ipr_reset_ioa_job(ioa_cfg->reset_cmd); + return IRQ_HANDLED; + } + } + + return IRQ_NONE; + } + + if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { + /* Mask the interrupt */ + writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); + + list_del(&ioa_cfg->reset_cmd->queue); + del_timer(&ioa_cfg->reset_cmd->timer); + ipr_reset_ioa_job(ioa_cfg->reset_cmd); + } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) { + if (ioa_cfg->clear_isr) { + if (ipr_debug && printk_ratelimit()) + dev_err(&ioa_cfg->pdev->dev, + "Spurious interrupt detected. 0x%08X\n", int_reg); + writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); + return IRQ_NONE; + } + } else { + if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) + ioa_cfg->ioa_unit_checked = 1; + else if (int_reg & IPR_PCII_NO_HOST_RRQ) + dev_err(&ioa_cfg->pdev->dev, + "No Host RRQ. 0x%08X\n", int_reg); + else + dev_err(&ioa_cfg->pdev->dev, + "Permanent IOA failure. 0x%08X\n", int_reg); + + if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) + ioa_cfg->sdt_state = GET_DUMP; + + ipr_mask_and_clear_interrupts(ioa_cfg, ~0); + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + } + + return rc; +} + +/** + * ipr_isr_eh - Interrupt service routine error handler + * @ioa_cfg: ioa config struct + * @msg: message to log + * @number: various meanings depending on the caller/message + * + * Return value: + * none + **/ +static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number) +{ + ioa_cfg->errors_logged++; + dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number); + + if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) + ioa_cfg->sdt_state = GET_DUMP; + + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); +} + +static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget, + struct list_head *doneq) +{ + u32 ioasc; + u16 cmd_index; + struct ipr_cmnd *ipr_cmd; + struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg; + int num_hrrq = 0; + + /* If interrupts are disabled, ignore the interrupt */ + if (!hrr_queue->allow_interrupts) + return 0; + + while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == + hrr_queue->toggle_bit) { + + cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) & + IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> + IPR_HRRQ_REQ_RESP_HANDLE_SHIFT; + + if (unlikely(cmd_index > hrr_queue->max_cmd_id || + cmd_index < hrr_queue->min_cmd_id)) { + ipr_isr_eh(ioa_cfg, + "Invalid response handle from IOA: ", + cmd_index); + break; + } + + ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; + ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); + + ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); + + list_move_tail(&ipr_cmd->queue, doneq); + + if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) { + hrr_queue->hrrq_curr++; + } else { + hrr_queue->hrrq_curr = hrr_queue->hrrq_start; + hrr_queue->toggle_bit ^= 1u; + } + num_hrrq++; + if (budget > 0 && num_hrrq >= budget) + break; + } + + return num_hrrq; +} + +static int ipr_iopoll(struct irq_poll 
*iop, int budget) +{ + struct ipr_hrr_queue *hrrq; + struct ipr_cmnd *ipr_cmd, *temp; + unsigned long hrrq_flags; + int completed_ops; + LIST_HEAD(doneq); + + hrrq = container_of(iop, struct ipr_hrr_queue, iopoll); + + spin_lock_irqsave(hrrq->lock, hrrq_flags); + completed_ops = ipr_process_hrrq(hrrq, budget, &doneq); + + if (completed_ops < budget) + irq_poll_complete(iop); + spin_unlock_irqrestore(hrrq->lock, hrrq_flags); + + list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { + list_del(&ipr_cmd->queue); + del_timer(&ipr_cmd->timer); + ipr_cmd->fast_done(ipr_cmd); + } + + return completed_ops; +} + +/** + * ipr_isr - Interrupt service routine + * @irq: irq number + * @devp: pointer to ioa config struct + * + * Return value: + * IRQ_NONE / IRQ_HANDLED + **/ +static irqreturn_t ipr_isr(int irq, void *devp) +{ + struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; + struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; + unsigned long hrrq_flags = 0; + u32 int_reg = 0; + int num_hrrq = 0; + int irq_none = 0; + struct ipr_cmnd *ipr_cmd, *temp; + irqreturn_t rc = IRQ_NONE; + LIST_HEAD(doneq); + + spin_lock_irqsave(hrrq->lock, hrrq_flags); + /* If interrupts are disabled, ignore the interrupt */ + if (!hrrq->allow_interrupts) { + spin_unlock_irqrestore(hrrq->lock, hrrq_flags); + return IRQ_NONE; + } + + while (1) { + if (ipr_process_hrrq(hrrq, -1, &doneq)) { + rc = IRQ_HANDLED; + + if (!ioa_cfg->clear_isr) + break; + + /* Clear the PCI interrupt */ + num_hrrq = 0; + do { + writel(IPR_PCII_HRRQ_UPDATED, + ioa_cfg->regs.clr_interrupt_reg32); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); + } while (int_reg & IPR_PCII_HRRQ_UPDATED && + num_hrrq++ < IPR_MAX_HRRQ_RETRIES); + + } else if (rc == IRQ_NONE && irq_none == 0) { + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); + irq_none++; + } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES && + int_reg & IPR_PCII_HRRQ_UPDATED) { + ipr_isr_eh(ioa_cfg, + "Error clearing HRRQ: ", num_hrrq); + rc = IRQ_HANDLED; + break; + } else + break; + } + + if (unlikely(rc == IRQ_NONE)) + rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); + + spin_unlock_irqrestore(hrrq->lock, hrrq_flags); + list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { + list_del(&ipr_cmd->queue); + del_timer(&ipr_cmd->timer); + ipr_cmd->fast_done(ipr_cmd); + } + return rc; +} + +/** + * ipr_isr_mhrrq - Interrupt service routine + * @irq: irq number + * @devp: pointer to ioa config struct + * + * Return value: + * IRQ_NONE / IRQ_HANDLED + **/ +static irqreturn_t ipr_isr_mhrrq(int irq, void *devp) +{ + struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; + struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; + unsigned long hrrq_flags = 0; + struct ipr_cmnd *ipr_cmd, *temp; + irqreturn_t rc = IRQ_NONE; + LIST_HEAD(doneq); + + spin_lock_irqsave(hrrq->lock, hrrq_flags); + + /* If interrupts are disabled, ignore the interrupt */ + if (!hrrq->allow_interrupts) { + spin_unlock_irqrestore(hrrq->lock, hrrq_flags); + return IRQ_NONE; + } + + if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { + if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == + hrrq->toggle_bit) { + irq_poll_sched(&hrrq->iopoll); + spin_unlock_irqrestore(hrrq->lock, hrrq_flags); + return IRQ_HANDLED; + } + } else { + if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == + hrrq->toggle_bit) + + if (ipr_process_hrrq(hrrq, -1, &doneq)) + rc = IRQ_HANDLED; + } + + spin_unlock_irqrestore(hrrq->lock, hrrq_flags); + + list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { + 
list_del(&ipr_cmd->queue); + del_timer(&ipr_cmd->timer); + ipr_cmd->fast_done(ipr_cmd); + } + return rc; +} + +/** + * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer + * @ioa_cfg: ioa config struct + * @ipr_cmd: ipr command struct + * + * Return value: + * 0 on success / -1 on failure + **/ +static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_cmnd *ipr_cmd) +{ + int i, nseg; + struct scatterlist *sg; + u32 length; + u32 ioadl_flags = 0; + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; + + length = scsi_bufflen(scsi_cmd); + if (!length) + return 0; + + nseg = scsi_dma_map(scsi_cmd); + if (nseg < 0) { + if (printk_ratelimit()) + dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); + return -1; + } + + ipr_cmd->dma_use_sg = nseg; + + ioarcb->data_transfer_length = cpu_to_be32(length); + ioarcb->ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); + + if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { + ioadl_flags = IPR_IOADL_FLAGS_WRITE; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) + ioadl_flags = IPR_IOADL_FLAGS_READ; + + scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { + ioadl64[i].flags = cpu_to_be32(ioadl_flags); + ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg)); + ioadl64[i].address = cpu_to_be64(sg_dma_address(sg)); + } + + ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); + return 0; +} + +/** + * ipr_build_ioadl - Build a scatter/gather list and map the buffer + * @ioa_cfg: ioa config struct + * @ipr_cmd: ipr command struct + * + * Return value: + * 0 on success / -1 on failure + **/ +static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_cmnd *ipr_cmd) +{ + int i, nseg; + struct scatterlist *sg; + u32 length; + u32 ioadl_flags = 0; + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; + + length = scsi_bufflen(scsi_cmd); + if (!length) + return 0; + + nseg = scsi_dma_map(scsi_cmd); + if (nseg < 0) { + dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); + return -1; + } + + ipr_cmd->dma_use_sg = nseg; + + if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { + ioadl_flags = IPR_IOADL_FLAGS_WRITE; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->data_transfer_length = cpu_to_be32(length); + ioarcb->ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); + } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { + ioadl_flags = IPR_IOADL_FLAGS_READ; + ioarcb->read_data_transfer_length = cpu_to_be32(length); + ioarcb->read_ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); + } + + if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) { + ioadl = ioarcb->u.add_data.u.ioadl; + ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) + + offsetof(struct ipr_ioarcb, u.add_data)); + ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; + } + + scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { + ioadl[i].flags_and_data_len = + cpu_to_be32(ioadl_flags | sg_dma_len(sg)); + ioadl[i].address = cpu_to_be32(sg_dma_address(sg)); + } + + ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); + return 0; +} + +/** + * __ipr_erp_done - Process completion of ERP for a device + * @ipr_cmd: ipr command struct + * + * This 
function copies the sense buffer into the scsi_cmd + * struct and pushes the scsi_done function. + * + * Return value: + * nothing + **/ +static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd) +{ + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + struct ipr_resource_entry *res = scsi_cmd->device->hostdata; + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); + + if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { + scsi_cmd->result |= (DID_ERROR << 16); + scmd_printk(KERN_ERR, scsi_cmd, + "Request Sense failed with IOASC: 0x%08X\n", ioasc); + } else { + memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE); + } + + if (res) { + if (!ipr_is_naca_model(res)) + res->needs_sync_complete = 1; + res->in_erp = 0; + } + scsi_dma_unmap(ipr_cmd->scsi_cmd); + scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); +} + +/** + * ipr_erp_done - Process completion of ERP for a device + * @ipr_cmd: ipr command struct + * + * This function copies the sense buffer into the scsi_cmd + * struct and pushes the scsi_done function. + * + * Return value: + * nothing + **/ +static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; + unsigned long hrrq_flags; + + spin_lock_irqsave(&hrrq->_lock, hrrq_flags); + __ipr_erp_done(ipr_cmd); + spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); +} + +/** + * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP + * @ipr_cmd: ipr command struct + * + * Return value: + * none + **/ +static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; + dma_addr_t dma_addr = ipr_cmd->dma_addr; + + memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); + ioarcb->data_transfer_length = 0; + ioarcb->read_data_transfer_length = 0; + ioarcb->ioadl_len = 0; + ioarcb->read_ioadl_len = 0; + ioasa->hdr.ioasc = 0; + ioasa->hdr.residual_data_len = 0; + + if (ipr_cmd->ioa_cfg->sis64) + ioarcb->u.sis64_addr_data.data_ioadl_addr = + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); + else { + ioarcb->write_ioadl_addr = + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); + ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; + } +} + +/** + * __ipr_erp_request_sense - Send request sense to a device + * @ipr_cmd: ipr command struct + * + * This function sends a request sense to a device as a result + * of a check condition. 
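+ * The returned sense data is DMAed straight into the command's
+ * preallocated sense buffer.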
+ * + * Return value: + * nothing + **/ +static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); + + if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { + __ipr_erp_done(ipr_cmd); + return; + } + + ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); + + cmd_pkt->request_type = IPR_RQTYPE_SCSICDB; + cmd_pkt->cdb[0] = REQUEST_SENSE; + cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE; + cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE; + cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; + cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); + + ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma, + SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST); + + ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout, + IPR_REQUEST_SENSE_TIMEOUT * 2); +} + +/** + * ipr_erp_request_sense - Send request sense to a device + * @ipr_cmd: ipr command struct + * + * This function sends a request sense to a device as a result + * of a check condition. + * + * Return value: + * nothing + **/ +static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; + unsigned long hrrq_flags; + + spin_lock_irqsave(&hrrq->_lock, hrrq_flags); + __ipr_erp_request_sense(ipr_cmd); + spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); +} + +/** + * ipr_erp_cancel_all - Send cancel all to a device + * @ipr_cmd: ipr command struct + * + * This function sends a cancel all to a device to clear the + * queue. If we are running TCQ on the device, QERR is set to 1, + * which means all outstanding ops have been dropped on the floor. + * Cancel all will return them to us. + * + * Return value: + * nothing + **/ +static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd) +{ + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + struct ipr_resource_entry *res = scsi_cmd->device->hostdata; + struct ipr_cmd_pkt *cmd_pkt; + + res->in_erp = 1; + + ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); + + if (!scsi_cmd->device->simple_tags) { + __ipr_erp_request_sense(ipr_cmd); + return; + } + + cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; + cmd_pkt->request_type = IPR_RQTYPE_IOACMD; + cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; + + ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout, + IPR_CANCEL_ALL_TIMEOUT); +} + +/** + * ipr_dump_ioasa - Dump contents of IOASA + * @ioa_cfg: ioa config struct + * @ipr_cmd: ipr command struct + * @res: resource entry struct + * + * This function is invoked by the interrupt handler when ops + * fail. It will log the IOASA if appropriate. Only called + * for GPDD ops. 
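+ * The amount of IOASA data dumped is capped at the size of the IOASA
+ * structure for the adapter type.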
+ * + * Return value: + * none + **/ +static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res) +{ + int i; + u16 data_len; + u32 ioasc, fd_ioasc; + struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; + __be32 *ioasa_data = (__be32 *)ioasa; + int error_index; + + ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK; + fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK; + + if (0 == ioasc) + return; + + if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) + return; + + if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc) + error_index = ipr_get_error(fd_ioasc); + else + error_index = ipr_get_error(ioasc); + + if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { + /* Don't log an error if the IOA already logged one */ + if (ioasa->hdr.ilid != 0) + return; + + if (!ipr_is_gscsi(res)) + return; + + if (ipr_error_table[error_index].log_ioasa == 0) + return; + } + + ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); + + data_len = be16_to_cpu(ioasa->hdr.ret_stat_len); + if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) + data_len = sizeof(struct ipr_ioasa64); + else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) + data_len = sizeof(struct ipr_ioasa); + + ipr_err("IOASA Dump:\n"); + + for (i = 0; i < data_len / 4; i += 4) { + ipr_err("%08X: %08X %08X %08X %08X\n", i*4, + be32_to_cpu(ioasa_data[i]), + be32_to_cpu(ioasa_data[i+1]), + be32_to_cpu(ioasa_data[i+2]), + be32_to_cpu(ioasa_data[i+3])); + } +} + +/** + * ipr_gen_sense - Generate SCSI sense data from an IOASA + * @ipr_cmd: ipr command struct + * + * Return value: + * none + **/ +static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) +{ + u32 failing_lba; + u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; + struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; + struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; + u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc); + + memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); + + if (ioasc >= IPR_FIRST_DRIVER_IOASC) + return; + + ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION; + + if (ipr_is_vset_device(res) && + ioasc == IPR_IOASC_MED_DO_NOT_REALLOC && + ioasa->u.vset.failing_lba_hi != 0) { + sense_buf[0] = 0x72; + sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc); + sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc); + sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc); + + sense_buf[7] = 12; + sense_buf[8] = 0; + sense_buf[9] = 0x0A; + sense_buf[10] = 0x80; + + failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi); + + sense_buf[12] = (failing_lba & 0xff000000) >> 24; + sense_buf[13] = (failing_lba & 0x00ff0000) >> 16; + sense_buf[14] = (failing_lba & 0x0000ff00) >> 8; + sense_buf[15] = failing_lba & 0x000000ff; + + failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); + + sense_buf[16] = (failing_lba & 0xff000000) >> 24; + sense_buf[17] = (failing_lba & 0x00ff0000) >> 16; + sense_buf[18] = (failing_lba & 0x0000ff00) >> 8; + sense_buf[19] = failing_lba & 0x000000ff; + } else { + sense_buf[0] = 0x70; + sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc); + sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc); + sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc); + + /* Illegal request */ + if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && + (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) { + sense_buf[7] = 10; /* additional length */ + + /* IOARCB was in error */ + if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24) + sense_buf[15] = 0xC0; + else /* Parameter data was invalid */ + sense_buf[15] = 0x80; + + sense_buf[16] = + 
((IPR_FIELD_POINTER_MASK & + be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff; + sense_buf[17] = + (IPR_FIELD_POINTER_MASK & + be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff; + } else { + if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { + if (ipr_is_vset_device(res)) + failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); + else + failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba); + + sense_buf[0] |= 0x80; /* Or in the Valid bit */ + sense_buf[3] = (failing_lba & 0xff000000) >> 24; + sense_buf[4] = (failing_lba & 0x00ff0000) >> 16; + sense_buf[5] = (failing_lba & 0x0000ff00) >> 8; + sense_buf[6] = failing_lba & 0x000000ff; + } + + sense_buf[7] = 6; /* additional length */ + } + } +} + +/** + * ipr_get_autosense - Copy autosense data to sense buffer + * @ipr_cmd: ipr command struct + * + * This function copies the autosense buffer to the buffer + * in the scsi_cmd, if there is autosense available. + * + * Return value: + * 1 if autosense was available / 0 if not + **/ +static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; + struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; + + if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) + return 0; + + if (ipr_cmd->ioa_cfg->sis64) + memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data, + min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len), + SCSI_SENSE_BUFFERSIZE)); + else + memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, + min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), + SCSI_SENSE_BUFFERSIZE)); + return 1; +} + +/** + * ipr_erp_start - Process an error response for a SCSI op + * @ioa_cfg: ioa config struct + * @ipr_cmd: ipr command struct + * + * This function determines whether or not to initiate ERP + * on the affected device. + * + * Return value: + * nothing + **/ +static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_cmnd *ipr_cmd) +{ + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + struct ipr_resource_entry *res = scsi_cmd->device->hostdata; + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); + u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; + + if (!res) { + __ipr_scsi_eh_done(ipr_cmd); + return; + } + + if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS) + ipr_gen_sense(ipr_cmd); + + ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); + + switch (masked_ioasc) { + case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST: + if (ipr_is_naca_model(res)) + scsi_cmd->result |= (DID_ABORT << 16); + else + scsi_cmd->result |= (DID_IMM_RETRY << 16); + break; + case IPR_IOASC_IR_RESOURCE_HANDLE: + case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA: + scsi_cmd->result |= (DID_NO_CONNECT << 16); + break; + case IPR_IOASC_HW_SEL_TIMEOUT: + scsi_cmd->result |= (DID_NO_CONNECT << 16); + if (!ipr_is_naca_model(res)) + res->needs_sync_complete = 1; + break; + case IPR_IOASC_SYNC_REQUIRED: + if (!res->in_erp) + res->needs_sync_complete = 1; + scsi_cmd->result |= (DID_IMM_RETRY << 16); + break; + case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ + case IPR_IOASA_IR_DUAL_IOA_DISABLED: + /* + * exception: do not set DID_PASSTHROUGH on CHECK CONDITION + * so SCSI mid-layer and upper layers handle it accordingly. + */ + if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION) + scsi_cmd->result |= (DID_PASSTHROUGH << 16); + break; + case IPR_IOASC_BUS_WAS_RESET: + case IPR_IOASC_BUS_WAS_RESET_BY_OTHER: + /* + * Report the bus reset and ask for a retry. The device + * will give CC/UA the next command. 
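+ * (CC/UA: a check condition with unit attention sense data.)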
+ */ + if (!res->resetting_device) + scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); + scsi_cmd->result |= (DID_ERROR << 16); + if (!ipr_is_naca_model(res)) + res->needs_sync_complete = 1; + break; + case IPR_IOASC_HW_DEV_BUS_STATUS: + scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc); + if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) { + if (!ipr_get_autosense(ipr_cmd)) { + if (!ipr_is_naca_model(res)) { + ipr_erp_cancel_all(ipr_cmd); + return; + } + } + } + if (!ipr_is_naca_model(res)) + res->needs_sync_complete = 1; + break; + case IPR_IOASC_NR_INIT_CMD_REQUIRED: + break; + case IPR_IOASC_IR_NON_OPTIMIZED: + if (res->raw_mode) { + res->raw_mode = 0; + scsi_cmd->result |= (DID_IMM_RETRY << 16); + } else + scsi_cmd->result |= (DID_ERROR << 16); + break; + default: + if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) + scsi_cmd->result |= (DID_ERROR << 16); + if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res)) + res->needs_sync_complete = 1; + break; + } + + scsi_dma_unmap(ipr_cmd->scsi_cmd); + scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); +} + +/** + * ipr_scsi_done - mid-layer done function + * @ipr_cmd: ipr command struct + * + * This function is invoked by the interrupt handler for + * ops generated by the SCSI mid-layer + * + * Return value: + * none + **/ +static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); + unsigned long lock_flags; + + scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); + + if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { + scsi_dma_unmap(scsi_cmd); + + spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); + scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); + } else { + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + spin_lock(&ipr_cmd->hrrq->_lock); + ipr_erp_start(ioa_cfg, ipr_cmd); + spin_unlock(&ipr_cmd->hrrq->_lock); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + } +} + +/** + * ipr_queuecommand - Queue a mid-layer request + * @shost: scsi host struct + * @scsi_cmd: scsi command struct + * + * This function queues a request generated by the mid-layer. + * + * Return value: + * 0 on success + * SCSI_MLQUEUE_DEVICE_BUSY if device is busy + * SCSI_MLQUEUE_HOST_BUSY if host is busy + **/ +static int ipr_queuecommand(struct Scsi_Host *shost, + struct scsi_cmnd *scsi_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg; + struct ipr_resource_entry *res; + struct ipr_ioarcb *ioarcb; + struct ipr_cmnd *ipr_cmd; + unsigned long hrrq_flags; + int rc; + struct ipr_hrr_queue *hrrq; + int hrrq_id; + + ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + + scsi_cmd->result = (DID_OK << 16); + res = scsi_cmd->device->hostdata; + + hrrq_id = ipr_get_hrrq_index(ioa_cfg); + hrrq = &ioa_cfg->hrrq[hrrq_id]; + + spin_lock_irqsave(hrrq->lock, hrrq_flags); + /* + * We are currently blocking all devices due to a host reset + * We have told the host to stop giving us new requests, but + * ERP ops don't count. 
FIXME + */ + if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) { + spin_unlock_irqrestore(hrrq->lock, hrrq_flags); + return SCSI_MLQUEUE_HOST_BUSY; + } + + /* + * FIXME - Create scsi_set_host_offline interface + * and the ioa_is_dead check can be removed + */ + if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) { + spin_unlock_irqrestore(hrrq->lock, hrrq_flags); + goto err_nodev; + } + + ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq); + if (ipr_cmd == NULL) { + spin_unlock_irqrestore(hrrq->lock, hrrq_flags); + return SCSI_MLQUEUE_HOST_BUSY; + } + spin_unlock_irqrestore(hrrq->lock, hrrq_flags); + + ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done); + ioarcb = &ipr_cmd->ioarcb; + + memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); + ipr_cmd->scsi_cmd = scsi_cmd; + ipr_cmd->done = ipr_scsi_eh_done; + + if (ipr_is_gscsi(res)) { + if (scsi_cmd->underflow == 0) + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; + + if (res->reset_occurred) { + res->reset_occurred = 0; + ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; + } + } + + if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) { + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; + + ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; + if (scsi_cmd->flags & SCMD_TAGGED) + ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK; + else + ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK; + } + + if (scsi_cmd->cmnd[0] >= 0xC0 && + (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) { + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + } + if (res->raw_mode && ipr_is_af_dasd_device(res)) { + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE; + + if (scsi_cmd->underflow == 0) + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; + } + + if (ioa_cfg->sis64) + rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); + else + rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); + + spin_lock_irqsave(hrrq->lock, hrrq_flags); + if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) { + list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); + spin_unlock_irqrestore(hrrq->lock, hrrq_flags); + if (!rc) + scsi_dma_unmap(scsi_cmd); + return SCSI_MLQUEUE_HOST_BUSY; + } + + if (unlikely(hrrq->ioa_is_dead)) { + list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); + spin_unlock_irqrestore(hrrq->lock, hrrq_flags); + scsi_dma_unmap(scsi_cmd); + goto err_nodev; + } + + ioarcb->res_handle = res->res_handle; + if (res->needs_sync_complete) { + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE; + res->needs_sync_complete = 0; + } + list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q); + ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res)); + ipr_send_command(ipr_cmd); + spin_unlock_irqrestore(hrrq->lock, hrrq_flags); + return 0; + +err_nodev: + spin_lock_irqsave(hrrq->lock, hrrq_flags); + memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + scsi_cmd->result = (DID_NO_CONNECT << 16); + scsi_done(scsi_cmd); + spin_unlock_irqrestore(hrrq->lock, hrrq_flags); + return 0; +} + +/** + * ipr_ioa_info - Get information about the card/driver + * @host: scsi host struct + * + * Return value: + * pointer to buffer with description string + **/ +static const char *ipr_ioa_info(struct Scsi_Host *host) +{ + static char buffer[512]; + struct ipr_ioa_cfg *ioa_cfg; + unsigned long lock_flags = 0; + + ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata; + + spin_lock_irqsave(host->host_lock, lock_flags); + sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type); + spin_unlock_irqrestore(host->host_lock, 
lock_flags); + + return buffer; +} + +static const struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .name = "IPR", + .info = ipr_ioa_info, + .queuecommand = ipr_queuecommand, + .eh_abort_handler = ipr_eh_abort, + .eh_device_reset_handler = ipr_eh_dev_reset, + .eh_host_reset_handler = ipr_eh_host_reset, + .slave_alloc = ipr_slave_alloc, + .slave_configure = ipr_slave_configure, + .slave_destroy = ipr_slave_destroy, + .scan_finished = ipr_scan_finished, + .target_destroy = ipr_target_destroy, + .change_queue_depth = ipr_change_queue_depth, + .bios_param = ipr_biosparam, + .can_queue = IPR_MAX_COMMANDS, + .this_id = -1, + .sg_tablesize = IPR_MAX_SGLIST, + .max_sectors = IPR_IOA_MAX_SECTORS, + .cmd_per_lun = IPR_MAX_CMD_PER_LUN, + .shost_groups = ipr_ioa_groups, + .sdev_groups = ipr_dev_groups, + .proc_name = IPR_NAME, +}; + +#ifdef CONFIG_PPC_PSERIES +static const u16 ipr_blocked_processors[] = { + PVR_NORTHSTAR, + PVR_PULSAR, + PVR_POWER4, + PVR_ICESTAR, + PVR_SSTAR, + PVR_POWER4p, + PVR_630, + PVR_630p +}; + +/** + * ipr_invalid_adapter - Determine if this adapter is supported on this hardware + * @ioa_cfg: ioa cfg struct + * + * Adapters that use Gemstone revision < 3.1 do not work reliably on + * certain pSeries hardware. This function determines if the given + * adapter is in one of these confgurations or not. + * + * Return value: + * 1 if adapter is not supported / 0 if adapter is supported + **/ +static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) +{ + int i; + + if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) { + for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) { + if (pvr_version_is(ipr_blocked_processors[i])) + return 1; + } + } + return 0; +} +#else +#define ipr_invalid_adapter(ioa_cfg) 0 +#endif + +/** + * ipr_ioa_bringdown_done - IOA bring down completion. + * @ipr_cmd: ipr command struct + * + * This function processes the completion of an adapter bring down. + * It wakes any reset sleepers. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int i; + + ENTER; + if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { + ipr_trace; + ioa_cfg->scsi_unblock = 1; + schedule_work(&ioa_cfg->work_q); + } + + ioa_cfg->in_reset_reload = 0; + ioa_cfg->reset_retries = 0; + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + spin_lock(&ioa_cfg->hrrq[i]._lock); + ioa_cfg->hrrq[i].ioa_is_dead = 1; + spin_unlock(&ioa_cfg->hrrq[i]._lock); + } + wmb(); + + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + wake_up_all(&ioa_cfg->reset_wait_q); + LEAVE; + + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_ioa_reset_done - IOA reset completion. + * @ipr_cmd: ipr command struct + * + * This function processes the completion of an adapter reset. + * It schedules any necessary mid-layer add/removes and + * wakes any reset sleepers. 
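+ * Command queues are re-enabled and the HCAMs are re-posted to the
+ * adapter before the reset waiters are woken.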
+ * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_resource_entry *res; + int j; + + ENTER; + ioa_cfg->in_reset_reload = 0; + for (j = 0; j < ioa_cfg->hrrq_num; j++) { + spin_lock(&ioa_cfg->hrrq[j]._lock); + ioa_cfg->hrrq[j].allow_cmds = 1; + spin_unlock(&ioa_cfg->hrrq[j]._lock); + } + wmb(); + ioa_cfg->reset_cmd = NULL; + ioa_cfg->doorbell |= IPR_RUNTIME_RESET; + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (res->add_to_ml || res->del_from_ml) { + ipr_trace; + break; + } + } + schedule_work(&ioa_cfg->work_q); + + for (j = 0; j < IPR_NUM_HCAMS; j++) { + list_del_init(&ioa_cfg->hostrcb[j]->queue); + if (j < IPR_NUM_LOG_HCAMS) + ipr_send_hcam(ioa_cfg, + IPR_HCAM_CDB_OP_CODE_LOG_DATA, + ioa_cfg->hostrcb[j]); + else + ipr_send_hcam(ioa_cfg, + IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, + ioa_cfg->hostrcb[j]); + } + + scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); + dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); + + ioa_cfg->reset_retries = 0; + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + wake_up_all(&ioa_cfg->reset_wait_q); + + ioa_cfg->scsi_unblock = 1; + schedule_work(&ioa_cfg->work_q); + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer + * @supported_dev: supported device struct + * @vpids: vendor product id struct + * + * Return value: + * none + **/ +static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev, + struct ipr_std_inq_vpids *vpids) +{ + memset(supported_dev, 0, sizeof(struct ipr_supported_device)); + memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids)); + supported_dev->num_records = 1; + supported_dev->data_length = + cpu_to_be16(sizeof(struct ipr_supported_device)); + supported_dev->reserved = 0; +} + +/** + * ipr_set_supported_devs - Send Set Supported Devices for a device + * @ipr_cmd: ipr command struct + * + * This function sends a Set Supported Devices to the adapter + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_resource_entry *res = ipr_cmd->u.res; + + ipr_cmd->job_step = ipr_ioa_reset_done; + + list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { + if (!ipr_is_scsi_disk(res)) + continue; + + ipr_cmd->u.res = res; + ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids); + + ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + + ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; + ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES; + ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; + + ipr_init_ioadl(ipr_cmd, + ioa_cfg->vpd_cbs_dma + + offsetof(struct ipr_misc_cbs, supp_dev), + sizeof(struct ipr_supported_device), + IPR_IOADL_FLAGS_WRITE_LAST); + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, + IPR_SET_SUP_DEVICE_TIMEOUT); + + if (!ioa_cfg->sis64) + ipr_cmd->job_step = ipr_set_supported_devs; + LEAVE; + return IPR_RC_JOB_RETURN; + } + + LEAVE; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_get_mode_page - Locate 
specified mode page + * @mode_pages: mode page buffer + * @page_code: page code to find + * @len: minimum required length for mode page + * + * Return value: + * pointer to mode page / NULL on failure + **/ +static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages, + u32 page_code, u32 len) +{ + struct ipr_mode_page_hdr *mode_hdr; + u32 page_length; + u32 length; + + if (!mode_pages || (mode_pages->hdr.length == 0)) + return NULL; + + length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len; + mode_hdr = (struct ipr_mode_page_hdr *) + (mode_pages->data + mode_pages->hdr.block_desc_len); + + while (length) { + if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) { + if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr))) + return mode_hdr; + break; + } else { + page_length = (sizeof(struct ipr_mode_page_hdr) + + mode_hdr->page_length); + length -= page_length; + mode_hdr = (struct ipr_mode_page_hdr *) + ((unsigned long)mode_hdr + page_length); + } + } + return NULL; +} + +/** + * ipr_check_term_power - Check for term power errors + * @ioa_cfg: ioa config struct + * @mode_pages: IOAFP mode pages buffer + * + * Check the IOAFP's mode page 28 for term power errors + * + * Return value: + * nothing + **/ +static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_mode_pages *mode_pages) +{ + int i; + int entry_length; + struct ipr_dev_bus_entry *bus; + struct ipr_mode_page28 *mode_page; + + mode_page = ipr_get_mode_page(mode_pages, 0x28, + sizeof(struct ipr_mode_page28)); + + entry_length = mode_page->entry_length; + + bus = mode_page->bus; + + for (i = 0; i < mode_page->num_entries; i++) { + if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) { + dev_err(&ioa_cfg->pdev->dev, + "Term power is absent on scsi bus %d\n", + bus->res_addr.bus); + } + + bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length); + } +} + +/** + * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table + * @ioa_cfg: ioa config struct + * + * Looks through the config table checking for SES devices. If + * the SES device is in the SES table indicating a maximum SCSI + * bus speed, the speed is limited for the bus. 
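+ * The limit is applied by lowering max_xfer_rate in the per-bus
+ * attribute table.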
+ * + * Return value: + * none + **/ +static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg) +{ + u32 max_xfer_rate; + int i; + + for (i = 0; i < IPR_MAX_NUM_BUSES; i++) { + max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i, + ioa_cfg->bus_attr[i].bus_width); + + if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) + ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; + } +} + +/** + * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28 + * @ioa_cfg: ioa config struct + * @mode_pages: mode page 28 buffer + * + * Updates mode page 28 based on driver configuration + * + * Return value: + * none + **/ +static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_mode_pages *mode_pages) +{ + int i, entry_length; + struct ipr_dev_bus_entry *bus; + struct ipr_bus_attributes *bus_attr; + struct ipr_mode_page28 *mode_page; + + mode_page = ipr_get_mode_page(mode_pages, 0x28, + sizeof(struct ipr_mode_page28)); + + entry_length = mode_page->entry_length; + + /* Loop for each device bus entry */ + for (i = 0, bus = mode_page->bus; + i < mode_page->num_entries; + i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) { + if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) { + dev_err(&ioa_cfg->pdev->dev, + "Invalid resource address reported: 0x%08X\n", + IPR_GET_PHYS_LOC(bus->res_addr)); + continue; + } + + bus_attr = &ioa_cfg->bus_attr[i]; + bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY; + bus->bus_width = bus_attr->bus_width; + bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate); + bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK; + if (bus_attr->qas_enabled) + bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS; + else + bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS; + } +} + +/** + * ipr_build_mode_select - Build a mode select command + * @ipr_cmd: ipr command struct + * @res_handle: resource handle to send command to + * @parm: Byte 2 of Mode Sense command + * @dma_addr: DMA buffer address + * @xfer_len: data transfer length + * + * Return value: + * none + **/ +static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, + __be32 res_handle, u8 parm, + dma_addr_t dma_addr, u8 xfer_len) +{ + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + + ioarcb->res_handle = res_handle; + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->cmd_pkt.cdb[0] = MODE_SELECT; + ioarcb->cmd_pkt.cdb[1] = parm; + ioarcb->cmd_pkt.cdb[4] = xfer_len; + + ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST); +} + +/** + * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA + * @ipr_cmd: ipr command struct + * + * This function sets up the SCSI bus attributes and sends + * a Mode Select for Page 28 to activate them. 
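+ * Bus speed limits and term power are checked and the device bus
+ * entries updated before the page is written back.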
+ * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; + int length; + + ENTER; + ipr_scsi_bus_speed_limit(ioa_cfg); + ipr_check_term_power(ioa_cfg, mode_pages); + ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); + length = mode_pages->hdr.length + 1; + mode_pages->hdr.length = 0; + + ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), + length); + + ipr_cmd->job_step = ipr_set_supported_devs; + ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, + struct ipr_resource_entry, queue); + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_build_mode_sense - Builds a mode sense command + * @ipr_cmd: ipr command struct + * @res_handle: resource entry struct + * @parm: Byte 2 of mode sense command + * @dma_addr: DMA address of mode sense buffer + * @xfer_len: Size of DMA buffer + * + * Return value: + * none + **/ +static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, + __be32 res_handle, + u8 parm, dma_addr_t dma_addr, u8 xfer_len) +{ + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + + ioarcb->res_handle = res_handle; + ioarcb->cmd_pkt.cdb[0] = MODE_SENSE; + ioarcb->cmd_pkt.cdb[2] = parm; + ioarcb->cmd_pkt.cdb[4] = xfer_len; + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; + + ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); +} + +/** + * ipr_reset_cmd_failed - Handle failure of IOA reset command + * @ipr_cmd: ipr command struct + * + * This function handles the failure of an IOA bringup command. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); + + dev_err(&ioa_cfg->pdev->dev, + "0x%02X failed with IOASC: 0x%08X\n", + ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); + + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense + * @ipr_cmd: ipr command struct + * + * This function handles the failure of a Mode Sense to the IOAFP. + * Some adapters do not handle all mode pages. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); + + if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { + ipr_cmd->job_step = ipr_set_supported_devs; + ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, + struct ipr_resource_entry, queue); + return IPR_RC_JOB_CONTINUE; + } + + return ipr_reset_cmd_failed(ipr_cmd); +} + +/** + * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA + * @ipr_cmd: ipr command struct + * + * This function send a Page 28 mode sense to the IOA to + * retrieve SCSI bus attributes. 
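+ * On success, the reset job continues with the corresponding Mode Select.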
+ * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), + 0x28, ioa_cfg->vpd_cbs_dma + + offsetof(struct ipr_misc_cbs, mode_pages), + sizeof(struct ipr_mode_pages)); + + ipr_cmd->job_step = ipr_ioafp_mode_select_page28; + ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA + * @ipr_cmd: ipr command struct + * + * This function enables dual IOA RAID support if possible. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; + struct ipr_mode_page24 *mode_page; + int length; + + ENTER; + mode_page = ipr_get_mode_page(mode_pages, 0x24, + sizeof(struct ipr_mode_page24)); + + if (mode_page) + mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF; + + length = mode_pages->hdr.length + 1; + mode_pages->hdr.length = 0; + + ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), + length); + + ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense + * @ipr_cmd: ipr command struct + * + * This function handles the failure of a Mode Sense to the IOAFP. + * Some adapters do not handle all mode pages. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd) +{ + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); + + if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { + ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; + return IPR_RC_JOB_CONTINUE; + } + + return ipr_reset_cmd_failed(ipr_cmd); +} + +/** + * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA + * @ipr_cmd: ipr command struct + * + * This function send a mode sense to the IOA to retrieve + * the IOA Advanced Function Control mode page. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), + 0x24, ioa_cfg->vpd_cbs_dma + + offsetof(struct ipr_misc_cbs, mode_pages), + sizeof(struct ipr_mode_pages)); + + ipr_cmd->job_step = ipr_ioafp_mode_select_page24; + ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_init_res_table - Initialize the resource table + * @ipr_cmd: ipr command struct + * + * This function looks through the existing resource table, comparing + * it with the config table. This function will take care of old/new + * devices and schedule adding/removing them from the mid-layer + * as appropriate. 
+ * + * Return value: + * IPR_RC_JOB_CONTINUE + **/ +static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_resource_entry *res, *temp; + struct ipr_config_table_entry_wrapper cfgtew; + int entries, found, flag, i; + LIST_HEAD(old_res); + + ENTER; + if (ioa_cfg->sis64) + flag = ioa_cfg->u.cfg_table64->hdr64.flags; + else + flag = ioa_cfg->u.cfg_table->hdr.flags; + + if (flag & IPR_UCODE_DOWNLOAD_REQ) + dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); + + list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) + list_move_tail(&res->queue, &old_res); + + if (ioa_cfg->sis64) + entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); + else + entries = ioa_cfg->u.cfg_table->hdr.num_entries; + + for (i = 0; i < entries; i++) { + if (ioa_cfg->sis64) + cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; + else + cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; + found = 0; + + list_for_each_entry_safe(res, temp, &old_res, queue) { + if (ipr_is_same_device(res, &cfgtew)) { + list_move_tail(&res->queue, &ioa_cfg->used_res_q); + found = 1; + break; + } + } + + if (!found) { + if (list_empty(&ioa_cfg->free_res_q)) { + dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); + break; + } + + found = 1; + res = list_entry(ioa_cfg->free_res_q.next, + struct ipr_resource_entry, queue); + list_move_tail(&res->queue, &ioa_cfg->used_res_q); + ipr_init_res_entry(res, &cfgtew); + res->add_to_ml = 1; + } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))) + res->sdev->allow_restart = 1; + + if (found) + ipr_update_res_entry(res, &cfgtew); + } + + list_for_each_entry_safe(res, temp, &old_res, queue) { + if (res->sdev) { + res->del_from_ml = 1; + res->res_handle = IPR_INVALID_RES_HANDLE; + list_move_tail(&res->queue, &ioa_cfg->used_res_q); + } + } + + list_for_each_entry_safe(res, temp, &old_res, queue) { + ipr_clear_res_target(res); + list_move_tail(&res->queue, &ioa_cfg->free_res_q); + } + + if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) + ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; + else + ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; + + LEAVE; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter. + * @ipr_cmd: ipr command struct + * + * This function sends a Query IOA Configuration command + * to the adapter to retrieve the IOA configuration table. 
+ * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; + struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; + + ENTER; + if (cap->cap & IPR_CAP_DUAL_IOA_RAID) + ioa_cfg->dual_raid = 1; + dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", + ucode_vpd->major_release, ucode_vpd->card_type, + ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + + ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; + ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; + ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; + + ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, + IPR_IOADL_FLAGS_READ_LAST); + + ipr_cmd->job_step = ipr_init_res_table; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd) +{ + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); + + if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) + return IPR_RC_JOB_CONTINUE; + + return ipr_reset_cmd_failed(ipr_cmd); +} + +static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd, + __be32 res_handle, u8 sa_code) +{ + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + + ioarcb->res_handle = res_handle; + ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION; + ioarcb->cmd_pkt.cdb[1] = sa_code; + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; +} + +/** + * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service + * action + * @ipr_cmd: ipr command struct + * + * Return value: + * none + **/ +static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; + + ENTER; + + ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; + + if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) { + ipr_build_ioa_service_action(ipr_cmd, + cpu_to_be32(IPR_IOA_RES_HANDLE), + IPR_IOA_SA_CHANGE_CACHE_PARAMS); + + ioarcb->cmd_pkt.cdb[2] = 0x40; + + ipr_cmd->job_step_failed = ipr_ioa_service_action_failed; + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, + IPR_SET_SUP_DEVICE_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; + } + + LEAVE; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_ioafp_inquiry - Send an Inquiry to the adapter. + * @ipr_cmd: ipr command struct + * @flags: flags to send + * @page: page to inquire + * @dma_addr: DMA address + * @xfer_len: transfer data length + * + * This utility function sends an inquiry to the adapter. 
+ * + * Return value: + * none + **/ +static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, + dma_addr_t dma_addr, u8 xfer_len) +{ + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + + ENTER; + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; + ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + + ioarcb->cmd_pkt.cdb[0] = INQUIRY; + ioarcb->cmd_pkt.cdb[1] = flags; + ioarcb->cmd_pkt.cdb[2] = page; + ioarcb->cmd_pkt.cdb[4] = xfer_len; + + ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + LEAVE; +} + +/** + * ipr_inquiry_page_supported - Is the given inquiry page supported + * @page0: inquiry page 0 buffer + * @page: page code. + * + * This function determines if the specified inquiry page is supported. + * + * Return value: + * 1 if page is supported / 0 if not + **/ +static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page) +{ + int i; + + for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++) + if (page0->page[i] == page) + return 1; + + return 0; +} + +/** + * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter. + * @ipr_cmd: ipr command struct + * + * This function sends a Page 0xC4 inquiry to the adapter + * to retrieve software VPD information. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; + struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; + + ENTER; + ipr_cmd->job_step = ipr_ioafp_set_caching_parameters; + memset(pageC4, 0, sizeof(*pageC4)); + + if (ipr_inquiry_page_supported(page0, 0xC4)) { + ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4, + (ioa_cfg->vpd_cbs_dma + + offsetof(struct ipr_misc_cbs, + pageC4_data)), + sizeof(struct ipr_inquiry_pageC4)); + return IPR_RC_JOB_RETURN; + } + + LEAVE; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter. + * @ipr_cmd: ipr command struct + * + * This function sends a Page 0xD0 inquiry to the adapter + * to retrieve adapter capabilities. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; + struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; + + ENTER; + ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry; + memset(cap, 0, sizeof(*cap)); + + if (ipr_inquiry_page_supported(page0, 0xD0)) { + ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), + sizeof(struct ipr_inquiry_cap)); + return IPR_RC_JOB_RETURN; + } + + LEAVE; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter. + * @ipr_cmd: ipr command struct + * + * This function sends a Page 3 inquiry to the adapter + * to retrieve software VPD information. 
+ * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + + ipr_cmd->job_step = ipr_ioafp_cap_inquiry; + + ipr_ioafp_inquiry(ipr_cmd, 1, 3, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), + sizeof(struct ipr_inquiry_page3)); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter. + * @ipr_cmd: ipr command struct + * + * This function sends a Page 0 inquiry to the adapter + * to retrieve supported inquiry pages. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + char type[5]; + + ENTER; + + /* Grab the type out of the VPD and store it away */ + memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); + type[4] = '\0'; + ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); + + if (ipr_invalid_adapter(ioa_cfg)) { + dev_err(&ioa_cfg->pdev->dev, + "Adapter not supported in this hardware configuration.\n"); + + if (!ipr_testmode) { + ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + list_add_tail(&ipr_cmd->queue, + &ioa_cfg->hrrq->hrrq_free_q); + return IPR_RC_JOB_RETURN; + } + } + + ipr_cmd->job_step = ipr_ioafp_page3_inquiry; + + ipr_ioafp_inquiry(ipr_cmd, 1, 0, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), + sizeof(struct ipr_inquiry_page0)); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter. + * @ipr_cmd: ipr command struct + * + * This function sends a standard inquiry to the adapter. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + ipr_cmd->job_step = ipr_ioafp_page0_inquiry; + + ipr_ioafp_inquiry(ipr_cmd, 0, 0, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), + sizeof(struct ipr_ioa_vpd)); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_ioafp_identify_hrrq - Send Identify Host RRQ. + * @ipr_cmd: ipr command struct + * + * This function send an Identify Host Request Response Queue + * command to establish the HRRQ with the adapter. 
+ * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_hrr_queue *hrrq; + + ENTER; + ipr_cmd->job_step = ipr_ioafp_std_inquiry; + if (ioa_cfg->identify_hrrq_index == 0) + dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); + + if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) { + hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; + + ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q; + ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + if (ioa_cfg->sis64) + ioarcb->cmd_pkt.cdb[1] = 0x1; + + if (ioa_cfg->nvectors == 1) + ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE; + else + ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE; + + ioarcb->cmd_pkt.cdb[2] = + ((u64) hrrq->host_rrq_dma >> 24) & 0xff; + ioarcb->cmd_pkt.cdb[3] = + ((u64) hrrq->host_rrq_dma >> 16) & 0xff; + ioarcb->cmd_pkt.cdb[4] = + ((u64) hrrq->host_rrq_dma >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[5] = + ((u64) hrrq->host_rrq_dma) & 0xff; + ioarcb->cmd_pkt.cdb[7] = + ((sizeof(u32) * hrrq->size) >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[8] = + (sizeof(u32) * hrrq->size) & 0xff; + + if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) + ioarcb->cmd_pkt.cdb[9] = + ioa_cfg->identify_hrrq_index; + + if (ioa_cfg->sis64) { + ioarcb->cmd_pkt.cdb[10] = + ((u64) hrrq->host_rrq_dma >> 56) & 0xff; + ioarcb->cmd_pkt.cdb[11] = + ((u64) hrrq->host_rrq_dma >> 48) & 0xff; + ioarcb->cmd_pkt.cdb[12] = + ((u64) hrrq->host_rrq_dma >> 40) & 0xff; + ioarcb->cmd_pkt.cdb[13] = + ((u64) hrrq->host_rrq_dma >> 32) & 0xff; + } + + if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) + ioarcb->cmd_pkt.cdb[14] = + ioa_cfg->identify_hrrq_index; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, + IPR_INTERNAL_TIMEOUT); + + if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) + ipr_cmd->job_step = ipr_ioafp_identify_hrrq; + + LEAVE; + return IPR_RC_JOB_RETURN; + } + + LEAVE; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_reset_timer_done - Adapter reset timer function + * @t: Timer context used to fetch ipr command struct + * + * Description: This function is used in adapter reset processing + * for timing events. If the reset_cmd pointer in the IOA + * config struct is not this adapter's we are doing nested + * resets and fail_all_ops will take care of freeing the + * command block. + * + * Return value: + * none + **/ +static void ipr_reset_timer_done(struct timer_list *t) +{ + struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + unsigned long lock_flags = 0; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (ioa_cfg->reset_cmd == ipr_cmd) { + list_del(&ipr_cmd->queue); + ipr_cmd->done(ipr_cmd); + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); +} + +/** + * ipr_reset_start_timer - Start a timer for adapter reset job + * @ipr_cmd: ipr command struct + * @timeout: timeout value + * + * Description: This function is used in adapter reset processing + * for timing events. If the reset_cmd pointer in the IOA + * config struct is not this adapter's we are doing nested + * resets and fail_all_ops will take care of freeing the + * command block. 
+ * + * Return value: + * none + **/ +static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd, + unsigned long timeout) +{ + + ENTER; + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); + ipr_cmd->done = ipr_reset_ioa_job; + + ipr_cmd->timer.expires = jiffies + timeout; + ipr_cmd->timer.function = ipr_reset_timer_done; + add_timer(&ipr_cmd->timer); +} + +/** + * ipr_init_ioa_mem - Initialize ioa_cfg control block + * @ioa_cfg: ioa cfg struct + * + * Return value: + * nothing + **/ +static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_hrr_queue *hrrq; + + for_each_hrrq(hrrq, ioa_cfg) { + spin_lock(&hrrq->_lock); + memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size); + + /* Initialize Host RRQ pointers */ + hrrq->hrrq_start = hrrq->host_rrq; + hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1]; + hrrq->hrrq_curr = hrrq->hrrq_start; + hrrq->toggle_bit = 1; + spin_unlock(&hrrq->_lock); + } + wmb(); + + ioa_cfg->identify_hrrq_index = 0; + if (ioa_cfg->hrrq_num == 1) + atomic_set(&ioa_cfg->hrrq_index, 0); + else + atomic_set(&ioa_cfg->hrrq_index, 1); + + /* Zero out config table */ + memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); +} + +/** + * ipr_reset_next_stage - Process IPL stage change based on feedback register. + * @ipr_cmd: ipr command struct + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd) +{ + unsigned long stage, stage_time; + u32 feedback; + volatile u32 int_reg; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + u64 maskval = 0; + + feedback = readl(ioa_cfg->regs.init_feedback_reg); + stage = feedback & IPR_IPL_INIT_STAGE_MASK; + stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK; + + ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); + + /* sanity check the stage_time value */ + if (stage_time == 0) + stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME; + else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) + stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; + else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) + stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; + + if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) { + writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + stage_time = ioa_cfg->transop_timeout; + ipr_cmd->job_step = ipr_ioafp_identify_hrrq; + } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) { + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); + if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { + ipr_cmd->job_step = ipr_ioafp_identify_hrrq; + maskval = IPR_PCII_IPL_STAGE_CHANGE; + maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER; + writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + return IPR_RC_JOB_CONTINUE; + } + } + + ipr_cmd->timer.expires = jiffies + stage_time * HZ; + ipr_cmd->timer.function = ipr_oper_timeout; + ipr_cmd->done = ipr_reset_ioa_job; + add_timer(&ipr_cmd->timer); + + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); + + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_enable_ioa - Enable the IOA following a reset. + * @ipr_cmd: ipr command struct + * + * This function reinitializes some control blocks and + * enables destructive diagnostics on the adapter. 
+ * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + volatile u32 int_reg; + volatile u64 maskval; + int i; + + ENTER; + ipr_cmd->job_step = ipr_ioafp_identify_hrrq; + ipr_init_ioa_mem(ioa_cfg); + + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + spin_lock(&ioa_cfg->hrrq[i]._lock); + ioa_cfg->hrrq[i].allow_interrupts = 1; + spin_unlock(&ioa_cfg->hrrq[i]._lock); + } + if (ioa_cfg->sis64) { + /* Set the adapter to the correct endian mode. */ + writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); + int_reg = readl(ioa_cfg->regs.endian_swap_reg); + } + + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); + + if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { + writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), + ioa_cfg->regs.clr_interrupt_mask_reg32); + int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + return IPR_RC_JOB_CONTINUE; + } + + /* Enable destructive diagnostics on IOA */ + writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); + + if (ioa_cfg->sis64) { + maskval = IPR_PCII_IPL_STAGE_CHANGE; + maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS; + writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); + } else + writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); + + int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + + dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); + + if (ioa_cfg->sis64) { + ipr_cmd->job_step = ipr_reset_next_stage; + return IPR_RC_JOB_CONTINUE; + } + + ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); + ipr_cmd->timer.function = ipr_oper_timeout; + ipr_cmd->done = ipr_reset_ioa_job; + add_timer(&ipr_cmd->timer); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_wait_for_dump - Wait for a dump to timeout. + * @ipr_cmd: ipr command struct + * + * This function is invoked when an adapter dump has run out + * of processing time. + * + * Return value: + * IPR_RC_JOB_CONTINUE + **/ +static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + if (ioa_cfg->sdt_state == GET_DUMP) + ioa_cfg->sdt_state = WAIT_FOR_DUMP; + else if (ioa_cfg->sdt_state == READ_DUMP) + ioa_cfg->sdt_state = ABORT_DUMP; + + ioa_cfg->dump_timeout = 1; + ipr_cmd->job_step = ipr_reset_alert; + + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_unit_check_no_data - Log a unit check/no data error log + * @ioa_cfg: ioa config struct + * + * Logs an error indicating the adapter unit checked, but for some + * reason, we were unable to fetch the unit check buffer. + * + * Return value: + * nothing + **/ +static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg) +{ + ioa_cfg->errors_logged++; + dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); +} + +/** + * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA + * @ioa_cfg: ioa config struct + * + * Fetches the unit check buffer from the adapter by clocking the data + * through the mailbox register. 
+ * + * Return value: + * nothing + **/ +static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) +{ + unsigned long mailbox; + struct ipr_hostrcb *hostrcb; + struct ipr_uc_sdt sdt; + int rc, length; + u32 ioasc; + + mailbox = readl(ioa_cfg->ioa_mailbox); + + if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { + ipr_unit_check_no_data(ioa_cfg); + return; + } + + memset(&sdt, 0, sizeof(struct ipr_uc_sdt)); + rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, + (sizeof(struct ipr_uc_sdt)) / sizeof(__be32)); + + if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) || + ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && + (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { + ipr_unit_check_no_data(ioa_cfg); + return; + } + + /* Find length of the first sdt entry (UC buffer) */ + if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE) + length = be32_to_cpu(sdt.entry[0].end_token); + else + length = (be32_to_cpu(sdt.entry[0].end_token) - + be32_to_cpu(sdt.entry[0].start_token)) & + IPR_FMT2_MBX_ADDR_MASK; + + hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, + struct ipr_hostrcb, queue); + list_del_init(&hostrcb->queue); + memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); + + rc = ipr_get_ldump_data_section(ioa_cfg, + be32_to_cpu(sdt.entry[0].start_token), + (__be32 *)&hostrcb->hcam, + min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); + + if (!rc) { + ipr_handle_log_data(ioa_cfg, hostrcb); + ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); + if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED && + ioa_cfg->sdt_state == GET_DUMP) + ioa_cfg->sdt_state = WAIT_FOR_DUMP; + } else + ipr_unit_check_no_data(ioa_cfg); + + list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); +} + +/** + * ipr_reset_get_unit_check_job - Call to get the unit check buffer. + * @ipr_cmd: ipr command struct + * + * Description: This function will call to get the unit check buffer. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + ioa_cfg->ioa_unit_checked = 0; + ipr_get_unit_check_buffer(ioa_cfg); + ipr_cmd->job_step = ipr_reset_alert; + ipr_reset_start_timer(ipr_cmd, 0); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + + if (ioa_cfg->sdt_state != GET_DUMP) + return IPR_RC_JOB_RETURN; + + if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left || + (readl(ioa_cfg->regs.sense_interrupt_reg) & + IPR_PCII_MAILBOX_STABLE)) { + + if (!ipr_cmd->u.time_left) + dev_err(&ioa_cfg->pdev->dev, + "Timed out waiting for Mailbox register.\n"); + + ioa_cfg->sdt_state = READ_DUMP; + ioa_cfg->dump_timeout = 0; + if (ioa_cfg->sis64) + ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT); + else + ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT); + ipr_cmd->job_step = ipr_reset_wait_for_dump; + schedule_work(&ioa_cfg->work_q); + + } else { + ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; + ipr_reset_start_timer(ipr_cmd, + IPR_CHECK_FOR_RESET_TIMEOUT); + } + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_restore_cfg_space - Restore PCI config space. + * @ipr_cmd: ipr command struct + * + * Description: This function restores the saved PCI config space of + * the adapter, fails all outstanding ops back to the callers, and + * fetches the dump/unit check if applicable to this reset. 
+ * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + ioa_cfg->pdev->state_saved = true; + pci_restore_state(ioa_cfg->pdev); + + if (ipr_set_pcix_cmd_reg(ioa_cfg)) { + ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); + return IPR_RC_JOB_CONTINUE; + } + + ipr_fail_all_ops(ioa_cfg); + + if (ioa_cfg->sis64) { + /* Set the adapter to the correct endian mode. */ + writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); + readl(ioa_cfg->regs.endian_swap_reg); + } + + if (ioa_cfg->ioa_unit_checked) { + if (ioa_cfg->sis64) { + ipr_cmd->job_step = ipr_reset_get_unit_check_job; + ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT); + return IPR_RC_JOB_RETURN; + } else { + ioa_cfg->ioa_unit_checked = 0; + ipr_get_unit_check_buffer(ioa_cfg); + ipr_cmd->job_step = ipr_reset_alert; + ipr_reset_start_timer(ipr_cmd, 0); + return IPR_RC_JOB_RETURN; + } + } + + if (ioa_cfg->in_ioa_bringdown) { + ipr_cmd->job_step = ipr_ioa_bringdown_done; + } else if (ioa_cfg->sdt_state == GET_DUMP) { + ipr_cmd->job_step = ipr_dump_mailbox_wait; + ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX; + } else { + ipr_cmd->job_step = ipr_reset_enable_ioa; + } + + LEAVE; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_reset_bist_done - BIST has completed on the adapter. + * @ipr_cmd: ipr command struct + * + * Description: Unblock config space and resume the reset process. + * + * Return value: + * IPR_RC_JOB_CONTINUE + **/ +static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + if (ioa_cfg->cfg_locked) + pci_cfg_access_unlock(ioa_cfg->pdev); + ioa_cfg->cfg_locked = 0; + ipr_cmd->job_step = ipr_reset_restore_cfg_space; + LEAVE; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_reset_start_bist - Run BIST on the adapter. + * @ipr_cmd: ipr command struct + * + * Description: This function runs BIST on the adapter, then delays 2 seconds. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int rc = PCIBIOS_SUCCESSFUL; + + ENTER; + if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO) + writel(IPR_UPROCI_SIS64_START_BIST, + ioa_cfg->regs.set_uproc_interrupt_reg32); + else + rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); + + if (rc == PCIBIOS_SUCCESSFUL) { + ipr_cmd->job_step = ipr_reset_bist_done; + ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); + rc = IPR_RC_JOB_RETURN; + } else { + if (ioa_cfg->cfg_locked) + pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev); + ioa_cfg->cfg_locked = 0; + ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); + rc = IPR_RC_JOB_CONTINUE; + } + + LEAVE; + return rc; +} + +/** + * ipr_reset_slot_reset_done - Clear PCI reset to the adapter + * @ipr_cmd: ipr command struct + * + * Description: This clears PCI reset to the adapter and delays two seconds. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd) +{ + ENTER; + ipr_cmd->job_step = ipr_reset_bist_done; + ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_reset_work - Pulse a PCIe fundamental reset + * @work: work struct + * + * Description: This pulses warm reset to a slot. 
+ * + **/ +static void ipr_reset_reset_work(struct work_struct *work) +{ + struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work); + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct pci_dev *pdev = ioa_cfg->pdev; + unsigned long lock_flags = 0; + + ENTER; + pci_set_pcie_reset_state(pdev, pcie_warm_reset); + msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT)); + pci_set_pcie_reset_state(pdev, pcie_deassert_reset); + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->reset_cmd == ipr_cmd) + ipr_reset_ioa_job(ipr_cmd); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + LEAVE; +} + +/** + * ipr_reset_slot_reset - Reset the PCI slot of the adapter. + * @ipr_cmd: ipr command struct + * + * Description: This asserts PCI reset to the adapter. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work); + queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); + ipr_cmd->job_step = ipr_reset_slot_reset_done; + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_block_config_access_wait - Wait for permission to block config access + * @ipr_cmd: ipr command struct + * + * Description: This attempts to block config access to the IOA. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int rc = IPR_RC_JOB_CONTINUE; + + if (pci_cfg_access_trylock(ioa_cfg->pdev)) { + ioa_cfg->cfg_locked = 1; + ipr_cmd->job_step = ioa_cfg->reset; + } else { + if (ipr_cmd->u.time_left) { + rc = IPR_RC_JOB_RETURN; + ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; + ipr_reset_start_timer(ipr_cmd, + IPR_CHECK_FOR_RESET_TIMEOUT); + } else { + ipr_cmd->job_step = ioa_cfg->reset; + dev_err(&ioa_cfg->pdev->dev, + "Timed out waiting to lock config access. Resetting anyway.\n"); + } + } + + return rc; +} + +/** + * ipr_reset_block_config_access - Block config access to the IOA + * @ipr_cmd: ipr command struct + * + * Description: This attempts to block config access to the IOA + * + * Return value: + * IPR_RC_JOB_CONTINUE + **/ +static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd) +{ + ipr_cmd->ioa_cfg->cfg_locked = 0; + ipr_cmd->job_step = ipr_reset_block_config_access_wait; + ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_reset_allowed - Query whether or not IOA can be reset + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 if reset not allowed / non-zero if reset is allowed + **/ +static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg) +{ + volatile u32 temp_reg; + + temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); + return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0); +} + +/** + * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA. + * @ipr_cmd: ipr command struct + * + * Description: This function waits for adapter permission to run BIST, + * then runs BIST. If the adapter does not give permission after a + * reasonable time, we will reset the adapter anyway. The impact of + * resetting the adapter without warning the adapter is the risk of + * losing the persistent error log on the adapter. If the adapter is + * reset while it is writing to the flash on the adapter, the flash + * segment will have bad ECC and be zeroed. 
+ * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int rc = IPR_RC_JOB_RETURN; + + if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { + ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; + ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); + } else { + ipr_cmd->job_step = ipr_reset_block_config_access; + rc = IPR_RC_JOB_CONTINUE; + } + + return rc; +} + +/** + * ipr_reset_alert - Alert the adapter of a pending reset + * @ipr_cmd: ipr command struct + * + * Description: This function alerts the adapter that it will be reset. + * If memory space is not currently enabled, proceed directly + * to running BIST on the adapter. The timer must always be started + * so we guarantee we do not run BIST from ipr_isr. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + u16 cmd_reg; + int rc; + + ENTER; + rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); + + if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) { + ipr_mask_and_clear_interrupts(ioa_cfg, ~0); + writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); + ipr_cmd->job_step = ipr_reset_wait_to_start_bist; + } else { + ipr_cmd->job_step = ipr_reset_block_config_access; + } + + ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; + ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_quiesce_done - Complete IOA disconnect + * @ipr_cmd: ipr command struct + * + * Description: Freeze the adapter to complete quiesce processing + * + * Return value: + * IPR_RC_JOB_CONTINUE + **/ +static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + ipr_cmd->job_step = ipr_ioa_bringdown_done; + ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); + LEAVE; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_reset_cancel_hcam_done - Check for outstanding commands + * @ipr_cmd: ipr command struct + * + * Description: Ensure nothing is outstanding to the IOA and + * proceed with IOA disconnect. Otherwise reset the IOA. + * + * Return value: + * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE + **/ +static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_cmnd *loop_cmd; + struct ipr_hrr_queue *hrrq; + int rc = IPR_RC_JOB_CONTINUE; + int count = 0; + + ENTER; + ipr_cmd->job_step = ipr_reset_quiesce_done; + + for_each_hrrq(hrrq, ioa_cfg) { + spin_lock(&hrrq->_lock); + list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) { + count++; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + rc = IPR_RC_JOB_RETURN; + break; + } + spin_unlock(&hrrq->_lock); + + if (count) + break; + } + + LEAVE; + return rc; +} + +/** + * ipr_reset_cancel_hcam - Cancel outstanding HCAMs + * @ipr_cmd: ipr command struct + * + * Description: Cancel any oustanding HCAMs to the IOA. 
+ * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int rc = IPR_RC_JOB_CONTINUE; + struct ipr_cmd_pkt *cmd_pkt; + struct ipr_cmnd *hcam_cmd; + struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; + + ENTER; + ipr_cmd->job_step = ipr_reset_cancel_hcam_done; + + if (!hrrq->ioa_is_dead) { + if (!list_empty(&ioa_cfg->hostrcb_pending_q)) { + list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) { + if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC) + continue; + + ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; + cmd_pkt->request_type = IPR_RQTYPE_IOACMD; + cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST; + cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB; + cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff; + cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff; + cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff; + cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff; + cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff; + cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff; + cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff; + cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, + IPR_CANCEL_TIMEOUT); + + rc = IPR_RC_JOB_RETURN; + ipr_cmd->job_step = ipr_reset_cancel_hcam; + break; + } + } + } else + ipr_cmd->job_step = ipr_reset_alert; + + LEAVE; + return rc; +} + +/** + * ipr_reset_ucode_download_done - Microcode download completion + * @ipr_cmd: ipr command struct + * + * Description: This function unmaps the microcode download buffer. + * + * Return value: + * IPR_RC_JOB_CONTINUE + **/ +static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; + + dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist, + sglist->num_sg, DMA_TO_DEVICE); + + ipr_cmd->job_step = ipr_reset_alert; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_reset_ucode_download - Download microcode to the adapter + * @ipr_cmd: ipr command struct + * + * Description: This function checks to see if it there is microcode + * to download to the adapter. If there is, a download is performed. 
+ * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; + + ENTER; + ipr_cmd->job_step = ipr_reset_alert; + + if (!sglist) + return IPR_RC_JOB_CONTINUE; + + ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; + ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER; + ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE; + ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16; + ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; + ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; + + if (ioa_cfg->sis64) + ipr_build_ucode_ioadl64(ipr_cmd, sglist); + else + ipr_build_ucode_ioadl(ipr_cmd, sglist); + ipr_cmd->job_step = ipr_reset_ucode_download_done; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, + IPR_WRITE_BUFFER_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_shutdown_ioa - Shutdown the adapter + * @ipr_cmd: ipr command struct + * + * Description: This function issues an adapter shutdown of the + * specified type to the specified adapter as part of the + * adapter reset job. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type; + unsigned long timeout; + int rc = IPR_RC_JOB_CONTINUE; + + ENTER; + if (shutdown_type == IPR_SHUTDOWN_QUIESCE) + ipr_cmd->job_step = ipr_reset_cancel_hcam; + else if (shutdown_type != IPR_SHUTDOWN_NONE && + !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { + ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; + ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type; + + if (shutdown_type == IPR_SHUTDOWN_NORMAL) + timeout = IPR_SHUTDOWN_TIMEOUT; + else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL) + timeout = IPR_INTERNAL_TIMEOUT; + else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) + timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO; + else + timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout); + + rc = IPR_RC_JOB_RETURN; + ipr_cmd->job_step = ipr_reset_ucode_download; + } else + ipr_cmd->job_step = ipr_reset_alert; + + LEAVE; + return rc; +} + +/** + * ipr_reset_ioa_job - Adapter reset job + * @ipr_cmd: ipr command struct + * + * Description: This function is the job router for the adapter reset job. + * + * Return value: + * none + **/ +static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) +{ + u32 rc, ioasc; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + do { + ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); + + if (ioa_cfg->reset_cmd != ipr_cmd) { + /* + * We are doing nested adapter resets and this is + * not the current reset job. 
+ */ + list_add_tail(&ipr_cmd->queue, + &ipr_cmd->hrrq->hrrq_free_q); + return; + } + + if (IPR_IOASC_SENSE_KEY(ioasc)) { + rc = ipr_cmd->job_step_failed(ipr_cmd); + if (rc == IPR_RC_JOB_RETURN) + return; + } + + ipr_reinit_ipr_cmnd(ipr_cmd); + ipr_cmd->job_step_failed = ipr_reset_cmd_failed; + rc = ipr_cmd->job_step(ipr_cmd); + } while (rc == IPR_RC_JOB_CONTINUE); +} + +/** + * _ipr_initiate_ioa_reset - Initiate an adapter reset + * @ioa_cfg: ioa config struct + * @job_step: first job step of reset job + * @shutdown_type: shutdown type + * + * Description: This function will initiate the reset of the given adapter + * starting at the selected job step. + * If the caller needs to wait on the completion of the reset, + * the caller must sleep on the reset_wait_q. + * + * Return value: + * none + **/ +static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, + int (*job_step) (struct ipr_cmnd *), + enum ipr_shutdown_type shutdown_type) +{ + struct ipr_cmnd *ipr_cmd; + int i; + + ioa_cfg->in_reset_reload = 1; + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + spin_lock(&ioa_cfg->hrrq[i]._lock); + ioa_cfg->hrrq[i].allow_cmds = 0; + spin_unlock(&ioa_cfg->hrrq[i]._lock); + } + wmb(); + if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { + ioa_cfg->scsi_unblock = 0; + ioa_cfg->scsi_blocked = 1; + scsi_block_requests(ioa_cfg->host); + } + + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ioa_cfg->reset_cmd = ipr_cmd; + ipr_cmd->job_step = job_step; + ipr_cmd->u.shutdown_type = shutdown_type; + + ipr_reset_ioa_job(ipr_cmd); +} + +/** + * ipr_initiate_ioa_reset - Initiate an adapter reset + * @ioa_cfg: ioa config struct + * @shutdown_type: shutdown type + * + * Description: This function will initiate the reset of the given adapter. + * If the caller needs to wait on the completion of the reset, + * the caller must sleep on the reset_wait_q. + * + * Return value: + * none + **/ +static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, + enum ipr_shutdown_type shutdown_type) +{ + int i; + + if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) + return; + + if (ioa_cfg->in_reset_reload) { + if (ioa_cfg->sdt_state == GET_DUMP) + ioa_cfg->sdt_state = WAIT_FOR_DUMP; + else if (ioa_cfg->sdt_state == READ_DUMP) + ioa_cfg->sdt_state = ABORT_DUMP; + } + + if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { + dev_err(&ioa_cfg->pdev->dev, + "IOA taken offline - error recovery failed\n"); + + ioa_cfg->reset_retries = 0; + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + spin_lock(&ioa_cfg->hrrq[i]._lock); + ioa_cfg->hrrq[i].ioa_is_dead = 1; + spin_unlock(&ioa_cfg->hrrq[i]._lock); + } + wmb(); + + if (ioa_cfg->in_ioa_bringdown) { + ioa_cfg->reset_cmd = NULL; + ioa_cfg->in_reset_reload = 0; + ipr_fail_all_ops(ioa_cfg); + wake_up_all(&ioa_cfg->reset_wait_q); + + if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { + ioa_cfg->scsi_unblock = 1; + schedule_work(&ioa_cfg->work_q); + } + return; + } else { + ioa_cfg->in_ioa_bringdown = 1; + shutdown_type = IPR_SHUTDOWN_NONE; + } + } + + _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa, + shutdown_type); +} + +/** + * ipr_reset_freeze - Hold off all I/O activity + * @ipr_cmd: ipr command struct + * + * Description: If the PCI slot is frozen, hold off all I/O + * activity; then, as soon as the slot is available again, + * initiate an adapter reset. 
+ */ +static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int i; + + /* Disallow new interrupts, avoid loop */ + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + spin_lock(&ioa_cfg->hrrq[i]._lock); + ioa_cfg->hrrq[i].allow_interrupts = 0; + spin_unlock(&ioa_cfg->hrrq[i]._lock); + } + wmb(); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); + ipr_cmd->done = ipr_reset_ioa_job; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled + * @pdev: PCI device struct + * + * Description: This routine is called to tell us that the MMIO + * access to the IOA has been restored + */ +static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev) +{ + unsigned long flags = 0; + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + if (!ioa_cfg->probe_done) + pci_save_state(pdev); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ipr_pci_frozen - Called when slot has experienced a PCI bus error. + * @pdev: PCI device struct + * + * Description: This routine is called to tell us that the PCI bus + * is down. Can't do anything here, except put the device driver + * into a holding pattern, waiting for the PCI bus to come back. + */ +static void ipr_pci_frozen(struct pci_dev *pdev) +{ + unsigned long flags = 0; + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + if (ioa_cfg->probe_done) + _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); +} + +/** + * ipr_pci_slot_reset - Called when PCI slot has been reset. + * @pdev: PCI device struct + * + * Description: This routine is called by the pci error recovery + * code after the PCI slot has been reset, just before we + * should resume normal operations. + */ +static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev) +{ + unsigned long flags = 0; + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + if (ioa_cfg->probe_done) { + if (ioa_cfg->needs_warm_reset) + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + else + _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, + IPR_SHUTDOWN_NONE); + } else + wake_up_all(&ioa_cfg->eeh_wait_q); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * ipr_pci_perm_failure - Called when PCI slot is dead for good. + * @pdev: PCI device struct + * + * Description: This routine is called when the PCI bus has + * permanently failed. 
+ */ +static void ipr_pci_perm_failure(struct pci_dev *pdev) +{ + unsigned long flags = 0; + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + int i; + + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + if (ioa_cfg->probe_done) { + if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) + ioa_cfg->sdt_state = ABORT_DUMP; + ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; + ioa_cfg->in_ioa_bringdown = 1; + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + spin_lock(&ioa_cfg->hrrq[i]._lock); + ioa_cfg->hrrq[i].allow_cmds = 0; + spin_unlock(&ioa_cfg->hrrq[i]._lock); + } + wmb(); + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + } else + wake_up_all(&ioa_cfg->eeh_wait_q); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); +} + +/** + * ipr_pci_error_detected - Called when a PCI error is detected. + * @pdev: PCI device struct + * @state: PCI channel state + * + * Description: Called when a PCI error is detected. + * + * Return value: + * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT + */ +static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + switch (state) { + case pci_channel_io_frozen: + ipr_pci_frozen(pdev); + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_perm_failure: + ipr_pci_perm_failure(pdev); + return PCI_ERS_RESULT_DISCONNECT; + default: + break; + } + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..) + * @ioa_cfg: ioa cfg struct + * + * Description: This is the second phase of adapter initialization + * This function takes care of initilizing the adapter to the point + * where it can accept new commands. + * Return value: + * none + **/ +static void ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg) +{ + unsigned long host_lock_flags = 0; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); + dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); + ioa_cfg->probe_done = 1; + if (ioa_cfg->needs_hard_reset) { + ioa_cfg->needs_hard_reset = 0; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + } else + _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, + IPR_SHUTDOWN_NONE); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); + + LEAVE; +} + +/** + * ipr_free_cmd_blks - Frees command blocks allocated for an adapter + * @ioa_cfg: ioa config struct + * + * Return value: + * none + **/ +static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) +{ + int i; + + if (ioa_cfg->ipr_cmnd_list) { + for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { + if (ioa_cfg->ipr_cmnd_list[i]) + dma_pool_free(ioa_cfg->ipr_cmd_pool, + ioa_cfg->ipr_cmnd_list[i], + ioa_cfg->ipr_cmnd_list_dma[i]); + + ioa_cfg->ipr_cmnd_list[i] = NULL; + } + } + + dma_pool_destroy(ioa_cfg->ipr_cmd_pool); + + kfree(ioa_cfg->ipr_cmnd_list); + kfree(ioa_cfg->ipr_cmnd_list_dma); + ioa_cfg->ipr_cmnd_list = NULL; + ioa_cfg->ipr_cmnd_list_dma = NULL; + ioa_cfg->ipr_cmd_pool = NULL; +} + +/** + * ipr_free_mem - Frees memory allocated for an adapter + * @ioa_cfg: ioa cfg struct + * + * Return value: + * nothing + **/ +static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) +{ + int i; + + kfree(ioa_cfg->res_entries); + dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs), + ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); + ipr_free_cmd_blks(ioa_cfg); + + for (i = 0; i < ioa_cfg->hrrq_num; i++) + dma_free_coherent(&ioa_cfg->pdev->dev, + sizeof(u32) * ioa_cfg->hrrq[i].size, + ioa_cfg->hrrq[i].host_rrq, + ioa_cfg->hrrq[i].host_rrq_dma); + + 
dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size, + ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); + + for (i = 0; i < IPR_MAX_HCAMS; i++) { + dma_free_coherent(&ioa_cfg->pdev->dev, + sizeof(struct ipr_hostrcb), + ioa_cfg->hostrcb[i], + ioa_cfg->hostrcb_dma[i]); + } + + ipr_free_dump(ioa_cfg); + kfree(ioa_cfg->trace); +} + +/** + * ipr_free_irqs - Free all allocated IRQs for the adapter. + * @ioa_cfg: ipr cfg struct + * + * This function frees all allocated IRQs for the + * specified adapter. + * + * Return value: + * none + **/ +static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg) +{ + struct pci_dev *pdev = ioa_cfg->pdev; + int i; + + for (i = 0; i < ioa_cfg->nvectors; i++) + free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); + pci_free_irq_vectors(pdev); +} + +/** + * ipr_free_all_resources - Free all allocated resources for an adapter. + * @ioa_cfg: ioa config struct + * + * This function frees all allocated resources for the + * specified adapter. + * + * Return value: + * none + **/ +static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg) +{ + struct pci_dev *pdev = ioa_cfg->pdev; + + ENTER; + ipr_free_irqs(ioa_cfg); + if (ioa_cfg->reset_work_q) + destroy_workqueue(ioa_cfg->reset_work_q); + iounmap(ioa_cfg->hdw_dma_regs); + pci_release_regions(pdev); + ipr_free_mem(ioa_cfg); + scsi_host_put(ioa_cfg->host); + pci_disable_device(pdev); + LEAVE; +} + +/** + * ipr_alloc_cmd_blks - Allocate command blocks for an adapter + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / -ENOMEM on allocation failure + **/ +static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_cmnd *ipr_cmd; + struct ipr_ioarcb *ioarcb; + dma_addr_t dma_addr; + int i, entries_each_hrrq, hrrq_id = 0; + + ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev, + sizeof(struct ipr_cmnd), 512, 0); + + if (!ioa_cfg->ipr_cmd_pool) + return -ENOMEM; + + ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL); + ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL); + + if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { + ipr_free_cmd_blks(ioa_cfg); + return -ENOMEM; + } + + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + if (ioa_cfg->hrrq_num > 1) { + if (i == 0) { + entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS; + ioa_cfg->hrrq[i].min_cmd_id = 0; + ioa_cfg->hrrq[i].max_cmd_id = + (entries_each_hrrq - 1); + } else { + entries_each_hrrq = + IPR_NUM_BASE_CMD_BLKS/ + (ioa_cfg->hrrq_num - 1); + ioa_cfg->hrrq[i].min_cmd_id = + IPR_NUM_INTERNAL_CMD_BLKS + + (i - 1) * entries_each_hrrq; + ioa_cfg->hrrq[i].max_cmd_id = + (IPR_NUM_INTERNAL_CMD_BLKS + + i * entries_each_hrrq - 1); + } + } else { + entries_each_hrrq = IPR_NUM_CMD_BLKS; + ioa_cfg->hrrq[i].min_cmd_id = 0; + ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); + } + ioa_cfg->hrrq[i].size = entries_each_hrrq; + } + + BUG_ON(ioa_cfg->hrrq_num == 0); + + i = IPR_NUM_CMD_BLKS - + ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1; + if (i > 0) { + ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i; + ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i; + } + + for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { + ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool, + GFP_KERNEL, &dma_addr); + + if (!ipr_cmd) { + ipr_free_cmd_blks(ioa_cfg); + return -ENOMEM; + } + + ioa_cfg->ipr_cmnd_list[i] = ipr_cmd; + ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; + + ioarcb = &ipr_cmd->ioarcb; + ipr_cmd->dma_addr = dma_addr; + if (ioa_cfg->sis64) + 
ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr); + else + ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr); + + ioarcb->host_response_handle = cpu_to_be32(i << 2); + if (ioa_cfg->sis64) { + ioarcb->u.sis64_addr_data.data_ioadl_addr = + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); + ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64)); + } else { + ioarcb->write_ioadl_addr = + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); + ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; + ioarcb->ioasa_host_pci_addr = + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa)); + } + ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); + ipr_cmd->cmd_index = i; + ipr_cmd->ioa_cfg = ioa_cfg; + ipr_cmd->sense_buffer_dma = dma_addr + + offsetof(struct ipr_cmnd, sense_buffer); + + ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id; + ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id]; + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id) + hrrq_id++; + } + + return 0; +} + +/** + * ipr_alloc_mem - Allocate memory for an adapter + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / non-zero for error + **/ +static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) +{ + struct pci_dev *pdev = ioa_cfg->pdev; + int i, rc = -ENOMEM; + + ENTER; + ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported, + sizeof(struct ipr_resource_entry), + GFP_KERNEL); + + if (!ioa_cfg->res_entries) + goto out; + + for (i = 0; i < ioa_cfg->max_devs_supported; i++) { + list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); + ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; + } + + ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev, + sizeof(struct ipr_misc_cbs), + &ioa_cfg->vpd_cbs_dma, + GFP_KERNEL); + + if (!ioa_cfg->vpd_cbs) + goto out_free_res_entries; + + if (ipr_alloc_cmd_blks(ioa_cfg)) + goto out_free_vpd_cbs; + + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev, + sizeof(u32) * ioa_cfg->hrrq[i].size, + &ioa_cfg->hrrq[i].host_rrq_dma, + GFP_KERNEL); + + if (!ioa_cfg->hrrq[i].host_rrq) { + while (--i >= 0) + dma_free_coherent(&pdev->dev, + sizeof(u32) * ioa_cfg->hrrq[i].size, + ioa_cfg->hrrq[i].host_rrq, + ioa_cfg->hrrq[i].host_rrq_dma); + goto out_ipr_free_cmd_blocks; + } + ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; + } + + ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev, + ioa_cfg->cfg_table_size, + &ioa_cfg->cfg_table_dma, + GFP_KERNEL); + + if (!ioa_cfg->u.cfg_table) + goto out_free_host_rrq; + + for (i = 0; i < IPR_MAX_HCAMS; i++) { + ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev, + sizeof(struct ipr_hostrcb), + &ioa_cfg->hostrcb_dma[i], + GFP_KERNEL); + + if (!ioa_cfg->hostrcb[i]) + goto out_free_hostrcb_dma; + + ioa_cfg->hostrcb[i]->hostrcb_dma = + ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); + ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg; + list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); + } + + ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES, + sizeof(struct ipr_trace_entry), + GFP_KERNEL); + + if (!ioa_cfg->trace) + goto out_free_hostrcb_dma; + + rc = 0; +out: + LEAVE; + return rc; + +out_free_hostrcb_dma: + while (i-- > 0) { + dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb), + ioa_cfg->hostrcb[i], + ioa_cfg->hostrcb_dma[i]); + } + dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size, + ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); 
+out_free_host_rrq: + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + dma_free_coherent(&pdev->dev, + sizeof(u32) * ioa_cfg->hrrq[i].size, + ioa_cfg->hrrq[i].host_rrq, + ioa_cfg->hrrq[i].host_rrq_dma); + } +out_ipr_free_cmd_blocks: + ipr_free_cmd_blks(ioa_cfg); +out_free_vpd_cbs: + dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs), + ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); +out_free_res_entries: + kfree(ioa_cfg->res_entries); + goto out; +} + +/** + * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values + * @ioa_cfg: ioa config struct + * + * Return value: + * none + **/ +static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg) +{ + int i; + + for (i = 0; i < IPR_MAX_NUM_BUSES; i++) { + ioa_cfg->bus_attr[i].bus = i; + ioa_cfg->bus_attr[i].qas_enabled = 0; + ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH; + if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds)) + ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed]; + else + ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE; + } +} + +/** + * ipr_init_regs - Initialize IOA registers + * @ioa_cfg: ioa config struct + * + * Return value: + * none + **/ +static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg) +{ + const struct ipr_interrupt_offsets *p; + struct ipr_interrupts *t; + void __iomem *base; + + p = &ioa_cfg->chip_cfg->regs; + t = &ioa_cfg->regs; + base = ioa_cfg->hdw_dma_regs; + + t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; + t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; + t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; + t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; + t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32; + t->clr_interrupt_reg = base + p->clr_interrupt_reg; + t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32; + t->sense_interrupt_reg = base + p->sense_interrupt_reg; + t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32; + t->ioarrin_reg = base + p->ioarrin_reg; + t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; + t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32; + t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; + t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32; + t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; + t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32; + + if (ioa_cfg->sis64) { + t->init_feedback_reg = base + p->init_feedback_reg; + t->dump_addr_reg = base + p->dump_addr_reg; + t->dump_data_reg = base + p->dump_data_reg; + t->endian_swap_reg = base + p->endian_swap_reg; + } +} + +/** + * ipr_init_ioa_cfg - Initialize IOA config struct + * @ioa_cfg: ioa config struct + * @host: scsi host struct + * @pdev: PCI dev struct + * + * Return value: + * none + **/ +static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, + struct Scsi_Host *host, struct pci_dev *pdev) +{ + int i; + + ioa_cfg->host = host; + ioa_cfg->pdev = pdev; + ioa_cfg->log_level = ipr_log_level; + ioa_cfg->doorbell = IPR_DOORBELL; + sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); + sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); + sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START); + sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL); + sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL); + sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL); + + INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q); + INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); + 
INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q); + INIT_LIST_HEAD(&ioa_cfg->free_res_q); + INIT_LIST_HEAD(&ioa_cfg->used_res_q); + INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); + INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread); + init_waitqueue_head(&ioa_cfg->reset_wait_q); + init_waitqueue_head(&ioa_cfg->msi_wait_q); + init_waitqueue_head(&ioa_cfg->eeh_wait_q); + ioa_cfg->sdt_state = INACTIVE; + + ipr_initialize_bus_attr(ioa_cfg); + ioa_cfg->max_devs_supported = ipr_max_devs; + + if (ioa_cfg->sis64) { + host->max_channel = IPR_MAX_SIS64_BUSES; + host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS; + host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; + if (ipr_max_devs > IPR_MAX_SIS64_DEVS) + ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; + ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) + + ((sizeof(struct ipr_config_table_entry64) + * ioa_cfg->max_devs_supported))); + } else { + host->max_channel = IPR_VSET_BUS; + host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; + host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; + if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS) + ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; + ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) + + ((sizeof(struct ipr_config_table_entry) + * ioa_cfg->max_devs_supported))); + } + + host->unique_id = host->host_no; + host->max_cmd_len = IPR_MAX_CDB_LEN; + host->can_queue = ioa_cfg->max_cmds; + pci_set_drvdata(pdev, ioa_cfg); + + for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { + INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); + INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); + spin_lock_init(&ioa_cfg->hrrq[i]._lock); + if (i == 0) + ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; + else + ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; + } +} + +/** + * ipr_get_chip_info - Find adapter chip information + * @dev_id: PCI device id struct + * + * Return value: + * ptr to chip information on success / NULL on failure + **/ +static const struct ipr_chip_t * +ipr_get_chip_info(const struct pci_device_id *dev_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ipr_chip); i++) + if (ipr_chip[i].vendor == dev_id->vendor && + ipr_chip[i].device == dev_id->device) + return &ipr_chip[i]; + return NULL; +} + +/** + * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete + * during probe time + * @ioa_cfg: ioa config struct + * + * Return value: + * None + **/ +static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg) +{ + struct pci_dev *pdev = ioa_cfg->pdev; + + if (pci_channel_offline(pdev)) { + wait_event_timeout(ioa_cfg->eeh_wait_q, + !pci_channel_offline(pdev), + IPR_PCI_ERROR_RECOVERY_TIMEOUT); + pci_restore_state(pdev); + } +} + +static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg) +{ + int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1; + + for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) { + snprintf(ioa_cfg->vectors_info[vec_idx].desc, n, + "host%d-%d", ioa_cfg->host->host_no, vec_idx); + ioa_cfg->vectors_info[vec_idx]. 
+ desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0; + } +} + +static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg, + struct pci_dev *pdev) +{ + int i, rc; + + for (i = 1; i < ioa_cfg->nvectors; i++) { + rc = request_irq(pci_irq_vector(pdev, i), + ipr_isr_mhrrq, + 0, + ioa_cfg->vectors_info[i].desc, + &ioa_cfg->hrrq[i]); + if (rc) { + while (--i > 0) + free_irq(pci_irq_vector(pdev, i), + &ioa_cfg->hrrq[i]); + return rc; + } + } + return 0; +} + +/** + * ipr_test_intr - Handle the interrupt generated in ipr_test_msi(). + * @devp: PCI device struct + * @irq: IRQ number + * + * Description: Simply set the msi_received flag to 1 indicating that + * Message Signaled Interrupts are supported. + * + * Return value: + * 0 on success / non-zero on failure + **/ +static irqreturn_t ipr_test_intr(int irq, void *devp) +{ + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; + unsigned long lock_flags = 0; + + dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + ioa_cfg->msi_received = 1; + wake_up(&ioa_cfg->msi_wait_q); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return IRQ_HANDLED; +} + +/** + * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support. + * @ioa_cfg: ioa config struct + * @pdev: PCI device struct + * + * Description: This routine sets up and initiates a test interrupt to determine + * if the interrupt is received via the ipr_test_intr() service routine. + * If the tests fails, the driver will fall back to LSI. + * + * Return value: + * 0 on success / non-zero on failure + **/ +static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) +{ + int rc; + unsigned long lock_flags = 0; + int irq = pci_irq_vector(pdev, 0); + + ENTER; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + init_waitqueue_head(&ioa_cfg->msi_wait_q); + ioa_cfg->msi_received = 0; + ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32); + readl(ioa_cfg->regs.sense_interrupt_mask_reg); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); + if (rc) { + dev_err(&pdev->dev, "Can not assign irq %d\n", irq); + return rc; + } else if (ipr_debug) + dev_info(&pdev->dev, "IRQ assigned: %d\n", irq); + + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); + readl(ioa_cfg->regs.sense_interrupt_reg); + wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); + + if (!ioa_cfg->msi_received) { + /* MSI test failed */ + dev_info(&pdev->dev, "MSI test failed. 
Falling back to LSI.\n"); + rc = -EOPNOTSUPP; + } else if (ipr_debug) + dev_info(&pdev->dev, "MSI test succeeded.\n"); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + free_irq(irq, ioa_cfg); + + LEAVE; + + return rc; +} + + /* ipr_probe_ioa - Allocates memory and does first stage of initialization + * @pdev: PCI device struct + * @dev_id: PCI device id struct + * + * Return value: + * 0 on success / non-zero on failure + **/ +static int ipr_probe_ioa(struct pci_dev *pdev, + const struct pci_device_id *dev_id) +{ + struct ipr_ioa_cfg *ioa_cfg; + struct Scsi_Host *host; + unsigned long ipr_regs_pci; + void __iomem *ipr_regs; + int rc = PCIBIOS_SUCCESSFUL; + volatile u32 mask, uproc, interrupts; + unsigned long lock_flags, driver_lock_flags; + unsigned int irq_flag; + + ENTER; + + dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); + host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); + + if (!host) { + dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n"); + rc = -ENOMEM; + goto out; + } + + ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; + memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg)); + + ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); + + if (!ioa_cfg->ipr_chip) { + dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", + dev_id->vendor, dev_id->device); + goto out_scsi_host_put; + } + + /* set SIS 32 or SIS 64 */ + ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; + ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; + ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr; + ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds; + + if (ipr_transop_timeout) + ioa_cfg->transop_timeout = ipr_transop_timeout; + else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT) + ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT; + else + ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT; + + ioa_cfg->revid = pdev->revision; + + ipr_init_ioa_cfg(ioa_cfg, host, pdev); + + ipr_regs_pci = pci_resource_start(pdev, 0); + + rc = pci_request_regions(pdev, IPR_NAME); + if (rc < 0) { + dev_err(&pdev->dev, + "Couldn't register memory range of registers\n"); + goto out_scsi_host_put; + } + + rc = pci_enable_device(pdev); + + if (rc || pci_channel_offline(pdev)) { + if (pci_channel_offline(pdev)) { + ipr_wait_for_pci_err_recovery(ioa_cfg); + rc = pci_enable_device(pdev); + } + + if (rc) { + dev_err(&pdev->dev, "Cannot enable adapter\n"); + ipr_wait_for_pci_err_recovery(ioa_cfg); + goto out_release_regions; + } + } + + ipr_regs = pci_ioremap_bar(pdev, 0); + + if (!ipr_regs) { + dev_err(&pdev->dev, + "Couldn't map memory range of registers\n"); + rc = -ENOMEM; + goto out_disable; + } + + ioa_cfg->hdw_dma_regs = ipr_regs; + ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; + ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; + + ipr_init_regs(ioa_cfg); + + if (ioa_cfg->sis64) { + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc < 0) { + dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n"); + rc = dma_set_mask_and_coherent(&pdev->dev, + DMA_BIT_MASK(32)); + } + } else + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + + if (rc < 0) { + dev_err(&pdev->dev, "Failed to set DMA mask\n"); + goto cleanup_nomem; + } + + rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, + ioa_cfg->chip_cfg->cache_line_size); + + if (rc != PCIBIOS_SUCCESSFUL) { + dev_err(&pdev->dev, "Write of cache line size failed\n"); + ipr_wait_for_pci_err_recovery(ioa_cfg); + rc = -EIO; + goto cleanup_nomem; + } + + /* Issue MMIO read to ensure card 
is not in EEH */ + interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); + ipr_wait_for_pci_err_recovery(ioa_cfg); + + if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) { + dev_err(&pdev->dev, "The max number of MSIX is %d\n", + IPR_MAX_MSIX_VECTORS); + ipr_number_of_msix = IPR_MAX_MSIX_VECTORS; + } + + irq_flag = PCI_IRQ_LEGACY; + if (ioa_cfg->ipr_chip->has_msi) + irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX; + rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag); + if (rc < 0) { + ipr_wait_for_pci_err_recovery(ioa_cfg); + goto cleanup_nomem; + } + ioa_cfg->nvectors = rc; + + if (!pdev->msi_enabled && !pdev->msix_enabled) + ioa_cfg->clear_isr = 1; + + pci_set_master(pdev); + + if (pci_channel_offline(pdev)) { + ipr_wait_for_pci_err_recovery(ioa_cfg); + pci_set_master(pdev); + if (pci_channel_offline(pdev)) { + rc = -EIO; + goto out_msi_disable; + } + } + + if (pdev->msi_enabled || pdev->msix_enabled) { + rc = ipr_test_msi(ioa_cfg, pdev); + switch (rc) { + case 0: + dev_info(&pdev->dev, + "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors, + pdev->msix_enabled ? "-X" : ""); + break; + case -EOPNOTSUPP: + ipr_wait_for_pci_err_recovery(ioa_cfg); + pci_free_irq_vectors(pdev); + + ioa_cfg->nvectors = 1; + ioa_cfg->clear_isr = 1; + break; + default: + goto out_msi_disable; + } + } + + ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors, + (unsigned int)num_online_cpus(), + (unsigned int)IPR_MAX_HRRQ_NUM); + + if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) + goto out_msi_disable; + + if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) + goto out_msi_disable; + + rc = ipr_alloc_mem(ioa_cfg); + if (rc < 0) { + dev_err(&pdev->dev, + "Couldn't allocate enough memory for device driver!\n"); + goto out_msi_disable; + } + + /* Save away PCI config space for use following IOA reset */ + rc = pci_save_state(pdev); + + if (rc != PCIBIOS_SUCCESSFUL) { + dev_err(&pdev->dev, "Failed to save PCI config space\n"); + rc = -EIO; + goto cleanup_nolog; + } + + /* + * If HRRQ updated interrupt is not masked, or reset alert is set, + * the card is in an unknown state and needs a hard reset + */ + mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); + interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32); + uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); + if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT)) + ioa_cfg->needs_hard_reset = 1; + if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices) + ioa_cfg->needs_hard_reset = 1; + if (interrupts & IPR_PCII_IOA_UNIT_CHECKED) + ioa_cfg->ioa_unit_checked = 1; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + if (pdev->msi_enabled || pdev->msix_enabled) { + name_msi_vectors(ioa_cfg); + rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0, + ioa_cfg->vectors_info[0].desc, + &ioa_cfg->hrrq[0]); + if (!rc) + rc = ipr_request_other_msi_irqs(ioa_cfg, pdev); + } else { + rc = request_irq(pdev->irq, ipr_isr, + IRQF_SHARED, + IPR_NAME, &ioa_cfg->hrrq[0]); + } + if (rc) { + dev_err(&pdev->dev, "Couldn't register IRQ %d! 
rc=%d\n", + pdev->irq, rc); + goto cleanup_nolog; + } + + if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) || + (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) { + ioa_cfg->needs_warm_reset = 1; + ioa_cfg->reset = ipr_reset_slot_reset; + + ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d", + WQ_MEM_RECLAIM, host->host_no); + + if (!ioa_cfg->reset_work_q) { + dev_err(&pdev->dev, "Couldn't register reset workqueue\n"); + rc = -ENOMEM; + goto out_free_irq; + } + } else + ioa_cfg->reset = ipr_reset_start_bist; + + spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); + list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); + spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); + + LEAVE; +out: + return rc; + +out_free_irq: + ipr_free_irqs(ioa_cfg); +cleanup_nolog: + ipr_free_mem(ioa_cfg); +out_msi_disable: + ipr_wait_for_pci_err_recovery(ioa_cfg); + pci_free_irq_vectors(pdev); +cleanup_nomem: + iounmap(ipr_regs); +out_disable: + pci_disable_device(pdev); +out_release_regions: + pci_release_regions(pdev); +out_scsi_host_put: + scsi_host_put(host); + goto out; +} + +/** + * ipr_initiate_ioa_bringdown - Bring down an adapter + * @ioa_cfg: ioa config struct + * @shutdown_type: shutdown type + * + * Description: This function will initiate bringing down the adapter. + * This consists of issuing an IOA shutdown to the adapter + * to flush the cache, and running BIST. + * If the caller needs to wait on the completion of the reset, + * the caller must sleep on the reset_wait_q. + * + * Return value: + * none + **/ +static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg, + enum ipr_shutdown_type shutdown_type) +{ + ENTER; + if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) + ioa_cfg->sdt_state = ABORT_DUMP; + ioa_cfg->reset_retries = 0; + ioa_cfg->in_ioa_bringdown = 1; + ipr_initiate_ioa_reset(ioa_cfg, shutdown_type); + LEAVE; +} + +/** + * __ipr_remove - Remove a single adapter + * @pdev: pci device struct + * + * Adapter hot plug remove entry point. 
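+ * Waits for any reset/reload in progress to complete, brings the adapter down, and frees all adapter resources.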
+ * + * Return value: + * none + **/ +static void __ipr_remove(struct pci_dev *pdev) +{ + unsigned long host_lock_flags = 0; + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + int i; + unsigned long driver_lock_flags; + ENTER; + + spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); + while (ioa_cfg->in_reset_reload) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); + } + + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + spin_lock(&ioa_cfg->hrrq[i]._lock); + ioa_cfg->hrrq[i].removing_ioa = 1; + spin_unlock(&ioa_cfg->hrrq[i]._lock); + } + wmb(); + ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + flush_work(&ioa_cfg->work_q); + if (ioa_cfg->reset_work_q) + flush_workqueue(ioa_cfg->reset_work_q); + INIT_LIST_HEAD(&ioa_cfg->used_res_q); + spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); + + spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); + list_del(&ioa_cfg->queue); + spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); + + if (ioa_cfg->sdt_state == ABORT_DUMP) + ioa_cfg->sdt_state = WAIT_FOR_DUMP; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); + + ipr_free_all_resources(ioa_cfg); + + LEAVE; +} + +/** + * ipr_remove - IOA hot plug remove entry point + * @pdev: pci device struct + * + * Adapter hot plug remove entry point. + * + * Return value: + * none + **/ +static void ipr_remove(struct pci_dev *pdev) +{ + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + + ENTER; + + ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, + &ipr_trace_attr); + ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, + &ipr_dump_attr); + sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, + &ipr_ioa_async_err_log); + scsi_remove_host(ioa_cfg->host); + + __ipr_remove(pdev); + + LEAVE; +} + +/** + * ipr_probe - Adapter hot plug add entry point + * @pdev: pci device struct + * @dev_id: pci device ID + * + * Return value: + * 0 on success / non-zero on failure + **/ +static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) +{ + struct ipr_ioa_cfg *ioa_cfg; + unsigned long flags; + int rc, i; + + rc = ipr_probe_ioa(pdev, dev_id); + + if (rc) + return rc; + + ioa_cfg = pci_get_drvdata(pdev); + ipr_probe_ioa_part2(ioa_cfg); + + rc = scsi_add_host(ioa_cfg->host, &pdev->dev); + + if (rc) { + __ipr_remove(pdev); + return rc; + } + + rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj, + &ipr_trace_attr); + + if (rc) { + scsi_remove_host(ioa_cfg->host); + __ipr_remove(pdev); + return rc; + } + + rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj, + &ipr_ioa_async_err_log); + + if (rc) { + ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, + &ipr_dump_attr); + ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, + &ipr_trace_attr); + scsi_remove_host(ioa_cfg->host); + __ipr_remove(pdev); + return rc; + } + + rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj, + &ipr_dump_attr); + + if (rc) { + sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, + &ipr_ioa_async_err_log); + ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, + &ipr_trace_attr); + scsi_remove_host(ioa_cfg->host); + __ipr_remove(pdev); + return rc; + } + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + ioa_cfg->scan_enabled = 1; + 
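	/* With scan_enabled set, the worker scheduled below may begin device discovery. */ +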
schedule_work(&ioa_cfg->work_q); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); + + ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; + + if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { + for (i = 1; i < ioa_cfg->hrrq_num; i++) { + irq_poll_init(&ioa_cfg->hrrq[i].iopoll, + ioa_cfg->iopoll_weight, ipr_iopoll); + } + } + + scsi_scan_host(ioa_cfg->host); + + return 0; +} + +/** + * ipr_shutdown - Shutdown handler. + * @pdev: pci device struct + * + * This function is invoked upon system shutdown/reboot. It will issue + * an adapter shutdown to the adapter to flush the write cache. + * + * Return value: + * none + **/ +static void ipr_shutdown(struct pci_dev *pdev) +{ + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + unsigned long lock_flags = 0; + enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL; + int i; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { + ioa_cfg->iopoll_weight = 0; + for (i = 1; i < ioa_cfg->hrrq_num; i++) + irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); + } + + while (ioa_cfg->in_reset_reload) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + } + + if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) + shutdown_type = IPR_SHUTDOWN_QUIESCE; + + ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) { + ipr_free_irqs(ioa_cfg); + pci_disable_device(ioa_cfg->pdev); + } +} + +static struct pci_device_id ipr_pci_table[] = { + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 }, + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 }, + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 }, + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0, + IPR_USE_LONG_TRANSOP_TIMEOUT }, + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, + IPR_USE_LONG_TRANSOP_TIMEOUT }, + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, + IPR_USE_LONG_TRANSOP_TIMEOUT }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, + IPR_USE_LONG_TRANSOP_TIMEOUT}, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, + IPR_USE_LONG_TRANSOP_TIMEOUT }, + { PCI_VENDOR_ID_IBM, 
PCI_DEVICE_ID_IBM_OBSIDIAN_E, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, + IPR_USE_LONG_TRANSOP_TIMEOUT }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, + IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 }, + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 }, + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0, + IPR_USE_LONG_TRANSOP_TIMEOUT }, + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, + IPR_USE_LONG_TRANSOP_TIMEOUT }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 }, + { 
PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 }, + { } +}; +MODULE_DEVICE_TABLE(pci, ipr_pci_table); + +static const struct pci_error_handlers ipr_err_handler = { + .error_detected = ipr_pci_error_detected, + .mmio_enabled = ipr_pci_mmio_enabled, + .slot_reset = ipr_pci_slot_reset, +}; + +static struct pci_driver ipr_driver = { + .name = IPR_NAME, + .id_table = ipr_pci_table, + .probe = ipr_probe, + .remove = ipr_remove, + .shutdown = ipr_shutdown, + .err_handler = &ipr_err_handler, +}; + +/** + * ipr_halt_done - Shutdown prepare completion + * @ipr_cmd: ipr command struct + * + * Return value: + * none + **/ +static void ipr_halt_done(struct ipr_cmnd *ipr_cmd) +{ + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); +} + +/** + * ipr_halt - Issue shutdown prepare to all adapters + * @nb: Notifier block + * @event: Notifier event + * @buf: Notifier data (unused) + * + * Return value: + * NOTIFY_OK on success / NOTIFY_DONE on failure + **/ +static int ipr_halt(struct notifier_block *nb, ulong event, void *buf) +{ + struct ipr_cmnd *ipr_cmd; + struct ipr_ioa_cfg *ioa_cfg; + unsigned long flags = 0, driver_lock_flags; + + if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) + return NOTIFY_DONE; + + spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); + + list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) { + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || + (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); + continue; + } + + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; + ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL; + + ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); + } + spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); + + return NOTIFY_OK; +} + +static struct notifier_block ipr_notifier = { + ipr_halt, NULL, 0 +}; + +/** + * ipr_init - Module entry point + * + * Return value: + * 0 on success / negative value on failure + **/ +static int __init ipr_init(void) +{ + int rc; + + ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", + IPR_DRIVER_VERSION, IPR_DRIVER_DATE); + + register_reboot_notifier(&ipr_notifier); + rc = pci_register_driver(&ipr_driver); + if (rc) { + unregister_reboot_notifier(&ipr_notifier); + return rc; + } + + return 0; +} + +/** + * ipr_exit - Module unload + * + * Module unload entry point. 
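+ * Unregisters the reboot notifier and the PCI driver.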
+ * + * Return value: + * none + **/ +static void __exit ipr_exit(void) +{ + unregister_reboot_notifier(&ipr_notifier); + pci_unregister_driver(&ipr_driver); +} + +module_init(ipr_init); +module_exit(ipr_exit); diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h new file mode 100644 index 000000000..c77d6ca1a --- /dev/null +++ b/drivers/scsi/ipr.h @@ -0,0 +1,1929 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * ipr.h -- driver for IBM Power Linux RAID adapters + * + * Written By: Brian King , IBM Corporation + * + * Copyright (C) 2003, 2004 IBM Corporation + * + * Alan Cox - Removed several careless u32/dma_addr_t errors + * that broke 64bit platforms. + */ + +#ifndef _IPR_H +#define _IPR_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Literals + */ +#define IPR_DRIVER_VERSION "2.6.4" +#define IPR_DRIVER_DATE "(March 14, 2017)" + +/* + * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding + * ops per device for devices not running tagged command queuing. + * This can be adjusted at runtime through sysfs device attributes. + */ +#define IPR_MAX_CMD_PER_LUN 6 + +/* + * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of + * ops the mid-layer can send to the adapter. + */ +#define IPR_NUM_BASE_CMD_BLKS (ioa_cfg->max_cmds) + +#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339 + +#define PCI_DEVICE_ID_IBM_CROC_FPGA_E2 0x033D +#define PCI_DEVICE_ID_IBM_CROCODILE 0x034A +#define PCI_DEVICE_ID_IBM_RATTLESNAKE 0x04DA + +#define IPR_SUBS_DEV_ID_2780 0x0264 +#define IPR_SUBS_DEV_ID_5702 0x0266 +#define IPR_SUBS_DEV_ID_5703 0x0278 +#define IPR_SUBS_DEV_ID_572E 0x028D +#define IPR_SUBS_DEV_ID_573E 0x02D3 +#define IPR_SUBS_DEV_ID_573D 0x02D4 +#define IPR_SUBS_DEV_ID_571A 0x02C0 +#define IPR_SUBS_DEV_ID_571B 0x02BE +#define IPR_SUBS_DEV_ID_571E 0x02BF +#define IPR_SUBS_DEV_ID_571F 0x02D5 +#define IPR_SUBS_DEV_ID_572A 0x02C1 +#define IPR_SUBS_DEV_ID_572B 0x02C2 +#define IPR_SUBS_DEV_ID_572F 0x02C3 +#define IPR_SUBS_DEV_ID_574E 0x030A +#define IPR_SUBS_DEV_ID_575B 0x030D +#define IPR_SUBS_DEV_ID_575C 0x0338 +#define IPR_SUBS_DEV_ID_57B3 0x033A +#define IPR_SUBS_DEV_ID_57B7 0x0360 +#define IPR_SUBS_DEV_ID_57B8 0x02C2 + +#define IPR_SUBS_DEV_ID_57B4 0x033B +#define IPR_SUBS_DEV_ID_57B2 0x035F +#define IPR_SUBS_DEV_ID_57C0 0x0352 +#define IPR_SUBS_DEV_ID_57C3 0x0353 +#define IPR_SUBS_DEV_ID_57C4 0x0354 +#define IPR_SUBS_DEV_ID_57C6 0x0357 +#define IPR_SUBS_DEV_ID_57CC 0x035C + +#define IPR_SUBS_DEV_ID_57B5 0x033C +#define IPR_SUBS_DEV_ID_57CE 0x035E +#define IPR_SUBS_DEV_ID_57B1 0x0355 + +#define IPR_SUBS_DEV_ID_574D 0x0356 +#define IPR_SUBS_DEV_ID_57C8 0x035D + +#define IPR_SUBS_DEV_ID_57D5 0x03FB +#define IPR_SUBS_DEV_ID_57D6 0x03FC +#define IPR_SUBS_DEV_ID_57D7 0x03FF +#define IPR_SUBS_DEV_ID_57D8 0x03FE +#define IPR_SUBS_DEV_ID_57D9 0x046D +#define IPR_SUBS_DEV_ID_57DA 0x04CA +#define IPR_SUBS_DEV_ID_57EB 0x0474 +#define IPR_SUBS_DEV_ID_57EC 0x0475 +#define IPR_SUBS_DEV_ID_57ED 0x0499 +#define IPR_SUBS_DEV_ID_57EE 0x049A +#define IPR_SUBS_DEV_ID_57EF 0x049B +#define IPR_SUBS_DEV_ID_57F0 0x049C +#define IPR_SUBS_DEV_ID_2CCA 0x04C7 +#define IPR_SUBS_DEV_ID_2CD2 0x04C8 +#define IPR_SUBS_DEV_ID_2CCD 0x04C9 +#define IPR_SUBS_DEV_ID_580A 0x04FC +#define IPR_SUBS_DEV_ID_580B 0x04FB +#define IPR_NAME "ipr" + +/* + * Return codes + */ +#define IPR_RC_JOB_CONTINUE 1 +#define IPR_RC_JOB_RETURN 2 + +/* + * IOASCs + */ +#define IPR_IOASC_NR_INIT_CMD_REQUIRED 0x02040200 +#define IPR_IOASC_NR_IOA_RESET_REQUIRED 0x02048000 +#define 
IPR_IOASC_SYNC_REQUIRED 0x023f0000 +#define IPR_IOASC_MED_DO_NOT_REALLOC 0x03110C00 +#define IPR_IOASC_HW_SEL_TIMEOUT 0x04050000 +#define IPR_IOASC_HW_DEV_BUS_STATUS 0x04448500 +#define IPR_IOASC_IOASC_MASK 0xFFFFFF00 +#define IPR_IOASC_SCSI_STATUS_MASK 0x000000FF +#define IPR_IOASC_HW_CMD_FAILED 0x046E0000 +#define IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT 0x05240000 +#define IPR_IOASC_IR_RESOURCE_HANDLE 0x05250000 +#define IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA 0x05258100 +#define IPR_IOASA_IR_DUAL_IOA_DISABLED 0x052C8000 +#define IPR_IOASC_BUS_WAS_RESET 0x06290000 +#define IPR_IOASC_BUS_WAS_RESET_BY_OTHER 0x06298000 +#define IPR_IOASC_ABORTED_CMD_TERM_BY_HOST 0x0B5A0000 +#define IPR_IOASC_IR_NON_OPTIMIZED 0x05258200 + +#define IPR_FIRST_DRIVER_IOASC 0x10000000 +#define IPR_IOASC_IOA_WAS_RESET 0x10000001 +#define IPR_IOASC_PCI_ACCESS_ERROR 0x10000002 + +/* Driver data flags */ +#define IPR_USE_LONG_TRANSOP_TIMEOUT 0x00000001 +#define IPR_USE_PCI_WARM_RESET 0x00000002 + +#define IPR_DEFAULT_MAX_ERROR_DUMP 984 +#define IPR_NUM_LOG_HCAMS 2 +#define IPR_NUM_CFG_CHG_HCAMS 2 +#define IPR_NUM_HCAM_QUEUE 12 +#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS) +#define IPR_MAX_HCAMS (IPR_NUM_HCAMS + IPR_NUM_HCAM_QUEUE) + +#define IPR_MAX_SIS64_TARGETS_PER_BUS 1024 +#define IPR_MAX_SIS64_LUNS_PER_TARGET 0xffffffff + +#define IPR_MAX_NUM_TARGETS_PER_BUS 256 +#define IPR_MAX_NUM_LUNS_PER_TARGET 256 +#define IPR_VSET_BUS 0xff +#define IPR_IOA_BUS 0xff +#define IPR_IOA_TARGET 0xff +#define IPR_IOA_LUN 0xff +#define IPR_MAX_NUM_BUSES 16 + +#define IPR_NUM_RESET_RELOAD_RETRIES 3 + +/* We need resources for HCAMS, IOA reset, IOA bringdown, and ERP */ +#define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \ + ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4) + +#define IPR_MAX_COMMANDS 100 +#define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \ + IPR_NUM_INTERNAL_CMD_BLKS) + +#define IPR_MAX_PHYSICAL_DEVS 192 +#define IPR_DEFAULT_SIS64_DEVS 1024 +#define IPR_MAX_SIS64_DEVS 4096 + +#define IPR_MAX_SGLIST 64 +#define IPR_IOA_MAX_SECTORS 32767 +#define IPR_VSET_MAX_SECTORS 512 +#define IPR_MAX_CDB_LEN 16 +#define IPR_MAX_HRRQ_RETRIES 3 + +#define IPR_DEFAULT_BUS_WIDTH 16 +#define IPR_80MBs_SCSI_RATE ((80 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8)) +#define IPR_U160_SCSI_RATE ((160 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8)) +#define IPR_U320_SCSI_RATE ((320 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8)) +#define IPR_MAX_SCSI_RATE(width) ((320 * 10) / ((width) / 8)) + +#define IPR_IOA_RES_HANDLE 0xffffffff +#define IPR_INVALID_RES_HANDLE 0 +#define IPR_IOA_RES_ADDR 0x00ffffff + +/* + * Adapter Commands + */ +#define IPR_CANCEL_REQUEST 0xC0 +#define IPR_CANCEL_64BIT_IOARCB 0x01 +#define IPR_QUERY_RSRC_STATE 0xC2 +#define IPR_RESET_DEVICE 0xC3 +#define IPR_RESET_TYPE_SELECT 0x80 +#define IPR_LUN_RESET 0x40 +#define IPR_TARGET_RESET 0x20 +#define IPR_BUS_RESET 0x10 +#define IPR_ID_HOST_RR_Q 0xC4 +#define IPR_QUERY_IOA_CONFIG 0xC5 +#define IPR_CANCEL_ALL_REQUESTS 0xCE +#define IPR_HOST_CONTROLLED_ASYNC 0xCF +#define IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE 0x01 +#define IPR_HCAM_CDB_OP_CODE_LOG_DATA 0x02 +#define IPR_SET_SUPPORTED_DEVICES 0xFB +#define IPR_SET_ALL_SUPPORTED_DEVICES 0x80 +#define IPR_IOA_SHUTDOWN 0xF7 +#define IPR_WR_BUF_DOWNLOAD_AND_SAVE 0x05 +#define IPR_IOA_SERVICE_ACTION 0xD2 + +/* IOA Service Actions */ +#define IPR_IOA_SA_CHANGE_CACHE_PARAMS 0x14 + +/* + * Timeouts + */ +#define IPR_SHUTDOWN_TIMEOUT (ipr_fastfail ? 60 * HZ : 10 * 60 * HZ) +#define IPR_VSET_RW_TIMEOUT (ipr_fastfail ? 
30 * HZ : 2 * 60 * HZ) +#define IPR_ABBREV_SHUTDOWN_TIMEOUT (10 * HZ) +#define IPR_DUAL_IOA_ABBR_SHUTDOWN_TO (2 * 60 * HZ) +#define IPR_DEVICE_RESET_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ) +#define IPR_CANCEL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ) +#define IPR_CANCEL_ALL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ) +#define IPR_ABORT_TASK_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ) +#define IPR_INTERNAL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ) +#define IPR_WRITE_BUFFER_TIMEOUT (30 * 60 * HZ) +#define IPR_SET_SUP_DEVICE_TIMEOUT (2 * 60 * HZ) +#define IPR_REQUEST_SENSE_TIMEOUT (10 * HZ) +#define IPR_OPERATIONAL_TIMEOUT (5 * 60) +#define IPR_LONG_OPERATIONAL_TIMEOUT (12 * 60) +#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ) +#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10) +#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ) +#define IPR_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ) +#define IPR_PCI_RESET_TIMEOUT (HZ / 2) +#define IPR_SIS32_DUMP_TIMEOUT (15 * HZ) +#define IPR_SIS64_DUMP_TIMEOUT (40 * HZ) +#define IPR_DUMP_DELAY_SECONDS 4 +#define IPR_DUMP_DELAY_TIMEOUT (IPR_DUMP_DELAY_SECONDS * HZ) + +/* + * SCSI Literals + */ +#define IPR_VENDOR_ID_LEN 8 +#define IPR_PROD_ID_LEN 16 +#define IPR_SERIAL_NUM_LEN 8 + +/* + * Hardware literals + */ +#define IPR_FMT2_MBX_ADDR_MASK 0x0fffffff +#define IPR_FMT2_MBX_BAR_SEL_MASK 0xf0000000 +#define IPR_FMT2_MKR_BAR_SEL_SHIFT 28 +#define IPR_GET_FMT2_BAR_SEL(mbx) \ +(((mbx) & IPR_FMT2_MBX_BAR_SEL_MASK) >> IPR_FMT2_MKR_BAR_SEL_SHIFT) +#define IPR_SDT_FMT2_BAR0_SEL 0x0 +#define IPR_SDT_FMT2_BAR1_SEL 0x1 +#define IPR_SDT_FMT2_BAR2_SEL 0x2 +#define IPR_SDT_FMT2_BAR3_SEL 0x3 +#define IPR_SDT_FMT2_BAR4_SEL 0x4 +#define IPR_SDT_FMT2_BAR5_SEL 0x5 +#define IPR_SDT_FMT2_EXP_ROM_SEL 0x8 +#define IPR_FMT2_SDT_READY_TO_USE 0xC4D4E3F2 +#define IPR_FMT3_SDT_READY_TO_USE 0xC4D4E3F3 +#define IPR_DOORBELL 0x82800000 +#define IPR_RUNTIME_RESET 0x40000000 + +#define IPR_IPL_INIT_MIN_STAGE_TIME 5 +#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 30 +#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0 +#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000 +#define IPR_IPL_INIT_STAGE_MASK 0xff000000 +#define IPR_IPL_INIT_STAGE_TIME_MASK 0x0000ffff +#define IPR_PCII_IPL_STAGE_CHANGE (0x80000000 >> 0) + +#define IPR_PCII_MAILBOX_STABLE (0x80000000 >> 4) +#define IPR_WAIT_FOR_MAILBOX (2 * HZ) + +#define IPR_PCII_IOA_TRANS_TO_OPER (0x80000000 >> 0) +#define IPR_PCII_IOARCB_XFER_FAILED (0x80000000 >> 3) +#define IPR_PCII_IOA_UNIT_CHECKED (0x80000000 >> 4) +#define IPR_PCII_NO_HOST_RRQ (0x80000000 >> 5) +#define IPR_PCII_CRITICAL_OPERATION (0x80000000 >> 6) +#define IPR_PCII_IO_DEBUG_ACKNOWLEDGE (0x80000000 >> 7) +#define IPR_PCII_IOARRIN_LOST (0x80000000 >> 27) +#define IPR_PCII_MMIO_ERROR (0x80000000 >> 28) +#define IPR_PCII_PROC_ERR_STATE (0x80000000 >> 29) +#define IPR_PCII_HRRQ_UPDATED (0x80000000 >> 30) +#define IPR_PCII_CORE_ISSUED_RST_REQ (0x80000000 >> 31) + +#define IPR_PCII_ERROR_INTERRUPTS \ +(IPR_PCII_IOARCB_XFER_FAILED | IPR_PCII_IOA_UNIT_CHECKED | \ +IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR) + +#define IPR_PCII_OPER_INTERRUPTS \ +(IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED | IPR_PCII_IOA_TRANS_TO_OPER) + +#define IPR_UPROCI_RESET_ALERT (0x80000000 >> 7) +#define IPR_UPROCI_IO_DEBUG_ALERT (0x80000000 >> 9) +#define IPR_UPROCI_SIS64_START_BIST (0x80000000 >> 23) + +#define IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC 200000 /* 200 ms */ +#define IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC 200000 /* 200 ms */ + +/* + * Dump literals + */ +#define IPR_FMT2_MAX_IOA_DUMP_SIZE (4 * 1024 * 1024) 
+#define IPR_FMT3_MAX_IOA_DUMP_SIZE (80 * 1024 * 1024) +#define IPR_FMT2_NUM_SDT_ENTRIES 511 +#define IPR_FMT3_NUM_SDT_ENTRIES 0xFFF +#define IPR_FMT2_MAX_NUM_DUMP_PAGES ((IPR_FMT2_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1) +#define IPR_FMT3_MAX_NUM_DUMP_PAGES ((IPR_FMT3_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1) + +/* + * Misc literals + */ +#define IPR_NUM_IOADL_ENTRIES IPR_MAX_SGLIST +#define IPR_MAX_MSIX_VECTORS 0x10 +#define IPR_MAX_HRRQ_NUM 0x10 +#define IPR_INIT_HRRQ 0x0 + +/* + * Adapter interface types + */ + +struct ipr_res_addr { + u8 reserved; + u8 bus; + u8 target; + u8 lun; +#define IPR_GET_PHYS_LOC(res_addr) \ + (((res_addr).bus << 16) | ((res_addr).target << 8) | (res_addr).lun) +}__attribute__((packed, aligned (4))); + +struct ipr_std_inq_vpids { + u8 vendor_id[IPR_VENDOR_ID_LEN]; + u8 product_id[IPR_PROD_ID_LEN]; +}__attribute__((packed)); + +struct ipr_vpd { + struct ipr_std_inq_vpids vpids; + u8 sn[IPR_SERIAL_NUM_LEN]; +}__attribute__((packed)); + +struct ipr_ext_vpd { + struct ipr_vpd vpd; + __be32 wwid[2]; +}__attribute__((packed)); + +struct ipr_ext_vpd64 { + struct ipr_vpd vpd; + __be32 wwid[4]; +}__attribute__((packed)); + +struct ipr_std_inq_data { + u8 peri_qual_dev_type; +#define IPR_STD_INQ_PERI_QUAL(peri) ((peri) >> 5) +#define IPR_STD_INQ_PERI_DEV_TYPE(peri) ((peri) & 0x1F) + + u8 removeable_medium_rsvd; +#define IPR_STD_INQ_REMOVEABLE_MEDIUM 0x80 + +#define IPR_IS_DASD_DEVICE(std_inq) \ +((IPR_STD_INQ_PERI_DEV_TYPE((std_inq).peri_qual_dev_type) == TYPE_DISK) && \ +!(((std_inq).removeable_medium_rsvd) & IPR_STD_INQ_REMOVEABLE_MEDIUM)) + +#define IPR_IS_SES_DEVICE(std_inq) \ +(IPR_STD_INQ_PERI_DEV_TYPE((std_inq).peri_qual_dev_type) == TYPE_ENCLOSURE) + + u8 version; + u8 aen_naca_fmt; + u8 additional_len; + u8 sccs_rsvd; + u8 bq_enc_multi; + u8 sync_cmdq_flags; + + struct ipr_std_inq_vpids vpids; + + u8 ros_rsvd_ram_rsvd[4]; + + u8 serial_num[IPR_SERIAL_NUM_LEN]; +}__attribute__ ((packed)); + +#define IPR_RES_TYPE_AF_DASD 0x00 +#define IPR_RES_TYPE_GENERIC_SCSI 0x01 +#define IPR_RES_TYPE_VOLUME_SET 0x02 +#define IPR_RES_TYPE_REMOTE_AF_DASD 0x03 +#define IPR_RES_TYPE_GENERIC_ATA 0x04 +#define IPR_RES_TYPE_ARRAY 0x05 +#define IPR_RES_TYPE_IOAFP 0xff + +struct ipr_config_table_entry { + u8 proto; +#define IPR_PROTO_SATA 0x02 +#define IPR_PROTO_SATA_ATAPI 0x03 +#define IPR_PROTO_SAS_STP 0x06 +#define IPR_PROTO_SAS_STP_ATAPI 0x07 + u8 array_id; + u8 flags; +#define IPR_IS_IOA_RESOURCE 0x80 + u8 rsvd_subtype; + +#define IPR_QUEUEING_MODEL(res) ((((res)->flags) & 0x70) >> 4) +#define IPR_QUEUE_FROZEN_MODEL 0 +#define IPR_QUEUE_NACA_MODEL 1 + + struct ipr_res_addr res_addr; + __be32 res_handle; + __be32 lun_wwn[2]; + struct ipr_std_inq_data std_inq_data; +}__attribute__ ((packed, aligned (4))); + +struct ipr_config_table_entry64 { + u8 res_type; + u8 proto; + u8 vset_num; + u8 array_id; + __be16 flags; + __be16 res_flags; +#define IPR_QUEUEING_MODEL64(res) ((((res)->res_flags) & 0x7000) >> 12) + __be32 res_handle; + u8 dev_id_type; + u8 reserved[3]; + __be64 dev_id; + __be64 lun; + __be64 lun_wwn[2]; +#define IPR_MAX_RES_PATH_LENGTH 48 +#define IPR_RES_PATH_BYTES 8 + __be64 res_path; + struct ipr_std_inq_data std_inq_data; + u8 reserved2[4]; + __be64 reserved3[2]; + u8 reserved4[8]; +}__attribute__ ((packed, aligned (8))); + +struct ipr_config_table_hdr { + u8 num_entries; + u8 flags; +#define IPR_UCODE_DOWNLOAD_REQ 0x10 + __be16 reserved; +}__attribute__((packed, aligned (4))); + +struct ipr_config_table_hdr64 { + __be16 num_entries; + __be16 reserved; + u8 flags; + u8 
reserved2[11]; +}__attribute__((packed, aligned (4))); + +struct ipr_config_table { + struct ipr_config_table_hdr hdr; + struct ipr_config_table_entry dev[]; +}__attribute__((packed, aligned (4))); + +struct ipr_config_table64 { + struct ipr_config_table_hdr64 hdr64; + struct ipr_config_table_entry64 dev[]; +}__attribute__((packed, aligned (8))); + +struct ipr_config_table_entry_wrapper { + union { + struct ipr_config_table_entry *cfgte; + struct ipr_config_table_entry64 *cfgte64; + } u; +}; + +struct ipr_hostrcb_cfg_ch_not { + union { + struct ipr_config_table_entry cfgte; + struct ipr_config_table_entry64 cfgte64; + } u; + u8 reserved[936]; +}__attribute__((packed, aligned (4))); + +struct ipr_supported_device { + __be16 data_length; + u8 reserved; + u8 num_records; + struct ipr_std_inq_vpids vpids; + u8 reserved2[16]; +}__attribute__((packed, aligned (4))); + +struct ipr_hrr_queue { + struct ipr_ioa_cfg *ioa_cfg; + __be32 *host_rrq; + dma_addr_t host_rrq_dma; +#define IPR_HRRQ_REQ_RESP_HANDLE_MASK 0xfffffffc +#define IPR_HRRQ_RESP_BIT_SET 0x00000002 +#define IPR_HRRQ_TOGGLE_BIT 0x00000001 +#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2 +#define IPR_ID_HRRQ_SELE_ENABLE 0x02 + volatile __be32 *hrrq_start; + volatile __be32 *hrrq_end; + volatile __be32 *hrrq_curr; + + struct list_head hrrq_free_q; + struct list_head hrrq_pending_q; + spinlock_t _lock; + spinlock_t *lock; + + volatile u32 toggle_bit; + u32 size; + u32 min_cmd_id; + u32 max_cmd_id; + u8 allow_interrupts:1; + u8 ioa_is_dead:1; + u8 allow_cmds:1; + u8 removing_ioa:1; + + struct irq_poll iopoll; +}; + +/* Command packet structure */ +struct ipr_cmd_pkt { + u8 reserved; /* Reserved by IOA */ + u8 hrrq_id; + u8 request_type; +#define IPR_RQTYPE_SCSICDB 0x00 +#define IPR_RQTYPE_IOACMD 0x01 +#define IPR_RQTYPE_HCAM 0x02 +#define IPR_RQTYPE_PIPE 0x05 + + u8 reserved2; + + u8 flags_hi; +#define IPR_FLAGS_HI_WRITE_NOT_READ 0x80 +#define IPR_FLAGS_HI_NO_ULEN_CHK 0x20 +#define IPR_FLAGS_HI_SYNC_OVERRIDE 0x10 +#define IPR_FLAGS_HI_SYNC_COMPLETE 0x08 +#define IPR_FLAGS_HI_NO_LINK_DESC 0x04 + + u8 flags_lo; +#define IPR_FLAGS_LO_ALIGNED_BFR 0x20 +#define IPR_FLAGS_LO_DELAY_AFTER_RST 0x10 +#define IPR_FLAGS_LO_UNTAGGED_TASK 0x00 +#define IPR_FLAGS_LO_SIMPLE_TASK 0x02 +#define IPR_FLAGS_LO_ORDERED_TASK 0x04 +#define IPR_FLAGS_LO_HEAD_OF_Q_TASK 0x06 +#define IPR_FLAGS_LO_ACA_TASK 0x08 + + u8 cdb[16]; + __be16 timeout; +}__attribute__ ((packed, aligned(4))); + +struct ipr_ioadl_desc { + __be32 flags_and_data_len; +#define IPR_IOADL_FLAGS_MASK 0xff000000 +#define IPR_IOADL_GET_FLAGS(x) (be32_to_cpu(x) & IPR_IOADL_FLAGS_MASK) +#define IPR_IOADL_DATA_LEN_MASK 0x00ffffff +#define IPR_IOADL_GET_DATA_LEN(x) (be32_to_cpu(x) & IPR_IOADL_DATA_LEN_MASK) +#define IPR_IOADL_FLAGS_READ 0x48000000 +#define IPR_IOADL_FLAGS_READ_LAST 0x49000000 +#define IPR_IOADL_FLAGS_WRITE 0x68000000 +#define IPR_IOADL_FLAGS_WRITE_LAST 0x69000000 +#define IPR_IOADL_FLAGS_LAST 0x01000000 + + __be32 address; +}__attribute__((packed, aligned (8))); + +struct ipr_ioadl64_desc { + __be32 flags; + __be32 data_len; + __be64 address; +}__attribute__((packed, aligned (16))); + +struct ipr_ioarcb_add_data { + union { + struct ipr_ioadl_desc ioadl[5]; + __be32 add_cmd_parms[10]; + } u; +}__attribute__ ((packed, aligned (4))); + +struct ipr_ioarcb_sis64_add_addr_ecb { + __be64 ioasa_host_pci_addr; + __be64 data_ioadl_addr; + __be64 reserved; + __be32 ext_control_buf[4]; +}__attribute__((packed, aligned (8))); + +/* IOA Request Control Block 128 bytes */ +struct ipr_ioarcb { + union { + 
__be32 ioarcb_host_pci_addr; + __be64 ioarcb_host_pci_addr64; + } a; + __be32 res_handle; + __be32 host_response_handle; + __be32 reserved1; + __be32 reserved2; + __be32 reserved3; + + __be32 data_transfer_length; + __be32 read_data_transfer_length; + __be32 write_ioadl_addr; + __be32 ioadl_len; + __be32 read_ioadl_addr; + __be32 read_ioadl_len; + + __be32 ioasa_host_pci_addr; + __be16 ioasa_len; + __be16 reserved4; + + struct ipr_cmd_pkt cmd_pkt; + + __be16 add_cmd_parms_offset; + __be16 add_cmd_parms_len; + + union { + struct ipr_ioarcb_add_data add_data; + struct ipr_ioarcb_sis64_add_addr_ecb sis64_addr_data; + } u; + +}__attribute__((packed, aligned (4))); + +struct ipr_ioasa_vset { + __be32 failing_lba_hi; + __be32 failing_lba_lo; + __be32 reserved; +}__attribute__((packed, aligned (4))); + +struct ipr_ioasa_af_dasd { + __be32 failing_lba; + __be32 reserved[2]; +}__attribute__((packed, aligned (4))); + +struct ipr_ioasa_gpdd { + u8 end_state; + u8 bus_phase; + __be16 reserved; + __be32 ioa_data[2]; +}__attribute__((packed, aligned (4))); + +struct ipr_auto_sense { + __be16 auto_sense_len; + __be16 ioa_data_len; + __be32 data[SCSI_SENSE_BUFFERSIZE/sizeof(__be32)]; +}; + +struct ipr_ioasa_hdr { + __be32 ioasc; +#define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24) +#define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16) +#define IPR_IOASC_SENSE_QUAL(ioasc) (((ioasc) & 0x0000ff00) >> 8) +#define IPR_IOASC_SENSE_STATUS(ioasc) ((ioasc) & 0x000000ff) + + __be16 ret_stat_len; /* Length of the returned IOASA */ + + __be16 avail_stat_len; /* Total Length of status available. */ + + __be32 residual_data_len; /* number of bytes in the host data */ + /* buffers that were not used by the IOARCB command. */ + + __be32 ilid; +#define IPR_NO_ILID 0 +#define IPR_DRIVER_ILID 0xffffffff + + __be32 fd_ioasc; + + __be32 fd_phys_locator; + + __be32 fd_res_handle; + + __be32 ioasc_specific; /* status code specific field */ +#define IPR_ADDITIONAL_STATUS_FMT 0x80000000 +#define IPR_AUTOSENSE_VALID 0x40000000 +#define IPR_IOASC_SPECIFIC_MASK 0x00ffffff +#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) +#define IPR_FIELD_POINTER_MASK 0x0000ffff + +}__attribute__((packed, aligned (4))); + +struct ipr_ioasa { + struct ipr_ioasa_hdr hdr; + + union { + struct ipr_ioasa_vset vset; + struct ipr_ioasa_af_dasd dasd; + struct ipr_ioasa_gpdd gpdd; + } u; + + struct ipr_auto_sense auto_sense; +}__attribute__((packed, aligned (4))); + +struct ipr_ioasa64 { + struct ipr_ioasa_hdr hdr; + u8 fd_res_path[8]; + + union { + struct ipr_ioasa_vset vset; + struct ipr_ioasa_af_dasd dasd; + struct ipr_ioasa_gpdd gpdd; + } u; + + struct ipr_auto_sense auto_sense; +}__attribute__((packed, aligned (4))); + +struct ipr_mode_parm_hdr { + u8 length; + u8 medium_type; + u8 device_spec_parms; + u8 block_desc_len; +}__attribute__((packed)); + +struct ipr_mode_pages { + struct ipr_mode_parm_hdr hdr; + u8 data[255 - sizeof(struct ipr_mode_parm_hdr)]; +}__attribute__((packed)); + +struct ipr_mode_page_hdr { + u8 ps_page_code; +#define IPR_MODE_PAGE_PS 0x80 +#define IPR_GET_MODE_PAGE_CODE(hdr) ((hdr)->ps_page_code & 0x3F) + u8 page_length; +}__attribute__ ((packed)); + +struct ipr_dev_bus_entry { + struct ipr_res_addr res_addr; + u8 flags; +#define IPR_SCSI_ATTR_ENABLE_QAS 0x80 +#define IPR_SCSI_ATTR_DISABLE_QAS 0x40 +#define IPR_SCSI_ATTR_QAS_MASK 0xC0 +#define IPR_SCSI_ATTR_ENABLE_TM 0x20 +#define IPR_SCSI_ATTR_NO_TERM_PWR 0x10 +#define IPR_SCSI_ATTR_TM_SUPPORTED 0x08 +#define IPR_SCSI_ATTR_LVD_TO_SE_NOT_ALLOWED 0x04 + + u8 scsi_id; + 
u8 bus_width; + u8 extended_reset_delay; +#define IPR_EXTENDED_RESET_DELAY 7 + + __be32 max_xfer_rate; + + u8 spinup_delay; + u8 reserved3; + __be16 reserved4; +}__attribute__((packed, aligned (4))); + +struct ipr_mode_page28 { + struct ipr_mode_page_hdr hdr; + u8 num_entries; + u8 entry_length; + struct ipr_dev_bus_entry bus[]; +}__attribute__((packed)); + +struct ipr_mode_page24 { + struct ipr_mode_page_hdr hdr; + u8 flags; +#define IPR_ENABLE_DUAL_IOA_AF 0x80 +}__attribute__((packed)); + +struct ipr_ioa_vpd { + struct ipr_std_inq_data std_inq_data; + u8 ascii_part_num[12]; + u8 reserved[40]; + u8 ascii_plant_code[4]; +}__attribute__((packed)); + +struct ipr_inquiry_page3 { + u8 peri_qual_dev_type; + u8 page_code; + u8 reserved1; + u8 page_length; + u8 ascii_len; + u8 reserved2[3]; + u8 load_id[4]; + u8 major_release; + u8 card_type; + u8 minor_release[2]; + u8 ptf_number[4]; + u8 patch_number[4]; +}__attribute__((packed)); + +struct ipr_inquiry_cap { + u8 peri_qual_dev_type; + u8 page_code; + u8 reserved1; + u8 page_length; + u8 ascii_len; + u8 reserved2; + u8 sis_version[2]; + u8 cap; +#define IPR_CAP_DUAL_IOA_RAID 0x80 + u8 reserved3[15]; +}__attribute__((packed)); + +#define IPR_INQUIRY_PAGE0_ENTRIES 20 +struct ipr_inquiry_page0 { + u8 peri_qual_dev_type; + u8 page_code; + u8 reserved1; + u8 len; + u8 page[IPR_INQUIRY_PAGE0_ENTRIES]; +}__attribute__((packed)); + +struct ipr_inquiry_pageC4 { + u8 peri_qual_dev_type; + u8 page_code; + u8 reserved1; + u8 len; + u8 cache_cap[4]; +#define IPR_CAP_SYNC_CACHE 0x08 + u8 reserved2[20]; +} __packed; + +struct ipr_hostrcb_device_data_entry { + struct ipr_vpd vpd; + struct ipr_res_addr dev_res_addr; + struct ipr_vpd new_vpd; + struct ipr_vpd ioa_last_with_dev_vpd; + struct ipr_vpd cfc_last_with_dev_vpd; + __be32 ioa_data[5]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_device_data_entry_enhanced { + struct ipr_ext_vpd vpd; + u8 ccin[4]; + struct ipr_res_addr dev_res_addr; + struct ipr_ext_vpd new_vpd; + u8 new_ccin[4]; + struct ipr_ext_vpd ioa_last_with_dev_vpd; + struct ipr_ext_vpd cfc_last_with_dev_vpd; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb64_device_data_entry_enhanced { + struct ipr_ext_vpd vpd; + u8 ccin[4]; + u8 res_path[8]; + struct ipr_ext_vpd new_vpd; + u8 new_ccin[4]; + struct ipr_ext_vpd ioa_last_with_dev_vpd; + struct ipr_ext_vpd cfc_last_with_dev_vpd; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_array_data_entry { + struct ipr_vpd vpd; + struct ipr_res_addr expected_dev_res_addr; + struct ipr_res_addr dev_res_addr; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb64_array_data_entry { + struct ipr_ext_vpd vpd; + u8 ccin[4]; + u8 expected_res_path[8]; + u8 res_path[8]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_array_data_entry_enhanced { + struct ipr_ext_vpd vpd; + u8 ccin[4]; + struct ipr_res_addr expected_dev_res_addr; + struct ipr_res_addr dev_res_addr; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_ff_error { + __be32 ioa_data[758]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_01_error { + __be32 seek_counter; + __be32 read_counter; + u8 sense_data[32]; + __be32 ioa_data[236]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_21_error { + __be32 wwn[4]; + u8 res_path[8]; + u8 primary_problem_desc[32]; + u8 second_problem_desc[32]; + __be32 sense_data[8]; + __be32 cdb[4]; + __be32 residual_trans_length; + __be32 length_of_error; + __be32 ioa_data[236]; +}__attribute__((packed, aligned 
(4))); + +struct ipr_hostrcb_type_02_error { + struct ipr_vpd ioa_vpd; + struct ipr_vpd cfc_vpd; + struct ipr_vpd ioa_last_attached_to_cfc_vpd; + struct ipr_vpd cfc_last_attached_to_ioa_vpd; + __be32 ioa_data[3]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_12_error { + struct ipr_ext_vpd ioa_vpd; + struct ipr_ext_vpd cfc_vpd; + struct ipr_ext_vpd ioa_last_attached_to_cfc_vpd; + struct ipr_ext_vpd cfc_last_attached_to_ioa_vpd; + __be32 ioa_data[3]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_03_error { + struct ipr_vpd ioa_vpd; + struct ipr_vpd cfc_vpd; + __be32 errors_detected; + __be32 errors_logged; + u8 ioa_data[12]; + struct ipr_hostrcb_device_data_entry dev[3]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_13_error { + struct ipr_ext_vpd ioa_vpd; + struct ipr_ext_vpd cfc_vpd; + __be32 errors_detected; + __be32 errors_logged; + struct ipr_hostrcb_device_data_entry_enhanced dev[3]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_23_error { + struct ipr_ext_vpd ioa_vpd; + struct ipr_ext_vpd cfc_vpd; + __be32 errors_detected; + __be32 errors_logged; + struct ipr_hostrcb64_device_data_entry_enhanced dev[3]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_04_error { + struct ipr_vpd ioa_vpd; + struct ipr_vpd cfc_vpd; + u8 ioa_data[12]; + struct ipr_hostrcb_array_data_entry array_member[10]; + __be32 exposed_mode_adn; + __be32 array_id; + struct ipr_vpd incomp_dev_vpd; + __be32 ioa_data2; + struct ipr_hostrcb_array_data_entry array_member2[8]; + struct ipr_res_addr last_func_vset_res_addr; + u8 vset_serial_num[IPR_SERIAL_NUM_LEN]; + u8 protection_level[8]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_14_error { + struct ipr_ext_vpd ioa_vpd; + struct ipr_ext_vpd cfc_vpd; + __be32 exposed_mode_adn; + __be32 array_id; + struct ipr_res_addr last_func_vset_res_addr; + u8 vset_serial_num[IPR_SERIAL_NUM_LEN]; + u8 protection_level[8]; + __be32 num_entries; + struct ipr_hostrcb_array_data_entry_enhanced array_member[18]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_24_error { + struct ipr_ext_vpd ioa_vpd; + struct ipr_ext_vpd cfc_vpd; + u8 reserved[2]; + u8 exposed_mode_adn; +#define IPR_INVALID_ARRAY_DEV_NUM 0xff + u8 array_id; + u8 last_res_path[8]; + u8 protection_level[8]; + struct ipr_ext_vpd64 array_vpd; + u8 description[16]; + u8 reserved2[3]; + u8 num_entries; + struct ipr_hostrcb64_array_data_entry array_member[32]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_07_error { + u8 failure_reason[64]; + struct ipr_vpd vpd; + __be32 data[222]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_17_error { + u8 failure_reason[64]; + struct ipr_ext_vpd vpd; + __be32 data[476]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_config_element { + u8 type_status; +#define IPR_PATH_CFG_TYPE_MASK 0xF0 +#define IPR_PATH_CFG_NOT_EXIST 0x00 +#define IPR_PATH_CFG_IOA_PORT 0x10 +#define IPR_PATH_CFG_EXP_PORT 0x20 +#define IPR_PATH_CFG_DEVICE_PORT 0x30 +#define IPR_PATH_CFG_DEVICE_LUN 0x40 + +#define IPR_PATH_CFG_STATUS_MASK 0x0F +#define IPR_PATH_CFG_NO_PROB 0x00 +#define IPR_PATH_CFG_DEGRADED 0x01 +#define IPR_PATH_CFG_FAILED 0x02 +#define IPR_PATH_CFG_SUSPECT 0x03 +#define IPR_PATH_NOT_DETECTED 0x04 +#define IPR_PATH_INCORRECT_CONN 0x05 + + u8 cascaded_expander; + u8 phy; + u8 link_rate; +#define IPR_PHY_LINK_RATE_MASK 0x0F + + __be32 wwid[2]; +}__attribute__((packed, aligned (4))); + +struct 
ipr_hostrcb64_config_element { + __be16 length; + u8 descriptor_id; +#define IPR_DESCRIPTOR_MASK 0xC0 +#define IPR_DESCRIPTOR_SIS64 0x00 + + u8 reserved; + u8 type_status; + + u8 reserved2[2]; + u8 link_rate; + + u8 res_path[8]; + __be32 wwid[2]; +}__attribute__((packed, aligned (8))); + +struct ipr_hostrcb_fabric_desc { + __be16 length; + u8 ioa_port; + u8 cascaded_expander; + u8 phy; + u8 path_state; +#define IPR_PATH_ACTIVE_MASK 0xC0 +#define IPR_PATH_NO_INFO 0x00 +#define IPR_PATH_ACTIVE 0x40 +#define IPR_PATH_NOT_ACTIVE 0x80 + +#define IPR_PATH_STATE_MASK 0x0F +#define IPR_PATH_STATE_NO_INFO 0x00 +#define IPR_PATH_HEALTHY 0x01 +#define IPR_PATH_DEGRADED 0x02 +#define IPR_PATH_FAILED 0x03 + + __be16 num_entries; + struct ipr_hostrcb_config_element elem[1]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb64_fabric_desc { + __be16 length; + u8 descriptor_id; + + u8 reserved[2]; + u8 path_state; + + u8 reserved2[2]; + u8 res_path[8]; + u8 reserved3[6]; + __be16 num_entries; + struct ipr_hostrcb64_config_element elem[1]; +}__attribute__((packed, aligned (8))); + +#define for_each_hrrq(hrrq, ioa_cfg) \ + for (hrrq = (ioa_cfg)->hrrq; \ + hrrq < ((ioa_cfg)->hrrq + (ioa_cfg)->hrrq_num); hrrq++) + +#define for_each_fabric_cfg(fabric, cfg) \ + for (cfg = (fabric)->elem; \ + cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \ + cfg++) + +struct ipr_hostrcb_type_20_error { + u8 failure_reason[64]; + u8 reserved[3]; + u8 num_entries; + struct ipr_hostrcb_fabric_desc desc[1]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_30_error { + u8 failure_reason[64]; + u8 reserved[3]; + u8 num_entries; + struct ipr_hostrcb64_fabric_desc desc[1]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_41_error { + u8 failure_reason[64]; + __be32 data[200]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_error { + __be32 fd_ioasc; + struct ipr_res_addr fd_res_addr; + __be32 fd_res_handle; + __be32 prc; + union { + struct ipr_hostrcb_type_ff_error type_ff_error; + struct ipr_hostrcb_type_01_error type_01_error; + struct ipr_hostrcb_type_02_error type_02_error; + struct ipr_hostrcb_type_03_error type_03_error; + struct ipr_hostrcb_type_04_error type_04_error; + struct ipr_hostrcb_type_07_error type_07_error; + struct ipr_hostrcb_type_12_error type_12_error; + struct ipr_hostrcb_type_13_error type_13_error; + struct ipr_hostrcb_type_14_error type_14_error; + struct ipr_hostrcb_type_17_error type_17_error; + struct ipr_hostrcb_type_20_error type_20_error; + } u; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb64_error { + __be32 fd_ioasc; + __be32 ioa_fw_level; + __be32 fd_res_handle; + __be32 prc; + __be64 fd_dev_id; + __be64 fd_lun; + u8 fd_res_path[8]; + __be64 time_stamp; + u8 reserved[16]; + union { + struct ipr_hostrcb_type_ff_error type_ff_error; + struct ipr_hostrcb_type_12_error type_12_error; + struct ipr_hostrcb_type_17_error type_17_error; + struct ipr_hostrcb_type_21_error type_21_error; + struct ipr_hostrcb_type_23_error type_23_error; + struct ipr_hostrcb_type_24_error type_24_error; + struct ipr_hostrcb_type_30_error type_30_error; + struct ipr_hostrcb_type_41_error type_41_error; + } u; +}__attribute__((packed, aligned (8))); + +struct ipr_hostrcb_raw { + __be32 data[sizeof(struct ipr_hostrcb_error)/sizeof(__be32)]; +}__attribute__((packed, aligned (4))); + +struct ipr_hcam { + u8 op_code; +#define IPR_HOST_RCB_OP_CODE_CONFIG_CHANGE 0xE1 +#define IPR_HOST_RCB_OP_CODE_LOG_DATA 0xE2 + + u8 notify_type; +#define 
IPR_HOST_RCB_NOTIF_TYPE_EXISTING_CHANGED 0x00 +#define IPR_HOST_RCB_NOTIF_TYPE_NEW_ENTRY 0x01 +#define IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY 0x02 +#define IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY 0x10 +#define IPR_HOST_RCB_NOTIF_TYPE_INFORMATION_ENTRY 0x11 + + u8 notifications_lost; +#define IPR_HOST_RCB_NO_NOTIFICATIONS_LOST 0 +#define IPR_HOST_RCB_NOTIFICATIONS_LOST 0x80 + + u8 flags; +#define IPR_HOSTRCB_INTERNAL_OPER 0x80 +#define IPR_HOSTRCB_ERR_RESP_SENT 0x40 + + u8 overlay_id; +#define IPR_HOST_RCB_OVERLAY_ID_1 0x01 +#define IPR_HOST_RCB_OVERLAY_ID_2 0x02 +#define IPR_HOST_RCB_OVERLAY_ID_3 0x03 +#define IPR_HOST_RCB_OVERLAY_ID_4 0x04 +#define IPR_HOST_RCB_OVERLAY_ID_6 0x06 +#define IPR_HOST_RCB_OVERLAY_ID_7 0x07 +#define IPR_HOST_RCB_OVERLAY_ID_12 0x12 +#define IPR_HOST_RCB_OVERLAY_ID_13 0x13 +#define IPR_HOST_RCB_OVERLAY_ID_14 0x14 +#define IPR_HOST_RCB_OVERLAY_ID_16 0x16 +#define IPR_HOST_RCB_OVERLAY_ID_17 0x17 +#define IPR_HOST_RCB_OVERLAY_ID_20 0x20 +#define IPR_HOST_RCB_OVERLAY_ID_21 0x21 +#define IPR_HOST_RCB_OVERLAY_ID_23 0x23 +#define IPR_HOST_RCB_OVERLAY_ID_24 0x24 +#define IPR_HOST_RCB_OVERLAY_ID_26 0x26 +#define IPR_HOST_RCB_OVERLAY_ID_30 0x30 +#define IPR_HOST_RCB_OVERLAY_ID_41 0x41 +#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF + + u8 reserved1[3]; + __be32 ilid; + __be32 time_since_last_ioa_reset; + __be32 reserved2; + __be32 length; + + union { + struct ipr_hostrcb_error error; + struct ipr_hostrcb64_error error64; + struct ipr_hostrcb_cfg_ch_not ccn; + struct ipr_hostrcb_raw raw; + } u; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb { + struct ipr_hcam hcam; + dma_addr_t hostrcb_dma; + struct list_head queue; + struct ipr_ioa_cfg *ioa_cfg; + char rp_buffer[IPR_MAX_RES_PATH_LENGTH]; +}; + +/* IPR smart dump table structures */ +struct ipr_sdt_entry { + __be32 start_token; + __be32 end_token; + u8 reserved[4]; + + u8 flags; +#define IPR_SDT_ENDIAN 0x80 +#define IPR_SDT_VALID_ENTRY 0x20 + + u8 resv; + __be16 priority; +}__attribute__((packed, aligned (4))); + +struct ipr_sdt_header { + __be32 state; + __be32 num_entries; + __be32 num_entries_used; + __be32 dump_size; +}__attribute__((packed, aligned (4))); + +struct ipr_sdt { + struct ipr_sdt_header hdr; + struct ipr_sdt_entry entry[IPR_FMT3_NUM_SDT_ENTRIES]; +}__attribute__((packed, aligned (4))); + +struct ipr_uc_sdt { + struct ipr_sdt_header hdr; + struct ipr_sdt_entry entry[1]; +}__attribute__((packed, aligned (4))); + +/* + * Driver types + */ +struct ipr_bus_attributes { + u8 bus; + u8 qas_enabled; + u8 bus_width; + u8 reserved; + u32 max_xfer_rate; +}; + +struct ipr_resource_entry { + u8 needs_sync_complete:1; + u8 in_erp:1; + u8 add_to_ml:1; + u8 del_from_ml:1; + u8 resetting_device:1; + u8 reset_occurred:1; + u8 raw_mode:1; + + u32 bus; /* AKA channel */ + u32 target; /* AKA id */ + u32 lun; +#define IPR_ARRAY_VIRTUAL_BUS 0x1 +#define IPR_VSET_VIRTUAL_BUS 0x2 +#define IPR_IOAFP_VIRTUAL_BUS 0x3 +#define IPR_MAX_SIS64_BUSES 0x4 + +#define IPR_GET_RES_PHYS_LOC(res) \ + (((res)->bus << 24) | ((res)->target << 8) | (res)->lun) + + u8 ata_class; + u8 type; + + u16 flags; + u16 res_flags; + + u8 qmodel; + struct ipr_std_inq_data std_inq_data; + + __be32 res_handle; + __be64 dev_id; + u64 lun_wwn; + struct scsi_lun dev_lun; + u8 res_path[8]; + + struct ipr_ioa_cfg *ioa_cfg; + struct scsi_device *sdev; + struct list_head queue; +}; /* struct ipr_resource_entry */ + +struct ipr_resource_hdr { + u16 num_entries; + u16 reserved; +}; + +struct ipr_misc_cbs { + struct ipr_ioa_vpd ioa_vpd; + struct ipr_inquiry_page0 
page0_data; + struct ipr_inquiry_page3 page3_data; + struct ipr_inquiry_cap cap; + struct ipr_inquiry_pageC4 pageC4_data; + struct ipr_mode_pages mode_pages; + struct ipr_supported_device supp_dev; +}; + +struct ipr_interrupt_offsets { + unsigned long set_interrupt_mask_reg; + unsigned long clr_interrupt_mask_reg; + unsigned long clr_interrupt_mask_reg32; + unsigned long sense_interrupt_mask_reg; + unsigned long sense_interrupt_mask_reg32; + unsigned long clr_interrupt_reg; + unsigned long clr_interrupt_reg32; + + unsigned long sense_interrupt_reg; + unsigned long sense_interrupt_reg32; + unsigned long ioarrin_reg; + unsigned long sense_uproc_interrupt_reg; + unsigned long sense_uproc_interrupt_reg32; + unsigned long set_uproc_interrupt_reg; + unsigned long set_uproc_interrupt_reg32; + unsigned long clr_uproc_interrupt_reg; + unsigned long clr_uproc_interrupt_reg32; + + unsigned long init_feedback_reg; + + unsigned long dump_addr_reg; + unsigned long dump_data_reg; + +#define IPR_ENDIAN_SWAP_KEY 0x00080800 + unsigned long endian_swap_reg; +}; + +struct ipr_interrupts { + void __iomem *set_interrupt_mask_reg; + void __iomem *clr_interrupt_mask_reg; + void __iomem *clr_interrupt_mask_reg32; + void __iomem *sense_interrupt_mask_reg; + void __iomem *sense_interrupt_mask_reg32; + void __iomem *clr_interrupt_reg; + void __iomem *clr_interrupt_reg32; + + void __iomem *sense_interrupt_reg; + void __iomem *sense_interrupt_reg32; + void __iomem *ioarrin_reg; + void __iomem *sense_uproc_interrupt_reg; + void __iomem *sense_uproc_interrupt_reg32; + void __iomem *set_uproc_interrupt_reg; + void __iomem *set_uproc_interrupt_reg32; + void __iomem *clr_uproc_interrupt_reg; + void __iomem *clr_uproc_interrupt_reg32; + + void __iomem *init_feedback_reg; + + void __iomem *dump_addr_reg; + void __iomem *dump_data_reg; + + void __iomem *endian_swap_reg; +}; + +struct ipr_chip_cfg_t { + u32 mailbox; + u16 max_cmds; + u8 cache_line_size; + u8 clear_isr; + u32 iopoll_weight; + struct ipr_interrupt_offsets regs; +}; + +struct ipr_chip_t { + u16 vendor; + u16 device; + bool has_msi; + u16 sis_type; +#define IPR_SIS32 0x00 +#define IPR_SIS64 0x01 + u16 bist_method; +#define IPR_PCI_CFG 0x00 +#define IPR_MMIO 0x01 + const struct ipr_chip_cfg_t *cfg; +}; + +enum ipr_shutdown_type { + IPR_SHUTDOWN_NORMAL = 0x00, + IPR_SHUTDOWN_PREPARE_FOR_NORMAL = 0x40, + IPR_SHUTDOWN_ABBREV = 0x80, + IPR_SHUTDOWN_NONE = 0x100, + IPR_SHUTDOWN_QUIESCE = 0x101, +}; + +struct ipr_trace_entry { + u32 time; + + u8 op_code; + u8 ata_op_code; + u8 type; +#define IPR_TRACE_START 0x00 +#define IPR_TRACE_FINISH 0xff + u8 cmd_index; + + __be32 res_handle; + union { + u32 ioasc; + u32 add_data; + u32 res_addr; + } u; +}; + +struct ipr_sglist { + u32 order; + u32 num_sg; + u32 num_dma_sg; + u32 buffer_len; + struct scatterlist *scatterlist; +}; + +enum ipr_sdt_state { + INACTIVE, + WAIT_FOR_DUMP, + GET_DUMP, + READ_DUMP, + ABORT_DUMP, + DUMP_OBTAINED +}; + +/* Per-controller data */ +struct ipr_ioa_cfg { + char eye_catcher[8]; +#define IPR_EYECATCHER "iprcfg" + + struct list_head queue; + + u8 in_reset_reload:1; + u8 in_ioa_bringdown:1; + u8 ioa_unit_checked:1; + u8 dump_taken:1; + u8 scan_enabled:1; + u8 scan_done:1; + u8 needs_hard_reset:1; + u8 dual_raid:1; + u8 needs_warm_reset:1; + u8 msi_received:1; + u8 sis64:1; + u8 dump_timeout:1; + u8 cfg_locked:1; + u8 clear_isr:1; + u8 probe_done:1; + u8 scsi_unblock:1; + u8 scsi_blocked:1; + + u8 revid; + + /* + * Bitmaps for SIS64 generated target values + */ + unsigned long 
target_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)]; + unsigned long array_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)]; + unsigned long vset_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)]; + + u16 type; /* CCIN of the card */ + + u8 log_level; +#define IPR_MAX_LOG_LEVEL 4 +#define IPR_DEFAULT_LOG_LEVEL 2 +#define IPR_DEBUG_LOG_LEVEL 3 + +#define IPR_NUM_TRACE_INDEX_BITS 8 +#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS) +#define IPR_TRACE_INDEX_MASK (IPR_NUM_TRACE_ENTRIES - 1) +#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES) + char trace_start[8]; +#define IPR_TRACE_START_LABEL "trace" + struct ipr_trace_entry *trace; + atomic_t trace_index; + + char cfg_table_start[8]; +#define IPR_CFG_TBL_START "cfg" + union { + struct ipr_config_table *cfg_table; + struct ipr_config_table64 *cfg_table64; + } u; + dma_addr_t cfg_table_dma; + u32 cfg_table_size; + u32 max_devs_supported; + + char resource_table_label[8]; +#define IPR_RES_TABLE_LABEL "res_tbl" + struct ipr_resource_entry *res_entries; + struct list_head free_res_q; + struct list_head used_res_q; + + char ipr_hcam_label[8]; +#define IPR_HCAM_LABEL "hcams" + struct ipr_hostrcb *hostrcb[IPR_MAX_HCAMS]; + dma_addr_t hostrcb_dma[IPR_MAX_HCAMS]; + struct list_head hostrcb_free_q; + struct list_head hostrcb_pending_q; + struct list_head hostrcb_report_q; + + struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM]; + u32 hrrq_num; + atomic_t hrrq_index; + u16 identify_hrrq_index; + + struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES]; + + unsigned int transop_timeout; + const struct ipr_chip_cfg_t *chip_cfg; + const struct ipr_chip_t *ipr_chip; + + void __iomem *hdw_dma_regs; /* iomapped PCI memory space */ + unsigned long hdw_dma_regs_pci; /* raw PCI memory space */ + void __iomem *ioa_mailbox; + struct ipr_interrupts regs; + + u16 saved_pcix_cmd_reg; + u16 reset_retries; + + u32 errors_logged; + u32 doorbell; + + struct Scsi_Host *host; + struct pci_dev *pdev; + struct ipr_sglist *ucode_sglist; + u8 saved_mode_page_len; + + struct work_struct work_q; + struct work_struct scsi_add_work_q; + struct workqueue_struct *reset_work_q; + + wait_queue_head_t reset_wait_q; + wait_queue_head_t msi_wait_q; + wait_queue_head_t eeh_wait_q; + + struct ipr_dump *dump; + enum ipr_sdt_state sdt_state; + + struct ipr_misc_cbs *vpd_cbs; + dma_addr_t vpd_cbs_dma; + + struct dma_pool *ipr_cmd_pool; + + struct ipr_cmnd *reset_cmd; + int (*reset) (struct ipr_cmnd *); + + char ipr_cmd_label[8]; +#define IPR_CMD_LABEL "ipr_cmd" + u32 max_cmds; + struct ipr_cmnd **ipr_cmnd_list; + dma_addr_t *ipr_cmnd_list_dma; + + unsigned int nvectors; + + struct { + char desc[22]; + } vectors_info[IPR_MAX_MSIX_VECTORS]; + + u32 iopoll_weight; + +}; /* struct ipr_ioa_cfg */ + +struct ipr_cmnd { + struct ipr_ioarcb ioarcb; + union { + struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES]; + struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; + } i; + union { + struct ipr_ioasa ioasa; + struct ipr_ioasa64 ioasa64; + } s; + struct list_head queue; + struct scsi_cmnd *scsi_cmd; + struct completion completion; + struct timer_list timer; + struct work_struct work; + void (*fast_done) (struct ipr_cmnd *); + void (*done) (struct ipr_cmnd *); + int (*job_step) (struct ipr_cmnd *); + int (*job_step_failed) (struct ipr_cmnd *); + u16 cmd_index; + u8 sense_buffer[SCSI_SENSE_BUFFERSIZE]; + dma_addr_t sense_buffer_dma; + unsigned short dma_use_sg; + dma_addr_t dma_addr; + struct ipr_cmnd *sibling; + union { + enum ipr_shutdown_type shutdown_type; + struct ipr_hostrcb *hostrcb; + 
unsigned long time_left; + unsigned long scratch; + struct ipr_resource_entry *res; + struct scsi_device *sdev; + } u; + + struct completion *eh_comp; + struct ipr_hrr_queue *hrrq; + struct ipr_ioa_cfg *ioa_cfg; +}; + +struct ipr_ses_table_entry { + char product_id[17]; + char compare_product_id_byte[17]; + u32 max_bus_speed_limit; /* MB/sec limit for this backplane */ +}; + +struct ipr_dump_header { + u32 eye_catcher; +#define IPR_DUMP_EYE_CATCHER 0xC5D4E3F2 + u32 len; + u32 num_entries; + u32 first_entry_offset; + u32 status; +#define IPR_DUMP_STATUS_SUCCESS 0 +#define IPR_DUMP_STATUS_QUAL_SUCCESS 2 +#define IPR_DUMP_STATUS_FAILED 0xffffffff + u32 os; +#define IPR_DUMP_OS_LINUX 0x4C4E5558 + u32 driver_name; +#define IPR_DUMP_DRIVER_NAME 0x49505232 +}__attribute__((packed, aligned (4))); + +struct ipr_dump_entry_header { + u32 eye_catcher; +#define IPR_DUMP_EYE_CATCHER 0xC5D4E3F2 + u32 len; + u32 num_elems; + u32 offset; + u32 data_type; +#define IPR_DUMP_DATA_TYPE_ASCII 0x41534349 +#define IPR_DUMP_DATA_TYPE_BINARY 0x42494E41 + u32 id; +#define IPR_DUMP_IOA_DUMP_ID 0x494F4131 +#define IPR_DUMP_LOCATION_ID 0x4C4F4341 +#define IPR_DUMP_TRACE_ID 0x54524143 +#define IPR_DUMP_DRIVER_VERSION_ID 0x44525652 +#define IPR_DUMP_DRIVER_TYPE_ID 0x54595045 +#define IPR_DUMP_IOA_CTRL_BLK 0x494F4342 +#define IPR_DUMP_PEND_OPS 0x414F5053 + u32 status; +}__attribute__((packed, aligned (4))); + +struct ipr_dump_location_entry { + struct ipr_dump_entry_header hdr; + u8 location[20]; +}__attribute__((packed, aligned (4))); + +struct ipr_dump_trace_entry { + struct ipr_dump_entry_header hdr; + u32 trace[IPR_TRACE_SIZE / sizeof(u32)]; +}__attribute__((packed, aligned (4))); + +struct ipr_dump_version_entry { + struct ipr_dump_entry_header hdr; + u8 version[sizeof(IPR_DRIVER_VERSION)]; +}; + +struct ipr_dump_ioa_type_entry { + struct ipr_dump_entry_header hdr; + u32 type; + u32 fw_version; +}; + +struct ipr_driver_dump { + struct ipr_dump_header hdr; + struct ipr_dump_version_entry version_entry; + struct ipr_dump_location_entry location_entry; + struct ipr_dump_ioa_type_entry ioa_type_entry; + struct ipr_dump_trace_entry trace_entry; +}__attribute__((packed, aligned (4))); + +struct ipr_ioa_dump { + struct ipr_dump_entry_header hdr; + struct ipr_sdt sdt; + __be32 **ioa_data; + u32 reserved; + u32 next_page_index; + u32 page_offset; + u32 format; +}__attribute__((packed, aligned (4))); + +struct ipr_dump { + struct kref kref; + struct ipr_ioa_cfg *ioa_cfg; + struct ipr_driver_dump driver_dump; + struct ipr_ioa_dump ioa_dump; +}; + +struct ipr_error_table_t { + u32 ioasc; + int log_ioasa; + int log_hcam; + char *error; +}; + +struct ipr_software_inq_lid_info { + __be32 load_id; + __be32 timestamp[3]; +}__attribute__((packed, aligned (4))); + +struct ipr_ucode_image_header { + __be32 header_length; + __be32 lid_table_offset; + u8 major_release; + u8 card_type; + u8 minor_release[2]; + u8 reserved[20]; + char eyecatcher[16]; + __be32 num_lids; + struct ipr_software_inq_lid_info lid[1]; +}__attribute__((packed, aligned (4))); + +/* + * Macros + */ +#define IPR_DBG_CMD(CMD) if (ipr_debug) { CMD; } + +#ifdef CONFIG_SCSI_IPR_TRACE +#define ipr_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr) +#define ipr_remove_trace_file(kobj, attr) sysfs_remove_bin_file(kobj, attr) +#else +#define ipr_create_trace_file(kobj, attr) 0 +#define ipr_remove_trace_file(kobj, attr) do { } while(0) +#endif + +#ifdef CONFIG_SCSI_IPR_DUMP +#define ipr_create_dump_file(kobj, attr) sysfs_create_bin_file(kobj, attr) +#define 
ipr_remove_dump_file(kobj, attr) sysfs_remove_bin_file(kobj, attr) +#else +#define ipr_create_dump_file(kobj, attr) 0 +#define ipr_remove_dump_file(kobj, attr) do { } while(0) +#endif + +/* + * Error logging macros + */ +#define ipr_err(...) printk(KERN_ERR IPR_NAME ": "__VA_ARGS__) +#define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__) +#define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)) + +#define ipr_res_printk(level, ioa_cfg, bus, target, lun, fmt, ...) \ + printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \ + bus, target, lun, ##__VA_ARGS__) + +#define ipr_res_err(ioa_cfg, res, fmt, ...) \ + ipr_res_printk(KERN_ERR, ioa_cfg, (res)->bus, (res)->target, (res)->lun, fmt, ##__VA_ARGS__) + +#define ipr_ra_printk(level, ioa_cfg, ra, fmt, ...) \ + printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \ + (ra).bus, (ra).target, (ra).lun, ##__VA_ARGS__) + +#define ipr_ra_err(ioa_cfg, ra, fmt, ...) \ + ipr_ra_printk(KERN_ERR, ioa_cfg, ra, fmt, ##__VA_ARGS__) + +#define ipr_phys_res_err(ioa_cfg, res, fmt, ...) \ +{ \ + if ((res).bus >= IPR_MAX_NUM_BUSES) { \ + ipr_err(fmt": unknown\n", ##__VA_ARGS__); \ + } else { \ + ipr_err(fmt": %d:%d:%d:%d\n", \ + ##__VA_ARGS__, (ioa_cfg)->host->host_no, \ + (res).bus, (res).target, (res).lun); \ + } \ +} + +#define ipr_hcam_err(hostrcb, fmt, ...) \ +{ \ + if (ipr_is_device(hostrcb)) { \ + if ((hostrcb)->ioa_cfg->sis64) { \ + printk(KERN_ERR IPR_NAME ": %s: " fmt, \ + ipr_format_res_path(hostrcb->ioa_cfg, \ + hostrcb->hcam.u.error64.fd_res_path, \ + hostrcb->rp_buffer, \ + sizeof(hostrcb->rp_buffer)), \ + __VA_ARGS__); \ + } else { \ + ipr_ra_err((hostrcb)->ioa_cfg, \ + (hostrcb)->hcam.u.error.fd_res_addr, \ + fmt, __VA_ARGS__); \ + } \ + } else { \ + dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, __VA_ARGS__); \ + } \ +} + +#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\ + __FILE__, __func__, __LINE__) + +#define ENTER IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Entering %s\n", __func__)) +#define LEAVE IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Leaving %s\n", __func__)) + +#define ipr_err_separator \ +ipr_err("----------------------------------------------------------\n") + + +/* + * Inlines + */ + +/** + * ipr_is_ioa_resource - Determine if a resource is the IOA + * @res: resource entry struct + * + * Return value: + * 1 if IOA / 0 if not IOA + **/ +static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res) +{ + return res->type == IPR_RES_TYPE_IOAFP; +} + +/** + * ipr_is_af_dasd_device - Determine if a resource is an AF DASD + * @res: resource entry struct + * + * Return value: + * 1 if AF DASD / 0 if not AF DASD + **/ +static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res) +{ + return res->type == IPR_RES_TYPE_AF_DASD || + res->type == IPR_RES_TYPE_REMOTE_AF_DASD; +} + +/** + * ipr_is_vset_device - Determine if a resource is a VSET + * @res: resource entry struct + * + * Return value: + * 1 if VSET / 0 if not VSET + **/ +static inline int ipr_is_vset_device(struct ipr_resource_entry *res) +{ + return res->type == IPR_RES_TYPE_VOLUME_SET; +} + +/** + * ipr_is_gscsi - Determine if a resource is a generic scsi resource + * @res: resource entry struct + * + * Return value: + * 1 if GSCSI / 0 if not GSCSI + **/ +static inline int ipr_is_gscsi(struct ipr_resource_entry *res) +{ + return res->type == IPR_RES_TYPE_GENERIC_SCSI; +} + +/** + * ipr_is_scsi_disk - Determine if a resource is a SCSI disk + * @res: resource entry struct + * + * Return value: + * 1 if SCSI 
disk / 0 if not SCSI disk + **/ +static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res) +{ + if (ipr_is_af_dasd_device(res) || + (ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->std_inq_data))) + return 1; + else + return 0; +} + +/** + * ipr_is_gata - Determine if a resource is a generic ATA resource + * @res: resource entry struct + * + * Return value: + * 1 if GATA / 0 if not GATA + **/ +static inline int ipr_is_gata(struct ipr_resource_entry *res) +{ + return res->type == IPR_RES_TYPE_GENERIC_ATA; +} + +/** + * ipr_is_naca_model - Determine if a resource is using NACA queueing model + * @res: resource entry struct + * + * Return value: + * 1 if NACA queueing model / 0 if not NACA queueing model + **/ +static inline int ipr_is_naca_model(struct ipr_resource_entry *res) +{ + if (ipr_is_gscsi(res) && res->qmodel == IPR_QUEUE_NACA_MODEL) + return 1; + return 0; +} + +/** + * ipr_is_device - Determine if the hostrcb structure is related to a device + * @hostrcb: host resource control blocks struct + * + * Return value: + * 1 if AF / 0 if not AF + **/ +static inline int ipr_is_device(struct ipr_hostrcb *hostrcb) +{ + struct ipr_res_addr *res_addr; + u8 *res_path; + + if (hostrcb->ioa_cfg->sis64) { + res_path = &hostrcb->hcam.u.error64.fd_res_path[0]; + if ((res_path[0] == 0x00 || res_path[0] == 0x80 || + res_path[0] == 0x81) && res_path[2] != 0xFF) + return 1; + } else { + res_addr = &hostrcb->hcam.u.error.fd_res_addr; + + if ((res_addr->bus < IPR_MAX_NUM_BUSES) && + (res_addr->target < (IPR_MAX_NUM_TARGETS_PER_BUS - 1))) + return 1; + } + return 0; +} + +/** + * ipr_sdt_is_fmt2 - Determine if a SDT address is in format 2 + * @sdt_word: SDT address + * + * Return value: + * 1 if format 2 / 0 if not + **/ +static inline int ipr_sdt_is_fmt2(u32 sdt_word) +{ + u32 bar_sel = IPR_GET_FMT2_BAR_SEL(sdt_word); + + switch (bar_sel) { + case IPR_SDT_FMT2_BAR0_SEL: + case IPR_SDT_FMT2_BAR1_SEL: + case IPR_SDT_FMT2_BAR2_SEL: + case IPR_SDT_FMT2_BAR3_SEL: + case IPR_SDT_FMT2_BAR4_SEL: + case IPR_SDT_FMT2_BAR5_SEL: + case IPR_SDT_FMT2_EXP_ROM_SEL: + return 1; + }; + + return 0; +} + +#ifndef writeq +static inline void writeq(u64 val, void __iomem *addr) +{ + writel(((u32) (val >> 32)), addr); + writel(((u32) (val)), (addr + 4)); +} +#endif + +#endif /* _IPR_H */ diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c new file mode 100644 index 000000000..bb2065092 --- /dev/null +++ b/drivers/scsi/ips.c @@ -0,0 +1,7099 @@ +/*****************************************************************************/ +/* ips.c -- driver for the Adaptec / IBM ServeRAID controller */ +/* */ +/* Written By: Keith Mitchell, IBM Corporation */ +/* Jack Hammer, Adaptec, Inc. */ +/* David Jeffery, Adaptec, Inc. */ +/* */ +/* Copyright (C) 2000 IBM Corporation */ +/* Copyright (C) 2002,2003 Adaptec, Inc. */ +/* */ +/* This program is free software; you can redistribute it and/or modify */ +/* it under the terms of the GNU General Public License as published by */ +/* the Free Software Foundation; either version 2 of the License, or */ +/* (at your option) any later version. */ +/* */ +/* This program is distributed in the hope that it will be useful, */ +/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ +/* GNU General Public License for more details. 
*/ +/* */ +/* NO WARRANTY */ +/* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR */ +/* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT */ +/* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, */ +/* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is */ +/* solely responsible for determining the appropriateness of using and */ +/* distributing the Program and assumes all risks associated with its */ +/* exercise of rights under this Agreement, including but not limited to */ +/* the risks and costs of program errors, damage to or loss of data, */ +/* programs or equipment, and unavailability or interruption of operations. */ +/* */ +/* DISCLAIMER OF LIABILITY */ +/* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY */ +/* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL */ +/* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND */ +/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR */ +/* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE */ +/* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED */ +/* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES */ +/* */ +/* You should have received a copy of the GNU General Public License */ +/* along with this program; if not, write to the Free Software */ +/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +/* */ +/* Bugs/Comments/Suggestions about this driver should be mailed to: */ +/* ipslinux@adaptec.com */ +/* */ +/* For system support issues, contact your local IBM Customer support. */ +/* Directions to find IBM Customer Support for each country can be found at: */ +/* http://www.ibm.com/planetwide/ */ +/* */ +/*****************************************************************************/ + +/*****************************************************************************/ +/* Change Log */ +/* */ +/* 0.99.02 - Breakup commands that are bigger than 8 * the stripe size */ +/* 0.99.03 - Make interrupt routine handle all completed request on the */ +/* adapter not just the first one */ +/* - Make sure passthru commands get woken up if we run out of */ +/* SCBs */ +/* - Send all of the commands on the queue at once rather than */ +/* one at a time since the card will support it. */ +/* 0.99.04 - Fix race condition in the passthru mechanism -- this required */ +/* the interface to the utilities to change */ +/* - Fix error recovery code */ +/* 0.99.05 - Fix an oops when we get certain passthru commands */ +/* 1.00.00 - Initial Public Release */ +/* Functionally equivalent to 0.99.05 */ +/* 3.60.00 - Bump max commands to 128 for use with firmware 3.60 */ +/* - Change version to 3.60 to coincide with release numbering. 
*/ +/* 3.60.01 - Remove bogus error check in passthru routine */ +/* 3.60.02 - Make DCDB direction based on lookup table */ +/* - Only allow one DCDB command to a SCSI ID at a time */ +/* 4.00.00 - Add support for ServeRAID 4 */ +/* 4.00.01 - Add support for First Failure Data Capture */ +/* 4.00.02 - Fix problem with PT DCDB with no buffer */ +/* 4.00.03 - Add alternative passthru interface */ +/* - Add ability to flash BIOS */ +/* 4.00.04 - Rename structures/constants to be prefixed with IPS_ */ +/* 4.00.05 - Remove wish_block from init routine */ +/* - Use linux/spinlock.h instead of asm/spinlock.h for kernels */ +/* 2.3.18 and later */ +/* - Sync with other changes from the 2.3 kernels */ +/* 4.00.06 - Fix timeout with initial FFDC command */ +/* 4.00.06a - Port to 2.4 (trivial) -- Christoph Hellwig */ +/* 4.10.00 - Add support for ServeRAID 4M/4L */ +/* 4.10.13 - Fix for dynamic unload and proc file system */ +/* 4.20.03 - Rename version to coincide with new release schedules */ +/* Performance fixes */ +/* Fix truncation of /proc files with cat */ +/* Merge in changes through kernel 2.4.0test1ac21 */ +/* 4.20.13 - Fix some failure cases / reset code */ +/* - Hook into the reboot_notifier to flush the controller cache */ +/* 4.50.01 - Fix problem when there is a hole in logical drive numbering */ +/* 4.70.09 - Use a Common ( Large Buffer ) for Flashing from the JCRM CD */ +/* - Add IPSSEND Flash Support */ +/* - Set Sense Data for Unknown SCSI Command */ +/* - Use Slot Number from NVRAM Page 5 */ +/* - Restore caller's DCDB Structure */ +/* 4.70.12 - Corrective actions for bad controller ( during initialization )*/ +/* 4.70.13 - Don't Send CDB's if we already know the device is not present */ +/* - Don't release HA Lock in ips_next() until SC taken off queue */ +/* - Unregister SCSI device in ips_release() */ +/* 4.70.15 - Fix Breakup for very large ( non-SG ) requests in ips_done() */ +/* 4.71.00 - Change all memory allocations to not use GFP_DMA flag */ +/* Code Clean-Up for 2.4.x kernel */ +/* 4.72.00 - Allow for a Scatter-Gather Element to exceed MAX_XFER Size */ +/* 4.72.01 - I/O Mapped Memory release ( so "insmod ips" does not Fail ) */ +/* - Don't Issue Internal FFDC Command if there are Active Commands */ +/* - Close Window for getting too many IOCTL's active */ +/* 4.80.00 - Make ia64 Safe */ +/* 4.80.04 - Eliminate calls to strtok() if 2.4.x or greater */ +/* - Adjustments to Device Queue Depth */ +/* 4.80.14 - Take all semaphores off stack */ +/* - Clean Up New_IOCTL path */ +/* 4.80.20 - Set max_sectors in Scsi_Host structure ( if >= 2.4.7 kernel ) */ +/* - 5 second delay needed after resetting an i960 adapter */ +/* 4.80.26 - Clean up potential code problems ( Arjan's recommendations ) */ +/* 4.90.01 - Version Matching for FirmWare, BIOS, and Driver */ +/* 4.90.05 - Use New PCI Architecture to facilitate Hot Plug Development */ +/* 4.90.08 - Increase Delays in Flashing ( Trombone Only - 4H ) */ +/* 4.90.08 - Data Corruption if First Scatter Gather Element is > 64K */ +/* 4.90.11 - Don't actually RESET unless it's physically required */ +/* - Remove unused compile options */ +/* 5.00.01 - Sarasota ( 5i ) adapters must always be scanned first */ +/* - Get rid on IOCTL_NEW_COMMAND code */ +/* - Add Extended DCDB Commands for Tape Support in 5I */ +/* 5.10.12 - use pci_dma interfaces, update for 2.5 kernel changes */ +/* 5.10.15 - remove unused code (sem, macros, etc.) 
*/ +/* 5.30.00 - use __devexit_p() */ +/* 6.00.00 - Add 6x Adapters and Battery Flash */ +/* 6.10.00 - Remove 1G Addressing Limitations */ +/* 6.11.xx - Get VersionInfo buffer off the stack ! DDTS 60401 */ +/* 6.11.xx - Make Logical Drive Info structure safe for DMA DDTS 60639 */ +/* 7.10.18 - Add highmem_io flag in SCSI Templete for 2.4 kernels */ +/* - Fix path/name for scsi_hosts.h include for 2.6 kernels */ +/* - Fix sort order of 7k */ +/* - Remove 3 unused "inline" functions */ +/* 7.12.xx - Use STATIC functions wherever possible */ +/* - Clean up deprecated MODULE_PARM calls */ +/* 7.12.05 - Remove Version Matching per IBM request */ +/*****************************************************************************/ + +/* + * Conditional Compilation directives for this driver: + * + * IPS_DEBUG - Turn on debugging info + * + * Parameters: + * + * debug: - Set debug level to + * NOTE: only works when IPS_DEBUG compile directive is used. + * 1 - Normal debug messages + * 2 - Verbose debug messages + * 11 - Method trace (non interrupt) + * 12 - Method trace (includes interrupt) + * + * noi2o - Don't use I2O Queues (ServeRAID 4 only) + * nommap - Don't use memory mapped I/O + * ioctlsize - Initial size of the IOCTL buffer + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "ips.h" + +#include + +#include + +#include +#include + +#include + +#ifdef MODULE +static char *ips = NULL; +module_param(ips, charp, 0); +#endif + +/* + * DRIVER_VER + */ +#define IPS_VERSION_HIGH IPS_VER_MAJOR_STRING "." IPS_VER_MINOR_STRING +#define IPS_VERSION_LOW "." IPS_VER_BUILD_STRING " " + +#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \ + DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \ + DMA_BIDIRECTIONAL : \ + scb->scsi_cmd->sc_data_direction) + +#ifdef IPS_DEBUG +#define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n"); +#define DEBUG(i, s) if (ips_debug >= i) printk(KERN_NOTICE s "\n"); +#define DEBUG_VAR(i, s, v...) if (ips_debug >= i) printk(KERN_NOTICE s "\n", v); +#else +#define METHOD_TRACE(s, i) +#define DEBUG(i, s) +#define DEBUG_VAR(i, s, v...) 
+#endif + +/* + * Function prototypes + */ +static int ips_eh_abort(struct scsi_cmnd *); +static int ips_eh_reset(struct scsi_cmnd *); +static int ips_queue(struct Scsi_Host *, struct scsi_cmnd *); +static const char *ips_info(struct Scsi_Host *); +static irqreturn_t do_ipsintr(int, void *); +static int ips_hainit(ips_ha_t *); +static int ips_map_status(ips_ha_t *, ips_scb_t *, ips_stat_t *); +static int ips_send_wait(ips_ha_t *, ips_scb_t *, int, int); +static int ips_send_cmd(ips_ha_t *, ips_scb_t *); +static int ips_online(ips_ha_t *, ips_scb_t *); +static int ips_inquiry(ips_ha_t *, ips_scb_t *); +static int ips_rdcap(ips_ha_t *, ips_scb_t *); +static int ips_msense(ips_ha_t *, ips_scb_t *); +static int ips_reqsen(ips_ha_t *, ips_scb_t *); +static int ips_deallocatescbs(ips_ha_t *, int); +static int ips_allocatescbs(ips_ha_t *); +static int ips_reset_copperhead(ips_ha_t *); +static int ips_reset_copperhead_memio(ips_ha_t *); +static int ips_reset_morpheus(ips_ha_t *); +static int ips_issue_copperhead(ips_ha_t *, ips_scb_t *); +static int ips_issue_copperhead_memio(ips_ha_t *, ips_scb_t *); +static int ips_issue_i2o(ips_ha_t *, ips_scb_t *); +static int ips_issue_i2o_memio(ips_ha_t *, ips_scb_t *); +static int ips_isintr_copperhead(ips_ha_t *); +static int ips_isintr_copperhead_memio(ips_ha_t *); +static int ips_isintr_morpheus(ips_ha_t *); +static int ips_wait(ips_ha_t *, int, int); +static int ips_write_driver_status(ips_ha_t *, int); +static int ips_read_adapter_status(ips_ha_t *, int); +static int ips_read_subsystem_parameters(ips_ha_t *, int); +static int ips_read_config(ips_ha_t *, int); +static int ips_clear_adapter(ips_ha_t *, int); +static int ips_readwrite_page5(ips_ha_t *, int, int); +static int ips_init_copperhead(ips_ha_t *); +static int ips_init_copperhead_memio(ips_ha_t *); +static int ips_init_morpheus(ips_ha_t *); +static int ips_isinit_copperhead(ips_ha_t *); +static int ips_isinit_copperhead_memio(ips_ha_t *); +static int ips_isinit_morpheus(ips_ha_t *); +static int ips_erase_bios(ips_ha_t *); +static int ips_program_bios(ips_ha_t *, char *, uint32_t, uint32_t); +static int ips_verify_bios(ips_ha_t *, char *, uint32_t, uint32_t); +static int ips_erase_bios_memio(ips_ha_t *); +static int ips_program_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t); +static int ips_verify_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t); +static int ips_flash_copperhead(ips_ha_t *, ips_passthru_t *, ips_scb_t *); +static int ips_flash_bios(ips_ha_t *, ips_passthru_t *, ips_scb_t *); +static int ips_flash_firmware(ips_ha_t *, ips_passthru_t *, ips_scb_t *); +static void ips_free_flash_copperhead(ips_ha_t * ha); +static void ips_get_bios_version(ips_ha_t *, int); +static void ips_identify_controller(ips_ha_t *); +static void ips_chkstatus(ips_ha_t *, IPS_STATUS *); +static void ips_enable_int_copperhead(ips_ha_t *); +static void ips_enable_int_copperhead_memio(ips_ha_t *); +static void ips_enable_int_morpheus(ips_ha_t *); +static int ips_intr_copperhead(ips_ha_t *); +static int ips_intr_morpheus(ips_ha_t *); +static void ips_next(ips_ha_t *, int); +static void ipsintr_blocking(ips_ha_t *, struct ips_scb *); +static void ipsintr_done(ips_ha_t *, struct ips_scb *); +static void ips_done(ips_ha_t *, ips_scb_t *); +static void ips_free(ips_ha_t *); +static void ips_init_scb(ips_ha_t *, ips_scb_t *); +static void ips_freescb(ips_ha_t *, ips_scb_t *); +static void ips_setup_funclist(ips_ha_t *); +static void ips_statinit(ips_ha_t *); +static void ips_statinit_memio(ips_ha_t *); +static void 
ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time64_t); +static void ips_ffdc_reset(ips_ha_t *, int); +static void ips_ffdc_time(ips_ha_t *); +static uint32_t ips_statupd_copperhead(ips_ha_t *); +static uint32_t ips_statupd_copperhead_memio(ips_ha_t *); +static uint32_t ips_statupd_morpheus(ips_ha_t *); +static ips_scb_t *ips_getscb(ips_ha_t *); +static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *); +static void ips_putq_wait_tail(ips_wait_queue_entry_t *, struct scsi_cmnd *); +static void ips_putq_copp_tail(ips_copp_queue_t *, + ips_copp_wait_item_t *); +static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *); +static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *); +static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *); +static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *, + struct scsi_cmnd *); +static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *, + ips_copp_wait_item_t *); +static ips_copp_wait_item_t *ips_removeq_copp_head(ips_copp_queue_t *); + +static int ips_is_passthru(struct scsi_cmnd *); +static int ips_make_passthru(ips_ha_t *, struct scsi_cmnd *, ips_scb_t *, int); +static int ips_usrcmd(ips_ha_t *, ips_passthru_t *, ips_scb_t *); +static void ips_cleanup_passthru(ips_ha_t *, ips_scb_t *); +static void ips_scmd_buf_write(struct scsi_cmnd * scmd, void *data, + unsigned int count); +static void ips_scmd_buf_read(struct scsi_cmnd * scmd, void *data, + unsigned int count); + +static int ips_write_info(struct Scsi_Host *, char *, int); +static int ips_show_info(struct seq_file *, struct Scsi_Host *); +static int ips_host_info(ips_ha_t *, struct seq_file *); +static int ips_abort_init(ips_ha_t * ha, int index); +static int ips_init_phase2(int index); + +static int ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr); +static int ips_register_scsi(int index); + +static int ips_poll_for_flush_complete(ips_ha_t * ha); +static void ips_flush_and_reset(ips_ha_t *ha); + +/* + * global variables + */ +static const char ips_name[] = "ips"; +static struct Scsi_Host *ips_sh[IPS_MAX_ADAPTERS]; /* Array of host controller structures */ +static ips_ha_t *ips_ha[IPS_MAX_ADAPTERS]; /* Array of HA structures */ +static unsigned int ips_next_controller; +static unsigned int ips_num_controllers; +static unsigned int ips_released_controllers; +static int ips_hotplug; +static int ips_cmd_timeout = 60; +static int ips_reset_timeout = 60 * 5; +static int ips_force_memio = 1; /* Always use Memory Mapped I/O */ +static int ips_force_i2o = 1; /* Always use I2O command delivery */ +static int ips_ioctlsize = IPS_IOCTL_SIZE; /* Size of the ioctl buffer */ +static int ips_cd_boot; /* Booting from Manager CD */ +static char *ips_FlashData = NULL; /* CD Boot - Flash Data Buffer */ +static dma_addr_t ips_flashbusaddr; +static long ips_FlashDataInUse; /* CD Boot - Flash Data In Use Flag */ +static uint32_t MaxLiteCmds = 32; /* Max Active Cmds for a Lite Adapter */ +static struct scsi_host_template ips_driver_template = { + .info = ips_info, + .queuecommand = ips_queue, + .eh_abort_handler = ips_eh_abort, + .eh_host_reset_handler = ips_eh_reset, + .proc_name = "ips", + .show_info = ips_show_info, + .write_info = ips_write_info, + .slave_configure = ips_slave_configure, + .bios_param = ips_biosparam, + .this_id = -1, + .sg_tablesize = IPS_MAX_SG, + .cmd_per_lun = 3, + .no_write_same = 1, +}; + + +/* This table describes all ServeRAID Adapters */ +static struct pci_device_id ips_pci_table[] = { + { 0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, + 
{ 0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, + { 0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, + { 0, } +}; + +MODULE_DEVICE_TABLE( pci, ips_pci_table ); + +static char ips_hot_plug_name[] = "ips"; + +static int ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent); +static void ips_remove_device(struct pci_dev *pci_dev); + +static struct pci_driver ips_pci_driver = { + .name = ips_hot_plug_name, + .id_table = ips_pci_table, + .probe = ips_insert_device, + .remove = ips_remove_device, +}; + + +/* + * Necessary forward function protoypes + */ +static int ips_halt(struct notifier_block *nb, ulong event, void *buf); + +#define MAX_ADAPTER_NAME 15 + +static char ips_adapter_name[][30] = { + "ServeRAID", + "ServeRAID II", + "ServeRAID on motherboard", + "ServeRAID on motherboard", + "ServeRAID 3H", + "ServeRAID 3L", + "ServeRAID 4H", + "ServeRAID 4M", + "ServeRAID 4L", + "ServeRAID 4Mx", + "ServeRAID 4Lx", + "ServeRAID 5i", + "ServeRAID 5i", + "ServeRAID 6M", + "ServeRAID 6i", + "ServeRAID 7t", + "ServeRAID 7k", + "ServeRAID 7M" +}; + +static struct notifier_block ips_notifier = { + ips_halt, NULL, 0 +}; + +/* + * Direction table + */ +static char ips_command_direction[] = { + IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, + IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, + IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT, + IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_OUT, + IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT, + IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_IN, + IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_UNK, + IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, + IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, + IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, + IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, + IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_NONE, + IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, + IPS_DATA_NONE, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, 
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_NONE, + IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_OUT, + IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_NONE, + IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_OUT, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK +}; + + +/****************************************************************************/ +/* */ +/* Routine Name: ips_setup */ +/* */ +/* Routine Description: */ +/* */ +/* setup parameters to the driver */ +/* */ +/****************************************************************************/ +static int +ips_setup(char *ips_str) +{ + + int i; + char *key; + char *value; + static const IPS_OPTION options[] = { + {"noi2o", &ips_force_i2o, 0}, + {"nommap", &ips_force_memio, 0}, + {"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE}, + {"cdboot", &ips_cd_boot, 0}, + {"maxcmds", &MaxLiteCmds, 32}, + }; + + /* Don't use strtok() anymore ( if 2.4 Kernel or beyond ) */ + /* Search for value */ + while ((key = strsep(&ips_str, ",."))) { + if (!*key) + continue; + value = strchr(key, ':'); + if (value) + *value++ = '\0'; + /* + * We now have key/value pairs. 
+ * Update the variables + */ + for (i = 0; i < ARRAY_SIZE(options); i++) { + if (strncasecmp + (key, options[i].option_name, + strlen(options[i].option_name)) == 0) { + if (value) + *options[i].option_flag = + simple_strtoul(value, NULL, 0); + else + *options[i].option_flag = + options[i].option_value; + break; + } + } + } + + return (1); +} + +__setup("ips=", ips_setup); + +/****************************************************************************/ +/* */ +/* Routine Name: ips_detect */ +/* */ +/* Routine Description: */ +/* */ +/* Detect and initialize the driver */ +/* */ +/* NOTE: this routine is called under the io_request_lock spinlock */ +/* */ +/****************************************************************************/ +static int +ips_detect(struct scsi_host_template * SHT) +{ + int i; + + METHOD_TRACE("ips_detect", 1); + +#ifdef MODULE + if (ips) + ips_setup(ips); +#endif + + for (i = 0; i < ips_num_controllers; i++) { + if (ips_register_scsi(i)) + ips_free(ips_ha[i]); + ips_released_controllers++; + } + ips_hotplug = 1; + return (ips_num_controllers); +} + +/****************************************************************************/ +/* configure the function pointers to use the functions that will work */ +/* with the found version of the adapter */ +/****************************************************************************/ +static void +ips_setup_funclist(ips_ha_t * ha) +{ + + /* + * Setup Functions + */ + if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) { + /* morpheus / marco / sebring */ + ha->func.isintr = ips_isintr_morpheus; + ha->func.isinit = ips_isinit_morpheus; + ha->func.issue = ips_issue_i2o_memio; + ha->func.init = ips_init_morpheus; + ha->func.statupd = ips_statupd_morpheus; + ha->func.reset = ips_reset_morpheus; + ha->func.intr = ips_intr_morpheus; + ha->func.enableint = ips_enable_int_morpheus; + } else if (IPS_USE_MEMIO(ha)) { + /* copperhead w/MEMIO */ + ha->func.isintr = ips_isintr_copperhead_memio; + ha->func.isinit = ips_isinit_copperhead_memio; + ha->func.init = ips_init_copperhead_memio; + ha->func.statupd = ips_statupd_copperhead_memio; + ha->func.statinit = ips_statinit_memio; + ha->func.reset = ips_reset_copperhead_memio; + ha->func.intr = ips_intr_copperhead; + ha->func.erasebios = ips_erase_bios_memio; + ha->func.programbios = ips_program_bios_memio; + ha->func.verifybios = ips_verify_bios_memio; + ha->func.enableint = ips_enable_int_copperhead_memio; + if (IPS_USE_I2O_DELIVER(ha)) + ha->func.issue = ips_issue_i2o_memio; + else + ha->func.issue = ips_issue_copperhead_memio; + } else { + /* copperhead */ + ha->func.isintr = ips_isintr_copperhead; + ha->func.isinit = ips_isinit_copperhead; + ha->func.init = ips_init_copperhead; + ha->func.statupd = ips_statupd_copperhead; + ha->func.statinit = ips_statinit; + ha->func.reset = ips_reset_copperhead; + ha->func.intr = ips_intr_copperhead; + ha->func.erasebios = ips_erase_bios; + ha->func.programbios = ips_program_bios; + ha->func.verifybios = ips_verify_bios; + ha->func.enableint = ips_enable_int_copperhead; + + if (IPS_USE_I2O_DELIVER(ha)) + ha->func.issue = ips_issue_i2o; + else + ha->func.issue = ips_issue_copperhead; + } +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_release */ +/* */ +/* Routine Description: */ +/* */ +/* Remove a driver */ +/* */ +/****************************************************************************/ +static void ips_release(struct Scsi_Host *sh) +{ + ips_scb_t *scb; + ips_ha_t *ha; + int i; + + 
METHOD_TRACE("ips_release", 1); + + scsi_remove_host(sh); + + for (i = 0; i < IPS_MAX_ADAPTERS && ips_sh[i] != sh; i++) ; + + if (i == IPS_MAX_ADAPTERS) { + printk(KERN_WARNING + "(%s) release, invalid Scsi_Host pointer.\n", ips_name); + BUG(); + } + + ha = IPS_HA(sh); + + if (!ha) + return; + + /* flush the cache on the controller */ + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FLUSH; + + scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; + scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.flush_cache.state = IPS_NORM_STATE; + scb->cmd.flush_cache.reserved = 0; + scb->cmd.flush_cache.reserved2 = 0; + scb->cmd.flush_cache.reserved3 = 0; + scb->cmd.flush_cache.reserved4 = 0; + + IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n"); + + /* send command */ + if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE) + IPS_PRINTK(KERN_WARNING, ha->pcidev, "Incomplete Flush.\n"); + + IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Complete.\n"); + + ips_sh[i] = NULL; + ips_ha[i] = NULL; + + /* free extra memory */ + ips_free(ha); + + /* free IRQ */ + free_irq(ha->pcidev->irq, ha); + + scsi_host_put(sh); + + ips_released_controllers++; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_halt */ +/* */ +/* Routine Description: */ +/* */ +/* Perform cleanup when the system reboots */ +/* */ +/****************************************************************************/ +static int +ips_halt(struct notifier_block *nb, ulong event, void *buf) +{ + ips_scb_t *scb; + ips_ha_t *ha; + int i; + + if ((event != SYS_RESTART) && (event != SYS_HALT) && + (event != SYS_POWER_OFF)) + return (NOTIFY_DONE); + + for (i = 0; i < ips_next_controller; i++) { + ha = (ips_ha_t *) ips_ha[i]; + + if (!ha) + continue; + + if (!ha->active) + continue; + + /* flush the cache on the controller */ + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FLUSH; + + scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; + scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.flush_cache.state = IPS_NORM_STATE; + scb->cmd.flush_cache.reserved = 0; + scb->cmd.flush_cache.reserved2 = 0; + scb->cmd.flush_cache.reserved3 = 0; + scb->cmd.flush_cache.reserved4 = 0; + + IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n"); + + /* send command */ + if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == + IPS_FAILURE) + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "Incomplete Flush.\n"); + else + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "Flushing Complete.\n"); + } + + return (NOTIFY_OK); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_eh_abort */ +/* */ +/* Routine Description: */ +/* */ +/* Abort a command (using the new error code stuff) */ +/* Note: this routine is called under the io_request_lock */ +/****************************************************************************/ +int ips_eh_abort(struct scsi_cmnd *SC) +{ + ips_ha_t *ha; + ips_copp_wait_item_t *item; + int ret; + struct Scsi_Host *host; + + METHOD_TRACE("ips_eh_abort", 1); + + if (!SC) + return (FAILED); + + host = SC->device->host; + ha = (ips_ha_t *) SC->device->host->hostdata; + + if (!ha) + return (FAILED); + + if (!ha->active) + return (FAILED); + + spin_lock(host->host_lock); + + /* See if the command is on the copp queue */ + item = 
ha->copp_waitlist.head; + while ((item) && (item->scsi_cmd != SC)) + item = item->next; + + if (item) { + /* Found it */ + ips_removeq_copp(&ha->copp_waitlist, item); + ret = (SUCCESS); + + /* See if the command is on the wait queue */ + } else if (ips_removeq_wait(&ha->scb_waitlist, SC)) { + /* command not sent yet */ + ret = (SUCCESS); + } else { + /* command must have already been sent */ + ret = (FAILED); + } + + spin_unlock(host->host_lock); + return ret; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_eh_reset */ +/* */ +/* Routine Description: */ +/* */ +/* Reset the controller (with new eh error code) */ +/* */ +/* NOTE: this routine is called under the io_request_lock spinlock */ +/* */ +/****************************************************************************/ +static int __ips_eh_reset(struct scsi_cmnd *SC) +{ + int ret; + int i; + ips_ha_t *ha; + ips_scb_t *scb; + ips_copp_wait_item_t *item; + + METHOD_TRACE("ips_eh_reset", 1); + +#ifdef NO_IPS_RESET + return (FAILED); +#else + + if (!SC) { + DEBUG(1, "Reset called with NULL scsi command"); + + return (FAILED); + } + + ha = (ips_ha_t *) SC->device->host->hostdata; + + if (!ha) { + DEBUG(1, "Reset called with NULL ha struct"); + + return (FAILED); + } + + if (!ha->active) + return (FAILED); + + /* See if the command is on the copp queue */ + item = ha->copp_waitlist.head; + while ((item) && (item->scsi_cmd != SC)) + item = item->next; + + if (item) { + /* Found it */ + ips_removeq_copp(&ha->copp_waitlist, item); + return (SUCCESS); + } + + /* See if the command is on the wait queue */ + if (ips_removeq_wait(&ha->scb_waitlist, SC)) { + /* command not sent yet */ + return (SUCCESS); + } + + /* An explanation for the casual observer: */ + /* Part of the function of a RAID controller is automatic error */ + /* detection and recovery. As such, the only problem that physically */ + /* resetting an adapter will ever fix is when, for some reason, */ + /* the driver is not successfully communicating with the adapter. */ + /* Therefore, we will attempt to flush this adapter. If that succeeds, */ + /* then there's no real purpose in a physical reset. This will complete */ + /* much faster and avoids any problems that might be caused by a */ + /* physical reset ( such as having to fail all the outstanding I/O's ). */ + + if (ha->ioctl_reset == 0) { /* IF Not an IOCTL Requested Reset */ + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FLUSH; + + scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; + scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.flush_cache.state = IPS_NORM_STATE; + scb->cmd.flush_cache.reserved = 0; + scb->cmd.flush_cache.reserved2 = 0; + scb->cmd.flush_cache.reserved3 = 0; + scb->cmd.flush_cache.reserved4 = 0; + + /* Attempt the flush command */ + ret = ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_IORL); + if (ret == IPS_SUCCESS) { + IPS_PRINTK(KERN_NOTICE, ha->pcidev, + "Reset Request - Flushed Cache\n"); + return (SUCCESS); + } + } + + /* Either we can't communicate with the adapter or it's an IOCTL request */ + /* from a utility. A physical reset is needed at this point. 
*/ + + ha->ioctl_reset = 0; /* Reset the IOCTL Requested Reset Flag */ + + /* + * command must have already been sent + * reset the controller + */ + IPS_PRINTK(KERN_NOTICE, ha->pcidev, "Resetting controller.\n"); + ret = (*ha->func.reset) (ha); + + if (!ret) { + struct scsi_cmnd *scsi_cmd; + + IPS_PRINTK(KERN_NOTICE, ha->pcidev, + "Controller reset failed - controller now offline.\n"); + + /* Now fail all of the active commands */ + DEBUG_VAR(1, "(%s%d) Failing active commands", + ips_name, ha->host_num); + + while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { + scb->scsi_cmd->result = DID_ERROR << 16; + scsi_done(scb->scsi_cmd); + ips_freescb(ha, scb); + } + + /* Now fail all of the pending commands */ + DEBUG_VAR(1, "(%s%d) Failing pending commands", + ips_name, ha->host_num); + + while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) { + scsi_cmd->result = DID_ERROR; + scsi_done(scsi_cmd); + } + + ha->active = false; + return (FAILED); + } + + if (!ips_clear_adapter(ha, IPS_INTR_IORL)) { + struct scsi_cmnd *scsi_cmd; + + IPS_PRINTK(KERN_NOTICE, ha->pcidev, + "Controller reset failed - controller now offline.\n"); + + /* Now fail all of the active commands */ + DEBUG_VAR(1, "(%s%d) Failing active commands", + ips_name, ha->host_num); + + while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { + scb->scsi_cmd->result = DID_ERROR << 16; + scsi_done(scb->scsi_cmd); + ips_freescb(ha, scb); + } + + /* Now fail all of the pending commands */ + DEBUG_VAR(1, "(%s%d) Failing pending commands", + ips_name, ha->host_num); + + while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) { + scsi_cmd->result = DID_ERROR << 16; + scsi_done(scsi_cmd); + } + + ha->active = false; + return (FAILED); + } + + /* FFDC */ + if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) { + ha->last_ffdc = ktime_get_real_seconds(); + ha->reset_count++; + ips_ffdc_reset(ha, IPS_INTR_IORL); + } + + /* Now fail all of the active commands */ + DEBUG_VAR(1, "(%s%d) Failing active commands", ips_name, ha->host_num); + + while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { + scb->scsi_cmd->result = DID_RESET << 16; + scsi_done(scb->scsi_cmd); + ips_freescb(ha, scb); + } + + /* Reset DCDB active command bits */ + for (i = 1; i < ha->nbus; i++) + ha->dcdb_active[i - 1] = 0; + + /* Reset the number of active IOCTLs */ + ha->num_ioctl = 0; + + ips_next(ha, IPS_INTR_IORL); + + return (SUCCESS); +#endif /* NO_IPS_RESET */ + +} + +static int ips_eh_reset(struct scsi_cmnd *SC) +{ + int rc; + + spin_lock_irq(SC->device->host->host_lock); + rc = __ips_eh_reset(SC); + spin_unlock_irq(SC->device->host->host_lock); + + return rc; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_queue */ +/* */ +/* Routine Description: */ +/* */ +/* Send a command to the controller */ +/* */ +/* NOTE: */ +/* Linux obtains io_request_lock before calling this function */ +/* */ +/****************************************************************************/ +static int ips_queue_lck(struct scsi_cmnd *SC) +{ + void (*done)(struct scsi_cmnd *) = scsi_done; + ips_ha_t *ha; + ips_passthru_t *pt; + + METHOD_TRACE("ips_queue", 1); + + ha = (ips_ha_t *) SC->device->host->hostdata; + + if (!ha) + goto out_error; + + if (!ha->active) + goto out_error; + + if (ips_is_passthru(SC)) { + if (ha->copp_waitlist.count == IPS_MAX_IOCTL_QUEUE) { + SC->result = DID_BUS_BUSY << 16; + done(SC); + + return (0); + } + } else if (ha->scb_waitlist.count == IPS_MAX_QUEUE) { + SC->result = 
DID_BUS_BUSY << 16; + done(SC); + + return (0); + } + + DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)", + ips_name, + ha->host_num, + SC->cmnd[0], + SC->device->channel, SC->device->id, SC->device->lun); + + /* Check for command to initiator IDs */ + if ((scmd_channel(SC) > 0) + && (scmd_id(SC) == ha->ha_id[scmd_channel(SC)])) { + SC->result = DID_NO_CONNECT << 16; + done(SC); + + return (0); + } + + if (ips_is_passthru(SC)) { + + ips_copp_wait_item_t *scratch; + + /* A Reset IOCTL is only sent by the boot CD in extreme cases. */ + /* There can never be any system activity ( network or disk ), but check */ + /* anyway just as a good practice. */ + pt = (ips_passthru_t *) scsi_sglist(SC); + if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) && + (pt->CoppCP.cmd.reset.adapter_flag == 1)) { + if (ha->scb_activelist.count != 0) { + SC->result = DID_BUS_BUSY << 16; + done(SC); + return (0); + } + ha->ioctl_reset = 1; /* This reset request is from an IOCTL */ + __ips_eh_reset(SC); + SC->result = DID_OK << 16; + scsi_done(SC); + return (0); + } + + /* allocate space for the scribble */ + scratch = kmalloc(sizeof (ips_copp_wait_item_t), GFP_ATOMIC); + + if (!scratch) { + SC->result = DID_ERROR << 16; + done(SC); + + return (0); + } + + scratch->scsi_cmd = SC; + scratch->next = NULL; + + ips_putq_copp_tail(&ha->copp_waitlist, scratch); + } else { + ips_putq_wait_tail(&ha->scb_waitlist, SC); + } + + ips_next(ha, IPS_INTR_IORL); + + return (0); +out_error: + SC->result = DID_ERROR << 16; + done(SC); + + return (0); +} + +static DEF_SCSI_QCMD(ips_queue) + +/****************************************************************************/ +/* */ +/* Routine Name: ips_biosparam */ +/* */ +/* Routine Description: */ +/* */ +/* Set bios geometry for the controller */ +/* */ +/****************************************************************************/ +static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int geom[]) +{ + ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata; + int heads; + int sectors; + int cylinders; + + METHOD_TRACE("ips_biosparam", 1); + + if (!ha) + /* ?!?! host adater info invalid */ + return (0); + + if (!ha->active) + return (0); + + if (!ips_read_adapter_status(ha, IPS_INTR_ON)) + /* ?!?! 
Enquiry command failed */ + return (0); + + if ((capacity > 0x400000) && ((ha->enq->ucMiscFlag & 0x8) == 0)) { + heads = IPS_NORM_HEADS; + sectors = IPS_NORM_SECTORS; + } else { + heads = IPS_COMP_HEADS; + sectors = IPS_COMP_SECTORS; + } + + cylinders = (unsigned long) capacity / (heads * sectors); + + DEBUG_VAR(2, "Geometry: heads: %d, sectors: %d, cylinders: %d", + heads, sectors, cylinders); + + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + + return (0); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_slave_configure */ +/* */ +/* Routine Description: */ +/* */ +/* Set queue depths on devices once scan is complete */ +/* */ +/****************************************************************************/ +static int +ips_slave_configure(struct scsi_device * SDptr) +{ + ips_ha_t *ha; + int min; + + ha = IPS_HA(SDptr->host); + if (SDptr->tagged_supported && SDptr->type == TYPE_DISK) { + min = ha->max_cmds / 2; + if (ha->enq->ucLogDriveCount <= 2) + min = ha->max_cmds - 1; + scsi_change_queue_depth(SDptr, min); + } + + SDptr->skip_ms_page_8 = 1; + SDptr->skip_ms_page_3f = 1; + return 0; +} + +/****************************************************************************/ +/* */ +/* Routine Name: do_ipsintr */ +/* */ +/* Routine Description: */ +/* */ +/* Wrapper for the interrupt handler */ +/* */ +/****************************************************************************/ +static irqreturn_t +do_ipsintr(int irq, void *dev_id) +{ + ips_ha_t *ha; + struct Scsi_Host *host; + int irqstatus; + + METHOD_TRACE("do_ipsintr", 2); + + ha = (ips_ha_t *) dev_id; + if (!ha) + return IRQ_NONE; + host = ips_sh[ha->host_num]; + /* interrupt during initialization */ + if (!host) { + (*ha->func.intr) (ha); + return IRQ_HANDLED; + } + + spin_lock(host->host_lock); + + if (!ha->active) { + spin_unlock(host->host_lock); + return IRQ_HANDLED; + } + + irqstatus = (*ha->func.intr) (ha); + + spin_unlock(host->host_lock); + + /* start the next command */ + ips_next(ha, IPS_INTR_ON); + return IRQ_RETVAL(irqstatus); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_intr_copperhead */ +/* */ +/* Routine Description: */ +/* */ +/* Polling interrupt handler */ +/* */ +/* ASSUMES interrupts are disabled */ +/* */ +/****************************************************************************/ +int +ips_intr_copperhead(ips_ha_t * ha) +{ + ips_stat_t *sp; + ips_scb_t *scb; + IPS_STATUS cstatus; + int intrstatus; + + METHOD_TRACE("ips_intr", 2); + + if (!ha) + return 0; + + if (!ha->active) + return 0; + + intrstatus = (*ha->func.isintr) (ha); + + if (!intrstatus) { + /* + * Unexpected/Shared interrupt + */ + + return 0; + } + + while (true) { + sp = &ha->sp; + + intrstatus = (*ha->func.isintr) (ha); + + if (!intrstatus) + break; + else + cstatus.value = (*ha->func.statupd) (ha); + + if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) { + /* Spurious Interrupt ? 
*/ + continue; + } + + ips_chkstatus(ha, &cstatus); + scb = (ips_scb_t *) sp->scb_addr; + + /* + * use the callback function to finish things up + * NOTE: interrupts are OFF for this + */ + (*scb->callback) (ha, scb); + } /* end while */ + return 1; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_intr_morpheus */ +/* */ +/* Routine Description: */ +/* */ +/* Polling interrupt handler */ +/* */ +/* ASSUMES interrupts are disabled */ +/* */ +/****************************************************************************/ +int +ips_intr_morpheus(ips_ha_t * ha) +{ + ips_stat_t *sp; + ips_scb_t *scb; + IPS_STATUS cstatus; + int intrstatus; + + METHOD_TRACE("ips_intr_morpheus", 2); + + if (!ha) + return 0; + + if (!ha->active) + return 0; + + intrstatus = (*ha->func.isintr) (ha); + + if (!intrstatus) { + /* + * Unexpected/Shared interrupt + */ + + return 0; + } + + while (true) { + sp = &ha->sp; + + intrstatus = (*ha->func.isintr) (ha); + + if (!intrstatus) + break; + else + cstatus.value = (*ha->func.statupd) (ha); + + if (cstatus.value == 0xffffffff) + /* No more to process */ + break; + + if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "Spurious interrupt; no ccb.\n"); + + continue; + } + + ips_chkstatus(ha, &cstatus); + scb = (ips_scb_t *) sp->scb_addr; + + /* + * use the callback function to finish things up + * NOTE: interrupts are OFF for this + */ + (*scb->callback) (ha, scb); + } /* end while */ + return 1; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_info */ +/* */ +/* Routine Description: */ +/* */ +/* Return info about the driver */ +/* */ +/****************************************************************************/ +static const char * +ips_info(struct Scsi_Host *SH) +{ + static char buffer[256]; + char *bp; + ips_ha_t *ha; + + METHOD_TRACE("ips_info", 1); + + ha = IPS_HA(SH); + + if (!ha) + return (NULL); + + bp = &buffer[0]; + memset(bp, 0, sizeof (buffer)); + + sprintf(bp, "%s%s%s Build %d", "IBM PCI ServeRAID ", + IPS_VERSION_HIGH, IPS_VERSION_LOW, IPS_BUILD_IDENT); + + if (ha->ad_type > 0 && ha->ad_type <= MAX_ADAPTER_NAME) { + strcat(bp, " <"); + strcat(bp, ips_adapter_name[ha->ad_type - 1]); + strcat(bp, ">"); + } + + return (bp); +} + +static int +ips_write_info(struct Scsi_Host *host, char *buffer, int length) +{ + int i; + ips_ha_t *ha = NULL; + + /* Find our host structure */ + for (i = 0; i < ips_next_controller; i++) { + if (ips_sh[i]) { + if (ips_sh[i] == host) { + ha = (ips_ha_t *) ips_sh[i]->hostdata; + break; + } + } + } + + if (!ha) + return (-EINVAL); + + return 0; +} + +static int +ips_show_info(struct seq_file *m, struct Scsi_Host *host) +{ + int i; + ips_ha_t *ha = NULL; + + /* Find our host structure */ + for (i = 0; i < ips_next_controller; i++) { + if (ips_sh[i]) { + if (ips_sh[i] == host) { + ha = (ips_ha_t *) ips_sh[i]->hostdata; + break; + } + } + } + + if (!ha) + return (-EINVAL); + + return ips_host_info(ha, m); +} + +/*--------------------------------------------------------------------------*/ +/* Helper Functions */ +/*--------------------------------------------------------------------------*/ + +/****************************************************************************/ +/* */ +/* Routine Name: ips_is_passthru */ +/* */ +/* Routine Description: */ +/* */ +/* Determine if the specified SCSI command is really a passthru command */ +/* */ 
+/****************************************************************************/ +static int ips_is_passthru(struct scsi_cmnd *SC) +{ + unsigned long flags; + + METHOD_TRACE("ips_is_passthru", 1); + + if (!SC) + return (0); + + if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) && + (SC->device->channel == 0) && + (SC->device->id == IPS_ADAPTER_ID) && + (SC->device->lun == 0) && scsi_sglist(SC)) { + struct scatterlist *sg = scsi_sglist(SC); + char *buffer; + + /* local_irq_save() protects the KM_IRQ0 address slot. */ + local_irq_save(flags); + buffer = kmap_local_page(sg_page(sg)) + sg->offset; + if (buffer && buffer[0] == 'C' && buffer[1] == 'O' && + buffer[2] == 'P' && buffer[3] == 'P') { + kunmap_local(buffer); + local_irq_restore(flags); + return 1; + } + kunmap_local(buffer); + local_irq_restore(flags); + } + return 0; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_alloc_passthru_buffer */ +/* */ +/* Routine Description: */ +/* allocate a buffer large enough for the ioctl data if the ioctl buffer */ +/* is too small or doesn't exist */ +/****************************************************************************/ +static int +ips_alloc_passthru_buffer(ips_ha_t * ha, int length) +{ + void *bigger_buf; + dma_addr_t dma_busaddr; + + if (ha->ioctl_data && length <= ha->ioctl_len) + return 0; + /* there is no buffer or it's not big enough, allocate a new one */ + bigger_buf = dma_alloc_coherent(&ha->pcidev->dev, length, &dma_busaddr, + GFP_KERNEL); + if (bigger_buf) { + /* free the old memory */ + dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len, + ha->ioctl_data, ha->ioctl_busaddr); + /* use the new memory */ + ha->ioctl_data = (char *) bigger_buf; + ha->ioctl_len = length; + ha->ioctl_busaddr = dma_busaddr; + } else { + return -1; + } + return 0; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_make_passthru */ +/* */ +/* Routine Description: */ +/* */ +/* Make a passthru command out of the info in the Scsi block */ +/* */ +/****************************************************************************/ +static int +ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr) +{ + ips_passthru_t *pt; + int length = 0; + int i, ret; + struct scatterlist *sg = scsi_sglist(SC); + + METHOD_TRACE("ips_make_passthru", 1); + + scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i) + length += sg->length; + + if (length < sizeof (ips_passthru_t)) { + /* wrong size */ + DEBUG_VAR(1, "(%s%d) Passthru structure wrong size", + ips_name, ha->host_num); + return (IPS_FAILURE); + } + if (ips_alloc_passthru_buffer(ha, length)) { + /* allocation failure! If ha->ioctl_data exists, use it to return + some error codes. Return a failed command to the scsi layer. */ + if (ha->ioctl_data) { + pt = (ips_passthru_t *) ha->ioctl_data; + ips_scmd_buf_read(SC, pt, sizeof (ips_passthru_t)); + pt->BasicStatus = 0x0B; + pt->ExtendedStatus = 0x00; + ips_scmd_buf_write(SC, pt, sizeof (ips_passthru_t)); + } + return IPS_FAILURE; + } + ha->ioctl_datasize = length; + + ips_scmd_buf_read(SC, ha->ioctl_data, ha->ioctl_datasize); + pt = (ips_passthru_t *) ha->ioctl_data; + + /* + * Some notes about the passthru interface used + * + * IF the scsi op_code == 0x0d then we assume + * that the data came along with/goes with the + * packet we received from the sg driver. In this + * case the CmdBSize field of the pt structure is + * used for the size of the buffer. 
+ */ + + switch (pt->CoppCmd) { + case IPS_NUMCTRLS: + memcpy(ha->ioctl_data + sizeof (ips_passthru_t), + &ips_num_controllers, sizeof (int)); + ips_scmd_buf_write(SC, ha->ioctl_data, + sizeof (ips_passthru_t) + sizeof (int)); + SC->result = DID_OK << 16; + + return (IPS_SUCCESS_IMM); + + case IPS_COPPUSRCMD: + case IPS_COPPIOCCMD: + if (SC->cmnd[0] == IPS_IOCTL_COMMAND) { + if (length < (sizeof (ips_passthru_t) + pt->CmdBSize)) { + /* wrong size */ + DEBUG_VAR(1, + "(%s%d) Passthru structure wrong size", + ips_name, ha->host_num); + + return (IPS_FAILURE); + } + + if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD && + pt->CoppCP.cmd.flashfw.op_code == + IPS_CMD_RW_BIOSFW) { + ret = ips_flash_copperhead(ha, pt, scb); + ips_scmd_buf_write(SC, ha->ioctl_data, + sizeof (ips_passthru_t)); + return ret; + } + if (ips_usrcmd(ha, pt, scb)) + return (IPS_SUCCESS); + else + return (IPS_FAILURE); + } + + break; + + } /* end switch */ + + return (IPS_FAILURE); +} + +/****************************************************************************/ +/* Routine Name: ips_flash_copperhead */ +/* Routine Description: */ +/* Flash the BIOS/FW on a Copperhead style controller */ +/****************************************************************************/ +static int +ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb) +{ + int datasize; + + /* Trombone is the only copperhead that can do packet flash, but only + * for firmware. No one said it had to make sense. */ + if (IPS_IS_TROMBONE(ha) && pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) { + if (ips_usrcmd(ha, pt, scb)) + return IPS_SUCCESS; + else + return IPS_FAILURE; + } + pt->BasicStatus = 0x0B; + pt->ExtendedStatus = 0; + scb->scsi_cmd->result = DID_OK << 16; + /* IF it's OK to Use the "CD BOOT" Flash Buffer, then you can */ + /* avoid allocating a huge buffer per adapter ( which can fail ). 
*/ + if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && + pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) { + pt->BasicStatus = 0; + return ips_flash_bios(ha, pt, scb); + } else if (pt->CoppCP.cmd.flashfw.packet_num == 0) { + if (ips_FlashData && !test_and_set_bit(0, &ips_FlashDataInUse)){ + ha->flash_data = ips_FlashData; + ha->flash_busaddr = ips_flashbusaddr; + ha->flash_len = PAGE_SIZE << 7; + ha->flash_datasize = 0; + } else if (!ha->flash_data) { + datasize = pt->CoppCP.cmd.flashfw.total_packets * + pt->CoppCP.cmd.flashfw.count; + ha->flash_data = dma_alloc_coherent(&ha->pcidev->dev, + datasize, &ha->flash_busaddr, GFP_KERNEL); + if (!ha->flash_data){ + printk(KERN_WARNING "Unable to allocate a flash buffer\n"); + return IPS_FAILURE; + } + ha->flash_datasize = 0; + ha->flash_len = datasize; + } else + return IPS_FAILURE; + } else { + if (pt->CoppCP.cmd.flashfw.count + ha->flash_datasize > + ha->flash_len) { + ips_free_flash_copperhead(ha); + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "failed size sanity check\n"); + return IPS_FAILURE; + } + } + if (!ha->flash_data) + return IPS_FAILURE; + pt->BasicStatus = 0; + memcpy(&ha->flash_data[ha->flash_datasize], pt + 1, + pt->CoppCP.cmd.flashfw.count); + ha->flash_datasize += pt->CoppCP.cmd.flashfw.count; + if (pt->CoppCP.cmd.flashfw.packet_num == + pt->CoppCP.cmd.flashfw.total_packets - 1) { + if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE) + return ips_flash_bios(ha, pt, scb); + else if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) + return ips_flash_firmware(ha, pt, scb); + } + return IPS_SUCCESS_IMM; +} + +/****************************************************************************/ +/* Routine Name: ips_flash_bios */ +/* Routine Description: */ +/* flashes the bios of a copperhead adapter */ +/****************************************************************************/ +static int +ips_flash_bios(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb) +{ + + if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && + pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_BIOS) { + if ((!ha->func.programbios) || (!ha->func.erasebios) || + (!ha->func.verifybios)) + goto error; + if ((*ha->func.erasebios) (ha)) { + DEBUG_VAR(1, + "(%s%d) flash bios failed - unable to erase flash", + ips_name, ha->host_num); + goto error; + } else + if ((*ha->func.programbios) (ha, + ha->flash_data + + IPS_BIOS_HEADER, + ha->flash_datasize - + IPS_BIOS_HEADER, 0)) { + DEBUG_VAR(1, + "(%s%d) flash bios failed - unable to flash", + ips_name, ha->host_num); + goto error; + } else + if ((*ha->func.verifybios) (ha, + ha->flash_data + + IPS_BIOS_HEADER, + ha->flash_datasize - + IPS_BIOS_HEADER, 0)) { + DEBUG_VAR(1, + "(%s%d) flash bios failed - unable to verify flash", + ips_name, ha->host_num); + goto error; + } + ips_free_flash_copperhead(ha); + return IPS_SUCCESS_IMM; + } else if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && + pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) { + if (!ha->func.erasebios) + goto error; + if ((*ha->func.erasebios) (ha)) { + DEBUG_VAR(1, + "(%s%d) flash bios failed - unable to erase flash", + ips_name, ha->host_num); + goto error; + } + return IPS_SUCCESS_IMM; + } + error: + pt->BasicStatus = 0x0B; + pt->ExtendedStatus = 0x00; + ips_free_flash_copperhead(ha); + return IPS_FAILURE; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_fill_scb_sg_single */ +/* */ +/* Routine Description: */ +/* Fill in a single scb sg_list element from an address */ +/* return a -1 if a 
breakup occurred */ +/****************************************************************************/ +static int +ips_fill_scb_sg_single(ips_ha_t * ha, dma_addr_t busaddr, + ips_scb_t * scb, int indx, unsigned int e_len) +{ + + int ret_val = 0; + + if ((scb->data_len + e_len) > ha->max_xfer) { + e_len = ha->max_xfer - scb->data_len; + scb->breakup = indx; + ++scb->sg_break; + ret_val = -1; + } else { + scb->breakup = 0; + scb->sg_break = 0; + } + if (IPS_USE_ENH_SGLIST(ha)) { + scb->sg_list.enh_list[indx].address_lo = + cpu_to_le32(lower_32_bits(busaddr)); + scb->sg_list.enh_list[indx].address_hi = + cpu_to_le32(upper_32_bits(busaddr)); + scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len); + } else { + scb->sg_list.std_list[indx].address = + cpu_to_le32(lower_32_bits(busaddr)); + scb->sg_list.std_list[indx].length = cpu_to_le32(e_len); + } + + ++scb->sg_len; + scb->data_len += e_len; + return ret_val; +} + +/****************************************************************************/ +/* Routine Name: ips_flash_firmware */ +/* Routine Description: */ +/* flashes the firmware of a copperhead adapter */ +/****************************************************************************/ +static int +ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb) +{ + IPS_SG_LIST sg_list; + uint32_t cmd_busaddr; + + if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE && + pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_FW) { + memset(&pt->CoppCP.cmd, 0, sizeof (IPS_HOST_COMMAND)); + pt->CoppCP.cmd.flashfw.op_code = IPS_CMD_DOWNLOAD; + pt->CoppCP.cmd.flashfw.count = cpu_to_le32(ha->flash_datasize); + } else { + pt->BasicStatus = 0x0B; + pt->ExtendedStatus = 0x00; + ips_free_flash_copperhead(ha); + return IPS_FAILURE; + } + /* Save the S/G list pointer so it doesn't get clobbered */ + sg_list.list = scb->sg_list.list; + cmd_busaddr = scb->scb_busaddr; + /* copy in the CP */ + memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD)); + /* FIX stuff that might be wrong */ + scb->sg_list.list = sg_list.list; + scb->scb_busaddr = cmd_busaddr; + scb->bus = scb->scsi_cmd->device->channel; + scb->target_id = scb->scsi_cmd->device->id; + scb->lun = scb->scsi_cmd->device->lun; + scb->sg_len = 0; + scb->data_len = 0; + scb->flags = 0; + scb->op_code = 0; + scb->callback = ipsintr_done; + scb->timeout = ips_cmd_timeout; + + scb->data_len = ha->flash_datasize; + scb->data_busaddr = + dma_map_single(&ha->pcidev->dev, ha->flash_data, scb->data_len, + IPS_DMA_DIR(scb)); + scb->flags |= IPS_SCB_MAP_SINGLE; + scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.flashfw.buffer_addr = cpu_to_le32(scb->data_busaddr); + if (pt->TimeOut) + scb->timeout = pt->TimeOut; + scb->scsi_cmd->result = DID_OK << 16; + return IPS_SUCCESS; +} + +/****************************************************************************/ +/* Routine Name: ips_free_flash_copperhead */ +/* Routine Description: */ +/* release the memory resources used to hold the flash image */ +/****************************************************************************/ +static void +ips_free_flash_copperhead(ips_ha_t * ha) +{ + if (ha->flash_data == ips_FlashData) + test_and_clear_bit(0, &ips_FlashDataInUse); + else if (ha->flash_data) + dma_free_coherent(&ha->pcidev->dev, ha->flash_len, + ha->flash_data, ha->flash_busaddr); + ha->flash_data = NULL; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_usrcmd */ +/* */ +/* Routine Description: */ +/* */ +/* Process a user command 
and make it ready to send */ +/* */ +/****************************************************************************/ +static int +ips_usrcmd(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb) +{ + IPS_SG_LIST sg_list; + uint32_t cmd_busaddr; + + METHOD_TRACE("ips_usrcmd", 1); + + if ((!scb) || (!pt) || (!ha)) + return (0); + + /* Save the S/G list pointer so it doesn't get clobbered */ + sg_list.list = scb->sg_list.list; + cmd_busaddr = scb->scb_busaddr; + /* copy in the CP */ + memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD)); + memcpy(&scb->dcdb, &pt->CoppCP.dcdb, sizeof (IPS_DCDB_TABLE)); + + /* FIX stuff that might be wrong */ + scb->sg_list.list = sg_list.list; + scb->scb_busaddr = cmd_busaddr; + scb->bus = scb->scsi_cmd->device->channel; + scb->target_id = scb->scsi_cmd->device->id; + scb->lun = scb->scsi_cmd->device->lun; + scb->sg_len = 0; + scb->data_len = 0; + scb->flags = 0; + scb->op_code = 0; + scb->callback = ipsintr_done; + scb->timeout = ips_cmd_timeout; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + + /* we don't support DCDB/READ/WRITE Scatter Gather */ + if ((scb->cmd.basic_io.op_code == IPS_CMD_READ_SG) || + (scb->cmd.basic_io.op_code == IPS_CMD_WRITE_SG) || + (scb->cmd.basic_io.op_code == IPS_CMD_DCDB_SG)) + return (0); + + if (pt->CmdBSize) { + scb->data_len = pt->CmdBSize; + scb->data_busaddr = ha->ioctl_busaddr + sizeof (ips_passthru_t); + } else { + scb->data_busaddr = 0L; + } + + if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) + scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr + + (unsigned long) &scb-> + dcdb - + (unsigned long) scb); + + if (pt->CmdBSize) { + if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) + scb->dcdb.buffer_pointer = + cpu_to_le32(scb->data_busaddr); + else + scb->cmd.basic_io.sg_addr = + cpu_to_le32(scb->data_busaddr); + } + + /* set timeouts */ + if (pt->TimeOut) { + scb->timeout = pt->TimeOut; + + if (pt->TimeOut <= 10) + scb->dcdb.cmd_attribute |= IPS_TIMEOUT10; + else if (pt->TimeOut <= 60) + scb->dcdb.cmd_attribute |= IPS_TIMEOUT60; + else + scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M; + } + + /* assume success */ + scb->scsi_cmd->result = DID_OK << 16; + + /* success */ + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_cleanup_passthru */ +/* */ +/* Routine Description: */ +/* */ +/* Cleanup after a passthru command */ +/* */ +/****************************************************************************/ +static void +ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb) +{ + ips_passthru_t *pt; + + METHOD_TRACE("ips_cleanup_passthru", 1); + + if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) { + DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru", + ips_name, ha->host_num); + + return; + } + pt = (ips_passthru_t *) ha->ioctl_data; + + /* Copy data back to the user */ + if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) /* Copy DCDB Back to Caller's Area */ + memcpy(&pt->CoppCP.dcdb, &scb->dcdb, sizeof (IPS_DCDB_TABLE)); + + pt->BasicStatus = scb->basic_status; + pt->ExtendedStatus = scb->extended_status; + pt->AdapterType = ha->ad_type; + + if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD && + (scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD || + scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW)) + ips_free_flash_copperhead(ha); + + ips_scmd_buf_write(scb->scsi_cmd, ha->ioctl_data, ha->ioctl_datasize); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_host_info */ +/* */ 
+/* Routine Description: */ +/* */ +/* The passthru interface for the driver */ +/* */ +/****************************************************************************/ +static int +ips_host_info(ips_ha_t *ha, struct seq_file *m) +{ + METHOD_TRACE("ips_host_info", 1); + + seq_puts(m, "\nIBM ServeRAID General Information:\n\n"); + + if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) && + (le16_to_cpu(ha->nvram->adapter_type) != 0)) + seq_printf(m, "\tController Type : %s\n", + ips_adapter_name[ha->ad_type - 1]); + else + seq_puts(m, "\tController Type : Unknown\n"); + + if (ha->io_addr) + seq_printf(m, + "\tIO region : 0x%x (%d bytes)\n", + ha->io_addr, ha->io_len); + + if (ha->mem_addr) { + seq_printf(m, + "\tMemory region : 0x%x (%d bytes)\n", + ha->mem_addr, ha->mem_len); + seq_printf(m, + "\tShared memory address : 0x%lx\n", + (unsigned long)ha->mem_ptr); + } + + seq_printf(m, "\tIRQ number : %d\n", ha->pcidev->irq); + + /* For the Next 3 lines Check for Binary 0 at the end and don't include it if it's there. */ + /* That keeps everything happy for "text" operations on the proc file. */ + + if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) { + if (ha->nvram->bios_low[3] == 0) { + seq_printf(m, + "\tBIOS Version : %c%c%c%c%c%c%c\n", + ha->nvram->bios_high[0], ha->nvram->bios_high[1], + ha->nvram->bios_high[2], ha->nvram->bios_high[3], + ha->nvram->bios_low[0], ha->nvram->bios_low[1], + ha->nvram->bios_low[2]); + + } else { + seq_printf(m, + "\tBIOS Version : %c%c%c%c%c%c%c%c\n", + ha->nvram->bios_high[0], ha->nvram->bios_high[1], + ha->nvram->bios_high[2], ha->nvram->bios_high[3], + ha->nvram->bios_low[0], ha->nvram->bios_low[1], + ha->nvram->bios_low[2], ha->nvram->bios_low[3]); + } + + } + + if (ha->enq->CodeBlkVersion[7] == 0) { + seq_printf(m, + "\tFirmware Version : %c%c%c%c%c%c%c\n", + ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1], + ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3], + ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5], + ha->enq->CodeBlkVersion[6]); + } else { + seq_printf(m, + "\tFirmware Version : %c%c%c%c%c%c%c%c\n", + ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1], + ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3], + ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5], + ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]); + } + + if (ha->enq->BootBlkVersion[7] == 0) { + seq_printf(m, + "\tBoot Block Version : %c%c%c%c%c%c%c\n", + ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1], + ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3], + ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5], + ha->enq->BootBlkVersion[6]); + } else { + seq_printf(m, + "\tBoot Block Version : %c%c%c%c%c%c%c%c\n", + ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1], + ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3], + ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5], + ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]); + } + + seq_printf(m, "\tDriver Version : %s%s\n", + IPS_VERSION_HIGH, IPS_VERSION_LOW); + + seq_printf(m, "\tDriver Build : %d\n", + IPS_BUILD_IDENT); + + seq_printf(m, "\tMax Physical Devices : %d\n", + ha->enq->ucMaxPhysicalDevices); + seq_printf(m, "\tMax Active Commands : %d\n", + ha->max_cmds); + seq_printf(m, "\tCurrent Queued Commands : %d\n", + ha->scb_waitlist.count); + seq_printf(m, "\tCurrent Active Commands : %d\n", + ha->scb_activelist.count - ha->num_ioctl); + seq_printf(m, "\tCurrent Queued PT Commands : %d\n", + ha->copp_waitlist.count); + seq_printf(m, 
"\tCurrent Active PT Commands : %d\n", + ha->num_ioctl); + + seq_putc(m, '\n'); + + return 0; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_identify_controller */ +/* */ +/* Routine Description: */ +/* */ +/* Identify this controller */ +/* */ +/****************************************************************************/ +static void +ips_identify_controller(ips_ha_t * ha) +{ + METHOD_TRACE("ips_identify_controller", 1); + + switch (ha->pcidev->device) { + case IPS_DEVICEID_COPPERHEAD: + if (ha->pcidev->revision <= IPS_REVID_SERVERAID) { + ha->ad_type = IPS_ADTYPE_SERVERAID; + } else if (ha->pcidev->revision == IPS_REVID_SERVERAID2) { + ha->ad_type = IPS_ADTYPE_SERVERAID2; + } else if (ha->pcidev->revision == IPS_REVID_NAVAJO) { + ha->ad_type = IPS_ADTYPE_NAVAJO; + } else if ((ha->pcidev->revision == IPS_REVID_SERVERAID2) + && (ha->slot_num == 0)) { + ha->ad_type = IPS_ADTYPE_KIOWA; + } else if ((ha->pcidev->revision >= IPS_REVID_CLARINETP1) && + (ha->pcidev->revision <= IPS_REVID_CLARINETP3)) { + if (ha->enq->ucMaxPhysicalDevices == 15) + ha->ad_type = IPS_ADTYPE_SERVERAID3L; + else + ha->ad_type = IPS_ADTYPE_SERVERAID3; + } else if ((ha->pcidev->revision >= IPS_REVID_TROMBONE32) && + (ha->pcidev->revision <= IPS_REVID_TROMBONE64)) { + ha->ad_type = IPS_ADTYPE_SERVERAID4H; + } + break; + + case IPS_DEVICEID_MORPHEUS: + switch (ha->pcidev->subsystem_device) { + case IPS_SUBDEVICEID_4L: + ha->ad_type = IPS_ADTYPE_SERVERAID4L; + break; + + case IPS_SUBDEVICEID_4M: + ha->ad_type = IPS_ADTYPE_SERVERAID4M; + break; + + case IPS_SUBDEVICEID_4MX: + ha->ad_type = IPS_ADTYPE_SERVERAID4MX; + break; + + case IPS_SUBDEVICEID_4LX: + ha->ad_type = IPS_ADTYPE_SERVERAID4LX; + break; + + case IPS_SUBDEVICEID_5I2: + ha->ad_type = IPS_ADTYPE_SERVERAID5I2; + break; + + case IPS_SUBDEVICEID_5I1: + ha->ad_type = IPS_ADTYPE_SERVERAID5I1; + break; + } + + break; + + case IPS_DEVICEID_MARCO: + switch (ha->pcidev->subsystem_device) { + case IPS_SUBDEVICEID_6M: + ha->ad_type = IPS_ADTYPE_SERVERAID6M; + break; + case IPS_SUBDEVICEID_6I: + ha->ad_type = IPS_ADTYPE_SERVERAID6I; + break; + case IPS_SUBDEVICEID_7k: + ha->ad_type = IPS_ADTYPE_SERVERAID7k; + break; + case IPS_SUBDEVICEID_7M: + ha->ad_type = IPS_ADTYPE_SERVERAID7M; + break; + } + break; + } +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_get_bios_version */ +/* */ +/* Routine Description: */ +/* */ +/* Get the BIOS revision number */ +/* */ +/****************************************************************************/ +static void +ips_get_bios_version(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + int ret; + uint8_t major; + uint8_t minor; + uint8_t subminor; + uint8_t *buffer; + + METHOD_TRACE("ips_get_bios_version", 1); + + major = 0; + minor = 0; + + memcpy(ha->bios_version, " ?", 8); + + if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) { + if (IPS_USE_MEMIO(ha)) { + /* Memory Mapped I/O */ + + /* test 1st byte */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55) + return; + + writel(1, ha->mem_ptr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA) + return; + + /* Get Major version */ + writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ 
+ + major = readb(ha->mem_ptr + IPS_REG_FLDP); + + /* Get Minor version */ + writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + minor = readb(ha->mem_ptr + IPS_REG_FLDP); + + /* Get SubMinor version */ + writel(0x1FD, ha->mem_ptr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + subminor = readb(ha->mem_ptr + IPS_REG_FLDP); + + } else { + /* Programmed I/O */ + + /* test 1st byte */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) + return; + + outl(1, ha->io_addr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA) + return; + + /* Get Major version */ + outl(0x1FF, ha->io_addr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + major = inb(ha->io_addr + IPS_REG_FLDP); + + /* Get Minor version */ + outl(0x1FE, ha->io_addr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + minor = inb(ha->io_addr + IPS_REG_FLDP); + + /* Get SubMinor version */ + outl(0x1FD, ha->io_addr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + subminor = inb(ha->io_addr + IPS_REG_FLDP); + + } + } else { + /* Morpheus Family - Send Command to the card */ + + buffer = ha->ioctl_data; + + memset(buffer, 0, 0x1000); + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_RW_BIOSFW; + + scb->cmd.flashfw.op_code = IPS_CMD_RW_BIOSFW; + scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.flashfw.type = 1; + scb->cmd.flashfw.direction = 0; + scb->cmd.flashfw.count = cpu_to_le32(0x800); + scb->cmd.flashfw.total_packets = 1; + scb->cmd.flashfw.packet_num = 0; + scb->data_len = 0x1000; + scb->cmd.flashfw.buffer_addr = ha->ioctl_busaddr; + + /* issue the command */ + if (((ret = + ips_send_wait(ha, scb, ips_cmd_timeout, + intr)) == IPS_FAILURE) + || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { + /* Error occurred */ + + return; + } + + if ((buffer[0xC0] == 0x55) && (buffer[0xC1] == 0xAA)) { + major = buffer[0x1ff + 0xC0]; /* Offset 0x1ff after the header (0xc0) */ + minor = buffer[0x1fe + 0xC0]; /* Offset 0x1fe after the header (0xc0) */ + subminor = buffer[0x1fd + 0xC0]; /* Offset 0x1fd after the header (0xc0) */ + } else { + return; + } + } + + ha->bios_version[0] = hex_asc_upper_hi(major); + ha->bios_version[1] = '.'; + ha->bios_version[2] = hex_asc_upper_lo(major); + ha->bios_version[3] = hex_asc_upper_lo(subminor); + ha->bios_version[4] = '.'; + ha->bios_version[5] = hex_asc_upper_hi(minor); + ha->bios_version[6] = hex_asc_upper_lo(minor); + ha->bios_version[7] = 0; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_hainit */ +/* */ +/* Routine Description: */ +/* */ +/* Initialize the controller */ +/* */ +/* NOTE: Assumes to be called from with a lock */ +/* */ +/****************************************************************************/ +static int +ips_hainit(ips_ha_t * ha) +{ + int i; + + METHOD_TRACE("ips_hainit", 1); + + if (!ha) + return (0); + + if (ha->func.statinit) + (*ha->func.statinit) (ha); + + if (ha->func.enableint) + (*ha->func.enableint) (ha); + + /* Send FFDC 
*/ + ha->reset_count = 1; + ha->last_ffdc = ktime_get_real_seconds(); + ips_ffdc_reset(ha, IPS_INTR_IORL); + + if (!ips_read_config(ha, IPS_INTR_IORL)) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "unable to read config from controller.\n"); + + return (0); + } + /* end if */ + if (!ips_read_adapter_status(ha, IPS_INTR_IORL)) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "unable to read controller status.\n"); + + return (0); + } + + /* Identify this controller */ + ips_identify_controller(ha); + + if (!ips_read_subsystem_parameters(ha, IPS_INTR_IORL)) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "unable to read subsystem parameters.\n"); + + return (0); + } + + /* write nvram user page 5 */ + if (!ips_write_driver_status(ha, IPS_INTR_IORL)) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "unable to write driver info to controller.\n"); + + return (0); + } + + /* If there are Logical Drives and a Reset Occurred, then an EraseStripeLock is Needed */ + if ((ha->conf->ucLogDriveCount > 0) && (ha->requires_esl == 1)) + ips_clear_adapter(ha, IPS_INTR_IORL); + + /* set limits on SID, LUN, BUS */ + ha->ntargets = IPS_MAX_TARGETS + 1; + ha->nlun = 1; + ha->nbus = (ha->enq->ucMaxPhysicalDevices / IPS_MAX_TARGETS) + 1; + + switch (ha->conf->logical_drive[0].ucStripeSize) { + case 4: + ha->max_xfer = 0x10000; + break; + + case 5: + ha->max_xfer = 0x20000; + break; + + case 6: + ha->max_xfer = 0x40000; + break; + + case 7: + default: + ha->max_xfer = 0x80000; + break; + } + + /* setup max concurrent commands */ + if (le32_to_cpu(ha->subsys->param[4]) & 0x1) { + /* Use the new method */ + ha->max_cmds = ha->enq->ucConcurrentCmdCount; + } else { + /* use the old method */ + switch (ha->conf->logical_drive[0].ucStripeSize) { + case 4: + ha->max_cmds = 32; + break; + + case 5: + ha->max_cmds = 16; + break; + + case 6: + ha->max_cmds = 8; + break; + + case 7: + default: + ha->max_cmds = 4; + break; + } + } + + /* Limit the Active Commands on a Lite Adapter */ + if ((ha->ad_type == IPS_ADTYPE_SERVERAID3L) || + (ha->ad_type == IPS_ADTYPE_SERVERAID4L) || + (ha->ad_type == IPS_ADTYPE_SERVERAID4LX)) { + if ((ha->max_cmds > MaxLiteCmds) && (MaxLiteCmds)) + ha->max_cmds = MaxLiteCmds; + } + + /* set controller IDs */ + ha->ha_id[0] = IPS_ADAPTER_ID; + for (i = 1; i < ha->nbus; i++) { + ha->ha_id[i] = ha->conf->init_id[i - 1] & 0x1f; + ha->dcdb_active[i - 1] = 0; + } + + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_next */ +/* */ +/* Routine Description: */ +/* */ +/* Take the next command off the queue and send it to the controller */ +/* */ +/****************************************************************************/ +static void +ips_next(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + struct scsi_cmnd *SC; + struct scsi_cmnd *p; + struct scsi_cmnd *q; + ips_copp_wait_item_t *item; + int ret; + struct Scsi_Host *host; + METHOD_TRACE("ips_next", 1); + + if (!ha) + return; + host = ips_sh[ha->host_num]; + /* + * Block access to the queue function so + * this command won't time out + */ + if (intr == IPS_INTR_ON) + spin_lock(host->host_lock); + + if ((ha->subsys->param[3] & 0x300000) + && (ha->scb_activelist.count == 0)) { + time64_t now = ktime_get_real_seconds(); + if (now - ha->last_ffdc > IPS_SECS_8HOURS) { + ha->last_ffdc = now; + ips_ffdc_time(ha); + } + } + + /* + * Send passthru commands + * These have priority over normal I/O + * but shouldn't affect performance too much + * since we limit the number that can be active + * on the card 
at any one time + */ + while ((ha->num_ioctl < IPS_MAX_IOCTL) && + (ha->copp_waitlist.head) && (scb = ips_getscb(ha))) { + + item = ips_removeq_copp_head(&ha->copp_waitlist); + ha->num_ioctl++; + if (intr == IPS_INTR_ON) + spin_unlock(host->host_lock); + scb->scsi_cmd = item->scsi_cmd; + kfree(item); + + ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr); + + if (intr == IPS_INTR_ON) + spin_lock(host->host_lock); + switch (ret) { + case IPS_FAILURE: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_ERROR << 16; + scsi_done(scb->scsi_cmd); + } + + ips_freescb(ha, scb); + break; + case IPS_SUCCESS_IMM: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_OK << 16; + scsi_done(scb->scsi_cmd); + } + + ips_freescb(ha, scb); + break; + default: + break; + } /* end case */ + + if (ret != IPS_SUCCESS) { + ha->num_ioctl--; + continue; + } + + ret = ips_send_cmd(ha, scb); + + if (ret == IPS_SUCCESS) + ips_putq_scb_head(&ha->scb_activelist, scb); + else + ha->num_ioctl--; + + switch (ret) { + case IPS_FAILURE: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_ERROR << 16; + } + + ips_freescb(ha, scb); + break; + case IPS_SUCCESS_IMM: + ips_freescb(ha, scb); + break; + default: + break; + } /* end case */ + + } + + /* + * Send "Normal" I/O commands + */ + + p = ha->scb_waitlist.head; + while ((p) && (scb = ips_getscb(ha))) { + if ((scmd_channel(p) > 0) + && (ha-> + dcdb_active[scmd_channel(p) - + 1] & (1 << scmd_id(p)))) { + ips_freescb(ha, scb); + p = (struct scsi_cmnd *) p->host_scribble; + continue; + } + + q = p; + SC = ips_removeq_wait(&ha->scb_waitlist, q); + + if (intr == IPS_INTR_ON) + spin_unlock(host->host_lock); /* Unlock HA after command is taken off queue */ + + SC->result = DID_OK; + SC->host_scribble = NULL; + + scb->target_id = SC->device->id; + scb->lun = SC->device->lun; + scb->bus = SC->device->channel; + scb->scsi_cmd = SC; + scb->breakup = 0; + scb->data_len = 0; + scb->callback = ipsintr_done; + scb->timeout = ips_cmd_timeout; + memset(&scb->cmd, 0, 16); + + /* copy in the CDB */ + memcpy(scb->cdb, SC->cmnd, SC->cmd_len); + + scb->sg_count = scsi_dma_map(SC); + BUG_ON(scb->sg_count < 0); + if (scb->sg_count) { + struct scatterlist *sg; + int i; + + scb->flags |= IPS_SCB_MAP_SG; + + scsi_for_each_sg(SC, sg, scb->sg_count, i) { + if (ips_fill_scb_sg_single + (ha, sg_dma_address(sg), scb, i, + sg_dma_len(sg)) < 0) + break; + } + scb->dcdb.transfer_length = scb->data_len; + } else { + scb->data_busaddr = 0L; + scb->sg_len = 0; + scb->data_len = 0; + scb->dcdb.transfer_length = 0; + } + + scb->dcdb.cmd_attribute = + ips_command_direction[scb->scsi_cmd->cmnd[0]]; + + /* Allow a WRITE BUFFER Command to Have no Data */ + /* This is Used by Tape Flash Utilites */ + if ((scb->scsi_cmd->cmnd[0] == WRITE_BUFFER) && + (scb->data_len == 0)) + scb->dcdb.cmd_attribute = 0; + + if (!(scb->dcdb.cmd_attribute & 0x3)) + scb->dcdb.transfer_length = 0; + + if (scb->data_len >= IPS_MAX_XFER) { + scb->dcdb.cmd_attribute |= IPS_TRANSFER64K; + scb->dcdb.transfer_length = 0; + } + if (intr == IPS_INTR_ON) + spin_lock(host->host_lock); + + ret = ips_send_cmd(ha, scb); + + switch (ret) { + case IPS_SUCCESS: + ips_putq_scb_head(&ha->scb_activelist, scb); + break; + case IPS_FAILURE: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_ERROR << 16; + scsi_done(scb->scsi_cmd); + } + + if (scb->bus) + ha->dcdb_active[scb->bus - 1] &= + ~(1 << scb->target_id); + + ips_freescb(ha, scb); + break; + case IPS_SUCCESS_IMM: + if (scb->scsi_cmd) + scsi_done(scb->scsi_cmd); + + if (scb->bus) + 
ha->dcdb_active[scb->bus - 1] &= + ~(1 << scb->target_id); + + ips_freescb(ha, scb); + break; + default: + break; + } /* end case */ + + p = (struct scsi_cmnd *) p->host_scribble; + + } /* end while */ + + if (intr == IPS_INTR_ON) + spin_unlock(host->host_lock); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_putq_scb_head */ +/* */ +/* Routine Description: */ +/* */ +/* Add an item to the head of the queue */ +/* */ +/* ASSUMED to be called from within the HA lock */ +/* */ +/****************************************************************************/ +static void +ips_putq_scb_head(ips_scb_queue_t * queue, ips_scb_t * item) +{ + METHOD_TRACE("ips_putq_scb_head", 1); + + if (!item) + return; + + item->q_next = queue->head; + queue->head = item; + + if (!queue->tail) + queue->tail = item; + + queue->count++; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_removeq_scb_head */ +/* */ +/* Routine Description: */ +/* */ +/* Remove the head of the queue */ +/* */ +/* ASSUMED to be called from within the HA lock */ +/* */ +/****************************************************************************/ +static ips_scb_t * +ips_removeq_scb_head(ips_scb_queue_t * queue) +{ + ips_scb_t *item; + + METHOD_TRACE("ips_removeq_scb_head", 1); + + item = queue->head; + + if (!item) { + return (NULL); + } + + queue->head = item->q_next; + item->q_next = NULL; + + if (queue->tail == item) + queue->tail = NULL; + + queue->count--; + + return (item); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_removeq_scb */ +/* */ +/* Routine Description: */ +/* */ +/* Remove an item from a queue */ +/* */ +/* ASSUMED to be called from within the HA lock */ +/* */ +/****************************************************************************/ +static ips_scb_t * +ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item) +{ + ips_scb_t *p; + + METHOD_TRACE("ips_removeq_scb", 1); + + if (!item) + return (NULL); + + if (item == queue->head) { + return (ips_removeq_scb_head(queue)); + } + + p = queue->head; + + while ((p) && (item != p->q_next)) + p = p->q_next; + + if (p) { + /* found a match */ + p->q_next = item->q_next; + + if (!item->q_next) + queue->tail = p; + + item->q_next = NULL; + queue->count--; + + return (item); + } + + return (NULL); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_putq_wait_tail */ +/* */ +/* Routine Description: */ +/* */ +/* Add an item to the tail of the queue */ +/* */ +/* ASSUMED to be called from within the HA lock */ +/* */ +/****************************************************************************/ +static void ips_putq_wait_tail(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item) +{ + METHOD_TRACE("ips_putq_wait_tail", 1); + + if (!item) + return; + + item->host_scribble = NULL; + + if (queue->tail) + queue->tail->host_scribble = (char *) item; + + queue->tail = item; + + if (!queue->head) + queue->head = item; + + queue->count++; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_removeq_wait_head */ +/* */ +/* Routine Description: */ +/* */ +/* Remove the head of the queue */ +/* */ +/* ASSUMED to be called from within the HA lock */ +/* */ +/****************************************************************************/ +static struct scsi_cmnd 
*ips_removeq_wait_head(ips_wait_queue_entry_t *queue) +{ + struct scsi_cmnd *item; + + METHOD_TRACE("ips_removeq_wait_head", 1); + + item = queue->head; + + if (!item) { + return (NULL); + } + + queue->head = (struct scsi_cmnd *) item->host_scribble; + item->host_scribble = NULL; + + if (queue->tail == item) + queue->tail = NULL; + + queue->count--; + + return (item); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_removeq_wait */ +/* */ +/* Routine Description: */ +/* */ +/* Remove an item from a queue */ +/* */ +/* ASSUMED to be called from within the HA lock */ +/* */ +/****************************************************************************/ +static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *queue, + struct scsi_cmnd *item) +{ + struct scsi_cmnd *p; + + METHOD_TRACE("ips_removeq_wait", 1); + + if (!item) + return (NULL); + + if (item == queue->head) { + return (ips_removeq_wait_head(queue)); + } + + p = queue->head; + + while ((p) && (item != (struct scsi_cmnd *) p->host_scribble)) + p = (struct scsi_cmnd *) p->host_scribble; + + if (p) { + /* found a match */ + p->host_scribble = item->host_scribble; + + if (!item->host_scribble) + queue->tail = p; + + item->host_scribble = NULL; + queue->count--; + + return (item); + } + + return (NULL); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_putq_copp_tail */ +/* */ +/* Routine Description: */ +/* */ +/* Add an item to the tail of the queue */ +/* */ +/* ASSUMED to be called from within the HA lock */ +/* */ +/****************************************************************************/ +static void +ips_putq_copp_tail(ips_copp_queue_t * queue, ips_copp_wait_item_t * item) +{ + METHOD_TRACE("ips_putq_copp_tail", 1); + + if (!item) + return; + + item->next = NULL; + + if (queue->tail) + queue->tail->next = item; + + queue->tail = item; + + if (!queue->head) + queue->head = item; + + queue->count++; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_removeq_copp_head */ +/* */ +/* Routine Description: */ +/* */ +/* Remove the head of the queue */ +/* */ +/* ASSUMED to be called from within the HA lock */ +/* */ +/****************************************************************************/ +static ips_copp_wait_item_t * +ips_removeq_copp_head(ips_copp_queue_t * queue) +{ + ips_copp_wait_item_t *item; + + METHOD_TRACE("ips_removeq_copp_head", 1); + + item = queue->head; + + if (!item) { + return (NULL); + } + + queue->head = item->next; + item->next = NULL; + + if (queue->tail == item) + queue->tail = NULL; + + queue->count--; + + return (item); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_removeq_copp */ +/* */ +/* Routine Description: */ +/* */ +/* Remove an item from a queue */ +/* */ +/* ASSUMED to be called from within the HA lock */ +/* */ +/****************************************************************************/ +static ips_copp_wait_item_t * +ips_removeq_copp(ips_copp_queue_t * queue, ips_copp_wait_item_t * item) +{ + ips_copp_wait_item_t *p; + + METHOD_TRACE("ips_removeq_copp", 1); + + if (!item) + return (NULL); + + if (item == queue->head) { + return (ips_removeq_copp_head(queue)); + } + + p = queue->head; + + while ((p) && (item != p->next)) + p = p->next; + + if (p) { + /* found a match */ + p->next = item->next; + + if (!item->next) + queue->tail 
= p; + + item->next = NULL; + queue->count--; + + return (item); + } + + return (NULL); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ipsintr_blocking */ +/* */ +/* Routine Description: */ +/* */ +/* Finalize an interrupt for internal commands */ +/* */ +/****************************************************************************/ +static void +ipsintr_blocking(ips_ha_t * ha, ips_scb_t * scb) +{ + METHOD_TRACE("ipsintr_blocking", 2); + + ips_freescb(ha, scb); + if (ha->waitflag && ha->cmd_in_progress == scb->cdb[0]) { + ha->waitflag = false; + + return; + } +} + +/****************************************************************************/ +/* */ +/* Routine Name: ipsintr_done */ +/* */ +/* Routine Description: */ +/* */ +/* Finalize an interrupt for non-internal commands */ +/* */ +/****************************************************************************/ +static void +ipsintr_done(ips_ha_t * ha, ips_scb_t * scb) +{ + METHOD_TRACE("ipsintr_done", 2); + + if (!scb) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "Spurious interrupt; scb NULL.\n"); + + return; + } + + if (scb->scsi_cmd == NULL) { + /* unexpected interrupt */ + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "Spurious interrupt; scsi_cmd not set.\n"); + + return; + } + + ips_done(ha, scb); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_done */ +/* */ +/* Routine Description: */ +/* */ +/* Do housekeeping on completed commands */ +/* ASSUMED to be called form within the request lock */ +/****************************************************************************/ +static void +ips_done(ips_ha_t * ha, ips_scb_t * scb) +{ + int ret; + + METHOD_TRACE("ips_done", 1); + + if (!scb) + return; + + if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) { + ips_cleanup_passthru(ha, scb); + ha->num_ioctl--; + } else { + /* + * Check to see if this command had too much + * data and had to be broke up. If so, queue + * the rest of the data and continue. 
+ */ + if ((scb->breakup) || (scb->sg_break)) { + struct scatterlist *sg; + int i, sg_dma_index, ips_sg_index = 0; + + /* we had a data breakup */ + scb->data_len = 0; + + sg = scsi_sglist(scb->scsi_cmd); + + /* Spin forward to last dma chunk */ + sg_dma_index = scb->breakup; + for (i = 0; i < scb->breakup; i++) + sg = sg_next(sg); + + /* Take care of possible partial on last chunk */ + ips_fill_scb_sg_single(ha, + sg_dma_address(sg), + scb, ips_sg_index++, + sg_dma_len(sg)); + + for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd); + sg_dma_index++, sg = sg_next(sg)) { + if (ips_fill_scb_sg_single + (ha, + sg_dma_address(sg), + scb, ips_sg_index++, + sg_dma_len(sg)) < 0) + break; + } + + scb->dcdb.transfer_length = scb->data_len; + scb->dcdb.cmd_attribute |= + ips_command_direction[scb->scsi_cmd->cmnd[0]]; + + if (!(scb->dcdb.cmd_attribute & 0x3)) + scb->dcdb.transfer_length = 0; + + if (scb->data_len >= IPS_MAX_XFER) { + scb->dcdb.cmd_attribute |= IPS_TRANSFER64K; + scb->dcdb.transfer_length = 0; + } + + ret = ips_send_cmd(ha, scb); + + switch (ret) { + case IPS_FAILURE: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_ERROR << 16; + scsi_done(scb->scsi_cmd); + } + + ips_freescb(ha, scb); + break; + case IPS_SUCCESS_IMM: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_ERROR << 16; + scsi_done(scb->scsi_cmd); + } + + ips_freescb(ha, scb); + break; + default: + break; + } /* end case */ + + return; + } + } /* end if passthru */ + + if (scb->bus) { + ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id); + } + + scsi_done(scb->scsi_cmd); + + ips_freescb(ha, scb); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_map_status */ +/* */ +/* Routine Description: */ +/* */ +/* Map Controller Error codes to Linux Error Codes */ +/* */ +/****************************************************************************/ +static int +ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp) +{ + int errcode; + int device_error; + uint32_t transfer_len; + IPS_DCDB_TABLE_TAPE *tapeDCDB; + IPS_SCSI_INQ_DATA inquiryData; + + METHOD_TRACE("ips_map_status", 1); + + if (scb->bus) { + DEBUG_VAR(2, + "(%s%d) Physical device error (%d %d %d): %x %x, Sense Key: %x, ASC: %x, ASCQ: %x", + ips_name, ha->host_num, + scb->scsi_cmd->device->channel, + scb->scsi_cmd->device->id, scb->scsi_cmd->device->lun, + scb->basic_status, scb->extended_status, + scb->extended_status == + IPS_ERR_CKCOND ? scb->dcdb.sense_info[2] & 0xf : 0, + scb->extended_status == + IPS_ERR_CKCOND ? scb->dcdb.sense_info[12] : 0, + scb->extended_status == + IPS_ERR_CKCOND ? 
scb->dcdb.sense_info[13] : 0); + } + + /* default driver error */ + errcode = DID_ERROR; + device_error = 0; + + switch (scb->basic_status & IPS_GSC_STATUS_MASK) { + case IPS_CMD_TIMEOUT: + errcode = DID_TIME_OUT; + break; + + case IPS_INVAL_OPCO: + case IPS_INVAL_CMD_BLK: + case IPS_INVAL_PARM_BLK: + case IPS_LD_ERROR: + case IPS_CMD_CMPLT_WERROR: + break; + + case IPS_PHYS_DRV_ERROR: + switch (scb->extended_status) { + case IPS_ERR_SEL_TO: + if (scb->bus) + errcode = DID_NO_CONNECT; + + break; + + case IPS_ERR_OU_RUN: + if ((scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB) || + (scb->cmd.dcdb.op_code == + IPS_CMD_EXTENDED_DCDB_SG)) { + tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb; + transfer_len = tapeDCDB->transfer_length; + } else { + transfer_len = + (uint32_t) scb->dcdb.transfer_length; + } + + if ((scb->bus) && (transfer_len < scb->data_len)) { + /* Underrun - set default to no error */ + errcode = DID_OK; + + /* Restrict access to physical DASD */ + if (scb->scsi_cmd->cmnd[0] == INQUIRY) { + ips_scmd_buf_read(scb->scsi_cmd, + &inquiryData, sizeof (inquiryData)); + if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK) { + errcode = DID_TIME_OUT; + break; + } + } + } else + errcode = DID_ERROR; + + break; + + case IPS_ERR_RECOVERY: + /* don't fail recovered errors */ + if (scb->bus) + errcode = DID_OK; + + break; + + case IPS_ERR_HOST_RESET: + case IPS_ERR_DEV_RESET: + errcode = DID_RESET; + break; + + case IPS_ERR_CKCOND: + if (scb->bus) { + if ((scb->cmd.dcdb.op_code == + IPS_CMD_EXTENDED_DCDB) + || (scb->cmd.dcdb.op_code == + IPS_CMD_EXTENDED_DCDB_SG)) { + tapeDCDB = + (IPS_DCDB_TABLE_TAPE *) & scb->dcdb; + memcpy_and_pad(scb->scsi_cmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE, + tapeDCDB->sense_info, + sizeof(tapeDCDB->sense_info), 0); + } else { + memcpy_and_pad(scb->scsi_cmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE, + scb->dcdb.sense_info, + sizeof(scb->dcdb.sense_info), 0); + } + device_error = 2; /* check condition */ + } + + errcode = DID_OK; + + break; + + default: + errcode = DID_ERROR; + break; + + } /* end switch */ + } /* end switch */ + + scb->scsi_cmd->result = device_error | (errcode << 16); + + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_send_wait */ +/* */ +/* Routine Description: */ +/* */ +/* Send a command to the controller and wait for it to return */ +/* */ +/* The FFDC Time Stamp use this function for the callback, but doesn't */ +/* actually need to wait. 
*/ +/****************************************************************************/ +static int +ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr) +{ + int ret; + + METHOD_TRACE("ips_send_wait", 1); + + if (intr != IPS_FFDC) { /* Won't be Waiting if this is a Time Stamp */ + ha->waitflag = true; + ha->cmd_in_progress = scb->cdb[0]; + } + scb->callback = ipsintr_blocking; + ret = ips_send_cmd(ha, scb); + + if ((ret == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM)) + return (ret); + + if (intr != IPS_FFDC) /* Don't Wait around if this is a Time Stamp */ + ret = ips_wait(ha, timeout, intr); + + return (ret); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_scmd_buf_write */ +/* */ +/* Routine Description: */ +/* Write data to struct scsi_cmnd request_buffer at proper offsets */ +/****************************************************************************/ +static void +ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count) +{ + unsigned long flags; + + local_irq_save(flags); + scsi_sg_copy_from_buffer(scmd, data, count); + local_irq_restore(flags); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_scmd_buf_read */ +/* */ +/* Routine Description: */ +/* Copy data from a struct scsi_cmnd to a new, linear buffer */ +/****************************************************************************/ +static void +ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count) +{ + unsigned long flags; + + local_irq_save(flags); + scsi_sg_copy_to_buffer(scmd, data, count); + local_irq_restore(flags); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_send_cmd */ +/* */ +/* Routine Description: */ +/* */ +/* Map SCSI commands to ServeRAID commands for logical drives */ +/* */ +/****************************************************************************/ +static int +ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb) +{ + int ret; + char *sp; + int device_error; + IPS_DCDB_TABLE_TAPE *tapeDCDB; + int TimeOut; + + METHOD_TRACE("ips_send_cmd", 1); + + ret = IPS_SUCCESS; + + if (!scb->scsi_cmd) { + /* internal command */ + + if (scb->bus > 0) { + /* Controller commands can't be issued */ + /* to real devices -- fail them */ + if (ha->waitflag && ha->cmd_in_progress == scb->cdb[0]) + ha->waitflag = false; + + return (1); + } + } else if ((scb->bus == 0) && (!ips_is_passthru(scb->scsi_cmd))) { + /* command to logical bus -- interpret */ + ret = IPS_SUCCESS_IMM; + + switch (scb->scsi_cmd->cmnd[0]) { + case ALLOW_MEDIUM_REMOVAL: + case REZERO_UNIT: + case ERASE: + case WRITE_FILEMARKS: + case SPACE: + scb->scsi_cmd->result = DID_ERROR << 16; + break; + + case START_STOP: + scb->scsi_cmd->result = DID_OK << 16; + break; + + case TEST_UNIT_READY: + case INQUIRY: + if (scb->target_id == IPS_ADAPTER_ID) { + /* + * Either we have a TUR + * or we have a SCSI inquiry + */ + if (scb->scsi_cmd->cmnd[0] == TEST_UNIT_READY) + scb->scsi_cmd->result = DID_OK << 16; + + if (scb->scsi_cmd->cmnd[0] == INQUIRY) { + IPS_SCSI_INQ_DATA inquiry; + + memset(&inquiry, 0, + sizeof (IPS_SCSI_INQ_DATA)); + + inquiry.DeviceType = + IPS_SCSI_INQ_TYPE_PROCESSOR; + inquiry.DeviceTypeQualifier = + IPS_SCSI_INQ_LU_CONNECTED; + inquiry.Version = IPS_SCSI_INQ_REV2; + inquiry.ResponseDataFormat = + IPS_SCSI_INQ_RD_REV2; + inquiry.AdditionalLength = 31; + inquiry.Flags[0] = + IPS_SCSI_INQ_Address16; + inquiry.Flags[1] = 
+ IPS_SCSI_INQ_WBus16 | + IPS_SCSI_INQ_Sync; + memcpy(inquiry.VendorId, "IBM ", + 8); + memcpy(inquiry.ProductId, + "SERVERAID ", 16); + memcpy(inquiry.ProductRevisionLevel, + "1.00", 4); + + ips_scmd_buf_write(scb->scsi_cmd, + &inquiry, + sizeof (inquiry)); + + scb->scsi_cmd->result = DID_OK << 16; + } + } else { + scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO; + scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.logical_info.reserved = 0; + scb->cmd.logical_info.reserved2 = 0; + scb->data_len = sizeof (IPS_LD_INFO); + scb->data_busaddr = ha->logical_drive_info_dma_addr; + scb->flags = 0; + scb->cmd.logical_info.buffer_addr = scb->data_busaddr; + ret = IPS_SUCCESS; + } + + break; + + case REQUEST_SENSE: + ips_reqsen(ha, scb); + scb->scsi_cmd->result = DID_OK << 16; + break; + + case READ_6: + case WRITE_6: + if (!scb->sg_len) { + scb->cmd.basic_io.op_code = + (scb->scsi_cmd->cmnd[0] == + READ_6) ? IPS_CMD_READ : IPS_CMD_WRITE; + scb->cmd.basic_io.enhanced_sg = 0; + scb->cmd.basic_io.sg_addr = + cpu_to_le32(scb->data_busaddr); + } else { + scb->cmd.basic_io.op_code = + (scb->scsi_cmd->cmnd[0] == + READ_6) ? IPS_CMD_READ_SG : + IPS_CMD_WRITE_SG; + scb->cmd.basic_io.enhanced_sg = + IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0; + scb->cmd.basic_io.sg_addr = + cpu_to_le32(scb->sg_busaddr); + } + + scb->cmd.basic_io.segment_4G = 0; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.basic_io.log_drv = scb->target_id; + scb->cmd.basic_io.sg_count = scb->sg_len; + + if (scb->cmd.basic_io.lba) + le32_add_cpu(&scb->cmd.basic_io.lba, + le16_to_cpu(scb->cmd.basic_io. + sector_count)); + else + scb->cmd.basic_io.lba = + (((scb->scsi_cmd-> + cmnd[1] & 0x1f) << 16) | (scb->scsi_cmd-> + cmnd[2] << 8) | + (scb->scsi_cmd->cmnd[3])); + + scb->cmd.basic_io.sector_count = + cpu_to_le16(scb->data_len / IPS_BLKSIZE); + + if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0) + scb->cmd.basic_io.sector_count = + cpu_to_le16(256); + + ret = IPS_SUCCESS; + break; + + case READ_10: + case WRITE_10: + if (!scb->sg_len) { + scb->cmd.basic_io.op_code = + (scb->scsi_cmd->cmnd[0] == + READ_10) ? IPS_CMD_READ : IPS_CMD_WRITE; + scb->cmd.basic_io.enhanced_sg = 0; + scb->cmd.basic_io.sg_addr = + cpu_to_le32(scb->data_busaddr); + } else { + scb->cmd.basic_io.op_code = + (scb->scsi_cmd->cmnd[0] == + READ_10) ? IPS_CMD_READ_SG : + IPS_CMD_WRITE_SG; + scb->cmd.basic_io.enhanced_sg = + IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0; + scb->cmd.basic_io.sg_addr = + cpu_to_le32(scb->sg_busaddr); + } + + scb->cmd.basic_io.segment_4G = 0; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.basic_io.log_drv = scb->target_id; + scb->cmd.basic_io.sg_count = scb->sg_len; + + if (scb->cmd.basic_io.lba) + le32_add_cpu(&scb->cmd.basic_io.lba, + le16_to_cpu(scb->cmd.basic_io. 
+ sector_count)); + else + scb->cmd.basic_io.lba = + ((scb->scsi_cmd->cmnd[2] << 24) | (scb-> + scsi_cmd-> + cmnd[3] + << 16) | + (scb->scsi_cmd->cmnd[4] << 8) | scb-> + scsi_cmd->cmnd[5]); + + scb->cmd.basic_io.sector_count = + cpu_to_le16(scb->data_len / IPS_BLKSIZE); + + if (cpu_to_le16(scb->cmd.basic_io.sector_count) == 0) { + /* + * This is a null condition + * we don't have to do anything + * so just return + */ + scb->scsi_cmd->result = DID_OK << 16; + } else + ret = IPS_SUCCESS; + + break; + + case RESERVE: + case RELEASE: + scb->scsi_cmd->result = DID_OK << 16; + break; + + case MODE_SENSE: + scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.basic_io.segment_4G = 0; + scb->cmd.basic_io.enhanced_sg = 0; + scb->data_len = sizeof (*ha->enq); + scb->cmd.basic_io.sg_addr = ha->enq_busaddr; + ret = IPS_SUCCESS; + break; + + case READ_CAPACITY: + scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO; + scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.logical_info.reserved = 0; + scb->cmd.logical_info.reserved2 = 0; + scb->cmd.logical_info.reserved3 = 0; + scb->data_len = sizeof (IPS_LD_INFO); + scb->data_busaddr = ha->logical_drive_info_dma_addr; + scb->flags = 0; + scb->cmd.logical_info.buffer_addr = scb->data_busaddr; + ret = IPS_SUCCESS; + break; + + case SEND_DIAGNOSTIC: + case REASSIGN_BLOCKS: + case FORMAT_UNIT: + case SEEK_10: + case VERIFY: + case READ_DEFECT_DATA: + case READ_BUFFER: + case WRITE_BUFFER: + scb->scsi_cmd->result = DID_OK << 16; + break; + + default: + /* Set the Return Info to appear like the Command was */ + /* attempted, a Check Condition occurred, and Sense */ + /* Data indicating an Invalid CDB OpCode is returned. */ + sp = (char *) scb->scsi_cmd->sense_buffer; + + sp[0] = 0x70; /* Error Code */ + sp[2] = ILLEGAL_REQUEST; /* Sense Key 5 Illegal Req. */ + sp[7] = 0x0A; /* Additional Sense Length */ + sp[12] = 0x20; /* ASC = Invalid OpCode */ + sp[13] = 0x00; /* ASCQ */ + + device_error = 2; /* Indicate Check Condition */ + scb->scsi_cmd->result = device_error | (DID_OK << 16); + break; + } /* end switch */ + } + /* end if */ + if (ret == IPS_SUCCESS_IMM) + return (ret); + + /* setup DCDB */ + if (scb->bus > 0) { + + /* If we already know the Device is Not there, no need to attempt a Command */ + /* This also protects an NT FailOver Controller from getting CDB's sent to it */ + if (ha->conf->dev[scb->bus - 1][scb->target_id].ucState == 0) { + scb->scsi_cmd->result = DID_NO_CONNECT << 16; + return (IPS_SUCCESS_IMM); + } + + ha->dcdb_active[scb->bus - 1] |= (1 << scb->target_id); + scb->cmd.dcdb.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr + + (unsigned long) &scb-> + dcdb - + (unsigned long) scb); + scb->cmd.dcdb.reserved = 0; + scb->cmd.dcdb.reserved2 = 0; + scb->cmd.dcdb.reserved3 = 0; + scb->cmd.dcdb.segment_4G = 0; + scb->cmd.dcdb.enhanced_sg = 0; + + TimeOut = scsi_cmd_to_rq(scb->scsi_cmd)->timeout; + + if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */ + if (!scb->sg_len) { + scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB; + } else { + scb->cmd.dcdb.op_code = + IPS_CMD_EXTENDED_DCDB_SG; + scb->cmd.dcdb.enhanced_sg = + IPS_USE_ENH_SGLIST(ha) ? 
0xFF : 0; + } + + tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb; /* Use Same Data Area as Old DCDB Struct */ + tapeDCDB->device_address = + ((scb->bus - 1) << 4) | scb->target_id; + tapeDCDB->cmd_attribute |= IPS_DISCONNECT_ALLOWED; + tapeDCDB->cmd_attribute &= ~IPS_TRANSFER64K; /* Always Turn OFF 64K Size Flag */ + + if (TimeOut) { + if (TimeOut < (10 * HZ)) + tapeDCDB->cmd_attribute |= IPS_TIMEOUT10; /* TimeOut is 10 Seconds */ + else if (TimeOut < (60 * HZ)) + tapeDCDB->cmd_attribute |= IPS_TIMEOUT60; /* TimeOut is 60 Seconds */ + else if (TimeOut < (1200 * HZ)) + tapeDCDB->cmd_attribute |= IPS_TIMEOUT20M; /* TimeOut is 20 Minutes */ + } + + tapeDCDB->cdb_length = scb->scsi_cmd->cmd_len; + tapeDCDB->reserved_for_LUN = 0; + tapeDCDB->transfer_length = scb->data_len; + if (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG) + tapeDCDB->buffer_pointer = + cpu_to_le32(scb->sg_busaddr); + else + tapeDCDB->buffer_pointer = + cpu_to_le32(scb->data_busaddr); + tapeDCDB->sg_count = scb->sg_len; + tapeDCDB->sense_length = sizeof (tapeDCDB->sense_info); + tapeDCDB->scsi_status = 0; + tapeDCDB->reserved = 0; + memcpy(tapeDCDB->scsi_cdb, scb->scsi_cmd->cmnd, + scb->scsi_cmd->cmd_len); + } else { + if (!scb->sg_len) { + scb->cmd.dcdb.op_code = IPS_CMD_DCDB; + } else { + scb->cmd.dcdb.op_code = IPS_CMD_DCDB_SG; + scb->cmd.dcdb.enhanced_sg = + IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0; + } + + scb->dcdb.device_address = + ((scb->bus - 1) << 4) | scb->target_id; + scb->dcdb.cmd_attribute |= IPS_DISCONNECT_ALLOWED; + + if (TimeOut) { + if (TimeOut < (10 * HZ)) + scb->dcdb.cmd_attribute |= IPS_TIMEOUT10; /* TimeOut is 10 Seconds */ + else if (TimeOut < (60 * HZ)) + scb->dcdb.cmd_attribute |= IPS_TIMEOUT60; /* TimeOut is 60 Seconds */ + else if (TimeOut < (1200 * HZ)) + scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M; /* TimeOut is 20 Minutes */ + } + + scb->dcdb.transfer_length = scb->data_len; + if (scb->dcdb.cmd_attribute & IPS_TRANSFER64K) + scb->dcdb.transfer_length = 0; + if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB_SG) + scb->dcdb.buffer_pointer = + cpu_to_le32(scb->sg_busaddr); + else + scb->dcdb.buffer_pointer = + cpu_to_le32(scb->data_busaddr); + scb->dcdb.cdb_length = scb->scsi_cmd->cmd_len; + scb->dcdb.sense_length = sizeof (scb->dcdb.sense_info); + scb->dcdb.sg_count = scb->sg_len; + scb->dcdb.reserved = 0; + memcpy(scb->dcdb.scsi_cdb, scb->scsi_cmd->cmnd, + scb->scsi_cmd->cmd_len); + scb->dcdb.scsi_status = 0; + scb->dcdb.reserved2[0] = 0; + scb->dcdb.reserved2[1] = 0; + scb->dcdb.reserved2[2] = 0; + } + } + + return ((*ha->func.issue) (ha, scb)); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_chk_status */ +/* */ +/* Routine Description: */ +/* */ +/* Check the status of commands to logical drives */ +/* Assumed to be called with the HA lock */ +/****************************************************************************/ +static void +ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus) +{ + ips_scb_t *scb; + ips_stat_t *sp; + uint8_t basic_status; + uint8_t ext_status; + int errcode; + IPS_SCSI_INQ_DATA inquiryData; + + METHOD_TRACE("ips_chkstatus", 1); + + scb = &ha->scbs[pstatus->fields.command_id]; + scb->basic_status = basic_status = + pstatus->fields.basic_status & IPS_BASIC_STATUS_MASK; + scb->extended_status = ext_status = pstatus->fields.extended_status; + + sp = &ha->sp; + sp->residue_len = 0; + sp->scb_addr = (void *) scb; + + /* Remove the item from the active queue */ + ips_removeq_scb(&ha->scb_activelist, scb); + + if 
(!scb->scsi_cmd) + /* internal commands are handled in do_ipsintr */ + return; + + DEBUG_VAR(2, "(%s%d) ips_chkstatus: cmd 0x%X id %d (%d %d %d)", + ips_name, + ha->host_num, + scb->cdb[0], + scb->cmd.basic_io.command_id, + scb->bus, scb->target_id, scb->lun); + + if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) + /* passthru - just returns the raw result */ + return; + + errcode = DID_OK; + + if (((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_SUCCESS) || + ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR)) { + + if (scb->bus == 0) { + if ((basic_status & IPS_GSC_STATUS_MASK) == + IPS_CMD_RECOVERED_ERROR) { + DEBUG_VAR(1, + "(%s%d) Recovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x", + ips_name, ha->host_num, + scb->cmd.basic_io.op_code, + basic_status, ext_status); + } + + switch (scb->scsi_cmd->cmnd[0]) { + case ALLOW_MEDIUM_REMOVAL: + case REZERO_UNIT: + case ERASE: + case WRITE_FILEMARKS: + case SPACE: + errcode = DID_ERROR; + break; + + case START_STOP: + break; + + case TEST_UNIT_READY: + if (!ips_online(ha, scb)) { + errcode = DID_TIME_OUT; + } + break; + + case INQUIRY: + if (ips_online(ha, scb)) { + ips_inquiry(ha, scb); + } else { + errcode = DID_TIME_OUT; + } + break; + + case REQUEST_SENSE: + ips_reqsen(ha, scb); + break; + + case READ_6: + case WRITE_6: + case READ_10: + case WRITE_10: + case RESERVE: + case RELEASE: + break; + + case MODE_SENSE: + if (!ips_online(ha, scb) + || !ips_msense(ha, scb)) { + errcode = DID_ERROR; + } + break; + + case READ_CAPACITY: + if (ips_online(ha, scb)) + ips_rdcap(ha, scb); + else { + errcode = DID_TIME_OUT; + } + break; + + case SEND_DIAGNOSTIC: + case REASSIGN_BLOCKS: + break; + + case FORMAT_UNIT: + errcode = DID_ERROR; + break; + + case SEEK_10: + case VERIFY: + case READ_DEFECT_DATA: + case READ_BUFFER: + case WRITE_BUFFER: + break; + + default: + errcode = DID_ERROR; + } /* end switch */ + + scb->scsi_cmd->result = errcode << 16; + } else { /* bus == 0 */ + /* restrict access to physical drives */ + if (scb->scsi_cmd->cmnd[0] == INQUIRY) { + ips_scmd_buf_read(scb->scsi_cmd, + &inquiryData, sizeof (inquiryData)); + if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK) + scb->scsi_cmd->result = DID_TIME_OUT << 16; + } + } /* else */ + } else { /* recovered error / success */ + if (scb->bus == 0) { + DEBUG_VAR(1, + "(%s%d) Unrecovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x", + ips_name, ha->host_num, + scb->cmd.basic_io.op_code, basic_status, + ext_status); + } + + ips_map_status(ha, scb, sp); + } /* else */ +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_online */ +/* */ +/* Routine Description: */ +/* */ +/* Determine if a logical drive is online */ +/* */ +/****************************************************************************/ +static int +ips_online(ips_ha_t * ha, ips_scb_t * scb) +{ + METHOD_TRACE("ips_online", 1); + + if (scb->target_id >= IPS_MAX_LD) + return (0); + + if ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1) { + memset(ha->logical_drive_info, 0, sizeof (IPS_LD_INFO)); + return (0); + } + + if (ha->logical_drive_info->drive_info[scb->target_id].state != + IPS_LD_OFFLINE + && ha->logical_drive_info->drive_info[scb->target_id].state != + IPS_LD_FREE + && ha->logical_drive_info->drive_info[scb->target_id].state != + IPS_LD_CRS + && ha->logical_drive_info->drive_info[scb->target_id].state != + IPS_LD_SYS) + return (1); + else + return (0); +} + 
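+/*
+ * Editor's note (added summary, not part of the original driver source):
+ * for commands on the logical bus (scb->bus == 0), ips_chkstatus() above
+ * interprets the emulated SCSI opcodes itself and only falls back to
+ * ips_map_status() when the controller reports an unrecovered error.
+ * ips_map_status() builds the midlayer result word as
+ *
+ *     scb->scsi_cmd->result = device_error | (errcode << 16);
+ *
+ * where errcode is a DID_* host byte (DID_OK, DID_ERROR, DID_TIME_OUT, ...)
+ * and device_error is the SCSI status byte (2 == CHECK CONDITION);
+ * ips_chkstatus() does the same with device_error implicitly zero.
+ */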
+/****************************************************************************/ +/* */ +/* Routine Name: ips_inquiry */ +/* */ +/* Routine Description: */ +/* */ +/* Simulate an inquiry command to a logical drive */ +/* */ +/****************************************************************************/ +static int +ips_inquiry(ips_ha_t * ha, ips_scb_t * scb) +{ + IPS_SCSI_INQ_DATA inquiry; + + METHOD_TRACE("ips_inquiry", 1); + + memset(&inquiry, 0, sizeof (IPS_SCSI_INQ_DATA)); + + inquiry.DeviceType = IPS_SCSI_INQ_TYPE_DASD; + inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED; + inquiry.Version = IPS_SCSI_INQ_REV2; + inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2; + inquiry.AdditionalLength = 31; + inquiry.Flags[0] = IPS_SCSI_INQ_Address16; + inquiry.Flags[1] = + IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync | IPS_SCSI_INQ_CmdQue; + memcpy(inquiry.VendorId, "IBM ", 8); + memcpy(inquiry.ProductId, "SERVERAID ", 16); + memcpy(inquiry.ProductRevisionLevel, "1.00", 4); + + ips_scmd_buf_write(scb->scsi_cmd, &inquiry, sizeof (inquiry)); + + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_rdcap */ +/* */ +/* Routine Description: */ +/* */ +/* Simulate a read capacity command to a logical drive */ +/* */ +/****************************************************************************/ +static int +ips_rdcap(ips_ha_t * ha, ips_scb_t * scb) +{ + IPS_SCSI_CAPACITY cap; + + METHOD_TRACE("ips_rdcap", 1); + + if (scsi_bufflen(scb->scsi_cmd) < 8) + return (0); + + cap.lba = + cpu_to_be32(le32_to_cpu + (ha->logical_drive_info-> + drive_info[scb->target_id].sector_count) - 1); + cap.len = cpu_to_be32((uint32_t) IPS_BLKSIZE); + + ips_scmd_buf_write(scb->scsi_cmd, &cap, sizeof (cap)); + + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_msense */ +/* */ +/* Routine Description: */ +/* */ +/* Simulate a mode sense command to a logical drive */ +/* */ +/****************************************************************************/ +static int +ips_msense(ips_ha_t * ha, ips_scb_t * scb) +{ + uint16_t heads; + uint16_t sectors; + uint32_t cylinders; + IPS_SCSI_MODE_PAGE_DATA mdata; + + METHOD_TRACE("ips_msense", 1); + + if (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) > 0x400000 && + (ha->enq->ucMiscFlag & 0x8) == 0) { + heads = IPS_NORM_HEADS; + sectors = IPS_NORM_SECTORS; + } else { + heads = IPS_COMP_HEADS; + sectors = IPS_COMP_SECTORS; + } + + cylinders = + (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) - + 1) / (heads * sectors); + + memset(&mdata, 0, sizeof (IPS_SCSI_MODE_PAGE_DATA)); + + mdata.hdr.BlockDescLength = 8; + + switch (scb->scsi_cmd->cmnd[2] & 0x3f) { + case 0x03: /* page 3 */ + mdata.pdata.pg3.PageCode = 3; + mdata.pdata.pg3.PageLength = sizeof (IPS_SCSI_MODE_PAGE3); + mdata.hdr.DataLength = + 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg3.PageLength; + mdata.pdata.pg3.TracksPerZone = 0; + mdata.pdata.pg3.AltSectorsPerZone = 0; + mdata.pdata.pg3.AltTracksPerZone = 0; + mdata.pdata.pg3.AltTracksPerVolume = 0; + mdata.pdata.pg3.SectorsPerTrack = cpu_to_be16(sectors); + mdata.pdata.pg3.BytesPerSector = cpu_to_be16(IPS_BLKSIZE); + mdata.pdata.pg3.Interleave = cpu_to_be16(1); + mdata.pdata.pg3.TrackSkew = 0; + mdata.pdata.pg3.CylinderSkew = 0; + mdata.pdata.pg3.flags = IPS_SCSI_MP3_SoftSector; + break; + + case 0x4: + mdata.pdata.pg4.PageCode = 4; + mdata.pdata.pg4.PageLength = sizeof (IPS_SCSI_MODE_PAGE4); + mdata.hdr.DataLength = + 3 + 
mdata.hdr.BlockDescLength + mdata.pdata.pg4.PageLength; + mdata.pdata.pg4.CylindersHigh = + cpu_to_be16((cylinders >> 8) & 0xFFFF); + mdata.pdata.pg4.CylindersLow = (cylinders & 0xFF); + mdata.pdata.pg4.Heads = heads; + mdata.pdata.pg4.WritePrecompHigh = 0; + mdata.pdata.pg4.WritePrecompLow = 0; + mdata.pdata.pg4.ReducedWriteCurrentHigh = 0; + mdata.pdata.pg4.ReducedWriteCurrentLow = 0; + mdata.pdata.pg4.StepRate = cpu_to_be16(1); + mdata.pdata.pg4.LandingZoneHigh = 0; + mdata.pdata.pg4.LandingZoneLow = 0; + mdata.pdata.pg4.flags = 0; + mdata.pdata.pg4.RotationalOffset = 0; + mdata.pdata.pg4.MediumRotationRate = 0; + break; + case 0x8: + mdata.pdata.pg8.PageCode = 8; + mdata.pdata.pg8.PageLength = sizeof (IPS_SCSI_MODE_PAGE8); + mdata.hdr.DataLength = + 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg8.PageLength; + /* everything else is left set to 0 */ + break; + + default: + return (0); + } /* end switch */ + + ips_scmd_buf_write(scb->scsi_cmd, &mdata, sizeof (mdata)); + + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_reqsen */ +/* */ +/* Routine Description: */ +/* */ +/* Simulate a request sense command to a logical drive */ +/* */ +/****************************************************************************/ +static int +ips_reqsen(ips_ha_t * ha, ips_scb_t * scb) +{ + IPS_SCSI_REQSEN reqsen; + + METHOD_TRACE("ips_reqsen", 1); + + memset(&reqsen, 0, sizeof (IPS_SCSI_REQSEN)); + + reqsen.ResponseCode = + IPS_SCSI_REQSEN_VALID | IPS_SCSI_REQSEN_CURRENT_ERR; + reqsen.AdditionalLength = 10; + reqsen.AdditionalSenseCode = IPS_SCSI_REQSEN_NO_SENSE; + reqsen.AdditionalSenseCodeQual = IPS_SCSI_REQSEN_NO_SENSE; + + ips_scmd_buf_write(scb->scsi_cmd, &reqsen, sizeof (reqsen)); + + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_free */ +/* */ +/* Routine Description: */ +/* */ +/* Free any allocated space for this controller */ +/* */ +/****************************************************************************/ +static void +ips_free(ips_ha_t * ha) +{ + + METHOD_TRACE("ips_free", 1); + + if (ha) { + if (ha->enq) { + dma_free_coherent(&ha->pcidev->dev, sizeof(IPS_ENQ), + ha->enq, ha->enq_busaddr); + ha->enq = NULL; + } + + kfree(ha->conf); + ha->conf = NULL; + + if (ha->adapt) { + dma_free_coherent(&ha->pcidev->dev, + sizeof (IPS_ADAPTER) + + sizeof (IPS_IO_CMD), ha->adapt, + ha->adapt->hw_status_start); + ha->adapt = NULL; + } + + if (ha->logical_drive_info) { + dma_free_coherent(&ha->pcidev->dev, + sizeof (IPS_LD_INFO), + ha->logical_drive_info, + ha->logical_drive_info_dma_addr); + ha->logical_drive_info = NULL; + } + + kfree(ha->nvram); + ha->nvram = NULL; + + kfree(ha->subsys); + ha->subsys = NULL; + + if (ha->ioctl_data) { + dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len, + ha->ioctl_data, ha->ioctl_busaddr); + ha->ioctl_data = NULL; + ha->ioctl_datasize = 0; + ha->ioctl_len = 0; + } + ips_deallocatescbs(ha, ha->max_cmds); + + /* free memory mapped (if applicable) */ + if (ha->mem_ptr) { + iounmap(ha->ioremap_ptr); + ha->ioremap_ptr = NULL; + ha->mem_ptr = NULL; + } + + ha->mem_addr = 0; + + } +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_deallocatescbs */ +/* */ +/* Routine Description: */ +/* */ +/* Free the command blocks */ +/* */ +/****************************************************************************/ +static int +ips_deallocatescbs(ips_ha_t * ha, int 
cmds) +{ + if (ha->scbs) { + dma_free_coherent(&ha->pcidev->dev, + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds, + ha->scbs->sg_list.list, + ha->scbs->sg_busaddr); + dma_free_coherent(&ha->pcidev->dev, sizeof (ips_scb_t) * cmds, + ha->scbs, ha->scbs->scb_busaddr); + ha->scbs = NULL; + } /* end if */ + return 1; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_allocatescbs */ +/* */ +/* Routine Description: */ +/* */ +/* Allocate the command blocks */ +/* */ +/****************************************************************************/ +static int +ips_allocatescbs(ips_ha_t * ha) +{ + ips_scb_t *scb_p; + IPS_SG_LIST ips_sg; + int i; + dma_addr_t command_dma, sg_dma; + + METHOD_TRACE("ips_allocatescbs", 1); + + /* Allocate memory for the SCBs */ + ha->scbs = dma_alloc_coherent(&ha->pcidev->dev, + ha->max_cmds * sizeof (ips_scb_t), + &command_dma, GFP_KERNEL); + if (ha->scbs == NULL) + return 0; + ips_sg.list = dma_alloc_coherent(&ha->pcidev->dev, + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * ha->max_cmds, + &sg_dma, GFP_KERNEL); + if (ips_sg.list == NULL) { + dma_free_coherent(&ha->pcidev->dev, + ha->max_cmds * sizeof (ips_scb_t), ha->scbs, + command_dma); + return 0; + } + + memset(ha->scbs, 0, ha->max_cmds * sizeof (ips_scb_t)); + + for (i = 0; i < ha->max_cmds; i++) { + scb_p = &ha->scbs[i]; + scb_p->scb_busaddr = command_dma + sizeof (ips_scb_t) * i; + /* set up S/G list */ + if (IPS_USE_ENH_SGLIST(ha)) { + scb_p->sg_list.enh_list = + ips_sg.enh_list + i * IPS_MAX_SG; + scb_p->sg_busaddr = + sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i; + } else { + scb_p->sg_list.std_list = + ips_sg.std_list + i * IPS_MAX_SG; + scb_p->sg_busaddr = + sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i; + } + + /* add to the free list */ + if (i < ha->max_cmds - 1) { + scb_p->q_next = ha->scb_freelist; + ha->scb_freelist = scb_p; + } + } + + /* success */ + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_init_scb */ +/* */ +/* Routine Description: */ +/* */ +/* Initialize a CCB to default values */ +/* */ +/****************************************************************************/ +static void +ips_init_scb(ips_ha_t * ha, ips_scb_t * scb) +{ + IPS_SG_LIST sg_list; + uint32_t cmd_busaddr, sg_busaddr; + METHOD_TRACE("ips_init_scb", 1); + + if (scb == NULL) + return; + + sg_list.list = scb->sg_list.list; + cmd_busaddr = scb->scb_busaddr; + sg_busaddr = scb->sg_busaddr; + /* zero fill */ + memset(scb, 0, sizeof (ips_scb_t)); + memset(ha->dummy, 0, sizeof (IPS_IO_CMD)); + + /* Initialize dummy command bucket */ + ha->dummy->op_code = 0xFF; + ha->dummy->ccsar = cpu_to_le32(ha->adapt->hw_status_start + + sizeof (IPS_ADAPTER)); + ha->dummy->command_id = IPS_MAX_CMDS; + + /* set bus address of scb */ + scb->scb_busaddr = cmd_busaddr; + scb->sg_busaddr = sg_busaddr; + scb->sg_list.list = sg_list.list; + + /* Neptune Fix */ + scb->cmd.basic_io.cccr = cpu_to_le32((uint32_t) IPS_BIT_ILE); + scb->cmd.basic_io.ccsar = cpu_to_le32(ha->adapt->hw_status_start + + sizeof (IPS_ADAPTER)); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_get_scb */ +/* */ +/* Routine Description: */ +/* */ +/* Initialize a CCB to default values */ +/* */ +/* ASSUMED to be called from within a lock */ +/* */ +/****************************************************************************/ +static ips_scb_t * +ips_getscb(ips_ha_t * ha) +{ + ips_scb_t *scb; + 
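+	/*
+	 * Editor's note: the free list is a simple singly linked stack
+	 * threaded through scb->q_next and built in ips_allocatescbs()
+	 * above; as the routine header says, the caller is assumed to
+	 * hold the HA lock.
+	 */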
+ METHOD_TRACE("ips_getscb", 1); + + if ((scb = ha->scb_freelist) == NULL) { + + return (NULL); + } + + ha->scb_freelist = scb->q_next; + scb->flags = 0; + scb->q_next = NULL; + + ips_init_scb(ha, scb); + + return (scb); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_free_scb */ +/* */ +/* Routine Description: */ +/* */ +/* Return an unused CCB back to the free list */ +/* */ +/* ASSUMED to be called from within a lock */ +/* */ +/****************************************************************************/ +static void +ips_freescb(ips_ha_t * ha, ips_scb_t * scb) +{ + + METHOD_TRACE("ips_freescb", 1); + if (scb->flags & IPS_SCB_MAP_SG) + scsi_dma_unmap(scb->scsi_cmd); + else if (scb->flags & IPS_SCB_MAP_SINGLE) + dma_unmap_single(&ha->pcidev->dev, scb->data_busaddr, + scb->data_len, IPS_DMA_DIR(scb)); + + /* check to make sure this is not our "special" scb */ + if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) { + scb->q_next = ha->scb_freelist; + ha->scb_freelist = scb; + } +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_isinit_copperhead */ +/* */ +/* Routine Description: */ +/* */ +/* Is controller initialized ? */ +/* */ +/****************************************************************************/ +static int +ips_isinit_copperhead(ips_ha_t * ha) +{ + uint8_t scpr; + uint8_t isr; + + METHOD_TRACE("ips_isinit_copperhead", 1); + + isr = inb(ha->io_addr + IPS_REG_HISR); + scpr = inb(ha->io_addr + IPS_REG_SCPR); + + if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0)) + return (0); + else + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_isinit_copperhead_memio */ +/* */ +/* Routine Description: */ +/* */ +/* Is controller initialized ? */ +/* */ +/****************************************************************************/ +static int +ips_isinit_copperhead_memio(ips_ha_t * ha) +{ + uint8_t isr = 0; + uint8_t scpr; + + METHOD_TRACE("ips_is_init_copperhead_memio", 1); + + isr = readb(ha->mem_ptr + IPS_REG_HISR); + scpr = readb(ha->mem_ptr + IPS_REG_SCPR); + + if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0)) + return (0); + else + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_isinit_morpheus */ +/* */ +/* Routine Description: */ +/* */ +/* Is controller initialized ? */ +/* */ +/****************************************************************************/ +static int +ips_isinit_morpheus(ips_ha_t * ha) +{ + uint32_t post; + uint32_t bits; + + METHOD_TRACE("ips_is_init_morpheus", 1); + + if (ips_isintr_morpheus(ha)) + ips_flush_and_reset(ha); + + post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); + bits = readl(ha->mem_ptr + IPS_REG_I2O_HIR); + + if (post == 0) + return (0); + else if (bits & 0x3) + return (0); + else + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_flush_and_reset */ +/* */ +/* Routine Description: */ +/* */ +/* Perform cleanup ( FLUSH and RESET ) when the adapter is in an unknown */ +/* state ( was trying to INIT and an interrupt was already pending ) ... 
*/ +/* */ +/****************************************************************************/ +static void +ips_flush_and_reset(ips_ha_t *ha) +{ + ips_scb_t *scb; + int ret; + int time; + int done; + dma_addr_t command_dma; + + /* Create a usuable SCB */ + scb = dma_alloc_coherent(&ha->pcidev->dev, sizeof(ips_scb_t), + &command_dma, GFP_KERNEL); + if (scb) { + memset(scb, 0, sizeof(ips_scb_t)); + ips_init_scb(ha, scb); + scb->scb_busaddr = command_dma; + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FLUSH; + + scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; + scb->cmd.flush_cache.command_id = IPS_MAX_CMDS; /* Use an ID that would otherwise not exist */ + scb->cmd.flush_cache.state = IPS_NORM_STATE; + scb->cmd.flush_cache.reserved = 0; + scb->cmd.flush_cache.reserved2 = 0; + scb->cmd.flush_cache.reserved3 = 0; + scb->cmd.flush_cache.reserved4 = 0; + + ret = ips_send_cmd(ha, scb); /* Send the Flush Command */ + + if (ret == IPS_SUCCESS) { + time = 60 * IPS_ONE_SEC; /* Max Wait time is 60 seconds */ + done = 0; + + while ((time > 0) && (!done)) { + done = ips_poll_for_flush_complete(ha); + /* This may look evil, but it's only done during extremely rare start-up conditions ! */ + udelay(1000); + time--; + } + } + } + + /* Now RESET and INIT the adapter */ + (*ha->func.reset) (ha); + + dma_free_coherent(&ha->pcidev->dev, sizeof(ips_scb_t), scb, command_dma); + return; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_poll_for_flush_complete */ +/* */ +/* Routine Description: */ +/* */ +/* Poll for the Flush Command issued by ips_flush_and_reset() to complete */ +/* All other responses are just taken off the queue and ignored */ +/* */ +/****************************************************************************/ +static int +ips_poll_for_flush_complete(ips_ha_t * ha) +{ + IPS_STATUS cstatus; + + while (true) { + cstatus.value = (*ha->func.statupd) (ha); + + if (cstatus.value == 0xffffffff) /* If No Interrupt to process */ + break; + + /* Success is when we see the Flush Command ID */ + if (cstatus.fields.command_id == IPS_MAX_CMDS) + return 1; + } + + return 0; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_enable_int_copperhead */ +/* */ +/* Routine Description: */ +/* Turn on interrupts */ +/* */ +/****************************************************************************/ +static void +ips_enable_int_copperhead(ips_ha_t * ha) +{ + METHOD_TRACE("ips_enable_int_copperhead", 1); + + outb(ha->io_addr + IPS_REG_HISR, IPS_BIT_EI); + inb(ha->io_addr + IPS_REG_HISR); /*Ensure PCI Posting Completes*/ +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_enable_int_copperhead_memio */ +/* */ +/* Routine Description: */ +/* Turn on interrupts */ +/* */ +/****************************************************************************/ +static void +ips_enable_int_copperhead_memio(ips_ha_t * ha) +{ + METHOD_TRACE("ips_enable_int_copperhead_memio", 1); + + writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR); + readb(ha->mem_ptr + IPS_REG_HISR); /*Ensure PCI Posting Completes*/ +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_enable_int_morpheus */ +/* */ +/* Routine Description: */ +/* Turn on interrupts */ +/* */ +/****************************************************************************/ +static void +ips_enable_int_morpheus(ips_ha_t * ha) +{ + 
uint32_t Oimr; + + METHOD_TRACE("ips_enable_int_morpheus", 1); + + Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR); + Oimr &= ~0x08; + writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR); + readl(ha->mem_ptr + IPS_REG_I960_OIMR); /*Ensure PCI Posting Completes*/ +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_init_copperhead */ +/* */ +/* Routine Description: */ +/* */ +/* Initialize a copperhead controller */ +/* */ +/****************************************************************************/ +static int +ips_init_copperhead(ips_ha_t * ha) +{ + uint8_t Isr; + uint8_t Cbsp; + uint8_t PostByte[IPS_MAX_POST_BYTES]; + int i, j; + + METHOD_TRACE("ips_init_copperhead", 1); + + for (i = 0; i < IPS_MAX_POST_BYTES; i++) { + for (j = 0; j < 45; j++) { + Isr = inb(ha->io_addr + IPS_REG_HISR); + if (Isr & IPS_BIT_GHI) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (j >= 45) + /* error occurred */ + return (0); + + PostByte[i] = inb(ha->io_addr + IPS_REG_ISPR); + outb(Isr, ha->io_addr + IPS_REG_HISR); + } + + if (PostByte[0] < IPS_GOOD_POST_STATUS) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "reset controller fails (post status %x %x).\n", + PostByte[0], PostByte[1]); + + return (0); + } + + for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) { + for (j = 0; j < 240; j++) { + Isr = inb(ha->io_addr + IPS_REG_HISR); + if (Isr & IPS_BIT_GHI) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (j >= 240) + /* error occurred */ + return (0); + + inb(ha->io_addr + IPS_REG_ISPR); + outb(Isr, ha->io_addr + IPS_REG_HISR); + } + + for (i = 0; i < 240; i++) { + Cbsp = inb(ha->io_addr + IPS_REG_CBSP); + + if ((Cbsp & IPS_BIT_OP) == 0) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (i >= 240) + /* reset failed */ + return (0); + + /* setup CCCR */ + outl(0x1010, ha->io_addr + IPS_REG_CCCR); + + /* Enable busmastering */ + outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR); + + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + /* fix for anaconda64 */ + outl(0, ha->io_addr + IPS_REG_NDAE); + + /* Enable interrupts */ + outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR); + + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_init_copperhead_memio */ +/* */ +/* Routine Description: */ +/* */ +/* Initialize a copperhead controller with memory mapped I/O */ +/* */ +/****************************************************************************/ +static int +ips_init_copperhead_memio(ips_ha_t * ha) +{ + uint8_t Isr = 0; + uint8_t Cbsp; + uint8_t PostByte[IPS_MAX_POST_BYTES]; + int i, j; + + METHOD_TRACE("ips_init_copperhead_memio", 1); + + for (i = 0; i < IPS_MAX_POST_BYTES; i++) { + for (j = 0; j < 45; j++) { + Isr = readb(ha->mem_ptr + IPS_REG_HISR); + if (Isr & IPS_BIT_GHI) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (j >= 45) + /* error occurred */ + return (0); + + PostByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR); + writeb(Isr, ha->mem_ptr + IPS_REG_HISR); + } + + if (PostByte[0] < IPS_GOOD_POST_STATUS) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "reset controller fails (post status %x %x).\n", + PostByte[0], PostByte[1]); + + return (0); + } + + for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) { + for (j = 0; j < 240; j++) { + Isr = readb(ha->mem_ptr + IPS_REG_HISR); + if (Isr & IPS_BIT_GHI) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (j >= 240) + /* error occurred */ + return (0); + + 
readb(ha->mem_ptr + IPS_REG_ISPR); + writeb(Isr, ha->mem_ptr + IPS_REG_HISR); + } + + for (i = 0; i < 240; i++) { + Cbsp = readb(ha->mem_ptr + IPS_REG_CBSP); + + if ((Cbsp & IPS_BIT_OP) == 0) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (i >= 240) + /* error occurred */ + return (0); + + /* setup CCCR */ + writel(0x1010, ha->mem_ptr + IPS_REG_CCCR); + + /* Enable busmastering */ + writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR); + + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + /* fix for anaconda64 */ + writel(0, ha->mem_ptr + IPS_REG_NDAE); + + /* Enable interrupts */ + writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR); + + /* if we get here then everything went OK */ + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_init_morpheus */ +/* */ +/* Routine Description: */ +/* */ +/* Initialize a morpheus controller */ +/* */ +/****************************************************************************/ +static int +ips_init_morpheus(ips_ha_t * ha) +{ + uint32_t Post; + uint32_t Config; + uint32_t Isr; + uint32_t Oimr; + int i; + + METHOD_TRACE("ips_init_morpheus", 1); + + /* Wait up to 45 secs for Post */ + for (i = 0; i < 45; i++) { + Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); + + if (Isr & IPS_BIT_I960_MSG0I) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (i >= 45) { + /* error occurred */ + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "timeout waiting for post.\n"); + + return (0); + } + + Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); + + if (Post == 0x4F00) { /* If Flashing the Battery PIC */ + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "Flashing Battery PIC, Please wait ...\n"); + + /* Clear the interrupt bit */ + Isr = (uint32_t) IPS_BIT_I960_MSG0I; + writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); + + for (i = 0; i < 120; i++) { /* Wait Up to 2 Min. 
for Completion */ + Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); + if (Post != 0x4F00) + break; + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (i >= 120) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "timeout waiting for Battery PIC Flash\n"); + return (0); + } + + } + + /* Clear the interrupt bit */ + Isr = (uint32_t) IPS_BIT_I960_MSG0I; + writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); + + if (Post < (IPS_GOOD_POST_STATUS << 8)) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "reset controller fails (post status %x).\n", Post); + + return (0); + } + + /* Wait up to 240 secs for config bytes */ + for (i = 0; i < 240; i++) { + Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); + + if (Isr & IPS_BIT_I960_MSG1I) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (i >= 240) { + /* error occurred */ + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "timeout waiting for config.\n"); + + return (0); + } + + Config = readl(ha->mem_ptr + IPS_REG_I960_MSG1); + + /* Clear interrupt bit */ + Isr = (uint32_t) IPS_BIT_I960_MSG1I; + writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); + + /* Turn on the interrupts */ + Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR); + Oimr &= ~0x8; + writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR); + + /* if we get here then everything went OK */ + + /* Since we did a RESET, an EraseStripeLock may be needed */ + if (Post == 0xEF10) { + if ((Config == 0x000F) || (Config == 0x0009)) + ha->requires_esl = 1; + } + + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_reset_copperhead */ +/* */ +/* Routine Description: */ +/* */ +/* Reset the controller */ +/* */ +/****************************************************************************/ +static int +ips_reset_copperhead(ips_ha_t * ha) +{ + int reset_counter; + + METHOD_TRACE("ips_reset_copperhead", 1); + + DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d", + ips_name, ha->host_num, ha->io_addr, ha->pcidev->irq); + + reset_counter = 0; + + while (reset_counter < 2) { + reset_counter++; + + outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR); + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + + outb(0, ha->io_addr + IPS_REG_SCPR); + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + + if ((*ha->func.init) (ha)) + break; + else if (reset_counter >= 2) { + + return (0); + } + } + + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_reset_copperhead_memio */ +/* */ +/* Routine Description: */ +/* */ +/* Reset the controller */ +/* */ +/****************************************************************************/ +static int +ips_reset_copperhead_memio(ips_ha_t * ha) +{ + int reset_counter; + + METHOD_TRACE("ips_reset_copperhead_memio", 1); + + DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d", + ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq); + + reset_counter = 0; + + while (reset_counter < 2) { + reset_counter++; + + writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR); + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + + writeb(0, ha->mem_ptr + IPS_REG_SCPR); + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + + if ((*ha->func.init) (ha)) + break; + else if (reset_counter >= 2) { + + return (0); + } + } + + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_reset_morpheus */ +/* */ +/* Routine Description: */ +/* */ +/* Reset the controller */ +/* */ 
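+/*                                                                          */
+/*   Editor's note: this variant pokes the i960 doorbell register           */
+/*   (IPS_REG_I960_IDR, presumably the inbound doorbell) with 0x80000000,   */
+/*   waits out the adapter, then re-runs the init handshake via func.init.  */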
+/****************************************************************************/ +static int +ips_reset_morpheus(ips_ha_t * ha) +{ + int reset_counter; + uint8_t junk; + + METHOD_TRACE("ips_reset_morpheus", 1); + + DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d", + ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq); + + reset_counter = 0; + + while (reset_counter < 2) { + reset_counter++; + + writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR); + + /* Delay for 5 Seconds */ + MDELAY(5 * IPS_ONE_SEC); + + /* Do a PCI config read to wait for adapter */ + pci_read_config_byte(ha->pcidev, 4, &junk); + + if ((*ha->func.init) (ha)) + break; + else if (reset_counter >= 2) { + + return (0); + } + } + + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_statinit */ +/* */ +/* Routine Description: */ +/* */ +/* Initialize the status queues on the controller */ +/* */ +/****************************************************************************/ +static void +ips_statinit(ips_ha_t * ha) +{ + uint32_t phys_status_start; + + METHOD_TRACE("ips_statinit", 1); + + ha->adapt->p_status_start = ha->adapt->status; + ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS; + ha->adapt->p_status_tail = ha->adapt->status; + + phys_status_start = ha->adapt->hw_status_start; + outl(phys_status_start, ha->io_addr + IPS_REG_SQSR); + outl(phys_status_start + IPS_STATUS_Q_SIZE, + ha->io_addr + IPS_REG_SQER); + outl(phys_status_start + IPS_STATUS_SIZE, + ha->io_addr + IPS_REG_SQHR); + outl(phys_status_start, ha->io_addr + IPS_REG_SQTR); + + ha->adapt->hw_status_tail = phys_status_start; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_statinit_memio */ +/* */ +/* Routine Description: */ +/* */ +/* Initialize the status queues on the controller */ +/* */ +/****************************************************************************/ +static void +ips_statinit_memio(ips_ha_t * ha) +{ + uint32_t phys_status_start; + + METHOD_TRACE("ips_statinit_memio", 1); + + ha->adapt->p_status_start = ha->adapt->status; + ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS; + ha->adapt->p_status_tail = ha->adapt->status; + + phys_status_start = ha->adapt->hw_status_start; + writel(phys_status_start, ha->mem_ptr + IPS_REG_SQSR); + writel(phys_status_start + IPS_STATUS_Q_SIZE, + ha->mem_ptr + IPS_REG_SQER); + writel(phys_status_start + IPS_STATUS_SIZE, ha->mem_ptr + IPS_REG_SQHR); + writel(phys_status_start, ha->mem_ptr + IPS_REG_SQTR); + + ha->adapt->hw_status_tail = phys_status_start; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_statupd_copperhead */ +/* */ +/* Routine Description: */ +/* */ +/* Remove an element from the status queue */ +/* */ +/****************************************************************************/ +static uint32_t +ips_statupd_copperhead(ips_ha_t * ha) +{ + METHOD_TRACE("ips_statupd_copperhead", 1); + + if (ha->adapt->p_status_tail != ha->adapt->p_status_end) { + ha->adapt->p_status_tail++; + ha->adapt->hw_status_tail += sizeof (IPS_STATUS); + } else { + ha->adapt->p_status_tail = ha->adapt->p_status_start; + ha->adapt->hw_status_tail = ha->adapt->hw_status_start; + } + + outl(ha->adapt->hw_status_tail, + ha->io_addr + IPS_REG_SQTR); + + return (ha->adapt->p_status_tail->value); +} + +/****************************************************************************/ +/* */ +/* 
Routine Name: ips_statupd_copperhead_memio */ +/* */ +/* Routine Description: */ +/* */ +/* Remove an element from the status queue */ +/* */ +/****************************************************************************/ +static uint32_t +ips_statupd_copperhead_memio(ips_ha_t * ha) +{ + METHOD_TRACE("ips_statupd_copperhead_memio", 1); + + if (ha->adapt->p_status_tail != ha->adapt->p_status_end) { + ha->adapt->p_status_tail++; + ha->adapt->hw_status_tail += sizeof (IPS_STATUS); + } else { + ha->adapt->p_status_tail = ha->adapt->p_status_start; + ha->adapt->hw_status_tail = ha->adapt->hw_status_start; + } + + writel(ha->adapt->hw_status_tail, ha->mem_ptr + IPS_REG_SQTR); + + return (ha->adapt->p_status_tail->value); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_statupd_morpheus */ +/* */ +/* Routine Description: */ +/* */ +/* Remove an element from the status queue */ +/* */ +/****************************************************************************/ +static uint32_t +ips_statupd_morpheus(ips_ha_t * ha) +{ + uint32_t val; + + METHOD_TRACE("ips_statupd_morpheus", 1); + + val = readl(ha->mem_ptr + IPS_REG_I2O_OUTMSGQ); + + return (val); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_issue_copperhead */ +/* */ +/* Routine Description: */ +/* */ +/* Send a command down to the controller */ +/* */ +/****************************************************************************/ +static int +ips_issue_copperhead(ips_ha_t * ha, ips_scb_t * scb) +{ + uint32_t TimeOut; + uint32_t val; + + METHOD_TRACE("ips_issue_copperhead", 1); + + if (scb->scsi_cmd) { + DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", + ips_name, + ha->host_num, + scb->cdb[0], + scb->cmd.basic_io.command_id, + scb->bus, scb->target_id, scb->lun); + } else { + DEBUG_VAR(2, KERN_NOTICE "(%s%d) ips_issue: logical cmd id %d", + ips_name, ha->host_num, scb->cmd.basic_io.command_id); + } + + TimeOut = 0; + + while ((val = + le32_to_cpu(inl(ha->io_addr + IPS_REG_CCCR))) & IPS_BIT_SEM) { + udelay(1000); + + if (++TimeOut >= IPS_SEM_TIMEOUT) { + if (!(val & IPS_BIT_START_STOP)) + break; + + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "ips_issue val [0x%x].\n", val); + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "ips_issue semaphore chk timeout.\n"); + + return (IPS_FAILURE); + } /* end if */ + } /* end while */ + + outl(scb->scb_busaddr, ha->io_addr + IPS_REG_CCSAR); + outw(IPS_BIT_START_CMD, ha->io_addr + IPS_REG_CCCR); + + return (IPS_SUCCESS); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_issue_copperhead_memio */ +/* */ +/* Routine Description: */ +/* */ +/* Send a command down to the controller */ +/* */ +/****************************************************************************/ +static int +ips_issue_copperhead_memio(ips_ha_t * ha, ips_scb_t * scb) +{ + uint32_t TimeOut; + uint32_t val; + + METHOD_TRACE("ips_issue_copperhead_memio", 1); + + if (scb->scsi_cmd) { + DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", + ips_name, + ha->host_num, + scb->cdb[0], + scb->cmd.basic_io.command_id, + scb->bus, scb->target_id, scb->lun); + } else { + DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", + ips_name, ha->host_num, scb->cmd.basic_io.command_id); + } + + TimeOut = 0; + + while ((val = readl(ha->mem_ptr + IPS_REG_CCCR)) & IPS_BIT_SEM) { + udelay(1000); + + if (++TimeOut >= IPS_SEM_TIMEOUT) { + if (!(val & 
IPS_BIT_START_STOP)) + break; + + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "ips_issue val [0x%x].\n", val); + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "ips_issue semaphore chk timeout.\n"); + + return (IPS_FAILURE); + } /* end if */ + } /* end while */ + + writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_CCSAR); + writel(IPS_BIT_START_CMD, ha->mem_ptr + IPS_REG_CCCR); + + return (IPS_SUCCESS); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_issue_i2o */ +/* */ +/* Routine Description: */ +/* */ +/* Send a command down to the controller */ +/* */ +/****************************************************************************/ +static int +ips_issue_i2o(ips_ha_t * ha, ips_scb_t * scb) +{ + + METHOD_TRACE("ips_issue_i2o", 1); + + if (scb->scsi_cmd) { + DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", + ips_name, + ha->host_num, + scb->cdb[0], + scb->cmd.basic_io.command_id, + scb->bus, scb->target_id, scb->lun); + } else { + DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", + ips_name, ha->host_num, scb->cmd.basic_io.command_id); + } + + outl(scb->scb_busaddr, ha->io_addr + IPS_REG_I2O_INMSGQ); + + return (IPS_SUCCESS); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_issue_i2o_memio */ +/* */ +/* Routine Description: */ +/* */ +/* Send a command down to the controller */ +/* */ +/****************************************************************************/ +static int +ips_issue_i2o_memio(ips_ha_t * ha, ips_scb_t * scb) +{ + + METHOD_TRACE("ips_issue_i2o_memio", 1); + + if (scb->scsi_cmd) { + DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", + ips_name, + ha->host_num, + scb->cdb[0], + scb->cmd.basic_io.command_id, + scb->bus, scb->target_id, scb->lun); + } else { + DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", + ips_name, ha->host_num, scb->cmd.basic_io.command_id); + } + + writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_I2O_INMSGQ); + + return (IPS_SUCCESS); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_isintr_copperhead */ +/* */ +/* Routine Description: */ +/* */ +/* Test to see if an interrupt is for us */ +/* */ +/****************************************************************************/ +static int +ips_isintr_copperhead(ips_ha_t * ha) +{ + uint8_t Isr; + + METHOD_TRACE("ips_isintr_copperhead", 2); + + Isr = inb(ha->io_addr + IPS_REG_HISR); + + if (Isr == 0xFF) + /* ?!?! Nothing really there */ + return (0); + + if (Isr & IPS_BIT_SCE) + return (1); + else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) { + /* status queue overflow or GHI */ + /* just clear the interrupt */ + outb(Isr, ha->io_addr + IPS_REG_HISR); + } + + return (0); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_isintr_copperhead_memio */ +/* */ +/* Routine Description: */ +/* */ +/* Test to see if an interrupt is for us */ +/* */ +/****************************************************************************/ +static int +ips_isintr_copperhead_memio(ips_ha_t * ha) +{ + uint8_t Isr; + + METHOD_TRACE("ips_isintr_memio", 2); + + Isr = readb(ha->mem_ptr + IPS_REG_HISR); + + if (Isr == 0xFF) + /* ?!?! 
Nothing really there */ + return (0); + + if (Isr & IPS_BIT_SCE) + return (1); + else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) { + /* status queue overflow or GHI */ + /* just clear the interrupt */ + writeb(Isr, ha->mem_ptr + IPS_REG_HISR); + } + + return (0); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_isintr_morpheus */ +/* */ +/* Routine Description: */ +/* */ +/* Test to see if an interrupt is for us */ +/* */ +/****************************************************************************/ +static int +ips_isintr_morpheus(ips_ha_t * ha) +{ + uint32_t Isr; + + METHOD_TRACE("ips_isintr_morpheus", 2); + + Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); + + if (Isr & IPS_BIT_I2O_OPQI) + return (1); + else + return (0); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_wait */ +/* */ +/* Routine Description: */ +/* */ +/* Wait for a command to complete */ +/* */ +/****************************************************************************/ +static int +ips_wait(ips_ha_t * ha, int time, int intr) +{ + int ret; + int done; + + METHOD_TRACE("ips_wait", 1); + + ret = IPS_FAILURE; + done = false; + + time *= IPS_ONE_SEC; /* convert seconds */ + + while ((time > 0) && (!done)) { + if (intr == IPS_INTR_ON) { + if (!ha->waitflag) { + ret = IPS_SUCCESS; + done = true; + break; + } + } else if (intr == IPS_INTR_IORL) { + if (!ha->waitflag) { + /* + * controller generated an interrupt to + * acknowledge completion of the command + * and ips_intr() has serviced the interrupt. + */ + ret = IPS_SUCCESS; + done = true; + break; + } + + /* + * NOTE: we already have the io_request_lock so + * even if we get an interrupt it won't get serviced + * until after we finish. 
+ */ + + (*ha->func.intr) (ha); + } + + /* This looks like a very evil loop, but it only does this during start-up */ + udelay(1000); + time--; + } + + return (ret); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_write_driver_status */ +/* */ +/* Routine Description: */ +/* */ +/* Write OS/Driver version to Page 5 of the nvram on the controller */ +/* */ +/****************************************************************************/ +static int +ips_write_driver_status(ips_ha_t * ha, int intr) +{ + METHOD_TRACE("ips_write_driver_status", 1); + + if (!ips_readwrite_page5(ha, false, intr)) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "unable to read NVRAM page 5.\n"); + + return (0); + } + + /* check to make sure the page has a valid */ + /* signature */ + if (le32_to_cpu(ha->nvram->signature) != IPS_NVRAM_P5_SIG) { + DEBUG_VAR(1, + "(%s%d) NVRAM page 5 has an invalid signature: %X.", + ips_name, ha->host_num, ha->nvram->signature); + ha->nvram->signature = IPS_NVRAM_P5_SIG; + } + + DEBUG_VAR(2, + "(%s%d) Ad Type: %d, Ad Slot: %d, BIOS: %c%c%c%c %c%c%c%c.", + ips_name, ha->host_num, le16_to_cpu(ha->nvram->adapter_type), + ha->nvram->adapter_slot, ha->nvram->bios_high[0], + ha->nvram->bios_high[1], ha->nvram->bios_high[2], + ha->nvram->bios_high[3], ha->nvram->bios_low[0], + ha->nvram->bios_low[1], ha->nvram->bios_low[2], + ha->nvram->bios_low[3]); + + ips_get_bios_version(ha, intr); + + /* change values (as needed) */ + ha->nvram->operating_system = IPS_OS_LINUX; + ha->nvram->adapter_type = ha->ad_type; + memcpy((char *) ha->nvram->driver_high, IPS_VERSION_HIGH, 4); + memcpy((char *) ha->nvram->driver_low, IPS_VERSION_LOW, 4); + memcpy((char *) ha->nvram->bios_high, ha->bios_version, 4); + memcpy((char *) ha->nvram->bios_low, ha->bios_version + 4, 4); + + ha->nvram->versioning = 0; /* Indicate the Driver Does Not Support Versioning */ + + /* now update the page */ + if (!ips_readwrite_page5(ha, true, intr)) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "unable to write NVRAM page 5.\n"); + + return (0); + } + + /* IF NVRAM Page 5 is OK, Use it for Slot Number Info Because Linux Doesn't Do Slots */ + ha->slot_num = ha->nvram->adapter_slot; + + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_read_adapter_status */ +/* */ +/* Routine Description: */ +/* */ +/* Do an Inquiry command to the adapter */ +/* */ +/****************************************************************************/ +static int +ips_read_adapter_status(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + int ret; + + METHOD_TRACE("ips_read_adapter_status", 1); + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_ENQUIRY; + + scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.basic_io.sg_count = 0; + scb->cmd.basic_io.lba = 0; + scb->cmd.basic_io.sector_count = 0; + scb->cmd.basic_io.log_drv = 0; + scb->data_len = sizeof (*ha->enq); + scb->cmd.basic_io.sg_addr = ha->enq_busaddr; + + /* send command */ + if (((ret = + ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) + || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) + return (0); + + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_read_subsystem_parameters */ +/* */ +/* Routine 
Description: */ +/* */ +/* Read subsystem parameters from the adapter */ +/* */ +/****************************************************************************/ +static int +ips_read_subsystem_parameters(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + int ret; + + METHOD_TRACE("ips_read_subsystem_parameters", 1); + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_GET_SUBSYS; + + scb->cmd.basic_io.op_code = IPS_CMD_GET_SUBSYS; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.basic_io.sg_count = 0; + scb->cmd.basic_io.lba = 0; + scb->cmd.basic_io.sector_count = 0; + scb->cmd.basic_io.log_drv = 0; + scb->data_len = sizeof (*ha->subsys); + scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr; + + /* send command */ + if (((ret = + ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) + || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) + return (0); + + memcpy(ha->subsys, ha->ioctl_data, sizeof(*ha->subsys)); + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_read_config */ +/* */ +/* Routine Description: */ +/* */ +/* Read the configuration on the adapter */ +/* */ +/****************************************************************************/ +static int +ips_read_config(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + int i; + int ret; + + METHOD_TRACE("ips_read_config", 1); + + /* set defaults for initiator IDs */ + for (i = 0; i < 4; i++) + ha->conf->init_id[i] = 7; + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_READ_CONF; + + scb->cmd.basic_io.op_code = IPS_CMD_READ_CONF; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->data_len = sizeof (*ha->conf); + scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr; + + /* send command */ + if (((ret = + ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) + || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { + + memset(ha->conf, 0, sizeof (IPS_CONF)); + + /* reset initiator IDs */ + for (i = 0; i < 4; i++) + ha->conf->init_id[i] = 7; + + /* Allow Completed with Errors, so JCRM can access the Adapter to fix the problems */ + if ((scb->basic_status & IPS_GSC_STATUS_MASK) == + IPS_CMD_CMPLT_WERROR) + return (1); + + return (0); + } + + memcpy(ha->conf, ha->ioctl_data, sizeof(*ha->conf)); + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_readwrite_page5 */ +/* */ +/* Routine Description: */ +/* */ +/* Read nvram page 5 from the adapter */ +/* */ +/****************************************************************************/ +static int +ips_readwrite_page5(ips_ha_t * ha, int write, int intr) +{ + ips_scb_t *scb; + int ret; + + METHOD_TRACE("ips_readwrite_page5", 1); + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_RW_NVRAM_PAGE; + + scb->cmd.nvram.op_code = IPS_CMD_RW_NVRAM_PAGE; + scb->cmd.nvram.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.nvram.page = 5; + scb->cmd.nvram.write = write; + scb->cmd.nvram.reserved = 0; + scb->cmd.nvram.reserved2 = 0; + scb->data_len = sizeof (*ha->nvram); + scb->cmd.nvram.buffer_addr = ha->ioctl_busaddr; + if (write) + memcpy(ha->ioctl_data, ha->nvram, sizeof(*ha->nvram)); + + /* issue the command */ + if (((ret = + ips_send_wait(ha, 
scb, ips_cmd_timeout, intr)) == IPS_FAILURE) + || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { + + memset(ha->nvram, 0, sizeof (IPS_NVRAM_P5)); + + return (0); + } + if (!write) + memcpy(ha->nvram, ha->ioctl_data, sizeof(*ha->nvram)); + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_clear_adapter */ +/* */ +/* Routine Description: */ +/* */ +/* Clear the stripe lock tables */ +/* */ +/****************************************************************************/ +static int +ips_clear_adapter(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + int ret; + + METHOD_TRACE("ips_clear_adapter", 1); + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_reset_timeout; + scb->cdb[0] = IPS_CMD_CONFIG_SYNC; + + scb->cmd.config_sync.op_code = IPS_CMD_CONFIG_SYNC; + scb->cmd.config_sync.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.config_sync.channel = 0; + scb->cmd.config_sync.source_target = IPS_POCL; + scb->cmd.config_sync.reserved = 0; + scb->cmd.config_sync.reserved2 = 0; + scb->cmd.config_sync.reserved3 = 0; + + /* issue command */ + if (((ret = + ips_send_wait(ha, scb, ips_reset_timeout, intr)) == IPS_FAILURE) + || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) + return (0); + + /* send unlock stripe command */ + ips_init_scb(ha, scb); + + scb->cdb[0] = IPS_CMD_ERROR_TABLE; + scb->timeout = ips_reset_timeout; + + scb->cmd.unlock_stripe.op_code = IPS_CMD_ERROR_TABLE; + scb->cmd.unlock_stripe.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.unlock_stripe.log_drv = 0; + scb->cmd.unlock_stripe.control = IPS_CSL; + scb->cmd.unlock_stripe.reserved = 0; + scb->cmd.unlock_stripe.reserved2 = 0; + scb->cmd.unlock_stripe.reserved3 = 0; + + /* issue command */ + if (((ret = + ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) + || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) + return (0); + + return (1); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_ffdc_reset */ +/* */ +/* Routine Description: */ +/* */ +/* FFDC: write reset info */ +/* */ +/****************************************************************************/ +static void +ips_ffdc_reset(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + + METHOD_TRACE("ips_ffdc_reset", 1); + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FFDC; + scb->cmd.ffdc.op_code = IPS_CMD_FFDC; + scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.ffdc.reset_count = ha->reset_count; + scb->cmd.ffdc.reset_type = 0x80; + + /* convert time to what the card wants */ + ips_fix_ffdc_time(ha, scb, ha->last_ffdc); + + /* issue command */ + ips_send_wait(ha, scb, ips_cmd_timeout, intr); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_ffdc_time */ +/* */ +/* Routine Description: */ +/* */ +/* FFDC: write time info */ +/* */ +/****************************************************************************/ +static void +ips_ffdc_time(ips_ha_t * ha) +{ + ips_scb_t *scb; + + METHOD_TRACE("ips_ffdc_time", 1); + + DEBUG_VAR(1, "(%s%d) Sending time update.", ips_name, ha->host_num); + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FFDC; + scb->cmd.ffdc.op_code = IPS_CMD_FFDC; + 
scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.ffdc.reset_count = 0; + scb->cmd.ffdc.reset_type = 0; + + /* convert time to what the card wants */ + ips_fix_ffdc_time(ha, scb, ha->last_ffdc); + + /* issue command */ + ips_send_wait(ha, scb, ips_cmd_timeout, IPS_FFDC); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_fix_ffdc_time */ +/* */ +/* Routine Description: */ +/* Adjust time_t to what the card wants */ +/* */ +/****************************************************************************/ +static void +ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time64_t current_time) +{ + struct tm tm; + + METHOD_TRACE("ips_fix_ffdc_time", 1); + + time64_to_tm(current_time, 0, &tm); + + scb->cmd.ffdc.hour = tm.tm_hour; + scb->cmd.ffdc.minute = tm.tm_min; + scb->cmd.ffdc.second = tm.tm_sec; + scb->cmd.ffdc.yearH = (tm.tm_year + 1900) / 100; + scb->cmd.ffdc.yearL = tm.tm_year % 100; + scb->cmd.ffdc.month = tm.tm_mon + 1; + scb->cmd.ffdc.day = tm.tm_mday; +} + +/**************************************************************************** + * BIOS Flash Routines * + ****************************************************************************/ + +/****************************************************************************/ +/* */ +/* Routine Name: ips_erase_bios */ +/* */ +/* Routine Description: */ +/* Erase the BIOS on the adapter */ +/* */ +/****************************************************************************/ +static int +ips_erase_bios(ips_ha_t * ha) +{ + int timeout; + uint8_t status = 0; + + METHOD_TRACE("ips_erase_bios", 1); + + status = 0; + + /* Clear the status register */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(0x50, ha->io_addr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Setup */ + outb(0x20, ha->io_addr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Confirm */ + outb(0xD0, ha->io_addr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Status */ + outb(0x70, ha->io_addr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + timeout = 80000; /* 80 seconds */ + + while (timeout > 0) { + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) { + outl(0, ha->io_addr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = inb(ha->io_addr + IPS_REG_FLDP); + + if (status & 0x80) + break; + + MDELAY(1); + timeout--; + } + + /* check for timeout */ + if (timeout <= 0) { + /* timeout */ + + /* try to suspend the erase */ + outb(0xB0, ha->io_addr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* wait for 10 seconds */ + timeout = 10000; + while (timeout > 0) { + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) { + outl(0, ha->io_addr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = inb(ha->io_addr + IPS_REG_FLDP); + + if (status & 0xC0) + break; + + MDELAY(1); + timeout--; + } + + return (1); + } + + /* check for valid VPP */ + if (status & 0x08) + /* VPP failure */ + return (1); + + /* check for successful flash */ + if (status & 0x30) + /* sequence error */ + return (1); + + /* Otherwise, we were successful */ + /* clear status */ + outb(0x50, ha->io_addr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); 
/* 25 us */ + + /* enable reads */ + outb(0xFF, ha->io_addr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (0); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_erase_bios_memio */ +/* */ +/* Routine Description: */ +/* Erase the BIOS on the adapter */ +/* */ +/****************************************************************************/ +static int +ips_erase_bios_memio(ips_ha_t * ha) +{ + int timeout; + uint8_t status; + + METHOD_TRACE("ips_erase_bios_memio", 1); + + status = 0; + + /* Clear the status register */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(0x50, ha->mem_ptr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Setup */ + writeb(0x20, ha->mem_ptr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Confirm */ + writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Status */ + writeb(0x70, ha->mem_ptr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + timeout = 80000; /* 80 seconds */ + + while (timeout > 0) { + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) { + writel(0, ha->mem_ptr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = readb(ha->mem_ptr + IPS_REG_FLDP); + + if (status & 0x80) + break; + + MDELAY(1); + timeout--; + } + + /* check for timeout */ + if (timeout <= 0) { + /* timeout */ + + /* try to suspend the erase */ + writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* wait for 10 seconds */ + timeout = 10000; + while (timeout > 0) { + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) { + writel(0, ha->mem_ptr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = readb(ha->mem_ptr + IPS_REG_FLDP); + + if (status & 0xC0) + break; + + MDELAY(1); + timeout--; + } + + return (1); + } + + /* check for valid VPP */ + if (status & 0x08) + /* VPP failure */ + return (1); + + /* check for successful flash */ + if (status & 0x30) + /* sequence error */ + return (1); + + /* Otherwise, we were successful */ + /* clear status */ + writeb(0x50, ha->mem_ptr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* enable reads */ + writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (0); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_program_bios */ +/* */ +/* Routine Description: */ +/* Program the BIOS on the adapter */ +/* */ +/****************************************************************************/ +static int +ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize, + uint32_t offset) +{ + int i; + int timeout; + uint8_t status = 0; + + METHOD_TRACE("ips_program_bios", 1); + + status = 0; + + for (i = 0; i < buffersize; i++) { + /* write a byte */ + outl(i + offset, ha->io_addr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(0x40, ha->io_addr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(buffer[i], ha->io_addr + IPS_REG_FLDP); 
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* wait up to one second */ + timeout = 1000; + while (timeout > 0) { + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) { + outl(0, ha->io_addr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = inb(ha->io_addr + IPS_REG_FLDP); + + if (status & 0x80) + break; + + MDELAY(1); + timeout--; + } + + if (timeout == 0) { + /* timeout error */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(0xFF, ha->io_addr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (1); + } + + /* check the status */ + if (status & 0x18) { + /* programming error */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(0xFF, ha->io_addr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (1); + } + } /* end for */ + + /* Enable reading */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(0xFF, ha->io_addr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (0); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_program_bios_memio */ +/* */ +/* Routine Description: */ +/* Program the BIOS on the adapter */ +/* */ +/****************************************************************************/ +static int +ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize, + uint32_t offset) +{ + int i; + int timeout; + uint8_t status = 0; + + METHOD_TRACE("ips_program_bios_memio", 1); + + status = 0; + + for (i = 0; i < buffersize; i++) { + /* write a byte */ + writel(i + offset, ha->mem_ptr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(0x40, ha->mem_ptr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* wait up to one second */ + timeout = 1000; + while (timeout > 0) { + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) { + writel(0, ha->mem_ptr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = readb(ha->mem_ptr + IPS_REG_FLDP); + + if (status & 0x80) + break; + + MDELAY(1); + timeout--; + } + + if (timeout == 0) { + /* timeout error */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (1); + } + + /* check the status */ + if (status & 0x18) { + /* programming error */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (1); + } + } /* end for */ + + /* Enable reading */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (0); +} + 
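+/*
+ * Note on the flash protocol used by the erase/program routines above and
+ * the verify routines below: the BIOS part is driven through the flash
+ * address port (IPS_REG_FLAP) and flash data port (IPS_REG_FLDP). The
+ * command bytes written to the data port are 0x50 (clear status),
+ * 0x20/0xD0 (erase setup/confirm), 0x70 (read status), 0x40 (program a
+ * byte), 0xB0 (suspend an erase) and 0xFF (re-enable reads). Completion
+ * is polled via bit 0x80 of the status byte; 0x08 flags a VPP failure,
+ * 0x30 a sequence error and 0x18 a programming error. TROMBONE64
+ * revisions additionally require a 25 us delay after each port access.
+ */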
+/****************************************************************************/ +/* */ +/* Routine Name: ips_verify_bios */ +/* */ +/* Routine Description: */ +/* Verify the BIOS on the adapter */ +/* */ +/****************************************************************************/ +static int +ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize, + uint32_t offset) +{ + uint8_t checksum; + int i; + + METHOD_TRACE("ips_verify_bios", 1); + + /* test 1st byte */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) + return (1); + + outl(1, ha->io_addr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA) + return (1); + + checksum = 0xff; + for (i = 2; i < buffersize; i++) { + + outl(i + offset, ha->io_addr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP); + } + + if (checksum != 0) + /* failure */ + return (1); + else + /* success */ + return (0); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_verify_bios_memio */ +/* */ +/* Routine Description: */ +/* Verify the BIOS on the adapter */ +/* */ +/****************************************************************************/ +static int +ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize, + uint32_t offset) +{ + uint8_t checksum; + int i; + + METHOD_TRACE("ips_verify_bios_memio", 1); + + /* test 1st byte */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55) + return (1); + + writel(1, ha->mem_ptr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA) + return (1); + + checksum = 0xff; + for (i = 2; i < buffersize; i++) { + + writel(i + offset, ha->mem_ptr + IPS_REG_FLAP); + if (ha->pcidev->revision == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + checksum = + (uint8_t) checksum + readb(ha->mem_ptr + IPS_REG_FLDP); + } + + if (checksum != 0) + /* failure */ + return (1); + else + /* success */ + return (0); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_abort_init */ +/* */ +/* Routine Description: */ +/* cleanup routine for a failed adapter initialization */ +/****************************************************************************/ +static int +ips_abort_init(ips_ha_t * ha, int index) +{ + ha->active = 0; + ips_free(ha); + ips_ha[index] = NULL; + ips_sh[index] = NULL; + return -1; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_shift_controllers */ +/* */ +/* Routine Description: */ +/* helper function for ordering adapters */ +/****************************************************************************/ +static void +ips_shift_controllers(int lowindex, int highindex) +{ + ips_ha_t *ha_sav = ips_ha[highindex]; + struct Scsi_Host *sh_sav = ips_sh[highindex]; + int i; + + for (i = highindex; i > lowindex; i--) { + ips_ha[i] = ips_ha[i - 1]; + ips_sh[i] = ips_sh[i - 1]; + ips_ha[i]->host_num = i; + } + ha_sav->host_num = lowindex; + ips_ha[lowindex] = ha_sav; + ips_sh[lowindex] = sh_sav; +} + 
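+/*
+ * ips_shift_controllers() rotates the ips_ha[]/ips_sh[] slots so that the
+ * adapter at highindex lands at lowindex and the adapters in between move
+ * up by one slot, with host_num renumbered to match. For example, with
+ * adapters [A, B, C, D] in slots 0-3, ips_shift_controllers(1, 3) leaves
+ * [A, D, B, C]. ips_order_controllers() below uses it to sort adapters
+ * into the boot order recorded in NVRAM page 5.
+ */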
+/****************************************************************************/ +/* */ +/* Routine Name: ips_order_controllers */ +/* */ +/* Routine Description: */ +/* place controllers in the "proper" boot order */ +/****************************************************************************/ +static void +ips_order_controllers(void) +{ + int i, j, tmp, position = 0; + IPS_NVRAM_P5 *nvram; + if (!ips_ha[0]) + return; + nvram = ips_ha[0]->nvram; + + if (nvram->adapter_order[0]) { + for (i = 1; i <= nvram->adapter_order[0]; i++) { + for (j = position; j < ips_num_controllers; j++) { + switch (ips_ha[j]->ad_type) { + case IPS_ADTYPE_SERVERAID6M: + case IPS_ADTYPE_SERVERAID7M: + if (nvram->adapter_order[i] == 'M') { + ips_shift_controllers(position, + j); + position++; + } + break; + case IPS_ADTYPE_SERVERAID4L: + case IPS_ADTYPE_SERVERAID4M: + case IPS_ADTYPE_SERVERAID4MX: + case IPS_ADTYPE_SERVERAID4LX: + if (nvram->adapter_order[i] == 'N') { + ips_shift_controllers(position, + j); + position++; + } + break; + case IPS_ADTYPE_SERVERAID6I: + case IPS_ADTYPE_SERVERAID5I2: + case IPS_ADTYPE_SERVERAID5I1: + case IPS_ADTYPE_SERVERAID7k: + if (nvram->adapter_order[i] == 'S') { + ips_shift_controllers(position, + j); + position++; + } + break; + case IPS_ADTYPE_SERVERAID: + case IPS_ADTYPE_SERVERAID2: + case IPS_ADTYPE_NAVAJO: + case IPS_ADTYPE_KIOWA: + case IPS_ADTYPE_SERVERAID3L: + case IPS_ADTYPE_SERVERAID3: + case IPS_ADTYPE_SERVERAID4H: + if (nvram->adapter_order[i] == 'A') { + ips_shift_controllers(position, + j); + position++; + } + break; + default: + break; + } + } + } + /* if adapter_order[0], then ordering is complete */ + return; + } + /* old bios, use older ordering */ + tmp = 0; + for (i = position; i < ips_num_controllers; i++) { + if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I2 || + ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I1) { + ips_shift_controllers(position, i); + position++; + tmp = 1; + } + } + /* if there were no 5I cards, then don't do any extra ordering */ + if (!tmp) + return; + for (i = position; i < ips_num_controllers; i++) { + if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4L || + ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4M || + ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4LX || + ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4MX) { + ips_shift_controllers(position, i); + position++; + } + } + + return; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_register_scsi */ +/* */ +/* Routine Description: */ +/* perform any registration and setup with the scsi layer */ +/****************************************************************************/ +static int +ips_register_scsi(int index) +{ + struct Scsi_Host *sh; + ips_ha_t *ha, *oldha = ips_ha[index]; + sh = scsi_host_alloc(&ips_driver_template, sizeof (ips_ha_t)); + if (!sh) { + IPS_PRINTK(KERN_WARNING, oldha->pcidev, + "Unable to register controller with SCSI subsystem\n"); + return -1; + } + ha = IPS_HA(sh); + memcpy(ha, oldha, sizeof (ips_ha_t)); + free_irq(oldha->pcidev->irq, oldha); + /* Install the interrupt handler with the new ha */ + if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "Unable to install interrupt handler\n"); + goto err_out_sh; + } + + kfree(oldha); + + /* Store away needed values for later use */ + sh->unique_id = (ha->io_addr) ? 
ha->io_addr : ha->mem_addr; + sh->sg_tablesize = sh->hostt->sg_tablesize; + sh->can_queue = sh->hostt->can_queue; + sh->cmd_per_lun = sh->hostt->cmd_per_lun; + sh->max_sectors = 128; + + sh->max_id = ha->ntargets; + sh->max_lun = ha->nlun; + sh->max_channel = ha->nbus - 1; + sh->can_queue = ha->max_cmds - 1; + + if (scsi_add_host(sh, &ha->pcidev->dev)) + goto err_out; + + ips_sh[index] = sh; + ips_ha[index] = ha; + + scsi_scan_host(sh); + + return 0; + +err_out: + free_irq(ha->pcidev->irq, ha); +err_out_sh: + scsi_host_put(sh); + return -1; +} + +/*---------------------------------------------------------------------------*/ +/* Routine Name: ips_remove_device */ +/* */ +/* Routine Description: */ +/* Remove one Adapter ( Hot Plugging ) */ +/*---------------------------------------------------------------------------*/ +static void +ips_remove_device(struct pci_dev *pci_dev) +{ + struct Scsi_Host *sh = pci_get_drvdata(pci_dev); + + pci_set_drvdata(pci_dev, NULL); + + ips_release(sh); + + pci_release_regions(pci_dev); + pci_disable_device(pci_dev); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_module_init */ +/* */ +/* Routine Description: */ +/* function called on module load */ +/****************************************************************************/ +static int __init +ips_module_init(void) +{ +#if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__) + printk(KERN_ERR "ips: This driver has only been tested on the x86/ia64/x86_64 platforms\n"); + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); +#endif + + if (pci_register_driver(&ips_pci_driver) < 0) + return -ENODEV; + ips_driver_template.module = THIS_MODULE; + ips_order_controllers(); + if (!ips_detect(&ips_driver_template)) { + pci_unregister_driver(&ips_pci_driver); + return -ENODEV; + } + register_reboot_notifier(&ips_notifier); + return 0; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_module_exit */ +/* */ +/* Routine Description: */ +/* function called on module unload */ +/****************************************************************************/ +static void __exit +ips_module_exit(void) +{ + pci_unregister_driver(&ips_pci_driver); + unregister_reboot_notifier(&ips_notifier); +} + +module_init(ips_module_init); +module_exit(ips_module_exit); + +/*---------------------------------------------------------------------------*/ +/* Routine Name: ips_insert_device */ +/* */ +/* Routine Description: */ +/* Add One Adapter ( Hot Plug ) */ +/* */ +/* Return Value: */ +/* 0 if Successful, else non-zero */ +/*---------------------------------------------------------------------------*/ +static int +ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent) +{ + int index = -1; + int rc; + + METHOD_TRACE("ips_insert_device", 1); + rc = pci_enable_device(pci_dev); + if (rc) + return rc; + + rc = pci_request_regions(pci_dev, "ips"); + if (rc) + goto err_out; + + rc = ips_init_phase1(pci_dev, &index); + if (rc == SUCCESS) + rc = ips_init_phase2(index); + + if (ips_hotplug) + if (ips_register_scsi(index)) { + ips_free(ips_ha[index]); + rc = -1; + } + + if (rc == SUCCESS) + ips_num_controllers++; + + ips_next_controller = ips_num_controllers; + + if (rc < 0) { + rc = -ENODEV; + goto err_out_regions; + } + + pci_set_drvdata(pci_dev, ips_sh[index]); + return 0; + +err_out_regions: + pci_release_regions(pci_dev); +err_out: + pci_disable_device(pci_dev); + return rc; +} + 
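+/*
+ * Adapter bring-up is split into two phases. ips_init_phase1() below maps
+ * the PCI I/O and memory BARs, allocates the ips_ha_t plus the coherent
+ * DMA buffers (enquiry, adapter status, logical drive info, ioctl area),
+ * picks a 32- or 64-bit DMA mask, resets a Morpheus controller that
+ * appears dead and initializes the card if it is not already initialized.
+ * ips_init_phase2() then installs the interrupt handler, runs ips_hainit()
+ * with a single temporary SCB and allocates the full set of CCBs.
+ * ips_insert_device() above invokes both and, when ips_hotplug is set,
+ * registers the host with the SCSI layer via ips_register_scsi().
+ */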
+/*---------------------------------------------------------------------------*/ +/* Routine Name: ips_init_phase1 */ +/* */ +/* Routine Description: */ +/* Adapter Initialization */ +/* */ +/* Return Value: */ +/* 0 if Successful, else non-zero */ +/*---------------------------------------------------------------------------*/ +static int +ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr) +{ + ips_ha_t *ha; + uint32_t io_addr; + uint32_t mem_addr; + uint32_t io_len; + uint32_t mem_len; + int j; + int index; + dma_addr_t dma_address; + char __iomem *ioremap_ptr; + char __iomem *mem_ptr; + uint32_t IsDead; + + METHOD_TRACE("ips_init_phase1", 1); + index = IPS_MAX_ADAPTERS; + for (j = 0; j < IPS_MAX_ADAPTERS; j++) { + if (ips_ha[j] == NULL) { + index = j; + break; + } + } + + if (index >= IPS_MAX_ADAPTERS) + return -1; + + /* Init MEM/IO addresses to 0 */ + mem_addr = 0; + io_addr = 0; + mem_len = 0; + io_len = 0; + + for (j = 0; j < 2; j++) { + if (!pci_resource_start(pci_dev, j)) + break; + + if (pci_resource_flags(pci_dev, j) & IORESOURCE_IO) { + io_addr = pci_resource_start(pci_dev, j); + io_len = pci_resource_len(pci_dev, j); + } else { + mem_addr = pci_resource_start(pci_dev, j); + mem_len = pci_resource_len(pci_dev, j); + } + } + + /* setup memory mapped area (if applicable) */ + if (mem_addr) { + uint32_t base; + uint32_t offs; + + base = mem_addr & PAGE_MASK; + offs = mem_addr - base; + ioremap_ptr = ioremap(base, PAGE_SIZE); + if (!ioremap_ptr) + return -1; + mem_ptr = ioremap_ptr + offs; + } else { + ioremap_ptr = NULL; + mem_ptr = NULL; + } + + /* found a controller */ + ha = kzalloc(sizeof (ips_ha_t), GFP_KERNEL); + if (ha == NULL) { + IPS_PRINTK(KERN_WARNING, pci_dev, + "Unable to allocate temporary ha struct\n"); + return -1; + } + + ips_sh[index] = NULL; + ips_ha[index] = ha; + ha->active = 1; + + /* Store info in HA structure */ + ha->io_addr = io_addr; + ha->io_len = io_len; + ha->mem_addr = mem_addr; + ha->mem_len = mem_len; + ha->mem_ptr = mem_ptr; + ha->ioremap_ptr = ioremap_ptr; + ha->host_num = (uint32_t) index; + ha->slot_num = PCI_SLOT(pci_dev->devfn); + ha->pcidev = pci_dev; + + /* + * Set the pci_dev's dma_mask. Not all adapters support 64bit + * addressing so don't enable it if the adapter can't support + * it! Also, don't use 64bit addressing if dma addresses + * are guaranteed to be < 4G. 
+ */ + if (sizeof(dma_addr_t) > 4 && IPS_HAS_ENH_SGLIST(ha) && + !dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) { + (ha)->flags |= IPS_HA_ENH_SG; + } else { + if (dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(32)) != 0) { + printk(KERN_WARNING "Unable to set DMA Mask\n"); + return ips_abort_init(ha, index); + } + } + if(ips_cd_boot && !ips_FlashData){ + ips_FlashData = dma_alloc_coherent(&pci_dev->dev, + PAGE_SIZE << 7, &ips_flashbusaddr, GFP_KERNEL); + } + + ha->enq = dma_alloc_coherent(&pci_dev->dev, sizeof (IPS_ENQ), + &ha->enq_busaddr, GFP_KERNEL); + if (!ha->enq) { + IPS_PRINTK(KERN_WARNING, pci_dev, + "Unable to allocate host inquiry structure\n"); + return ips_abort_init(ha, index); + } + + ha->adapt = dma_alloc_coherent(&pci_dev->dev, + sizeof (IPS_ADAPTER) + sizeof (IPS_IO_CMD), + &dma_address, GFP_KERNEL); + if (!ha->adapt) { + IPS_PRINTK(KERN_WARNING, pci_dev, + "Unable to allocate host adapt & dummy structures\n"); + return ips_abort_init(ha, index); + } + ha->adapt->hw_status_start = dma_address; + ha->dummy = (void *) (ha->adapt + 1); + + + + ha->logical_drive_info = dma_alloc_coherent(&pci_dev->dev, + sizeof (IPS_LD_INFO), &dma_address, GFP_KERNEL); + if (!ha->logical_drive_info) { + IPS_PRINTK(KERN_WARNING, pci_dev, + "Unable to allocate logical drive info structure\n"); + return ips_abort_init(ha, index); + } + ha->logical_drive_info_dma_addr = dma_address; + + + ha->conf = kmalloc(sizeof (IPS_CONF), GFP_KERNEL); + + if (!ha->conf) { + IPS_PRINTK(KERN_WARNING, pci_dev, + "Unable to allocate host conf structure\n"); + return ips_abort_init(ha, index); + } + + ha->nvram = kmalloc(sizeof (IPS_NVRAM_P5), GFP_KERNEL); + + if (!ha->nvram) { + IPS_PRINTK(KERN_WARNING, pci_dev, + "Unable to allocate host NVRAM structure\n"); + return ips_abort_init(ha, index); + } + + ha->subsys = kmalloc(sizeof (IPS_SUBSYS), GFP_KERNEL); + + if (!ha->subsys) { + IPS_PRINTK(KERN_WARNING, pci_dev, + "Unable to allocate host subsystem structure\n"); + return ips_abort_init(ha, index); + } + + /* the ioctl buffer is now used during adapter initialization, so its + * successful allocation is now required */ + if (ips_ioctlsize < PAGE_SIZE) + ips_ioctlsize = PAGE_SIZE; + + ha->ioctl_data = dma_alloc_coherent(&pci_dev->dev, ips_ioctlsize, + &ha->ioctl_busaddr, GFP_KERNEL); + ha->ioctl_len = ips_ioctlsize; + if (!ha->ioctl_data) { + IPS_PRINTK(KERN_WARNING, pci_dev, + "Unable to allocate IOCTL data\n"); + return ips_abort_init(ha, index); + } + + /* + * Setup Functions + */ + ips_setup_funclist(ha); + + if ((IPS_IS_MORPHEUS(ha)) || (IPS_IS_MARCO(ha))) { + /* If Morpheus appears dead, reset it */ + IsDead = readl(ha->mem_ptr + IPS_REG_I960_MSG1); + if (IsDead == 0xDEADBEEF) { + ips_reset_morpheus(ha); + } + } + + /* + * Initialize the card if it isn't already + */ + + if (!(*ha->func.isinit) (ha)) { + if (!(*ha->func.init) (ha)) { + /* + * Initialization failed + */ + IPS_PRINTK(KERN_WARNING, pci_dev, + "Unable to initialize controller\n"); + return ips_abort_init(ha, index); + } + } + + *indexPtr = index; + return SUCCESS; +} + +/*---------------------------------------------------------------------------*/ +/* Routine Name: ips_init_phase2 */ +/* */ +/* Routine Description: */ +/* Adapter Initialization Phase 2 */ +/* */ +/* Return Value: */ +/* 0 if Successful, else non-zero */ +/*---------------------------------------------------------------------------*/ +static int +ips_init_phase2(int index) +{ + ips_ha_t *ha; + + ha = ips_ha[index]; + + METHOD_TRACE("ips_init_phase2", 1); + if (!ha->active) { + 
ips_ha[index] = NULL; + return -1; + } + + /* Install the interrupt handler */ + if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "Unable to install interrupt handler\n"); + return ips_abort_init(ha, index); + } + + /* + * Allocate a temporary SCB for initialization + */ + ha->max_cmds = 1; + if (!ips_allocatescbs(ha)) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "Unable to allocate a CCB\n"); + free_irq(ha->pcidev->irq, ha); + return ips_abort_init(ha, index); + } + + if (!ips_hainit(ha)) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "Unable to initialize controller\n"); + free_irq(ha->pcidev->irq, ha); + return ips_abort_init(ha, index); + } + /* Free the temporary SCB */ + ips_deallocatescbs(ha, 1); + + /* allocate CCBs */ + if (!ips_allocatescbs(ha)) { + IPS_PRINTK(KERN_WARNING, ha->pcidev, + "Unable to allocate CCBs\n"); + free_irq(ha->pcidev->irq, ha); + return ips_abort_init(ha, index); + } + + return SUCCESS; +} + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING); +MODULE_VERSION(IPS_VER_STRING); diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h new file mode 100644 index 000000000..65edf000e --- /dev/null +++ b/drivers/scsi/ips.h @@ -0,0 +1,1213 @@ +/*****************************************************************************/ +/* ips.h -- driver for the Adaptec / IBM ServeRAID controller */ +/* */ +/* Written By: Keith Mitchell, IBM Corporation */ +/* Jack Hammer, Adaptec, Inc. */ +/* David Jeffery, Adaptec, Inc. */ +/* */ +/* Copyright (C) 1999 IBM Corporation */ +/* Copyright (C) 2003 Adaptec, Inc. */ +/* */ +/* This program is free software; you can redistribute it and/or modify */ +/* it under the terms of the GNU General Public License as published by */ +/* the Free Software Foundation; either version 2 of the License, or */ +/* (at your option) any later version. */ +/* */ +/* This program is distributed in the hope that it will be useful, */ +/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ +/* GNU General Public License for more details. */ +/* */ +/* NO WARRANTY */ +/* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR */ +/* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT */ +/* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, */ +/* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is */ +/* solely responsible for determining the appropriateness of using and */ +/* distributing the Program and assumes all risks associated with its */ +/* exercise of rights under this Agreement, including but not limited to */ +/* the risks and costs of program errors, damage to or loss of data, */ +/* programs or equipment, and unavailability or interruption of operations. 
*/ +/* */ +/* DISCLAIMER OF LIABILITY */ +/* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY */ +/* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL */ +/* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND */ +/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR */ +/* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE */ +/* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED */ +/* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES */ +/* */ +/* You should have received a copy of the GNU General Public License */ +/* along with this program; if not, write to the Free Software */ +/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +/* */ +/* Bugs/Comments/Suggestions should be mailed to: */ +/* ipslinux@adaptec.com */ +/* */ +/*****************************************************************************/ + +#ifndef _IPS_H_ + #define _IPS_H_ + +#include +#include + #include + + /* + * Some handy macros + */ + #define IPS_HA(x) ((ips_ha_t *) x->hostdata) + #define IPS_COMMAND_ID(ha, scb) (int) (scb - ha->scbs) + #define IPS_IS_TROMBONE(ha) (((ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) && \ + (ha->pcidev->revision >= IPS_REVID_TROMBONE32) && \ + (ha->pcidev->revision <= IPS_REVID_TROMBONE64)) ? 1 : 0) + #define IPS_IS_CLARINET(ha) (((ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) && \ + (ha->pcidev->revision >= IPS_REVID_CLARINETP1) && \ + (ha->pcidev->revision <= IPS_REVID_CLARINETP3)) ? 1 : 0) + #define IPS_IS_MORPHEUS(ha) (ha->pcidev->device == IPS_DEVICEID_MORPHEUS) + #define IPS_IS_MARCO(ha) (ha->pcidev->device == IPS_DEVICEID_MARCO) + #define IPS_USE_I2O_DELIVER(ha) ((IPS_IS_MORPHEUS(ha) || \ + (IPS_IS_TROMBONE(ha) && \ + (ips_force_i2o))) ? 1 : 0) + #define IPS_USE_MEMIO(ha) ((IPS_IS_MORPHEUS(ha) || \ + ((IPS_IS_TROMBONE(ha) || IPS_IS_CLARINET(ha)) && \ + (ips_force_memio))) ? 1 : 0) + + #define IPS_HAS_ENH_SGLIST(ha) (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) + #define IPS_USE_ENH_SGLIST(ha) ((ha)->flags & IPS_HA_ENH_SG) + #define IPS_SGLIST_SIZE(ha) (IPS_USE_ENH_SGLIST(ha) ? \ + sizeof(IPS_ENH_SG_LIST) : sizeof(IPS_STD_SG_LIST)) + + #define IPS_PRINTK(level, pcidev, format, arg...) \ + dev_printk(level , &((pcidev)->dev) , format , ## arg) + + #define MDELAY(n) \ + do { \ + mdelay(n); \ + touch_nmi_watchdog(); \ + } while (0) + + #ifndef min + #define min(x,y) ((x) < (y) ? 
x : y) + #endif + + #ifndef __iomem /* For clean compiles in earlier kernels without __iomem annotations */ + #define __iomem + #endif + + /* + * Adapter address map equates + */ + #define IPS_REG_HISR 0x08 /* Host Interrupt Status Reg */ + #define IPS_REG_CCSAR 0x10 /* Cmd Channel System Addr Reg */ + #define IPS_REG_CCCR 0x14 /* Cmd Channel Control Reg */ + #define IPS_REG_SQHR 0x20 /* Status Q Head Reg */ + #define IPS_REG_SQTR 0x24 /* Status Q Tail Reg */ + #define IPS_REG_SQER 0x28 /* Status Q End Reg */ + #define IPS_REG_SQSR 0x2C /* Status Q Start Reg */ + #define IPS_REG_SCPR 0x05 /* Subsystem control port reg */ + #define IPS_REG_ISPR 0x06 /* interrupt status port reg */ + #define IPS_REG_CBSP 0x07 /* CBSP register */ + #define IPS_REG_FLAP 0x18 /* Flash address port */ + #define IPS_REG_FLDP 0x1C /* Flash data port */ + #define IPS_REG_NDAE 0x38 /* Anaconda 64 NDAE Register */ + #define IPS_REG_I2O_INMSGQ 0x40 /* I2O Inbound Message Queue */ + #define IPS_REG_I2O_OUTMSGQ 0x44 /* I2O Outbound Message Queue */ + #define IPS_REG_I2O_HIR 0x30 /* I2O Interrupt Status */ + #define IPS_REG_I960_IDR 0x20 /* i960 Inbound Doorbell */ + #define IPS_REG_I960_MSG0 0x18 /* i960 Outbound Reg 0 */ + #define IPS_REG_I960_MSG1 0x1C /* i960 Outbound Reg 1 */ + #define IPS_REG_I960_OIMR 0x34 /* i960 Oubound Int Mask Reg */ + + /* + * Adapter register bit equates + */ + #define IPS_BIT_GHI 0x04 /* HISR General Host Interrupt */ + #define IPS_BIT_SQO 0x02 /* HISR Status Q Overflow */ + #define IPS_BIT_SCE 0x01 /* HISR Status Channel Enqueue */ + #define IPS_BIT_SEM 0x08 /* CCCR Semaphore Bit */ + #define IPS_BIT_ILE 0x10 /* CCCR ILE Bit */ + #define IPS_BIT_START_CMD 0x101A /* CCCR Start Command Channel */ + #define IPS_BIT_START_STOP 0x0002 /* CCCR Start/Stop Bit */ + #define IPS_BIT_RST 0x80 /* SCPR Reset Bit */ + #define IPS_BIT_EBM 0x02 /* SCPR Enable Bus Master */ + #define IPS_BIT_EI 0x80 /* HISR Enable Interrupts */ + #define IPS_BIT_OP 0x01 /* OP bit in CBSP */ + #define IPS_BIT_I2O_OPQI 0x08 /* General Host Interrupt */ + #define IPS_BIT_I960_MSG0I 0x01 /* Message Register 0 Interrupt*/ + #define IPS_BIT_I960_MSG1I 0x02 /* Message Register 1 Interrupt*/ + + /* + * Adapter Command ID Equates + */ + #define IPS_CMD_GET_LD_INFO 0x19 + #define IPS_CMD_GET_SUBSYS 0x40 + #define IPS_CMD_READ_CONF 0x38 + #define IPS_CMD_RW_NVRAM_PAGE 0xBC + #define IPS_CMD_READ 0x02 + #define IPS_CMD_WRITE 0x03 + #define IPS_CMD_FFDC 0xD7 + #define IPS_CMD_ENQUIRY 0x05 + #define IPS_CMD_FLUSH 0x0A + #define IPS_CMD_READ_SG 0x82 + #define IPS_CMD_WRITE_SG 0x83 + #define IPS_CMD_DCDB 0x04 + #define IPS_CMD_DCDB_SG 0x84 + #define IPS_CMD_EXTENDED_DCDB 0x95 + #define IPS_CMD_EXTENDED_DCDB_SG 0x96 + #define IPS_CMD_CONFIG_SYNC 0x58 + #define IPS_CMD_ERROR_TABLE 0x17 + #define IPS_CMD_DOWNLOAD 0x20 + #define IPS_CMD_RW_BIOSFW 0x22 + #define IPS_CMD_GET_VERSION_INFO 0xC6 + #define IPS_CMD_RESET_CHANNEL 0x1A + + /* + * Adapter Equates + */ + #define IPS_CSL 0xFF + #define IPS_POCL 0x30 + #define IPS_NORM_STATE 0x00 + #define IPS_MAX_ADAPTER_TYPES 3 + #define IPS_MAX_ADAPTERS 16 + #define IPS_MAX_IOCTL 1 + #define IPS_MAX_IOCTL_QUEUE 8 + #define IPS_MAX_QUEUE 128 + #define IPS_BLKSIZE 512 + #define IPS_MAX_SG 17 + #define IPS_MAX_LD 8 + #define IPS_MAX_CHANNELS 4 + #define IPS_MAX_TARGETS 15 + #define IPS_MAX_CHUNKS 16 + #define IPS_MAX_CMDS 128 + #define IPS_MAX_XFER 0x10000 + #define IPS_NVRAM_P5_SIG 0xFFDDBB99 + #define IPS_MAX_POST_BYTES 0x02 + #define IPS_MAX_CONFIG_BYTES 0x02 + #define IPS_GOOD_POST_STATUS 0x80 + 
#define IPS_SEM_TIMEOUT 2000 + #define IPS_IOCTL_COMMAND 0x0D + #define IPS_INTR_ON 0 + #define IPS_INTR_IORL 1 + #define IPS_FFDC 99 + #define IPS_ADAPTER_ID 0xF + #define IPS_VENDORID_IBM 0x1014 + #define IPS_VENDORID_ADAPTEC 0x9005 + #define IPS_DEVICEID_COPPERHEAD 0x002E + #define IPS_DEVICEID_MORPHEUS 0x01BD + #define IPS_DEVICEID_MARCO 0x0250 + #define IPS_SUBDEVICEID_4M 0x01BE + #define IPS_SUBDEVICEID_4L 0x01BF + #define IPS_SUBDEVICEID_4MX 0x0208 + #define IPS_SUBDEVICEID_4LX 0x020E + #define IPS_SUBDEVICEID_5I2 0x0259 + #define IPS_SUBDEVICEID_5I1 0x0258 + #define IPS_SUBDEVICEID_6M 0x0279 + #define IPS_SUBDEVICEID_6I 0x028C + #define IPS_SUBDEVICEID_7k 0x028E + #define IPS_SUBDEVICEID_7M 0x028F + #define IPS_IOCTL_SIZE 8192 + #define IPS_STATUS_SIZE 4 + #define IPS_STATUS_Q_SIZE (IPS_MAX_CMDS+1) * IPS_STATUS_SIZE + #define IPS_IMAGE_SIZE 500 * 1024 + #define IPS_MEMMAP_SIZE 128 + #define IPS_ONE_MSEC 1 + #define IPS_ONE_SEC 1000 + + /* + * Geometry Settings + */ + #define IPS_COMP_HEADS 128 + #define IPS_COMP_SECTORS 32 + #define IPS_NORM_HEADS 254 + #define IPS_NORM_SECTORS 63 + + /* + * Adapter Basic Status Codes + */ + #define IPS_BASIC_STATUS_MASK 0xFF + #define IPS_GSC_STATUS_MASK 0x0F + #define IPS_CMD_SUCCESS 0x00 + #define IPS_CMD_RECOVERED_ERROR 0x01 + #define IPS_INVAL_OPCO 0x03 + #define IPS_INVAL_CMD_BLK 0x04 + #define IPS_INVAL_PARM_BLK 0x05 + #define IPS_BUSY 0x08 + #define IPS_CMD_CMPLT_WERROR 0x0C + #define IPS_LD_ERROR 0x0D + #define IPS_CMD_TIMEOUT 0x0E + #define IPS_PHYS_DRV_ERROR 0x0F + + /* + * Adapter Extended Status Equates + */ + #define IPS_ERR_SEL_TO 0xF0 + #define IPS_ERR_OU_RUN 0xF2 + #define IPS_ERR_HOST_RESET 0xF7 + #define IPS_ERR_DEV_RESET 0xF8 + #define IPS_ERR_RECOVERY 0xFC + #define IPS_ERR_CKCOND 0xFF + + /* + * Operating System Defines + */ + #define IPS_OS_WINDOWS_NT 0x01 + #define IPS_OS_NETWARE 0x02 + #define IPS_OS_OPENSERVER 0x03 + #define IPS_OS_UNIXWARE 0x04 + #define IPS_OS_SOLARIS 0x05 + #define IPS_OS_OS2 0x06 + #define IPS_OS_LINUX 0x07 + #define IPS_OS_FREEBSD 0x08 + + /* + * Adapter Revision ID's + */ + #define IPS_REVID_SERVERAID 0x02 + #define IPS_REVID_NAVAJO 0x03 + #define IPS_REVID_SERVERAID2 0x04 + #define IPS_REVID_CLARINETP1 0x05 + #define IPS_REVID_CLARINETP2 0x07 + #define IPS_REVID_CLARINETP3 0x0D + #define IPS_REVID_TROMBONE32 0x0F + #define IPS_REVID_TROMBONE64 0x10 + + /* + * NVRAM Page 5 Adapter Defines + */ + #define IPS_ADTYPE_SERVERAID 0x01 + #define IPS_ADTYPE_SERVERAID2 0x02 + #define IPS_ADTYPE_NAVAJO 0x03 + #define IPS_ADTYPE_KIOWA 0x04 + #define IPS_ADTYPE_SERVERAID3 0x05 + #define IPS_ADTYPE_SERVERAID3L 0x06 + #define IPS_ADTYPE_SERVERAID4H 0x07 + #define IPS_ADTYPE_SERVERAID4M 0x08 + #define IPS_ADTYPE_SERVERAID4L 0x09 + #define IPS_ADTYPE_SERVERAID4MX 0x0A + #define IPS_ADTYPE_SERVERAID4LX 0x0B + #define IPS_ADTYPE_SERVERAID5I2 0x0C + #define IPS_ADTYPE_SERVERAID5I1 0x0D + #define IPS_ADTYPE_SERVERAID6M 0x0E + #define IPS_ADTYPE_SERVERAID6I 0x0F + #define IPS_ADTYPE_SERVERAID7t 0x10 + #define IPS_ADTYPE_SERVERAID7k 0x11 + #define IPS_ADTYPE_SERVERAID7M 0x12 + + /* + * Adapter Command/Status Packet Definitions + */ + #define IPS_SUCCESS 0x01 /* Successfully completed */ + #define IPS_SUCCESS_IMM 0x02 /* Success - Immediately */ + #define IPS_FAILURE 0x04 /* Completed with Error */ + + /* + * Logical Drive Equates + */ + #define IPS_LD_OFFLINE 0x02 + #define IPS_LD_OKAY 0x03 + #define IPS_LD_FREE 0x00 + #define IPS_LD_SYS 0x06 + #define IPS_LD_CRS 0x24 + + /* + * DCDB Table Equates + */ + #define 
IPS_NO_DISCONNECT 0x00 + #define IPS_DISCONNECT_ALLOWED 0x80 + #define IPS_NO_AUTO_REQSEN 0x40 + #define IPS_DATA_NONE 0x00 + #define IPS_DATA_UNK 0x00 + #define IPS_DATA_IN 0x01 + #define IPS_DATA_OUT 0x02 + #define IPS_TRANSFER64K 0x08 + #define IPS_NOTIMEOUT 0x00 + #define IPS_TIMEOUT10 0x10 + #define IPS_TIMEOUT60 0x20 + #define IPS_TIMEOUT20M 0x30 + + /* + * SCSI Inquiry Data Flags + */ + #define IPS_SCSI_INQ_TYPE_DASD 0x00 + #define IPS_SCSI_INQ_TYPE_PROCESSOR 0x03 + #define IPS_SCSI_INQ_LU_CONNECTED 0x00 + #define IPS_SCSI_INQ_RD_REV2 0x02 + #define IPS_SCSI_INQ_REV2 0x02 + #define IPS_SCSI_INQ_REV3 0x03 + #define IPS_SCSI_INQ_Address16 0x01 + #define IPS_SCSI_INQ_Address32 0x02 + #define IPS_SCSI_INQ_MedChanger 0x08 + #define IPS_SCSI_INQ_MultiPort 0x10 + #define IPS_SCSI_INQ_EncServ 0x40 + #define IPS_SCSI_INQ_SoftReset 0x01 + #define IPS_SCSI_INQ_CmdQue 0x02 + #define IPS_SCSI_INQ_Linked 0x08 + #define IPS_SCSI_INQ_Sync 0x10 + #define IPS_SCSI_INQ_WBus16 0x20 + #define IPS_SCSI_INQ_WBus32 0x40 + #define IPS_SCSI_INQ_RelAdr 0x80 + + /* + * SCSI Request Sense Data Flags + */ + #define IPS_SCSI_REQSEN_VALID 0x80 + #define IPS_SCSI_REQSEN_CURRENT_ERR 0x70 + #define IPS_SCSI_REQSEN_NO_SENSE 0x00 + + /* + * SCSI Mode Page Equates + */ + #define IPS_SCSI_MP3_SoftSector 0x01 + #define IPS_SCSI_MP3_HardSector 0x02 + #define IPS_SCSI_MP3_Removeable 0x04 + #define IPS_SCSI_MP3_AllocateSurface 0x08 + + /* + * HA Flags + */ + + #define IPS_HA_ENH_SG 0x1 + + /* + * SCB Flags + */ + #define IPS_SCB_MAP_SG 0x00008 + #define IPS_SCB_MAP_SINGLE 0X00010 + + /* + * Passthru stuff + */ + #define IPS_COPPUSRCMD (('C'<<8) | 65) + #define IPS_COPPIOCCMD (('C'<<8) | 66) + #define IPS_NUMCTRLS (('C'<<8) | 68) + #define IPS_CTRLINFO (('C'<<8) | 69) + + /* flashing defines */ + #define IPS_FW_IMAGE 0x00 + #define IPS_BIOS_IMAGE 0x01 + #define IPS_WRITE_FW 0x01 + #define IPS_WRITE_BIOS 0x02 + #define IPS_ERASE_BIOS 0x03 + #define IPS_BIOS_HEADER 0xC0 + + /* time oriented stuff */ + #define IPS_SECS_8HOURS 28800 + + /* + * Scsi_Host Template + */ + static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int geom[]); + static int ips_slave_configure(struct scsi_device *SDptr); + +/* + * Raid Command Formats + */ +typedef struct { + uint8_t op_code; + uint8_t command_id; + uint8_t log_drv; + uint8_t sg_count; + uint32_t lba; + uint32_t sg_addr; + uint16_t sector_count; + uint8_t segment_4G; + uint8_t enhanced_sg; + uint32_t ccsar; + uint32_t cccr; +} IPS_IO_CMD, *PIPS_IO_CMD; + +typedef struct { + uint8_t op_code; + uint8_t command_id; + uint16_t reserved; + uint32_t reserved2; + uint32_t buffer_addr; + uint32_t reserved3; + uint32_t ccsar; + uint32_t cccr; +} IPS_LD_CMD, *PIPS_LD_CMD; + +typedef struct { + uint8_t op_code; + uint8_t command_id; + uint8_t reserved; + uint8_t reserved2; + uint32_t reserved3; + uint32_t buffer_addr; + uint32_t reserved4; +} IPS_IOCTL_CMD, *PIPS_IOCTL_CMD; + +typedef struct { + uint8_t op_code; + uint8_t command_id; + uint8_t channel; + uint8_t reserved3; + uint8_t reserved4; + uint8_t reserved5; + uint8_t reserved6; + uint8_t reserved7; + uint8_t reserved8; + uint8_t reserved9; + uint8_t reserved10; + uint8_t reserved11; + uint8_t reserved12; + uint8_t reserved13; + uint8_t reserved14; + uint8_t adapter_flag; +} IPS_RESET_CMD, *PIPS_RESET_CMD; + +typedef struct { + uint8_t op_code; + uint8_t command_id; + uint16_t reserved; + uint32_t reserved2; + uint32_t dcdb_address; + uint16_t reserved3; + uint8_t segment_4G; + uint8_t enhanced_sg; + 
uint32_t ccsar; + uint32_t cccr; +} IPS_DCDB_CMD, *PIPS_DCDB_CMD; + +typedef struct { + uint8_t op_code; + uint8_t command_id; + uint8_t channel; + uint8_t source_target; + uint32_t reserved; + uint32_t reserved2; + uint32_t reserved3; + uint32_t ccsar; + uint32_t cccr; +} IPS_CS_CMD, *PIPS_CS_CMD; + +typedef struct { + uint8_t op_code; + uint8_t command_id; + uint8_t log_drv; + uint8_t control; + uint32_t reserved; + uint32_t reserved2; + uint32_t reserved3; + uint32_t ccsar; + uint32_t cccr; +} IPS_US_CMD, *PIPS_US_CMD; + +typedef struct { + uint8_t op_code; + uint8_t command_id; + uint8_t reserved; + uint8_t state; + uint32_t reserved2; + uint32_t reserved3; + uint32_t reserved4; + uint32_t ccsar; + uint32_t cccr; +} IPS_FC_CMD, *PIPS_FC_CMD; + +typedef struct { + uint8_t op_code; + uint8_t command_id; + uint8_t reserved; + uint8_t desc; + uint32_t reserved2; + uint32_t buffer_addr; + uint32_t reserved3; + uint32_t ccsar; + uint32_t cccr; +} IPS_STATUS_CMD, *PIPS_STATUS_CMD; + +typedef struct { + uint8_t op_code; + uint8_t command_id; + uint8_t page; + uint8_t write; + uint32_t reserved; + uint32_t buffer_addr; + uint32_t reserved2; + uint32_t ccsar; + uint32_t cccr; +} IPS_NVRAM_CMD, *PIPS_NVRAM_CMD; + +typedef struct +{ + uint8_t op_code; + uint8_t command_id; + uint16_t reserved; + uint32_t count; + uint32_t buffer_addr; + uint32_t reserved2; +} IPS_VERSION_INFO, *PIPS_VERSION_INFO; + +typedef struct { + uint8_t op_code; + uint8_t command_id; + uint8_t reset_count; + uint8_t reset_type; + uint8_t second; + uint8_t minute; + uint8_t hour; + uint8_t day; + uint8_t reserved1[4]; + uint8_t month; + uint8_t yearH; + uint8_t yearL; + uint8_t reserved2; +} IPS_FFDC_CMD, *PIPS_FFDC_CMD; + +typedef struct { + uint8_t op_code; + uint8_t command_id; + uint8_t type; + uint8_t direction; + uint32_t count; + uint32_t buffer_addr; + uint8_t total_packets; + uint8_t packet_num; + uint16_t reserved; +} IPS_FLASHFW_CMD, *PIPS_FLASHFW_CMD; + +typedef struct { + uint8_t op_code; + uint8_t command_id; + uint8_t type; + uint8_t direction; + uint32_t count; + uint32_t buffer_addr; + uint32_t offset; +} IPS_FLASHBIOS_CMD, *PIPS_FLASHBIOS_CMD; + +typedef union { + IPS_IO_CMD basic_io; + IPS_LD_CMD logical_info; + IPS_IOCTL_CMD ioctl_info; + IPS_DCDB_CMD dcdb; + IPS_CS_CMD config_sync; + IPS_US_CMD unlock_stripe; + IPS_FC_CMD flush_cache; + IPS_STATUS_CMD status; + IPS_NVRAM_CMD nvram; + IPS_FFDC_CMD ffdc; + IPS_FLASHFW_CMD flashfw; + IPS_FLASHBIOS_CMD flashbios; + IPS_VERSION_INFO version_info; + IPS_RESET_CMD reset; +} IPS_HOST_COMMAND, *PIPS_HOST_COMMAND; + +typedef struct { + uint8_t logical_id; + uint8_t reserved; + uint8_t raid_level; + uint8_t state; + uint32_t sector_count; +} IPS_DRIVE_INFO, *PIPS_DRIVE_INFO; + +typedef struct { + uint8_t no_of_log_drive; + uint8_t reserved[3]; + IPS_DRIVE_INFO drive_info[IPS_MAX_LD]; +} IPS_LD_INFO, *PIPS_LD_INFO; + +typedef struct { + uint8_t device_address; + uint8_t cmd_attribute; + uint16_t transfer_length; + uint32_t buffer_pointer; + uint8_t cdb_length; + uint8_t sense_length; + uint8_t sg_count; + uint8_t reserved; + uint8_t scsi_cdb[12]; + uint8_t sense_info[64]; + uint8_t scsi_status; + uint8_t reserved2[3]; +} IPS_DCDB_TABLE, *PIPS_DCDB_TABLE; + +typedef struct { + uint8_t device_address; + uint8_t cmd_attribute; + uint8_t cdb_length; + uint8_t reserved_for_LUN; + uint32_t transfer_length; + uint32_t buffer_pointer; + uint16_t sg_count; + uint8_t sense_length; + uint8_t scsi_status; + uint32_t reserved; + uint8_t scsi_cdb[16]; + uint8_t sense_info[56]; +} 
IPS_DCDB_TABLE_TAPE, *PIPS_DCDB_TABLE_TAPE; + +typedef union { + struct { + volatile uint8_t reserved; + volatile uint8_t command_id; + volatile uint8_t basic_status; + volatile uint8_t extended_status; + } fields; + + volatile uint32_t value; +} IPS_STATUS, *PIPS_STATUS; + +typedef struct { + IPS_STATUS status[IPS_MAX_CMDS + 1]; + volatile PIPS_STATUS p_status_start; + volatile PIPS_STATUS p_status_end; + volatile PIPS_STATUS p_status_tail; + volatile uint32_t hw_status_start; + volatile uint32_t hw_status_tail; +} IPS_ADAPTER, *PIPS_ADAPTER; + +typedef struct { + uint8_t ucLogDriveCount; + uint8_t ucMiscFlag; + uint8_t ucSLTFlag; + uint8_t ucBSTFlag; + uint8_t ucPwrChgCnt; + uint8_t ucWrongAdrCnt; + uint8_t ucUnidentCnt; + uint8_t ucNVramDevChgCnt; + uint8_t CodeBlkVersion[8]; + uint8_t BootBlkVersion[8]; + uint32_t ulDriveSize[IPS_MAX_LD]; + uint8_t ucConcurrentCmdCount; + uint8_t ucMaxPhysicalDevices; + uint16_t usFlashRepgmCount; + uint8_t ucDefunctDiskCount; + uint8_t ucRebuildFlag; + uint8_t ucOfflineLogDrvCount; + uint8_t ucCriticalDrvCount; + uint16_t usConfigUpdateCount; + uint8_t ucBlkFlag; + uint8_t reserved; + uint16_t usAddrDeadDisk[IPS_MAX_CHANNELS * (IPS_MAX_TARGETS + 1)]; +} IPS_ENQ, *PIPS_ENQ; + +typedef struct { + uint8_t ucInitiator; + uint8_t ucParameters; + uint8_t ucMiscFlag; + uint8_t ucState; + uint32_t ulBlockCount; + uint8_t ucDeviceId[28]; +} IPS_DEVSTATE, *PIPS_DEVSTATE; + +typedef struct { + uint8_t ucChn; + uint8_t ucTgt; + uint16_t ucReserved; + uint32_t ulStartSect; + uint32_t ulNoOfSects; +} IPS_CHUNK, *PIPS_CHUNK; + +typedef struct { + uint16_t ucUserField; + uint8_t ucState; + uint8_t ucRaidCacheParam; + uint8_t ucNoOfChunkUnits; + uint8_t ucStripeSize; + uint8_t ucParams; + uint8_t ucReserved; + uint32_t ulLogDrvSize; + IPS_CHUNK chunk[IPS_MAX_CHUNKS]; +} IPS_LD, *PIPS_LD; + +typedef struct { + uint8_t board_disc[8]; + uint8_t processor[8]; + uint8_t ucNoChanType; + uint8_t ucNoHostIntType; + uint8_t ucCompression; + uint8_t ucNvramType; + uint32_t ulNvramSize; +} IPS_HARDWARE, *PIPS_HARDWARE; + +typedef struct { + uint8_t ucLogDriveCount; + uint8_t ucDateD; + uint8_t ucDateM; + uint8_t ucDateY; + uint8_t init_id[4]; + uint8_t host_id[12]; + uint8_t time_sign[8]; + uint32_t UserOpt; + uint16_t user_field; + uint8_t ucRebuildRate; + uint8_t ucReserve; + IPS_HARDWARE hardware_disc; + IPS_LD logical_drive[IPS_MAX_LD]; + IPS_DEVSTATE dev[IPS_MAX_CHANNELS][IPS_MAX_TARGETS+1]; + uint8_t reserved[512]; +} IPS_CONF, *PIPS_CONF; + +typedef struct { + uint32_t signature; + uint8_t reserved1; + uint8_t adapter_slot; + uint16_t adapter_type; + uint8_t ctrl_bios[8]; + uint8_t versioning; /* 1 = Versioning Supported, else 0 */ + uint8_t version_mismatch; /* 1 = Versioning MisMatch, else 0 */ + uint8_t reserved2; + uint8_t operating_system; + uint8_t driver_high[4]; + uint8_t driver_low[4]; + uint8_t BiosCompatibilityID[8]; + uint8_t ReservedForOS2[8]; + uint8_t bios_high[4]; /* Adapter's Flashed BIOS Version */ + uint8_t bios_low[4]; + uint8_t adapter_order[16]; /* BIOS Telling us the Sort Order */ + uint8_t Filler[60]; +} IPS_NVRAM_P5, *PIPS_NVRAM_P5; + +/*--------------------------------------------------------------------------*/ +/* Data returned from a GetVersion Command */ +/*--------------------------------------------------------------------------*/ + + /* SubSystem Parameter[4] */ +#define IPS_GET_VERSION_SUPPORT 0x00018000 /* Mask for Versioning Support */ + +typedef struct +{ + uint32_t revision; + uint8_t bootBlkVersion[32]; + uint8_t 
bootBlkAttributes[4]; + uint8_t codeBlkVersion[32]; + uint8_t biosVersion[32]; + uint8_t biosAttributes[4]; + uint8_t compatibilityId[32]; + uint8_t reserved[4]; +} IPS_VERSION_DATA; + + +typedef struct _IPS_SUBSYS { + uint32_t param[128]; +} IPS_SUBSYS, *PIPS_SUBSYS; + +/** + ** SCSI Structures + **/ + +/* + * Inquiry Data Format + */ +typedef struct { + uint8_t DeviceType; + uint8_t DeviceTypeQualifier; + uint8_t Version; + uint8_t ResponseDataFormat; + uint8_t AdditionalLength; + uint8_t Reserved; + uint8_t Flags[2]; + uint8_t VendorId[8]; + uint8_t ProductId[16]; + uint8_t ProductRevisionLevel[4]; + uint8_t Reserved2; /* Provides NULL terminator to name */ +} IPS_SCSI_INQ_DATA, *PIPS_SCSI_INQ_DATA; + +/* + * Read Capacity Data Format + */ +typedef struct { + uint32_t lba; + uint32_t len; +} IPS_SCSI_CAPACITY; + +/* + * Request Sense Data Format + */ +typedef struct { + uint8_t ResponseCode; + uint8_t SegmentNumber; + uint8_t Flags; + uint8_t Information[4]; + uint8_t AdditionalLength; + uint8_t CommandSpecific[4]; + uint8_t AdditionalSenseCode; + uint8_t AdditionalSenseCodeQual; + uint8_t FRUCode; + uint8_t SenseKeySpecific[3]; +} IPS_SCSI_REQSEN; + +/* + * Sense Data Format - Page 3 + */ +typedef struct { + uint8_t PageCode; + uint8_t PageLength; + uint16_t TracksPerZone; + uint16_t AltSectorsPerZone; + uint16_t AltTracksPerZone; + uint16_t AltTracksPerVolume; + uint16_t SectorsPerTrack; + uint16_t BytesPerSector; + uint16_t Interleave; + uint16_t TrackSkew; + uint16_t CylinderSkew; + uint8_t flags; + uint8_t reserved[3]; +} IPS_SCSI_MODE_PAGE3; + +/* + * Sense Data Format - Page 4 + */ +typedef struct { + uint8_t PageCode; + uint8_t PageLength; + uint16_t CylindersHigh; + uint8_t CylindersLow; + uint8_t Heads; + uint16_t WritePrecompHigh; + uint8_t WritePrecompLow; + uint16_t ReducedWriteCurrentHigh; + uint8_t ReducedWriteCurrentLow; + uint16_t StepRate; + uint16_t LandingZoneHigh; + uint8_t LandingZoneLow; + uint8_t flags; + uint8_t RotationalOffset; + uint8_t Reserved; + uint16_t MediumRotationRate; + uint8_t Reserved2[2]; +} IPS_SCSI_MODE_PAGE4; + +/* + * Sense Data Format - Page 8 + */ +typedef struct { + uint8_t PageCode; + uint8_t PageLength; + uint8_t flags; + uint8_t RetentPrio; + uint16_t DisPrefetchLen; + uint16_t MinPrefetchLen; + uint16_t MaxPrefetchLen; + uint16_t MaxPrefetchCeiling; +} IPS_SCSI_MODE_PAGE8; + +/* + * Sense Data Format - Block Descriptor (DASD) + */ +typedef struct { + uint32_t NumberOfBlocks; + uint8_t DensityCode; + uint16_t BlockLengthHigh; + uint8_t BlockLengthLow; +} IPS_SCSI_MODE_PAGE_BLKDESC; + +/* + * Sense Data Format - Mode Page Header + */ +typedef struct { + uint8_t DataLength; + uint8_t MediumType; + uint8_t Reserved; + uint8_t BlockDescLength; +} IPS_SCSI_MODE_PAGE_HEADER; + +typedef struct { + IPS_SCSI_MODE_PAGE_HEADER hdr; + IPS_SCSI_MODE_PAGE_BLKDESC blkdesc; + + union { + IPS_SCSI_MODE_PAGE3 pg3; + IPS_SCSI_MODE_PAGE4 pg4; + IPS_SCSI_MODE_PAGE8 pg8; + } pdata; +} IPS_SCSI_MODE_PAGE_DATA; + +/* + * Scatter Gather list format + */ +typedef struct ips_sglist { + uint32_t address; + uint32_t length; +} IPS_STD_SG_LIST; + +typedef struct ips_enh_sglist { + uint32_t address_lo; + uint32_t address_hi; + uint32_t length; + uint32_t reserved; +} IPS_ENH_SG_LIST; + +typedef union { + void *list; + IPS_STD_SG_LIST *std_list; + IPS_ENH_SG_LIST *enh_list; +} IPS_SG_LIST; + +typedef struct { + char *option_name; + int *option_flag; + int option_value; +} IPS_OPTION; + +/* + * Status Info + */ +typedef struct ips_stat { + uint32_t residue_len; + void 
*scb_addr; + uint8_t padding[12 - sizeof(void *)]; +} ips_stat_t; + +/* + * SCB Queue Format + */ +typedef struct ips_scb_queue { + struct ips_scb *head; + struct ips_scb *tail; + int count; +} ips_scb_queue_t; + +/* + * Wait queue_format + */ +typedef struct ips_wait_queue { + struct scsi_cmnd *head; + struct scsi_cmnd *tail; + int count; +} ips_wait_queue_entry_t; + +typedef struct ips_copp_wait_item { + struct scsi_cmnd *scsi_cmd; + struct ips_copp_wait_item *next; +} ips_copp_wait_item_t; + +typedef struct ips_copp_queue { + struct ips_copp_wait_item *head; + struct ips_copp_wait_item *tail; + int count; +} ips_copp_queue_t; + +/* forward decl for host structure */ +struct ips_ha; + +typedef struct { + int (*reset)(struct ips_ha *); + int (*issue)(struct ips_ha *, struct ips_scb *); + int (*isinit)(struct ips_ha *); + int (*isintr)(struct ips_ha *); + int (*init)(struct ips_ha *); + int (*erasebios)(struct ips_ha *); + int (*programbios)(struct ips_ha *, char *, uint32_t, uint32_t); + int (*verifybios)(struct ips_ha *, char *, uint32_t, uint32_t); + void (*statinit)(struct ips_ha *); + int (*intr)(struct ips_ha *); + void (*enableint)(struct ips_ha *); + uint32_t (*statupd)(struct ips_ha *); +} ips_hw_func_t; + +typedef struct ips_ha { + uint8_t ha_id[IPS_MAX_CHANNELS+1]; + uint32_t dcdb_active[IPS_MAX_CHANNELS]; + uint32_t io_addr; /* Base I/O address */ + uint8_t ntargets; /* Number of targets */ + uint8_t nbus; /* Number of buses */ + uint8_t nlun; /* Number of Luns */ + uint16_t ad_type; /* Adapter type */ + uint16_t host_num; /* Adapter number */ + uint32_t max_xfer; /* Maximum Xfer size */ + uint32_t max_cmds; /* Max concurrent commands */ + uint32_t num_ioctl; /* Number of Ioctls */ + ips_stat_t sp; /* Status packer pointer */ + struct ips_scb *scbs; /* Array of all CCBS */ + struct ips_scb *scb_freelist; /* SCB free list */ + ips_wait_queue_entry_t scb_waitlist; /* Pending SCB list */ + ips_copp_queue_t copp_waitlist; /* Pending PT list */ + ips_scb_queue_t scb_activelist; /* Active SCB list */ + IPS_IO_CMD *dummy; /* dummy command */ + IPS_ADAPTER *adapt; /* Adapter status area */ + IPS_LD_INFO *logical_drive_info; /* Adapter Logical Drive Info */ + dma_addr_t logical_drive_info_dma_addr; /* Logical Drive Info DMA Address */ + IPS_ENQ *enq; /* Adapter Enquiry data */ + IPS_CONF *conf; /* Adapter config data */ + IPS_NVRAM_P5 *nvram; /* NVRAM page 5 data */ + IPS_SUBSYS *subsys; /* Subsystem parameters */ + char *ioctl_data; /* IOCTL data area */ + uint32_t ioctl_datasize; /* IOCTL data size */ + uint32_t cmd_in_progress; /* Current command in progress*/ + int flags; /* */ + uint8_t waitflag; /* are we waiting for cmd */ + uint8_t active; + int ioctl_reset; /* IOCTL Requested Reset Flag */ + uint16_t reset_count; /* number of resets */ + time64_t last_ffdc; /* last time we sent ffdc info*/ + uint8_t slot_num; /* PCI Slot Number */ + int ioctl_len; /* size of ioctl buffer */ + dma_addr_t ioctl_busaddr; /* dma address of ioctl buffer*/ + uint8_t bios_version[8]; /* BIOS Revision */ + uint32_t mem_addr; /* Memory mapped address */ + uint32_t io_len; /* Size of IO Address */ + uint32_t mem_len; /* Size of memory address */ + char __iomem *mem_ptr; /* Memory mapped Ptr */ + char __iomem *ioremap_ptr;/* ioremapped memory pointer */ + ips_hw_func_t func; /* hw function pointers */ + struct pci_dev *pcidev; /* PCI device handle */ + char *flash_data; /* Save Area for flash data */ + int flash_len; /* length of flash buffer */ + u32 flash_datasize; /* Save Area for flash data size */ + 
dma_addr_t flash_busaddr; /* dma address of flash buffer*/ + dma_addr_t enq_busaddr; /* dma address of enq struct */ + uint8_t requires_esl; /* Requires an EraseStripeLock */ +} ips_ha_t; + +typedef void (*ips_scb_callback) (ips_ha_t *, struct ips_scb *); + +/* + * SCB Format + */ +typedef struct ips_scb { + IPS_HOST_COMMAND cmd; + IPS_DCDB_TABLE dcdb; + uint8_t target_id; + uint8_t bus; + uint8_t lun; + uint8_t cdb[12]; + uint32_t scb_busaddr; + uint32_t old_data_busaddr; // Obsolete, but kept for old utility compatibility + uint32_t timeout; + uint8_t basic_status; + uint8_t extended_status; + uint8_t breakup; + uint8_t sg_break; + uint32_t data_len; + uint32_t sg_len; + uint32_t flags; + uint32_t op_code; + IPS_SG_LIST sg_list; + struct scsi_cmnd *scsi_cmd; + struct ips_scb *q_next; + ips_scb_callback callback; + uint32_t sg_busaddr; + int sg_count; + dma_addr_t data_busaddr; +} ips_scb_t; + +typedef struct ips_scb_pt { + IPS_HOST_COMMAND cmd; + IPS_DCDB_TABLE dcdb; + uint8_t target_id; + uint8_t bus; + uint8_t lun; + uint8_t cdb[12]; + uint32_t scb_busaddr; + uint32_t data_busaddr; + uint32_t timeout; + uint8_t basic_status; + uint8_t extended_status; + uint16_t breakup; + uint32_t data_len; + uint32_t sg_len; + uint32_t flags; + uint32_t op_code; + IPS_SG_LIST *sg_list; + struct scsi_cmnd *scsi_cmd; + struct ips_scb *q_next; + ips_scb_callback callback; +} ips_scb_pt_t; + +/* + * Passthru Command Format + */ +typedef struct { + uint8_t CoppID[4]; + uint32_t CoppCmd; + uint32_t PtBuffer; + uint8_t *CmdBuffer; + uint32_t CmdBSize; + ips_scb_pt_t CoppCP; + uint32_t TimeOut; + uint8_t BasicStatus; + uint8_t ExtendedStatus; + uint8_t AdapterType; + uint8_t reserved; +} ips_passthru_t; + +#endif + +/* The Version Information below gets created by SED during the build process. */ +/* Do not modify the next line; it's what SED is looking for to do the insert. */ +/* Version Info */ +/************************************************************************* +* +* VERSION.H -- version numbers and copyright notices in various formats +* +*************************************************************************/ + +#define IPS_VER_MAJOR 7 +#define IPS_VER_MAJOR_STRING __stringify(IPS_VER_MAJOR) +#define IPS_VER_MINOR 12 +#define IPS_VER_MINOR_STRING __stringify(IPS_VER_MINOR) +#define IPS_VER_BUILD 05 +#define IPS_VER_BUILD_STRING __stringify(IPS_VER_BUILD) +#define IPS_VER_STRING IPS_VER_MAJOR_STRING "." \ + IPS_VER_MINOR_STRING "." IPS_VER_BUILD_STRING +#define IPS_RELEASE_ID 0x00020000 +#define IPS_BUILD_IDENT 761 +#define IPS_LEGALCOPYRIGHT_STRING "(C) Copyright IBM Corp. 1994, 2002. All Rights Reserved." +#define IPS_ADAPTECCOPYRIGHT_STRING "(c) Copyright Adaptec, Inc. 2002 to 2004. All Rights Reserved." +#define IPS_DELLCOPYRIGHT_STRING "(c) Copyright Dell 2004. All Rights Reserved." +#define IPS_NT_LEGALCOPYRIGHT_STRING "(C) Copyright IBM Corp. 1994, 2002." 
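+
+/*
+ * Illustrative note only: with the values above, IPS_VER_STRING expands
+ * through __stringify() into adjacent string literals that the compiler
+ * concatenates:
+ *
+ *   "7" "." "12" "." "05"   ->   "7.12.05"
+ *
+ * so a hypothetical consumer could report the driver version with e.g.
+ *
+ *   printk(KERN_INFO "ips: driver version %s\n", IPS_VER_STRING);
+ */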
+ +/* Version numbers for various adapters */ +#define IPS_VER_SERVERAID1 "2.25.01" +#define IPS_VER_SERVERAID2 "2.88.13" +#define IPS_VER_NAVAJO "2.88.13" +#define IPS_VER_SERVERAID3 "6.10.24" +#define IPS_VER_SERVERAID4H "7.12.02" +#define IPS_VER_SERVERAID4MLx "7.12.02" +#define IPS_VER_SARASOTA "7.12.02" +#define IPS_VER_MARCO "7.12.02" +#define IPS_VER_SEBRING "7.12.02" +#define IPS_VER_KEYWEST "7.12.02" + +/* Compatibility IDs for various adapters */ +#define IPS_COMPAT_UNKNOWN "" +#define IPS_COMPAT_CURRENT "KW710" +#define IPS_COMPAT_SERVERAID1 "2.25.01" +#define IPS_COMPAT_SERVERAID2 "2.88.13" +#define IPS_COMPAT_NAVAJO "2.88.13" +#define IPS_COMPAT_KIOWA "2.88.13" +#define IPS_COMPAT_SERVERAID3H "SB610" +#define IPS_COMPAT_SERVERAID3L "SB610" +#define IPS_COMPAT_SERVERAID4H "KW710" +#define IPS_COMPAT_SERVERAID4M "KW710" +#define IPS_COMPAT_SERVERAID4L "KW710" +#define IPS_COMPAT_SERVERAID4Mx "KW710" +#define IPS_COMPAT_SERVERAID4Lx "KW710" +#define IPS_COMPAT_SARASOTA "KW710" +#define IPS_COMPAT_MARCO "KW710" +#define IPS_COMPAT_SEBRING "KW710" +#define IPS_COMPAT_TAMPA "KW710" +#define IPS_COMPAT_KEYWEST "KW710" +#define IPS_COMPAT_BIOS "KW710" + +#define IPS_COMPAT_MAX_ADAPTER_TYPE 18 +#define IPS_COMPAT_ID_LENGTH 8 + +#define IPS_DEFINE_COMPAT_TABLE(tablename) \ + char tablename[IPS_COMPAT_MAX_ADAPTER_TYPE] [IPS_COMPAT_ID_LENGTH] = { \ + IPS_COMPAT_UNKNOWN, \ + IPS_COMPAT_SERVERAID1, \ + IPS_COMPAT_SERVERAID2, \ + IPS_COMPAT_NAVAJO, \ + IPS_COMPAT_KIOWA, \ + IPS_COMPAT_SERVERAID3H, \ + IPS_COMPAT_SERVERAID3L, \ + IPS_COMPAT_SERVERAID4H, \ + IPS_COMPAT_SERVERAID4M, \ + IPS_COMPAT_SERVERAID4L, \ + IPS_COMPAT_SERVERAID4Mx, \ + IPS_COMPAT_SERVERAID4Lx, \ + IPS_COMPAT_SARASOTA, /* one-channel variety of SARASOTA */ \ + IPS_COMPAT_SARASOTA, /* two-channel variety of SARASOTA */ \ + IPS_COMPAT_MARCO, \ + IPS_COMPAT_SEBRING, \ + IPS_COMPAT_TAMPA, \ + IPS_COMPAT_KEYWEST \ + } diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile new file mode 100644 index 000000000..da6f04cae --- /dev/null +++ b/drivers/scsi/isci/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_SCSI_ISCI) += isci.o +isci-objs := init.o phy.o request.o \ + remote_device.o port.o \ + host.o task.o probe_roms.o \ + remote_node_context.o \ + remote_node_table.o \ + unsolicited_frame_control.o \ + port_config.o \ diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c new file mode 100644 index 000000000..35589b6af --- /dev/null +++ b/drivers/scsi/isci/host.c @@ -0,0 +1,2804 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include +#include "host.h" +#include "isci.h" +#include "port.h" +#include "probe_roms.h" +#include "remote_device.h" +#include "request.h" +#include "scu_completion_codes.h" +#include "scu_event_codes.h" +#include "registers.h" +#include "scu_remote_node_context.h" +#include "scu_task_context.h" + +#define SCU_CONTEXT_RAM_INIT_STALL_TIME 200 + +#define smu_max_ports(dcc_value) \ + (\ + (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \ + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \ + ) + +#define smu_max_task_contexts(dcc_value) \ + (\ + (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \ + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \ + ) + +#define smu_max_rncs(dcc_value) \ + (\ + (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \ + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \ + ) + +#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100 + +/* + * The number of milliseconds to wait while a given phy is consuming power + * before allowing another set of phys to consume power. Ultimately, this will + * be specified by OEM parameter. + */ +#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500 + +/* + * NORMALIZE_PUT_POINTER() - + * + * This macro will normalize the completion queue put pointer so its value can + * be used as an array inde + */ +#define NORMALIZE_PUT_POINTER(x) \ + ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK) + + +/* + * NORMALIZE_EVENT_POINTER() - + * + * This macro will normalize the completion queue event entry so its value can + * be used as an index. 
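+ * (the event pointer field is masked out of the get register value and
+ * shifted down so the result can index the event entries directly)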
+ */ +#define NORMALIZE_EVENT_POINTER(x) \ + (\ + ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \ + >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \ + ) + +/* + * NORMALIZE_GET_POINTER() - + * + * This macro will normalize the completion queue get pointer so its value can + * be used as an index into an array + */ +#define NORMALIZE_GET_POINTER(x) \ + ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK) + +/* + * NORMALIZE_GET_POINTER_CYCLE_BIT() - + * + * This macro will normalize the completion queue cycle pointer so it matches + * the completion queue cycle bit + */ +#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \ + ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT)) + +/* + * COMPLETION_QUEUE_CYCLE_BIT() - + * + * This macro will return the cycle bit of the completion queue entry + */ +#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000) + +/* Init the state machine and call the state entry function (if any) */ +void sci_init_sm(struct sci_base_state_machine *sm, + const struct sci_base_state *state_table, u32 initial_state) +{ + sci_state_transition_t handler; + + sm->initial_state_id = initial_state; + sm->previous_state_id = initial_state; + sm->current_state_id = initial_state; + sm->state_table = state_table; + + handler = sm->state_table[initial_state].enter_state; + if (handler) + handler(sm); +} + +/* Call the state exit fn, update the current state, call the state entry fn */ +void sci_change_state(struct sci_base_state_machine *sm, u32 next_state) +{ + sci_state_transition_t handler; + + handler = sm->state_table[sm->current_state_id].exit_state; + if (handler) + handler(sm); + + sm->previous_state_id = sm->current_state_id; + sm->current_state_id = next_state; + + handler = sm->state_table[sm->current_state_id].enter_state; + if (handler) + handler(sm); +} + +static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost) +{ + u32 get_value = ihost->completion_queue_get; + u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK; + + if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) == + COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])) + return true; + + return false; +} + +static bool sci_controller_isr(struct isci_host *ihost) +{ + if (sci_controller_completion_queue_has_entries(ihost)) + return true; + + /* we have a spurious interrupt it could be that we have already + * emptied the completion queue from a previous interrupt + * FIXME: really!? + */ + writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); + + /* There is a race in the hardware that could cause us not to be + * notified of an interrupt completion if we do not take this + * step. We will mask then unmask the interrupts so if there is + * another interrupt pending the clearing of the interrupt + * source we get the next interrupt message. 
+ */ + spin_lock(&ihost->scic_lock); + if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) { + writel(0xFF000000, &ihost->smu_registers->interrupt_mask); + writel(0, &ihost->smu_registers->interrupt_mask); + } + spin_unlock(&ihost->scic_lock); + + return false; +} + +irqreturn_t isci_msix_isr(int vec, void *data) +{ + struct isci_host *ihost = data; + + if (sci_controller_isr(ihost)) + tasklet_schedule(&ihost->completion_tasklet); + + return IRQ_HANDLED; +} + +static bool sci_controller_error_isr(struct isci_host *ihost) +{ + u32 interrupt_status; + + interrupt_status = + readl(&ihost->smu_registers->interrupt_status); + interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND); + + if (interrupt_status != 0) { + /* + * There is an error interrupt pending so let it through and handle + * in the callback */ + return true; + } + + /* + * There is a race in the hardware that could cause us not to be notified + * of an interrupt completion if we do not take this step. We will mask + * then unmask the error interrupts so if there was another interrupt + * pending we will be notified. + * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */ + writel(0xff, &ihost->smu_registers->interrupt_mask); + writel(0, &ihost->smu_registers->interrupt_mask); + + return false; +} + +static void sci_controller_task_completion(struct isci_host *ihost, u32 ent) +{ + u32 index = SCU_GET_COMPLETION_INDEX(ent); + struct isci_request *ireq = ihost->reqs[index]; + + /* Make sure that we really want to process this IO request */ + if (test_bit(IREQ_ACTIVE, &ireq->flags) && + ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && + ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index]) + /* Yep this is a valid io request pass it along to the + * io request handler + */ + sci_io_request_tc_completion(ireq, ent); +} + +static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent) +{ + u32 index; + struct isci_request *ireq; + struct isci_remote_device *idev; + + index = SCU_GET_COMPLETION_INDEX(ent); + + switch (scu_get_command_request_type(ent)) { + case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: + case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: + ireq = ihost->reqs[index]; + dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n", + __func__, ent, ireq); + /* @todo For a post TC operation we need to fail the IO + * request + */ + break; + case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC: + case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC: + case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: + idev = ihost->device_table[index]; + dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n", + __func__, ent, idev); + /* @todo For a port RNC operation we need to fail the + * device + */ + break; + default: + dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n", + __func__, ent); + break; + } +} + +static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent) +{ + u32 index; + u32 frame_index; + + struct scu_unsolicited_frame_header *frame_header; + struct isci_phy *iphy; + struct isci_remote_device *idev; + + enum sci_status result = SCI_FAILURE; + + frame_index = SCU_GET_FRAME_INDEX(ent); + + frame_header = ihost->uf_control.buffers.array[frame_index].header; + ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE; + + if (SCU_GET_FRAME_ERROR(ent)) { + /* + * / @todo If the IAF frame or SIGNATURE FIS frame has an error will + * / this cause a problem? We expect the phy initialization will + * / fail if there is an error in the frame. 
*/ + sci_controller_release_frame(ihost, frame_index); + return; + } + + if (frame_header->is_address_frame) { + index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); + iphy = &ihost->phys[index]; + result = sci_phy_frame_handler(iphy, frame_index); + } else { + + index = SCU_GET_COMPLETION_INDEX(ent); + + if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { + /* + * This is a signature fis or a frame from a direct attached SATA + * device that has not yet been created. In either case forwared + * the frame to the PE and let it take care of the frame data. */ + index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); + iphy = &ihost->phys[index]; + result = sci_phy_frame_handler(iphy, frame_index); + } else { + if (index < ihost->remote_node_entries) + idev = ihost->device_table[index]; + else + idev = NULL; + + if (idev != NULL) + result = sci_remote_device_frame_handler(idev, frame_index); + else + sci_controller_release_frame(ihost, frame_index); + } + } + + if (result != SCI_SUCCESS) { + /* + * / @todo Is there any reason to report some additional error message + * / when we get this failure notifiction? */ + } +} + +static void sci_controller_event_completion(struct isci_host *ihost, u32 ent) +{ + struct isci_remote_device *idev; + struct isci_request *ireq; + struct isci_phy *iphy; + u32 index; + + index = SCU_GET_COMPLETION_INDEX(ent); + + switch (scu_get_event_type(ent)) { + case SCU_EVENT_TYPE_SMU_COMMAND_ERROR: + /* / @todo The driver did something wrong and we need to fix the condtion. */ + dev_err(&ihost->pdev->dev, + "%s: SCIC Controller 0x%p received SMU command error " + "0x%x\n", + __func__, + ihost, + ent); + break; + + case SCU_EVENT_TYPE_SMU_PCQ_ERROR: + case SCU_EVENT_TYPE_SMU_ERROR: + case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR: + /* + * / @todo This is a hardware failure and its likely that we want to + * / reset the controller. */ + dev_err(&ihost->pdev->dev, + "%s: SCIC Controller 0x%p received fatal controller " + "event 0x%x\n", + __func__, + ihost, + ent); + break; + + case SCU_EVENT_TYPE_TRANSPORT_ERROR: + ireq = ihost->reqs[index]; + sci_io_request_event_handler(ireq, ent); + break; + + case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: + switch (scu_get_event_specifier(ent)) { + case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE: + case SCU_EVENT_SPECIFIC_TASK_TIMEOUT: + ireq = ihost->reqs[index]; + if (ireq != NULL) + sci_io_request_event_handler(ireq, ent); + else + dev_warn(&ihost->pdev->dev, + "%s: SCIC Controller 0x%p received " + "event 0x%x for io request object " + "that doesn't exist.\n", + __func__, + ihost, + ent); + + break; + + case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT: + idev = ihost->device_table[index]; + if (idev != NULL) + sci_remote_device_event_handler(idev, ent); + else + dev_warn(&ihost->pdev->dev, + "%s: SCIC Controller 0x%p received " + "event 0x%x for remote device object " + "that doesn't exist.\n", + __func__, + ihost, + ent); + + break; + } + break; + + case SCU_EVENT_TYPE_BROADCAST_CHANGE: + /* + * direct the broadcast change event to the phy first and then let + * the phy redirect the broadcast change to the port object */ + case SCU_EVENT_TYPE_ERR_CNT_EVENT: + /* + * direct error counter event to the phy object since that is where + * we get the event notification. This is a type 4 event. 
*/ + case SCU_EVENT_TYPE_OSSP_EVENT: + index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); + iphy = &ihost->phys[index]; + sci_phy_event_handler(iphy, ent); + break; + + case SCU_EVENT_TYPE_RNC_SUSPEND_TX: + case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: + case SCU_EVENT_TYPE_RNC_OPS_MISC: + if (index < ihost->remote_node_entries) { + idev = ihost->device_table[index]; + + if (idev != NULL) + sci_remote_device_event_handler(idev, ent); + } else + dev_err(&ihost->pdev->dev, + "%s: SCIC Controller 0x%p received event 0x%x " + "for remote device object 0x%0x that doesn't " + "exist.\n", + __func__, + ihost, + ent, + index); + + break; + + default: + dev_warn(&ihost->pdev->dev, + "%s: SCIC Controller received unknown event code %x\n", + __func__, + ent); + break; + } +} + +static void sci_controller_process_completions(struct isci_host *ihost) +{ + u32 completion_count = 0; + u32 ent; + u32 get_index; + u32 get_cycle; + u32 event_get; + u32 event_cycle; + + dev_dbg(&ihost->pdev->dev, + "%s: completion queue beginning get:0x%08x\n", + __func__, + ihost->completion_queue_get); + + /* Get the component parts of the completion queue */ + get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get); + get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get; + + event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get); + event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get; + + while ( + NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle) + == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]) + ) { + completion_count++; + + ent = ihost->completion_queue[get_index]; + + /* increment the get pointer and check for rollover to toggle the cycle bit */ + get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) << + (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT); + get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1); + + dev_dbg(&ihost->pdev->dev, + "%s: completion queue entry:0x%08x\n", + __func__, + ent); + + switch (SCU_GET_COMPLETION_TYPE(ent)) { + case SCU_COMPLETION_TYPE_TASK: + sci_controller_task_completion(ihost, ent); + break; + + case SCU_COMPLETION_TYPE_SDMA: + sci_controller_sdma_completion(ihost, ent); + break; + + case SCU_COMPLETION_TYPE_UFI: + sci_controller_unsolicited_frame(ihost, ent); + break; + + case SCU_COMPLETION_TYPE_EVENT: + sci_controller_event_completion(ihost, ent); + break; + + case SCU_COMPLETION_TYPE_NOTIFY: { + event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) << + (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); + event_get = (event_get+1) & (SCU_MAX_EVENTS-1); + + sci_controller_event_completion(ihost, ent); + break; + } + default: + dev_warn(&ihost->pdev->dev, + "%s: SCIC Controller received unknown " + "completion type %x\n", + __func__, + ent); + break; + } + } + + /* Update the get register if we completed one or more entries */ + if (completion_count > 0) { + ihost->completion_queue_get = + SMU_CQGR_GEN_BIT(ENABLE) | + SMU_CQGR_GEN_BIT(EVENT_ENABLE) | + event_cycle | + SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) | + get_cycle | + SMU_CQGR_GEN_VAL(POINTER, get_index); + + writel(ihost->completion_queue_get, + &ihost->smu_registers->completion_queue_get); + + } + + dev_dbg(&ihost->pdev->dev, + "%s: completion queue ending get:0x%08x\n", + __func__, + ihost->completion_queue_get); + +} + +static void sci_controller_error_handler(struct isci_host *ihost) +{ + u32 interrupt_status; + + interrupt_status = + readl(&ihost->smu_registers->interrupt_status); + + if ((interrupt_status & 
SMU_ISR_QUEUE_SUSPEND) && + sci_controller_completion_queue_has_entries(ihost)) { + + sci_controller_process_completions(ihost); + writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status); + } else { + dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__, + interrupt_status); + + sci_change_state(&ihost->sm, SCIC_FAILED); + + return; + } + + /* If we dont process any completions I am not sure that we want to do this. + * We are in the middle of a hardware fault and should probably be reset. + */ + writel(0, &ihost->smu_registers->interrupt_mask); +} + +irqreturn_t isci_intx_isr(int vec, void *data) +{ + irqreturn_t ret = IRQ_NONE; + struct isci_host *ihost = data; + + if (sci_controller_isr(ihost)) { + writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); + tasklet_schedule(&ihost->completion_tasklet); + ret = IRQ_HANDLED; + } else if (sci_controller_error_isr(ihost)) { + spin_lock(&ihost->scic_lock); + sci_controller_error_handler(ihost); + spin_unlock(&ihost->scic_lock); + ret = IRQ_HANDLED; + } + + return ret; +} + +irqreturn_t isci_error_isr(int vec, void *data) +{ + struct isci_host *ihost = data; + + if (sci_controller_error_isr(ihost)) + sci_controller_error_handler(ihost); + + return IRQ_HANDLED; +} + +/** + * isci_host_start_complete() - This function is called by the core library, + * through the ISCI Module, to indicate controller start status. + * @ihost: This parameter specifies the ISCI host object + * @completion_status: This parameter specifies the completion status from the + * core library. + * + */ +static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status) +{ + if (completion_status != SCI_SUCCESS) + dev_info(&ihost->pdev->dev, + "controller start timed out, continuing...\n"); + clear_bit(IHOST_START_PENDING, &ihost->flags); + wake_up(&ihost->eventq); +} + +int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); + struct isci_host *ihost = ha->lldd_ha; + + if (test_bit(IHOST_START_PENDING, &ihost->flags)) + return 0; + + sas_drain_work(ha); + + return 1; +} + +/** + * sci_controller_get_suggested_start_timeout() - This method returns the + * suggested sci_controller_start() timeout amount. The user is free to + * use any timeout value, but this method provides the suggested minimum + * start timeout value. The returned value is based upon empirical + * information determined as a result of interoperability testing. + * @ihost: the handle to the controller object for which to return the + * suggested start timeout. + * + * This method returns the number of milliseconds for the suggested start + * operation timeout. + */ +static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost) +{ + /* Validate the user supplied parameters. */ + if (!ihost) + return 0; + + /* + * The suggested minimum timeout value for a controller start operation: + * + * Signature FIS Timeout + * + Phy Start Timeout + * + Number of Phy Spin Up Intervals + * --------------------------------- + * Number of milliseconds for the controller start operation. + * + * NOTE: The number of phy spin up intervals will be equivalent + * to the number of phys divided by the number phys allowed + * per interval - 1 (once OEM parameters are supported). + * Currently we assume only 1 phy per interval. 
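+ *
+ * For example, with the 100 ms phy start timeout and 500 ms power control
+ * interval defined above, and assuming SCI_MAX_PHYS is 4, this works out
+ * to the signature FIS timeout plus 100 + (3 * 500) = 1600 ms.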
*/ + + return SCIC_SDS_SIGNATURE_FIS_TIMEOUT + + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT + + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); +} + +static void sci_controller_enable_interrupts(struct isci_host *ihost) +{ + set_bit(IHOST_IRQ_ENABLED, &ihost->flags); + writel(0, &ihost->smu_registers->interrupt_mask); +} + +void sci_controller_disable_interrupts(struct isci_host *ihost) +{ + clear_bit(IHOST_IRQ_ENABLED, &ihost->flags); + writel(0xffffffff, &ihost->smu_registers->interrupt_mask); + readl(&ihost->smu_registers->interrupt_mask); /* flush */ +} + +static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost) +{ + u32 port_task_scheduler_value; + + port_task_scheduler_value = + readl(&ihost->scu_registers->peg0.ptsg.control); + port_task_scheduler_value |= + (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) | + SCU_PTSGCR_GEN_BIT(PTSG_ENABLE)); + writel(port_task_scheduler_value, + &ihost->scu_registers->peg0.ptsg.control); +} + +static void sci_controller_assign_task_entries(struct isci_host *ihost) +{ + u32 task_assignment; + + /* + * Assign all the TCs to function 0 + * TODO: Do we actually need to read this register to write it back? + */ + + task_assignment = + readl(&ihost->smu_registers->task_context_assignment[0]); + + task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) | + (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) | + (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE)); + + writel(task_assignment, + &ihost->smu_registers->task_context_assignment[0]); + +} + +static void sci_controller_initialize_completion_queue(struct isci_host *ihost) +{ + u32 index; + u32 completion_queue_control_value; + u32 completion_queue_get_value; + u32 completion_queue_put_value; + + ihost->completion_queue_get = 0; + + completion_queue_control_value = + (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) | + SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1)); + + writel(completion_queue_control_value, + &ihost->smu_registers->completion_queue_control); + + + /* Set the completion queue get pointer and enable the queue */ + completion_queue_get_value = ( + (SMU_CQGR_GEN_VAL(POINTER, 0)) + | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0)) + | (SMU_CQGR_GEN_BIT(ENABLE)) + | (SMU_CQGR_GEN_BIT(EVENT_ENABLE)) + ); + + writel(completion_queue_get_value, + &ihost->smu_registers->completion_queue_get); + + /* Set the completion queue put pointer */ + completion_queue_put_value = ( + (SMU_CQPR_GEN_VAL(POINTER, 0)) + | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0)) + ); + + writel(completion_queue_put_value, + &ihost->smu_registers->completion_queue_put); + + /* Initialize the cycle bit of the completion queue entries */ + for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) { + /* + * If get.cycle_bit != completion_queue.cycle_bit + * its not a valid completion queue entry + * so at system start all entries are invalid */ + ihost->completion_queue[index] = 0x80000000; + } +} + +static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost) +{ + u32 frame_queue_control_value; + u32 frame_queue_get_value; + u32 frame_queue_put_value; + + /* Write the queue size */ + frame_queue_control_value = + SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES); + + writel(frame_queue_control_value, + &ihost->scu_registers->sdma.unsolicited_frame_queue_control); + + /* Setup the get pointer for the unsolicited frame queue */ + frame_queue_get_value = ( + SCU_UFQGP_GEN_VAL(POINTER, 0) + | SCU_UFQGP_GEN_BIT(ENABLE_BIT) + ); + + writel(frame_queue_get_value, + 
&ihost->scu_registers->sdma.unsolicited_frame_get_pointer); + /* Setup the put pointer for the unsolicited frame queue */ + frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0); + writel(frame_queue_put_value, + &ihost->scu_registers->sdma.unsolicited_frame_put_pointer); +} + +void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status) +{ + if (ihost->sm.current_state_id == SCIC_STARTING) { + /* + * We move into the ready state, because some of the phys/ports + * may be up and operational. + */ + sci_change_state(&ihost->sm, SCIC_READY); + + isci_host_start_complete(ihost, status); + } +} + +static bool is_phy_starting(struct isci_phy *iphy) +{ + enum sci_phy_states state; + + state = iphy->sm.current_state_id; + switch (state) { + case SCI_PHY_STARTING: + case SCI_PHY_SUB_INITIAL: + case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: + case SCI_PHY_SUB_AWAIT_IAF_UF: + case SCI_PHY_SUB_AWAIT_SAS_POWER: + case SCI_PHY_SUB_AWAIT_SATA_POWER: + case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: + case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: + case SCI_PHY_SUB_AWAIT_OSSP_EN: + case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: + case SCI_PHY_SUB_FINAL: + return true; + default: + return false; + } +} + +bool is_controller_start_complete(struct isci_host *ihost) +{ + int i; + + for (i = 0; i < SCI_MAX_PHYS; i++) { + struct isci_phy *iphy = &ihost->phys[i]; + u32 state = iphy->sm.current_state_id; + + /* in apc mode we need to check every phy, in + * mpc mode we only need to check phys that have + * been configured into a port + */ + if (is_port_config_apc(ihost)) + /* pass */; + else if (!phy_get_non_dummy_port(iphy)) + continue; + + /* The controller start operation is complete iff: + * - all links have been given an opportunity to start + * - have no indication of a connected device + * - have an indication of a connected device and it has + * finished the link training process. + */ + if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) || + (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) || + (iphy->is_in_link_training == true && is_phy_starting(iphy)) || + (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) + return false; + } + + return true; +} + +/** + * sci_controller_start_next_phy - start phy + * @ihost: controller + * + * If all the phys have been started, then attempt to transition the + * controller to the READY state and inform the user + * (sci_cb_controller_start_complete()). 
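+ * Otherwise the next phy in sequence is started and the phy startup timer
+ * is armed; in manual (MPC) configuration mode, phys that were never
+ * assigned to a port are skipped.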
+ */ +static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost) +{ + struct sci_oem_params *oem = &ihost->oem_parameters; + struct isci_phy *iphy; + enum sci_status status; + + status = SCI_SUCCESS; + + if (ihost->phy_startup_timer_pending) + return status; + + if (ihost->next_phy_to_start >= SCI_MAX_PHYS) { + if (is_controller_start_complete(ihost)) { + sci_controller_transition_to_ready(ihost, SCI_SUCCESS); + sci_del_timer(&ihost->phy_timer); + ihost->phy_startup_timer_pending = false; + } + } else { + iphy = &ihost->phys[ihost->next_phy_to_start]; + + if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { + if (phy_get_non_dummy_port(iphy) == NULL) { + ihost->next_phy_to_start++; + + /* Caution recursion ahead be forwarned + * + * The PHY was never added to a PORT in MPC mode + * so start the next phy in sequence This phy + * will never go link up and will not draw power + * the OEM parameters either configured the phy + * incorrectly for the PORT or it was never + * assigned to a PORT + */ + return sci_controller_start_next_phy(ihost); + } + } + + status = sci_phy_start(iphy); + + if (status == SCI_SUCCESS) { + sci_mod_timer(&ihost->phy_timer, + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT); + ihost->phy_startup_timer_pending = true; + } else { + dev_warn(&ihost->pdev->dev, + "%s: Controller stop operation failed " + "to stop phy %d because of status " + "%d.\n", + __func__, + ihost->phys[ihost->next_phy_to_start].phy_index, + status); + } + + ihost->next_phy_to_start++; + } + + return status; +} + +static void phy_startup_timeout(struct timer_list *t) +{ + struct sci_timer *tmr = from_timer(tmr, t, timer); + struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer); + unsigned long flags; + enum sci_status status; + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (tmr->cancel) + goto done; + + ihost->phy_startup_timer_pending = false; + + do { + status = sci_controller_start_next_phy(ihost); + } while (status != SCI_SUCCESS); + +done: + spin_unlock_irqrestore(&ihost->scic_lock, flags); +} + +static u16 isci_tci_active(struct isci_host *ihost) +{ + return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); +} + +static enum sci_status sci_controller_start(struct isci_host *ihost, + u32 timeout) +{ + enum sci_status result; + u16 index; + + if (ihost->sm.current_state_id != SCIC_INITIALIZED) { + dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", + __func__, ihost->sm.current_state_id); + return SCI_FAILURE_INVALID_STATE; + } + + /* Build the TCi free pool */ + BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8); + ihost->tci_head = 0; + ihost->tci_tail = 0; + for (index = 0; index < ihost->task_context_entries; index++) + isci_tci_free(ihost, index); + + /* Build the RNi free pool */ + sci_remote_node_table_initialize(&ihost->available_remote_nodes, + ihost->remote_node_entries); + + /* + * Before anything else lets make sure we will not be + * interrupted by the hardware. 
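+ *
+ * The remainder of the start sequence then enables the port task
+ * scheduler, assigns the task context entries, initializes the completion
+ * and unsolicited frame queues, starts each logical port, and finally
+ * kicks off phy bring-up under the caller-supplied timeout.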
+ */ + sci_controller_disable_interrupts(ihost); + + /* Enable the port task scheduler */ + sci_controller_enable_port_task_scheduler(ihost); + + /* Assign all the task entries to ihost physical function */ + sci_controller_assign_task_entries(ihost); + + /* Now initialize the completion queue */ + sci_controller_initialize_completion_queue(ihost); + + /* Initialize the unsolicited frame queue for use */ + sci_controller_initialize_unsolicited_frame_queue(ihost); + + /* Start all of the ports on this controller */ + for (index = 0; index < ihost->logical_port_entries; index++) { + struct isci_port *iport = &ihost->ports[index]; + + result = sci_port_start(iport); + if (result) + return result; + } + + sci_controller_start_next_phy(ihost); + + sci_mod_timer(&ihost->timer, timeout); + + sci_change_state(&ihost->sm, SCIC_STARTING); + + return SCI_SUCCESS; +} + +void isci_host_start(struct Scsi_Host *shost) +{ + struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; + unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost); + + set_bit(IHOST_START_PENDING, &ihost->flags); + + spin_lock_irq(&ihost->scic_lock); + sci_controller_start(ihost, tmo); + sci_controller_enable_interrupts(ihost); + spin_unlock_irq(&ihost->scic_lock); +} + +static void isci_host_stop_complete(struct isci_host *ihost) +{ + sci_controller_disable_interrupts(ihost); + clear_bit(IHOST_STOP_PENDING, &ihost->flags); + wake_up(&ihost->eventq); +} + +static void sci_controller_completion_handler(struct isci_host *ihost) +{ + /* Empty out the completion queue */ + if (sci_controller_completion_queue_has_entries(ihost)) + sci_controller_process_completions(ihost); + + /* Clear the interrupt and enable all interrupts again */ + writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); + /* Could we write the value of SMU_ISR_COMPLETION? */ + writel(0xFF000000, &ihost->smu_registers->interrupt_mask); + writel(0, &ihost->smu_registers->interrupt_mask); +} + +void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task) +{ + if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) && + !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) { + /* Normal notification (task_done) */ + dev_dbg(&ihost->pdev->dev, + "%s: Normal - ireq/task = %p/%p\n", + __func__, ireq, task); + task->lldd_task = NULL; + task->task_done(task); + } else { + dev_dbg(&ihost->pdev->dev, + "%s: Error - ireq/task = %p/%p\n", + __func__, ireq, task); + if (sas_protocol_ata(task->task_proto)) + task->lldd_task = NULL; + sas_task_abort(task); + } + } else + task->lldd_task = NULL; + + if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) + wake_up_all(&ihost->eventq); + + if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags)) + isci_free_tag(ihost, ireq->io_tag); +} +/** + * isci_host_completion_routine() - This function is the delayed service + * routine that calls the sci core library's completion handler. It's + * scheduled as a tasklet from the interrupt service routine when interrupts + * in use, or set as the timeout function in polled mode. 
+ * @data: This parameter specifies the ISCI host object + * + */ +void isci_host_completion_routine(unsigned long data) +{ + struct isci_host *ihost = (struct isci_host *)data; + u16 active; + + spin_lock_irq(&ihost->scic_lock); + sci_controller_completion_handler(ihost); + spin_unlock_irq(&ihost->scic_lock); + + /* + * we subtract SCI_MAX_PORTS to account for the number of dummy TCs + * issued for hardware issue workaround + */ + active = isci_tci_active(ihost) - SCI_MAX_PORTS; + + /* + * the coalesence timeout doubles at each encoding step, so + * update it based on the ilog2 value of the outstanding requests + */ + writel(SMU_ICC_GEN_VAL(NUMBER, active) | + SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)), + &ihost->smu_registers->interrupt_coalesce_control); +} + +/** + * sci_controller_stop() - This method will stop an individual controller + * object.This method will invoke the associated user callback upon + * completion. The completion callback is called when the following + * conditions are met: -# the method return status is SCI_SUCCESS. -# the + * controller has been quiesced. This method will ensure that all IO + * requests are quiesced, phys are stopped, and all additional operation by + * the hardware is halted. + * @ihost: the handle to the controller object to stop. + * @timeout: This parameter specifies the number of milliseconds in which the + * stop operation should complete. + * + * The controller must be in the STARTED or STOPPED state. Indicate if the + * controller stop method succeeded or failed in some way. SCI_SUCCESS if the + * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the + * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the + * controller is not either in the STARTED or STOPPED states. + */ +static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout) +{ + if (ihost->sm.current_state_id != SCIC_READY) { + dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", + __func__, ihost->sm.current_state_id); + return SCI_FAILURE_INVALID_STATE; + } + + sci_mod_timer(&ihost->timer, timeout); + sci_change_state(&ihost->sm, SCIC_STOPPING); + return SCI_SUCCESS; +} + +/** + * sci_controller_reset() - This method will reset the supplied core + * controller regardless of the state of said controller. This operation is + * considered destructive. In other words, all current operations are wiped + * out. No IO completions for outstanding devices occur. Outstanding IO + * requests are not aborted or completed at the actual remote device. + * @ihost: the handle to the controller object to reset. + * + * Indicate if the controller reset method succeeded or failed in some way. + * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if + * the controller reset operation is unable to complete. + */ +static enum sci_status sci_controller_reset(struct isci_host *ihost) +{ + switch (ihost->sm.current_state_id) { + case SCIC_RESET: + case SCIC_READY: + case SCIC_STOPPING: + case SCIC_FAILED: + /* + * The reset operation is not a graceful cleanup, just + * perform the state transition. 
+ */ + sci_change_state(&ihost->sm, SCIC_RESETTING); + return SCI_SUCCESS; + default: + dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", + __func__, ihost->sm.current_state_id); + return SCI_FAILURE_INVALID_STATE; + } +} + +static enum sci_status sci_controller_stop_phys(struct isci_host *ihost) +{ + u32 index; + enum sci_status status; + enum sci_status phy_status; + + status = SCI_SUCCESS; + + for (index = 0; index < SCI_MAX_PHYS; index++) { + phy_status = sci_phy_stop(&ihost->phys[index]); + + if (phy_status != SCI_SUCCESS && + phy_status != SCI_FAILURE_INVALID_STATE) { + status = SCI_FAILURE; + + dev_warn(&ihost->pdev->dev, + "%s: Controller stop operation failed to stop " + "phy %d because of status %d.\n", + __func__, + ihost->phys[index].phy_index, phy_status); + } + } + + return status; +} + + +/** + * isci_host_deinit - shutdown frame reception and dma + * @ihost: host to take down + * + * This is called in either the driver shutdown or the suspend path. In + * the shutdown case libsas went through port teardown and normal device + * removal (i.e. physical links stayed up to service scsi_device removal + * commands). In the suspend case we disable the hardware without + * notifying libsas of the link down events since we want libsas to + * remember the domain across the suspend/resume cycle + */ +void isci_host_deinit(struct isci_host *ihost) +{ + int i; + + /* disable output data selects */ + for (i = 0; i < isci_gpio_count(ihost); i++) + writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); + + set_bit(IHOST_STOP_PENDING, &ihost->flags); + + spin_lock_irq(&ihost->scic_lock); + sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT); + spin_unlock_irq(&ihost->scic_lock); + + wait_for_stop(ihost); + + /* phy stop is after controller stop to allow port and device to + * go idle before shutting down the phys, but the expectation is + * that i/o has been shut off well before we reach this + * function. 
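+ *
+ * The remaining steps below disable SGPIO, reset the controller, and
+ * cancel any outstanding port, phy, and controller timers.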
+ */ + sci_controller_stop_phys(ihost); + + /* disable sgpio: where the above wait should give time for the + * enclosure to sample the gpios going inactive + */ + writel(0, &ihost->scu_registers->peg0.sgpio.interface_control); + + spin_lock_irq(&ihost->scic_lock); + sci_controller_reset(ihost); + spin_unlock_irq(&ihost->scic_lock); + + /* Cancel any/all outstanding port timers */ + for (i = 0; i < ihost->logical_port_entries; i++) { + struct isci_port *iport = &ihost->ports[i]; + del_timer_sync(&iport->timer.timer); + } + + /* Cancel any/all outstanding phy timers */ + for (i = 0; i < SCI_MAX_PHYS; i++) { + struct isci_phy *iphy = &ihost->phys[i]; + del_timer_sync(&iphy->sata_timer.timer); + } + + del_timer_sync(&ihost->port_agent.timer.timer); + + del_timer_sync(&ihost->power_control.timer.timer); + + del_timer_sync(&ihost->timer.timer); + + del_timer_sync(&ihost->phy_timer.timer); +} + +static void __iomem *scu_base(struct isci_host *isci_host) +{ + struct pci_dev *pdev = isci_host->pdev; + int id = isci_host->id; + + return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id; +} + +static void __iomem *smu_base(struct isci_host *isci_host) +{ + struct pci_dev *pdev = isci_host->pdev; + int id = isci_host->id; + + return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id; +} + +static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); + + sci_change_state(&ihost->sm, SCIC_RESET); +} + +static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); + + sci_del_timer(&ihost->timer); +} + +#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853 +#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280 +#define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000 +#define INTERRUPT_COALESCE_NUMBER_MAX 256 +#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7 +#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28 + +/** + * sci_controller_set_interrupt_coalescence() - This method allows the user to + * configure the interrupt coalescence. + * @ihost: This parameter represents the handle to the controller object + * for which its interrupt coalesce register is overridden. + * @coalesce_number: Used to control the number of entries in the Completion + * Queue before an interrupt is generated. If the number of entries exceed + * this number, an interrupt will be generated. The valid range of the input + * is [0, 256]. A setting of 0 results in coalescing being disabled. + * @coalesce_timeout: Timeout value in microseconds. The valid range of the + * input is [0, 2700000] . A setting of 0 is allowed and results in no + * interrupt coalescing timeout. + * + * Indicate if the user successfully set the interrupt coalesce parameters. + * SCI_SUCCESS The user successfully updated the interrutp coalescence. + * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. + */ +static enum sci_status +sci_controller_set_interrupt_coalescence(struct isci_host *ihost, + u32 coalesce_number, + u32 coalesce_timeout) +{ + u8 timeout_encode = 0; + u32 min = 0; + u32 max = 0; + + /* Check if the input parameters fall in the range. 
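+ *
+ * For example, a requested coalesce_timeout of 30 us is scaled to 3000
+ * (in units of 10 ns) below and falls into the encode value 12 bucket,
+ * i.e. the 27.3 us to 41.0 us row of the table that follows.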
*/ + if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX) + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + + /* + * Defined encoding for interrupt coalescing timeout: + * Value Min Max Units + * ----- --- --- ----- + * 0 - - Disabled + * 1 13.3 20.0 ns + * 2 26.7 40.0 + * 3 53.3 80.0 + * 4 106.7 160.0 + * 5 213.3 320.0 + * 6 426.7 640.0 + * 7 853.3 1280.0 + * 8 1.7 2.6 us + * 9 3.4 5.1 + * 10 6.8 10.2 + * 11 13.7 20.5 + * 12 27.3 41.0 + * 13 54.6 81.9 + * 14 109.2 163.8 + * 15 218.5 327.7 + * 16 436.9 655.4 + * 17 873.8 1310.7 + * 18 1.7 2.6 ms + * 19 3.5 5.2 + * 20 7.0 10.5 + * 21 14.0 21.0 + * 22 28.0 41.9 + * 23 55.9 83.9 + * 24 111.8 167.8 + * 25 223.7 335.5 + * 26 447.4 671.1 + * 27 894.8 1342.2 + * 28 1.8 2.7 s + * Others Undefined */ + + /* + * Use the table above to decide the encode of interrupt coalescing timeout + * value for register writing. */ + if (coalesce_timeout == 0) + timeout_encode = 0; + else{ + /* make the timeout value in unit of (10 ns). */ + coalesce_timeout = coalesce_timeout * 100; + min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10; + max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10; + + /* get the encode of timeout for register writing. */ + for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN; + timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX; + timeout_encode++) { + if (min <= coalesce_timeout && max > coalesce_timeout) + break; + else if (coalesce_timeout >= max && coalesce_timeout < min * 2 + && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) { + if ((coalesce_timeout - max) < (2 * min - coalesce_timeout)) + break; + else{ + timeout_encode++; + break; + } + } else { + max = max * 2; + min = min * 2; + } + } + + if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1) + /* the value is out of range. */ + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + } + + writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) | + SMU_ICC_GEN_VAL(TIMER, timeout_encode), + &ihost->smu_registers->interrupt_coalesce_control); + + + ihost->interrupt_coalesce_number = (u16)coalesce_number; + ihost->interrupt_coalesce_timeout = coalesce_timeout / 100; + + return SCI_SUCCESS; +} + + +static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); + u32 val; + + /* enable clock gating for power control of the scu unit */ + val = readl(&ihost->smu_registers->clock_gating_control); + val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) | + SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) | + SMU_CGUCR_GEN_BIT(XCLK_ENABLE)); + val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE); + writel(val, &ihost->smu_registers->clock_gating_control); + + /* set the default interrupt coalescence number and timeout value. */ + sci_controller_set_interrupt_coalescence(ihost, 0, 0); +} + +static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); + + /* disable interrupt coalescence. 
*/ + sci_controller_set_interrupt_coalescence(ihost, 0, 0); +} + +static enum sci_status sci_controller_stop_ports(struct isci_host *ihost) +{ + u32 index; + enum sci_status port_status; + enum sci_status status = SCI_SUCCESS; + + for (index = 0; index < ihost->logical_port_entries; index++) { + struct isci_port *iport = &ihost->ports[index]; + + port_status = sci_port_stop(iport); + + if ((port_status != SCI_SUCCESS) && + (port_status != SCI_FAILURE_INVALID_STATE)) { + status = SCI_FAILURE; + + dev_warn(&ihost->pdev->dev, + "%s: Controller stop operation failed to " + "stop port %d because of status %d.\n", + __func__, + iport->logical_port_index, + port_status); + } + } + + return status; +} + +static enum sci_status sci_controller_stop_devices(struct isci_host *ihost) +{ + u32 index; + enum sci_status status; + enum sci_status device_status; + + status = SCI_SUCCESS; + + for (index = 0; index < ihost->remote_node_entries; index++) { + if (ihost->device_table[index] != NULL) { + /* / @todo What timeout value do we want to provide to this request? */ + device_status = sci_remote_device_stop(ihost->device_table[index], 0); + + if ((device_status != SCI_SUCCESS) && + (device_status != SCI_FAILURE_INVALID_STATE)) { + dev_warn(&ihost->pdev->dev, + "%s: Controller stop operation failed " + "to stop device 0x%p because of " + "status %d.\n", + __func__, + ihost->device_table[index], device_status); + } + } + } + + return status; +} + +static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); + + sci_controller_stop_devices(ihost); + sci_controller_stop_ports(ihost); + + if (!sci_controller_has_remote_devices_stopping(ihost)) + isci_host_stop_complete(ihost); +} + +static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); + + sci_del_timer(&ihost->timer); +} + +static void sci_controller_reset_hardware(struct isci_host *ihost) +{ + /* Disable interrupts so we dont take any spurious interrupts */ + sci_controller_disable_interrupts(ihost); + + /* Reset the SCU */ + writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control); + + /* Delay for 1ms to before clearing the CQP and UFQPR. 
*/ + udelay(1000); + + /* The write to the CQGR clears the CQP */ + writel(0x00000000, &ihost->smu_registers->completion_queue_get); + + /* The write to the UFQGP clears the UFQPR */ + writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); + + /* clear all interrupts */ + writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status); +} + +static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); + + sci_controller_reset_hardware(ihost); + sci_change_state(&ihost->sm, SCIC_RESET); +} + +static const struct sci_base_state sci_controller_state_table[] = { + [SCIC_INITIAL] = { + .enter_state = sci_controller_initial_state_enter, + }, + [SCIC_RESET] = {}, + [SCIC_INITIALIZING] = {}, + [SCIC_INITIALIZED] = {}, + [SCIC_STARTING] = { + .exit_state = sci_controller_starting_state_exit, + }, + [SCIC_READY] = { + .enter_state = sci_controller_ready_state_enter, + .exit_state = sci_controller_ready_state_exit, + }, + [SCIC_RESETTING] = { + .enter_state = sci_controller_resetting_state_enter, + }, + [SCIC_STOPPING] = { + .enter_state = sci_controller_stopping_state_enter, + .exit_state = sci_controller_stopping_state_exit, + }, + [SCIC_FAILED] = {} +}; + +static void controller_timeout(struct timer_list *t) +{ + struct sci_timer *tmr = from_timer(tmr, t, timer); + struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer); + struct sci_base_state_machine *sm = &ihost->sm; + unsigned long flags; + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (tmr->cancel) + goto done; + + if (sm->current_state_id == SCIC_STARTING) + sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); + else if (sm->current_state_id == SCIC_STOPPING) { + sci_change_state(sm, SCIC_FAILED); + isci_host_stop_complete(ihost); + } else /* / @todo Now what do we want to do in this case? 
*/ + dev_err(&ihost->pdev->dev, + "%s: Controller timer fired when controller was not " + "in a state being timed.\n", + __func__); + +done: + spin_unlock_irqrestore(&ihost->scic_lock, flags); +} + +static enum sci_status sci_controller_construct(struct isci_host *ihost, + void __iomem *scu_base, + void __iomem *smu_base) +{ + u8 i; + + sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL); + + ihost->scu_registers = scu_base; + ihost->smu_registers = smu_base; + + sci_port_configuration_agent_construct(&ihost->port_agent); + + /* Construct the ports for this controller */ + for (i = 0; i < SCI_MAX_PORTS; i++) + sci_port_construct(&ihost->ports[i], i, ihost); + sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost); + + /* Construct the phys for this controller */ + for (i = 0; i < SCI_MAX_PHYS; i++) { + /* Add all the PHYs to the dummy port */ + sci_phy_construct(&ihost->phys[i], + &ihost->ports[SCI_MAX_PORTS], i); + } + + ihost->invalid_phy_mask = 0; + + sci_init_timer(&ihost->timer, controller_timeout); + + return sci_controller_reset(ihost); +} + +int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version) +{ + int i; + + for (i = 0; i < SCI_MAX_PORTS; i++) + if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX) + return -EINVAL; + + for (i = 0; i < SCI_MAX_PHYS; i++) + if (oem->phys[i].sas_address.high == 0 && + oem->phys[i].sas_address.low == 0) + return -EINVAL; + + if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) { + for (i = 0; i < SCI_MAX_PHYS; i++) + if (oem->ports[i].phy_mask != 0) + return -EINVAL; + } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { + u8 phy_mask = 0; + + for (i = 0; i < SCI_MAX_PHYS; i++) + phy_mask |= oem->ports[i].phy_mask; + + if (phy_mask == 0) + return -EINVAL; + } else + return -EINVAL; + + if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT || + oem->controller.max_concurr_spin_up < 1) + return -EINVAL; + + if (oem->controller.do_enable_ssc) { + if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1) + return -EINVAL; + + if (version >= ISCI_ROM_VER_1_1) { + u8 test = oem->controller.ssc_sata_tx_spread_level; + + switch (test) { + case 0: + case 2: + case 3: + case 6: + case 7: + break; + default: + return -EINVAL; + } + + test = oem->controller.ssc_sas_tx_spread_level; + if (oem->controller.ssc_sas_tx_type == 0) { + switch (test) { + case 0: + case 2: + case 3: + break; + default: + return -EINVAL; + } + } else if (oem->controller.ssc_sas_tx_type == 1) { + switch (test) { + case 0: + case 3: + case 6: + break; + default: + return -EINVAL; + } + } + } + } + + return 0; +} + +static u8 max_spin_up(struct isci_host *ihost) +{ + if (ihost->user_parameters.max_concurr_spinup) + return min_t(u8, ihost->user_parameters.max_concurr_spinup, + MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT); + else + return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up, + MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT); +} + +static void power_control_timeout(struct timer_list *t) +{ + struct sci_timer *tmr = from_timer(tmr, t, timer); + struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer); + struct isci_phy *iphy; + unsigned long flags; + u8 i; + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (tmr->cancel) + goto done; + + ihost->power_control.phys_granted_power = 0; + + if (ihost->power_control.phys_waiting == 0) { + ihost->power_control.timer_started = false; + goto done; + } + + for (i = 0; i < SCI_MAX_PHYS; i++) { + 
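+		/* Grant power to the waiting phys, at most max_spin_up() of
+		 * them per timer interval. When an SSP phy is granted power,
+		 * any other queued phys that share its SAS address are
+		 * released as well so they do not wait for another interval.
+		 */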
+ if (ihost->power_control.phys_waiting == 0) + break; + + iphy = ihost->power_control.requesters[i]; + if (iphy == NULL) + continue; + + if (ihost->power_control.phys_granted_power >= max_spin_up(ihost)) + break; + + ihost->power_control.requesters[i] = NULL; + ihost->power_control.phys_waiting--; + ihost->power_control.phys_granted_power++; + sci_phy_consume_power_handler(iphy); + + if (iphy->protocol == SAS_PROTOCOL_SSP) { + u8 j; + + for (j = 0; j < SCI_MAX_PHYS; j++) { + struct isci_phy *requester = ihost->power_control.requesters[j]; + + /* + * Search the power_control queue to see if there are other phys + * attached to the same remote device. If found, take all of + * them out of await_sas_power state. + */ + if (requester != NULL && requester != iphy) { + u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr, + iphy->frame_rcvd.iaf.sas_addr, + sizeof(requester->frame_rcvd.iaf.sas_addr)); + + if (other == 0) { + ihost->power_control.requesters[j] = NULL; + ihost->power_control.phys_waiting--; + sci_phy_consume_power_handler(requester); + } + } + } + } + } + + /* + * It doesn't matter if the power list is empty, we need to start the + * timer in case another phy becomes ready. + */ + sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); + ihost->power_control.timer_started = true; + +done: + spin_unlock_irqrestore(&ihost->scic_lock, flags); +} + +void sci_controller_power_control_queue_insert(struct isci_host *ihost, + struct isci_phy *iphy) +{ + BUG_ON(iphy == NULL); + + if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) { + ihost->power_control.phys_granted_power++; + sci_phy_consume_power_handler(iphy); + + /* + * stop and start the power_control timer. When the timer fires, the + * no_of_phys_granted_power will be set to 0 + */ + if (ihost->power_control.timer_started) + sci_del_timer(&ihost->power_control.timer); + + sci_mod_timer(&ihost->power_control.timer, + SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); + ihost->power_control.timer_started = true; + + } else { + /* + * There are phys, attached to the same sas address as this phy, are + * already in READY state, this phy don't need wait. + */ + u8 i; + struct isci_phy *current_phy; + + for (i = 0; i < SCI_MAX_PHYS; i++) { + u8 other; + current_phy = &ihost->phys[i]; + + other = memcmp(current_phy->frame_rcvd.iaf.sas_addr, + iphy->frame_rcvd.iaf.sas_addr, + sizeof(current_phy->frame_rcvd.iaf.sas_addr)); + + if (current_phy->sm.current_state_id == SCI_PHY_READY && + current_phy->protocol == SAS_PROTOCOL_SSP && + other == 0) { + sci_phy_consume_power_handler(iphy); + break; + } + } + + if (i == SCI_MAX_PHYS) { + /* Add the phy in the waiting list */ + ihost->power_control.requesters[iphy->phy_index] = iphy; + ihost->power_control.phys_waiting++; + } + } +} + +void sci_controller_power_control_queue_remove(struct isci_host *ihost, + struct isci_phy *iphy) +{ + BUG_ON(iphy == NULL); + + if (ihost->power_control.requesters[iphy->phy_index]) + ihost->power_control.phys_waiting--; + + ihost->power_control.requesters[iphy->phy_index] = NULL; +} + +static int is_long_cable(int phy, unsigned char selection_byte) +{ + return !!(selection_byte & (1 << phy)); +} + +static int is_medium_cable(int phy, unsigned char selection_byte) +{ + return !!(selection_byte & (1 << (phy + 4))); +} + +static enum cable_selections decode_selection_byte( + int phy, + unsigned char selection_byte) +{ + return ((selection_byte & (1 << phy)) ? 1 : 0) + + (selection_byte & (1 << (phy + 4)) ? 
2 : 0); +} + +static unsigned char *to_cable_select(struct isci_host *ihost) +{ + if (is_cable_select_overridden()) + return ((unsigned char *)&cable_selection_override) + + ihost->id; + else + return &ihost->oem_parameters.controller.cable_selection_mask; +} + +enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy) +{ + return decode_selection_byte(phy, *to_cable_select(ihost)); +} + +char *lookup_cable_names(enum cable_selections selection) +{ + static char *cable_names[] = { + [short_cable] = "short", + [long_cable] = "long", + [medium_cable] = "medium", + [undefined_cable] = "" /* bit 0==1 */ + }; + return (selection <= undefined_cable) ? cable_names[selection] + : cable_names[undefined_cable]; +} + +#define AFE_REGISTER_WRITE_DELAY 10 + +static void sci_controller_afe_initialization(struct isci_host *ihost) +{ + struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe; + const struct sci_oem_params *oem = &ihost->oem_parameters; + struct pci_dev *pdev = ihost->pdev; + u32 afe_status; + u32 phy_id; + unsigned char cable_selection_mask = *to_cable_select(ihost); + + /* Clear DFX Status registers */ + writel(0x0081000f, &afe->afe_dfx_master_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + + if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) { + /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement + * Timer, PM Stagger Timer + */ + writel(0x0007FFFF, &afe->afe_pmsn_master_control2); + udelay(AFE_REGISTER_WRITE_DELAY); + } + + /* Configure bias currents to normal */ + if (is_a2(pdev)) + writel(0x00005A00, &afe->afe_bias_control); + else if (is_b0(pdev) || is_c0(pdev)) + writel(0x00005F00, &afe->afe_bias_control); + else if (is_c1(pdev)) + writel(0x00005500, &afe->afe_bias_control); + + udelay(AFE_REGISTER_WRITE_DELAY); + + /* Enable PLL */ + if (is_a2(pdev)) + writel(0x80040908, &afe->afe_pll_control0); + else if (is_b0(pdev) || is_c0(pdev)) + writel(0x80040A08, &afe->afe_pll_control0); + else if (is_c1(pdev)) { + writel(0x80000B08, &afe->afe_pll_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + writel(0x00000B08, &afe->afe_pll_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + writel(0x80000B08, &afe->afe_pll_control0); + } + + udelay(AFE_REGISTER_WRITE_DELAY); + + /* Wait for the PLL to lock */ + do { + afe_status = readl(&afe->afe_common_block_status); + udelay(AFE_REGISTER_WRITE_DELAY); + } while ((afe_status & 0x00001000) == 0); + + if (is_a2(pdev)) { + /* Shorten SAS SNW lock time (RxLock timer value from 76 + * us to 50 us) + */ + writel(0x7bcc96ad, &afe->afe_pmsn_master_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + } + + for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) { + struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_id]; + const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id]; + int cable_length_long = + is_long_cable(phy_id, cable_selection_mask); + int cable_length_medium = + is_medium_cable(phy_id, cable_selection_mask); + + if (is_a2(pdev)) { + /* All defaults, except the Receive Word + * Alignament/Comma Detect Enable....(0xe800) + */ + writel(0x00004512, &xcvr->afe_xcvr_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(0x0050100F, &xcvr->afe_xcvr_control1); + udelay(AFE_REGISTER_WRITE_DELAY); + } else if (is_b0(pdev)) { + /* Configure transmitter SSC parameters */ + writel(0x00030000, &xcvr->afe_tx_ssc_control); + udelay(AFE_REGISTER_WRITE_DELAY); + } else if (is_c0(pdev)) { + /* Configure transmitter SSC parameters */ + writel(0x00010202, &xcvr->afe_tx_ssc_control); + udelay(AFE_REGISTER_WRITE_DELAY); + + 
/* All defaults, except the Receive Word + * Alignament/Comma Detect Enable....(0xe800) + */ + writel(0x00014500, &xcvr->afe_xcvr_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + } else if (is_c1(pdev)) { + /* Configure transmitter SSC parameters */ + writel(0x00010202, &xcvr->afe_tx_ssc_control); + udelay(AFE_REGISTER_WRITE_DELAY); + + /* All defaults, except the Receive Word + * Alignament/Comma Detect Enable....(0xe800) + */ + writel(0x0001C500, &xcvr->afe_xcvr_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + } + + /* Power up TX and RX out from power down (PWRDNTX and + * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c) + */ + if (is_a2(pdev)) + writel(0x000003F0, &xcvr->afe_channel_control); + else if (is_b0(pdev)) { + writel(0x000003D7, &xcvr->afe_channel_control); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(0x000003D4, &xcvr->afe_channel_control); + } else if (is_c0(pdev)) { + writel(0x000001E7, &xcvr->afe_channel_control); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(0x000001E4, &xcvr->afe_channel_control); + } else if (is_c1(pdev)) { + writel(cable_length_long ? 0x000002F7 : 0x000001F7, + &xcvr->afe_channel_control); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(cable_length_long ? 0x000002F4 : 0x000001F4, + &xcvr->afe_channel_control); + } + udelay(AFE_REGISTER_WRITE_DELAY); + + if (is_a2(pdev)) { + /* Enable TX equalization (0xe824) */ + writel(0x00040000, &xcvr->afe_tx_control); + udelay(AFE_REGISTER_WRITE_DELAY); + } + + if (is_a2(pdev) || is_b0(pdev)) + /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, + * TPD=0x0(TX Power On), RDD=0x0(RX Detect + * Enabled) ....(0xe800) + */ + writel(0x00004100, &xcvr->afe_xcvr_control0); + else if (is_c0(pdev)) + writel(0x00014100, &xcvr->afe_xcvr_control0); + else if (is_c1(pdev)) + writel(0x0001C100, &xcvr->afe_xcvr_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + + /* Leave DFE/FFE on */ + if (is_a2(pdev)) + writel(0x3F11103F, &xcvr->afe_rx_ssc_control0); + else if (is_b0(pdev)) { + writel(0x3F11103F, &xcvr->afe_rx_ssc_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + /* Enable TX equalization (0xe824) */ + writel(0x00040000, &xcvr->afe_tx_control); + } else if (is_c0(pdev)) { + writel(0x01400C0F, &xcvr->afe_rx_ssc_control1); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + + /* Enable TX equalization (0xe824) */ + writel(0x00040000, &xcvr->afe_tx_control); + } else if (is_c1(pdev)) { + writel(cable_length_long ? 0x01500C0C : + cable_length_medium ? 0x01400C0D : 0x02400C0D, + &xcvr->afe_xcvr_control1); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(0x000003E0, &xcvr->afe_dfx_rx_control1); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(cable_length_long ? 0x33091C1F : + cable_length_medium ? 
0x3315181F : 0x2B17161F, + &xcvr->afe_rx_ssc_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + + /* Enable TX equalization (0xe824) */ + writel(0x00040000, &xcvr->afe_tx_control); + } + + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3); + udelay(AFE_REGISTER_WRITE_DELAY); + } + + /* Transfer control to the PEs */ + writel(0x00010f00, &afe->afe_dfx_master_control0); + udelay(AFE_REGISTER_WRITE_DELAY); +} + +static void sci_controller_initialize_power_control(struct isci_host *ihost) +{ + sci_init_timer(&ihost->power_control.timer, power_control_timeout); + + memset(ihost->power_control.requesters, 0, + sizeof(ihost->power_control.requesters)); + + ihost->power_control.phys_waiting = 0; + ihost->power_control.phys_granted_power = 0; +} + +static enum sci_status sci_controller_initialize(struct isci_host *ihost) +{ + struct sci_base_state_machine *sm = &ihost->sm; + enum sci_status result = SCI_FAILURE; + unsigned long i, state, val; + + if (ihost->sm.current_state_id != SCIC_RESET) { + dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", + __func__, ihost->sm.current_state_id); + return SCI_FAILURE_INVALID_STATE; + } + + sci_change_state(sm, SCIC_INITIALIZING); + + sci_init_timer(&ihost->phy_timer, phy_startup_timeout); + + ihost->next_phy_to_start = 0; + ihost->phy_startup_timer_pending = false; + + sci_controller_initialize_power_control(ihost); + + /* + * There is nothing to do here for B0 since we do not have to + * program the AFE registers. + * / @todo The AFE settings are supposed to be correct for the B0 but + * / presently they seem to be wrong. 
+ */
+	sci_controller_afe_initialization(ihost);
+
+
+	/* Take the hardware out of reset */
+	writel(0, &ihost->smu_registers->soft_reset_control);
+
+	/*
+	 * / @todo Provide meaningful error code for hardware failure
+	 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
+	for (i = 100; i >= 1; i--) {
+		u32 status;
+
+		/* Loop until the hardware reports success */
+		udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
+		status = readl(&ihost->smu_registers->control_status);
+
+		if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
+			break;
+	}
+	if (i == 0)
+		goto out;
+
+	/*
+	 * Determine the actual device capacities that the
+	 * hardware will support */
+	val = readl(&ihost->smu_registers->device_context_capacity);
+
+	/* Record the smaller of the two capacity values */
+	ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
+	ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
+	ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
+
+	/*
+	 * Make all PEs that are unassigned match up with the
+	 * logical ports
+	 */
+	for (i = 0; i < ihost->logical_port_entries; i++) {
+		struct scu_port_task_scheduler_group_registers __iomem
+			*ptsg = &ihost->scu_registers->peg0.ptsg;
+
+		writel(i, &ptsg->protocol_engine[i]);
+	}
+
+	/* Initialize hardware PCI Relaxed ordering in DMA engines */
+	val = readl(&ihost->scu_registers->sdma.pdma_configuration);
+	val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+	writel(val, &ihost->scu_registers->sdma.pdma_configuration);
+
+	val = readl(&ihost->scu_registers->sdma.cdma_configuration);
+	val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+	writel(val, &ihost->scu_registers->sdma.cdma_configuration);
+
+	/*
+	 * Initialize the PHYs before the PORTs because the PHY registers
+	 * are accessed during the port initialization.
+ */ + for (i = 0; i < SCI_MAX_PHYS; i++) { + result = sci_phy_initialize(&ihost->phys[i], + &ihost->scu_registers->peg0.pe[i].tl, + &ihost->scu_registers->peg0.pe[i].ll); + if (result != SCI_SUCCESS) + goto out; + } + + for (i = 0; i < ihost->logical_port_entries; i++) { + struct isci_port *iport = &ihost->ports[i]; + + iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i]; + iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0]; + iport->viit_registers = &ihost->scu_registers->peg0.viit[i]; + } + + result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent); + + out: + /* Advance the controller state machine */ + if (result == SCI_SUCCESS) + state = SCIC_INITIALIZED; + else + state = SCIC_FAILED; + sci_change_state(sm, state); + + return result; +} + +static int sci_controller_dma_alloc(struct isci_host *ihost) +{ + struct device *dev = &ihost->pdev->dev; + size_t size; + int i; + + /* detect re-initialization */ + if (ihost->completion_queue) + return 0; + + size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); + ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma, + GFP_KERNEL); + if (!ihost->completion_queue) + return -ENOMEM; + + size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); + ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma, + GFP_KERNEL); + + if (!ihost->remote_node_context_table) + return -ENOMEM; + + size = ihost->task_context_entries * sizeof(struct scu_task_context), + ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma, + GFP_KERNEL); + if (!ihost->task_context_table) + return -ENOMEM; + + size = SCI_UFI_TOTAL_SIZE; + ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL); + if (!ihost->ufi_buf) + return -ENOMEM; + + for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { + struct isci_request *ireq; + dma_addr_t dma; + + ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL); + if (!ireq) + return -ENOMEM; + + ireq->tc = &ihost->task_context_table[i]; + ireq->owning_controller = ihost; + ireq->request_daddr = dma; + ireq->isci_host = ihost; + ihost->reqs[i] = ireq; + } + + return 0; +} + +static int sci_controller_mem_init(struct isci_host *ihost) +{ + int err = sci_controller_dma_alloc(ihost); + + if (err) + return err; + + writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower); + writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper); + + writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower); + writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper); + + writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower); + writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper); + + sci_unsolicited_frame_control_construct(ihost); + + /* + * Inform the silicon as to the location of the UF headers and + * address table. 
+ */ + writel(lower_32_bits(ihost->uf_control.headers.physical_address), + &ihost->scu_registers->sdma.uf_header_base_address_lower); + writel(upper_32_bits(ihost->uf_control.headers.physical_address), + &ihost->scu_registers->sdma.uf_header_base_address_upper); + + writel(lower_32_bits(ihost->uf_control.address_table.physical_address), + &ihost->scu_registers->sdma.uf_address_table_lower); + writel(upper_32_bits(ihost->uf_control.address_table.physical_address), + &ihost->scu_registers->sdma.uf_address_table_upper); + + return 0; +} + +/** + * isci_host_init - (re-)initialize hardware and internal (private) state + * @ihost: host to init + * + * Any public facing objects (like asd_sas_port, and asd_sas_phys), or + * one-time initialization objects like locks and waitqueues, are + * not touched (they are initialized in isci_host_alloc) + */ +int isci_host_init(struct isci_host *ihost) +{ + int i, err; + enum sci_status status; + + spin_lock_irq(&ihost->scic_lock); + status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost)); + spin_unlock_irq(&ihost->scic_lock); + if (status != SCI_SUCCESS) { + dev_err(&ihost->pdev->dev, + "%s: sci_controller_construct failed - status = %x\n", + __func__, + status); + return -ENODEV; + } + + spin_lock_irq(&ihost->scic_lock); + status = sci_controller_initialize(ihost); + spin_unlock_irq(&ihost->scic_lock); + if (status != SCI_SUCCESS) { + dev_warn(&ihost->pdev->dev, + "%s: sci_controller_initialize failed -" + " status = 0x%x\n", + __func__, status); + return -ENODEV; + } + + err = sci_controller_mem_init(ihost); + if (err) + return err; + + /* enable sgpio */ + writel(1, &ihost->scu_registers->peg0.sgpio.interface_control); + for (i = 0; i < isci_gpio_count(ihost); i++) + writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); + writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code); + + return 0; +} + +void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy) +{ + switch (ihost->sm.current_state_id) { + case SCIC_STARTING: + sci_del_timer(&ihost->phy_timer); + ihost->phy_startup_timer_pending = false; + ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, + iport, iphy); + sci_controller_start_next_phy(ihost); + break; + case SCIC_READY: + ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, + iport, iphy); + break; + default: + dev_dbg(&ihost->pdev->dev, + "%s: SCIC Controller linkup event from phy %d in " + "unexpected state %d\n", __func__, iphy->phy_index, + ihost->sm.current_state_id); + } +} + +void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy) +{ + switch (ihost->sm.current_state_id) { + case SCIC_STARTING: + case SCIC_READY: + ihost->port_agent.link_down_handler(ihost, &ihost->port_agent, + iport, iphy); + break; + default: + dev_dbg(&ihost->pdev->dev, + "%s: SCIC Controller linkdown event from phy %d in " + "unexpected state %d\n", + __func__, + iphy->phy_index, + ihost->sm.current_state_id); + } +} + +bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost) +{ + u32 index; + + for (index = 0; index < ihost->remote_node_entries; index++) { + if ((ihost->device_table[index] != NULL) && + (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING)) + return true; + } + + return false; +} + +void sci_controller_remote_device_stopped(struct isci_host *ihost, + struct isci_remote_device *idev) +{ + if (ihost->sm.current_state_id != SCIC_STOPPING) { + 
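+		/* A device-stopped notification outside of the STOPPING state
+		 * is unexpected; log it and take no further action.
+		 */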
+		dev_dbg(&ihost->pdev->dev,
+			"SCIC Controller 0x%p remote device stopped event "
+			"from device 0x%p in unexpected state %d\n",
+			ihost, idev,
+			ihost->sm.current_state_id);
+		return;
+	}
+
+	if (!sci_controller_has_remote_devices_stopping(ihost))
+		isci_host_stop_complete(ihost);
+}
+
+void sci_controller_post_request(struct isci_host *ihost, u32 request)
+{
+	dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
+		__func__, ihost->id, request);
+
+	writel(request, &ihost->smu_registers->post_context_port);
+}
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
+{
+	u16 task_index;
+	u16 task_sequence;
+
+	task_index = ISCI_TAG_TCI(io_tag);
+
+	if (task_index < ihost->task_context_entries) {
+		struct isci_request *ireq = ihost->reqs[task_index];
+
+		if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
+			task_sequence = ISCI_TAG_SEQ(io_tag);
+
+			if (task_sequence == ihost->io_request_sequence[task_index])
+				return ireq;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * sci_controller_allocate_remote_node_context()
+ * This method allocates a remote node index and reserves the remote node
+ * context space for use. This method can fail if there are no more remote
+ * node indexes available.
+ * @ihost: This is the controller object which contains the set of
+ *    free remote node ids
+ * @idev: This is the device object which is requesting a remote node
+ *    id
+ * @node_id: This is the remote node id that is assigned to the device if one
+ *    is available
+ *
+ * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no remote
+ * node indexes available.
+ */
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+							     struct isci_remote_device *idev,
+							     u16 *node_id)
+{
+	u16 node_index;
+	u32 remote_node_count = sci_remote_device_node_count(idev);
+
+	node_index = sci_remote_node_table_allocate_remote_node(
+		&ihost->available_remote_nodes, remote_node_count
+		);
+
+	if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+		ihost->device_table[node_index] = idev;
+
+		*node_id = node_index;
+
+		return SCI_SUCCESS;
+	}
+
+	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+}
+
+void sci_controller_free_remote_node_context(struct isci_host *ihost,
+					     struct isci_remote_device *idev,
+					     u16 node_id)
+{
+	u32 remote_node_count = sci_remote_device_node_count(idev);
+
+	if (ihost->device_table[node_id] == idev) {
+		ihost->device_table[node_id] = NULL;
+
+		sci_remote_node_table_release_remote_node_index(
+			&ihost->available_remote_nodes, remote_node_count, node_id
+			);
+	}
+}
+
+void sci_controller_copy_sata_response(void *response_buffer,
+				       void *frame_header,
+				       void *frame_buffer)
+{
+	/* XXX type safety?
*/ + memcpy(response_buffer, frame_header, sizeof(u32)); + + memcpy(response_buffer + sizeof(u32), + frame_buffer, + sizeof(struct dev_to_host_fis) - sizeof(u32)); +} + +void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index) +{ + if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index)) + writel(ihost->uf_control.get, + &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); +} + +void isci_tci_free(struct isci_host *ihost, u16 tci) +{ + u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1); + + ihost->tci_pool[tail] = tci; + ihost->tci_tail = tail + 1; +} + +static u16 isci_tci_alloc(struct isci_host *ihost) +{ + u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1); + u16 tci = ihost->tci_pool[head]; + + ihost->tci_head = head + 1; + return tci; +} + +static u16 isci_tci_space(struct isci_host *ihost) +{ + return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); +} + +u16 isci_alloc_tag(struct isci_host *ihost) +{ + if (isci_tci_space(ihost)) { + u16 tci = isci_tci_alloc(ihost); + u8 seq = ihost->io_request_sequence[tci]; + + return ISCI_TAG(seq, tci); + } + + return SCI_CONTROLLER_INVALID_IO_TAG; +} + +enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) +{ + u16 tci = ISCI_TAG_TCI(io_tag); + u16 seq = ISCI_TAG_SEQ(io_tag); + + /* prevent tail from passing head */ + if (isci_tci_active(ihost) == 0) + return SCI_FAILURE_INVALID_IO_TAG; + + if (seq == ihost->io_request_sequence[tci]) { + ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1); + + isci_tci_free(ihost, tci); + + return SCI_SUCCESS; + } + return SCI_FAILURE_INVALID_IO_TAG; +} + +enum sci_status sci_controller_start_io(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + enum sci_status status; + + if (ihost->sm.current_state_id != SCIC_READY) { + dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", + __func__, ihost->sm.current_state_id); + return SCI_FAILURE_INVALID_STATE; + } + + status = sci_remote_device_start_io(ihost, idev, ireq); + if (status != SCI_SUCCESS) + return status; + + set_bit(IREQ_ACTIVE, &ireq->flags); + sci_controller_post_request(ihost, ireq->post_context); + return SCI_SUCCESS; +} + +enum sci_status sci_controller_terminate_request(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + /* terminate an ongoing (i.e. started) core IO request. This does not + * abort the IO request at the target, but rather removes the IO + * request from the host controller. + */ + enum sci_status status; + + if (ihost->sm.current_state_id != SCIC_READY) { + dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", + __func__, ihost->sm.current_state_id); + return SCI_FAILURE_INVALID_STATE; + } + status = sci_io_request_terminate(ireq); + + dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n", + __func__, status, ireq, ireq->flags); + + if ((status == SCI_SUCCESS) && + !test_bit(IREQ_PENDING_ABORT, &ireq->flags) && + !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) { + /* Utilize the original post context command and or in the + * POST_TC_ABORT request sub-type. + */ + sci_controller_post_request( + ihost, ireq->post_context | + SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); + } + return status; +} + +/** + * sci_controller_complete_io() - This method will perform core specific + * completion operations for an IO request. After this method is invoked, + * the user should consider the IO request as invalid until it is properly + * reused (i.e. 
re-constructed).
+ * @ihost: The handle to the controller object for which to complete the
+ *    IO request.
+ * @idev: The handle to the remote device object for which to complete
+ *    the IO request.
+ * @ireq: the handle to the io request object to complete.
+ */
+enum sci_status sci_controller_complete_io(struct isci_host *ihost,
+					   struct isci_remote_device *idev,
+					   struct isci_request *ireq)
+{
+	enum sci_status status;
+
+	switch (ihost->sm.current_state_id) {
+	case SCIC_STOPPING:
+		/* XXX: Implement this function */
+		return SCI_FAILURE;
+	case SCIC_READY:
+		status = sci_remote_device_complete_io(ihost, idev, ireq);
+		if (status != SCI_SUCCESS)
+			return status;
+
+		clear_bit(IREQ_ACTIVE, &ireq->flags);
+		return SCI_SUCCESS;
+	default:
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+}
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq)
+{
+	struct isci_host *ihost = ireq->owning_controller;
+
+	if (ihost->sm.current_state_id != SCIC_READY) {
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	set_bit(IREQ_ACTIVE, &ireq->flags);
+	sci_controller_post_request(ihost, ireq->post_context);
+	return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_start_task() - This method is called by the SCIC user to
+ *    send/start a framework task management request.
+ * @ihost: the handle to the controller object for which to start the task
+ *    management request.
+ * @idev: the handle to the remote device object for which to start
+ *    the task management request.
+ * @ireq: the handle to the task request object to start.
+ */
+enum sci_status sci_controller_start_task(struct isci_host *ihost,
+					  struct isci_remote_device *idev,
+					  struct isci_request *ireq)
+{
+	enum sci_status status;
+
+	if (ihost->sm.current_state_id != SCIC_READY) {
+		dev_warn(&ihost->pdev->dev,
+			 "%s: SCIC Controller starting task from invalid "
+			 "state\n",
+			 __func__);
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	status = sci_remote_device_start_task(ihost, idev, ireq);
+	switch (status) {
+	case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
+		set_bit(IREQ_ACTIVE, &ireq->flags);
+
+		/*
+		 * We will let the framework know this task request started successfully,
+		 * although the core is still working on starting the request (to post tc when
+		 * RNC is resumed.)
+ */ + return SCI_SUCCESS; + case SCI_SUCCESS: + set_bit(IREQ_ACTIVE, &ireq->flags); + sci_controller_post_request(ihost, ireq->post_context); + break; + default: + break; + } + + return status; +} + +static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data) +{ + int d; + + /* no support for TX_GP_CFG */ + if (reg_index == 0) + return -EINVAL; + + for (d = 0; d < isci_gpio_count(ihost); d++) { + u32 val = 0x444; /* all ODx.n clear */ + int i; + + for (i = 0; i < 3; i++) { + int bit; + + bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i), + write_data, reg_index, + reg_count); + if (bit < 0) + break; + + /* if od is set, clear the 'invert' bit */ + val &= ~(bit << ((i << 2) + 2)); + } + + if (i < 3) + break; + writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]); + } + + /* unless reg_index is > 1, we should always be able to write at + * least one register + */ + return d > 0; +} + +int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index, + u8 reg_count, u8 *write_data) +{ + struct isci_host *ihost = sas_ha->lldd_ha; + int written; + + switch (reg_type) { + case SAS_GPIO_REG_TX_GP: + written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data); + break; + default: + written = -EINVAL; + } + + return written; +} diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h new file mode 100644 index 000000000..52388374c --- /dev/null +++ b/drivers/scsi/isci/host.h @@ -0,0 +1,516 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _SCI_HOST_H_ +#define _SCI_HOST_H_ + +#include +#include "remote_device.h" +#include "phy.h" +#include "isci.h" +#include "remote_node_table.h" +#include "registers.h" +#include "unsolicited_frame_control.h" +#include "probe_roms.h" + +struct isci_request; +struct scu_task_context; + + +/** + * struct sci_power_control - + * + * This structure defines the fields for managing power control for direct + * attached disk devices. + */ +struct sci_power_control { + /** + * This field is set when the power control timer is running and cleared when + * it is not. + */ + bool timer_started; + + /** + * Timer to control when the directed attached disks can consume power. + */ + struct sci_timer timer; + + /** + * This field is used to keep track of how many phys are put into the + * requesters field. + */ + u8 phys_waiting; + + /** + * This field is used to keep track of how many phys have been granted to consume power + */ + u8 phys_granted_power; + + /** + * This field is an array of phys that we are waiting on. The phys are direct + * mapped into requesters via struct sci_phy.phy_index + */ + struct isci_phy *requesters[SCI_MAX_PHYS]; + +}; + +struct sci_port_configuration_agent; +typedef void (*port_config_fn)(struct isci_host *, + struct sci_port_configuration_agent *, + struct isci_port *, struct isci_phy *); +bool is_port_config_apc(struct isci_host *ihost); +bool is_controller_start_complete(struct isci_host *ihost); + +struct sci_port_configuration_agent { + u16 phy_configured_mask; + u16 phy_ready_mask; + struct { + u8 min_index; + u8 max_index; + } phy_valid_port_range[SCI_MAX_PHYS]; + bool timer_pending; + port_config_fn link_up_handler; + port_config_fn link_down_handler; + struct sci_timer timer; +}; + +/** + * isci_host - primary host/controller object + * @timer: timeout start/stop operations + * @device_table: rni (hw remote node index) to remote device lookup table + * @available_remote_nodes: rni allocator + * @power_control: manage device spin up + * @io_request_sequence: generation number for tci's (task contexts) + * @task_context_table: hw task context table + * @remote_node_context_table: hw remote node context table + * @completion_queue: hw-producer driver-consumer communication ring + * @completion_queue_get: tracks the driver 'head' of the ring to notify hw + * @logical_port_entries: min({driver|silicon}-supported-port-count) + * @remote_node_entries: min({driver|silicon}-supported-node-count) + * @task_context_entries: min({driver|silicon}-supported-task-count) + * @phy_timer: phy startup timer + * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for + * the phy index is set so further notifications are not + * made. 
Once the phy reports link up and is made part of a + * port then this bit is cleared. + + */ +struct isci_host { + struct sci_base_state_machine sm; + /* XXX can we time this externally */ + struct sci_timer timer; + /* XXX drop reference module params directly */ + struct sci_user_parameters user_parameters; + /* XXX no need to be a union */ + struct sci_oem_params oem_parameters; + struct sci_port_configuration_agent port_agent; + struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES]; + struct sci_remote_node_table available_remote_nodes; + struct sci_power_control power_control; + u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; + struct scu_task_context *task_context_table; + dma_addr_t tc_dma; + union scu_remote_node_context *remote_node_context_table; + dma_addr_t rnc_dma; + u32 *completion_queue; + dma_addr_t cq_dma; + u32 completion_queue_get; + u32 logical_port_entries; + u32 remote_node_entries; + u32 task_context_entries; + void *ufi_buf; + dma_addr_t ufi_dma; + struct sci_unsolicited_frame_control uf_control; + + /* phy startup */ + struct sci_timer phy_timer; + /* XXX kill */ + bool phy_startup_timer_pending; + u32 next_phy_to_start; + /* XXX convert to unsigned long and use bitops */ + u8 invalid_phy_mask; + + /* TODO attempt dynamic interrupt coalescing scheme */ + u16 interrupt_coalesce_number; + u32 interrupt_coalesce_timeout; + struct smu_registers __iomem *smu_registers; + struct scu_registers __iomem *scu_registers; + + u16 tci_head; + u16 tci_tail; + u16 tci_pool[SCI_MAX_IO_REQUESTS]; + + int id; /* unique within a given pci device */ + struct isci_phy phys[SCI_MAX_PHYS]; + struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */ + struct asd_sas_port sas_ports[SCI_MAX_PORTS]; + struct sas_ha_struct sas_ha; + + struct pci_dev *pdev; + #define IHOST_START_PENDING 0 + #define IHOST_STOP_PENDING 1 + #define IHOST_IRQ_ENABLED 2 + unsigned long flags; + wait_queue_head_t eventq; + struct tasklet_struct completion_tasklet; + spinlock_t scic_lock; + struct isci_request *reqs[SCI_MAX_IO_REQUESTS]; + struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES]; +}; + +/** + * enum sci_controller_states - This enumeration depicts all the states + * for the common controller state machine. + */ +enum sci_controller_states { + /** + * Simply the initial state for the base controller state machine. + */ + SCIC_INITIAL = 0, + + /** + * This state indicates that the controller is reset. The memory for + * the controller is in it's initial state, but the controller requires + * initialization. + * This state is entered from the INITIAL state. + * This state is entered from the RESETTING state. + */ + SCIC_RESET, + + /** + * This state is typically an action state that indicates the controller + * is in the process of initialization. In this state no new IO operations + * are permitted. + * This state is entered from the RESET state. + */ + SCIC_INITIALIZING, + + /** + * This state indicates that the controller has been successfully + * initialized. In this state no new IO operations are permitted. + * This state is entered from the INITIALIZING state. + */ + SCIC_INITIALIZED, + + /** + * This state indicates the the controller is in the process of becoming + * ready (i.e. starting). In this state no new IO operations are permitted. + * This state is entered from the INITIALIZED state. + */ + SCIC_STARTING, + + /** + * This state indicates the controller is now ready. Thus, the user + * is able to perform IO operations on the controller. 
+ * This state is entered from the STARTING state. + */ + SCIC_READY, + + /** + * This state is typically an action state that indicates the controller + * is in the process of resetting. Thus, the user is unable to perform + * IO operations on the controller. A reset is considered destructive in + * most cases. + * This state is entered from the READY state. + * This state is entered from the FAILED state. + * This state is entered from the STOPPED state. + */ + SCIC_RESETTING, + + /** + * This state indicates that the controller is in the process of stopping. + * In this state no new IO operations are permitted, but existing IO + * operations are allowed to complete. + * This state is entered from the READY state. + */ + SCIC_STOPPING, + + /** + * This state indicates that the controller could not successfully be + * initialized. In this state no new IO operations are permitted. + * This state is entered from the INITIALIZING state. + * This state is entered from the STARTING state. + * This state is entered from the STOPPING state. + * This state is entered from the RESETTING state. + */ + SCIC_FAILED, +}; + +/** + * struct isci_pci_info - This class represents the pci function containing the + * controllers. Depending on PCI SKU, there could be up to 2 controllers in + * the PCI function. + */ +#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS) + +struct isci_pci_info { + struct isci_host *hosts[SCI_MAX_CONTROLLERS]; + struct isci_orom *orom; +}; + +static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev) +{ + return pci_get_drvdata(pdev); +} + +static inline struct Scsi_Host *to_shost(struct isci_host *ihost) +{ + return ihost->sas_ha.shost; +} + +#define for_each_isci_host(id, ihost, pdev) \ + for (id = 0; id < SCI_MAX_CONTROLLERS && \ + (ihost = to_pci_info(pdev)->hosts[id]); id++) + +static inline void wait_for_start(struct isci_host *ihost) +{ + wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags)); +} + +static inline void wait_for_stop(struct isci_host *ihost) +{ + wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags)); +} + +static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev) +{ + wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags)); +} + +static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev) +{ + wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags)); +} + +static inline struct isci_host *dev_to_ihost(struct domain_device *dev) +{ + return dev->port->ha->lldd_ha; +} + +static inline struct isci_host *idev_to_ihost(struct isci_remote_device *idev) +{ + return dev_to_ihost(idev->domain_dev); +} + +/* we always use protocol engine group zero */ +#define ISCI_PEG 0 + +/* see sci_controller_io_tag_allocate|free for how seq and tci are built */ +#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci) + +/* these are returned by the hardware, so sanitize them */ +#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1)) +#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) + +/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */ +#define ISCI_COALESCE_BASE 9 + +/* expander attached sata devices require 3 rnc slots */ +static inline int sci_remote_device_node_count(struct isci_remote_device *idev) +{ + struct domain_device *dev = idev->domain_dev; + + if (dev_is_sata(dev) && dev->parent) + return SCU_STP_REMOTE_NODE_COUNT; + return SCU_SSP_REMOTE_NODE_COUNT; +} + +/** 
+ * sci_controller_clear_invalid_phy() - + * + * This macro will clear the bit in the invalid phy mask for this controller + * object. This is used to control messages reported for invalid link up + * notifications. + */ +#define sci_controller_clear_invalid_phy(controller, phy) \ + ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index)) + +static inline struct device *scirdev_to_dev(struct isci_remote_device *idev) +{ + if (!idev || !idev->isci_port || !idev->isci_port->isci_host) + return NULL; + + return &idev->isci_port->isci_host->pdev->dev; +} + +static inline bool is_a2(struct pci_dev *pdev) +{ + if (pdev->revision < 4) + return true; + return false; +} + +static inline bool is_b0(struct pci_dev *pdev) +{ + if (pdev->revision == 4) + return true; + return false; +} + +static inline bool is_c0(struct pci_dev *pdev) +{ + if (pdev->revision == 5) + return true; + return false; +} + +static inline bool is_c1(struct pci_dev *pdev) +{ + if (pdev->revision >= 6) + return true; + return false; +} + +enum cable_selections { + short_cable = 0, + long_cable = 1, + medium_cable = 2, + undefined_cable = 3 +}; + +#define CABLE_OVERRIDE_DISABLED (0x10000) + +static inline int is_cable_select_overridden(void) +{ + return cable_selection_override < CABLE_OVERRIDE_DISABLED; +} + +enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy); +void validate_cable_selections(struct isci_host *ihost); +char *lookup_cable_names(enum cable_selections); + +/* set hw control for 'activity', even though active enclosures seem to drive + * the activity led on their own. Skip setting FSENG control on 'status' due + * to unexpected operation and 'error' due to not being a supported automatic + * FSENG output + */ +#define SGPIO_HW_CONTROL 0x00000443 + +static inline int isci_gpio_count(struct isci_host *ihost) +{ + return ARRAY_SIZE(ihost->scu_registers->peg0.sgpio.output_data_select); +} + +void sci_controller_post_request(struct isci_host *ihost, + u32 request); +void sci_controller_release_frame(struct isci_host *ihost, + u32 frame_index); +void sci_controller_copy_sata_response(void *response_buffer, + void *frame_header, + void *frame_buffer); +enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost, + struct isci_remote_device *idev, + u16 *node_id); +void sci_controller_free_remote_node_context( + struct isci_host *ihost, + struct isci_remote_device *idev, + u16 node_id); + +struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag); +void sci_controller_power_control_queue_insert(struct isci_host *ihost, + struct isci_phy *iphy); +void sci_controller_power_control_queue_remove(struct isci_host *ihost, + struct isci_phy *iphy); +void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy); +void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy); +void sci_controller_remote_device_stopped(struct isci_host *ihost, + struct isci_remote_device *idev); + +enum sci_status sci_controller_continue_io(struct isci_request *ireq); +int isci_host_scan_finished(struct Scsi_Host *, unsigned long); +void isci_host_start(struct Scsi_Host *); +u16 isci_alloc_tag(struct isci_host *ihost); +enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag); +void isci_tci_free(struct isci_host *ihost, u16 tci); +void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task); + +int isci_host_init(struct isci_host *); +void 
isci_host_completion_routine(unsigned long data); +void isci_host_deinit(struct isci_host *); +void sci_controller_disable_interrupts(struct isci_host *ihost); +bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost); +void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status); + +enum sci_status sci_controller_start_io( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sci_status sci_controller_start_task( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sci_status sci_controller_terminate_request( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sci_status sci_controller_complete_io( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +void sci_port_configuration_agent_construct( + struct sci_port_configuration_agent *port_agent); + +enum sci_status sci_port_configuration_agent_initialize( + struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent); + +int isci_gpio_write(struct sas_ha_struct *, u8 reg_type, u8 reg_index, + u8 reg_count, u8 *write_data); +#endif diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c new file mode 100644 index 000000000..6277162a0 --- /dev/null +++ b/drivers/scsi/isci/init.c @@ -0,0 +1,777 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "host.h" +#include "isci.h" +#include "task.h" +#include "probe_roms.h" + +#define MAJ 1 +#define MIN 2 +#define BUILD 0 +#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ + __stringify(BUILD) + +MODULE_VERSION(DRV_VERSION); + +static struct scsi_transport_template *isci_transport_template; + +static const struct pci_device_id isci_id_table[] = { + { PCI_VDEVICE(INTEL, 0x1D61),}, + { PCI_VDEVICE(INTEL, 0x1D63),}, + { PCI_VDEVICE(INTEL, 0x1D65),}, + { PCI_VDEVICE(INTEL, 0x1D67),}, + { PCI_VDEVICE(INTEL, 0x1D69),}, + { PCI_VDEVICE(INTEL, 0x1D6B),}, + { PCI_VDEVICE(INTEL, 0x1D60),}, + { PCI_VDEVICE(INTEL, 0x1D62),}, + { PCI_VDEVICE(INTEL, 0x1D64),}, + { PCI_VDEVICE(INTEL, 0x1D66),}, + { PCI_VDEVICE(INTEL, 0x1D68),}, + { PCI_VDEVICE(INTEL, 0x1D6A),}, + {} +}; + +MODULE_DEVICE_TABLE(pci, isci_id_table); + +/* linux isci specific settings */ + +unsigned char no_outbound_task_to = 2; +module_param(no_outbound_task_to, byte, 0); +MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)"); + +u16 ssp_max_occ_to = 20; +module_param(ssp_max_occ_to, ushort, 0); +MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)"); + +u16 stp_max_occ_to = 5; +module_param(stp_max_occ_to, ushort, 0); +MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)"); + +u16 ssp_inactive_to = 5; +module_param(ssp_inactive_to, ushort, 0); +MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)"); + +u16 stp_inactive_to = 5; +module_param(stp_inactive_to, ushort, 0); +MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)"); + +unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED; +module_param(phy_gen, byte, 0); +MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)"); + +unsigned char max_concurr_spinup; +module_param(max_concurr_spinup, byte, 0); +MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); + +uint cable_selection_override = CABLE_OVERRIDE_DISABLED; +module_param(cable_selection_override, uint, 0); + +MODULE_PARM_DESC(cable_selection_override, + "This field indicates length of the SAS/SATA cable between " + "host and device. 
If any bits > 15 are set (default) " + "indicates \"use platform defaults\""); + +static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); + + return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); +} + +static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); + +static struct attribute *isci_host_attrs[] = { + &dev_attr_isci_id.attr, + NULL +}; + +ATTRIBUTE_GROUPS(isci_host); + +static const struct scsi_host_template isci_sht = { + + .module = THIS_MODULE, + .name = DRV_NAME, + .proc_name = DRV_NAME, + .queuecommand = sas_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, + .target_alloc = sas_target_alloc, + .slave_configure = sas_slave_configure, + .scan_finished = isci_host_scan_finished, + .scan_start = isci_host_start, + .change_queue_depth = sas_change_queue_depth, + .bios_param = sas_bios_param, + .can_queue = ISCI_CAN_QUEUE_VAL, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .eh_abort_handler = sas_eh_abort_handler, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, + .slave_alloc = sas_slave_alloc, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif + .shost_groups = isci_host_groups, + .track_queue_depth = 1, +}; + +static struct sas_domain_function_template isci_transport_ops = { + + /* The class calls these to notify the LLDD of an event. */ + .lldd_port_formed = isci_port_formed, + .lldd_port_deformed = isci_port_deformed, + + /* The class calls these when a device is found or gone. */ + .lldd_dev_found = isci_remote_device_found, + .lldd_dev_gone = isci_remote_device_gone, + + .lldd_execute_task = isci_task_execute_task, + /* Task Management Functions. Must be called from process context. */ + .lldd_abort_task = isci_task_abort_task, + .lldd_abort_task_set = isci_task_abort_task_set, + .lldd_clear_task_set = isci_task_clear_task_set, + .lldd_I_T_nexus_reset = isci_task_I_T_nexus_reset, + .lldd_lu_reset = isci_task_lu_reset, + .lldd_query_task = isci_task_query_task, + + /* ata recovery called from ata-eh */ + .lldd_ata_check_ready = isci_ata_check_ready, + + /* Port and Adapter management */ + .lldd_clear_nexus_port = isci_task_clear_nexus_port, + .lldd_clear_nexus_ha = isci_task_clear_nexus_ha, + + /* Phy management */ + .lldd_control_phy = isci_phy_control, + + /* GPIO support */ + .lldd_write_gpio = isci_gpio_write, +}; + + +/****************************************************************************** +* P R O T E C T E D M E T H O D S +******************************************************************************/ + + + +/** + * isci_register_sas_ha() - This method initializes various lldd + * specific members of the sas_ha struct and calls the libsas + * sas_register_ha() function. + * @isci_host: This parameter specifies the lldd specific wrapper for the + * libsas sas_ha struct. + * + * This method returns an error code indicating success or failure. The user + * should check for possible memory allocation error return otherwise, a zero + * indicates success. 
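+ * The phy and port pointer tables below are allocated with devm_kcalloc(),
+ * so they are released automatically with the PCI device and need no
+ * explicit cleanup on the error paths.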
+ */ +static int isci_register_sas_ha(struct isci_host *isci_host) +{ + int i; + struct sas_ha_struct *sas_ha = &(isci_host->sas_ha); + struct asd_sas_phy **sas_phys; + struct asd_sas_port **sas_ports; + + sas_phys = devm_kcalloc(&isci_host->pdev->dev, + SCI_MAX_PHYS, sizeof(void *), + GFP_KERNEL); + if (!sas_phys) + return -ENOMEM; + + sas_ports = devm_kcalloc(&isci_host->pdev->dev, + SCI_MAX_PORTS, sizeof(void *), + GFP_KERNEL); + if (!sas_ports) + return -ENOMEM; + + sas_ha->sas_ha_name = DRV_NAME; + sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0]; + + for (i = 0; i < SCI_MAX_PHYS; i++) { + sas_phys[i] = &isci_host->phys[i].sas_phy; + sas_ports[i] = &isci_host->sas_ports[i]; + } + + sas_ha->sas_phy = sas_phys; + sas_ha->sas_port = sas_ports; + sas_ha->num_phys = SCI_MAX_PHYS; + + sas_ha->strict_wide_ports = 1; + + return sas_register_ha(sas_ha); +} + +static void isci_unregister(struct isci_host *isci_host) +{ + struct Scsi_Host *shost; + + if (!isci_host) + return; + + shost = to_shost(isci_host); + sas_unregister_ha(&isci_host->sas_ha); + + sas_remove_host(shost); + scsi_host_put(shost); +} + +static int isci_pci_init(struct pci_dev *pdev) +{ + int err, bar_num, bar_mask = 0; + void __iomem * const *iomap; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, + "failed enable PCI device %s!\n", + pci_name(pdev)); + return err; + } + + for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++) + bar_mask |= 1 << (bar_num * 2); + + err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME); + if (err) + return err; + + iomap = pcim_iomap_table(pdev); + if (!iomap) + return -ENOMEM; + + pci_set_master(pdev); + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + return err; +} + +static int num_controllers(struct pci_dev *pdev) +{ + /* bar size alone can tell us if we are running with a dual controller + * part, no need to trust revision ids that might be under broken firmware + * control + */ + resource_size_t scu_bar_size = pci_resource_len(pdev, SCI_SCU_BAR*2); + resource_size_t smu_bar_size = pci_resource_len(pdev, SCI_SMU_BAR*2); + + if (scu_bar_size >= SCI_SCU_BAR_SIZE*SCI_MAX_CONTROLLERS && + smu_bar_size >= SCI_SMU_BAR_SIZE*SCI_MAX_CONTROLLERS) + return SCI_MAX_CONTROLLERS; + else + return 1; +} + +static int isci_setup_interrupts(struct pci_dev *pdev) +{ + int err, i, num_msix; + struct isci_host *ihost; + struct isci_pci_info *pci_info = to_pci_info(pdev); + + /* + * Determine the number of vectors associated with this + * PCI function. 
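+ * Each controller uses SCI_NUM_MSI_X_INT (two) vectors: an even-numbered
+ * vector for normal completions and an odd-numbered vector for error
+ * interrupts, so a dual-controller part requests four in total.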
+ */ + num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT; + + err = pci_alloc_irq_vectors(pdev, num_msix, num_msix, PCI_IRQ_MSIX); + if (err < 0) + goto intx; + + for (i = 0; i < num_msix; i++) { + int id = i / SCI_NUM_MSI_X_INT; + irq_handler_t isr; + + ihost = pci_info->hosts[id]; + /* odd numbered vectors are error interrupts */ + if (i & 1) + isr = isci_error_isr; + else + isr = isci_msix_isr; + + err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i), + isr, 0, DRV_NAME"-msix", ihost); + if (!err) + continue; + + dev_info(&pdev->dev, "msix setup failed falling back to intx\n"); + while (i--) { + id = i / SCI_NUM_MSI_X_INT; + ihost = pci_info->hosts[id]; + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), + ihost); + } + pci_free_irq_vectors(pdev); + goto intx; + } + return 0; + + intx: + for_each_isci_host(i, ihost, pdev) { + err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0), + isci_intx_isr, IRQF_SHARED, DRV_NAME"-intx", + ihost); + if (err) + break; + } + return err; +} + +static void isci_user_parameters_get(struct sci_user_parameters *u) +{ + int i; + + for (i = 0; i < SCI_MAX_PHYS; i++) { + struct sci_phy_user_params *u_phy = &u->phys[i]; + + u_phy->max_speed_generation = phy_gen; + + /* we are not exporting these for now */ + u_phy->align_insertion_frequency = 0x7f; + u_phy->in_connection_align_insertion_frequency = 0xff; + u_phy->notify_enable_spin_up_insertion_frequency = 0x33; + } + + u->stp_inactivity_timeout = stp_inactive_to; + u->ssp_inactivity_timeout = ssp_inactive_to; + u->stp_max_occupancy_timeout = stp_max_occ_to; + u->ssp_max_occupancy_timeout = ssp_max_occ_to; + u->no_outbound_task_timeout = no_outbound_task_to; + u->max_concurr_spinup = max_concurr_spinup; +} + +static enum sci_status sci_user_parameters_set(struct isci_host *ihost, + struct sci_user_parameters *sci_parms) +{ + u16 index; + + /* + * Validate the user parameters. If they are not legal, then + * return a failure. + */ + for (index = 0; index < SCI_MAX_PHYS; index++) { + struct sci_phy_user_params *u; + + u = &sci_parms->phys[index]; + + if (!((u->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) && + (u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED))) + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + + if ((u->in_connection_align_insertion_frequency < 3) || + (u->align_insertion_frequency == 0) || + (u->notify_enable_spin_up_insertion_frequency == 0)) + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + } + + if ((sci_parms->stp_inactivity_timeout == 0) || + (sci_parms->ssp_inactivity_timeout == 0) || + (sci_parms->stp_max_occupancy_timeout == 0) || + (sci_parms->ssp_max_occupancy_timeout == 0) || + (sci_parms->no_outbound_task_timeout == 0)) + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + + memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms)); + + return SCI_SUCCESS; +} + +static void sci_oem_defaults(struct isci_host *ihost) +{ + /* these defaults are overridden by the platform / firmware */ + struct sci_user_parameters *user = &ihost->user_parameters; + struct sci_oem_params *oem = &ihost->oem_parameters; + int i; + + /* Default to APC mode. */ + oem->controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; + + /* Default to APC mode. */ + oem->controller.max_concurr_spin_up = 1; + + /* Default to no SSC operation. */ + oem->controller.do_enable_ssc = false; + + /* Default to short cables on all phys. */ + oem->controller.cable_selection_mask = 0; + + /* Initialize all of the port parameter information to narrow ports. 
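+ * A phy_mask of zero leaves all phys unassigned; in the default automatic
+ * (APC) port configuration mode selected above, ports are then formed
+ * dynamically as phys come up.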
*/ + for (i = 0; i < SCI_MAX_PORTS; i++) + oem->ports[i].phy_mask = 0; + + /* Initialize all of the phy parameter information. */ + for (i = 0; i < SCI_MAX_PHYS; i++) { + /* Default to 3G (i.e. Gen 2). */ + user->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN2_SPEED; + + /* the frequencies cannot be 0 */ + user->phys[i].align_insertion_frequency = 0x7f; + user->phys[i].in_connection_align_insertion_frequency = 0xff; + user->phys[i].notify_enable_spin_up_insertion_frequency = 0x33; + + /* Previous Vitesse based expanders had a arbitration issue that + * is worked around by having the upper 32-bits of SAS address + * with a value greater then the Vitesse company identifier. + * Hence, usage of 0x5FCFFFFF. + */ + oem->phys[i].sas_address.low = 0x1 + ihost->id; + oem->phys[i].sas_address.high = 0x5FCFFFFF; + } + + user->stp_inactivity_timeout = 5; + user->ssp_inactivity_timeout = 5; + user->stp_max_occupancy_timeout = 5; + user->ssp_max_occupancy_timeout = 20; + user->no_outbound_task_timeout = 2; +} + +static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) +{ + struct isci_orom *orom = to_pci_info(pdev)->orom; + struct sci_user_parameters sci_user_params; + u8 oem_version = ISCI_ROM_VER_1_0; + struct isci_host *ihost; + struct Scsi_Host *shost; + int err, i; + + ihost = devm_kzalloc(&pdev->dev, sizeof(*ihost), GFP_KERNEL); + if (!ihost) + return NULL; + + ihost->pdev = pdev; + ihost->id = id; + spin_lock_init(&ihost->scic_lock); + init_waitqueue_head(&ihost->eventq); + ihost->sas_ha.dev = &ihost->pdev->dev; + ihost->sas_ha.lldd_ha = ihost; + tasklet_init(&ihost->completion_tasklet, + isci_host_completion_routine, (unsigned long)ihost); + + /* validate module parameters */ + /* TODO: kill struct sci_user_parameters and reference directly */ + sci_oem_defaults(ihost); + isci_user_parameters_get(&sci_user_params); + if (sci_user_parameters_set(ihost, &sci_user_params)) { + dev_warn(&pdev->dev, + "%s: sci_user_parameters_set failed\n", __func__); + return NULL; + } + + /* sanity check platform (or 'firmware') oem parameters */ + if (orom) { + if (id < 0 || id >= SCI_MAX_CONTROLLERS || id > orom->hdr.num_elements) { + dev_warn(&pdev->dev, "parsing firmware oem parameters failed\n"); + return NULL; + } + ihost->oem_parameters = orom->ctrl[id]; + oem_version = orom->hdr.version; + } + + /* validate oem parameters (platform, firmware, or built-in defaults) */ + if (sci_oem_parameters_validate(&ihost->oem_parameters, oem_version)) { + dev_warn(&pdev->dev, "oem parameter validation failed\n"); + return NULL; + } + + for (i = 0; i < SCI_MAX_PORTS; i++) { + struct isci_port *iport = &ihost->ports[i]; + + INIT_LIST_HEAD(&iport->remote_dev_list); + iport->isci_host = ihost; + } + + for (i = 0; i < SCI_MAX_PHYS; i++) + isci_phy_init(&ihost->phys[i], ihost, i); + + for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { + struct isci_remote_device *idev = &ihost->devices[i]; + + INIT_LIST_HEAD(&idev->node); + } + + shost = scsi_host_alloc(&isci_sht, sizeof(void *)); + if (!shost) + return NULL; + + dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: " + "{%s, %s, %s, %s}\n", + (is_cable_select_overridden() ? 
"* " : ""), ihost->id, + lookup_cable_names(decode_cable_selection(ihost, 3)), + lookup_cable_names(decode_cable_selection(ihost, 2)), + lookup_cable_names(decode_cable_selection(ihost, 1)), + lookup_cable_names(decode_cable_selection(ihost, 0))); + + err = isci_host_init(ihost); + if (err) + goto err_shost; + + SHOST_TO_SAS_HA(shost) = &ihost->sas_ha; + ihost->sas_ha.shost = shost; + shost->transportt = isci_transport_template; + + shost->max_id = ~0; + shost->max_lun = ~0; + shost->max_cmd_len = MAX_COMMAND_SIZE; + + /* turn on DIF support */ + scsi_host_set_prot(shost, + SHOST_DIF_TYPE1_PROTECTION | + SHOST_DIF_TYPE2_PROTECTION | + SHOST_DIF_TYPE3_PROTECTION); + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + + err = scsi_add_host(shost, &pdev->dev); + if (err) + goto err_shost; + + err = isci_register_sas_ha(ihost); + if (err) + goto err_shost_remove; + + return ihost; + + err_shost_remove: + scsi_remove_host(shost); + err_shost: + scsi_host_put(shost); + + return NULL; +} + +static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct isci_pci_info *pci_info; + int err, i; + struct isci_host *isci_host; + const struct firmware *fw = NULL; + struct isci_orom *orom = NULL; + char *source = "(platform)"; + + dev_info(&pdev->dev, "driver configured for rev: %d silicon\n", + pdev->revision); + + pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL); + if (!pci_info) + return -ENOMEM; + pci_set_drvdata(pdev, pci_info); + + if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) + orom = isci_get_efi_var(pdev); + + if (!orom) + orom = isci_request_oprom(pdev); + + for (i = 0; orom && i < num_controllers(pdev); i++) { + if (sci_oem_parameters_validate(&orom->ctrl[i], + orom->hdr.version)) { + dev_warn(&pdev->dev, + "[%d]: invalid oem parameters detected, falling back to firmware\n", i); + orom = NULL; + break; + } + } + + if (!orom) { + source = "(firmware)"; + orom = isci_request_firmware(pdev, fw); + if (!orom) { + /* TODO convert this to WARN_TAINT_ONCE once the + * orom/efi parameter support is widely available + */ + dev_warn(&pdev->dev, + "Loading user firmware failed, using default " + "values\n"); + dev_warn(&pdev->dev, + "Default OEM configuration being used: 4 " + "narrow ports, and default SAS Addresses\n"); + } + } + + if (orom) + dev_info(&pdev->dev, + "OEM SAS parameters (version: %u.%u) loaded %s\n", + (orom->hdr.version & 0xf0) >> 4, + (orom->hdr.version & 0xf), source); + + pci_info->orom = orom; + + err = isci_pci_init(pdev); + if (err) + return err; + + for (i = 0; i < num_controllers(pdev); i++) { + struct isci_host *h = isci_host_alloc(pdev, i); + + if (!h) { + err = -ENOMEM; + goto err_host_alloc; + } + pci_info->hosts[i] = h; + } + + err = isci_setup_interrupts(pdev); + if (err) + goto err_host_alloc; + + for_each_isci_host(i, isci_host, pdev) + scsi_scan_host(to_shost(isci_host)); + + return 0; + + err_host_alloc: + for_each_isci_host(i, isci_host, pdev) + isci_unregister(isci_host); + return err; +} + +static void isci_pci_remove(struct pci_dev *pdev) +{ + struct isci_host *ihost; + int i; + + for_each_isci_host(i, ihost, pdev) { + wait_for_start(ihost); + isci_unregister(ihost); + isci_host_deinit(ihost); + } +} + +#ifdef CONFIG_PM_SLEEP +static int isci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct isci_host *ihost; + int i; + + for_each_isci_host(i, ihost, pdev) { + sas_suspend_ha(&ihost->sas_ha); + isci_host_deinit(ihost); + } + + return 0; +} + +static int isci_resume(struct 
device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct isci_host *ihost; + int i; + + for_each_isci_host(i, ihost, pdev) { + sas_prep_resume_ha(&ihost->sas_ha); + + isci_host_init(ihost); + isci_host_start(ihost->sas_ha.shost); + wait_for_start(ihost); + + sas_resume_ha(&ihost->sas_ha); + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume); + +static struct pci_driver isci_pci_driver = { + .name = DRV_NAME, + .id_table = isci_id_table, + .probe = isci_pci_probe, + .remove = isci_pci_remove, + .driver.pm = &isci_pm_ops, +}; + +static __init int isci_init(void) +{ + int err; + + pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n", + DRV_NAME, DRV_VERSION); + + isci_transport_template = sas_domain_attach_transport(&isci_transport_ops); + if (!isci_transport_template) + return -ENOMEM; + + err = pci_register_driver(&isci_pci_driver); + if (err) + sas_release_transport(isci_transport_template); + + return err; +} + +static __exit void isci_exit(void) +{ + pci_unregister_driver(&isci_pci_driver); + sas_release_transport(isci_transport_template); +} + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_FIRMWARE(ISCI_FW_NAME); +module_init(isci_init); +module_exit(isci_exit); diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h new file mode 100644 index 000000000..4e6b1decb --- /dev/null +++ b/drivers/scsi/isci/isci.h @@ -0,0 +1,537 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __ISCI_H__ +#define __ISCI_H__ + +#include +#include + +#define DRV_NAME "isci" +#define SCI_PCI_BAR_COUNT 2 +#define SCI_NUM_MSI_X_INT 2 +#define SCI_SMU_BAR 0 +#define SCI_SMU_BAR_SIZE (16*1024) +#define SCI_SCU_BAR 1 +#define SCI_SCU_BAR_SIZE (4*1024*1024) +#define SCI_IO_SPACE_BAR0 2 +#define SCI_IO_SPACE_BAR1 3 +#define ISCI_CAN_QUEUE_VAL 250 /* < SCI_MAX_IO_REQUESTS ? */ +#define SCIC_CONTROLLER_STOP_TIMEOUT 5000 + +#define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF + +#define SCI_MAX_PHYS (4UL) +#define SCI_MAX_PORTS SCI_MAX_PHYS +#define SCI_MAX_SMP_PHYS (384) /* not silicon constrained */ +#define SCI_MAX_REMOTE_DEVICES (256UL) +#define SCI_MAX_IO_REQUESTS (256UL) +#define SCI_MAX_SEQ (16) +#define SCI_MAX_MSIX_MESSAGES (2) +#define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 /* not silicon constrained */ +#define SCI_MAX_CONTROLLERS 2 +#define SCI_MAX_DOMAINS SCI_MAX_PORTS + +#define SCU_MAX_CRITICAL_NOTIFICATIONS (384) +#define SCU_MAX_EVENTS_SHIFT (7) +#define SCU_MAX_EVENTS (1 << SCU_MAX_EVENTS_SHIFT) +#define SCU_MAX_UNSOLICITED_FRAMES (128) +#define SCU_MAX_COMPLETION_QUEUE_SCRATCH (128) +#define SCU_MAX_COMPLETION_QUEUE_ENTRIES (SCU_MAX_CRITICAL_NOTIFICATIONS \ + + SCU_MAX_EVENTS \ + + SCU_MAX_UNSOLICITED_FRAMES \ + + SCI_MAX_IO_REQUESTS \ + + SCU_MAX_COMPLETION_QUEUE_SCRATCH) +#define SCU_MAX_COMPLETION_QUEUE_SHIFT (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES)) + +#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096) +#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024U) +#define SCU_INVALID_FRAME_INDEX (0xFFFF) + +#define SCU_IO_REQUEST_MAX_SGE_SIZE (0x00FFFFFF) +#define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH (0x00FFFFFF) + +static inline void check_sizes(void) +{ + BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_EVENTS); + BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES <= 8); + BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_UNSOLICITED_FRAMES); + BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_COMPLETION_QUEUE_ENTRIES); + BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES > SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES); + BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_IO_REQUESTS); + BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_SEQ); +} + +/** + * enum sci_status - This is the general return status enumeration for non-IO, + * non-task management related SCI interface methods. + * + * + */ +enum sci_status { + /** + * This member indicates successful completion. + */ + SCI_SUCCESS = 0, + + /** + * This value indicates that the calling method completed successfully, + * but that the IO may have completed before having it's start method + * invoked. This occurs during SAT translation for requests that do + * not require an IO to the target or for any other requests that may + * be completed without having to submit IO. + */ + SCI_SUCCESS_IO_COMPLETE_BEFORE_START, + + /** + * This Value indicates that the SCU hardware returned an early response + * because the io request specified more data than is returned by the + * target device (mode pages, inquiry data, etc.). 
The completion routine + * will handle this case to get the actual number of bytes transferred. + */ + SCI_SUCCESS_IO_DONE_EARLY, + + /** + * This member indicates that the object for which a state change is + * being requested is already in said state. + */ + SCI_WARNING_ALREADY_IN_STATE, + + /** + * This member indicates interrupt coalescence timer may cause SAS + * specification compliance issues (i.e. SMP target mode response + * frames must be returned within 1.9 milliseconds). + */ + SCI_WARNING_TIMER_CONFLICT, + + /** + * This field indicates a sequence of action is not completed yet. Mostly, + * this status is used when multiple ATA commands are needed in a SATI translation. + */ + SCI_WARNING_SEQUENCE_INCOMPLETE, + + /** + * This member indicates that there was a general failure. + */ + SCI_FAILURE, + + /** + * This member indicates that the SCI implementation is unable to complete + * an operation due to a critical flaw the prevents any further operation + * (i.e. an invalid pointer). + */ + SCI_FATAL_ERROR, + + /** + * This member indicates the calling function failed, because the state + * of the controller is in a state that prevents successful completion. + */ + SCI_FAILURE_INVALID_STATE, + + /** + * This member indicates the calling function failed, because there is + * insufficient resources/memory to complete the request. + */ + SCI_FAILURE_INSUFFICIENT_RESOURCES, + + /** + * This member indicates the calling function failed, because the + * controller object required for the operation can't be located. + */ + SCI_FAILURE_CONTROLLER_NOT_FOUND, + + /** + * This member indicates the calling function failed, because the + * discovered controller type is not supported by the library. + */ + SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE, + + /** + * This member indicates the calling function failed, because the + * requested initialization data version isn't supported. + */ + SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION, + + /** + * This member indicates the calling function failed, because the + * requested configuration of SAS Phys into SAS Ports is not supported. + */ + SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION, + + /** + * This member indicates the calling function failed, because the + * requested protocol is not supported by the remote device, port, + * or controller. + */ + SCI_FAILURE_UNSUPPORTED_PROTOCOL, + + /** + * This member indicates the calling function failed, because the + * requested information type is not supported by the SCI implementation. + */ + SCI_FAILURE_UNSUPPORTED_INFORMATION_TYPE, + + /** + * This member indicates the calling function failed, because the + * device already exists. + */ + SCI_FAILURE_DEVICE_EXISTS, + + /** + * This member indicates the calling function failed, because adding + * a phy to the object is not possible. + */ + SCI_FAILURE_ADDING_PHY_UNSUPPORTED, + + /** + * This member indicates the calling function failed, because the + * requested information type is not supported by the SCI implementation. + */ + SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD, + + /** + * This member indicates the calling function failed, because the SCI + * implementation does not support the supplied time limit. + */ + SCI_FAILURE_UNSUPPORTED_TIME_LIMIT, + + /** + * This member indicates the calling method failed, because the SCI + * implementation does not contain the specified Phy. + */ + SCI_FAILURE_INVALID_PHY, + + /** + * This member indicates the calling method failed, because the SCI + * implementation does not contain the specified Port. 
+ */ + SCI_FAILURE_INVALID_PORT, + + /** + * This member indicates the calling method was partly successful + * The port was reset but not all phys in port are operational + */ + SCI_FAILURE_RESET_PORT_PARTIAL_SUCCESS, + + /** + * This member indicates that calling method failed + * The port reset did not complete because none of the phys are operational + */ + SCI_FAILURE_RESET_PORT_FAILURE, + + /** + * This member indicates the calling method failed, because the SCI + * implementation does not contain the specified remote device. + */ + SCI_FAILURE_INVALID_REMOTE_DEVICE, + + /** + * This member indicates the calling method failed, because the remote + * device is in a bad state and requires a reset. + */ + SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED, + + /** + * This member indicates the calling method failed, because the SCI + * implementation does not contain or support the specified IO tag. + */ + SCI_FAILURE_INVALID_IO_TAG, + + /** + * This member indicates that the operation failed and the user should + * check the response data associated with the IO. + */ + SCI_FAILURE_IO_RESPONSE_VALID, + + /** + * This member indicates that the operation failed, the failure is + * controller implementation specific, and the response data associated + * with the request is not valid. You can query for the controller + * specific error information via sci_controller_get_request_status() + */ + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, + + /** + * This member indicated that the operation failed because the + * user requested this IO to be terminated. + */ + SCI_FAILURE_IO_TERMINATED, + + /** + * This member indicates that the operation failed and the associated + * request requires a SCSI abort task to be sent to the target. + */ + SCI_FAILURE_IO_REQUIRES_SCSI_ABORT, + + /** + * This member indicates that the operation failed because the supplied + * device could not be located. + */ + SCI_FAILURE_DEVICE_NOT_FOUND, + + /** + * This member indicates that the operation failed because the + * objects association is required and is not correctly set. + */ + SCI_FAILURE_INVALID_ASSOCIATION, + + /** + * This member indicates that the operation failed, because a timeout + * occurred. + */ + SCI_FAILURE_TIMEOUT, + + /** + * This member indicates that the operation failed, because the user + * specified a value that is either invalid or not supported. + */ + SCI_FAILURE_INVALID_PARAMETER_VALUE, + + /** + * This value indicates that the operation failed, because the number + * of messages (MSI-X) is not supported. + */ + SCI_FAILURE_UNSUPPORTED_MESSAGE_COUNT, + + /** + * This value indicates that the method failed due to a lack of + * available NCQ tags. + */ + SCI_FAILURE_NO_NCQ_TAG_AVAILABLE, + + /** + * This value indicates that a protocol violation has occurred on the + * link. + */ + SCI_FAILURE_PROTOCOL_VIOLATION, + + /** + * This value indicates a failure condition that retry may help to clear. + */ + SCI_FAILURE_RETRY_REQUIRED, + + /** + * This field indicates the retry limit was reached when a retry is attempted + */ + SCI_FAILURE_RETRY_LIMIT_REACHED, + + /** + * This member indicates the calling method was partly successful. + * Mostly, this status is used when a LUN_RESET issued to an expander attached + * STP device in READY NCQ substate needs to have RNC suspended/resumed + * before posting TC. + */ + SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS, + + /** + * This field indicates an illegal phy connection based on the routing attribute + * of both expander phy attached to each other. 
+ */ + SCI_FAILURE_ILLEGAL_ROUTING_ATTRIBUTE_CONFIGURATION, + + /** + * This field indicates a CONFIG ROUTE INFO command has a response with function result + * INDEX DOES NOT EXIST, usually means exceeding max route index. + */ + SCI_FAILURE_EXCEED_MAX_ROUTE_INDEX, + + /** + * This value indicates that an unsupported PCI device ID has been + * specified. This indicates that attempts to invoke + * sci_library_allocate_controller() will fail. + */ + SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID + +}; + +/** + * enum sci_io_status - This enumeration depicts all of the possible IO + * completion status values. Each value in this enumeration maps directly + * to a value in the enum sci_status enumeration. Please refer to that + * enumeration for detailed comments concerning what the status represents. + * + * Add the API to retrieve the SCU status from the core. Check to see that the + * following status are properly handled: - SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL + * - SCI_IO_FAILURE_INVALID_IO_TAG + */ +enum sci_io_status { + SCI_IO_SUCCESS = SCI_SUCCESS, + SCI_IO_FAILURE = SCI_FAILURE, + SCI_IO_SUCCESS_COMPLETE_BEFORE_START = SCI_SUCCESS_IO_COMPLETE_BEFORE_START, + SCI_IO_SUCCESS_IO_DONE_EARLY = SCI_SUCCESS_IO_DONE_EARLY, + SCI_IO_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE, + SCI_IO_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES, + SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL, + SCI_IO_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID, + SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, + SCI_IO_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED, + SCI_IO_FAILURE_REQUIRES_SCSI_ABORT = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT, + SCI_IO_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE, + SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE = SCI_FAILURE_NO_NCQ_TAG_AVAILABLE, + SCI_IO_FAILURE_PROTOCOL_VIOLATION = SCI_FAILURE_PROTOCOL_VIOLATION, + + SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED, + + SCI_IO_FAILURE_RETRY_REQUIRED = SCI_FAILURE_RETRY_REQUIRED, + SCI_IO_FAILURE_RETRY_LIMIT_REACHED = SCI_FAILURE_RETRY_LIMIT_REACHED, + SCI_IO_FAILURE_INVALID_REMOTE_DEVICE = SCI_FAILURE_INVALID_REMOTE_DEVICE +}; + +/** + * enum sci_task_status - This enumeration depicts all of the possible task + * completion status values. Each value in this enumeration maps directly + * to a value in the enum sci_status enumeration. Please refer to that + * enumeration for detailed comments concerning what the status represents. 
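+ * The mapping is one-to-one, so a value of this type may be compared
+ * directly against the corresponding enum sci_status value.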
+ * + * Check to see that the following status are properly handled: + */ +enum sci_task_status { + SCI_TASK_SUCCESS = SCI_SUCCESS, + SCI_TASK_FAILURE = SCI_FAILURE, + SCI_TASK_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE, + SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES, + SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL, + SCI_TASK_FAILURE_INVALID_TAG = SCI_FAILURE_INVALID_IO_TAG, + SCI_TASK_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID, + SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, + SCI_TASK_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED, + SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE, + + SCI_TASK_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED, + SCI_TASK_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS = SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS + +}; + +/** + * sci_swab32_cpy - convert between scsi and scu-hardware byte format + * @dest: receive the 4-byte endian swapped version of src + * @src: word aligned source buffer + * + * scu hardware handles SSP/SMP control, response, and unidentified + * frames in "big endian dword" order. Regardless of host endian this + * is always a swab32()-per-dword conversion of the standard definition, + * i.e. single byte fields swapped and multi-byte fields in little- + * endian + */ +static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt) +{ + u32 *dest = _dest, *src = _src; + + while (--word_cnt >= 0) + dest[word_cnt] = swab32(src[word_cnt]); +} + +extern unsigned char no_outbound_task_to; +extern u16 ssp_max_occ_to; +extern u16 stp_max_occ_to; +extern u16 ssp_inactive_to; +extern u16 stp_inactive_to; +extern unsigned char phy_gen; +extern unsigned char max_concurr_spinup; +extern uint cable_selection_override; + +irqreturn_t isci_msix_isr(int vec, void *data); +irqreturn_t isci_intx_isr(int vec, void *data); +irqreturn_t isci_error_isr(int vec, void *data); + +/* + * Each timer is associated with a cancellation flag that is set when + * del_timer() is called and checked in the timer callback function. This + * is needed since del_timer_sync() cannot be called with sci_lock held. + * For deinit however, del_timer_sync() is used without holding the lock. 
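+ * Timer callbacks therefore take ihost->scic_lock and return early when
+ * the cancel flag is set; see phy_sata_timeout() for the typical pattern.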
+ */ +struct sci_timer { + struct timer_list timer; + bool cancel; +}; + +static inline +void sci_init_timer(struct sci_timer *tmr, void (*fn)(struct timer_list *t)) +{ + tmr->cancel = false; + timer_setup(&tmr->timer, fn, 0); +} + +static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec) +{ + tmr->cancel = false; + mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec)); +} + +static inline void sci_del_timer(struct sci_timer *tmr) +{ + tmr->cancel = true; + del_timer(&tmr->timer); +} + +struct sci_base_state_machine { + const struct sci_base_state *state_table; + u32 initial_state_id; + u32 current_state_id; + u32 previous_state_id; +}; + +typedef void (*sci_state_transition_t)(struct sci_base_state_machine *sm); + +struct sci_base_state { + sci_state_transition_t enter_state; /* Called on state entry */ + sci_state_transition_t exit_state; /* Called on state exit */ +}; + +extern void sci_init_sm(struct sci_base_state_machine *sm, + const struct sci_base_state *state_table, + u32 initial_state); +extern void sci_change_state(struct sci_base_state_machine *sm, u32 next_state); +#endif /* __ISCI_H__ */ diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c new file mode 100644 index 000000000..743a3c64b --- /dev/null +++ b/drivers/scsi/isci/phy.c @@ -0,0 +1,1482 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "isci.h" +#include "host.h" +#include "phy.h" +#include "scu_event_codes.h" +#include "probe_roms.h" + +#undef C +#define C(a) (#a) +static const char *phy_state_name(enum sci_phy_states state) +{ + static const char * const strings[] = PHY_STATES; + + return strings[state]; +} +#undef C + +/* Maximum arbitration wait time in micro-seconds */ +#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700) + +enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy) +{ + return iphy->max_negotiated_speed; +} + +static struct isci_host *phy_to_host(struct isci_phy *iphy) +{ + struct isci_phy *table = iphy - iphy->phy_index; + struct isci_host *ihost = container_of(table, typeof(*ihost), phys[0]); + + return ihost; +} + +static struct device *sciphy_to_dev(struct isci_phy *iphy) +{ + return &phy_to_host(iphy)->pdev->dev; +} + +static enum sci_status +sci_phy_transport_layer_initialization(struct isci_phy *iphy, + struct scu_transport_layer_registers __iomem *reg) +{ + u32 tl_control; + + iphy->transport_layer_registers = reg; + + writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX, + &iphy->transport_layer_registers->stp_rni); + + /* + * Hardware team recommends that we enable the STP prefetch for all + * transports + */ + tl_control = readl(&iphy->transport_layer_registers->control); + tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH); + writel(tl_control, &iphy->transport_layer_registers->control); + + return SCI_SUCCESS; +} + +static enum sci_status +sci_phy_link_layer_initialization(struct isci_phy *iphy, + struct scu_link_layer_registers __iomem *llr) +{ + struct isci_host *ihost = iphy->owning_port->owning_controller; + struct sci_phy_user_params *phy_user; + struct sci_phy_oem_params *phy_oem; + int phy_idx = iphy->phy_index; + struct sci_phy_cap phy_cap; + u32 phy_configuration; + u32 parity_check = 0; + u32 parity_count = 0; + u32 llctl, link_rate; + u32 clksm_value = 0; + u32 sp_timeouts = 0; + + phy_user = &ihost->user_parameters.phys[phy_idx]; + phy_oem = &ihost->oem_parameters.phys[phy_idx]; + iphy->link_layer_registers = llr; + + /* Set our IDENTIFY frame data */ + #define SCI_END_DEVICE 0x01 + + writel(SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) | + SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) | + SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) | + SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) | + SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE), + &llr->transmit_identification); + + /* Write the device SAS Address */ + writel(0xFEDCBA98, &llr->sas_device_name_high); + writel(phy_idx, &llr->sas_device_name_low); + + /* Write the source SAS Address */ + writel(phy_oem->sas_address.high, &llr->source_sas_address_high); + writel(phy_oem->sas_address.low, &llr->source_sas_address_low); + + /* Clear and Set the PHY Identifier */ + writel(0, &llr->identify_frame_phy_id); + writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id); + + /* Change the initial state of the phy configuration register */ + phy_configuration = readl(&llr->phy_configuration); + + /* Hold 
OOB state machine in reset */ + phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET); + writel(phy_configuration, &llr->phy_configuration); + + /* Configure the SNW capabilities */ + phy_cap.all = 0; + phy_cap.start = 1; + phy_cap.gen3_no_ssc = 1; + phy_cap.gen2_no_ssc = 1; + phy_cap.gen1_no_ssc = 1; + if (ihost->oem_parameters.controller.do_enable_ssc) { + struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe; + struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_idx]; + struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); + bool en_sas = false; + bool en_sata = false; + u32 sas_type = 0; + u32 sata_spread = 0x2; + u32 sas_spread = 0x2; + + phy_cap.gen3_ssc = 1; + phy_cap.gen2_ssc = 1; + phy_cap.gen1_ssc = 1; + + if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1) + en_sas = en_sata = true; + else { + sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level; + sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level; + + if (sata_spread) + en_sata = true; + + if (sas_spread) { + en_sas = true; + sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type; + } + + } + + if (en_sas) { + u32 reg; + + reg = readl(&xcvr->afe_xcvr_control0); + reg |= (0x00100000 | (sas_type << 19)); + writel(reg, &xcvr->afe_xcvr_control0); + + reg = readl(&xcvr->afe_tx_ssc_control); + reg |= sas_spread << 8; + writel(reg, &xcvr->afe_tx_ssc_control); + } + + if (en_sata) { + u32 reg; + + reg = readl(&xcvr->afe_tx_ssc_control); + reg |= sata_spread; + writel(reg, &xcvr->afe_tx_ssc_control); + + reg = readl(&llr->stp_control); + reg |= 1 << 12; + writel(reg, &llr->stp_control); + } + } + + /* The SAS specification indicates that the phy_capabilities that + * are transmitted shall have an even parity. Calculate the parity. + */ + parity_check = phy_cap.all; + while (parity_check != 0) { + if (parity_check & 0x1) + parity_count++; + parity_check >>= 1; + } + + /* If parity indicates there are an odd number of bits set, then + * set the parity bit to 1 in the phy capabilities. 
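+ * For example, a capabilities value with three bits set gets parity = 1,
+ * bringing the total number of set bits to an even four.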
+ */ + if ((parity_count % 2) != 0) + phy_cap.parity = 1; + + writel(phy_cap.all, &llr->phy_capabilities); + + /* Set the enable spinup period but disable the ability to send + * notify enable spinup + */ + writel(SCU_ENSPINUP_GEN_VAL(COUNT, + phy_user->notify_enable_spin_up_insertion_frequency), + &llr->notify_enable_spinup_control); + + /* Write the ALIGN Insertion Frequency for connected phy and + * independent of connected state + */ + clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(CONNECTED, + phy_user->in_connection_align_insertion_frequency); + + clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL, + phy_user->align_insertion_frequency); + + writel(clksm_value, &llr->clock_skew_management); + + if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) { + writel(0x04210400, &llr->afe_lookup_table_control); + writel(0x020A7C05, &llr->sas_primitive_timeout); + } else + writel(0x02108421, &llr->afe_lookup_table_control); + + llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, + (u8)ihost->user_parameters.no_outbound_task_timeout); + + switch (phy_user->max_speed_generation) { + case SCIC_SDS_PARM_GEN3_SPEED: + link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3; + break; + case SCIC_SDS_PARM_GEN2_SPEED: + link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2; + break; + default: + link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1; + break; + } + llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); + writel(llctl, &llr->link_layer_control); + + sp_timeouts = readl(&llr->sas_phy_timeouts); + + /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */ + sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); + + /* Set RATE_CHANGE timeout value to 0x3B (59us). This ensures SCU can + * lock with 3Gb drive when SCU max rate is set to 1.5Gb. + */ + sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); + + writel(sp_timeouts, &llr->sas_phy_timeouts); + + if (is_a2(ihost->pdev)) { + /* Program the max ARB time for the PHY to 700us so we + * inter-operate with the PMC expander which shuts down + * PHYs if the expander PHY generates too many breaks. + * This time value will guarantee that the initiator PHY + * will generate the break. + */ + writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME, + &llr->maximum_arbitration_wait_timer_timeout); + } + + /* Disable link layer hang detection, rely on the OS timeout for + * I/O timeouts. + */ + writel(0, &llr->link_layer_hang_detection_timeout); + + /* We can exit the initial state to the stopped state */ + sci_change_state(&iphy->sm, SCI_PHY_STOPPED); + + return SCI_SUCCESS; +} + +static void phy_sata_timeout(struct timer_list *t) +{ + struct sci_timer *tmr = from_timer(tmr, t, timer); + struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer); + struct isci_host *ihost = iphy->owning_port->owning_controller; + unsigned long flags; + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (tmr->cancel) + goto done; + + dev_dbg(sciphy_to_dev(iphy), + "%s: SCIC SDS Phy 0x%p did not receive signature fis before " + "timeout.\n", + __func__, + iphy); + + sci_change_state(&iphy->sm, SCI_PHY_STARTING); +done: + spin_unlock_irqrestore(&ihost->scic_lock, flags); +} + +/** + * phy_get_non_dummy_port() - This method returns the port currently containing + * this phy. If the phy is currently contained by the dummy port, then the phy + * is considered to not be part of a port. + * + * @iphy: This parameter specifies the phy for which to retrieve the + * containing port. 
+ * + * This method returns a handle to a port that contains the supplied phy. + * NULL This value is returned if the phy is not part of a real + * port (i.e. it's contained in the dummy port). !NULL All other + * values indicate a handle/pointer to the port containing the phy. + */ +struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy) +{ + struct isci_port *iport = iphy->owning_port; + + if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT) + return NULL; + + return iphy->owning_port; +} + +/* + * sci_phy_set_port() - This method will assign a port to the phy object. + */ +void sci_phy_set_port( + struct isci_phy *iphy, + struct isci_port *iport) +{ + iphy->owning_port = iport; + + if (iphy->bcn_received_while_port_unassigned) { + iphy->bcn_received_while_port_unassigned = false; + sci_port_broadcast_change_received(iphy->owning_port, iphy); + } +} + +enum sci_status sci_phy_initialize(struct isci_phy *iphy, + struct scu_transport_layer_registers __iomem *tl, + struct scu_link_layer_registers __iomem *ll) +{ + /* Perform the initialization of the TL hardware */ + sci_phy_transport_layer_initialization(iphy, tl); + + /* Perform the initialization of the PE hardware */ + sci_phy_link_layer_initialization(iphy, ll); + + /* There is nothing that needs to be done in this state just + * transition to the stopped state + */ + sci_change_state(&iphy->sm, SCI_PHY_STOPPED); + + return SCI_SUCCESS; +} + +/** + * sci_phy_setup_transport() - This method assigns the direct attached device ID for this phy. + * + * @iphy: The phy for which the direct attached device id is to + * be assigned. + * @device_id: The direct attached device ID to assign to the phy. + * This will either be the RNi for the device or an invalid RNi if there + * is no current device assigned to the phy. 
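+ * sci_phy_suspend() clears the association by passing
+ * SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX as the device ID.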
+ */ +void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id) +{ + u32 tl_control; + + writel(device_id, &iphy->transport_layer_registers->stp_rni); + + /* + * The read should guarantee that the first write gets posted + * before the next write + */ + tl_control = readl(&iphy->transport_layer_registers->control); + tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE); + writel(tl_control, &iphy->transport_layer_registers->control); +} + +static void sci_phy_suspend(struct isci_phy *iphy) +{ + u32 scu_sas_pcfg_value; + + scu_sas_pcfg_value = + readl(&iphy->link_layer_registers->phy_configuration); + scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE); + writel(scu_sas_pcfg_value, + &iphy->link_layer_registers->phy_configuration); + + sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); +} + +void sci_phy_resume(struct isci_phy *iphy) +{ + u32 scu_sas_pcfg_value; + + scu_sas_pcfg_value = + readl(&iphy->link_layer_registers->phy_configuration); + scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE); + writel(scu_sas_pcfg_value, + &iphy->link_layer_registers->phy_configuration); +} + +void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas) +{ + sas->high = readl(&iphy->link_layer_registers->source_sas_address_high); + sas->low = readl(&iphy->link_layer_registers->source_sas_address_low); +} + +void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas) +{ + struct sas_identify_frame *iaf; + + iaf = &iphy->frame_rcvd.iaf; + memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE); +} + +void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto) +{ + proto->all = readl(&iphy->link_layer_registers->transmit_identification); +} + +enum sci_status sci_phy_start(struct isci_phy *iphy) +{ + enum sci_phy_states state = iphy->sm.current_state_id; + + if (state != SCI_PHY_STOPPED) { + dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", + __func__, phy_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } + + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + return SCI_SUCCESS; +} + +enum sci_status sci_phy_stop(struct isci_phy *iphy) +{ + enum sci_phy_states state = iphy->sm.current_state_id; + + switch (state) { + case SCI_PHY_SUB_INITIAL: + case SCI_PHY_SUB_AWAIT_OSSP_EN: + case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: + case SCI_PHY_SUB_AWAIT_SAS_POWER: + case SCI_PHY_SUB_AWAIT_SATA_POWER: + case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: + case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: + case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: + case SCI_PHY_SUB_FINAL: + case SCI_PHY_READY: + break; + default: + dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", + __func__, phy_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } + + sci_change_state(&iphy->sm, SCI_PHY_STOPPED); + return SCI_SUCCESS; +} + +enum sci_status sci_phy_reset(struct isci_phy *iphy) +{ + enum sci_phy_states state = iphy->sm.current_state_id; + + if (state != SCI_PHY_READY) { + dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", + __func__, phy_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } + + sci_change_state(&iphy->sm, SCI_PHY_RESETTING); + return SCI_SUCCESS; +} + +enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy) +{ + enum sci_phy_states state = iphy->sm.current_state_id; + + switch (state) { + case SCI_PHY_SUB_AWAIT_SAS_POWER: { + u32 enable_spinup; + + enable_spinup = readl(&iphy->link_layer_registers->notify_enable_spinup_control); + enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE); 
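+ /* write back the control value with the notify-enable-spinup bit set */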
+ writel(enable_spinup, &iphy->link_layer_registers->notify_enable_spinup_control); + + /* Change state to the final state this substate machine has run to completion */ + sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL); + + return SCI_SUCCESS; + } + case SCI_PHY_SUB_AWAIT_SATA_POWER: { + u32 scu_sas_pcfg_value; + + /* Release the spinup hold state and reset the OOB state machine */ + scu_sas_pcfg_value = + readl(&iphy->link_layer_registers->phy_configuration); + scu_sas_pcfg_value &= + ~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE)); + scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET); + writel(scu_sas_pcfg_value, + &iphy->link_layer_registers->phy_configuration); + + /* Now restart the OOB operation */ + scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET); + scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); + writel(scu_sas_pcfg_value, + &iphy->link_layer_registers->phy_configuration); + + /* Change state to the final state this substate machine has run to completion */ + sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN); + + return SCI_SUCCESS; + } + default: + dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", + __func__, phy_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } +} + +static void sci_phy_start_sas_link_training(struct isci_phy *iphy) +{ + /* continue the link training for the phy as if it were a SAS PHY + * instead of a SATA PHY. This is done because the completion queue had a SAS + * PHY DETECTED event when the state machine was expecting a SATA PHY event. + */ + u32 phy_control; + + phy_control = readl(&iphy->link_layer_registers->phy_configuration); + phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD); + writel(phy_control, + &iphy->link_layer_registers->phy_configuration); + + sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN); + + iphy->protocol = SAS_PROTOCOL_SSP; +} + +static void sci_phy_start_sata_link_training(struct isci_phy *iphy) +{ + /* This method continues the link training for the phy as if it were a SATA PHY + * instead of a SAS PHY. This is done because the completion queue had a SATA + * SPINUP HOLD event when the state machine was expecting a SAS PHY event. none + */ + sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER); + + iphy->protocol = SAS_PROTOCOL_SATA; +} + +/** + * sci_phy_complete_link_training - perform processing common to + * all protocols upon completion of link training. + * @iphy: This parameter specifies the phy object for which link training + * has completed. + * @max_link_rate: This parameter specifies the maximum link rate to be + * associated with this phy. + * @next_state: This parameter specifies the next state for the phy's starting + * sub-state machine. 
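+ * (for example, SCI_PHY_SUB_AWAIT_IAF_UF once a SAS link speed has been
+ * negotiated).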
+ * + */ +static void sci_phy_complete_link_training(struct isci_phy *iphy, + enum sas_linkrate max_link_rate, + u32 next_state) +{ + iphy->max_negotiated_speed = max_link_rate; + + sci_change_state(&iphy->sm, next_state); +} + +static const char *phy_event_name(u32 event_code) +{ + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_PORT_SELECTOR_DETECTED: + return "port selector"; + case SCU_EVENT_SENT_PORT_SELECTION: + return "port selection"; + case SCU_EVENT_HARD_RESET_TRANSMITTED: + return "tx hard reset"; + case SCU_EVENT_HARD_RESET_RECEIVED: + return "rx hard reset"; + case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: + return "identify timeout"; + case SCU_EVENT_LINK_FAILURE: + return "link fail"; + case SCU_EVENT_SATA_SPINUP_HOLD: + return "sata spinup hold"; + case SCU_EVENT_SAS_15_SSC: + case SCU_EVENT_SAS_15: + return "sas 1.5"; + case SCU_EVENT_SAS_30_SSC: + case SCU_EVENT_SAS_30: + return "sas 3.0"; + case SCU_EVENT_SAS_60_SSC: + case SCU_EVENT_SAS_60: + return "sas 6.0"; + case SCU_EVENT_SATA_15_SSC: + case SCU_EVENT_SATA_15: + return "sata 1.5"; + case SCU_EVENT_SATA_30_SSC: + case SCU_EVENT_SATA_30: + return "sata 3.0"; + case SCU_EVENT_SATA_60_SSC: + case SCU_EVENT_SATA_60: + return "sata 6.0"; + case SCU_EVENT_SAS_PHY_DETECTED: + return "sas detect"; + case SCU_EVENT_SATA_PHY_DETECTED: + return "sata detect"; + default: + return "unknown"; + } +} + +#define phy_event_dbg(iphy, state, code) \ + dev_dbg(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \ + phy_to_host(iphy)->id, iphy->phy_index, \ + phy_state_name(state), phy_event_name(code), code) + +#define phy_event_warn(iphy, state, code) \ + dev_warn(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \ + phy_to_host(iphy)->id, iphy->phy_index, \ + phy_state_name(state), phy_event_name(code), code) + + +static void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout) +{ + u32 val; + + /* Extend timeout */ + val = readl(&iphy->link_layer_registers->transmit_comsas_signal); + val &= ~SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK); + val |= SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, timeout); + + writel(val, &iphy->link_layer_registers->transmit_comsas_signal); +} + +enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) +{ + enum sci_phy_states state = iphy->sm.current_state_id; + + switch (state) { + case SCI_PHY_SUB_AWAIT_OSSP_EN: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_SAS_PHY_DETECTED: + sci_phy_start_sas_link_training(iphy); + iphy->is_in_link_training = true; + break; + case SCU_EVENT_SATA_SPINUP_HOLD: + sci_phy_start_sata_link_training(iphy); + iphy->is_in_link_training = true; + break; + case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: + /* Extend timeout value */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); + + /* Start the oob/sn state machine over again */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + default: + phy_event_dbg(iphy, state, event_code); + return SCI_FAILURE; + } + return SCI_SUCCESS; + case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_SAS_PHY_DETECTED: + /* + * Why is this being reported again by the controller? 
+ * We would re-enter this state so just stay here */ + break; + case SCU_EVENT_SAS_15: + case SCU_EVENT_SAS_15_SSC: + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS, + SCI_PHY_SUB_AWAIT_IAF_UF); + break; + case SCU_EVENT_SAS_30: + case SCU_EVENT_SAS_30_SSC: + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS, + SCI_PHY_SUB_AWAIT_IAF_UF); + break; + case SCU_EVENT_SAS_60: + case SCU_EVENT_SAS_60_SSC: + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS, + SCI_PHY_SUB_AWAIT_IAF_UF); + break; + case SCU_EVENT_SATA_SPINUP_HOLD: + /* + * We were doing SAS PHY link training and received a SATA PHY event + * continue OOB/SN as if this were a SATA PHY */ + sci_phy_start_sata_link_training(iphy); + break; + case SCU_EVENT_LINK_FAILURE: + /* Change the timeout value to default */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); + + /* Link failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: + /* Extend the timeout value */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); + + /* Start the oob/sn state machine over again */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + default: + phy_event_warn(iphy, state, event_code); + return SCI_FAILURE; + } + return SCI_SUCCESS; + case SCI_PHY_SUB_AWAIT_IAF_UF: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_SAS_PHY_DETECTED: + /* Backup the state machine */ + sci_phy_start_sas_link_training(iphy); + break; + case SCU_EVENT_SATA_SPINUP_HOLD: + /* We were doing SAS PHY link training and received a + * SATA PHY event continue OOB/SN as if this were a + * SATA PHY + */ + sci_phy_start_sata_link_training(iphy); + break; + case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: + /* Extend the timeout value */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); + + /* Start the oob/sn state machine over again */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + case SCU_EVENT_LINK_FAILURE: + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); + fallthrough; + case SCU_EVENT_HARD_RESET_RECEIVED: + /* Start the oob/sn state machine over again */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + default: + phy_event_warn(iphy, state, event_code); + return SCI_FAILURE; + } + return SCI_SUCCESS; + case SCI_PHY_SUB_AWAIT_SAS_POWER: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_LINK_FAILURE: + /* Change the timeout value to default */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); + + /* Link failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + default: + phy_event_warn(iphy, state, event_code); + return SCI_FAILURE; + } + return SCI_SUCCESS; + case SCI_PHY_SUB_AWAIT_SATA_POWER: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_LINK_FAILURE: + /* Change the timeout value to default */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); + + /* Link failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + case SCU_EVENT_SATA_SPINUP_HOLD: + /* These events are received every 10ms and are + * expected while in this state + */ + break; + + case SCU_EVENT_SAS_PHY_DETECTED: + /* There has been a change in the phy type before OOB/SN for the + * SATA 
finished start down the SAS link traning path. + */ + sci_phy_start_sas_link_training(iphy); + break; + + default: + phy_event_warn(iphy, state, event_code); + return SCI_FAILURE; + } + return SCI_SUCCESS; + case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_LINK_FAILURE: + /* Change the timeout value to default */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); + + /* Link failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + case SCU_EVENT_SATA_SPINUP_HOLD: + /* These events might be received since we dont know how many may be in + * the completion queue while waiting for power + */ + break; + case SCU_EVENT_SATA_PHY_DETECTED: + iphy->protocol = SAS_PROTOCOL_SATA; + + /* We have received the SATA PHY notification change state */ + sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); + break; + case SCU_EVENT_SAS_PHY_DETECTED: + /* There has been a change in the phy type before OOB/SN for the + * SATA finished start down the SAS link traning path. + */ + sci_phy_start_sas_link_training(iphy); + break; + default: + phy_event_warn(iphy, state, event_code); + return SCI_FAILURE; + } + return SCI_SUCCESS; + case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_SATA_PHY_DETECTED: + /* + * The hardware reports multiple SATA PHY detected events + * ignore the extras */ + break; + case SCU_EVENT_SATA_15: + case SCU_EVENT_SATA_15_SSC: + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS, + SCI_PHY_SUB_AWAIT_SIG_FIS_UF); + break; + case SCU_EVENT_SATA_30: + case SCU_EVENT_SATA_30_SSC: + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS, + SCI_PHY_SUB_AWAIT_SIG_FIS_UF); + break; + case SCU_EVENT_SATA_60: + case SCU_EVENT_SATA_60_SSC: + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS, + SCI_PHY_SUB_AWAIT_SIG_FIS_UF); + break; + case SCU_EVENT_LINK_FAILURE: + /* Change the timeout value to default */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); + + /* Link failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + case SCU_EVENT_SAS_PHY_DETECTED: + /* + * There has been a change in the phy type before OOB/SN for the + * SATA finished start down the SAS link traning path. 
*/ + sci_phy_start_sas_link_training(iphy); + break; + default: + phy_event_warn(iphy, state, event_code); + return SCI_FAILURE; + } + + return SCI_SUCCESS; + case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_SATA_PHY_DETECTED: + /* Backup the state machine */ + sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); + break; + + case SCU_EVENT_LINK_FAILURE: + /* Change the timeout value to default */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); + + /* Link failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + + default: + phy_event_warn(iphy, state, event_code); + return SCI_FAILURE; + } + return SCI_SUCCESS; + case SCI_PHY_READY: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_LINK_FAILURE: + /* Set default timeout */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); + + /* Link failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + case SCU_EVENT_BROADCAST_CHANGE: + case SCU_EVENT_BROADCAST_SES: + case SCU_EVENT_BROADCAST_RESERVED0: + case SCU_EVENT_BROADCAST_RESERVED1: + case SCU_EVENT_BROADCAST_EXPANDER: + case SCU_EVENT_BROADCAST_AEN: + /* Broadcast change received. Notify the port. */ + if (phy_get_non_dummy_port(iphy) != NULL) + sci_port_broadcast_change_received(iphy->owning_port, iphy); + else + iphy->bcn_received_while_port_unassigned = true; + break; + case SCU_EVENT_BROADCAST_RESERVED3: + case SCU_EVENT_BROADCAST_RESERVED4: + default: + phy_event_warn(iphy, state, event_code); + return SCI_FAILURE_INVALID_STATE; + } + return SCI_SUCCESS; + case SCI_PHY_RESETTING: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_HARD_RESET_TRANSMITTED: + /* Link failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + default: + phy_event_warn(iphy, state, event_code); + return SCI_FAILURE_INVALID_STATE; + } + return SCI_SUCCESS; + default: + dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", + __func__, phy_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } +} + +enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index) +{ + enum sci_phy_states state = iphy->sm.current_state_id; + struct isci_host *ihost = iphy->owning_port->owning_controller; + enum sci_status result; + unsigned long flags; + + switch (state) { + case SCI_PHY_SUB_AWAIT_IAF_UF: { + u32 *frame_words; + struct sas_identify_frame iaf; + + result = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_words); + + if (result != SCI_SUCCESS) + return result; + + sci_swab32_cpy(&iaf, frame_words, sizeof(iaf) / sizeof(u32)); + if (iaf.frame_type == 0) { + u32 state; + + spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); + memcpy(&iphy->frame_rcvd.iaf, &iaf, sizeof(iaf)); + spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); + if (iaf.smp_tport) { + /* We got the IAF for an expander PHY go to the final + * state since there are no power requirements for + * expander phys. 
+ */ + state = SCI_PHY_SUB_FINAL; + } else { + /* We got the IAF we can now go to the await spinup + * semaphore state + */ + state = SCI_PHY_SUB_AWAIT_SAS_POWER; + } + sci_change_state(&iphy->sm, state); + result = SCI_SUCCESS; + } else + dev_warn(sciphy_to_dev(iphy), + "%s: PHY starting substate machine received " + "unexpected frame id %x\n", + __func__, frame_index); + + sci_controller_release_frame(ihost, frame_index); + return result; + } + case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: { + struct dev_to_host_fis *frame_header; + u32 *fis_frame_data; + + result = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_header); + + if (result != SCI_SUCCESS) + return result; + + if ((frame_header->fis_type == FIS_REGD2H) && + !(frame_header->status & ATA_BUSY)) { + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&fis_frame_data); + + spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); + sci_controller_copy_sata_response(&iphy->frame_rcvd.fis, + frame_header, + fis_frame_data); + spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); + + /* got IAF we can now go to the await spinup semaphore state */ + sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL); + + result = SCI_SUCCESS; + } else + dev_warn(sciphy_to_dev(iphy), + "%s: PHY starting substate machine received " + "unexpected frame id %x\n", + __func__, frame_index); + + /* Regardless of the result we are done with this frame with it */ + sci_controller_release_frame(ihost, frame_index); + + return result; + } + default: + dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", + __func__, phy_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } + +} + +static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + /* This is just an temporary state go off to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN); +} + +static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_host *ihost = iphy->owning_port->owning_controller; + + sci_controller_power_control_queue_insert(ihost, iphy); +} + +static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_host *ihost = iphy->owning_port->owning_controller; + + sci_controller_power_control_queue_remove(ihost, iphy); +} + +static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_host *ihost = iphy->owning_port->owning_controller; + + sci_controller_power_control_queue_insert(ihost, iphy); +} + +static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_host *ihost = iphy->owning_port->owning_controller; + + sci_controller_power_control_queue_remove(ihost, iphy); +} + +static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); +} + +static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = 
container_of(sm, typeof(*iphy), sm); + + sci_del_timer(&iphy->sata_timer); +} + +static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); +} + +static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + sci_del_timer(&iphy->sata_timer); +} + +static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + if (sci_port_link_detected(iphy->owning_port, iphy)) { + + /* + * Clear the PE suspend condition so we can actually + * receive SIG FIS + * The hardware will not respond to the XRDY until the PE + * suspend condition is cleared. + */ + sci_phy_resume(iphy); + + sci_mod_timer(&iphy->sata_timer, + SCIC_SDS_SIGNATURE_FIS_TIMEOUT); + } else + iphy->is_in_link_training = false; +} + +static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + sci_del_timer(&iphy->sata_timer); +} + +static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + /* State machine has run to completion so exit out and change + * the base state machine to the ready state + */ + sci_change_state(&iphy->sm, SCI_PHY_READY); +} + +/** + * scu_link_layer_stop_protocol_engine() + * @iphy: This is the struct isci_phy object to stop. + * + * This method will stop the struct isci_phy object. This does not reset the + * protocol engine it just suspends it and places it in a state where it will + * not cause the end device to power up. none + */ +static void scu_link_layer_stop_protocol_engine( + struct isci_phy *iphy) +{ + u32 scu_sas_pcfg_value; + u32 enable_spinup_value; + + /* Suspend the protocol engine and place it in a sata spinup hold state */ + scu_sas_pcfg_value = + readl(&iphy->link_layer_registers->phy_configuration); + scu_sas_pcfg_value |= + (SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | + SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE) | + SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD)); + writel(scu_sas_pcfg_value, + &iphy->link_layer_registers->phy_configuration); + + /* Disable the notify enable spinup primitives */ + enable_spinup_value = readl(&iphy->link_layer_registers->notify_enable_spinup_control); + enable_spinup_value &= ~SCU_ENSPINUP_GEN_BIT(ENABLE); + writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control); +} + +static void scu_link_layer_start_oob(struct isci_phy *iphy) +{ + struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers; + u32 val; + + /** Reset OOB sequence - start */ + val = readl(&ll->phy_configuration); + val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | + SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE) | + SCU_SAS_PCFG_GEN_BIT(HARD_RESET)); + writel(val, &ll->phy_configuration); + readl(&ll->phy_configuration); /* flush */ + /** Reset OOB sequence - end */ + + /** Start OOB sequence - start */ + val = readl(&ll->phy_configuration); + val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); + writel(val, &ll->phy_configuration); + readl(&ll->phy_configuration); /* flush */ + /** Start OOB sequence - end */ +} + +/** + * scu_link_layer_tx_hard_reset() + * @iphy: This is the struct isci_phy object to stop. 
+ * + * This method will transmit a hard reset request on the specified phy. The SCU + * hardware requires that we reset the OOB state machine and set the hard reset + * bit in the phy configuration register. We then must start OOB over with the + * hard reset bit set. + */ +static void scu_link_layer_tx_hard_reset( + struct isci_phy *iphy) +{ + u32 phy_configuration_value; + + /* + * SAS Phys must wait for the HARD_RESET_TX event notification to transition + * to the starting state. */ + phy_configuration_value = + readl(&iphy->link_layer_registers->phy_configuration); + phy_configuration_value &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE)); + phy_configuration_value |= + (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) | + SCU_SAS_PCFG_GEN_BIT(OOB_RESET)); + writel(phy_configuration_value, + &iphy->link_layer_registers->phy_configuration); + + /* Now take the OOB state machine out of reset */ + phy_configuration_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); + phy_configuration_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET); + writel(phy_configuration_value, + &iphy->link_layer_registers->phy_configuration); +} + +static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_port *iport = iphy->owning_port; + struct isci_host *ihost = iport->owning_controller; + + /* + * @todo We need to get to the controller to place this PE in a + * reset state + */ + sci_del_timer(&iphy->sata_timer); + + scu_link_layer_stop_protocol_engine(iphy); + + if (iphy->sm.previous_state_id != SCI_PHY_INITIAL) + sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy); +} + +static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_port *iport = iphy->owning_port; + struct isci_host *ihost = iport->owning_controller; + + scu_link_layer_stop_protocol_engine(iphy); + scu_link_layer_start_oob(iphy); + + /* We don't know what kind of phy we are going to be just yet */ + iphy->protocol = SAS_PROTOCOL_NONE; + iphy->bcn_received_while_port_unassigned = false; + + if (iphy->sm.previous_state_id == SCI_PHY_READY) + sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy); + + sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL); +} + +static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_port *iport = iphy->owning_port; + struct isci_host *ihost = iport->owning_controller; + + sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy); +} + +static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + sci_phy_suspend(iphy); +} + +static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + /* The phy is being reset, therefore deactivate it from the port. In + * the resetting state we don't notify the user regarding link up and + * link down notifications + */ + sci_port_deactivate_phy(iphy->owning_port, iphy, false); + + if (iphy->protocol == SAS_PROTOCOL_SSP) { + scu_link_layer_tx_hard_reset(iphy); + } else { + /* The SCU does not need to have a discrete reset state so + * just go back to the starting state. 
+ */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + } +} + +static const struct sci_base_state sci_phy_state_table[] = { + [SCI_PHY_INITIAL] = { }, + [SCI_PHY_STOPPED] = { + .enter_state = sci_phy_stopped_state_enter, + }, + [SCI_PHY_STARTING] = { + .enter_state = sci_phy_starting_state_enter, + }, + [SCI_PHY_SUB_INITIAL] = { + .enter_state = sci_phy_starting_initial_substate_enter, + }, + [SCI_PHY_SUB_AWAIT_OSSP_EN] = { }, + [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { }, + [SCI_PHY_SUB_AWAIT_IAF_UF] = { }, + [SCI_PHY_SUB_AWAIT_SAS_POWER] = { + .enter_state = sci_phy_starting_await_sas_power_substate_enter, + .exit_state = sci_phy_starting_await_sas_power_substate_exit, + }, + [SCI_PHY_SUB_AWAIT_SATA_POWER] = { + .enter_state = sci_phy_starting_await_sata_power_substate_enter, + .exit_state = sci_phy_starting_await_sata_power_substate_exit + }, + [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = { + .enter_state = sci_phy_starting_await_sata_phy_substate_enter, + .exit_state = sci_phy_starting_await_sata_phy_substate_exit + }, + [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = { + .enter_state = sci_phy_starting_await_sata_speed_substate_enter, + .exit_state = sci_phy_starting_await_sata_speed_substate_exit + }, + [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = { + .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter, + .exit_state = sci_phy_starting_await_sig_fis_uf_substate_exit + }, + [SCI_PHY_SUB_FINAL] = { + .enter_state = sci_phy_starting_final_substate_enter, + }, + [SCI_PHY_READY] = { + .enter_state = sci_phy_ready_state_enter, + .exit_state = sci_phy_ready_state_exit, + }, + [SCI_PHY_RESETTING] = { + .enter_state = sci_phy_resetting_state_enter, + }, + [SCI_PHY_FINAL] = { }, +}; + +void sci_phy_construct(struct isci_phy *iphy, + struct isci_port *iport, u8 phy_index) +{ + sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL); + + /* Copy the rest of the input data to our locals */ + iphy->owning_port = iport; + iphy->phy_index = phy_index; + iphy->bcn_received_while_port_unassigned = false; + iphy->protocol = SAS_PROTOCOL_NONE; + iphy->link_layer_registers = NULL; + iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; + + /* Create the SIGNATURE FIS Timeout timer for this phy */ + sci_init_timer(&iphy->sata_timer, phy_sata_timeout); +} + +void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index) +{ + struct sci_oem_params *oem = &ihost->oem_parameters; + u64 sci_sas_addr; + __be64 sas_addr; + + sci_sas_addr = oem->phys[index].sas_address.high; + sci_sas_addr <<= 32; + sci_sas_addr |= oem->phys[index].sas_address.low; + sas_addr = cpu_to_be64(sci_sas_addr); + memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr)); + + iphy->sas_phy.enabled = 0; + iphy->sas_phy.id = index; + iphy->sas_phy.sas_addr = &iphy->sas_addr[0]; + iphy->sas_phy.frame_rcvd = (u8 *)&iphy->frame_rcvd; + iphy->sas_phy.ha = &ihost->sas_ha; + iphy->sas_phy.lldd_phy = iphy; + iphy->sas_phy.enabled = 1; + iphy->sas_phy.iproto = SAS_PROTOCOL_ALL; + iphy->sas_phy.tproto = 0; + iphy->sas_phy.role = PHY_ROLE_INITIATOR; + iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED; + iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN; + memset(&iphy->frame_rcvd, 0, sizeof(iphy->frame_rcvd)); +} + + +/** + * isci_phy_control() - This function is one of the SAS Domain Template + * functions. This is a phy management function. + * @sas_phy: This parameter specifies the sphy being controlled. + * @func: This parameter specifies the phy control function being invoked. + * @buf: This parameter is specific to the phy function being invoked. 
+ * + * status, zero indicates success. + */ +int isci_phy_control(struct asd_sas_phy *sas_phy, + enum phy_func func, + void *buf) +{ + int ret = 0; + struct isci_phy *iphy = sas_phy->lldd_phy; + struct asd_sas_port *port = sas_phy->port; + struct isci_host *ihost = sas_phy->ha->lldd_ha; + unsigned long flags; + + dev_dbg(&ihost->pdev->dev, + "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n", + __func__, sas_phy, func, buf, iphy, port); + + switch (func) { + case PHY_FUNC_DISABLE: + spin_lock_irqsave(&ihost->scic_lock, flags); + scu_link_layer_start_oob(iphy); + sci_phy_stop(iphy); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + break; + + case PHY_FUNC_LINK_RESET: + spin_lock_irqsave(&ihost->scic_lock, flags); + scu_link_layer_start_oob(iphy); + sci_phy_stop(iphy); + sci_phy_start(iphy); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + break; + + case PHY_FUNC_HARD_RESET: + if (!port) + return -ENODEV; + + ret = isci_port_perform_hard_reset(ihost, port->lldd_port, iphy); + + break; + case PHY_FUNC_GET_EVENTS: { + struct scu_link_layer_registers __iomem *r; + struct sas_phy *phy = sas_phy->phy; + + r = iphy->link_layer_registers; + phy->running_disparity_error_count = readl(&r->running_disparity_error_count); + phy->loss_of_dword_sync_count = readl(&r->loss_of_sync_error_count); + phy->phy_reset_problem_count = readl(&r->phy_reset_problem_count); + phy->invalid_dword_count = readl(&r->invalid_dword_counter); + break; + } + + default: + dev_dbg(&ihost->pdev->dev, + "%s: phy %p; func %d NOT IMPLEMENTED!\n", + __func__, sas_phy, func); + ret = -ENOSYS; + break; + } + return ret; +} diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h new file mode 100644 index 000000000..5aaf95b14 --- /dev/null +++ b/drivers/scsi/isci/phy.h @@ -0,0 +1,459 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _ISCI_PHY_H_ +#define _ISCI_PHY_H_ + +#include +#include +#include "isci.h" +#include "sas.h" + +/* This is the timeout value for the SATA phy to wait for a SIGNATURE FIS + * before restarting the starting state machine. Technically, the old parallel + * ATA specification required up to 30 seconds for a device to issue its + * signature FIS as a result of a soft reset. Now we see that devices respond + * generally within 15 seconds, but we'll use 25 for now. + */ +#define SCIC_SDS_SIGNATURE_FIS_TIMEOUT 25000 + +/* This is the timeout for the SATA OOB/SN because the hardware does not + * recognize a hot plug after OOB signal but before the SN signals. We need to + * make sure after a hotplug timeout if we have not received the speed event + * notification from the hardware that we restart the hardware OOB state + * machine. + */ +#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250 + +/** + * isci_phy - hba local phy infrastructure + * @sm: + * @protocol: attached device protocol + * @phy_index: physical index relative to the controller (0-3) + * @bcn_received_while_port_unassigned: bcn to report after port association + * @sata_timer: timeout SATA signature FIS arrival + */ +struct isci_phy { + struct sci_base_state_machine sm; + struct isci_port *owning_port; + enum sas_linkrate max_negotiated_speed; + enum sas_protocol protocol; + u8 phy_index; + bool bcn_received_while_port_unassigned; + bool is_in_link_training; + struct sci_timer sata_timer; + struct scu_transport_layer_registers __iomem *transport_layer_registers; + struct scu_link_layer_registers __iomem *link_layer_registers; + struct asd_sas_phy sas_phy; + u8 sas_addr[SAS_ADDR_SIZE]; + union { + struct sas_identify_frame iaf; + struct dev_to_host_fis fis; + } frame_rcvd; +}; + +static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy) +{ + struct isci_phy *iphy = container_of(sas_phy, typeof(*iphy), sas_phy); + + return iphy; +} + +struct sci_phy_cap { + union { + struct { + /* + * The SAS specification indicates the start bit shall + * always be set to + * 1. This implementation will have the start bit set + * to 0 if the PHY CAPABILITIES were either not + * received or speed negotiation failed. 
+ */ + u8 start:1; + u8 tx_ssc_type:1; + u8 res1:2; + u8 req_logical_linkrate:4; + + u32 gen1_no_ssc:1; + u32 gen1_ssc:1; + u32 gen2_no_ssc:1; + u32 gen2_ssc:1; + u32 gen3_no_ssc:1; + u32 gen3_ssc:1; + u32 res2:17; + u32 parity:1; + }; + u32 all; + }; +} __packed; + +/* this data structure reflects the link layer transmit identification reg */ +struct sci_phy_proto { + union { + struct { + u16 _r_a:1; + u16 smp_iport:1; + u16 stp_iport:1; + u16 ssp_iport:1; + u16 _r_b:4; + u16 _r_c:1; + u16 smp_tport:1; + u16 stp_tport:1; + u16 ssp_tport:1; + u16 _r_d:4; + }; + u16 all; + }; +} __packed; + + +/** + * struct sci_phy_properties - This structure defines the properties common to + * all phys that can be retrieved. + * + * + */ +struct sci_phy_properties { + /** + * This field specifies the port that currently contains the + * supplied phy. This field may be set to NULL + * if the phy is not currently contained in a port. + */ + struct isci_port *iport; + + /** + * This field specifies the link rate at which the phy is + * currently operating. + */ + enum sas_linkrate negotiated_link_rate; + + /** + * This field specifies the index of the phy in relation to other + * phys within the controller. This index is zero relative. + */ + u8 index; +}; + +/** + * struct sci_sas_phy_properties - This structure defines the properties, + * specific to a SAS phy, that can be retrieved. + * + * + */ +struct sci_sas_phy_properties { + /** + * This field delineates the Identify Address Frame received + * from the remote end point. + */ + struct sas_identify_frame rcvd_iaf; + + /** + * This field delineates the Phy capabilities structure received + * from the remote end point. + */ + struct sci_phy_cap rcvd_cap; + +}; + +/** + * struct sci_sata_phy_properties - This structure defines the properties, + * specific to a SATA phy, that can be retrieved. + * + * + */ +struct sci_sata_phy_properties { + /** + * This field delineates the signature FIS received from the + * attached target. + */ + struct dev_to_host_fis signature_fis; + + /** + * This field specifies to the user if a port selector is connected + * on the specified phy. + */ + bool is_port_selector_present; + +}; + +/** + * enum sci_phy_counter_id - This enumeration depicts the various pieces of + * optional information that can be retrieved for a specific phy. + * + * + */ +enum sci_phy_counter_id { + /** + * This PHY information field tracks the number of frames received. + */ + SCIC_PHY_COUNTER_RECEIVED_FRAME, + + /** + * This PHY information field tracks the number of frames transmitted. + */ + SCIC_PHY_COUNTER_TRANSMITTED_FRAME, + + /** + * This PHY information field tracks the number of DWORDs received. + */ + SCIC_PHY_COUNTER_RECEIVED_FRAME_WORD, + + /** + * This PHY information field tracks the number of DWORDs transmitted. + */ + SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD, + + /** + * This PHY information field tracks the number of times DWORD + * synchronization was lost. + */ + SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR, + + /** + * This PHY information field tracks the number of received DWORDs with + * running disparity errors. + */ + SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR, + + /** + * This PHY information field tracks the number of received frames with a + * CRC error (not including short or truncated frames). + */ + SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR, + + /** + * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT) + * primitives received. 
+ */ + SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT, + + /** + * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT) + * primitives transmitted. + */ + SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT, + + /** + * This PHY information field tracks the number of times the inactivity + * timer for connections on the phy has been utilized. + */ + SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED, + + /** + * This PHY information field tracks the number of DONE (CREDIT TIMEOUT) + * primitives received. + */ + SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT, + + /** + * This PHY information field tracks the number of DONE (CREDIT TIMEOUT) + * primitives transmitted. + */ + SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT, + + /** + * This PHY information field tracks the number of CREDIT BLOCKED + * primitives received. + * @note Depending on remote device implementation, credit blocks + * may occur regularly. + */ + SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED, + + /** + * This PHY information field contains the number of short frames + * received. A short frame is simply a frame smaller then what is + * allowed by either the SAS or SATA specification. + */ + SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME, + + /** + * This PHY information field contains the number of frames received after + * credit has been exhausted. + */ + SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT, + + /** + * This PHY information field contains the number of frames received after + * a DONE has been received. + */ + SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE, + + /** + * This PHY information field contains the number of times the phy + * failed to achieve DWORD synchronization during speed negotiation. + */ + SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR +}; + +/** + * enum sci_phy_states - phy state machine states + * @SCI_PHY_INITIAL: Simply the initial state for the base domain state + * machine. + * @SCI_PHY_STOPPED: phy has successfully been stopped. In this state + * no new IO operations are permitted on this phy. + * @SCI_PHY_STARTING: the phy is in the process of becomming ready. In + * this state no new IO operations are permitted on + * this phy. + * @SCI_PHY_SUB_INITIAL: Initial state + * @SCI_PHY_SUB_AWAIT_OSSP_EN: Wait state for the hardware OSSP event + * type notification + * @SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: Wait state for the PHY speed + * notification + * @SCI_PHY_SUB_AWAIT_IAF_UF: Wait state for the IAF Unsolicited frame + * notification + * @SCI_PHY_SUB_AWAIT_SAS_POWER: Wait state for the request to consume + * power + * @SCI_PHY_SUB_AWAIT_SATA_POWER: Wait state for request to consume + * power + * @SCI_PHY_SUB_AWAIT_SATA_PHY_EN: Wait state for the SATA PHY + * notification + * @SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: Wait for the SATA PHY speed + * notification + * @SCI_PHY_SUB_AWAIT_SIG_FIS_UF: Wait state for the SIGNATURE FIS + * unsolicited frame notification + * @SCI_PHY_SUB_FINAL: Exit state for this state machine + * @SCI_PHY_READY: phy is now ready. Thus, the user is able to perform + * IO operations utilizing this phy as long as it is + * currently part of a valid port. This state is + * entered from the STARTING state. + * @SCI_PHY_RESETTING: phy is in the process of being reset. In this + * state no new IO operations are permitted on this + * phy. This state is entered from the READY state. + * @SCI_PHY_FINAL: Simply the final state for the base phy state + * machine. 
+ */ +#define PHY_STATES {\ + C(PHY_INITIAL),\ + C(PHY_STOPPED),\ + C(PHY_STARTING),\ + C(PHY_SUB_INITIAL),\ + C(PHY_SUB_AWAIT_OSSP_EN),\ + C(PHY_SUB_AWAIT_SAS_SPEED_EN),\ + C(PHY_SUB_AWAIT_IAF_UF),\ + C(PHY_SUB_AWAIT_SAS_POWER),\ + C(PHY_SUB_AWAIT_SATA_POWER),\ + C(PHY_SUB_AWAIT_SATA_PHY_EN),\ + C(PHY_SUB_AWAIT_SATA_SPEED_EN),\ + C(PHY_SUB_AWAIT_SIG_FIS_UF),\ + C(PHY_SUB_FINAL),\ + C(PHY_READY),\ + C(PHY_RESETTING),\ + C(PHY_FINAL),\ + } +#undef C +#define C(a) SCI_##a +enum sci_phy_states PHY_STATES; +#undef C + +void sci_phy_construct( + struct isci_phy *iphy, + struct isci_port *iport, + u8 phy_index); + +struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy); + +void sci_phy_set_port( + struct isci_phy *iphy, + struct isci_port *iport); + +enum sci_status sci_phy_initialize( + struct isci_phy *iphy, + struct scu_transport_layer_registers __iomem *transport_layer_registers, + struct scu_link_layer_registers __iomem *link_layer_registers); + +enum sci_status sci_phy_start( + struct isci_phy *iphy); + +enum sci_status sci_phy_stop( + struct isci_phy *iphy); + +enum sci_status sci_phy_reset( + struct isci_phy *iphy); + +void sci_phy_resume( + struct isci_phy *iphy); + +void sci_phy_setup_transport( + struct isci_phy *iphy, + u32 device_id); + +enum sci_status sci_phy_event_handler( + struct isci_phy *iphy, + u32 event_code); + +enum sci_status sci_phy_frame_handler( + struct isci_phy *iphy, + u32 frame_index); + +enum sci_status sci_phy_consume_power_handler( + struct isci_phy *iphy); + +void sci_phy_get_sas_address( + struct isci_phy *iphy, + struct sci_sas_address *sas_address); + +void sci_phy_get_attached_sas_address( + struct isci_phy *iphy, + struct sci_sas_address *sas_address); + +void sci_phy_get_protocols( + struct isci_phy *iphy, + struct sci_phy_proto *protocols); +enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy); + +struct isci_host; +void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index); +int isci_phy_control(struct asd_sas_phy *phy, enum phy_func func, void *buf); + +#endif /* !defined(_ISCI_PHY_H_) */ diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c new file mode 100644 index 000000000..1609aba1c --- /dev/null +++ b/drivers/scsi/isci/port.c @@ -0,0 +1,1773 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "isci.h" +#include "port.h" +#include "request.h" + +#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000) +#define SCU_DUMMY_INDEX (0xFFFF) + +#undef C +#define C(a) (#a) +static const char *port_state_name(enum sci_port_states state) +{ + static const char * const strings[] = PORT_STATES; + + return strings[state]; +} +#undef C + +static struct device *sciport_to_dev(struct isci_port *iport) +{ + int i = iport->physical_port_index; + struct isci_port *table; + struct isci_host *ihost; + + if (i == SCIC_SDS_DUMMY_PORT) + i = SCI_MAX_PORTS+1; + + table = iport - i; + ihost = container_of(table, typeof(*ihost), ports[0]); + + return &ihost->pdev->dev; +} + +static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto) +{ + u8 index; + + proto->all = 0; + for (index = 0; index < SCI_MAX_PHYS; index++) { + struct isci_phy *iphy = iport->phy_table[index]; + + if (!iphy) + continue; + sci_phy_get_protocols(iphy, proto); + } +} + +static u32 sci_port_get_phys(struct isci_port *iport) +{ + u32 index; + u32 mask; + + mask = 0; + for (index = 0; index < SCI_MAX_PHYS; index++) + if (iport->phy_table[index]) + mask |= (1 << index); + + return mask; +} + +/** + * sci_port_get_properties() - This method simply returns the properties + * regarding the port, such as: physical index, protocols, sas address, etc. + * @iport: this parameter specifies the port for which to retrieve the physical + * index. + * @prop: This parameter specifies the properties structure into which to + * copy the requested information. + * + * Indicate if the user specified a valid port. SCI_SUCCESS This value is + * returned if the specified port was valid. SCI_FAILURE_INVALID_PORT This + * value is returned if the specified port is not valid. When this value is + * returned, no data is copied to the properties output parameter. 
+ */ +enum sci_status sci_port_get_properties(struct isci_port *iport, + struct sci_port_properties *prop) +{ + if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT) + return SCI_FAILURE_INVALID_PORT; + + prop->index = iport->logical_port_index; + prop->phy_mask = sci_port_get_phys(iport); + sci_port_get_sas_address(iport, &prop->local.sas_address); + sci_port_get_protocols(iport, &prop->local.protocols); + sci_port_get_attached_sas_address(iport, &prop->remote.sas_address); + + return SCI_SUCCESS; +} + +static void sci_port_bcn_enable(struct isci_port *iport) +{ + struct isci_phy *iphy; + u32 val; + int i; + + for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) { + iphy = iport->phy_table[i]; + if (!iphy) + continue; + val = readl(&iphy->link_layer_registers->link_layer_control); + /* clear the bit by writing 1. */ + writel(val, &iphy->link_layer_registers->link_layer_control); + } +} + +static void isci_port_bc_change_received(struct isci_host *ihost, + struct isci_port *iport, + struct isci_phy *iphy) +{ + dev_dbg(&ihost->pdev->dev, + "%s: isci_phy = %p, sas_phy = %p\n", + __func__, iphy, &iphy->sas_phy); + + sas_notify_port_event(&iphy->sas_phy, + PORTE_BROADCAST_RCVD, GFP_ATOMIC); + sci_port_bcn_enable(iport); +} + +static void isci_port_link_up(struct isci_host *isci_host, + struct isci_port *iport, + struct isci_phy *iphy) +{ + unsigned long flags; + struct sci_port_properties properties; + unsigned long success = true; + + dev_dbg(&isci_host->pdev->dev, + "%s: isci_port = %p\n", + __func__, iport); + + spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); + + sci_port_get_properties(iport, &properties); + + if (iphy->protocol == SAS_PROTOCOL_SATA) { + u64 attached_sas_address; + + iphy->sas_phy.oob_mode = SATA_OOB_MODE; + iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis); + + /* + * For direct-attached SATA devices, the SCI core will + * automagically assign a SAS address to the end device + * for the purpose of creating a port. This SAS address + * will not be the same as assigned to the PHY and needs + * to be obtained from struct sci_port_properties properties. + */ + attached_sas_address = properties.remote.sas_address.high; + attached_sas_address <<= 32; + attached_sas_address |= properties.remote.sas_address.low; + swab64s(&attached_sas_address); + + memcpy(&iphy->sas_phy.attached_sas_addr, + &attached_sas_address, sizeof(attached_sas_address)); + } else if (iphy->protocol == SAS_PROTOCOL_SSP) { + iphy->sas_phy.oob_mode = SAS_OOB_MODE; + iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame); + + /* Copy the attached SAS address from the IAF */ + memcpy(iphy->sas_phy.attached_sas_addr, + iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE); + } else { + dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__); + success = false; + } + + iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy); + + spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); + + /* Notify libsas that we have an address frame, if indeed + * we've found an SSP, SMP, or STP target */ + if (success) + sas_notify_port_event(&iphy->sas_phy, + PORTE_BYTES_DMAED, GFP_ATOMIC); +} + + +/** + * isci_port_link_down() - This function is called by the sci core when a link + * becomes inactive. + * @isci_host: This parameter specifies the isci host object. + * @isci_phy: This parameter specifies the isci phy with the active link. + * @isci_port: This parameter specifies the isci port with the active link. 
+ * + */ +static void isci_port_link_down(struct isci_host *isci_host, + struct isci_phy *isci_phy, + struct isci_port *isci_port) +{ + struct isci_remote_device *isci_device; + + dev_dbg(&isci_host->pdev->dev, + "%s: isci_port = %p\n", __func__, isci_port); + + if (isci_port) { + + /* check to see if this is the last phy on this port. */ + if (isci_phy->sas_phy.port && + isci_phy->sas_phy.port->num_phys == 1) { + /* change the state for all devices on this port. The + * next task sent to this device will be returned as + * SAS_TASK_UNDELIVERED, and the scsi mid layer will + * remove the target + */ + list_for_each_entry(isci_device, + &isci_port->remote_dev_list, + node) { + dev_dbg(&isci_host->pdev->dev, + "%s: isci_device = %p\n", + __func__, isci_device); + set_bit(IDEV_GONE, &isci_device->flags); + } + } + } + + /* Notify libsas of the borken link, this will trigger calls to our + * isci_port_deformed and isci_dev_gone functions. + */ + sas_phy_disconnected(&isci_phy->sas_phy); + sas_notify_phy_event(&isci_phy->sas_phy, + PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC); + + dev_dbg(&isci_host->pdev->dev, + "%s: isci_port = %p - Done\n", __func__, isci_port); +} + +static bool is_port_ready_state(enum sci_port_states state) +{ + switch (state) { + case SCI_PORT_READY: + case SCI_PORT_SUB_WAITING: + case SCI_PORT_SUB_OPERATIONAL: + case SCI_PORT_SUB_CONFIGURING: + return true; + default: + return false; + } +} + +/* flag dummy rnc hanling when exiting a ready state */ +static void port_state_machine_change(struct isci_port *iport, + enum sci_port_states state) +{ + struct sci_base_state_machine *sm = &iport->sm; + enum sci_port_states old_state = sm->current_state_id; + + if (is_port_ready_state(old_state) && !is_port_ready_state(state)) + iport->ready_exit = true; + + sci_change_state(sm, state); + iport->ready_exit = false; +} + +/** + * isci_port_hard_reset_complete() - This function is called by the sci core + * when the hard reset complete notification has been received. + * @isci_port: This parameter specifies the sci port with the active link. + * @completion_status: This parameter specifies the core status for the reset + * process. + * + */ +static void isci_port_hard_reset_complete(struct isci_port *isci_port, + enum sci_status completion_status) +{ + struct isci_host *ihost = isci_port->owning_controller; + + dev_dbg(&ihost->pdev->dev, + "%s: isci_port = %p, completion_status=%x\n", + __func__, isci_port, completion_status); + + /* Save the status of the hard reset from the port. */ + isci_port->hard_reset_status = completion_status; + + if (completion_status != SCI_SUCCESS) { + + /* The reset failed. The port state is now SCI_PORT_FAILED. */ + if (isci_port->active_phy_mask == 0) { + int phy_idx = isci_port->last_active_phy; + struct isci_phy *iphy = &ihost->phys[phy_idx]; + + /* Generate the link down now to the host, since it + * was intercepted by the hard reset state machine when + * it really happened. + */ + isci_port_link_down(ihost, iphy, isci_port); + } + /* Advance the port state so that link state changes will be + * noticed. + */ + port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING); + + } + clear_bit(IPORT_RESET_PENDING, &isci_port->state); + wake_up(&ihost->eventq); + +} + +/* This method will return a true value if the specified phy can be assigned to + * this port The following is a list of phys for each port that are allowed: - + * Port 0 - 3 2 1 0 - Port 1 - 1 - Port 2 - 3 2 - Port 3 - 3 This method + * doesn't preclude all configurations. 
It merely ensures that a phy is part + * of the allowable set of phy identifiers for that port. For example, one + * could assign phy 3 to port 0 and no other phys. Please refer to + * sci_port_is_phy_mask_valid() for information regarding whether the + * phy_mask for a port can be supported. bool true if this is a valid phy + * assignment for the port false if this is not a valid phy assignment for the + * port + */ +bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index) +{ + struct isci_host *ihost = iport->owning_controller; + struct sci_user_parameters *user = &ihost->user_parameters; + + /* Initialize to invalid value. */ + u32 existing_phy_index = SCI_MAX_PHYS; + u32 index; + + if ((iport->physical_port_index == 1) && (phy_index != 1)) + return false; + + if (iport->physical_port_index == 3 && phy_index != 3) + return false; + + if (iport->physical_port_index == 2 && + (phy_index == 0 || phy_index == 1)) + return false; + + for (index = 0; index < SCI_MAX_PHYS; index++) + if (iport->phy_table[index] && index != phy_index) + existing_phy_index = index; + + /* Ensure that all of the phys in the port are capable of + * operating at the same maximum link rate. + */ + if (existing_phy_index < SCI_MAX_PHYS && + user->phys[phy_index].max_speed_generation != + user->phys[existing_phy_index].max_speed_generation) + return false; + + return true; +} + +/** + * sci_port_is_phy_mask_valid() + * @iport: This is the port object for which to determine if the phy mask + * can be supported. + * @phy_mask: Phy mask belonging to this port + * + * This method will return a true value if the port's phy mask can be supported + * by the SCU. The following is a list of valid PHY mask configurations for + * each port: - Port 0 - [[3 2] 1] 0 - Port 1 - [1] - Port 2 - [[3] 2] + * - Port 3 - [3] This method returns a boolean indication specifying if the + * phy mask can be supported. true if this is a valid phy assignment for the + * port false if this is not a valid phy assignment for the port + */ +static bool sci_port_is_phy_mask_valid( + struct isci_port *iport, + u32 phy_mask) +{ + if (iport->physical_port_index == 0) { + if (((phy_mask & 0x0F) == 0x0F) + || ((phy_mask & 0x03) == 0x03) + || ((phy_mask & 0x01) == 0x01) + || (phy_mask == 0)) + return true; + } else if (iport->physical_port_index == 1) { + if (((phy_mask & 0x02) == 0x02) + || (phy_mask == 0)) + return true; + } else if (iport->physical_port_index == 2) { + if (((phy_mask & 0x0C) == 0x0C) + || ((phy_mask & 0x04) == 0x04) + || (phy_mask == 0)) + return true; + } else if (iport->physical_port_index == 3) { + if (((phy_mask & 0x08) == 0x08) + || (phy_mask == 0)) + return true; + } + + return false; +} + +/* + * This method retrieves a currently active (i.e. connected) phy contained in + * the port. Currently, the lowest order phy that is connected is returned. + * This method returns a pointer to a SCIS_SDS_PHY object. NULL This value is + * returned if there are no currently active (i.e. connected to a remote end + * point) phys contained in the port. All other values specify a struct sci_phy + * object that is active in the port. + */ +static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport) +{ + u32 index; + struct isci_phy *iphy; + + for (index = 0; index < SCI_MAX_PHYS; index++) { + /* Ensure that the phy is both part of the port and currently + * connected to the remote end-point. 
+ */ + iphy = iport->phy_table[index]; + if (iphy && sci_port_active_phy(iport, iphy)) + return iphy; + } + + return NULL; +} + +static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy) +{ + /* Check to see if we can add this phy to a port + * that means that the phy is not part of a port and that the port does + * not already have a phy assinged to the phy index. + */ + if (!iport->phy_table[iphy->phy_index] && + !phy_get_non_dummy_port(iphy) && + sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) { + /* Phy is being added in the stopped state so we are in MPC mode + * make logical port index = physical port index + */ + iport->logical_port_index = iport->physical_port_index; + iport->phy_table[iphy->phy_index] = iphy; + sci_phy_set_port(iphy, iport); + + return SCI_SUCCESS; + } + + return SCI_FAILURE; +} + +static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy) +{ + /* Make sure that this phy is part of this port */ + if (iport->phy_table[iphy->phy_index] == iphy && + phy_get_non_dummy_port(iphy) == iport) { + struct isci_host *ihost = iport->owning_controller; + + /* Yep it is assigned to this port so remove it */ + sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]); + iport->phy_table[iphy->phy_index] = NULL; + return SCI_SUCCESS; + } + + return SCI_FAILURE; +} + +void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas) +{ + u32 index; + + sas->high = 0; + sas->low = 0; + for (index = 0; index < SCI_MAX_PHYS; index++) + if (iport->phy_table[index]) + sci_phy_get_sas_address(iport->phy_table[index], sas); +} + +void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas) +{ + struct isci_phy *iphy; + + /* + * Ensure that the phy is both part of the port and currently + * connected to the remote end-point. + */ + iphy = sci_port_get_a_connected_phy(iport); + if (iphy) { + if (iphy->protocol != SAS_PROTOCOL_SATA) { + sci_phy_get_attached_sas_address(iphy, sas); + } else { + sci_phy_get_sas_address(iphy, sas); + sas->low += iphy->phy_index; + } + } else { + sas->high = 0; + sas->low = 0; + } +} + +/** + * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround + * + * @iport: logical port on which we need to create the remote node context + * @rni: remote node index for this remote node context. + * + * This routine will construct a dummy remote node context data structure + * This structure will be posted to the hardware to work around a scheduler + * error in the hardware. + */ +static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) +{ + union scu_remote_node_context *rnc; + + rnc = &iport->owning_controller->remote_node_context_table[rni]; + + memset(rnc, 0, sizeof(union scu_remote_node_context)); + + rnc->ssp.remote_sas_address_hi = 0; + rnc->ssp.remote_sas_address_lo = 0; + + rnc->ssp.remote_node_index = rni; + rnc->ssp.remote_node_port_width = 1; + rnc->ssp.logical_port_index = iport->physical_port_index; + + rnc->ssp.nexus_loss_timer_enable = false; + rnc->ssp.check_bit = false; + rnc->ssp.is_valid = true; + rnc->ssp.is_remote_node_context = true; + rnc->ssp.function_number = 0; + rnc->ssp.arbitration_wait_time = 0; +} + +/* + * construct a dummy task context data structure. This + * structure will be posted to the hardwre to work around a scheduler error + * in the hardware. 
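+ * The dummy task context is keyed off the port's reserved tag and points
+ * at the reserved (dummy) remote node index; both are allocated up front
+ * in sci_port_start().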
+ */ +static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag) +{ + struct isci_host *ihost = iport->owning_controller; + struct scu_task_context *task_context; + + task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; + memset(task_context, 0, sizeof(struct scu_task_context)); + + task_context->initiator_request = 1; + task_context->connection_rate = 1; + task_context->logical_port_index = iport->physical_port_index; + task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; + task_context->task_index = ISCI_TAG_TCI(tag); + task_context->valid = SCU_TASK_CONTEXT_VALID; + task_context->context_type = SCU_TASK_CONTEXT_TYPE; + task_context->remote_node_index = iport->reserved_rni; + task_context->do_not_dma_ssp_good_response = 1; + task_context->task_phase = 0x01; +} + +static void sci_port_destroy_dummy_resources(struct isci_port *iport) +{ + struct isci_host *ihost = iport->owning_controller; + + if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG) + isci_free_tag(ihost, iport->reserved_tag); + + if (iport->reserved_rni != SCU_DUMMY_INDEX) + sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes, + 1, iport->reserved_rni); + + iport->reserved_rni = SCU_DUMMY_INDEX; + iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; +} + +void sci_port_setup_transports(struct isci_port *iport, u32 device_id) +{ + u8 index; + + for (index = 0; index < SCI_MAX_PHYS; index++) { + if (iport->active_phy_mask & (1 << index)) + sci_phy_setup_transport(iport->phy_table[index], device_id); + } +} + +static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy) +{ + sci_phy_resume(iphy); + iport->enabled_phy_mask |= 1 << iphy->phy_index; +} + +static void sci_port_activate_phy(struct isci_port *iport, + struct isci_phy *iphy, + u8 flags) +{ + struct isci_host *ihost = iport->owning_controller; + + if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME)) + sci_phy_resume(iphy); + + iport->active_phy_mask |= 1 << iphy->phy_index; + + sci_controller_clear_invalid_phy(ihost, iphy); + + if (flags & PF_NOTIFY) + isci_port_link_up(ihost, iport, iphy); +} + +void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy, + bool do_notify_user) +{ + struct isci_host *ihost = iport->owning_controller; + + iport->active_phy_mask &= ~(1 << iphy->phy_index); + iport->enabled_phy_mask &= ~(1 << iphy->phy_index); + if (!iport->active_phy_mask) + iport->last_active_phy = iphy->phy_index; + + iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; + + /* Re-assign the phy back to the LP as if it were a narrow port for APC + * mode. For MPC mode, the phy will remain in the port. + */ + if (iport->owning_controller->oem_parameters.controller.mode_type == + SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) + writel(iphy->phy_index, + &iport->port_pe_configuration_register[iphy->phy_index]); + + if (do_notify_user == true) + isci_port_link_down(ihost, iphy, iport); +} + +static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy) +{ + struct isci_host *ihost = iport->owning_controller; + + /* + * Check to see if we have alreay reported this link as bad and if + * not go ahead and tell the SCI_USER that we have discovered an + * invalid link. + */ + if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) { + ihost->invalid_phy_mask |= 1 << iphy->phy_index; + dev_warn(&ihost->pdev->dev, "Invalid link up!\n"); + } +} + +/** + * sci_port_general_link_up_handler - phy can be assigned to port? 
+ * @iport: sci_port object for which has a phy that has gone link up. + * @iphy: This is the struct isci_phy object that has gone link up. + * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy + * + * Determine if this phy can be assigned to this port . If the phy is + * not a valid PHY for this port then the function will notify the user. + * A PHY can only be part of a port if it's attached SAS ADDRESS is the + * same as all other PHYs in the same port. + */ +static void sci_port_general_link_up_handler(struct isci_port *iport, + struct isci_phy *iphy, + u8 flags) +{ + struct sci_sas_address port_sas_address; + struct sci_sas_address phy_sas_address; + + sci_port_get_attached_sas_address(iport, &port_sas_address); + sci_phy_get_attached_sas_address(iphy, &phy_sas_address); + + /* If the SAS address of the new phy matches the SAS address of + * other phys in the port OR this is the first phy in the port, + * then activate the phy and allow it to be used for operations + * in this port. + */ + if ((phy_sas_address.high == port_sas_address.high && + phy_sas_address.low == port_sas_address.low) || + iport->active_phy_mask == 0) { + struct sci_base_state_machine *sm = &iport->sm; + + sci_port_activate_phy(iport, iphy, flags); + if (sm->current_state_id == SCI_PORT_RESETTING) + port_state_machine_change(iport, SCI_PORT_READY); + } else + sci_port_invalid_link_up(iport, iphy); +} + + + +/** + * sci_port_is_wide() + * This method returns false if the port only has a single phy object assigned. + * If there are no phys or more than one phy then the method will return + * true. + * @iport: The port for which the wide port condition is to be checked. + * + * bool true Is returned if this is a wide ported port. false Is returned if + * this is a narrow port. + */ +static bool sci_port_is_wide(struct isci_port *iport) +{ + u32 index; + u32 phy_count = 0; + + for (index = 0; index < SCI_MAX_PHYS; index++) { + if (iport->phy_table[index] != NULL) { + phy_count++; + } + } + + return phy_count != 1; +} + +/** + * sci_port_link_detected() + * This method is called by the PHY object when the link is detected. if the + * port wants the PHY to continue on to the link up state then the port + * layer must return true. If the port object returns false the phy object + * must halt its attempt to go link up. + * @iport: The port associated with the phy object. + * @iphy: The phy object that is trying to go link up. + * + * true if the phy object can continue to the link up condition. true Is + * returned if this phy can continue to the ready state. false Is returned if + * can not continue on to the ready state. This notification is in place for + * wide ports and direct attached phys. Since there are no wide ported SATA + * devices this could become an invalid port configuration. 
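+ * When that happens the link up is reported as invalid and the phy is
+ * told not to continue to the ready state.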
+ */ +bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy) +{ + if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && + (iphy->protocol == SAS_PROTOCOL_SATA)) { + if (sci_port_is_wide(iport)) { + sci_port_invalid_link_up(iport, iphy); + return false; + } else { + struct isci_host *ihost = iport->owning_controller; + struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]); + writel(iphy->phy_index, + &dst_port->port_pe_configuration_register[iphy->phy_index]); + } + } + + return true; +} + +static void port_timeout(struct timer_list *t) +{ + struct sci_timer *tmr = from_timer(tmr, t, timer); + struct isci_port *iport = container_of(tmr, typeof(*iport), timer); + struct isci_host *ihost = iport->owning_controller; + unsigned long flags; + u32 current_state; + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (tmr->cancel) + goto done; + + current_state = iport->sm.current_state_id; + + if (current_state == SCI_PORT_RESETTING) { + /* if the port is still in the resetting state then the timeout + * fired before the reset completed. + */ + port_state_machine_change(iport, SCI_PORT_FAILED); + } else if (current_state == SCI_PORT_STOPPED) { + /* if the port is stopped then the start request failed In this + * case stay in the stopped state. + */ + dev_err(sciport_to_dev(iport), + "%s: SCIC Port 0x%p failed to stop before timeout.\n", + __func__, + iport); + } else if (current_state == SCI_PORT_STOPPING) { + dev_dbg(sciport_to_dev(iport), + "%s: port%d: stop complete timeout\n", + __func__, iport->physical_port_index); + } else { + /* The port is in the ready state and we have a timer + * reporting a timeout this should not happen. + */ + dev_err(sciport_to_dev(iport), + "%s: SCIC Port 0x%p is processing a timeout operation " + "in state %d.\n", __func__, iport, current_state); + } + +done: + spin_unlock_irqrestore(&ihost->scic_lock, flags); +} + +/* --------------------------------------------------------------------------- */ + +/* + * This function updates the hardwares VIIT entry for this port. + */ +static void sci_port_update_viit_entry(struct isci_port *iport) +{ + struct sci_sas_address sas_address; + + sci_port_get_sas_address(iport, &sas_address); + + writel(sas_address.high, + &iport->viit_registers->initiator_sas_address_hi); + writel(sas_address.low, + &iport->viit_registers->initiator_sas_address_lo); + + /* This value get cleared just in case its not already cleared */ + writel(0, &iport->viit_registers->reserved); + + /* We are required to update the status register last */ + writel(SCU_VIIT_ENTRY_ID_VIIT | + SCU_VIIT_IPPT_INITIATOR | + ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) | + SCU_VIIT_STATUS_ALL_VALID, + &iport->viit_registers->status); +} + +enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport) +{ + u16 index; + struct isci_phy *iphy; + enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS; + + /* + * Loop through all of the phys in this port and find the phy with the + * lowest maximum link rate. 
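+ * A wide port therefore never reports a link rate faster than its slowest
+ * active member phy; with no active phys the 6.0 Gbps default is returned.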
*/ + for (index = 0; index < SCI_MAX_PHYS; index++) { + iphy = iport->phy_table[index]; + if (iphy && sci_port_active_phy(iport, iphy) && + iphy->max_negotiated_speed < max_allowed_speed) + max_allowed_speed = iphy->max_negotiated_speed; + } + + return max_allowed_speed; +} + +static void sci_port_suspend_port_task_scheduler(struct isci_port *iport) +{ + u32 pts_control_value; + + pts_control_value = readl(&iport->port_task_scheduler_registers->control); + pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND); + writel(pts_control_value, &iport->port_task_scheduler_registers->control); +} + +/** + * sci_port_post_dummy_request() - post dummy/workaround request + * @iport: port to post task + * + * Prevent the hardware scheduler from posting new requests to the front + * of the scheduler queue causing a starvation problem for currently + * ongoing requests. + * + */ +static void sci_port_post_dummy_request(struct isci_port *iport) +{ + struct isci_host *ihost = iport->owning_controller; + u16 tag = iport->reserved_tag; + struct scu_task_context *tc; + u32 command; + + tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; + tc->abort = 0; + + command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | + ISCI_TAG_TCI(tag); + + sci_controller_post_request(ihost, command); +} + +/** + * sci_port_abort_dummy_request() + * This routine will abort the dummy request. This will allow the hardware to + * power down parts of the silicon to save power. + * + * @iport: The port on which the task must be aborted. + * + */ +static void sci_port_abort_dummy_request(struct isci_port *iport) +{ + struct isci_host *ihost = iport->owning_controller; + u16 tag = iport->reserved_tag; + struct scu_task_context *tc; + u32 command; + + tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; + tc->abort = 1; + + command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT | + iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | + ISCI_TAG_TCI(tag); + + sci_controller_post_request(ihost, command); +} + +/** + * sci_port_resume_port_task_scheduler() + * @iport: This is the struct isci_port object to resume. + * + * This method will resume the port task scheduler for this port object. 
none + */ +static void +sci_port_resume_port_task_scheduler(struct isci_port *iport) +{ + u32 pts_control_value; + + pts_control_value = readl(&iport->port_task_scheduler_registers->control); + pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND); + writel(pts_control_value, &iport->port_task_scheduler_registers->control); +} + +static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + + sci_port_suspend_port_task_scheduler(iport); + + iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS; + + if (iport->active_phy_mask != 0) { + /* At least one of the phys on the port is ready */ + port_state_machine_change(iport, + SCI_PORT_SUB_OPERATIONAL); + } +} + +static void scic_sds_port_ready_substate_waiting_exit( + struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + sci_port_resume_port_task_scheduler(iport); +} + +static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) +{ + u32 index; + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + struct isci_host *ihost = iport->owning_controller; + + dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n", + __func__, iport->physical_port_index); + + for (index = 0; index < SCI_MAX_PHYS; index++) { + if (iport->phy_table[index]) { + writel(iport->physical_port_index, + &iport->port_pe_configuration_register[ + iport->phy_table[index]->phy_index]); + if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0) + sci_port_resume_phy(iport, iport->phy_table[index]); + } + } + + sci_port_update_viit_entry(iport); + + /* + * Post the dummy task for the port so the hardware can schedule + * io correctly + */ + sci_port_post_dummy_request(iport); +} + +static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport) +{ + struct isci_host *ihost = iport->owning_controller; + u8 phys_index = iport->physical_port_index; + union scu_remote_node_context *rnc; + u16 rni = iport->reserved_rni; + u32 command; + + rnc = &ihost->remote_node_context_table[rni]; + + rnc->ssp.is_valid = false; + + /* ensure the preceding tc abort request has reached the + * controller and give it ample time to act before posting the rnc + * invalidate + */ + readl(&ihost->smu_registers->interrupt_status); /* flush */ + udelay(10); + + command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE | + phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; + + sci_controller_post_request(ihost, command); +} + +/** + * sci_port_ready_substate_operational_exit() + * @sm: This is the object which is cast to a struct isci_port object. + * + * This method will perform the actions required by the struct isci_port on + * exiting the SCI_PORT_SUB_OPERATIONAL. This function reports + * the port not ready and suspends the port task scheduler. none + */ +static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + struct isci_host *ihost = iport->owning_controller; + + /* + * Kill the dummy task for this port if it has not yet posted + * the hardware will treat this as a NOP and just return abort + * complete. 
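+ * The dummy remote node is only invalidated below when this exit is part
+ * of leaving the ready state altogether (iport->ready_exit).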
+ */ + sci_port_abort_dummy_request(iport); + + dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n", + __func__, iport->physical_port_index); + + if (iport->ready_exit) + sci_port_invalidate_dummy_remote_node(iport); +} + +static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + struct isci_host *ihost = iport->owning_controller; + + if (iport->active_phy_mask == 0) { + dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n", + __func__, iport->physical_port_index); + + port_state_machine_change(iport, SCI_PORT_SUB_WAITING); + } else + port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL); +} + +enum sci_status sci_port_start(struct isci_port *iport) +{ + struct isci_host *ihost = iport->owning_controller; + enum sci_status status = SCI_SUCCESS; + enum sci_port_states state; + u32 phy_mask; + + state = iport->sm.current_state_id; + if (state != SCI_PORT_STOPPED) { + dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", + __func__, port_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } + + if (iport->assigned_device_count > 0) { + /* TODO This is a start failure operation because + * there are still devices assigned to this port. + * There must be no devices assigned to a port on a + * start operation. + */ + return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; + } + + if (iport->reserved_rni == SCU_DUMMY_INDEX) { + u16 rni = sci_remote_node_table_allocate_remote_node( + &ihost->available_remote_nodes, 1); + + if (rni != SCU_DUMMY_INDEX) + sci_port_construct_dummy_rnc(iport, rni); + else + status = SCI_FAILURE_INSUFFICIENT_RESOURCES; + iport->reserved_rni = rni; + } + + if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) { + u16 tag; + + tag = isci_alloc_tag(ihost); + if (tag == SCI_CONTROLLER_INVALID_IO_TAG) + status = SCI_FAILURE_INSUFFICIENT_RESOURCES; + else + sci_port_construct_dummy_task(iport, tag); + iport->reserved_tag = tag; + } + + if (status == SCI_SUCCESS) { + phy_mask = sci_port_get_phys(iport); + + /* + * There are one or more phys assigned to this port. Make sure + * the port's phy mask is in fact legal and supported by the + * silicon. + */ + if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) { + port_state_machine_change(iport, + SCI_PORT_READY); + + return SCI_SUCCESS; + } + status = SCI_FAILURE; + } + + if (status != SCI_SUCCESS) + sci_port_destroy_dummy_resources(iport); + + return status; +} + +enum sci_status sci_port_stop(struct isci_port *iport) +{ + enum sci_port_states state; + + state = iport->sm.current_state_id; + switch (state) { + case SCI_PORT_STOPPED: + return SCI_SUCCESS; + case SCI_PORT_SUB_WAITING: + case SCI_PORT_SUB_OPERATIONAL: + case SCI_PORT_SUB_CONFIGURING: + case SCI_PORT_RESETTING: + port_state_machine_change(iport, + SCI_PORT_STOPPING); + return SCI_SUCCESS; + default: + dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", + __func__, port_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } +} + +static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout) +{ + enum sci_status status = SCI_FAILURE_INVALID_PHY; + struct isci_phy *iphy = NULL; + enum sci_port_states state; + u32 phy_index; + + state = iport->sm.current_state_id; + if (state != SCI_PORT_SUB_OPERATIONAL) { + dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", + __func__, port_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } + + /* Select a phy on which we can send the hard reset request. 
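+ * The first phy found that is currently active in this port is used;
+ * inactive phys are skipped.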
*/ + for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) { + iphy = iport->phy_table[phy_index]; + if (iphy && !sci_port_active_phy(iport, iphy)) { + /* + * We found a phy but it is not ready select + * different phy + */ + iphy = NULL; + } + } + + /* If we have a phy then go ahead and start the reset procedure */ + if (!iphy) + return status; + status = sci_phy_reset(iphy); + + if (status != SCI_SUCCESS) + return status; + + sci_mod_timer(&iport->timer, timeout); + iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED; + + port_state_machine_change(iport, SCI_PORT_RESETTING); + return SCI_SUCCESS; +} + +/** + * sci_port_add_phy() + * @iport: This parameter specifies the port in which the phy will be added. + * @iphy: This parameter is the phy which is to be added to the port. + * + * This method will add a PHY to the selected port. This method returns an + * enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other + * status is a failure to add the phy to the port. + */ +enum sci_status sci_port_add_phy(struct isci_port *iport, + struct isci_phy *iphy) +{ + enum sci_status status; + enum sci_port_states state; + + sci_port_bcn_enable(iport); + + state = iport->sm.current_state_id; + switch (state) { + case SCI_PORT_STOPPED: { + struct sci_sas_address port_sas_address; + + /* Read the port assigned SAS Address if there is one */ + sci_port_get_sas_address(iport, &port_sas_address); + + if (port_sas_address.high != 0 && port_sas_address.low != 0) { + struct sci_sas_address phy_sas_address; + + /* Make sure that the PHY SAS Address matches the SAS Address + * for this port + */ + sci_phy_get_sas_address(iphy, &phy_sas_address); + + if (port_sas_address.high != phy_sas_address.high || + port_sas_address.low != phy_sas_address.low) + return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; + } + return sci_port_set_phy(iport, iphy); + } + case SCI_PORT_SUB_WAITING: + case SCI_PORT_SUB_OPERATIONAL: + status = sci_port_set_phy(iport, iphy); + + if (status != SCI_SUCCESS) + return status; + + sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME); + iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; + port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); + + return status; + case SCI_PORT_SUB_CONFIGURING: + status = sci_port_set_phy(iport, iphy); + + if (status != SCI_SUCCESS) + return status; + sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY); + + /* Re-enter the configuring state since this may be the last phy in + * the port. + */ + port_state_machine_change(iport, + SCI_PORT_SUB_CONFIGURING); + return SCI_SUCCESS; + default: + dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", + __func__, port_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } +} + +/** + * sci_port_remove_phy() + * @iport: This parameter specifies the port in which the phy will be added. + * @iphy: This parameter is the phy which is to be added to the port. + * + * This method will remove the PHY from the selected PORT. This method returns + * an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any + * other status is a failure to add the phy to the port. 
+ */ +enum sci_status sci_port_remove_phy(struct isci_port *iport, + struct isci_phy *iphy) +{ + enum sci_status status; + enum sci_port_states state; + + state = iport->sm.current_state_id; + + switch (state) { + case SCI_PORT_STOPPED: + return sci_port_clear_phy(iport, iphy); + case SCI_PORT_SUB_OPERATIONAL: + status = sci_port_clear_phy(iport, iphy); + if (status != SCI_SUCCESS) + return status; + + sci_port_deactivate_phy(iport, iphy, true); + iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; + port_state_machine_change(iport, + SCI_PORT_SUB_CONFIGURING); + return SCI_SUCCESS; + case SCI_PORT_SUB_CONFIGURING: + status = sci_port_clear_phy(iport, iphy); + + if (status != SCI_SUCCESS) + return status; + sci_port_deactivate_phy(iport, iphy, true); + + /* Re-enter the configuring state since this may be the last phy in + * the port + */ + port_state_machine_change(iport, + SCI_PORT_SUB_CONFIGURING); + return SCI_SUCCESS; + default: + dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", + __func__, port_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } +} + +enum sci_status sci_port_link_up(struct isci_port *iport, + struct isci_phy *iphy) +{ + enum sci_port_states state; + + state = iport->sm.current_state_id; + switch (state) { + case SCI_PORT_SUB_WAITING: + /* Since this is the first phy going link up for the port we + * can just enable it and continue + */ + sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME); + + port_state_machine_change(iport, + SCI_PORT_SUB_OPERATIONAL); + return SCI_SUCCESS; + case SCI_PORT_SUB_OPERATIONAL: + sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME); + return SCI_SUCCESS; + case SCI_PORT_RESETTING: + /* TODO We should make sure that the phy that has gone + * link up is the same one on which we sent the reset. It is + * possible that the phy on which we sent the reset is not the + * one that has gone link up and we want to make sure that + * phy being reset comes back. Consider the case where a + * reset is sent but before the hardware processes the reset it + * get a link up on the port because of a hot plug event. + * because of the reset request this phy will go link down + * almost immediately. + */ + + /* In the resetting state we don't notify the user regarding + * link up and link down notifications. + */ + sci_port_general_link_up_handler(iport, iphy, PF_RESUME); + return SCI_SUCCESS; + default: + dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", + __func__, port_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } +} + +enum sci_status sci_port_link_down(struct isci_port *iport, + struct isci_phy *iphy) +{ + enum sci_port_states state; + + state = iport->sm.current_state_id; + switch (state) { + case SCI_PORT_SUB_OPERATIONAL: + sci_port_deactivate_phy(iport, iphy, true); + + /* If there are no active phys left in the port, then + * transition the port to the WAITING state until such time + * as a phy goes link up + */ + if (iport->active_phy_mask == 0) + port_state_machine_change(iport, + SCI_PORT_SUB_WAITING); + return SCI_SUCCESS; + case SCI_PORT_RESETTING: + /* In the resetting state we don't notify the user regarding + * link up and link down notifications. 
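+ * The phy is still deactivated, but with do_notify_user set to false so
+ * no link down event is surfaced to libsas while the reset is pending.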
*/ + sci_port_deactivate_phy(iport, iphy, false); + return SCI_SUCCESS; + default: + dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", + __func__, port_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } +} + +enum sci_status sci_port_start_io(struct isci_port *iport, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + enum sci_port_states state; + + state = iport->sm.current_state_id; + switch (state) { + case SCI_PORT_SUB_WAITING: + return SCI_FAILURE_INVALID_STATE; + case SCI_PORT_SUB_OPERATIONAL: + iport->started_request_count++; + return SCI_SUCCESS; + default: + dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", + __func__, port_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } +} + +enum sci_status sci_port_complete_io(struct isci_port *iport, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + enum sci_port_states state; + + state = iport->sm.current_state_id; + switch (state) { + case SCI_PORT_STOPPED: + dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", + __func__, port_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + case SCI_PORT_STOPPING: + sci_port_decrement_request_count(iport); + + if (iport->started_request_count == 0) + port_state_machine_change(iport, + SCI_PORT_STOPPED); + break; + case SCI_PORT_READY: + case SCI_PORT_RESETTING: + case SCI_PORT_FAILED: + case SCI_PORT_SUB_WAITING: + case SCI_PORT_SUB_OPERATIONAL: + sci_port_decrement_request_count(iport); + break; + case SCI_PORT_SUB_CONFIGURING: + sci_port_decrement_request_count(iport); + if (iport->started_request_count == 0) { + port_state_machine_change(iport, + SCI_PORT_SUB_OPERATIONAL); + } + break; + } + return SCI_SUCCESS; +} + +static void sci_port_enable_port_task_scheduler(struct isci_port *iport) +{ + u32 pts_control_value; + + /* enable the port task scheduler in a suspended state */ + pts_control_value = readl(&iport->port_task_scheduler_registers->control); + pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND); + writel(pts_control_value, &iport->port_task_scheduler_registers->control); +} + +static void sci_port_disable_port_task_scheduler(struct isci_port *iport) +{ + u32 pts_control_value; + + pts_control_value = readl(&iport->port_task_scheduler_registers->control); + pts_control_value &= + ~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND)); + writel(pts_control_value, &iport->port_task_scheduler_registers->control); +} + +static void sci_port_post_dummy_remote_node(struct isci_port *iport) +{ + struct isci_host *ihost = iport->owning_controller; + u8 phys_index = iport->physical_port_index; + union scu_remote_node_context *rnc; + u16 rni = iport->reserved_rni; + u32 command; + + rnc = &ihost->remote_node_context_table[rni]; + rnc->ssp.is_valid = true; + + command = SCU_CONTEXT_COMMAND_POST_RNC_32 | + phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; + + sci_controller_post_request(ihost, command); + + /* ensure hardware has seen the post rnc command and give it + * ample time to act before sending the suspend + */ + readl(&ihost->smu_registers->interrupt_status); /* flush */ + udelay(10); + + command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX | + phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; + + sci_controller_post_request(ihost, command); +} + +static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + + if (iport->sm.previous_state_id == SCI_PORT_STOPPING) { + /* 
+ * If we enter this state becasuse of a request to stop + * the port then we want to disable the hardwares port + * task scheduler. */ + sci_port_disable_port_task_scheduler(iport); + } +} + +static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + + /* Enable and suspend the port task scheduler */ + sci_port_enable_port_task_scheduler(iport); +} + +static void sci_port_ready_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + struct isci_host *ihost = iport->owning_controller; + u32 prev_state; + + prev_state = iport->sm.previous_state_id; + if (prev_state == SCI_PORT_RESETTING) + isci_port_hard_reset_complete(iport, SCI_SUCCESS); + else + dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n", + __func__, iport->physical_port_index); + + /* Post and suspend the dummy remote node context for this port. */ + sci_port_post_dummy_remote_node(iport); + + /* Start the ready substate machine */ + port_state_machine_change(iport, + SCI_PORT_SUB_WAITING); +} + +static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + + sci_del_timer(&iport->timer); +} + +static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + + sci_del_timer(&iport->timer); + + sci_port_destroy_dummy_resources(iport); +} + +static void sci_port_failed_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + + isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT); +} + +void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout) +{ + int phy_index; + u32 phy_mask = iport->active_phy_mask; + + if (timeout) + ++iport->hang_detect_users; + else if (iport->hang_detect_users > 1) + --iport->hang_detect_users; + else + iport->hang_detect_users = 0; + + if (timeout || (iport->hang_detect_users == 0)) { + for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) { + if ((phy_mask >> phy_index) & 1) { + writel(timeout, + &iport->phy_table[phy_index] + ->link_layer_registers + ->link_layer_hang_detection_timeout); + } + } + } +} +/* --------------------------------------------------------------------------- */ + +static const struct sci_base_state sci_port_state_table[] = { + [SCI_PORT_STOPPED] = { + .enter_state = sci_port_stopped_state_enter, + .exit_state = sci_port_stopped_state_exit + }, + [SCI_PORT_STOPPING] = { + .exit_state = sci_port_stopping_state_exit + }, + [SCI_PORT_READY] = { + .enter_state = sci_port_ready_state_enter, + }, + [SCI_PORT_SUB_WAITING] = { + .enter_state = sci_port_ready_substate_waiting_enter, + .exit_state = scic_sds_port_ready_substate_waiting_exit, + }, + [SCI_PORT_SUB_OPERATIONAL] = { + .enter_state = sci_port_ready_substate_operational_enter, + .exit_state = sci_port_ready_substate_operational_exit + }, + [SCI_PORT_SUB_CONFIGURING] = { + .enter_state = sci_port_ready_substate_configuring_enter + }, + [SCI_PORT_RESETTING] = { + .exit_state = sci_port_resetting_state_exit + }, + [SCI_PORT_FAILED] = { + .enter_state = sci_port_failed_state_enter, + } +}; + +void sci_port_construct(struct isci_port *iport, u8 index, + struct isci_host *ihost) +{ + sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED); + + iport->logical_port_index = SCIC_SDS_DUMMY_PORT; + iport->physical_port_index = index; + 
iport->active_phy_mask = 0; + iport->enabled_phy_mask = 0; + iport->last_active_phy = 0; + iport->ready_exit = false; + + iport->owning_controller = ihost; + + iport->started_request_count = 0; + iport->assigned_device_count = 0; + iport->hang_detect_users = 0; + + iport->reserved_rni = SCU_DUMMY_INDEX; + iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; + + sci_init_timer(&iport->timer, port_timeout); + + iport->port_task_scheduler_registers = NULL; + + for (index = 0; index < SCI_MAX_PHYS; index++) + iport->phy_table[index] = NULL; +} + +void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy) +{ + struct isci_host *ihost = iport->owning_controller; + + /* notify the user. */ + isci_port_bc_change_received(ihost, iport, iphy); +} + +static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport) +{ + wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state)); +} + +int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy) +{ + unsigned long flags; + enum sci_status status; + int ret = TMF_RESP_FUNC_COMPLETE; + + dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n", + __func__, iport); + + spin_lock_irqsave(&ihost->scic_lock, flags); + set_bit(IPORT_RESET_PENDING, &iport->state); + + #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT + status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT); + + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + if (status == SCI_SUCCESS) { + wait_port_reset(ihost, iport); + + dev_dbg(&ihost->pdev->dev, + "%s: iport = %p; hard reset completion\n", + __func__, iport); + + if (iport->hard_reset_status != SCI_SUCCESS) { + ret = TMF_RESP_FUNC_FAILED; + + dev_err(&ihost->pdev->dev, + "%s: iport = %p; hard reset failed (0x%x)\n", + __func__, iport, iport->hard_reset_status); + } + } else { + clear_bit(IPORT_RESET_PENDING, &iport->state); + wake_up(&ihost->eventq); + ret = TMF_RESP_FUNC_FAILED; + + dev_err(&ihost->pdev->dev, + "%s: iport = %p; sci_port_hard_reset call" + " failed 0x%x\n", + __func__, iport, status); + + } + return ret; +} + +int isci_ata_check_ready(struct domain_device *dev) +{ + struct isci_port *iport = dev->port->lldd_port; + struct isci_host *ihost = dev_to_ihost(dev); + struct isci_remote_device *idev; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&ihost->scic_lock, flags); + idev = isci_lookup_device(dev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + if (!idev) + goto out; + + if (test_bit(IPORT_RESET_PENDING, &iport->state)) + goto out; + + rc = !!iport->active_phy_mask; + out: + isci_put_device(idev); + + return rc; +} + +void isci_port_deformed(struct asd_sas_phy *phy) +{ + struct isci_host *ihost = phy->ha->lldd_ha; + struct isci_port *iport = phy->port->lldd_port; + unsigned long flags; + int i; + + /* we got a port notification on a port that was subsequently + * torn down and libsas is just now catching up + */ + if (!iport) + return; + + spin_lock_irqsave(&ihost->scic_lock, flags); + for (i = 0; i < SCI_MAX_PHYS; i++) { + if (iport->active_phy_mask & 1 << i) + break; + } + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + if (i >= SCI_MAX_PHYS) + dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n", + __func__, (long) (iport - &ihost->ports[0])); +} + +void isci_port_formed(struct asd_sas_phy *phy) +{ + struct isci_host *ihost = phy->ha->lldd_ha; + struct isci_phy *iphy = to_iphy(phy); + struct asd_sas_port *port = phy->port; + struct isci_port *iport = NULL; + unsigned long flags; + int 
i; + + /* initial ports are formed as the driver is still initializing, + * wait for that process to complete + */ + wait_for_start(ihost); + + spin_lock_irqsave(&ihost->scic_lock, flags); + for (i = 0; i < SCI_MAX_PORTS; i++) { + iport = &ihost->ports[i]; + if (iport->active_phy_mask & 1 << iphy->phy_index) + break; + } + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + if (i >= SCI_MAX_PORTS) + iport = NULL; + + port->lldd_port = iport; +} diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h new file mode 100644 index 000000000..861e8f728 --- /dev/null +++ b/drivers/scsi/isci/port.h @@ -0,0 +1,283 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _ISCI_PORT_H_ +#define _ISCI_PORT_H_ + +#include +#include "isci.h" +#include "sas.h" +#include "phy.h" + +#define SCIC_SDS_DUMMY_PORT 0xFF + +#define PF_NOTIFY (1 << 0) +#define PF_RESUME (1 << 1) + +struct isci_phy; +struct isci_host; + +enum isci_status { + isci_freed = 0x00, + isci_starting = 0x01, + isci_ready = 0x02, + isci_ready_for_io = 0x03, + isci_stopping = 0x04, + isci_stopped = 0x05, +}; + +/** + * struct isci_port - isci direct attached sas port object + * @ready_exit: several states constitute 'ready'. When exiting ready we + * need to take extra port-teardown actions that are + * skipped when exiting to another 'ready' state. + * @logical_port_index: software port index + * @physical_port_index: hardware port index + * @active_phy_mask: identifies phy members + * @enabled_phy_mask: phy mask for the port + * that are already part of the port + * @reserved_tag: + * @reserved_rni: reserver for port task scheduler workaround + * @started_request_count: reference count for outstanding commands + * @not_ready_reason: set during state transitions and notified + * @timer: timeout start/stop operations + */ +struct isci_port { + struct isci_host *isci_host; + struct list_head remote_dev_list; + #define IPORT_RESET_PENDING 0 + unsigned long state; + enum sci_status hard_reset_status; + struct sci_base_state_machine sm; + bool ready_exit; + u8 logical_port_index; + u8 physical_port_index; + u8 active_phy_mask; + u8 enabled_phy_mask; + u8 last_active_phy; + u16 reserved_rni; + u16 reserved_tag; + u32 started_request_count; + u32 assigned_device_count; + u32 hang_detect_users; + u32 not_ready_reason; + struct isci_phy *phy_table[SCI_MAX_PHYS]; + struct isci_host *owning_controller; + struct sci_timer timer; + struct scu_port_task_scheduler_registers __iomem *port_task_scheduler_registers; + /* XXX rework: only one register, no need to replicate per-port */ + u32 __iomem *port_pe_configuration_register; + struct scu_viit_entry __iomem *viit_registers; +}; + +enum sci_port_not_ready_reason_code { + SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS, + SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED, + SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION, + SCIC_PORT_NOT_READY_RECONFIGURING, + + SCIC_PORT_NOT_READY_REASON_CODE_MAX +}; + +struct sci_port_end_point_properties { + struct sci_sas_address sas_address; + struct sci_phy_proto protocols; +}; + +struct sci_port_properties { + u32 index; + struct sci_port_end_point_properties local; + struct sci_port_end_point_properties remote; + u32 phy_mask; +}; + +/** + * enum sci_port_states - port state machine states + * @SCI_PORT_STOPPED: port has successfully been stopped. In this state + * no new IO operations are permitted. This state is + * entered from the STOPPING state. + * @SCI_PORT_STOPPING: port is in the process of stopping. In this + * state no new IO operations are permitted, but + * existing IO operations are allowed to complete. + * This state is entered from the READY state. + * @SCI_PORT_READY: port is now ready. Thus, the user is able to + * perform IO operations on this port. This state is + * entered from the STARTING state. + * @SCI_PORT_SUB_WAITING: port is started and ready but has no active + * phys. + * @SCI_PORT_SUB_OPERATIONAL: port is started and ready and there is at + * least one phy operational. + * @SCI_PORT_SUB_CONFIGURING: port is started and there was an + * add/remove phy event. 
This state is only + * used in Automatic Port Configuration Mode + * (APC) + * @SCI_PORT_RESETTING: port is in the process of performing a hard + * reset. Thus, the user is unable to perform IO + * operations on this port. This state is entered + * from the READY state. + * @SCI_PORT_FAILED: port has failed a reset request. This state is + * entered when a port reset request times out. This + * state is entered from the RESETTING state. + */ +#define PORT_STATES {\ + C(PORT_STOPPED),\ + C(PORT_STOPPING),\ + C(PORT_READY),\ + C(PORT_SUB_WAITING),\ + C(PORT_SUB_OPERATIONAL),\ + C(PORT_SUB_CONFIGURING),\ + C(PORT_RESETTING),\ + C(PORT_FAILED),\ + } +#undef C +#define C(a) SCI_##a +enum sci_port_states PORT_STATES; +#undef C + +static inline void sci_port_decrement_request_count(struct isci_port *iport) +{ + if (WARN_ONCE(iport->started_request_count == 0, + "%s: tried to decrement started_request_count past 0!?", + __func__)) + /* pass */; + else + iport->started_request_count--; +} + +#define sci_port_active_phy(port, phy) \ + (((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0) + +void sci_port_construct( + struct isci_port *iport, + u8 port_index, + struct isci_host *ihost); + +enum sci_status sci_port_start(struct isci_port *iport); +enum sci_status sci_port_stop(struct isci_port *iport); + +enum sci_status sci_port_add_phy( + struct isci_port *iport, + struct isci_phy *iphy); + +enum sci_status sci_port_remove_phy( + struct isci_port *iport, + struct isci_phy *iphy); + +void sci_port_setup_transports( + struct isci_port *iport, + u32 device_id); + +void isci_port_bcn_enable(struct isci_host *, struct isci_port *); + +void sci_port_deactivate_phy( + struct isci_port *iport, + struct isci_phy *iphy, + bool do_notify_user); + +bool sci_port_link_detected( + struct isci_port *iport, + struct isci_phy *iphy); + +enum sci_status sci_port_get_properties( + struct isci_port *iport, + struct sci_port_properties *prop); + +enum sci_status sci_port_link_up(struct isci_port *iport, + struct isci_phy *iphy); +enum sci_status sci_port_link_down(struct isci_port *iport, + struct isci_phy *iphy); + +struct isci_request; +struct isci_remote_device; +enum sci_status sci_port_start_io( + struct isci_port *iport, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sci_status sci_port_complete_io( + struct isci_port *iport, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sas_linkrate sci_port_get_max_allowed_speed( + struct isci_port *iport); + +void sci_port_broadcast_change_received( + struct isci_port *iport, + struct isci_phy *iphy); + +bool sci_port_is_valid_phy_assignment( + struct isci_port *iport, + u32 phy_index); + +void sci_port_get_sas_address( + struct isci_port *iport, + struct sci_sas_address *sas_address); + +void sci_port_get_attached_sas_address( + struct isci_port *iport, + struct sci_sas_address *sas_address); + +void sci_port_set_hang_detection_timeout( + struct isci_port *isci_port, + u32 timeout); + +void isci_port_formed(struct asd_sas_phy *); +void isci_port_deformed(struct asd_sas_phy *); + +int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy); +int isci_ata_check_ready(struct domain_device *dev); +#endif /* !defined(_ISCI_PORT_H_) */ diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c new file mode 100644 index 000000000..c382a257b --- /dev/null +++ b/drivers/scsi/isci/port_config.c @@ -0,0 +1,760 @@ +/* + * This file is provided under a dual 
BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "host.h" + +#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10) +#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10) +#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (1000) + +enum SCIC_SDS_APC_ACTIVITY { + SCIC_SDS_APC_SKIP_PHY, + SCIC_SDS_APC_ADD_PHY, + SCIC_SDS_APC_START_TIMER, + + SCIC_SDS_APC_ACTIVITY_MAX +}; + +/* + * ****************************************************************************** + * General port configuration agent routines + * ****************************************************************************** */ + +/** + * sci_sas_address_compare() + * @address_one: A SAS Address to be compared. + * @address_two: A SAS Address to be compared. 
+ * + * Compare the two SAS Address and if SAS Address One is greater than SAS + * Address Two then return > 0 else if SAS Address One is less than SAS Address + * Two return < 0 Otherwise they are the same return 0 A signed value of x > 0 + * > y where x is returned for Address One > Address Two y is returned for + * Address One < Address Two 0 is returned ofr Address One = Address Two + */ +static s32 sci_sas_address_compare( + struct sci_sas_address address_one, + struct sci_sas_address address_two) +{ + if (address_one.high > address_two.high) { + return 1; + } else if (address_one.high < address_two.high) { + return -1; + } else if (address_one.low > address_two.low) { + return 1; + } else if (address_one.low < address_two.low) { + return -1; + } + + /* The two SAS Address must be identical */ + return 0; +} + +/** + * sci_port_configuration_agent_find_port() + * @ihost: The controller object used for the port search. + * @iphy: The phy object to match. + * + * This routine will find a matching port for the phy. This means that the + * port and phy both have the same broadcast sas address and same received sas + * address. The port address or the NULL if there is no matching + * port. port address if the port can be found to match the phy. + * NULL if there is no matching port for the phy. + */ +static struct isci_port *sci_port_configuration_agent_find_port( + struct isci_host *ihost, + struct isci_phy *iphy) +{ + u8 i; + struct sci_sas_address port_sas_address; + struct sci_sas_address port_attached_device_address; + struct sci_sas_address phy_sas_address; + struct sci_sas_address phy_attached_device_address; + + /* + * Since this phy can be a member of a wide port check to see if one or + * more phys match the sent and received SAS address as this phy in which + * case it should participate in the same port. + */ + sci_phy_get_sas_address(iphy, &phy_sas_address); + sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address); + + for (i = 0; i < ihost->logical_port_entries; i++) { + struct isci_port *iport = &ihost->ports[i]; + + sci_port_get_sas_address(iport, &port_sas_address); + sci_port_get_attached_sas_address(iport, &port_attached_device_address); + + if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 && + sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0) + return iport; + } + + return NULL; +} + +/** + * sci_port_configuration_agent_validate_ports() + * @ihost: This is the controller object that contains the port agent + * @port_agent: This is the port configuration agent for the controller. + * + * This routine will validate the port configuration is correct for the SCU + * hardware. The SCU hardware allows for port configurations as follows. LP0 + * -> (PE0), (PE0, PE1), (PE0, PE1, PE2, PE3) LP1 -> (PE1) LP2 -> (PE2), (PE2, + * PE3) LP3 -> (PE3) enum sci_status SCI_SUCCESS the port configuration is valid for + * this port configuration agent. SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION + * the port configuration is not valid for this port configuration agent. 
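+ * In other words, each logical port may only start at its own phy index
+ * and grow toward higher-numbered phys in the fixed groupings listed above.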
+ */ +static enum sci_status sci_port_configuration_agent_validate_ports( + struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent) +{ + struct sci_sas_address first_address; + struct sci_sas_address second_address; + + /* + * Sanity check the max ranges for all the phys the max index + * is always equal to the port range index */ + if (port_agent->phy_valid_port_range[0].max_index != 0 || + port_agent->phy_valid_port_range[1].max_index != 1 || + port_agent->phy_valid_port_range[2].max_index != 2 || + port_agent->phy_valid_port_range[3].max_index != 3) + return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; + + /* + * This is a request to configure a single x4 port or at least attempt + * to make all the phys into a single port */ + if (port_agent->phy_valid_port_range[0].min_index == 0 && + port_agent->phy_valid_port_range[1].min_index == 0 && + port_agent->phy_valid_port_range[2].min_index == 0 && + port_agent->phy_valid_port_range[3].min_index == 0) + return SCI_SUCCESS; + + /* + * This is a degenerate case where phy 1 and phy 2 are assigned + * to the same port this is explicitly disallowed by the hardware + * unless they are part of the same x4 port and this condition was + * already checked above. */ + if (port_agent->phy_valid_port_range[2].min_index == 1) { + return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; + } + + /* + * PE0 and PE3 can never have the same SAS Address unless they + * are part of the same x4 wide port and we have already checked + * for this condition. */ + sci_phy_get_sas_address(&ihost->phys[0], &first_address); + sci_phy_get_sas_address(&ihost->phys[3], &second_address); + + if (sci_sas_address_compare(first_address, second_address) == 0) { + return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; + } + + /* + * PE0 and PE1 are configured into a 2x1 ports make sure that the + * SAS Address for PE0 and PE2 are different since they can not be + * part of the same port. */ + if (port_agent->phy_valid_port_range[0].min_index == 0 && + port_agent->phy_valid_port_range[1].min_index == 1) { + sci_phy_get_sas_address(&ihost->phys[0], &first_address); + sci_phy_get_sas_address(&ihost->phys[2], &second_address); + + if (sci_sas_address_compare(first_address, second_address) == 0) { + return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; + } + } + + /* + * PE2 and PE3 are configured into a 2x1 ports make sure that the + * SAS Address for PE1 and PE3 are different since they can not be + * part of the same port. 
*/ + if (port_agent->phy_valid_port_range[2].min_index == 2 && + port_agent->phy_valid_port_range[3].min_index == 3) { + sci_phy_get_sas_address(&ihost->phys[1], &first_address); + sci_phy_get_sas_address(&ihost->phys[3], &second_address); + + if (sci_sas_address_compare(first_address, second_address) == 0) { + return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; + } + } + + return SCI_SUCCESS; +} + +/* + * ****************************************************************************** + * Manual port configuration agent routines + * ****************************************************************************** */ + +/* verify all of the phys in the same port are using the same SAS address */ +static enum sci_status +sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent) +{ + u32 phy_mask; + u32 assigned_phy_mask; + struct sci_sas_address sas_address; + struct sci_sas_address phy_assigned_address; + u8 port_index; + u8 phy_index; + + assigned_phy_mask = 0; + sas_address.high = 0; + sas_address.low = 0; + + for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) { + phy_mask = ihost->oem_parameters.ports[port_index].phy_mask; + + if (!phy_mask) + continue; + /* + * Make sure that one or more of the phys were not already assinged to + * a different port. */ + if ((phy_mask & ~assigned_phy_mask) == 0) { + return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; + } + + /* Find the starting phy index for this round through the loop */ + for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) { + if ((phy_mask & (1 << phy_index)) == 0) + continue; + sci_phy_get_sas_address(&ihost->phys[phy_index], + &sas_address); + + /* + * The phy_index can be used as the starting point for the + * port range since the hardware starts all logical ports + * the same as the PE index. */ + port_agent->phy_valid_port_range[phy_index].min_index = port_index; + port_agent->phy_valid_port_range[phy_index].max_index = phy_index; + + if (phy_index != port_index) { + return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; + } + + break; + } + + /* + * See how many additional phys are being added to this logical port. + * Note: We have not moved the current phy_index so we will actually + * compare the startting phy with itself. + * This is expected and required to add the phy to the port. 
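Editor's note: the manual-mode walk above keeps a running mask of phys already claimed by earlier ports and rejects any OEM phy_mask that contributes no new phy. A toy, self-contained illustration of that bookkeeping; the example mask values are invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t port_phy_mask[4] = { 0x3, 0x4, 0x8, 0x0 };	/* assumed OEM config */
	uint8_t assigned = 0;
	int port;

	for (port = 0; port < 4; port++) {
		uint8_t mask = port_phy_mask[port];

		if (!mask)
			continue;			/* unused port */
		if ((mask & ~assigned) == 0) {
			printf("port %d: all phys already claimed\n", port);
			return 1;			/* invalid configuration */
		}
		assigned |= mask;			/* claim these phys */
	}
	printf("assigned mask: 0x%x\n", assigned);
	return 0;
}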
*/ + for (; phy_index < SCI_MAX_PHYS; phy_index++) { + if ((phy_mask & (1 << phy_index)) == 0) + continue; + sci_phy_get_sas_address(&ihost->phys[phy_index], + &phy_assigned_address); + + if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) { + /* + * The phy mask specified that this phy is part of the same port + * as the starting phy and it is not so fail this configuration */ + return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; + } + + port_agent->phy_valid_port_range[phy_index].min_index = port_index; + port_agent->phy_valid_port_range[phy_index].max_index = phy_index; + + sci_port_add_phy(&ihost->ports[port_index], + &ihost->phys[phy_index]); + + assigned_phy_mask |= (1 << phy_index); + } + + } + + return sci_port_configuration_agent_validate_ports(ihost, port_agent); +} + +static void mpc_agent_timeout(struct timer_list *t) +{ + u8 index; + struct sci_timer *tmr = from_timer(tmr, t, timer); + struct sci_port_configuration_agent *port_agent; + struct isci_host *ihost; + unsigned long flags; + u16 configure_phy_mask; + + port_agent = container_of(tmr, typeof(*port_agent), timer); + ihost = container_of(port_agent, typeof(*ihost), port_agent); + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (tmr->cancel) + goto done; + + port_agent->timer_pending = false; + + /* Find the mask of phys that are reported read but as yet unconfigured into a port */ + configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask; + + for (index = 0; index < SCI_MAX_PHYS; index++) { + struct isci_phy *iphy = &ihost->phys[index]; + + if (configure_phy_mask & (1 << index)) { + port_agent->link_up_handler(ihost, port_agent, + phy_get_non_dummy_port(iphy), + iphy); + } + } + +done: + spin_unlock_irqrestore(&ihost->scic_lock, flags); +} + +static void sci_mpc_agent_link_up(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent, + struct isci_port *iport, + struct isci_phy *iphy) +{ + /* If the port is NULL then the phy was not assigned to a port. + * This is because the phy was not given the same SAS Address as + * the other PHYs in the port. + */ + if (!iport) + return; + + port_agent->phy_ready_mask |= (1 << iphy->phy_index); + sci_port_link_up(iport, iphy); + if ((iport->active_phy_mask & (1 << iphy->phy_index))) + port_agent->phy_configured_mask |= (1 << iphy->phy_index); +} + +/** + * sci_mpc_agent_link_down() + * @ihost: This is the controller object that receives the link down + * notification. + * @port_agent: This is the port configuration agent for the controller. + * @iport: This is the port object associated with the phy. If the is no + * associated port this is an NULL. The port is an invalid + * handle only if the phy was never port of this port. This happens when + * the phy is not broadcasting the same SAS address as the other phys in the + * assigned port. + * @iphy: This is the phy object which has gone link down. + * + * This function handles the manual port configuration link down notifications. + * Since all ports and phys are associated at initialization time we just turn + * around and notifiy the port object of the link down event. If this PHY is + * not associated with a port there is no action taken. Is it possible to get a + * link down notification from a phy that has no assocoated port? 
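Editor's note: mpc_agent_timeout() above recovers the enclosing port agent, and then the host, from the embedded timer via container_of(). A minimal user-space illustration of that pointer arithmetic; the structures here are stand-ins, not the driver's.

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): given a pointer to an embedded
 * member, recover the enclosing object. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct agent {
	int timer_pending;		/* stands in for struct sci_timer et al. */
};

struct host {
	int id;
	struct agent port_agent;	/* embedded, like in struct isci_host */
};

int main(void)
{
	struct host h = { .id = 7 };
	struct agent *a = &h.port_agent;
	struct host *back = container_of(a, struct host, port_agent);

	printf("recovered host id: %d\n", back->id);
	return 0;
}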
+ */ +static void sci_mpc_agent_link_down( + struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent, + struct isci_port *iport, + struct isci_phy *iphy) +{ + if (iport != NULL) { + /* + * If we can form a new port from the remainder of the phys + * then we want to start the timer to allow the SCI User to + * cleanup old devices and rediscover the port before + * rebuilding the port with the phys that remain in the ready + * state. + */ + port_agent->phy_ready_mask &= ~(1 << iphy->phy_index); + port_agent->phy_configured_mask &= ~(1 << iphy->phy_index); + + /* + * Check to see if there are more phys waiting to be + * configured into a port. If there are allow the SCI User + * to tear down this port, if necessary, and then reconstruct + * the port after the timeout. + */ + if ((port_agent->phy_configured_mask == 0x0000) && + (port_agent->phy_ready_mask != 0x0000) && + !port_agent->timer_pending) { + port_agent->timer_pending = true; + + sci_mod_timer(&port_agent->timer, + SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT); + } + + sci_port_link_down(iport, iphy); + } +} + +/* verify phys are assigned a valid SAS address for automatic port + * configuration mode. + */ +static enum sci_status +sci_apc_agent_validate_phy_configuration(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent) +{ + u8 phy_index; + u8 port_index; + struct sci_sas_address sas_address; + struct sci_sas_address phy_assigned_address; + + phy_index = 0; + + while (phy_index < SCI_MAX_PHYS) { + port_index = phy_index; + + /* Get the assigned SAS Address for the first PHY on the controller. */ + sci_phy_get_sas_address(&ihost->phys[phy_index], + &sas_address); + + while (++phy_index < SCI_MAX_PHYS) { + sci_phy_get_sas_address(&ihost->phys[phy_index], + &phy_assigned_address); + + /* Verify each of the SAS address are all the same for every PHY */ + if (sci_sas_address_compare(sas_address, phy_assigned_address) == 0) { + port_agent->phy_valid_port_range[phy_index].min_index = port_index; + port_agent->phy_valid_port_range[phy_index].max_index = phy_index; + } else { + port_agent->phy_valid_port_range[phy_index].min_index = phy_index; + port_agent->phy_valid_port_range[phy_index].max_index = phy_index; + break; + } + } + } + + return sci_port_configuration_agent_validate_ports(ihost, port_agent); +} + +/* + * This routine will restart the automatic port configuration timeout + * timer for the next time period. This could be caused by either a link + * down event or a link up event where we can not yet tell to which a phy + * belongs. + */ +static void sci_apc_agent_start_timer(struct sci_port_configuration_agent *port_agent, + u32 timeout) +{ + port_agent->timer_pending = true; + sci_mod_timer(&port_agent->timer, timeout); +} + +static void sci_apc_agent_configure_ports(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent, + struct isci_phy *iphy, + bool start_timer) +{ + u8 port_index; + enum sci_status status; + struct isci_port *iport; + enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY; + + iport = sci_port_configuration_agent_find_port(ihost, iphy); + + if (iport) { + if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) + apc_activity = SCIC_SDS_APC_ADD_PHY; + else + apc_activity = SCIC_SDS_APC_SKIP_PHY; + } else { + /* + * There is no matching Port for this PHY so lets search through the + * Ports and see if we can add the PHY to its own port or maybe start + * the timer and wait to see if a wider port can be made. 
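Editor's note: the automatic-mode validation above groups consecutive phys that report the same SAS address into one candidate port range starting at the first phy of the run. A simplified sketch of just that grouping rule; the addresses are invented and the driver's actual range bookkeeping is richer than this.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr[4] = { 0x5000A, 0x5000A, 0x5000B, 0x5000B };	/* example */
	int min_index[4], i, start = 0;

	min_index[0] = 0;
	for (i = 1; i < 4; i++) {
		if (addr[i] != addr[i - 1])
			start = i;		/* new run -> new candidate port */
		min_index[i] = start;
	}

	for (i = 0; i < 4; i++)
		printf("phy%d: valid port range starts at %d\n", i, min_index[i]);
	return 0;
}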
+ * + * Note the break when we reach the condition of the port id == phy id */ + for (port_index = port_agent->phy_valid_port_range[iphy->phy_index].min_index; + port_index <= port_agent->phy_valid_port_range[iphy->phy_index].max_index; + port_index++) { + + iport = &ihost->ports[port_index]; + + /* First we must make sure that this PHY can be added to this Port. */ + if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) { + /* + * Port contains a PHY with a greater PHY ID than the current + * PHY that has gone link up. This phy can not be part of any + * port so skip it and move on. */ + if (iport->active_phy_mask > (1 << iphy->phy_index)) { + apc_activity = SCIC_SDS_APC_SKIP_PHY; + break; + } + + /* + * We have reached the end of our Port list and have not found + * any reason why we should not either add the PHY to the port + * or wait for more phys to become active. */ + if (iport->physical_port_index == iphy->phy_index) { + /* + * The Port either has no active PHYs. + * Consider that if the port had any active PHYs we would have + * or active PHYs with + * a lower PHY Id than this PHY. */ + if (apc_activity != SCIC_SDS_APC_START_TIMER) { + apc_activity = SCIC_SDS_APC_ADD_PHY; + } + + break; + } + + /* + * The current Port has no active PHYs and this PHY could be part + * of this Port. Since we dont know as yet setup to start the + * timer and see if there is a better configuration. */ + if (iport->active_phy_mask == 0) { + apc_activity = SCIC_SDS_APC_START_TIMER; + } + } else if (iport->active_phy_mask != 0) { + /* + * The Port has an active phy and the current Phy can not + * participate in this port so skip the PHY and see if + * there is a better configuration. */ + apc_activity = SCIC_SDS_APC_SKIP_PHY; + } + } + } + + /* + * Check to see if the start timer operations should instead map to an + * add phy operation. This is caused because we have been waiting to + * add a phy to a port but could not becuase the automatic port + * configuration engine had a choice of possible ports for the phy. + * Since we have gone through a timeout we are going to restrict the + * choice to the smallest possible port. */ + if ( + (start_timer == false) + && (apc_activity == SCIC_SDS_APC_START_TIMER) + ) { + apc_activity = SCIC_SDS_APC_ADD_PHY; + } + + switch (apc_activity) { + case SCIC_SDS_APC_ADD_PHY: + status = sci_port_add_phy(iport, iphy); + + if (status == SCI_SUCCESS) { + port_agent->phy_configured_mask |= (1 << iphy->phy_index); + } + break; + + case SCIC_SDS_APC_START_TIMER: + sci_apc_agent_start_timer(port_agent, + SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); + break; + + case SCIC_SDS_APC_SKIP_PHY: + default: + /* do nothing the PHY can not be made part of a port at this time. */ + break; + } +} + +/** + * sci_apc_agent_link_up - handle apc link up events + * @ihost: This is the controller object that receives the link up + * notification. + * @port_agent: This is the port configuration agent for the controller. + * @iport: This is the port object associated with the phy. If the is no + * associated port this is an NULL. + * @iphy: This is the phy object which has gone link up. + * + * This method handles the automatic port configuration for link up + * notifications. Is it possible to get a link down notification from a phy + * that has no assocoated port? 
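Editor's note: one detail of sci_apc_agent_configure_ports() worth calling out is that when it runs from the timeout path (start_timer == false), a pending "wait for a wider port" decision is converted into an immediate add of the phy. A toy model of just that conversion; the enum values are illustrative, not the driver's SCIC_SDS_APC_* constants.

#include <stdbool.h>
#include <stdio.h>

enum apc_action { APC_SKIP, APC_ADD_PHY, APC_START_TIMER };

/* After a timeout we no longer wait for a wider port to form. */
static enum apc_action apc_resolve(enum apc_action a, bool start_timer)
{
	if (!start_timer && a == APC_START_TIMER)
		return APC_ADD_PHY;
	return a;
}

int main(void)
{
	printf("%d\n", apc_resolve(APC_START_TIMER, false));	/* -> APC_ADD_PHY */
	printf("%d\n", apc_resolve(APC_START_TIMER, true));	/* -> APC_START_TIMER */
	return 0;
}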
+ */ +static void sci_apc_agent_link_up(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent, + struct isci_port *iport, + struct isci_phy *iphy) +{ + u8 phy_index = iphy->phy_index; + + if (!iport) { + /* the phy is not the part of this port */ + port_agent->phy_ready_mask |= 1 << phy_index; + sci_apc_agent_start_timer(port_agent, + SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); + } else { + /* the phy is already the part of the port */ + port_agent->phy_ready_mask |= 1 << phy_index; + sci_port_link_up(iport, iphy); + } +} + +/** + * sci_apc_agent_link_down() + * @ihost: This is the controller object that receives the link down + * notification. + * @port_agent: This is the port configuration agent for the controller. + * @iport: This is the port object associated with the phy. If the is no + * associated port this is an NULL. + * @iphy: This is the phy object which has gone link down. + * + * This method handles the automatic port configuration link down + * notifications. not associated with a port there is no action taken. Is it + * possible to get a link down notification from a phy that has no assocoated + * port? + */ +static void sci_apc_agent_link_down( + struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent, + struct isci_port *iport, + struct isci_phy *iphy) +{ + port_agent->phy_ready_mask &= ~(1 << iphy->phy_index); + + if (!iport) + return; + if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) { + enum sci_status status; + + status = sci_port_remove_phy(iport, iphy); + + if (status == SCI_SUCCESS) + port_agent->phy_configured_mask &= ~(1 << iphy->phy_index); + } +} + +/* configure the phys into ports when the timer fires */ +static void apc_agent_timeout(struct timer_list *t) +{ + u32 index; + struct sci_timer *tmr = from_timer(tmr, t, timer); + struct sci_port_configuration_agent *port_agent; + struct isci_host *ihost; + unsigned long flags; + u16 configure_phy_mask; + + port_agent = container_of(tmr, typeof(*port_agent), timer); + ihost = container_of(port_agent, typeof(*ihost), port_agent); + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (tmr->cancel) + goto done; + + port_agent->timer_pending = false; + + configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask; + + if (!configure_phy_mask) + goto done; + + for (index = 0; index < SCI_MAX_PHYS; index++) { + if ((configure_phy_mask & (1 << index)) == 0) + continue; + + sci_apc_agent_configure_ports(ihost, port_agent, + &ihost->phys[index], false); + } + + if (is_controller_start_complete(ihost)) + sci_controller_transition_to_ready(ihost, SCI_SUCCESS); + +done: + spin_unlock_irqrestore(&ihost->scic_lock, flags); +} + +/* + * ****************************************************************************** + * Public port configuration agent routines + * ****************************************************************************** */ + +/* + * This method will construct the port configuration agent for operation. This + * call is universal for both manual port configuration and automatic port + * configuration modes. 
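Editor's note: sci_port_configuration_agent_initialize() (below) selects manual or automatic behaviour by installing mode-specific link-up/link-down callbacks in the agent, and is_port_config_apc() later identifies the mode by comparing the stored pointer. A small sketch of that function-pointer dispatch pattern; the types and handlers are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct cfg_agent {
	void (*link_up)(struct cfg_agent *agent, int phy);
};

static void mpc_link_up(struct cfg_agent *agent, int phy)
{
	printf("manual mode: phy %d up\n", phy);
}

static void apc_link_up(struct cfg_agent *agent, int phy)
{
	printf("automatic mode: phy %d up\n", phy);
}

/* Mirrors the idea behind is_port_config_apc(): the mode is identified by
 * which handler was installed. */
static bool agent_is_apc(const struct cfg_agent *agent)
{
	return agent->link_up == apc_link_up;
}

int main(void)
{
	struct cfg_agent agent = { .link_up = apc_link_up };	/* assumed mode */

	agent.link_up(&agent, 0);		/* dispatch through the agent */
	printf("is apc: %d\n", agent_is_apc(&agent));
	return 0;
}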
+ */ +void sci_port_configuration_agent_construct( + struct sci_port_configuration_agent *port_agent) +{ + u32 index; + + port_agent->phy_configured_mask = 0x00; + port_agent->phy_ready_mask = 0x00; + + port_agent->link_up_handler = NULL; + port_agent->link_down_handler = NULL; + + port_agent->timer_pending = false; + + for (index = 0; index < SCI_MAX_PORTS; index++) { + port_agent->phy_valid_port_range[index].min_index = 0; + port_agent->phy_valid_port_range[index].max_index = 0; + } +} + +bool is_port_config_apc(struct isci_host *ihost) +{ + return ihost->port_agent.link_up_handler == sci_apc_agent_link_up; +} + +enum sci_status sci_port_configuration_agent_initialize( + struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent) +{ + enum sci_status status; + enum sci_port_configuration_mode mode; + + mode = ihost->oem_parameters.controller.mode_type; + + if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { + status = sci_mpc_agent_validate_phy_configuration( + ihost, port_agent); + + port_agent->link_up_handler = sci_mpc_agent_link_up; + port_agent->link_down_handler = sci_mpc_agent_link_down; + + sci_init_timer(&port_agent->timer, mpc_agent_timeout); + } else { + status = sci_apc_agent_validate_phy_configuration( + ihost, port_agent); + + port_agent->link_up_handler = sci_apc_agent_link_up; + port_agent->link_down_handler = sci_apc_agent_link_down; + + sci_init_timer(&port_agent->timer, apc_agent_timeout); + } + + return status; +} diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c new file mode 100644 index 000000000..a2bbe46f8 --- /dev/null +++ b/drivers/scsi/isci/probe_roms.c @@ -0,0 +1,231 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. 
+ */ + +/* probe_roms - scan for oem parameters */ + +#include +#include +#include +#include +#include + +#include "isci.h" +#include "task.h" +#include "probe_roms.h" + +static efi_char16_t isci_efivar_name[] = { + 'R', 's', 't', 'S', 'c', 'u', 'O' +}; + +struct isci_orom *isci_request_oprom(struct pci_dev *pdev) +{ + void __iomem *oprom = pci_map_biosrom(pdev); + struct isci_orom *rom = NULL; + size_t len, i; + int j; + char oem_sig[4]; + struct isci_oem_hdr oem_hdr; + u8 *tmp, sum; + + if (!oprom) + return NULL; + + len = pci_biosrom_size(pdev); + rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL); + if (!rom) { + pci_unmap_biosrom(oprom); + dev_warn(&pdev->dev, + "Unable to allocate memory for orom\n"); + return NULL; + } + + for (i = 0; i < len && rom; i += ISCI_OEM_SIG_SIZE) { + memcpy_fromio(oem_sig, oprom + i, ISCI_OEM_SIG_SIZE); + + /* we think we found the OEM table */ + if (memcmp(oem_sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) == 0) { + size_t copy_len; + + memcpy_fromio(&oem_hdr, oprom + i, sizeof(oem_hdr)); + + copy_len = min(oem_hdr.len - sizeof(oem_hdr), + sizeof(*rom)); + + memcpy_fromio(rom, + oprom + i + sizeof(oem_hdr), + copy_len); + + /* calculate checksum */ + tmp = (u8 *)&oem_hdr; + for (j = 0, sum = 0; j < sizeof(oem_hdr); j++, tmp++) + sum += *tmp; + + tmp = (u8 *)rom; + for (j = 0; j < sizeof(*rom); j++, tmp++) + sum += *tmp; + + if (sum != 0) { + dev_warn(&pdev->dev, + "OEM table checksum failed\n"); + continue; + } + + /* keep going if that's not the oem param table */ + if (memcmp(rom->hdr.signature, + ISCI_ROM_SIG, + ISCI_ROM_SIG_SIZE) != 0) + continue; + + dev_info(&pdev->dev, + "OEM parameter table found in OROM\n"); + break; + } + } + + if (i >= len) { + dev_err(&pdev->dev, "oprom parse error\n"); + rom = NULL; + } + pci_unmap_biosrom(oprom); + + return rom; +} + +struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw) +{ + struct isci_orom *orom = NULL, *data; + int i, j; + + if (request_firmware(&fw, ISCI_FW_NAME, &pdev->dev) != 0) + return NULL; + + if (fw->size < sizeof(*orom)) + goto out; + + data = (struct isci_orom *)fw->data; + + if (strncmp(ISCI_ROM_SIG, data->hdr.signature, + strlen(ISCI_ROM_SIG)) != 0) + goto out; + + orom = devm_kzalloc(&pdev->dev, fw->size, GFP_KERNEL); + if (!orom) + goto out; + + memcpy(orom, fw->data, fw->size); + + if (is_c0(pdev) || is_c1(pdev)) + goto out; + + /* + * deprecated: override default amp_control for pre-preproduction + * silicon revisions + */ + for (i = 0; i < ARRAY_SIZE(orom->ctrl); i++) + for (j = 0; j < ARRAY_SIZE(orom->ctrl[i].phys); j++) { + orom->ctrl[i].phys[j].afe_tx_amp_control0 = 0xe7c03; + orom->ctrl[i].phys[j].afe_tx_amp_control1 = 0xe7c03; + orom->ctrl[i].phys[j].afe_tx_amp_control2 = 0xe7c03; + orom->ctrl[i].phys[j].afe_tx_amp_control3 = 0xe7c03; + } + out: + release_firmware(fw); + + return orom; +} + +static struct efi *get_efi(void) +{ +#ifdef CONFIG_EFI + return &efi; +#else + return NULL; +#endif +} + +struct isci_orom *isci_get_efi_var(struct pci_dev *pdev) +{ + efi_status_t status; + struct isci_orom *rom; + struct isci_oem_hdr *oem_hdr; + u8 *tmp, sum; + int j; + unsigned long data_len; + u8 *efi_data; + u32 efi_attrib = 0; + + data_len = 1024; + efi_data = devm_kzalloc(&pdev->dev, data_len, GFP_KERNEL); + if (!efi_data) { + dev_warn(&pdev->dev, + "Unable to allocate memory for EFI data\n"); + return NULL; + } + + rom = (struct isci_orom *)(efi_data + sizeof(struct isci_oem_hdr)); + + if (get_efi()) + status = get_efi()->get_variable(isci_efivar_name, + 
&ISCI_EFI_VENDOR_GUID, + &efi_attrib, + &data_len, + efi_data); + else + status = EFI_NOT_FOUND; + + if (status != EFI_SUCCESS) { + dev_warn(&pdev->dev, + "Unable to obtain EFI var data for OEM parms\n"); + return NULL; + } + + oem_hdr = (struct isci_oem_hdr *)efi_data; + + if (memcmp(oem_hdr->sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) != 0) { + dev_warn(&pdev->dev, + "Invalid OEM header signature\n"); + return NULL; + } + + /* calculate checksum */ + tmp = (u8 *)efi_data; + for (j = 0, sum = 0; j < (sizeof(*oem_hdr) + sizeof(*rom)); j++, tmp++) + sum += *tmp; + + if (sum != 0) { + dev_warn(&pdev->dev, + "OEM table checksum failed\n"); + return NULL; + } + + if (memcmp(rom->hdr.signature, + ISCI_ROM_SIG, + ISCI_ROM_SIG_SIZE) != 0) { + dev_warn(&pdev->dev, + "Invalid OEM table signature\n"); + return NULL; + } + + return rom; +} diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h new file mode 100644 index 000000000..e08b57824 --- /dev/null +++ b/drivers/scsi/isci/probe_roms.h @@ -0,0 +1,330 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
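Editor's note: both the OROM and EFI paths above accept an OEM table only if the byte sum of the header plus the table is zero modulo 256. A self-contained illustration of that checksum rule; the buffer contents below are invented purely to exercise the check.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Returns non-zero when the buffer's bytes sum to zero modulo 256, the rule
 * applied by isci_request_oprom() and isci_get_efi_var(). */
static int oem_checksum_ok(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return sum == 0;
}

int main(void)
{
	uint8_t table[8] = { '$', 'O', 'E', 'M', 0x10, 0x02, 0x00, 0x00 };
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < 7; i++)
		sum += table[i];
	table[7] = (uint8_t)(0u - sum);		/* firmware-style fixup byte */

	printf("checksum ok: %d\n", oem_checksum_ok(table, sizeof(table)));
	return 0;
}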
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _ISCI_PROBE_ROMS_H_ +#define _ISCI_PROBE_ROMS_H_ + +#ifdef __KERNEL__ +#include +#include +#include +#include "isci.h" + +#define SCIC_SDS_PARM_NO_SPEED 0 + +/* generation 1 (i.e. 1.5 Gb/s) */ +#define SCIC_SDS_PARM_GEN1_SPEED 1 + +/* generation 2 (i.e. 3.0 Gb/s) */ +#define SCIC_SDS_PARM_GEN2_SPEED 2 + +/* generation 3 (i.e. 6.0 Gb/s) */ +#define SCIC_SDS_PARM_GEN3_SPEED 3 +#define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED + +/* parameters that can be set by module parameters */ +struct sci_user_parameters { + struct sci_phy_user_params { + /** + * This field specifies the NOTIFY (ENABLE SPIN UP) primitive + * insertion frequency for this phy index. + */ + u32 notify_enable_spin_up_insertion_frequency; + + /** + * This method specifies the number of transmitted DWORDs within which + * to transmit a single ALIGN primitive. This value applies regardless + * of what type of device is attached or connection state. A value of + * 0 indicates that no ALIGN primitives will be inserted. + */ + u16 align_insertion_frequency; + + /** + * This method specifies the number of transmitted DWORDs within which + * to transmit 2 ALIGN primitives. This applies for SAS connections + * only. A minimum value of 3 is required for this field. + */ + u16 in_connection_align_insertion_frequency; + + /** + * This field indicates the maximum speed generation to be utilized + * by phys in the supplied port. + * - A value of 1 indicates generation 1 (i.e. 1.5 Gb/s). + * - A value of 2 indicates generation 2 (i.e. 3.0 Gb/s). + * - A value of 3 indicates generation 3 (i.e. 6.0 Gb/s). + */ + u8 max_speed_generation; + + } phys[SCI_MAX_PHYS]; + + /** + * This field specifies the maximum number of direct attached devices + * that can have power supplied to them simultaneously. + */ + u8 max_concurr_spinup; + + /** + * This field specifies the number of seconds to allow a phy to consume + * power before yielding to another phy. + * + */ + u8 phy_spin_up_delay_interval; + + /** + * These timer values specifies how long a link will remain open with no + * activity in increments of a microsecond, it can be in increments of + * 100 microseconds if the upper most bit is set. + * + */ + u16 stp_inactivity_timeout; + u16 ssp_inactivity_timeout; + + /** + * These timer values specifies how long a link will remain open in increments + * of 100 microseconds. + * + */ + u16 stp_max_occupancy_timeout; + u16 ssp_max_occupancy_timeout; + + /** + * This timer value specifies how long a link will remain open with no + * outbound traffic in increments of a microsecond. 
+ * + */ + u8 no_outbound_task_timeout; + +}; + +#define SCIC_SDS_PARM_PHY_MASK_MIN 0x0 +#define SCIC_SDS_PARM_PHY_MASK_MAX 0xF +#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4 + +struct sci_oem_params; +int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version); + +struct isci_orom; +struct isci_orom *isci_request_oprom(struct pci_dev *pdev); +struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw); +struct isci_orom *isci_get_efi_var(struct pci_dev *pdev); + +struct isci_oem_hdr { + u8 sig[4]; + u8 rev_major; + u8 rev_minor; + u16 len; + u8 checksum; + u8 reserved1; + u16 reserved2; +} __attribute__ ((packed)); + +#else +#define SCI_MAX_PORTS 4 +#define SCI_MAX_PHYS 4 +#define SCI_MAX_CONTROLLERS 2 +#endif + +#define ISCI_FW_NAME "isci/isci_firmware.bin" + +#define ROMSIGNATURE 0xaa55 + +#define ISCI_OEM_SIG "$OEM" +#define ISCI_OEM_SIG_SIZE 4 +#define ISCI_ROM_SIG "ISCUOEMB" +#define ISCI_ROM_SIG_SIZE 8 + +#define ISCI_EFI_VENDOR_GUID \ + EFI_GUID(0x193dfefa, 0xa445, 0x4302, 0x99, 0xd8, 0xef, 0x3a, 0xad, \ + 0x1a, 0x04, 0xc6) +#define ISCI_EFI_VAR_NAME "RstScuO" + +#define ISCI_ROM_VER_1_0 0x10 +#define ISCI_ROM_VER_1_1 0x11 +#define ISCI_ROM_VER_1_3 0x13 +#define ISCI_ROM_VER_LATEST ISCI_ROM_VER_1_3 + +/* Allowed PORT configuration modes APC Automatic PORT configuration mode is + * defined by the OEM configuration parameters providing no PHY_MASK parameters + * for any PORT. i.e. There are no phys assigned to any of the ports at start. + * MPC Manual PORT configuration mode is defined by the OEM configuration + * parameters providing a PHY_MASK value for any PORT. It is assumed that any + * PORT with no PHY_MASK is an invalid port and not all PHYs must be assigned. + * A PORT_PHY mask that assigns just a single PHY to a port and no other PHYs + * being assigned is sufficient to declare manual PORT configuration. + */ +enum sci_port_configuration_mode { + SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0, + SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1 +}; + +struct sci_bios_oem_param_block_hdr { + uint8_t signature[ISCI_ROM_SIG_SIZE]; + uint16_t total_block_length; + uint8_t hdr_length; + uint8_t version; + uint8_t preboot_source; + uint8_t num_elements; + uint16_t element_length; + uint8_t reserved[8]; +} __attribute__ ((packed)); + +struct sci_oem_params { + struct { + uint8_t mode_type; + uint8_t max_concurr_spin_up; + /* + * This bitfield indicates the OEM's desired default Tx + * Spread Spectrum Clocking (SSC) settings for SATA and SAS. + * NOTE: Default SSC Modulation Frequency is 31.5KHz. + */ + union { + struct { + /* + * NOTE: Max spread for SATA is +0 / -5000 PPM. + * Down-spreading SSC (only method allowed for SATA): + * SATA SSC Tx Disabled = 0x0 + * SATA SSC Tx at +0 / -1419 PPM Spread = 0x2 + * SATA SSC Tx at +0 / -2129 PPM Spread = 0x3 + * SATA SSC Tx at +0 / -4257 PPM Spread = 0x6 + * SATA SSC Tx at +0 / -4967 PPM Spread = 0x7 + */ + uint8_t ssc_sata_tx_spread_level:4; + /* + * SAS SSC Tx Disabled = 0x0 + * + * NOTE: Max spread for SAS down-spreading +0 / + * -2300 PPM + * Down-spreading SSC: + * SAS SSC Tx at +0 / -1419 PPM Spread = 0x2 + * SAS SSC Tx at +0 / -2129 PPM Spread = 0x3 + * + * NOTE: Max spread for SAS center-spreading +2300 / + * -2300 PPM + * Center-spreading SSC: + * SAS SSC Tx at +1064 / -1064 PPM Spread = 0x3 + * SAS SSC Tx at +2129 / -2129 PPM Spread = 0x6 + */ + uint8_t ssc_sas_tx_spread_level:3; + /* + * NOTE: Refer to the SSC section of the SAS 2.x + * Specification for proper setting of this field. 
+ * For standard SAS Initiator SAS PHY operation it + * should be 0 for Down-spreading. + * SAS SSC Tx spread type: + * Down-spreading SSC = 0 + * Center-spreading SSC = 1 + */ + uint8_t ssc_sas_tx_type:1; + }; + uint8_t do_enable_ssc; + }; + /* + * This field indicates length of the SAS/SATA cable between + * host and device. + * This field is used make relationship between analog + * parameters of the phy in the silicon and length of the cable. + * Supported cable attenuation levels: + * "short"- up to 3m, "medium"-3m to 6m, and "long"- more than + * 6m. + * + * This is bit mask field: + * + * BIT: (MSB) 7 6 5 4 + * ASSIGNMENT: - Medium cable + * length assignment + * BIT: 3 2 1 0 (LSB) + * ASSIGNMENT: - Long cable length + * assignment + * + * BITS 7-4 are set when the cable length is assigned to medium + * BITS 3-0 are set when the cable length is assigned to long + * + * The BIT positions are clear when the cable length is + * assigned to short. + * + * Setting the bits for both long and medium cable length is + * undefined. + * + * A value of 0x84 would assign + * phy3 - medium + * phy2 - long + * phy1 - short + * phy0 - short + */ + uint8_t cable_selection_mask; + } controller; + + struct { + uint8_t phy_mask; + } ports[SCI_MAX_PORTS]; + + struct sci_phy_oem_params { + struct { + uint32_t high; + uint32_t low; + } sas_address; + + uint32_t afe_tx_amp_control0; + uint32_t afe_tx_amp_control1; + uint32_t afe_tx_amp_control2; + uint32_t afe_tx_amp_control3; + } phys[SCI_MAX_PHYS]; +} __attribute__ ((packed)); + +struct isci_orom { + struct sci_bios_oem_param_block_hdr hdr; + struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS]; +} __attribute__ ((packed)); + +#endif diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h new file mode 100644 index 000000000..63468cfe3 --- /dev/null +++ b/drivers/scsi/isci/registers.h @@ -0,0 +1,1863 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SCU_REGISTERS_H_ +#define _SCU_REGISTERS_H_ + +/** + * This file contains the constants and structures for the SCU memory mapped + * registers. + * + * + */ + +#define SCU_VIIT_ENTRY_ID_MASK (0xC0000000) +#define SCU_VIIT_ENTRY_ID_SHIFT (30) + +#define SCU_VIIT_ENTRY_FUNCTION_MASK (0x0FF00000) +#define SCU_VIIT_ENTRY_FUNCTION_SHIFT (20) + +#define SCU_VIIT_ENTRY_IPPTMODE_MASK (0x0001F800) +#define SCU_VIIT_ENTRY_IPPTMODE_SHIFT (12) + +#define SCU_VIIT_ENTRY_LPVIE_MASK (0x00000F00) +#define SCU_VIIT_ENTRY_LPVIE_SHIFT (8) + +#define SCU_VIIT_ENTRY_STATUS_MASK (0x000000FF) +#define SCU_VIIT_ENTRY_STATUS_SHIFT (0) + +#define SCU_VIIT_ENTRY_ID_INVALID (0 << SCU_VIIT_ENTRY_ID_SHIFT) +#define SCU_VIIT_ENTRY_ID_VIIT (1 << SCU_VIIT_ENTRY_ID_SHIFT) +#define SCU_VIIT_ENTRY_ID_IIT (2 << SCU_VIIT_ENTRY_ID_SHIFT) +#define SCU_VIIT_ENTRY_ID_VIRT_EXP (3 << SCU_VIIT_ENTRY_ID_SHIFT) + +#define SCU_VIIT_IPPT_SSP_INITIATOR (0x01 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT) +#define SCU_VIIT_IPPT_SMP_INITIATOR (0x02 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT) +#define SCU_VIIT_IPPT_STP_INITIATOR (0x04 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT) +#define SCU_VIIT_IPPT_INITIATOR \ + (\ + SCU_VIIT_IPPT_SSP_INITIATOR \ + | SCU_VIIT_IPPT_SMP_INITIATOR \ + | SCU_VIIT_IPPT_STP_INITIATOR \ + ) + +#define SCU_VIIT_STATUS_RNC_VALID (0x01 << SCU_VIIT_ENTRY_STATUS_SHIFT) +#define SCU_VIIT_STATUS_ADDRESS_VALID (0x02 << SCU_VIIT_ENTRY_STATUS_SHIFT) +#define SCU_VIIT_STATUS_RNI_VALID (0x04 << SCU_VIIT_ENTRY_STATUS_SHIFT) +#define SCU_VIIT_STATUS_ALL_VALID \ + (\ + SCU_VIIT_STATUS_RNC_VALID \ + | SCU_VIIT_STATUS_ADDRESS_VALID \ + | SCU_VIIT_STATUS_RNI_VALID \ + ) + +#define SCU_VIIT_IPPT_SMP_TARGET (0x10 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT) + +/** + * struct scu_viit_entry - This is the SCU Virtual Initiator Table Entry + * + * + */ +struct scu_viit_entry { + /** + * This must be encoded as to the type of initiator that is being constructed + * for this port. 
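Editor's note: the register definitions in this header follow one pattern throughout: each field gets a _SHIFT/_MASK pair (for example SCU_VIIT_ENTRY_ID_SHIFT and SCU_VIIT_ENTRY_ID_MASK above), and values are built or extracted with the generic SCU_GEN_VALUE()/SCU_GEN_BIT() helpers defined a little further down. A generic, stand-alone sketch of that encode/decode pattern; the macro names here are illustrative, not the header's.

#include <stdint.h>
#include <stdio.h>

/* Generic form of the SHIFT/MASK encode/decode used throughout this header. */
#define FIELD_ENCODE(val, shift, mask)	((((uint32_t)(val)) << (shift)) & (mask))
#define FIELD_DECODE(reg, shift, mask)	((((uint32_t)(reg)) & (mask)) >> (shift))

int main(void)
{
	/* The VIIT entry ID field lives in bits 31:30 (shift 30, mask 0xC0000000). */
	uint32_t reg = FIELD_ENCODE(2, 30, 0xC0000000u);	/* ID = 2 (IIT) */

	printf("reg=0x%08x id=%u\n", (unsigned)reg,
	       (unsigned)FIELD_DECODE(reg, 30, 0xC0000000u));
	return 0;
}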
+ */ + u32 status; + + /** + * Virtual initiator high SAS Address + */ + u32 initiator_sas_address_hi; + + /** + * Virtual initiator low SAS Address + */ + u32 initiator_sas_address_lo; + + /** + * This must be 0 + */ + u32 reserved; + +}; + + +/* IIT Status Defines */ +#define SCU_IIT_ENTRY_ID_MASK (0xC0000000) +#define SCU_IIT_ENTRY_ID_SHIFT (30) + +#define SCU_IIT_ENTRY_STATUS_UPDATE_MASK (0x20000000) +#define SCU_IIT_ENTRY_STATUS_UPDATE_SHIFT (29) + +#define SCU_IIT_ENTRY_LPI_MASK (0x00000F00) +#define SCU_IIT_ENTRY_LPI_SHIFT (8) + +#define SCU_IIT_ENTRY_STATUS_MASK (0x000000FF) +#define SCU_IIT_ENTRY_STATUS_SHIFT (0) + +/* IIT Remote Initiator Defines */ +#define SCU_IIT_ENTRY_REMOTE_TAG_MASK (0x0000FFFF) +#define SCU_IIT_ENTRY_REMOTE_TAG_SHIFT (0) + +#define SCU_IIT_ENTRY_REMOTE_RNC_MASK (0x0FFF0000) +#define SCU_IIT_ENTRY_REMOTE_RNC_SHIFT (16) + +#define SCU_IIT_ENTRY_ID_INVALID (0 << SCU_IIT_ENTRY_ID_SHIFT) +#define SCU_IIT_ENTRY_ID_VIIT (1 << SCU_IIT_ENTRY_ID_SHIFT) +#define SCU_IIT_ENTRY_ID_IIT (2 << SCU_IIT_ENTRY_ID_SHIFT) +#define SCU_IIT_ENTRY_ID_VIRT_EXP (3 << SCU_IIT_ENTRY_ID_SHIFT) + +/** + * struct scu_iit_entry - This will be implemented later when we support + * virtual functions + * + * + */ +struct scu_iit_entry { + u32 status; + u32 remote_initiator_sas_address_hi; + u32 remote_initiator_sas_address_lo; + u32 remote_initiator; + +}; + +/* Generate a value for an SCU register */ +#define SCU_GEN_VALUE(name, value) \ + (((value) << name ## _SHIFT) & (name ## _MASK)) + +/* + * Generate a bit value for an SCU register + * Make sure that the register MASK is just a single bit */ +#define SCU_GEN_BIT(name) \ + SCU_GEN_VALUE(name, ((u32)1)) + +#define SCU_SET_BIT(name, reg_value) \ + ((reg_value) | SCU_GEN_BIT(name)) + +#define SCU_CLEAR_BIT(name, reg_value) \ + ((reg_value)$ ~(SCU_GEN_BIT(name))) + +/* + * ***************************************************************************** + * Unions for bitfield definitions of SCU Registers + * SMU Post Context Port + * ***************************************************************************** */ +#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_SHIFT (0) +#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_MASK (0x00000FFF) +#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_SHIFT (12) +#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_MASK (0x0000F000) +#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_SHIFT (16) +#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_MASK (0x00030000) +#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_SHIFT (18) +#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_MASK (0x00FC0000) +#define SMU_POST_CONTEXT_PORT_RESERVED_MASK (0xFF000000) + +#define SMU_PCP_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_POST_CONTEXT_PORT_ ## name, value) + +/* ***************************************************************************** */ +#define SMU_INTERRUPT_STATUS_COMPLETION_SHIFT (31) +#define SMU_INTERRUPT_STATUS_COMPLETION_MASK (0x80000000) +#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_SHIFT (1) +#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_MASK (0x00000002) +#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_SHIFT (0) +#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_MASK (0x00000001) +#define SMU_INTERRUPT_STATUS_RESERVED_MASK (0x7FFFFFFC) + +#define SMU_ISR_GEN_BIT(name) \ + SCU_GEN_BIT(SMU_INTERRUPT_STATUS_ ## name) + +#define SMU_ISR_QUEUE_ERROR SMU_ISR_GEN_BIT(QUEUE_ERROR) +#define SMU_ISR_QUEUE_SUSPEND SMU_ISR_GEN_BIT(QUEUE_SUSPEND) +#define SMU_ISR_COMPLETION SMU_ISR_GEN_BIT(COMPLETION) + +/* 
***************************************************************************** */ +#define SMU_INTERRUPT_MASK_COMPLETION_SHIFT (31) +#define SMU_INTERRUPT_MASK_COMPLETION_MASK (0x80000000) +#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_SHIFT (1) +#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_MASK (0x00000002) +#define SMU_INTERRUPT_MASK_QUEUE_ERROR_SHIFT (0) +#define SMU_INTERRUPT_MASK_QUEUE_ERROR_MASK (0x00000001) +#define SMU_INTERRUPT_MASK_RESERVED_MASK (0x7FFFFFFC) + +#define SMU_IMR_GEN_BIT(name) \ + SCU_GEN_BIT(SMU_INTERRUPT_MASK_ ## name) + +#define SMU_IMR_QUEUE_ERROR SMU_IMR_GEN_BIT(QUEUE_ERROR) +#define SMU_IMR_QUEUE_SUSPEND SMU_IMR_GEN_BIT(QUEUE_SUSPEND) +#define SMU_IMR_COMPLETION SMU_IMR_GEN_BIT(COMPLETION) + +/* ***************************************************************************** */ +#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_SHIFT (0) +#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_MASK (0x0000001F) +#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_SHIFT (8) +#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_MASK (0x0000FF00) +#define SMU_INTERRUPT_COALESCING_CONTROL_RESERVED_MASK (0xFFFF00E0) + +#define SMU_ICC_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_INTERRUPT_COALESCING_CONTROL_ ## name, value) + +/* ***************************************************************************** */ +#define SMU_TASK_CONTEXT_RANGE_START_SHIFT (0) +#define SMU_TASK_CONTEXT_RANGE_START_MASK (0x00000FFF) +#define SMU_TASK_CONTEXT_RANGE_ENDING_SHIFT (16) +#define SMU_TASK_CONTEXT_RANGE_ENDING_MASK (0x0FFF0000) +#define SMU_TASK_CONTEXT_RANGE_ENABLE_SHIFT (31) +#define SMU_TASK_CONTEXT_RANGE_ENABLE_MASK (0x80000000) +#define SMU_TASK_CONTEXT_RANGE_RESERVED_MASK (0x7000F000) + +#define SMU_TCR_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_TASK_CONTEXT_RANGE_ ## name, value) + +#define SMU_TCR_GEN_BIT(name, value) \ + SCU_GEN_BIT(SMU_TASK_CONTEXT_RANGE_ ## name) + +/* ***************************************************************************** */ + +#define SMU_COMPLETION_QUEUE_PUT_POINTER_SHIFT (0) +#define SMU_COMPLETION_QUEUE_PUT_POINTER_MASK (0x00003FFF) +#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_SHIFT (15) +#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_MASK (0x00008000) +#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_SHIFT (16) +#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_MASK (0x03FF0000) +#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_SHIFT (26) +#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_MASK (0x04000000) +#define SMU_COMPLETION_QUEUE_PUT_RESERVED_MASK (0xF8004000) + +#define SMU_CQPR_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_PUT_ ## name, value) + +#define SMU_CQPR_GEN_BIT(name) \ + SCU_GEN_BIT(SMU_COMPLETION_QUEUE_PUT_ ## name) + +/* ***************************************************************************** */ + +#define SMU_COMPLETION_QUEUE_GET_POINTER_SHIFT (0) +#define SMU_COMPLETION_QUEUE_GET_POINTER_MASK (0x00003FFF) +#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT (15) +#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_MASK (0x00008000) +#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT (16) +#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK (0x03FF0000) +#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT (26) +#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_MASK (0x04000000) +#define SMU_COMPLETION_QUEUE_GET_ENABLE_SHIFT (30) +#define SMU_COMPLETION_QUEUE_GET_ENABLE_MASK (0x40000000) +#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_SHIFT (31) +#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_MASK (0x80000000) +#define 
SMU_COMPLETION_QUEUE_GET_RESERVED_MASK (0x38004000) + +#define SMU_CQGR_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_GET_ ## name, value) + +#define SMU_CQGR_GEN_BIT(name) \ + SCU_GEN_BIT(SMU_COMPLETION_QUEUE_GET_ ## name) + +#define SMU_CQGR_CYCLE_BIT \ + SMU_CQGR_GEN_BIT(CYCLE_BIT) + +#define SMU_CQGR_EVENT_CYCLE_BIT \ + SMU_CQGR_GEN_BIT(EVENT_CYCLE_BIT) + +#define SMU_CQGR_GET_POINTER_SET(value) \ + SMU_CQGR_GEN_VAL(POINTER, value) + + +/* ***************************************************************************** */ +#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_SHIFT (0) +#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_MASK (0x00003FFF) +#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_SHIFT (16) +#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_MASK (0x03FF0000) +#define SMU_COMPLETION_QUEUE_CONTROL_RESERVED_MASK (0xFC00C000) + +#define SMU_CQC_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_CONTROL_ ## name, value) + +#define SMU_CQC_QUEUE_LIMIT_SET(value) \ + SMU_CQC_GEN_VAL(QUEUE_LIMIT, value) + +#define SMU_CQC_EVENT_LIMIT_SET(value) \ + SMU_CQC_GEN_VAL(EVENT_LIMIT, value) + + +/* ***************************************************************************** */ +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT (0) +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK (0x00000FFF) +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT (12) +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK (0x00007000) +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT (15) +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK (0x07FF8000) +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT (27) +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK (0x08000000) +#define SMU_DEVICE_CONTEXT_CAPACITY_RESERVED_MASK (0xF0000000) + +#define SMU_DCC_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_DEVICE_CONTEXT_CAPACITY_ ## name, value) + +#define SMU_DCC_GET_MAX_PEG(value) \ + (\ + ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK) \ + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT \ + ) + +#define SMU_DCC_GET_MAX_LP(value) \ + (\ + ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \ + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT \ + ) + +#define SMU_DCC_GET_MAX_TC(value) \ + (\ + ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \ + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT \ + ) + +#define SMU_DCC_GET_MAX_RNC(value) \ + (\ + ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \ + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \ + ) + +/* ***************************************************************************** */ +#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_SHIFT (0) +#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_MASK (0x00000001) +#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_SHIFT (1) +#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_MASK (0x00000002) +#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_SHIFT (2) +#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_MASK (0x00000004) +#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_SHIFT (3) +#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_MASK (0x00000008) +#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_SHIFT (16) +#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_MASK (0x000F0000) +#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_SHIFT (31) +#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_MASK (0x80000000) +#define SMU_CLOCK_GATING_CONTROL_RESERVED_MASK (0x7FF0FFF0) + +#define SMU_CGUCR_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_CLOCK_GATING_CONTROL_##name, value) + +#define SMU_CGUCR_GEN_BIT(name) \ + SCU_GEN_BIT(SMU_CLOCK_GATING_CONTROL_##name) + +/* 
-------------------------------------------------------------------------- */ + +#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0) +#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_MASK (0x00000001) +#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_SHIFT (1) +#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_MASK (0x00000002) +#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_SHIFT (16) +#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_MASK (0x00010000) +#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_SHIFT (17) +#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_MASK (0x00020000) +#define SMU_CONTROL_STATUS_RESERVED_MASK (0xFFFCFFFC) + +#define SMU_SMUCSR_GEN_BIT(name) \ + SCU_GEN_BIT(SMU_CONTROL_STATUS_ ## name) + +#define SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \ + (SMU_SMUCSR_GEN_BIT(SCHEDULER_RAM_INIT_COMPLETED)) + +#define SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \ + (SMU_SMUCSR_GEN_BIT(CONTEXT_RAM_INIT_COMPLETED)) + +#define SCU_RAM_INIT_COMPLETED \ + (\ + SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \ + | SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \ + ) + +/* -------------------------------------------------------------------------- */ + +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_SHIFT (0) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_MASK (0x00000001) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_SHIFT (1) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_MASK (0x00000002) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_SHIFT (2) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_MASK (0x00000004) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_SHIFT (3) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_MASK (0x00000008) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_SHIFT (8) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_MASK (0x00000100) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_SHIFT (9) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_MASK (0x00000200) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_SHIFT (10) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK (0x00000400) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_SHIFT (11) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_MASK (0x00000800) + +#define SMU_RESET_PROTOCOL_ENGINE(peg, pe) \ + ((1 << (pe)) << ((peg) * 8)) + +#define SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \ + (\ + SMU_RESET_PROTOCOL_ENGINE(peg, 0) \ + | SMU_RESET_PROTOCOL_ENGINE(peg, 1) \ + | SMU_RESET_PROTOCOL_ENGINE(peg, 2) \ + | SMU_RESET_PROTOCOL_ENGINE(peg, 3) \ + ) + +#define SMU_RESET_ALL_PROTOCOL_ENGINES() \ + (\ + SMU_RESET_PEG_PROTOCOL_ENGINES(0) \ + | SMU_RESET_PEG_PROTOCOL_ENGINES(1) \ + ) + +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_SHIFT (16) +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_MASK (0x00010000) +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_SHIFT (17) +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_MASK (0x00020000) +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_SHIFT (18) +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_MASK (0x00040000) +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_SHIFT (19) +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_MASK (0x00080000) + +#define SMU_RESET_WIDE_PORT_QUEUE(peg, wide_port) \ + ((1 << ((wide_port) / 2)) << ((peg) * 2) << 16) + +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_SHIFT (20) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_MASK (0x00100000) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_SHIFT (21) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_MASK (0x00200000) +#define SMU_SOFTRESET_CONTROL_RESET_SCU_SHIFT (22) +#define 
SMU_SOFTRESET_CONTROL_RESET_SCU_MASK (0x00400000) + +/* + * It seems to make sense that if you are going to reset the protocol + * engine group that you would also reset all of the protocol engines */ +#define SMU_RESET_PROTOCOL_ENGINE_GROUP(peg) \ + (\ + (1 << ((peg) + 20)) \ + | SMU_RESET_WIDE_PORT_QUEUE(peg, 0) \ + | SMU_RESET_WIDE_PORT_QUEUE(peg, 1) \ + | SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \ + ) + +#define SMU_RESET_ALL_PROTOCOL_ENGINE_GROUPS() \ + (\ + SMU_RESET_PROTOCOL_ENGINE_GROUP(0) \ + | SMU_RESET_PROTOCOL_ENGINE_GROUP(1) \ + ) + +#define SMU_RESET_SCU() (0xFFFFFFFF) + + + +/* ***************************************************************************** */ +#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_SHIFT (0) +#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_MASK (0x00000FFF) +#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_SHIFT (16) +#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_MASK (0x0FFF0000) +#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_SHIFT (31) +#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_MASK (0x80000000) +#define SMU_TASK_CONTEXT_ASSIGNMENT_RESERVED_MASK (0x7000F000) + +#define SMU_TCA_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name, value) + +#define SMU_TCA_GEN_BIT(name) \ + SCU_GEN_BIT(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name) + +/* ***************************************************************************** */ +#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_SHIFT (0) +#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_MASK (0x00000FFF) +#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_RESERVED_MASK (0xFFFFF000) + +#define SCU_UFQC_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_ ## name, value) + +#define SCU_UFQC_QUEUE_SIZE_SET(value) \ + SCU_UFQC_GEN_VAL(QUEUE_SIZE, value) + +/* ***************************************************************************** */ +#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_SHIFT (0) +#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_MASK (0x00000FFF) +#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_SHIFT (12) +#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_MASK (0x00001000) +#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_RESERVED_MASK (0xFFFFE000) + +#define SCU_UFQPP_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name, value) + +#define SCU_UFQPP_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name) + +/* + * ***************************************************************************** + * * SDMA Registers + * ***************************************************************************** */ +#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_SHIFT (0) +#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_MASK (0x00000FFF) +#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_SHIFT (12) +#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_MASK (12) +#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_SHIFT (31) +#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_MASK (0x80000000) +#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_RESERVED_MASK (0x7FFFE000) + +#define SCU_UFQGP_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name, value) + +#define SCU_UFQGP_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name) + +#define SCU_UFQGP_CYCLE_BIT(value) \ + SCU_UFQGP_GEN_BIT(CYCLE_BIT, value) + +#define SCU_UFQGP_GET_POINTER(value) \ + SCU_UFQGP_GEN_VALUE(POINTER, value) + +#define SCU_UFQGP_ENABLE(value) 
\ + (SCU_UFQGP_GEN_BIT(ENABLE) | value) + +#define SCU_UFQGP_DISABLE(value) \ + (~SCU_UFQGP_GEN_BIT(ENABLE) & value) + +#define SCU_UFQGP_VALUE(bit, value) \ + (SCU_UFQGP_CYCLE_BIT(bit) | SCU_UFQGP_GET_POINTER(value)) + +/* ***************************************************************************** */ +#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SHIFT (0) +#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_MASK (0x0000FFFF) +#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (16) +#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00010000) +#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_SHIFT (17) +#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_MASK (0x00020000) +#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_SHIFT (18) +#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_MASK (0x00040000) +#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_SHIFT (19) +#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_MASK (0x00080000) +#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_SHIFT (20) +#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_MASK (0x00100000) +#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_SHIFT (21) +#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_MASK (0x00200000) +#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_SHIFT (22) +#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_MASK (0x00400000) +#define SCU_PDMA_CONFIGURATION_RESERVED_MASK (0xFF800000) + +#define SCU_PDMACR_GEN_VALUE(name, value) \ + SCU_GEN_VALUE(SCU_PDMA_CONFIGURATION_ ## name, value) + +#define SCU_PDMACR_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_PDMA_CONFIGURATION_ ## name) + +#define SCU_PDMACR_BE_GEN_BIT(name) \ + SCU_PCMACR_GEN_BIT(BIG_ENDIAN_CONTROL_ ## name) + +/* ***************************************************************************** */ +#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (8) +#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00000100) + +#define SCU_CDMACR_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_CDMA_CONFIGURATION_ ## name) + +/* + * ***************************************************************************** + * * SCU Link Layer Registers + * ***************************************************************************** */ +#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_SHIFT (0) +#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_MASK (0x000000FF) +#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_SHIFT (8) +#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_MASK (0x0000FF00) +#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_SHIFT (16) +#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_MASK (0x00FF0000) +#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_SHIFT (24) +#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_MASK (0xFF000000) +#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_REQUIRED_MASK (0x00000000) +#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_DEFAULT_MASK (0x7D00676F) +#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_RESERVED_MASK (0x00FF0000) + +#define SCU_SAS_SPDTOV_GEN_VALUE(name, value) \ + SCU_GEN_VALUE(SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_ ## name, value) + + +#define SCU_LINK_STATUS_DWORD_SYNC_AQUIRED_SHIFT (2) +#define SCU_LINK_STATUS_DWORD_SYNC_AQUIRED_MASK (0x00000004) +#define 
SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_SHIFT (4) +#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_MASK (0x00000010) +#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_SHIFT (5) +#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_MASK (0x00000020) +#define SCU_LINK_STATUS_RESERVED_MASK (0xFFFFFFCD) + +#define SCU_SAS_LLSTA_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_LINK_STATUS_ ## name) + + +/* TODO: Where is the SATA_PSELTOV register? */ + +/* + * ***************************************************************************** + * * SCU SAS Maximum Arbitration Wait Time Timeout Register + * ***************************************************************************** */ +#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_SHIFT (0) +#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_MASK (0x00007FFF) +#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_SHIFT (15) +#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_MASK (0x00008000) + +#define SCU_SAS_MAWTTOV_GEN_VALUE(name, value) \ + SCU_GEN_VALUE(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name, value) + +#define SCU_SAS_MAWTTOV_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name) + + +/* + * TODO: Where is the SAS_LNKTOV register? + * TODO: Where is the SAS_PHYTOV register? */ + +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT (1) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_MASK (0x00000002) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_SHIFT (2) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_MASK (0x00000004) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_SHIFT (3) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_MASK (0x00000008) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_SHIFT (8) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_MASK (0x00000100) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_SHIFT (9) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_MASK (0x00000200) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_SHIFT (10) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_MASK (0x00000400) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_SHIFT (11) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_MASK (0x00000800) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_SHIFT (16) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_MASK (0x000F0000) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_SHIFT (24) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_MASK (0x0F000000) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_SHIFT (28) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_MASK (0x70000000) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_RESERVED_MASK (0x80F0F1F1) + +#define SCU_SAS_TIID_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name, value) + +#define SCU_SAS_TIID_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name) + +/* SAS Identify Frame PHY Identifier Register */ +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_SHIFT (16) +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_MASK (0x00010000) +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_SHIFT (17) +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_MASK (0x00020000) +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_SHIFT (18) +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_MASK (0x00040000) +#define 
SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_SHIFT (24) +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_MASK (0xFF000000) +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_RESERVED_MASK (0x00F800FF) + +#define SCU_SAS_TIPID_GEN_VALUE(name, value) \ + SCU_GEN_VALUE(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name, value) + +#define SCU_SAS_TIPID_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name) + + +#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_SHIFT (4) +#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_MASK (0x00000010) +#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_SHIFT (6) +#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_MASK (0x00000040) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_SHIFT (7) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_MASK (0x00000080) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_SHIFT (8) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_MASK (0x00000100) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_SHIFT (9) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_MASK (0x00000200) +#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_SHIFT (11) +#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_MASK (0x00000800) +#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_SHIFT (12) +#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_MASK (0x00001000) +#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_SHIFT (13) +#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_MASK (0x00002000) +#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_SHIFT (14) +#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_MASK (0x00004000) +#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_SHIFT (15) +#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_MASK (0x00008000) +#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_SHIFT (23) +#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_MASK (0x00800000) +#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_SHIFT (27) +#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_MASK (0x08000000) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_SHIFT (28) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_MASK (0x10000000) +#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_SHIFT (29) +#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_MASK (0x20000000) +#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_SHIFT (30) +#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_MASK (0x40000000) +#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_SHIFT (31) +#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_MASK (0x80000000) +#define SCU_SAS_PHY_CONFIGURATION_REQUIRED_MASK (0x0100000F) +#define SCU_SAS_PHY_CONFIGURATION_DEFAULT_MASK (0x4180100F) +#define SCU_SAS_PHY_CONFIGURATION_RESERVED_MASK (0x00000000) + +#define SCU_SAS_PCFG_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_SAS_PHY_CONFIGURATION_ ## name) + +#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_SHIFT (0) +#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_MASK (0x000007FF) +#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_SHIFT (16) +#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_MASK (0x00ff0000) + +#define SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_##name, value) + +#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_SHIFT (0) +#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_MASK (0x0003FFFF) +#define 
SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_SHIFT (31) +#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_MASK (0x80000000) +#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_RESERVED_MASK (0x7FFC0000) + +#define SCU_ENSPINUP_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name, value) + +#define SCU_ENSPINUP_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name) + + +#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_SHIFT (1) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_MASK (0x00000002) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_SHIFT (4) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_MASK (0x000000F0) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_SHIFT (8) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_MASK (0x00000100) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_SHIFT (9) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_MASK (0x00000201) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_SHIFT (10) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_MASK (0x00000401) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_SHIFT (11) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_MASK (0x00000801) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_SHIFT (12) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_MASK (0x00001001) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_SHIFT (13) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_MASK (0x00002001) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_SHIFT (31) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_MASK (0x80000000) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_DEFAULT_MASK (0x00003F01) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_REQUIRED_MASK (0x00000001) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_RESERVED_MASK (0x7FFFC00D) + +#define SCU_SAS_PHYCAP_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name, value) + +#define SCU_SAS_PHYCAP_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name) + + +#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_SHIFT (0) +#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_MASK (0x000000FF) +#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_SHIFT (31) +#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_MASK (0x80000000) +#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_RESERVED_MASK (0x7FFFFF00) + +#define SCU_PSZGCR_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name, value) + +#define SCU_PSZGCR_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name) + +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_SHIFT (1) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_MASK (0x00000002) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_SHIFT (2) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_MASK (0x00000004) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_SHIFT (4) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_MASK (0x00000010) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_SHIFT (5) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_MASK (0x00000020) +#define 
SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_SHIFT (16) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_MASK (0x00030000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_SHIFT (19) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_MASK (0x00080000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_SHIFT (20) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_MASK (0x00300000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_SHIFT (23) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_MASK (0x00800000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_SHIFT (24) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_MASK (0x03000000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_SHIFT (27) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_MASK (0x08000000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_SHIFT (28) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_MASK (0x30000000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_SHIFT (31) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_MASK (0x80000000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_RESERVED_MASK (0x4444FFC9) + +#define SCU_PEG_SCUVZECR_GEN_VAL(name, val) \ + SCU_GEN_VALUE(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name, val) + +#define SCU_PEG_SCUVZECR_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name) + + +/* + * ***************************************************************************** + * * Port Task Scheduler registers shift and mask values + * ***************************************************************************** */ +#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_SHIFT (0) +#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_MASK (0x0000FFFF) +#define SCU_PTSG_CONTROL_TASK_TIMEOUT_SHIFT (16) +#define SCU_PTSG_CONTROL_TASK_TIMEOUT_MASK (0x00FF0000) +#define SCU_PTSG_CONTROL_PTSG_ENABLE_SHIFT (24) +#define SCU_PTSG_CONTROL_PTSG_ENABLE_MASK (0x01000000) +#define SCU_PTSG_CONTROL_ETM_ENABLE_SHIFT (25) +#define SCU_PTSG_CONTROL_ETM_ENABLE_MASK (0x02000000) +#define SCU_PTSG_CONTROL_DEFAULT_MASK (0x00020002) +#define SCU_PTSG_CONTROL_REQUIRED_MASK (0x00000000) +#define SCU_PTSG_CONTROL_RESERVED_MASK (0xFC000000) + +#define SCU_PTSGCR_GEN_VAL(name, val) \ + SCU_GEN_VALUE(SCU_PTSG_CONTROL_ ## name, val) + +#define SCU_PTSGCR_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_PTSG_CONTROL_ ## name) + + +/* ***************************************************************************** */ +#define SCU_PTSG_REAL_TIME_CLOCK_SHIFT (0) +#define SCU_PTSG_REAL_TIME_CLOCK_MASK (0x0000FFFF) +#define SCU_PTSG_REAL_TIME_CLOCK_RESERVED_MASK (0xFFFF0000) + +#define SCU_RTCR_GEN_VAL(name, val) \ + SCU_GEN_VALUE(SCU_PTSG_ ## name, val) + + +#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_SHIFT (0) +#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_MASK (0x00FFFFFF) +#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_RESERVED_MASK (0xFF000000) + +#define SCU_RTCCR_GEN_VAL(name, val) \ + 
SCU_GEN_VALUE(SCU_PTSG_REAL_TIME_CLOCK_CONTROL_ ## name, val) + + +#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_SHIFT (0) +#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_MASK (0x00000001) +#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_SHIFT (1) +#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_MASK (0x00000002) +#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_RESERVED_MASK (0xFFFFFFFC) + +#define SCU_PTSxCR_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ ## name) + + +#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_SHIFT (0) +#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_MASK (0x00000001) +#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_SHIFT (1) +#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_MASK (0x00000002) +#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_SHIFT (2) +#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_MASK (0x00000004) +#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_RESERVED_MASK (0xFFFFFFF8) + +#define SCU_PTSxSR_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name) + +/* + * ***************************************************************************** + * * SMU Registers + * ***************************************************************************** */ + +/* + * ---------------------------------------------------------------------------- + * SMU Registers + * These registers are based off of BAR0 + * + * To calculate the offset for other functions use + * BAR0 + FN# * SystemPageSize * 2 + * + * The TCA is only accessable from FN#0 (Physical Function) and each + * is programmed by (BAR0 + SCU_SMU_TCA_OFFSET + (FN# * 0x04)) or + * TCA0 for FN#0 is at BAR0 + 0x0400 + * TCA1 for FN#1 is at BAR0 + 0x0404 + * etc. 
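+ *
+ * A worked illustration (the 4 KiB system page size used below is an
+ * assumption; this header does not fix it): FN#1's copy of the interrupt
+ * status register at SCU_SMU_ISR_OFFSET (0x0010) would be reached at
+ * BAR0 + 1 * 0x1000 * 2 + 0x0010 = BAR0 + 0x2010.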
+ * ---------------------------------------------------------------------------- + * Accessable to all FN#s */ +#define SCU_SMU_PCP_OFFSET 0x0000 +#define SCU_SMU_AMR_OFFSET 0x0004 +#define SCU_SMU_ISR_OFFSET 0x0010 +#define SCU_SMU_IMR_OFFSET 0x0014 +#define SCU_SMU_ICC_OFFSET 0x0018 +#define SCU_SMU_HTTLBAR_OFFSET 0x0020 +#define SCU_SMU_HTTUBAR_OFFSET 0x0024 +#define SCU_SMU_TCR_OFFSET 0x0028 +#define SCU_SMU_CQLBAR_OFFSET 0x0030 +#define SCU_SMU_CQUBAR_OFFSET 0x0034 +#define SCU_SMU_CQPR_OFFSET 0x0040 +#define SCU_SMU_CQGR_OFFSET 0x0044 +#define SCU_SMU_CQC_OFFSET 0x0048 +/* Accessable to FN#0 only */ +#define SCU_SMU_RNCLBAR_OFFSET 0x0080 +#define SCU_SMU_RNCUBAR_OFFSET 0x0084 +#define SCU_SMU_DCC_OFFSET 0x0090 +#define SCU_SMU_DFC_OFFSET 0x0094 +#define SCU_SMU_SMUCSR_OFFSET 0x0098 +#define SCU_SMU_SCUSRCR_OFFSET 0x009C +#define SCU_SMU_SMAW_OFFSET 0x00A0 +#define SCU_SMU_SMDW_OFFSET 0x00A4 +/* Accessable to FN#0 only */ +#define SCU_SMU_TCA_OFFSET 0x0400 +/* Accessable to all FN#s */ +#define SCU_SMU_MT_MLAR0_OFFSET 0x2000 +#define SCU_SMU_MT_MUAR0_OFFSET 0x2004 +#define SCU_SMU_MT_MDR0_OFFSET 0x2008 +#define SCU_SMU_MT_VCR0_OFFSET 0x200C +#define SCU_SMU_MT_MLAR1_OFFSET 0x2010 +#define SCU_SMU_MT_MUAR1_OFFSET 0x2014 +#define SCU_SMU_MT_MDR1_OFFSET 0x2018 +#define SCU_SMU_MT_VCR1_OFFSET 0x201C +#define SCU_SMU_MPBA_OFFSET 0x3000 + +/** + * struct smu_registers - These are the SMU registers + * + * + */ +struct smu_registers { +/* 0x0000 PCP */ + u32 post_context_port; +/* 0x0004 AMR */ + u32 address_modifier; + u32 reserved_08; + u32 reserved_0C; +/* 0x0010 ISR */ + u32 interrupt_status; +/* 0x0014 IMR */ + u32 interrupt_mask; +/* 0x0018 ICC */ + u32 interrupt_coalesce_control; + u32 reserved_1C; +/* 0x0020 HTTLBAR */ + u32 host_task_table_lower; +/* 0x0024 HTTUBAR */ + u32 host_task_table_upper; +/* 0x0028 TCR */ + u32 task_context_range; + u32 reserved_2C; +/* 0x0030 CQLBAR */ + u32 completion_queue_lower; +/* 0x0034 CQUBAR */ + u32 completion_queue_upper; + u32 reserved_38; + u32 reserved_3C; +/* 0x0040 CQPR */ + u32 completion_queue_put; +/* 0x0044 CQGR */ + u32 completion_queue_get; +/* 0x0048 CQC */ + u32 completion_queue_control; + u32 reserved_4C; + u32 reserved_5x[4]; + u32 reserved_6x[4]; + u32 reserved_7x[4]; +/* + * Accessable to FN#0 only + * 0x0080 RNCLBAR */ + u32 remote_node_context_lower; +/* 0x0084 RNCUBAR */ + u32 remote_node_context_upper; + u32 reserved_88; + u32 reserved_8C; +/* 0x0090 DCC */ + u32 device_context_capacity; +/* 0x0094 DFC */ + u32 device_function_capacity; +/* 0x0098 SMUCSR */ + u32 control_status; +/* 0x009C SCUSRCR */ + u32 soft_reset_control; +/* 0x00A0 SMAW */ + u32 mmr_address_window; +/* 0x00A4 SMDW */ + u32 mmr_data_window; +/* 0x00A8 CGUCR */ + u32 clock_gating_control; +/* 0x00AC CGUPC */ + u32 clock_gating_performance; +/* A whole bunch of reserved space */ + u32 reserved_Bx[4]; + u32 reserved_Cx[4]; + u32 reserved_Dx[4]; + u32 reserved_Ex[4]; + u32 reserved_Fx[4]; + u32 reserved_1xx[64]; + u32 reserved_2xx[64]; + u32 reserved_3xx[64]; +/* + * Accessable to FN#0 only + * 0x0400 TCA */ + u32 task_context_assignment[256]; +/* MSI-X registers not included */ +}; + +/* + * ***************************************************************************** + * SDMA Registers + * ***************************************************************************** */ +#define SCU_SDMA_BASE 0x6000 +#define SCU_SDMA_PUFATLHAR_OFFSET 0x0000 +#define SCU_SDMA_PUFATUHAR_OFFSET 0x0004 +#define SCU_SDMA_UFLHBAR_OFFSET 0x0008 +#define SCU_SDMA_UFUHBAR_OFFSET 
0x000C +#define SCU_SDMA_UFQC_OFFSET 0x0010 +#define SCU_SDMA_UFQPP_OFFSET 0x0014 +#define SCU_SDMA_UFQGP_OFFSET 0x0018 +#define SCU_SDMA_PDMACR_OFFSET 0x001C +#define SCU_SDMA_CDMACR_OFFSET 0x0080 + +/** + * struct scu_sdma_registers - These are the SCU SDMA Registers + * + * + */ +struct scu_sdma_registers { +/* 0x0000 PUFATLHAR */ + u32 uf_address_table_lower; +/* 0x0004 PUFATUHAR */ + u32 uf_address_table_upper; +/* 0x0008 UFLHBAR */ + u32 uf_header_base_address_lower; +/* 0x000C UFUHBAR */ + u32 uf_header_base_address_upper; +/* 0x0010 UFQC */ + u32 unsolicited_frame_queue_control; +/* 0x0014 UFQPP */ + u32 unsolicited_frame_put_pointer; +/* 0x0018 UFQGP */ + u32 unsolicited_frame_get_pointer; +/* 0x001C PDMACR */ + u32 pdma_configuration; +/* Reserved until offset 0x80 */ + u32 reserved_0020_007C[0x18]; +/* 0x0080 CDMACR */ + u32 cdma_configuration; +/* Remainder SDMA register space */ + u32 reserved_0084_0400[0xDF]; + +}; + +/* + * ***************************************************************************** + * * SCU Link Registers + * ***************************************************************************** */ +#define SCU_PEG0_OFFSET 0x0000 +#define SCU_PEG1_OFFSET 0x8000 + +#define SCU_TL0_OFFSET 0x0000 +#define SCU_TL1_OFFSET 0x0400 +#define SCU_TL2_OFFSET 0x0800 +#define SCU_TL3_OFFSET 0x0C00 + +#define SCU_LL_OFFSET 0x0080 +#define SCU_LL0_OFFSET (SCU_TL0_OFFSET + SCU_LL_OFFSET) +#define SCU_LL1_OFFSET (SCU_TL1_OFFSET + SCU_LL_OFFSET) +#define SCU_LL2_OFFSET (SCU_TL2_OFFSET + SCU_LL_OFFSET) +#define SCU_LL3_OFFSET (SCU_TL3_OFFSET + SCU_LL_OFFSET) + +/* Transport Layer Offsets (PEG + TL) */ +#define SCU_TLCR_OFFSET 0x0000 +#define SCU_TLADTR_OFFSET 0x0004 +#define SCU_TLTTMR_OFFSET 0x0008 +#define SCU_TLEECR0_OFFSET 0x000C +#define SCU_STPTLDARNI_OFFSET 0x0010 + + +#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_SHIFT (0) +#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_MASK (0x00000001) +#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_SHIFT (1) +#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_MASK (0x00000002) +#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_SHIFT (3) +#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_MASK (0x00000008) +#define SCU_TLCR_CMD_NAK_STATUS_CODE_SHIFT (4) +#define SCU_TLCR_CMD_NAK_STATUS_CODE_MASK (0x00000010) +#define SCU_TLCR_RESERVED_MASK (0xFFFFFFEB) + +#define SCU_TLCR_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_TLCR_ ## name) + +/** + * struct scu_transport_layer_registers - These are the SCU Transport Layer + * registers + * + * + */ +struct scu_transport_layer_registers { + /* 0x0000 TLCR */ + u32 control; + /* 0x0004 TLADTR */ + u32 arbitration_delay_timer; + /* 0x0008 TLTTMR */ + u32 timer_test_mode; + /* 0x000C reserved */ + u32 reserved_0C; + /* 0x0010 STPTLDARNI */ + u32 stp_rni; + /* 0x0014 TLFEWPORCTRL */ + u32 tlfe_wpo_read_control; + /* 0x0018 TLFEWPORDATA */ + u32 tlfe_wpo_read_data; + /* 0x001C RXTLSSCSR1 */ + u32 rxtl_single_step_control_status_1; + /* 0x0020 RXTLSSCSR2 */ + u32 rxtl_single_step_control_status_2; + /* 0x0024 AWTRDDCR */ + u32 tlfe_awt_retry_delay_debug_control; + /* Remainder of TL memory space */ + u32 reserved_0028_007F[0x16]; + +}; + +/* Protocol Engine Group Registers */ +#define SCU_SCUVZECRx_OFFSET 0x1080 + +/* Link Layer Offsets (PEG + TL + LL) */ +#define SCU_SAS_SPDTOV_OFFSET 0x0000 +#define SCU_SAS_LLSTA_OFFSET 0x0004 +#define SCU_SATA_PSELTOV_OFFSET 0x0008 +#define SCU_SAS_TIMETOV_OFFSET 0x0010 +#define SCU_SAS_LOSTOT_OFFSET 0x0014 +#define SCU_SAS_LNKTOV_OFFSET 0x0018 +#define SCU_SAS_PHYTOV_OFFSET 0x001C +#define SCU_SAS_AFERCNT_OFFSET 
0x0020 +#define SCU_SAS_WERCNT_OFFSET 0x0024 +#define SCU_SAS_TIID_OFFSET 0x0028 +#define SCU_SAS_TIDNH_OFFSET 0x002C +#define SCU_SAS_TIDNL_OFFSET 0x0030 +#define SCU_SAS_TISSAH_OFFSET 0x0034 +#define SCU_SAS_TISSAL_OFFSET 0x0038 +#define SCU_SAS_TIPID_OFFSET 0x003C +#define SCU_SAS_TIRES2_OFFSET 0x0040 +#define SCU_SAS_ADRSTA_OFFSET 0x0044 +#define SCU_SAS_MAWTTOV_OFFSET 0x0048 +#define SCU_SAS_FRPLDFIL_OFFSET 0x0054 +#define SCU_SAS_RFCNT_OFFSET 0x0060 +#define SCU_SAS_TFCNT_OFFSET 0x0064 +#define SCU_SAS_RFDCNT_OFFSET 0x0068 +#define SCU_SAS_TFDCNT_OFFSET 0x006C +#define SCU_SAS_LERCNT_OFFSET 0x0070 +#define SCU_SAS_RDISERRCNT_OFFSET 0x0074 +#define SCU_SAS_CRERCNT_OFFSET 0x0078 +#define SCU_STPCTL_OFFSET 0x007C +#define SCU_SAS_PCFG_OFFSET 0x0080 +#define SCU_SAS_CLKSM_OFFSET 0x0084 +#define SCU_SAS_TXCOMWAKE_OFFSET 0x0088 +#define SCU_SAS_TXCOMINIT_OFFSET 0x008C +#define SCU_SAS_TXCOMSAS_OFFSET 0x0090 +#define SCU_SAS_COMINIT_OFFSET 0x0094 +#define SCU_SAS_COMWAKE_OFFSET 0x0098 +#define SCU_SAS_COMSAS_OFFSET 0x009C +#define SCU_SAS_SFERCNT_OFFSET 0x00A0 +#define SCU_SAS_CDFERCNT_OFFSET 0x00A4 +#define SCU_SAS_DNFERCNT_OFFSET 0x00A8 +#define SCU_SAS_PRSTERCNT_OFFSET 0x00AC +#define SCU_SAS_CNTCTL_OFFSET 0x00B0 +#define SCU_SAS_SSPTOV_OFFSET 0x00B4 +#define SCU_FTCTL_OFFSET 0x00B8 +#define SCU_FRCTL_OFFSET 0x00BC +#define SCU_FTWMRK_OFFSET 0x00C0 +#define SCU_ENSPINUP_OFFSET 0x00C4 +#define SCU_SAS_TRNTOV_OFFSET 0x00C8 +#define SCU_SAS_PHYCAP_OFFSET 0x00CC +#define SCU_SAS_PHYCTL_OFFSET 0x00D0 +#define SCU_SAS_LLCTL_OFFSET 0x00D8 +#define SCU_AFE_XCVRCR_OFFSET 0x00DC +#define SCU_AFE_LUTCR_OFFSET 0x00E0 + +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT (0UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK (0x000000FFUL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT (8UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK (0x0000FF00UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT (16UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK (0x00FF0000UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT (24UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK (0xFF000000UL) + +#define SCU_SAS_PHYTOV_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value) + +#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0) +#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003) +#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0) +#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2 (1) +#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3 (2) +#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_SHIFT (2) +#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_MASK (0x000003FC) +#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_SHIFT (16) +#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_MASK (0x00010000) +#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_SHIFT (17) +#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_MASK (0x00020000) +#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_SHIFT (24) +#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_MASK (0xFF000000) +#define SCU_SAS_LINK_LAYER_CONTROL_RESERVED (0x00FCFC00) + +#define SCU_SAS_LLCTL_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_CONTROL_ ## name, value) + +#define SCU_SAS_LLCTL_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name) + +#define 
SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT (0xF0) +#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED (0x1FF) +#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_SHIFT (0) +#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK (0x3FF) + +#define SCU_SAS_LLTXCOMSAS_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_TXCOMSAS_ ## name, value) + + +/* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */ +#define SCU_PSZGCR_OFFSET 0x00E4 +#define SCU_SAS_RECPHYCAP_OFFSET 0x00E8 +/* #define SCU_TX_LUTSEL_OFFSET 0x00B8 */ + +#define SCU_SAS_PTxC_OFFSET 0x00D4 /* Same offset as SAS_TCTSTM */ + +/** + * struct scu_link_layer_registers - SCU Link Layer Registers + * + * + */ +struct scu_link_layer_registers { +/* 0x0000 SAS_SPDTOV */ + u32 speed_negotiation_timers; +/* 0x0004 SAS_LLSTA */ + u32 link_layer_status; +/* 0x0008 SATA_PSELTOV */ + u32 port_selector_timeout; + u32 reserved0C; +/* 0x0010 SAS_TIMETOV */ + u32 timeout_unit_value; +/* 0x0014 SAS_RCDTOV */ + u32 rcd_timeout; +/* 0x0018 SAS_LNKTOV */ + u32 link_timer_timeouts; +/* 0x001C SAS_PHYTOV */ + u32 sas_phy_timeouts; +/* 0x0020 SAS_AFERCNT */ + u32 received_address_frame_error_counter; +/* 0x0024 SAS_WERCNT */ + u32 invalid_dword_counter; +/* 0x0028 SAS_TIID */ + u32 transmit_identification; +/* 0x002C SAS_TIDNH */ + u32 sas_device_name_high; +/* 0x0030 SAS_TIDNL */ + u32 sas_device_name_low; +/* 0x0034 SAS_TISSAH */ + u32 source_sas_address_high; +/* 0x0038 SAS_TISSAL */ + u32 source_sas_address_low; +/* 0x003C SAS_TIPID */ + u32 identify_frame_phy_id; +/* 0x0040 SAS_TIRES2 */ + u32 identify_frame_reserved; +/* 0x0044 SAS_ADRSTA */ + u32 received_address_frame; +/* 0x0048 SAS_MAWTTOV */ + u32 maximum_arbitration_wait_timer_timeout; +/* 0x004C SAS_PTxC */ + u32 transmit_primitive; +/* 0x0050 SAS_RORES */ + u32 error_counter_event_notification_control; +/* 0x0054 SAS_FRPLDFIL */ + u32 frxq_payload_fill_threshold; +/* 0x0058 SAS_LLHANG_TOT */ + u32 link_layer_hang_detection_timeout; + u32 reserved_5C; +/* 0x0060 SAS_RFCNT */ + u32 received_frame_count; +/* 0x0064 SAS_TFCNT */ + u32 transmit_frame_count; +/* 0x0068 SAS_RFDCNT */ + u32 received_dword_count; +/* 0x006C SAS_TFDCNT */ + u32 transmit_dword_count; +/* 0x0070 SAS_LERCNT */ + u32 loss_of_sync_error_count; +/* 0x0074 SAS_RDISERRCNT */ + u32 running_disparity_error_count; +/* 0x0078 SAS_CRERCNT */ + u32 received_frame_crc_error_count; +/* 0x007C STPCTL */ + u32 stp_control; +/* 0x0080 SAS_PCFG */ + u32 phy_configuration; +/* 0x0084 SAS_CLKSM */ + u32 clock_skew_management; +/* 0x0088 SAS_TXCOMWAKE */ + u32 transmit_comwake_signal; +/* 0x008C SAS_TXCOMINIT */ + u32 transmit_cominit_signal; +/* 0x0090 SAS_TXCOMSAS */ + u32 transmit_comsas_signal; +/* 0x0094 SAS_COMINIT */ + u32 cominit_control; +/* 0x0098 SAS_COMWAKE */ + u32 comwake_control; +/* 0x009C SAS_COMSAS */ + u32 comsas_control; +/* 0x00A0 SAS_SFERCNT */ + u32 received_short_frame_count; +/* 0x00A4 SAS_CDFERCNT */ + u32 received_frame_without_credit_count; +/* 0x00A8 SAS_DNFERCNT */ + u32 received_frame_after_done_count; +/* 0x00AC SAS_PRSTERCNT */ + u32 phy_reset_problem_count; +/* 0x00B0 SAS_CNTCTL */ + u32 counter_control; +/* 0x00B4 SAS_SSPTOV */ + u32 ssp_timer_timeout_values; +/* 0x00B8 FTCTL */ + u32 ftx_control; +/* 0x00BC FRCTL */ + u32 frx_control; +/* 0x00C0 FTWMRK */ + u32 ftx_watermark; +/* 0x00C4 ENSPINUP */ + u32 notify_enable_spinup_control; +/* 0x00C8 SAS_TRNTOV */ + u32 sas_training_sequence_timer_values; +/* 0x00CC SAS_PHYCAP */ + u32 phy_capabilities; +/* 0x00D0 SAS_PHYCTL */ + u32 phy_control; + u32 reserved_d4; +/* 
0x00D8 LLCTL */ + u32 link_layer_control; +/* 0x00DC AFE_XCVRCR */ + u32 afe_xcvr_control; +/* 0x00E0 AFE_LUTCR */ + u32 afe_lookup_table_control; +/* 0x00E4 PSZGCR */ + u32 phy_source_zone_group_control; +/* 0x00E8 SAS_RECPHYCAP */ + u32 receive_phycap; + u32 reserved_ec; +/* 0x00F0 SNAFERXRSTCTL */ + u32 speed_negotiation_afe_rx_reset_control; +/* 0x00F4 SAS_SSIPMCTL */ + u32 power_management_control; +/* 0x00F8 SAS_PSPREQ_PRIM */ + u32 sas_pm_partial_request_primitive; +/* 0x00FC SAS_PSSREQ_PRIM */ + u32 sas_pm_slumber_request_primitive; +/* 0x0100 SAS_PPSACK_PRIM */ + u32 sas_pm_ack_primitive_register; +/* 0x0104 SAS_PSNAK_PRIM */ + u32 sas_pm_nak_primitive_register; +/* 0x0108 SAS_SSIPMTOV */ + u32 sas_primitive_timeout; + u32 reserved_10c; +/* 0x0110 - 0x011C PLAPRDCTRLxREG */ + u32 pla_product_control[4]; +/* 0x0120 PLAPRDSUMREG */ + u32 pla_product_sum; +/* 0x0124 PLACONTROLREG */ + u32 pla_control; +/* Remainder of memory space 896 bytes */ + u32 reserved_0128_037f[0x96]; + +}; + +/* + * 0x00D4 // Same offset as SAS_TCTSTM SAS_PTxC + * u32 primitive_transmit_control; */ + +/* + * ---------------------------------------------------------------------------- + * SGPIO + * ---------------------------------------------------------------------------- */ +#define SCU_SGPIO_OFFSET 0x1400 + +/* #define SCU_SGPIO_OFFSET 0x6000 // later moves to 0x1400 see HSD 652625 */ +#define SCU_SGPIO_SGICR_OFFSET 0x0000 +#define SCU_SGPIO_SGPBR_OFFSET 0x0004 +#define SCU_SGPIO_SGSDLR_OFFSET 0x0008 +#define SCU_SGPIO_SGSDUR_OFFSET 0x000C +#define SCU_SGPIO_SGSIDLR_OFFSET 0x0010 +#define SCU_SGPIO_SGSIDUR_OFFSET 0x0014 +#define SCU_SGPIO_SGVSCR_OFFSET 0x0018 +/* Address from 0x0820 to 0x083C */ +#define SCU_SGPIO_SGODSR_OFFSET 0x0020 + +/** + * struct scu_sgpio_registers - SCU SGPIO Registers + * + * + */ +struct scu_sgpio_registers { +/* 0x0000 SGPIO_SGICR */ + u32 interface_control; +/* 0x0004 SGPIO_SGPBR */ + u32 blink_rate; +/* 0x0008 SGPIO_SGSDLR */ + u32 start_drive_lower; +/* 0x000C SGPIO_SGSDUR */ + u32 start_drive_upper; +/* 0x0010 SGPIO_SGSIDLR */ + u32 serial_input_lower; +/* 0x0014 SGPIO_SGSIDUR */ + u32 serial_input_upper; +/* 0x0018 SGPIO_SGVSCR */ + u32 vendor_specific_code; +/* 0x001C Reserved */ + u32 reserved_001c; +/* 0x0020 SGPIO_SGODSR */ + u32 output_data_select[8]; +/* Remainder of memory space 256 bytes */ + u32 reserved_1444_14ff[0x30]; + +}; + +/* + * ***************************************************************************** + * * Defines for VIIT entry offsets + * * Access additional entries by SCU_VIIT_BASE + index * 0x10 + * ***************************************************************************** */ +#define SCU_VIIT_BASE 0x1c00 + +struct scu_viit_registers { + u32 registers[256]; +}; + +/* + * ***************************************************************************** + * * SCU PORT TASK SCHEDULER REGISTERS + * ***************************************************************************** */ + +#define SCU_PTSG_BASE 0x1000 + +#define SCU_PTSG_PTSGCR_OFFSET 0x0000 +#define SCU_PTSG_RTCR_OFFSET 0x0004 +#define SCU_PTSG_RTCCR_OFFSET 0x0008 +#define SCU_PTSG_PTS0CR_OFFSET 0x0010 +#define SCU_PTSG_PTS0SR_OFFSET 0x0014 +#define SCU_PTSG_PTS1CR_OFFSET 0x0018 +#define SCU_PTSG_PTS1SR_OFFSET 0x001C +#define SCU_PTSG_PTS2CR_OFFSET 0x0020 +#define SCU_PTSG_PTS2SR_OFFSET 0x0024 +#define SCU_PTSG_PTS3CR_OFFSET 0x0028 +#define SCU_PTSG_PTS3SR_OFFSET 0x002C +#define SCU_PTSG_PCSPE0CR_OFFSET 0x0030 +#define SCU_PTSG_PCSPE1CR_OFFSET 0x0034 +#define SCU_PTSG_PCSPE2CR_OFFSET 0x0038 
+#define SCU_PTSG_PCSPE3CR_OFFSET 0x003C +#define SCU_PTSG_ETMTSCCR_OFFSET 0x0040 +#define SCU_PTSG_ETMRNSCCR_OFFSET 0x0044 + +/** + * struct scu_port_task_scheduler_registers - These are the control/stats pairs + * for each Port Task Scheduler. + * + * + */ +struct scu_port_task_scheduler_registers { + u32 control; + u32 status; +}; + +/** + * struct scu_port_task_scheduler_group_registers - These are the PORT Task + * Scheduler registers + * + * + */ +struct scu_port_task_scheduler_group_registers { +/* 0x0000 PTSGCR */ + u32 control; +/* 0x0004 RTCR */ + u32 real_time_clock; +/* 0x0008 RTCCR */ + u32 real_time_clock_control; +/* 0x000C */ + u32 reserved_0C; +/* + * 0x0010 PTS0CR + * 0x0014 PTS0SR + * 0x0018 PTS1CR + * 0x001C PTS1SR + * 0x0020 PTS2CR + * 0x0024 PTS2SR + * 0x0028 PTS3CR + * 0x002C PTS3SR */ + struct scu_port_task_scheduler_registers port[4]; +/* + * 0x0030 PCSPE0CR + * 0x0034 PCSPE1CR + * 0x0038 PCSPE2CR + * 0x003C PCSPE3CR */ + u32 protocol_engine[4]; +/* 0x0040 ETMTSCCR */ + u32 tc_scanning_interval_control; +/* 0x0044 ETMRNSCCR */ + u32 rnc_scanning_interval_control; +/* Remainder of memory space 128 bytes */ + u32 reserved_1048_107f[0x0E]; + +}; + +#define SCU_PTSG_SCUVZECR_OFFSET 0x003C + +/* + * ***************************************************************************** + * * AFE REGISTERS + * ***************************************************************************** */ +#define SCU_AFE_MMR_BASE 0xE000 + +/* + * AFE 0 is at offset 0x0800 + * AFE 1 is at offset 0x0900 + * AFE 2 is at offset 0x0a00 + * AFE 3 is at offset 0x0b00 */ +struct scu_afe_transceiver { + /* 0x0000 AFE_XCVR_CTRL0 */ + u32 afe_xcvr_control0; + /* 0x0004 AFE_XCVR_CTRL1 */ + u32 afe_xcvr_control1; + /* 0x0008 */ + u32 reserved_0008; + /* 0x000c afe_dfx_rx_control0 */ + u32 afe_dfx_rx_control0; + /* 0x0010 AFE_DFX_RX_CTRL1 */ + u32 afe_dfx_rx_control1; + /* 0x0014 */ + u32 reserved_0014; + /* 0x0018 AFE_DFX_RX_STS0 */ + u32 afe_dfx_rx_status0; + /* 0x001c AFE_DFX_RX_STS1 */ + u32 afe_dfx_rx_status1; + /* 0x0020 */ + u32 reserved_0020; + /* 0x0024 AFE_TX_CTRL */ + u32 afe_tx_control; + /* 0x0028 AFE_TX_AMP_CTRL0 */ + u32 afe_tx_amp_control0; + /* 0x002c AFE_TX_AMP_CTRL1 */ + u32 afe_tx_amp_control1; + /* 0x0030 AFE_TX_AMP_CTRL2 */ + u32 afe_tx_amp_control2; + /* 0x0034 AFE_TX_AMP_CTRL3 */ + u32 afe_tx_amp_control3; + /* 0x0038 afe_tx_ssc_control */ + u32 afe_tx_ssc_control; + /* 0x003c */ + u32 reserved_003c; + /* 0x0040 AFE_RX_SSC_CTRL0 */ + u32 afe_rx_ssc_control0; + /* 0x0044 AFE_RX_SSC_CTRL1 */ + u32 afe_rx_ssc_control1; + /* 0x0048 AFE_RX_SSC_CTRL2 */ + u32 afe_rx_ssc_control2; + /* 0x004c AFE_RX_EQ_STS0 */ + u32 afe_rx_eq_status0; + /* 0x0050 AFE_RX_EQ_STS1 */ + u32 afe_rx_eq_status1; + /* 0x0054 AFE_RX_CDR_STS */ + u32 afe_rx_cdr_status; + /* 0x0058 */ + u32 reserved_0058; + /* 0x005c AFE_CHAN_CTRL */ + u32 afe_channel_control; + /* 0x0060-0x006c */ + u32 reserved_0060_006c[0x04]; + /* 0x0070 AFE_XCVR_EC_STS0 */ + u32 afe_xcvr_error_capture_status0; + /* 0x0074 AFE_XCVR_EC_STS1 */ + u32 afe_xcvr_error_capture_status1; + /* 0x0078 AFE_XCVR_EC_STS2 */ + u32 afe_xcvr_error_capture_status2; + /* 0x007c afe_xcvr_ec_status3 */ + u32 afe_xcvr_error_capture_status3; + /* 0x0080 AFE_XCVR_EC_STS4 */ + u32 afe_xcvr_error_capture_status4; + /* 0x0084 AFE_XCVR_EC_STS5 */ + u32 afe_xcvr_error_capture_status5; + /* 0x0088-0x00fc */ + u32 reserved_008c_00fc[0x1e]; +}; + +/** + * struct scu_afe_registers - AFE Regsiters + * + * + */ +/* Uaoa AFE registers */ +struct scu_afe_registers { + /* 0Xe000 
AFE_BIAS_CTRL */ + u32 afe_bias_control; + u32 reserved_0004; + /* 0x0008 AFE_PLL_CTRL0 */ + u32 afe_pll_control0; + /* 0x000c AFE_PLL_CTRL1 */ + u32 afe_pll_control1; + /* 0x0010 AFE_PLL_CTRL2 */ + u32 afe_pll_control2; + /* 0x0014 AFE_CB_STS */ + u32 afe_common_block_status; + /* 0x0018-0x007c */ + u32 reserved_18_7c[0x1a]; + /* 0x0080 AFE_PMSN_MCTRL0 */ + u32 afe_pmsn_master_control0; + /* 0x0084 AFE_PMSN_MCTRL1 */ + u32 afe_pmsn_master_control1; + /* 0x0088 AFE_PMSN_MCTRL2 */ + u32 afe_pmsn_master_control2; + /* 0x008C-0x00fc */ + u32 reserved_008c_00fc[0x1D]; + /* 0x0100 AFE_DFX_MST_CTRL0 */ + u32 afe_dfx_master_control0; + /* 0x0104 AFE_DFX_MST_CTRL1 */ + u32 afe_dfx_master_control1; + /* 0x0108 AFE_DFX_DCL_CTRL */ + u32 afe_dfx_dcl_control; + /* 0x010c AFE_DFX_DMON_CTRL */ + u32 afe_dfx_digital_monitor_control; + /* 0x0110 AFE_DFX_AMONP_CTRL */ + u32 afe_dfx_analog_p_monitor_control; + /* 0x0114 AFE_DFX_AMONN_CTRL */ + u32 afe_dfx_analog_n_monitor_control; + /* 0x0118 AFE_DFX_NTL_STS */ + u32 afe_dfx_ntl_status; + /* 0x011c AFE_DFX_FIFO_STS0 */ + u32 afe_dfx_fifo_status0; + /* 0x0120 AFE_DFX_FIFO_STS1 */ + u32 afe_dfx_fifo_status1; + /* 0x0124 AFE_DFX_MPAT_CTRL */ + u32 afe_dfx_master_pattern_control; + /* 0x0128 AFE_DFX_P0_CTRL */ + u32 afe_dfx_p0_control; + /* 0x012c-0x01a8 AFE_DFX_P0_DRx */ + u32 afe_dfx_p0_data[32]; + /* 0x01ac */ + u32 reserved_01ac; + /* 0x01b0-0x020c AFE_DFX_P0_IRx */ + u32 afe_dfx_p0_instruction[24]; + /* 0x0210 */ + u32 reserved_0210; + /* 0x0214 AFE_DFX_P1_CTRL */ + u32 afe_dfx_p1_control; + /* 0x0218-0x245 AFE_DFX_P1_DRx */ + u32 afe_dfx_p1_data[16]; + /* 0x0258-0x029c */ + u32 reserved_0258_029c[0x12]; + /* 0x02a0-0x02bc AFE_DFX_P1_IRx */ + u32 afe_dfx_p1_instruction[8]; + /* 0x02c0-0x2fc */ + u32 reserved_02c0_02fc[0x10]; + /* 0x0300 AFE_DFX_TX_PMSN_CTRL */ + u32 afe_dfx_tx_pmsn_control; + /* 0x0304 AFE_DFX_RX_PMSN_CTRL */ + u32 afe_dfx_rx_pmsn_control; + u32 reserved_0308; + /* 0x030c AFE_DFX_NOA_CTRL0 */ + u32 afe_dfx_noa_control0; + /* 0x0310 AFE_DFX_NOA_CTRL1 */ + u32 afe_dfx_noa_control1; + /* 0x0314 AFE_DFX_NOA_CTRL2 */ + u32 afe_dfx_noa_control2; + /* 0x0318 AFE_DFX_NOA_CTRL3 */ + u32 afe_dfx_noa_control3; + /* 0x031c AFE_DFX_NOA_CTRL4 */ + u32 afe_dfx_noa_control4; + /* 0x0320 AFE_DFX_NOA_CTRL5 */ + u32 afe_dfx_noa_control5; + /* 0x0324 AFE_DFX_NOA_CTRL6 */ + u32 afe_dfx_noa_control6; + /* 0x0328 AFE_DFX_NOA_CTRL7 */ + u32 afe_dfx_noa_control7; + /* 0x032c-0x07fc */ + u32 reserved_032c_07fc[0x135]; + + /* 0x0800-0x0bfc */ + struct scu_afe_transceiver scu_afe_xcvr[4]; + + /* 0x0c00-0x0ffc */ + u32 reserved_0c00_0ffc[0x0100]; +}; + +struct scu_protocol_engine_group_registers { + u32 table[0xE0]; +}; + + +struct scu_viit_iit { + u32 table[256]; +}; + +/** + * Placeholder for the ZONE Partition Table information ZONING will not be + * included in the 1.1 release. + * + * + */ +struct scu_zone_partition_table { + u32 table[2048]; +}; + +/** + * Placeholder for the CRAM register since I am not sure if we need to + * read/write to these registers as yet. + * + * + */ +struct scu_completion_ram { + u32 ram[128]; +}; + +/** + * Placeholder for the FBRAM registers since I am not sure if we need to + * read/write to these registers as yet. + * + * + */ +struct scu_frame_buffer_ram { + u32 ram[128]; +}; + +#define scu_scratch_ram_SIZE_IN_DWORDS 256 + +/** + * Placeholder for the scratch RAM registers. 
+ * + * + */ +struct scu_scratch_ram { + u32 ram[scu_scratch_ram_SIZE_IN_DWORDS]; +}; + +/** + * Placeholder since I am not yet sure what these registers are here for. + * + * + */ +struct noa_protocol_engine_partition { + u32 reserved[64]; +}; + +/** + * Placeholder since I am not yet sure what these registers are here for. + * + * + */ +struct noa_hub_partition { + u32 reserved[64]; +}; + +/** + * Placeholder since I am not yet sure what these registers are here for. + * + * + */ +struct noa_host_interface_partition { + u32 reserved[64]; +}; + +/** + * struct transport_link_layer_pair - The SCU Hardware pairs up the TL + * registers with the LL registers so we must place them adjcent to make the + * array of registers in the PEG. + * + * + */ +struct transport_link_layer_pair { + struct scu_transport_layer_registers tl; + struct scu_link_layer_registers ll; +}; + +/** + * struct scu_peg_registers - SCU Protocol Engine Memory mapped register space. + * These registers are unique to each protocol engine group. There can be + * at most two PEG for a single SCU part. + * + * + */ +struct scu_peg_registers { + struct transport_link_layer_pair pe[4]; + struct scu_port_task_scheduler_group_registers ptsg; + struct scu_protocol_engine_group_registers peg; + struct scu_sgpio_registers sgpio; + u32 reserved_01500_1BFF[0x1C0]; + struct scu_viit_entry viit[64]; + struct scu_zone_partition_table zpt0; + struct scu_zone_partition_table zpt1; +}; + +/** + * struct scu_registers - SCU registers including both PEG registers if we turn + * on that compile option. All of these registers are in the memory mapped + * space returned from BAR1. + * + * + */ +struct scu_registers { + /* 0x0000 - PEG 0 */ + struct scu_peg_registers peg0; + + /* 0x6000 - SDMA and Miscellaneous */ + struct scu_sdma_registers sdma; + struct scu_completion_ram cram; + struct scu_frame_buffer_ram fbram; + u32 reserved_6800_69FF[0x80]; + struct noa_protocol_engine_partition noa_pe; + struct noa_hub_partition noa_hub; + struct noa_host_interface_partition noa_if; + u32 reserved_6d00_7fff[0x4c0]; + + /* 0x8000 - PEG 1 */ + struct scu_peg_registers peg1; + + /* 0xE000 - AFE Registers */ + struct scu_afe_registers afe; + + /* 0xF000 - reserved */ + u32 reserved_f000_211fff[0x80c00]; + + /* 0x212000 - scratch RAM */ + struct scu_scratch_ram scratch_ram; +}; + +#endif /* _SCU_REGISTERS_HEADER_ */ diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c new file mode 100644 index 000000000..866950a02 --- /dev/null +++ b/drivers/scsi/isci/remote_device.c @@ -0,0 +1,1727 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include "isci.h" +#include "port.h" +#include "remote_device.h" +#include "request.h" +#include "remote_node_context.h" +#include "scu_event_codes.h" +#include "task.h" + +#undef C +#define C(a) (#a) +const char *dev_state_name(enum sci_remote_device_states state) +{ + static const char * const strings[] = REMOTE_DEV_STATES; + + return strings[state]; +} +#undef C + +enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, + enum sci_remote_node_suspension_reasons reason) +{ + return sci_remote_node_context_suspend(&idev->rnc, reason, + SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT); +} + +/** + * isci_remote_device_ready() - This function is called by the ihost when the + * remote device is ready. We mark the isci device as ready and signal the + * waiting proccess. 
+ * @ihost: our valid isci_host + * @idev: remote device + * + */ +static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev) +{ + dev_dbg(&ihost->pdev->dev, + "%s: idev = %p\n", __func__, idev); + + clear_bit(IDEV_IO_NCQERROR, &idev->flags); + set_bit(IDEV_IO_READY, &idev->flags); + if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags)) + wake_up(&ihost->eventq); +} + +static enum sci_status sci_remote_device_terminate_req( + struct isci_host *ihost, + struct isci_remote_device *idev, + int check_abort, + struct isci_request *ireq) +{ + if (!test_bit(IREQ_ACTIVE, &ireq->flags) || + (ireq->target_device != idev) || + (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags))) + return SCI_SUCCESS; + + dev_dbg(&ihost->pdev->dev, + "%s: idev=%p; flags=%lx; req=%p; req target=%p\n", + __func__, idev, idev->flags, ireq, ireq->target_device); + + set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags); + + return sci_controller_terminate_request(ihost, idev, ireq); +} + +static enum sci_status sci_remote_device_terminate_reqs_checkabort( + struct isci_remote_device *idev, + int chk) +{ + struct isci_host *ihost = idev->owning_port->owning_controller; + enum sci_status status = SCI_SUCCESS; + u32 i; + + for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { + struct isci_request *ireq = ihost->reqs[i]; + enum sci_status s; + + s = sci_remote_device_terminate_req(ihost, idev, chk, ireq); + if (s != SCI_SUCCESS) + status = s; + } + return status; +} + +static bool isci_compare_suspendcount( + struct isci_remote_device *idev, + u32 localcount) +{ + smp_rmb(); + + /* Check for a change in the suspend count, or the RNC + * being destroyed. + */ + return (localcount != idev->rnc.suspend_count) + || sci_remote_node_context_is_being_destroyed(&idev->rnc); +} + +static bool isci_check_reqterm( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq, + u32 localcount) +{ + unsigned long flags; + bool res; + + spin_lock_irqsave(&ihost->scic_lock, flags); + res = isci_compare_suspendcount(idev, localcount) + && !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + return res; +} + +static bool isci_check_devempty( + struct isci_host *ihost, + struct isci_remote_device *idev, + u32 localcount) +{ + unsigned long flags; + bool res; + + spin_lock_irqsave(&ihost->scic_lock, flags); + res = isci_compare_suspendcount(idev, localcount) + && idev->started_request_count == 0; + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + return res; +} + +enum sci_status isci_remote_device_terminate_requests( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + enum sci_status status = SCI_SUCCESS; + unsigned long flags; + u32 rnc_suspend_count; + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (isci_get_device(idev) == NULL) { + dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n", + __func__, idev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + status = SCI_FAILURE; + } else { + /* If already suspended, don't wait for another suspension. */ + smp_rmb(); + rnc_suspend_count + = sci_remote_node_context_is_suspended(&idev->rnc) + ? 
0 : idev->rnc.suspend_count; + + dev_dbg(&ihost->pdev->dev, + "%s: idev=%p, ireq=%p; started_request_count=%d, " + "rnc_suspend_count=%d, rnc.suspend_count=%d" + "about to wait\n", + __func__, idev, ireq, idev->started_request_count, + rnc_suspend_count, idev->rnc.suspend_count); + + #define MAX_SUSPEND_MSECS 10000 + if (ireq) { + /* Terminate a specific TC. */ + set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags); + sci_remote_device_terminate_req(ihost, idev, 0, ireq); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + if (!wait_event_timeout(ihost->eventq, + isci_check_reqterm(ihost, idev, ireq, + rnc_suspend_count), + msecs_to_jiffies(MAX_SUSPEND_MSECS))) { + + dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n", + __func__, ihost->id); + dev_dbg(&ihost->pdev->dev, + "%s: ******* Timeout waiting for " + "suspend; idev=%p, current state %s; " + "started_request_count=%d, flags=%lx\n\t" + "rnc_suspend_count=%d, rnc.suspend_count=%d " + "RNC: current state %s, current " + "suspend_type %x dest state %d;\n" + "ireq=%p, ireq->flags = %lx\n", + __func__, idev, + dev_state_name(idev->sm.current_state_id), + idev->started_request_count, idev->flags, + rnc_suspend_count, idev->rnc.suspend_count, + rnc_state_name(idev->rnc.sm.current_state_id), + idev->rnc.suspend_type, + idev->rnc.destination_state, + ireq, ireq->flags); + } + spin_lock_irqsave(&ihost->scic_lock, flags); + clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags); + if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) + isci_free_tag(ihost, ireq->io_tag); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + } else { + /* Terminate all TCs. */ + sci_remote_device_terminate_requests(idev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + if (!wait_event_timeout(ihost->eventq, + isci_check_devempty(ihost, idev, + rnc_suspend_count), + msecs_to_jiffies(MAX_SUSPEND_MSECS))) { + + dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n", + __func__, ihost->id); + dev_dbg(&ihost->pdev->dev, + "%s: ******* Timeout waiting for " + "suspend; idev=%p, current state %s; " + "started_request_count=%d, flags=%lx\n\t" + "rnc_suspend_count=%d, " + "RNC: current state %s, " + "rnc.suspend_count=%d, current " + "suspend_type %x dest state %d\n", + __func__, idev, + dev_state_name(idev->sm.current_state_id), + idev->started_request_count, idev->flags, + rnc_suspend_count, + rnc_state_name(idev->rnc.sm.current_state_id), + idev->rnc.suspend_count, + idev->rnc.suspend_type, + idev->rnc.destination_state); + } + } + dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n", + __func__, idev); + isci_put_device(idev); + } + return status; +} + +/** +* isci_remote_device_not_ready() - This function is called by the ihost when +* the remote device is not ready. We mark the isci device as ready (not +* "ready_for_io") and signal the waiting proccess. +* @ihost: This parameter specifies the isci host object. +* @idev: This parameter specifies the remote device +* @reason: Reason to switch on +* +* sci_lock is held on entrance to this function. +*/ +static void isci_remote_device_not_ready(struct isci_host *ihost, + struct isci_remote_device *idev, + u32 reason) +{ + dev_dbg(&ihost->pdev->dev, + "%s: isci_device = %p; reason = %d\n", __func__, idev, reason); + + switch (reason) { + case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED: + set_bit(IDEV_IO_NCQERROR, &idev->flags); + + /* Suspend the remote device so the I/O can be terminated. */ + sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL); + + /* Kill all outstanding requests for the device. 
*/ + sci_remote_device_terminate_requests(idev); + + fallthrough; /* into the default case */ + default: + clear_bit(IDEV_IO_READY, &idev->flags); + break; + } +} + +/* called once the remote node context is ready to be freed. + * The remote device can now report that its stop operation is complete. none + */ +static void rnc_destruct_done(void *_dev) +{ + struct isci_remote_device *idev = _dev; + + BUG_ON(idev->started_request_count != 0); + sci_change_state(&idev->sm, SCI_DEV_STOPPED); +} + +enum sci_status sci_remote_device_terminate_requests( + struct isci_remote_device *idev) +{ + return sci_remote_device_terminate_reqs_checkabort(idev, 0); +} + +enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, + u32 timeout) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + + switch (state) { + case SCI_DEV_INITIAL: + case SCI_DEV_FAILED: + case SCI_DEV_FINAL: + default: + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", + __func__, dev_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + case SCI_DEV_STOPPED: + return SCI_SUCCESS; + case SCI_DEV_STARTING: + /* device not started so there had better be no requests */ + BUG_ON(idev->started_request_count != 0); + sci_remote_node_context_destruct(&idev->rnc, + rnc_destruct_done, idev); + /* Transition to the stopping state and wait for the + * remote node to complete being posted and invalidated. + */ + sci_change_state(sm, SCI_DEV_STOPPING); + return SCI_SUCCESS; + case SCI_DEV_READY: + case SCI_STP_DEV_IDLE: + case SCI_STP_DEV_CMD: + case SCI_STP_DEV_NCQ: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_AWAIT_RESET: + case SCI_SMP_DEV_IDLE: + case SCI_SMP_DEV_CMD: + sci_change_state(sm, SCI_DEV_STOPPING); + if (idev->started_request_count == 0) + sci_remote_node_context_destruct(&idev->rnc, + rnc_destruct_done, + idev); + else { + sci_remote_device_suspend( + idev, SCI_SW_SUSPEND_LINKHANG_DETECT); + sci_remote_device_terminate_requests(idev); + } + return SCI_SUCCESS; + case SCI_DEV_STOPPING: + /* All requests should have been terminated, but if there is an + * attempt to stop a device already in the stopping state, then + * try again to terminate. 
+ */ + return sci_remote_device_terminate_requests(idev); + case SCI_DEV_RESETTING: + sci_change_state(sm, SCI_DEV_STOPPING); + return SCI_SUCCESS; + } +} + +enum sci_status sci_remote_device_reset(struct isci_remote_device *idev) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + + switch (state) { + case SCI_DEV_INITIAL: + case SCI_DEV_STOPPED: + case SCI_DEV_STARTING: + case SCI_SMP_DEV_IDLE: + case SCI_SMP_DEV_CMD: + case SCI_DEV_STOPPING: + case SCI_DEV_FAILED: + case SCI_DEV_RESETTING: + case SCI_DEV_FINAL: + default: + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", + __func__, dev_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + case SCI_DEV_READY: + case SCI_STP_DEV_IDLE: + case SCI_STP_DEV_CMD: + case SCI_STP_DEV_NCQ: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_AWAIT_RESET: + sci_change_state(sm, SCI_DEV_RESETTING); + return SCI_SUCCESS; + } +} + +enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + + if (state != SCI_DEV_RESETTING) { + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", + __func__, dev_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } + + sci_change_state(sm, SCI_DEV_READY); + return SCI_SUCCESS; +} + +enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, + u32 frame_index) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + struct isci_host *ihost = idev->owning_port->owning_controller; + enum sci_status status; + + switch (state) { + case SCI_DEV_INITIAL: + case SCI_DEV_STOPPED: + case SCI_DEV_STARTING: + case SCI_STP_DEV_IDLE: + case SCI_SMP_DEV_IDLE: + case SCI_DEV_FINAL: + default: + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", + __func__, dev_state_name(state)); + /* Return the frame back to the controller */ + sci_controller_release_frame(ihost, frame_index); + return SCI_FAILURE_INVALID_STATE; + case SCI_DEV_READY: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_AWAIT_RESET: + case SCI_DEV_STOPPING: + case SCI_DEV_FAILED: + case SCI_DEV_RESETTING: { + struct isci_request *ireq; + struct ssp_frame_hdr hdr; + void *frame_header; + ssize_t word_cnt; + + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + &frame_header); + if (status != SCI_SUCCESS) + return status; + + word_cnt = sizeof(hdr) / sizeof(u32); + sci_swab32_cpy(&hdr, frame_header, word_cnt); + + ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag)); + if (ireq && ireq->target_device == idev) { + /* The IO request is now in charge of releasing the frame */ + status = sci_io_request_frame_handler(ireq, frame_index); + } else { + /* We could not map this tag to a valid IO + * request Just toss the frame and continue + */ + sci_controller_release_frame(ihost, frame_index); + } + break; + } + case SCI_STP_DEV_NCQ: { + struct dev_to_host_fis *hdr; + + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&hdr); + if (status != SCI_SUCCESS) + return status; + + if (hdr->fis_type == FIS_SETDEVBITS && + (hdr->status & ATA_ERR)) { + idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED; + + /* TODO Check sactive and complete associated IO if any. 
*/ + sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR); + } else if (hdr->fis_type == FIS_REGD2H && + (hdr->status & ATA_ERR)) { + /* + * Some devices return D2H FIS when an NCQ error is detected. + * Treat this like an SDB error FIS ready reason. + */ + idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED; + sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR); + } else + status = SCI_FAILURE; + + sci_controller_release_frame(ihost, frame_index); + break; + } + case SCI_STP_DEV_CMD: + case SCI_SMP_DEV_CMD: + /* The device does not process any UF received from the hardware while + * in this state. All unsolicited frames are forwarded to the io request + * object. + */ + status = sci_io_request_frame_handler(idev->working_request, frame_index); + break; + } + + return status; +} + +static bool is_remote_device_ready(struct isci_remote_device *idev) +{ + + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + + switch (state) { + case SCI_DEV_READY: + case SCI_STP_DEV_IDLE: + case SCI_STP_DEV_CMD: + case SCI_STP_DEV_NCQ: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_AWAIT_RESET: + case SCI_SMP_DEV_IDLE: + case SCI_SMP_DEV_CMD: + return true; + default: + return false; + } +} + +/* + * called once the remote node context has transisitioned to a ready + * state (after suspending RX and/or TX due to early D2H fis) + */ +static void atapi_remote_device_resume_done(void *_dev) +{ + struct isci_remote_device *idev = _dev; + struct isci_request *ireq = idev->working_request; + + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); +} + +enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, + u32 event_code) +{ + enum sci_status status; + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + + switch (scu_get_event_type(event_code)) { + case SCU_EVENT_TYPE_RNC_OPS_MISC: + case SCU_EVENT_TYPE_RNC_SUSPEND_TX: + case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: + status = sci_remote_node_context_event_handler(&idev->rnc, event_code); + break; + case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: + if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) { + status = SCI_SUCCESS; + + /* Suspend the associated RNC */ + sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL); + + dev_dbg(scirdev_to_dev(idev), + "%s: device: %p event code: %x: %s\n", + __func__, idev, event_code, + is_remote_device_ready(idev) + ? "I_T_Nexus_Timeout event" + : "I_T_Nexus_Timeout event in wrong state"); + + break; + } + fallthrough; /* and treat as unhandled */ + default: + dev_dbg(scirdev_to_dev(idev), + "%s: device: %p event code: %x: %s\n", + __func__, idev, event_code, + is_remote_device_ready(idev) + ? "unexpected event" + : "unexpected event in wrong state"); + status = SCI_FAILURE_INVALID_STATE; + break; + } + + if (status != SCI_SUCCESS) + return status; + + /* Decode device-specific states that may require an RNC resume during + * normal operation. When the abort path is active, these resumes are + * managed when the abort path exits. + */ + if (state == SCI_STP_DEV_ATAPI_ERROR) { + /* For ATAPI error state resume the RNC right away. 
*/ + if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX || + scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) { + return sci_remote_node_context_resume(&idev->rnc, + atapi_remote_device_resume_done, + idev); + } + } + + if (state == SCI_STP_DEV_IDLE) { + + /* We pick up suspension events to handle specifically to this + * state. We resume the RNC right away. + */ + if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX || + scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) + status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL); + } + + return status; +} + +static void sci_remote_device_start_request(struct isci_remote_device *idev, + struct isci_request *ireq, + enum sci_status status) +{ + struct isci_port *iport = idev->owning_port; + + /* cleanup requests that failed after starting on the port */ + if (status != SCI_SUCCESS) + sci_port_complete_io(iport, idev, ireq); + else { + kref_get(&idev->kref); + idev->started_request_count++; + } +} + +enum sci_status sci_remote_device_start_io(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + struct isci_port *iport = idev->owning_port; + enum sci_status status; + + switch (state) { + case SCI_DEV_INITIAL: + case SCI_DEV_STOPPED: + case SCI_DEV_STARTING: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_DEV_STOPPING: + case SCI_DEV_FAILED: + case SCI_DEV_RESETTING: + case SCI_DEV_FINAL: + default: + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", + __func__, dev_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + case SCI_DEV_READY: + /* attempt to start an io request for this device object. The remote + * device object will issue the start request for the io and if + * successful it will start the request for the port object then + * increment its own request count. + */ + status = sci_port_start_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + return status; + + status = sci_remote_node_context_start_io(&idev->rnc, ireq); + if (status != SCI_SUCCESS) + break; + + status = sci_request_start(ireq); + break; + case SCI_STP_DEV_IDLE: { + /* handle the start io operation for a sata device that is in + * the command idle state. - Evalute the type of IO request to + * be started - If its an NCQ request change to NCQ substate - + * If its any other command change to the CMD substate + * + * If this is a softreset we may want to have a different + * substate. 
+ */ + enum sci_remote_device_states new_state; + struct sas_task *task = isci_request_access_task(ireq); + + status = sci_port_start_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + return status; + + status = sci_remote_node_context_start_io(&idev->rnc, ireq); + if (status != SCI_SUCCESS) + break; + + status = sci_request_start(ireq); + if (status != SCI_SUCCESS) + break; + + if (task->ata_task.use_ncq) + new_state = SCI_STP_DEV_NCQ; + else { + idev->working_request = ireq; + new_state = SCI_STP_DEV_CMD; + } + sci_change_state(sm, new_state); + break; + } + case SCI_STP_DEV_NCQ: { + struct sas_task *task = isci_request_access_task(ireq); + + if (task->ata_task.use_ncq) { + status = sci_port_start_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + return status; + + status = sci_remote_node_context_start_io(&idev->rnc, ireq); + if (status != SCI_SUCCESS) + break; + + status = sci_request_start(ireq); + } else + return SCI_FAILURE_INVALID_STATE; + break; + } + case SCI_STP_DEV_AWAIT_RESET: + return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; + case SCI_SMP_DEV_IDLE: + status = sci_port_start_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + return status; + + status = sci_remote_node_context_start_io(&idev->rnc, ireq); + if (status != SCI_SUCCESS) + break; + + status = sci_request_start(ireq); + if (status != SCI_SUCCESS) + break; + + idev->working_request = ireq; + sci_change_state(&idev->sm, SCI_SMP_DEV_CMD); + break; + case SCI_STP_DEV_CMD: + case SCI_SMP_DEV_CMD: + /* device is already handling a command it can not accept new commands + * until this one is complete. + */ + return SCI_FAILURE_INVALID_STATE; + } + + sci_remote_device_start_request(idev, ireq, status); + return status; +} + +static enum sci_status common_complete_io(struct isci_port *iport, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + enum sci_status status; + + status = sci_request_complete(ireq); + if (status != SCI_SUCCESS) + return status; + + status = sci_port_complete_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + return status; + + sci_remote_device_decrement_request_count(idev); + return status; +} + +enum sci_status sci_remote_device_complete_io(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + struct isci_port *iport = idev->owning_port; + enum sci_status status; + + switch (state) { + case SCI_DEV_INITIAL: + case SCI_DEV_STOPPED: + case SCI_DEV_STARTING: + case SCI_STP_DEV_IDLE: + case SCI_SMP_DEV_IDLE: + case SCI_DEV_FAILED: + case SCI_DEV_FINAL: + default: + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", + __func__, dev_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + case SCI_DEV_READY: + case SCI_STP_DEV_AWAIT_RESET: + case SCI_DEV_RESETTING: + status = common_complete_io(iport, idev, ireq); + break; + case SCI_STP_DEV_CMD: + case SCI_STP_DEV_NCQ: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_ATAPI_ERROR: + status = common_complete_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + break; + + if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { + /* This request causes hardware error, device needs to be Lun Reset. + * So here we force the state machine to IDLE state so the rest IOs + * can reach RNC state handler, these IOs will be completed by RNC with + * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE". 
+ */ + sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET); + } else if (idev->started_request_count == 0) + sci_change_state(sm, SCI_STP_DEV_IDLE); + break; + case SCI_SMP_DEV_CMD: + status = common_complete_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + break; + sci_change_state(sm, SCI_SMP_DEV_IDLE); + break; + case SCI_DEV_STOPPING: + status = common_complete_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + break; + + if (idev->started_request_count == 0) + sci_remote_node_context_destruct(&idev->rnc, + rnc_destruct_done, + idev); + break; + } + + if (status != SCI_SUCCESS) + dev_err(scirdev_to_dev(idev), + "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x " + "could not complete\n", __func__, iport, + idev, ireq, status); + else + isci_put_device(idev); + + return status; +} + +static void sci_remote_device_continue_request(void *dev) +{ + struct isci_remote_device *idev = dev; + + /* we need to check if this request is still valid to continue. */ + if (idev->working_request) + sci_controller_continue_io(idev->working_request); +} + +enum sci_status sci_remote_device_start_task(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + struct isci_port *iport = idev->owning_port; + enum sci_status status; + + switch (state) { + case SCI_DEV_INITIAL: + case SCI_DEV_STOPPED: + case SCI_DEV_STARTING: + case SCI_SMP_DEV_IDLE: + case SCI_SMP_DEV_CMD: + case SCI_DEV_STOPPING: + case SCI_DEV_FAILED: + case SCI_DEV_RESETTING: + case SCI_DEV_FINAL: + default: + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", + __func__, dev_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + case SCI_STP_DEV_IDLE: + case SCI_STP_DEV_CMD: + case SCI_STP_DEV_NCQ: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_AWAIT_RESET: + status = sci_port_start_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + return status; + + status = sci_request_start(ireq); + if (status != SCI_SUCCESS) + goto out; + + /* Note: If the remote device state is not IDLE this will + * replace the request that probably resulted in the task + * management request. + */ + idev->working_request = ireq; + sci_change_state(sm, SCI_STP_DEV_CMD); + + /* The remote node context must cleanup the TCi to NCQ mapping + * table. The only way to do this correctly is to either write + * to the TLCR register or to invalidate and repost the RNC. In + * either case the remote node context state machine will take + * the correct action when the remote node context is suspended + * and later resumed. + */ + sci_remote_device_suspend(idev, + SCI_SW_SUSPEND_LINKHANG_DETECT); + + status = sci_remote_node_context_start_task(&idev->rnc, ireq, + sci_remote_device_continue_request, idev); + + out: + sci_remote_device_start_request(idev, ireq, status); + /* We need to let the controller start request handler know that + * it can't post TC yet. We will provide a callback function to + * post TC when RNC gets resumed. 
+ */ + return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS; + case SCI_DEV_READY: + status = sci_port_start_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + return status; + + /* Resume the RNC as needed: */ + status = sci_remote_node_context_start_task(&idev->rnc, ireq, + NULL, NULL); + if (status != SCI_SUCCESS) + break; + + status = sci_request_start(ireq); + break; + } + sci_remote_device_start_request(idev, ireq, status); + + return status; +} + +void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request) +{ + struct isci_port *iport = idev->owning_port; + u32 context; + + context = request | + (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + idev->rnc.remote_node_index; + + sci_controller_post_request(iport->owning_controller, context); +} + +/* called once the remote node context has transisitioned to a + * ready state. This is the indication that the remote device object can also + * transition to ready. + */ +static void remote_device_resume_done(void *_dev) +{ + struct isci_remote_device *idev = _dev; + + if (is_remote_device_ready(idev)) + return; + + /* go 'ready' if we are not already in a ready state */ + sci_change_state(&idev->sm, SCI_DEV_READY); +} + +static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) +{ + struct isci_remote_device *idev = _dev; + struct isci_host *ihost = idev->owning_port->owning_controller; + + /* For NCQ operation we do not issue a isci_remote_device_not_ready(). + * As a result, avoid sending the ready notification. + */ + if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ) + isci_remote_device_ready(ihost, idev); +} + +static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + + /* Initial state is a transitional state to the stopped state */ + sci_change_state(&idev->sm, SCI_DEV_STOPPED); +} + +/** + * sci_remote_device_destruct() - free remote node context and destruct + * @idev: This parameter specifies the remote device to be destructed. + * + * Remote device objects are a limited resource. As such, they must be + * protected. Thus calls to construct and destruct are mutually exclusive and + * non-reentrant. The return value shall indicate if the device was + * successfully destructed or if some failure occurred. enum sci_status This value + * is returned if the device is successfully destructed. + * SCI_FAILURE_INVALID_REMOTE_DEVICE This value is returned if the supplied + * device isn't valid (e.g. it's already been destoryed, the handle isn't + * valid, etc.). + */ +static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + struct isci_host *ihost; + + if (state != SCI_DEV_STOPPED) { + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", + __func__, dev_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } + + ihost = idev->owning_port->owning_controller; + sci_controller_free_remote_node_context(ihost, idev, + idev->rnc.remote_node_index); + idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; + sci_change_state(sm, SCI_DEV_FINAL); + + return SCI_SUCCESS; +} + +/** + * isci_remote_device_deconstruct() - This function frees an isci_remote_device. + * @ihost: This parameter specifies the isci host object. 
+ * @idev: This parameter specifies the remote device to be freed. + * + */ +static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev) +{ + dev_dbg(&ihost->pdev->dev, + "%s: isci_device = %p\n", __func__, idev); + + /* There should not be any outstanding io's. All paths to + * here should go through isci_remote_device_nuke_requests. + * If we hit this condition, we will need a way to complete + * io requests in process */ + BUG_ON(idev->started_request_count > 0); + + sci_remote_device_destruct(idev); + list_del_init(&idev->node); + isci_put_device(idev); +} + +static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + u32 prev_state; + + /* If we are entering from the stopping state let the SCI User know that + * the stop operation has completed. + */ + prev_state = idev->sm.previous_state_id; + if (prev_state == SCI_DEV_STOPPING) + isci_remote_device_deconstruct(ihost, idev); + + sci_controller_remote_device_stopped(ihost, idev); +} + +static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + + isci_remote_device_not_ready(ihost, idev, + SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED); +} + +static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + struct domain_device *dev = idev->domain_dev; + + if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { + sci_change_state(&idev->sm, SCI_STP_DEV_IDLE); + } else if (dev_is_expander(dev->dev_type)) { + sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); + } else + isci_remote_device_ready(ihost, idev); +} + +static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct domain_device *dev = idev->domain_dev; + + if (dev->dev_type == SAS_END_DEVICE) { + struct isci_host *ihost = idev->owning_port->owning_controller; + + isci_remote_device_not_ready(ihost, idev, + SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED); + } +} + +static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + + dev_dbg(&ihost->pdev->dev, + "%s: isci_device = %p\n", __func__, idev); + + sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT); +} + +static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + + dev_dbg(&ihost->pdev->dev, + "%s: isci_device = %p\n", __func__, idev); + + sci_remote_node_context_resume(&idev->rnc, NULL, NULL); +} + +static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + + idev->working_request = NULL; + if (sci_remote_node_context_is_ready(&idev->rnc)) { + /* + * Since the RNC is ready, it's alright to finish completion + * 
processing (e.g. signal the remote device is ready). */ + sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev); + } else { + sci_remote_node_context_resume(&idev->rnc, + sci_stp_remote_device_ready_idle_substate_resume_complete_handler, + idev); + } +} + +static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + + BUG_ON(idev->working_request == NULL); + + isci_remote_device_not_ready(ihost, idev, + SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED); +} + +static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + + if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) + isci_remote_device_not_ready(ihost, idev, + idev->not_ready_reason); +} + +static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + + isci_remote_device_ready(ihost, idev); +} + +static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + + BUG_ON(idev->working_request == NULL); + + isci_remote_device_not_ready(ihost, idev, + SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED); +} + +static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + + idev->working_request = NULL; +} + +static const struct sci_base_state sci_remote_device_state_table[] = { + [SCI_DEV_INITIAL] = { + .enter_state = sci_remote_device_initial_state_enter, + }, + [SCI_DEV_STOPPED] = { + .enter_state = sci_remote_device_stopped_state_enter, + }, + [SCI_DEV_STARTING] = { + .enter_state = sci_remote_device_starting_state_enter, + }, + [SCI_DEV_READY] = { + .enter_state = sci_remote_device_ready_state_enter, + .exit_state = sci_remote_device_ready_state_exit + }, + [SCI_STP_DEV_IDLE] = { + .enter_state = sci_stp_remote_device_ready_idle_substate_enter, + }, + [SCI_STP_DEV_CMD] = { + .enter_state = sci_stp_remote_device_ready_cmd_substate_enter, + }, + [SCI_STP_DEV_NCQ] = { }, + [SCI_STP_DEV_NCQ_ERROR] = { + .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter, + }, + [SCI_STP_DEV_ATAPI_ERROR] = { }, + [SCI_STP_DEV_AWAIT_RESET] = { }, + [SCI_SMP_DEV_IDLE] = { + .enter_state = sci_smp_remote_device_ready_idle_substate_enter, + }, + [SCI_SMP_DEV_CMD] = { + .enter_state = sci_smp_remote_device_ready_cmd_substate_enter, + .exit_state = sci_smp_remote_device_ready_cmd_substate_exit, + }, + [SCI_DEV_STOPPING] = { }, + [SCI_DEV_FAILED] = { }, + [SCI_DEV_RESETTING] = { + .enter_state = sci_remote_device_resetting_state_enter, + .exit_state = sci_remote_device_resetting_state_exit + }, + [SCI_DEV_FINAL] = { }, +}; + +/** + * sci_remote_device_construct() - common construction + * @iport: SAS/SATA port through which this device is accessed. 
+ * @idev: remote device to construct + * + * This routine just performs benign initialization and does not + * allocate the remote_node_context which is left to + * sci_remote_device_[de]a_construct(). sci_remote_device_destruct() + * frees the remote_node_context(s) for the device. + */ +static void sci_remote_device_construct(struct isci_port *iport, + struct isci_remote_device *idev) +{ + idev->owning_port = iport; + idev->started_request_count = 0; + + sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL); + + sci_remote_node_context_construct(&idev->rnc, + SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); +} + +/* + * sci_remote_device_da_construct() - construct direct attached device. + * + * The information (e.g. IAF, Signature FIS, etc.) necessary to build + * the device is known to the SCI Core since it is contained in the + * sci_phy object. Remote node context(s) is/are a global resource + * allocated by this routine, freed by sci_remote_device_destruct(). + * + * Returns: + * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. + * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to + * sata-only controller instance. + * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. + */ +static enum sci_status sci_remote_device_da_construct(struct isci_port *iport, + struct isci_remote_device *idev) +{ + enum sci_status status; + struct sci_port_properties properties; + + sci_remote_device_construct(iport, idev); + + sci_port_get_properties(iport, &properties); + /* Get accurate port width from port's phy mask for a DA device. */ + idev->device_port_width = hweight32(properties.phy_mask); + + status = sci_controller_allocate_remote_node_context(iport->owning_controller, + idev, + &idev->rnc.remote_node_index); + + if (status != SCI_SUCCESS) + return status; + + idev->connection_rate = sci_port_get_max_allowed_speed(iport); + + return SCI_SUCCESS; +} + +/* + * sci_remote_device_ea_construct() - construct expander attached device + * + * Remote node context(s) is/are a global resource allocated by this + * routine, freed by sci_remote_device_destruct(). + * + * Returns: + * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. + * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to + * sata-only controller instance. + * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. + */ +static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, + struct isci_remote_device *idev) +{ + struct domain_device *dev = idev->domain_dev; + enum sci_status status; + + sci_remote_device_construct(iport, idev); + + status = sci_controller_allocate_remote_node_context(iport->owning_controller, + idev, + &idev->rnc.remote_node_index); + if (status != SCI_SUCCESS) + return status; + + /* For SAS-2 the physical link rate is actually a logical link + * rate that incorporates multiplexing. The SCU doesn't + * incorporate multiplexing and for the purposes of the + * connection the logical link rate is that same as the + * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay + * one another, so this code works for both situations. + */ + idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), + dev->linkrate); + + /* / @todo Should I assign the port width by reading all of the phys on the port? 
*/ + idev->device_port_width = 1; + + return SCI_SUCCESS; +} + +enum sci_status sci_remote_device_resume( + struct isci_remote_device *idev, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p) +{ + enum sci_status status; + + status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p); + if (status != SCI_SUCCESS) + dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n", + __func__, status); + return status; +} + +static void isci_remote_device_resume_from_abort_complete(void *cbparam) +{ + struct isci_remote_device *idev = cbparam; + struct isci_host *ihost = idev->owning_port->owning_controller; + scics_sds_remote_node_context_callback abort_resume_cb = + idev->abort_resume_cb; + + dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n", + __func__, abort_resume_cb); + + if (abort_resume_cb != NULL) { + idev->abort_resume_cb = NULL; + abort_resume_cb(idev->abort_resume_cbparam); + } + clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); + wake_up(&ihost->eventq); +} + +static bool isci_remote_device_test_resume_done( + struct isci_host *ihost, + struct isci_remote_device *idev) +{ + unsigned long flags; + bool done; + + spin_lock_irqsave(&ihost->scic_lock, flags); + done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags) + || test_bit(IDEV_STOP_PENDING, &idev->flags) + || sci_remote_node_context_is_being_destroyed(&idev->rnc); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + return done; +} + +static void isci_remote_device_wait_for_resume_from_abort( + struct isci_host *ihost, + struct isci_remote_device *idev) +{ + dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n", + __func__, idev); + + #define MAX_RESUME_MSECS 10000 + if (!wait_event_timeout(ihost->eventq, + isci_remote_device_test_resume_done(ihost, idev), + msecs_to_jiffies(MAX_RESUME_MSECS))) { + + dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for " + "resume: %p\n", __func__, idev); + } + clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); + + dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n", + __func__, idev); +} + +enum sci_status isci_remote_device_resume_from_abort( + struct isci_host *ihost, + struct isci_remote_device *idev) +{ + unsigned long flags; + enum sci_status status = SCI_SUCCESS; + int destroyed; + + spin_lock_irqsave(&ihost->scic_lock, flags); + /* Preserve any current resume callbacks, for instance from other + * resumptions. + */ + idev->abort_resume_cb = idev->rnc.user_callback; + idev->abort_resume_cbparam = idev->rnc.user_cookie; + set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); + clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags); + destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc); + if (!destroyed) + status = sci_remote_device_resume( + idev, isci_remote_device_resume_from_abort_complete, + idev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + if (!destroyed && (status == SCI_SUCCESS)) + isci_remote_device_wait_for_resume_from_abort(ihost, idev); + else + clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); + + return status; +} + +/** + * sci_remote_device_start() - This method will start the supplied remote + * device. This method enables normal IO requests to flow through to the + * remote device. + * @idev: This parameter specifies the device to be started. + * @timeout: This parameter specifies the number of milliseconds in which the + * start operation should complete. + * + * An indication of whether the device was successfully started. 
SCI_SUCCESS + * This value is returned if the device was successfully started. + * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start + * the device when there have been no phys added to it. + */ +static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, + u32 timeout) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + enum sci_status status; + + if (state != SCI_DEV_STOPPED) { + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", + __func__, dev_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } + + status = sci_remote_device_resume(idev, remote_device_resume_done, + idev); + if (status != SCI_SUCCESS) + return status; + + sci_change_state(sm, SCI_DEV_STARTING); + + return SCI_SUCCESS; +} + +static enum sci_status isci_remote_device_construct(struct isci_port *iport, + struct isci_remote_device *idev) +{ + struct isci_host *ihost = iport->isci_host; + struct domain_device *dev = idev->domain_dev; + enum sci_status status; + + if (dev->parent && dev_is_expander(dev->parent->dev_type)) + status = sci_remote_device_ea_construct(iport, idev); + else + status = sci_remote_device_da_construct(iport, idev); + + if (status != SCI_SUCCESS) { + dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n", + __func__, status); + + return status; + } + + /* start the device. */ + status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT); + + if (status != SCI_SUCCESS) + dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n", + status); + + return status; +} + +/** + * isci_remote_device_alloc() + * This function builds the isci_remote_device when a libsas dev_found message + * is received. + * @ihost: This parameter specifies the isci host object. + * @iport: This parameter specifies the isci_port connected to this device. + * + * pointer to new isci_remote_device. + */ +static struct isci_remote_device * +isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport) +{ + struct isci_remote_device *idev; + int i; + + for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { + idev = &ihost->devices[i]; + if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags)) + break; + } + + if (i >= SCI_MAX_REMOTE_DEVICES) { + dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__); + return NULL; + } + if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n")) + return NULL; + + return idev; +} + +void isci_remote_device_release(struct kref *kref) +{ + struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref); + struct isci_host *ihost = idev->isci_port->isci_host; + + idev->domain_dev = NULL; + idev->isci_port = NULL; + clear_bit(IDEV_START_PENDING, &idev->flags); + clear_bit(IDEV_STOP_PENDING, &idev->flags); + clear_bit(IDEV_IO_READY, &idev->flags); + clear_bit(IDEV_GONE, &idev->flags); + smp_mb__before_atomic(); + clear_bit(IDEV_ALLOCATED, &idev->flags); + wake_up(&ihost->eventq); +} + +/** + * isci_remote_device_stop() - This function is called internally to stop the + * remote device. + * @ihost: This parameter specifies the isci host object. + * @idev: This parameter specifies the remote device. + * + * The status of the ihost request to stop. 
+ */ +enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev) +{ + enum sci_status status; + unsigned long flags; + + dev_dbg(&ihost->pdev->dev, + "%s: isci_device = %p\n", __func__, idev); + + spin_lock_irqsave(&ihost->scic_lock, flags); + idev->domain_dev->lldd_dev = NULL; /* disable new lookups */ + set_bit(IDEV_GONE, &idev->flags); + + set_bit(IDEV_STOP_PENDING, &idev->flags); + status = sci_remote_device_stop(idev, 50); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + /* Wait for the stop complete callback. */ + if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n")) + /* nothing to wait for */; + else + wait_for_device_stop(ihost, idev); + + dev_dbg(&ihost->pdev->dev, + "%s: isci_device = %p, waiting done.\n", __func__, idev); + + return status; +} + +/** + * isci_remote_device_gone() - This function is called by libsas when a domain + * device is removed. + * @dev: This parameter specifies the libsas domain device. + */ +void isci_remote_device_gone(struct domain_device *dev) +{ + struct isci_host *ihost = dev_to_ihost(dev); + struct isci_remote_device *idev = dev->lldd_dev; + + dev_dbg(&ihost->pdev->dev, + "%s: domain_device = %p, isci_device = %p, isci_port = %p\n", + __func__, dev, idev, idev->isci_port); + + isci_remote_device_stop(ihost, idev); +} + + +/** + * isci_remote_device_found() - This function is called by libsas when a remote + * device is discovered. A remote device object is created and started. the + * function then sleeps until the sci core device started message is + * received. + * @dev: This parameter specifies the libsas domain device. + * + * status, zero indicates success. + */ +int isci_remote_device_found(struct domain_device *dev) +{ + struct isci_host *isci_host = dev_to_ihost(dev); + struct isci_port *isci_port = dev->port->lldd_port; + struct isci_remote_device *isci_device; + enum sci_status status; + + dev_dbg(&isci_host->pdev->dev, + "%s: domain_device = %p\n", __func__, dev); + + if (!isci_port) + return -ENODEV; + + isci_device = isci_remote_device_alloc(isci_host, isci_port); + if (!isci_device) + return -ENODEV; + + kref_init(&isci_device->kref); + INIT_LIST_HEAD(&isci_device->node); + + spin_lock_irq(&isci_host->scic_lock); + isci_device->domain_dev = dev; + isci_device->isci_port = isci_port; + list_add_tail(&isci_device->node, &isci_port->remote_dev_list); + + set_bit(IDEV_START_PENDING, &isci_device->flags); + status = isci_remote_device_construct(isci_port, isci_device); + + dev_dbg(&isci_host->pdev->dev, + "%s: isci_device = %p\n", + __func__, isci_device); + + if (status == SCI_SUCCESS) { + /* device came up, advertise it to the world */ + dev->lldd_dev = isci_device; + } else + isci_put_device(isci_device); + spin_unlock_irq(&isci_host->scic_lock); + + /* wait for the device ready callback. */ + wait_for_device_start(isci_host, isci_device); + + return status == SCI_SUCCESS ? 0 : -ENODEV; +} + +enum sci_status isci_remote_device_suspend_terminate( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + unsigned long flags; + enum sci_status status; + + /* Put the device into suspension. */ + spin_lock_irqsave(&ihost->scic_lock, flags); + set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags); + sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + /* Terminate and wait for the completions. 
*/ + status = isci_remote_device_terminate_requests(ihost, idev, ireq); + if (status != SCI_SUCCESS) + dev_dbg(&ihost->pdev->dev, + "%s: isci_remote_device_terminate_requests(%p) " + "returned %d!\n", + __func__, idev, status); + + /* NOTE: RNC resumption is left to the caller! */ + return status; +} + +int isci_remote_device_is_safe_to_abort( + struct isci_remote_device *idev) +{ + return sci_remote_node_context_is_safe_to_abort(&idev->rnc); +} + +enum sci_status sci_remote_device_abort_requests_pending_abort( + struct isci_remote_device *idev) +{ + return sci_remote_device_terminate_reqs_checkabort(idev, 1); +} + +enum sci_status isci_remote_device_reset_complete( + struct isci_host *ihost, + struct isci_remote_device *idev) +{ + unsigned long flags; + enum sci_status status; + + spin_lock_irqsave(&ihost->scic_lock, flags); + status = sci_remote_device_reset_complete(idev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + return status; +} + +void isci_dev_set_hang_detection_timeout( + struct isci_remote_device *idev, + u32 timeout) +{ + if (dev_is_sata(idev->domain_dev)) { + if (timeout) { + if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED, + &idev->flags)) + return; /* Already enabled. */ + } else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED, + &idev->flags)) + return; /* Not enabled. */ + + sci_port_set_hang_detection_timeout(idev->owning_port, + timeout); + } +} diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h new file mode 100644 index 000000000..3ad681c4c --- /dev/null +++ b/drivers/scsi/isci/remote_device.h @@ -0,0 +1,382 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _ISCI_REMOTE_DEVICE_H_ +#define _ISCI_REMOTE_DEVICE_H_ +#include +#include +#include "scu_remote_node_context.h" +#include "remote_node_context.h" +#include "port.h" + +enum sci_remote_device_not_ready_reason_code { + SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED, + SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED, + SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED, + SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED, + SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED, + SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX +}; + +/** + * isci_remote_device - isci representation of a sas expander / end point + * @device_port_width: hw setting for number of simultaneous connections + * @connection_rate: per-taskcontext connection rate for this device + * @working_request: SATA requests have no tag we for unaccelerated + * protocols we need a method to associate unsolicited + * frames with a pending request + */ +struct isci_remote_device { + #define IDEV_START_PENDING 0 + #define IDEV_STOP_PENDING 1 + #define IDEV_ALLOCATED 2 + #define IDEV_GONE 3 + #define IDEV_IO_READY 4 + #define IDEV_IO_NCQERROR 5 + #define IDEV_RNC_LLHANG_ENABLED 6 + #define IDEV_ABORT_PATH_ACTIVE 7 + #define IDEV_ABORT_PATH_RESUME_PENDING 8 + unsigned long flags; + struct kref kref; + struct isci_port *isci_port; + struct domain_device *domain_dev; + struct list_head node; + struct sci_base_state_machine sm; + u32 device_port_width; + enum sas_linkrate connection_rate; + struct isci_port *owning_port; + struct sci_remote_node_context rnc; + /* XXX unify with device reference counting and delete */ + u32 started_request_count; + struct isci_request *working_request; + u32 not_ready_reason; + scics_sds_remote_node_context_callback abort_resume_cb; + void *abort_resume_cbparam; +}; + +#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 + +/* device reference routines must be called under sci_lock */ +static inline struct isci_remote_device *isci_get_device( + struct isci_remote_device *idev) +{ + if (idev) + kref_get(&idev->kref); + return idev; +} + +static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) +{ + struct isci_remote_device *idev = dev->lldd_dev; + + if (idev && !test_bit(IDEV_GONE, &idev->flags)) { + kref_get(&idev->kref); + return idev; + } + + return NULL; +} + +void isci_remote_device_release(struct kref *kref); +static inline void isci_put_device(struct isci_remote_device *idev) +{ + if (idev) + kref_put(&idev->kref, isci_remote_device_release); +} + +enum sci_status isci_remote_device_stop(struct isci_host *ihost, + struct isci_remote_device *idev); +void isci_remote_device_nuke_requests(struct isci_host *ihost, + struct isci_remote_device *idev); +void 
isci_remote_device_gone(struct domain_device *domain_dev); +int isci_remote_device_found(struct domain_device *domain_dev); + +/** + * sci_remote_device_stop() - This method will stop both transmission and + * reception of link activity for the supplied remote device. This method + * disables normal IO requests from flowing through to the remote device. + * @remote_device: This parameter specifies the device to be stopped. + * @timeout: This parameter specifies the number of milliseconds in which the + * stop operation should complete. + * + * Return: An indication of whether the device was successfully stopped. + * SCI_SUCCESS is returned if transmission and reception for the device were + * successfully stopped. + */ +enum sci_status sci_remote_device_stop( + struct isci_remote_device *idev, + u32 timeout); + +/** + * sci_remote_device_reset() - This method will reset the device making it + * ready for operation. This method must be called anytime the device is + * reset either through an SMP phy control or a port hard reset request. + * @remote_device: This parameter specifies the device to be reset. + * + * This method does not actually cause the device hardware to be reset. This + * method resets the software object so that it will be operational after a + * device hardware reset completes. + * + * Return: An indication of whether the device reset was accepted. SCI_SUCCESS + * is returned if the device reset is started. + */ +enum sci_status sci_remote_device_reset( + struct isci_remote_device *idev); + +/** + * sci_remote_device_reset_complete() - This method informs the device object + * that the reset operation is complete and the device can resume operation + * again. + * @remote_device: This parameter specifies the device which is to be informed + * of the reset complete operation. + * + * Return: SCI_SUCCESS, indicating that the device is resuming operation. + */ +enum sci_status sci_remote_device_reset_complete( + struct isci_remote_device *idev); + +/** + * enum sci_remote_device_states - This enumeration depicts all the states + * for the common remote device state machine. + * @SCI_DEV_INITIAL: Simply the initial state for the base remote device + * state machine. + * + * @SCI_DEV_STOPPED: This state indicates that the remote device has + * successfully been stopped. In this state no new IO operations are + * permitted. This state is entered from the INITIAL state. This state + * is entered from the STOPPING state. + * + * @SCI_DEV_STARTING: This state indicates that the remote device is in + * the process of becoming ready (i.e. starting). In this state no new + * IO operations are permitted. This state is entered from the STOPPED + * state. + * + * @SCI_DEV_READY: This state indicates the remote device is now ready. + * Thus, the user is able to perform IO operations on the remote device. + * This state is entered from the STARTING state. + * + * @SCI_STP_DEV_IDLE: This is the idle substate for the stp remote + * device. When there are no active IOs for the device, it is in this + * state. + * + * @SCI_STP_DEV_CMD: This is the command state for the STP remote + * device. This state is entered when the device is processing a + * non-NCQ command. The device object will fail any new start IO + * requests until this command is complete. + * + * @SCI_STP_DEV_NCQ: This is the NCQ state for the STP remote device. + * This state is entered when the device is processing an NCQ request.
+ * It will remain in this state so long as there are one or more NCQ + * requests being processed. + * + * @SCI_STP_DEV_NCQ_ERROR: This is the NCQ error state for the STP + * remote device. This state is entered when an SDB error FIS is + * received by the device object while in the NCQ state. The device + * object will only accept a READ LOG command while in this state. + * + * @SCI_STP_DEV_ATAPI_ERROR: This is the ATAPI error state for the STP + * ATAPI remote device. This state is entered when the ATAPI device + * sends an error status FIS without data while the device object is in + * the CMD state. A suspension event is expected in this state. The + * device object will resume right away. + * + * @SCI_STP_DEV_AWAIT_RESET: This READY substate indicates that the + * device is waiting for a RESET task in order to recover from a + * hardware specific error. + * + * @SCI_SMP_DEV_IDLE: This is the ready operational substate for the + * remote device. This is the normal operational state for a remote + * device. + * + * @SCI_SMP_DEV_CMD: This is the suspended state for the remote device. + * This is the state that the device is placed in when an RNC suspend is + * received by the SCU hardware. + * + * @SCI_DEV_STOPPING: This state indicates that the remote device is in + * the process of stopping. In this state no new IO operations are + * permitted, but existing IO operations are allowed to complete. This + * state is entered from the READY state. This state is entered from + * the FAILED state. + * + * @SCI_DEV_FAILED: This state indicates that the remote device has + * failed. In this state no new IO operations are permitted. This + * state is entered from the INITIALIZING state. This state is entered + * from the READY state. + * + * @SCI_DEV_RESETTING: This state indicates the device is being reset. + * In this state no new IO operations are permitted. This state is + * entered from the READY state. + * + * @SCI_DEV_FINAL: Simply the final state for the base remote device + * state machine. 
+ */ +#define REMOTE_DEV_STATES {\ + C(DEV_INITIAL),\ + C(DEV_STOPPED),\ + C(DEV_STARTING),\ + C(DEV_READY),\ + C(STP_DEV_IDLE),\ + C(STP_DEV_CMD),\ + C(STP_DEV_NCQ),\ + C(STP_DEV_NCQ_ERROR),\ + C(STP_DEV_ATAPI_ERROR),\ + C(STP_DEV_AWAIT_RESET),\ + C(SMP_DEV_IDLE),\ + C(SMP_DEV_CMD),\ + C(DEV_STOPPING),\ + C(DEV_FAILED),\ + C(DEV_RESETTING),\ + C(DEV_FINAL),\ + } +#undef C +#define C(a) SCI_##a +enum sci_remote_device_states REMOTE_DEV_STATES; +#undef C +const char *dev_state_name(enum sci_remote_device_states state); + +static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc) +{ + struct isci_remote_device *idev; + + idev = container_of(rnc, typeof(*idev), rnc); + + return idev; +} + +static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev) +{ + /* XXX delete this voodoo when converting to the top-level device + * reference count + */ + if (WARN_ONCE(idev->started_request_count == 0, + "%s: tried to decrement started_request_count past 0!?", + __func__)) + /* pass */; + else + idev->started_request_count--; +} + +void isci_dev_set_hang_detection_timeout(struct isci_remote_device *idev, u32 timeout); + +enum sci_status sci_remote_device_frame_handler( + struct isci_remote_device *idev, + u32 frame_index); + +enum sci_status sci_remote_device_event_handler( + struct isci_remote_device *idev, + u32 event_code); + +enum sci_status sci_remote_device_start_io( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sci_status sci_remote_device_start_task( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sci_status sci_remote_device_complete_io( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +void sci_remote_device_post_request( + struct isci_remote_device *idev, + u32 request); + +enum sci_status sci_remote_device_terminate_requests( + struct isci_remote_device *idev); + +int isci_remote_device_is_safe_to_abort( + struct isci_remote_device *idev); + +enum sci_status +sci_remote_device_abort_requests_pending_abort( + struct isci_remote_device *idev); + +enum sci_status isci_remote_device_suspend( + struct isci_host *ihost, + struct isci_remote_device *idev); + +enum sci_status sci_remote_device_resume( + struct isci_remote_device *idev, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p); + +enum sci_status isci_remote_device_resume_from_abort( + struct isci_host *ihost, + struct isci_remote_device *idev); + +enum sci_status isci_remote_device_reset( + struct isci_host *ihost, + struct isci_remote_device *idev); + +enum sci_status isci_remote_device_reset_complete( + struct isci_host *ihost, + struct isci_remote_device *idev); + +enum sci_status isci_remote_device_suspend_terminate( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sci_status isci_remote_device_terminate_requests( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); +enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, + enum sci_remote_node_suspension_reasons reason); +#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c new file mode 100644 index 000000000..77ba02911 --- /dev/null +++ b/drivers/scsi/isci/remote_node_context.c @@ -0,0 +1,805 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. 
When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include "host.h" +#include "isci.h" +#include "remote_device.h" +#include "remote_node_context.h" +#include "scu_event_codes.h" +#include "scu_task_context.h" + +#undef C +#define C(a) (#a) +const char *rnc_state_name(enum scis_sds_remote_node_context_states state) +{ + static const char * const strings[] = RNC_STATES; + + if (state >= ARRAY_SIZE(strings)) + return "UNKNOWN"; + + return strings[state]; +} +#undef C + +/** + * sci_remote_node_context_is_ready() + * @sci_rnc: The state of the remote node context object to check. + * + * This method will return true if the remote node context is in a READY state + * otherwise it will return false bool true if the remote node context is in + * the ready state. false if the remote node context is not in the ready state. 
+ */ +bool sci_remote_node_context_is_ready( + struct sci_remote_node_context *sci_rnc) +{ + u32 current_state = sci_rnc->sm.current_state_id; + + if (current_state == SCI_RNC_READY) { + return true; + } + + return false; +} + +bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc) +{ + u32 current_state = sci_rnc->sm.current_state_id; + + if (current_state == SCI_RNC_TX_RX_SUSPENDED) + return true; + return false; +} + +static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) +{ + if (id < ihost->remote_node_entries && + ihost->device_table[id]) + return &ihost->remote_node_context_table[id]; + + return NULL; +} + +static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc) +{ + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); + struct domain_device *dev = idev->domain_dev; + int rni = sci_rnc->remote_node_index; + union scu_remote_node_context *rnc; + struct isci_host *ihost; + __le64 sas_addr; + + ihost = idev->owning_port->owning_controller; + rnc = sci_rnc_by_id(ihost, rni); + + memset(rnc, 0, sizeof(union scu_remote_node_context) + * sci_remote_device_node_count(idev)); + + rnc->ssp.remote_node_index = rni; + rnc->ssp.remote_node_port_width = idev->device_port_width; + rnc->ssp.logical_port_index = idev->owning_port->physical_port_index; + + /* sas address is __be64, context ram format is __le64 */ + sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr)); + rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr); + rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr); + + rnc->ssp.nexus_loss_timer_enable = true; + rnc->ssp.check_bit = false; + rnc->ssp.is_valid = false; + rnc->ssp.is_remote_node_context = true; + rnc->ssp.function_number = 0; + + rnc->ssp.arbitration_wait_time = 0; + + if (dev_is_sata(dev)) { + rnc->ssp.connection_occupancy_timeout = + ihost->user_parameters.stp_max_occupancy_timeout; + rnc->ssp.connection_inactivity_timeout = + ihost->user_parameters.stp_inactivity_timeout; + } else { + rnc->ssp.connection_occupancy_timeout = + ihost->user_parameters.ssp_max_occupancy_timeout; + rnc->ssp.connection_inactivity_timeout = + ihost->user_parameters.ssp_inactivity_timeout; + } + + rnc->ssp.initial_arbitration_wait_time = 0; + + /* Open Address Frame Parameters */ + rnc->ssp.oaf_connection_rate = idev->connection_rate; + rnc->ssp.oaf_features = 0; + rnc->ssp.oaf_source_zone_group = 0; + rnc->ssp.oaf_more_compatibility_features = 0; +} +/* + * This method will setup the remote node context object so it will transition + * to its ready state. If the remote node context is already setup to + * transition to its final state then this function does nothing. 
none + */ +static void sci_remote_node_context_setup_to_resume( + struct sci_remote_node_context *sci_rnc, + scics_sds_remote_node_context_callback callback, + void *callback_parameter, + enum sci_remote_node_context_destination_state dest_param) +{ + if (sci_rnc->destination_state != RNC_DEST_FINAL) { + sci_rnc->destination_state = dest_param; + if (callback != NULL) { + sci_rnc->user_callback = callback; + sci_rnc->user_cookie = callback_parameter; + } + } +} + +static void sci_remote_node_context_setup_to_destroy( + struct sci_remote_node_context *sci_rnc, + scics_sds_remote_node_context_callback callback, + void *callback_parameter) +{ + struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc)); + + sci_rnc->destination_state = RNC_DEST_FINAL; + sci_rnc->user_callback = callback; + sci_rnc->user_cookie = callback_parameter; + + wake_up(&ihost->eventq); +} + +/* + * This method just calls the user callback function and then resets the + * callback. + */ +static void sci_remote_node_context_notify_user( + struct sci_remote_node_context *rnc) +{ + if (rnc->user_callback != NULL) { + (*rnc->user_callback)(rnc->user_cookie); + + rnc->user_callback = NULL; + rnc->user_cookie = NULL; + } +} + +static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc) +{ + switch (rnc->destination_state) { + case RNC_DEST_READY: + case RNC_DEST_SUSPENDED_RESUME: + rnc->destination_state = RNC_DEST_READY; + fallthrough; + case RNC_DEST_FINAL: + sci_remote_node_context_resume(rnc, rnc->user_callback, + rnc->user_cookie); + break; + default: + rnc->destination_state = RNC_DEST_UNSPECIFIED; + break; + } +} + +static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc) +{ + union scu_remote_node_context *rnc_buffer; + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); + struct domain_device *dev = idev->domain_dev; + struct isci_host *ihost = idev->owning_port->owning_controller; + + rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); + + rnc_buffer->ssp.is_valid = true; + + if (dev_is_sata(dev) && dev->parent) { + sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); + } else { + sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); + + if (!dev->parent) + sci_port_setup_transports(idev->owning_port, + sci_rnc->remote_node_index); + } +} + +static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc) +{ + union scu_remote_node_context *rnc_buffer; + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); + struct isci_host *ihost = idev->owning_port->owning_controller; + + rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); + + rnc_buffer->ssp.is_valid = false; + + sci_remote_device_post_request(rnc_to_dev(sci_rnc), + SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE); +} + +static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct isci_remote_device *idev = rnc_to_dev(rnc); + struct isci_host *ihost = idev->owning_port->owning_controller; + + /* Check to see if we have gotten back to the initial state because + * someone requested to destroy the remote node context object. 
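+ *
+ * Editor's sketch of the destruct hand-off that ends up here; the
+ * callback name is hypothetical and the IDEV_STOP_PENDING flag is
+ * assumed from the wider driver, only the destruct call itself comes
+ * from this file:
+ *
+ *	static void my_rnc_destruct_done(void *cb_p)
+ *	{
+ *		struct isci_remote_device *idev = cb_p;
+ *
+ *		clear_bit(IDEV_STOP_PENDING, &idev->flags);
+ *	}
+ *
+ *	sci_remote_node_context_destruct(&idev->rnc, my_rnc_destruct_done,
+ *					 idev);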
+ */ + if (sm->previous_state_id == SCI_RNC_INVALIDATING) { + rnc->destination_state = RNC_DEST_UNSPECIFIED; + sci_remote_node_context_notify_user(rnc); + + smp_wmb(); + wake_up(&ihost->eventq); + } +} + +static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm); + + sci_remote_node_context_validate_context_buffer(sci_rnc); +} + +static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + + /* Terminate all outstanding requests. */ + sci_remote_device_terminate_requests(rnc_to_dev(rnc)); + sci_remote_node_context_invalidate_context_buffer(rnc); +} + +static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct isci_remote_device *idev; + struct domain_device *dev; + + idev = rnc_to_dev(rnc); + dev = idev->domain_dev; + + /* + * For direct attached SATA devices we need to clear the TLCR + * NCQ to TCi tag mapping on the phy and in cases where we + * resume because of a target reset we also need to update + * the STPTLDARNI register with the RNi of the device + */ + if (dev_is_sata(dev) && !dev->parent) + sci_port_setup_transports(idev->owning_port, rnc->remote_node_index); + + sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); +} + +static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + enum sci_remote_node_context_destination_state dest_select; + int tell_user = 1; + + dest_select = rnc->destination_state; + rnc->destination_state = RNC_DEST_UNSPECIFIED; + + if ((dest_select == RNC_DEST_SUSPENDED) || + (dest_select == RNC_DEST_SUSPENDED_RESUME)) { + sci_remote_node_context_suspend( + rnc, rnc->suspend_reason, + SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT); + + if (dest_select == RNC_DEST_SUSPENDED_RESUME) + tell_user = 0; /* Wait until ready again. */ + } + if (tell_user) + sci_remote_node_context_notify_user(rnc); +} + +static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + + sci_remote_node_context_continue_state_transitions(rnc); +} + +static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct isci_remote_device *idev = rnc_to_dev(rnc); + struct isci_host *ihost = idev->owning_port->owning_controller; + u32 new_count = rnc->suspend_count + 1; + + if (new_count == 0) + rnc->suspend_count = 1; + else + rnc->suspend_count = new_count; + smp_wmb(); + + /* Terminate outstanding requests pending abort. 
*/ + sci_remote_device_abort_requests_pending_abort(idev); + + wake_up(&ihost->eventq); + sci_remote_node_context_continue_state_transitions(rnc); +} + +static void sci_remote_node_context_await_suspend_state_exit( + struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *rnc + = container_of(sm, typeof(*rnc), sm); + struct isci_remote_device *idev = rnc_to_dev(rnc); + + if (dev_is_sata(idev->domain_dev)) + isci_dev_set_hang_detection_timeout(idev, 0); +} + +static const struct sci_base_state sci_remote_node_context_state_table[] = { + [SCI_RNC_INITIAL] = { + .enter_state = sci_remote_node_context_initial_state_enter, + }, + [SCI_RNC_POSTING] = { + .enter_state = sci_remote_node_context_posting_state_enter, + }, + [SCI_RNC_INVALIDATING] = { + .enter_state = sci_remote_node_context_invalidating_state_enter, + }, + [SCI_RNC_RESUMING] = { + .enter_state = sci_remote_node_context_resuming_state_enter, + }, + [SCI_RNC_READY] = { + .enter_state = sci_remote_node_context_ready_state_enter, + }, + [SCI_RNC_TX_SUSPENDED] = { + .enter_state = sci_remote_node_context_tx_suspended_state_enter, + }, + [SCI_RNC_TX_RX_SUSPENDED] = { + .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, + }, + [SCI_RNC_AWAIT_SUSPENSION] = { + .exit_state = sci_remote_node_context_await_suspend_state_exit, + }, +}; + +void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, + u16 remote_node_index) +{ + memset(rnc, 0, sizeof(struct sci_remote_node_context)); + + rnc->remote_node_index = remote_node_index; + rnc->destination_state = RNC_DEST_UNSPECIFIED; + + sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); +} + +enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, + u32 event_code) +{ + enum scis_sds_remote_node_context_states state; + u32 next_state; + + state = sci_rnc->sm.current_state_id; + switch (state) { + case SCI_RNC_POSTING: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_POST_RNC_COMPLETE: + sci_change_state(&sci_rnc->sm, SCI_RNC_READY); + break; + default: + goto out; + } + break; + case SCI_RNC_INVALIDATING: + if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) { + if (sci_rnc->destination_state == RNC_DEST_FINAL) + next_state = SCI_RNC_INITIAL; + else + next_state = SCI_RNC_POSTING; + sci_change_state(&sci_rnc->sm, next_state); + } else { + switch (scu_get_event_type(event_code)) { + case SCU_EVENT_TYPE_RNC_SUSPEND_TX: + case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: + /* We really dont care if the hardware is going to suspend + * the device since it's being invalidated anyway */ + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: SCIC Remote Node Context 0x%p was " + "suspended by hardware while being " + "invalidated.\n", __func__, sci_rnc); + break; + default: + goto out; + } + } + break; + case SCI_RNC_RESUMING: + if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) { + sci_change_state(&sci_rnc->sm, SCI_RNC_READY); + } else { + switch (scu_get_event_type(event_code)) { + case SCU_EVENT_TYPE_RNC_SUSPEND_TX: + case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: + /* We really dont care if the hardware is going to suspend + * the device since it's being resumed anyway */ + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: SCIC Remote Node Context 0x%p was " + "suspended by hardware while being resumed.\n", + __func__, sci_rnc); + break; + default: + goto out; + } + } + break; + case SCI_RNC_READY: + switch (scu_get_event_type(event_code)) { + case 
SCU_EVENT_TL_RNC_SUSPEND_TX: + sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); + sci_rnc->suspend_type = scu_get_event_type(event_code); + break; + case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: + sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); + sci_rnc->suspend_type = scu_get_event_type(event_code); + break; + default: + goto out; + } + break; + case SCI_RNC_AWAIT_SUSPENSION: + switch (scu_get_event_type(event_code)) { + case SCU_EVENT_TL_RNC_SUSPEND_TX: + next_state = SCI_RNC_TX_SUSPENDED; + break; + case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: + next_state = SCI_RNC_TX_RX_SUSPENDED; + break; + default: + goto out; + } + if (sci_rnc->suspend_type == scu_get_event_type(event_code)) + sci_change_state(&sci_rnc->sm, next_state); + break; + default: + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state: %s\n", __func__, + rnc_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } + return SCI_SUCCESS; + + out: + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: code: %#x state: %s\n", __func__, event_code, + rnc_state_name(state)); + return SCI_FAILURE; + +} + +enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p) +{ + enum scis_sds_remote_node_context_states state; + + state = sci_rnc->sm.current_state_id; + switch (state) { + case SCI_RNC_INVALIDATING: + sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); + return SCI_SUCCESS; + case SCI_RNC_POSTING: + case SCI_RNC_RESUMING: + case SCI_RNC_READY: + case SCI_RNC_TX_SUSPENDED: + case SCI_RNC_TX_RX_SUSPENDED: + sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); + sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); + return SCI_SUCCESS; + case SCI_RNC_AWAIT_SUSPENSION: + sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); + return SCI_SUCCESS; + case SCI_RNC_INITIAL: + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state: %s\n", __func__, + rnc_state_name(state)); + /* We have decided that the destruct request on the remote node context + * can not fail since it is either in the initial/destroyed state or is + * can be destroyed. + */ + return SCI_SUCCESS; + default: + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state %s\n", __func__, + rnc_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } +} + +enum sci_status sci_remote_node_context_suspend( + struct sci_remote_node_context *sci_rnc, + enum sci_remote_node_suspension_reasons suspend_reason, + u32 suspend_type) +{ + enum scis_sds_remote_node_context_states state + = sci_rnc->sm.current_state_id; + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); + enum sci_status status = SCI_FAILURE_INVALID_STATE; + enum sci_remote_node_context_destination_state dest_param = + RNC_DEST_UNSPECIFIED; + + dev_dbg(scirdev_to_dev(idev), + "%s: current state %s, current suspend_type %x dest state %d," + " arg suspend_reason %d, arg suspend_type %x", + __func__, rnc_state_name(state), sci_rnc->suspend_type, + sci_rnc->destination_state, suspend_reason, + suspend_type); + + /* Disable automatic state continuations if explicitly suspending. 
*/ + if ((suspend_reason == SCI_HW_SUSPEND) || + (sci_rnc->destination_state == RNC_DEST_FINAL)) + dest_param = sci_rnc->destination_state; + + switch (state) { + case SCI_RNC_READY: + break; + case SCI_RNC_INVALIDATING: + if (sci_rnc->destination_state == RNC_DEST_FINAL) { + dev_warn(scirdev_to_dev(idev), + "%s: already destroying %p\n", + __func__, sci_rnc); + return SCI_FAILURE_INVALID_STATE; + } + fallthrough; /* and handle like SCI_RNC_POSTING */ + case SCI_RNC_RESUMING: + fallthrough; /* and handle like SCI_RNC_POSTING */ + case SCI_RNC_POSTING: + /* Set the destination state to AWAIT - this signals the + * entry into the SCI_RNC_READY state that a suspension + * needs to be done immediately. + */ + if (sci_rnc->destination_state != RNC_DEST_FINAL) + sci_rnc->destination_state = RNC_DEST_SUSPENDED; + sci_rnc->suspend_type = suspend_type; + sci_rnc->suspend_reason = suspend_reason; + return SCI_SUCCESS; + + case SCI_RNC_TX_SUSPENDED: + if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX) + status = SCI_SUCCESS; + break; + case SCI_RNC_TX_RX_SUSPENDED: + if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX) + status = SCI_SUCCESS; + break; + case SCI_RNC_AWAIT_SUSPENSION: + if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX) + || (suspend_type == sci_rnc->suspend_type)) + return SCI_SUCCESS; + break; + default: + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state %s\n", __func__, + rnc_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } + sci_rnc->destination_state = dest_param; + sci_rnc->suspend_type = suspend_type; + sci_rnc->suspend_reason = suspend_reason; + + if (status == SCI_SUCCESS) { /* Already in the destination state? */ + struct isci_host *ihost = idev->owning_port->owning_controller; + + wake_up_all(&ihost->eventq); /* Let observers look. */ + return SCI_SUCCESS; + } + if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) || + (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) { + + if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT) + isci_dev_set_hang_detection_timeout(idev, 0x00000001); + + sci_remote_device_post_request( + idev, SCI_SOFTWARE_SUSPEND_CMD); + } + if (state != SCI_RNC_AWAIT_SUSPENSION) + sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION); + + return SCI_SUCCESS; +} + +enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p) +{ + enum scis_sds_remote_node_context_states state; + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); + + state = sci_rnc->sm.current_state_id; + dev_dbg(scirdev_to_dev(idev), + "%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; " + "dev resume path %s\n", + __func__, rnc_state_name(state), cb_fn, cb_p, + sci_rnc->destination_state, + test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags) + ? "" : ""); + + switch (state) { + case SCI_RNC_INITIAL: + if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) + return SCI_FAILURE_INVALID_STATE; + + sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p, + RNC_DEST_READY); + if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) { + sci_remote_node_context_construct_buffer(sci_rnc); + sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); + } + return SCI_SUCCESS; + + case SCI_RNC_POSTING: + case SCI_RNC_INVALIDATING: + case SCI_RNC_RESUMING: + /* We are still waiting to post when a resume was + * requested. 
+ */ + switch (sci_rnc->destination_state) { + case RNC_DEST_SUSPENDED: + case RNC_DEST_SUSPENDED_RESUME: + /* Previously waiting to suspend after posting. + * Now continue onto resumption. + */ + sci_remote_node_context_setup_to_resume( + sci_rnc, cb_fn, cb_p, + RNC_DEST_SUSPENDED_RESUME); + break; + default: + sci_remote_node_context_setup_to_resume( + sci_rnc, cb_fn, cb_p, + RNC_DEST_READY); + break; + } + return SCI_SUCCESS; + + case SCI_RNC_TX_SUSPENDED: + case SCI_RNC_TX_RX_SUSPENDED: + { + struct domain_device *dev = idev->domain_dev; + /* If this is an expander attached SATA device we must + * invalidate and repost the RNC since this is the only + * way to clear the TCi to NCQ tag mapping table for + * the RNi. All other device types we can just resume. + */ + sci_remote_node_context_setup_to_resume( + sci_rnc, cb_fn, cb_p, RNC_DEST_READY); + + if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) { + if ((dev_is_sata(dev) && dev->parent) || + (sci_rnc->destination_state == RNC_DEST_FINAL)) + sci_change_state(&sci_rnc->sm, + SCI_RNC_INVALIDATING); + else + sci_change_state(&sci_rnc->sm, + SCI_RNC_RESUMING); + } + } + return SCI_SUCCESS; + + case SCI_RNC_AWAIT_SUSPENSION: + sci_remote_node_context_setup_to_resume( + sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME); + return SCI_SUCCESS; + default: + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state %s\n", __func__, + rnc_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } +} + +enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, + struct isci_request *ireq) +{ + enum scis_sds_remote_node_context_states state; + + state = sci_rnc->sm.current_state_id; + + switch (state) { + case SCI_RNC_READY: + return SCI_SUCCESS; + case SCI_RNC_TX_SUSPENDED: + case SCI_RNC_TX_RX_SUSPENDED: + case SCI_RNC_AWAIT_SUSPENSION: + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state %s\n", __func__, + rnc_state_name(state)); + return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; + default: + dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state %s\n", __func__, + rnc_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } +} + +enum sci_status sci_remote_node_context_start_task( + struct sci_remote_node_context *sci_rnc, + struct isci_request *ireq, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p) +{ + enum sci_status status = sci_remote_node_context_resume(sci_rnc, + cb_fn, cb_p); + if (status != SCI_SUCCESS) + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: resume failed: %d\n", __func__, status); + return status; +} + +int sci_remote_node_context_is_safe_to_abort( + struct sci_remote_node_context *sci_rnc) +{ + enum scis_sds_remote_node_context_states state; + + state = sci_rnc->sm.current_state_id; + switch (state) { + case SCI_RNC_INVALIDATING: + case SCI_RNC_TX_RX_SUSPENDED: + return 1; + case SCI_RNC_POSTING: + case SCI_RNC_RESUMING: + case SCI_RNC_READY: + case SCI_RNC_TX_SUSPENDED: + case SCI_RNC_AWAIT_SUSPENSION: + case SCI_RNC_INITIAL: + return 0; + default: + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state %d\n", __func__, state); + return 0; + } +} diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h new file mode 100644 index 000000000..c7ee81d01 --- /dev/null +++ b/drivers/scsi/isci/remote_node_context.h @@ -0,0 +1,236 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ +#define _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ + +/** + * This file contains the structures, constants, and prototypes associated with + * the remote node context in the silicon. It exists to model and manage + * the remote node context in the silicon. + * + * + */ + +#include "isci.h" + +/** + * + * + * This constant represents an invalid remote device id, it is used to program + * the STPDARNI register so the driver knows when it has received a SIGNATURE + * FIS from the SCU. 
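+ *
+ * Editor's example of how the sentinel is used; the
+ * ihost->available_remote_nodes table name and the failure code shown
+ * are assumed from the wider driver, while the allocator and node count
+ * macros are declared in remote_node_table.h:
+ *
+ *	rni = sci_remote_node_table_allocate_remote_node(
+ *		&ihost->available_remote_nodes, SCU_SSP_REMOTE_NODE_COUNT);
+ *	if (rni == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+ *		return SCI_FAILURE_INSUFFICIENT_RESOURCES;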
+ */ +#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF + +enum sci_remote_node_suspension_reasons { + SCI_HW_SUSPEND, + SCI_SW_SUSPEND_NORMAL, + SCI_SW_SUSPEND_LINKHANG_DETECT +}; +#define SCI_SOFTWARE_SUSPEND_CMD SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX +#define SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT SCU_EVENT_TL_RNC_SUSPEND_TX_RX + +struct isci_request; +struct isci_remote_device; +struct sci_remote_node_context; + +typedef void (*scics_sds_remote_node_context_callback)(void *); + +/** + * enum sci_remote_node_context_states + * @SCI_RNC_INITIAL initial state for a remote node context. On a resume + * request the remote node context will transition to the posting state. + * + * @SCI_RNC_POSTING: transition state that posts the RNi to the hardware. Once + * the RNC is posted the remote node context will be made ready. + * + * @SCI_RNC_INVALIDATING: transition state that will post an RNC invalidate to + * the hardware. Once the invalidate is complete the remote node context will + * transition to the posting state. + * + * @SCI_RNC_RESUMING: transition state that will post an RNC resume to the + * hardare. Once the event notification of resume complete is received the + * remote node context will transition to the ready state. + * + * @SCI_RNC_READY: state that the remote node context must be in to accept io + * request operations. + * + * @SCI_RNC_TX_SUSPENDED: state that the remote node context transitions to when + * it gets a TX suspend notification from the hardware. + * + * @SCI_RNC_TX_RX_SUSPENDED: state that the remote node context transitions to + * when it gets a TX RX suspend notification from the hardware. + * + * @SCI_RNC_AWAIT_SUSPENSION: wait state for the remote node context that waits + * for a suspend notification from the hardware. This state is entered when + * either there is a request to supend the remote node context or when there is + * a TC completion where the remote node will be suspended by the hardware. + */ +#define RNC_STATES {\ + C(RNC_INITIAL),\ + C(RNC_POSTING),\ + C(RNC_INVALIDATING),\ + C(RNC_RESUMING),\ + C(RNC_READY),\ + C(RNC_TX_SUSPENDED),\ + C(RNC_TX_RX_SUSPENDED),\ + C(RNC_AWAIT_SUSPENSION),\ + } +#undef C +#define C(a) SCI_##a +enum scis_sds_remote_node_context_states RNC_STATES; +#undef C +const char *rnc_state_name(enum scis_sds_remote_node_context_states state); + +/** + * + * + * This enumeration is used to define the end destination state for the remote + * node context. + */ +enum sci_remote_node_context_destination_state { + RNC_DEST_UNSPECIFIED, + RNC_DEST_READY, + RNC_DEST_FINAL, + RNC_DEST_SUSPENDED, /* Set when suspend during post/invalidate */ + RNC_DEST_SUSPENDED_RESUME /* Set when a resume was done during posting + * or invalidating and already suspending. + */ +}; + +/** + * struct sci_remote_node_context - This structure contains the data + * associated with the remote node context object. The remote node context + * (RNC) object models the the remote device information necessary to manage + * the silicon RNC. + */ +struct sci_remote_node_context { + /** + * This field indicates the remote node index (RNI) associated with + * this RNC. + */ + u16 remote_node_index; + + /** + * This field is the recored suspension type of the remote node + * context suspension. + */ + u32 suspend_type; + enum sci_remote_node_suspension_reasons suspend_reason; + u32 suspend_count; + + /** + * This field is true if the remote node context is resuming from its current + * state. 
This can cause an automatic resume on receiving a suspension + * notification. + */ + enum sci_remote_node_context_destination_state destination_state; + + /** + * This field contains the callback function that the user requested to be + * called when the requested state transition is complete. + */ + scics_sds_remote_node_context_callback user_callback; + + /** + * This field contains the parameter that is called when the user requested + * state transition is completed. + */ + void *user_cookie; + + /** + * This field contains the data for the object's state machine. + */ + struct sci_base_state_machine sm; +}; + +void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, + u16 remote_node_index); + + +bool sci_remote_node_context_is_ready( + struct sci_remote_node_context *sci_rnc); + +bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc); + +enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, + u32 event_code); +enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, + scics_sds_remote_node_context_callback callback, + void *callback_parameter); +enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, + enum sci_remote_node_suspension_reasons reason, + u32 suspension_code); +enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p); +enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, + struct isci_request *ireq, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p); +enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, + struct isci_request *ireq); +int sci_remote_node_context_is_safe_to_abort( + struct sci_remote_node_context *sci_rnc); + +static inline bool sci_remote_node_context_is_being_destroyed( + struct sci_remote_node_context *sci_rnc) +{ + return (sci_rnc->destination_state == RNC_DEST_FINAL) + || ((sci_rnc->sm.current_state_id == SCI_RNC_INITIAL) + && (sci_rnc->destination_state == RNC_DEST_UNSPECIFIED)); +} +#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c new file mode 100644 index 000000000..1bcaf528c --- /dev/null +++ b/drivers/scsi/isci/remote_node_table.c @@ -0,0 +1,598 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. 
All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE + * public, protected, and private methods. + */ +#include "remote_node_table.h" +#include "remote_node_context.h" + +/** + * sci_remote_node_table_get_group_index() + * @remote_node_table: This is the remote node index table from which the + * selection will be made. + * @group_table_index: This is the index to the group table from which to + * search for an available selection. + * + * This routine will find the bit position in absolute bit terms of the next 32 + * + bit position. If there are available bits in the first u32 then it is + * just bit position. u32 This is the absolute bit position for an available + * group. + */ +static u32 sci_remote_node_table_get_group_index( + struct sci_remote_node_table *remote_node_table, + u32 group_table_index) +{ + u32 dword_index; + u32 *group_table; + u32 bit_index; + + group_table = remote_node_table->remote_node_groups[group_table_index]; + + for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) { + if (group_table[dword_index] != 0) { + for (bit_index = 0; bit_index < 32; bit_index++) { + if ((group_table[dword_index] & (1 << bit_index)) != 0) { + return (dword_index * 32) + bit_index; + } + } + } + } + + return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX; +} + +/** + * sci_remote_node_table_clear_group_index() + * @remote_node_table: This the remote node table in which to clear the + * selector. + * @group_table_index: This is the remote node selector in which the change will be + * made. + * @group_index: This is the bit index in the table to be modified. + * + * This method will clear the group index entry in the specified group index + * table. 
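+ *
+ * Worked example (editor's addition): group_index 37 maps to
+ * dword_index 37 / 32 = 1 and bit_index 37 % 32 = 5, so bit 5 of the
+ * second dword of remote_node_groups[group_table_index] is cleared.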
none + */ +static void sci_remote_node_table_clear_group_index( + struct sci_remote_node_table *remote_node_table, + u32 group_table_index, + u32 group_index) +{ + u32 dword_index; + u32 bit_index; + u32 *group_table; + + BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT); + BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32)); + + dword_index = group_index / 32; + bit_index = group_index % 32; + group_table = remote_node_table->remote_node_groups[group_table_index]; + + group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index); +} + +/** + * sci_remote_node_table_set_group_index() + * @remote_node_table: This the remote node table in which to set the + * selector. + * @group_table_index: This is the remote node selector in which the change + * will be made. + * @group_index: This is the bit position in the table to be modified. + * + * This method will set the group index bit entry in the specified gropu index + * table. none + */ +static void sci_remote_node_table_set_group_index( + struct sci_remote_node_table *remote_node_table, + u32 group_table_index, + u32 group_index) +{ + u32 dword_index; + u32 bit_index; + u32 *group_table; + + BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT); + BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32)); + + dword_index = group_index / 32; + bit_index = group_index % 32; + group_table = remote_node_table->remote_node_groups[group_table_index]; + + group_table[dword_index] = group_table[dword_index] | (1 << bit_index); +} + +/** + * sci_remote_node_table_set_node_index() + * @remote_node_table: This is the remote node table in which to modify + * the remote node availability. + * @remote_node_index: This is the remote node index that is being returned to + * the table. + * + * This method will set the remote to available in the remote node allocation + * table. none + */ +static void sci_remote_node_table_set_node_index( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_index) +{ + u32 dword_location; + u32 dword_remainder; + u32 slot_normalized; + u32 slot_position; + + BUG_ON( + (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD) + <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT) + ); + + dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD; + dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD; + slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32); + slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT; + + remote_node_table->available_remote_nodes[dword_location] |= + 1 << (slot_normalized + slot_position); +} + +/** + * sci_remote_node_table_clear_node_index() + * @remote_node_table: This is the remote node table from which to clear + * the available remote node bit. + * @remote_node_index: This is the remote node index which is to be cleared + * from the table. + * + * This method clears the remote node index from the table of available remote + * nodes. 
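+ *
+ * Worked example (editor's addition), using the same decomposition as
+ * sci_remote_node_table_set_node_index() with
+ * SCIC_SDS_REMOTE_NODES_PER_DWORD equal to 24 and
+ * SCU_STP_REMOTE_NODE_COUNT equal to 3: remote_node_index 13 gives
+ * dword_location 0, dword_remainder 13, slot_normalized (13 / 3) * 4 = 16
+ * and slot_position 13 % 3 = 1, so bit 17 of available_remote_nodes[0]
+ * is cleared.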
none + */ +static void sci_remote_node_table_clear_node_index( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_index) +{ + u32 dword_location; + u32 dword_remainder; + u32 slot_position; + u32 slot_normalized; + + BUG_ON( + (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD) + <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT) + ); + + dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD; + dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD; + slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32); + slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT; + + remote_node_table->available_remote_nodes[dword_location] &= + ~(1 << (slot_normalized + slot_position)); +} + +/** + * sci_remote_node_table_clear_group() + * @remote_node_table: The remote node table from which the slot will be + * cleared. + * @group_index: The index for the slot that is to be cleared. + * + * This method clears the entire table slot at the specified slot index. none + */ +static void sci_remote_node_table_clear_group( + struct sci_remote_node_table *remote_node_table, + u32 group_index) +{ + u32 dword_location; + u32 dword_remainder; + u32 dword_value; + + BUG_ON( + (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD) + <= (group_index / SCU_STP_REMOTE_NODE_COUNT) + ); + + dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD; + dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD; + + dword_value = remote_node_table->available_remote_nodes[dword_location]; + dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4)); + remote_node_table->available_remote_nodes[dword_location] = dword_value; +} + +/* + * sci_remote_node_table_set_group() + * + * THis method sets an entire remote node group in the remote node table. + */ +static void sci_remote_node_table_set_group( + struct sci_remote_node_table *remote_node_table, + u32 group_index) +{ + u32 dword_location; + u32 dword_remainder; + u32 dword_value; + + BUG_ON( + (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD) + <= (group_index / SCU_STP_REMOTE_NODE_COUNT) + ); + + dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD; + dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD; + + dword_value = remote_node_table->available_remote_nodes[dword_location]; + dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4)); + remote_node_table->available_remote_nodes[dword_location] = dword_value; +} + +/** + * sci_remote_node_table_get_group_value() + * @remote_node_table: This is the remote node table that for which the group + * value is to be returned. + * @group_index: This is the group index to use to find the group value. + * + * This method will return the group value for the specified group index. The + * bit values at the specified remote node group index. 
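+ *
+ * Worked example (editor's addition), with
+ * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD equal to 8: group_index 5 gives
+ * dword_location 0 and dword_remainder 5, so the returned value is the
+ * nibble at bits 20..22 of available_remote_nodes[0], masked with
+ * SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE (0x07).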
+ */ +static u8 sci_remote_node_table_get_group_value( + struct sci_remote_node_table *remote_node_table, + u32 group_index) +{ + u32 dword_location; + u32 dword_remainder; + u32 dword_value; + + dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD; + dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD; + + dword_value = remote_node_table->available_remote_nodes[dword_location]; + dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4)); + dword_value = dword_value >> (dword_remainder * 4); + + return (u8)dword_value; +} + +/** + * sci_remote_node_table_initialize() + * @remote_node_table: The remote that which is to be initialized. + * @remote_node_entries: The number of entries to put in the table. + * + * This method will initialize the remote node table for use. none + */ +void sci_remote_node_table_initialize( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_entries) +{ + u32 index; + + /* + * Initialize the raw data we could improve the speed by only initializing + * those entries that we are actually going to be used */ + memset( + remote_node_table->available_remote_nodes, + 0x00, + sizeof(remote_node_table->available_remote_nodes) + ); + + memset( + remote_node_table->remote_node_groups, + 0x00, + sizeof(remote_node_table->remote_node_groups) + ); + + /* Initialize the available remote node sets */ + remote_node_table->available_nodes_array_size = (u16) + (remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD) + + ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0); + + + /* Initialize each full DWORD to a FULL SET of remote nodes */ + for (index = 0; index < remote_node_entries; index++) { + sci_remote_node_table_set_node_index(remote_node_table, index); + } + + remote_node_table->group_array_size = (u16) + (remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32)) + + ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0); + + for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) { + /* + * These are all guaranteed to be full slot values so fill them in the + * available sets of 3 remote nodes */ + sci_remote_node_table_set_group_index(remote_node_table, 2, index); + } + + /* Now fill in any remainders that we may find */ + if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) { + sci_remote_node_table_set_group_index(remote_node_table, 1, index); + } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) { + sci_remote_node_table_set_group_index(remote_node_table, 0, index); + } +} + +/** + * sci_remote_node_table_allocate_single_remote_node() + * @remote_node_table: The remote node table from which to allocate a + * remote node. + * @group_table_index: The group index that is to be used for the search. + * + * This method will allocate a single RNi from the remote node table. The + * table index will determine from which remote node group table to search. + * This search may fail and another group node table can be specified. The + * function is designed to allow a serach of the available single remote node + * group up to the triple remote node group. If an entry is found in the + * specified table the remote node is removed and the remote node groups are + * updated. The RNi value or an invalid remote node context if an RNi can not + * be found. 
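+ *
+ * Worked example (editor's addition): if group_index 4 is returned for
+ * the requested group table and bit 2 is the first set bit in its group
+ * value, the routine yields RNi 4 * SCU_STP_REMOTE_NODE_COUNT + 2 = 14,
+ * clears that RNi from the available-nodes bitmap and moves the group
+ * down to group table group_table_index - 1 (when one exists).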
+ */ +static u16 sci_remote_node_table_allocate_single_remote_node( + struct sci_remote_node_table *remote_node_table, + u32 group_table_index) +{ + u8 index; + u8 group_value; + u32 group_index; + u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; + + group_index = sci_remote_node_table_get_group_index( + remote_node_table, group_table_index); + + /* We could not find an available slot in the table selector 0 */ + if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) { + group_value = sci_remote_node_table_get_group_value( + remote_node_table, group_index); + + for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) { + if (((1 << index) & group_value) != 0) { + /* We have selected a bit now clear it */ + remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT + + index); + + sci_remote_node_table_clear_group_index( + remote_node_table, group_table_index, group_index + ); + + sci_remote_node_table_clear_node_index( + remote_node_table, remote_node_index + ); + + if (group_table_index > 0) { + sci_remote_node_table_set_group_index( + remote_node_table, group_table_index - 1, group_index + ); + } + + break; + } + } + } + + return remote_node_index; +} + +/** + * sci_remote_node_table_allocate_triple_remote_node() + * @remote_node_table: This is the remote node table from which to allocate the + * remote node entries. + * @group_table_index: This is the group table index which must equal two (2) + * for this operation. + * + * This method will allocate three consecutive remote node context entries. If + * there are no remaining triple entries the function will return a failure. + * The remote node index that represents three consecutive remote node entries + * or an invalid remote node context if none can be found. + */ +static u16 sci_remote_node_table_allocate_triple_remote_node( + struct sci_remote_node_table *remote_node_table, + u32 group_table_index) +{ + u32 group_index; + u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; + + group_index = sci_remote_node_table_get_group_index( + remote_node_table, group_table_index); + + if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) { + remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT; + + sci_remote_node_table_clear_group_index( + remote_node_table, group_table_index, group_index + ); + + sci_remote_node_table_clear_group( + remote_node_table, group_index + ); + } + + return remote_node_index; +} + +/** + * sci_remote_node_table_allocate_remote_node() + * @remote_node_table: This is the remote node table from which the remote node + * allocation is to take place. + * @remote_node_count: This is ther remote node count which is one of + * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). + * + * This method will allocate a remote node that mataches the remote node count + * specified by the caller. Valid values for remote node count is + * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). u16 This is + * the remote node index that is returned or an invalid remote node context. 
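+ *
+ * Editor's usage sketch; rnt stands for a caller held pointer to the
+ * struct sci_remote_node_table and the failure code shown is assumed:
+ *
+ *	rni = sci_remote_node_table_allocate_remote_node(rnt,
+ *			SCU_STP_REMOTE_NODE_COUNT);
+ *	if (rni == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+ *		return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+ *
+ *	(and later, paired with the same count)
+ *
+ *	sci_remote_node_table_release_remote_node_index(rnt,
+ *			SCU_STP_REMOTE_NODE_COUNT, rni);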
+ */ +u16 sci_remote_node_table_allocate_remote_node( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_count) +{ + u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; + + if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) { + remote_node_index = + sci_remote_node_table_allocate_single_remote_node( + remote_node_table, 0); + + if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { + remote_node_index = + sci_remote_node_table_allocate_single_remote_node( + remote_node_table, 1); + } + + if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { + remote_node_index = + sci_remote_node_table_allocate_single_remote_node( + remote_node_table, 2); + } + } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) { + remote_node_index = + sci_remote_node_table_allocate_triple_remote_node( + remote_node_table, 2); + } + + return remote_node_index; +} + +/** + * sci_remote_node_table_release_single_remote_node() + * @remote_node_table: This is the remote node table from which the remote node + * release is to take place. + * @remote_node_index: This is the remote node index that is being released. + * This method will free a single remote node index back to the remote node + * table. This routine will update the remote node groups + */ +static void sci_remote_node_table_release_single_remote_node( + struct sci_remote_node_table *remote_node_table, + u16 remote_node_index) +{ + u32 group_index; + u8 group_value; + + group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT; + + group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index); + + /* + * Assert that we are not trying to add an entry to a slot that is already + * full. */ + BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE); + + if (group_value == 0x00) { + /* + * There are no entries in this slot so it must be added to the single + * slot table. */ + sci_remote_node_table_set_group_index(remote_node_table, 0, group_index); + } else if ((group_value & (group_value - 1)) == 0) { + /* + * There is only one entry in this slot so it must be moved from the + * single slot table to the dual slot table */ + sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index); + sci_remote_node_table_set_group_index(remote_node_table, 1, group_index); + } else { + /* + * There are two entries in the slot so it must be moved from the dual + * slot table to the tripple slot table. */ + sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index); + sci_remote_node_table_set_group_index(remote_node_table, 2, group_index); + } + + sci_remote_node_table_set_node_index(remote_node_table, remote_node_index); +} + +/** + * sci_remote_node_table_release_triple_remote_node() + * @remote_node_table: This is the remote node table to which the remote node + * index is to be freed. + * @remote_node_index: This is the remote node index that is being released. + * + * This method will release a group of three consecutive remote nodes back to + * the free remote nodes. 
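+ *
+ * Worked example (editor's addition): releasing RNi 9 computes
+ * group_index 9 / SCU_STP_REMOTE_NODE_COUNT = 3, re-marks group 3 in
+ * remote_node_groups[2] and restores the full three-node nibble for that
+ * group in the available-nodes bitmap.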
+ */ +static void sci_remote_node_table_release_triple_remote_node( + struct sci_remote_node_table *remote_node_table, + u16 remote_node_index) +{ + u32 group_index; + + group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT; + + sci_remote_node_table_set_group_index( + remote_node_table, 2, group_index + ); + + sci_remote_node_table_set_group(remote_node_table, group_index); +} + +/** + * sci_remote_node_table_release_remote_node_index() + * @remote_node_table: The remote node table to which the remote node index is + * to be freed. + * @remote_node_count: This is the count of consecutive remote nodes that are + * to be freed. + * @remote_node_index: This is the remote node index that is being released. + * + * This method will release the remote node index back into the remote node + * table free pool. + */ +void sci_remote_node_table_release_remote_node_index( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_count, + u16 remote_node_index) +{ + if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) { + sci_remote_node_table_release_single_remote_node( + remote_node_table, remote_node_index); + } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) { + sci_remote_node_table_release_triple_remote_node( + remote_node_table, remote_node_index); + } +} + diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h new file mode 100644 index 000000000..0ddfdda2b --- /dev/null +++ b/drivers/scsi/isci/remote_node_table.h @@ -0,0 +1,188 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SCIC_SDS_REMOTE_NODE_TABLE_H_ +#define _SCIC_SDS_REMOTE_NODE_TABLE_H_ + +#include "isci.h" + +/** + * + * + * Remote node sets are sets of remote node index in the remote node table. The + * SCU hardware requires that STP remote node entries take three consecutive + * remote node index so the table is arranged in sets of three. The bits are + * used as 0111 0111 to make a byte and the bits define the set of three remote + * nodes to use as a sequence. + */ +#define SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE 2 + +/** + * + * + * Since the remote node table is organized as DWORDS take the remote node sets + * in bytes and represent them in DWORDs. The lowest ordered bits are the ones + * used in case full DWORD is not being used. i.e. 0000 0000 0000 0000 0111 + * 0111 0111 0111 // if only a single WORD is in use in the DWORD. + */ +#define SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD \ + (sizeof(u32) * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE) +/** + * + * + * This is a count of the numeber of remote nodes that can be represented in a + * byte + */ +#define SCIC_SDS_REMOTE_NODES_PER_BYTE \ + (SCU_STP_REMOTE_NODE_COUNT * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE) + +/** + * + * + * This is a count of the number of remote nodes that can be represented in a + * DWROD + */ +#define SCIC_SDS_REMOTE_NODES_PER_DWORD \ + (sizeof(u32) * SCIC_SDS_REMOTE_NODES_PER_BYTE) + +/** + * + * + * This is the number of bits in a remote node group + */ +#define SCIC_SDS_REMOTE_NODES_BITS_PER_GROUP 4 + +#define SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX (0xFFFFFFFF) +#define SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE (0x07) +#define SCIC_SDS_REMOTE_NODE_TABLE_EMPTY_SLOT_VALUE (0x00) + +/** + * + * + * Expander attached sata remote node count + */ +#define SCU_STP_REMOTE_NODE_COUNT 3 + +/** + * + * + * Expander or direct attached ssp remote node count + */ +#define SCU_SSP_REMOTE_NODE_COUNT 1 + +/** + * + * + * Direct attached STP remote node count + */ +#define SCU_SATA_REMOTE_NODE_COUNT 1 + +/** + * struct sci_remote_node_table - + * + * + */ +struct sci_remote_node_table { + /** + * This field contains the array size in dwords + */ + u16 available_nodes_array_size; + + /** + * This field contains the array size of the + */ + u16 group_array_size; + + /** + * This field is the array of available remote node entries in bits. + * Because of the way STP remote node data is allocated on the SCU hardware + * the remote nodes must occupy three consecutive remote node context + * entries. For ease of allocation and de-allocation we have broken the + * sets of three into a single nibble. When the STP RNi is allocated all + * of the bits in the nibble are cleared. 
This math results in a table size + * of MAX_REMOTE_NODES / CONSECUTIVE RNi ENTRIES for STP / 2 entries per byte. + */ + u32 available_remote_nodes[ + (SCI_MAX_REMOTE_DEVICES / SCIC_SDS_REMOTE_NODES_PER_DWORD) + + ((SCI_MAX_REMOTE_DEVICES % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0)]; + + /** + * This field is the nibble selector for the above table. There are three + * possible selectors each for fast lookup when trying to find one, two or + * three remote node entries. + */ + u32 remote_node_groups[ + SCU_STP_REMOTE_NODE_COUNT][ + (SCI_MAX_REMOTE_DEVICES / (32 * SCU_STP_REMOTE_NODE_COUNT)) + + ((SCI_MAX_REMOTE_DEVICES % (32 * SCU_STP_REMOTE_NODE_COUNT)) != 0)]; + +}; + +/* --------------------------------------------------------------------------- */ + +void sci_remote_node_table_initialize( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_entries); + +u16 sci_remote_node_table_allocate_remote_node( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_count); + +void sci_remote_node_table_release_remote_node_index( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_count, + u16 remote_node_index); + +#endif /* _SCIC_SDS_REMOTE_NODE_TABLE_H_ */ diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c new file mode 100644 index 000000000..a7b3243b4 --- /dev/null +++ b/drivers/scsi/isci/request.c @@ -0,0 +1,3519 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <scsi/scsi_cmnd.h> +#include "isci.h" +#include "task.h" +#include "request.h" +#include "scu_completion_codes.h" +#include "scu_event_codes.h" +#include "sas.h" + +#undef C +#define C(a) (#a) +const char *req_state_name(enum sci_base_request_states state) +{ + static const char * const strings[] = REQUEST_STATES; + + return strings[state]; +} +#undef C + +static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, + int idx) +{ + if (idx == 0) + return &ireq->tc->sgl_pair_ab; + else if (idx == 1) + return &ireq->tc->sgl_pair_cd; + else if (idx < 0) + return NULL; + else + return &ireq->sg_table[idx - 2]; +} + +static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, + struct isci_request *ireq, u32 idx) +{ + u32 offset; + + if (idx == 0) { + offset = (void *) &ireq->tc->sgl_pair_ab - + (void *) &ihost->task_context_table[0]; + return ihost->tc_dma + offset; + } else if (idx == 1) { + offset = (void *) &ireq->tc->sgl_pair_cd - + (void *) &ihost->task_context_table[0]; + return ihost->tc_dma + offset; + } + + return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); +} + +static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) +{ + e->length = sg_dma_len(sg); + e->address_upper = upper_32_bits(sg_dma_address(sg)); + e->address_lower = lower_32_bits(sg_dma_address(sg)); + e->address_modifier = 0; +} + +static void sci_request_build_sgl(struct isci_request *ireq) +{ + struct isci_host *ihost = ireq->isci_host; + struct sas_task *task = isci_request_access_task(ireq); + struct scatterlist *sg = NULL; + dma_addr_t dma_addr; + u32 sg_idx = 0; + struct scu_sgl_element_pair *scu_sg = NULL; + struct scu_sgl_element_pair *prev_sg = NULL; + + if (task->num_scatter > 0) { + sg = task->scatter; + + while (sg) { + scu_sg = to_sgl_element_pair(ireq, sg_idx); + init_sgl_element(&scu_sg->A, sg); + sg = sg_next(sg); + if (sg) { + init_sgl_element(&scu_sg->B, sg); + sg = sg_next(sg); + } else + memset(&scu_sg->B, 0, sizeof(scu_sg->B)); + + if (prev_sg) { + dma_addr = to_sgl_element_pair_dma(ihost, + ireq, + sg_idx); + + prev_sg->next_pair_upper = + upper_32_bits(dma_addr); + prev_sg->next_pair_lower = + lower_32_bits(dma_addr); + } + + prev_sg = scu_sg; + sg_idx++; + } + } else { /* handle when no sg */ + scu_sg = to_sgl_element_pair(ireq, sg_idx); + + dma_addr = dma_map_single(&ihost->pdev->dev, + task->scatter, + task->total_xfer_len, + task->data_dir); + + ireq->zero_scatter_daddr = dma_addr; + + scu_sg->A.length = task->total_xfer_len; + scu_sg->A.address_upper = upper_32_bits(dma_addr); + scu_sg->A.address_lower = lower_32_bits(dma_addr); + } + + if (scu_sg) { + scu_sg->next_pair_upper = 0; + scu_sg->next_pair_lower = 0; + } +} + +static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq) +{ + struct ssp_cmd_iu *cmd_iu; + struct sas_task *task = isci_request_access_task(ireq); + + cmd_iu = &ireq->ssp.cmd; + + memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); + cmd_iu->add_cdb_len = 0; + 
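/* Clear the reserved and unsupported fields below, then copy the CDB into the IU 32-bit byte-swapped for the wire format. */ + 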
cmd_iu->_r_a = 0; + cmd_iu->_r_b = 0; + cmd_iu->en_fburst = 0; /* unsupported */ + cmd_iu->task_prio = 0; + cmd_iu->task_attr = task->ssp_task.task_attr; + cmd_iu->_r_c = 0; + + sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd, + (task->ssp_task.cmd->cmd_len+3) / sizeof(u32)); +} + +static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) +{ + struct ssp_task_iu *task_iu; + struct sas_task *task = isci_request_access_task(ireq); + struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); + + task_iu = &ireq->ssp.tmf; + + memset(task_iu, 0, sizeof(struct ssp_task_iu)); + + memcpy(task_iu->LUN, task->ssp_task.LUN, 8); + + task_iu->task_func = isci_tmf->tmf_code; + task_iu->task_tag = + (test_bit(IREQ_TMF, &ireq->flags)) ? + isci_tmf->io_tag : + SCI_CONTROLLER_INVALID_IO_TAG; +} + +/* + * This method is will fill in the SCU Task Context for any type of SSP request. + */ +static void scu_ssp_request_construct_task_context( + struct isci_request *ireq, + struct scu_task_context *task_context) +{ + dma_addr_t dma_addr; + struct isci_remote_device *idev; + struct isci_port *iport; + + idev = ireq->target_device; + iport = idev->owning_port; + + /* Fill in the TC with its required data */ + task_context->abort = 0; + task_context->priority = 0; + task_context->initiator_request = 1; + task_context->connection_rate = idev->connection_rate; + task_context->protocol_engine_index = ISCI_PEG; + task_context->logical_port_index = iport->physical_port_index; + task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; + task_context->valid = SCU_TASK_CONTEXT_VALID; + task_context->context_type = SCU_TASK_CONTEXT_TYPE; + + task_context->remote_node_index = idev->rnc.remote_node_index; + task_context->command_code = 0; + + task_context->link_layer_control = 0; + task_context->do_not_dma_ssp_good_response = 1; + task_context->strict_ordering = 0; + task_context->control_frame = 0; + task_context->timeout_enable = 0; + task_context->block_guard_enable = 0; + + task_context->address_modifier = 0; + + /* task_context->type.ssp.tag = ireq->io_tag; */ + task_context->task_phase = 0x01; + + ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (iport->physical_port_index << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + ISCI_TAG_TCI(ireq->io_tag)); + + /* + * Copy the physical address for the command buffer to the + * SCU Task Context + */ + dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); + + task_context->command_iu_upper = upper_32_bits(dma_addr); + task_context->command_iu_lower = lower_32_bits(dma_addr); + + /* + * Copy the physical address for the response buffer to the + * SCU Task Context + */ + dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); + + task_context->response_iu_upper = upper_32_bits(dma_addr); + task_context->response_iu_lower = lower_32_bits(dma_addr); +} + +static u8 scu_bg_blk_size(struct scsi_device *sdp) +{ + switch (sdp->sector_size) { + case 512: + return 0; + case 1024: + return 1; + case 4096: + return 3; + default: + return 0xff; + } +} + +static u32 scu_dif_bytes(u32 len, u32 sector_size) +{ + return (len >> ilog2(sector_size)) * 8; +} + +static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op) +{ + struct scu_task_context *tc = ireq->tc; + struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; + u8 blk_sz = scu_bg_blk_size(scmd->device); + + tc->block_guard_enable = 1; + tc->blk_prot_en = 1; + tc->blk_sz = blk_sz; + /* DIF 
write insert */ + tc->blk_prot_func = 0x2; + + tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, + scmd->device->sector_size); + + /* always init to 0, used by hw */ + tc->interm_crc_val = 0; + + tc->init_crc_seed = 0; + tc->app_tag_verify = 0; + tc->app_tag_gen = 0; + tc->ref_tag_seed_verify = 0; + + /* always init to same as bg_blk_sz */ + tc->UD_bytes_immed_val = scmd->device->sector_size; + + tc->reserved_DC_0 = 0; + + /* always init to 8 */ + tc->DIF_bytes_immed_val = 8; + + tc->reserved_DC_1 = 0; + tc->bgc_blk_sz = scmd->device->sector_size; + tc->reserved_E0_0 = 0; + tc->app_tag_gen_mask = 0; + + /** setup block guard control **/ + tc->bgctl = 0; + + /* DIF write insert */ + tc->bgctl_f.op = 0x2; + + tc->app_tag_verify_mask = 0; + + /* must init to 0 for hw */ + tc->blk_guard_err = 0; + + tc->reserved_E8_0 = 0; + + if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) + tc->ref_tag_seed_gen = scsi_prot_ref_tag(scmd); + else if (type & SCSI_PROT_DIF_TYPE3) + tc->ref_tag_seed_gen = 0; +} + +static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op) +{ + struct scu_task_context *tc = ireq->tc; + struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; + u8 blk_sz = scu_bg_blk_size(scmd->device); + + tc->block_guard_enable = 1; + tc->blk_prot_en = 1; + tc->blk_sz = blk_sz; + /* DIF read strip */ + tc->blk_prot_func = 0x1; + + tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, + scmd->device->sector_size); + + /* always init to 0, used by hw */ + tc->interm_crc_val = 0; + + tc->init_crc_seed = 0; + tc->app_tag_verify = 0; + tc->app_tag_gen = 0; + + if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) + tc->ref_tag_seed_verify = scsi_prot_ref_tag(scmd); + else if (type & SCSI_PROT_DIF_TYPE3) + tc->ref_tag_seed_verify = 0; + + /* always init to same as bg_blk_sz */ + tc->UD_bytes_immed_val = scmd->device->sector_size; + + tc->reserved_DC_0 = 0; + + /* always init to 8 */ + tc->DIF_bytes_immed_val = 8; + + tc->reserved_DC_1 = 0; + tc->bgc_blk_sz = scmd->device->sector_size; + tc->reserved_E0_0 = 0; + tc->app_tag_gen_mask = 0; + + /** setup block guard control **/ + tc->bgctl = 0; + + /* DIF read strip */ + tc->bgctl_f.crc_verify = 1; + tc->bgctl_f.op = 0x1; + if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) { + tc->bgctl_f.ref_tag_chk = 1; + tc->bgctl_f.app_f_detect = 1; + } else if (type & SCSI_PROT_DIF_TYPE3) + tc->bgctl_f.app_ref_f_detect = 1; + + tc->app_tag_verify_mask = 0; + + /* must init to 0 for hw */ + tc->blk_guard_err = 0; + + tc->reserved_E8_0 = 0; + tc->ref_tag_seed_gen = 0; +} + +/* + * This method is will fill in the SCU Task Context for a SSP IO request. 
+ */ +static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, + enum dma_data_direction dir, + u32 len) +{ + struct scu_task_context *task_context = ireq->tc; + struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr; + struct scsi_cmnd *scmd = sas_task->uldd_task; + u8 prot_type = scsi_get_prot_type(scmd); + u8 prot_op = scsi_get_prot_op(scmd); + + scu_ssp_request_construct_task_context(ireq, task_context); + + task_context->ssp_command_iu_length = + sizeof(struct ssp_cmd_iu) / sizeof(u32); + task_context->type.ssp.frame_type = SSP_COMMAND; + + switch (dir) { + case DMA_FROM_DEVICE: + case DMA_NONE: + default: + task_context->task_type = SCU_TASK_TYPE_IOREAD; + break; + case DMA_TO_DEVICE: + task_context->task_type = SCU_TASK_TYPE_IOWRITE; + break; + } + + task_context->transfer_length_bytes = len; + + if (task_context->transfer_length_bytes > 0) + sci_request_build_sgl(ireq); + + if (prot_type != SCSI_PROT_DIF_TYPE0) { + if (prot_op == SCSI_PROT_READ_STRIP) + scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op); + else if (prot_op == SCSI_PROT_WRITE_INSERT) + scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op); + } +} + +/** + * scu_ssp_task_request_construct_task_context() - This method will fill in + * the SCU Task Context for a SSP Task request. The following important + * settings are utilized: -# priority == SCU_TASK_PRIORITY_HIGH. This + * ensures that the task request is issued ahead of other task destined + * for the same Remote Node. -# task_type == SCU_TASK_TYPE_IOREAD. This + * simply indicates that a normal request type (i.e. non-raw frame) is + * being utilized to perform task management. -#control_frame == 1. This + * ensures that the proper endianness is set so that the bytes are + * transmitted in the right order for a task frame. + * @ireq: This parameter specifies the task request object being constructed. + */ +static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq) +{ + struct scu_task_context *task_context = ireq->tc; + + scu_ssp_request_construct_task_context(ireq, task_context); + + task_context->control_frame = 1; + task_context->priority = SCU_TASK_PRIORITY_HIGH; + task_context->task_type = SCU_TASK_TYPE_RAW_FRAME; + task_context->transfer_length_bytes = 0; + task_context->type.ssp.frame_type = SSP_TASK; + task_context->ssp_command_iu_length = + sizeof(struct ssp_task_iu) / sizeof(u32); +} + +/** + * scu_sata_request_construct_task_context() + * This method is will fill in the SCU Task Context for any type of SATA + * request. This is called from the various SATA constructors. + * @ireq: The general IO request object which is to be used in + * constructing the SCU task context. + * @task_context: The buffer pointer for the SCU task context which is being + * constructed. + * + * The general io request construction is complete. The buffer assignment for + * the command buffer is complete. none Revisit task context construction to + * determine what is common for SSP/SMP/STP task context structures. 
+ */ +static void scu_sata_request_construct_task_context( + struct isci_request *ireq, + struct scu_task_context *task_context) +{ + dma_addr_t dma_addr; + struct isci_remote_device *idev; + struct isci_port *iport; + + idev = ireq->target_device; + iport = idev->owning_port; + + /* Fill in the TC with its required data */ + task_context->abort = 0; + task_context->priority = SCU_TASK_PRIORITY_NORMAL; + task_context->initiator_request = 1; + task_context->connection_rate = idev->connection_rate; + task_context->protocol_engine_index = ISCI_PEG; + task_context->logical_port_index = iport->physical_port_index; + task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; + task_context->valid = SCU_TASK_CONTEXT_VALID; + task_context->context_type = SCU_TASK_CONTEXT_TYPE; + + task_context->remote_node_index = idev->rnc.remote_node_index; + task_context->command_code = 0; + + task_context->link_layer_control = 0; + task_context->do_not_dma_ssp_good_response = 1; + task_context->strict_ordering = 0; + task_context->control_frame = 0; + task_context->timeout_enable = 0; + task_context->block_guard_enable = 0; + + task_context->address_modifier = 0; + task_context->task_phase = 0x01; + + task_context->ssp_command_iu_length = + (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); + + /* Set the first word of the H2D REG FIS */ + task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; + + ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (iport->physical_port_index << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + ISCI_TAG_TCI(ireq->io_tag)); + /* + * Copy the physical address for the command buffer to the SCU Task + * Context. We must offset the command buffer by 4 bytes because the + * first 4 bytes are transfered in the body of the TC. + */ + dma_addr = sci_io_request_get_dma_addr(ireq, + ((char *) &ireq->stp.cmd) + + sizeof(u32)); + + task_context->command_iu_upper = upper_32_bits(dma_addr); + task_context->command_iu_lower = lower_32_bits(dma_addr); + + /* SATA Requests do not have a response buffer */ + task_context->response_iu_upper = 0; + task_context->response_iu_lower = 0; +} + +static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq) +{ + struct scu_task_context *task_context = ireq->tc; + + scu_sata_request_construct_task_context(ireq, task_context); + + task_context->control_frame = 0; + task_context->priority = SCU_TASK_PRIORITY_NORMAL; + task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME; + task_context->type.stp.fis_type = FIS_REGH2D; + task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); +} + +static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq, + bool copy_rx_frame) +{ + struct isci_stp_request *stp_req = &ireq->stp.req; + + scu_stp_raw_request_construct_task_context(ireq); + + stp_req->status = 0; + stp_req->sgl.offset = 0; + stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; + + if (copy_rx_frame) { + sci_request_build_sgl(ireq); + stp_req->sgl.index = 0; + } else { + /* The user does not want the data copied to the SGL buffer location */ + stp_req->sgl.index = -1; + } + + return SCI_SUCCESS; +} + +/* + * sci_stp_optimized_request_construct() + * @ireq: This parameter specifies the request to be constructed as an + * optimized request. + * @optimized_task_type: This parameter specifies whether the request is to be + * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. 
- A + * value of 1 indicates NCQ. + * + * This method will perform request construction common to all types of STP + * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method + * returns an indication as to whether the construction was successful. + */ +static void sci_stp_optimized_request_construct(struct isci_request *ireq, + u8 optimized_task_type, + u32 len, + enum dma_data_direction dir) +{ + struct scu_task_context *task_context = ireq->tc; + + /* Build the STP task context structure */ + scu_sata_request_construct_task_context(ireq, task_context); + + /* Copy over the SGL elements */ + sci_request_build_sgl(ireq); + + /* Copy over the number of bytes to be transfered */ + task_context->transfer_length_bytes = len; + + if (dir == DMA_TO_DEVICE) { + /* + * The difference between the DMA IN and DMA OUT request task type + * values are consistent with the difference between FPDMA READ + * and FPDMA WRITE values. Add the supplied task type parameter + * to this difference to set the task type properly for this + * DATA OUT (WRITE) case. */ + task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT + - SCU_TASK_TYPE_DMA_IN); + } else { + /* + * For the DATA IN (READ) case, simply save the supplied + * optimized task type. */ + task_context->task_type = optimized_task_type; + } +} + +static void sci_atapi_construct(struct isci_request *ireq) +{ + struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd; + struct sas_task *task; + + /* To simplify the implementation we take advantage of the + * silicon's partial acceleration of atapi protocol (dma data + * transfers), so we promote all commands to dma protocol. This + * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives. + */ + h2d_fis->features |= ATAPI_PKT_DMA; + + scu_stp_raw_request_construct_task_context(ireq); + + task = isci_request_access_task(ireq); + if (task->data_dir == DMA_NONE) + task->total_xfer_len = 0; + + /* clear the response so we can detect arrivial of an + * unsolicited h2d fis + */ + ireq->stp.rsp.fis_type = 0; +} + +static enum sci_status +sci_io_request_construct_sata(struct isci_request *ireq, + u32 len, + enum dma_data_direction dir, + bool copy) +{ + enum sci_status status = SCI_SUCCESS; + struct sas_task *task = isci_request_access_task(ireq); + struct domain_device *dev = ireq->target_device->domain_dev; + + /* check for management protocols */ + if (test_bit(IREQ_TMF, &ireq->flags)) { + struct isci_tmf *tmf = isci_request_access_tmf(ireq); + + dev_err(&ireq->owning_controller->pdev->dev, + "%s: Request 0x%p received un-handled SAT " + "management protocol 0x%x.\n", + __func__, ireq, tmf->tmf_code); + + return SCI_FAILURE; + } + + if (!sas_protocol_ata(task->task_proto)) { + dev_err(&ireq->owning_controller->pdev->dev, + "%s: Non-ATA protocol in SATA path: 0x%x\n", + __func__, + task->task_proto); + return SCI_FAILURE; + + } + + /* ATAPI */ + if (dev->sata_dev.class == ATA_DEV_ATAPI && + task->ata_task.fis.command == ATA_CMD_PACKET) { + sci_atapi_construct(ireq); + return SCI_SUCCESS; + } + + /* non data */ + if (task->data_dir == DMA_NONE) { + scu_stp_raw_request_construct_task_context(ireq); + return SCI_SUCCESS; + } + + /* NCQ */ + if (task->ata_task.use_ncq) { + sci_stp_optimized_request_construct(ireq, + SCU_TASK_TYPE_FPDMAQ_READ, + len, dir); + return SCI_SUCCESS; + } + + /* DMA */ + if (task->ata_task.dma_xfer) { + sci_stp_optimized_request_construct(ireq, + SCU_TASK_TYPE_DMA_IN, + len, dir); + return SCI_SUCCESS; + } else /* PIO */ + return 
sci_stp_pio_request_construct(ireq, copy); + + return status; +} + +static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq) +{ + struct sas_task *task = isci_request_access_task(ireq); + + ireq->protocol = SAS_PROTOCOL_SSP; + + scu_ssp_io_request_construct_task_context(ireq, + task->data_dir, + task->total_xfer_len); + + sci_io_request_build_ssp_command_iu(ireq); + + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); + + return SCI_SUCCESS; +} + +enum sci_status sci_task_request_construct_ssp( + struct isci_request *ireq) +{ + /* Construct the SSP Task SCU Task Context */ + scu_ssp_task_request_construct_task_context(ireq); + + /* Fill in the SSP Task IU */ + sci_task_request_build_ssp_task_iu(ireq); + + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); + + return SCI_SUCCESS; +} + +static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq) +{ + enum sci_status status; + bool copy = false; + struct sas_task *task = isci_request_access_task(ireq); + + ireq->protocol = SAS_PROTOCOL_STP; + + copy = (task->data_dir == DMA_NONE) ? false : true; + + status = sci_io_request_construct_sata(ireq, + task->total_xfer_len, + task->data_dir, + copy); + + if (status == SCI_SUCCESS) + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); + + return status; +} + +#define SCU_TASK_CONTEXT_SRAM 0x200000 +/** + * sci_req_tx_bytes - bytes transferred when reply underruns request + * @ireq: request that was terminated early + */ +static u32 sci_req_tx_bytes(struct isci_request *ireq) +{ + struct isci_host *ihost = ireq->owning_controller; + u32 ret_val = 0; + + if (readl(&ihost->smu_registers->address_modifier) == 0) { + void __iomem *scu_reg_base = ihost->scu_registers; + + /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where + * BAR1 is the scu_registers + * 0x20002C = 0x200000 + 0x2c + * = start of task context SRAM + offset of (type.ssp.data_offset) + * TCi is the io_tag of struct sci_request + */ + ret_val = readl(scu_reg_base + + (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + + ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag))); + } + + return ret_val; +} + +enum sci_status sci_request_start(struct isci_request *ireq) +{ + enum sci_base_request_states state; + struct scu_task_context *tc = ireq->tc; + struct isci_host *ihost = ireq->owning_controller; + + state = ireq->sm.current_state_id; + if (state != SCI_REQ_CONSTRUCTED) { + dev_warn(&ihost->pdev->dev, + "%s: SCIC IO Request requested to start while in wrong " + "state %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } + + tc->task_index = ISCI_TAG_TCI(ireq->io_tag); + + switch (tc->protocol_type) { + case SCU_TASK_CONTEXT_PROTOCOL_SMP: + case SCU_TASK_CONTEXT_PROTOCOL_SSP: + /* SSP/SMP Frame */ + tc->type.ssp.tag = ireq->io_tag; + tc->type.ssp.target_port_transfer_tag = 0xFFFF; + break; + + case SCU_TASK_CONTEXT_PROTOCOL_STP: + /* STP/SATA Frame + * tc->type.stp.ncq_tag = ireq->ncq_tag; + */ + break; + + case SCU_TASK_CONTEXT_PROTOCOL_NONE: + /* / @todo When do we set no protocol type? 
*/ + break; + + default: + /* This should never happen since we build the IO + * requests */ + break; + } + + /* Add to the post_context the io tag value */ + ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag); + + /* Everything is good go ahead and change state */ + sci_change_state(&ireq->sm, SCI_REQ_STARTED); + + return SCI_SUCCESS; +} + +enum sci_status +sci_io_request_terminate(struct isci_request *ireq) +{ + enum sci_base_request_states state; + + state = ireq->sm.current_state_id; + + switch (state) { + case SCI_REQ_CONSTRUCTED: + /* Set to make sure no HW terminate posting is done: */ + set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags); + ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; + ireq->sci_status = SCI_FAILURE_IO_TERMINATED; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + return SCI_SUCCESS; + case SCI_REQ_STARTED: + case SCI_REQ_TASK_WAIT_TC_COMP: + case SCI_REQ_SMP_WAIT_RESP: + case SCI_REQ_SMP_WAIT_TC_COMP: + case SCI_REQ_STP_UDMA_WAIT_TC_COMP: + case SCI_REQ_STP_UDMA_WAIT_D2H: + case SCI_REQ_STP_NON_DATA_WAIT_H2D: + case SCI_REQ_STP_NON_DATA_WAIT_D2H: + case SCI_REQ_STP_PIO_WAIT_H2D: + case SCI_REQ_STP_PIO_WAIT_FRAME: + case SCI_REQ_STP_PIO_DATA_IN: + case SCI_REQ_STP_PIO_DATA_OUT: + case SCI_REQ_ATAPI_WAIT_H2D: + case SCI_REQ_ATAPI_WAIT_PIO_SETUP: + case SCI_REQ_ATAPI_WAIT_D2H: + case SCI_REQ_ATAPI_WAIT_TC_COMP: + /* Fall through and change state to ABORTING... */ + case SCI_REQ_TASK_WAIT_TC_RESP: + /* The task frame was already confirmed to have been + * sent by the SCU HW. Since the state machine is + * now only waiting for the task response itself, + * abort the request and complete it immediately + * and don't wait for the task response. + */ + sci_change_state(&ireq->sm, SCI_REQ_ABORTING); + fallthrough; /* and handle like ABORTING */ + case SCI_REQ_ABORTING: + if (!isci_remote_device_is_safe_to_abort(ireq->target_device)) + set_bit(IREQ_PENDING_ABORT, &ireq->flags); + else + clear_bit(IREQ_PENDING_ABORT, &ireq->flags); + /* If the request is only waiting on the remote device + * suspension, return SUCCESS so the caller will wait too. + */ + return SCI_SUCCESS; + case SCI_REQ_COMPLETED: + default: + dev_warn(&ireq->owning_controller->pdev->dev, + "%s: SCIC IO Request requested to abort while in wrong " + "state %d\n", __func__, ireq->sm.current_state_id); + break; + } + + return SCI_FAILURE_INVALID_STATE; +} + +enum sci_status sci_request_complete(struct isci_request *ireq) +{ + enum sci_base_request_states state; + struct isci_host *ihost = ireq->owning_controller; + + state = ireq->sm.current_state_id; + if (WARN_ONCE(state != SCI_REQ_COMPLETED, + "isci: request completion from wrong state (%s)\n", + req_state_name(state))) + return SCI_FAILURE_INVALID_STATE; + + if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) + sci_controller_release_frame(ihost, + ireq->saved_rx_frame_index); + + /* XXX can we just stop the machine and remove the 'final' state? 
*/ + sci_change_state(&ireq->sm, SCI_REQ_FINAL); + return SCI_SUCCESS; +} + +enum sci_status sci_io_request_event_handler(struct isci_request *ireq, + u32 event_code) +{ + enum sci_base_request_states state; + struct isci_host *ihost = ireq->owning_controller; + + state = ireq->sm.current_state_id; + + if (state != SCI_REQ_STP_PIO_DATA_IN) { + dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n", + __func__, event_code, req_state_name(state)); + + return SCI_FAILURE_INVALID_STATE; + } + + switch (scu_get_event_specifier(event_code)) { + case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: + /* We are waiting for data and the SCU has R_ERR the data frame. + * Go back to waiting for the D2H Register FIS + */ + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + return SCI_SUCCESS; + default: + dev_err(&ihost->pdev->dev, + "%s: pio request unexpected event %#x\n", + __func__, event_code); + + /* TODO Should we fail the PIO request when we get an + * unexpected event? + */ + return SCI_FAILURE; + } +} + +/* + * This function copies response data for requests returning response data + * instead of sense data. + * @sci_req: This parameter specifies the request object for which to copy + * the response data. + */ +static void sci_io_request_copy_response(struct isci_request *ireq) +{ + void *resp_buf; + u32 len; + struct ssp_response_iu *ssp_response; + struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); + + ssp_response = &ireq->ssp.rsp; + + resp_buf = &isci_tmf->resp.resp_iu; + + len = min_t(u32, + SSP_RESP_IU_MAX_SIZE, + be32_to_cpu(ssp_response->response_data_len)); + + memcpy(resp_buf, ssp_response->resp_data, len); +} + +static enum sci_status +request_started_state_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + struct ssp_response_iu *resp_iu; + u8 datapres; + + /* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000 + * to determine SDMA status + */ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + break; + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): { + /* There are times when the SCU hardware will return an early + * response because the io request specified more data than is + * returned by the target device (mode pages, inquiry data, + * etc.). We must check the response stats to see if this is + * truly a failed request or a good request that just got + * completed early. + */ + struct ssp_response_iu *resp = &ireq->ssp.rsp; + ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); + + sci_swab32_cpy(&ireq->ssp.rsp, + &ireq->ssp.rsp, + word_cnt); + + if (resp->status == 0) { + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; + } else { + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + } + break; + } + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): { + ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); + + sci_swab32_cpy(&ireq->ssp.rsp, + &ireq->ssp.rsp, + word_cnt); + + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + break; + } + + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR): + /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame + * guaranteed to be received before this completion status is + * posted? 
+ */ + resp_iu = &ireq->ssp.rsp; + datapres = resp_iu->datapres; + + if (datapres == SAS_DATAPRES_RESPONSE_DATA || + datapres == SAS_DATAPRES_SENSE_DATA) { + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + } else { + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + } + break; + /* only stp device gets suspended. */ + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): + if (ireq->protocol == SAS_PROTOCOL_STP) { + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT; + ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; + } else { + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT; + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + } + break; + + /* both stp/ssp device gets suspended */ + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT; + ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; + break; + + /* neither ssp nor stp gets suspended. 
*/ + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): + default: + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT; + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + break; + } + + /* + * TODO: This is probably wrong for ACK/NAK timeout conditions + */ + + /* In all cases we will treat this as the completion of the IO req. */ + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + return SCI_SUCCESS; +} + +static enum sci_status +request_aborting_state_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): + case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): + ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; + ireq->sci_status = SCI_FAILURE_IO_TERMINATED; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + + default: + /* Unless we get some strange error wait for the task abort to complete + * TODO: Should there be a state change for this completion? + */ + break; + } + + return SCI_SUCCESS; +} + +static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); + break; + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): + /* Currently, the decision is to simply allow the task request + * to timeout if the task IU wasn't received successfully. + * There is a potential for receiving multiple task responses if + * we decide to send the task IU again. + */ + dev_warn(&ireq->owning_controller->pdev->dev, + "%s: TaskRequest:0x%p CompletionCode:%x - " + "ACK/NAK timeout\n", __func__, ireq, + completion_code); + + sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); + break; + default: + /* + * All other completion status cause the IO to be complete. + * If a NAK was received, then it is up to the user to retry + * the request. 
+ */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +static enum sci_status +smp_request_await_response_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + /* In the AWAIT RESPONSE state, any TC completion is + * unexpected, but if the TC has a success status, we + * complete the IO anyway. + */ + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): + /* These statuses have been seen with a specific LSI + * expander, which sometimes is not able to send an SMP + * response within 2 ms. This causes our hardware to break + * the connection and set the TC completion with one of + * these SMP_XXX_XX_ERR statuses. For this type of error, + * we ask the ihost user to retry the request. + */ + ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR; + ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + default: + /* All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request + */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +static enum sci_status +smp_request_await_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + default: + /* All other completion status cause the IO to be + * complete. If a NAK was received, then it is up to + * the user to retry the request. 
+ */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req) +{ + struct scu_sgl_element *sgl; + struct scu_sgl_element_pair *sgl_pair; + struct isci_request *ireq = to_ireq(stp_req); + struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl; + + sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); + if (!sgl_pair) + sgl = NULL; + else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) { + if (sgl_pair->B.address_lower == 0 && + sgl_pair->B.address_upper == 0) { + sgl = NULL; + } else { + pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B; + sgl = &sgl_pair->B; + } + } else { + if (sgl_pair->next_pair_lower == 0 && + sgl_pair->next_pair_upper == 0) { + sgl = NULL; + } else { + pio_sgl->index++; + pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A; + sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); + sgl = &sgl_pair->A; + } + } + + return sgl; +} + +static enum sci_status +stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); + break; + + default: + /* All other completion status cause the IO to be + * complete. If a NAK was received, then it is up to + * the user to retry the request. + */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */ + +/* transmit DATA_FIS from (current sgl + offset) for input + * parameter length. current sgl and offset is alreay stored in the IO request + */ +static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame( + struct isci_request *ireq, + u32 length) +{ + struct isci_stp_request *stp_req = &ireq->stp.req; + struct scu_task_context *task_context = ireq->tc; + struct scu_sgl_element_pair *sgl_pair; + struct scu_sgl_element *current_sgl; + + /* Recycle the TC and reconstruct it for sending out DATA FIS containing + * for the data from current_sgl+offset for the input length + */ + sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); + if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) + current_sgl = &sgl_pair->A; + else + current_sgl = &sgl_pair->B; + + /* update the TC */ + task_context->command_iu_upper = current_sgl->address_upper; + task_context->command_iu_lower = current_sgl->address_lower; + task_context->transfer_length_bytes = length; + task_context->type.stp.fis_type = FIS_DATA; + + /* send the new TC out. 
*/ + return sci_controller_continue_io(ireq); +} + +static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) +{ + struct isci_stp_request *stp_req = &ireq->stp.req; + struct scu_sgl_element_pair *sgl_pair; + enum sci_status status = SCI_SUCCESS; + struct scu_sgl_element *sgl; + u32 offset; + u32 len = 0; + + offset = stp_req->sgl.offset; + sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); + if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__)) + return SCI_FAILURE; + + if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) { + sgl = &sgl_pair->A; + len = sgl_pair->A.length - offset; + } else { + sgl = &sgl_pair->B; + len = sgl_pair->B.length - offset; + } + + if (stp_req->pio_len == 0) + return SCI_SUCCESS; + + if (stp_req->pio_len >= len) { + status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len); + if (status != SCI_SUCCESS) + return status; + stp_req->pio_len -= len; + + /* update the current sgl, offset and save for future */ + sgl = pio_sgl_next(stp_req); + offset = 0; + } else if (stp_req->pio_len < len) { + sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); + + /* Sgl offset will be adjusted and saved for future */ + offset += stp_req->pio_len; + sgl->address_lower += stp_req->pio_len; + stp_req->pio_len = 0; + } + + stp_req->sgl.offset = offset; + + return status; +} + +/** + * sci_stp_request_pio_data_in_copy_data_buffer() + * @stp_req: The request that is used for the SGL processing. + * @data_buf: The buffer of data to be copied. + * @len: The length of the data transfer. + * + * Copy the data from the buffer for the length specified to the IO request SGL + * specified data region. enum sci_status + */ +static enum sci_status +sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, + u8 *data_buf, u32 len) +{ + struct isci_request *ireq; + u8 *src_addr; + int copy_len; + struct sas_task *task; + struct scatterlist *sg; + void *kaddr; + int total_len = len; + + ireq = to_ireq(stp_req); + task = isci_request_access_task(ireq); + src_addr = data_buf; + + if (task->num_scatter > 0) { + sg = task->scatter; + + while (total_len > 0) { + struct page *page = sg_page(sg); + + copy_len = min_t(int, total_len, sg_dma_len(sg)); + kaddr = kmap_atomic(page); + memcpy(kaddr + sg->offset, src_addr, copy_len); + kunmap_atomic(kaddr); + total_len -= copy_len; + src_addr += copy_len; + sg = sg_next(sg); + } + } else { + BUG_ON(task->total_xfer_len < total_len); + memcpy(task->scatter, src_addr, total_len); + } + + return SCI_SUCCESS; +} + +/** + * sci_stp_request_pio_data_in_copy_data() + * @stp_req: The PIO DATA IN request that is to receive the data. + * @data_buffer: The buffer to copy from. + * + * Copy the data buffer to the io request data region. 
enum sci_status + */ +static enum sci_status sci_stp_request_pio_data_in_copy_data( + struct isci_stp_request *stp_req, + u8 *data_buffer) +{ + enum sci_status status; + + /* + * If there is less than 1K remaining in the transfer request + * copy just the data for the transfer */ + if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) { + status = sci_stp_request_pio_data_in_copy_data_buffer( + stp_req, data_buffer, stp_req->pio_len); + + if (status == SCI_SUCCESS) + stp_req->pio_len = 0; + } else { + /* We are transfering the whole frame so copy */ + status = sci_stp_request_pio_data_in_copy_data_buffer( + stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); + + if (status == SCI_SUCCESS) + stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE; + } + + return status; +} + +static enum sci_status +stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + break; + + default: + /* All other completion status cause the IO to be + * complete. If a NAK was received, then it is up to + * the user to retry the request. + */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +static enum sci_status +pio_data_out_tx_done_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + enum sci_status status = SCI_SUCCESS; + bool all_frames_transferred = false; + struct isci_stp_request *stp_req = &ireq->stp.req; + + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + /* Transmit data */ + if (stp_req->pio_len != 0) { + status = sci_stp_request_pio_data_out_transmit_data(ireq); + if (status == SCI_SUCCESS) { + if (stp_req->pio_len == 0) + all_frames_transferred = true; + } + } else if (stp_req->pio_len == 0) { + /* + * this will happen if the all data is written at the + * first time after the pio setup fis is received + */ + all_frames_transferred = true; + } + + /* all data transferred. */ + if (all_frames_transferred) { + /* + * Change the state to SCI_REQ_STP_PIO_DATA_IN + * and wait for PIO_SETUP fis / or D2H REg fis. */ + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + } + break; + + default: + /* + * All other completion status cause the IO to be complete. + * If a NAK was received, then it is up to the user to retry + * the request. 
+ */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return status; +} + +static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq, + u32 frame_index) +{ + struct isci_host *ihost = ireq->owning_controller; + struct dev_to_host_fis *frame_header; + enum sci_status status; + u32 *frame_buffer; + + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_header); + + if ((status == SCI_SUCCESS) && + (frame_header->fis_type == FIS_REGD2H)) { + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&frame_buffer); + + sci_controller_copy_sata_response(&ireq->stp.rsp, + frame_header, + frame_buffer); + } + + sci_controller_release_frame(ihost, frame_index); + + return status; +} + +static enum sci_status process_unsolicited_fis(struct isci_request *ireq, + u32 frame_index) +{ + struct isci_host *ihost = ireq->owning_controller; + enum sci_status status; + struct dev_to_host_fis *frame_header; + u32 *frame_buffer; + + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) + return status; + + if (frame_header->fis_type != FIS_REGD2H) { + dev_err(&ireq->isci_host->pdev->dev, + "%s ERROR: invalid fis type 0x%X\n", + __func__, frame_header->fis_type); + return SCI_FAILURE; + } + + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&frame_buffer); + + sci_controller_copy_sata_response(&ireq->stp.rsp, + (u32 *)frame_header, + frame_buffer); + + /* Frame has been decoded return it to the controller */ + sci_controller_release_frame(ihost, frame_index); + + return status; +} + +static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq, + u32 frame_index) +{ + struct sas_task *task = isci_request_access_task(ireq); + enum sci_status status; + + status = process_unsolicited_fis(ireq, frame_index); + + if (status == SCI_SUCCESS) { + if (ireq->stp.rsp.status & ATA_ERR) + status = SCI_FAILURE_IO_RESPONSE_VALID; + } else { + status = SCI_FAILURE_IO_RESPONSE_VALID; + } + + if (status != SCI_SUCCESS) { + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = status; + } else { + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + } + + /* the d2h ufi is the end of non-data commands */ + if (task->data_dir == DMA_NONE) + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + + return status; +} + +static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq) +{ + struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); + void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet; + struct scu_task_context *task_context = ireq->tc; + + /* fill in the SCU Task Context for a DATA fis containing CDB in Raw Frame + * type. The TC for previous Packet fis was already there, we only need to + * change the H2D fis content. 
+ */ + memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis)); + memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN); + memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context)); + task_context->type.stp.fis_type = FIS_DATA; + task_context->transfer_length_bytes = dev->cdb_len; +} + +static void scu_atapi_construct_task_context(struct isci_request *ireq) +{ + struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); + struct sas_task *task = isci_request_access_task(ireq); + struct scu_task_context *task_context = ireq->tc; + int cdb_len = dev->cdb_len; + + /* reference: SSTL 1.13.4.2 + * task_type, sata_direction + */ + if (task->data_dir == DMA_TO_DEVICE) { + task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT; + task_context->sata_direction = 0; + } else { + /* todo: for NO_DATA command, we need to send out raw frame. */ + task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN; + task_context->sata_direction = 1; + } + + memset(&task_context->type.stp, 0, sizeof(task_context->type.stp)); + task_context->type.stp.fis_type = FIS_DATA; + + memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); + memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len); + task_context->ssp_command_iu_length = cdb_len / sizeof(u32); + + /* task phase is set to TX_CMD */ + task_context->task_phase = 0x1; + + /* retry counter */ + task_context->stp_retry_count = 0; + + /* data transfer size. */ + task_context->transfer_length_bytes = task->total_xfer_len; + + /* setup sgl */ + sci_request_build_sgl(ireq); +} + +enum sci_status +sci_io_request_frame_handler(struct isci_request *ireq, + u32 frame_index) +{ + struct isci_host *ihost = ireq->owning_controller; + struct isci_stp_request *stp_req = &ireq->stp.req; + enum sci_base_request_states state; + enum sci_status status; + ssize_t word_cnt; + + state = ireq->sm.current_state_id; + switch (state) { + case SCI_REQ_STARTED: { + struct ssp_frame_hdr ssp_hdr; + void *frame_header; + + sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + &frame_header); + + word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); + sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); + + if (ssp_hdr.frame_type == SSP_RESPONSE) { + struct ssp_response_iu *resp_iu; + ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); + + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&resp_iu); + + sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt); + + resp_iu = &ireq->ssp.rsp; + + if (resp_iu->datapres == SAS_DATAPRES_RESPONSE_DATA || + resp_iu->datapres == SAS_DATAPRES_SENSE_DATA) { + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + } else { + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + } + } else { + /* not a response frame, why did it get forwarded? 
*/ + dev_err(&ihost->pdev->dev, + "%s: SCIC IO Request 0x%p received unexpected " + "frame %d type 0x%02x\n", __func__, ireq, + frame_index, ssp_hdr.frame_type); + } + + /* + * In any case we are done with this frame buffer return it to + * the controller + */ + sci_controller_release_frame(ihost, frame_index); + + return SCI_SUCCESS; + } + + case SCI_REQ_TASK_WAIT_TC_RESP: + sci_io_request_copy_response(ireq); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + sci_controller_release_frame(ihost, frame_index); + return SCI_SUCCESS; + + case SCI_REQ_SMP_WAIT_RESP: { + struct sas_task *task = isci_request_access_task(ireq); + struct scatterlist *sg = &task->smp_task.smp_resp; + void *frame_header, *kaddr; + u8 *rsp; + + sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + &frame_header); + kaddr = kmap_atomic(sg_page(sg)); + rsp = kaddr + sg->offset; + sci_swab32_cpy(rsp, frame_header, 1); + + if (rsp[0] == SMP_RESPONSE) { + void *smp_resp; + + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + &smp_resp); + + word_cnt = (sg->length/4)-1; + if (word_cnt > 0) + word_cnt = min_t(unsigned int, word_cnt, + SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4); + sci_swab32_cpy(rsp + 4, smp_resp, word_cnt); + + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); + } else { + /* + * This was not a response frame why did it get + * forwarded? + */ + dev_err(&ihost->pdev->dev, + "%s: SCIC SMP Request 0x%p received unexpected " + "frame %d type 0x%02x\n", + __func__, + ireq, + frame_index, + rsp[0]); + + ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR; + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + } + kunmap_atomic(kaddr); + + sci_controller_release_frame(ihost, frame_index); + + return SCI_SUCCESS; + } + + case SCI_REQ_STP_UDMA_WAIT_TC_COMP: + return sci_stp_request_udma_general_frame_handler(ireq, + frame_index); + + case SCI_REQ_STP_UDMA_WAIT_D2H: + /* Use the general frame handler to copy the resposne data */ + status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); + + if (status != SCI_SUCCESS) + return status; + + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + return SCI_SUCCESS; + + case SCI_REQ_STP_NON_DATA_WAIT_D2H: { + struct dev_to_host_fis *frame_header; + u32 *frame_buffer; + + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) { + dev_err(&ihost->pdev->dev, + "%s: SCIC IO Request 0x%p could not get frame " + "header for frame index %d, status %x\n", + __func__, + stp_req, + frame_index, + status); + + return status; + } + + switch (frame_header->fis_type) { + case FIS_REGD2H: + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&frame_buffer); + + sci_controller_copy_sata_response(&ireq->stp.rsp, + frame_header, + frame_buffer); + + /* The command has completed with error */ + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + break; + + default: + dev_warn(&ihost->pdev->dev, + "%s: IO Request:0x%p Frame Id:%d protocol " + "violation occurred\n", __func__, stp_req, + frame_index); + + ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; + ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; + break; + } + + 
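/* Whether a D2H response FIS or an unexpected frame arrived, the non-data request is now done: complete it and return the frame to the controller below. */ + 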
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + + /* Frame has been decoded return it to the controller */ + sci_controller_release_frame(ihost, frame_index); + + return status; + } + + case SCI_REQ_STP_PIO_WAIT_FRAME: { + struct sas_task *task = isci_request_access_task(ireq); + struct dev_to_host_fis *frame_header; + u32 *frame_buffer; + + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) { + dev_err(&ihost->pdev->dev, + "%s: SCIC IO Request 0x%p could not get frame " + "header for frame index %d, status %x\n", + __func__, stp_req, frame_index, status); + return status; + } + + switch (frame_header->fis_type) { + case FIS_PIO_SETUP: + /* Get from the frame buffer the PIO Setup Data */ + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&frame_buffer); + + /* Get the data from the PIO Setup The SCU Hardware + * returns first word in the frame_header and the rest + * of the data is in the frame buffer so we need to + * back up one dword + */ + + /* transfer_count: first 16bits in the 4th dword */ + stp_req->pio_len = frame_buffer[3] & 0xffff; + + /* status: 4th byte in the 3rd dword */ + stp_req->status = (frame_buffer[2] >> 24) & 0xff; + + sci_controller_copy_sata_response(&ireq->stp.rsp, + frame_header, + frame_buffer); + + ireq->stp.rsp.status = stp_req->status; + + /* The next state is dependent on whether the + * request was PIO Data-in or Data out + */ + if (task->data_dir == DMA_FROM_DEVICE) { + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); + } else if (task->data_dir == DMA_TO_DEVICE) { + /* Transmit data */ + status = sci_stp_request_pio_data_out_transmit_data(ireq); + if (status != SCI_SUCCESS) + break; + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); + } + break; + + case FIS_SETDEVBITS: + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + break; + + case FIS_REGD2H: + if (frame_header->status & ATA_BUSY) { + /* + * Now why is the drive sending a D2H Register + * FIS when it is still busy? Do nothing since + * we are still in the right state. + */ + dev_dbg(&ihost->pdev->dev, + "%s: SCIC PIO Request 0x%p received " + "D2H Register FIS with BSY status " + "0x%x\n", + __func__, + stp_req, + frame_header->status); + break; + } + + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&frame_buffer); + + sci_controller_copy_sata_response(&ireq->stp.rsp, + frame_header, + frame_buffer); + + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + + default: + /* FIXME: what do we do here? 
*/ + break; + } + + /* Frame is decoded return it to the controller */ + sci_controller_release_frame(ihost, frame_index); + + return status; + } + + case SCI_REQ_STP_PIO_DATA_IN: { + struct dev_to_host_fis *frame_header; + struct sata_fis_data *frame_buffer; + + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) { + dev_err(&ihost->pdev->dev, + "%s: SCIC IO Request 0x%p could not get frame " + "header for frame index %d, status %x\n", + __func__, + stp_req, + frame_index, + status); + return status; + } + + if (frame_header->fis_type != FIS_DATA) { + dev_err(&ihost->pdev->dev, + "%s: SCIC PIO Request 0x%p received frame %d " + "with fis type 0x%02x when expecting a data " + "fis.\n", + __func__, + stp_req, + frame_index, + frame_header->fis_type); + + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + + /* Frame is decoded return it to the controller */ + sci_controller_release_frame(ihost, frame_index); + return status; + } + + if (stp_req->sgl.index < 0) { + ireq->saved_rx_frame_index = frame_index; + stp_req->pio_len = 0; + } else { + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&frame_buffer); + + status = sci_stp_request_pio_data_in_copy_data(stp_req, + (u8 *)frame_buffer); + + /* Frame is decoded return it to the controller */ + sci_controller_release_frame(ihost, frame_index); + } + + /* Check for the end of the transfer, are there more + * bytes remaining for this data transfer + */ + if (status != SCI_SUCCESS || stp_req->pio_len != 0) + return status; + + if ((stp_req->status & ATA_BUSY) == 0) { + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + } else { + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + } + return status; + } + + case SCI_REQ_ATAPI_WAIT_PIO_SETUP: { + struct sas_task *task = isci_request_access_task(ireq); + + sci_controller_release_frame(ihost, frame_index); + ireq->target_device->working_request = ireq; + if (task->data_dir == DMA_NONE) { + sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP); + scu_atapi_reconstruct_raw_frame_task_context(ireq); + } else { + sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); + scu_atapi_construct_task_context(ireq); + } + + sci_controller_continue_io(ireq); + return SCI_SUCCESS; + } + case SCI_REQ_ATAPI_WAIT_D2H: + return atapi_d2h_reg_frame_handler(ireq, frame_index); + case SCI_REQ_ABORTING: + /* + * TODO: Is it even possible to get an unsolicited frame in the + * aborting state? 
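+		 * Either way the frame is handed back to the controller and
+		 * the handler reports success so the abort path is not
+		 * disturbed.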
+ */ + sci_controller_release_frame(ihost, frame_index); + return SCI_SUCCESS; + + default: + dev_warn(&ihost->pdev->dev, + "%s: SCIC IO Request given unexpected frame %x while " + "in state %d\n", + __func__, + frame_index, + state); + + sci_controller_release_frame(ihost, frame_index); + return SCI_FAILURE_INVALID_STATE; + } +} + +static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): + /* We must check ther response buffer to see if the D2H + * Register FIS was received before we got the TC + * completion. + */ + if (ireq->stp.rsp.fis_type == FIS_REGD2H) { + sci_remote_device_suspend(ireq->target_device, + SCI_SW_SUSPEND_NORMAL); + + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + } else { + /* If we have an error completion status for the + * TC then we can expect a D2H register FIS from + * the device so we must change state to wait + * for it + */ + sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H); + } + break; + + /* TODO Check to see if any of these completion status need to + * wait for the device to host register fis. + */ + /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR + * - this comes only for B0 + */ + default: + /* All other completion status cause the IO to be complete. */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code, + enum sci_base_request_states next) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, next); + break; + default: + /* All other completion status cause the IO to be complete. + * If a NAK was received, then it is up to the user to retry + * the request. 
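+		 * No retry is attempted here: the normalized SCU status is
+		 * recorded and the request moves straight to the COMPLETED
+		 * state.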
+ */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq, + u32 completion_code) +{ + struct isci_remote_device *idev = ireq->target_device; + struct dev_to_host_fis *d2h = &ireq->stp.rsp; + enum sci_status status = SCI_SUCCESS; + + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + + case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): { + u16 len = sci_req_tx_bytes(ireq); + + /* likely non-error data underrun, workaround missing + * d2h frame from the controller + */ + if (d2h->fis_type != FIS_REGD2H) { + d2h->fis_type = FIS_REGD2H; + d2h->flags = (1 << 6); + d2h->status = 0x50; + d2h->error = 0; + d2h->lbal = 0; + d2h->byte_count_low = len & 0xff; + d2h->byte_count_high = len >> 8; + d2h->device = 0xa0; + d2h->lbal_exp = 0; + d2h->lbam_exp = 0; + d2h->lbah_exp = 0; + d2h->_r_a = 0; + d2h->sector_count = 0x3; + d2h->sector_count_exp = 0; + d2h->_r_b = 0; + d2h->_r_c = 0; + d2h->_r_d = 0; + } + + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; + status = ireq->sci_status; + + /* the hw will have suspended the rnc, so complete the + * request upon pending resume + */ + sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); + break; + } + case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT): + /* In this case, there is no UF coming after. + * compelte the IO now. + */ + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + + default: + if (d2h->fis_type == FIS_REGD2H) { + /* UF received change the device state to ATAPI_ERROR */ + status = ireq->sci_status; + sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); + } else { + /* If receiving any non-success TC status, no UF + * received yet, then an UF for the status fis + * is coming after (XXX: suspect this is + * actually a protocol error or a bug like the + * DONE_UNEXP_FIS case) + */ + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + + sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); + } + break; + } + + return status; +} + +static int sci_request_smp_completion_status_is_tx_suspend( + unsigned int completion_status) +{ + switch (completion_status) { + case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: + case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: + case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: + return 1; + } + return 0; +} + +static int sci_request_smp_completion_status_is_tx_rx_suspend( + unsigned int completion_status) +{ + return 0; /* There are no Tx/Rx SMP suspend conditions. 
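+		  * The helper only exists so the per-protocol decode in
+		  * sci_request_handle_suspending_completions() stays uniform.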
*/ +} + +static int sci_request_ssp_completion_status_is_tx_suspend( + unsigned int completion_status) +{ + switch (completion_status) { + case SCU_TASK_DONE_TX_RAW_CMD_ERR: + case SCU_TASK_DONE_LF_ERR: + case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: + case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: + case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: + case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: + case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: + case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: + return 1; + } + return 0; +} + +static int sci_request_ssp_completion_status_is_tx_rx_suspend( + unsigned int completion_status) +{ + return 0; /* There are no Tx/Rx SSP suspend conditions. */ +} + +static int sci_request_stpsata_completion_status_is_tx_suspend( + unsigned int completion_status) +{ + switch (completion_status) { + case SCU_TASK_DONE_TX_RAW_CMD_ERR: + case SCU_TASK_DONE_LL_R_ERR: + case SCU_TASK_DONE_LL_PERR: + case SCU_TASK_DONE_REG_ERR: + case SCU_TASK_DONE_SDB_ERR: + case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: + case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: + case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: + case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: + case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: + case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: + return 1; + } + return 0; +} + + +static int sci_request_stpsata_completion_status_is_tx_rx_suspend( + unsigned int completion_status) +{ + switch (completion_status) { + case SCU_TASK_DONE_LF_ERR: + case SCU_TASK_DONE_LL_SY_TERM: + case SCU_TASK_DONE_LL_LF_TERM: + case SCU_TASK_DONE_BREAK_RCVD: + case SCU_TASK_DONE_INV_FIS_LEN: + case SCU_TASK_DONE_UNEXP_FIS: + case SCU_TASK_DONE_UNEXP_SDBFIS: + case SCU_TASK_DONE_MAX_PLD_ERR: + return 1; + } + return 0; +} + +static void sci_request_handle_suspending_completions( + struct isci_request *ireq, + u32 completion_code) +{ + int is_tx = 0; + int is_tx_rx = 0; + + switch (ireq->protocol) { + case SAS_PROTOCOL_SMP: + is_tx = sci_request_smp_completion_status_is_tx_suspend( + completion_code); + is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend( + completion_code); + break; + case SAS_PROTOCOL_SSP: + is_tx = sci_request_ssp_completion_status_is_tx_suspend( + completion_code); + is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend( + completion_code); + break; + case SAS_PROTOCOL_STP: + is_tx = sci_request_stpsata_completion_status_is_tx_suspend( + completion_code); + is_tx_rx = + sci_request_stpsata_completion_status_is_tx_rx_suspend( + completion_code); + break; + default: + dev_warn(&ireq->isci_host->pdev->dev, + "%s: request %p has no valid protocol\n", + __func__, ireq); + break; + } + if (is_tx || is_tx_rx) { + BUG_ON(is_tx && is_tx_rx); + + sci_remote_node_context_suspend( + &ireq->target_device->rnc, + SCI_HW_SUSPEND, + (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX + : SCU_EVENT_TL_RNC_SUSPEND_TX); + } +} + +enum sci_status +sci_io_request_tc_completion(struct isci_request *ireq, + u32 completion_code) +{ + enum sci_base_request_states state; + struct isci_host *ihost = ireq->owning_controller; + + state = ireq->sm.current_state_id; + + /* Decode those completions that signal upcoming suspension events. 
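+	 * A Tx or Tx/Rx suspending completion posts a hardware suspend of
+	 * the remote node context before the state-specific handling below
+	 * runs.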
*/ + sci_request_handle_suspending_completions( + ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code)); + + switch (state) { + case SCI_REQ_STARTED: + return request_started_state_tc_event(ireq, completion_code); + + case SCI_REQ_TASK_WAIT_TC_COMP: + return ssp_task_request_await_tc_event(ireq, + completion_code); + + case SCI_REQ_SMP_WAIT_RESP: + return smp_request_await_response_tc_event(ireq, + completion_code); + + case SCI_REQ_SMP_WAIT_TC_COMP: + return smp_request_await_tc_event(ireq, completion_code); + + case SCI_REQ_STP_UDMA_WAIT_TC_COMP: + return stp_request_udma_await_tc_event(ireq, + completion_code); + + case SCI_REQ_STP_NON_DATA_WAIT_H2D: + return stp_request_non_data_await_h2d_tc_event(ireq, + completion_code); + + case SCI_REQ_STP_PIO_WAIT_H2D: + return stp_request_pio_await_h2d_completion_tc_event(ireq, + completion_code); + + case SCI_REQ_STP_PIO_DATA_OUT: + return pio_data_out_tx_done_tc_event(ireq, completion_code); + + case SCI_REQ_ABORTING: + return request_aborting_state_tc_event(ireq, + completion_code); + + case SCI_REQ_ATAPI_WAIT_H2D: + return atapi_raw_completion(ireq, completion_code, + SCI_REQ_ATAPI_WAIT_PIO_SETUP); + + case SCI_REQ_ATAPI_WAIT_TC_COMP: + return atapi_raw_completion(ireq, completion_code, + SCI_REQ_ATAPI_WAIT_D2H); + + case SCI_REQ_ATAPI_WAIT_D2H: + return atapi_data_tc_completion_handler(ireq, completion_code); + + default: + dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n", + __func__, completion_code, req_state_name(state)); + return SCI_FAILURE_INVALID_STATE; + } +} + +/** + * isci_request_process_response_iu() - This function sets the status and + * response iu, in the task struct, from the request object for the upper + * layer driver. + * @task: This parameter is the task struct from the upper layer driver. + * @resp_iu: This parameter points to the response iu of the completed request. + * @dev: This parameter specifies the linux device struct. + * + * none. + */ +static void isci_request_process_response_iu( + struct sas_task *task, + struct ssp_response_iu *resp_iu, + struct device *dev) +{ + dev_dbg(dev, + "%s: resp_iu = %p " + "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " + "resp_iu->response_data_len = %x, " + "resp_iu->sense_data_len = %x\nresponse data: ", + __func__, + resp_iu, + resp_iu->status, + resp_iu->datapres, + resp_iu->response_data_len, + resp_iu->sense_data_len); + + task->task_status.stat = resp_iu->status; + + /* libsas updates the task status fields based on the response iu. */ + sas_ssp_task_response(dev, task, resp_iu); +} + +/** + * isci_request_set_open_reject_status() - This function prepares the I/O + * completion for OPEN_REJECT conditions. + * @request: This parameter is the completed isci_request object. + * @task: This parameter is the task struct from the upper layer driver. + * @response_ptr: This parameter specifies the service response for the I/O. + * @status_ptr: This parameter specifies the exec status for the I/O. + * @open_rej_reason: This parameter specifies the encoded reason for the + * abandon-class reject. + * + * none. + */ +static void isci_request_set_open_reject_status( + struct isci_request *request, + struct sas_task *task, + enum service_response *response_ptr, + enum exec_status *status_ptr, + enum sas_open_rej_reason open_rej_reason) +{ + /* Task in the target is done. 
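+	 * The OPEN was rejected before the command was delivered, so nothing
+	 * is left outstanding in the target to clean up.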
*/ + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + *response_ptr = SAS_TASK_UNDELIVERED; + *status_ptr = SAS_OPEN_REJECT; + task->task_status.open_rej_reason = open_rej_reason; +} + +/** + * isci_request_handle_controller_specific_errors() - This function decodes + * controller-specific I/O completion error conditions. + * @idev: Remote device + * @request: This parameter is the completed isci_request object. + * @task: This parameter is the task struct from the upper layer driver. + * @response_ptr: This parameter specifies the service response for the I/O. + * @status_ptr: This parameter specifies the exec status for the I/O. + * + * none. + */ +static void isci_request_handle_controller_specific_errors( + struct isci_remote_device *idev, + struct isci_request *request, + struct sas_task *task, + enum service_response *response_ptr, + enum exec_status *status_ptr) +{ + unsigned int cstatus; + + cstatus = request->scu_status; + + dev_dbg(&request->isci_host->pdev->dev, + "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " + "- controller status = 0x%x\n", + __func__, request, cstatus); + + /* Decode the controller-specific errors; most + * important is to recognize those conditions in which + * the target may still have a task outstanding that + * must be aborted. + * + * Note that there are SCU completion codes being + * named in the decode below for which SCIC has already + * done work to handle them in a way other than as + * a controller-specific completion code; these are left + * in the decode below for completeness sake. + */ + switch (cstatus) { + case SCU_TASK_DONE_DMASETUP_DIRERR: + /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */ + case SCU_TASK_DONE_XFERCNT_ERR: + /* Also SCU_TASK_DONE_SMP_UFI_ERR: */ + if (task->task_proto == SAS_PROTOCOL_SMP) { + /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */ + *response_ptr = SAS_TASK_COMPLETE; + + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + if (!idev) + *status_ptr = SAS_DEVICE_UNKNOWN; + else + *status_ptr = SAS_ABORTED_TASK; + + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + } else { + /* Task in the target is not done. */ + *response_ptr = SAS_TASK_UNDELIVERED; + + if (!idev) + *status_ptr = SAS_DEVICE_UNKNOWN; + else + *status_ptr = SAS_SAM_STAT_TASK_ABORTED; + + clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + } + + break; + + case SCU_TASK_DONE_CRC_ERR: + case SCU_TASK_DONE_NAK_CMD_ERR: + case SCU_TASK_DONE_EXCESS_DATA: + case SCU_TASK_DONE_UNEXP_FIS: + /* Also SCU_TASK_DONE_UNEXP_RESP: */ + case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */ + case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */ + case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */ + /* These are conditions in which the target + * has completed the task, so that no cleanup + * is necessary. + */ + *response_ptr = SAS_TASK_COMPLETE; + + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + if (!idev) + *status_ptr = SAS_DEVICE_UNKNOWN; + else + *status_ptr = SAS_ABORTED_TASK; + + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + break; + + + /* Note that the only open reject completion codes seen here will be + * abandon-class codes; all others are automatically retried in the SCU. 
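+	 * Each abandon-class code is mapped to the matching libsas
+	 * SAS_OREJ_* reason via isci_request_set_open_reject_status().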
+ */ + case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + SAS_OREJ_WRONG_DEST); + break; + + case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: + + /* Note - the return of AB0 will change when + * libsas implements detection of zone violations. + */ + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + SAS_OREJ_RESV_AB0); + break; + + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + SAS_OREJ_RESV_AB1); + break; + + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + SAS_OREJ_RESV_AB2); + break; + + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + SAS_OREJ_RESV_AB3); + break; + + case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + SAS_OREJ_BAD_DEST); + break; + + case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + SAS_OREJ_STP_NORES); + break; + + case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + SAS_OREJ_EPROTO); + break; + + case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + SAS_OREJ_CONN_RATE); + break; + + case SCU_TASK_DONE_LL_R_ERR: + /* Also SCU_TASK_DONE_ACK_NAK_TO: */ + case SCU_TASK_DONE_LL_PERR: + case SCU_TASK_DONE_LL_SY_TERM: + /* Also SCU_TASK_DONE_NAK_ERR:*/ + case SCU_TASK_DONE_LL_LF_TERM: + /* Also SCU_TASK_DONE_DATA_LEN_ERR: */ + case SCU_TASK_DONE_LL_ABORT_ERR: + case SCU_TASK_DONE_SEQ_INV_TYPE: + /* Also SCU_TASK_DONE_UNEXP_XR: */ + case SCU_TASK_DONE_XR_IU_LEN_ERR: + case SCU_TASK_DONE_INV_FIS_LEN: + /* Also SCU_TASK_DONE_XR_WD_LEN: */ + case SCU_TASK_DONE_SDMA_ERR: + case SCU_TASK_DONE_OFFSET_ERR: + case SCU_TASK_DONE_MAX_PLD_ERR: + case SCU_TASK_DONE_LF_ERR: + case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */ + case SCU_TASK_DONE_SMP_LL_RX_ERR: + case SCU_TASK_DONE_UNEXP_DATA: + case SCU_TASK_DONE_UNEXP_SDBFIS: + case SCU_TASK_DONE_REG_ERR: + case SCU_TASK_DONE_SDB_ERR: + case SCU_TASK_DONE_TASK_ABORT: + default: + /* Task in the target is not done. 
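+		 * These link and transport layer errors may leave a task
+		 * active in the target, so the I/O is reported as aborted
+		 * and, except for SMP, is left marked as not complete in the
+		 * target.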
*/ + *response_ptr = SAS_TASK_UNDELIVERED; + *status_ptr = SAS_SAM_STAT_TASK_ABORTED; + + if (task->task_proto == SAS_PROTOCOL_SMP) + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + else + clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + break; + } +} + +static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) +{ + struct task_status_struct *ts = &task->task_status; + struct ata_task_resp *resp = (void *)&ts->buf[0]; + + resp->frame_len = sizeof(*fis); + memcpy(resp->ending_fis, fis, sizeof(*fis)); + ts->buf_valid_size = sizeof(*resp); + + /* If an error is flagged let libata decode the fis */ + if (ac_err_mask(fis->status)) + ts->stat = SAS_PROTO_RESPONSE; + else + ts->stat = SAS_SAM_STAT_GOOD; + + ts->resp = SAS_TASK_COMPLETE; +} + +static void isci_request_io_request_complete(struct isci_host *ihost, + struct isci_request *request, + enum sci_io_status completion_status) +{ + struct sas_task *task = isci_request_access_task(request); + struct ssp_response_iu *resp_iu; + unsigned long task_flags; + struct isci_remote_device *idev = request->target_device; + enum service_response response = SAS_TASK_UNDELIVERED; + enum exec_status status = SAS_ABORTED_TASK; + + dev_dbg(&ihost->pdev->dev, + "%s: request = %p, task = %p, " + "task->data_dir = %d completion_status = 0x%x\n", + __func__, request, task, task->data_dir, completion_status); + + /* The request is done from an SCU HW perspective. */ + + /* This is an active request being completed from the core. */ + switch (completion_status) { + + case SCI_IO_FAILURE_RESPONSE_VALID: + dev_dbg(&ihost->pdev->dev, + "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", + __func__, request, task); + + if (sas_protocol_ata(task->task_proto)) { + isci_process_stp_response(task, &request->stp.rsp); + } else if (SAS_PROTOCOL_SSP == task->task_proto) { + + /* crack the iu response buffer. */ + resp_iu = &request->ssp.rsp; + isci_request_process_response_iu(task, resp_iu, + &ihost->pdev->dev); + + } else if (SAS_PROTOCOL_SMP == task->task_proto) { + + dev_err(&ihost->pdev->dev, + "%s: SCI_IO_FAILURE_RESPONSE_VALID: " + "SAS_PROTOCOL_SMP protocol\n", + __func__); + + } else + dev_err(&ihost->pdev->dev, + "%s: unknown protocol\n", __func__); + + /* use the task status set in the task struct by the + * isci_request_process_response_iu call. + */ + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + response = task->task_status.resp; + status = task->task_status.stat; + break; + + case SCI_IO_SUCCESS: + case SCI_IO_SUCCESS_IO_DONE_EARLY: + + response = SAS_TASK_COMPLETE; + status = SAS_SAM_STAT_GOOD; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + + if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { + + /* This was an SSP / STP / SATA transfer. + * There is a possibility that less data than + * the maximum was transferred. + */ + u32 transferred_length = sci_req_tx_bytes(request); + + task->task_status.residual + = task->total_xfer_len - transferred_length; + + /* If there were residual bytes, call this an + * underrun. + */ + if (task->task_status.residual != 0) + status = SAS_DATA_UNDERRUN; + + dev_dbg(&ihost->pdev->dev, + "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", + __func__, status); + + } else + dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n", + __func__); + break; + + case SCI_IO_FAILURE_TERMINATED: + + dev_dbg(&ihost->pdev->dev, + "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", + __func__, request, task); + + /* The request was terminated explicitly. 
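+		 * Report it as undelivered; the exec status below depends on
+		 * whether the remote device is still known to the driver.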
*/ + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + response = SAS_TASK_UNDELIVERED; + + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + if (!idev) + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_ABORTED_TASK; + break; + + case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: + + isci_request_handle_controller_specific_errors(idev, request, + task, &response, + &status); + break; + + case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: + /* This is a special case, in that the I/O completion + * is telling us that the device needs a reset. + * In order for the device reset condition to be + * noticed, the I/O has to be handled in the error + * handler. Set the reset flag and cause the + * SCSI error thread to be scheduled. + */ + spin_lock_irqsave(&task->task_state_lock, task_flags); + task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; + spin_unlock_irqrestore(&task->task_state_lock, task_flags); + + /* Fail the I/O. */ + response = SAS_TASK_UNDELIVERED; + status = SAS_SAM_STAT_TASK_ABORTED; + + clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + break; + + case SCI_FAILURE_RETRY_REQUIRED: + + /* Fail the I/O so it can be retried. */ + response = SAS_TASK_UNDELIVERED; + if (!idev) + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_ABORTED_TASK; + + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + break; + + + default: + /* Catch any otherwise unhandled error codes here. */ + dev_dbg(&ihost->pdev->dev, + "%s: invalid completion code: 0x%x - " + "isci_request = %p\n", + __func__, completion_status, request); + + response = SAS_TASK_UNDELIVERED; + + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + if (!idev) + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_ABORTED_TASK; + + if (SAS_PROTOCOL_SMP == task->task_proto) + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + else + clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + break; + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + if (task->data_dir == DMA_NONE) + break; + if (task->num_scatter == 0) + /* 0 indicates a single dma address */ + dma_unmap_single(&ihost->pdev->dev, + request->zero_scatter_daddr, + task->total_xfer_len, task->data_dir); + else /* unmap the sgl dma addresses */ + dma_unmap_sg(&ihost->pdev->dev, task->scatter, + request->num_sg_entries, task->data_dir); + break; + case SAS_PROTOCOL_SMP: { + struct scatterlist *sg = &task->smp_task.smp_req; + struct smp_req *smp_req; + void *kaddr; + + dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); + + /* need to swab it back in case the command buffer is re-used */ + kaddr = kmap_atomic(sg_page(sg)); + smp_req = kaddr + sg->offset; + sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); + kunmap_atomic(kaddr); + break; + } + default: + break; + } + + spin_lock_irqsave(&task->task_state_lock, task_flags); + + task->task_status.resp = response; + task->task_status.stat = status; + + if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) { + /* Normal notification (task_done) */ + task->task_state_flags |= SAS_TASK_STATE_DONE; + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + } + spin_unlock_irqrestore(&task->task_state_lock, task_flags); + + /* complete the io request to the core. 
*/ + sci_controller_complete_io(ihost, request->target_device, request); + + /* set terminated handle so it cannot be completed or + * terminated again, and to cause any calls into abort + * task to recognize the already completed case. + */ + set_bit(IREQ_TERMINATED, &request->flags); + + ireq_done(ihost, request, task); +} + +static void sci_request_started_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); + struct domain_device *dev = ireq->target_device->domain_dev; + enum sci_base_request_states state; + struct sas_task *task; + + /* XXX as hch said always creating an internal sas_task for tmf + * requests would simplify the driver + */ + task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq); + + /* all unaccelerated request types (non ssp or ncq) handled with + * substates + */ + if (!task && dev->dev_type == SAS_END_DEVICE) { + state = SCI_REQ_TASK_WAIT_TC_COMP; + } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { + state = SCI_REQ_SMP_WAIT_RESP; + } else if (task && sas_protocol_ata(task->task_proto) && + !task->ata_task.use_ncq) { + if (dev->sata_dev.class == ATA_DEV_ATAPI && + task->ata_task.fis.command == ATA_CMD_PACKET) { + state = SCI_REQ_ATAPI_WAIT_H2D; + } else if (task->data_dir == DMA_NONE) { + state = SCI_REQ_STP_NON_DATA_WAIT_H2D; + } else if (task->ata_task.dma_xfer) { + state = SCI_REQ_STP_UDMA_WAIT_TC_COMP; + } else /* PIO */ { + state = SCI_REQ_STP_PIO_WAIT_H2D; + } + } else { + /* SSP or NCQ are fully accelerated, no substates */ + return; + } + sci_change_state(sm, state); +} + +static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); + struct isci_host *ihost = ireq->owning_controller; + + /* Tell the SCI_USER that the IO request is complete */ + if (!test_bit(IREQ_TMF, &ireq->flags)) + isci_request_io_request_complete(ihost, ireq, + ireq->sci_status); + else + isci_task_request_complete(ihost, ireq, ireq->sci_status); +} + +static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); + + /* Setting the abort bit in the Task Context is required by the silicon. 
*/ + ireq->tc->abort = 1; +} + +static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) +{ + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); + + ireq->target_device->working_request = ireq; +} + +static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) +{ + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); + + ireq->target_device->working_request = ireq; +} + +static const struct sci_base_state sci_request_state_table[] = { + [SCI_REQ_INIT] = { }, + [SCI_REQ_CONSTRUCTED] = { }, + [SCI_REQ_STARTED] = { + .enter_state = sci_request_started_state_enter, + }, + [SCI_REQ_STP_NON_DATA_WAIT_H2D] = { + .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter, + }, + [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { }, + [SCI_REQ_STP_PIO_WAIT_H2D] = { + .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter, + }, + [SCI_REQ_STP_PIO_WAIT_FRAME] = { }, + [SCI_REQ_STP_PIO_DATA_IN] = { }, + [SCI_REQ_STP_PIO_DATA_OUT] = { }, + [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, + [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, + [SCI_REQ_TASK_WAIT_TC_COMP] = { }, + [SCI_REQ_TASK_WAIT_TC_RESP] = { }, + [SCI_REQ_SMP_WAIT_RESP] = { }, + [SCI_REQ_SMP_WAIT_TC_COMP] = { }, + [SCI_REQ_ATAPI_WAIT_H2D] = { }, + [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { }, + [SCI_REQ_ATAPI_WAIT_D2H] = { }, + [SCI_REQ_ATAPI_WAIT_TC_COMP] = { }, + [SCI_REQ_COMPLETED] = { + .enter_state = sci_request_completed_state_enter, + }, + [SCI_REQ_ABORTING] = { + .enter_state = sci_request_aborting_state_enter, + }, + [SCI_REQ_FINAL] = { }, +}; + +static void +sci_general_request_construct(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); + + ireq->target_device = idev; + ireq->protocol = SAS_PROTOCOL_NONE; + ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; + + ireq->sci_status = SCI_SUCCESS; + ireq->scu_status = 0; + ireq->post_context = 0xFFFFFFFF; +} + +static enum sci_status +sci_io_request_construct(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + struct domain_device *dev = idev->domain_dev; + enum sci_status status = SCI_SUCCESS; + + /* Build the common part of the request */ + sci_general_request_construct(ihost, idev, ireq); + + if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) + return SCI_FAILURE_INVALID_REMOTE_DEVICE; + + if (dev->dev_type == SAS_END_DEVICE) + /* pass */; + else if (dev_is_sata(dev)) + memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); + else if (dev_is_expander(dev->dev_type)) + /* pass */; + else + return SCI_FAILURE_UNSUPPORTED_PROTOCOL; + + memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab)); + + return status; +} + +enum sci_status sci_task_request_construct(struct isci_host *ihost, + struct isci_remote_device *idev, + u16 io_tag, struct isci_request *ireq) +{ + struct domain_device *dev = idev->domain_dev; + enum sci_status status = SCI_SUCCESS; + + /* Build the common part of the request */ + sci_general_request_construct(ihost, idev, ireq); + + if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) { + set_bit(IREQ_TMF, &ireq->flags); + memset(ireq->tc, 0, sizeof(struct scu_task_context)); + + /* Set the protocol indicator. 
*/ + if (dev_is_sata(dev)) + ireq->protocol = SAS_PROTOCOL_STP; + else + ireq->protocol = SAS_PROTOCOL_SSP; + } else + status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; + + return status; +} + +static enum sci_status isci_request_ssp_request_construct( + struct isci_request *request) +{ + enum sci_status status; + + dev_dbg(&request->isci_host->pdev->dev, + "%s: request = %p\n", + __func__, + request); + status = sci_io_request_construct_basic_ssp(request); + return status; +} + +static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq) +{ + struct sas_task *task = isci_request_access_task(ireq); + struct host_to_dev_fis *fis = &ireq->stp.cmd; + struct ata_queued_cmd *qc = task->uldd_task; + enum sci_status status; + + dev_dbg(&ireq->isci_host->pdev->dev, + "%s: ireq = %p\n", + __func__, + ireq); + + memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); + if (!task->ata_task.device_control_reg_update) + fis->flags |= 0x80; + fis->flags &= 0xF0; + + status = sci_io_request_construct_basic_sata(ireq); + + if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE || + qc->tf.command == ATA_CMD_FPDMA_READ || + qc->tf.command == ATA_CMD_FPDMA_RECV || + qc->tf.command == ATA_CMD_FPDMA_SEND || + qc->tf.command == ATA_CMD_NCQ_NON_DATA)) { + fis->sector_count = qc->tag << 3; + ireq->tc->type.stp.ncq_tag = qc->tag; + } + + return status; +} + +static enum sci_status +sci_io_request_construct_smp(struct device *dev, + struct isci_request *ireq, + struct sas_task *task) +{ + struct scatterlist *sg = &task->smp_task.smp_req; + struct isci_remote_device *idev; + struct scu_task_context *task_context; + struct isci_port *iport; + struct smp_req *smp_req; + void *kaddr; + u8 req_len; + u32 cmd; + + kaddr = kmap_atomic(sg_page(sg)); + smp_req = kaddr + sg->offset; + /* + * Look at the SMP requests' header fields; for certain SAS 1.x SMP + * functions under SAS 2.0, a zero request length really indicates + * a non-zero default length. + */ + if (smp_req->req_len == 0) { + switch (smp_req->func) { + case SMP_DISCOVER: + case SMP_REPORT_PHY_ERR_LOG: + case SMP_REPORT_PHY_SATA: + case SMP_REPORT_ROUTE_INFO: + smp_req->req_len = 2; + break; + case SMP_CONF_ROUTE_INFO: + case SMP_PHY_CONTROL: + case SMP_PHY_TEST_FUNCTION: + smp_req->req_len = 9; + break; + /* Default - zero is a valid default for 2.0. */ + } + } + req_len = smp_req->req_len; + sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); + cmd = *(u32 *) smp_req; + kunmap_atomic(kaddr); + + if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) + return SCI_FAILURE; + + ireq->protocol = SAS_PROTOCOL_SMP; + + /* byte swap the smp request. 
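+	 * (this was done above, before the request buffer was unmapped)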
*/ + + task_context = ireq->tc; + + idev = ireq->target_device; + iport = idev->owning_port; + + /* + * Fill in the TC with its required data + * 00h + */ + task_context->priority = 0; + task_context->initiator_request = 1; + task_context->connection_rate = idev->connection_rate; + task_context->protocol_engine_index = ISCI_PEG; + task_context->logical_port_index = iport->physical_port_index; + task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; + task_context->abort = 0; + task_context->valid = SCU_TASK_CONTEXT_VALID; + task_context->context_type = SCU_TASK_CONTEXT_TYPE; + + /* 04h */ + task_context->remote_node_index = idev->rnc.remote_node_index; + task_context->command_code = 0; + task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST; + + /* 08h */ + task_context->link_layer_control = 0; + task_context->do_not_dma_ssp_good_response = 1; + task_context->strict_ordering = 0; + task_context->control_frame = 1; + task_context->timeout_enable = 0; + task_context->block_guard_enable = 0; + + /* 0ch */ + task_context->address_modifier = 0; + + /* 10h */ + task_context->ssp_command_iu_length = req_len; + + /* 14h */ + task_context->transfer_length_bytes = 0; + + /* + * 18h ~ 30h, protocol specific + * since commandIU has been build by framework at this point, we just + * copy the frist DWord from command IU to this location. */ + memcpy(&task_context->type.smp, &cmd, sizeof(u32)); + + /* + * 40h + * "For SMP you could program it to zero. We would prefer that way + * so that done code will be consistent." - Venki + */ + task_context->task_phase = 0; + + ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (iport->physical_port_index << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + ISCI_TAG_TCI(ireq->io_tag)); + /* + * Copy the physical address for the command buffer to the SCU Task + * Context command buffer should not contain command header. + */ + task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg)); + task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32)); + + /* SMP response comes as UF, so no need to set response IU address. */ + task_context->response_iu_upper = 0; + task_context->response_iu_lower = 0; + + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); + + return SCI_SUCCESS; +} + +/* + * isci_smp_request_build() - This function builds the smp request. + * @ireq: This parameter points to the isci_request allocated in the + * request construct function. + * + * SCI_SUCCESS on successfull completion, or specific failure code. + */ +static enum sci_status isci_smp_request_build(struct isci_request *ireq) +{ + struct sas_task *task = isci_request_access_task(ireq); + struct device *dev = &ireq->isci_host->pdev->dev; + enum sci_status status = SCI_FAILURE; + + status = sci_io_request_construct_smp(dev, ireq, task); + if (status != SCI_SUCCESS) + dev_dbg(&ireq->isci_host->pdev->dev, + "%s: failed with status = %d\n", + __func__, + status); + + return status; +} + +/** + * isci_io_request_build() - This function builds the io request object. + * @ihost: This parameter specifies the ISCI host object + * @request: This parameter points to the isci_request object allocated in the + * request construct function. + * @idev: This parameter is the handle for the sci core's remote device + * object that is the destination for this request. + * + * SCI_SUCCESS on successfull completion, or specific failure code. 
+ */ +static enum sci_status isci_io_request_build(struct isci_host *ihost, + struct isci_request *request, + struct isci_remote_device *idev) +{ + enum sci_status status = SCI_SUCCESS; + struct sas_task *task = isci_request_access_task(request); + + dev_dbg(&ihost->pdev->dev, + "%s: idev = 0x%p; request = %p, " + "num_scatter = %d\n", + __func__, + idev, + request, + task->num_scatter); + + /* map the sgl addresses, if present. + * libata does the mapping for sata devices + * before we get the request. + */ + if (task->num_scatter && + !sas_protocol_ata(task->task_proto) && + !(SAS_PROTOCOL_SMP & task->task_proto)) { + + request->num_sg_entries = dma_map_sg( + &ihost->pdev->dev, + task->scatter, + task->num_scatter, + task->data_dir + ); + + if (request->num_sg_entries == 0) + return SCI_FAILURE_INSUFFICIENT_RESOURCES; + } + + status = sci_io_request_construct(ihost, idev, request); + + if (status != SCI_SUCCESS) { + dev_dbg(&ihost->pdev->dev, + "%s: failed request construct\n", + __func__); + return SCI_FAILURE; + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + status = isci_smp_request_build(request); + break; + case SAS_PROTOCOL_SSP: + status = isci_request_ssp_request_construct(request); + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + status = isci_request_stp_request_construct(request); + break; + default: + dev_dbg(&ihost->pdev->dev, + "%s: unknown protocol\n", __func__); + return SCI_FAILURE; + } + + return SCI_SUCCESS; +} + +static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag) +{ + struct isci_request *ireq; + + ireq = ihost->reqs[ISCI_TAG_TCI(tag)]; + ireq->io_tag = tag; + ireq->io_request_completion = NULL; + ireq->flags = 0; + ireq->num_sg_entries = 0; + + return ireq; +} + +struct isci_request *isci_io_request_from_tag(struct isci_host *ihost, + struct sas_task *task, + u16 tag) +{ + struct isci_request *ireq; + + ireq = isci_request_from_tag(ihost, tag); + ireq->ttype_ptr.io_task_ptr = task; + clear_bit(IREQ_TMF, &ireq->flags); + task->lldd_task = ireq; + + return ireq; +} + +struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, + struct isci_tmf *isci_tmf, + u16 tag) +{ + struct isci_request *ireq; + + ireq = isci_request_from_tag(ihost, tag); + ireq->ttype_ptr.tmf_task_ptr = isci_tmf; + set_bit(IREQ_TMF, &ireq->flags); + + return ireq; +} + +int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, + struct sas_task *task, struct isci_request *ireq) +{ + enum sci_status status; + unsigned long flags; + int ret = 0; + + status = isci_io_request_build(ihost, ireq, idev); + if (status != SCI_SUCCESS) { + dev_dbg(&ihost->pdev->dev, + "%s: request_construct failed - status = 0x%x\n", + __func__, + status); + return status; + } + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) { + + if (isci_task_is_ncq_recovery(task)) { + + /* The device is in an NCQ recovery state. Issue the + * request on the task side. Note that it will + * complete on the I/O request side because the + * request was built that way (ie. + * ireq->is_task_management_request is false). + */ + status = sci_controller_start_task(ihost, + idev, + ireq); + } else { + status = SCI_FAILURE; + } + } else { + /* send the request, let the core assign the IO TAG. 
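+		 * (the tag itself was reserved earlier in
+		 * isci_request_from_tag(); this call posts the constructed
+		 * request to the controller)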
*/ + status = sci_controller_start_io(ihost, idev, + ireq); + } + + if (status != SCI_SUCCESS && + status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { + dev_dbg(&ihost->pdev->dev, + "%s: failed request start (0x%x)\n", + __func__, status); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + return status; + } + /* Either I/O started OK, or the core has signaled that + * the device needs a target reset. + */ + if (status != SCI_SUCCESS) { + /* The request did not really start in the + * hardware, so clear the request handle + * here so no terminations will be done. + */ + set_bit(IREQ_TERMINATED, &ireq->flags); + } + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + if (status == + SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { + /* Signal libsas that we need the SCSI error + * handler thread to work on this I/O and that + * we want a device reset. + */ + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + /* Cause this task to be scheduled in the SCSI error + * handler thread. + */ + sas_task_abort(task); + + /* Change the status, since we are holding + * the I/O until it is managed by the SCSI + * error handler. + */ + status = SCI_SUCCESS; + } + + return ret; +} diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h new file mode 100644 index 000000000..20b141739 --- /dev/null +++ b/drivers/scsi/isci/request.h @@ -0,0 +1,313 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _ISCI_REQUEST_H_ +#define _ISCI_REQUEST_H_ + +#include "isci.h" +#include "host.h" +#include "scu_task_context.h" + +/** + * isci_stp_request - extra request infrastructure to handle pio/atapi protocol + * @pio_len - number of bytes requested at PIO setup + * @status - pio setup ending status value to tell us if we need + * to wait for another fis or if the transfer is complete. Upon + * receipt of a d2h fis this will be the status field of that fis. + * @sgl - track pio transfer progress as we iterate through the sgl + */ +struct isci_stp_request { + u32 pio_len; + u8 status; + + struct isci_stp_pio_sgl { + int index; + u8 set; + u32 offset; + } sgl; +}; + +struct isci_request { + #define IREQ_COMPLETE_IN_TARGET 0 + #define IREQ_TERMINATED 1 + #define IREQ_TMF 2 + #define IREQ_ACTIVE 3 + #define IREQ_PENDING_ABORT 4 /* Set == device was not suspended yet */ + #define IREQ_TC_ABORT_POSTED 5 + #define IREQ_ABORT_PATH_ACTIVE 6 + #define IREQ_NO_AUTO_FREE_TAG 7 /* Set when being explicitly managed */ + unsigned long flags; + /* XXX kill ttype and ttype_ptr, allocate full sas_task */ + union ttype_ptr_union { + struct sas_task *io_task_ptr; /* When ttype==io_task */ + struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */ + } ttype_ptr; + struct isci_host *isci_host; + dma_addr_t request_daddr; + dma_addr_t zero_scatter_daddr; + unsigned int num_sg_entries; + /* Note: "io_request_completion" is completed in two different ways + * depending on whether this is a TMF or regular request. + * - TMF requests are completed in the thread that started them; + * - regular requests are completed in the request completion callback + * function. + * This difference in operation allows the aborter of a TMF request + * to be sure that once the TMF request completes, the I/O that the + * TMF was aborting is guaranteed to have completed. + * + * XXX kill io_request_completion + */ + struct completion *io_request_completion; + struct sci_base_state_machine sm; + struct isci_host *owning_controller; + struct isci_remote_device *target_device; + u16 io_tag; + enum sas_protocol protocol; + u32 scu_status; /* hardware result */ + u32 sci_status; /* upper layer disposition */ + u32 post_context; + struct scu_task_context *tc; + /* could be larger with sg chaining */ + #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2) + struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32))); + /* This field is a pointer to the stored rx frame data. It is used in + * STP internal requests and SMP response frames. If this field is + * non-NULL the saved frame must be released on IO request completion. 
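+	 * The field holds a frame index; SCU_INVALID_FRAME_INDEX (set in
+	 * sci_general_request_construct()) means no frame is saved.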
+ */ + u32 saved_rx_frame_index; + + union { + struct { + union { + struct ssp_cmd_iu cmd; + struct ssp_task_iu tmf; + }; + union { + struct ssp_response_iu rsp; + u8 rsp_buf[SSP_RESP_IU_MAX_SIZE]; + }; + } ssp; + struct { + struct isci_stp_request req; + struct host_to_dev_fis cmd; + struct dev_to_host_fis rsp; + } stp; + }; +}; + +static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req) +{ + struct isci_request *ireq; + + ireq = container_of(stp_req, typeof(*ireq), stp.req); + return ireq; +} + +/** + * enum sci_base_request_states - request state machine states + * + * @SCI_REQ_INIT: Simply the initial state for the base request state machine. + * + * @SCI_REQ_CONSTRUCTED: This state indicates that the request has been + * constructed. This state is entered from the INITIAL state. + * + * @SCI_REQ_STARTED: This state indicates that the request has been started. + * This state is entered from the CONSTRUCTED state. + * + * @SCI_REQ_STP_UDMA_WAIT_TC_COMP: + * @SCI_REQ_STP_UDMA_WAIT_D2H: + * @SCI_REQ_STP_NON_DATA_WAIT_H2D: + * @SCI_REQ_STP_NON_DATA_WAIT_D2H: + * + * @SCI_REQ_STP_PIO_WAIT_H2D: While in this state the IO request object is + * waiting for the TC completion notification for the H2D Register FIS + * + * @SCI_REQ_STP_PIO_WAIT_FRAME: While in this state the IO request object is + * waiting for either a PIO Setup FIS or a D2H register FIS. The type of frame + * received is based on the result of the prior frame and line conditions. + * + * @SCI_REQ_STP_PIO_DATA_IN: While in this state the IO request object is + * waiting for a DATA frame from the device. + * + * @SCI_REQ_STP_PIO_DATA_OUT: While in this state the IO request object is + * waiting to transmit the next data frame to the device. + * + * @SCI_REQ_ATAPI_WAIT_H2D: While in this state the IO request object is + * waiting for the TC completion notification for the H2D Register FIS + * + * @SCI_REQ_ATAPI_WAIT_PIO_SETUP: While in this state the IO request object is + * waiting for either a PIO Setup. + * + * @SCI_REQ_ATAPI_WAIT_D2H: The non-data IO transit to this state in this state + * after receiving TC completion. While in this state IO request object is + * waiting for D2H status frame as UF. + * + * @SCI_REQ_ATAPI_WAIT_TC_COMP: When transmitting raw frames hardware reports + * task context completion after every frame submission, so in the + * non-accelerated case we need to expect the completion for the "cdb" frame. + * + * @SCI_REQ_TASK_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that + * the started raw task management request is waiting for the transmission of + * the initial frame (i.e. command, task, etc.). + * + * @SCI_REQ_TASK_WAIT_TC_RESP: This sub-state indicates that the started task + * management request is waiting for the reception of an unsolicited frame + * (i.e. response IU). + * + * @SCI_REQ_SMP_WAIT_RESP: This sub-state indicates that the started task + * management request is waiting for the reception of an unsolicited frame + * (i.e. response IU). + * + * @SCI_REQ_SMP_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that + * the started SMP request is waiting for the transmission of the initial frame + * (i.e. command, task, etc.). + * + * @SCI_REQ_COMPLETED: This state indicates that the request has completed. + * This state is entered from the STARTED state. This state is entered from the + * ABORTING state. + * + * @SCI_REQ_ABORTING: This state indicates that the request is in the process + * of being terminated/aborted. 
This state is entered from the CONSTRUCTED + * state. This state is entered from the STARTED state. + * + * @SCI_REQ_FINAL: Simply the final state for the base request state machine. + */ +#define REQUEST_STATES {\ + C(REQ_INIT),\ + C(REQ_CONSTRUCTED),\ + C(REQ_STARTED),\ + C(REQ_STP_UDMA_WAIT_TC_COMP),\ + C(REQ_STP_UDMA_WAIT_D2H),\ + C(REQ_STP_NON_DATA_WAIT_H2D),\ + C(REQ_STP_NON_DATA_WAIT_D2H),\ + C(REQ_STP_PIO_WAIT_H2D),\ + C(REQ_STP_PIO_WAIT_FRAME),\ + C(REQ_STP_PIO_DATA_IN),\ + C(REQ_STP_PIO_DATA_OUT),\ + C(REQ_ATAPI_WAIT_H2D),\ + C(REQ_ATAPI_WAIT_PIO_SETUP),\ + C(REQ_ATAPI_WAIT_D2H),\ + C(REQ_ATAPI_WAIT_TC_COMP),\ + C(REQ_TASK_WAIT_TC_COMP),\ + C(REQ_TASK_WAIT_TC_RESP),\ + C(REQ_SMP_WAIT_RESP),\ + C(REQ_SMP_WAIT_TC_COMP),\ + C(REQ_COMPLETED),\ + C(REQ_ABORTING),\ + C(REQ_FINAL),\ + } +#undef C +#define C(a) SCI_##a +enum sci_base_request_states REQUEST_STATES; +#undef C +const char *req_state_name(enum sci_base_request_states state); + +enum sci_status sci_request_start(struct isci_request *ireq); +enum sci_status sci_io_request_terminate(struct isci_request *ireq); +enum sci_status +sci_io_request_event_handler(struct isci_request *ireq, + u32 event_code); +enum sci_status +sci_io_request_frame_handler(struct isci_request *ireq, + u32 frame_index); +enum sci_status +sci_task_request_terminate(struct isci_request *ireq); +extern enum sci_status +sci_request_complete(struct isci_request *ireq); +extern enum sci_status +sci_io_request_tc_completion(struct isci_request *ireq, u32 code); + +/* XXX open code in caller */ +static inline dma_addr_t +sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) +{ + + char *requested_addr = (char *)virt_addr; + char *base_addr = (char *)ireq; + + BUG_ON(requested_addr < base_addr); + BUG_ON((requested_addr - base_addr) >= sizeof(*ireq)); + + return ireq->request_daddr + (requested_addr - base_addr); +} + +#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr) + +#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr) + +struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, + struct isci_tmf *isci_tmf, + u16 tag); +int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, + struct sas_task *task, struct isci_request *ireq); +struct isci_request *isci_io_request_from_tag(struct isci_host *ihost, + struct sas_task *task, + u16 tag); +enum sci_status +sci_task_request_construct(struct isci_host *ihost, + struct isci_remote_device *idev, + u16 io_tag, + struct isci_request *ireq); +enum sci_status sci_task_request_construct_ssp(struct isci_request *ireq); +void sci_smp_request_copy_response(struct isci_request *ireq); + +static inline int isci_task_is_ncq_recovery(struct sas_task *task) +{ + return (sas_protocol_ata(task->task_proto) && + task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT && + task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ); + +} +#endif /* !defined(_ISCI_REQUEST_H_) */ diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h new file mode 100644 index 000000000..15d8f3631 --- /dev/null +++ b/drivers/scsi/isci/sas.h @@ -0,0 +1,217 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SCI_SAS_H_ +#define _SCI_SAS_H_ + +#include + +/* + * SATA FIS Types These constants depict the various SATA FIS types devined in + * the serial ATA specification. + * XXX: This needs to go into + */ +#define FIS_REGH2D 0x27 +#define FIS_REGD2H 0x34 +#define FIS_SETDEVBITS 0xA1 +#define FIS_DMA_ACTIVATE 0x39 +#define FIS_DMA_SETUP 0x41 +#define FIS_BIST_ACTIVATE 0x58 +#define FIS_PIO_SETUP 0x5F +#define FIS_DATA 0x46 + +/**************************************************************************/ +#define SSP_RESP_IU_MAX_SIZE 280 + +/* + * contents of the SSP COMMAND INFORMATION UNIT. + * For specific information on each of these individual fields please + * reference the SAS specification SSP transport layer section. + * XXX: This needs to go into + */ +struct ssp_cmd_iu { + u8 LUN[8]; + u8 add_cdb_len:6; + u8 _r_a:2; + u8 _r_b; + u8 en_fburst:1; + u8 task_prio:4; + u8 task_attr:3; + u8 _r_c; + + u8 cdb[16]; +} __packed; + +/* + * contents of the SSP TASK INFORMATION UNIT. 
+ * For specific information on each of these individual fields please + * reference the SAS specification SSP transport layer section. + * XXX: This needs to go into + */ +struct ssp_task_iu { + u8 LUN[8]; + u8 _r_a; + u8 task_func; + u8 _r_b[4]; + u16 task_tag; + u8 _r_c[12]; +} __packed; + + +/* + * struct smp_req_phy_id - This structure defines the contents of + * an SMP Request that is comprised of the struct smp_request_header and a + * phy identifier. + * Examples: SMP_REQUEST_DISCOVER, SMP_REQUEST_REPORT_PHY_SATA. + * + * For specific information on each of these individual fields please reference + * the SAS specification. + */ +struct smp_req_phy_id { + u8 _r_a[4]; /* bytes 4-7 */ + + u8 ign_zone_grp:1; /* byte 8 */ + u8 _r_b:7; + + u8 phy_id; /* byte 9 */ + u8 _r_c; /* byte 10 */ + u8 _r_d; /* byte 11 */ +} __packed; + +/* + * struct smp_req_config_route_info - This structure defines the + * contents of an SMP Configure Route Information request. + * + * For specific information on each of these individual fields please reference + * the SAS specification. + */ +struct smp_req_conf_rtinfo { + u16 exp_change_cnt; /* bytes 4-5 */ + u8 exp_rt_idx_hi; /* byte 6 */ + u8 exp_rt_idx; /* byte 7 */ + + u8 _r_a; /* byte 8 */ + u8 phy_id; /* byte 9 */ + u16 _r_b; /* bytes 10-11 */ + + u8 _r_c:7; /* byte 12 */ + u8 dis_rt_entry:1; + u8 _r_d[3]; /* bytes 13-15 */ + + u8 rt_sas_addr[8]; /* bytes 16-23 */ + u8 _r_e[16]; /* bytes 24-39 */ +} __packed; + +/* + * struct smp_req_phycntl - This structure defines the contents of an + * SMP Phy Controller request. + * + * For specific information on each of these individual fields please reference + * the SAS specification. + */ +struct smp_req_phycntl { + u16 exp_change_cnt; /* byte 4-5 */ + + u8 _r_a[3]; /* bytes 6-8 */ + + u8 phy_id; /* byte 9 */ + u8 phy_op; /* byte 10 */ + + u8 upd_pathway:1; /* byte 11 */ + u8 _r_b:7; + + u8 _r_c[12]; /* byte 12-23 */ + + u8 att_dev_name[8]; /* byte 24-31 */ + + u8 _r_d:4; /* byte 32 */ + u8 min_linkrate:4; + + u8 _r_e:4; /* byte 33 */ + u8 max_linkrate:4; + + u8 _r_f[2]; /* byte 34-35 */ + + u8 pathway:4; /* byte 36 */ + u8 _r_g:4; + + u8 _r_h[3]; /* bytes 37-39 */ +} __packed; + +/* + * struct smp_req - This structure simply unionizes the existing request + * structures into a common request type. + * + * XXX: This data structure may need to go to scsi/sas.h + */ +struct smp_req { + u8 type; /* byte 0 */ + u8 func; /* byte 1 */ + u8 alloc_resp_len; /* byte 2 */ + u8 req_len; /* byte 3 */ + u8 req_data[]; +} __packed; + +/* + * struct sci_sas_address - This structure depicts how a SAS address is + * represented by SCI. + * XXX convert this to u8 [SAS_ADDR_SIZE] like the rest of libsas + * + */ +struct sci_sas_address { + u32 high; + u32 low; +}; +#endif diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h new file mode 100644 index 000000000..071cb74a2 --- /dev/null +++ b/drivers/scsi/isci/scu_completion_codes.h @@ -0,0 +1,285 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
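The struct sci_sas_address above keeps the WWN as a high/low pair of u32s rather than the u8[SAS_ADDR_SIZE] array used by libsas (the XXX note flags exactly that). Below is a hedged, standalone sketch of one plausible mapping, assuming the high word carries the first (most significant) four bytes of the big-endian SAS address; the example_* names are invented for illustration and are not driver code.

#include <stdint.h>
#include <stdio.h>

struct example_sci_sas_address {
	uint32_t high;
	uint32_t low;
};

/* assumption: bytes 0-3 of the wire-order WWN land in 'high', bytes 4-7 in 'low' */
static void example_pack_sas_addr(const uint8_t wwn[8], struct example_sci_sas_address *a)
{
	a->high = ((uint32_t)wwn[0] << 24) | ((uint32_t)wwn[1] << 16) |
		  ((uint32_t)wwn[2] << 8)  |  (uint32_t)wwn[3];
	a->low  = ((uint32_t)wwn[4] << 24) | ((uint32_t)wwn[5] << 16) |
		  ((uint32_t)wwn[6] << 8)  |  (uint32_t)wwn[7];
}

static void example_unpack_sas_addr(const struct example_sci_sas_address *a, uint8_t wwn[8])
{
	int i;

	for (i = 0; i < 4; i++) {
		wwn[i]     = (uint8_t)(a->high >> (24 - 8 * i));
		wwn[i + 4] = (uint8_t)(a->low  >> (24 - 8 * i));
	}
}

int main(void)
{
	const uint8_t wwn[8] = { 0x50, 0x01, 0x51, 0x7e, 0x12, 0x34, 0x56, 0x78 };
	struct example_sci_sas_address a;
	uint8_t back[8];

	example_pack_sas_addr(wwn, &a);
	example_unpack_sas_addr(&a, back);
	printf("high=0x%08x low=0x%08x\n", (unsigned)a.high, (unsigned)a.low); /* 0x5001517e 0x12345678 */
	return 0;
}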
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SCU_COMPLETION_CODES_HEADER_ +#define _SCU_COMPLETION_CODES_HEADER_ + +/** + * This file contains the constants and macros for the SCU hardware completion + * codes. + * + * + */ + +#define SCU_COMPLETION_TYPE_SHIFT 28 +#define SCU_COMPLETION_TYPE_MASK 0x70000000 + +/** + * SCU_COMPLETION_TYPE() - + * + * This macro constructs an SCU completion type + */ +#define SCU_COMPLETION_TYPE(type) \ + ((u32)(type) << SCU_COMPLETION_TYPE_SHIFT) + +/** + * SCU_COMPLETION_TYPE() - + * + * These macros contain the SCU completion types SCU_COMPLETION_TYPE + */ +#define SCU_COMPLETION_TYPE_TASK SCU_COMPLETION_TYPE(0) +#define SCU_COMPLETION_TYPE_SDMA SCU_COMPLETION_TYPE(1) +#define SCU_COMPLETION_TYPE_UFI SCU_COMPLETION_TYPE(2) +#define SCU_COMPLETION_TYPE_EVENT SCU_COMPLETION_TYPE(3) +#define SCU_COMPLETION_TYPE_NOTIFY SCU_COMPLETION_TYPE(4) + +/** + * + * + * These constants provide the shift and mask values for the various parts of + * an SCU completion code. 
+ */ +#define SCU_COMPLETION_STATUS_MASK 0x0FFC0000 +#define SCU_COMPLETION_TL_STATUS_MASK 0x0FC00000 +#define SCU_COMPLETION_TL_STATUS_SHIFT 22 +#define SCU_COMPLETION_SDMA_STATUS_MASK 0x003C0000 +#define SCU_COMPLETION_PEG_MASK 0x00010000 +#define SCU_COMPLETION_PORT_MASK 0x00007000 +#define SCU_COMPLETION_PE_MASK SCU_COMPLETION_PORT_MASK +#define SCU_COMPLETION_PE_SHIFT 12 +#define SCU_COMPLETION_INDEX_MASK 0x00000FFF + +/** + * SCU_GET_COMPLETION_TYPE() - + * + * This macro returns the SCU completion type. + */ +#define SCU_GET_COMPLETION_TYPE(completion_code) \ + ((completion_code) & SCU_COMPLETION_TYPE_MASK) + +/** + * SCU_GET_COMPLETION_STATUS() - + * + * This macro returns the SCU completion status. + */ +#define SCU_GET_COMPLETION_STATUS(completion_code) \ + ((completion_code) & SCU_COMPLETION_STATUS_MASK) + +/** + * SCU_GET_COMPLETION_TL_STATUS() - + * + * This macro returns the transport layer completion status. + */ +#define SCU_GET_COMPLETION_TL_STATUS(completion_code) \ + ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK) + +/** + * SCU_MAKE_COMPLETION_STATUS() - + * + * This macro takes a completion code and performs the shift and mask + * operations to turn it into a completion code that can be compared to a + * SCU_GET_COMPLETION_TL_STATUS. + */ +#define SCU_MAKE_COMPLETION_STATUS(completion_code) \ + ((u32)(completion_code) << SCU_COMPLETION_TL_STATUS_SHIFT) + +/** + * SCU_NORMALIZE_COMPLETION_STATUS() - + * + * This macro takes a SCU_GET_COMPLETION_TL_STATUS and normalizes it for a + * return code. + */ +#define SCU_NORMALIZE_COMPLETION_STATUS(completion_code) \ + (\ + ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK) \ + >> SCU_COMPLETION_TL_STATUS_SHIFT \ + ) + +/** + * SCU_GET_COMPLETION_SDMA_STATUS() - + * + * This macro returns the SDMA completion status. + */ +#define SCU_GET_COMPLETION_SDMA_STATUS(completion_code) \ + ((completion_code) & SCU_COMPLETION_SDMA_STATUS_MASK) + +/** + * SCU_GET_COMPLETION_PEG() - + * + * This macro returns the Protocol Engine Group from the completion code. + */ +#define SCU_GET_COMPLETION_PEG(completion_code) \ + ((completion_code) & SCU_COMPLETION_PEG_MASK) + +/** + * SCU_GET_COMPLETION_PORT() - + * + * This macro reuturns the logical port index from the completion code. + */ +#define SCU_GET_COMPLETION_PORT(completion_code) \ + ((completion_code) & SCU_COMPLETION_PORT_MASK) + +/** + * SCU_GET_PROTOCOL_ENGINE_INDEX() - + * + * This macro returns the PE index from the completion code. + */ +#define SCU_GET_PROTOCOL_ENGINE_INDEX(completion_code) \ + (((completion_code) & SCU_COMPLETION_PE_MASK) >> SCU_COMPLETION_PE_SHIFT) + +/** + * SCU_GET_COMPLETION_INDEX() - + * + * This macro returns the index of the completion which is either a TCi or an + * RNi depending on the completion type. + */ +#define SCU_GET_COMPLETION_INDEX(completion_code) \ + ((completion_code) & SCU_COMPLETION_INDEX_MASK) + +#define SCU_UNSOLICITED_FRAME_MASK 0x0FFF0000 +#define SCU_UNSOLICITED_FRAME_SHIFT 16 + +/** + * SCU_GET_FRAME_INDEX() - + * + * This macro returns a normalized frame index from an unsolicited frame + * completion. + */ +#define SCU_GET_FRAME_INDEX(completion_code) \ + (\ + ((completion_code) & SCU_UNSOLICITED_FRAME_MASK) \ + >> SCU_UNSOLICITED_FRAME_SHIFT \ + ) + +#define SCU_UNSOLICITED_FRAME_ERROR_MASK 0x00008000 + +/** + * SCU_GET_FRAME_ERROR() - + * + * This macro returns a zero (0) value if there is no frame error otherwise it + * returns non-zero (!0). 
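A standalone sketch of how a completion dword is picked apart with the masks above, and of the usual comparison against SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD). The EX_* constants simply restate values defined in this header; the completion dword built in main() is made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define EX_COMPLETION_TYPE_MASK       0x70000000u
#define EX_COMPLETION_TL_STATUS_MASK  0x0FC00000u
#define EX_COMPLETION_TL_STATUS_SHIFT 22
#define EX_COMPLETION_INDEX_MASK      0x00000FFFu

#define EX_TYPE_TASK          0x00000000u  /* SCU_COMPLETION_TYPE(0) */
#define EX_TASK_DONE_GOOD     0x00u
#define EX_TASK_DONE_CRC_ERR  0x14u

/* mirrors SCU_MAKE_COMPLETION_STATUS(): shift a normalized status up to bit 22 */
#define EX_MAKE_COMPLETION_STATUS(x) ((uint32_t)(x) << EX_COMPLETION_TL_STATUS_SHIFT)

int main(void)
{
	/* a made-up TASK completion: TL status CRC_ERR, TCi 0x025 */
	uint32_t cc = EX_TYPE_TASK | EX_MAKE_COMPLETION_STATUS(EX_TASK_DONE_CRC_ERR) | 0x025;

	uint32_t type      = cc & EX_COMPLETION_TYPE_MASK;      /* SCU_GET_COMPLETION_TYPE */
	uint32_t tl_status = cc & EX_COMPLETION_TL_STATUS_MASK; /* SCU_GET_COMPLETION_TL_STATUS */
	uint32_t tci       = cc & EX_COMPLETION_INDEX_MASK;     /* SCU_GET_COMPLETION_INDEX */

	if (tl_status == EX_MAKE_COMPLETION_STATUS(EX_TASK_DONE_GOOD))
		printf("TCi %u completed cleanly\n", (unsigned)tci);
	else
		printf("type 0x%08x, TCi %u, normalized status 0x%02x\n",
		       (unsigned)type, (unsigned)tci,
		       (unsigned)(tl_status >> EX_COMPLETION_TL_STATUS_SHIFT));
	return 0;
}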
+ */ +#define SCU_GET_FRAME_ERROR(completion_code) \ + ((completion_code) & SCU_UNSOLICITED_FRAME_ERROR_MASK) + +/** + * + * + * These constants represent normalized completion codes which must be shifted + * 18 bits to match it with the hardware completion code. In a 16-bit compiler, + * immediate constants are 16-bit values (the size of an int). If we shift + * those by 18 bits, we completely lose the value. To ensure the value is a + * 32-bit value like we want, each immediate value must be cast to a u32. + */ +#define SCU_TASK_DONE_GOOD ((u32)0x00) +#define SCU_TASK_DONE_TX_RAW_CMD_ERR ((u32)0x08) +#define SCU_TASK_DONE_CRC_ERR ((u32)0x14) +#define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14) +#define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15) +#define SCU_TASK_DONE_NAK_CMD_ERR ((u32)0x16) +#define SCU_TASK_DONE_CMD_LL_R_ERR ((u32)0x16) +#define SCU_TASK_DONE_LL_R_ERR ((u32)0x17) +#define SCU_TASK_DONE_ACK_NAK_TO ((u32)0x17) +#define SCU_TASK_DONE_LL_PERR ((u32)0x18) +#define SCU_TASK_DONE_LL_SY_TERM ((u32)0x19) +#define SCU_TASK_DONE_NAK_ERR ((u32)0x19) +#define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A) +#define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A) +#define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B) +#define SCU_TASK_DONE_BREAK_RCVD ((u32)0x1B) +#define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B) +#define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C) +#define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C) +#define SCU_TASK_DONE_INV_FIS_TYPE ((u32)0x1D) +#define SCU_TASK_DONE_XR_IU_LEN_ERR ((u32)0x1D) +#define SCU_TASK_DONE_INV_FIS_LEN ((u32)0x1E) +#define SCU_TASK_DONE_XR_WD_LEN ((u32)0x1E) +#define SCU_TASK_DONE_SDMA_ERR ((u32)0x1F) +#define SCU_TASK_DONE_OFFSET_ERR ((u32)0x20) +#define SCU_TASK_DONE_MAX_PLD_ERR ((u32)0x21) +#define SCU_TASK_DONE_EXCESS_DATA ((u32)0x22) +#define SCU_TASK_DONE_LF_ERR ((u32)0x23) +#define SCU_TASK_DONE_UNEXP_FIS ((u32)0x24) +#define SCU_TASK_DONE_UNEXP_RESP ((u32)0x24) +#define SCU_TASK_DONE_EARLY_RESP ((u32)0x25) +#define SCU_TASK_DONE_SMP_RESP_TO_ERR ((u32)0x26) +#define SCU_TASK_DONE_DMASETUP_DIRERR ((u32)0x27) +#define SCU_TASK_DONE_SMP_UFI_ERR ((u32)0x27) +#define SCU_TASK_DONE_XFERCNT_ERR ((u32)0x28) +#define SCU_TASK_DONE_SMP_FRM_TYPE_ERR ((u32)0x28) +#define SCU_TASK_DONE_SMP_LL_RX_ERR ((u32)0x29) +#define SCU_TASK_DONE_RESP_LEN_ERR ((u32)0x2A) +#define SCU_TASK_DONE_UNEXP_DATA ((u32)0x2B) +#define SCU_TASK_DONE_OPEN_FAIL ((u32)0x2C) +#define SCU_TASK_DONE_UNEXP_SDBFIS ((u32)0x2D) +#define SCU_TASK_DONE_REG_ERR ((u32)0x2E) +#define SCU_TASK_DONE_SDB_ERR ((u32)0x2F) +#define SCU_TASK_DONE_TASK_ABORT ((u32)0x30) +#define SCU_TASK_DONE_CMD_SDMA_ERR ((U32)0x32) +#define SCU_TASK_DONE_CMD_LL_ABORT_ERR ((U32)0x33) +#define SCU_TASK_OPEN_REJECT_WRONG_DESTINATION ((u32)0x34) +#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1 ((u32)0x35) +#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2 ((u32)0x36) +#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3 ((u32)0x37) +#define SCU_TASK_OPEN_REJECT_BAD_DESTINATION ((u32)0x38) +#define SCU_TASK_OPEN_REJECT_ZONE_VIOLATION ((u32)0x39) +#define SCU_TASK_DONE_VIIT_ENTRY_NV ((u32)0x3A) +#define SCU_TASK_DONE_IIT_ENTRY_NV ((u32)0x3B) +#define SCU_TASK_DONE_RNCNV_OUTBOUND ((u32)0x3C) +#define SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY ((u32)0x3D) +#define SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED ((u32)0x3E) +#define SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED ((u32)0x3F) + +#endif /* _SCU_COMPLETION_CODES_HEADER_ */ diff --git a/drivers/scsi/isci/scu_event_codes.h b/drivers/scsi/isci/scu_event_codes.h new file mode 100644 index 000000000..36a945ad5 --- 
/dev/null +++ b/drivers/scsi/isci/scu_event_codes.h @@ -0,0 +1,336 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __SCU_EVENT_CODES_HEADER__ +#define __SCU_EVENT_CODES_HEADER__ + +/** + * This file contains the constants and macros for the SCU event codes. + * + * + */ + +#define SCU_EVENT_TYPE_CODE_SHIFT 24 +#define SCU_EVENT_TYPE_CODE_MASK 0x0F000000 + +#define SCU_EVENT_SPECIFIC_CODE_SHIFT 18 +#define SCU_EVENT_SPECIFIC_CODE_MASK 0x00FC0000 + +#define SCU_EVENT_CODE_MASK \ + (SCU_EVENT_TYPE_CODE_MASK | SCU_EVENT_SPECIFIC_CODE_MASK) + +/** + * SCU_EVENT_TYPE() - + * + * This macro constructs an SCU event type from the type value. + */ +#define SCU_EVENT_TYPE(type) \ + ((u32)(type) << SCU_EVENT_TYPE_CODE_SHIFT) + +/** + * SCU_EVENT_SPECIFIC() - + * + * This macro constructs an SCU event specifier from the code value. 
+ */ +#define SCU_EVENT_SPECIFIC(code) \ + ((u32)(code) << SCU_EVENT_SPECIFIC_CODE_SHIFT) + +/** + * SCU_EVENT_MESSAGE() - + * + * This macro constructs a combines an SCU event type and SCU event specifier + * from the type and code values. + */ +#define SCU_EVENT_MESSAGE(type, code) \ + ((type) | SCU_EVENT_SPECIFIC(code)) + +/** + * SCU_EVENT_TYPE() - + * + * SCU_EVENT_TYPES + */ +#define SCU_EVENT_TYPE_SMU_COMMAND_ERROR SCU_EVENT_TYPE(0x08) +#define SCU_EVENT_TYPE_SMU_PCQ_ERROR SCU_EVENT_TYPE(0x09) +#define SCU_EVENT_TYPE_SMU_ERROR SCU_EVENT_TYPE(0x00) +#define SCU_EVENT_TYPE_TRANSPORT_ERROR SCU_EVENT_TYPE(0x01) +#define SCU_EVENT_TYPE_BROADCAST_CHANGE SCU_EVENT_TYPE(0x02) +#define SCU_EVENT_TYPE_OSSP_EVENT SCU_EVENT_TYPE(0x03) +#define SCU_EVENT_TYPE_FATAL_MEMORY_ERROR SCU_EVENT_TYPE(0x0F) +#define SCU_EVENT_TYPE_RNC_SUSPEND_TX SCU_EVENT_TYPE(0x04) +#define SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX SCU_EVENT_TYPE(0x05) +#define SCU_EVENT_TYPE_RNC_OPS_MISC SCU_EVENT_TYPE(0x06) +#define SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT SCU_EVENT_TYPE(0x07) +#define SCU_EVENT_TYPE_ERR_CNT_EVENT SCU_EVENT_TYPE(0x0A) + +/** + * + * + * SCU_EVENT_SPECIFIERS + */ +#define SCU_EVENT_SPECIFIER_DRIVER_SUSPEND 0x20 +#define SCU_EVENT_SPECIFIER_RNC_RELEASE 0x00 + +/** + * + * + * SMU_COMMAND_EVENTS + */ +#define SCU_EVENT_INVALID_CONTEXT_COMMAND \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_COMMAND_ERROR, 0x00) + +/** + * + * + * SMU_PCQ_EVENTS + */ +#define SCU_EVENT_UNCORRECTABLE_PCQ_ERROR \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_PCQ_ERROR, 0x00) + +/** + * + * + * SMU_EVENTS + */ +#define SCU_EVENT_UNCORRECTABLE_REGISTER_WRITE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x02) +#define SCU_EVENT_UNCORRECTABLE_REGISTER_READ \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x03) +#define SCU_EVENT_PCIE_INTERFACE_ERROR \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x04) +#define SCU_EVENT_FUNCTION_LEVEL_RESET \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x05) + +/** + * + * + * TRANSPORT_LEVEL_ERRORS + */ +#define SCU_EVENT_ACK_NAK_TIMEOUT_ERROR \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_TRANSPORT_ERROR, 0x00) + +/** + * + * + * BROADCAST_CHANGE_EVENTS + */ +#define SCU_EVENT_BROADCAST_CHANGE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x01) +#define SCU_EVENT_BROADCAST_RESERVED0 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x02) +#define SCU_EVENT_BROADCAST_RESERVED1 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x03) +#define SCU_EVENT_BROADCAST_SES \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x04) +#define SCU_EVENT_BROADCAST_EXPANDER \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x05) +#define SCU_EVENT_BROADCAST_AEN \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x06) +#define SCU_EVENT_BROADCAST_RESERVED3 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x07) +#define SCU_EVENT_BROADCAST_RESERVED4 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x08) +#define SCU_EVENT_PE_SUSPENDED \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x09) + +/** + * + * + * OSSP_EVENTS + */ +#define SCU_EVENT_PORT_SELECTOR_DETECTED \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x10) +#define SCU_EVENT_SENT_PORT_SELECTION \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x11) +#define SCU_EVENT_HARD_RESET_TRANSMITTED \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x12) +#define SCU_EVENT_HARD_RESET_RECEIVED \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x13) +#define SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x15) 
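The event codes above are all built with SCU_EVENT_MESSAGE(): a pre-shifted type OR'd with a specifier shifted to bit 18. Here is a standalone sketch, with EX_* stand-ins restating the shifts and masks from this header, of composing a link-failure-style message and matching a raw event dword against it after masking off the payload bits; the payload value is invented.

#include <stdint.h>
#include <stdio.h>

#define EX_EVENT_TYPE_CODE_SHIFT     24
#define EX_EVENT_TYPE_CODE_MASK      0x0F000000u
#define EX_EVENT_SPECIFIC_CODE_SHIFT 18
#define EX_EVENT_SPECIFIC_CODE_MASK  0x00FC0000u
#define EX_EVENT_CODE_MASK \
	(EX_EVENT_TYPE_CODE_MASK | EX_EVENT_SPECIFIC_CODE_MASK)

#define EX_EVENT_TYPE(type)     ((uint32_t)(type) << EX_EVENT_TYPE_CODE_SHIFT)
#define EX_EVENT_SPECIFIC(code) ((uint32_t)(code) << EX_EVENT_SPECIFIC_CODE_SHIFT)
#define EX_EVENT_MESSAGE(type, code) ((type) | EX_EVENT_SPECIFIC(code))

#define EX_EVENT_TYPE_OSSP_EVENT EX_EVENT_TYPE(0x03)
#define EX_EVENT_LINK_FAILURE    EX_EVENT_MESSAGE(EX_EVENT_TYPE_OSSP_EVENT, 0x16)

int main(void)
{
	/* a made-up raw event dword: link-failure message plus low-order payload bits */
	uint32_t ev = EX_EVENT_LINK_FAILURE | 0x0000002Au;

	printf("EX_EVENT_LINK_FAILURE = 0x%08x\n", (unsigned)EX_EVENT_LINK_FAILURE); /* 0x03580000 */

	/* the scu_get_event_code() idiom: mask off payload bits before comparing */
	if ((ev & EX_EVENT_CODE_MASK) == EX_EVENT_LINK_FAILURE)
		printf("event matches the link-failure message\n");
	return 0;
}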
+#define SCU_EVENT_LINK_FAILURE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x16) +#define SCU_EVENT_SATA_SPINUP_HOLD \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x17) +#define SCU_EVENT_SAS_15_SSC \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x18) +#define SCU_EVENT_SAS_15 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x19) +#define SCU_EVENT_SAS_30_SSC \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1A) +#define SCU_EVENT_SAS_30 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1B) +#define SCU_EVENT_SAS_60_SSC \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1C) +#define SCU_EVENT_SAS_60 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1D) +#define SCU_EVENT_SATA_15_SSC \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1E) +#define SCU_EVENT_SATA_15 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1F) +#define SCU_EVENT_SATA_30_SSC \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x20) +#define SCU_EVENT_SATA_30 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x21) +#define SCU_EVENT_SATA_60_SSC \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x22) +#define SCU_EVENT_SATA_60 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x23) +#define SCU_EVENT_SAS_PHY_DETECTED \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x24) +#define SCU_EVENT_SATA_PHY_DETECTED \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x25) + +/** + * + * + * FATAL_INTERNAL_MEMORY_ERROR_EVENTS + */ +#define SCU_EVENT_TSC_RNSC_UNCORRECTABLE_ERROR \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x00) +#define SCU_EVENT_TC_RNC_UNCORRECTABLE_ERROR \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x01) +#define SCU_EVENT_ZPT_UNCORRECTABLE_ERROR \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x02) + +/** + * + * + * REMOTE_NODE_SUSPEND_EVENTS + */ +#define SCU_EVENT_TL_RNC_SUSPEND_TX \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x00) +#define SCU_EVENT_TL_RNC_SUSPEND_TX_RX \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x00) +#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x20) +#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX_RX \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x20) + +/** + * + * + * REMOTE_NODE_MISC_EVENTS + */ +#define SCU_EVENT_POST_RCN_RELEASE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, SCU_EVENT_SPECIFIER_RNC_RELEASE) +#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_ENABLE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x01) +#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_DISABLE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x02) +#define SCU_EVENT_POST_RNC_COMPLETE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x03) +#define SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x04) + +/** + * + * + * ERROR_COUNT_EVENT + */ +#define SCU_EVENT_RX_CREDIT_BLOCKED_RECEIVED \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x00) +#define SCU_EVENT_TX_DONE_CREDIT_TIMEOUT \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x01) +#define SCU_EVENT_RX_DONE_CREDIT_TIMEOUT \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x02) + +/** + * scu_get_event_type() - + * + * This macro returns the SCU event type from the event code. + */ +#define scu_get_event_type(event_code) \ + ((event_code) & SCU_EVENT_TYPE_CODE_MASK) + +/** + * scu_get_event_specifier() - + * + * This macro returns the SCU event specifier from the event code. 
+ */ +#define scu_get_event_specifier(event_code) \ + ((event_code) & SCU_EVENT_SPECIFIC_CODE_MASK) + +/** + * scu_get_event_code() - + * + * This macro returns the combined SCU event type and SCU event specifier from + * the event code. + */ +#define scu_get_event_code(event_code) \ + ((event_code) & SCU_EVENT_CODE_MASK) + + +/** + * + * + * PTS_SCHEDULE_EVENT + */ +#define SCU_EVENT_SMP_RESPONSE_NO_PE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x00) +#define SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE \ + scu_get_event_specifier(SCU_EVENT_SMP_RESPONSE_NO_PE) + +#define SCU_EVENT_TASK_TIMEOUT \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x01) +#define SCU_EVENT_SPECIFIC_TASK_TIMEOUT \ + scu_get_event_specifier(SCU_EVENT_TASK_TIMEOUT) + +#define SCU_EVENT_IT_NEXUS_TIMEOUT \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x02) +#define SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT \ + scu_get_event_specifier(SCU_EVENT_IT_NEXUS_TIMEOUT) + + +#endif /* __SCU_EVENT_CODES_HEADER__ */ diff --git a/drivers/scsi/isci/scu_remote_node_context.h b/drivers/scsi/isci/scu_remote_node_context.h new file mode 100644 index 000000000..33745adc8 --- /dev/null +++ b/drivers/scsi/isci/scu_remote_node_context.h @@ -0,0 +1,229 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __SCU_REMOTE_NODE_CONTEXT_HEADER__ +#define __SCU_REMOTE_NODE_CONTEXT_HEADER__ + +/** + * This file contains the structures and constants used by the SCU hardware to + * describe a remote node context. + * + * + */ + +/** + * struct ssp_remote_node_context - This structure contains the SCU hardware + * definition for an SSP remote node. + * + * + */ +struct ssp_remote_node_context { + /* WORD 0 */ + + /** + * This field is the remote node index assigned for this remote node. All + * remote nodes must have a unique remote node index. The value of the remote + * node index can not exceed the maximum number of remote nodes reported in + * the SCU device context capacity register. + */ + u32 remote_node_index:12; + u32 reserved0_1:4; + + /** + * This field tells the SCU hardware how many simultaneous connections + * this remote node will support. + */ + u32 remote_node_port_width:4; + + /** + * This field tells the SCU hardware which logical port to associate with this + * remote node. + */ + u32 logical_port_index:3; + u32 reserved0_2:5; + + /** + * This field will enable the I_T nexus loss timer for this remote node. + */ + u32 nexus_loss_timer_enable:1; + + /** + * This field is for driver debug only and is not used. + */ + u32 check_bit:1; + + /** + * This field must be set to true when the hardware DMAs the remote node + * context to the hardware SRAM. When the remote node is being invalidated + * this field must be set to false. + */ + u32 is_valid:1; + + /** + * This field must be set to true. + */ + u32 is_remote_node_context:1; + + /* WORD 1 - 2 */ + + /** + * This field is the low word of the remote device SAS Address + */ + u32 remote_sas_address_lo; + + /** + * This field is the high word of the remote device SAS Address + */ + u32 remote_sas_address_hi; + + /* WORD 3 */ + /** + * This field represents the function number assigned to this remote device. + * This value must match the virtual function number that is being used to + * communicate to the device. + */ + u32 function_number:8; + u32 reserved3_1:8; + + /** + * This field provides the driver a way to cheat on the arbitration wait time + * for this remote node. + */ + u32 arbitration_wait_time:16; + + /* WORD 4 */ + /** + * This field tells the SCU hardware how long this device may occupy the + * connection before it must be closed. + */ + u32 connection_occupancy_timeout:16; + + /** + * This field tells the SCU hardware how long to maintain a connection when + * there are no frames being transmitted on the link. + */ + u32 connection_inactivity_timeout:16; + + /* WORD 5 */ + /** + * This field allows the driver to cheat on the arbitration wait time for this + * remote node. + */ + u32 initial_arbitration_wait_time:16; + + /** + * This field tells the hardware what to program for the connection rate in + * the open address frame. See the SAS spec for valid values.
+ */ + u32 oaf_connection_rate:4; + + /** + * This field tells the SCU hardware what to program for the features in the + * open address frame. See the SAS spec for valid values. + */ + u32 oaf_features:4; + + /** + * This field tells the SCU hardware what to use for the source zone group in + * the open address frame. See the SAS spec for more details on zoning. + */ + u32 oaf_source_zone_group:8; + + /* WORD 6 */ + /** + * This field tells the SCU hardware what to use as the more capibilities in + * the open address frame. See the SAS Spec for details. + */ + u32 oaf_more_compatibility_features; + + /* WORD 7 */ + u32 reserved7; + +}; + +/** + * struct stp_remote_node_context - This structure contains the SCU hardware + * definition for a STP remote node. + * + * STP Targets are not yet supported so this definition is a placeholder until + * we do support them. + */ +struct stp_remote_node_context { + /** + * Placeholder data for the STP remote node. + */ + u32 data[8]; + +}; + +/** + * This union combines the SAS and SATA remote node definitions. + * + * union scu_remote_node_context + */ +union scu_remote_node_context { + /** + * SSP Remote Node + */ + struct ssp_remote_node_context ssp; + + /** + * STP Remote Node + */ + struct stp_remote_node_context stp; + +}; + +#endif /* __SCU_REMOTE_NODE_CONTEXT_HEADER__ */ diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h new file mode 100644 index 000000000..582d22d54 --- /dev/null +++ b/drivers/scsi/isci/scu_task_context.h @@ -0,0 +1,965 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
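To make WORD 0 of struct ssp_remote_node_context concrete, here is a hedged userspace sketch that reproduces just that dword as a bitfield/u32 union and fills in a few fields. It assumes LSB-first bitfield packing (little-endian GCC behaviour, which the in-kernel layout also relies on) and C11 anonymous structs; the example values are arbitrary.

#include <stdint.h>
#include <stdio.h>

/* WORD 0 of the SSP remote node context, reproduced in isolation */
union example_rnc_word0 {
	struct {
		uint32_t remote_node_index:12;
		uint32_t reserved0_1:4;
		uint32_t remote_node_port_width:4;
		uint32_t logical_port_index:3;
		uint32_t reserved0_2:5;
		uint32_t nexus_loss_timer_enable:1;
		uint32_t check_bit:1;
		uint32_t is_valid:1;
		uint32_t is_remote_node_context:1;
	};
	uint32_t dword;
};

int main(void)
{
	union example_rnc_word0 w0 = { .dword = 0 };

	w0.remote_node_index = 5;       /* unique RNi, below the device context capacity */
	w0.remote_node_port_width = 1;  /* single simultaneous connection */
	w0.logical_port_index = 0;
	w0.is_valid = 1;                /* set before DMA-ing the context to SCU SRAM */
	w0.is_remote_node_context = 1;  /* always true for a remote node context */

	/* 0xc0010005 with LSB-first bitfield allocation */
	printf("word0 = 0x%08x\n", (unsigned)w0.dword);
	return 0;
}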
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SCU_TASK_CONTEXT_H_ +#define _SCU_TASK_CONTEXT_H_ + +/** + * This file contains the structures and constants for the SCU hardware task + * context. + * + * + */ + + +/** + * enum scu_ssp_task_type - This enumberation defines the various SSP task + * types the SCU hardware will accept. The definition for the various task + * types the SCU hardware will accept can be found in the DS specification. + * + * + */ +typedef enum { + SCU_TASK_TYPE_IOREAD, /* /< IO READ direction or no direction */ + SCU_TASK_TYPE_IOWRITE, /* /< IO Write direction */ + SCU_TASK_TYPE_SMP_REQUEST, /* /< SMP Request type */ + SCU_TASK_TYPE_RESPONSE, /* /< Driver generated response frame (targt mode) */ + SCU_TASK_TYPE_RAW_FRAME, /* /< Raw frame request type */ + SCU_TASK_TYPE_PRIMITIVE /* /< Request for a primitive to be transmitted */ +} scu_ssp_task_type; + +/** + * enum scu_sata_task_type - This enumeration defines the various SATA task + * types the SCU hardware will accept. The definition for the various task + * types the SCU hardware will accept can be found in the DS specification. + * + * + */ +typedef enum { + SCU_TASK_TYPE_DMA_IN, /* /< Read request */ + SCU_TASK_TYPE_FPDMAQ_READ, /* /< NCQ read request */ + SCU_TASK_TYPE_PACKET_DMA_IN, /* /< Packet read request */ + SCU_TASK_TYPE_SATA_RAW_FRAME, /* /< Raw frame request */ + RESERVED_4, + RESERVED_5, + RESERVED_6, + RESERVED_7, + SCU_TASK_TYPE_DMA_OUT, /* /< Write request */ + SCU_TASK_TYPE_FPDMAQ_WRITE, /* /< NCQ write Request */ + SCU_TASK_TYPE_PACKET_DMA_OUT /* /< Packet write request */ +} scu_sata_task_type; + + +/** + * + * + * SCU_CONTEXT_TYPE + */ +#define SCU_TASK_CONTEXT_TYPE 0 +#define SCU_RNC_CONTEXT_TYPE 1 + +/** + * + * + * SCU_TASK_CONTEXT_VALIDITY + */ +#define SCU_TASK_CONTEXT_INVALID 0 +#define SCU_TASK_CONTEXT_VALID 1 + +/** + * + * + * SCU_COMMAND_CODE + */ +#define SCU_COMMAND_CODE_INITIATOR_NEW_TASK 0 +#define SCU_COMMAND_CODE_ACTIVE_TASK 1 +#define SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK 2 +#define SCU_COMMAND_CODE_TARGET_RAW_FRAMES 3 + +/** + * + * + * SCU_TASK_PRIORITY + */ +/** + * + * + * This priority is used when there is no priority request for this request. + */ +#define SCU_TASK_PRIORITY_NORMAL 0 + +/** + * + * + * This priority indicates that the task should be scheduled to the head of the + * queue. The task will NOT be executed if the TX is suspended for the remote + * node. + */ +#define SCU_TASK_PRIORITY_HEAD_OF_Q 1 + +/** + * + * + * This priority indicates that the task will be executed before all + * SCU_TASK_PRIORITY_NORMAL and SCU_TASK_PRIORITY_HEAD_OF_Q tasks. The task + * WILL be executed if the TX is suspended for the remote node. 
+ */ +#define SCU_TASK_PRIORITY_HIGH 2 + +/** + * + * + * This task priority is reserved and should not be used. + */ +#define SCU_TASK_PRIORITY_RESERVED 3 + +#define SCU_TASK_INITIATOR_MODE 1 +#define SCU_TASK_TARGET_MODE 0 + +#define SCU_TASK_REGULAR 0 +#define SCU_TASK_ABORTED 1 + +/* direction bit defintion */ +/** + * + * + * SATA_DIRECTION + */ +#define SCU_SATA_WRITE_DATA_DIRECTION 0 +#define SCU_SATA_READ_DATA_DIRECTION 1 + +/** + * + * + * SCU_COMMAND_CONTEXT_MACROS These macros provide the mask and shift + * operations to construct the various SCU commands + */ +#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT 21 +#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK 0x00E00000 +#define scu_get_command_request_type(x) \ + ((x) & SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK) + +#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT 18 +#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK 0x001C0000 +#define scu_get_command_request_subtype(x) \ + ((x) & SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK) + +#define SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK \ + (\ + SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK \ + | SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK \ + ) +#define scu_get_command_request_full_type(x) \ + ((x) & SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK) + +#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT 16 +#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK 0x00010000 +#define scu_get_command_protocl_engine_group(x) \ + ((x) & SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK) + +#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT 12 +#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK 0x00007000 +#define scu_get_command_reqeust_logical_port(x) \ + ((x) & SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK) + + +#define MAKE_SCU_CONTEXT_COMMAND_TYPE(type) \ + ((u32)(type) << SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT) + +/** + * MAKE_SCU_CONTEXT_COMMAND_TYPE() - + * + * SCU_COMMAND_TYPES These constants provide the grouping of the different SCU + * command types. + */ +#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(0) +#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(1) +#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(2) +#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(3) +#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(6) + +#define MAKE_SCU_CONTEXT_COMMAND_REQUEST(type, command) \ + ((type) | ((command) << SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT)) + +/** + * + * + * SCU_REQUEST_TYPES These constants are the various request types that can be + * posted to the SCU hardware. 
+ */ +#define SCU_CONTEXT_COMMAND_REQUST_POST_TC \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 0)) + +#define SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 1)) + +#define SCU_CONTEXT_COMMAND_REQUST_DUMP_TC \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC, 0)) + +#define SCU_CONTEXT_COMMAND_POST_RNC_32 \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 0)) + +#define SCU_CONTEXT_COMMAND_POST_RNC_96 \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 1)) + +#define SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 2)) + +#define SCU_CONTEXT_COMMAND_DUMP_RNC_32 \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 0)) + +#define SCU_CONTEXT_COMMAND_DUMP_RNC_96 \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 1)) + +#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 0)) + +#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 1)) + +#define SCU_CONTEXT_COMMAND_POST_RNC_RESUME \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 2)) + +#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_ENABLE \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 3)) + +#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_DISABLE \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 4)) + +/** + * + * + * SCU_TASK_CONTEXT_PROTOCOL SCU Task context protocol types this is uesd to + * program the SCU Task context protocol field in word 0x00. + */ +#define SCU_TASK_CONTEXT_PROTOCOL_SMP 0x00 +#define SCU_TASK_CONTEXT_PROTOCOL_SSP 0x01 +#define SCU_TASK_CONTEXT_PROTOCOL_STP 0x02 +#define SCU_TASK_CONTEXT_PROTOCOL_NONE 0x07 + +/** + * struct ssp_task_context - This is the SCU hardware definition for an SSP + * request. + * + * + */ +struct ssp_task_context { + /* OFFSET 0x18 */ + u32 reserved00:24; + u32 frame_type:8; + + /* OFFSET 0x1C */ + u32 reserved01; + + /* OFFSET 0x20 */ + u32 fill_bytes:2; + u32 reserved02:6; + u32 changing_data_pointer:1; + u32 retransmit:1; + u32 retry_data_frame:1; + u32 tlr_control:2; + u32 reserved03:19; + + /* OFFSET 0x24 */ + u32 uiRsvd4; + + /* OFFSET 0x28 */ + u32 target_port_transfer_tag:16; + u32 tag:16; + + /* OFFSET 0x2C */ + u32 data_offset; +}; + +/** + * struct stp_task_context - This is the SCU hardware definition for an STP + * request. + * + * + */ +struct stp_task_context { + /* OFFSET 0x18 */ + u32 fis_type:8; + u32 pm_port:4; + u32 reserved0:3; + u32 control:1; + u32 command:8; + u32 features:8; + + /* OFFSET 0x1C */ + u32 reserved1; + + /* OFFSET 0x20 */ + u32 reserved2; + + /* OFFSET 0x24 */ + u32 reserved3; + + /* OFFSET 0x28 */ + u32 ncq_tag:5; + u32 reserved4:27; + + /* OFFSET 0x2C */ + u32 data_offset; /* TODO: What is this used for? */ +}; + +/** + * struct smp_task_context - This is the SCU hardware definition for an SMP + * request. 
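A standalone sketch of how the MAKE_SCU_CONTEXT_COMMAND_* macros compose a post request and how the scu_get_command_* masks pull it apart again. The EX_* names restate the shifts and masks above; the post dword built in main(), including the use of the low index bits, is a made-up illustration rather than the driver's actual post-context layout.

#include <stdint.h>
#include <stdio.h>

#define EX_CMD_REQ_TYPE_SHIFT     21
#define EX_CMD_REQ_TYPE_MASK      0x00E00000u
#define EX_CMD_REQ_SUBTYPE_SHIFT  18
#define EX_CMD_REQ_SUBTYPE_MASK   0x001C0000u
#define EX_CMD_LOGICAL_PORT_SHIFT 12
#define EX_CMD_LOGICAL_PORT_MASK  0x00007000u

#define EX_MAKE_CMD_TYPE(type) ((uint32_t)(type) << EX_CMD_REQ_TYPE_SHIFT)
#define EX_MAKE_CMD_REQUEST(type, command) \
	((type) | ((uint32_t)(command) << EX_CMD_REQ_SUBTYPE_SHIFT))

#define EX_CMD_TYPE_POST_RNC       EX_MAKE_CMD_TYPE(2)
#define EX_CMD_POST_RNC_INVALIDATE EX_MAKE_CMD_REQUEST(EX_CMD_TYPE_POST_RNC, 2)

int main(void)
{
	/* a made-up post dword: "post RNC invalidate" on logical port 1, index 0x01f */
	uint32_t post = EX_CMD_POST_RNC_INVALIDATE |
			(1u << EX_CMD_LOGICAL_PORT_SHIFT) | 0x01f;

	printf("request type    = 0x%08x\n", (unsigned)(post & EX_CMD_REQ_TYPE_MASK));    /* 0x00400000 */
	printf("request subtype = 0x%08x\n", (unsigned)(post & EX_CMD_REQ_SUBTYPE_MASK)); /* 0x00080000 */
	printf("logical port    = %u\n",
	       (unsigned)((post & EX_CMD_LOGICAL_PORT_MASK) >> EX_CMD_LOGICAL_PORT_SHIFT));
	return 0;
}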
+ * + * + */ +struct smp_task_context { + /* OFFSET 0x18 */ + u32 response_length:8; + u32 function_result:8; + u32 function:8; + u32 frame_type:8; + + /* OFFSET 0x1C */ + u32 smp_response_ufi:12; + u32 reserved1:20; + + /* OFFSET 0x20 */ + u32 reserved2; + + /* OFFSET 0x24 */ + u32 reserved3; + + /* OFFSET 0x28 */ + u32 reserved4; + + /* OFFSET 0x2C */ + u32 reserved5; +}; + +/** + * struct primitive_task_context - This is the SCU hardware definition used + * when the driver wants to send a primitive on the link. + * + * + */ +struct primitive_task_context { + /* OFFSET 0x18 */ + /** + * This field is the control word and it must be 0. + */ + u32 control; /* /< must be set to 0 */ + + /* OFFSET 0x1C */ + /** + * This field specifies the primitive that is to be transmitted. + */ + u32 sequence; + + /* OFFSET 0x20 */ + u32 reserved0; + + /* OFFSET 0x24 */ + u32 reserved1; + + /* OFFSET 0x28 */ + u32 reserved2; + + /* OFFSET 0x2C */ + u32 reserved3; +}; + +/** + * The union of the protocols that can be selected in the SCU task context + * field. + * + * protocol_context + */ +union protocol_context { + struct ssp_task_context ssp; + struct stp_task_context stp; + struct smp_task_context smp; + struct primitive_task_context primitive; + u32 words[6]; +}; + +/** + * struct scu_sgl_element - This structure represents a single SCU defined SGL + * element. SCU SGLs contain a 64 bit address with the maximum data transfer + * being 24 bits in size. The SGL can not cross a 4GB boundary. + * + * struct scu_sgl_element + */ +struct scu_sgl_element { + /** + * This field is the upper 32 bits of the 64 bit physical address. + */ + u32 address_upper; + + /** + * This field is the lower 32 bits of the 64 bit physical address. + */ + u32 address_lower; + + /** + * This field is the number of bytes to transfer. + */ + u32 length:24; + + /** + * This field is the address modifier to be used when a virtual function is + * requesting a data transfer. + */ + u32 address_modifier:8; + +}; + +#define SCU_SGL_ELEMENT_PAIR_A 0 +#define SCU_SGL_ELEMENT_PAIR_B 1 + +/** + * struct scu_sgl_element_pair - This structure is the SCU hardware definition + * of a pair of SGL elements. The SCU hardware always works on SGL pairs. + * They are refered to in the DS specification as SGL A and SGL B. Each SGL + * pair is followed by the address of the next pair. + * + * + */ +struct scu_sgl_element_pair { + /* OFFSET 0x60-0x68 */ + /** + * This field is the SGL element A of the SGL pair. + */ + struct scu_sgl_element A; + + /* OFFSET 0x6C-0x74 */ + /** + * This field is the SGL element B of the SGL pair. + */ + struct scu_sgl_element B; + + /* OFFSET 0x78-0x7C */ + /** + * This field is the upper 32 bits of the 64 bit address to the next SGL + * element pair. + */ + u32 next_pair_upper; + + /** + * This field is the lower 32 bits of the 64 bit address to the next SGL + * element pair. + */ + u32 next_pair_lower; + +}; + +/** + * struct transport_snapshot - This structure is the SCU hardware scratch area + * for the task context. This is set to 0 by the driver but can be read by + * issuing a dump TC request to the SCU. 
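A hedged sketch of filling one scu_sgl_element-style descriptor from a 64-bit DMA address, enforcing the two limits stated above: the 24-bit length field and the rule that an element must not cross a 4 GiB boundary. The example_* names, values and the boundary-check interpretation are illustrative only.

#include <stdint.h>
#include <stdio.h>

struct example_sgl_element {
	uint32_t address_upper;    /* upper 32 bits of the 64-bit DMA address */
	uint32_t address_lower;    /* lower 32 bits */
	uint32_t length:24;        /* at most 16 MiB - 1 per element */
	uint32_t address_modifier:8;
};

/* returns 0 on success, -1 if the buffer violates the documented limits */
static int example_fill_sgl(struct example_sgl_element *e, uint64_t dma, uint32_t len)
{
	if (len == 0 || len >= (1u << 24))
		return -1;                               /* length field is only 24 bits wide */
	if ((dma >> 32) != ((dma + len - 1) >> 32))
		return -1;                               /* must not cross a 4 GiB boundary */

	e->address_upper = (uint32_t)(dma >> 32);
	e->address_lower = (uint32_t)dma;
	e->length = len;
	e->address_modifier = 0;                         /* no virtual-function modifier */
	return 0;
}

int main(void)
{
	struct example_sgl_element a;

	if (example_fill_sgl(&a, 0x00000001fffff000ull, 0x2000) != 0)
		printf("rejected: buffer straddles a 4 GiB boundary\n");
	if (example_fill_sgl(&a, 0x0000000280001000ull, 0x2000) == 0)
		printf("A: upper=0x%08x lower=0x%08x len=0x%x\n",
		       (unsigned)a.address_upper, (unsigned)a.address_lower,
		       (unsigned)a.length);
	return 0;
}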
+ * + */ +struct transport_snapshot { + /* OFFSET 0x48 */ + u32 xfer_rdy_write_data_length; + + /* OFFSET 0x4C */ + u32 data_offset; + + /* OFFSET 0x50 */ + u32 data_transfer_size:24; + u32 reserved_50_0:8; + + /* OFFSET 0x54 */ + u32 next_initiator_write_data_offset; + + /* OFFSET 0x58 */ + u32 next_initiator_write_data_xfer_size:24; + u32 reserved_58_0:8; +}; + +/** + * struct scu_task_context - This structure defines the contents of the SCU + * silicon task context. It lays out all of the fields according to the + * expected order and location for the Storage Controller unit. + * + * + */ +struct scu_task_context { + /* OFFSET 0x00 ------ */ + /** + * This field must be encoded to one of the valid SCU task priority values + * - SCU_TASK_PRIORITY_NORMAL + * - SCU_TASK_PRIORITY_HEAD_OF_Q + * - SCU_TASK_PRIORITY_HIGH + */ + u32 priority:2; + + /** + * This field must be set to true if this is an initiator generated request. + * Until target mode is supported all task requests are initiator requests. + */ + u32 initiator_request:1; + + /** + * This field must be set to one of the valid connection rates; valid values + * are 0x8, 0x9, and 0xA. + */ + u32 connection_rate:4; + + /** + * This field must be programmed when generating an SMP response since the SMP + * connection remains open until the SMP response is generated. + */ + u32 protocol_engine_index:3; + + /** + * This field must contain the logical port for the task request. + */ + u32 logical_port_index:3; + + /** + * This field must be set to one of the SCU_TASK_CONTEXT_PROTOCOL values + * - SCU_TASK_CONTEXT_PROTOCOL_SMP + * - SCU_TASK_CONTEXT_PROTOCOL_SSP + * - SCU_TASK_CONTEXT_PROTOCOL_STP + * - SCU_TASK_CONTEXT_PROTOCOL_NONE + */ + u32 protocol_type:3; + + /** + * This field must be set to the TCi allocated for this task + */ + u32 task_index:12; + + /** + * This field is reserved and must be set to 0x00 + */ + u32 reserved_00_0:1; + + /** + * For a normal task request this must be set to 0. If this is an abort of + * this task request it must be set to 1. + */ + u32 abort:1; + + /** + * This field must be set to true for the SCU hardware to process the task. + */ + u32 valid:1; + + /** + * This field must be set to SCU_TASK_CONTEXT_TYPE + */ + u32 context_type:1; + + /* OFFSET 0x04 */ + /** + * This field contains the RNi that is the target of this request. + */ + u32 remote_node_index:12; + + /** + * This field is programmed if this is a mirrored request, which we are not + * using, in which case it is the RNi for the mirrored target. + */ + u32 mirrored_node_index:12; + + /** + * This field is programmed with the direction of the SATA request + * - SCU_SATA_WRITE_DATA_DIRECTION + * - SCU_SATA_READ_DATA_DIRECTION + */ + u32 sata_direction:1; + + /** + * This field is programmed with one of the following SCU_COMMAND_CODE + * - SCU_COMMAND_CODE_INITIATOR_NEW_TASK + * - SCU_COMMAND_CODE_ACTIVE_TASK + * - SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK + * - SCU_COMMAND_CODE_TARGET_RAW_FRAMES + */ + u32 command_code:2; + + /** + * This field is set to true if the remote node should be suspended. + * This bit is only valid for SSP & SMP target devices.
+ */ + u32 suspend_node:1; + + /** + * This field is programmed with one of the following command type codes + * + * For SAS requests use the scu_ssp_task_type + * - SCU_TASK_TYPE_IOREAD + * - SCU_TASK_TYPE_IOWRITE + * - SCU_TASK_TYPE_SMP_REQUEST + * - SCU_TASK_TYPE_RESPONSE + * - SCU_TASK_TYPE_RAW_FRAME + * - SCU_TASK_TYPE_PRIMITIVE + * + * For SATA requests use the scu_sata_task_type + * - SCU_TASK_TYPE_DMA_IN + * - SCU_TASK_TYPE_FPDMAQ_READ + * - SCU_TASK_TYPE_PACKET_DMA_IN + * - SCU_TASK_TYPE_SATA_RAW_FRAME + * - SCU_TASK_TYPE_DMA_OUT + * - SCU_TASK_TYPE_FPDMAQ_WRITE + * - SCU_TASK_TYPE_PACKET_DMA_OUT + */ + u32 task_type:4; + + /* OFFSET 0x08 */ + /** + * This field is reserved and the must be set to 0x00 + */ + u32 link_layer_control:8; /* presently all reserved */ + + /** + * This field is set to true when TLR is to be enabled + */ + u32 ssp_tlr_enable:1; + + /** + * This is field specifies if the SCU DMAs a response frame to host + * memory for good response frames when operating in target mode. + */ + u32 dma_ssp_target_good_response:1; + + /** + * This field indicates if the SCU should DMA the response frame to + * host memory. + */ + u32 do_not_dma_ssp_good_response:1; + + /** + * This field is set to true when strict ordering is to be enabled + */ + u32 strict_ordering:1; + + /** + * This field indicates the type of endianess to be utilized for the + * frame. command, task, and response frames utilized control_frame + * set to 1. + */ + u32 control_frame:1; + + /** + * This field is reserved and the driver should set to 0x00 + */ + u32 tl_control_reserved:3; + + /** + * This field is set to true when the SCU hardware task timeout control is to + * be enabled + */ + u32 timeout_enable:1; + + /** + * This field is reserved and the driver should set it to 0x00 + */ + u32 pts_control_reserved:7; + + /** + * This field should be set to true when block guard is to be enabled + */ + u32 block_guard_enable:1; + + /** + * This field is reserved and the driver should set to 0x00 + */ + u32 sdma_control_reserved:7; + + /* OFFSET 0x0C */ + /** + * This field is the address modifier for this io request it should be + * programmed with the virtual function that is making the request. + */ + u32 address_modifier:16; + + /** + * @todo What we support mirrored SMP response frame? + */ + u32 mirrored_protocol_engine:3; /* mirrored protocol Engine Index */ + + /** + * If this is a mirrored request the logical port index for the mirrored RNi + * must be programmed. + */ + u32 mirrored_logical_port:4; /* mirrored local port index */ + + /** + * This field is reserved and the driver must set it to 0x00 + */ + u32 reserved_0C_0:8; + + /** + * This field must be set to true if the mirrored request processing is to be + * enabled. + */ + u32 mirror_request_enable:1; /* Mirrored request Enable */ + + /* OFFSET 0x10 */ + /** + * This field is the command iu length in dwords + */ + u32 ssp_command_iu_length:8; + + /** + * This is the target TLR enable bit it must be set to 0 when creatning the + * task context. + */ + u32 xfer_ready_tlr_enable:1; + + /** + * This field is reserved and the driver must set it to 0x00 + */ + u32 reserved_10_0:7; + + /** + * This is the maximum burst size that the SCU hardware will send in one + * connection its value is (N x 512) and N must be a multiple of 2. If the + * value is 0x00 then maximum burst size is disabled. + */ + u32 ssp_max_burst_size:16; + + /* OFFSET 0x14 */ + /** + * This filed is set to the number of bytes to be transfered in the request. 
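As a quick consistency check on the bit widths documented for the first two dwords of struct scu_task_context above, the following standalone snippet sums them at compile time (2+1+4+3+3+3+12+1+1+1+1 and 12+12+1+2+1+4 both come to 32). The EX_* names are invented; this is a C11 sanity-check sketch, not driver code.

#include <assert.h>

enum {
	EX_TC_WORD0_BITS = 2  /* priority */            + 1  /* initiator_request */ +
			   4  /* connection_rate */     + 3  /* protocol_engine_index */ +
			   3  /* logical_port_index */  + 3  /* protocol_type */ +
			   12 /* task_index */          + 1  /* reserved_00_0 */ +
			   1  /* abort */ + 1 /* valid */ + 1 /* context_type */,

	EX_TC_WORD1_BITS = 12 /* remote_node_index */   + 12 /* mirrored_node_index */ +
			   1  /* sata_direction */      + 2  /* command_code */ +
			   1  /* suspend_node */        + 4  /* task_type */,
};

static_assert(EX_TC_WORD0_BITS == 32, "task context word 0 must pack to one dword");
static_assert(EX_TC_WORD1_BITS == 32, "task context word 1 must pack to one dword");

int main(void)
{
	return 0; /* nothing to do at run time; the checks are compile-time only */
}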
+ */ + u32 transfer_length_bytes:24; /* In terms of bytes */ + + /** + * This field is reserved and the driver should set it to 0x00 + */ + u32 reserved_14_0:8; + + /* OFFSET 0x18-0x2C */ + /** + * This union provides for the protocol specif part of the SCU Task Context. + */ + union protocol_context type; + + /* OFFSET 0x30-0x34 */ + /** + * This field is the upper 32 bits of the 64 bit physical address of the + * command iu buffer + */ + u32 command_iu_upper; + + /** + * This field is the lower 32 bits of the 64 bit physical address of the + * command iu buffer + */ + u32 command_iu_lower; + + /* OFFSET 0x38-0x3C */ + /** + * This field is the upper 32 bits of the 64 bit physical address of the + * response iu buffer + */ + u32 response_iu_upper; + + /** + * This field is the lower 32 bits of the 64 bit physical address of the + * response iu buffer + */ + u32 response_iu_lower; + + /* OFFSET 0x40 */ + /** + * This field is set to the task phase of the SCU hardware. The driver must + * set this to 0x01 + */ + u32 task_phase:8; + + /** + * This field is set to the transport layer task status. The driver must set + * this to 0x00 + */ + u32 task_status:8; + + /** + * This field is used during initiator write TLR + */ + u32 previous_extended_tag:4; + + /** + * This field is set the maximum number of retries for a STP non-data FIS + */ + u32 stp_retry_count:2; + + /** + * This field is reserved and the driver must set it to 0x00 + */ + u32 reserved_40_1:2; + + /** + * This field is used by the SCU TL to determine when to take a snapshot when + * transmitting read data frames. + * - 0x00 The entire IO + * - 0x01 32k + * - 0x02 64k + * - 0x04 128k + * - 0x08 256k + */ + u32 ssp_tlr_threshold:4; + + /** + * This field is reserved and the driver must set it to 0x00 + */ + u32 reserved_40_2:4; + + /* OFFSET 0x44 */ + u32 write_data_length; /* read only set to 0 */ + + /* OFFSET 0x48-0x58 */ + struct transport_snapshot snapshot; /* read only set to 0 */ + + /* OFFSET 0x5C */ + u32 blk_prot_en:1; + u32 blk_sz:2; + u32 blk_prot_func:2; + u32 reserved_5C_0:9; + u32 active_sgl_element:2; /* read only set to 0 */ + u32 sgl_exhausted:1; /* read only set to 0 */ + u32 payload_data_transfer_error:4; /* read only set to 0 */ + u32 frame_buffer_offset:11; /* read only set to 0 */ + + /* OFFSET 0x60-0x7C */ + /** + * This field is the first SGL element pair found in the TC data structure. + */ + struct scu_sgl_element_pair sgl_pair_ab; + /* OFFSET 0x80-0x9C */ + /** + * This field is the second SGL element pair found in the TC data structure. 
+ */ + struct scu_sgl_element_pair sgl_pair_cd; + + /* OFFSET 0xA0-BC */ + struct scu_sgl_element_pair sgl_snapshot_ac; + + /* OFFSET 0xC0 */ + u32 active_sgl_element_pair; /* read only set to 0 */ + + /* OFFSET 0xC4-0xCC */ + u32 reserved_C4_CC[3]; + + /* OFFSET 0xD0 */ + u32 interm_crc_val:16; + u32 init_crc_seed:16; + + /* OFFSET 0xD4 */ + u32 app_tag_verify:16; + u32 app_tag_gen:16; + + /* OFFSET 0xD8 */ + u32 ref_tag_seed_verify; + + /* OFFSET 0xDC */ + u32 UD_bytes_immed_val:13; + u32 reserved_DC_0:3; + u32 DIF_bytes_immed_val:4; + u32 reserved_DC_1:12; + + /* OFFSET 0xE0 */ + u32 bgc_blk_sz:13; + u32 reserved_E0_0:3; + u32 app_tag_gen_mask:16; + + /* OFFSET 0xE4 */ + union { + u16 bgctl; + struct { + u16 crc_verify:1; + u16 app_tag_chk:1; + u16 ref_tag_chk:1; + u16 op:2; + u16 legacy:1; + u16 invert_crc_seed:1; + u16 ref_tag_gen:1; + u16 fixed_ref_tag:1; + u16 invert_crc:1; + u16 app_ref_f_detect:1; + u16 uninit_dif_check_err:1; + u16 uninit_dif_bypass:1; + u16 app_f_detect:1; + u16 reserved_0:2; + } bgctl_f; + }; + + u16 app_tag_verify_mask; + + /* OFFSET 0xE8 */ + u32 blk_guard_err:8; + u32 reserved_E8_0:24; + + /* OFFSET 0xEC */ + u32 ref_tag_seed_gen; + + /* OFFSET 0xF0 */ + u32 intermediate_crc_valid_snapshot:16; + u32 reserved_F0_0:16; + + /* OFFSET 0xF4 */ + u32 reference_tag_seed_for_verify_function_snapshot; + + /* OFFSET 0xF8 */ + u32 snapshot_of_reserved_dword_DC_of_tc; + + /* OFFSET 0xFC */ + u32 reference_tag_seed_for_generate_function_snapshot; + +} __packed; + +#endif /* _SCU_TASK_CONTEXT_H_ */ diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c new file mode 100644 index 000000000..c514b2029 --- /dev/null +++ b/drivers/scsi/isci/task.c @@ -0,0 +1,781 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include "sas.h" +#include +#include "remote_device.h" +#include "remote_node_context.h" +#include "isci.h" +#include "request.h" +#include "task.h" +#include "host.h" + +/** +* isci_task_refuse() - complete the request to the upper layer driver in +* the case where an I/O needs to be completed back in the submit path. +* @ihost: host on which the the request was queued +* @task: request to complete +* @response: response code for the completed task. +* @status: status code for the completed task. +* +*/ +static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task, + enum service_response response, + enum exec_status status) + +{ + unsigned long flags; + + /* Normal notification (task_done) */ + dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n", + __func__, task, response, status); + + spin_lock_irqsave(&task->task_state_lock, flags); + + task->task_status.resp = response; + task->task_status.stat = status; + + /* Normal notification (task_done) */ + task->task_state_flags |= SAS_TASK_STATE_DONE; + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->lldd_task = NULL; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + task->task_done(task); +} + +#define for_each_sas_task(num, task) \ + for (; num > 0; num--,\ + task = list_entry(task->list.next, struct sas_task, list)) + + +static inline int isci_device_io_ready(struct isci_remote_device *idev, + struct sas_task *task) +{ + return idev ? test_bit(IDEV_IO_READY, &idev->flags) || + (test_bit(IDEV_IO_NCQERROR, &idev->flags) && + isci_task_is_ncq_recovery(task)) + : 0; +} +/** + * isci_task_execute_task() - This function is one of the SAS Domain Template + * functions. This function is called by libsas to send a task down to + * hardware. + * @task: This parameter specifies the SAS task to send. + * @gfp_flags: This parameter specifies the context of this call. + * + * status, zero indicates success. + */ +int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags) +{ + struct isci_host *ihost = dev_to_ihost(task->dev); + struct isci_remote_device *idev; + unsigned long flags; + enum sci_status status = SCI_FAILURE; + bool io_ready; + u16 tag; + + spin_lock_irqsave(&ihost->scic_lock, flags); + idev = isci_lookup_device(task->dev); + io_ready = isci_device_io_ready(idev, task); + tag = isci_alloc_tag(ihost); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + dev_dbg(&ihost->pdev->dev, + "task: %p, dev: %p idev: %p:%#lx cmd = %p\n", + task, task->dev, idev, idev ? 
idev->flags : 0, + task->uldd_task); + + if (!idev) { + isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, + SAS_DEVICE_UNKNOWN); + } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) { + /* Indicate QUEUE_FULL so that the scsi midlayer + * retries. + */ + isci_task_refuse(ihost, task, SAS_TASK_COMPLETE, + SAS_QUEUE_FULL); + } else { + /* There is a device and it's ready for I/O. */ + spin_lock_irqsave(&task->task_state_lock, flags); + + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + /* The I/O was aborted. */ + spin_unlock_irqrestore(&task->task_state_lock, flags); + + isci_task_refuse(ihost, task, + SAS_TASK_UNDELIVERED, + SAS_SAM_STAT_TASK_ABORTED); + } else { + struct isci_request *ireq; + + /* do common allocation and init of request object. */ + ireq = isci_io_request_from_tag(ihost, task, tag); + spin_unlock_irqrestore(&task->task_state_lock, flags); + + /* build and send the request. */ + /* do common allocation and init of request object. */ + status = isci_request_execute(ihost, idev, task, ireq); + + if (status != SCI_SUCCESS) { + if (test_bit(IDEV_GONE, &idev->flags)) { + /* Indicate that the device + * is gone. + */ + isci_task_refuse(ihost, task, + SAS_TASK_UNDELIVERED, + SAS_DEVICE_UNKNOWN); + } else { + /* Indicate QUEUE_FULL so that + * the scsi midlayer retries. + * If the request failed for + * remote device reasons, it + * gets returned as + * SAS_TASK_UNDELIVERED next + * time through. + */ + isci_task_refuse(ihost, task, + SAS_TASK_COMPLETE, + SAS_QUEUE_FULL); + } + } + } + } + + if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) { + spin_lock_irqsave(&ihost->scic_lock, flags); + /* command never hit the device, so just free + * the tci and skip the sequence increment + */ + isci_tci_free(ihost, ISCI_TAG_TCI(tag)); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + } + + isci_put_device(idev); + return 0; +} + +static struct isci_request *isci_task_request_build(struct isci_host *ihost, + struct isci_remote_device *idev, + u16 tag, struct isci_tmf *isci_tmf) +{ + enum sci_status status = SCI_FAILURE; + struct isci_request *ireq = NULL; + struct domain_device *dev; + + dev_dbg(&ihost->pdev->dev, + "%s: isci_tmf = %p\n", __func__, isci_tmf); + + dev = idev->domain_dev; + + /* do common allocation and init of request object. */ + ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag); + if (!ireq) + return NULL; + + /* let the core do it's construct. 
*/ + status = sci_task_request_construct(ihost, idev, tag, + ireq); + + if (status != SCI_SUCCESS) { + dev_warn(&ihost->pdev->dev, + "%s: sci_task_request_construct failed - " + "status = 0x%x\n", + __func__, + status); + return NULL; + } + + /* XXX convert to get this from task->tproto like other drivers */ + if (dev->dev_type == SAS_END_DEVICE) { + isci_tmf->proto = SAS_PROTOCOL_SSP; + status = sci_task_request_construct_ssp(ireq); + if (status != SCI_SUCCESS) + return NULL; + } + + return ireq; +} + +static int isci_task_execute_tmf(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_tmf *tmf, unsigned long timeout_ms) +{ + DECLARE_COMPLETION_ONSTACK(completion); + enum sci_status status = SCI_FAILURE; + struct isci_request *ireq; + int ret = TMF_RESP_FUNC_FAILED; + unsigned long flags; + unsigned long timeleft; + u16 tag; + + spin_lock_irqsave(&ihost->scic_lock, flags); + tag = isci_alloc_tag(ihost); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + if (tag == SCI_CONTROLLER_INVALID_IO_TAG) + return ret; + + /* sanity check, return TMF_RESP_FUNC_FAILED + * if the device is not there and ready. + */ + if (!idev || + (!test_bit(IDEV_IO_READY, &idev->flags) && + !test_bit(IDEV_IO_NCQERROR, &idev->flags))) { + dev_dbg(&ihost->pdev->dev, + "%s: idev = %p not ready (%#lx)\n", + __func__, + idev, idev ? idev->flags : 0); + goto err_tci; + } else + dev_dbg(&ihost->pdev->dev, + "%s: idev = %p\n", + __func__, idev); + + /* Assign the pointer to the TMF's completion kernel wait structure. */ + tmf->complete = &completion; + tmf->status = SCI_FAILURE_TIMEOUT; + + ireq = isci_task_request_build(ihost, idev, tag, tmf); + if (!ireq) + goto err_tci; + + spin_lock_irqsave(&ihost->scic_lock, flags); + + /* start the TMF io. */ + status = sci_controller_start_task(ihost, idev, ireq); + + if (status != SCI_SUCCESS) { + dev_dbg(&ihost->pdev->dev, + "%s: start_io failed - status = 0x%x, request = %p\n", + __func__, + status, + ireq); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + goto err_tci; + } + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + /* The RNC must be unsuspended before the TMF can get a response. */ + isci_remote_device_resume_from_abort(ihost, idev); + + /* Wait for the TMF to complete, or a timeout. */ + timeleft = wait_for_completion_timeout(&completion, + msecs_to_jiffies(timeout_ms)); + + if (timeleft == 0) { + /* The TMF did not complete - this could be because + * of an unplug. Terminate the TMF request now. + */ + isci_remote_device_suspend_terminate(ihost, idev, ireq); + } + + isci_print_tmf(ihost, tmf); + + if (tmf->status == SCI_SUCCESS) + ret = TMF_RESP_FUNC_COMPLETE; + else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) { + dev_dbg(&ihost->pdev->dev, + "%s: tmf.status == " + "SCI_FAILURE_IO_RESPONSE_VALID\n", + __func__); + ret = TMF_RESP_FUNC_COMPLETE; + } + /* Else - leave the default "failed" status alone. 
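+ * + * In summary, the TMF path above allocates a tag, builds the request with + * isci_task_request_build(), starts it via sci_controller_start_task(), + * resumes the remote node from the abort state, and then waits for the + * completion with a timeout; if the wait times out the request is terminated + * with isci_remote_device_suspend_terminate() before tmf->status is examined + * here.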
*/ + + dev_dbg(&ihost->pdev->dev, + "%s: completed request = %p\n", + __func__, + ireq); + + return ret; + + err_tci: + spin_lock_irqsave(&ihost->scic_lock, flags); + isci_tci_free(ihost, ISCI_TAG_TCI(tag)); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + return ret; +} + +static void isci_task_build_tmf(struct isci_tmf *tmf, + enum isci_tmf_function_codes code) +{ + memset(tmf, 0, sizeof(*tmf)); + tmf->tmf_code = code; +} + +static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf, + enum isci_tmf_function_codes code, + struct isci_request *old_request) +{ + isci_task_build_tmf(tmf, code); + tmf->io_tag = old_request->io_tag; +} + +/* + * isci_task_send_lu_reset_sas() - This function is called by of the SAS Domain + * Template functions. + * @lun: This parameter specifies the lun to be reset. + * + * status, zero indicates success. + */ +static int isci_task_send_lu_reset_sas( + struct isci_host *isci_host, + struct isci_remote_device *isci_device, + u8 *lun) +{ + struct isci_tmf tmf; + int ret = TMF_RESP_FUNC_FAILED; + + dev_dbg(&isci_host->pdev->dev, + "%s: isci_host = %p, isci_device = %p\n", + __func__, isci_host, isci_device); + /* Send the LUN reset to the target. By the time the call returns, + * the TMF has fully exected in the target (in which case the return + * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or + * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED"). + */ + isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset); + + #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */ + ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS); + + if (ret == TMF_RESP_FUNC_COMPLETE) + dev_dbg(&isci_host->pdev->dev, + "%s: %p: TMF_LU_RESET passed\n", + __func__, isci_device); + else + dev_dbg(&isci_host->pdev->dev, + "%s: %p: TMF_LU_RESET failed (%x)\n", + __func__, isci_device, ret); + + return ret; +} + +int isci_task_lu_reset(struct domain_device *dev, u8 *lun) +{ + struct isci_host *ihost = dev_to_ihost(dev); + struct isci_remote_device *idev; + unsigned long flags; + int ret = TMF_RESP_FUNC_COMPLETE; + + spin_lock_irqsave(&ihost->scic_lock, flags); + idev = isci_get_device(dev->lldd_dev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + dev_dbg(&ihost->pdev->dev, + "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", + __func__, dev, ihost, idev); + + if (!idev) { + /* If the device is gone, escalate to I_T_Nexus_Reset. */ + dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__); + + ret = TMF_RESP_FUNC_FAILED; + goto out; + } + + /* Suspend the RNC, kill all TCs */ + if (isci_remote_device_suspend_terminate(ihost, idev, NULL) + != SCI_SUCCESS) { + /* The suspend/terminate only fails if isci_get_device fails */ + ret = TMF_RESP_FUNC_FAILED; + goto out; + } + /* All pending I/Os have been terminated and cleaned up. */ + if (!test_bit(IDEV_GONE, &idev->flags)) { + if (dev_is_sata(dev)) + sas_ata_schedule_reset(dev); + else + /* Send the task management part of the reset. */ + ret = isci_task_send_lu_reset_sas(ihost, idev, lun); + } + out: + isci_put_device(idev); + return ret; +} + + +/* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */ +int isci_task_clear_nexus_port(struct asd_sas_port *port) +{ + return TMF_RESP_FUNC_FAILED; +} + + + +int isci_task_clear_nexus_ha(struct sas_ha_struct *ha) +{ + return TMF_RESP_FUNC_FAILED; +} + +/* Task Management Functions. Must be called from process context. */ + +/** + * isci_task_abort_task() - This function is one of the SAS Domain Template + * functions. 
This function is called by libsas to abort a specified task. + * @task: This parameter specifies the SAS task to abort. + * + * status, zero indicates success. + */ +int isci_task_abort_task(struct sas_task *task) +{ + struct isci_host *ihost = dev_to_ihost(task->dev); + DECLARE_COMPLETION_ONSTACK(aborted_io_completion); + struct isci_request *old_request = NULL; + struct isci_remote_device *idev = NULL; + struct isci_tmf tmf; + int ret = TMF_RESP_FUNC_FAILED; + unsigned long flags; + int target_done_already = 0; + + /* Get the isci_request reference from the task. Note that + * this check does not depend on the pending request list + * in the device, because tasks driving resets may land here + * after completion in the core. + */ + spin_lock_irqsave(&ihost->scic_lock, flags); + spin_lock(&task->task_state_lock); + + old_request = task->lldd_task; + + /* If task is already done, the request isn't valid */ + if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && + old_request) { + idev = isci_get_device(task->dev->lldd_dev); + target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET, + &old_request->flags); + } + spin_unlock(&task->task_state_lock); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + dev_warn(&ihost->pdev->dev, + "%s: dev = %p (%s%s), task = %p, old_request == %p\n", + __func__, idev, + (dev_is_sata(task->dev) ? "STP/SATA" + : ((dev_is_expander(task->dev->dev_type)) + ? "SMP" + : "SSP")), + ((idev) ? ((test_bit(IDEV_GONE, &idev->flags)) + ? " IDEV_GONE" + : "") + : " "), + task, old_request); + + /* Device reset conditions signalled in task_state_flags are the + * responsbility of libsas to observe at the start of the error + * handler thread. + */ + if (!idev || !old_request) { + /* The request has already completed and there + * is nothing to do here other than to set the task + * done bit, and indicate that the task abort function + * was successful. + */ + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags |= SAS_TASK_STATE_DONE; + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + ret = TMF_RESP_FUNC_COMPLETE; + + dev_warn(&ihost->pdev->dev, + "%s: abort task not needed for %p\n", + __func__, task); + goto out; + } + /* Suspend the RNC, kill the TC */ + if (isci_remote_device_suspend_terminate(ihost, idev, old_request) + != SCI_SUCCESS) { + dev_warn(&ihost->pdev->dev, + "%s: isci_remote_device_reset_terminate(dev=%p, " + "req=%p, task=%p) failed\n", + __func__, idev, old_request, task); + ret = TMF_RESP_FUNC_FAILED; + goto out; + } + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (task->task_proto == SAS_PROTOCOL_SMP || + sas_protocol_ata(task->task_proto) || + target_done_already || + test_bit(IDEV_GONE, &idev->flags)) { + + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + /* No task to send, so explicitly resume the device here */ + isci_remote_device_resume_from_abort(ihost, idev); + + dev_warn(&ihost->pdev->dev, + "%s: %s request" + " or complete_in_target (%d), " + "or IDEV_GONE (%d), thus no TMF\n", + __func__, + ((task->task_proto == SAS_PROTOCOL_SMP) + ? "SMP" + : (sas_protocol_ata(task->task_proto) + ? 
"SATA/STP" + : "") + ), + test_bit(IREQ_COMPLETE_IN_TARGET, + &old_request->flags), + test_bit(IDEV_GONE, &idev->flags)); + + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags |= SAS_TASK_STATE_DONE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + ret = TMF_RESP_FUNC_COMPLETE; + } else { + /* Fill in the tmf structure */ + isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, + old_request); + + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + /* Send the task management request. */ + #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */ + ret = isci_task_execute_tmf(ihost, idev, &tmf, + ISCI_ABORT_TASK_TIMEOUT_MS); + } +out: + dev_warn(&ihost->pdev->dev, + "%s: Done; dev = %p, task = %p , old_request == %p\n", + __func__, idev, task, old_request); + isci_put_device(idev); + return ret; +} + +/** + * isci_task_abort_task_set() - This function is one of the SAS Domain Template + * functions. This is one of the Task Management functoins called by libsas, + * to abort all task for the given lun. + * @d_device: This parameter specifies the domain device associated with this + * request. + * @lun: This parameter specifies the lun associated with this request. + * + * status, zero indicates success. + */ +int isci_task_abort_task_set( + struct domain_device *d_device, + u8 *lun) +{ + return TMF_RESP_FUNC_FAILED; +} + + +/** + * isci_task_clear_task_set() - This function is one of the SAS Domain Template + * functions. This is one of the Task Management functoins called by libsas. + * @d_device: This parameter specifies the domain device associated with this + * request. + * @lun: This parameter specifies the lun associated with this request. + * + * status, zero indicates success. + */ +int isci_task_clear_task_set( + struct domain_device *d_device, + u8 *lun) +{ + return TMF_RESP_FUNC_FAILED; +} + + +/** + * isci_task_query_task() - This function is implemented to cause libsas to + * correctly escalate the failed abort to a LUN or target reset (this is + * because sas_scsi_find_task libsas function does not correctly interpret + * all return codes from the abort task call). When TMF_RESP_FUNC_SUCC is + * returned, libsas turns this into a LUN reset; when FUNC_FAILED is + * returned, libsas will turn this into a target reset + * @task: This parameter specifies the sas task being queried. + * + * status, zero indicates success. + */ +int isci_task_query_task( + struct sas_task *task) +{ + /* See if there is a pending device reset for this device. */ + if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) + return TMF_RESP_FUNC_FAILED; + else + return TMF_RESP_FUNC_SUCC; +} + +/* + * isci_task_request_complete() - This function is called by the sci core when + * an task request completes. + * @ihost: This parameter specifies the ISCI host object + * @ireq: This parameter is the completed isci_request object. + * @completion_status: This parameter specifies the completion status from the + * sci core. + * + * none. 
+ */ +void +isci_task_request_complete(struct isci_host *ihost, + struct isci_request *ireq, + enum sci_task_status completion_status) +{ + struct isci_tmf *tmf = isci_request_access_tmf(ireq); + struct completion *tmf_complete = NULL; + + dev_dbg(&ihost->pdev->dev, + "%s: request = %p, status=%d\n", + __func__, ireq, completion_status); + + set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); + + if (tmf) { + tmf->status = completion_status; + + if (tmf->proto == SAS_PROTOCOL_SSP) { + memcpy(tmf->resp.rsp_buf, + ireq->ssp.rsp_buf, + SSP_RESP_IU_MAX_SIZE); + } else if (tmf->proto == SAS_PROTOCOL_SATA) { + memcpy(&tmf->resp.d2h_fis, + &ireq->stp.rsp, + sizeof(struct dev_to_host_fis)); + } + /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ + tmf_complete = tmf->complete; + } + sci_controller_complete_io(ihost, ireq->target_device, ireq); + /* set the 'terminated' flag handle to make sure it cannot be terminated + * or completed again. + */ + set_bit(IREQ_TERMINATED, &ireq->flags); + + if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) + wake_up_all(&ihost->eventq); + + if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags)) + isci_free_tag(ihost, ireq->io_tag); + + /* The task management part completes last. */ + if (tmf_complete) + complete(tmf_complete); +} + +static int isci_reset_device(struct isci_host *ihost, + struct domain_device *dev, + struct isci_remote_device *idev) +{ + int rc = TMF_RESP_FUNC_COMPLETE, reset_stat = -1; + struct sas_phy *phy = sas_get_local_phy(dev); + struct isci_port *iport = dev->port->lldd_port; + + dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); + + /* Suspend the RNC, terminate all outstanding TCs. */ + if (isci_remote_device_suspend_terminate(ihost, idev, NULL) + != SCI_SUCCESS) { + rc = TMF_RESP_FUNC_FAILED; + goto out; + } + /* Note that since the termination for outstanding requests succeeded, + * this function will return success. This is because the resets will + * only fail if the device has been removed (ie. hotplug), and the + * primary duty of this function is to cleanup tasks, so that is the + * relevant status. + */ + if (!test_bit(IDEV_GONE, &idev->flags)) { + if (scsi_is_sas_phy_local(phy)) { + struct isci_phy *iphy = &ihost->phys[phy->number]; + + reset_stat = isci_port_perform_hard_reset(ihost, iport, + iphy); + } else + reset_stat = sas_phy_reset(phy, !dev_is_sata(dev)); + } + /* Explicitly resume the RNC here, since there was no task sent. */ + isci_remote_device_resume_from_abort(ihost, idev); + + dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n", + __func__, idev, reset_stat); + out: + sas_put_local_phy(phy); + return rc; +} + +int isci_task_I_T_nexus_reset(struct domain_device *dev) +{ + struct isci_host *ihost = dev_to_ihost(dev); + struct isci_remote_device *idev; + unsigned long flags; + int ret; + + spin_lock_irqsave(&ihost->scic_lock, flags); + idev = isci_get_device(dev->lldd_dev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + if (!idev) { + /* XXX: need to cleanup any ireqs targeting this + * domain_device + */ + ret = -ENODEV; + goto out; + } + + ret = isci_reset_device(ihost, dev, idev); + out: + isci_put_device(idev); + return ret; +} diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h new file mode 100644 index 000000000..f96633fa6 --- /dev/null +++ b/drivers/scsi/isci/task.h @@ -0,0 +1,181 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _ISCI_TASK_H_ +#define _ISCI_TASK_H_ + +#include +#include "host.h" + +#define ISCI_TERMINATION_TIMEOUT_MSEC 500 + +struct isci_request; + +/** + * enum isci_tmf_function_codes - This enum defines the possible preparations + * of task management requests. 
+ * + * + */ +enum isci_tmf_function_codes { + + isci_tmf_func_none = 0, + isci_tmf_ssp_task_abort = TMF_ABORT_TASK, + isci_tmf_ssp_lun_reset = TMF_LU_RESET, +}; + +/** + * struct isci_tmf - This class represents the task management object which + * acts as an interface to libsas for processing task management requests + * + * + */ +struct isci_tmf { + + struct completion *complete; + enum sas_protocol proto; + union { + struct ssp_response_iu resp_iu; + struct dev_to_host_fis d2h_fis; + u8 rsp_buf[SSP_RESP_IU_MAX_SIZE]; + } resp; + unsigned char lun[8]; + u16 io_tag; + enum isci_tmf_function_codes tmf_code; + int status; +}; + +static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf) +{ + if (SAS_PROTOCOL_SATA == tmf->proto) + dev_dbg(&ihost->pdev->dev, + "%s: status = %x\n" + "tmf->resp.d2h_fis.status = %x\n" + "tmf->resp.d2h_fis.error = %x\n", + __func__, + tmf->status, + tmf->resp.d2h_fis.status, + tmf->resp.d2h_fis.error); + else + dev_dbg(&ihost->pdev->dev, + "%s: status = %x\n" + "tmf->resp.resp_iu.data_present = %x\n" + "tmf->resp.resp_iu.status = %x\n" + "tmf->resp.resp_iu.data_length = %x\n" + "tmf->resp.resp_iu.data[0] = %x\n" + "tmf->resp.resp_iu.data[1] = %x\n" + "tmf->resp.resp_iu.data[2] = %x\n" + "tmf->resp.resp_iu.data[3] = %x\n", + __func__, + tmf->status, + tmf->resp.resp_iu.datapres, + tmf->resp.resp_iu.status, + be32_to_cpu(tmf->resp.resp_iu.response_data_len), + tmf->resp.resp_iu.resp_data[0], + tmf->resp.resp_iu.resp_data[1], + tmf->resp.resp_iu.resp_data[2], + tmf->resp.resp_iu.resp_data[3]); +} + + +int isci_task_execute_task( + struct sas_task *task, + gfp_t gfp_flags); + +int isci_task_abort_task( + struct sas_task *task); + +int isci_task_abort_task_set( + struct domain_device *d_device, + u8 *lun); + +int isci_task_clear_task_set( + struct domain_device *d_device, + u8 *lun); + +int isci_task_query_task( + struct sas_task *task); + +int isci_task_lu_reset( + struct domain_device *d_device, + u8 *lun); + +int isci_task_clear_nexus_port( + struct asd_sas_port *port); + +int isci_task_clear_nexus_ha( + struct sas_ha_struct *ha); + +int isci_task_I_T_nexus_reset( + struct domain_device *d_device); + +void isci_task_request_complete( + struct isci_host *isci_host, + struct isci_request *request, + enum sci_task_status completion_status); + +u16 isci_task_ssp_request_get_io_tag_to_manage( + struct isci_request *request); + +u8 isci_task_ssp_request_get_function( + struct isci_request *request); + + +void *isci_task_ssp_request_get_response_data_address( + struct isci_request *request); + +u32 isci_task_ssp_request_get_response_data_length( + struct isci_request *request); + +#endif /* !defined(_SCI_TASK_H_) */ diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c new file mode 100644 index 000000000..04a6d0d59 --- /dev/null +++ b/drivers/scsi/isci/unsolicited_frame_control.c @@ -0,0 +1,211 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "host.h" +#include "unsolicited_frame_control.h" +#include "registers.h" + +void sci_unsolicited_frame_control_construct(struct isci_host *ihost) +{ + struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control; + struct sci_unsolicited_frame *uf; + dma_addr_t dma = ihost->ufi_dma; + void *virt = ihost->ufi_buf; + int i; + + /* + * The Unsolicited Frame buffers are set at the start of the UF + * memory descriptor entry. The headers and address table will be + * placed after the buffers. + */ + + /* + * Program the location of the UF header table into the SCU. + * Notes: + * - The address must align on a 64-byte boundary. Guaranteed to be + * on 64-byte boundary already 1KB boundary for unsolicited frames. + * - Program unused header entries to overlap with the last + * unsolicited frame. The silicon will never DMA to these unused + * headers, since we program the UF address table pointers to + * NULL. + */ + uf_control->headers.physical_address = dma + SCI_UFI_BUF_SIZE; + uf_control->headers.array = virt + SCI_UFI_BUF_SIZE; + + /* + * Program the location of the UF address table into the SCU. + * Notes: + * - The address must align on a 64-bit boundary. 
Guaranteed to be on a 64-bit boundary already, because the + * headers programmed above start on a 64-byte boundary and each + * header is 64 bytes in size. + */ + uf_control->address_table.physical_address = dma + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE; + uf_control->address_table.array = virt + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE; + uf_control->get = 0; +
+ /* + * UF buffer requirements are: + * - The last entry in the UF queue is not NULL. + * - There is a power of 2 number of entries (NULL or not-NULL) + * programmed into the queue. + * - Aligned on a 1KB boundary. */ +
+ /* + * Program the actual used UF buffers into the UF address table and + * the controller's array of UFs. + */ + for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++) { + uf = &uf_control->buffers.array[i]; +
+ uf_control->address_table.array[i] = dma; +
+ uf->buffer = virt; + uf->header = &uf_control->headers.array[i]; + uf->state = UNSOLICITED_FRAME_EMPTY; +
+ /* + * Increment the address of the physical and virtual memory + * pointers. Everything is aligned on a 1k boundary with an + * increment of 1k. + */ + virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; + dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; + } +} +
+enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control, + u32 frame_index, + void **frame_header) +{ + if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { + /* Skip the first word in the frame since this is a control word used + * by the hardware. + */ + *frame_header = &uf_control->buffers.array[frame_index].header->data; +
+ return SCI_SUCCESS; + } +
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE; +} +
+enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control, + u32 frame_index, + void **frame_buffer) +{ + if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { + *frame_buffer = uf_control->buffers.array[frame_index].buffer; +
+ return SCI_SUCCESS; + } +
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE; +} +
+bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control, + u32 frame_index) +{ + u32 frame_get; + u32 frame_cycle; +
+ frame_get = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1); + frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES; +
+ /* + * In the event there are NULL entries in the UF table, we need to + * advance the get pointer in order to find out if this frame should + * be released (i.e. update the get pointer). + */ + while (lower_32_bits(uf_control->address_table.array[frame_get]) == 0 && + upper_32_bits(uf_control->address_table.array[frame_get]) == 0 && + frame_get < SCU_MAX_UNSOLICITED_FRAMES) + frame_get++; +
+ /* + * The table has a NULL entry as its last element. This is + * illegal.
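+ * + * Note on the encoding used in this function: the low bits of + * uf_control->get (masked with SCU_MAX_UNSOLICITED_FRAMES - 1) select the + * frame slot, the SCU_MAX_UNSOLICITED_FRAMES bit acts as a cycle (wrap) + * indicator, and the value written back at the end of this function also + * sets SCU_UFQGP_GEN_BIT(ENABLE_BIT).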
+ */ + BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES); + if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES) + return false; + + uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED; + + if (frame_get != frame_index) { + /* + * Frames remain in use until we advance the get pointer + * so there is nothing we can do here + */ + return false; + } + + /* + * The frame index is equal to the current get pointer so we + * can now free up all of the frame entries that + */ + while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) { + uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY; + + if (frame_get+1 == SCU_MAX_UNSOLICITED_FRAMES-1) { + frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES; + frame_get = 0; + } else + frame_get++; + } + + uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get; + + return true; +} diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h new file mode 100644 index 000000000..1bc551ec6 --- /dev/null +++ b/drivers/scsi/isci/unsolicited_frame_control.h @@ -0,0 +1,282 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +
+#ifndef _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ +#define _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ +
+#include "isci.h" +
+#define SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS 15 +
+/** + * struct scu_unsolicited_frame_header - + * + * This structure delineates the format of an unsolicited frame header. The + * first DWORD contains UF attributes defined by the silicon architecture. The data + * depicts the actual header information received on the link. + */ +struct scu_unsolicited_frame_header { + /** + * This field indicates if there is an Initiator Index Table entry with + * which this header is associated. + */ + u32 iit_exists:1; +
+ /** + * This field simply indicates the protocol type (i.e. SSP, STP, SMP). + */ + u32 protocol_type:3; +
+ /** + * This field indicates if the frame is an address frame (IAF or OAF) + * or if it is an information unit frame. + */ + u32 is_address_frame:1; +
+ /** + * This field simply indicates the connection rate at which the frame + * was received. + */ + u32 connection_rate:4; +
+ u32 reserved:23; +
+ /** + * This field represents the actual header data received on the link. + */ + u32 data[SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS]; +
+}; + + +
+/** + * enum unsolicited_frame_state - + * + * This enumeration represents the current unsolicited frame state. The + * controller object cannot update the hardware unsolicited frame put pointer + * unless it has already processed the prior unsolicited frames. + */ +enum unsolicited_frame_state { + /** + * This state is when the frame is empty and not in use. It is + * different from the released state in that the hardware could DMA + * data to this frame buffer. + */ + UNSOLICITED_FRAME_EMPTY, +
+ /** + * This state is set when the frame buffer is in use by some + * object in the system. + */ + UNSOLICITED_FRAME_IN_USE, +
+ /** + * This state is set when the frame is returned to the free pool + * but one or more frames prior to this one are still in use. + * Once all of the frames before this one are freed it will go to + * the empty state. + */ + UNSOLICITED_FRAME_RELEASED, +
+ UNSOLICITED_FRAME_MAX_STATES +}; +
+/** + * struct sci_unsolicited_frame - + * + * This is the unsolicited frame data structure; it acts as the container for + * the current frame state, frame header and frame buffer. + */ +struct sci_unsolicited_frame { + /** + * This field contains the current frame state + */ + enum unsolicited_frame_state state; +
+ /** + * This field points to the frame header data. + */ + struct scu_unsolicited_frame_header *header; +
+ /** + * This field points to the frame buffer data. + */ + void *buffer; +
+}; +
+/** + * struct sci_uf_header_array - + * + * This structure contains all of the unsolicited frame header information. + */ +struct sci_uf_header_array { + /** + * This field represents a virtual pointer to the start + * address of the UF header table.
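+ * The header table lives in the same single UFI allocation as the frame + * buffers and the 64-bit address table: buffers first (SCI_UFI_BUF_SIZE + * bytes), then this header array (SCI_UFI_HDR_SIZE bytes), then the address + * table; see sci_unsolicited_frame_control_construct().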
+ */ + struct scu_unsolicited_frame_header *array; + + /** + * This field specifies the physical address location for the UF + * buffer array. + */ + dma_addr_t physical_address; + +}; + +/** + * struct sci_uf_buffer_array - + * + * This structure contains all of the unsolicited frame buffer (actual payload) + * information. + */ +struct sci_uf_buffer_array { + /** + * This field is the unsolicited frame data its used to manage + * the data for the unsolicited frame requests. It also represents + * the virtual address location that corresponds to the + * physical_address field. + */ + struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES]; + + /** + * This field specifies the physical address location for the UF + * buffer array. + */ + dma_addr_t physical_address; +}; + +/** + * struct sci_uf_address_table_array - + * + * This object maintains all of the unsolicited frame address table specific + * data. The address table is a collection of 64-bit pointers that point to + * 1KB buffers into which the silicon will DMA unsolicited frames. + */ +struct sci_uf_address_table_array { + /** + * This field represents a virtual pointer that refers to the + * starting address of the UF address table. + * 64-bit pointers are required by the hardware. + */ + u64 *array; + + /** + * This field specifies the physical address location for the UF + * address table. + */ + dma_addr_t physical_address; + +}; + +/** + * struct sci_unsolicited_frame_control - + * + * This object contains all of the data necessary to handle unsolicited frames. + */ +struct sci_unsolicited_frame_control { + /** + * This field is the software copy of the unsolicited frame queue + * get pointer. The controller object writes this value to the + * hardware to let the hardware put more unsolicited frame entries. + */ + u32 get; + + /** + * This field contains all of the unsolicited frame header + * specific fields. + */ + struct sci_uf_header_array headers; + + /** + * This field contains all of the unsolicited frame buffer + * specific fields. + */ + struct sci_uf_buffer_array buffers; + + /** + * This field contains all of the unsolicited frame address table + * specific fields. + */ + struct sci_uf_address_table_array address_table; + +}; + +#define SCI_UFI_BUF_SIZE (SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE) +#define SCI_UFI_HDR_SIZE (SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header)) +#define SCI_UFI_TOTAL_SIZE (SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE + SCU_MAX_UNSOLICITED_FRAMES * sizeof(u64)) + +struct isci_host; + +void sci_unsolicited_frame_control_construct(struct isci_host *ihost); + +enum sci_status sci_unsolicited_frame_control_get_header( + struct sci_unsolicited_frame_control *uf_control, + u32 frame_index, + void **frame_header); + +enum sci_status sci_unsolicited_frame_control_get_buffer( + struct sci_unsolicited_frame_control *uf_control, + u32 frame_index, + void **frame_buffer); + +bool sci_unsolicited_frame_control_release_frame( + struct sci_unsolicited_frame_control *uf_control, + u32 frame_index); + +#endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */ diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c new file mode 100644 index 000000000..a64abe38d --- /dev/null +++ b/drivers/scsi/iscsi_boot_sysfs.c @@ -0,0 +1,554 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Export the iSCSI boot info to userland via sysfs. + * + * Copyright (C) 2010 Red Hat, Inc. All rights reserved. 
+ * Copyright (C) 2010 Mike Christie + */ + +#include +#include +#include +#include +#include +#include + + +MODULE_AUTHOR("Mike Christie "); +MODULE_DESCRIPTION("sysfs interface and helpers to export iSCSI boot information"); +MODULE_LICENSE("GPL"); +/* + * The kobject and attribute structures. + */ +struct iscsi_boot_attr { + struct attribute attr; + int type; + ssize_t (*show) (void *data, int type, char *buf); +}; + +/* + * The routine called for all sysfs attributes. + */ +static ssize_t iscsi_boot_show_attribute(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct iscsi_boot_kobj *boot_kobj = + container_of(kobj, struct iscsi_boot_kobj, kobj); + struct iscsi_boot_attr *boot_attr = + container_of(attr, struct iscsi_boot_attr, attr); + ssize_t ret = -EIO; + char *str = buf; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (boot_kobj->show) + ret = boot_kobj->show(boot_kobj->data, boot_attr->type, str); + return ret; +} + +static const struct sysfs_ops iscsi_boot_attr_ops = { + .show = iscsi_boot_show_attribute, +}; + +static void iscsi_boot_kobj_release(struct kobject *kobj) +{ + struct iscsi_boot_kobj *boot_kobj = + container_of(kobj, struct iscsi_boot_kobj, kobj); + + if (boot_kobj->release) + boot_kobj->release(boot_kobj->data); + kfree(boot_kobj); +} + +static struct kobj_type iscsi_boot_ktype = { + .release = iscsi_boot_kobj_release, + .sysfs_ops = &iscsi_boot_attr_ops, +}; + +#define iscsi_boot_rd_attr(fnname, sysfs_name, attr_type) \ +static struct iscsi_boot_attr iscsi_boot_attr_##fnname = { \ + .attr = { .name = __stringify(sysfs_name), .mode = 0444 }, \ + .type = attr_type, \ +} + +/* Target attrs */ +iscsi_boot_rd_attr(tgt_index, index, ISCSI_BOOT_TGT_INDEX); +iscsi_boot_rd_attr(tgt_flags, flags, ISCSI_BOOT_TGT_FLAGS); +iscsi_boot_rd_attr(tgt_ip, ip-addr, ISCSI_BOOT_TGT_IP_ADDR); +iscsi_boot_rd_attr(tgt_port, port, ISCSI_BOOT_TGT_PORT); +iscsi_boot_rd_attr(tgt_lun, lun, ISCSI_BOOT_TGT_LUN); +iscsi_boot_rd_attr(tgt_chap, chap-type, ISCSI_BOOT_TGT_CHAP_TYPE); +iscsi_boot_rd_attr(tgt_nic, nic-assoc, ISCSI_BOOT_TGT_NIC_ASSOC); +iscsi_boot_rd_attr(tgt_name, target-name, ISCSI_BOOT_TGT_NAME); +iscsi_boot_rd_attr(tgt_chap_name, chap-name, ISCSI_BOOT_TGT_CHAP_NAME); +iscsi_boot_rd_attr(tgt_chap_secret, chap-secret, ISCSI_BOOT_TGT_CHAP_SECRET); +iscsi_boot_rd_attr(tgt_chap_rev_name, rev-chap-name, + ISCSI_BOOT_TGT_REV_CHAP_NAME); +iscsi_boot_rd_attr(tgt_chap_rev_secret, rev-chap-name-secret, + ISCSI_BOOT_TGT_REV_CHAP_SECRET); + +static struct attribute *target_attrs[] = { + &iscsi_boot_attr_tgt_index.attr, + &iscsi_boot_attr_tgt_flags.attr, + &iscsi_boot_attr_tgt_ip.attr, + &iscsi_boot_attr_tgt_port.attr, + &iscsi_boot_attr_tgt_lun.attr, + &iscsi_boot_attr_tgt_chap.attr, + &iscsi_boot_attr_tgt_nic.attr, + &iscsi_boot_attr_tgt_name.attr, + &iscsi_boot_attr_tgt_chap_name.attr, + &iscsi_boot_attr_tgt_chap_secret.attr, + &iscsi_boot_attr_tgt_chap_rev_name.attr, + &iscsi_boot_attr_tgt_chap_rev_secret.attr, + NULL +}; + +static umode_t iscsi_boot_tgt_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct iscsi_boot_kobj *boot_kobj = + container_of(kobj, struct iscsi_boot_kobj, kobj); + + if (attr == &iscsi_boot_attr_tgt_index.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_INDEX); + else if (attr == &iscsi_boot_attr_tgt_flags.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_FLAGS); + else if (attr == &iscsi_boot_attr_tgt_ip.attr) + return boot_kobj->is_visible(boot_kobj->data, + 
ISCSI_BOOT_TGT_IP_ADDR); + else if (attr == &iscsi_boot_attr_tgt_port.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_PORT); + else if (attr == &iscsi_boot_attr_tgt_lun.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_LUN); + else if (attr == &iscsi_boot_attr_tgt_chap.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_CHAP_TYPE); + else if (attr == &iscsi_boot_attr_tgt_nic.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_NIC_ASSOC); + else if (attr == &iscsi_boot_attr_tgt_name.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_NAME); + else if (attr == &iscsi_boot_attr_tgt_chap_name.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_CHAP_NAME); + else if (attr == &iscsi_boot_attr_tgt_chap_secret.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_CHAP_SECRET); + else if (attr == &iscsi_boot_attr_tgt_chap_rev_name.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_REV_CHAP_NAME); + else if (attr == &iscsi_boot_attr_tgt_chap_rev_secret.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_REV_CHAP_SECRET); + return 0; +} + +static struct attribute_group iscsi_boot_target_attr_group = { + .attrs = target_attrs, + .is_visible = iscsi_boot_tgt_attr_is_visible, +}; + +/* Ethernet attrs */ +iscsi_boot_rd_attr(eth_index, index, ISCSI_BOOT_ETH_INDEX); +iscsi_boot_rd_attr(eth_flags, flags, ISCSI_BOOT_ETH_FLAGS); +iscsi_boot_rd_attr(eth_ip, ip-addr, ISCSI_BOOT_ETH_IP_ADDR); +iscsi_boot_rd_attr(eth_prefix, prefix-len, ISCSI_BOOT_ETH_PREFIX_LEN); +iscsi_boot_rd_attr(eth_subnet, subnet-mask, ISCSI_BOOT_ETH_SUBNET_MASK); +iscsi_boot_rd_attr(eth_origin, origin, ISCSI_BOOT_ETH_ORIGIN); +iscsi_boot_rd_attr(eth_gateway, gateway, ISCSI_BOOT_ETH_GATEWAY); +iscsi_boot_rd_attr(eth_primary_dns, primary-dns, ISCSI_BOOT_ETH_PRIMARY_DNS); +iscsi_boot_rd_attr(eth_secondary_dns, secondary-dns, + ISCSI_BOOT_ETH_SECONDARY_DNS); +iscsi_boot_rd_attr(eth_dhcp, dhcp, ISCSI_BOOT_ETH_DHCP); +iscsi_boot_rd_attr(eth_vlan, vlan, ISCSI_BOOT_ETH_VLAN); +iscsi_boot_rd_attr(eth_mac, mac, ISCSI_BOOT_ETH_MAC); +iscsi_boot_rd_attr(eth_hostname, hostname, ISCSI_BOOT_ETH_HOSTNAME); + +static struct attribute *ethernet_attrs[] = { + &iscsi_boot_attr_eth_index.attr, + &iscsi_boot_attr_eth_flags.attr, + &iscsi_boot_attr_eth_ip.attr, + &iscsi_boot_attr_eth_prefix.attr, + &iscsi_boot_attr_eth_subnet.attr, + &iscsi_boot_attr_eth_origin.attr, + &iscsi_boot_attr_eth_gateway.attr, + &iscsi_boot_attr_eth_primary_dns.attr, + &iscsi_boot_attr_eth_secondary_dns.attr, + &iscsi_boot_attr_eth_dhcp.attr, + &iscsi_boot_attr_eth_vlan.attr, + &iscsi_boot_attr_eth_mac.attr, + &iscsi_boot_attr_eth_hostname.attr, + NULL +}; + +static umode_t iscsi_boot_eth_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct iscsi_boot_kobj *boot_kobj = + container_of(kobj, struct iscsi_boot_kobj, kobj); + + if (attr == &iscsi_boot_attr_eth_index.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_INDEX); + else if (attr == &iscsi_boot_attr_eth_flags.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_FLAGS); + else if (attr == &iscsi_boot_attr_eth_ip.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_IP_ADDR); + else if (attr == &iscsi_boot_attr_eth_prefix.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_PREFIX_LEN); + else if (attr == &iscsi_boot_attr_eth_subnet.attr) + return 
boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_SUBNET_MASK); + else if (attr == &iscsi_boot_attr_eth_origin.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_ORIGIN); + else if (attr == &iscsi_boot_attr_eth_gateway.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_GATEWAY); + else if (attr == &iscsi_boot_attr_eth_primary_dns.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_PRIMARY_DNS); + else if (attr == &iscsi_boot_attr_eth_secondary_dns.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_SECONDARY_DNS); + else if (attr == &iscsi_boot_attr_eth_dhcp.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_DHCP); + else if (attr == &iscsi_boot_attr_eth_vlan.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_VLAN); + else if (attr == &iscsi_boot_attr_eth_mac.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_MAC); + else if (attr == &iscsi_boot_attr_eth_hostname.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_HOSTNAME); + return 0; +} + +static struct attribute_group iscsi_boot_ethernet_attr_group = { + .attrs = ethernet_attrs, + .is_visible = iscsi_boot_eth_attr_is_visible, +}; + +/* Initiator attrs */ +iscsi_boot_rd_attr(ini_index, index, ISCSI_BOOT_INI_INDEX); +iscsi_boot_rd_attr(ini_flags, flags, ISCSI_BOOT_INI_FLAGS); +iscsi_boot_rd_attr(ini_isns, isns-server, ISCSI_BOOT_INI_ISNS_SERVER); +iscsi_boot_rd_attr(ini_slp, slp-server, ISCSI_BOOT_INI_SLP_SERVER); +iscsi_boot_rd_attr(ini_primary_radius, pri-radius-server, + ISCSI_BOOT_INI_PRI_RADIUS_SERVER); +iscsi_boot_rd_attr(ini_secondary_radius, sec-radius-server, + ISCSI_BOOT_INI_SEC_RADIUS_SERVER); +iscsi_boot_rd_attr(ini_name, initiator-name, ISCSI_BOOT_INI_INITIATOR_NAME); + +static struct attribute *initiator_attrs[] = { + &iscsi_boot_attr_ini_index.attr, + &iscsi_boot_attr_ini_flags.attr, + &iscsi_boot_attr_ini_isns.attr, + &iscsi_boot_attr_ini_slp.attr, + &iscsi_boot_attr_ini_primary_radius.attr, + &iscsi_boot_attr_ini_secondary_radius.attr, + &iscsi_boot_attr_ini_name.attr, + NULL +}; + +static umode_t iscsi_boot_ini_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct iscsi_boot_kobj *boot_kobj = + container_of(kobj, struct iscsi_boot_kobj, kobj); + + if (attr == &iscsi_boot_attr_ini_index.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_INI_INDEX); + if (attr == &iscsi_boot_attr_ini_flags.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_INI_FLAGS); + if (attr == &iscsi_boot_attr_ini_isns.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_INI_ISNS_SERVER); + if (attr == &iscsi_boot_attr_ini_slp.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_INI_SLP_SERVER); + if (attr == &iscsi_boot_attr_ini_primary_radius.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_INI_PRI_RADIUS_SERVER); + if (attr == &iscsi_boot_attr_ini_secondary_radius.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_INI_SEC_RADIUS_SERVER); + if (attr == &iscsi_boot_attr_ini_name.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_INI_INITIATOR_NAME); + + return 0; +} + +static struct attribute_group iscsi_boot_initiator_attr_group = { + .attrs = initiator_attrs, + .is_visible = iscsi_boot_ini_attr_is_visible, +}; + +/* iBFT ACPI Table attributes */ +iscsi_boot_rd_attr(acpitbl_signature, signature, ISCSI_BOOT_ACPITBL_SIGNATURE); 
+iscsi_boot_rd_attr(acpitbl_oem_id, oem_id, ISCSI_BOOT_ACPITBL_OEM_ID); +iscsi_boot_rd_attr(acpitbl_oem_table_id, oem_table_id, + ISCSI_BOOT_ACPITBL_OEM_TABLE_ID); + +static struct attribute *acpitbl_attrs[] = { + &iscsi_boot_attr_acpitbl_signature.attr, + &iscsi_boot_attr_acpitbl_oem_id.attr, + &iscsi_boot_attr_acpitbl_oem_table_id.attr, + NULL +}; + +static umode_t iscsi_boot_acpitbl_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct iscsi_boot_kobj *boot_kobj = + container_of(kobj, struct iscsi_boot_kobj, kobj); + + if (attr == &iscsi_boot_attr_acpitbl_signature.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ACPITBL_SIGNATURE); + if (attr == &iscsi_boot_attr_acpitbl_oem_id.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ACPITBL_OEM_ID); + if (attr == &iscsi_boot_attr_acpitbl_oem_table_id.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ACPITBL_OEM_TABLE_ID); + return 0; +} + +static struct attribute_group iscsi_boot_acpitbl_attr_group = { + .attrs = acpitbl_attrs, + .is_visible = iscsi_boot_acpitbl_attr_is_visible, +}; + +static struct iscsi_boot_kobj * +iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset, + struct attribute_group *attr_group, + const char *name, int index, void *data, + ssize_t (*show) (void *data, int type, char *buf), + umode_t (*is_visible) (void *data, int type), + void (*release) (void *data)) +{ + struct iscsi_boot_kobj *boot_kobj; + + boot_kobj = kzalloc(sizeof(*boot_kobj), GFP_KERNEL); + if (!boot_kobj) + return NULL; + INIT_LIST_HEAD(&boot_kobj->list); + + boot_kobj->kobj.kset = boot_kset->kset; + if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype, + NULL, name, index)) { + kobject_put(&boot_kobj->kobj); + return NULL; + } + boot_kobj->data = data; + boot_kobj->show = show; + boot_kobj->is_visible = is_visible; + boot_kobj->release = release; + + if (sysfs_create_group(&boot_kobj->kobj, attr_group)) { + /* + * We do not want to free this because the caller + * will assume that since the creation call failed + * the boot kobj was not setup and the normal release + * path is not being run. + */ + boot_kobj->release = NULL; + kobject_put(&boot_kobj->kobj); + return NULL; + } + boot_kobj->attr_group = attr_group; + + kobject_uevent(&boot_kobj->kobj, KOBJ_ADD); + /* Nothing broke so lets add it to the list. */ + list_add_tail(&boot_kobj->list, &boot_kset->kobj_list); + return boot_kobj; +} + +static void iscsi_boot_remove_kobj(struct iscsi_boot_kobj *boot_kobj) +{ + list_del(&boot_kobj->list); + sysfs_remove_group(&boot_kobj->kobj, boot_kobj->attr_group); + kobject_put(&boot_kobj->kobj); +} + +/** + * iscsi_boot_create_target() - create boot target sysfs dir + * @boot_kset: boot kset + * @index: the target id + * @data: driver specific data for target + * @show: attr show function + * @is_visible: attr visibility function + * @release: release function + * + * Note: The boot sysfs lib will free the data passed in for the caller + * when all refs to the target kobject have been released. 
+ */ +struct iscsi_boot_kobj * +iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + umode_t (*is_visible) (void *data, int type), + void (*release) (void *data)) +{ + return iscsi_boot_create_kobj(boot_kset, &iscsi_boot_target_attr_group, + "target%d", index, data, show, is_visible, + release); +} +EXPORT_SYMBOL_GPL(iscsi_boot_create_target); + +/** + * iscsi_boot_create_initiator() - create boot initiator sysfs dir + * @boot_kset: boot kset + * @index: the initiator id + * @data: driver specific data + * @show: attr show function + * @is_visible: attr visibility function + * @release: release function + * + * Note: The boot sysfs lib will free the data passed in for the caller + * when all refs to the initiator kobject have been released. + */ +struct iscsi_boot_kobj * +iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + umode_t (*is_visible) (void *data, int type), + void (*release) (void *data)) +{ + return iscsi_boot_create_kobj(boot_kset, + &iscsi_boot_initiator_attr_group, + "initiator", index, data, show, + is_visible, release); +} +EXPORT_SYMBOL_GPL(iscsi_boot_create_initiator); + +/** + * iscsi_boot_create_ethernet() - create boot ethernet sysfs dir + * @boot_kset: boot kset + * @index: the ethernet device id + * @data: driver specific data + * @show: attr show function + * @is_visible: attr visibility function + * @release: release function + * + * Note: The boot sysfs lib will free the data passed in for the caller + * when all refs to the ethernet kobject have been released. + */ +struct iscsi_boot_kobj * +iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + umode_t (*is_visible) (void *data, int type), + void (*release) (void *data)) +{ + return iscsi_boot_create_kobj(boot_kset, + &iscsi_boot_ethernet_attr_group, + "ethernet%d", index, data, show, + is_visible, release); +} +EXPORT_SYMBOL_GPL(iscsi_boot_create_ethernet); + +/** + * iscsi_boot_create_acpitbl() - create boot acpi table sysfs dir + * @boot_kset: boot kset + * @index: not used + * @data: driver specific data + * @show: attr show function + * @is_visible: attr visibility function + * @release: release function + * + * Note: The boot sysfs lib will free the data passed in for the caller + * when all refs to the acpitbl kobject have been released. 
+ */ +struct iscsi_boot_kobj * +iscsi_boot_create_acpitbl(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show)(void *data, int type, char *buf), + umode_t (*is_visible)(void *data, int type), + void (*release)(void *data)) +{ + return iscsi_boot_create_kobj(boot_kset, + &iscsi_boot_acpitbl_attr_group, + "acpi_header", index, data, show, + is_visible, release); +} +EXPORT_SYMBOL_GPL(iscsi_boot_create_acpitbl); + +/** + * iscsi_boot_create_kset() - creates root sysfs tree + * @set_name: name of root dir + */ +struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name) +{ + struct iscsi_boot_kset *boot_kset; + + boot_kset = kzalloc(sizeof(*boot_kset), GFP_KERNEL); + if (!boot_kset) + return NULL; + + boot_kset->kset = kset_create_and_add(set_name, NULL, firmware_kobj); + if (!boot_kset->kset) { + kfree(boot_kset); + return NULL; + } + + INIT_LIST_HEAD(&boot_kset->kobj_list); + return boot_kset; +} +EXPORT_SYMBOL_GPL(iscsi_boot_create_kset); + +/** + * iscsi_boot_create_host_kset() - creates root sysfs tree for a scsi host + * @hostno: host number of scsi host + */ +struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno) +{ + struct iscsi_boot_kset *boot_kset; + char *set_name; + + set_name = kasprintf(GFP_KERNEL, "iscsi_boot%u", hostno); + if (!set_name) + return NULL; + + boot_kset = iscsi_boot_create_kset(set_name); + kfree(set_name); + return boot_kset; +} +EXPORT_SYMBOL_GPL(iscsi_boot_create_host_kset); + +/** + * iscsi_boot_destroy_kset() - destroy kset and kobjects under it + * @boot_kset: boot kset + * + * This will remove the kset and kobjects and attrs under it. + */ +void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset) +{ + struct iscsi_boot_kobj *boot_kobj, *tmp_kobj; + + if (!boot_kset) + return; + + list_for_each_entry_safe(boot_kobj, tmp_kobj, + &boot_kset->kobj_list, list) + iscsi_boot_remove_kobj(boot_kobj); + + kset_unregister(boot_kset->kset); + kfree(boot_kset); +} +EXPORT_SYMBOL_GPL(iscsi_boot_destroy_kset); diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c new file mode 100644 index 000000000..8e14cea15 --- /dev/null +++ b/drivers/scsi/iscsi_tcp.c @@ -0,0 +1,1151 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * iSCSI Initiator over TCP/IP Data-Path + * + * Copyright (C) 2004 Dmitry Yusupov + * Copyright (C) 2004 Alex Aizman + * Copyright (C) 2005 - 2006 Mike Christie + * Copyright (C) 2006 Red Hat, Inc. All rights reserved. + * maintained by open-iscsi@googlegroups.com + * + * See the file COPYING included with this distribution for more details. 
+ * + * Credits: + * Christoph Hellwig + * FUJITA Tomonori + * Arne Redlich + * Zhenyu Wang + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iscsi_tcp.h" + +MODULE_AUTHOR("Mike Christie , " + "Dmitry Yusupov , " + "Alex Aizman "); +MODULE_DESCRIPTION("iSCSI/TCP data-path"); +MODULE_LICENSE("GPL"); + +static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport; +static const struct scsi_host_template iscsi_sw_tcp_sht; +static struct iscsi_transport iscsi_sw_tcp_transport; + +static unsigned int iscsi_max_lun = ~0; +module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); + +static bool iscsi_recv_from_iscsi_q; +module_param_named(recv_from_iscsi_q, iscsi_recv_from_iscsi_q, bool, 0644); +MODULE_PARM_DESC(recv_from_iscsi_q, "Set to true to read iSCSI data/headers from the iscsi_q workqueue. The default is false which will perform reads from the network softirq context."); + +static int iscsi_sw_tcp_dbg; +module_param_named(debug_iscsi_tcp, iscsi_sw_tcp_dbg, int, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug_iscsi_tcp, "Turn on debugging for iscsi_tcp module " + "Set to 1 to turn on, and zero to turn off. Default is off."); + +#define ISCSI_SW_TCP_DBG(_conn, dbg_fmt, arg...) \ + do { \ + if (iscsi_sw_tcp_dbg) \ + iscsi_conn_printk(KERN_INFO, _conn, \ + "%s " dbg_fmt, \ + __func__, ##arg); \ + iscsi_dbg_trace(trace_iscsi_dbg_sw_tcp, \ + &(_conn)->cls_conn->dev, \ + "%s " dbg_fmt, __func__, ##arg);\ + } while (0); + + +/** + * iscsi_sw_tcp_recv - TCP receive in sendfile fashion + * @rd_desc: read descriptor + * @skb: socket buffer + * @offset: offset in skb + * @len: skb->len - offset + */ +static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, + unsigned int offset, size_t len) +{ + struct iscsi_conn *conn = rd_desc->arg.data; + unsigned int consumed, total_consumed = 0; + int status; + + ISCSI_SW_TCP_DBG(conn, "in %d bytes\n", skb->len - offset); + + do { + status = 0; + consumed = iscsi_tcp_recv_skb(conn, skb, offset, 0, &status); + offset += consumed; + total_consumed += consumed; + } while (consumed != 0 && status != ISCSI_TCP_SKB_DONE); + + ISCSI_SW_TCP_DBG(conn, "read %d bytes status %d\n", + skb->len - offset, status); + return total_consumed; +} + +/** + * iscsi_sw_sk_state_check - check socket state + * @sk: socket + * + * If the socket is in CLOSE or CLOSE_WAIT we should + * not close the connection if there is still some + * data pending. + * + * Must be called with sk_callback_lock. + */ +static inline int iscsi_sw_sk_state_check(struct sock *sk) +{ + struct iscsi_conn *conn = sk->sk_user_data; + + if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) && + (conn->session->state != ISCSI_STATE_LOGGING_OUT) && + !atomic_read(&sk->sk_rmem_alloc)) { + ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n"); + iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE); + return -ECONNRESET; + } + return 0; +} + +static void iscsi_sw_tcp_recv_data(struct iscsi_conn *conn) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct sock *sk = tcp_sw_conn->sock->sk; + read_descriptor_t rd_desc; + + /* + * Use rd_desc to pass 'conn' to iscsi_tcp_recv. + * We set count to 1 because we want the network layer to + * hand us all the skbs that are available. 
iscsi_tcp_recv + * handled pdus that cross buffers or pdus that still need data. + */ + rd_desc.arg.data = conn; + rd_desc.count = 1; + + tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv); + + /* If we had to (atomically) map a highmem page, + * unmap it now. */ + iscsi_tcp_segment_unmap(&tcp_conn->in.segment); + + iscsi_sw_sk_state_check(sk); +} + +static void iscsi_sw_tcp_recv_data_work(struct work_struct *work) +{ + struct iscsi_conn *conn = container_of(work, struct iscsi_conn, + recvwork); + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct sock *sk = tcp_sw_conn->sock->sk; + + lock_sock(sk); + iscsi_sw_tcp_recv_data(conn); + release_sock(sk); +} + +static void iscsi_sw_tcp_data_ready(struct sock *sk) +{ + struct iscsi_sw_tcp_conn *tcp_sw_conn; + struct iscsi_tcp_conn *tcp_conn; + struct iscsi_conn *conn; + + trace_sk_data_ready(sk); + + read_lock_bh(&sk->sk_callback_lock); + conn = sk->sk_user_data; + if (!conn) { + read_unlock_bh(&sk->sk_callback_lock); + return; + } + tcp_conn = conn->dd_data; + tcp_sw_conn = tcp_conn->dd_data; + + if (tcp_sw_conn->queue_recv) + iscsi_conn_queue_recv(conn); + else + iscsi_sw_tcp_recv_data(conn); + read_unlock_bh(&sk->sk_callback_lock); +} + +static void iscsi_sw_tcp_state_change(struct sock *sk) +{ + struct iscsi_tcp_conn *tcp_conn; + struct iscsi_sw_tcp_conn *tcp_sw_conn; + struct iscsi_conn *conn; + void (*old_state_change)(struct sock *); + + read_lock_bh(&sk->sk_callback_lock); + conn = sk->sk_user_data; + if (!conn) { + read_unlock_bh(&sk->sk_callback_lock); + return; + } + + iscsi_sw_sk_state_check(sk); + + tcp_conn = conn->dd_data; + tcp_sw_conn = tcp_conn->dd_data; + old_state_change = tcp_sw_conn->old_state_change; + + read_unlock_bh(&sk->sk_callback_lock); + + old_state_change(sk); +} + +/** + * iscsi_sw_tcp_write_space - Called when more output buffer space is available + * @sk: socket space is available for + **/ +static void iscsi_sw_tcp_write_space(struct sock *sk) +{ + struct iscsi_conn *conn; + struct iscsi_tcp_conn *tcp_conn; + struct iscsi_sw_tcp_conn *tcp_sw_conn; + void (*old_write_space)(struct sock *); + + read_lock_bh(&sk->sk_callback_lock); + conn = sk->sk_user_data; + if (!conn) { + read_unlock_bh(&sk->sk_callback_lock); + return; + } + + tcp_conn = conn->dd_data; + tcp_sw_conn = tcp_conn->dd_data; + old_write_space = tcp_sw_conn->old_write_space; + read_unlock_bh(&sk->sk_callback_lock); + + old_write_space(sk); + + ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n"); + iscsi_conn_queue_xmit(conn); +} + +static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct sock *sk = tcp_sw_conn->sock->sk; + + /* assign new callbacks */ + write_lock_bh(&sk->sk_callback_lock); + sk->sk_user_data = conn; + tcp_sw_conn->old_data_ready = sk->sk_data_ready; + tcp_sw_conn->old_state_change = sk->sk_state_change; + tcp_sw_conn->old_write_space = sk->sk_write_space; + sk->sk_data_ready = iscsi_sw_tcp_data_ready; + sk->sk_state_change = iscsi_sw_tcp_state_change; + sk->sk_write_space = iscsi_sw_tcp_write_space; + write_unlock_bh(&sk->sk_callback_lock); +} + +static void +iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct sock *sk = tcp_sw_conn->sock->sk; + + /* restore socket callbacks, see also: 
iscsi_conn_set_callbacks() */ + write_lock_bh(&sk->sk_callback_lock); + sk->sk_user_data = NULL; + sk->sk_data_ready = tcp_sw_conn->old_data_ready; + sk->sk_state_change = tcp_sw_conn->old_state_change; + sk->sk_write_space = tcp_sw_conn->old_write_space; + sk->sk_no_check_tx = 0; + write_unlock_bh(&sk->sk_callback_lock); +} + +/** + * iscsi_sw_tcp_xmit_segment - transmit segment + * @tcp_conn: the iSCSI TCP connection + * @segment: the buffer to transmnit + * + * This function transmits as much of the buffer as + * the network layer will accept, and returns the number of + * bytes transmitted. + * + * If CRC hashing is enabled, the function will compute the + * hash as it goes. When the entire segment has been transmitted, + * it will retrieve the hash value and send it as well. + */ +static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn, + struct iscsi_segment *segment) +{ + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct socket *sk = tcp_sw_conn->sock; + unsigned int copied = 0; + int r = 0; + + while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) { + struct scatterlist *sg; + struct msghdr msg = {}; + struct bio_vec bv; + unsigned int offset, copy; + + r = 0; + offset = segment->copied; + copy = segment->size - offset; + + if (segment->total_copied + segment->size < segment->total_size) + msg.msg_flags |= MSG_MORE; + + if (tcp_sw_conn->queue_recv) + msg.msg_flags |= MSG_DONTWAIT; + + if (!segment->data) { + if (!tcp_conn->iscsi_conn->datadgst_en) + msg.msg_flags |= MSG_SPLICE_PAGES; + sg = segment->sg; + offset += segment->sg_offset + sg->offset; + bvec_set_page(&bv, sg_page(sg), copy, offset); + } else { + bvec_set_virt(&bv, segment->data + offset, copy); + } + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, copy); + + r = sock_sendmsg(sk, &msg); + if (r < 0) { + iscsi_tcp_segment_unmap(segment); + return r; + } + copied += r; + } + return copied; +} + +/** + * iscsi_sw_tcp_xmit - TCP transmit + * @conn: iscsi connection + **/ +static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct iscsi_segment *segment = &tcp_sw_conn->out.segment; + unsigned int consumed = 0; + int rc = 0; + + while (1) { + rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment); + /* + * We may not have been able to send data because the conn + * is getting stopped. libiscsi will know so propagate err + * for it to do the right thing. + */ + if (rc == -EAGAIN) + return rc; + else if (rc < 0) { + rc = ISCSI_ERR_XMIT_FAILED; + goto error; + } else if (rc == 0) + break; + + consumed += rc; + + if (segment->total_copied >= segment->total_size) { + if (segment->done != NULL) { + rc = segment->done(tcp_conn, segment); + if (rc != 0) + goto error; + } + } + } + + ISCSI_SW_TCP_DBG(conn, "xmit %d bytes\n", consumed); + + conn->txdata_octets += consumed; + return consumed; + +error: + /* Transmit error. We could initiate error recovery + * here. 
*/ + ISCSI_SW_TCP_DBG(conn, "Error sending PDU, errno=%d\n", rc); + iscsi_conn_failure(conn, rc); + return -EIO; +} + +/** + * iscsi_sw_tcp_xmit_qlen - return the number of bytes queued for xmit + * @conn: iscsi connection + */ +static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct iscsi_segment *segment = &tcp_sw_conn->out.segment; + + return segment->total_copied - segment->total_size; +} + +static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + unsigned int noreclaim_flag; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + int rc = 0; + + if (!tcp_sw_conn->sock) { + iscsi_conn_printk(KERN_ERR, conn, + "Transport not bound to socket!\n"); + return -EINVAL; + } + + noreclaim_flag = memalloc_noreclaim_save(); + + while (iscsi_sw_tcp_xmit_qlen(conn)) { + rc = iscsi_sw_tcp_xmit(conn); + if (rc == 0) { + rc = -EAGAIN; + break; + } + if (rc < 0) + break; + rc = 0; + } + + memalloc_noreclaim_restore(noreclaim_flag); + return rc; +} + +/* + * This is called when we're done sending the header. + * Simply copy the data_segment to the send segment, and return. + */ +static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn, + struct iscsi_segment *segment) +{ + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + + tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment; + ISCSI_SW_TCP_DBG(tcp_conn->iscsi_conn, + "Header done. Next segment size %u total_size %u\n", + tcp_sw_conn->out.segment.size, + tcp_sw_conn->out.segment.total_size); + return 0; +} + +static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, + size_t hdrlen) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + + ISCSI_SW_TCP_DBG(conn, "%s\n", conn->hdrdgst_en ? + "digest enabled" : "digest disabled"); + + /* Clear the data segment - needs to be filled in by the + * caller using iscsi_tcp_send_data_prep() */ + memset(&tcp_sw_conn->out.data_segment, 0, + sizeof(struct iscsi_segment)); + + /* If header digest is enabled, compute the CRC and + * place the digest into the same buffer. We make + * sure that both iscsi_tcp_task and mtask have + * sufficient room. + */ + if (conn->hdrdgst_en) { + iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen, + hdr + hdrlen); + hdrlen += ISCSI_DIGEST_SIZE; + } + + /* Remember header pointer for later, when we need + * to decide whether there's a payload to go along + * with the header. */ + tcp_sw_conn->out.hdr = hdr; + + iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen, + iscsi_sw_tcp_send_hdr_done, NULL); +} + +/* + * Prepare the send buffer for the payload data. + * Padding and checksumming will all be taken care + * of by the iscsi_segment routines. + */ +static int +iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg, + unsigned int count, unsigned int offset, + unsigned int len) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct ahash_request *tx_hash = NULL; + unsigned int hdr_spec_len; + + ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len, + conn->datadgst_en ? + "digest enabled" : "digest disabled"); + + /* Make sure the datalen matches what the caller + said he would send. 
*/ + hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength); + WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); + + if (conn->datadgst_en) + tx_hash = tcp_sw_conn->tx_hash; + + return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment, + sg, count, offset, len, + NULL, tx_hash); +} + +static void +iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data, + size_t len) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct ahash_request *tx_hash = NULL; + unsigned int hdr_spec_len; + + ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ? + "digest enabled" : "digest disabled"); + + /* Make sure the datalen matches what the caller + said he would send. */ + hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength); + WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); + + if (conn->datadgst_en) + tx_hash = tcp_sw_conn->tx_hash; + + iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment, + data, len, NULL, tx_hash); +} + +static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task, + unsigned int offset, unsigned int count) +{ + struct iscsi_conn *conn = task->conn; + int err = 0; + + iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len); + + if (!count) + return 0; + + if (!task->sc) + iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count); + else { + struct scsi_data_buffer *sdb = &task->sc->sdb; + + err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl, + sdb->table.nents, offset, + count); + } + + if (err) { + /* got invalid offset/len */ + return -EIO; + } + return 0; +} + +static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode) +{ + struct iscsi_tcp_task *tcp_task = task->dd_data; + + task->hdr = task->dd_data + sizeof(*tcp_task); + task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE; + return 0; +} + +static struct iscsi_cls_conn * +iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session, + uint32_t conn_idx) +{ + struct iscsi_conn *conn; + struct iscsi_cls_conn *cls_conn; + struct iscsi_tcp_conn *tcp_conn; + struct iscsi_sw_tcp_conn *tcp_sw_conn; + struct crypto_ahash *tfm; + + cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn), + conn_idx); + if (!cls_conn) + return NULL; + conn = cls_conn->dd_data; + tcp_conn = conn->dd_data; + tcp_sw_conn = tcp_conn->dd_data; + INIT_WORK(&conn->recvwork, iscsi_sw_tcp_recv_data_work); + tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q; + + mutex_init(&tcp_sw_conn->sock_lock); + + tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + goto free_conn; + + tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL); + if (!tcp_sw_conn->tx_hash) + goto free_tfm; + ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL); + + tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL); + if (!tcp_sw_conn->rx_hash) + goto free_tx_hash; + ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL); + + tcp_conn->rx_hash = tcp_sw_conn->rx_hash; + + return cls_conn; + +free_tx_hash: + ahash_request_free(tcp_sw_conn->tx_hash); +free_tfm: + crypto_free_ahash(tfm); +free_conn: + iscsi_conn_printk(KERN_ERR, conn, + "Could not create connection due to crc32c " + "loading error. 
Make sure the crc32c " + "module is built as a module or into the " + "kernel\n"); + iscsi_tcp_conn_teardown(cls_conn); + return NULL; +} + +static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct socket *sock = tcp_sw_conn->sock; + + /* + * The iscsi transport class will make sure we are not called in + * parallel with start, stop, bind and destroys. However, this can be + * called twice if userspace does a stop then a destroy. + */ + if (!sock) + return; + + /* + * Make sure we start socket shutdown now in case userspace is up + * but delayed in releasing the socket. + */ + kernel_sock_shutdown(sock, SHUT_RDWR); + + sock_hold(sock->sk); + iscsi_sw_tcp_conn_restore_callbacks(conn); + sock_put(sock->sk); + + iscsi_suspend_rx(conn); + + mutex_lock(&tcp_sw_conn->sock_lock); + tcp_sw_conn->sock = NULL; + mutex_unlock(&tcp_sw_conn->sock_lock); + sockfd_put(sock); +} + +static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + + iscsi_sw_tcp_release_conn(conn); + + ahash_request_free(tcp_sw_conn->rx_hash); + if (tcp_sw_conn->tx_hash) { + struct crypto_ahash *tfm; + + tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash); + ahash_request_free(tcp_sw_conn->tx_hash); + crypto_free_ahash(tfm); + } + + iscsi_tcp_conn_teardown(cls_conn); +} + +static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct socket *sock = tcp_sw_conn->sock; + + /* userspace may have goofed up and not bound us */ + if (!sock) + return; + + sock->sk->sk_err = EIO; + wake_up_interruptible(sk_sleep(sock->sk)); + + /* stop xmit side */ + iscsi_suspend_tx(conn); + + /* stop recv side and release socket */ + iscsi_sw_tcp_release_conn(conn); + + iscsi_conn_stop(cls_conn, flag); +} + +static int +iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, + int is_leading) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct sock *sk; + struct socket *sock; + int err; + + /* lookup for existing socket */ + sock = sockfd_lookup((int)transport_eph, &err); + if (!sock) { + iscsi_conn_printk(KERN_ERR, conn, + "sockfd_lookup failed %d\n", err); + return -EEXIST; + } + + err = -EINVAL; + if (!sk_is_tcp(sock->sk)) + goto free_socket; + + err = iscsi_conn_bind(cls_session, cls_conn, is_leading); + if (err) + goto free_socket; + + mutex_lock(&tcp_sw_conn->sock_lock); + /* bind iSCSI connection and socket */ + tcp_sw_conn->sock = sock; + mutex_unlock(&tcp_sw_conn->sock_lock); + + /* setup Socket parameters */ + sk = sock->sk; + sk->sk_reuse = SK_CAN_REUSE; + sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */ + sk->sk_allocation = GFP_ATOMIC; + sk->sk_use_task_frag = false; + sk_set_memalloc(sk); + sock_no_linger(sk); + + iscsi_sw_tcp_conn_set_callbacks(conn); + /* + * set receive state machine into initial state + */ + iscsi_tcp_hdr_recv_prep(tcp_conn); + return 0; + +free_socket: + sockfd_put(sock); + return err; +} + +static int iscsi_sw_tcp_conn_set_param(struct 
iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf, + int buflen) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + + switch(param) { + case ISCSI_PARAM_HDRDGST_EN: + iscsi_set_param(cls_conn, param, buf, buflen); + break; + case ISCSI_PARAM_DATADGST_EN: + mutex_lock(&tcp_sw_conn->sock_lock); + if (!tcp_sw_conn->sock) { + mutex_unlock(&tcp_sw_conn->sock_lock); + return -ENOTCONN; + } + iscsi_set_param(cls_conn, param, buf, buflen); + mutex_unlock(&tcp_sw_conn->sock_lock); + break; + case ISCSI_PARAM_MAX_R2T: + return iscsi_tcp_set_max_r2t(conn, buf); + default: + return iscsi_set_param(cls_conn, param, buf, buflen); + } + + return 0; +} + +static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn; + struct iscsi_tcp_conn *tcp_conn; + struct sockaddr_in6 addr; + struct socket *sock; + int rc; + + switch(param) { + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_LOCAL_PORT: + spin_lock_bh(&conn->session->frwd_lock); + if (!conn->session->leadconn) { + spin_unlock_bh(&conn->session->frwd_lock); + return -ENOTCONN; + } + /* + * The conn has been setup and bound, so just grab a ref + * incase a destroy runs while we are in the net layer. + */ + iscsi_get_conn(conn->cls_conn); + spin_unlock_bh(&conn->session->frwd_lock); + + tcp_conn = conn->dd_data; + tcp_sw_conn = tcp_conn->dd_data; + + mutex_lock(&tcp_sw_conn->sock_lock); + sock = tcp_sw_conn->sock; + if (!sock) { + rc = -ENOTCONN; + goto sock_unlock; + } + + if (param == ISCSI_PARAM_LOCAL_PORT) + rc = kernel_getsockname(sock, + (struct sockaddr *)&addr); + else + rc = kernel_getpeername(sock, + (struct sockaddr *)&addr); +sock_unlock: + mutex_unlock(&tcp_sw_conn->sock_lock); + iscsi_put_conn(conn->cls_conn); + if (rc < 0) + return rc; + + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + &addr, param, buf); + default: + return iscsi_conn_get_param(cls_conn, param, buf); + } + + return 0; +} + +static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, + enum iscsi_host_param param, char *buf) +{ + struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost); + struct iscsi_session *session; + struct iscsi_conn *conn; + struct iscsi_tcp_conn *tcp_conn; + struct iscsi_sw_tcp_conn *tcp_sw_conn; + struct sockaddr_in6 addr; + struct socket *sock; + int rc; + + switch (param) { + case ISCSI_HOST_PARAM_IPADDRESS: + session = tcp_sw_host->session; + if (!session) + return -ENOTCONN; + + spin_lock_bh(&session->frwd_lock); + conn = session->leadconn; + if (!conn) { + spin_unlock_bh(&session->frwd_lock); + return -ENOTCONN; + } + tcp_conn = conn->dd_data; + tcp_sw_conn = tcp_conn->dd_data; + /* + * The conn has been setup and bound, so just grab a ref + * incase a destroy runs while we are in the net layer. 
+ */ + iscsi_get_conn(conn->cls_conn); + spin_unlock_bh(&session->frwd_lock); + + mutex_lock(&tcp_sw_conn->sock_lock); + sock = tcp_sw_conn->sock; + if (!sock) + rc = -ENOTCONN; + else + rc = kernel_getsockname(sock, (struct sockaddr *)&addr); + mutex_unlock(&tcp_sw_conn->sock_lock); + iscsi_put_conn(conn->cls_conn); + if (rc < 0) + return rc; + + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + &addr, + (enum iscsi_param)param, buf); + default: + return iscsi_host_get_param(shost, param, buf); + } + + return 0; +} + +static void +iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn, + struct iscsi_stats *stats) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + + stats->custom_length = 3; + strcpy(stats->custom[0].desc, "tx_sendpage_failures"); + stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt; + strcpy(stats->custom[1].desc, "rx_discontiguous_hdr"); + stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt; + strcpy(stats->custom[2].desc, "eh_abort_cnt"); + stats->custom[2].value = conn->eh_abort_cnt; + + iscsi_tcp_conn_get_stats(cls_conn, stats); +} + +static struct iscsi_cls_session * +iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, + uint16_t qdepth, uint32_t initial_cmdsn) +{ + struct iscsi_cls_session *cls_session; + struct iscsi_session *session; + struct iscsi_sw_tcp_host *tcp_sw_host; + struct Scsi_Host *shost; + int rc; + + if (ep) { + printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep); + return NULL; + } + + shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, + sizeof(struct iscsi_sw_tcp_host), 1); + if (!shost) + return NULL; + shost->transportt = iscsi_sw_tcp_scsi_transport; + shost->cmd_per_lun = qdepth; + shost->max_lun = iscsi_max_lun; + shost->max_id = 0; + shost->max_channel = 0; + shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE; + + rc = iscsi_host_get_max_scsi_cmds(shost, cmds_max); + if (rc < 0) + goto free_host; + shost->can_queue = rc; + + if (iscsi_host_add(shost, NULL)) + goto free_host; + + cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost, + cmds_max, 0, + sizeof(struct iscsi_tcp_task) + + sizeof(struct iscsi_sw_tcp_hdrbuf), + initial_cmdsn, 0); + if (!cls_session) + goto remove_host; + session = cls_session->dd_data; + + if (iscsi_tcp_r2tpool_alloc(session)) + goto remove_session; + + /* We are now fully setup so expose the session to sysfs. */ + tcp_sw_host = iscsi_host_priv(shost); + tcp_sw_host->session = session; + return cls_session; + +remove_session: + iscsi_session_teardown(cls_session); +remove_host: + iscsi_host_remove(shost, false); +free_host: + iscsi_host_free(shost); + return NULL; +} + +static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session) +{ + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct iscsi_session *session = cls_session->dd_data; + + if (WARN_ON_ONCE(session->leadconn)) + return; + + iscsi_session_remove(cls_session); + /* + * Our get_host_param needs to access the session, so remove the + * host from sysfs before freeing the session to make sure userspace + * is no longer accessing the callout. 
+ */ + iscsi_host_remove(shost, false); + + iscsi_tcp_r2tpool_free(cls_session->dd_data); + + iscsi_session_free(cls_session); + iscsi_host_free(shost); +} + +static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param) +{ + switch (param_type) { + case ISCSI_HOST_PARAM: + switch (param) { + case ISCSI_HOST_PARAM_NETDEV_NAME: + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + case ISCSI_HOST_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + case ISCSI_PARAM: + switch (param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + case ISCSI_PARAM_HDRDGST_EN: + case ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_LOCAL_PORT: + case ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_PERSISTENT_ADDRESS: + case ISCSI_PARAM_PERSISTENT_PORT: + case ISCSI_PARAM_PING_TMO: + case ISCSI_PARAM_RECV_TMO: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_MAX_R2T: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_FIRST_BURST: + case ISCSI_PARAM_MAX_BURST: + case ISCSI_PARAM_PDU_INORDER_EN: + case ISCSI_PARAM_DATASEQ_INORDER_EN: + case ISCSI_PARAM_ERL: + case ISCSI_PARAM_TARGET_NAME: + case ISCSI_PARAM_TPGT: + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + case ISCSI_PARAM_USERNAME_IN: + case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_FAST_ABORT: + case ISCSI_PARAM_ABORT_TMO: + case ISCSI_PARAM_LU_RESET_TMO: + case ISCSI_PARAM_TGT_RESET_TMO: + case ISCSI_PARAM_IFACE_NAME: + case ISCSI_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + } + + return 0; +} + +static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev) +{ + struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host); + struct iscsi_session *session = tcp_sw_host->session; + struct iscsi_conn *conn = session->leadconn; + + if (conn->datadgst_en) + blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, + sdev->request_queue); + blk_queue_dma_alignment(sdev->request_queue, 0); + return 0; +} + +static const struct scsi_host_template iscsi_sw_tcp_sht = { + .module = THIS_MODULE, + .name = "iSCSI Initiator over TCP/IP", + .queuecommand = iscsi_queuecommand, + .change_queue_depth = scsi_change_queue_depth, + .can_queue = ISCSI_TOTAL_CMDS_MAX, + .sg_tablesize = 4096, + .max_sectors = 0xFFFF, + .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, + .eh_timed_out = iscsi_eh_cmd_timed_out, + .eh_abort_handler = iscsi_eh_abort, + .eh_device_reset_handler= iscsi_eh_device_reset, + .eh_target_reset_handler = iscsi_eh_recover_target, + .dma_boundary = PAGE_SIZE - 1, + .slave_configure = iscsi_sw_tcp_slave_configure, + .proc_name = "iscsi_tcp", + .this_id = -1, + .track_queue_depth = 1, + .cmd_size = sizeof(struct iscsi_cmd), +}; + +static struct iscsi_transport iscsi_sw_tcp_transport = { + .owner = THIS_MODULE, + .name = "tcp", + .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST + | CAP_DATADGST, + /* session management */ + .create_session = iscsi_sw_tcp_session_create, + .destroy_session = iscsi_sw_tcp_session_destroy, + /* connection management */ + .create_conn = iscsi_sw_tcp_conn_create, + .bind_conn = iscsi_sw_tcp_conn_bind, + .destroy_conn = iscsi_sw_tcp_conn_destroy, + .attr_is_visible = iscsi_sw_tcp_attr_is_visible, + .set_param = iscsi_sw_tcp_conn_set_param, + .get_conn_param = iscsi_sw_tcp_conn_get_param, + .get_session_param = iscsi_session_get_param, + .start_conn = iscsi_conn_start, + .stop_conn = iscsi_sw_tcp_conn_stop, + /* iscsi host params */ + .get_host_param = 
iscsi_sw_tcp_host_get_param, + .set_host_param = iscsi_host_set_param, + /* IO */ + .send_pdu = iscsi_conn_send_pdu, + .get_stats = iscsi_sw_tcp_conn_get_stats, + /* iscsi task/cmd helpers */ + .init_task = iscsi_tcp_task_init, + .xmit_task = iscsi_tcp_task_xmit, + .cleanup_task = iscsi_tcp_cleanup_task, + /* low level pdu helpers */ + .xmit_pdu = iscsi_sw_tcp_pdu_xmit, + .init_pdu = iscsi_sw_tcp_pdu_init, + .alloc_pdu = iscsi_sw_tcp_pdu_alloc, + /* recovery */ + .session_recovery_timedout = iscsi_session_recovery_timedout, +}; + +static int __init iscsi_sw_tcp_init(void) +{ + if (iscsi_max_lun < 1) { + printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n", + iscsi_max_lun); + return -EINVAL; + } + + iscsi_sw_tcp_scsi_transport = iscsi_register_transport( + &iscsi_sw_tcp_transport); + if (!iscsi_sw_tcp_scsi_transport) + return -ENODEV; + + return 0; +} + +static void __exit iscsi_sw_tcp_exit(void) +{ + iscsi_unregister_transport(&iscsi_sw_tcp_transport); +} + +module_init(iscsi_sw_tcp_init); +module_exit(iscsi_sw_tcp_exit); diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h new file mode 100644 index 000000000..89a6fc552 --- /dev/null +++ b/drivers/scsi/iscsi_tcp.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * iSCSI Initiator TCP Transport + * Copyright (C) 2004 Dmitry Yusupov + * Copyright (C) 2004 Alex Aizman + * Copyright (C) 2005 - 2006 Mike Christie + * Copyright (C) 2006 Red Hat, Inc. All rights reserved. + * maintained by open-iscsi@googlegroups.com + * + * See the file COPYING included with this distribution for more details. + */ + +#ifndef ISCSI_SW_TCP_H +#define ISCSI_SW_TCP_H + +#include +#include + +struct socket; +struct iscsi_tcp_conn; + +/* Socket connection send helper */ +struct iscsi_sw_tcp_send { + struct iscsi_hdr *hdr; + struct iscsi_segment segment; + struct iscsi_segment data_segment; +}; + +struct iscsi_sw_tcp_conn { + struct socket *sock; + /* Taken when accessing the sock from the netlink/sysfs interface */ + struct mutex sock_lock; + + struct work_struct recvwork; + bool queue_recv; + + struct iscsi_sw_tcp_send out; + /* old values for socket callbacks */ + void (*old_data_ready)(struct sock *); + void (*old_state_change)(struct sock *); + void (*old_write_space)(struct sock *); + + /* data and header digests */ + struct ahash_request *tx_hash; /* CRC32C (Tx) */ + struct ahash_request *rx_hash; /* CRC32C (Rx) */ + + /* MIB custom statistics */ + uint32_t sendpage_failures_cnt; + uint32_t discontiguous_hdr_cnt; +}; + +struct iscsi_sw_tcp_host { + struct iscsi_session *session; +}; + +struct iscsi_sw_tcp_hdrbuf { + struct iscsi_hdr hdrbuf; + char hdrextbuf[ISCSI_MAX_AHS_SIZE + + ISCSI_DIGEST_SIZE]; +}; + +#endif /* ISCSI_SW_TCP_H */ diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c new file mode 100644 index 000000000..0c842fb29 --- /dev/null +++ b/drivers/scsi/jazz_esp.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* jazz_esp.c: ESP front-end for MIPS JAZZ systems. 
+ * + * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.frankende) + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include + +#include "esp_scsi.h" + +#define DRV_MODULE_NAME "jazz_esp" +#define PFX DRV_MODULE_NAME ": " +#define DRV_VERSION "1.000" +#define DRV_MODULE_RELDATE "May 19, 2007" + +static void jazz_esp_write8(struct esp *esp, u8 val, unsigned long reg) +{ + *(volatile u8 *)(esp->regs + reg) = val; +} + +static u8 jazz_esp_read8(struct esp *esp, unsigned long reg) +{ + return *(volatile u8 *)(esp->regs + reg); +} + +static int jazz_esp_irq_pending(struct esp *esp) +{ + if (jazz_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) + return 1; + return 0; +} + +static void jazz_esp_reset_dma(struct esp *esp) +{ + vdma_disable ((int)esp->dma_regs); +} + +static void jazz_esp_dma_drain(struct esp *esp) +{ + /* nothing to do */ +} + +static void jazz_esp_dma_invalidate(struct esp *esp) +{ + vdma_disable ((int)esp->dma_regs); +} + +static void jazz_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, + u32 dma_count, int write, u8 cmd) +{ + BUG_ON(!(cmd & ESP_CMD_DMA)); + + jazz_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); + jazz_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); + vdma_disable ((int)esp->dma_regs); + if (write) + vdma_set_mode ((int)esp->dma_regs, DMA_MODE_READ); + else + vdma_set_mode ((int)esp->dma_regs, DMA_MODE_WRITE); + + vdma_set_addr ((int)esp->dma_regs, addr); + vdma_set_count ((int)esp->dma_regs, dma_count); + vdma_enable ((int)esp->dma_regs); + + scsi_esp_cmd(esp, cmd); +} + +static int jazz_esp_dma_error(struct esp *esp) +{ + u32 enable = vdma_get_enable((int)esp->dma_regs); + + if (enable & (R4030_MEM_INTR|R4030_ADDR_INTR)) + return 1; + + return 0; +} + +static const struct esp_driver_ops jazz_esp_ops = { + .esp_write8 = jazz_esp_write8, + .esp_read8 = jazz_esp_read8, + .irq_pending = jazz_esp_irq_pending, + .reset_dma = jazz_esp_reset_dma, + .dma_drain = jazz_esp_dma_drain, + .dma_invalidate = jazz_esp_dma_invalidate, + .send_dma_cmd = jazz_esp_send_dma_cmd, + .dma_error = jazz_esp_dma_error, +}; + +static int esp_jazz_probe(struct platform_device *dev) +{ + const struct scsi_host_template *tpnt = &scsi_esp_template; + struct Scsi_Host *host; + struct esp *esp; + struct resource *res; + int err; + + host = scsi_host_alloc(tpnt, sizeof(struct esp)); + + err = -ENOMEM; + if (!host) + goto fail; + + host->max_id = 8; + esp = shost_priv(host); + + esp->host = host; + esp->dev = &dev->dev; + esp->ops = &jazz_esp_ops; + + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!res) + goto fail_unlink; + + esp->regs = (void __iomem *)res->start; + if (!esp->regs) + goto fail_unlink; + + res = platform_get_resource(dev, IORESOURCE_MEM, 1); + if (!res) + goto fail_unlink; + + esp->dma_regs = (void __iomem *)res->start; + + esp->command_block = dma_alloc_coherent(esp->dev, 16, + &esp->command_block_dma, + GFP_KERNEL); + if (!esp->command_block) + goto fail_unmap_regs; + + host->irq = err = platform_get_irq(dev, 0); + if (err < 0) + goto fail_unmap_command_block; + err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp); + if (err < 0) + goto fail_unmap_command_block; + + esp->scsi_id = 7; + esp->host->this_id = esp->scsi_id; + esp->scsi_id_mask = (1 << esp->scsi_id); + esp->cfreq = 40000000; + + dev_set_drvdata(&dev->dev, esp); + + err = scsi_esp_register(esp); + if (err) + goto fail_free_irq; + + return 0; + +fail_free_irq: + 
free_irq(host->irq, esp); +fail_unmap_command_block: + dma_free_coherent(esp->dev, 16, + esp->command_block, + esp->command_block_dma); +fail_unmap_regs: +fail_unlink: + scsi_host_put(host); +fail: + return err; +} + +static int esp_jazz_remove(struct platform_device *dev) +{ + struct esp *esp = dev_get_drvdata(&dev->dev); + unsigned int irq = esp->host->irq; + + scsi_esp_unregister(esp); + + free_irq(irq, esp); + dma_free_coherent(esp->dev, 16, + esp->command_block, + esp->command_block_dma); + + scsi_host_put(esp->host); + + return 0; +} + +/* work with hotplug and coldplug */ +MODULE_ALIAS("platform:jazz_esp"); + +static struct platform_driver esp_jazz_driver = { + .probe = esp_jazz_probe, + .remove = esp_jazz_remove, + .driver = { + .name = "jazz_esp", + }, +}; +module_platform_driver(esp_jazz_driver); + +MODULE_DESCRIPTION("JAZZ ESP SCSI driver"); +MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c new file mode 100644 index 000000000..86fe19e04 --- /dev/null +++ b/drivers/scsi/lasi700.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* PARISC LASI driver for the 53c700 chip + * + * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com +**----------------------------------------------------------------------------- +** +** +**----------------------------------------------------------------------------- + */ + +/* + * Many thanks to Richard Hirst for patiently + * debugging this driver on the parisc architecture and suggesting + * many improvements and bug fixes. + * + * Thanks also go to Linuxcare Inc. for providing several PARISC + * machines for me to debug the driver on. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "53c700.h" + +MODULE_AUTHOR("James Bottomley"); +MODULE_DESCRIPTION("lasi700 SCSI Driver"); +MODULE_LICENSE("GPL"); + +#define LASI_700_SVERSION 0x00071 +#define LASI_710_SVERSION 0x00082 + +#define LASI700_ID_TABLE { \ + .hw_type = HPHW_FIO, \ + .sversion = LASI_700_SVERSION, \ + .hversion = HVERSION_ANY_ID, \ + .hversion_rev = HVERSION_REV_ANY_ID, \ +} + +#define LASI710_ID_TABLE { \ + .hw_type = HPHW_FIO, \ + .sversion = LASI_710_SVERSION, \ + .hversion = HVERSION_ANY_ID, \ + .hversion_rev = HVERSION_REV_ANY_ID, \ +} + +#define LASI700_CLOCK 25 +#define LASI710_CLOCK 40 +#define LASI_SCSI_CORE_OFFSET 0x100 + +static const struct parisc_device_id lasi700_ids[] __initconst = { + LASI700_ID_TABLE, + LASI710_ID_TABLE, + { 0 } +}; + +static struct scsi_host_template lasi700_template = { + .name = "LASI SCSI 53c700", + .proc_name = "lasi700", + .this_id = 7, + .module = THIS_MODULE, +}; +MODULE_DEVICE_TABLE(parisc, lasi700_ids); + +static int __init +lasi700_probe(struct parisc_device *dev) +{ + unsigned long base = dev->hpa.start + LASI_SCSI_CORE_OFFSET; + struct NCR_700_Host_Parameters *hostdata; + struct Scsi_Host *host; + + hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); + if (!hostdata) { + dev_printk(KERN_ERR, &dev->dev, "Failed to allocate host data\n"); + return -ENOMEM; + } + + hostdata->dev = &dev->dev; + dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); + hostdata->base = ioremap(base, 0x100); + hostdata->differential = 0; + + if (dev->id.sversion == LASI_700_SVERSION) { + hostdata->clock = LASI700_CLOCK; + hostdata->force_le_on_be = 1; + } else { + 
hostdata->clock = LASI710_CLOCK; + hostdata->force_le_on_be = 0; + hostdata->chip710 = 1; + hostdata->dmode_extra = DMODE_FC2; + hostdata->burst_length = 8; + } + + host = NCR_700_detect(&lasi700_template, hostdata, &dev->dev); + if (!host) + goto out_kfree; + host->this_id = 7; + host->base = base; + host->irq = dev->irq; + if(request_irq(dev->irq, NCR_700_intr, IRQF_SHARED, "lasi700", host)) { + printk(KERN_ERR "lasi700: request_irq failed!\n"); + goto out_put_host; + } + + dev_set_drvdata(&dev->dev, host); + scsi_scan_host(host); + + return 0; + + out_put_host: + scsi_host_put(host); + out_kfree: + iounmap(hostdata->base); + kfree(hostdata); + return -ENODEV; +} + +static void __exit +lasi700_driver_remove(struct parisc_device *dev) +{ + struct Scsi_Host *host = dev_get_drvdata(&dev->dev); + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)host->hostdata[0]; + + scsi_remove_host(host); + NCR_700_release(host); + free_irq(host->irq, host); + iounmap(hostdata->base); + kfree(hostdata); +} + +static struct parisc_driver lasi700_driver __refdata = { + .name = "lasi_scsi", + .id_table = lasi700_ids, + .probe = lasi700_probe, + .remove = __exit_p(lasi700_driver_remove), +}; + +static int __init +lasi700_init(void) +{ + return register_parisc_driver(&lasi700_driver); +} + +static void __exit +lasi700_exit(void) +{ + unregister_parisc_driver(&lasi700_driver); +} + +module_init(lasi700_init); +module_exit(lasi700_exit); diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile new file mode 100644 index 000000000..65396f86c --- /dev/null +++ b/drivers/scsi/libfc/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# $Id: Makefile + +obj-$(CONFIG_LIBFC) += libfc.o + +libfc-objs := \ + fc_libfc.o \ + fc_disc.o \ + fc_exch.o \ + fc_elsct.o \ + fc_frame.o \ + fc_lport.o \ + fc_rport.o \ + fc_fcp.o \ + fc_npiv.o diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c new file mode 100644 index 000000000..384f48ff6 --- /dev/null +++ b/drivers/scsi/libfc/fc_disc.c @@ -0,0 +1,746 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. + * + * Maintained at www.Open-FCoE.org + */ + +/* + * Target Discovery + * + * This block discovers all FC-4 remote ports, including FCP initiators. It + * also handles RSCN events and re-discovery if necessary. + */ + +/* + * DISC LOCKING + * + * The disc mutex is can be locked when acquiring rport locks, but may not + * be held when acquiring the lport lock. Refer to fc_lport.c for more + * details. 
+ */ + +#include +#include +#include +#include +#include + +#include + +#include + +#include + +#include "fc_libfc.h" + +#define FC_DISC_RETRY_LIMIT 3 /* max retries */ +#define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */ + +static void fc_disc_gpn_ft_req(struct fc_disc *); +static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); +static void fc_disc_done(struct fc_disc *, enum fc_disc_event); +static void fc_disc_timeout(struct work_struct *); +static int fc_disc_single(struct fc_lport *, struct fc_disc_port *); +static void fc_disc_restart(struct fc_disc *); + +/** + * fc_disc_stop_rports() - Delete all the remote ports associated with the lport + * @disc: The discovery job to stop remote ports on + */ +static void fc_disc_stop_rports(struct fc_disc *disc) +{ + struct fc_rport_priv *rdata; + + lockdep_assert_held(&disc->disc_mutex); + + list_for_each_entry(rdata, &disc->rports, peers) { + if (kref_get_unless_zero(&rdata->kref)) { + fc_rport_logoff(rdata); + kref_put(&rdata->kref, fc_rport_destroy); + } + } +} + +/** + * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN) + * @disc: The discovery object to which the RSCN applies + * @fp: The RSCN frame + */ +static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp) +{ + struct fc_lport *lport; + struct fc_els_rscn *rp; + struct fc_els_rscn_page *pp; + struct fc_seq_els_data rjt_data; + unsigned int len; + int redisc = 0; + enum fc_els_rscn_addr_fmt fmt; + LIST_HEAD(disc_ports); + struct fc_disc_port *dp, *next; + + lockdep_assert_held(&disc->disc_mutex); + + lport = fc_disc_lport(disc); + + FC_DISC_DBG(disc, "Received an RSCN event\n"); + + /* make sure the frame contains an RSCN message */ + rp = fc_frame_payload_get(fp, sizeof(*rp)); + if (!rp) + goto reject; + /* make sure the page length is as expected (4 bytes) */ + if (rp->rscn_page_len != sizeof(*pp)) + goto reject; + /* get the RSCN payload length */ + len = ntohs(rp->rscn_plen); + if (len < sizeof(*rp)) + goto reject; + /* make sure the frame contains the expected payload */ + rp = fc_frame_payload_get(fp, len); + if (!rp) + goto reject; + /* payload must be a multiple of the RSCN page size */ + len -= sizeof(*rp); + if (len % sizeof(*pp)) + goto reject; + + for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) { + fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT; + fmt &= ELS_RSCN_ADDR_FMT_MASK; + /* + * if we get an address format other than port + * (area, domain, fabric), then do a full discovery + */ + switch (fmt) { + case ELS_ADDR_FMT_PORT: + FC_DISC_DBG(disc, "Port address format for port " + "(%6.6x)\n", ntoh24(pp->rscn_fid)); + dp = kzalloc(sizeof(*dp), GFP_KERNEL); + if (!dp) { + redisc = 1; + break; + } + dp->lp = lport; + dp->port_id = ntoh24(pp->rscn_fid); + list_add_tail(&dp->peers, &disc_ports); + break; + case ELS_ADDR_FMT_AREA: + case ELS_ADDR_FMT_DOM: + case ELS_ADDR_FMT_FAB: + default: + FC_DISC_DBG(disc, "Address format is (%d)\n", fmt); + redisc = 1; + break; + } + } + fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL); + + /* + * If not doing a complete rediscovery, do GPN_ID on + * the individual ports mentioned in the list. + * If any of these get an error, do a full rediscovery. + * In any case, go through the list and free the entries. 
+ */ + list_for_each_entry_safe(dp, next, &disc_ports, peers) { + list_del(&dp->peers); + if (!redisc) + redisc = fc_disc_single(lport, dp); + kfree(dp); + } + if (redisc) { + FC_DISC_DBG(disc, "RSCN received: rediscovering\n"); + fc_disc_restart(disc); + } else { + FC_DISC_DBG(disc, "RSCN received: not rediscovering. " + "redisc %d state %d in_prog %d\n", + redisc, lport->state, disc->pending); + } + fc_frame_free(fp); + return; +reject: + FC_DISC_DBG(disc, "Received a bad RSCN frame\n"); + rjt_data.reason = ELS_RJT_LOGIC; + rjt_data.explan = ELS_EXPL_NONE; + fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); + fc_frame_free(fp); +} + +/** + * fc_disc_recv_req() - Handle incoming requests + * @lport: The local port receiving the request + * @fp: The request frame + * + * Locking Note: This function is called from the EM and will lock + * the disc_mutex before calling the handler for the + * request. + */ +static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp) +{ + u8 op; + struct fc_disc *disc = &lport->disc; + + op = fc_frame_payload_op(fp); + switch (op) { + case ELS_RSCN: + mutex_lock(&disc->disc_mutex); + fc_disc_recv_rscn_req(disc, fp); + mutex_unlock(&disc->disc_mutex); + break; + default: + FC_DISC_DBG(disc, "Received an unsupported request, " + "the opcode is (%x)\n", op); + fc_frame_free(fp); + break; + } +} + +/** + * fc_disc_restart() - Restart discovery + * @disc: The discovery object to be restarted + */ +static void fc_disc_restart(struct fc_disc *disc) +{ + lockdep_assert_held(&disc->disc_mutex); + + if (!disc->disc_callback) + return; + + FC_DISC_DBG(disc, "Restarting discovery\n"); + + disc->requested = 1; + if (disc->pending) + return; + + /* + * Advance disc_id. This is an arbitrary non-zero number that will + * match the value in the fc_rport_priv after discovery for all + * freshly-discovered remote ports. Avoid wrapping to zero. + */ + disc->disc_id = (disc->disc_id + 2) | 1; + disc->retry_count = 0; + fc_disc_gpn_ft_req(disc); +} + +/** + * fc_disc_start() - Start discovery on a local port + * @lport: The local port to have discovery started on + * @disc_callback: Callback function to be called when discovery is complete + */ +static void fc_disc_start(void (*disc_callback)(struct fc_lport *, + enum fc_disc_event), + struct fc_lport *lport) +{ + struct fc_disc *disc = &lport->disc; + + /* + * At this point we may have a new disc job or an existing + * one. Either way, let's lock when we make changes to it + * and send the GPN_FT request. + */ + mutex_lock(&disc->disc_mutex); + disc->disc_callback = disc_callback; + fc_disc_restart(disc); + mutex_unlock(&disc->disc_mutex); +} + +/** + * fc_disc_done() - Discovery has been completed + * @disc: The discovery context + * @event: The discovery completion status + */ +static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) +{ + struct fc_lport *lport = fc_disc_lport(disc); + struct fc_rport_priv *rdata; + + lockdep_assert_held(&disc->disc_mutex); + FC_DISC_DBG(disc, "Discovery complete\n"); + + disc->pending = 0; + if (disc->requested) { + fc_disc_restart(disc); + return; + } + + /* + * Go through all remote ports. If they were found in the latest + * discovery, reverify or log them in. Otherwise, log them out. + * Skip ports which were never discovered. These are the dNS port + * and ports which were created by PLOGI. + * + * We don't need to use the _rcu variant here as the rport list + * is protected by the disc mutex which is already held on entry. 
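fc_disc_restart() above advances disc_id with (disc_id + 2) | 1 so that every discovery pass gets a fresh, non-zero stamp: rports carrying the current value were seen this pass, and zero stays reserved for "never discovered". A tiny standalone check that the expression is always odd and therefore never wraps to zero:

/* Demonstrates that (id + 2) | 1 is always odd and never 0, even across
 * a full 16-bit wrap. Stand-alone user-space check.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t id = 0;
	uint32_t i;

	for (i = 0; i < 70000; i++) {		/* more than one 16-bit wrap */
		id = (uint16_t)((id + 2) | 1);
		assert(id != 0);		/* "never discovered" stays free */
		assert(id & 1);			/* always odd */
	}
	printf("final disc_id after %u passes: %u\n",
	       (unsigned int)i, (unsigned int)id);
	return 0;
}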
+ */ + list_for_each_entry(rdata, &disc->rports, peers) { + if (!kref_get_unless_zero(&rdata->kref)) + continue; + if (rdata->disc_id) { + if (rdata->disc_id == disc->disc_id) + fc_rport_login(rdata); + else + fc_rport_logoff(rdata); + } + kref_put(&rdata->kref, fc_rport_destroy); + } + mutex_unlock(&disc->disc_mutex); + disc->disc_callback(lport, event); + mutex_lock(&disc->disc_mutex); +} + +/** + * fc_disc_error() - Handle error on dNS request + * @disc: The discovery context + * @fp: The error code encoded as a frame pointer + */ +static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) +{ + struct fc_lport *lport = fc_disc_lport(disc); + unsigned long delay = 0; + + FC_DISC_DBG(disc, "Error %d, retries %d/%d\n", + PTR_ERR_OR_ZERO(fp), disc->retry_count, + FC_DISC_RETRY_LIMIT); + + if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { + /* + * Memory allocation failure, or the exchange timed out, + * retry after delay. + */ + if (disc->retry_count < FC_DISC_RETRY_LIMIT) { + /* go ahead and retry */ + if (!fp) + delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY); + else { + delay = msecs_to_jiffies(lport->e_d_tov); + + /* timeout faster first time */ + if (!disc->retry_count) + delay /= 4; + } + disc->retry_count++; + schedule_delayed_work(&disc->disc_work, delay); + } else + fc_disc_done(disc, DISC_EV_FAILED); + } else if (PTR_ERR(fp) == -FC_EX_CLOSED) { + /* + * if discovery fails due to lport reset, clear + * pending flag so that subsequent discovery can + * continue + */ + disc->pending = 0; + } +} + +/** + * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request + * @disc: The discovery context + */ +static void fc_disc_gpn_ft_req(struct fc_disc *disc) +{ + struct fc_frame *fp; + struct fc_lport *lport = fc_disc_lport(disc); + + lockdep_assert_held(&disc->disc_mutex); + + WARN_ON(!fc_lport_test_ready(lport)); + + disc->pending = 1; + disc->requested = 0; + + disc->buf_len = 0; + disc->seq_count = 0; + fp = fc_frame_alloc(lport, + sizeof(struct fc_ct_hdr) + + sizeof(struct fc_ns_gid_ft)); + if (!fp) + goto err; + + if (lport->tt.elsct_send(lport, 0, fp, + FC_NS_GPN_FT, + fc_disc_gpn_ft_resp, + disc, 3 * lport->r_a_tov)) + return; +err: + fc_disc_error(disc, NULL); +} + +/** + * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response. + * @disc: The discovery context + * @buf: The GPN_FT response buffer + * @len: The size of response buffer + * + * Goes through the list of IDs and names resulting from a request. + */ +static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) +{ + struct fc_lport *lport; + struct fc_gpn_ft_resp *np; + char *bp; + size_t plen; + size_t tlen; + int error = 0; + struct fc_rport_identifiers ids; + struct fc_rport_priv *rdata; + + lport = fc_disc_lport(disc); + disc->seq_count++; + + /* + * Handle partial name record left over from previous call. + */ + bp = buf; + plen = len; + np = (struct fc_gpn_ft_resp *)bp; + tlen = disc->buf_len; + disc->buf_len = 0; + if (tlen) { + WARN_ON(tlen >= sizeof(*np)); + plen = sizeof(*np) - tlen; + WARN_ON(plen <= 0); + WARN_ON(plen >= sizeof(*np)); + if (plen > len) + plen = len; + np = &disc->partial_buf; + memcpy((char *)np + tlen, bp, plen); + + /* + * Set bp so that the loop below will advance it to the + * first valid full name element. + */ + bp -= tlen; + len += tlen; + plen += tlen; + disc->buf_len = (unsigned char) plen; + if (plen == sizeof(*np)) + disc->buf_len = 0; + } + + /* + * Handle full name records, including the one filled from above. 
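fc_disc_error() above retries a failed GPN_FT a bounded number of times: an allocation failure waits the fixed FC_DISC_RETRY_DELAY, an exchange timeout waits roughly E_D_TOV, and the very first timeout retry waits only a quarter of that so a transient glitch is recovered quickly. A user-space sketch of that delay policy follows; it uses milliseconds instead of jiffies and the helper name is invented.

/* Sketch of the GPN_FT retry-delay policy: fixed delay after an allocation
 * failure, E_D_TOV after a timeout, and a shortened first timeout retry.
 */
#include <stdio.h>

#define RETRY_LIMIT	3
#define RETRY_DELAY_MS	500UL

/* Returns the delay in ms before the next retry, or -1 to give up. */
static long next_retry_delay(int alloc_failed, unsigned long e_d_tov_ms,
			     unsigned int retry_count)
{
	unsigned long delay;

	if (retry_count >= RETRY_LIMIT)
		return -1;			/* report DISC_EV_FAILED */

	if (alloc_failed) {
		delay = RETRY_DELAY_MS;		/* no frame was ever sent */
	} else {
		delay = e_d_tov_ms;		/* exchange timed out */
		if (retry_count == 0)
			delay /= 4;		/* retry the first timeout faster */
	}
	return (long)delay;
}

int main(void)
{
	unsigned int retry;

	for (retry = 0; retry <= RETRY_LIMIT; retry++)
		printf("retry %u -> %ld ms\n", retry,
		       next_retry_delay(0, 2000, retry));
	return 0;
}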
+ * Normally, np == bp and plen == len, but from the partial case above, + * bp, len describe the overall buffer, and np, plen describe the + * partial buffer, which if would usually be full now. + * After the first time through the loop, things return to "normal". + */ + while (plen >= sizeof(*np)) { + ids.port_id = ntoh24(np->fp_fid); + ids.port_name = ntohll(np->fp_wwpn); + + if (ids.port_id != lport->port_id && + ids.port_name != lport->wwpn) { + rdata = fc_rport_create(lport, ids.port_id); + if (rdata) { + rdata->ids.port_name = ids.port_name; + rdata->disc_id = disc->disc_id; + } else { + printk(KERN_WARNING "libfc: Failed to allocate " + "memory for the newly discovered port " + "(%6.6x)\n", ids.port_id); + error = -ENOMEM; + } + } + + if (np->fp_flags & FC_NS_FID_LAST) { + fc_disc_done(disc, DISC_EV_SUCCESS); + len = 0; + break; + } + len -= sizeof(*np); + bp += sizeof(*np); + np = (struct fc_gpn_ft_resp *)bp; + plen = len; + } + + /* + * Save any partial record at the end of the buffer for next time. + */ + if (error == 0 && len > 0 && len < sizeof(*np)) { + if (np != &disc->partial_buf) { + FC_DISC_DBG(disc, "Partial buffer remains " + "for discovery\n"); + memcpy(&disc->partial_buf, np, len); + } + disc->buf_len = (unsigned char) len; + } + return error; +} + +/** + * fc_disc_timeout() - Handler for discovery timeouts + * @work: Structure holding discovery context that needs to retry discovery + */ +static void fc_disc_timeout(struct work_struct *work) +{ + struct fc_disc *disc = container_of(work, + struct fc_disc, + disc_work.work); + mutex_lock(&disc->disc_mutex); + fc_disc_gpn_ft_req(disc); + mutex_unlock(&disc->disc_mutex); +} + +/** + * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT) + * @sp: The sequence that the GPN_FT response was received on + * @fp: The GPN_FT response frame + * @disc_arg: The discovery context + * + * Locking Note: This function is called without disc mutex held, and + * should do all its processing with the mutex held + */ +static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, + void *disc_arg) +{ + struct fc_disc *disc = disc_arg; + struct fc_ct_hdr *cp; + struct fc_frame_header *fh; + enum fc_disc_event event = DISC_EV_NONE; + unsigned int seq_cnt; + unsigned int len; + int error = 0; + + mutex_lock(&disc->disc_mutex); + FC_DISC_DBG(disc, "Received a GPN_FT response\n"); + + if (IS_ERR(fp)) { + fc_disc_error(disc, fp); + mutex_unlock(&disc->disc_mutex); + return; + } + + WARN_ON(!fc_frame_is_linear(fp)); /* buffer must be contiguous */ + fh = fc_frame_header_get(fp); + len = fr_len(fp) - sizeof(*fh); + seq_cnt = ntohs(fh->fh_seq_cnt); + if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 && disc->seq_count == 0) { + cp = fc_frame_payload_get(fp, sizeof(*cp)); + if (!cp) { + FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n", + fr_len(fp)); + event = DISC_EV_FAILED; + } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) { + + /* Accepted, parse the response. 
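fc_disc_gpn_ft_parse() above walks fixed-size name-server records that can arrive split across several response frames, so a partial record at the end of one buffer is stashed in the discovery context and completed from the start of the next. The standalone sketch below models that carry-over logic with a simplified 8-byte record standing in for struct fc_gpn_ft_resp.

/* Carry-over parsing of fixed-size records split across buffer chunks. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec { uint8_t flags; uint8_t fid[3]; uint8_t wwpn_hi[4]; };

struct parser {
	struct rec partial;	/* bytes saved from the previous chunk */
	size_t buf_len;		/* how many bytes of @partial are valid */
};

static void feed(struct parser *p, const uint8_t *buf, size_t len)
{
	struct rec r;

	/* First, try to complete a record left over from last time. */
	if (p->buf_len) {
		size_t need = sizeof(r) - p->buf_len;

		if (need > len)
			need = len;
		memcpy((uint8_t *)&p->partial + p->buf_len, buf, need);
		p->buf_len += need;
		buf += need;
		len -= need;
		if (p->buf_len < sizeof(r))
			return;		/* still incomplete, wait for more */
		printf("record (from carry-over) fid %02x%02x%02x\n",
		       p->partial.fid[0], p->partial.fid[1], p->partial.fid[2]);
		p->buf_len = 0;
	}

	/* Then consume whole records directly from this chunk. */
	while (len >= sizeof(r)) {
		memcpy(&r, buf, sizeof(r));
		printf("record fid %02x%02x%02x%s\n", r.fid[0], r.fid[1],
		       r.fid[2], (r.flags & 0x80) ? " (last)" : "");
		buf += sizeof(r);
		len -= sizeof(r);
	}

	/* Finally, stash any trailing partial record for the next chunk. */
	if (len) {
		memcpy(&p->partial, buf, len);
		p->buf_len = len;
	}
}

int main(void)
{
	uint8_t chunk1[] = { 0, 1, 2, 3, 0, 0, 0, 0, 0, 4, 5 };
	uint8_t chunk2[] = { 6, 0, 0, 0, 0, 0x80, 7, 8, 9, 0, 0, 0, 0 };
	struct parser p = { .buf_len = 0 };

	feed(&p, chunk1, sizeof(chunk1));
	feed(&p, chunk2, sizeof(chunk2));
	return 0;
}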
*/ + len -= sizeof(*cp); + error = fc_disc_gpn_ft_parse(disc, cp + 1, len); + } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { + FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x " + "(check zoning)\n", cp->ct_reason, + cp->ct_explan); + event = DISC_EV_FAILED; + if (cp->ct_reason == FC_FS_RJT_UNABL && + cp->ct_explan == FC_FS_EXP_FTNR) + event = DISC_EV_SUCCESS; + } else { + FC_DISC_DBG(disc, "GPN_FT unexpected response code " + "%x\n", ntohs(cp->ct_cmd)); + event = DISC_EV_FAILED; + } + } else if (fr_sof(fp) == FC_SOF_N3 && seq_cnt == disc->seq_count) { + error = fc_disc_gpn_ft_parse(disc, fh + 1, len); + } else { + FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? " + "seq_cnt %x expected %x sof %x eof %x\n", + seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp)); + event = DISC_EV_FAILED; + } + if (error) + fc_disc_error(disc, ERR_PTR(error)); + else if (event != DISC_EV_NONE) + fc_disc_done(disc, event); + fc_frame_free(fp); + mutex_unlock(&disc->disc_mutex); +} + +/** + * fc_disc_gpn_id_resp() - Handle a response frame from Get Port Names (GPN_ID) + * @sp: The sequence the GPN_ID is on + * @fp: The response frame + * @rdata_arg: The remote port that sent the GPN_ID response + * + * Locking Note: This function is called without disc mutex held. + */ +static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, + void *rdata_arg) +{ + struct fc_rport_priv *rdata = rdata_arg; + struct fc_rport_priv *new_rdata; + struct fc_lport *lport; + struct fc_disc *disc; + struct fc_ct_hdr *cp; + struct fc_ns_gid_pn *pn; + u64 port_name; + + lport = rdata->local_port; + disc = &lport->disc; + + if (PTR_ERR(fp) == -FC_EX_CLOSED) + goto out; + if (IS_ERR(fp)) { + mutex_lock(&disc->disc_mutex); + fc_disc_restart(disc); + mutex_unlock(&disc->disc_mutex); + goto out; + } + + cp = fc_frame_payload_get(fp, sizeof(*cp)); + if (!cp) + goto redisc; + if (ntohs(cp->ct_cmd) == FC_FS_ACC) { + if (fr_len(fp) < sizeof(struct fc_frame_header) + + sizeof(*cp) + sizeof(*pn)) + goto redisc; + pn = (struct fc_ns_gid_pn *)(cp + 1); + port_name = get_unaligned_be64(&pn->fn_wwpn); + mutex_lock(&rdata->rp_mutex); + if (rdata->ids.port_name == -1) + rdata->ids.port_name = port_name; + else if (rdata->ids.port_name != port_name) { + FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. " + "Port-id %6.6x wwpn %16.16llx\n", + rdata->ids.port_id, port_name); + mutex_unlock(&rdata->rp_mutex); + fc_rport_logoff(rdata); + mutex_lock(&lport->disc.disc_mutex); + new_rdata = fc_rport_create(lport, rdata->ids.port_id); + mutex_unlock(&lport->disc.disc_mutex); + if (new_rdata) { + new_rdata->disc_id = disc->disc_id; + fc_rport_login(new_rdata); + } + goto free_fp; + } + rdata->disc_id = disc->disc_id; + mutex_unlock(&rdata->rp_mutex); + fc_rport_login(rdata); + } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { + FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n", + cp->ct_reason, cp->ct_explan); + fc_rport_logoff(rdata); + } else { + FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n", + ntohs(cp->ct_cmd)); +redisc: + mutex_lock(&disc->disc_mutex); + fc_disc_restart(disc); + mutex_unlock(&disc->disc_mutex); + } +free_fp: + fc_frame_free(fp); +out: + kref_put(&rdata->kref, fc_rport_destroy); +} + +/** + * fc_disc_gpn_id_req() - Send Get Port Names by ID (GPN_ID) request + * @lport: The local port to initiate discovery on + * @rdata: remote port private data + * + * On failure, an error code is returned. 
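fc_disc_gpn_id_resp() above does not trust an accepted frame blindly: it checks that the frame is long enough to hold the FC header, the CT header and the GID_PN payload before it reads the WWPN, and treats a short frame as a reason to rediscover. A user-space sketch of that bounds check; the struct sizes here are illustrative stand-ins.

/* Validate that a response buffer really contains the headers and payload
 * we are about to dereference. Sizes are illustrative.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct frame_hdr { uint8_t raw[24]; };	/* stand-in for the FC frame header */
struct ct_hdr    { uint8_t raw[16]; };	/* stand-in for the CT header */
struct gid_pn    { uint8_t wwpn[8]; };	/* stand-in for the GID_PN payload */

/* Return a pointer to the payload only if the buffer is big enough. */
static const struct gid_pn *gid_pn_payload(const uint8_t *frame, size_t len)
{
	size_t need = sizeof(struct frame_hdr) + sizeof(struct ct_hdr) +
		      sizeof(struct gid_pn);

	if (len < need)
		return NULL;	/* short frame: caller rediscovers instead */
	return (const struct gid_pn *)
		(frame + sizeof(struct frame_hdr) + sizeof(struct ct_hdr));
}

int main(void)
{
	uint8_t good[48] = { 0 }, bad[30] = { 0 };

	printf("48-byte frame: %s\n",
	       gid_pn_payload(good, sizeof(good)) ? "ok" : "too short");
	printf("30-byte frame: %s\n",
	       gid_pn_payload(bad, sizeof(bad)) ? "ok" : "too short");
	return 0;
}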
+ */ +static int fc_disc_gpn_id_req(struct fc_lport *lport, + struct fc_rport_priv *rdata) +{ + struct fc_frame *fp; + + lockdep_assert_held(&lport->disc.disc_mutex); + fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + + sizeof(struct fc_ns_fid)); + if (!fp) + return -ENOMEM; + if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID, + fc_disc_gpn_id_resp, rdata, + 3 * lport->r_a_tov)) + return -ENOMEM; + kref_get(&rdata->kref); + return 0; +} + +/** + * fc_disc_single() - Discover the directory information for a single target + * @lport: The local port the remote port is associated with + * @dp: The port to rediscover + */ +static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp) +{ + struct fc_rport_priv *rdata; + + lockdep_assert_held(&lport->disc.disc_mutex); + + rdata = fc_rport_create(lport, dp->port_id); + if (!rdata) + return -ENOMEM; + rdata->disc_id = 0; + return fc_disc_gpn_id_req(lport, rdata); +} + +/** + * fc_disc_stop() - Stop discovery for a given lport + * @lport: The local port that discovery should stop on + */ +static void fc_disc_stop(struct fc_lport *lport) +{ + struct fc_disc *disc = &lport->disc; + + if (disc->pending) + cancel_delayed_work_sync(&disc->disc_work); + mutex_lock(&disc->disc_mutex); + fc_disc_stop_rports(disc); + mutex_unlock(&disc->disc_mutex); +} + +/** + * fc_disc_stop_final() - Stop discovery for a given lport + * @lport: The lport that discovery should stop on + * + * This function will block until discovery has been + * completely stopped and all rports have been deleted. + */ +static void fc_disc_stop_final(struct fc_lport *lport) +{ + fc_disc_stop(lport); + fc_rport_flush_queue(); +} + +/** + * fc_disc_config() - Configure the discovery layer for a local port + * @lport: The local port that needs the discovery layer to be configured + * @priv: Private data structre for users of the discovery layer + */ +void fc_disc_config(struct fc_lport *lport, void *priv) +{ + struct fc_disc *disc; + + if (!lport->tt.disc_start) + lport->tt.disc_start = fc_disc_start; + + if (!lport->tt.disc_stop) + lport->tt.disc_stop = fc_disc_stop; + + if (!lport->tt.disc_stop_final) + lport->tt.disc_stop_final = fc_disc_stop_final; + + if (!lport->tt.disc_recv_req) + lport->tt.disc_recv_req = fc_disc_recv_req; + + disc = &lport->disc; + + disc->priv = priv; +} +EXPORT_SYMBOL(fc_disc_config); + +/** + * fc_disc_init() - Initialize the discovery layer for a local port + * @lport: The local port that needs the discovery layer to be initialized + */ +void fc_disc_init(struct fc_lport *lport) +{ + struct fc_disc *disc = &lport->disc; + + INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); + mutex_init(&disc->disc_mutex); + INIT_LIST_HEAD(&disc->rports); +} +EXPORT_SYMBOL(fc_disc_init); diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c new file mode 100644 index 000000000..8d3006edb --- /dev/null +++ b/drivers/scsi/libfc/fc_elsct.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2008 Intel Corporation. All rights reserved. 
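fc_disc_config() above installs the libfc defaults into the transport template only when the LLD has not already supplied its own handler, which is how drivers selectively override pieces of the discovery machinery. A minimal user-space model of that "fill in defaults only if unset" pattern; the ops structure and function names are invented for illustration.

/* "Default only if unset" template initialisation, modelled in user space. */
#include <stdio.h>

struct disc_ops {
	void (*start)(void);
	void (*stop)(void);
};

static void default_start(void) { puts("default start"); }
static void default_stop(void)  { puts("default stop"); }
static void custom_stop(void)   { puts("driver-specific stop"); }

static void disc_config(struct disc_ops *ops)
{
	/* Keep anything the driver already hooked up; fill in the rest. */
	if (!ops->start)
		ops->start = default_start;
	if (!ops->stop)
		ops->stop = default_stop;
}

int main(void)
{
	struct disc_ops ops = { .stop = custom_stop };	/* driver overrides stop */

	disc_config(&ops);
	ops.start();	/* library default */
	ops.stop();	/* driver's version survives */
	return 0;
}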
+ * + * Maintained at www.Open-FCoE.org + */ + +/* + * Provide interface to send ELS/CT FC frames + */ + +#include +#include +#include +#include +#include +#include +#include "fc_encode.h" +#include "fc_libfc.h" + +/** + * fc_elsct_send() - Send an ELS or CT frame + * @lport: The local port to send the frame on + * @did: The destination ID for the frame + * @fp: The frame to be sent + * @op: The operational code + * @resp: The callback routine when the response is received + * @arg: The argument to pass to the response callback routine + * @timer_msec: The timeout period for the frame (in msecs) + */ +struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timer_msec) +{ + enum fc_rctl r_ctl; + enum fc_fh_type fh_type; + int rc; + + /* ELS requests */ + if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) + rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type); + else { + /* CT requests */ + rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type, &did); + } + + if (rc) { + fc_frame_free(fp); + return NULL; + } + + fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type, + FC_FCTL_REQ, 0); + + return fc_exch_seq_send(lport, fp, resp, NULL, arg, timer_msec); +} +EXPORT_SYMBOL(fc_elsct_send); + +/** + * fc_elsct_init() - Initialize the ELS/CT layer + * @lport: The local port to initialize the ELS/CT layer for + */ +int fc_elsct_init(struct fc_lport *lport) +{ + if (!lport->tt.elsct_send) + lport->tt.elsct_send = fc_elsct_send; + + return 0; +} +EXPORT_SYMBOL(fc_elsct_init); + +/** + * fc_els_resp_type() - Return a string describing the ELS response + * @fp: The frame pointer or possible error code + */ +const char *fc_els_resp_type(struct fc_frame *fp) +{ + const char *msg; + struct fc_frame_header *fh; + struct fc_ct_hdr *ct; + + if (IS_ERR(fp)) { + switch (-PTR_ERR(fp)) { + case FC_NO_ERR: + msg = "response no error"; + break; + case FC_EX_TIMEOUT: + msg = "response timeout"; + break; + case FC_EX_CLOSED: + msg = "response closed"; + break; + default: + msg = "response unknown error"; + break; + } + } else { + fh = fc_frame_header_get(fp); + switch (fh->fh_type) { + case FC_TYPE_ELS: + switch (fc_frame_payload_op(fp)) { + case ELS_LS_ACC: + msg = "accept"; + break; + case ELS_LS_RJT: + msg = "reject"; + break; + default: + msg = "response unknown ELS"; + break; + } + break; + case FC_TYPE_CT: + ct = fc_frame_payload_get(fp, sizeof(*ct)); + if (ct) { + switch (ntohs(ct->ct_cmd)) { + case FC_FS_ACC: + msg = "CT accept"; + break; + case FC_FS_RJT: + msg = "CT reject"; + break; + default: + msg = "response unknown CT"; + break; + } + } else { + msg = "short CT response"; + } + break; + default: + msg = "response not ELS or CT"; + break; + } + } + return msg; +} diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h new file mode 100644 index 000000000..7dcac3b6b --- /dev/null +++ b/drivers/scsi/libfc/fc_encode.h @@ -0,0 +1,951 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright(c) 2008 Intel Corporation. All rights reserved. + * + * Maintained at www.Open-FCoE.org + */ + +#ifndef _FC_ENCODE_H_ +#define _FC_ENCODE_H_ +#include +#include +#include + +/* + * F_CTL values for simple requests and responses. 
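fc_elsct_send() above picks the encoder purely from the opcode range: values inside the ELS command range go to fc_els_fill(), everything else is treated as a CT request and handed to fc_ct_fill(). The sketch below models that single dispatch point in user space; the range bounds and example opcodes are illustrative constants, not the fc_els.h values.

/* Opcode-range dispatch between two encoders, modelled in user space. */
#include <stdio.h>

#define ELS_OP_MIN 0x01		/* illustrative bounds */
#define ELS_OP_MAX 0x7f

static int els_fill(unsigned int op) { printf("ELS encode %#x\n", op); return 0; }
static int ct_fill(unsigned int op)  { printf("CT encode %#x\n", op);  return 0; }

static int elsct_send(unsigned int op)
{
	/* One dispatch point keeps callers from caring which family op is in. */
	if (op >= ELS_OP_MIN && op <= ELS_OP_MAX)
		return els_fill(op);
	return ct_fill(op);
}

int main(void)
{
	elsct_send(0x03);	/* e.g. a PLOGI-like ELS opcode */
	elsct_send(0x0112);	/* e.g. a name-server CT opcode */
	return 0;
}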
+ */ +#define FC_FCTL_REQ (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT) +#define FC_FCTL_RESP (FC_FC_EX_CTX | FC_FC_LAST_SEQ | \ + FC_FC_END_SEQ | FC_FC_SEQ_INIT) + +struct fc_ns_rft { + struct fc_ns_fid fid; /* port ID object */ + struct fc_ns_fts fts; /* FC4-types object */ +}; + +struct fc_ct_req { + struct fc_ct_hdr hdr; + union { + struct fc_ns_gid_ft gid; + struct fc_ns_rn_id rn; + struct fc_ns_rft rft; + struct fc_ns_rff_id rff; + struct fc_ns_fid fid; + struct fc_ns_rsnn snn; + struct fc_ns_rspn spn; + struct fc_fdmi_rhba rhba; + struct fc_fdmi_rpa rpa; + struct fc_fdmi_dprt dprt; + struct fc_fdmi_dhba dhba; + } payload; +}; + +/** + * fc_adisc_fill() - Fill in adisc request frame + * @lport: local port. + * @fp: fc frame where payload will be placed. + */ +static inline void fc_adisc_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_els_adisc *adisc; + + adisc = fc_frame_payload_get(fp, sizeof(*adisc)); + memset(adisc, 0, sizeof(*adisc)); + adisc->adisc_cmd = ELS_ADISC; + put_unaligned_be64(lport->wwpn, &adisc->adisc_wwpn); + put_unaligned_be64(lport->wwnn, &adisc->adisc_wwnn); + hton24(adisc->adisc_port_id, lport->port_id); +} + +/** + * fc_ct_hdr_fill- fills ct header and reset ct payload + * returns pointer to ct request. + */ +static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp, + unsigned int op, size_t req_size, + enum fc_ct_fs_type fs_type, + u8 subtype) +{ + struct fc_ct_req *ct; + size_t ct_plen; + + ct_plen = sizeof(struct fc_ct_hdr) + req_size; + ct = fc_frame_payload_get(fp, ct_plen); + memset(ct, 0, ct_plen); + ct->hdr.ct_rev = FC_CT_REV; + ct->hdr.ct_fs_type = fs_type; + ct->hdr.ct_fs_subtype = subtype; + ct->hdr.ct_cmd = htons((u16) op); + return ct; +} + +/** + * fc_ct_ns_fill() - Fill in a name service request frame + * @lport: local port. + * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries. + * @fp: frame to contain payload. + * @op: CT opcode. + * @r_ctl: pointer to FC header R_CTL. + * @fh_type: pointer to FC-4 type. 
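fc_ct_hdr_fill() above zeroes the whole CT request and then sets the few header fields every name-server or management-server request shares: revision, FS type, FS subtype and the big-endian command code. A standalone sketch of the same helper; the header layout is simplified and the constant values are illustrative.

/* Building a CT-style request header: zero the buffer, then set the common
 * fields, with the command code stored big-endian.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ct_hdr {
	uint8_t rev;
	uint8_t in_id[3];
	uint8_t fs_type;
	uint8_t fs_subtype;
	uint8_t options;
	uint8_t resvd;
	uint8_t cmd[2];		/* big-endian command code */
	uint8_t maxres[2];
	uint8_t pad[4];
};

static void ct_hdr_fill(void *buf, size_t plen, uint16_t cmd,
			uint8_t fs_type, uint8_t subtype)
{
	struct ct_hdr *ct = buf;

	memset(buf, 0, plen);		/* header plus request payload */
	ct->rev = 0x01;			/* illustrative CT revision */
	ct->fs_type = fs_type;
	ct->fs_subtype = subtype;
	ct->cmd[0] = cmd >> 8;		/* htons() done by hand */
	ct->cmd[1] = cmd & 0xff;
}

int main(void)
{
	uint8_t req[sizeof(struct ct_hdr) + 4];

	ct_hdr_fill(req, sizeof(req), 0x0172 /* GPN_FT-like */, 0xfc, 0x02);
	printf("cmd bytes on the wire: %02x %02x\n", req[8], req[9]);
	return 0;
}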
+ */ +static inline int fc_ct_ns_fill(struct fc_lport *lport, + u32 fc_id, struct fc_frame *fp, + unsigned int op, enum fc_rctl *r_ctl, + enum fc_fh_type *fh_type) +{ + struct fc_ct_req *ct; + size_t len; + + switch (op) { + case FC_NS_GPN_FT: + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_gid_ft), + FC_FST_DIR, FC_NS_SUBTYPE); + ct->payload.gid.fn_fc4_type = FC_TYPE_FCP; + break; + + case FC_NS_GPN_ID: + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_fid), + FC_FST_DIR, FC_NS_SUBTYPE); + ct->payload.gid.fn_fc4_type = FC_TYPE_FCP; + hton24(ct->payload.fid.fp_fid, fc_id); + break; + + case FC_NS_RFT_ID: + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft), + FC_FST_DIR, FC_NS_SUBTYPE); + hton24(ct->payload.rft.fid.fp_fid, lport->port_id); + ct->payload.rft.fts = lport->fcts; + break; + + case FC_NS_RFF_ID: + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rff_id), + FC_FST_DIR, FC_NS_SUBTYPE); + hton24(ct->payload.rff.fr_fid.fp_fid, lport->port_id); + ct->payload.rff.fr_type = FC_TYPE_FCP; + if (lport->service_params & FCP_SPPF_INIT_FCN) + ct->payload.rff.fr_feat = FCP_FEAT_INIT; + if (lport->service_params & FCP_SPPF_TARG_FCN) + ct->payload.rff.fr_feat |= FCP_FEAT_TARG; + break; + + case FC_NS_RNN_ID: + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id), + FC_FST_DIR, FC_NS_SUBTYPE); + hton24(ct->payload.rn.fr_fid.fp_fid, lport->port_id); + put_unaligned_be64(lport->wwnn, &ct->payload.rn.fr_wwn); + break; + + case FC_NS_RSPN_ID: + len = strnlen(fc_host_symbolic_name(lport->host), 255); + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn) + len, + FC_FST_DIR, FC_NS_SUBTYPE); + hton24(ct->payload.spn.fr_fid.fp_fid, lport->port_id); + strncpy(ct->payload.spn.fr_name, + fc_host_symbolic_name(lport->host), len); + ct->payload.spn.fr_name_len = len; + break; + + case FC_NS_RSNN_NN: + len = strnlen(fc_host_symbolic_name(lport->host), 255); + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn) + len, + FC_FST_DIR, FC_NS_SUBTYPE); + put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn); + strncpy(ct->payload.snn.fr_name, + fc_host_symbolic_name(lport->host), len); + ct->payload.snn.fr_name_len = len; + break; + + default: + return -EINVAL; + } + *r_ctl = FC_RCTL_DD_UNSOL_CTL; + *fh_type = FC_TYPE_CT; + return 0; +} + +static inline void fc_ct_ms_fill_attr(struct fc_fdmi_attr_entry *entry, + const char *in, size_t len) +{ + int copied; + + copied = strscpy(entry->value, in, len); + if (copied > 0 && copied + 1 < len) + memset(entry->value + copied + 1, 0, len - copied - 1); +} + +/** + * fc_ct_ms_fill() - Fill in a mgmt service request frame + * @lport: local port. + * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries. + * @fp: frame to contain payload. + * @op: CT opcode. + * @r_ctl: pointer to FC header R_CTL. + * @fh_type: pointer to FC-4 type. 
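fc_ct_ms_fill_attr() above copies a C string into a fixed-width FDMI attribute value and zeroes whatever space remains after the terminator, so no stale buffer contents leak into the registration frame. A user-space equivalent of that copy-and-zero-pad step; the helper name is invented and plain memcpy/memset replace the kernel-only strscpy.

/* Copy a string into a fixed-width attribute field and zero the tail, so
 * the on-wire record never carries stale buffer contents.
 */
#include <stdio.h>
#include <string.h>

static void fill_attr(char *dst, size_t width, const char *src)
{
	size_t n = strlen(src);

	if (n >= width)
		n = width - 1;		/* leave room for the terminator */
	memcpy(dst, src, n);
	memset(dst + n, 0, width - n);	/* terminator plus zero padding */
}

int main(void)
{
	char value[16];
	size_t i;

	memset(value, 0xaa, sizeof(value));	/* simulate stale data */
	fill_attr(value, sizeof(value), "example hba");

	for (i = 0; i < sizeof(value); i++)
		printf("%02x ", (unsigned char)value[i]);
	printf("\n");
	return 0;
}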
+ */ +static inline int fc_ct_ms_fill(struct fc_lport *lport, + u32 fc_id, struct fc_frame *fp, + unsigned int op, enum fc_rctl *r_ctl, + enum fc_fh_type *fh_type) +{ + struct fc_ct_req *ct; + size_t len; + struct fc_fdmi_attr_entry *entry; + struct fs_fdmi_attrs *hba_attrs; + int numattrs = 0; + struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host); + + switch (op) { + case FC_FDMI_RHBA: + numattrs = 11; + len = sizeof(struct fc_fdmi_rhba); + len -= sizeof(struct fc_fdmi_attr_entry); + len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN); + len += FC_FDMI_HBA_ATTR_NODENAME_LEN; + len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN; + len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN; + len += FC_FDMI_HBA_ATTR_MODEL_LEN; + len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN; + len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN; + len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN; + len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN; + len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN; + len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN; + len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN; + + if (fc_host->fdmi_version == FDMI_V2) { + numattrs += 7; + len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN; + len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN; + len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN; + len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN; + len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN; + len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN; + len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN; + } + + ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT, + FC_FDMI_SUBTYPE); + + /* HBA Identifier */ + put_unaligned_be64(lport->wwpn, &ct->payload.rhba.hbaid.id); + /* Number of Ports - always 1 */ + put_unaligned_be32(1, &ct->payload.rhba.port.numport); + /* Port Name */ + put_unaligned_be64(lport->wwpn, + &ct->payload.rhba.port.port[0].portname); + + /* HBA Attributes */ + put_unaligned_be32(numattrs, + &ct->payload.rhba.hba_attrs.numattrs); + hba_attrs = &ct->payload.rhba.hba_attrs; + entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr; + /* NodeName*/ + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_NODENAME_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_NODENAME, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be64(lport->wwnn, + (__be64 *)&entry->value); + + /* Manufacturer */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_NODENAME_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_MANUFACTURER, + &entry->type); + put_unaligned_be16(len, &entry->len); + fc_ct_ms_fill_attr(entry, + fc_host_manufacturer(lport->host), + FC_FDMI_HBA_ATTR_MANUFACTURER_LEN); + + /* SerialNumber */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_MANUFACTURER_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_SERIALNUMBER, + &entry->type); + put_unaligned_be16(len, &entry->len); + fc_ct_ms_fill_attr(entry, + fc_host_serial_number(lport->host), + FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN); + + /* Model */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_MODEL_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_MODEL, + &entry->type); + put_unaligned_be16(len, &entry->len); + fc_ct_ms_fill_attr(entry, + fc_host_model(lport->host), + FC_FDMI_HBA_ATTR_MODEL_LEN); + + /* Model Description */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + 
FC_FDMI_HBA_ATTR_MODEL_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_MODELDESCRIPTION, + &entry->type); + put_unaligned_be16(len, &entry->len); + fc_ct_ms_fill_attr(entry, + fc_host_model_description(lport->host), + FC_FDMI_HBA_ATTR_MODELDESCR_LEN); + + /* Hardware Version */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_MODELDESCR_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_HARDWAREVERSION, + &entry->type); + put_unaligned_be16(len, &entry->len); + fc_ct_ms_fill_attr(entry, + fc_host_hardware_version(lport->host), + FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN); + + /* Driver Version */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_DRIVERVERSION, + &entry->type); + put_unaligned_be16(len, &entry->len); + fc_ct_ms_fill_attr(entry, + fc_host_driver_version(lport->host), + FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN); + + /* OptionROM Version */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_OPTIONROMVERSION, + &entry->type); + put_unaligned_be16(len, &entry->len); + fc_ct_ms_fill_attr(entry, + "unknown", + FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN); + + /* Firmware Version */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_FIRMWAREVERSION, + &entry->type); + put_unaligned_be16(len, &entry->len); + fc_ct_ms_fill_attr(entry, + fc_host_firmware_version(lport->host), + FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN); + + /* OS Name and Version */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_OSNAMEVERSION, + &entry->type); + put_unaligned_be16(len, &entry->len); + snprintf((char *)&entry->value, + FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN, + "%s v%s", + init_utsname()->sysname, + init_utsname()->release); + + /* Max CT payload */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_MAXCTPAYLOAD, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be32(fc_host_max_ct_payload(lport->host), + &entry->value); + + if (fc_host->fdmi_version == FDMI_V2) { + /* Node symbolic name */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_NODESYMBLNAME, + &entry->type); + put_unaligned_be16(len, &entry->len); + fc_ct_ms_fill_attr(entry, + fc_host_symbolic_name(lport->host), + FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN); + + /* Vendor specific info */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + 
len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be32(0, + &entry->value); + + /* Number of ports */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_NUMBEROFPORTS, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be32(fc_host_num_ports(lport->host), + &entry->value); + + /* Fabric name */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_FABRICNAME, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be64(fc_host_fabric_name(lport->host), + &entry->value); + + /* BIOS version */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_FABRICNAME_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_BIOSVERSION, + &entry->type); + put_unaligned_be16(len, &entry->len); + fc_ct_ms_fill_attr(entry, + fc_host_bootbios_version(lport->host), + FC_FDMI_HBA_ATTR_BIOSVERSION_LEN); + + /* BIOS state */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_BIOSVERSION_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_BIOSSTATE, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be32(fc_host_bootbios_state(lport->host), + &entry->value); + + /* Vendor identifier */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_HBA_ATTR_BIOSSTATE_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN; + put_unaligned_be16(FC_FDMI_HBA_ATTR_VENDORIDENTIFIER, + &entry->type); + put_unaligned_be16(len, &entry->len); + fc_ct_ms_fill_attr(entry, + fc_host_vendor_identifier(lport->host), + FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN); + } + + break; + case FC_FDMI_RPA: + numattrs = 6; + len = sizeof(struct fc_fdmi_rpa); + len -= sizeof(struct fc_fdmi_attr_entry); + len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN); + len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN; + len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN; + len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN; + len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN; + len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN; + len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN; + + + if (fc_host->fdmi_version == FDMI_V2) { + numattrs += 10; + + len += FC_FDMI_PORT_ATTR_NODENAME_LEN; + len += FC_FDMI_PORT_ATTR_PORTNAME_LEN; + len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN; + len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN; + len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN; + len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN; + len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN; + len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN; + len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN; + len += FC_FDMI_PORT_ATTR_PORTID_LEN; + + } + + ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT, + FC_FDMI_SUBTYPE); + + /* Port Name */ + put_unaligned_be64(lport->wwpn, + &ct->payload.rpa.port.portname); + + /* Port Attributes */ + put_unaligned_be32(numattrs, + &ct->payload.rpa.hba_attrs.numattrs); + + hba_attrs = &ct->payload.rpa.hba_attrs; + entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr; + + /* 
FC4 types */ + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_FC4TYPES, + &entry->type); + put_unaligned_be16(len, &entry->len); + memcpy(&entry->value, fc_host_supported_fc4s(lport->host), + FC_FDMI_PORT_ATTR_FC4TYPES_LEN); + + /* Supported Speed */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_PORT_ATTR_FC4TYPES_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_SUPPORTEDSPEED, + &entry->type); + put_unaligned_be16(len, &entry->len); + + put_unaligned_be32(fc_host_supported_speeds(lport->host), + &entry->value); + + /* Current Port Speed */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_CURRENTPORTSPEED, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be32(lport->link_speed, + &entry->value); + + /* Max Frame Size */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_MAXFRAMESIZE, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be32(fc_host_maxframe_size(lport->host), + &entry->value); + + /* OS Device Name */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_OSDEVICENAME, + &entry->type); + put_unaligned_be16(len, &entry->len); + /* Use the sysfs device name */ + fc_ct_ms_fill_attr(entry, + dev_name(&lport->host->shost_gendev), + strnlen(dev_name(&lport->host->shost_gendev), + FC_FDMI_PORT_ATTR_HOSTNAME_LEN)); + + /* Host Name */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_HOSTNAME, + &entry->type); + put_unaligned_be16(len, &entry->len); + if (strlen(fc_host_system_hostname(lport->host))) + fc_ct_ms_fill_attr(entry, + fc_host_system_hostname(lport->host), + strnlen(fc_host_system_hostname(lport->host), + FC_FDMI_PORT_ATTR_HOSTNAME_LEN)); + else + fc_ct_ms_fill_attr(entry, + init_utsname()->nodename, + FC_FDMI_PORT_ATTR_HOSTNAME_LEN); + + + if (fc_host->fdmi_version == FDMI_V2) { + + /* Node name */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_PORT_ATTR_HOSTNAME_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_PORT_ATTR_NODENAME_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_NODENAME, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be64(fc_host_node_name(lport->host), + &entry->value); + + /* Port name */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_PORT_ATTR_NODENAME_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_PORT_ATTR_PORTNAME_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTNAME, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be64(lport->wwpn, + &entry->value); + + /* Port symbolic name */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_PORT_ATTR_PORTNAME_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + 
len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_SYMBOLICNAME, + &entry->type); + put_unaligned_be16(len, &entry->len); + fc_ct_ms_fill_attr(entry, + fc_host_symbolic_name(lport->host), + FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN); + + /* Port type */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTTYPE, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be32(fc_host_port_type(lport->host), + &entry->value); + + /* Supported class of service */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_PORT_ATTR_PORTTYPE_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be32(fc_host_supported_classes(lport->host), + &entry->value); + + /* Port Fabric name */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_FABRICNAME, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be64(fc_host_fabric_name(lport->host), + &entry->value); + + /* Port active FC-4 */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_PORT_ATTR_FABRICNAME_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_CURRENTFC4TYPE, + &entry->type); + put_unaligned_be16(len, &entry->len); + memcpy(&entry->value, fc_host_active_fc4s(lport->host), + FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN); + + /* Port state */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTSTATE, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be32(fc_host_port_state(lport->host), + &entry->value); + + /* Discovered ports */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_PORT_ATTR_PORTSTATE_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_DISCOVEREDPORTS, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be32(fc_host_num_discovered_ports(lport->host), + &entry->value); + + /* Port ID */ + entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + + FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN); + len = FC_FDMI_ATTR_ENTRY_HEADER_LEN; + len += FC_FDMI_PORT_ATTR_PORTID_LEN; + put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTID, + &entry->type); + put_unaligned_be16(len, &entry->len); + put_unaligned_be32(fc_host_port_id(lport->host), + &entry->value); + } + + break; + case FC_FDMI_DPRT: + len = sizeof(struct fc_fdmi_dprt); + ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT, + FC_FDMI_SUBTYPE); + /* Port Name */ + put_unaligned_be64(lport->wwpn, + &ct->payload.dprt.port.portname); + break; + case FC_FDMI_DHBA: + len = sizeof(struct fc_fdmi_dhba); + ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT, + FC_FDMI_SUBTYPE); + /* HBA Identifier */ + put_unaligned_be64(lport->wwpn, &ct->payload.dhba.hbaid.id); + break; + default: + return -EINVAL; + } + *r_ctl = 
FC_RCTL_DD_UNSOL_CTL; + *fh_type = FC_TYPE_CT; + return 0; +} + +/** + * fc_ct_fill() - Fill in a common transport service request frame + * @lport: local port. + * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries. + * @fp: frame to contain payload. + * @op: CT opcode. + * @r_ctl: pointer to FC header R_CTL. + * @fh_type: pointer to FC-4 type. + */ +static inline int fc_ct_fill(struct fc_lport *lport, + u32 fc_id, struct fc_frame *fp, + unsigned int op, enum fc_rctl *r_ctl, + enum fc_fh_type *fh_type, u32 *did) +{ + int rc = -EINVAL; + + switch (fc_id) { + case FC_FID_MGMT_SERV: + rc = fc_ct_ms_fill(lport, fc_id, fp, op, r_ctl, fh_type); + *did = FC_FID_MGMT_SERV; + break; + case FC_FID_DIR_SERV: + default: + rc = fc_ct_ns_fill(lport, fc_id, fp, op, r_ctl, fh_type); + *did = FC_FID_DIR_SERV; + break; + } + + return rc; +} +/** + * fc_plogi_fill - Fill in plogi request frame + */ +static inline void fc_plogi_fill(struct fc_lport *lport, struct fc_frame *fp, + unsigned int op) +{ + struct fc_els_flogi *plogi; + struct fc_els_csp *csp; + struct fc_els_cssp *cp; + + plogi = fc_frame_payload_get(fp, sizeof(*plogi)); + memset(plogi, 0, sizeof(*plogi)); + plogi->fl_cmd = (u8) op; + put_unaligned_be64(lport->wwpn, &plogi->fl_wwpn); + put_unaligned_be64(lport->wwnn, &plogi->fl_wwnn); + + csp = &plogi->fl_csp; + csp->sp_hi_ver = 0x20; + csp->sp_lo_ver = 0x20; + csp->sp_bb_cred = htons(10); /* this gets set by gateway */ + csp->sp_bb_data = htons((u16) lport->mfs); + cp = &plogi->fl_cssp[3 - 1]; /* class 3 parameters */ + cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); + csp->sp_features = htons(FC_SP_FT_CIRO); + csp->sp_tot_seq = htons(255); /* seq. we accept */ + csp->sp_rel_off = htons(0x1f); + csp->sp_e_d_tov = htonl(lport->e_d_tov); + + cp->cp_rdfs = htons((u16) lport->mfs); + cp->cp_con_seq = htons(255); + cp->cp_open_seq = 1; +} + +/** + * fc_flogi_fill - Fill in a flogi request frame. + */ +static inline void fc_flogi_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_els_csp *sp; + struct fc_els_cssp *cp; + struct fc_els_flogi *flogi; + + flogi = fc_frame_payload_get(fp, sizeof(*flogi)); + memset(flogi, 0, sizeof(*flogi)); + flogi->fl_cmd = (u8) ELS_FLOGI; + put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn); + put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn); + sp = &flogi->fl_csp; + sp->sp_hi_ver = 0x20; + sp->sp_lo_ver = 0x20; + sp->sp_bb_cred = htons(10); /* this gets set by gateway */ + sp->sp_bb_data = htons((u16) lport->mfs); + cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */ + cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); + if (lport->does_npiv) + sp->sp_features = htons(FC_SP_FT_NPIV); +} + +/** + * fc_fdisc_fill - Fill in a fdisc request frame. + */ +static inline void fc_fdisc_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_els_csp *sp; + struct fc_els_cssp *cp; + struct fc_els_flogi *fdisc; + + fdisc = fc_frame_payload_get(fp, sizeof(*fdisc)); + memset(fdisc, 0, sizeof(*fdisc)); + fdisc->fl_cmd = (u8) ELS_FDISC; + put_unaligned_be64(lport->wwpn, &fdisc->fl_wwpn); + put_unaligned_be64(lport->wwnn, &fdisc->fl_wwnn); + sp = &fdisc->fl_csp; + sp->sp_hi_ver = 0x20; + sp->sp_lo_ver = 0x20; + sp->sp_bb_cred = htons(10); /* this gets set by gateway */ + sp->sp_bb_data = htons((u16) lport->mfs); + cp = &fdisc->fl_cssp[3 - 1]; /* class 3 parameters */ + cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); +} + +/** + * fc_logo_fill - Fill in a logo request frame. 
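The RHBA and RPA builders above append each FDMI attribute as a type/length/value entry and step to the next slot by adding the previous value width to the previous entry's value pointer, with types and lengths stored big-endian and the length covering the 4-byte entry header. The sketch below reproduces that append-and-advance walk in user space over a plain byte buffer; the attribute codes and widths are illustrative, not the FC_FDMI_* constants.

/* Appending big-endian type/length/value attributes to a buffer, the way
 * the FDMI RHBA/RPA payloads are built: a 4-byte header (type, total
 * length) followed by a fixed-width, zero-padded value.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TLV_HDR_LEN 4

static void put_be16(uint8_t *p, uint16_t v) { p[0] = v >> 8; p[1] = v & 0xff; }

/* Append one attribute and return a pointer to the next free slot. */
static uint8_t *tlv_append(uint8_t *entry, uint16_t type,
			   const void *src, size_t src_len, uint16_t width)
{
	put_be16(entry, type);
	put_be16(entry + 2, TLV_HDR_LEN + width);	/* length covers the header */
	memset(entry + TLV_HDR_LEN, 0, width);		/* zero-pad the value field */
	memcpy(entry + TLV_HDR_LEN, src,
	       src_len < width ? src_len : width);
	return entry + TLV_HDR_LEN + width;
}

int main(void)
{
	uint8_t payload[64];
	uint8_t *entry = payload;
	const char maker[] = "example";
	uint8_t nports[4] = { 0, 0, 0, 1 };

	entry = tlv_append(entry, 0x0001, maker, sizeof(maker) - 1, 16);
	entry = tlv_append(entry, 0x0002, nports, sizeof(nports), 4);

	printf("payload length: %ld bytes\n", (long)(entry - payload));
	return 0;
}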
+ */ +static inline void fc_logo_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_els_logo *logo; + + logo = fc_frame_payload_get(fp, sizeof(*logo)); + memset(logo, 0, sizeof(*logo)); + logo->fl_cmd = ELS_LOGO; + hton24(logo->fl_n_port_id, lport->port_id); + logo->fl_n_port_wwn = htonll(lport->wwpn); +} + +/** + * fc_rtv_fill - Fill in RTV (read timeout value) request frame. + */ +static inline void fc_rtv_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_els_rtv *rtv; + + rtv = fc_frame_payload_get(fp, sizeof(*rtv)); + memset(rtv, 0, sizeof(*rtv)); + rtv->rtv_cmd = ELS_RTV; +} + +/** + * fc_rec_fill - Fill in rec request frame + */ +static inline void fc_rec_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_els_rec *rec; + struct fc_exch *ep = fc_seq_exch(fr_seq(fp)); + + rec = fc_frame_payload_get(fp, sizeof(*rec)); + memset(rec, 0, sizeof(*rec)); + rec->rec_cmd = ELS_REC; + hton24(rec->rec_s_id, lport->port_id); + rec->rec_ox_id = htons(ep->oxid); + rec->rec_rx_id = htons(ep->rxid); +} + +/** + * fc_prli_fill - Fill in prli request frame + */ +static inline void fc_prli_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct { + struct fc_els_prli prli; + struct fc_els_spp spp; + } *pp; + + pp = fc_frame_payload_get(fp, sizeof(*pp)); + memset(pp, 0, sizeof(*pp)); + pp->prli.prli_cmd = ELS_PRLI; + pp->prli.prli_spp_len = sizeof(struct fc_els_spp); + pp->prli.prli_len = htons(sizeof(*pp)); + pp->spp.spp_type = FC_TYPE_FCP; + pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR; + pp->spp.spp_params = htonl(lport->service_params); +} + +/** + * fc_scr_fill - Fill in a scr request frame. + */ +static inline void fc_scr_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_els_scr *scr; + + scr = fc_frame_payload_get(fp, sizeof(*scr)); + memset(scr, 0, sizeof(*scr)); + scr->scr_cmd = ELS_SCR; + scr->scr_reg_func = ELS_SCRF_FULL; +} + +/** + * fc_els_fill - Fill in an ELS request frame + */ +static inline int fc_els_fill(struct fc_lport *lport, + u32 did, + struct fc_frame *fp, unsigned int op, + enum fc_rctl *r_ctl, enum fc_fh_type *fh_type) +{ + switch (op) { + case ELS_ADISC: + fc_adisc_fill(lport, fp); + break; + + case ELS_PLOGI: + fc_plogi_fill(lport, fp, ELS_PLOGI); + break; + + case ELS_FLOGI: + fc_flogi_fill(lport, fp); + break; + + case ELS_FDISC: + fc_fdisc_fill(lport, fp); + break; + + case ELS_LOGO: + fc_logo_fill(lport, fp); + break; + + case ELS_RTV: + fc_rtv_fill(lport, fp); + break; + + case ELS_REC: + fc_rec_fill(lport, fp); + break; + + case ELS_PRLI: + fc_prli_fill(lport, fp); + break; + + case ELS_SCR: + fc_scr_fill(lport, fp); + break; + + default: + return -EINVAL; + } + + *r_ctl = FC_RCTL_ELS_REQ; + *fh_type = FC_TYPE_ELS; + return 0; +} +#endif /* _FC_ENCODE_H_ */ diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c new file mode 100644 index 000000000..1d91c4575 --- /dev/null +++ b/drivers/scsi/libfc/fc_exch.c @@ -0,0 +1,2712 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2007 Intel Corporation. All rights reserved. + * Copyright(c) 2008 Red Hat, Inc. All rights reserved. + * Copyright(c) 2008 Mike Christie + * + * Maintained at www.Open-FCoE.org + */ + +/* + * Fibre Channel exchange and sequence handling. 
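Several of the fill helpers above (ADISC, LOGO, REC) store the 24-bit FC port ID with hton24(), which packs the low three bytes of a host integer into the frame in big-endian order; ntoh24() is the inverse used on the receive side. A standalone user-space pair of equivalents: the names match the libfc helpers, but these implementations are illustrative.

/* 24-bit big-endian pack/unpack, as used for FC port IDs (D_ID/S_ID). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void hton24(uint8_t p[3], uint32_t v)
{
	p[0] = (v >> 16) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = v & 0xff;
}

static uint32_t ntoh24(const uint8_t p[3])
{
	return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

int main(void)
{
	uint8_t wire[3];
	uint32_t fid = 0x010203;	/* a well-formed 24-bit N_Port ID */

	hton24(wire, fid);
	assert(ntoh24(wire) == fid);
	printf("port id %6.6x -> %02x %02x %02x on the wire\n",
	       fid, wire[0], wire[1], wire[2]);
	return 0;
}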
+ */ + +#include +#include +#include +#include +#include + +#include + +#include + +#include "fc_libfc.h" + +u16 fc_cpu_mask; /* cpu mask for possible cpus */ +EXPORT_SYMBOL(fc_cpu_mask); +static u16 fc_cpu_order; /* 2's power to represent total possible cpus */ +static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ +static struct workqueue_struct *fc_exch_workqueue; + +/* + * Structure and function definitions for managing Fibre Channel Exchanges + * and Sequences. + * + * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq. + * + * fc_exch_mgr holds the exchange state for an N port + * + * fc_exch holds state for one exchange and links to its active sequence. + * + * fc_seq holds the state for an individual sequence. + */ + +/** + * struct fc_exch_pool - Per cpu exchange pool + * @next_index: Next possible free exchange index + * @total_exches: Total allocated exchanges + * @lock: Exch pool lock + * @ex_list: List of exchanges + * @left: Cache of free slot in exch array + * @right: Cache of free slot in exch array + * + * This structure manages per cpu exchanges in array of exchange pointers. + * This array is allocated followed by struct fc_exch_pool memory for + * assigned range of exchanges to per cpu pool. + */ +struct fc_exch_pool { + spinlock_t lock; + struct list_head ex_list; + u16 next_index; + u16 total_exches; + + u16 left; + u16 right; +} ____cacheline_aligned_in_smp; + +/** + * struct fc_exch_mgr - The Exchange Manager (EM). + * @class: Default class for new sequences + * @kref: Reference counter + * @min_xid: Minimum exchange ID + * @max_xid: Maximum exchange ID + * @ep_pool: Reserved exchange pointers + * @pool_max_index: Max exch array index in exch pool + * @pool: Per cpu exch pool + * @lport: Local exchange port + * @stats: Statistics structure + * + * This structure is the center for creating exchanges and sequences. + * It manages the allocation of exchange IDs. + */ +struct fc_exch_mgr { + struct fc_exch_pool __percpu *pool; + mempool_t *ep_pool; + struct fc_lport *lport; + enum fc_class class; + struct kref kref; + u16 min_xid; + u16 max_xid; + u16 pool_max_index; + + struct { + atomic_t no_free_exch; + atomic_t no_free_exch_xid; + atomic_t xid_not_found; + atomic_t xid_busy; + atomic_t seq_not_found; + atomic_t non_bls_resp; + } stats; +}; + +/** + * struct fc_exch_mgr_anchor - primary structure for list of EMs + * @ema_list: Exchange Manager Anchor list + * @mp: Exchange Manager associated with this anchor + * @match: Routine to determine if this anchor's EM should be used + * + * When walking the list of anchors the match routine will be called + * for each anchor to determine if that EM should be used. The last + * anchor in the list will always match to handle any exchanges not + * handled by other EMs. The non-default EMs would be added to the + * anchor list by HW that provides offloads. + */ +struct fc_exch_mgr_anchor { + struct list_head ema_list; + struct fc_exch_mgr *mp; + bool (*match)(struct fc_frame *); +}; + +static void fc_exch_rrq(struct fc_exch *); +static void fc_seq_ls_acc(struct fc_frame *); +static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason, + enum fc_els_rjt_explan); +static void fc_exch_els_rec(struct fc_frame *); +static void fc_exch_els_rrq(struct fc_frame *); + +/* + * Internal implementation notes. + * + * The exchange manager is one by default in libfc but LLD may choose + * to have one per CPU. The sequence manager is one per exchange manager + * and currently never separated. 
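The per-CPU pool layout above spreads exchange IDs across CPUs by encoding the CPU in the low bits of the XID: fc_cpu_mask selects the owning pool and shifting by fc_cpu_order yields the index inside that pool's exchange array (the same arithmetic fc_exch_delete() uses further down). A user-space sketch of the mapping, assuming a power-of-two CPU count:

/* Splitting an exchange ID into (cpu, per-pool index) with a power-of-two
 * CPU count: the low bits pick the pool, the rest index into its array.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int cpu_order = 2;		/* log2(nr_cpus) */
	const uint16_t cpu_mask = (1 << cpu_order) - 1;	/* nr_cpus - 1 */
	const uint16_t min_xid = 0x0100;
	uint16_t xid;

	for (xid = min_xid; xid < min_xid + 8; xid++) {
		unsigned int cpu = xid & cpu_mask;
		unsigned int index = (uint16_t)(xid - min_xid) >> cpu_order;

		printf("xid %#06x -> pool of cpu %u, index %u\n",
		       (unsigned int)xid, cpu, index);
	}
	return 0;
}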
+ * + * Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field + * assigned by the Sequence Initiator that shall be unique for a specific + * D_ID and S_ID pair while the Sequence is open." Note that it isn't + * qualified by exchange ID, which one might think it would be. + * In practice this limits the number of open sequences and exchanges to 256 + * per session. For most targets we could treat this limit as per exchange. + * + * The exchange and its sequence are freed when the last sequence is received. + * It's possible for the remote port to leave an exchange open without + * sending any sequences. + * + * Notes on reference counts: + * + * Exchanges are reference counted and exchange gets freed when the reference + * count becomes zero. + * + * Timeouts: + * Sequences are timed out for E_D_TOV and R_A_TOV. + * + * Sequence event handling: + * + * The following events may occur on initiator sequences: + * + * Send. + * For now, the whole thing is sent. + * Receive ACK + * This applies only to class F. + * The sequence is marked complete. + * ULP completion. + * The upper layer calls fc_exch_done() when done + * with exchange and sequence tuple. + * RX-inferred completion. + * When we receive the next sequence on the same exchange, we can + * retire the previous sequence ID. (XXX not implemented). + * Timeout. + * R_A_TOV frees the sequence ID. If we're waiting for ACK, + * E_D_TOV causes abort and calls upper layer response handler + * with FC_EX_TIMEOUT error. + * Receive RJT + * XXX defer. + * Send ABTS + * On timeout. + * + * The following events may occur on recipient sequences: + * + * Receive + * Allocate sequence for first frame received. + * Hold during receive handler. + * Release when final frame received. + * Keep status of last N of these for the ELS RES command. XXX TBD. + * Receive ABTS + * Deallocate sequence + * Send RJT + * Deallocate + * + * For now, we neglect conditions where only part of a sequence was + * received or transmitted, or where out-of-order receipt is detected. + */ + +/* + * Locking notes: + * + * The EM code run in a per-CPU worker thread. + * + * To protect against concurrency between a worker thread code and timers, + * sequence allocation and deallocation must be locked. + * - exchange refcnt can be done atomicly without locks. + * - sequence allocation must be locked by exch lock. + * - If the EM pool lock and ex_lock must be taken at the same time, then the + * EM pool lock must be taken before the ex_lock. + */ + +/* + * opcode names for debugging. + */ +static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT; + +/** + * fc_exch_name_lookup() - Lookup name by opcode + * @op: Opcode to be looked up + * @table: Opcode/name table + * @max_index: Index not to be exceeded + * + * This routine is used to determine a human-readable string identifying + * a R_CTL opcode. 
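The lookup helper documented just above is a bounds-checked table lookup that falls back to "unknown" for out-of-range or unnamed opcodes, so debug output never indexes past the table and never prints a NULL string. A standalone equivalent, with a deliberately sparse example table:

/* Bounds-checked opcode-to-name lookup with an "unknown" fallback. */
#include <stdio.h>

static const char *name_lookup(unsigned int op, const char *const *table,
			       unsigned int max_index)
{
	const char *name = NULL;

	if (op < max_index)		/* never index past the table */
		name = table[op];
	return name ? name : "unknown";	/* also covers holes in the table */
}

int main(void)
{
	static const char *const rctl_names[] = {
		[0x00] = "uncat", [0x01] = "sol-data", [0x03] = "sol-ctl",
	};

	printf("%s\n", name_lookup(0x01, rctl_names, 4));	/* "sol-data" */
	printf("%s\n", name_lookup(0x02, rctl_names, 4));	/* hole -> "unknown" */
	printf("%s\n", name_lookup(0x42, rctl_names, 4));	/* out of range */
	return 0;
}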
+ */ +static inline const char *fc_exch_name_lookup(unsigned int op, char **table, + unsigned int max_index) +{ + const char *name = NULL; + + if (op < max_index) + name = table[op]; + if (!name) + name = "unknown"; + return name; +} + +/** + * fc_exch_rctl_name() - Wrapper routine for fc_exch_name_lookup() + * @op: The opcode to be looked up + */ +static const char *fc_exch_rctl_name(unsigned int op) +{ + return fc_exch_name_lookup(op, fc_exch_rctl_names, + ARRAY_SIZE(fc_exch_rctl_names)); +} + +/** + * fc_exch_hold() - Increment an exchange's reference count + * @ep: Echange to be held + */ +static inline void fc_exch_hold(struct fc_exch *ep) +{ + atomic_inc(&ep->ex_refcnt); +} + +/** + * fc_exch_setup_hdr() - Initialize a FC header by initializing some fields + * and determine SOF and EOF. + * @ep: The exchange to that will use the header + * @fp: The frame whose header is to be modified + * @f_ctl: F_CTL bits that will be used for the frame header + * + * The fields initialized by this routine are: fh_ox_id, fh_rx_id, + * fh_seq_id, fh_seq_cnt and the SOF and EOF. + */ +static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, + u32 f_ctl) +{ + struct fc_frame_header *fh = fc_frame_header_get(fp); + u16 fill; + + fr_sof(fp) = ep->class; + if (ep->seq.cnt) + fr_sof(fp) = fc_sof_normal(ep->class); + + if (f_ctl & FC_FC_END_SEQ) { + fr_eof(fp) = FC_EOF_T; + if (fc_sof_needs_ack((enum fc_sof)ep->class)) + fr_eof(fp) = FC_EOF_N; + /* + * From F_CTL. + * The number of fill bytes to make the length a 4-byte + * multiple is the low order 2-bits of the f_ctl. + * The fill itself will have been cleared by the frame + * allocation. + * After this, the length will be even, as expected by + * the transport. + */ + fill = fr_len(fp) & 3; + if (fill) { + fill = 4 - fill; + /* TODO, this may be a problem with fragmented skb */ + skb_put(fp_skb(fp), fill); + hton24(fh->fh_f_ctl, f_ctl | fill); + } + } else { + WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */ + fr_eof(fp) = FC_EOF_N; + } + + /* Initialize remaining fh fields from fc_fill_fc_hdr */ + fh->fh_ox_id = htons(ep->oxid); + fh->fh_rx_id = htons(ep->rxid); + fh->fh_seq_id = ep->seq.id; + fh->fh_seq_cnt = htons(ep->seq.cnt); +} + +/** + * fc_exch_release() - Decrement an exchange's reference count + * @ep: Exchange to be released + * + * If the reference count reaches zero and the exchange is complete, + * it is freed. + */ +static void fc_exch_release(struct fc_exch *ep) +{ + struct fc_exch_mgr *mp; + + if (atomic_dec_and_test(&ep->ex_refcnt)) { + mp = ep->em; + if (ep->destructor) + ep->destructor(&ep->seq, ep->arg); + WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE)); + mempool_free(ep, mp->ep_pool); + } +} + +/** + * fc_exch_timer_cancel() - cancel exch timer + * @ep: The exchange whose timer to be canceled + */ +static inline void fc_exch_timer_cancel(struct fc_exch *ep) +{ + if (cancel_delayed_work(&ep->timeout_work)) { + FC_EXCH_DBG(ep, "Exchange timer canceled\n"); + atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ + } +} + +/** + * fc_exch_timer_set_locked() - Start a timer for an exchange w/ the + * the exchange lock held + * @ep: The exchange whose timer will start + * @timer_msec: The timeout period + * + * Used for upper level protocols to time out the exchange. + * The timer is cancelled when it fires or when the exchange completes. 
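fc_exch_setup_hdr() above pads the final frame of a sequence out to a 4-byte multiple and records the number of fill bytes in the low two bits of F_CTL, so the receiver can strip them again. A user-space sketch of just that length and fill-count computation; the F_CTL starting value in the example is illustrative.

/* Pad a frame length to a 4-byte multiple and record the number of fill
 * bytes in the low two bits of the F_CTL word.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t add_fill(size_t *len, uint32_t f_ctl)
{
	size_t fill = *len & 3;		/* bytes past the last 4-byte boundary */

	if (fill) {
		fill = 4 - fill;	/* bytes needed to reach the boundary */
		*len += fill;		/* transmitted frame grows by the pad */
		f_ctl |= (uint32_t)fill;/* receiver strips this many bytes */
	}
	return f_ctl;
}

int main(void)
{
	size_t len;

	for (len = 61; len <= 64; len++) {
		size_t padded = len;
		uint32_t f_ctl = add_fill(&padded, 0x290000);

		printf("payload %zu -> frame %zu, fill bits %u\n",
		       len, padded, (unsigned int)(f_ctl & 3));
	}
	return 0;
}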
+ */
+static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
+ unsigned int timer_msec)
+{
+ if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
+ return;
+
+ FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);
+
+ fc_exch_hold(ep); /* hold for timer */
+ if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
+ msecs_to_jiffies(timer_msec))) {
+ FC_EXCH_DBG(ep, "Exchange already queued\n");
+ fc_exch_release(ep);
+ }
+}
+
+/**
+ * fc_exch_timer_set() - Lock the exchange and set the timer
+ * @ep: The exchange whose timer will start
+ * @timer_msec: The timeout period
+ */
+static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
+{
+ spin_lock_bh(&ep->ex_lock);
+ fc_exch_timer_set_locked(ep, timer_msec);
+ spin_unlock_bh(&ep->ex_lock);
+}
+
+/**
+ * fc_exch_done_locked() - Complete an exchange with the exchange lock held
+ * @ep: The exchange that is complete
+ *
+ * Note: May sleep if invoked from outside a response handler.
+ */
+static int fc_exch_done_locked(struct fc_exch *ep)
+{
+ int rc = 1;
+
+ /*
+ * We must check for completion in case there are two threads
+ * trying to complete this. But the rrq code will reuse the
+ * ep, and in that case we only clear the resp and set it as
+ * complete, so it can be reused by the timer to send the rrq.
+ */
+ if (ep->state & FC_EX_DONE)
+ return rc;
+ ep->esb_stat |= ESB_ST_COMPLETE;
+
+ if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
+ ep->state |= FC_EX_DONE;
+ fc_exch_timer_cancel(ep);
+ rc = 0;
+ }
+ return rc;
+}
+
+static struct fc_exch fc_quarantine_exch;
+
+/**
+ * fc_exch_ptr_get() - Return an exchange from an exchange pool
+ * @pool: Exchange Pool to get an exchange from
+ * @index: Index of the exchange within the pool
+ *
+ * Use the index to get an exchange from within an exchange pool. exches
+ * will point to an array of exchange pointers. The index will select
+ * the exchange within the array.
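+ * The pointer array is stored immediately after the struct fc_exch_pool
+ * itself, which is why the accessors index from (pool + 1).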
+ */ +static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool, + u16 index) +{ + struct fc_exch **exches = (struct fc_exch **)(pool + 1); + return exches[index]; +} + +/** + * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool + * @pool: The pool to assign the exchange to + * @index: The index in the pool where the exchange will be assigned + * @ep: The exchange to assign to the pool + */ +static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index, + struct fc_exch *ep) +{ + ((struct fc_exch **)(pool + 1))[index] = ep; +} + +/** + * fc_exch_delete() - Delete an exchange + * @ep: The exchange to be deleted + */ +static void fc_exch_delete(struct fc_exch *ep) +{ + struct fc_exch_pool *pool; + u16 index; + + pool = ep->pool; + spin_lock_bh(&pool->lock); + WARN_ON(pool->total_exches <= 0); + pool->total_exches--; + + /* update cache of free slot */ + index = (ep->xid - ep->em->min_xid) >> fc_cpu_order; + if (!(ep->state & FC_EX_QUARANTINE)) { + if (pool->left == FC_XID_UNKNOWN) + pool->left = index; + else if (pool->right == FC_XID_UNKNOWN) + pool->right = index; + else + pool->next_index = index; + fc_exch_ptr_set(pool, index, NULL); + } else { + fc_exch_ptr_set(pool, index, &fc_quarantine_exch); + } + list_del(&ep->ex_list); + spin_unlock_bh(&pool->lock); + fc_exch_release(ep); /* drop hold for exch in mp */ +} + +static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp, + struct fc_frame *fp) +{ + struct fc_exch *ep; + struct fc_frame_header *fh = fc_frame_header_get(fp); + int error = -ENXIO; + u32 f_ctl; + u8 fh_type = fh->fh_type; + + ep = fc_seq_exch(sp); + + if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) { + fc_frame_free(fp); + goto out; + } + + WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT)); + + f_ctl = ntoh24(fh->fh_f_ctl); + fc_exch_setup_hdr(ep, fp, f_ctl); + fr_encaps(fp) = ep->encaps; + + /* + * update sequence count if this frame is carrying + * multiple FC frames when sequence offload is enabled + * by LLD. + */ + if (fr_max_payload(fp)) + sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)), + fr_max_payload(fp)); + else + sp->cnt++; + + /* + * Send the frame. + */ + error = lport->tt.frame_send(lport, fp); + + if (fh_type == FC_TYPE_BLS) + goto out; + + /* + * Update the exchange and sequence flags, + * assuming all frames for the sequence have been sent. + * We can only be called to send once for each sequence. + */ + ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */ + if (f_ctl & FC_FC_SEQ_INIT) + ep->esb_stat &= ~ESB_ST_SEQ_INIT; +out: + return error; +} + +/** + * fc_seq_send() - Send a frame using existing sequence/exchange pair + * @lport: The local port that the exchange will be sent on + * @sp: The sequence to be sent + * @fp: The frame to be sent on the exchange + * + * Note: The frame will be freed either by a direct call to fc_frame_free(fp) + * or indirectly by calling libfc_function_template.frame_send(). + */ +int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp) +{ + struct fc_exch *ep; + int error; + ep = fc_seq_exch(sp); + spin_lock_bh(&ep->ex_lock); + error = fc_seq_send_locked(lport, sp, fp); + spin_unlock_bh(&ep->ex_lock); + return error; +} +EXPORT_SYMBOL(fc_seq_send); + +/** + * fc_seq_alloc() - Allocate a sequence for a given exchange + * @ep: The exchange to allocate a new sequence for + * @seq_id: The sequence ID to be used + * + * We don't support multiple originated sequences on the same exchange. 
+ * By implication, any previously originated sequence on this exchange + * is complete, and we reallocate the same sequence. + */ +static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id) +{ + struct fc_seq *sp; + + sp = &ep->seq; + sp->ssb_stat = 0; + sp->cnt = 0; + sp->id = seq_id; + return sp; +} + +/** + * fc_seq_start_next_locked() - Allocate a new sequence on the same + * exchange as the supplied sequence + * @sp: The sequence/exchange to get a new sequence for + */ +static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp) +{ + struct fc_exch *ep = fc_seq_exch(sp); + + sp = fc_seq_alloc(ep, ep->seq_id++); + FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n", + ep->f_ctl, sp->id); + return sp; +} + +/** + * fc_seq_start_next() - Lock the exchange and get a new sequence + * for a given sequence/exchange pair + * @sp: The sequence/exchange to get a new exchange for + */ +struct fc_seq *fc_seq_start_next(struct fc_seq *sp) +{ + struct fc_exch *ep = fc_seq_exch(sp); + + spin_lock_bh(&ep->ex_lock); + sp = fc_seq_start_next_locked(sp); + spin_unlock_bh(&ep->ex_lock); + + return sp; +} +EXPORT_SYMBOL(fc_seq_start_next); + +/* + * Set the response handler for the exchange associated with a sequence. + * + * Note: May sleep if invoked from outside a response handler. + */ +void fc_seq_set_resp(struct fc_seq *sp, + void (*resp)(struct fc_seq *, struct fc_frame *, void *), + void *arg) +{ + struct fc_exch *ep = fc_seq_exch(sp); + DEFINE_WAIT(wait); + + spin_lock_bh(&ep->ex_lock); + while (ep->resp_active && ep->resp_task != current) { + prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE); + spin_unlock_bh(&ep->ex_lock); + + schedule(); + + spin_lock_bh(&ep->ex_lock); + } + finish_wait(&ep->resp_wq, &wait); + ep->resp = resp; + ep->arg = arg; + spin_unlock_bh(&ep->ex_lock); +} +EXPORT_SYMBOL(fc_seq_set_resp); + +/** + * fc_exch_abort_locked() - Abort an exchange + * @ep: The exchange to be aborted + * @timer_msec: The period of time to wait before aborting + * + * Abort an exchange and sequence. Generally called because of a + * exchange timeout or an abort from the upper layer. + * + * A timer_msec can be specified for abort timeout, if non-zero + * timer_msec value is specified then exchange resp handler + * will be called with timeout error if no response to abort. + * + * Locking notes: Called with exch lock held + * + * Return value: 0 on success else error code + */ +static int fc_exch_abort_locked(struct fc_exch *ep, + unsigned int timer_msec) +{ + struct fc_seq *sp; + struct fc_frame *fp; + int error; + + FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec); + if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) || + ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) { + FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n", + ep->esb_stat, ep->state); + return -ENXIO; + } + + /* + * Send the abort on a new sequence if possible. + */ + sp = fc_seq_start_next_locked(&ep->seq); + if (!sp) + return -ENOMEM; + + if (timer_msec) + fc_exch_timer_set_locked(ep, timer_msec); + + if (ep->sid) { + /* + * Send an abort for the sequence that timed out. + */ + fp = fc_frame_alloc(ep->lp, 0); + if (fp) { + ep->esb_stat |= ESB_ST_SEQ_INIT; + fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid, + FC_TYPE_BLS, FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + error = fc_seq_send_locked(ep->lp, sp, fp); + } else { + error = -ENOBUFS; + } + } else { + /* + * If not logged into the fabric, don't send ABTS but leave + * sequence active until next timeout. 
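+ * The caller still sees success (error = 0) in this case; the exchange
+ * is marked ESB_ST_ABNORMAL below either way.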
+ */ + error = 0; + } + ep->esb_stat |= ESB_ST_ABNORMAL; + return error; +} + +/** + * fc_seq_exch_abort() - Abort an exchange and sequence + * @req_sp: The sequence to be aborted + * @timer_msec: The period of time to wait before aborting + * + * Generally called because of a timeout or an abort from the upper layer. + * + * Return value: 0 on success else error code + */ +int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec) +{ + struct fc_exch *ep; + int error; + + ep = fc_seq_exch(req_sp); + spin_lock_bh(&ep->ex_lock); + error = fc_exch_abort_locked(ep, timer_msec); + spin_unlock_bh(&ep->ex_lock); + return error; +} + +/** + * fc_invoke_resp() - invoke ep->resp() + * @ep: The exchange to be operated on + * @fp: The frame pointer to pass through to ->resp() + * @sp: The sequence pointer to pass through to ->resp() + * + * Notes: + * It is assumed that after initialization finished (this means the + * first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are + * modified only via fc_seq_set_resp(). This guarantees that none of these + * two variables changes if ep->resp_active > 0. + * + * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when + * this function is invoked, the first spin_lock_bh() call in this function + * will wait until fc_seq_set_resp() has finished modifying these variables. + * + * Since fc_exch_done() invokes fc_seq_set_resp() it is guaranteed that that + * ep->resp() won't be invoked after fc_exch_done() has returned. + * + * The response handler itself may invoke fc_exch_done(), which will clear the + * ep->resp pointer. + * + * Return value: + * Returns true if and only if ep->resp has been invoked. + */ +static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp, + struct fc_frame *fp) +{ + void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); + void *arg; + bool res = false; + + spin_lock_bh(&ep->ex_lock); + ep->resp_active++; + if (ep->resp_task != current) + ep->resp_task = !ep->resp_task ? current : NULL; + resp = ep->resp; + arg = ep->arg; + spin_unlock_bh(&ep->ex_lock); + + if (resp) { + resp(sp, fp, arg); + res = true; + } + + spin_lock_bh(&ep->ex_lock); + if (--ep->resp_active == 0) + ep->resp_task = NULL; + spin_unlock_bh(&ep->ex_lock); + + if (ep->resp_active == 0) + wake_up(&ep->resp_wq); + + return res; +} + +/** + * fc_exch_timeout() - Handle exchange timer expiration + * @work: The work_struct identifying the exchange that timed out + */ +static void fc_exch_timeout(struct work_struct *work) +{ + struct fc_exch *ep = container_of(work, struct fc_exch, + timeout_work.work); + struct fc_seq *sp = &ep->seq; + u32 e_stat; + int rc = 1; + + FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state); + + spin_lock_bh(&ep->ex_lock); + if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) + goto unlock; + + e_stat = ep->esb_stat; + if (e_stat & ESB_ST_COMPLETE) { + ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL; + spin_unlock_bh(&ep->ex_lock); + if (e_stat & ESB_ST_REC_QUAL) + fc_exch_rrq(ep); + goto done; + } else { + if (e_stat & ESB_ST_ABNORMAL) + rc = fc_exch_done_locked(ep); + spin_unlock_bh(&ep->ex_lock); + if (!rc) + fc_exch_delete(ep); + fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT)); + fc_seq_set_resp(sp, NULL, ep->arg); + fc_seq_exch_abort(sp, 2 * ep->r_a_tov); + goto done; + } +unlock: + spin_unlock_bh(&ep->ex_lock); +done: + /* + * This release matches the hold taken when the timer was set. 
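+ * (That hold was taken in fc_exch_timer_set_locked() via fc_exch_hold().)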
+ */ + fc_exch_release(ep); +} + +/** + * fc_exch_em_alloc() - Allocate an exchange from a specified EM. + * @lport: The local port that the exchange is for + * @mp: The exchange manager that will allocate the exchange + * + * Returns pointer to allocated fc_exch with exch lock held. + */ +static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, + struct fc_exch_mgr *mp) +{ + struct fc_exch *ep; + unsigned int cpu; + u16 index; + struct fc_exch_pool *pool; + + /* allocate memory for exchange */ + ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); + if (!ep) { + atomic_inc(&mp->stats.no_free_exch); + goto out; + } + memset(ep, 0, sizeof(*ep)); + + cpu = raw_smp_processor_id(); + pool = per_cpu_ptr(mp->pool, cpu); + spin_lock_bh(&pool->lock); + + /* peek cache of free slot */ + if (pool->left != FC_XID_UNKNOWN) { + if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) { + index = pool->left; + pool->left = FC_XID_UNKNOWN; + goto hit; + } + } + if (pool->right != FC_XID_UNKNOWN) { + if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) { + index = pool->right; + pool->right = FC_XID_UNKNOWN; + goto hit; + } + } + + index = pool->next_index; + /* allocate new exch from pool */ + while (fc_exch_ptr_get(pool, index)) { + index = index == mp->pool_max_index ? 0 : index + 1; + if (index == pool->next_index) + goto err; + } + pool->next_index = index == mp->pool_max_index ? 0 : index + 1; +hit: + fc_exch_hold(ep); /* hold for exch in mp */ + spin_lock_init(&ep->ex_lock); + /* + * Hold exch lock for caller to prevent fc_exch_reset() + * from releasing exch while fc_exch_alloc() caller is + * still working on exch. + */ + spin_lock_bh(&ep->ex_lock); + + fc_exch_ptr_set(pool, index, ep); + list_add_tail(&ep->ex_list, &pool->ex_list); + fc_seq_alloc(ep, ep->seq_id++); + pool->total_exches++; + spin_unlock_bh(&pool->lock); + + /* + * update exchange + */ + ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid; + ep->em = mp; + ep->pool = pool; + ep->lp = lport; + ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */ + ep->rxid = FC_XID_UNKNOWN; + ep->class = mp->class; + ep->resp_active = 0; + init_waitqueue_head(&ep->resp_wq); + INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout); +out: + return ep; +err: + spin_unlock_bh(&pool->lock); + atomic_inc(&mp->stats.no_free_exch_xid); + mempool_free(ep, mp->ep_pool); + return NULL; +} + +/** + * fc_exch_alloc() - Allocate an exchange from an EM on a + * local port's list of EMs. + * @lport: The local port that will own the exchange + * @fp: The FC frame that the exchange will be for + * + * This function walks the list of exchange manager(EM) + * anchors to select an EM for a new exchange allocation. The + * EM is selected when a NULL match function pointer is encountered + * or when a call to a match function returns true. 
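+ * Returns NULL if none of the EMs on the list can supply an exchange.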
+ */ +static struct fc_exch *fc_exch_alloc(struct fc_lport *lport, + struct fc_frame *fp) +{ + struct fc_exch_mgr_anchor *ema; + struct fc_exch *ep; + + list_for_each_entry(ema, &lport->ema_list, ema_list) { + if (!ema->match || ema->match(fp)) { + ep = fc_exch_em_alloc(lport, ema->mp); + if (ep) + return ep; + } + } + return NULL; +} + +/** + * fc_exch_find() - Lookup and hold an exchange + * @mp: The exchange manager to lookup the exchange from + * @xid: The XID of the exchange to look up + */ +static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) +{ + struct fc_lport *lport = mp->lport; + struct fc_exch_pool *pool; + struct fc_exch *ep = NULL; + u16 cpu = xid & fc_cpu_mask; + + if (xid == FC_XID_UNKNOWN) + return NULL; + + if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { + pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n:", + lport->host->host_no, lport->port_id, xid, cpu); + return NULL; + } + + if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) { + pool = per_cpu_ptr(mp->pool, cpu); + spin_lock_bh(&pool->lock); + ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); + if (ep == &fc_quarantine_exch) { + FC_LPORT_DBG(lport, "xid %x quarantined\n", xid); + ep = NULL; + } + if (ep) { + WARN_ON(ep->xid != xid); + fc_exch_hold(ep); + } + spin_unlock_bh(&pool->lock); + } + return ep; +} + + +/** + * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and + * the memory allocated for the related objects may be freed. + * @sp: The sequence that has completed + * + * Note: May sleep if invoked from outside a response handler. + */ +void fc_exch_done(struct fc_seq *sp) +{ + struct fc_exch *ep = fc_seq_exch(sp); + int rc; + + spin_lock_bh(&ep->ex_lock); + rc = fc_exch_done_locked(ep); + spin_unlock_bh(&ep->ex_lock); + + fc_seq_set_resp(sp, NULL, ep->arg); + if (!rc) + fc_exch_delete(ep); +} +EXPORT_SYMBOL(fc_exch_done); + +/** + * fc_exch_resp() - Allocate a new exchange for a response frame + * @lport: The local port that the exchange was for + * @mp: The exchange manager to allocate the exchange from + * @fp: The response frame + * + * Sets the responder ID in the frame header. + */ +static struct fc_exch *fc_exch_resp(struct fc_lport *lport, + struct fc_exch_mgr *mp, + struct fc_frame *fp) +{ + struct fc_exch *ep; + struct fc_frame_header *fh; + + ep = fc_exch_alloc(lport, fp); + if (ep) { + ep->class = fc_frame_class(fp); + + /* + * Set EX_CTX indicating we're responding on this exchange. + */ + ep->f_ctl |= FC_FC_EX_CTX; /* we're responding */ + ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not new */ + fh = fc_frame_header_get(fp); + ep->sid = ntoh24(fh->fh_d_id); + ep->did = ntoh24(fh->fh_s_id); + ep->oid = ep->did; + + /* + * Allocated exchange has placed the XID in the + * originator field. Move it to the responder field, + * and set the originator XID from the frame. 
+ */ + ep->rxid = ep->xid; + ep->oxid = ntohs(fh->fh_ox_id); + ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT; + if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0) + ep->esb_stat &= ~ESB_ST_SEQ_INIT; + + fc_exch_hold(ep); /* hold for caller */ + spin_unlock_bh(&ep->ex_lock); /* lock from fc_exch_alloc */ + } + return ep; +} + +/** + * fc_seq_lookup_recip() - Find a sequence where the other end + * originated the sequence + * @lport: The local port that the frame was sent to + * @mp: The Exchange Manager to lookup the exchange from + * @fp: The frame associated with the sequence we're looking for + * + * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold + * on the ep that should be released by the caller. + */ +static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, + struct fc_exch_mgr *mp, + struct fc_frame *fp) +{ + struct fc_frame_header *fh = fc_frame_header_get(fp); + struct fc_exch *ep = NULL; + struct fc_seq *sp = NULL; + enum fc_pf_rjt_reason reject = FC_RJT_NONE; + u32 f_ctl; + u16 xid; + + f_ctl = ntoh24(fh->fh_f_ctl); + WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0); + + /* + * Lookup or create the exchange if we will be creating the sequence. + */ + if (f_ctl & FC_FC_EX_CTX) { + xid = ntohs(fh->fh_ox_id); /* we originated exch */ + ep = fc_exch_find(mp, xid); + if (!ep) { + atomic_inc(&mp->stats.xid_not_found); + reject = FC_RJT_OX_ID; + goto out; + } + if (ep->rxid == FC_XID_UNKNOWN) + ep->rxid = ntohs(fh->fh_rx_id); + else if (ep->rxid != ntohs(fh->fh_rx_id)) { + reject = FC_RJT_OX_ID; + goto rel; + } + } else { + xid = ntohs(fh->fh_rx_id); /* we are the responder */ + + /* + * Special case for MDS issuing an ELS TEST with a + * bad rxid of 0. + * XXX take this out once we do the proper reject. + */ + if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ && + fc_frame_payload_op(fp) == ELS_TEST) { + fh->fh_rx_id = htons(FC_XID_UNKNOWN); + xid = FC_XID_UNKNOWN; + } + + /* + * new sequence - find the exchange + */ + ep = fc_exch_find(mp, xid); + if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) { + if (ep) { + atomic_inc(&mp->stats.xid_busy); + reject = FC_RJT_RX_ID; + goto rel; + } + ep = fc_exch_resp(lport, mp, fp); + if (!ep) { + reject = FC_RJT_EXCH_EST; /* XXX */ + goto out; + } + xid = ep->xid; /* get our XID */ + } else if (!ep) { + atomic_inc(&mp->stats.xid_not_found); + reject = FC_RJT_RX_ID; /* XID not found */ + goto out; + } + } + + spin_lock_bh(&ep->ex_lock); + /* + * At this point, we have the exchange held. + * Find or create the sequence. + */ + if (fc_sof_is_init(fr_sof(fp))) { + sp = &ep->seq; + sp->ssb_stat |= SSB_ST_RESP; + sp->id = fh->fh_seq_id; + } else { + sp = &ep->seq; + if (sp->id != fh->fh_seq_id) { + atomic_inc(&mp->stats.seq_not_found); + if (f_ctl & FC_FC_END_SEQ) { + /* + * Update sequence_id based on incoming last + * frame of sequence exchange. This is needed + * for FC target where DDP has been used + * on target where, stack is indicated only + * about last frame's (payload _header) header. + * Whereas "seq_id" which is part of + * frame_header is allocated by initiator + * which is totally different from "seq_id" + * allocated when XFER_RDY was sent by target. + * To avoid false -ve which results into not + * sending RSP, hence write request on other + * end never finishes. 
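+ * In short: for the last frame of a sequence, adopt the initiator's
+ * SEQ_ID instead of rejecting the frame, so the response can still be
+ * sent and the write request can complete.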
+ */ + sp->ssb_stat |= SSB_ST_RESP; + sp->id = fh->fh_seq_id; + } else { + spin_unlock_bh(&ep->ex_lock); + + /* sequence/exch should exist */ + reject = FC_RJT_SEQ_ID; + goto rel; + } + } + } + WARN_ON(ep != fc_seq_exch(sp)); + + if (f_ctl & FC_FC_SEQ_INIT) + ep->esb_stat |= ESB_ST_SEQ_INIT; + spin_unlock_bh(&ep->ex_lock); + + fr_seq(fp) = sp; +out: + return reject; +rel: + fc_exch_done(&ep->seq); + fc_exch_release(ep); /* hold from fc_exch_find/fc_exch_resp */ + return reject; +} + +/** + * fc_seq_lookup_orig() - Find a sequence where this end + * originated the sequence + * @mp: The Exchange Manager to lookup the exchange from + * @fp: The frame associated with the sequence we're looking for + * + * Does not hold the sequence for the caller. + */ +static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp, + struct fc_frame *fp) +{ + struct fc_frame_header *fh = fc_frame_header_get(fp); + struct fc_exch *ep; + struct fc_seq *sp = NULL; + u32 f_ctl; + u16 xid; + + f_ctl = ntoh24(fh->fh_f_ctl); + WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX); + xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id); + ep = fc_exch_find(mp, xid); + if (!ep) + return NULL; + if (ep->seq.id == fh->fh_seq_id) { + /* + * Save the RX_ID if we didn't previously know it. + */ + sp = &ep->seq; + if ((f_ctl & FC_FC_EX_CTX) != 0 && + ep->rxid == FC_XID_UNKNOWN) { + ep->rxid = ntohs(fh->fh_rx_id); + } + } + fc_exch_release(ep); + return sp; +} + +/** + * fc_exch_set_addr() - Set the source and destination IDs for an exchange + * @ep: The exchange to set the addresses for + * @orig_id: The originator's ID + * @resp_id: The responder's ID + * + * Note this must be done before the first sequence of the exchange is sent. + */ +static void fc_exch_set_addr(struct fc_exch *ep, + u32 orig_id, u32 resp_id) +{ + ep->oid = orig_id; + if (ep->esb_stat & ESB_ST_RESP) { + ep->sid = resp_id; + ep->did = orig_id; + } else { + ep->sid = orig_id; + ep->did = resp_id; + } +} + +/** + * fc_seq_els_rsp_send() - Send an ELS response using information from + * the existing sequence/exchange. + * @fp: The received frame + * @els_cmd: The ELS command to be sent + * @els_data: The ELS data to be sent + * + * The received frame is not freed. 
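+ * Unrecognized ELS commands are only logged; no reply is sent for them.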
+ */ +void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd, + struct fc_seq_els_data *els_data) +{ + switch (els_cmd) { + case ELS_LS_RJT: + fc_seq_ls_rjt(fp, els_data->reason, els_data->explan); + break; + case ELS_LS_ACC: + fc_seq_ls_acc(fp); + break; + case ELS_RRQ: + fc_exch_els_rrq(fp); + break; + case ELS_REC: + fc_exch_els_rec(fp); + break; + default: + FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd); + } +} +EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send); + +/** + * fc_seq_send_last() - Send a sequence that is the last in the exchange + * @sp: The sequence that is to be sent + * @fp: The frame that will be sent on the sequence + * @rctl: The R_CTL information to be sent + * @fh_type: The frame header type + */ +static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp, + enum fc_rctl rctl, enum fc_fh_type fh_type) +{ + u32 f_ctl; + struct fc_exch *ep = fc_seq_exch(sp); + + f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT; + f_ctl |= ep->f_ctl; + fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0); + fc_seq_send_locked(ep->lp, sp, fp); +} + +/** + * fc_seq_send_ack() - Send an acknowledgement that we've received a frame + * @sp: The sequence to send the ACK on + * @rx_fp: The received frame that is being acknoledged + * + * Send ACK_1 (or equiv.) indicating we received something. + */ +static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) +{ + struct fc_frame *fp; + struct fc_frame_header *rx_fh; + struct fc_frame_header *fh; + struct fc_exch *ep = fc_seq_exch(sp); + struct fc_lport *lport = ep->lp; + unsigned int f_ctl; + + /* + * Don't send ACKs for class 3. + */ + if (fc_sof_needs_ack(fr_sof(rx_fp))) { + fp = fc_frame_alloc(lport, 0); + if (!fp) { + FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n"); + return; + } + + fh = fc_frame_header_get(fp); + fh->fh_r_ctl = FC_RCTL_ACK_1; + fh->fh_type = FC_TYPE_BLS; + + /* + * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22). + * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT. + * Bits 9-8 are meaningful (retransmitted or unidirectional). + * Last ACK uses bits 7-6 (continue sequence), + * bits 5-4 are meaningful (what kind of ACK to use). + */ + rx_fh = fc_frame_header_get(rx_fp); + f_ctl = ntoh24(rx_fh->fh_f_ctl); + f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX | + FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ | + FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT | + FC_FC_RETX_SEQ | FC_FC_UNI_TX; + f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX; + hton24(fh->fh_f_ctl, f_ctl); + + fc_exch_setup_hdr(ep, fp, f_ctl); + fh->fh_seq_id = rx_fh->fh_seq_id; + fh->fh_seq_cnt = rx_fh->fh_seq_cnt; + fh->fh_parm_offset = htonl(1); /* ack single frame */ + + fr_sof(fp) = fr_sof(rx_fp); + if (f_ctl & FC_FC_END_SEQ) + fr_eof(fp) = FC_EOF_T; + else + fr_eof(fp) = FC_EOF_N; + + lport->tt.frame_send(lport, fp); + } +} + +/** + * fc_exch_send_ba_rjt() - Send BLS Reject + * @rx_fp: The frame being rejected + * @reason: The reason the frame is being rejected + * @explan: The explanation for the rejection + * + * This is for rejecting BA_ABTS only. 
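+ * If no reply frame can be allocated, the rejection is dropped with only
+ * a debug message.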
+ */ +static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, + enum fc_ba_rjt_reason reason, + enum fc_ba_rjt_explan explan) +{ + struct fc_frame *fp; + struct fc_frame_header *rx_fh; + struct fc_frame_header *fh; + struct fc_ba_rjt *rp; + struct fc_seq *sp; + struct fc_lport *lport; + unsigned int f_ctl; + + lport = fr_dev(rx_fp); + sp = fr_seq(rx_fp); + fp = fc_frame_alloc(lport, sizeof(*rp)); + if (!fp) { + FC_EXCH_DBG(fc_seq_exch(sp), + "Drop BA_RJT request, out of memory\n"); + return; + } + fh = fc_frame_header_get(fp); + rx_fh = fc_frame_header_get(rx_fp); + + memset(fh, 0, sizeof(*fh) + sizeof(*rp)); + + rp = fc_frame_payload_get(fp, sizeof(*rp)); + rp->br_reason = reason; + rp->br_explan = explan; + + /* + * seq_id, cs_ctl, df_ctl and param/offset are zero. + */ + memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3); + memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3); + fh->fh_ox_id = rx_fh->fh_ox_id; + fh->fh_rx_id = rx_fh->fh_rx_id; + fh->fh_seq_cnt = rx_fh->fh_seq_cnt; + fh->fh_r_ctl = FC_RCTL_BA_RJT; + fh->fh_type = FC_TYPE_BLS; + + /* + * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22). + * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT. + * Bits 9-8 are meaningful (retransmitted or unidirectional). + * Last ACK uses bits 7-6 (continue sequence), + * bits 5-4 are meaningful (what kind of ACK to use). + * Always set LAST_SEQ, END_SEQ. + */ + f_ctl = ntoh24(rx_fh->fh_f_ctl); + f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX | + FC_FC_END_CONN | FC_FC_SEQ_INIT | + FC_FC_RETX_SEQ | FC_FC_UNI_TX; + f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX; + f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ; + f_ctl &= ~FC_FC_FIRST_SEQ; + hton24(fh->fh_f_ctl, f_ctl); + + fr_sof(fp) = fc_sof_class(fr_sof(rx_fp)); + fr_eof(fp) = FC_EOF_T; + if (fc_sof_needs_ack(fr_sof(fp))) + fr_eof(fp) = FC_EOF_N; + + lport->tt.frame_send(lport, fp); +} + +/** + * fc_exch_recv_abts() - Handle an incoming ABTS + * @ep: The exchange the abort was on + * @rx_fp: The ABTS frame + * + * This would be for target mode usually, but could be due to lost + * FCP transfer ready, confirm or RRQ. We always handle this as an + * exchange abort, ignoring the parameter. 
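+ * A BA_ACC is sent while the exchange is still open; a completed or
+ * unknown exchange is answered with a BA_RJT instead.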
+ */ +static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) +{ + struct fc_frame *fp; + struct fc_ba_acc *ap; + struct fc_frame_header *fh; + struct fc_seq *sp; + + if (!ep) + goto reject; + + FC_EXCH_DBG(ep, "exch: ABTS received\n"); + fp = fc_frame_alloc(ep->lp, sizeof(*ap)); + if (!fp) { + FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n"); + goto free; + } + + spin_lock_bh(&ep->ex_lock); + if (ep->esb_stat & ESB_ST_COMPLETE) { + spin_unlock_bh(&ep->ex_lock); + FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n"); + fc_frame_free(fp); + goto reject; + } + if (!(ep->esb_stat & ESB_ST_REC_QUAL)) { + ep->esb_stat |= ESB_ST_REC_QUAL; + fc_exch_hold(ep); /* hold for REC_QUAL */ + } + fc_exch_timer_set_locked(ep, ep->r_a_tov); + fh = fc_frame_header_get(fp); + ap = fc_frame_payload_get(fp, sizeof(*ap)); + memset(ap, 0, sizeof(*ap)); + sp = &ep->seq; + ap->ba_high_seq_cnt = htons(0xffff); + if (sp->ssb_stat & SSB_ST_RESP) { + ap->ba_seq_id = sp->id; + ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL; + ap->ba_high_seq_cnt = fh->fh_seq_cnt; + ap->ba_low_seq_cnt = htons(sp->cnt); + } + sp = fc_seq_start_next_locked(sp); + fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS); + ep->esb_stat |= ESB_ST_ABNORMAL; + spin_unlock_bh(&ep->ex_lock); + +free: + fc_frame_free(rx_fp); + return; + +reject: + fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID); + goto free; +} + +/** + * fc_seq_assign() - Assign exchange and sequence for incoming request + * @lport: The local port that received the request + * @fp: The request frame + * + * On success, the sequence pointer will be returned and also in fr_seq(@fp). + * A reference will be held on the exchange/sequence for the caller, which + * must call fc_seq_release(). + */ +struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_exch_mgr_anchor *ema; + + WARN_ON(lport != fr_dev(fp)); + WARN_ON(fr_seq(fp)); + fr_seq(fp) = NULL; + + list_for_each_entry(ema, &lport->ema_list, ema_list) + if ((!ema->match || ema->match(fp)) && + fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE) + break; + return fr_seq(fp); +} +EXPORT_SYMBOL(fc_seq_assign); + +/** + * fc_seq_release() - Release the hold + * @sp: The sequence. + */ +void fc_seq_release(struct fc_seq *sp) +{ + fc_exch_release(fc_seq_exch(sp)); +} +EXPORT_SYMBOL(fc_seq_release); + +/** + * fc_exch_recv_req() - Handler for an incoming request + * @lport: The local port that received the request + * @mp: The EM that the exchange is on + * @fp: The request frame + * + * This is used when the other end is originating the exchange + * and the sequence. + */ +static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp, + struct fc_frame *fp) +{ + struct fc_frame_header *fh = fc_frame_header_get(fp); + struct fc_seq *sp = NULL; + struct fc_exch *ep = NULL; + enum fc_pf_rjt_reason reject; + + /* We can have the wrong fc_lport at this point with NPIV, which is a + * problem now that we know a new exchange needs to be allocated + */ + lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); + if (!lport) { + fc_frame_free(fp); + return; + } + fr_dev(fp) = lport; + + BUG_ON(fr_seq(fp)); /* XXX remove later */ + + /* + * If the RX_ID is 0xffff, don't allocate an exchange. + * The upper-level protocol may request one later, if needed. 
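+ * Such frames are passed directly to fc_lport_recv() without allocating
+ * an exchange here.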
+ */ + if (fh->fh_rx_id == htons(FC_XID_UNKNOWN)) + return fc_lport_recv(lport, fp); + + reject = fc_seq_lookup_recip(lport, mp, fp); + if (reject == FC_RJT_NONE) { + sp = fr_seq(fp); /* sequence will be held */ + ep = fc_seq_exch(sp); + fc_seq_send_ack(sp, fp); + ep->encaps = fr_encaps(fp); + + /* + * Call the receive function. + * + * The receive function may allocate a new sequence + * over the old one, so we shouldn't change the + * sequence after this. + * + * The frame will be freed by the receive function. + * If new exch resp handler is valid then call that + * first. + */ + if (!fc_invoke_resp(ep, sp, fp)) + fc_lport_recv(lport, fp); + fc_exch_release(ep); /* release from lookup */ + } else { + FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n", + reject); + fc_frame_free(fp); + } +} + +/** + * fc_exch_recv_seq_resp() - Handler for an incoming response where the other + * end is the originator of the sequence that is a + * response to our initial exchange + * @mp: The EM that the exchange is on + * @fp: The response frame + */ +static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) +{ + struct fc_frame_header *fh = fc_frame_header_get(fp); + struct fc_seq *sp; + struct fc_exch *ep; + enum fc_sof sof; + u32 f_ctl; + int rc; + + ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); + if (!ep) { + atomic_inc(&mp->stats.xid_not_found); + goto out; + } + if (ep->esb_stat & ESB_ST_COMPLETE) { + atomic_inc(&mp->stats.xid_not_found); + goto rel; + } + if (ep->rxid == FC_XID_UNKNOWN) + ep->rxid = ntohs(fh->fh_rx_id); + if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { + atomic_inc(&mp->stats.xid_not_found); + goto rel; + } + if (ep->did != ntoh24(fh->fh_s_id) && + ep->did != FC_FID_FLOGI) { + atomic_inc(&mp->stats.xid_not_found); + goto rel; + } + sof = fr_sof(fp); + sp = &ep->seq; + if (fc_sof_is_init(sof)) { + sp->ssb_stat |= SSB_ST_RESP; + sp->id = fh->fh_seq_id; + } + + f_ctl = ntoh24(fh->fh_f_ctl); + fr_seq(fp) = sp; + + spin_lock_bh(&ep->ex_lock); + if (f_ctl & FC_FC_SEQ_INIT) + ep->esb_stat |= ESB_ST_SEQ_INIT; + spin_unlock_bh(&ep->ex_lock); + + if (fc_sof_needs_ack(sof)) + fc_seq_send_ack(sp, fp); + + if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T && + (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == + (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) { + spin_lock_bh(&ep->ex_lock); + rc = fc_exch_done_locked(ep); + WARN_ON(fc_seq_exch(sp) != ep); + spin_unlock_bh(&ep->ex_lock); + if (!rc) { + fc_exch_delete(ep); + } else { + FC_EXCH_DBG(ep, "ep is completed already," + "hence skip calling the resp\n"); + goto skip_resp; + } + } + + /* + * Call the receive function. + * The sequence is held (has a refcnt) for us, + * but not for the receive function. + * + * The receive function may allocate a new sequence + * over the old one, so we shouldn't change the + * sequence after this. + * + * The frame will be freed by the receive function. + * If new exch resp handler is valid then call that + * first. 
+ */ + if (!fc_invoke_resp(ep, sp, fp)) + fc_frame_free(fp); + +skip_resp: + fc_exch_release(ep); + return; +rel: + fc_exch_release(ep); +out: + fc_frame_free(fp); +} + +/** + * fc_exch_recv_resp() - Handler for a sequence where other end is + * responding to our sequence + * @mp: The EM that the exchange is on + * @fp: The response frame + */ +static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) +{ + struct fc_seq *sp; + + sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ + + if (!sp) + atomic_inc(&mp->stats.xid_not_found); + else + atomic_inc(&mp->stats.non_bls_resp); + + fc_frame_free(fp); +} + +/** + * fc_exch_abts_resp() - Handler for a response to an ABT + * @ep: The exchange that the frame is on + * @fp: The response frame + * + * This response would be to an ABTS cancelling an exchange or sequence. + * The response can be either BA_ACC or BA_RJT + */ +static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) +{ + struct fc_frame_header *fh; + struct fc_ba_acc *ap; + struct fc_seq *sp; + u16 low; + u16 high; + int rc = 1, has_rec = 0; + + fh = fc_frame_header_get(fp); + FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl, + fc_exch_rctl_name(fh->fh_r_ctl)); + + if (cancel_delayed_work_sync(&ep->timeout_work)) { + FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n"); + fc_exch_release(ep); /* release from pending timer hold */ + return; + } + + spin_lock_bh(&ep->ex_lock); + switch (fh->fh_r_ctl) { + case FC_RCTL_BA_ACC: + ap = fc_frame_payload_get(fp, sizeof(*ap)); + if (!ap) + break; + + /* + * Decide whether to establish a Recovery Qualifier. + * We do this if there is a non-empty SEQ_CNT range and + * SEQ_ID is the same as the one we aborted. + */ + low = ntohs(ap->ba_low_seq_cnt); + high = ntohs(ap->ba_high_seq_cnt); + if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 && + (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL || + ap->ba_seq_id == ep->seq_id) && low != high) { + ep->esb_stat |= ESB_ST_REC_QUAL; + fc_exch_hold(ep); /* hold for recovery qualifier */ + has_rec = 1; + } + break; + case FC_RCTL_BA_RJT: + break; + default: + break; + } + + /* do we need to do some other checks here. Can we reuse more of + * fc_exch_recv_seq_resp + */ + sp = &ep->seq; + /* + * do we want to check END_SEQ as well as LAST_SEQ here? + */ + if (ep->fh_type != FC_TYPE_FCP && + ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ) + rc = fc_exch_done_locked(ep); + spin_unlock_bh(&ep->ex_lock); + + fc_exch_hold(ep); + if (!rc) + fc_exch_delete(ep); + if (!fc_invoke_resp(ep, sp, fp)) + fc_frame_free(fp); + if (has_rec) + fc_exch_timer_set(ep, ep->r_a_tov); + fc_exch_release(ep); +} + +/** + * fc_exch_recv_bls() - Handler for a BLS sequence + * @mp: The EM that the exchange is on + * @fp: The request frame + * + * The BLS frame is always a sequence initiated by the remote side. + * We may be either the originator or recipient of the exchange. + */ +static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) +{ + struct fc_frame_header *fh; + struct fc_exch *ep; + u32 f_ctl; + + fh = fc_frame_header_get(fp); + f_ctl = ntoh24(fh->fh_f_ctl); + fr_seq(fp) = NULL; + + ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ? + ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id)); + if (ep && (f_ctl & FC_FC_SEQ_INIT)) { + spin_lock_bh(&ep->ex_lock); + ep->esb_stat |= ESB_ST_SEQ_INIT; + spin_unlock_bh(&ep->ex_lock); + } + if (f_ctl & FC_FC_SEQ_CTX) { + /* + * A response to a sequence we initiated. + * This should only be ACKs for class 2 or F. 
+ */ + switch (fh->fh_r_ctl) { + case FC_RCTL_ACK_1: + case FC_RCTL_ACK_0: + break; + default: + if (ep) + FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n", + fh->fh_r_ctl, + fc_exch_rctl_name(fh->fh_r_ctl)); + break; + } + fc_frame_free(fp); + } else { + switch (fh->fh_r_ctl) { + case FC_RCTL_BA_RJT: + case FC_RCTL_BA_ACC: + if (ep) + fc_exch_abts_resp(ep, fp); + else + fc_frame_free(fp); + break; + case FC_RCTL_BA_ABTS: + if (ep) + fc_exch_recv_abts(ep, fp); + else + fc_frame_free(fp); + break; + default: /* ignore junk */ + fc_frame_free(fp); + break; + } + } + if (ep) + fc_exch_release(ep); /* release hold taken by fc_exch_find */ +} + +/** + * fc_seq_ls_acc() - Accept sequence with LS_ACC + * @rx_fp: The received frame, not freed here. + * + * If this fails due to allocation or transmit congestion, assume the + * originator will repeat the sequence. + */ +static void fc_seq_ls_acc(struct fc_frame *rx_fp) +{ + struct fc_lport *lport; + struct fc_els_ls_acc *acc; + struct fc_frame *fp; + struct fc_seq *sp; + + lport = fr_dev(rx_fp); + sp = fr_seq(rx_fp); + fp = fc_frame_alloc(lport, sizeof(*acc)); + if (!fp) { + FC_EXCH_DBG(fc_seq_exch(sp), + "exch: drop LS_ACC, out of memory\n"); + return; + } + acc = fc_frame_payload_get(fp, sizeof(*acc)); + memset(acc, 0, sizeof(*acc)); + acc->la_cmd = ELS_LS_ACC; + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); +} + +/** + * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT + * @rx_fp: The received frame, not freed here. + * @reason: The reason the sequence is being rejected + * @explan: The explanation for the rejection + * + * If this fails due to allocation or transmit congestion, assume the + * originator will repeat the sequence. + */ +static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason, + enum fc_els_rjt_explan explan) +{ + struct fc_lport *lport; + struct fc_els_ls_rjt *rjt; + struct fc_frame *fp; + struct fc_seq *sp; + + lport = fr_dev(rx_fp); + sp = fr_seq(rx_fp); + fp = fc_frame_alloc(lport, sizeof(*rjt)); + if (!fp) { + FC_EXCH_DBG(fc_seq_exch(sp), + "exch: drop LS_ACC, out of memory\n"); + return; + } + rjt = fc_frame_payload_get(fp, sizeof(*rjt)); + memset(rjt, 0, sizeof(*rjt)); + rjt->er_cmd = ELS_LS_RJT; + rjt->er_reason = reason; + rjt->er_explan = explan; + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); +} + +/** + * fc_exch_reset() - Reset an exchange + * @ep: The exchange to be reset + * + * Note: May sleep if invoked from outside a response handler. 
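+ * Any registered response handler is invoked with -FC_EX_CLOSED, unless
+ * the exchange had already completed, and is then cleared.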
+ */ +static void fc_exch_reset(struct fc_exch *ep) +{ + struct fc_seq *sp; + int rc = 1; + + spin_lock_bh(&ep->ex_lock); + ep->state |= FC_EX_RST_CLEANUP; + fc_exch_timer_cancel(ep); + if (ep->esb_stat & ESB_ST_REC_QUAL) + atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */ + ep->esb_stat &= ~ESB_ST_REC_QUAL; + sp = &ep->seq; + rc = fc_exch_done_locked(ep); + spin_unlock_bh(&ep->ex_lock); + + fc_exch_hold(ep); + + if (!rc) { + fc_exch_delete(ep); + } else { + FC_EXCH_DBG(ep, "ep is completed already," + "hence skip calling the resp\n"); + goto skip_resp; + } + + fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED)); +skip_resp: + fc_seq_set_resp(sp, NULL, ep->arg); + fc_exch_release(ep); +} + +/** + * fc_exch_pool_reset() - Reset a per cpu exchange pool + * @lport: The local port that the exchange pool is on + * @pool: The exchange pool to be reset + * @sid: The source ID + * @did: The destination ID + * + * Resets a per cpu exches pool, releasing all of its sequences + * and exchanges. If sid is non-zero then reset only exchanges + * we sourced from the local port's FID. If did is non-zero then + * only reset exchanges destined for the local port's FID. + */ +static void fc_exch_pool_reset(struct fc_lport *lport, + struct fc_exch_pool *pool, + u32 sid, u32 did) +{ + struct fc_exch *ep; + struct fc_exch *next; + + spin_lock_bh(&pool->lock); +restart: + list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) { + if ((lport == ep->lp) && + (sid == 0 || sid == ep->sid) && + (did == 0 || did == ep->did)) { + fc_exch_hold(ep); + spin_unlock_bh(&pool->lock); + + fc_exch_reset(ep); + + fc_exch_release(ep); + spin_lock_bh(&pool->lock); + + /* + * must restart loop incase while lock + * was down multiple eps were released. + */ + goto restart; + } + } + pool->next_index = 0; + pool->left = FC_XID_UNKNOWN; + pool->right = FC_XID_UNKNOWN; + spin_unlock_bh(&pool->lock); +} + +/** + * fc_exch_mgr_reset() - Reset all EMs of a local port + * @lport: The local port whose EMs are to be reset + * @sid: The source ID + * @did: The destination ID + * + * Reset all EMs associated with a given local port. Release all + * sequences and exchanges. If sid is non-zero then reset only the + * exchanges sent from the local port's FID. If did is non-zero then + * reset only exchanges destined for the local port's FID. + */ +void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did) +{ + struct fc_exch_mgr_anchor *ema; + unsigned int cpu; + + list_for_each_entry(ema, &lport->ema_list, ema_list) { + for_each_possible_cpu(cpu) + fc_exch_pool_reset(lport, + per_cpu_ptr(ema->mp->pool, cpu), + sid, did); + } +} +EXPORT_SYMBOL(fc_exch_mgr_reset); + +/** + * fc_exch_lookup() - find an exchange + * @lport: The local port + * @xid: The exchange ID + * + * Returns exchange pointer with hold for caller, or NULL if not found. + */ +static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid) +{ + struct fc_exch_mgr_anchor *ema; + + list_for_each_entry(ema, &lport->ema_list, ema_list) + if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid) + return fc_exch_find(ema->mp, xid); + return NULL; +} + +/** + * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests + * @rfp: The REC frame, not freed here. + * + * Note that the requesting port may be different than the S_ID in the request. 
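+ * Replies with an LS_ACC carrying the exchange state when the exchange
+ * is found and matches the request, otherwise with an LS_RJT.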
+ */ +static void fc_exch_els_rec(struct fc_frame *rfp) +{ + struct fc_lport *lport; + struct fc_frame *fp; + struct fc_exch *ep; + struct fc_els_rec *rp; + struct fc_els_rec_acc *acc; + enum fc_els_rjt_reason reason = ELS_RJT_LOGIC; + enum fc_els_rjt_explan explan; + u32 sid; + u16 xid, rxid, oxid; + + lport = fr_dev(rfp); + rp = fc_frame_payload_get(rfp, sizeof(*rp)); + explan = ELS_EXPL_INV_LEN; + if (!rp) + goto reject; + sid = ntoh24(rp->rec_s_id); + rxid = ntohs(rp->rec_rx_id); + oxid = ntohs(rp->rec_ox_id); + + explan = ELS_EXPL_OXID_RXID; + if (sid == fc_host_port_id(lport->host)) + xid = oxid; + else + xid = rxid; + if (xid == FC_XID_UNKNOWN) { + FC_LPORT_DBG(lport, + "REC request from %x: invalid rxid %x oxid %x\n", + sid, rxid, oxid); + goto reject; + } + ep = fc_exch_lookup(lport, xid); + if (!ep) { + FC_LPORT_DBG(lport, + "REC request from %x: rxid %x oxid %x not found\n", + sid, rxid, oxid); + goto reject; + } + FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n", + sid, rxid, oxid); + if (ep->oid != sid || oxid != ep->oxid) + goto rel; + if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid) + goto rel; + fp = fc_frame_alloc(lport, sizeof(*acc)); + if (!fp) { + FC_EXCH_DBG(ep, "Drop REC request, out of memory\n"); + goto out; + } + + acc = fc_frame_payload_get(fp, sizeof(*acc)); + memset(acc, 0, sizeof(*acc)); + acc->reca_cmd = ELS_LS_ACC; + acc->reca_ox_id = rp->rec_ox_id; + memcpy(acc->reca_ofid, rp->rec_s_id, 3); + acc->reca_rx_id = htons(ep->rxid); + if (ep->sid == ep->oid) + hton24(acc->reca_rfid, ep->did); + else + hton24(acc->reca_rfid, ep->sid); + acc->reca_fc4value = htonl(ep->seq.rec_data); + acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP | + ESB_ST_SEQ_INIT | + ESB_ST_COMPLETE)); + fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); +out: + fc_exch_release(ep); + return; + +rel: + fc_exch_release(ep); +reject: + fc_seq_ls_rjt(rfp, reason, explan); +} + +/** + * fc_exch_rrq_resp() - Handler for RRQ responses + * @sp: The sequence that the RRQ is on + * @fp: The RRQ frame + * @arg: The exchange that the RRQ is on + * + * TODO: fix error handler. + */ +static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg) +{ + struct fc_exch *aborted_ep = arg; + unsigned int op; + + if (IS_ERR(fp)) { + int err = PTR_ERR(fp); + + if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT) + goto cleanup; + FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, " + "frame error %d\n", err); + return; + } + + op = fc_frame_payload_op(fp); + fc_frame_free(fp); + + switch (op) { + case ELS_LS_RJT: + FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n"); + fallthrough; + case ELS_LS_ACC: + goto cleanup; + default: + FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n", + op); + return; + } + +cleanup: + fc_exch_done(&aborted_ep->seq); + /* drop hold for rec qual */ + fc_exch_release(aborted_ep); +} + + +/** + * fc_exch_seq_send() - Send a frame using a new exchange and sequence + * @lport: The local port to send the frame on + * @fp: The frame to be sent + * @resp: The response handler for this request + * @destructor: The destructor for the exchange + * @arg: The argument to be passed to the response handler + * @timer_msec: The timeout period for the exchange + * + * The exchange response handler is set in this routine to resp() + * function pointer. It can be called in two scenarios: if a timeout + * occurs or if a response frame is received for the exchange. 
The + * fc_frame pointer in response handler will also indicate timeout + * as error using IS_ERR related macros. + * + * The exchange destructor handler is also set in this routine. + * The destructor handler is invoked by EM layer when exchange + * is about to free, this can be used by caller to free its + * resources along with exchange free. + * + * The arg is passed back to resp and destructor handler. + * + * The timeout value (in msec) for an exchange is set if non zero + * timer_msec argument is specified. The timer is canceled when + * it fires or when the exchange is done. The exchange timeout handler + * is registered by EM layer. + * + * The frame pointer with some of the header's fields must be + * filled before calling this routine, those fields are: + * + * - routing control + * - FC port did + * - FC port sid + * - FC header type + * - frame control + * - parameter or relative offset + */ +struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, + struct fc_frame *fp, + void (*resp)(struct fc_seq *, + struct fc_frame *fp, + void *arg), + void (*destructor)(struct fc_seq *, void *), + void *arg, u32 timer_msec) +{ + struct fc_exch *ep; + struct fc_seq *sp = NULL; + struct fc_frame_header *fh; + struct fc_fcp_pkt *fsp = NULL; + int rc = 1; + + ep = fc_exch_alloc(lport, fp); + if (!ep) { + fc_frame_free(fp); + return NULL; + } + ep->esb_stat |= ESB_ST_SEQ_INIT; + fh = fc_frame_header_get(fp); + fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id)); + ep->resp = resp; + ep->destructor = destructor; + ep->arg = arg; + ep->r_a_tov = lport->r_a_tov; + ep->lp = lport; + sp = &ep->seq; + + ep->fh_type = fh->fh_type; /* save for possbile timeout handling */ + ep->f_ctl = ntoh24(fh->fh_f_ctl); + fc_exch_setup_hdr(ep, fp, ep->f_ctl); + sp->cnt++; + + if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) { + fsp = fr_fsp(fp); + fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); + } + + if (unlikely(lport->tt.frame_send(lport, fp))) + goto err; + + if (timer_msec) + fc_exch_timer_set_locked(ep, timer_msec); + ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */ + + if (ep->f_ctl & FC_FC_SEQ_INIT) + ep->esb_stat &= ~ESB_ST_SEQ_INIT; + spin_unlock_bh(&ep->ex_lock); + return sp; +err: + if (fsp) + fc_fcp_ddp_done(fsp); + rc = fc_exch_done_locked(ep); + spin_unlock_bh(&ep->ex_lock); + if (!rc) + fc_exch_delete(ep); + return NULL; +} +EXPORT_SYMBOL(fc_exch_seq_send); + +/** + * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command + * @ep: The exchange to send the RRQ on + * + * This tells the remote port to stop blocking the use of + * the exchange and the seq_cnt range. 
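+ * If the RRQ cannot be sent, the recovery qualifier is re-established and
+ * the exchange timer is re-armed with R_A_TOV so the RRQ can be retried.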
+ */ +static void fc_exch_rrq(struct fc_exch *ep) +{ + struct fc_lport *lport; + struct fc_els_rrq *rrq; + struct fc_frame *fp; + u32 did; + + lport = ep->lp; + + fp = fc_frame_alloc(lport, sizeof(*rrq)); + if (!fp) + goto retry; + + rrq = fc_frame_payload_get(fp, sizeof(*rrq)); + memset(rrq, 0, sizeof(*rrq)); + rrq->rrq_cmd = ELS_RRQ; + hton24(rrq->rrq_s_id, ep->sid); + rrq->rrq_ox_id = htons(ep->oxid); + rrq->rrq_rx_id = htons(ep->rxid); + + did = ep->did; + if (ep->esb_stat & ESB_ST_RESP) + did = ep->sid; + + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, + lport->port_id, FC_TYPE_ELS, + FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + + if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep, + lport->e_d_tov)) + return; + +retry: + FC_EXCH_DBG(ep, "exch: RRQ send failed\n"); + spin_lock_bh(&ep->ex_lock); + if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) { + spin_unlock_bh(&ep->ex_lock); + /* drop hold for rec qual */ + fc_exch_release(ep); + return; + } + ep->esb_stat |= ESB_ST_REC_QUAL; + fc_exch_timer_set_locked(ep, ep->r_a_tov); + spin_unlock_bh(&ep->ex_lock); +} + +/** + * fc_exch_els_rrq() - Handler for ELS RRQ (Reset Recovery Qualifier) requests + * @fp: The RRQ frame, not freed here. + */ +static void fc_exch_els_rrq(struct fc_frame *fp) +{ + struct fc_lport *lport; + struct fc_exch *ep = NULL; /* request or subject exchange */ + struct fc_els_rrq *rp; + u32 sid; + u16 xid; + enum fc_els_rjt_explan explan; + + lport = fr_dev(fp); + rp = fc_frame_payload_get(fp, sizeof(*rp)); + explan = ELS_EXPL_INV_LEN; + if (!rp) + goto reject; + + /* + * lookup subject exchange. + */ + sid = ntoh24(rp->rrq_s_id); /* subject source */ + xid = fc_host_port_id(lport->host) == sid ? + ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id); + ep = fc_exch_lookup(lport, xid); + explan = ELS_EXPL_OXID_RXID; + if (!ep) + goto reject; + spin_lock_bh(&ep->ex_lock); + FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n", + sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id)); + if (ep->oxid != ntohs(rp->rrq_ox_id)) + goto unlock_reject; + if (ep->rxid != ntohs(rp->rrq_rx_id) && + ep->rxid != FC_XID_UNKNOWN) + goto unlock_reject; + explan = ELS_EXPL_SID; + if (ep->sid != sid) + goto unlock_reject; + + /* + * Clear Recovery Qualifier state, and cancel timer if complete. + */ + if (ep->esb_stat & ESB_ST_REC_QUAL) { + ep->esb_stat &= ~ESB_ST_REC_QUAL; + atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */ + } + if (ep->esb_stat & ESB_ST_COMPLETE) + fc_exch_timer_cancel(ep); + + spin_unlock_bh(&ep->ex_lock); + + /* + * Send LS_ACC. 
+ */ + fc_seq_ls_acc(fp); + goto out; + +unlock_reject: + spin_unlock_bh(&ep->ex_lock); +reject: + fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan); +out: + if (ep) + fc_exch_release(ep); /* drop hold from fc_exch_find */ +} + +/** + * fc_exch_update_stats() - update exches stats to lport + * @lport: The local port to update exchange manager stats + */ +void fc_exch_update_stats(struct fc_lport *lport) +{ + struct fc_host_statistics *st; + struct fc_exch_mgr_anchor *ema; + struct fc_exch_mgr *mp; + + st = &lport->host_stats; + + list_for_each_entry(ema, &lport->ema_list, ema_list) { + mp = ema->mp; + st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch); + st->fc_no_free_exch_xid += + atomic_read(&mp->stats.no_free_exch_xid); + st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found); + st->fc_xid_busy += atomic_read(&mp->stats.xid_busy); + st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found); + st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp); + } +} +EXPORT_SYMBOL(fc_exch_update_stats); + +/** + * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs + * @lport: The local port to add the exchange manager to + * @mp: The exchange manager to be added to the local port + * @match: The match routine that indicates when this EM should be used + */ +struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport, + struct fc_exch_mgr *mp, + bool (*match)(struct fc_frame *)) +{ + struct fc_exch_mgr_anchor *ema; + + ema = kmalloc(sizeof(*ema), GFP_ATOMIC); + if (!ema) + return ema; + + ema->mp = mp; + ema->match = match; + /* add EM anchor to EM anchors list */ + list_add_tail(&ema->ema_list, &lport->ema_list); + kref_get(&mp->kref); + return ema; +} +EXPORT_SYMBOL(fc_exch_mgr_add); + +/** + * fc_exch_mgr_destroy() - Destroy an exchange manager + * @kref: The reference to the EM to be destroyed + */ +static void fc_exch_mgr_destroy(struct kref *kref) +{ + struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref); + + mempool_destroy(mp->ep_pool); + free_percpu(mp->pool); + kfree(mp); +} + +/** + * fc_exch_mgr_del() - Delete an EM from a local port's list + * @ema: The exchange manager anchor identifying the EM to be deleted + */ +void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema) +{ + /* remove EM anchor from EM anchors list */ + list_del(&ema->ema_list); + kref_put(&ema->mp->kref, fc_exch_mgr_destroy); + kfree(ema); +} +EXPORT_SYMBOL(fc_exch_mgr_del); + +/** + * fc_exch_mgr_list_clone() - Share all exchange manager objects + * @src: Source lport to clone exchange managers from + * @dst: New lport that takes references to all the exchange managers + */ +int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst) +{ + struct fc_exch_mgr_anchor *ema, *tmp; + + list_for_each_entry(ema, &src->ema_list, ema_list) { + if (!fc_exch_mgr_add(dst, ema->mp, ema->match)) + goto err; + } + return 0; +err: + list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list) + fc_exch_mgr_del(ema); + return -ENOMEM; +} +EXPORT_SYMBOL(fc_exch_mgr_list_clone); + +/** + * fc_exch_mgr_alloc() - Allocate an exchange manager + * @lport: The local port that the new EM will be associated with + * @class: The default FC class for new exchanges + * @min_xid: The minimum XID for exchanges from the new EM + * @max_xid: The maximum XID for exchanges from the new EM + * @match: The match routine for the new EM + */ +struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport, + enum fc_class class, + u16 min_xid, u16 max_xid, + bool (*match)(struct fc_frame 
*)) +{ + struct fc_exch_mgr *mp; + u16 pool_exch_range; + size_t pool_size; + unsigned int cpu; + struct fc_exch_pool *pool; + + if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN || + (min_xid & fc_cpu_mask) != 0) { + FC_LPORT_DBG(lport, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", + min_xid, max_xid); + return NULL; + } + + /* + * allocate memory for EM + */ + mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC); + if (!mp) + return NULL; + + mp->class = class; + mp->lport = lport; + /* adjust em exch xid range for offload */ + mp->min_xid = min_xid; + + /* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */ + pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) / + sizeof(struct fc_exch *); + if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) { + mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) + + min_xid - 1; + } else { + mp->max_xid = max_xid; + pool_exch_range = (mp->max_xid - mp->min_xid + 1) / + (fc_cpu_mask + 1); + } + + mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep); + if (!mp->ep_pool) + goto free_mp; + + /* + * Setup per cpu exch pool with entire exchange id range equally + * divided across all cpus. The exch pointers array memory is + * allocated for exch range per pool. + */ + mp->pool_max_index = pool_exch_range - 1; + + /* + * Allocate and initialize per cpu exch pool + */ + pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *); + mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool)); + if (!mp->pool) + goto free_mempool; + for_each_possible_cpu(cpu) { + pool = per_cpu_ptr(mp->pool, cpu); + pool->next_index = 0; + pool->left = FC_XID_UNKNOWN; + pool->right = FC_XID_UNKNOWN; + spin_lock_init(&pool->lock); + INIT_LIST_HEAD(&pool->ex_list); + } + + kref_init(&mp->kref); + if (!fc_exch_mgr_add(lport, mp, match)) { + free_percpu(mp->pool); + goto free_mempool; + } + + /* + * Above kref_init() sets mp->kref to 1 and then + * call to fc_exch_mgr_add incremented mp->kref again, + * so adjust that extra increment. + */ + kref_put(&mp->kref, fc_exch_mgr_destroy); + return mp; + +free_mempool: + mempool_destroy(mp->ep_pool); +free_mp: + kfree(mp); + return NULL; +} +EXPORT_SYMBOL(fc_exch_mgr_alloc); + +/** + * fc_exch_mgr_free() - Free all exchange managers on a local port + * @lport: The local port whose EMs are to be freed + */ +void fc_exch_mgr_free(struct fc_lport *lport) +{ + struct fc_exch_mgr_anchor *ema, *next; + + flush_workqueue(fc_exch_workqueue); + list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list) + fc_exch_mgr_del(ema); +} +EXPORT_SYMBOL(fc_exch_mgr_free); + +/** + * fc_find_ema() - Lookup and return appropriate Exchange Manager Anchor depending + * upon 'xid'. 
+ * @f_ctl: f_ctl + * @lport: The local port the frame was received on + * @fh: The received frame header + */ +static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl, + struct fc_lport *lport, + struct fc_frame_header *fh) +{ + struct fc_exch_mgr_anchor *ema; + u16 xid; + + if (f_ctl & FC_FC_EX_CTX) + xid = ntohs(fh->fh_ox_id); + else { + xid = ntohs(fh->fh_rx_id); + if (xid == FC_XID_UNKNOWN) + return list_entry(lport->ema_list.prev, + typeof(*ema), ema_list); + } + + list_for_each_entry(ema, &lport->ema_list, ema_list) { + if ((xid >= ema->mp->min_xid) && + (xid <= ema->mp->max_xid)) + return ema; + } + return NULL; +} +/** + * fc_exch_recv() - Handler for received frames + * @lport: The local port the frame was received on + * @fp: The received frame + */ +void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_frame_header *fh = fc_frame_header_get(fp); + struct fc_exch_mgr_anchor *ema; + u32 f_ctl; + + /* lport lock ? */ + if (!lport || lport->state == LPORT_ST_DISABLED) { + FC_LIBFC_DBG("Receiving frames for an lport that " + "has not been initialized correctly\n"); + fc_frame_free(fp); + return; + } + + f_ctl = ntoh24(fh->fh_f_ctl); + ema = fc_find_ema(f_ctl, lport, fh); + if (!ema) { + FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor," + "fc_ctl <0x%x>, xid <0x%x>\n", + f_ctl, + (f_ctl & FC_FC_EX_CTX) ? + ntohs(fh->fh_ox_id) : + ntohs(fh->fh_rx_id)); + fc_frame_free(fp); + return; + } + + /* + * If frame is marked invalid, just drop it. + */ + switch (fr_eof(fp)) { + case FC_EOF_T: + if (f_ctl & FC_FC_END_SEQ) + skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl)); + fallthrough; + case FC_EOF_N: + if (fh->fh_type == FC_TYPE_BLS) + fc_exch_recv_bls(ema->mp, fp); + else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == + FC_FC_EX_CTX) + fc_exch_recv_seq_resp(ema->mp, fp); + else if (f_ctl & FC_FC_SEQ_CTX) + fc_exch_recv_resp(ema->mp, fp); + else /* no EX_CTX and no SEQ_CTX */ + fc_exch_recv_req(lport, ema->mp, fp); + break; + default: + FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)", + fr_eof(fp)); + fc_frame_free(fp); + } +} +EXPORT_SYMBOL(fc_exch_recv); + +/** + * fc_exch_init() - Initialize the exchange layer for a local port + * @lport: The local port to initialize the exchange layer for + */ +int fc_exch_init(struct fc_lport *lport) +{ + if (!lport->tt.exch_mgr_reset) + lport->tt.exch_mgr_reset = fc_exch_mgr_reset; + + return 0; +} +EXPORT_SYMBOL(fc_exch_init); + +/** + * fc_setup_exch_mgr() - Setup an exchange manager + */ +int fc_setup_exch_mgr(void) +{ + fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!fc_em_cachep) + return -ENOMEM; + + /* + * Initialize fc_cpu_mask and fc_cpu_order. The + * fc_cpu_mask is set for nr_cpu_ids rounded up + * to order of 2's * power and order is stored + * in fc_cpu_order as this is later required in + * mapping between an exch id and exch array index + * in per cpu exch pool. + * + * This round up is required to align fc_cpu_mask + * to exchange id's lower bits such that all incoming + * frames of an exchange gets delivered to the same + * cpu on which exchange originated by simple bitwise + * AND operation between fc_cpu_mask and exchange id. 
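A minimal standalone sketch of the mapping this comment describes, assuming the usual libfc convention that the owning CPU is xid & fc_cpu_mask and the slot in that CPU's pool is (xid - min_xid) >> fc_cpu_order; the numbers are invented for illustration.

#include <stdio.h>

int main(void)
{
	unsigned int nr_cpus = 6;               /* stands in for nr_cpu_ids */
	unsigned int order = 0;

	while ((1u << order) < nr_cpus)         /* ilog2(roundup_pow_of_two()) */
		order++;
	unsigned int mask = (1u << order) - 1;  /* fc_cpu_mask */

	unsigned int min_xid = 0x0100;
	unsigned int xid = 0x0105;              /* some allocated exchange id */

	/* frames for this xid are steered to cpu 5, slot 0 of its pool */
	printf("cpu=%u index=%u\n", xid & mask, (xid - min_xid) >> order);
	return 0;
}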
+ */ + fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids)); + fc_cpu_mask = (1 << fc_cpu_order) - 1; + + fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue"); + if (!fc_exch_workqueue) + goto err; + return 0; +err: + kmem_cache_destroy(fc_em_cachep); + return -ENOMEM; +} + +/** + * fc_destroy_exch_mgr() - Destroy an exchange manager + */ +void fc_destroy_exch_mgr(void) +{ + destroy_workqueue(fc_exch_workqueue); + kmem_cache_destroy(fc_em_cachep); +} diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c new file mode 100644 index 000000000..945adca5e --- /dev/null +++ b/drivers/scsi/libfc/fc_fcp.c @@ -0,0 +1,2313 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2007 Intel Corporation. All rights reserved. + * Copyright(c) 2008 Red Hat, Inc. All rights reserved. + * Copyright(c) 2008 Mike Christie + * + * Maintained at www.Open-FCoE.org + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include + +#include "fc_encode.h" +#include "fc_libfc.h" + +static struct kmem_cache *scsi_pkt_cachep; + +/* SRB state definitions */ +#define FC_SRB_FREE 0 /* cmd is free */ +#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */ +#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */ +#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */ +#define FC_SRB_ABORTED (1 << 3) /* abort acknowledged */ +#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */ +#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */ +#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */ + +#define FC_SRB_READ (1 << 1) +#define FC_SRB_WRITE (1 << 0) + +static struct libfc_cmd_priv *libfc_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +/** + * struct fc_fcp_internal - FCP layer internal data + * @scsi_pkt_pool: Memory pool to draw FCP packets from + * @scsi_queue_lock: Protects the scsi_pkt_queue + * @scsi_pkt_queue: Current FCP packets + * @last_can_queue_ramp_down_time: ramp down time + * @last_can_queue_ramp_up_time: ramp up time + * @max_can_queue: max can_queue size + */ +struct fc_fcp_internal { + mempool_t *scsi_pkt_pool; + spinlock_t scsi_queue_lock; + struct list_head scsi_pkt_queue; + unsigned long last_can_queue_ramp_down_time; + unsigned long last_can_queue_ramp_up_time; + int max_can_queue; +}; + +#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) + +/* + * function prototypes + * FC scsi I/O related functions + */ +static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *); +static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *); +static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *); +static void fc_fcp_complete_locked(struct fc_fcp_pkt *); +static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); +static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *); +static void fc_fcp_recovery(struct fc_fcp_pkt *, u8 code); +static void fc_fcp_timeout(struct timer_list *); +static void fc_fcp_rec(struct fc_fcp_pkt *); +static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); +static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *); +static void fc_io_compl(struct fc_fcp_pkt *); + +static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32); +static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *); +static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *); + 
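The FC_SRB_* definitions above are single bits OR-ed into fsp->state as a command progresses; a tiny standalone sketch of the transition an abort acknowledgement performs (mirroring fc_fcp_abort_done() later in this file) is:

#include <stdio.h>

#define FC_SRB_CMD_SENT      (1 << 0)
#define FC_SRB_ABORT_PENDING (1 << 2)
#define FC_SRB_ABORTED       (1 << 3)

int main(void)
{
	unsigned int state = FC_SRB_CMD_SENT;   /* command is on the wire */

	state |= FC_SRB_ABORT_PENDING;          /* ABTS sent */

	state |= FC_SRB_ABORTED;                /* BA_ACC received ...         */
	state &= ~FC_SRB_ABORT_PENDING;         /* ... abort no longer pending */

	printf("aborted=%d pending=%d\n",
	       !!(state & FC_SRB_ABORTED), !!(state & FC_SRB_ABORT_PENDING));
	return 0;
}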
+/* + * command status codes + */ +#define FC_COMPLETE 0 +#define FC_CMD_ABORTED 1 +#define FC_CMD_RESET 2 +#define FC_CMD_PLOGO 3 +#define FC_SNS_RCV 4 +#define FC_TRANS_ERR 5 +#define FC_DATA_OVRRUN 6 +#define FC_DATA_UNDRUN 7 +#define FC_ERROR 8 +#define FC_HRD_ERROR 9 +#define FC_CRC_ERROR 10 +#define FC_TIMED_OUT 11 +#define FC_TRANS_RESET 12 + +/* + * Error recovery timeout values. + */ +#define FC_SCSI_TM_TOV (10 * HZ) +#define FC_HOST_RESET_TIMEOUT (30 * HZ) +#define FC_CAN_QUEUE_PERIOD (60 * HZ) + +#define FC_MAX_ERROR_CNT 5 +#define FC_MAX_RECOV_RETRY 3 + +#define FC_FCP_DFLT_QUEUE_DEPTH 32 + +/** + * fc_fcp_pkt_alloc() - Allocate a fcp_pkt + * @lport: The local port that the FCP packet is for + * @gfp: GFP flags for allocation + * + * Return value: fcp_pkt structure or null on allocation failure. + * Context: Can be called from process context, no lock is required. + */ +static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp) +{ + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); + struct fc_fcp_pkt *fsp; + + fsp = mempool_alloc(si->scsi_pkt_pool, gfp); + if (fsp) { + memset(fsp, 0, sizeof(*fsp)); + fsp->lp = lport; + fsp->xfer_ddp = FC_XID_UNKNOWN; + refcount_set(&fsp->ref_cnt, 1); + timer_setup(&fsp->timer, NULL, 0); + INIT_LIST_HEAD(&fsp->list); + spin_lock_init(&fsp->scsi_pkt_lock); + } else { + this_cpu_inc(lport->stats->FcpPktAllocFails); + } + return fsp; +} + +/** + * fc_fcp_pkt_release() - Release hold on a fcp_pkt + * @fsp: The FCP packet to be released + * + * Context: Can be called from process or interrupt context, + * no lock is required. + */ +static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) +{ + if (refcount_dec_and_test(&fsp->ref_cnt)) { + struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp); + + mempool_free(fsp, si->scsi_pkt_pool); + } +} + +/** + * fc_fcp_pkt_hold() - Hold a fcp_pkt + * @fsp: The FCP packet to be held + */ +static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp) +{ + refcount_inc(&fsp->ref_cnt); +} + +/** + * fc_fcp_pkt_destroy() - Release hold on a fcp_pkt + * @seq: The sequence that the FCP packet is on (required by destructor API) + * @fsp: The FCP packet to be released + * + * This routine is called by a destructor callback in the fc_exch_seq_send() + * routine of the libfc Transport Template. The 'struct fc_seq' is a required + * argument even though it is not used by this routine. + * + * Context: No locking required. + */ +static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) +{ + fc_fcp_pkt_release(fsp); +} + +/** + * fc_fcp_lock_pkt() - Lock a fcp_pkt and increase its reference count + * @fsp: The FCP packet to be locked and incremented + * + * We should only return error if we return a command to SCSI-ml before + * getting a response. This can happen in cases where we send a abort, but + * do not wait for the response and the abort and command can be passing + * each other on the wire/network-layer. + * + * Note: this function locks the packet and gets a reference to allow + * callers to call the completion function while the lock is held and + * not have to worry about the packets refcount. + * + * TODO: Maybe we should just have callers grab/release the lock and + * have a function that they call to verify the fsp and grab a ref if + * needed. 
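A hypothetical caller of these lock/ref helpers would follow the pattern sketched below; my_handler() is invented, and only the pair documented here is assumed.

static void my_handler(struct fc_fcp_pkt *fsp)
{
	if (fc_fcp_lock_pkt(fsp))       /* packet already completed: bail out */
		return;

	/* ... inspect or update fsp under scsi_pkt_lock, ref held ... */

	fc_fcp_unlock_pkt(fsp);         /* drops the lock and the extra ref */
}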
+ */ +static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp) +{ + spin_lock_bh(&fsp->scsi_pkt_lock); + if (fsp->state & FC_SRB_COMPL) { + spin_unlock_bh(&fsp->scsi_pkt_lock); + return -EPERM; + } + + fc_fcp_pkt_hold(fsp); + return 0; +} + +/** + * fc_fcp_unlock_pkt() - Release a fcp_pkt's lock and decrement its + * reference count + * @fsp: The FCP packet to be unlocked and decremented + */ +static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp) +{ + spin_unlock_bh(&fsp->scsi_pkt_lock); + fc_fcp_pkt_release(fsp); +} + +/** + * fc_fcp_timer_set() - Start a timer for a fcp_pkt + * @fsp: The FCP packet to start a timer for + * @delay: The timeout period in jiffies + */ +static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay) +{ + if (!(fsp->state & FC_SRB_COMPL)) { + mod_timer(&fsp->timer, jiffies + delay); + fsp->timer_delay = delay; + } +} + +static void fc_fcp_abort_done(struct fc_fcp_pkt *fsp) +{ + fsp->state |= FC_SRB_ABORTED; + fsp->state &= ~FC_SRB_ABORT_PENDING; + + if (fsp->wait_for_comp) + complete(&fsp->tm_done); + else + fc_fcp_complete_locked(fsp); +} + +/** + * fc_fcp_send_abort() - Send an abort for exchanges associated with a + * fcp_pkt + * @fsp: The FCP packet to abort exchanges on + */ +static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) +{ + int rc; + + if (!fsp->seq_ptr) + return -EINVAL; + + this_cpu_inc(fsp->lp->stats->FcpPktAborts); + + fsp->state |= FC_SRB_ABORT_PENDING; + rc = fc_seq_exch_abort(fsp->seq_ptr, 0); + /* + * fc_seq_exch_abort() might return -ENXIO if + * the sequence is already completed + */ + if (rc == -ENXIO) { + fc_fcp_abort_done(fsp); + rc = 0; + } + return rc; +} + +/** + * fc_fcp_retry_cmd() - Retry a fcp_pkt + * @fsp: The FCP packet to be retried + * @status_code: The FCP status code to set + * + * Sets the status code to be FC_ERROR and then calls + * fc_fcp_complete_locked() which in turn calls fc_io_compl(). + * fc_io_compl() will notify the SCSI-ml that the I/O is done. + * The SCSI-ml will retry the command. 
+ */ +static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp, int status_code) +{ + if (fsp->seq_ptr) { + fc_exch_done(fsp->seq_ptr); + fsp->seq_ptr = NULL; + } + + fsp->state &= ~FC_SRB_ABORT_PENDING; + fsp->io_status = 0; + fsp->status_code = status_code; + fc_fcp_complete_locked(fsp); +} + +/** + * fc_fcp_ddp_setup() - Calls a LLD's ddp_setup routine to set up DDP context + * @fsp: The FCP packet that will manage the DDP frames + * @xid: The XID that will be used for the DDP exchange + */ +void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) +{ + struct fc_lport *lport; + + lport = fsp->lp; + if ((fsp->req_flags & FC_SRB_READ) && + (lport->lro_enabled) && (lport->tt.ddp_setup)) { + if (lport->tt.ddp_setup(lport, xid, scsi_sglist(fsp->cmd), + scsi_sg_count(fsp->cmd))) + fsp->xfer_ddp = xid; + } +} + +/** + * fc_fcp_ddp_done() - Calls a LLD's ddp_done routine to release any + * DDP related resources for a fcp_pkt + * @fsp: The FCP packet that DDP had been used on + */ +void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) +{ + struct fc_lport *lport; + + if (!fsp) + return; + + if (fsp->xfer_ddp == FC_XID_UNKNOWN) + return; + + lport = fsp->lp; + if (lport->tt.ddp_done) { + fsp->xfer_len = lport->tt.ddp_done(lport, fsp->xfer_ddp); + fsp->xfer_ddp = FC_XID_UNKNOWN; + } +} + +/** + * fc_fcp_can_queue_ramp_up() - increases can_queue + * @lport: lport to ramp up can_queue + */ +static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport) +{ + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); + unsigned long flags; + int can_queue; + + spin_lock_irqsave(lport->host->host_lock, flags); + + if (si->last_can_queue_ramp_up_time && + (time_before(jiffies, si->last_can_queue_ramp_up_time + + FC_CAN_QUEUE_PERIOD))) + goto unlock; + + if (time_before(jiffies, si->last_can_queue_ramp_down_time + + FC_CAN_QUEUE_PERIOD)) + goto unlock; + + si->last_can_queue_ramp_up_time = jiffies; + + can_queue = lport->host->can_queue << 1; + if (can_queue >= si->max_can_queue) { + can_queue = si->max_can_queue; + si->last_can_queue_ramp_down_time = 0; + } + lport->host->can_queue = can_queue; + shost_printk(KERN_ERR, lport->host, "libfc: increased " + "can_queue to %d.\n", can_queue); + +unlock: + spin_unlock_irqrestore(lport->host->host_lock, flags); +} + +/** + * fc_fcp_can_queue_ramp_down() - reduces can_queue + * @lport: lport to reduce can_queue + * + * If we are getting memory allocation failures, then we may + * be trying to execute too many commands. We let the running + * commands complete or timeout, then try again with a reduced + * can_queue. Eventually we will hit the point where we run + * on all reserved structs. + */ +static bool fc_fcp_can_queue_ramp_down(struct fc_lport *lport) +{ + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); + unsigned long flags; + int can_queue; + bool changed = false; + + spin_lock_irqsave(lport->host->host_lock, flags); + + if (si->last_can_queue_ramp_down_time && + (time_before(jiffies, si->last_can_queue_ramp_down_time + + FC_CAN_QUEUE_PERIOD))) + goto unlock; + + si->last_can_queue_ramp_down_time = jiffies; + + can_queue = lport->host->can_queue; + can_queue >>= 1; + if (!can_queue) + can_queue = 1; + lport->host->can_queue = can_queue; + changed = true; + +unlock: + spin_unlock_irqrestore(lport->host->host_lock, flags); + return changed; +} + +/* + * fc_fcp_frame_alloc() - Allocates fc_frame structure and buffer. + * @lport: fc lport struct + * @len: payload length + * + * Allocates fc_frame structure and buffer but if fails to allocate + * then reduce can_queue. 
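The ramp policy described above is exponential in both directions: halve can_queue when allocations start failing, and double it back up (capped at the saved maximum) once commands complete again. A standalone sketch with invented numbers:

#include <stdio.h>

int main(void)
{
	int max_can_queue = 256;
	int can_queue = max_can_queue;

	can_queue >>= 1;                        /* ramp down on allocation pressure */
	if (!can_queue)
		can_queue = 1;

	can_queue <<= 1;                        /* later ramp up ...              */
	if (can_queue >= max_can_queue)
		can_queue = max_can_queue;      /* ... but never past the maximum */

	printf("can_queue=%d\n", can_queue);
	return 0;
}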
+ */ +static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport, + size_t len) +{ + struct fc_frame *fp; + + fp = fc_frame_alloc(lport, len); + if (likely(fp)) + return fp; + + this_cpu_inc(lport->stats->FcpFrameAllocFails); + /* error case */ + fc_fcp_can_queue_ramp_down(lport); + shost_printk(KERN_ERR, lport->host, + "libfc: Could not allocate frame, " + "reducing can_queue to %d.\n", lport->host->can_queue); + return NULL; +} + +/** + * get_fsp_rec_tov() - Helper function to get REC_TOV + * @fsp: the FCP packet + * + * Returns rec tov in jiffies as rpriv->e_d_tov + 1 second + */ +static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp) +{ + struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data; + unsigned int e_d_tov = FC_DEF_E_D_TOV; + + if (rpriv && rpriv->e_d_tov > e_d_tov) + e_d_tov = rpriv->e_d_tov; + return msecs_to_jiffies(e_d_tov) + HZ; +} + +/** + * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target + * @fsp: The FCP packet the data is on + * @fp: The data frame + */ +static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) +{ + struct scsi_cmnd *sc = fsp->cmd; + struct fc_lport *lport = fsp->lp; + struct fc_frame_header *fh; + size_t start_offset; + size_t offset; + u32 crc; + u32 copy_len = 0; + size_t len; + void *buf; + struct scatterlist *sg; + u32 nents; + u8 host_bcode = FC_COMPLETE; + + fh = fc_frame_header_get(fp); + offset = ntohl(fh->fh_parm_offset); + start_offset = offset; + len = fr_len(fp) - sizeof(*fh); + buf = fc_frame_payload_get(fp, 0); + + /* + * if this I/O is ddped then clear it and initiate recovery since data + * frames are expected to be placed directly in that case. + * + * Indicate error to scsi-ml because something went wrong with the + * ddp handling to get us here. + */ + if (fsp->xfer_ddp != FC_XID_UNKNOWN) { + fc_fcp_ddp_done(fsp); + FC_FCP_DBG(fsp, "DDP I/O in fc_fcp_recv_data set ERROR\n"); + host_bcode = FC_ERROR; + goto err; + } + if (offset + len > fsp->data_len) { + /* this should never happen */ + if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && + fc_frame_crc_check(fp)) + goto crc_err; + FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx " + "data_len %x\n", len, offset, fsp->data_len); + + /* Data is corrupted indicate scsi-ml should retry */ + host_bcode = FC_DATA_OVRRUN; + goto err; + } + if (offset != fsp->xfer_len) + fsp->state |= FC_SRB_DISCONTIG; + + sg = scsi_sglist(sc); + nents = scsi_sg_count(sc); + + if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) { + copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, + &offset, NULL); + } else { + crc = crc32(~0, (u8 *) fh, sizeof(*fh)); + copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, + &offset, &crc); + buf = fc_frame_payload_get(fp, 0); + if (len % 4) + crc = crc32(crc, buf + len, 4 - (len % 4)); + + if (~crc != le32_to_cpu(fr_crc(fp))) { +crc_err: + this_cpu_inc(lport->stats->ErrorFrames); + /* per cpu count, not total count, but OK for limit */ + if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < FC_MAX_ERROR_CNT) + printk(KERN_WARNING "libfc: CRC error on data " + "frame for port (%6.6x)\n", + lport->port_id); + /* + * Assume the frame is total garbage. + * We may have copied it over the good part + * of the buffer. + * If so, we need to retry the entire operation. + * Otherwise, ignore it. 
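The REC timeout arithmetic in get_fsp_rec_tov() above works out as in this standalone sketch; the HZ value and the 2-second FC_DEF_E_D_TOV default are assumptions made for the example.

#include <stdio.h>

int main(void)
{
	unsigned int hz = 1000;                 /* example HZ */
	unsigned int e_d_tov_ms = 2000;         /* assumed FC_DEF_E_D_TOV */
	unsigned int rec_tov = (e_d_tov_ms * hz) / 1000 + hz;  /* msecs_to_jiffies() + HZ */

	printf("rec_tov = %u jiffies\n", rec_tov);      /* 3000 with these values */
	return 0;
}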
+ */ + if (fsp->state & FC_SRB_DISCONTIG) { + host_bcode = FC_CRC_ERROR; + goto err; + } + return; + } + } + + if (fsp->xfer_contig_end == start_offset) + fsp->xfer_contig_end += copy_len; + fsp->xfer_len += copy_len; + + /* + * In the very rare event that this data arrived after the response + * and completes the transfer, call the completion handler. + */ + if (unlikely(fsp->state & FC_SRB_RCV_STATUS) && + fsp->xfer_len == fsp->data_len - fsp->scsi_resid) { + FC_FCP_DBG( fsp, "complete out-of-order sequence\n" ); + fc_fcp_complete_locked(fsp); + } + return; +err: + fc_fcp_recovery(fsp, host_bcode); +} + +/** + * fc_fcp_send_data() - Send SCSI data to a target + * @fsp: The FCP packet the data is on + * @seq: The sequence the data is to be sent on + * @offset: The starting offset for this data request + * @seq_blen: The burst length for this data request + * + * Called after receiving a Transfer Ready data descriptor. + * If the LLD is capable of sequence offload then send down the + * seq_blen amount of data in single frame, otherwise send + * multiple frames of the maximum frame payload supported by + * the target port. + */ +static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, + size_t offset, size_t seq_blen) +{ + struct fc_exch *ep; + struct scsi_cmnd *sc; + struct scatterlist *sg; + struct fc_frame *fp = NULL; + struct fc_lport *lport = fsp->lp; + struct page *page; + size_t remaining; + size_t t_blen; + size_t tlen; + size_t sg_bytes; + size_t frame_offset, fh_parm_offset; + size_t off; + int error; + void *data = NULL; + void *page_addr; + int using_sg = lport->sg_supp; + u32 f_ctl; + + WARN_ON(seq_blen <= 0); + if (unlikely(offset + seq_blen > fsp->data_len)) { + /* this should never happen */ + FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx " + "offset %zx\n", seq_blen, offset); + fc_fcp_send_abort(fsp); + return 0; + } else if (offset != fsp->xfer_len) { + /* Out of Order Data Request - no problem, but unexpected. */ + FC_FCP_DBG(fsp, "xfer-ready non-contiguous. " + "seq_blen %zx offset %zx\n", seq_blen, offset); + } + + /* + * if LLD is capable of seq_offload then set transport + * burst length (t_blen) to seq_blen, otherwise set t_blen + * to max FC frame payload previously set in fsp->max_payload. + */ + t_blen = fsp->max_payload; + if (lport->seq_offload) { + t_blen = min(seq_blen, (size_t)lport->lso_max); + FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", + fsp, seq_blen, lport->lso_max, t_blen); + } + + if (t_blen > 512) + t_blen &= ~(512 - 1); /* round down to block size */ + sc = fsp->cmd; + + remaining = seq_blen; + fh_parm_offset = frame_offset = offset; + tlen = 0; + seq = fc_seq_start_next(seq); + f_ctl = FC_FC_REL_OFF; + WARN_ON(!seq); + + sg = scsi_sglist(sc); + + while (remaining > 0 && sg) { + if (offset >= sg->length) { + offset -= sg->length; + sg = sg_next(sg); + continue; + } + if (!fp) { + tlen = min(t_blen, remaining); + + /* + * TODO. Temporary workaround. fc_seq_send() can't + * handle odd lengths in non-linear skbs. + * This will be the final fragment only. + */ + if (tlen % 4) + using_sg = 0; + fp = fc_frame_alloc(lport, using_sg ? 
0 : tlen); + if (!fp) + return -ENOMEM; + + data = fc_frame_header_get(fp) + 1; + fh_parm_offset = frame_offset; + fr_max_payload(fp) = fsp->max_payload; + } + + off = offset + sg->offset; + sg_bytes = min(tlen, sg->length - offset); + sg_bytes = min(sg_bytes, + (size_t) (PAGE_SIZE - (off & ~PAGE_MASK))); + page = sg_page(sg) + (off >> PAGE_SHIFT); + if (using_sg) { + get_page(page); + skb_fill_page_desc(fp_skb(fp), + skb_shinfo(fp_skb(fp))->nr_frags, + page, off & ~PAGE_MASK, sg_bytes); + fp_skb(fp)->data_len += sg_bytes; + fr_len(fp) += sg_bytes; + fp_skb(fp)->truesize += PAGE_SIZE; + } else { + /* + * The scatterlist item may be bigger than PAGE_SIZE, + * but we must not cross pages inside the kmap. + */ + page_addr = kmap_atomic(page); + memcpy(data, (char *)page_addr + (off & ~PAGE_MASK), + sg_bytes); + kunmap_atomic(page_addr); + data += sg_bytes; + } + offset += sg_bytes; + frame_offset += sg_bytes; + tlen -= sg_bytes; + remaining -= sg_bytes; + + if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) && + (tlen)) + continue; + + /* + * Send sequence with transfer sequence initiative in case + * this is last FCP frame of the sequence. + */ + if (remaining == 0) + f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ; + + ep = fc_seq_exch(seq); + fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid, + FC_TYPE_FCP, f_ctl, fh_parm_offset); + + /* + * send fragment using for a sequence. + */ + error = fc_seq_send(lport, seq, fp); + if (error) { + WARN_ON(1); /* send error should be rare */ + return error; + } + fp = NULL; + } + fsp->xfer_len += seq_blen; /* premature count? */ + return 0; +} + +/** + * fc_fcp_abts_resp() - Receive an ABTS response + * @fsp: The FCP packet that is being aborted + * @fp: The response frame + */ +static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) +{ + int ba_done = 1; + struct fc_ba_rjt *brp; + struct fc_frame_header *fh; + + fh = fc_frame_header_get(fp); + switch (fh->fh_r_ctl) { + case FC_RCTL_BA_ACC: + break; + case FC_RCTL_BA_RJT: + brp = fc_frame_payload_get(fp, sizeof(*brp)); + if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR) + break; + fallthrough; + default: + /* + * we will let the command timeout + * and scsi-ml recover in this case, + * therefore cleared the ba_done flag. + */ + ba_done = 0; + } + + if (ba_done) + fc_fcp_abort_done(fsp); +} + +/** + * fc_fcp_recv() - Receive an FCP frame + * @seq: The sequence the frame is on + * @fp: The received frame + * @arg: The related FCP packet + * + * Context: Called from Soft IRQ context. Can not be called + * holding the FCP packet list lock. 
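The burst-length choice explained for fc_fcp_send_data() above can be pictured with a standalone sketch (all values invented): clamp to lso_max when sequence offload is available, otherwise fall back to the per-frame max_payload, then round down to a 512-byte block boundary.

#include <stdio.h>

int main(void)
{
	unsigned long seq_blen = 70000, lso_max = 65536, max_payload = 2048;
	int seq_offload = 1;

	unsigned long t_blen = seq_offload ?
		(seq_blen < lso_max ? seq_blen : lso_max) : max_payload;
	if (t_blen > 512)
		t_blen &= ~(512UL - 1);         /* round down to block size */

	printf("t_blen=%lu\n", t_blen);         /* 65536 here */
	return 0;
}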
+ */ +static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) +{ + struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg; + struct fc_lport *lport = fsp->lp; + struct fc_frame_header *fh; + struct fcp_txrdy *dd; + u8 r_ctl; + int rc = 0; + + if (IS_ERR(fp)) { + fc_fcp_error(fsp, fp); + return; + } + + fh = fc_frame_header_get(fp); + r_ctl = fh->fh_r_ctl; + + if (lport->state != LPORT_ST_READY) { + FC_FCP_DBG(fsp, "lport state %d, ignoring r_ctl %x\n", + lport->state, r_ctl); + goto out; + } + if (fc_fcp_lock_pkt(fsp)) + goto out; + + if (fh->fh_type == FC_TYPE_BLS) { + fc_fcp_abts_resp(fsp, fp); + goto unlock; + } + + if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) { + FC_FCP_DBG(fsp, "command aborted, ignoring r_ctl %x\n", r_ctl); + goto unlock; + } + + if (r_ctl == FC_RCTL_DD_DATA_DESC) { + /* + * received XFER RDY from the target + * need to send data to the target + */ + WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED); + dd = fc_frame_payload_get(fp, sizeof(*dd)); + WARN_ON(!dd); + + rc = fc_fcp_send_data(fsp, seq, + (size_t) ntohl(dd->ft_data_ro), + (size_t) ntohl(dd->ft_burst_len)); + if (!rc) + seq->rec_data = fsp->xfer_len; + } else if (r_ctl == FC_RCTL_DD_SOL_DATA) { + /* + * received a DATA frame + * next we will copy the data to the system buffer + */ + WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */ + fc_fcp_recv_data(fsp, fp); + seq->rec_data = fsp->xfer_contig_end; + } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) { + WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED); + + fc_fcp_resp(fsp, fp); + } else { + FC_FCP_DBG(fsp, "unexpected frame. r_ctl %x\n", r_ctl); + } +unlock: + fc_fcp_unlock_pkt(fsp); +out: + fc_frame_free(fp); +} + +/** + * fc_fcp_resp() - Handler for FCP responses + * @fsp: The FCP packet the response is for + * @fp: The response frame + */ +static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) +{ + struct fc_frame_header *fh; + struct fcp_resp *fc_rp; + struct fcp_resp_ext *rp_ex; + struct fcp_resp_rsp_info *fc_rp_info; + u32 plen; + u32 expected_len; + u32 respl = 0; + u32 snsl = 0; + u8 flags = 0; + + plen = fr_len(fp); + fh = (struct fc_frame_header *)fr_hdr(fp); + if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp))) + goto len_err; + plen -= sizeof(*fh); + fc_rp = (struct fcp_resp *)(fh + 1); + fsp->cdb_status = fc_rp->fr_status; + flags = fc_rp->fr_flags; + fsp->scsi_comp_flags = flags; + expected_len = fsp->data_len; + + /* if ddp, update xfer len */ + fc_fcp_ddp_done(fsp); + + if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) { + rp_ex = (void *)(fc_rp + 1); + if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) { + if (plen < sizeof(*fc_rp) + sizeof(*rp_ex)) + goto len_err; + fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1); + if (flags & FCP_RSP_LEN_VAL) { + respl = ntohl(rp_ex->fr_rsp_len); + if ((respl != FCP_RESP_RSP_INFO_LEN4) && + (respl != FCP_RESP_RSP_INFO_LEN8)) + goto len_err; + if (fsp->wait_for_comp) { + /* Abuse cdb_status for rsp code */ + fsp->cdb_status = fc_rp_info->rsp_code; + complete(&fsp->tm_done); + /* + * tmfs will not have any scsi cmd so + * exit here + */ + return; + } + } + if (flags & FCP_SNS_LEN_VAL) { + snsl = ntohl(rp_ex->fr_sns_len); + if (snsl > SCSI_SENSE_BUFFERSIZE) + snsl = SCSI_SENSE_BUFFERSIZE; + memcpy(fsp->cmd->sense_buffer, + (char *)fc_rp_info + respl, snsl); + } + } + if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) { + if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid)) + goto len_err; + if (flags & FCP_RESID_UNDER) { + fsp->scsi_resid = ntohl(rp_ex->fr_resid); + /* + 
* The cmnd->underflow is the minimum number of + * bytes that must be transferred for this + * command. Provided a sense condition is not + * present, make sure the actual amount + * transferred is at least the underflow value + * or fail. + */ + if (!(flags & FCP_SNS_LEN_VAL) && + (fc_rp->fr_status == 0) && + (scsi_bufflen(fsp->cmd) - + fsp->scsi_resid) < fsp->cmd->underflow) + goto err; + expected_len -= fsp->scsi_resid; + } else { + fsp->status_code = FC_ERROR; + } + } + } + fsp->state |= FC_SRB_RCV_STATUS; + + /* + * Check for missing or extra data frames. + */ + if (unlikely(fsp->cdb_status == SAM_STAT_GOOD && + fsp->xfer_len != expected_len)) { + if (fsp->xfer_len < expected_len) { + /* + * Some data may be queued locally, + * Wait a at least one jiffy to see if it is delivered. + * If this expires without data, we may do SRR. + */ + if (fsp->lp->qfull) { + FC_FCP_DBG(fsp, "tgt %6.6x queue busy retry\n", + fsp->rport->port_id); + return; + } + FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx data underrun " + "len %x, data len %x\n", + fsp->rport->port_id, + fsp->xfer_len, expected_len, fsp->data_len); + fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); + return; + } + fsp->status_code = FC_DATA_OVRRUN; + FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx greater than expected, " + "len %x, data len %x\n", + fsp->rport->port_id, + fsp->xfer_len, expected_len, fsp->data_len); + } + fc_fcp_complete_locked(fsp); + return; + +len_err: + FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u " + "snsl %u\n", flags, fr_len(fp), respl, snsl); +err: + fsp->status_code = FC_ERROR; + fc_fcp_complete_locked(fsp); +} + +/** + * fc_fcp_complete_locked() - Complete processing of a fcp_pkt with the + * fcp_pkt lock held + * @fsp: The FCP packet to be completed + * + * This function may sleep if a timer is pending. The packet lock must be + * held, and the host lock must not be held. + */ +static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) +{ + struct fc_lport *lport = fsp->lp; + struct fc_seq *seq; + struct fc_exch *ep; + u32 f_ctl; + + if (fsp->state & FC_SRB_ABORT_PENDING) + return; + + if (fsp->state & FC_SRB_ABORTED) { + if (!fsp->status_code) + fsp->status_code = FC_CMD_ABORTED; + } else { + /* + * Test for transport underrun, independent of response + * underrun status. + */ + if (fsp->cdb_status == SAM_STAT_GOOD && + fsp->xfer_len < fsp->data_len && !fsp->io_status && + (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) || + fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) { + FC_FCP_DBG(fsp, "data underrun, xfer %zx data %x\n", + fsp->xfer_len, fsp->data_len); + fsp->status_code = FC_DATA_UNDRUN; + } + } + + seq = fsp->seq_ptr; + if (seq) { + fsp->seq_ptr = NULL; + if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) { + struct fc_frame *conf_frame; + struct fc_seq *csp; + + csp = fc_seq_start_next(seq); + conf_frame = fc_fcp_frame_alloc(fsp->lp, 0); + if (conf_frame) { + f_ctl = FC_FC_SEQ_INIT; + f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ; + ep = fc_seq_exch(seq); + fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL, + ep->did, ep->sid, + FC_TYPE_FCP, f_ctl, 0); + fc_seq_send(lport, csp, conf_frame); + } + } + fc_exch_done(seq); + } + /* + * Some resets driven by SCSI are not I/Os and do not have + * SCSI commands associated with the requests. We should not + * call I/O completion if we do not have a SCSI command. 
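The underflow rule described in fc_fcp_resp() above reduces to a small comparison, sketched standalone here with invented values: with good SCSI status and no sense data, the bytes actually transferred (bufflen - resid) must reach cmnd->underflow or the command is failed.

#include <stdio.h>

int main(void)
{
	unsigned int bufflen = 4096, resid = 1024, underflow = 4096;
	unsigned int moved = bufflen - resid;

	printf("%s\n", moved < underflow ? "fail: underrun" : "ok");
	return 0;
}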
+ */ + if (fsp->cmd) + fc_io_compl(fsp); +} + +/** + * fc_fcp_cleanup_cmd() - Cancel the active exchange on a fcp_pkt + * @fsp: The FCP packet whose exchanges should be canceled + * @error: The reason for the cancellation + */ +static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error) +{ + if (fsp->seq_ptr) { + fc_exch_done(fsp->seq_ptr); + fsp->seq_ptr = NULL; + } + fsp->status_code = error; +} + +/** + * fc_fcp_cleanup_each_cmd() - Cancel all exchanges on a local port + * @lport: The local port whose exchanges should be canceled + * @id: The target's ID + * @lun: The LUN + * @error: The reason for cancellation + * + * If lun or id is -1, they are ignored. + */ +static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id, + unsigned int lun, int error) +{ + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); + struct fc_fcp_pkt *fsp; + struct scsi_cmnd *sc_cmd; + unsigned long flags; + + spin_lock_irqsave(&si->scsi_queue_lock, flags); +restart: + list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { + sc_cmd = fsp->cmd; + if (id != -1 && scmd_id(sc_cmd) != id) + continue; + + if (lun != -1 && sc_cmd->device->lun != lun) + continue; + + fc_fcp_pkt_hold(fsp); + spin_unlock_irqrestore(&si->scsi_queue_lock, flags); + + spin_lock_bh(&fsp->scsi_pkt_lock); + if (!(fsp->state & FC_SRB_COMPL)) { + fsp->state |= FC_SRB_COMPL; + /* + * TODO: dropping scsi_pkt_lock and then reacquiring + * again around fc_fcp_cleanup_cmd() is required, + * since fc_fcp_cleanup_cmd() calls into + * fc_seq_set_resp() and that func preempts cpu using + * schedule. May be schedule and related code should be + * removed instead of unlocking here to avoid scheduling + * while atomic bug. + */ + spin_unlock_bh(&fsp->scsi_pkt_lock); + + fc_fcp_cleanup_cmd(fsp, error); + + spin_lock_bh(&fsp->scsi_pkt_lock); + fc_io_compl(fsp); + } + spin_unlock_bh(&fsp->scsi_pkt_lock); + + fc_fcp_pkt_release(fsp); + spin_lock_irqsave(&si->scsi_queue_lock, flags); + /* + * while we dropped the lock multiple pkts could + * have been released, so we have to start over. 
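That rescan-from-the-head pattern can be reduced to the standalone sketch below: take a reference, drop the list lock to do work that may sleep, then start the walk over because the list may have changed in the meantime (the array stands in for scsi_pkt_queue).

#include <stdio.h>

int main(void)
{
	int handled[3] = { 0, 0, 0 };
	int i;

restart:
	for (i = 0; i < 3; i++) {
		if (handled[i])
			continue;
		/* "drop the lock", clean up entry i, "retake the lock" */
		handled[i] = 1;
		goto restart;   /* list may have changed: scan again from the head */
	}
	printf("all entries handled\n");
	return 0;
}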
+ */ + goto restart; + } + spin_unlock_irqrestore(&si->scsi_queue_lock, flags); +} + +/** + * fc_fcp_abort_io() - Abort all FCP-SCSI exchanges on a local port + * @lport: The local port whose exchanges are to be aborted + */ +static void fc_fcp_abort_io(struct fc_lport *lport) +{ + fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_HRD_ERROR); +} + +/** + * fc_fcp_pkt_send() - Send a fcp_pkt + * @lport: The local port to send the FCP packet on + * @fsp: The FCP packet to send + * + * Return: Zero for success and -1 for failure + * Locks: Called without locks held + */ +static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp) +{ + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); + unsigned long flags; + int rc; + + libfc_priv(fsp->cmd)->fsp = fsp; + fsp->cdb_cmd.fc_dl = htonl(fsp->data_len); + fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK; + + int_to_scsilun(fsp->cmd->device->lun, &fsp->cdb_cmd.fc_lun); + memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); + + spin_lock_irqsave(&si->scsi_queue_lock, flags); + list_add_tail(&fsp->list, &si->scsi_pkt_queue); + spin_unlock_irqrestore(&si->scsi_queue_lock, flags); + rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv); + if (unlikely(rc)) { + spin_lock_irqsave(&si->scsi_queue_lock, flags); + libfc_priv(fsp->cmd)->fsp = NULL; + list_del(&fsp->list); + spin_unlock_irqrestore(&si->scsi_queue_lock, flags); + } + + return rc; +} + +/** + * fc_fcp_cmd_send() - Send a FCP command + * @lport: The local port to send the command on + * @fsp: The FCP packet the command is on + * @resp: The handler for the response + */ +static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp, + void (*resp)(struct fc_seq *, + struct fc_frame *fp, + void *arg)) +{ + struct fc_frame *fp; + struct fc_seq *seq; + struct fc_rport *rport; + struct fc_rport_libfc_priv *rpriv; + const size_t len = sizeof(fsp->cdb_cmd); + int rc = 0; + + if (fc_fcp_lock_pkt(fsp)) + return 0; + + fp = fc_fcp_frame_alloc(lport, sizeof(fsp->cdb_cmd)); + if (!fp) { + rc = -1; + goto unlock; + } + + memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len); + fr_fsp(fp) = fsp; + rport = fsp->rport; + fsp->max_payload = rport->maxframe_size; + rpriv = rport->dd_data; + + fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, + rpriv->local_port->port_id, FC_TYPE_FCP, + FC_FCTL_REQ, 0); + + seq = fc_exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, fsp, 0); + if (!seq) { + rc = -1; + goto unlock; + } + fsp->seq_ptr = seq; + fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */ + + fsp->timer.function = fc_fcp_timeout; + if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) + fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); + +unlock: + fc_fcp_unlock_pkt(fsp); + return rc; +} + +/** + * fc_fcp_error() - Handler for FCP layer errors + * @fsp: The FCP packet the error is on + * @fp: The frame that has errored + */ +static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) +{ + int error = PTR_ERR(fp); + + if (fc_fcp_lock_pkt(fsp)) + return; + + if (error == -FC_EX_CLOSED) { + fc_fcp_retry_cmd(fsp, FC_ERROR); + goto unlock; + } + + /* + * clear abort pending, because the lower layer + * decided to force completion. 
+ */ + fsp->state &= ~FC_SRB_ABORT_PENDING; + fsp->status_code = FC_CMD_PLOGO; + fc_fcp_complete_locked(fsp); +unlock: + fc_fcp_unlock_pkt(fsp); +} + +/** + * fc_fcp_pkt_abort() - Abort a fcp_pkt + * @fsp: The FCP packet to abort on + * + * Called to send an abort and then wait for abort completion + */ +static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) +{ + int rc = FAILED; + unsigned long ticks_left; + + FC_FCP_DBG(fsp, "pkt abort state %x\n", fsp->state); + if (fc_fcp_send_abort(fsp)) { + FC_FCP_DBG(fsp, "failed to send abort\n"); + return FAILED; + } + + if (fsp->state & FC_SRB_ABORTED) { + FC_FCP_DBG(fsp, "target abort cmd completed\n"); + return SUCCESS; + } + + init_completion(&fsp->tm_done); + fsp->wait_for_comp = 1; + + spin_unlock_bh(&fsp->scsi_pkt_lock); + ticks_left = wait_for_completion_timeout(&fsp->tm_done, + FC_SCSI_TM_TOV); + spin_lock_bh(&fsp->scsi_pkt_lock); + fsp->wait_for_comp = 0; + + if (!ticks_left) { + FC_FCP_DBG(fsp, "target abort cmd failed\n"); + } else if (fsp->state & FC_SRB_ABORTED) { + FC_FCP_DBG(fsp, "target abort cmd passed\n"); + rc = SUCCESS; + fc_fcp_complete_locked(fsp); + } + + return rc; +} + +/** + * fc_lun_reset_send() - Send LUN reset command + * @t: Timer context used to fetch the FSP packet + */ +static void fc_lun_reset_send(struct timer_list *t) +{ + struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer); + struct fc_lport *lport = fsp->lp; + + if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) { + if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) + return; + if (fc_fcp_lock_pkt(fsp)) + return; + fsp->timer.function = fc_lun_reset_send; + fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); + fc_fcp_unlock_pkt(fsp); + } +} + +/** + * fc_lun_reset() - Send a LUN RESET command to a device + * and wait for the reply + * @lport: The local port to sent the command on + * @fsp: The FCP packet that identifies the LUN to be reset + * @id: The SCSI command ID + * @lun: The LUN ID to be reset + */ +static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp, + unsigned int id, unsigned int lun) +{ + int rc; + + fsp->cdb_cmd.fc_dl = htonl(fsp->data_len); + fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET; + int_to_scsilun(lun, &fsp->cdb_cmd.fc_lun); + + fsp->wait_for_comp = 1; + init_completion(&fsp->tm_done); + + fc_lun_reset_send(&fsp->timer); + + /* + * wait for completion of reset + * after that make sure all commands are terminated + */ + rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV); + + spin_lock_bh(&fsp->scsi_pkt_lock); + fsp->state |= FC_SRB_COMPL; + spin_unlock_bh(&fsp->scsi_pkt_lock); + + del_timer_sync(&fsp->timer); + + spin_lock_bh(&fsp->scsi_pkt_lock); + if (fsp->seq_ptr) { + fc_exch_done(fsp->seq_ptr); + fsp->seq_ptr = NULL; + } + fsp->wait_for_comp = 0; + spin_unlock_bh(&fsp->scsi_pkt_lock); + + if (!rc) { + FC_SCSI_DBG(lport, "lun reset failed\n"); + return FAILED; + } + + /* cdb_status holds the tmf's rsp code */ + if (fsp->cdb_status != FCP_TMF_CMPL) + return FAILED; + + FC_SCSI_DBG(lport, "lun reset to lun %u completed\n", lun); + fc_fcp_cleanup_each_cmd(lport, id, lun, FC_CMD_ABORTED); + return SUCCESS; +} + +/** + * fc_tm_done() - Task Management response handler + * @seq: The sequence that the response is on + * @fp: The response frame + * @arg: The FCP packet the response is for + */ +static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) +{ + struct fc_fcp_pkt *fsp = arg; + struct fc_frame_header *fh; + + if (IS_ERR(fp)) { + /* + * If there is an error just let it timeout or wait + * for TMF to be 
aborted if it timedout. + * + * scsi-eh will escalate for when either happens. + */ + return; + } + + if (fc_fcp_lock_pkt(fsp)) + goto out; + + /* + * raced with eh timeout handler. + */ + if (!fsp->seq_ptr || !fsp->wait_for_comp) + goto out_unlock; + + fh = fc_frame_header_get(fp); + if (fh->fh_type != FC_TYPE_BLS) + fc_fcp_resp(fsp, fp); + fsp->seq_ptr = NULL; + fc_exch_done(seq); +out_unlock: + fc_fcp_unlock_pkt(fsp); +out: + fc_frame_free(fp); +} + +/** + * fc_fcp_cleanup() - Cleanup all FCP exchanges on a local port + * @lport: The local port to be cleaned up + */ +static void fc_fcp_cleanup(struct fc_lport *lport) +{ + fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_ERROR); +} + +/** + * fc_fcp_timeout() - Handler for fcp_pkt timeouts + * @t: Timer context used to fetch the FSP packet + * + * If REC is supported then just issue it and return. The REC exchange will + * complete or time out and recovery can continue at that point. Otherwise, + * if the response has been received without all the data it has been + * ER_TIMEOUT since the response was received. If the response has not been + * received we see if data was received recently. If it has been then we + * continue waiting, otherwise, we abort the command. + */ +static void fc_fcp_timeout(struct timer_list *t) +{ + struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer); + struct fc_rport *rport = fsp->rport; + struct fc_rport_libfc_priv *rpriv = rport->dd_data; + + if (fc_fcp_lock_pkt(fsp)) + return; + + if (fsp->cdb_cmd.fc_tm_flags) + goto unlock; + + if (fsp->lp->qfull) { + FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n", + fsp->timer_delay); + fsp->timer.function = fc_fcp_timeout; + fc_fcp_timer_set(fsp, fsp->timer_delay); + goto unlock; + } + FC_FCP_DBG(fsp, "fcp timeout, delay %d flags %x state %x\n", + fsp->timer_delay, rpriv->flags, fsp->state); + fsp->state |= FC_SRB_FCP_PROCESSING_TMO; + + if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) + fc_fcp_rec(fsp); + else if (fsp->state & FC_SRB_RCV_STATUS) + fc_fcp_complete_locked(fsp); + else + fc_fcp_recovery(fsp, FC_TIMED_OUT); + fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO; +unlock: + fc_fcp_unlock_pkt(fsp); +} + +/** + * fc_fcp_rec() - Send a REC ELS request + * @fsp: The FCP packet to send the REC request on + */ +static void fc_fcp_rec(struct fc_fcp_pkt *fsp) +{ + struct fc_lport *lport; + struct fc_frame *fp; + struct fc_rport *rport; + struct fc_rport_libfc_priv *rpriv; + + lport = fsp->lp; + rport = fsp->rport; + rpriv = rport->dd_data; + if (!fsp->seq_ptr || rpriv->rp_state != RPORT_ST_READY) { + fsp->status_code = FC_HRD_ERROR; + fsp->io_status = 0; + fc_fcp_complete_locked(fsp); + return; + } + + fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec)); + if (!fp) + goto retry; + + fr_seq(fp) = fsp->seq_ptr; + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, + rpriv->local_port->port_id, FC_TYPE_ELS, + FC_FCTL_REQ, 0); + if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC, + fc_fcp_rec_resp, fsp, + 2 * lport->r_a_tov)) { + fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ + return; + } +retry: + if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) + fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); + else + fc_fcp_recovery(fsp, FC_TIMED_OUT); +} + +/** + * fc_fcp_rec_resp() - Handler for REC ELS responses + * @seq: The sequence the response is on + * @fp: The response frame + * @arg: The FCP packet the response is on + * + * If the response is a reject then the scsi layer will handle + * the timeout. 
If the response is a LS_ACC then if the I/O was not completed + * set the timeout and return. If the I/O was completed then complete the + * exchange and tell the SCSI layer. + */ +static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) +{ + struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg; + struct fc_els_rec_acc *recp; + struct fc_els_ls_rjt *rjt; + u32 e_stat; + u8 opcode; + u32 offset; + enum dma_data_direction data_dir; + enum fc_rctl r_ctl; + struct fc_rport_libfc_priv *rpriv; + + if (IS_ERR(fp)) { + fc_fcp_rec_error(fsp, fp); + return; + } + + if (fc_fcp_lock_pkt(fsp)) + goto out; + + fsp->recov_retry = 0; + opcode = fc_frame_payload_op(fp); + if (opcode == ELS_LS_RJT) { + rjt = fc_frame_payload_get(fp, sizeof(*rjt)); + switch (rjt->er_reason) { + default: + FC_FCP_DBG(fsp, + "device %x invalid REC reject %d/%d\n", + fsp->rport->port_id, rjt->er_reason, + rjt->er_explan); + fallthrough; + case ELS_RJT_UNSUP: + FC_FCP_DBG(fsp, "device does not support REC\n"); + rpriv = fsp->rport->dd_data; + /* + * if we do not spport RECs or got some bogus + * reason then resetup timer so we check for + * making progress. + */ + rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; + break; + case ELS_RJT_LOGIC: + case ELS_RJT_UNAB: + FC_FCP_DBG(fsp, "device %x REC reject %d/%d\n", + fsp->rport->port_id, rjt->er_reason, + rjt->er_explan); + /* + * If response got lost or is stuck in the + * queue somewhere we have no idea if and when + * the response will be received. So quarantine + * the xid and retry the command. + */ + if (rjt->er_explan == ELS_EXPL_OXID_RXID) { + struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); + ep->state |= FC_EX_QUARANTINE; + fsp->state |= FC_SRB_ABORTED; + fc_fcp_retry_cmd(fsp, FC_TRANS_RESET); + break; + } + fc_fcp_recovery(fsp, FC_TRANS_RESET); + break; + } + } else if (opcode == ELS_LS_ACC) { + if (fsp->state & FC_SRB_ABORTED) + goto unlock_out; + + data_dir = fsp->cmd->sc_data_direction; + recp = fc_frame_payload_get(fp, sizeof(*recp)); + offset = ntohl(recp->reca_fc4value); + e_stat = ntohl(recp->reca_e_stat); + + if (e_stat & ESB_ST_COMPLETE) { + + /* + * The exchange is complete. + * + * For output, we must've lost the response. + * For input, all data must've been sent. + * We lost may have lost the response + * (and a confirmation was requested) and maybe + * some data. + * + * If all data received, send SRR + * asking for response. If partial data received, + * or gaps, SRR requests data at start of gap. + * Recovery via SRR relies on in-order-delivery. + */ + if (data_dir == DMA_TO_DEVICE) { + r_ctl = FC_RCTL_DD_CMD_STATUS; + } else if (fsp->xfer_contig_end == offset) { + r_ctl = FC_RCTL_DD_CMD_STATUS; + } else { + offset = fsp->xfer_contig_end; + r_ctl = FC_RCTL_DD_SOL_DATA; + } + fc_fcp_srr(fsp, r_ctl, offset); + } else if (e_stat & ESB_ST_SEQ_INIT) { + /* + * The remote port has the initiative, so just + * keep waiting for it to complete. + */ + fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); + } else { + + /* + * The exchange is incomplete, we have seq. initiative. + * Lost response with requested confirmation, + * lost confirmation, lost transfer ready or + * lost write data. + * + * For output, if not all data was received, ask + * for transfer ready to be repeated. + * + * If we received or sent all the data, send SRR to + * request response. + * + * If we lost a response, we may have lost some read + * data as well. 
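The SRR choice spelled out above boils down to: ask for status when the command was a write or every contiguous byte has arrived, otherwise ask for data starting at the first gap. A standalone sketch with invented offsets:

#include <stdio.h>

int main(void)
{
	int is_write = 0;
	unsigned int rec_offset = 8192;         /* progress reported via REC */
	unsigned int xfer_contig_end = 4096;    /* our contiguous progress   */

	if (is_write || xfer_contig_end == rec_offset)
		printf("SRR for status\n");
	else
		printf("SRR for data at offset %u\n", xfer_contig_end);
	return 0;
}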
+ */ + r_ctl = FC_RCTL_DD_SOL_DATA; + if (data_dir == DMA_TO_DEVICE) { + r_ctl = FC_RCTL_DD_CMD_STATUS; + if (offset < fsp->data_len) + r_ctl = FC_RCTL_DD_DATA_DESC; + } else if (offset == fsp->xfer_contig_end) { + r_ctl = FC_RCTL_DD_CMD_STATUS; + } else if (fsp->xfer_contig_end < offset) { + offset = fsp->xfer_contig_end; + } + fc_fcp_srr(fsp, r_ctl, offset); + } + } +unlock_out: + fc_fcp_unlock_pkt(fsp); +out: + fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ + fc_frame_free(fp); +} + +/** + * fc_fcp_rec_error() - Handler for REC errors + * @fsp: The FCP packet the error is on + * @fp: The REC frame + */ +static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) +{ + int error = PTR_ERR(fp); + + if (fc_fcp_lock_pkt(fsp)) + goto out; + + switch (error) { + case -FC_EX_CLOSED: + FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange closed\n", + fsp, fsp->rport->port_id); + fc_fcp_retry_cmd(fsp, FC_ERROR); + break; + + default: + FC_FCP_DBG(fsp, "REC %p fid %6.6x error unexpected error %d\n", + fsp, fsp->rport->port_id, error); + fsp->status_code = FC_CMD_PLOGO; + fallthrough; + + case -FC_EX_TIMEOUT: + /* + * Assume REC or LS_ACC was lost. + * The exchange manager will have aborted REC, so retry. + */ + FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange timeout retry %d/%d\n", + fsp, fsp->rport->port_id, fsp->recov_retry, + FC_MAX_RECOV_RETRY); + if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) + fc_fcp_rec(fsp); + else + fc_fcp_recovery(fsp, FC_ERROR); + break; + } + fc_fcp_unlock_pkt(fsp); +out: + fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ +} + +/** + * fc_fcp_recovery() - Handler for fcp_pkt recovery + * @fsp: The FCP pkt that needs to be aborted + * @code: The FCP status code to set + */ +static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code) +{ + FC_FCP_DBG(fsp, "start recovery code %x\n", code); + fsp->status_code = code; + fsp->cdb_status = 0; + fsp->io_status = 0; + /* + * if this fails then we let the scsi command timer fire and + * scsi-ml escalate. + */ + fc_fcp_send_abort(fsp); +} + +/** + * fc_fcp_srr() - Send a SRR request (Sequence Retransmission Request) + * @fsp: The FCP packet the SRR is to be sent on + * @r_ctl: The R_CTL field for the SRR request + * @offset: The SRR relative offset + * This is called after receiving status but insufficient data, or + * when expecting status but the request has timed out. 
+ */ +static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) +{ + struct fc_lport *lport = fsp->lp; + struct fc_rport *rport; + struct fc_rport_libfc_priv *rpriv; + struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); + struct fc_seq *seq; + struct fcp_srr *srr; + struct fc_frame *fp; + + rport = fsp->rport; + rpriv = rport->dd_data; + + if (!(rpriv->flags & FC_RP_FLAGS_RETRY) || + rpriv->rp_state != RPORT_ST_READY) + goto retry; /* shouldn't happen */ + fp = fc_fcp_frame_alloc(lport, sizeof(*srr)); + if (!fp) + goto retry; + + srr = fc_frame_payload_get(fp, sizeof(*srr)); + memset(srr, 0, sizeof(*srr)); + srr->srr_op = ELS_SRR; + srr->srr_ox_id = htons(ep->oxid); + srr->srr_rx_id = htons(ep->rxid); + srr->srr_r_ctl = r_ctl; + srr->srr_rel_off = htonl(offset); + + fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, + rpriv->local_port->port_id, FC_TYPE_FCP, + FC_FCTL_REQ, 0); + + seq = fc_exch_seq_send(lport, fp, fc_fcp_srr_resp, + fc_fcp_pkt_destroy, + fsp, get_fsp_rec_tov(fsp)); + if (!seq) + goto retry; + + fsp->recov_seq = seq; + fsp->xfer_len = offset; + fsp->xfer_contig_end = offset; + fsp->state &= ~FC_SRB_RCV_STATUS; + fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */ + return; +retry: + fc_fcp_retry_cmd(fsp, FC_TRANS_RESET); +} + +/** + * fc_fcp_srr_resp() - Handler for SRR response + * @seq: The sequence the SRR is on + * @fp: The SRR frame + * @arg: The FCP packet the SRR is on + */ +static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) +{ + struct fc_fcp_pkt *fsp = arg; + struct fc_frame_header *fh; + + if (IS_ERR(fp)) { + fc_fcp_srr_error(fsp, fp); + return; + } + + if (fc_fcp_lock_pkt(fsp)) + goto out; + + fh = fc_frame_header_get(fp); + /* + * BUG? fc_fcp_srr_error calls fc_exch_done which would release + * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT, + * then fc_exch_timeout would be sending an abort. The fc_exch_done + * call by fc_fcp_srr_error would prevent fc_exch.c from seeing + * an abort response though. + */ + if (fh->fh_type == FC_TYPE_BLS) { + fc_fcp_unlock_pkt(fsp); + return; + } + + switch (fc_frame_payload_op(fp)) { + case ELS_LS_ACC: + fsp->recov_retry = 0; + fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); + break; + case ELS_LS_RJT: + default: + fc_fcp_recovery(fsp, FC_ERROR); + break; + } + fc_fcp_unlock_pkt(fsp); +out: + fc_exch_done(seq); + fc_frame_free(fp); +} + +/** + * fc_fcp_srr_error() - Handler for SRR errors + * @fsp: The FCP packet that the SRR error is on + * @fp: The SRR frame + */ +static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) +{ + if (fc_fcp_lock_pkt(fsp)) + goto out; + switch (PTR_ERR(fp)) { + case -FC_EX_TIMEOUT: + FC_FCP_DBG(fsp, "SRR timeout, retries %d\n", fsp->recov_retry); + if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) + fc_fcp_rec(fsp); + else + fc_fcp_recovery(fsp, FC_TIMED_OUT); + break; + case -FC_EX_CLOSED: /* e.g., link failure */ + FC_FCP_DBG(fsp, "SRR error, exchange closed\n"); + fallthrough; + default: + fc_fcp_retry_cmd(fsp, FC_ERROR); + break; + } + fc_fcp_unlock_pkt(fsp); +out: + fc_exch_done(fsp->recov_seq); +} + +/** + * fc_fcp_lport_queue_ready() - Determine if the lport and it's queue is ready + * @lport: The local port to be checked + */ +static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport) +{ + /* lock ? 
*/ + return (lport->state == LPORT_ST_READY) && + lport->link_up && !lport->qfull; +} + +/** + * fc_queuecommand() - The queuecommand function of the SCSI template + * @shost: The Scsi_Host that the command was issued to + * @sc_cmd: The scsi_cmnd to be executed + * + * This is the i/o strategy routine, called by the SCSI layer. + */ +int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd) +{ + struct fc_lport *lport = shost_priv(shost); + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_fcp_pkt *fsp; + int rval; + int rc = 0; + + rval = fc_remote_port_chkready(rport); + if (rval) { + sc_cmd->result = rval; + scsi_done(sc_cmd); + return 0; + } + + if (!*(struct fc_remote_port **)rport->dd_data) { + /* + * rport is transitioning from blocked/deleted to + * online + */ + sc_cmd->result = DID_IMM_RETRY << 16; + scsi_done(sc_cmd); + goto out; + } + + if (!fc_fcp_lport_queue_ready(lport)) { + if (lport->qfull) { + if (fc_fcp_can_queue_ramp_down(lport)) + shost_printk(KERN_ERR, lport->host, + "libfc: queue full, " + "reducing can_queue to %d.\n", + lport->host->can_queue); + } + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + + fsp = fc_fcp_pkt_alloc(lport, GFP_ATOMIC); + if (fsp == NULL) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + + /* + * build the libfc request pkt + */ + fsp->cmd = sc_cmd; /* save the cmd */ + fsp->rport = rport; /* set the remote port ptr */ + + /* + * set up the transfer length + */ + fsp->data_len = scsi_bufflen(sc_cmd); + fsp->xfer_len = 0; + + /* + * setup the data direction + */ + if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { + fsp->req_flags = FC_SRB_READ; + this_cpu_inc(lport->stats->InputRequests); + this_cpu_add(lport->stats->InputBytes, fsp->data_len); + } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + fsp->req_flags = FC_SRB_WRITE; + this_cpu_inc(lport->stats->OutputRequests); + this_cpu_add(lport->stats->OutputBytes, fsp->data_len); + } else { + fsp->req_flags = 0; + this_cpu_inc(lport->stats->ControlRequests); + } + + /* + * send it to the lower layer + * if we get -1 return then put the request in the pending + * queue. + */ + rval = fc_fcp_pkt_send(lport, fsp); + if (rval != 0) { + fsp->state = FC_SRB_FREE; + fc_fcp_pkt_release(fsp); + rc = SCSI_MLQUEUE_HOST_BUSY; + } +out: + return rc; +} +EXPORT_SYMBOL(fc_queuecommand); + +/** + * fc_io_compl() - Handle responses for completed commands + * @fsp: The FCP packet that is complete + * + * Translates fcp_pkt errors to a Linux SCSI errors. + * The fcp packet lock must be held when calling. + */ +static void fc_io_compl(struct fc_fcp_pkt *fsp) +{ + struct fc_fcp_internal *si; + struct scsi_cmnd *sc_cmd; + struct fc_lport *lport; + unsigned long flags; + + /* release outstanding ddp context */ + fc_fcp_ddp_done(fsp); + + fsp->state |= FC_SRB_COMPL; + if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) { + spin_unlock_bh(&fsp->scsi_pkt_lock); + del_timer_sync(&fsp->timer); + spin_lock_bh(&fsp->scsi_pkt_lock); + } + + lport = fsp->lp; + si = fc_get_scsi_internal(lport); + + /* + * if can_queue ramp down is done then try can_queue ramp up + * since commands are completing now. 
+ */ + if (si->last_can_queue_ramp_down_time) + fc_fcp_can_queue_ramp_up(lport); + + sc_cmd = fsp->cmd; + libfc_priv(sc_cmd)->status = fsp->cdb_status; + switch (fsp->status_code) { + case FC_COMPLETE: + if (fsp->cdb_status == 0) { + /* + * good I/O status + */ + sc_cmd->result = DID_OK << 16; + if (fsp->scsi_resid) + libfc_priv(sc_cmd)->resid_len = fsp->scsi_resid; + } else { + /* + * transport level I/O was ok but scsi + * has non zero status + */ + sc_cmd->result = (DID_OK << 16) | fsp->cdb_status; + } + break; + case FC_ERROR: + FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " + "due to FC_ERROR\n"); + sc_cmd->result = DID_ERROR << 16; + break; + case FC_DATA_UNDRUN: + if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) { + /* + * scsi status is good but transport level + * underrun. + */ + if (fsp->state & FC_SRB_RCV_STATUS) { + sc_cmd->result = DID_OK << 16; + } else { + FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml" + " due to FC_DATA_UNDRUN (trans)\n"); + sc_cmd->result = DID_ERROR << 16; + } + } else { + /* + * scsi got underrun, this is an error + */ + FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " + "due to FC_DATA_UNDRUN (scsi)\n"); + libfc_priv(sc_cmd)->resid_len = fsp->scsi_resid; + sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; + } + break; + case FC_DATA_OVRRUN: + /* + * overrun is an error + */ + FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " + "due to FC_DATA_OVRRUN\n"); + sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; + break; + case FC_CMD_ABORTED: + if (host_byte(sc_cmd->result) == DID_TIME_OUT) + FC_FCP_DBG(fsp, "Returning DID_TIME_OUT to scsi-ml " + "due to FC_CMD_ABORTED\n"); + else { + FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " + "due to FC_CMD_ABORTED\n"); + set_host_byte(sc_cmd, DID_ERROR); + } + sc_cmd->result |= fsp->io_status; + break; + case FC_CMD_RESET: + FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml " + "due to FC_CMD_RESET\n"); + sc_cmd->result = (DID_RESET << 16); + break; + case FC_TRANS_RESET: + FC_FCP_DBG(fsp, "Returning DID_SOFT_ERROR to scsi-ml " + "due to FC_TRANS_RESET\n"); + sc_cmd->result = (DID_SOFT_ERROR << 16); + break; + case FC_HRD_ERROR: + FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml " + "due to FC_HRD_ERROR\n"); + sc_cmd->result = (DID_NO_CONNECT << 16); + break; + case FC_CRC_ERROR: + FC_FCP_DBG(fsp, "Returning DID_PARITY to scsi-ml " + "due to FC_CRC_ERROR\n"); + sc_cmd->result = (DID_PARITY << 16); + break; + case FC_TIMED_OUT: + FC_FCP_DBG(fsp, "Returning DID_BUS_BUSY to scsi-ml " + "due to FC_TIMED_OUT\n"); + sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; + break; + default: + FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " + "due to unknown error\n"); + sc_cmd->result = (DID_ERROR << 16); + break; + } + + if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE) + sc_cmd->result = (DID_TRANSPORT_DISRUPTED << 16); + + spin_lock_irqsave(&si->scsi_queue_lock, flags); + list_del(&fsp->list); + libfc_priv(sc_cmd)->fsp = NULL; + spin_unlock_irqrestore(&si->scsi_queue_lock, flags); + scsi_done(sc_cmd); + + /* release ref from initial allocation in queue command */ + fc_fcp_pkt_release(fsp); +} + +/** + * fc_eh_abort() - Abort a command + * @sc_cmd: The SCSI command to abort + * + * From SCSI host template. + * Send an ABTS to the target device and wait for the response. 
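The result values assembled in fc_io_compl() above pack a host byte next to the SCSI status byte; a standalone sketch of that packing, with the DID_*/SAM_STAT_* constants copied in so it builds on its own:

#include <stdio.h>

#define DID_OK                   0x00
#define SAM_STAT_CHECK_CONDITION 0x02

int main(void)
{
	unsigned int result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;

	printf("host=0x%02x status=0x%02x\n",
	       (result >> 16) & 0xff, result & 0xff);
	return 0;
}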
+ */ +int fc_eh_abort(struct scsi_cmnd *sc_cmd) +{ + struct fc_fcp_pkt *fsp; + struct fc_lport *lport; + struct fc_fcp_internal *si; + int rc = FAILED; + unsigned long flags; + int rval; + + rval = fc_block_scsi_eh(sc_cmd); + if (rval) + return rval; + + lport = shost_priv(sc_cmd->device->host); + if (lport->state != LPORT_ST_READY) + return rc; + else if (!lport->link_up) + return rc; + + si = fc_get_scsi_internal(lport); + spin_lock_irqsave(&si->scsi_queue_lock, flags); + fsp = libfc_priv(sc_cmd)->fsp; + if (!fsp) { + /* command completed while scsi eh was setting up */ + spin_unlock_irqrestore(&si->scsi_queue_lock, flags); + return SUCCESS; + } + /* grab a ref so the fsp and sc_cmd cannot be released from under us */ + fc_fcp_pkt_hold(fsp); + spin_unlock_irqrestore(&si->scsi_queue_lock, flags); + + if (fc_fcp_lock_pkt(fsp)) { + /* completed while we were waiting for timer to be deleted */ + rc = SUCCESS; + goto release_pkt; + } + + rc = fc_fcp_pkt_abort(fsp); + fc_fcp_unlock_pkt(fsp); + +release_pkt: + fc_fcp_pkt_release(fsp); + return rc; +} +EXPORT_SYMBOL(fc_eh_abort); + +/** + * fc_eh_device_reset() - Reset a single LUN + * @sc_cmd: The SCSI command which identifies the device whose + * LUN is to be reset + * + * Set from SCSI host template. + */ +int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) +{ + struct fc_lport *lport; + struct fc_fcp_pkt *fsp; + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + int rc = FAILED; + int rval; + + rval = fc_block_scsi_eh(sc_cmd); + if (rval) + return rval; + + lport = shost_priv(sc_cmd->device->host); + + if (lport->state != LPORT_ST_READY) + return rc; + + FC_SCSI_DBG(lport, "Resetting rport (%6.6x)\n", rport->port_id); + + fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO); + if (fsp == NULL) { + printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); + goto out; + } + + /* + * Build the libfc request pkt. Do not set the scsi cmnd, because + * the sc passed in is not setup for execution like when sent + * through the queuecommand callout. + */ + fsp->rport = rport; /* set the remote port ptr */ + + /* + * flush outstanding commands + */ + rc = fc_lun_reset(lport, fsp, scmd_id(sc_cmd), sc_cmd->device->lun); + fsp->state = FC_SRB_FREE; + fc_fcp_pkt_release(fsp); + +out: + return rc; +} +EXPORT_SYMBOL(fc_eh_device_reset); + +/** + * fc_eh_host_reset() - Reset a Scsi_Host. + * @sc_cmd: The SCSI command that identifies the SCSI host to be reset + */ +int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) +{ + struct Scsi_Host *shost = sc_cmd->device->host; + struct fc_lport *lport = shost_priv(shost); + unsigned long wait_tmo; + + FC_SCSI_DBG(lport, "Resetting host\n"); + + fc_lport_reset(lport); + wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; + while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, + wait_tmo)) + msleep(1000); + + if (fc_fcp_lport_queue_ready(lport)) { + shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " + "on port (%6.6x)\n", lport->port_id); + return SUCCESS; + } else { + shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " + "port (%6.6x) is not ready.\n", + lport->port_id); + return FAILED; + } +} +EXPORT_SYMBOL(fc_eh_host_reset); + +/** + * fc_slave_alloc() - Configure the queue depth of a Scsi_Host + * @sdev: The SCSI device that identifies the SCSI host + * + * Configures queue depth based on host's cmd_per_len. If not set + * then we use the libfc default. 
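+ * Returns 0 once the default queue depth has been applied, or -ENXIO if
+ * the remote port is not (yet) ready.
+ *
+ * Together with fc_queuecommand(), fc_eh_abort(), fc_eh_device_reset()
+ * and fc_eh_host_reset() above, this helper is meant to be wired
+ * directly into an LLD's SCSI host template. A minimal, illustrative
+ * sketch (the template name is invented here and other mandatory fields
+ * such as .name and .can_queue are omitted):
+ *
+ *	static struct scsi_host_template my_fc_sht = {
+ *		.module			 = THIS_MODULE,
+ *		.queuecommand		 = fc_queuecommand,
+ *		.eh_abort_handler	 = fc_eh_abort,
+ *		.eh_device_reset_handler = fc_eh_device_reset,
+ *		.eh_host_reset_handler	 = fc_eh_host_reset,
+ *		.slave_alloc		 = fc_slave_alloc,
+ *	};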
+ */ +int fc_slave_alloc(struct scsi_device *sdev) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + + if (!rport || fc_remote_port_chkready(rport)) + return -ENXIO; + + scsi_change_queue_depth(sdev, FC_FCP_DFLT_QUEUE_DEPTH); + return 0; +} +EXPORT_SYMBOL(fc_slave_alloc); + +/** + * fc_fcp_destroy() - Tear down the FCP layer for a given local port + * @lport: The local port that no longer needs the FCP layer + */ +void fc_fcp_destroy(struct fc_lport *lport) +{ + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); + + if (!list_empty(&si->scsi_pkt_queue)) + printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " + "port (%6.6x)\n", lport->port_id); + + mempool_destroy(si->scsi_pkt_pool); + kfree(si); + lport->scsi_priv = NULL; +} +EXPORT_SYMBOL(fc_fcp_destroy); + +int fc_setup_fcp(void) +{ + int rc = 0; + + scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt", + sizeof(struct fc_fcp_pkt), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!scsi_pkt_cachep) { + printk(KERN_ERR "libfc: Unable to allocate SRB cache, " + "module load failed!"); + rc = -ENOMEM; + } + + return rc; +} + +void fc_destroy_fcp(void) +{ + kmem_cache_destroy(scsi_pkt_cachep); +} + +/** + * fc_fcp_init() - Initialize the FCP layer for a local port + * @lport: The local port to initialize the exchange layer for + */ +int fc_fcp_init(struct fc_lport *lport) +{ + int rc; + struct fc_fcp_internal *si; + + if (!lport->tt.fcp_cmd_send) + lport->tt.fcp_cmd_send = fc_fcp_cmd_send; + + if (!lport->tt.fcp_cleanup) + lport->tt.fcp_cleanup = fc_fcp_cleanup; + + if (!lport->tt.fcp_abort_io) + lport->tt.fcp_abort_io = fc_fcp_abort_io; + + si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL); + if (!si) + return -ENOMEM; + lport->scsi_priv = si; + si->max_can_queue = lport->host->can_queue; + INIT_LIST_HEAD(&si->scsi_pkt_queue); + spin_lock_init(&si->scsi_queue_lock); + + si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); + if (!si->scsi_pkt_pool) { + rc = -ENOMEM; + goto free_internal; + } + return 0; + +free_internal: + kfree(si); + return rc; +} +EXPORT_SYMBOL(fc_fcp_init); diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c new file mode 100644 index 000000000..f3aefb2de --- /dev/null +++ b/drivers/scsi/libfc/fc_frame.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2007 Intel Corporation. All rights reserved. + * + * Maintained at www.Open-FCoE.org + */ + +/* + * Frame allocation. + */ +#include +#include +#include +#include +#include + +#include + +/* + * Check the CRC in a frame. + */ +u32 fc_frame_crc_check(struct fc_frame *fp) +{ + u32 crc; + u32 error; + const u8 *bp; + unsigned int len; + + WARN_ON(!fc_frame_is_linear(fp)); + fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; + len = (fr_len(fp) + 3) & ~3; /* round up length to include fill */ + bp = (const u8 *) fr_hdr(fp); + crc = ~crc32(~0, bp, len); + error = crc ^ fr_crc(fp); + return error; +} +EXPORT_SYMBOL(fc_frame_crc_check); + +/* + * Allocate a frame intended to be sent. + * Get an sk_buff for the frame and set the length. 
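+ * The requested length must be a multiple of four bytes; room for the
+ * FC frame header plus transport head- and tailroom is reserved here,
+ * so callers account only for the payload. Payloads that are not
+ * 4-byte aligned should go through fc_frame_alloc_fill() below, which
+ * zero-fills the pad bytes.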
+ */ +struct fc_frame *_fc_frame_alloc(size_t len) +{ + struct fc_frame *fp; + struct sk_buff *skb; + + WARN_ON((len % sizeof(u32)) != 0); + len += sizeof(struct fc_frame_header); + skb = alloc_skb_fclone(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM + + NET_SKB_PAD, GFP_ATOMIC); + if (!skb) + return NULL; + skb_reserve(skb, NET_SKB_PAD + FC_FRAME_HEADROOM); + fp = (struct fc_frame *) skb; + fc_frame_init(fp); + skb_put(skb, len); + return fp; +} +EXPORT_SYMBOL(_fc_frame_alloc); + +struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) +{ + struct fc_frame *fp; + size_t fill; + + fill = payload_len % 4; + if (fill != 0) + fill = 4 - fill; + fp = _fc_frame_alloc(payload_len + fill); + if (fp) { + memset((char *) fr_hdr(fp) + payload_len, 0, fill); + /* trim is OK, we just allocated it so there are no fragments */ + skb_trim(fp_skb(fp), + payload_len + sizeof(struct fc_frame_header)); + } + return fp; +} +EXPORT_SYMBOL(fc_frame_alloc_fill); diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c new file mode 100644 index 000000000..0e6a1355d --- /dev/null +++ b/drivers/scsi/libfc/fc_libfc.c @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2009 Intel Corporation. All rights reserved. + * + * Maintained at www.Open-FCoE.org + */ + +#include +#include +#include +#include +#include + +#include + +#include "fc_encode.h" +#include "fc_libfc.h" + +MODULE_AUTHOR("Open-FCoE.org"); +MODULE_DESCRIPTION("libfc"); +MODULE_LICENSE("GPL v2"); + +unsigned int fc_debug_logging; +module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); + +DEFINE_MUTEX(fc_prov_mutex); +static LIST_HEAD(fc_local_ports); +struct blocking_notifier_head fc_lport_notifier_head = + BLOCKING_NOTIFIER_INIT(fc_lport_notifier_head); +EXPORT_SYMBOL(fc_lport_notifier_head); + +/* + * Providers which primarily send requests and PRLIs. + */ +struct fc4_prov *fc_active_prov[FC_FC4_PROV_SIZE] = { + [0] = &fc_rport_t0_prov, + [FC_TYPE_FCP] = &fc_rport_fcp_init, +}; + +/* + * Providers which receive requests. + */ +struct fc4_prov *fc_passive_prov[FC_FC4_PROV_SIZE] = { + [FC_TYPE_ELS] = &fc_lport_els_prov, +}; + +/** + * libfc_init() - Initialize libfc.ko + */ +static int __init libfc_init(void) +{ + int rc = 0; + + rc = fc_setup_fcp(); + if (rc) + return rc; + + rc = fc_setup_exch_mgr(); + if (rc) + goto destroy_pkt_cache; + + rc = fc_setup_rport(); + if (rc) + goto destroy_em; + + return rc; +destroy_em: + fc_destroy_exch_mgr(); +destroy_pkt_cache: + fc_destroy_fcp(); + return rc; +} +module_init(libfc_init); + +/** + * libfc_exit() - Tear down libfc.ko + */ +static void __exit libfc_exit(void) +{ + fc_destroy_fcp(); + fc_destroy_exch_mgr(); + fc_destroy_rport(); +} +module_exit(libfc_exit); + +/** + * fc_copy_buffer_to_sglist() - This routine copies the data of a buffer + * into a scatter-gather list (SG list). + * + * @buf: pointer to the data buffer. + * @len: the byte-length of the data buffer. + * @sg: pointer to the pointer of the SG list. + * @nents: pointer to the remaining number of entries in the SG list. + * @offset: pointer to the current offset in the SG list. + * @crc: pointer to the 32-bit crc value. + * If crc is NULL, CRC is not calculated. 
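+ *
+ * Returns the number of bytes actually copied, which may be less than
+ * @len if the scatter-gather list runs out of entries first. @nents and
+ * @offset are updated in place so a subsequent call can resume where
+ * this one stopped; @crc, when supplied, is updated over the copied
+ * bytes.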
+ */ +u32 fc_copy_buffer_to_sglist(void *buf, size_t len, + struct scatterlist *sg, + u32 *nents, size_t *offset, + u32 *crc) +{ + size_t remaining = len; + u32 copy_len = 0; + + while (remaining > 0 && sg) { + size_t off, sg_bytes; + void *page_addr; + + if (*offset >= sg->length) { + /* + * Check for end and drop resources + * from the last iteration. + */ + if (!(*nents)) + break; + --(*nents); + *offset -= sg->length; + sg = sg_next(sg); + continue; + } + sg_bytes = min(remaining, sg->length - *offset); + + /* + * The scatterlist item may be bigger than PAGE_SIZE, + * but we are limited to mapping PAGE_SIZE at a time. + */ + off = *offset + sg->offset; + sg_bytes = min(sg_bytes, + (size_t)(PAGE_SIZE - (off & ~PAGE_MASK))); + page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT)); + if (crc) + *crc = crc32(*crc, buf, sg_bytes); + memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes); + kunmap_atomic(page_addr); + buf += sg_bytes; + *offset += sg_bytes; + remaining -= sg_bytes; + copy_len += sg_bytes; + } + return copy_len; +} + +/** + * fc_fill_hdr() - fill FC header fields based on request + * @fp: reply frame containing header to be filled in + * @in_fp: request frame containing header to use in filling in reply + * @r_ctl: R_CTL value for header + * @f_ctl: F_CTL value for header, with 0 pad + * @seq_cnt: sequence count for the header, ignored if frame has a sequence + * @parm_offset: parameter / offset value + */ +void fc_fill_hdr(struct fc_frame *fp, const struct fc_frame *in_fp, + enum fc_rctl r_ctl, u32 f_ctl, u16 seq_cnt, u32 parm_offset) +{ + struct fc_frame_header *fh; + struct fc_frame_header *in_fh; + struct fc_seq *sp; + u32 fill; + + fh = __fc_frame_header_get(fp); + in_fh = __fc_frame_header_get(in_fp); + + if (f_ctl & FC_FC_END_SEQ) { + fill = -fr_len(fp) & 3; + if (fill) { + /* TODO, this may be a problem with fragmented skb */ + skb_put_zero(fp_skb(fp), fill); + f_ctl |= fill; + } + fr_eof(fp) = FC_EOF_T; + } else { + WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */ + fr_eof(fp) = FC_EOF_N; + } + + fh->fh_r_ctl = r_ctl; + memcpy(fh->fh_d_id, in_fh->fh_s_id, sizeof(fh->fh_d_id)); + memcpy(fh->fh_s_id, in_fh->fh_d_id, sizeof(fh->fh_s_id)); + fh->fh_type = in_fh->fh_type; + hton24(fh->fh_f_ctl, f_ctl); + fh->fh_ox_id = in_fh->fh_ox_id; + fh->fh_rx_id = in_fh->fh_rx_id; + fh->fh_cs_ctl = 0; + fh->fh_df_ctl = 0; + fh->fh_parm_offset = htonl(parm_offset); + + sp = fr_seq(in_fp); + if (sp) { + fr_seq(fp) = sp; + fh->fh_seq_id = sp->id; + seq_cnt = sp->cnt; + } else { + fh->fh_seq_id = 0; + } + fh->fh_seq_cnt = ntohs(seq_cnt); + fr_sof(fp) = seq_cnt ? 
FC_SOF_N3 : FC_SOF_I3; + fr_encaps(fp) = fr_encaps(in_fp); +} +EXPORT_SYMBOL(fc_fill_hdr); + +/** + * fc_fill_reply_hdr() - fill FC reply header fields based on request + * @fp: reply frame containing header to be filled in + * @in_fp: request frame containing header to use in filling in reply + * @r_ctl: R_CTL value for reply + * @parm_offset: parameter / offset value + */ +void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp, + enum fc_rctl r_ctl, u32 parm_offset) +{ + struct fc_seq *sp; + + sp = fr_seq(in_fp); + if (sp) + fr_seq(fp) = fc_seq_start_next(sp); + fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset); +} +EXPORT_SYMBOL(fc_fill_reply_hdr); + +/** + * fc_fc4_conf_lport_params() - Modify "service_params" of specified lport + * if there is service provider (target provider) registered with libfc + * for specified "fc_ft_type" + * @lport: Local port which service_params needs to be modified + * @type: FC-4 type, such as FC_TYPE_FCP + */ +void fc_fc4_conf_lport_params(struct fc_lport *lport, enum fc_fh_type type) +{ + struct fc4_prov *prov_entry; + BUG_ON(type >= FC_FC4_PROV_SIZE); + BUG_ON(!lport); + prov_entry = fc_passive_prov[type]; + if (type == FC_TYPE_FCP) { + if (prov_entry && prov_entry->recv) + lport->service_params |= FCP_SPPF_TARG_FCN; + } +} + +void fc_lport_iterate(void (*notify)(struct fc_lport *, void *), void *arg) +{ + struct fc_lport *lport; + + mutex_lock(&fc_prov_mutex); + list_for_each_entry(lport, &fc_local_ports, lport_list) + notify(lport, arg); + mutex_unlock(&fc_prov_mutex); +} +EXPORT_SYMBOL(fc_lport_iterate); + +/** + * fc_fc4_register_provider() - register FC-4 upper-level provider. + * @type: FC-4 type, such as FC_TYPE_FCP + * @prov: structure describing provider including ops vector. + * + * Returns 0 on success, negative error otherwise. + */ +int fc_fc4_register_provider(enum fc_fh_type type, struct fc4_prov *prov) +{ + struct fc4_prov **prov_entry; + int ret = 0; + + if (type >= FC_FC4_PROV_SIZE) + return -EINVAL; + mutex_lock(&fc_prov_mutex); + prov_entry = (prov->recv ? fc_passive_prov : fc_active_prov) + type; + if (*prov_entry) + ret = -EBUSY; + else + *prov_entry = prov; + mutex_unlock(&fc_prov_mutex); + return ret; +} +EXPORT_SYMBOL(fc_fc4_register_provider); + +/** + * fc_fc4_deregister_provider() - deregister FC-4 upper-level provider. + * @type: FC-4 type, such as FC_TYPE_FCP + * @prov: structure describing provider including ops vector. + */ +void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov) +{ + BUG_ON(type >= FC_FC4_PROV_SIZE); + mutex_lock(&fc_prov_mutex); + if (prov->recv) + RCU_INIT_POINTER(fc_passive_prov[type], NULL); + else + RCU_INIT_POINTER(fc_active_prov[type], NULL); + mutex_unlock(&fc_prov_mutex); + synchronize_rcu(); +} +EXPORT_SYMBOL(fc_fc4_deregister_provider); + +/** + * fc_fc4_add_lport() - add new local port to list and run notifiers. + * @lport: The new local port. + */ +void fc_fc4_add_lport(struct fc_lport *lport) +{ + mutex_lock(&fc_prov_mutex); + list_add_tail(&lport->lport_list, &fc_local_ports); + blocking_notifier_call_chain(&fc_lport_notifier_head, + FC_LPORT_EV_ADD, lport); + mutex_unlock(&fc_prov_mutex); +} + +/** + * fc_fc4_del_lport() - remove local port from list and run notifiers. + * @lport: The new local port. 
+ */ +void fc_fc4_del_lport(struct fc_lport *lport) +{ + mutex_lock(&fc_prov_mutex); + list_del(&lport->lport_list); + blocking_notifier_call_chain(&fc_lport_notifier_head, + FC_LPORT_EV_DEL, lport); + mutex_unlock(&fc_prov_mutex); +} diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h new file mode 100644 index 000000000..685e3bdd0 --- /dev/null +++ b/drivers/scsi/libfc/fc_libfc.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright(c) 2009 Intel Corporation. All rights reserved. + * + * Maintained at www.Open-FCoE.org + */ + +#ifndef _FC_LIBFC_H_ +#define _FC_LIBFC_H_ + +#define FC_LIBFC_LOGGING 0x01 /* General logging, not categorized */ +#define FC_LPORT_LOGGING 0x02 /* lport layer logging */ +#define FC_DISC_LOGGING 0x04 /* discovery layer logging */ +#define FC_RPORT_LOGGING 0x08 /* rport layer logging */ +#define FC_FCP_LOGGING 0x10 /* I/O path logging */ +#define FC_EM_LOGGING 0x20 /* Exchange Manager logging */ +#define FC_EXCH_LOGGING 0x40 /* Exchange/Sequence logging */ +#define FC_SCSI_LOGGING 0x80 /* SCSI logging (mostly error handling) */ + +extern unsigned int fc_debug_logging; + +#define FC_CHECK_LOGGING(LEVEL, CMD) \ + do { \ + if (unlikely(fc_debug_logging & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ + } while (0) + +#define FC_LIBFC_DBG(fmt, args...) \ + FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \ + pr_info("libfc: " fmt, ##args)) + +#define FC_LPORT_DBG(lport, fmt, args...) \ + FC_CHECK_LOGGING(FC_LPORT_LOGGING, \ + pr_info("host%u: lport %6.6x: " fmt, \ + (lport)->host->host_no, \ + (lport)->port_id, ##args)) + +#define FC_DISC_DBG(disc, fmt, args...) \ + FC_CHECK_LOGGING(FC_DISC_LOGGING, \ + pr_info("host%u: disc: " fmt, \ + fc_disc_lport(disc)->host->host_no, \ + ##args)) + +#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \ + FC_CHECK_LOGGING(FC_RPORT_LOGGING, \ + pr_info("host%u: rport %6.6x: " fmt, \ + (lport)->host->host_no, \ + (port_id), ##args)) + +#define FC_RPORT_DBG(rdata, fmt, args...) \ + FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args) + +#define FC_FCP_DBG(pkt, fmt, args...) \ + FC_CHECK_LOGGING(FC_FCP_LOGGING, \ + { \ + if ((pkt)->seq_ptr) { \ + struct fc_exch *_ep = NULL; \ + _ep = fc_seq_exch((pkt)->seq_ptr); \ + pr_info("host%u: fcp: %6.6x: " \ + "xid %04x-%04x: " fmt, \ + (pkt)->lp->host->host_no, \ + (pkt)->rport->port_id, \ + (_ep)->oxid, (_ep)->rxid, ##args); \ + } else { \ + pr_info("host%u: fcp: %6.6x: " fmt, \ + (pkt)->lp->host->host_no, \ + (pkt)->rport->port_id, ##args); \ + } \ + }) + +#define FC_EXCH_DBG(exch, fmt, args...) \ + FC_CHECK_LOGGING(FC_EXCH_LOGGING, \ + pr_info("host%u: xid %4x: " fmt, \ + (exch)->lp->host->host_no, \ + exch->xid, ##args)) + +#define FC_SCSI_DBG(lport, fmt, args...) \ + FC_CHECK_LOGGING(FC_SCSI_LOGGING, \ + pr_info("host%u: scsi: " fmt, \ + (lport)->host->host_no, ##args)) + +/* + * FC-4 Providers. 
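+ *
+ * Both tables are indexed by FC-4 type (FC_TYPE_ELS, FC_TYPE_FCP, ...).
+ * fc_active_prov[] holds providers that originate requests and PRLIs;
+ * fc_passive_prov[] holds providers with a recv() handler for incoming
+ * frames. Entries change only under fc_prov_mutex, and the passive table
+ * is read under RCU in the receive path (see fc_lport_recv()).
+ *
+ * An illustrative registration of a passive FCP provider; the names
+ * my_fcp_prov and my_fcp_recv are invented for this sketch:
+ *
+ *	static struct fc4_prov my_fcp_prov = {
+ *		.module	= THIS_MODULE,
+ *		.recv	= my_fcp_recv,
+ *	};
+ *
+ *	err = fc_fc4_register_provider(FC_TYPE_FCP, &my_fcp_prov);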
+ */ +extern struct fc4_prov *fc_active_prov[]; /* providers without recv */ +extern struct fc4_prov *fc_passive_prov[]; /* providers with recv */ +extern struct mutex fc_prov_mutex; /* lock over table changes */ + +extern struct fc4_prov fc_rport_t0_prov; /* type 0 provider */ +extern struct fc4_prov fc_lport_els_prov; /* ELS provider */ +extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */ + +/* + * Set up direct-data placement for this I/O request + */ +void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid); +void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp); + +/* + * Module setup functions + */ +int fc_setup_exch_mgr(void); +void fc_destroy_exch_mgr(void); +int fc_setup_rport(void); +void fc_destroy_rport(void); +int fc_setup_fcp(void); +void fc_destroy_fcp(void); + +/* + * Internal libfc functions + */ +const char *fc_els_resp_type(struct fc_frame *); +extern void fc_fc4_add_lport(struct fc_lport *); +extern void fc_fc4_del_lport(struct fc_lport *); +extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type); + +/* + * Copies a buffer into an sg list + */ +u32 fc_copy_buffer_to_sglist(void *buf, size_t len, + struct scatterlist *sg, + u32 *nents, size_t *offset, + u32 *crc); + +#endif /* _FC_LIBFC_H_ */ diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c new file mode 100644 index 000000000..ab06e9aeb --- /dev/null +++ b/drivers/scsi/libfc/fc_lport.c @@ -0,0 +1,2200 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2007 Intel Corporation. All rights reserved. + * + * Maintained at www.Open-FCoE.org + */ + +/* + * PORT LOCKING NOTES + * + * These comments only apply to the 'port code' which consists of the lport, + * disc and rport blocks. + * + * MOTIVATION + * + * The lport, disc and rport blocks all have mutexes that are used to protect + * those objects. The main motivation for these locks is to prevent from + * having an lport reset just before we send a frame. In that scenario the + * lport's FID would get set to zero and then we'd send a frame with an + * invalid SID. We also need to ensure that states don't change unexpectedly + * while processing another state. + * + * HIERARCHY + * + * The following hierarchy defines the locking rules. A greater lock + * may be held before acquiring a lesser lock, but a lesser lock should never + * be held while attempting to acquire a greater lock. Here is the hierarchy- + * + * lport > disc, lport > rport, disc > rport + * + * CALLBACKS + * + * The callbacks cause complications with this scheme. There is a callback + * from the rport (to either lport or disc) and a callback from disc + * (to the lport). + * + * As rports exit the rport state machine a callback is made to the owner of + * the rport to notify success or failure. Since the callback is likely to + * cause the lport or disc to grab its lock we cannot hold the rport lock + * while making the callback. To ensure that the rport is not free'd while + * processing the callback the rport callbacks are serialized through a + * single-threaded workqueue. An rport would never be free'd while in a + * callback handler because no other rport work in this queue can be executed + * at the same time. + * + * When discovery succeeds or fails a callback is made to the lport as + * notification. Currently, successful discovery causes the lport to take no + * action. A failure will cause the lport to reset. There is likely a circular + * locking problem with this implementation. 
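+ *
+ * As a concrete example of the callback rule above,
+ * fc_lport_rport_callback() in this file takes the lport mutex; it runs
+ * from the single-threaded rport work queue, so the rport mutex is never
+ * held when it is invoked.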
+ */ + +/* + * LPORT LOCKING + * + * The critical sections protected by the lport's mutex are quite broad and + * may be improved upon in the future. The lport code and its locking doesn't + * influence the I/O path, so excessive locking doesn't penalize I/O + * performance. + * + * The strategy is to lock whenever processing a request or response. Note + * that every _enter_* function corresponds to a state change. They generally + * change the lports state and then send a request out on the wire. We lock + * before calling any of these functions to protect that state change. This + * means that the entry points into the lport block manage the locks while + * the state machine can transition between states (i.e. _enter_* functions) + * while always staying protected. + * + * When handling responses we also hold the lport mutex broadly. When the + * lport receives the response frame it locks the mutex and then calls the + * appropriate handler for the particuar response. Generally a response will + * trigger a state change and so the lock must already be held. + * + * Retries also have to consider the locking. The retries occur from a work + * context and the work function will lock the lport and then retry the state + * (i.e. _enter_* function). + */ + +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "fc_encode.h" +#include "fc_libfc.h" + +/* Fabric IDs to use for point-to-point mode, chosen on whims. */ +#define FC_LOCAL_PTP_FID_LO 0x010101 +#define FC_LOCAL_PTP_FID_HI 0x010102 + +#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/ +#define MAX_CT_PAYLOAD 2048 +#define DISCOVERED_PORTS 4 +#define NUMBER_OF_PORTS 1 + +static void fc_lport_error(struct fc_lport *, struct fc_frame *); + +static void fc_lport_enter_reset(struct fc_lport *); +static void fc_lport_enter_flogi(struct fc_lport *); +static void fc_lport_enter_dns(struct fc_lport *); +static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state); +static void fc_lport_enter_scr(struct fc_lport *); +static void fc_lport_enter_ready(struct fc_lport *); +static void fc_lport_enter_logo(struct fc_lport *); +static void fc_lport_enter_fdmi(struct fc_lport *lport); +static void fc_lport_enter_ms(struct fc_lport *, enum fc_lport_state); + +static const char *fc_lport_state_names[] = { + [LPORT_ST_DISABLED] = "disabled", + [LPORT_ST_FLOGI] = "FLOGI", + [LPORT_ST_DNS] = "dNS", + [LPORT_ST_RNN_ID] = "RNN_ID", + [LPORT_ST_RSNN_NN] = "RSNN_NN", + [LPORT_ST_RSPN_ID] = "RSPN_ID", + [LPORT_ST_RFT_ID] = "RFT_ID", + [LPORT_ST_RFF_ID] = "RFF_ID", + [LPORT_ST_FDMI] = "FDMI", + [LPORT_ST_RHBA] = "RHBA", + [LPORT_ST_RPA] = "RPA", + [LPORT_ST_DHBA] = "DHBA", + [LPORT_ST_DPRT] = "DPRT", + [LPORT_ST_SCR] = "SCR", + [LPORT_ST_READY] = "Ready", + [LPORT_ST_LOGO] = "LOGO", + [LPORT_ST_RESET] = "reset", +}; + +/** + * struct fc_bsg_info - FC Passthrough managemet structure + * @job: The passthrough job + * @lport: The local port to pass through a command + * @rsp_code: The expected response code + * @sg: job->reply_payload.sg_list + * @nents: job->reply_payload.sg_cnt + * @offset: The offset into the response data + */ +struct fc_bsg_info { + struct bsg_job *job; + struct fc_lport *lport; + u16 rsp_code; + struct scatterlist *sg; + u32 nents; + size_t offset; +}; + +/** + * fc_frame_drop() - Dummy frame handler + * @lport: The local port the frame was received on + * @fp: The received frame + */ +static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp) +{ + fc_frame_free(fp); + 
return 0; +} + +/** + * fc_lport_rport_callback() - Event handler for rport events + * @lport: The lport which is receiving the event + * @rdata: private remote port data + * @event: The event that occurred + * + * Locking Note: The rport lock should not be held when calling + * this function. + */ +static void fc_lport_rport_callback(struct fc_lport *lport, + struct fc_rport_priv *rdata, + enum fc_rport_event event) +{ + FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event, + rdata->ids.port_id); + + mutex_lock(&lport->lp_mutex); + switch (event) { + case RPORT_EV_READY: + if (lport->state == LPORT_ST_DNS) { + lport->dns_rdata = rdata; + fc_lport_enter_ns(lport, LPORT_ST_RNN_ID); + } else if (lport->state == LPORT_ST_FDMI) { + lport->ms_rdata = rdata; + fc_lport_enter_ms(lport, LPORT_ST_DHBA); + } else { + FC_LPORT_DBG(lport, "Received an READY event " + "on port (%6.6x) for the directory " + "server, but the lport is not " + "in the DNS or FDMI state, it's in the " + "%d state", rdata->ids.port_id, + lport->state); + fc_rport_logoff(rdata); + } + break; + case RPORT_EV_LOGO: + case RPORT_EV_FAILED: + case RPORT_EV_STOP: + if (rdata->ids.port_id == FC_FID_DIR_SERV) + lport->dns_rdata = NULL; + else if (rdata->ids.port_id == FC_FID_MGMT_SERV) + lport->ms_rdata = NULL; + break; + case RPORT_EV_NONE: + break; + } + mutex_unlock(&lport->lp_mutex); +} + +/** + * fc_lport_state() - Return a string which represents the lport's state + * @lport: The lport whose state is to converted to a string + */ +static const char *fc_lport_state(struct fc_lport *lport) +{ + const char *cp; + + cp = fc_lport_state_names[lport->state]; + if (!cp) + cp = "unknown"; + return cp; +} + +/** + * fc_lport_ptp_setup() - Create an rport for point-to-point mode + * @lport: The lport to attach the ptp rport to + * @remote_fid: The FID of the ptp rport + * @remote_wwpn: The WWPN of the ptp rport + * @remote_wwnn: The WWNN of the ptp rport + */ +static void fc_lport_ptp_setup(struct fc_lport *lport, + u32 remote_fid, u64 remote_wwpn, + u64 remote_wwnn) +{ + lockdep_assert_held(&lport->lp_mutex); + + if (lport->ptp_rdata) { + fc_rport_logoff(lport->ptp_rdata); + kref_put(&lport->ptp_rdata->kref, fc_rport_destroy); + } + mutex_lock(&lport->disc.disc_mutex); + lport->ptp_rdata = fc_rport_create(lport, remote_fid); + if (!lport->ptp_rdata) { + printk(KERN_WARNING "libfc: Failed to setup lport 0x%x\n", + lport->port_id); + mutex_unlock(&lport->disc.disc_mutex); + return; + } + kref_get(&lport->ptp_rdata->kref); + lport->ptp_rdata->ids.port_name = remote_wwpn; + lport->ptp_rdata->ids.node_name = remote_wwnn; + mutex_unlock(&lport->disc.disc_mutex); + + fc_rport_login(lport->ptp_rdata); + + fc_lport_enter_ready(lport); +} + +/** + * fc_get_host_port_state() - Return the port state of the given Scsi_Host + * @shost: The SCSI host whose port state is to be determined + */ +void fc_get_host_port_state(struct Scsi_Host *shost) +{ + struct fc_lport *lport = shost_priv(shost); + + mutex_lock(&lport->lp_mutex); + if (!lport->link_up) + fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; + else + switch (lport->state) { + case LPORT_ST_READY: + fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + break; + default: + fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; + } + mutex_unlock(&lport->lp_mutex); +} +EXPORT_SYMBOL(fc_get_host_port_state); + +/** + * fc_get_host_speed() - Return the speed of the given Scsi_Host + * @shost: The SCSI host whose port speed is to be determined + */ +void fc_get_host_speed(struct Scsi_Host 
*shost) +{ + struct fc_lport *lport = shost_priv(shost); + + fc_host_speed(shost) = lport->link_speed; +} +EXPORT_SYMBOL(fc_get_host_speed); + +/** + * fc_get_host_stats() - Return the Scsi_Host's statistics + * @shost: The SCSI host whose statistics are to be returned + */ +struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) +{ + struct fc_host_statistics *fc_stats; + struct fc_lport *lport = shost_priv(shost); + unsigned int cpu; + u64 fcp_in_bytes = 0; + u64 fcp_out_bytes = 0; + + fc_stats = &lport->host_stats; + memset(fc_stats, 0, sizeof(struct fc_host_statistics)); + + fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ; + + for_each_possible_cpu(cpu) { + struct fc_stats *stats; + + stats = per_cpu_ptr(lport->stats, cpu); + + fc_stats->tx_frames += READ_ONCE(stats->TxFrames); + fc_stats->tx_words += READ_ONCE(stats->TxWords); + fc_stats->rx_frames += READ_ONCE(stats->RxFrames); + fc_stats->rx_words += READ_ONCE(stats->RxWords); + fc_stats->error_frames += READ_ONCE(stats->ErrorFrames); + fc_stats->invalid_crc_count += READ_ONCE(stats->InvalidCRCCount); + fc_stats->fcp_input_requests += READ_ONCE(stats->InputRequests); + fc_stats->fcp_output_requests += READ_ONCE(stats->OutputRequests); + fc_stats->fcp_control_requests += READ_ONCE(stats->ControlRequests); + fcp_in_bytes += READ_ONCE(stats->InputBytes); + fcp_out_bytes += READ_ONCE(stats->OutputBytes); + fc_stats->fcp_packet_alloc_failures += READ_ONCE(stats->FcpPktAllocFails); + fc_stats->fcp_packet_aborts += READ_ONCE(stats->FcpPktAborts); + fc_stats->fcp_frame_alloc_failures += READ_ONCE(stats->FcpFrameAllocFails); + fc_stats->link_failure_count += READ_ONCE(stats->LinkFailureCount); + } + fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000); + fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000); + fc_stats->lip_count = -1; + fc_stats->nos_count = -1; + fc_stats->loss_of_sync_count = -1; + fc_stats->loss_of_signal_count = -1; + fc_stats->prim_seq_protocol_err_count = -1; + fc_stats->dumped_frames = -1; + + /* update exches stats */ + fc_exch_update_stats(lport); + + return fc_stats; +} +EXPORT_SYMBOL(fc_get_host_stats); + +/** + * fc_lport_flogi_fill() - Fill in FLOGI command for request + * @lport: The local port the FLOGI is for + * @flogi: The FLOGI command + * @op: The opcode + */ +static void fc_lport_flogi_fill(struct fc_lport *lport, + struct fc_els_flogi *flogi, + unsigned int op) +{ + struct fc_els_csp *sp; + struct fc_els_cssp *cp; + + memset(flogi, 0, sizeof(*flogi)); + flogi->fl_cmd = (u8) op; + put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn); + put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn); + sp = &flogi->fl_csp; + sp->sp_hi_ver = 0x20; + sp->sp_lo_ver = 0x20; + sp->sp_bb_cred = htons(10); /* this gets set by gateway */ + sp->sp_bb_data = htons((u16) lport->mfs); + cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */ + cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); + if (op != ELS_FLOGI) { + sp->sp_features = htons(FC_SP_FT_CIRO); + sp->sp_tot_seq = htons(255); /* seq. 
we accept */ + sp->sp_rel_off = htons(0x1f); + sp->sp_e_d_tov = htonl(lport->e_d_tov); + + cp->cp_rdfs = htons((u16) lport->mfs); + cp->cp_con_seq = htons(255); + cp->cp_open_seq = 1; + } +} + +/** + * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port + * @lport: The local port to add a new FC-4 type to + * @type: The new FC-4 type + */ +static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) +{ + __be32 *mp; + + mp = &lport->fcts.ff_type_map[type / FC_NS_BPW]; + *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW)); +} + +/** + * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report. + * @lport: Fibre Channel local port receiving the RLIR + * @fp: The RLIR request frame + */ +static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp) +{ + lockdep_assert_held(&lport->lp_mutex); + + FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", + fc_lport_state(lport)); + + fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL); + fc_frame_free(fp); +} + +/** + * fc_lport_recv_echo_req() - Handle received ECHO request + * @lport: The local port receiving the ECHO + * @in_fp: ECHO request frame + */ +static void fc_lport_recv_echo_req(struct fc_lport *lport, + struct fc_frame *in_fp) +{ + struct fc_frame *fp; + unsigned int len; + void *pp; + void *dp; + + lockdep_assert_held(&lport->lp_mutex); + + FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n", + fc_lport_state(lport)); + + len = fr_len(in_fp) - sizeof(struct fc_frame_header); + pp = fc_frame_payload_get(in_fp, len); + + if (len < sizeof(__be32)) + len = sizeof(__be32); + + fp = fc_frame_alloc(lport, len); + if (fp) { + dp = fc_frame_payload_get(fp, len); + memcpy(dp, pp, len); + *((__be32 *)dp) = htonl(ELS_LS_ACC << 24); + fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); + } + fc_frame_free(in_fp); +} + +/** + * fc_lport_recv_rnid_req() - Handle received Request Node ID data request + * @lport: The local port receiving the RNID + * @in_fp: The RNID request frame + */ +static void fc_lport_recv_rnid_req(struct fc_lport *lport, + struct fc_frame *in_fp) +{ + struct fc_frame *fp; + struct fc_els_rnid *req; + struct { + struct fc_els_rnid_resp rnid; + struct fc_els_rnid_cid cid; + struct fc_els_rnid_gen gen; + } *rp; + struct fc_seq_els_data rjt_data; + u8 fmt; + size_t len; + + lockdep_assert_held(&lport->lp_mutex); + + FC_LPORT_DBG(lport, "Received RNID request while in state %s\n", + fc_lport_state(lport)); + + req = fc_frame_payload_get(in_fp, sizeof(*req)); + if (!req) { + rjt_data.reason = ELS_RJT_LOGIC; + rjt_data.explan = ELS_EXPL_NONE; + fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); + } else { + fmt = req->rnid_fmt; + len = sizeof(*rp); + if (fmt != ELS_RNIDF_GEN || + ntohl(lport->rnid_gen.rnid_atype) == 0) { + fmt = ELS_RNIDF_NONE; /* nothing to provide */ + len -= sizeof(rp->gen); + } + fp = fc_frame_alloc(lport, len); + if (fp) { + rp = fc_frame_payload_get(fp, len); + memset(rp, 0, len); + rp->rnid.rnid_cmd = ELS_LS_ACC; + rp->rnid.rnid_fmt = fmt; + rp->rnid.rnid_cid_len = sizeof(rp->cid); + rp->cid.rnid_wwpn = htonll(lport->wwpn); + rp->cid.rnid_wwnn = htonll(lport->wwnn); + if (fmt == ELS_RNIDF_GEN) { + rp->rnid.rnid_sid_len = sizeof(rp->gen); + memcpy(&rp->gen, &lport->rnid_gen, + sizeof(rp->gen)); + } + fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); + } + } + fc_frame_free(in_fp); +} + +/** + * fc_lport_recv_logo_req() - Handle received fabric LOGO request + * 
@lport: The local port receiving the LOGO + * @fp: The LOGO request frame + */ +static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) +{ + lockdep_assert_held(&lport->lp_mutex); + + fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL); + fc_lport_enter_reset(lport); + fc_frame_free(fp); +} + +/** + * fc_fabric_login() - Start the lport state machine + * @lport: The local port that should log into the fabric + * + * Locking Note: This function should not be called + * with the lport lock held. + */ +int fc_fabric_login(struct fc_lport *lport) +{ + int rc = -1; + + mutex_lock(&lport->lp_mutex); + if (lport->state == LPORT_ST_DISABLED || + lport->state == LPORT_ST_LOGO) { + fc_lport_state_enter(lport, LPORT_ST_RESET); + fc_lport_enter_reset(lport); + rc = 0; + } + mutex_unlock(&lport->lp_mutex); + + return rc; +} +EXPORT_SYMBOL(fc_fabric_login); + +/** + * __fc_linkup() - Handler for transport linkup events + * @lport: The lport whose link is up + */ +void __fc_linkup(struct fc_lport *lport) +{ + lockdep_assert_held(&lport->lp_mutex); + + if (!lport->link_up) { + lport->link_up = 1; + + if (lport->state == LPORT_ST_RESET) + fc_lport_enter_flogi(lport); + } +} + +/** + * fc_linkup() - Handler for transport linkup events + * @lport: The local port whose link is up + */ +void fc_linkup(struct fc_lport *lport) +{ + printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n", + lport->host->host_no, lport->port_id); + + mutex_lock(&lport->lp_mutex); + __fc_linkup(lport); + mutex_unlock(&lport->lp_mutex); +} +EXPORT_SYMBOL(fc_linkup); + +/** + * __fc_linkdown() - Handler for transport linkdown events + * @lport: The lport whose link is down + */ +void __fc_linkdown(struct fc_lport *lport) +{ + lockdep_assert_held(&lport->lp_mutex); + + if (lport->link_up) { + lport->link_up = 0; + fc_lport_enter_reset(lport); + lport->tt.fcp_cleanup(lport); + } +} + +/** + * fc_linkdown() - Handler for transport linkdown events + * @lport: The local port whose link is down + */ +void fc_linkdown(struct fc_lport *lport) +{ + printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n", + lport->host->host_no, lport->port_id); + + mutex_lock(&lport->lp_mutex); + __fc_linkdown(lport); + mutex_unlock(&lport->lp_mutex); +} +EXPORT_SYMBOL(fc_linkdown); + +/** + * fc_fabric_logoff() - Logout of the fabric + * @lport: The local port to logoff the fabric + * + * Return value: + * 0 for success, -1 for failure + */ +int fc_fabric_logoff(struct fc_lport *lport) +{ + lport->tt.disc_stop_final(lport); + mutex_lock(&lport->lp_mutex); + if (lport->dns_rdata) + fc_rport_logoff(lport->dns_rdata); + mutex_unlock(&lport->lp_mutex); + fc_rport_flush_queue(); + mutex_lock(&lport->lp_mutex); + fc_lport_enter_logo(lport); + mutex_unlock(&lport->lp_mutex); + cancel_delayed_work_sync(&lport->retry_work); + return 0; +} +EXPORT_SYMBOL(fc_fabric_logoff); + +/** + * fc_lport_destroy() - Unregister a fc_lport + * @lport: The local port to unregister + * + * Note: + * exit routine for fc_lport instance + * clean-up all the allocated memory + * and free up other system resources. 
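+ * Always returns zero; freeing the Scsi_Host that embeds the lport is
+ * left to the LLD.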
+ * + */ +int fc_lport_destroy(struct fc_lport *lport) +{ + mutex_lock(&lport->lp_mutex); + lport->state = LPORT_ST_DISABLED; + lport->link_up = 0; + lport->tt.frame_send = fc_frame_drop; + mutex_unlock(&lport->lp_mutex); + + lport->tt.fcp_abort_io(lport); + lport->tt.disc_stop_final(lport); + lport->tt.exch_mgr_reset(lport, 0, 0); + cancel_delayed_work_sync(&lport->retry_work); + fc_fc4_del_lport(lport); + return 0; +} +EXPORT_SYMBOL(fc_lport_destroy); + +/** + * fc_set_mfs() - Set the maximum frame size for a local port + * @lport: The local port to set the MFS for + * @mfs: The new MFS + */ +int fc_set_mfs(struct fc_lport *lport, u32 mfs) +{ + unsigned int old_mfs; + int rc = -EINVAL; + + mutex_lock(&lport->lp_mutex); + + old_mfs = lport->mfs; + + if (mfs >= FC_MIN_MAX_FRAME) { + mfs &= ~3; + if (mfs > FC_MAX_FRAME) + mfs = FC_MAX_FRAME; + mfs -= sizeof(struct fc_frame_header); + lport->mfs = mfs; + rc = 0; + } + + if (!rc && mfs < old_mfs) + fc_lport_enter_reset(lport); + + mutex_unlock(&lport->lp_mutex); + + return rc; +} +EXPORT_SYMBOL(fc_set_mfs); + +/** + * fc_lport_disc_callback() - Callback for discovery events + * @lport: The local port receiving the event + * @event: The discovery event + */ +static void fc_lport_disc_callback(struct fc_lport *lport, + enum fc_disc_event event) +{ + switch (event) { + case DISC_EV_SUCCESS: + FC_LPORT_DBG(lport, "Discovery succeeded\n"); + break; + case DISC_EV_FAILED: + printk(KERN_ERR "host%d: libfc: " + "Discovery failed for port (%6.6x)\n", + lport->host->host_no, lport->port_id); + mutex_lock(&lport->lp_mutex); + fc_lport_enter_reset(lport); + mutex_unlock(&lport->lp_mutex); + break; + case DISC_EV_NONE: + WARN_ON(1); + break; + } +} + +/** + * fc_lport_enter_ready() - Enter the ready state and start discovery + * @lport: The local port that is ready + */ +static void fc_lport_enter_ready(struct fc_lport *lport) +{ + lockdep_assert_held(&lport->lp_mutex); + + FC_LPORT_DBG(lport, "Entered READY from state %s\n", + fc_lport_state(lport)); + + fc_lport_state_enter(lport, LPORT_ST_READY); + if (lport->vport) + fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE); + fc_vports_linkchange(lport); + + if (!lport->ptp_rdata) + lport->tt.disc_start(fc_lport_disc_callback, lport); +} + +/** + * fc_lport_set_port_id() - set the local port Port ID + * @lport: The local port which will have its Port ID set. + * @port_id: The new port ID. + * @fp: The frame containing the incoming request, or NULL. + */ +static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id, + struct fc_frame *fp) +{ + lockdep_assert_held(&lport->lp_mutex); + + if (port_id) + printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n", + lport->host->host_no, port_id); + + lport->port_id = port_id; + + /* Update the fc_host */ + fc_host_port_id(lport->host) = port_id; + + if (lport->tt.lport_set_port_id) + lport->tt.lport_set_port_id(lport, port_id, fp); +} + +/** + * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint + * @lport: The local port which will have its Port ID set. + * @port_id: The new port ID. + * + * Called by the lower-level driver when transport sets the local port_id. + * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and + * discovery to be skipped. 
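+ * Only a non-zero @port_id received while the port is in the RESET or
+ * FLOGI state moves the lport straight to the READY state.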
+ */ +void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id) +{ + mutex_lock(&lport->lp_mutex); + + fc_lport_set_port_id(lport, port_id, NULL); + + switch (lport->state) { + case LPORT_ST_RESET: + case LPORT_ST_FLOGI: + if (port_id) + fc_lport_enter_ready(lport); + break; + default: + break; + } + mutex_unlock(&lport->lp_mutex); +} +EXPORT_SYMBOL(fc_lport_set_local_id); + +/** + * fc_lport_recv_flogi_req() - Receive a FLOGI request + * @lport: The local port that received the request + * @rx_fp: The FLOGI frame + * + * A received FLOGI request indicates a point-to-point connection. + * Accept it with the common service parameters indicating our N port. + * Set up to do a PLOGI if we have the higher-number WWPN. + */ +static void fc_lport_recv_flogi_req(struct fc_lport *lport, + struct fc_frame *rx_fp) +{ + struct fc_frame *fp; + struct fc_frame_header *fh; + struct fc_els_flogi *flp; + struct fc_els_flogi *new_flp; + u64 remote_wwpn; + u32 remote_fid; + u32 local_fid; + + lockdep_assert_held(&lport->lp_mutex); + + FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n", + fc_lport_state(lport)); + + remote_fid = fc_frame_sid(rx_fp); + flp = fc_frame_payload_get(rx_fp, sizeof(*flp)); + if (!flp) + goto out; + remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); + if (remote_wwpn == lport->wwpn) { + printk(KERN_WARNING "host%d: libfc: Received FLOGI from port " + "with same WWPN %16.16llx\n", + lport->host->host_no, remote_wwpn); + goto out; + } + FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn); + + /* + * XXX what is the right thing to do for FIDs? + * The originator might expect our S_ID to be 0xfffffe. + * But if so, both of us could end up with the same FID. + */ + local_fid = FC_LOCAL_PTP_FID_LO; + if (remote_wwpn < lport->wwpn) { + local_fid = FC_LOCAL_PTP_FID_HI; + if (!remote_fid || remote_fid == local_fid) + remote_fid = FC_LOCAL_PTP_FID_LO; + } else if (!remote_fid) { + remote_fid = FC_LOCAL_PTP_FID_HI; + } + + fc_lport_set_port_id(lport, local_fid, rx_fp); + + fp = fc_frame_alloc(lport, sizeof(*flp)); + if (fp) { + new_flp = fc_frame_payload_get(fp, sizeof(*flp)); + fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI); + new_flp->fl_cmd = (u8) ELS_LS_ACC; + + /* + * Send the response. If this fails, the originator should + * repeat the sequence. + */ + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + fh = fc_frame_header_get(fp); + hton24(fh->fh_s_id, local_fid); + hton24(fh->fh_d_id, remote_fid); + lport->tt.frame_send(lport, fp); + + } else { + fc_lport_error(lport, fp); + } + fc_lport_ptp_setup(lport, remote_fid, remote_wwpn, + get_unaligned_be64(&flp->fl_wwnn)); +out: + fc_frame_free(rx_fp); +} + +/** + * fc_lport_recv_els_req() - The generic lport ELS request handler + * @lport: The local port that received the request + * @fp: The request frame + * + * This function will see if the lport handles the request or + * if an rport should handle the request. + * + * Locking Note: This function should not be called with the lport + * lock held because it will grab the lock. + */ +static void fc_lport_recv_els_req(struct fc_lport *lport, + struct fc_frame *fp) +{ + mutex_lock(&lport->lp_mutex); + + /* + * Handle special ELS cases like FLOGI, LOGO, and + * RSCN here. These don't require a session. + * Even if we had a session, it might not be ready. + */ + if (!lport->link_up) + fc_frame_free(fp); + else { + /* + * Check opcode. 
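+	 * FLOGI (unless the port runs point-to-multipoint), LOGO from
+	 * the fabric controller, ECHO, RLIR and RNID are handled by the
+	 * lport itself, RSCN is handed to the discovery layer, and every
+	 * other ELS request is passed on to the rport layer via
+	 * fc_rport_recv_req().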
+ */ + switch (fc_frame_payload_op(fp)) { + case ELS_FLOGI: + if (!lport->point_to_multipoint) + fc_lport_recv_flogi_req(lport, fp); + else + fc_rport_recv_req(lport, fp); + break; + case ELS_LOGO: + if (fc_frame_sid(fp) == FC_FID_FLOGI) + fc_lport_recv_logo_req(lport, fp); + else + fc_rport_recv_req(lport, fp); + break; + case ELS_RSCN: + lport->tt.disc_recv_req(lport, fp); + break; + case ELS_ECHO: + fc_lport_recv_echo_req(lport, fp); + break; + case ELS_RLIR: + fc_lport_recv_rlir_req(lport, fp); + break; + case ELS_RNID: + fc_lport_recv_rnid_req(lport, fp); + break; + default: + fc_rport_recv_req(lport, fp); + break; + } + } + mutex_unlock(&lport->lp_mutex); +} + +static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len, + const struct fc_els_spp *spp_in, + struct fc_els_spp *spp_out) +{ + return FC_SPP_RESP_INVL; +} + +struct fc4_prov fc_lport_els_prov = { + .prli = fc_lport_els_prli, + .recv = fc_lport_recv_els_req, +}; + +/** + * fc_lport_recv() - The generic lport request handler + * @lport: The lport that received the request + * @fp: The frame the request is in + * + * Locking Note: This function should not be called with the lport + * lock held because it may grab the lock. + */ +void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_frame_header *fh = fc_frame_header_get(fp); + struct fc_seq *sp = fr_seq(fp); + struct fc4_prov *prov; + + /* + * Use RCU read lock and module_lock to be sure module doesn't + * deregister and get unloaded while we're calling it. + * try_module_get() is inlined and accepts a NULL parameter. + * Only ELSes and FCP target ops should come through here. + * The locking is unfortunate, and a better scheme is being sought. + */ + + rcu_read_lock(); + if (fh->fh_type >= FC_FC4_PROV_SIZE) + goto drop; + prov = rcu_dereference(fc_passive_prov[fh->fh_type]); + if (!prov || !try_module_get(prov->module)) + goto drop; + rcu_read_unlock(); + prov->recv(lport, fp); + module_put(prov->module); + return; +drop: + rcu_read_unlock(); + FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type); + fc_frame_free(fp); + if (sp) + fc_exch_done(sp); +} +EXPORT_SYMBOL(fc_lport_recv); + +/** + * fc_lport_reset() - Reset a local port + * @lport: The local port which should be reset + * + * Locking Note: This functions should not be called with the + * lport lock held. 
+ */ +int fc_lport_reset(struct fc_lport *lport) +{ + cancel_delayed_work_sync(&lport->retry_work); + mutex_lock(&lport->lp_mutex); + fc_lport_enter_reset(lport); + mutex_unlock(&lport->lp_mutex); + return 0; +} +EXPORT_SYMBOL(fc_lport_reset); + +/** + * fc_lport_reset_locked() - Reset the local port w/ the lport lock held + * @lport: The local port to be reset + */ +static void fc_lport_reset_locked(struct fc_lport *lport) +{ + lockdep_assert_held(&lport->lp_mutex); + + if (lport->dns_rdata) { + fc_rport_logoff(lport->dns_rdata); + lport->dns_rdata = NULL; + } + + if (lport->ptp_rdata) { + fc_rport_logoff(lport->ptp_rdata); + kref_put(&lport->ptp_rdata->kref, fc_rport_destroy); + lport->ptp_rdata = NULL; + } + + lport->tt.disc_stop(lport); + + lport->tt.exch_mgr_reset(lport, 0, 0); + fc_host_fabric_name(lport->host) = 0; + + if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up)) + fc_lport_set_port_id(lport, 0, NULL); +} + +/** + * fc_lport_enter_reset() - Reset the local port + * @lport: The local port to be reset + */ +static void fc_lport_enter_reset(struct fc_lport *lport) +{ + lockdep_assert_held(&lport->lp_mutex); + + FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", + fc_lport_state(lport)); + + if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO) + return; + + if (lport->vport) { + if (lport->link_up) + fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING); + else + fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN); + } + fc_lport_state_enter(lport, LPORT_ST_RESET); + fc_host_post_event(lport->host, fc_get_event_number(), + FCH_EVT_LIPRESET, 0); + fc_vports_linkchange(lport); + fc_lport_reset_locked(lport); + if (lport->link_up) + fc_lport_enter_flogi(lport); +} + +/** + * fc_lport_enter_disabled() - Disable the local port + * @lport: The local port to be reset + */ +static void fc_lport_enter_disabled(struct fc_lport *lport) +{ + lockdep_assert_held(&lport->lp_mutex); + + FC_LPORT_DBG(lport, "Entered disabled state from %s state\n", + fc_lport_state(lport)); + + fc_lport_state_enter(lport, LPORT_ST_DISABLED); + fc_vports_linkchange(lport); + fc_lport_reset_locked(lport); +} + +/** + * fc_lport_error() - Handler for any errors + * @lport: The local port that the error was on + * @fp: The error code encoded in a frame pointer + * + * If the error was caused by a resource allocation failure + * then wait for half a second and retry, otherwise retry + * after the e_d_tov time. + */ +static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) +{ + unsigned long delay = 0; + FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n", + IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_lport_state(lport), + lport->retry_count); + + if (PTR_ERR(fp) == -FC_EX_CLOSED) + return; + + /* + * Memory allocation failure, or the exchange timed out + * or we received LS_RJT. + * Retry after delay + */ + if (lport->retry_count < lport->max_retry_count) { + lport->retry_count++; + if (!fp) + delay = msecs_to_jiffies(500); + else + delay = msecs_to_jiffies(lport->e_d_tov); + + schedule_delayed_work(&lport->retry_work, delay); + } else + fc_lport_enter_reset(lport); +} + +/** + * fc_lport_ns_resp() - Handle response to a name server + * registration exchange + * @sp: current sequence in exchange + * @fp: response frame + * @lp_arg: Fibre Channel host port instance + * + * Locking Note: This function will be called without the lport lock + * held, but it will lock, call an _enter_* function or fc_lport_error() + * and then unlock the lport. 
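+ *
+ * A successful accept advances the name-server registration sequence
+ * RNN_ID -> RSNN_NN -> RSPN_ID -> RFT_ID -> RFF_ID, after which the
+ * lport moves on to FDMI (if enabled) or SCR.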
+ */ +static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp, + void *lp_arg) +{ + struct fc_lport *lport = lp_arg; + struct fc_frame_header *fh; + struct fc_ct_hdr *ct; + + FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp)); + + if (fp == ERR_PTR(-FC_EX_CLOSED)) + return; + + mutex_lock(&lport->lp_mutex); + + if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) { + FC_LPORT_DBG(lport, "Received a name server response, " + "but in state %s\n", fc_lport_state(lport)); + if (IS_ERR(fp)) + goto err; + goto out; + } + + if (IS_ERR(fp)) { + fc_lport_error(lport, fp); + goto err; + } + + fh = fc_frame_header_get(fp); + ct = fc_frame_payload_get(fp, sizeof(*ct)); + + if (fh && ct && fh->fh_type == FC_TYPE_CT && + ct->ct_fs_type == FC_FST_DIR && + ct->ct_fs_subtype == FC_NS_SUBTYPE && + ntohs(ct->ct_cmd) == FC_FS_ACC) + switch (lport->state) { + case LPORT_ST_RNN_ID: + fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN); + break; + case LPORT_ST_RSNN_NN: + fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID); + break; + case LPORT_ST_RSPN_ID: + fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); + break; + case LPORT_ST_RFT_ID: + fc_lport_enter_ns(lport, LPORT_ST_RFF_ID); + break; + case LPORT_ST_RFF_ID: + if (lport->fdmi_enabled) + fc_lport_enter_fdmi(lport); + else + fc_lport_enter_scr(lport); + break; + default: + /* should have already been caught by state checks */ + break; + } + else + fc_lport_error(lport, fp); +out: + fc_frame_free(fp); +err: + mutex_unlock(&lport->lp_mutex); +} + +/** + * fc_lport_ms_resp() - Handle response to a management server + * exchange + * @sp: current sequence in exchange + * @fp: response frame + * @lp_arg: Fibre Channel host port instance + * + * Locking Note: This function will be called without the lport lock + * held, but it will lock, call an _enter_* function or fc_lport_error() + * and then unlock the lport. 
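+ *
+ * The management-server (FDMI) sequence is DHBA -> DPRT -> RHBA -> RPA
+ * and then SCR; a reject of RHBA while using FDMI V2 retries with V1,
+ * while other errors skip straight to SCR.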
+ */ +static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp, + void *lp_arg) +{ + struct fc_lport *lport = lp_arg; + struct fc_frame_header *fh; + struct fc_ct_hdr *ct; + struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host); + FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp)); + + if (fp == ERR_PTR(-FC_EX_CLOSED)) + return; + + mutex_lock(&lport->lp_mutex); + + if (lport->state < LPORT_ST_RHBA || lport->state > LPORT_ST_DPRT) { + FC_LPORT_DBG(lport, "Received a management server response, " + "but in state %s\n", fc_lport_state(lport)); + if (IS_ERR(fp)) + goto err; + goto out; + } + + if (IS_ERR(fp)) { + fc_lport_error(lport, fp); + goto err; + } + + fh = fc_frame_header_get(fp); + ct = fc_frame_payload_get(fp, sizeof(*ct)); + + if (fh && ct && fh->fh_type == FC_TYPE_CT && + ct->ct_fs_type == FC_FST_MGMT && + ct->ct_fs_subtype == FC_FDMI_SUBTYPE) { + FC_LPORT_DBG(lport, "Received a management server response, " + "reason=%d explain=%d\n", + ct->ct_reason, + ct->ct_explan); + + switch (lport->state) { + case LPORT_ST_RHBA: + if ((ntohs(ct->ct_cmd) == FC_FS_RJT) && fc_host->fdmi_version == FDMI_V2) { + FC_LPORT_DBG(lport, "Error for FDMI-V2, fall back to FDMI-V1\n"); + fc_host->fdmi_version = FDMI_V1; + + fc_lport_enter_ms(lport, LPORT_ST_RHBA); + + } else if (ntohs(ct->ct_cmd) == FC_FS_ACC) + fc_lport_enter_ms(lport, LPORT_ST_RPA); + else /* Error Skip RPA */ + fc_lport_enter_scr(lport); + break; + case LPORT_ST_RPA: + fc_lport_enter_scr(lport); + break; + case LPORT_ST_DPRT: + fc_lport_enter_ms(lport, LPORT_ST_RHBA); + break; + case LPORT_ST_DHBA: + fc_lport_enter_ms(lport, LPORT_ST_DPRT); + break; + default: + /* should have already been caught by state checks */ + break; + } + } else { + /* Invalid Frame? */ + fc_lport_error(lport, fp); + } +out: + fc_frame_free(fp); +err: + mutex_unlock(&lport->lp_mutex); +} + +/** + * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request + * @sp: current sequence in SCR exchange + * @fp: response frame + * @lp_arg: Fibre Channel lport port instance that sent the registration request + * + * Locking Note: This function will be called without the lport lock + * held, but it will lock, call an _enter_* function or fc_lport_error + * and then unlock the lport. 
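+ *
+ * An LS_ACC moves the lport to the READY state; anything else is
+ * treated as an error and handled by fc_lport_error().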
+ */ +static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp, + void *lp_arg) +{ + struct fc_lport *lport = lp_arg; + u8 op; + + FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp)); + + if (fp == ERR_PTR(-FC_EX_CLOSED)) + return; + + mutex_lock(&lport->lp_mutex); + + if (lport->state != LPORT_ST_SCR) { + FC_LPORT_DBG(lport, "Received a SCR response, but in state " + "%s\n", fc_lport_state(lport)); + if (IS_ERR(fp)) + goto err; + goto out; + } + + if (IS_ERR(fp)) { + fc_lport_error(lport, fp); + goto err; + } + + op = fc_frame_payload_op(fp); + if (op == ELS_LS_ACC) + fc_lport_enter_ready(lport); + else + fc_lport_error(lport, fp); + +out: + fc_frame_free(fp); +err: + mutex_unlock(&lport->lp_mutex); +} + +/** + * fc_lport_enter_scr() - Send a SCR (State Change Register) request + * @lport: The local port to register for state changes + */ +static void fc_lport_enter_scr(struct fc_lport *lport) +{ + struct fc_frame *fp; + + lockdep_assert_held(&lport->lp_mutex); + + FC_LPORT_DBG(lport, "Entered SCR state from %s state\n", + fc_lport_state(lport)); + + fc_lport_state_enter(lport, LPORT_ST_SCR); + + fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr)); + if (!fp) { + fc_lport_error(lport, fp); + return; + } + + if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR, + fc_lport_scr_resp, lport, + 2 * lport->r_a_tov)) + fc_lport_error(lport, NULL); +} + +/** + * fc_lport_enter_ns() - register some object with the name server + * @lport: Fibre Channel local port to register + * @state: Local port state + */ +static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state) +{ + struct fc_frame *fp; + enum fc_ns_req cmd; + int size = sizeof(struct fc_ct_hdr); + size_t len; + + lockdep_assert_held(&lport->lp_mutex); + + FC_LPORT_DBG(lport, "Entered %s state from %s state\n", + fc_lport_state_names[state], + fc_lport_state(lport)); + + fc_lport_state_enter(lport, state); + + switch (state) { + case LPORT_ST_RNN_ID: + cmd = FC_NS_RNN_ID; + size += sizeof(struct fc_ns_rn_id); + break; + case LPORT_ST_RSNN_NN: + len = strnlen(fc_host_symbolic_name(lport->host), 255); + /* if there is no symbolic name, skip to RFT_ID */ + if (!len) + return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); + cmd = FC_NS_RSNN_NN; + size += sizeof(struct fc_ns_rsnn) + len; + break; + case LPORT_ST_RSPN_ID: + len = strnlen(fc_host_symbolic_name(lport->host), 255); + /* if there is no symbolic name, skip to RFT_ID */ + if (!len) + return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); + cmd = FC_NS_RSPN_ID; + size += sizeof(struct fc_ns_rspn) + len; + break; + case LPORT_ST_RFT_ID: + cmd = FC_NS_RFT_ID; + size += sizeof(struct fc_ns_rft); + break; + case LPORT_ST_RFF_ID: + cmd = FC_NS_RFF_ID; + size += sizeof(struct fc_ns_rff_id); + break; + default: + fc_lport_error(lport, NULL); + return; + } + + fp = fc_frame_alloc(lport, size); + if (!fp) { + fc_lport_error(lport, fp); + return; + } + + if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd, + fc_lport_ns_resp, + lport, 3 * lport->r_a_tov)) + fc_lport_error(lport, fp); +} + +static struct fc_rport_operations fc_lport_rport_ops = { + .event_callback = fc_lport_rport_callback, +}; + +/** + * fc_lport_enter_dns() - Create a fc_rport for the name server + * @lport: The local port requesting a remote port for the name server + */ +static void fc_lport_enter_dns(struct fc_lport *lport) +{ + struct fc_rport_priv *rdata; + + lockdep_assert_held(&lport->lp_mutex); + + FC_LPORT_DBG(lport, "Entered DNS state from %s state\n", + 
fc_lport_state(lport)); + + fc_lport_state_enter(lport, LPORT_ST_DNS); + + mutex_lock(&lport->disc.disc_mutex); + rdata = fc_rport_create(lport, FC_FID_DIR_SERV); + mutex_unlock(&lport->disc.disc_mutex); + if (!rdata) + goto err; + + rdata->ops = &fc_lport_rport_ops; + fc_rport_login(rdata); + return; + +err: + fc_lport_error(lport, NULL); +} + +/** + * fc_lport_enter_ms() - management server commands + * @lport: Fibre Channel local port to register + * @state: Local port state + */ +static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state) +{ + struct fc_frame *fp; + enum fc_fdmi_req cmd; + int size = sizeof(struct fc_ct_hdr); + size_t len; + int numattrs; + struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host); + lockdep_assert_held(&lport->lp_mutex); + + FC_LPORT_DBG(lport, "Entered %s state from %s state\n", + fc_lport_state_names[state], + fc_lport_state(lport)); + + fc_lport_state_enter(lport, state); + + switch (state) { + case LPORT_ST_RHBA: + cmd = FC_FDMI_RHBA; + /* Number of HBA Attributes */ + numattrs = 11; + len = sizeof(struct fc_fdmi_rhba); + len -= sizeof(struct fc_fdmi_attr_entry); + + len += FC_FDMI_HBA_ATTR_NODENAME_LEN; + len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN; + len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN; + len += FC_FDMI_HBA_ATTR_MODEL_LEN; + len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN; + len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN; + len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN; + len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN; + len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN; + len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN; + len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN; + + + if (fc_host->fdmi_version == FDMI_V2) { + numattrs += 7; + len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN; + len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN; + len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN; + len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN; + len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN; + len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN; + len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN; + } + + len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN); + + size += len; + break; + case LPORT_ST_RPA: + cmd = FC_FDMI_RPA; + /* Number of Port Attributes */ + numattrs = 6; + len = sizeof(struct fc_fdmi_rpa); + len -= sizeof(struct fc_fdmi_attr_entry); + len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN; + len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN; + len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN; + len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN; + len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN; + len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN; + + if (fc_host->fdmi_version == FDMI_V2) { + numattrs += 10; + len += FC_FDMI_PORT_ATTR_NODENAME_LEN; + len += FC_FDMI_PORT_ATTR_PORTNAME_LEN; + len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN; + len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN; + len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN; + len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN; + len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN; + len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN; + len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN; + len += FC_FDMI_PORT_ATTR_PORTID_LEN; + } + + len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN); + + size += len; + break; + case LPORT_ST_DPRT: + cmd = FC_FDMI_DPRT; + len = sizeof(struct fc_fdmi_dprt); + size += len; + break; + case LPORT_ST_DHBA: + cmd = FC_FDMI_DHBA; + len = sizeof(struct fc_fdmi_dhba); + size += len; + break; + default: + fc_lport_error(lport, NULL); + return; + } + + FC_LPORT_DBG(lport, "Cmd=0x%x Len %d size %d\n", + cmd, (int)len, size); + fp = fc_frame_alloc(lport, size); + if (!fp) { + fc_lport_error(lport, fp); + return; + } 
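+ + /* + * The FDMI CT request is sent to the management server with a + * 3 * R_A_TOV timeout; fc_lport_ms_resp() handles the response and + * a failure to send is reported through fc_lport_error(). + */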
+ + if (!lport->tt.elsct_send(lport, FC_FID_MGMT_SERV, fp, cmd, + fc_lport_ms_resp, + lport, 3 * lport->r_a_tov)) + fc_lport_error(lport, fp); +} + +/** + * fc_lport_enter_fdmi() - Create a fc_rport for the management server + * @lport: The local port requesting a remote port for the management server + */ +static void fc_lport_enter_fdmi(struct fc_lport *lport) +{ + struct fc_rport_priv *rdata; + + lockdep_assert_held(&lport->lp_mutex); + + FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n", + fc_lport_state(lport)); + + fc_lport_state_enter(lport, LPORT_ST_FDMI); + + mutex_lock(&lport->disc.disc_mutex); + rdata = fc_rport_create(lport, FC_FID_MGMT_SERV); + mutex_unlock(&lport->disc.disc_mutex); + if (!rdata) + goto err; + + rdata->ops = &fc_lport_rport_ops; + fc_rport_login(rdata); + return; + +err: + fc_lport_error(lport, NULL); +} + +/** + * fc_lport_timeout() - Handler for the retry_work timer + * @work: The work struct of the local port + */ +static void fc_lport_timeout(struct work_struct *work) +{ + struct fc_lport *lport = + container_of(work, struct fc_lport, + retry_work.work); + struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host); + + mutex_lock(&lport->lp_mutex); + + switch (lport->state) { + case LPORT_ST_DISABLED: + break; + case LPORT_ST_READY: + break; + case LPORT_ST_RESET: + break; + case LPORT_ST_FLOGI: + fc_lport_enter_flogi(lport); + break; + case LPORT_ST_DNS: + fc_lport_enter_dns(lport); + break; + case LPORT_ST_RNN_ID: + case LPORT_ST_RSNN_NN: + case LPORT_ST_RSPN_ID: + case LPORT_ST_RFT_ID: + case LPORT_ST_RFF_ID: + fc_lport_enter_ns(lport, lport->state); + break; + case LPORT_ST_FDMI: + fc_lport_enter_fdmi(lport); + break; + case LPORT_ST_RHBA: + if (fc_host->fdmi_version == FDMI_V2) { + FC_LPORT_DBG(lport, "timeout for FDMI-V2 RHBA,fall back to FDMI-V1\n"); + fc_host->fdmi_version = FDMI_V1; + fc_lport_enter_ms(lport, LPORT_ST_RHBA); + break; + } + fallthrough; + case LPORT_ST_RPA: + case LPORT_ST_DHBA: + case LPORT_ST_DPRT: + FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n", + fc_lport_state(lport)); + fallthrough; + case LPORT_ST_SCR: + fc_lport_enter_scr(lport); + break; + case LPORT_ST_LOGO: + fc_lport_enter_logo(lport); + break; + } + + mutex_unlock(&lport->lp_mutex); +} + +/** + * fc_lport_logo_resp() - Handle response to LOGO request + * @sp: The sequence that the LOGO was on + * @fp: The LOGO frame + * @lp_arg: The lport port that received the LOGO request + * + * Locking Note: This function will be called without the lport lock + * held, but it will lock, call an _enter_* function or fc_lport_error() + * and then unlock the lport. 
+ */ +void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, + void *lp_arg) +{ + struct fc_lport *lport = lp_arg; + u8 op; + + FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp)); + + if (fp == ERR_PTR(-FC_EX_CLOSED)) + return; + + mutex_lock(&lport->lp_mutex); + + if (lport->state != LPORT_ST_LOGO) { + FC_LPORT_DBG(lport, "Received a LOGO response, but in state " + "%s\n", fc_lport_state(lport)); + if (IS_ERR(fp)) + goto err; + goto out; + } + + if (IS_ERR(fp)) { + fc_lport_error(lport, fp); + goto err; + } + + op = fc_frame_payload_op(fp); + if (op == ELS_LS_ACC) + fc_lport_enter_disabled(lport); + else + fc_lport_error(lport, fp); + +out: + fc_frame_free(fp); +err: + mutex_unlock(&lport->lp_mutex); +} +EXPORT_SYMBOL(fc_lport_logo_resp); + +/** + * fc_lport_enter_logo() - Logout of the fabric + * @lport: The local port to be logged out + */ +static void fc_lport_enter_logo(struct fc_lport *lport) +{ + struct fc_frame *fp; + struct fc_els_logo *logo; + + lockdep_assert_held(&lport->lp_mutex); + + FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n", + fc_lport_state(lport)); + + fc_lport_state_enter(lport, LPORT_ST_LOGO); + fc_vports_linkchange(lport); + + fp = fc_frame_alloc(lport, sizeof(*logo)); + if (!fp) { + fc_lport_error(lport, fp); + return; + } + + if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO, + fc_lport_logo_resp, lport, + 2 * lport->r_a_tov)) + fc_lport_error(lport, NULL); +} + +/** + * fc_lport_flogi_resp() - Handle response to FLOGI request + * @sp: The sequence that the FLOGI was on + * @fp: The FLOGI response frame + * @lp_arg: The lport port that received the FLOGI response + * + * Locking Note: This function will be called without the lport lock + * held, but it will lock, call an _enter_* function or fc_lport_error() + * and then unlock the lport. 
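+ * + * On an LS_ACC the common service parameters in the response are + * parsed; if the F_Port indicator is clear the port is set up for + * point-to-point operation, otherwise the fabric name is recorded and + * name server registration is started via fc_lport_enter_dns().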
+ */ +void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, + void *lp_arg) +{ + struct fc_lport *lport = lp_arg; + struct fc_frame_header *fh; + struct fc_els_flogi *flp; + u32 did; + u16 csp_flags; + unsigned int r_a_tov; + unsigned int e_d_tov; + u16 mfs; + + FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp)); + + if (fp == ERR_PTR(-FC_EX_CLOSED)) + return; + + mutex_lock(&lport->lp_mutex); + + if (lport->state != LPORT_ST_FLOGI) { + FC_LPORT_DBG(lport, "Received a FLOGI response, but in state " + "%s\n", fc_lport_state(lport)); + if (IS_ERR(fp)) + goto err; + goto out; + } + + if (IS_ERR(fp)) { + fc_lport_error(lport, fp); + goto err; + } + + fh = fc_frame_header_get(fp); + did = fc_frame_did(fp); + if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 || + fc_frame_payload_op(fp) != ELS_LS_ACC) { + FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n"); + fc_lport_error(lport, fp); + goto out; + } + + flp = fc_frame_payload_get(fp, sizeof(*flp)); + if (!flp) { + FC_LPORT_DBG(lport, "FLOGI bad response\n"); + fc_lport_error(lport, fp); + goto out; + } + + mfs = ntohs(flp->fl_csp.sp_bb_data) & + FC_SP_BB_DATA_MASK; + + if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) { + FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " + "lport->mfs:%u\n", mfs, lport->mfs); + fc_lport_error(lport, fp); + goto out; + } + + if (mfs <= lport->mfs) { + lport->mfs = mfs; + fc_host_maxframe_size(lport->host) = mfs; + } + + csp_flags = ntohs(flp->fl_csp.sp_features); + r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); + e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); + if (csp_flags & FC_SP_FT_EDTR) + e_d_tov /= 1000000; + + lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC); + + if ((csp_flags & FC_SP_FT_FPORT) == 0) { + if (e_d_tov > lport->e_d_tov) + lport->e_d_tov = e_d_tov; + lport->r_a_tov = 2 * lport->e_d_tov; + fc_lport_set_port_id(lport, did, fp); + printk(KERN_INFO "host%d: libfc: " + "Port (%6.6x) entered " + "point-to-point mode\n", + lport->host->host_no, did); + fc_lport_ptp_setup(lport, fc_frame_sid(fp), + get_unaligned_be64( + &flp->fl_wwpn), + get_unaligned_be64( + &flp->fl_wwnn)); + } else { + if (e_d_tov > lport->e_d_tov) + lport->e_d_tov = e_d_tov; + if (r_a_tov > lport->r_a_tov) + lport->r_a_tov = r_a_tov; + fc_host_fabric_name(lport->host) = + get_unaligned_be64(&flp->fl_wwnn); + fc_lport_set_port_id(lport, did, fp); + fc_lport_enter_dns(lport); + } + +out: + fc_frame_free(fp); +err: + mutex_unlock(&lport->lp_mutex); +} +EXPORT_SYMBOL(fc_lport_flogi_resp); + +/** + * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager + * @lport: Fibre Channel local port to be logged in to the fabric + */ +static void fc_lport_enter_flogi(struct fc_lport *lport) +{ + struct fc_frame *fp; + + lockdep_assert_held(&lport->lp_mutex); + + FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n", + fc_lport_state(lport)); + + fc_lport_state_enter(lport, LPORT_ST_FLOGI); + + if (lport->point_to_multipoint) { + if (lport->port_id) + fc_lport_enter_ready(lport); + return; + } + + fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); + if (!fp) + return fc_lport_error(lport, fp); + + if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, + lport->vport ? ELS_FDISC : ELS_FLOGI, + fc_lport_flogi_resp, lport, + lport->vport ? 
2 * lport->r_a_tov : + lport->e_d_tov)) + fc_lport_error(lport, NULL); +} + +/** + * fc_lport_config() - Configure a fc_lport + * @lport: The local port to be configured + */ +int fc_lport_config(struct fc_lport *lport) +{ + INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); + mutex_init(&lport->lp_mutex); + + fc_lport_state_enter(lport, LPORT_ST_DISABLED); + + fc_lport_add_fc4_type(lport, FC_TYPE_FCP); + fc_lport_add_fc4_type(lport, FC_TYPE_CT); + fc_fc4_conf_lport_params(lport, FC_TYPE_FCP); + + return 0; +} +EXPORT_SYMBOL(fc_lport_config); + +/** + * fc_lport_init() - Initialize the lport layer for a local port + * @lport: The local port to initialize the exchange layer for + */ +int fc_lport_init(struct fc_lport *lport) +{ + struct fc_host_attrs *fc_host; + + fc_host = shost_to_fc_host(lport->host); + + /* Set FDMI version to FDMI-2 specification*/ + fc_host->fdmi_version = FDMI_V2; + + fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; + fc_host_node_name(lport->host) = lport->wwnn; + fc_host_port_name(lport->host) = lport->wwpn; + fc_host_supported_classes(lport->host) = FC_COS_CLASS3; + memset(fc_host_supported_fc4s(lport->host), 0, + sizeof(fc_host_supported_fc4s(lport->host))); + fc_host_supported_fc4s(lport->host)[2] = 1; + fc_host_supported_fc4s(lport->host)[7] = 1; + fc_host_num_discovered_ports(lport->host) = 4; + + /* This value is also unchanging */ + memset(fc_host_active_fc4s(lport->host), 0, + sizeof(fc_host_active_fc4s(lport->host))); + fc_host_active_fc4s(lport->host)[2] = 1; + fc_host_active_fc4s(lport->host)[7] = 1; + fc_host_maxframe_size(lport->host) = lport->mfs; + fc_host_supported_speeds(lport->host) = 0; + if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT) + fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT; + if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT) + fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT; + if (lport->link_supported_speeds & FC_PORTSPEED_40GBIT) + fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_40GBIT; + if (lport->link_supported_speeds & FC_PORTSPEED_100GBIT) + fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_100GBIT; + if (lport->link_supported_speeds & FC_PORTSPEED_25GBIT) + fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_25GBIT; + if (lport->link_supported_speeds & FC_PORTSPEED_50GBIT) + fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_50GBIT; + if (lport->link_supported_speeds & FC_PORTSPEED_100GBIT) + fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_100GBIT; + + fc_fc4_add_lport(lport); + + fc_host_num_discovered_ports(lport->host) = DISCOVERED_PORTS; + fc_host_port_state(lport->host) = FC_PORTSTATE_ONLINE; + fc_host_max_ct_payload(lport->host) = MAX_CT_PAYLOAD; + fc_host_num_ports(lport->host) = NUMBER_OF_PORTS; + fc_host_bootbios_state(lport->host) = 0X00000000; + snprintf(fc_host_bootbios_version(lport->host), + FC_SYMBOLIC_NAME_SIZE, "%s", "Unknown"); + + return 0; +} +EXPORT_SYMBOL(fc_lport_init); + +/** + * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests + * @sp: The sequence for the FC Passthrough response + * @fp: The response frame + * @info_arg: The BSG info that the response is for + */ +static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp, + void *info_arg) +{ + struct fc_bsg_info *info = info_arg; + struct bsg_job *job = info->job; + struct fc_bsg_reply *bsg_reply = job->reply; + struct fc_lport *lport = info->lport; + struct fc_frame_header *fh; + size_t len; + void *buf; + + if (IS_ERR(fp)) { + 
bsg_reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ? + -ECONNABORTED : -ETIMEDOUT; + job->reply_len = sizeof(uint32_t); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + kfree(info); + return; + } + + mutex_lock(&lport->lp_mutex); + fh = fc_frame_header_get(fp); + len = fr_len(fp) - sizeof(*fh); + buf = fc_frame_payload_get(fp, 0); + + if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) { + /* Get the response code from the first frame payload */ + unsigned short cmd = (info->rsp_code == FC_FS_ACC) ? + ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) : + (unsigned short)fc_frame_payload_op(fp); + + /* Save the reply status of the job */ + bsg_reply->reply_data.ctels_reply.status = + (cmd == info->rsp_code) ? + FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT; + } + + bsg_reply->reply_payload_rcv_len += + fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents, + &info->offset, NULL); + + if (fr_eof(fp) == FC_EOF_T && + (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == + (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) { + if (bsg_reply->reply_payload_rcv_len > + job->reply_payload.payload_len) + bsg_reply->reply_payload_rcv_len = + job->reply_payload.payload_len; + bsg_reply->result = 0; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + kfree(info); + } + fc_frame_free(fp); + mutex_unlock(&lport->lp_mutex); +} + +/** + * fc_lport_els_request() - Send ELS passthrough request + * @job: The BSG Passthrough job + * @lport: The local port sending the request + * @did: The destination port id + * @tov: The timeout period (in ms) + */ +static int fc_lport_els_request(struct bsg_job *job, + struct fc_lport *lport, + u32 did, u32 tov) +{ + struct fc_bsg_info *info; + struct fc_frame *fp; + struct fc_frame_header *fh; + char *pp; + int len; + + lockdep_assert_held(&lport->lp_mutex); + + fp = fc_frame_alloc(lport, job->request_payload.payload_len); + if (!fp) + return -ENOMEM; + + len = job->request_payload.payload_len; + pp = fc_frame_payload_get(fp, len); + + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + pp, len); + + fh = fc_frame_header_get(fp); + fh->fh_r_ctl = FC_RCTL_ELS_REQ; + hton24(fh->fh_d_id, did); + hton24(fh->fh_s_id, lport->port_id); + fh->fh_type = FC_TYPE_ELS; + hton24(fh->fh_f_ctl, FC_FCTL_REQ); + fh->fh_cs_ctl = 0; + fh->fh_df_ctl = 0; + fh->fh_parm_offset = 0; + + info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL); + if (!info) { + fc_frame_free(fp); + return -ENOMEM; + } + + info->job = job; + info->lport = lport; + info->rsp_code = ELS_LS_ACC; + info->nents = job->reply_payload.sg_cnt; + info->sg = job->reply_payload.sg_list; + + if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp, + NULL, info, tov)) { + kfree(info); + return -ECOMM; + } + return 0; +} + +/** + * fc_lport_ct_request() - Send CT Passthrough request + * @job: The BSG Passthrough job + * @lport: The local port sending the request + * @did: The destination FC-ID + * @tov: The timeout period to wait for the response + */ +static int fc_lport_ct_request(struct bsg_job *job, + struct fc_lport *lport, u32 did, u32 tov) +{ + struct fc_bsg_info *info; + struct fc_frame *fp; + struct fc_frame_header *fh; + struct fc_ct_req *ct; + size_t len; + + lockdep_assert_held(&lport->lp_mutex); + + fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + + job->request_payload.payload_len); + if (!fp) + return -ENOMEM; + + len = job->request_payload.payload_len; + ct = fc_frame_payload_get(fp, len); + + sg_copy_to_buffer(job->request_payload.sg_list, + 
job->request_payload.sg_cnt, + ct, len); + + fh = fc_frame_header_get(fp); + fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL; + hton24(fh->fh_d_id, did); + hton24(fh->fh_s_id, lport->port_id); + fh->fh_type = FC_TYPE_CT; + hton24(fh->fh_f_ctl, FC_FCTL_REQ); + fh->fh_cs_ctl = 0; + fh->fh_df_ctl = 0; + fh->fh_parm_offset = 0; + + info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL); + if (!info) { + fc_frame_free(fp); + return -ENOMEM; + } + + info->job = job; + info->lport = lport; + info->rsp_code = FC_FS_ACC; + info->nents = job->reply_payload.sg_cnt; + info->sg = job->reply_payload.sg_list; + + if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp, + NULL, info, tov)) { + kfree(info); + return -ECOMM; + } + return 0; +} + +/** + * fc_lport_bsg_request() - The common entry point for sending + * FC Passthrough requests + * @job: The BSG passthrough job + */ +int fc_lport_bsg_request(struct bsg_job *job) +{ + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + struct Scsi_Host *shost = fc_bsg_to_shost(job); + struct fc_lport *lport = shost_priv(shost); + struct fc_rport *rport; + struct fc_rport_priv *rdata; + int rc = -EINVAL; + u32 did, tov; + + bsg_reply->reply_payload_rcv_len = 0; + + mutex_lock(&lport->lp_mutex); + + switch (bsg_request->msgcode) { + case FC_BSG_RPT_ELS: + rport = fc_bsg_to_rport(job); + if (!rport) + break; + + rdata = rport->dd_data; + rc = fc_lport_els_request(job, lport, rport->port_id, + rdata->e_d_tov); + break; + + case FC_BSG_RPT_CT: + rport = fc_bsg_to_rport(job); + if (!rport) + break; + + rdata = rport->dd_data; + rc = fc_lport_ct_request(job, lport, rport->port_id, + rdata->e_d_tov); + break; + + case FC_BSG_HST_CT: + did = ntoh24(bsg_request->rqst_data.h_ct.port_id); + if (did == FC_FID_DIR_SERV) { + rdata = lport->dns_rdata; + if (!rdata) + break; + tov = rdata->e_d_tov; + } else { + rdata = fc_rport_lookup(lport, did); + if (!rdata) + break; + tov = rdata->e_d_tov; + kref_put(&rdata->kref, fc_rport_destroy); + } + + rc = fc_lport_ct_request(job, lport, did, tov); + break; + + case FC_BSG_HST_ELS_NOLOGIN: + did = ntoh24(bsg_request->rqst_data.h_els.port_id); + rc = fc_lport_els_request(job, lport, did, lport->e_d_tov); + break; + } + + mutex_unlock(&lport->lp_mutex); + return rc; +} +EXPORT_SYMBOL(fc_lport_bsg_request); diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c new file mode 100644 index 000000000..c045898b8 --- /dev/null +++ b/drivers/scsi/libfc/fc_npiv.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2009 Intel Corporation. All rights reserved. 
+ * + * Maintained at www.Open-FCoE.org + */ + +/* + * NPIV VN_Port helper functions for libfc + */ + +#include +#include + +/** + * libfc_vport_create() - Create a new NPIV vport instance + * @vport: fc_vport structure from scsi_transport_fc + * @privsize: driver private data size to allocate along with the Scsi_Host + */ + +struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port; + + vn_port = libfc_host_alloc(shost->hostt, privsize); + if (!vn_port) + return vn_port; + + vn_port->vport = vport; + vport->dd_data = vn_port; + + mutex_lock(&n_port->lp_mutex); + list_add_tail(&vn_port->list, &n_port->vports); + mutex_unlock(&n_port->lp_mutex); + + return vn_port; +} +EXPORT_SYMBOL(libfc_vport_create); + +/** + * fc_vport_id_lookup() - find NPIV lport that matches a given fabric ID + * @n_port: Top level N_Port which may have multiple NPIV VN_Ports + * @port_id: Fabric ID to find a match for + * + * Returns: matching lport pointer or NULL if there is no match + */ +struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id) +{ + struct fc_lport *lport = NULL; + struct fc_lport *vn_port; + + if (n_port->port_id == port_id) + return n_port; + + if (port_id == FC_FID_FLOGI) + return n_port; /* for point-to-point */ + + mutex_lock(&n_port->lp_mutex); + list_for_each_entry(vn_port, &n_port->vports, list) { + if (vn_port->port_id == port_id) { + lport = vn_port; + break; + } + } + mutex_unlock(&n_port->lp_mutex); + + return lport; +} +EXPORT_SYMBOL(fc_vport_id_lookup); + +/* + * When setting the link state of vports during an lport state change, it's + * necessary to hold the lp_mutex of both the N_Port and the VN_Port. + * This tells the lockdep engine to treat the nested locking of the VN_Port + * as a different lock class. 
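+ * The VN_Port lp_mutex is therefore taken with mutex_lock_nested() + * using the LPORT_MUTEX_VN_PORT subclass while the N_Port lp_mutex + * is held.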
+ */ +enum libfc_lport_mutex_class { + LPORT_MUTEX_NORMAL = 0, + LPORT_MUTEX_VN_PORT = 1, +}; + +/** + * __fc_vport_setlink() - update link and status on a VN_Port + * @n_port: parent N_Port + * @vn_port: VN_Port to update + * + * Locking: must be called with both the N_Port and VN_Port lp_mutex held + */ +static void __fc_vport_setlink(struct fc_lport *n_port, + struct fc_lport *vn_port) +{ + struct fc_vport *vport = vn_port->vport; + + if (vn_port->state == LPORT_ST_DISABLED) + return; + + if (n_port->state == LPORT_ST_READY) { + if (n_port->npiv_enabled) { + fc_vport_set_state(vport, FC_VPORT_INITIALIZING); + __fc_linkup(vn_port); + } else { + fc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); + __fc_linkdown(vn_port); + } + } else { + fc_vport_set_state(vport, FC_VPORT_LINKDOWN); + __fc_linkdown(vn_port); + } +} + +/** + * fc_vport_setlink() - update link and status on a VN_Port + * @vn_port: virtual port to update + */ +void fc_vport_setlink(struct fc_lport *vn_port) +{ + struct fc_vport *vport = vn_port->vport; + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + + mutex_lock(&n_port->lp_mutex); + mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT); + __fc_vport_setlink(n_port, vn_port); + mutex_unlock(&vn_port->lp_mutex); + mutex_unlock(&n_port->lp_mutex); +} +EXPORT_SYMBOL(fc_vport_setlink); + +/** + * fc_vports_linkchange() - change the link state of all vports + * @n_port: Parent N_Port that has changed state + * + * Locking: called with the n_port lp_mutex held + */ +void fc_vports_linkchange(struct fc_lport *n_port) +{ + struct fc_lport *vn_port; + + list_for_each_entry(vn_port, &n_port->vports, list) { + mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT); + __fc_vport_setlink(n_port, vn_port); + mutex_unlock(&vn_port->lp_mutex); + } +} + diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c new file mode 100644 index 000000000..33da3c108 --- /dev/null +++ b/drivers/scsi/libfc/fc_rport.c @@ -0,0 +1,2292 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. + * + * Maintained at www.Open-FCoE.org + */ + +/* + * RPORT GENERAL INFO + * + * This file contains all processing regarding fc_rports. It contains the + * rport state machine and does all rport interaction with the transport class. + * There should be no other places in libfc that interact directly with the + * transport class in regards to adding and deleting rports. + * + * fc_rport's represent N_Port's within the fabric. + */ + +/* + * RPORT LOCKING + * + * The rport should never hold the rport mutex and then attempt to acquire + * either the lport or disc mutexes. The rport's mutex is considered lesser + * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for + * more comments on the hierarchy. + * + * The locking strategy is similar to the lport's strategy. The lock protects + * the rport's states and is held and released by the entry points to the rport + * block. All _enter_* functions correspond to rport states and expect the rport + * mutex to be locked before calling them. This means that rports only handle + * one request or response at a time, since they're not critical for the I/O + * path this potential over-use of the mutex is acceptable. 
+ */ + +/* + * RPORT REFERENCE COUNTING + * + * A rport reference should be taken when: + * - an rport is allocated + * - a workqueue item is scheduled + * - an ELS request is send + * The reference should be dropped when: + * - the workqueue function has finished + * - the ELS response is handled + * - an rport is removed + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "fc_encode.h" +#include "fc_libfc.h" + +static struct workqueue_struct *rport_event_queue; + +static void fc_rport_enter_flogi(struct fc_rport_priv *); +static void fc_rport_enter_plogi(struct fc_rport_priv *); +static void fc_rport_enter_prli(struct fc_rport_priv *); +static void fc_rport_enter_rtv(struct fc_rport_priv *); +static void fc_rport_enter_ready(struct fc_rport_priv *); +static void fc_rport_enter_logo(struct fc_rport_priv *); +static void fc_rport_enter_adisc(struct fc_rport_priv *); + +static void fc_rport_recv_plogi_req(struct fc_lport *, struct fc_frame *); +static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *); +static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *); +static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *); +static void fc_rport_timeout(struct work_struct *); +static void fc_rport_error(struct fc_rport_priv *, int); +static void fc_rport_error_retry(struct fc_rport_priv *, int); +static void fc_rport_work(struct work_struct *); + +static const char *fc_rport_state_names[] = { + [RPORT_ST_INIT] = "Init", + [RPORT_ST_FLOGI] = "FLOGI", + [RPORT_ST_PLOGI_WAIT] = "PLOGI_WAIT", + [RPORT_ST_PLOGI] = "PLOGI", + [RPORT_ST_PRLI] = "PRLI", + [RPORT_ST_RTV] = "RTV", + [RPORT_ST_READY] = "Ready", + [RPORT_ST_ADISC] = "ADISC", + [RPORT_ST_DELETE] = "Delete", +}; + +/** + * fc_rport_lookup() - Lookup a remote port by port_id + * @lport: The local port to lookup the remote port on + * @port_id: The remote port ID to look up + * + * The reference count of the fc_rport_priv structure is + * increased by one. + */ +struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, + u32 port_id) +{ + struct fc_rport_priv *rdata = NULL, *tmp_rdata; + + rcu_read_lock(); + list_for_each_entry_rcu(tmp_rdata, &lport->disc.rports, peers) + if (tmp_rdata->ids.port_id == port_id && + kref_get_unless_zero(&tmp_rdata->kref)) { + rdata = tmp_rdata; + break; + } + rcu_read_unlock(); + return rdata; +} +EXPORT_SYMBOL(fc_rport_lookup); + +/** + * fc_rport_create() - Create a new remote port + * @lport: The local port this remote port will be associated with + * @port_id: The identifiers for the new remote port + * + * The remote port will start in the INIT state. 
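+ * If a remote port already exists for @port_id, the existing entry is + * returned (after dropping the reference taken by the lookup) instead + * of allocating a new one.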
+ */ +struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) +{ + struct fc_rport_priv *rdata; + size_t rport_priv_size = sizeof(*rdata); + + lockdep_assert_held(&lport->disc.disc_mutex); + + rdata = fc_rport_lookup(lport, port_id); + if (rdata) { + kref_put(&rdata->kref, fc_rport_destroy); + return rdata; + } + + if (lport->rport_priv_size > 0) + rport_priv_size = lport->rport_priv_size; + rdata = kzalloc(rport_priv_size, GFP_KERNEL); + if (!rdata) + return NULL; + + rdata->ids.node_name = -1; + rdata->ids.port_name = -1; + rdata->ids.port_id = port_id; + rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN; + + kref_init(&rdata->kref); + mutex_init(&rdata->rp_mutex); + rdata->local_port = lport; + rdata->rp_state = RPORT_ST_INIT; + rdata->event = RPORT_EV_NONE; + rdata->flags = FC_RP_FLAGS_REC_SUPPORTED; + rdata->e_d_tov = lport->e_d_tov; + rdata->r_a_tov = lport->r_a_tov; + rdata->maxframe_size = FC_MIN_MAX_PAYLOAD; + INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout); + INIT_WORK(&rdata->event_work, fc_rport_work); + if (port_id != FC_FID_DIR_SERV) { + rdata->lld_event_callback = lport->tt.rport_event_callback; + list_add_rcu(&rdata->peers, &lport->disc.rports); + } + return rdata; +} +EXPORT_SYMBOL(fc_rport_create); + +/** + * fc_rport_destroy() - Free a remote port after last reference is released + * @kref: The remote port's kref + */ +void fc_rport_destroy(struct kref *kref) +{ + struct fc_rport_priv *rdata; + + rdata = container_of(kref, struct fc_rport_priv, kref); + kfree_rcu(rdata, rcu); +} +EXPORT_SYMBOL(fc_rport_destroy); + +/** + * fc_rport_state() - Return a string identifying the remote port's state + * @rdata: The remote port + */ +static const char *fc_rport_state(struct fc_rport_priv *rdata) +{ + const char *cp; + + cp = fc_rport_state_names[rdata->rp_state]; + if (!cp) + cp = "Unknown"; + return cp; +} + +/** + * fc_set_rport_loss_tmo() - Set the remote port loss timeout + * @rport: The remote port that gets a new timeout value + * @timeout: The new timeout value (in seconds) + */ +void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) +{ + if (timeout) + rport->dev_loss_tmo = timeout; + else + rport->dev_loss_tmo = 1; +} +EXPORT_SYMBOL(fc_set_rport_loss_tmo); + +/** + * fc_plogi_get_maxframe() - Get the maximum payload from the common service + * parameters in a FLOGI frame + * @flp: The FLOGI or PLOGI payload + * @maxval: The maximum frame size upper limit; this may be less than what + * is in the service parameters + */ +static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, + unsigned int maxval) +{ + unsigned int mfs; + + /* + * Get max payload from the common service parameters and the + * class 3 receive data field size. 
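+ * Values smaller than FC_SP_MIN_MAX_PAYLOAD are ignored; otherwise + * the smaller of the two fields further limits the returned maximum.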
+ */ + mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK; + if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval) + maxval = mfs; + mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs); + if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval) + maxval = mfs; + return maxval; +} + +/** + * fc_rport_state_enter() - Change the state of a remote port + * @rdata: The remote port whose state should change + * @new: The new state + */ +static void fc_rport_state_enter(struct fc_rport_priv *rdata, + enum fc_rport_state new) +{ + lockdep_assert_held(&rdata->rp_mutex); + + if (rdata->rp_state != new) + rdata->retries = 0; + rdata->rp_state = new; +} + +/** + * fc_rport_work() - Handler for remote port events in the rport_event_queue + * @work: Handle to the remote port being dequeued + * + * Reference counting: drops kref on return + */ +static void fc_rport_work(struct work_struct *work) +{ + u32 port_id; + struct fc_rport_priv *rdata = + container_of(work, struct fc_rport_priv, event_work); + struct fc_rport_libfc_priv *rpriv; + enum fc_rport_event event; + struct fc_lport *lport = rdata->local_port; + struct fc_rport_operations *rport_ops; + struct fc_rport_identifiers ids; + struct fc_rport *rport; + struct fc4_prov *prov; + u8 type; + + mutex_lock(&rdata->rp_mutex); + event = rdata->event; + rport_ops = rdata->ops; + rport = rdata->rport; + + FC_RPORT_DBG(rdata, "work event %u\n", event); + + switch (event) { + case RPORT_EV_READY: + ids = rdata->ids; + rdata->event = RPORT_EV_NONE; + rdata->major_retries = 0; + kref_get(&rdata->kref); + mutex_unlock(&rdata->rp_mutex); + + if (!rport) { + FC_RPORT_DBG(rdata, "No rport!\n"); + rport = fc_remote_port_add(lport->host, 0, &ids); + } + if (!rport) { + FC_RPORT_DBG(rdata, "Failed to add the rport\n"); + fc_rport_logoff(rdata); + kref_put(&rdata->kref, fc_rport_destroy); + return; + } + mutex_lock(&rdata->rp_mutex); + if (rdata->rport) + FC_RPORT_DBG(rdata, "rport already allocated\n"); + rdata->rport = rport; + rport->maxframe_size = rdata->maxframe_size; + rport->supported_classes = rdata->supported_classes; + + rpriv = rport->dd_data; + rpriv->local_port = lport; + rpriv->rp_state = rdata->rp_state; + rpriv->flags = rdata->flags; + rpriv->e_d_tov = rdata->e_d_tov; + rpriv->r_a_tov = rdata->r_a_tov; + mutex_unlock(&rdata->rp_mutex); + + if (rport_ops && rport_ops->event_callback) { + FC_RPORT_DBG(rdata, "callback ev %d\n", event); + rport_ops->event_callback(lport, rdata, event); + } + if (rdata->lld_event_callback) { + FC_RPORT_DBG(rdata, "lld callback ev %d\n", event); + rdata->lld_event_callback(lport, rdata, event); + } + kref_put(&rdata->kref, fc_rport_destroy); + break; + + case RPORT_EV_FAILED: + case RPORT_EV_LOGO: + case RPORT_EV_STOP: + if (rdata->prli_count) { + mutex_lock(&fc_prov_mutex); + for (type = 1; type < FC_FC4_PROV_SIZE; type++) { + prov = fc_passive_prov[type]; + if (prov && prov->prlo) + prov->prlo(rdata); + } + mutex_unlock(&fc_prov_mutex); + } + port_id = rdata->ids.port_id; + mutex_unlock(&rdata->rp_mutex); + + if (rport_ops && rport_ops->event_callback) { + FC_RPORT_DBG(rdata, "callback ev %d\n", event); + rport_ops->event_callback(lport, rdata, event); + } + if (rdata->lld_event_callback) { + FC_RPORT_DBG(rdata, "lld callback ev %d\n", event); + rdata->lld_event_callback(lport, rdata, event); + } + if (cancel_delayed_work_sync(&rdata->retry_work)) + kref_put(&rdata->kref, fc_rport_destroy); + + /* + * Reset any outstanding exchanges before freeing rport. 
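+ * The reset is done in both directions for this port ID.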
+ */ + lport->tt.exch_mgr_reset(lport, 0, port_id); + lport->tt.exch_mgr_reset(lport, port_id, 0); + + if (rport) { + rpriv = rport->dd_data; + rpriv->rp_state = RPORT_ST_DELETE; + mutex_lock(&rdata->rp_mutex); + rdata->rport = NULL; + mutex_unlock(&rdata->rp_mutex); + fc_remote_port_delete(rport); + } + + mutex_lock(&rdata->rp_mutex); + if (rdata->rp_state == RPORT_ST_DELETE) { + if (port_id == FC_FID_DIR_SERV) { + rdata->event = RPORT_EV_NONE; + mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, fc_rport_destroy); + } else if ((rdata->flags & FC_RP_STARTED) && + rdata->major_retries < + lport->max_rport_retry_count) { + rdata->major_retries++; + rdata->event = RPORT_EV_NONE; + FC_RPORT_DBG(rdata, "work restart\n"); + fc_rport_enter_flogi(rdata); + mutex_unlock(&rdata->rp_mutex); + } else { + mutex_unlock(&rdata->rp_mutex); + FC_RPORT_DBG(rdata, "work delete\n"); + mutex_lock(&lport->disc.disc_mutex); + list_del_rcu(&rdata->peers); + mutex_unlock(&lport->disc.disc_mutex); + kref_put(&rdata->kref, fc_rport_destroy); + } + } else { + /* + * Re-open for events. Reissue READY event if ready. + */ + rdata->event = RPORT_EV_NONE; + if (rdata->rp_state == RPORT_ST_READY) { + FC_RPORT_DBG(rdata, "work reopen\n"); + fc_rport_enter_ready(rdata); + } + mutex_unlock(&rdata->rp_mutex); + } + break; + + default: + mutex_unlock(&rdata->rp_mutex); + break; + } + kref_put(&rdata->kref, fc_rport_destroy); +} + +/** + * fc_rport_login() - Start the remote port login state machine + * @rdata: The remote port to be logged in to + * + * Initiates the RP state machine. It is called from the LP module. + * This function will issue the following commands to the N_Port + * identified by the FC ID provided. + * + * - PLOGI + * - PRLI + * - RTV + * + * Locking Note: Called without the rport lock held. This + * function will hold the rport lock, call an _enter_* + * function and then unlock the rport. + * + * This indicates the intent to be logged into the remote port. + * If it appears we are already logged in, ADISC is used to verify + * the setup. + */ +int fc_rport_login(struct fc_rport_priv *rdata) +{ + mutex_lock(&rdata->rp_mutex); + + if (rdata->flags & FC_RP_STARTED) { + FC_RPORT_DBG(rdata, "port already started\n"); + mutex_unlock(&rdata->rp_mutex); + return 0; + } + + rdata->flags |= FC_RP_STARTED; + switch (rdata->rp_state) { + case RPORT_ST_READY: + FC_RPORT_DBG(rdata, "ADISC port\n"); + fc_rport_enter_adisc(rdata); + break; + case RPORT_ST_DELETE: + FC_RPORT_DBG(rdata, "Restart deleted port\n"); + break; + case RPORT_ST_INIT: + FC_RPORT_DBG(rdata, "Login to port\n"); + fc_rport_enter_flogi(rdata); + break; + default: + FC_RPORT_DBG(rdata, "Login in progress, state %s\n", + fc_rport_state(rdata)); + break; + } + mutex_unlock(&rdata->rp_mutex); + + return 0; +} +EXPORT_SYMBOL(fc_rport_login); + +/** + * fc_rport_enter_delete() - Schedule a remote port to be deleted + * @rdata: The remote port to be deleted + * @event: The event to report as the reason for deletion + * + * Allow state change into DELETE only once. + * + * Call queue_work only if there's no event already pending. + * Set the new event so that the old pending event will not occur. + * Since we have the mutex, even if fc_rport_work() is already started, + * it'll see the new event. 
+ * + * Reference counting: does not modify kref + */ +static void fc_rport_enter_delete(struct fc_rport_priv *rdata, + enum fc_rport_event event) +{ + lockdep_assert_held(&rdata->rp_mutex); + + if (rdata->rp_state == RPORT_ST_DELETE) + return; + + FC_RPORT_DBG(rdata, "Delete port\n"); + + fc_rport_state_enter(rdata, RPORT_ST_DELETE); + + if (rdata->event == RPORT_EV_NONE) { + kref_get(&rdata->kref); + if (!queue_work(rport_event_queue, &rdata->event_work)) + kref_put(&rdata->kref, fc_rport_destroy); + } + + rdata->event = event; +} + +/** + * fc_rport_logoff() - Logoff and remove a remote port + * @rdata: The remote port to be logged off of + * + * Locking Note: Called without the rport lock held. This + * function will hold the rport lock, call an _enter_* + * function and then unlock the rport. + */ +int fc_rport_logoff(struct fc_rport_priv *rdata) +{ + struct fc_lport *lport = rdata->local_port; + u32 port_id = rdata->ids.port_id; + + mutex_lock(&rdata->rp_mutex); + + FC_RPORT_DBG(rdata, "Remove port\n"); + + rdata->flags &= ~FC_RP_STARTED; + if (rdata->rp_state == RPORT_ST_DELETE) { + FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n"); + goto out; + } + /* + * FC-LS states: + * To explicitly Logout, the initiating Nx_Port shall terminate + * other open Sequences that it initiated with the destination + * Nx_Port prior to performing Logout. + */ + lport->tt.exch_mgr_reset(lport, 0, port_id); + lport->tt.exch_mgr_reset(lport, port_id, 0); + + fc_rport_enter_logo(rdata); + + /* + * Change the state to Delete so that we discard + * the response. + */ + fc_rport_enter_delete(rdata, RPORT_EV_STOP); +out: + mutex_unlock(&rdata->rp_mutex); + return 0; +} +EXPORT_SYMBOL(fc_rport_logoff); + +/** + * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state + * @rdata: The remote port that is ready + * + * Reference counting: schedules workqueue, does not modify kref + */ +static void fc_rport_enter_ready(struct fc_rport_priv *rdata) +{ + lockdep_assert_held(&rdata->rp_mutex); + + fc_rport_state_enter(rdata, RPORT_ST_READY); + + FC_RPORT_DBG(rdata, "Port is Ready\n"); + + kref_get(&rdata->kref); + if (rdata->event == RPORT_EV_NONE && + !queue_work(rport_event_queue, &rdata->event_work)) + kref_put(&rdata->kref, fc_rport_destroy); + + rdata->event = RPORT_EV_READY; +} + +/** + * fc_rport_timeout() - Handler for the retry_work timer + * @work: Handle to the remote port that has timed out + * + * Locking Note: Called without the rport lock held. This + * function will hold the rport lock, call an _enter_* + * function and then unlock the rport. + * + * Reference counting: Drops kref on return. 
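+ * The reference dropped here is the one taken when retry_work was + * scheduled.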
+ */ +static void fc_rport_timeout(struct work_struct *work) +{ + struct fc_rport_priv *rdata = + container_of(work, struct fc_rport_priv, retry_work.work); + + mutex_lock(&rdata->rp_mutex); + FC_RPORT_DBG(rdata, "Port timeout, state %s\n", fc_rport_state(rdata)); + + switch (rdata->rp_state) { + case RPORT_ST_FLOGI: + fc_rport_enter_flogi(rdata); + break; + case RPORT_ST_PLOGI: + fc_rport_enter_plogi(rdata); + break; + case RPORT_ST_PRLI: + fc_rport_enter_prli(rdata); + break; + case RPORT_ST_RTV: + fc_rport_enter_rtv(rdata); + break; + case RPORT_ST_ADISC: + fc_rport_enter_adisc(rdata); + break; + case RPORT_ST_PLOGI_WAIT: + case RPORT_ST_READY: + case RPORT_ST_INIT: + case RPORT_ST_DELETE: + break; + } + + mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, fc_rport_destroy); +} + +/** + * fc_rport_error() - Error handler, called once retries have been exhausted + * @rdata: The remote port the error is happened on + * @err: The error code + * + * Reference counting: does not modify kref + */ +static void fc_rport_error(struct fc_rport_priv *rdata, int err) +{ + struct fc_lport *lport = rdata->local_port; + + lockdep_assert_held(&rdata->rp_mutex); + + FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n", + -err, fc_rport_state(rdata), rdata->retries); + + switch (rdata->rp_state) { + case RPORT_ST_FLOGI: + rdata->flags &= ~FC_RP_STARTED; + fc_rport_enter_delete(rdata, RPORT_EV_FAILED); + break; + case RPORT_ST_PLOGI: + if (lport->point_to_multipoint) { + rdata->flags &= ~FC_RP_STARTED; + fc_rport_enter_delete(rdata, RPORT_EV_FAILED); + } else + fc_rport_enter_logo(rdata); + break; + case RPORT_ST_RTV: + fc_rport_enter_ready(rdata); + break; + case RPORT_ST_PRLI: + fc_rport_enter_plogi(rdata); + break; + case RPORT_ST_ADISC: + fc_rport_enter_logo(rdata); + break; + case RPORT_ST_PLOGI_WAIT: + case RPORT_ST_DELETE: + case RPORT_ST_READY: + case RPORT_ST_INIT: + break; + } +} + +/** + * fc_rport_error_retry() - Handler for remote port state retries + * @rdata: The remote port whose state is to be retried + * @err: The error code + * + * If the error was an exchange timeout retry immediately, + * otherwise wait for E_D_TOV. + * + * Reference counting: increments kref when scheduling retry_work + */ +static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err) +{ + unsigned long delay = msecs_to_jiffies(rdata->e_d_tov); + + lockdep_assert_held(&rdata->rp_mutex); + + /* make sure this isn't an FC_EX_CLOSED error, never retry those */ + if (err == -FC_EX_CLOSED) + goto out; + + if (rdata->retries < rdata->local_port->max_rport_retry_count) { + FC_RPORT_DBG(rdata, "Error %d in state %s, retrying\n", + err, fc_rport_state(rdata)); + rdata->retries++; + /* no additional delay on exchange timeouts */ + if (err == -FC_EX_TIMEOUT) + delay = 0; + kref_get(&rdata->kref); + if (!schedule_delayed_work(&rdata->retry_work, delay)) + kref_put(&rdata->kref, fc_rport_destroy); + return; + } + +out: + fc_rport_error(rdata, err); +} + +/** + * fc_rport_login_complete() - Handle parameters and completion of p-mp login. + * @rdata: The remote port which we logged into or which logged into us. + * @fp: The FLOGI or PLOGI request or response frame + * + * Returns non-zero error if a problem is detected with the frame. + * Does not free the frame. + * + * This is only used in point-to-multipoint mode for FIP currently. 
+ */ +static int fc_rport_login_complete(struct fc_rport_priv *rdata, + struct fc_frame *fp) +{ + struct fc_lport *lport = rdata->local_port; + struct fc_els_flogi *flogi; + unsigned int e_d_tov; + u16 csp_flags; + + flogi = fc_frame_payload_get(fp, sizeof(*flogi)); + if (!flogi) + return -EINVAL; + + csp_flags = ntohs(flogi->fl_csp.sp_features); + + if (fc_frame_payload_op(fp) == ELS_FLOGI) { + if (csp_flags & FC_SP_FT_FPORT) { + FC_RPORT_DBG(rdata, "Fabric bit set in FLOGI\n"); + return -EINVAL; + } + } else { + + /* + * E_D_TOV is not valid on an incoming FLOGI request. + */ + e_d_tov = ntohl(flogi->fl_csp.sp_e_d_tov); + if (csp_flags & FC_SP_FT_EDTR) + e_d_tov /= 1000000; + if (e_d_tov > rdata->e_d_tov) + rdata->e_d_tov = e_d_tov; + } + rdata->maxframe_size = fc_plogi_get_maxframe(flogi, lport->mfs); + return 0; +} + +/** + * fc_rport_flogi_resp() - Handle response to FLOGI request for p-mp mode + * @sp: The sequence that the FLOGI was on + * @fp: The FLOGI response frame + * @rp_arg: The remote port that received the FLOGI response + */ +static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, + void *rp_arg) +{ + struct fc_rport_priv *rdata = rp_arg; + struct fc_lport *lport = rdata->local_port; + struct fc_els_flogi *flogi; + unsigned int r_a_tov; + u8 opcode; + int err = 0; + + FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", + IS_ERR(fp) ? "error" : fc_els_resp_type(fp)); + + if (fp == ERR_PTR(-FC_EX_CLOSED)) + goto put; + + mutex_lock(&rdata->rp_mutex); + + if (rdata->rp_state != RPORT_ST_FLOGI) { + FC_RPORT_DBG(rdata, "Received a FLOGI response, but in state " + "%s\n", fc_rport_state(rdata)); + if (IS_ERR(fp)) + goto err; + goto out; + } + + if (IS_ERR(fp)) { + fc_rport_error(rdata, PTR_ERR(fp)); + goto err; + } + opcode = fc_frame_payload_op(fp); + if (opcode == ELS_LS_RJT) { + struct fc_els_ls_rjt *rjt; + + rjt = fc_frame_payload_get(fp, sizeof(*rjt)); + FC_RPORT_DBG(rdata, "FLOGI ELS rejected, reason %x expl %x\n", + rjt->er_reason, rjt->er_explan); + err = -FC_EX_ELS_RJT; + goto bad; + } else if (opcode != ELS_LS_ACC) { + FC_RPORT_DBG(rdata, "FLOGI ELS invalid opcode %x\n", opcode); + err = -FC_EX_ELS_RJT; + goto bad; + } + if (fc_rport_login_complete(rdata, fp)) { + FC_RPORT_DBG(rdata, "FLOGI failed, no login\n"); + err = -FC_EX_INV_LOGIN; + goto bad; + } + + flogi = fc_frame_payload_get(fp, sizeof(*flogi)); + if (!flogi) { + err = -FC_EX_ALLOC_ERR; + goto bad; + } + r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov); + if (r_a_tov > rdata->r_a_tov) + rdata->r_a_tov = r_a_tov; + + if (rdata->ids.port_name < lport->wwpn) + fc_rport_enter_plogi(rdata); + else + fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT); +out: + fc_frame_free(fp); +err: + mutex_unlock(&rdata->rp_mutex); +put: + kref_put(&rdata->kref, fc_rport_destroy); + return; +bad: + FC_RPORT_DBG(rdata, "Bad FLOGI response\n"); + fc_rport_error_retry(rdata, err); + goto out; +} + +/** + * fc_rport_enter_flogi() - Send a FLOGI request to the remote port for p-mp + * @rdata: The remote port to send a FLOGI to + * + * Reference counting: increments kref when sending ELS + */ +static void fc_rport_enter_flogi(struct fc_rport_priv *rdata) +{ + struct fc_lport *lport = rdata->local_port; + struct fc_frame *fp; + + lockdep_assert_held(&rdata->rp_mutex); + + if (!lport->point_to_multipoint) + return fc_rport_enter_plogi(rdata); + + FC_RPORT_DBG(rdata, "Entered FLOGI state from %s state\n", + fc_rport_state(rdata)); + + fc_rport_state_enter(rdata, RPORT_ST_FLOGI); + + fp = fc_frame_alloc(lport, sizeof(struct 
fc_els_flogi)); + if (!fp) + return fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR); + + kref_get(&rdata->kref); + if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI, + fc_rport_flogi_resp, rdata, + 2 * lport->r_a_tov)) { + fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR); + kref_put(&rdata->kref, fc_rport_destroy); + } +} + +/** + * fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode + * @lport: The local port that received the FLOGI request + * @rx_fp: The FLOGI request frame + * + * Reference counting: drops kref on return + */ +static void fc_rport_recv_flogi_req(struct fc_lport *lport, + struct fc_frame *rx_fp) +{ + struct fc_els_flogi *flp; + struct fc_rport_priv *rdata; + struct fc_frame *fp = rx_fp; + struct fc_seq_els_data rjt_data; + u32 sid; + + sid = fc_frame_sid(fp); + + FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n"); + + if (!lport->point_to_multipoint) { + rjt_data.reason = ELS_RJT_UNSUP; + rjt_data.explan = ELS_EXPL_NONE; + goto reject; + } + + flp = fc_frame_payload_get(fp, sizeof(*flp)); + if (!flp) { + rjt_data.reason = ELS_RJT_LOGIC; + rjt_data.explan = ELS_EXPL_INV_LEN; + goto reject; + } + + rdata = fc_rport_lookup(lport, sid); + if (!rdata) { + rjt_data.reason = ELS_RJT_FIP; + rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR; + goto reject; + } + mutex_lock(&rdata->rp_mutex); + + FC_RPORT_DBG(rdata, "Received FLOGI in %s state\n", + fc_rport_state(rdata)); + + switch (rdata->rp_state) { + case RPORT_ST_INIT: + /* + * The FLOGI request was received on an rport that is still in the + * INIT state, meaning it has not transitioned to FLOGI yet: either + * the fc_rport timeout has not triggered or this end has not yet + * received a beacon from the other end. Only in that case is the + * rport state machine allowed to continue; otherwise fall through, + * which causes a reject response to be sent. + * NOTE: the FIP state (such as VNMP_UP or VNMP_CLAIM) is not checked + * here because, if the FIP state were not one of those, the rport + * would not have been created and fc_rport_lookup() would have + * failed anyway. + */ + break; + case RPORT_ST_DELETE: + mutex_unlock(&rdata->rp_mutex); + rjt_data.reason = ELS_RJT_FIP; + rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR; + goto reject_put; + case RPORT_ST_FLOGI: + case RPORT_ST_PLOGI_WAIT: + case RPORT_ST_PLOGI: + break; + case RPORT_ST_PRLI: + case RPORT_ST_RTV: + case RPORT_ST_READY: + case RPORT_ST_ADISC: + /* + * Set the remote port to be deleted and to then restart. + * This queues work to be sure exchanges are reset. + */ + fc_rport_enter_delete(rdata, RPORT_EV_LOGO); + mutex_unlock(&rdata->rp_mutex); + rjt_data.reason = ELS_RJT_BUSY; + rjt_data.explan = ELS_EXPL_NONE; + goto reject_put; + } + if (fc_rport_login_complete(rdata, fp)) { + mutex_unlock(&rdata->rp_mutex); + rjt_data.reason = ELS_RJT_LOGIC; + rjt_data.explan = ELS_EXPL_NONE; + goto reject_put; + } + + fp = fc_frame_alloc(lport, sizeof(*flp)); + if (!fp) + goto out; + + fc_flogi_fill(lport, fp); + flp = fc_frame_payload_get(fp, sizeof(*flp)); + flp->fl_cmd = ELS_LS_ACC; + + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); + + /* + * Do not proceed with the state machine if our + * FLOGI has crossed with an FLOGI from the + * remote port; wait for the FLOGI response instead.
+ */ + if (rdata->rp_state != RPORT_ST_FLOGI) { + if (rdata->ids.port_name < lport->wwpn) + fc_rport_enter_plogi(rdata); + else + fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT); + } +out: + mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, fc_rport_destroy); + fc_frame_free(rx_fp); + return; + +reject_put: + kref_put(&rdata->kref, fc_rport_destroy); +reject: + fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); + fc_frame_free(rx_fp); +} + +/** + * fc_rport_plogi_resp() - Handler for ELS PLOGI responses + * @sp: The sequence the PLOGI is on + * @fp: The PLOGI response frame + * @rdata_arg: The remote port that sent the PLOGI response + * + * Locking Note: This function will be called without the rport lock + * held, but it will lock, call an _enter_* function or fc_rport_error + * and then unlock the rport. + */ +static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, + void *rdata_arg) +{ + struct fc_rport_priv *rdata = rdata_arg; + struct fc_lport *lport = rdata->local_port; + struct fc_els_flogi *plp = NULL; + u16 csp_seq; + u16 cssp_seq; + u8 op; + + FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp)); + + if (fp == ERR_PTR(-FC_EX_CLOSED)) + goto put; + + mutex_lock(&rdata->rp_mutex); + + if (rdata->rp_state != RPORT_ST_PLOGI) { + FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state " + "%s\n", fc_rport_state(rdata)); + if (IS_ERR(fp)) + goto err; + goto out; + } + + if (IS_ERR(fp)) { + fc_rport_error_retry(rdata, PTR_ERR(fp)); + goto err; + } + + op = fc_frame_payload_op(fp); + if (op == ELS_LS_ACC && + (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) { + rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn); + rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn); + + /* save plogi response sp_features for further reference */ + rdata->sp_features = ntohs(plp->fl_csp.sp_features); + + if (lport->point_to_multipoint) + fc_rport_login_complete(rdata, fp); + csp_seq = ntohs(plp->fl_csp.sp_tot_seq); + cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq); + if (cssp_seq < csp_seq) + csp_seq = cssp_seq; + rdata->max_seq = csp_seq; + rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs); + fc_rport_enter_prli(rdata); + } else { + struct fc_els_ls_rjt *rjt; + + rjt = fc_frame_payload_get(fp, sizeof(*rjt)); + if (!rjt) + FC_RPORT_DBG(rdata, "PLOGI bad response\n"); + else + FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n", + rjt->er_reason, rjt->er_explan); + fc_rport_error_retry(rdata, -FC_EX_ELS_RJT); + } +out: + fc_frame_free(fp); +err: + mutex_unlock(&rdata->rp_mutex); +put: + kref_put(&rdata->kref, fc_rport_destroy); +} + +static bool +fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata) +{ + if (rdata->ids.roles == FC_PORT_ROLE_UNKNOWN) + return true; + if ((rdata->ids.roles & FC_PORT_ROLE_FCP_TARGET) && + (lport->service_params & FCP_SPPF_INIT_FCN)) + return true; + if ((rdata->ids.roles & FC_PORT_ROLE_FCP_INITIATOR) && + (lport->service_params & FCP_SPPF_TARG_FCN)) + return true; + return false; +} + +/** + * fc_rport_enter_plogi() - Send Port Login (PLOGI) request + * @rdata: The remote port to send a PLOGI to + * + * Reference counting: increments kref when sending ELS + */ +static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) +{ + struct fc_lport *lport = rdata->local_port; + struct fc_frame *fp; + + lockdep_assert_held(&rdata->rp_mutex); + + if (!fc_rport_compatible_roles(lport, rdata)) { + FC_RPORT_DBG(rdata, "PLOGI suppressed for incompatible role\n"); + 
fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT); + return; + } + + FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n", + fc_rport_state(rdata)); + + fc_rport_state_enter(rdata, RPORT_ST_PLOGI); + + rdata->maxframe_size = FC_MIN_MAX_PAYLOAD; + fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); + if (!fp) { + FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__); + fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR); + return; + } + rdata->e_d_tov = lport->e_d_tov; + + kref_get(&rdata->kref); + if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI, + fc_rport_plogi_resp, rdata, + 2 * lport->r_a_tov)) { + fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR); + kref_put(&rdata->kref, fc_rport_destroy); + } +} + +/** + * fc_rport_prli_resp() - Process Login (PRLI) response handler + * @sp: The sequence the PRLI response was on + * @fp: The PRLI response frame + * @rdata_arg: The remote port that sent the PRLI response + * + * Locking Note: This function will be called without the rport lock + * held, but it will lock, call an _enter_* function or fc_rport_error + * and then unlock the rport. + */ +static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, + void *rdata_arg) +{ + struct fc_rport_priv *rdata = rdata_arg; + struct { + struct fc_els_prli prli; + struct fc_els_spp spp; + } *pp; + struct fc_els_spp temp_spp; + struct fc_els_ls_rjt *rjt; + struct fc4_prov *prov; + u32 roles = FC_RPORT_ROLE_UNKNOWN; + u32 fcp_parm = 0; + u8 op; + enum fc_els_spp_resp resp_code; + + FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp)); + + if (fp == ERR_PTR(-FC_EX_CLOSED)) + goto put; + + mutex_lock(&rdata->rp_mutex); + + if (rdata->rp_state != RPORT_ST_PRLI) { + FC_RPORT_DBG(rdata, "Received a PRLI response, but in state " + "%s\n", fc_rport_state(rdata)); + if (IS_ERR(fp)) + goto err; + goto out; + } + + if (IS_ERR(fp)) { + fc_rport_error_retry(rdata, PTR_ERR(fp)); + goto err; + } + + /* reinitialize remote port roles */ + rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN; + + op = fc_frame_payload_op(fp); + if (op == ELS_LS_ACC) { + pp = fc_frame_payload_get(fp, sizeof(*pp)); + if (!pp) { + fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR); + goto out; + } + + resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK); + FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n", + pp->spp.spp_flags, pp->spp.spp_type); + + rdata->spp_type = pp->spp.spp_type; + if (resp_code != FC_SPP_RESP_ACK) { + if (resp_code == FC_SPP_RESP_CONF) + fc_rport_error(rdata, -FC_EX_SEQ_ERR); + else + fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR); + goto out; + } + if (pp->prli.prli_spp_len < sizeof(pp->spp)) { + fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR); + goto out; + } + + fcp_parm = ntohl(pp->spp.spp_params); + if (fcp_parm & FCP_SPPF_RETRY) + rdata->flags |= FC_RP_FLAGS_RETRY; + if (fcp_parm & FCP_SPPF_CONF_COMPL) + rdata->flags |= FC_RP_FLAGS_CONF_REQ; + + /* + * Call prli provider if we should act as a target + */ + if (rdata->spp_type < FC_FC4_PROV_SIZE) { + prov = fc_passive_prov[rdata->spp_type]; + if (prov) { + memset(&temp_spp, 0, sizeof(temp_spp)); + prov->prli(rdata, pp->prli.prli_spp_len, + &pp->spp, &temp_spp); + } + } + /* + * Check if the image pair could be established + */ + if (rdata->spp_type != FC_TYPE_FCP || + !(pp->spp.spp_flags & FC_SPP_EST_IMG_PAIR)) { + /* + * Nope; we can't use this port as a target. 
+ */ + fcp_parm &= ~FCP_SPPF_TARG_FCN; + } + rdata->supported_classes = FC_COS_CLASS3; + if (fcp_parm & FCP_SPPF_INIT_FCN) + roles |= FC_RPORT_ROLE_FCP_INITIATOR; + if (fcp_parm & FCP_SPPF_TARG_FCN) + roles |= FC_RPORT_ROLE_FCP_TARGET; + + rdata->ids.roles = roles; + fc_rport_enter_rtv(rdata); + + } else { + rjt = fc_frame_payload_get(fp, sizeof(*rjt)); + if (!rjt) + FC_RPORT_DBG(rdata, "PRLI bad response\n"); + else { + FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n", + rjt->er_reason, rjt->er_explan); + if (rjt->er_reason == ELS_RJT_UNAB && + rjt->er_explan == ELS_EXPL_PLOGI_REQD) { + fc_rport_enter_plogi(rdata); + goto out; + } + } + fc_rport_error_retry(rdata, FC_EX_ELS_RJT); + } + +out: + fc_frame_free(fp); +err: + mutex_unlock(&rdata->rp_mutex); +put: + kref_put(&rdata->kref, fc_rport_destroy); +} + +/** + * fc_rport_enter_prli() - Send Process Login (PRLI) request + * @rdata: The remote port to send the PRLI request to + * + * Reference counting: increments kref when sending ELS + */ +static void fc_rport_enter_prli(struct fc_rport_priv *rdata) +{ + struct fc_lport *lport = rdata->local_port; + struct { + struct fc_els_prli prli; + struct fc_els_spp spp; + } *pp; + struct fc_frame *fp; + struct fc4_prov *prov; + + lockdep_assert_held(&rdata->rp_mutex); + + /* + * If the rport is one of the well known addresses + * we skip PRLI and RTV and go straight to READY. + */ + if (rdata->ids.port_id >= FC_FID_DOM_MGR) { + fc_rport_enter_ready(rdata); + return; + } + + /* + * And if the local port does not support the initiator function + * there's no need to send a PRLI, either. + */ + if (!(lport->service_params & FCP_SPPF_INIT_FCN)) { + fc_rport_enter_ready(rdata); + return; + } + + FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n", + fc_rport_state(rdata)); + + fc_rport_state_enter(rdata, RPORT_ST_PRLI); + + fp = fc_frame_alloc(lport, sizeof(*pp)); + if (!fp) { + fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR); + return; + } + + fc_prli_fill(lport, fp); + + prov = fc_passive_prov[FC_TYPE_FCP]; + if (prov) { + pp = fc_frame_payload_get(fp, sizeof(*pp)); + prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp); + } + + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rdata->ids.port_id, + fc_host_port_id(lport->host), FC_TYPE_ELS, + FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + + kref_get(&rdata->kref); + if (!fc_exch_seq_send(lport, fp, fc_rport_prli_resp, + NULL, rdata, 2 * lport->r_a_tov)) { + fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR); + kref_put(&rdata->kref, fc_rport_destroy); + } +} + +/** + * fc_rport_rtv_resp() - Handler for Request Timeout Value (RTV) responses + * @sp: The sequence the RTV was on + * @fp: The RTV response frame + * @rdata_arg: The remote port that sent the RTV response + * + * Many targets don't seem to support this. + * + * Locking Note: This function will be called without the rport lock + * held, but it will lock, call an _enter_* function or fc_rport_error + * and then unlock the rport. 
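+ *
+ * An LS_RJT or a short RTV payload is not treated as fatal here:
+ * the rport still transitions to READY with its existing
+ * e_d_tov/r_a_tov values.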
+ */ +static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, + void *rdata_arg) +{ + struct fc_rport_priv *rdata = rdata_arg; + u8 op; + + FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp)); + + if (fp == ERR_PTR(-FC_EX_CLOSED)) + goto put; + + mutex_lock(&rdata->rp_mutex); + + if (rdata->rp_state != RPORT_ST_RTV) { + FC_RPORT_DBG(rdata, "Received a RTV response, but in state " + "%s\n", fc_rport_state(rdata)); + if (IS_ERR(fp)) + goto err; + goto out; + } + + if (IS_ERR(fp)) { + fc_rport_error(rdata, PTR_ERR(fp)); + goto err; + } + + op = fc_frame_payload_op(fp); + if (op == ELS_LS_ACC) { + struct fc_els_rtv_acc *rtv; + u32 toq; + u32 tov; + + rtv = fc_frame_payload_get(fp, sizeof(*rtv)); + if (rtv) { + toq = ntohl(rtv->rtv_toq); + tov = ntohl(rtv->rtv_r_a_tov); + if (tov == 0) + tov = 1; + if (tov > rdata->r_a_tov) + rdata->r_a_tov = tov; + tov = ntohl(rtv->rtv_e_d_tov); + if (toq & FC_ELS_RTV_EDRES) + tov /= 1000000; + if (tov == 0) + tov = 1; + if (tov > rdata->e_d_tov) + rdata->e_d_tov = tov; + } + } + + fc_rport_enter_ready(rdata); + +out: + fc_frame_free(fp); +err: + mutex_unlock(&rdata->rp_mutex); +put: + kref_put(&rdata->kref, fc_rport_destroy); +} + +/** + * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request + * @rdata: The remote port to send the RTV request to + * + * Reference counting: increments kref when sending ELS + */ +static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) +{ + struct fc_frame *fp; + struct fc_lport *lport = rdata->local_port; + + lockdep_assert_held(&rdata->rp_mutex); + + FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n", + fc_rport_state(rdata)); + + fc_rport_state_enter(rdata, RPORT_ST_RTV); + + fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv)); + if (!fp) { + fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR); + return; + } + + kref_get(&rdata->kref); + if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV, + fc_rport_rtv_resp, rdata, + 2 * lport->r_a_tov)) { + fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR); + kref_put(&rdata->kref, fc_rport_destroy); + } +} + +/** + * fc_rport_recv_rtv_req() - Handler for Read Timeout Value (RTV) requests + * @rdata: The remote port that sent the RTV request + * @in_fp: The RTV request frame + */ +static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata, + struct fc_frame *in_fp) +{ + struct fc_lport *lport = rdata->local_port; + struct fc_frame *fp; + struct fc_els_rtv_acc *rtv; + struct fc_seq_els_data rjt_data; + + lockdep_assert_held(&rdata->rp_mutex); + lockdep_assert_held(&lport->lp_mutex); + + FC_RPORT_DBG(rdata, "Received RTV request\n"); + + fp = fc_frame_alloc(lport, sizeof(*rtv)); + if (!fp) { + rjt_data.reason = ELS_RJT_UNAB; + rjt_data.explan = ELS_EXPL_INSUF_RES; + fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); + goto drop; + } + rtv = fc_frame_payload_get(fp, sizeof(*rtv)); + rtv->rtv_cmd = ELS_LS_ACC; + rtv->rtv_r_a_tov = htonl(lport->r_a_tov); + rtv->rtv_e_d_tov = htonl(lport->e_d_tov); + rtv->rtv_toq = 0; + fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); +drop: + fc_frame_free(in_fp); +} + +/** + * fc_rport_logo_resp() - Handler for logout (LOGO) responses + * @sp: The sequence the LOGO was on + * @fp: The LOGO response frame + * @rdata_arg: The remote port + */ +static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, + void *rdata_arg) +{ + struct fc_rport_priv *rdata = rdata_arg; + struct fc_lport *lport = rdata->local_port; + + FC_RPORT_ID_DBG(lport, 
fc_seq_exch(sp)->did, + "Received a LOGO %s\n", fc_els_resp_type(fp)); + if (!IS_ERR(fp)) + fc_frame_free(fp); + kref_put(&rdata->kref, fc_rport_destroy); +} + +/** + * fc_rport_enter_logo() - Send a logout (LOGO) request + * @rdata: The remote port to send the LOGO request to + * + * Reference counting: increments kref when sending ELS + */ +static void fc_rport_enter_logo(struct fc_rport_priv *rdata) +{ + struct fc_lport *lport = rdata->local_port; + struct fc_frame *fp; + + lockdep_assert_held(&rdata->rp_mutex); + + FC_RPORT_DBG(rdata, "Port sending LOGO from %s state\n", + fc_rport_state(rdata)); + + fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo)); + if (!fp) + return; + kref_get(&rdata->kref); + if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, + fc_rport_logo_resp, rdata, 0)) + kref_put(&rdata->kref, fc_rport_destroy); +} + +/** + * fc_rport_adisc_resp() - Handler for Address Discovery (ADISC) responses + * @sp: The sequence the ADISC response was on + * @fp: The ADISC response frame + * @rdata_arg: The remote port that sent the ADISC response + * + * Locking Note: This function will be called without the rport lock + * held, but it will lock, call an _enter_* function or fc_rport_error + * and then unlock the rport. + */ +static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp, + void *rdata_arg) +{ + struct fc_rport_priv *rdata = rdata_arg; + struct fc_els_adisc *adisc; + u8 op; + + FC_RPORT_DBG(rdata, "Received a ADISC response\n"); + + if (fp == ERR_PTR(-FC_EX_CLOSED)) + goto put; + + mutex_lock(&rdata->rp_mutex); + + if (rdata->rp_state != RPORT_ST_ADISC) { + FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n", + fc_rport_state(rdata)); + if (IS_ERR(fp)) + goto err; + goto out; + } + + if (IS_ERR(fp)) { + fc_rport_error(rdata, PTR_ERR(fp)); + goto err; + } + + /* + * If address verification failed. Consider us logged out of the rport. + * Since the rport is still in discovery, we want to be + * logged in, so go to PLOGI state. Otherwise, go back to READY. 
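+ * A mismatch in port ID, WWPN or WWNN is handled the same way as
+ * an explicit reject and restarts the login sequence.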
+ */ + op = fc_frame_payload_op(fp); + adisc = fc_frame_payload_get(fp, sizeof(*adisc)); + if (op != ELS_LS_ACC || !adisc || + ntoh24(adisc->adisc_port_id) != rdata->ids.port_id || + get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name || + get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) { + FC_RPORT_DBG(rdata, "ADISC error or mismatch\n"); + fc_rport_enter_flogi(rdata); + } else { + FC_RPORT_DBG(rdata, "ADISC OK\n"); + fc_rport_enter_ready(rdata); + } +out: + fc_frame_free(fp); +err: + mutex_unlock(&rdata->rp_mutex); +put: + kref_put(&rdata->kref, fc_rport_destroy); +} + +/** + * fc_rport_enter_adisc() - Send Address Discover (ADISC) request + * @rdata: The remote port to send the ADISC request to + * + * Reference counting: increments kref when sending ELS + */ +static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) +{ + struct fc_lport *lport = rdata->local_port; + struct fc_frame *fp; + + lockdep_assert_held(&rdata->rp_mutex); + + FC_RPORT_DBG(rdata, "sending ADISC from %s state\n", + fc_rport_state(rdata)); + + fc_rport_state_enter(rdata, RPORT_ST_ADISC); + + fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc)); + if (!fp) { + fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR); + return; + } + kref_get(&rdata->kref); + if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC, + fc_rport_adisc_resp, rdata, + 2 * lport->r_a_tov)) { + fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR); + kref_put(&rdata->kref, fc_rport_destroy); + } +} + +/** + * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests + * @rdata: The remote port that sent the ADISC request + * @in_fp: The ADISC request frame + */ +static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata, + struct fc_frame *in_fp) +{ + struct fc_lport *lport = rdata->local_port; + struct fc_frame *fp; + struct fc_els_adisc *adisc; + struct fc_seq_els_data rjt_data; + + lockdep_assert_held(&rdata->rp_mutex); + lockdep_assert_held(&lport->lp_mutex); + + FC_RPORT_DBG(rdata, "Received ADISC request\n"); + + adisc = fc_frame_payload_get(in_fp, sizeof(*adisc)); + if (!adisc) { + rjt_data.reason = ELS_RJT_PROT; + rjt_data.explan = ELS_EXPL_INV_LEN; + fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); + goto drop; + } + + fp = fc_frame_alloc(lport, sizeof(*adisc)); + if (!fp) + goto drop; + fc_adisc_fill(lport, fp); + adisc = fc_frame_payload_get(fp, sizeof(*adisc)); + adisc->adisc_cmd = ELS_LS_ACC; + fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); +drop: + fc_frame_free(in_fp); +} + +/** + * fc_rport_recv_rls_req() - Handle received Read Link Status request + * @rdata: The remote port that sent the RLS request + * @rx_fp: The PRLI request frame + */ +static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata, + struct fc_frame *rx_fp) + +{ + struct fc_lport *lport = rdata->local_port; + struct fc_frame *fp; + struct fc_els_rls *rls; + struct fc_els_rls_resp *rsp; + struct fc_els_lesb *lesb; + struct fc_seq_els_data rjt_data; + struct fc_host_statistics *hst; + + lockdep_assert_held(&rdata->rp_mutex); + + FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n", + fc_rport_state(rdata)); + + rls = fc_frame_payload_get(rx_fp, sizeof(*rls)); + if (!rls) { + rjt_data.reason = ELS_RJT_PROT; + rjt_data.explan = ELS_EXPL_INV_LEN; + goto out_rjt; + } + + fp = fc_frame_alloc(lport, sizeof(*rsp)); + if (!fp) { + rjt_data.reason = ELS_RJT_UNAB; + rjt_data.explan = ELS_EXPL_INSUF_RES; + goto out_rjt; + } + + rsp = fc_frame_payload_get(fp, 
sizeof(*rsp)); + memset(rsp, 0, sizeof(*rsp)); + rsp->rls_cmd = ELS_LS_ACC; + lesb = &rsp->rls_lesb; + if (lport->tt.get_lesb) { + /* get LESB from LLD if it supports it */ + lport->tt.get_lesb(lport, lesb); + } else { + fc_get_host_stats(lport->host); + hst = &lport->host_stats; + lesb->lesb_link_fail = htonl(hst->link_failure_count); + lesb->lesb_sync_loss = htonl(hst->loss_of_sync_count); + lesb->lesb_sig_loss = htonl(hst->loss_of_signal_count); + lesb->lesb_prim_err = htonl(hst->prim_seq_protocol_err_count); + lesb->lesb_inv_word = htonl(hst->invalid_tx_word_count); + lesb->lesb_inv_crc = htonl(hst->invalid_crc_count); + } + + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); + goto out; + +out_rjt: + fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); +out: + fc_frame_free(rx_fp); +} + +/** + * fc_rport_recv_els_req() - Handler for validated ELS requests + * @lport: The local port that received the ELS request + * @fp: The ELS request frame + * + * Handle incoming ELS requests that require port login. + * The ELS opcode has already been validated by the caller. + * + * Reference counting: does not modify kref + */ +static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_rport_priv *rdata; + struct fc_seq_els_data els_data; + + lockdep_assert_held(&lport->lp_mutex); + + rdata = fc_rport_lookup(lport, fc_frame_sid(fp)); + if (!rdata) { + FC_RPORT_ID_DBG(lport, fc_frame_sid(fp), + "Received ELS 0x%02x from non-logged-in port\n", + fc_frame_payload_op(fp)); + goto reject; + } + + mutex_lock(&rdata->rp_mutex); + + switch (rdata->rp_state) { + case RPORT_ST_PRLI: + case RPORT_ST_RTV: + case RPORT_ST_READY: + case RPORT_ST_ADISC: + break; + case RPORT_ST_PLOGI: + if (fc_frame_payload_op(fp) == ELS_PRLI) { + FC_RPORT_DBG(rdata, "Reject ELS PRLI " + "while in state %s\n", + fc_rport_state(rdata)); + mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, fc_rport_destroy); + goto busy; + } + fallthrough; + default: + FC_RPORT_DBG(rdata, + "Reject ELS 0x%02x while in state %s\n", + fc_frame_payload_op(fp), fc_rport_state(rdata)); + mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, fc_rport_destroy); + goto reject; + } + + switch (fc_frame_payload_op(fp)) { + case ELS_PRLI: + fc_rport_recv_prli_req(rdata, fp); + break; + case ELS_PRLO: + fc_rport_recv_prlo_req(rdata, fp); + break; + case ELS_ADISC: + fc_rport_recv_adisc_req(rdata, fp); + break; + case ELS_RRQ: + fc_seq_els_rsp_send(fp, ELS_RRQ, NULL); + fc_frame_free(fp); + break; + case ELS_REC: + fc_seq_els_rsp_send(fp, ELS_REC, NULL); + fc_frame_free(fp); + break; + case ELS_RLS: + fc_rport_recv_rls_req(rdata, fp); + break; + case ELS_RTV: + fc_rport_recv_rtv_req(rdata, fp); + break; + default: + fc_frame_free(fp); /* can't happen */ + break; + } + + mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, fc_rport_destroy); + return; + +reject: + els_data.reason = ELS_RJT_UNAB; + els_data.explan = ELS_EXPL_PLOGI_REQD; + fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data); + fc_frame_free(fp); + return; + +busy: + els_data.reason = ELS_RJT_BUSY; + els_data.explan = ELS_EXPL_NONE; + fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data); + fc_frame_free(fp); + return; +} + +/** + * fc_rport_recv_req() - Handler for requests + * @lport: The local port that received the request + * @fp: The request frame + * + * Reference counting: does not modify kref + */ +void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_seq_els_data els_data; + + 
lockdep_assert_held(&lport->lp_mutex); + + /* + * Handle FLOGI, PLOGI and LOGO requests separately, since they + * don't require prior login. + * Check for unsupported opcodes first and reject them. + * For some ops, it would be incorrect to reject with "PLOGI required". + */ + switch (fc_frame_payload_op(fp)) { + case ELS_FLOGI: + fc_rport_recv_flogi_req(lport, fp); + break; + case ELS_PLOGI: + fc_rport_recv_plogi_req(lport, fp); + break; + case ELS_LOGO: + fc_rport_recv_logo_req(lport, fp); + break; + case ELS_PRLI: + case ELS_PRLO: + case ELS_ADISC: + case ELS_RRQ: + case ELS_REC: + case ELS_RLS: + case ELS_RTV: + fc_rport_recv_els_req(lport, fp); + break; + default: + els_data.reason = ELS_RJT_UNSUP; + els_data.explan = ELS_EXPL_NONE; + fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data); + fc_frame_free(fp); + break; + } +} +EXPORT_SYMBOL(fc_rport_recv_req); + +/** + * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests + * @lport: The local port that received the PLOGI request + * @rx_fp: The PLOGI request frame + * + * Reference counting: increments kref on return + */ +static void fc_rport_recv_plogi_req(struct fc_lport *lport, + struct fc_frame *rx_fp) +{ + struct fc_disc *disc; + struct fc_rport_priv *rdata; + struct fc_frame *fp = rx_fp; + struct fc_els_flogi *pl; + struct fc_seq_els_data rjt_data; + u32 sid; + + lockdep_assert_held(&lport->lp_mutex); + + sid = fc_frame_sid(fp); + + FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n"); + + pl = fc_frame_payload_get(fp, sizeof(*pl)); + if (!pl) { + FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n"); + rjt_data.reason = ELS_RJT_PROT; + rjt_data.explan = ELS_EXPL_INV_LEN; + goto reject; + } + + disc = &lport->disc; + mutex_lock(&disc->disc_mutex); + rdata = fc_rport_create(lport, sid); + if (!rdata) { + mutex_unlock(&disc->disc_mutex); + rjt_data.reason = ELS_RJT_UNAB; + rjt_data.explan = ELS_EXPL_INSUF_RES; + goto reject; + } + + mutex_lock(&rdata->rp_mutex); + mutex_unlock(&disc->disc_mutex); + + rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn); + rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn); + + /* + * If the rport was just created, possibly due to the incoming PLOGI, + * set the state appropriately and accept the PLOGI. + * + * If we had also sent a PLOGI, and if the received PLOGI is from a + * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason + * "command already in progress". + * + * XXX TBD: If the session was ready before, the PLOGI should result in + * all outstanding exchanges being reset. 
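+ *
+ * The state switch below applies that tie-break: a PLOGI that
+ * loses the WWPN comparison is rejected with "command already in
+ * progress" (ELS_RJT_INPROG).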
+ */ + switch (rdata->rp_state) { + case RPORT_ST_INIT: + FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n"); + break; + case RPORT_ST_PLOGI_WAIT: + FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI_WAIT state\n"); + break; + case RPORT_ST_PLOGI: + FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n"); + if (rdata->ids.port_name < lport->wwpn) { + mutex_unlock(&rdata->rp_mutex); + rjt_data.reason = ELS_RJT_INPROG; + rjt_data.explan = ELS_EXPL_NONE; + goto reject; + } + break; + case RPORT_ST_PRLI: + case RPORT_ST_RTV: + case RPORT_ST_READY: + case RPORT_ST_ADISC: + FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d " + "- ignored for now\n", rdata->rp_state); + /* XXX TBD - should reset */ + break; + case RPORT_ST_FLOGI: + case RPORT_ST_DELETE: + FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n", + fc_rport_state(rdata)); + mutex_unlock(&rdata->rp_mutex); + rjt_data.reason = ELS_RJT_BUSY; + rjt_data.explan = ELS_EXPL_NONE; + goto reject; + } + if (!fc_rport_compatible_roles(lport, rdata)) { + FC_RPORT_DBG(rdata, "Received PLOGI for incompatible role\n"); + mutex_unlock(&rdata->rp_mutex); + rjt_data.reason = ELS_RJT_LOGIC; + rjt_data.explan = ELS_EXPL_NONE; + goto reject; + } + + /* + * Get session payload size from incoming PLOGI. + */ + rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs); + + /* + * Send LS_ACC. If this fails, the originator should retry. + */ + fp = fc_frame_alloc(lport, sizeof(*pl)); + if (!fp) + goto out; + + fc_plogi_fill(lport, fp, ELS_LS_ACC); + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); + fc_rport_enter_prli(rdata); +out: + mutex_unlock(&rdata->rp_mutex); + fc_frame_free(rx_fp); + return; + +reject: + fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); + fc_frame_free(fp); +} + +/** + * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests + * @rdata: The remote port that sent the PRLI request + * @rx_fp: The PRLI request frame + */ +static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, + struct fc_frame *rx_fp) +{ + struct fc_lport *lport = rdata->local_port; + struct fc_frame *fp; + struct { + struct fc_els_prli prli; + struct fc_els_spp spp; + } *pp; + struct fc_els_spp *rspp; /* request service param page */ + struct fc_els_spp *spp; /* response spp */ + unsigned int len; + unsigned int plen; + enum fc_els_spp_resp resp; + struct fc_seq_els_data rjt_data; + struct fc4_prov *prov; + + lockdep_assert_held(&rdata->rp_mutex); + + FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n", + fc_rport_state(rdata)); + + len = fr_len(rx_fp) - sizeof(struct fc_frame_header); + pp = fc_frame_payload_get(rx_fp, sizeof(*pp)); + if (!pp) + goto reject_len; + plen = ntohs(pp->prli.prli_len); + if ((plen % 4) != 0 || plen > len || plen < 16) + goto reject_len; + if (plen < len) + len = plen; + plen = pp->prli.prli_spp_len; + if ((plen % 4) != 0 || plen < sizeof(*spp) || + plen > len || len < sizeof(*pp) || plen < 12) + goto reject_len; + rspp = &pp->spp; + + fp = fc_frame_alloc(lport, len); + if (!fp) { + rjt_data.reason = ELS_RJT_UNAB; + rjt_data.explan = ELS_EXPL_INSUF_RES; + goto reject; + } + pp = fc_frame_payload_get(fp, len); + WARN_ON(!pp); + memset(pp, 0, len); + pp->prli.prli_cmd = ELS_LS_ACC; + pp->prli.prli_spp_len = plen; + pp->prli.prli_len = htons(len); + len -= sizeof(struct fc_els_prli); + + /* + * Go through all the service parameter pages and build + * response. If plen indicates longer SPP than standard, + * use that. 
The entire response has been pre-cleared above. + */ + spp = &pp->spp; + mutex_lock(&fc_prov_mutex); + while (len >= plen) { + rdata->spp_type = rspp->spp_type; + spp->spp_type = rspp->spp_type; + spp->spp_type_ext = rspp->spp_type_ext; + resp = 0; + + if (rspp->spp_type < FC_FC4_PROV_SIZE) { + enum fc_els_spp_resp active = 0, passive = 0; + + prov = fc_active_prov[rspp->spp_type]; + if (prov) + active = prov->prli(rdata, plen, rspp, spp); + prov = fc_passive_prov[rspp->spp_type]; + if (prov) + passive = prov->prli(rdata, plen, rspp, spp); + if (!active || passive == FC_SPP_RESP_ACK) + resp = passive; + else + resp = active; + FC_RPORT_DBG(rdata, "PRLI rspp type %x " + "active %x passive %x\n", + rspp->spp_type, active, passive); + } + if (!resp) { + if (spp->spp_flags & FC_SPP_EST_IMG_PAIR) + resp |= FC_SPP_RESP_CONF; + else + resp |= FC_SPP_RESP_INVL; + } + spp->spp_flags |= resp; + len -= plen; + rspp = (struct fc_els_spp *)((char *)rspp + plen); + spp = (struct fc_els_spp *)((char *)spp + plen); + } + mutex_unlock(&fc_prov_mutex); + + /* + * Send LS_ACC. If this fails, the originator should retry. + */ + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); + + goto drop; + +reject_len: + rjt_data.reason = ELS_RJT_PROT; + rjt_data.explan = ELS_EXPL_INV_LEN; +reject: + fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); +drop: + fc_frame_free(rx_fp); +} + +/** + * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests + * @rdata: The remote port that sent the PRLO request + * @rx_fp: The PRLO request frame + */ +static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, + struct fc_frame *rx_fp) +{ + struct fc_lport *lport = rdata->local_port; + struct fc_frame *fp; + struct { + struct fc_els_prlo prlo; + struct fc_els_spp spp; + } *pp; + struct fc_els_spp *rspp; /* request service param page */ + struct fc_els_spp *spp; /* response spp */ + unsigned int len; + unsigned int plen; + struct fc_seq_els_data rjt_data; + + lockdep_assert_held(&rdata->rp_mutex); + + FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n", + fc_rport_state(rdata)); + + len = fr_len(rx_fp) - sizeof(struct fc_frame_header); + pp = fc_frame_payload_get(rx_fp, sizeof(*pp)); + if (!pp) + goto reject_len; + plen = ntohs(pp->prlo.prlo_len); + if (plen != 20) + goto reject_len; + if (plen < len) + len = plen; + + rspp = &pp->spp; + + fp = fc_frame_alloc(lport, len); + if (!fp) { + rjt_data.reason = ELS_RJT_UNAB; + rjt_data.explan = ELS_EXPL_INSUF_RES; + goto reject; + } + + pp = fc_frame_payload_get(fp, len); + WARN_ON(!pp); + memset(pp, 0, len); + pp->prlo.prlo_cmd = ELS_LS_ACC; + pp->prlo.prlo_obs = 0x10; + pp->prlo.prlo_len = htons(len); + spp = &pp->spp; + spp->spp_type = rspp->spp_type; + spp->spp_type_ext = rspp->spp_type_ext; + spp->spp_flags = FC_SPP_RESP_ACK; + + fc_rport_enter_prli(rdata); + + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); + goto drop; + +reject_len: + rjt_data.reason = ELS_RJT_PROT; + rjt_data.explan = ELS_EXPL_INV_LEN; +reject: + fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); +drop: + fc_frame_free(rx_fp); +} + +/** + * fc_rport_recv_logo_req() - Handler for logout (LOGO) requests + * @lport: The local port that received the LOGO request + * @fp: The LOGO request frame + * + * Reference counting: drops kref on return + */ +static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_rport_priv *rdata; + u32 sid; + + lockdep_assert_held(&lport->lp_mutex); + + 
fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL); + + sid = fc_frame_sid(fp); + + rdata = fc_rport_lookup(lport, sid); + if (rdata) { + mutex_lock(&rdata->rp_mutex); + FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", + fc_rport_state(rdata)); + + fc_rport_enter_delete(rdata, RPORT_EV_STOP); + mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, fc_rport_destroy); + } else + FC_RPORT_ID_DBG(lport, sid, + "Received LOGO from non-logged-in port\n"); + fc_frame_free(fp); +} + +/** + * fc_rport_flush_queue() - Flush the rport_event_queue + */ +void fc_rport_flush_queue(void) +{ + flush_workqueue(rport_event_queue); +} +EXPORT_SYMBOL(fc_rport_flush_queue); + +/** + * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator. + * @rdata: remote port private + * @spp_len: service parameter page length + * @rspp: received service parameter page + * @spp: response service parameter page + * + * Returns the value for the response code to be placed in spp_flags; + * Returns 0 if not an initiator. + */ +static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len, + const struct fc_els_spp *rspp, + struct fc_els_spp *spp) +{ + struct fc_lport *lport = rdata->local_port; + u32 fcp_parm; + + fcp_parm = ntohl(rspp->spp_params); + rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN; + if (fcp_parm & FCP_SPPF_INIT_FCN) + rdata->ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; + if (fcp_parm & FCP_SPPF_TARG_FCN) + rdata->ids.roles |= FC_RPORT_ROLE_FCP_TARGET; + if (fcp_parm & FCP_SPPF_RETRY) + rdata->flags |= FC_RP_FLAGS_RETRY; + rdata->supported_classes = FC_COS_CLASS3; + + if (!(lport->service_params & FCP_SPPF_INIT_FCN)) + return 0; + + spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR; + + /* + * OR in our service parameters with other providers (target), if any. + */ + fcp_parm = ntohl(spp->spp_params); + spp->spp_params = htonl(fcp_parm | lport->service_params); + return FC_SPP_RESP_ACK; +} + +/* + * FC-4 provider ops for FCP initiator. + */ +struct fc4_prov fc_rport_fcp_init = { + .prli = fc_rport_fcp_prli, +}; + +/** + * fc_rport_t0_prli() - Handle incoming PRLI parameters for type 0 + * @rdata: remote port private + * @spp_len: service parameter page length + * @rspp: received service parameter page + * @spp: response service parameter page + */ +static int fc_rport_t0_prli(struct fc_rport_priv *rdata, u32 spp_len, + const struct fc_els_spp *rspp, + struct fc_els_spp *spp) +{ + if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) + return FC_SPP_RESP_INVL; + return FC_SPP_RESP_ACK; +} + +/* + * FC-4 provider ops for type 0 service parameters. + * + * This handles the special case of type 0 which is always successful + * but doesn't do anything otherwise. 
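+ * The only check made is that the originator must not request an
+ * image pair.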
+ */ +struct fc4_prov fc_rport_t0_prov = { + .prli = fc_rport_t0_prli, +}; + +/** + * fc_setup_rport() - Initialize the rport_event_queue + */ +int fc_setup_rport(void) +{ + rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); + if (!rport_event_queue) + return -ENOMEM; + return 0; +} + +/** + * fc_destroy_rport() - Destroy the rport_event_queue + */ +void fc_destroy_rport(void) +{ + destroy_workqueue(rport_event_queue); +} + +/** + * fc_rport_terminate_io() - Stop all outstanding I/O on a remote port + * @rport: The remote port whose I/O should be terminated + */ +void fc_rport_terminate_io(struct fc_rport *rport) +{ + struct fc_rport_libfc_priv *rpriv = rport->dd_data; + struct fc_lport *lport = rpriv->local_port; + + lport->tt.exch_mgr_reset(lport, 0, rport->port_id); + lport->tt.exch_mgr_reset(lport, rport->port_id, 0); +} +EXPORT_SYMBOL(fc_rport_terminate_io); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c new file mode 100644 index 000000000..0fda8905e --- /dev/null +++ b/drivers/scsi/libiscsi.c @@ -0,0 +1,3934 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * iSCSI lib functions + * + * Copyright (C) 2006 Red Hat, Inc. All rights reserved. + * Copyright (C) 2004 - 2006 Mike Christie + * Copyright (C) 2004 - 2005 Dmitry Yusupov + * Copyright (C) 2004 - 2005 Alex Aizman + * maintained by open-iscsi@googlegroups.com + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int iscsi_dbg_lib_conn; +module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug_libiscsi_conn, + "Turn on debugging for connections in libiscsi module. " + "Set to 1 to turn on, and zero to turn off. Default is off."); + +static int iscsi_dbg_lib_session; +module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug_libiscsi_session, + "Turn on debugging for sessions in libiscsi module. " + "Set to 1 to turn on, and zero to turn off. Default is off."); + +static int iscsi_dbg_lib_eh; +module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug_libiscsi_eh, + "Turn on debugging for error handling in libiscsi module. " + "Set to 1 to turn on, and zero to turn off. Default is off."); + +#define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \ + do { \ + if (iscsi_dbg_lib_conn) \ + iscsi_conn_printk(KERN_INFO, _conn, \ + "%s " dbg_fmt, \ + __func__, ##arg); \ + iscsi_dbg_trace(trace_iscsi_dbg_conn, \ + &(_conn)->cls_conn->dev, \ + "%s " dbg_fmt, __func__, ##arg);\ + } while (0); + +#define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \ + do { \ + if (iscsi_dbg_lib_session) \ + iscsi_session_printk(KERN_INFO, _session, \ + "%s " dbg_fmt, \ + __func__, ##arg); \ + iscsi_dbg_trace(trace_iscsi_dbg_session, \ + &(_session)->cls_session->dev, \ + "%s " dbg_fmt, __func__, ##arg); \ + } while (0); + +#define ISCSI_DBG_EH(_session, dbg_fmt, arg...) 
\ + do { \ + if (iscsi_dbg_lib_eh) \ + iscsi_session_printk(KERN_INFO, _session, \ + "%s " dbg_fmt, \ + __func__, ##arg); \ + iscsi_dbg_trace(trace_iscsi_dbg_eh, \ + &(_session)->cls_session->dev, \ + "%s " dbg_fmt, __func__, ##arg); \ + } while (0); + +#define ISCSI_CMD_COMPL_WAIT 5 + +inline void iscsi_conn_queue_xmit(struct iscsi_conn *conn) +{ + struct Scsi_Host *shost = conn->session->host; + struct iscsi_host *ihost = shost_priv(shost); + + if (ihost->workq) + queue_work(ihost->workq, &conn->xmitwork); +} +EXPORT_SYMBOL_GPL(iscsi_conn_queue_xmit); + +inline void iscsi_conn_queue_recv(struct iscsi_conn *conn) +{ + struct Scsi_Host *shost = conn->session->host; + struct iscsi_host *ihost = shost_priv(shost); + + if (ihost->workq && !test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags)) + queue_work(ihost->workq, &conn->recvwork); +} +EXPORT_SYMBOL_GPL(iscsi_conn_queue_recv); + +static void __iscsi_update_cmdsn(struct iscsi_session *session, + uint32_t exp_cmdsn, uint32_t max_cmdsn) +{ + /* + * standard specifies this check for when to update expected and + * max sequence numbers + */ + if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1)) + return; + + if (exp_cmdsn != session->exp_cmdsn && + !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn)) + session->exp_cmdsn = exp_cmdsn; + + if (max_cmdsn != session->max_cmdsn && + !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) + session->max_cmdsn = max_cmdsn; +} + +void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) +{ + __iscsi_update_cmdsn(session, be32_to_cpu(hdr->exp_cmdsn), + be32_to_cpu(hdr->max_cmdsn)); +} +EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); + +/** + * iscsi_prep_data_out_pdu - initialize Data-Out + * @task: scsi command task + * @r2t: R2T info + * @hdr: iscsi data in pdu + * + * Notes: + * Initialize Data-Out within this R2T sequence and finds + * proper data_offset within this SCSI command. + * + * This function is called with connection lock taken. 
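+ *
+ * The caller is responsible for sending the PDU; this only fills in
+ * the Data-Out header, updates the R2T bookkeeping and bumps the
+ * Data-Out PDU counter.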
+ **/ +void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t, + struct iscsi_data *hdr) +{ + struct iscsi_conn *conn = task->conn; + unsigned int left = r2t->data_length - r2t->sent; + + task->hdr_len = sizeof(struct iscsi_data); + + memset(hdr, 0, sizeof(struct iscsi_data)); + hdr->ttt = r2t->ttt; + hdr->datasn = cpu_to_be32(r2t->datasn); + r2t->datasn++; + hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; + hdr->lun = task->lun; + hdr->itt = task->hdr_itt; + hdr->exp_statsn = r2t->exp_statsn; + hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent); + if (left > conn->max_xmit_dlength) { + hton24(hdr->dlength, conn->max_xmit_dlength); + r2t->data_count = conn->max_xmit_dlength; + hdr->flags = 0; + } else { + hton24(hdr->dlength, left); + r2t->data_count = left; + hdr->flags = ISCSI_FLAG_CMD_FINAL; + } + conn->dataout_pdus_cnt++; +} +EXPORT_SYMBOL_GPL(iscsi_prep_data_out_pdu); + +static int iscsi_add_hdr(struct iscsi_task *task, unsigned len) +{ + unsigned exp_len = task->hdr_len + len; + + if (exp_len > task->hdr_max) { + WARN_ON(1); + return -EINVAL; + } + + WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */ + task->hdr_len = exp_len; + return 0; +} + +/* + * make an extended cdb AHS + */ +static int iscsi_prep_ecdb_ahs(struct iscsi_task *task) +{ + struct scsi_cmnd *cmd = task->sc; + unsigned rlen, pad_len; + unsigned short ahslength; + struct iscsi_ecdb_ahdr *ecdb_ahdr; + int rc; + + ecdb_ahdr = iscsi_next_hdr(task); + rlen = cmd->cmd_len - ISCSI_CDB_SIZE; + + BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb)); + ahslength = rlen + sizeof(ecdb_ahdr->reserved); + + pad_len = iscsi_padding(rlen); + + rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) + + sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len); + if (rc) + return rc; + + if (pad_len) + memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len); + + ecdb_ahdr->ahslength = cpu_to_be16(ahslength); + ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB; + ecdb_ahdr->reserved = 0; + memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen); + + ISCSI_DBG_SESSION(task->conn->session, + "iscsi_prep_ecdb_ahs: varlen_cdb_len %d " + "rlen %d pad_len %d ahs_length %d iscsi_headers_size " + "%u\n", cmd->cmd_len, rlen, pad_len, ahslength, + task->hdr_len); + return 0; +} + +/** + * iscsi_check_tmf_restrictions - check if a task is affected by TMF + * @task: iscsi task + * @opcode: opcode to check for + * + * During TMF a task has to be checked if it's affected. + * All unrelated I/O can be passed through, but I/O to the + * affected LUN should be restricted. + * If 'fast_abort' is set we won't be sending any I/O to the + * affected LUN. + * Otherwise the target is waiting for all TTTs to be completed, + * so we have to send all outstanding Data-Out PDUs to the target. 
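+ *
+ * Returns 0 if the PDU may be sent and -EACCES if it must be failed.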
+ */ +static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) +{ + struct iscsi_session *session = task->conn->session; + struct iscsi_tm *tmf = &session->tmhdr; + u64 hdr_lun; + + if (session->tmf_state == TMF_INITIAL) + return 0; + + if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC) + return 0; + + switch (ISCSI_TM_FUNC_VALUE(tmf)) { + case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: + /* + * Allow PDUs for unrelated LUNs + */ + hdr_lun = scsilun_to_int(&tmf->lun); + if (hdr_lun != task->sc->device->lun) + return 0; + fallthrough; + case ISCSI_TM_FUNC_TARGET_WARM_RESET: + /* + * Fail all SCSI cmd PDUs + */ + if (opcode != ISCSI_OP_SCSI_DATA_OUT) { + iscsi_session_printk(KERN_INFO, session, + "task [op %x itt 0x%x/0x%x] rejected.\n", + opcode, task->itt, task->hdr_itt); + return -EACCES; + } + /* + * And also all data-out PDUs in response to R2T + * if fast_abort is set. + */ + if (session->fast_abort) { + iscsi_session_printk(KERN_INFO, session, + "task [op %x itt 0x%x/0x%x] fast abort.\n", + opcode, task->itt, task->hdr_itt); + return -EACCES; + } + break; + case ISCSI_TM_FUNC_ABORT_TASK: + /* + * the caller has already checked if the task + * they want to abort was in the pending queue so if + * we are here the cmd pdu has gone out already, and + * we will only hit this for data-outs + */ + if (opcode == ISCSI_OP_SCSI_DATA_OUT && + task->hdr_itt == tmf->rtt) { + ISCSI_DBG_SESSION(session, + "Preventing task %x/%x from sending " + "data-out due to abort task in " + "progress\n", task->itt, + task->hdr_itt); + return -EACCES; + } + break; + } + + return 0; +} + +/** + * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu + * @task: iscsi task + * + * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set + * fields like dlength or final based on how much data it sends + */ +static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + struct iscsi_session *session = conn->session; + struct scsi_cmnd *sc = task->sc; + struct iscsi_scsi_req *hdr; + unsigned hdrlength, cmd_len, transfer_length; + itt_t itt; + int rc; + + rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD); + if (rc) + return rc; + + if (conn->session->tt->alloc_pdu) { + rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); + if (rc) + return rc; + } + hdr = (struct iscsi_scsi_req *)task->hdr; + itt = hdr->itt; + memset(hdr, 0, sizeof(*hdr)); + + if (session->tt->parse_pdu_itt) + hdr->itt = task->hdr_itt = itt; + else + hdr->itt = task->hdr_itt = build_itt(task->itt, + task->conn->session->age); + task->hdr_len = 0; + rc = iscsi_add_hdr(task, sizeof(*hdr)); + if (rc) + return rc; + hdr->opcode = ISCSI_OP_SCSI_CMD; + hdr->flags = ISCSI_ATTR_SIMPLE; + int_to_scsilun(sc->device->lun, &hdr->lun); + task->lun = hdr->lun; + hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); + cmd_len = sc->cmd_len; + if (cmd_len < ISCSI_CDB_SIZE) + memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len); + else if (cmd_len > ISCSI_CDB_SIZE) { + rc = iscsi_prep_ecdb_ahs(task); + if (rc) + return rc; + cmd_len = ISCSI_CDB_SIZE; + } + memcpy(hdr->cdb, sc->cmnd, cmd_len); + + task->imm_count = 0; + if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) + task->protected = true; + + transfer_length = scsi_transfer_length(sc); + hdr->data_length = cpu_to_be32(transfer_length); + if (sc->sc_data_direction == DMA_TO_DEVICE) { + struct iscsi_r2t_info *r2t = &task->unsol_r2t; + + hdr->flags |= ISCSI_FLAG_CMD_WRITE; + /* + * Write counters: + * + * imm_count bytes to be sent right 
after + * SCSI PDU Header + * + * unsol_count bytes(as Data-Out) to be sent + * without R2T ack right after + * immediate data + * + * r2t data_length bytes to be sent via R2T ack's + * + * pad_count bytes to be sent as zero-padding + */ + memset(r2t, 0, sizeof(*r2t)); + + if (session->imm_data_en) { + if (transfer_length >= session->first_burst) + task->imm_count = min(session->first_burst, + conn->max_xmit_dlength); + else + task->imm_count = min(transfer_length, + conn->max_xmit_dlength); + hton24(hdr->dlength, task->imm_count); + } else + zero_data(hdr->dlength); + + if (!session->initial_r2t_en) { + r2t->data_length = min(session->first_burst, + transfer_length) - + task->imm_count; + r2t->data_offset = task->imm_count; + r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); + r2t->exp_statsn = cpu_to_be32(conn->exp_statsn); + } + + if (!task->unsol_r2t.data_length) + /* No unsolicit Data-Out's */ + hdr->flags |= ISCSI_FLAG_CMD_FINAL; + } else { + hdr->flags |= ISCSI_FLAG_CMD_FINAL; + zero_data(hdr->dlength); + + if (sc->sc_data_direction == DMA_FROM_DEVICE) + hdr->flags |= ISCSI_FLAG_CMD_READ; + } + + /* calculate size of additional header segments (AHSs) */ + hdrlength = task->hdr_len - sizeof(*hdr); + + WARN_ON(hdrlength & (ISCSI_PAD_LEN-1)); + hdrlength /= ISCSI_PAD_LEN; + + WARN_ON(hdrlength >= 256); + hdr->hlength = hdrlength & 0xFF; + hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn); + + if (session->tt->init_task && session->tt->init_task(task)) + return -EIO; + + task->state = ISCSI_TASK_RUNNING; + session->cmdsn++; + + conn->scsicmd_pdus_cnt++; + ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x " + "itt 0x%x len %d cmdsn %d win %d]\n", + sc->sc_data_direction == DMA_TO_DEVICE ? + "write" : "read", conn->id, sc, sc->cmnd[0], + task->itt, transfer_length, + session->cmdsn, + session->max_cmdsn - session->exp_cmdsn + 1); + return 0; +} + +/** + * iscsi_free_task - free a task + * @task: iscsi cmd task + * + * Must be called with session back_lock. + * This function returns the scsi command to scsi-ml or cleans + * up mgmt tasks then returns the task to the pool. + */ +static void iscsi_free_task(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + struct iscsi_session *session = conn->session; + struct scsi_cmnd *sc = task->sc; + int oldstate = task->state; + + ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n", + task->itt, task->state, task->sc); + + session->tt->cleanup_task(task); + task->state = ISCSI_TASK_FREE; + task->sc = NULL; + /* + * login task is preallocated so do not free + */ + if (conn->login_task == task) + return; + + kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*)); + + if (sc) { + /* SCSI eh reuses commands to verify us */ + iscsi_cmd(sc)->task = NULL; + /* + * queue command may call this to free the task, so + * it will decide how to return sc to scsi-ml. + */ + if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ) + scsi_done(sc); + } +} + +bool iscsi_get_task(struct iscsi_task *task) +{ + return refcount_inc_not_zero(&task->refcount); +} +EXPORT_SYMBOL_GPL(iscsi_get_task); + +/** + * __iscsi_put_task - drop the refcount on a task + * @task: iscsi_task to drop the refcount on + * + * The back_lock must be held when calling in case it frees the task. 
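+ * Use iscsi_put_task() instead when the back_lock is not already held.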
+ */ +void __iscsi_put_task(struct iscsi_task *task) +{ + if (refcount_dec_and_test(&task->refcount)) + iscsi_free_task(task); +} +EXPORT_SYMBOL_GPL(__iscsi_put_task); + +void iscsi_put_task(struct iscsi_task *task) +{ + struct iscsi_session *session = task->conn->session; + + if (refcount_dec_and_test(&task->refcount)) { + spin_lock_bh(&session->back_lock); + iscsi_free_task(task); + spin_unlock_bh(&session->back_lock); + } +} +EXPORT_SYMBOL_GPL(iscsi_put_task); + +/** + * iscsi_complete_task - finish a task + * @task: iscsi cmd task + * @state: state to complete task with + * + * Must be called with session back_lock. + */ +static void iscsi_complete_task(struct iscsi_task *task, int state) +{ + struct iscsi_conn *conn = task->conn; + + ISCSI_DBG_SESSION(conn->session, + "complete task itt 0x%x state %d sc %p\n", + task->itt, task->state, task->sc); + if (task->state == ISCSI_TASK_COMPLETED || + task->state == ISCSI_TASK_ABRT_TMF || + task->state == ISCSI_TASK_ABRT_SESS_RECOV || + task->state == ISCSI_TASK_REQUEUE_SCSIQ) + return; + WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); + task->state = state; + + if (READ_ONCE(conn->ping_task) == task) + WRITE_ONCE(conn->ping_task, NULL); + + /* release get from queueing */ + __iscsi_put_task(task); +} + +/** + * iscsi_complete_scsi_task - finish scsi task normally + * @task: iscsi task for scsi cmd + * @exp_cmdsn: expected cmd sn in cpu format + * @max_cmdsn: max cmd sn in cpu format + * + * This is used when drivers do not need or cannot perform + * lower level pdu processing. + * + * Called with session back_lock + */ +void iscsi_complete_scsi_task(struct iscsi_task *task, + uint32_t exp_cmdsn, uint32_t max_cmdsn) +{ + struct iscsi_conn *conn = task->conn; + + ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt); + + conn->last_recv = jiffies; + __iscsi_update_cmdsn(conn->session, exp_cmdsn, max_cmdsn); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); +} +EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task); + +/* + * Must be called with back and frwd lock + */ +static bool cleanup_queued_task(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + bool early_complete = false; + + /* + * We might have raced where we handled a R2T early and got a response + * but have not yet taken the task off the requeue list, then a TMF or + * recovery happened and so we can still see it here. + */ + if (task->state == ISCSI_TASK_COMPLETED) + early_complete = true; + + if (!list_empty(&task->running)) { + list_del_init(&task->running); + /* + * If it's on a list but still running this could be cleanup + * from a TMF or session recovery. 
+ */ + if (task->state == ISCSI_TASK_RUNNING || + task->state == ISCSI_TASK_COMPLETED) + __iscsi_put_task(task); + } + + if (conn->session->running_aborted_task == task) { + conn->session->running_aborted_task = NULL; + __iscsi_put_task(task); + } + + if (conn->task == task) { + conn->task = NULL; + __iscsi_put_task(task); + } + + return early_complete; +} + +/* + * session back and frwd lock must be held and if not called for a task that + * is still pending or from the xmit thread, then xmit thread must be suspended + */ +static void __fail_scsi_task(struct iscsi_task *task, int err) +{ + struct iscsi_conn *conn = task->conn; + struct scsi_cmnd *sc; + int state; + + if (cleanup_queued_task(task)) + return; + + if (task->state == ISCSI_TASK_PENDING) { + /* + * cmd never made it to the xmit thread, so we should not count + * the cmd in the sequencing + */ + conn->session->queued_cmdsn--; + /* it was never sent so just complete like normal */ + state = ISCSI_TASK_COMPLETED; + } else if (err == DID_TRANSPORT_DISRUPTED) + state = ISCSI_TASK_ABRT_SESS_RECOV; + else + state = ISCSI_TASK_ABRT_TMF; + + sc = task->sc; + sc->result = err << 16; + scsi_set_resid(sc, scsi_bufflen(sc)); + iscsi_complete_task(task, state); +} + +static void fail_scsi_task(struct iscsi_task *task, int err) +{ + struct iscsi_session *session = task->conn->session; + + spin_lock_bh(&session->back_lock); + __fail_scsi_task(task, err); + spin_unlock_bh(&session->back_lock); +} + +static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, + struct iscsi_task *task) +{ + struct iscsi_session *session = conn->session; + struct iscsi_hdr *hdr = task->hdr; + struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr; + uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK; + + if (conn->session->state == ISCSI_STATE_LOGGING_OUT) + return -ENOTCONN; + + if (opcode != ISCSI_OP_LOGIN && opcode != ISCSI_OP_TEXT) + nop->exp_statsn = cpu_to_be32(conn->exp_statsn); + /* + * pre-format CmdSN for outgoing PDU. + */ + nop->cmdsn = cpu_to_be32(session->cmdsn); + if (hdr->itt != RESERVED_ITT) { + /* + * TODO: We always use immediate for normal session pdus. + * If we start to send tmfs or nops as non-immediate then + * we should start checking the cmdsn numbers for mgmt tasks. + * + * During discovery sessions iscsid sends TEXT as non immediate, + * but we always only send one PDU at a time. + */ + if (conn->c_stage == ISCSI_CONN_STARTED && + !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { + session->queued_cmdsn++; + session->cmdsn++; + } + } + + if (session->tt->init_task && session->tt->init_task(task)) + return -EIO; + + if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) + session->state = ISCSI_STATE_LOGGING_OUT; + + task->state = ISCSI_TASK_RUNNING; + ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x " + "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK, + hdr->itt, task->data_count); + return 0; +} + +/** + * iscsi_alloc_mgmt_task - allocate and setup a mgmt task. + * @conn: iscsi conn that the task will be sent on. + * @hdr: iscsi pdu that will be sent. + * @data: buffer for data segment if needed. + * @data_size: length of data in bytes. 
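+ *
+ * Returns the prepared task with a reference held, or NULL on failure.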
+ */ +static struct iscsi_task * +iscsi_alloc_mgmt_task(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + char *data, uint32_t data_size) +{ + struct iscsi_session *session = conn->session; + uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK; + struct iscsi_task *task; + itt_t itt; + + if (session->state == ISCSI_STATE_TERMINATE || + !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags)) + return NULL; + + if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) { + /* + * Login and Text are sent serially, in + * request-followed-by-response sequence. + * Same task can be used. Same ITT must be used. + * Note that login_task is preallocated at conn_create(). + */ + if (conn->login_task->state != ISCSI_TASK_FREE) { + iscsi_conn_printk(KERN_ERR, conn, "Login/Text in " + "progress. Cannot start new task.\n"); + return NULL; + } + + if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) { + iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN); + return NULL; + } + + task = conn->login_task; + } else { + if (session->state != ISCSI_STATE_LOGGED_IN) + return NULL; + + if (data_size != 0) { + iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode); + return NULL; + } + + BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); + BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); + + if (!kfifo_out(&session->cmdpool.queue, + (void*)&task, sizeof(void*))) + return NULL; + } + /* + * released in complete pdu for task we expect a response for, and + * released by the lld when it has transmitted the task for + * pdus we do not expect a response for. + */ + refcount_set(&task->refcount, 1); + task->conn = conn; + task->sc = NULL; + INIT_LIST_HEAD(&task->running); + task->state = ISCSI_TASK_PENDING; + + if (data_size) { + memcpy(task->data, data, data_size); + task->data_count = data_size; + } else + task->data_count = 0; + + if (conn->session->tt->alloc_pdu) { + if (conn->session->tt->alloc_pdu(task, hdr->opcode)) { + iscsi_conn_printk(KERN_ERR, conn, "Could not allocate " + "pdu for mgmt task.\n"); + goto free_task; + } + } + + itt = task->hdr->itt; + task->hdr_len = sizeof(struct iscsi_hdr); + memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); + + if (hdr->itt != RESERVED_ITT) { + if (session->tt->parse_pdu_itt) + task->hdr->itt = itt; + else + task->hdr->itt = build_itt(task->itt, + task->conn->session->age); + } + + return task; + +free_task: + iscsi_put_task(task); + return NULL; +} + +/** + * iscsi_send_mgmt_task - Send task created with iscsi_alloc_mgmt_task. + * @task: iscsi task to send. 
+ * + * On failure this returns a non-zero error code, and the driver must free + * the task with iscsi_put_task; + */ +static int iscsi_send_mgmt_task(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + struct iscsi_session *session = conn->session; + struct iscsi_host *ihost = shost_priv(conn->session->host); + int rc = 0; + + if (!ihost->workq) { + rc = iscsi_prep_mgmt_task(conn, task); + if (rc) + return rc; + + rc = session->tt->xmit_task(task); + if (rc) + return rc; + } else { + list_add_tail(&task->running, &conn->mgmtqueue); + iscsi_conn_queue_xmit(conn); + } + + return 0; +} + +static int __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + char *data, uint32_t data_size) +{ + struct iscsi_task *task; + int rc; + + task = iscsi_alloc_mgmt_task(conn, hdr, data, data_size); + if (!task) + return -ENOMEM; + + rc = iscsi_send_mgmt_task(task); + if (rc) + iscsi_put_task(task); + return rc; +} + +int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, + char *data, uint32_t data_size) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_session *session = conn->session; + int err = 0; + + spin_lock_bh(&session->frwd_lock); + if (__iscsi_conn_send_pdu(conn, hdr, data, data_size)) + err = -EPERM; + spin_unlock_bh(&session->frwd_lock); + return err; +} +EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu); + +/** + * iscsi_scsi_cmd_rsp - SCSI Command Response processing + * @conn: iscsi connection + * @hdr: iscsi header + * @task: scsi command task + * @data: cmd data buffer + * @datalen: len of buffer + * + * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and + * then completes the command and task. called under back_lock + **/ +static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + struct iscsi_task *task, char *data, + int datalen) +{ + struct iscsi_scsi_rsp *rhdr = (struct iscsi_scsi_rsp *)hdr; + struct iscsi_session *session = conn->session; + struct scsi_cmnd *sc = task->sc; + + iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); + conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; + + sc->result = (DID_OK << 16) | rhdr->cmd_status; + + if (task->protected) { + sector_t sector; + u8 ascq; + + /** + * Transports that didn't implement check_protection + * callback but still published T10-PI support to scsi-mid + * deserve this BUG_ON. 
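+ * check_protection() reports the failing sector and an ASCQ,
+ * which are used below to build the sense data returned to the
+ * midlayer.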
+ **/ + BUG_ON(!session->tt->check_protection); + + ascq = session->tt->check_protection(task, §or); + if (ascq) { + scsi_build_sense(sc, 1, ILLEGAL_REQUEST, 0x10, ascq); + scsi_set_sense_information(sc->sense_buffer, + SCSI_SENSE_BUFFERSIZE, + sector); + goto out; + } + } + + if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) { + sc->result = DID_ERROR << 16; + goto out; + } + + if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION) { + uint16_t senselen; + + if (datalen < 2) { +invalid_datalen: + iscsi_conn_printk(KERN_ERR, conn, + "Got CHECK_CONDITION but invalid data " + "buffer size of %d\n", datalen); + sc->result = DID_BAD_TARGET << 16; + goto out; + } + + senselen = get_unaligned_be16(data); + if (datalen < senselen) + goto invalid_datalen; + + memcpy(sc->sense_buffer, data + 2, + min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE)); + ISCSI_DBG_SESSION(session, "copied %d bytes of sense\n", + min_t(uint16_t, senselen, + SCSI_SENSE_BUFFERSIZE)); + } + + if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW | + ISCSI_FLAG_CMD_BIDI_OVERFLOW)) { + sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; + } + + if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW | + ISCSI_FLAG_CMD_OVERFLOW)) { + int res_count = be32_to_cpu(rhdr->residual_count); + + if (res_count > 0 && + (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW || + res_count <= scsi_bufflen(sc))) + /* write side for bidi or uni-io set_resid */ + scsi_set_resid(sc, res_count); + else + sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; + } +out: + ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n", + sc, sc->result, task->itt); + conn->scsirsp_pdus_cnt++; + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); +} + +/** + * iscsi_data_in_rsp - SCSI Data-In Response processing + * @conn: iscsi connection + * @hdr: iscsi pdu + * @task: scsi command task + * + * iscsi_data_in_rsp sets up the scsi_cmnd fields based on the data received + * then completes the command and task. 
called under back_lock + **/ +static void +iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + struct iscsi_task *task) +{ + struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr; + struct scsi_cmnd *sc = task->sc; + + if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) + return; + + iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr); + sc->result = (DID_OK << 16) | rhdr->cmd_status; + conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; + if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW | + ISCSI_FLAG_DATA_OVERFLOW)) { + int res_count = be32_to_cpu(rhdr->residual_count); + + if (res_count > 0 && + (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW || + res_count <= sc->sdb.length)) + scsi_set_resid(sc, res_count); + else + sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; + } + + ISCSI_DBG_SESSION(conn->session, "data in with status done " + "[sc %p res %d itt 0x%x]\n", + sc, sc->result, task->itt); + conn->scsirsp_pdus_cnt++; + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); +} + +static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) +{ + struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr; + struct iscsi_session *session = conn->session; + + conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; + conn->tmfrsp_pdus_cnt++; + + if (session->tmf_state != TMF_QUEUED) + return; + + if (tmf->response == ISCSI_TMF_RSP_COMPLETE) + session->tmf_state = TMF_SUCCESS; + else if (tmf->response == ISCSI_TMF_RSP_NO_TASK) + session->tmf_state = TMF_NOT_FOUND; + else + session->tmf_state = TMF_FAILED; + wake_up(&session->ehwait); +} + +static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) +{ + struct iscsi_nopout hdr; + struct iscsi_task *task; + + if (!rhdr) { + if (READ_ONCE(conn->ping_task)) + return -EINVAL; + } + + memset(&hdr, 0, sizeof(struct iscsi_nopout)); + hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; + hdr.flags = ISCSI_FLAG_CMD_FINAL; + + if (rhdr) { + hdr.lun = rhdr->lun; + hdr.ttt = rhdr->ttt; + hdr.itt = RESERVED_ITT; + } else + hdr.ttt = RESERVED_ITT; + + task = iscsi_alloc_mgmt_task(conn, (struct iscsi_hdr *)&hdr, NULL, 0); + if (!task) + return -ENOMEM; + + if (!rhdr) + WRITE_ONCE(conn->ping_task, task); + + if (iscsi_send_mgmt_task(task)) { + if (!rhdr) + WRITE_ONCE(conn->ping_task, NULL); + iscsi_put_task(task); + + iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); + return -EIO; + } else if (!rhdr) { + /* only track our nops */ + conn->last_ping = jiffies; + } + + return 0; +} + +/** + * iscsi_nop_out_rsp - SCSI NOP Response processing + * @task: scsi command task + * @nop: the nop structure + * @data: where to put the data + * @datalen: length of data + * + * iscsi_nop_out_rsp handles nop response from use or + * from user space. called under back_lock + **/ +static int iscsi_nop_out_rsp(struct iscsi_task *task, + struct iscsi_nopin *nop, char *data, int datalen) +{ + struct iscsi_conn *conn = task->conn; + int rc = 0; + + if (READ_ONCE(conn->ping_task) != task) { + /* + * If this is not in response to one of our + * nops then it must be from userspace. 
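+ * Hand it back to userspace via iscsi_recv_pdu() and flag a
+ * connection error if that delivery fails.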
+ */ + if (iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *)nop, + data, datalen)) + rc = ISCSI_ERR_CONN_FAILED; + } else + mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); + return rc; +} + +static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + char *data, int datalen) +{ + struct iscsi_reject *reject = (struct iscsi_reject *)hdr; + struct iscsi_hdr rejected_pdu; + int opcode, rc = 0; + + conn->exp_statsn = be32_to_cpu(reject->statsn) + 1; + + if (ntoh24(reject->dlength) > datalen || + ntoh24(reject->dlength) < sizeof(struct iscsi_hdr)) { + iscsi_conn_printk(KERN_ERR, conn, "Cannot handle rejected " + "pdu. Invalid data length (pdu dlength " + "%u, datalen %d\n", ntoh24(reject->dlength), + datalen); + return ISCSI_ERR_PROTO; + } + memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr)); + opcode = rejected_pdu.opcode & ISCSI_OPCODE_MASK; + + switch (reject->reason) { + case ISCSI_REASON_DATA_DIGEST_ERROR: + iscsi_conn_printk(KERN_ERR, conn, + "pdu (op 0x%x itt 0x%x) rejected " + "due to DataDigest error.\n", + opcode, rejected_pdu.itt); + break; + case ISCSI_REASON_IMM_CMD_REJECT: + iscsi_conn_printk(KERN_ERR, conn, + "pdu (op 0x%x itt 0x%x) rejected. Too many " + "immediate commands.\n", + opcode, rejected_pdu.itt); + /* + * We only send one TMF at a time so if the target could not + * handle it, then it should get fixed (RFC mandates that + * a target can handle one immediate TMF per conn). + * + * For nops-outs, we could have sent more than one if + * the target is sending us lots of nop-ins + */ + if (opcode != ISCSI_OP_NOOP_OUT) + return 0; + + if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) { + /* + * nop-out in response to target's nop-out rejected. + * Just resend. + */ + /* In RX path we are under back lock */ + spin_unlock(&conn->session->back_lock); + spin_lock(&conn->session->frwd_lock); + iscsi_send_nopout(conn, + (struct iscsi_nopin*)&rejected_pdu); + spin_unlock(&conn->session->frwd_lock); + spin_lock(&conn->session->back_lock); + } else { + struct iscsi_task *task; + /* + * Our nop as ping got dropped. We know the target + * and transport are ok so just clean up + */ + task = iscsi_itt_to_task(conn, rejected_pdu.itt); + if (!task) { + iscsi_conn_printk(KERN_ERR, conn, + "Invalid pdu reject. Could " + "not lookup rejected task.\n"); + rc = ISCSI_ERR_BAD_ITT; + } else + rc = iscsi_nop_out_rsp(task, + (struct iscsi_nopin*)&rejected_pdu, + NULL, 0); + } + break; + default: + iscsi_conn_printk(KERN_ERR, conn, + "pdu (op 0x%x itt 0x%x) rejected. Reason " + "code 0x%x\n", rejected_pdu.opcode, + rejected_pdu.itt, reject->reason); + break; + } + return rc; +} + +/** + * iscsi_itt_to_task - look up task by itt + * @conn: iscsi connection + * @itt: itt + * + * This should be used for mgmt tasks like login and nops, or if + * the LDD's itt space does not include the session age. + * + * The session back_lock must be held. 
+ */ +struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) +{ + struct iscsi_session *session = conn->session; + int i; + + if (itt == RESERVED_ITT) + return NULL; + + if (session->tt->parse_pdu_itt) + session->tt->parse_pdu_itt(conn, itt, &i, NULL); + else + i = get_itt(itt); + if (i >= session->cmds_max) + return NULL; + + return session->cmds[i]; +} +EXPORT_SYMBOL_GPL(iscsi_itt_to_task); + +/** + * __iscsi_complete_pdu - complete pdu + * @conn: iscsi conn + * @hdr: iscsi header + * @data: data buffer + * @datalen: len of data buffer + * + * Completes pdu processing by freeing any resources allocated at + * queuecommand or send generic. session back_lock must be held and verify + * itt must have been called. + */ +int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + char *data, int datalen) +{ + struct iscsi_session *session = conn->session; + int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0; + struct iscsi_task *task; + uint32_t itt; + + conn->last_recv = jiffies; + rc = iscsi_verify_itt(conn, hdr->itt); + if (rc) + return rc; + + if (hdr->itt != RESERVED_ITT) + itt = get_itt(hdr->itt); + else + itt = ~0U; + + ISCSI_DBG_SESSION(session, "[op 0x%x cid %d itt 0x%x len %d]\n", + opcode, conn->id, itt, datalen); + + if (itt == ~0U) { + iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); + + switch(opcode) { + case ISCSI_OP_NOOP_IN: + if (datalen) { + rc = ISCSI_ERR_PROTO; + break; + } + + if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG)) + break; + + /* In RX path we are under back lock */ + spin_unlock(&session->back_lock); + spin_lock(&session->frwd_lock); + iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr); + spin_unlock(&session->frwd_lock); + spin_lock(&session->back_lock); + break; + case ISCSI_OP_REJECT: + rc = iscsi_handle_reject(conn, hdr, data, datalen); + break; + case ISCSI_OP_ASYNC_EVENT: + conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; + if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) + rc = ISCSI_ERR_CONN_FAILED; + break; + default: + rc = ISCSI_ERR_BAD_OPCODE; + break; + } + goto out; + } + + switch(opcode) { + case ISCSI_OP_SCSI_CMD_RSP: + case ISCSI_OP_SCSI_DATA_IN: + task = iscsi_itt_to_ctask(conn, hdr->itt); + if (!task) + return ISCSI_ERR_BAD_ITT; + task->last_xfer = jiffies; + break; + case ISCSI_OP_R2T: + /* + * LLD handles R2Ts if they need to. 
+ */ + return 0; + case ISCSI_OP_LOGOUT_RSP: + case ISCSI_OP_LOGIN_RSP: + case ISCSI_OP_TEXT_RSP: + case ISCSI_OP_SCSI_TMFUNC_RSP: + case ISCSI_OP_NOOP_IN: + task = iscsi_itt_to_task(conn, hdr->itt); + if (!task) + return ISCSI_ERR_BAD_ITT; + break; + default: + return ISCSI_ERR_BAD_OPCODE; + } + + switch(opcode) { + case ISCSI_OP_SCSI_CMD_RSP: + iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen); + break; + case ISCSI_OP_SCSI_DATA_IN: + iscsi_data_in_rsp(conn, hdr, task); + break; + case ISCSI_OP_LOGOUT_RSP: + iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); + if (datalen) { + rc = ISCSI_ERR_PROTO; + break; + } + conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; + goto recv_pdu; + case ISCSI_OP_LOGIN_RSP: + case ISCSI_OP_TEXT_RSP: + iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); + /* + * login related PDU's exp_statsn is handled in + * userspace + */ + goto recv_pdu; + case ISCSI_OP_SCSI_TMFUNC_RSP: + iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); + if (datalen) { + rc = ISCSI_ERR_PROTO; + break; + } + + iscsi_tmf_rsp(conn, hdr); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); + break; + case ISCSI_OP_NOOP_IN: + iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); + if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) { + rc = ISCSI_ERR_PROTO; + break; + } + conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; + + rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr, + data, datalen); + break; + default: + rc = ISCSI_ERR_BAD_OPCODE; + break; + } + +out: + return rc; +recv_pdu: + if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) + rc = ISCSI_ERR_CONN_FAILED; + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); + return rc; +} +EXPORT_SYMBOL_GPL(__iscsi_complete_pdu); + +int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + char *data, int datalen) +{ + int rc; + + spin_lock(&conn->session->back_lock); + rc = __iscsi_complete_pdu(conn, hdr, data, datalen); + spin_unlock(&conn->session->back_lock); + return rc; +} +EXPORT_SYMBOL_GPL(iscsi_complete_pdu); + +int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt) +{ + struct iscsi_session *session = conn->session; + int age = 0, i = 0; + + if (itt == RESERVED_ITT) + return 0; + + if (session->tt->parse_pdu_itt) + session->tt->parse_pdu_itt(conn, itt, &i, &age); + else { + i = get_itt(itt); + age = ((__force u32)itt >> ISCSI_AGE_SHIFT) & ISCSI_AGE_MASK; + } + + if (age != session->age) { + iscsi_conn_printk(KERN_ERR, conn, + "received itt %x expected session age (%x)\n", + (__force u32)itt, session->age); + return ISCSI_ERR_BAD_ITT; + } + + if (i >= session->cmds_max) { + iscsi_conn_printk(KERN_ERR, conn, + "received invalid itt index %u (max cmds " + "%u.\n", i, session->cmds_max); + return ISCSI_ERR_BAD_ITT; + } + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_verify_itt); + +/** + * iscsi_itt_to_ctask - look up ctask by itt + * @conn: iscsi connection + * @itt: itt + * + * This should be used for cmd tasks. + * + * The session back_lock must be held. 
+ */ +struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt) +{ + struct iscsi_task *task; + + if (iscsi_verify_itt(conn, itt)) + return NULL; + + task = iscsi_itt_to_task(conn, itt); + if (!task || !task->sc) + return NULL; + + if (iscsi_cmd(task->sc)->age != conn->session->age) { + iscsi_session_printk(KERN_ERR, conn->session, + "task's session age %d, expected %d\n", + iscsi_cmd(task->sc)->age, conn->session->age); + return NULL; + } + + return task; +} +EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask); + +void iscsi_session_failure(struct iscsi_session *session, + enum iscsi_err err) +{ + struct iscsi_conn *conn; + + spin_lock_bh(&session->frwd_lock); + conn = session->leadconn; + if (session->state == ISCSI_STATE_TERMINATE || !conn) { + spin_unlock_bh(&session->frwd_lock); + return; + } + + iscsi_get_conn(conn->cls_conn); + spin_unlock_bh(&session->frwd_lock); + /* + * if the host is being removed bypass the connection + * recovery initialization because we are going to kill + * the session. + */ + if (err == ISCSI_ERR_INVALID_HOST) + iscsi_conn_error_event(conn->cls_conn, err); + else + iscsi_conn_failure(conn, err); + iscsi_put_conn(conn->cls_conn); +} +EXPORT_SYMBOL_GPL(iscsi_session_failure); + +static bool iscsi_set_conn_failed(struct iscsi_conn *conn) +{ + struct iscsi_session *session = conn->session; + + if (session->state == ISCSI_STATE_FAILED) + return false; + + if (conn->stop_stage == 0) + session->state = ISCSI_STATE_FAILED; + + set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); + set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); + return true; +} + +void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) +{ + struct iscsi_session *session = conn->session; + bool needs_evt; + + spin_lock_bh(&session->frwd_lock); + needs_evt = iscsi_set_conn_failed(conn); + spin_unlock_bh(&session->frwd_lock); + + if (needs_evt) + iscsi_conn_error_event(conn->cls_conn, err); +} +EXPORT_SYMBOL_GPL(iscsi_conn_failure); + +static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn) +{ + struct iscsi_session *session = conn->session; + + /* + * Check for iSCSI window and take care of CmdSN wrap-around + */ + if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) { + ISCSI_DBG_SESSION(session, "iSCSI CmdSN closed. ExpCmdSn " + "%u MaxCmdSN %u CmdSN %u/%u\n", + session->exp_cmdsn, session->max_cmdsn, + session->cmdsn, session->queued_cmdsn); + return -ENOSPC; + } + return 0; +} + +static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task, + bool was_requeue) +{ + int rc; + + if (!conn->task) { + /* + * Take a ref so we can access it after xmit_task(). + * + * This should never fail because the failure paths will have + * stopped the xmit thread. + */ + if (!iscsi_get_task(task)) { + WARN_ON_ONCE(1); + return 0; + } + } else { + /* Already have a ref from when we failed to send it last call */ + conn->task = NULL; + } + + /* + * If this was a requeue for a R2T we have an extra ref on the task in + * case a bad target sends a cmd rsp before we have handled the task. + */ + if (was_requeue) + iscsi_put_task(task); + + /* + * Do this after dropping the extra ref because if this was a requeue + * it's removed from that list and cleanup_queued_task would miss it. + */ + if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) { + /* + * Save the task and ref in case we weren't cleaning up this + * task and get woken up again. 
+ */ + conn->task = task; + return -ENODATA; + } + + spin_unlock_bh(&conn->session->frwd_lock); + rc = conn->session->tt->xmit_task(task); + spin_lock_bh(&conn->session->frwd_lock); + if (!rc) { + /* done with this task */ + task->last_xfer = jiffies; + } else { + /* + * get an extra ref that is released next time we access it + * as conn->task above. + */ + iscsi_get_task(task); + conn->task = task; + } + + iscsi_put_task(task); + return rc; +} + +/** + * iscsi_requeue_task - requeue task to run from session workqueue + * @task: task to requeue + * + * Callers must have taken a ref to the task that is going to be requeued. + */ +void iscsi_requeue_task(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + + /* + * this may be on the requeue list already if the xmit_task callout + * is handling the r2ts while we are adding new ones + */ + spin_lock_bh(&conn->session->frwd_lock); + if (list_empty(&task->running)) { + list_add_tail(&task->running, &conn->requeue); + } else { + /* + * Don't need the extra ref since it's already requeued and + * has a ref. + */ + iscsi_put_task(task); + } + iscsi_conn_queue_xmit(conn); + spin_unlock_bh(&conn->session->frwd_lock); +} +EXPORT_SYMBOL_GPL(iscsi_requeue_task); + +/** + * iscsi_data_xmit - xmit any command into the scheduled connection + * @conn: iscsi connection + * + * Notes: + * The function can return -EAGAIN in which case the caller must + * re-schedule it again later or recover. '0' return code means + * successful xmit. + **/ +static int iscsi_data_xmit(struct iscsi_conn *conn) +{ + struct iscsi_task *task; + int rc = 0; + + spin_lock_bh(&conn->session->frwd_lock); + if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) { + ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n"); + spin_unlock_bh(&conn->session->frwd_lock); + return -ENODATA; + } + + if (conn->task) { + rc = iscsi_xmit_task(conn, conn->task, false); + if (rc) + goto done; + } + + /* + * process mgmt pdus like nops before commands since we should + * only have one nop-out as a ping from us and targets should not + * overflow us with nop-ins + */ +check_mgmt: + while (!list_empty(&conn->mgmtqueue)) { + task = list_entry(conn->mgmtqueue.next, struct iscsi_task, + running); + list_del_init(&task->running); + if (iscsi_prep_mgmt_task(conn, task)) { + /* regular RX path uses back_lock */ + spin_lock_bh(&conn->session->back_lock); + __iscsi_put_task(task); + spin_unlock_bh(&conn->session->back_lock); + continue; + } + rc = iscsi_xmit_task(conn, task, false); + if (rc) + goto done; + } + +check_requeue: + while (!list_empty(&conn->requeue)) { + /* + * we always do fastlogout - conn stop code will clean up. 
+ */ + if (conn->session->state == ISCSI_STATE_LOGGING_OUT) + break; + + task = list_entry(conn->requeue.next, struct iscsi_task, + running); + + if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT)) + break; + + list_del_init(&task->running); + rc = iscsi_xmit_task(conn, task, true); + if (rc) + goto done; + if (!list_empty(&conn->mgmtqueue)) + goto check_mgmt; + } + + /* process pending command queue */ + while (!list_empty(&conn->cmdqueue)) { + task = list_entry(conn->cmdqueue.next, struct iscsi_task, + running); + list_del_init(&task->running); + if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { + fail_scsi_task(task, DID_IMM_RETRY); + continue; + } + rc = iscsi_prep_scsi_cmd_pdu(task); + if (rc) { + if (rc == -ENOMEM || rc == -EACCES) + fail_scsi_task(task, DID_IMM_RETRY); + else + fail_scsi_task(task, DID_ABORT); + continue; + } + rc = iscsi_xmit_task(conn, task, false); + if (rc) + goto done; + /* + * we could continuously get new task requests so + * we need to check the mgmt queue for nops that need to + * be sent to aviod starvation + */ + if (!list_empty(&conn->mgmtqueue)) + goto check_mgmt; + if (!list_empty(&conn->requeue)) + goto check_requeue; + } + + spin_unlock_bh(&conn->session->frwd_lock); + return -ENODATA; + +done: + spin_unlock_bh(&conn->session->frwd_lock); + return rc; +} + +static void iscsi_xmitworker(struct work_struct *work) +{ + struct iscsi_conn *conn = + container_of(work, struct iscsi_conn, xmitwork); + int rc; + /* + * serialize Xmit worker on a per-connection basis. + */ + do { + rc = iscsi_data_xmit(conn); + } while (rc >= 0 || rc == -EAGAIN); +} + +static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn, + struct scsi_cmnd *sc) +{ + struct iscsi_task *task; + + if (!kfifo_out(&conn->session->cmdpool.queue, + (void *) &task, sizeof(void *))) + return NULL; + + iscsi_cmd(sc)->age = conn->session->age; + iscsi_cmd(sc)->task = task; + + refcount_set(&task->refcount, 1); + task->state = ISCSI_TASK_PENDING; + task->conn = conn; + task->sc = sc; + task->have_checked_conn = false; + task->last_timeout = jiffies; + task->last_xfer = jiffies; + task->protected = false; + INIT_LIST_HEAD(&task->running); + return task; +} + +enum { + FAILURE_BAD_HOST = 1, + FAILURE_SESSION_FAILED, + FAILURE_SESSION_FREED, + FAILURE_WINDOW_CLOSED, + FAILURE_OOM, + FAILURE_SESSION_TERMINATE, + FAILURE_SESSION_IN_RECOVERY, + FAILURE_SESSION_RECOVERY_TIMEOUT, + FAILURE_SESSION_LOGGING_OUT, + FAILURE_SESSION_NOT_READY, +}; + +int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) +{ + struct iscsi_cls_session *cls_session; + struct iscsi_host *ihost; + int reason = 0; + struct iscsi_session *session; + struct iscsi_conn *conn; + struct iscsi_task *task = NULL; + + sc->result = 0; + iscsi_cmd(sc)->task = NULL; + + ihost = shost_priv(host); + + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; + spin_lock_bh(&session->frwd_lock); + + reason = iscsi_session_chkready(cls_session); + if (reason) { + sc->result = reason; + goto fault; + } + + if (session->state != ISCSI_STATE_LOGGED_IN) { + /* + * to handle the race between when we set the recovery state + * and block the session we requeue here (commands could + * be entering our queuecommand while a block is starting + * up because the block code is not locked) + */ + switch (session->state) { + case ISCSI_STATE_FAILED: + /* + * cmds should fail during shutdown, if the session + * state is bad, allowing completion to happen + */ + if 
(unlikely(system_state != SYSTEM_RUNNING)) { + reason = FAILURE_SESSION_FAILED; + sc->result = DID_NO_CONNECT << 16; + break; + } + fallthrough; + case ISCSI_STATE_IN_RECOVERY: + reason = FAILURE_SESSION_IN_RECOVERY; + sc->result = DID_IMM_RETRY << 16; + break; + case ISCSI_STATE_LOGGING_OUT: + reason = FAILURE_SESSION_LOGGING_OUT; + sc->result = DID_IMM_RETRY << 16; + break; + case ISCSI_STATE_RECOVERY_FAILED: + reason = FAILURE_SESSION_RECOVERY_TIMEOUT; + sc->result = DID_TRANSPORT_FAILFAST << 16; + break; + case ISCSI_STATE_TERMINATE: + reason = FAILURE_SESSION_TERMINATE; + sc->result = DID_NO_CONNECT << 16; + break; + default: + reason = FAILURE_SESSION_FREED; + sc->result = DID_NO_CONNECT << 16; + } + goto fault; + } + + conn = session->leadconn; + if (!conn) { + reason = FAILURE_SESSION_FREED; + sc->result = DID_NO_CONNECT << 16; + goto fault; + } + + if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) { + reason = FAILURE_SESSION_IN_RECOVERY; + sc->result = DID_REQUEUE << 16; + goto fault; + } + + if (iscsi_check_cmdsn_window_closed(conn)) { + reason = FAILURE_WINDOW_CLOSED; + goto reject; + } + + task = iscsi_alloc_task(conn, sc); + if (!task) { + reason = FAILURE_OOM; + goto reject; + } + + if (!ihost->workq) { + reason = iscsi_prep_scsi_cmd_pdu(task); + if (reason) { + if (reason == -ENOMEM || reason == -EACCES) { + reason = FAILURE_OOM; + goto prepd_reject; + } else { + sc->result = DID_ABORT << 16; + goto prepd_fault; + } + } + if (session->tt->xmit_task(task)) { + session->cmdsn--; + reason = FAILURE_SESSION_NOT_READY; + goto prepd_reject; + } + } else { + list_add_tail(&task->running, &conn->cmdqueue); + iscsi_conn_queue_xmit(conn); + } + + session->queued_cmdsn++; + spin_unlock_bh(&session->frwd_lock); + return 0; + +prepd_reject: + spin_lock_bh(&session->back_lock); + iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); + spin_unlock_bh(&session->back_lock); +reject: + spin_unlock_bh(&session->frwd_lock); + ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", + sc->cmnd[0], reason); + return SCSI_MLQUEUE_TARGET_BUSY; + +prepd_fault: + spin_lock_bh(&session->back_lock); + iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); + spin_unlock_bh(&session->back_lock); +fault: + spin_unlock_bh(&session->frwd_lock); + ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", + sc->cmnd[0], reason); + scsi_set_resid(sc, scsi_bufflen(sc)); + scsi_done(sc); + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_queuecommand); + +int iscsi_target_alloc(struct scsi_target *starget) +{ + struct iscsi_cls_session *cls_session = starget_to_session(starget); + struct iscsi_session *session = cls_session->dd_data; + + starget->can_queue = session->scsi_cmds_max; + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_target_alloc); + +static void iscsi_tmf_timedout(struct timer_list *t) +{ + struct iscsi_session *session = from_timer(session, t, tmf_timer); + + spin_lock(&session->frwd_lock); + if (session->tmf_state == TMF_QUEUED) { + session->tmf_state = TMF_TIMEDOUT; + ISCSI_DBG_EH(session, "tmf timedout\n"); + /* unblock eh_abort() */ + wake_up(&session->ehwait); + } + spin_unlock(&session->frwd_lock); +} + +static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, + struct iscsi_tm *hdr, int age, + int timeout) + __must_hold(&session->frwd_lock) +{ + struct iscsi_session *session = conn->session; + + if (__iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0)) { + spin_unlock_bh(&session->frwd_lock); + iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n"); + iscsi_conn_failure(conn, 
ISCSI_ERR_CONN_FAILED); + spin_lock_bh(&session->frwd_lock); + return -EPERM; + } + conn->tmfcmd_pdus_cnt++; + session->tmf_timer.expires = timeout * HZ + jiffies; + add_timer(&session->tmf_timer); + ISCSI_DBG_EH(session, "tmf set timeout\n"); + + spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); + + /* + * block eh thread until: + * + * 1) tmf response + * 2) tmf timeout + * 3) session is terminated or restarted or userspace has + * given up on recovery + */ + wait_event_interruptible(session->ehwait, age != session->age || + session->state != ISCSI_STATE_LOGGED_IN || + session->tmf_state != TMF_QUEUED); + if (signal_pending(current)) + flush_signals(current); + del_timer_sync(&session->tmf_timer); + + mutex_lock(&session->eh_mutex); + spin_lock_bh(&session->frwd_lock); + /* if the session drops it will clean up the task */ + if (age != session->age || + session->state != ISCSI_STATE_LOGGED_IN) + return -ENOTCONN; + return 0; +} + +/* + * Fail commands. session frwd lock held and xmit thread flushed. + */ +static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error) +{ + struct iscsi_session *session = conn->session; + struct iscsi_task *task; + int i; + +restart_cmd_loop: + spin_lock_bh(&session->back_lock); + for (i = 0; i < session->cmds_max; i++) { + task = session->cmds[i]; + if (!task->sc || task->state == ISCSI_TASK_FREE) + continue; + + if (lun != -1 && lun != task->sc->device->lun) + continue; + /* + * The cmd is completing but if this is called from an eh + * callout path then when we return scsi-ml owns the cmd. Wait + * for the completion path to finish freeing the cmd. + */ + if (!iscsi_get_task(task)) { + spin_unlock_bh(&session->back_lock); + spin_unlock_bh(&session->frwd_lock); + udelay(ISCSI_CMD_COMPL_WAIT); + spin_lock_bh(&session->frwd_lock); + goto restart_cmd_loop; + } + + ISCSI_DBG_SESSION(session, + "failing sc %p itt 0x%x state %d\n", + task->sc, task->itt, task->state); + __fail_scsi_task(task, error); + __iscsi_put_task(task); + } + spin_unlock_bh(&session->back_lock); +} + +/** + * iscsi_suspend_queue - suspend iscsi_queuecommand + * @conn: iscsi conn to stop queueing IO on + * + * This grabs the session frwd_lock to make sure no one is in + * xmit_task/queuecommand, and then sets suspend to prevent + * new commands from being queued. This only needs to be called + * by offload drivers that need to sync a path like ep disconnect + * with the iscsi_queuecommand/xmit_task. To start IO again libiscsi + * will call iscsi_start_tx and iscsi_unblock_session when in FFP. + */ +void iscsi_suspend_queue(struct iscsi_conn *conn) +{ + spin_lock_bh(&conn->session->frwd_lock); + set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); + spin_unlock_bh(&conn->session->frwd_lock); +} +EXPORT_SYMBOL_GPL(iscsi_suspend_queue); + +/** + * iscsi_suspend_tx - suspend iscsi_data_xmit + * @conn: iscsi conn to stop processing IO on. + * + * This function sets the suspend bit to prevent iscsi_data_xmit + * from sending new IO, and if work is queued on the xmit thread + * it will wait for it to be completed. 
+ */ +void iscsi_suspend_tx(struct iscsi_conn *conn) +{ + struct Scsi_Host *shost = conn->session->host; + struct iscsi_host *ihost = shost_priv(shost); + + set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); + if (ihost->workq) + flush_work(&conn->xmitwork); +} +EXPORT_SYMBOL_GPL(iscsi_suspend_tx); + +static void iscsi_start_tx(struct iscsi_conn *conn) +{ + clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); + iscsi_conn_queue_xmit(conn); +} + +/** + * iscsi_suspend_rx - Prevent recvwork from running again. + * @conn: iscsi conn to stop. + */ +void iscsi_suspend_rx(struct iscsi_conn *conn) +{ + struct Scsi_Host *shost = conn->session->host; + struct iscsi_host *ihost = shost_priv(shost); + + set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); + if (ihost->workq) + flush_work(&conn->recvwork); +} +EXPORT_SYMBOL_GPL(iscsi_suspend_rx); + +/* + * We want to make sure a ping is in flight. It has timed out. + * And we are not busy processing a pdu that is making + * progress but got started before the ping and is taking a while + * to complete so the ping is just stuck behind it in a queue. + */ +static int iscsi_has_ping_timed_out(struct iscsi_conn *conn) +{ + if (READ_ONCE(conn->ping_task) && + time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + + (conn->ping_timeout * HZ), jiffies)) + return 1; + else + return 0; +} + +enum scsi_timeout_action iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) +{ + enum scsi_timeout_action rc = SCSI_EH_NOT_HANDLED; + struct iscsi_task *task = NULL, *running_task; + struct iscsi_cls_session *cls_session; + struct iscsi_session *session; + struct iscsi_conn *conn; + int i; + + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; + + ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc); + + spin_lock_bh(&session->frwd_lock); + spin_lock(&session->back_lock); + task = iscsi_cmd(sc)->task; + if (!task) { + /* + * Raced with completion. Blk layer has taken ownership + * so let timeout code complete it now. + */ + rc = SCSI_EH_NOT_HANDLED; + spin_unlock(&session->back_lock); + goto done; + } + if (!iscsi_get_task(task)) { + /* + * Racing with the completion path right now, so give it more + * time so that path can complete it like normal. + */ + rc = SCSI_EH_RESET_TIMER; + task = NULL; + spin_unlock(&session->back_lock); + goto done; + } + spin_unlock(&session->back_lock); + + if (session->state != ISCSI_STATE_LOGGED_IN) { + /* + * During shutdown, if session is prematurely disconnected, + * recovery won't happen and there will be hung cmds. Not + * handling cmds would trigger EH, also bad in this case. + * Instead, handle cmd, allow completion to happen and let + * upper layer to deal with the result. + */ + if (unlikely(system_state != SYSTEM_RUNNING)) { + sc->result = DID_NO_CONNECT << 16; + ISCSI_DBG_EH(session, "sc on shutdown, handled\n"); + rc = SCSI_EH_NOT_HANDLED; + goto done; + } + /* + * We are probably in the middle of iscsi recovery so let + * that complete and handle the error. + */ + rc = SCSI_EH_RESET_TIMER; + goto done; + } + + conn = session->leadconn; + if (!conn) { + /* In the middle of shuting down */ + rc = SCSI_EH_RESET_TIMER; + goto done; + } + + /* + * If we have sent (at least queued to the network layer) a pdu or + * recvd one for the task since the last timeout ask for + * more time. If on the next timeout we have not made progress + * we can check if it is the task or connection when we send the + * nop as a ping. 
+ */ + if (time_after(task->last_xfer, task->last_timeout)) { + ISCSI_DBG_EH(session, "Command making progress. Asking " + "scsi-ml for more time to complete. " + "Last data xfer at %lu. Last timeout was at " + "%lu\n.", task->last_xfer, task->last_timeout); + task->have_checked_conn = false; + rc = SCSI_EH_RESET_TIMER; + goto done; + } + + if (!conn->recv_timeout && !conn->ping_timeout) + goto done; + /* + * if the ping timedout then we are in the middle of cleaning up + * and can let the iscsi eh handle it + */ + if (iscsi_has_ping_timed_out(conn)) { + rc = SCSI_EH_RESET_TIMER; + goto done; + } + + spin_lock(&session->back_lock); + for (i = 0; i < conn->session->cmds_max; i++) { + running_task = conn->session->cmds[i]; + if (!running_task->sc || running_task == task || + running_task->state != ISCSI_TASK_RUNNING) + continue; + + /* + * Only check if cmds started before this one have made + * progress, or this could never fail + */ + if (time_after(running_task->sc->jiffies_at_alloc, + task->sc->jiffies_at_alloc)) + continue; + + if (time_after(running_task->last_xfer, task->last_timeout)) { + /* + * This task has not made progress, but a task + * started before us has transferred data since + * we started/last-checked. We could be queueing + * too many tasks or the LU is bad. + * + * If the device is bad the cmds ahead of us on + * other devs will complete, and this loop will + * eventually fail starting the scsi eh. + */ + ISCSI_DBG_EH(session, "Command has not made progress " + "but commands ahead of it have. " + "Asking scsi-ml for more time to " + "complete. Our last xfer vs running task " + "last xfer %lu/%lu. Last check %lu.\n", + task->last_xfer, running_task->last_xfer, + task->last_timeout); + spin_unlock(&session->back_lock); + rc = SCSI_EH_RESET_TIMER; + goto done; + } + } + spin_unlock(&session->back_lock); + + /* Assumes nop timeout is shorter than scsi cmd timeout */ + if (task->have_checked_conn) + goto done; + + /* + * Checking the transport already or nop from a cmd timeout still + * running + */ + if (READ_ONCE(conn->ping_task)) { + task->have_checked_conn = true; + rc = SCSI_EH_RESET_TIMER; + goto done; + } + + /* Make sure there is a transport check done */ + iscsi_send_nopout(conn, NULL); + task->have_checked_conn = true; + rc = SCSI_EH_RESET_TIMER; + +done: + spin_unlock_bh(&session->frwd_lock); + + if (task) { + task->last_timeout = jiffies; + iscsi_put_task(task); + } + ISCSI_DBG_EH(session, "return %s\n", rc == SCSI_EH_RESET_TIMER ? 
+ "timer reset" : "shutdown or nh"); + return rc; +} +EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out); + +static void iscsi_check_transport_timeouts(struct timer_list *t) +{ + struct iscsi_conn *conn = from_timer(conn, t, transport_timer); + struct iscsi_session *session = conn->session; + unsigned long recv_timeout, next_timeout = 0, last_recv; + + spin_lock(&session->frwd_lock); + if (session->state != ISCSI_STATE_LOGGED_IN) + goto done; + + recv_timeout = conn->recv_timeout; + if (!recv_timeout) + goto done; + + recv_timeout *= HZ; + last_recv = conn->last_recv; + + if (iscsi_has_ping_timed_out(conn)) { + iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs " + "expired, recv timeout %d, last rx %lu, " + "last ping %lu, now %lu\n", + conn->ping_timeout, conn->recv_timeout, + last_recv, conn->last_ping, jiffies); + spin_unlock(&session->frwd_lock); + iscsi_conn_failure(conn, ISCSI_ERR_NOP_TIMEDOUT); + return; + } + + if (time_before_eq(last_recv + recv_timeout, jiffies)) { + /* send a ping to try to provoke some traffic */ + ISCSI_DBG_CONN(conn, "Sending nopout as ping\n"); + if (iscsi_send_nopout(conn, NULL)) + next_timeout = jiffies + (1 * HZ); + else + next_timeout = conn->last_ping + (conn->ping_timeout * HZ); + } else + next_timeout = last_recv + recv_timeout; + + ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout); + mod_timer(&conn->transport_timer, next_timeout); +done: + spin_unlock(&session->frwd_lock); +} + +/** + * iscsi_conn_unbind - prevent queueing to conn. + * @cls_conn: iscsi conn ep is bound to. + * @is_active: is the conn in use for boot or is this for EH/termination + * + * This must be called by drivers implementing the ep_disconnect callout. + * It disables queueing to the connection from libiscsi in preparation for + * an ep_disconnect call. + */ +void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active) +{ + struct iscsi_session *session; + struct iscsi_conn *conn; + + if (!cls_conn) + return; + + conn = cls_conn->dd_data; + session = conn->session; + /* + * Wait for iscsi_eh calls to exit. We don't wait for the tmf to + * complete or timeout. The caller just wants to know what's running + * is everything that needs to be cleaned up, and no cmds will be + * queued. + */ + mutex_lock(&session->eh_mutex); + + iscsi_suspend_queue(conn); + iscsi_suspend_tx(conn); + + spin_lock_bh(&session->frwd_lock); + clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags); + + if (!is_active) { + /* + * if logout timed out before userspace could even send a PDU + * the state might still be in ISCSI_STATE_LOGGED_IN and + * allowing new cmds and TMFs. 
+ */ + if (session->state == ISCSI_STATE_LOGGED_IN) + iscsi_set_conn_failed(conn); + } + spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); +} +EXPORT_SYMBOL_GPL(iscsi_conn_unbind); + +static void iscsi_prep_abort_task_pdu(struct iscsi_task *task, + struct iscsi_tm *hdr) +{ + memset(hdr, 0, sizeof(*hdr)); + hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; + hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK; + hdr->flags |= ISCSI_FLAG_CMD_FINAL; + hdr->lun = task->lun; + hdr->rtt = task->hdr_itt; + hdr->refcmdsn = task->cmdsn; +} + +int iscsi_eh_abort(struct scsi_cmnd *sc) +{ + struct iscsi_cls_session *cls_session; + struct iscsi_session *session; + struct iscsi_conn *conn; + struct iscsi_task *task; + struct iscsi_tm *hdr; + int age; + + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; + + ISCSI_DBG_EH(session, "aborting sc %p\n", sc); + +completion_check: + mutex_lock(&session->eh_mutex); + spin_lock_bh(&session->frwd_lock); + /* + * if session was ISCSI_STATE_IN_RECOVERY then we may not have + * got the command. + */ + if (!iscsi_cmd(sc)->task) { + ISCSI_DBG_EH(session, "sc never reached iscsi layer or " + "it completed.\n"); + spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); + return SUCCESS; + } + + /* + * If we are not logged in or we have started a new session + * then let the host reset code handle this + */ + if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN || + iscsi_cmd(sc)->age != session->age) { + spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); + ISCSI_DBG_EH(session, "failing abort due to dropped " + "session.\n"); + return FAILED; + } + + spin_lock(&session->back_lock); + task = iscsi_cmd(sc)->task; + if (!task || !task->sc) { + /* task completed before time out */ + ISCSI_DBG_EH(session, "sc completed while abort in progress\n"); + + spin_unlock(&session->back_lock); + spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); + return SUCCESS; + } + + if (!iscsi_get_task(task)) { + spin_unlock(&session->back_lock); + spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); + /* We are just about to call iscsi_free_task so wait for it. */ + udelay(ISCSI_CMD_COMPL_WAIT); + goto completion_check; + } + + ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt); + conn = session->leadconn; + iscsi_get_conn(conn->cls_conn); + conn->eh_abort_cnt++; + age = session->age; + spin_unlock(&session->back_lock); + + if (task->state == ISCSI_TASK_PENDING) { + fail_scsi_task(task, DID_ABORT); + goto success; + } + + /* only have one tmf outstanding at a time */ + if (session->tmf_state != TMF_INITIAL) + goto failed; + session->tmf_state = TMF_QUEUED; + + hdr = &session->tmhdr; + iscsi_prep_abort_task_pdu(task, hdr); + + if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) + goto failed; + + switch (session->tmf_state) { + case TMF_SUCCESS: + spin_unlock_bh(&session->frwd_lock); + /* + * stop tx side incase the target had sent a abort rsp but + * the initiator was still writing out data. + */ + iscsi_suspend_tx(conn); + /* + * we do not stop the recv side because targets have been + * good and have never sent us a successful tmf response + * then sent more data for the cmd. 
+ */ + spin_lock_bh(&session->frwd_lock); + fail_scsi_task(task, DID_ABORT); + session->tmf_state = TMF_INITIAL; + memset(hdr, 0, sizeof(*hdr)); + spin_unlock_bh(&session->frwd_lock); + iscsi_start_tx(conn); + goto success_unlocked; + case TMF_TIMEDOUT: + session->running_aborted_task = task; + spin_unlock_bh(&session->frwd_lock); + iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); + goto failed_unlocked; + case TMF_NOT_FOUND: + if (iscsi_task_is_completed(task)) { + session->tmf_state = TMF_INITIAL; + memset(hdr, 0, sizeof(*hdr)); + /* task completed before tmf abort response */ + ISCSI_DBG_EH(session, "sc completed while abort in " + "progress\n"); + goto success; + } + fallthrough; + default: + session->tmf_state = TMF_INITIAL; + goto failed; + } + +success: + spin_unlock_bh(&session->frwd_lock); +success_unlocked: + ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n", + sc, task->itt); + iscsi_put_task(task); + iscsi_put_conn(conn->cls_conn); + mutex_unlock(&session->eh_mutex); + return SUCCESS; + +failed: + spin_unlock_bh(&session->frwd_lock); +failed_unlocked: + ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc, + task ? task->itt : 0); + /* + * The driver might be accessing the task so hold the ref. The conn + * stop cleanup will drop the ref after ep_disconnect so we know the + * driver's no longer touching the task. + */ + if (!session->running_aborted_task) + iscsi_put_task(task); + + iscsi_put_conn(conn->cls_conn); + mutex_unlock(&session->eh_mutex); + return FAILED; +} +EXPORT_SYMBOL_GPL(iscsi_eh_abort); + +static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) +{ + memset(hdr, 0, sizeof(*hdr)); + hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; + hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK; + hdr->flags |= ISCSI_FLAG_CMD_FINAL; + int_to_scsilun(sc->device->lun, &hdr->lun); + hdr->rtt = RESERVED_ITT; +} + +int iscsi_eh_device_reset(struct scsi_cmnd *sc) +{ + struct iscsi_cls_session *cls_session; + struct iscsi_session *session; + struct iscsi_conn *conn; + struct iscsi_tm *hdr; + int rc = FAILED; + + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; + + ISCSI_DBG_EH(session, "LU Reset [sc %p lun %llu]\n", sc, + sc->device->lun); + + mutex_lock(&session->eh_mutex); + spin_lock_bh(&session->frwd_lock); + /* + * Just check if we are not logged in. We cannot check for + * the phase because the reset could come from a ioctl. 
+ */ + if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) + goto unlock; + conn = session->leadconn; + + /* only have one tmf outstanding at a time */ + if (session->tmf_state != TMF_INITIAL) + goto unlock; + session->tmf_state = TMF_QUEUED; + + hdr = &session->tmhdr; + iscsi_prep_lun_reset_pdu(sc, hdr); + + if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age, + session->lu_reset_timeout)) { + rc = FAILED; + goto unlock; + } + + switch (session->tmf_state) { + case TMF_SUCCESS: + break; + case TMF_TIMEDOUT: + spin_unlock_bh(&session->frwd_lock); + iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); + goto done; + default: + session->tmf_state = TMF_INITIAL; + goto unlock; + } + + rc = SUCCESS; + spin_unlock_bh(&session->frwd_lock); + + iscsi_suspend_tx(conn); + + spin_lock_bh(&session->frwd_lock); + memset(hdr, 0, sizeof(*hdr)); + fail_scsi_tasks(conn, sc->device->lun, DID_ERROR); + session->tmf_state = TMF_INITIAL; + spin_unlock_bh(&session->frwd_lock); + + iscsi_start_tx(conn); + goto done; + +unlock: + spin_unlock_bh(&session->frwd_lock); +done: + ISCSI_DBG_EH(session, "dev reset result = %s\n", + rc == SUCCESS ? "SUCCESS" : "FAILED"); + mutex_unlock(&session->eh_mutex); + return rc; +} +EXPORT_SYMBOL_GPL(iscsi_eh_device_reset); + +void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *session = cls_session->dd_data; + + spin_lock_bh(&session->frwd_lock); + if (session->state != ISCSI_STATE_LOGGED_IN) { + session->state = ISCSI_STATE_RECOVERY_FAILED; + wake_up(&session->ehwait); + } + spin_unlock_bh(&session->frwd_lock); +} +EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); + +/** + * iscsi_eh_session_reset - drop session and attempt relogin + * @sc: scsi command + * + * This function will wait for a relogin, session termination from + * userspace, or a recovery/replacement timeout. 
+ */ +int iscsi_eh_session_reset(struct scsi_cmnd *sc) +{ + struct iscsi_cls_session *cls_session; + struct iscsi_session *session; + struct iscsi_conn *conn; + + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; + + mutex_lock(&session->eh_mutex); + spin_lock_bh(&session->frwd_lock); + if (session->state == ISCSI_STATE_TERMINATE) { +failed: + ISCSI_DBG_EH(session, + "failing session reset: Could not log back into " + "%s [age %d]\n", session->targetname, + session->age); + spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); + return FAILED; + } + + conn = session->leadconn; + iscsi_get_conn(conn->cls_conn); + + spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); + + iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); + iscsi_put_conn(conn->cls_conn); + + ISCSI_DBG_EH(session, "wait for relogin\n"); + wait_event_interruptible(session->ehwait, + session->state == ISCSI_STATE_TERMINATE || + session->state == ISCSI_STATE_LOGGED_IN || + session->state == ISCSI_STATE_RECOVERY_FAILED); + if (signal_pending(current)) + flush_signals(current); + + mutex_lock(&session->eh_mutex); + spin_lock_bh(&session->frwd_lock); + if (session->state == ISCSI_STATE_LOGGED_IN) { + ISCSI_DBG_EH(session, + "session reset succeeded for %s,%s\n", + session->targetname, conn->persistent_address); + } else + goto failed; + spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); + return SUCCESS; +} +EXPORT_SYMBOL_GPL(iscsi_eh_session_reset); + +static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) +{ + memset(hdr, 0, sizeof(*hdr)); + hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; + hdr->flags = ISCSI_TM_FUNC_TARGET_WARM_RESET & ISCSI_FLAG_TM_FUNC_MASK; + hdr->flags |= ISCSI_FLAG_CMD_FINAL; + hdr->rtt = RESERVED_ITT; +} + +/** + * iscsi_eh_target_reset - reset target + * @sc: scsi command + * + * This will attempt to send a warm target reset. + */ +static int iscsi_eh_target_reset(struct scsi_cmnd *sc) +{ + struct iscsi_cls_session *cls_session; + struct iscsi_session *session; + struct iscsi_conn *conn; + struct iscsi_tm *hdr; + int rc = FAILED; + + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; + + ISCSI_DBG_EH(session, "tgt Reset [sc %p tgt %s]\n", sc, + session->targetname); + + mutex_lock(&session->eh_mutex); + spin_lock_bh(&session->frwd_lock); + /* + * Just check if we are not logged in. We cannot check for + * the phase because the reset could come from a ioctl. 
+ */ + if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) + goto unlock; + conn = session->leadconn; + + /* only have one tmf outstanding at a time */ + if (session->tmf_state != TMF_INITIAL) + goto unlock; + session->tmf_state = TMF_QUEUED; + + hdr = &session->tmhdr; + iscsi_prep_tgt_reset_pdu(sc, hdr); + + if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age, + session->tgt_reset_timeout)) { + rc = FAILED; + goto unlock; + } + + switch (session->tmf_state) { + case TMF_SUCCESS: + break; + case TMF_TIMEDOUT: + spin_unlock_bh(&session->frwd_lock); + iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); + goto done; + default: + session->tmf_state = TMF_INITIAL; + goto unlock; + } + + rc = SUCCESS; + spin_unlock_bh(&session->frwd_lock); + + iscsi_suspend_tx(conn); + + spin_lock_bh(&session->frwd_lock); + memset(hdr, 0, sizeof(*hdr)); + fail_scsi_tasks(conn, -1, DID_ERROR); + session->tmf_state = TMF_INITIAL; + spin_unlock_bh(&session->frwd_lock); + + iscsi_start_tx(conn); + goto done; + +unlock: + spin_unlock_bh(&session->frwd_lock); +done: + ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname, + rc == SUCCESS ? "SUCCESS" : "FAILED"); + mutex_unlock(&session->eh_mutex); + return rc; +} + +/** + * iscsi_eh_recover_target - reset target and possibly the session + * @sc: scsi command + * + * This will attempt to send a warm target reset. If that fails, + * we will escalate to ERL0 session recovery. + */ +int iscsi_eh_recover_target(struct scsi_cmnd *sc) +{ + int rc; + + rc = iscsi_eh_target_reset(sc); + if (rc == FAILED) + rc = iscsi_eh_session_reset(sc); + return rc; +} +EXPORT_SYMBOL_GPL(iscsi_eh_recover_target); + +/* + * Pre-allocate a pool of @max items of @item_size. By default, the pool + * should be accessed via kfifo_{get,put} on q->queue. + * Optionally, the caller can obtain the array of object pointers + * by passing in a non-NULL @items pointer + */ +int +iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size) +{ + int i, num_arrays = 1; + + memset(q, 0, sizeof(*q)); + + q->max = max; + + /* If the user passed an items pointer, he wants a copy of + * the array. */ + if (items) + num_arrays++; + q->pool = kvcalloc(num_arrays * max, sizeof(void *), GFP_KERNEL); + if (q->pool == NULL) + return -ENOMEM; + + kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*)); + + for (i = 0; i < max; i++) { + q->pool[i] = kzalloc(item_size, GFP_KERNEL); + if (q->pool[i] == NULL) { + q->max = i; + goto enomem; + } + kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*)); + } + + if (items) { + *items = q->pool + max; + memcpy(*items, q->pool, max * sizeof(void *)); + } + + return 0; + +enomem: + iscsi_pool_free(q); + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(iscsi_pool_init); + +void iscsi_pool_free(struct iscsi_pool *q) +{ + int i; + + for (i = 0; i < q->max; i++) + kfree(q->pool[i]); + kvfree(q->pool); +} +EXPORT_SYMBOL_GPL(iscsi_pool_free); + +int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost, + uint16_t requested_cmds_max) +{ + int scsi_cmds, total_cmds = requested_cmds_max; + +check: + if (!total_cmds) + total_cmds = ISCSI_DEF_XMIT_CMDS_MAX; + /* + * The iscsi layer needs some tasks for nop handling and tmfs, + * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX + * + 1 command for scsi IO. + */ + if (total_cmds < ISCSI_TOTAL_CMDS_MIN) { + printk(KERN_ERR "iscsi: invalid max cmds of %d. 
Must be a power of two that is at least %d.\n", + total_cmds, ISCSI_TOTAL_CMDS_MIN); + return -EINVAL; + } + + if (total_cmds > ISCSI_TOTAL_CMDS_MAX) { + printk(KERN_INFO "iscsi: invalid max cmds of %d. Must be a power of 2 less than or equal to %d. Using %d.\n", + requested_cmds_max, ISCSI_TOTAL_CMDS_MAX, + ISCSI_TOTAL_CMDS_MAX); + total_cmds = ISCSI_TOTAL_CMDS_MAX; + } + + if (!is_power_of_2(total_cmds)) { + total_cmds = rounddown_pow_of_two(total_cmds); + if (total_cmds < ISCSI_TOTAL_CMDS_MIN) { + printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of 2 greater than %d.\n", requested_cmds_max, ISCSI_TOTAL_CMDS_MIN); + return -EINVAL; + } + + printk(KERN_INFO "iscsi: invalid max cmds %d. Must be a power of 2. Rounding max cmds down to %d.\n", + requested_cmds_max, total_cmds); + } + + scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX; + if (shost->can_queue && scsi_cmds > shost->can_queue) { + total_cmds = shost->can_queue; + + printk(KERN_INFO "iscsi: requested max cmds %u is higher than driver limit. Using driver limit %u\n", + requested_cmds_max, shost->can_queue); + goto check; + } + + return scsi_cmds; +} +EXPORT_SYMBOL_GPL(iscsi_host_get_max_scsi_cmds); + +/** + * iscsi_host_add - add host to system + * @shost: scsi host + * @pdev: parent device + * + * This should be called by partial offload and software iscsi drivers + * to add a host to the system. + */ +int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev) +{ + if (!shost->can_queue) + shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX; + + if (!shost->cmd_per_lun) + shost->cmd_per_lun = ISCSI_DEF_CMD_PER_LUN; + + return scsi_add_host(shost, pdev); +} +EXPORT_SYMBOL_GPL(iscsi_host_add); + +/** + * iscsi_host_alloc - allocate a host and driver data + * @sht: scsi host template + * @dd_data_size: driver host data size + * @xmit_can_sleep: bool indicating if LLD will queue IO from a work queue + * + * This should be called by partial offload and software iscsi drivers. + * To access the driver specific memory use the iscsi_host_priv() macro. + */ +struct Scsi_Host *iscsi_host_alloc(const struct scsi_host_template *sht, + int dd_data_size, bool xmit_can_sleep) +{ + struct Scsi_Host *shost; + struct iscsi_host *ihost; + + shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size); + if (!shost) + return NULL; + ihost = shost_priv(shost); + + if (xmit_can_sleep) { + ihost->workq = alloc_workqueue("iscsi_q_%d", + WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, + 1, shost->host_no); + if (!ihost->workq) + goto free_host; + } + + spin_lock_init(&ihost->lock); + ihost->state = ISCSI_HOST_SETUP; + ihost->num_sessions = 0; + init_waitqueue_head(&ihost->session_removal_wq); + return shost; + +free_host: + scsi_host_put(shost); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_host_alloc); + +static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session) +{ + iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_INVALID_HOST); +} + +/** + * iscsi_host_remove - remove host and sessions + * @shost: scsi host + * @is_shutdown: true if called from a driver shutdown callout + * + * If there are any sessions left, this will initiate the removal and wait + * for the completion. 
+ */ +void iscsi_host_remove(struct Scsi_Host *shost, bool is_shutdown) +{ + struct iscsi_host *ihost = shost_priv(shost); + unsigned long flags; + + spin_lock_irqsave(&ihost->lock, flags); + ihost->state = ISCSI_HOST_REMOVED; + spin_unlock_irqrestore(&ihost->lock, flags); + + if (!is_shutdown) + iscsi_host_for_each_session(shost, iscsi_notify_host_removed); + else + iscsi_host_for_each_session(shost, iscsi_force_destroy_session); + + wait_event_interruptible(ihost->session_removal_wq, + ihost->num_sessions == 0); + if (signal_pending(current)) + flush_signals(current); + + scsi_remove_host(shost); +} +EXPORT_SYMBOL_GPL(iscsi_host_remove); + +void iscsi_host_free(struct Scsi_Host *shost) +{ + struct iscsi_host *ihost = shost_priv(shost); + + if (ihost->workq) + destroy_workqueue(ihost->workq); + + kfree(ihost->netdev); + kfree(ihost->hwaddress); + kfree(ihost->initiatorname); + scsi_host_put(shost); +} +EXPORT_SYMBOL_GPL(iscsi_host_free); + +static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost) +{ + struct iscsi_host *ihost = shost_priv(shost); + unsigned long flags; + + shost = scsi_host_get(shost); + if (!shost) { + printk(KERN_ERR "Invalid state. Cannot notify host removal " + "of session teardown event because host already " + "removed.\n"); + return; + } + + spin_lock_irqsave(&ihost->lock, flags); + ihost->num_sessions--; + if (ihost->num_sessions == 0) + wake_up(&ihost->session_removal_wq); + spin_unlock_irqrestore(&ihost->lock, flags); + scsi_host_put(shost); +} + +/** + * iscsi_session_setup - create iscsi cls session and host and session + * @iscsit: iscsi transport template + * @shost: scsi host + * @cmds_max: session can queue + * @dd_size: private driver data size, added to session allocation size + * @cmd_task_size: LLD task private data size + * @initial_cmdsn: initial CmdSN + * @id: target ID to add to this session + * + * This can be used by software iscsi_transports that allocate + * a session per scsi host. + * + * Callers should set cmds_max to the largest total number (mgmt + scsi) of + * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks + * for nop handling and login/logout requests.
+ */ +struct iscsi_cls_session * +iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, + uint16_t cmds_max, int dd_size, int cmd_task_size, + uint32_t initial_cmdsn, unsigned int id) +{ + struct iscsi_host *ihost = shost_priv(shost); + struct iscsi_session *session; + struct iscsi_cls_session *cls_session; + int cmd_i, scsi_cmds; + unsigned long flags; + + spin_lock_irqsave(&ihost->lock, flags); + if (ihost->state == ISCSI_HOST_REMOVED) { + spin_unlock_irqrestore(&ihost->lock, flags); + return NULL; + } + ihost->num_sessions++; + spin_unlock_irqrestore(&ihost->lock, flags); + + scsi_cmds = iscsi_host_get_max_scsi_cmds(shost, cmds_max); + if (scsi_cmds < 0) + goto dec_session_count; + + cls_session = iscsi_alloc_session(shost, iscsit, + sizeof(struct iscsi_session) + + dd_size); + if (!cls_session) + goto dec_session_count; + session = cls_session->dd_data; + session->cls_session = cls_session; + session->host = shost; + session->state = ISCSI_STATE_FREE; + session->fast_abort = 1; + session->tgt_reset_timeout = 30; + session->lu_reset_timeout = 15; + session->abort_timeout = 10; + session->scsi_cmds_max = scsi_cmds; + session->cmds_max = scsi_cmds + ISCSI_MGMT_CMDS_MAX; + session->queued_cmdsn = session->cmdsn = initial_cmdsn; + session->exp_cmdsn = initial_cmdsn + 1; + session->max_cmdsn = initial_cmdsn + 1; + session->max_r2t = 1; + session->tt = iscsit; + session->dd_data = cls_session->dd_data + sizeof(*session); + + session->tmf_state = TMF_INITIAL; + timer_setup(&session->tmf_timer, iscsi_tmf_timedout, 0); + mutex_init(&session->eh_mutex); + init_waitqueue_head(&session->ehwait); + + spin_lock_init(&session->frwd_lock); + spin_lock_init(&session->back_lock); + + /* initialize SCSI PDU commands pool */ + if (iscsi_pool_init(&session->cmdpool, session->cmds_max, + (void***)&session->cmds, + cmd_task_size + sizeof(struct iscsi_task))) + goto cmdpool_alloc_fail; + + /* pre-format cmds pool with ITT */ + for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { + struct iscsi_task *task = session->cmds[cmd_i]; + + if (cmd_task_size) + task->dd_data = &task[1]; + task->itt = cmd_i; + task->state = ISCSI_TASK_FREE; + INIT_LIST_HEAD(&task->running); + } + + if (!try_module_get(iscsit->owner)) + goto module_get_fail; + + if (iscsi_add_session(cls_session, id)) + goto cls_session_fail; + + return cls_session; + +cls_session_fail: + module_put(iscsit->owner); +module_get_fail: + iscsi_pool_free(&session->cmdpool); +cmdpool_alloc_fail: + iscsi_free_session(cls_session); +dec_session_count: + iscsi_host_dec_session_cnt(shost); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_session_setup); + +/* + * issi_session_remove - Remove session from iSCSI class. + */ +void iscsi_session_remove(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *session = cls_session->dd_data; + struct Scsi_Host *shost = session->host; + + iscsi_remove_session(cls_session); + /* + * host removal only has to wait for its children to be removed from + * sysfs, and iscsi_tcp needs to do iscsi_host_remove before freeing + * the session, so drop the session count here. 
+ */ + iscsi_host_dec_session_cnt(shost); +} +EXPORT_SYMBOL_GPL(iscsi_session_remove); + +/** + * iscsi_session_free - Free iscsi session and it's resources + * @cls_session: iscsi session + */ +void iscsi_session_free(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *session = cls_session->dd_data; + struct module *owner = cls_session->transport->owner; + + iscsi_pool_free(&session->cmdpool); + kfree(session->password); + kfree(session->password_in); + kfree(session->username); + kfree(session->username_in); + kfree(session->targetname); + kfree(session->targetalias); + kfree(session->initiatorname); + kfree(session->boot_root); + kfree(session->boot_nic); + kfree(session->boot_target); + kfree(session->ifacename); + kfree(session->portal_type); + kfree(session->discovery_parent_type); + + iscsi_free_session(cls_session); + module_put(owner); +} +EXPORT_SYMBOL_GPL(iscsi_session_free); + +/** + * iscsi_session_teardown - destroy session and cls_session + * @cls_session: iscsi session + */ +void iscsi_session_teardown(struct iscsi_cls_session *cls_session) +{ + iscsi_session_remove(cls_session); + iscsi_session_free(cls_session); +} +EXPORT_SYMBOL_GPL(iscsi_session_teardown); + +/** + * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn + * @cls_session: iscsi_cls_session + * @dd_size: private driver data size + * @conn_idx: cid + */ +struct iscsi_cls_conn * +iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, + uint32_t conn_idx) +{ + struct iscsi_session *session = cls_session->dd_data; + struct iscsi_conn *conn; + struct iscsi_cls_conn *cls_conn; + char *data; + int err; + + cls_conn = iscsi_alloc_conn(cls_session, sizeof(*conn) + dd_size, + conn_idx); + if (!cls_conn) + return NULL; + conn = cls_conn->dd_data; + + conn->dd_data = cls_conn->dd_data + sizeof(*conn); + conn->session = session; + conn->cls_conn = cls_conn; + conn->c_stage = ISCSI_CONN_INITIAL_STAGE; + conn->id = conn_idx; + conn->exp_statsn = 0; + + timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0); + + INIT_LIST_HEAD(&conn->mgmtqueue); + INIT_LIST_HEAD(&conn->cmdqueue); + INIT_LIST_HEAD(&conn->requeue); + INIT_WORK(&conn->xmitwork, iscsi_xmitworker); + + /* allocate login_task used for the login/text sequences */ + spin_lock_bh(&session->frwd_lock); + if (!kfifo_out(&session->cmdpool.queue, + (void*)&conn->login_task, + sizeof(void*))) { + spin_unlock_bh(&session->frwd_lock); + goto login_task_alloc_fail; + } + spin_unlock_bh(&session->frwd_lock); + + data = (char *) __get_free_pages(GFP_KERNEL, + get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); + if (!data) + goto login_task_data_alloc_fail; + conn->login_task->data = conn->data = data; + + err = iscsi_add_conn(cls_conn); + if (err) + goto login_task_add_dev_fail; + + return cls_conn; + +login_task_add_dev_fail: + free_pages((unsigned long) conn->data, + get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); + +login_task_data_alloc_fail: + kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task, + sizeof(void*)); +login_task_alloc_fail: + iscsi_put_conn(cls_conn); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_conn_setup); + +/** + * iscsi_conn_teardown - teardown iscsi connection + * @cls_conn: iscsi class connection + * + * TODO: we may need to make this into a two step process + * like scsi-mls remove + put host + */ +void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_session *session = conn->session; + + iscsi_remove_conn(cls_conn); + + 
del_timer_sync(&conn->transport_timer); + + mutex_lock(&session->eh_mutex); + spin_lock_bh(&session->frwd_lock); + conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; + if (session->leadconn == conn) { + /* + * leading connection? then give up on recovery. + */ + session->state = ISCSI_STATE_TERMINATE; + wake_up(&session->ehwait); + } + spin_unlock_bh(&session->frwd_lock); + + /* flush queued up work because we free the connection below */ + iscsi_suspend_tx(conn); + + spin_lock_bh(&session->frwd_lock); + free_pages((unsigned long) conn->data, + get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); + kfree(conn->persistent_address); + kfree(conn->local_ipaddr); + /* regular RX path uses back_lock */ + spin_lock_bh(&session->back_lock); + kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task, + sizeof(void*)); + spin_unlock_bh(&session->back_lock); + if (session->leadconn == conn) + session->leadconn = NULL; + spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); + + iscsi_put_conn(cls_conn); +} +EXPORT_SYMBOL_GPL(iscsi_conn_teardown); + +int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_session *session = conn->session; + + if (!session) { + iscsi_conn_printk(KERN_ERR, conn, + "can't start unbound connection\n"); + return -EPERM; + } + + if ((session->imm_data_en || !session->initial_r2t_en) && + session->first_burst > session->max_burst) { + iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: " + "first_burst %d max_burst %d\n", + session->first_burst, session->max_burst); + return -EINVAL; + } + + if (conn->ping_timeout && !conn->recv_timeout) { + iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of " + "zero. Using 5 seconds\n."); + conn->recv_timeout = 5; + } + + if (conn->recv_timeout && !conn->ping_timeout) { + iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of " + "zero. Using 5 seconds.\n"); + conn->ping_timeout = 5; + } + + spin_lock_bh(&session->frwd_lock); + conn->c_stage = ISCSI_CONN_STARTED; + session->state = ISCSI_STATE_LOGGED_IN; + session->queued_cmdsn = session->cmdsn; + + conn->last_recv = jiffies; + conn->last_ping = jiffies; + if (conn->recv_timeout && conn->ping_timeout) + mod_timer(&conn->transport_timer, + jiffies + (conn->recv_timeout * HZ)); + + switch(conn->stop_stage) { + case STOP_CONN_RECOVER: + /* + * unblock eh_abort() if it is blocked. 
re-try all + * commands after successful recovery + */ + conn->stop_stage = 0; + session->tmf_state = TMF_INITIAL; + session->age++; + if (session->age == 16) + session->age = 0; + break; + case STOP_CONN_TERM: + conn->stop_stage = 0; + break; + default: + break; + } + spin_unlock_bh(&session->frwd_lock); + + iscsi_unblock_session(session->cls_session); + wake_up(&session->ehwait); + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_conn_start); + +static void +fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn) +{ + struct iscsi_task *task; + int i, state; + + for (i = 0; i < conn->session->cmds_max; i++) { + task = conn->session->cmds[i]; + if (task->sc) + continue; + + if (task->state == ISCSI_TASK_FREE) + continue; + + ISCSI_DBG_SESSION(conn->session, + "failing mgmt itt 0x%x state %d\n", + task->itt, task->state); + + spin_lock_bh(&session->back_lock); + if (cleanup_queued_task(task)) { + spin_unlock_bh(&session->back_lock); + continue; + } + + state = ISCSI_TASK_ABRT_SESS_RECOV; + if (task->state == ISCSI_TASK_PENDING) + state = ISCSI_TASK_COMPLETED; + iscsi_complete_task(task, state); + spin_unlock_bh(&session->back_lock); + } +} + +void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_session *session = conn->session; + int old_stop_stage; + + mutex_lock(&session->eh_mutex); + spin_lock_bh(&session->frwd_lock); + if (conn->stop_stage == STOP_CONN_TERM) { + spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); + return; + } + + /* + * When this is called for the in_login state, we only want to clean + * up the login task and connection. We do not need to block and set + * the recovery state again + */ + if (flag == STOP_CONN_TERM) + session->state = ISCSI_STATE_TERMINATE; + else if (conn->stop_stage != STOP_CONN_RECOVER) + session->state = ISCSI_STATE_IN_RECOVERY; + + old_stop_stage = conn->stop_stage; + conn->stop_stage = flag; + spin_unlock_bh(&session->frwd_lock); + + del_timer_sync(&conn->transport_timer); + iscsi_suspend_tx(conn); + + spin_lock_bh(&session->frwd_lock); + conn->c_stage = ISCSI_CONN_STOPPED; + spin_unlock_bh(&session->frwd_lock); + + /* + * for connection level recovery we should not calculate + * header digest. conn->hdr_size used for optimization + * in hdr_extract() and will be re-negotiated at + * set_param() time. + */ + if (flag == STOP_CONN_RECOVER) { + conn->hdrdgst_en = 0; + conn->datadgst_en = 0; + if (session->state == ISCSI_STATE_IN_RECOVERY && + old_stop_stage != STOP_CONN_RECOVER) { + ISCSI_DBG_SESSION(session, "blocking session\n"); + iscsi_block_session(session->cls_session); + } + } + + /* + * flush queues. + */ + spin_lock_bh(&session->frwd_lock); + fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); + fail_mgmt_tasks(session, conn); + memset(&session->tmhdr, 0, sizeof(session->tmhdr)); + spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); +} +EXPORT_SYMBOL_GPL(iscsi_conn_stop); + +int iscsi_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, int is_leading) +{ + struct iscsi_session *session = cls_session->dd_data; + struct iscsi_conn *conn = cls_conn->dd_data; + + spin_lock_bh(&session->frwd_lock); + if (is_leading) + session->leadconn = conn; + + set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags); + spin_unlock_bh(&session->frwd_lock); + + /* + * The target could have reduced it's window size between logins, so + * we have to reset max/exp cmdsn so we can see the new values. 
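[Editorial sketch, not part of the patch.] iscsi_conn_bind() above resets max_cmdsn and exp_cmdsn to cmdsn + 1 because the CmdSN window has to be re-learned from the target after a (re)login. CmdSN, ExpCmdSN and MaxCmdSN are 32-bit counters that wrap, so window checks are normally done with serial-number (modulo-2^32) comparisons rather than plain integer compares. A minimal user-space sketch of that kind of comparison; the helper names are ours.

#include <stdio.h>
#include <stdint.h>

/* Serial-number "less than or equal" over 32-bit wrapping sequence numbers. */
static int sna_lte(uint32_t a, uint32_t b)
{
	return a == b || (int32_t)(a - b) < 0;
}

/* A command may be issued while CmdSN is still inside the window, i.e. <= MaxCmdSN. */
static int window_open(uint32_t cmdsn, uint32_t max_cmdsn)
{
	return sna_lte(cmdsn, max_cmdsn);
}

int main(void)
{
	/* A window straddling the 2^32 wrap still behaves correctly. */
	uint32_t cmdsn = 0xfffffffe, max_cmdsn = 0x00000005;

	printf("cmdsn %#x vs max %#x -> %s\n", cmdsn, max_cmdsn,
	       window_open(cmdsn, max_cmdsn) ? "send" : "queue");
	printf("cmdsn %#x vs max %#x -> %s\n", 0x6u, max_cmdsn,
	       window_open(0x6, max_cmdsn) ? "send" : "queue");
	return 0;
}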
+ */ + spin_lock_bh(&session->back_lock); + session->max_cmdsn = session->exp_cmdsn = session->cmdsn + 1; + spin_unlock_bh(&session->back_lock); + /* + * Unblock xmitworker(), Login Phase will pass through. + */ + clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); + clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_conn_bind); + +int iscsi_switch_str_param(char **param, char *new_val_buf) +{ + char *new_val; + + if (*param) { + if (!strcmp(*param, new_val_buf)) + return 0; + } + + new_val = kstrdup(new_val_buf, GFP_NOIO); + if (!new_val) + return -ENOMEM; + + kfree(*param); + *param = new_val; + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_switch_str_param); + +int iscsi_set_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf, int buflen) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_session *session = conn->session; + int val; + + switch(param) { + case ISCSI_PARAM_FAST_ABORT: + sscanf(buf, "%d", &session->fast_abort); + break; + case ISCSI_PARAM_ABORT_TMO: + sscanf(buf, "%d", &session->abort_timeout); + break; + case ISCSI_PARAM_LU_RESET_TMO: + sscanf(buf, "%d", &session->lu_reset_timeout); + break; + case ISCSI_PARAM_TGT_RESET_TMO: + sscanf(buf, "%d", &session->tgt_reset_timeout); + break; + case ISCSI_PARAM_PING_TMO: + sscanf(buf, "%d", &conn->ping_timeout); + break; + case ISCSI_PARAM_RECV_TMO: + sscanf(buf, "%d", &conn->recv_timeout); + break; + case ISCSI_PARAM_MAX_RECV_DLENGTH: + sscanf(buf, "%d", &conn->max_recv_dlength); + break; + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + sscanf(buf, "%d", &conn->max_xmit_dlength); + break; + case ISCSI_PARAM_HDRDGST_EN: + sscanf(buf, "%d", &conn->hdrdgst_en); + break; + case ISCSI_PARAM_DATADGST_EN: + sscanf(buf, "%d", &conn->datadgst_en); + break; + case ISCSI_PARAM_INITIAL_R2T_EN: + sscanf(buf, "%d", &session->initial_r2t_en); + break; + case ISCSI_PARAM_MAX_R2T: + sscanf(buf, "%hu", &session->max_r2t); + break; + case ISCSI_PARAM_IMM_DATA_EN: + sscanf(buf, "%d", &session->imm_data_en); + break; + case ISCSI_PARAM_FIRST_BURST: + sscanf(buf, "%d", &session->first_burst); + break; + case ISCSI_PARAM_MAX_BURST: + sscanf(buf, "%d", &session->max_burst); + break; + case ISCSI_PARAM_PDU_INORDER_EN: + sscanf(buf, "%d", &session->pdu_inorder_en); + break; + case ISCSI_PARAM_DATASEQ_INORDER_EN: + sscanf(buf, "%d", &session->dataseq_inorder_en); + break; + case ISCSI_PARAM_ERL: + sscanf(buf, "%d", &session->erl); + break; + case ISCSI_PARAM_EXP_STATSN: + sscanf(buf, "%u", &conn->exp_statsn); + break; + case ISCSI_PARAM_USERNAME: + return iscsi_switch_str_param(&session->username, buf); + case ISCSI_PARAM_USERNAME_IN: + return iscsi_switch_str_param(&session->username_in, buf); + case ISCSI_PARAM_PASSWORD: + return iscsi_switch_str_param(&session->password, buf); + case ISCSI_PARAM_PASSWORD_IN: + return iscsi_switch_str_param(&session->password_in, buf); + case ISCSI_PARAM_TARGET_NAME: + return iscsi_switch_str_param(&session->targetname, buf); + case ISCSI_PARAM_TARGET_ALIAS: + return iscsi_switch_str_param(&session->targetalias, buf); + case ISCSI_PARAM_TPGT: + sscanf(buf, "%d", &session->tpgt); + break; + case ISCSI_PARAM_PERSISTENT_PORT: + sscanf(buf, "%d", &conn->persistent_port); + break; + case ISCSI_PARAM_PERSISTENT_ADDRESS: + return iscsi_switch_str_param(&conn->persistent_address, buf); + case ISCSI_PARAM_IFACE_NAME: + return iscsi_switch_str_param(&session->ifacename, buf); + case ISCSI_PARAM_INITIATOR_NAME: + return iscsi_switch_str_param(&session->initiatorname, 
buf); + case ISCSI_PARAM_BOOT_ROOT: + return iscsi_switch_str_param(&session->boot_root, buf); + case ISCSI_PARAM_BOOT_NIC: + return iscsi_switch_str_param(&session->boot_nic, buf); + case ISCSI_PARAM_BOOT_TARGET: + return iscsi_switch_str_param(&session->boot_target, buf); + case ISCSI_PARAM_PORTAL_TYPE: + return iscsi_switch_str_param(&session->portal_type, buf); + case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: + return iscsi_switch_str_param(&session->discovery_parent_type, + buf); + case ISCSI_PARAM_DISCOVERY_SESS: + sscanf(buf, "%d", &val); + session->discovery_sess = !!val; + break; + case ISCSI_PARAM_LOCAL_IPADDR: + return iscsi_switch_str_param(&conn->local_ipaddr, buf); + default: + return -ENOSYS; + } + + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_set_param); + +int iscsi_session_get_param(struct iscsi_cls_session *cls_session, + enum iscsi_param param, char *buf) +{ + struct iscsi_session *session = cls_session->dd_data; + int len; + + switch(param) { + case ISCSI_PARAM_FAST_ABORT: + len = sysfs_emit(buf, "%d\n", session->fast_abort); + break; + case ISCSI_PARAM_ABORT_TMO: + len = sysfs_emit(buf, "%d\n", session->abort_timeout); + break; + case ISCSI_PARAM_LU_RESET_TMO: + len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout); + break; + case ISCSI_PARAM_TGT_RESET_TMO: + len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout); + break; + case ISCSI_PARAM_INITIAL_R2T_EN: + len = sysfs_emit(buf, "%d\n", session->initial_r2t_en); + break; + case ISCSI_PARAM_MAX_R2T: + len = sysfs_emit(buf, "%hu\n", session->max_r2t); + break; + case ISCSI_PARAM_IMM_DATA_EN: + len = sysfs_emit(buf, "%d\n", session->imm_data_en); + break; + case ISCSI_PARAM_FIRST_BURST: + len = sysfs_emit(buf, "%u\n", session->first_burst); + break; + case ISCSI_PARAM_MAX_BURST: + len = sysfs_emit(buf, "%u\n", session->max_burst); + break; + case ISCSI_PARAM_PDU_INORDER_EN: + len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en); + break; + case ISCSI_PARAM_DATASEQ_INORDER_EN: + len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en); + break; + case ISCSI_PARAM_DEF_TASKMGMT_TMO: + len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo); + break; + case ISCSI_PARAM_ERL: + len = sysfs_emit(buf, "%d\n", session->erl); + break; + case ISCSI_PARAM_TARGET_NAME: + len = sysfs_emit(buf, "%s\n", session->targetname); + break; + case ISCSI_PARAM_TARGET_ALIAS: + len = sysfs_emit(buf, "%s\n", session->targetalias); + break; + case ISCSI_PARAM_TPGT: + len = sysfs_emit(buf, "%d\n", session->tpgt); + break; + case ISCSI_PARAM_USERNAME: + len = sysfs_emit(buf, "%s\n", session->username); + break; + case ISCSI_PARAM_USERNAME_IN: + len = sysfs_emit(buf, "%s\n", session->username_in); + break; + case ISCSI_PARAM_PASSWORD: + len = sysfs_emit(buf, "%s\n", session->password); + break; + case ISCSI_PARAM_PASSWORD_IN: + len = sysfs_emit(buf, "%s\n", session->password_in); + break; + case ISCSI_PARAM_IFACE_NAME: + len = sysfs_emit(buf, "%s\n", session->ifacename); + break; + case ISCSI_PARAM_INITIATOR_NAME: + len = sysfs_emit(buf, "%s\n", session->initiatorname); + break; + case ISCSI_PARAM_BOOT_ROOT: + len = sysfs_emit(buf, "%s\n", session->boot_root); + break; + case ISCSI_PARAM_BOOT_NIC: + len = sysfs_emit(buf, "%s\n", session->boot_nic); + break; + case ISCSI_PARAM_BOOT_TARGET: + len = sysfs_emit(buf, "%s\n", session->boot_target); + break; + case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: + len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable); + break; + case ISCSI_PARAM_DISCOVERY_SESS: + len = sysfs_emit(buf, "%u\n", 
session->discovery_sess); + break; + case ISCSI_PARAM_PORTAL_TYPE: + len = sysfs_emit(buf, "%s\n", session->portal_type); + break; + case ISCSI_PARAM_CHAP_AUTH_EN: + len = sysfs_emit(buf, "%u\n", session->chap_auth_en); + break; + case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: + len = sysfs_emit(buf, "%u\n", session->discovery_logout_en); + break; + case ISCSI_PARAM_BIDI_CHAP_EN: + len = sysfs_emit(buf, "%u\n", session->bidi_chap_en); + break; + case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: + len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional); + break; + case ISCSI_PARAM_DEF_TIME2WAIT: + len = sysfs_emit(buf, "%d\n", session->time2wait); + break; + case ISCSI_PARAM_DEF_TIME2RETAIN: + len = sysfs_emit(buf, "%d\n", session->time2retain); + break; + case ISCSI_PARAM_TSID: + len = sysfs_emit(buf, "%u\n", session->tsid); + break; + case ISCSI_PARAM_ISID: + len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n", + session->isid[0], session->isid[1], + session->isid[2], session->isid[3], + session->isid[4], session->isid[5]); + break; + case ISCSI_PARAM_DISCOVERY_PARENT_IDX: + len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx); + break; + case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: + if (session->discovery_parent_type) + len = sysfs_emit(buf, "%s\n", + session->discovery_parent_type); + else + len = sysfs_emit(buf, "\n"); + break; + default: + return -ENOSYS; + } + + return len; +} +EXPORT_SYMBOL_GPL(iscsi_session_get_param); + +int iscsi_conn_get_addr_param(struct sockaddr_storage *addr, + enum iscsi_param param, char *buf) +{ + struct sockaddr_in6 *sin6 = NULL; + struct sockaddr_in *sin = NULL; + int len; + + switch (addr->ss_family) { + case AF_INET: + sin = (struct sockaddr_in *)addr; + break; + case AF_INET6: + sin6 = (struct sockaddr_in6 *)addr; + break; + default: + return -EINVAL; + } + + switch (param) { + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + if (sin) + len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr); + else + len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr); + break; + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_LOCAL_PORT: + if (sin) + len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port)); + else + len = sysfs_emit(buf, "%hu\n", + be16_to_cpu(sin6->sin6_port)); + break; + default: + return -EINVAL; + } + + return len; +} +EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param); + +int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + int len; + + switch(param) { + case ISCSI_PARAM_PING_TMO: + len = sysfs_emit(buf, "%u\n", conn->ping_timeout); + break; + case ISCSI_PARAM_RECV_TMO: + len = sysfs_emit(buf, "%u\n", conn->recv_timeout); + break; + case ISCSI_PARAM_MAX_RECV_DLENGTH: + len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength); + break; + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength); + break; + case ISCSI_PARAM_HDRDGST_EN: + len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en); + break; + case ISCSI_PARAM_DATADGST_EN: + len = sysfs_emit(buf, "%d\n", conn->datadgst_en); + break; + case ISCSI_PARAM_IFMARKER_EN: + len = sysfs_emit(buf, "%d\n", conn->ifmarker_en); + break; + case ISCSI_PARAM_OFMARKER_EN: + len = sysfs_emit(buf, "%d\n", conn->ofmarker_en); + break; + case ISCSI_PARAM_EXP_STATSN: + len = sysfs_emit(buf, "%u\n", conn->exp_statsn); + break; + case ISCSI_PARAM_PERSISTENT_PORT: + len = sysfs_emit(buf, "%d\n", conn->persistent_port); + break; + case ISCSI_PARAM_PERSISTENT_ADDRESS: + len = sysfs_emit(buf, 
"%s\n", conn->persistent_address); + break; + case ISCSI_PARAM_STATSN: + len = sysfs_emit(buf, "%u\n", conn->statsn); + break; + case ISCSI_PARAM_MAX_SEGMENT_SIZE: + len = sysfs_emit(buf, "%u\n", conn->max_segment_size); + break; + case ISCSI_PARAM_KEEPALIVE_TMO: + len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo); + break; + case ISCSI_PARAM_LOCAL_PORT: + len = sysfs_emit(buf, "%u\n", conn->local_port); + break; + case ISCSI_PARAM_TCP_TIMESTAMP_STAT: + len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat); + break; + case ISCSI_PARAM_TCP_NAGLE_DISABLE: + len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable); + break; + case ISCSI_PARAM_TCP_WSF_DISABLE: + len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable); + break; + case ISCSI_PARAM_TCP_TIMER_SCALE: + len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale); + break; + case ISCSI_PARAM_TCP_TIMESTAMP_EN: + len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en); + break; + case ISCSI_PARAM_IP_FRAGMENT_DISABLE: + len = sysfs_emit(buf, "%u\n", conn->fragment_disable); + break; + case ISCSI_PARAM_IPV4_TOS: + len = sysfs_emit(buf, "%u\n", conn->ipv4_tos); + break; + case ISCSI_PARAM_IPV6_TC: + len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class); + break; + case ISCSI_PARAM_IPV6_FLOW_LABEL: + len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label); + break; + case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: + len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6); + break; + case ISCSI_PARAM_TCP_XMIT_WSF: + len = sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf); + break; + case ISCSI_PARAM_TCP_RECV_WSF: + len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf); + break; + case ISCSI_PARAM_LOCAL_IPADDR: + len = sysfs_emit(buf, "%s\n", conn->local_ipaddr); + break; + default: + return -ENOSYS; + } + + return len; +} +EXPORT_SYMBOL_GPL(iscsi_conn_get_param); + +int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, + char *buf) +{ + struct iscsi_host *ihost = shost_priv(shost); + int len; + + switch (param) { + case ISCSI_HOST_PARAM_NETDEV_NAME: + len = sysfs_emit(buf, "%s\n", ihost->netdev); + break; + case ISCSI_HOST_PARAM_HWADDRESS: + len = sysfs_emit(buf, "%s\n", ihost->hwaddress); + break; + case ISCSI_HOST_PARAM_INITIATOR_NAME: + len = sysfs_emit(buf, "%s\n", ihost->initiatorname); + break; + default: + return -ENOSYS; + } + + return len; +} +EXPORT_SYMBOL_GPL(iscsi_host_get_param); + +int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param, + char *buf, int buflen) +{ + struct iscsi_host *ihost = shost_priv(shost); + + switch (param) { + case ISCSI_HOST_PARAM_NETDEV_NAME: + return iscsi_switch_str_param(&ihost->netdev, buf); + case ISCSI_HOST_PARAM_HWADDRESS: + return iscsi_switch_str_param(&ihost->hwaddress, buf); + case ISCSI_HOST_PARAM_INITIATOR_NAME: + return iscsi_switch_str_param(&ihost->initiatorname, buf); + default: + return -ENOSYS; + } + + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_host_set_param); + +MODULE_AUTHOR("Mike Christie"); +MODULE_DESCRIPTION("iSCSI library functions"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c new file mode 100644 index 000000000..c182aa83f --- /dev/null +++ b/drivers/scsi/libiscsi_tcp.c @@ -0,0 +1,1250 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * iSCSI over TCP/IP Data-Path lib + * + * Copyright (C) 2004 Dmitry Yusupov + * Copyright (C) 2004 Alex Aizman + * Copyright (C) 2005 - 2006 Mike Christie + * Copyright (C) 2006 Red Hat, Inc. All rights reserved. 
+ * maintained by open-iscsi@googlegroups.com + * + * Credits: + * Christoph Hellwig + * FUJITA Tomonori + * Arne Redlich + * Zhenyu Wang + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iscsi_tcp.h" + +MODULE_AUTHOR("Mike Christie , " + "Dmitry Yusupov , " + "Alex Aizman "); +MODULE_DESCRIPTION("iSCSI/TCP data-path"); +MODULE_LICENSE("GPL"); + +static int iscsi_dbg_libtcp; +module_param_named(debug_libiscsi_tcp, iscsi_dbg_libtcp, int, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug_libiscsi_tcp, "Turn on debugging for libiscsi_tcp " + "module. Set to 1 to turn on, and zero to turn off. Default " + "is off."); + +#define ISCSI_DBG_TCP(_conn, dbg_fmt, arg...) \ + do { \ + if (iscsi_dbg_libtcp) \ + iscsi_conn_printk(KERN_INFO, _conn, \ + "%s " dbg_fmt, \ + __func__, ##arg); \ + iscsi_dbg_trace(trace_iscsi_dbg_tcp, \ + &(_conn)->cls_conn->dev, \ + "%s " dbg_fmt, __func__, ##arg);\ + } while (0); + +static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, + struct iscsi_segment *segment); + +/* + * Scatterlist handling: inside the iscsi_segment, we + * remember an index into the scatterlist, and set data/size + * to the current scatterlist entry. For highmem pages, we + * kmap as needed. + * + * Note that the page is unmapped when we return from + * TCP's data_ready handler, so we may end up mapping and + * unmapping the same page repeatedly. The whole reason + * for this is that we shouldn't keep the page mapped + * outside the softirq. + */ + +/** + * iscsi_tcp_segment_init_sg - init indicated scatterlist entry + * @segment: the buffer object + * @sg: scatterlist + * @offset: byte offset into that sg entry + * + * This function sets up the segment so that subsequent + * data is copied to the indicated sg entry, at the given + * offset. + */ +static inline void +iscsi_tcp_segment_init_sg(struct iscsi_segment *segment, + struct scatterlist *sg, unsigned int offset) +{ + segment->sg = sg; + segment->sg_offset = offset; + segment->size = min(sg->length - offset, + segment->total_size - segment->total_copied); + segment->data = NULL; +} + +/** + * iscsi_tcp_segment_map - map the current S/G page + * @segment: iscsi_segment + * @recv: 1 if called from recv path + * + * We only need to possibly kmap data if scatter lists are being used, + * because the iscsi passthrough and internal IO paths will never use high + * mem pages. + */ +static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv) +{ + struct scatterlist *sg; + + if (segment->data != NULL || !segment->sg) + return; + + sg = segment->sg; + BUG_ON(segment->sg_mapped); + BUG_ON(sg->length == 0); + + /* + * We always map for the recv path. + * + * If the page count is greater than one it is ok to send + * to the network layer's zero copy send path. If not we + * have to go the slow sendmsg path. + * + * Same goes for slab pages: skb_can_coalesce() allows + * coalescing neighboring slab objects into a single frag which + * triggers one of hardened usercopy checks. 
+ */ + if (!recv && sendpage_ok(sg_page(sg))) + return; + + if (recv) { + segment->atomic_mapped = true; + segment->sg_mapped = kmap_atomic(sg_page(sg)); + } else { + segment->atomic_mapped = false; + /* the xmit path can sleep with the page mapped so use kmap */ + segment->sg_mapped = kmap(sg_page(sg)); + } + + segment->data = segment->sg_mapped + sg->offset + segment->sg_offset; +} + +void iscsi_tcp_segment_unmap(struct iscsi_segment *segment) +{ + if (segment->sg_mapped) { + if (segment->atomic_mapped) + kunmap_atomic(segment->sg_mapped); + else + kunmap(sg_page(segment->sg)); + segment->sg_mapped = NULL; + segment->data = NULL; + } +} +EXPORT_SYMBOL_GPL(iscsi_tcp_segment_unmap); + +/* + * Splice the digest buffer into the buffer + */ +static inline void +iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest) +{ + segment->data = digest; + segment->digest_len = ISCSI_DIGEST_SIZE; + segment->total_size += ISCSI_DIGEST_SIZE; + segment->size = ISCSI_DIGEST_SIZE; + segment->copied = 0; + segment->sg = NULL; + segment->hash = NULL; +} + +/** + * iscsi_tcp_segment_done - check whether the segment is complete + * @tcp_conn: iscsi tcp connection + * @segment: iscsi segment to check + * @recv: set to one of this is called from the recv path + * @copied: number of bytes copied + * + * Check if we're done receiving this segment. If the receive + * buffer is full but we expect more data, move on to the + * next entry in the scatterlist. + * + * If the amount of data we received isn't a multiple of 4, + * we will transparently receive the pad bytes, too. + * + * This function must be re-entrant. + */ +int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn, + struct iscsi_segment *segment, int recv, + unsigned copied) +{ + struct scatterlist sg; + unsigned int pad; + + ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copied %u %u size %u %s\n", + segment->copied, copied, segment->size, + recv ? "recv" : "xmit"); + if (segment->hash && copied) { + /* + * If a segment is kmapd we must unmap it before sending + * to the crypto layer since that will try to kmap it again. + */ + iscsi_tcp_segment_unmap(segment); + + if (!segment->data) { + sg_init_table(&sg, 1); + sg_set_page(&sg, sg_page(segment->sg), copied, + segment->copied + segment->sg_offset + + segment->sg->offset); + } else + sg_init_one(&sg, segment->data + segment->copied, + copied); + ahash_request_set_crypt(segment->hash, &sg, NULL, copied); + crypto_ahash_update(segment->hash); + } + + segment->copied += copied; + if (segment->copied < segment->size) { + iscsi_tcp_segment_map(segment, recv); + return 0; + } + + segment->total_copied += segment->copied; + segment->copied = 0; + segment->size = 0; + + /* Unmap the current scatterlist page, if there is one. */ + iscsi_tcp_segment_unmap(segment); + + /* Do we have more scatterlist entries? */ + ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "total copied %u total size %u\n", + segment->total_copied, segment->total_size); + if (segment->total_copied < segment->total_size) { + /* Proceed to the next entry in the scatterlist. */ + iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg), + 0); + iscsi_tcp_segment_map(segment, recv); + BUG_ON(segment->size == 0); + return 0; + } + + /* Do we need to handle padding? 
*/ + if (!(tcp_conn->iscsi_conn->session->tt->caps & CAP_PADDING_OFFLOAD)) { + pad = iscsi_padding(segment->total_copied); + if (pad != 0) { + ISCSI_DBG_TCP(tcp_conn->iscsi_conn, + "consume %d pad bytes\n", pad); + segment->total_size += pad; + segment->size = pad; + segment->data = segment->padbuf; + return 0; + } + } + + /* + * Set us up for transferring the data digest. hdr digest + * is completely handled in hdr done function. + */ + if (segment->hash) { + ahash_request_set_crypt(segment->hash, NULL, + segment->digest, 0); + crypto_ahash_final(segment->hash); + iscsi_tcp_segment_splice_digest(segment, + recv ? segment->recv_digest : segment->digest); + return 0; + } + + return 1; +} +EXPORT_SYMBOL_GPL(iscsi_tcp_segment_done); + +/** + * iscsi_tcp_segment_recv - copy data to segment + * @tcp_conn: the iSCSI TCP connection + * @segment: the buffer to copy to + * @ptr: data pointer + * @len: amount of data available + * + * This function copies up to @len bytes to the + * given buffer, and returns the number of bytes + * consumed, which can actually be less than @len. + * + * If hash digest is enabled, the function will update the + * hash while copying. + * Combining these two operations doesn't buy us a lot (yet), + * but in the future we could implement combined copy+crc, + * just way we do for network layer checksums. + */ +static int +iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn, + struct iscsi_segment *segment, const void *ptr, + unsigned int len) +{ + unsigned int copy = 0, copied = 0; + + while (!iscsi_tcp_segment_done(tcp_conn, segment, 1, copy)) { + if (copied == len) { + ISCSI_DBG_TCP(tcp_conn->iscsi_conn, + "copied %d bytes\n", len); + break; + } + + copy = min(len - copied, segment->size - segment->copied); + ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copying %d\n", copy); + memcpy(segment->data + segment->copied, ptr + copied, copy); + copied += copy; + } + return copied; +} + +inline void +iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr, + size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE]) +{ + struct scatterlist sg; + + sg_init_one(&sg, hdr, hdrlen); + ahash_request_set_crypt(hash, &sg, digest, hdrlen); + crypto_ahash_digest(hash); +} +EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header); + +static inline int +iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn, + struct iscsi_segment *segment) +{ + if (!segment->digest_len) + return 1; + + if (memcmp(segment->recv_digest, segment->digest, + segment->digest_len)) { + ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "digest mismatch\n"); + return 0; + } + + return 1; +} + +/* + * Helper function to set up segment buffer + */ +static inline void +__iscsi_segment_init(struct iscsi_segment *segment, size_t size, + iscsi_segment_done_fn_t *done, struct ahash_request *hash) +{ + memset(segment, 0, sizeof(*segment)); + segment->total_size = size; + segment->done = done; + + if (hash) { + segment->hash = hash; + crypto_ahash_init(hash); + } +} + +inline void +iscsi_segment_init_linear(struct iscsi_segment *segment, void *data, + size_t size, iscsi_segment_done_fn_t *done, + struct ahash_request *hash) +{ + __iscsi_segment_init(segment, size, done, hash); + segment->data = data; + segment->size = size; +} +EXPORT_SYMBOL_GPL(iscsi_segment_init_linear); + +inline int +iscsi_segment_seek_sg(struct iscsi_segment *segment, + struct scatterlist *sg_list, unsigned int sg_count, + unsigned int offset, size_t size, + iscsi_segment_done_fn_t *done, + struct ahash_request *hash) +{ + struct scatterlist *sg; + unsigned int i; + + 
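[Editorial sketch, not part of the patch.] iscsi_tcp_segment_done() above transparently receives the pad bytes whenever the segment length is not a multiple of four (unless the transport advertises CAP_PADDING_OFFLOAD). The pad count follows the iSCSI rule of padding each segment to a four-byte word boundary; the small user-space helper below reproduces that computation (the function name is ours, the kernel uses iscsi_padding() for the same thing).

#include <stdio.h>

/* Bytes needed to pad an iSCSI segment out to a 4-byte boundary. */
static unsigned int demo_padding(unsigned int len)
{
	unsigned int rem = len & 3;

	return rem ? 4 - rem : 0;
}

int main(void)
{
	unsigned int lens[] = { 0, 1, 2, 3, 4, 510, 512, 8191 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("segment %4u bytes -> %u pad byte(s)\n",
		       lens[i], demo_padding(lens[i]));
	return 0;
}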
__iscsi_segment_init(segment, size, done, hash); + for_each_sg(sg_list, sg, sg_count, i) { + if (offset < sg->length) { + iscsi_tcp_segment_init_sg(segment, sg, offset); + return 0; + } + offset -= sg->length; + } + + return ISCSI_ERR_DATA_OFFSET; +} +EXPORT_SYMBOL_GPL(iscsi_segment_seek_sg); + +/** + * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception + * @tcp_conn: iscsi connection to prep for + * + * This function always passes NULL for the hash argument, because when this + * function is called we do not yet know the final size of the header and want + * to delay the digest processing until we know that. + */ +void iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn) +{ + ISCSI_DBG_TCP(tcp_conn->iscsi_conn, + "(%s)\n", tcp_conn->iscsi_conn->hdrdgst_en ? + "digest enabled" : "digest disabled"); + iscsi_segment_init_linear(&tcp_conn->in.segment, + tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr), + iscsi_tcp_hdr_recv_done, NULL); +} +EXPORT_SYMBOL_GPL(iscsi_tcp_hdr_recv_prep); + +/* + * Handle incoming reply to any other type of command + */ +static int +iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn, + struct iscsi_segment *segment) +{ + struct iscsi_conn *conn = tcp_conn->iscsi_conn; + int rc = 0; + + if (!iscsi_tcp_dgst_verify(tcp_conn, segment)) + return ISCSI_ERR_DATA_DGST; + + rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, + conn->data, tcp_conn->in.datalen); + if (rc) + return rc; + + iscsi_tcp_hdr_recv_prep(tcp_conn); + return 0; +} + +static void +iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn) +{ + struct iscsi_conn *conn = tcp_conn->iscsi_conn; + struct ahash_request *rx_hash = NULL; + + if (conn->datadgst_en && + !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) + rx_hash = tcp_conn->rx_hash; + + iscsi_segment_init_linear(&tcp_conn->in.segment, + conn->data, tcp_conn->in.datalen, + iscsi_tcp_data_recv_done, rx_hash); +} + +/** + * iscsi_tcp_cleanup_task - free tcp_task resources + * @task: iscsi task + * + * must be called with session back_lock + */ +void iscsi_tcp_cleanup_task(struct iscsi_task *task) +{ + struct iscsi_tcp_task *tcp_task = task->dd_data; + struct iscsi_r2t_info *r2t; + + /* nothing to do for mgmt */ + if (!task->sc) + return; + + spin_lock_bh(&tcp_task->queue2pool); + /* flush task's r2t queues */ + while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) { + kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t, + sizeof(void*)); + ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n"); + } + + r2t = tcp_task->r2t; + if (r2t != NULL) { + kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t, + sizeof(void*)); + tcp_task->r2t = NULL; + } + spin_unlock_bh(&tcp_task->queue2pool); +} +EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task); + +/** + * iscsi_tcp_data_in - SCSI Data-In Response processing + * @conn: iscsi connection + * @task: scsi command task + */ +static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_tcp_task *tcp_task = task->dd_data; + struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr; + int datasn = be32_to_cpu(rhdr->datasn); + unsigned total_in_length = task->sc->sdb.length; + + /* + * lib iscsi will update this in the completion handling if there + * is status. 
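[Editorial sketch, not part of the patch.] iscsi_segment_seek_sg() above walks the command's scatterlist until it finds the entry containing the requested byte offset, then points the segment at that entry with the remaining in-entry offset. The same walk over a plain array of buffer lengths in user-space C; the element sizes are arbitrary sample data.

#include <stdio.h>

/*
 * Find which buffer in a list contains absolute byte 'offset', and the
 * offset within that buffer - the same walk iscsi_segment_seek_sg() does
 * over a scatterlist.
 */
static int seek_buf(const unsigned int *lengths, unsigned int count,
		    unsigned int offset, unsigned int *idx, unsigned int *in_off)
{
	for (unsigned int i = 0; i < count; i++) {
		if (offset < lengths[i]) {
			*idx = i;
			*in_off = offset;
			return 0;
		}
		offset -= lengths[i];
	}
	return -1;	/* past the end: the kernel returns ISCSI_ERR_DATA_OFFSET */
}

int main(void)
{
	unsigned int lengths[] = { 4096, 4096, 2048 };	/* three "sg entries" */
	unsigned int idx, in_off;

	if (!seek_buf(lengths, 3, 6000, &idx, &in_off))
		printf("offset 6000 lands in entry %u at offset %u\n", idx, in_off);
	if (seek_buf(lengths, 3, 20000, &idx, &in_off))
		printf("offset 20000 is beyond the buffer list\n");
	return 0;
}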
+ */ + if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) + iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr); + + if (tcp_conn->in.datalen == 0) + return 0; + + if (tcp_task->exp_datasn != datasn) { + ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->datasn(%d)" + "\n", tcp_task->exp_datasn, datasn); + return ISCSI_ERR_DATASN; + } + + tcp_task->exp_datasn++; + + tcp_task->data_offset = be32_to_cpu(rhdr->offset); + if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) { + ISCSI_DBG_TCP(conn, "data_offset(%d) + data_len(%d) > " + "total_length_in(%d)\n", tcp_task->data_offset, + tcp_conn->in.datalen, total_in_length); + return ISCSI_ERR_DATA_OFFSET; + } + + conn->datain_pdus_cnt++; + return 0; +} + +/** + * iscsi_tcp_r2t_rsp - iSCSI R2T Response processing + * @conn: iscsi connection + * @hdr: PDU header + */ +static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) +{ + struct iscsi_session *session = conn->session; + struct iscsi_tcp_task *tcp_task; + struct iscsi_tcp_conn *tcp_conn; + struct iscsi_r2t_rsp *rhdr; + struct iscsi_r2t_info *r2t; + struct iscsi_task *task; + u32 data_length; + u32 data_offset; + int r2tsn; + int rc; + + spin_lock(&session->back_lock); + task = iscsi_itt_to_ctask(conn, hdr->itt); + if (!task) { + spin_unlock(&session->back_lock); + return ISCSI_ERR_BAD_ITT; + } else if (task->sc->sc_data_direction != DMA_TO_DEVICE) { + spin_unlock(&session->back_lock); + return ISCSI_ERR_PROTO; + } + /* + * A bad target might complete the cmd before we have handled R2Ts + * so get a ref to the task that will be dropped in the xmit path. + */ + if (task->state != ISCSI_TASK_RUNNING) { + spin_unlock(&session->back_lock); + /* Let the path that got the early rsp complete it */ + return 0; + } + task->last_xfer = jiffies; + if (!iscsi_get_task(task)) { + spin_unlock(&session->back_lock); + /* Let the path that got the early rsp complete it */ + return 0; + } + + tcp_conn = conn->dd_data; + rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr; + /* fill-in new R2T associated with the task */ + iscsi_update_cmdsn(session, (struct iscsi_nopin *)rhdr); + spin_unlock(&session->back_lock); + + if (tcp_conn->in.datalen) { + iscsi_conn_printk(KERN_ERR, conn, + "invalid R2t with datalen %d\n", + tcp_conn->in.datalen); + rc = ISCSI_ERR_DATALEN; + goto put_task; + } + + tcp_task = task->dd_data; + r2tsn = be32_to_cpu(rhdr->r2tsn); + if (tcp_task->exp_datasn != r2tsn){ + ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->r2tsn(%d)\n", + tcp_task->exp_datasn, r2tsn); + rc = ISCSI_ERR_R2TSN; + goto put_task; + } + + if (session->state != ISCSI_STATE_LOGGED_IN) { + iscsi_conn_printk(KERN_INFO, conn, + "dropping R2T itt %d in recovery.\n", + task->itt); + rc = 0; + goto put_task; + } + + data_length = be32_to_cpu(rhdr->data_length); + if (data_length == 0) { + iscsi_conn_printk(KERN_ERR, conn, + "invalid R2T with zero data len\n"); + rc = ISCSI_ERR_DATALEN; + goto put_task; + } + + if (data_length > session->max_burst) + ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max " + "burst %u. 
Attempting to execute request.\n", + data_length, session->max_burst); + + data_offset = be32_to_cpu(rhdr->data_offset); + if (data_offset + data_length > task->sc->sdb.length) { + iscsi_conn_printk(KERN_ERR, conn, + "invalid R2T with data len %u at offset %u " + "and total length %d\n", data_length, + data_offset, task->sc->sdb.length); + rc = ISCSI_ERR_DATALEN; + goto put_task; + } + + spin_lock(&tcp_task->pool2queue); + rc = kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *)); + if (!rc) { + iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. " + "Target has sent more R2Ts than it " + "negotiated for or driver has leaked.\n"); + spin_unlock(&tcp_task->pool2queue); + rc = ISCSI_ERR_PROTO; + goto put_task; + } + + r2t->exp_statsn = rhdr->statsn; + r2t->data_length = data_length; + r2t->data_offset = data_offset; + + r2t->ttt = rhdr->ttt; /* no flip */ + r2t->datasn = 0; + r2t->sent = 0; + + tcp_task->exp_datasn = r2tsn + 1; + kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*)); + conn->r2t_pdus_cnt++; + spin_unlock(&tcp_task->pool2queue); + + iscsi_requeue_task(task); + return 0; + +put_task: + iscsi_put_task(task); + return rc; +} + +/* + * Handle incoming reply to DataIn command + */ +static int +iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn, + struct iscsi_segment *segment) +{ + struct iscsi_conn *conn = tcp_conn->iscsi_conn; + struct iscsi_hdr *hdr = tcp_conn->in.hdr; + int rc; + + if (!iscsi_tcp_dgst_verify(tcp_conn, segment)) + return ISCSI_ERR_DATA_DGST; + + /* check for non-exceptional status */ + if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { + rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); + if (rc) + return rc; + } + + iscsi_tcp_hdr_recv_prep(tcp_conn); + return 0; +} + +/** + * iscsi_tcp_hdr_dissect - process PDU header + * @conn: iSCSI connection + * @hdr: PDU header + * + * This function analyzes the header of the PDU received, + * and performs several sanity checks. If the PDU is accompanied + * by data, the receive buffer is set up to copy the incoming data + * to the correct location. + */ +static int +iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) +{ + int rc = 0, opcode, ahslen; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_task *task; + + /* verify PDU length */ + tcp_conn->in.datalen = ntoh24(hdr->dlength); + if (tcp_conn->in.datalen > conn->max_recv_dlength) { + iscsi_conn_printk(KERN_ERR, conn, + "iscsi_tcp: datalen %d > %d\n", + tcp_conn->in.datalen, conn->max_recv_dlength); + return ISCSI_ERR_DATALEN; + } + + /* Additional header segments. So far, we don't + * process additional headers. 
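[Editorial sketch, not part of the patch.] The header dissection here decodes DataSegmentLength with ntoh24(hdr->dlength), a 24-bit big-endian field in the BHS, and sizes any additional header segments from TotalAHSLength, which is counted in four-byte words (hence the hlength << 2). A quick user-space illustration of both decodings; the sample header bytes are made up.

#include <stdio.h>
#include <stdint.h>

/* Decode a 24-bit big-endian length, as ntoh24() does for dlength. */
static uint32_t demo_ntoh24(const uint8_t p[3])
{
	return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

int main(void)
{
	uint8_t dlength[3] = { 0x00, 0x02, 0x00 };	/* 512-byte data segment */
	uint8_t hlength = 3;				/* TotalAHSLength, 4-byte words */

	printf("DataSegmentLength = %u bytes\n", demo_ntoh24(dlength));
	printf("AHS length        = %u bytes\n", (unsigned int)hlength << 2);
	return 0;
}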
+ */ + ahslen = hdr->hlength << 2; + + opcode = hdr->opcode & ISCSI_OPCODE_MASK; + /* verify itt (itt encoding: age+cid+itt) */ + rc = iscsi_verify_itt(conn, hdr->itt); + if (rc) + return rc; + + ISCSI_DBG_TCP(conn, "opcode 0x%x ahslen %d datalen %d\n", + opcode, ahslen, tcp_conn->in.datalen); + + switch(opcode) { + case ISCSI_OP_SCSI_DATA_IN: + spin_lock(&conn->session->back_lock); + task = iscsi_itt_to_ctask(conn, hdr->itt); + if (!task) + rc = ISCSI_ERR_BAD_ITT; + else + rc = iscsi_tcp_data_in(conn, task); + if (rc) { + spin_unlock(&conn->session->back_lock); + break; + } + + if (tcp_conn->in.datalen) { + struct iscsi_tcp_task *tcp_task = task->dd_data; + struct ahash_request *rx_hash = NULL; + struct scsi_data_buffer *sdb = &task->sc->sdb; + + /* + * Setup copy of Data-In into the struct scsi_cmnd + * Scatterlist case: + * We set up the iscsi_segment to point to the next + * scatterlist entry to copy to. As we go along, + * we move on to the next scatterlist entry and + * update the digest per-entry. + */ + if (conn->datadgst_en && + !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) + rx_hash = tcp_conn->rx_hash; + + ISCSI_DBG_TCP(conn, "iscsi_tcp_begin_data_in( " + "offset=%d, datalen=%d)\n", + tcp_task->data_offset, + tcp_conn->in.datalen); + task->last_xfer = jiffies; + rc = iscsi_segment_seek_sg(&tcp_conn->in.segment, + sdb->table.sgl, + sdb->table.nents, + tcp_task->data_offset, + tcp_conn->in.datalen, + iscsi_tcp_process_data_in, + rx_hash); + spin_unlock(&conn->session->back_lock); + return rc; + } + rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); + spin_unlock(&conn->session->back_lock); + break; + case ISCSI_OP_SCSI_CMD_RSP: + if (tcp_conn->in.datalen) { + iscsi_tcp_data_recv_prep(tcp_conn); + return 0; + } + rc = iscsi_complete_pdu(conn, hdr, NULL, 0); + break; + case ISCSI_OP_R2T: + if (ahslen) { + rc = ISCSI_ERR_AHSLEN; + break; + } + rc = iscsi_tcp_r2t_rsp(conn, hdr); + break; + case ISCSI_OP_LOGIN_RSP: + case ISCSI_OP_TEXT_RSP: + case ISCSI_OP_REJECT: + case ISCSI_OP_ASYNC_EVENT: + /* + * It is possible that we could get a PDU with a buffer larger + * than 8K, but there are no targets that currently do this. + * For now we fail until we find a vendor that needs it + */ + if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) { + iscsi_conn_printk(KERN_ERR, conn, + "iscsi_tcp: received buffer of " + "len %u but conn buffer is only %u " + "(opcode %0x)\n", + tcp_conn->in.datalen, + ISCSI_DEF_MAX_RECV_SEG_LEN, opcode); + rc = ISCSI_ERR_PROTO; + break; + } + + /* If there's data coming in with the response, + * receive it to the connection's buffer. + */ + if (tcp_conn->in.datalen) { + iscsi_tcp_data_recv_prep(tcp_conn); + return 0; + } + fallthrough; + case ISCSI_OP_LOGOUT_RSP: + case ISCSI_OP_NOOP_IN: + case ISCSI_OP_SCSI_TMFUNC_RSP: + rc = iscsi_complete_pdu(conn, hdr, NULL, 0); + break; + default: + rc = ISCSI_ERR_BAD_OPCODE; + break; + } + + if (rc == 0) { + /* Anything that comes with data should have + * been handled above. */ + if (tcp_conn->in.datalen) + return ISCSI_ERR_PROTO; + iscsi_tcp_hdr_recv_prep(tcp_conn); + } + + return rc; +} + +/** + * iscsi_tcp_hdr_recv_done - process PDU header + * @tcp_conn: iSCSI TCP connection + * @segment: the buffer segment being processed + * + * This is the callback invoked when the PDU header has + * been received. If the header is followed by additional + * header segments, we go back for more data. 
+ */ +static int +iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, + struct iscsi_segment *segment) +{ + struct iscsi_conn *conn = tcp_conn->iscsi_conn; + struct iscsi_hdr *hdr; + + /* Check if there are additional header segments + * *prior* to computing the digest, because we + * may need to go back to the caller for more. + */ + hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf; + if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) { + /* Bump the header length - the caller will + * just loop around and get the AHS for us, and + * call again. */ + unsigned int ahslen = hdr->hlength << 2; + + /* Make sure we don't overflow */ + if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf)) + return ISCSI_ERR_AHSLEN; + + segment->total_size += ahslen; + segment->size += ahslen; + return 0; + } + + /* We're done processing the header. See if we're doing + * header digests; if so, set up the recv_digest buffer + * and go back for more. */ + if (conn->hdrdgst_en && + !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) { + if (segment->digest_len == 0) { + /* + * Even if we offload the digest processing we + * splice it in so we can increment the skb/segment + * counters in preparation for the data segment. + */ + iscsi_tcp_segment_splice_digest(segment, + segment->recv_digest); + return 0; + } + + iscsi_tcp_dgst_header(tcp_conn->rx_hash, hdr, + segment->total_copied - ISCSI_DIGEST_SIZE, + segment->digest); + + if (!iscsi_tcp_dgst_verify(tcp_conn, segment)) + return ISCSI_ERR_HDR_DGST; + } + + tcp_conn->in.hdr = hdr; + return iscsi_tcp_hdr_dissect(conn, hdr); +} + +/** + * iscsi_tcp_recv_segment_is_hdr - tests if we are reading in a header + * @tcp_conn: iscsi tcp conn + * + * returns non zero if we are currently processing or setup to process + * a header. + */ +inline int iscsi_tcp_recv_segment_is_hdr(struct iscsi_tcp_conn *tcp_conn) +{ + return tcp_conn->in.segment.done == iscsi_tcp_hdr_recv_done; +} +EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr); + +/** + * iscsi_tcp_recv_skb - Process skb + * @conn: iscsi connection + * @skb: network buffer with header and/or data segment + * @offset: offset in skb + * @offloaded: bool indicating if transfer was offloaded + * @status: iscsi TCP status result + * + * Will return status of transfer in @status. And will return + * number of bytes copied. + */ +int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb, + unsigned int offset, bool offloaded, int *status) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_segment *segment = &tcp_conn->in.segment; + struct skb_seq_state seq; + unsigned int consumed = 0; + int rc = 0; + + ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset); + /* + * Update for each skb instead of pdu, because over slow networks a + * data_in's data could take a while to read in. We also want to + * account for r2ts. + */ + conn->last_recv = jiffies; + + if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) { + ISCSI_DBG_TCP(conn, "Rx suspended!\n"); + *status = ISCSI_TCP_SUSPENDED; + return 0; + } + + if (offloaded) { + segment->total_copied = segment->total_size; + goto segment_done; + } + + skb_prepare_seq_read(skb, offset, skb->len, &seq); + while (1) { + unsigned int avail; + const u8 *ptr; + + avail = skb_seq_read(consumed, &ptr, &seq); + if (avail == 0) { + ISCSI_DBG_TCP(conn, "no more data avail. 
Consumed %d\n", + consumed); + *status = ISCSI_TCP_SKB_DONE; + goto skb_done; + } + BUG_ON(segment->copied >= segment->size); + + ISCSI_DBG_TCP(conn, "skb %p ptr=%p avail=%u\n", skb, ptr, + avail); + rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail); + BUG_ON(rc == 0); + consumed += rc; + + if (segment->total_copied >= segment->total_size) { + skb_abort_seq_read(&seq); + goto segment_done; + } + } + +segment_done: + *status = ISCSI_TCP_SEGMENT_DONE; + ISCSI_DBG_TCP(conn, "segment done\n"); + rc = segment->done(tcp_conn, segment); + if (rc != 0) { + *status = ISCSI_TCP_CONN_ERR; + ISCSI_DBG_TCP(conn, "Error receiving PDU, errno=%d\n", rc); + iscsi_conn_failure(conn, rc); + return 0; + } + /* The done() functions sets up the next segment. */ + +skb_done: + conn->rxdata_octets += consumed; + return consumed; +} +EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb); + +/** + * iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands + * @task: scsi command task + */ +int iscsi_tcp_task_init(struct iscsi_task *task) +{ + struct iscsi_tcp_task *tcp_task = task->dd_data; + struct iscsi_conn *conn = task->conn; + struct scsi_cmnd *sc = task->sc; + int err; + + if (!sc) { + /* + * mgmt tasks do not have a scatterlist since they come + * in from the iscsi interface. + */ + ISCSI_DBG_TCP(conn, "mtask deq [itt 0x%x]\n", task->itt); + + return conn->session->tt->init_pdu(task, 0, task->data_count); + } + + BUG_ON(kfifo_len(&tcp_task->r2tqueue)); + tcp_task->exp_datasn = 0; + + /* Prepare PDU, optionally w/ immediate data */ + ISCSI_DBG_TCP(conn, "task deq [itt 0x%x imm %d unsol %d]\n", + task->itt, task->imm_count, task->unsol_r2t.data_length); + + err = conn->session->tt->init_pdu(task, 0, task->imm_count); + if (err) + return err; + task->imm_count = 0; + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_tcp_task_init); + +static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task) +{ + struct iscsi_tcp_task *tcp_task = task->dd_data; + struct iscsi_r2t_info *r2t = NULL; + + if (iscsi_task_has_unsol_data(task)) + r2t = &task->unsol_r2t; + else { + spin_lock_bh(&tcp_task->queue2pool); + if (tcp_task->r2t) { + r2t = tcp_task->r2t; + /* Continue with this R2T? */ + if (r2t->data_length <= r2t->sent) { + ISCSI_DBG_TCP(task->conn, + " done with r2t %p\n", r2t); + kfifo_in(&tcp_task->r2tpool.queue, + (void *)&tcp_task->r2t, + sizeof(void *)); + tcp_task->r2t = r2t = NULL; + } + } + + if (r2t == NULL) { + if (kfifo_out(&tcp_task->r2tqueue, + (void *)&tcp_task->r2t, sizeof(void *)) != + sizeof(void *)) + r2t = NULL; + else + r2t = tcp_task->r2t; + } + spin_unlock_bh(&tcp_task->queue2pool); + } + + return r2t; +} + +/** + * iscsi_tcp_task_xmit - xmit normal PDU task + * @task: iscsi command task + * + * We're expected to return 0 when everything was transmitted successfully, + * -EAGAIN if there's still data in the queue, or != 0 for any other kind + * of error. + */ +int iscsi_tcp_task_xmit(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + struct iscsi_session *session = conn->session; + struct iscsi_r2t_info *r2t; + int rc = 0; + +flush: + /* Flush any pending data first. */ + rc = session->tt->xmit_pdu(task); + if (rc < 0) + return rc; + + /* mgmt command */ + if (!task->sc) { + if (task->hdr->itt == RESERVED_ITT) + iscsi_put_task(task); + return 0; + } + + /* Are we done already? */ + if (task->sc->sc_data_direction != DMA_TO_DEVICE) + return 0; + + r2t = iscsi_tcp_get_curr_r2t(task); + if (r2t == NULL) { + /* Waiting for more R2Ts to arrive. 
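[Editorial sketch, not part of the patch.] iscsi_tcp_recv_skb() above repeatedly pulls whatever bytes the skb has available, feeds them to the current segment, and finishes with either ISCSI_TCP_SEGMENT_DONE (segment filled, possibly with skb bytes left over) or ISCSI_TCP_SKB_DONE (skb drained, segment still incomplete). The toy model below mimics that control flow with plain buffers so the two exit conditions are easy to see; all names and sizes are illustrative.

#include <stdio.h>
#include <string.h>

enum { DEMO_SEGMENT_DONE, DEMO_SKB_DONE };

struct demo_segment {
	char buf[16];		/* stands in for the segment's destination */
	unsigned int copied;
	unsigned int size;
};

/* Feed one "skb" worth of bytes to the segment; return how the pass ended. */
static int demo_recv(struct demo_segment *seg, const char *data,
		     unsigned int len, unsigned int *consumed)
{
	unsigned int copy;

	*consumed = 0;
	while (seg->copied < seg->size && *consumed < len) {
		copy = seg->size - seg->copied;
		if (copy > len - *consumed)
			copy = len - *consumed;
		memcpy(seg->buf + seg->copied, data + *consumed, copy);
		seg->copied += copy;
		*consumed += copy;
	}
	return seg->copied == seg->size ? DEMO_SEGMENT_DONE : DEMO_SKB_DONE;
}

int main(void)
{
	struct demo_segment seg = { .copied = 0, .size = 12 };
	unsigned int used;
	int status;

	status = demo_recv(&seg, "eight by", 8, &used);	/* skb drains first */
	printf("first skb: consumed %u, %s\n", used,
	       status == DEMO_SKB_DONE ? "segment still open" : "segment done");

	status = demo_recv(&seg, "tes more data", 13, &used); /* segment fills */
	printf("second skb: consumed %u, %s\n", used,
	       status == DEMO_SEGMENT_DONE ? "segment done" : "segment still open");
	return 0;
}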
*/ + ISCSI_DBG_TCP(conn, "no R2Ts yet\n"); + return 0; + } + + rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_DATA_OUT); + if (rc) + return rc; + iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr); + + ISCSI_DBG_TCP(conn, "sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n", + r2t, r2t->datasn - 1, task->hdr->itt, + r2t->data_offset + r2t->sent, r2t->data_count); + + rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent, + r2t->data_count); + if (rc) { + iscsi_conn_failure(conn, ISCSI_ERR_XMIT_FAILED); + return rc; + } + + r2t->sent += r2t->data_count; + goto flush; +} +EXPORT_SYMBOL_GPL(iscsi_tcp_task_xmit); + +struct iscsi_cls_conn * +iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size, + uint32_t conn_idx) + +{ + struct iscsi_conn *conn; + struct iscsi_cls_conn *cls_conn; + struct iscsi_tcp_conn *tcp_conn; + + cls_conn = iscsi_conn_setup(cls_session, + sizeof(*tcp_conn) + dd_data_size, conn_idx); + if (!cls_conn) + return NULL; + conn = cls_conn->dd_data; + /* + * due to strange issues with iser these are not set + * in iscsi_conn_setup + */ + conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN; + + tcp_conn = conn->dd_data; + tcp_conn->iscsi_conn = conn; + tcp_conn->dd_data = conn->dd_data + sizeof(*tcp_conn); + return cls_conn; +} +EXPORT_SYMBOL_GPL(iscsi_tcp_conn_setup); + +void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn) +{ + iscsi_conn_teardown(cls_conn); +} +EXPORT_SYMBOL_GPL(iscsi_tcp_conn_teardown); + +int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session) +{ + int i; + int cmd_i; + + /* + * initialize per-task: R2T pool and xmit queue + */ + for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { + struct iscsi_task *task = session->cmds[cmd_i]; + struct iscsi_tcp_task *tcp_task = task->dd_data; + + /* + * pre-allocated x2 as much r2ts to handle race when + * target acks DataOut faster than we data_xmit() queues + * could replenish r2tqueue. 
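[Editorial sketch, not part of the patch.] iscsi_tcp_task_xmit() above services one R2T at a time: each Data-Out PDU starts at data_offset + sent and carries data_count bytes, and the R2T is finished once sent reaches data_length. The user-space loop below walks an R2T the same way; the 8 KB per-PDU limit is only a stand-in for whatever maximum data segment length was actually negotiated.

#include <stdio.h>

struct demo_r2t {
	unsigned int data_offset;	/* where in the command buffer the R2T starts */
	unsigned int data_length;	/* total bytes the target asked for */
	unsigned int sent;		/* bytes already covered by Data-Out PDUs */
};

#define DEMO_MAX_DOUT	8192		/* per-PDU limit, assumed negotiated value */

int main(void)
{
	struct demo_r2t r2t = { .data_offset = 4096, .data_length = 20000, .sent = 0 };

	while (r2t.sent < r2t.data_length) {
		unsigned int count = r2t.data_length - r2t.sent;

		if (count > DEMO_MAX_DOUT)
			count = DEMO_MAX_DOUT;
		printf("Data-Out: buffer offset %u, %u bytes\n",
		       r2t.data_offset + r2t.sent, count);
		r2t.sent += count;
	}
	printf("R2T satisfied after %u bytes\n", r2t.sent);
	return 0;
}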
+ */ + + /* R2T pool */ + if (iscsi_pool_init(&tcp_task->r2tpool, + session->max_r2t * 2, NULL, + sizeof(struct iscsi_r2t_info))) { + goto r2t_alloc_fail; + } + + /* R2T xmit queue */ + if (kfifo_alloc(&tcp_task->r2tqueue, + session->max_r2t * 4 * sizeof(void*), GFP_KERNEL)) { + iscsi_pool_free(&tcp_task->r2tpool); + goto r2t_alloc_fail; + } + spin_lock_init(&tcp_task->pool2queue); + spin_lock_init(&tcp_task->queue2pool); + } + + return 0; + +r2t_alloc_fail: + for (i = 0; i < cmd_i; i++) { + struct iscsi_task *task = session->cmds[i]; + struct iscsi_tcp_task *tcp_task = task->dd_data; + + kfifo_free(&tcp_task->r2tqueue); + iscsi_pool_free(&tcp_task->r2tpool); + } + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_alloc); + +void iscsi_tcp_r2tpool_free(struct iscsi_session *session) +{ + int i; + + for (i = 0; i < session->cmds_max; i++) { + struct iscsi_task *task = session->cmds[i]; + struct iscsi_tcp_task *tcp_task = task->dd_data; + + kfifo_free(&tcp_task->r2tqueue); + iscsi_pool_free(&tcp_task->r2tpool); + } +} +EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_free); + +int iscsi_tcp_set_max_r2t(struct iscsi_conn *conn, char *buf) +{ + struct iscsi_session *session = conn->session; + unsigned short r2ts = 0; + + sscanf(buf, "%hu", &r2ts); + if (session->max_r2t == r2ts) + return 0; + + if (!r2ts || !is_power_of_2(r2ts)) + return -EINVAL; + + session->max_r2t = r2ts; + iscsi_tcp_r2tpool_free(session); + return iscsi_tcp_r2tpool_alloc(session); +} +EXPORT_SYMBOL_GPL(iscsi_tcp_set_max_r2t); + +void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn, + struct iscsi_stats *stats) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + + stats->txdata_octets = conn->txdata_octets; + stats->rxdata_octets = conn->rxdata_octets; + stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; + stats->dataout_pdus = conn->dataout_pdus_cnt; + stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; + stats->datain_pdus = conn->datain_pdus_cnt; + stats->r2t_pdus = conn->r2t_pdus_cnt; + stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; + stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; +} +EXPORT_SYMBOL_GPL(iscsi_tcp_conn_get_stats); diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig new file mode 100644 index 000000000..c640535d1 --- /dev/null +++ b/drivers/scsi/libsas/Kconfig @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Kernel configuration file for the SAS Class +# +# Copyright (C) 2005 Adaptec, Inc. All rights reserved. +# Copyright (C) 2005 Luben Tuikov +# + +config SCSI_SAS_LIBSAS + tristate "SAS Domain Transport Attributes" + depends on SCSI + select SCSI_SAS_ATTRS + help + This provides transport specific helpers for SAS drivers which + use the domain device construct (like the aic94xxx). + +config SCSI_SAS_ATA + bool "ATA support for libsas (requires libata)" + depends on SCSI_SAS_LIBSAS + depends on ATA = y || ATA = SCSI_SAS_LIBSAS + select SATA_HOST + help + Builds in ATA support into libsas. Will necessitate + the loading of libata along with libsas. + +config SCSI_SAS_HOST_SMP + bool "Support for SMP interpretation for SAS hosts" + default y + depends on SCSI_SAS_LIBSAS + help + Allows sas hosts to receive SMP frames. Selecting this + option builds an SMP interpreter into libsas. Say + N here if you want to save the few kb this consumes. 
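[Editorial sketch, not part of the patch.] iscsi_tcp_set_max_r2t() above rejects a MaxOutstandingR2T value that is zero or not a power of two before it reallocates the per-task R2T pools. The power-of-two test is the usual bit trick, shown below in user-space C (the function name is ours; the kernel code calls its is_power_of_2() helper).

#include <stdio.h>

/* True if n is a non-zero power of two - the same test is_power_of_2() makes. */
static int demo_is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned long vals[] = { 0, 1, 2, 3, 4, 6, 8, 16, 24, 32 };

	for (unsigned int i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("max_r2t=%2lu -> %s\n", vals[i],
		       demo_is_power_of_2(vals[i]) ? "accepted" : "rejected (-EINVAL)");
	return 0;
}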
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile new file mode 100644 index 000000000..9dc32736c --- /dev/null +++ b/drivers/scsi/libsas/Makefile @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Kernel Makefile for the libsas helpers +# +# Copyright (C) 2005 Adaptec, Inc. All rights reserved. +# Copyright (C) 2005 Luben Tuikov +# + +obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o +libsas-y += sas_init.o \ + sas_phy.o \ + sas_port.o \ + sas_event.o \ + sas_discover.o \ + sas_expander.o \ + sas_scsi_host.o \ + sas_task.o +libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o +libsas-$(CONFIG_SCSI_SAS_HOST_SMP) += sas_host_smp.o + +ccflags-y := -DDEBUG -I$(srctree)/drivers/scsi diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c new file mode 100644 index 000000000..12e265384 --- /dev/null +++ b/drivers/scsi/libsas/sas_ata.c @@ -0,0 +1,966 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Support for SATA devices on Serial Attached SCSI (SAS) controllers + * + * Copyright (C) 2006 IBM Corporation + * + * Written by: Darrick J. Wong , IBM Corporation + */ + +#include +#include +#include +#include + +#include +#include "sas_internal.h" +#include +#include +#include +#include +#include +#include +#include "scsi_sas_internal.h" +#include "scsi_transport_api.h" +#include + +static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts) +{ + /* Cheesy attempt to translate SAS errors into ATA. Hah! */ + + /* transport error */ + if (ts->resp == SAS_TASK_UNDELIVERED) + return AC_ERR_ATA_BUS; + + /* ts->resp == SAS_TASK_COMPLETE */ + /* task delivered, what happened afterwards? */ + switch (ts->stat) { + case SAS_DEV_NO_RESPONSE: + return AC_ERR_TIMEOUT; + case SAS_INTERRUPTED: + case SAS_PHY_DOWN: + case SAS_NAK_R_ERR: + return AC_ERR_ATA_BUS; + case SAS_DATA_UNDERRUN: + /* + * Some programs that use the taskfile interface + * (smartctl in particular) can cause underrun + * problems. Ignore these errors, perhaps at our + * peril. + */ + return 0; + case SAS_DATA_OVERRUN: + case SAS_QUEUE_FULL: + case SAS_DEVICE_UNKNOWN: + case SAS_OPEN_TO: + case SAS_OPEN_REJECT: + pr_warn("%s: Saw error %d. 
What to do?\n", + __func__, ts->stat); + return AC_ERR_OTHER; + case SAM_STAT_CHECK_CONDITION: + case SAS_ABORTED_TASK: + return AC_ERR_DEV; + case SAS_PROTO_RESPONSE: + /* This means the ending_fis has the error + * value; return 0 here to collect it + */ + return 0; + default: + return 0; + } +} + +static void sas_ata_task_done(struct sas_task *task) +{ + struct ata_queued_cmd *qc = task->uldd_task; + struct domain_device *dev = task->dev; + struct task_status_struct *stat = &task->task_status; + struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf; + struct sas_ha_struct *sas_ha = dev->port->ha; + enum ata_completion_errors ac; + unsigned long flags; + struct ata_link *link; + struct ata_port *ap; + + spin_lock_irqsave(&dev->done_lock, flags); + if (test_bit(SAS_HA_FROZEN, &sas_ha->state)) + task = NULL; + else if (qc && qc->scsicmd) + ASSIGN_SAS_TASK(qc->scsicmd, NULL); + spin_unlock_irqrestore(&dev->done_lock, flags); + + /* check if libsas-eh got to the task before us */ + if (unlikely(!task)) + return; + + if (!qc) + goto qc_already_gone; + + ap = qc->ap; + link = &ap->link; + + spin_lock_irqsave(ap->lock, flags); + /* check if we lost the race with libata/sas_ata_post_internal() */ + if (unlikely(ata_port_is_frozen(ap))) { + spin_unlock_irqrestore(ap->lock, flags); + if (qc->scsicmd) + goto qc_already_gone; + else { + /* if eh is not involved and the port is frozen then the + * ata internal abort process has taken responsibility + * for this sas_task + */ + return; + } + } + + if (stat->stat == SAS_PROTO_RESPONSE || + stat->stat == SAS_SAM_STAT_GOOD || + (stat->stat == SAS_SAM_STAT_CHECK_CONDITION && + dev->sata_dev.class == ATA_DEV_ATAPI)) { + memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE); + + if (!link->sactive) { + qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]); + } else { + link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]); + if (unlikely(link->eh_info.err_mask)) + qc->flags |= ATA_QCFLAG_EH; + } + } else { + ac = sas_to_ata_err(stat); + if (ac) { + pr_warn("%s: SAS error 0x%x\n", __func__, stat->stat); + /* We saw a SAS error. Send a vague error. 
*/ + if (!link->sactive) { + qc->err_mask = ac; + } else { + link->eh_info.err_mask |= AC_ERR_DEV; + qc->flags |= ATA_QCFLAG_EH; + } + + dev->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */ + dev->sata_dev.fis[3] = ATA_ABORTED; /* tf error */ + } + } + + qc->lldd_task = NULL; + ata_qc_complete(qc); + spin_unlock_irqrestore(ap->lock, flags); + +qc_already_gone: + sas_free_task(task); +} + +static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) + __must_hold(ap->lock) +{ + struct sas_task *task; + struct scatterlist *sg; + int ret = AC_ERR_SYSTEM; + unsigned int si, xfer = 0; + struct ata_port *ap = qc->ap; + struct domain_device *dev = ap->private_data; + struct sas_ha_struct *sas_ha = dev->port->ha; + struct Scsi_Host *host = sas_ha->shost; + struct sas_internal *i = to_sas_internal(host->transportt); + + /* TODO: we should try to remove that unlock */ + spin_unlock(ap->lock); + + /* If the device fell off, no sense in issuing commands */ + if (test_bit(SAS_DEV_GONE, &dev->state)) + goto out; + + task = sas_alloc_task(GFP_ATOMIC); + if (!task) + goto out; + task->dev = dev; + task->task_proto = SAS_PROTOCOL_STP; + task->task_done = sas_ata_task_done; + + /* For NCQ commands, zero out the tag libata assigned us */ + if (ata_is_ncq(qc->tf.protocol)) + qc->tf.nsect = 0; + + ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis); + task->uldd_task = qc; + if (ata_is_atapi(qc->tf.protocol)) { + memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); + task->total_xfer_len = qc->nbytes; + task->num_scatter = qc->n_elem; + task->data_dir = qc->dma_dir; + } else if (!ata_is_data(qc->tf.protocol)) { + task->data_dir = DMA_NONE; + } else { + for_each_sg(qc->sg, sg, qc->n_elem, si) + xfer += sg_dma_len(sg); + + task->total_xfer_len = xfer; + task->num_scatter = si; + task->data_dir = qc->dma_dir; + } + task->scatter = qc->sg; + qc->lldd_task = task; + + task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol); + task->ata_task.dma_xfer = ata_is_dma(qc->tf.protocol); + + if (qc->flags & ATA_QCFLAG_RESULT_TF) + task->ata_task.return_fis_on_success = 1; + + if (qc->scsicmd) + ASSIGN_SAS_TASK(qc->scsicmd, task); + + ret = i->dft->lldd_execute_task(task, GFP_ATOMIC); + if (ret) { + pr_debug("lldd_execute_task returned: %d\n", ret); + + if (qc->scsicmd) + ASSIGN_SAS_TASK(qc->scsicmd, NULL); + sas_free_task(task); + qc->lldd_task = NULL; + ret = AC_ERR_SYSTEM; + } + + out: + spin_lock(ap->lock); + return ret; +} + +static void sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc) +{ + struct domain_device *dev = qc->ap->private_data; + + ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf); +} + +static struct sas_internal *dev_to_sas_internal(struct domain_device *dev) +{ + return to_sas_internal(dev->port->ha->shost->transportt); +} + +static int sas_get_ata_command_set(struct domain_device *dev) +{ + struct ata_taskfile tf; + + if (dev->dev_type == SAS_SATA_PENDING) + return ATA_DEV_UNKNOWN; + + ata_tf_from_fis(dev->frame_rcvd, &tf); + + return ata_dev_classify(&tf); +} + +int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy) +{ + if (phy->attached_tproto & SAS_PROTOCOL_STP) + dev->tproto = phy->attached_tproto; + if (phy->attached_sata_dev) + dev->tproto |= SAS_SATA_DEV; + + if (phy->attached_dev_type == SAS_SATA_PENDING) + dev->dev_type = SAS_SATA_PENDING; + else { + int res; + + dev->dev_type = SAS_SATA_DEV; + res = sas_get_report_phy_sata(dev->parent, phy->phy_id, + &dev->sata_dev.rps_resp); + if (res) { + pr_debug("report phy sata to %016llx:%02d returned 
0x%x\n", + SAS_ADDR(dev->parent->sas_addr), + phy->phy_id, res); + return res; + } + memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis, + sizeof(struct dev_to_host_fis)); + dev->sata_dev.class = sas_get_ata_command_set(dev); + } + return 0; +} + +static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy) +{ + int res; + + /* we weren't pending, so successfully end the reset sequence now */ + if (dev->dev_type != SAS_SATA_PENDING) + return 1; + + /* hmmm, if this succeeds do we need to repost the domain_device to the + * lldd so it can pick up new parameters? + */ + res = sas_get_ata_info(dev, phy); + if (res) + return 0; /* retry */ + else + return 1; +} + +int smp_ata_check_ready_type(struct ata_link *link) +{ + struct domain_device *dev = link->ap->private_data; + struct sas_phy *phy = sas_get_local_phy(dev); + struct domain_device *ex_dev = dev->parent; + enum sas_device_type type = SAS_PHY_UNUSED; + u8 sas_addr[SAS_ADDR_SIZE]; + int res; + + res = sas_get_phy_attached_dev(ex_dev, phy->number, sas_addr, &type); + sas_put_local_phy(phy); + if (res) + return res; + + switch (type) { + case SAS_SATA_PENDING: + return 0; + case SAS_END_DEVICE: + return 1; + default: + return -ENODEV; + } +} +EXPORT_SYMBOL_GPL(smp_ata_check_ready_type); + +static int smp_ata_check_ready(struct ata_link *link) +{ + int res; + struct ata_port *ap = link->ap; + struct domain_device *dev = ap->private_data; + struct domain_device *ex_dev = dev->parent; + struct sas_phy *phy = sas_get_local_phy(dev); + struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy->number]; + + res = sas_ex_phy_discover(ex_dev, phy->number); + sas_put_local_phy(phy); + + /* break the wait early if the expander is unreachable, + * otherwise keep polling + */ + if (res == -ECOMM) + return res; + if (res != SMP_RESP_FUNC_ACC) + return 0; + + switch (ex_phy->attached_dev_type) { + case SAS_SATA_PENDING: + return 0; + case SAS_END_DEVICE: + if (ex_phy->attached_sata_dev) + return sas_ata_clear_pending(dev, ex_phy); + fallthrough; + default: + return -ENODEV; + } +} + +static int local_ata_check_ready(struct ata_link *link) +{ + struct ata_port *ap = link->ap; + struct domain_device *dev = ap->private_data; + struct sas_internal *i = dev_to_sas_internal(dev); + + if (i->dft->lldd_ata_check_ready) + return i->dft->lldd_ata_check_ready(dev); + else { + /* lldd's that don't implement 'ready' checking get the + * old default behavior of not coordinating reset + * recovery with libata + */ + return 1; + } +} + +static int sas_ata_printk(const char *level, const struct domain_device *ddev, + const char *fmt, ...) 
+{ + struct ata_port *ap = ddev->sata_dev.ap; + struct device *dev = &ddev->rphy->dev; + struct va_format vaf; + va_list args; + int r; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + r = printk("%s" SAS_FMT "ata%u: %s: %pV", + level, ap->print_id, dev_name(dev), &vaf); + + va_end(args); + + return r; +} + +static int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline) +{ + struct sata_device *sata_dev = &dev->sata_dev; + int (*check_ready)(struct ata_link *link); + struct ata_port *ap = sata_dev->ap; + struct ata_link *link = &ap->link; + struct sas_phy *phy; + int ret; + + phy = sas_get_local_phy(dev); + if (scsi_is_sas_phy_local(phy)) + check_ready = local_ata_check_ready; + else + check_ready = smp_ata_check_ready; + sas_put_local_phy(phy); + + ret = ata_wait_after_reset(link, deadline, check_ready); + if (ret && ret != -EAGAIN) + sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret); + + return ret; +} + +static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + struct domain_device *dev = ap->private_data; + struct sas_internal *i = dev_to_sas_internal(dev); + int ret; + + ret = i->dft->lldd_I_T_nexus_reset(dev); + if (ret == -ENODEV) + return ret; + + if (ret != TMF_RESP_FUNC_COMPLETE) + sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n"); + + ret = sas_ata_wait_after_reset(dev, deadline); + + *class = dev->sata_dev.class; + + ap->cbl = ATA_CBL_SATA; + return ret; +} + +/* + * notify the lldd to forget the sas_task for this internal ata command + * that bypasses scsi-eh + */ +static void sas_ata_internal_abort(struct sas_task *task) +{ + struct sas_internal *si = dev_to_sas_internal(task->dev); + unsigned long flags; + int res; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_ABORTED || + task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + pr_debug("%s: Task %p already finished.\n", __func__, task); + goto out; + } + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + res = si->dft->lldd_abort_task(task); + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE || + res == TMF_RESP_FUNC_COMPLETE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + goto out; + } + + /* XXX we are not prepared to deal with ->lldd_abort_task() + * failures. TODO: lldds need to unconditionally forget about + * aborted ata tasks, otherwise we (likely) leak the sas task + * here + */ + pr_warn("%s: Task %p leaked.\n", __func__, task); + + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) + task->task_state_flags &= ~SAS_TASK_STATE_ABORTED; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + return; + out: + sas_free_task(task); +} + +static void sas_ata_post_internal(struct ata_queued_cmd *qc) +{ + if (qc->flags & ATA_QCFLAG_EH) + qc->err_mask |= AC_ERR_OTHER; + + if (qc->err_mask) { + /* + * Find the sas_task and kill it. By this point, libata + * has decided to kill the qc and has frozen the port. + * In this state sas_ata_task_done() will no longer free + * the sas_task, so we need to notify the lldd (via + * ->lldd_abort_task) that the task is dead and free it + * ourselves. 
+ */ + struct sas_task *task = qc->lldd_task; + + qc->lldd_task = NULL; + if (!task) + return; + task->uldd_task = NULL; + sas_ata_internal_abort(task); + } +} + + +static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev) +{ + struct domain_device *dev = ap->private_data; + struct sas_internal *i = dev_to_sas_internal(dev); + + if (i->dft->lldd_ata_set_dmamode) + i->dft->lldd_ata_set_dmamode(dev); +} + +static void sas_ata_sched_eh(struct ata_port *ap) +{ + struct domain_device *dev = ap->private_data; + struct sas_ha_struct *ha = dev->port->ha; + unsigned long flags; + + spin_lock_irqsave(&ha->lock, flags); + if (!test_and_set_bit(SAS_DEV_EH_PENDING, &dev->state)) + ha->eh_active++; + ata_std_sched_eh(ap); + spin_unlock_irqrestore(&ha->lock, flags); +} + +void sas_ata_end_eh(struct ata_port *ap) +{ + struct domain_device *dev = ap->private_data; + struct sas_ha_struct *ha = dev->port->ha; + unsigned long flags; + + spin_lock_irqsave(&ha->lock, flags); + if (test_and_clear_bit(SAS_DEV_EH_PENDING, &dev->state)) + ha->eh_active--; + spin_unlock_irqrestore(&ha->lock, flags); +} + +static int sas_ata_prereset(struct ata_link *link, unsigned long deadline) +{ + struct ata_port *ap = link->ap; + struct domain_device *dev = ap->private_data; + struct sas_phy *local_phy = sas_get_local_phy(dev); + int res = 0; + + if (!local_phy->enabled || test_bit(SAS_DEV_GONE, &dev->state)) + res = -ENOENT; + sas_put_local_phy(local_phy); + + return res; +} + +static struct ata_port_operations sas_sata_ops = { + .prereset = sas_ata_prereset, + .hardreset = sas_ata_hard_reset, + .error_handler = ata_std_error_handler, + .post_internal_cmd = sas_ata_post_internal, + .qc_defer = ata_std_qc_defer, + .qc_prep = ata_noop_qc_prep, + .qc_issue = sas_ata_qc_issue, + .qc_fill_rtf = sas_ata_qc_fill_rtf, + .set_dmamode = sas_ata_set_dmamode, + .sched_eh = sas_ata_sched_eh, + .end_eh = sas_ata_end_eh, +}; + +static struct ata_port_info sata_port_info = { + .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ | + ATA_FLAG_SAS_HOST | ATA_FLAG_FPDMA_AUX, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &sas_sata_ops +}; + +int sas_ata_init(struct domain_device *found_dev) +{ + struct sas_ha_struct *ha = found_dev->port->ha; + struct Scsi_Host *shost = ha->shost; + struct ata_host *ata_host; + struct ata_port *ap; + int rc; + + ata_host = kzalloc(sizeof(*ata_host), GFP_KERNEL); + if (!ata_host) { + pr_err("ata host alloc failed.\n"); + return -ENOMEM; + } + + ata_host_init(ata_host, ha->dev, &sas_sata_ops); + + ap = ata_sas_port_alloc(ata_host, &sata_port_info, shost); + if (!ap) { + pr_err("ata_sas_port_alloc failed.\n"); + rc = -ENODEV; + goto free_host; + } + + ap->private_data = found_dev; + ap->cbl = ATA_CBL_SATA; + ap->scsi_host = shost; + + rc = ata_sas_tport_add(ata_host->dev, ap); + if (rc) + goto destroy_port; + + found_dev->sata_dev.ata_host = ata_host; + found_dev->sata_dev.ap = ap; + + return 0; + +destroy_port: + kfree(ap); +free_host: + ata_host_put(ata_host); + return rc; +} + +void sas_ata_task_abort(struct sas_task *task) +{ + struct ata_queued_cmd *qc = task->uldd_task; + struct completion *waiting; + + /* Bounce SCSI-initiated commands to the SCSI EH */ + if (qc->scsicmd) { + blk_abort_request(scsi_cmd_to_rq(qc->scsicmd)); + return; + } + + /* Internal command, fake a timeout and complete. 
*/ + qc->flags &= ~ATA_QCFLAG_ACTIVE; + qc->flags |= ATA_QCFLAG_EH; + qc->err_mask |= AC_ERR_TIMEOUT; + waiting = qc->private_data; + complete(waiting); +} + +void sas_probe_sata(struct asd_sas_port *port) +{ + struct domain_device *dev, *n; + + mutex_lock(&port->ha->disco_mutex); + list_for_each_entry(dev, &port->disco_list, disco_list_node) { + if (!dev_is_sata(dev)) + continue; + + ata_port_probe(dev->sata_dev.ap); + } + mutex_unlock(&port->ha->disco_mutex); + + list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) { + if (!dev_is_sata(dev)) + continue; + + sas_ata_wait_eh(dev); + + /* if libata could not bring the link up, don't surface + * the device + */ + if (!ata_dev_enabled(sas_to_ata_dev(dev))) + sas_fail_probe(dev, __func__, -ENODEV); + } + +} + +int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy, + struct domain_device *child, int phy_id) +{ + struct sas_rphy *rphy; + int ret; + + if (child->linkrate > parent->min_linkrate) { + struct sas_phy *cphy = child->phy; + enum sas_linkrate min_prate = cphy->minimum_linkrate, + parent_min_lrate = parent->min_linkrate, + min_linkrate = (min_prate > parent_min_lrate) ? + parent_min_lrate : 0; + struct sas_phy_linkrates rates = { + .maximum_linkrate = parent->min_linkrate, + .minimum_linkrate = min_linkrate, + }; + + pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n", + SAS_ADDR(child->sas_addr), phy_id); + ret = sas_smp_phy_control(parent, phy_id, + PHY_FUNC_LINK_RESET, &rates); + if (ret) { + pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n", + SAS_ADDR(child->sas_addr), phy_id, ret); + return ret; + } + pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n", + SAS_ADDR(child->sas_addr), phy_id); + child->linkrate = child->min_linkrate; + } + ret = sas_get_ata_info(child, phy); + if (ret) + return ret; + + sas_init_dev(child); + ret = sas_ata_init(child); + if (ret) + return ret; + + rphy = sas_end_device_alloc(phy->port); + if (!rphy) + return -ENOMEM; + + rphy->identify.phy_identifier = phy_id; + child->rphy = rphy; + get_device(&rphy->dev); + + list_add_tail(&child->disco_list_node, &parent->port->disco_list); + + ret = sas_discover_sata(child); + if (ret) { + pr_notice("sas_discover_sata() for device %16llx at %016llx:%02d returned 0x%x\n", + SAS_ADDR(child->sas_addr), + SAS_ADDR(parent->sas_addr), phy_id, ret); + sas_rphy_free(child->rphy); + list_del(&child->disco_list_node); + return ret; + } + + return 0; +} + +static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func) +{ + struct domain_device *dev, *n; + + list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) { + if (!dev_is_sata(dev)) + continue; + + sas_ata_wait_eh(dev); + + /* if libata failed to power manage the device, tear it down */ + if (ata_dev_disabled(sas_to_ata_dev(dev))) + sas_fail_probe(dev, func, -ENODEV); + } +} + +void sas_suspend_sata(struct asd_sas_port *port) +{ + struct domain_device *dev; + + mutex_lock(&port->ha->disco_mutex); + list_for_each_entry(dev, &port->dev_list, dev_list_node) { + struct sata_device *sata; + + if (!dev_is_sata(dev)) + continue; + + sata = &dev->sata_dev; + if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND) + continue; + + ata_sas_port_suspend(sata->ap); + } + mutex_unlock(&port->ha->disco_mutex); + + sas_ata_flush_pm_eh(port, __func__); +} + +void sas_resume_sata(struct asd_sas_port *port) +{ + struct domain_device *dev; + + mutex_lock(&port->ha->disco_mutex); + 
list_for_each_entry(dev, &port->dev_list, dev_list_node) { + struct sata_device *sata; + + if (!dev_is_sata(dev)) + continue; + + sata = &dev->sata_dev; + if (sata->ap->pm_mesg.event == PM_EVENT_ON) + continue; + + ata_sas_port_resume(sata->ap); + } + mutex_unlock(&port->ha->disco_mutex); + + sas_ata_flush_pm_eh(port, __func__); +} + +/** + * sas_discover_sata - discover an STP/SATA domain device + * @dev: pointer to struct domain_device of interest + * + * Devices directly attached to a HA port, have no parents. All other + * devices do, and should have their "parent" pointer set appropriately + * before calling this function. + */ +int sas_discover_sata(struct domain_device *dev) +{ + if (dev->dev_type == SAS_SATA_PM) + return -ENODEV; + + dev->sata_dev.class = sas_get_ata_command_set(dev); + sas_fill_in_rphy(dev, dev->rphy); + + return sas_notify_lldd_dev_found(dev); +} + +static void async_sas_ata_eh(void *data, async_cookie_t cookie) +{ + struct domain_device *dev = data; + struct ata_port *ap = dev->sata_dev.ap; + struct sas_ha_struct *ha = dev->port->ha; + + sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n"); + ata_scsi_port_error_handler(ha->shost, ap); + sas_put_device(dev); +} + +void sas_ata_strategy_handler(struct Scsi_Host *shost) +{ + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + ASYNC_DOMAIN_EXCLUSIVE(async); + int i; + + /* it's ok to defer revalidation events during ata eh, these + * disks are in one of three states: + * 1/ present for initial domain discovery, and these + * resets will cause bcn flutters + * 2/ hot removed, we'll discover that after eh fails + * 3/ hot added after initial discovery, lost the race, and need + * to catch the next train. + */ + sas_disable_revalidation(sas_ha); + + spin_lock_irq(&sas_ha->phy_port_lock); + for (i = 0; i < sas_ha->num_phys; i++) { + struct asd_sas_port *port = sas_ha->sas_port[i]; + struct domain_device *dev; + + spin_lock(&port->dev_list_lock); + list_for_each_entry(dev, &port->dev_list, dev_list_node) { + if (!dev_is_sata(dev)) + continue; + + /* hold a reference over eh since we may be + * racing with final remove once all commands + * are completed + */ + kref_get(&dev->kref); + + async_schedule_domain(async_sas_ata_eh, dev, &async); + } + spin_unlock(&port->dev_list_lock); + } + spin_unlock_irq(&sas_ha->phy_port_lock); + + async_synchronize_full_domain(&async); + + sas_enable_revalidation(sas_ha); +} + +void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q) +{ + struct scsi_cmnd *cmd, *n; + struct domain_device *eh_dev; + + do { + LIST_HEAD(sata_q); + eh_dev = NULL; + + list_for_each_entry_safe(cmd, n, work_q, eh_entry) { + struct domain_device *ddev = cmd_to_domain_dev(cmd); + + if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd)) + continue; + if (eh_dev && eh_dev != ddev) + continue; + eh_dev = ddev; + list_move(&cmd->eh_entry, &sata_q); + } + + if (!list_empty(&sata_q)) { + struct ata_port *ap = eh_dev->sata_dev.ap; + + sas_ata_printk(KERN_DEBUG, eh_dev, "cmd error handler\n"); + ata_scsi_cmd_error_handler(shost, ap, &sata_q); + /* + * ata's error handler may leave the cmd on the list + * so make sure they don't remain on a stack list + * about to go out of scope. + * + * This looks strange, since the commands are + * now part of no list, but the next error + * action will be ata_port_error_handler() + * which takes no list and sweeps them up + * anyway from the ata tag array. 
+ */ + while (!list_empty(&sata_q)) + list_del_init(sata_q.next); + } + } while (eh_dev); +} + +void sas_ata_schedule_reset(struct domain_device *dev) +{ + struct ata_eh_info *ehi; + struct ata_port *ap; + unsigned long flags; + + if (!dev_is_sata(dev)) + return; + + ap = dev->sata_dev.ap; + ehi = &ap->link.eh_info; + + spin_lock_irqsave(ap->lock, flags); + ehi->err_mask |= AC_ERR_TIMEOUT; + ehi->action |= ATA_EH_RESET; + ata_port_schedule_eh(ap); + spin_unlock_irqrestore(ap->lock, flags); +} +EXPORT_SYMBOL_GPL(sas_ata_schedule_reset); + +void sas_ata_wait_eh(struct domain_device *dev) +{ + struct ata_port *ap; + + if (!dev_is_sata(dev)) + return; + + ap = dev->sata_dev.ap; + ata_port_wait_eh(ap); +} + +void sas_ata_device_link_abort(struct domain_device *device, bool force_reset) +{ + struct ata_port *ap = device->sata_dev.ap; + struct ata_link *link = &ap->link; + unsigned long flags; + + spin_lock_irqsave(ap->lock, flags); + device->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */ + device->sata_dev.fis[3] = ATA_ABORTED; /* tf error */ + + link->eh_info.err_mask |= AC_ERR_DEV; + if (force_reset) + link->eh_info.action |= ATA_EH_RESET; + ata_link_abort(link); + spin_unlock_irqrestore(ap->lock, flags); +} +EXPORT_SYMBOL_GPL(sas_ata_device_link_abort); + +int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id) +{ + struct sas_tmf_task tmf_task = {}; + return sas_execute_tmf(device, fis, sizeof(struct host_to_dev_fis), + force_phy_id, &tmf_task); +} +EXPORT_SYMBOL_GPL(sas_execute_ata_cmd); diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c new file mode 100644 index 000000000..ff7b63b10 --- /dev/null +++ b/drivers/scsi/libsas/sas_discover.c @@ -0,0 +1,607 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Serial Attached SCSI (SAS) Discover process + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + */ + +#include +#include +#include +#include +#include "sas_internal.h" + +#include +#include +#include +#include "scsi_sas_internal.h" + +/* ---------- Basic task processing for discovery purposes ---------- */ + +void sas_init_dev(struct domain_device *dev) +{ + switch (dev->dev_type) { + case SAS_END_DEVICE: + INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node); + break; + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: + INIT_LIST_HEAD(&dev->ex_dev.children); + mutex_init(&dev->ex_dev.cmd_mutex); + break; + default: + break; + } +} + +/* ---------- Domain device discovery ---------- */ + +/** + * sas_get_port_device - Discover devices which caused port creation + * @port: pointer to struct sas_port of interest + * + * Devices directly attached to a HA port, have no parent. This is + * how we know they are (domain) "root" devices. All other devices + * do, and should have their "parent" pointer set appropriately as + * soon as a child device is discovered. 
+ */ +static int sas_get_port_device(struct asd_sas_port *port) +{ + struct asd_sas_phy *phy; + struct sas_rphy *rphy; + struct domain_device *dev; + int rc = -ENODEV; + + dev = sas_alloc_device(); + if (!dev) + return -ENOMEM; + + spin_lock_irq(&port->phy_list_lock); + if (list_empty(&port->phy_list)) { + spin_unlock_irq(&port->phy_list_lock); + sas_put_device(dev); + return -ENODEV; + } + phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el); + spin_lock(&phy->frame_rcvd_lock); + memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd), + (size_t)phy->frame_rcvd_size)); + spin_unlock(&phy->frame_rcvd_lock); + spin_unlock_irq(&port->phy_list_lock); + + if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) { + struct dev_to_host_fis *fis = + (struct dev_to_host_fis *) dev->frame_rcvd; + if (fis->interrupt_reason == 1 && fis->lbal == 1 && + fis->byte_count_low == 0x69 && fis->byte_count_high == 0x96 + && (fis->device & ~0x10) == 0) + dev->dev_type = SAS_SATA_PM; + else + dev->dev_type = SAS_SATA_DEV; + dev->tproto = SAS_PROTOCOL_SATA; + } else if (port->oob_mode == SAS_OOB_MODE) { + struct sas_identify_frame *id = + (struct sas_identify_frame *) dev->frame_rcvd; + dev->dev_type = id->dev_type; + dev->iproto = id->initiator_bits; + dev->tproto = id->target_bits; + } else { + /* If the oob mode is OOB_NOT_CONNECTED, the port is + * disconnected due to race with PHY down. We cannot + * continue to discover this port + */ + sas_put_device(dev); + pr_warn("Port %016llx is disconnected when discovering\n", + SAS_ADDR(port->attached_sas_addr)); + return -ENODEV; + } + + sas_init_dev(dev); + + dev->port = port; + switch (dev->dev_type) { + case SAS_SATA_DEV: + rc = sas_ata_init(dev); + if (rc) { + rphy = NULL; + break; + } + fallthrough; + case SAS_END_DEVICE: + rphy = sas_end_device_alloc(port->port); + break; + case SAS_EDGE_EXPANDER_DEVICE: + rphy = sas_expander_alloc(port->port, + SAS_EDGE_EXPANDER_DEVICE); + break; + case SAS_FANOUT_EXPANDER_DEVICE: + rphy = sas_expander_alloc(port->port, + SAS_FANOUT_EXPANDER_DEVICE); + break; + default: + pr_warn("ERROR: Unidentified device type %d\n", dev->dev_type); + rphy = NULL; + break; + } + + if (!rphy) { + sas_put_device(dev); + return rc; + } + + rphy->identify.phy_identifier = phy->phy->identify.phy_identifier; + memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE); + sas_fill_in_rphy(dev, rphy); + sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); + port->port_dev = dev; + dev->linkrate = port->linkrate; + dev->min_linkrate = port->linkrate; + dev->max_linkrate = port->linkrate; + dev->pathways = port->num_phys; + memset(port->disc.fanout_sas_addr, 0, SAS_ADDR_SIZE); + memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE); + memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE); + port->disc.max_level = 0; + sas_device_set_phy(dev, port->port); + + dev->rphy = rphy; + get_device(&dev->rphy->dev); + + if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE) + list_add_tail(&dev->disco_list_node, &port->disco_list); + else { + spin_lock_irq(&port->dev_list_lock); + list_add_tail(&dev->dev_list_node, &port->dev_list); + spin_unlock_irq(&port->dev_list_lock); + } + + spin_lock_irq(&port->phy_list_lock); + list_for_each_entry(phy, &port->phy_list, port_phy_el) + sas_phy_set_target(phy, dev); + spin_unlock_irq(&port->phy_list_lock); + + return 0; +} + +/* ---------- Discover and Revalidate ---------- */ + +int sas_notify_lldd_dev_found(struct domain_device *dev) +{ + int res = 0; + struct sas_ha_struct *sas_ha = 
dev->port->ha; + struct Scsi_Host *shost = sas_ha->shost; + struct sas_internal *i = to_sas_internal(shost->transportt); + + if (!i->dft->lldd_dev_found) + return 0; + + res = i->dft->lldd_dev_found(dev); + if (res) { + pr_warn("driver on host %s cannot handle device %016llx, error:%d\n", + dev_name(sas_ha->dev), + SAS_ADDR(dev->sas_addr), res); + return res; + } + set_bit(SAS_DEV_FOUND, &dev->state); + kref_get(&dev->kref); + return 0; +} + + +void sas_notify_lldd_dev_gone(struct domain_device *dev) +{ + struct sas_ha_struct *sas_ha = dev->port->ha; + struct Scsi_Host *shost = sas_ha->shost; + struct sas_internal *i = to_sas_internal(shost->transportt); + + if (!i->dft->lldd_dev_gone) + return; + + if (test_and_clear_bit(SAS_DEV_FOUND, &dev->state)) { + i->dft->lldd_dev_gone(dev); + sas_put_device(dev); + } +} + +static void sas_probe_devices(struct asd_sas_port *port) +{ + struct domain_device *dev, *n; + + /* devices must be domain members before link recovery and probe */ + list_for_each_entry(dev, &port->disco_list, disco_list_node) { + spin_lock_irq(&port->dev_list_lock); + list_add_tail(&dev->dev_list_node, &port->dev_list); + spin_unlock_irq(&port->dev_list_lock); + } + + sas_probe_sata(port); + + list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) { + int err; + + err = sas_rphy_add(dev->rphy); + if (err) + sas_fail_probe(dev, __func__, err); + else + list_del_init(&dev->disco_list_node); + } +} + +static void sas_suspend_devices(struct work_struct *work) +{ + struct asd_sas_phy *phy; + struct domain_device *dev; + struct sas_discovery_event *ev = to_sas_discovery_event(work); + struct asd_sas_port *port = ev->port; + struct Scsi_Host *shost = port->ha->shost; + struct sas_internal *si = to_sas_internal(shost->transportt); + + clear_bit(DISCE_SUSPEND, &port->disc.pending); + + sas_suspend_sata(port); + + /* lldd is free to forget the domain_device across the + * suspension, we force the issue here to keep the reference + * counts aligned + */ + list_for_each_entry(dev, &port->dev_list, dev_list_node) + sas_notify_lldd_dev_gone(dev); + + /* we are suspending, so we know events are disabled and + * phy_list is not being mutated + */ + list_for_each_entry(phy, &port->phy_list, port_phy_el) { + if (si->dft->lldd_port_deformed) + si->dft->lldd_port_deformed(phy); + phy->suspended = 1; + port->suspended = 1; + } +} + +static void sas_resume_devices(struct work_struct *work) +{ + struct sas_discovery_event *ev = to_sas_discovery_event(work); + struct asd_sas_port *port = ev->port; + + clear_bit(DISCE_RESUME, &port->disc.pending); + + sas_resume_sata(port); +} + +/** + * sas_discover_end_dev - discover an end device (SSP, etc) + * @dev: pointer to domain device of interest + * + * See comment in sas_discover_sata(). 
+ */ +int sas_discover_end_dev(struct domain_device *dev) +{ + return sas_notify_lldd_dev_found(dev); +} + +/* ---------- Device registration and unregistration ---------- */ + +void sas_free_device(struct kref *kref) +{ + struct domain_device *dev = container_of(kref, typeof(*dev), kref); + + put_device(&dev->rphy->dev); + dev->rphy = NULL; + + if (dev->parent) + sas_put_device(dev->parent); + + sas_port_put_phy(dev->phy); + dev->phy = NULL; + + /* remove the phys and ports, everything else should be gone */ + if (dev_is_expander(dev->dev_type)) + kfree(dev->ex_dev.ex_phy); + + if (dev_is_sata(dev) && dev->sata_dev.ap) { + ata_sas_tport_delete(dev->sata_dev.ap); + kfree(dev->sata_dev.ap); + ata_host_put(dev->sata_dev.ata_host); + dev->sata_dev.ata_host = NULL; + dev->sata_dev.ap = NULL; + } + + kfree(dev); +} + +static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev) +{ + struct sas_ha_struct *ha = port->ha; + + sas_notify_lldd_dev_gone(dev); + if (!dev->parent) + dev->port->port_dev = NULL; + else + list_del_init(&dev->siblings); + + spin_lock_irq(&port->dev_list_lock); + list_del_init(&dev->dev_list_node); + if (dev_is_sata(dev)) + sas_ata_end_eh(dev->sata_dev.ap); + spin_unlock_irq(&port->dev_list_lock); + + spin_lock_irq(&ha->lock); + if (dev->dev_type == SAS_END_DEVICE && + !list_empty(&dev->ssp_dev.eh_list_node)) { + list_del_init(&dev->ssp_dev.eh_list_node); + ha->eh_active--; + } + spin_unlock_irq(&ha->lock); + + sas_put_device(dev); +} + +void sas_destruct_devices(struct asd_sas_port *port) +{ + struct domain_device *dev, *n; + + list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) { + list_del_init(&dev->disco_list_node); + + sas_remove_children(&dev->rphy->dev); + sas_rphy_delete(dev->rphy); + sas_unregister_common_dev(port, dev); + } +} + +static void sas_destruct_ports(struct asd_sas_port *port) +{ + struct sas_port *sas_port, *p; + + list_for_each_entry_safe(sas_port, p, &port->sas_port_del_list, del_list) { + list_del_init(&sas_port->del_list); + sas_port_delete(sas_port); + } +} + +static bool sas_abort_cmd(struct request *req, void *data) +{ + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); + struct domain_device *dev = data; + + if (dev == cmd_to_domain_dev(cmd)) + blk_abort_request(req); + return true; +} + +static void sas_abort_device_scsi_cmds(struct domain_device *dev) +{ + struct sas_ha_struct *sas_ha = dev->port->ha; + struct Scsi_Host *shost = sas_ha->shost; + + if (dev_is_expander(dev->dev_type)) + return; + + /* + * For removed device with active IOs, the user space applications have + * to spend very long time waiting for the timeout. This is not + * necessary because a removed device will not return the IOs. + * Abort the inflight IOs here so that EH can be quickly kicked in. 
+ */ + blk_mq_tagset_busy_iter(&shost->tag_set, sas_abort_cmd, dev); +} + +void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev) +{ + if (!test_bit(SAS_DEV_DESTROY, &dev->state) && + !list_empty(&dev->disco_list_node)) { + /* this rphy never saw sas_rphy_add */ + list_del_init(&dev->disco_list_node); + sas_rphy_free(dev->rphy); + sas_unregister_common_dev(port, dev); + return; + } + + if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) { + if (test_bit(SAS_DEV_GONE, &dev->state)) + sas_abort_device_scsi_cmds(dev); + sas_rphy_unlink(dev->rphy); + list_move_tail(&dev->disco_list_node, &port->destroy_list); + } +} + +void sas_unregister_domain_devices(struct asd_sas_port *port, int gone) +{ + struct domain_device *dev, *n; + + list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node) { + if (gone) + set_bit(SAS_DEV_GONE, &dev->state); + sas_unregister_dev(port, dev); + } + + list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) + sas_unregister_dev(port, dev); + + port->port->rphy = NULL; + +} + +void sas_device_set_phy(struct domain_device *dev, struct sas_port *port) +{ + struct sas_ha_struct *ha; + struct sas_phy *new_phy; + + if (!dev) + return; + + ha = dev->port->ha; + new_phy = sas_port_get_phy(port); + + /* pin and record last seen phy */ + spin_lock_irq(&ha->phy_port_lock); + if (new_phy) { + sas_port_put_phy(dev->phy); + dev->phy = new_phy; + } + spin_unlock_irq(&ha->phy_port_lock); +} + +/* ---------- Discovery and Revalidation ---------- */ + +/** + * sas_discover_domain - discover the domain + * @work: work structure embedded in port domain device. + * + * NOTE: this process _must_ quit (return) as soon as any connection + * errors are encountered. Connection recovery is done elsewhere. + * Discover process only interrogates devices in order to discover the + * domain. 
+ */ +static void sas_discover_domain(struct work_struct *work) +{ + struct domain_device *dev; + int error = 0; + struct sas_discovery_event *ev = to_sas_discovery_event(work); + struct asd_sas_port *port = ev->port; + + clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending); + + if (port->port_dev) + return; + + error = sas_get_port_device(port); + if (error) + return; + dev = port->port_dev; + + pr_debug("DOING DISCOVERY on port %d, pid:%d\n", port->id, + task_pid_nr(current)); + + switch (dev->dev_type) { + case SAS_END_DEVICE: + error = sas_discover_end_dev(dev); + break; + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: + error = sas_discover_root_expander(dev); + break; + case SAS_SATA_DEV: + case SAS_SATA_PM: + error = sas_discover_sata(dev); + break; + default: + error = -ENXIO; + pr_err("unhandled device %d\n", dev->dev_type); + break; + } + + if (error) { + sas_rphy_free(dev->rphy); + list_del_init(&dev->disco_list_node); + spin_lock_irq(&port->dev_list_lock); + list_del_init(&dev->dev_list_node); + spin_unlock_irq(&port->dev_list_lock); + + sas_put_device(dev); + port->port_dev = NULL; + } + + sas_probe_devices(port); + + pr_debug("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id, + task_pid_nr(current), error); +} + +static void sas_revalidate_domain(struct work_struct *work) +{ + int res = 0; + struct sas_discovery_event *ev = to_sas_discovery_event(work); + struct asd_sas_port *port = ev->port; + struct sas_ha_struct *ha = port->ha; + struct domain_device *ddev = port->port_dev; + + /* prevent revalidation from finding sata links in recovery */ + mutex_lock(&ha->disco_mutex); + if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { + pr_debug("REVALIDATION DEFERRED on port %d, pid:%d\n", + port->id, task_pid_nr(current)); + goto out; + } + + clear_bit(DISCE_REVALIDATE_DOMAIN, &port->disc.pending); + + pr_debug("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, + task_pid_nr(current)); + + if (ddev && dev_is_expander(ddev->dev_type)) + res = sas_ex_revalidate_domain(ddev); + + pr_debug("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", + port->id, task_pid_nr(current), res); + out: + mutex_unlock(&ha->disco_mutex); + + sas_destruct_devices(port); + sas_destruct_ports(port); + sas_probe_devices(port); +} + +/* ---------- Events ---------- */ + +static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw) +{ + /* chained work is not subject to SA_HA_DRAINING or + * SAS_HA_REGISTERED, because it is either submitted in the + * workqueue, or known to be submitted from a context that is + * not racing against draining + */ + queue_work(ha->disco_q, &sw->work); +} + +static void sas_chain_event(int event, unsigned long *pending, + struct sas_work *sw, + struct sas_ha_struct *ha) +{ + if (!test_and_set_bit(event, pending)) { + unsigned long flags; + + spin_lock_irqsave(&ha->lock, flags); + sas_chain_work(ha, sw); + spin_unlock_irqrestore(&ha->lock, flags); + } +} + +void sas_discover_event(struct asd_sas_port *port, enum discover_event ev) +{ + struct sas_discovery *disc; + + if (!port) + return; + disc = &port->disc; + + BUG_ON(ev >= DISC_NUM_EVENTS); + + sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha); +} + +/** + * sas_init_disc - initialize the discovery struct in the port + * @disc: port discovery structure + * @port: pointer to struct port + * + * Called when the ports are being initialized. 
+ */ +void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port) +{ + int i; + + static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = { + [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, + [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, + [DISCE_SUSPEND] = sas_suspend_devices, + [DISCE_RESUME] = sas_resume_devices, + }; + + disc->pending = 0; + for (i = 0; i < DISC_NUM_EVENTS; i++) { + INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]); + disc->disc_work[i].port = port; + } +} diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c new file mode 100644 index 000000000..f3a17191a --- /dev/null +++ b/drivers/scsi/libsas/sas_event.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Serial Attached SCSI (SAS) Event processing + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + */ + +#include +#include +#include "sas_internal.h" + +bool sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw) +{ + if (!test_bit(SAS_HA_REGISTERED, &ha->state)) + return false; + + if (test_bit(SAS_HA_DRAINING, &ha->state)) { + /* add it to the defer list, if not already pending */ + if (list_empty(&sw->drain_node)) + list_add_tail(&sw->drain_node, &ha->defer_q); + return true; + } + + return queue_work(ha->event_q, &sw->work); +} + +static bool sas_queue_event(int event, struct sas_work *work, + struct sas_ha_struct *ha) +{ + unsigned long flags; + bool rc; + + spin_lock_irqsave(&ha->lock, flags); + rc = sas_queue_work(ha, work); + spin_unlock_irqrestore(&ha->lock, flags); + + return rc; +} + +void sas_queue_deferred_work(struct sas_ha_struct *ha) +{ + struct sas_work *sw, *_sw; + + spin_lock_irq(&ha->lock); + list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) { + list_del_init(&sw->drain_node); + + if (!sas_queue_work(ha, sw)) { + pm_runtime_put(ha->dev); + sas_free_event(to_asd_sas_event(&sw->work)); + } + } + spin_unlock_irq(&ha->lock); +} + +void __sas_drain_work(struct sas_ha_struct *ha) +{ + set_bit(SAS_HA_DRAINING, &ha->state); + /* flush submitters */ + spin_lock_irq(&ha->lock); + spin_unlock_irq(&ha->lock); + + drain_workqueue(ha->event_q); + drain_workqueue(ha->disco_q); + + clear_bit(SAS_HA_DRAINING, &ha->state); + sas_queue_deferred_work(ha); +} + +int sas_drain_work(struct sas_ha_struct *ha) +{ + int err; + + err = mutex_lock_interruptible(&ha->drain_mutex); + if (err) + return err; + if (test_bit(SAS_HA_REGISTERED, &ha->state)) + __sas_drain_work(ha); + mutex_unlock(&ha->drain_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(sas_drain_work); + +void sas_disable_revalidation(struct sas_ha_struct *ha) +{ + mutex_lock(&ha->disco_mutex); + set_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state); + mutex_unlock(&ha->disco_mutex); +} + +void sas_enable_revalidation(struct sas_ha_struct *ha) +{ + int i; + + mutex_lock(&ha->disco_mutex); + clear_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state); + for (i = 0; i < ha->num_phys; i++) { + struct asd_sas_port *port = ha->sas_port[i]; + const int ev = DISCE_REVALIDATE_DOMAIN; + struct sas_discovery *d = &port->disc; + struct asd_sas_phy *sas_phy; + + if (!test_and_clear_bit(ev, &d->pending)) + continue; + + spin_lock(&port->phy_list_lock); + if (list_empty(&port->phy_list)) { + spin_unlock(&port->phy_list_lock); + continue; + } + + sas_phy = container_of(port->phy_list.next, struct asd_sas_phy, + port_phy_el); + spin_unlock(&port->phy_list_lock); + sas_notify_port_event(sas_phy, + PORTE_BROADCAST_RCVD, GFP_KERNEL); + } + mutex_unlock(&ha->disco_mutex); +} + + +static 
void sas_port_event_worker(struct work_struct *work) +{ + struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + struct sas_ha_struct *ha = phy->ha; + + sas_port_event_fns[ev->event](work); + pm_runtime_put(ha->dev); + sas_free_event(ev); +} + +static void sas_phy_event_worker(struct work_struct *work) +{ + struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + struct sas_ha_struct *ha = phy->ha; + + sas_phy_event_fns[ev->event](work); + pm_runtime_put(ha->dev); + sas_free_event(ev); +} + +/* defer works of new phys during suspend */ +static bool sas_defer_event(struct asd_sas_phy *phy, struct asd_sas_event *ev) +{ + struct sas_ha_struct *ha = phy->ha; + unsigned long flags; + bool deferred = false; + + spin_lock_irqsave(&ha->lock, flags); + if (test_bit(SAS_HA_RESUMING, &ha->state) && !phy->suspended) { + struct sas_work *sw = &ev->work; + + list_add_tail(&sw->drain_node, &ha->defer_q); + deferred = true; + } + spin_unlock_irqrestore(&ha->lock, flags); + return deferred; +} + +void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event, + gfp_t gfp_flags) +{ + struct sas_ha_struct *ha = phy->ha; + struct asd_sas_event *ev; + + BUG_ON(event >= PORT_NUM_EVENTS); + + ev = sas_alloc_event(phy, gfp_flags); + if (!ev) + return; + + /* Call pm_runtime_put() with pairs in sas_port_event_worker() */ + pm_runtime_get_noresume(ha->dev); + + INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event); + + if (sas_defer_event(phy, ev)) + return; + + if (!sas_queue_event(event, &ev->work, ha)) { + pm_runtime_put(ha->dev); + sas_free_event(ev); + } +} +EXPORT_SYMBOL_GPL(sas_notify_port_event); + +void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event, + gfp_t gfp_flags) +{ + struct sas_ha_struct *ha = phy->ha; + struct asd_sas_event *ev; + + BUG_ON(event >= PHY_NUM_EVENTS); + + ev = sas_alloc_event(phy, gfp_flags); + if (!ev) + return; + + /* Call pm_runtime_put() with pairs in sas_phy_event_worker() */ + pm_runtime_get_noresume(ha->dev); + + INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event); + + if (sas_defer_event(phy, ev)) + return; + + if (!sas_queue_event(event, &ev->work, ha)) { + pm_runtime_put(ha->dev); + sas_free_event(ev); + } +} +EXPORT_SYMBOL_GPL(sas_notify_phy_event); diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c new file mode 100644 index 000000000..a2204674b --- /dev/null +++ b/drivers/scsi/libsas/sas_expander.c @@ -0,0 +1,2139 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Serial Attached SCSI (SAS) Expander discovery and configuration + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + */ + +#include +#include +#include +#include + +#include "sas_internal.h" + +#include +#include +#include +#include "scsi_sas_internal.h" + +static int sas_discover_expander(struct domain_device *dev); +static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr); +static int sas_configure_phy(struct domain_device *dev, int phy_id, + u8 *sas_addr, int include); +static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr); + +/* ---------- SMP task management ---------- */ + +/* Give it some long enough timeout. In seconds. 
*/ +#define SMP_TIMEOUT 10 + +static int smp_execute_task_sg(struct domain_device *dev, + struct scatterlist *req, struct scatterlist *resp) +{ + int res, retry; + struct sas_task *task = NULL; + struct sas_internal *i = + to_sas_internal(dev->port->ha->shost->transportt); + struct sas_ha_struct *ha = dev->port->ha; + + pm_runtime_get_sync(ha->dev); + mutex_lock(&dev->ex_dev.cmd_mutex); + for (retry = 0; retry < 3; retry++) { + if (test_bit(SAS_DEV_GONE, &dev->state)) { + res = -ECOMM; + break; + } + + task = sas_alloc_slow_task(GFP_KERNEL); + if (!task) { + res = -ENOMEM; + break; + } + task->dev = dev; + task->task_proto = dev->tproto; + task->smp_task.smp_req = *req; + task->smp_task.smp_resp = *resp; + + task->task_done = sas_task_internal_done; + + task->slow_task->timer.function = sas_task_internal_timedout; + task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ; + add_timer(&task->slow_task->timer); + + res = i->dft->lldd_execute_task(task, GFP_KERNEL); + + if (res) { + del_timer_sync(&task->slow_task->timer); + pr_notice("executing SMP task failed:%d\n", res); + break; + } + + wait_for_completion(&task->slow_task->completion); + res = -ECOMM; + if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + pr_notice("smp task timed out or aborted\n"); + i->dft->lldd_abort_task(task); + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { + pr_notice("SMP task aborted and not done\n"); + break; + } + } + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_SAM_STAT_GOOD) { + res = 0; + break; + } + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_DATA_UNDERRUN) { + /* no error, but return the number of bytes of + * underrun */ + res = task->task_status.residual; + break; + } + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_DATA_OVERRUN) { + res = -EMSGSIZE; + break; + } + if (task->task_status.resp == SAS_TASK_UNDELIVERED && + task->task_status.stat == SAS_DEVICE_UNKNOWN) + break; + else { + pr_notice("%s: task to dev %016llx response: 0x%x status 0x%x\n", + __func__, + SAS_ADDR(dev->sas_addr), + task->task_status.resp, + task->task_status.stat); + sas_free_task(task); + task = NULL; + } + } + mutex_unlock(&dev->ex_dev.cmd_mutex); + pm_runtime_put_sync(ha->dev); + + BUG_ON(retry == 3 && task != NULL); + sas_free_task(task); + return res; +} + +static int smp_execute_task(struct domain_device *dev, void *req, int req_size, + void *resp, int resp_size) +{ + struct scatterlist req_sg; + struct scatterlist resp_sg; + + sg_init_one(&req_sg, req, req_size); + sg_init_one(&resp_sg, resp, resp_size); + return smp_execute_task_sg(dev, &req_sg, &resp_sg); +} + +/* ---------- Allocations ---------- */ + +static inline void *alloc_smp_req(int size) +{ + u8 *p = kzalloc(size, GFP_KERNEL); + if (p) + p[0] = SMP_REQUEST; + return p; +} + +static inline void *alloc_smp_resp(int size) +{ + return kzalloc(size, GFP_KERNEL); +} + +static char sas_route_char(struct domain_device *dev, struct ex_phy *phy) +{ + switch (phy->routing_attr) { + case TABLE_ROUTING: + if (dev->ex_dev.t2t_supp) + return 'U'; + else + return 'T'; + case DIRECT_ROUTING: + return 'D'; + case SUBTRACTIVE_ROUTING: + return 'S'; + default: + return '?'; + } +} + +static enum sas_device_type to_dev_type(struct discover_resp *dr) +{ + /* This is detecting a failure to transmit initial dev to host + * FIS as described in section J.5 of sas-2 r16 + */ + if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev && + 
dr->linkrate >= SAS_LINK_RATE_1_5_GBPS) + return SAS_SATA_PENDING; + else + return dr->attached_dev_type; +} + +static void sas_set_ex_phy(struct domain_device *dev, int phy_id, + struct smp_disc_resp *disc_resp) +{ + enum sas_device_type dev_type; + enum sas_linkrate linkrate; + u8 sas_addr[SAS_ADDR_SIZE]; + struct discover_resp *dr = &disc_resp->disc; + struct sas_ha_struct *ha = dev->port->ha; + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *phy = &ex->ex_phy[phy_id]; + struct sas_rphy *rphy = dev->rphy; + bool new_phy = !phy->phy; + char *type; + + if (new_phy) { + if (WARN_ON_ONCE(test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))) + return; + phy->phy = sas_phy_alloc(&rphy->dev, phy_id); + + /* FIXME: error_handling */ + BUG_ON(!phy->phy); + } + + switch (disc_resp->result) { + case SMP_RESP_PHY_VACANT: + phy->phy_state = PHY_VACANT; + break; + default: + phy->phy_state = PHY_NOT_PRESENT; + break; + case SMP_RESP_FUNC_ACC: + phy->phy_state = PHY_EMPTY; /* do not know yet */ + break; + } + + /* check if anything important changed to squelch debug */ + dev_type = phy->attached_dev_type; + linkrate = phy->linkrate; + memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); + + /* Handle vacant phy - rest of dr data is not valid so skip it */ + if (phy->phy_state == PHY_VACANT) { + memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + phy->attached_dev_type = SAS_PHY_UNUSED; + if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { + phy->phy_id = phy_id; + goto skip; + } else + goto out; + } + + phy->attached_dev_type = to_dev_type(dr); + if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) + goto out; + phy->phy_id = phy_id; + phy->linkrate = dr->linkrate; + phy->attached_sata_host = dr->attached_sata_host; + phy->attached_sata_dev = dr->attached_sata_dev; + phy->attached_sata_ps = dr->attached_sata_ps; + phy->attached_iproto = dr->iproto << 1; + phy->attached_tproto = dr->tproto << 1; + /* help some expanders that fail to zero sas_address in the 'no + * device' case + */ + if (phy->attached_dev_type == SAS_PHY_UNUSED || + phy->linkrate < SAS_LINK_RATE_1_5_GBPS) + memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + else + memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); + phy->attached_phy_id = dr->attached_phy_id; + phy->phy_change_count = dr->change_count; + phy->routing_attr = dr->routing_attr; + phy->virtual = dr->virtual; + phy->last_da_index = -1; + + phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr); + phy->phy->identify.device_type = dr->attached_dev_type; + phy->phy->identify.initiator_port_protocols = phy->attached_iproto; + phy->phy->identify.target_port_protocols = phy->attached_tproto; + if (!phy->attached_tproto && dr->attached_sata_dev) + phy->phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; + phy->phy->identify.phy_identifier = phy_id; + phy->phy->minimum_linkrate_hw = dr->hmin_linkrate; + phy->phy->maximum_linkrate_hw = dr->hmax_linkrate; + phy->phy->minimum_linkrate = dr->pmin_linkrate; + phy->phy->maximum_linkrate = dr->pmax_linkrate; + phy->phy->negotiated_linkrate = phy->linkrate; + phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED); + + skip: + if (new_phy) + if (sas_phy_add(phy->phy)) { + sas_phy_free(phy->phy); + return; + } + + out: + switch (phy->attached_dev_type) { + case SAS_SATA_PENDING: + type = "stp pending"; + break; + case SAS_PHY_UNUSED: + type = "no device"; + break; + case SAS_END_DEVICE: + if (phy->attached_iproto) { + if (phy->attached_tproto) + type = "host+target"; + else + type = "host"; + } else { + 
if (dr->attached_sata_dev) + type = "stp"; + else + type = "ssp"; + } + break; + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: + type = "smp"; + break; + default: + type = "unknown"; + } + + /* this routine is polled by libata error recovery so filter + * unimportant messages + */ + if (new_phy || phy->attached_dev_type != dev_type || + phy->linkrate != linkrate || + SAS_ADDR(phy->attached_sas_addr) != SAS_ADDR(sas_addr)) + /* pass */; + else + return; + + /* if the attached device type changed and ata_eh is active, + * make sure we run revalidation when eh completes (see: + * sas_enable_revalidation) + */ + if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) + set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending); + + pr_debug("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n", + test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "", + SAS_ADDR(dev->sas_addr), phy->phy_id, + sas_route_char(dev, phy), phy->linkrate, + SAS_ADDR(phy->attached_sas_addr), type); +} + +/* check if we have an existing attached ata device on this expander phy */ +struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id) +{ + struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy_id]; + struct domain_device *dev; + struct sas_rphy *rphy; + + if (!ex_phy->port) + return NULL; + + rphy = ex_phy->port->rphy; + if (!rphy) + return NULL; + + dev = sas_find_dev_by_rphy(rphy); + + if (dev && dev_is_sata(dev)) + return dev; + + return NULL; +} + +#define DISCOVER_REQ_SIZE 16 +#define DISCOVER_RESP_SIZE sizeof(struct smp_disc_resp) + +static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req, + struct smp_disc_resp *disc_resp, + int single) +{ + struct discover_resp *dr = &disc_resp->disc; + int res; + + disc_req[9] = single; + + res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, + disc_resp, DISCOVER_RESP_SIZE); + if (res) + return res; + if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) { + pr_notice("Found loopback topology, just ignore it!\n"); + return 0; + } + sas_set_ex_phy(dev, single, disc_resp); + return 0; +} + +int sas_ex_phy_discover(struct domain_device *dev, int single) +{ + struct expander_device *ex = &dev->ex_dev; + int res = 0; + u8 *disc_req; + struct smp_disc_resp *disc_resp; + + disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); + if (!disc_req) + return -ENOMEM; + + disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); + if (!disc_resp) { + kfree(disc_req); + return -ENOMEM; + } + + disc_req[1] = SMP_DISCOVER; + + if (0 <= single && single < ex->num_phys) { + res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, single); + } else { + int i; + + for (i = 0; i < ex->num_phys; i++) { + res = sas_ex_phy_discover_helper(dev, disc_req, + disc_resp, i); + if (res) + goto out_err; + } + } +out_err: + kfree(disc_resp); + kfree(disc_req); + return res; +} + +static int sas_expander_discover(struct domain_device *dev) +{ + struct expander_device *ex = &dev->ex_dev; + int res; + + ex->ex_phy = kcalloc(ex->num_phys, sizeof(*ex->ex_phy), GFP_KERNEL); + if (!ex->ex_phy) + return -ENOMEM; + + res = sas_ex_phy_discover(dev, -1); + if (res) + goto out_err; + + return 0; + out_err: + kfree(ex->ex_phy); + ex->ex_phy = NULL; + return res; +} + +#define MAX_EXPANDER_PHYS 128 + +#define RG_REQ_SIZE 8 +#define RG_RESP_SIZE sizeof(struct smp_rg_resp) + +static int sas_ex_general(struct domain_device *dev) +{ + u8 *rg_req; + struct smp_rg_resp *rg_resp; + struct report_general_resp *rg; + int res; + int i; + + rg_req = 
alloc_smp_req(RG_REQ_SIZE); + if (!rg_req) + return -ENOMEM; + + rg_resp = alloc_smp_resp(RG_RESP_SIZE); + if (!rg_resp) { + kfree(rg_req); + return -ENOMEM; + } + + rg_req[1] = SMP_REPORT_GENERAL; + + for (i = 0; i < 5; i++) { + res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, + RG_RESP_SIZE); + + if (res) { + pr_notice("RG to ex %016llx failed:0x%x\n", + SAS_ADDR(dev->sas_addr), res); + goto out; + } else if (rg_resp->result != SMP_RESP_FUNC_ACC) { + pr_debug("RG:ex %016llx returned SMP result:0x%x\n", + SAS_ADDR(dev->sas_addr), rg_resp->result); + res = rg_resp->result; + goto out; + } + + rg = &rg_resp->rg; + dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count); + dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes); + dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS); + dev->ex_dev.t2t_supp = rg->t2t_supp; + dev->ex_dev.conf_route_table = rg->conf_route_table; + dev->ex_dev.configuring = rg->configuring; + memcpy(dev->ex_dev.enclosure_logical_id, + rg->enclosure_logical_id, 8); + + if (dev->ex_dev.configuring) { + pr_debug("RG: ex %016llx self-configuring...\n", + SAS_ADDR(dev->sas_addr)); + schedule_timeout_interruptible(5*HZ); + } else + break; + } +out: + kfree(rg_req); + kfree(rg_resp); + return res; +} + +static void ex_assign_manuf_info(struct domain_device *dev, void + *_mi_resp) +{ + u8 *mi_resp = _mi_resp; + struct sas_rphy *rphy = dev->rphy; + struct sas_expander_device *edev = rphy_to_expander_device(rphy); + + memcpy(edev->vendor_id, mi_resp + 12, SAS_EXPANDER_VENDOR_ID_LEN); + memcpy(edev->product_id, mi_resp + 20, SAS_EXPANDER_PRODUCT_ID_LEN); + memcpy(edev->product_rev, mi_resp + 36, + SAS_EXPANDER_PRODUCT_REV_LEN); + + if (mi_resp[8] & 1) { + memcpy(edev->component_vendor_id, mi_resp + 40, + SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); + edev->component_id = mi_resp[48] << 8 | mi_resp[49]; + edev->component_revision_id = mi_resp[50]; + } +} + +#define MI_REQ_SIZE 8 +#define MI_RESP_SIZE 64 + +static int sas_ex_manuf_info(struct domain_device *dev) +{ + u8 *mi_req; + u8 *mi_resp; + int res; + + mi_req = alloc_smp_req(MI_REQ_SIZE); + if (!mi_req) + return -ENOMEM; + + mi_resp = alloc_smp_resp(MI_RESP_SIZE); + if (!mi_resp) { + kfree(mi_req); + return -ENOMEM; + } + + mi_req[1] = SMP_REPORT_MANUF_INFO; + + res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp, MI_RESP_SIZE); + if (res) { + pr_notice("MI: ex %016llx failed:0x%x\n", + SAS_ADDR(dev->sas_addr), res); + goto out; + } else if (mi_resp[2] != SMP_RESP_FUNC_ACC) { + pr_debug("MI ex %016llx returned SMP result:0x%x\n", + SAS_ADDR(dev->sas_addr), mi_resp[2]); + goto out; + } + + ex_assign_manuf_info(dev, mi_resp); +out: + kfree(mi_req); + kfree(mi_resp); + return res; +} + +#define PC_REQ_SIZE 44 +#define PC_RESP_SIZE 8 + +int sas_smp_phy_control(struct domain_device *dev, int phy_id, + enum phy_func phy_func, + struct sas_phy_linkrates *rates) +{ + u8 *pc_req; + u8 *pc_resp; + int res; + + pc_req = alloc_smp_req(PC_REQ_SIZE); + if (!pc_req) + return -ENOMEM; + + pc_resp = alloc_smp_resp(PC_RESP_SIZE); + if (!pc_resp) { + kfree(pc_req); + return -ENOMEM; + } + + pc_req[1] = SMP_PHY_CONTROL; + pc_req[9] = phy_id; + pc_req[10] = phy_func; + if (rates) { + pc_req[32] = rates->minimum_linkrate << 4; + pc_req[33] = rates->maximum_linkrate << 4; + } + + res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp, PC_RESP_SIZE); + if (res) { + pr_err("ex %016llx phy%02d PHY control failed: %d\n", + SAS_ADDR(dev->sas_addr), phy_id, res); + } else if (pc_resp[2] != SMP_RESP_FUNC_ACC) { + 
pr_err("ex %016llx phy%02d PHY control failed: function result 0x%x\n", + SAS_ADDR(dev->sas_addr), phy_id, pc_resp[2]); + res = pc_resp[2]; + } + kfree(pc_resp); + kfree(pc_req); + return res; +} + +static void sas_ex_disable_phy(struct domain_device *dev, int phy_id) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *phy = &ex->ex_phy[phy_id]; + + sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE, NULL); + phy->linkrate = SAS_PHY_DISABLED; +} + +static void sas_ex_disable_port(struct domain_device *dev, u8 *sas_addr) +{ + struct expander_device *ex = &dev->ex_dev; + int i; + + for (i = 0; i < ex->num_phys; i++) { + struct ex_phy *phy = &ex->ex_phy[i]; + + if (phy->phy_state == PHY_VACANT || + phy->phy_state == PHY_NOT_PRESENT) + continue; + + if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(sas_addr)) + sas_ex_disable_phy(dev, i); + } +} + +static int sas_dev_present_in_domain(struct asd_sas_port *port, + u8 *sas_addr) +{ + struct domain_device *dev; + + if (SAS_ADDR(port->sas_addr) == SAS_ADDR(sas_addr)) + return 1; + list_for_each_entry(dev, &port->dev_list, dev_list_node) { + if (SAS_ADDR(dev->sas_addr) == SAS_ADDR(sas_addr)) + return 1; + } + return 0; +} + +#define RPEL_REQ_SIZE 16 +#define RPEL_RESP_SIZE 32 +int sas_smp_get_phy_events(struct sas_phy *phy) +{ + int res; + u8 *req; + u8 *resp; + struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); + struct domain_device *dev = sas_find_dev_by_rphy(rphy); + + req = alloc_smp_req(RPEL_REQ_SIZE); + if (!req) + return -ENOMEM; + + resp = alloc_smp_resp(RPEL_RESP_SIZE); + if (!resp) { + kfree(req); + return -ENOMEM; + } + + req[1] = SMP_REPORT_PHY_ERR_LOG; + req[9] = phy->number; + + res = smp_execute_task(dev, req, RPEL_REQ_SIZE, + resp, RPEL_RESP_SIZE); + + if (res) + goto out; + + phy->invalid_dword_count = get_unaligned_be32(&resp[12]); + phy->running_disparity_error_count = get_unaligned_be32(&resp[16]); + phy->loss_of_dword_sync_count = get_unaligned_be32(&resp[20]); + phy->phy_reset_problem_count = get_unaligned_be32(&resp[24]); + + out: + kfree(req); + kfree(resp); + return res; + +} + +#ifdef CONFIG_SCSI_SAS_ATA + +#define RPS_REQ_SIZE 16 +#define RPS_RESP_SIZE sizeof(struct smp_rps_resp) + +int sas_get_report_phy_sata(struct domain_device *dev, int phy_id, + struct smp_rps_resp *rps_resp) +{ + int res; + u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE); + u8 *resp = (u8 *)rps_resp; + + if (!rps_req) + return -ENOMEM; + + rps_req[1] = SMP_REPORT_PHY_SATA; + rps_req[9] = phy_id; + + res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE, + rps_resp, RPS_RESP_SIZE); + + /* 0x34 is the FIS type for the D2H fis. There's a potential + * standards cockup here. sas-2 explicitly specifies the FIS + * should be encoded so that FIS type is in resp[24]. + * However, some expanders endian reverse this. 
Undo the + * reversal here */ + if (!res && resp[27] == 0x34 && resp[24] != 0x34) { + int i; + + for (i = 0; i < 5; i++) { + int j = 24 + (i*4); + u8 a, b; + a = resp[j + 0]; + b = resp[j + 1]; + resp[j + 0] = resp[j + 3]; + resp[j + 1] = resp[j + 2]; + resp[j + 2] = b; + resp[j + 3] = a; + } + } + + kfree(rps_req); + return res; +} +#endif + +static void sas_ex_get_linkrate(struct domain_device *parent, + struct domain_device *child, + struct ex_phy *parent_phy) +{ + struct expander_device *parent_ex = &parent->ex_dev; + struct sas_port *port; + int i; + + child->pathways = 0; + + port = parent_phy->port; + + for (i = 0; i < parent_ex->num_phys; i++) { + struct ex_phy *phy = &parent_ex->ex_phy[i]; + + if (phy->phy_state == PHY_VACANT || + phy->phy_state == PHY_NOT_PRESENT) + continue; + + if (sas_phy_match_dev_addr(child, phy)) { + child->min_linkrate = min(parent->min_linkrate, + phy->linkrate); + child->max_linkrate = max(parent->max_linkrate, + phy->linkrate); + child->pathways++; + sas_port_add_phy(port, phy->phy); + } + } + child->linkrate = min(parent_phy->linkrate, child->max_linkrate); + child->pathways = min(child->pathways, parent->pathways); +} + +static int sas_ex_add_dev(struct domain_device *parent, struct ex_phy *phy, + struct domain_device *child, int phy_id) +{ + struct sas_rphy *rphy; + int res; + + child->dev_type = SAS_END_DEVICE; + rphy = sas_end_device_alloc(phy->port); + if (!rphy) + return -ENOMEM; + + child->tproto = phy->attached_tproto; + sas_init_dev(child); + + child->rphy = rphy; + get_device(&rphy->dev); + rphy->identify.phy_identifier = phy_id; + sas_fill_in_rphy(child, rphy); + + list_add_tail(&child->disco_list_node, &parent->port->disco_list); + + res = sas_notify_lldd_dev_found(child); + if (res) { + pr_notice("notify lldd for device %016llx at %016llx:%02d returned 0x%x\n", + SAS_ADDR(child->sas_addr), + SAS_ADDR(parent->sas_addr), phy_id, res); + sas_rphy_free(child->rphy); + list_del(&child->disco_list_node); + return res; + } + + return 0; +} + +static struct domain_device *sas_ex_discover_end_dev( + struct domain_device *parent, int phy_id) +{ + struct expander_device *parent_ex = &parent->ex_dev; + struct ex_phy *phy = &parent_ex->ex_phy[phy_id]; + struct domain_device *child = NULL; + int res; + + if (phy->attached_sata_host || phy->attached_sata_ps) + return NULL; + + child = sas_alloc_device(); + if (!child) + return NULL; + + kref_get(&parent->kref); + child->parent = parent; + child->port = parent->port; + child->iproto = phy->attached_iproto; + memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); + sas_hash_addr(child->hashed_sas_addr, child->sas_addr); + if (!phy->port) { + phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); + if (unlikely(!phy->port)) + goto out_err; + if (unlikely(sas_port_add(phy->port) != 0)) { + sas_port_free(phy->port); + goto out_err; + } + } + sas_ex_get_linkrate(parent, child, phy); + sas_device_set_phy(child, phy->port); + + if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) { + res = sas_ata_add_dev(parent, phy, child, phy_id); + } else if (phy->attached_tproto & SAS_PROTOCOL_SSP) { + res = sas_ex_add_dev(parent, phy, child, phy_id); + } else { + pr_notice("target proto 0x%x at %016llx:0x%x not handled\n", + phy->attached_tproto, SAS_ADDR(parent->sas_addr), + phy_id); + res = -ENODEV; + } + + if (res) + goto out_free; + + list_add_tail(&child->siblings, &parent_ex->children); + return child; + + out_free: + sas_port_delete(phy->port); + out_err: + phy->port = NULL; + 
sas_put_device(child); + return NULL; +} + +/* See if this phy is part of a wide port */ +static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id) +{ + struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; + int i; + + for (i = 0; i < parent->ex_dev.num_phys; i++) { + struct ex_phy *ephy = &parent->ex_dev.ex_phy[i]; + + if (ephy == phy) + continue; + + if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr, + SAS_ADDR_SIZE) && ephy->port) { + sas_port_add_phy(ephy->port, phy->phy); + phy->port = ephy->port; + phy->phy_state = PHY_DEVICE_DISCOVERED; + return true; + } + } + + return false; +} + +static struct domain_device *sas_ex_discover_expander( + struct domain_device *parent, int phy_id) +{ + struct sas_expander_device *parent_ex = rphy_to_expander_device(parent->rphy); + struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; + struct domain_device *child = NULL; + struct sas_rphy *rphy; + struct sas_expander_device *edev; + struct asd_sas_port *port; + int res; + + if (phy->routing_attr == DIRECT_ROUTING) { + pr_warn("ex %016llx:%02d:D <--> ex %016llx:0x%x is not allowed\n", + SAS_ADDR(parent->sas_addr), phy_id, + SAS_ADDR(phy->attached_sas_addr), + phy->attached_phy_id); + return NULL; + } + child = sas_alloc_device(); + if (!child) + return NULL; + + phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); + /* FIXME: better error handling */ + BUG_ON(sas_port_add(phy->port) != 0); + + + switch (phy->attached_dev_type) { + case SAS_EDGE_EXPANDER_DEVICE: + rphy = sas_expander_alloc(phy->port, + SAS_EDGE_EXPANDER_DEVICE); + break; + case SAS_FANOUT_EXPANDER_DEVICE: + rphy = sas_expander_alloc(phy->port, + SAS_FANOUT_EXPANDER_DEVICE); + break; + default: + rphy = NULL; /* shut gcc up */ + BUG(); + } + port = parent->port; + child->rphy = rphy; + get_device(&rphy->dev); + edev = rphy_to_expander_device(rphy); + child->dev_type = phy->attached_dev_type; + kref_get(&parent->kref); + child->parent = parent; + child->port = port; + child->iproto = phy->attached_iproto; + child->tproto = phy->attached_tproto; + memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); + sas_hash_addr(child->hashed_sas_addr, child->sas_addr); + sas_ex_get_linkrate(parent, child, phy); + edev->level = parent_ex->level + 1; + parent->port->disc.max_level = max(parent->port->disc.max_level, + edev->level); + sas_init_dev(child); + sas_fill_in_rphy(child, rphy); + sas_rphy_add(rphy); + + spin_lock_irq(&parent->port->dev_list_lock); + list_add_tail(&child->dev_list_node, &parent->port->dev_list); + spin_unlock_irq(&parent->port->dev_list_lock); + + res = sas_discover_expander(child); + if (res) { + sas_rphy_delete(rphy); + spin_lock_irq(&parent->port->dev_list_lock); + list_del(&child->dev_list_node); + spin_unlock_irq(&parent->port->dev_list_lock); + sas_put_device(child); + sas_port_delete(phy->port); + phy->port = NULL; + return NULL; + } + list_add_tail(&child->siblings, &parent->ex_dev.children); + return child; +} + +static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *ex_phy = &ex->ex_phy[phy_id]; + struct domain_device *child = NULL; + int res = 0; + + /* Phy state */ + if (ex_phy->linkrate == SAS_SATA_SPINUP_HOLD) { + if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET, NULL)) + res = sas_ex_phy_discover(dev, phy_id); + if (res) + return res; + } + + /* Parent and domain coherency */ + if (!dev->parent && sas_phy_match_port_addr(dev->port, ex_phy)) { + sas_add_parent_port(dev, phy_id); + 
return 0; + } + if (dev->parent && sas_phy_match_dev_addr(dev->parent, ex_phy)) { + sas_add_parent_port(dev, phy_id); + if (ex_phy->routing_attr == TABLE_ROUTING) + sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1); + return 0; + } + + if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) + sas_ex_disable_port(dev, ex_phy->attached_sas_addr); + + if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) { + if (ex_phy->routing_attr == DIRECT_ROUTING) { + memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + sas_configure_routing(dev, ex_phy->attached_sas_addr); + } + return 0; + } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN) + return 0; + + if (ex_phy->attached_dev_type != SAS_END_DEVICE && + ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE && + ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE && + ex_phy->attached_dev_type != SAS_SATA_PENDING) { + pr_warn("unknown device type(0x%x) attached to ex %016llx phy%02d\n", + ex_phy->attached_dev_type, + SAS_ADDR(dev->sas_addr), + phy_id); + return 0; + } + + res = sas_configure_routing(dev, ex_phy->attached_sas_addr); + if (res) { + pr_notice("configure routing for dev %016llx reported 0x%x. Forgotten\n", + SAS_ADDR(ex_phy->attached_sas_addr), res); + sas_disable_routing(dev, ex_phy->attached_sas_addr); + return res; + } + + if (sas_ex_join_wide_port(dev, phy_id)) { + pr_debug("Attaching ex phy%02d to wide port %016llx\n", + phy_id, SAS_ADDR(ex_phy->attached_sas_addr)); + return res; + } + + switch (ex_phy->attached_dev_type) { + case SAS_END_DEVICE: + case SAS_SATA_PENDING: + child = sas_ex_discover_end_dev(dev, phy_id); + break; + case SAS_FANOUT_EXPANDER_DEVICE: + if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { + pr_debug("second fanout expander %016llx phy%02d attached to ex %016llx phy%02d\n", + SAS_ADDR(ex_phy->attached_sas_addr), + ex_phy->attached_phy_id, + SAS_ADDR(dev->sas_addr), + phy_id); + sas_ex_disable_phy(dev, phy_id); + return res; + } else + memcpy(dev->port->disc.fanout_sas_addr, + ex_phy->attached_sas_addr, SAS_ADDR_SIZE); + fallthrough; + case SAS_EDGE_EXPANDER_DEVICE: + child = sas_ex_discover_expander(dev, phy_id); + break; + default: + break; + } + + if (!child) + pr_notice("ex %016llx phy%02d failed to discover\n", + SAS_ADDR(dev->sas_addr), phy_id); + return res; +} + +static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr) +{ + struct expander_device *ex = &dev->ex_dev; + int i; + + for (i = 0; i < ex->num_phys; i++) { + struct ex_phy *phy = &ex->ex_phy[i]; + + if (phy->phy_state == PHY_VACANT || + phy->phy_state == PHY_NOT_PRESENT) + continue; + + if (dev_is_expander(phy->attached_dev_type) && + phy->routing_attr == SUBTRACTIVE_ROUTING) { + + memcpy(sub_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); + + return 1; + } + } + return 0; +} + +static int sas_check_level_subtractive_boundary(struct domain_device *dev) +{ + struct expander_device *ex = &dev->ex_dev; + struct domain_device *child; + u8 sub_addr[SAS_ADDR_SIZE] = {0, }; + + list_for_each_entry(child, &ex->children, siblings) { + if (!dev_is_expander(child->dev_type)) + continue; + if (sub_addr[0] == 0) { + sas_find_sub_addr(child, sub_addr); + continue; + } else { + u8 s2[SAS_ADDR_SIZE]; + + if (sas_find_sub_addr(child, s2) && + (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) { + + pr_notice("ex %016llx->%016llx-?->%016llx diverges from subtractive boundary %016llx\n", + SAS_ADDR(dev->sas_addr), + SAS_ADDR(child->sas_addr), + SAS_ADDR(s2), + SAS_ADDR(sub_addr)); + + sas_ex_disable_port(child, s2); + } + } + } + return 0; +} 
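
Editorial note: every SMP helper in this file follows the same framing pattern — allocate a request buffer, place the SMP function code at byte 1 and any parameters at their spec-defined offsets (the phy of interest at byte 9 for DISCOVER- and PHY CONTROL-style functions), hand it to smp_execute_task(), then check byte 2 of the response for SMP_RESP_FUNC_ACC. The standalone sketch below restates that framing outside the kernel, together with the big-endian 8-byte address comparison that the SAS_ADDR() idiom performs. The constant names and numeric values (0x40, 0x10) and the helper names build_discover_req()/sas_addr_to_u64() are assumptions of this illustration taken from the SAS specification; they are not part of the patch.

/*
 * Illustrative userspace sketch, not kernel code: framing an SMP DISCOVER
 * request the way sas_ex_phy_discover_helper() does above.  Byte 0 is the
 * SMP frame type (in libsas, alloc_smp_req() fills that byte in), byte 1
 * the SMP function, byte 9 the phy to discover.
 */
#include <stdint.h>
#include <string.h>

#define SMP_FRAME_TYPE_REQUEST	0x40	/* assumed value, per the SAS spec */
#define SMP_FUNC_DISCOVER	0x10	/* assumed value, per the SAS spec */
#define DISCOVER_REQ_SIZE	16

static void build_discover_req(uint8_t req[DISCOVER_REQ_SIZE], uint8_t phy_id)
{
	memset(req, 0, DISCOVER_REQ_SIZE);
	req[0] = SMP_FRAME_TYPE_REQUEST;
	req[1] = SMP_FUNC_DISCOVER;
	req[9] = phy_id;		/* the "single" phy argument above */
}

/*
 * SAS addresses are 8-byte big-endian values; folding one into a u64, as
 * the SAS_ADDR() idiom does in the code above, lets the discovery code
 * compare and print addresses directly.
 */
static uint64_t sas_addr_to_u64(const uint8_t addr[8])
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | addr[i];
	return v;
}

Responses are checked the same way in reverse: byte 2 carries the SMP function result, and anything other than an accepted result is propagated back as the error code, as sas_ex_general() and sas_ex_manuf_info() do above.
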
+/** + * sas_ex_discover_devices - discover devices attached to this expander + * @dev: pointer to the expander domain device + * @single: if you want to do a single phy, else set to -1; + * + * Configure this expander for use with its devices and register the + * devices of this expander. + */ +static int sas_ex_discover_devices(struct domain_device *dev, int single) +{ + struct expander_device *ex = &dev->ex_dev; + int i = 0, end = ex->num_phys; + int res = 0; + + if (0 <= single && single < end) { + i = single; + end = i+1; + } + + for ( ; i < end; i++) { + struct ex_phy *ex_phy = &ex->ex_phy[i]; + + if (ex_phy->phy_state == PHY_VACANT || + ex_phy->phy_state == PHY_NOT_PRESENT || + ex_phy->phy_state == PHY_DEVICE_DISCOVERED) + continue; + + switch (ex_phy->linkrate) { + case SAS_PHY_DISABLED: + case SAS_PHY_RESET_PROBLEM: + case SAS_SATA_PORT_SELECTOR: + continue; + default: + res = sas_ex_discover_dev(dev, i); + if (res) + break; + continue; + } + } + + if (!res) + sas_check_level_subtractive_boundary(dev); + + return res; +} + +static int sas_check_ex_subtractive_boundary(struct domain_device *dev) +{ + struct expander_device *ex = &dev->ex_dev; + int i; + u8 *sub_sas_addr = NULL; + + if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE) + return 0; + + for (i = 0; i < ex->num_phys; i++) { + struct ex_phy *phy = &ex->ex_phy[i]; + + if (phy->phy_state == PHY_VACANT || + phy->phy_state == PHY_NOT_PRESENT) + continue; + + if (dev_is_expander(phy->attached_dev_type) && + phy->routing_attr == SUBTRACTIVE_ROUTING) { + + if (!sub_sas_addr) + sub_sas_addr = &phy->attached_sas_addr[0]; + else if (SAS_ADDR(sub_sas_addr) != + SAS_ADDR(phy->attached_sas_addr)) { + + pr_notice("ex %016llx phy%02d diverges(%016llx) on subtractive boundary(%016llx). Disabled\n", + SAS_ADDR(dev->sas_addr), i, + SAS_ADDR(phy->attached_sas_addr), + SAS_ADDR(sub_sas_addr)); + sas_ex_disable_phy(dev, i); + } + } + } + return 0; +} + +static void sas_print_parent_topology_bug(struct domain_device *child, + struct ex_phy *parent_phy, + struct ex_phy *child_phy) +{ + static const char *ex_type[] = { + [SAS_EDGE_EXPANDER_DEVICE] = "edge", + [SAS_FANOUT_EXPANDER_DEVICE] = "fanout", + }; + struct domain_device *parent = child->parent; + + pr_notice("%s ex %016llx phy%02d <--> %s ex %016llx phy%02d has %c:%c routing link!\n", + ex_type[parent->dev_type], + SAS_ADDR(parent->sas_addr), + parent_phy->phy_id, + + ex_type[child->dev_type], + SAS_ADDR(child->sas_addr), + child_phy->phy_id, + + sas_route_char(parent, parent_phy), + sas_route_char(child, child_phy)); +} + +static bool sas_eeds_valid(struct domain_device *parent, + struct domain_device *child) +{ + struct sas_discovery *disc = &parent->port->disc; + + return (SAS_ADDR(disc->eeds_a) == SAS_ADDR(parent->sas_addr) || + SAS_ADDR(disc->eeds_a) == SAS_ADDR(child->sas_addr)) && + (SAS_ADDR(disc->eeds_b) == SAS_ADDR(parent->sas_addr) || + SAS_ADDR(disc->eeds_b) == SAS_ADDR(child->sas_addr)); +} + +static int sas_check_eeds(struct domain_device *child, + struct ex_phy *parent_phy, + struct ex_phy *child_phy) +{ + int res = 0; + struct domain_device *parent = child->parent; + struct sas_discovery *disc = &parent->port->disc; + + if (SAS_ADDR(disc->fanout_sas_addr) != 0) { + res = -ENODEV; + pr_warn("edge ex %016llx phy S:%02d <--> edge ex %016llx phy S:%02d, while there is a fanout ex %016llx\n", + SAS_ADDR(parent->sas_addr), + parent_phy->phy_id, + SAS_ADDR(child->sas_addr), + child_phy->phy_id, + SAS_ADDR(disc->fanout_sas_addr)); + } else if (SAS_ADDR(disc->eeds_a) == 0) { + 
memcpy(disc->eeds_a, parent->sas_addr, SAS_ADDR_SIZE); + memcpy(disc->eeds_b, child->sas_addr, SAS_ADDR_SIZE); + } else if (!sas_eeds_valid(parent, child)) { + res = -ENODEV; + pr_warn("edge ex %016llx phy%02d <--> edge ex %016llx phy%02d link forms a third EEDS!\n", + SAS_ADDR(parent->sas_addr), + parent_phy->phy_id, + SAS_ADDR(child->sas_addr), + child_phy->phy_id); + } + + return res; +} + +static int sas_check_edge_expander_topo(struct domain_device *child, + struct ex_phy *parent_phy) +{ + struct expander_device *child_ex = &child->ex_dev; + struct expander_device *parent_ex = &child->parent->ex_dev; + struct ex_phy *child_phy; + + child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; + + if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { + if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || + child_phy->routing_attr != TABLE_ROUTING) + goto error; + } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) { + if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) + return sas_check_eeds(child, parent_phy, child_phy); + else if (child_phy->routing_attr != TABLE_ROUTING) + goto error; + } else if (parent_phy->routing_attr == TABLE_ROUTING) { + if (child_phy->routing_attr != SUBTRACTIVE_ROUTING && + (child_phy->routing_attr != TABLE_ROUTING || + !child_ex->t2t_supp || !parent_ex->t2t_supp)) + goto error; + } + + return 0; +error: + sas_print_parent_topology_bug(child, parent_phy, child_phy); + return -ENODEV; +} + +static int sas_check_fanout_expander_topo(struct domain_device *child, + struct ex_phy *parent_phy) +{ + struct expander_device *child_ex = &child->ex_dev; + struct ex_phy *child_phy; + + child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; + + if (parent_phy->routing_attr == TABLE_ROUTING && + child_phy->routing_attr == SUBTRACTIVE_ROUTING) + return 0; + + sas_print_parent_topology_bug(child, parent_phy, child_phy); + + return -ENODEV; +} + +static int sas_check_parent_topology(struct domain_device *child) +{ + struct expander_device *parent_ex; + int i; + int res = 0; + + if (!child->parent) + return 0; + + if (!dev_is_expander(child->parent->dev_type)) + return 0; + + parent_ex = &child->parent->ex_dev; + + for (i = 0; i < parent_ex->num_phys; i++) { + struct ex_phy *parent_phy = &parent_ex->ex_phy[i]; + + if (parent_phy->phy_state == PHY_VACANT || + parent_phy->phy_state == PHY_NOT_PRESENT) + continue; + + if (!sas_phy_match_dev_addr(child, parent_phy)) + continue; + + switch (child->parent->dev_type) { + case SAS_EDGE_EXPANDER_DEVICE: + if (sas_check_edge_expander_topo(child, parent_phy)) + res = -ENODEV; + break; + case SAS_FANOUT_EXPANDER_DEVICE: + if (sas_check_fanout_expander_topo(child, parent_phy)) + res = -ENODEV; + break; + default: + break; + } + } + + return res; +} + +#define RRI_REQ_SIZE 16 +#define RRI_RESP_SIZE 44 + +static int sas_configure_present(struct domain_device *dev, int phy_id, + u8 *sas_addr, int *index, int *present) +{ + int i, res = 0; + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *phy = &ex->ex_phy[phy_id]; + u8 *rri_req; + u8 *rri_resp; + + *present = 0; + *index = 0; + + rri_req = alloc_smp_req(RRI_REQ_SIZE); + if (!rri_req) + return -ENOMEM; + + rri_resp = alloc_smp_resp(RRI_RESP_SIZE); + if (!rri_resp) { + kfree(rri_req); + return -ENOMEM; + } + + rri_req[1] = SMP_REPORT_ROUTE_INFO; + rri_req[9] = phy_id; + + for (i = 0; i < ex->max_route_indexes ; i++) { + *(__be16 *)(rri_req+6) = cpu_to_be16(i); + res = smp_execute_task(dev, rri_req, RRI_REQ_SIZE, rri_resp, + RRI_RESP_SIZE); + if (res) + goto out; + res 
= rri_resp[2]; + if (res == SMP_RESP_NO_INDEX) { + pr_warn("overflow of indexes: dev %016llx phy%02d index 0x%x\n", + SAS_ADDR(dev->sas_addr), phy_id, i); + goto out; + } else if (res != SMP_RESP_FUNC_ACC) { + pr_notice("%s: dev %016llx phy%02d index 0x%x result 0x%x\n", + __func__, SAS_ADDR(dev->sas_addr), phy_id, + i, res); + goto out; + } + if (SAS_ADDR(sas_addr) != 0) { + if (SAS_ADDR(rri_resp+16) == SAS_ADDR(sas_addr)) { + *index = i; + if ((rri_resp[12] & 0x80) == 0x80) + *present = 0; + else + *present = 1; + goto out; + } else if (SAS_ADDR(rri_resp+16) == 0) { + *index = i; + *present = 0; + goto out; + } + } else if (SAS_ADDR(rri_resp+16) == 0 && + phy->last_da_index < i) { + phy->last_da_index = i; + *index = i; + *present = 0; + goto out; + } + } + res = -1; +out: + kfree(rri_req); + kfree(rri_resp); + return res; +} + +#define CRI_REQ_SIZE 44 +#define CRI_RESP_SIZE 8 + +static int sas_configure_set(struct domain_device *dev, int phy_id, + u8 *sas_addr, int index, int include) +{ + int res; + u8 *cri_req; + u8 *cri_resp; + + cri_req = alloc_smp_req(CRI_REQ_SIZE); + if (!cri_req) + return -ENOMEM; + + cri_resp = alloc_smp_resp(CRI_RESP_SIZE); + if (!cri_resp) { + kfree(cri_req); + return -ENOMEM; + } + + cri_req[1] = SMP_CONF_ROUTE_INFO; + *(__be16 *)(cri_req+6) = cpu_to_be16(index); + cri_req[9] = phy_id; + if (SAS_ADDR(sas_addr) == 0 || !include) + cri_req[12] |= 0x80; + memcpy(cri_req+16, sas_addr, SAS_ADDR_SIZE); + + res = smp_execute_task(dev, cri_req, CRI_REQ_SIZE, cri_resp, + CRI_RESP_SIZE); + if (res) + goto out; + res = cri_resp[2]; + if (res == SMP_RESP_NO_INDEX) { + pr_warn("overflow of indexes: dev %016llx phy%02d index 0x%x\n", + SAS_ADDR(dev->sas_addr), phy_id, index); + } +out: + kfree(cri_req); + kfree(cri_resp); + return res; +} + +static int sas_configure_phy(struct domain_device *dev, int phy_id, + u8 *sas_addr, int include) +{ + int index; + int present; + int res; + + res = sas_configure_present(dev, phy_id, sas_addr, &index, &present); + if (res) + return res; + if (include ^ present) + return sas_configure_set(dev, phy_id, sas_addr, index, + include); + + return res; +} + +/** + * sas_configure_parent - configure routing table of parent + * @parent: parent expander + * @child: child expander + * @sas_addr: SAS port identifier of device directly attached to child + * @include: whether or not to include @child in the expander routing table + */ +static int sas_configure_parent(struct domain_device *parent, + struct domain_device *child, + u8 *sas_addr, int include) +{ + struct expander_device *ex_parent = &parent->ex_dev; + int res = 0; + int i; + + if (parent->parent) { + res = sas_configure_parent(parent->parent, parent, sas_addr, + include); + if (res) + return res; + } + + if (ex_parent->conf_route_table == 0) { + pr_debug("ex %016llx has self-configuring routing table\n", + SAS_ADDR(parent->sas_addr)); + return 0; + } + + for (i = 0; i < ex_parent->num_phys; i++) { + struct ex_phy *phy = &ex_parent->ex_phy[i]; + + if ((phy->routing_attr == TABLE_ROUTING) && + sas_phy_match_dev_addr(child, phy)) { + res = sas_configure_phy(parent, i, sas_addr, include); + if (res) + return res; + } + } + + return res; +} + +/** + * sas_configure_routing - configure routing + * @dev: expander device + * @sas_addr: port identifier of device directly attached to the expander device + */ +static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr) +{ + if (dev->parent) + return sas_configure_parent(dev->parent, dev, sas_addr, 1); + return 0; +} + +static int 
sas_disable_routing(struct domain_device *dev, u8 *sas_addr) +{ + if (dev->parent) + return sas_configure_parent(dev->parent, dev, sas_addr, 0); + return 0; +} + +/** + * sas_discover_expander - expander discovery + * @dev: pointer to expander domain device + * + * See comment in sas_discover_sata(). + */ +static int sas_discover_expander(struct domain_device *dev) +{ + int res; + + res = sas_notify_lldd_dev_found(dev); + if (res) + return res; + + res = sas_ex_general(dev); + if (res) + goto out_err; + res = sas_ex_manuf_info(dev); + if (res) + goto out_err; + + res = sas_expander_discover(dev); + if (res) { + pr_warn("expander %016llx discovery failed(0x%x)\n", + SAS_ADDR(dev->sas_addr), res); + goto out_err; + } + + sas_check_ex_subtractive_boundary(dev); + res = sas_check_parent_topology(dev); + if (res) + goto out_err; + return 0; +out_err: + sas_notify_lldd_dev_gone(dev); + return res; +} + +static int sas_ex_level_discovery(struct asd_sas_port *port, const int level) +{ + int res = 0; + struct domain_device *dev; + + list_for_each_entry(dev, &port->dev_list, dev_list_node) { + if (dev_is_expander(dev->dev_type)) { + struct sas_expander_device *ex = + rphy_to_expander_device(dev->rphy); + + if (level == ex->level) + res = sas_ex_discover_devices(dev, -1); + else if (level > 0) + res = sas_ex_discover_devices(port->port_dev, -1); + + } + } + + return res; +} + +static int sas_ex_bfs_disc(struct asd_sas_port *port) +{ + int res; + int level; + + do { + level = port->disc.max_level; + res = sas_ex_level_discovery(port, level); + mb(); + } while (level < port->disc.max_level); + + return res; +} + +int sas_discover_root_expander(struct domain_device *dev) +{ + int res; + struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); + + res = sas_rphy_add(dev->rphy); + if (res) + goto out_err; + + ex->level = dev->port->disc.max_level; /* 0 */ + res = sas_discover_expander(dev); + if (res) + goto out_err2; + + sas_ex_bfs_disc(dev->port); + + return res; + +out_err2: + sas_rphy_remove(dev->rphy); +out_err: + return res; +} + +/* ---------- Domain revalidation ---------- */ + +static int sas_get_phy_discover(struct domain_device *dev, + int phy_id, struct smp_disc_resp *disc_resp) +{ + int res; + u8 *disc_req; + + disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); + if (!disc_req) + return -ENOMEM; + + disc_req[1] = SMP_DISCOVER; + disc_req[9] = phy_id; + + res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, + disc_resp, DISCOVER_RESP_SIZE); + if (res) + goto out; + if (disc_resp->result != SMP_RESP_FUNC_ACC) + res = disc_resp->result; +out: + kfree(disc_req); + return res; +} + +static int sas_get_phy_change_count(struct domain_device *dev, + int phy_id, int *pcc) +{ + int res; + struct smp_disc_resp *disc_resp; + + disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); + if (!disc_resp) + return -ENOMEM; + + res = sas_get_phy_discover(dev, phy_id, disc_resp); + if (!res) + *pcc = disc_resp->disc.change_count; + + kfree(disc_resp); + return res; +} + +int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, + u8 *sas_addr, enum sas_device_type *type) +{ + int res; + struct smp_disc_resp *disc_resp; + + disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); + if (!disc_resp) + return -ENOMEM; + + res = sas_get_phy_discover(dev, phy_id, disc_resp); + if (res == 0) { + memcpy(sas_addr, disc_resp->disc.attached_sas_addr, + SAS_ADDR_SIZE); + *type = to_dev_type(&disc_resp->disc); + if (*type == 0) + memset(sas_addr, 0, SAS_ADDR_SIZE); + } + kfree(disc_resp); + return res; +} + +static int 
sas_find_bcast_phy(struct domain_device *dev, int *phy_id, + int from_phy, bool update) +{ + struct expander_device *ex = &dev->ex_dev; + int res = 0; + int i; + + for (i = from_phy; i < ex->num_phys; i++) { + int phy_change_count = 0; + + res = sas_get_phy_change_count(dev, i, &phy_change_count); + switch (res) { + case SMP_RESP_PHY_VACANT: + case SMP_RESP_NO_PHY: + continue; + case SMP_RESP_FUNC_ACC: + break; + default: + return res; + } + + if (phy_change_count != ex->ex_phy[i].phy_change_count) { + if (update) + ex->ex_phy[i].phy_change_count = + phy_change_count; + *phy_id = i; + return 0; + } + } + return 0; +} + +static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) +{ + int res; + u8 *rg_req; + struct smp_rg_resp *rg_resp; + + rg_req = alloc_smp_req(RG_REQ_SIZE); + if (!rg_req) + return -ENOMEM; + + rg_resp = alloc_smp_resp(RG_RESP_SIZE); + if (!rg_resp) { + kfree(rg_req); + return -ENOMEM; + } + + rg_req[1] = SMP_REPORT_GENERAL; + + res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, + RG_RESP_SIZE); + if (res) + goto out; + if (rg_resp->result != SMP_RESP_FUNC_ACC) { + res = rg_resp->result; + goto out; + } + + *ecc = be16_to_cpu(rg_resp->rg.change_count); +out: + kfree(rg_resp); + kfree(rg_req); + return res; +} +/** + * sas_find_bcast_dev - find the device issue BROADCAST(CHANGE). + * @dev:domain device to be detect. + * @src_dev: the device which originated BROADCAST(CHANGE). + * + * Add self-configuration expander support. Suppose two expander cascading, + * when the first level expander is self-configuring, hotplug the disks in + * second level expander, BROADCAST(CHANGE) will not only be originated + * in the second level expander, but also be originated in the first level + * expander (see SAS protocol SAS 2r-14, 7.11 for detail), it is to say, + * expander changed count in two level expanders will all increment at least + * once, but the phy which chang count has changed is the source device which + * we concerned. + */ + +static int sas_find_bcast_dev(struct domain_device *dev, + struct domain_device **src_dev) +{ + struct expander_device *ex = &dev->ex_dev; + int ex_change_count = -1; + int phy_id = -1; + int res; + struct domain_device *ch; + + res = sas_get_ex_change_count(dev, &ex_change_count); + if (res) + goto out; + if (ex_change_count != -1 && ex_change_count != ex->ex_change_count) { + /* Just detect if this expander phys phy change count changed, + * in order to determine if this expander originate BROADCAST, + * and do not update phy change count field in our structure. 
+ */ + res = sas_find_bcast_phy(dev, &phy_id, 0, false); + if (phy_id != -1) { + *src_dev = dev; + ex->ex_change_count = ex_change_count; + pr_info("ex %016llx phy%02d change count has changed\n", + SAS_ADDR(dev->sas_addr), phy_id); + return res; + } else + pr_info("ex %016llx phys DID NOT change\n", + SAS_ADDR(dev->sas_addr)); + } + list_for_each_entry(ch, &ex->children, siblings) { + if (dev_is_expander(ch->dev_type)) { + res = sas_find_bcast_dev(ch, src_dev); + if (*src_dev) + return res; + } + } +out: + return res; +} + +static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_device *dev) +{ + struct expander_device *ex = &dev->ex_dev; + struct domain_device *child, *n; + + list_for_each_entry_safe(child, n, &ex->children, siblings) { + set_bit(SAS_DEV_GONE, &child->state); + if (dev_is_expander(child->dev_type)) + sas_unregister_ex_tree(port, child); + else + sas_unregister_dev(port, child); + } + sas_unregister_dev(port, dev); +} + +static void sas_unregister_devs_sas_addr(struct domain_device *parent, + int phy_id, bool last) +{ + struct expander_device *ex_dev = &parent->ex_dev; + struct ex_phy *phy = &ex_dev->ex_phy[phy_id]; + struct domain_device *child, *n, *found = NULL; + if (last) { + list_for_each_entry_safe(child, n, + &ex_dev->children, siblings) { + if (sas_phy_match_dev_addr(child, phy)) { + set_bit(SAS_DEV_GONE, &child->state); + if (dev_is_expander(child->dev_type)) + sas_unregister_ex_tree(parent->port, child); + else + sas_unregister_dev(parent->port, child); + found = child; + break; + } + } + sas_disable_routing(parent, phy->attached_sas_addr); + } + memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + if (phy->port) { + sas_port_delete_phy(phy->port, phy->phy); + sas_device_set_phy(found, phy->port); + if (phy->port->num_phys == 0) + list_add_tail(&phy->port->del_list, + &parent->port->sas_port_del_list); + phy->port = NULL; + } +} + +static int sas_discover_bfs_by_root_level(struct domain_device *root, + const int level) +{ + struct expander_device *ex_root = &root->ex_dev; + struct domain_device *child; + int res = 0; + + list_for_each_entry(child, &ex_root->children, siblings) { + if (dev_is_expander(child->dev_type)) { + struct sas_expander_device *ex = + rphy_to_expander_device(child->rphy); + + if (level > ex->level) + res = sas_discover_bfs_by_root_level(child, + level); + else if (level == ex->level) + res = sas_ex_discover_devices(child, -1); + } + } + return res; +} + +static int sas_discover_bfs_by_root(struct domain_device *dev) +{ + int res; + struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); + int level = ex->level+1; + + res = sas_ex_discover_devices(dev, -1); + if (res) + goto out; + do { + res = sas_discover_bfs_by_root_level(dev, level); + mb(); + level += 1; + } while (level <= dev->port->disc.max_level); +out: + return res; +} + +static int sas_discover_new(struct domain_device *dev, int phy_id) +{ + struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; + struct domain_device *child; + int res; + + pr_debug("ex %016llx phy%02d new device attached\n", + SAS_ADDR(dev->sas_addr), phy_id); + res = sas_ex_phy_discover(dev, phy_id); + if (res) + return res; + + if (sas_ex_join_wide_port(dev, phy_id)) + return 0; + + res = sas_ex_discover_devices(dev, phy_id); + if (res) + return res; + list_for_each_entry(child, &dev->ex_dev.children, siblings) { + if (sas_phy_match_dev_addr(child, ex_phy)) { + if (dev_is_expander(child->dev_type)) + res = sas_discover_bfs_by_root(child); + break; + } + } + return res; +} + 
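
Editorial note: sas_find_bcast_dev() above locates the expander that originated a BROADCAST(CHANGE) by comparing two cached counters against freshly fetched ones — the expander-level change count from REPORT GENERAL and the per-phy change count from DISCOVER. The sketch below restates just that bookkeeping in isolation; struct phy_state, struct expander_state, expander_changed() and find_changed_phy() are hypothetical stand-ins for illustration, not libsas types, and the field widths are illustrative.

/*
 * Illustrative sketch, not kernel code: the change-count bookkeeping
 * behind sas_find_bcast_dev()/sas_find_bcast_phy() above.
 */
#include <stdbool.h>
#include <stdint.h>

struct phy_state {
	int phy_change_count;		/* cached from the last DISCOVER */
};

struct expander_state {
	uint16_t ex_change_count;	/* cached from the last REPORT GENERAL */
	int num_phys;
	struct phy_state *phy;
};

/* true if the expander itself reports a newer change count */
static bool expander_changed(const struct expander_state *ex,
			     uint16_t fresh_rg_count)
{
	return fresh_rg_count != ex->ex_change_count;
}

/*
 * Return the first phy whose change count moved, or -1 if none did.
 * When update is true the cache is refreshed, mirroring the "update"
 * argument of sas_find_bcast_phy() above.
 */
static int find_changed_phy(struct expander_state *ex,
			    const int *fresh_counts, bool update)
{
	for (int i = 0; i < ex->num_phys; i++) {
		if (fresh_counts[i] != ex->phy[i].phy_change_count) {
			if (update)
				ex->phy[i].phy_change_count = fresh_counts[i];
			return i;
		}
	}
	return -1;
}

In the kernel code the first pass runs with update disabled, so the phy that actually changed is still detectable afterwards; only sas_ex_revalidate_domain() later walks the phys with update enabled while rediscovering them.
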
+static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old) +{ + if (old == new) + return true; + + /* treat device directed resets as flutter, if we went + * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery + */ + if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) || + (old == SAS_END_DEVICE && new == SAS_SATA_PENDING)) + return true; + + return false; +} + +static int sas_rediscover_dev(struct domain_device *dev, int phy_id, + bool last, int sibling) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *phy = &ex->ex_phy[phy_id]; + enum sas_device_type type = SAS_PHY_UNUSED; + u8 sas_addr[SAS_ADDR_SIZE]; + char msg[80] = ""; + int res; + + if (!last) + sprintf(msg, ", part of a wide port with phy%02d", sibling); + + pr_debug("ex %016llx rediscovering phy%02d%s\n", + SAS_ADDR(dev->sas_addr), phy_id, msg); + + memset(sas_addr, 0, SAS_ADDR_SIZE); + res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type); + switch (res) { + case SMP_RESP_NO_PHY: + phy->phy_state = PHY_NOT_PRESENT; + sas_unregister_devs_sas_addr(dev, phy_id, last); + return res; + case SMP_RESP_PHY_VACANT: + phy->phy_state = PHY_VACANT; + sas_unregister_devs_sas_addr(dev, phy_id, last); + return res; + case SMP_RESP_FUNC_ACC: + break; + case -ECOMM: + break; + default: + return res; + } + + if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) { + phy->phy_state = PHY_EMPTY; + sas_unregister_devs_sas_addr(dev, phy_id, last); + /* + * Even though the PHY is empty, for convenience we discover + * the PHY to update the PHY info, like negotiated linkrate. + */ + sas_ex_phy_discover(dev, phy_id); + return res; + } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) && + dev_type_flutter(type, phy->attached_dev_type)) { + struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id); + char *action = ""; + + sas_ex_phy_discover(dev, phy_id); + + if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING) + action = ", needs recovery"; + pr_debug("ex %016llx phy%02d broadcast flutter%s\n", + SAS_ADDR(dev->sas_addr), phy_id, action); + return res; + } + + /* we always have to delete the old device when we went here */ + pr_info("ex %016llx phy%02d replace %016llx\n", + SAS_ADDR(dev->sas_addr), phy_id, + SAS_ADDR(phy->attached_sas_addr)); + sas_unregister_devs_sas_addr(dev, phy_id, last); + + return sas_discover_new(dev, phy_id); +} + +/** + * sas_rediscover - revalidate the domain. + * @dev:domain device to be detect. + * @phy_id: the phy id will be detected. + * + * NOTE: this process _must_ quit (return) as soon as any connection + * errors are encountered. Connection recovery is done elsewhere. + * Discover process only interrogates devices in order to discover the + * domain.For plugging out, we un-register the device only when it is + * the last phy in the port, for other phys in this port, we just delete it + * from the port.For inserting, we do discovery when it is the + * first phy,for other phys in this port, we add it to the port to + * forming the wide-port. 
+ */ +static int sas_rediscover(struct domain_device *dev, const int phy_id) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *changed_phy = &ex->ex_phy[phy_id]; + int res = 0; + int i; + bool last = true; /* is this the last phy of the port */ + + pr_debug("ex %016llx phy%02d originated BROADCAST(CHANGE)\n", + SAS_ADDR(dev->sas_addr), phy_id); + + if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) { + for (i = 0; i < ex->num_phys; i++) { + struct ex_phy *phy = &ex->ex_phy[i]; + + if (i == phy_id) + continue; + if (sas_phy_addr_match(phy, changed_phy)) { + last = false; + break; + } + } + res = sas_rediscover_dev(dev, phy_id, last, i); + } else + res = sas_discover_new(dev, phy_id); + return res; +} + +/** + * sas_ex_revalidate_domain - revalidate the domain + * @port_dev: port domain device. + * + * NOTE: this process _must_ quit (return) as soon as any connection + * errors are encountered. Connection recovery is done elsewhere. + * Discover process only interrogates devices in order to discover the + * domain. + */ +int sas_ex_revalidate_domain(struct domain_device *port_dev) +{ + int res; + struct domain_device *dev = NULL; + + res = sas_find_bcast_dev(port_dev, &dev); + if (res == 0 && dev) { + struct expander_device *ex = &dev->ex_dev; + int i = 0, phy_id; + + do { + phy_id = -1; + res = sas_find_bcast_phy(dev, &phy_id, i, true); + if (phy_id == -1) + break; + res = sas_rediscover(dev, phy_id); + i = phy_id + 1; + } while (i < ex->num_phys); + } + return res; +} + +int sas_find_attached_phy_id(struct expander_device *ex_dev, + struct domain_device *dev) +{ + struct ex_phy *phy; + int phy_id; + + for (phy_id = 0; phy_id < ex_dev->num_phys; phy_id++) { + phy = &ex_dev->ex_phy[phy_id]; + if (sas_phy_match_dev_addr(dev, phy)) + return phy_id; + } + + return -ENODEV; +} +EXPORT_SYMBOL_GPL(sas_find_attached_phy_id); + +void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, + struct sas_rphy *rphy) +{ + struct domain_device *dev; + unsigned int rcvlen = 0; + int ret = -EINVAL; + + /* no rphy means no smp target support (ie aic94xx host) */ + if (!rphy) + return sas_smp_host_handler(job, shost); + + switch (rphy->identify.device_type) { + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: + break; + default: + pr_err("%s: can we send a smp request to a device?\n", + __func__); + goto out; + } + + dev = sas_find_dev_by_rphy(rphy); + if (!dev) { + pr_err("%s: fail to find a domain_device?\n", __func__); + goto out; + } + + /* do we need to support multiple segments? */ + if (job->request_payload.sg_cnt > 1 || + job->reply_payload.sg_cnt > 1) { + pr_info("%s: multiple segments req %u, rsp %u\n", + __func__, job->request_payload.payload_len, + job->reply_payload.payload_len); + goto out; + } + + ret = smp_execute_task_sg(dev, job->request_payload.sg_list, + job->reply_payload.sg_list); + if (ret >= 0) { + /* bsg_job_done() requires the length received */ + rcvlen = job->reply_payload.payload_len - ret; + ret = 0; + } + +out: + bsg_job_done(job, ret, rcvlen); +} diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c new file mode 100644 index 000000000..2ecb85356 --- /dev/null +++ b/drivers/scsi/libsas/sas_host_smp.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Serial Attached SCSI (SAS) Expander discovery and configuration + * + * Copyright (C) 2007 James E.J. 
Bottomley + * + */ +#include +#include +#include +#include + +#include "sas_internal.h" + +#include +#include +#include "scsi_sas_internal.h" + +static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data, + u8 phy_id) +{ + struct sas_phy *phy; + struct sas_rphy *rphy; + + if (phy_id >= sas_ha->num_phys) { + resp_data[2] = SMP_RESP_NO_PHY; + return; + } + resp_data[2] = SMP_RESP_FUNC_ACC; + + phy = sas_ha->sas_phy[phy_id]->phy; + resp_data[9] = phy_id; + resp_data[13] = phy->negotiated_linkrate; + memcpy(resp_data + 16, sas_ha->sas_addr, SAS_ADDR_SIZE); + memcpy(resp_data + 24, sas_ha->sas_phy[phy_id]->attached_sas_addr, + SAS_ADDR_SIZE); + resp_data[40] = (phy->minimum_linkrate << 4) | + phy->minimum_linkrate_hw; + resp_data[41] = (phy->maximum_linkrate << 4) | + phy->maximum_linkrate_hw; + + if (!sas_ha->sas_phy[phy_id]->port || + !sas_ha->sas_phy[phy_id]->port->port_dev) + return; + + rphy = sas_ha->sas_phy[phy_id]->port->port_dev->rphy; + resp_data[12] = rphy->identify.device_type << 4; + resp_data[14] = rphy->identify.initiator_port_protocols; + resp_data[15] = rphy->identify.target_port_protocols; +} + +/** + * to_sas_gpio_gp_bit - given the gpio frame data find the byte/bit position of 'od' + * @od: od bit to find + * @data: incoming bitstream (from frame) + * @index: requested data register index (from frame) + * @count: total number of registers in the bitstream (from frame) + * @bit: bit position of 'od' in the returned byte + * + * returns NULL if 'od' is not in 'data' + * + * From SFF-8485 v0.7: + * "In GPIO_TX[1], bit 0 of byte 3 contains the first bit (i.e., OD0.0) + * and bit 7 of byte 0 contains the 32nd bit (i.e., OD10.1). + * + * In GPIO_TX[2], bit 0 of byte 3 contains the 33rd bit (i.e., OD10.2) + * and bit 7 of byte 0 contains the 64th bit (i.e., OD21.0)." + * + * The general-purpose (raw-bitstream) RX registers have the same layout + * although 'od' is renamed 'id' for 'input data'. + * + * SFF-8489 defines the behavior of the LEDs in response to the 'od' values. 
+ */ +static u8 *to_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count, u8 *bit) +{ + unsigned int reg; + u8 byte; + + /* gp registers start at index 1 */ + if (index == 0) + return NULL; + + index--; /* make index 0-based */ + if (od < index * 32) + return NULL; + + od -= index * 32; + reg = od >> 5; + + if (reg >= count) + return NULL; + + od &= (1 << 5) - 1; + byte = 3 - (od >> 3); + *bit = od & ((1 << 3) - 1); + + return &data[reg * 4 + byte]; +} + +int try_test_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count) +{ + u8 *byte; + u8 bit; + + byte = to_sas_gpio_gp_bit(od, data, index, count, &bit); + if (!byte) + return -1; + + return (*byte >> bit) & 1; +} +EXPORT_SYMBOL(try_test_sas_gpio_gp_bit); + +static int sas_host_smp_write_gpio(struct sas_ha_struct *sas_ha, u8 *resp_data, + u8 reg_type, u8 reg_index, u8 reg_count, + u8 *req_data) +{ + struct sas_internal *i = to_sas_internal(sas_ha->shost->transportt); + int written; + + if (i->dft->lldd_write_gpio == NULL) { + resp_data[2] = SMP_RESP_FUNC_UNK; + return 0; + } + + written = i->dft->lldd_write_gpio(sas_ha, reg_type, reg_index, + reg_count, req_data); + + if (written < 0) { + resp_data[2] = SMP_RESP_FUNC_FAILED; + written = 0; + } else + resp_data[2] = SMP_RESP_FUNC_ACC; + + return written; +} + +static void sas_report_phy_sata(struct sas_ha_struct *sas_ha, u8 *resp_data, + u8 phy_id) +{ + struct sas_rphy *rphy; + struct dev_to_host_fis *fis; + int i; + + if (phy_id >= sas_ha->num_phys) { + resp_data[2] = SMP_RESP_NO_PHY; + return; + } + + resp_data[2] = SMP_RESP_PHY_NO_SATA; + + if (!sas_ha->sas_phy[phy_id]->port) + return; + + rphy = sas_ha->sas_phy[phy_id]->port->port_dev->rphy; + fis = (struct dev_to_host_fis *) + sas_ha->sas_phy[phy_id]->port->port_dev->frame_rcvd; + if (rphy->identify.target_port_protocols != SAS_PROTOCOL_SATA) + return; + + resp_data[2] = SMP_RESP_FUNC_ACC; + resp_data[9] = phy_id; + memcpy(resp_data + 16, sas_ha->sas_phy[phy_id]->attached_sas_addr, + SAS_ADDR_SIZE); + + /* check to see if we have a valid d2h fis */ + if (fis->fis_type != 0x34) + return; + + /* the d2h fis is required by the standard to be in LE format */ + for (i = 0; i < 20; i += 4) { + u8 *dst = resp_data + 24 + i, *src = + &sas_ha->sas_phy[phy_id]->port->port_dev->frame_rcvd[i]; + dst[0] = src[3]; + dst[1] = src[2]; + dst[2] = src[1]; + dst[3] = src[0]; + } +} + +static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id, + u8 phy_op, enum sas_linkrate min, + enum sas_linkrate max, u8 *resp_data) +{ + struct sas_internal *i = + to_sas_internal(sas_ha->shost->transportt); + struct sas_phy_linkrates rates; + struct asd_sas_phy *asd_phy; + + if (phy_id >= sas_ha->num_phys) { + resp_data[2] = SMP_RESP_NO_PHY; + return; + } + + asd_phy = sas_ha->sas_phy[phy_id]; + switch (phy_op) { + case PHY_FUNC_NOP: + case PHY_FUNC_LINK_RESET: + case PHY_FUNC_HARD_RESET: + case PHY_FUNC_DISABLE: + case PHY_FUNC_CLEAR_ERROR_LOG: + case PHY_FUNC_CLEAR_AFFIL: + case PHY_FUNC_TX_SATA_PS_SIGNAL: + break; + + default: + resp_data[2] = SMP_RESP_PHY_UNK_OP; + return; + } + + rates.minimum_linkrate = min; + rates.maximum_linkrate = max; + + /* filter reset requests through libata eh */ + if (phy_op == PHY_FUNC_LINK_RESET && sas_try_ata_reset(asd_phy) == 0) { + resp_data[2] = SMP_RESP_FUNC_ACC; + return; + } + + if (i->dft->lldd_control_phy(asd_phy, phy_op, &rates)) + resp_data[2] = SMP_RESP_FUNC_FAILED; + else + resp_data[2] = SMP_RESP_FUNC_ACC; +} + +void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost) +{ + struct 
sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + u8 *req_data, *resp_data; + unsigned int reslen = 0; + int error = -EINVAL; + + /* eight is the minimum size for request and response frames */ + if (job->request_payload.payload_len < 8 || + job->reply_payload.payload_len < 8) + goto out; + + error = -ENOMEM; + req_data = kzalloc(job->request_payload.payload_len, GFP_KERNEL); + if (!req_data) + goto out; + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, req_data, + job->request_payload.payload_len); + + /* make sure frame can always be built ... we copy + * back only the requested length */ + resp_data = kzalloc(max(job->reply_payload.payload_len, 128U), + GFP_KERNEL); + if (!resp_data) + goto out_free_req; + + error = -EINVAL; + if (req_data[0] != SMP_REQUEST) + goto out_free_resp; + + /* set up default don't know response */ + resp_data[0] = SMP_RESPONSE; + resp_data[1] = req_data[1]; + resp_data[2] = SMP_RESP_FUNC_UNK; + + switch (req_data[1]) { + case SMP_REPORT_GENERAL: + resp_data[2] = SMP_RESP_FUNC_ACC; + resp_data[9] = sas_ha->num_phys; + reslen = 32; + break; + + case SMP_REPORT_MANUF_INFO: + resp_data[2] = SMP_RESP_FUNC_ACC; + memcpy(resp_data + 12, shost->hostt->name, + SAS_EXPANDER_VENDOR_ID_LEN); + memcpy(resp_data + 20, "libsas virt phy", + SAS_EXPANDER_PRODUCT_ID_LEN); + reslen = 64; + break; + + case SMP_READ_GPIO_REG: + /* FIXME: need GPIO support in the transport class */ + break; + + case SMP_DISCOVER: + if (job->request_payload.payload_len < 16) + goto out_free_resp; + sas_host_smp_discover(sas_ha, resp_data, req_data[9]); + reslen = 56; + break; + + case SMP_REPORT_PHY_ERR_LOG: + /* FIXME: could implement this with additional + * libsas callbacks providing the HW supports it */ + break; + + case SMP_REPORT_PHY_SATA: + if (job->request_payload.payload_len < 16) + goto out_free_resp; + sas_report_phy_sata(sas_ha, resp_data, req_data[9]); + reslen = 60; + break; + + case SMP_REPORT_ROUTE_INFO: + /* Can't implement; hosts have no routes */ + break; + + case SMP_WRITE_GPIO_REG: { + /* SFF-8485 v0.7 */ + const int base_frame_size = 11; + int to_write = req_data[4]; + + if (job->request_payload.payload_len < + base_frame_size + to_write * 4) { + resp_data[2] = SMP_RESP_INV_FRM_LEN; + break; + } + + to_write = sas_host_smp_write_gpio(sas_ha, resp_data, req_data[2], + req_data[3], to_write, &req_data[8]); + reslen = 8; + break; + } + + case SMP_CONF_ROUTE_INFO: + /* Can't implement; hosts have no routes */ + break; + + case SMP_PHY_CONTROL: + if (job->request_payload.payload_len < 44) + goto out_free_resp; + sas_phy_control(sas_ha, req_data[9], req_data[10], + req_data[32] >> 4, req_data[33] >> 4, + resp_data); + reslen = 8; + break; + + case SMP_PHY_TEST_FUNCTION: + /* FIXME: should this be implemented? */ + break; + + default: + /* probably a 2.0 function */ + break; + } + + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, resp_data, + job->reply_payload.payload_len); + + error = 0; +out_free_resp: + kfree(resp_data); +out_free_req: + kfree(req_data); +out: + bsg_job_done(job, error, reslen); +} diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c new file mode 100644 index 000000000..8586dc79f --- /dev/null +++ b/drivers/scsi/libsas/sas_init.c @@ -0,0 +1,711 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Serial Attached SCSI (SAS) Transport Layer initialization + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. 
+ * Copyright (C) 2005 Luben Tuikov + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sas_internal.h" + +#include "scsi_sas_internal.h" + +static struct kmem_cache *sas_task_cache; +static struct kmem_cache *sas_event_cache; + +struct sas_task *sas_alloc_task(gfp_t flags) +{ + struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags); + + if (task) { + spin_lock_init(&task->task_state_lock); + task->task_state_flags = SAS_TASK_STATE_PENDING; + } + + return task; +} + +struct sas_task *sas_alloc_slow_task(gfp_t flags) +{ + struct sas_task *task = sas_alloc_task(flags); + struct sas_task_slow *slow = kmalloc(sizeof(*slow), flags); + + if (!task || !slow) { + if (task) + kmem_cache_free(sas_task_cache, task); + kfree(slow); + return NULL; + } + + task->slow_task = slow; + slow->task = task; + timer_setup(&slow->timer, NULL, 0); + init_completion(&slow->completion); + + return task; +} + +void sas_free_task(struct sas_task *task) +{ + if (task) { + kfree(task->slow_task); + kmem_cache_free(sas_task_cache, task); + } +} + +/*------------ SAS addr hash -----------*/ +void sas_hash_addr(u8 *hashed, const u8 *sas_addr) +{ + const u32 poly = 0x00DB2777; + u32 r = 0; + int i; + + for (i = 0; i < SAS_ADDR_SIZE; i++) { + int b; + + for (b = (SAS_ADDR_SIZE - 1); b >= 0; b--) { + r <<= 1; + if ((1 << b) & sas_addr[i]) { + if (!(r & 0x01000000)) + r ^= poly; + } else if (r & 0x01000000) { + r ^= poly; + } + } + } + + hashed[0] = (r >> 16) & 0xFF; + hashed[1] = (r >> 8) & 0xFF; + hashed[2] = r & 0xFF; +} + +int sas_register_ha(struct sas_ha_struct *sas_ha) +{ + char name[64]; + int error = 0; + + mutex_init(&sas_ha->disco_mutex); + spin_lock_init(&sas_ha->phy_port_lock); + sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr); + + set_bit(SAS_HA_REGISTERED, &sas_ha->state); + spin_lock_init(&sas_ha->lock); + mutex_init(&sas_ha->drain_mutex); + init_waitqueue_head(&sas_ha->eh_wait_q); + INIT_LIST_HEAD(&sas_ha->defer_q); + INIT_LIST_HEAD(&sas_ha->eh_dev_q); + + sas_ha->event_thres = SAS_PHY_SHUTDOWN_THRES; + + error = sas_register_phys(sas_ha); + if (error) { + pr_notice("couldn't register sas phys:%d\n", error); + return error; + } + + error = sas_register_ports(sas_ha); + if (error) { + pr_notice("couldn't register sas ports:%d\n", error); + goto Undo_phys; + } + + error = -ENOMEM; + snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev)); + sas_ha->event_q = create_singlethread_workqueue(name); + if (!sas_ha->event_q) + goto Undo_ports; + + snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev)); + sas_ha->disco_q = create_singlethread_workqueue(name); + if (!sas_ha->disco_q) + goto Undo_event_q; + + INIT_LIST_HEAD(&sas_ha->eh_done_q); + INIT_LIST_HEAD(&sas_ha->eh_ata_q); + + return 0; + +Undo_event_q: + destroy_workqueue(sas_ha->event_q); +Undo_ports: + sas_unregister_ports(sas_ha); +Undo_phys: + + return error; +} +EXPORT_SYMBOL_GPL(sas_register_ha); + +static void sas_disable_events(struct sas_ha_struct *sas_ha) +{ + /* Set the state to unregistered to avoid further unchained + * events to be queued, and flush any in-progress drainers + */ + mutex_lock(&sas_ha->drain_mutex); + spin_lock_irq(&sas_ha->lock); + clear_bit(SAS_HA_REGISTERED, &sas_ha->state); + spin_unlock_irq(&sas_ha->lock); + __sas_drain_work(sas_ha); + mutex_unlock(&sas_ha->drain_mutex); +} + +int sas_unregister_ha(struct sas_ha_struct *sas_ha) +{ + sas_disable_events(sas_ha); + sas_unregister_ports(sas_ha); + + /* flush unregistration 
work */ + mutex_lock(&sas_ha->drain_mutex); + __sas_drain_work(sas_ha); + mutex_unlock(&sas_ha->drain_mutex); + + destroy_workqueue(sas_ha->disco_q); + destroy_workqueue(sas_ha->event_q); + + return 0; +} +EXPORT_SYMBOL_GPL(sas_unregister_ha); + +static int sas_get_linkerrors(struct sas_phy *phy) +{ + if (scsi_is_sas_phy_local(phy)) { + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number]; + struct sas_internal *i = + to_sas_internal(sas_ha->shost->transportt); + + return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL); + } + + return sas_smp_get_phy_events(phy); +} + +int sas_try_ata_reset(struct asd_sas_phy *asd_phy) +{ + struct domain_device *dev = NULL; + + /* try to route user requested link resets through libata */ + if (asd_phy->port) + dev = asd_phy->port->port_dev; + + /* validate that dev has been probed */ + if (dev) + dev = sas_find_dev_by_rphy(dev->rphy); + + if (dev && dev_is_sata(dev)) { + sas_ata_schedule_reset(dev); + sas_ata_wait_eh(dev); + return 0; + } + + return -ENODEV; +} + +/* + * transport_sas_phy_reset - reset a phy and permit libata to manage the link + * + * phy reset request via sysfs in host workqueue context so we know we + * can block on eh and safely traverse the domain_device topology + */ +static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset) +{ + enum phy_func reset_type; + + if (hard_reset) + reset_type = PHY_FUNC_HARD_RESET; + else + reset_type = PHY_FUNC_LINK_RESET; + + if (scsi_is_sas_phy_local(phy)) { + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number]; + struct sas_internal *i = + to_sas_internal(sas_ha->shost->transportt); + + if (!hard_reset && sas_try_ata_reset(asd_phy) == 0) + return 0; + return i->dft->lldd_control_phy(asd_phy, reset_type, NULL); + } else { + struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); + struct domain_device *ddev = sas_find_dev_by_rphy(rphy); + struct domain_device *ata_dev = sas_ex_to_ata(ddev, phy->number); + + if (ata_dev && !hard_reset) { + sas_ata_schedule_reset(ata_dev); + sas_ata_wait_eh(ata_dev); + return 0; + } else + return sas_smp_phy_control(ddev, phy->number, reset_type, NULL); + } +} + +int sas_phy_enable(struct sas_phy *phy, int enable) +{ + int ret; + enum phy_func cmd; + + if (enable) + cmd = PHY_FUNC_LINK_RESET; + else + cmd = PHY_FUNC_DISABLE; + + if (scsi_is_sas_phy_local(phy)) { + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number]; + struct sas_internal *i = + to_sas_internal(sas_ha->shost->transportt); + + if (enable) + ret = transport_sas_phy_reset(phy, 0); + else + ret = i->dft->lldd_control_phy(asd_phy, cmd, NULL); + } else { + struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); + struct domain_device *ddev = sas_find_dev_by_rphy(rphy); + + if (enable) + ret = transport_sas_phy_reset(phy, 0); + else + ret = sas_smp_phy_control(ddev, phy->number, cmd, NULL); + } + return ret; +} +EXPORT_SYMBOL_GPL(sas_phy_enable); + +int sas_phy_reset(struct sas_phy *phy, int hard_reset) +{ + int ret; + enum phy_func reset_type; + + if (!phy->enabled) + return -ENODEV; + + if (hard_reset) + reset_type = PHY_FUNC_HARD_RESET; + else + reset_type = PHY_FUNC_LINK_RESET; + + if (scsi_is_sas_phy_local(phy)) { + struct 
Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number]; + struct sas_internal *i = + to_sas_internal(sas_ha->shost->transportt); + + ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL); + } else { + struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); + struct domain_device *ddev = sas_find_dev_by_rphy(rphy); + ret = sas_smp_phy_control(ddev, phy->number, reset_type, NULL); + } + return ret; +} +EXPORT_SYMBOL_GPL(sas_phy_reset); + +int sas_set_phy_speed(struct sas_phy *phy, + struct sas_phy_linkrates *rates) +{ + int ret; + + if ((rates->minimum_linkrate && + rates->minimum_linkrate > phy->maximum_linkrate) || + (rates->maximum_linkrate && + rates->maximum_linkrate < phy->minimum_linkrate)) + return -EINVAL; + + if (rates->minimum_linkrate && + rates->minimum_linkrate < phy->minimum_linkrate_hw) + rates->minimum_linkrate = phy->minimum_linkrate_hw; + + if (rates->maximum_linkrate && + rates->maximum_linkrate > phy->maximum_linkrate_hw) + rates->maximum_linkrate = phy->maximum_linkrate_hw; + + if (scsi_is_sas_phy_local(phy)) { + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number]; + struct sas_internal *i = + to_sas_internal(sas_ha->shost->transportt); + + ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE, + rates); + } else { + struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); + struct domain_device *ddev = sas_find_dev_by_rphy(rphy); + ret = sas_smp_phy_control(ddev, phy->number, + PHY_FUNC_LINK_RESET, rates); + + } + + return ret; +} + +void sas_prep_resume_ha(struct sas_ha_struct *ha) +{ + int i; + + set_bit(SAS_HA_REGISTERED, &ha->state); + set_bit(SAS_HA_RESUMING, &ha->state); + + /* clear out any stale link events/data from the suspension path */ + for (i = 0; i < ha->num_phys; i++) { + struct asd_sas_phy *phy = ha->sas_phy[i]; + + memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + phy->frame_rcvd_size = 0; + } +} +EXPORT_SYMBOL(sas_prep_resume_ha); + +static int phys_suspended(struct sas_ha_struct *ha) +{ + int i, rc = 0; + + for (i = 0; i < ha->num_phys; i++) { + struct asd_sas_phy *phy = ha->sas_phy[i]; + + if (phy->suspended) + rc++; + } + + return rc; +} + +static void sas_resume_insert_broadcast_ha(struct sas_ha_struct *ha) +{ + int i; + + for (i = 0; i < ha->num_phys; i++) { + struct asd_sas_port *port = ha->sas_port[i]; + struct domain_device *dev = port->port_dev; + + if (dev && dev_is_expander(dev->dev_type)) { + struct asd_sas_phy *first_phy; + + spin_lock(&port->phy_list_lock); + first_phy = list_first_entry_or_null( + &port->phy_list, struct asd_sas_phy, + port_phy_el); + spin_unlock(&port->phy_list_lock); + + if (first_phy) + sas_notify_port_event(first_phy, + PORTE_BROADCAST_RCVD, GFP_KERNEL); + } + } +} + +static void _sas_resume_ha(struct sas_ha_struct *ha, bool drain) +{ + const unsigned long tmo = msecs_to_jiffies(25000); + int i; + + /* deform ports on phys that did not resume + * at this point we may be racing the phy coming back (as posted + * by the lldd). So we post the event and once we are in the + * libsas context check that the phy remains suspended before + * tearing it down. + */ + i = phys_suspended(ha); + if (i) + dev_info(ha->dev, "waiting up to 25 seconds for %d phy%s to resume\n", + i, i > 1 ? 
"s" : ""); + wait_event_timeout(ha->eh_wait_q, phys_suspended(ha) == 0, tmo); + for (i = 0; i < ha->num_phys; i++) { + struct asd_sas_phy *phy = ha->sas_phy[i]; + + if (phy->suspended) { + dev_warn(&phy->phy->dev, "resume timeout\n"); + sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT, + GFP_KERNEL); + } + } + + /* all phys are back up or timed out, turn on i/o so we can + * flush out disks that did not return + */ + scsi_unblock_requests(ha->shost); + if (drain) + sas_drain_work(ha); + clear_bit(SAS_HA_RESUMING, &ha->state); + + sas_queue_deferred_work(ha); + /* send event PORTE_BROADCAST_RCVD to identify some new inserted + * disks for expander + */ + sas_resume_insert_broadcast_ha(ha); +} + +void sas_resume_ha(struct sas_ha_struct *ha) +{ + _sas_resume_ha(ha, true); +} +EXPORT_SYMBOL(sas_resume_ha); + +/* A no-sync variant, which does not call sas_drain_ha(). */ +void sas_resume_ha_no_sync(struct sas_ha_struct *ha) +{ + _sas_resume_ha(ha, false); +} +EXPORT_SYMBOL(sas_resume_ha_no_sync); + +void sas_suspend_ha(struct sas_ha_struct *ha) +{ + int i; + + sas_disable_events(ha); + scsi_block_requests(ha->shost); + for (i = 0; i < ha->num_phys; i++) { + struct asd_sas_port *port = ha->sas_port[i]; + + sas_discover_event(port, DISCE_SUSPEND); + } + + /* flush suspend events while unregistered */ + mutex_lock(&ha->drain_mutex); + __sas_drain_work(ha); + mutex_unlock(&ha->drain_mutex); +} +EXPORT_SYMBOL(sas_suspend_ha); + +static void sas_phy_release(struct sas_phy *phy) +{ + kfree(phy->hostdata); + phy->hostdata = NULL; +} + +static void phy_reset_work(struct work_struct *work) +{ + struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work); + + d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset); +} + +static void phy_enable_work(struct work_struct *work) +{ + struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work); + + d->enable_result = sas_phy_enable(d->phy, d->enable); +} + +static int sas_phy_setup(struct sas_phy *phy) +{ + struct sas_phy_data *d = kzalloc(sizeof(*d), GFP_KERNEL); + + if (!d) + return -ENOMEM; + + mutex_init(&d->event_lock); + INIT_SAS_WORK(&d->reset_work, phy_reset_work); + INIT_SAS_WORK(&d->enable_work, phy_enable_work); + d->phy = phy; + phy->hostdata = d; + + return 0; +} + +static int queue_phy_reset(struct sas_phy *phy, int hard_reset) +{ + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); + struct sas_phy_data *d = phy->hostdata; + int rc; + + if (!d) + return -ENOMEM; + + pm_runtime_get_sync(ha->dev); + /* libsas workqueue coordinates ata-eh reset with discovery */ + mutex_lock(&d->event_lock); + d->reset_result = 0; + d->hard_reset = hard_reset; + + spin_lock_irq(&ha->lock); + sas_queue_work(ha, &d->reset_work); + spin_unlock_irq(&ha->lock); + + rc = sas_drain_work(ha); + if (rc == 0) + rc = d->reset_result; + mutex_unlock(&d->event_lock); + pm_runtime_put_sync(ha->dev); + + return rc; +} + +static int queue_phy_enable(struct sas_phy *phy, int enable) +{ + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); + struct sas_phy_data *d = phy->hostdata; + int rc; + + if (!d) + return -ENOMEM; + + pm_runtime_get_sync(ha->dev); + /* libsas workqueue coordinates ata-eh reset with discovery */ + mutex_lock(&d->event_lock); + d->enable_result = 0; + d->enable = enable; + + spin_lock_irq(&ha->lock); + sas_queue_work(ha, &d->enable_work); + spin_unlock_irq(&ha->lock); + + rc = sas_drain_work(ha); + if (rc == 0) + rc 
= d->enable_result; + mutex_unlock(&d->event_lock); + pm_runtime_put_sync(ha->dev); + + return rc; +} + +static struct sas_function_template sft = { + .phy_enable = queue_phy_enable, + .phy_reset = queue_phy_reset, + .phy_setup = sas_phy_setup, + .phy_release = sas_phy_release, + .set_phy_speed = sas_set_phy_speed, + .get_linkerrors = sas_get_linkerrors, + .smp_handler = sas_smp_handler, +}; + +static inline ssize_t phy_event_threshold_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + return scnprintf(buf, PAGE_SIZE, "%u\n", sha->event_thres); +} + +static inline ssize_t phy_event_threshold_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + sha->event_thres = simple_strtol(buf, NULL, 10); + + /* threshold cannot be set too small */ + if (sha->event_thres < 32) + sha->event_thres = 32; + + return count; +} + +DEVICE_ATTR(phy_event_threshold, + S_IRUGO|S_IWUSR, + phy_event_threshold_show, + phy_event_threshold_store); +EXPORT_SYMBOL_GPL(dev_attr_phy_event_threshold); + +struct scsi_transport_template * +sas_domain_attach_transport(struct sas_domain_function_template *dft) +{ + struct scsi_transport_template *stt = sas_attach_transport(&sft); + struct sas_internal *i; + + if (!stt) + return stt; + + i = to_sas_internal(stt); + i->dft = dft; + stt->create_work_queue = 1; + stt->eh_strategy_handler = sas_scsi_recover_host; + + return stt; +} +EXPORT_SYMBOL_GPL(sas_domain_attach_transport); + +struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy, + gfp_t gfp_flags) +{ + struct asd_sas_event *event; + struct sas_ha_struct *sas_ha = phy->ha; + struct sas_internal *i = + to_sas_internal(sas_ha->shost->transportt); + + event = kmem_cache_zalloc(sas_event_cache, gfp_flags); + if (!event) + return NULL; + + atomic_inc(&phy->event_nr); + + if (atomic_read(&phy->event_nr) > phy->ha->event_thres) { + if (i->dft->lldd_control_phy) { + if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) { + pr_notice("The phy%d bursting events, shut it down.\n", + phy->id); + sas_notify_phy_event(phy, PHYE_SHUTDOWN, + gfp_flags); + } + } else { + /* Do not support PHY control, stop allocating events */ + WARN_ONCE(1, "PHY control not supported.\n"); + kmem_cache_free(sas_event_cache, event); + atomic_dec(&phy->event_nr); + event = NULL; + } + } + + return event; +} + +void sas_free_event(struct asd_sas_event *event) +{ + struct asd_sas_phy *phy = event->phy; + + kmem_cache_free(sas_event_cache, event); + atomic_dec(&phy->event_nr); +} + +/* ---------- SAS Class register/unregister ---------- */ + +static int __init sas_class_init(void) +{ + sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN); + if (!sas_task_cache) + goto out; + + sas_event_cache = KMEM_CACHE(asd_sas_event, SLAB_HWCACHE_ALIGN); + if (!sas_event_cache) + goto free_task_kmem; + + return 0; +free_task_kmem: + kmem_cache_destroy(sas_task_cache); +out: + return -ENOMEM; +} + +static void __exit sas_class_exit(void) +{ + kmem_cache_destroy(sas_task_cache); + kmem_cache_destroy(sas_event_cache); +} + +MODULE_AUTHOR("Luben Tuikov "); +MODULE_DESCRIPTION("SAS Transport Layer"); +MODULE_LICENSE("GPL v2"); + +module_init(sas_class_init); +module_exit(sas_class_exit); + diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h new file mode 100644 index 
000000000..a6dc7dc07 --- /dev/null +++ b/drivers/scsi/libsas/sas_internal.h @@ -0,0 +1,214 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Serial Attached SCSI (SAS) class internal header file + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + */ + +#ifndef _SAS_INTERNAL_H_ +#define _SAS_INTERNAL_H_ + +#include +#include +#include +#include +#include +#include + +#ifdef pr_fmt +#undef pr_fmt +#endif + +#define SAS_FMT "sas: " + +#define pr_fmt(fmt) SAS_FMT fmt + +#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble) +#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0) + +struct sas_phy_data { + /* let reset be performed in sas_queue_work() context */ + struct sas_phy *phy; + struct mutex event_lock; + int hard_reset; + int reset_result; + struct sas_work reset_work; + int enable; + int enable_result; + struct sas_work enable_work; +}; + +void sas_scsi_recover_host(struct Scsi_Host *shost); + +int sas_register_phys(struct sas_ha_struct *sas_ha); + +struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy, gfp_t gfp_flags); +void sas_free_event(struct asd_sas_event *event); + +struct sas_task *sas_alloc_task(gfp_t flags); +struct sas_task *sas_alloc_slow_task(gfp_t flags); +void sas_free_task(struct sas_task *task); + +int sas_register_ports(struct sas_ha_struct *sas_ha); +void sas_unregister_ports(struct sas_ha_struct *sas_ha); + +void sas_disable_revalidation(struct sas_ha_struct *ha); +void sas_enable_revalidation(struct sas_ha_struct *ha); +void sas_queue_deferred_work(struct sas_ha_struct *ha); +void __sas_drain_work(struct sas_ha_struct *ha); + +void sas_deform_port(struct asd_sas_phy *phy, int gone); + +void sas_porte_bytes_dmaed(struct work_struct *work); +void sas_porte_broadcast_rcvd(struct work_struct *work); +void sas_porte_link_reset_err(struct work_struct *work); +void sas_porte_timer_event(struct work_struct *work); +void sas_porte_hard_reset(struct work_struct *work); +bool sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw); + +int sas_notify_lldd_dev_found(struct domain_device *); +void sas_notify_lldd_dev_gone(struct domain_device *); + +void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, + struct sas_rphy *rphy); +int sas_smp_phy_control(struct domain_device *dev, int phy_id, + enum phy_func phy_func, struct sas_phy_linkrates *); +int sas_smp_get_phy_events(struct sas_phy *phy); + +void sas_device_set_phy(struct domain_device *dev, struct sas_port *port); +struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); +struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id); +int sas_ex_phy_discover(struct domain_device *dev, int single); +int sas_get_report_phy_sata(struct domain_device *dev, int phy_id, + struct smp_rps_resp *rps_resp); +int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, + u8 *sas_addr, enum sas_device_type *type); +int sas_try_ata_reset(struct asd_sas_phy *phy); + +void sas_free_device(struct kref *kref); +void sas_destruct_devices(struct asd_sas_port *port); + +extern const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS]; +extern const work_func_t sas_port_event_fns[PORT_NUM_EVENTS]; + +void sas_task_internal_done(struct sas_task *task); +void sas_task_internal_timedout(struct timer_list *t); +int sas_execute_tmf(struct domain_device *device, void *parameter, + int para_len, int force_phy_id, + struct sas_tmf_task *tmf); + +#ifdef CONFIG_SCSI_SAS_HOST_SMP +extern void 
sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost); +#else +static inline void sas_smp_host_handler(struct bsg_job *job, + struct Scsi_Host *shost) +{ + shost_printk(KERN_ERR, shost, + "Cannot send SMP to a sas host (not enabled in CONFIG)\n"); + bsg_job_done(job, -EINVAL, 0); +} +#endif + +static inline bool sas_phy_match_dev_addr(struct domain_device *dev, + struct ex_phy *phy) +{ + return SAS_ADDR(dev->sas_addr) == SAS_ADDR(phy->attached_sas_addr); +} + +static inline bool sas_phy_match_port_addr(struct asd_sas_port *port, + struct ex_phy *phy) +{ + return SAS_ADDR(port->sas_addr) == SAS_ADDR(phy->attached_sas_addr); +} + +static inline bool sas_phy_addr_match(struct ex_phy *p1, struct ex_phy *p2) +{ + return SAS_ADDR(p1->attached_sas_addr) == SAS_ADDR(p2->attached_sas_addr); +} + +static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err) +{ + pr_warn("%s: for %s device %016llx returned %d\n", + func, dev->parent ? "exp-attached" : + "direct-attached", + SAS_ADDR(dev->sas_addr), err); + sas_unregister_dev(dev->port, dev); +} + +static inline void sas_fill_in_rphy(struct domain_device *dev, + struct sas_rphy *rphy) +{ + rphy->identify.sas_address = SAS_ADDR(dev->sas_addr); + rphy->identify.initiator_port_protocols = dev->iproto; + rphy->identify.target_port_protocols = dev->tproto; + switch (dev->dev_type) { + case SAS_SATA_DEV: + /* FIXME: need sata device type */ + case SAS_END_DEVICE: + case SAS_SATA_PENDING: + rphy->identify.device_type = SAS_END_DEVICE; + break; + case SAS_EDGE_EXPANDER_DEVICE: + rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE; + break; + case SAS_FANOUT_EXPANDER_DEVICE: + rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE; + break; + default: + rphy->identify.device_type = SAS_PHY_UNUSED; + break; + } +} + +static inline void sas_phy_set_target(struct asd_sas_phy *p, struct domain_device *dev) +{ + struct sas_phy *phy = p->phy; + + if (dev) { + if (dev_is_sata(dev)) + phy->identify.device_type = SAS_END_DEVICE; + else + phy->identify.device_type = dev->dev_type; + phy->identify.target_port_protocols = dev->tproto; + } else { + phy->identify.device_type = SAS_PHY_UNUSED; + phy->identify.target_port_protocols = 0; + } +} + +static inline void sas_add_parent_port(struct domain_device *dev, int phy_id) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *ex_phy = &ex->ex_phy[phy_id]; + + if (!ex->parent_port) { + ex->parent_port = sas_port_alloc(&dev->rphy->dev, phy_id); + /* FIXME: error handling */ + BUG_ON(!ex->parent_port); + BUG_ON(sas_port_add(ex->parent_port)); + sas_port_mark_backlink(ex->parent_port); + } + sas_port_add_phy(ex->parent_port, ex_phy->phy); +} + +static inline struct domain_device *sas_alloc_device(void) +{ + struct domain_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL); + + if (dev) { + INIT_LIST_HEAD(&dev->siblings); + INIT_LIST_HEAD(&dev->dev_list_node); + INIT_LIST_HEAD(&dev->disco_list_node); + kref_init(&dev->kref); + spin_lock_init(&dev->done_lock); + } + return dev; +} + +static inline void sas_put_device(struct domain_device *dev) +{ + kref_put(&dev->kref, sas_free_device); +} + +#endif /* _SAS_INTERNAL_H_ */ diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c new file mode 100644 index 000000000..57494ac97 --- /dev/null +++ b/drivers/scsi/libsas/sas_phy.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Serial Attached SCSI (SAS) Phy class + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. 
+ * Copyright (C) 2005 Luben Tuikov + */ + +#include "sas_internal.h" +#include +#include +#include +#include "scsi_sas_internal.h" + +/* ---------- Phy events ---------- */ + +static void sas_phye_loss_of_signal(struct work_struct *work) +{ + struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + + phy->error = 0; + sas_deform_port(phy, 1); +} + +static void sas_phye_oob_done(struct work_struct *work) +{ + struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + + phy->error = 0; +} + +static void sas_phye_oob_error(struct work_struct *work) +{ + struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + struct sas_ha_struct *sas_ha = phy->ha; + struct asd_sas_port *port = phy->port; + struct sas_internal *i = + to_sas_internal(sas_ha->shost->transportt); + + sas_deform_port(phy, 1); + + if (!port && phy->enabled && i->dft->lldd_control_phy) { + phy->error++; + switch (phy->error) { + case 1: + case 2: + i->dft->lldd_control_phy(phy, PHY_FUNC_HARD_RESET, + NULL); + break; + case 3: + default: + phy->error = 0; + phy->enabled = 0; + i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL); + break; + } + } +} + +static void sas_phye_spinup_hold(struct work_struct *work) +{ + struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + struct sas_ha_struct *sas_ha = phy->ha; + struct sas_internal *i = + to_sas_internal(sas_ha->shost->transportt); + + phy->error = 0; + i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL); +} + +static void sas_phye_resume_timeout(struct work_struct *work) +{ + struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + + /* phew, lldd got the phy back in the nick of time */ + if (!phy->suspended) { + dev_info(&phy->phy->dev, "resume timeout cancelled\n"); + return; + } + + phy->error = 0; + phy->suspended = 0; + sas_deform_port(phy, 1); +} + + +static void sas_phye_shutdown(struct work_struct *work) +{ + struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + struct sas_ha_struct *sas_ha = phy->ha; + struct sas_internal *i = + to_sas_internal(sas_ha->shost->transportt); + + if (phy->enabled) { + int ret; + + phy->error = 0; + phy->enabled = 0; + ret = i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL); + if (ret) + pr_notice("lldd disable phy%d returned %d\n", phy->id, + ret); + } else + pr_notice("phy%d is not enabled, cannot shutdown\n", phy->id); + phy->in_shutdown = 0; +} + +/* ---------- Phy class registration ---------- */ + +int sas_register_phys(struct sas_ha_struct *sas_ha) +{ + int i; + + /* Now register the phys. 
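+ * Each asd_sas_phy is paired with a sas_phy allocated via sas_phy_alloc() and added to the SCSI transport class; all link rates start out as SAS_LINK_RATE_UNKNOWN until the LLDD reports the negotiated values.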
*/ + for (i = 0; i < sas_ha->num_phys; i++) { + struct asd_sas_phy *phy = sas_ha->sas_phy[i]; + + phy->error = 0; + atomic_set(&phy->event_nr, 0); + INIT_LIST_HEAD(&phy->port_phy_el); + + phy->port = NULL; + phy->ha = sas_ha; + spin_lock_init(&phy->frame_rcvd_lock); + spin_lock_init(&phy->sas_prim_lock); + phy->frame_rcvd_size = 0; + + phy->phy = sas_phy_alloc(&sas_ha->shost->shost_gendev, i); + if (!phy->phy) + return -ENOMEM; + + phy->phy->identify.initiator_port_protocols = + phy->iproto; + phy->phy->identify.target_port_protocols = phy->tproto; + phy->phy->identify.sas_address = SAS_ADDR(sas_ha->sas_addr); + phy->phy->identify.phy_identifier = i; + phy->phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; + phy->phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; + phy->phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN; + phy->phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN; + phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; + + sas_phy_add(phy->phy); + } + + return 0; +} + +const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = { + [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal, + [PHYE_OOB_DONE] = sas_phye_oob_done, + [PHYE_OOB_ERROR] = sas_phye_oob_error, + [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold, + [PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout, + [PHYE_SHUTDOWN] = sas_phye_shutdown, +}; diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c new file mode 100644 index 000000000..e3f2ed913 --- /dev/null +++ b/drivers/scsi/libsas/sas_port.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Serial Attached SCSI (SAS) Port class + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + */ + +#include "sas_internal.h" + +#include +#include +#include "scsi_sas_internal.h" + +static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy *phy) +{ + struct sas_ha_struct *sas_ha = phy->ha; + + if (memcmp(port->attached_sas_addr, phy->attached_sas_addr, + SAS_ADDR_SIZE) != 0 || (sas_ha->strict_wide_ports && + memcmp(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE) != 0)) + return false; + return true; +} + +static void sas_resume_port(struct asd_sas_phy *phy) +{ + struct domain_device *dev, *n; + struct asd_sas_port *port = phy->port; + struct sas_ha_struct *sas_ha = phy->ha; + struct sas_internal *si = to_sas_internal(sas_ha->shost->transportt); + + if (si->dft->lldd_port_formed) + si->dft->lldd_port_formed(phy); + + if (port->suspended) + port->suspended = 0; + else { + /* we only need to handle "link returned" actions once */ + return; + } + + /* if the port came back: + * 1/ presume every device came back + * 2/ force the next revalidation to check all expander phys + */ + list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) { + int i, rc; + + rc = sas_notify_lldd_dev_found(dev); + if (rc) { + sas_unregister_dev(port, dev); + sas_destruct_devices(port); + continue; + } + + if (dev_is_expander(dev->dev_type)) { + dev->ex_dev.ex_change_count = -1; + for (i = 0; i < dev->ex_dev.num_phys; i++) { + struct ex_phy *phy = &dev->ex_dev.ex_phy[i]; + + phy->phy_change_count = -1; + } + } + } + + sas_discover_event(port, DISCE_RESUME); +} + +static void sas_form_port_add_phy(struct asd_sas_port *port, + struct asd_sas_phy *phy, bool wideport) +{ + list_add_tail(&phy->port_phy_el, &port->phy_list); + sas_phy_set_target(phy, port->port_dev); + phy->port = port; + port->num_phys++; + port->phy_mask |= (1U << phy->id); + + if (wideport) + pr_debug("phy%d matched wide port%d\n", phy->id, + port->id); + 
else + memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE); + + if (*(u64 *)port->attached_sas_addr == 0) { + memcpy(port->attached_sas_addr, phy->attached_sas_addr, + SAS_ADDR_SIZE); + port->iproto = phy->iproto; + port->tproto = phy->tproto; + port->oob_mode = phy->oob_mode; + port->linkrate = phy->linkrate; + } else { + port->linkrate = max(port->linkrate, phy->linkrate); + } +} + +/** + * sas_form_port - add this phy to a port + * @phy: the phy of interest + * + * This function adds this phy to an existing port, thus creating a wide + * port, or it creates a port and adds the phy to the port. + */ +static void sas_form_port(struct asd_sas_phy *phy) +{ + int i; + struct sas_ha_struct *sas_ha = phy->ha; + struct asd_sas_port *port = phy->port; + struct domain_device *port_dev = NULL; + struct sas_internal *si = + to_sas_internal(sas_ha->shost->transportt); + unsigned long flags; + + if (port) { + if (!phy_is_wideport_member(port, phy)) + sas_deform_port(phy, 0); + else if (phy->suspended) { + phy->suspended = 0; + sas_resume_port(phy); + + /* phy came back, try to cancel the timeout */ + wake_up(&sas_ha->eh_wait_q); + return; + } else { + pr_info("%s: phy%d belongs to port%d already(%d)!\n", + __func__, phy->id, phy->port->id, + phy->port->num_phys); + return; + } + } + + /* see if the phy should be part of a wide port */ + spin_lock_irqsave(&sas_ha->phy_port_lock, flags); + for (i = 0; i < sas_ha->num_phys; i++) { + port = sas_ha->sas_port[i]; + spin_lock(&port->phy_list_lock); + if (*(u64 *) port->sas_addr && + phy_is_wideport_member(port, phy) && port->num_phys > 0) { + /* wide port */ + port_dev = port->port_dev; + sas_form_port_add_phy(port, phy, true); + spin_unlock(&port->phy_list_lock); + break; + } + spin_unlock(&port->phy_list_lock); + } + /* The phy does not match any existing port, create a new one */ + if (i == sas_ha->num_phys) { + for (i = 0; i < sas_ha->num_phys; i++) { + port = sas_ha->sas_port[i]; + spin_lock(&port->phy_list_lock); + if (*(u64 *)port->sas_addr == 0 + && port->num_phys == 0) { + port_dev = port->port_dev; + sas_form_port_add_phy(port, phy, false); + spin_unlock(&port->phy_list_lock); + break; + } + spin_unlock(&port->phy_list_lock); + } + + if (i >= sas_ha->num_phys) { + pr_err("%s: couldn't find a free port, bug?\n", + __func__); + spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); + return; + } + } + spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); + + if (!port->port) { + port->port = sas_port_alloc(phy->phy->dev.parent, port->id); + BUG_ON(!port->port); + sas_port_add(port->port); + } + sas_port_add_phy(port->port, phy->phy); + + pr_debug("%s added to %s, phy_mask:0x%x (%016llx)\n", + dev_name(&phy->phy->dev), dev_name(&port->port->dev), + port->phy_mask, + SAS_ADDR(port->attached_sas_addr)); + + if (port_dev) + port_dev->pathways = port->num_phys; + + /* Tell the LLDD about this port formation. 
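+ * The lldd_port_formed callback is optional; when an LLDD provides it, the driver can set up its own per-port state before domain discovery is queued below.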
*/ + if (si->dft->lldd_port_formed) + si->dft->lldd_port_formed(phy); + + sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN); + /* Only insert a revalidate event after initial discovery */ + if (port_dev && dev_is_expander(port_dev->dev_type)) { + struct expander_device *ex_dev = &port_dev->ex_dev; + + ex_dev->ex_change_count = -1; + sas_discover_event(port, DISCE_REVALIDATE_DOMAIN); + } + flush_workqueue(sas_ha->disco_q); +} + +/** + * sas_deform_port - remove this phy from the port it belongs to + * @phy: the phy of interest + * @gone: whether or not the PHY is gone + * + * This is called when the physical link to the other phy has been + * lost (on this phy), in Event thread context. We cannot delay here. + */ +void sas_deform_port(struct asd_sas_phy *phy, int gone) +{ + struct sas_ha_struct *sas_ha = phy->ha; + struct asd_sas_port *port = phy->port; + struct sas_internal *si = + to_sas_internal(sas_ha->shost->transportt); + struct domain_device *dev; + unsigned long flags; + + if (!port) + return; /* done by a phy event */ + + dev = port->port_dev; + if (dev) + dev->pathways--; + + if (port->num_phys == 1) { + sas_unregister_domain_devices(port, gone); + sas_destruct_devices(port); + sas_port_delete(port->port); + port->port = NULL; + } else { + sas_port_delete_phy(port->port, phy->phy); + sas_device_set_phy(dev, port->port); + } + + if (si->dft->lldd_port_deformed) + si->dft->lldd_port_deformed(phy); + + spin_lock_irqsave(&sas_ha->phy_port_lock, flags); + spin_lock(&port->phy_list_lock); + + list_del_init(&phy->port_phy_el); + sas_phy_set_target(phy, NULL); + phy->port = NULL; + port->num_phys--; + port->phy_mask &= ~(1U << phy->id); + + if (port->num_phys == 0) { + INIT_LIST_HEAD(&port->phy_list); + memset(port->sas_addr, 0, SAS_ADDR_SIZE); + memset(port->attached_sas_addr, 0, SAS_ADDR_SIZE); + port->iproto = 0; + port->tproto = 0; + port->oob_mode = 0; + port->phy_mask = 0; + } + spin_unlock(&port->phy_list_lock); + spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); + + /* Only insert revalidate event if the port still has members */ + if (port->port && dev && dev_is_expander(dev->dev_type)) { + struct expander_device *ex_dev = &dev->ex_dev; + + ex_dev->ex_change_count = -1; + sas_discover_event(port, DISCE_REVALIDATE_DOMAIN); + } + flush_workqueue(sas_ha->disco_q); + + return; +} + +/* ---------- SAS port events ---------- */ + +void sas_porte_bytes_dmaed(struct work_struct *work) +{ + struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + + sas_form_port(phy); +} + +void sas_porte_broadcast_rcvd(struct work_struct *work) +{ + struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + unsigned long flags; + u32 prim; + + spin_lock_irqsave(&phy->sas_prim_lock, flags); + prim = phy->sas_prim; + spin_unlock_irqrestore(&phy->sas_prim_lock, flags); + + pr_debug("broadcast received: %d\n", prim); + sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN); + + if (phy->port) + flush_workqueue(phy->port->ha->disco_q); +} + +void sas_porte_link_reset_err(struct work_struct *work) +{ + struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + + sas_deform_port(phy, 1); +} + +void sas_porte_timer_event(struct work_struct *work) +{ + struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + + sas_deform_port(phy, 1); +} + +void sas_porte_hard_reset(struct work_struct *work) +{ + struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = 
ev->phy; + + sas_deform_port(phy, 1); +} + +/* ---------- SAS port registration ---------- */ + +static void sas_init_port(struct asd_sas_port *port, + struct sas_ha_struct *sas_ha, int i) +{ + memset(port, 0, sizeof(*port)); + port->id = i; + INIT_LIST_HEAD(&port->dev_list); + INIT_LIST_HEAD(&port->disco_list); + INIT_LIST_HEAD(&port->destroy_list); + INIT_LIST_HEAD(&port->sas_port_del_list); + spin_lock_init(&port->phy_list_lock); + INIT_LIST_HEAD(&port->phy_list); + port->ha = sas_ha; + + spin_lock_init(&port->dev_list_lock); +} + +int sas_register_ports(struct sas_ha_struct *sas_ha) +{ + int i; + + /* initialize the ports and discovery */ + for (i = 0; i < sas_ha->num_phys; i++) { + struct asd_sas_port *port = sas_ha->sas_port[i]; + + sas_init_port(port, sas_ha, i); + sas_init_disc(&port->disc, port); + } + return 0; +} + +void sas_unregister_ports(struct sas_ha_struct *sas_ha) +{ + int i; + + for (i = 0; i < sas_ha->num_phys; i++) + if (sas_ha->sas_phy[i]->port) + sas_deform_port(sas_ha->sas_phy[i], 0); + +} + +const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = { + [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed, + [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd, + [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err, + [PORTE_TIMER_EVENT] = sas_porte_timer_event, + [PORTE_HARD_RESET] = sas_porte_hard_reset, +}; diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c new file mode 100644 index 000000000..9047cfcd1 --- /dev/null +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -0,0 +1,1242 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Serial Attached SCSI (SAS) class SCSI Host glue. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + */ + +#include +#include +#include +#include +#include + +#include "sas_internal.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include "scsi_sas_internal.h" +#include "scsi_transport_api.h" +#include "scsi_priv.h" + +#include +#include +#include +#include +#include +#include + +/* record final status and free the task */ +static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task) +{ + struct task_status_struct *ts = &task->task_status; + enum scsi_host_status hs = DID_OK; + enum exec_status stat = SAS_SAM_STAT_GOOD; + + if (ts->resp == SAS_TASK_UNDELIVERED) { + /* transport error */ + hs = DID_NO_CONNECT; + } else { /* ts->resp == SAS_TASK_COMPLETE */ + /* task delivered, what happened afterwards? 
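+ * Translate the SAS execution status into a SCSI host byte and, where applicable, a SAM status before the command is handed back to the midlayer.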
*/ + switch (ts->stat) { + case SAS_DEV_NO_RESPONSE: + case SAS_INTERRUPTED: + case SAS_PHY_DOWN: + case SAS_NAK_R_ERR: + case SAS_OPEN_TO: + hs = DID_NO_CONNECT; + break; + case SAS_DATA_UNDERRUN: + scsi_set_resid(sc, ts->residual); + if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow) + hs = DID_ERROR; + break; + case SAS_DATA_OVERRUN: + hs = DID_ERROR; + break; + case SAS_QUEUE_FULL: + hs = DID_SOFT_ERROR; /* retry */ + break; + case SAS_DEVICE_UNKNOWN: + hs = DID_BAD_TARGET; + break; + case SAS_OPEN_REJECT: + if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY) + hs = DID_SOFT_ERROR; /* retry */ + else + hs = DID_ERROR; + break; + case SAS_PROTO_RESPONSE: + pr_notice("LLDD:%s sent SAS_PROTO_RESP for an SSP task; please report this\n", + task->dev->port->ha->sas_ha_name); + break; + case SAS_ABORTED_TASK: + hs = DID_ABORT; + break; + case SAS_SAM_STAT_CHECK_CONDITION: + memcpy(sc->sense_buffer, ts->buf, + min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size)); + stat = SAS_SAM_STAT_CHECK_CONDITION; + break; + default: + stat = ts->stat; + break; + } + } + + sc->result = (hs << 16) | stat; + ASSIGN_SAS_TASK(sc, NULL); + sas_free_task(task); +} + +static void sas_scsi_task_done(struct sas_task *task) +{ + struct scsi_cmnd *sc = task->uldd_task; + struct domain_device *dev = task->dev; + struct sas_ha_struct *ha = dev->port->ha; + unsigned long flags; + + spin_lock_irqsave(&dev->done_lock, flags); + if (test_bit(SAS_HA_FROZEN, &ha->state)) + task = NULL; + else + ASSIGN_SAS_TASK(sc, NULL); + spin_unlock_irqrestore(&dev->done_lock, flags); + + if (unlikely(!task)) { + /* task will be completed by the error handler */ + pr_debug("task done but aborted\n"); + return; + } + + if (unlikely(!sc)) { + pr_debug("task_done called with non existing SCSI cmnd!\n"); + sas_free_task(task); + return; + } + + sas_end_task(sc, task); + scsi_done(sc); +} + +static struct sas_task *sas_create_task(struct scsi_cmnd *cmd, + struct domain_device *dev, + gfp_t gfp_flags) +{ + struct sas_task *task = sas_alloc_task(gfp_flags); + struct scsi_lun lun; + + if (!task) + return NULL; + + task->uldd_task = cmd; + ASSIGN_SAS_TASK(cmd, task); + + task->dev = dev; + task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */ + + int_to_scsilun(cmd->device->lun, &lun); + memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8); + task->ssp_task.task_attr = TASK_ATTR_SIMPLE; + task->ssp_task.cmd = cmd; + + task->scatter = scsi_sglist(cmd); + task->num_scatter = scsi_sg_count(cmd); + task->total_xfer_len = scsi_bufflen(cmd); + task->data_dir = cmd->sc_data_direction; + + task->task_done = sas_scsi_task_done; + + return task; +} + +int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) +{ + struct sas_internal *i = to_sas_internal(host->transportt); + struct domain_device *dev = cmd_to_domain_dev(cmd); + struct sas_task *task; + int res = 0; + + /* If the device fell off, no sense in issuing commands */ + if (test_bit(SAS_DEV_GONE, &dev->state)) { + cmd->result = DID_BAD_TARGET << 16; + goto out_done; + } + + if (dev_is_sata(dev)) { + spin_lock_irq(dev->sata_dev.ap->lock); + res = ata_sas_queuecmd(cmd, dev->sata_dev.ap); + spin_unlock_irq(dev->sata_dev.ap->lock); + return res; + } + + task = sas_create_task(cmd, dev, GFP_ATOMIC); + if (!task) + return SCSI_MLQUEUE_HOST_BUSY; + + res = i->dft->lldd_execute_task(task, GFP_ATOMIC); + if (res) + goto out_free_task; + return 0; + +out_free_task: + pr_debug("lldd_execute_task returned: %d\n", res); + ASSIGN_SAS_TASK(cmd, NULL); + sas_free_task(task); + if (res == -SAS_QUEUE_FULL) + 
cmd->result = DID_SOFT_ERROR << 16; /* retry */ + else + cmd->result = DID_ERROR << 16; +out_done: + scsi_done(cmd); + return 0; +} +EXPORT_SYMBOL_GPL(sas_queuecommand); + +static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) +{ + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host); + struct domain_device *dev = cmd_to_domain_dev(cmd); + struct sas_task *task = TO_SAS_TASK(cmd); + + /* At this point, we only get called following an actual abort + * of the task, so we should be guaranteed not to be racing with + * any completions from the LLD. Task is freed after this. + */ + sas_end_task(cmd, task); + + if (dev_is_sata(dev)) { + /* defer commands to libata so that libata EH can + * handle ata qcs correctly + */ + list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q); + return; + } + + /* now finish the command and move it on to the error + * handler done list, this also takes it off the + * error handler pending list. + */ + scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q); +} + +static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) +{ + struct scsi_cmnd *cmd, *n; + + list_for_each_entry_safe(cmd, n, error_q, eh_entry) { + if (cmd->device->sdev_target == my_cmd->device->sdev_target && + cmd->device->lun == my_cmd->device->lun) + sas_eh_finish_cmd(cmd); + } +} + +static void sas_scsi_clear_queue_I_T(struct list_head *error_q, + struct domain_device *dev) +{ + struct scsi_cmnd *cmd, *n; + + list_for_each_entry_safe(cmd, n, error_q, eh_entry) { + struct domain_device *x = cmd_to_domain_dev(cmd); + + if (x == dev) + sas_eh_finish_cmd(cmd); + } +} + +static void sas_scsi_clear_queue_port(struct list_head *error_q, + struct asd_sas_port *port) +{ + struct scsi_cmnd *cmd, *n; + + list_for_each_entry_safe(cmd, n, error_q, eh_entry) { + struct domain_device *dev = cmd_to_domain_dev(cmd); + struct asd_sas_port *x = dev->port; + + if (x == port) + sas_eh_finish_cmd(cmd); + } +} + +enum task_disposition { + TASK_IS_DONE, + TASK_IS_ABORTED, + TASK_IS_AT_LU, + TASK_IS_NOT_AT_LU, + TASK_ABORT_FAILED, +}; + +static enum task_disposition sas_scsi_find_task(struct sas_task *task) +{ + unsigned long flags; + int i, res; + struct sas_internal *si = + to_sas_internal(task->dev->port->ha->shost->transportt); + + for (i = 0; i < 5; i++) { + pr_notice("%s: aborting task 0x%p\n", __func__, task); + res = si->dft->lldd_abort_task(task); + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + pr_debug("%s: task 0x%p is done\n", __func__, task); + return TASK_IS_DONE; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + + if (res == TMF_RESP_FUNC_COMPLETE) { + pr_notice("%s: task 0x%p is aborted\n", + __func__, task); + return TASK_IS_ABORTED; + } else if (si->dft->lldd_query_task) { + pr_notice("%s: querying task 0x%p\n", __func__, task); + res = si->dft->lldd_query_task(task); + switch (res) { + case TMF_RESP_FUNC_SUCC: + pr_notice("%s: task 0x%p at LU\n", __func__, + task); + return TASK_IS_AT_LU; + case TMF_RESP_FUNC_COMPLETE: + pr_notice("%s: task 0x%p not at LU\n", + __func__, task); + return TASK_IS_NOT_AT_LU; + case TMF_RESP_FUNC_FAILED: + pr_notice("%s: task 0x%p failed to abort\n", + __func__, task); + return TASK_ABORT_FAILED; + default: + pr_notice("%s: task 0x%p result code %d not handled\n", + __func__, task, res); + } + } + } + return TASK_ABORT_FAILED; +} + +static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd) +{ + int 
res = TMF_RESP_FUNC_FAILED; + struct scsi_lun lun; + struct sas_internal *i = + to_sas_internal(dev->port->ha->shost->transportt); + + int_to_scsilun(cmd->device->lun, &lun); + + pr_notice("eh: device %016llx LUN 0x%llx has the task\n", + SAS_ADDR(dev->sas_addr), + cmd->device->lun); + + if (i->dft->lldd_abort_task_set) + res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun); + + if (res == TMF_RESP_FUNC_FAILED) { + if (i->dft->lldd_clear_task_set) + res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun); + } + + if (res == TMF_RESP_FUNC_FAILED) { + if (i->dft->lldd_lu_reset) + res = i->dft->lldd_lu_reset(dev, lun.scsi_lun); + } + + return res; +} + +static int sas_recover_I_T(struct domain_device *dev) +{ + int res = TMF_RESP_FUNC_FAILED; + struct sas_internal *i = + to_sas_internal(dev->port->ha->shost->transportt); + + pr_notice("I_T nexus reset for dev %016llx\n", + SAS_ADDR(dev->sas_addr)); + + if (i->dft->lldd_I_T_nexus_reset) + res = i->dft->lldd_I_T_nexus_reset(dev); + + return res; +} + +/* take a reference on the last known good phy for this device */ +struct sas_phy *sas_get_local_phy(struct domain_device *dev) +{ + struct sas_ha_struct *ha = dev->port->ha; + struct sas_phy *phy; + unsigned long flags; + + /* a published domain device always has a valid phy, it may be + * stale, but it is never NULL + */ + BUG_ON(!dev->phy); + + spin_lock_irqsave(&ha->phy_port_lock, flags); + phy = dev->phy; + get_device(&phy->dev); + spin_unlock_irqrestore(&ha->phy_port_lock, flags); + + return phy; +} +EXPORT_SYMBOL_GPL(sas_get_local_phy); + +static int sas_queue_reset(struct domain_device *dev, int reset_type, u64 lun) +{ + struct sas_ha_struct *ha = dev->port->ha; + int scheduled = 0, tries = 100; + + /* ata: promote lun reset to bus reset */ + if (dev_is_sata(dev)) { + sas_ata_schedule_reset(dev); + return SUCCESS; + } + + while (!scheduled && tries--) { + spin_lock_irq(&ha->lock); + if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) && + !test_bit(reset_type, &dev->state)) { + scheduled = 1; + ha->eh_active++; + list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q); + set_bit(SAS_DEV_EH_PENDING, &dev->state); + set_bit(reset_type, &dev->state); + int_to_scsilun(lun, &dev->ssp_dev.reset_lun); + scsi_schedule_eh(ha->shost); + } + spin_unlock_irq(&ha->lock); + + if (scheduled) + return SUCCESS; + } + + pr_warn("%s reset of %s failed\n", + reset_type == SAS_DEV_LU_RESET ? 
"LUN" : "Bus", + dev_name(&dev->rphy->dev)); + + return FAILED; +} + +int sas_eh_abort_handler(struct scsi_cmnd *cmd) +{ + int res = TMF_RESP_FUNC_FAILED; + struct sas_task *task = TO_SAS_TASK(cmd); + struct Scsi_Host *host = cmd->device->host; + struct domain_device *dev = cmd_to_domain_dev(cmd); + struct sas_internal *i = to_sas_internal(host->transportt); + unsigned long flags; + + if (!i->dft->lldd_abort_task) + return FAILED; + + spin_lock_irqsave(host->host_lock, flags); + /* We cannot do async aborts for SATA devices */ + if (dev_is_sata(dev) && !host->host_eh_scheduled) { + spin_unlock_irqrestore(host->host_lock, flags); + return FAILED; + } + spin_unlock_irqrestore(host->host_lock, flags); + + if (task) + res = i->dft->lldd_abort_task(task); + else + pr_notice("no task to abort\n"); + if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) + return SUCCESS; + + return FAILED; +} +EXPORT_SYMBOL_GPL(sas_eh_abort_handler); + +/* Attempt to send a LUN reset message to a device */ +int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) +{ + int res; + struct scsi_lun lun; + struct Scsi_Host *host = cmd->device->host; + struct domain_device *dev = cmd_to_domain_dev(cmd); + struct sas_internal *i = to_sas_internal(host->transportt); + + if (current != host->ehandler) + return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun); + + int_to_scsilun(cmd->device->lun, &lun); + + if (!i->dft->lldd_lu_reset) + return FAILED; + + res = i->dft->lldd_lu_reset(dev, lun.scsi_lun); + if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) + return SUCCESS; + + return FAILED; +} +EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler); + +int sas_eh_target_reset_handler(struct scsi_cmnd *cmd) +{ + int res; + struct Scsi_Host *host = cmd->device->host; + struct domain_device *dev = cmd_to_domain_dev(cmd); + struct sas_internal *i = to_sas_internal(host->transportt); + + if (current != host->ehandler) + return sas_queue_reset(dev, SAS_DEV_RESET, 0); + + if (!i->dft->lldd_I_T_nexus_reset) + return FAILED; + + res = i->dft->lldd_I_T_nexus_reset(dev); + if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE || + res == -ENODEV) + return SUCCESS; + + return FAILED; +} +EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler); + +/* Try to reset a device */ +static int try_to_reset_cmd_device(struct scsi_cmnd *cmd) +{ + int res; + struct Scsi_Host *shost = cmd->device->host; + + if (!shost->hostt->eh_device_reset_handler) + goto try_target_reset; + + res = shost->hostt->eh_device_reset_handler(cmd); + if (res == SUCCESS) + return res; + +try_target_reset: + if (shost->hostt->eh_target_reset_handler) + return shost->hostt->eh_target_reset_handler(cmd); + + return FAILED; +} + +static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q) +{ + struct scsi_cmnd *cmd, *n; + enum task_disposition res = TASK_IS_DONE; + int tmf_resp, need_reset; + struct sas_internal *i = to_sas_internal(shost->transportt); + unsigned long flags; + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); + LIST_HEAD(done); + + /* clean out any commands that won the completion vs eh race */ + list_for_each_entry_safe(cmd, n, work_q, eh_entry) { + struct domain_device *dev = cmd_to_domain_dev(cmd); + struct sas_task *task; + + spin_lock_irqsave(&dev->done_lock, flags); + /* by this point the lldd has either observed + * SAS_HA_FROZEN and is leaving the task alone, or has + * won the race with eh and decided to complete it + */ + task = TO_SAS_TASK(cmd); + spin_unlock_irqrestore(&dev->done_lock, flags); 
+ + if (!task) + list_move_tail(&cmd->eh_entry, &done); + } + + Again: + list_for_each_entry_safe(cmd, n, work_q, eh_entry) { + struct sas_task *task = TO_SAS_TASK(cmd); + + list_del_init(&cmd->eh_entry); + + spin_lock_irqsave(&task->task_state_lock, flags); + need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + if (need_reset) { + pr_notice("%s: task 0x%p requests reset\n", + __func__, task); + goto reset; + } + + pr_debug("trying to find task 0x%p\n", task); + res = sas_scsi_find_task(task); + + switch (res) { + case TASK_IS_DONE: + pr_notice("%s: task 0x%p is done\n", __func__, + task); + sas_eh_finish_cmd(cmd); + continue; + case TASK_IS_ABORTED: + pr_notice("%s: task 0x%p is aborted\n", + __func__, task); + sas_eh_finish_cmd(cmd); + continue; + case TASK_IS_AT_LU: + pr_info("task 0x%p is at LU: lu recover\n", task); + reset: + tmf_resp = sas_recover_lu(task->dev, cmd); + if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { + pr_notice("dev %016llx LU 0x%llx is recovered\n", + SAS_ADDR(task->dev), + cmd->device->lun); + sas_eh_finish_cmd(cmd); + sas_scsi_clear_queue_lu(work_q, cmd); + goto Again; + } + fallthrough; + case TASK_IS_NOT_AT_LU: + case TASK_ABORT_FAILED: + pr_notice("task 0x%p is not at LU: I_T recover\n", + task); + tmf_resp = sas_recover_I_T(task->dev); + if (tmf_resp == TMF_RESP_FUNC_COMPLETE || + tmf_resp == -ENODEV) { + struct domain_device *dev = task->dev; + pr_notice("I_T %016llx recovered\n", + SAS_ADDR(task->dev->sas_addr)); + sas_eh_finish_cmd(cmd); + sas_scsi_clear_queue_I_T(work_q, dev); + goto Again; + } + /* Hammer time :-) */ + try_to_reset_cmd_device(cmd); + if (i->dft->lldd_clear_nexus_port) { + struct asd_sas_port *port = task->dev->port; + pr_debug("clearing nexus for port:%d\n", + port->id); + res = i->dft->lldd_clear_nexus_port(port); + if (res == TMF_RESP_FUNC_COMPLETE) { + pr_notice("clear nexus port:%d succeeded\n", + port->id); + sas_eh_finish_cmd(cmd); + sas_scsi_clear_queue_port(work_q, + port); + goto Again; + } + } + if (i->dft->lldd_clear_nexus_ha) { + pr_debug("clear nexus ha\n"); + res = i->dft->lldd_clear_nexus_ha(ha); + if (res == TMF_RESP_FUNC_COMPLETE) { + pr_notice("clear nexus ha succeeded\n"); + sas_eh_finish_cmd(cmd); + goto clear_q; + } + } + /* If we are here -- this means that no amount + * of effort could recover from errors. Quite + * possibly the HA just disappeared. 
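+ * Give up: fail this command and everything else still on the work queue via clear_q.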
+ */ + pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n", + SAS_ADDR(task->dev->sas_addr), + cmd->device->lun); + + sas_eh_finish_cmd(cmd); + goto clear_q; + } + } + out: + list_splice_tail(&done, work_q); + list_splice_tail_init(&ha->eh_ata_q, work_q); + return; + + clear_q: + pr_debug("--- Exit %s -- clear_q\n", __func__); + list_for_each_entry_safe(cmd, n, work_q, eh_entry) + sas_eh_finish_cmd(cmd); + goto out; +} + +static void sas_eh_handle_resets(struct Scsi_Host *shost) +{ + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); + struct sas_internal *i = to_sas_internal(shost->transportt); + + /* handle directed resets to sas devices */ + spin_lock_irq(&ha->lock); + while (!list_empty(&ha->eh_dev_q)) { + struct domain_device *dev; + struct ssp_device *ssp; + + ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node); + list_del_init(&ssp->eh_list_node); + dev = container_of(ssp, typeof(*dev), ssp_dev); + kref_get(&dev->kref); + WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n"); + + spin_unlock_irq(&ha->lock); + + if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state)) + i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun); + + if (test_and_clear_bit(SAS_DEV_RESET, &dev->state)) + i->dft->lldd_I_T_nexus_reset(dev); + + sas_put_device(dev); + spin_lock_irq(&ha->lock); + clear_bit(SAS_DEV_EH_PENDING, &dev->state); + ha->eh_active--; + } + spin_unlock_irq(&ha->lock); +} + + +void sas_scsi_recover_host(struct Scsi_Host *shost) +{ + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); + LIST_HEAD(eh_work_q); + int tries = 0; + bool retry; + +retry: + tries++; + retry = true; + spin_lock_irq(shost->host_lock); + list_splice_init(&shost->eh_cmd_q, &eh_work_q); + spin_unlock_irq(shost->host_lock); + + pr_notice("Enter %s busy: %d failed: %d\n", + __func__, scsi_host_busy(shost), shost->host_failed); + /* + * Deal with commands that still have SAS tasks (i.e. they didn't + * complete via the normal sas_task completion mechanism), + * SAS_HA_FROZEN gives eh dominion over all sas_task completion. + */ + set_bit(SAS_HA_FROZEN, &ha->state); + sas_eh_handle_sas_errors(shost, &eh_work_q); + clear_bit(SAS_HA_FROZEN, &ha->state); + if (list_empty(&eh_work_q)) + goto out; + + /* + * Now deal with SCSI commands that completed ok but have a an error + * code (and hopefully sense data) attached. This is roughly what + * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any + * command we see here has no sas_task and is thus unknown to the HA. 
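+ * Let libata look at its devices first, then fall back to the generic sense-gathering and device-readying steps.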
+ */ + sas_ata_eh(shost, &eh_work_q); + if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q)) + scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q); + +out: + sas_eh_handle_resets(shost); + + /* now link into libata eh --- if we have any ata devices */ + sas_ata_strategy_handler(shost); + + scsi_eh_flush_done_q(&ha->eh_done_q); + + /* check if any new eh work was scheduled during the last run */ + spin_lock_irq(&ha->lock); + if (ha->eh_active == 0) { + shost->host_eh_scheduled = 0; + retry = false; + } + spin_unlock_irq(&ha->lock); + + if (retry) + goto retry; + + pr_notice("--- Exit %s: busy: %d failed: %d tries: %d\n", + __func__, scsi_host_busy(shost), + shost->host_failed, tries); +} + +int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg) +{ + struct domain_device *dev = sdev_to_domain_dev(sdev); + + if (dev_is_sata(dev)) + return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(sas_ioctl); + +struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy) +{ + struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent); + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); + struct domain_device *found_dev = NULL; + int i; + unsigned long flags; + + spin_lock_irqsave(&ha->phy_port_lock, flags); + for (i = 0; i < ha->num_phys; i++) { + struct asd_sas_port *port = ha->sas_port[i]; + struct domain_device *dev; + + spin_lock(&port->dev_list_lock); + list_for_each_entry(dev, &port->dev_list, dev_list_node) { + if (rphy == dev->rphy) { + found_dev = dev; + spin_unlock(&port->dev_list_lock); + goto found; + } + } + spin_unlock(&port->dev_list_lock); + } + found: + spin_unlock_irqrestore(&ha->phy_port_lock, flags); + + return found_dev; +} + +int sas_target_alloc(struct scsi_target *starget) +{ + struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent); + struct domain_device *found_dev = sas_find_dev_by_rphy(rphy); + + if (!found_dev) + return -ENODEV; + + kref_get(&found_dev->kref); + starget->hostdata = found_dev; + return 0; +} +EXPORT_SYMBOL_GPL(sas_target_alloc); + +#define SAS_DEF_QD 256 + +int sas_slave_configure(struct scsi_device *scsi_dev) +{ + struct domain_device *dev = sdev_to_domain_dev(scsi_dev); + + BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE); + + if (dev_is_sata(dev)) { + ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap); + return 0; + } + + sas_read_port_mode_page(scsi_dev); + + if (scsi_dev->tagged_supported) { + scsi_change_queue_depth(scsi_dev, SAS_DEF_QD); + } else { + pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n", + SAS_ADDR(dev->sas_addr), scsi_dev->lun); + scsi_change_queue_depth(scsi_dev, 1); + } + + scsi_dev->allow_restart = 1; + + return 0; +} +EXPORT_SYMBOL_GPL(sas_slave_configure); + +int sas_change_queue_depth(struct scsi_device *sdev, int depth) +{ + struct domain_device *dev = sdev_to_domain_dev(sdev); + + if (dev_is_sata(dev)) + return ata_change_queue_depth(dev->sata_dev.ap, sdev, depth); + + if (!sdev->tagged_supported) + depth = 1; + return scsi_change_queue_depth(sdev, depth); +} +EXPORT_SYMBOL_GPL(sas_change_queue_depth); + +int sas_bios_param(struct scsi_device *scsi_dev, + struct block_device *bdev, + sector_t capacity, int *hsc) +{ + hsc[0] = 255; + hsc[1] = 63; + sector_div(capacity, 255*63); + hsc[2] = capacity; + + return 0; +} +EXPORT_SYMBOL_GPL(sas_bios_param); + +void sas_task_internal_done(struct sas_task *task) +{ + del_timer(&task->slow_task->timer); + complete(&task->slow_task->completion); +} + +void sas_task_internal_timedout(struct 
timer_list *t) +{ + struct sas_task_slow *slow = from_timer(slow, t, timer); + struct sas_task *task = slow->task; + bool is_completed = true; + unsigned long flags; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + is_completed = false; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + + if (!is_completed) + complete(&task->slow_task->completion); +} + +#define TASK_TIMEOUT (20 * HZ) +#define TASK_RETRY 3 + +static int sas_execute_internal_abort(struct domain_device *device, + enum sas_internal_abort type, u16 tag, + unsigned int qid, void *data) +{ + struct sas_ha_struct *ha = device->port->ha; + struct sas_internal *i = to_sas_internal(ha->shost->transportt); + struct sas_task *task = NULL; + int res, retry; + + for (retry = 0; retry < TASK_RETRY; retry++) { + task = sas_alloc_slow_task(GFP_KERNEL); + if (!task) + return -ENOMEM; + + task->dev = device; + task->task_proto = SAS_PROTOCOL_INTERNAL_ABORT; + task->task_done = sas_task_internal_done; + task->slow_task->timer.function = sas_task_internal_timedout; + task->slow_task->timer.expires = jiffies + TASK_TIMEOUT; + add_timer(&task->slow_task->timer); + + task->abort_task.tag = tag; + task->abort_task.type = type; + task->abort_task.qid = qid; + + res = i->dft->lldd_execute_task(task, GFP_KERNEL); + if (res) { + del_timer_sync(&task->slow_task->timer); + pr_err("Executing internal abort failed %016llx (%d)\n", + SAS_ADDR(device->sas_addr), res); + break; + } + + wait_for_completion(&task->slow_task->completion); + res = TMF_RESP_FUNC_FAILED; + + /* Even if the internal abort timed out, return direct. */ + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + bool quit = true; + + if (i->dft->lldd_abort_timeout) + quit = i->dft->lldd_abort_timeout(task, data); + else + pr_err("Internal abort: timeout %016llx\n", + SAS_ADDR(device->sas_addr)); + res = -EIO; + if (quit) + break; + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_SAM_STAT_GOOD) { + res = TMF_RESP_FUNC_COMPLETE; + break; + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == TMF_RESP_FUNC_SUCC) { + res = TMF_RESP_FUNC_SUCC; + break; + } + + pr_err("Internal abort: task to dev %016llx response: 0x%x status 0x%x\n", + SAS_ADDR(device->sas_addr), task->task_status.resp, + task->task_status.stat); + sas_free_task(task); + task = NULL; + } + BUG_ON(retry == TASK_RETRY && task != NULL); + sas_free_task(task); + return res; +} + +int sas_execute_internal_abort_single(struct domain_device *device, u16 tag, + unsigned int qid, void *data) +{ + return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_SINGLE, + tag, qid, data); +} +EXPORT_SYMBOL_GPL(sas_execute_internal_abort_single); + +int sas_execute_internal_abort_dev(struct domain_device *device, + unsigned int qid, void *data) +{ + return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_DEV, + SCSI_NO_TAG, qid, data); +} +EXPORT_SYMBOL_GPL(sas_execute_internal_abort_dev); + +int sas_execute_tmf(struct domain_device *device, void *parameter, + int para_len, int force_phy_id, + struct sas_tmf_task *tmf) +{ + struct sas_task *task; + struct sas_internal *i = + to_sas_internal(device->port->ha->shost->transportt); + int res, retry; + + for (retry = 0; retry < TASK_RETRY; retry++) { + task = sas_alloc_slow_task(GFP_KERNEL); + if (!task) + return -ENOMEM; + + task->dev = device; + task->task_proto = device->tproto; + + if 
(dev_is_sata(device)) { + task->ata_task.device_control_reg_update = 1; + if (force_phy_id >= 0) { + task->ata_task.force_phy = true; + task->ata_task.force_phy_id = force_phy_id; + } + memcpy(&task->ata_task.fis, parameter, para_len); + } else { + memcpy(&task->ssp_task, parameter, para_len); + } + + task->task_done = sas_task_internal_done; + task->tmf = tmf; + + task->slow_task->timer.function = sas_task_internal_timedout; + task->slow_task->timer.expires = jiffies + TASK_TIMEOUT; + add_timer(&task->slow_task->timer); + + res = i->dft->lldd_execute_task(task, GFP_KERNEL); + if (res) { + del_timer_sync(&task->slow_task->timer); + pr_err("executing TMF task failed %016llx (%d)\n", + SAS_ADDR(device->sas_addr), res); + break; + } + + wait_for_completion(&task->slow_task->completion); + + if (i->dft->lldd_tmf_exec_complete) + i->dft->lldd_tmf_exec_complete(device); + + res = TMF_RESP_FUNC_FAILED; + + if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { + pr_err("TMF task timeout for %016llx and not done\n", + SAS_ADDR(device->sas_addr)); + if (i->dft->lldd_tmf_aborted) + i->dft->lldd_tmf_aborted(task); + break; + } + pr_warn("TMF task timeout for %016llx and done\n", + SAS_ADDR(device->sas_addr)); + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { + res = TMF_RESP_FUNC_COMPLETE; + break; + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == TMF_RESP_FUNC_SUCC) { + res = TMF_RESP_FUNC_SUCC; + break; + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_DATA_UNDERRUN) { + /* no error, but return the number of bytes of + * underrun + */ + pr_warn("TMF task to dev %016llx resp: 0x%x sts 0x%x underrun\n", + SAS_ADDR(device->sas_addr), + task->task_status.resp, + task->task_status.stat); + res = task->task_status.residual; + break; + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_DATA_OVERRUN) { + pr_warn("TMF task blocked task error %016llx\n", + SAS_ADDR(device->sas_addr)); + res = -EMSGSIZE; + break; + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_OPEN_REJECT) { + pr_warn("TMF task open reject failed %016llx\n", + SAS_ADDR(device->sas_addr)); + res = -EIO; + } else { + pr_warn("TMF task to dev %016llx resp: 0x%x status 0x%x\n", + SAS_ADDR(device->sas_addr), + task->task_status.resp, + task->task_status.stat); + } + sas_free_task(task); + task = NULL; + } + + if (retry == TASK_RETRY) + pr_warn("executing TMF for %016llx failed after %d attempts!\n", + SAS_ADDR(device->sas_addr), TASK_RETRY); + sas_free_task(task); + + return res; +} + +static int sas_execute_ssp_tmf(struct domain_device *device, u8 *lun, + struct sas_tmf_task *tmf) +{ + struct sas_ssp_task ssp_task; + + if (!(device->tproto & SAS_PROTOCOL_SSP)) + return TMF_RESP_FUNC_ESUPP; + + memcpy(ssp_task.LUN, lun, 8); + + return sas_execute_tmf(device, &ssp_task, sizeof(ssp_task), -1, tmf); +} + +int sas_abort_task_set(struct domain_device *dev, u8 *lun) +{ + struct sas_tmf_task tmf_task = { + .tmf = TMF_ABORT_TASK_SET, + }; + + return sas_execute_ssp_tmf(dev, lun, &tmf_task); +} +EXPORT_SYMBOL_GPL(sas_abort_task_set); + +int sas_clear_task_set(struct domain_device *dev, u8 *lun) +{ + struct sas_tmf_task tmf_task = { + .tmf = TMF_CLEAR_TASK_SET, + }; + + return sas_execute_ssp_tmf(dev, lun, &tmf_task); +} +EXPORT_SYMBOL_GPL(sas_clear_task_set); + +int 
sas_lu_reset(struct domain_device *dev, u8 *lun) +{ + struct sas_tmf_task tmf_task = { + .tmf = TMF_LU_RESET, + }; + + return sas_execute_ssp_tmf(dev, lun, &tmf_task); +} +EXPORT_SYMBOL_GPL(sas_lu_reset); + +int sas_query_task(struct sas_task *task, u16 tag) +{ + struct sas_tmf_task tmf_task = { + .tmf = TMF_QUERY_TASK, + .tag_of_task_to_be_managed = tag, + }; + struct scsi_cmnd *cmnd = task->uldd_task; + struct domain_device *dev = task->dev; + struct scsi_lun lun; + + int_to_scsilun(cmnd->device->lun, &lun); + + return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task); +} +EXPORT_SYMBOL_GPL(sas_query_task); + +int sas_abort_task(struct sas_task *task, u16 tag) +{ + struct sas_tmf_task tmf_task = { + .tmf = TMF_ABORT_TASK, + .tag_of_task_to_be_managed = tag, + }; + struct scsi_cmnd *cmnd = task->uldd_task; + struct domain_device *dev = task->dev; + struct scsi_lun lun; + + int_to_scsilun(cmnd->device->lun, &lun); + + return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task); +} +EXPORT_SYMBOL_GPL(sas_abort_task); + +/* + * Tell an upper layer that it needs to initiate an abort for a given task. + * This should only ever be called by an LLDD. + */ +void sas_task_abort(struct sas_task *task) +{ + struct scsi_cmnd *sc = task->uldd_task; + + /* Escape for libsas internal commands */ + if (!sc) { + struct sas_task_slow *slow = task->slow_task; + + if (!slow) + return; + if (!del_timer(&slow->timer)) + return; + slow->timer.function(&slow->timer); + return; + } + + if (dev_is_sata(task->dev)) + sas_ata_task_abort(task); + else + blk_abort_request(scsi_cmd_to_rq(sc)); +} +EXPORT_SYMBOL_GPL(sas_task_abort); + +int sas_slave_alloc(struct scsi_device *sdev) +{ + if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun) + return -ENXIO; + + return 0; +} +EXPORT_SYMBOL_GPL(sas_slave_alloc); + +void sas_target_destroy(struct scsi_target *starget) +{ + struct domain_device *found_dev = starget->hostdata; + + if (!found_dev) + return; + + starget->hostdata = NULL; + sas_put_device(found_dev); +} +EXPORT_SYMBOL_GPL(sas_target_destroy); + +#define SAS_STRING_ADDR_SIZE 16 + +int sas_request_addr(struct Scsi_Host *shost, u8 *addr) +{ + int res; + const struct firmware *fw; + + res = request_firmware(&fw, "sas_addr", &shost->shost_gendev); + if (res) + return res; + + if (fw->size < SAS_STRING_ADDR_SIZE) { + res = -ENODEV; + goto out; + } + + res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2); + if (res) + goto out; + +out: + release_firmware(fw); + return res; +} +EXPORT_SYMBOL_GPL(sas_request_addr); + diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c new file mode 100644 index 000000000..e9d291007 --- /dev/null +++ b/drivers/scsi/libsas/sas_task.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "sas_internal.h" + +#include +#include +#include +#include + +/* fill task_status_struct based on SSP response frame */ +void sas_ssp_task_response(struct device *dev, struct sas_task *task, + struct ssp_response_iu *iu) +{ + struct task_status_struct *tstat = &task->task_status; + + tstat->resp = SAS_TASK_COMPLETE; + + switch (iu->datapres) { + case SAS_DATAPRES_NO_DATA: + tstat->stat = iu->status; + break; + case SAS_DATAPRES_RESPONSE_DATA: + tstat->stat = iu->resp_data[3]; + break; + case SAS_DATAPRES_SENSE_DATA: + tstat->stat = SAS_SAM_STAT_CHECK_CONDITION; + tstat->buf_valid_size = + min_t(int, SAS_STATUS_BUF_SIZE, + be32_to_cpu(iu->sense_data_len)); + memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size); + + if (iu->status != 
SAM_STAT_CHECK_CONDITION) + dev_warn(dev, "dev %016llx sent sense data, but stat(0x%x) is not CHECK CONDITION\n", + SAS_ADDR(task->dev->sas_addr), iu->status); + break; + default: + /* when datapres contains corrupt/unknown value... */ + tstat->stat = SAS_SAM_STAT_CHECK_CONDITION; + } +} +EXPORT_SYMBOL_GPL(sas_ssp_task_response); + diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile new file mode 100644 index 000000000..bbd1faf41 --- /dev/null +++ b/drivers/scsi/lpfc/Makefile @@ -0,0 +1,36 @@ +#/******************************************************************* +# * This file is part of the Emulex Linux Device Driver for * +# * Fibre Channel Host Bus Adapters. * +# * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * +# * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * +# * Copyright (C) 2004-2012 Emulex. All rights reserved. * +# * EMULEX and SLI are trademarks of Emulex. * +# * www.broadcom.com * +# * * +# * This program is free software; you can redistribute it and/or * +# * modify it under the terms of version 2 of the GNU General * +# * Public License as published by the Free Software Foundation. * +# * This program is distributed in the hope that it will be useful. * +# * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * +# * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * +# * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * +# * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * +# * TO BE LEGALLY INVALID. See the GNU General Public License for * +# * more details, a copy of which can be found in the file COPYING * +# * included with this package. * +# *******************************************************************/ +###################################################################### + +ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage +ccflags-$(GCOV) += -O0 + +ifdef WARNINGS_BECOME_ERRORS +ccflags-y += -Werror +endif + +obj-$(CONFIG_SCSI_LPFC) := lpfc.o + +lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \ + lpfc_hbadisc.o lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o \ + lpfc_scsi.o lpfc_attr.o lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o \ + lpfc_nvme.o lpfc_nvmet.o lpfc_vmid.o diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h new file mode 100644 index 000000000..af15f7a22 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc.h @@ -0,0 +1,1873 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. 
See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include +#include +#include +#include + +#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS) +#define CONFIG_SCSI_LPFC_DEBUG_FS +#endif + +struct lpfc_sli2_slim; + +#define ELX_MODEL_NAME_SIZE 80 + +#define LPFC_PCI_DEV_LP 0x1 +#define LPFC_PCI_DEV_OC 0x2 + +#define LPFC_SLI_REV2 2 +#define LPFC_SLI_REV3 3 +#define LPFC_SLI_REV4 4 + +#define LPFC_MAX_TARGET 4096 /* max number of targets supported */ +#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els + requests */ +#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact + the NameServer before giving up. */ +#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ +#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ + +#define LPFC_DEFAULT_XPSGL_SIZE 256 +#define LPFC_MAX_SG_TABLESIZE 0xffff +#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */ +#define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */ +#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */ +#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ +#define LPFC_MIN_SG_SEG_CNT 32 /* sg element count per scsi cmnd */ +#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */ +#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */ +#define LPFC_MAX_NVME_SEG_CNT 256 /* max SGL element cnt per NVME cmnd */ + +#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */ +#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ +#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ +#define LPFC_VNAME_LEN 100 /* vport symbolic name length */ +#define LPFC_TGTQ_RAMPUP_PCENT 5 /* Target queue rampup in percentage */ +#define LPFC_MIN_TGT_QDEPTH 10 +#define LPFC_MAX_TGT_QDEPTH 0xFFFF + +/* + * Following time intervals are used of adjusting SCSI device + * queue depths when there are driver resource error or Firmware + * resource error. + */ +/* 1 Second */ +#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1)) + +/* Number of exchanges reserved for discovery to complete */ +#define LPFC_DISC_IOCB_BUFF_COUNT 20 + +#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */ +#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */ + +/* Error Attention event polling interval */ +#define LPFC_ERATT_POLL_INTERVAL 5 /* EATT poll interval in seconds */ + +/* Define macros for 64 bit support */ +#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr))) +#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32))) +#define getPaddr(high, low) ((dma_addr_t)( \ + (( (u64)(high)<<16 ) << 16)|( (u64)(low)))) +/* Provide maximum configuration definitions. 
*/ +#define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */ +#define FC_MAX_ADPTMSG 64 + +#define MAX_HBAEVT 32 +#define MAX_HBAS_NO_RESET 16 + +/* Number of MSI-X vectors the driver uses */ +#define LPFC_MSIX_VECTORS 2 + +/* lpfc wait event data ready flag */ +#define LPFC_DATA_READY 0 /* bit 0 */ + +/* queue dump line buffer size */ +#define LPFC_LBUF_SZ 128 + +/* mailbox system shutdown options */ +#define LPFC_MBX_NO_WAIT 0 +#define LPFC_MBX_WAIT 1 + +#define LPFC_CFG_PARAM_MAGIC_NUM 0xFEAA0005 +#define LPFC_PORT_CFG_NAME "/cfg/port.cfg" + +#define lpfc_rangecheck(val, min, max) \ + ((uint)(val) >= (uint)(min) && (val) <= (max)) + +enum lpfc_polling_flags { + ENABLE_FCP_RING_POLLING = 0x1, + DISABLE_FCP_RING_INT = 0x2 +}; + +struct perf_prof { + uint16_t cmd_cpu[40]; + uint16_t rsp_cpu[40]; + uint16_t qh_cpu[40]; + uint16_t wqidx[40]; +}; + +/* + * Provide for FC4 TYPE x28 - NVME. The + * bit mask for FCP and NVME is 0x8 identically + * because they are 32 bit positions distance. + */ +#define LPFC_FC4_TYPE_BITMASK 0x00000100 + +/* Provide DMA memory definitions the driver uses per port instance. */ +struct lpfc_dmabuf { + struct list_head list; + void *virt; /* virtual address ptr */ + dma_addr_t phys; /* mapped address */ + uint32_t buffer_tag; /* used for tagged queue ring */ +}; + +struct lpfc_nvmet_ctxbuf { + struct list_head list; + struct lpfc_async_xchg_ctx *context; + struct lpfc_iocbq *iocbq; + struct lpfc_sglq *sglq; + struct work_struct defer_work; +}; + +struct lpfc_dma_pool { + struct lpfc_dmabuf *elements; + uint32_t max_count; + uint32_t current_count; +}; + +struct hbq_dmabuf { + struct lpfc_dmabuf hbuf; + struct lpfc_dmabuf dbuf; + uint16_t total_size; + uint16_t bytes_recv; + uint32_t tag; + struct lpfc_cq_event cq_event; + unsigned long time_stamp; + void *context; +}; + +struct rqb_dmabuf { + struct lpfc_dmabuf hbuf; + struct lpfc_dmabuf dbuf; + uint16_t total_size; + uint16_t bytes_recv; + uint16_t idx; + struct lpfc_queue *hrq; /* ptr to associated Header RQ */ + struct lpfc_queue *drq; /* ptr to associated Data RQ */ +}; + +/* Priority bit. Set value to exceed low water mark in lpfc_mem. 
*/ +#define MEM_PRI 0x100 + + +/****************************************************************************/ +/* Device VPD save area */ +/****************************************************************************/ +typedef struct lpfc_vpd { + uint32_t status; /* vpd status value */ + uint32_t length; /* number of bytes actually returned */ + struct { + uint32_t rsvd1; /* Revision numbers */ + uint32_t biuRev; + uint32_t smRev; + uint32_t smFwRev; + uint32_t endecRev; + uint16_t rBit; + uint8_t fcphHigh; + uint8_t fcphLow; + uint8_t feaLevelHigh; + uint8_t feaLevelLow; + uint32_t postKernRev; + uint32_t opFwRev; + uint8_t opFwName[16]; + uint32_t sli1FwRev; + uint8_t sli1FwName[16]; + uint32_t sli2FwRev; + uint8_t sli2FwName[16]; + } rev; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd3 :20; /* Reserved */ + uint32_t rsvd2 : 3; /* Reserved */ + uint32_t cbg : 1; /* Configure BlockGuard */ + uint32_t cmv : 1; /* Configure Max VPIs */ + uint32_t ccrp : 1; /* Config Command Ring Polling */ + uint32_t csah : 1; /* Configure Synchronous Abort Handling */ + uint32_t chbs : 1; /* Cofigure Host Backing store */ + uint32_t cinb : 1; /* Enable Interrupt Notification Block */ + uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */ + uint32_t cmx : 1; /* Configure Max XRIs */ + uint32_t cmr : 1; /* Configure Max RPIs */ +#else /* __LITTLE_ENDIAN */ + uint32_t cmr : 1; /* Configure Max RPIs */ + uint32_t cmx : 1; /* Configure Max XRIs */ + uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */ + uint32_t cinb : 1; /* Enable Interrupt Notification Block */ + uint32_t chbs : 1; /* Cofigure Host Backing store */ + uint32_t csah : 1; /* Configure Synchronous Abort Handling */ + uint32_t ccrp : 1; /* Config Command Ring Polling */ + uint32_t cmv : 1; /* Configure Max VPIs */ + uint32_t cbg : 1; /* Configure BlockGuard */ + uint32_t rsvd2 : 3; /* Reserved */ + uint32_t rsvd3 :20; /* Reserved */ +#endif + } sli3Feat; +} lpfc_vpd_t; + + +/* + * lpfc stat counters + */ +struct lpfc_stats { + /* Statistics for ELS commands */ + uint32_t elsLogiCol; + uint32_t elsRetryExceeded; + uint32_t elsXmitRetry; + uint32_t elsDelayRetry; + uint32_t elsRcvDrop; + uint32_t elsRcvFrame; + uint32_t elsRcvRSCN; + uint32_t elsRcvRNID; + uint32_t elsRcvFARP; + uint32_t elsRcvFARPR; + uint32_t elsRcvFLOGI; + uint32_t elsRcvPLOGI; + uint32_t elsRcvADISC; + uint32_t elsRcvPDISC; + uint32_t elsRcvFAN; + uint32_t elsRcvLOGO; + uint32_t elsRcvPRLO; + uint32_t elsRcvPRLI; + uint32_t elsRcvLIRR; + uint32_t elsRcvRLS; + uint32_t elsRcvRPL; + uint32_t elsRcvRRQ; + uint32_t elsRcvRTV; + uint32_t elsRcvECHO; + uint32_t elsRcvLCB; + uint32_t elsRcvRDP; + uint32_t elsRcvRDF; + uint32_t elsXmitFLOGI; + uint32_t elsXmitFDISC; + uint32_t elsXmitPLOGI; + uint32_t elsXmitPRLI; + uint32_t elsXmitADISC; + uint32_t elsXmitLOGO; + uint32_t elsXmitSCR; + uint32_t elsXmitRSCN; + uint32_t elsXmitRNID; + uint32_t elsXmitFARP; + uint32_t elsXmitFARPR; + uint32_t elsXmitACC; + uint32_t elsXmitLSRJT; + + uint32_t frameRcvBcast; + uint32_t frameRcvMulti; + uint32_t strayXmitCmpl; + uint32_t frameXmitDelay; + uint32_t xriCmdCmpl; + uint32_t xriStatErr; + uint32_t LinkUp; + uint32_t LinkDown; + uint32_t LinkMultiEvent; + uint32_t NoRcvBuf; + uint32_t fcpCmd; + uint32_t fcpCmpl; + uint32_t fcpRspErr; + uint32_t fcpRemoteStop; + uint32_t fcpPortRjt; + uint32_t fcpPortBusy; + uint32_t fcpError; + uint32_t fcpLocalErr; +}; + +struct lpfc_hba; + + +#define LPFC_VMID_TIMER 300 /* timer interval in seconds */ + +#define LPFC_MAX_VMID_SIZE 256 
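/*
 * Editor's note -- illustrative sketch only, not part of the imported
 * source: the putPaddrLow()/putPaddrHigh()/getPaddr() macros defined
 * earlier in this header split a 64-bit DMA address into the two 32-bit
 * words the hardware descriptors carry and reassemble it afterwards.
 * The helper name below is hypothetical and exists only to show the
 * round trip.
 */
static inline dma_addr_t lpfc_demo_paddr_roundtrip(dma_addr_t phys)
{
	uint32_t lo = putPaddrLow(phys);	/* phys & 0xffffffff         */
	uint32_t hi = putPaddrHigh(phys);	/* (phys >> 32) & 0xffffffff */

	return getPaddr(hi, lo);		/* ((u64)hi << 32) | lo == phys */
}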
+ +union lpfc_vmid_io_tag { + u32 app_id; /* App Id vmid */ + u8 cs_ctl_vmid; /* Priority tag vmid */ +}; + +#define JIFFIES_PER_HR (HZ * 60 * 60) + +struct lpfc_vmid { + u8 flag; +#define LPFC_VMID_SLOT_FREE 0x0 +#define LPFC_VMID_SLOT_USED 0x1 +#define LPFC_VMID_REQ_REGISTER 0x2 +#define LPFC_VMID_REGISTERED 0x4 +#define LPFC_VMID_DE_REGISTER 0x8 + char host_vmid[LPFC_MAX_VMID_SIZE]; + union lpfc_vmid_io_tag un; + struct hlist_node hnode; + u64 io_rd_cnt; + u64 io_wr_cnt; + u8 vmid_len; + u8 delete_inactive; /* Delete if inactive flag 0 = no, 1 = yes */ + u32 hash_index; + u64 __percpu *last_io_time; +}; + +#define lpfc_vmid_is_type_priority_tag(vport)\ + (vport->vmid_priority_tagging ? 1 : 0) + +#define LPFC_VMID_HASH_SIZE 256 +#define LPFC_VMID_HASH_MASK 255 +#define LPFC_VMID_HASH_SHIFT 6 + +struct lpfc_vmid_context { + struct lpfc_vmid *vmp; + struct lpfc_nodelist *nlp; + bool instantiated; +}; + +struct lpfc_vmid_priority_range { + u8 low; + u8 high; + u8 qos; +}; + +struct lpfc_vmid_priority_info { + u32 num_descriptors; + struct lpfc_vmid_priority_range *vmid_range; +}; + +#define QFPA_EVEN_ONLY 0x01 +#define QFPA_ODD_ONLY 0x02 +#define QFPA_EVEN_ODD 0x03 + +enum discovery_state { + LPFC_VPORT_UNKNOWN = 0, /* vport state is unknown */ + LPFC_VPORT_FAILED = 1, /* vport has failed */ + LPFC_LOCAL_CFG_LINK = 6, /* local NPORT Id configured */ + LPFC_FLOGI = 7, /* FLOGI sent to Fabric */ + LPFC_FDISC = 8, /* FDISC sent for vport */ + LPFC_FABRIC_CFG_LINK = 9, /* Fabric assigned NPORT Id + * configured */ + LPFC_NS_REG = 10, /* Register with NameServer */ + LPFC_NS_QRY = 11, /* Query NameServer for NPort ID list */ + LPFC_BUILD_DISC_LIST = 12, /* Build ADISC and PLOGI lists for + * device authentication / discovery */ + LPFC_DISC_AUTH = 13, /* Processing ADISC list */ + LPFC_VPORT_READY = 32, +}; + +enum hba_state { + LPFC_LINK_UNKNOWN = 0, /* HBA state is unknown */ + LPFC_WARM_START = 1, /* HBA state after selective reset */ + LPFC_INIT_START = 2, /* Initial state after board reset */ + LPFC_INIT_MBX_CMDS = 3, /* Initialize HBA with mbox commands */ + LPFC_LINK_DOWN = 4, /* HBA initialized, link is down */ + LPFC_LINK_UP = 5, /* Link is up - issue READ_LA */ + LPFC_CLEAR_LA = 6, /* authentication cmplt - issue + * CLEAR_LA */ + LPFC_HBA_READY = 32, + LPFC_HBA_ERROR = -1 +}; + +struct lpfc_trunk_link_state { + enum hba_state state; + uint8_t fault; +}; + +struct lpfc_trunk_link { + struct lpfc_trunk_link_state link0, + link1, + link2, + link3; + u32 phy_lnk_speed; +}; + +/* Format of congestion module parameters */ +struct lpfc_cgn_param { + uint32_t cgn_param_magic; + uint8_t cgn_param_version; /* version 1 */ + uint8_t cgn_param_mode; /* 0=off 1=managed 2=monitor only */ +#define LPFC_CFG_OFF 0 +#define LPFC_CFG_MANAGED 1 +#define LPFC_CFG_MONITOR 2 + uint8_t cgn_rsvd1; + uint8_t cgn_rsvd2; + uint8_t cgn_param_level0; + uint8_t cgn_param_level1; + uint8_t cgn_param_level2; + uint8_t byte11; + uint8_t byte12; + uint8_t byte13; + uint8_t byte14; + uint8_t byte15; +}; + +/* Max number of days of congestion data */ +#define LPFC_MAX_CGN_DAYS 10 + +struct lpfc_cgn_ts { + uint8_t month; + uint8_t day; + uint8_t year; + uint8_t hour; + uint8_t minute; + uint8_t second; +}; + +/* Format of congestion buffer info + * This structure defines memory thats allocated and registered with + * the HBA firmware. When adding or removing fields from this structure + * the alignment must match the HBA firmware. 
+ */ + +struct lpfc_cgn_info { + /* Header */ + __le16 cgn_info_size; /* is sizeof(struct lpfc_cgn_info) */ + uint8_t cgn_info_version; /* represents format of structure */ +#define LPFC_CGN_INFO_V1 1 +#define LPFC_CGN_INFO_V2 2 +#define LPFC_CGN_INFO_V3 3 +#define LPFC_CGN_INFO_V4 4 + uint8_t cgn_info_mode; /* 0=off 1=managed 2=monitor only */ + uint8_t cgn_info_detect; + uint8_t cgn_info_action; + uint8_t cgn_info_level0; + uint8_t cgn_info_level1; + uint8_t cgn_info_level2; + + /* Start Time */ + struct lpfc_cgn_ts base_time; + + /* minute / hours / daily indices */ + uint8_t cgn_index_minute; + uint8_t cgn_index_hour; + uint8_t cgn_index_day; + + __le16 cgn_warn_freq; + __le16 cgn_alarm_freq; + __le16 cgn_lunq; + uint8_t cgn_pad1[8]; + + /* Driver Information */ + __le16 cgn_drvr_min[60]; + __le32 cgn_drvr_hr[24]; + __le32 cgn_drvr_day[LPFC_MAX_CGN_DAYS]; + + /* Congestion Warnings */ + __le16 cgn_warn_min[60]; + __le32 cgn_warn_hr[24]; + __le32 cgn_warn_day[LPFC_MAX_CGN_DAYS]; + + /* Latency Information */ + __le32 cgn_latency_min[60]; + __le32 cgn_latency_hr[24]; + __le32 cgn_latency_day[LPFC_MAX_CGN_DAYS]; + + /* Bandwidth Information */ + __le16 cgn_bw_min[60]; + __le16 cgn_bw_hr[24]; + __le16 cgn_bw_day[LPFC_MAX_CGN_DAYS]; + + /* Congestion Alarms */ + __le16 cgn_alarm_min[60]; + __le32 cgn_alarm_hr[24]; + __le32 cgn_alarm_day[LPFC_MAX_CGN_DAYS]; + + struct_group(cgn_stat, + uint8_t cgn_stat_npm; /* Notifications per minute */ + + /* Start Time */ + struct lpfc_cgn_ts stat_start; /* Base time */ + uint8_t cgn_pad2; + + __le32 cgn_notification; + __le32 cgn_peer_notification; + __le32 link_integ_notification; + __le32 delivery_notification; + struct lpfc_cgn_ts stat_fpin; /* Last congestion notification FPIN */ + struct lpfc_cgn_ts stat_peer; /* Last peer congestion FPIN */ + struct lpfc_cgn_ts stat_lnk; /* Last link integrity FPIN */ + struct lpfc_cgn_ts stat_delivery; /* Last delivery notification FPIN */ + ); + + __le32 cgn_info_crc; +#define LPFC_CGN_CRC32_MAGIC_NUMBER 0x1EDC6F41 +#define LPFC_CGN_CRC32_SEED 0xFFFFFFFF +}; + +#define LPFC_CGN_INFO_SZ (sizeof(struct lpfc_cgn_info) - \ + sizeof(uint32_t)) + +struct lpfc_cgn_stat { + atomic64_t total_bytes; + atomic64_t rcv_bytes; + atomic64_t rx_latency; +#define LPFC_CGN_NOT_SENT 0xFFFFFFFFFFFFFFFFLL + atomic_t rx_io_cnt; +}; + +struct lpfc_cgn_acqe_stat { + atomic64_t alarm; + atomic64_t warn; +}; + +struct lpfc_vport { + struct lpfc_hba *phba; + struct list_head listentry; + uint8_t port_type; +#define LPFC_PHYSICAL_PORT 1 +#define LPFC_NPIV_PORT 2 +#define LPFC_FABRIC_PORT 3 + enum discovery_state port_state; + + uint16_t vpi; + uint16_t vfi; + uint8_t vpi_state; +#define LPFC_VPI_REGISTERED 0x1 + + uint32_t fc_flag; /* FC flags */ +/* Several of these flags are HBA centric and should be moved to + * phba->link_flag (e.g. 
FC_PTP, FC_PUBLIC_LOOP) + */ +#define FC_PT2PT 0x1 /* pt2pt with no fabric */ +#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */ +#define FC_DISC_TMO 0x4 /* Discovery timer running */ +#define FC_PUBLIC_LOOP 0x8 /* Public loop */ +#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */ +#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */ +#define FC_NLP_MORE 0x40 /* More node to process in node tbl */ +#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */ +#define FC_FABRIC 0x100 /* We are fabric attached */ +#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */ +#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */ +#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/ +#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */ +#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ +#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ +#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */ +#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */ +#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */ +#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */ +#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */ +#define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */ +#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */ +#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */ +#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */ + + uint32_t ct_flags; +#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */ +#define FC_CT_RNN_ID 0x2 /* RNN_ID accepted by switch */ +#define FC_CT_RSNN_NN 0x4 /* RSNN_NN accepted by switch */ +#define FC_CT_RSPN_ID 0x8 /* RSPN_ID accepted by switch */ +#define FC_CT_RFT_ID 0x10 /* RFT_ID accepted by switch */ +#define FC_CT_RPRT_DEFER 0x20 /* Defer issuing FDMI RPRT */ + + struct list_head fc_nodes; + + /* Keep counters for the number of entries in each list. 
*/ + uint16_t fc_plogi_cnt; + uint16_t fc_adisc_cnt; + uint16_t fc_reglogin_cnt; + uint16_t fc_prli_cnt; + uint16_t fc_unmap_cnt; + uint16_t fc_map_cnt; + uint16_t fc_npr_cnt; + uint16_t fc_unused_cnt; + struct serv_parm fc_sparam; /* buffer for our service parameters */ + + uint32_t fc_myDID; /* fibre channel S_ID */ + uint32_t fc_prevDID; /* previous fibre channel S_ID */ + struct lpfc_name fabric_portname; + struct lpfc_name fabric_nodename; + + int32_t stopped; /* HBA has not been restarted since last ERATT */ + uint8_t fc_linkspeed; /* Link speed after last READ_LA */ + + uint32_t num_disc_nodes; /* in addition to hba_state */ + uint32_t gidft_inp; /* cnt of outstanding GID_FTs */ + + uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */ + uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */ + uint32_t fc_rscn_flush; /* flag use of fc_rscn_id_list */ + struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN]; + struct lpfc_name fc_nodename; /* fc nodename */ + struct lpfc_name fc_portname; /* fc portname */ + + struct lpfc_work_evt disc_timeout_evt; + + struct timer_list fc_disctmo; /* Discovery rescue timer */ + uint8_t fc_ns_retry; /* retries for fabric nameserver */ + uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */ + + spinlock_t work_port_lock; + uint32_t work_port_events; /* Timeout to be handled */ +#define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */ +#define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */ +#define WORKER_DELAYED_DISC_TMO 0x8 /* vport: delayed discovery */ + +#define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */ +#define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */ +#define WORKER_FABRIC_BLOCK_TMO 0x400 /* hba: fabric block timeout */ +#define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */ +#define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */ +#define WORKER_SERVICE_TXQ 0x2000 /* hba: IOCBs on the txq */ +#define WORKER_CHECK_INACTIVE_VMID 0x4000 /* hba: check inactive vmids */ +#define WORKER_CHECK_VMID_ISSUE_QFPA 0x8000 /* vport: Check if qfpa needs + * to be issued */ + + struct timer_list els_tmofunc; + struct timer_list delayed_disc_tmo; + + uint8_t load_flag; +#define FC_LOADING 0x1 /* HBA in process of loading drvr */ +#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */ +#define FC_ALLOW_FDMI 0x4 /* port is ready for FDMI requests */ +#define FC_ALLOW_VMID 0x8 /* Allow VMID I/Os */ +#define FC_DEREGISTER_ALL_APP_ID 0x10 /* Deregister all VMIDs */ + /* Vport Config Parameters */ + uint32_t cfg_scan_down; + uint32_t cfg_lun_queue_depth; + uint32_t cfg_nodev_tmo; + uint32_t cfg_devloss_tmo; + uint32_t cfg_restrict_login; + uint32_t cfg_peer_port_login; + uint32_t cfg_fcp_class; + uint32_t cfg_use_adisc; + uint32_t cfg_discovery_threads; + uint32_t cfg_log_verbose; + uint32_t cfg_enable_fc4_type; + uint32_t cfg_max_luns; + uint32_t cfg_enable_da_id; + uint32_t cfg_max_scsicmpl_time; + uint32_t cfg_tgt_queue_depth; + uint32_t cfg_first_burst_size; + uint32_t dev_loss_tmo_changed; + /* VMID parameters */ + u8 lpfc_vmid_host_uuid[16]; + u32 max_vmid; /* maximum VMIDs allowed per port */ + u32 cur_vmid_cnt; /* Current VMID count */ +#define LPFC_MIN_VMID 4 +#define LPFC_MAX_VMID 255 + u32 vmid_inactivity_timeout; /* Time after which the VMID */ + /* deregisters from switch */ + u32 vmid_priority_tagging; +#define LPFC_VMID_PRIO_TAG_DISABLE 0 /* Disable */ +#define LPFC_VMID_PRIO_TAG_SUP_TARGETS 1 /* Allow supported targets only */ +#define LPFC_VMID_PRIO_TAG_ALL_TARGETS 2 /* Allow all targets */ + 
unsigned long *vmid_priority_range; +#define LPFC_VMID_MAX_PRIORITY_RANGE 256 +#define LPFC_VMID_PRIORITY_BITMAP_SIZE 32 + u8 vmid_flag; +#define LPFC_VMID_IN_USE 0x1 +#define LPFC_VMID_ISSUE_QFPA 0x2 +#define LPFC_VMID_QFPA_CMPL 0x4 +#define LPFC_VMID_QOS_ENABLED 0x8 +#define LPFC_VMID_TIMER_ENBLD 0x10 +#define LPFC_VMID_TYPE_PRIO 0x20 + struct fc_qfpa_res *qfpa_res; + + struct fc_vport *fc_vport; + + struct lpfc_vmid *vmid; + DECLARE_HASHTABLE(hash_table, 8); + rwlock_t vmid_lock; + struct lpfc_vmid_priority_info vmid_priority; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + struct dentry *debug_disc_trc; + struct dentry *debug_nodelist; + struct dentry *debug_nvmestat; + struct dentry *debug_scsistat; + struct dentry *debug_ioktime; + struct dentry *debug_hdwqstat; + struct dentry *vport_debugfs_root; + struct lpfc_debugfs_trc *disc_trc; + atomic_t disc_trc_cnt; +#endif + struct list_head rcv_buffer_list; + unsigned long rcv_buffer_time_stamp; + uint32_t vport_flag; +#define STATIC_VPORT 0x1 +#define FAWWPN_PARAM_CHG 0x2 + + uint16_t fdmi_num_disc; + uint32_t fdmi_hba_mask; + uint32_t fdmi_port_mask; + + /* There is a single nvme instance per vport. */ + struct nvme_fc_local_port *localport; + uint8_t nvmei_support; /* driver supports NVME Initiator */ + uint32_t last_fcp_wqidx; + uint32_t rcv_flogi_cnt; /* How many unsol FLOGIs ACK'd. */ +}; + +struct hbq_s { + uint16_t entry_count; /* Current number of HBQ slots */ + uint16_t buffer_count; /* Current number of buffers posted */ + uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */ + uint32_t hbqPutIdx; /* HBQ slot to use */ + uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */ + void *hbq_virt; /* Virtual ptr to this hbq */ + struct list_head hbq_buffer_list; /* buffers assigned to this HBQ */ + /* Callback for HBQ buffer allocation */ + struct hbq_dmabuf *(*hbq_alloc_buffer) (struct lpfc_hba *); + /* Callback for HBQ buffer free */ + void (*hbq_free_buffer) (struct lpfc_hba *, + struct hbq_dmabuf *); +}; + +/* this matches the position in the lpfc_hbq_defs array */ +#define LPFC_ELS_HBQ 0 +#define LPFC_MAX_HBQS 1 + +enum hba_temp_state { + HBA_NORMAL_TEMP, + HBA_OVER_TEMP +}; + +enum intr_type_t { + NONE = 0, + INTx, + MSI, + MSIX, +}; + +#define LPFC_CT_CTX_MAX 64 +struct unsol_rcv_ct_ctx { + uint32_t ctxt_id; + uint32_t SID; + uint32_t valid; +#define UNSOL_INVALID 0 +#define UNSOL_VALID 1 + uint16_t oxid; + uint16_t rxid; +}; + +#define LPFC_USER_LINK_SPEED_AUTO 0 /* auto select (default)*/ +#define LPFC_USER_LINK_SPEED_1G 1 /* 1 Gigabaud */ +#define LPFC_USER_LINK_SPEED_2G 2 /* 2 Gigabaud */ +#define LPFC_USER_LINK_SPEED_4G 4 /* 4 Gigabaud */ +#define LPFC_USER_LINK_SPEED_8G 8 /* 8 Gigabaud */ +#define LPFC_USER_LINK_SPEED_10G 10 /* 10 Gigabaud */ +#define LPFC_USER_LINK_SPEED_16G 16 /* 16 Gigabaud */ +#define LPFC_USER_LINK_SPEED_32G 32 /* 32 Gigabaud */ +#define LPFC_USER_LINK_SPEED_64G 64 /* 64 Gigabaud */ +#define LPFC_USER_LINK_SPEED_MAX LPFC_USER_LINK_SPEED_64G + +#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16, 32, 64" + +enum nemb_type { + nemb_mse = 1, + nemb_hbd +}; + +enum mbox_type { + mbox_rd = 1, + mbox_wr +}; + +enum dma_type { + dma_mbox = 1, + dma_ebuf +}; + +enum sta_type { + sta_pre_addr = 1, + sta_pos_addr +}; + +struct lpfc_mbox_ext_buf_ctx { + uint32_t state; +#define LPFC_BSG_MBOX_IDLE 0 +#define LPFC_BSG_MBOX_HOST 1 +#define LPFC_BSG_MBOX_PORT 2 +#define LPFC_BSG_MBOX_DONE 3 +#define LPFC_BSG_MBOX_ABTS 4 + enum nemb_type nembType; + enum mbox_type mboxType; + uint32_t numBuf; + uint32_t 
mbxTag; + uint32_t seqNum; + struct lpfc_dmabuf *mbx_dmabuf; + struct list_head ext_dmabuf_list; +}; + +struct lpfc_epd_pool { + /* Expedite pool */ + struct list_head list; + u32 count; + spinlock_t lock; /* lock for expedite pool */ +}; + +enum ras_state { + INACTIVE, + REG_INPROGRESS, + ACTIVE +}; + +struct lpfc_ras_fwlog { + uint8_t *fwlog_buff; + uint32_t fw_buffcount; /* Buffer size posted to FW */ +#define LPFC_RAS_BUFF_ENTERIES 16 /* Each entry can hold max of 64k */ +#define LPFC_RAS_MAX_ENTRY_SIZE (64 * 1024) +#define LPFC_RAS_MIN_BUFF_POST_SIZE (256 * 1024) +#define LPFC_RAS_MAX_BUFF_POST_SIZE (1024 * 1024) + uint32_t fw_loglevel; /* Log level set */ + struct lpfc_dmabuf lwpd; + struct list_head fwlog_buff_list; + + /* RAS support status on adapter */ + bool ras_hwsupport; /* RAS Support available on HW or not */ + bool ras_enabled; /* Ras Enabled for the function */ +#define LPFC_RAS_DISABLE_LOGGING 0x00 +#define LPFC_RAS_ENABLE_LOGGING 0x01 + enum ras_state state; /* RAS logging running state */ +}; + +#define DBG_LOG_STR_SZ 256 +#define DBG_LOG_SZ 256 + +struct dbg_log_ent { + char log[DBG_LOG_STR_SZ]; + u64 t_ns; +}; + +enum lpfc_irq_chann_mode { + /* Assign IRQs to all possible cpus that have hardware queues */ + NORMAL_MODE, + + /* Assign IRQs only to cpus on the same numa node as HBA */ + NUMA_MODE, + + /* Assign IRQs only on non-hyperthreaded CPUs. This is the + * same as normal_mode, but assign IRQS only on physical CPUs. + */ + NHT_MODE, +}; + +enum lpfc_hba_bit_flags { + FABRIC_COMANDS_BLOCKED, + HBA_PCI_ERR, + MBX_TMO_ERR, +}; + +struct lpfc_hba { + /* SCSI interface function jump table entries */ + struct lpfc_io_buf * (*lpfc_get_scsi_buf) + (struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, + struct scsi_cmnd *cmnd); + int (*lpfc_scsi_prep_dma_buf) + (struct lpfc_hba *, struct lpfc_io_buf *); + void (*lpfc_scsi_unprep_dma_buf) + (struct lpfc_hba *, struct lpfc_io_buf *); + void (*lpfc_release_scsi_buf) + (struct lpfc_hba *, struct lpfc_io_buf *); + void (*lpfc_rampdown_queue_depth) + (struct lpfc_hba *); + void (*lpfc_scsi_prep_cmnd) + (struct lpfc_vport *, struct lpfc_io_buf *, + struct lpfc_nodelist *); + int (*lpfc_scsi_prep_cmnd_buf) + (struct lpfc_vport *vport, + struct lpfc_io_buf *lpfc_cmd, + uint8_t tmo); + int (*lpfc_scsi_prep_task_mgmt_cmd) + (struct lpfc_vport *vport, + struct lpfc_io_buf *lpfc_cmd, + u64 lun, u8 task_mgmt_cmd); + + /* IOCB interface function jump table entries */ + int (*__lpfc_sli_issue_iocb) + (struct lpfc_hba *, uint32_t, + struct lpfc_iocbq *, uint32_t); + int (*__lpfc_sli_issue_fcp_io) + (struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *piocb, uint32_t flag); + void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *, + struct lpfc_iocbq *); + int (*lpfc_hba_down_post)(struct lpfc_hba *phba); + + /* MBOX interface function jump table entries */ + int (*lpfc_sli_issue_mbox) + (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); + + /* Slow-path IOCB process function jump table entries */ + void (*lpfc_sli_handle_slow_ring_event) + (struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + uint32_t mask); + + /* INIT device interface function jump table entries */ + int (*lpfc_sli_hbq_to_firmware) + (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *); + int (*lpfc_sli_brdrestart) + (struct lpfc_hba *); + int (*lpfc_sli_brdready) + (struct lpfc_hba *, uint32_t); + void (*lpfc_handle_eratt) + (struct lpfc_hba *); + void (*lpfc_stop_port) + (struct lpfc_hba *); + int (*lpfc_hba_init_link) + (struct lpfc_hba *, uint32_t); + int 
(*lpfc_hba_down_link) + (struct lpfc_hba *, uint32_t); + int (*lpfc_selective_reset) + (struct lpfc_hba *); + + int (*lpfc_bg_scsi_prep_dma_buf) + (struct lpfc_hba *, struct lpfc_io_buf *); + + /* Prep SLI WQE/IOCB jump table entries */ + void (*__lpfc_sli_prep_els_req_rsp)(struct lpfc_iocbq *cmdiocbq, + struct lpfc_vport *vport, + struct lpfc_dmabuf *bmp, + u16 cmd_size, u32 did, u32 elscmd, + u8 tmo, u8 expect_rsp); + void (*__lpfc_sli_prep_gen_req)(struct lpfc_iocbq *cmdiocbq, + struct lpfc_dmabuf *bmp, u16 rpi, + u32 num_entry, u8 tmo); + void (*__lpfc_sli_prep_xmit_seq64)(struct lpfc_iocbq *cmdiocbq, + struct lpfc_dmabuf *bmp, u16 rpi, + u16 ox_id, u32 num_entry, u8 rctl, + u8 last_seq, u8 cr_cx_cmd); + void (*__lpfc_sli_prep_abort_xri)(struct lpfc_iocbq *cmdiocbq, + u16 ulp_context, u16 iotag, + u8 ulp_class, u16 cqid, bool ia, + bool wqec); + + /* expedite pool */ + struct lpfc_epd_pool epd_pool; + + /* SLI4 specific HBA data structure */ + struct lpfc_sli4_hba sli4_hba; + + struct workqueue_struct *wq; + struct delayed_work eq_delay_work; + +#define LPFC_IDLE_STAT_DELAY 1000 + struct delayed_work idle_stat_delay_work; + + struct lpfc_sli sli; + uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */ + uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */ + uint32_t sli3_options; /* Mask of enabled SLI3 options */ +#define LPFC_SLI3_HBQ_ENABLED 0x01 +#define LPFC_SLI3_NPIV_ENABLED 0x02 +#define LPFC_SLI3_VPORT_TEARDOWN 0x04 +#define LPFC_SLI3_CRP_ENABLED 0x08 +#define LPFC_SLI3_BG_ENABLED 0x20 +#define LPFC_SLI3_DSS_ENABLED 0x40 +#define LPFC_SLI4_PERFH_ENABLED 0x80 +#define LPFC_SLI4_PHWQ_ENABLED 0x100 + uint32_t iocb_cmd_size; + uint32_t iocb_rsp_size; + + struct lpfc_trunk_link trunk_link; + enum hba_state link_state; + uint32_t link_flag; /* link state flags */ +#define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */ + /* This flag is set while issuing */ + /* INIT_LINK mailbox command */ +#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ +#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ +#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */ +#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */ +#define LS_CT_VEN_RPA 0x20 /* Vendor RPA sent to switch */ +#define LS_EXTERNAL_LOOPBACK 0x40 /* External loopback plug inserted */ + + uint32_t hba_flag; /* hba generic flags */ +#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ +#define DEFER_ERATT 0x2 /* Deferred error attention in progress */ +#define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */ +#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/ +#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ +#define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */ +#define ELS_XRI_ABORT_EVENT 0x40 /* ELS_XRI abort event was queued */ +#define ASYNC_EVENT 0x80 +#define LINK_DISABLED 0x100 /* Link disabled by user */ +#define FCF_TS_INPROG 0x200 /* FCF table scan in progress */ +#define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */ +#define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */ +#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ +#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ +#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */ +#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */ +#define HBA_FORCED_LINK_SPEED 0x40000 /* + * Firmware supports Forced Link Speed + * capability + */ +#define HBA_FLOGI_ISSUED 0x100000 /* 
FLOGI was issued */ +#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */ +#define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */ +#define HBA_NEEDS_CFG_PORT 0x2000000 /* SLI3 - needs a CONFIG_PORT mbox */ +#define HBA_HBEAT_INP 0x4000000 /* mbox HBEAT is in progress */ +#define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */ +#define HBA_FLOGI_OUTSTANDING 0x10000000 /* FLOGI is outstanding */ +#define HBA_RHBA_CMPL 0x20000000 /* RHBA FDMI command is successful */ + + struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */ + uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ + struct lpfc_dmabuf slim2p; + + MAILBOX_t *mbox; + uint32_t *mbox_ext; + struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx; + uint32_t ha_copy; + struct _PCB *pcb; + struct _IOCB *IOCBs; + + struct lpfc_dmabuf hbqslimp; + + uint16_t pci_cfg_value; + + uint8_t fc_linkspeed; /* Link speed after last READ_LA */ + + uint32_t fc_eventTag; /* event tag for link attention */ + uint32_t link_events; + + /* These fields used to be binfo */ + uint32_t fc_pref_DID; /* preferred D_ID */ + uint8_t fc_pref_ALPA; /* preferred AL_PA */ + uint32_t fc_edtovResol; /* E_D_TOV timer resolution */ + uint32_t fc_edtov; /* E_D_TOV timer value */ + uint32_t fc_arbtov; /* ARB_TOV timer value */ + uint32_t fc_ratov; /* R_A_TOV timer value */ + uint32_t fc_rttov; /* R_T_TOV timer value */ + uint32_t fc_altov; /* AL_TOV timer value */ + uint32_t fc_crtov; /* C_R_TOV timer value */ + + struct serv_parm fc_fabparam; /* fabric service parameters buffer */ + uint8_t alpa_map[128]; /* AL_PA map from READ_LA */ + + uint32_t lmt; + + uint32_t fc_topology; /* link topology, from LINK INIT */ + uint32_t fc_topology_changed; /* link topology, from LINK INIT */ + + struct lpfc_stats fc_stat; + + struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */ + uint32_t nport_event_cnt; /* timestamp for nlplist entry */ + + uint8_t wwnn[8]; + uint8_t wwpn[8]; + uint32_t RandomData[7]; + uint8_t fcp_embed_io; + uint8_t nvmet_support; /* driver supports NVMET */ +#define LPFC_NVMET_MAX_PORTS 32 + uint8_t mds_diags_support; + uint8_t bbcredit_support; + uint8_t enab_exp_wqcq_pages; + u8 nsler; /* Firmware supports FC-NVMe-2 SLER */ + + /* HBA Config Parameters */ + uint32_t cfg_ack0; + uint32_t cfg_xri_rebalancing; + uint32_t cfg_xpsgl; + uint32_t cfg_enable_npiv; + uint32_t cfg_enable_rrq; + uint32_t cfg_topology; + uint32_t cfg_link_speed; +#define LPFC_FCF_FOV 1 /* Fast fcf failover */ +#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */ + uint32_t cfg_fcf_failover_policy; + uint32_t cfg_fcp_io_sched; + uint32_t cfg_ns_query; + uint32_t cfg_fcp2_no_tgt_reset; + uint32_t cfg_cr_delay; + uint32_t cfg_cr_count; + uint32_t cfg_multi_ring_support; + uint32_t cfg_multi_ring_rctl; + uint32_t cfg_multi_ring_type; + uint32_t cfg_poll; + uint32_t cfg_poll_tmo; + uint32_t cfg_task_mgmt_tmo; + uint32_t cfg_use_msi; + uint32_t cfg_auto_imax; + uint32_t cfg_fcp_imax; + uint32_t cfg_force_rscn; + uint32_t cfg_cq_poll_threshold; + uint32_t cfg_cq_max_proc_limit; + uint32_t cfg_fcp_cpu_map; + uint32_t cfg_fcp_mq_threshold; + uint32_t cfg_hdw_queue; + uint32_t cfg_irq_chann; + uint32_t cfg_suppress_rsp; + uint32_t cfg_nvme_oas; + uint32_t cfg_nvme_embed_cmd; + uint32_t cfg_nvmet_mrq_post; + uint32_t cfg_nvmet_mrq; + uint32_t cfg_enable_nvmet; + uint32_t cfg_nvme_enable_fb; + uint32_t cfg_nvmet_fb_size; + uint32_t cfg_total_seg_cnt; + uint32_t cfg_sg_seg_cnt; + uint32_t cfg_nvme_seg_cnt; + 
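/*
 * Editor's note -- illustrative sketch only, not part of the imported
 * source: cfg_enable_fc4_type (declared just below) is a bit mask built
 * from the LPFC_ENABLE_FCP and LPFC_ENABLE_NVME values that follow, with
 * LPFC_ENABLE_BOTH being the OR of the two.  A hypothetical check for
 * NVMe support would therefore look roughly like this (helper name
 * invented for illustration; kept inside this comment because the
 * surrounding context is the middle of struct lpfc_hba):
 *
 *	static inline bool lpfc_demo_nvme_enabled(struct lpfc_hba *phba)
 *	{
 *		return (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) != 0;
 *	}
 */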
uint32_t cfg_scsi_seg_cnt; + uint32_t cfg_sg_dma_buf_size; + uint32_t cfg_hba_queue_depth; + uint32_t cfg_enable_hba_reset; + uint32_t cfg_enable_hba_heartbeat; + uint32_t cfg_fof; + uint32_t cfg_EnableXLane; + uint8_t cfg_oas_tgt_wwpn[8]; + uint8_t cfg_oas_vpt_wwpn[8]; + uint32_t cfg_oas_lun_state; +#define OAS_LUN_ENABLE 1 +#define OAS_LUN_DISABLE 0 + uint32_t cfg_oas_lun_status; +#define OAS_LUN_STATUS_EXISTS 0x01 + uint32_t cfg_oas_flags; +#define OAS_FIND_ANY_VPORT 0x01 +#define OAS_FIND_ANY_TARGET 0x02 +#define OAS_LUN_VALID 0x04 + uint32_t cfg_oas_priority; + uint32_t cfg_XLanePriority; + uint32_t cfg_enable_bg; + uint32_t cfg_prot_mask; + uint32_t cfg_prot_guard; + uint32_t cfg_hostmem_hgp; + uint32_t cfg_log_verbose; + uint32_t cfg_enable_fc4_type; +#define LPFC_ENABLE_FCP 1 +#define LPFC_ENABLE_NVME 2 +#define LPFC_ENABLE_BOTH 3 +#if (IS_ENABLED(CONFIG_NVME_FC)) +#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH +#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH +#else +#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP +#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP +#endif + uint32_t cfg_sriov_nr_virtfn; + uint32_t cfg_request_firmware_upgrade; + uint32_t cfg_suppress_link_up; + uint32_t cfg_rrq_xri_bitmap_sz; + u32 cfg_fcp_wait_abts_rsp; + uint32_t cfg_delay_discovery; + uint32_t cfg_sli_mode; +#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ +#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ +#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ + uint32_t cfg_fdmi_on; +#define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */ +#define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */ + uint32_t cfg_enable_SmartSAN; + uint32_t cfg_enable_mds_diags; + uint32_t cfg_ras_fwlog_level; + uint32_t cfg_ras_fwlog_buffsize; + uint32_t cfg_ras_fwlog_func; + uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */ + uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */ + uint32_t cfg_enable_pbde; + uint32_t cfg_enable_mi; + struct nvmet_fc_target_port *targetport; + lpfc_vpd_t vpd; /* vital product data */ + + u32 cfg_max_vmid; /* maximum VMIDs allowed per port */ + u32 cfg_vmid_app_header; +#define LPFC_VMID_APP_HEADER_DISABLE 0 +#define LPFC_VMID_APP_HEADER_ENABLE 1 + u32 cfg_vmid_priority_tagging; + u32 cfg_vmid_inactivity_timeout; /* Time after which the VMID */ + /* deregisters from switch */ + struct pci_dev *pcidev; + struct list_head work_list; + uint32_t work_ha; /* Host Attention Bits for WT */ + uint32_t work_ha_mask; /* HA Bits owned by WT */ + uint32_t work_hs; /* HS stored in case of ERRAT */ + uint32_t work_status[2]; /* Extra status from SLIM */ + + wait_queue_head_t work_waitq; + struct task_struct *worker_thread; + unsigned long data_flags; + uint32_t border_sge_num; + + uint32_t hbq_in_use; /* HBQs in use flag */ + uint32_t hbq_count; /* Count of configured HBQs */ + struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ + + atomic_t fcp_qidx; /* next FCP WQ (RR Policy) */ + atomic_t nvme_qidx; /* next NVME WQ (RR Policy) */ + + phys_addr_t pci_bar0_map; /* Physical address for PCI BAR0 */ + phys_addr_t pci_bar1_map; /* Physical address for PCI BAR1 */ + phys_addr_t pci_bar2_map; /* Physical address for PCI BAR2 */ + void __iomem *slim_memmap_p; /* Kernel memory mapped address for + PCI BAR0 */ + void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for + PCI BAR2 */ + + void __iomem *pci_bar0_memmap_p; /* Kernel memory mapped address for + PCI BAR0 with dual-ULP support */ + void __iomem *pci_bar2_memmap_p; /* 
Kernel memory mapped address for + PCI BAR2 with dual-ULP support */ + void __iomem *pci_bar4_memmap_p; /* Kernel memory mapped address for + PCI BAR4 with dual-ULP support */ +#define PCI_64BIT_BAR0 0 +#define PCI_64BIT_BAR2 2 +#define PCI_64BIT_BAR4 4 + void __iomem *MBslimaddr; /* virtual address for mbox cmds */ + void __iomem *HAregaddr; /* virtual address for host attn reg */ + void __iomem *CAregaddr; /* virtual address for chip attn reg */ + void __iomem *HSregaddr; /* virtual address for host status + reg */ + void __iomem *HCregaddr; /* virtual address for host ctl reg */ + + struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */ + struct lpfc_pgp *port_gp; + uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */ + uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */ + + int brd_no; /* FC board number */ + char SerialNumber[32]; /* adapter Serial Number */ + char OptionROMVersion[32]; /* adapter BIOS / Fcode version */ + char BIOSVersion[16]; /* Boot BIOS version */ + char ModelDesc[256]; /* Model Description */ + char ModelName[80]; /* Model Name */ + char ProgramType[256]; /* Program Type */ + char Port[20]; /* Port No */ + uint8_t vpd_flag; /* VPD data flag */ + +#define VPD_MODEL_DESC 0x1 /* valid vpd model description */ +#define VPD_MODEL_NAME 0x2 /* valid vpd model name */ +#define VPD_PROGRAM_TYPE 0x4 /* valid vpd program type */ +#define VPD_PORT 0x8 /* valid vpd port data */ +#define VPD_MASK 0xf /* mask for any vpd data */ + + + struct timer_list fcp_poll_timer; + struct timer_list eratt_poll; + uint32_t eratt_poll_interval; + + uint64_t bg_guard_err_cnt; + uint64_t bg_apptag_err_cnt; + uint64_t bg_reftag_err_cnt; + + /* fastpath list. */ + spinlock_t scsi_buf_list_get_lock; /* SCSI buf alloc list lock */ + spinlock_t scsi_buf_list_put_lock; /* SCSI buf free list lock */ + struct list_head lpfc_scsi_buf_list_get; + struct list_head lpfc_scsi_buf_list_put; + uint32_t total_scsi_bufs; + struct list_head lpfc_iocb_list; + uint32_t total_iocbq_bufs; + struct list_head active_rrq_list; + spinlock_t hbalock; + struct work_struct unblock_request_work; /* SCSI layer unblock IOs */ + + /* dma_mem_pools */ + struct dma_pool *lpfc_sg_dma_buf_pool; + struct dma_pool *lpfc_mbuf_pool; + struct dma_pool *lpfc_hrb_pool; /* header receive buffer pool */ + struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */ + struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */ + struct dma_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ + struct dma_pool *lpfc_cmd_rsp_buf_pool; + struct lpfc_dma_pool lpfc_mbuf_safety_pool; + + mempool_t *mbox_mem_pool; + mempool_t *nlp_mem_pool; + mempool_t *rrq_pool; + mempool_t *active_rrq_pool; + + struct fc_host_statistics link_stats; + enum lpfc_irq_chann_mode irq_chann_mode; + enum intr_type_t intr_type; + uint32_t intr_mode; +#define LPFC_INTR_ERROR 0xFFFFFFFF + struct list_head port_list; + spinlock_t port_list_lock; /* lock for port_list mutations */ + struct lpfc_vport *pport; /* physical lpfc_vport pointer */ + uint16_t max_vpi; /* Maximum virtual nports */ +#define LPFC_MAX_VPI 0xFF /* Max number VPI supported 0 - 0xff */ +#define LPFC_MAX_VPORTS 0x100 /* Max vports per port, with pport */ + uint16_t max_vports; /* + * For IOV HBAs max_vpi can change + * after a reset. max_vports is max + * number of vports present. This can + * be greater than max_vpi. 
+ */ + uint16_t vpi_base; + uint16_t vfi_base; + unsigned long *vpi_bmask; /* vpi allocation table */ + uint16_t *vpi_ids; + uint16_t vpi_count; + struct list_head lpfc_vpi_blk_list; + + /* Data structure used by fabric iocb scheduler */ + struct list_head fabric_iocb_list; + atomic_t fabric_iocb_count; + struct timer_list fabric_block_timer; + unsigned long bit_flags; + atomic_t num_rsrc_err; + atomic_t num_cmd_success; + unsigned long last_rsrc_error_time; + unsigned long last_ramp_down_time; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + struct dentry *hba_debugfs_root; + atomic_t debugfs_vport_count; + struct dentry *debug_multixri_pools; + struct dentry *debug_hbqinfo; + struct dentry *debug_dumpHostSlim; + struct dentry *debug_dumpHBASlim; + struct dentry *debug_InjErrLBA; /* LBA to inject errors at */ + struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */ + struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */ + struct dentry *debug_writeGuard; /* inject write guard_tag errors */ + struct dentry *debug_writeApp; /* inject write app_tag errors */ + struct dentry *debug_writeRef; /* inject write ref_tag errors */ + struct dentry *debug_readGuard; /* inject read guard_tag errors */ + struct dentry *debug_readApp; /* inject read app_tag errors */ + struct dentry *debug_readRef; /* inject read ref_tag errors */ + + struct dentry *debug_nvmeio_trc; + struct lpfc_debugfs_nvmeio_trc *nvmeio_trc; + struct dentry *debug_hdwqinfo; +#ifdef LPFC_HDWQ_LOCK_STAT + struct dentry *debug_lockstat; +#endif + struct dentry *debug_cgn_buffer; + struct dentry *debug_rx_monitor; + struct dentry *debug_ras_log; + atomic_t nvmeio_trc_cnt; + uint32_t nvmeio_trc_size; + uint32_t nvmeio_trc_output_idx; + + /* T10 DIF error injection */ + uint32_t lpfc_injerr_wgrd_cnt; + uint32_t lpfc_injerr_wapp_cnt; + uint32_t lpfc_injerr_wref_cnt; + uint32_t lpfc_injerr_rgrd_cnt; + uint32_t lpfc_injerr_rapp_cnt; + uint32_t lpfc_injerr_rref_cnt; + uint32_t lpfc_injerr_nportid; + struct lpfc_name lpfc_injerr_wwpn; + sector_t lpfc_injerr_lba; +#define LPFC_INJERR_LBA_OFF (sector_t)(-1) + + struct dentry *debug_slow_ring_trc; + struct lpfc_debugfs_trc *slow_ring_trc; + atomic_t slow_ring_trc_cnt; + /* iDiag debugfs sub-directory */ + struct dentry *idiag_root; + struct dentry *idiag_pci_cfg; + struct dentry *idiag_bar_acc; + struct dentry *idiag_que_info; + struct dentry *idiag_que_acc; + struct dentry *idiag_drb_acc; + struct dentry *idiag_ctl_acc; + struct dentry *idiag_mbx_acc; + struct dentry *idiag_ext_acc; + uint8_t lpfc_idiag_last_eq; +#endif + uint16_t nvmeio_trc_on; + + /* Used for deferred freeing of ELS data buffers */ + struct list_head elsbuf; + int elsbuf_cnt; + int elsbuf_prev_cnt; + + uint8_t temp_sensor_support; + /* Fields used for heart beat. */ + unsigned long last_completion_time; + unsigned long skipped_hb; + struct timer_list hb_tmofunc; + struct timer_list rrq_tmr; + enum hba_temp_state over_temp_state; + /* + * Following bit will be set for all buffer tags which are not + * associated with any HBQ. 
+ */ +#define QUE_BUFTAG_BIT (1<<31) + uint32_t buffer_tag_count; + +/* Maximum number of events that can be outstanding at any time*/ +#define LPFC_MAX_EVT_COUNT 512 + atomic_t fast_event_count; + uint32_t fcoe_eventtag; + uint32_t fcoe_eventtag_at_fcf_scan; + uint32_t fcoe_cvl_eventtag; + uint32_t fcoe_cvl_eventtag_attn; + struct lpfc_fcf fcf; + uint8_t fc_map[3]; + uint8_t valid_vlan; + uint16_t vlan_id; + struct list_head fcf_conn_rec_list; + + bool defer_flogi_acc_flag; + uint16_t defer_flogi_acc_rx_id; + uint16_t defer_flogi_acc_ox_id; + + spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */ + struct list_head ct_ev_waiters; + struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX]; + uint32_t ctx_idx; + struct timer_list inactive_vmid_poll; + + /* RAS Support */ + struct lpfc_ras_fwlog ras_fwlog; + + uint32_t iocb_cnt; + uint32_t iocb_max; + atomic_t sdev_cnt; + spinlock_t devicelock; /* lock for luns list */ + mempool_t *device_data_mem_pool; + struct list_head luns; +#define LPFC_TRANSGRESSION_HIGH_TEMPERATURE 0x0080 +#define LPFC_TRANSGRESSION_LOW_TEMPERATURE 0x0040 +#define LPFC_TRANSGRESSION_HIGH_VOLTAGE 0x0020 +#define LPFC_TRANSGRESSION_LOW_VOLTAGE 0x0010 +#define LPFC_TRANSGRESSION_HIGH_TXBIAS 0x0008 +#define LPFC_TRANSGRESSION_LOW_TXBIAS 0x0004 +#define LPFC_TRANSGRESSION_HIGH_TXPOWER 0x0002 +#define LPFC_TRANSGRESSION_LOW_TXPOWER 0x0001 +#define LPFC_TRANSGRESSION_HIGH_RXPOWER 0x8000 +#define LPFC_TRANSGRESSION_LOW_RXPOWER 0x4000 + uint16_t sfp_alarm; + uint16_t sfp_warning; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint16_t hdwqstat_on; +#define LPFC_CHECK_OFF 0 +#define LPFC_CHECK_NVME_IO 1 +#define LPFC_CHECK_NVMET_IO 2 +#define LPFC_CHECK_SCSI_IO 4 + uint16_t ktime_on; + uint64_t ktime_data_samples; + uint64_t ktime_status_samples; + uint64_t ktime_last_cmd; + uint64_t ktime_seg1_total; + uint64_t ktime_seg1_min; + uint64_t ktime_seg1_max; + uint64_t ktime_seg2_total; + uint64_t ktime_seg2_min; + uint64_t ktime_seg2_max; + uint64_t ktime_seg3_total; + uint64_t ktime_seg3_min; + uint64_t ktime_seg3_max; + uint64_t ktime_seg4_total; + uint64_t ktime_seg4_min; + uint64_t ktime_seg4_max; + uint64_t ktime_seg5_total; + uint64_t ktime_seg5_min; + uint64_t ktime_seg5_max; + uint64_t ktime_seg6_total; + uint64_t ktime_seg6_min; + uint64_t ktime_seg6_max; + uint64_t ktime_seg7_total; + uint64_t ktime_seg7_min; + uint64_t ktime_seg7_max; + uint64_t ktime_seg8_total; + uint64_t ktime_seg8_min; + uint64_t ktime_seg8_max; + uint64_t ktime_seg9_total; + uint64_t ktime_seg9_min; + uint64_t ktime_seg9_max; + uint64_t ktime_seg10_total; + uint64_t ktime_seg10_min; + uint64_t ktime_seg10_max; +#endif + /* CMF objects */ + struct lpfc_cgn_stat __percpu *cmf_stat; + uint32_t cmf_interval_rate; /* timer interval limit in ms */ + uint32_t cmf_timer_cnt; +#define LPFC_CMF_INTERVAL 90 + uint64_t cmf_link_byte_count; + uint64_t cmf_max_line_rate; + uint64_t cmf_max_bytes_per_interval; + uint64_t cmf_last_sync_bw; +#define LPFC_CMF_BLK_SIZE 512 + struct hrtimer cmf_timer; + struct hrtimer cmf_stats_timer; /* 1 minute stats timer */ + atomic_t cmf_bw_wait; + atomic_t cmf_busy; + atomic_t cmf_stop_io; /* To block request and stop IO's */ + uint32_t cmf_active_mode; + uint32_t cmf_info_per_interval; +#define LPFC_MAX_CMF_INFO 32 + struct timespec64 cmf_latency; /* Interval congestion timestamp */ + uint32_t cmf_last_ts; /* Interval congestion time (ms) */ + uint32_t cmf_active_info; + + /* Signal / FPIN handling for Congestion Mgmt */ + u8 cgn_reg_fpin; /* Negotiated value from RDF */ + u8 
cgn_init_reg_fpin; /* Initial value from READ_CONFIG */ +#define LPFC_CGN_FPIN_NONE 0x0 +#define LPFC_CGN_FPIN_WARN 0x1 +#define LPFC_CGN_FPIN_ALARM 0x2 +#define LPFC_CGN_FPIN_BOTH (LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM) + + u8 cgn_reg_signal; /* Negotiated value from EDC */ + u8 cgn_init_reg_signal; /* Initial value from READ_CONFIG */ + /* cgn_reg_signal and cgn_init_reg_signal use + * enum fc_edc_cg_signal_cap_types + */ + u16 cgn_fpin_frequency; /* In units of msecs */ +#define LPFC_FPIN_INIT_FREQ 0xffff + u32 cgn_sig_freq; + u32 cgn_acqe_cnt; + + /* RX monitor handling for CMF */ + struct lpfc_rx_info_monitor *rx_monitor; + atomic_t rx_max_read_cnt; /* Maximum read bytes */ + uint64_t rx_block_cnt; + + /* Congestion parameters from flash */ + struct lpfc_cgn_param cgn_p; + + /* Statistics counter for ACQE cgn alarms and warnings */ + struct lpfc_cgn_acqe_stat cgn_acqe_stat; + + /* Congestion buffer information */ + struct lpfc_dmabuf *cgn_i; /* Congestion Info buffer */ + atomic_t cgn_fabric_warn_cnt; /* Total warning cgn events for info */ + atomic_t cgn_fabric_alarm_cnt; /* Total alarm cgn events for info */ + atomic_t cgn_sync_warn_cnt; /* Total warning events for SYNC wqe */ + atomic_t cgn_sync_alarm_cnt; /* Total alarm events for SYNC wqe */ + atomic_t cgn_driver_evt_cnt; /* Total driver cgn events for fmw */ + atomic_t cgn_latency_evt_cnt; + atomic64_t cgn_latency_evt; /* Avg latency per minute */ + unsigned long cgn_evt_timestamp; +#define LPFC_CGN_TIMER_TO_MIN 60000 /* ms in a minute */ + uint32_t cgn_evt_minute; +#define LPFC_SEC_MIN 60UL +#define LPFC_MIN_HOUR 60 +#define LPFC_HOUR_DAY 24 +#define LPFC_MIN_DAY (LPFC_MIN_HOUR * LPFC_HOUR_DAY) + + struct hlist_node cpuhp; /* used for cpuhp per hba callback */ + struct timer_list cpuhp_poll_timer; + struct list_head poll_list; /* slowpath eq polling list */ +#define LPFC_POLL_HB 1 /* slowpath heartbeat */ + + char os_host_name[MAXHOSTNAMELEN]; + + /* LD Signaling */ + u32 degrade_activate_threshold; + u32 degrade_deactivate_threshold; + u32 fec_degrade_interval; + + atomic_t dbg_log_idx; + atomic_t dbg_log_cnt; + atomic_t dbg_log_dmping; + struct dbg_log_ent dbg_log[DBG_LOG_SZ]; +}; + +#define LPFC_MAX_RXMONITOR_ENTRY 800 +#define LPFC_MAX_RXMONITOR_DUMP 32 +struct rx_info_entry { + uint64_t cmf_bytes; /* Total no of read bytes for CMF_SYNC_WQE */ + uint64_t total_bytes; /* Total no of read bytes requested */ + uint64_t rcv_bytes; /* Total no of read bytes completed */ + uint64_t avg_io_size; + uint64_t avg_io_latency;/* Average io latency in microseconds */ + uint64_t max_read_cnt; /* Maximum read bytes */ + uint64_t max_bytes_per_interval; + uint32_t cmf_busy; + uint32_t cmf_info; /* CMF_SYNC_WQE info */ + uint32_t io_cnt; + uint32_t timer_utilization; + uint32_t timer_interval; +}; + +struct lpfc_rx_info_monitor { + struct rx_info_entry *ring; /* info organized in a circular buffer */ + u32 head_idx, tail_idx; /* index to head/tail of ring */ + spinlock_t lock; /* spinlock for ring */ + u32 entries; /* storing number entries/size of ring */ +}; + +static inline struct Scsi_Host * +lpfc_shost_from_vport(struct lpfc_vport *vport) +{ + return container_of((void *) vport, struct Scsi_Host, hostdata[0]); +} + +static inline void +lpfc_set_loopback_flag(struct lpfc_hba *phba) +{ + if (phba->cfg_topology == FLAGS_LOCAL_LB) + phba->link_flag |= LS_LOOPBACK_MODE; + else + phba->link_flag &= ~LS_LOOPBACK_MODE; +} + +static inline int +lpfc_is_link_up(struct lpfc_hba *phba) +{ + return phba->link_state == LPFC_LINK_UP || + 
phba->link_state == LPFC_CLEAR_LA || + phba->link_state == LPFC_HBA_READY; +} + +static inline void +lpfc_worker_wake_up(struct lpfc_hba *phba) +{ + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + + /* Wake up worker thread */ + wake_up(&phba->work_waitq); + return; +} + +static inline int +lpfc_readl(void __iomem *addr, uint32_t *data) +{ + uint32_t temp; + temp = readl(addr); + if (temp == 0xffffffff) + return -EIO; + *data = temp; + return 0; +} + +static inline int +lpfc_sli_read_hs(struct lpfc_hba *phba) +{ + /* + * There was a link/board error. Read the status register to retrieve + * the error event and process it. + */ + phba->sli.slistat.err_attn_event++; + + /* Save status info and check for unplug error */ + if (lpfc_readl(phba->HSregaddr, &phba->work_hs) || + lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) || + lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) { + return -EIO; + } + + /* Clear chip Host Attention error bit */ + writel(HA_ERATT, phba->HAregaddr); + readl(phba->HAregaddr); /* flush */ + phba->pport->stopped = 1; + + return 0; +} + +static inline struct lpfc_sli_ring * +lpfc_phba_elsring(struct lpfc_hba *phba) +{ + /* Return NULL if sli_rev has become invalid due to bad fw */ + if (phba->sli_rev != LPFC_SLI_REV4 && + phba->sli_rev != LPFC_SLI_REV3 && + phba->sli_rev != LPFC_SLI_REV2) + return NULL; + + if (phba->sli_rev == LPFC_SLI_REV4) { + if (phba->sli4_hba.els_wq) + return phba->sli4_hba.els_wq->pring; + else + return NULL; + } + return &phba->sli.sli3_ring[LPFC_ELS_RING]; +} + +/** + * lpfc_next_online_cpu - Finds next online CPU on cpumask + * @mask: Pointer to phba's cpumask member. + * @start: starting cpu index + * + * Note: If no valid cpu found, then nr_cpu_ids is returned. + * + **/ +static inline unsigned int +lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start) +{ + unsigned int cpu_it; + + for_each_cpu_wrap(cpu_it, mask, start) { + if (cpu_online(cpu_it)) + break; + } + + return cpu_it; +} +/** + * lpfc_next_present_cpu - Finds next present CPU after n + * @n: the cpu prior to search + * + * Note: If no next present cpu, then fallback to first present cpu. + * + **/ +static inline unsigned int lpfc_next_present_cpu(int n) +{ + unsigned int cpu; + + cpu = cpumask_next(n, cpu_present_mask); + + if (cpu >= nr_cpu_ids) + cpu = cpumask_first(cpu_present_mask); + + return cpu; +} + +/** + * lpfc_sli4_mod_hba_eq_delay - update EQ delay + * @phba: Pointer to HBA context object. + * @q: The Event Queue to update. + * @delay: The delay value (in us) to be written. + * + **/ +static inline void +lpfc_sli4_mod_hba_eq_delay(struct lpfc_hba *phba, struct lpfc_queue *eq, + u32 delay) +{ + struct lpfc_register reg_data; + + reg_data.word0 = 0; + bf_set(lpfc_sliport_eqdelay_id, ®_data, eq->queue_id); + bf_set(lpfc_sliport_eqdelay_delay, ®_data, delay); + writel(reg_data.word0, phba->sli4_hba.u.if_type2.EQDregaddr); + eq->q_mode = delay; +} + + +/* + * Macro that declares tables and a routine to perform enum type to + * ascii string lookup. + * + * Defines a table for an enum. Uses xxx_INIT defines for + * the enum to populate the table. Macro defines a routine (named + * by caller) that will search all elements of the table for the key + * and return the name string if found or "Unrecognized" if not found. 
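/*
 * Editor's aside -- illustrative sketch only, not part of the upstream patch.
 * lpfc_next_online_cpu() above walks a cpumask starting at 'start', wraps
 * around, and returns nr_cpu_ids when nothing qualifies.  The same
 * wrap-around search over a plain boolean array (hypothetical helper,
 * user-space C):
 */
static unsigned int example_next_set_wrap(const int *online, unsigned int n,
					  unsigned int start)
{
	unsigned int i, idx;

	for (i = 0; i < n; i++) {
		idx = (start + i) % n;
		if (online[idx])
			return idx;	/* first online entry at or after start */
	}
	return n;			/* none found: mirrors nr_cpu_ids */
}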
+ */ +#define DECLARE_ENUM2STR_LOOKUP(routine, enum_name, enum_init) \ +static struct { \ + enum enum_name value; \ + char *name; \ +} fc_##enum_name##_e2str_names[] = enum_init; \ +static const char *routine(enum enum_name table_key) \ +{ \ + int i; \ + char *name = "Unrecognized"; \ + \ + for (i = 0; i < ARRAY_SIZE(fc_##enum_name##_e2str_names); i++) {\ + if (fc_##enum_name##_e2str_names[i].value == table_key) {\ + name = fc_##enum_name##_e2str_names[i].name; \ + break; \ + } \ + } \ + return name; \ +} + +/** + * lpfc_is_vmid_enabled - returns if VMID is enabled for either switch types + * @phba: Pointer to HBA context object. + * + * Relationship between the enable, target support and if vmid tag is required + * for the particular combination + * --------------------------------------------------- + * Switch Enable Flag Target Support VMID Needed + * --------------------------------------------------- + * App Id 0 NA N + * App Id 1 0 N + * App Id 1 1 Y + * Pr Tag 0 NA N + * Pr Tag 1 0 N + * Pr Tag 1 1 Y + * Pr Tag 2 * Y + --------------------------------------------------- + * + **/ +static inline int lpfc_is_vmid_enabled(struct lpfc_hba *phba) +{ + return phba->cfg_vmid_app_header || phba->cfg_vmid_priority_tagging; +} + +static inline +u8 get_job_ulpstatus(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +{ + if (phba->sli_rev == LPFC_SLI_REV4) + return bf_get(lpfc_wcqe_c_status, &iocbq->wcqe_cmpl); + else + return iocbq->iocb.ulpStatus; +} + +static inline +u32 get_job_word4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +{ + if (phba->sli_rev == LPFC_SLI_REV4) + return iocbq->wcqe_cmpl.parameter; + else + return iocbq->iocb.un.ulpWord[4]; +} + +static inline +u8 get_job_cmnd(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +{ + if (phba->sli_rev == LPFC_SLI_REV4) + return bf_get(wqe_cmnd, &iocbq->wqe.generic.wqe_com); + else + return iocbq->iocb.ulpCommand; +} + +static inline +u16 get_job_ulpcontext(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +{ + if (phba->sli_rev == LPFC_SLI_REV4) + return bf_get(wqe_ctxt_tag, &iocbq->wqe.generic.wqe_com); + else + return iocbq->iocb.ulpContext; +} + +static inline +u16 get_job_rcvoxid(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +{ + if (phba->sli_rev == LPFC_SLI_REV4) + return bf_get(wqe_rcvoxid, &iocbq->wqe.generic.wqe_com); + else + return iocbq->iocb.unsli3.rcvsli3.ox_id; +} + +static inline +u32 get_job_data_placed(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +{ + if (phba->sli_rev == LPFC_SLI_REV4) + return iocbq->wcqe_cmpl.total_data_placed; + else + return iocbq->iocb.un.genreq64.bdl.bdeSize; +} + +static inline +u32 get_job_abtsiotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +{ + if (phba->sli_rev == LPFC_SLI_REV4) + return iocbq->wqe.abort_cmd.wqe_com.abort_tag; + else + return iocbq->iocb.un.acxri.abortIoTag; +} + +static inline +u32 get_job_els_rsp64_did(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +{ + if (phba->sli_rev == LPFC_SLI_REV4) + return bf_get(wqe_els_did, &iocbq->wqe.els_req.wqe_dest); + else + return iocbq->iocb.un.elsreq64.remoteID; +} diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c new file mode 100644 index 000000000..b1c9107d3 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -0,0 +1,7415 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. 
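/*
 * Editor's aside -- illustrative sketch only, not part of the upstream patch.
 * Typical use of DECLARE_ENUM2STR_LOOKUP() above: pair an enum with an
 * "_INIT" table macro and let the macro emit both the table and the lookup
 * routine.  The enum, table and routine names below are hypothetical; the
 * expansion relies on the kernel's ARRAY_SIZE(), as in the driver itself.
 */
enum example_evt { EXAMPLE_EVT_LINK_UP, EXAMPLE_EVT_LINK_DOWN };

#define EXAMPLE_EVT_INIT {				\
	{ EXAMPLE_EVT_LINK_UP,   "Link Up"   },		\
	{ EXAMPLE_EVT_LINK_DOWN, "Link Down" },		\
}

/* Emits fc_example_evt_e2str_names[] and example_evt_to_str(). */
DECLARE_ENUM2STR_LOOKUP(example_evt_to_str, example_evt, EXAMPLE_EVT_INIT);

/* example_evt_to_str(EXAMPLE_EVT_LINK_UP) returns "Link Up";
 * any value not in the table returns "Unrecognized".
 */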
The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_logmsg.h" +#include "lpfc_version.h" +#include "lpfc_compat.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_attr.h" + +#define LPFC_DEF_DEVLOSS_TMO 30 +#define LPFC_MIN_DEVLOSS_TMO 1 +#define LPFC_MAX_DEVLOSS_TMO 255 + +#define LPFC_MAX_INFO_TMP_LEN 100 +#define LPFC_INFO_MORE_STR "\nCould be more info...\n" +/* + * Write key size should be multiple of 4. If write key is changed + * make sure that library write key is also changed. + */ +#define LPFC_REG_WRITE_KEY_SIZE 4 +#define LPFC_REG_WRITE_KEY "EMLX" + +const char *const trunk_errmsg[] = { /* map errcode */ + "", /* There is no such error code at index 0*/ + "link negotiated speed does not match existing" + " trunk - link was \"low\" speed", + "link negotiated speed does not match" + " existing trunk - link was \"middle\" speed", + "link negotiated speed does not match existing" + " trunk - link was \"high\" speed", + "Attached to non-trunking port - F_Port", + "Attached to non-trunking port - N_Port", + "FLOGI response timeout", + "non-FLOGI frame received", + "Invalid FLOGI response", + "Trunking initialization protocol", + "Trunk peer device mismatch", +}; + +/** + * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules + * @incr: integer to convert. + * @hdw: ascii string holding converted integer plus a string terminator. + * + * Description: + * JEDEC Joint Electron Device Engineering Council. + * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii + * character string. The string is then terminated with a NULL in byte 9. + * Hex 0-9 becomes ascii '0' to '9'. + * Hex a-f becomes ascii '=' to 'B' capital B. + * + * Notes: + * Coded for 32 bit integers only. 
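/*
 * Editor's aside -- illustrative sketch only, not part of the upstream patch.
 * trunk_errmsg[] above maps a trunk fault code to a message, with index 0
 * reserved for "no error".  A caller would normally bound-check the code
 * before indexing; hypothetical helper:
 */
#define EXAMPLE_TRUNK_ERRMSG_CNT 11	/* number of entries listed above */

static const char *example_trunk_errmsg(unsigned int fault)
{
	if (fault >= EXAMPLE_TRUNK_ERRMSG_CNT)
		return "Unknown trunk fault";
	return trunk_errmsg[fault];
}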
+ **/ +static void +lpfc_jedec_to_ascii(int incr, char hdw[]) +{ + int i, j; + for (i = 0; i < 8; i++) { + j = (incr & 0xf); + if (j <= 9) + hdw[7 - i] = 0x30 + j; + else + hdw[7 - i] = 0x61 + j - 10; + incr = (incr >> 4); + } + hdw[8] = 0; + return; +} + +static ssize_t +lpfc_cmf_info_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_cgn_info *cp = NULL; + struct lpfc_cgn_stat *cgs; + int len = 0; + int cpu; + u64 rcv, total; + char tmp[LPFC_MAX_INFO_TMP_LEN] = {0}; + + if (phba->cgn_i) + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + + scnprintf(tmp, sizeof(tmp), + "Congestion Mgmt Info: E2Eattr %d Ver %d " + "CMF %d cnt %d\n", + phba->sli4_hba.pc_sli4_params.mi_cap, + cp ? cp->cgn_info_version : 0, + phba->sli4_hba.pc_sli4_params.cmf, phba->cmf_timer_cnt); + + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + if (!phba->sli4_hba.pc_sli4_params.cmf) + goto buffer_done; + + switch (phba->cgn_init_reg_signal) { + case EDC_CG_SIG_WARN_ONLY: + scnprintf(tmp, sizeof(tmp), + "Register: Init: Signal:WARN "); + break; + case EDC_CG_SIG_WARN_ALARM: + scnprintf(tmp, sizeof(tmp), + "Register: Init: Signal:WARN|ALARM "); + break; + default: + scnprintf(tmp, sizeof(tmp), + "Register: Init: Signal:NONE "); + break; + } + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + switch (phba->cgn_init_reg_fpin) { + case LPFC_CGN_FPIN_WARN: + scnprintf(tmp, sizeof(tmp), + "FPIN:WARN\n"); + break; + case LPFC_CGN_FPIN_ALARM: + scnprintf(tmp, sizeof(tmp), + "FPIN:ALARM\n"); + break; + case LPFC_CGN_FPIN_BOTH: + scnprintf(tmp, sizeof(tmp), + "FPIN:WARN|ALARM\n"); + break; + default: + scnprintf(tmp, sizeof(tmp), + "FPIN:NONE\n"); + break; + } + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + switch (phba->cgn_reg_signal) { + case EDC_CG_SIG_WARN_ONLY: + scnprintf(tmp, sizeof(tmp), + " Current: Signal:WARN "); + break; + case EDC_CG_SIG_WARN_ALARM: + scnprintf(tmp, sizeof(tmp), + " Current: Signal:WARN|ALARM "); + break; + default: + scnprintf(tmp, sizeof(tmp), + " Current: Signal:NONE "); + break; + } + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + switch (phba->cgn_reg_fpin) { + case LPFC_CGN_FPIN_WARN: + scnprintf(tmp, sizeof(tmp), + "FPIN:WARN ACQEcnt:%d\n", phba->cgn_acqe_cnt); + break; + case LPFC_CGN_FPIN_ALARM: + scnprintf(tmp, sizeof(tmp), + "FPIN:ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt); + break; + case LPFC_CGN_FPIN_BOTH: + scnprintf(tmp, sizeof(tmp), + "FPIN:WARN|ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt); + break; + default: + scnprintf(tmp, sizeof(tmp), + "FPIN:NONE ACQEcnt:%d\n", phba->cgn_acqe_cnt); + break; + } + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + if (phba->cmf_active_mode != phba->cgn_p.cgn_param_mode) { + switch (phba->cmf_active_mode) { + case LPFC_CFG_OFF: + scnprintf(tmp, sizeof(tmp), "Active: Mode:Off\n"); + break; + case LPFC_CFG_MANAGED: + scnprintf(tmp, sizeof(tmp), "Active: Mode:Managed\n"); + break; + case LPFC_CFG_MONITOR: + scnprintf(tmp, sizeof(tmp), "Active: Mode:Monitor\n"); + break; + default: + scnprintf(tmp, sizeof(tmp), "Active: Mode:Unknown\n"); + } + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + } + + switch (phba->cgn_p.cgn_param_mode) { + case LPFC_CFG_OFF: + scnprintf(tmp, sizeof(tmp), "Config: Mode:Off "); + break; + case 
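/*
 * Editor's aside -- illustrative sketch only, not part of the upstream patch.
 * lpfc_cmf_info_show() above formats each fragment into a small scratch
 * buffer and appends it with strlcat(), bailing out as soon as the PAGE_SIZE
 * cap would be exceeded.  The same pattern in miniature (hypothetical helper,
 * user-space C; strlcat() is assumed available, as it is in the kernel):
 */
#include <stdio.h>
#include <string.h>

static int example_append(char *page, size_t cap, const char *fmt, int val)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), fmt, val);
	return strlcat(page, tmp, cap) < cap;	/* 0 means "page is full" */
}

/* Caller: if (!example_append(buf, PAGE_SIZE, "cnt %d\n", cnt)) goto done; */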
LPFC_CFG_MANAGED: + scnprintf(tmp, sizeof(tmp), "Config: Mode:Managed "); + break; + case LPFC_CFG_MONITOR: + scnprintf(tmp, sizeof(tmp), "Config: Mode:Monitor "); + break; + default: + scnprintf(tmp, sizeof(tmp), "Config: Mode:Unknown "); + } + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + total = 0; + rcv = 0; + for_each_present_cpu(cpu) { + cgs = per_cpu_ptr(phba->cmf_stat, cpu); + total += atomic64_read(&cgs->total_bytes); + rcv += atomic64_read(&cgs->rcv_bytes); + } + + scnprintf(tmp, sizeof(tmp), + "IObusy:%d Info:%d Bytes: Rcv:x%llx Total:x%llx\n", + atomic_read(&phba->cmf_busy), + phba->cmf_active_info, rcv, total); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "Port_speed:%d Link_byte_cnt:%ld " + "Max_byte_per_interval:%ld\n", + lpfc_sli_port_speed_get(phba), + (unsigned long)phba->cmf_link_byte_count, + (unsigned long)phba->cmf_max_bytes_per_interval); + strlcat(buf, tmp, PAGE_SIZE); + +buffer_done: + len = strnlen(buf, PAGE_SIZE); + + if (unlikely(len >= (PAGE_SIZE - 1))) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6312 Catching potential buffer " + "overflow > PAGE_SIZE = %lu bytes\n", + PAGE_SIZE); + strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR), + LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1); + } + return len; +} + +/** + * lpfc_drvr_version_show - Return the Emulex driver string with version number + * @dev: class unused variable. + * @attr: device attribute, not used. + * @buf: on return contains the module description text. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); +} + +/** + * lpfc_enable_fip_show - Return the fip mode of the HBA + * @dev: class unused variable. + * @attr: device attribute, not used. + * @buf: on return contains the module description text. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + if (phba->hba_flag & HBA_FIP_SUPPORT) + return scnprintf(buf, PAGE_SIZE, "1\n"); + else + return scnprintf(buf, PAGE_SIZE, "0\n"); +} + +static ssize_t +lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = shost_priv(shost); + struct lpfc_hba *phba = vport->phba; + struct lpfc_nvmet_tgtport *tgtp; + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + struct lpfc_nvme_rport *rport; + struct lpfc_nodelist *ndlp; + struct nvme_fc_remote_port *nrport; + struct lpfc_fc4_ctrl_stat *cstat; + uint64_t data1, data2, data3; + uint64_t totin, totout, tot; + char *statep; + int i; + int len = 0; + char tmp[LPFC_MAX_INFO_TMP_LEN] = {0}; + + if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { + len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n"); + return len; + } + if (phba->nvmet_support) { + if (!phba->targetport) { + len = scnprintf(buf, PAGE_SIZE, + "NVME Target: x%llx is not allocated\n", + wwn_to_u64(vport->fc_portname.u.wwn)); + return len; + } + /* Port state is only one of two values for now. 
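/*
 * Editor's aside -- illustrative sketch only, not part of the upstream patch.
 * The loop above folds the per-CPU cmf_stat counters into single totals with
 * for_each_present_cpu().  Reduced to plain C over an array (hypothetical
 * names):
 */
#include <stdint.h>

struct example_cpu_stat { uint64_t total_bytes, rcv_bytes; };

static void example_sum_stats(const struct example_cpu_stat *stat, int ncpu,
			      uint64_t *total, uint64_t *rcv)
{
	int cpu;

	*total = *rcv = 0;
	for (cpu = 0; cpu < ncpu; cpu++) {	/* one entry per present CPU */
		*total += stat[cpu].total_bytes;
		*rcv   += stat[cpu].rcv_bytes;
	}
}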
*/ + if (phba->targetport->port_id) + statep = "REGISTERED"; + else + statep = "INIT"; + scnprintf(tmp, sizeof(tmp), + "NVME Target Enabled State %s\n", + statep); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "%s%d WWPN x%llx WWNN x%llx DID x%06x\n", + "NVME Target: lpfc", + phba->brd_no, + wwn_to_u64(vport->fc_portname.u.wwn), + wwn_to_u64(vport->fc_nodename.u.wwn), + phba->targetport->port_id); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE) + >= PAGE_SIZE) + goto buffer_done; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + scnprintf(tmp, sizeof(tmp), + "LS: Rcv %08x Drop %08x Abort %08x\n", + atomic_read(&tgtp->rcv_ls_req_in), + atomic_read(&tgtp->rcv_ls_req_drop), + atomic_read(&tgtp->xmt_ls_abort)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + if (atomic_read(&tgtp->rcv_ls_req_in) != + atomic_read(&tgtp->rcv_ls_req_out)) { + scnprintf(tmp, sizeof(tmp), + "Rcv LS: in %08x != out %08x\n", + atomic_read(&tgtp->rcv_ls_req_in), + atomic_read(&tgtp->rcv_ls_req_out)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + } + + scnprintf(tmp, sizeof(tmp), + "LS: Xmt %08x Drop %08x Cmpl %08x\n", + atomic_read(&tgtp->xmt_ls_rsp), + atomic_read(&tgtp->xmt_ls_drop), + atomic_read(&tgtp->xmt_ls_rsp_cmpl)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "LS: RSP Abort %08x xb %08x Err %08x\n", + atomic_read(&tgtp->xmt_ls_rsp_aborted), + atomic_read(&tgtp->xmt_ls_rsp_xb_set), + atomic_read(&tgtp->xmt_ls_rsp_error)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "FCP: Rcv %08x Defer %08x Release %08x " + "Drop %08x\n", + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_defer), + atomic_read(&tgtp->xmt_fcp_release), + atomic_read(&tgtp->rcv_fcp_cmd_drop)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + if (atomic_read(&tgtp->rcv_fcp_cmd_in) != + atomic_read(&tgtp->rcv_fcp_cmd_out)) { + scnprintf(tmp, sizeof(tmp), + "Rcv FCP: in %08x != out %08x\n", + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + } + + scnprintf(tmp, sizeof(tmp), + "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x " + "drop %08x\n", + atomic_read(&tgtp->xmt_fcp_read), + atomic_read(&tgtp->xmt_fcp_read_rsp), + atomic_read(&tgtp->xmt_fcp_write), + atomic_read(&tgtp->xmt_fcp_rsp), + atomic_read(&tgtp->xmt_fcp_drop)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "FCP Rsp Cmpl: %08x err %08x drop %08x\n", + atomic_read(&tgtp->xmt_fcp_rsp_cmpl), + atomic_read(&tgtp->xmt_fcp_rsp_error), + atomic_read(&tgtp->xmt_fcp_rsp_drop)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "FCP Rsp Abort: %08x xb %08x xricqe %08x\n", + atomic_read(&tgtp->xmt_fcp_rsp_aborted), + atomic_read(&tgtp->xmt_fcp_rsp_xb_set), + atomic_read(&tgtp->xmt_fcp_xri_abort_cqe)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "ABORT: Xmt %08x Cmpl %08x\n", + atomic_read(&tgtp->xmt_fcp_abort), + atomic_read(&tgtp->xmt_fcp_abort_cmpl)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "ABORT: Sol %08x Usol 
%08x Err %08x Cmpl %08x\n", + atomic_read(&tgtp->xmt_abort_sol), + atomic_read(&tgtp->xmt_abort_unsol), + atomic_read(&tgtp->xmt_abort_rsp), + atomic_read(&tgtp->xmt_abort_rsp_error)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "DELAY: ctx %08x fod %08x wqfull %08x\n", + atomic_read(&tgtp->defer_ctx), + atomic_read(&tgtp->defer_fod), + atomic_read(&tgtp->defer_wqfull)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + /* Calculate outstanding IOs */ + tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); + tot += atomic_read(&tgtp->xmt_fcp_release); + tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; + + scnprintf(tmp, sizeof(tmp), + "IO_CTX: %08x WAIT: cur %08x tot %08x\n" + "CTX Outstanding %08llx\n\n", + phba->sli4_hba.nvmet_xri_cnt, + phba->sli4_hba.nvmet_io_wait_cnt, + phba->sli4_hba.nvmet_io_wait_total, + tot); + strlcat(buf, tmp, PAGE_SIZE); + goto buffer_done; + } + + localport = vport->localport; + if (!localport) { + len = scnprintf(buf, PAGE_SIZE, + "NVME Initiator x%llx is not allocated\n", + wwn_to_u64(vport->fc_portname.u.wwn)); + return len; + } + lport = (struct lpfc_nvme_lport *)localport->private; + if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "XRI Dist lpfc%d Total %d IO %d ELS %d\n", + phba->brd_no, + phba->sli4_hba.max_cfg_param.max_xri, + phba->sli4_hba.io_xri_max, + lpfc_sli4_get_els_iocb_cnt(phba)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + /* Port state is only one of two values for now. */ + if (localport->port_id) + statep = "ONLINE"; + else + statep = "UNKNOWN "; + + scnprintf(tmp, sizeof(tmp), + "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n", + "NVME LPORT lpfc", + phba->brd_no, + wwn_to_u64(vport->fc_portname.u.wwn), + wwn_to_u64(vport->fc_nodename.u.wwn), + localport->port_id, statep); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + spin_lock_irq(shost->host_lock); + + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + nrport = NULL; + spin_lock(&ndlp->lock); + rport = lpfc_ndlp_get_nrport(ndlp); + if (rport) + nrport = rport->remoteport; + spin_unlock(&ndlp->lock); + if (!nrport) + continue; + + /* Port state is only one of two values for now. */ + switch (nrport->port_state) { + case FC_OBJSTATE_ONLINE: + statep = "ONLINE"; + break; + case FC_OBJSTATE_UNKNOWN: + statep = "UNKNOWN "; + break; + default: + statep = "UNSUPPORTED"; + break; + } + + /* Tab in to show lport ownership. */ + if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) + goto unlock_buf_done; + if (phba->brd_no >= 10) { + if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) + goto unlock_buf_done; + } + + scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", + nrport->port_name); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto unlock_buf_done; + + scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", + nrport->node_name); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto unlock_buf_done; + + scnprintf(tmp, sizeof(tmp), "DID x%06x ", + nrport->port_id); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto unlock_buf_done; + + /* An NVME rport can have multiple roles. 
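/*
 * Editor's aside -- illustrative sketch only, not part of the upstream patch.
 * "CTX Outstanding" above is derived from three counters: commands received
 * minus those dropped or released.  As a standalone expression (names are
 * hypothetical):
 */
#include <stdint.h>

static inline uint64_t example_outstanding_ios(uint64_t cmd_in, uint64_t cmd_drop,
					       uint64_t releases)
{
	/* in - (drop + release) = commands still in flight */
	return cmd_in - (cmd_drop + releases);
}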
*/ + if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { + if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) + goto unlock_buf_done; + } + if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { + if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) + goto unlock_buf_done; + } + if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { + if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) + goto unlock_buf_done; + } + if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | + FC_PORT_ROLE_NVME_TARGET | + FC_PORT_ROLE_NVME_DISCOVERY)) { + scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", + nrport->port_role); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto unlock_buf_done; + } + + scnprintf(tmp, sizeof(tmp), "%s\n", statep); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto unlock_buf_done; + } + spin_unlock_irq(shost->host_lock); + + if (!lport) + goto buffer_done; + + if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "LS: Xmt %010x Cmpl %010x Abort %08x\n", + atomic_read(&lport->fc4NvmeLsRequests), + atomic_read(&lport->fc4NvmeLsCmpls), + atomic_read(&lport->xmt_ls_abort)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n", + atomic_read(&lport->xmt_ls_err), + atomic_read(&lport->cmpl_ls_xb), + atomic_read(&lport->cmpl_ls_err)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + totin = 0; + totout = 0; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + cstat = &phba->sli4_hba.hdwq[i].nvme_cstat; + tot = cstat->io_cmpls; + totin += tot; + data1 = cstat->input_requests; + data2 = cstat->output_requests; + data3 = cstat->control_requests; + totout += (data1 + data2 + data3); + } + scnprintf(tmp, sizeof(tmp), + "Total FCP Cmpl %016llx Issue %016llx " + "OutIO %016llx\n", + totin, totout, totout - totin); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "\tabort %08x noxri %08x nondlp %08x qdepth %08x " + "wqerr %08x err %08x\n", + atomic_read(&lport->xmt_fcp_abort), + atomic_read(&lport->xmt_fcp_noxri), + atomic_read(&lport->xmt_fcp_bad_ndlp), + atomic_read(&lport->xmt_fcp_qdepth), + atomic_read(&lport->xmt_fcp_wqerr), + atomic_read(&lport->xmt_fcp_err)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "FCP CMPL: xb %08x Err %08x\n", + atomic_read(&lport->cmpl_fcp_xb), + atomic_read(&lport->cmpl_fcp_err)); + strlcat(buf, tmp, PAGE_SIZE); + + /* host_lock is already unlocked. 
*/ + goto buffer_done; + + unlock_buf_done: + spin_unlock_irq(shost->host_lock); + + buffer_done: + len = strnlen(buf, PAGE_SIZE); + + if (unlikely(len >= (PAGE_SIZE - 1))) { + lpfc_printf_log(phba, KERN_INFO, LOG_NVME, + "6314 Catching potential buffer " + "overflow > PAGE_SIZE = %lu bytes\n", + PAGE_SIZE); + strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR), + LPFC_INFO_MORE_STR, + sizeof(LPFC_INFO_MORE_STR) + 1); + } + + return len; +} + +static ssize_t +lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = shost_priv(shost); + struct lpfc_hba *phba = vport->phba; + int len; + struct lpfc_fc4_ctrl_stat *cstat; + u64 data1, data2, data3; + u64 tot, totin, totout; + int i; + char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0}; + + if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) || + (phba->sli_rev != LPFC_SLI_REV4)) + return 0; + + scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n"); + + totin = 0; + totout = 0; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + cstat = &phba->sli4_hba.hdwq[i].scsi_cstat; + tot = cstat->io_cmpls; + totin += tot; + data1 = cstat->input_requests; + data2 = cstat->output_requests; + data3 = cstat->control_requests; + totout += (data1 + data2 + data3); + + scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx " + "IO %016llx ", i, data1, data2, data3); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n", + tot, ((data1 + data2 + data3) - tot)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + } + scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx " + "OutIO %016llx\n", totin, totout, totout - totin); + strlcat(buf, tmp, PAGE_SIZE); + +buffer_done: + len = strnlen(buf, PAGE_SIZE); + + return len; +} + +static ssize_t +lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + if (phba->cfg_enable_bg) { + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) + return scnprintf(buf, PAGE_SIZE, + "BlockGuard Enabled\n"); + else + return scnprintf(buf, PAGE_SIZE, + "BlockGuard Not Supported\n"); + } else + return scnprintf(buf, PAGE_SIZE, + "BlockGuard Disabled\n"); +} + +static ssize_t +lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return scnprintf(buf, PAGE_SIZE, "%llu\n", + (unsigned long long)phba->bg_guard_err_cnt); +} + +static ssize_t +lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return scnprintf(buf, PAGE_SIZE, "%llu\n", + (unsigned long long)phba->bg_apptag_err_cnt); +} + +static ssize_t +lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return scnprintf(buf, PAGE_SIZE, "%llu\n", + (unsigned long long)phba->bg_reftag_err_cnt); +} + +/** + * 
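/*
 * Editor's aside -- illustrative sketch only, not part of the upstream patch.
 * When the formatted output above fills the whole page, the tail of the
 * buffer is overwritten with a "could be more info" marker so the reader
 * knows the dump was truncated.  The same idea in miniature (hypothetical
 * names, user-space C):
 */
#include <string.h>

#define EXAMPLE_MORE_STR "\n...output truncated...\n"

static void example_mark_truncated(char *page, size_t cap, size_t len)
{
	if (len + 1 >= cap)	/* no room left for further fragments */
		memcpy(page + cap - sizeof(EXAMPLE_MORE_STR),
		       EXAMPLE_MORE_STR, sizeof(EXAMPLE_MORE_STR));
}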
lpfc_info_show - Return some pci info about the host in ascii + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains the formatted text from lpfc_info(). + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_info_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + + return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host)); +} + +/** + * lpfc_serialnum_show - Return the hba serial number in ascii + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains the formatted text serial number. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_serialnum_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber); +} + +/** + * lpfc_temp_sensor_show - Return the temperature sensor level + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains the formatted support level. + * + * Description: + * Returns a number indicating the temperature sensor level currently + * supported, zero or one in ascii. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support); +} + +/** + * lpfc_modeldesc_show - Return the model description of the hba + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains the scsi vpd model description. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc); +} + +/** + * lpfc_modelname_show - Return the model name of the hba + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains the scsi vpd model name. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_modelname_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName); +} + +/** + * lpfc_programtype_show - Return the program type of the hba + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains the scsi vpd program type. + * + * Returns: size of formatted string. 
+ **/ +static ssize_t +lpfc_programtype_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType); +} + +/** + * lpfc_vportnum_show - Return the port number in ascii of the hba + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains scsi vpd program type. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_vportnum_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port); +} + +/** + * lpfc_fwrev_show - Return the firmware rev running in the hba + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains the scsi vpd program type. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_fwrev_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + uint32_t if_type; + uint8_t sli_family; + char fwrev[FW_REV_STR_SIZE]; + int len; + + lpfc_decode_firmware_rev(phba, fwrev, 1); + if_type = phba->sli4_hba.pc_sli4_params.if_type; + sli_family = phba->sli4_hba.pc_sli4_params.sli_family; + + if (phba->sli_rev < LPFC_SLI_REV4) + len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n", + fwrev, phba->sli_rev); + else + len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n", + fwrev, phba->sli_rev, if_type, sli_family); + + return len; +} + +/** + * lpfc_hdw_show - Return the jedec information about the hba + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains the scsi vpd program type. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + char hdw[9]; + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + lpfc_vpd_t *vp = &phba->vpd; + + lpfc_jedec_to_ascii(vp->rev.biuRev, hdw); + return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw, + vp->rev.smRev, vp->rev.smFwRev); +} + +/** + * lpfc_option_rom_version_show - Return the adapter ROM FCode version + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains the ROM and FCode ascii strings. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + char fwrev[FW_REV_STR_SIZE]; + + if (phba->sli_rev < LPFC_SLI_REV4) + return scnprintf(buf, PAGE_SIZE, "%s\n", + phba->OptionROMVersion); + + lpfc_decode_firmware_rev(phba, fwrev, 1); + return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev); +} + +/** + * lpfc_link_state_show - Return the link state of the port + * @dev: class converted to a Scsi_host structure. 
+ * @attr: device attribute, not used. + * @buf: on return contains text describing the state of the link. + * + * Notes: + * The switch statement has no default so zero will be returned. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_link_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int len = 0; + + switch (phba->link_state) { + case LPFC_LINK_UNKNOWN: + case LPFC_WARM_START: + case LPFC_INIT_START: + case LPFC_INIT_MBX_CMDS: + case LPFC_LINK_DOWN: + case LPFC_HBA_ERROR: + if (phba->hba_flag & LINK_DISABLED) + len += scnprintf(buf + len, PAGE_SIZE-len, + "Link Down - User disabled\n"); + else + len += scnprintf(buf + len, PAGE_SIZE-len, + "Link Down\n"); + break; + case LPFC_LINK_UP: + case LPFC_CLEAR_LA: + case LPFC_HBA_READY: + len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - "); + + switch (vport->port_state) { + case LPFC_LOCAL_CFG_LINK: + len += scnprintf(buf + len, PAGE_SIZE-len, + "Configuring Link\n"); + break; + case LPFC_FDISC: + case LPFC_FLOGI: + case LPFC_FABRIC_CFG_LINK: + case LPFC_NS_REG: + case LPFC_NS_QRY: + case LPFC_BUILD_DISC_LIST: + case LPFC_DISC_AUTH: + len += scnprintf(buf + len, PAGE_SIZE - len, + "Discovery\n"); + break; + case LPFC_VPORT_READY: + len += scnprintf(buf + len, PAGE_SIZE - len, + "Ready\n"); + break; + + case LPFC_VPORT_FAILED: + len += scnprintf(buf + len, PAGE_SIZE - len, + "Failed\n"); + break; + + case LPFC_VPORT_UNKNOWN: + len += scnprintf(buf + len, PAGE_SIZE - len, + "Unknown\n"); + break; + } + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { + if (vport->fc_flag & FC_PUBLIC_LOOP) + len += scnprintf(buf + len, PAGE_SIZE-len, + " Public Loop\n"); + else + len += scnprintf(buf + len, PAGE_SIZE-len, + " Private Loop\n"); + } else { + if (vport->fc_flag & FC_FABRIC) { + if (phba->sli_rev == LPFC_SLI_REV4 && + vport->port_type == LPFC_PHYSICAL_PORT && + phba->sli4_hba.fawwpn_flag & + LPFC_FAWWPN_FABRIC) + len += scnprintf(buf + len, + PAGE_SIZE - len, + " Fabric FA-PWWN\n"); + else + len += scnprintf(buf + len, + PAGE_SIZE - len, + " Fabric\n"); + } else { + len += scnprintf(buf + len, PAGE_SIZE-len, + " Point-2-Point\n"); + } + } + } + + if ((phba->sli_rev == LPFC_SLI_REV4) && + ((bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_6))) { + struct lpfc_trunk_link link = phba->trunk_link; + + if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)) + len += scnprintf(buf + len, PAGE_SIZE - len, + "Trunk port 0: Link %s %s\n", + (link.link0.state == LPFC_LINK_UP) ? + "Up" : "Down. ", + trunk_errmsg[link.link0.fault]); + + if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)) + len += scnprintf(buf + len, PAGE_SIZE - len, + "Trunk port 1: Link %s %s\n", + (link.link1.state == LPFC_LINK_UP) ? + "Up" : "Down. ", + trunk_errmsg[link.link1.fault]); + + if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)) + len += scnprintf(buf + len, PAGE_SIZE - len, + "Trunk port 2: Link %s %s\n", + (link.link2.state == LPFC_LINK_UP) ? + "Up" : "Down. ", + trunk_errmsg[link.link2.fault]); + + if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)) + len += scnprintf(buf + len, PAGE_SIZE - len, + "Trunk port 3: Link %s %s\n", + (link.link3.state == LPFC_LINK_UP) ? + "Up" : "Down. 
", + trunk_errmsg[link.link3.fault]); + + } + + return len; +} + +/** + * lpfc_sli4_protocol_show - Return the fip mode of the HBA + * @dev: class unused variable. + * @attr: device attribute, not used. + * @buf: on return contains the module description text. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + if (phba->sli_rev < LPFC_SLI_REV4) + return scnprintf(buf, PAGE_SIZE, "fc\n"); + + if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) { + if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE) + return scnprintf(buf, PAGE_SIZE, "fcoe\n"); + if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) + return scnprintf(buf, PAGE_SIZE, "fc\n"); + } + return scnprintf(buf, PAGE_SIZE, "unknown\n"); +} + +/** + * lpfc_oas_supported_show - Return whether or not Optimized Access Storage + * (OAS) is supported. + * @dev: class unused variable. + * @attr: device attribute, not used. + * @buf: on return contains the module description text. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return scnprintf(buf, PAGE_SIZE, "%d\n", + phba->sli4_hba.pc_sli4_params.oas_supported); +} + +/** + * lpfc_link_state_store - Transition the link_state on an HBA port + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: one or more lpfc_polling_flags values. + * @count: not used. + * + * Returns: + * -EINVAL if the buffer is not "up" or "down" + * return from link state change function if non-zero + * length of the buf on success + **/ +static ssize_t +lpfc_link_state_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + int status = -EINVAL; + + if ((strncmp(buf, "up", sizeof("up") - 1) == 0) && + (phba->link_state == LPFC_LINK_DOWN)) + status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); + else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) && + (phba->link_state >= LPFC_LINK_UP)) + status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT); + + if (status == 0) + return strlen(buf); + else + return status; +} + +/** + * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: on return contains the sum of fc mapped and unmapped. + * + * Description: + * Returns the ascii text number of the sum of the fc mapped and unmapped + * vport counts. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_num_discovered_ports_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + + return scnprintf(buf, PAGE_SIZE, "%d\n", + vport->fc_map_cnt + vport->fc_unmap_cnt); +} + +/** + * lpfc_issue_lip - Misnomer, name carried over from long ago + * @shost: Scsi_Host pointer. 
+ * + * Description: + * Bring the link down gracefully then re-init the link. The firmware will + * re-init the fiber channel interface as required. Does not issue a LIP. + * + * Returns: + * -EPERM port offline or management commands are being blocked + * -ENOMEM cannot allocate memory for the mailbox command + * -EIO error sending the mailbox command + * zero for success + **/ +static int +lpfc_issue_lip(struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *pmboxq; + int mbxstatus = MBXERR_ERROR; + + /* + * If the link is offline, disabled or BLOCK_MGMT_IO + * it doesn't make any sense to allow issue_lip + */ + if ((vport->fc_flag & FC_OFFLINE_MODE) || + (phba->hba_flag & LINK_DISABLED) || + (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)) + return -EPERM; + + pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); + + if (!pmboxq) + return -ENOMEM; + + memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); + pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; + pmboxq->u.mb.mbxOwner = OWN_HOST; + + if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME)) + vport->fc_flag &= ~FC_PT2PT_NO_NVME; + + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); + + if ((mbxstatus == MBX_SUCCESS) && + (pmboxq->u.mb.mbxStatus == 0 || + pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) { + memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); + lpfc_init_link(phba, pmboxq, phba->cfg_topology, + phba->cfg_link_speed); + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, + phba->fc_ratov * 2); + if ((mbxstatus == MBX_SUCCESS) && + (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "2859 SLI authentication is required " + "for INIT_LINK but has not done yet\n"); + } + + lpfc_set_loopback_flag(phba); + if (mbxstatus != MBX_TIMEOUT) + mempool_free(pmboxq, phba->mbox_mem_pool); + + if (mbxstatus == MBXERR_ERROR) + return -EIO; + + return 0; +} + +int +lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock) +{ + int cnt = 0; + + spin_lock_irq(lock); + while (!list_empty(q)) { + spin_unlock_irq(lock); + msleep(20); + if (cnt++ > 250) { /* 5 secs */ + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0466 Outstanding IO when " + "bringing Adapter offline\n"); + return 0; + } + spin_lock_irq(lock); + } + spin_unlock_irq(lock); + return 1; +} + +/** + * lpfc_do_offline - Issues a mailbox command to bring the link down + * @phba: lpfc_hba pointer. + * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL. + * + * Notes: + * Assumes any error from lpfc_do_offline() will be negative. + * Can wait up to 5 seconds for the port ring buffers count + * to reach zero, prints a warning if it is not zero and continues. + * lpfc_workq_post_event() returns a non-zero return code if call fails. + * + * Returns: + * -EIO error posting the event + * zero for success + **/ +static int +lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) +{ + struct completion online_compl; + struct lpfc_queue *qp = NULL; + struct lpfc_sli_ring *pring; + struct lpfc_sli *psli; + int status = 0; + int i; + int rc; + + init_completion(&online_compl); + rc = lpfc_workq_post_event(phba, &status, &online_compl, + LPFC_EVT_OFFLINE_PREP); + if (rc == 0) + return -ENOMEM; + + wait_for_completion(&online_compl); + + if (status != 0) + return -EIO; + + psli = &phba->sli; + + /* + * If freeing the queues have already started, don't access them. 
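/*
 * Editor's aside -- illustrative sketch only, not part of the upstream patch.
 * lpfc_emptyq_wait() above polls a queue every 20 ms and gives up after 250
 * tries (about 5 seconds).  The generic shape of that bounded wait
 * (hypothetical predicate and sleep hook, user-space C):
 */
static int example_wait_until(int (*is_done)(void *), void *arg,
			      void (*sleep_ms)(unsigned int))
{
	int tries;

	for (tries = 0; tries < 250; tries++) {	/* 250 * 20 ms ~= 5 s */
		if (is_done(arg))
			return 1;		/* condition met */
		sleep_ms(20);
	}
	return 0;				/* timed out, as in message 0466 */
}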
+ * Otherwise set FREE_WAIT to indicate that queues are being used + * to hold the freeing process until we finish. + */ + spin_lock_irq(&phba->hbalock); + if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) { + psli->sli_flag |= LPFC_QUEUE_FREE_WAIT; + } else { + spin_unlock_irq(&phba->hbalock); + goto skip_wait; + } + spin_unlock_irq(&phba->hbalock); + + /* Wait a little for things to settle down, but not + * long enough for dev loss timeout to expire. + */ + if (phba->sli_rev != LPFC_SLI_REV4) { + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + if (!lpfc_emptyq_wait(phba, &pring->txcmplq, + &phba->hbalock)) + goto out; + } + } else { + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; + if (!lpfc_emptyq_wait(phba, &pring->txcmplq, + &pring->ring_lock)) + goto out; + } + } +out: + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT; + spin_unlock_irq(&phba->hbalock); + +skip_wait: + init_completion(&online_compl); + rc = lpfc_workq_post_event(phba, &status, &online_compl, type); + if (rc == 0) + return -ENOMEM; + + wait_for_completion(&online_compl); + + if (status != 0) + return -EIO; + + return 0; +} + +/** + * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA + * @phba: lpfc_hba pointer. + * + * Description: + * Issues a PCI secondary bus reset for the phba->pcidev. + * + * Notes: + * First walks the bus_list to ensure only PCI devices with Emulex + * vendor id, device ids that support hot reset, only one occurrence + * of function 0, and all ports on the bus are in offline mode to ensure the + * hot reset only affects one valid HBA. + * + * Returns: + * -ENOTSUPP, cfg_enable_hba_reset must be of value 2 + * -ENODEV, NULL ptr to pcidev + * -EBADSLT, detected invalid device + * -EBUSY, port is not in offline state + * 0, successful + */ +static int +lpfc_reset_pci_bus(struct lpfc_hba *phba) +{ + struct pci_dev *pdev = phba->pcidev; + struct Scsi_Host *shost = NULL; + struct lpfc_hba *phba_other = NULL; + struct pci_dev *ptr = NULL; + int res; + + if (phba->cfg_enable_hba_reset != 2) + return -ENOTSUPP; + + if (!pdev) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n"); + return -ENODEV; + } + + res = lpfc_check_pci_resettable(phba); + if (res) + return res; + + /* Walk the list of devices on the pci_dev's bus */ + list_for_each_entry(ptr, &pdev->bus->devices, bus_list) { + /* Check port is offline */ + shost = pci_get_drvdata(ptr); + if (shost) { + phba_other = + ((struct lpfc_vport *)shost->hostdata)->phba; + if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) { + lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT, + "8349 WWPN = 0x%02x%02x%02x%02x" + "%02x%02x%02x%02x is not " + "offline!\n", + phba_other->wwpn[0], + phba_other->wwpn[1], + phba_other->wwpn[2], + phba_other->wwpn[3], + phba_other->wwpn[4], + phba_other->wwpn[5], + phba_other->wwpn[6], + phba_other->wwpn[7]); + return -EBUSY; + } + } + } + + /* Issue PCI bus reset */ + res = pci_reset_bus(pdev); + if (res) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "8350 PCI reset bus failed: %d\n", res); + } + + return res; +} + +/** + * lpfc_selective_reset - Offline then onlines the port + * @phba: lpfc_hba pointer. + * + * Description: + * If the port is configured to allow a reset then the hba is brought + * offline then online. + * + * Notes: + * Assumes any error from lpfc_do_offline() will be negative. + * Do not make this function static. 
+ * + * Returns: + * lpfc_do_offline() return code if not zero + * -EIO reset not configured or error posting the event + * zero for success + **/ +int +lpfc_selective_reset(struct lpfc_hba *phba) +{ + struct completion online_compl; + int status = 0; + int rc; + + if (!phba->cfg_enable_hba_reset) + return -EACCES; + + if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) { + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); + + if (status != 0) + return status; + } + + init_completion(&online_compl); + rc = lpfc_workq_post_event(phba, &status, &online_compl, + LPFC_EVT_ONLINE); + if (rc == 0) + return -ENOMEM; + + wait_for_completion(&online_compl); + + if (status != 0) + return -EIO; + + return 0; +} + +/** + * lpfc_issue_reset - Selectively resets an adapter + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: containing the string "selective". + * @count: unused variable. + * + * Description: + * If the buf contains the string "selective" then lpfc_selective_reset() + * is called to perform the reset. + * + * Notes: + * Assumes any error from lpfc_selective_reset() will be negative. + * If lpfc_selective_reset() returns zero then the length of the buffer + * is returned which indicates success + * + * Returns: + * -EINVAL if the buffer does not contain the string "selective" + * length of buf if lpfc-selective_reset() if the call succeeds + * return value of lpfc_selective_reset() if the call fails +**/ +static ssize_t +lpfc_issue_reset(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int status = -EINVAL; + + if (!phba->cfg_enable_hba_reset) + return -EACCES; + + if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) + status = phba->lpfc_selective_reset(phba); + + if (status == 0) + return strlen(buf); + else + return status; +} + +/** + * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readyness + * @phba: lpfc_hba pointer. + * + * Description: + * SLI4 interface type-2 device to wait on the sliport status register for + * the readyness after performing a firmware reset. + * + * Returns: + * zero for success, -EPERM when port does not have privilege to perform the + * reset, -EIO when port timeout from recovering from the reset. + * + * Note: + * As the caller will interpret the return code by value, be careful in making + * change or addition to return codes. + **/ +int +lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba) +{ + struct lpfc_register portstat_reg = {0}; + int i; + + msleep(100); + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + &portstat_reg.word0)) + return -EIO; + + /* verify if privileged for the request operation */ + if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) && + !bf_get(lpfc_sliport_status_err, &portstat_reg)) + return -EPERM; + + /* There is no point to wait if the port is in an unrecoverable + * state. 
+ */ + if (lpfc_sli4_unrecoverable_port(&portstat_reg)) + return -EIO; + + /* wait for the SLI port firmware ready after firmware reset */ + for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) { + msleep(10); + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + &portstat_reg.word0)) + continue; + if (!bf_get(lpfc_sliport_status_err, &portstat_reg)) + continue; + if (!bf_get(lpfc_sliport_status_rn, &portstat_reg)) + continue; + if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg)) + continue; + break; + } + + if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT) + return 0; + else + return -EIO; +} + +/** + * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc + * @phba: lpfc_hba pointer. + * @opcode: The sli4 config command opcode. + * + * Description: + * Request SLI4 interface type-2 device to perform a physical register set + * access. + * + * Returns: + * zero for success + **/ +static ssize_t +lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) +{ + struct completion online_compl; + struct pci_dev *pdev = phba->pcidev; + uint32_t before_fc_flag; + uint32_t sriov_nr_virtfn; + uint32_t reg_val; + int status = 0, rc = 0; + int job_posted = 1, sriov_err; + + if (!phba->cfg_enable_hba_reset) + return -EACCES; + + if ((phba->sli_rev < LPFC_SLI_REV4) || + (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < + LPFC_SLI_INTF_IF_TYPE_2)) + return -EPERM; + + /* Keep state if we need to restore back */ + before_fc_flag = phba->pport->fc_flag; + sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn; + + if (opcode == LPFC_FW_DUMP) { + init_completion(&online_compl); + phba->fw_dump_cmpl = &online_compl; + } else { + /* Disable SR-IOV virtual functions if enabled */ + if (phba->cfg_sriov_nr_virtfn) { + pci_disable_sriov(pdev); + phba->cfg_sriov_nr_virtfn = 0; + } + + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); + + if (status != 0) + return status; + + /* wait for the device to be quiesced before firmware reset */ + msleep(100); + } + + reg_val = readl(phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PDEV_CTL_OFFSET); + + if (opcode == LPFC_FW_DUMP) + reg_val |= LPFC_FW_DUMP_REQUEST; + else if (opcode == LPFC_FW_RESET) + reg_val |= LPFC_CTL_PDEV_CTL_FRST; + else if (opcode == LPFC_DV_RESET) + reg_val |= LPFC_CTL_PDEV_CTL_DRST; + + writel(reg_val, phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PDEV_CTL_OFFSET); + /* flush */ + readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); + + /* delay driver action following IF_TYPE_2 reset */ + rc = lpfc_sli4_pdev_status_reg_wait(phba); + + if (rc == -EPERM) { + /* no privilege for reset */ + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3150 No privilege to perform the requested " + "access: x%x\n", reg_val); + } else if (rc == -EIO) { + /* reset failed, there is nothing more we can do */ + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3153 Fail to perform the requested " + "access: x%x\n", reg_val); + if (phba->fw_dump_cmpl) + phba->fw_dump_cmpl = NULL; + return rc; + } + + /* keep the original port state */ + if (before_fc_flag & FC_OFFLINE_MODE) { + if (phba->fw_dump_cmpl) + phba->fw_dump_cmpl = NULL; + goto out; + } + + /* Firmware dump will trigger an HA_ERATT event, and + * lpfc_handle_eratt_s4 routine already handles bringing the port back + * online. 
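/*
 * Editor's aside -- illustrative sketch only, not part of the upstream patch.
 * Above, the requested opcode is translated into a single control bit that is
 * OR'ed into the PDEV control register before write-back.  The mapping,
 * reduced to a table with placeholder bit values (the real LPFC_* masks live
 * in the driver headers):
 */
#include <stdint.h>

enum example_op { EX_FW_DUMP, EX_FW_RESET, EX_DV_RESET };

static uint32_t example_pdev_ctl(uint32_t reg_val, enum example_op op)
{
	switch (op) {
	case EX_FW_DUMP:  return reg_val | 0x1;	/* placeholder bit */
	case EX_FW_RESET: return reg_val | 0x2;	/* placeholder bit */
	case EX_DV_RESET: return reg_val | 0x4;	/* placeholder bit */
	}
	return reg_val;
}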
+ */ + if (opcode == LPFC_FW_DUMP) { + wait_for_completion(phba->fw_dump_cmpl); + } else { + init_completion(&online_compl); + job_posted = lpfc_workq_post_event(phba, &status, &online_compl, + LPFC_EVT_ONLINE); + if (!job_posted) + goto out; + + wait_for_completion(&online_compl); + } +out: + /* in any case, restore the virtual functions enabled as before */ + if (sriov_nr_virtfn) { + /* If fw_dump was performed, first disable to clean up */ + if (opcode == LPFC_FW_DUMP) { + pci_disable_sriov(pdev); + phba->cfg_sriov_nr_virtfn = 0; + } + + sriov_err = + lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn); + if (!sriov_err) + phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn; + } + + /* return proper error code */ + if (!rc) { + if (!job_posted) + rc = -ENOMEM; + else if (status) + rc = -EIO; + } + return rc; +} + +/** + * lpfc_nport_evt_cnt_show - Return the number of nport events + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: on return contains the ascii number of nport events. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); +} + +static int +lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out) +{ + LPFC_MBOXQ_t *mbox = NULL; + unsigned long val = 0; + char *pval = NULL; + int rc = 0; + + if (!strncmp("enable", buff_out, + strlen("enable"))) { + pval = buff_out + strlen("enable") + 1; + rc = kstrtoul(pval, 0, &val); + if (rc) + return rc; /* Invalid number */ + } else if (!strncmp("disable", buff_out, + strlen("disable"))) { + val = 0; + } else { + return -EINVAL; /* Invalid command */ + } + + switch (val) { + case 0: + val = 0x0; /* Disable */ + break; + case 2: + val = 0x1; /* Enable two port trunk */ + break; + case 4: + val = 0x2; /* Enable four port trunk */ + break; + default: + return -EINVAL; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "0070 Set trunk mode with val %ld ", val); + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE, + 12, LPFC_SLI4_MBX_EMBED); + + bf_set(lpfc_mbx_set_trunk_mode, + &mbox->u.mqe.un.set_trunk_mode, + val); + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + if (rc) + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "0071 Set trunk mode failed with status: %d", + rc); + mempool_free(mbox, phba->mbox_mem_pool); + + return 0; +} + +static ssize_t +lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int rc; + int len = 0; + struct lpfc_rdp_context *rdp_context; + u16 temperature; + u16 rx_power; + u16 tx_bias; + u16 tx_power; + u16 vcc; + char chbuf[128]; + u16 wavelength = 0; + struct sff_trasnceiver_codes_byte7 *trasn_code_byte7; + + /* Get transceiver information */ + rdp_context = kmalloc(sizeof(*rdp_context), GFP_KERNEL); + + rc = lpfc_get_sfp_info_wait(phba, rdp_context); + if (rc) { + len = scnprintf(buf, PAGE_SIZE - len, "SFP info NA:\n"); + goto out_free_rdp; + } + + strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_NAME], 16); + + 
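
lpfc_set_trunking() above accepts "disable" or "enable <n>" and maps the requested port count onto the mailbox trunk-mode encoding (0 for off, 2 ports to 0x1, 4 ports to 0x2). Below is a small user-space sketch of just that string-to-mode mapping, with no mailbox handling; parse_trunk_mode() is a hypothetical name, not a driver symbol.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_trunk_mode(const char *cmd, unsigned int *mode)
{
	unsigned long ports = 0;

	if (!strncmp(cmd, "enable ", 7))
		ports = strtoul(cmd + 7, NULL, 0);
	else if (strncmp(cmd, "disable", 7))
		return -1;                    /* unknown keyword */

	switch (ports) {
	case 0: *mode = 0x0; return 0;        /* trunking off    */
	case 2: *mode = 0x1; return 0;        /* two-port trunk  */
	case 4: *mode = 0x2; return 0;        /* four-port trunk */
	default: return -1;
	}
}

int main(void)
{
	unsigned int mode;

	if (!parse_trunk_mode("enable 4", &mode))
		printf("mode=0x%x\n", mode);  /* prints mode=0x2 */
	return 0;
}
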
len = scnprintf(buf, PAGE_SIZE - len, "VendorName:\t%s\n", chbuf); + len += scnprintf(buf + len, PAGE_SIZE - len, + "VendorOUI:\t%02x-%02x-%02x\n", + (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI], + (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 1], + (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 2]); + strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_PN], 16); + len += scnprintf(buf + len, PAGE_SIZE - len, "VendorPN:\t%s\n", chbuf); + strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_SN], 16); + len += scnprintf(buf + len, PAGE_SIZE - len, "VendorSN:\t%s\n", chbuf); + strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_REV], 4); + len += scnprintf(buf + len, PAGE_SIZE - len, "VendorRev:\t%s\n", chbuf); + strscpy(chbuf, &rdp_context->page_a0[SSF_DATE_CODE], 8); + len += scnprintf(buf + len, PAGE_SIZE - len, "DateCode:\t%s\n", chbuf); + len += scnprintf(buf + len, PAGE_SIZE - len, "Identifier:\t%xh\n", + (uint8_t)rdp_context->page_a0[SSF_IDENTIFIER]); + len += scnprintf(buf + len, PAGE_SIZE - len, "ExtIdentifier:\t%xh\n", + (uint8_t)rdp_context->page_a0[SSF_EXT_IDENTIFIER]); + len += scnprintf(buf + len, PAGE_SIZE - len, "Connector:\t%xh\n", + (uint8_t)rdp_context->page_a0[SSF_CONNECTOR]); + wavelength = (rdp_context->page_a0[SSF_WAVELENGTH_B1] << 8) | + rdp_context->page_a0[SSF_WAVELENGTH_B0]; + + len += scnprintf(buf + len, PAGE_SIZE - len, "Wavelength:\t%d nm\n", + wavelength); + trasn_code_byte7 = (struct sff_trasnceiver_codes_byte7 *) + &rdp_context->page_a0[SSF_TRANSCEIVER_CODE_B7]; + + len += scnprintf(buf + len, PAGE_SIZE - len, "Speeds: \t"); + if (*(uint8_t *)trasn_code_byte7 == 0) { + len += scnprintf(buf + len, PAGE_SIZE - len, "Unknown\n"); + } else { + if (trasn_code_byte7->fc_sp_100MB) + len += scnprintf(buf + len, PAGE_SIZE - len, "1 "); + if (trasn_code_byte7->fc_sp_200mb) + len += scnprintf(buf + len, PAGE_SIZE - len, "2 "); + if (trasn_code_byte7->fc_sp_400MB) + len += scnprintf(buf + len, PAGE_SIZE - len, "4 "); + if (trasn_code_byte7->fc_sp_800MB) + len += scnprintf(buf + len, PAGE_SIZE - len, "8 "); + if (trasn_code_byte7->fc_sp_1600MB) + len += scnprintf(buf + len, PAGE_SIZE - len, "16 "); + if (trasn_code_byte7->fc_sp_3200MB) + len += scnprintf(buf + len, PAGE_SIZE - len, "32 "); + if (trasn_code_byte7->speed_chk_ecc) + len += scnprintf(buf + len, PAGE_SIZE - len, "64 "); + len += scnprintf(buf + len, PAGE_SIZE - len, "GB\n"); + } + temperature = (rdp_context->page_a2[SFF_TEMPERATURE_B1] << 8 | + rdp_context->page_a2[SFF_TEMPERATURE_B0]); + vcc = (rdp_context->page_a2[SFF_VCC_B1] << 8 | + rdp_context->page_a2[SFF_VCC_B0]); + tx_power = (rdp_context->page_a2[SFF_TXPOWER_B1] << 8 | + rdp_context->page_a2[SFF_TXPOWER_B0]); + tx_bias = (rdp_context->page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | + rdp_context->page_a2[SFF_TX_BIAS_CURRENT_B0]); + rx_power = (rdp_context->page_a2[SFF_RXPOWER_B1] << 8 | + rdp_context->page_a2[SFF_RXPOWER_B0]); + + len += scnprintf(buf + len, PAGE_SIZE - len, + "Temperature:\tx%04x C\n", temperature); + len += scnprintf(buf + len, PAGE_SIZE - len, "Vcc:\t\tx%04x V\n", vcc); + len += scnprintf(buf + len, PAGE_SIZE - len, + "TxBiasCurrent:\tx%04x mA\n", tx_bias); + len += scnprintf(buf + len, PAGE_SIZE - len, "TxPower:\tx%04x mW\n", + tx_power); + len += scnprintf(buf + len, PAGE_SIZE - len, "RxPower:\tx%04x mW\n", + rx_power); +out_free_rdp: + kfree(rdp_context); + return len; +} + +/** + * lpfc_board_mode_show - Return the state of the board + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. 
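
The transceiver fields formatted by lpfc_xcvr_data_show() above (wavelength from page A0; temperature, Vcc, bias and power from page A2) are big-endian byte pairs in the SFF EEPROM pages. The standalone sketch below shows the MSB<<8|LSB combination used for each of them; the helper name and the offsets in main() are made up for the demo.

#include <stdint.h>
#include <stdio.h>

static uint16_t sff_be16(const uint8_t *page, unsigned int msb, unsigned int lsb)
{
	return (uint16_t)((page[msb] << 8) | page[lsb]);
}

int main(void)
{
	uint8_t page[2] = { 0x05, 0x1e };        /* 0x051e = 1310 */

	printf("%u nm\n", sff_be16(page, 0, 1)); /* prints 1310 nm */
	return 0;
}
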
+ * @buf: on return contains the state of the adapter. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_board_mode_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + char * state; + + if (phba->link_state == LPFC_HBA_ERROR) + state = "error"; + else if (phba->link_state == LPFC_WARM_START) + state = "warm start"; + else if (phba->link_state == LPFC_INIT_START) + state = "offline"; + else + state = "online"; + + return scnprintf(buf, PAGE_SIZE, "%s\n", state); +} + +/** + * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: containing one of the strings "online", "offline", "warm" or "error". + * @count: unused variable. + * + * Returns: + * -EACCES if enable hba reset not enabled + * -EINVAL if the buffer does not contain a valid string (see above) + * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails + * buf length greater than zero indicates success + **/ +static ssize_t +lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct completion online_compl; + char *board_mode_str = NULL; + int status = 0; + int rc; + + if (!phba->cfg_enable_hba_reset) { + status = -EACCES; + goto board_mode_out; + } + + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "3050 lpfc_board_mode set to %s\n", buf); + + init_completion(&online_compl); + + if(strncmp(buf, "online", sizeof("online") - 1) == 0) { + rc = lpfc_workq_post_event(phba, &status, &online_compl, + LPFC_EVT_ONLINE); + if (rc == 0) { + status = -ENOMEM; + goto board_mode_out; + } + wait_for_completion(&online_compl); + if (status) + status = -EIO; + } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); + else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0) + if (phba->sli_rev == LPFC_SLI_REV4) + status = -EINVAL; + else + status = lpfc_do_offline(phba, LPFC_EVT_WARM_START); + else if (strncmp(buf, "error", sizeof("error") - 1) == 0) + if (phba->sli_rev == LPFC_SLI_REV4) + status = -EINVAL; + else + status = lpfc_do_offline(phba, LPFC_EVT_KILL); + else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0) + status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP); + else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0) + status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET); + else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0) + status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET); + else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1) + == 0) + status = lpfc_reset_pci_bus(phba); + else if (strncmp(buf, "heartbeat", sizeof("heartbeat") - 1) == 0) + lpfc_issue_hb_tmo(phba); + else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0) + status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk")); + else + status = -EINVAL; + +board_mode_out: + if (!status) + return strlen(buf); + else { + board_mode_str = strchr(buf, '\n'); + if (board_mode_str) + *board_mode_str = '\0'; + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "3097 Failed \"%s\", status(%d), " + "fc_flag(x%x)\n", + buf, status, 
phba->pport->fc_flag); + return status; + } +} + +/** + * lpfc_get_hba_info - Return various bits of informaton about the adapter + * @phba: pointer to the adapter structure. + * @mxri: max xri count. + * @axri: available xri count. + * @mrpi: max rpi count. + * @arpi: available rpi count. + * @mvpi: max vpi count. + * @avpi: available vpi count. + * + * Description: + * If an integer pointer for an count is not null then the value for the + * count is returned. + * + * Returns: + * zero on error + * one for success + **/ +static int +lpfc_get_hba_info(struct lpfc_hba *phba, + uint32_t *mxri, uint32_t *axri, + uint32_t *mrpi, uint32_t *arpi, + uint32_t *mvpi, uint32_t *avpi) +{ + LPFC_MBOXQ_t *pmboxq; + MAILBOX_t *pmb; + int rc = 0; + struct lpfc_sli4_hba *sli4_hba; + struct lpfc_max_cfg_param *max_cfg_param; + u16 rsrc_ext_cnt, rsrc_ext_size, max_vpi; + + /* + * prevent udev from issuing mailbox commands until the port is + * configured. + */ + if (phba->link_state < LPFC_LINK_DOWN || + !phba->mbox_mem_pool || + (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) + return 0; + + if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) + return 0; + + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) + return 0; + memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); + + pmb = &pmboxq->u.mb; + pmb->mbxCommand = MBX_READ_CONFIG; + pmb->mbxOwner = OWN_HOST; + pmboxq->ctx_buf = NULL; + + if (phba->pport->fc_flag & FC_OFFLINE_MODE) + rc = MBX_NOT_FINISHED; + else + rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + + if (rc != MBX_SUCCESS) { + if (rc != MBX_TIMEOUT) + mempool_free(pmboxq, phba->mbox_mem_pool); + return 0; + } + + if (phba->sli_rev == LPFC_SLI_REV4) { + sli4_hba = &phba->sli4_hba; + max_cfg_param = &sli4_hba->max_cfg_param; + + /* Normally, extents are not used */ + if (!phba->sli4_hba.extents_in_use) { + if (mrpi) + *mrpi = max_cfg_param->max_rpi; + if (mxri) + *mxri = max_cfg_param->max_xri; + if (mvpi) { + max_vpi = max_cfg_param->max_vpi; + + /* Limit the max we support */ + if (max_vpi > LPFC_MAX_VPI) + max_vpi = LPFC_MAX_VPI; + *mvpi = max_vpi; + } + } else { /* Extents in use */ + if (mrpi) { + if (lpfc_sli4_get_avail_extnt_rsrc(phba, + LPFC_RSC_TYPE_FCOE_RPI, + &rsrc_ext_cnt, + &rsrc_ext_size)) { + rc = 0; + goto free_pmboxq; + } + + *mrpi = rsrc_ext_cnt * rsrc_ext_size; + } + + if (mxri) { + if (lpfc_sli4_get_avail_extnt_rsrc(phba, + LPFC_RSC_TYPE_FCOE_XRI, + &rsrc_ext_cnt, + &rsrc_ext_size)) { + rc = 0; + goto free_pmboxq; + } + + *mxri = rsrc_ext_cnt * rsrc_ext_size; + } + + if (mvpi) { + if (lpfc_sli4_get_avail_extnt_rsrc(phba, + LPFC_RSC_TYPE_FCOE_VPI, + &rsrc_ext_cnt, + &rsrc_ext_size)) { + rc = 0; + goto free_pmboxq; + } + + max_vpi = rsrc_ext_cnt * rsrc_ext_size; + + /* Limit the max we support */ + if (max_vpi > LPFC_MAX_VPI) + max_vpi = LPFC_MAX_VPI; + *mvpi = max_vpi; + } + } + } else { + if (mrpi) + *mrpi = pmb->un.varRdConfig.max_rpi; + if (arpi) + *arpi = pmb->un.varRdConfig.avail_rpi; + if (mxri) + *mxri = pmb->un.varRdConfig.max_xri; + if (axri) + *axri = pmb->un.varRdConfig.avail_xri; + if (mvpi) + *mvpi = pmb->un.varRdConfig.max_vpi; + if (avpi) { + /* avail_vpi is only valid if link is up and ready */ + if (phba->link_state == LPFC_HBA_READY) + *avpi = pmb->un.varRdConfig.avail_vpi; + else + *avpi = pmb->un.varRdConfig.max_vpi; + } + } + + /* Success */ + rc = 1; + +free_pmboxq: + mempool_free(pmboxq, phba->mbox_mem_pool); + return rc; +} + +/** + * lpfc_max_rpi_show - Return maximum rpi + * @dev: class device that is converted into a 
Scsi_host. + * @attr: device attribute, not used. + * @buf: on return contains the maximum rpi count in decimal or "Unknown". + * + * Description: + * Calls lpfc_get_hba_info() asking for just the mrpi count. + * If lpfc_get_hba_info() returns zero (failure) the buffer text is set + * to "Unknown" and the buffer length is returned, therefore the caller + * must check for "Unknown" in the buffer to detect a failure. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + uint32_t cnt; + + if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL)) + return scnprintf(buf, PAGE_SIZE, "%d\n", cnt); + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); +} + +/** + * lpfc_used_rpi_show - Return maximum rpi minus available rpi + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: containing the used rpi count in decimal or "Unknown". + * + * Description: + * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts. + * If lpfc_get_hba_info() returns zero (failure) the buffer text is set + * to "Unknown" and the buffer length is returned, therefore the caller + * must check for "Unknown" in the buffer to detect a failure. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli4_hba *sli4_hba; + struct lpfc_max_cfg_param *max_cfg_param; + u32 cnt = 0, acnt = 0; + + if (phba->sli_rev == LPFC_SLI_REV4) { + sli4_hba = &phba->sli4_hba; + max_cfg_param = &sli4_hba->max_cfg_param; + return scnprintf(buf, PAGE_SIZE, "%d\n", + max_cfg_param->rpi_used); + } else { + if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL)) + return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); + } + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); +} + +/** + * lpfc_max_xri_show - Return maximum xri + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: on return contains the maximum xri count in decimal or "Unknown". + * + * Description: + * Calls lpfc_get_hba_info() asking for just the mrpi count. + * If lpfc_get_hba_info() returns zero (failure) the buffer text is set + * to "Unknown" and the buffer length is returned, therefore the caller + * must check for "Unknown" in the buffer to detect a failure. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_max_xri_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + uint32_t cnt; + + if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL)) + return scnprintf(buf, PAGE_SIZE, "%d\n", cnt); + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); +} + +/** + * lpfc_used_xri_show - Return maximum xpi minus the available xpi + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: on return contains the used xri count in decimal or "Unknown". 
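
The used_rpi/used_xri/used_vpi attributes above all report "max minus available" and fall back to the string "Unknown" when lpfc_get_hba_info() fails, so readers of these files must treat "Unknown" as the failure indication. A user-space sketch of that show pattern follows; get_counts() is a hypothetical stand-in for lpfc_get_hba_info() and returns non-zero on success, matching the driver's convention.

#include <stdint.h>
#include <stdio.h>

static int get_counts(uint32_t *max, uint32_t *avail)
{
	*max = 4096;            /* canned values for the demo */
	*avail = 4000;
	return 1;               /* non-zero means success */
}

static int show_used(char *buf, size_t size)
{
	uint32_t max = 0, avail = 0;

	if (get_counts(&max, &avail))
		return snprintf(buf, size, "%u\n", max - avail);
	return snprintf(buf, size, "Unknown\n");  /* failure marker */
}

int main(void)
{
	char buf[32];

	show_used(buf, sizeof(buf));
	fputs(buf, stdout);     /* prints 96 */
	return 0;
}
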
+ * + * Description: + * Calls lpfc_get_hba_info() asking for just the mxri and axri counts. + * If lpfc_get_hba_info() returns zero (failure) the buffer text is set + * to "Unknown" and the buffer length is returned, therefore the caller + * must check for "Unknown" in the buffer to detect a failure. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_used_xri_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli4_hba *sli4_hba; + struct lpfc_max_cfg_param *max_cfg_param; + u32 cnt = 0, acnt = 0; + + if (phba->sli_rev == LPFC_SLI_REV4) { + sli4_hba = &phba->sli4_hba; + max_cfg_param = &sli4_hba->max_cfg_param; + return scnprintf(buf, PAGE_SIZE, "%d\n", + max_cfg_param->xri_used); + } else { + if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL)) + return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); + } + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); +} + +/** + * lpfc_max_vpi_show - Return maximum vpi + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: on return contains the maximum vpi count in decimal or "Unknown". + * + * Description: + * Calls lpfc_get_hba_info() asking for just the mvpi count. + * If lpfc_get_hba_info() returns zero (failure) the buffer text is set + * to "Unknown" and the buffer length is returned, therefore the caller + * must check for "Unknown" in the buffer to detect a failure. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + uint32_t cnt; + + if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL)) + return scnprintf(buf, PAGE_SIZE, "%d\n", cnt); + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); +} + +/** + * lpfc_used_vpi_show - Return maximum vpi minus the available vpi + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: on return contains the used vpi count in decimal or "Unknown". + * + * Description: + * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts. + * If lpfc_get_hba_info() returns zero (failure) the buffer text is set + * to "Unknown" and the buffer length is returned, therefore the caller + * must check for "Unknown" in the buffer to detect a failure. + * + * Returns: size of formatted string. 
+ **/ +static ssize_t +lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli4_hba *sli4_hba; + struct lpfc_max_cfg_param *max_cfg_param; + u32 cnt = 0, acnt = 0; + + if (phba->sli_rev == LPFC_SLI_REV4) { + sli4_hba = &phba->sli4_hba; + max_cfg_param = &sli4_hba->max_cfg_param; + return scnprintf(buf, PAGE_SIZE, "%d\n", + max_cfg_param->vpi_used); + } else { + if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt)) + return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); + } + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); +} + +/** + * lpfc_npiv_info_show - Return text about NPIV support for the adapter + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: text that must be interpreted to determine if npiv is supported. + * + * Description: + * Buffer will contain text indicating npiv is not suppoerted on the port, + * the port is an NPIV physical port, or it is an npiv virtual port with + * the id of the vport. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + if (!(phba->max_vpi)) + return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n"); + if (vport->port_type == LPFC_PHYSICAL_PORT) + return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n"); + return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi); +} + +/** + * lpfc_poll_show - Return text about poll support for the adapter + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: on return contains the cfg_poll in hex. + * + * Notes: + * cfg_poll should be a lpfc_polling_flags type. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_poll_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll); +} + +/** + * lpfc_poll_store - Set the value of cfg_poll for the adapter + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: one or more lpfc_polling_flags values. + * @count: not used. + * + * Notes: + * buf contents converted to integer and checked for a valid value. 
+ * + * Returns: + * -EINVAL if the buffer connot be converted or is out of range + * length of the buf on success + **/ +static ssize_t +lpfc_poll_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + uint32_t creg_val; + uint32_t old_val; + int val=0; + + if (!isdigit(buf[0])) + return -EINVAL; + + if (sscanf(buf, "%i", &val) != 1) + return -EINVAL; + + if ((val & 0x3) != val) + return -EINVAL; + + if (phba->sli_rev == LPFC_SLI_REV4) + val = 0; + + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "3051 lpfc_poll changed from %d to %d\n", + phba->cfg_poll, val); + + spin_lock_irq(&phba->hbalock); + + old_val = phba->cfg_poll; + + if (val & ENABLE_FCP_RING_POLLING) { + if ((val & DISABLE_FCP_RING_INT) && + !(old_val & DISABLE_FCP_RING_INT)) { + if (lpfc_readl(phba->HCregaddr, &creg_val)) { + spin_unlock_irq(&phba->hbalock); + return -EINVAL; + } + creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); + writel(creg_val, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + + lpfc_poll_start_timer(phba); + } + } else if (val != 0x0) { + spin_unlock_irq(&phba->hbalock); + return -EINVAL; + } + + if (!(val & DISABLE_FCP_RING_INT) && + (old_val & DISABLE_FCP_RING_INT)) + { + spin_unlock_irq(&phba->hbalock); + del_timer(&phba->fcp_poll_timer); + spin_lock_irq(&phba->hbalock); + if (lpfc_readl(phba->HCregaddr, &creg_val)) { + spin_unlock_irq(&phba->hbalock); + return -EINVAL; + } + creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); + writel(creg_val, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } + + phba->cfg_poll = val; + + spin_unlock_irq(&phba->hbalock); + + return strlen(buf); +} + +/** + * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains the formatted support level. + * + * Description: + * Returns the maximum number of virtual functions a physical function can + * support, 0 will be returned if called on virtual function. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_sriov_hw_max_virtfn_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + uint16_t max_nr_virtfn; + + max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba); + return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn); +} + +/** + * lpfc_enable_bbcr_set: Sets an attribute value. + * @phba: pointer to the adapter structure. + * @val: integer attribute value. + * + * Description: + * Validates the min and max values then sets the + * adapter config field if in the valid range. prints error message + * and does not set the parameter if invalid. 
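
lpfc_poll_store() above only accepts values whose set bits fall inside the two defined flags, and it rejects "disable FCP ring interrupts" unless ring polling is also enabled, which is why the module parameter later in this file documents 0, 1 and 3 as the meaningful settings. A tiny sketch of that validation follows; the two flag values are assumptions for illustration, not the driver's definitions.

#include <stdbool.h>
#include <stdio.h>

#define ENABLE_FCP_RING_POLLING 0x1     /* assumed flag value */
#define DISABLE_FCP_RING_INT    0x2     /* assumed flag value */

static bool poll_mode_valid(unsigned int val)
{
	if ((val & 0x3) != val)             /* only the two defined bits allowed */
		return false;
	/* disabling ring interrupts is only legal while polling is enabled */
	if ((val & DISABLE_FCP_RING_INT) && !(val & ENABLE_FCP_RING_POLLING))
		return false;
	return true;
}

int main(void)
{
	printf("0:%d 1:%d 2:%d 3:%d\n",
	       poll_mode_valid(0), poll_mode_valid(1),
	       poll_mode_valid(2), poll_mode_valid(3));  /* 0:1 1:1 2:0 3:1 */
	return 0;
}
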
+ * + * Returns: + * zero on success + * -EINVAL if val is invalid + */ +static ssize_t +lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val) +{ + if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3068 lpfc_enable_bbcr changed from %d to " + "%d\n", phba->cfg_enable_bbcr, val); + phba->cfg_enable_bbcr = val; + return 0; + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0451 lpfc_enable_bbcr cannot set to %d, range is 0, " + "1\n", val); + return -EINVAL; +} + +/* + * lpfc_param_show - Return a cfg attribute value in decimal + * + * Description: + * Macro that given an attr e.g. hba_queue_depth expands + * into a function with the name lpfc_hba_queue_depth_show. + * + * lpfc_##attr##_show: Return the decimal value of an adapters cfg_xxx field. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: on return contains the attribute value in decimal. + * + * Returns: size of formatted string. + **/ +#define lpfc_param_show(attr) \ +static ssize_t \ +lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct Scsi_Host *shost = class_to_shost(dev);\ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ + struct lpfc_hba *phba = vport->phba;\ + return scnprintf(buf, PAGE_SIZE, "%d\n",\ + phba->cfg_##attr);\ +} + +/* + * lpfc_param_hex_show - Return a cfg attribute value in hex + * + * Description: + * Macro that given an attr e.g. hba_queue_depth expands + * into a function with the name lpfc_hba_queue_depth_show + * + * lpfc_##attr##_show: Return the hex value of an adapters cfg_xxx field. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: on return contains the attribute value in hexadecimal. + * + * Returns: size of formatted string. + **/ +#define lpfc_param_hex_show(attr) \ +static ssize_t \ +lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct Scsi_Host *shost = class_to_shost(dev);\ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ + struct lpfc_hba *phba = vport->phba;\ + uint val = 0;\ + val = phba->cfg_##attr;\ + return scnprintf(buf, PAGE_SIZE, "%#x\n",\ + phba->cfg_##attr);\ +} + +/* + * lpfc_param_init - Initializes a cfg attribute + * + * Description: + * Macro that given an attr e.g. hba_queue_depth expands + * into a function with the name lpfc_hba_queue_depth_init. The macro also + * takes a default argument, a minimum and maximum argument. + * + * lpfc_##attr##_init: Initializes an attribute. + * @phba: pointer to the adapter structure. + * @val: integer attribute value. + * + * Validates the min and max values then sets the adapter config field + * accordingly, or uses the default if out of range and prints an error message. + * + * Returns: + * zero on success + * -EINVAL if default used + **/ +#define lpfc_param_init(attr, default, minval, maxval) \ +static int \ +lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \ +{ \ + if (lpfc_rangecheck(val, minval, maxval)) {\ + phba->cfg_##attr = val;\ + return 0;\ + }\ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ + "0449 lpfc_"#attr" attribute cannot be set to %d, "\ + "allowed range is ["#minval", "#maxval"]\n", val); \ + phba->cfg_##attr = default;\ + return -EINVAL;\ +} + +/* + * lpfc_param_set - Set a cfg attribute value + * + * Description: + * Macro that given an attr e.g. 
hba_queue_depth expands + * into a function with the name lpfc_hba_queue_depth_set + * + * lpfc_##attr##_set: Sets an attribute value. + * @phba: pointer to the adapter structure. + * @val: integer attribute value. + * + * Description: + * Validates the min and max values then sets the + * adapter config field if in the valid range. prints error message + * and does not set the parameter if invalid. + * + * Returns: + * zero on success + * -EINVAL if val is invalid + **/ +#define lpfc_param_set(attr, default, minval, maxval) \ +static int \ +lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \ +{ \ + if (lpfc_rangecheck(val, minval, maxval)) {\ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ + "3052 lpfc_" #attr " changed from %d to %d\n", \ + phba->cfg_##attr, val); \ + phba->cfg_##attr = val;\ + return 0;\ + }\ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ + "0450 lpfc_"#attr" attribute cannot be set to %d, "\ + "allowed range is ["#minval", "#maxval"]\n", val); \ + return -EINVAL;\ +} + +/* + * lpfc_param_store - Set a vport attribute value + * + * Description: + * Macro that given an attr e.g. hba_queue_depth expands + * into a function with the name lpfc_hba_queue_depth_store. + * + * lpfc_##attr##_store: Set an sttribute value. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: contains the attribute value in ascii. + * @count: not used. + * + * Description: + * Convert the ascii text number to an integer, then + * use the lpfc_##attr##_set function to set the value. + * + * Returns: + * -EINVAL if val is invalid or lpfc_##attr##_set() fails + * length of buffer upon success. + **/ +#define lpfc_param_store(attr) \ +static ssize_t \ +lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + struct Scsi_Host *shost = class_to_shost(dev);\ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ + struct lpfc_hba *phba = vport->phba;\ + uint val = 0;\ + if (!isdigit(buf[0]))\ + return -EINVAL;\ + if (sscanf(buf, "%i", &val) != 1)\ + return -EINVAL;\ + if (lpfc_##attr##_set(phba, val) == 0) \ + return strlen(buf);\ + else \ + return -EINVAL;\ +} + +/* + * lpfc_vport_param_show - Return decimal formatted cfg attribute value + * + * Description: + * Macro that given an attr e.g. hba_queue_depth expands + * into a function with the name lpfc_hba_queue_depth_show + * + * lpfc_##attr##_show: prints the attribute value in decimal. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: on return contains the attribute value in decimal. + * + * Returns: length of formatted string. + **/ +#define lpfc_vport_param_show(attr) \ +static ssize_t \ +lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct Scsi_Host *shost = class_to_shost(dev);\ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ + return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\ +} + +/* + * lpfc_vport_param_hex_show - Return hex formatted attribute value + * + * Description: + * Macro that given an attr e.g. + * hba_queue_depth expands into a function with the name + * lpfc_hba_queue_depth_show + * + * lpfc_##attr##_show: prints the attribute value in hexadecimal. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: on return contains the attribute value in hexadecimal. + * + * Returns: length of formatted string. 
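
The lpfc_param_store macro above guards its input with isdigit() on the first character and then parses with sscanf("%i", ...), so the store functions it generates accept decimal, octal or hex text. A standalone sketch of that parse step; parse_val() is a hypothetical name, not a driver helper.

#include <ctype.h>
#include <stdio.h>

static int parse_val(const char *buf, int *out)
{
	if (!isdigit((unsigned char)buf[0]))  /* mirrors the isdigit() guard */
		return -1;
	if (sscanf(buf, "%i", out) != 1)      /* "%i" takes decimal, octal or hex */
		return -1;
	return 0;
}

int main(void)
{
	int v;

	if (!parse_val("0x20", &v))
		printf("%d\n", v);            /* prints 32 */
	return 0;
}
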
+ **/ +#define lpfc_vport_param_hex_show(attr) \ +static ssize_t \ +lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct Scsi_Host *shost = class_to_shost(dev);\ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ + return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ +} + +/* + * lpfc_vport_param_init - Initialize a vport cfg attribute + * + * Description: + * Macro that given an attr e.g. hba_queue_depth expands + * into a function with the name lpfc_hba_queue_depth_init. The macro also + * takes a default argument, a minimum and maximum argument. + * + * lpfc_##attr##_init: validates the min and max values then sets the + * adapter config field accordingly, or uses the default if out of range + * and prints an error message. + * @phba: pointer to the adapter structure. + * @val: integer attribute value. + * + * Returns: + * zero on success + * -EINVAL if default used + **/ +#define lpfc_vport_param_init(attr, default, minval, maxval) \ +static int \ +lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \ +{ \ + if (lpfc_rangecheck(val, minval, maxval)) {\ + vport->cfg_##attr = val;\ + return 0;\ + }\ + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ + "0423 lpfc_"#attr" attribute cannot be set to %d, "\ + "allowed range is ["#minval", "#maxval"]\n", val); \ + vport->cfg_##attr = default;\ + return -EINVAL;\ +} + +/* + * lpfc_vport_param_set - Set a vport cfg attribute + * + * Description: + * Macro that given an attr e.g. hba_queue_depth expands + * into a function with the name lpfc_hba_queue_depth_set + * + * lpfc_##attr##_set: validates the min and max values then sets the + * adapter config field if in the valid range. prints error message + * and does not set the parameter if invalid. + * @phba: pointer to the adapter structure. + * @val: integer attribute value. + * + * Returns: + * zero on success + * -EINVAL if val is invalid + **/ +#define lpfc_vport_param_set(attr, default, minval, maxval) \ +static int \ +lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ +{ \ + if (lpfc_rangecheck(val, minval, maxval)) {\ + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ + "3053 lpfc_" #attr \ + " changed from %d (x%x) to %d (x%x)\n", \ + vport->cfg_##attr, vport->cfg_##attr, \ + val, val); \ + vport->cfg_##attr = val;\ + return 0;\ + }\ + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ + "0424 lpfc_"#attr" attribute cannot be set to %d, "\ + "allowed range is ["#minval", "#maxval"]\n", val); \ + return -EINVAL;\ +} + +/* + * lpfc_vport_param_store - Set a vport attribute + * + * Description: + * Macro that given an attr e.g. hba_queue_depth + * expands into a function with the name lpfc_hba_queue_depth_store + * + * lpfc_##attr##_store: convert the ascii text number to an integer, then + * use the lpfc_##attr##_set function to set the value. + * @cdev: class device that is converted into a Scsi_host. + * @buf: contains the attribute value in decimal. + * @count: not used. + * + * Returns: + * -EINVAL if val is invalid or lpfc_##attr##_set() fails + * length of buffer upon success. 
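
The lpfc_param_* and lpfc_vport_param_* macros above rely on token pasting: each invocation stamps out a full show, store, init or set function whose name embeds the attribute. The compressed, self-contained illustration below shows only that technique; the struct, macro and attribute names are invented for the demo and do not correspond to driver symbols.

#include <stdio.h>

struct cfg { int foo; };

#define DEFINE_SHOW(attr)                                               \
static int attr##_show(const struct cfg *c, char *buf, size_t sz)       \
{                                                                       \
	return snprintf(buf, sz, "%d\n", c->attr);                      \
}

DEFINE_SHOW(foo)        /* expands to a function named foo_show() */

int main(void)
{
	struct cfg c = { .foo = 32 };
	char buf[16];

	foo_show(&c, buf, sizeof(buf));
	printf("%s", buf);   /* prints 32 */
	return 0;
}
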
+ **/ +#define lpfc_vport_param_store(attr) \ +static ssize_t \ +lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + struct Scsi_Host *shost = class_to_shost(dev);\ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ + uint val = 0;\ + if (!isdigit(buf[0]))\ + return -EINVAL;\ + if (sscanf(buf, "%i", &val) != 1)\ + return -EINVAL;\ + if (lpfc_##attr##_set(vport, val) == 0) \ + return strlen(buf);\ + else \ + return -EINVAL;\ +} + + +static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL); +static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL); +static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL); +static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL); +static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL); +static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL); +static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL); +static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL); +static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL); +static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL); +static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL); +static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL); +static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL); +static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL); +static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show, + lpfc_link_state_store); +static DEVICE_ATTR(option_rom_version, S_IRUGO, + lpfc_option_rom_version_show, NULL); +static DEVICE_ATTR(num_discovered_ports, S_IRUGO, + lpfc_num_discovered_ports_show, NULL); +static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); +static DEVICE_ATTR_RO(lpfc_drvr_version); +static DEVICE_ATTR_RO(lpfc_enable_fip); +static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, + lpfc_board_mode_show, lpfc_board_mode_store); +static DEVICE_ATTR_RO(lpfc_xcvr_data); +static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); +static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL); +static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL); +static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL); +static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL); +static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL); +static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL); +static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL); +static DEVICE_ATTR_RO(lpfc_temp_sensor); +static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn); +static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); +static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show, + NULL); +static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL); + +#define WWN_SZ 8 +/** + * lpfc_wwn_set - Convert string to the 8 byte WWN value. + * @buf: WWN string. + * @cnt: Length of string. + * @wwn: Array to receive converted wwn value. 
+ * + * Returns: + * -EINVAL if the buffer does not contain a valid wwn + * 0 success + **/ +static size_t +lpfc_wwn_set(const char *buf, size_t cnt, char wwn[]) +{ + unsigned int i, j; + + /* Count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) || + ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) + return -EINVAL; + + memset(wwn, 0, WWN_SZ); + + /* Validate and store the new name */ + for (i = 0, j = 0; i < 16; i++) { + if ((*buf >= 'a') && (*buf <= 'f')) + j = ((j << 4) | ((*buf++ - 'a') + 10)); + else if ((*buf >= 'A') && (*buf <= 'F')) + j = ((j << 4) | ((*buf++ - 'A') + 10)); + else if ((*buf >= '0') && (*buf <= '9')) + j = ((j << 4) | (*buf++ - '0')); + else + return -EINVAL; + if (i % 2) { + wwn[i/2] = j & 0xff; + j = 0; + } + } + return 0; +} + + +/** + * lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for + * Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * Returns: + * value of count + **/ +static ssize_t +lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + return scnprintf(buf, PAGE_SIZE, "0x%llx\n", + wwn_to_u64(phba->cfg_oas_tgt_wwpn)); +} + +/** + * lpfc_oas_tgt_store - Store wwpn of target whose luns maybe enabled for + * Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * @count: Size of the data buffer. + * + * Returns: + * -EINVAL count is invalid, invalid wwpn byte invalid + * -EPERM oas is not supported by hba + * value of count on success + **/ +static ssize_t +lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + unsigned int cnt = count; + uint8_t wwpn[WWN_SZ]; + int rc; + + if (!phba->cfg_fof) + return -EPERM; + + /* count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + rc = lpfc_wwn_set(buf, cnt, wwpn); + if (rc) + return rc; + + memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t))); + memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t))); + if (wwn_to_u64(wwpn) == 0) + phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET; + else + phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET; + phba->cfg_oas_flags &= ~OAS_LUN_VALID; + phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; + return count; +} +static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR, + lpfc_oas_tgt_show, lpfc_oas_tgt_store); + +/** + * lpfc_oas_priority_show - Return wwpn of target whose luns maybe enabled for + * Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. 
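
lpfc_wwn_set() above turns a 16-hex-digit WWN string (optionally "x"- or "0x"-prefixed, possibly ending in a newline) into eight packed bytes by accumulating two nibbles per output byte. The standalone user-space sketch below performs the same packing; it handles only the plain and "0x" forms and skips the length bookkeeping the driver does.

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int parse_wwn(const char *s, uint8_t wwn[8])
{
	unsigned int i, nib, acc = 0;

	if (!strncmp(s, "0x", 2) || !strncmp(s, "0X", 2))
		s += 2;
	if (strlen(s) < 16)
		return -1;
	for (i = 0; i < 16; i++) {
		if (!isxdigit((unsigned char)s[i]))
			return -1;
		nib = isdigit((unsigned char)s[i]) ?
			(unsigned int)(s[i] - '0') :
			(unsigned int)(tolower((unsigned char)s[i]) - 'a' + 10);
		acc = (acc << 4) | nib;
		if (i & 1) {                /* two nibbles collected: emit a byte */
			wwn[i / 2] = acc & 0xff;
			acc = 0;
		}
	}
	return 0;
}

int main(void)
{
	uint8_t wwn[8];

	if (!parse_wwn("0x10000000c9abcdef", wwn))
		printf("first byte: 0x%02x\n", wwn[0]);   /* prints 0x10 */
	return 0;
}
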
+ * + * Returns: + * value of count + **/ +static ssize_t +lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority); +} + +/** + * lpfc_oas_priority_store - Store wwpn of target whose luns maybe enabled for + * Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * @count: Size of the data buffer. + * + * Returns: + * -EINVAL count is invalid, invalid wwpn byte invalid + * -EPERM oas is not supported by hba + * value of count on success + **/ +static ssize_t +lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + unsigned int cnt = count; + unsigned long val; + int ret; + + if (!phba->cfg_fof) + return -EPERM; + + /* count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + ret = kstrtoul(buf, 0, &val); + if (ret || (val > 0x7f)) + return -EINVAL; + + if (val) + phba->cfg_oas_priority = (uint8_t)val; + else + phba->cfg_oas_priority = phba->cfg_XLanePriority; + return count; +} +static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR, + lpfc_oas_priority_show, lpfc_oas_priority_store); + +/** + * lpfc_oas_vpt_show - Return wwpn of vport whose targets maybe enabled + * for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * Returns: + * value of count on success + **/ +static ssize_t +lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + return scnprintf(buf, PAGE_SIZE, "0x%llx\n", + wwn_to_u64(phba->cfg_oas_vpt_wwpn)); +} + +/** + * lpfc_oas_vpt_store - Store wwpn of vport whose targets maybe enabled + * for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * @count: Size of the data buffer. 
+ * + * Returns: + * -EINVAL count is invalid, invalid wwpn byte invalid + * -EPERM oas is not supported by hba + * value of count on success + **/ +static ssize_t +lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + unsigned int cnt = count; + uint8_t wwpn[WWN_SZ]; + int rc; + + if (!phba->cfg_fof) + return -EPERM; + + /* count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + rc = lpfc_wwn_set(buf, cnt, wwpn); + if (rc) + return rc; + + memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); + memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); + if (wwn_to_u64(wwpn) == 0) + phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT; + else + phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT; + phba->cfg_oas_flags &= ~OAS_LUN_VALID; + if (phba->cfg_oas_priority == 0) + phba->cfg_oas_priority = phba->cfg_XLanePriority; + phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; + return count; +} +static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR, + lpfc_oas_vpt_show, lpfc_oas_vpt_store); + +/** + * lpfc_oas_lun_state_show - Return the current state (enabled or disabled) + * of whether luns will be enabled or disabled + * for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * Returns: + * size of formatted string. + **/ +static ssize_t +lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state); +} + +/** + * lpfc_oas_lun_state_store - Store the state (enabled or disabled) + * of whether luns will be enabled or disabled + * for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * @count: Size of the data buffer. + * + * Returns: + * -EINVAL count is invalid, invalid wwpn byte invalid + * -EPERM oas is not supported by hba + * value of count on success + **/ +static ssize_t +lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + int val = 0; + + if (!phba->cfg_fof) + return -EPERM; + + if (!isdigit(buf[0])) + return -EINVAL; + + if (sscanf(buf, "%i", &val) != 1) + return -EINVAL; + + if ((val != 0) && (val != 1)) + return -EINVAL; + + phba->cfg_oas_lun_state = val; + return strlen(buf); +} +static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR, + lpfc_oas_lun_state_show, lpfc_oas_lun_state_store); + +/** + * lpfc_oas_lun_status_show - Return the status of the Optimized Access + * Storage (OAS) lun returned by the + * lpfc_oas_lun_show function. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * Returns: + * size of formatted string. 
+ **/ +static ssize_t +lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + if (!(phba->cfg_oas_flags & OAS_LUN_VALID)) + return -EFAULT; + + return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status); +} +static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO, + lpfc_oas_lun_status_show, NULL); + + +/** + * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage + * (OAS) operations. + * @phba: lpfc_hba pointer. + * @vpt_wwpn: wwpn of the vport associated with the returned lun + * @tgt_wwpn: wwpn of the target associated with the returned lun + * @lun: the fc lun for setting oas state. + * @oas_state: the oas state to be set to the lun. + * @pri: priority + * + * Returns: + * SUCCESS : 0 + * -EPERM OAS is not enabled or not supported by this port. + * + */ +static size_t +lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[], + uint8_t tgt_wwpn[], uint64_t lun, + uint32_t oas_state, uint8_t pri) +{ + + int rc = 0; + + if (!phba->cfg_fof) + return -EPERM; + + if (oas_state) { + if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, + (struct lpfc_name *)tgt_wwpn, + lun, pri)) + rc = -ENOMEM; + } else { + lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, + (struct lpfc_name *)tgt_wwpn, lun, pri); + } + return rc; + +} + +/** + * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized + * Access Storage (OAS) operations. + * @phba: lpfc_hba pointer. + * @vpt_wwpn: wwpn of the vport associated with the returned lun + * @tgt_wwpn: wwpn of the target associated with the returned lun + * @lun_status: status of the lun returned lun + * @lun_pri: priority of the lun returned lun + * + * Returns the first or next lun enabled for OAS operations for the vport/target + * specified. If a lun is found, its vport wwpn, target wwpn and status is + * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned. + * + * Return: + * lun that is OAS enabled for the vport/target + * NOT_OAS_ENABLED_LUN when no oas enabled lun found. + */ +static uint64_t +lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[], + uint8_t tgt_wwpn[], uint32_t *lun_status, + uint32_t *lun_pri) +{ + uint64_t found_lun; + + if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn) + return NOT_OAS_ENABLED_LUN; + if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *) + phba->sli4_hba.oas_next_vpt_wwpn, + (struct lpfc_name *) + phba->sli4_hba.oas_next_tgt_wwpn, + &phba->sli4_hba.oas_next_lun, + (struct lpfc_name *)vpt_wwpn, + (struct lpfc_name *)tgt_wwpn, + &found_lun, lun_status, lun_pri)) + return found_lun; + else + return NOT_OAS_ENABLED_LUN; +} + +/** + * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations + * @phba: lpfc_hba pointer. + * @vpt_wwpn: vport wwpn by reference. + * @tgt_wwpn: target wwpn by reference. + * @lun: the fc lun for setting oas state. + * @oas_state: the oas state to be set to the oas_lun. + * @pri: priority + * + * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE) + * a lun for OAS operations. 
+ * + * Return: + * SUCCESS: 0 + * -ENOMEM: failed to enable an lun for OAS operations + * -EPERM: OAS is not enabled + */ +static ssize_t +lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[], + uint8_t tgt_wwpn[], uint64_t lun, + uint32_t oas_state, uint8_t pri) +{ + + int rc; + + rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun, + oas_state, pri); + return rc; +} + +/** + * lpfc_oas_lun_show - Return oas enabled luns from a chosen target + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * This routine returns a lun enabled for OAS each time the function + * is called. + * + * Returns: + * SUCCESS: size of formatted string. + * -EFAULT: target or vport wwpn was not set properly. + * -EPERM: oas is not enabled. + **/ +static ssize_t +lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + uint64_t oas_lun; + int len = 0; + + if (!phba->cfg_fof) + return -EPERM; + + if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) + if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)) + return -EFAULT; + + if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) + if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)) + return -EFAULT; + + oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn, + phba->cfg_oas_tgt_wwpn, + &phba->cfg_oas_lun_status, + &phba->cfg_oas_priority); + if (oas_lun != NOT_OAS_ENABLED_LUN) + phba->cfg_oas_flags |= OAS_LUN_VALID; + + len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun); + + return len; +} + +/** + * lpfc_oas_lun_store - Sets the OAS state for lun + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * @count: size of the formatting string + * + * This function sets the OAS state for lun. Before this function is called, + * the vport wwpn, target wwpn, and oas state need to be set. + * + * Returns: + * SUCCESS: size of formatted string. + * -EFAULT: target or vport wwpn was not set properly. + * -EPERM: oas is not enabled. + * size of formatted string. 
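
The OAS (lpfc_xlane_*) attributes in this file are meant to be written in sequence: select the vport WWPN, then the target WWPN, optionally the state and priority, and finally the LUN, at which point the lun store handler applies the change. A user-space sketch of that sequence follows; the sysfs base path and the WWPN/LUN values are hypothetical examples only.

#include <stdio.h>

static int write_attr(const char *base, const char *attr, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", base, attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	const char *base = "/sys/class/scsi_host/host0";  /* hypothetical host */

	write_attr(base, "lpfc_xlane_vpt", "0x10000000c9000001");
	write_attr(base, "lpfc_xlane_tgt", "0x20000000c9000002");
	write_attr(base, "lpfc_xlane_lun_state", "1");    /* 1 = enable OAS */
	write_attr(base, "lpfc_xlane_lun", "0x0");        /* apply to LUN 0 */
	return 0;
}
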
+ **/ +static ssize_t +lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + uint64_t scsi_lun; + uint32_t pri; + ssize_t rc; + + if (!phba->cfg_fof) + return -EPERM; + + if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) + return -EFAULT; + + if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) + return -EFAULT; + + if (!isdigit(buf[0])) + return -EINVAL; + + if (sscanf(buf, "0x%llx", &scsi_lun) != 1) + return -EINVAL; + + pri = phba->cfg_oas_priority; + if (pri == 0) + pri = phba->cfg_XLanePriority; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx " + "priority 0x%x with oas state %d\n", + wwn_to_u64(phba->cfg_oas_vpt_wwpn), + wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun, + pri, phba->cfg_oas_lun_state); + + rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn, + phba->cfg_oas_tgt_wwpn, scsi_lun, + phba->cfg_oas_lun_state, pri); + if (rc) + return rc; + + return count; +} +static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR, + lpfc_oas_lun_show, lpfc_oas_lun_store); + +int lpfc_enable_nvmet_cnt; +unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444); +MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target"); + +static int lpfc_poll = 0; +module_param(lpfc_poll, int, S_IRUGO); +MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" + " 0 - none," + " 1 - poll with interrupts enabled" + " 3 - poll and disable FCP ring interrupts"); + +static DEVICE_ATTR_RW(lpfc_poll); + +int lpfc_no_hba_reset_cnt; +unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444); +MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset"); + +LPFC_ATTR(sli_mode, 3, 3, 3, + "SLI mode selector: 3 - select SLI-3"); + +LPFC_ATTR_R(enable_npiv, 1, 0, 1, + "Enable NPIV functionality"); + +LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, + "FCF Fast failover=1 Priority failover=2"); + +/* + * lpfc_fcp_wait_abts_rsp: Modifies criteria for reporting completion of + * aborted IO. + * The range is [0,1]. Default value is 0 + * 0, IO completes after ABTS issued (default). + * 1, IO completes after receipt of ABTS response or timeout. + */ +LPFC_ATTR_R(fcp_wait_abts_rsp, 0, 0, 1, "Wait for FCP ABTS completion"); + +/* +# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures +# 0x0 = disabled, XRI/OXID use not tracked. +# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent. +# 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent. +*/ +LPFC_ATTR_R(enable_rrq, 2, 0, 2, + "Enable RRQ functionality"); + +/* +# lpfc_suppress_link_up: Bring link up at initialization +# 0x0 = bring link up (issue MBX_INIT_LINK) +# 0x1 = do NOT bring link up at initialization(MBX_INIT_LINK) +# 0x2 = never bring up link +# Default value is 0. 
+*/ +LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK, + LPFC_DELAY_INIT_LINK_INDEFINITELY, + "Suppress Link Up at initialization"); + +static ssize_t +lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + return scnprintf(buf, PAGE_SIZE, "%d\n", + phba->sli4_hba.pc_sli4_params.pls); +} +static DEVICE_ATTR(pls, 0444, + lpfc_pls_show, NULL); + +static ssize_t +lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + return scnprintf(buf, PAGE_SIZE, "%d\n", + (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0); +} +static DEVICE_ATTR(pt, 0444, + lpfc_pt_show, NULL); + +/* +# lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS +# 1 - (1024) +# 2 - (2048) +# 3 - (3072) +# 4 - (4096) +# 5 - (5120) +*/ +static ssize_t +lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; + + return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max); +} + +static DEVICE_ATTR(iocb_hw, S_IRUGO, + lpfc_iocb_hw_show, NULL); +static ssize_t +lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; + struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); + + return scnprintf(buf, PAGE_SIZE, "%d\n", + pring ? pring->txq_max : 0); +} + +static DEVICE_ATTR(txq_hw, S_IRUGO, + lpfc_txq_hw_show, NULL); +static ssize_t +lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; + struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); + + return scnprintf(buf, PAGE_SIZE, "%d\n", + pring ? pring->txcmplq_max : 0); +} + +static DEVICE_ATTR(txcmplq_hw, S_IRUGO, + lpfc_txcmplq_hw_show, NULL); + +/* +# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear +# until the timer expires. Value range is [0,255]. Default value is 30. +*/ +static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO; +static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO; +module_param(lpfc_nodev_tmo, int, 0); +MODULE_PARM_DESC(lpfc_nodev_tmo, + "Seconds driver will hold I/O waiting " + "for a device to come back"); + +/** + * lpfc_nodev_tmo_show - Return the hba dev loss timeout value + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains the dev loss timeout in decimal. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + + return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); +} + +/** + * lpfc_nodev_tmo_init - Set the hba nodev timeout value + * @vport: lpfc vport structure pointer. + * @val: contains the nodev timeout value. + * + * Description: + * If the devloss tmo is already set then nodev tmo is set to devloss tmo, + * a kernel error message is printed and zero is returned. 
+ * Else if val is in range then nodev tmo and devloss tmo are set to val. + * Otherwise nodev tmo is set to the default value. + * + * Returns: + * zero if already set or if val is in range + * -EINVAL val out of range + **/ +static int +lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val) +{ + if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) { + vport->cfg_nodev_tmo = vport->cfg_devloss_tmo; + if (val != LPFC_DEF_DEVLOSS_TMO) + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0407 Ignoring lpfc_nodev_tmo module " + "parameter because lpfc_devloss_tmo " + "is set.\n"); + return 0; + } + + if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { + vport->cfg_nodev_tmo = val; + vport->cfg_devloss_tmo = val; + return 0; + } + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0400 lpfc_nodev_tmo attribute cannot be set to" + " %d, allowed range is [%d, %d]\n", + val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); + vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO; + return -EINVAL; +} + +/** + * lpfc_update_rport_devloss_tmo - Update dev loss tmo value + * @vport: lpfc vport structure pointer. + * + * Description: + * Update all the ndlp's dev loss tmo with the vport devloss tmo value. + **/ +static void +lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost; + struct lpfc_nodelist *ndlp; +#if (IS_ENABLED(CONFIG_NVME_FC)) + struct lpfc_nvme_rport *rport; + struct nvme_fc_remote_port *remoteport = NULL; +#endif + + shost = lpfc_shost_from_vport(vport); + spin_lock_irq(shost->host_lock); + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + if (ndlp->rport) + ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; +#if (IS_ENABLED(CONFIG_NVME_FC)) + spin_lock(&ndlp->lock); + rport = lpfc_ndlp_get_nrport(ndlp); + if (rport) + remoteport = rport->remoteport; + spin_unlock(&ndlp->lock); + if (rport && remoteport) + nvme_fc_set_remoteport_devloss(remoteport, + vport->cfg_devloss_tmo); +#endif + } + spin_unlock_irq(shost->host_lock); +} + +/** + * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values + * @vport: lpfc vport structure pointer. + * @val: contains the tmo value. + * + * Description: + * If the devloss tmo is already set or the vport dev loss tmo has changed + * then a kernel error message is printed and zero is returned. + * Else if val is in range then nodev tmo and devloss tmo are set to val. + * Otherwise nodev tmo is set to the default value. + * + * Returns: + * zero if already set or if val is in range + * -EINVAL val out of range + **/ +static int +lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val) +{ + if (vport->dev_loss_tmo_changed || + (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0401 Ignoring change to lpfc_nodev_tmo " + "because lpfc_devloss_tmo is set.\n"); + return 0; + } + if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { + vport->cfg_nodev_tmo = val; + vport->cfg_devloss_tmo = val; + /* + * For compat: set the fc_host dev loss so new rports + * will get the value. 
+ */
+ fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
+ lpfc_update_rport_devloss_tmo(vport);
+ return 0;
+ }
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0403 lpfc_nodev_tmo attribute cannot be set to "
+ "%d, allowed range is [%d, %d]\n",
+ val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
+ return -EINVAL;
+}
+
+lpfc_vport_param_store(nodev_tmo)
+
+static DEVICE_ATTR_RW(lpfc_nodev_tmo);
+
+/*
+# lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
+# disappear until the timer expires. Value range is [0,255]. Default
+# value is 30.
+*/
+module_param(lpfc_devloss_tmo, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_devloss_tmo,
+ "Seconds driver will hold I/O waiting "
+ "for a device to come back");
+lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
+ LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
+lpfc_vport_param_show(devloss_tmo)
+
+/**
+ * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the tmo value.
+ *
+ * Description:
+ * If val is in a valid range then set the vport nodev tmo,
+ * devloss tmo, also set the vport dev loss tmo changed flag.
+ * Else a kernel error message is printed.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
+{
+ if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+ vport->cfg_nodev_tmo = val;
+ vport->cfg_devloss_tmo = val;
+ vport->dev_loss_tmo_changed = 1;
+ fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
+ lpfc_update_rport_devloss_tmo(vport);
+ return 0;
+ }
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0404 lpfc_devloss_tmo attribute cannot be set to "
+ "%d, allowed range is [%d, %d]\n",
+ val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
+ return -EINVAL;
+}
+
+lpfc_vport_param_store(devloss_tmo)
+static DEVICE_ATTR_RW(lpfc_devloss_tmo);
+
+/*
+ * lpfc_suppress_rsp: Enable suppress rsp feature if firmware supports it
+ * lpfc_suppress_rsp = 0 Disable
+ * lpfc_suppress_rsp = 1 Enable (default)
+ *
+ */
+LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
+ "Enable suppress rsp feature if firmware supports it");
+
+/*
+ * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
+ * lpfc_nvmet_mrq = 0 driver will calculate optimal number of RQ pairs
+ * lpfc_nvmet_mrq = 1 use a single RQ pair
+ * lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
+ *
+ */
+LPFC_ATTR_R(nvmet_mrq,
+ LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
+ "Specify number of RQ pairs for processing NVMET cmds");
+
+/*
+ * lpfc_nvmet_mrq_post: Specify number of RQ buffers to initially post
+ * to each NVMET RQ. Range 64 to 2048, default is 512.
+ */
+LPFC_ATTR_R(nvmet_mrq_post,
+ LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
+ LPFC_NVMET_RQE_DEF_COUNT,
+ "Specify number of RQ buffers to initially post");
+
+/*
+ * lpfc_enable_fc4_type: Defines what FC4 types are supported.
+ * Supported Values: 1 - register just FCP
+ * 3 - register both FCP and NVME
+ * Supported values are [1,3]. Default value is 3
+ */
+LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
+ LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
+ "Enable FC4 Protocol support - FCP / NVME");
+
+/*
+# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
+# deluged with LOTS of information.
+# You can set a bit mask to record specific types of verbose messages:
+# See lpfc_logmsg.h for definitions.
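+#
+# Illustrative example (assumes the usual scsi_host sysfs location for these
+# attributes, which is not shown in this file): every message class can be
+# enabled at runtime with e.g.
+#     echo 0xffffffff > /sys/class/scsi_host/hostN/lpfc_log_verbose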
+*/ +LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff, + "Verbose logging bit-mask"); + +/* +# lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters +# objects that have been registered with the nameserver after login. +*/ +LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1, + "Deregister nameserver objects before LOGO"); + +/* +# lun_queue_depth: This parameter is used to limit the number of outstanding +# commands per FCP LUN. +*/ +LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512, + "Max number of FCP commands we can queue to a specific LUN"); + +/* +# tgt_queue_depth: This parameter is used to limit the number of outstanding +# commands per target port. Value range is [10,65535]. Default value is 65535. +*/ +static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH; +module_param(lpfc_tgt_queue_depth, uint, 0444); +MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth"); +lpfc_vport_param_show(tgt_queue_depth); +lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH, + LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH); + +/** + * lpfc_tgt_queue_depth_set: Sets an attribute value. + * @vport: lpfc vport structure pointer. + * @val: integer attribute value. + * + * Description: Sets the parameter to the new value. + * + * Returns: + * zero on success + * -EINVAL if val is invalid + */ +static int +lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp; + + if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH)) + return -EINVAL; + + if (val == vport->cfg_tgt_queue_depth) + return 0; + + spin_lock_irq(shost->host_lock); + vport->cfg_tgt_queue_depth = val; + + /* Next loop thru nodelist and change cmd_qdepth */ + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) + ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; + + spin_unlock_irq(shost->host_lock); + return 0; +} + +lpfc_vport_param_store(tgt_queue_depth); +static DEVICE_ATTR_RW(lpfc_tgt_queue_depth); + +/* +# hba_queue_depth: This parameter is used to limit the number of outstanding +# commands per lpfc HBA. Value range is [32,8192]. If this parameter +# value is greater than the maximum number of exchanges supported by the HBA, +# then maximum number of exchanges supported by the HBA is used to determine +# the hba_queue_depth. +*/ +LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192, + "Max number of FCP commands we can queue to a lpfc HBA"); + +/* +# peer_port_login: This parameter allows/prevents logins +# between peer ports hosted on the same physical port. +# When this parameter is set 0 peer ports of same physical port +# are not allowed to login to each other. +# When this parameter is set 1 peer ports of same physical port +# are allowed to login to each other. +# Default value of this parameter is 0. +*/ +LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1, + "Allow peer ports on the same physical port to login to each " + "other."); + +/* +# restrict_login: This parameter allows/prevents logins +# between Virtual Ports and remote initiators. +# When this parameter is not set (0) Virtual Ports will accept PLOGIs from +# other initiators and will attempt to PLOGI all remote ports. +# When this parameter is set (1) Virtual Ports will reject PLOGIs from +# remote ports and will not attempt to PLOGI to other initiators. +# This parameter does not restrict to the physical port. +# This parameter does not restrict logins to Fabric resident remote ports. +# Default value of this parameter is 1. 
+*/ +static int lpfc_restrict_login = 1; +module_param(lpfc_restrict_login, int, S_IRUGO); +MODULE_PARM_DESC(lpfc_restrict_login, + "Restrict virtual ports login to remote initiators."); +lpfc_vport_param_show(restrict_login); + +/** + * lpfc_restrict_login_init - Set the vport restrict login flag + * @vport: lpfc vport structure pointer. + * @val: contains the restrict login value. + * + * Description: + * If val is not in a valid range then log a kernel error message and set + * the vport restrict login to one. + * If the port type is physical clear the restrict login flag and return. + * Else set the restrict login flag to val. + * + * Returns: + * zero if val is in range + * -EINVAL val out of range + **/ +static int +lpfc_restrict_login_init(struct lpfc_vport *vport, int val) +{ + if (val < 0 || val > 1) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0422 lpfc_restrict_login attribute cannot " + "be set to %d, allowed range is [0, 1]\n", + val); + vport->cfg_restrict_login = 1; + return -EINVAL; + } + if (vport->port_type == LPFC_PHYSICAL_PORT) { + vport->cfg_restrict_login = 0; + return 0; + } + vport->cfg_restrict_login = val; + return 0; +} + +/** + * lpfc_restrict_login_set - Set the vport restrict login flag + * @vport: lpfc vport structure pointer. + * @val: contains the restrict login value. + * + * Description: + * If val is not in a valid range then log a kernel error message and set + * the vport restrict login to one. + * If the port type is physical and the val is not zero log a kernel + * error message, clear the restrict login flag and return zero. + * Else set the restrict login flag to val. + * + * Returns: + * zero if val is in range + * -EINVAL val out of range + **/ +static int +lpfc_restrict_login_set(struct lpfc_vport *vport, int val) +{ + if (val < 0 || val > 1) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0425 lpfc_restrict_login attribute cannot " + "be set to %d, allowed range is [0, 1]\n", + val); + vport->cfg_restrict_login = 1; + return -EINVAL; + } + if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0468 lpfc_restrict_login must be 0 for " + "Physical ports.\n"); + vport->cfg_restrict_login = 0; + return 0; + } + vport->cfg_restrict_login = val; + return 0; +} +lpfc_vport_param_store(restrict_login); +static DEVICE_ATTR_RW(lpfc_restrict_login); + +/* +# Some disk devices have a "select ID" or "select Target" capability. +# From a protocol standpoint "select ID" usually means select the +# Fibre channel "ALPA". In the FC-AL Profile there is an "informative +# annex" which contains a table that maps a "select ID" (a number +# between 0 and 7F) to an ALPA. By default, for compatibility with +# older drivers, the lpfc driver scans this table from low ALPA to high +# ALPA. +# +# Turning on the scan-down variable (on = 1, off = 0) will +# cause the lpfc driver to use an inverted table, effectively +# scanning ALPAs from high to low. Value range is [0,1]. Default value is 1. +# +# (Note: This "select ID" functionality is a LOOP ONLY characteristic +# and will not work across a fabric. Also this parameter will take +# effect only in the case when ALPA map is not available.) 
+*/ +LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1, + "Start scanning for devices from highest ALPA to lowest"); + +/* +# lpfc_topology: link topology for init link +# 0x0 = attempt loop mode then point-to-point +# 0x01 = internal loopback mode +# 0x02 = attempt point-to-point mode only +# 0x04 = attempt loop mode only +# 0x06 = attempt point-to-point mode then loop +# Set point-to-point mode if you want to run as an N_Port. +# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6]. +# Default value is 0. +*/ +LPFC_ATTR(topology, 0, 0, 6, + "Select Fibre Channel topology"); + +/** + * lpfc_topology_store - Set the adapters topology field + * @dev: class device that is converted into a scsi_host. + * @attr:device attribute, not used. + * @buf: buffer for passing information. + * @count: size of the data buffer. + * + * Description: + * If val is in a valid range then set the adapter's topology field and + * issue a lip; if the lip fails reset the topology to the old value. + * + * If the value is not in range log a kernel error message and return an error. + * + * Returns: + * zero if val is in range and lip okay + * non-zero return value from lpfc_issue_lip() + * -EINVAL val out of range + **/ +static ssize_t +lpfc_topology_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int val = 0; + int nolip = 0; + const char *val_buf = buf; + int err; + uint32_t prev_val; + u8 sli_family, if_type; + + if (!strncmp(buf, "nolip ", strlen("nolip "))) { + nolip = 1; + val_buf = &buf[strlen("nolip ")]; + } + + if (!isdigit(val_buf[0])) + return -EINVAL; + if (sscanf(val_buf, "%i", &val) != 1) + return -EINVAL; + + if (val >= 0 && val <= 6) { + prev_val = phba->cfg_topology; + if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G && + val == 4) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "3113 Loop mode not supported at speed %d\n", + val); + return -EINVAL; + } + /* + * The 'topology' is not a configurable parameter if : + * - persistent topology enabled + * - ASIC_GEN_NUM >= 0xC, with no private loop support + */ + sli_family = bf_get(lpfc_sli_intf_sli_family, + &phba->sli4_hba.sli_intf); + if_type = bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf); + if ((phba->hba_flag & HBA_PERSISTENT_TOPO || + (!phba->sli4_hba.pc_sli4_params.pls && + (sli_family == LPFC_SLI_INTF_FAMILY_G6 || + if_type == LPFC_SLI_INTF_IF_TYPE_6))) && + val == 4) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "3114 Loop mode not supported\n"); + return -EINVAL; + } + phba->cfg_topology = val; + if (nolip) + return strlen(buf); + + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "3054 lpfc_topology changed from %d to %d\n", + prev_val, val); + if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4) + phba->fc_topology_changed = 1; + err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); + if (err) { + phba->cfg_topology = prev_val; + return -EINVAL; + } else + return strlen(buf); + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "%d:0467 lpfc_topology attribute cannot be set to %d, " + "allowed range is [0, 6]\n", + phba->brd_no, val); + return -EINVAL; +} + +lpfc_param_show(topology) +static DEVICE_ATTR_RW(lpfc_topology); + +/** + * lpfc_static_vport_show: Read callback function for + * lpfc_static_vport sysfs file. + * @dev: Pointer to class device object. + * @attr: device attribute structure. 
+ * @buf: Data buffer. + * + * This function is the read call back function for + * lpfc_static_vport sysfs file. The lpfc_static_vport + * sysfs file report the mageability of the vport. + **/ +static ssize_t +lpfc_static_vport_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + if (vport->vport_flag & STATIC_VPORT) + sprintf(buf, "1\n"); + else + sprintf(buf, "0\n"); + + return strlen(buf); +} + +/* + * Sysfs attribute to control the statistical data collection. + */ +static DEVICE_ATTR_RO(lpfc_static_vport); + +/* +# lpfc_link_speed: Link speed selection for initializing the Fibre Channel +# connection. +# Value range is [0,16]. Default value is 0. +*/ +/** + * lpfc_link_speed_store - Set the adapters link speed + * @dev: Pointer to class device. + * @attr: Unused. + * @buf: Data buffer. + * @count: Size of the data buffer. + * + * Description: + * If val is in a valid range then set the adapter's link speed field and + * issue a lip; if the lip fails reset the link speed to the old value. + * + * Notes: + * If the value is not in range log a kernel error message and return an error. + * + * Returns: + * zero if val is in range and lip okay. + * non-zero return value from lpfc_issue_lip() + * -EINVAL val out of range + **/ +static ssize_t +lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int val = LPFC_USER_LINK_SPEED_AUTO; + int nolip = 0; + const char *val_buf = buf; + int err; + uint32_t prev_val, if_type; + + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 && + phba->hba_flag & HBA_FORCED_LINK_SPEED) + return -EPERM; + + if (!strncmp(buf, "nolip ", strlen("nolip "))) { + nolip = 1; + val_buf = &buf[strlen("nolip ")]; + } + + if (!isdigit(val_buf[0])) + return -EINVAL; + if (sscanf(val_buf, "%i", &val) != 1) + return -EINVAL; + + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "3055 lpfc_link_speed changed from %d to %d %s\n", + phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)"); + + if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || + ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || + ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || + ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) || + ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) || + ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) || + ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) || + ((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2879 lpfc_link_speed attribute cannot be set " + "to %d. Speed is not supported by this port.\n", + val); + return -EINVAL; + } + if (val >= LPFC_USER_LINK_SPEED_16G && + phba->fc_topology == LPFC_TOPOLOGY_LOOP) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3112 lpfc_link_speed attribute cannot be set " + "to %d. 
Speed is not supported in loop mode.\n", + val); + return -EINVAL; + } + + switch (val) { + case LPFC_USER_LINK_SPEED_AUTO: + case LPFC_USER_LINK_SPEED_1G: + case LPFC_USER_LINK_SPEED_2G: + case LPFC_USER_LINK_SPEED_4G: + case LPFC_USER_LINK_SPEED_8G: + case LPFC_USER_LINK_SPEED_16G: + case LPFC_USER_LINK_SPEED_32G: + case LPFC_USER_LINK_SPEED_64G: + prev_val = phba->cfg_link_speed; + phba->cfg_link_speed = val; + if (nolip) + return strlen(buf); + + err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); + if (err) { + phba->cfg_link_speed = prev_val; + return -EINVAL; + } + return strlen(buf); + default: + break; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0469 lpfc_link_speed attribute cannot be set to %d, " + "allowed values are [%s]\n", + val, LPFC_LINK_SPEED_STRING); + return -EINVAL; + +} + +static int lpfc_link_speed = 0; +module_param(lpfc_link_speed, int, S_IRUGO); +MODULE_PARM_DESC(lpfc_link_speed, "Select link speed"); +lpfc_param_show(link_speed) + +/** + * lpfc_link_speed_init - Set the adapters link speed + * @phba: lpfc_hba pointer. + * @val: link speed value. + * + * Description: + * If val is in a valid range then set the adapter's link speed field. + * + * Notes: + * If the value is not in range log a kernel error message, clear the link + * speed and return an error. + * + * Returns: + * zero if val saved. + * -EINVAL val out of range + **/ +static int +lpfc_link_speed_init(struct lpfc_hba *phba, int val) +{ + if (val >= LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3111 lpfc_link_speed of %d cannot " + "support loop mode, setting topology to default.\n", + val); + phba->cfg_topology = 0; + } + + switch (val) { + case LPFC_USER_LINK_SPEED_AUTO: + case LPFC_USER_LINK_SPEED_1G: + case LPFC_USER_LINK_SPEED_2G: + case LPFC_USER_LINK_SPEED_4G: + case LPFC_USER_LINK_SPEED_8G: + case LPFC_USER_LINK_SPEED_16G: + case LPFC_USER_LINK_SPEED_32G: + case LPFC_USER_LINK_SPEED_64G: + phba->cfg_link_speed = val; + return 0; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0405 lpfc_link_speed attribute cannot " + "be set to %d, allowed values are " + "["LPFC_LINK_SPEED_STRING"]\n", val); + phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; + return -EINVAL; + } +} + +static DEVICE_ATTR_RW(lpfc_link_speed); + +/* +# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER) +# 1 = aer supported and enabled (default) +# PCIe error reporting is always enabled by the PCI core, so this always +# shows 1. +# +# N.B. Parts of LPFC_ATTR open-coded since some of the underlying +# infrastructure (phba->cfg_aer_support) is gone. +*/ +static uint lpfc_aer_support = 1; +module_param(lpfc_aer_support, uint, S_IRUGO); +MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support"); +static ssize_t +lpfc_aer_support_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", lpfc_aer_support); +} + +/** + * lpfc_aer_support_store - Set the adapter for aer support + * + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: containing enable or disable aer flag. + * @count: unused variable. + * + * Description: + * PCIe error reporting is enabled by the PCI core, so drivers don't need + * to do anything. Retain this interface for backwards compatibility, + * but do nothing. 
+ * + * Returns: + * length of the buf on success + * -EINVAL if val out of range + **/ +static ssize_t +lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val = 0; + + if (!isdigit(buf[0])) + return -EINVAL; + if (sscanf(buf, "%i", &val) != 1) + return -EINVAL; + + dev_info_once(dev, "PCIe error reporting automatically enabled by the PCI core; sysfs write ignored\n"); + return strlen(buf); +} + +static DEVICE_ATTR_RW(lpfc_aer_support); + +/** + * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: containing flag 1 for aer cleanup state. + * @count: unused variable. + * + * Description: + * If the @buf contains 1, invokes the kernel AER helper routine + * pci_aer_clear_nonfatal_status() to clean up the uncorrectable + * error status register. + * + * Notes: + * + * Returns: + * -EINVAL if the buf does not contain 1 + * -EPERM if the OS cannot clear AER error status, i.e., when platform + * firmware owns the AER Capability + **/ +static ssize_t +lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int val, rc = -1; + + if (!isdigit(buf[0])) + return -EINVAL; + if (sscanf(buf, "%i", &val) != 1) + return -EINVAL; + if (val != 1) + return -EINVAL; + + rc = pci_aer_clear_nonfatal_status(phba->pcidev); + + if (rc == 0) + return strlen(buf); + else + return -EPERM; +} + +static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL, + lpfc_aer_cleanup_state); + +/** + * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions + * + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: containing the string the number of vfs to be enabled. + * @count: unused variable. + * + * Description: + * When this api is called either through user sysfs, the driver shall + * try to enable or disable SR-IOV virtual functions according to the + * following: + * + * If zero virtual function has been enabled to the physical function, + * the driver shall invoke the pci enable virtual function api trying + * to enable the virtual functions. If the nr_vfn provided is greater + * than the maximum supported, the maximum virtual function number will + * be used for invoking the api; otherwise, the nr_vfn provided shall + * be used for invoking the api. If the api call returned success, the + * actual number of virtual functions enabled will be set to the driver + * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver + * cfg_sriov_nr_virtfn remains zero. + * + * If none-zero virtual functions have already been enabled to the + * physical function, as reflected by the driver's cfg_sriov_nr_virtfn, + * -EINVAL will be returned and the driver does nothing; + * + * If the nr_vfn provided is zero and none-zero virtual functions have + * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the + * disabling virtual function api shall be invoded to disable all the + * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to + * zero. Otherwise, if zero virtual function has been enabled, do + * nothing. + * + * Returns: + * length of the buf on success if val is in range the intended mode + * is supported. 
+ * -EINVAL if val out of range or intended mode is not supported. + **/ +static ssize_t +lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct pci_dev *pdev = phba->pcidev; + int val = 0, rc = -EINVAL; + + /* Sanity check on user data */ + if (!isdigit(buf[0])) + return -EINVAL; + if (sscanf(buf, "%i", &val) != 1) + return -EINVAL; + if (val < 0) + return -EINVAL; + + /* Request disabling virtual functions */ + if (val == 0) { + if (phba->cfg_sriov_nr_virtfn > 0) { + pci_disable_sriov(pdev); + phba->cfg_sriov_nr_virtfn = 0; + } + return strlen(buf); + } + + /* Request enabling virtual functions */ + if (phba->cfg_sriov_nr_virtfn > 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3018 There are %d virtual functions " + "enabled on physical function.\n", + phba->cfg_sriov_nr_virtfn); + return -EEXIST; + } + + if (val <= LPFC_MAX_VFN_PER_PFN) + phba->cfg_sriov_nr_virtfn = val; + else { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3019 Enabling %d virtual functions is not " + "allowed.\n", val); + return -EINVAL; + } + + rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn); + if (rc) { + phba->cfg_sriov_nr_virtfn = 0; + rc = -EPERM; + } else + rc = strlen(buf); + + return rc; +} + +LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN, + "Enable PCIe device SR-IOV virtual fn"); + +lpfc_param_show(sriov_nr_virtfn) +static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn); + +/** + * lpfc_request_firmware_upgrade_store - Request for Linux generic firmware upgrade + * + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: containing the string the number of vfs to be enabled. + * @count: unused variable. + * + * Description: + * + * Returns: + * length of the buf on success if val is in range the intended mode + * is supported. + * -EINVAL if val out of range or intended mode is not supported. + **/ +static ssize_t +lpfc_request_firmware_upgrade_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int val = 0, rc; + + /* Sanity check on user data */ + if (!isdigit(buf[0])) + return -EINVAL; + if (sscanf(buf, "%i", &val) != 1) + return -EINVAL; + if (val != 1) + return -EINVAL; + + rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE); + if (rc) + rc = -EPERM; + else + rc = strlen(buf); + return rc; +} + +static int lpfc_req_fw_upgrade; +module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade"); +lpfc_param_show(request_firmware_upgrade) + +/** + * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade + * @phba: lpfc_hba pointer. + * @val: 0 or 1. + * + * Description: + * Set the initial Linux generic firmware upgrade enable or disable flag. + * + * Returns: + * zero if val saved. 
+ * -EINVAL val out of range + **/ +static int +lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val) +{ + if (val >= 0 && val <= 1) { + phba->cfg_request_firmware_upgrade = val; + return 0; + } + return -EINVAL; +} +static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR, + lpfc_request_firmware_upgrade_show, + lpfc_request_firmware_upgrade_store); + +/** + * lpfc_force_rscn_store + * + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: unused string + * @count: unused variable. + * + * Description: + * Force the switch to send a RSCN to all other NPorts in our zone + * If we are direct connect pt2pt, build the RSCN command ourself + * and send to the other NPort. Not supported for private loop. + * + * Returns: + * 0 - on success + * -EIO - if command is not sent + **/ +static ssize_t +lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + int i; + + i = lpfc_issue_els_rscn(vport, 0); + if (i) + return -EIO; + return strlen(buf); +} + +/* + * lpfc_force_rscn: Force an RSCN to be sent to all remote NPorts + * connected to the HBA. + * + * Value range is any ascii value + */ +static int lpfc_force_rscn; +module_param(lpfc_force_rscn, int, 0644); +MODULE_PARM_DESC(lpfc_force_rscn, + "Force an RSCN to be sent to all remote NPorts"); +lpfc_param_show(force_rscn) + +/** + * lpfc_force_rscn_init - Force an RSCN to be sent to all remote NPorts + * @phba: lpfc_hba pointer. + * @val: unused value. + * + * Returns: + * zero if val saved. + **/ +static int +lpfc_force_rscn_init(struct lpfc_hba *phba, int val) +{ + return 0; +} +static DEVICE_ATTR_RW(lpfc_force_rscn); + +/** + * lpfc_fcp_imax_store + * + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: string with the number of fast-path FCP interrupts per second. + * @count: unused variable. + * + * Description: + * If val is in a valid range [636,651042], then set the adapter's + * maximum number of fast-path FCP interrupts per second. + * + * Returns: + * length of the buf on success if val is in range the intended mode + * is supported. + * -EINVAL if val out of range or intended mode is not supported. + **/ +static ssize_t +lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_eq_intr_info *eqi; + uint32_t usdelay; + int val = 0, i; + + /* fcp_imax is only valid for SLI4 */ + if (phba->sli_rev != LPFC_SLI_REV4) + return -EINVAL; + + /* Sanity check on user data */ + if (!isdigit(buf[0])) + return -EINVAL; + if (sscanf(buf, "%i", &val) != 1) + return -EINVAL; + + /* + * Value range for the HBA is [5000,5000000] + * The value for each EQ depends on how many EQs are configured. + * Allow value == 0 + */ + if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX)) + return -EINVAL; + + phba->cfg_auto_imax = (val) ? 
0 : 1;
+ if (phba->cfg_fcp_imax && !val) {
+ queue_delayed_work(phba->wq, &phba->eq_delay_work,
+ msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
+
+ for_each_present_cpu(i) {
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
+ eqi->icnt = 0;
+ }
+ }
+
+ phba->cfg_fcp_imax = (uint32_t)val;
+
+ if (phba->cfg_fcp_imax)
+ usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
+ else
+ usdelay = 0;
+
+ for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
+ lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
+ usdelay);
+
+ return strlen(buf);
+}
+
+/*
+# lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
+# for the HBA.
+#
+# Value range is [5,000 to 5,000,000]. Default value is 50,000.
+*/
+static int lpfc_fcp_imax = LPFC_DEF_IMAX;
+module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_fcp_imax,
+ "Set the maximum number of FCP interrupts per second per HBA");
+lpfc_param_show(fcp_imax)
+
+/**
+ * lpfc_fcp_imax_init - Set the initial fast-path FCP interrupt rate limit
+ * @phba: lpfc_hba pointer.
+ * @val: maximum number of fast-path FCP interrupts per second.
+ *
+ * Description:
+ * If val is 0 or in the valid range [5000,5000000], then initialize the
+ * adapter's maximum number of fast-path FCP interrupts per second.
+ *
+ * Returns:
+ * zero; an out of range val is replaced with the default value.
+ **/
+static int
+lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ phba->cfg_fcp_imax = 0;
+ return 0;
+ }
+
+ if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) ||
+ (val == 0)) {
+ phba->cfg_fcp_imax = val;
+ return 0;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3016 lpfc_fcp_imax: %d out of range, using default\n",
+ val);
+ phba->cfg_fcp_imax = LPFC_DEF_IMAX;
+
+ return 0;
+}
+
+static DEVICE_ATTR_RW(lpfc_fcp_imax);
+
+/**
+ * lpfc_cq_max_proc_limit_store
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string with the cq max processing limit of cqes
+ * @count: unused variable.
+ *
+ * Description:
+ * If val is in a valid range, then set value on each cq
+ *
+ * Returns:
+ * The length of the buf if successful
+ * -ERANGE: if val is not in the valid range
+ * -EINVAL: if bad value format or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_queue *eq, *cq;
+ unsigned long val;
+ int i;
+
+ /* cq_max_proc_limit is only valid for SLI4 */
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return -EINVAL;
+
+ /* Sanity check on user data */
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT)
+ return -ERANGE;
+
+ phba->cfg_cq_max_proc_limit = (uint32_t)val;
+
+ /* set the values on the cq's */
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ /* Get the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[i].eq;
+ if (!eq)
+ continue;
+
+ list_for_each_entry(cq, &eq->child_list, list)
+ cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
+ cq->entry_count);
+ }
+
+ return strlen(buf);
+}
+
+/*
+ * lpfc_cq_max_proc_limit: The maximum number of CQE entries processed in an
+ * iteration of CQ processing.
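+ * Larger values let a single invocation of the CQ handler consume more
+ * completions before rescheduling; smaller values bound the time spent in
+ * one pass.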
+ */ +static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT; +module_param(lpfc_cq_max_proc_limit, int, 0644); +MODULE_PARM_DESC(lpfc_cq_max_proc_limit, + "Set the maximum number CQEs processed in an iteration of " + "CQ processing"); +lpfc_param_show(cq_max_proc_limit) + +/* + * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a + * single handler call which should request a polled completion rather + * than re-enabling interrupts. + */ +LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL, + LPFC_CQ_MIN_THRESHOLD_TO_POLL, + LPFC_CQ_MAX_THRESHOLD_TO_POLL, + "CQE Processing Threshold to enable Polling"); + +/** + * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit + * @phba: lpfc_hba pointer. + * @val: entry limit + * + * Description: + * If val is in a valid range, then initialize the adapter's maximum + * value. + * + * Returns: + * Always returns 0 for success, even if value not always set to + * requested value. If value out of range or not supported, will fall + * back to default. + **/ +static int +lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val) +{ + phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT; + + if (phba->sli_rev != LPFC_SLI_REV4) + return 0; + + if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) { + phba->cfg_cq_max_proc_limit = val; + return 0; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0371 lpfc_cq_max_proc_limit: %d out of range, using " + "default\n", + phba->cfg_cq_max_proc_limit); + + return 0; +} + +static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit); + +/** + * lpfc_fcp_cpu_map_show - Display current driver CPU affinity + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains text describing the state of the link. + * + * Returns: size of formatted string. 
+ **/ +static ssize_t +lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_vector_map_info *cpup; + int len = 0; + + if ((phba->sli_rev != LPFC_SLI_REV4) || + (phba->intr_type != MSIX)) + return len; + + switch (phba->cfg_fcp_cpu_map) { + case 0: + len += scnprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: No mapping (%d)\n", + phba->cfg_fcp_cpu_map); + return len; + case 1: + len += scnprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: HBA centric mapping (%d): " + "%d of %d CPUs online from %d possible CPUs\n", + phba->cfg_fcp_cpu_map, num_online_cpus(), + num_present_cpus(), + phba->sli4_hba.num_possible_cpu); + break; + } + + while (phba->sli4_hba.curr_disp_cpu < + phba->sli4_hba.num_possible_cpu) { + cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu]; + + if (!cpu_present(phba->sli4_hba.curr_disp_cpu)) + len += scnprintf(buf + len, PAGE_SIZE - len, + "CPU %02d not present\n", + phba->sli4_hba.curr_disp_cpu); + else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { + if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY) + len += scnprintf( + buf + len, PAGE_SIZE - len, + "CPU %02d hdwq None " + "physid %d coreid %d ht %d ua %d\n", + phba->sli4_hba.curr_disp_cpu, + cpup->phys_id, cpup->core_id, + (cpup->flag & LPFC_CPU_MAP_HYPER), + (cpup->flag & LPFC_CPU_MAP_UNASSIGN)); + else + len += scnprintf( + buf + len, PAGE_SIZE - len, + "CPU %02d EQ None hdwq %04d " + "physid %d coreid %d ht %d ua %d\n", + phba->sli4_hba.curr_disp_cpu, + cpup->hdwq, cpup->phys_id, + cpup->core_id, + (cpup->flag & LPFC_CPU_MAP_HYPER), + (cpup->flag & LPFC_CPU_MAP_UNASSIGN)); + } else { + if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY) + len += scnprintf( + buf + len, PAGE_SIZE - len, + "CPU %02d hdwq None " + "physid %d coreid %d ht %d ua %d IRQ %d\n", + phba->sli4_hba.curr_disp_cpu, + cpup->phys_id, + cpup->core_id, + (cpup->flag & LPFC_CPU_MAP_HYPER), + (cpup->flag & LPFC_CPU_MAP_UNASSIGN), + lpfc_get_irq(cpup->eq)); + else + len += scnprintf( + buf + len, PAGE_SIZE - len, + "CPU %02d EQ %04d hdwq %04d " + "physid %d coreid %d ht %d ua %d IRQ %d\n", + phba->sli4_hba.curr_disp_cpu, + cpup->eq, cpup->hdwq, cpup->phys_id, + cpup->core_id, + (cpup->flag & LPFC_CPU_MAP_HYPER), + (cpup->flag & LPFC_CPU_MAP_UNASSIGN), + lpfc_get_irq(cpup->eq)); + } + + phba->sli4_hba.curr_disp_cpu++; + + /* display max number of CPUs keeping some margin */ + if (phba->sli4_hba.curr_disp_cpu < + phba->sli4_hba.num_possible_cpu && + (len >= (PAGE_SIZE - 64))) { + len += scnprintf(buf + len, + PAGE_SIZE - len, "more...\n"); + break; + } + } + + if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu) + phba->sli4_hba.curr_disp_cpu = 0; + + return len; +} + +/** + * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: one or more lpfc_polling_flags values. + * @count: not used. + * + * Returns: + * -EINVAL - Not implemented yet. + **/ +static ssize_t +lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + return -EINVAL; +} + +/* +# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors +# for the HBA. +# +# Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1). 
+# 0 - Do not affinitize IRQ vectors
+# 1 - Affinitize HBA vectors with respect to each HBA
+# (start with CPU0 for each HBA)
+# This also defines how Hardware Queues are mapped to specific CPUs.
+*/
+static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
+module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_fcp_cpu_map,
+ "Defines how to map CPUs to IRQ vectors per HBA");
+
+/**
+ * lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode
+ * @phba: lpfc_hba pointer.
+ * @val: CPU mapping mode value.
+ *
+ * Description:
+ * If val is in the valid range [0,1], then affinitize the adapter's
+ * MSIX vectors accordingly.
+ *
+ * Returns:
+ * zero; an out of range val is replaced with the default mapping mode.
+ **/
+static int
+lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ phba->cfg_fcp_cpu_map = 0;
+ return 0;
+ }
+
+ if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
+ phba->cfg_fcp_cpu_map = val;
+ return 0;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3326 lpfc_fcp_cpu_map: %d out of range, using "
+ "default\n", val);
+ phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
+
+ return 0;
+}
+
+static DEVICE_ATTR_RW(lpfc_fcp_cpu_map);
+
+/*
+# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
+# Value range is [2,3]. Default value is 3.
+*/
+LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
+ "Select Fibre Channel class of service for FCP sequences");
+
+/*
+# lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
+# is [0,1]. Default value is 1.
+*/
+LPFC_VPORT_ATTR_RW(use_adisc, 1, 0, 1,
+ "Use ADISC on rediscovery to authenticate FCP devices");
+
+/*
+# lpfc_first_burst_size: First burst size to use on the NPorts
+# that support first burst.
+# Value range is [0,65536]. Default value is 0.
+*/
+LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
+ "First burst size for Targets that support first burst");
+
+/*
+* lpfc_nvmet_fb_size: NVME Target mode supported first burst size.
+* When the driver is configured as an NVME target, this value is
+* communicated to the NVME initiator in the PRLI response. It is
+* used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support
+* parameters are set and the target is sending the PRLI RSP.
+* Parameter supported on physical port only - no NPIV support.
+* Value range is [0,65536]. Default value is 0.
+*/
+LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
+ "NVME Target mode first burst size in 512B increments.");
+
+/*
+ * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
+ * For the Initiator (I), enabling this parameter means that an NVMET
+ * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
+ * processed by the initiator for subsequent NVME FCP IO.
+ * Currently, this feature is not supported on the NVME target.
+ * Value range is [0,1]. Default value is 0 (disabled).
+ */
+LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
+ "Enable First Burst feature for NVME Initiator.");
+
+/*
+# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
+# depth. Default value is 0. When the value of this parameter is zero the
+# SCSI command completion time is not used for controlling I/O queue depth.
+# When the parameter is set to a non-zero value, the I/O queue depth is
+# controlled to limit the I/O completion time to the parameter value.
+# The value is set in milliseconds.
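+# Illustrative example: a value of 1000 asks the driver to throttle the queue
+# depth so that outstanding commands complete within roughly one second.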
+*/
+LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
+ "Use command completion time to control queue depth");
+
+lpfc_vport_param_show(max_scsicmpl_time);
+static int
+lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ if (val == vport->cfg_max_scsicmpl_time)
+ return 0;
+ if ((val < 0) || (val > 60000))
+ return -EINVAL;
+ vport->cfg_max_scsicmpl_time = val;
+
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
+ ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
+ }
+ spin_unlock_irq(shost->host_lock);
+ return 0;
+}
+lpfc_vport_param_store(max_scsicmpl_time);
+static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time);
+
+/*
+# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
+# range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
+
+/*
+# lpfc_xri_rebalancing: enable or disable XRI rebalancing feature
+# range is [0,1]. Default value is 1.
+*/
+LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing");
+
+/*
+ * lpfc_fcp_io_sched: Determine scheduling algorithm for issuing FCP cmds
+ * range is [0,1]. Default value is 0.
+ * For [0], FCP commands are issued to Work Queues based on upper layer
+ * hardware queue index.
+ * For [1], FCP commands are issued to a Work Queue associated with the
+ * current CPU.
+ *
+ * LPFC_FCP_SCHED_BY_HDWQ == 0
+ * LPFC_FCP_SCHED_BY_CPU == 1
+ *
+ * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu
+ * affinity for FCP/NVME I/Os through Work Queues associated with the current
+ * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os
+ * through WQs will be used.
+ */
+LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
+ LPFC_FCP_SCHED_BY_HDWQ,
+ LPFC_FCP_SCHED_BY_CPU,
+ "Determine scheduling algorithm for "
+ "issuing commands [0] - Hardware Queue, [1] - Current CPU");
+
+/*
+ * lpfc_ns_query: Determine algorithm for NameServer queries after RSCN
+ * range is [0,1]. Default value is 0.
+ * For [0], GID_FT is used for NameServer queries after RSCN (default)
+ * For [1], GID_PT is used for NameServer queries after RSCN
+ *
+ */
+LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
+ LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
+ "Determine algorithm for NameServer queries after RSCN "
+ "[0] - GID_FT, [1] - GID_PT");
+
+/*
+# lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
+# range is [0,1]. Default value is 0.
+# For [0], bus reset issues target reset to ALL devices
+# For [1], bus reset issues target reset to non-FCP2 devices
+*/
+LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
+ "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
+
+
+/*
+# lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
+# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
+# value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
+# is 0. Default value of cr_count is 1. The cr_count feature is disabled if
+# cr_delay is set to 0.
+*/
+LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
+ "interrupt response is generated");
+
+LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
+ "interrupt response is generated");
+
+/*
+# lpfc_multi_ring_support: Determines how many rings to spread available
+# cmd/rsp IOCB entries across.
+# Value range is [1,2]. Default value is 1.
+*/
+LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
+ "SLI rings to spread IOCB entries across");
+
+/*
+# lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this
+# identifies what rctl value to configure the additional ring for.
+# Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
+*/
+LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
+ 255, "Identifies RCTL for additional ring configuration");
+
+/*
+# lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this
+# identifies what type value to configure the additional ring for.
+# Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
+*/
+LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
+ 255, "Identifies TYPE for additional ring configuration");
+
+/*
+# lpfc_enable_SmartSAN: Sets up FDMI support for SmartSAN
+# 0 = SmartSAN functionality disabled (default)
+# 1 = SmartSAN functionality enabled
+# This parameter will override the value of the lpfc_fdmi_on module parameter.
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
+
+/*
+# lpfc_fdmi_on: Controls FDMI support.
+# 0 No FDMI support
+# 1 Traditional FDMI support (default)
+# Traditional FDMI support means the driver will assume FDMI-2 support;
+# however, if that fails, it will fall back to FDMI-1.
+# If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
+# If lpfc_enable_SmartSAN is set to 0, the driver uses the current value of
+# lpfc_fdmi_on.
+# Value range [0,1]. Default value is 1.
+*/
+LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
+
+/*
+# Specifies the maximum number of ELS cmds we can have outstanding (for
+# discovery). Value range is [1,64]. Default value is 32.
+*/
+LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
+ "during discovery");
+
+/*
+# lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that
+# will be scanned by the SCSI midlayer when sequential scanning is
+# used; and is also the highest LUN ID allowed when the SCSI midlayer
+# parses REPORT_LUN responses. The lpfc driver has no LUN count or
+# LUN ID limit, but the SCSI midlayer requires this field for the uses
+# above. The lpfc driver limits the default value to 255 for two reasons.
+# As it bounds the sequential scan loop, scanning for thousands of LUNs
+# on a target can take minutes of wall clock time. Additionally,
+# there are FC targets, such as JBODs, that only recognize 8 bits of
+# LUN ID. When they receive a value greater than 8 bits, they chop off
+# the high order bits. In other words, they see LUN IDs 0, 256, 512,
+# and so on all as LUN ID 0. This causes the Linux kernel, which sees
+# valid responses at each of the LUN IDs, to believe there are multiple
+# devices present, when in fact, there is only 1.
+# A customer that is aware of their target behaviors, and the results as
+# indicated above, is welcome to increase the lpfc_max_luns value.
+# As mentioned, this value is not used by the lpfc driver, only the
+# SCSI midlayer.
+# Value range is [0,65535]. Default value is 255.
+# NOTE: The SCSI layer might probe all allowed LUNs on some old targets.
+*/
+LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
+
+/*
+# lpfc_poll_tmo: Milliseconds driver will wait between polling FCP ring.
+# Value range is [1,255], default value is 10.
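+# This interval only applies when FCP ring polling has been enabled through
+# the lpfc_poll module parameter described earlier in this file.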
+*/ +LPFC_ATTR_RW(poll_tmo, 10, 1, 255, + "Milliseconds driver will wait between polling FCP ring"); + +/* +# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands +# to complete in seconds. Value range is [5,180], default value is 60. +*/ +LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180, + "Maximum time to wait for task management commands to complete"); +/* +# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that +# support this feature +# 0 = MSI disabled +# 1 = MSI enabled +# 2 = MSI-X enabled (default) +# Value range is [0,2]. Default value is 2. +*/ +LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " + "MSI-X (2), if possible"); + +/* + * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs + * + * 0 = NVME OAS disabled + * 1 = NVME OAS enabled + * + * Value range is [0,1]. Default value is 0. + */ +LPFC_ATTR_RW(nvme_oas, 0, 0, 1, + "Use OAS bit on NVME IOs"); + +/* + * lpfc_nvme_embed_cmd: Use the oas bit when sending NVME/NVMET IOs + * + * 0 = Put NVME Command in SGL + * 1 = Embed NVME Command in WQE (unless G7) + * 2 = Embed NVME Command in WQE (force) + * + * Value range is [0,2]. Default value is 1. + */ +LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2, + "Embed NVME Command in WQE"); + +/* + * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues + * the driver will advertise it supports to the SCSI layer. + * + * 0 = Set nr_hw_queues by the number of CPUs or HW queues. + * 1,256 = Manually specify nr_hw_queue value to be advertised, + * + * Value range is [0,256]. Default value is 8. + */ +LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF, + LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX, + "Set the number of SCSI Queues advertised"); + +/* + * lpfc_hdw_queue: Set the number of Hardware Queues the driver + * will advertise it supports to the NVME and SCSI layers. This also + * will map to the number of CQ/WQ pairs the driver will create. + * + * The NVME Layer will try to create this many, plus 1 administrative + * hardware queue. The administrative queue will always map to WQ 0 + * A hardware IO queue maps (qidx) to a specific driver CQ/WQ. + * + * 0 = Configure the number of hdw queues to the number of active CPUs. + * 1,256 = Manually specify how many hdw queues to use. + * + * Value range is [0,256]. Default value is 0. + */ +LPFC_ATTR_R(hdw_queue, + LPFC_HBA_HDWQ_DEF, + LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX, + "Set the number of I/O Hardware Queues"); + +#if IS_ENABLED(CONFIG_X86) +/** + * lpfc_cpumask_irq_mode_init - initalizes cpumask of phba based on + * irq_chann_mode + * @phba: Pointer to HBA context object. 
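+ *
+ * Builds sli4_hba.irq_aff_mask for the selected irq_chann_mode: the CPUs of
+ * the HBA's NUMA node when in NUMA_MODE, or the first SMT sibling of each
+ * core when in NHT_MODE. lpfc_irq_chann_init() later sizes cfg_irq_chann
+ * from the weight of this mask.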
+ **/ +static void +lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba) +{ + unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE; + const struct cpumask *sibling_mask; + struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask; + + cpumask_clear(aff_mask); + + if (phba->irq_chann_mode == NUMA_MODE) { + /* Check if we're a NUMA architecture */ + numa_node = dev_to_node(&phba->pcidev->dev); + if (numa_node == NUMA_NO_NODE) { + phba->irq_chann_mode = NORMAL_MODE; + return; + } + } + + for_each_possible_cpu(cpu) { + switch (phba->irq_chann_mode) { + case NUMA_MODE: + if (cpu_to_node(cpu) == numa_node) + cpumask_set_cpu(cpu, aff_mask); + break; + case NHT_MODE: + sibling_mask = topology_sibling_cpumask(cpu); + first_cpu = cpumask_first(sibling_mask); + if (first_cpu < nr_cpu_ids) + cpumask_set_cpu(first_cpu, aff_mask); + break; + default: + break; + } + } +} +#endif + +static void +lpfc_assign_default_irq_chann(struct lpfc_hba *phba) +{ +#if IS_ENABLED(CONFIG_X86) + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_AMD: + /* If AMD architecture, then default is NUMA_MODE */ + phba->irq_chann_mode = NUMA_MODE; + break; + case X86_VENDOR_INTEL: + /* If Intel architecture, then default is no hyperthread mode */ + phba->irq_chann_mode = NHT_MODE; + break; + default: + phba->irq_chann_mode = NORMAL_MODE; + break; + } + lpfc_cpumask_irq_mode_init(phba); +#else + phba->irq_chann_mode = NORMAL_MODE; +#endif +} + +/* + * lpfc_irq_chann: Set the number of IRQ vectors that are available + * for Hardware Queues to utilize. This also will map to the number + * of EQ / MSI-X vectors the driver will create. This should never be + * more than the number of Hardware Queues + * + * 0 = Configure number of IRQ Channels to: + * if AMD architecture, number of CPUs on HBA's NUMA node + * if Intel architecture, number of physical CPUs. + * otherwise, number of active CPUs. + * [1,256] = Manually specify how many IRQ Channels to use. + * + * Value range is [0,256]. Default value is [0]. + */ +static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF; +module_param(lpfc_irq_chann, uint, 0444); +MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate"); + +/* lpfc_irq_chann_init - Set the hba irq_chann initial value + * @phba: lpfc_hba pointer. + * @val: contains the initial value + * + * Description: + * Validates the initial value is within range and assigns it to the + * adapter. If not in range, an error message is posted and the + * default value is assigned. 
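+ * When the default value is requested on an SLI-4 port (use_msi is 2 and
+ * lpfc_hdw_queue is at its default), the channel count is derived from
+ * the CPU affinity mask chosen by lpfc_assign_default_irq_chann().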
+ * + * Returns: + * zero if value is in range and is set + * -EINVAL if value was out of range + **/ +static int +lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val) +{ + const struct cpumask *aff_mask; + + if (phba->cfg_use_msi != 2) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "8532 use_msi = %u ignoring cfg_irq_numa\n", + phba->cfg_use_msi); + phba->irq_chann_mode = NORMAL_MODE; + phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; + return 0; + } + + /* Check if default setting was passed */ + if (val == LPFC_IRQ_CHANN_DEF && + phba->cfg_hdw_queue == LPFC_HBA_HDWQ_DEF && + phba->sli_rev == LPFC_SLI_REV4) + lpfc_assign_default_irq_chann(phba); + + if (phba->irq_chann_mode != NORMAL_MODE) { + aff_mask = &phba->sli4_hba.irq_aff_mask; + + if (cpumask_empty(aff_mask)) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "8533 Could not identify CPUS for " + "mode %d, ignoring\n", + phba->irq_chann_mode); + phba->irq_chann_mode = NORMAL_MODE; + phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; + } else { + phba->cfg_irq_chann = cpumask_weight(aff_mask); + + /* If no hyperthread mode, then set hdwq count to + * aff_mask weight as well + */ + if (phba->irq_chann_mode == NHT_MODE) + phba->cfg_hdw_queue = phba->cfg_irq_chann; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "8543 lpfc_irq_chann set to %u " + "(mode: %d)\n", phba->cfg_irq_chann, + phba->irq_chann_mode); + } + } else { + if (val > LPFC_IRQ_CHANN_MAX) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "8545 lpfc_irq_chann attribute cannot " + "be set to %u, allowed range is " + "[%u,%u]\n", + val, + LPFC_IRQ_CHANN_MIN, + LPFC_IRQ_CHANN_MAX); + phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; + return -EINVAL; + } + if (phba->sli_rev == LPFC_SLI_REV4) { + phba->cfg_irq_chann = val; + } else { + phba->cfg_irq_chann = 2; + phba->cfg_hdw_queue = 1; + } + } + + return 0; +} + +/** + * lpfc_irq_chann_show - Display value of irq_chann + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains a string with the list sizes + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann); +} + +static DEVICE_ATTR_RO(lpfc_irq_chann); + +/* +# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. +# 0 = HBA resets disabled +# 1 = HBA resets enabled (default) +# 2 = HBA reset via PCI bus reset enabled +# Value range is [0,2]. Default value is 1. +*/ +LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver."); + +/* +# lpfc_enable_hba_heartbeat: Disable HBA heartbeat timer.. +# 0 = HBA Heartbeat disabled +# 1 = HBA Heartbeat enabled (default) +# Value range is [0,1]. Default value is 1. +*/ +LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat."); + +/* +# lpfc_EnableXLane: Enable Express Lane Feature +# 0x0 Express Lane Feature disabled +# 0x1 Express Lane Feature enabled +# Value range is [0,1]. Default value is 0. +*/ +LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature."); + +/* +# lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature +# 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits) +# Value range is [0x0,0x7f]. 
Default value is 0 +*/ +LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature."); + +/* +# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) +# 0 = BlockGuard disabled (default) +# 1 = BlockGuard enabled +# Value range is [0,1]. Default value is 0. +*/ +LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); + +/* +# lpfc_prot_mask: +# - Bit mask of host protection capabilities used to register with the +# SCSI mid-layer +# - Only meaningful if BG is turned on (lpfc_enable_bg=1). +# - Allows you to ultimately specify which profiles to use +# - Default will result in registering capabilities for all profiles. +# - SHOST_DIF_TYPE1_PROTECTION 1 +# HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection +# - SHOST_DIX_TYPE0_PROTECTION 8 +# HBA supports DIX Type 0: Host to HBA protection only +# - SHOST_DIX_TYPE1_PROTECTION 16 +# HBA supports DIX Type 1: Host to HBA Type 1 protection +# +*/ +LPFC_ATTR(prot_mask, + (SHOST_DIF_TYPE1_PROTECTION | + SHOST_DIX_TYPE0_PROTECTION | + SHOST_DIX_TYPE1_PROTECTION), + 0, + (SHOST_DIF_TYPE1_PROTECTION | + SHOST_DIX_TYPE0_PROTECTION | + SHOST_DIX_TYPE1_PROTECTION), + "T10-DIF host protection capabilities mask"); + +/* +# lpfc_prot_guard: +# - Bit mask of protection guard types to register with the SCSI mid-layer +# - Guard types are currently either 1) T10-DIF CRC 2) IP checksum +# - Allows you to ultimately specify which profiles to use +# - Default will result in registering capabilities for all guard types +# +*/ +LPFC_ATTR(prot_guard, + SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP, + "T10-DIF host protection guard type"); + +/* + * Delay initial NPort discovery when Clean Address bit is cleared in + * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed. + * This parameter can have value 0 or 1. + * When this parameter is set to 0, no delay is added to the initial + * discovery. + * When this parameter is set to non-zero value, initial Nport discovery is + * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC + * accept and FCID/Fabric name/Fabric portname is changed. + * Driver always delay Nport discovery for subsequent FLOGI/FDISC completion + * when Clean Address bit is cleared in FLOGI/FDISC + * accept and FCID/Fabric name/Fabric portname is changed. + * Default value is 0. + */ +LPFC_ATTR(delay_discovery, 0, 0, 1, + "Delay NPort discovery when Clean Address bit is cleared."); + +/* + * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count + * This value can be set to values between 64 and 4096. The default value + * is 64, but may be increased to allow for larger Max I/O sizes. The scsi + * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE). + * Because of the additional overhead involved in setting up T10-DIF, + * this parameter will be limited to 128 if BlockGuard is enabled under SLI4 + * and will be limited to 512 if BlockGuard is enabled under SLI3. + */ +static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT; +module_param(lpfc_sg_seg_cnt, uint, 0444); +MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count"); + +/** + * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes + * configured for the adapter + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains a string with the list sizes + * + * Returns: size of formatted string. 
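+ *
+ * The first output line reports the SGL DMA buffer size and the total
+ * SGE count; the second reports the configured, SCSI and NVME
+ * scatter-gather segment counts.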
+ **/ +static ssize_t +lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int len; + + len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n", + phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt); + + len += scnprintf(buf + len, PAGE_SIZE - len, + "Cfg: %d SCSI: %d NVME: %d\n", + phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt, + phba->cfg_nvme_seg_cnt); + return len; +} + +static DEVICE_ATTR_RO(lpfc_sg_seg_cnt); + +/** + * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value + * @phba: lpfc_hba pointer. + * @val: contains the initial value + * + * Description: + * Validates the initial value is within range and assigns it to the + * adapter. If not in range, an error message is posted and the + * default value is assigned. + * + * Returns: + * zero if value is in range and is set + * -EINVAL if value was out of range + **/ +static int +lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val) +{ + if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) { + phba->cfg_sg_seg_cnt = val; + return 0; + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0409 lpfc_sg_seg_cnt attribute cannot be set to %d, " + "allowed range is [%d, %d]\n", + val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT); + phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT; + return -EINVAL; +} + +/* + * lpfc_enable_mds_diags: Enable MDS Diagnostics + * 0 = MDS Diagnostics disabled (default) + * 1 = MDS Diagnostics enabled + * Value range is [0,1]. Default value is 0. + */ +LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics"); + +/* + * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size + * 0 = Disable firmware logging (default) + * [1-4] = Multiple of 1/4th Mb of host memory for FW logging + * Value range [0..4]. Default value is 0 + */ +LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging"); +lpfc_param_show(ras_fwlog_buffsize); + +static ssize_t +lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val) +{ + int ret = 0; + enum ras_state state; + + if (!lpfc_rangecheck(val, 0, 4)) + return -EINVAL; + + if (phba->cfg_ras_fwlog_buffsize == val) + return 0; + + if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn)) + return -EINVAL; + + spin_lock_irq(&phba->hbalock); + state = phba->ras_fwlog.state; + spin_unlock_irq(&phba->hbalock); + + if (state == REG_INPROGRESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging " + "registration is in progress\n"); + return -EBUSY; + } + + /* For disable logging: stop the logs and free the DMA. + * For ras_fwlog_buffsize size change we still need to free and + * reallocate the DMA in lpfc_sli4_ras_fwlog_init. + */ + phba->cfg_ras_fwlog_buffsize = val; + if (state == ACTIVE) { + lpfc_ras_stop_fwlog(phba); + lpfc_sli4_ras_dma_free(phba); + } + + lpfc_sli4_ras_init(phba); + if (phba->ras_fwlog.ras_enabled) + ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, + LPFC_RAS_ENABLE_LOGGING); + return ret; +} + +lpfc_param_store(ras_fwlog_buffsize); +static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize); + +/* + * lpfc_ras_fwlog_level: Firmware logging verbosity level + * Valid only if firmware logging is enabled + * 0(Least Verbosity) 4 (most verbosity) + * Value range is [0..4]. 
Default value is 0 + */ +LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level"); + +/* + * lpfc_ras_fwlog_func: Firmware logging enabled on function number + * Default function which has RAS support : 0 + * Value Range is [0..7]. + * FW logging is a global action and enablement is via a specific + * port. + */ +LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function"); + +/* + * lpfc_enable_bbcr: Enable BB Credit Recovery + * 0 = BB Credit Recovery disabled + * 1 = BB Credit Recovery enabled (default) + * Value range is [0,1]. Default value is 1. + */ +LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery"); + +/* Signaling module parameters */ +int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */ +module_param(lpfc_fabric_cgn_frequency, int, 0444); +MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq"); + +unsigned char lpfc_acqe_cgn_frequency = 10; /* 10 sec default */ +module_param(lpfc_acqe_cgn_frequency, byte, 0444); +MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq"); + +int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */ +module_param(lpfc_use_cgn_signal, int, 0444); +MODULE_PARM_DESC(lpfc_use_cgn_signal, "Use Congestion signaling if available"); + +/* + * lpfc_enable_dpp: Enable DPP on G7 + * 0 = DPP on G7 disabled + * 1 = DPP on G7 enabled (default) + * Value range is [0,1]. Default value is 1. + */ +LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push"); + +/* + * lpfc_enable_mi: Enable FDMI MIB + * 0 = disabled + * 1 = enabled (default) + * Value range is [0,1]. + */ +LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI"); + +/* + * lpfc_max_vmid: Maximum number of VMs to be tagged. This is valid only if + * either vmid_app_header or vmid_priority_tagging is enabled. + * 4 - 255 = vmid support enabled for 4-255 VMs + * Value range is [4,255]. + */ +LPFC_ATTR_RW(max_vmid, LPFC_MIN_VMID, LPFC_MIN_VMID, LPFC_MAX_VMID, + "Maximum number of VMs supported"); + +/* + * lpfc_vmid_inactivity_timeout: Inactivity timeout duration in hours + * 0 = Timeout is disabled + * Value range is [0,24]. + */ +LPFC_ATTR_RW(vmid_inactivity_timeout, 4, 0, 24, + "Inactivity timeout in hours"); + +/* + * lpfc_vmid_app_header: Enable App Header VMID support + * 0 = Support is disabled (default) + * 1 = Support is enabled + * Value range is [0,1]. + */ +LPFC_ATTR_RW(vmid_app_header, LPFC_VMID_APP_HEADER_DISABLE, + LPFC_VMID_APP_HEADER_DISABLE, LPFC_VMID_APP_HEADER_ENABLE, + "Enable App Header VMID support"); + +/* + * lpfc_vmid_priority_tagging: Enable Priority Tagging VMID support + * 0 = Support is disabled (default) + * 1 = Allow supported targets only + * 2 = Allow all targets + * Value range is [0,2]. 
+ */ +LPFC_ATTR_RW(vmid_priority_tagging, LPFC_VMID_PRIO_TAG_DISABLE, + LPFC_VMID_PRIO_TAG_DISABLE, + LPFC_VMID_PRIO_TAG_ALL_TARGETS, + "Enable Priority Tagging VMID support"); + +static struct attribute *lpfc_hba_attrs[] = { + &dev_attr_nvme_info.attr, + &dev_attr_scsi_stat.attr, + &dev_attr_bg_info.attr, + &dev_attr_bg_guard_err.attr, + &dev_attr_bg_apptag_err.attr, + &dev_attr_bg_reftag_err.attr, + &dev_attr_info.attr, + &dev_attr_serialnum.attr, + &dev_attr_modeldesc.attr, + &dev_attr_modelname.attr, + &dev_attr_programtype.attr, + &dev_attr_portnum.attr, + &dev_attr_fwrev.attr, + &dev_attr_hdw.attr, + &dev_attr_option_rom_version.attr, + &dev_attr_link_state.attr, + &dev_attr_num_discovered_ports.attr, + &dev_attr_lpfc_drvr_version.attr, + &dev_attr_lpfc_enable_fip.attr, + &dev_attr_lpfc_temp_sensor.attr, + &dev_attr_lpfc_log_verbose.attr, + &dev_attr_lpfc_lun_queue_depth.attr, + &dev_attr_lpfc_tgt_queue_depth.attr, + &dev_attr_lpfc_hba_queue_depth.attr, + &dev_attr_lpfc_peer_port_login.attr, + &dev_attr_lpfc_nodev_tmo.attr, + &dev_attr_lpfc_devloss_tmo.attr, + &dev_attr_lpfc_enable_fc4_type.attr, + &dev_attr_lpfc_fcp_class.attr, + &dev_attr_lpfc_use_adisc.attr, + &dev_attr_lpfc_first_burst_size.attr, + &dev_attr_lpfc_ack0.attr, + &dev_attr_lpfc_xri_rebalancing.attr, + &dev_attr_lpfc_topology.attr, + &dev_attr_lpfc_scan_down.attr, + &dev_attr_lpfc_link_speed.attr, + &dev_attr_lpfc_fcp_io_sched.attr, + &dev_attr_lpfc_ns_query.attr, + &dev_attr_lpfc_fcp2_no_tgt_reset.attr, + &dev_attr_lpfc_cr_delay.attr, + &dev_attr_lpfc_cr_count.attr, + &dev_attr_lpfc_multi_ring_support.attr, + &dev_attr_lpfc_multi_ring_rctl.attr, + &dev_attr_lpfc_multi_ring_type.attr, + &dev_attr_lpfc_fdmi_on.attr, + &dev_attr_lpfc_enable_SmartSAN.attr, + &dev_attr_lpfc_max_luns.attr, + &dev_attr_lpfc_enable_npiv.attr, + &dev_attr_lpfc_fcf_failover_policy.attr, + &dev_attr_lpfc_enable_rrq.attr, + &dev_attr_lpfc_fcp_wait_abts_rsp.attr, + &dev_attr_nport_evt_cnt.attr, + &dev_attr_board_mode.attr, + &dev_attr_lpfc_xcvr_data.attr, + &dev_attr_max_vpi.attr, + &dev_attr_used_vpi.attr, + &dev_attr_max_rpi.attr, + &dev_attr_used_rpi.attr, + &dev_attr_max_xri.attr, + &dev_attr_used_xri.attr, + &dev_attr_npiv_info.attr, + &dev_attr_issue_reset.attr, + &dev_attr_lpfc_poll.attr, + &dev_attr_lpfc_poll_tmo.attr, + &dev_attr_lpfc_task_mgmt_tmo.attr, + &dev_attr_lpfc_use_msi.attr, + &dev_attr_lpfc_nvme_oas.attr, + &dev_attr_lpfc_nvme_embed_cmd.attr, + &dev_attr_lpfc_fcp_imax.attr, + &dev_attr_lpfc_force_rscn.attr, + &dev_attr_lpfc_cq_poll_threshold.attr, + &dev_attr_lpfc_cq_max_proc_limit.attr, + &dev_attr_lpfc_fcp_cpu_map.attr, + &dev_attr_lpfc_fcp_mq_threshold.attr, + &dev_attr_lpfc_hdw_queue.attr, + &dev_attr_lpfc_irq_chann.attr, + &dev_attr_lpfc_suppress_rsp.attr, + &dev_attr_lpfc_nvmet_mrq.attr, + &dev_attr_lpfc_nvmet_mrq_post.attr, + &dev_attr_lpfc_nvme_enable_fb.attr, + &dev_attr_lpfc_nvmet_fb_size.attr, + &dev_attr_lpfc_enable_bg.attr, + &dev_attr_lpfc_enable_hba_reset.attr, + &dev_attr_lpfc_enable_hba_heartbeat.attr, + &dev_attr_lpfc_EnableXLane.attr, + &dev_attr_lpfc_XLanePriority.attr, + &dev_attr_lpfc_xlane_lun.attr, + &dev_attr_lpfc_xlane_tgt.attr, + &dev_attr_lpfc_xlane_vpt.attr, + &dev_attr_lpfc_xlane_lun_state.attr, + &dev_attr_lpfc_xlane_lun_status.attr, + &dev_attr_lpfc_xlane_priority.attr, + &dev_attr_lpfc_sg_seg_cnt.attr, + &dev_attr_lpfc_max_scsicmpl_time.attr, + &dev_attr_lpfc_aer_support.attr, + &dev_attr_lpfc_aer_state_cleanup.attr, + &dev_attr_lpfc_sriov_nr_virtfn.attr, + &dev_attr_lpfc_req_fw_upgrade.attr, + 
&dev_attr_lpfc_suppress_link_up.attr, + &dev_attr_iocb_hw.attr, + &dev_attr_pls.attr, + &dev_attr_pt.attr, + &dev_attr_txq_hw.attr, + &dev_attr_txcmplq_hw.attr, + &dev_attr_lpfc_sriov_hw_max_virtfn.attr, + &dev_attr_protocol.attr, + &dev_attr_lpfc_xlane_supported.attr, + &dev_attr_lpfc_enable_mds_diags.attr, + &dev_attr_lpfc_ras_fwlog_buffsize.attr, + &dev_attr_lpfc_ras_fwlog_level.attr, + &dev_attr_lpfc_ras_fwlog_func.attr, + &dev_attr_lpfc_enable_bbcr.attr, + &dev_attr_lpfc_enable_dpp.attr, + &dev_attr_lpfc_enable_mi.attr, + &dev_attr_cmf_info.attr, + &dev_attr_lpfc_max_vmid.attr, + &dev_attr_lpfc_vmid_inactivity_timeout.attr, + &dev_attr_lpfc_vmid_app_header.attr, + &dev_attr_lpfc_vmid_priority_tagging.attr, + NULL, +}; + +static const struct attribute_group lpfc_hba_attr_group = { + .attrs = lpfc_hba_attrs +}; + +const struct attribute_group *lpfc_hba_groups[] = { + &lpfc_hba_attr_group, + NULL +}; + +static struct attribute *lpfc_vport_attrs[] = { + &dev_attr_info.attr, + &dev_attr_link_state.attr, + &dev_attr_num_discovered_ports.attr, + &dev_attr_lpfc_drvr_version.attr, + &dev_attr_lpfc_log_verbose.attr, + &dev_attr_lpfc_lun_queue_depth.attr, + &dev_attr_lpfc_tgt_queue_depth.attr, + &dev_attr_lpfc_nodev_tmo.attr, + &dev_attr_lpfc_devloss_tmo.attr, + &dev_attr_lpfc_hba_queue_depth.attr, + &dev_attr_lpfc_peer_port_login.attr, + &dev_attr_lpfc_restrict_login.attr, + &dev_attr_lpfc_fcp_class.attr, + &dev_attr_lpfc_use_adisc.attr, + &dev_attr_lpfc_first_burst_size.attr, + &dev_attr_lpfc_max_luns.attr, + &dev_attr_nport_evt_cnt.attr, + &dev_attr_npiv_info.attr, + &dev_attr_lpfc_enable_da_id.attr, + &dev_attr_lpfc_max_scsicmpl_time.attr, + &dev_attr_lpfc_static_vport.attr, + &dev_attr_cmf_info.attr, + NULL, +}; + +static const struct attribute_group lpfc_vport_attr_group = { + .attrs = lpfc_vport_attrs +}; + +const struct attribute_group *lpfc_vport_groups[] = { + &lpfc_vport_attr_group, + NULL +}; + +/** + * sysfs_ctlreg_write - Write method for writing to ctlreg + * @filp: open sysfs file + * @kobj: kernel kobject that contains the kernel class device. + * @bin_attr: kernel attributes passed to us. + * @buf: contains the data to be written to the adapter IOREG space. + * @off: offset into buffer to beginning of data. + * @count: bytes to transfer. + * + * Description: + * Accessed via /sys/class/scsi_host/hostxxx/ctlreg. + * Uses the adapter io control registers to send buf contents to the adapter. + * + * Returns: + * -ERANGE off and count combo out of range + * -EINVAL off, count or buff address invalid + * -EPERM adapter is offline + * value of count, buf contents written + **/ +static ssize_t +sysfs_ctlreg_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + size_t buf_off; + struct device *dev = container_of(kobj, struct device, kobj); + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + if (phba->sli_rev >= LPFC_SLI_REV4) + return -EPERM; + + if ((off + count) > FF_REG_AREA_SIZE) + return -ERANGE; + + if (count <= LPFC_REG_WRITE_KEY_SIZE) + return 0; + + if (off % 4 || count % 4 || (unsigned long)buf % 4) + return -EINVAL; + + /* This is to protect HBA registers from accidental writes. 
*/ + if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE)) + return -EINVAL; + + if (!(vport->fc_flag & FC_OFFLINE_MODE)) + return -EPERM; + + spin_lock_irq(&phba->hbalock); + for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE; + buf_off += sizeof(uint32_t)) + writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)), + phba->ctrl_regs_memmap_p + off + buf_off); + + spin_unlock_irq(&phba->hbalock); + + return count; +} + +/** + * sysfs_ctlreg_read - Read method for reading from ctlreg + * @filp: open sysfs file + * @kobj: kernel kobject that contains the kernel class device. + * @bin_attr: kernel attributes passed to us. + * @buf: if successful contains the data from the adapter IOREG space. + * @off: offset into buffer to beginning of data. + * @count: bytes to transfer. + * + * Description: + * Accessed via /sys/class/scsi_host/hostxxx/ctlreg. + * Uses the adapter io control registers to read data into buf. + * + * Returns: + * -ERANGE off and count combo out of range + * -EINVAL off, count or buff address invalid + * value of count, buf contents read + **/ +static ssize_t +sysfs_ctlreg_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + size_t buf_off; + uint32_t * tmp_ptr; + struct device *dev = container_of(kobj, struct device, kobj); + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + if (phba->sli_rev >= LPFC_SLI_REV4) + return -EPERM; + + if (off > FF_REG_AREA_SIZE) + return -ERANGE; + + if ((off + count) > FF_REG_AREA_SIZE) + count = FF_REG_AREA_SIZE - off; + + if (count == 0) return 0; + + if (off % 4 || count % 4 || (unsigned long)buf % 4) + return -EINVAL; + + spin_lock_irq(&phba->hbalock); + + for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) { + tmp_ptr = (uint32_t *)(buf + buf_off); + *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off); + } + + spin_unlock_irq(&phba->hbalock); + + return count; +} + +static struct bin_attribute sysfs_ctlreg_attr = { + .attr = { + .name = "ctlreg", + .mode = S_IRUSR | S_IWUSR, + }, + .size = 256, + .read = sysfs_ctlreg_read, + .write = sysfs_ctlreg_write, +}; + +/** + * sysfs_mbox_write - Write method for writing information via mbox + * @filp: open sysfs file + * @kobj: kernel kobject that contains the kernel class device. + * @bin_attr: kernel attributes passed to us. + * @buf: contains the data to be written to sysfs mbox. + * @off: offset into buffer to beginning of data. + * @count: bytes to transfer. + * + * Description: + * Deprecated function. All mailbox access from user space is performed via the + * bsg interface. + * + * Returns: + * -EPERM operation not permitted + **/ +static ssize_t +sysfs_mbox_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + return -EPERM; +} + +/** + * sysfs_mbox_read - Read method for reading information via mbox + * @filp: open sysfs file + * @kobj: kernel kobject that contains the kernel class device. + * @bin_attr: kernel attributes passed to us. + * @buf: contains the data to be read from sysfs mbox. + * @off: offset into buffer to beginning of data. + * @count: bytes to transfer. + * + * Description: + * Deprecated function. All mailbox access from user space is performed via the + * bsg interface. 
+ * + * Returns: + * -EPERM operation not permitted + **/ +static ssize_t +sysfs_mbox_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + return -EPERM; +} + +static struct bin_attribute sysfs_mbox_attr = { + .attr = { + .name = "mbox", + .mode = S_IRUSR | S_IWUSR, + }, + .size = MAILBOX_SYSFS_MAX, + .read = sysfs_mbox_read, + .write = sysfs_mbox_write, +}; + +/** + * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries + * @vport: address of lpfc vport structure. + * + * Return codes: + * zero on success + * error return code from sysfs_create_bin_file() + **/ +int +lpfc_alloc_sysfs_attr(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + int error; + + /* Virtual ports do not need ctrl_reg and mbox */ + if (vport->port_type == LPFC_NPIV_PORT) + return 0; + + error = sysfs_create_bin_file(&shost->shost_dev.kobj, + &sysfs_ctlreg_attr); + if (error) + goto out; + + error = sysfs_create_bin_file(&shost->shost_dev.kobj, + &sysfs_mbox_attr); + if (error) + goto out_remove_ctlreg_attr; + + return 0; +out_remove_ctlreg_attr: + sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); +out: + return error; +} + +/** + * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries + * @vport: address of lpfc vport structure. + **/ +void +lpfc_free_sysfs_attr(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + /* Virtual ports do not need ctrl_reg and mbox */ + if (vport->port_type == LPFC_NPIV_PORT) + return; + sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); + sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); +} + +/* + * Dynamic FC Host Attributes Support + */ + +/** + * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host + * @shost: kernel scsi host pointer. + **/ +static void +lpfc_get_host_symbolic_name(struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + + lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), + sizeof fc_host_symbolic_name(shost)); +} + +/** + * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id + * @shost: kernel scsi host pointer. + **/ +static void +lpfc_get_host_port_id(struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + + /* note: fc_myDID already in cpu endianness */ + fc_host_port_id(shost) = vport->fc_myDID; +} + +/** + * lpfc_get_host_port_type - Set the value of the scsi host port type + * @shost: kernel scsi host pointer. + **/ +static void +lpfc_get_host_port_type(struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + spin_lock_irq(shost->host_lock); + + if (vport->port_type == LPFC_NPIV_PORT) { + fc_host_port_type(shost) = FC_PORTTYPE_NPIV; + } else if (lpfc_is_link_up(phba)) { + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { + if (vport->fc_flag & FC_PUBLIC_LOOP) + fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; + else + fc_host_port_type(shost) = FC_PORTTYPE_LPORT; + } else { + if (vport->fc_flag & FC_FABRIC) + fc_host_port_type(shost) = FC_PORTTYPE_NPORT; + else + fc_host_port_type(shost) = FC_PORTTYPE_PTP; + } + } else + fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; + + spin_unlock_irq(shost->host_lock); +} + +/** + * lpfc_get_host_port_state - Set the value of the scsi host port state + * @shost: kernel scsi host pointer. 
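+ *
+ * Description:
+ * Translates the driver's link and vport state into an FC_PORTSTATE_*
+ * value: OFFLINE when the port is offline; otherwise LINKDOWN, ERROR or
+ * UNKNOWN from the HBA link state, with BYPASSED or ONLINE chosen by
+ * whether vport discovery has completed when the link is up.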
+ **/ +static void +lpfc_get_host_port_state(struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + spin_lock_irq(shost->host_lock); + + if (vport->fc_flag & FC_OFFLINE_MODE) + fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; + else { + switch (phba->link_state) { + case LPFC_LINK_UNKNOWN: + case LPFC_LINK_DOWN: + fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; + break; + case LPFC_LINK_UP: + case LPFC_CLEAR_LA: + case LPFC_HBA_READY: + /* Links up, reports port state accordingly */ + if (vport->port_state < LPFC_VPORT_READY) + fc_host_port_state(shost) = + FC_PORTSTATE_BYPASSED; + else + fc_host_port_state(shost) = + FC_PORTSTATE_ONLINE; + break; + case LPFC_HBA_ERROR: + fc_host_port_state(shost) = FC_PORTSTATE_ERROR; + break; + default: + fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; + break; + } + } + + spin_unlock_irq(shost->host_lock); +} + +/** + * lpfc_get_host_speed - Set the value of the scsi host speed + * @shost: kernel scsi host pointer. + **/ +static void +lpfc_get_host_speed(struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + spin_lock_irq(shost->host_lock); + + if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) { + switch(phba->fc_linkspeed) { + case LPFC_LINK_SPEED_1GHZ: + fc_host_speed(shost) = FC_PORTSPEED_1GBIT; + break; + case LPFC_LINK_SPEED_2GHZ: + fc_host_speed(shost) = FC_PORTSPEED_2GBIT; + break; + case LPFC_LINK_SPEED_4GHZ: + fc_host_speed(shost) = FC_PORTSPEED_4GBIT; + break; + case LPFC_LINK_SPEED_8GHZ: + fc_host_speed(shost) = FC_PORTSPEED_8GBIT; + break; + case LPFC_LINK_SPEED_10GHZ: + fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + break; + case LPFC_LINK_SPEED_16GHZ: + fc_host_speed(shost) = FC_PORTSPEED_16GBIT; + break; + case LPFC_LINK_SPEED_32GHZ: + fc_host_speed(shost) = FC_PORTSPEED_32GBIT; + break; + case LPFC_LINK_SPEED_64GHZ: + fc_host_speed(shost) = FC_PORTSPEED_64GBIT; + break; + case LPFC_LINK_SPEED_128GHZ: + fc_host_speed(shost) = FC_PORTSPEED_128GBIT; + break; + case LPFC_LINK_SPEED_256GHZ: + fc_host_speed(shost) = FC_PORTSPEED_256GBIT; + break; + default: + fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; + break; + } + } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) { + switch (phba->fc_linkspeed) { + case LPFC_ASYNC_LINK_SPEED_1GBPS: + fc_host_speed(shost) = FC_PORTSPEED_1GBIT; + break; + case LPFC_ASYNC_LINK_SPEED_10GBPS: + fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + break; + case LPFC_ASYNC_LINK_SPEED_20GBPS: + fc_host_speed(shost) = FC_PORTSPEED_20GBIT; + break; + case LPFC_ASYNC_LINK_SPEED_25GBPS: + fc_host_speed(shost) = FC_PORTSPEED_25GBIT; + break; + case LPFC_ASYNC_LINK_SPEED_40GBPS: + fc_host_speed(shost) = FC_PORTSPEED_40GBIT; + break; + case LPFC_ASYNC_LINK_SPEED_100GBPS: + fc_host_speed(shost) = FC_PORTSPEED_100GBIT; + break; + default: + fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; + break; + } + } else + fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; + + spin_unlock_irq(shost->host_lock); +} + +/** + * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name + * @shost: kernel scsi host pointer. 
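+ *
+ * Description:
+ * Reports the fabric node name WWN saved in the fabric parameters when
+ * the port is logged into a fabric (or attached to a public loop);
+ * otherwise reports zero, since there is no F/FL_Port.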
+ **/ +static void +lpfc_get_host_fabric_name (struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + u64 node_name; + + spin_lock_irq(shost->host_lock); + + if ((vport->port_state > LPFC_FLOGI) && + ((vport->fc_flag & FC_FABRIC) || + ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && + (vport->fc_flag & FC_PUBLIC_LOOP)))) + node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); + else + /* fabric is local port if there is no F/FL_Port */ + node_name = 0; + + spin_unlock_irq(shost->host_lock); + + fc_host_fabric_name(shost) = node_name; +} + +/** + * lpfc_get_stats - Return statistical information about the adapter + * @shost: kernel scsi host pointer. + * + * Notes: + * NULL on error for link down, no mbox pool, sli2 active, + * management not allowed, memory allocation error, or mbox error. + * + * Returns: + * NULL for error + * address of the adapter host statistics + **/ +static struct fc_host_statistics * +lpfc_get_stats(struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli *psli = &phba->sli; + struct fc_host_statistics *hs = &phba->link_stats; + struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; + LPFC_MBOXQ_t *pmboxq; + MAILBOX_t *pmb; + int rc = 0; + + /* + * prevent udev from issuing mailbox commands until the port is + * configured. + */ + if (phba->link_state < LPFC_LINK_DOWN || + !phba->mbox_mem_pool || + (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) + return NULL; + + if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) + return NULL; + + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) + return NULL; + memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); + + pmb = &pmboxq->u.mb; + pmb->mbxCommand = MBX_READ_STATUS; + pmb->mbxOwner = OWN_HOST; + pmboxq->ctx_buf = NULL; + pmboxq->vport = vport; + + if (vport->fc_flag & FC_OFFLINE_MODE) { + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + mempool_free(pmboxq, phba->mbox_mem_pool); + return NULL; + } + } else { + rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + if (rc != MBX_SUCCESS) { + if (rc != MBX_TIMEOUT) + mempool_free(pmboxq, phba->mbox_mem_pool); + return NULL; + } + } + + memset(hs, 0, sizeof (struct fc_host_statistics)); + + hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt; + hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt; + + /* + * The MBX_READ_STATUS returns tx_k_bytes which has to be + * converted to words. + * + * Check if extended byte flag is set, to know when to collect upper + * bits of 64 bit wide statistics counter. 
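+	 *
+	 * A transmission word is 4 bytes, so one kilobyte corresponds to
+	 * 256 words; hence the multiply-by-256 conversions below.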
+ */ + if (pmb->un.varRdStatus.xkb & RD_ST_XKB) { + hs->tx_words = (u64) + ((((u64)(pmb->un.varRdStatus.xmit_xkb & + RD_ST_XMIT_XKB_MASK) << 32) | + (u64)pmb->un.varRdStatus.xmitByteCnt) * + (u64)256); + hs->rx_words = (u64) + ((((u64)(pmb->un.varRdStatus.rcv_xkb & + RD_ST_RCV_XKB_MASK) << 32) | + (u64)pmb->un.varRdStatus.rcvByteCnt) * + (u64)256); + } else { + hs->tx_words = (uint64_t) + ((uint64_t)pmb->un.varRdStatus.xmitByteCnt + * (uint64_t)256); + hs->rx_words = (uint64_t) + ((uint64_t)pmb->un.varRdStatus.rcvByteCnt + * (uint64_t)256); + } + + memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); + pmb->mbxCommand = MBX_READ_LNK_STAT; + pmb->mbxOwner = OWN_HOST; + pmboxq->ctx_buf = NULL; + pmboxq->vport = vport; + + if (vport->fc_flag & FC_OFFLINE_MODE) { + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + mempool_free(pmboxq, phba->mbox_mem_pool); + return NULL; + } + } else { + rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + if (rc != MBX_SUCCESS) { + if (rc != MBX_TIMEOUT) + mempool_free(pmboxq, phba->mbox_mem_pool); + return NULL; + } + } + + hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; + hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; + hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; + hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; + hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; + hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; + hs->error_frames = pmb->un.varRdLnk.crcCnt; + + hs->cn_sig_warn = atomic64_read(&phba->cgn_acqe_stat.warn); + hs->cn_sig_alarm = atomic64_read(&phba->cgn_acqe_stat.alarm); + + hs->link_failure_count -= lso->link_failure_count; + hs->loss_of_sync_count -= lso->loss_of_sync_count; + hs->loss_of_signal_count -= lso->loss_of_signal_count; + hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count; + hs->invalid_tx_word_count -= lso->invalid_tx_word_count; + hs->invalid_crc_count -= lso->invalid_crc_count; + hs->error_frames -= lso->error_frames; + + if (phba->hba_flag & HBA_FCOE_MODE) { + hs->lip_count = -1; + hs->nos_count = (phba->link_events >> 1); + hs->nos_count -= lso->link_events; + } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { + hs->lip_count = (phba->fc_eventTag >> 1); + hs->lip_count -= lso->link_events; + hs->nos_count = -1; + } else { + hs->lip_count = -1; + hs->nos_count = (phba->fc_eventTag >> 1); + hs->nos_count -= lso->link_events; + } + + hs->dumped_frames = -1; + + hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start; + + mempool_free(pmboxq, phba->mbox_mem_pool); + + return hs; +} + +/** + * lpfc_reset_stats - Copy the adapter link stats information + * @shost: kernel scsi host pointer. 
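+ *
+ * Description:
+ * Issues MBX_READ_STATUS with the reset request bit set, followed by
+ * MBX_READ_LNK_STAT, and records the returned counters in
+ * lnk_stat_offsets so that subsequent lpfc_get_stats() calls report
+ * values relative to this reset.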
+ **/ +static void +lpfc_reset_stats(struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets; + LPFC_MBOXQ_t *pmboxq; + MAILBOX_t *pmb; + int rc = 0; + + if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) + return; + + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) + return; + memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); + + pmb = &pmboxq->u.mb; + pmb->mbxCommand = MBX_READ_STATUS; + pmb->mbxOwner = OWN_HOST; + pmb->un.varWords[0] = 0x1; /* reset request */ + pmboxq->ctx_buf = NULL; + pmboxq->vport = vport; + + if ((vport->fc_flag & FC_OFFLINE_MODE) || + (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + mempool_free(pmboxq, phba->mbox_mem_pool); + return; + } + } else { + rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + if (rc != MBX_SUCCESS) { + if (rc != MBX_TIMEOUT) + mempool_free(pmboxq, phba->mbox_mem_pool); + return; + } + } + + memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); + pmb->mbxCommand = MBX_READ_LNK_STAT; + pmb->mbxOwner = OWN_HOST; + pmboxq->ctx_buf = NULL; + pmboxq->vport = vport; + + if ((vport->fc_flag & FC_OFFLINE_MODE) || + (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + mempool_free(pmboxq, phba->mbox_mem_pool); + return; + } + } else { + rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + if (rc != MBX_SUCCESS) { + if (rc != MBX_TIMEOUT) + mempool_free(pmboxq, phba->mbox_mem_pool); + return; + } + } + + lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; + lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; + lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; + lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; + lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; + lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; + lso->error_frames = pmb->un.varRdLnk.crcCnt; + if (phba->hba_flag & HBA_FCOE_MODE) + lso->link_events = (phba->link_events >> 1); + else + lso->link_events = (phba->fc_eventTag >> 1); + + atomic64_set(&phba->cgn_acqe_stat.warn, 0); + atomic64_set(&phba->cgn_acqe_stat.alarm, 0); + + memset(&shost_to_fc_host(shost)->fpin_stats, 0, + sizeof(shost_to_fc_host(shost)->fpin_stats)); + + psli->stats_start = ktime_get_seconds(); + + mempool_free(pmboxq, phba->mbox_mem_pool); + + return; +} + +/* + * The LPFC driver treats linkdown handling as target loss events so there + * are no sysfs handlers for link_down_tmo. + */ + +/** + * lpfc_get_node_by_target - Return the nodelist for a target + * @starget: kernel scsi target pointer. 
+ * + * Returns: + * address of the node list if found + * NULL target not found + **/ +static struct lpfc_nodelist * +lpfc_get_node_by_target(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_nodelist *ndlp; + + spin_lock_irq(shost->host_lock); + /* Search for this, mapped, target ID */ + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && + starget->id == ndlp->nlp_sid) { + spin_unlock_irq(shost->host_lock); + return ndlp; + } + } + spin_unlock_irq(shost->host_lock); + return NULL; +} + +/** + * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1 + * @starget: kernel scsi target pointer. + **/ +static void +lpfc_get_starget_port_id(struct scsi_target *starget) +{ + struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); + + fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1; +} + +/** + * lpfc_get_starget_node_name - Set the target node name + * @starget: kernel scsi target pointer. + * + * Description: Set the target node name to the ndlp node name wwn or zero. + **/ +static void +lpfc_get_starget_node_name(struct scsi_target *starget) +{ + struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); + + fc_starget_node_name(starget) = + ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0; +} + +/** + * lpfc_get_starget_port_name - Set the target port name + * @starget: kernel scsi target pointer. + * + * Description: set the target port name to the ndlp port name wwn or zero. + **/ +static void +lpfc_get_starget_port_name(struct scsi_target *starget) +{ + struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); + + fc_starget_port_name(starget) = + ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0; +} + +/** + * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo + * @rport: fc rport address. + * @timeout: new value for dev loss tmo. + * + * Description: + * If timeout is non zero set the dev_loss_tmo to timeout, else set + * dev_loss_tmo to one. + **/ +static void +lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) +{ + struct lpfc_rport_data *rdata = rport->dd_data; + struct lpfc_nodelist *ndlp = rdata->pnode; +#if (IS_ENABLED(CONFIG_NVME_FC)) + struct lpfc_nvme_rport *nrport = NULL; +#endif + + if (timeout) + rport->dev_loss_tmo = timeout; + else + rport->dev_loss_tmo = 1; + + if (!ndlp) { + dev_info(&rport->dev, "Cannot find remote node to " + "set rport dev loss tmo, port_id x%x\n", + rport->port_id); + return; + } + +#if (IS_ENABLED(CONFIG_NVME_FC)) + nrport = lpfc_ndlp_get_nrport(ndlp); + + if (nrport && nrport->remoteport) + nvme_fc_set_remoteport_devloss(nrport->remoteport, + rport->dev_loss_tmo); +#endif +} + +/* + * lpfc_rport_show_function - Return rport target information + * + * Description: + * Macro that uses field to generate a function with the name lpfc_show_rport_ + * + * lpfc_show_rport_##field: returns the bytes formatted in buf + * @cdev: class converted to an fc_rport. + * @buf: on return contains the target_field or zero. + * + * Returns: size of formatted string. + **/ +#define lpfc_rport_show_function(field, format_string, sz, cast) \ +static ssize_t \ +lpfc_show_rport_##field (struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct fc_rport *rport = transport_class_to_rport(dev); \ + struct lpfc_rport_data *rdata = rport->hostdata; \ + return scnprintf(buf, sz, format_string, \ + (rdata->target) ? 
cast rdata->target->field : 0); \ +} + +#define lpfc_rport_rd_attr(field, format_string, sz) \ + lpfc_rport_show_function(field, format_string, sz, ) \ +static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL) + +/** + * lpfc_set_vport_symbolic_name - Set the vport's symbolic name + * @fc_vport: The fc_vport who's symbolic name has been changed. + * + * Description: + * This function is called by the transport after the @fc_vport's symbolic name + * has been changed. This function re-registers the symbolic name with the + * switch to propagate the change into the fabric if the vport is active. + **/ +static void +lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport) +{ + struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; + + if (vport->port_state == LPFC_VPORT_READY) + lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); +} + +/** + * lpfc_hba_log_verbose_init - Set hba's log verbose level + * @phba: Pointer to lpfc_hba struct. + * @verbose: Verbose level to set. + * + * This function is called by the lpfc_get_cfgparam() routine to set the + * module lpfc_log_verbose into the @phba cfg_log_verbose for use with + * log message according to the module's lpfc_log_verbose parameter setting + * before hba port or vport created. + **/ +static void +lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose) +{ + phba->cfg_log_verbose = verbose; +} + +struct fc_function_template lpfc_transport_functions = { + /* fixed attributes the driver supports */ + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_supported_speeds = 1, + .show_host_maxframe_size = 1, + + .get_host_symbolic_name = lpfc_get_host_symbolic_name, + .show_host_symbolic_name = 1, + + /* dynamic attributes the driver supports */ + .get_host_port_id = lpfc_get_host_port_id, + .show_host_port_id = 1, + + .get_host_port_type = lpfc_get_host_port_type, + .show_host_port_type = 1, + + .get_host_port_state = lpfc_get_host_port_state, + .show_host_port_state = 1, + + /* active_fc4s is shown but doesn't change (thus no get function) */ + .show_host_active_fc4s = 1, + + .get_host_speed = lpfc_get_host_speed, + .show_host_speed = 1, + + .get_host_fabric_name = lpfc_get_host_fabric_name, + .show_host_fabric_name = 1, + + /* + * The LPFC driver treats linkdown handling as target loss events + * so there are no sysfs handlers for link_down_tmo. 
+ */ + + .get_fc_host_stats = lpfc_get_stats, + .reset_fc_host_stats = lpfc_reset_stats, + + .dd_fcrport_size = sizeof(struct lpfc_rport_data), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + + .get_starget_port_id = lpfc_get_starget_port_id, + .show_starget_port_id = 1, + + .get_starget_node_name = lpfc_get_starget_node_name, + .show_starget_node_name = 1, + + .get_starget_port_name = lpfc_get_starget_port_name, + .show_starget_port_name = 1, + + .issue_fc_host_lip = lpfc_issue_lip, + .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, + .terminate_rport_io = lpfc_terminate_rport_io, + + .dd_fcvport_size = sizeof(struct lpfc_vport *), + + .vport_disable = lpfc_vport_disable, + + .set_vport_symbolic_name = lpfc_set_vport_symbolic_name, + + .bsg_request = lpfc_bsg_request, + .bsg_timeout = lpfc_bsg_timeout, +}; + +struct fc_function_template lpfc_vport_transport_functions = { + /* fixed attributes the driver supports */ + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_supported_speeds = 1, + .show_host_maxframe_size = 1, + + .get_host_symbolic_name = lpfc_get_host_symbolic_name, + .show_host_symbolic_name = 1, + + /* dynamic attributes the driver supports */ + .get_host_port_id = lpfc_get_host_port_id, + .show_host_port_id = 1, + + .get_host_port_type = lpfc_get_host_port_type, + .show_host_port_type = 1, + + .get_host_port_state = lpfc_get_host_port_state, + .show_host_port_state = 1, + + /* active_fc4s is shown but doesn't change (thus no get function) */ + .show_host_active_fc4s = 1, + + .get_host_speed = lpfc_get_host_speed, + .show_host_speed = 1, + + .get_host_fabric_name = lpfc_get_host_fabric_name, + .show_host_fabric_name = 1, + + /* + * The LPFC driver treats linkdown handling as target loss events + * so there are no sysfs handlers for link_down_tmo. + */ + + .get_fc_host_stats = lpfc_get_stats, + .reset_fc_host_stats = lpfc_reset_stats, + + .dd_fcrport_size = sizeof(struct lpfc_rport_data), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + + .get_starget_port_id = lpfc_get_starget_port_id, + .show_starget_port_id = 1, + + .get_starget_node_name = lpfc_get_starget_node_name, + .show_starget_node_name = 1, + + .get_starget_port_name = lpfc_get_starget_port_name, + .show_starget_port_name = 1, + + .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, + .terminate_rport_io = lpfc_terminate_rport_io, + + .vport_disable = lpfc_vport_disable, + + .set_vport_symbolic_name = lpfc_set_vport_symbolic_name, +}; + +/** + * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE + * Mode + * @phba: lpfc_hba pointer. + **/ +static void +lpfc_get_hba_function_mode(struct lpfc_hba *phba) +{ + /* If the adapter supports FCoE mode */ + switch (phba->pcidev->device) { + case PCI_DEVICE_ID_SKYHAWK: + case PCI_DEVICE_ID_SKYHAWK_VF: + case PCI_DEVICE_ID_LANCER_FCOE: + case PCI_DEVICE_ID_LANCER_FCOE_VF: + case PCI_DEVICE_ID_ZEPHYR_DCSP: + case PCI_DEVICE_ID_TIGERSHARK: + case PCI_DEVICE_ID_TOMCAT: + phba->hba_flag |= HBA_FCOE_MODE; + break; + default: + /* for others, clear the flag */ + phba->hba_flag &= ~HBA_FCOE_MODE; + } +} + +/** + * lpfc_get_cfgparam - Used during probe_one to init the adapter structure + * @phba: lpfc_hba pointer. 
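+ *
+ * Description:
+ * Copies each module parameter into the corresponding phba->cfg_* field
+ * via the generated *_init helpers, then derives dependent settings such
+ * as the polling mode, the FCoE/BlockGuard interaction and the default
+ * hardware queue and IRQ channel counts.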
+ **/ +void +lpfc_get_cfgparam(struct lpfc_hba *phba) +{ + lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); + lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched); + lpfc_ns_query_init(phba, lpfc_ns_query); + lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset); + lpfc_cr_delay_init(phba, lpfc_cr_delay); + lpfc_cr_count_init(phba, lpfc_cr_count); + lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support); + lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl); + lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type); + lpfc_ack0_init(phba, lpfc_ack0); + lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing); + lpfc_topology_init(phba, lpfc_topology); + lpfc_link_speed_init(phba, lpfc_link_speed); + lpfc_poll_tmo_init(phba, lpfc_poll_tmo); + lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo); + lpfc_enable_npiv_init(phba, lpfc_enable_npiv); + lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); + lpfc_enable_rrq_init(phba, lpfc_enable_rrq); + lpfc_fcp_wait_abts_rsp_init(phba, lpfc_fcp_wait_abts_rsp); + lpfc_fdmi_on_init(phba, lpfc_fdmi_on); + lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN); + lpfc_use_msi_init(phba, lpfc_use_msi); + lpfc_nvme_oas_init(phba, lpfc_nvme_oas); + lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd); + lpfc_fcp_imax_init(phba, lpfc_fcp_imax); + lpfc_force_rscn_init(phba, lpfc_force_rscn); + lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold); + lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit); + lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); + lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); + lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); + + lpfc_EnableXLane_init(phba, lpfc_EnableXLane); + /* VMID Inits */ + lpfc_max_vmid_init(phba, lpfc_max_vmid); + lpfc_vmid_inactivity_timeout_init(phba, lpfc_vmid_inactivity_timeout); + lpfc_vmid_app_header_init(phba, lpfc_vmid_app_header); + lpfc_vmid_priority_tagging_init(phba, lpfc_vmid_priority_tagging); + if (phba->sli_rev != LPFC_SLI_REV4) + phba->cfg_EnableXLane = 0; + lpfc_XLanePriority_init(phba, lpfc_XLanePriority); + + memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t))); + memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t))); + phba->cfg_oas_lun_state = 0; + phba->cfg_oas_lun_status = 0; + phba->cfg_oas_flags = 0; + phba->cfg_oas_priority = 0; + lpfc_enable_bg_init(phba, lpfc_enable_bg); + lpfc_prot_mask_init(phba, lpfc_prot_mask); + lpfc_prot_guard_init(phba, lpfc_prot_guard); + if (phba->sli_rev == LPFC_SLI_REV4) + phba->cfg_poll = 0; + else + phba->cfg_poll = lpfc_poll; + + /* Get the function mode */ + lpfc_get_hba_function_mode(phba); + + /* BlockGuard allowed for FC only. */ + if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0581 BlockGuard feature not supported\n"); + /* If set, clear the BlockGuard support param */ + phba->cfg_enable_bg = 0; + } else if (phba->cfg_enable_bg) { + phba->sli3_options |= LPFC_SLI3_BG_ENABLED; + } + + lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp); + + lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type); + lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq); + lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post); + + /* Initialize first burst. Target vs Initiator are different. 
*/ + lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); + lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); + lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold); + lpfc_hdw_queue_init(phba, lpfc_hdw_queue); + lpfc_irq_chann_init(phba, lpfc_irq_chann); + lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr); + lpfc_enable_dpp_init(phba, lpfc_enable_dpp); + lpfc_enable_mi_init(phba, lpfc_enable_mi); + + phba->cgn_p.cgn_param_mode = LPFC_CFG_OFF; + phba->cmf_active_mode = LPFC_CFG_OFF; + if (lpfc_fabric_cgn_frequency > EDC_CG_SIGFREQ_CNT_MAX || + lpfc_fabric_cgn_frequency < EDC_CG_SIGFREQ_CNT_MIN) + lpfc_fabric_cgn_frequency = 100; /* 100 ms default */ + + if (phba->sli_rev != LPFC_SLI_REV4) { + /* NVME only supported on SLI4 */ + phba->nvmet_support = 0; + phba->cfg_nvmet_mrq = 0; + phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; + phba->cfg_enable_bbcr = 0; + phba->cfg_xri_rebalancing = 0; + } else { + /* We MUST have FCP support */ + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP; + } + + phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1; + + phba->cfg_enable_pbde = 0; + + /* A value of 0 means use the number of CPUs found in the system */ + if (phba->cfg_hdw_queue == 0) + phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu; + if (phba->cfg_irq_chann == 0) + phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu; + if (phba->cfg_irq_chann > phba->cfg_hdw_queue && + phba->sli_rev == LPFC_SLI_REV4) + phba->cfg_irq_chann = phba->cfg_hdw_queue; + + lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); + lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); + lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn); + lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade); + lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); + lpfc_delay_discovery_init(phba, lpfc_delay_discovery); + lpfc_sli_mode_init(phba, lpfc_sli_mode); + lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags); + lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize); + lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level); + lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func); + + return; +} + +/** + * lpfc_nvme_mod_param_dep - Adjust module parameter value based on + * dependencies between protocols and roles. + * @phba: lpfc_hba pointer. 
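+ *
+ * Description:
+ * Caps the hardware queue and IRQ channel counts at the number of
+ * present CPUs and, when NVME target mode is enabled, disables FCP,
+ * limits the NVMET first-burst size and bounds the MRQ count by the
+ * hardware queue count; otherwise the NVMET-specific parameters are
+ * cleared.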
+ **/ +void +lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) +{ + int logit = 0; + + if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) { + phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu; + logit = 1; + } + if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) { + phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu; + logit = 1; + } + if (phba->cfg_irq_chann > phba->cfg_hdw_queue) { + phba->cfg_irq_chann = phba->cfg_hdw_queue; + logit = 1; + } + if (logit) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2006 Reducing Queues - CPU limitation: " + "IRQ %d HDWQ %d\n", + phba->cfg_irq_chann, + phba->cfg_hdw_queue); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && + phba->nvmet_support) { + phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6013 %s x%x fb_size x%x, fb_max x%x\n", + "NVME Target PRLI ACC enable_fb ", + phba->cfg_nvme_enable_fb, + phba->cfg_nvmet_fb_size, + LPFC_NVMET_FB_SZ_MAX); + + if (phba->cfg_nvme_enable_fb == 0) + phba->cfg_nvmet_fb_size = 0; + else { + if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX) + phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX; + } + + if (!phba->cfg_nvmet_mrq) + phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; + + /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */ + if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) { + phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + "6018 Adjust lpfc_nvmet_mrq to %d\n", + phba->cfg_nvmet_mrq); + } + if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) + phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; + + } else { + /* Not NVME Target mode. Turn off Target parameters. */ + phba->nvmet_support = 0; + phba->cfg_nvmet_mrq = 0; + phba->cfg_nvmet_fb_size = 0; + } +} + +/** + * lpfc_get_vport_cfgparam - Used during port create, init the vport structure + * @vport: lpfc_vport pointer. + **/ +void +lpfc_get_vport_cfgparam(struct lpfc_vport *vport) +{ + lpfc_log_verbose_init(vport, lpfc_log_verbose); + lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth); + lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth); + lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo); + lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo); + lpfc_peer_port_login_init(vport, lpfc_peer_port_login); + lpfc_restrict_login_init(vport, lpfc_restrict_login); + lpfc_fcp_class_init(vport, lpfc_fcp_class); + lpfc_use_adisc_init(vport, lpfc_use_adisc); + lpfc_first_burst_size_init(vport, lpfc_first_burst_size); + lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time); + lpfc_discovery_threads_init(vport, lpfc_discovery_threads); + lpfc_max_luns_init(vport, lpfc_max_luns); + lpfc_scan_down_init(vport, lpfc_scan_down); + lpfc_enable_da_id_init(vport, lpfc_enable_da_id); + return; +} diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h new file mode 100644 index 000000000..9659a8fff --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_attr.h @@ -0,0 +1,128 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. 
* + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#define LPFC_ATTR(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_init(name, defval, minval, maxval) + +#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL) + +#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +lpfc_param_set(name, defval, minval, maxval)\ +lpfc_param_store(name)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) + +#define LPFC_BBCR_ATTR_RW(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, 0444);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +lpfc_param_store(name)\ +static DEVICE_ATTR(lpfc_##name, 0444 | 0644,\ + lpfc_##name##_show, lpfc_##name##_store) + +#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_hex_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL) + +#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_hex_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +lpfc_param_set(name, defval, minval, maxval)\ +lpfc_param_store(name)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) + +#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_init(name, defval, minval, maxval) + +#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL) + +#define 
LPFC_VPORT_ULL_ATTR_R(name, defval, minval, maxval, desc) \ +static uint64_t lpfc_##name = defval;\ +module_param(lpfc_##name, ullong, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL) + +#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +lpfc_vport_param_set(name, defval, minval, maxval)\ +lpfc_vport_param_store(name)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) + +#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_hex_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL) + +#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_hex_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +lpfc_vport_param_set(name, defval, minval, maxval)\ +lpfc_vport_param_store(name)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c new file mode 100644 index 000000000..595dca92e --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -0,0 +1,5690 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2009-2015 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + *******************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_bsg.h" +#include "lpfc_disc.h" +#include "lpfc_scsi.h" +#include "lpfc.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_debugfs.h" +#include "lpfc_vport.h" +#include "lpfc_version.h" + +struct lpfc_bsg_event { + struct list_head node; + struct kref kref; + wait_queue_head_t wq; + + /* Event type and waiter identifiers */ + uint32_t type_mask; + uint32_t req_id; + uint32_t reg_id; + + /* next two flags are here for the auto-delete logic */ + unsigned long wait_time_stamp; + int waiting; + + /* seen and not seen events */ + struct list_head events_to_get; + struct list_head events_to_see; + + /* driver data associated with the job */ + void *dd_data; +}; + +struct lpfc_bsg_iocb { + struct lpfc_iocbq *cmdiocbq; + struct lpfc_dmabuf *rmp; + struct lpfc_nodelist *ndlp; +}; + +struct lpfc_bsg_mbox { + LPFC_MBOXQ_t *pmboxq; + MAILBOX_t *mb; + struct lpfc_dmabuf *dmabuffers; /* for BIU diags */ + uint8_t *ext; /* extended mailbox data */ + uint32_t mbOffset; /* from app */ + uint32_t inExtWLen; /* from app */ + uint32_t outExtWLen; /* from app */ +}; + +#define TYPE_EVT 1 +#define TYPE_IOCB 2 +#define TYPE_MBOX 3 +struct bsg_job_data { + uint32_t type; + struct bsg_job *set_job; /* job waiting for this iocb to finish */ + union { + struct lpfc_bsg_event *evt; + struct lpfc_bsg_iocb iocb; + struct lpfc_bsg_mbox mbox; + } context_un; +}; + +struct event_data { + struct list_head node; + uint32_t type; + uint32_t immed_dat; + void *data; + uint32_t len; +}; + +#define BUF_SZ_4K 4096 +#define SLI_CT_ELX_LOOPBACK 0x10 + +enum ELX_LOOPBACK_CMD { + ELX_LOOPBACK_XRI_SETUP, + ELX_LOOPBACK_DATA, +}; + +#define ELX_LOOPBACK_HEADER_SZ \ + (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un) + +struct lpfc_dmabufext { + struct lpfc_dmabuf dma; + uint32_t size; + uint32_t flag; +}; + +static void +lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) +{ + struct lpfc_dmabuf *mlast, *next_mlast; + + if (mlist) { + list_for_each_entry_safe(mlast, next_mlast, &mlist->list, + list) { + list_del(&mlast->list); + lpfc_mbuf_free(phba, mlast->virt, mlast->phys); + kfree(mlast); + } + lpfc_mbuf_free(phba, mlist->virt, mlist->phys); + kfree(mlist); + } + return; +} + +static struct lpfc_dmabuf * +lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size, + int outbound_buffers, struct ulp_bde64 *bpl, + int *bpl_entries) +{ + struct lpfc_dmabuf *mlist = NULL; + struct lpfc_dmabuf *mp; + unsigned int bytes_left = size; + + /* Verify we can support the size specified */ + if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE))) + return NULL; + + /* Determine the number of dma buffers to allocate */ + *bpl_entries = (size % LPFC_BPL_SIZE ? 
size/LPFC_BPL_SIZE + 1 : + size/LPFC_BPL_SIZE); + + /* Allocate dma buffer and place in BPL passed */ + while (bytes_left) { + /* Allocate dma buffer */ + mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!mp) { + if (mlist) + lpfc_free_bsg_buffers(phba, mlist); + return NULL; + } + + INIT_LIST_HEAD(&mp->list); + mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); + + if (!mp->virt) { + kfree(mp); + if (mlist) + lpfc_free_bsg_buffers(phba, mlist); + return NULL; + } + + /* Queue it to a linked list */ + if (!mlist) + mlist = mp; + else + list_add_tail(&mp->list, &mlist->list); + + /* Add buffer to buffer pointer list */ + if (outbound_buffers) + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + else + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys)); + bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys)); + bpl->tus.f.bdeSize = (uint16_t) + (bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE : + bytes_left); + bytes_left -= bpl->tus.f.bdeSize; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + bpl++; + } + return mlist; +} + +static unsigned int +lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers, + struct bsg_buffer *bsg_buffers, + unsigned int bytes_to_transfer, int to_buffers) +{ + + struct lpfc_dmabuf *mp; + unsigned int transfer_bytes, bytes_copied = 0; + unsigned int sg_offset, dma_offset; + unsigned char *dma_address, *sg_address; + LIST_HEAD(temp_list); + struct sg_mapping_iter miter; + unsigned long flags; + unsigned int sg_flags = SG_MITER_ATOMIC; + bool sg_valid; + + list_splice_init(&dma_buffers->list, &temp_list); + list_add(&dma_buffers->list, &temp_list); + sg_offset = 0; + if (to_buffers) + sg_flags |= SG_MITER_FROM_SG; + else + sg_flags |= SG_MITER_TO_SG; + sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt, + sg_flags); + local_irq_save(flags); + sg_valid = sg_miter_next(&miter); + list_for_each_entry(mp, &temp_list, list) { + dma_offset = 0; + while (bytes_to_transfer && sg_valid && + (dma_offset < LPFC_BPL_SIZE)) { + dma_address = mp->virt + dma_offset; + if (sg_offset) { + /* Continue previous partial transfer of sg */ + sg_address = miter.addr + sg_offset; + transfer_bytes = miter.length - sg_offset; + } else { + sg_address = miter.addr; + transfer_bytes = miter.length; + } + if (bytes_to_transfer < transfer_bytes) + transfer_bytes = bytes_to_transfer; + if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset)) + transfer_bytes = LPFC_BPL_SIZE - dma_offset; + if (to_buffers) + memcpy(dma_address, sg_address, transfer_bytes); + else + memcpy(sg_address, dma_address, transfer_bytes); + dma_offset += transfer_bytes; + sg_offset += transfer_bytes; + bytes_to_transfer -= transfer_bytes; + bytes_copied += transfer_bytes; + if (sg_offset >= miter.length) { + sg_offset = 0; + sg_valid = sg_miter_next(&miter); + } + } + } + sg_miter_stop(&miter); + local_irq_restore(flags); + list_del_init(&dma_buffers->list); + list_splice(&temp_list, &dma_buffers->list); + return bytes_copied; +} + +/** + * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler + * @phba: Pointer to HBA context object. + * @cmdiocbq: Pointer to command iocb. + * @rspiocbq: Pointer to response iocb. + * + * This function is the completion handler for iocbs issued using + * lpfc_bsg_send_mgmt_cmd function. This function is called by the + * ring event handler function without any lock held. This function + * can be called from both worker thread context and interrupt + * context. 
This function also can be called from another thread which + * cleans up the SLI layer objects. + * This function copies the contents of the response iocb to the + * response iocb memory object provided by the caller of + * lpfc_sli_issue_iocb_wait and then wakes up the thread which + * sleeps for the iocb completion. + **/ +static void +lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocbq, + struct lpfc_iocbq *rspiocbq) +{ + struct bsg_job_data *dd_data; + struct bsg_job *job; + struct fc_bsg_reply *bsg_reply; + struct lpfc_dmabuf *bmp, *cmp, *rmp; + struct lpfc_nodelist *ndlp; + struct lpfc_bsg_iocb *iocb; + unsigned long flags; + int rc = 0; + u32 ulp_status, ulp_word4, total_data_placed; + + dd_data = cmdiocbq->context_un.dd_data; + + /* Determine if job has been aborted */ + spin_lock_irqsave(&phba->ct_ev_lock, flags); + job = dd_data->set_job; + if (job) { + bsg_reply = job->reply; + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; + } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Close the timeout handler abort window */ + spin_lock_irqsave(&phba->hbalock, flags); + cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING; + spin_unlock_irqrestore(&phba->hbalock, flags); + + iocb = &dd_data->context_un.iocb; + ndlp = iocb->cmdiocbq->ndlp; + rmp = iocb->rmp; + cmp = cmdiocbq->cmd_dmabuf; + bmp = cmdiocbq->bpl_dmabuf; + ulp_status = get_job_ulpstatus(phba, rspiocbq); + ulp_word4 = get_job_word4(phba, rspiocbq); + total_data_placed = get_job_data_placed(phba, rspiocbq); + + /* Copy the completed data or set the error status */ + + if (job) { + if (ulp_status) { + if (ulp_status == IOSTAT_LOCAL_REJECT) { + switch (ulp_word4 & IOERR_PARAM_MASK) { + case IOERR_SEQUENCE_TIMEOUT: + rc = -ETIMEDOUT; + break; + case IOERR_INVALID_RPI: + rc = -EFAULT; + break; + default: + rc = -EACCES; + break; + } + } else { + rc = -EACCES; + } + } else { + bsg_reply->reply_payload_rcv_len = + lpfc_bsg_copy_data(rmp, &job->reply_payload, + total_data_placed, 0); + } + } + + lpfc_free_bsg_buffers(phba, cmp); + lpfc_free_bsg_buffers(phba, rmp); + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + kfree(bmp); + lpfc_nlp_put(ndlp); + lpfc_sli_release_iocbq(phba, cmdiocbq); + kfree(dd_data); + + /* Complete the job if the job is still active */ + + if (job) { + bsg_reply->result = rc; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + } + return; +} + +/** + * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request + * @job: fc_bsg_job to handle + **/ +static int +lpfc_bsg_send_mgmt_cmd(struct bsg_job *job) +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp = rdata->pnode; + struct fc_bsg_reply *bsg_reply = job->reply; + struct ulp_bde64 *bpl = NULL; + struct lpfc_iocbq *cmdiocbq = NULL; + struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL; + int request_nseg, reply_nseg; + u32 num_entry; + struct bsg_job_data *dd_data; + unsigned long flags; + uint32_t creg_val; + int rc = 0; + int iocb_stat; + u16 ulp_context; + + /* in case no data is transferred */ + bsg_reply->reply_payload_rcv_len = 0; + + if (ndlp->nlp_flag & NLP_ELS_SND_MASK) + return -ENODEV; + + /* allocate our bsg tracking structure */ + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2733 Failed allocation of dd_data\n"); + rc = 
-ENOMEM; + goto no_dd_data; + } + + cmdiocbq = lpfc_sli_get_iocbq(phba); + if (!cmdiocbq) { + rc = -ENOMEM; + goto free_dd; + } + + bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!bmp) { + rc = -ENOMEM; + goto free_cmdiocbq; + } + bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); + if (!bmp->virt) { + rc = -ENOMEM; + goto free_bmp; + } + + INIT_LIST_HEAD(&bmp->list); + + bpl = (struct ulp_bde64 *) bmp->virt; + request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64); + cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len, + 1, bpl, &request_nseg); + if (!cmp) { + rc = -ENOMEM; + goto free_bmp; + } + lpfc_bsg_copy_data(cmp, &job->request_payload, + job->request_payload.payload_len, 1); + + bpl += request_nseg; + reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg; + rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0, + bpl, &reply_nseg); + if (!rmp) { + rc = -ENOMEM; + goto free_cmp; + } + + num_entry = request_nseg + reply_nseg; + + if (phba->sli_rev == LPFC_SLI_REV4) + ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; + else + ulp_context = ndlp->nlp_rpi; + + lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry, + phba->fc_ratov * 2); + + cmdiocbq->num_bdes = num_entry; + cmdiocbq->vport = phba->pport; + cmdiocbq->cmd_dmabuf = cmp; + cmdiocbq->bpl_dmabuf = bmp; + cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; + + cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp; + cmdiocbq->context_un.dd_data = dd_data; + + dd_data->type = TYPE_IOCB; + dd_data->set_job = job; + dd_data->context_un.iocb.cmdiocbq = cmdiocbq; + dd_data->context_un.iocb.rmp = rmp; + job->dd_data = dd_data; + + if (phba->cfg_poll & DISABLE_FCP_RING_INT) { + if (lpfc_readl(phba->HCregaddr, &creg_val)) { + rc = -EIO ; + goto free_rmp; + } + creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); + writel(creg_val, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } + + cmdiocbq->ndlp = lpfc_nlp_get(ndlp); + if (!cmdiocbq->ndlp) { + rc = -ENODEV; + goto free_rmp; + } + + iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); + if (iocb_stat == IOCB_SUCCESS) { + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O had not been completed yet */ + if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) { + /* open up abort window to timeout handler */ + cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + return 0; /* done for now */ + } else if (iocb_stat == IOCB_BUSY) { + rc = -EAGAIN; + } else { + rc = -EIO; + } + + /* iocb failed so cleanup */ + lpfc_nlp_put(ndlp); + +free_rmp: + lpfc_free_bsg_buffers(phba, rmp); +free_cmp: + lpfc_free_bsg_buffers(phba, cmp); +free_bmp: + if (bmp->virt) + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + kfree(bmp); +free_cmdiocbq: + lpfc_sli_release_iocbq(phba, cmdiocbq); +free_dd: + kfree(dd_data); +no_dd_data: + /* make error code available to userspace */ + bsg_reply->result = rc; + job->dd_data = NULL; + return rc; +} + +/** + * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler + * @phba: Pointer to HBA context object. + * @cmdiocbq: Pointer to command iocb. + * @rspiocbq: Pointer to response iocb. + * + * This function is the completion handler for iocbs issued using + * lpfc_bsg_rport_els_cmp function. This function is called by the + * ring event handler function without any lock held. This function + * can be called from both worker thread context and interrupt + * context. 
This function also can be called from other thread which + * cleans up the SLI layer objects. + * This function copies the contents of the response iocb to the + * response iocb memory object provided by the caller of + * lpfc_sli_issue_iocb_wait and then wakes up the thread which + * sleeps for the iocb completion. + **/ +static void +lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocbq, + struct lpfc_iocbq *rspiocbq) +{ + struct bsg_job_data *dd_data; + struct bsg_job *job; + struct fc_bsg_reply *bsg_reply; + struct lpfc_nodelist *ndlp; + struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL; + struct fc_bsg_ctels_reply *els_reply; + uint8_t *rjt_data; + unsigned long flags; + unsigned int rsp_size; + int rc = 0; + u32 ulp_status, ulp_word4, total_data_placed; + + dd_data = cmdiocbq->context_un.dd_data; + ndlp = dd_data->context_un.iocb.ndlp; + cmdiocbq->ndlp = ndlp; + + /* Determine if job has been aborted */ + spin_lock_irqsave(&phba->ct_ev_lock, flags); + job = dd_data->set_job; + if (job) { + bsg_reply = job->reply; + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; + } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Close the timeout handler abort window */ + spin_lock_irqsave(&phba->hbalock, flags); + cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING; + spin_unlock_irqrestore(&phba->hbalock, flags); + + ulp_status = get_job_ulpstatus(phba, rspiocbq); + ulp_word4 = get_job_word4(phba, rspiocbq); + total_data_placed = get_job_data_placed(phba, rspiocbq); + pcmd = cmdiocbq->cmd_dmabuf; + prsp = (struct lpfc_dmabuf *)pcmd->list.next; + + /* Copy the completed job data or determine the job status if job is + * still active + */ + + if (job) { + if (ulp_status == IOSTAT_SUCCESS) { + rsp_size = total_data_placed; + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + prsp->virt, + rsp_size); + } else if (ulp_status == IOSTAT_LS_RJT) { + bsg_reply->reply_payload_rcv_len = + sizeof(struct fc_bsg_ctels_reply); + /* LS_RJT data returned in word 4 */ + rjt_data = (uint8_t *)&ulp_word4; + els_reply = &bsg_reply->reply_data.ctels_reply; + els_reply->status = FC_CTELS_STATUS_REJECT; + els_reply->rjt_data.action = rjt_data[3]; + els_reply->rjt_data.reason_code = rjt_data[2]; + els_reply->rjt_data.reason_explanation = rjt_data[1]; + els_reply->rjt_data.vendor_unique = rjt_data[0]; + } else if (ulp_status == IOSTAT_LOCAL_REJECT && + (ulp_word4 & IOERR_PARAM_MASK) == + IOERR_SEQUENCE_TIMEOUT) { + rc = -ETIMEDOUT; + } else { + rc = -EIO; + } + } + + lpfc_els_free_iocb(phba, cmdiocbq); + + lpfc_nlp_put(ndlp); + kfree(dd_data); + + /* Complete the job if the job is still active */ + + if (job) { + bsg_reply->result = rc; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + } + return; +} + +/** + * lpfc_bsg_rport_els - send an ELS command from a bsg request + * @job: fc_bsg_job to handle + **/ +static int +lpfc_bsg_rport_els(struct bsg_job *job) +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct lpfc_hba *phba = vport->phba; + struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data; + struct lpfc_nodelist *ndlp = rdata->pnode; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + uint32_t elscmd; + uint32_t cmdsize; + struct lpfc_iocbq *cmdiocbq; + uint16_t rpi = 0; + struct bsg_job_data *dd_data; + unsigned long flags; + uint32_t creg_val; + int rc = 0; + + /* in case no data is 
transferred */ + bsg_reply->reply_payload_rcv_len = 0; + + /* verify the els command is not greater than the + * maximum ELS transfer size. + */ + + if (job->request_payload.payload_len > FCELSSIZE) { + rc = -EINVAL; + goto no_dd_data; + } + + /* allocate our bsg tracking structure */ + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2735 Failed allocation of dd_data\n"); + rc = -ENOMEM; + goto no_dd_data; + } + + elscmd = bsg_request->rqst_data.r_els.els_code; + cmdsize = job->request_payload.payload_len; + + if (!lpfc_nlp_get(ndlp)) { + rc = -ENODEV; + goto free_dd_data; + } + + /* We will use the allocated dma buffers by prep els iocb for command + * and response to ensure if the job times out and the request is freed, + * we won't be dma into memory that is no longer allocated to for the + * request. + */ + cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, + ndlp->nlp_DID, elscmd); + if (!cmdiocbq) { + rc = -EIO; + goto release_ndlp; + } + + /* Transfer the request payload to allocated command dma buffer */ + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + cmdiocbq->cmd_dmabuf->virt, + cmdsize); + + rpi = ndlp->nlp_rpi; + + if (phba->sli_rev == LPFC_SLI_REV4) + bf_set(wqe_ctxt_tag, &cmdiocbq->wqe.generic.wqe_com, + phba->sli4_hba.rpi_ids[rpi]); + else + cmdiocbq->iocb.ulpContext = rpi; + cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; + cmdiocbq->context_un.dd_data = dd_data; + cmdiocbq->ndlp = ndlp; + cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp; + dd_data->type = TYPE_IOCB; + dd_data->set_job = job; + dd_data->context_un.iocb.cmdiocbq = cmdiocbq; + dd_data->context_un.iocb.ndlp = ndlp; + dd_data->context_un.iocb.rmp = NULL; + job->dd_data = dd_data; + + if (phba->cfg_poll & DISABLE_FCP_RING_INT) { + if (lpfc_readl(phba->HCregaddr, &creg_val)) { + rc = -EIO; + goto linkdown_err; + } + creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); + writel(creg_val, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); + if (rc == IOCB_SUCCESS) { + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O had not been completed/released */ + if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) { + /* open up abort window to timeout handler */ + cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + return 0; /* done for now */ + } else if (rc == IOCB_BUSY) { + rc = -EAGAIN; + } else { + rc = -EIO; + } + + /* I/O issue failed. Cleanup resources. */ + +linkdown_err: + lpfc_els_free_iocb(phba, cmdiocbq); + +release_ndlp: + lpfc_nlp_put(ndlp); + +free_dd_data: + kfree(dd_data); + +no_dd_data: + /* make error code available to userspace */ + bsg_reply->result = rc; + job->dd_data = NULL; + return rc; +} + +/** + * lpfc_bsg_event_free - frees an allocated event structure + * @kref: Pointer to a kref. + * + * Called from kref_put. Back cast the kref into an event structure address. + * Free any events to get, delete associated nodes, free any events to see, + * free any data then free the event itself. 
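+ * This is the kref release callback, so it is only reached through
+ * lpfc_bsg_event_unref()/kref_put() once the last reference is dropped.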
+ **/ +static void +lpfc_bsg_event_free(struct kref *kref) +{ + struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event, + kref); + struct event_data *ed; + + list_del(&evt->node); + + while (!list_empty(&evt->events_to_get)) { + ed = list_entry(evt->events_to_get.next, typeof(*ed), node); + list_del(&ed->node); + kfree(ed->data); + kfree(ed); + } + + while (!list_empty(&evt->events_to_see)) { + ed = list_entry(evt->events_to_see.next, typeof(*ed), node); + list_del(&ed->node); + kfree(ed->data); + kfree(ed); + } + + kfree(evt->dd_data); + kfree(evt); +} + +/** + * lpfc_bsg_event_ref - increments the kref for an event + * @evt: Pointer to an event structure. + **/ +static inline void +lpfc_bsg_event_ref(struct lpfc_bsg_event *evt) +{ + kref_get(&evt->kref); +} + +/** + * lpfc_bsg_event_unref - Uses kref_put to free an event structure + * @evt: Pointer to an event structure. + **/ +static inline void +lpfc_bsg_event_unref(struct lpfc_bsg_event *evt) +{ + kref_put(&evt->kref, lpfc_bsg_event_free); +} + +/** + * lpfc_bsg_event_new - allocate and initialize a event structure + * @ev_mask: Mask of events. + * @ev_reg_id: Event reg id. + * @ev_req_id: Event request id. + **/ +static struct lpfc_bsg_event * +lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id) +{ + struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL); + + if (!evt) + return NULL; + + INIT_LIST_HEAD(&evt->events_to_get); + INIT_LIST_HEAD(&evt->events_to_see); + evt->type_mask = ev_mask; + evt->req_id = ev_req_id; + evt->reg_id = ev_reg_id; + evt->wait_time_stamp = jiffies; + evt->dd_data = NULL; + init_waitqueue_head(&evt->wq); + kref_init(&evt->kref); + return evt; +} + +/** + * diag_cmd_data_free - Frees an lpfc dma buffer extension + * @phba: Pointer to HBA context object. + * @mlist: Pointer to an lpfc dma buffer extension. + **/ +static int +diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist) +{ + struct lpfc_dmabufext *mlast; + struct pci_dev *pcidev; + struct list_head head, *curr, *next; + + if ((!mlist) || (!lpfc_is_link_up(phba) && + (phba->link_flag & LS_LOOPBACK_MODE))) { + return 0; + } + + pcidev = phba->pcidev; + list_add_tail(&head, &mlist->dma.list); + + list_for_each_safe(curr, next, &head) { + mlast = list_entry(curr, struct lpfc_dmabufext , dma.list); + if (mlast->dma.virt) + dma_free_coherent(&pcidev->dev, + mlast->size, + mlast->dma.virt, + mlast->dma.phys); + kfree(mlast); + } + return 0; +} + +/* + * lpfc_bsg_ct_unsol_event - process an unsolicited CT command + * + * This function is called when an unsolicited CT command is received. It + * forwards the event to any processes registered to receive CT events. 
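+ *
+ * As coded below, it returns 0 when an ELX loopback frame has been consumed
+ * on an SLI-2/SLI-3 port and 1 in all other cases.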
+ **/ +int +lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *piocbq) +{ + uint32_t evt_req_id = 0; + u16 cmd; + struct lpfc_dmabuf *dmabuf = NULL; + struct lpfc_bsg_event *evt; + struct event_data *evt_dat = NULL; + struct lpfc_iocbq *iocbq; + IOCB_t *iocb = NULL; + size_t offset = 0; + struct list_head head; + struct ulp_bde64 *bde; + dma_addr_t dma_addr; + int i; + struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf; + struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf; + struct lpfc_sli_ct_request *ct_req; + struct bsg_job *job = NULL; + struct fc_bsg_reply *bsg_reply; + struct bsg_job_data *dd_data = NULL; + unsigned long flags; + int size = 0; + u32 bde_count = 0; + + INIT_LIST_HEAD(&head); + list_add_tail(&head, &piocbq->list); + + ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt; + evt_req_id = ct_req->FsType; + cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp); + + spin_lock_irqsave(&phba->ct_ev_lock, flags); + list_for_each_entry(evt, &phba->ct_ev_waiters, node) { + if (!(evt->type_mask & FC_REG_CT_EVENT) || + evt->req_id != evt_req_id) + continue; + + lpfc_bsg_event_ref(evt); + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL); + if (evt_dat == NULL) { + spin_lock_irqsave(&phba->ct_ev_lock, flags); + lpfc_bsg_event_unref(evt); + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2614 Memory allocation failed for " + "CT event\n"); + break; + } + + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + /* take accumulated byte count from the last iocbq */ + iocbq = list_entry(head.prev, typeof(*iocbq), list); + if (phba->sli_rev == LPFC_SLI_REV4) + evt_dat->len = iocbq->wcqe_cmpl.total_data_placed; + else + evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len; + } else { + list_for_each_entry(iocbq, &head, list) { + iocb = &iocbq->iocb; + for (i = 0; i < iocb->ulpBdeCount; + i++) + evt_dat->len += + iocb->un.cont64[i].tus.f.bdeSize; + } + } + + evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL); + if (evt_dat->data == NULL) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2615 Memory allocation failed for " + "CT event data, size %d\n", + evt_dat->len); + kfree(evt_dat); + spin_lock_irqsave(&phba->ct_ev_lock, flags); + lpfc_bsg_event_unref(evt); + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + goto error_ct_unsol_exit; + } + + list_for_each_entry(iocbq, &head, list) { + size = 0; + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + bdeBuf1 = iocbq->cmd_dmabuf; + bdeBuf2 = iocbq->bpl_dmabuf; + } + if (phba->sli_rev == LPFC_SLI_REV4) + bde_count = iocbq->wcqe_cmpl.word3; + else + bde_count = iocbq->iocb.ulpBdeCount; + for (i = 0; i < bde_count; i++) { + if (phba->sli3_options & + LPFC_SLI3_HBQ_ENABLED) { + if (i == 0) { + size = iocbq->wqe.gen_req.bde.tus.f.bdeSize; + dmabuf = bdeBuf1; + } else if (i == 1) { + size = iocbq->unsol_rcv_len; + dmabuf = bdeBuf2; + } + if ((offset + size) > evt_dat->len) + size = evt_dat->len - offset; + } else { + size = iocbq->iocb.un.cont64[i]. 
+ tus.f.bdeSize; + bde = &iocbq->iocb.un.cont64[i]; + dma_addr = getPaddr(bde->addrHigh, + bde->addrLow); + dmabuf = lpfc_sli_ringpostbuf_get(phba, + pring, dma_addr); + } + if (!dmabuf) { + lpfc_printf_log(phba, KERN_ERR, + LOG_LIBDFC, "2616 No dmabuf " + "found for iocbq x%px\n", + iocbq); + kfree(evt_dat->data); + kfree(evt_dat); + spin_lock_irqsave(&phba->ct_ev_lock, + flags); + lpfc_bsg_event_unref(evt); + spin_unlock_irqrestore( + &phba->ct_ev_lock, flags); + goto error_ct_unsol_exit; + } + memcpy((char *)(evt_dat->data) + offset, + dmabuf->virt, size); + offset += size; + if (evt_req_id != SLI_CT_ELX_LOOPBACK && + !(phba->sli3_options & + LPFC_SLI3_HBQ_ENABLED)) { + lpfc_sli_ringpostbuf_put(phba, pring, + dmabuf); + } else { + switch (cmd) { + case ELX_LOOPBACK_DATA: + if (phba->sli_rev < + LPFC_SLI_REV4) + diag_cmd_data_free(phba, + (struct lpfc_dmabufext + *)dmabuf); + break; + case ELX_LOOPBACK_XRI_SETUP: + if ((phba->sli_rev == + LPFC_SLI_REV2) || + (phba->sli3_options & + LPFC_SLI3_HBQ_ENABLED + )) { + lpfc_in_buf_free(phba, + dmabuf); + } else { + lpfc_sli3_post_buffer(phba, + pring, + 1); + } + break; + default: + if (!(phba->sli3_options & + LPFC_SLI3_HBQ_ENABLED)) + lpfc_sli3_post_buffer(phba, + pring, + 1); + break; + } + } + } + } + + spin_lock_irqsave(&phba->ct_ev_lock, flags); + if (phba->sli_rev == LPFC_SLI_REV4) { + evt_dat->immed_dat = phba->ctx_idx; + phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX; + /* Provide warning for over-run of the ct_ctx array */ + if (phba->ct_ctx[evt_dat->immed_dat].valid == + UNSOL_VALID) + lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, + "2717 CT context array entry " + "[%d] over-run: oxid:x%x, " + "sid:x%x\n", phba->ctx_idx, + phba->ct_ctx[ + evt_dat->immed_dat].oxid, + phba->ct_ctx[ + evt_dat->immed_dat].SID); + phba->ct_ctx[evt_dat->immed_dat].rxid = + get_job_ulpcontext(phba, piocbq); + phba->ct_ctx[evt_dat->immed_dat].oxid = + get_job_rcvoxid(phba, piocbq); + phba->ct_ctx[evt_dat->immed_dat].SID = + bf_get(wqe_els_did, + &piocbq->wqe.xmit_els_rsp.wqe_dest); + phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID; + } else + evt_dat->immed_dat = get_job_ulpcontext(phba, piocbq); + + evt_dat->type = FC_REG_CT_EVENT; + list_add(&evt_dat->node, &evt->events_to_see); + if (evt_req_id == SLI_CT_ELX_LOOPBACK) { + wake_up_interruptible(&evt->wq); + lpfc_bsg_event_unref(evt); + break; + } + + list_move(evt->events_to_see.prev, &evt->events_to_get); + + dd_data = (struct bsg_job_data *)evt->dd_data; + job = dd_data->set_job; + dd_data->set_job = NULL; + lpfc_bsg_event_unref(evt); + if (job) { + bsg_reply = job->reply; + bsg_reply->reply_payload_rcv_len = size; + /* make error code available to userspace */ + bsg_reply->result = 0; + job->dd_data = NULL; + /* complete the job back to userspace */ + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + spin_lock_irqsave(&phba->ct_ev_lock, flags); + } + } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + +error_ct_unsol_exit: + if (!list_empty(&head)) + list_del(&head); + if ((phba->sli_rev < LPFC_SLI_REV4) && + (evt_req_id == SLI_CT_ELX_LOOPBACK)) + return 0; + return 1; +} + +/** + * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane + * @phba: Pointer to HBA context object. + * @dmabuf: pointer to a dmabuf that describes the FC sequence + * + * This function handles abort to the CT command toward management plane + * for SLI4 port. 
+ * + * If the pending context of a CT command to management plane present, clears + * such context and returns 1 for handled; otherwise, it returns 0 indicating + * no context exists. + **/ +int +lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf) +{ + struct fc_frame_header fc_hdr; + struct fc_frame_header *fc_hdr_ptr = &fc_hdr; + int ctx_idx, handled = 0; + uint16_t oxid, rxid; + uint32_t sid; + + memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); + sid = sli4_sid_from_fc_hdr(fc_hdr_ptr); + oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id); + rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id); + + for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) { + if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID) + continue; + if (phba->ct_ctx[ctx_idx].rxid != rxid) + continue; + if (phba->ct_ctx[ctx_idx].oxid != oxid) + continue; + if (phba->ct_ctx[ctx_idx].SID != sid) + continue; + phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID; + handled = 1; + } + return handled; +} + +/** + * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command + * @job: SET_EVENT fc_bsg_job + **/ +static int +lpfc_bsg_hba_set_event(struct bsg_job *job) +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct lpfc_hba *phba = vport->phba; + struct fc_bsg_request *bsg_request = job->request; + struct set_ct_event *event_req; + struct lpfc_bsg_event *evt; + int rc = 0; + struct bsg_job_data *dd_data = NULL; + uint32_t ev_mask; + unsigned long flags; + + if (job->request_len < + sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2612 Received SET_CT_EVENT below minimum " + "size\n"); + rc = -EINVAL; + goto job_error; + } + + event_req = (struct set_ct_event *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + ev_mask = ((uint32_t)(unsigned long)event_req->type_mask & + FC_REG_EVENT_MASK); + spin_lock_irqsave(&phba->ct_ev_lock, flags); + list_for_each_entry(evt, &phba->ct_ev_waiters, node) { + if (evt->reg_id == event_req->ev_reg_id) { + lpfc_bsg_event_ref(evt); + evt->wait_time_stamp = jiffies; + dd_data = (struct bsg_job_data *)evt->dd_data; + break; + } + } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + if (&evt->node == &phba->ct_ev_waiters) { + /* no event waiting struct yet - first call */ + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (dd_data == NULL) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2734 Failed allocation of dd_data\n"); + rc = -ENOMEM; + goto job_error; + } + evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id, + event_req->ev_req_id); + if (!evt) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2617 Failed allocation of event " + "waiter\n"); + rc = -ENOMEM; + goto job_error; + } + dd_data->type = TYPE_EVT; + dd_data->set_job = NULL; + dd_data->context_un.evt = evt; + evt->dd_data = (void *)dd_data; + spin_lock_irqsave(&phba->ct_ev_lock, flags); + list_add(&evt->node, &phba->ct_ev_waiters); + lpfc_bsg_event_ref(evt); + evt->wait_time_stamp = jiffies; + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + } + + spin_lock_irqsave(&phba->ct_ev_lock, flags); + evt->waiting = 1; + dd_data->set_job = job; /* for unsolicited command */ + job->dd_data = dd_data; /* for fc transport timeout callback*/ + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + return 0; /* call job done later */ + +job_error: + kfree(dd_data); + job->dd_data = NULL; + return rc; +} + +/** + * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command + * @job: GET_EVENT 
fc_bsg_job + **/ +static int +lpfc_bsg_hba_get_event(struct bsg_job *job) +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct lpfc_hba *phba = vport->phba; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + struct get_ct_event *event_req; + struct get_ct_event_reply *event_reply; + struct lpfc_bsg_event *evt, *evt_next; + struct event_data *evt_dat = NULL; + unsigned long flags; + uint32_t rc = 0; + + if (job->request_len < + sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2613 Received GET_CT_EVENT request below " + "minimum size\n"); + rc = -EINVAL; + goto job_error; + } + + event_req = (struct get_ct_event *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + + event_reply = (struct get_ct_event_reply *) + bsg_reply->reply_data.vendor_reply.vendor_rsp; + spin_lock_irqsave(&phba->ct_ev_lock, flags); + list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) { + if (evt->reg_id == event_req->ev_reg_id) { + if (list_empty(&evt->events_to_get)) + break; + lpfc_bsg_event_ref(evt); + evt->wait_time_stamp = jiffies; + evt_dat = list_entry(evt->events_to_get.prev, + struct event_data, node); + list_del(&evt_dat->node); + break; + } + } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* The app may continue to ask for event data until it gets + * an error indicating that there isn't anymore + */ + if (evt_dat == NULL) { + bsg_reply->reply_payload_rcv_len = 0; + rc = -ENOENT; + goto job_error; + } + + if (evt_dat->len > job->request_payload.payload_len) { + evt_dat->len = job->request_payload.payload_len; + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2618 Truncated event data at %d " + "bytes\n", + job->request_payload.payload_len); + } + + event_reply->type = evt_dat->type; + event_reply->immed_data = evt_dat->immed_dat; + if (evt_dat->len > 0) + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + evt_dat->data, evt_dat->len); + else + bsg_reply->reply_payload_rcv_len = 0; + + if (evt_dat) { + kfree(evt_dat->data); + kfree(evt_dat); + } + + spin_lock_irqsave(&phba->ct_ev_lock, flags); + lpfc_bsg_event_unref(evt); + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + job->dd_data = NULL; + bsg_reply->result = 0; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return 0; + +job_error: + job->dd_data = NULL; + bsg_reply->result = rc; + return rc; +} + +/** + * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler + * @phba: Pointer to HBA context object. + * @cmdiocbq: Pointer to command iocb. + * @rspiocbq: Pointer to response iocb. + * + * This function is the completion handler for iocbs issued using + * lpfc_issue_ct_rsp_cmp function. This function is called by the + * ring event handler function without any lock held. This function + * can be called from both worker thread context and interrupt + * context. This function also can be called from other thread which + * cleans up the SLI layer objects. + * This function copy the contents of the response iocb to the + * response iocb memory object provided by the caller of + * lpfc_sli_issue_iocb_wait and then wakes up the thread which + * sleeps for the iocb completion. 
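+ * Before completing the bsg job it releases the cmp and bmp DMA buffers,
+ * the command iocbq, the ndlp reference and the bsg tracking structure.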
+ **/ +static void +lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocbq, + struct lpfc_iocbq *rspiocbq) +{ + struct bsg_job_data *dd_data; + struct bsg_job *job; + struct fc_bsg_reply *bsg_reply; + struct lpfc_dmabuf *bmp, *cmp; + struct lpfc_nodelist *ndlp; + unsigned long flags; + int rc = 0; + u32 ulp_status, ulp_word4; + + dd_data = cmdiocbq->context_un.dd_data; + + /* Determine if job has been aborted */ + spin_lock_irqsave(&phba->ct_ev_lock, flags); + job = dd_data->set_job; + if (job) { + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; + } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Close the timeout handler abort window */ + spin_lock_irqsave(&phba->hbalock, flags); + cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING; + spin_unlock_irqrestore(&phba->hbalock, flags); + + ndlp = dd_data->context_un.iocb.ndlp; + cmp = cmdiocbq->cmd_dmabuf; + bmp = cmdiocbq->bpl_dmabuf; + + ulp_status = get_job_ulpstatus(phba, rspiocbq); + ulp_word4 = get_job_word4(phba, rspiocbq); + + /* Copy the completed job data or set the error status */ + + if (job) { + bsg_reply = job->reply; + if (ulp_status) { + if (ulp_status == IOSTAT_LOCAL_REJECT) { + switch (ulp_word4 & IOERR_PARAM_MASK) { + case IOERR_SEQUENCE_TIMEOUT: + rc = -ETIMEDOUT; + break; + case IOERR_INVALID_RPI: + rc = -EFAULT; + break; + default: + rc = -EACCES; + break; + } + } else { + rc = -EACCES; + } + } else { + bsg_reply->reply_payload_rcv_len = 0; + } + } + + lpfc_free_bsg_buffers(phba, cmp); + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + kfree(bmp); + lpfc_sli_release_iocbq(phba, cmdiocbq); + lpfc_nlp_put(ndlp); + kfree(dd_data); + + /* Complete the job if the job is still active */ + + if (job) { + bsg_reply->result = rc; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + } + return; +} + +/** + * lpfc_issue_ct_rsp - issue a ct response + * @phba: Pointer to HBA context object. + * @job: Pointer to the job object. + * @tag: tag index value into the ports context exchange array. + * @cmp: Pointer to a cmp dma buffer descriptor. + * @bmp: Pointer to a bmp dma buffer descriptor. + * @num_entry: Number of enties in the bde. 
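+ *
+ * Returns IOCB_SUCCESS (0) once the XMIT_SEQUENCE response has been queued;
+ * any other return value indicates failure, in which case the caller
+ * (lpfc_bsg_send_mgmt_rsp) frees the cmp and bmp buffers it passed in.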
+ **/ +static int +lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag, + struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp, + int num_entry) +{ + struct lpfc_iocbq *ctiocb = NULL; + int rc = 0; + struct lpfc_nodelist *ndlp = NULL; + struct bsg_job_data *dd_data; + unsigned long flags; + uint32_t creg_val; + u16 ulp_context, iotag; + + ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID); + if (!ndlp) { + lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, + "2721 ndlp null for oxid %x SID %x\n", + phba->ct_ctx[tag].rxid, + phba->ct_ctx[tag].SID); + return IOCB_ERROR; + } + + /* allocate our bsg tracking structure */ + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2736 Failed allocation of dd_data\n"); + rc = -ENOMEM; + goto no_dd_data; + } + + /* Allocate buffer for command iocb */ + ctiocb = lpfc_sli_get_iocbq(phba); + if (!ctiocb) { + rc = -ENOMEM; + goto no_ctiocb; + } + + if (phba->sli_rev == LPFC_SLI_REV4) { + /* Do not issue unsol response if oxid not marked as valid */ + if (phba->ct_ctx[tag].valid != UNSOL_VALID) { + rc = IOCB_ERROR; + goto issue_ct_rsp_exit; + } + + lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], + phba->ct_ctx[tag].oxid, num_entry, + FC_RCTL_DD_SOL_CTL, 1, + CMD_XMIT_SEQUENCE64_WQE); + + /* The exchange is done, mark the entry as invalid */ + phba->ct_ctx[tag].valid = UNSOL_INVALID; + iotag = get_wqe_reqtag(ctiocb); + } else { + lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag, num_entry, + FC_RCTL_DD_SOL_CTL, 1, + CMD_XMIT_SEQUENCE64_CX); + ctiocb->num_bdes = num_entry; + iotag = ctiocb->iocb.ulpIoTag; + } + + ulp_context = get_job_ulpcontext(phba, ctiocb); + + /* Xmit CT response on exchange */ + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n", + ulp_context, iotag, tag, phba->link_state); + + ctiocb->cmd_flag |= LPFC_IO_LIBDFC; + ctiocb->vport = phba->pport; + ctiocb->context_un.dd_data = dd_data; + ctiocb->cmd_dmabuf = cmp; + ctiocb->bpl_dmabuf = bmp; + ctiocb->ndlp = ndlp; + ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp; + + dd_data->type = TYPE_IOCB; + dd_data->set_job = job; + dd_data->context_un.iocb.cmdiocbq = ctiocb; + dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp); + if (!dd_data->context_un.iocb.ndlp) { + rc = -IOCB_ERROR; + goto issue_ct_rsp_exit; + } + dd_data->context_un.iocb.rmp = NULL; + job->dd_data = dd_data; + + if (phba->cfg_poll & DISABLE_FCP_RING_INT) { + if (lpfc_readl(phba->HCregaddr, &creg_val)) { + rc = -IOCB_ERROR; + goto issue_ct_rsp_exit; + } + creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); + writel(creg_val, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); + if (rc == IOCB_SUCCESS) { + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O had not been completed/released */ + if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) { + /* open up abort window to timeout handler */ + ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + return 0; /* done for now */ + } + + /* iocb failed so cleanup */ + job->dd_data = NULL; + lpfc_nlp_put(ndlp); + +issue_ct_rsp_exit: + lpfc_sli_release_iocbq(phba, ctiocb); +no_ctiocb: + kfree(dd_data); +no_dd_data: + return rc; +} + +/** + * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command + * @job: SEND_MGMT_RESP fc_bsg_job + **/ +static int +lpfc_bsg_send_mgmt_rsp(struct 
bsg_job *job) +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct lpfc_hba *phba = vport->phba; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + struct ulp_bde64 *bpl; + struct lpfc_dmabuf *bmp = NULL, *cmp = NULL; + int bpl_entries; + uint32_t tag = mgmt_resp->tag; + unsigned long reqbfrcnt = + (unsigned long)job->request_payload.payload_len; + int rc = 0; + + /* in case no data is transferred */ + bsg_reply->reply_payload_rcv_len = 0; + + if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) { + rc = -ERANGE; + goto send_mgmt_rsp_exit; + } + + bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!bmp) { + rc = -ENOMEM; + goto send_mgmt_rsp_exit; + } + + bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); + if (!bmp->virt) { + rc = -ENOMEM; + goto send_mgmt_rsp_free_bmp; + } + + INIT_LIST_HEAD(&bmp->list); + bpl = (struct ulp_bde64 *) bmp->virt; + bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64)); + cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len, + 1, bpl, &bpl_entries); + if (!cmp) { + rc = -ENOMEM; + goto send_mgmt_rsp_free_bmp; + } + lpfc_bsg_copy_data(cmp, &job->request_payload, + job->request_payload.payload_len, 1); + + rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries); + + if (rc == IOCB_SUCCESS) + return 0; /* done for now */ + + rc = -EACCES; + + lpfc_free_bsg_buffers(phba, cmp); + +send_mgmt_rsp_free_bmp: + if (bmp->virt) + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + kfree(bmp); +send_mgmt_rsp_exit: + /* make error code available to userspace */ + bsg_reply->result = rc; + job->dd_data = NULL; + return rc; +} + +/** + * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode + * @phba: Pointer to HBA context object. + * + * This function is responsible for preparing driver for diag loopback + * on device. + */ +static int +lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + struct Scsi_Host *shost; + struct lpfc_sli *psli; + struct lpfc_queue *qp = NULL; + struct lpfc_sli_ring *pring; + int i = 0; + + psli = &phba->sli; + if (!psli) + return -ENODEV; + + + if ((phba->link_state == LPFC_HBA_ERROR) || + (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || + (!(psli->sli_flag & LPFC_SLI_ACTIVE))) + return -EACCES; + + vports = lpfc_create_vport_work_array(phba); + if (vports) { + for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + shost = lpfc_shost_from_vport(vports[i]); + scsi_block_requests(shost); + } + lpfc_destroy_vport_work_array(phba, vports); + } else { + shost = lpfc_shost_from_vport(phba->pport); + scsi_block_requests(shost); + } + + if (phba->sli_rev != LPFC_SLI_REV4) { + pring = &psli->sli3_ring[LPFC_FCP_RING]; + lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock); + return 0; + } + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring || (pring->ringno != LPFC_FCP_RING)) + continue; + if (!lpfc_emptyq_wait(phba, &pring->txcmplq, + &pring->ring_lock)) + break; + } + return 0; +} + +/** + * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode + * @phba: Pointer to HBA context object. + * + * This function is responsible for driver exit processing of setting up + * diag loopback mode on device. 
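+ * It unblocks SCSI requests on every vport (or on the physical port when no
+ * vport work array can be allocated) that lpfc_bsg_diag_mode_enter had
+ * blocked.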
+ */ +static void +lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba) +{ + struct Scsi_Host *shost; + struct lpfc_vport **vports; + int i; + + vports = lpfc_create_vport_work_array(phba); + if (vports) { + for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + shost = lpfc_shost_from_vport(vports[i]); + scsi_unblock_requests(shost); + } + lpfc_destroy_vport_work_array(phba, vports); + } else { + shost = lpfc_shost_from_vport(phba->pport); + scsi_unblock_requests(shost); + } + return; +} + +/** + * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command + * @phba: Pointer to HBA context object. + * @job: LPFC_BSG_VENDOR_DIAG_MODE + * + * This function is responsible for placing an sli3 port into diagnostic + * loopback mode in order to perform a diagnostic loopback test. + * All new scsi requests are blocked, a small delay is used to allow the + * scsi requests to complete then the link is brought down. If the link is + * is placed in loopback mode then scsi requests are again allowed + * so the scsi mid-layer doesn't give up on the port. + * All of this is done in-line. + */ +static int +lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job) +{ + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + struct diag_mode_set *loopback_mode; + uint32_t link_flags; + uint32_t timeout; + LPFC_MBOXQ_t *pmboxq = NULL; + int mbxstatus = MBX_SUCCESS; + int i = 0; + int rc = 0; + + /* no data to return just the return code */ + bsg_reply->reply_payload_rcv_len = 0; + + if (job->request_len < sizeof(struct fc_bsg_request) + + sizeof(struct diag_mode_set)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2738 Received DIAG MODE request size:%d " + "below the minimum size:%d\n", + job->request_len, + (int)(sizeof(struct fc_bsg_request) + + sizeof(struct diag_mode_set))); + rc = -EINVAL; + goto job_error; + } + + rc = lpfc_bsg_diag_mode_enter(phba); + if (rc) + goto job_error; + + /* bring the link to diagnostic mode */ + loopback_mode = (struct diag_mode_set *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + link_flags = loopback_mode->type; + timeout = loopback_mode->timeout * 100; + + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) { + rc = -ENOMEM; + goto loopback_mode_exit; + } + memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t)); + pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; + pmboxq->u.mb.mbxOwner = OWN_HOST; + + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); + + if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) { + /* wait for link down before proceeding */ + i = 0; + while (phba->link_state != LPFC_LINK_DOWN) { + if (i++ > timeout) { + rc = -ETIMEDOUT; + goto loopback_mode_exit; + } + msleep(10); + } + + memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t)); + if (link_flags == INTERNAL_LOOP_BACK) + pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB; + else + pmboxq->u.mb.un.varInitLnk.link_flags = + FLAGS_TOPOLOGY_MODE_LOOP; + + pmboxq->u.mb.mbxCommand = MBX_INIT_LINK; + pmboxq->u.mb.mbxOwner = OWN_HOST; + + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, + LPFC_MBOX_TMO); + + if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) + rc = -ENODEV; + else { + spin_lock_irq(&phba->hbalock); + phba->link_flag |= LS_LOOPBACK_MODE; + spin_unlock_irq(&phba->hbalock); + /* wait for the link attention interrupt */ + msleep(100); + + i = 0; + while (phba->link_state != LPFC_HBA_READY) { + if (i++ > timeout) { + rc = -ETIMEDOUT; + break; + } + + msleep(10); + 
} + } + + } else + rc = -ENODEV; + +loopback_mode_exit: + lpfc_bsg_diag_mode_exit(phba); + + /* + * Let SLI layer release mboxq if mbox command completed after timeout. + */ + if (pmboxq && mbxstatus != MBX_TIMEOUT) + mempool_free(pmboxq, phba->mbox_mem_pool); + +job_error: + /* make error code available to userspace */ + bsg_reply->result = rc; + /* complete the job back to userspace if no error */ + if (rc == 0) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rc; +} + +/** + * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state + * @phba: Pointer to HBA context object. + * @diag: Flag for set link to diag or nomral operation state. + * + * This function is responsible for issuing a sli4 mailbox command for setting + * link to either diag state or normal operation state. + */ +static int +lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag) +{ + LPFC_MBOXQ_t *pmboxq; + struct lpfc_mbx_set_link_diag_state *link_diag_state; + uint32_t req_len, alloc_len; + int mbxstatus = MBX_SUCCESS, rc; + + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) + return -ENOMEM; + + req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, + req_len, LPFC_SLI4_MBX_EMBED); + if (alloc_len != req_len) { + rc = -ENOMEM; + goto link_diag_state_set_out; + } + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "3128 Set link to diagnostic state:x%x (x%x/x%x)\n", + diag, phba->sli4_hba.lnk_info.lnk_tp, + phba->sli4_hba.lnk_info.lnk_no); + + link_diag_state = &pmboxq->u.mqe.un.link_diag_state; + bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req, + LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE); + bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req, + phba->sli4_hba.lnk_info.lnk_no); + bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req, + phba->sli4_hba.lnk_info.lnk_tp); + if (diag) + bf_set(lpfc_mbx_set_diag_state_diag, + &link_diag_state->u.req, 1); + else + bf_set(lpfc_mbx_set_diag_state_diag, + &link_diag_state->u.req, 0); + + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); + + if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) + rc = 0; + else + rc = -ENODEV; + +link_diag_state_set_out: + if (pmboxq && (mbxstatus != MBX_TIMEOUT)) + mempool_free(pmboxq, phba->mbox_mem_pool); + + return rc; +} + +/** + * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic + * @phba: Pointer to HBA context object. + * @mode: loopback mode to set + * @link_no: link number for loopback mode to set + * + * This function is responsible for issuing a sli4 mailbox command for setting + * up loopback diagnostic for a link. 
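+ *
+ * Returns 0 on success, -ENOMEM when the mailbox cannot be allocated or
+ * built, or -ENODEV when the mailbox command itself fails.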
+ */ +static int +lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode, + uint32_t link_no) +{ + LPFC_MBOXQ_t *pmboxq; + uint32_t req_len, alloc_len; + struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback; + int mbxstatus = MBX_SUCCESS, rc = 0; + + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) + return -ENOMEM; + req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK, + req_len, LPFC_SLI4_MBX_EMBED); + if (alloc_len != req_len) { + mempool_free(pmboxq, phba->mbox_mem_pool); + return -ENOMEM; + } + link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback; + bf_set(lpfc_mbx_set_diag_state_link_num, + &link_diag_loopback->u.req, link_no); + + if (phba->sli4_hba.conf_trunk & (1 << link_no)) { + bf_set(lpfc_mbx_set_diag_state_link_type, + &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED); + } else { + bf_set(lpfc_mbx_set_diag_state_link_type, + &link_diag_loopback->u.req, + phba->sli4_hba.lnk_info.lnk_tp); + } + + bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req, + mode); + + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); + if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "3127 Failed setup loopback mode mailbox " + "command, rc:x%x, status:x%x\n", mbxstatus, + pmboxq->u.mb.mbxStatus); + rc = -ENODEV; + } + if (pmboxq && (mbxstatus != MBX_TIMEOUT)) + mempool_free(pmboxq, phba->mbox_mem_pool); + return rc; +} + +/** + * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic + * @phba: Pointer to HBA context object. + * + * This function set up SLI4 FC port registrations for diagnostic run, which + * includes all the rpis, vfi, and also vpi. + */ +static int +lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba) +{ + if (phba->pport->fc_flag & FC_VFI_REGISTERED) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "3136 Port still had vfi registered: " + "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n", + phba->pport->fc_myDID, phba->fcf.fcfi, + phba->sli4_hba.vfi_ids[phba->pport->vfi], + phba->vpi_ids[phba->pport->vpi]); + return -EINVAL; + } + return lpfc_issue_reg_vfi(phba->pport); +} + +/** + * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command + * @phba: Pointer to HBA context object. + * @job: LPFC_BSG_VENDOR_DIAG_MODE + * + * This function is responsible for placing an sli4 port into diagnostic + * loopback mode in order to perform a diagnostic loopback test. 
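+ * + * The port is reset, the link is brought to the diagnostic state, the + * requested loopback mode (internal or external, trunked or not) is set up, + * and the FC port registrations needed for the loopback test are established.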
+ */ +static int +lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job) +{ + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + struct diag_mode_set *loopback_mode; + uint32_t link_flags, timeout, link_no; + int i, rc = 0; + + /* no data to return just the return code */ + bsg_reply->reply_payload_rcv_len = 0; + + if (job->request_len < sizeof(struct fc_bsg_request) + + sizeof(struct diag_mode_set)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "3011 Received DIAG MODE request size:%d " + "below the minimum size:%d\n", + job->request_len, + (int)(sizeof(struct fc_bsg_request) + + sizeof(struct diag_mode_set))); + rc = -EINVAL; + goto job_done; + } + + loopback_mode = (struct diag_mode_set *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + link_flags = loopback_mode->type; + timeout = loopback_mode->timeout * 100; + + if (loopback_mode->physical_link == -1) + link_no = phba->sli4_hba.lnk_info.lnk_no; + else + link_no = loopback_mode->physical_link; + + if (link_flags == DISABLE_LOOP_BACK) { + rc = lpfc_sli4_bsg_set_loopback_mode(phba, + LPFC_DIAG_LOOPBACK_TYPE_DISABLE, + link_no); + if (!rc) { + /* Unset the need disable bit */ + phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4); + } + goto job_done; + } else { + /* Check if we need to disable the loopback state */ + if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) { + rc = -EPERM; + goto job_done; + } + } + + rc = lpfc_bsg_diag_mode_enter(phba); + if (rc) + goto job_done; + + /* indicate we are in loopback diagnostic mode */ + spin_lock_irq(&phba->hbalock); + phba->link_flag |= LS_LOOPBACK_MODE; + spin_unlock_irq(&phba->hbalock); + + /* reset port to start from scratch */ + rc = lpfc_selective_reset(phba); + if (rc) + goto job_done; + + /* bring the link to diagnostic mode */ + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "3129 Bring link to diagnostic state.\n"); + + rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "3130 Failed to bring link to diagnostic " + "state, rc:x%x\n", rc); + goto loopback_mode_exit; + } + + /* wait for link down before proceeding */ + i = 0; + while (phba->link_state != LPFC_LINK_DOWN) { + if (i++ > timeout) { + rc = -ETIMEDOUT; + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "3131 Timeout waiting for link to " + "diagnostic mode, timeout:%d ms\n", + timeout * 10); + goto loopback_mode_exit; + } + msleep(10); + } + + /* set up loopback mode */ + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "3132 Set up loopback mode:x%x\n", link_flags); + + switch (link_flags) { + case INTERNAL_LOOP_BACK: + if (phba->sli4_hba.conf_trunk & (1 << link_no)) { + rc = lpfc_sli4_bsg_set_loopback_mode(phba, + LPFC_DIAG_LOOPBACK_TYPE_INTERNAL, + link_no); + } else { + /* Trunk is configured, but link is not in this trunk */ + if (phba->sli4_hba.conf_trunk) { + rc = -ELNRNG; + goto loopback_mode_exit; + } + + rc = lpfc_sli4_bsg_set_loopback_mode(phba, + LPFC_DIAG_LOOPBACK_TYPE_INTERNAL, + link_no); + } + + if (!rc) { + /* Set the need disable bit */ + phba->sli4_hba.conf_trunk |= (1 << link_no) << 4; + } + + break; + case EXTERNAL_LOOP_BACK: + if (phba->sli4_hba.conf_trunk & (1 << link_no)) { + rc = lpfc_sli4_bsg_set_loopback_mode(phba, + LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED, + link_no); + } else { + /* Trunk is configured, but link is not in this trunk */ + if (phba->sli4_hba.conf_trunk) { + rc = -ELNRNG; + goto loopback_mode_exit; + } + + rc = 
lpfc_sli4_bsg_set_loopback_mode(phba, + LPFC_DIAG_LOOPBACK_TYPE_SERDES, + link_no); + } + + if (!rc) { + /* Set the need disable bit */ + phba->sli4_hba.conf_trunk |= (1 << link_no) << 4; + } + + break; + default: + rc = -EINVAL; + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "3141 Loopback mode:x%x not supported\n", + link_flags); + goto loopback_mode_exit; + } + + if (!rc) { + /* wait for the link attention interrupt */ + msleep(100); + i = 0; + while (phba->link_state < LPFC_LINK_UP) { + if (i++ > timeout) { + rc = -ETIMEDOUT; + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "3137 Timeout waiting for link up " + "in loopback mode, timeout:%d ms\n", + timeout * 10); + break; + } + msleep(10); + } + } + + /* port resource registration setup for loopback diagnostic */ + if (!rc) { + /* set up a none zero myDID for loopback test */ + phba->pport->fc_myDID = 1; + rc = lpfc_sli4_diag_fcport_reg_setup(phba); + } else + goto loopback_mode_exit; + + if (!rc) { + /* wait for the port ready */ + msleep(100); + i = 0; + while (phba->link_state != LPFC_HBA_READY) { + if (i++ > timeout) { + rc = -ETIMEDOUT; + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "3133 Timeout waiting for port " + "loopback mode ready, timeout:%d ms\n", + timeout * 10); + break; + } + msleep(10); + } + } + +loopback_mode_exit: + /* clear loopback diagnostic mode */ + if (rc) { + spin_lock_irq(&phba->hbalock); + phba->link_flag &= ~LS_LOOPBACK_MODE; + spin_unlock_irq(&phba->hbalock); + } + lpfc_bsg_diag_mode_exit(phba); + +job_done: + /* make error code available to userspace */ + bsg_reply->result = rc; + /* complete the job back to userspace if no error */ + if (rc == 0) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rc; +} + +/** + * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode + * @job: LPFC_BSG_VENDOR_DIAG_MODE + * + * This function is responsible for responding to check and dispatch bsg diag + * command from the user to proper driver action routines. + */ +static int +lpfc_bsg_diag_loopback_mode(struct bsg_job *job) +{ + struct Scsi_Host *shost; + struct lpfc_vport *vport; + struct lpfc_hba *phba; + int rc; + + shost = fc_bsg_to_shost(job); + if (!shost) + return -ENODEV; + vport = shost_priv(shost); + if (!vport) + return -ENODEV; + phba = vport->phba; + if (!phba) + return -ENODEV; + + if (phba->sli_rev < LPFC_SLI_REV4) + rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job); + else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= + LPFC_SLI_INTF_IF_TYPE_2) + rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job); + else + rc = -ENODEV; + + return rc; +} + +/** + * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode + * @job: LPFC_BSG_VENDOR_DIAG_MODE_END + * + * This function is responsible for responding to check and dispatch bsg diag + * command from the user to proper driver action routines. 
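+ * + * Ending diagnostic mode is only supported on SLI4 ports with interface + * type 2 or later: the loopback flag is cleared, the link is taken out of the + * diagnostic state, and the port is reset to restore normal operation.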
+ */ +static int +lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job) +{ + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + struct Scsi_Host *shost; + struct lpfc_vport *vport; + struct lpfc_hba *phba; + struct diag_mode_set *loopback_mode_end_cmd; + uint32_t timeout; + int rc, i; + + shost = fc_bsg_to_shost(job); + if (!shost) + return -ENODEV; + vport = shost_priv(shost); + if (!vport) + return -ENODEV; + phba = vport->phba; + if (!phba) + return -ENODEV; + + if (phba->sli_rev < LPFC_SLI_REV4) + return -ENODEV; + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < + LPFC_SLI_INTF_IF_TYPE_2) + return -ENODEV; + + /* clear loopback diagnostic mode */ + spin_lock_irq(&phba->hbalock); + phba->link_flag &= ~LS_LOOPBACK_MODE; + spin_unlock_irq(&phba->hbalock); + loopback_mode_end_cmd = (struct diag_mode_set *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + timeout = loopback_mode_end_cmd->timeout * 100; + + rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "3139 Failed to bring link to diagnostic " + "state, rc:x%x\n", rc); + goto loopback_mode_end_exit; + } + + /* wait for link down before proceeding */ + i = 0; + while (phba->link_state != LPFC_LINK_DOWN) { + if (i++ > timeout) { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "3140 Timeout waiting for link to " + "diagnostic mode_end, timeout:%d ms\n", + timeout * 10); + /* there is nothing much we can do here */ + break; + } + msleep(10); + } + + /* reset port resource registrations */ + rc = lpfc_selective_reset(phba); + phba->pport->fc_myDID = 0; + +loopback_mode_end_exit: + /* make return code available to userspace */ + bsg_reply->result = rc; + /* complete the job back to userspace if no error */ + if (rc == 0) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rc; +} + +/** + * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test + * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST + * + * This function performs an SLI4 diag link test request from the user + * application. 
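+ * + * Return: 0 when the test was issued, with the mailbox, shdr_status and + * shdr_add_status values copied back to the application's diag_status reply, + * otherwise a negative error code.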
+ */ +static int +lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) +{ + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + struct Scsi_Host *shost; + struct lpfc_vport *vport; + struct lpfc_hba *phba; + LPFC_MBOXQ_t *pmboxq; + struct sli4_link_diag *link_diag_test_cmd; + uint32_t req_len, alloc_len; + struct lpfc_mbx_run_link_diag_test *run_link_diag_test; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t shdr_status, shdr_add_status; + struct diag_status *diag_status_reply; + int mbxstatus, rc = -ENODEV, rc1 = 0; + + shost = fc_bsg_to_shost(job); + if (!shost) + goto job_error; + + vport = shost_priv(shost); + if (!vport) + goto job_error; + + phba = vport->phba; + if (!phba) + goto job_error; + + + if (phba->sli_rev < LPFC_SLI_REV4) + goto job_error; + + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < + LPFC_SLI_INTF_IF_TYPE_2) + goto job_error; + + if (job->request_len < sizeof(struct fc_bsg_request) + + sizeof(struct sli4_link_diag)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "3013 Received LINK DIAG TEST request " + " size:%d below the minimum size:%d\n", + job->request_len, + (int)(sizeof(struct fc_bsg_request) + + sizeof(struct sli4_link_diag))); + rc = -EINVAL; + goto job_error; + } + + rc = lpfc_bsg_diag_mode_enter(phba); + if (rc) + goto job_error; + + link_diag_test_cmd = (struct sli4_link_diag *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + + rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); + + if (rc) + goto job_error; + + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) { + rc = -ENOMEM; + goto link_diag_test_exit; + } + + req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, + req_len, LPFC_SLI4_MBX_EMBED); + if (alloc_len != req_len) { + rc = -ENOMEM; + goto link_diag_test_exit; + } + + run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test; + bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req, + phba->sli4_hba.lnk_info.lnk_no); + bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req, + phba->sli4_hba.lnk_info.lnk_tp); + bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req, + link_diag_test_cmd->test_id); + bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req, + link_diag_test_cmd->loops); + bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req, + link_diag_test_cmd->test_version); + bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req, + link_diag_test_cmd->error_action); + + mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); + + shdr = (union lpfc_sli4_cfg_shdr *) + &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || mbxstatus) { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "3010 Run link diag test mailbox failed with " + "mbx_status x%x status x%x, add_status x%x\n", + mbxstatus, shdr_status, shdr_add_status); + } + + diag_status_reply = (struct diag_status *) + bsg_reply->reply_data.vendor_reply.vendor_rsp; + + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "3012 Received Run link diag test reply " + "below minimum size (%d): reply_len:%d\n", + (int)(sizeof(*bsg_reply) + + 
sizeof(*diag_status_reply)), + job->reply_len); + rc = -EINVAL; + goto job_error; + } + + diag_status_reply->mbox_status = mbxstatus; + diag_status_reply->shdr_status = shdr_status; + diag_status_reply->shdr_add_status = shdr_add_status; + +link_diag_test_exit: + rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0); + + if (pmboxq) + mempool_free(pmboxq, phba->mbox_mem_pool); + + lpfc_bsg_diag_mode_exit(phba); + +job_error: + /* make error code available to userspace */ + if (rc1 && !rc) + rc = rc1; + bsg_reply->result = rc; + /* complete the job back to userspace if no error */ + if (rc == 0) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rc; +} + +/** + * lpfcdiag_loop_self_reg - obtains a remote port login id + * @phba: Pointer to HBA context object + * @rpi: Pointer to a remote port login id + * + * This function obtains a remote port login id so the diag loopback test + * can send and receive its own unsolicited CT command. + **/ +static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi) +{ + LPFC_MBOXQ_t *mbox; + struct lpfc_dmabuf *dmabuff; + int status; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + if (phba->sli_rev < LPFC_SLI_REV4) + status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, + (uint8_t *)&phba->pport->fc_sparam, + mbox, *rpi); + else { + *rpi = lpfc_sli4_alloc_rpi(phba); + if (*rpi == LPFC_RPI_ALLOC_ERROR) { + mempool_free(mbox, phba->mbox_mem_pool); + return -EBUSY; + } + status = lpfc_reg_rpi(phba, phba->pport->vpi, + phba->pport->fc_myDID, + (uint8_t *)&phba->pport->fc_sparam, + mbox, *rpi); + } + + if (status) { + mempool_free(mbox, phba->mbox_mem_pool); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_free_rpi(phba, *rpi); + return -ENOMEM; + } + + dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf; + mbox->ctx_buf = NULL; + mbox->ctx_ndlp = NULL; + status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); + + if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { + lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); + kfree(dmabuff); + if (status != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_free_rpi(phba, *rpi); + return -ENODEV; + } + + if (phba->sli_rev < LPFC_SLI_REV4) + *rpi = mbox->u.mb.un.varWords[0]; + + lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); + kfree(dmabuff); + mempool_free(mbox, phba->mbox_mem_pool); + return 0; +} + +/** + * lpfcdiag_loop_self_unreg - unregs from the rpi + * @phba: Pointer to HBA context object + * @rpi: Remote port login id + * + * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg + **/ +static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) +{ + LPFC_MBOXQ_t *mbox; + int status; + + /* Allocate mboxq structure */ + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mbox == NULL) + return -ENOMEM; + + if (phba->sli_rev < LPFC_SLI_REV4) + lpfc_unreg_login(phba, 0, rpi, mbox); + else + lpfc_unreg_login(phba, phba->pport->vpi, + phba->sli4_hba.rpi_ids[rpi], mbox); + + status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); + + if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { + if (status != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + return -EIO; + } + mempool_free(mbox, phba->mbox_mem_pool); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_free_rpi(phba, rpi); + return 0; +} + +/** + * lpfcdiag_loop_get_xri - obtains the transmit and receive ids + * @phba: Pointer to HBA context object + * 
@rpi: Remote port login id + * @txxri: Pointer to transmit exchange id + * @rxxri: Pointer to response exchange id + * + * This function obtains the transmit and receive ids required to send + * an unsolicited ct command with a payload. Special lpfc FsType and CmdRsp + * flags are used so the unsolicited response handler is able to process + * the ct command sent on the same port. + **/ +static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, + uint16_t *txxri, uint16_t * rxxri) +{ + struct lpfc_bsg_event *evt; + struct lpfc_iocbq *cmdiocbq, *rspiocbq; + struct lpfc_dmabuf *dmabuf; + struct ulp_bde64 *bpl = NULL; + struct lpfc_sli_ct_request *ctreq = NULL; + int ret_val = 0; + int time_left; + int iocb_stat = IOCB_SUCCESS; + unsigned long flags; + u32 status; + + *txxri = 0; + *rxxri = 0; + evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, + SLI_CT_ELX_LOOPBACK); + if (!evt) + return -ENOMEM; + + spin_lock_irqsave(&phba->ct_ev_lock, flags); + list_add(&evt->node, &phba->ct_ev_waiters); + lpfc_bsg_event_ref(evt); + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + cmdiocbq = lpfc_sli_get_iocbq(phba); + rspiocbq = lpfc_sli_get_iocbq(phba); + + dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (dmabuf) { + dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys); + if (dmabuf->virt) { + INIT_LIST_HEAD(&dmabuf->list); + bpl = (struct ulp_bde64 *) dmabuf->virt; + memset(bpl, 0, sizeof(*bpl)); + ctreq = (struct lpfc_sli_ct_request *)(bpl + 1); + bpl->addrHigh = + le32_to_cpu(putPaddrHigh(dmabuf->phys + + sizeof(*bpl))); + bpl->addrLow = + le32_to_cpu(putPaddrLow(dmabuf->phys + + sizeof(*bpl))); + bpl->tus.f.bdeFlags = 0; + bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + } + } + + if (cmdiocbq == NULL || rspiocbq == NULL || + dmabuf == NULL || bpl == NULL || ctreq == NULL || + dmabuf->virt == NULL) { + ret_val = -ENOMEM; + goto err_get_xri_exit; + } + + memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); + + ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; + ctreq->RevisionId.bits.InId = 0; + ctreq->FsType = SLI_CT_ELX_LOOPBACK; + ctreq->FsSubType = 0; + ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP; + ctreq->CommandResponse.bits.Size = 0; + + cmdiocbq->bpl_dmabuf = dmabuf; + cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; + cmdiocbq->vport = phba->pport; + cmdiocbq->cmd_cmpl = NULL; + + lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, dmabuf, rpi, 0, 1, + FC_RCTL_DD_SOL_CTL, 0, CMD_XMIT_SEQUENCE64_CR); + + iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, + rspiocbq, (phba->fc_ratov * 2) + + LPFC_DRVR_TIMEOUT); + + status = get_job_ulpstatus(phba, rspiocbq); + if (iocb_stat != IOCB_SUCCESS || status != IOCB_SUCCESS) { + ret_val = -EIO; + goto err_get_xri_exit; + } + *txxri = get_job_ulpcontext(phba, rspiocbq); + + evt->waiting = 1; + evt->wait_time_stamp = jiffies; + time_left = wait_event_interruptible_timeout( + evt->wq, !list_empty(&evt->events_to_see), + msecs_to_jiffies(1000 * + ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); + if (list_empty(&evt->events_to_see)) + ret_val = (time_left) ? 
-EINTR : -ETIMEDOUT; + else { + spin_lock_irqsave(&phba->ct_ev_lock, flags); + list_move(evt->events_to_see.prev, &evt->events_to_get); + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + *rxxri = (list_entry(evt->events_to_get.prev, + typeof(struct event_data), + node))->immed_dat; + } + evt->waiting = 0; + +err_get_xri_exit: + spin_lock_irqsave(&phba->ct_ev_lock, flags); + lpfc_bsg_event_unref(evt); /* release ref */ + lpfc_bsg_event_unref(evt); /* delete */ + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + if (dmabuf) { + if (dmabuf->virt) + lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + } + + if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT)) + lpfc_sli_release_iocbq(phba, cmdiocbq); + if (rspiocbq) + lpfc_sli_release_iocbq(phba, rspiocbq); + return ret_val; +} + +/** + * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers + * @phba: Pointer to HBA context object + * + * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and + * returns the pointer to the buffer. + **/ +static struct lpfc_dmabuf * +lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba) +{ + struct lpfc_dmabuf *dmabuf; + struct pci_dev *pcidev = phba->pcidev; + + /* allocate dma buffer struct */ + dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) + return NULL; + + INIT_LIST_HEAD(&dmabuf->list); + + /* now, allocate dma buffer */ + dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, + &(dmabuf->phys), GFP_KERNEL); + + if (!dmabuf->virt) { + kfree(dmabuf); + return NULL; + } + + return dmabuf; +} + +/** + * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer + * @phba: Pointer to HBA context object. + * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor. + * + * This routine just simply frees a dma buffer and its associated buffer + * descriptor referred by @dmabuf. + **/ +static void +lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf) +{ + struct pci_dev *pcidev = phba->pcidev; + + if (!dmabuf) + return; + + if (dmabuf->virt) + dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE, + dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + return; +} + +/** + * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers + * @phba: Pointer to HBA context object. + * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs. + * + * This routine just simply frees all dma buffers and their associated buffer + * descriptors referred by @dmabuf_list. + **/ +static void +lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba, + struct list_head *dmabuf_list) +{ + struct lpfc_dmabuf *dmabuf, *next_dmabuf; + + if (list_empty(dmabuf_list)) + return; + + list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) { + list_del_init(&dmabuf->list); + lpfc_bsg_dma_page_free(phba, dmabuf); + } + return; +} + +/** + * diag_cmd_data_alloc - fills in a bde struct with dma buffers + * @phba: Pointer to HBA context object + * @bpl: Pointer to 64 bit bde structure + * @size: Number of bytes to process + * @nocopydata: Flag to copy user data into the allocated buffer + * + * This function allocates page size buffers and populates an lpfc_dmabufext. + * If allowed the user data pointed to with indataptr is copied into the kernel + * memory. The chained list of page size buffers is returned. 
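+ * + * Return: pointer to the head of the lpfc_dmabufext list, with the number of + * BDEs built recorded in its flag field, or NULL on allocation failure.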
+ **/ +static struct lpfc_dmabufext * +diag_cmd_data_alloc(struct lpfc_hba *phba, + struct ulp_bde64 *bpl, uint32_t size, + int nocopydata) +{ + struct lpfc_dmabufext *mlist = NULL; + struct lpfc_dmabufext *dmp; + int cnt, offset = 0, i = 0; + struct pci_dev *pcidev; + + pcidev = phba->pcidev; + + while (size) { + /* We get chunks of 4K */ + if (size > BUF_SZ_4K) + cnt = BUF_SZ_4K; + else + cnt = size; + + /* allocate struct lpfc_dmabufext buffer header */ + dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL); + if (!dmp) + goto out; + + INIT_LIST_HEAD(&dmp->dma.list); + + /* Queue it to a linked list */ + if (mlist) + list_add_tail(&dmp->dma.list, &mlist->dma.list); + else + mlist = dmp; + + /* allocate buffer */ + dmp->dma.virt = dma_alloc_coherent(&pcidev->dev, + cnt, + &(dmp->dma.phys), + GFP_KERNEL); + + if (!dmp->dma.virt) + goto out; + + dmp->size = cnt; + + if (nocopydata) { + bpl->tus.f.bdeFlags = 0; + } else { + memset((uint8_t *)dmp->dma.virt, 0, cnt); + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + } + + /* build buffer ptr list for IOCB */ + bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys)); + bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys)); + bpl->tus.f.bdeSize = (ushort) cnt; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + bpl++; + + i++; + offset += cnt; + size -= cnt; + } + + if (mlist) { + mlist->flag = i; + return mlist; + } +out: + diag_cmd_data_free(phba, mlist); + return NULL; +} + +/** + * lpfcdiag_sli3_loop_post_rxbufs - post the receive buffers for an unsol CT cmd + * @phba: Pointer to HBA context object + * @rxxri: Receive exchange id + * @len: Number of data bytes + * + * This function allocates and posts a data buffer of sufficient size to receive + * an unsolicited CT command. + **/ +static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, + size_t len) +{ + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *cmdiocbq; + IOCB_t *cmd = NULL; + struct list_head head, *curr, *next; + struct lpfc_dmabuf *rxbmp; + struct lpfc_dmabuf *dmp; + struct lpfc_dmabuf *mp[2] = {NULL, NULL}; + struct ulp_bde64 *rxbpl = NULL; + uint32_t num_bde; + struct lpfc_dmabufext *rxbuffer = NULL; + int ret_val = 0; + int iocb_stat; + int i = 0; + + pring = lpfc_phba_elsring(phba); + + cmdiocbq = lpfc_sli_get_iocbq(phba); + rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (rxbmp != NULL) { + rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); + if (rxbmp->virt) { + INIT_LIST_HEAD(&rxbmp->list); + rxbpl = (struct ulp_bde64 *) rxbmp->virt; + rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0); + } + } + + if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) { + ret_val = -ENOMEM; + goto err_post_rxbufs_exit; + } + + /* Queue buffers for the receive exchange */ + num_bde = (uint32_t)rxbuffer->flag; + dmp = &rxbuffer->dma; + cmd = &cmdiocbq->iocb; + i = 0; + + INIT_LIST_HEAD(&head); + list_add_tail(&head, &dmp->list); + list_for_each_safe(curr, next, &head) { + mp[i] = list_entry(curr, struct lpfc_dmabuf, list); + list_del(curr); + + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba); + cmd->un.quexri64cx.buff.bde.addrHigh = + putPaddrHigh(mp[i]->phys); + cmd->un.quexri64cx.buff.bde.addrLow = + putPaddrLow(mp[i]->phys); + cmd->un.quexri64cx.buff.bde.tus.f.bdeSize = + ((struct lpfc_dmabufext *)mp[i])->size; + cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag; + cmd->ulpCommand = CMD_QUE_XRI64_CX; + cmd->ulpPU = 0; + cmd->ulpLe = 1; + cmd->ulpBdeCount = 1; + 
cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0; + + } else { + cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys); + cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys); + cmd->un.cont64[i].tus.f.bdeSize = + ((struct lpfc_dmabufext *)mp[i])->size; + cmd->ulpBdeCount = ++i; + + if ((--num_bde > 0) && (i < 2)) + continue; + + cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX; + cmd->ulpLe = 1; + } + + cmd->ulpClass = CLASS3; + cmd->ulpContext = rxxri; + + iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, + 0); + if (iocb_stat == IOCB_ERROR) { + diag_cmd_data_free(phba, + (struct lpfc_dmabufext *)mp[0]); + if (mp[1]) + diag_cmd_data_free(phba, + (struct lpfc_dmabufext *)mp[1]); + dmp = list_entry(next, struct lpfc_dmabuf, list); + ret_val = -EIO; + goto err_post_rxbufs_exit; + } + + lpfc_sli_ringpostbuf_put(phba, pring, mp[0]); + if (mp[1]) { + lpfc_sli_ringpostbuf_put(phba, pring, mp[1]); + mp[1] = NULL; + } + + /* The iocb was freed by lpfc_sli_issue_iocb */ + cmdiocbq = lpfc_sli_get_iocbq(phba); + if (!cmdiocbq) { + dmp = list_entry(next, struct lpfc_dmabuf, list); + ret_val = -EIO; + goto err_post_rxbufs_exit; + } + cmd = &cmdiocbq->iocb; + i = 0; + } + list_del(&head); + +err_post_rxbufs_exit: + + if (rxbmp) { + if (rxbmp->virt) + lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys); + kfree(rxbmp); + } + + if (cmdiocbq) + lpfc_sli_release_iocbq(phba, cmdiocbq); + return ret_val; +} + +/** + * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself + * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job + * + * This function receives a user data buffer to be transmitted and received on + * the same port, the link must be up and in loopback mode prior + * to being called. + * 1. A kernel buffer is allocated to copy the user data into. + * 2. The port registers with "itself". + * 3. The transmit and receive exchange ids are obtained. + * 4. The receive exchange id is posted. + * 5. A new els loopback event is created. + * 6. The command and response iocbs are allocated. + * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to looppback. + * + * This function is meant to be called n times while the port is in loopback + * so it is the apps responsibility to issue a reset to take the port out + * of loopback mode. 
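+ * + * Return: IOCB_SUCCESS when the looped-back data has been received and copied + * into the reply payload, otherwise a negative error code.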
+ **/ +static int +lpfc_bsg_diag_loopback_run(struct bsg_job *job) +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct fc_bsg_reply *bsg_reply = job->reply; + struct lpfc_hba *phba = vport->phba; + struct lpfc_bsg_event *evt; + struct event_data *evdat; + struct lpfc_sli *psli = &phba->sli; + uint32_t size; + uint32_t full_size; + size_t segment_len = 0, segment_offset = 0, current_offset = 0; + uint16_t rpi = 0; + struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL; + union lpfc_wqe128 *cmdwqe, *rspwqe; + struct lpfc_sli_ct_request *ctreq; + struct lpfc_dmabuf *txbmp; + struct ulp_bde64 *txbpl = NULL; + struct lpfc_dmabufext *txbuffer = NULL; + struct list_head head; + struct lpfc_dmabuf *curr; + uint16_t txxri = 0, rxxri; + uint32_t num_bde; + uint8_t *ptr = NULL, *rx_databuf = NULL; + int rc = 0; + int time_left; + int iocb_stat = IOCB_SUCCESS; + unsigned long flags; + void *dataout = NULL; + uint32_t total_mem; + + /* in case no data is returned return just the return code */ + bsg_reply->reply_payload_rcv_len = 0; + + if (job->request_len < + sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2739 Received DIAG TEST request below minimum " + "size\n"); + rc = -EINVAL; + goto loopback_test_exit; + } + + if (job->request_payload.payload_len != + job->reply_payload.payload_len) { + rc = -EINVAL; + goto loopback_test_exit; + } + + if ((phba->link_state == LPFC_HBA_ERROR) || + (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || + (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { + rc = -EACCES; + goto loopback_test_exit; + } + + if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) { + rc = -EACCES; + goto loopback_test_exit; + } + + size = job->request_payload.payload_len; + full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */ + + if ((size == 0) || (size > 80 * BUF_SZ_4K)) { + rc = -ERANGE; + goto loopback_test_exit; + } + + if (full_size >= BUF_SZ_4K) { + /* + * Allocate memory for ioctl data. If buffer is bigger than 64k, + * then we allocate 64k and re-use that buffer over and over to + * xfer the whole block. This is because Linux kernel has a + * problem allocating more than 120k of kernel space memory. Saw + * problem with GET_FCPTARGETMAPPING... 
+ */ + if (size <= (64 * 1024)) + total_mem = full_size; + else + total_mem = 64 * 1024; + } else + /* Allocate memory for ioctl data */ + total_mem = BUF_SZ_4K; + + dataout = kmalloc(total_mem, GFP_KERNEL); + if (dataout == NULL) { + rc = -ENOMEM; + goto loopback_test_exit; + } + + ptr = dataout; + ptr += ELX_LOOPBACK_HEADER_SZ; + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + ptr, size); + rc = lpfcdiag_loop_self_reg(phba, &rpi); + if (rc) + goto loopback_test_exit; + + if (phba->sli_rev < LPFC_SLI_REV4) { + rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); + if (rc) { + lpfcdiag_loop_self_unreg(phba, rpi); + goto loopback_test_exit; + } + + rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size); + if (rc) { + lpfcdiag_loop_self_unreg(phba, rpi); + goto loopback_test_exit; + } + } + evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, + SLI_CT_ELX_LOOPBACK); + if (!evt) { + lpfcdiag_loop_self_unreg(phba, rpi); + rc = -ENOMEM; + goto loopback_test_exit; + } + + spin_lock_irqsave(&phba->ct_ev_lock, flags); + list_add(&evt->node, &phba->ct_ev_waiters); + lpfc_bsg_event_ref(evt); + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + cmdiocbq = lpfc_sli_get_iocbq(phba); + if (phba->sli_rev < LPFC_SLI_REV4) + rspiocbq = lpfc_sli_get_iocbq(phba); + txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + + if (txbmp) { + txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys); + if (txbmp->virt) { + INIT_LIST_HEAD(&txbmp->list); + txbpl = (struct ulp_bde64 *) txbmp->virt; + txbuffer = diag_cmd_data_alloc(phba, + txbpl, full_size, 0); + } + } + + if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) { + rc = -ENOMEM; + goto err_loopback_test_exit; + } + if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) { + rc = -ENOMEM; + goto err_loopback_test_exit; + } + + cmdwqe = &cmdiocbq->wqe; + memset(cmdwqe, 0, sizeof(union lpfc_wqe)); + if (phba->sli_rev < LPFC_SLI_REV4) { + rspwqe = &rspiocbq->wqe; + memset(rspwqe, 0, sizeof(union lpfc_wqe)); + } + + INIT_LIST_HEAD(&head); + list_add_tail(&head, &txbuffer->dma.list); + list_for_each_entry(curr, &head, list) { + segment_len = ((struct lpfc_dmabufext *)curr)->size; + if (current_offset == 0) { + ctreq = curr->virt; + memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); + ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; + ctreq->RevisionId.bits.InId = 0; + ctreq->FsType = SLI_CT_ELX_LOOPBACK; + ctreq->FsSubType = 0; + ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(ELX_LOOPBACK_DATA); + ctreq->CommandResponse.bits.Size = cpu_to_be16(size); + segment_offset = ELX_LOOPBACK_HEADER_SZ; + } else + segment_offset = 0; + + BUG_ON(segment_offset >= segment_len); + memcpy(curr->virt + segment_offset, + ptr + current_offset, + segment_len - segment_offset); + + current_offset += segment_len - segment_offset; + BUG_ON(current_offset > size); + } + list_del(&head); + + /* Build the XMIT_SEQUENCE iocb */ + num_bde = (uint32_t)txbuffer->flag; + + cmdiocbq->num_bdes = num_bde; + cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; + cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK; + cmdiocbq->vport = phba->pport; + cmdiocbq->cmd_cmpl = NULL; + cmdiocbq->bpl_dmabuf = txbmp; + + if (phba->sli_rev < LPFC_SLI_REV4) { + lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri, + num_bde, FC_RCTL_DD_UNSOL_CTL, 1, + CMD_XMIT_SEQUENCE64_CX); + + } else { + lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, + phba->sli4_hba.rpi_ids[rpi], 0xffff, + full_size, FC_RCTL_DD_UNSOL_CTL, 1, + CMD_XMIT_SEQUENCE64_WQE); + cmdiocbq->sli4_xritag = NO_XRI; + } + + 
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, + rspiocbq, (phba->fc_ratov * 2) + + LPFC_DRVR_TIMEOUT); + if (iocb_stat != IOCB_SUCCESS || + (phba->sli_rev < LPFC_SLI_REV4 && + (get_job_ulpstatus(phba, rspiocbq) != IOSTAT_SUCCESS))) { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "3126 Failed loopback test issue iocb: " + "iocb_stat:x%x\n", iocb_stat); + rc = -EIO; + goto err_loopback_test_exit; + } + + evt->waiting = 1; + time_left = wait_event_interruptible_timeout( + evt->wq, !list_empty(&evt->events_to_see), + msecs_to_jiffies(1000 * + ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); + evt->waiting = 0; + if (list_empty(&evt->events_to_see)) { + rc = (time_left) ? -EINTR : -ETIMEDOUT; + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "3125 Not receiving unsolicited event, " + "rc:x%x\n", rc); + } else { + spin_lock_irqsave(&phba->ct_ev_lock, flags); + list_move(evt->events_to_see.prev, &evt->events_to_get); + evdat = list_entry(evt->events_to_get.prev, + typeof(*evdat), node); + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + rx_databuf = evdat->data; + if (evdat->len != full_size) { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "1603 Loopback test did not receive expected " + "data length. actual length 0x%x expected " + "length 0x%x\n", + evdat->len, full_size); + rc = -EIO; + } else if (rx_databuf == NULL) + rc = -EIO; + else { + rc = IOCB_SUCCESS; + /* skip over elx loopback header */ + rx_databuf += ELX_LOOPBACK_HEADER_SZ; + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + rx_databuf, size); + bsg_reply->reply_payload_rcv_len = size; + } + } + +err_loopback_test_exit: + lpfcdiag_loop_self_unreg(phba, rpi); + + spin_lock_irqsave(&phba->ct_ev_lock, flags); + lpfc_bsg_event_unref(evt); /* release ref */ + lpfc_bsg_event_unref(evt); /* delete */ + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT)) + lpfc_sli_release_iocbq(phba, cmdiocbq); + + if (rspiocbq != NULL) + lpfc_sli_release_iocbq(phba, rspiocbq); + + if (txbmp != NULL) { + if (txbpl != NULL) { + if (txbuffer != NULL) + diag_cmd_data_free(phba, txbuffer); + lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys); + } + kfree(txbmp); + } + +loopback_test_exit: + kfree(dataout); + /* make error code available to userspace */ + bsg_reply->result = rc; + job->dd_data = NULL; + /* complete the job back to userspace if no error */ + if (rc == IOCB_SUCCESS) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rc; +} + +/** + * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command + * @job: GET_DFC_REV fc_bsg_job + **/ +static int +lpfc_bsg_get_dfc_rev(struct bsg_job *job) +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct fc_bsg_reply *bsg_reply = job->reply; + struct lpfc_hba *phba = vport->phba; + struct get_mgmt_rev_reply *event_reply; + int rc = 0; + + if (job->request_len < + sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2740 Received GET_DFC_REV request below " + "minimum size\n"); + rc = -EINVAL; + goto job_error; + } + + event_reply = (struct get_mgmt_rev_reply *) + bsg_reply->reply_data.vendor_reply.vendor_rsp; + + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2741 Received GET_DFC_REV reply below " + "minimum size\n"); + rc = -EINVAL; + goto job_error; + } + + 
event_reply->info.a_Major = MANAGEMENT_MAJOR_REV; + event_reply->info.a_Minor = MANAGEMENT_MINOR_REV; +job_error: + bsg_reply->result = rc; + if (rc == 0) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rc; +} + +/** + * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler + * @phba: Pointer to HBA context object. + * @pmboxq: Pointer to mailbox command. + * + * This is completion handler function for mailbox commands issued from + * lpfc_bsg_issue_mbox function. This function is called by the + * mailbox event handler function with no lock held. This function + * will wake up thread waiting on the wait queue pointed by dd_data + * of the mailbox. + **/ +static void +lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) +{ + struct bsg_job_data *dd_data; + struct fc_bsg_reply *bsg_reply; + struct bsg_job *job; + uint32_t size; + unsigned long flags; + uint8_t *pmb, *pmb_buf; + + dd_data = pmboxq->ctx_ndlp; + + /* + * The outgoing buffer is readily referred from the dma buffer, + * just need to get header part from mailboxq structure. + */ + pmb = (uint8_t *)&pmboxq->u.mb; + pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; + memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); + + /* Determine if job has been aborted */ + + spin_lock_irqsave(&phba->ct_ev_lock, flags); + job = dd_data->set_job; + if (job) { + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; + } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Copy the mailbox data to the job if it is still active */ + + if (job) { + bsg_reply = job->reply; + size = job->reply_payload.payload_len; + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + pmb_buf, size); + } + + dd_data->set_job = NULL; + mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); + lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers); + kfree(dd_data); + + /* Complete the job if the job is still active */ + + if (job) { + bsg_reply->result = 0; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + } + return; +} + +/** + * lpfc_bsg_check_cmd_access - test for a supported mailbox command + * @phba: Pointer to HBA context object. + * @mb: Pointer to a mailbox object. + * @vport: Pointer to a vport object. + * + * Some commands require the port to be offline, some may not be called from + * the application. 
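+ * + * Return: 0 if the mailbox command is allowed to be passed through from the + * application, -EPERM otherwise.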
+ **/ +static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, + MAILBOX_t *mb, struct lpfc_vport *vport) +{ + /* return negative error values for bsg job */ + switch (mb->mbxCommand) { + /* Offline only */ + case MBX_INIT_LINK: + case MBX_DOWN_LINK: + case MBX_CONFIG_LINK: + case MBX_CONFIG_RING: + case MBX_RESET_RING: + case MBX_UNREG_LOGIN: + case MBX_CLEAR_LA: + case MBX_DUMP_CONTEXT: + case MBX_RUN_DIAGS: + case MBX_RESTART: + case MBX_SET_MASK: + if (!(vport->fc_flag & FC_OFFLINE_MODE)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2743 Command 0x%x is illegal in on-line " + "state\n", + mb->mbxCommand); + return -EPERM; + } + break; + case MBX_WRITE_NV: + case MBX_WRITE_VPARMS: + case MBX_LOAD_SM: + case MBX_READ_NV: + case MBX_READ_CONFIG: + case MBX_READ_RCONFIG: + case MBX_READ_STATUS: + case MBX_READ_XRI: + case MBX_READ_REV: + case MBX_READ_LNK_STAT: + case MBX_DUMP_MEMORY: + case MBX_DOWN_LOAD: + case MBX_UPDATE_CFG: + case MBX_KILL_BOARD: + case MBX_READ_TOPOLOGY: + case MBX_LOAD_AREA: + case MBX_LOAD_EXP_ROM: + case MBX_BEACON: + case MBX_DEL_LD_ENTRY: + case MBX_SET_DEBUG: + case MBX_WRITE_WWN: + case MBX_SLI4_CONFIG: + case MBX_READ_EVENT_LOG: + case MBX_READ_EVENT_LOG_STATUS: + case MBX_WRITE_EVENT_LOG: + case MBX_PORT_CAPABILITIES: + case MBX_PORT_IOV_CONTROL: + case MBX_RUN_BIU_DIAG64: + break; + case MBX_SET_VARIABLE: + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1226 mbox: set_variable 0x%x, 0x%x\n", + mb->un.varWords[0], + mb->un.varWords[1]); + break; + case MBX_READ_SPARM64: + case MBX_REG_LOGIN: + case MBX_REG_LOGIN64: + case MBX_CONFIG_PORT: + case MBX_RUN_BIU_DIAG: + default: + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2742 Unknown Command 0x%x\n", + mb->mbxCommand); + return -EPERM; + } + + return 0; /* ok */ +} + +/** + * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session + * @phba: Pointer to HBA context object. + * + * This routine cleans up and resets BSG handling of a multi-buffer mbox + * command session. + **/ +static void +lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba) +{ + if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) + return; + + /* free all memory, including dma buffers */ + lpfc_bsg_dma_page_list_free(phba, + &phba->mbox_ext_buf_ctx.ext_dmabuf_list); + lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf); + /* multi-buffer write mailbox command pass-through complete */ + memset((char *)&phba->mbox_ext_buf_ctx, 0, + sizeof(struct lpfc_mbox_ext_buf_ctx)); + INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); + + return; +} + +/** + * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl + * @phba: Pointer to HBA context object. + * @pmboxq: Pointer to mailbox command. + * + * This routine handles the BSG job for mailbox command completions with + * multiple external buffers. 
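+ * + * Return: pointer to the bsg job if it is still active, or NULL if the job + * has already been aborted.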
+ **/ +static struct bsg_job * +lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) +{ + struct bsg_job_data *dd_data; + struct bsg_job *job; + struct fc_bsg_reply *bsg_reply; + uint8_t *pmb, *pmb_buf; + unsigned long flags; + uint32_t size; + int rc = 0; + struct lpfc_dmabuf *dmabuf; + struct lpfc_sli_config_mbox *sli_cfg_mbx; + uint8_t *pmbx; + + dd_data = pmboxq->ctx_buf; + + /* Determine if job has been aborted */ + spin_lock_irqsave(&phba->ct_ev_lock, flags); + job = dd_data->set_job; + if (job) { + bsg_reply = job->reply; + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; + } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* + * The outgoing buffer is readily referred from the dma buffer, + * just need to get header part from mailboxq structure. + */ + + pmb = (uint8_t *)&pmboxq->u.mb; + pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; + /* Copy the byte swapped response mailbox back to the user */ + memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); + /* if there is any non-embedded extended data copy that too */ + dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf; + sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; + if (!bsg_bf_get(lpfc_mbox_hdr_emb, + &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) { + pmbx = (uint8_t *)dmabuf->virt; + /* byte swap the extended data following the mailbox command */ + lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)], + &pmbx[sizeof(MAILBOX_t)], + sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len); + } + + /* Complete the job if the job is still active */ + + if (job) { + size = job->reply_payload.payload_len; + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + pmb_buf, size); + + /* result for successful */ + bsg_reply->result = 0; + + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2937 SLI_CONFIG ext-buffer mailbox command " + "(x%x/x%x) complete bsg job done, bsize:%d\n", + phba->mbox_ext_buf_ctx.nembType, + phba->mbox_ext_buf_ctx.mboxType, size); + lpfc_idiag_mbxacc_dump_bsg_mbox(phba, + phba->mbox_ext_buf_ctx.nembType, + phba->mbox_ext_buf_ctx.mboxType, + dma_ebuf, sta_pos_addr, + phba->mbox_ext_buf_ctx.mbx_dmabuf, 0); + } else { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "2938 SLI_CONFIG ext-buffer mailbox " + "command (x%x/x%x) failure, rc:x%x\n", + phba->mbox_ext_buf_ctx.nembType, + phba->mbox_ext_buf_ctx.mboxType, rc); + } + + + /* state change */ + phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE; + kfree(dd_data); + return job; +} + +/** + * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox + * @phba: Pointer to HBA context object. + * @pmboxq: Pointer to mailbox command. + * + * This is completion handler function for mailbox read commands with multiple + * external buffers. 
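+ * + * The multi-buffer session context is reset on a mailbox error or when the + * command used only a single external buffer, and the bsg job is completed if + * it is still active.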
+ **/ +static void +lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) +{ + struct bsg_job *job; + struct fc_bsg_reply *bsg_reply; + + job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); + + /* handle the BSG job with mailbox command */ + if (!job) + pmboxq->u.mb.mbxStatus = MBXERR_ERROR; + + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2939 SLI_CONFIG ext-buffer rd mailbox command " + "complete, ctxState:x%x, mbxStatus:x%x\n", + phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); + + if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1) + lpfc_bsg_mbox_ext_session_reset(phba); + + /* free base driver mailbox structure memory */ + mempool_free(pmboxq, phba->mbox_mem_pool); + + /* if the job is still active, call job done */ + if (job) { + bsg_reply = job->reply; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + } + return; +} + +/** + * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox + * @phba: Pointer to HBA context object. + * @pmboxq: Pointer to mailbox command. + * + * This is completion handler function for mailbox write commands with multiple + * external buffers. + **/ +static void +lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) +{ + struct bsg_job *job; + struct fc_bsg_reply *bsg_reply; + + job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); + + /* handle the BSG job with the mailbox command */ + if (!job) + pmboxq->u.mb.mbxStatus = MBXERR_ERROR; + + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2940 SLI_CONFIG ext-buffer wr mailbox command " + "complete, ctxState:x%x, mbxStatus:x%x\n", + phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); + + /* free all memory, including dma buffers */ + mempool_free(pmboxq, phba->mbox_mem_pool); + lpfc_bsg_mbox_ext_session_reset(phba); + + /* if the job is still active, call job done */ + if (job) { + bsg_reply = job->reply; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + } + + return; +} + +static void +lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp, + uint32_t index, struct lpfc_dmabuf *mbx_dmabuf, + struct lpfc_dmabuf *ext_dmabuf) +{ + struct lpfc_sli_config_mbox *sli_cfg_mbx; + + /* pointer to the start of mailbox command */ + sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt; + + if (nemb_tp == nemb_mse) { + if (index == 0) { + sli_cfg_mbx->un.sli_config_emb0_subsys. + mse[index].pa_hi = + putPaddrHigh(mbx_dmabuf->phys + + sizeof(MAILBOX_t)); + sli_cfg_mbx->un.sli_config_emb0_subsys. + mse[index].pa_lo = + putPaddrLow(mbx_dmabuf->phys + + sizeof(MAILBOX_t)); + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2943 SLI_CONFIG(mse)[%d], " + "bufLen:%d, addrHi:x%x, addrLo:x%x\n", + index, + sli_cfg_mbx->un.sli_config_emb0_subsys. + mse[index].buf_len, + sli_cfg_mbx->un.sli_config_emb0_subsys. + mse[index].pa_hi, + sli_cfg_mbx->un.sli_config_emb0_subsys. + mse[index].pa_lo); + } else { + sli_cfg_mbx->un.sli_config_emb0_subsys. + mse[index].pa_hi = + putPaddrHigh(ext_dmabuf->phys); + sli_cfg_mbx->un.sli_config_emb0_subsys. + mse[index].pa_lo = + putPaddrLow(ext_dmabuf->phys); + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2944 SLI_CONFIG(mse)[%d], " + "bufLen:%d, addrHi:x%x, addrLo:x%x\n", + index, + sli_cfg_mbx->un.sli_config_emb0_subsys. + mse[index].buf_len, + sli_cfg_mbx->un.sli_config_emb0_subsys. + mse[index].pa_hi, + sli_cfg_mbx->un.sli_config_emb0_subsys. 
+ mse[index].pa_lo); + } + } else { + if (index == 0) { + sli_cfg_mbx->un.sli_config_emb1_subsys. + hbd[index].pa_hi = + putPaddrHigh(mbx_dmabuf->phys + + sizeof(MAILBOX_t)); + sli_cfg_mbx->un.sli_config_emb1_subsys. + hbd[index].pa_lo = + putPaddrLow(mbx_dmabuf->phys + + sizeof(MAILBOX_t)); + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "3007 SLI_CONFIG(hbd)[%d], " + "bufLen:%d, addrHi:x%x, addrLo:x%x\n", + index, + bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, + &sli_cfg_mbx->un. + sli_config_emb1_subsys.hbd[index]), + sli_cfg_mbx->un.sli_config_emb1_subsys. + hbd[index].pa_hi, + sli_cfg_mbx->un.sli_config_emb1_subsys. + hbd[index].pa_lo); + + } else { + sli_cfg_mbx->un.sli_config_emb1_subsys. + hbd[index].pa_hi = + putPaddrHigh(ext_dmabuf->phys); + sli_cfg_mbx->un.sli_config_emb1_subsys. + hbd[index].pa_lo = + putPaddrLow(ext_dmabuf->phys); + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "3008 SLI_CONFIG(hbd)[%d], " + "bufLen:%d, addrHi:x%x, addrLo:x%x\n", + index, + bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, + &sli_cfg_mbx->un. + sli_config_emb1_subsys.hbd[index]), + sli_cfg_mbx->un.sli_config_emb1_subsys. + hbd[index].pa_hi, + sli_cfg_mbx->un.sli_config_emb1_subsys. + hbd[index].pa_lo); + } + } + return; +} + +/** + * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read + * @phba: Pointer to HBA context object. + * @job: Pointer to the job object. + * @nemb_tp: Enumerate of non-embedded mailbox command type. + * @dmabuf: Pointer to a DMA buffer descriptor. + * + * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with + * non-embedded external buffers. + **/ +static int +lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, + enum nemb_type nemb_tp, + struct lpfc_dmabuf *dmabuf) +{ + struct fc_bsg_request *bsg_request = job->request; + struct lpfc_sli_config_mbox *sli_cfg_mbx; + struct dfc_mbox_req *mbox_req; + struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf; + uint32_t ext_buf_cnt, ext_buf_index; + struct lpfc_dmabuf *ext_dmabuf = NULL; + struct bsg_job_data *dd_data = NULL; + LPFC_MBOXQ_t *pmboxq = NULL; + MAILBOX_t *pmb; + uint8_t *pmbx; + int rc, i; + + mbox_req = + (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; + + /* pointer to the start of mailbox command */ + sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; + + if (nemb_tp == nemb_mse) { + ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt, + &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr); + if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "2945 Handled SLI_CONFIG(mse) rd, " + "ext_buf_cnt(%d) out of range(%d)\n", + ext_buf_cnt, + LPFC_MBX_SLI_CONFIG_MAX_MSE); + rc = -ERANGE; + goto job_error; + } + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2941 Handled SLI_CONFIG(mse) rd, " + "ext_buf_cnt:%d\n", ext_buf_cnt); + } else { + /* sanity check on interface type for support */ + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < + LPFC_SLI_INTF_IF_TYPE_2) { + rc = -ENODEV; + goto job_error; + } + /* nemb_tp == nemb_hbd */ + ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count; + if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "2946 Handled SLI_CONFIG(hbd) rd, " + "ext_buf_cnt(%d) out of range(%d)\n", + ext_buf_cnt, + LPFC_MBX_SLI_CONFIG_MAX_HBD); + rc = -ERANGE; + goto job_error; + } + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2942 Handled SLI_CONFIG(hbd) rd, " + "ext_buf_cnt:%d\n", ext_buf_cnt); + } + + /* 
before dma descriptor setup */ + lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox, + sta_pre_addr, dmabuf, ext_buf_cnt); + + /* reject non-embedded mailbox command with none external buffer */ + if (ext_buf_cnt == 0) { + rc = -EPERM; + goto job_error; + } else if (ext_buf_cnt > 1) { + /* additional external read buffers */ + for (i = 1; i < ext_buf_cnt; i++) { + ext_dmabuf = lpfc_bsg_dma_page_alloc(phba); + if (!ext_dmabuf) { + rc = -ENOMEM; + goto job_error; + } + list_add_tail(&ext_dmabuf->list, + &phba->mbox_ext_buf_ctx.ext_dmabuf_list); + } + } + + /* bsg tracking structure */ + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + rc = -ENOMEM; + goto job_error; + } + + /* mailbox command structure for base driver */ + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) { + rc = -ENOMEM; + goto job_error; + } + memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); + + /* for the first external buffer */ + lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf); + + /* for the rest of external buffer descriptors if any */ + if (ext_buf_cnt > 1) { + ext_buf_index = 1; + list_for_each_entry_safe(curr_dmabuf, next_dmabuf, + &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) { + lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, + ext_buf_index, dmabuf, + curr_dmabuf); + ext_buf_index++; + } + } + + /* after dma descriptor setup */ + lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox, + sta_pos_addr, dmabuf, ext_buf_cnt); + + /* construct base driver mbox command */ + pmb = &pmboxq->u.mb; + pmbx = (uint8_t *)dmabuf->virt; + memcpy(pmb, pmbx, sizeof(*pmb)); + pmb->mbxOwner = OWN_HOST; + pmboxq->vport = phba->pport; + + /* multi-buffer handling context */ + phba->mbox_ext_buf_ctx.nembType = nemb_tp; + phba->mbox_ext_buf_ctx.mboxType = mbox_rd; + phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt; + phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag; + phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum; + phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf; + + /* callback for multi-buffer read mailbox command */ + pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl; + + /* context fields to callback function */ + pmboxq->ctx_buf = dd_data; + dd_data->type = TYPE_MBOX; + dd_data->set_job = job; + dd_data->context_un.mbox.pmboxq = pmboxq; + dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; + job->dd_data = dd_data; + + /* state change */ + phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT; + + /* + * Non-embedded mailbox subcommand data gets byte swapped here because + * the lower level driver code only does the first 64 mailbox words. + */ + if ((!bsg_bf_get(lpfc_mbox_hdr_emb, + &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) && + (nemb_tp == nemb_mse)) + lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)], + &pmbx[sizeof(MAILBOX_t)], + sli_cfg_mbx->un.sli_config_emb0_subsys. 
+ mse[0].buf_len); + + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); + if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2947 Issued SLI_CONFIG ext-buffer " + "mailbox command, rc:x%x\n", rc); + return SLI_CONFIG_HANDLED; + } + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "2948 Failed to issue SLI_CONFIG ext-buffer " + "mailbox command, rc:x%x\n", rc); + rc = -EPIPE; + +job_error: + if (pmboxq) + mempool_free(pmboxq, phba->mbox_mem_pool); + lpfc_bsg_dma_page_list_free(phba, + &phba->mbox_ext_buf_ctx.ext_dmabuf_list); + kfree(dd_data); + phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE; + return rc; +} + +/** + * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write + * @phba: Pointer to HBA context object. + * @job: Pointer to the job object. + * @nemb_tp: Enumerate of non-embedded mailbox command type. + * @dmabuf: Pointer to a DMA buffer descriptor. + * + * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with + * non-embedded external buffers. + **/ +static int +lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, + enum nemb_type nemb_tp, + struct lpfc_dmabuf *dmabuf) +{ + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + struct dfc_mbox_req *mbox_req; + struct lpfc_sli_config_mbox *sli_cfg_mbx; + uint32_t ext_buf_cnt; + struct bsg_job_data *dd_data = NULL; + LPFC_MBOXQ_t *pmboxq = NULL; + MAILBOX_t *pmb; + uint8_t *mbx; + int rc = SLI_CONFIG_NOT_HANDLED, i; + + mbox_req = + (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; + + /* pointer to the start of mailbox command */ + sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; + + if (nemb_tp == nemb_mse) { + ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt, + &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr); + if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "2953 Failed SLI_CONFIG(mse) wr, " + "ext_buf_cnt(%d) out of range(%d)\n", + ext_buf_cnt, + LPFC_MBX_SLI_CONFIG_MAX_MSE); + return -ERANGE; + } + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2949 Handled SLI_CONFIG(mse) wr, " + "ext_buf_cnt:%d\n", ext_buf_cnt); + } else { + /* sanity check on interface type for support */ + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < + LPFC_SLI_INTF_IF_TYPE_2) + return -ENODEV; + /* nemb_tp == nemb_hbd */ + ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count; + if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "2954 Failed SLI_CONFIG(hbd) wr, " + "ext_buf_cnt(%d) out of range(%d)\n", + ext_buf_cnt, + LPFC_MBX_SLI_CONFIG_MAX_HBD); + return -ERANGE; + } + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2950 Handled SLI_CONFIG(hbd) wr, " + "ext_buf_cnt:%d\n", ext_buf_cnt); + } + + /* before dma buffer descriptor setup */ + lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox, + sta_pre_addr, dmabuf, ext_buf_cnt); + + if (ext_buf_cnt == 0) + return -EPERM; + + /* for the first external buffer */ + lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf); + + /* after dma descriptor setup */ + lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox, + sta_pos_addr, dmabuf, ext_buf_cnt); + + /* log for looking forward */ + for (i = 1; i < ext_buf_cnt; i++) { + if (nemb_tp == nemb_mse) + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n", + i, 
sli_cfg_mbx->un.sli_config_emb0_subsys. + mse[i].buf_len); + else + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n", + i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, + &sli_cfg_mbx->un.sli_config_emb1_subsys. + hbd[i])); + } + + /* multi-buffer handling context */ + phba->mbox_ext_buf_ctx.nembType = nemb_tp; + phba->mbox_ext_buf_ctx.mboxType = mbox_wr; + phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt; + phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag; + phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum; + phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf; + + if (ext_buf_cnt == 1) { + /* bsg tracking structure */ + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + rc = -ENOMEM; + goto job_error; + } + + /* mailbox command structure for base driver */ + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) { + rc = -ENOMEM; + goto job_error; + } + memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); + pmb = &pmboxq->u.mb; + mbx = (uint8_t *)dmabuf->virt; + memcpy(pmb, mbx, sizeof(*pmb)); + pmb->mbxOwner = OWN_HOST; + pmboxq->vport = phba->pport; + + /* callback for multi-buffer read mailbox command */ + pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl; + + /* context fields to callback function */ + pmboxq->ctx_buf = dd_data; + dd_data->type = TYPE_MBOX; + dd_data->set_job = job; + dd_data->context_un.mbox.pmboxq = pmboxq; + dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx; + job->dd_data = dd_data; + + /* state change */ + + phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT; + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); + if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2955 Issued SLI_CONFIG ext-buffer " + "mailbox command, rc:x%x\n", rc); + return SLI_CONFIG_HANDLED; + } + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "2956 Failed to issue SLI_CONFIG ext-buffer " + "mailbox command, rc:x%x\n", rc); + rc = -EPIPE; + goto job_error; + } + + /* wait for additional external buffers */ + + bsg_reply->result = 0; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return SLI_CONFIG_HANDLED; + +job_error: + if (pmboxq) + mempool_free(pmboxq, phba->mbox_mem_pool); + kfree(dd_data); + + return rc; +} + +/** + * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer + * @phba: Pointer to HBA context object. + * @job: Pointer to the job object. + * @dmabuf: Pointer to a DMA buffer descriptor. + * + * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded + * external buffers, including both 0x9B with non-embedded MSEs and 0x9B + * with embedded subsystem 0x1 and opcodes with external HBDs. 
+ **/ +static int +lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job, + struct lpfc_dmabuf *dmabuf) +{ + struct lpfc_sli_config_mbox *sli_cfg_mbx; + uint32_t subsys; + uint32_t opcode; + int rc = SLI_CONFIG_NOT_HANDLED; + + /* state change on new multi-buffer pass-through mailbox command */ + phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST; + + sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; + + if (!bsg_bf_get(lpfc_mbox_hdr_emb, + &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) { + subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys, + &sli_cfg_mbx->un.sli_config_emb0_subsys); + opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode, + &sli_cfg_mbx->un.sli_config_emb0_subsys); + if (subsys == SLI_CONFIG_SUBSYS_FCOE) { + switch (opcode) { + case FCOE_OPCODE_READ_FCF: + case FCOE_OPCODE_GET_DPORT_RESULTS: + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2957 Handled SLI_CONFIG " + "subsys_fcoe, opcode:x%x\n", + opcode); + rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, + nemb_mse, dmabuf); + break; + case FCOE_OPCODE_ADD_FCF: + case FCOE_OPCODE_SET_DPORT_MODE: + case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE: + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2958 Handled SLI_CONFIG " + "subsys_fcoe, opcode:x%x\n", + opcode); + rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job, + nemb_mse, dmabuf); + break; + default: + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2959 Reject SLI_CONFIG " + "subsys_fcoe, opcode:x%x\n", + opcode); + rc = -EPERM; + break; + } + } else if (subsys == SLI_CONFIG_SUBSYS_COMN) { + switch (opcode) { + case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES: + case COMN_OPCODE_GET_CNTL_ATTRIBUTES: + case COMN_OPCODE_GET_PROFILE_CONFIG: + case COMN_OPCODE_SET_FEATURES: + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "3106 Handled SLI_CONFIG " + "subsys_comn, opcode:x%x\n", + opcode); + rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, + nemb_mse, dmabuf); + break; + default: + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "3107 Reject SLI_CONFIG " + "subsys_comn, opcode:x%x\n", + opcode); + rc = -EPERM; + break; + } + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2977 Reject SLI_CONFIG " + "subsys:x%d, opcode:x%x\n", + subsys, opcode); + rc = -EPERM; + } + } else { + subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys, + &sli_cfg_mbx->un.sli_config_emb1_subsys); + opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode, + &sli_cfg_mbx->un.sli_config_emb1_subsys); + if (subsys == SLI_CONFIG_SUBSYS_COMN) { + switch (opcode) { + case COMN_OPCODE_READ_OBJECT: + case COMN_OPCODE_READ_OBJECT_LIST: + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2960 Handled SLI_CONFIG " + "subsys_comn, opcode:x%x\n", + opcode); + rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, + nemb_hbd, dmabuf); + break; + case COMN_OPCODE_WRITE_OBJECT: + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2961 Handled SLI_CONFIG " + "subsys_comn, opcode:x%x\n", + opcode); + rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job, + nemb_hbd, dmabuf); + break; + default: + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2962 Not handled SLI_CONFIG " + "subsys_comn, opcode:x%x\n", + opcode); + rc = SLI_CONFIG_NOT_HANDLED; + break; + } + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2978 Not handled SLI_CONFIG " + "subsys:x%d, opcode:x%x\n", + subsys, opcode); + rc = SLI_CONFIG_NOT_HANDLED; + } + } + + /* state reset on not handled new multi-buffer mailbox command */ + if (rc != SLI_CONFIG_HANDLED) + phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE; + + return rc; +} + +/** + * lpfc_bsg_mbox_ext_abort - 
request to abort mbox command with ext buffers + * @phba: Pointer to HBA context object. + * + * This routine is for requesting to abort a pass-through mailbox command with + * multiple external buffers due to error condition. + **/ +static void +lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba) +{ + if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT) + phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS; + else + lpfc_bsg_mbox_ext_session_reset(phba); + return; +} + +/** + * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer + * @phba: Pointer to HBA context object. + * @job: Pointer to the job object. + * + * This routine extracts the next mailbox read external buffer back to + * user space through BSG. + **/ +static int +lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job) +{ + struct fc_bsg_reply *bsg_reply = job->reply; + struct lpfc_sli_config_mbox *sli_cfg_mbx; + struct lpfc_dmabuf *dmabuf; + uint8_t *pbuf; + uint32_t size; + uint32_t index; + + index = phba->mbox_ext_buf_ctx.seqNum; + phba->mbox_ext_buf_ctx.seqNum++; + + sli_cfg_mbx = (struct lpfc_sli_config_mbox *) + phba->mbox_ext_buf_ctx.mbx_dmabuf->virt; + + if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) { + size = bsg_bf_get(lpfc_mbox_sli_config_mse_len, + &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]); + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2963 SLI_CONFIG (mse) ext-buffer rd get " + "buffer[%d], size:%d\n", index, size); + } else { + size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, + &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]); + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2964 SLI_CONFIG (hbd) ext-buffer rd get " + "buffer[%d], size:%d\n", index, size); + } + if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list)) + return -EPIPE; + dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list, + struct lpfc_dmabuf, list); + list_del_init(&dmabuf->list); + + /* after dma buffer descriptor setup */ + lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType, + mbox_rd, dma_ebuf, sta_pos_addr, + dmabuf, index); + + pbuf = (uint8_t *)dmabuf->virt; + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + pbuf, size); + + lpfc_bsg_dma_page_free(phba, dmabuf); + + if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2965 SLI_CONFIG (hbd) ext-buffer rd mbox " + "command session done\n"); + lpfc_bsg_mbox_ext_session_reset(phba); + } + + bsg_reply->result = 0; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return SLI_CONFIG_HANDLED; +} + +/** + * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer + * @phba: Pointer to HBA context object. + * @job: Pointer to the job object. + * @dmabuf: Pointer to a DMA buffer descriptor. + * + * This routine sets up the next mailbox read external buffer obtained + * from user space through BSG. 
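
The external-buffer routines around this point implement one multi-buffer pass-through session: the first BSG job carries the SLI_CONFIG mailbox itself with extSeqNum set to 1, every following job carries exactly one external buffer with the same extMboxTag and a sequence number one higher than the last, and the mailbox is only issued to the port once seqNum reaches numBuf. A hypothetical three-buffer write session would therefore present request headers like the sketch below (illustrative only, not part of this patch; struct dfc_mbox_req is defined in lpfc_bsg.h later in this series, and the tag value is arbitrary):

    /* illustrative only -- the headers user space would send, in order */
    struct dfc_mbox_req wr_session[3] = {
    	{ .extMboxTag = 0x1234, .extSeqNum = 1 },	/* mailbox + ebuf[0] */
    	{ .extMboxTag = 0x1234, .extSeqNum = 2 },	/* ebuf[1] */
    	{ .extMboxTag = 0x1234, .extSeqNum = 3 },	/* ebuf[2]; command issued */
    };

Any job whose tag or sequence number breaks this pattern is treated as a broken pipe and the session is reset, as the checks in lpfc_bsg_handle_sli_cfg_ext below show.
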
+ **/ +static int +lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, + struct lpfc_dmabuf *dmabuf) +{ + struct fc_bsg_reply *bsg_reply = job->reply; + struct bsg_job_data *dd_data = NULL; + LPFC_MBOXQ_t *pmboxq = NULL; + MAILBOX_t *pmb; + enum nemb_type nemb_tp; + uint8_t *pbuf; + uint32_t size; + uint32_t index; + int rc; + + index = phba->mbox_ext_buf_ctx.seqNum; + phba->mbox_ext_buf_ctx.seqNum++; + nemb_tp = phba->mbox_ext_buf_ctx.nembType; + + pbuf = (uint8_t *)dmabuf->virt; + size = job->request_payload.payload_len; + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + pbuf, size); + + if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2966 SLI_CONFIG (mse) ext-buffer wr set " + "buffer[%d], size:%d\n", + phba->mbox_ext_buf_ctx.seqNum, size); + + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2967 SLI_CONFIG (hbd) ext-buffer wr set " + "buffer[%d], size:%d\n", + phba->mbox_ext_buf_ctx.seqNum, size); + + } + + /* set up external buffer descriptor and add to external buffer list */ + lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index, + phba->mbox_ext_buf_ctx.mbx_dmabuf, + dmabuf); + list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list); + + /* after write dma buffer */ + lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType, + mbox_wr, dma_ebuf, sta_pos_addr, + dmabuf, index); + + if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2968 SLI_CONFIG ext-buffer wr all %d " + "ebuffers received\n", + phba->mbox_ext_buf_ctx.numBuf); + + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + rc = -ENOMEM; + goto job_error; + } + + /* mailbox command structure for base driver */ + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) { + rc = -ENOMEM; + goto job_error; + } + memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); + pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt; + pmb = &pmboxq->u.mb; + memcpy(pmb, pbuf, sizeof(*pmb)); + pmb->mbxOwner = OWN_HOST; + pmboxq->vport = phba->pport; + + /* callback for multi-buffer write mailbox command */ + pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl; + + /* context fields to callback function */ + pmboxq->ctx_buf = dd_data; + dd_data->type = TYPE_MBOX; + dd_data->set_job = job; + dd_data->context_un.mbox.pmboxq = pmboxq; + dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf; + job->dd_data = dd_data; + + /* state change */ + phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT; + + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); + if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2969 Issued SLI_CONFIG ext-buffer " + "mailbox command, rc:x%x\n", rc); + return SLI_CONFIG_HANDLED; + } + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "2970 Failed to issue SLI_CONFIG ext-buffer " + "mailbox command, rc:x%x\n", rc); + rc = -EPIPE; + goto job_error; + } + + /* wait for additional external buffers */ + bsg_reply->result = 0; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return SLI_CONFIG_HANDLED; + +job_error: + if (pmboxq) + mempool_free(pmboxq, phba->mbox_mem_pool); + lpfc_bsg_dma_page_free(phba, dmabuf); + kfree(dd_data); + + return rc; +} + +/** + * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd + * @phba: Pointer to HBA context object. + * @job: Pointer to the job object. 
+ * @dmabuf: Pointer to a DMA buffer descriptor. + * + * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox + * command with multiple non-embedded external buffers. + **/ +static int +lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job, + struct lpfc_dmabuf *dmabuf) +{ + int rc; + + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2971 SLI_CONFIG buffer (type:x%x)\n", + phba->mbox_ext_buf_ctx.mboxType); + + if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) { + if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "2972 SLI_CONFIG rd buffer state " + "mismatch:x%x\n", + phba->mbox_ext_buf_ctx.state); + lpfc_bsg_mbox_ext_abort(phba); + return -EPIPE; + } + rc = lpfc_bsg_read_ebuf_get(phba, job); + if (rc == SLI_CONFIG_HANDLED) + lpfc_bsg_dma_page_free(phba, dmabuf); + } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */ + if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "2973 SLI_CONFIG wr buffer state " + "mismatch:x%x\n", + phba->mbox_ext_buf_ctx.state); + lpfc_bsg_mbox_ext_abort(phba); + return -EPIPE; + } + rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf); + } + return rc; +} + +/** + * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer + * @phba: Pointer to HBA context object. + * @job: Pointer to the job object. + * @dmabuf: Pointer to a DMA buffer descriptor. + * + * This routine checks and handles non-embedded multi-buffer SLI_CONFIG + * (0x9B) mailbox commands and external buffers. + **/ +static int +lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job, + struct lpfc_dmabuf *dmabuf) +{ + struct fc_bsg_request *bsg_request = job->request; + struct dfc_mbox_req *mbox_req; + int rc = SLI_CONFIG_NOT_HANDLED; + + mbox_req = + (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; + + /* mbox command with/without single external buffer */ + if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0) + return rc; + + /* mbox command and first external buffer */ + if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) { + if (mbox_req->extSeqNum == 1) { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2974 SLI_CONFIG mailbox: tag:%d, " + "seq:%d\n", mbox_req->extMboxTag, + mbox_req->extSeqNum); + rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf); + return rc; + } else + goto sli_cfg_ext_error; + } + + /* + * handle additional external buffers + */ + + /* check broken pipe conditions */ + if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag) + goto sli_cfg_ext_error; + if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf) + goto sli_cfg_ext_error; + if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1) + goto sli_cfg_ext_error; + + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2975 SLI_CONFIG mailbox external buffer: " + "extSta:x%x, tag:%d, seq:%d\n", + phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag, + mbox_req->extSeqNum); + rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf); + return rc; + +sli_cfg_ext_error: + /* all other cases, broken pipe */ + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "2976 SLI_CONFIG mailbox broken pipe: " + "ctxSta:x%x, ctxNumBuf:%d " + "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n", + phba->mbox_ext_buf_ctx.state, + phba->mbox_ext_buf_ctx.numBuf, + phba->mbox_ext_buf_ctx.mbxTag, + phba->mbox_ext_buf_ctx.seqNum, + mbox_req->extMboxTag, mbox_req->extSeqNum); + + lpfc_bsg_mbox_ext_session_reset(phba); + + return -EPIPE; +} + +/** + * 
lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app + * @phba: Pointer to HBA context object. + * @job: Pointer to the job object. + * @vport: Pointer to a vport object. + * + * Allocate a tracking object, mailbox command memory, get a mailbox + * from the mailbox pool, copy the caller mailbox command. + * + * If offline and the sli is active we need to poll for the command (port is + * being reset) and complete the job, otherwise issue the mailbox command and + * let our completion handler finish the command. + **/ +static int +lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job, + struct lpfc_vport *vport) +{ + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */ + MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */ + /* a 4k buffer to hold the mb and extended data from/to the bsg */ + uint8_t *pmbx = NULL; + struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */ + struct lpfc_dmabuf *dmabuf = NULL; + struct dfc_mbox_req *mbox_req; + struct READ_EVENT_LOG_VAR *rdEventLog; + uint32_t transmit_length, receive_length, mode; + struct lpfc_mbx_sli4_config *sli4_config; + struct lpfc_mbx_nembed_cmd *nembed_sge; + struct ulp_bde64 *bde; + uint8_t *ext = NULL; + int rc = 0; + uint8_t *from; + uint32_t size; + + /* in case no data is transferred */ + bsg_reply->reply_payload_rcv_len = 0; + + /* sanity check to protect driver */ + if (job->reply_payload.payload_len > BSG_MBOX_SIZE || + job->request_payload.payload_len > BSG_MBOX_SIZE) { + rc = -ERANGE; + goto job_done; + } + + /* + * Don't allow mailbox commands to be sent when blocked or when in + * the middle of discovery + */ + if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { + rc = -EAGAIN; + goto job_done; + } + + mbox_req = + (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; + + /* check if requested extended data lengths are valid */ + if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) || + (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) { + rc = -ERANGE; + goto job_done; + } + + dmabuf = lpfc_bsg_dma_page_alloc(phba); + if (!dmabuf || !dmabuf->virt) { + rc = -ENOMEM; + goto job_done; + } + + /* Get the mailbox command or external buffer from BSG */ + pmbx = (uint8_t *)dmabuf->virt; + size = job->request_payload.payload_len; + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, pmbx, size); + + /* Handle possible SLI_CONFIG with non-embedded payloads */ + if (phba->sli_rev == LPFC_SLI_REV4) { + rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf); + if (rc == SLI_CONFIG_HANDLED) + goto job_cont; + if (rc) + goto job_done; + /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */ + } + + rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport); + if (rc != 0) + goto job_done; /* must be negative */ + + /* allocate our bsg tracking structure */ + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2727 Failed allocation of dd_data\n"); + rc = -ENOMEM; + goto job_done; + } + + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) { + rc = -ENOMEM; + goto job_done; + } + memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); + + pmb = &pmboxq->u.mb; + memcpy(pmb, pmbx, sizeof(*pmb)); + pmb->mbxOwner = OWN_HOST; + pmboxq->vport = vport; + + /* If HBA encountered an error attention, allow only DUMP + * or RESTART mailbox commands until the HBA is restarted. 
+ */ + if (phba->pport->stopped && + pmb->mbxCommand != MBX_DUMP_MEMORY && + pmb->mbxCommand != MBX_RESTART && + pmb->mbxCommand != MBX_WRITE_VPARMS && + pmb->mbxCommand != MBX_WRITE_WWN) + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, + "2797 mbox: Issued mailbox cmd " + "0x%x while in stopped state.\n", + pmb->mbxCommand); + + /* extended mailbox commands will need an extended buffer */ + if (mbox_req->inExtWLen || mbox_req->outExtWLen) { + from = pmbx; + ext = from + sizeof(MAILBOX_t); + pmboxq->ctx_buf = ext; + pmboxq->in_ext_byte_len = + mbox_req->inExtWLen * sizeof(uint32_t); + pmboxq->out_ext_byte_len = + mbox_req->outExtWLen * sizeof(uint32_t); + pmboxq->mbox_offset_word = mbox_req->mbOffset; + } + + /* biu diag will need a kernel buffer to transfer the data + * allocate our own buffer and setup the mailbox command to + * use ours + */ + if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) { + transmit_length = pmb->un.varWords[1]; + receive_length = pmb->un.varWords[4]; + /* transmit length cannot be greater than receive length or + * mailbox extension size + */ + if ((transmit_length > receive_length) || + (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) { + rc = -ERANGE; + goto job_done; + } + pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh = + putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)); + pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow = + putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)); + + pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh = + putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t) + + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize); + pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = + putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t) + + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize); + } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) { + rdEventLog = &pmb->un.varRdEventLog; + receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize; + mode = bf_get(lpfc_event_log, rdEventLog); + + /* receive length cannot be greater than mailbox + * extension size + */ + if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) { + rc = -ERANGE; + goto job_done; + } + + /* mode zero uses a bde like biu diags command */ + if (mode == 0) { + pmb->un.varWords[3] = putPaddrLow(dmabuf->phys + + sizeof(MAILBOX_t)); + pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys + + sizeof(MAILBOX_t)); + } + } else if (phba->sli_rev == LPFC_SLI_REV4) { + /* Let type 4 (well known data) through because the data is + * returned in varwords[4-8] + * otherwise check the recieve length and fetch the buffer addr + */ + if ((pmb->mbxCommand == MBX_DUMP_MEMORY) && + (pmb->un.varDmp.type != DMP_WELL_KNOWN)) { + /* rebuild the command for sli4 using our own buffers + * like we do for biu diags + */ + receive_length = pmb->un.varWords[2]; + /* receive length cannot be greater than mailbox + * extension size + */ + if (receive_length == 0) { + rc = -ERANGE; + goto job_done; + } + pmb->un.varWords[3] = putPaddrLow(dmabuf->phys + + sizeof(MAILBOX_t)); + pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys + + sizeof(MAILBOX_t)); + } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) && + pmb->un.varUpdateCfg.co) { + bde = (struct ulp_bde64 *)&pmb->un.varWords[4]; + + /* bde size cannot be greater than mailbox ext size */ + if (bde->tus.f.bdeSize > + BSG_MBOX_SIZE - sizeof(MAILBOX_t)) { + rc = -ERANGE; + goto job_done; + } + bde->addrHigh = putPaddrHigh(dmabuf->phys + + sizeof(MAILBOX_t)); + bde->addrLow = putPaddrLow(dmabuf->phys + + sizeof(MAILBOX_t)); + } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) { + /* Handling non-embedded SLI_CONFIG mailbox 
command */ + sli4_config = &pmboxq->u.mqe.un.sli4_config; + if (!bf_get(lpfc_mbox_hdr_emb, + &sli4_config->header.cfg_mhdr)) { + /* rebuild the command for sli4 using our + * own buffers like we do for biu diags + */ + nembed_sge = (struct lpfc_mbx_nembed_cmd *) + &pmb->un.varWords[0]; + receive_length = nembed_sge->sge[0].length; + + /* receive length cannot be greater than + * mailbox extension size + */ + if ((receive_length == 0) || + (receive_length > + BSG_MBOX_SIZE - sizeof(MAILBOX_t))) { + rc = -ERANGE; + goto job_done; + } + + nembed_sge->sge[0].pa_hi = + putPaddrHigh(dmabuf->phys + + sizeof(MAILBOX_t)); + nembed_sge->sge[0].pa_lo = + putPaddrLow(dmabuf->phys + + sizeof(MAILBOX_t)); + } + } + } + + dd_data->context_un.mbox.dmabuffers = dmabuf; + + /* setup wake call as IOCB callback */ + pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl; + + /* setup context field to pass wait_queue pointer to wake function */ + pmboxq->ctx_ndlp = dd_data; + dd_data->type = TYPE_MBOX; + dd_data->set_job = job; + dd_data->context_un.mbox.pmboxq = pmboxq; + dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; + dd_data->context_un.mbox.ext = ext; + dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; + dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen; + dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen; + job->dd_data = dd_data; + + if ((vport->fc_flag & FC_OFFLINE_MODE) || + (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; + goto job_done; + } + + /* job finished, copy the data */ + memcpy(pmbx, pmb, sizeof(*pmb)); + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + pmbx, size); + /* not waiting mbox already done */ + rc = 0; + goto job_done; + } + + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); + if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) + return 1; /* job started */ + +job_done: + /* common exit for error or job completed inline */ + if (pmboxq) + mempool_free(pmboxq, phba->mbox_mem_pool); + lpfc_bsg_dma_page_free(phba, dmabuf); + kfree(dd_data); + +job_cont: + return rc; +} + +/** + * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command + * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX. 
+ **/ +static int +lpfc_bsg_mbox_cmd(struct bsg_job *job) +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + struct lpfc_hba *phba = vport->phba; + struct dfc_mbox_req *mbox_req; + int rc = 0; + + /* mix-and-match backward compatibility */ + bsg_reply->reply_payload_rcv_len = 0; + if (job->request_len < + sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2737 Mix-and-match backward compatibility " + "between MBOX_REQ old size:%d and " + "new request size:%d\n", + (int)(job->request_len - + sizeof(struct fc_bsg_request)), + (int)sizeof(struct dfc_mbox_req)); + mbox_req = (struct dfc_mbox_req *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + mbox_req->extMboxTag = 0; + mbox_req->extSeqNum = 0; + } + + rc = lpfc_bsg_issue_mbox(phba, job, vport); + + if (rc == 0) { + /* job done */ + bsg_reply->result = 0; + job->dd_data = NULL; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + } else if (rc == 1) + /* job submitted, will complete later*/ + rc = 0; /* return zero, no error */ + else { + /* some error occurred */ + bsg_reply->result = rc; + job->dd_data = NULL; + } + + return rc; +} + +static int +lpfc_forced_link_speed(struct bsg_job *job) +{ + struct Scsi_Host *shost = fc_bsg_to_shost(job); + struct lpfc_vport *vport = shost_priv(shost); + struct lpfc_hba *phba = vport->phba; + struct fc_bsg_reply *bsg_reply = job->reply; + struct forced_link_speed_support_reply *forced_reply; + int rc = 0; + + if (job->request_len < + sizeof(struct fc_bsg_request) + + sizeof(struct get_forced_link_speed_support)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "0048 Received FORCED_LINK_SPEED request " + "below minimum size\n"); + rc = -EINVAL; + goto job_error; + } + + forced_reply = (struct forced_link_speed_support_reply *) + bsg_reply->reply_data.vendor_reply.vendor_rsp; + + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "0049 Received FORCED_LINK_SPEED reply below " + "minimum size\n"); + rc = -EINVAL; + goto job_error; + } + + forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED) + ? LPFC_FORCED_LINK_SPEED_SUPPORTED + : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED; +job_error: + bsg_reply->result = rc; + if (rc == 0) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rc; +} + +/** + * lpfc_check_fwlog_support: Check FW log support on the adapter + * @phba: Pointer to HBA context object. + * + * Check if FW Logging support by the adapter + **/ +int +lpfc_check_fwlog_support(struct lpfc_hba *phba) +{ + struct lpfc_ras_fwlog *ras_fwlog = NULL; + + ras_fwlog = &phba->ras_fwlog; + + if (!ras_fwlog->ras_hwsupport) + return -EACCES; + else if (!ras_fwlog->ras_enabled) + return -EPERM; + else + return 0; +} + +/** + * lpfc_bsg_get_ras_config: Get RAS configuration settings + * @job: fc_bsg_job to handle + * + * Get RAS configuration values set. 
+ **/ +static int +lpfc_bsg_get_ras_config(struct bsg_job *job) +{ + struct Scsi_Host *shost = fc_bsg_to_shost(job); + struct lpfc_vport *vport = shost_priv(shost); + struct fc_bsg_reply *bsg_reply = job->reply; + struct lpfc_hba *phba = vport->phba; + struct lpfc_bsg_get_ras_config_reply *ras_reply; + struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; + int rc = 0; + + if (job->request_len < + sizeof(struct fc_bsg_request) + + sizeof(struct lpfc_bsg_ras_req)) { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "6192 FW_LOG request received " + "below minimum size\n"); + rc = -EINVAL; + goto ras_job_error; + } + + /* Check FW log status */ + rc = lpfc_check_fwlog_support(phba); + if (rc) + goto ras_job_error; + + ras_reply = (struct lpfc_bsg_get_ras_config_reply *) + bsg_reply->reply_data.vendor_reply.vendor_rsp; + + /* Current logging state */ + spin_lock_irq(&phba->hbalock); + if (ras_fwlog->state == ACTIVE) + ras_reply->state = LPFC_RASLOG_STATE_RUNNING; + else + ras_reply->state = LPFC_RASLOG_STATE_STOPPED; + spin_unlock_irq(&phba->hbalock); + + ras_reply->log_level = phba->ras_fwlog.fw_loglevel; + ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize; + +ras_job_error: + /* make error code available to userspace */ + bsg_reply->result = rc; + + /* complete the job back to userspace */ + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rc; +} + +/** + * lpfc_bsg_set_ras_config: Set FW logging parameters + * @job: fc_bsg_job to handle + * + * Set log-level parameters for FW-logging in host memory + **/ +static int +lpfc_bsg_set_ras_config(struct bsg_job *job) +{ + struct Scsi_Host *shost = fc_bsg_to_shost(job); + struct lpfc_vport *vport = shost_priv(shost); + struct lpfc_hba *phba = vport->phba; + struct lpfc_bsg_set_ras_config_req *ras_req; + struct fc_bsg_request *bsg_request = job->request; + struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; + struct fc_bsg_reply *bsg_reply = job->reply; + uint8_t action = 0, log_level = 0; + int rc = 0, action_status = 0; + + if (job->request_len < + sizeof(struct fc_bsg_request) + + sizeof(struct lpfc_bsg_set_ras_config_req)) { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "6182 Received RAS_LOG request " + "below minimum size\n"); + rc = -EINVAL; + goto ras_job_error; + } + + /* Check FW log status */ + rc = lpfc_check_fwlog_support(phba); + if (rc) + goto ras_job_error; + + ras_req = (struct lpfc_bsg_set_ras_config_req *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + action = ras_req->action; + log_level = ras_req->log_level; + + if (action == LPFC_RASACTION_STOP_LOGGING) { + /* Check if already disabled */ + spin_lock_irq(&phba->hbalock); + if (ras_fwlog->state != ACTIVE) { + spin_unlock_irq(&phba->hbalock); + rc = -ESRCH; + goto ras_job_error; + } + spin_unlock_irq(&phba->hbalock); + + /* Disable logging */ + lpfc_ras_stop_fwlog(phba); + } else { + /*action = LPFC_RASACTION_START_LOGGING*/ + + /* Even though FW-logging is active re-initialize + * FW-logging with new log-level. Return status + * "Logging already Running" to caller. 
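
Put concretely, a request like the hypothetical sketch below (values mirrored from lpfc_bsg.h; the log level chosen here is arbitrary) re-arms firmware logging at a new verbosity, and a caller whose logging was already active gets -EINPROGRESS back to indicate the log was re-initialized rather than newly started:

    /* hypothetical request passed in vendor_cmd[]; log_level is illustrative */
    struct lpfc_bsg_set_ras_config_req ras_req_example = {
    	.command   = LPFC_BSG_VENDOR_RAS_SET_CONFIG,
    	.action    = LPFC_RASACTION_START_LOGGING,
    	.log_level = 4,
    };
    /* result 0: logging started; -EINPROGRESS: it was already running and
     * has been re-initialized with the new log level */
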
+ **/ + spin_lock_irq(&phba->hbalock); + if (ras_fwlog->state != INACTIVE) + action_status = -EINPROGRESS; + spin_unlock_irq(&phba->hbalock); + + /* Enable logging */ + rc = lpfc_sli4_ras_fwlog_init(phba, log_level, + LPFC_RAS_ENABLE_LOGGING); + if (rc) { + rc = -EINVAL; + goto ras_job_error; + } + + /* Check if FW-logging is re-initialized */ + if (action_status == -EINPROGRESS) + rc = action_status; + } +ras_job_error: + /* make error code available to userspace */ + bsg_reply->result = rc; + + /* complete the job back to userspace */ + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return rc; +} + +/** + * lpfc_bsg_get_ras_lwpd: Get log write position data + * @job: fc_bsg_job to handle + * + * Get Offset/Wrap count of the log message written + * in host memory + **/ +static int +lpfc_bsg_get_ras_lwpd(struct bsg_job *job) +{ + struct Scsi_Host *shost = fc_bsg_to_shost(job); + struct lpfc_vport *vport = shost_priv(shost); + struct lpfc_bsg_get_ras_lwpd *ras_reply; + struct lpfc_hba *phba = vport->phba; + struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; + struct fc_bsg_reply *bsg_reply = job->reply; + u32 *lwpd_ptr = NULL; + int rc = 0; + + rc = lpfc_check_fwlog_support(phba); + if (rc) + goto ras_job_error; + + if (job->request_len < + sizeof(struct fc_bsg_request) + + sizeof(struct lpfc_bsg_ras_req)) { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "6183 Received RAS_LOG request " + "below minimum size\n"); + rc = -EINVAL; + goto ras_job_error; + } + + ras_reply = (struct lpfc_bsg_get_ras_lwpd *) + bsg_reply->reply_data.vendor_reply.vendor_rsp; + + if (!ras_fwlog->lwpd.virt) { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "6193 Restart FW Logging\n"); + rc = -EINVAL; + goto ras_job_error; + } + + /* Get lwpd offset */ + lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt); + ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff); + + /* Get wrap count */ + ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff); + +ras_job_error: + /* make error code available to userspace */ + bsg_reply->result = rc; + + /* complete the job back to userspace */ + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return rc; +} + +/** + * lpfc_bsg_get_ras_fwlog: Read FW log + * @job: fc_bsg_job to handle + * + * Copy the FW log into the passed buffer. 
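
The read below treats the firmware log as a list of fixed-size DMA chunks, so the caller's byte offset first selects a chunk by its buffer_tag and then a position inside that chunk. A minimal sketch of that mapping, using the same arithmetic as the function body that follows (helper name is illustrative only):

    /* illustrative helper: map a log byte offset to (chunk tag, byte within chunk) */
    static inline void
    example_fwlog_locate(u32 read_offset, u32 *chunk_tag, u32 *chunk_offset)
    {
    	*chunk_tag    = read_offset / LPFC_RAS_MAX_ENTRY_SIZE; /* which dmabuf */
    	*chunk_offset = read_offset % LPFC_RAS_MAX_ENTRY_SIZE; /* byte inside it */
    }
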
+ **/ +static int +lpfc_bsg_get_ras_fwlog(struct bsg_job *job) +{ + struct Scsi_Host *shost = fc_bsg_to_shost(job); + struct lpfc_vport *vport = shost_priv(shost); + struct lpfc_hba *phba = vport->phba; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + struct lpfc_bsg_get_fwlog_req *ras_req; + u32 rd_offset, rd_index, offset; + void *src, *fwlog_buff; + struct lpfc_ras_fwlog *ras_fwlog = NULL; + struct lpfc_dmabuf *dmabuf, *next; + int rc = 0; + + ras_fwlog = &phba->ras_fwlog; + + rc = lpfc_check_fwlog_support(phba); + if (rc) + goto ras_job_error; + + /* Logging to be stopped before reading */ + spin_lock_irq(&phba->hbalock); + if (ras_fwlog->state == ACTIVE) { + spin_unlock_irq(&phba->hbalock); + rc = -EINPROGRESS; + goto ras_job_error; + } + spin_unlock_irq(&phba->hbalock); + + if (job->request_len < + sizeof(struct fc_bsg_request) + + sizeof(struct lpfc_bsg_get_fwlog_req)) { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "6184 Received RAS_LOG request " + "below minimum size\n"); + rc = -EINVAL; + goto ras_job_error; + } + + ras_req = (struct lpfc_bsg_get_fwlog_req *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + rd_offset = ras_req->read_offset; + + /* Allocate memory to read fw log*/ + fwlog_buff = vmalloc(ras_req->read_size); + if (!fwlog_buff) { + rc = -ENOMEM; + goto ras_job_error; + } + + rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE); + offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE); + + list_for_each_entry_safe(dmabuf, next, + &ras_fwlog->fwlog_buff_list, list) { + + if (dmabuf->buffer_tag < rd_index) + continue; + + src = dmabuf->virt + offset; + memcpy(fwlog_buff, src, ras_req->read_size); + break; + } + + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + fwlog_buff, ras_req->read_size); + + vfree(fwlog_buff); + +ras_job_error: + bsg_reply->result = rc; + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return rc; +} + +static int +lpfc_get_trunk_info(struct bsg_job *job) +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct lpfc_hba *phba = vport->phba; + struct fc_bsg_reply *bsg_reply = job->reply; + struct lpfc_trunk_info *event_reply; + int rc = 0; + + if (job->request_len < + sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) { + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "2744 Received GET TRUNK _INFO request below " + "minimum size\n"); + rc = -EINVAL; + goto job_error; + } + + event_reply = (struct lpfc_trunk_info *) + bsg_reply->reply_data.vendor_reply.vendor_rsp; + + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2728 Received GET TRUNK _INFO reply below " + "minimum size\n"); + rc = -EINVAL; + goto job_error; + } + if (event_reply == NULL) { + rc = -EINVAL; + goto job_error; + } + + bsg_bf_set(lpfc_trunk_info_link_status, event_reply, + (phba->link_state >= LPFC_LINK_UP) ? 1 : 0); + + bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply, + (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0); + + bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply, + (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0); + + bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply, + (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0); + + bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply, + (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 
1 : 0); + + bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply, + bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)); + + bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply, + bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)); + + bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply, + bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)); + + bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply, + bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)); + + event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000; + event_reply->logical_speed = + phba->sli4_hba.link_state.logical_speed / 1000; +job_error: + bsg_reply->result = rc; + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rc; + +} + +static int +lpfc_get_cgnbuf_info(struct bsg_job *job) +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct lpfc_hba *phba = vport->phba; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + struct get_cgnbuf_info_req *cgnbuf_req; + struct lpfc_cgn_info *cp; + uint8_t *cgn_buff; + int size, cinfosz; + int rc = 0; + + if (job->request_len < sizeof(struct fc_bsg_request) + + sizeof(struct get_cgnbuf_info_req)) { + rc = -ENOMEM; + goto job_exit; + } + + if (!phba->sli4_hba.pc_sli4_params.cmf) { + rc = -ENOENT; + goto job_exit; + } + + if (!phba->cgn_i || !phba->cgn_i->virt) { + rc = -ENOENT; + goto job_exit; + } + + cp = phba->cgn_i->virt; + if (cp->cgn_info_version < LPFC_CGN_INFO_V3) { + rc = -EPERM; + goto job_exit; + } + + cgnbuf_req = (struct get_cgnbuf_info_req *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + + /* For reset or size == 0 */ + bsg_reply->reply_payload_rcv_len = 0; + + if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) { + lpfc_init_congestion_stat(phba); + goto job_exit; + } + + /* We don't want to include the CRC at the end */ + cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t); + + size = cgnbuf_req->read_size; + if (!size) + goto job_exit; + + if (size < cinfosz) { + /* Just copy back what we can */ + cinfosz = size; + rc = -E2BIG; + } + + /* Allocate memory to read congestion info */ + cgn_buff = vmalloc(cinfosz); + if (!cgn_buff) { + rc = -ENOMEM; + goto job_exit; + } + + memcpy(cgn_buff, cp, cinfosz); + + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + cgn_buff, cinfosz); + + vfree(cgn_buff); + +job_exit: + bsg_reply->result = rc; + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + else + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "2724 GET CGNBUF error: %d\n", rc); + return rc; +} + +/** + * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job + * @job: fc_bsg_job to handle + **/ +static int +lpfc_bsg_hst_vendor(struct bsg_job *job) +{ + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; + int rc; + + switch (command) { + case LPFC_BSG_VENDOR_SET_CT_EVENT: + rc = lpfc_bsg_hba_set_event(job); + break; + case LPFC_BSG_VENDOR_GET_CT_EVENT: + rc = lpfc_bsg_hba_get_event(job); + break; + case LPFC_BSG_VENDOR_SEND_MGMT_RESP: + rc = lpfc_bsg_send_mgmt_rsp(job); + break; + case LPFC_BSG_VENDOR_DIAG_MODE: + rc = lpfc_bsg_diag_loopback_mode(job); + break; + case LPFC_BSG_VENDOR_DIAG_MODE_END: + rc = lpfc_sli4_bsg_diag_mode_end(job); + break; + case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK: + rc = lpfc_bsg_diag_loopback_run(job); + break; + case 
LPFC_BSG_VENDOR_LINK_DIAG_TEST: + rc = lpfc_sli4_bsg_link_diag_test(job); + break; + case LPFC_BSG_VENDOR_GET_MGMT_REV: + rc = lpfc_bsg_get_dfc_rev(job); + break; + case LPFC_BSG_VENDOR_MBOX: + rc = lpfc_bsg_mbox_cmd(job); + break; + case LPFC_BSG_VENDOR_FORCED_LINK_SPEED: + rc = lpfc_forced_link_speed(job); + break; + case LPFC_BSG_VENDOR_RAS_GET_LWPD: + rc = lpfc_bsg_get_ras_lwpd(job); + break; + case LPFC_BSG_VENDOR_RAS_GET_FWLOG: + rc = lpfc_bsg_get_ras_fwlog(job); + break; + case LPFC_BSG_VENDOR_RAS_GET_CONFIG: + rc = lpfc_bsg_get_ras_config(job); + break; + case LPFC_BSG_VENDOR_RAS_SET_CONFIG: + rc = lpfc_bsg_set_ras_config(job); + break; + case LPFC_BSG_VENDOR_GET_TRUNK_INFO: + rc = lpfc_get_trunk_info(job); + break; + case LPFC_BSG_VENDOR_GET_CGNBUF_INFO: + rc = lpfc_get_cgnbuf_info(job); + break; + default: + rc = -EINVAL; + bsg_reply->reply_payload_rcv_len = 0; + /* make error code available to userspace */ + bsg_reply->result = rc; + break; + } + + return rc; +} + +/** + * lpfc_bsg_request - handle a bsg request from the FC transport + * @job: bsg_job to handle + **/ +int +lpfc_bsg_request(struct bsg_job *job) +{ + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + uint32_t msgcode; + int rc; + + msgcode = bsg_request->msgcode; + switch (msgcode) { + case FC_BSG_HST_VENDOR: + rc = lpfc_bsg_hst_vendor(job); + break; + case FC_BSG_RPT_ELS: + rc = lpfc_bsg_rport_els(job); + break; + case FC_BSG_RPT_CT: + rc = lpfc_bsg_send_mgmt_cmd(job); + break; + default: + rc = -EINVAL; + bsg_reply->reply_payload_rcv_len = 0; + /* make error code available to userspace */ + bsg_reply->result = rc; + break; + } + + return rc; +} + +/** + * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport + * @job: bsg_job that has timed out + * + * This function just aborts the job's IOCB. The aborted IOCB will return to + * the waiting function which will handle passing the error back to userspace + **/ +int +lpfc_bsg_timeout(struct bsg_job *job) +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb; + struct lpfc_sli_ring *pring; + struct bsg_job_data *dd_data; + unsigned long flags; + int rc = 0; + LIST_HEAD(completions); + struct lpfc_iocbq *check_iocb, *next_iocb; + + pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return -EIO; + + /* if job's driver data is NULL, the command completed or is in the + * the process of completing. In this case, return status to request + * so the timeout is retried. This avoids double completion issues + * and the request will be pulled off the timer queue when the + * command's completion handler executes. Otherwise, prevent the + * command's completion handler from executing the job done callback + * and continue processing to abort the outstanding the command. + */ + + spin_lock_irqsave(&phba->ct_ev_lock, flags); + dd_data = (struct bsg_job_data *)job->dd_data; + if (dd_data) { + dd_data->set_job = NULL; + job->dd_data = NULL; + } else { + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + return -EAGAIN; + } + + switch (dd_data->type) { + case TYPE_IOCB: + /* Check to see if IOCB was issued to the port or not. If not, + * remove it from the txq queue and call cancel iocbs. 
+ * Otherwise, call abort iotag + */ + cmdiocb = dd_data->context_un.iocb.cmdiocbq; + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O abort window is still open */ + if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) { + spin_unlock_irqrestore(&phba->hbalock, flags); + return -EAGAIN; + } + list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, + list) { + if (check_iocb == cmdiocb) { + list_move_tail(&check_iocb->list, &completions); + break; + } + } + if (list_empty(&completions)) + lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (!list_empty(&completions)) { + lpfc_sli_cancel_iocbs(phba, &completions, + IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); + } + break; + + case TYPE_EVT: + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + break; + + case TYPE_MBOX: + /* Update the ext buf ctx state if needed */ + + if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT) + phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS; + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + break; + default: + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + break; + } + + /* scsi transport fc fc_bsg_job_timeout expects a zero return code, + * otherwise an error message will be displayed on the console + * so always return success (zero) + */ + return rc; +} diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h new file mode 100644 index 000000000..3c04ca2d7 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_bsg.h @@ -0,0 +1,387 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2010-2015 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ +/* bsg definitions + * No pointers to user data are allowed, all application buffers and sizes will + * derived through the bsg interface. + * + * These are the vendor unique structures passed in using the bsg + * FC_BSG_HST_VENDOR message code type. 
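
For orientation, the sketch below (not part of this patch) shows how a management application would typically hand one of these vendor-unique structures to the driver: the request starts with a struct fc_bsg_request whose msgcode is FC_BSG_HST_VENDOR and whose vendor_cmd words carry the LPFC command, submitted through the host's bsg node with an sg_io_v4 SG_IO ioctl. The bsg node name, the GET_MGMT_REV value mirrored from this header, and the reply sizing are assumptions for illustration.

    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/bsg.h>
    #include <scsi/sg.h>
    #include <scsi/scsi_bsg_fc.h>

    /* mirrored from lpfc_bsg.h; the driver header is not exported as uapi */
    #define LPFC_BSG_VENDOR_GET_MGMT_REV	6

    static int lpfc_get_mgmt_rev_example(const char *bsg_node)
    {
    	/* request = fc_bsg_request + one vendor command word */
    	uint8_t req_buf[sizeof(struct fc_bsg_request) + sizeof(uint32_t)];
    	/* reply = fc_bsg_reply + MgmtRevInfo (two 32-bit words) */
    	uint8_t rsp_buf[sizeof(struct fc_bsg_reply) + 2 * sizeof(uint32_t)];
    	struct fc_bsg_request *req = (struct fc_bsg_request *)req_buf;
    	struct sg_io_v4 sgio;
    	int fd, rc;

    	memset(req_buf, 0, sizeof(req_buf));
    	memset(rsp_buf, 0, sizeof(rsp_buf));
    	memset(&sgio, 0, sizeof(sgio));

    	/* vendor-unique request: first word is the LPFC vendor command code */
    	req->msgcode = FC_BSG_HST_VENDOR;
    	req->rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_GET_MGMT_REV;

    	fd = open(bsg_node, O_RDWR);		/* e.g. "/dev/bsg/fc_host0" */
    	if (fd < 0)
    		return -1;

    	sgio.guard = 'Q';
    	sgio.protocol = BSG_PROTOCOL_SCSI;
    	sgio.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
    	sgio.request = (uintptr_t)req;
    	sgio.request_len = sizeof(req_buf);
    	sgio.response = (uintptr_t)rsp_buf;
    	sgio.max_response_len = sizeof(rsp_buf);

    	rc = ioctl(fd, SG_IO, &sgio);		/* reply lands in rsp_buf */
    	close(fd);
    	return rc;
    }
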
+ */ +#define LPFC_BSG_VENDOR_SET_CT_EVENT 1 +#define LPFC_BSG_VENDOR_GET_CT_EVENT 2 +#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3 +#define LPFC_BSG_VENDOR_DIAG_MODE 4 +#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5 +#define LPFC_BSG_VENDOR_GET_MGMT_REV 6 +#define LPFC_BSG_VENDOR_MBOX 7 +#define LPFC_BSG_VENDOR_DIAG_MODE_END 10 +#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11 +#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14 +#define LPFC_BSG_VENDOR_RAS_GET_LWPD 16 +#define LPFC_BSG_VENDOR_RAS_GET_FWLOG 17 +#define LPFC_BSG_VENDOR_RAS_GET_CONFIG 18 +#define LPFC_BSG_VENDOR_RAS_SET_CONFIG 19 +#define LPFC_BSG_VENDOR_GET_TRUNK_INFO 20 +#define LPFC_BSG_VENDOR_GET_CGNBUF_INFO 21 + +struct set_ct_event { + uint32_t command; + uint32_t type_mask; + uint32_t ev_req_id; + uint32_t ev_reg_id; +}; + +struct get_ct_event { + uint32_t command; + uint32_t ev_reg_id; + uint32_t ev_req_id; +}; + +struct get_ct_event_reply { + uint32_t immed_data; + uint32_t type; +}; + +struct send_mgmt_resp { + uint32_t command; + uint32_t tag; +}; + + +#define DISABLE_LOOP_BACK 0x0 /* disables loop back */ +#define INTERNAL_LOOP_BACK 0x1 /* adapter short cuts the loop internally */ +#define EXTERNAL_LOOP_BACK 0x2 /* requires an external loopback plug */ + +struct diag_mode_set { + uint32_t command; + uint32_t type; + uint32_t timeout; + uint32_t physical_link; +}; + +struct sli4_link_diag { + uint32_t command; + uint32_t timeout; + uint32_t test_id; + uint32_t loops; + uint32_t test_version; + uint32_t error_action; +}; + +struct diag_mode_test { + uint32_t command; +}; + +struct diag_status { + uint32_t mbox_status; + uint32_t shdr_status; + uint32_t shdr_add_status; +}; + +#define LPFC_WWNN_TYPE 0 +#define LPFC_WWPN_TYPE 1 + +struct get_mgmt_rev { + uint32_t command; +}; + +#define MANAGEMENT_MAJOR_REV 1 +#define MANAGEMENT_MINOR_REV 1 + +/* the MgmtRevInfo structure */ +struct MgmtRevInfo { + uint32_t a_Major; + uint32_t a_Minor; +}; + +struct get_mgmt_rev_reply { + struct MgmtRevInfo info; +}; + +#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */ + +/* BSG mailbox request header */ +struct dfc_mbox_req { + uint32_t command; + uint32_t mbOffset; + uint32_t inExtWLen; + uint32_t outExtWLen; + uint32_t extMboxTag; + uint32_t extSeqNum; +}; + +/* + * macros and data structures for handling sli-config mailbox command + * pass-through support, this header file is shared between user and + * kernel spaces, note the set of macros are duplicates from lpfc_hw4.h, + * with macro names prefixed with bsg_, as the macros defined in + * lpfc_hw4.h are not accessible from user space. + */ + +/* Macros to deal with bit fields. Each bit field must have 3 #defines + * associated with it (_SHIFT, _MASK, and _WORD). + * EG. For a bit field that is in the 7th bit of the "field4" field of a + * structure and is 2 bits in size the following #defines must exist: + * struct temp { + * uint32_t field1; + * uint32_t field2; + * uint32_t field3; + * uint32_t field4; + * #define example_bit_field_SHIFT 7 + * #define example_bit_field_MASK 0x03 + * #define example_bit_field_WORD field4 + * uint32_t field5; + * }; + * Then the macros below may be used to get or set the value of that field. + * EG. 
To get the value of the bit field from the above example: + * struct temp t1; + * value = bsg_bf_get(example_bit_field, &t1); + * And then to set that bit field: + * bsg_bf_set(example_bit_field, &t1, 2); + * Or clear that bit field: + * bsg_bf_set(example_bit_field, &t1, 0); + */ +#define bsg_bf_get_le32(name, ptr) \ + ((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK) +#define bsg_bf_get(name, ptr) \ + (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK) +#define bsg_bf_set_le32(name, ptr, value) \ + ((ptr)->name##_WORD = cpu_to_le32(((((value) & \ + name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \ + ~(name##_MASK << name##_SHIFT))))) +#define bsg_bf_set(name, ptr, value) \ + ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \ + ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))) + +/* + * The sli_config structure specified here is based on the following + * restriction: + * + * -- SLI_CONFIG EMB=0, carrying MSEs, will carry subcommands without + * carrying HBD. + * -- SLI_CONFIG EMB=1, not carrying MSE, will carry subcommands with or + * without carrying HBDs. + */ + +struct lpfc_sli_config_mse { + uint32_t pa_lo; + uint32_t pa_hi; + uint32_t buf_len; +#define lpfc_mbox_sli_config_mse_len_SHIFT 0 +#define lpfc_mbox_sli_config_mse_len_MASK 0xffffff +#define lpfc_mbox_sli_config_mse_len_WORD buf_len +}; + +struct lpfc_sli_config_hbd { + uint32_t buf_len; +#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0 +#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff +#define lpfc_mbox_sli_config_ecmn_hbd_len_WORD buf_len + uint32_t pa_lo; + uint32_t pa_hi; +}; + +struct lpfc_sli_config_hdr { + uint32_t word1; +#define lpfc_mbox_hdr_emb_SHIFT 0 +#define lpfc_mbox_hdr_emb_MASK 0x00000001 +#define lpfc_mbox_hdr_emb_WORD word1 +#define lpfc_mbox_hdr_mse_cnt_SHIFT 3 +#define lpfc_mbox_hdr_mse_cnt_MASK 0x0000001f +#define lpfc_mbox_hdr_mse_cnt_WORD word1 + uint32_t payload_length; + uint32_t tag_lo; + uint32_t tag_hi; + uint32_t reserved5; +}; + +#define LPFC_CSF_BOOT_DEV 0x1D +#define LPFC_CSF_QUERY 0 +#define LPFC_CSF_SAVE 1 + +struct lpfc_sli_config_emb0_subsys { + struct lpfc_sli_config_hdr sli_config_hdr; +#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19 + struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE]; + uint32_t padding; + uint32_t word64; +#define lpfc_emb0_subcmnd_opcode_SHIFT 0 +#define lpfc_emb0_subcmnd_opcode_MASK 0xff +#define lpfc_emb0_subcmnd_opcode_WORD word64 +#define lpfc_emb0_subcmnd_subsys_SHIFT 8 +#define lpfc_emb0_subcmnd_subsys_MASK 0xff +#define lpfc_emb0_subcmnd_subsys_WORD word64 +/* Subsystem FCOE (0x0C) OpCodes */ +#define SLI_CONFIG_SUBSYS_FCOE 0x0C +#define FCOE_OPCODE_READ_FCF 0x08 +#define FCOE_OPCODE_ADD_FCF 0x09 +#define FCOE_OPCODE_SET_DPORT_MODE 0x27 +#define FCOE_OPCODE_GET_DPORT_RESULTS 0x28 + uint32_t timeout; /* comn_set_feature timeout */ + uint32_t request_length; /* comn_set_feature request len */ + uint32_t version; /* comn_set_feature version */ + uint32_t csf_feature; /* comn_set_feature feature */ + uint32_t word69; /* comn_set_feature parameter len */ + uint32_t word70; /* comn_set_feature parameter val0 */ +#define lpfc_emb0_subcmnd_csf_p0_SHIFT 0 +#define lpfc_emb0_subcmnd_csf_p0_MASK 0x3 +#define lpfc_emb0_subcmnd_csf_p0_WORD word70 +}; + +struct lpfc_sli_config_emb1_subsys { + struct lpfc_sli_config_hdr sli_config_hdr; + uint32_t word6; +#define lpfc_emb1_subcmnd_opcode_SHIFT 0 +#define lpfc_emb1_subcmnd_opcode_MASK 0xff +#define lpfc_emb1_subcmnd_opcode_WORD word6 +#define 
lpfc_emb1_subcmnd_subsys_SHIFT 8 +#define lpfc_emb1_subcmnd_subsys_MASK 0xff +#define lpfc_emb1_subcmnd_subsys_WORD word6 +/* Subsystem COMN (0x01) OpCodes */ +#define SLI_CONFIG_SUBSYS_COMN 0x01 +#define COMN_OPCODE_GET_PROFILE_CONFIG 0xA4 +#define COMN_OPCODE_READ_OBJECT 0xAB +#define COMN_OPCODE_WRITE_OBJECT 0xAC +#define COMN_OPCODE_READ_OBJECT_LIST 0xAD +#define COMN_OPCODE_DELETE_OBJECT 0xAE +#define COMN_OPCODE_SET_FEATURES 0xBF +#define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES 0x79 +#define COMN_OPCODE_GET_CNTL_ATTRIBUTES 0x20 + uint32_t timeout; + uint32_t request_length; + uint32_t word9; +#define lpfc_subcmnd_version_SHIFT 0 +#define lpfc_subcmnd_version_MASK 0xff +#define lpfc_subcmnd_version_WORD word9 + uint32_t word10; +#define lpfc_subcmnd_ask_rd_len_SHIFT 0 +#define lpfc_subcmnd_ask_rd_len_MASK 0xffffff +#define lpfc_subcmnd_ask_rd_len_WORD word10 + uint32_t rd_offset; + uint32_t obj_name[26]; + uint32_t hbd_count; +#define LPFC_MBX_SLI_CONFIG_MAX_HBD 8 + struct lpfc_sli_config_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD]; +}; + +struct lpfc_sli_config_mbox { + uint32_t word0; +#define lpfc_mqe_status_SHIFT 16 +#define lpfc_mqe_status_MASK 0x0000FFFF +#define lpfc_mqe_status_WORD word0 +#define lpfc_mqe_command_SHIFT 8 +#define lpfc_mqe_command_MASK 0x000000FF +#define lpfc_mqe_command_WORD word0 + union { + struct lpfc_sli_config_emb0_subsys sli_config_emb0_subsys; + struct lpfc_sli_config_emb1_subsys sli_config_emb1_subsys; + } un; +}; + +#define LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED 0 +#define LPFC_FORCED_LINK_SPEED_SUPPORTED 1 +struct get_forced_link_speed_support { + uint32_t command; +}; +struct forced_link_speed_support_reply { + uint8_t supported; +}; + +struct lpfc_bsg_ras_req { + uint32_t command; +}; + +struct lpfc_bsg_get_fwlog_req { + uint32_t command; + uint32_t read_size; + uint32_t read_offset; +}; + +struct lpfc_bsg_get_ras_lwpd { + uint32_t offset; + uint32_t wrap_count; +}; + +struct lpfc_bsg_set_ras_config_req { + uint32_t command; + uint8_t action; +#define LPFC_RASACTION_STOP_LOGGING 0x00 +#define LPFC_RASACTION_START_LOGGING 0x01 + uint8_t log_level; +}; + +struct lpfc_bsg_get_ras_config_reply { + uint8_t state; +#define LPFC_RASLOG_STATE_STOPPED 0x00 +#define LPFC_RASLOG_STATE_RUNNING 0x01 + uint8_t log_level; + uint32_t log_buff_sz; +}; + +struct lpfc_trunk_info { + uint32_t word0; +#define lpfc_trunk_info_link_status_SHIFT 0 +#define lpfc_trunk_info_link_status_MASK 1 +#define lpfc_trunk_info_link_status_WORD word0 +#define lpfc_trunk_info_trunk_active0_SHIFT 8 +#define lpfc_trunk_info_trunk_active0_MASK 1 +#define lpfc_trunk_info_trunk_active0_WORD word0 +#define lpfc_trunk_info_trunk_active1_SHIFT 9 +#define lpfc_trunk_info_trunk_active1_MASK 1 +#define lpfc_trunk_info_trunk_active1_WORD word0 +#define lpfc_trunk_info_trunk_active2_SHIFT 10 +#define lpfc_trunk_info_trunk_active2_MASK 1 +#define lpfc_trunk_info_trunk_active2_WORD word0 +#define lpfc_trunk_info_trunk_active3_SHIFT 11 +#define lpfc_trunk_info_trunk_active3_MASK 1 +#define lpfc_trunk_info_trunk_active3_WORD word0 +#define lpfc_trunk_info_trunk_config0_SHIFT 12 +#define lpfc_trunk_info_trunk_config0_MASK 1 +#define lpfc_trunk_info_trunk_config0_WORD word0 +#define lpfc_trunk_info_trunk_config1_SHIFT 13 +#define lpfc_trunk_info_trunk_config1_MASK 1 +#define lpfc_trunk_info_trunk_config1_WORD word0 +#define lpfc_trunk_info_trunk_config2_SHIFT 14 +#define lpfc_trunk_info_trunk_config2_MASK 1 +#define lpfc_trunk_info_trunk_config2_WORD word0 +#define lpfc_trunk_info_trunk_config3_SHIFT 15 +#define 
lpfc_trunk_info_trunk_config3_MASK 1 +#define lpfc_trunk_info_trunk_config3_WORD word0 + uint16_t port_speed; + uint16_t logical_speed; + uint32_t reserved3; +}; + +struct get_trunk_info_req { + uint32_t command; +}; + +struct get_cgnbuf_info_req { + uint32_t command; + uint32_t read_size; + uint32_t reset; +#define LPFC_BSG_CGN_RESET_STAT 1 +}; + +/* driver only */ +#define SLI_CONFIG_NOT_HANDLED 0 +#define SLI_CONFIG_HANDLED 1 diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h new file mode 100644 index 000000000..43cf46a3a --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_compat.h @@ -0,0 +1,98 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +/* + * This file provides macros to aid compilation in the Linux 2.4 kernel + * over various platform architectures. + */ + +/******************************************************************* +Note: HBA's SLI memory contains little-endian LW. +Thus to access it from a little-endian host, +memcpy_toio() and memcpy_fromio() can be used. +However on a big-endian host, copy 4 bytes at a time, +using writel() and readl(). 
+ *******************************************************************/ +#include + +#ifdef __BIG_ENDIAN + +static inline void +lpfc_memcpy_to_slim(void __iomem *dest, void *src, unsigned int bytes) +{ + uint32_t __iomem *dest32; + uint32_t *src32; + unsigned int four_bytes; + + + dest32 = (uint32_t __iomem *) dest; + src32 = (uint32_t *) src; + + /* write input bytes, 4 bytes at a time */ + for (four_bytes = bytes /4; four_bytes > 0; four_bytes--) { + writel( *src32, dest32); + readl(dest32); /* flush */ + dest32++; + src32++; + } + + return; +} + +static inline void +lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes) +{ + uint32_t *dest32; + uint32_t __iomem *src32; + unsigned int four_bytes; + + + dest32 = (uint32_t *) dest; + src32 = (uint32_t __iomem *) src; + + /* read input bytes, 4 bytes at a time */ + for (four_bytes = bytes /4; four_bytes > 0; four_bytes--) { + *dest32 = readl( src32); + dest32++; + src32++; + } + + return; +} + +#else + +static inline void +lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes) +{ + /* convert bytes in argument list to word count for copy function */ + __iowrite32_copy(dest, src, bytes / sizeof(uint32_t)); +} + +static inline void +lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes) +{ + /* actually returns 1 byte past dest */ + memcpy_fromio( dest, src, bytes); +} + +#endif /* __BIG_ENDIAN */ diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h new file mode 100644 index 000000000..d4e46a08f --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -0,0 +1,694 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + *******************************************************************/ + +typedef int (*node_filter)(struct lpfc_nodelist *, void *); + +struct fc_rport; +struct fc_frame_header; +void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_sli_read_link_ste(struct lpfc_hba *); +void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t); +void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); +int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *, struct lpfcMboxq *); +void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); +int lpfc_mbox_rsrc_prep(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox); +void lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, + enum lpfc_mbox_ctx locked); +void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *); +void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *); +void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); +void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, + LPFC_MBOXQ_t *, uint16_t); +void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); +void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); +void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); +void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *); + +void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); +void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *, + struct lpfc_nodelist *); +void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); +void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); +void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); +int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *, + uint16_t, uint16_t, bool); +int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_reg_congestion_buf(struct lpfc_hba *phba); +int lpfc_unreg_congestion_buf(struct lpfc_hba *phba); +struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); +void lpfc_cleanup_rcv_buffers(struct lpfc_vport *); +void lpfc_rcv_seq_check_edtov(struct lpfc_vport *); +void lpfc_cleanup_rpis(struct lpfc_vport *, int); +void lpfc_cleanup_pending_mbox(struct lpfc_vport *); +int lpfc_linkdown(struct lpfc_hba *); +void lpfc_linkdown_port(struct lpfc_vport *); +void lpfc_port_link_failure(struct lpfc_vport *); +void lpfc_mbx_cmpl_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); +void lpfc_retry_pport_discovery(struct lpfc_hba *); +int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt); +void lpfc_free_iocb_list(struct lpfc_hba *phba); +int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, + struct lpfc_queue *drq, int count, int idx); +int lpfc_read_lds_params(struct lpfc_hba *phba); +uint32_t lpfc_calc_cmf_latency(struct lpfc_hba *phba); +void lpfc_cmf_signal_init(struct lpfc_hba *phba); +void lpfc_cmf_start(struct lpfc_hba *phba); 
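/*
 * Editor's note -- illustrative aside, not part of the upstream patch.
 * The lpfc_rx_monitor_* prototypes a few lines below describe a small
 * ring-buffer API: create a ring with a fixed number of entries, record
 * rx_info_entry samples into it, and render the recorded entries into a
 * text buffer via lpfc_rx_monitor_report().  A minimal usage sketch based
 * only on these prototypes (the entry count 256 and the report arguments
 * are assumptions chosen for illustration; error handling and return-value
 * conventions are not visible in this header and are omitted):
 *
 *	lpfc_rx_monitor_create_ring(rx_monitor, 256);
 *	lpfc_rx_monitor_record(rx_monitor, &entry);
 *	len = lpfc_rx_monitor_report(phba, rx_monitor, buf, sizeof(buf), 32);
 *	lpfc_rx_monitor_destroy_ring(rx_monitor);
 */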
+void lpfc_cmf_stop(struct lpfc_hba *phba); +void lpfc_init_congestion_stat(struct lpfc_hba *phba); +void lpfc_init_congestion_buf(struct lpfc_hba *phba); +int lpfc_sli4_cgn_params_read(struct lpfc_hba *phba); +uint32_t lpfc_cgn_calc_crc32(void *bufp, uint32_t sz, uint32_t seed); +int lpfc_config_cgn_signal(struct lpfc_hba *phba); +int lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total); +void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba); +void lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag); +void lpfc_unblock_requests(struct lpfc_hba *phba); +void lpfc_block_requests(struct lpfc_hba *phba); +int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor, + u32 entries); +void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor); +void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor, + struct rx_info_entry *entry); +u32 lpfc_rx_monitor_report(struct lpfc_hba *phba, + struct lpfc_rx_info_monitor *rx_monitor, char *buf, + u32 buf_len, u32 max_read_entries); + +void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb); +void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); +void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); +void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int); +void lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp); +void lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp); +void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *); +void lpfc_set_disctmo(struct lpfc_vport *); +int lpfc_can_disctmo(struct lpfc_vport *); +int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *); +void lpfc_unreg_all_rpis(struct lpfc_vport *); +void lpfc_unreg_hba_rpis(struct lpfc_hba *); +void lpfc_unreg_default_rpis(struct lpfc_vport *); +void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *); + +int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *, + struct lpfc_iocbq *, struct lpfc_nodelist *); +struct lpfc_nodelist *lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did); +struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *); +int lpfc_nlp_put(struct lpfc_nodelist *); +void lpfc_check_nlp_post_devloss(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp); +void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb); +struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t); +void lpfc_disc_list_loopmap(struct lpfc_vport *); +void lpfc_disc_start(struct lpfc_vport *); +void lpfc_cleanup_discovery_resources(struct lpfc_vport *); +void lpfc_cleanup(struct lpfc_vport *); +void lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd); +void lpfc_disc_timeout(struct timer_list *); + +int lpfc_unregister_fcf_prep(struct lpfc_hba *); +struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); +struct lpfc_nodelist *lpfc_findnode_rpi(struct 
lpfc_vport *, uint16_t); +void lpfc_worker_wake_up(struct lpfc_hba *); +int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t); +int lpfc_do_work(void *); +int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *, + uint32_t); + +void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *); +int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, + struct serv_parm *, uint32_t, int); +void lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); +void lpfc_more_plogi(struct lpfc_vport *); +void lpfc_more_adisc(struct lpfc_vport *); +void lpfc_end_rscn(struct lpfc_vport *); +int lpfc_els_chk_latt(struct lpfc_vport *); +int lpfc_els_abort_flogi(struct lpfc_hba *); +int lpfc_initial_flogi(struct lpfc_vport *); +void lpfc_issue_init_vfi(struct lpfc_vport *); +int lpfc_initial_fdisc(struct lpfc_vport *); +int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t); +int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); +int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); +int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); +int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *); +int lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry); +int lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry); +int lpfc_issue_fabric_reglogin(struct lpfc_vport *); +int lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry); +int lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry); +void lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length); +int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); +int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); +int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *, + struct lpfc_nodelist *, LPFC_MBOXQ_t *); +int lpfc_els_rsp_reject(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *, + struct lpfc_nodelist *, LPFC_MBOXQ_t *); +int lpfc_els_rsp_adisc_acc(struct lpfc_vport *, struct lpfc_iocbq *, + struct lpfc_nodelist *); +int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *, + struct lpfc_nodelist *); +void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *); +void lpfc_els_retry_delay(struct timer_list *); +void lpfc_els_retry_delay_handler(struct lpfc_nodelist *); +void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, + struct lpfc_iocbq *); +int lpfc_els_handle_rscn(struct lpfc_vport *); +void lpfc_els_flush_rscn(struct lpfc_vport *); +int lpfc_rscn_payload_check(struct lpfc_vport *, uint32_t); +void lpfc_els_flush_all_cmd(struct lpfc_hba *); +void lpfc_els_flush_cmd(struct lpfc_vport *); +int lpfc_els_disc_adisc(struct lpfc_vport *); +int lpfc_els_disc_plogi(struct lpfc_vport *); +void lpfc_els_timeout(struct timer_list *); +void lpfc_els_timeout_handler(struct lpfc_vport *); +struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t, + uint8_t, struct lpfc_nodelist *, + uint32_t, uint32_t); +void lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job); +void lpfc_hb_timeout_handler(struct lpfc_hba *); + +void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, + struct lpfc_iocbq *); +int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *); +int lpfc_issue_gidpt(struct lpfc_vport *vport); +int lpfc_issue_gidft(struct lpfc_vport *vport); +int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq); +int 
lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); +int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t); +void lpfc_fdmi_change_check(struct lpfc_vport *vport); +void lpfc_delayed_disc_tmo(struct timer_list *); +void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *); + +int lpfc_config_port_prep(struct lpfc_hba *); +void lpfc_update_vport_wwn(struct lpfc_vport *vport); +int lpfc_config_port_post(struct lpfc_hba *); +int lpfc_sli4_refresh_params(struct lpfc_hba *phba); +int lpfc_hba_down_prep(struct lpfc_hba *); +int lpfc_hba_down_post(struct lpfc_hba *); +void lpfc_hba_init(struct lpfc_hba *, uint32_t *); +int lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt); +void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int); +int lpfc_online(struct lpfc_hba *); +void lpfc_unblock_mgmt_io(struct lpfc_hba *); +void lpfc_offline_prep(struct lpfc_hba *, int); +void lpfc_offline(struct lpfc_hba *); +void lpfc_reset_hba(struct lpfc_hba *); +int lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *hd, + spinlock_t *slock); + +int lpfc_sli_setup(struct lpfc_hba *); +int lpfc_sli4_setup(struct lpfc_hba *phba); +void lpfc_sli_queue_init(struct lpfc_hba *phba); +void lpfc_sli4_queue_init(struct lpfc_hba *phba); +struct lpfc_sli_ring *lpfc_sli4_calc_ring(struct lpfc_hba *phba, + struct lpfc_iocbq *iocbq); + +void lpfc_handle_eratt(struct lpfc_hba *); +void lpfc_handle_latt(struct lpfc_hba *); +irqreturn_t lpfc_sli_intr_handler(int, void *); +irqreturn_t lpfc_sli_sp_intr_handler(int, void *); +irqreturn_t lpfc_sli_fp_intr_handler(int, void *); +irqreturn_t lpfc_sli4_intr_handler(int, void *); +irqreturn_t lpfc_sli4_hba_intr_handler(int, void *); +irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id); + +int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap, + uint32_t len); + +void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba); +void lpfc_sli4_poll_hbtimer(struct timer_list *t); +void lpfc_sli4_start_polling(struct lpfc_queue *q); +void lpfc_sli4_stop_polling(struct lpfc_queue *q); + +void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); +void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); +LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); +void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_mbox_dev_check(struct lpfc_hba *); +int lpfc_mbox_tmo_val(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); +void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); +void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t); +void lpfc_unreg_vfi(struct lpfcMboxq *, struct lpfc_vport *); +void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); +void lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode); +void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); +void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); +int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t); +void lpfc_issue_init_vpi(struct lpfc_vport *); + +void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, + uint32_t , LPFC_MBOXQ_t *); 
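/*
 * Editor's note -- illustrative aside, not part of the upstream patch.
 * The buffer helpers declared next come in allocate/free pairs
 * (lpfc_els_hbq_alloc/lpfc_els_hbq_free, lpfc_sli4_rb_alloc/lpfc_sli4_rb_free,
 * lpfc_sli4_nvmet_alloc/lpfc_sli4_nvmet_free), and each allocation is
 * expected to be released with its matching free helper.  A hedged sketch
 * of that pairing, assuming only the prototypes below and a valid phba:
 *
 *	struct hbq_dmabuf *hbuf = lpfc_els_hbq_alloc(phba);
 *	if (hbuf)
 *		lpfc_els_hbq_free(phba, hbuf);
 */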
+struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *); +void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *); +struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *); +void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); +struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba); +void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab); +void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, + struct lpfc_nvmet_ctxbuf *ctxp); +int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, + struct fc_frame_header *fc_hdr); +void lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, struct lpfc_queue *wq); +void lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba); +void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, + uint16_t); +int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, + struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe); +int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq); +void lpfc_unregister_fcf(struct lpfc_hba *); +void lpfc_unregister_fcf_rescan(struct lpfc_hba *); +void lpfc_unregister_unused_fcf(struct lpfc_hba *); +int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *); +void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *); +void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *); +uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); +void lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *, uint16_t); +int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); +void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); +int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t); +void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *); + +int lpfc_mem_alloc(struct lpfc_hba *, int align); +int lpfc_nvmet_mem_alloc(struct lpfc_hba *phba); +int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *); +void lpfc_mem_free(struct lpfc_hba *); +void lpfc_mem_free_all(struct lpfc_hba *); +void lpfc_stop_vport_timers(struct lpfc_vport *); + +void lpfc_poll_timeout(struct timer_list *t); +void lpfc_poll_start_timer(struct lpfc_hba *); +void lpfc_poll_eratt(struct timer_list *); +int +lpfc_sli_handle_fast_ring_event(struct lpfc_hba *, + struct lpfc_sli_ring *, uint32_t); + +struct lpfc_iocbq *__lpfc_sli_get_iocbq(struct lpfc_hba *); +struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); +void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *); +uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); +void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, + uint32_t); +void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_selective_reset(struct lpfc_hba *); +void lpfc_reset_barrier(struct lpfc_hba *); +int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); +int lpfc_sli_brdkill(struct lpfc_hba *); +int lpfc_sli_chipset_init(struct lpfc_hba *phba); +int lpfc_sli_brdreset(struct lpfc_hba *); +int lpfc_sli_brdrestart(struct lpfc_hba *); +int lpfc_sli_hba_setup(struct lpfc_hba *); +int lpfc_sli_config_port(struct lpfc_hba *, int); +int lpfc_sli_host_down(struct lpfc_vport *); +int lpfc_sli_hba_down(struct lpfc_hba *); +int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); +int lpfc_sli_handle_mb_event(struct lpfc_hba *); +void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *, int); +int lpfc_sli_check_eratt(struct lpfc_hba *); +void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, + struct lpfc_sli_ring *, uint32_t); +void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *); +void 
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, + struct fc_frame_header *fc_hdr, bool aborted); +void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, + struct lpfc_iocbq *, uint32_t); +int lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *piocb, uint32_t flag); +int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, + struct lpfc_iocbq *pwqe); +int lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocb, void *cmpl); +void lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocbq, + struct lpfc_vport *vport, + struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did, + u32 elscmd, u8 tmo, u8 expect_rsp); +void lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, + struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, + u8 tmo); +void lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocbq, + struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, + u32 num_entry, u8 rctl, u8 last_seq, + u8 cr_cx_cmd); +void lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, + u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid, + bool ia, bool wqec); +struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri); +struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, + struct lpfc_iocbq *piocbq); +void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); +void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); +void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); +void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba); +void lpfc_sli_hba_iocb_abort(struct lpfc_hba *); +void lpfc_sli_flush_io_rings(struct lpfc_hba *phba); +int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, + struct lpfc_dmabuf *); +struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *, + struct lpfc_sli_ring *, + dma_addr_t); + +uint32_t lpfc_sli_get_buffer_tag(struct lpfc_hba *); +struct lpfc_dmabuf * lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *, + struct lpfc_sli_ring *, uint32_t ); + +int lpfc_sli_hbq_count(void); +int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t); +void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *); +int lpfc_sli_hbq_size(void); +int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *, + struct lpfc_iocbq *, void *); +int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd); +int lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id, + lpfc_ctx_cmd abort_cmd); +int +lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *, + uint16_t, uint64_t, lpfc_ctx_cmd); + +void lpfc_mbox_timeout(struct timer_list *t); +void lpfc_mbox_timeout_handler(struct lpfc_hba *); +int lpfc_issue_hb_mbox(struct lpfc_hba *phba); +void lpfc_issue_hb_tmo(struct lpfc_hba *phba); + +struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t); +struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *, + struct lpfc_name *); +struct lpfc_nodelist *lpfc_findnode_mapped(struct lpfc_vport *vport); + +int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); + +int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t, + struct lpfc_iocbq *, struct lpfc_iocbq *, + uint32_t); +void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *); + +void lpfc_sli_free_hbq(struct 
lpfc_hba *, struct hbq_dmabuf *); + +void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *); +void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t); +void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t); +void *lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int flags, + dma_addr_t *handle); +void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virtp, dma_addr_t dma); + +void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *); +void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp); +void lpfc_setup_fdmi_mask(struct lpfc_vport *vport); +int lpfc_link_reset(struct lpfc_vport *vport); + +/* Function prototypes. */ +int lpfc_check_pci_resettable(struct lpfc_hba *phba); +const char* lpfc_info(struct Scsi_Host *); +int lpfc_scan_finished(struct Scsi_Host *, unsigned long); + +int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_api_table_setup(struct lpfc_hba *, uint8_t); + +void lpfc_get_cfgparam(struct lpfc_hba *); +void lpfc_get_vport_cfgparam(struct lpfc_vport *); +int lpfc_alloc_sysfs_attr(struct lpfc_vport *); +void lpfc_free_sysfs_attr(struct lpfc_vport *); +bool lpfc_error_lost_link(struct lpfc_vport *vport, u32 ulp_status, + u32 ulp_word4); +extern const struct attribute_group *lpfc_hba_groups[]; +extern const struct attribute_group *lpfc_vport_groups[]; +extern struct scsi_host_template lpfc_template; +extern struct scsi_host_template lpfc_template_nvme; +extern struct scsi_host_template lpfc_vport_template; +extern struct fc_function_template lpfc_transport_functions; +extern struct fc_function_template lpfc_vport_transport_functions; + +int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t); +int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t); +void lpfc_terminate_rport_io(struct fc_rport *); +void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport); + +struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct device *); +int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable); +int lpfc_mbx_unreg_vpi(struct lpfc_vport *); +void destroy_port(struct lpfc_vport *); +int lpfc_get_instance(void); +void lpfc_host_attrib_init(struct Scsi_Host *); + +extern void lpfc_debugfs_initialize(struct lpfc_vport *); +extern void lpfc_debugfs_terminate(struct lpfc_vport *); +extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t, + uint32_t, uint32_t); +extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t, + uint32_t, uint32_t); +extern void lpfc_debugfs_nvme_trc(struct lpfc_hba *phba, char *fmt, + uint16_t data1, uint16_t data2, uint32_t data3); +extern struct lpfc_hbq_init *lpfc_hbq_defs[]; + +/* SLI4 if_type 2 externs. 
*/ +int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *); +int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *); +int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t, + uint16_t *, uint16_t *); +int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t, + uint16_t *, uint16_t *); + +/* Interface exported by fabric iocb scheduler */ +void lpfc_fabric_abort_nport(struct lpfc_nodelist *); +void lpfc_fabric_abort_hba(struct lpfc_hba *); +void lpfc_fabric_block_timeout(struct timer_list *); +void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); +void lpfc_rampdown_queue_depth(struct lpfc_hba *); +void lpfc_ramp_down_queue_handler(struct lpfc_hba *); +void lpfc_scsi_dev_block(struct lpfc_hba *); + +void +lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *); +struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *); +void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *); +void lpfc_create_static_vport(struct lpfc_hba *); +void lpfc_stop_hba_timers(struct lpfc_hba *); +void lpfc_stop_port(struct lpfc_hba *); +int lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t sz); +int lpfc_update_cmf_cmpl(struct lpfc_hba *phba, uint64_t val, uint32_t sz, + struct Scsi_Host *shost); +void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *); +void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *); +void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); +int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); +void lpfc_start_fdiscs(struct lpfc_hba *phba); +struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t); +struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t); +#define HBA_EVENT_RSCN 5 +#define HBA_EVENT_LINK_UP 2 +#define HBA_EVENT_LINK_DOWN 3 + +/* functions to support SGIOv4/bsg interface */ +int lpfc_bsg_request(struct bsg_job *); +int lpfc_bsg_timeout(struct bsg_job *); +int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, + struct lpfc_iocbq *); +int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *); +void __lpfc_sli_ringtx_put(struct lpfc_hba *, struct lpfc_sli_ring *, + struct lpfc_iocbq *); +struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *, + struct lpfc_sli_ring *); +int __lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, + struct lpfc_iocbq *, uint32_t); +uint32_t lpfc_drain_txq(struct lpfc_hba *); +void lpfc_clr_rrq_active(struct lpfc_hba *, uint16_t, struct lpfc_node_rrq *); +int lpfc_test_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, uint16_t); +void lpfc_handle_rrq_active(struct lpfc_hba *); +int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *); +int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, + uint16_t, uint16_t, uint16_t); +uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t); +void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *); +struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t, + uint32_t); +void lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *, enum nemb_type, + enum mbox_type, enum dma_type, enum sta_type, + struct lpfc_dmabuf *, uint32_t); +void lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *, MAILBOX_t *); +int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *); +/* functions to support SR-IOV */ +int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int); +uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *); +int lpfc_sli4_queue_create(struct lpfc_hba *); +void 
lpfc_sli4_queue_destroy(struct lpfc_hba *); +void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *, + struct sli4_wcqe_xri_aborted *); +void lpfc_sli_abts_recover_port(struct lpfc_vport *, + struct lpfc_nodelist *); +int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t); +int lpfc_issue_reg_vfi(struct lpfc_vport *); +int lpfc_issue_unreg_vfi(struct lpfc_vport *); +int lpfc_selective_reset(struct lpfc_hba *); +int lpfc_sli4_read_config(struct lpfc_hba *); +void lpfc_sli4_node_prep(struct lpfc_hba *); +int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba); +int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba); +int lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *sglist); +int lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf); +int lpfc_sli4_io_sgl_update(struct lpfc_hba *phba); +int lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, + struct list_head *blist, int xricnt); +int lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc); +void lpfc_io_free(struct lpfc_hba *phba); +void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); +uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); +int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); +void lpfc_sli4_offline_eratt(struct lpfc_hba *); + +struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *, + struct lpfc_name *, + struct lpfc_name *, + uint64_t, uint32_t, bool); +void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*); +struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *, + struct list_head *list, + struct lpfc_name *, + struct lpfc_name *, uint64_t); +bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *, + struct lpfc_name *, uint64_t, uint8_t); +bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *, + struct lpfc_name *, uint64_t, uint8_t); +bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *, + struct lpfc_name *, uint64_t *, struct lpfc_name *, + struct lpfc_name *, uint64_t *, + uint32_t *, uint32_t *); +int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox); +void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb); + +/* RAS Interface */ +void lpfc_sli4_ras_init(struct lpfc_hba *phba); +void lpfc_sli4_ras_setup(struct lpfc_hba *phba); +int lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, uint32_t fwlog_level, + uint32_t fwlog_enable); +void lpfc_ras_stop_fwlog(struct lpfc_hba *phba); +int lpfc_check_fwlog_support(struct lpfc_hba *phba); + +/* NVME interfaces. 
*/ +void lpfc_nvme_rescan_port(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp); +void lpfc_nvme_unregister_port(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp); +int lpfc_nvme_register_port(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp); +int lpfc_nvme_create_localport(struct lpfc_vport *vport); +void lpfc_nvme_destroy_localport(struct lpfc_vport *vport); +void lpfc_nvme_update_localport(struct lpfc_vport *vport); +int lpfc_nvmet_create_targetport(struct lpfc_hba *phba); +int lpfc_nvmet_update_targetport(struct lpfc_hba *phba); +void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba); +int lpfc_nvme_handle_lsreq(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *axchg); +int lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *axchg); +void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint32_t idx, + struct rqb_dmabuf *nvmebuf, uint64_t isr_ts, + uint8_t cqflag); +void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba); +void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp); +void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb); +void lpfc_create_multixri_pools(struct lpfc_hba *phba); +void lpfc_create_destroy_pools(struct lpfc_hba *phba); +void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid); +void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 cnt); +void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid); +void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid); +void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid); +#ifdef LPFC_MXP_STAT +void lpfc_snapshot_mxp(struct lpfc_hba *, u32); +#endif +struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, u32 hwqid, + int); +void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd, + struct lpfc_sli4_hdw_queue *qp); +void lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd); +void lpfc_wqe_cmd_template(void); +void lpfc_nvmet_cmd_template(void); +void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, + uint32_t stat, uint32_t param); +extern int lpfc_enable_nvmet_cnt; +extern unsigned long long lpfc_enable_nvmet[]; +extern int lpfc_no_hba_reset_cnt; +extern unsigned long lpfc_no_hba_reset[]; +extern unsigned char lpfc_acqe_cgn_frequency; +extern int lpfc_fabric_cgn_frequency; +extern int lpfc_use_cgn_signal; + +extern union lpfc_wqe128 lpfc_iread_cmd_template; +extern union lpfc_wqe128 lpfc_iwrite_cmd_template; +extern union lpfc_wqe128 lpfc_icmnd_cmd_template; + +/* vmid interface */ +int lpfc_vmid_uvem(struct lpfc_vport *vport, struct lpfc_vmid *vmid, bool ins); +uint32_t lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport); +int lpfc_vmid_cmd(struct lpfc_vport *vport, + int cmdcode, struct lpfc_vmid *vmid); +int lpfc_vmid_hash_fn(const char *vmid, int len); +struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport, + uint32_t hash, uint8_t *buf); +int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, + enum dma_data_direction iodir, + union lpfc_vmid_io_tag *tag); +void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport); +int lpfc_issue_els_qfpa(struct lpfc_vport *vport); +void lpfc_reinit_vmid(struct lpfc_vport *vport); + +void lpfc_sli_rpi_release(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp); + +int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, + struct lpfc_rdp_context *rdp_context); diff --git a/drivers/scsi/lpfc/lpfc_ct.c 
b/drivers/scsi/lpfc/lpfc_ct.c new file mode 100644 index 000000000..baae1f827 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -0,0 +1,3825 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +/* + * Fibre Channel SCSI LAN Device Driver CT support: FC Generic Services FC-GS + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_version.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" + +/* FDMI Port Speed definitions - FC-GS-7 */ +#define HBA_PORTSPEED_1GFC 0x00000001 /* 1G FC */ +#define HBA_PORTSPEED_2GFC 0x00000002 /* 2G FC */ +#define HBA_PORTSPEED_4GFC 0x00000008 /* 4G FC */ +#define HBA_PORTSPEED_10GFC 0x00000004 /* 10G FC */ +#define HBA_PORTSPEED_8GFC 0x00000010 /* 8G FC */ +#define HBA_PORTSPEED_16GFC 0x00000020 /* 16G FC */ +#define HBA_PORTSPEED_32GFC 0x00000040 /* 32G FC */ +#define HBA_PORTSPEED_20GFC 0x00000080 /* 20G FC */ +#define HBA_PORTSPEED_40GFC 0x00000100 /* 40G FC */ +#define HBA_PORTSPEED_128GFC 0x00000200 /* 128G FC */ +#define HBA_PORTSPEED_64GFC 0x00000400 /* 64G FC */ +#define HBA_PORTSPEED_256GFC 0x00000800 /* 256G FC */ +#define HBA_PORTSPEED_UNKNOWN 0x00008000 /* Unknown */ +#define HBA_PORTSPEED_10GE 0x00010000 /* 10G E */ +#define HBA_PORTSPEED_40GE 0x00020000 /* 40G E */ +#define HBA_PORTSPEED_100GE 0x00040000 /* 100G E */ +#define HBA_PORTSPEED_25GE 0x00080000 /* 25G E */ +#define HBA_PORTSPEED_50GE 0x00100000 /* 50G E */ +#define HBA_PORTSPEED_400GE 0x00200000 /* 400G E */ + +#define FOURBYTES 4 + + +static char *lpfc_release_version = LPFC_DRIVER_VERSION; +static void +lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb); + +static void +lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, + struct lpfc_dmabuf *mp, uint32_t size) +{ + if (!mp) { + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "0146 Ignoring unsolicited CT No HBQ " + "status = x%x\n", + get_job_ulpstatus(phba, piocbq)); + } + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "0145 Ignoring unsolicited CT HBQ Size:%d " + "status = x%x\n", + size, get_job_ulpstatus(phba, 
piocbq)); +} + +static void +lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, + struct lpfc_dmabuf *mp, uint32_t size) +{ + lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size); +} + +/** + * lpfc_ct_unsol_cmpl : Completion callback function for unsol ct commands + * @phba : pointer to lpfc hba data structure. + * @cmdiocb : pointer to lpfc command iocb data structure. + * @rspiocb : pointer to lpfc response iocb data structure. + * + * This routine is the callback function for issuing unsol ct reject command. + * The memory allocated in the reject command path is freed up here. + **/ +static void +lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_nodelist *ndlp; + struct lpfc_dmabuf *mp, *bmp; + + ndlp = cmdiocb->ndlp; + if (ndlp) + lpfc_nlp_put(ndlp); + + mp = cmdiocb->rsp_dmabuf; + bmp = cmdiocb->bpl_dmabuf; + if (mp) { + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + cmdiocb->rsp_dmabuf = NULL; + } + + if (bmp) { + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + kfree(bmp); + cmdiocb->bpl_dmabuf = NULL; + } + + lpfc_sli_release_iocbq(phba, cmdiocb); +} + +/** + * lpfc_ct_reject_event - Issue reject for unhandled CT MIB commands + * @ndlp: pointer to a node-list data structure. + * @ct_req: pointer to the CT request data structure. + * @ulp_context: context of received UNSOL CT command + * @ox_id: ox_id of the UNSOL CT command + * + * This routine is invoked by the lpfc_ct_handle_mibreq routine for sending + * a reject response. Reject response is sent for the unhandled commands. + **/ +static void +lpfc_ct_reject_event(struct lpfc_nodelist *ndlp, + struct lpfc_sli_ct_request *ct_req, + u16 ulp_context, u16 ox_id) +{ + struct lpfc_vport *vport = ndlp->vport; + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli_ct_request *ct_rsp; + struct lpfc_iocbq *cmdiocbq = NULL; + struct lpfc_dmabuf *bmp = NULL; + struct lpfc_dmabuf *mp = NULL; + struct ulp_bde64 *bpl; + u8 rc = 0; + u32 tmo; + + /* fill in BDEs for command */ + mp = kmalloc(sizeof(*mp), GFP_KERNEL); + if (!mp) { + rc = 1; + goto ct_exit; + } + + mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys); + if (!mp->virt) { + rc = 2; + goto ct_free_mp; + } + + /* Allocate buffer for Buffer ptr list */ + bmp = kmalloc(sizeof(*bmp), GFP_KERNEL); + if (!bmp) { + rc = 3; + goto ct_free_mpvirt; + } + + bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &bmp->phys); + if (!bmp->virt) { + rc = 4; + goto ct_free_bmp; + } + + INIT_LIST_HEAD(&mp->list); + INIT_LIST_HEAD(&bmp->list); + + bpl = (struct ulp_bde64 *)bmp->virt; + memset(bpl, 0, sizeof(struct ulp_bde64)); + bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys)); + bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys)); + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4); + bpl->tus.w = le32_to_cpu(bpl->tus.w); + + ct_rsp = (struct lpfc_sli_ct_request *)mp->virt; + memset(ct_rsp, 0, sizeof(struct lpfc_sli_ct_request)); + + ct_rsp->RevisionId.bits.Revision = SLI_CT_REVISION; + ct_rsp->RevisionId.bits.InId = 0; + ct_rsp->FsType = ct_req->FsType; + ct_rsp->FsSubType = ct_req->FsSubType; + ct_rsp->CommandResponse.bits.Size = 0; + ct_rsp->CommandResponse.bits.CmdRsp = + cpu_to_be16(SLI_CT_RESPONSE_FS_RJT); + ct_rsp->ReasonCode = SLI_CT_REQ_NOT_SUPPORTED; + ct_rsp->Explanation = SLI_CT_NO_ADDITIONAL_EXPL; + + cmdiocbq = lpfc_sli_get_iocbq(phba); + if (!cmdiocbq) { + rc = 5; + goto ct_free_bmpvirt; + } + + if (phba->sli_rev == LPFC_SLI_REV4) { + lpfc_sli_prep_xmit_seq64(phba, 
cmdiocbq, bmp, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], + ox_id, 1, FC_RCTL_DD_SOL_CTL, 1, + CMD_XMIT_SEQUENCE64_WQE); + } else { + lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp, 0, ulp_context, 1, + FC_RCTL_DD_SOL_CTL, 1, + CMD_XMIT_SEQUENCE64_CX); + } + + /* Save for completion so we can release these resources */ + cmdiocbq->rsp_dmabuf = mp; + cmdiocbq->bpl_dmabuf = bmp; + cmdiocbq->cmd_cmpl = lpfc_ct_unsol_cmpl; + tmo = (3 * phba->fc_ratov); + + cmdiocbq->retry = 0; + cmdiocbq->vport = vport; + cmdiocbq->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT; + + cmdiocbq->ndlp = lpfc_nlp_get(ndlp); + if (!cmdiocbq->ndlp) + goto ct_no_ndlp; + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); + if (rc) { + lpfc_nlp_put(ndlp); + goto ct_no_ndlp; + } + return; + +ct_no_ndlp: + rc = 6; + lpfc_sli_release_iocbq(phba, cmdiocbq); +ct_free_bmpvirt: + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); +ct_free_bmp: + kfree(bmp); +ct_free_mpvirt: + lpfc_mbuf_free(phba, mp->virt, mp->phys); +ct_free_mp: + kfree(mp); +ct_exit: + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "6440 Unsol CT: Rsp err %d Data: x%x\n", + rc, vport->fc_flag); +} + +/** + * lpfc_ct_handle_mibreq - Process an unsolicited CT MIB request data buffer + * @phba: pointer to lpfc hba data structure. + * @ctiocbq: pointer to lpfc CT command iocb data structure. + * + * This routine is used for processing the IOCB associated with a unsolicited + * CT MIB request. It first determines whether there is an existing ndlp that + * matches the DID from the unsolicited IOCB. If not, it will return. + **/ +static void +lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq) +{ + struct lpfc_sli_ct_request *ct_req; + struct lpfc_nodelist *ndlp = NULL; + struct lpfc_vport *vport = ctiocbq->vport; + u32 ulp_status = get_job_ulpstatus(phba, ctiocbq); + u32 ulp_word4 = get_job_word4(phba, ctiocbq); + u32 did; + u16 mi_cmd; + + did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp); + if (ulp_status) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "6438 Unsol CT: status:x%x/x%x did : x%x\n", + ulp_status, ulp_word4, did); + return; + } + + /* Ignore traffic received during vport shutdown */ + if (vport->fc_flag & FC_UNLOADING) + return; + + ndlp = lpfc_findnode_did(vport, did); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "6439 Unsol CT: NDLP Not Found for DID : x%x", + did); + return; + } + + ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt; + + mi_cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp); + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "6442 : MI Cmd : x%x Not Supported\n", mi_cmd); + lpfc_ct_reject_event(ndlp, ct_req, + bf_get(wqe_ctxt_tag, + &ctiocbq->wqe.xmit_els_rsp.wqe_com), + bf_get(wqe_rcvoxid, + &ctiocbq->wqe.xmit_els_rsp.wqe_com)); +} + +/** + * lpfc_ct_unsol_event - Process an unsolicited event from a ct sli ring + * @phba: pointer to lpfc hba data structure. + * @pring: pointer to a SLI ring. + * @ctiocbq: pointer to lpfc ct iocb data structure. + * + * This routine is used to process an unsolicited event received from a SLI + * (Service Level Interface) ring. The actual processing of the data buffer + * associated with the unsolicited event is done by invoking appropriate routine + * after properly set up the iocb buffer from the SLI ring on which the + * unsolicited event was received. 
+ **/ +void +lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *ctiocbq) +{ + struct lpfc_dmabuf *mp = NULL; + IOCB_t *icmd = &ctiocbq->iocb; + int i; + struct lpfc_iocbq *iocbq; + struct lpfc_iocbq *iocb; + dma_addr_t dma_addr; + uint32_t size; + struct list_head head; + struct lpfc_sli_ct_request *ct_req; + struct lpfc_dmabuf *bdeBuf1 = ctiocbq->cmd_dmabuf; + struct lpfc_dmabuf *bdeBuf2 = ctiocbq->bpl_dmabuf; + u32 status, parameter, bde_count = 0; + struct lpfc_wcqe_complete *wcqe_cmpl = NULL; + + ctiocbq->cmd_dmabuf = NULL; + ctiocbq->rsp_dmabuf = NULL; + ctiocbq->bpl_dmabuf = NULL; + + wcqe_cmpl = &ctiocbq->wcqe_cmpl; + status = get_job_ulpstatus(phba, ctiocbq); + parameter = get_job_word4(phba, ctiocbq); + if (phba->sli_rev == LPFC_SLI_REV4) + bde_count = wcqe_cmpl->word3; + else + bde_count = icmd->ulpBdeCount; + + if (unlikely(status == IOSTAT_NEED_BUFFER)) { + lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); + } else if ((status == IOSTAT_LOCAL_REJECT) && + ((parameter & IOERR_PARAM_MASK) == + IOERR_RCV_BUFFER_WAITING)) { + /* Not enough posted buffers; Try posting more buffers */ + phba->fc_stat.NoRcvBuf++; + if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) + lpfc_sli3_post_buffer(phba, pring, 2); + return; + } + + /* If there are no BDEs associated + * with this IOCB, there is nothing to do. + */ + if (bde_count == 0) + return; + + ctiocbq->cmd_dmabuf = bdeBuf1; + if (bde_count == 2) + ctiocbq->bpl_dmabuf = bdeBuf2; + + ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt; + + if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE && + ct_req->FsSubType == SLI_CT_MIB_Subtypes) { + lpfc_ct_handle_mibreq(phba, ctiocbq); + } else { + if (!lpfc_bsg_ct_unsol_event(phba, pring, ctiocbq)) + return; + } + + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + INIT_LIST_HEAD(&head); + list_add_tail(&head, &ctiocbq->list); + list_for_each_entry(iocb, &head, list) { + if (phba->sli_rev == LPFC_SLI_REV4) + bde_count = iocb->wcqe_cmpl.word3; + else + bde_count = iocb->iocb.ulpBdeCount; + + if (!bde_count) + continue; + bdeBuf1 = iocb->cmd_dmabuf; + iocb->cmd_dmabuf = NULL; + if (phba->sli_rev == LPFC_SLI_REV4) + size = iocb->wqe.gen_req.bde.tus.f.bdeSize; + else + size = iocb->iocb.un.cont64[0].tus.f.bdeSize; + lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size); + lpfc_in_buf_free(phba, bdeBuf1); + if (bde_count == 2) { + bdeBuf2 = iocb->bpl_dmabuf; + iocb->bpl_dmabuf = NULL; + if (phba->sli_rev == LPFC_SLI_REV4) + size = iocb->unsol_rcv_len; + else + size = iocb->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize; + lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf2, + size); + lpfc_in_buf_free(phba, bdeBuf2); + } + } + list_del(&head); + } else { + INIT_LIST_HEAD(&head); + list_add_tail(&head, &ctiocbq->list); + list_for_each_entry(iocbq, &head, list) { + icmd = &iocbq->iocb; + if (icmd->ulpBdeCount == 0) + lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0); + for (i = 0; i < icmd->ulpBdeCount; i++) { + dma_addr = getPaddr(icmd->un.cont64[i].addrHigh, + icmd->un.cont64[i].addrLow); + mp = lpfc_sli_ringpostbuf_get(phba, pring, + dma_addr); + size = icmd->un.cont64[i].tus.f.bdeSize; + lpfc_ct_unsol_buffer(phba, iocbq, mp, size); + lpfc_in_buf_free(phba, mp); + } + lpfc_sli3_post_buffer(phba, pring, i); + } + list_del(&head); + } +} + +/** + * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler + * @phba: Pointer to HBA context object. 
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence + * + * This function serves as the upper level protocol abort handler for CT + * protocol. + * + * Return 1 if abort has been handled, 0 otherwise. + **/ +int +lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf) +{ + int handled; + + /* CT upper level goes through BSG */ + handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf); + + return handled; +} + +static void +lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) +{ + struct lpfc_dmabuf *mlast, *next_mlast; + + list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) { + list_del(&mlast->list); + lpfc_mbuf_free(phba, mlast->virt, mlast->phys); + kfree(mlast); + } + lpfc_mbuf_free(phba, mlist->virt, mlist->phys); + kfree(mlist); + return; +} + +static struct lpfc_dmabuf * +lpfc_alloc_ct_rsp(struct lpfc_hba *phba, __be16 cmdcode, struct ulp_bde64 *bpl, + uint32_t size, int *entries) +{ + struct lpfc_dmabuf *mlist = NULL; + struct lpfc_dmabuf *mp; + int cnt, i = 0; + + /* We get chunks of FCELSSIZE */ + cnt = size > FCELSSIZE ? FCELSSIZE: size; + + while (size) { + /* Allocate buffer for rsp payload */ + mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!mp) { + if (mlist) + lpfc_free_ct_rsp(phba, mlist); + return NULL; + } + + INIT_LIST_HEAD(&mp->list); + + if (be16_to_cpu(cmdcode) == SLI_CTNS_GID_FT || + be16_to_cpu(cmdcode) == SLI_CTNS_GFF_ID) + mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); + else + mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys)); + + if (!mp->virt) { + kfree(mp); + if (mlist) + lpfc_free_ct_rsp(phba, mlist); + return NULL; + } + + /* Queue it to a linked list */ + if (!mlist) + mlist = mp; + else + list_add_tail(&mp->list, &mlist->list); + + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + /* build buffer ptr list for IOCB */ + bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) ); + bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) ); + bpl->tus.f.bdeSize = (uint16_t) cnt; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + bpl++; + + i++; + size -= cnt; + } + + *entries = i; + return mlist; +} + +int +lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb) +{ + struct lpfc_dmabuf *buf_ptr; + + /* IOCBQ job structure gets cleaned during release. Just release + * the dma buffers here. + */ + if (ctiocb->cmd_dmabuf) { + buf_ptr = ctiocb->cmd_dmabuf; + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); + kfree(buf_ptr); + ctiocb->cmd_dmabuf = NULL; + } + if (ctiocb->rsp_dmabuf) { + lpfc_free_ct_rsp(phba, ctiocb->rsp_dmabuf); + ctiocb->rsp_dmabuf = NULL; + } + + if (ctiocb->bpl_dmabuf) { + buf_ptr = ctiocb->bpl_dmabuf; + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); + kfree(buf_ptr); + ctiocb->bpl_dmabuf = NULL; + } + lpfc_sli_release_iocbq(phba, ctiocb); + return 0; +} + +/* + * lpfc_gen_req - Build and issue a GEN_REQUEST command to the SLI Layer + * @vport: pointer to a host virtual N_Port data structure. + * @bmp: Pointer to BPL for SLI command + * @inp: Pointer to data buffer for response data. + * @outp: Pointer to data buffer that hold the CT command. + * @cmpl: completion routine to call when command completes + * @ndlp: Destination NPort nodelist entry + * + * This function as the final part for issuing a CT command. 
+ */ +static int +lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, + struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp, + void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *), + struct lpfc_nodelist *ndlp, uint32_t event_tag, uint32_t num_entry, + uint32_t tmo, uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *geniocb; + int rc; + u16 ulp_context; + + /* Allocate buffer for command iocb */ + geniocb = lpfc_sli_get_iocbq(phba); + + if (geniocb == NULL) + return 1; + + /* Update the num_entry bde count */ + geniocb->num_bdes = num_entry; + + geniocb->bpl_dmabuf = bmp; + + /* Save for completion so we can release these resources */ + geniocb->cmd_dmabuf = inp; + geniocb->rsp_dmabuf = outp; + + geniocb->event_tag = event_tag; + + if (!tmo) { + /* FC spec states we need 3 * ratov for CT requests */ + tmo = (3 * phba->fc_ratov); + } + + if (phba->sli_rev == LPFC_SLI_REV4) + ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; + else + ulp_context = ndlp->nlp_rpi; + + lpfc_sli_prep_gen_req(phba, geniocb, bmp, ulp_context, num_entry, tmo); + + /* Issue GEN REQ IOCB for NPORT */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0119 Issue GEN REQ IOCB to NPORT x%x " + "Data: x%x x%x\n", + ndlp->nlp_DID, geniocb->iotag, + vport->port_state); + geniocb->cmd_cmpl = cmpl; + geniocb->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT; + geniocb->vport = vport; + geniocb->retry = retry; + geniocb->ndlp = lpfc_nlp_get(ndlp); + if (!geniocb->ndlp) + goto out; + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0); + if (rc == IOCB_ERROR) { + lpfc_nlp_put(ndlp); + goto out; + } + + return 0; +out: + lpfc_sli_release_iocbq(phba, geniocb); + return 1; +} + +/* + * lpfc_ct_cmd - Build and issue a CT command + * @vport: pointer to a host virtual N_Port data structure. + * @inmp: Pointer to data buffer for response data. + * @bmp: Pointer to BPL for SLI command + * @ndlp: Destination NPort nodelist entry + * @cmpl: completion routine to call when command completes + * + * This function is called for issuing a CT command. + */ +static int +lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp, + struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp, + void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *), + uint32_t rsp_size, uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt; + struct lpfc_dmabuf *outmp; + int cnt = 0, status; + __be16 cmdcode = ((struct lpfc_sli_ct_request *)inmp->virt)-> + CommandResponse.bits.CmdRsp; + + bpl++; /* Skip past ct request */ + + /* Put buffer(s) for ct rsp in bpl */ + outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt); + if (!outmp) + return -ENOMEM; + /* + * Form the CT IOCB. The total number of BDEs in this IOCB + * is the single command plus response count from + * lpfc_alloc_ct_rsp. 
+ */ + cnt += 1; + status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, + phba->fc_eventTag, cnt, 0, retry); + if (status) { + lpfc_free_ct_rsp(phba, outmp); + return -ENOMEM; + } + return 0; +} + +struct lpfc_vport * +lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) { + struct lpfc_vport *vport_curr; + unsigned long flags; + + spin_lock_irqsave(&phba->port_list_lock, flags); + list_for_each_entry(vport_curr, &phba->port_list, listentry) { + if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) { + spin_unlock_irqrestore(&phba->port_list_lock, flags); + return vport_curr; + } + } + spin_unlock_irqrestore(&phba->port_list_lock, flags); + return NULL; +} + +static void +lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) +{ + struct lpfc_nodelist *ndlp; + + if ((vport->port_type != LPFC_NPIV_PORT) || + !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) { + + ndlp = lpfc_setup_disc_node(vport, Did); + + if (ndlp) { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "Parse GID_FTrsp: did:x%x flg:x%x x%x", + Did, ndlp->nlp_flag, vport->fc_flag); + + /* By default, the driver expects to support FCP FC4 */ + if (fc4_type == FC_TYPE_FCP) + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + + if (fc4_type == FC_TYPE_NVME) + ndlp->nlp_fc4_type |= NLP_FC4_NVME; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0238 Process x%06x NameServer Rsp " + "Data: x%x x%x x%x x%x x%x\n", Did, + ndlp->nlp_flag, ndlp->nlp_fc4_type, + ndlp->nlp_state, vport->fc_flag, + vport->fc_rscn_id_cnt); + + /* if ndlp needs to be discovered and prior + * state of ndlp hit devloss, change state to + * allow rediscovery. + */ + if (ndlp->nlp_flag & NLP_NPR_2B_DISC && + ndlp->nlp_state == NLP_STE_UNUSED_NODE) { + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_NPR_NODE); + } + } else { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d", + Did, vport->fc_flag, vport->fc_rscn_id_cnt); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0239 Skip x%06x NameServer Rsp " + "Data: x%x x%x x%px\n", + Did, vport->fc_flag, + vport->fc_rscn_id_cnt, ndlp); + } + } else { + if (!(vport->fc_flag & FC_RSCN_MODE) || + lpfc_rscn_payload_check(vport, Did)) { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "Query GID_FTrsp: did:x%x flg:x%x cnt:%d", + Did, vport->fc_flag, vport->fc_rscn_id_cnt); + + /* + * This NPortID was previously a FCP/NVMe target, + * Don't even bother to send GFF_ID. 
+ */ + ndlp = lpfc_findnode_did(vport, Did); + if (ndlp && + (ndlp->nlp_type & + (NLP_FCP_TARGET | NLP_NVME_TARGET))) { + if (fc4_type == FC_TYPE_FCP) + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + if (fc4_type == FC_TYPE_NVME) + ndlp->nlp_fc4_type |= NLP_FC4_NVME; + lpfc_setup_disc_node(vport, Did); + } else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, + 0, Did) == 0) + vport->num_disc_nodes++; + else + lpfc_setup_disc_node(vport, Did); + } else { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d", + Did, vport->fc_flag, vport->fc_rscn_id_cnt); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0245 Skip x%06x NameServer Rsp " + "Data: x%x x%x\n", Did, + vport->fc_flag, + vport->fc_rscn_id_cnt); + } + } +} + +static void +lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp = NULL; + char *str; + + if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) + str = "GID_FT"; + else + str = "GID_PT"; + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6430 Process %s rsp for %08x type %x %s %s\n", + str, Did, fc4_type, + (fc4_type == FC_TYPE_FCP) ? "FCP" : " ", + (fc4_type == FC_TYPE_NVME) ? "NVME" : " "); + /* + * To conserve rpi's, filter out addresses for other + * vports on the same physical HBAs. + */ + if (Did != vport->fc_myDID && + (!lpfc_find_vport_by_did(phba, Did) || + vport->cfg_peer_port_login)) { + if (!phba->nvmet_support) { + /* FCPI/NVMEI path. Process Did */ + lpfc_prep_node_fc4type(vport, Did, fc4_type); + return; + } + /* NVMET path. NVMET only cares about NVMEI nodes. */ + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + if (ndlp->nlp_type != NLP_NVME_INITIATOR || + ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) + continue; + spin_lock_irq(&ndlp->lock); + if (ndlp->nlp_DID == Did) + ndlp->nlp_flag &= ~NLP_NVMET_RECOV; + else + ndlp->nlp_flag |= NLP_NVMET_RECOV; + spin_unlock_irq(&ndlp->lock); + } + } +} + +static int +lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type, + uint32_t Size) +{ + struct lpfc_sli_ct_request *Response = + (struct lpfc_sli_ct_request *) mp->virt; + struct lpfc_dmabuf *mlast, *next_mp; + uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType; + uint32_t Did, CTentry; + int Cnt; + struct list_head head; + struct lpfc_nodelist *ndlp = NULL; + + lpfc_set_disctmo(vport); + vport->num_disc_nodes = 0; + vport->fc_ns_retry = 0; + + + list_add_tail(&head, &mp->list); + list_for_each_entry_safe(mp, next_mp, &head, list) { + mlast = mp; + + Cnt = Size > FCELSSIZE ? FCELSSIZE : Size; + + Size -= Cnt; + + if (!ctptr) { + ctptr = (uint32_t *) mlast->virt; + } else + Cnt -= 16; /* subtract length of CT header */ + + /* Loop through entire NameServer list of DIDs */ + while (Cnt >= sizeof(uint32_t)) { + /* Get next DID from NameServer List */ + CTentry = *ctptr++; + Did = ((be32_to_cpu(CTentry)) & Mask_DID); + lpfc_ns_rsp_audit_did(vport, Did, fc4_type); + if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY))) + goto nsout1; + + Cnt -= sizeof(uint32_t); + } + ctptr = NULL; + + } + + /* All GID_FT entries processed. If the driver is running + * in target mode, put impacted nodes into recovery and drop + * the RPI to flush outstanding IO.
+ */ + if (vport->phba->nvmet_support) { + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + if (!(ndlp->nlp_flag & NLP_NVMET_RECOV)) + continue; + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RECOVERY); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_NVMET_RECOV; + spin_unlock_irq(&ndlp->lock); + } + } + +nsout1: + list_del(&head); + return 0; +} + +static void +lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_dmabuf *outp; + struct lpfc_dmabuf *inp; + struct lpfc_sli_ct_request *CTrsp; + struct lpfc_sli_ct_request *CTreq; + struct lpfc_nodelist *ndlp; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + u32 ulp_word4 = get_job_word4(phba, rspiocb); + int rc, type; + + /* First save ndlp, before we overwrite it */ + ndlp = cmdiocb->ndlp; + + /* we pass cmdiocb to state machine which needs rspiocb as well */ + cmdiocb->rsp_iocb = rspiocb; + inp = cmdiocb->cmd_dmabuf; + outp = cmdiocb->rsp_dmabuf; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "GID_FT cmpl: status:x%x/x%x rtry:%d", + ulp_status, ulp_word4, vport->fc_ns_retry); + + /* Ignore response if link flipped after this request was made */ + if (cmdiocb->event_tag != phba->fc_eventTag) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "9043 Event tag mismatch. Ignoring NS rsp\n"); + goto out; + } + + /* Don't bother processing response if vport is being torn down. */ + if (vport->load_flag & FC_UNLOADING) { + if (vport->fc_flag & FC_RSCN_MODE) + lpfc_els_flush_rscn(vport); + goto out; + } + + if (lpfc_els_chk_latt(vport)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0216 Link event during NS query\n"); + if (vport->fc_flag & FC_RSCN_MODE) + lpfc_els_flush_rscn(vport); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + goto out; + } + if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0226 NS query failed due to link event: " + "ulp_status x%x ulp_word4 x%x fc_flag x%x " + "port_state x%x gidft_inp x%x\n", + ulp_status, ulp_word4, vport->fc_flag, + vport->port_state, vport->gidft_inp); + if (vport->fc_flag & FC_RSCN_MODE) + lpfc_els_flush_rscn(vport); + if (vport->gidft_inp) + vport->gidft_inp--; + goto out; + } + + spin_lock_irq(shost->host_lock); + if (vport->fc_flag & FC_RSCN_DEFERRED) { + vport->fc_flag &= ~FC_RSCN_DEFERRED; + spin_unlock_irq(shost->host_lock); + + /* This is a GID_FT completing so the gidft_inp counter was + * incremented before the GID_FT was issued to the wire. 
+ */ + if (vport->gidft_inp) + vport->gidft_inp--; + + /* + * Skip processing the NS response + * Re-issue the NS cmd + */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0151 Process Deferred RSCN Data: x%x x%x\n", + vport->fc_flag, vport->fc_rscn_id_cnt); + lpfc_els_handle_rscn(vport); + + goto out; + } + spin_unlock_irq(shost->host_lock); + + if (ulp_status) { + /* Check for retry */ + if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { + if (ulp_status != IOSTAT_LOCAL_REJECT || + (ulp_word4 & IOERR_PARAM_MASK) != + IOERR_NO_RESOURCES) + vport->fc_ns_retry++; + + type = lpfc_get_gidft_type(vport, cmdiocb); + if (type == 0) + goto out; + + /* CT command is being retried */ + rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, + vport->fc_ns_retry, type); + if (rc == 0) + goto out; + else { /* Unable to send NS cmd */ + if (vport->gidft_inp) + vport->gidft_inp--; + } + } + if (vport->fc_flag & FC_RSCN_MODE) + lpfc_els_flush_rscn(vport); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0257 GID_FT Query error: 0x%x 0x%x\n", + ulp_status, vport->fc_ns_retry); + } else { + /* Good status, continue checking */ + CTreq = (struct lpfc_sli_ct_request *) inp->virt; + CTrsp = (struct lpfc_sli_ct_request *) outp->virt; + if (CTrsp->CommandResponse.bits.CmdRsp == + cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0208 NameServer Rsp Data: x%x x%x " + "x%x x%x sz x%x\n", + vport->fc_flag, + CTreq->un.gid.Fc4Type, + vport->num_disc_nodes, + vport->gidft_inp, + get_job_data_placed(phba, rspiocb)); + + lpfc_ns_rsp(vport, + outp, + CTreq->un.gid.Fc4Type, + get_job_data_placed(phba, rspiocb)); + } else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_RJT) { + /* NameServer Rsp Error */ + if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ) + && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY, + "0269 No NameServer Entries " + "Data: x%x x%x x%x x%x\n", + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), + (uint32_t) CTrsp->ReasonCode, + (uint32_t) CTrsp->Explanation, + vport->fc_flag); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "GID_FT no entry cmd:x%x rsn:x%x exp:x%x", + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), + (uint32_t) CTrsp->ReasonCode, + (uint32_t) CTrsp->Explanation); + } else { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY, + "0240 NameServer Rsp Error " + "Data: x%x x%x x%x x%x\n", + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), + (uint32_t) CTrsp->ReasonCode, + (uint32_t) CTrsp->Explanation, + vport->fc_flag); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "GID_FT rsp err1 cmd:x%x rsn:x%x exp:x%x", + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), + (uint32_t) CTrsp->ReasonCode, + (uint32_t) CTrsp->Explanation); + } + + + } else { + /* NameServer Rsp Error */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0241 NameServer Rsp Error " + "Data: x%x x%x x%x x%x\n", + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), + (uint32_t) CTrsp->ReasonCode, + (uint32_t) CTrsp->Explanation, + vport->fc_flag); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "GID_FT rsp err2 cmd:x%x rsn:x%x exp:x%x", + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), + (uint32_t) CTrsp->ReasonCode, + (uint32_t) CTrsp->Explanation); + } + if (vport->gidft_inp) + vport->gidft_inp--; + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "4216 GID_FT cmpl inp %d disc %d\n", + vport->gidft_inp, 
vport->num_disc_nodes); + + /* Link up / RSCN discovery */ + if ((vport->num_disc_nodes == 0) && + (vport->gidft_inp == 0)) { + /* + * The driver has cycled through all Nports in the RSCN payload. + * Complete the handling by cleaning up and marking the + * current driver state. + */ + if (vport->port_state >= LPFC_DISC_AUTH) { + if (vport->fc_flag & FC_RSCN_MODE) { + lpfc_els_flush_rscn(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */ + spin_unlock_irq(shost->host_lock); + } + else + lpfc_els_flush_rscn(vport); + } + + lpfc_disc_start(vport); + } +out: + lpfc_ct_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); + return; +} + +static void +lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_dmabuf *outp; + struct lpfc_dmabuf *inp; + struct lpfc_sli_ct_request *CTrsp; + struct lpfc_sli_ct_request *CTreq; + struct lpfc_nodelist *ndlp; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + u32 ulp_word4 = get_job_word4(phba, rspiocb); + int rc; + + /* First save ndlp, before we overwrite it */ + ndlp = cmdiocb->ndlp; + + /* we pass cmdiocb to state machine which needs rspiocb as well */ + cmdiocb->rsp_iocb = rspiocb; + inp = cmdiocb->cmd_dmabuf; + outp = cmdiocb->rsp_dmabuf; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "GID_PT cmpl: status:x%x/x%x rtry:%d", + ulp_status, ulp_word4, + vport->fc_ns_retry); + + /* Ignore response if link flipped after this request was made */ + if (cmdiocb->event_tag != phba->fc_eventTag) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "9044 Event tag mismatch. Ignoring NS rsp\n"); + goto out; + } + + /* Don't bother processing response if vport is being torn down. */ + if (vport->load_flag & FC_UNLOADING) { + if (vport->fc_flag & FC_RSCN_MODE) + lpfc_els_flush_rscn(vport); + goto out; + } + + if (lpfc_els_chk_latt(vport)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "4108 Link event during NS query\n"); + if (vport->fc_flag & FC_RSCN_MODE) + lpfc_els_flush_rscn(vport); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + goto out; + } + if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "4166 NS query failed due to link event: " + "ulp_status x%x ulp_word4 x%x fc_flag x%x " + "port_state x%x gidft_inp x%x\n", + ulp_status, ulp_word4, vport->fc_flag, + vport->port_state, vport->gidft_inp); + if (vport->fc_flag & FC_RSCN_MODE) + lpfc_els_flush_rscn(vport); + if (vport->gidft_inp) + vport->gidft_inp--; + goto out; + } + + spin_lock_irq(shost->host_lock); + if (vport->fc_flag & FC_RSCN_DEFERRED) { + vport->fc_flag &= ~FC_RSCN_DEFERRED; + spin_unlock_irq(shost->host_lock); + + /* This is a GID_PT completing so the gidft_inp counter was + * incremented before the GID_PT was issued to the wire. 
+ */ + if (vport->gidft_inp) + vport->gidft_inp--; + + /* + * Skip processing the NS response + * Re-issue the NS cmd + */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "4167 Process Deferred RSCN Data: x%x x%x\n", + vport->fc_flag, vport->fc_rscn_id_cnt); + lpfc_els_handle_rscn(vport); + + goto out; + } + spin_unlock_irq(shost->host_lock); + + if (ulp_status) { + /* Check for retry */ + if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { + if (ulp_status != IOSTAT_LOCAL_REJECT || + (ulp_word4 & IOERR_PARAM_MASK) != + IOERR_NO_RESOURCES) + vport->fc_ns_retry++; + + /* CT command is being retried */ + rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, + vport->fc_ns_retry, GID_PT_N_PORT); + if (rc == 0) + goto out; + else { /* Unable to send NS cmd */ + if (vport->gidft_inp) + vport->gidft_inp--; + } + } + if (vport->fc_flag & FC_RSCN_MODE) + lpfc_els_flush_rscn(vport); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "4103 GID_FT Query error: 0x%x 0x%x\n", + ulp_status, vport->fc_ns_retry); + } else { + /* Good status, continue checking */ + CTreq = (struct lpfc_sli_ct_request *)inp->virt; + CTrsp = (struct lpfc_sli_ct_request *)outp->virt; + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "4105 NameServer Rsp Data: x%x x%x " + "x%x x%x sz x%x\n", + vport->fc_flag, + CTreq->un.gid.Fc4Type, + vport->num_disc_nodes, + vport->gidft_inp, + get_job_data_placed(phba, rspiocb)); + + lpfc_ns_rsp(vport, + outp, + CTreq->un.gid.Fc4Type, + get_job_data_placed(phba, rspiocb)); + } else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_RJT) { + /* NameServer Rsp Error */ + if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ) + && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) { + lpfc_printf_vlog( + vport, KERN_INFO, LOG_DISCOVERY, + "4106 No NameServer Entries " + "Data: x%x x%x x%x x%x\n", + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), + (uint32_t)CTrsp->ReasonCode, + (uint32_t)CTrsp->Explanation, + vport->fc_flag); + + lpfc_debugfs_disc_trc( + vport, LPFC_DISC_TRC_CT, + "GID_PT no entry cmd:x%x rsn:x%x exp:x%x", + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), + (uint32_t)CTrsp->ReasonCode, + (uint32_t)CTrsp->Explanation); + } else { + lpfc_printf_vlog( + vport, KERN_INFO, LOG_DISCOVERY, + "4107 NameServer Rsp Error " + "Data: x%x x%x x%x x%x\n", + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), + (uint32_t)CTrsp->ReasonCode, + (uint32_t)CTrsp->Explanation, + vport->fc_flag); + + lpfc_debugfs_disc_trc( + vport, LPFC_DISC_TRC_CT, + "GID_PT rsp err1 cmd:x%x rsn:x%x exp:x%x", + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), + (uint32_t)CTrsp->ReasonCode, + (uint32_t)CTrsp->Explanation); + } + } else { + /* NameServer Rsp Error */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "4109 NameServer Rsp Error " + "Data: x%x x%x x%x x%x\n", + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), + (uint32_t)CTrsp->ReasonCode, + (uint32_t)CTrsp->Explanation, + vport->fc_flag); + + lpfc_debugfs_disc_trc( + vport, LPFC_DISC_TRC_CT, + "GID_PT rsp err2 cmd:x%x rsn:x%x exp:x%x", + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), + (uint32_t)CTrsp->ReasonCode, + (uint32_t)CTrsp->Explanation); + } + if (vport->gidft_inp) + vport->gidft_inp--; + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6450 GID_PT cmpl inp %d disc %d\n", + vport->gidft_inp, vport->num_disc_nodes); + + /* Link up / RSCN discovery */ + if ((vport->num_disc_nodes == 0) && 
+ (vport->gidft_inp == 0)) { + /* + * The driver has cycled through all Nports in the RSCN payload. + * Complete the handling by cleaning up and marking the + * current driver state. + */ + if (vport->port_state >= LPFC_DISC_AUTH) { + if (vport->fc_flag & FC_RSCN_MODE) { + lpfc_els_flush_rscn(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */ + spin_unlock_irq(shost->host_lock); + } else { + lpfc_els_flush_rscn(vport); + } + } + + lpfc_disc_start(vport); + } +out: + lpfc_ct_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); +} + +static void +lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf; + struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; + struct lpfc_sli_ct_request *CTrsp; + int did, rc, retry; + uint8_t fbits; + struct lpfc_nodelist *ndlp = NULL, *free_ndlp = NULL; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + u32 ulp_word4 = get_job_word4(phba, rspiocb); + + did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId; + did = be32_to_cpu(did); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "GFF_ID cmpl: status:x%x/x%x did:x%x", + ulp_status, ulp_word4, did); + + /* Ignore response if link flipped after this request was made */ + if (cmdiocb->event_tag != phba->fc_eventTag) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "9045 Event tag mismatch. Ignoring NS rsp\n"); + goto iocb_free; + } + + if (ulp_status == IOSTAT_SUCCESS) { + /* Good status, continue checking */ + CTrsp = (struct lpfc_sli_ct_request *) outp->virt; + fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET]; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6431 Process GFF_ID rsp for %08x " + "fbits %02x %s %s\n", + did, fbits, + (fbits & FC4_FEATURE_INIT) ? "Initiator" : " ", + (fbits & FC4_FEATURE_TARGET) ? "Target" : " "); + + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) { + if ((fbits & FC4_FEATURE_INIT) && + !(fbits & FC4_FEATURE_TARGET)) { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY, + "0270 Skip x%x GFF " + "NameServer Rsp Data: (init) " + "x%x x%x\n", did, fbits, + vport->fc_rscn_id_cnt); + goto out; + } + } + } + else { + /* Check for retry */ + if (cmdiocb->retry < LPFC_MAX_NS_RETRY) { + retry = 1; + if (ulp_status == IOSTAT_LOCAL_REJECT) { + switch ((ulp_word4 & + IOERR_PARAM_MASK)) { + + case IOERR_NO_RESOURCES: + /* We don't increment the retry + * count for this case. 
+ */ + break; + case IOERR_LINK_DOWN: + case IOERR_SLI_ABORTED: + case IOERR_SLI_DOWN: + retry = 0; + break; + default: + cmdiocb->retry++; + } + } + else + cmdiocb->retry++; + + if (retry) { + /* CT command is being retried */ + rc = lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, + cmdiocb->retry, did); + if (rc == 0) { + /* success */ + free_ndlp = cmdiocb->ndlp; + lpfc_ct_free_iocb(phba, cmdiocb); + lpfc_nlp_put(free_ndlp); + return; + } + } + } + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0267 NameServer GFF Rsp " + "x%x Error (%d %d) Data: x%x x%x\n", + did, ulp_status, ulp_word4, + vport->fc_flag, vport->fc_rscn_id_cnt); + } + + /* This is a target port, unregistered port, or the GFF_ID failed */ + ndlp = lpfc_setup_disc_node(vport, did); + if (ndlp) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0242 Process x%x GFF " + "NameServer Rsp Data: x%x x%x x%x\n", + did, ndlp->nlp_flag, vport->fc_flag, + vport->fc_rscn_id_cnt); + } else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0243 Skip x%x GFF " + "NameServer Rsp Data: x%x x%x\n", did, + vport->fc_flag, vport->fc_rscn_id_cnt); + } +out: + /* Link up / RSCN discovery */ + if (vport->num_disc_nodes) + vport->num_disc_nodes--; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6451 GFF_ID cmpl inp %d disc %d\n", + vport->gidft_inp, vport->num_disc_nodes); + + if (vport->num_disc_nodes == 0) { + /* + * The driver has cycled through all Nports in the RSCN payload. + * Complete the handling by cleaning up and marking the + * current driver state. + */ + if (vport->port_state >= LPFC_DISC_AUTH) { + if (vport->fc_flag & FC_RSCN_MODE) { + lpfc_els_flush_rscn(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */ + spin_unlock_irq(shost->host_lock); + } + else + lpfc_els_flush_rscn(vport); + } + lpfc_disc_start(vport); + } + +iocb_free: + free_ndlp = cmdiocb->ndlp; + lpfc_ct_free_iocb(phba, cmdiocb); + lpfc_nlp_put(free_ndlp); + return; +} + +static void +lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf; + struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; + struct lpfc_sli_ct_request *CTrsp; + int did; + struct lpfc_nodelist *ndlp = NULL; + struct lpfc_nodelist *ns_ndlp = cmdiocb->ndlp; + uint32_t fc4_data_0, fc4_data_1; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + u32 ulp_word4 = get_job_word4(phba, rspiocb); + + did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId; + did = be32_to_cpu(did); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "GFT_ID cmpl: status:x%x/x%x did:x%x", + ulp_status, ulp_word4, did); + + /* Ignore response if link flipped after this request was made */ + if ((uint32_t)cmdiocb->event_tag != phba->fc_eventTag) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "9046 Event tag mismatch. Ignoring NS rsp\n"); + goto out; + } + + if (ulp_status == IOSTAT_SUCCESS) { + /* Good status, continue checking */ + CTrsp = (struct lpfc_sli_ct_request *)outp->virt; + fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]); + fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6432 Process GFT_ID rsp for %08x " + "Data %08x %08x %s %s\n", + did, fc4_data_0, fc4_data_1, + (fc4_data_0 & LPFC_FC4_TYPE_BITMASK) ? + "FCP" : " ", + (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ? 
+ "NVME" : " "); + + /* Lookup the NPort_ID queried in the GFT_ID and find the + * driver's local node. It's an error if the driver + * doesn't have one. + */ + ndlp = lpfc_findnode_did(vport, did); + if (ndlp) { + /* The bitmask value for FCP and NVME FCP types is + * the same because they are 32 bits distant from + * each other in word0 and word0. + */ + if (fc4_data_0 & LPFC_FC4_TYPE_BITMASK) + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) + ndlp->nlp_fc4_type |= NLP_FC4_NVME; + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY | LOG_NODE, + "3064 Setting ndlp x%px, DID x%06x " + "with FC4 x%08x, Data: x%08x x%08x " + "%d\n", + ndlp, did, ndlp->nlp_fc4_type, + FC_TYPE_FCP, FC_TYPE_NVME, + ndlp->nlp_state); + + if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE && + ndlp->nlp_fc4_type) { + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + /* This is a fabric topology so if discovery + * started with an unsolicited PLOGI, don't + * send a PRLI. Targets don't issue PLOGI or + * PRLI when acting as a target. Likely this is + * an initiator function. + */ + if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_PRLI_ISSUE); + lpfc_issue_els_prli(vport, ndlp, 0); + } + } else if (!ndlp->nlp_fc4_type) { + /* If fc4 type is still unknown, then LOGO */ + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY | LOG_NODE, + "6443 Sending LOGO ndlp x%px," + "DID x%06x with fc4_type: " + "x%08x, state: %d\n", + ndlp, did, ndlp->nlp_fc4_type, + ndlp->nlp_state); + lpfc_issue_els_logo(vport, ndlp, 0); + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_NPR_NODE); + } + } + } else + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "3065 GFT_ID failed x%08x\n", ulp_status); + +out: + lpfc_ct_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ns_ndlp); +} + +static void +lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct lpfc_dmabuf *inp; + struct lpfc_dmabuf *outp; + struct lpfc_sli_ct_request *CTrsp; + struct lpfc_nodelist *ndlp; + int cmdcode, rc; + uint8_t retry; + uint32_t latt; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + u32 ulp_word4 = get_job_word4(phba, rspiocb); + + /* First save ndlp, before we overwrite it */ + ndlp = cmdiocb->ndlp; + + /* we pass cmdiocb to state machine which needs rspiocb as well */ + cmdiocb->rsp_iocb = rspiocb; + + inp = cmdiocb->cmd_dmabuf; + outp = cmdiocb->rsp_dmabuf; + + cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)-> + CommandResponse.bits.CmdRsp); + CTrsp = (struct lpfc_sli_ct_request *) outp->virt; + + latt = lpfc_els_chk_latt(vport); + + /* RFT request completes status CmdRsp */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0209 CT Request completes, latt %d, " + "ulp_status x%x CmdRsp x%x, Context x%x, Tag x%x\n", + latt, ulp_status, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), + get_job_ulpcontext(phba, cmdiocb), cmdiocb->iotag); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "CT cmd cmpl: status:x%x/x%x cmd:x%x", + ulp_status, ulp_word4, cmdcode); + + if (ulp_status) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0268 NS cmd x%x Error (x%x x%x)\n", + cmdcode, ulp_status, ulp_word4); + + if (ulp_status == IOSTAT_LOCAL_REJECT && + (((ulp_word4 & IOERR_PARAM_MASK) == + IOERR_SLI_DOWN) || + ((ulp_word4 & IOERR_PARAM_MASK) == + IOERR_SLI_ABORTED))) + goto out; + + retry = cmdiocb->retry; + if (retry >= 
LPFC_MAX_NS_RETRY) + goto out; + + retry++; + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0250 Retrying NS cmd %x\n", cmdcode); + rc = lpfc_ns_cmd(vport, cmdcode, retry, 0); + if (rc == 0) + goto out; + } + +out: + lpfc_ct_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); + return; +} + +static void +lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + + if (ulp_status == IOSTAT_SUCCESS) { + struct lpfc_dmabuf *outp; + struct lpfc_sli_ct_request *CTrsp; + + outp = cmdiocb->rsp_dmabuf; + CTrsp = (struct lpfc_sli_ct_request *)outp->virt; + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) + vport->ct_flags |= FC_CT_RFT_ID; + } + lpfc_cmpl_ct(phba, cmdiocb, rspiocb); + return; +} + +static void +lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + + if (ulp_status == IOSTAT_SUCCESS) { + struct lpfc_dmabuf *outp; + struct lpfc_sli_ct_request *CTrsp; + + outp = cmdiocb->rsp_dmabuf; + CTrsp = (struct lpfc_sli_ct_request *) outp->virt; + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) + vport->ct_flags |= FC_CT_RNN_ID; + } + lpfc_cmpl_ct(phba, cmdiocb, rspiocb); + return; +} + +static void +lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + + if (ulp_status == IOSTAT_SUCCESS) { + struct lpfc_dmabuf *outp; + struct lpfc_sli_ct_request *CTrsp; + + outp = cmdiocb->rsp_dmabuf; + CTrsp = (struct lpfc_sli_ct_request *)outp->virt; + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) + vport->ct_flags |= FC_CT_RSPN_ID; + } + lpfc_cmpl_ct(phba, cmdiocb, rspiocb); + return; +} + +static void +lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + + if (ulp_status == IOSTAT_SUCCESS) { + struct lpfc_dmabuf *outp; + struct lpfc_sli_ct_request *CTrsp; + + outp = cmdiocb->rsp_dmabuf; + CTrsp = (struct lpfc_sli_ct_request *) outp->virt; + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) + vport->ct_flags |= FC_CT_RSNN_NN; + } + lpfc_cmpl_ct(phba, cmdiocb, rspiocb); + return; +} + +static void +lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + + /* even if it fails we will act as though it succeeded. 
*/ + vport->ct_flags = 0; + lpfc_cmpl_ct(phba, cmdiocb, rspiocb); + return; +} + +static void +lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + + if (ulp_status == IOSTAT_SUCCESS) { + struct lpfc_dmabuf *outp; + struct lpfc_sli_ct_request *CTrsp; + + outp = cmdiocb->rsp_dmabuf; + CTrsp = (struct lpfc_sli_ct_request *)outp->virt; + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) + vport->ct_flags |= FC_CT_RFF_ID; + } + lpfc_cmpl_ct(phba, cmdiocb, rspiocb); + return; +} + +/* + * Although the symbolic port name is thought to be an integer + * as of January 18, 2016, leave it as a string until more of + * the record state becomes defined. + */ +int +lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol, + size_t size) +{ + int n; + + /* + * Use the lpfc board number as the Symbolic Port + * Name object. NPIV is not in play so this integer + * value is sufficient and unique per FC-ID. + */ + n = scnprintf(symbol, size, "%d", vport->phba->brd_no); + return n; +} + + +int +lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, + size_t size) +{ + char fwrev[FW_REV_STR_SIZE] = {0}; + char tmp[MAXHOSTNAMELEN] = {0}; + + memset(symbol, 0, size); + + scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; + + lpfc_decode_firmware_rev(vport->phba, fwrev, 0); + scnprintf(tmp, sizeof(tmp), " FV%s", fwrev); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; + + /* Note :- OS name is "Linux" */ + scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname); + strlcat(symbol, tmp, size); + +buffer_done: + return strnlen(symbol, size); + +} + +static uint32_t +lpfc_find_map_node(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp, *next_ndlp; + struct Scsi_Host *shost; + uint32_t cnt = 0; + + shost = lpfc_shost_from_vport(vport); + spin_lock_irq(shost->host_lock); + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (ndlp->nlp_type & NLP_FABRIC) + continue; + if ((ndlp->nlp_state == NLP_STE_MAPPED_NODE) || + (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)) + cnt++; + } + spin_unlock_irq(shost->host_lock); + return cnt; +} + +/* + * This routine will return the FC4 Type associated with the CT + * GID_FT command. 
+ */ +int +lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb) +{ + struct lpfc_sli_ct_request *CtReq; + struct lpfc_dmabuf *mp; + uint32_t type; + + mp = cmdiocb->cmd_dmabuf; + if (mp == NULL) + return 0; + CtReq = (struct lpfc_sli_ct_request *)mp->virt; + type = (uint32_t)CtReq->un.gid.Fc4Type; + if ((type != SLI_CTPT_FCP) && (type != SLI_CTPT_NVME)) + return 0; + return type; +} + +/* + * lpfc_ns_cmd + * Description: + * Issue Cmd to NameServer + * SLI_CTNS_GID_FT + * SLI_CTNS_RFT_ID + */ +int +lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, + uint8_t retry, uint32_t context) +{ + struct lpfc_nodelist * ndlp; + struct lpfc_hba *phba = vport->phba; + struct lpfc_dmabuf *mp, *bmp; + struct lpfc_sli_ct_request *CtReq; + struct ulp_bde64 *bpl; + void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *) = NULL; + uint32_t *ptr; + uint32_t rsp_size = 1024; + size_t size; + int rc = 0; + + ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) { + rc=1; + goto ns_cmd_exit; + } + + /* fill in BDEs for command */ + /* Allocate buffer for command payload */ + mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!mp) { + rc=2; + goto ns_cmd_exit; + } + + INIT_LIST_HEAD(&mp->list); + mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); + if (!mp->virt) { + rc=3; + goto ns_cmd_free_mp; + } + + /* Allocate buffer for Buffer ptr list */ + bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!bmp) { + rc=4; + goto ns_cmd_free_mpvirt; + } + + INIT_LIST_HEAD(&bmp->list); + bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys)); + if (!bmp->virt) { + rc=5; + goto ns_cmd_free_bmp; + } + + /* NameServer Req */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0236 NameServer Req Data: x%x x%x x%x x%x\n", + cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt, + context); + + bpl = (struct ulp_bde64 *) bmp->virt; + memset(bpl, 0, sizeof(struct ulp_bde64)); + bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) ); + bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) ); + bpl->tus.f.bdeFlags = 0; + if (cmdcode == SLI_CTNS_GID_FT) + bpl->tus.f.bdeSize = GID_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_GID_PT) + bpl->tus.f.bdeSize = GID_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_GFF_ID) + bpl->tus.f.bdeSize = GFF_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_GFT_ID) + bpl->tus.f.bdeSize = GFT_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_RFT_ID) + bpl->tus.f.bdeSize = RFT_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_RNN_ID) + bpl->tus.f.bdeSize = RNN_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_RSPN_ID) + bpl->tus.f.bdeSize = RSPN_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_RSNN_NN) + bpl->tus.f.bdeSize = RSNN_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_DA_ID) + bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_RFF_ID) + bpl->tus.f.bdeSize = RFF_REQUEST_SZ; + else + bpl->tus.f.bdeSize = 0; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + + CtReq = (struct lpfc_sli_ct_request *) mp->virt; + memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request)); + CtReq->RevisionId.bits.Revision = SLI_CT_REVISION; + CtReq->RevisionId.bits.InId = 0; + CtReq->FsType = SLI_CT_DIRECTORY_SERVICE; + CtReq->FsSubType = SLI_CT_DIRECTORY_NAME_SERVER; + CtReq->CommandResponse.bits.Size = 0; + switch (cmdcode) { + case SLI_CTNS_GID_FT: + CtReq->CommandResponse.bits.CmdRsp = + cpu_to_be16(SLI_CTNS_GID_FT); + CtReq->un.gid.Fc4Type = context; + + if (vport->port_state < LPFC_NS_QRY) + vport->port_state = LPFC_NS_QRY; +
lpfc_set_disctmo(vport); + cmpl = lpfc_cmpl_ct_cmd_gid_ft; + rsp_size = FC_MAX_NS_RSP; + break; + + case SLI_CTNS_GID_PT: + CtReq->CommandResponse.bits.CmdRsp = + cpu_to_be16(SLI_CTNS_GID_PT); + CtReq->un.gid.PortType = context; + + if (vport->port_state < LPFC_NS_QRY) + vport->port_state = LPFC_NS_QRY; + lpfc_set_disctmo(vport); + cmpl = lpfc_cmpl_ct_cmd_gid_pt; + rsp_size = FC_MAX_NS_RSP; + break; + + case SLI_CTNS_GFF_ID: + CtReq->CommandResponse.bits.CmdRsp = + cpu_to_be16(SLI_CTNS_GFF_ID); + CtReq->un.gff.PortId = cpu_to_be32(context); + cmpl = lpfc_cmpl_ct_cmd_gff_id; + break; + + case SLI_CTNS_GFT_ID: + CtReq->CommandResponse.bits.CmdRsp = + cpu_to_be16(SLI_CTNS_GFT_ID); + CtReq->un.gft.PortId = cpu_to_be32(context); + cmpl = lpfc_cmpl_ct_cmd_gft_id; + break; + + case SLI_CTNS_RFT_ID: + vport->ct_flags &= ~FC_CT_RFT_ID; + CtReq->CommandResponse.bits.CmdRsp = + cpu_to_be16(SLI_CTNS_RFT_ID); + CtReq->un.rft.port_id = cpu_to_be32(vport->fc_myDID); + + /* Register Application Services type if vmid enabled. */ + if (phba->cfg_vmid_app_header) + CtReq->un.rft.app_serv_reg = + cpu_to_be32(RFT_APP_SERV_REG); + + /* Register FC4 FCP type if enabled. */ + if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || + vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP) + CtReq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG); + + /* Register NVME type if enabled. */ + if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || + vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) + CtReq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG); + + ptr = (uint32_t *)CtReq; + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6433 Issue RFT (%s %s %s): %08x %08x %08x " + "%08x %08x %08x %08x %08x\n", + CtReq->un.rft.fcp_reg ? "FCP" : " ", + CtReq->un.rft.nvme_reg ? "NVME" : " ", + CtReq->un.rft.app_serv_reg ? "APPS" : " ", + *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3), + *(ptr + 4), *(ptr + 5), + *(ptr + 6), *(ptr + 7)); + cmpl = lpfc_cmpl_ct_cmd_rft_id; + break; + + case SLI_CTNS_RNN_ID: + vport->ct_flags &= ~FC_CT_RNN_ID; + CtReq->CommandResponse.bits.CmdRsp = + cpu_to_be16(SLI_CTNS_RNN_ID); + CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID); + memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename, + sizeof(struct lpfc_name)); + cmpl = lpfc_cmpl_ct_cmd_rnn_id; + break; + + case SLI_CTNS_RSPN_ID: + vport->ct_flags &= ~FC_CT_RSPN_ID; + CtReq->CommandResponse.bits.CmdRsp = + cpu_to_be16(SLI_CTNS_RSPN_ID); + CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID); + size = sizeof(CtReq->un.rspn.symbname); + CtReq->un.rspn.len = + lpfc_vport_symbolic_port_name(vport, + CtReq->un.rspn.symbname, size); + cmpl = lpfc_cmpl_ct_cmd_rspn_id; + break; + case SLI_CTNS_RSNN_NN: + vport->ct_flags &= ~FC_CT_RSNN_NN; + CtReq->CommandResponse.bits.CmdRsp = + cpu_to_be16(SLI_CTNS_RSNN_NN); + memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename, + sizeof(struct lpfc_name)); + size = sizeof(CtReq->un.rsnn.symbname); + CtReq->un.rsnn.len = + lpfc_vport_symbolic_node_name(vport, + CtReq->un.rsnn.symbname, size); + cmpl = lpfc_cmpl_ct_cmd_rsnn_nn; + break; + case SLI_CTNS_DA_ID: + /* Implement DA_ID Nameserver request */ + CtReq->CommandResponse.bits.CmdRsp = + cpu_to_be16(SLI_CTNS_DA_ID); + CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID); + cmpl = lpfc_cmpl_ct_cmd_da_id; + break; + case SLI_CTNS_RFF_ID: + vport->ct_flags &= ~FC_CT_RFF_ID; + CtReq->CommandResponse.bits.CmdRsp = + cpu_to_be16(SLI_CTNS_RFF_ID); + CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID); + CtReq->un.rff.fbits = FC4_FEATURE_INIT; + + /* The driver always supports FC_TYPE_FCP. 
However, the + * caller can specify NVME (type x28) as well. But only + * if that FC4 type is enabled. + */ + if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) && + (context == FC_TYPE_NVME)) { + if ((vport == phba->pport) && phba->nvmet_support) { + CtReq->un.rff.fbits = (FC4_FEATURE_TARGET | + FC4_FEATURE_NVME_DISC); + lpfc_nvmet_update_targetport(phba); + } else { + lpfc_nvme_update_localport(vport); + } + CtReq->un.rff.type_code = context; + + } else if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) && + (context == FC_TYPE_FCP)) + CtReq->un.rff.type_code = context; + + else + goto ns_cmd_free_bmpvirt; + + ptr = (uint32_t *)CtReq; + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6434 Issue RFF (%s): %08x %08x %08x %08x " + "%08x %08x %08x %08x\n", + (context == FC_TYPE_NVME) ? "NVME" : "FCP", + *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3), + *(ptr + 4), *(ptr + 5), + *(ptr + 6), *(ptr + 7)); + cmpl = lpfc_cmpl_ct_cmd_rff_id; + break; + } + /* The lpfc_ct_cmd/lpfc_gen_req shall increment ndlp reference count + * to hold ndlp reference for the corresponding callback function. + */ + if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) { + /* On success, the cmpl function will free the buffers */ + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "Issue CT cmd: cmd:x%x did:x%x", + cmdcode, ndlp->nlp_DID, 0); + return 0; + } + rc=6; + +ns_cmd_free_bmpvirt: + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); +ns_cmd_free_bmp: + kfree(bmp); +ns_cmd_free_mpvirt: + lpfc_mbuf_free(phba, mp->virt, mp->phys); +ns_cmd_free_mp: + kfree(mp); +ns_cmd_exit: + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0266 Issue NameServer Req x%x err %d Data: x%x x%x\n", + cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt); + return 1; +} + +/** + * lpfc_fdmi_rprt_defer - Check for any deferred FDMI RPRT commands + * @phba: Pointer to HBA context object. + * @mask: Initial port attributes mask + * + * This function checks to see if any vports have deferred their FDMI RPRT. + * A vport's RPRT may be deferred if it is issued before the primary port's + * RHBA completes. + */ +static void +lpfc_fdmi_rprt_defer(struct lpfc_hba *phba, uint32_t mask) +{ + struct lpfc_vport **vports; + struct lpfc_vport *vport; + struct lpfc_nodelist *ndlp; + int i; + + phba->hba_flag |= HBA_RHBA_CMPL; + vports = lpfc_create_vport_work_array(phba); + if (vports) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + vport = vports[i]; + ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); + if (!ndlp) + continue; + if (vport->ct_flags & FC_CT_RPRT_DEFER) { + vport->ct_flags &= ~FC_CT_RPRT_DEFER; + vport->fdmi_port_mask = mask; + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0); + } + } + } + lpfc_destroy_vport_work_array(phba, vports); +} + +/** + * lpfc_cmpl_ct_disc_fdmi - Handle a discovery FDMI completion + * @phba: Pointer to HBA context object. + * @cmdiocb: Pointer to the command IOCBQ. + * @rspiocb: Pointer to the response IOCBQ. + * + * This function handles the completion of a driver-initiated FDMI + * CT command issued during discovery.
+ */ +static void +lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf; + struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; + struct lpfc_sli_ct_request *CTcmd = inp->virt; + struct lpfc_sli_ct_request *CTrsp = outp->virt; + __be16 fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp; + __be16 fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp; + struct lpfc_nodelist *ndlp, *free_ndlp = NULL; + uint32_t latt, cmd, err; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + u32 ulp_word4 = get_job_word4(phba, rspiocb); + + latt = lpfc_els_chk_latt(vport); + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "FDMI cmpl: status:x%x/x%x latt:%d", + ulp_status, ulp_word4, latt); + + if (latt || ulp_status) { + + /* Look for a retryable error */ + if (ulp_status == IOSTAT_LOCAL_REJECT) { + switch ((ulp_word4 & IOERR_PARAM_MASK)) { + case IOERR_SLI_ABORTED: + case IOERR_SLI_DOWN: + /* Driver aborted this IO. No retry as error + * is likely Offline->Online or some adapter + * error. Recovery will try again. + */ + break; + case IOERR_ABORT_IN_PROGRESS: + case IOERR_SEQUENCE_TIMEOUT: + case IOERR_ILLEGAL_FRAME: + case IOERR_NO_RESOURCES: + case IOERR_ILLEGAL_COMMAND: + cmdiocb->retry++; + if (cmdiocb->retry >= LPFC_FDMI_MAX_RETRY) + break; + + /* Retry the same FDMI command */ + err = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, + cmdiocb, 0); + if (err == IOCB_ERROR) + break; + return; + default: + break; + } + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0229 FDMI cmd %04x failed, latt = %d " + "ulp_status: x%x, rid x%x\n", + be16_to_cpu(fdmi_cmd), latt, ulp_status, + ulp_word4); + } + + free_ndlp = cmdiocb->ndlp; + lpfc_ct_free_iocb(phba, cmdiocb); + lpfc_nlp_put(free_ndlp); + + ndlp = lpfc_findnode_did(vport, FDMI_DID); + if (!ndlp) + return; + + /* Check for a CT LS_RJT response */ + cmd = be16_to_cpu(fdmi_cmd); + if (be16_to_cpu(fdmi_rsp) == SLI_CT_RESPONSE_FS_RJT) { + /* FDMI rsp failed */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS, + "0220 FDMI cmd failed FS_RJT Data: x%x", cmd); + + /* Should we fallback to FDMI-2 / FDMI-1 ? */ + switch (cmd) { + case SLI_MGMT_RHBA: + if (vport->fdmi_hba_mask == LPFC_FDMI2_HBA_ATTR) { + /* Fallback to FDMI-1 for HBA attributes */ + vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR; + + /* If HBA attributes are FDMI1, so should + * port attributes be for consistency. 
+ */ + vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR; + /* Start over */ + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); + } + return; + + case SLI_MGMT_RPRT: + if (vport->port_type != LPFC_PHYSICAL_PORT) { + ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); + if (!ndlp) + return; + } + if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) { + /* Fallback to FDMI-1 */ + vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR; + /* Start over */ + lpfc_fdmi_cmd(vport, ndlp, cmd, 0); + return; + } + if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) { + vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; + /* Retry the same command */ + lpfc_fdmi_cmd(vport, ndlp, cmd, 0); + } + return; + + case SLI_MGMT_RPA: + /* No retry on Vendor, RPA only done on physical port */ + if (phba->link_flag & LS_CT_VEN_RPA) { + phba->link_flag &= ~LS_CT_VEN_RPA; + if (phba->cmf_active_mode == LPFC_CFG_OFF) + return; + lpfc_printf_log(phba, KERN_WARNING, + LOG_DISCOVERY | LOG_ELS, + "6460 VEN FDMI RPA RJT\n"); + return; + } + if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) { + /* Fallback to FDMI-1 */ + vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR; + vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR; + /* Start over */ + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); + return; + } + if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) { + vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; + /* Retry the same command */ + lpfc_fdmi_cmd(vport, ndlp, cmd, 0); + } + return; + } + } + + /* + * On success, need to cycle thru FDMI registration for discovery + * DHBA -> DPRT -> RHBA -> RPA (physical port) + * DPRT -> RPRT (vports) + */ + switch (cmd) { + case SLI_MGMT_RHBA: + /* Check for any RPRTs deferred till after RHBA completes */ + lpfc_fdmi_rprt_defer(phba, vport->fdmi_port_mask); + + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, 0); + break; + + case SLI_MGMT_DHBA: + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); + break; + + case SLI_MGMT_DPRT: + if (vport->port_type == LPFC_PHYSICAL_PORT) { + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA, 0); + } else { + ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); + if (!ndlp) + return; + + /* Only issue a RPRT for the vport if the RHBA + * for the physical port completes successfully. + * We may have to defer the RPRT accordingly. 
+ */ + if (phba->hba_flag & HBA_RHBA_CMPL) { + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0); + } else { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY, + "6078 RPRT deferred\n"); + vport->ct_flags |= FC_CT_RPRT_DEFER; + } + } + break; + case SLI_MGMT_RPA: + if (vport->port_type == LPFC_PHYSICAL_PORT && + phba->sli4_hba.pc_sli4_params.mi_ver) { + /* mi is only for the physical port, no vports */ + if (phba->link_flag & LS_CT_VEN_RPA) { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY | LOG_ELS | + LOG_CGN_MGMT, + "6449 VEN RPA FDMI Success\n"); + phba->link_flag &= ~LS_CT_VEN_RPA; + break; + } + + lpfc_printf_log(phba, KERN_INFO, + LOG_DISCOVERY | LOG_CGN_MGMT, + "6210 Issue Vendor MI FDMI %x\n", + phba->sli4_hba.pc_sli4_params.mi_ver); + + /* CGN is only for the physical port, no vports */ + if (lpfc_fdmi_cmd(vport, ndlp, cmd, + LPFC_FDMI_VENDOR_ATTR_mi) == 0) + phba->link_flag |= LS_CT_VEN_RPA; + lpfc_printf_log(phba, KERN_INFO, + LOG_DISCOVERY | LOG_ELS, + "6458 Send MI FDMI:%x Flag x%x\n", + phba->sli4_hba.pc_sli4_params.mi_ver, + phba->link_flag); + } else { + lpfc_printf_log(phba, KERN_INFO, + LOG_DISCOVERY | LOG_ELS, + "6459 No FDMI VEN MI support - " + "RPA Success\n"); + } + break; + } + return; +} + + +/** + * lpfc_fdmi_change_check - Check for changed FDMI parameters + * @vport: pointer to a host virtual N_Port data structure. + * + * Check how many mapped NPorts we are connected to. + * Check if our hostname changed. + * Called from the hbeat timeout routine to check if any FDMI parameters + * changed. If so, re-register those Attributes. + */ +void +lpfc_fdmi_change_check(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp; + uint16_t cnt; + + if (!lpfc_is_link_up(phba)) + return; + + /* Must be connected to a Fabric */ + if (!(vport->fc_flag & FC_FABRIC)) + return; + + ndlp = lpfc_findnode_did(vport, FDMI_DID); + if (!ndlp) + return; + + /* Check if system hostname changed */ + if (strcmp(phba->os_host_name, init_utsname()->nodename)) { + memset(phba->os_host_name, 0, sizeof(phba->os_host_name)); + scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s", + init_utsname()->nodename); + lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); + + /* Since this affects multiple HBA and PORT attributes, we need + * to de-register and go thru the whole FDMI registration cycle. + * DHBA -> DPRT -> RHBA -> RPA (physical port) + * DPRT -> RPRT (vports) + */ + if (vport->port_type == LPFC_PHYSICAL_PORT) { + /* For extra Vendor RPA */ + phba->link_flag &= ~LS_CT_VEN_RPA; + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); + } else { + ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); + if (!ndlp) + return; + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); + } + + /* Since this code path registers all the port attributes, + * we can just return without further checking.
+ */ + return; + } + + if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc)) + return; + + /* Check if the number of mapped NPorts changed */ + cnt = lpfc_find_map_node(vport); + if (cnt == vport->fdmi_num_disc) + return; + + if (vport->port_type == LPFC_PHYSICAL_PORT) { + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, + LPFC_FDMI_PORT_ATTR_num_disc); + } else { + ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); + if (!ndlp) + return; + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, + LPFC_FDMI_PORT_ATTR_num_disc); + } +} + +static inline int +lpfc_fdmi_set_attr_u32(void *attr, uint16_t attrtype, uint32_t attrval) +{ + struct lpfc_fdmi_attr_u32 *ae = attr; + int size = sizeof(*ae); + + ae->type = cpu_to_be16(attrtype); + ae->len = cpu_to_be16(size); + ae->value_u32 = cpu_to_be32(attrval); + + return size; +} + +static inline int +lpfc_fdmi_set_attr_wwn(void *attr, uint16_t attrtype, struct lpfc_name *wwn) +{ + struct lpfc_fdmi_attr_wwn *ae = attr; + int size = sizeof(*ae); + + ae->type = cpu_to_be16(attrtype); + ae->len = cpu_to_be16(size); + /* WWN's assumed to be bytestreams - Big Endian presentation */ + memcpy(ae->name, wwn, + min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64))); + + return size; +} + +static inline int +lpfc_fdmi_set_attr_fullwwn(void *attr, uint16_t attrtype, + struct lpfc_name *wwnn, struct lpfc_name *wwpn) +{ + struct lpfc_fdmi_attr_fullwwn *ae = attr; + u8 *nname = ae->nname; + u8 *pname = ae->pname; + int size = sizeof(*ae); + + ae->type = cpu_to_be16(attrtype); + ae->len = cpu_to_be16(size); + /* WWN's assumed to be bytestreams - Big Endian presentation */ + memcpy(nname, wwnn, + min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64))); + memcpy(pname, wwpn, + min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64))); + + return size; +} + +static inline int +lpfc_fdmi_set_attr_string(void *attr, uint16_t attrtype, char *attrstring) +{ + struct lpfc_fdmi_attr_string *ae = attr; + int len, size; + + /* + * We are trusting the caller that if a fdmi string field + * is capped at 64 bytes, the caller passes in a string of + * 64 bytes or less. + */ + + strncpy(ae->value_string, attrstring, sizeof(ae->value_string)); + len = strnlen(ae->value_string, sizeof(ae->value_string)); + /* round string length to a 32bit boundary. Ensure there's a NULL */ + len += (len & 3) ? 
(4 - (len & 3)) : 4; + /* size is Type/Len (4 bytes) plus string length */ + size = FOURBYTES + len; + + ae->type = cpu_to_be16(attrtype); + ae->len = cpu_to_be16(size); + + return size; +} + +/* Bitfields for FC4 Types that can be reported */ +#define ATTR_FC4_CT 0x00000001 +#define ATTR_FC4_FCP 0x00000002 +#define ATTR_FC4_NVME 0x00000004 + +static inline int +lpfc_fdmi_set_attr_fc4types(void *attr, uint16_t attrtype, uint32_t typemask) +{ + struct lpfc_fdmi_attr_fc4types *ae = attr; + int size = sizeof(*ae); + + ae->type = cpu_to_be16(attrtype); + ae->len = cpu_to_be16(size); + + if (typemask & ATTR_FC4_FCP) + ae->value_types[2] = 0x01; /* Type 0x8 - FCP */ + + if (typemask & ATTR_FC4_CT) + ae->value_types[7] = 0x01; /* Type 0x20 - CT */ + + if (typemask & ATTR_FC4_NVME) + ae->value_types[6] = 0x01; /* Type 0x28 - NVME */ + + return size; +} + +/* Routines for all individual HBA attributes */ +static int +lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_wwn(attr, RHBA_NODENAME, + &vport->fc_sparam.nodeName); +} + +static int +lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, void *attr) +{ + /* This string MUST be consistent with other FC platforms + * supported by Broadcom. + */ + return lpfc_fdmi_set_attr_string(attr, RHBA_MANUFACTURER, + "Emulex Corporation"); +} + +static int +lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, void *attr) +{ + struct lpfc_hba *phba = vport->phba; + + return lpfc_fdmi_set_attr_string(attr, RHBA_SERIAL_NUMBER, + phba->SerialNumber); +} + +static int +lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, void *attr) +{ + struct lpfc_hba *phba = vport->phba; + + return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL, + phba->ModelName); +} + +static int +lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, void *attr) +{ + struct lpfc_hba *phba = vport->phba; + + return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL_DESCRIPTION, + phba->ModelDesc); +} + +static int +lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, void *attr) +{ + struct lpfc_hba *phba = vport->phba; + lpfc_vpd_t *vp = &phba->vpd; + char buf[16] = { 0 }; + + snprintf(buf, sizeof(buf), "%08x", vp->rev.biuRev); + + return lpfc_fdmi_set_attr_string(attr, RHBA_HARDWARE_VERSION, buf); +} + +static int +lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_string(attr, RHBA_DRIVER_VERSION, + lpfc_release_version); +} + +static int +lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, void *attr) +{ + struct lpfc_hba *phba = vport->phba; + char buf[64] = { 0 }; + + if (phba->sli_rev == LPFC_SLI_REV4) { + lpfc_decode_firmware_rev(phba, buf, 1); + + return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION, + buf); + } + + return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION, + phba->OptionROMVersion); +} + +static int +lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, void *attr) +{ + struct lpfc_hba *phba = vport->phba; + char buf[64] = { 0 }; + + lpfc_decode_firmware_rev(phba, buf, 1); + + return lpfc_fdmi_set_attr_string(attr, RHBA_FIRMWARE_VERSION, buf); +} + +static int +lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, void *attr) +{ + char buf[256] = { 0 }; + + snprintf(buf, sizeof(buf), "%s %s %s", + init_utsname()->sysname, + init_utsname()->release, + init_utsname()->version); + + return lpfc_fdmi_set_attr_string(attr, RHBA_OS_NAME_VERSION, buf); +} + +static int +lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_u32(attr, 
RHBA_MAX_CT_PAYLOAD_LEN, + LPFC_MAX_CT_SIZE); +} + +static int +lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, void *attr) +{ + char buf[256] = { 0 }; + + lpfc_vport_symbolic_node_name(vport, buf, sizeof(buf)); + + return lpfc_fdmi_set_attr_string(attr, RHBA_SYM_NODENAME, buf); +} + +static int +lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_u32(attr, RHBA_VENDOR_INFO, 0); +} + +static int +lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, void *attr) +{ + /* Each driver instance corresponds to a single port */ + return lpfc_fdmi_set_attr_u32(attr, RHBA_NUM_PORTS, 1); +} + +static int +lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_wwn(attr, RHBA_FABRIC_WWNN, + &vport->fabric_nodename); +} + +static int +lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, void *attr) +{ + struct lpfc_hba *phba = vport->phba; + + return lpfc_fdmi_set_attr_string(attr, RHBA_BIOS_VERSION, + phba->BIOSVersion); +} + +static int +lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, void *attr) +{ + /* Driver doesn't have access to this information */ + return lpfc_fdmi_set_attr_u32(attr, RHBA_BIOS_STATE, 0); +} + +static int +lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_string(attr, RHBA_VENDOR_ID, "EMULEX"); +} + +/* + * Routines for all individual PORT attributes + */ + +static int +lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, void *attr) +{ + struct lpfc_hba *phba = vport->phba; + u32 fc4types; + + fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP); + + /* Check to see if Firmware supports NVME and on physical port */ + if ((phba->sli_rev == LPFC_SLI_REV4) && (vport == phba->pport) && + phba->sli4_hba.pc_sli4_params.nvme) + fc4types |= ATTR_FC4_NVME; + + return lpfc_fdmi_set_attr_fc4types(attr, RPRT_SUPPORTED_FC4_TYPES, + fc4types); +} + +static int +lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, void *attr) +{ + struct lpfc_hba *phba = vport->phba; + u32 speeds = 0; + u32 tcfg; + u8 i, cnt; + + if (!(phba->hba_flag & HBA_FCOE_MODE)) { + cnt = 0; + if (phba->sli_rev == LPFC_SLI_REV4) { + tcfg = phba->sli4_hba.conf_trunk; + for (i = 0; i < 4; i++, tcfg >>= 1) + if (tcfg & 1) + cnt++; + } + + if (cnt > 2) { /* 4 lane trunk group */ + if (phba->lmt & LMT_64Gb) + speeds |= HBA_PORTSPEED_256GFC; + if (phba->lmt & LMT_32Gb) + speeds |= HBA_PORTSPEED_128GFC; + if (phba->lmt & LMT_16Gb) + speeds |= HBA_PORTSPEED_64GFC; + } else if (cnt) { /* 2 lane trunk group */ + if (phba->lmt & LMT_128Gb) + speeds |= HBA_PORTSPEED_256GFC; + if (phba->lmt & LMT_64Gb) + speeds |= HBA_PORTSPEED_128GFC; + if (phba->lmt & LMT_32Gb) + speeds |= HBA_PORTSPEED_64GFC; + if (phba->lmt & LMT_16Gb) + speeds |= HBA_PORTSPEED_32GFC; + } else { + if (phba->lmt & LMT_256Gb) + speeds |= HBA_PORTSPEED_256GFC; + if (phba->lmt & LMT_128Gb) + speeds |= HBA_PORTSPEED_128GFC; + if (phba->lmt & LMT_64Gb) + speeds |= HBA_PORTSPEED_64GFC; + if (phba->lmt & LMT_32Gb) + speeds |= HBA_PORTSPEED_32GFC; + if (phba->lmt & LMT_16Gb) + speeds |= HBA_PORTSPEED_16GFC; + if (phba->lmt & LMT_10Gb) + speeds |= HBA_PORTSPEED_10GFC; + if (phba->lmt & LMT_8Gb) + speeds |= HBA_PORTSPEED_8GFC; + if (phba->lmt & LMT_4Gb) + speeds |= HBA_PORTSPEED_4GFC; + if (phba->lmt & LMT_2Gb) + speeds |= HBA_PORTSPEED_2GFC; + if (phba->lmt & LMT_1Gb) + speeds |= HBA_PORTSPEED_1GFC; + } + } else { + /* FCoE links support only one speed */ + switch (phba->fc_linkspeed) { + case 
LPFC_ASYNC_LINK_SPEED_10GBPS: + speeds = HBA_PORTSPEED_10GE; + break; + case LPFC_ASYNC_LINK_SPEED_25GBPS: + speeds = HBA_PORTSPEED_25GE; + break; + case LPFC_ASYNC_LINK_SPEED_40GBPS: + speeds = HBA_PORTSPEED_40GE; + break; + case LPFC_ASYNC_LINK_SPEED_100GBPS: + speeds = HBA_PORTSPEED_100GE; + break; + } + } + + return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_SPEED, speeds); +} + +static int +lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, void *attr) +{ + struct lpfc_hba *phba = vport->phba; + u32 speeds = 0; + + if (!(phba->hba_flag & HBA_FCOE_MODE)) { + switch (phba->fc_linkspeed) { + case LPFC_LINK_SPEED_1GHZ: + speeds = HBA_PORTSPEED_1GFC; + break; + case LPFC_LINK_SPEED_2GHZ: + speeds = HBA_PORTSPEED_2GFC; + break; + case LPFC_LINK_SPEED_4GHZ: + speeds = HBA_PORTSPEED_4GFC; + break; + case LPFC_LINK_SPEED_8GHZ: + speeds = HBA_PORTSPEED_8GFC; + break; + case LPFC_LINK_SPEED_10GHZ: + speeds = HBA_PORTSPEED_10GFC; + break; + case LPFC_LINK_SPEED_16GHZ: + speeds = HBA_PORTSPEED_16GFC; + break; + case LPFC_LINK_SPEED_32GHZ: + speeds = HBA_PORTSPEED_32GFC; + break; + case LPFC_LINK_SPEED_64GHZ: + speeds = HBA_PORTSPEED_64GFC; + break; + case LPFC_LINK_SPEED_128GHZ: + speeds = HBA_PORTSPEED_128GFC; + break; + case LPFC_LINK_SPEED_256GHZ: + speeds = HBA_PORTSPEED_256GFC; + break; + default: + speeds = HBA_PORTSPEED_UNKNOWN; + break; + } + } else { + switch (phba->fc_linkspeed) { + case LPFC_ASYNC_LINK_SPEED_10GBPS: + speeds = HBA_PORTSPEED_10GE; + break; + case LPFC_ASYNC_LINK_SPEED_25GBPS: + speeds = HBA_PORTSPEED_25GE; + break; + case LPFC_ASYNC_LINK_SPEED_40GBPS: + speeds = HBA_PORTSPEED_40GE; + break; + case LPFC_ASYNC_LINK_SPEED_100GBPS: + speeds = HBA_PORTSPEED_100GE; + break; + default: + speeds = HBA_PORTSPEED_UNKNOWN; + break; + } + } + + return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_SPEED, speeds); +} + +static int +lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, void *attr) +{ + struct serv_parm *hsp = (struct serv_parm *)&vport->fc_sparam; + + return lpfc_fdmi_set_attr_u32(attr, RPRT_MAX_FRAME_SIZE, + (((uint32_t)hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) | + (uint32_t)hsp->cmn.bbRcvSizeLsb); +} + +static int +lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, void *attr) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + char buf[64] = { 0 }; + + snprintf(buf, sizeof(buf), "/sys/class/scsi_host/host%d", + shost->host_no); + + return lpfc_fdmi_set_attr_string(attr, RPRT_OS_DEVICE_NAME, buf); +} + +static int +lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, void *attr) +{ + char buf[64] = { 0 }; + + scnprintf(buf, sizeof(buf), "%s", vport->phba->os_host_name); + + return lpfc_fdmi_set_attr_string(attr, RPRT_HOST_NAME, buf); +} + +static int +lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_wwn(attr, RPRT_NODENAME, + &vport->fc_sparam.nodeName); +} + +static int +lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_wwn(attr, RPRT_PORTNAME, + &vport->fc_sparam.portName); +} + +static int +lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, void *attr) +{ + char buf[256] = { 0 }; + + lpfc_vport_symbolic_port_name(vport, buf, sizeof(buf)); + + return lpfc_fdmi_set_attr_string(attr, RPRT_SYM_PORTNAME, buf); +} + +static int +lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, void *attr) +{ + struct lpfc_hba *phba = vport->phba; + + return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_TYPE, + (phba->fc_topology == LPFC_TOPOLOGY_LOOP) ? 
+ LPFC_FDMI_PORTTYPE_NLPORT : + LPFC_FDMI_PORTTYPE_NPORT); +} + +static int +lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_CLASS, + FC_COS_CLASS2 | FC_COS_CLASS3); +} + +static int +lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_wwn(attr, RPRT_FABRICNAME, + &vport->fabric_portname); +} + +static int +lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, void *attr) +{ + struct lpfc_hba *phba = vport->phba; + u32 fc4types; + + fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP); + + /* Check to see if NVME is configured or not */ + if (vport == phba->pport && + phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + fc4types |= ATTR_FC4_NVME; + + return lpfc_fdmi_set_attr_fc4types(attr, RPRT_ACTIVE_FC4_TYPES, + fc4types); +} + +static int +lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_STATE, + LPFC_FDMI_PORTSTATE_ONLINE); +} + +static int +lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, void *attr) +{ + vport->fdmi_num_disc = lpfc_find_map_node(vport); + + return lpfc_fdmi_set_attr_u32(attr, RPRT_DISC_PORT, + vport->fdmi_num_disc); +} + +static int +lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_ID, vport->fc_myDID); +} + +static int +lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_SERVICE, + "Smart SAN Initiator"); +} + +static int +lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_fullwwn(attr, RPRT_SMART_GUID, + &vport->fc_sparam.nodeName, + &vport->fc_sparam.portName); +} + +static int +lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_VERSION, + "Smart SAN Version 2.0"); +} + +static int +lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, void *attr) +{ + struct lpfc_hba *phba = vport->phba; + + return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_MODEL, + phba->ModelName); +} + +static int +lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, void *attr) +{ + /* SRIOV (type 3) is not supported */ + + return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_PORT_INFO, + (vport->vpi) ? 
2 /* NPIV */ : 1 /* Physical */); +} + +static int +lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_QOS, 0); +} + +static int +lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, void *attr) +{ + return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_SECURITY, 1); +} + +static int +lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport, void *attr) +{ + struct lpfc_hba *phba = vport->phba; + char buf[32] = { 0 }; + + sprintf(buf, "ELXE2EM:%04d", phba->sli4_hba.pc_sli4_params.mi_ver); + + return lpfc_fdmi_set_attr_string(attr, RPRT_VENDOR_MI, buf); +} + +/* RHBA attribute jump table */ +static int (*lpfc_fdmi_hba_action[]) + (struct lpfc_vport *vport, void *attrbuf) = { + /* Action routine Mask bit Attribute type */ + lpfc_fdmi_hba_attr_wwnn, /* bit0 RHBA_NODENAME */ + lpfc_fdmi_hba_attr_manufacturer, /* bit1 RHBA_MANUFACTURER */ + lpfc_fdmi_hba_attr_sn, /* bit2 RHBA_SERIAL_NUMBER */ + lpfc_fdmi_hba_attr_model, /* bit3 RHBA_MODEL */ + lpfc_fdmi_hba_attr_description, /* bit4 RHBA_MODEL_DESCRIPTION */ + lpfc_fdmi_hba_attr_hdw_ver, /* bit5 RHBA_HARDWARE_VERSION */ + lpfc_fdmi_hba_attr_drvr_ver, /* bit6 RHBA_DRIVER_VERSION */ + lpfc_fdmi_hba_attr_rom_ver, /* bit7 RHBA_OPTION_ROM_VERSION */ + lpfc_fdmi_hba_attr_fmw_ver, /* bit8 RHBA_FIRMWARE_VERSION */ + lpfc_fdmi_hba_attr_os_ver, /* bit9 RHBA_OS_NAME_VERSION */ + lpfc_fdmi_hba_attr_ct_len, /* bit10 RHBA_MAX_CT_PAYLOAD_LEN */ + lpfc_fdmi_hba_attr_symbolic_name, /* bit11 RHBA_SYM_NODENAME */ + lpfc_fdmi_hba_attr_vendor_info, /* bit12 RHBA_VENDOR_INFO */ + lpfc_fdmi_hba_attr_num_ports, /* bit13 RHBA_NUM_PORTS */ + lpfc_fdmi_hba_attr_fabric_wwnn, /* bit14 RHBA_FABRIC_WWNN */ + lpfc_fdmi_hba_attr_bios_ver, /* bit15 RHBA_BIOS_VERSION */ + lpfc_fdmi_hba_attr_bios_state, /* bit16 RHBA_BIOS_STATE */ + lpfc_fdmi_hba_attr_vendor_id, /* bit17 RHBA_VENDOR_ID */ +}; + +/* RPA / RPRT attribute jump table */ +static int (*lpfc_fdmi_port_action[]) + (struct lpfc_vport *vport, void *attrbuf) = { + /* Action routine Mask bit Attribute type */ + lpfc_fdmi_port_attr_fc4type, /* bit0 RPRT_SUPPORT_FC4_TYPES */ + lpfc_fdmi_port_attr_support_speed, /* bit1 RPRT_SUPPORTED_SPEED */ + lpfc_fdmi_port_attr_speed, /* bit2 RPRT_PORT_SPEED */ + lpfc_fdmi_port_attr_max_frame, /* bit3 RPRT_MAX_FRAME_SIZE */ + lpfc_fdmi_port_attr_os_devname, /* bit4 RPRT_OS_DEVICE_NAME */ + lpfc_fdmi_port_attr_host_name, /* bit5 RPRT_HOST_NAME */ + lpfc_fdmi_port_attr_wwnn, /* bit6 RPRT_NODENAME */ + lpfc_fdmi_port_attr_wwpn, /* bit7 RPRT_PORTNAME */ + lpfc_fdmi_port_attr_symbolic_name, /* bit8 RPRT_SYM_PORTNAME */ + lpfc_fdmi_port_attr_port_type, /* bit9 RPRT_PORT_TYPE */ + lpfc_fdmi_port_attr_class, /* bit10 RPRT_SUPPORTED_CLASS */ + lpfc_fdmi_port_attr_fabric_wwpn, /* bit11 RPRT_FABRICNAME */ + lpfc_fdmi_port_attr_active_fc4type, /* bit12 RPRT_ACTIVE_FC4_TYPES */ + lpfc_fdmi_port_attr_port_state, /* bit13 RPRT_PORT_STATE */ + lpfc_fdmi_port_attr_num_disc, /* bit14 RPRT_DISC_PORT */ + lpfc_fdmi_port_attr_nportid, /* bit15 RPRT_PORT_ID */ + lpfc_fdmi_smart_attr_service, /* bit16 RPRT_SMART_SERVICE */ + lpfc_fdmi_smart_attr_guid, /* bit17 RPRT_SMART_GUID */ + lpfc_fdmi_smart_attr_version, /* bit18 RPRT_SMART_VERSION */ + lpfc_fdmi_smart_attr_model, /* bit19 RPRT_SMART_MODEL */ + lpfc_fdmi_smart_attr_port_info, /* bit20 RPRT_SMART_PORT_INFO */ + lpfc_fdmi_smart_attr_qos, /* bit21 RPRT_SMART_QOS */ + lpfc_fdmi_smart_attr_security, /* bit22 RPRT_SMART_SECURITY */ + lpfc_fdmi_vendor_attr_mi, /* bit23 RPRT_VENDOR_MI */ +}; + +/** + * 
lpfc_fdmi_cmd - Build and send a FDMI cmd to the specified NPort + * @vport: pointer to a host virtual N_Port data structure. + * @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID) + * @cmdcode: FDMI command to send + * @new_mask: Mask of HBA or PORT Attributes to send + * + * Builds and sends a FDMI command using the CT subsystem. + */ +int +lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + int cmdcode, uint32_t new_mask) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_dmabuf *rq, *rsp; + struct lpfc_sli_ct_request *CtReq; + struct ulp_bde64_le *bde; + uint32_t bit_pos; + uint32_t size, addsz; + uint32_t rsp_size; + uint32_t mask; + struct lpfc_fdmi_reg_hba *rh; + struct lpfc_fdmi_port_entry *pe; + struct lpfc_fdmi_reg_portattr *pab = NULL, *base = NULL; + struct lpfc_fdmi_attr_block *ab = NULL; + int (*func)(struct lpfc_vport *vport, void *attrbuf); + void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb); + + if (!ndlp) + return 0; + + cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */ + + /* fill in BDEs for command */ + /* Allocate buffer for command payload */ + rq = kmalloc(sizeof(*rq), GFP_KERNEL); + if (!rq) + goto fdmi_cmd_exit; + + rq->virt = lpfc_mbuf_alloc(phba, 0, &rq->phys); + if (!rq->virt) + goto fdmi_cmd_free_rq; + + /* Allocate buffer for Buffer ptr list */ + rsp = kmalloc(sizeof(*rsp), GFP_KERNEL); + if (!rsp) + goto fdmi_cmd_free_rqvirt; + + rsp->virt = lpfc_mbuf_alloc(phba, 0, &rsp->phys); + if (!rsp->virt) + goto fdmi_cmd_free_rsp; + + INIT_LIST_HEAD(&rq->list); + INIT_LIST_HEAD(&rsp->list); + + /* mbuf buffers are 1K in length - aka LPFC_BPL_SIZE */ + memset(rq->virt, 0, LPFC_BPL_SIZE); + rsp_size = LPFC_BPL_SIZE; + + /* FDMI request */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0218 FDMI Request x%x mask x%x Data: x%x x%x x%x\n", + cmdcode, new_mask, vport->fdmi_port_mask, + vport->fc_flag, vport->port_state); + + CtReq = (struct lpfc_sli_ct_request *)rq->virt; + + /* First populate the CT_IU preamble */ + CtReq->RevisionId.bits.Revision = SLI_CT_REVISION; + CtReq->RevisionId.bits.InId = 0; + + CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE; + CtReq->FsSubType = SLI_CT_FDMI_Subtypes; + + CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode); + + size = 0; + + /* Next fill in the specific FDMI cmd information */ + switch (cmdcode) { + case SLI_MGMT_RHAT: + case SLI_MGMT_RHBA: + rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un; + /* HBA Identifier */ + memcpy(&rh->hi.PortName, &phba->pport->fc_sparam.portName, + sizeof(struct lpfc_name)); + size += sizeof(struct lpfc_fdmi_hba_ident); + + if (cmdcode == SLI_MGMT_RHBA) { + /* Registered Port List */ + /* One entry (port) per adapter */ + rh->rpl.EntryCnt = cpu_to_be32(1); + memcpy(&rh->rpl.pe.PortName, + &phba->pport->fc_sparam.portName, + sizeof(struct lpfc_name)); + size += sizeof(struct lpfc_fdmi_reg_port_list); + } + + ab = (struct lpfc_fdmi_attr_block *)((uint8_t *)rh + size); + ab->EntryCnt = 0; + size += FOURBYTES; /* add length of EntryCnt field */ + + bit_pos = 0; + if (new_mask) + mask = new_mask; + else + mask = vport->fdmi_hba_mask; + + /* Mask will dictate what attributes to build in the request */ + while (mask) { + if (mask & 0x1) { + func = lpfc_fdmi_hba_action[bit_pos]; + addsz = func(vport, ((uint8_t *)rh + size)); + if (addsz) { + ab->EntryCnt++; + size += addsz; + } + /* check if another attribute fits */ + if ((size + FDMI_MAX_ATTRLEN) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto hba_out; + } + mask = mask >> 1; 
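+ /* bit_pos selects the next entry in the attribute action table */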
+ bit_pos++; + } +hba_out: + ab->EntryCnt = cpu_to_be32(ab->EntryCnt); + /* Total size */ + size += GID_REQUEST_SZ - 4; + break; + + case SLI_MGMT_RPRT: + if (vport->port_type != LPFC_PHYSICAL_PORT) { + ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); + if (!ndlp) + return 0; + } + fallthrough; + case SLI_MGMT_RPA: + /* Store base ptr right after preamble */ + base = (struct lpfc_fdmi_reg_portattr *)&CtReq->un; + + if (cmdcode == SLI_MGMT_RPRT) { + rh = (struct lpfc_fdmi_reg_hba *)base; + /* HBA Identifier */ + memcpy(&rh->hi.PortName, + &phba->pport->fc_sparam.portName, + sizeof(struct lpfc_name)); + pab = (struct lpfc_fdmi_reg_portattr *) + ((uint8_t *)base + sizeof(struct lpfc_name)); + size += sizeof(struct lpfc_name); + } else { + pab = base; + } + + memcpy((uint8_t *)&pab->PortName, + (uint8_t *)&vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + pab->ab.EntryCnt = 0; + /* add length of name and EntryCnt field */ + size += sizeof(struct lpfc_name) + FOURBYTES; + + bit_pos = 0; + if (new_mask) + mask = new_mask; + else + mask = vport->fdmi_port_mask; + + /* Mask will dictate what attributes to build in the request */ + while (mask) { + if (mask & 0x1) { + func = lpfc_fdmi_port_action[bit_pos]; + addsz = func(vport, ((uint8_t *)base + size)); + if (addsz) { + pab->ab.EntryCnt++; + size += addsz; + } + /* check if another attribute fits */ + if ((size + FDMI_MAX_ATTRLEN) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto port_out; + } + mask = mask >> 1; + bit_pos++; + } +port_out: + pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt); + size += GID_REQUEST_SZ - 4; + break; + + case SLI_MGMT_GHAT: + case SLI_MGMT_GRPL: + rsp_size = FC_MAX_NS_RSP; + fallthrough; + case SLI_MGMT_DHBA: + case SLI_MGMT_DHAT: + pe = (struct lpfc_fdmi_port_entry *)&CtReq->un; + memcpy((uint8_t *)&pe->PortName, + (uint8_t *)&vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name); + break; + + case SLI_MGMT_GPAT: + case SLI_MGMT_GPAS: + rsp_size = FC_MAX_NS_RSP; + fallthrough; + case SLI_MGMT_DPRT: + if (vport->port_type != LPFC_PHYSICAL_PORT) { + ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); + if (!ndlp) + return 0; + } + fallthrough; + case SLI_MGMT_DPA: + pe = (struct lpfc_fdmi_port_entry *)&CtReq->un; + memcpy((uint8_t *)&pe->PortName, + (uint8_t *)&vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name); + break; + case SLI_MGMT_GRHL: + size = GID_REQUEST_SZ - 4; + break; + default: + lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, + "0298 FDMI cmdcode x%x not supported\n", + cmdcode); + goto fdmi_cmd_free_rspvirt; + } + CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size); + + bde = (struct ulp_bde64_le *)rsp->virt; + bde->addr_high = cpu_to_le32(putPaddrHigh(rq->phys)); + bde->addr_low = cpu_to_le32(putPaddrLow(rq->phys)); + bde->type_size = cpu_to_le32(ULP_BDE64_TYPE_BDE_64 << + ULP_BDE64_TYPE_SHIFT); + bde->type_size |= cpu_to_le32(size); + + /* + * The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count + * to hold ndlp reference for the corresponding callback function. 
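+ * On failure, control falls through to the labels below to free the rq/rsp buffers.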
+ */ + if (!lpfc_ct_cmd(vport, rq, rsp, ndlp, cmpl, rsp_size, 0)) + return 0; + +fdmi_cmd_free_rspvirt: + lpfc_mbuf_free(phba, rsp->virt, rsp->phys); +fdmi_cmd_free_rsp: + kfree(rsp); +fdmi_cmd_free_rqvirt: + lpfc_mbuf_free(phba, rq->virt, rq->phys); +fdmi_cmd_free_rq: + kfree(rq); +fdmi_cmd_exit: + /* Issue FDMI request failed */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0244 Issue FDMI request failed Data: x%x\n", + cmdcode); + return 1; +} + +/** + * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer. + * @t: Context object of the timer. + * + * This function set the WORKER_DELAYED_DISC_TMO flag and wake up + * the worker thread. + **/ +void +lpfc_delayed_disc_tmo(struct timer_list *t) +{ + struct lpfc_vport *vport = from_timer(vport, t, delayed_disc_tmo); + struct lpfc_hba *phba = vport->phba; + uint32_t tmo_posted; + unsigned long iflag; + + spin_lock_irqsave(&vport->work_port_lock, iflag); + tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO; + if (!tmo_posted) + vport->work_port_events |= WORKER_DELAYED_DISC_TMO; + spin_unlock_irqrestore(&vport->work_port_lock, iflag); + + if (!tmo_posted) + lpfc_worker_wake_up(phba); + return; +} + +/** + * lpfc_delayed_disc_timeout_handler - Function called by worker thread to + * handle delayed discovery. + * @vport: pointer to a host virtual N_Port data structure. + * + * This function start nport discovery of the vport. + **/ +void +lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + spin_lock_irq(shost->host_lock); + if (!(vport->fc_flag & FC_DISC_DELAYED)) { + spin_unlock_irq(shost->host_lock); + return; + } + vport->fc_flag &= ~FC_DISC_DELAYED; + spin_unlock_irq(shost->host_lock); + + lpfc_do_scr_ns_plogi(vport->phba, vport); +} + +void +lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag) +{ + struct lpfc_sli *psli = &phba->sli; + lpfc_vpd_t *vp = &phba->vpd; + uint32_t b1, b2, b3, b4, i, rev; + char c; + uint32_t *ptr, str[4]; + uint8_t *fwname; + + if (phba->sli_rev == LPFC_SLI_REV4) + snprintf(fwrevision, FW_REV_STR_SIZE, "%s", vp->rev.opFwName); + else if (vp->rev.rBit) { + if (psli->sli_flag & LPFC_SLI_ACTIVE) + rev = vp->rev.sli2FwRev; + else + rev = vp->rev.sli1FwRev; + + b1 = (rev & 0x0000f000) >> 12; + b2 = (rev & 0x00000f00) >> 8; + b3 = (rev & 0x000000c0) >> 6; + b4 = (rev & 0x00000030) >> 4; + + switch (b4) { + case 0: + c = 'N'; + break; + case 1: + c = 'A'; + break; + case 2: + c = 'B'; + break; + case 3: + c = 'X'; + break; + default: + c = 0; + break; + } + b4 = (rev & 0x0000000f); + + if (psli->sli_flag & LPFC_SLI_ACTIVE) + fwname = vp->rev.sli2FwName; + else + fwname = vp->rev.sli1FwName; + + for (i = 0; i < 16; i++) + if (fwname[i] == 0x20) + fwname[i] = 0; + + ptr = (uint32_t*)fwname; + + for (i = 0; i < 3; i++) + str[i] = be32_to_cpu(*ptr++); + + if (c == 0) { + if (flag) + sprintf(fwrevision, "%d.%d%d (%s)", + b1, b2, b3, (char *)str); + else + sprintf(fwrevision, "%d.%d%d", b1, + b2, b3); + } else { + if (flag) + sprintf(fwrevision, "%d.%d%d%c%d (%s)", + b1, b2, b3, c, + b4, (char *)str); + else + sprintf(fwrevision, "%d.%d%d%c%d", + b1, b2, b3, c, b4); + } + } else { + rev = vp->rev.smFwRev; + + b1 = (rev & 0xff000000) >> 24; + b2 = (rev & 0x00f00000) >> 20; + b3 = (rev & 0x000f0000) >> 16; + c = (rev & 0x0000ff00) >> 8; + b4 = (rev & 0x000000ff); + + sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4); + } + return; +} + +static void +lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, 
struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf; + struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; + struct lpfc_sli_ct_request *ctcmd = inp->virt; + struct lpfc_sli_ct_request *ctrsp = outp->virt; + __be16 rsp = ctrsp->CommandResponse.bits.CmdRsp; + struct app_id_object *app; + struct lpfc_nodelist *ndlp = cmdiocb->ndlp; + u32 cmd, hash, bucket; + struct lpfc_vmid *vmp, *cur; + u8 *data = outp->virt; + int i; + + cmd = be16_to_cpu(ctcmd->CommandResponse.bits.CmdRsp); + if (cmd == SLI_CTAS_DALLAPP_ID) + lpfc_ct_free_iocb(phba, cmdiocb); + + if (lpfc_els_chk_latt(vport) || get_job_ulpstatus(phba, rspiocb)) { + if (cmd != SLI_CTAS_DALLAPP_ID) + goto free_res; + } + /* Check for a CT LS_RJT response */ + if (be16_to_cpu(rsp) == SLI_CT_RESPONSE_FS_RJT) { + if (cmd != SLI_CTAS_DALLAPP_ID) + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "3306 VMID FS_RJT Data: x%x x%x x%x\n", + cmd, ctrsp->ReasonCode, + ctrsp->Explanation); + if ((cmd != SLI_CTAS_DALLAPP_ID) || + (ctrsp->ReasonCode != SLI_CT_UNABLE_TO_PERFORM_REQ) || + (ctrsp->Explanation != SLI_CT_APP_ID_NOT_AVAILABLE)) { + /* If DALLAPP_ID failed retry later */ + if (cmd == SLI_CTAS_DALLAPP_ID) + vport->load_flag |= FC_DEREGISTER_ALL_APP_ID; + goto free_res; + } + } + + switch (cmd) { + case SLI_CTAS_RAPP_IDENT: + app = (struct app_id_object *)(RAPP_IDENT_OFFSET + data); + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "6712 RAPP_IDENT app id %d port id x%x id " + "len %d\n", be32_to_cpu(app->app_id), + be32_to_cpu(app->port_id), + app->obj.entity_id_len); + + if (app->obj.entity_id_len == 0 || app->port_id == 0) + goto free_res; + + hash = lpfc_vmid_hash_fn(app->obj.entity_id, + app->obj.entity_id_len); + vmp = lpfc_get_vmid_from_hashtable(vport, hash, + app->obj.entity_id); + if (vmp) { + write_lock(&vport->vmid_lock); + vmp->un.app_id = be32_to_cpu(app->app_id); + vmp->flag |= LPFC_VMID_REGISTERED; + vmp->flag &= ~LPFC_VMID_REQ_REGISTER; + write_unlock(&vport->vmid_lock); + /* Set IN USE flag */ + vport->vmid_flag |= LPFC_VMID_IN_USE; + } else { + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "6901 No entry found %s hash %d\n", + app->obj.entity_id, hash); + } + break; + case SLI_CTAS_DAPP_IDENT: + app = (struct app_id_object *)(DAPP_IDENT_OFFSET + data); + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "6713 DAPP_IDENT app id %d port id x%x\n", + be32_to_cpu(app->app_id), + be32_to_cpu(app->port_id)); + break; + case SLI_CTAS_DALLAPP_ID: + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "8856 Deregistered all app ids\n"); + read_lock(&vport->vmid_lock); + for (i = 0; i < phba->cfg_max_vmid; i++) { + vmp = &vport->vmid[i]; + if (vmp->flag != LPFC_VMID_SLOT_FREE) + memset(vmp, 0, sizeof(struct lpfc_vmid)); + } + read_unlock(&vport->vmid_lock); + /* for all elements in the hash table */ + if (!hash_empty(vport->hash_table)) + hash_for_each(vport->hash_table, bucket, cur, hnode) + hash_del(&cur->hnode); + vport->load_flag |= FC_ALLOW_VMID; + break; + default: + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "8857 Invalid command code\n"); + } +free_res: + lpfc_ct_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); +} + +/** + * lpfc_vmid_cmd - Build and send a FDMI cmd to the specified NPort + * @vport: pointer to a host virtual N_Port data structure. 
+ * @cmdcode: application server command code to send + * @vmid: pointer to vmid info structure + * + * Builds and sends a FDMI command using the CT subsystem. + */ +int +lpfc_vmid_cmd(struct lpfc_vport *vport, + int cmdcode, struct lpfc_vmid *vmid) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_dmabuf *mp, *bmp; + struct lpfc_sli_ct_request *ctreq; + struct ulp_bde64 *bpl; + u32 size; + u32 rsp_size; + u8 *data; + struct lpfc_vmid_rapp_ident_list *rap; + struct lpfc_vmid_dapp_ident_list *dap; + u8 retry = 0; + struct lpfc_nodelist *ndlp; + + void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb); + + ndlp = lpfc_findnode_did(vport, FDMI_DID); + if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) + return 0; + + cmpl = lpfc_cmpl_ct_cmd_vmid; + + /* fill in BDEs for command */ + /* Allocate buffer for command payload */ + mp = kmalloc(sizeof(*mp), GFP_KERNEL); + if (!mp) + goto vmid_free_mp_exit; + + mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); + if (!mp->virt) + goto vmid_free_mp_virt_exit; + + /* Allocate buffer for Buffer ptr list */ + bmp = kmalloc(sizeof(*bmp), GFP_KERNEL); + if (!bmp) + goto vmid_free_bmp_exit; + + bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); + if (!bmp->virt) + goto vmid_free_bmp_virt_exit; + + INIT_LIST_HEAD(&mp->list); + INIT_LIST_HEAD(&bmp->list); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "3275 VMID Request Data: x%x x%x x%x\n", + vport->fc_flag, vport->port_state, cmdcode); + ctreq = (struct lpfc_sli_ct_request *)mp->virt; + data = mp->virt; + /* First populate the CT_IU preamble */ + memset(data, 0, LPFC_BPL_SIZE); + ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; + ctreq->RevisionId.bits.InId = 0; + + ctreq->FsType = SLI_CT_MANAGEMENT_SERVICE; + ctreq->FsSubType = SLI_CT_APP_SEV_Subtypes; + + ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode); + rsp_size = LPFC_BPL_SIZE; + size = 0; + + switch (cmdcode) { + case SLI_CTAS_RAPP_IDENT: + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "1329 RAPP_IDENT for %s\n", vmid->host_vmid); + ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); + rap = (struct lpfc_vmid_rapp_ident_list *) + (DAPP_IDENT_OFFSET + data); + rap->no_of_objects = cpu_to_be32(1); + rap->obj[0].entity_id_len = vmid->vmid_len; + memcpy(rap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len); + size = RAPP_IDENT_OFFSET + + struct_size(rap, obj, be32_to_cpu(rap->no_of_objects)); + retry = 1; + break; + + case SLI_CTAS_GALLAPPIA_ID: + ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); + size = GALLAPPIA_ID_SIZE; + break; + + case SLI_CTAS_DAPP_IDENT: + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "1469 DAPP_IDENT for %s\n", vmid->host_vmid); + ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); + dap = (struct lpfc_vmid_dapp_ident_list *) + (DAPP_IDENT_OFFSET + data); + dap->no_of_objects = cpu_to_be32(1); + dap->obj[0].entity_id_len = vmid->vmid_len; + memcpy(dap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len); + size = DAPP_IDENT_OFFSET + + struct_size(dap, obj, be32_to_cpu(dap->no_of_objects)); + write_lock(&vport->vmid_lock); + vmid->flag &= ~LPFC_VMID_REGISTERED; + write_unlock(&vport->vmid_lock); + retry = 1; + break; + + case SLI_CTAS_DALLAPP_ID: + ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); + size = DALLAPP_ID_SIZE; + break; + + default: + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "7062 VMID cmdcode x%x not supported\n", + cmdcode); + goto vmid_free_all_mem; + } + + ctreq->CommandResponse.bits.Size = cpu_to_be16(rsp_size); + + bpl = (struct 
ulp_bde64 *)bmp->virt; + bpl->addrHigh = putPaddrHigh(mp->phys); + bpl->addrLow = putPaddrLow(mp->phys); + bpl->tus.f.bdeFlags = 0; + bpl->tus.f.bdeSize = size; + + /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count + * to hold ndlp reference for the corresponding callback function. + */ + if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) + return 0; + + vmid_free_all_mem: + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + vmid_free_bmp_virt_exit: + kfree(bmp); + vmid_free_bmp_exit: + lpfc_mbuf_free(phba, mp->virt, mp->phys); + vmid_free_mp_virt_exit: + kfree(mp); + vmid_free_mp_exit: + + /* Issue CT request failed */ + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "3276 VMID CT request failed Data: x%x\n", cmdcode); + return -EIO; +} diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c new file mode 100644 index 000000000..ea9b42225 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -0,0 +1,6692 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2007-2015 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_version.h" +#include "lpfc_compat.h" +#include "lpfc_debugfs.h" +#include "lpfc_bsg.h" + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +/* + * debugfs interface + * + * To access this interface the user should: + * # mount -t debugfs none /sys/kernel/debug + * + * The lpfc debugfs directory hierarchy is: + * /sys/kernel/debug/lpfc/fnX/vportY + * where X is the lpfc hba function unique_id + * where Y is the vport VPI on that hba + * + * Debugging services available per vport: + * discovery_trace + * This is an ACSII readable file that contains a trace of the last + * lpfc_debugfs_max_disc_trc events that happened on a specific vport. + * See lpfc_debugfs.h for different categories of discovery events. 
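+ * Once enabled it can be read with, e.g., # cat /sys/kernel/debug/lpfc/fn0/vport0/discovery_trace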
+ * To enable the discovery trace, the following module parameters must be set: + * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support + * lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for + * EACH vport. X MUST also be a power of 2. + * lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in + * lpfc_debugfs.h . + * + * slow_ring_trace + * This is an ACSII readable file that contains a trace of the last + * lpfc_debugfs_max_slow_ring_trc events that happened on a specific HBA. + * To enable the slow ring trace, the following module parameters must be set: + * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support + * lpfc_debugfs_max_slow_ring_trc=X Where X is the event trace depth for + * the HBA. X MUST also be a power of 2. + */ +static int lpfc_debugfs_enable = 1; +module_param(lpfc_debugfs_enable, int, S_IRUGO); +MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services"); + +/* This MUST be a power of 2 */ +static int lpfc_debugfs_max_disc_trc; +module_param(lpfc_debugfs_max_disc_trc, int, S_IRUGO); +MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc, + "Set debugfs discovery trace depth"); + +/* This MUST be a power of 2 */ +static int lpfc_debugfs_max_slow_ring_trc; +module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO); +MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc, + "Set debugfs slow ring trace depth"); + +/* This MUST be a power of 2 */ +static int lpfc_debugfs_max_nvmeio_trc; +module_param(lpfc_debugfs_max_nvmeio_trc, int, 0444); +MODULE_PARM_DESC(lpfc_debugfs_max_nvmeio_trc, + "Set debugfs NVME IO trace depth"); + +static int lpfc_debugfs_mask_disc_trc; +module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO); +MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, + "Set debugfs discovery trace mask"); + +#include + +static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); +static unsigned long lpfc_debugfs_start_time = 0L; + +/* iDiag */ +static struct lpfc_idiag idiag; + +/** + * lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer + * @vport: The vport to gather the log info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine gathers the lpfc discovery debugfs data from the @vport and + * dumps it to @buf up to @size number of bytes. It will start at the next entry + * in the log and process the log until the end of the buffer. Then it will + * gather from the beginning of the log and process until the current entry. + * + * Notes: + * Discovery logging will be disabled while while this routine dumps the log. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. 
+ **/ +static int +lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) +{ + int i, index, len, enable; + uint32_t ms; + struct lpfc_debugfs_trc *dtp; + char *buffer; + + buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL); + if (!buffer) + return 0; + + enable = lpfc_debugfs_enable; + lpfc_debugfs_enable = 0; + + len = 0; + index = (atomic_read(&vport->disc_trc_cnt) + 1) & + (lpfc_debugfs_max_disc_trc - 1); + for (i = index; i < lpfc_debugfs_max_disc_trc; i++) { + dtp = vport->disc_trc + i; + if (!dtp->fmt) + continue; + ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time); + snprintf(buffer, + LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", + dtp->seq_cnt, ms, dtp->fmt); + len += scnprintf(buf+len, size-len, buffer, + dtp->data1, dtp->data2, dtp->data3); + } + for (i = 0; i < index; i++) { + dtp = vport->disc_trc + i; + if (!dtp->fmt) + continue; + ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time); + snprintf(buffer, + LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", + dtp->seq_cnt, ms, dtp->fmt); + len += scnprintf(buf+len, size-len, buffer, + dtp->data1, dtp->data2, dtp->data3); + } + + lpfc_debugfs_enable = enable; + kfree(buffer); + + return len; +} + +/** + * lpfc_debugfs_slow_ring_trc_data - Dump slow ring logging to a buffer + * @phba: The HBA to gather the log info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine gathers the lpfc slow ring debugfs data from the @phba and + * dumps it to @buf up to @size number of bytes. It will start at the next entry + * in the log and process the log until the end of the buffer. Then it will + * gather from the beginning of the log and process until the current entry. + * + * Notes: + * Slow ring logging will be disabled while while this routine dumps the log. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) +{ + int i, index, len, enable; + uint32_t ms; + struct lpfc_debugfs_trc *dtp; + char *buffer; + + buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL); + if (!buffer) + return 0; + + enable = lpfc_debugfs_enable; + lpfc_debugfs_enable = 0; + + len = 0; + index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) & + (lpfc_debugfs_max_slow_ring_trc - 1); + for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) { + dtp = phba->slow_ring_trc + i; + if (!dtp->fmt) + continue; + ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time); + snprintf(buffer, + LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", + dtp->seq_cnt, ms, dtp->fmt); + len += scnprintf(buf+len, size-len, buffer, + dtp->data1, dtp->data2, dtp->data3); + } + for (i = 0; i < index; i++) { + dtp = phba->slow_ring_trc + i; + if (!dtp->fmt) + continue; + ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time); + snprintf(buffer, + LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", + dtp->seq_cnt, ms, dtp->fmt); + len += scnprintf(buf+len, size-len, buffer, + dtp->data1, dtp->data2, dtp->data3); + } + + lpfc_debugfs_enable = enable; + kfree(buffer); + + return len; +} + +static int lpfc_debugfs_last_hbq = -1; + +/** + * lpfc_debugfs_hbqinfo_data - Dump host buffer queue info to a buffer + * @phba: The HBA to gather host buffer info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. 
+ * + * Description: + * This routine dumps the host buffer queue info from the @phba to @buf up to + * @size number of bytes. A header that describes the current hbq state will be + * dumped to @buf first and then info on each hbq entry will be dumped to @buf + * until @size bytes have been dumped or all the hbq info has been dumped. + * + * Notes: + * This routine will rotate through each configured HBQ each time called. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) +{ + int len = 0; + int i, j, found, posted, low; + uint32_t phys, raw_index, getidx; + struct lpfc_hbq_init *hip; + struct hbq_s *hbqs; + struct lpfc_hbq_entry *hbqe; + struct lpfc_dmabuf *d_buf; + struct hbq_dmabuf *hbq_buf; + + if (phba->sli_rev != 3) + return 0; + + spin_lock_irq(&phba->hbalock); + + /* toggle between multiple hbqs, if any */ + i = lpfc_sli_hbq_count(); + if (i > 1) { + lpfc_debugfs_last_hbq++; + if (lpfc_debugfs_last_hbq >= i) + lpfc_debugfs_last_hbq = 0; + } + else + lpfc_debugfs_last_hbq = 0; + + i = lpfc_debugfs_last_hbq; + + len += scnprintf(buf+len, size-len, "HBQ %d Info\n", i); + + hbqs = &phba->hbqs[i]; + posted = 0; + list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) + posted++; + + hip = lpfc_hbq_defs[i]; + len += scnprintf(buf+len, size-len, + "idx:%d prof:%d rn:%d bufcnt:%d icnt:%d acnt:%d posted %d\n", + hip->hbq_index, hip->profile, hip->rn, + hip->buffer_count, hip->init_count, hip->add_count, posted); + + raw_index = phba->hbq_get[i]; + getidx = le32_to_cpu(raw_index); + len += scnprintf(buf+len, size-len, + "entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n", + hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx, + hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx); + + hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt; + for (j=0; j<hbqs->entry_count; j++) { + len += scnprintf(buf+len, size-len, + "%03d: %08x %04x %05x ", j, + le32_to_cpu(hbqe->bde.addrLow), + le32_to_cpu(hbqe->bde.tus.w), + le32_to_cpu(hbqe->buffer_tag)); + i = 0; + found = 0; + + /* First calculate if slot has an associated posted buffer */ + low = hbqs->hbqPutIdx - posted; + if (low >= 0) { + if ((j >= hbqs->hbqPutIdx) || (j < low)) { + len += scnprintf(buf + len, size - len, + "Unused\n"); + goto skipit; + } + } + else { + if ((j >= hbqs->hbqPutIdx) && + (j < (hbqs->entry_count+low))) { + len += scnprintf(buf + len, size - len, + "Unused\n"); + goto skipit; + } + } + + /* Get the Buffer info for the posted buffer */ + list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) { + hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); + phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff); + if (phys == le32_to_cpu(hbqe->bde.addrLow)) { + len += scnprintf(buf+len, size-len, + "Buf%d: x%px %06x\n", i, + hbq_buf->dbuf.virt, hbq_buf->tag); + found = 1; + break; + } + i++; + } + if (!found) { + len += scnprintf(buf+len, size-len, "No DMAinfo?\n"); + } +skipit: + hbqe++; + if (len > LPFC_HBQINFO_SIZE - 54) + break; + } + spin_unlock_irq(&phba->hbalock); + return len; +} + +static int lpfc_debugfs_last_xripool; + +/** + * lpfc_debugfs_commonxripools_data - Dump Hardware Queue info to a buffer + * @phba: The HBA to gather host buffer info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process.
+ * + * Description: + * This routine dumps the Hardware Queue info from the @phba to @buf up to + * @size number of bytes. A header that describes the current hdwq state will be + * dumped to @buf first and then info on each hdwq entry will be dumped to @buf + * until @size bytes have been dumped or all the hdwq info has been dumped. + * + * Notes: + * This routine will rotate through each configured Hardware Queue each + * time called. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size) +{ + struct lpfc_sli4_hdw_queue *qp; + int len = 0; + int i, out; + unsigned long iflag; + + for (i = 0; i < phba->cfg_hdw_queue; i++) { + if (len > (LPFC_DUMP_MULTIXRIPOOL_SIZE - 80)) + break; + qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool]; + + len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i); + spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); + spin_lock(&qp->io_buf_list_get_lock); + spin_lock(&qp->io_buf_list_put_lock); + out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs + + qp->abts_scsi_io_bufs + qp->abts_nvme_io_bufs); + len += scnprintf(buf + len, size - len, + "tot:%d get:%d put:%d mt:%d " + "ABTS scsi:%d nvme:%d Out:%d\n", + qp->total_io_bufs, qp->get_io_bufs, qp->put_io_bufs, + qp->empty_io_bufs, qp->abts_scsi_io_bufs, + qp->abts_nvme_io_bufs, out); + spin_unlock(&qp->io_buf_list_put_lock); + spin_unlock(&qp->io_buf_list_get_lock); + spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); + + lpfc_debugfs_last_xripool++; + if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue) + lpfc_debugfs_last_xripool = 0; + } + + return len; +} + +/** + * lpfc_debugfs_multixripools_data - Display multi-XRI pools information + * @phba: The HBA to gather host buffer info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine displays current multi-XRI pools information including XRI + * count in public, private and txcmplq. It also displays current high and + * low watermark. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. 
+ **/ +static int +lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size) +{ + u32 i; + u32 hwq_count; + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_pvt_pool *pvt_pool; + struct lpfc_pbl_pool *pbl_pool; + u32 txcmplq_cnt; + char tmp[LPFC_DEBUG_OUT_LINE_SZ] = {0}; + + if (phba->sli_rev != LPFC_SLI_REV4) + return 0; + + if (!phba->sli4_hba.hdwq) + return 0; + + if (!phba->cfg_xri_rebalancing) { + i = lpfc_debugfs_commonxripools_data(phba, buf, size); + return i; + } + + /* + * Pbl: Current number of free XRIs in public pool + * Pvt: Current number of free XRIs in private pool + * Busy: Current number of outstanding XRIs + * HWM: Current high watermark + * pvt_empty: Incremented by 1 when IO submission fails (no xri) + * pbl_empty: Incremented by 1 when all pbl_pool are empty during + * IO submission + */ + scnprintf(tmp, sizeof(tmp), + "HWQ: Pbl Pvt Busy HWM | pvt_empty pbl_empty "); + if (strlcat(buf, tmp, size) >= size) + return strnlen(buf, size); + +#ifdef LPFC_MXP_STAT + /* + * MAXH: Max high watermark seen so far + * above_lmt: Incremented by 1 if xri_owned > xri_limit during + * IO submission + * below_lmt: Incremented by 1 if xri_owned <= xri_limit during + * IO submission + * locPbl_hit: Incremented by 1 if successfully get a batch of XRI from + * local pbl_pool + * othPbl_hit: Incremented by 1 if successfully get a batch of XRI from + * other pbl_pool + */ + scnprintf(tmp, sizeof(tmp), + "MAXH above_lmt below_lmt locPbl_hit othPbl_hit"); + if (strlcat(buf, tmp, size) >= size) + return strnlen(buf, size); + + /* + * sPbl: snapshot of Pbl 15 sec after stat gets cleared + * sPvt: snapshot of Pvt 15 sec after stat gets cleared + * sBusy: snapshot of Busy 15 sec after stat gets cleared + */ + scnprintf(tmp, sizeof(tmp), + " | sPbl sPvt sBusy"); + if (strlcat(buf, tmp, size) >= size) + return strnlen(buf, size); +#endif + + scnprintf(tmp, sizeof(tmp), "\n"); + if (strlcat(buf, tmp, size) >= size) + return strnlen(buf, size); + + hwq_count = phba->cfg_hdw_queue; + for (i = 0; i < hwq_count; i++) { + qp = &phba->sli4_hba.hdwq[i]; + multixri_pool = qp->p_multixri_pool; + if (!multixri_pool) + continue; + pbl_pool = &multixri_pool->pbl_pool; + pvt_pool = &multixri_pool->pvt_pool; + txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; + + scnprintf(tmp, sizeof(tmp), + "%03d: %4d %4d %4d %4d | %10d %10d ", + i, pbl_pool->count, pvt_pool->count, + txcmplq_cnt, pvt_pool->high_watermark, + qp->empty_io_bufs, multixri_pool->pbl_empty_count); + if (strlcat(buf, tmp, size) >= size) + break; + +#ifdef LPFC_MXP_STAT + scnprintf(tmp, sizeof(tmp), + "%4d %10d %10d %10d %10d", + multixri_pool->stat_max_hwm, + multixri_pool->above_limit_count, + multixri_pool->below_limit_count, + multixri_pool->local_pbl_hit_count, + multixri_pool->other_pbl_hit_count); + if (strlcat(buf, tmp, size) >= size) + break; + + scnprintf(tmp, sizeof(tmp), + " | %4d %4d %5d", + multixri_pool->stat_pbl_count, + multixri_pool->stat_pvt_count, + multixri_pool->stat_busy_count); + if (strlcat(buf, tmp, size) >= size) + break; +#endif + + scnprintf(tmp, sizeof(tmp), "\n"); + if (strlcat(buf, tmp, size) >= size) + break; + } + return strnlen(buf, size); +} + + +#ifdef LPFC_HDWQ_LOCK_STAT +static int lpfc_debugfs_last_lock; + +/** + * lpfc_debugfs_lockstat_data - Dump Hardware Queue info to a buffer + * @phba: The HBA to gather host buffer info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. 
+ * + * Description: + * This routine dumps the Hardware Queue info from the @phba to @buf up to + * @size number of bytes. A header that describes the current hdwq state will be + * dumped to @buf first and then info on each hdwq entry will be dumped to @buf + * until @size bytes have been dumped or all the hdwq info has been dumped. + * + * Notes: + * This routine will rotate through each configured Hardware Queue each + * time called. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_lockstat_data(struct lpfc_hba *phba, char *buf, int size) +{ + struct lpfc_sli4_hdw_queue *qp; + int len = 0; + int i; + + if (phba->sli_rev != LPFC_SLI_REV4) + return 0; + + if (!phba->sli4_hba.hdwq) + return 0; + + for (i = 0; i < phba->cfg_hdw_queue; i++) { + if (len > (LPFC_HDWQINFO_SIZE - 100)) + break; + qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_lock]; + + len += scnprintf(buf + len, size - len, "HdwQ %03d Lock ", i); + if (phba->cfg_xri_rebalancing) { + len += scnprintf(buf + len, size - len, + "get_pvt:%d mv_pvt:%d " + "mv2pub:%d mv2pvt:%d " + "put_pvt:%d put_pub:%d wq:%d\n", + qp->lock_conflict.alloc_pvt_pool, + qp->lock_conflict.mv_from_pvt_pool, + qp->lock_conflict.mv_to_pub_pool, + qp->lock_conflict.mv_to_pvt_pool, + qp->lock_conflict.free_pvt_pool, + qp->lock_conflict.free_pub_pool, + qp->lock_conflict.wq_access); + } else { + len += scnprintf(buf + len, size - len, + "get:%d put:%d free:%d wq:%d\n", + qp->lock_conflict.alloc_xri_get, + qp->lock_conflict.alloc_xri_put, + qp->lock_conflict.free_xri, + qp->lock_conflict.wq_access); + } + + lpfc_debugfs_last_lock++; + if (lpfc_debugfs_last_lock >= phba->cfg_hdw_queue) + lpfc_debugfs_last_lock = 0; + } + + return len; +} +#endif + +static int lpfc_debugfs_last_hba_slim_off; + +/** + * lpfc_debugfs_dumpHBASlim_data - Dump HBA SLIM info to a buffer + * @phba: The HBA to gather SLIM info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the current contents of HBA SLIM for the HBA associated + * with @phba to @buf up to @size bytes of data. This is the raw HBA SLIM data. + * + * Notes: + * This routine will only dump up to 1024 bytes of data each time called and + * should be called multiple times to dump the entire HBA SLIM. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. 
+ **/ +static int +lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size) +{ + int len = 0; + int i, off; + uint32_t *ptr; + char *buffer; + + buffer = kmalloc(1024, GFP_KERNEL); + if (!buffer) + return 0; + + off = 0; + spin_lock_irq(&phba->hbalock); + + len += scnprintf(buf+len, size-len, "HBA SLIM\n"); + lpfc_memcpy_from_slim(buffer, + phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024); + + ptr = (uint32_t *)&buffer[0]; + off = lpfc_debugfs_last_hba_slim_off; + + /* Set it up for the next time */ + lpfc_debugfs_last_hba_slim_off += 1024; + if (lpfc_debugfs_last_hba_slim_off >= 4096) + lpfc_debugfs_last_hba_slim_off = 0; + + i = 1024; + while (i > 0) { + len += scnprintf(buf+len, size-len, + "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", + off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4), + *(ptr+5), *(ptr+6), *(ptr+7)); + ptr += 8; + i -= (8 * sizeof(uint32_t)); + off += (8 * sizeof(uint32_t)); + } + + spin_unlock_irq(&phba->hbalock); + kfree(buffer); + + return len; +} + +/** + * lpfc_debugfs_dumpHostSlim_data - Dump host SLIM info to a buffer + * @phba: The HBA to gather Host SLIM info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the current contents of host SLIM for the host associated + * with @phba to @buf up to @size bytes of data. The dump will contain the + * Mailbox, PCB, Rings, and Registers that are located in host memory. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) +{ + int len = 0; + int i, off; + uint32_t word0, word1, word2, word3; + uint32_t *ptr; + struct lpfc_pgp *pgpp; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + + off = 0; + spin_lock_irq(&phba->hbalock); + + len += scnprintf(buf+len, size-len, "SLIM Mailbox\n"); + ptr = (uint32_t *)phba->slim2p.virt; + i = sizeof(MAILBOX_t); + while (i > 0) { + len += scnprintf(buf+len, size-len, + "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", + off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4), + *(ptr+5), *(ptr+6), *(ptr+7)); + ptr += 8; + i -= (8 * sizeof(uint32_t)); + off += (8 * sizeof(uint32_t)); + } + + len += scnprintf(buf+len, size-len, "SLIM PCB\n"); + ptr = (uint32_t *)phba->pcb; + i = sizeof(PCB_t); + while (i > 0) { + len += scnprintf(buf+len, size-len, + "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", + off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4), + *(ptr+5), *(ptr+6), *(ptr+7)); + ptr += 8; + i -= (8 * sizeof(uint32_t)); + off += (8 * sizeof(uint32_t)); + } + + if (phba->sli_rev <= LPFC_SLI_REV3) { + for (i = 0; i < 4; i++) { + pgpp = &phba->port_gp[i]; + pring = &psli->sli3_ring[i]; + len += scnprintf(buf+len, size-len, + "Ring %d: CMD GetInx:%d " + "(Max:%d Next:%d " + "Local:%d flg:x%x) " + "RSP PutInx:%d Max:%d\n", + i, pgpp->cmdGetInx, + pring->sli.sli3.numCiocb, + pring->sli.sli3.next_cmdidx, + pring->sli.sli3.local_getidx, + pring->flag, pgpp->rspPutInx, + pring->sli.sli3.numRiocb); + } + + word0 = readl(phba->HAregaddr); + word1 = readl(phba->CAregaddr); + word2 = readl(phba->HSregaddr); + word3 = readl(phba->HCregaddr); + len += scnprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x " + "HC:%08x\n", word0, word1, word2, word3); + } + spin_unlock_irq(&phba->hbalock); + return len; +} + +/** + * lpfc_debugfs_nodelist_data - Dump target node list to a buffer + * @vport: The vport 
to gather target node info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the current target node list associated with @vport to + * @buf up to @size bytes of data. Each node entry in the dump will contain a + * node state, DID, WWPN, WWNN, RPI, flags, type, and other useful fields. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) +{ + int len = 0; + int i, iocnt, outio, cnt; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp; + unsigned char *statep; + struct nvme_fc_local_port *localport; + struct nvme_fc_remote_port *nrport = NULL; + struct lpfc_nvme_rport *rport; + + cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); + outio = 0; + + len += scnprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n"); + spin_lock_irq(shost->host_lock); + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + iocnt = 0; + if (!cnt) { + len += scnprintf(buf+len, size-len, + "Missing Nodelist Entries\n"); + break; + } + cnt--; + switch (ndlp->nlp_state) { + case NLP_STE_UNUSED_NODE: + statep = "UNUSED"; + break; + case NLP_STE_PLOGI_ISSUE: + statep = "PLOGI "; + break; + case NLP_STE_ADISC_ISSUE: + statep = "ADISC "; + break; + case NLP_STE_REG_LOGIN_ISSUE: + statep = "REGLOG"; + break; + case NLP_STE_PRLI_ISSUE: + statep = "PRLI "; + break; + case NLP_STE_LOGO_ISSUE: + statep = "LOGO "; + break; + case NLP_STE_UNMAPPED_NODE: + statep = "UNMAP "; + iocnt = 1; + break; + case NLP_STE_MAPPED_NODE: + statep = "MAPPED"; + iocnt = 1; + break; + case NLP_STE_NPR_NODE: + statep = "NPR "; + break; + default: + statep = "UNKNOWN"; + } + len += scnprintf(buf+len, size-len, "%s DID:x%06x ", + statep, ndlp->nlp_DID); + len += scnprintf(buf+len, size-len, + "WWPN x%016llx ", + wwn_to_u64(ndlp->nlp_portname.u.wwn)); + len += scnprintf(buf+len, size-len, + "WWNN x%016llx ", + wwn_to_u64(ndlp->nlp_nodename.u.wwn)); + len += scnprintf(buf+len, size-len, "RPI:x%04x ", + ndlp->nlp_rpi); + len += scnprintf(buf+len, size-len, "flag:x%08x ", + ndlp->nlp_flag); + if (!ndlp->nlp_type) + len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE "); + if (ndlp->nlp_type & NLP_FC_NODE) + len += scnprintf(buf+len, size-len, "FC_NODE "); + if (ndlp->nlp_type & NLP_FABRIC) { + len += scnprintf(buf+len, size-len, "FABRIC "); + iocnt = 0; + } + if (ndlp->nlp_type & NLP_FCP_TARGET) + len += scnprintf(buf+len, size-len, "FCP_TGT sid:%d ", + ndlp->nlp_sid); + if (ndlp->nlp_type & NLP_FCP_INITIATOR) + len += scnprintf(buf+len, size-len, "FCP_INITIATOR "); + if (ndlp->nlp_type & NLP_NVME_TARGET) + len += scnprintf(buf + len, + size - len, "NVME_TGT sid:%d ", + NLP_NO_SID); + if (ndlp->nlp_type & NLP_NVME_INITIATOR) + len += scnprintf(buf + len, + size - len, "NVME_INITIATOR "); + len += scnprintf(buf+len, size-len, "refcnt:%d", + kref_read(&ndlp->kref)); + if (iocnt) { + i = atomic_read(&ndlp->cmd_pending); + len += scnprintf(buf + len, size - len, + " OutIO:x%x Qdepth x%x", + i, ndlp->cmd_qdepth); + outio += i; + } + len += scnprintf(buf+len, size-len, " xpt:x%x", + ndlp->fc4_xpt_flags); + if (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) + len += scnprintf(buf+len, size-len, " defer:%x", + ndlp->nlp_defer_did); + len += scnprintf(buf+len, size-len, "\n"); + } + spin_unlock_irq(shost->host_lock); + + len += 
scnprintf(buf + len, size - len, + "\nOutstanding IO x%x\n", outio); + + if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) { + len += scnprintf(buf + len, size - len, + "\nNVME Targetport Entry ...\n"); + + /* Port state is only one of two values for now. */ + if (phba->targetport->port_id) + statep = "REGISTERED"; + else + statep = "INIT"; + len += scnprintf(buf + len, size - len, + "TGT WWNN x%llx WWPN x%llx State %s\n", + wwn_to_u64(vport->fc_nodename.u.wwn), + wwn_to_u64(vport->fc_portname.u.wwn), + statep); + len += scnprintf(buf + len, size - len, + " Targetport DID x%06x\n", + phba->targetport->port_id); + goto out_exit; + } + + len += scnprintf(buf + len, size - len, + "\nNVME Lport/Rport Entries ...\n"); + + localport = vport->localport; + if (!localport) + goto out_exit; + + spin_lock_irq(shost->host_lock); + + /* Port state is only one of two values for now. */ + if (localport->port_id) + statep = "ONLINE"; + else + statep = "UNKNOWN "; + + len += scnprintf(buf + len, size - len, + "Lport DID x%06x PortState %s\n", + localport->port_id, statep); + + len += scnprintf(buf + len, size - len, "\tRport List:\n"); + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + /* local short-hand pointer. */ + spin_lock(&ndlp->lock); + rport = lpfc_ndlp_get_nrport(ndlp); + if (rport) + nrport = rport->remoteport; + else + nrport = NULL; + spin_unlock(&ndlp->lock); + if (!nrport) + continue; + + /* Port state is only one of two values for now. */ + switch (nrport->port_state) { + case FC_OBJSTATE_ONLINE: + statep = "ONLINE"; + break; + case FC_OBJSTATE_UNKNOWN: + statep = "UNKNOWN "; + break; + default: + statep = "UNSUPPORTED"; + break; + } + + /* Tab in to show lport ownership. */ + len += scnprintf(buf + len, size - len, + "\t%s Port ID:x%06x ", + statep, nrport->port_id); + len += scnprintf(buf + len, size - len, "WWPN x%llx ", + nrport->port_name); + len += scnprintf(buf + len, size - len, "WWNN x%llx ", + nrport->node_name); + + /* An NVME rport can have multiple roles. */ + if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) + len += scnprintf(buf + len, size - len, + "INITIATOR "); + if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) + len += scnprintf(buf + len, size - len, + "TARGET "); + if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) + len += scnprintf(buf + len, size - len, + "DISCSRVC "); + if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | + FC_PORT_ROLE_NVME_TARGET | + FC_PORT_ROLE_NVME_DISCOVERY)) + len += scnprintf(buf + len, size - len, + "UNKNOWN ROLE x%x", + nrport->port_role); + /* Terminate the string. */ + len += scnprintf(buf + len, size - len, "\n"); + } + + spin_unlock_irq(shost->host_lock); + out_exit: + return len; +} + +/** + * lpfc_debugfs_nvmestat_data - Dump target node list to a buffer + * @vport: The vport to gather target node info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the NVME statistics associated with @vport + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. 
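+ *
+ * Note (illustrative, added for clarity): despite the short title above, this
+ * routine reports NVME statistics rather than the node list. In target mode
+ * the "CTX Outstanding" value is derived from three atomic counters, roughly:
+ *
+ *   tot  = atomic_read(&tgtp->rcv_fcp_cmd_drop);
+ *   tot += atomic_read(&tgtp->xmt_fcp_release);
+ *   tot  = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
+ *
+ * i.e. FCP commands received minus those already released or dropped.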
+ **/ +static int +lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_async_xchg_ctx *ctxp, *next_ctxp; + struct nvme_fc_local_port *localport; + struct lpfc_fc4_ctrl_stat *cstat; + struct lpfc_nvme_lport *lport; + uint64_t data1, data2, data3; + uint64_t tot, totin, totout; + int cnt, i; + int len = 0; + + if (phba->nvmet_support) { + if (!phba->targetport) + return len; + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + len += scnprintf(buf + len, size - len, + "\nNVME Targetport Statistics\n"); + + len += scnprintf(buf + len, size - len, + "LS: Rcv %08x Drop %08x Abort %08x\n", + atomic_read(&tgtp->rcv_ls_req_in), + atomic_read(&tgtp->rcv_ls_req_drop), + atomic_read(&tgtp->xmt_ls_abort)); + if (atomic_read(&tgtp->rcv_ls_req_in) != + atomic_read(&tgtp->rcv_ls_req_out)) { + len += scnprintf(buf + len, size - len, + "Rcv LS: in %08x != out %08x\n", + atomic_read(&tgtp->rcv_ls_req_in), + atomic_read(&tgtp->rcv_ls_req_out)); + } + + len += scnprintf(buf + len, size - len, + "LS: Xmt %08x Drop %08x Cmpl %08x\n", + atomic_read(&tgtp->xmt_ls_rsp), + atomic_read(&tgtp->xmt_ls_drop), + atomic_read(&tgtp->xmt_ls_rsp_cmpl)); + + len += scnprintf(buf + len, size - len, + "LS: RSP Abort %08x xb %08x Err %08x\n", + atomic_read(&tgtp->xmt_ls_rsp_aborted), + atomic_read(&tgtp->xmt_ls_rsp_xb_set), + atomic_read(&tgtp->xmt_ls_rsp_error)); + + len += scnprintf(buf + len, size - len, + "FCP: Rcv %08x Defer %08x Release %08x " + "Drop %08x\n", + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_defer), + atomic_read(&tgtp->xmt_fcp_release), + atomic_read(&tgtp->rcv_fcp_cmd_drop)); + + if (atomic_read(&tgtp->rcv_fcp_cmd_in) != + atomic_read(&tgtp->rcv_fcp_cmd_out)) { + len += scnprintf(buf + len, size - len, + "Rcv FCP: in %08x != out %08x\n", + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out)); + } + + len += scnprintf(buf + len, size - len, + "FCP Rsp: read %08x readrsp %08x " + "write %08x rsp %08x\n", + atomic_read(&tgtp->xmt_fcp_read), + atomic_read(&tgtp->xmt_fcp_read_rsp), + atomic_read(&tgtp->xmt_fcp_write), + atomic_read(&tgtp->xmt_fcp_rsp)); + + len += scnprintf(buf + len, size - len, + "FCP Rsp Cmpl: %08x err %08x drop %08x\n", + atomic_read(&tgtp->xmt_fcp_rsp_cmpl), + atomic_read(&tgtp->xmt_fcp_rsp_error), + atomic_read(&tgtp->xmt_fcp_rsp_drop)); + + len += scnprintf(buf + len, size - len, + "FCP Rsp Abort: %08x xb %08x xricqe %08x\n", + atomic_read(&tgtp->xmt_fcp_rsp_aborted), + atomic_read(&tgtp->xmt_fcp_rsp_xb_set), + atomic_read(&tgtp->xmt_fcp_xri_abort_cqe)); + + len += scnprintf(buf + len, size - len, + "ABORT: Xmt %08x Cmpl %08x\n", + atomic_read(&tgtp->xmt_fcp_abort), + atomic_read(&tgtp->xmt_fcp_abort_cmpl)); + + len += scnprintf(buf + len, size - len, + "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x", + atomic_read(&tgtp->xmt_abort_sol), + atomic_read(&tgtp->xmt_abort_unsol), + atomic_read(&tgtp->xmt_abort_rsp), + atomic_read(&tgtp->xmt_abort_rsp_error)); + + len += scnprintf(buf + len, size - len, "\n"); + + cnt = 0; + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + list_for_each_entry_safe(ctxp, next_ctxp, + &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, + list) { + cnt++; + } + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + if (cnt) { + len += scnprintf(buf + len, size - len, + "ABORT: %d ctx entries\n", cnt); + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + list_for_each_entry_safe(ctxp, 
next_ctxp, + &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, + list) { + if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) + break; + len += scnprintf(buf + len, size - len, + "Entry: oxid %x state %x " + "flag %x\n", + ctxp->oxid, ctxp->state, + ctxp->flag); + } + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + } + + /* Calculate outstanding IOs */ + tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); + tot += atomic_read(&tgtp->xmt_fcp_release); + tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; + + len += scnprintf(buf + len, size - len, + "IO_CTX: %08x WAIT: cur %08x tot %08x\n" + "CTX Outstanding %08llx\n", + phba->sli4_hba.nvmet_xri_cnt, + phba->sli4_hba.nvmet_io_wait_cnt, + phba->sli4_hba.nvmet_io_wait_total, + tot); + } else { + if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) + return len; + + localport = vport->localport; + if (!localport) + return len; + lport = (struct lpfc_nvme_lport *)localport->private; + if (!lport) + return len; + + len += scnprintf(buf + len, size - len, + "\nNVME HDWQ Statistics\n"); + + len += scnprintf(buf + len, size - len, + "LS: Xmt %016x Cmpl %016x\n", + atomic_read(&lport->fc4NvmeLsRequests), + atomic_read(&lport->fc4NvmeLsCmpls)); + + totin = 0; + totout = 0; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + cstat = &phba->sli4_hba.hdwq[i].nvme_cstat; + tot = cstat->io_cmpls; + totin += tot; + data1 = cstat->input_requests; + data2 = cstat->output_requests; + data3 = cstat->control_requests; + totout += (data1 + data2 + data3); + + /* Limit to 32, debugfs display buffer limitation */ + if (i >= 32) + continue; + + len += scnprintf(buf + len, PAGE_SIZE - len, + "HDWQ (%d): Rd %016llx Wr %016llx " + "IO %016llx ", + i, data1, data2, data3); + len += scnprintf(buf + len, PAGE_SIZE - len, + "Cmpl %016llx OutIO %016llx\n", + tot, ((data1 + data2 + data3) - tot)); + } + len += scnprintf(buf + len, PAGE_SIZE - len, + "Total FCP Cmpl %016llx Issue %016llx " + "OutIO %016llx\n", + totin, totout, totout - totin); + + len += scnprintf(buf + len, size - len, + "LS Xmt Err: Abrt %08x Err %08x " + "Cmpl Err: xb %08x Err %08x\n", + atomic_read(&lport->xmt_ls_abort), + atomic_read(&lport->xmt_ls_err), + atomic_read(&lport->cmpl_ls_xb), + atomic_read(&lport->cmpl_ls_err)); + + len += scnprintf(buf + len, size - len, + "FCP Xmt Err: noxri %06x nondlp %06x " + "qdepth %06x wqerr %06x err %06x Abrt %06x\n", + atomic_read(&lport->xmt_fcp_noxri), + atomic_read(&lport->xmt_fcp_bad_ndlp), + atomic_read(&lport->xmt_fcp_qdepth), + atomic_read(&lport->xmt_fcp_wqerr), + atomic_read(&lport->xmt_fcp_err), + atomic_read(&lport->xmt_fcp_abort)); + + len += scnprintf(buf + len, size - len, + "FCP Cmpl Err: xb %08x Err %08x\n", + atomic_read(&lport->cmpl_fcp_xb), + atomic_read(&lport->cmpl_fcp_err)); + + } + + return len; +} + +/** + * lpfc_debugfs_scsistat_data - Dump target node list to a buffer + * @vport: The vport to gather target node info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the SCSI statistics associated with @vport + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. 
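+ *
+ * Note (illustrative): for each hardware queue the "OutIO" column is simply
+ * requests issued minus requests completed, i.e.:
+ *
+ *   issued = cstat->input_requests + cstat->output_requests +
+ *            cstat->control_requests;
+ *   outio  = issued - cstat->io_cmpls;
+ *
+ * where cstat is the per-queue scsi_cstat structure read in the loop below.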
+ **/ +static int +lpfc_debugfs_scsistat_data(struct lpfc_vport *vport, char *buf, int size) +{ + int len; + struct lpfc_hba *phba = vport->phba; + struct lpfc_fc4_ctrl_stat *cstat; + u64 data1, data2, data3; + u64 tot, totin, totout; + int i; + char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0}; + + if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) || + (phba->sli_rev != LPFC_SLI_REV4)) + return 0; + + scnprintf(buf, size, "SCSI HDWQ Statistics\n"); + + totin = 0; + totout = 0; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + cstat = &phba->sli4_hba.hdwq[i].scsi_cstat; + tot = cstat->io_cmpls; + totin += tot; + data1 = cstat->input_requests; + data2 = cstat->output_requests; + data3 = cstat->control_requests; + totout += (data1 + data2 + data3); + + scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx " + "IO %016llx ", i, data1, data2, data3); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n", + tot, ((data1 + data2 + data3) - tot)); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + } + scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx " + "OutIO %016llx\n", totin, totout, totout - totin); + strlcat(buf, tmp, size); + +buffer_done: + len = strnlen(buf, size); + + return len; +} + +void +lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) +{ + uint64_t seg1, seg2, seg3, seg4; + uint64_t segsum; + + if (!lpfc_cmd->ts_last_cmd || + !lpfc_cmd->ts_cmd_start || + !lpfc_cmd->ts_cmd_wqput || + !lpfc_cmd->ts_isr_cmpl || + !lpfc_cmd->ts_data_io) + return; + + if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_cmd_start) + return; + if (lpfc_cmd->ts_cmd_start < lpfc_cmd->ts_last_cmd) + return; + if (lpfc_cmd->ts_cmd_wqput < lpfc_cmd->ts_cmd_start) + return; + if (lpfc_cmd->ts_isr_cmpl < lpfc_cmd->ts_cmd_wqput) + return; + if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_isr_cmpl) + return; + /* + * Segment 1 - Time from Last FCP command cmpl is handed + * off to NVME Layer to start of next command. + * Segment 2 - Time from Driver receives a IO cmd start + * from NVME Layer to WQ put is done on IO cmd. + * Segment 3 - Time from Driver WQ put is done on IO cmd + * to MSI-X ISR for IO cmpl. + * Segment 4 - Time from MSI-X ISR for IO cmpl to when + * cmpl is handled off to the NVME Layer. 
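+ *
+ * Worked example (hypothetical ktime values, in ns, added for clarity):
+ * with ts_cmd_start = 1000, ts_cmd_wqput = 1400, ts_isr_cmpl = 2400 and
+ * ts_data_io = 2900,
+ *   seg2 = 1400 - 1000          =  400
+ *   seg3 = (2400 - 1000) - 400  = 1000
+ *   seg4 = (2900 - 1000) - 1400 =  500
+ * so segments 2 through 4 always sum to ts_data_io - ts_cmd_start.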
+ */ + seg1 = lpfc_cmd->ts_cmd_start - lpfc_cmd->ts_last_cmd; + if (seg1 > 5000000) /* 5 ms - for sequential IOs only */ + seg1 = 0; + + /* Calculate times relative to start of IO */ + seg2 = (lpfc_cmd->ts_cmd_wqput - lpfc_cmd->ts_cmd_start); + segsum = seg2; + seg3 = lpfc_cmd->ts_isr_cmpl - lpfc_cmd->ts_cmd_start; + if (segsum > seg3) + return; + seg3 -= segsum; + segsum += seg3; + + seg4 = lpfc_cmd->ts_data_io - lpfc_cmd->ts_cmd_start; + if (segsum > seg4) + return; + seg4 -= segsum; + + phba->ktime_data_samples++; + phba->ktime_seg1_total += seg1; + if (seg1 < phba->ktime_seg1_min) + phba->ktime_seg1_min = seg1; + else if (seg1 > phba->ktime_seg1_max) + phba->ktime_seg1_max = seg1; + phba->ktime_seg2_total += seg2; + if (seg2 < phba->ktime_seg2_min) + phba->ktime_seg2_min = seg2; + else if (seg2 > phba->ktime_seg2_max) + phba->ktime_seg2_max = seg2; + phba->ktime_seg3_total += seg3; + if (seg3 < phba->ktime_seg3_min) + phba->ktime_seg3_min = seg3; + else if (seg3 > phba->ktime_seg3_max) + phba->ktime_seg3_max = seg3; + phba->ktime_seg4_total += seg4; + if (seg4 < phba->ktime_seg4_min) + phba->ktime_seg4_min = seg4; + else if (seg4 > phba->ktime_seg4_max) + phba->ktime_seg4_max = seg4; + + lpfc_cmd->ts_last_cmd = 0; + lpfc_cmd->ts_cmd_start = 0; + lpfc_cmd->ts_cmd_wqput = 0; + lpfc_cmd->ts_isr_cmpl = 0; + lpfc_cmd->ts_data_io = 0; +} + +/** + * lpfc_debugfs_ioktime_data - Dump target node list to a buffer + * @vport: The vport to gather target node info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the NVME statistics associated with @vport + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_ioktime_data(struct lpfc_vport *vport, char *buf, int size) +{ + struct lpfc_hba *phba = vport->phba; + int len = 0; + + if (phba->nvmet_support == 0) { + /* Initiator */ + len += scnprintf(buf + len, PAGE_SIZE - len, + "ktime %s: Total Samples: %lld\n", + (phba->ktime_on ? 
"Enabled" : "Disabled"), + phba->ktime_data_samples); + if (phba->ktime_data_samples == 0) + return len; + + len += scnprintf( + buf + len, PAGE_SIZE - len, + "Segment 1: Last Cmd cmpl " + "done -to- Start of next Cmd (in driver)\n"); + len += scnprintf( + buf + len, PAGE_SIZE - len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg1_total, + phba->ktime_data_samples), + phba->ktime_seg1_min, + phba->ktime_seg1_max); + len += scnprintf( + buf + len, PAGE_SIZE - len, + "Segment 2: Driver start of Cmd " + "-to- Firmware WQ doorbell\n"); + len += scnprintf( + buf + len, PAGE_SIZE - len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg2_total, + phba->ktime_data_samples), + phba->ktime_seg2_min, + phba->ktime_seg2_max); + len += scnprintf( + buf + len, PAGE_SIZE - len, + "Segment 3: Firmware WQ doorbell -to- " + "MSI-X ISR cmpl\n"); + len += scnprintf( + buf + len, PAGE_SIZE - len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg3_total, + phba->ktime_data_samples), + phba->ktime_seg3_min, + phba->ktime_seg3_max); + len += scnprintf( + buf + len, PAGE_SIZE - len, + "Segment 4: MSI-X ISR cmpl -to- " + "Cmd cmpl done\n"); + len += scnprintf( + buf + len, PAGE_SIZE - len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg4_total, + phba->ktime_data_samples), + phba->ktime_seg4_min, + phba->ktime_seg4_max); + len += scnprintf( + buf + len, PAGE_SIZE - len, + "Total IO avg time: %08lld\n", + div_u64(phba->ktime_seg1_total + + phba->ktime_seg2_total + + phba->ktime_seg3_total + + phba->ktime_seg4_total, + phba->ktime_data_samples)); + return len; + } + + /* NVME Target */ + len += scnprintf(buf + len, PAGE_SIZE-len, + "ktime %s: Total Samples: %lld %lld\n", + (phba->ktime_on ? "Enabled" : "Disabled"), + phba->ktime_data_samples, + phba->ktime_status_samples); + if (phba->ktime_data_samples == 0) + return len; + + len += scnprintf(buf + len, PAGE_SIZE-len, + "Segment 1: MSI-X ISR Rcv cmd -to- " + "cmd pass to NVME Layer\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg1_total, + phba->ktime_data_samples), + phba->ktime_seg1_min, + phba->ktime_seg1_max); + len += scnprintf(buf + len, PAGE_SIZE-len, + "Segment 2: cmd pass to NVME Layer- " + "-to- Driver rcv cmd OP (action)\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg2_total, + phba->ktime_data_samples), + phba->ktime_seg2_min, + phba->ktime_seg2_max); + len += scnprintf(buf + len, PAGE_SIZE-len, + "Segment 3: Driver rcv cmd OP -to- " + "Firmware WQ doorbell: cmd\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg3_total, + phba->ktime_data_samples), + phba->ktime_seg3_min, + phba->ktime_seg3_max); + len += scnprintf(buf + len, PAGE_SIZE-len, + "Segment 4: Firmware WQ doorbell: cmd " + "-to- MSI-X ISR for cmd cmpl\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg4_total, + phba->ktime_data_samples), + phba->ktime_seg4_min, + phba->ktime_seg4_max); + len += scnprintf(buf + len, PAGE_SIZE-len, + "Segment 5: MSI-X ISR for cmd cmpl " + "-to- NVME layer passed cmd done\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg5_total, + phba->ktime_data_samples), + phba->ktime_seg5_min, + phba->ktime_seg5_max); + + if (phba->ktime_status_samples == 0) { + len += scnprintf(buf 
+ len, PAGE_SIZE-len, + "Total: cmd received by MSI-X ISR " + "-to- cmd completed on wire\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld " + "max %08lld\n", + div_u64(phba->ktime_seg10_total, + phba->ktime_data_samples), + phba->ktime_seg10_min, + phba->ktime_seg10_max); + return len; + } + + len += scnprintf(buf + len, PAGE_SIZE-len, + "Segment 6: NVME layer passed cmd done " + "-to- Driver rcv rsp status OP\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg6_total, + phba->ktime_status_samples), + phba->ktime_seg6_min, + phba->ktime_seg6_max); + len += scnprintf(buf + len, PAGE_SIZE-len, + "Segment 7: Driver rcv rsp status OP " + "-to- Firmware WQ doorbell: status\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg7_total, + phba->ktime_status_samples), + phba->ktime_seg7_min, + phba->ktime_seg7_max); + len += scnprintf(buf + len, PAGE_SIZE-len, + "Segment 8: Firmware WQ doorbell: status" + " -to- MSI-X ISR for status cmpl\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg8_total, + phba->ktime_status_samples), + phba->ktime_seg8_min, + phba->ktime_seg8_max); + len += scnprintf(buf + len, PAGE_SIZE-len, + "Segment 9: MSI-X ISR for status cmpl " + "-to- NVME layer passed status done\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg9_total, + phba->ktime_status_samples), + phba->ktime_seg9_min, + phba->ktime_seg9_max); + len += scnprintf(buf + len, PAGE_SIZE-len, + "Total: cmd received by MSI-X ISR -to- " + "cmd completed on wire\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg10_total, + phba->ktime_status_samples), + phba->ktime_seg10_min, + phba->ktime_seg10_max); + return len; +} + +/** + * lpfc_debugfs_nvmeio_trc_data - Dump NVME IO trace list to a buffer + * @phba: The phba to gather target node info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the NVME IO trace associated with @phba + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size) +{ + struct lpfc_debugfs_nvmeio_trc *dtp; + int i, state, index, skip; + int len = 0; + + state = phba->nvmeio_trc_on; + + index = (atomic_read(&phba->nvmeio_trc_cnt) + 1) & + (phba->nvmeio_trc_size - 1); + skip = phba->nvmeio_trc_output_idx; + + len += scnprintf(buf + len, size - len, + "%s IO Trace %s: next_idx %d skip %d size %d\n", + (phba->nvmet_support ? "NVME" : "NVMET"), + (state ? 
"Enabled" : "Disabled"), + index, skip, phba->nvmeio_trc_size); + + if (!phba->nvmeio_trc || state) + return len; + + /* trace MUST bhe off to continue */ + + for (i = index; i < phba->nvmeio_trc_size; i++) { + if (skip) { + skip--; + continue; + } + dtp = phba->nvmeio_trc + i; + phba->nvmeio_trc_output_idx++; + + if (!dtp->fmt) + continue; + + len += scnprintf(buf + len, size - len, dtp->fmt, + dtp->data1, dtp->data2, dtp->data3); + + if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) { + phba->nvmeio_trc_output_idx = 0; + len += scnprintf(buf + len, size - len, + "Trace Complete\n"); + goto out; + } + + if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) { + len += scnprintf(buf + len, size - len, + "Trace Continue (%d of %d)\n", + phba->nvmeio_trc_output_idx, + phba->nvmeio_trc_size); + goto out; + } + } + for (i = 0; i < index; i++) { + if (skip) { + skip--; + continue; + } + dtp = phba->nvmeio_trc + i; + phba->nvmeio_trc_output_idx++; + + if (!dtp->fmt) + continue; + + len += scnprintf(buf + len, size - len, dtp->fmt, + dtp->data1, dtp->data2, dtp->data3); + + if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) { + phba->nvmeio_trc_output_idx = 0; + len += scnprintf(buf + len, size - len, + "Trace Complete\n"); + goto out; + } + + if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) { + len += scnprintf(buf + len, size - len, + "Trace Continue (%d of %d)\n", + phba->nvmeio_trc_output_idx, + phba->nvmeio_trc_size); + goto out; + } + } + + len += scnprintf(buf + len, size - len, + "Trace Done\n"); +out: + return len; +} + +/** + * lpfc_debugfs_hdwqstat_data - Dump I/O stats to a buffer + * @vport: The vport to gather target node info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the NVME + SCSI statistics associated with @vport + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_hdwqstat_data(struct lpfc_vport *vport, char *buf, int size) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_hdwq_stat *c_stat; + int i, j, len; + uint32_t tot_xmt; + uint32_t tot_rcv; + uint32_t tot_cmpl; + char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0}; + + scnprintf(tmp, sizeof(tmp), "HDWQ Stats:\n\n"); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), "(NVME Accounting: %s) ", + (phba->hdwqstat_on & + (LPFC_CHECK_NVME_IO | LPFC_CHECK_NVMET_IO) ? + "Enabled" : "Disabled")); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), "(SCSI Accounting: %s) ", + (phba->hdwqstat_on & LPFC_CHECK_SCSI_IO ? 
+ "Enabled" : "Disabled")); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), "\n\n"); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + + for (i = 0; i < phba->cfg_hdw_queue; i++) { + tot_rcv = 0; + tot_xmt = 0; + tot_cmpl = 0; + + for_each_present_cpu(j) { + c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, j); + + /* Only display for this HDWQ */ + if (i != c_stat->hdwq_no) + continue; + + /* Only display non-zero counters */ + if (!c_stat->xmt_io && !c_stat->cmpl_io && + !c_stat->rcv_io) + continue; + + if (!tot_xmt && !tot_cmpl && !tot_rcv) { + /* Print HDWQ string only the first time */ + scnprintf(tmp, sizeof(tmp), "[HDWQ %d]:\t", i); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + } + + tot_xmt += c_stat->xmt_io; + tot_cmpl += c_stat->cmpl_io; + if (phba->nvmet_support) + tot_rcv += c_stat->rcv_io; + + scnprintf(tmp, sizeof(tmp), "| [CPU %d]: ", j); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + + if (phba->nvmet_support) { + scnprintf(tmp, sizeof(tmp), + "XMT 0x%x CMPL 0x%x RCV 0x%x |", + c_stat->xmt_io, c_stat->cmpl_io, + c_stat->rcv_io); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + } else { + scnprintf(tmp, sizeof(tmp), + "XMT 0x%x CMPL 0x%x |", + c_stat->xmt_io, c_stat->cmpl_io); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + } + } + + /* Check if nothing to display */ + if (!tot_xmt && !tot_cmpl && !tot_rcv) + continue; + + scnprintf(tmp, sizeof(tmp), "\t->\t[HDWQ Total: "); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + + if (phba->nvmet_support) { + scnprintf(tmp, sizeof(tmp), + "XMT 0x%x CMPL 0x%x RCV 0x%x]\n\n", + tot_xmt, tot_cmpl, tot_rcv); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + } else { + scnprintf(tmp, sizeof(tmp), + "XMT 0x%x CMPL 0x%x]\n\n", + tot_xmt, tot_cmpl); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + } + } + +buffer_done: + len = strnlen(buf, size); + return len; +} + +#endif + +/** + * lpfc_debugfs_disc_trc - Store discovery trace log + * @vport: The vport to associate this trace string with for retrieval. + * @mask: Log entry classification. + * @fmt: Format string to be displayed when dumping the log. + * @data1: 1st data parameter to be applied to @fmt. + * @data2: 2nd data parameter to be applied to @fmt. + * @data3: 3rd data parameter to be applied to @fmt. + * + * Description: + * This routine is used by the driver code to add a debugfs log entry to the + * discovery trace buffer associated with @vport. Only entries with a @mask that + * match the current debugfs discovery mask will be saved. Entries that do not + * match will be thrown away. @fmt, @data1, @data2, and @data3 are used like + * printf when displaying the log. 
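+ *
+ * Example (illustrative call only; the mask name and arguments are assumed
+ * from typical usage elsewhere in the driver, not prescribed here):
+ *
+ *   lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ *                         "PLOGI issue: did:x%x refcnt %d flg:x%x",
+ *                         ndlp->nlp_DID, kref_read(&ndlp->kref),
+ *                         ndlp->nlp_flag);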
+ **/ +inline void +lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, + uint32_t data1, uint32_t data2, uint32_t data3) +{ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + struct lpfc_debugfs_trc *dtp; + int index; + + if (!(lpfc_debugfs_mask_disc_trc & mask)) + return; + + if (!lpfc_debugfs_enable || !lpfc_debugfs_max_disc_trc || + !vport || !vport->disc_trc) + return; + + index = atomic_inc_return(&vport->disc_trc_cnt) & + (lpfc_debugfs_max_disc_trc - 1); + dtp = vport->disc_trc + index; + dtp->fmt = fmt; + dtp->data1 = data1; + dtp->data2 = data2; + dtp->data3 = data3; + dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); + dtp->jif = jiffies; +#endif + return; +} + +/** + * lpfc_debugfs_slow_ring_trc - Store slow ring trace log + * @phba: The phba to associate this trace string with for retrieval. + * @fmt: Format string to be displayed when dumping the log. + * @data1: 1st data parameter to be applied to @fmt. + * @data2: 2nd data parameter to be applied to @fmt. + * @data3: 3rd data parameter to be applied to @fmt. + * + * Description: + * This routine is used by the driver code to add a debugfs log entry to the + * discovery trace buffer associated with @vport. @fmt, @data1, @data2, and + * @data3 are used like printf when displaying the log. + **/ +inline void +lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, + uint32_t data1, uint32_t data2, uint32_t data3) +{ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + struct lpfc_debugfs_trc *dtp; + int index; + + if (!lpfc_debugfs_enable || !lpfc_debugfs_max_slow_ring_trc || + !phba || !phba->slow_ring_trc) + return; + + index = atomic_inc_return(&phba->slow_ring_trc_cnt) & + (lpfc_debugfs_max_slow_ring_trc - 1); + dtp = phba->slow_ring_trc + index; + dtp->fmt = fmt; + dtp->data1 = data1; + dtp->data2 = data2; + dtp->data3 = data3; + dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); + dtp->jif = jiffies; +#endif + return; +} + +/** + * lpfc_debugfs_nvme_trc - Store NVME/NVMET trace log + * @phba: The phba to associate this trace string with for retrieval. + * @fmt: Format string to be displayed when dumping the log. + * @data1: 1st data parameter to be applied to @fmt. + * @data2: 2nd data parameter to be applied to @fmt. + * @data3: 3rd data parameter to be applied to @fmt. + * + * Description: + * This routine is used by the driver code to add a debugfs log entry to the + * nvme trace buffer associated with @phba. @fmt, @data1, @data2, and + * @data3 are used like printf when displaying the log. + **/ +inline void +lpfc_debugfs_nvme_trc(struct lpfc_hba *phba, char *fmt, + uint16_t data1, uint16_t data2, uint32_t data3) +{ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + struct lpfc_debugfs_nvmeio_trc *dtp; + int index; + + if (!phba->nvmeio_trc_on || !phba->nvmeio_trc) + return; + + index = atomic_inc_return(&phba->nvmeio_trc_cnt) & + (phba->nvmeio_trc_size - 1); + dtp = phba->nvmeio_trc + index; + dtp->fmt = fmt; + dtp->data1 = data1; + dtp->data2 = data2; + dtp->data3 = data3; +#endif +} + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +/** + * lpfc_debugfs_disc_trc_open - Open the discovery trace log + * @inode: The inode pointer that contains a vport pointer. + * @file: The file pointer to attach the log output. + * + * Description: + * This routine is the entry point for the debugfs open file operation. 
It gets + * the vport from the i_private field in @inode, allocates the necessary buffer + * for the log, fills the buffer from the in-memory log for this vport, and then + * returns a pointer to that log in the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return a negative + * error value. + **/ +static int +lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int size; + int rc = -ENOMEM; + + if (!lpfc_debugfs_max_disc_trc) { + rc = -ENOSPC; + goto out; + } + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + size = (lpfc_debugfs_max_disc_trc * LPFC_DEBUG_TRC_ENTRY_SIZE); + size = PAGE_ALIGN(size); + + debug->buffer = kmalloc(size, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_disc_trc_data(vport, debug->buffer, size); + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +/** + * lpfc_debugfs_slow_ring_trc_open - Open the Slow Ring trace log + * @inode: The inode pointer that contains a vport pointer. + * @file: The file pointer to attach the log output. + * + * Description: + * This routine is the entry point for the debugfs open file operation. It gets + * the vport from the i_private field in @inode, allocates the necessary buffer + * for the log, fills the buffer from the in-memory log for this vport, and then + * returns a pointer to that log in the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return a negative + * error value. + **/ +static int +lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file) +{ + struct lpfc_hba *phba = inode->i_private; + struct lpfc_debug *debug; + int size; + int rc = -ENOMEM; + + if (!lpfc_debugfs_max_slow_ring_trc) { + rc = -ENOSPC; + goto out; + } + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + size = (lpfc_debugfs_max_slow_ring_trc * LPFC_DEBUG_TRC_ENTRY_SIZE); + size = PAGE_ALIGN(size); + + debug->buffer = kmalloc(size, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_slow_ring_trc_data(phba, debug->buffer, size); + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +/** + * lpfc_debugfs_hbqinfo_open - Open the hbqinfo debugfs buffer + * @inode: The inode pointer that contains a vport pointer. + * @file: The file pointer to attach the log output. + * + * Description: + * This routine is the entry point for the debugfs open file operation. It gets + * the vport from the i_private field in @inode, allocates the necessary buffer + * for the log, fills the buffer from the in-memory log for this vport, and then + * returns a pointer to that log in the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return a negative + * error value. 
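+ *
+ * Note: the "one-shot" debugfs files in this file all follow the same open
+ * pattern, sketched here for reference (error handling omitted; BUF_SIZE and
+ * lpfc_debugfs_xxx_data() are placeholders for the per-file buffer size and
+ * formatter):
+ *
+ *   debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ *   debug->buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
+ *   debug->len = lpfc_debugfs_xxx_data(ctx, debug->buffer, BUF_SIZE);
+ *   file->private_data = debug;
+ *
+ * Reads are then serviced by lpfc_debugfs_read() through
+ * simple_read_from_buffer().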
+ **/ +static int +lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file) +{ + struct lpfc_hba *phba = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_HBQINFO_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_hbqinfo_data(phba, debug->buffer, + LPFC_HBQINFO_SIZE); + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +/** + * lpfc_debugfs_multixripools_open - Open the multixripool debugfs buffer + * @inode: The inode pointer that contains a hba pointer. + * @file: The file pointer to attach the log output. + * + * Description: + * This routine is the entry point for the debugfs open file operation. It gets + * the hba from the i_private field in @inode, allocates the necessary buffer + * for the log, fills the buffer from the in-memory log for this hba, and then + * returns a pointer to that log in the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return a negative + * error value. + **/ +static int +lpfc_debugfs_multixripools_open(struct inode *inode, struct file *file) +{ + struct lpfc_hba *phba = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kzalloc(LPFC_DUMP_MULTIXRIPOOL_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_multixripools_data( + phba, debug->buffer, LPFC_DUMP_MULTIXRIPOOL_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +#ifdef LPFC_HDWQ_LOCK_STAT +/** + * lpfc_debugfs_lockstat_open - Open the lockstat debugfs buffer + * @inode: The inode pointer that contains a vport pointer. + * @file: The file pointer to attach the log output. + * + * Description: + * This routine is the entry point for the debugfs open file operation. It gets + * the vport from the i_private field in @inode, allocates the necessary buffer + * for the log, fills the buffer from the in-memory log for this vport, and then + * returns a pointer to that log in the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return a negative + * error value. 
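+ *
+ * Note (illustrative): writing "reset" or "zero" to this file clears the
+ * per-hardware-queue lock contention counters; conceptually this is
+ * equivalent to
+ *
+ *   for (i = 0; i < phba->cfg_hdw_queue; i++)
+ *           memset(&phba->sli4_hba.hdwq[i].lock_conflict, 0,
+ *                  sizeof(phba->sli4_hba.hdwq[i].lock_conflict));
+ *
+ * although the write handler below zeroes each field explicitly.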
+ **/ +static int +lpfc_debugfs_lockstat_open(struct inode *inode, struct file *file) +{ + struct lpfc_hba *phba = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_HDWQINFO_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_lockstat_data(phba, debug->buffer, + LPFC_HBQINFO_SIZE); + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + struct lpfc_sli4_hdw_queue *qp; + char mybuf[64]; + char *pbuf; + int i; + size_t bsize; + + memset(mybuf, 0, sizeof(mybuf)); + + bsize = min(nbytes, (sizeof(mybuf) - 1)); + + if (copy_from_user(mybuf, buf, bsize)) + return -EFAULT; + pbuf = &mybuf[0]; + + if ((strncmp(pbuf, "reset", strlen("reset")) == 0) || + (strncmp(pbuf, "zero", strlen("zero")) == 0)) { + for (i = 0; i < phba->cfg_hdw_queue; i++) { + qp = &phba->sli4_hba.hdwq[i]; + qp->lock_conflict.alloc_xri_get = 0; + qp->lock_conflict.alloc_xri_put = 0; + qp->lock_conflict.free_xri = 0; + qp->lock_conflict.wq_access = 0; + qp->lock_conflict.alloc_pvt_pool = 0; + qp->lock_conflict.mv_from_pvt_pool = 0; + qp->lock_conflict.mv_to_pub_pool = 0; + qp->lock_conflict.mv_to_pvt_pool = 0; + qp->lock_conflict.free_pvt_pool = 0; + qp->lock_conflict.free_pub_pool = 0; + qp->lock_conflict.wq_access = 0; + } + } + return bsize; +} +#endif + +static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba, + char *buffer, int size) +{ + int copied = 0; + struct lpfc_dmabuf *dmabuf, *next; + + memset(buffer, 0, size); + + spin_lock_irq(&phba->hbalock); + if (phba->ras_fwlog.state != ACTIVE) { + spin_unlock_irq(&phba->hbalock); + return -EINVAL; + } + spin_unlock_irq(&phba->hbalock); + + list_for_each_entry_safe(dmabuf, next, + &phba->ras_fwlog.fwlog_buff_list, list) { + /* Check if copying will go over size and a '\0' char */ + if ((copied + LPFC_RAS_MAX_ENTRY_SIZE) >= (size - 1)) { + memcpy(buffer + copied, dmabuf->virt, + size - copied - 1); + copied += size - copied - 1; + break; + } + memcpy(buffer + copied, dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE); + copied += LPFC_RAS_MAX_ENTRY_SIZE; + } + return copied; +} + +static int +lpfc_debugfs_ras_log_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + vfree(debug->buffer); + kfree(debug); + + return 0; +} + +/** + * lpfc_debugfs_ras_log_open - Open the RAS log debugfs buffer + * @inode: The inode pointer that contains a vport pointer. + * @file: The file pointer to attach the log output. + * + * Description: + * This routine is the entry point for the debugfs open file operation. It gets + * the vport from the i_private field in @inode, allocates the necessary buffer + * for the log, fills the buffer from the in-memory log for this vport, and then + * returns a pointer to that log in the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return a negative + * error value. 
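+ *
+ * Note: the log buffer is sized as
+ *
+ *   size = LPFC_RAS_MIN_BUFF_POST_SIZE * phba->cfg_ras_fwlog_buffsize;
+ *
+ * with the multiplication guarded by check_mul_overflow(). The buffer is
+ * vmalloc()'d here and released with vfree() in
+ * lpfc_debugfs_ras_log_release().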
+ **/ +static int +lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file) +{ + struct lpfc_hba *phba = inode->i_private; + struct lpfc_debug *debug; + int size; + int rc = -ENOMEM; + + spin_lock_irq(&phba->hbalock); + if (phba->ras_fwlog.state != ACTIVE) { + spin_unlock_irq(&phba->hbalock); + rc = -EINVAL; + goto out; + } + spin_unlock_irq(&phba->hbalock); + + if (check_mul_overflow(LPFC_RAS_MIN_BUFF_POST_SIZE, + phba->cfg_ras_fwlog_buffsize, &size)) + goto out; + + debug = kzalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + debug->buffer = vmalloc(size); + if (!debug->buffer) + goto free_debug; + + debug->len = lpfc_debugfs_ras_log_data(phba, debug->buffer, size); + if (debug->len < 0) { + rc = -EINVAL; + goto free_buffer; + } + file->private_data = debug; + + return 0; + +free_buffer: + vfree(debug->buffer); +free_debug: + kfree(debug); +out: + return rc; +} + +/** + * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer + * @inode: The inode pointer that contains a vport pointer. + * @file: The file pointer to attach the log output. + * + * Description: + * This routine is the entry point for the debugfs open file operation. It gets + * the vport from the i_private field in @inode, allocates the necessary buffer + * for the log, fills the buffer from the in-memory log for this vport, and then + * returns a pointer to that log in the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return a negative + * error value. + **/ +static int +lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file) +{ + struct lpfc_hba *phba = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_dumpHBASlim_data(phba, debug->buffer, + LPFC_DUMPHBASLIM_SIZE); + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +/** + * lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer + * @inode: The inode pointer that contains a vport pointer. + * @file: The file pointer to attach the log output. + * + * Description: + * This routine is the entry point for the debugfs open file operation. It gets + * the vport from the i_private field in @inode, allocates the necessary buffer + * for the log, fills the buffer from the in-memory log for this vport, and then + * returns a pointer to that log in the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return a negative + * error value. 
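+ *
+ * Note: each line of the mailbox and PCB portion of the dump is an offset
+ * followed by eight 32-bit words, formatted by
+ * lpfc_debugfs_dumpHostSlim_data() as
+ *
+ *   "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
+ *
+ * while the ring indices and the HA/CA/HS/HC registers are printed in a
+ * summarized form.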
+ **/ +static int +lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file) +{ + struct lpfc_hba *phba = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_dumpHostSlim_data(phba, debug->buffer, + LPFC_DUMPHOSTSLIM_SIZE); + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_dif_err_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct dentry *dent = file->f_path.dentry; + struct lpfc_hba *phba = file->private_data; + char cbuf[32]; + uint64_t tmp = 0; + int cnt = 0; + + if (dent == phba->debug_writeGuard) + cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt); + else if (dent == phba->debug_writeApp) + cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt); + else if (dent == phba->debug_writeRef) + cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt); + else if (dent == phba->debug_readGuard) + cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt); + else if (dent == phba->debug_readApp) + cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt); + else if (dent == phba->debug_readRef) + cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt); + else if (dent == phba->debug_InjErrNPortID) + cnt = scnprintf(cbuf, 32, "0x%06x\n", + phba->lpfc_injerr_nportid); + else if (dent == phba->debug_InjErrWWPN) { + memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name)); + tmp = cpu_to_be64(tmp); + cnt = scnprintf(cbuf, 32, "0x%016llx\n", tmp); + } else if (dent == phba->debug_InjErrLBA) { + if (phba->lpfc_injerr_lba == (sector_t)(-1)) + cnt = scnprintf(cbuf, 32, "off\n"); + else + cnt = scnprintf(cbuf, 32, "0x%llx\n", + (uint64_t) phba->lpfc_injerr_lba); + } else + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0547 Unknown debugfs error injection entry\n"); + + return simple_read_from_buffer(buf, nbytes, ppos, &cbuf, cnt); +} + +static ssize_t +lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct dentry *dent = file->f_path.dentry; + struct lpfc_hba *phba = file->private_data; + char dstbuf[33]; + uint64_t tmp = 0; + int size; + + memset(dstbuf, 0, 33); + size = (nbytes < 32) ? 
nbytes : 32; + if (copy_from_user(dstbuf, buf, size)) + return -EFAULT; + + if (dent == phba->debug_InjErrLBA) { + if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') && + (dstbuf[2] == 'f')) + tmp = (uint64_t)(-1); + } + + if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp))) + return -EINVAL; + + if (dent == phba->debug_writeGuard) + phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp; + else if (dent == phba->debug_writeApp) + phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp; + else if (dent == phba->debug_writeRef) + phba->lpfc_injerr_wref_cnt = (uint32_t)tmp; + else if (dent == phba->debug_readGuard) + phba->lpfc_injerr_rgrd_cnt = (uint32_t)tmp; + else if (dent == phba->debug_readApp) + phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp; + else if (dent == phba->debug_readRef) + phba->lpfc_injerr_rref_cnt = (uint32_t)tmp; + else if (dent == phba->debug_InjErrLBA) + phba->lpfc_injerr_lba = (sector_t)tmp; + else if (dent == phba->debug_InjErrNPortID) + phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID); + else if (dent == phba->debug_InjErrWWPN) { + tmp = cpu_to_be64(tmp); + memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name)); + } else + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0548 Unknown debugfs error injection entry\n"); + + return nbytes; +} + +static int +lpfc_debugfs_dif_err_release(struct inode *inode, struct file *file) +{ + return 0; +} + +/** + * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file + * @inode: The inode pointer that contains a vport pointer. + * @file: The file pointer to attach the log output. + * + * Description: + * This routine is the entry point for the debugfs open file operation. It gets + * the vport from the i_private field in @inode, allocates the necessary buffer + * for the log, fills the buffer from the in-memory log for this vport, and then + * returns a pointer to that log in the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return a negative + * error value. + **/ +static int +lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_nodelist_data(vport, debug->buffer, + LPFC_NODELIST_SIZE); + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +/** + * lpfc_debugfs_lseek - Seek through a debugfs file + * @file: The file pointer to seek through. + * @off: The offset to seek to or the amount to seek by. + * @whence: Indicates how to seek. + * + * Description: + * This routine is the entry point for the debugfs lseek file operation. The + * @whence parameter indicates whether @off is the offset to directly seek to, + * or if it is a value to seek forward or reverse by. This function figures out + * what the new offset of the debugfs file will be and assigns that value to the + * f_pos field of @file. + * + * Returns: + * This function returns the new offset if successful and returns a negative + * error if unable to process the seek. + **/ +static loff_t +lpfc_debugfs_lseek(struct file *file, loff_t off, int whence) +{ + struct lpfc_debug *debug = file->private_data; + return fixed_size_llseek(file, off, whence, debug->len); +} + +/** + * lpfc_debugfs_read - Read a debugfs file + * @file: The file pointer to read from. 
+ * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the buffer indicated in the private_data + * field of @file. It will start reading at @ppos and copy up to @nbytes of + * data to @buf. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. + **/ +static ssize_t +lpfc_debugfs_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + + return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer, + debug->len); +} + +/** + * lpfc_debugfs_release - Release the buffer used to store debugfs file data + * @inode: The inode pointer that contains a vport pointer. (unused) + * @file: The file pointer that contains the buffer to release. + * + * Description: + * This routine frees the buffer that was allocated when the debugfs file was + * opened. + * + * Returns: + * This function returns zero. + **/ +static int +lpfc_debugfs_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + kfree(debug->buffer); + kfree(debug); + + return 0; +} + +/** + * lpfc_debugfs_multixripools_write - Clear multi-XRI pools statistics + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine clears multi-XRI pools statistics when buf contains "clear". + * + * Return Value: + * It returns the @nbytes passed in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space.
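+ *
+ * Example (illustrative, user space; the debugfs file name and the fn<#>
+ * path are assumptions based on the usual lpfc debugfs layout, and the
+ * snippet needs <fcntl.h>, <unistd.h> and <string.h>):
+ *
+ *   int fd = open("/sys/kernel/debug/lpfc/fn0/multixripools", O_WRONLY);
+ *
+ *   if (fd >= 0) {
+ *           write(fd, "clear", strlen("clear"));
+ *           close(fd);
+ *   }
+ *
+ * Any string other than "clear" is rejected with -EINVAL.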
+ **/ +static ssize_t +lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + char mybuf[64]; + char *pbuf; + u32 i; + u32 hwq_count; + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_multixri_pool *multixri_pool; + + if (nbytes > sizeof(mybuf) - 1) + nbytes = sizeof(mybuf) - 1; + + memset(mybuf, 0, sizeof(mybuf)); + + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; + + if ((strncmp(pbuf, "clear", strlen("clear"))) == 0) { + hwq_count = phba->cfg_hdw_queue; + for (i = 0; i < hwq_count; i++) { + qp = &phba->sli4_hba.hdwq[i]; + multixri_pool = qp->p_multixri_pool; + if (!multixri_pool) + continue; + + qp->empty_io_bufs = 0; + multixri_pool->pbl_empty_count = 0; +#ifdef LPFC_MXP_STAT + multixri_pool->above_limit_count = 0; + multixri_pool->below_limit_count = 0; + multixri_pool->stat_max_hwm = 0; + multixri_pool->local_pbl_hit_count = 0; + multixri_pool->other_pbl_hit_count = 0; + + multixri_pool->stat_pbl_count = 0; + multixri_pool->stat_pvt_count = 0; + multixri_pool->stat_busy_count = 0; + multixri_pool->stat_snapshot_taken = 0; +#endif + } + return strlen(pbuf); + } + + return -EINVAL; +} + +static int +lpfc_debugfs_nvmestat_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_NVMESTAT_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_nvmestat_data(vport, debug->buffer, + LPFC_NVMESTAT_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nvmet_tgtport *tgtp; + char mybuf[64]; + char *pbuf; + + if (!phba->targetport) + return -ENXIO; + + if (nbytes > sizeof(mybuf) - 1) + nbytes = sizeof(mybuf) - 1; + + memset(mybuf, 0, sizeof(mybuf)); + + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if ((strncmp(pbuf, "reset", strlen("reset")) == 0) || + (strncmp(pbuf, "zero", strlen("zero")) == 0)) { + atomic_set(&tgtp->rcv_ls_req_in, 0); + atomic_set(&tgtp->rcv_ls_req_out, 0); + atomic_set(&tgtp->rcv_ls_req_drop, 0); + atomic_set(&tgtp->xmt_ls_abort, 0); + atomic_set(&tgtp->xmt_ls_abort_cmpl, 0); + atomic_set(&tgtp->xmt_ls_rsp, 0); + atomic_set(&tgtp->xmt_ls_drop, 0); + atomic_set(&tgtp->xmt_ls_rsp_error, 0); + atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0); + + atomic_set(&tgtp->rcv_fcp_cmd_in, 0); + atomic_set(&tgtp->rcv_fcp_cmd_out, 0); + atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); + atomic_set(&tgtp->xmt_fcp_drop, 0); + atomic_set(&tgtp->xmt_fcp_read_rsp, 0); + atomic_set(&tgtp->xmt_fcp_read, 0); + atomic_set(&tgtp->xmt_fcp_write, 0); + atomic_set(&tgtp->xmt_fcp_rsp, 0); + atomic_set(&tgtp->xmt_fcp_release, 0); + atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); + atomic_set(&tgtp->xmt_fcp_rsp_error, 0); + atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); + + atomic_set(&tgtp->xmt_fcp_abort, 0); + 
atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0); + atomic_set(&tgtp->xmt_abort_sol, 0); + atomic_set(&tgtp->xmt_abort_unsol, 0); + atomic_set(&tgtp->xmt_abort_rsp, 0); + atomic_set(&tgtp->xmt_abort_rsp_error, 0); + } + return nbytes; +} + +static int +lpfc_debugfs_scsistat_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kzalloc(LPFC_SCSISTAT_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_scsistat_data(vport, debug->buffer, + LPFC_SCSISTAT_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_scsistat_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; + struct lpfc_hba *phba = vport->phba; + char mybuf[6] = {0}; + int i; + + if (copy_from_user(mybuf, buf, (nbytes >= sizeof(mybuf)) ? + (sizeof(mybuf) - 1) : nbytes)) + return -EFAULT; + + if ((strncmp(&mybuf[0], "reset", strlen("reset")) == 0) || + (strncmp(&mybuf[0], "zero", strlen("zero")) == 0)) { + for (i = 0; i < phba->cfg_hdw_queue; i++) { + memset(&phba->sli4_hba.hdwq[i].scsi_cstat, 0, + sizeof(phba->sli4_hba.hdwq[i].scsi_cstat)); + } + } + + return nbytes; +} + +static int +lpfc_debugfs_ioktime_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_IOKTIME_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_ioktime_data(vport, debug->buffer, + LPFC_IOKTIME_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_ioktime_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; + struct lpfc_hba *phba = vport->phba; + char mybuf[64]; + char *pbuf; + + if (nbytes > sizeof(mybuf) - 1) + nbytes = sizeof(mybuf) - 1; + + memset(mybuf, 0, sizeof(mybuf)); + + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; + + if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { + phba->ktime_data_samples = 0; + phba->ktime_status_samples = 0; + phba->ktime_seg1_total = 0; + phba->ktime_seg1_max = 0; + phba->ktime_seg1_min = 0xffffffff; + phba->ktime_seg2_total = 0; + phba->ktime_seg2_max = 0; + phba->ktime_seg2_min = 0xffffffff; + phba->ktime_seg3_total = 0; + phba->ktime_seg3_max = 0; + phba->ktime_seg3_min = 0xffffffff; + phba->ktime_seg4_total = 0; + phba->ktime_seg4_max = 0; + phba->ktime_seg4_min = 0xffffffff; + phba->ktime_seg5_total = 0; + phba->ktime_seg5_max = 0; + phba->ktime_seg5_min = 0xffffffff; + phba->ktime_seg6_total = 0; + phba->ktime_seg6_max = 0; + phba->ktime_seg6_min = 0xffffffff; + phba->ktime_seg7_total = 0; + phba->ktime_seg7_max = 0; + phba->ktime_seg7_min = 0xffffffff; + phba->ktime_seg8_total = 0; + phba->ktime_seg8_max = 0; + phba->ktime_seg8_min = 0xffffffff; + phba->ktime_seg9_total = 0; + 
phba->ktime_seg9_max = 0; + phba->ktime_seg9_min = 0xffffffff; + phba->ktime_seg10_total = 0; + phba->ktime_seg10_max = 0; + phba->ktime_seg10_min = 0xffffffff; + + phba->ktime_on = 1; + return strlen(pbuf); + } else if ((strncmp(pbuf, "off", + sizeof("off") - 1) == 0)) { + phba->ktime_on = 0; + return strlen(pbuf); + } else if ((strncmp(pbuf, "zero", + sizeof("zero") - 1) == 0)) { + phba->ktime_data_samples = 0; + phba->ktime_status_samples = 0; + phba->ktime_seg1_total = 0; + phba->ktime_seg1_max = 0; + phba->ktime_seg1_min = 0xffffffff; + phba->ktime_seg2_total = 0; + phba->ktime_seg2_max = 0; + phba->ktime_seg2_min = 0xffffffff; + phba->ktime_seg3_total = 0; + phba->ktime_seg3_max = 0; + phba->ktime_seg3_min = 0xffffffff; + phba->ktime_seg4_total = 0; + phba->ktime_seg4_max = 0; + phba->ktime_seg4_min = 0xffffffff; + phba->ktime_seg5_total = 0; + phba->ktime_seg5_max = 0; + phba->ktime_seg5_min = 0xffffffff; + phba->ktime_seg6_total = 0; + phba->ktime_seg6_max = 0; + phba->ktime_seg6_min = 0xffffffff; + phba->ktime_seg7_total = 0; + phba->ktime_seg7_max = 0; + phba->ktime_seg7_min = 0xffffffff; + phba->ktime_seg8_total = 0; + phba->ktime_seg8_max = 0; + phba->ktime_seg8_min = 0xffffffff; + phba->ktime_seg9_total = 0; + phba->ktime_seg9_max = 0; + phba->ktime_seg9_min = 0xffffffff; + phba->ktime_seg10_total = 0; + phba->ktime_seg10_max = 0; + phba->ktime_seg10_min = 0xffffffff; + return strlen(pbuf); + } + return -EINVAL; +} + +static int +lpfc_debugfs_nvmeio_trc_open(struct inode *inode, struct file *file) +{ + struct lpfc_hba *phba = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_NVMEIO_TRC_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_nvmeio_trc_data(phba, debug->buffer, + LPFC_NVMEIO_TRC_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + int i; + unsigned long sz; + char mybuf[64]; + char *pbuf; + + if (nbytes > sizeof(mybuf) - 1) + nbytes = sizeof(mybuf) - 1; + + memset(mybuf, 0, sizeof(mybuf)); + + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; + + if ((strncmp(pbuf, "off", sizeof("off") - 1) == 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0570 nvmeio_trc_off\n"); + phba->nvmeio_trc_output_idx = 0; + phba->nvmeio_trc_on = 0; + return strlen(pbuf); + } else if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0571 nvmeio_trc_on\n"); + phba->nvmeio_trc_output_idx = 0; + phba->nvmeio_trc_on = 1; + return strlen(pbuf); + } + + /* We must be off to allocate the trace buffer */ + if (phba->nvmeio_trc_on != 0) + return -EINVAL; + + /* If not on or off, the parameter is the trace buffer size */ + i = kstrtoul(pbuf, 0, &sz); + if (i) + return -EINVAL; + phba->nvmeio_trc_size = (uint32_t)sz; + + /* It must be a power of 2 - round down */ + i = 0; + while (sz > 1) { + sz = sz >> 1; + i++; + } + sz = (1 << i); + if (phba->nvmeio_trc_size != sz) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0572 nvmeio_trc_size changed to %ld\n", + sz); + phba->nvmeio_trc_size = (uint32_t)sz; + + /* If one previously 
exists, free it */ + kfree(phba->nvmeio_trc); + + /* Allocate new trace buffer and initialize */ + phba->nvmeio_trc = kzalloc((sizeof(struct lpfc_debugfs_nvmeio_trc) * + sz), GFP_KERNEL); + if (!phba->nvmeio_trc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0573 Cannot create debugfs " + "nvmeio_trc buffer\n"); + return -ENOMEM; + } + atomic_set(&phba->nvmeio_trc_cnt, 0); + phba->nvmeio_trc_on = 0; + phba->nvmeio_trc_output_idx = 0; + + return strlen(pbuf); +} + +static int +lpfc_debugfs_hdwqstat_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kcalloc(1, LPFC_SCSISTAT_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_hdwqstat_data(vport, debug->buffer, + LPFC_SCSISTAT_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; + struct lpfc_hba *phba = vport->phba; + struct lpfc_hdwq_stat *c_stat; + char mybuf[64]; + char *pbuf; + int i; + + if (nbytes > sizeof(mybuf) - 1) + nbytes = sizeof(mybuf) - 1; + + memset(mybuf, 0, sizeof(mybuf)); + + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; + + if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { + if (phba->nvmet_support) + phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO; + else + phba->hdwqstat_on |= (LPFC_CHECK_NVME_IO | + LPFC_CHECK_SCSI_IO); + return strlen(pbuf); + } else if ((strncmp(pbuf, "nvme_on", sizeof("nvme_on") - 1) == 0)) { + if (phba->nvmet_support) + phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO; + else + phba->hdwqstat_on |= LPFC_CHECK_NVME_IO; + return strlen(pbuf); + } else if ((strncmp(pbuf, "scsi_on", sizeof("scsi_on") - 1) == 0)) { + if (!phba->nvmet_support) + phba->hdwqstat_on |= LPFC_CHECK_SCSI_IO; + return strlen(pbuf); + } else if ((strncmp(pbuf, "nvme_off", sizeof("nvme_off") - 1) == 0)) { + phba->hdwqstat_on &= ~(LPFC_CHECK_NVME_IO | + LPFC_CHECK_NVMET_IO); + return strlen(pbuf); + } else if ((strncmp(pbuf, "scsi_off", sizeof("scsi_off") - 1) == 0)) { + phba->hdwqstat_on &= ~LPFC_CHECK_SCSI_IO; + return strlen(pbuf); + } else if ((strncmp(pbuf, "off", + sizeof("off") - 1) == 0)) { + phba->hdwqstat_on = LPFC_CHECK_OFF; + return strlen(pbuf); + } else if ((strncmp(pbuf, "zero", + sizeof("zero") - 1) == 0)) { + for_each_present_cpu(i) { + c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, i); + c_stat->xmt_io = 0; + c_stat->cmpl_io = 0; + c_stat->rcv_io = 0; + } + return strlen(pbuf); + } + return -EINVAL; +} + +/* + * --------------------------------- + * iDiag debugfs file access methods + * --------------------------------- + * + * All access methods are through the proper SLI4 PCI function's debugfs + * iDiag directory: + * + * /sys/kernel/debug/lpfc/fn<#>/iDiag + */ + +/** + * lpfc_idiag_cmd_get - Get and parse idiag debugfs comands from user space + * @buf: The pointer to the user space buffer. + * @nbytes: The number of bytes in the user space buffer. + * @idiag_cmd: pointer to the idiag command struct. + * + * This routine reads data from debugfs user space buffer and parses the + * buffer for getting the idiag command and arguments. 
The while space in + * between the set of data is used as the parsing separator. + * + * This routine returns 0 when successful, it returns proper error code + * back to the user space in error conditions. + */ +static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes, + struct lpfc_idiag_cmd *idiag_cmd) +{ + char mybuf[64]; + char *pbuf, *step_str; + int i; + size_t bsize; + + memset(mybuf, 0, sizeof(mybuf)); + memset(idiag_cmd, 0, sizeof(*idiag_cmd)); + bsize = min(nbytes, (sizeof(mybuf)-1)); + + if (copy_from_user(mybuf, buf, bsize)) + return -EFAULT; + pbuf = &mybuf[0]; + step_str = strsep(&pbuf, "\t "); + + /* The opcode must present */ + if (!step_str) + return -EINVAL; + + idiag_cmd->opcode = simple_strtol(step_str, NULL, 0); + if (idiag_cmd->opcode == 0) + return -EINVAL; + + for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) { + step_str = strsep(&pbuf, "\t "); + if (!step_str) + return i; + idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0); + } + return i; +} + +/** + * lpfc_idiag_open - idiag open debugfs + * @inode: The inode pointer that contains a pointer to phba. + * @file: The file pointer to attach the file operation. + * + * Description: + * This routine is the entry point for the debugfs open file operation. It + * gets the reference to phba from the i_private field in @inode, it then + * allocates buffer for the file operation, performs the necessary PCI config + * space read into the allocated buffer according to the idiag user command + * setup, and then returns a pointer to buffer in the private_data field in + * @file. + * + * Returns: + * This function returns zero if successful. On error it will return an + * negative error value. + **/ +static int +lpfc_idiag_open(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + return -ENOMEM; + + debug->i_private = inode->i_private; + debug->buffer = NULL; + file->private_data = debug; + + return 0; +} + +/** + * lpfc_idiag_release - Release idiag access file operation + * @inode: The inode pointer that contains a vport pointer. (unused) + * @file: The file pointer that contains the buffer to release. + * + * Description: + * This routine is the generic release routine for the idiag access file + * operation, it frees the buffer that was allocated when the debugfs file + * was opened. + * + * Returns: + * This function returns zero. + **/ +static int +lpfc_idiag_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + /* Free the buffers to the file operation */ + kfree(debug->buffer); + kfree(debug); + + return 0; +} + +/** + * lpfc_idiag_cmd_release - Release idiag cmd access file operation + * @inode: The inode pointer that contains a vport pointer. (unused) + * @file: The file pointer that contains the buffer to release. + * + * Description: + * This routine frees the buffer that was allocated when the debugfs file + * was opened. It also reset the fields in the idiag command struct in the + * case of command for write operation. + * + * Returns: + * This function returns zero. 
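+ *
+ * For example, after one of the PCICFG or QUEACC write/set/clear commands
+ * listed in the switch below, the whole idiag state is cleared so a later
+ * read of the file cannot act on stale write setup; read commands keep
+ * their setup so browse-mode reads can resume where they left off.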
+ **/ +static int +lpfc_idiag_cmd_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + if (debug->op == LPFC_IDIAG_OP_WR) { + switch (idiag.cmd.opcode) { + case LPFC_IDIAG_CMD_PCICFG_WR: + case LPFC_IDIAG_CMD_PCICFG_ST: + case LPFC_IDIAG_CMD_PCICFG_CL: + case LPFC_IDIAG_CMD_QUEACC_WR: + case LPFC_IDIAG_CMD_QUEACC_ST: + case LPFC_IDIAG_CMD_QUEACC_CL: + memset(&idiag, 0, sizeof(idiag)); + break; + default: + break; + } + } + + /* Free the buffers to the file operation */ + kfree(debug->buffer); + kfree(debug); + + return 0; +} + +/** + * lpfc_idiag_pcicfg_read - idiag debugfs read pcicfg + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba pci config space according to the + * idiag command, and copies to user @buf. Depending on the PCI config space + * read command setup, it does either a single register read of a byte + * (8 bits), a word (16 bits), or a dword (32 bits) or browsing through all + * registers from the 4K extended PCI config space. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. + **/ +static ssize_t +lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + int offset_label, offset, len = 0, index = LPFC_PCI_CFG_RD_SIZE; + int where, count; + char *pbuffer; + struct pci_dev *pdev; + uint32_t u32val; + uint16_t u16val; + uint8_t u8val; + + pdev = phba->pcidev; + if (!pdev) + return 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_PCI_CFG_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { + where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX]; + count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX]; + } else + return 0; + + /* Read single PCI config space register */ + switch (count) { + case SIZE_U8: /* byte (8 bits) */ + pci_read_config_byte(pdev, where, &u8val); + len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: %02x\n", where, u8val); + break; + case SIZE_U16: /* word (16 bits) */ + pci_read_config_word(pdev, where, &u16val); + len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: %04x\n", where, u16val); + break; + case SIZE_U32: /* double word (32 bits) */ + pci_read_config_dword(pdev, where, &u32val); + len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: %08x\n", where, u32val); + break; + case LPFC_PCI_CFG_BROWSE: /* browse all */ + goto pcicfg_browse; + default: + /* illegal count */ + len = 0; + break; + } + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); + +pcicfg_browse: + + /* Browse all PCI config space registers */ + offset_label = idiag.offset.last_rd; + offset = offset_label; + + /* Read PCI config space */ + len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: ", offset_label); + while (index > 0) { + pci_read_config_dword(pdev, offset, &u32val); + len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%08x ", u32val); + offset += sizeof(uint32_t); + if (offset >= LPFC_PCI_CFG_SIZE) { + len += 
scnprintf(pbuffer+len, + LPFC_PCI_CFG_SIZE-len, "\n"); + break; + } + index -= sizeof(uint32_t); + if (!index) + len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "\n"); + else if (!(index % (8 * sizeof(uint32_t)))) { + offset_label += (8 * sizeof(uint32_t)); + len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "\n%03x: ", offset_label); + } + } + + /* Set up the offset for next portion of pci cfg read */ + if (index == 0) { + idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE; + if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE) + idiag.offset.last_rd = 0; + } else + idiag.offset.last_rd = 0; + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +/** + * lpfc_idiag_pcicfg_write - Syntax check and set up idiag pcicfg commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * This routine get the debugfs idiag command struct from user space and + * then perform the syntax check for PCI config space read or write command + * accordingly. In the case of PCI config space read command, it sets up + * the command in the idiag command struct for the debugfs read operation. + * In the case of PCI config space write operation, it executes the write + * operation into the PCI config space accordingly. + * + * It returns the @nbytges passing in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. + */ +static ssize_t +lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t where, value, count; + uint32_t u32val; + uint16_t u16val; + uint8_t u8val; + struct pci_dev *pdev; + int rc; + + pdev = phba->pcidev; + if (!pdev) + return -EFAULT; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc < 0) + return rc; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { + /* Sanity check on PCI config read command line arguments */ + if (rc != LPFC_PCI_CFG_RD_CMD_ARG) + goto error_out; + /* Read command from PCI config space, set up command fields */ + where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX]; + count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX]; + if (count == LPFC_PCI_CFG_BROWSE) { + if (where % sizeof(uint32_t)) + goto error_out; + /* Starting offset to browse */ + idiag.offset.last_rd = where; + } else if ((count != sizeof(uint8_t)) && + (count != sizeof(uint16_t)) && + (count != sizeof(uint32_t))) + goto error_out; + if (count == sizeof(uint8_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t)) + goto error_out; + if (where % sizeof(uint8_t)) + goto error_out; + } + if (count == sizeof(uint16_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t)) + goto error_out; + if (where % sizeof(uint16_t)) + goto error_out; + } + if (count == sizeof(uint32_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t)) + goto error_out; + if (where % sizeof(uint32_t)) + goto error_out; + } + } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + /* Sanity check on PCI config write command line arguments */ + if (rc != LPFC_PCI_CFG_WR_CMD_ARG) + goto error_out; + /* Write command to PCI config space, read-modify-write */ 
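+	/*
+	 * WR stores the value exactly as given; ST reads the register back
+	 * and ORs the value in; CL reads it back and masks the value out.
+	 * The size/alignment checks below keep the access within the 4K
+	 * extended config space and naturally aligned for its width.
+	 */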
+ where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX]; + count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX]; + value = idiag.cmd.data[IDIAG_PCICFG_VALUE_INDX]; + /* Sanity checks */ + if ((count != sizeof(uint8_t)) && + (count != sizeof(uint16_t)) && + (count != sizeof(uint32_t))) + goto error_out; + if (count == sizeof(uint8_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t)) + goto error_out; + if (where % sizeof(uint8_t)) + goto error_out; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) + pci_write_config_byte(pdev, where, + (uint8_t)value); + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { + rc = pci_read_config_byte(pdev, where, &u8val); + if (!rc) { + u8val |= (uint8_t)value; + pci_write_config_byte(pdev, where, + u8val); + } + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + rc = pci_read_config_byte(pdev, where, &u8val); + if (!rc) { + u8val &= (uint8_t)(~value); + pci_write_config_byte(pdev, where, + u8val); + } + } + } + if (count == sizeof(uint16_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t)) + goto error_out; + if (where % sizeof(uint16_t)) + goto error_out; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) + pci_write_config_word(pdev, where, + (uint16_t)value); + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { + rc = pci_read_config_word(pdev, where, &u16val); + if (!rc) { + u16val |= (uint16_t)value; + pci_write_config_word(pdev, where, + u16val); + } + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + rc = pci_read_config_word(pdev, where, &u16val); + if (!rc) { + u16val &= (uint16_t)(~value); + pci_write_config_word(pdev, where, + u16val); + } + } + } + if (count == sizeof(uint32_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t)) + goto error_out; + if (where % sizeof(uint32_t)) + goto error_out; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) + pci_write_config_dword(pdev, where, value); + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { + rc = pci_read_config_dword(pdev, where, + &u32val); + if (!rc) { + u32val |= value; + pci_write_config_dword(pdev, where, + u32val); + } + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + rc = pci_read_config_dword(pdev, where, + &u32val); + if (!rc) { + u32val &= ~value; + pci_write_config_dword(pdev, where, + u32val); + } + } + } + } else + /* All other opecodes are illegal for now */ + goto error_out; + + return nbytes; +error_out: + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +/** + * lpfc_idiag_baracc_read - idiag debugfs pci bar access read + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba pci bar memory mapped space + * according to the idiag command, and copies to user @buf. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. 
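+ *
+ * A SINGLE_WORD access range returns one 32-bit register at the requested
+ * offset; any other range browses the BAR, emitting up to
+ * LPFC_PCI_BAR_RD_SIZE bytes of registers per read and resuming from
+ * idiag.offset.last_rd on the next read, so repeated reads of the file
+ * walk the selected region.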
+ **/ +static ssize_t +lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + int offset_label, offset, offset_run, len = 0, index; + int bar_num, acc_range, bar_size; + char *pbuffer; + void __iomem *mem_mapped_bar; + uint32_t if_type; + struct pci_dev *pdev; + uint32_t u32val; + + pdev = phba->pcidev; + if (!pdev) + return 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_PCI_BAR_RD_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) { + bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX]; + offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX]; + acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX]; + bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX]; + } else + return 0; + + if (acc_range == 0) + return 0; + + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { + if (bar_num == IDIAG_BARACC_BAR_0) + mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p; + else if (bar_num == IDIAG_BARACC_BAR_1) + mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p; + else if (bar_num == IDIAG_BARACC_BAR_2) + mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p; + else + return 0; + } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { + if (bar_num == IDIAG_BARACC_BAR_0) + mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p; + else + return 0; + } else + return 0; + + /* Read single PCI bar space register */ + if (acc_range == SINGLE_WORD) { + offset_run = offset; + u32val = readl(mem_mapped_bar + offset_run); + len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, + "%05x: %08x\n", offset_run, u32val); + } else + goto baracc_browse; + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); + +baracc_browse: + + /* Browse all PCI bar space registers */ + offset_label = idiag.offset.last_rd; + offset_run = offset_label; + + /* Read PCI bar memory mapped space */ + len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, + "%05x: ", offset_label); + index = LPFC_PCI_BAR_RD_SIZE; + while (index > 0) { + u32val = readl(mem_mapped_bar + offset_run); + len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, + "%08x ", u32val); + offset_run += sizeof(uint32_t); + if (acc_range == LPFC_PCI_BAR_BROWSE) { + if (offset_run >= bar_size) { + len += scnprintf(pbuffer+len, + LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n"); + break; + } + } else { + if (offset_run >= offset + + (acc_range * sizeof(uint32_t))) { + len += scnprintf(pbuffer+len, + LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n"); + break; + } + } + index -= sizeof(uint32_t); + if (!index) + len += scnprintf(pbuffer+len, + LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n"); + else if (!(index % (8 * sizeof(uint32_t)))) { + offset_label += (8 * sizeof(uint32_t)); + len += scnprintf(pbuffer+len, + LPFC_PCI_BAR_RD_BUF_SIZE-len, + "\n%05x: ", offset_label); + } + } + + /* Set up the offset for next portion of pci bar read */ + if (index == 0) { + idiag.offset.last_rd += LPFC_PCI_BAR_RD_SIZE; + if (acc_range == LPFC_PCI_BAR_BROWSE) { + if (idiag.offset.last_rd >= bar_size) + idiag.offset.last_rd = 0; + } else { + if (offset_run >= offset + + (acc_range * sizeof(uint32_t))) + idiag.offset.last_rd = offset; + } + } else { + if (acc_range == LPFC_PCI_BAR_BROWSE) + idiag.offset.last_rd = 0; + 
else + idiag.offset.last_rd = offset; + } + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +/** + * lpfc_idiag_baracc_write - Syntax check and set up idiag bar access commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * This routine get the debugfs idiag command struct from user space and + * then perform the syntax check for PCI bar memory mapped space read or + * write command accordingly. In the case of PCI bar memory mapped space + * read command, it sets up the command in the idiag command struct for + * the debugfs read operation. In the case of PCI bar memorpy mapped space + * write operation, it executes the write operation into the PCI bar memory + * mapped space accordingly. + * + * It returns the @nbytges passing in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. + */ +static ssize_t +lpfc_idiag_baracc_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t bar_num, bar_size, offset, value, acc_range; + struct pci_dev *pdev; + void __iomem *mem_mapped_bar; + uint32_t if_type; + uint32_t u32val; + int rc; + + pdev = phba->pcidev; + if (!pdev) + return -EFAULT; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc < 0) + return rc; + + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX]; + + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { + if ((bar_num != IDIAG_BARACC_BAR_0) && + (bar_num != IDIAG_BARACC_BAR_1) && + (bar_num != IDIAG_BARACC_BAR_2)) + goto error_out; + } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { + if (bar_num != IDIAG_BARACC_BAR_0) + goto error_out; + } else + goto error_out; + + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { + if (bar_num == IDIAG_BARACC_BAR_0) { + idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] = + LPFC_PCI_IF0_BAR0_SIZE; + mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p; + } else if (bar_num == IDIAG_BARACC_BAR_1) { + idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] = + LPFC_PCI_IF0_BAR1_SIZE; + mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p; + } else if (bar_num == IDIAG_BARACC_BAR_2) { + idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] = + LPFC_PCI_IF0_BAR2_SIZE; + mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p; + } else + goto error_out; + } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { + if (bar_num == IDIAG_BARACC_BAR_0) { + idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] = + LPFC_PCI_IF2_BAR0_SIZE; + mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p; + } else + goto error_out; + } else + goto error_out; + + offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX]; + if (offset % sizeof(uint32_t)) + goto error_out; + + bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX]; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) { + /* Sanity check on PCI config read command line arguments */ + if (rc != LPFC_PCI_BAR_RD_CMD_ARG) + goto error_out; + acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX]; + if (acc_range == LPFC_PCI_BAR_BROWSE) { + if (offset > bar_size - sizeof(uint32_t)) + goto error_out; + /* Starting offset to browse */ + idiag.offset.last_rd = offset; + } else if (acc_range > SINGLE_WORD) { + if 
(offset + acc_range * sizeof(uint32_t) > bar_size) + goto error_out; + /* Starting offset to browse */ + idiag.offset.last_rd = offset; + } else if (acc_range != SINGLE_WORD) + goto error_out; + } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) { + /* Sanity check on PCI bar write command line arguments */ + if (rc != LPFC_PCI_BAR_WR_CMD_ARG) + goto error_out; + /* Write command to PCI bar space, read-modify-write */ + acc_range = SINGLE_WORD; + value = idiag.cmd.data[IDIAG_BARACC_REG_VAL_INDX]; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR) { + writel(value, mem_mapped_bar + offset); + readl(mem_mapped_bar + offset); + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST) { + u32val = readl(mem_mapped_bar + offset); + u32val |= value; + writel(u32val, mem_mapped_bar + offset); + readl(mem_mapped_bar + offset); + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) { + u32val = readl(mem_mapped_bar + offset); + u32val &= ~value; + writel(u32val, mem_mapped_bar + offset); + readl(mem_mapped_bar + offset); + } + } else + /* All other opecodes are illegal for now */ + goto error_out; + + return nbytes; +error_out: + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +static int +__lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype, + char *pbuffer, int len) +{ + if (!qp) + return len; + + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t\t%s WQ info: ", wqtype); + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "AssocCQID[%04d]: WQ-STAT[oflow:x%x posted:x%llx]\n", + qp->assoc_qid, qp->q_cnt_1, + (unsigned long long)qp->q_cnt_4); + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " + "HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]", + qp->queue_id, qp->entry_count, + qp->entry_size, qp->host_index, + qp->hba_index, qp->notify_interval); + len += scnprintf(pbuffer + len, + LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); + return len; +} + +static int +lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer, + int *len, int max_cnt, int cq_id) +{ + struct lpfc_queue *qp; + int qidx; + + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + qp = phba->sli4_hba.hdwq[qidx].io_wq; + if (qp->assoc_qid != cq_id) + continue; + *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len); + if (*len >= max_cnt) + return 1; + } + return 0; +} + +static int +__lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype, + char *pbuffer, int len) +{ + if (!qp) + return len; + + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t%s CQ info: ", cqtype); + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "AssocEQID[%02d]: CQ STAT[max:x%x relw:x%x " + "xabt:x%x wq:x%llx]\n", + qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, + qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " + "HST-IDX[%04d], NTFI[%03d], PLMT[%03d]", + qp->queue_id, qp->entry_count, + qp->entry_size, qp->host_index, + qp->notify_interval, qp->max_proc_limit); + + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\n"); + + return len; +} + +static int +__lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp, + char *rqtype, char *pbuffer, int len) +{ + if (!qp || !datqp) + return len; + + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE 
- len, + "\t\t%s RQ info: ", rqtype); + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x " + "posted:x%x rcv:x%llx]\n", + qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, + qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " + "HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]\n", + qp->queue_id, qp->entry_count, qp->entry_size, + qp->host_index, qp->hba_index, qp->notify_interval); + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " + "HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]\n", + datqp->queue_id, datqp->entry_count, + datqp->entry_size, datqp->host_index, + datqp->hba_index, datqp->notify_interval); + return len; +} + +static int +lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer, + int *len, int max_cnt, int eqidx, int eq_id) +{ + struct lpfc_queue *qp; + int rc; + + qp = phba->sli4_hba.hdwq[eqidx].io_cq; + + *len = __lpfc_idiag_print_cq(qp, "IO", pbuffer, *len); + + /* Reset max counter */ + qp->CQ_max_cqe = 0; + + if (*len >= max_cnt) + return 1; + + rc = lpfc_idiag_wqs_for_cq(phba, "IO", pbuffer, len, + max_cnt, qp->queue_id); + if (rc) + return 1; + + if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) { + /* NVMET CQset */ + qp = phba->sli4_hba.nvmet_cqset[eqidx]; + *len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len); + + /* Reset max counter */ + qp->CQ_max_cqe = 0; + + if (*len >= max_cnt) + return 1; + + /* RQ header */ + qp = phba->sli4_hba.nvmet_mrq_hdr[eqidx]; + *len = __lpfc_idiag_print_rqpair(qp, + phba->sli4_hba.nvmet_mrq_data[eqidx], + "NVMET MRQ", pbuffer, *len); + + if (*len >= max_cnt) + return 1; + } + + return 0; +} + +static int +__lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype, + char *pbuffer, int len) +{ + if (!qp) + return len; + + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\n%s EQ info: EQ-STAT[max:x%x noE:x%x " + "cqe_proc:x%x eqe_proc:x%llx eqd %d]\n", + eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, + (unsigned long long)qp->q_cnt_4, qp->q_mode); + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " + "HST-IDX[%04d], NTFI[%03d], PLMT[%03d], AFFIN[%03d]", + qp->queue_id, qp->entry_count, qp->entry_size, + qp->host_index, qp->notify_interval, + qp->max_proc_limit, qp->chann); + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\n"); + + return len; +} + +/** + * lpfc_idiag_queinfo_read - idiag debugfs read queue information + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba SLI4 PCI function queue information, + * and copies to user @buf. + * This routine only returns 1 EQs worth of information. It remembers the last + * EQ read and jumps to the next EQ. Thus subsequent calls to queInfo will + * retrieve all EQs allocated for the phba. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. 
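+ *
+ * Each read also dumps the CQs and WQs chained to the selected EQ, and the
+ * read that lands on EQ 0 additionally reports the slow-path MBX, ELS and
+ * NVME LS queues plus the ELS RQ pair, so reading the file repeatedly
+ * (for instance with cat in a loop) walks the complete queue topology.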
+ **/ +static ssize_t +lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + char *pbuffer; + int max_cnt, rc, x, len = 0; + struct lpfc_queue *qp = NULL; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 256; + + if (*ppos) + return 0; + + spin_lock_irq(&phba->hbalock); + + /* Fast-path event queue */ + if (phba->sli4_hba.hdwq && phba->cfg_hdw_queue) { + + x = phba->lpfc_idiag_last_eq; + phba->lpfc_idiag_last_eq++; + if (phba->lpfc_idiag_last_eq >= phba->cfg_hdw_queue) + phba->lpfc_idiag_last_eq = 0; + + len += scnprintf(pbuffer + len, + LPFC_QUE_INFO_GET_BUF_SIZE - len, + "HDWQ %d out of %d HBA HDWQs\n", + x, phba->cfg_hdw_queue); + + /* Fast-path EQ */ + qp = phba->sli4_hba.hdwq[x].hba_eq; + if (!qp) + goto out; + + len = __lpfc_idiag_print_eq(qp, "HBA", pbuffer, len); + + /* Reset max counter */ + qp->EQ_max_eqe = 0; + + if (len >= max_cnt) + goto too_big; + + /* will dump both fcp and nvme cqs/wqs for the eq */ + rc = lpfc_idiag_cqs_for_eq(phba, pbuffer, &len, + max_cnt, x, qp->queue_id); + if (rc) + goto too_big; + + /* Only EQ 0 has slow path CQs configured */ + if (x) + goto out; + + /* Slow-path mailbox CQ */ + qp = phba->sli4_hba.mbx_cq; + len = __lpfc_idiag_print_cq(qp, "MBX", pbuffer, len); + if (len >= max_cnt) + goto too_big; + + /* Slow-path MBOX MQ */ + qp = phba->sli4_hba.mbx_wq; + len = __lpfc_idiag_print_wq(qp, "MBX", pbuffer, len); + if (len >= max_cnt) + goto too_big; + + /* Slow-path ELS response CQ */ + qp = phba->sli4_hba.els_cq; + len = __lpfc_idiag_print_cq(qp, "ELS", pbuffer, len); + /* Reset max counter */ + if (qp) + qp->CQ_max_cqe = 0; + if (len >= max_cnt) + goto too_big; + + /* Slow-path ELS WQ */ + qp = phba->sli4_hba.els_wq; + len = __lpfc_idiag_print_wq(qp, "ELS", pbuffer, len); + if (len >= max_cnt) + goto too_big; + + qp = phba->sli4_hba.hdr_rq; + len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq, + "ELS RQpair", pbuffer, len); + if (len >= max_cnt) + goto too_big; + + /* Slow-path NVME LS response CQ */ + qp = phba->sli4_hba.nvmels_cq; + len = __lpfc_idiag_print_cq(qp, "NVME LS", + pbuffer, len); + /* Reset max counter */ + if (qp) + qp->CQ_max_cqe = 0; + if (len >= max_cnt) + goto too_big; + + /* Slow-path NVME LS WQ */ + qp = phba->sli4_hba.nvmels_wq; + len = __lpfc_idiag_print_wq(qp, "NVME LS", + pbuffer, len); + if (len >= max_cnt) + goto too_big; + + goto out; + } + + spin_unlock_irq(&phba->hbalock); + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); + +too_big: + len += scnprintf(pbuffer + len, + LPFC_QUE_INFO_GET_BUF_SIZE - len, "Truncated ...\n"); +out: + spin_unlock_irq(&phba->hbalock); + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +/** + * lpfc_idiag_que_param_check - queue access command parameter sanity check + * @q: The pointer to queue structure. + * @index: The index into a queue entry. + * @count: The number of queue entries to access. + * + * Description: + * The routine performs sanity check on device queue access method commands. + * + * Returns: + * This function returns -EINVAL when fails the sanity check, otherwise, it + * returns 0. 
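+ *
+ * For example, on a queue created with 256 entries, (index 255, count 1)
+ * and (index 0, count LPFC_QUE_ACC_BROWSE) pass the check, while a count
+ * of 2 or an index of 256 is rejected with -EINVAL.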
+ **/ +static int +lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count) +{ + /* Only support single entry read or browsing */ + if ((count != 1) && (count != LPFC_QUE_ACC_BROWSE)) + return -EINVAL; + if (index > q->entry_count - 1) + return -EINVAL; + return 0; +} + +/** + * lpfc_idiag_queacc_read_qe - read a single entry from the given queue index + * @pbuffer: The pointer to buffer to copy the read data into. + * @len: Length of the buffer. + * @pque: The pointer to the queue to be read. + * @index: The index into the queue entry. + * + * Description: + * This routine reads out a single entry from the given queue's index location + * and copies it into the buffer provided. + * + * Returns: + * This function returns 0 when it fails, otherwise, it returns the length of + * the data read into the buffer provided. + **/ +static int +lpfc_idiag_queacc_read_qe(char *pbuffer, int len, struct lpfc_queue *pque, + uint32_t index) +{ + int offset, esize; + uint32_t *pentry; + + if (!pbuffer || !pque) + return 0; + + esize = pque->entry_size; + len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, + "QE-INDEX[%04d]:\n", index); + + offset = 0; + pentry = lpfc_sli4_qe(pque, index); + while (esize > 0) { + len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, + "%08x ", *pentry); + pentry++; + offset += sizeof(uint32_t); + esize -= sizeof(uint32_t); + if (esize > 0 && !(offset % (4 * sizeof(uint32_t)))) + len += scnprintf(pbuffer+len, + LPFC_QUE_ACC_BUF_SIZE-len, "\n"); + } + len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n"); + + return len; +} + +/** + * lpfc_idiag_queacc_read - idiag debugfs read port queue + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba device queue memory according to the + * idiag command, and copies to user @buf. Depending on the queue dump read + * command setup, it does either a single queue entry read or browing through + * all entries of the queue. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. 
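+ *
+ * In browse mode each read continues from idiag.offset.last_rd, dumping
+ * entries until the access buffer is nearly full, and wraps back to entry
+ * 0 after the last entry, so repeated reads walk the whole queue.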
+ **/ +static ssize_t +lpfc_idiag_queacc_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + uint32_t last_index, index, count; + struct lpfc_queue *pque = NULL; + char *pbuffer; + int len = 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_QUE_ACC_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) { + index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX]; + count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX]; + pque = (struct lpfc_queue *)idiag.ptr_private; + } else + return 0; + + /* Browse the queue starting from index */ + if (count == LPFC_QUE_ACC_BROWSE) + goto que_browse; + + /* Read a single entry from the queue */ + len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index); + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); + +que_browse: + + /* Browse all entries from the queue */ + last_index = idiag.offset.last_rd; + index = last_index; + + while (len < LPFC_QUE_ACC_SIZE - pque->entry_size) { + len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index); + index++; + if (index > pque->entry_count - 1) + break; + } + + /* Set up the offset for next portion of pci cfg read */ + if (index > pque->entry_count - 1) + index = 0; + idiag.offset.last_rd = index; + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +/** + * lpfc_idiag_queacc_write - Syntax check and set up idiag queacc commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * This routine get the debugfs idiag command struct from user space and then + * perform the syntax check for port queue read (dump) or write (set) command + * accordingly. In the case of port queue read command, it sets up the command + * in the idiag command struct for the following debugfs read operation. In + * the case of port queue write operation, it executes the write operation + * into the port queue entry accordingly. + * + * It returns the @nbytges passing in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. 
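+ *
+ * The command supplies a queue type (EQ, CQ, MQ, WQ or RQ), a queue id,
+ * an entry index, a count, a word offset within the entry and a value;
+ * the WR/ST/CL opcodes then write, set or clear that one 32-bit word,
+ * while the RD opcode only records the setup for the following read.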
+ **/ +static ssize_t +lpfc_idiag_queacc_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t qidx, quetp, queid, index, count, offset, value; + uint32_t *pentry; + struct lpfc_queue *pque, *qp; + int rc; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc < 0) + return rc; + + /* Get and sanity check on command feilds */ + quetp = idiag.cmd.data[IDIAG_QUEACC_QUETP_INDX]; + queid = idiag.cmd.data[IDIAG_QUEACC_QUEID_INDX]; + index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX]; + count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX]; + offset = idiag.cmd.data[IDIAG_QUEACC_OFFST_INDX]; + value = idiag.cmd.data[IDIAG_QUEACC_VALUE_INDX]; + + /* Sanity check on command line arguments */ + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) { + if (rc != LPFC_QUE_ACC_WR_CMD_ARG) + goto error_out; + if (count != 1) + goto error_out; + } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) { + if (rc != LPFC_QUE_ACC_RD_CMD_ARG) + goto error_out; + } else + goto error_out; + + switch (quetp) { + case LPFC_IDIAG_EQ: + /* HBA event queue */ + if (phba->sli4_hba.hdwq) { + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + qp = phba->sli4_hba.hdwq[qidx].hba_eq; + if (qp && qp->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check(qp, + index, count); + if (rc) + goto error_out; + idiag.ptr_private = qp; + goto pass_check; + } + } + } + goto error_out; + + case LPFC_IDIAG_CQ: + /* MBX complete queue */ + if (phba->sli4_hba.mbx_cq && + phba->sli4_hba.mbx_cq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.mbx_cq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.mbx_cq; + goto pass_check; + } + /* ELS complete queue */ + if (phba->sli4_hba.els_cq && + phba->sli4_hba.els_cq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.els_cq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.els_cq; + goto pass_check; + } + /* NVME LS complete queue */ + if (phba->sli4_hba.nvmels_cq && + phba->sli4_hba.nvmels_cq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.nvmels_cq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.nvmels_cq; + goto pass_check; + } + /* FCP complete queue */ + if (phba->sli4_hba.hdwq) { + for (qidx = 0; qidx < phba->cfg_hdw_queue; + qidx++) { + qp = phba->sli4_hba.hdwq[qidx].io_cq; + if (qp && qp->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + qp, index, count); + if (rc) + goto error_out; + idiag.ptr_private = qp; + goto pass_check; + } + } + } + goto error_out; + + case LPFC_IDIAG_MQ: + /* MBX work queue */ + if (phba->sli4_hba.mbx_wq && + phba->sli4_hba.mbx_wq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.mbx_wq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.mbx_wq; + goto pass_check; + } + goto error_out; + + case LPFC_IDIAG_WQ: + /* ELS work queue */ + if (phba->sli4_hba.els_wq && + phba->sli4_hba.els_wq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.els_wq, 
index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.els_wq; + goto pass_check; + } + /* NVME LS work queue */ + if (phba->sli4_hba.nvmels_wq && + phba->sli4_hba.nvmels_wq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.nvmels_wq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.nvmels_wq; + goto pass_check; + } + + if (phba->sli4_hba.hdwq) { + /* FCP/SCSI work queue */ + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + qp = phba->sli4_hba.hdwq[qidx].io_wq; + if (qp && qp->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + qp, index, count); + if (rc) + goto error_out; + idiag.ptr_private = qp; + goto pass_check; + } + } + } + goto error_out; + + case LPFC_IDIAG_RQ: + /* HDR queue */ + if (phba->sli4_hba.hdr_rq && + phba->sli4_hba.hdr_rq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.hdr_rq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.hdr_rq; + goto pass_check; + } + /* DAT queue */ + if (phba->sli4_hba.dat_rq && + phba->sli4_hba.dat_rq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.dat_rq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.dat_rq; + goto pass_check; + } + goto error_out; + default: + goto error_out; + } + +pass_check: + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) { + if (count == LPFC_QUE_ACC_BROWSE) + idiag.offset.last_rd = index; + } + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) { + /* Additional sanity checks on write operation */ + pque = (struct lpfc_queue *)idiag.ptr_private; + if (offset > pque->entry_size/sizeof(uint32_t) - 1) + goto error_out; + pentry = lpfc_sli4_qe(pque, index); + pentry += offset; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR) + *pentry = value; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST) + *pentry |= value; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) + *pentry &= ~value; + } + return nbytes; + +error_out: + /* Clean out command structure on command error out */ + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +/** + * lpfc_idiag_drbacc_read_reg - idiag debugfs read a doorbell register + * @phba: The pointer to hba structure. + * @pbuffer: The pointer to the buffer to copy the data to. + * @len: The length of bytes to copied. + * @drbregid: The id to doorbell registers. + * + * Description: + * This routine reads a doorbell register and copies its content to the + * user buffer pointed to by @pbuffer. + * + * Returns: + * This function returns the amount of data that was copied into @pbuffer. 
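+ *
+ * When the read path below is given LPFC_DRB_ACC_ALL it calls this helper
+ * once for every id from 1 to LPFC_DRB_MAX, producing one
+ * "xx-DRB-REG: 0x........" line per doorbell (EQ, CQ, MQ, WQ and RQ).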
+ **/ +static int +lpfc_idiag_drbacc_read_reg(struct lpfc_hba *phba, char *pbuffer, + int len, uint32_t drbregid) +{ + + if (!pbuffer) + return 0; + + switch (drbregid) { + case LPFC_DRB_EQ: + len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE-len, + "EQ-DRB-REG: 0x%08x\n", + readl(phba->sli4_hba.EQDBregaddr)); + break; + case LPFC_DRB_CQ: + len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len, + "CQ-DRB-REG: 0x%08x\n", + readl(phba->sli4_hba.CQDBregaddr)); + break; + case LPFC_DRB_MQ: + len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, + "MQ-DRB-REG: 0x%08x\n", + readl(phba->sli4_hba.MQDBregaddr)); + break; + case LPFC_DRB_WQ: + len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, + "WQ-DRB-REG: 0x%08x\n", + readl(phba->sli4_hba.WQDBregaddr)); + break; + case LPFC_DRB_RQ: + len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, + "RQ-DRB-REG: 0x%08x\n", + readl(phba->sli4_hba.RQDBregaddr)); + break; + default: + break; + } + + return len; +} + +/** + * lpfc_idiag_drbacc_read - idiag debugfs read port doorbell + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba device doorbell register according + * to the idiag command, and copies to user @buf. Depending on the doorbell + * register read command setup, it does either a single doorbell register + * read or dump all doorbell registers. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. + **/ +static ssize_t +lpfc_idiag_drbacc_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t drb_reg_id, i; + char *pbuffer; + int len = 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_DRB_ACC_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD) + drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX]; + else + return 0; + + if (drb_reg_id == LPFC_DRB_ACC_ALL) + for (i = 1; i <= LPFC_DRB_MAX; i++) + len = lpfc_idiag_drbacc_read_reg(phba, + pbuffer, len, i); + else + len = lpfc_idiag_drbacc_read_reg(phba, + pbuffer, len, drb_reg_id); + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +/** + * lpfc_idiag_drbacc_write - Syntax check and set up idiag drbacc commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * This routine get the debugfs idiag command struct from user space and then + * perform the syntax check for port doorbell register read (dump) or write + * (set) command accordingly. In the case of port queue read command, it sets + * up the command in the idiag command struct for the following debugfs read + * operation. In the case of port doorbell register write operation, it + * executes the write operation into the port doorbell register accordingly. + * + * It returns the @nbytges passing in from debugfs user space when successful. 
+ * In case of error conditions, it returns proper error code back to the user + * space. + **/ +static ssize_t +lpfc_idiag_drbacc_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t drb_reg_id, value, reg_val = 0; + void __iomem *drb_reg; + int rc; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc < 0) + return rc; + + /* Sanity check on command line arguments */ + drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX]; + value = idiag.cmd.data[IDIAG_DRBACC_VALUE_INDX]; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) { + if (rc != LPFC_DRB_ACC_WR_CMD_ARG) + goto error_out; + if (drb_reg_id > LPFC_DRB_MAX) + goto error_out; + } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD) { + if (rc != LPFC_DRB_ACC_RD_CMD_ARG) + goto error_out; + if ((drb_reg_id > LPFC_DRB_MAX) && + (drb_reg_id != LPFC_DRB_ACC_ALL)) + goto error_out; + } else + goto error_out; + + /* Perform the write access operation */ + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) { + switch (drb_reg_id) { + case LPFC_DRB_EQ: + drb_reg = phba->sli4_hba.EQDBregaddr; + break; + case LPFC_DRB_CQ: + drb_reg = phba->sli4_hba.CQDBregaddr; + break; + case LPFC_DRB_MQ: + drb_reg = phba->sli4_hba.MQDBregaddr; + break; + case LPFC_DRB_WQ: + drb_reg = phba->sli4_hba.WQDBregaddr; + break; + case LPFC_DRB_RQ: + drb_reg = phba->sli4_hba.RQDBregaddr; + break; + default: + goto error_out; + } + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR) + reg_val = value; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST) { + reg_val = readl(drb_reg); + reg_val |= value; + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) { + reg_val = readl(drb_reg); + reg_val &= ~value; + } + writel(reg_val, drb_reg); + readl(drb_reg); /* flush */ + } + return nbytes; + +error_out: + /* Clean out command structure on command error out */ + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +/** + * lpfc_idiag_ctlacc_read_reg - idiag debugfs read a control registers + * @phba: The pointer to hba structure. + * @pbuffer: The pointer to the buffer to copy the data to. + * @len: The length of bytes to copied. + * @ctlregid: The id to doorbell registers. + * + * Description: + * This routine reads a control register and copies its content to the + * user buffer pointed to by @pbuffer. + * + * Returns: + * This function returns the amount of data that was copied into @pbuffer. 
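+ *
+ * Each id selects a fixed offset within the BAR0 control register area
+ * (conf_regs_memmap_p); the read path below passes LPFC_CTL_ACC_ALL to
+ * dump every register from 1 to LPFC_CTL_MAX into a single buffer.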
+ **/ +static int +lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer, + int len, uint32_t ctlregid) +{ + + if (!pbuffer) + return 0; + + switch (ctlregid) { + case LPFC_CTL_PORT_SEM: + len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + "Port SemReg: 0x%08x\n", + readl(phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_SEM_OFFSET)); + break; + case LPFC_CTL_PORT_STA: + len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + "Port StaReg: 0x%08x\n", + readl(phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_STA_OFFSET)); + break; + case LPFC_CTL_PORT_CTL: + len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + "Port CtlReg: 0x%08x\n", + readl(phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_CTL_OFFSET)); + break; + case LPFC_CTL_PORT_ER1: + len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + "Port Er1Reg: 0x%08x\n", + readl(phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_ER1_OFFSET)); + break; + case LPFC_CTL_PORT_ER2: + len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + "Port Er2Reg: 0x%08x\n", + readl(phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_ER2_OFFSET)); + break; + case LPFC_CTL_PDEV_CTL: + len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + "PDev CtlReg: 0x%08x\n", + readl(phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PDEV_CTL_OFFSET)); + break; + default: + break; + } + return len; +} + +/** + * lpfc_idiag_ctlacc_read - idiag debugfs read port and device control register + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba port and device registers according + * to the idiag command, and copies to user @buf. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. + **/ +static ssize_t +lpfc_idiag_ctlacc_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t ctl_reg_id, i; + char *pbuffer; + int len = 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_CTL_ACC_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD) + ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX]; + else + return 0; + + if (ctl_reg_id == LPFC_CTL_ACC_ALL) + for (i = 1; i <= LPFC_CTL_MAX; i++) + len = lpfc_idiag_ctlacc_read_reg(phba, + pbuffer, len, i); + else + len = lpfc_idiag_ctlacc_read_reg(phba, + pbuffer, len, ctl_reg_id); + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +/** + * lpfc_idiag_ctlacc_write - Syntax check and set up idiag ctlacc commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * This routine get the debugfs idiag command struct from user space and then + * perform the syntax check for port and device control register read (dump) + * or write (set) command accordingly. + * + * It returns the @nbytges passing in from debugfs user space when successful. 
+ * In case of error conditions, it returns proper error code back to the user + * space. + **/ +static ssize_t +lpfc_idiag_ctlacc_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t ctl_reg_id, value, reg_val = 0; + void __iomem *ctl_reg; + int rc; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc < 0) + return rc; + + /* Sanity check on command line arguments */ + ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX]; + value = idiag.cmd.data[IDIAG_CTLACC_VALUE_INDX]; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) { + if (rc != LPFC_CTL_ACC_WR_CMD_ARG) + goto error_out; + if (ctl_reg_id > LPFC_CTL_MAX) + goto error_out; + } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD) { + if (rc != LPFC_CTL_ACC_RD_CMD_ARG) + goto error_out; + if ((ctl_reg_id > LPFC_CTL_MAX) && + (ctl_reg_id != LPFC_CTL_ACC_ALL)) + goto error_out; + } else + goto error_out; + + /* Perform the write access operation */ + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) { + switch (ctl_reg_id) { + case LPFC_CTL_PORT_SEM: + ctl_reg = phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_SEM_OFFSET; + break; + case LPFC_CTL_PORT_STA: + ctl_reg = phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_STA_OFFSET; + break; + case LPFC_CTL_PORT_CTL: + ctl_reg = phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_CTL_OFFSET; + break; + case LPFC_CTL_PORT_ER1: + ctl_reg = phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_ER1_OFFSET; + break; + case LPFC_CTL_PORT_ER2: + ctl_reg = phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_ER2_OFFSET; + break; + case LPFC_CTL_PDEV_CTL: + ctl_reg = phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PDEV_CTL_OFFSET; + break; + default: + goto error_out; + } + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR) + reg_val = value; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST) { + reg_val = readl(ctl_reg); + reg_val |= value; + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) { + reg_val = readl(ctl_reg); + reg_val &= ~value; + } + writel(reg_val, ctl_reg); + readl(ctl_reg); /* flush */ + } + return nbytes; + +error_out: + /* Clean out command structure on command error out */ + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +/** + * lpfc_idiag_mbxacc_get_setup - idiag debugfs get mailbox access setup + * @phba: Pointer to HBA context object. + * @pbuffer: Pointer to data buffer. + * + * Description: + * This routine gets the driver mailbox access debugfs setup information. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. 
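+ *
+ * The output simply echoes the four setup words captured from the last
+ * mailbox access command, for example (values illustrative, format
+ * strings as used in the helper below):
+ *   mbx_dump_map: 0x00000003
+ *   mbx_dump_cnt: 0001
+ *   mbx_word_cnt: 0064
+ *   mbx_mbox_cmd: 0x9b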
+ **/ +static int +lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer) +{ + uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd; + int len = 0; + + mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX]; + mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX]; + mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX]; + mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX]; + + len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + "mbx_dump_map: 0x%08x\n", mbx_dump_map); + len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + "mbx_dump_cnt: %04d\n", mbx_dump_cnt); + len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + "mbx_word_cnt: %04d\n", mbx_word_cnt); + len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + "mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd); + + return len; +} + +/** + * lpfc_idiag_mbxacc_read - idiag debugfs read on mailbox access + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba driver mailbox access debugfs setup + * information. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. + **/ +static ssize_t +lpfc_idiag_mbxacc_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + char *pbuffer; + int len = 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_MBX_ACC_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + if ((idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP) && + (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP)) + return 0; + + len = lpfc_idiag_mbxacc_get_setup(phba, pbuffer); + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +/** + * lpfc_idiag_mbxacc_write - Syntax check and set up idiag mbxacc commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * This routine get the debugfs idiag command struct from user space and then + * perform the syntax check for driver mailbox command (dump) and sets up the + * necessary states in the idiag command struct accordingly. + * + * It returns the @nbytges passing in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. 
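The read routine above uses the one-shot debugfs read idiom found throughout this file: bail out for any non-zero *ppos so the text is generated only once per read cycle, format into a scratch buffer, and return it through simple_read_from_buffer(), which handles the user copy and advances *ppos. A stripped-down sketch of that idiom, with hypothetical names and a per-call buffer instead of the driver's cached debug->buffer, is shown below.

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#define DEMO_BUF_SIZE 256

/* Illustrative one-shot debugfs read handler, not driver code. */
static ssize_t demo_debugfs_read(struct file *file, char __user *buf,
				 size_t nbytes, loff_t *ppos)
{
	char *pbuffer;
	ssize_t rc;
	int len;

	if (*ppos)		/* content is produced only on the first read */
		return 0;

	pbuffer = kmalloc(DEMO_BUF_SIZE, GFP_KERNEL);
	if (!pbuffer)
		return -ENOMEM;

	len = scnprintf(pbuffer, DEMO_BUF_SIZE, "state: %s\n", "ok");
	rc = simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
	kfree(pbuffer);
	return rc;
}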
+ **/ +static ssize_t +lpfc_idiag_mbxacc_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd; + int rc; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc < 0) + return rc; + + /* Sanity check on command line arguments */ + mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX]; + mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX]; + mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX]; + mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX]; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_MBXACC_DP) { + if (!(mbx_dump_map & LPFC_MBX_DMP_MBX_ALL)) + goto error_out; + if ((mbx_dump_map & ~LPFC_MBX_DMP_MBX_ALL) && + (mbx_dump_map != LPFC_MBX_DMP_ALL)) + goto error_out; + if (mbx_word_cnt > sizeof(MAILBOX_t)) + goto error_out; + } else if (idiag.cmd.opcode == LPFC_IDIAG_BSG_MBXACC_DP) { + if (!(mbx_dump_map & LPFC_BSG_DMP_MBX_ALL)) + goto error_out; + if ((mbx_dump_map & ~LPFC_BSG_DMP_MBX_ALL) && + (mbx_dump_map != LPFC_MBX_DMP_ALL)) + goto error_out; + if (mbx_word_cnt > (BSG_MBOX_SIZE)/4) + goto error_out; + if (mbx_mbox_cmd != 0x9b) + goto error_out; + } else + goto error_out; + + if (mbx_word_cnt == 0) + goto error_out; + if (rc != LPFC_MBX_DMP_ARG) + goto error_out; + if (mbx_mbox_cmd & ~0xff) + goto error_out; + + /* condition for stop mailbox dump */ + if (mbx_dump_cnt == 0) + goto reset_out; + + return nbytes; + +reset_out: + /* Clean out command structure on command error out */ + memset(&idiag, 0, sizeof(idiag)); + return nbytes; + +error_out: + /* Clean out command structure on command error out */ + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +/** + * lpfc_idiag_extacc_avail_get - get the available extents information + * @phba: pointer to lpfc hba data structure. + * @pbuffer: pointer to internal buffer. + * @len: length into the internal buffer data has been copied. + * + * Description: + * This routine is to get the available extent information. + * + * Returns: + * overall length of the data read into the internal buffer. 
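The syntax checks above have to accept either an exact set of known dump-map bits or the all-ones wildcard (LPFC_MBX_DMP_ALL) while rejecting unknown bits. The small helper below restates that test in isolation; it is illustrative only and not part of the driver.

#include <linux/types.h>

/* Illustrative restatement of the dump-map checks above: accept the
 * wildcard, require at least one known bit, and reject any bit outside
 * the permitted set.
 */
static bool demo_dump_map_valid(u32 map, u32 allowed, u32 wildcard)
{
	if (map == wildcard)		/* e.g. LPFC_MBX_DMP_ALL */
		return true;
	if (!(map & allowed))		/* nothing selected */
		return false;
	if (map & ~allowed)		/* unknown bits selected */
		return false;
	return true;
}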
+ **/ +static int +lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len) +{ + uint16_t ext_cnt = 0, ext_size = 0; + + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\nAvailable Extents Information:\n"); + + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tPort Available VPI extents: "); + lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI, + &ext_cnt, &ext_size); + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Count %3d, Size %3d\n", ext_cnt, ext_size); + + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tPort Available VFI extents: "); + lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI, + &ext_cnt, &ext_size); + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Count %3d, Size %3d\n", ext_cnt, ext_size); + + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tPort Available RPI extents: "); + lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI, + &ext_cnt, &ext_size); + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Count %3d, Size %3d\n", ext_cnt, ext_size); + + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tPort Available XRI extents: "); + lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI, + &ext_cnt, &ext_size); + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Count %3d, Size %3d\n", ext_cnt, ext_size); + + return len; +} + +/** + * lpfc_idiag_extacc_alloc_get - get the allocated extents information + * @phba: pointer to lpfc hba data structure. + * @pbuffer: pointer to internal buffer. + * @len: length into the internal buffer data has been copied. + * + * Description: + * This routine is to get the allocated extent information. + * + * Returns: + * overall length of the data read into the internal buffer. 
+ **/ +static int +lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len) +{ + uint16_t ext_cnt, ext_size; + int rc; + + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\nAllocated Extents Information:\n"); + + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tHost Allocated VPI extents: "); + rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI, + &ext_cnt, &ext_size); + if (!rc) + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Port %d Extent %3d, Size %3d\n", + phba->brd_no, ext_cnt, ext_size); + else + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "N/A\n"); + + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tHost Allocated VFI extents: "); + rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI, + &ext_cnt, &ext_size); + if (!rc) + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Port %d Extent %3d, Size %3d\n", + phba->brd_no, ext_cnt, ext_size); + else + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "N/A\n"); + + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tHost Allocated RPI extents: "); + rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI, + &ext_cnt, &ext_size); + if (!rc) + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Port %d Extent %3d, Size %3d\n", + phba->brd_no, ext_cnt, ext_size); + else + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "N/A\n"); + + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tHost Allocated XRI extents: "); + rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI, + &ext_cnt, &ext_size); + if (!rc) + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Port %d Extent %3d, Size %3d\n", + phba->brd_no, ext_cnt, ext_size); + else + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "N/A\n"); + + return len; +} + +/** + * lpfc_idiag_extacc_drivr_get - get driver extent information + * @phba: pointer to lpfc hba data structure. + * @pbuffer: pointer to internal buffer. + * @len: length into the internal buffer data has been copied. + * + * Description: + * This routine is to get the driver extent information. + * + * Returns: + * overall length of the data read into the internal buffer. 
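The available/allocated getters above repeat the same query-and-format block once per resource type (VPI, VFI, RPI, XRI). A table-driven version of the available-extents report is sketched below purely for illustration; it reuses the driver's resource-type constants and buffer-size macro, but the table and helper name are hypothetical and this is not a proposed change.

#include <linux/kernel.h>

/* Illustrative only: loop over a small table instead of four copies of
 * the same block.
 */
static const struct {
	int type;
	const char *name;
} demo_ext_rsrc_tbl[] = {
	{ LPFC_RSC_TYPE_FCOE_VPI, "VPI" },
	{ LPFC_RSC_TYPE_FCOE_VFI, "VFI" },
	{ LPFC_RSC_TYPE_FCOE_RPI, "RPI" },
	{ LPFC_RSC_TYPE_FCOE_XRI, "XRI" },
};

static int
demo_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
{
	uint16_t ext_cnt, ext_size;
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_ext_rsrc_tbl); i++) {
		ext_cnt = 0;
		ext_size = 0;
		lpfc_sli4_get_avail_extnt_rsrc(phba, demo_ext_rsrc_tbl[i].type,
					       &ext_cnt, &ext_size);
		len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len,
				 "\tPort Available %s extents: "
				 "Count %3d, Size %3d\n",
				 demo_ext_rsrc_tbl[i].name, ext_cnt, ext_size);
	}
	return len;
}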
+ **/ +static int +lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len) +{ + struct lpfc_rsrc_blks *rsrc_blks; + int index; + + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\nDriver Extents Information:\n"); + + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tVPI extents:\n"); + index = 0; + list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) { + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\t\tBlock %3d: Start %4d, Count %4d\n", + index, rsrc_blks->rsrc_start, + rsrc_blks->rsrc_size); + index++; + } + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tVFI extents:\n"); + index = 0; + list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list, + list) { + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\t\tBlock %3d: Start %4d, Count %4d\n", + index, rsrc_blks->rsrc_start, + rsrc_blks->rsrc_size); + index++; + } + + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tRPI extents:\n"); + index = 0; + list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list, + list) { + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\t\tBlock %3d: Start %4d, Count %4d\n", + index, rsrc_blks->rsrc_start, + rsrc_blks->rsrc_size); + index++; + } + + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tXRI extents:\n"); + index = 0; + list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list, + list) { + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\t\tBlock %3d: Start %4d, Count %4d\n", + index, rsrc_blks->rsrc_start, + rsrc_blks->rsrc_size); + index++; + } + + return len; +} + +/** + * lpfc_idiag_extacc_write - Syntax check and set up idiag extacc commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * This routine get the debugfs idiag command struct from user space and then + * perform the syntax check for extent information access commands and sets + * up the necessary states in the idiag command struct accordingly. + * + * It returns the @nbytges passing in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. + **/ +static ssize_t +lpfc_idiag_extacc_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + uint32_t ext_map; + int rc; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc < 0) + return rc; + + ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX]; + + if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD) + goto error_out; + if (rc != LPFC_EXT_ACC_CMD_ARG) + goto error_out; + if (!(ext_map & LPFC_EXT_ACC_ALL)) + goto error_out; + + return nbytes; +error_out: + /* Clean out command structure on command error out */ + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +/** + * lpfc_idiag_extacc_read - idiag debugfs read access to extent information + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the proper extent information according to + * the idiag command, and copies to user @buf. 
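The driver-side report above walks the per-type extent block lists with list_for_each_entry(). A self-contained miniature of that list shape and traversal, with hypothetical type and function names, is shown below.

#include <linux/list.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Illustrative only: struct demo_blk stands in for struct lpfc_rsrc_blks. */
struct demo_blk {
	struct list_head list;
	u16 rsrc_start;
	u16 rsrc_size;
};

static void demo_dump_blocks(struct list_head *blk_list)
{
	struct demo_blk *blk;
	int index = 0;

	list_for_each_entry(blk, blk_list, list) {
		pr_info("Block %3d: Start %4d, Count %4d\n",
			index, blk->rsrc_start, blk->rsrc_size);
		index++;
	}
}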
+ * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. + **/ +static ssize_t +lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + char *pbuffer; + uint32_t ext_map; + int len = 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_EXT_ACC_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + if (*ppos) + return 0; + if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD) + return 0; + + ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX]; + if (ext_map & LPFC_EXT_ACC_AVAIL) + len = lpfc_idiag_extacc_avail_get(phba, pbuffer, len); + if (ext_map & LPFC_EXT_ACC_ALLOC) + len = lpfc_idiag_extacc_alloc_get(phba, pbuffer, len); + if (ext_map & LPFC_EXT_ACC_DRIVR) + len = lpfc_idiag_extacc_drivr_get(phba, pbuffer, len); + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +static int +lpfc_cgn_buffer_open(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + debug->buffer = vmalloc(LPFC_CGN_BUF_SIZE); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + char *buffer = debug->buffer; + uint32_t *ptr; + int cnt, len = 0; + + if (!phba->sli4_hba.pc_sli4_params.mi_ver || !phba->cgn_i) { + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "Congestion Mgmt is not supported\n"); + goto out; + } + ptr = (uint32_t *)phba->cgn_i->virt; + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "Congestion Buffer Header\n"); + /* Dump the first 32 bytes */ + cnt = 32; + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "000: %08x %08x %08x %08x %08x %08x %08x %08x\n", + *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3), + *(ptr + 4), *(ptr + 5), *(ptr + 6), *(ptr + 7)); + ptr += 8; + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "Congestion Buffer Data\n"); + while (cnt < sizeof(struct lpfc_cgn_info)) { + if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) { + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "Truncated . . .\n"); + goto out; + } + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "%03x: %08x %08x %08x %08x " + "%08x %08x %08x %08x\n", + cnt, *ptr, *(ptr + 1), *(ptr + 2), + *(ptr + 3), *(ptr + 4), *(ptr + 5), + *(ptr + 6), *(ptr + 7)); + cnt += 32; + ptr += 8; + } + if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) { + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "Truncated . . 
.\n"); + goto out; + } + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "Parameter Data\n"); + ptr = (uint32_t *)&phba->cgn_p; + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "%08x %08x %08x %08x\n", + *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3)); +out: + return simple_read_from_buffer(buf, nbytes, ppos, buffer, len); +} + +static int +lpfc_cgn_buffer_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + vfree(debug->buffer); + kfree(debug); + + return 0; +} + +static int +lpfc_rx_monitor_open(struct inode *inode, struct file *file) +{ + struct lpfc_rx_monitor_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + debug->buffer = vmalloc(MAX_DEBUGFS_RX_INFO_SIZE); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_rx_monitor_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + char *buffer = debug->buffer; + + if (!phba->rx_monitor) { + scnprintf(buffer, MAX_DEBUGFS_RX_INFO_SIZE, + "Rx Monitor Info is empty.\n"); + } else { + lpfc_rx_monitor_report(phba, phba->rx_monitor, buffer, + MAX_DEBUGFS_RX_INFO_SIZE, + LPFC_MAX_RXMONITOR_ENTRY); + } + + return simple_read_from_buffer(buf, nbytes, ppos, buffer, + strlen(buffer)); +} + +static int +lpfc_rx_monitor_release(struct inode *inode, struct file *file) +{ + struct lpfc_rx_monitor_debug *debug = file->private_data; + + vfree(debug->buffer); + kfree(debug); + + return 0; +} + +#undef lpfc_debugfs_op_disc_trc +static const struct file_operations lpfc_debugfs_op_disc_trc = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_disc_trc_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_nodelist +static const struct file_operations lpfc_debugfs_op_nodelist = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_nodelist_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_multixripools +static const struct file_operations lpfc_debugfs_op_multixripools = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_multixripools_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_multixripools_write, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_hbqinfo +static const struct file_operations lpfc_debugfs_op_hbqinfo = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_hbqinfo_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .release = lpfc_debugfs_release, +}; + +#ifdef LPFC_HDWQ_LOCK_STAT +#undef lpfc_debugfs_op_lockstat +static const struct file_operations lpfc_debugfs_op_lockstat = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_lockstat_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_lockstat_write, + .release = lpfc_debugfs_release, +}; +#endif + +#undef lpfc_debugfs_ras_log +static const struct file_operations lpfc_debugfs_ras_log = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_ras_log_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .release = lpfc_debugfs_ras_log_release, +}; + +#undef lpfc_debugfs_op_dumpHBASlim +static const struct file_operations 
lpfc_debugfs_op_dumpHBASlim = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_dumpHBASlim_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_dumpHostSlim +static const struct file_operations lpfc_debugfs_op_dumpHostSlim = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_dumpHostSlim_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_nvmestat +static const struct file_operations lpfc_debugfs_op_nvmestat = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_nvmestat_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_nvmestat_write, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_scsistat +static const struct file_operations lpfc_debugfs_op_scsistat = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_scsistat_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_scsistat_write, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_ioktime +static const struct file_operations lpfc_debugfs_op_ioktime = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_ioktime_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_ioktime_write, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_nvmeio_trc +static const struct file_operations lpfc_debugfs_op_nvmeio_trc = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_nvmeio_trc_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_nvmeio_trc_write, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_hdwqstat +static const struct file_operations lpfc_debugfs_op_hdwqstat = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_hdwqstat_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_hdwqstat_write, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_dif_err +static const struct file_operations lpfc_debugfs_op_dif_err = { + .owner = THIS_MODULE, + .open = simple_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_dif_err_read, + .write = lpfc_debugfs_dif_err_write, + .release = lpfc_debugfs_dif_err_release, +}; + +#undef lpfc_debugfs_op_slow_ring_trc +static const struct file_operations lpfc_debugfs_op_slow_ring_trc = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_slow_ring_trc_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .release = lpfc_debugfs_release, +}; + +static struct dentry *lpfc_debugfs_root = NULL; +static atomic_t lpfc_debugfs_hba_count; + +/* + * File operations for the iDiag debugfs + */ +#undef lpfc_idiag_op_pciCfg +static const struct file_operations lpfc_idiag_op_pciCfg = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_pcicfg_read, + .write = lpfc_idiag_pcicfg_write, + .release = lpfc_idiag_cmd_release, +}; + +#undef lpfc_idiag_op_barAcc +static const struct file_operations lpfc_idiag_op_barAcc = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_baracc_read, + .write = lpfc_idiag_baracc_write, + .release = lpfc_idiag_cmd_release, +}; + +#undef lpfc_idiag_op_queInfo +static const struct file_operations lpfc_idiag_op_queInfo = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .read = lpfc_idiag_queinfo_read, + .release = lpfc_idiag_release, +}; + +#undef lpfc_idiag_op_queAcc +static const 
struct file_operations lpfc_idiag_op_queAcc = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_queacc_read, + .write = lpfc_idiag_queacc_write, + .release = lpfc_idiag_cmd_release, +}; + +#undef lpfc_idiag_op_drbAcc +static const struct file_operations lpfc_idiag_op_drbAcc = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_drbacc_read, + .write = lpfc_idiag_drbacc_write, + .release = lpfc_idiag_cmd_release, +}; + +#undef lpfc_idiag_op_ctlAcc +static const struct file_operations lpfc_idiag_op_ctlAcc = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_ctlacc_read, + .write = lpfc_idiag_ctlacc_write, + .release = lpfc_idiag_cmd_release, +}; + +#undef lpfc_idiag_op_mbxAcc +static const struct file_operations lpfc_idiag_op_mbxAcc = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_mbxacc_read, + .write = lpfc_idiag_mbxacc_write, + .release = lpfc_idiag_cmd_release, +}; + +#undef lpfc_idiag_op_extAcc +static const struct file_operations lpfc_idiag_op_extAcc = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_extacc_read, + .write = lpfc_idiag_extacc_write, + .release = lpfc_idiag_cmd_release, +}; +#undef lpfc_cgn_buffer_op +static const struct file_operations lpfc_cgn_buffer_op = { + .owner = THIS_MODULE, + .open = lpfc_cgn_buffer_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_cgn_buffer_read, + .release = lpfc_cgn_buffer_release, +}; + +#undef lpfc_rx_monitor_op +static const struct file_operations lpfc_rx_monitor_op = { + .owner = THIS_MODULE, + .open = lpfc_rx_monitor_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_rx_monitor_read, + .release = lpfc_rx_monitor_release, +}; +#endif + +/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command + * @phba: Pointer to HBA context object. + * @dmabuf: Pointer to a DMA buffer descriptor. + * + * Description: + * This routine dump a bsg pass-through non-embedded mailbox command with + * external buffer. 
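The cgn_buffer and rx_monitor entries above allocate their scratch buffer in open(), fill it in read(), and free it in release(), with a file_operations table binding the three handlers; that table is what later gets passed to debugfs_create_file(). A minimal standalone sketch of the same lifecycle, using hypothetical names, follows.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DEMO_BUF_SZ 4096

/* Illustrative per-open state, analogous to struct lpfc_debug above. */
struct demo_state {
	void *i_private;
	char *buffer;
};

static int demo_open(struct inode *inode, struct file *file)
{
	struct demo_state *st = kmalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return -ENOMEM;
	st->buffer = vmalloc(DEMO_BUF_SZ);
	if (!st->buffer) {
		kfree(st);
		return -ENOMEM;
	}
	st->i_private = inode->i_private;
	file->private_data = st;
	return 0;
}

static ssize_t demo_read(struct file *file, char __user *buf, size_t nbytes,
			 loff_t *ppos)
{
	struct demo_state *st = file->private_data;
	int len = scnprintf(st->buffer, DEMO_BUF_SZ, "hello\n");

	return simple_read_from_buffer(buf, nbytes, ppos, st->buffer, len);
}

static int demo_release(struct inode *inode, struct file *file)
{
	struct demo_state *st = file->private_data;

	vfree(st->buffer);
	kfree(st);
	return 0;
}

/* Registered with debugfs_create_file("demo", 0444, parent, data, &demo_fops) */
static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = demo_read,
	.llseek  = default_llseek,
	.release = demo_release,
};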
+ **/ +void +lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, + enum mbox_type mbox_tp, enum dma_type dma_tp, + enum sta_type sta_tp, + struct lpfc_dmabuf *dmabuf, uint32_t ext_buf) +{ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t *mbx_mbox_cmd, *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt; + char line_buf[LPFC_MBX_ACC_LBUF_SZ]; + int len = 0; + uint32_t do_dump = 0; + uint32_t *pword; + uint32_t i; + + if (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP) + return; + + mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX]; + mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX]; + mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX]; + mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX]; + + if (!(*mbx_dump_map & LPFC_MBX_DMP_ALL) || + (*mbx_dump_cnt == 0) || + (*mbx_word_cnt == 0)) + return; + + if (*mbx_mbox_cmd != 0x9B) + return; + + if ((mbox_tp == mbox_rd) && (dma_tp == dma_mbox)) { + if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_MBX) { + do_dump |= LPFC_BSG_DMP_MBX_RD_MBX; + pr_err("\nRead mbox command (x%x), " + "nemb:0x%x, extbuf_cnt:%d:\n", + sta_tp, nemb_tp, ext_buf); + } + } + if ((mbox_tp == mbox_rd) && (dma_tp == dma_ebuf)) { + if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_BUF) { + do_dump |= LPFC_BSG_DMP_MBX_RD_BUF; + pr_err("\nRead mbox buffer (x%x), " + "nemb:0x%x, extbuf_seq:%d:\n", + sta_tp, nemb_tp, ext_buf); + } + } + if ((mbox_tp == mbox_wr) && (dma_tp == dma_mbox)) { + if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_MBX) { + do_dump |= LPFC_BSG_DMP_MBX_WR_MBX; + pr_err("\nWrite mbox command (x%x), " + "nemb:0x%x, extbuf_cnt:%d:\n", + sta_tp, nemb_tp, ext_buf); + } + } + if ((mbox_tp == mbox_wr) && (dma_tp == dma_ebuf)) { + if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_BUF) { + do_dump |= LPFC_BSG_DMP_MBX_WR_BUF; + pr_err("\nWrite mbox buffer (x%x), " + "nemb:0x%x, extbuf_seq:%d:\n", + sta_tp, nemb_tp, ext_buf); + } + } + + /* dump buffer content */ + if (do_dump) { + pword = (uint32_t *)dmabuf->virt; + for (i = 0; i < *mbx_word_cnt; i++) { + if (!(i % 8)) { + if (i != 0) + pr_err("%s\n", line_buf); + len = 0; + len += scnprintf(line_buf+len, + LPFC_MBX_ACC_LBUF_SZ-len, + "%03d: ", i); + } + len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, + "%08x ", (uint32_t)*pword); + pword++; + } + if ((i - 1) % 8) + pr_err("%s\n", line_buf); + (*mbx_dump_cnt)--; + } + + /* Clean out command structure on reaching dump count */ + if (*mbx_dump_cnt == 0) + memset(&idiag, 0, sizeof(idiag)); + return; +#endif +} + +/* lpfc_idiag_mbxacc_dump_issue_mbox - idiag debugfs dump issue mailbox command + * @phba: Pointer to HBA context object. + * @dmabuf: Pointer to a DMA buffer descriptor. + * + * Description: + * This routine dump a pass-through non-embedded mailbox command from issue + * mailbox command. 
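The dump routine above formats eight 32-bit words per line into line_buf with scnprintf() and flushes each completed line through pr_err(), which lets it prepend its own word-index prefix. For comparison only, the kernel's stock print_hex_dump() helper can produce a similar word-wise dump, as in the sketch below (names hypothetical); it offers an offset prefix rather than the driver's word count.

#include <linux/printk.h>
#include <linux/types.h>

/* Illustrative aside: dump @wcnt 32-bit words, 8 per line, grouped as
 * 32-bit quantities.
 */
static void demo_dump_words(const u32 *words, unsigned int wcnt)
{
	print_hex_dump(KERN_ERR, "mbox: ", DUMP_PREFIX_OFFSET,
		       32, 4, words, wcnt * sizeof(u32), false);
}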
+ **/ +void +lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox) +{ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt, *mbx_mbox_cmd; + char line_buf[LPFC_MBX_ACC_LBUF_SZ]; + int len = 0; + uint32_t *pword; + uint8_t *pbyte; + uint32_t i, j; + + if (idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP) + return; + + mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX]; + mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX]; + mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX]; + mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX]; + + if (!(*mbx_dump_map & LPFC_MBX_DMP_MBX_ALL) || + (*mbx_dump_cnt == 0) || + (*mbx_word_cnt == 0)) + return; + + if ((*mbx_mbox_cmd != LPFC_MBX_ALL_CMD) && + (*mbx_mbox_cmd != pmbox->mbxCommand)) + return; + + /* dump buffer content */ + if (*mbx_dump_map & LPFC_MBX_DMP_MBX_WORD) { + pr_err("Mailbox command:0x%x dump by word:\n", + pmbox->mbxCommand); + pword = (uint32_t *)pmbox; + for (i = 0; i < *mbx_word_cnt; i++) { + if (!(i % 8)) { + if (i != 0) + pr_err("%s\n", line_buf); + len = 0; + memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ); + len += scnprintf(line_buf+len, + LPFC_MBX_ACC_LBUF_SZ-len, + "%03d: ", i); + } + len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, + "%08x ", + ((uint32_t)*pword) & 0xffffffff); + pword++; + } + if ((i - 1) % 8) + pr_err("%s\n", line_buf); + pr_err("\n"); + } + if (*mbx_dump_map & LPFC_MBX_DMP_MBX_BYTE) { + pr_err("Mailbox command:0x%x dump by byte:\n", + pmbox->mbxCommand); + pbyte = (uint8_t *)pmbox; + for (i = 0; i < *mbx_word_cnt; i++) { + if (!(i % 8)) { + if (i != 0) + pr_err("%s\n", line_buf); + len = 0; + memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ); + len += scnprintf(line_buf+len, + LPFC_MBX_ACC_LBUF_SZ-len, + "%03d: ", i); + } + for (j = 0; j < 4; j++) { + len += scnprintf(line_buf+len, + LPFC_MBX_ACC_LBUF_SZ-len, + "%02x", + ((uint8_t)*pbyte) & 0xff); + pbyte++; + } + len += scnprintf(line_buf+len, + LPFC_MBX_ACC_LBUF_SZ-len, " "); + } + if ((i - 1) % 8) + pr_err("%s\n", line_buf); + pr_err("\n"); + } + (*mbx_dump_cnt)--; + + /* Clean out command structure on reaching dump count */ + if (*mbx_dump_cnt == 0) + memset(&idiag, 0, sizeof(idiag)); + return; +#endif +} + +/** + * lpfc_debugfs_initialize - Initialize debugfs for a vport + * @vport: The vport pointer to initialize. + * + * Description: + * When Debugfs is configured this routine sets up the lpfc debugfs file system. + * If not already created, this routine will create the lpfc directory, and + * lpfcX directory (for this HBA), and vportX directory for this vport. It will + * also create each file used to access lpfc specific debugfs information. 
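The kernel-doc above describes the directory layout that lpfc_debugfs_initialize() builds next: a shared lpfc root, an fnX directory per HBA PCI function, and a vportX directory per vport, with the individual files hanging off those directories. A minimal sketch of that nesting with hypothetical names:

#include <linux/debugfs.h>

/* Illustrative only: the rough shape of the lpfc/fnX/vportX hierarchy. */
static struct dentry *demo_root, *demo_fn, *demo_vport;

static void demo_mkdirs(void *data, const struct file_operations *fops)
{
	demo_root  = debugfs_create_dir("demo", NULL);       /* like "lpfc"   */
	demo_fn    = debugfs_create_dir("fn0", demo_root);   /* like "fnX"    */
	demo_vport = debugfs_create_dir("vport0", demo_fn);  /* like "vportX" */

	/* files hang off the directory they belong to */
	debugfs_create_file("nodelist", 0644, demo_vport, data, fops);
}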
+ **/ +inline void +lpfc_debugfs_initialize(struct lpfc_vport *vport) +{ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + struct lpfc_hba *phba = vport->phba; + char name[64]; + uint32_t num, i; + bool pport_setup = false; + + if (!lpfc_debugfs_enable) + return; + + /* Setup lpfc root directory */ + if (!lpfc_debugfs_root) { + lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL); + atomic_set(&lpfc_debugfs_hba_count, 0); + } + if (!lpfc_debugfs_start_time) + lpfc_debugfs_start_time = jiffies; + + /* Setup funcX directory for specific HBA PCI function */ + snprintf(name, sizeof(name), "fn%d", phba->brd_no); + if (!phba->hba_debugfs_root) { + pport_setup = true; + phba->hba_debugfs_root = + debugfs_create_dir(name, lpfc_debugfs_root); + atomic_inc(&lpfc_debugfs_hba_count); + atomic_set(&phba->debugfs_vport_count, 0); + + /* Multi-XRI pools */ + snprintf(name, sizeof(name), "multixripools"); + phba->debug_multixri_pools = + debugfs_create_file(name, S_IFREG | 0644, + phba->hba_debugfs_root, + phba, + &lpfc_debugfs_op_multixripools); + if (IS_ERR(phba->debug_multixri_pools)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0527 Cannot create debugfs multixripools\n"); + goto debug_failed; + } + + /* Congestion Info Buffer */ + scnprintf(name, sizeof(name), "cgn_buffer"); + phba->debug_cgn_buffer = + debugfs_create_file(name, S_IFREG | 0644, + phba->hba_debugfs_root, + phba, &lpfc_cgn_buffer_op); + if (IS_ERR(phba->debug_cgn_buffer)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "6527 Cannot create debugfs " + "cgn_buffer\n"); + goto debug_failed; + } + + /* RX Monitor */ + scnprintf(name, sizeof(name), "rx_monitor"); + phba->debug_rx_monitor = + debugfs_create_file(name, S_IFREG | 0644, + phba->hba_debugfs_root, + phba, &lpfc_rx_monitor_op); + if (IS_ERR(phba->debug_rx_monitor)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "6528 Cannot create debugfs " + "rx_monitor\n"); + goto debug_failed; + } + + /* RAS log */ + snprintf(name, sizeof(name), "ras_log"); + phba->debug_ras_log = + debugfs_create_file(name, 0644, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_ras_log); + if (IS_ERR(phba->debug_ras_log)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "6148 Cannot create debugfs" + " ras_log\n"); + goto debug_failed; + } + + /* Setup hbqinfo */ + snprintf(name, sizeof(name), "hbqinfo"); + phba->debug_hbqinfo = + debugfs_create_file(name, S_IFREG | 0644, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_hbqinfo); + +#ifdef LPFC_HDWQ_LOCK_STAT + /* Setup lockstat */ + snprintf(name, sizeof(name), "lockstat"); + phba->debug_lockstat = + debugfs_create_file(name, S_IFREG | 0644, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_lockstat); + if (IS_ERR(phba->debug_lockstat)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "4610 Can't create debugfs lockstat\n"); + goto debug_failed; + } +#endif + + /* Setup dumpHBASlim */ + if (phba->sli_rev < LPFC_SLI_REV4) { + snprintf(name, sizeof(name), "dumpHBASlim"); + phba->debug_dumpHBASlim = + debugfs_create_file(name, + S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dumpHBASlim); + } else + phba->debug_dumpHBASlim = NULL; + + /* Setup dumpHostSlim */ + if (phba->sli_rev < LPFC_SLI_REV4) { + snprintf(name, sizeof(name), "dumpHostSlim"); + phba->debug_dumpHostSlim = + debugfs_create_file(name, + S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dumpHostSlim); + } else + phba->debug_dumpHostSlim = NULL; + + /* Setup DIF Error Injections */ + snprintf(name, sizeof(name), "InjErrLBA"); + 
phba->debug_InjErrLBA = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; + + snprintf(name, sizeof(name), "InjErrNPortID"); + phba->debug_InjErrNPortID = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + + snprintf(name, sizeof(name), "InjErrWWPN"); + phba->debug_InjErrWWPN = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + + snprintf(name, sizeof(name), "writeGuardInjErr"); + phba->debug_writeGuard = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + + snprintf(name, sizeof(name), "writeAppInjErr"); + phba->debug_writeApp = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + + snprintf(name, sizeof(name), "writeRefInjErr"); + phba->debug_writeRef = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + + snprintf(name, sizeof(name), "readGuardInjErr"); + phba->debug_readGuard = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + + snprintf(name, sizeof(name), "readAppInjErr"); + phba->debug_readApp = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + + snprintf(name, sizeof(name), "readRefInjErr"); + phba->debug_readRef = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + + /* Setup slow ring trace */ + if (lpfc_debugfs_max_slow_ring_trc) { + num = lpfc_debugfs_max_slow_ring_trc - 1; + if (num & lpfc_debugfs_max_slow_ring_trc) { + /* Change to be a power of 2 */ + num = lpfc_debugfs_max_slow_ring_trc; + i = 0; + while (num > 1) { + num = num >> 1; + i++; + } + lpfc_debugfs_max_slow_ring_trc = (1 << i); + pr_err("lpfc_debugfs_max_disc_trc changed to " + "%d\n", lpfc_debugfs_max_disc_trc); + } + } + + snprintf(name, sizeof(name), "slow_ring_trace"); + phba->debug_slow_ring_trc = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_slow_ring_trc); + if (!phba->slow_ring_trc) { + phba->slow_ring_trc = kcalloc( + lpfc_debugfs_max_slow_ring_trc, + sizeof(struct lpfc_debugfs_trc), + GFP_KERNEL); + if (!phba->slow_ring_trc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0416 Cannot create debugfs " + "slow_ring buffer\n"); + goto debug_failed; + } + atomic_set(&phba->slow_ring_trc_cnt, 0); + } + + snprintf(name, sizeof(name), "nvmeio_trc"); + phba->debug_nvmeio_trc = + debugfs_create_file(name, 0644, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_nvmeio_trc); + + atomic_set(&phba->nvmeio_trc_cnt, 0); + if (lpfc_debugfs_max_nvmeio_trc) { + num = lpfc_debugfs_max_nvmeio_trc - 1; + if (num & lpfc_debugfs_max_disc_trc) { + /* Change to be a power of 2 */ + num = lpfc_debugfs_max_nvmeio_trc; + i = 0; + while (num > 1) { + num = num >> 1; + i++; + } + lpfc_debugfs_max_nvmeio_trc = (1 << i); + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0575 lpfc_debugfs_max_nvmeio_trc " + "changed to %d\n", + lpfc_debugfs_max_nvmeio_trc); + } + phba->nvmeio_trc_size = lpfc_debugfs_max_nvmeio_trc; + + /* Allocate trace buffer and initialize */ + phba->nvmeio_trc = kzalloc( + (sizeof(struct lpfc_debugfs_nvmeio_trc) * + phba->nvmeio_trc_size), 
GFP_KERNEL); + + if (!phba->nvmeio_trc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0576 Cannot create debugfs " + "nvmeio_trc buffer\n"); + goto nvmeio_off; + } + phba->nvmeio_trc_on = 1; + phba->nvmeio_trc_output_idx = 0; + phba->nvmeio_trc = NULL; + } else { +nvmeio_off: + phba->nvmeio_trc_size = 0; + phba->nvmeio_trc_on = 0; + phba->nvmeio_trc_output_idx = 0; + phba->nvmeio_trc = NULL; + } + } + + snprintf(name, sizeof(name), "vport%d", vport->vpi); + if (!vport->vport_debugfs_root) { + vport->vport_debugfs_root = + debugfs_create_dir(name, phba->hba_debugfs_root); + atomic_inc(&phba->debugfs_vport_count); + } + + if (lpfc_debugfs_max_disc_trc) { + num = lpfc_debugfs_max_disc_trc - 1; + if (num & lpfc_debugfs_max_disc_trc) { + /* Change to be a power of 2 */ + num = lpfc_debugfs_max_disc_trc; + i = 0; + while (num > 1) { + num = num >> 1; + i++; + } + lpfc_debugfs_max_disc_trc = (1 << i); + pr_err("lpfc_debugfs_max_disc_trc changed to %d\n", + lpfc_debugfs_max_disc_trc); + } + } + + vport->disc_trc = kzalloc( + (sizeof(struct lpfc_debugfs_trc) * lpfc_debugfs_max_disc_trc), + GFP_KERNEL); + + if (!vport->disc_trc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0418 Cannot create debugfs disc trace " + "buffer\n"); + goto debug_failed; + } + atomic_set(&vport->disc_trc_cnt, 0); + + snprintf(name, sizeof(name), "discovery_trace"); + vport->debug_disc_trc = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_disc_trc); + snprintf(name, sizeof(name), "nodelist"); + vport->debug_nodelist = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_nodelist); + + snprintf(name, sizeof(name), "nvmestat"); + vport->debug_nvmestat = + debugfs_create_file(name, 0644, + vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_nvmestat); + + snprintf(name, sizeof(name), "scsistat"); + vport->debug_scsistat = + debugfs_create_file(name, 0644, + vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_scsistat); + if (IS_ERR(vport->debug_scsistat)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "4611 Cannot create debugfs scsistat\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "ioktime"); + vport->debug_ioktime = + debugfs_create_file(name, 0644, + vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_ioktime); + if (IS_ERR(vport->debug_ioktime)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0815 Cannot create debugfs ioktime\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "hdwqstat"); + vport->debug_hdwqstat = + debugfs_create_file(name, 0644, + vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_hdwqstat); + + /* + * The following section is for additional directories/files for the + * physical port. 
+ */ + + if (!pport_setup) + goto debug_failed; + + /* + * iDiag debugfs root entry points for SLI4 device only + */ + if (phba->sli_rev < LPFC_SLI_REV4) + goto debug_failed; + + snprintf(name, sizeof(name), "iDiag"); + if (!phba->idiag_root) { + phba->idiag_root = + debugfs_create_dir(name, phba->hba_debugfs_root); + /* Initialize iDiag data structure */ + memset(&idiag, 0, sizeof(idiag)); + } + + /* iDiag read PCI config space */ + snprintf(name, sizeof(name), "pciCfg"); + if (!phba->idiag_pci_cfg) { + phba->idiag_pci_cfg = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, &lpfc_idiag_op_pciCfg); + idiag.offset.last_rd = 0; + } + + /* iDiag PCI BAR access */ + snprintf(name, sizeof(name), "barAcc"); + if (!phba->idiag_bar_acc) { + phba->idiag_bar_acc = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, &lpfc_idiag_op_barAcc); + idiag.offset.last_rd = 0; + } + + /* iDiag get PCI function queue information */ + snprintf(name, sizeof(name), "queInfo"); + if (!phba->idiag_que_info) { + phba->idiag_que_info = + debugfs_create_file(name, S_IFREG|S_IRUGO, + phba->idiag_root, phba, &lpfc_idiag_op_queInfo); + } + + /* iDiag access PCI function queue */ + snprintf(name, sizeof(name), "queAcc"); + if (!phba->idiag_que_acc) { + phba->idiag_que_acc = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, &lpfc_idiag_op_queAcc); + } + + /* iDiag access PCI function doorbell registers */ + snprintf(name, sizeof(name), "drbAcc"); + if (!phba->idiag_drb_acc) { + phba->idiag_drb_acc = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, &lpfc_idiag_op_drbAcc); + } + + /* iDiag access PCI function control registers */ + snprintf(name, sizeof(name), "ctlAcc"); + if (!phba->idiag_ctl_acc) { + phba->idiag_ctl_acc = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc); + } + + /* iDiag access mbox commands */ + snprintf(name, sizeof(name), "mbxAcc"); + if (!phba->idiag_mbx_acc) { + phba->idiag_mbx_acc = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc); + } + + /* iDiag extents access commands */ + if (phba->sli4_hba.extents_in_use) { + snprintf(name, sizeof(name), "extAcc"); + if (!phba->idiag_ext_acc) { + phba->idiag_ext_acc = + debugfs_create_file(name, + S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, + &lpfc_idiag_op_extAcc); + } + } + +debug_failed: + return; +#endif +} + +/** + * lpfc_debugfs_terminate - Tear down debugfs infrastructure for this vport + * @vport: The vport pointer to remove from debugfs. + * + * Description: + * When Debugfs is configured this routine removes debugfs file system elements + * that are specific to this vport. It also checks to see if there are any + * users left for the debugfs directories associated with the HBA and driver. If + * this is the last user of the HBA directory or driver directory then it will + * remove those from the debugfs infrastructure as well. 
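Most of the files created above follow the same pattern: debugfs_create_file() with the owning directory, the phba or vport as private data, and a file_operations table, followed by an IS_ERR() check that branches to debug_failed on failure (debugfs_create_file() reports errors through an ERR_PTR-encoded return rather than NULL). A small sketch of that create-and-check step, with a hypothetical helper name, follows.

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/printk.h>

/* Illustrative only: create a debugfs file and report failure the way
 * the initialization routine above does.
 */
static struct dentry *
demo_create_checked(const char *name, struct dentry *parent, void *data,
		    const struct file_operations *fops)
{
	struct dentry *d = debugfs_create_file(name, 0644, parent, data, fops);

	if (IS_ERR(d))
		pr_err("cannot create debugfs %s\n", name);
	return d;
}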
+ **/ +inline void +lpfc_debugfs_terminate(struct lpfc_vport *vport) +{ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + struct lpfc_hba *phba = vport->phba; + + kfree(vport->disc_trc); + vport->disc_trc = NULL; + + debugfs_remove(vport->debug_disc_trc); /* discovery_trace */ + vport->debug_disc_trc = NULL; + + debugfs_remove(vport->debug_nodelist); /* nodelist */ + vport->debug_nodelist = NULL; + + debugfs_remove(vport->debug_nvmestat); /* nvmestat */ + vport->debug_nvmestat = NULL; + + debugfs_remove(vport->debug_scsistat); /* scsistat */ + vport->debug_scsistat = NULL; + + debugfs_remove(vport->debug_ioktime); /* ioktime */ + vport->debug_ioktime = NULL; + + debugfs_remove(vport->debug_hdwqstat); /* hdwqstat */ + vport->debug_hdwqstat = NULL; + + if (vport->vport_debugfs_root) { + debugfs_remove(vport->vport_debugfs_root); /* vportX */ + vport->vport_debugfs_root = NULL; + atomic_dec(&phba->debugfs_vport_count); + } + + if (atomic_read(&phba->debugfs_vport_count) == 0) { + + debugfs_remove(phba->debug_multixri_pools); /* multixripools*/ + phba->debug_multixri_pools = NULL; + + debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */ + phba->debug_hbqinfo = NULL; + + debugfs_remove(phba->debug_cgn_buffer); + phba->debug_cgn_buffer = NULL; + + debugfs_remove(phba->debug_rx_monitor); + phba->debug_rx_monitor = NULL; + + debugfs_remove(phba->debug_ras_log); + phba->debug_ras_log = NULL; + +#ifdef LPFC_HDWQ_LOCK_STAT + debugfs_remove(phba->debug_lockstat); /* lockstat */ + phba->debug_lockstat = NULL; +#endif + debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */ + phba->debug_dumpHBASlim = NULL; + + debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ + phba->debug_dumpHostSlim = NULL; + + debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */ + phba->debug_InjErrLBA = NULL; + + debugfs_remove(phba->debug_InjErrNPortID); + phba->debug_InjErrNPortID = NULL; + + debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */ + phba->debug_InjErrWWPN = NULL; + + debugfs_remove(phba->debug_writeGuard); /* writeGuard */ + phba->debug_writeGuard = NULL; + + debugfs_remove(phba->debug_writeApp); /* writeApp */ + phba->debug_writeApp = NULL; + + debugfs_remove(phba->debug_writeRef); /* writeRef */ + phba->debug_writeRef = NULL; + + debugfs_remove(phba->debug_readGuard); /* readGuard */ + phba->debug_readGuard = NULL; + + debugfs_remove(phba->debug_readApp); /* readApp */ + phba->debug_readApp = NULL; + + debugfs_remove(phba->debug_readRef); /* readRef */ + phba->debug_readRef = NULL; + + kfree(phba->slow_ring_trc); + phba->slow_ring_trc = NULL; + + /* slow_ring_trace */ + debugfs_remove(phba->debug_slow_ring_trc); + phba->debug_slow_ring_trc = NULL; + + debugfs_remove(phba->debug_nvmeio_trc); + phba->debug_nvmeio_trc = NULL; + + kfree(phba->nvmeio_trc); + phba->nvmeio_trc = NULL; + + /* + * iDiag release + */ + if (phba->sli_rev == LPFC_SLI_REV4) { + /* iDiag extAcc */ + debugfs_remove(phba->idiag_ext_acc); + phba->idiag_ext_acc = NULL; + + /* iDiag mbxAcc */ + debugfs_remove(phba->idiag_mbx_acc); + phba->idiag_mbx_acc = NULL; + + /* iDiag ctlAcc */ + debugfs_remove(phba->idiag_ctl_acc); + phba->idiag_ctl_acc = NULL; + + /* iDiag drbAcc */ + debugfs_remove(phba->idiag_drb_acc); + phba->idiag_drb_acc = NULL; + + /* iDiag queAcc */ + debugfs_remove(phba->idiag_que_acc); + phba->idiag_que_acc = NULL; + + /* iDiag queInfo */ + debugfs_remove(phba->idiag_que_info); + phba->idiag_que_info = NULL; + + /* iDiag barAcc */ + debugfs_remove(phba->idiag_bar_acc); + phba->idiag_bar_acc = NULL; + + /* iDiag pciCfg */ + 
debugfs_remove(phba->idiag_pci_cfg); + phba->idiag_pci_cfg = NULL; + + /* Finally remove the iDiag debugfs root */ + debugfs_remove(phba->idiag_root); + phba->idiag_root = NULL; + } + + if (phba->hba_debugfs_root) { + debugfs_remove(phba->hba_debugfs_root); /* fnX */ + phba->hba_debugfs_root = NULL; + atomic_dec(&lpfc_debugfs_hba_count); + } + + if (atomic_read(&lpfc_debugfs_hba_count) == 0) { + debugfs_remove(lpfc_debugfs_root); /* lpfc */ + lpfc_debugfs_root = NULL; + } + } +#endif + return; +} + +/* + * Driver debug utility routines outside of debugfs. The debug utility + * routines implemented here is intended to be used in the instrumented + * debug driver for debugging host or port issues. + */ + +/** + * lpfc_debug_dump_all_queues - dump all the queues with a hba + * @phba: Pointer to HBA context object. + * + * This function dumps entries of all the queues asociated with the @phba. + **/ +void +lpfc_debug_dump_all_queues(struct lpfc_hba *phba) +{ + int idx; + + /* + * Dump Work Queues (WQs) + */ + lpfc_debug_dump_wq(phba, DUMP_MBX, 0); + lpfc_debug_dump_wq(phba, DUMP_ELS, 0); + lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0); + + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) + lpfc_debug_dump_wq(phba, DUMP_IO, idx); + + lpfc_debug_dump_hdr_rq(phba); + lpfc_debug_dump_dat_rq(phba); + /* + * Dump Complete Queues (CQs) + */ + lpfc_debug_dump_cq(phba, DUMP_MBX, 0); + lpfc_debug_dump_cq(phba, DUMP_ELS, 0); + lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0); + + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) + lpfc_debug_dump_cq(phba, DUMP_IO, idx); + + /* + * Dump Event Queues (EQs) + */ + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) + lpfc_debug_dump_hba_eq(phba, idx); +} diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h new file mode 100644 index 000000000..8d2e8d05b --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -0,0 +1,699 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2007-2011 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + *******************************************************************/ + +#ifndef _H_LPFC_DEBUG_FS +#define _H_LPFC_DEBUG_FS + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + +/* size of output line, for discovery_trace and slow_ring_trace */ +#define LPFC_DEBUG_TRC_ENTRY_SIZE 100 + +/* nodelist output buffer size */ +#define LPFC_NODELIST_SIZE 8192 +#define LPFC_NODELIST_ENTRY_SIZE 120 + +/* dumpHBASlim output buffer size */ +#define LPFC_DUMPHBASLIM_SIZE 4096 + +/* dumpHostSlim output buffer size */ +#define LPFC_DUMPHOSTSLIM_SIZE 4096 + +/* dumpSLIqinfo output buffer size */ +#define LPFC_DUMPSLIQINFO_SIZE 4096 + +/* hbqinfo output buffer size */ +#define LPFC_HBQINFO_SIZE 8192 + +/* nvmestat output buffer size */ +#define LPFC_NVMESTAT_SIZE 8192 +#define LPFC_IOKTIME_SIZE 8192 +#define LPFC_NVMEIO_TRC_SIZE 8192 + +/* scsistat output buffer size */ +#define LPFC_SCSISTAT_SIZE 8192 + +/* Congestion Info Buffer size */ +#define LPFC_CGN_BUF_SIZE 8192 + +#define LPFC_DEBUG_OUT_LINE_SZ 80 + +/* + * For SLI4 iDiag debugfs diagnostics tool + */ + +/* pciConf */ +#define LPFC_PCI_CFG_BROWSE 0xffff +#define LPFC_PCI_CFG_RD_CMD_ARG 2 +#define LPFC_PCI_CFG_WR_CMD_ARG 3 +#define LPFC_PCI_CFG_SIZE 4096 +#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4) + +#define IDIAG_PCICFG_WHERE_INDX 0 +#define IDIAG_PCICFG_COUNT_INDX 1 +#define IDIAG_PCICFG_VALUE_INDX 2 + +/* barAcc */ +#define LPFC_PCI_BAR_BROWSE 0xffff +#define LPFC_PCI_BAR_RD_CMD_ARG 3 +#define LPFC_PCI_BAR_WR_CMD_ARG 3 + +#define LPFC_PCI_IF0_BAR0_SIZE (1024 * 16) +#define LPFC_PCI_IF0_BAR1_SIZE (1024 * 128) +#define LPFC_PCI_IF0_BAR2_SIZE (1024 * 128) +#define LPFC_PCI_IF2_BAR0_SIZE (1024 * 32) + +#define LPFC_PCI_BAR_RD_BUF_SIZE 4096 +#define LPFC_PCI_BAR_RD_SIZE (LPFC_PCI_BAR_RD_BUF_SIZE/4) + +#define LPFC_PCI_IF0_BAR0_RD_SIZE (LPFC_PCI_IF0_BAR0_SIZE/4) +#define LPFC_PCI_IF0_BAR1_RD_SIZE (LPFC_PCI_IF0_BAR1_SIZE/4) +#define LPFC_PCI_IF0_BAR2_RD_SIZE (LPFC_PCI_IF0_BAR2_SIZE/4) +#define LPFC_PCI_IF2_BAR0_RD_SIZE (LPFC_PCI_IF2_BAR0_SIZE/4) + +#define IDIAG_BARACC_BAR_NUM_INDX 0 +#define IDIAG_BARACC_OFF_SET_INDX 1 +#define IDIAG_BARACC_ACC_MOD_INDX 2 +#define IDIAG_BARACC_REG_VAL_INDX 2 +#define IDIAG_BARACC_BAR_SZE_INDX 3 + +#define IDIAG_BARACC_BAR_0 0 +#define IDIAG_BARACC_BAR_1 1 +#define IDIAG_BARACC_BAR_2 2 + +#define SINGLE_WORD 1 + +/* queue info */ +#define LPFC_QUE_INFO_GET_BUF_SIZE 4096 + +/* queue acc */ +#define LPFC_QUE_ACC_BROWSE 0xffff +#define LPFC_QUE_ACC_RD_CMD_ARG 4 +#define LPFC_QUE_ACC_WR_CMD_ARG 6 +#define LPFC_QUE_ACC_BUF_SIZE 4096 +#define LPFC_QUE_ACC_SIZE (LPFC_QUE_ACC_BUF_SIZE/2) + +#define LPFC_IDIAG_EQ 1 +#define LPFC_IDIAG_CQ 2 +#define LPFC_IDIAG_MQ 3 +#define LPFC_IDIAG_WQ 4 +#define LPFC_IDIAG_RQ 5 + +#define IDIAG_QUEACC_QUETP_INDX 0 +#define IDIAG_QUEACC_QUEID_INDX 1 +#define IDIAG_QUEACC_INDEX_INDX 2 +#define IDIAG_QUEACC_COUNT_INDX 3 +#define IDIAG_QUEACC_OFFST_INDX 4 +#define IDIAG_QUEACC_VALUE_INDX 5 + +/* doorbell register acc */ +#define LPFC_DRB_ACC_ALL 0xffff +#define LPFC_DRB_ACC_RD_CMD_ARG 1 +#define LPFC_DRB_ACC_WR_CMD_ARG 2 +#define LPFC_DRB_ACC_BUF_SIZE 256 + +#define LPFC_DRB_EQ 1 +#define LPFC_DRB_CQ 2 +#define LPFC_DRB_MQ 3 +#define LPFC_DRB_WQ 4 +#define LPFC_DRB_RQ 5 + +#define LPFC_DRB_MAX 5 + +#define IDIAG_DRBACC_REGID_INDX 0 +#define IDIAG_DRBACC_VALUE_INDX 1 + +/* control register acc */ +#define LPFC_CTL_ACC_ALL 0xffff +#define LPFC_CTL_ACC_RD_CMD_ARG 1 +#define LPFC_CTL_ACC_WR_CMD_ARG 2 +#define LPFC_CTL_ACC_BUF_SIZE 256 + +#define LPFC_CTL_PORT_SEM 1 +#define LPFC_CTL_PORT_STA 2 +#define 
LPFC_CTL_PORT_CTL 3 +#define LPFC_CTL_PORT_ER1 4 +#define LPFC_CTL_PORT_ER2 5 +#define LPFC_CTL_PDEV_CTL 6 + +#define LPFC_CTL_MAX 6 + +#define IDIAG_CTLACC_REGID_INDX 0 +#define IDIAG_CTLACC_VALUE_INDX 1 + +/* mailbox access */ +#define LPFC_MBX_DMP_ARG 4 + +#define LPFC_MBX_ACC_BUF_SIZE 512 +#define LPFC_MBX_ACC_LBUF_SZ 128 + +#define LPFC_MBX_DMP_MBX_WORD 0x00000001 +#define LPFC_MBX_DMP_MBX_BYTE 0x00000002 +#define LPFC_MBX_DMP_MBX_ALL (LPFC_MBX_DMP_MBX_WORD | LPFC_MBX_DMP_MBX_BYTE) + +#define LPFC_BSG_DMP_MBX_RD_MBX 0x00000001 +#define LPFC_BSG_DMP_MBX_RD_BUF 0x00000002 +#define LPFC_BSG_DMP_MBX_WR_MBX 0x00000004 +#define LPFC_BSG_DMP_MBX_WR_BUF 0x00000008 +#define LPFC_BSG_DMP_MBX_ALL (LPFC_BSG_DMP_MBX_RD_MBX | \ + LPFC_BSG_DMP_MBX_RD_BUF | \ + LPFC_BSG_DMP_MBX_WR_MBX | \ + LPFC_BSG_DMP_MBX_WR_BUF) + +#define LPFC_MBX_DMP_ALL 0xffff +#define LPFC_MBX_ALL_CMD 0xff + +#define IDIAG_MBXACC_MBCMD_INDX 0 +#define IDIAG_MBXACC_DPMAP_INDX 1 +#define IDIAG_MBXACC_DPCNT_INDX 2 +#define IDIAG_MBXACC_WDCNT_INDX 3 + +/* extents access */ +#define LPFC_EXT_ACC_CMD_ARG 1 +#define LPFC_EXT_ACC_BUF_SIZE 4096 + +#define LPFC_EXT_ACC_AVAIL 0x1 +#define LPFC_EXT_ACC_ALLOC 0x2 +#define LPFC_EXT_ACC_DRIVR 0x4 +#define LPFC_EXT_ACC_ALL (LPFC_EXT_ACC_DRIVR | \ + LPFC_EXT_ACC_AVAIL | \ + LPFC_EXT_ACC_ALLOC) + +#define IDIAG_EXTACC_EXMAP_INDX 0 + +#define SIZE_U8 sizeof(uint8_t) +#define SIZE_U16 sizeof(uint16_t) +#define SIZE_U32 sizeof(uint32_t) + +#define lpfc_nvmeio_data(phba, fmt, arg...) \ + { \ + if (phba->nvmeio_trc_on) \ + lpfc_debugfs_nvme_trc(phba, fmt, ##arg); \ + } + +struct lpfc_debug { + char *i_private; + char op; +#define LPFC_IDIAG_OP_RD 1 +#define LPFC_IDIAG_OP_WR 2 + char *buffer; + int len; +}; + +struct lpfc_debugfs_trc { + char *fmt; + uint32_t data1; + uint32_t data2; + uint32_t data3; + uint32_t seq_cnt; + unsigned long jif; +}; + +struct lpfc_debugfs_nvmeio_trc { + char *fmt; + uint16_t data1; + uint16_t data2; + uint32_t data3; +}; + +struct lpfc_idiag_offset { + uint32_t last_rd; +}; + +#define LPFC_IDIAG_CMD_DATA_SIZE 8 +struct lpfc_idiag_cmd { + uint32_t opcode; +#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001 +#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002 +#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003 +#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004 + +#define LPFC_IDIAG_CMD_BARACC_RD 0x00000008 +#define LPFC_IDIAG_CMD_BARACC_WR 0x00000009 +#define LPFC_IDIAG_CMD_BARACC_ST 0x0000000a +#define LPFC_IDIAG_CMD_BARACC_CL 0x0000000b + +#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011 +#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012 +#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013 +#define LPFC_IDIAG_CMD_QUEACC_CL 0x00000014 + +#define LPFC_IDIAG_CMD_DRBACC_RD 0x00000021 +#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022 +#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023 +#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024 + +#define LPFC_IDIAG_CMD_CTLACC_RD 0x00000031 +#define LPFC_IDIAG_CMD_CTLACC_WR 0x00000032 +#define LPFC_IDIAG_CMD_CTLACC_ST 0x00000033 +#define LPFC_IDIAG_CMD_CTLACC_CL 0x00000034 + +#define LPFC_IDIAG_CMD_MBXACC_DP 0x00000041 +#define LPFC_IDIAG_BSG_MBXACC_DP 0x00000042 + +#define LPFC_IDIAG_CMD_EXTACC_RD 0x00000051 + + uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE]; +}; + +struct lpfc_idiag { + uint32_t active; + struct lpfc_idiag_cmd cmd; + struct lpfc_idiag_offset offset; + void *ptr_private; +}; + +#define MAX_DEBUGFS_RX_INFO_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY) +struct lpfc_rx_monitor_debug { + char *i_private; + char *buffer; +}; + +#else + +#define lpfc_nvmeio_data(phba, fmt, arg...) 
\ + no_printk(fmt, ##arg) + +#endif + +/* multixripool output buffer size */ +#define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192 + +enum { + DUMP_IO, + DUMP_MBX, + DUMP_ELS, + DUMP_NVMELS, +}; + +/* Mask for discovery_trace */ +#define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */ +#define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */ +#define LPFC_DISC_TRC_ELS_UNSOL 0x4 /* Trace ELS rcv'ed */ +#define LPFC_DISC_TRC_ELS_ALL 0x7 /* Trace ELS */ +#define LPFC_DISC_TRC_MBOX_VPORT 0x8 /* Trace vport MBOXs */ +#define LPFC_DISC_TRC_MBOX 0x10 /* Trace other MBOXs */ +#define LPFC_DISC_TRC_MBOX_ALL 0x18 /* Trace all MBOXs */ +#define LPFC_DISC_TRC_CT 0x20 /* Trace disc CT requests */ +#define LPFC_DISC_TRC_DSM 0x40 /* Trace DSM events */ +#define LPFC_DISC_TRC_RPORT 0x80 /* Trace rport events */ +#define LPFC_DISC_TRC_NODE 0x100 /* Trace ndlp state changes */ + +#define LPFC_DISC_TRC_DISCOVERY 0xef /* common mask for general + * discovery */ +#endif /* H_LPFC_DEBUG_FS */ + + +/* + * Driver debug utility routines outside of debugfs. The debug utility + * routines implemented here is intended to be used in the instrumented + * debug driver for debugging host or port issues. + */ + +/** + * lpfc_debug_dump_qe - dump an specific entry from a queue + * @q: Pointer to the queue descriptor. + * @idx: Index to the entry on the queue. + * + * This function dumps an entry indexed by @idx from a queue specified by the + * queue descriptor @q. + **/ +static void +lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx) +{ + char line_buf[LPFC_LBUF_SZ]; + int i, esize, qe_word_cnt, len; + uint32_t *pword; + + /* sanity checks */ + if (!q) + return; + if (idx >= q->entry_count) + return; + + esize = q->entry_size; + qe_word_cnt = esize / sizeof(uint32_t); + pword = lpfc_sli4_qe(q, idx); + + len = 0; + len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx); + if (qe_word_cnt > 8) + printk(KERN_ERR "%s\n", line_buf); + + for (i = 0; i < qe_word_cnt; i++) { + if (!(i % 8)) { + if (i != 0) + printk(KERN_ERR "%s\n", line_buf); + if (qe_word_cnt > 8) { + len = 0; + memset(line_buf, 0, LPFC_LBUF_SZ); + len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, + "%03d: ", i); + } + } + len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ", + ((uint32_t)*pword) & 0xffffffff); + pword++; + } + if (qe_word_cnt <= 8 || (i - 1) % 8) + printk(KERN_ERR "%s\n", line_buf); +} + +/** + * lpfc_debug_dump_q - dump all entries from an specific queue + * @q: Pointer to the queue descriptor. + * + * This function dumps all entries from a queue specified by the queue + * descriptor @q. + **/ +static inline void +lpfc_debug_dump_q(struct lpfc_queue *q) +{ + int idx, entry_count; + + /* sanity check */ + if (!q) + return; + + dev_printk(KERN_ERR, &(((q->phba))->pcidev)->dev, + "%d: [qid:%d, type:%d, subtype:%d, " + "qe_size:%d, qe_count:%d, " + "host_index:%d, port_index:%d]\n", + (q->phba)->brd_no, + q->queue_id, q->type, q->subtype, + q->entry_size, q->entry_count, + q->host_index, q->hba_index); + entry_count = q->entry_count; + for (idx = 0; idx < entry_count; idx++) + lpfc_debug_dump_qe(q, idx); + printk(KERN_ERR "\n"); +} + +/** + * lpfc_debug_dump_wq - dump all entries from the fcp or nvme work queue + * @phba: Pointer to HBA context object. + * @wqidx: Index to a FCP or NVME work queue. + * + * This function dumps all entries from a FCP or NVME work queue specified + * by the wqidx. 
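A minimal illustrative sketch (not part of the upstream file): the LPFC_DISC_TRC_* bits above select which discovery events land in the debugfs trace, and a trace point passes one of them along with a format string and up to three data words, in the same style as the FLOGI completion handler later in this patch. The locals elscmd, did and retry are hypothetical.

	/* Recorded only when LPFC_DISC_TRC_ELS_CMD is enabled in the trace mask */
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "ELS cmd: cmd:x%x did:x%x retry:x%x",
			      elscmd, did, retry);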
+ **/ +static inline void +lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx) +{ + struct lpfc_queue *wq; + char *qtypestr; + + if (qtype == DUMP_IO) { + wq = phba->sli4_hba.hdwq[wqidx].io_wq; + qtypestr = "IO"; + } else if (qtype == DUMP_MBX) { + wq = phba->sli4_hba.mbx_wq; + qtypestr = "MBX"; + } else if (qtype == DUMP_ELS) { + wq = phba->sli4_hba.els_wq; + qtypestr = "ELS"; + } else if (qtype == DUMP_NVMELS) { + wq = phba->sli4_hba.nvmels_wq; + qtypestr = "NVMELS"; + } else + return; + + if (qtype == DUMP_IO) + pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n", + qtypestr, wqidx, wq->queue_id); + else + pr_err("%s WQ: WQ[Qid:%d]\n", + qtypestr, wq->queue_id); + + lpfc_debug_dump_q(wq); +} + +/** + * lpfc_debug_dump_cq - dump all entries from a fcp or nvme work queue's + * cmpl queue + * @phba: Pointer to HBA context object. + * @wqidx: Index to a FCP work queue. + * + * This function dumps all entries from a FCP or NVME completion queue + * which is associated to the work queue specified by the @wqidx. + **/ +static inline void +lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx) +{ + struct lpfc_queue *wq, *cq, *eq; + char *qtypestr; + int eqidx; + + /* io wq and cq are 1:1, thus same indexes */ + eq = NULL; + + if (qtype == DUMP_IO) { + wq = phba->sli4_hba.hdwq[wqidx].io_wq; + cq = phba->sli4_hba.hdwq[wqidx].io_cq; + qtypestr = "IO"; + } else if (qtype == DUMP_MBX) { + wq = phba->sli4_hba.mbx_wq; + cq = phba->sli4_hba.mbx_cq; + qtypestr = "MBX"; + } else if (qtype == DUMP_ELS) { + wq = phba->sli4_hba.els_wq; + cq = phba->sli4_hba.els_cq; + qtypestr = "ELS"; + } else if (qtype == DUMP_NVMELS) { + wq = phba->sli4_hba.nvmels_wq; + cq = phba->sli4_hba.nvmels_cq; + qtypestr = "NVMELS"; + } else + return; + + for (eqidx = 0; eqidx < phba->cfg_hdw_queue; eqidx++) { + eq = phba->sli4_hba.hdwq[eqidx].hba_eq; + if (cq->assoc_qid == eq->queue_id) + break; + } + if (eqidx == phba->cfg_hdw_queue) { + pr_err("Couldn't find EQ for CQ. Using EQ[0]\n"); + eqidx = 0; + eq = phba->sli4_hba.hdwq[0].hba_eq; + } + + if (qtype == DUMP_IO) + pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]" + "->EQ[Idx:%d|Qid:%d]:\n", + qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id, + eqidx, eq->queue_id); + else + pr_err("%s CQ: WQ[Qid:%d]->CQ[Qid:%d]" + "->EQ[Idx:%d|Qid:%d]:\n", + qtypestr, wq->queue_id, cq->queue_id, + eqidx, eq->queue_id); + + lpfc_debug_dump_q(cq); +} + +/** + * lpfc_debug_dump_hba_eq - dump all entries from a fcp work queue's evt queue + * @phba: Pointer to HBA context object. + * @fcp_wqidx: Index to a FCP work queue. + * + * This function dumps all entries from a FCP event queue which is + * associated to the FCP work queue specified by the @fcp_wqidx. + **/ +static inline void +lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int qidx) +{ + struct lpfc_queue *qp; + + qp = phba->sli4_hba.hdwq[qidx].hba_eq; + + pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id); + + lpfc_debug_dump_q(qp); +} + +/** + * lpfc_debug_dump_dat_rq - dump all entries from the receive data queue + * @phba: Pointer to HBA context object. + * + * This function dumps all entries from the receive data queue. + **/ +static inline void +lpfc_debug_dump_dat_rq(struct lpfc_hba *phba) +{ + printk(KERN_ERR "DAT RQ: RQ[Qid:%d]\n", + phba->sli4_hba.dat_rq->queue_id); + lpfc_debug_dump_q(phba->sli4_hba.dat_rq); +} + +/** + * lpfc_debug_dump_hdr_rq - dump all entries from the receive header queue + * @phba: Pointer to HBA context object. + * + * This function dumps all entries from the receive header queue. 
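A minimal sketch (illustrative only, not part of the patch) of how the per-hardware-queue helpers above combine when debugging a single I/O path; the wrapper name and hdwq_idx are hypothetical, and the queue type comes from the DUMP_IO case shown above.

static void lpfc_dbg_dump_io_path(struct lpfc_hba *phba, int hdwq_idx)
{
	lpfc_debug_dump_wq(phba, DUMP_IO, hdwq_idx);	/* work queue */
	lpfc_debug_dump_cq(phba, DUMP_IO, hdwq_idx);	/* its completion queue */
	lpfc_debug_dump_hba_eq(phba, hdwq_idx);		/* its event queue */
}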
+ **/ +static inline void +lpfc_debug_dump_hdr_rq(struct lpfc_hba *phba) +{ + printk(KERN_ERR "HDR RQ: RQ[Qid:%d]\n", + phba->sli4_hba.hdr_rq->queue_id); + lpfc_debug_dump_q(phba->sli4_hba.hdr_rq); +} + +/** + * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id + * @phba: Pointer to HBA context object. + * @qid: Work queue identifier. + * + * This function dumps all entries from a work queue identified by the queue + * identifier. + **/ +static inline void +lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid) +{ + int wq_idx; + + for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++) + if (phba->sli4_hba.hdwq[wq_idx].io_wq->queue_id == qid) + break; + if (wq_idx < phba->cfg_hdw_queue) { + pr_err("IO WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); + lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].io_wq); + return; + } + + if (phba->sli4_hba.els_wq->queue_id == qid) { + pr_err("ELS WQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.els_wq); + return; + } + + if (phba->sli4_hba.nvmels_wq->queue_id == qid) { + pr_err("NVME LS WQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.nvmels_wq); + } +} + +/** + * lpfc_debug_dump_mq_by_id - dump all entries from a mbox queue by queue id + * @phba: Pointer to HBA context object. + * @qid: Mbox work queue identifier. + * + * This function dumps all entries from a mbox work queue identified by the + * queue identifier. + **/ +static inline void +lpfc_debug_dump_mq_by_id(struct lpfc_hba *phba, int qid) +{ + if (phba->sli4_hba.mbx_wq->queue_id == qid) { + printk(KERN_ERR "MBX WQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.mbx_wq); + } +} + +/** + * lpfc_debug_dump_rq_by_id - dump all entries from a receive queue by queue id + * @phba: Pointer to HBA context object. + * @qid: Receive queue identifier. + * + * This function dumps all entries from a receive queue identified by the + * queue identifier. + **/ +static inline void +lpfc_debug_dump_rq_by_id(struct lpfc_hba *phba, int qid) +{ + if (phba->sli4_hba.hdr_rq->queue_id == qid) { + printk(KERN_ERR "HDR RQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.hdr_rq); + return; + } + if (phba->sli4_hba.dat_rq->queue_id == qid) { + printk(KERN_ERR "DAT RQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.dat_rq); + } +} + +/** + * lpfc_debug_dump_cq_by_id - dump all entries from a cmpl queue by queue id + * @phba: Pointer to HBA context object. + * @qid: Complete queue identifier. + * + * This function dumps all entries from a complete queue identified by the + * queue identifier. + **/ +static inline void +lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid) +{ + int cq_idx; + + for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++) + if (phba->sli4_hba.hdwq[cq_idx].io_cq->queue_id == qid) + break; + + if (cq_idx < phba->cfg_hdw_queue) { + pr_err("IO CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); + lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].io_cq); + return; + } + + if (phba->sli4_hba.els_cq->queue_id == qid) { + pr_err("ELS CQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.els_cq); + return; + } + + if (phba->sli4_hba.nvmels_cq->queue_id == qid) { + pr_err("NVME LS CQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.nvmels_cq); + return; + } + + if (phba->sli4_hba.mbx_cq->queue_id == qid) { + pr_err("MBX CQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.mbx_cq); + } +} + +/** + * lpfc_debug_dump_eq_by_id - dump all entries from an event queue by queue id + * @phba: Pointer to HBA context object. + * @qid: Complete queue identifier. 
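Another illustrative sketch (not part of the patch): when only a queue id is known, for example from a log message, the by-id helpers above can simply be tried in turn, since each silently returns when the id does not belong to its queue type; the wrapper name is hypothetical.

static void lpfc_dbg_dump_any_by_id(struct lpfc_hba *phba, int qid)
{
	lpfc_debug_dump_wq_by_id(phba, qid);
	lpfc_debug_dump_mq_by_id(phba, qid);
	lpfc_debug_dump_rq_by_id(phba, qid);
	lpfc_debug_dump_cq_by_id(phba, qid);
}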
+ * + * This function dumps all entries from an event queue identified by the + * queue identifier. + **/ +static inline void +lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid) +{ + int eq_idx; + + for (eq_idx = 0; eq_idx < phba->cfg_hdw_queue; eq_idx++) + if (phba->sli4_hba.hdwq[eq_idx].hba_eq->queue_id == qid) + break; + + if (eq_idx < phba->cfg_hdw_queue) { + printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid); + lpfc_debug_dump_q(phba->sli4_hba.hdwq[eq_idx].hba_eq); + return; + } +} + +void lpfc_debug_dump_all_queues(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h new file mode 100644 index 000000000..f82615d87 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -0,0 +1,293 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */ +#define FC_MAX_NS_RSP 64512 /* max size NameServer rsp */ +#define FC_MAXLOOP 126 /* max devices supported on a fc loop */ +#define LPFC_DISC_FLOGI_TMO 10 /* Discovery FLOGI ratov */ + + +/* This is the protocol dependent definition for a Node List Entry. + * This is used by Fibre Channel protocol to support FCP. 
+ */ + +/* worker thread events */ +enum lpfc_work_type { + LPFC_EVT_ONLINE, + LPFC_EVT_OFFLINE_PREP, + LPFC_EVT_OFFLINE, + LPFC_EVT_WARM_START, + LPFC_EVT_KILL, + LPFC_EVT_ELS_RETRY, + LPFC_EVT_DEV_LOSS, + LPFC_EVT_FASTPATH_MGMT_EVT, + LPFC_EVT_RESET_HBA, + LPFC_EVT_RECOVER_PORT +}; + +/* structure used to queue event to the discovery tasklet */ +struct lpfc_work_evt { + struct list_head evt_listp; + void *evt_arg1; + void *evt_arg2; + enum lpfc_work_type evt; +}; + +struct lpfc_scsi_check_condition_event; +struct lpfc_scsi_varqueuedepth_event; +struct lpfc_scsi_event_header; +struct lpfc_fabric_event_header; +struct lpfc_fcprdchkerr_event; + +/* structure used for sending events from fast path */ +struct lpfc_fast_path_event { + struct lpfc_work_evt work_evt; + struct lpfc_vport *vport; + union { + struct lpfc_scsi_check_condition_event check_cond_evt; + struct lpfc_scsi_varqueuedepth_event queue_depth_evt; + struct lpfc_scsi_event_header scsi_evt; + struct lpfc_fabric_event_header fabric_evt; + struct lpfc_fcprdchkerr_event read_check_error; + } un; +}; + +#define LPFC_SLI4_MAX_XRI 1024 /* Used to make the ndlp's xri_bitmap */ +#define XRI_BITMAP_ULONGS (LPFC_SLI4_MAX_XRI / BITS_PER_LONG) +struct lpfc_node_rrqs { + unsigned long xri_bitmap[XRI_BITMAP_ULONGS]; +}; + +enum lpfc_fc4_xpt_flags { + NLP_XPT_REGD = 0x1, + SCSI_XPT_REGD = 0x2, + NVME_XPT_REGD = 0x4, + NVME_XPT_UNREG_WAIT = 0x8, + NLP_XPT_HAS_HH = 0x10 +}; + +enum lpfc_nlp_save_flags { + /* devloss occurred during recovery */ + NLP_IN_RECOV_POST_DEV_LOSS = 0x1, + /* wait for outstanding LOGO to cmpl */ + NLP_WAIT_FOR_LOGO = 0x2, +}; + +struct lpfc_nodelist { + struct list_head nlp_listp; + struct serv_parm fc_sparam; /* buffer for service params */ + struct lpfc_name nlp_portname; + struct lpfc_name nlp_nodename; + + spinlock_t lock; /* Node management lock */ + + uint32_t nlp_flag; /* entry flags */ + uint32_t nlp_DID; /* FC D_ID of entry */ + uint32_t nlp_last_elscmd; /* Last ELS cmd sent */ + uint16_t nlp_type; +#define NLP_FC_NODE 0x1 /* entry is an FC node */ +#define NLP_FABRIC 0x4 /* entry rep a Fabric entity */ +#define NLP_FCP_TARGET 0x8 /* entry is an FCP target */ +#define NLP_FCP_INITIATOR 0x10 /* entry is an FCP Initiator */ +#define NLP_NVME_TARGET 0x20 /* entry is a NVME Target */ +#define NLP_NVME_INITIATOR 0x40 /* entry is a NVME Initiator */ +#define NLP_NVME_DISCOVERY 0x80 /* entry has NVME disc srvc */ + + uint16_t nlp_fc4_type; /* FC types node supports. */ + /* Assigned from GID_FF, only + * FCP (0x8) and NVME (0x28) + * supported. 
+ */ +#define NLP_FC4_NONE 0x0 +#define NLP_FC4_FCP 0x1 /* FC4 Type FCP (value x8)) */ +#define NLP_FC4_NVME 0x2 /* FC4 TYPE NVME (value x28) */ + + uint16_t nlp_rpi; + uint16_t nlp_state; /* state transition indicator */ + uint16_t nlp_prev_state; /* state transition indicator */ + uint16_t nlp_xri; /* output exchange id for RPI */ + uint16_t nlp_sid; /* scsi id */ +#define NLP_NO_SID 0xffff + uint16_t nlp_maxframe; /* Max RCV frame size */ + uint8_t nlp_class_sup; /* Supported Classes */ + uint8_t nlp_retry; /* used for ELS retries */ + uint8_t nlp_fcp_info; /* class info, bits 0-3 */ +#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ + u8 nlp_nvme_info; /* NVME NSLER Support */ + uint8_t vmid_support; /* destination VMID support */ +#define NLP_NVME_NSLER 0x1 /* NVME NSLER device */ + + struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ + struct lpfc_hba *phba; + struct fc_rport *rport; /* scsi_transport_fc port structure */ + struct lpfc_nvme_rport *nrport; /* nvme transport rport struct. */ + struct lpfc_vport *vport; + struct lpfc_work_evt els_retry_evt; + struct lpfc_work_evt dev_loss_evt; + struct lpfc_work_evt recovery_evt; + struct kref kref; + atomic_t cmd_pending; + uint32_t cmd_qdepth; + unsigned long last_change_time; + unsigned long *active_rrqs_xri_bitmap; + uint32_t fc4_prli_sent; + + /* flags to keep ndlp alive until special conditions are met */ + enum lpfc_nlp_save_flags save_flags; + + enum lpfc_fc4_xpt_flags fc4_xpt_flags; + + uint32_t nvme_fb_size; /* NVME target's supported byte cnt */ +#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */ + uint32_t nlp_defer_did; + wait_queue_head_t *logo_waitq; +}; + +struct lpfc_node_rrq { + struct list_head list; + uint16_t xritag; + uint16_t send_rrq; + uint16_t rxid; + uint32_t nlp_DID; /* FC D_ID of entry */ + struct lpfc_vport *vport; + unsigned long rrq_stop_time; +}; + +#define lpfc_ndlp_check_qdepth(phba, ndlp) \ + (ndlp->cmd_qdepth < phba->sli4_hba.max_cfg_param.max_xri) + +/* Defines for nlp_flag (uint32) */ +#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ +#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */ +#define NLP_RELEASE_RPI 0x00000004 /* Release RPI to free pool */ +#define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */ +#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ +#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ +#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */ +#define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */ +#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */ +#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ +#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. 
*/ +#define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */ +#define NLP_DROPPED 0x00010000 /* Init ref count has been dropped */ +#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ +#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ +#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */ +#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */ +#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */ +#define NLP_ISSUE_LOGO 0x00400000 /* waiting to issue a LOGO */ +#define NLP_IN_DEV_LOSS 0x00800000 /* devloss in progress */ +#define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful + ACC */ +#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from + NPR list */ +#define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */ +#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ +#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ +#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ +#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */ +#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ + +/* There are 4 different double linked lists nodelist entries can reside on. + * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used + * when Link Up discovery or Registered State Change Notification (RSCN) + * processing is needed. Each list holds the nodes that require a PLOGI or + * ADISC Extended Link Service (ELS) request. These lists keep track of the + * nodes affected by an RSCN, or a Link Up (Typically, all nodes are effected + * by Link Up) event. The unmapped_list contains all nodes that have + * successfully logged into at the Fibre Channel level. The + * mapped_list will contain all nodes that are mapped FCP targets. + * + * The bind list is a list of undiscovered (potentially non-existent) nodes + * that we have saved binding information on. This information is used when + * nodes transition from the unmapped to the mapped list. + */ + +/* Defines for nlp_state */ +#define NLP_STE_UNUSED_NODE 0x0 /* node is just allocated */ +#define NLP_STE_PLOGI_ISSUE 0x1 /* PLOGI was sent to NL_PORT */ +#define NLP_STE_ADISC_ISSUE 0x2 /* ADISC was sent to NL_PORT */ +#define NLP_STE_REG_LOGIN_ISSUE 0x3 /* REG_LOGIN was issued for NL_PORT */ +#define NLP_STE_PRLI_ISSUE 0x4 /* PRLI was sent to NL_PORT */ +#define NLP_STE_LOGO_ISSUE 0x5 /* LOGO was sent to NL_PORT */ +#define NLP_STE_UNMAPPED_NODE 0x6 /* PRLI completed from NL_PORT */ +#define NLP_STE_MAPPED_NODE 0x7 /* Identified as a FCP Target */ +#define NLP_STE_NPR_NODE 0x8 /* NPort disappeared */ +#define NLP_STE_MAX_STATE 0x9 +#define NLP_STE_FREED_NODE 0xff /* node entry was freed to MEM_NLP */ + +/* For UNUSED_NODE state, the node has just been allocated. + * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on + * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list + * and put on the unmapped list. For ADISC processing, the node is taken off + * the ADISC list and placed on either the mapped or unmapped list (depending + * on its previous state). Once on the unmapped list, a PRLI is issued and the + * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is + * changed to PRLI_COMPL. If the completion indicates a mapped + * node, the node is taken off the unmapped list. The binding list is checked + * for a valid binding, or a binding is automatically assigned. 
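A minimal sketch (illustrative only) of the locking convention implied by the nlp_flag bits above: they are read-modify-written only under the per-node spinlock, exactly as the ELS completion handlers later in this patch do.

	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag |= NLP_NPR_2B_DISC;	/* include node in discovery count */
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;	/* recover via PLOGI, not ADISC */
	spin_unlock_irq(&ndlp->lock);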
If binding + * assignment is unsuccessful, the node is left on the unmapped list. If + * binding assignment is successful, the associated binding list entry (if + * any) is removed, and the node is placed on the mapped list. + */ +/* + * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped + * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers + * expire, all effected nodes will receive a DEVICE_RM event. + */ +/* + * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists + * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap + * check, additional nodes may be added (DEVICE_ADD) or removed (DEVICE_RM) to / + * from the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated, + * we will first process the ADISC list. 32 entries are processed initially and + * ADISC is initited for each one. Completions / Events for each node are + * funnelled thru the state machine. As each node finishes ADISC processing, it + * starts ADISC for any nodes waiting for ADISC processing. If no nodes are + * waiting, and the ADISC list count is identically 0, then we are done. For + * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we + * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI + * list. 32 entries are processed initially and PLOGI is initited for each one. + * Completions / Events for each node are funnelled thru the state machine. As + * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting + * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is + * identically 0, then we are done. We have now completed discovery / RSCN + * handling. Upon completion, ALL nodes should be on either the mapped or + * unmapped lists. + */ + +/* Defines for Node List Entry Events that could happen */ +#define NLP_EVT_RCV_PLOGI 0x0 /* Rcv'd an ELS PLOGI command */ +#define NLP_EVT_RCV_PRLI 0x1 /* Rcv'd an ELS PRLI command */ +#define NLP_EVT_RCV_LOGO 0x2 /* Rcv'd an ELS LOGO command */ +#define NLP_EVT_RCV_ADISC 0x3 /* Rcv'd an ELS ADISC command */ +#define NLP_EVT_RCV_PDISC 0x4 /* Rcv'd an ELS PDISC command */ +#define NLP_EVT_RCV_PRLO 0x5 /* Rcv'd an ELS PRLO command */ +#define NLP_EVT_CMPL_PLOGI 0x6 /* Sent an ELS PLOGI command */ +#define NLP_EVT_CMPL_PRLI 0x7 /* Sent an ELS PRLI command */ +#define NLP_EVT_CMPL_LOGO 0x8 /* Sent an ELS LOGO command */ +#define NLP_EVT_CMPL_ADISC 0x9 /* Sent an ELS ADISC command */ +#define NLP_EVT_CMPL_REG_LOGIN 0xa /* REG_LOGIN mbox cmd completed */ +#define NLP_EVT_DEVICE_RM 0xb /* Device not found in NS / ALPAmap */ +#define NLP_EVT_DEVICE_RECOVERY 0xc /* Device existence unknown */ +#define NLP_EVT_MAX_EVENT 0xd +#define NLP_EVT_NOTHING_PENDING 0xff diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c new file mode 100644 index 000000000..54e47f268 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -0,0 +1,12467 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. 
* + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ +/* See Fibre Channel protocol T11 FC-LS for details */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc_scsi.h" +#include "lpfc.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" + +static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *); +static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *); +static void lpfc_fabric_abort_vport(struct lpfc_vport *vport); +static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, uint8_t retry); +static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, + struct lpfc_iocbq *iocb); +static void lpfc_cmpl_els_edc(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb); +static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *); + +static int lpfc_max_els_tries = 3; + +static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport); +static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max); +static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid); + +/** + * lpfc_els_chk_latt - Check host link attention event for a vport + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine checks whether there is an outstanding host link + * attention event during the discovery process with the @vport. It is done + * by reading the HBA's Host Attention (HA) register. If there is any host + * link attention events during this @vport's discovery process, the @vport + * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall + * be issued if the link state is not already in host link cleared state, + * and a return code shall indicate whether the host link attention event + * had happened. + * + * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport + * state in LPFC_VPORT_READY, the request for checking host link attention + * event will be ignored and a return code shall indicate no host link + * attention event had happened. 
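A minimal sketch (illustrative, not part of the file) of the calling convention this routine implies: ELS completion paths test it first and abandon further processing when a link attention is pending, as lpfc_cmpl_els_flogi() does further down; "out" stands for the handler's cleanup label.

	if (lpfc_els_chk_latt(vport)) {
		/* Link bounced during discovery; the LATT handling will
		 * restart discovery, so stop processing this completion.
		 */
		goto out;
	}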
+ * + * Return codes + * 0 - no host link attention event happened + * 1 - host link attention event happened + **/ +int +lpfc_els_chk_latt(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + uint32_t ha_copy; + + if (vport->port_state >= LPFC_VPORT_READY || + phba->link_state == LPFC_LINK_DOWN || + phba->sli_rev > LPFC_SLI_REV3) + return 0; + + /* Read the HBA Host Attention Register */ + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return 1; + + if (!(ha_copy & HA_LATT)) + return 0; + + /* Pending Link Event during Discovery */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0237 Pending Link Event during " + "Discovery: State x%x\n", + phba->pport->port_state); + + /* CLEAR_LA should re-enable link attention events and + * we should then immediately take a LATT event. The + * LATT processing should call lpfc_linkdown() which + * will cleanup any left over in-progress discovery + * events. + */ + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_ABORT_DISCOVERY; + spin_unlock_irq(shost->host_lock); + + if (phba->link_state != LPFC_CLEAR_LA) + lpfc_issue_clear_la(phba, vport); + + return 1; +} + +/** + * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure + * @vport: pointer to a host virtual N_Port data structure. + * @expect_rsp: flag indicating whether response is expected. + * @cmd_size: size of the ELS command. + * @retry: number of retries to the command when it fails. + * @ndlp: pointer to a node-list data structure. + * @did: destination identifier. + * @elscmd: the ELS command code. + * + * This routine is used for allocating a lpfc-IOCB data structure from + * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters + * passed into the routine for discovery state machine to issue an Extended + * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation + * and preparation routine that is used by all the discovery state machine + * routines and the ELS command-specific fields will be later set up by + * the individual discovery machine routines after calling this routine + * allocating and preparing a generic IOCB data structure. It fills in the + * Buffer Descriptor Entries (BDEs), allocates buffers for both command + * payload and response payload (if expected). The reference count on the + * ndlp is incremented by 1 and the reference to the ndlp is put into + * ndlp of the IOCB data structure for this IOCB to hold the ndlp + * reference for the command's callback function to access later. + * + * Return code + * Pointer to the newly allocated/prepared els iocb data structure + * NULL - when els iocb data structure allocation/preparation failed + **/ +struct lpfc_iocbq * +lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp, + u16 cmd_size, u8 retry, + struct lpfc_nodelist *ndlp, u32 did, + u32 elscmd) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp; + struct ulp_bde64_le *bpl; + u32 timeout = 0; + + if (!lpfc_is_link_up(phba)) + return NULL; + + /* Allocate buffer for command iocb */ + elsiocb = lpfc_sli_get_iocbq(phba); + if (!elsiocb) + return NULL; + + /* + * If this command is for fabric controller and HBA running + * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 
+ */ + if ((did == Fabric_DID) && + (phba->hba_flag & HBA_FIP_SUPPORT) && + ((elscmd == ELS_CMD_FLOGI) || + (elscmd == ELS_CMD_FDISC) || + (elscmd == ELS_CMD_LOGO))) + switch (elscmd) { + case ELS_CMD_FLOGI: + elsiocb->cmd_flag |= + ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) + & LPFC_FIP_ELS_ID_MASK); + break; + case ELS_CMD_FDISC: + elsiocb->cmd_flag |= + ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) + & LPFC_FIP_ELS_ID_MASK); + break; + case ELS_CMD_LOGO: + elsiocb->cmd_flag |= + ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) + & LPFC_FIP_ELS_ID_MASK); + break; + } + else + elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK; + + /* fill in BDEs for command */ + /* Allocate buffer for command payload */ + pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); + if (pcmd) + pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); + if (!pcmd || !pcmd->virt) + goto els_iocb_free_pcmb_exit; + + INIT_LIST_HEAD(&pcmd->list); + + /* Allocate buffer for response payload */ + if (expect_rsp) { + prsp = kmalloc(sizeof(*prsp), GFP_KERNEL); + if (prsp) + prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, + &prsp->phys); + if (!prsp || !prsp->virt) + goto els_iocb_free_prsp_exit; + INIT_LIST_HEAD(&prsp->list); + } else { + prsp = NULL; + } + + /* Allocate buffer for Buffer ptr list */ + pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL); + if (pbuflist) + pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, + &pbuflist->phys); + if (!pbuflist || !pbuflist->virt) + goto els_iocb_free_pbuf_exit; + + INIT_LIST_HEAD(&pbuflist->list); + + if (expect_rsp) { + switch (elscmd) { + case ELS_CMD_FLOGI: + timeout = FF_DEF_RATOV * 2; + break; + case ELS_CMD_LOGO: + timeout = phba->fc_ratov; + break; + default: + timeout = phba->fc_ratov * 2; + } + + /* Fill SGE for the num bde count */ + elsiocb->num_bdes = 2; + } + + if (phba->sli_rev == LPFC_SLI_REV4) + bmp = pcmd; + else + bmp = pbuflist; + + lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did, + elscmd, timeout, expect_rsp); + + bpl = (struct ulp_bde64_le *)pbuflist->virt; + bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys)); + bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys)); + bpl->type_size = cpu_to_le32(cmd_size); + bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); + + if (expect_rsp) { + bpl++; + bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys)); + bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys)); + bpl->type_size = cpu_to_le32(FCELSSIZE); + bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); + } + + elsiocb->cmd_dmabuf = pcmd; + elsiocb->bpl_dmabuf = pbuflist; + elsiocb->retry = retry; + elsiocb->vport = vport; + elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT; + + if (prsp) + list_add(&prsp->list, &pcmd->list); + if (expect_rsp) { + /* Xmit ELS command to remote NPORT */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0116 Xmit ELS command x%x to remote " + "NPORT x%x I/O tag: x%x, port state:x%x " + "rpi x%x fc_flag:x%x\n", + elscmd, did, elsiocb->iotag, + vport->port_state, ndlp->nlp_rpi, + vport->fc_flag); + } else { + /* Xmit ELS response to remote NPORT */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0117 Xmit ELS response x%x to remote " + "NPORT x%x I/O tag: x%x, size: x%x " + "port_state x%x rpi x%x fc_flag x%x\n", + elscmd, ndlp->nlp_DID, elsiocb->iotag, + cmd_size, vport->port_state, + ndlp->nlp_rpi, vport->fc_flag); + } + + return elsiocb; + +els_iocb_free_pbuf_exit: + if (expect_rsp) + lpfc_mbuf_free(phba, prsp->virt, prsp->phys); + kfree(pbuflist); + +els_iocb_free_prsp_exit: + lpfc_mbuf_free(phba, pcmd->virt, 
pcmd->phys); + kfree(prsp); + +els_iocb_free_pcmb_exit: + kfree(pcmd); + lpfc_sli_release_iocbq(phba, elsiocb); + return NULL; +} + +/** + * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine issues a fabric registration login for a @vport. An + * active ndlp node with Fabric_DID must already exist for this @vport. + * The routine invokes two mailbox commands to carry out fabric registration + * login through the HBA firmware: the first mailbox command requests the + * HBA to perform link configuration for the @vport; and the second mailbox + * command requests the HBA to perform the actual fabric registration login + * with the @vport. + * + * Return code + * 0 - successfully issued fabric registration login for @vport + * -ENXIO -- failed to issue fabric registration login for @vport + **/ +int +lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mbox; + struct lpfc_nodelist *ndlp; + struct serv_parm *sp; + int rc; + int err = 0; + + sp = &phba->fc_fabparam; + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) { + err = 1; + goto fail; + } + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + err = 2; + goto fail; + } + + vport->port_state = LPFC_FABRIC_CFG_LINK; + lpfc_config_link(phba, mbox); + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + err = 3; + goto fail_free_mbox; + } + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + err = 4; + goto fail; + } + rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, + ndlp->nlp_rpi); + if (rc) { + err = 5; + goto fail_free_mbox; + } + + mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; + mbox->vport = vport; + /* increment the reference count on ndlp to hold reference + * for the callback routine. + */ + mbox->ctx_ndlp = lpfc_nlp_get(ndlp); + if (!mbox->ctx_ndlp) { + err = 6; + goto fail_free_mbox; + } + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + err = 7; + goto fail_issue_reg_login; + } + + return 0; + +fail_issue_reg_login: + /* decrement the reference count on ndlp just incremented + * for the failed mbox command. + */ + lpfc_nlp_put(ndlp); +fail_free_mbox: + lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); +fail: + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0249 Cannot issue Register Fabric login: Err %d\n", + err); + return -ENXIO; +} + +/** + * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for + * the @vport. This mailbox command is necessary for SLI4 port only. + * + * Return code + * 0 - successfully issued REG_VFI for @vport + * A failure code otherwise. 
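An illustrative condensation (not part of the file) of the asynchronous mailbox pattern used by the registration routine above and repeated throughout this file: allocate from the mailbox mempool, set the completion handler and owning vport, submit with MBX_NOWAIT, and release the mailbox only when submission never started.

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -ENXIO;
	}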
+ **/ +int +lpfc_issue_reg_vfi(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mboxq = NULL; + struct lpfc_nodelist *ndlp; + struct lpfc_dmabuf *dmabuf = NULL; + int rc = 0; + + /* move forward in case of SLI4 FC port loopback test and pt2pt mode */ + if ((phba->sli_rev == LPFC_SLI_REV4) && + !(phba->link_flag & LS_LOOPBACK_MODE) && + !(vport->fc_flag & FC_PT2PT)) { + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) { + rc = -ENODEV; + goto fail; + } + } + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + rc = -ENOMEM; + goto fail; + } + + /* Supply CSP's only if we are fabric connect or pt-to-pt connect */ + if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) { + rc = lpfc_mbox_rsrc_prep(phba, mboxq); + if (rc) { + rc = -ENOMEM; + goto fail_mbox; + } + dmabuf = mboxq->ctx_buf; + memcpy(dmabuf->virt, &phba->fc_fabparam, + sizeof(struct serv_parm)); + } + + vport->port_state = LPFC_FABRIC_CFG_LINK; + if (dmabuf) { + lpfc_reg_vfi(mboxq, vport, dmabuf->phys); + /* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */ + mboxq->ctx_buf = dmabuf; + } else { + lpfc_reg_vfi(mboxq, vport, 0); + } + + mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; + mboxq->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + rc = -ENXIO; + goto fail_mbox; + } + return 0; + +fail_mbox: + lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); +fail: + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0289 Issue Register VFI failed: Err %d\n", rc); + return rc; +} + +/** + * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for + * the @vport. This mailbox command is necessary for SLI4 port only. + * + * Return code + * 0 - successfully issued REG_VFI for @vport + * A failure code otherwise. + **/ +int +lpfc_issue_unreg_vfi(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct Scsi_Host *shost; + LPFC_MBOXQ_t *mboxq; + int rc; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2556 UNREG_VFI mbox allocation failed" + "HBA state x%x\n", phba->pport->port_state); + return -ENOMEM; + } + + lpfc_unreg_vfi(mboxq, vport); + mboxq->vport = vport; + mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl; + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2557 UNREG_VFI issue mbox failed rc x%x " + "HBA state x%x\n", + rc, phba->pport->port_state); + mempool_free(mboxq, phba->mbox_mem_pool); + return -EIO; + } + + shost = lpfc_shost_from_vport(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + return 0; +} + +/** + * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. + * @vport: pointer to a host virtual N_Port data structure. + * @sp: pointer to service parameter data structure. + * + * This routine is called from FLOGI/FDISC completion handler functions. + * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric + * node nodename is changed in the completion service parameter else return + * 0. 
This function also set flag in the vport data structure to delay + * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit + * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric + * node nodename is changed in the completion service parameter. + * + * Return code + * 0 - FCID and Fabric Nodename and Fabric portname is not changed. + * 1 - FCID or Fabric Nodename or Fabric portname is changed. + * + **/ +static uint8_t +lpfc_check_clean_addr_bit(struct lpfc_vport *vport, + struct serv_parm *sp) +{ + struct lpfc_hba *phba = vport->phba; + uint8_t fabric_param_changed = 0; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if ((vport->fc_prevDID != vport->fc_myDID) || + memcmp(&vport->fabric_portname, &sp->portName, + sizeof(struct lpfc_name)) || + memcmp(&vport->fabric_nodename, &sp->nodeName, + sizeof(struct lpfc_name)) || + (vport->vport_flag & FAWWPN_PARAM_CHG)) { + fabric_param_changed = 1; + vport->vport_flag &= ~FAWWPN_PARAM_CHG; + } + /* + * Word 1 Bit 31 in common service parameter is overloaded. + * Word 1 Bit 31 in FLOGI request is multiple NPort request + * Word 1 Bit 31 in FLOGI response is clean address bit + * + * If fabric parameter is changed and clean address bit is + * cleared delay nport discovery if + * - vport->fc_prevDID != 0 (not initial discovery) OR + * - lpfc_delay_discovery module parameter is set. + */ + if (fabric_param_changed && !sp->cmn.clean_address_bit && + (vport->fc_prevDID || phba->cfg_delay_discovery)) { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_DISC_DELAYED; + spin_unlock_irq(shost->host_lock); + } + + return fabric_param_changed; +} + + +/** + * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port + * @vport: pointer to a host virtual N_Port data structure. + * @ndlp: pointer to a node-list data structure. + * @sp: pointer to service parameter data structure. + * @ulp_word4: command response value + * + * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback + * function to handle the completion of a Fabric Login (FLOGI) into a fabric + * port in a fabric topology. It properly sets up the parameters to the @ndlp + * from the IOCB response. It also check the newly assigned N_Port ID to the + * @vport against the previously assigned N_Port ID. If it is different from + * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine + * is invoked on all the remaining nodes with the @vport to unregister the + * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin() + * is invoked to register login to the fabric. 
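An illustrative note (sketch only, made-up values) on the timeout conversions this completion handler performs: E_D_TOV reported with nanosecond resolution is rounded up to whole milliseconds, and the fabric's R_A_TOV in milliseconds is rounded up to whole seconds.

	u32 edtov_ns = 2000001;
	u32 edtov_ms = (edtov_ns + 999999) / 1000000;	/* = 3, rounds up */
	u32 ratov_ms = 10001;
	u32 ratov_s  = (ratov_ms + 999) / 1000;		/* = 11, rounds up */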
+ * + * Return code + * 0 - Success (currently, always return 0) + **/ +static int +lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct serv_parm *sp, uint32_t ulp_word4) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *np; + struct lpfc_nodelist *next_np; + uint8_t fabric_param_changed; + + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_FABRIC; + spin_unlock_irq(shost->host_lock); + + phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov); + if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ + phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000; + + phba->fc_edtovResol = sp->cmn.edtovResolution; + phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; + + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_PUBLIC_LOOP; + spin_unlock_irq(shost->host_lock); + } + + vport->fc_myDID = ulp_word4 & Mask_DID; + memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); + memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name)); + ndlp->nlp_class_sup = 0; + if (sp->cls1.classValid) + ndlp->nlp_class_sup |= FC_COS_CLASS1; + if (sp->cls2.classValid) + ndlp->nlp_class_sup |= FC_COS_CLASS2; + if (sp->cls3.classValid) + ndlp->nlp_class_sup |= FC_COS_CLASS3; + if (sp->cls4.classValid) + ndlp->nlp_class_sup |= FC_COS_CLASS4; + ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | + sp->cmn.bbRcvSizeLsb; + + fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); + if (fabric_param_changed) { + /* Reset FDMI attribute masks based on config parameter */ + if (phba->cfg_enable_SmartSAN || + (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { + /* Setup appropriate attribute masks */ + vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; + if (phba->cfg_enable_SmartSAN) + vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; + else + vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; + } else { + vport->fdmi_hba_mask = 0; + vport->fdmi_port_mask = 0; + } + + } + memcpy(&vport->fabric_portname, &sp->portName, + sizeof(struct lpfc_name)); + memcpy(&vport->fabric_nodename, &sp->nodeName, + sizeof(struct lpfc_name)); + memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); + + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { + if (sp->cmn.response_multiple_NPort) { + lpfc_printf_vlog(vport, KERN_WARNING, + LOG_ELS | LOG_VPORT, + "1816 FLOGI NPIV supported, " + "response data 0x%x\n", + sp->cmn.response_multiple_NPort); + spin_lock_irq(&phba->hbalock); + phba->link_flag |= LS_NPIV_FAB_SUPPORTED; + spin_unlock_irq(&phba->hbalock); + } else { + /* Because we asked f/w for NPIV it still expects us + to call reg_vnpid at least for the physical host */ + lpfc_printf_vlog(vport, KERN_WARNING, + LOG_ELS | LOG_VPORT, + "1817 Fabric does not support NPIV " + "- configuring single port mode.\n"); + spin_lock_irq(&phba->hbalock); + phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; + spin_unlock_irq(&phba->hbalock); + } + } + + /* + * For FC we need to do some special processing because of the SLI + * Port's default settings of the Common Service Parameters. 
+ */ + if ((phba->sli_rev == LPFC_SLI_REV4) && + (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) { + /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ + if (fabric_param_changed) + lpfc_unregister_fcf_prep(phba); + + /* This should just update the VFI CSPs*/ + if (vport->fc_flag & FC_VFI_REGISTERED) + lpfc_issue_reg_vfi(vport); + } + + if (fabric_param_changed && + !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { + + /* If our NportID changed, we need to ensure all + * remaining NPORTs get unreg_login'ed. + */ + list_for_each_entry_safe(np, next_np, + &vport->fc_nodes, nlp_listp) { + if ((np->nlp_state != NLP_STE_NPR_NODE) || + !(np->nlp_flag & NLP_NPR_ADISC)) + continue; + spin_lock_irq(&np->lock); + np->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(&np->lock); + lpfc_unreg_rpi(vport, np); + } + lpfc_cleanup_pending_mbox(vport); + + if (phba->sli_rev == LPFC_SLI_REV4) { + lpfc_sli4_unreg_all_rpis(vport); + lpfc_mbx_unreg_vpi(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + spin_unlock_irq(shost->host_lock); + } + + /* + * For SLI3 and SLI4, the VPI needs to be reregistered in + * response to this fabric parameter change event. + */ + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); + } else if ((phba->sli_rev == LPFC_SLI_REV4) && + !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { + /* + * Driver needs to re-reg VPI in order for f/w + * to update the MAC address. + */ + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + lpfc_register_new_vport(phba, vport, ndlp); + return 0; + } + + if (phba->sli_rev < LPFC_SLI_REV4) { + lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && + vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) + lpfc_register_new_vport(phba, vport, ndlp); + else + lpfc_issue_fabric_reglogin(vport); + } else { + ndlp->nlp_type |= NLP_FABRIC; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) && + (vport->vpi_state & LPFC_VPI_REGISTERED)) { + lpfc_start_fdiscs(phba); + lpfc_do_scr_ns_plogi(phba, vport); + } else if (vport->fc_flag & FC_VFI_REGISTERED) + lpfc_issue_init_vpi(vport); + else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3135 Need register VFI: (x%x/%x)\n", + vport->fc_prevDID, vport->fc_myDID); + lpfc_issue_reg_vfi(vport); + } + } + return 0; +} + +/** + * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port + * @vport: pointer to a host virtual N_Port data structure. + * @ndlp: pointer to a node-list data structure. + * @sp: pointer to service parameter data structure. + * + * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback + * function to handle the completion of a Fabric Login (FLOGI) into an N_Port + * in a point-to-point topology. First, the @vport's N_Port Name is compared + * with the received N_Port Name: if the @vport's N_Port Name is greater than + * the received N_Port Name lexicographically, this node shall assign local + * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and + * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise, + * this node shall just wait for the remote node to issue PLOGI and assign + * N_Port IDs. 
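A small illustrative example (not part of the file, WWPN bytes made up) of the comparison rule described above: port names compare as 8-byte big-endian arrays, and the side whose name compares greater than or equal to the peer's assigns the point-to-point IDs and initiates PLOGI.

	static const u8 local_wwpn[8]  = { 0x20, 0x00, 0x00, 0x10, 0x9b, 0x00, 0x00, 0x01 };
	static const u8 remote_wwpn[8] = { 0x20, 0x00, 0x00, 0x0e, 0x1e, 0x00, 0x00, 0x02 };

	if (memcmp(local_wwpn, remote_wwpn, sizeof(local_wwpn)) >= 0) {
		/* this side uses PT2PT_LocalID (1); the peer will be 2 */
	}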
+ * + * Return code + * 0 - Success + * -ENXIO - Fail + **/ +static int +lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct serv_parm *sp) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mbox; + int rc; + + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + vport->fc_flag |= FC_PT2PT; + spin_unlock_irq(shost->host_lock); + + /* If we are pt2pt with another NPort, force NPIV off! */ + phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; + + /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ + if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { + lpfc_unregister_fcf_prep(phba); + + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + } + + rc = memcmp(&vport->fc_portname, &sp->portName, + sizeof(vport->fc_portname)); + + if (rc >= 0) { + /* This side will initiate the PLOGI */ + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_PT2PT_PLOGI; + spin_unlock_irq(shost->host_lock); + + /* + * N_Port ID cannot be 0, set our Id to LocalID + * the other side will be RemoteID. + */ + + /* not equal */ + if (rc) + vport->fc_myDID = PT2PT_LocalID; + + /* If not registered with a transport, decrement ndlp reference + * count indicating that ndlp can be safely released when other + * references are removed. + */ + if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) + lpfc_nlp_put(ndlp); + + ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); + if (!ndlp) { + /* + * Cannot find existing Fabric ndlp, so allocate a + * new one + */ + ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID); + if (!ndlp) + goto fail; + } + + memcpy(&ndlp->nlp_portname, &sp->portName, + sizeof(struct lpfc_name)); + memcpy(&ndlp->nlp_nodename, &sp->nodeName, + sizeof(struct lpfc_name)); + /* Set state will put ndlp onto node list if not already done */ + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_NPR_2B_DISC; + spin_unlock_irq(&ndlp->lock); + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + goto fail; + + lpfc_config_link(phba, mbox); + + mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; + mbox->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + goto fail; + } + } else { + /* This side will wait for the PLOGI. If not registered with + * a transport, decrement node reference count indicating that + * ndlp can be released when other references are removed. + */ + if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) + lpfc_nlp_put(ndlp); + + /* Start discovery - this should just do CLEAR_LA */ + lpfc_disc_start(vport); + } + + return 0; +fail: + return -ENXIO; +} + +/** + * lpfc_cmpl_els_flogi - Completion callback function for flogi + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine is the top-level completion callback function for issuing + * a Fabric Login (FLOGI) command. If the response IOCB reported error, + * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If + * retry has been made (either immediately or delayed with lpfc_els_retry() + * returning 1), the command IOCB will be released and function returned. 
+ * If the retry attempt has been given up (possibly reach the maximum + * number of retries), one additional decrement of ndlp reference shall be + * invoked before going out after releasing the command IOCB. This will + * actually release the remote node (Note, lpfc_els_free_iocb() will also + * invoke one decrement of ndlp reference count). If no error reported in + * the IOCB status, the command Port ID field is used to determine whether + * this is a point-to-point topology or a fabric topology: if the Port ID + * field is assigned, it is a fabric topology; otherwise, it is a + * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or + * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the + * specific topology completion conditions. + **/ +static void +lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp = cmdiocb->ndlp; + IOCB_t *irsp; + struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp; + struct serv_parm *sp; + uint16_t fcf_index; + int rc; + u32 ulp_status, ulp_word4, tmo; + bool flogi_in_retry = false; + + /* Check to see if link went down during discovery */ + if (lpfc_els_chk_latt(vport)) { + /* One additional decrement on node reference count to + * trigger the release of the node + */ + if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) + lpfc_nlp_put(ndlp); + goto out; + } + + ulp_status = get_job_ulpstatus(phba, rspiocb); + ulp_word4 = get_job_word4(phba, rspiocb); + + if (phba->sli_rev == LPFC_SLI_REV4) { + tmo = get_wqe_tmo(cmdiocb); + } else { + irsp = &rspiocb->iocb; + tmo = irsp->ulpTimeout; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "FLOGI cmpl: status:x%x/x%x state:x%x", + ulp_status, ulp_word4, + vport->port_state); + + if (ulp_status) { + /* + * In case of FIP mode, perform roundrobin FCF failover + * due to new FCF discovery + */ + if ((phba->hba_flag & HBA_FIP_SUPPORT) && + (phba->fcf.fcf_flag & FCF_DISCOVERY)) { + if (phba->link_state < LPFC_LINK_UP) + goto stop_rr_fcf_flogi; + if ((phba->fcoe_cvl_eventtag_attn == + phba->fcoe_cvl_eventtag) && + (ulp_status == IOSTAT_LOCAL_REJECT) && + ((ulp_word4 & IOERR_PARAM_MASK) == + IOERR_SLI_ABORTED)) + goto stop_rr_fcf_flogi; + else + phba->fcoe_cvl_eventtag_attn = + phba->fcoe_cvl_eventtag; + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, + "2611 FLOGI failed on FCF (x%x), " + "status:x%x/x%x, tmo:x%x, perform " + "roundrobin FCF failover\n", + phba->fcf.current_rec.fcf_indx, + ulp_status, ulp_word4, tmo); + lpfc_sli4_set_fcf_flogi_fail(phba, + phba->fcf.current_rec.fcf_indx); + fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); + rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); + if (rc) + goto out; + } + +stop_rr_fcf_flogi: + /* FLOGI failure */ + if (!(ulp_status == IOSTAT_LOCAL_REJECT && + ((ulp_word4 & IOERR_PARAM_MASK) == + IOERR_LOOP_OPEN_FAILURE))) + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2858 FLOGI failure Status:x%x/x%x TMO" + ":x%x Data x%x x%x\n", + ulp_status, ulp_word4, tmo, + phba->hba_flag, phba->fcf.fcf_flag); + + /* Check for retry */ + if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { + /* Address a timing race with dev_loss. If dev_loss + * is active on this FPort node, put the initial ref + * count back to stop premature node release actions. 
+ */ + lpfc_check_nlp_post_devloss(vport, ndlp); + flogi_in_retry = true; + goto out; + } + + /* The FLOGI will not be retried. If the FPort node is not + * registered with the SCSI transport, remove the initial + * reference to trigger node release. + */ + if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) && + !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) + lpfc_nlp_put(ndlp); + + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, + "0150 FLOGI failure Status:x%x/x%x " + "xri x%x TMO:x%x refcnt %d\n", + ulp_status, ulp_word4, cmdiocb->sli4_xritag, + tmo, kref_read(&ndlp->kref)); + + /* If this is not a loop open failure, bail out */ + if (!(ulp_status == IOSTAT_LOCAL_REJECT && + ((ulp_word4 & IOERR_PARAM_MASK) == + IOERR_LOOP_OPEN_FAILURE))) { + /* FLOGI failure */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0100 FLOGI failure Status:x%x/x%x " + "TMO:x%x\n", + ulp_status, ulp_word4, tmo); + goto flogifail; + } + + /* FLOGI failed, so there is no fabric */ + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP | + FC_PT2PT_NO_NVME); + spin_unlock_irq(shost->host_lock); + + /* If private loop, then allow max outstanding els to be + * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no + * alpa map would take too long otherwise. + */ + if (phba->alpa_map[0] == 0) + vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; + if ((phba->sli_rev == LPFC_SLI_REV4) && + (!(vport->fc_flag & FC_VFI_REGISTERED) || + (vport->fc_prevDID != vport->fc_myDID) || + phba->fc_topology_changed)) { + if (vport->fc_flag & FC_VFI_REGISTERED) { + if (phba->fc_topology_changed) { + lpfc_unregister_fcf_prep(phba); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + } else { + lpfc_sli4_unreg_all_rpis(vport); + } + } + + /* Do not register VFI if the driver aborted FLOGI */ + if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) + lpfc_issue_reg_vfi(vport); + + goto out; + } + goto flogifail; + } + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VPORT_CVL_RCVD; + vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; + spin_unlock_irq(shost->host_lock); + + /* + * The FLOGI succeeded. Sync the data for the CPU before + * accessing it. + */ + prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); + if (!prsp) + goto out; + sp = prsp->virt + sizeof(uint32_t); + + /* FLOGI completes successfully */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0101 FLOGI completes successfully, I/O tag:x%x " + "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n", + cmdiocb->iotag, cmdiocb->sli4_xritag, + ulp_word4, sp->cmn.e_d_tov, + sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, + vport->port_state, vport->fc_flag, + sp->cmn.priority_tagging, kref_read(&ndlp->kref)); + + if (sp->cmn.priority_tagging) + vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA | + LPFC_VMID_TYPE_PRIO); + /* reinitialize the VMID datastructure before returning */ + if (lpfc_is_vmid_enabled(phba)) + lpfc_reinit_vmid(vport); + + /* + * Address a timing race with dev_loss. If dev_loss is active on + * this FPort node, put the initial ref count back to stop premature + * node release actions. + */ + lpfc_check_nlp_post_devloss(vport, ndlp); + if (vport->port_state == LPFC_FLOGI) { + /* + * If Common Service Parameters indicate Nport + * we are point to point, if Fport we are Fabric. 
+ */ + if (sp->cmn.fPort) + rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, + ulp_word4); + else if (!(phba->hba_flag & HBA_FCOE_MODE)) + rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); + else { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2831 FLOGI response with cleared Fabric " + "bit fcf_index 0x%x " + "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x " + "Fabric Name " + "%02x%02x%02x%02x%02x%02x%02x%02x\n", + phba->fcf.current_rec.fcf_indx, + phba->fcf.current_rec.switch_name[0], + phba->fcf.current_rec.switch_name[1], + phba->fcf.current_rec.switch_name[2], + phba->fcf.current_rec.switch_name[3], + phba->fcf.current_rec.switch_name[4], + phba->fcf.current_rec.switch_name[5], + phba->fcf.current_rec.switch_name[6], + phba->fcf.current_rec.switch_name[7], + phba->fcf.current_rec.fabric_name[0], + phba->fcf.current_rec.fabric_name[1], + phba->fcf.current_rec.fabric_name[2], + phba->fcf.current_rec.fabric_name[3], + phba->fcf.current_rec.fabric_name[4], + phba->fcf.current_rec.fabric_name[5], + phba->fcf.current_rec.fabric_name[6], + phba->fcf.current_rec.fabric_name[7]); + + lpfc_nlp_put(ndlp); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); + spin_unlock_irq(&phba->hbalock); + phba->fcf.fcf_redisc_attempted = 0; /* reset */ + goto out; + } + if (!rc) { + /* Mark the FCF discovery process done */ + if (phba->hba_flag & HBA_FIP_SUPPORT) + lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | + LOG_ELS, + "2769 FLOGI to FCF (x%x) " + "completed successfully\n", + phba->fcf.current_rec.fcf_indx); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); + spin_unlock_irq(&phba->hbalock); + phba->fcf.fcf_redisc_attempted = 0; /* reset */ + goto out; + } + } else if (vport->port_state > LPFC_FLOGI && + vport->fc_flag & FC_PT2PT) { + /* + * In a p2p topology, it is possible that discovery has + * already progressed, and this completion can be ignored. + * Recheck the indicated topology. + */ + if (!sp->cmn.fPort) + goto out; + } + +flogifail: + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + spin_unlock_irq(&phba->hbalock); + + if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { + /* FLOGI failed, so just use loop map to make discovery list */ + lpfc_disc_list_loopmap(vport); + + /* Start discovery */ + lpfc_disc_start(vport); + } else if (((ulp_status != IOSTAT_LOCAL_REJECT) || + (((ulp_word4 & IOERR_PARAM_MASK) != + IOERR_SLI_ABORTED) && + ((ulp_word4 & IOERR_PARAM_MASK) != + IOERR_SLI_DOWN))) && + (phba->link_state != LPFC_CLEAR_LA)) { + /* If FLOGI failed enable link interrupt. */ + lpfc_issue_clear_la(phba, vport); + } +out: + if (!flogi_in_retry) + phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING; + + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); +} + +/** + * lpfc_cmpl_els_link_down - Completion callback function for ELS command + * aborted during a link down + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. 
+ * + */ +static void +lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + uint32_t *pcmd; + uint32_t cmd; + u32 ulp_status, ulp_word4; + + pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt; + cmd = *pcmd; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + ulp_word4 = get_job_word4(phba, rspiocb); + + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "6445 ELS completes after LINK_DOWN: " + " Status %x/%x cmd x%x flg x%x\n", + ulp_status, ulp_word4, cmd, + cmdiocb->cmd_flag); + + if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) { + cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; + atomic_dec(&phba->fabric_iocb_count); + } + lpfc_els_free_iocb(phba, cmdiocb); +} + +/** + * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport + * @vport: pointer to a host virtual N_Port data structure. + * @ndlp: pointer to a node-list data structure. + * @retry: number of retries to the command IOCB. + * + * This routine issues a Fabric Login (FLOGI) Request ELS command + * for a @vport. The initiator service parameters are put into the payload + * of the FLOGI Request IOCB and the top-level callback function pointer + * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback + * function field. The lpfc_issue_fabric_iocb routine is invoked to send + * out FLOGI ELS command with one outstanding fabric IOCB at a time. + * + * Note that the ndlp reference count will be incremented by 1 for holding the + * ndlp and the reference to ndlp will be stored into the ndlp field of + * the IOCB for the completion callback function to the FLOGI ELS command. + * + * Return code + * 0 - successfully issued flogi iocb for @vport + * 1 - failed to issue flogi iocb for @vport + **/ +static int +lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + struct serv_parm *sp; + union lpfc_wqe128 *wqe = NULL; + IOCB_t *icmd = NULL; + struct lpfc_iocbq *elsiocb; + struct lpfc_iocbq defer_flogi_acc; + u8 *pcmd, ct; + uint16_t cmdsize; + uint32_t tmo, did; + int rc; + + cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_FLOGI); + + if (!elsiocb) + return 1; + + wqe = &elsiocb->wqe; + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + icmd = &elsiocb->iocb; + + /* For FLOGI request, remainder of payload is service parameters */ + *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; + pcmd += sizeof(uint32_t); + memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); + sp = (struct serv_parm *) pcmd; + + /* Setup CSPs accordingly for Fabric */ + sp->cmn.e_d_tov = 0; + sp->cmn.w2.r_a_tov = 0; + sp->cmn.virtual_fabric_support = 0; + sp->cls1.classValid = 0; + if (sp->cmn.fcphLow < FC_PH3) + sp->cmn.fcphLow = FC_PH3; + if (sp->cmn.fcphHigh < FC_PH3) + sp->cmn.fcphHigh = FC_PH3; + + /* Determine if switch supports priority tagging */ + if (phba->cfg_vmid_priority_tagging) { + sp->cmn.priority_tagging = 1; + /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */ + if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0, + sizeof(vport->lpfc_vmid_host_uuid))) { + memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn, + sizeof(phba->wwpn)); + memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn, + sizeof(phba->wwnn)); + } + } + + if (phba->sli_rev == LPFC_SLI_REV4) { + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_0) { + /* FLOGI needs to be 3 for WQE FCFI */ + ct = SLI4_CT_FCFI; + bf_set(wqe_ct, 
&wqe->els_req.wqe_com, ct);
+
+			/* Set the fcfi to the fcfi we registered with */
+			bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+			       phba->fcf.fcfi);
+		}
+
+		/* Can't do SLI4 class2 without support sequence coalescing */
+		sp->cls2.classValid = 0;
+		sp->cls2.seqDelivery = 0;
+	} else {
+		/* Historical, setting sequential-delivery bit for SLI3 */
+		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
+		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
+		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+			sp->cmn.request_multiple_Nport = 1;
+			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
+			icmd->ulpCt_h = 1;
+			icmd->ulpCt_l = 0;
+		} else {
+			sp->cmn.request_multiple_Nport = 0;
+		}
+
+		if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
+			icmd->un.elsreq64.myID = 0;
+			icmd->un.elsreq64.fl = 1;
+		}
+	}
+
+	tmo = phba->fc_ratov;
+	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
+	lpfc_set_disctmo(vport);
+	phba->fc_ratov = tmo;
+
+	phba->fc_stat.elsXmitFLOGI++;
+	elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+			      "Issue FLOGI: opt:x%x",
+			      phba->sli3_options, 0, 0);
+
+	elsiocb->ndlp = lpfc_nlp_get(ndlp);
+	if (!elsiocb->ndlp) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+
+	/* Avoid race with FLOGI completion and hba_flags. */
+	phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
+
+	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
+	if (rc == IOCB_ERROR) {
+		phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
+		lpfc_els_free_iocb(phba, elsiocb);
+		lpfc_nlp_put(ndlp);
+		return 1;
+	}
+
+	/* Clear external loopback plug detected flag */
+	phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
+
+	/* Check for a deferred FLOGI ACC condition */
+	if (phba->defer_flogi_acc_flag) {
+		/* lookup ndlp for received FLOGI */
+		ndlp = lpfc_findnode_did(vport, 0);
+		if (!ndlp)
+			return 0;
+
+		did = vport->fc_myDID;
+		vport->fc_myDID = Fabric_DID;
+
+		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
+
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			bf_set(wqe_ctxt_tag,
+			       &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
+			       phba->defer_flogi_acc_rx_id);
+			bf_set(wqe_rcvoxid,
+			       &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
+			       phba->defer_flogi_acc_ox_id);
+		} else {
+			icmd = &defer_flogi_acc.iocb;
+			icmd->ulpContext = phba->defer_flogi_acc_rx_id;
+			icmd->unsli3.rcvsli3.ox_id =
+				phba->defer_flogi_acc_ox_id;
+		}
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
+				 " ox_id: x%x, hba_flag x%x\n",
+				 phba->defer_flogi_acc_rx_id,
+				 phba->defer_flogi_acc_ox_id, phba->hba_flag);
+
+		/* Send deferred FLOGI ACC */
+		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
+				 ndlp, NULL);
+
+		phba->defer_flogi_acc_flag = false;
+		vport->fc_myDID = did;
+
+		/* Decrement ndlp reference count to indicate the node can be
+		 * released when other references are removed.
+		 */
+		lpfc_nlp_put(ndlp);
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
+ * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
+ * list and issues an abort IOCB command on each outstanding IOCB that
+ * contains an active Fabric_DID ndlp. Note that this function is to issue
+ * the abort IOCB command on all the outstanding IOCBs, thus when this
+ * function returns, it does not guarantee all the IOCBs are actually aborted.
+ * + * Return code + * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0) + **/ +int +lpfc_els_abort_flogi(struct lpfc_hba *phba) +{ + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *iocb, *next_iocb; + struct lpfc_nodelist *ndlp; + u32 ulp_command; + + /* Abort outstanding I/O on NPort */ + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "0201 Abort outstanding I/O on NPort x%x\n", + Fabric_DID); + + pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return -EIO; + + /* + * Check the txcmplq for an iocb that matches the nport the driver is + * searching for. + */ + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { + ulp_command = get_job_cmnd(phba, iocb); + if (ulp_command == CMD_ELS_REQUEST64_CR) { + ndlp = iocb->ndlp; + if (ndlp && ndlp->nlp_DID == Fabric_DID) { + if ((phba->pport->fc_flag & FC_PT2PT) && + !(phba->pport->fc_flag & FC_PT2PT_PLOGI)) + iocb->fabric_cmd_cmpl = + lpfc_ignore_els_cmpl; + lpfc_sli_issue_abort_iotag(phba, pring, iocb, + NULL); + } + } + } + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + + spin_unlock_irq(&phba->hbalock); + + return 0; +} + +/** + * lpfc_initial_flogi - Issue an initial fabric login for a vport + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine issues an initial Fabric Login (FLOGI) for the @vport + * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from + * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and + * put it into the @vport's ndlp list. If an inactive ndlp found on the list, + * it will just be enabled and made active. The lpfc_issue_els_flogi() routine + * is then invoked with the @vport and the ndlp to perform the FLOGI for the + * @vport. + * + * Return code + * 0 - failed to issue initial flogi for @vport + * 1 - successfully issued initial flogi for @vport + **/ +int +lpfc_initial_flogi(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp; + + vport->port_state = LPFC_FLOGI; + lpfc_set_disctmo(vport); + + /* First look for the Fabric ndlp */ + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) { + /* Cannot find existing Fabric ndlp, so allocate a new one */ + ndlp = lpfc_nlp_init(vport, Fabric_DID); + if (!ndlp) + return 0; + /* Set the node type */ + ndlp->nlp_type |= NLP_FABRIC; + + /* Put ndlp onto node list */ + lpfc_enqueue_node(vport, ndlp); + } + + /* Reset the Fabric flag, topology change may have happened */ + vport->fc_flag &= ~FC_FABRIC; + if (lpfc_issue_els_flogi(vport, ndlp, 0)) { + /* A node reference should be retained while registered with a + * transport or dev-loss-evt work is pending. + * Otherwise, decrement node reference to trigger release. + */ + if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && + !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + lpfc_nlp_put(ndlp); + return 0; + } + return 1; +} + +/** + * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine issues an initial Fabric Discover (FDISC) for the @vport + * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from + * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and + * put it into the @vport's ndlp list. If an inactive ndlp found on the list, + * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine + * is then invoked with the @vport and the ndlp to perform the FDISC for the + * @vport. 
+ * + * Return code + * 0 - failed to issue initial fdisc for @vport + * 1 - successfully issued initial fdisc for @vport + **/ +int +lpfc_initial_fdisc(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp; + + /* First look for the Fabric ndlp */ + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) { + /* Cannot find existing Fabric ndlp, so allocate a new one */ + ndlp = lpfc_nlp_init(vport, Fabric_DID); + if (!ndlp) + return 0; + + /* NPIV is only supported in Fabrics. */ + ndlp->nlp_type |= NLP_FABRIC; + + /* Put ndlp onto node list */ + lpfc_enqueue_node(vport, ndlp); + } + + if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { + /* A node reference should be retained while registered with a + * transport or dev-loss-evt work is pending. + * Otherwise, decrement node reference to trigger release. + */ + if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && + !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + lpfc_nlp_put(ndlp); + return 0; + } + return 1; +} + +/** + * lpfc_more_plogi - Check and issue remaining plogis for a vport + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine checks whether there are more remaining Port Logins + * (PLOGI) to be issued for the @vport. If so, it will invoke the routine + * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes + * to issue ELS PLOGIs up to the configured discover threads with the + * @vport (@vport->cfg_discovery_threads). The function also decrement + * the @vport's num_disc_node by 1 if it is not already 0. + **/ +void +lpfc_more_plogi(struct lpfc_vport *vport) +{ + if (vport->num_disc_nodes) + vport->num_disc_nodes--; + + /* Continue discovery with PLOGIs to go */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0232 Continue discovery with %d PLOGIs to go " + "Data: x%x x%x x%x\n", + vport->num_disc_nodes, vport->fc_plogi_cnt, + vport->fc_flag, vport->port_state); + /* Check to see if there are more PLOGIs to be sent */ + if (vport->fc_flag & FC_NLP_MORE) + /* go thru NPR nodes and issue any remaining ELS PLOGIs */ + lpfc_els_disc_plogi(vport); + + return; +} + +/** + * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp + * @phba: pointer to lpfc hba data structure. + * @prsp: pointer to response IOCB payload. + * @ndlp: pointer to a node-list data structure. + * + * This routine checks and indicates whether the WWPN of an N_Port, retrieved + * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt. + * The following cases are considered N_Port confirmed: + * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches + * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but + * it does not have WWPN assigned either. If the WWPN is confirmed, the + * pointer to the @ndlp will be returned. If the WWPN is not confirmed: + * 1) if there is a node on vport list other than the @ndlp with the same + * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked + * on that node to release the RPI associated with the node; 2) if there is + * no node found on vport list with the same WWPN of the N_Port PLOGI logged + * into, a new node shall be allocated (or activated). In either case, the + * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall + * be released and the new_ndlp shall be put on to the vport node list and + * its pointer returned as the confirmed node. 
+ * + * Note that before the @ndlp got "released", the keepDID from not-matching + * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID + * of the @ndlp. This is because the release of @ndlp is actually to put it + * into an inactive state on the vport node list and the vport node list + * management algorithm does not allow two node with a same DID. + * + * Return code + * pointer to the PLOGI N_Port @ndlp + **/ +static struct lpfc_nodelist * +lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_vport *vport = ndlp->vport; + struct lpfc_nodelist *new_ndlp; + struct serv_parm *sp; + uint8_t name[sizeof(struct lpfc_name)]; + uint32_t keepDID = 0, keep_nlp_flag = 0; + uint32_t keep_new_nlp_flag = 0; + uint16_t keep_nlp_state; + u32 keep_nlp_fc4_type = 0; + struct lpfc_nvme_rport *keep_nrport = NULL; + unsigned long *active_rrqs_xri_bitmap = NULL; + + /* Fabric nodes can have the same WWPN so we don't bother searching + * by WWPN. Just return the ndlp that was given to us. + */ + if (ndlp->nlp_type & NLP_FABRIC) + return ndlp; + + sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); + memset(name, 0, sizeof(struct lpfc_name)); + + /* Now we find out if the NPort we are logging into, matches the WWPN + * we have for that ndlp. If not, we have some work to do. + */ + new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); + + /* return immediately if the WWPN matches ndlp */ + if (!new_ndlp || (new_ndlp == ndlp)) + return ndlp; + + /* + * Unregister from backend if not done yet. Could have been skipped + * due to ADISC + */ + lpfc_nlp_unreg_node(vport, new_ndlp); + + if (phba->sli_rev == LPFC_SLI_REV4) { + active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, + GFP_KERNEL); + if (active_rrqs_xri_bitmap) + memset(active_rrqs_xri_bitmap, 0, + phba->cfg_rrq_xri_bitmap_sz); + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, + "3178 PLOGI confirm: ndlp x%x x%x x%x: " + "new_ndlp x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, + (new_ndlp ? new_ndlp->nlp_DID : 0), + (new_ndlp ? new_ndlp->nlp_flag : 0), + (new_ndlp ? new_ndlp->nlp_fc4_type : 0)); + + keepDID = new_ndlp->nlp_DID; + + if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap) + memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap, + phba->cfg_rrq_xri_bitmap_sz); + + /* At this point in this routine, we know new_ndlp will be + * returned. however, any previous GID_FTs that were done + * would have updated nlp_fc4_type in ndlp, so we must ensure + * new_ndlp has the right value. 
+ */ + if (vport->fc_flag & FC_FABRIC) { + keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; + new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; + } + + lpfc_unreg_rpi(vport, new_ndlp); + new_ndlp->nlp_DID = ndlp->nlp_DID; + new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; + if (phba->sli_rev == LPFC_SLI_REV4) + memcpy(new_ndlp->active_rrqs_xri_bitmap, + ndlp->active_rrqs_xri_bitmap, + phba->cfg_rrq_xri_bitmap_sz); + + /* Lock both ndlps */ + spin_lock_irq(&ndlp->lock); + spin_lock_irq(&new_ndlp->lock); + keep_new_nlp_flag = new_ndlp->nlp_flag; + keep_nlp_flag = ndlp->nlp_flag; + new_ndlp->nlp_flag = ndlp->nlp_flag; + + /* if new_ndlp had NLP_UNREG_INP set, keep it */ + if (keep_new_nlp_flag & NLP_UNREG_INP) + new_ndlp->nlp_flag |= NLP_UNREG_INP; + else + new_ndlp->nlp_flag &= ~NLP_UNREG_INP; + + /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ + if (keep_new_nlp_flag & NLP_RPI_REGISTERED) + new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; + else + new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; + + /* + * Retain the DROPPED flag. This will take care of the init + * refcount when affecting the state change + */ + if (keep_new_nlp_flag & NLP_DROPPED) + new_ndlp->nlp_flag |= NLP_DROPPED; + else + new_ndlp->nlp_flag &= ~NLP_DROPPED; + + ndlp->nlp_flag = keep_new_nlp_flag; + + /* if ndlp had NLP_UNREG_INP set, keep it */ + if (keep_nlp_flag & NLP_UNREG_INP) + ndlp->nlp_flag |= NLP_UNREG_INP; + else + ndlp->nlp_flag &= ~NLP_UNREG_INP; + + /* if ndlp had NLP_RPI_REGISTERED set, keep it */ + if (keep_nlp_flag & NLP_RPI_REGISTERED) + ndlp->nlp_flag |= NLP_RPI_REGISTERED; + else + ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; + + /* + * Retain the DROPPED flag. This will take care of the init + * refcount when affecting the state change + */ + if (keep_nlp_flag & NLP_DROPPED) + ndlp->nlp_flag |= NLP_DROPPED; + else + ndlp->nlp_flag &= ~NLP_DROPPED; + + spin_unlock_irq(&new_ndlp->lock); + spin_unlock_irq(&ndlp->lock); + + /* Set nlp_states accordingly */ + keep_nlp_state = new_ndlp->nlp_state; + lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); + + /* interchange the nvme remoteport structs */ + keep_nrport = new_ndlp->nrport; + new_ndlp->nrport = ndlp->nrport; + + /* Move this back to NPR state */ + if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { + /* The ndlp doesn't have a portname yet, but does have an + * NPort ID. The new_ndlp portname matches the Rport's + * portname. Reinstantiate the new_ndlp and reset the ndlp. + */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3179 PLOGI confirm NEW: %x %x\n", + new_ndlp->nlp_DID, keepDID); + + /* Two ndlps cannot have the same did on the nodelist. + * The KeepDID and keep_nlp_fc4_type need to be swapped + * because ndlp is inflight with no WWPN. + */ + ndlp->nlp_DID = keepDID; + ndlp->nlp_fc4_type = keep_nlp_fc4_type; + lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); + if (phba->sli_rev == LPFC_SLI_REV4 && + active_rrqs_xri_bitmap) + memcpy(ndlp->active_rrqs_xri_bitmap, + active_rrqs_xri_bitmap, + phba->cfg_rrq_xri_bitmap_sz); + + } else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3180 PLOGI confirm SWAP: %x %x\n", + new_ndlp->nlp_DID, keepDID); + + lpfc_unreg_rpi(vport, ndlp); + + /* The ndlp and new_ndlp both have WWPNs but are swapping + * NPort Ids and attributes. 
+ */ + ndlp->nlp_DID = keepDID; + ndlp->nlp_fc4_type = keep_nlp_fc4_type; + + if (phba->sli_rev == LPFC_SLI_REV4 && + active_rrqs_xri_bitmap) + memcpy(ndlp->active_rrqs_xri_bitmap, + active_rrqs_xri_bitmap, + phba->cfg_rrq_xri_bitmap_sz); + + /* Since we are switching over to the new_ndlp, + * reset the old ndlp state + */ + if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || + (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) + keep_nlp_state = NLP_STE_NPR_NODE; + lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); + ndlp->nrport = keep_nrport; + } + + /* + * If ndlp is not associated with any rport we can drop it here else + * let dev_loss_tmo_callbk trigger DEVICE_RM event + */ + if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) + lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); + + if (phba->sli_rev == LPFC_SLI_REV4 && + active_rrqs_xri_bitmap) + mempool_free(active_rrqs_xri_bitmap, + phba->active_rrq_pool); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, + "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", + new_ndlp->nlp_DID, new_ndlp->nlp_flag, + new_ndlp->nlp_fc4_type); + + return new_ndlp; +} + +/** + * lpfc_end_rscn - Check and handle more rscn for a vport + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine checks whether more Registration State Change + * Notifications (RSCNs) came in while the discovery state machine was in + * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be + * invoked to handle the additional RSCNs for the @vport. Otherwise, the + * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of + * handling the RSCNs. + **/ +void +lpfc_end_rscn(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (vport->fc_flag & FC_RSCN_MODE) { + /* + * Check to see if more RSCNs came in while we were + * processing this one. + */ + if (vport->fc_rscn_id_cnt || + (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) + lpfc_els_handle_rscn(vport); + else { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_RSCN_MODE; + spin_unlock_irq(shost->host_lock); + } + } +} + +/** + * lpfc_cmpl_els_rrq - Completion handled for els RRQs. + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine will call the clear rrq function to free the rrq and + * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not + * exist then the clear_rrq is still called because the rrq needs to + * be freed. 
+ **/
+
+static void
+lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+	struct lpfc_node_rrq *rrq;
+	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+	u32 ulp_word4 = get_job_word4(phba, rspiocb);
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	rrq = cmdiocb->context_un.rrq;
+	cmdiocb->rsp_iocb = rspiocb;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+			      "RRQ cmpl: status:x%x/x%x did:x%x",
+			      ulp_status, ulp_word4,
+			      get_job_els_rsp64_did(phba, cmdiocb));
+
+
+	/* rrq completes to NPort */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "2880 RRQ completes to DID x%x "
+			 "Data: x%x x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, ulp_status, ulp_word4,
+			 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid);
+
+	if (ulp_status) {
+		/* Check for retry */
+		/* RRQ failed Don't print the vport to vport rjts */
+		if (ulp_status != IOSTAT_LS_RJT ||
+		    (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
+		     ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
+		    (phba)->pport->cfg_log_verbose & LOG_ELS)
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+					 "2881 RRQ failure DID:%06X Status:"
+					 "x%x/x%x\n",
+					 ndlp->nlp_DID, ulp_status,
+					 ulp_word4);
+	}
+
+	lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+	lpfc_els_free_iocb(phba, cmdiocb);
+	lpfc_nlp_put(ndlp);
+	return;
+}
+/**
+ * lpfc_cmpl_els_plogi - Completion callback function for plogi
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for issuing the Port
+ * Login (PLOGI) command. For PLOGI completion, there must be an active
+ * ndlp on the vport node list that matches the remote node ID from the
+ * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
+ * ignored and command IOCB released. The PLOGI response IOCB status is
+ * checked for error conditions. If there is error status reported, PLOGI
+ * retry shall be attempted by invoking the lpfc_els_retry() routine.
+ * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
+ * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
+ * (DSM) is set for this PLOGI completion. Finally, it checks whether
+ * there are additional N_Port nodes with the vport that need to perform
+ * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
+ * PLOGIs.
+ **/ +static void +lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + IOCB_t *irsp; + struct lpfc_nodelist *ndlp, *free_ndlp; + struct lpfc_dmabuf *prsp; + int disc; + struct serv_parm *sp = NULL; + u32 ulp_status, ulp_word4, did, iotag; + bool release_node = false; + + /* we pass cmdiocb to state machine which needs rspiocb as well */ + cmdiocb->rsp_iocb = rspiocb; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + ulp_word4 = get_job_word4(phba, rspiocb); + did = get_job_els_rsp64_did(phba, cmdiocb); + + if (phba->sli_rev == LPFC_SLI_REV4) { + iotag = get_wqe_reqtag(cmdiocb); + } else { + irsp = &rspiocb->iocb; + iotag = irsp->ulpIoTag; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "PLOGI cmpl: status:x%x/x%x did:x%x", + ulp_status, ulp_word4, did); + + ndlp = lpfc_findnode_did(vport, did); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0136 PLOGI completes to NPort x%x " + "with no ndlp. Data: x%x x%x x%x\n", + did, ulp_status, ulp_word4, iotag); + goto out_freeiocb; + } + + /* Since ndlp can be freed in the disc state machine, note if this node + * is being used during discovery. + */ + spin_lock_irq(&ndlp->lock); + disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + spin_unlock_irq(&ndlp->lock); + + /* PLOGI completes to NPort */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0102 PLOGI completes to NPort x%06x " + "Data: x%x x%x x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_fc4_type, + ulp_status, ulp_word4, + disc, vport->num_disc_nodes); + + /* Check to see if link went down during discovery */ + if (lpfc_els_chk_latt(vport)) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_NPR_2B_DISC; + spin_unlock_irq(&ndlp->lock); + goto out; + } + + if (ulp_status) { + /* Check for retry */ + if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { + /* ELS command is being retried */ + if (disc) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_NPR_2B_DISC; + spin_unlock_irq(&ndlp->lock); + } + goto out; + } + /* PLOGI failed Don't print the vport to vport rjts */ + if (ulp_status != IOSTAT_LS_RJT || + (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && + ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || + (phba)->pport->cfg_log_verbose & LOG_ELS) + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2753 PLOGI failure DID:%06X " + "Status:x%x/x%x\n", + ndlp->nlp_DID, ulp_status, + ulp_word4); + + /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ + if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) + lpfc_disc_state_machine(vport, ndlp, cmdiocb, + NLP_EVT_CMPL_PLOGI); + + /* If a PLOGI collision occurred, the node needs to continue + * with the reglogin process. + */ + spin_lock_irq(&ndlp->lock); + if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && + ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { + spin_unlock_irq(&ndlp->lock); + goto out; + } + + /* No PLOGI collision and the node is not registered with the + * scsi or nvme transport. It is no longer an active node. Just + * start the device remove process. 
+ */ + if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + release_node = true; + } + spin_unlock_irq(&ndlp->lock); + + if (release_node) + lpfc_disc_state_machine(vport, ndlp, cmdiocb, + NLP_EVT_DEVICE_RM); + } else { + /* Good status, call state machine */ + prsp = list_entry(cmdiocb->cmd_dmabuf->list.next, + struct lpfc_dmabuf, list); + ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); + + sp = (struct serv_parm *)((u8 *)prsp->virt + + sizeof(u32)); + + ndlp->vmid_support = 0; + if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || + (phba->cfg_vmid_priority_tagging && + sp->cmn.priority_tagging)) { + lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, + "4018 app_hdr_support %d tagging %d DID x%x\n", + sp->cmn.app_hdr_support, + sp->cmn.priority_tagging, + ndlp->nlp_DID); + /* if the dest port supports VMID, mark it in ndlp */ + ndlp->vmid_support = 1; + } + + lpfc_disc_state_machine(vport, ndlp, cmdiocb, + NLP_EVT_CMPL_PLOGI); + } + + if (disc && vport->num_disc_nodes) { + /* Check to see if there are more PLOGIs to be sent */ + lpfc_more_plogi(vport); + + if (vport->num_disc_nodes == 0) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(shost->host_lock); + + lpfc_can_disctmo(vport); + lpfc_end_rscn(vport); + } + } + +out: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, + "PLOGI Cmpl PUT: did:x%x refcnt %d", + ndlp->nlp_DID, kref_read(&ndlp->kref), 0); + +out_freeiocb: + /* Release the reference on the original I/O request. */ + free_ndlp = cmdiocb->ndlp; + + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(free_ndlp); + return; +} + +/** + * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport + * @vport: pointer to a host virtual N_Port data structure. + * @did: destination port identifier. + * @retry: number of retries to the command IOCB. + * + * This routine issues a Port Login (PLOGI) command to a remote N_Port + * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, + * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. + * This routine constructs the proper fields of the PLOGI IOCB and invokes + * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. + * + * Note that the ndlp reference count will be incremented by 1 for holding + * the ndlp and the reference to ndlp will be stored into the ndlp field + * of the IOCB for the completion callback function to the PLOGI ELS command. + * + * Return code + * 0 - Successfully issued a plogi for @vport + * 1 - failed to issue a plogi for @vport + **/ +int +lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + struct serv_parm *sp; + struct lpfc_nodelist *ndlp; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + uint16_t cmdsize; + int ret; + + ndlp = lpfc_findnode_did(vport, did); + if (!ndlp) + return 1; + + /* Defer the processing of the issue PLOGI until after the + * outstanding UNREG_RPI mbox command completes, unless we + * are going offline. 
This logic does not apply for Fabric DIDs + */ + if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) && + ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && + !(vport->fc_flag & FC_OFFLINE_MODE)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "4110 Issue PLOGI x%x deferred " + "on NPort x%x rpi x%x flg x%x Data:" + " x%px\n", + ndlp->nlp_defer_did, ndlp->nlp_DID, + ndlp->nlp_rpi, ndlp->nlp_flag, ndlp); + + /* We can only defer 1st PLOGI */ + if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) + ndlp->nlp_defer_did = did; + return 0; + } + + cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, + ELS_CMD_PLOGI); + if (!elsiocb) + return 1; + + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + + /* For PLOGI request, remainder of payload is service parameters */ + *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; + pcmd += sizeof(uint32_t); + memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); + sp = (struct serv_parm *) pcmd; + + /* + * If we are a N-port connected to a Fabric, fix-up paramm's so logins + * to device on remote loops work. + */ + if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) + sp->cmn.altBbCredit = 1; + + if (sp->cmn.fcphLow < FC_PH_4_3) + sp->cmn.fcphLow = FC_PH_4_3; + + if (sp->cmn.fcphHigh < FC_PH3) + sp->cmn.fcphHigh = FC_PH3; + + sp->cmn.valid_vendor_ver_level = 0; + memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); + sp->cmn.bbRcvSizeMsb &= 0xF; + + /* Check if the destination port supports VMID */ + ndlp->vmid_support = 0; + if (vport->vmid_priority_tagging) + sp->cmn.priority_tagging = 1; + else if (phba->cfg_vmid_app_header && + bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) + sp->cmn.app_hdr_support = 1; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue PLOGI: did:x%x", + did, 0, 0); + + /* If our firmware supports this feature, convey that + * information to the target using the vendor specific field. + */ + if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { + sp->cmn.valid_vendor_ver_level = 1; + sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); + sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); + } + + phba->fc_stat.elsXmitPLOGI++; + elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue PLOGI: did:x%x refcnt %d", + did, kref_read(&ndlp->kref), 0); + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + + ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (ret) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return 1; + } + + return 0; +} + +/** + * lpfc_cmpl_els_prli - Completion callback function for prli + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine is the completion callback function for a Process Login + * (PRLI) ELS command. The PRLI response IOCB status is checked for error + * status. If there is error status reported, PRLI retry shall be attempted + * by invoking the lpfc_els_retry() routine. Otherwise, the state + * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this + * ndlp to mark the PRLI completion. 
+ **/ +static void +lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct lpfc_nodelist *ndlp; + char *mode; + u32 loglevel; + u32 ulp_status; + u32 ulp_word4; + bool release_node = false; + + /* we pass cmdiocb to state machine which needs rspiocb as well */ + cmdiocb->rsp_iocb = rspiocb; + + ndlp = cmdiocb->ndlp; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + ulp_word4 = get_job_word4(phba, rspiocb); + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_PRLI_SND; + + /* Driver supports multiple FC4 types. Counters matter. */ + vport->fc_prli_sent--; + ndlp->fc4_prli_sent--; + spin_unlock_irq(&ndlp->lock); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "PRLI cmpl: status:x%x/x%x did:x%x", + ulp_status, ulp_word4, + ndlp->nlp_DID); + + /* PRLI completes to NPort */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0103 PRLI completes to NPort x%06x " + "Data: x%x x%x x%x x%x\n", + ndlp->nlp_DID, ulp_status, ulp_word4, + vport->num_disc_nodes, ndlp->fc4_prli_sent); + + /* Check to see if link went down during discovery */ + if (lpfc_els_chk_latt(vport)) + goto out; + + if (ulp_status) { + /* Check for retry */ + if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { + /* ELS command is being retried */ + goto out; + } + + /* If we don't send GFT_ID to Fabric, a PRLI error + * could be expected. + */ + if ((vport->fc_flag & FC_FABRIC) || + (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { + mode = KERN_ERR; + loglevel = LOG_TRACE_EVENT; + } else { + mode = KERN_INFO; + loglevel = LOG_ELS; + } + + /* PRLI failed */ + lpfc_printf_vlog(vport, mode, loglevel, + "2754 PRLI failure DID:%06X Status:x%x/x%x, " + "data: x%x x%x x%x\n", + ndlp->nlp_DID, ulp_status, + ulp_word4, ndlp->nlp_state, + ndlp->fc4_prli_sent, ndlp->nlp_flag); + + /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ + if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) + lpfc_disc_state_machine(vport, ndlp, cmdiocb, + NLP_EVT_CMPL_PRLI); + + /* The following condition catches an inflight transition + * mismatch typically caused by an RSCN. Skip any + * processing to allow recovery. + */ + if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && + ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) || + (ndlp->nlp_state == NLP_STE_NPR_NODE && + ndlp->nlp_flag & NLP_DELAY_TMO)) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, + "2784 PRLI cmpl: Allow Node recovery " + "DID x%06x nstate x%x nflag x%x\n", + ndlp->nlp_DID, ndlp->nlp_state, + ndlp->nlp_flag); + goto out; + } + + /* + * For P2P topology, retain the node so that PLOGI can be + * attempted on it again. + */ + if (vport->fc_flag & FC_PT2PT) + goto out; + + /* As long as this node is not registered with the SCSI + * or NVMe transport and no other PRLIs are outstanding, + * it is no longer an active node. Otherwise devloss + * handles the final cleanup. + */ + spin_lock_irq(&ndlp->lock); + if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && + !ndlp->fc4_prli_sent) { + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + release_node = true; + } + spin_unlock_irq(&ndlp->lock); + + if (release_node) + lpfc_disc_state_machine(vport, ndlp, cmdiocb, + NLP_EVT_DEVICE_RM); + } else { + /* Good status, call state machine. However, if another + * PRLI is outstanding, don't call the state machine + * because final disposition to Mapped or Unmapped is + * completed there. 
+	 */
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					NLP_EVT_CMPL_PRLI);
+	}
+
+out:
+	lpfc_els_free_iocb(phba, cmdiocb);
+	lpfc_nlp_put(ndlp);
+	return;
+}
+
+/**
+ * lpfc_issue_els_prli - Issue a prli iocb command for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Process Login (PRLI) ELS command for the
+ * @vport. The PRLI service parameters are set up in the payload of the
+ * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
+ * is put to the IOCB completion callback func field before invoking the
+ * routine lpfc_sli_issue_iocb() to send out PRLI command.
+ *
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the PRLI ELS command.
+ *
+ * Return code
+ * 0 - successfully issued prli iocb command for @vport
+ * 1 - failed to issue prli iocb command for @vport
+ **/
+int
+lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		    uint8_t retry)
+{
+	int rc = 0;
+	struct lpfc_hba *phba = vport->phba;
+	PRLI *npr;
+	struct lpfc_nvme_prli *npr_nvme;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	u32 local_nlp_type, elscmd;
+
+	/*
+	 * If we are in RSCN mode, the FC4 types supported from a
+	 * previous GFT_ID command may not be accurate. So, if we
+	 * are an NVME Initiator, always look for the possibility of
+	 * the remote NPort being an NVME Target.
+	 */
+	if (phba->sli_rev == LPFC_SLI_REV4 &&
+	    vport->fc_flag & FC_RSCN_MODE &&
+	    vport->nvmei_support)
+		ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+	local_nlp_type = ndlp->nlp_fc4_type;
+
+	/* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
+	 * fields here before any of them can complete.
+	 */
+	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+	ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+	ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC);
+	ndlp->nvme_fb_size = 0;
+
+ send_next_prli:
+	if (local_nlp_type & NLP_FC4_FCP) {
+		/* Payload is 4 + 16 = 20 x14 bytes. */
+		cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
+		elscmd = ELS_CMD_PRLI;
+	} else if (local_nlp_type & NLP_FC4_NVME) {
+		/* Payload is 4 + 20 = 24 x18 bytes. */
+		cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli));
+		elscmd = ELS_CMD_NVMEPRLI;
+	} else {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "3083 Unknown FC_TYPE x%x ndlp x%06x\n",
+				 ndlp->nlp_fc4_type, ndlp->nlp_DID);
+		return 1;
+	}
+
+	/* SLI3 ports don't support NVME. If this rport is a strict NVME
+	 * FC4 type, implicitly LOGO.
+	 */
+	if (phba->sli_rev == LPFC_SLI_REV3 &&
+	    ndlp->nlp_fc4_type == NLP_FC4_NVME) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
+				 ndlp->nlp_type);
+		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+		return 1;
+	}
+
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, elscmd);
+	if (!elsiocb)
+		return 1;
+
+	pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
+
+	/* For PRLI request, remainder of payload is service parameters */
+	memset(pcmd, 0, cmdsize);
+
+	if (local_nlp_type & NLP_FC4_FCP) {
+		/* Remainder of payload is FCP PRLI parameter page.
+ * Note: this data structure is defined as + * BE/LE in the structure definition so no + * byte swap call is made. + */ + *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; + pcmd += sizeof(uint32_t); + npr = (PRLI *)pcmd; + + /* + * If our firmware version is 3.20 or later, + * set the following bits for FC-TAPE support. + */ + if (phba->vpd.rev.feaLevelHigh >= 0x02) { + npr->ConfmComplAllowed = 1; + npr->Retry = 1; + npr->TaskRetryIdReq = 1; + } + npr->estabImagePair = 1; + npr->readXferRdyDis = 1; + if (vport->cfg_first_burst_size) + npr->writeXferRdyDis = 1; + + /* For FCP support */ + npr->prliType = PRLI_FCP_TYPE; + npr->initiatorFunc = 1; + elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ; + + /* Remove FCP type - processed. */ + local_nlp_type &= ~NLP_FC4_FCP; + } else if (local_nlp_type & NLP_FC4_NVME) { + /* Remainder of payload is NVME PRLI parameter page. + * This data structure is the newer definition that + * uses bf macros so a byte swap is required. + */ + *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; + pcmd += sizeof(uint32_t); + npr_nvme = (struct lpfc_nvme_prli *)pcmd; + bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); + bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ + if (phba->nsler) { + bf_set(prli_nsler, npr_nvme, 1); + bf_set(prli_conf, npr_nvme, 1); + } + + /* Only initiators request first burst. */ + if ((phba->cfg_nvme_enable_fb) && + !phba->nvmet_support) + bf_set(prli_fba, npr_nvme, 1); + + if (phba->nvmet_support) { + bf_set(prli_tgt, npr_nvme, 1); + bf_set(prli_disc, npr_nvme, 1); + } else { + bf_set(prli_init, npr_nvme, 1); + bf_set(prli_conf, npr_nvme, 1); + } + + npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); + npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); + elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ; + + /* Remove NVME type - processed. */ + local_nlp_type &= ~NLP_FC4_NVME; + } + + phba->fc_stat.elsXmitPRLI++; + elsiocb->cmd_cmpl = lpfc_cmpl_els_prli; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue PRLI: did:x%x refcnt %d", + ndlp->nlp_DID, kref_read(&ndlp->kref), 0); + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return 1; + } + + /* The vport counters are used for lpfc_scan_finished, but + * the ndlp is used to track outstanding PRLIs for different + * FC4 types. + */ + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_PRLI_SND; + vport->fc_prli_sent++; + ndlp->fc4_prli_sent++; + spin_unlock_irq(&ndlp->lock); + + /* The driver supports 2 FC4 types. Make sure + * a PRLI is issued for all types before exiting. + */ + if (phba->sli_rev == LPFC_SLI_REV4 && + local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) + goto send_next_prli; + else + return 0; +} + +/** + * lpfc_rscn_disc - Perform rscn discovery for a vport + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine performs Registration State Change Notification (RSCN) + * discovery for a @vport. If the @vport's node port recovery count is not + * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all + * the nodes that need recovery. If none of the PLOGI were needed through + * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be + * invoked to check and handle possible more RSCN came in during the period + * of processing the current ones. 
+ **/ +static void +lpfc_rscn_disc(struct lpfc_vport *vport) +{ + lpfc_can_disctmo(vport); + + /* RSCN discovery */ + /* go thru NPR nodes and issue ELS PLOGIs */ + if (vport->fc_npr_cnt) + if (lpfc_els_disc_plogi(vport)) + return; + + lpfc_end_rscn(vport); +} + +/** + * lpfc_adisc_done - Complete the adisc phase of discovery + * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. + * + * This function is called when the final ADISC is completed during discovery. + * This function handles clearing link attention or issuing reg_vpi depending + * on whether npiv is enabled. This function also kicks off the PLOGI phase of + * discovery. + * This function is called with no locks held. + **/ +static void +lpfc_adisc_done(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + + /* + * For NPIV, cmpl_reg_vpi will set port_state to READY, + * and continue discovery. + */ + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + !(vport->fc_flag & FC_RSCN_MODE) && + (phba->sli_rev < LPFC_SLI_REV4)) { + + /* + * If link is down, clear_la and reg_vpi will be done after + * flogi following a link up event + */ + if (!lpfc_is_link_up(phba)) + return; + + /* The ADISCs are complete. Doesn't matter if they + * succeeded or failed because the ADISC completion + * routine guarantees to call the state machine and + * the RPI is either unregistered (failed ADISC response) + * or the RPI is still valid and the node is marked + * mapped for a target. The exchanges should be in the + * correct state. This code is specific to SLI3. + */ + lpfc_issue_clear_la(phba, vport); + lpfc_issue_reg_vpi(phba, vport); + return; + } + /* + * For SLI2, we need to set port_state to READY + * and continue discovery. + */ + if (vport->port_state < LPFC_VPORT_READY) { + /* If we get here, there is nothing to ADISC */ + lpfc_issue_clear_la(phba, vport); + if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { + vport->num_disc_nodes = 0; + /* go thru NPR list, issue ELS PLOGIs */ + if (vport->fc_npr_cnt) + lpfc_els_disc_plogi(vport); + if (!vport->num_disc_nodes) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + lpfc_end_rscn(vport); + } + } + vport->port_state = LPFC_VPORT_READY; + } else + lpfc_rscn_disc(vport); +} + +/** + * lpfc_more_adisc - Issue more adisc as needed + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine determines whether there are more ndlps on a @vport + * node list need to have Address Discover (ADISC) issued. If so, it will + * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's + * remaining nodes which need to have ADISC sent. 
+ **/ +void +lpfc_more_adisc(struct lpfc_vport *vport) +{ + if (vport->num_disc_nodes) + vport->num_disc_nodes--; + /* Continue discovery with ADISCs to go */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0210 Continue discovery with %d ADISCs to go " + "Data: x%x x%x x%x\n", + vport->num_disc_nodes, vport->fc_adisc_cnt, + vport->fc_flag, vport->port_state); + /* Check to see if there are more ADISCs to be sent */ + if (vport->fc_flag & FC_NLP_MORE) { + lpfc_set_disctmo(vport); + /* go thru NPR nodes and issue any remaining ELS ADISCs */ + lpfc_els_disc_adisc(vport); + } + if (!vport->num_disc_nodes) + lpfc_adisc_done(vport); + return; +} + +/** + * lpfc_cmpl_els_adisc - Completion callback function for adisc + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine is the completion function for issuing the Address Discover + * (ADISC) command. It first checks to see whether link went down during + * the discovery process. If so, the node will be marked as node port + * recovery for issuing discover IOCB by the link attention handler and + * exit. Otherwise, the response status is checked. If error was reported + * in the response status, the ADISC command shall be retried by invoking + * the lpfc_els_retry() routine. Otherwise, if no error was reported in + * the response status, the state machine is invoked to set transition + * with respect to NLP_EVT_CMPL_ADISC event. + **/ +static void +lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + IOCB_t *irsp; + struct lpfc_nodelist *ndlp; + int disc; + u32 ulp_status, ulp_word4, tmo; + bool release_node = false; + + /* we pass cmdiocb to state machine which needs rspiocb as well */ + cmdiocb->rsp_iocb = rspiocb; + + ndlp = cmdiocb->ndlp; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + ulp_word4 = get_job_word4(phba, rspiocb); + + if (phba->sli_rev == LPFC_SLI_REV4) { + tmo = get_wqe_tmo(cmdiocb); + } else { + irsp = &rspiocb->iocb; + tmo = irsp->ulpTimeout; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "ADISC cmpl: status:x%x/x%x did:x%x", + ulp_status, ulp_word4, + ndlp->nlp_DID); + + /* Since ndlp can be freed in the disc state machine, note if this node + * is being used during discovery. 
+ */
+	spin_lock_irq(&ndlp->lock);
+	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+	ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
+	spin_unlock_irq(&ndlp->lock);
+	/* ADISC completes to NPort */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0104 ADISC completes to NPort x%x "
+			 "Data: x%x x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, ulp_status, ulp_word4,
+			 tmo, disc, vport->num_disc_nodes);
+	/* Check to see if link went down during discovery */
+	if (lpfc_els_chk_latt(vport)) {
+		spin_lock_irq(&ndlp->lock);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irq(&ndlp->lock);
+		goto out;
+	}
+
+	if (ulp_status) {
+		/* Check for retry */
+		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+			/* ELS command is being retried */
+			if (disc) {
+				spin_lock_irq(&ndlp->lock);
+				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+				spin_unlock_irq(&ndlp->lock);
+				lpfc_set_disctmo(vport);
+			}
+			goto out;
+		}
+		/* ADISC failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+				 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, ulp_status,
+				 ulp_word4);
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					NLP_EVT_CMPL_ADISC);
+
+		/* As long as this node is not registered with the SCSI or NVMe
+		 * transport, it is no longer an active node. Otherwise
+		 * devloss handles the final cleanup.
+		 */
+		spin_lock_irq(&ndlp->lock);
+		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+			if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+				release_node = true;
+		}
+		spin_unlock_irq(&ndlp->lock);
+
+		if (release_node)
+			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+						NLP_EVT_DEVICE_RM);
+	} else
+		/* Good status, call state machine */
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					NLP_EVT_CMPL_ADISC);
+
+	/* Check to see if there are more ADISCs to be sent */
+	if (disc && vport->num_disc_nodes)
+		lpfc_more_adisc(vport);
+out:
+	lpfc_els_free_iocb(phba, cmdiocb);
+	lpfc_nlp_put(ndlp);
+	return;
+}
+
+/**
+ * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues an Address Discover (ADISC) for an @ndlp on a
+ * @vport. It prepares the payload of the ADISC ELS command, updates the
+ * nlp_flag of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
+ * to issue the ADISC ELS command.
+ *
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the ADISC ELS command.
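+ * The NLP_ADISC_SND flag is set on the ndlp while the ADISC is outstanding
+ * and is cleared by the completion handler or when the command cannot be
+ * issued.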
+ * + * Return code + * 0 - successfully issued adisc + * 1 - failed to issue adisc + **/ +int +lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + uint8_t retry) +{ + int rc = 0; + struct lpfc_hba *phba = vport->phba; + ADISC *ap; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + uint16_t cmdsize; + + cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_ADISC); + if (!elsiocb) + return 1; + + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + + /* For ADISC request, remainder of payload is service parameters */ + *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; + pcmd += sizeof(uint32_t); + + /* Fill in ADISC payload */ + ap = (ADISC *) pcmd; + ap->hardAL_PA = phba->fc_pref_ALPA; + memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); + memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); + ap->DID = be32_to_cpu(vport->fc_myDID); + + phba->fc_stat.elsXmitADISC++; + elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc; + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_ADISC_SND; + spin_unlock_irq(&ndlp->lock); + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + goto err; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue ADISC: did:x%x refcnt %d", + ndlp->nlp_DID, kref_read(&ndlp->kref), 0); + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + goto err; + } + + return 0; + +err: + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_ADISC_SND; + spin_unlock_irq(&ndlp->lock); + return 1; +} + +/** + * lpfc_cmpl_els_logo - Completion callback function for logo + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine is the completion function for issuing the ELS Logout (LOGO) + * command. If no error status was reported from the LOGO response, the + * state machine of the associated ndlp shall be invoked for transition with + * respect to NLP_EVT_CMPL_LOGO event. 
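+ * A failed LOGO is not retried; for FCP and NVME target nodes, discovery
+ * is restarted to recover the login unless the link itself was lost.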
+ **/ +static void +lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_nodelist *ndlp = cmdiocb->ndlp; + struct lpfc_vport *vport = ndlp->vport; + IOCB_t *irsp; + unsigned long flags; + uint32_t skip_recovery = 0; + int wake_up_waiter = 0; + u32 ulp_status; + u32 ulp_word4; + u32 tmo; + + /* we pass cmdiocb to state machine which needs rspiocb as well */ + cmdiocb->rsp_iocb = rspiocb; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + ulp_word4 = get_job_word4(phba, rspiocb); + + if (phba->sli_rev == LPFC_SLI_REV4) { + tmo = get_wqe_tmo(cmdiocb); + } else { + irsp = &rspiocb->iocb; + tmo = irsp->ulpTimeout; + } + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_LOGO_SND; + if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { + wake_up_waiter = 1; + ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; + } + spin_unlock_irq(&ndlp->lock); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "LOGO cmpl: status:x%x/x%x did:x%x", + ulp_status, ulp_word4, + ndlp->nlp_DID); + + /* LOGO completes to NPort */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0105 LOGO completes to NPort x%x " + "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", + ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, + ulp_status, ulp_word4, + tmo, vport->num_disc_nodes); + + if (lpfc_els_chk_latt(vport)) { + skip_recovery = 1; + goto out; + } + + /* The LOGO will not be retried on failure. A LOGO was + * issued to the remote rport and a ACC or RJT or no Answer are + * all acceptable. Note the failure and move forward with + * discovery. The PLOGI will retry. + */ + if (ulp_status) { + /* LOGO failed */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2756 LOGO failure, No Retry DID:%06X " + "Status:x%x/x%x\n", + ndlp->nlp_DID, ulp_status, + ulp_word4); + + if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) + skip_recovery = 1; + } + + /* Call state machine. This will unregister the rpi if needed. */ + lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); + + if (skip_recovery) + goto out; + + /* The driver sets this flag for an NPIV instance that doesn't want to + * log into the remote port. + */ + if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { + spin_lock_irq(&ndlp->lock); + if (phba->sli_rev == LPFC_SLI_REV4) + ndlp->nlp_flag |= NLP_RELEASE_RPI; + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + spin_unlock_irq(&ndlp->lock); + lpfc_disc_state_machine(vport, ndlp, cmdiocb, + NLP_EVT_DEVICE_RM); + goto out_rsrc_free; + } + +out: + /* At this point, the LOGO processing is complete. NOTE: For a + * pt2pt topology, we are assuming the NPortID will only change + * on link up processing. For a LOGO / PLOGI initiated by the + * Initiator, we are assuming the NPortID is not going to change. + */ + + if (wake_up_waiter && ndlp->logo_waitq) + wake_up(ndlp->logo_waitq); + /* + * If the node is a target, the handling attempts to recover the port. + * For any other port type, the rpi is unregistered as an implicit + * LOGO. 
+ */
+	if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
+	    skip_recovery == 0) {
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+		spin_lock_irqsave(&ndlp->lock, flags);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irqrestore(&ndlp->lock, flags);
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "3187 LOGO completes to NPort x%x: Start "
+				 "Recovery Data: x%x x%x x%x x%x\n",
+				 ndlp->nlp_DID, ulp_status,
+				 ulp_word4, tmo,
+				 vport->num_disc_nodes);
+
+		lpfc_els_free_iocb(phba, cmdiocb);
+		lpfc_nlp_put(ndlp);
+
+		lpfc_disc_start(vport);
+		return;
+	}
+
+	/* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
+	 * driver sends a LOGO to the rport to clean up. For fabric and
+	 * initiator ports, clean up the node as long as the node is not
+	 * registered with the transport.
+	 */
+	if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+		spin_lock_irq(&ndlp->lock);
+		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+		spin_unlock_irq(&ndlp->lock);
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					NLP_EVT_DEVICE_RM);
+	}
+out_rsrc_free:
+	/* Driver is done with the I/O. */
+	lpfc_els_free_iocb(phba, cmdiocb);
+	lpfc_nlp_put(ndlp);
+}
+
+/**
+ * lpfc_issue_els_logo - Issue a logo to a node on a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine constructs and issues an ELS Logout (LOGO) iocb command
+ * to a remote node, referred to by an @ndlp on a @vport. It constructs the
+ * payload of the IOCB, properly sets up the @ndlp state, and invokes the
+ * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
+ *
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the LOGO ELS command.
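+ * If a LOGO is already pending on the @ndlp (NLP_LOGO_SND is set), the
+ * routine returns 0 without issuing another one.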
+ * + * Callers of this routine are expected to unregister the RPI first + * + * Return code + * 0 - successfully issued logo + * 1 - failed to issue logo + **/ +int +lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + uint16_t cmdsize; + int rc; + + spin_lock_irq(&ndlp->lock); + if (ndlp->nlp_flag & NLP_LOGO_SND) { + spin_unlock_irq(&ndlp->lock); + return 0; + } + spin_unlock_irq(&ndlp->lock); + + cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_LOGO); + if (!elsiocb) + return 1; + + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; + pcmd += sizeof(uint32_t); + + /* Fill in LOGO payload */ + *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); + pcmd += sizeof(uint32_t); + memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); + + phba->fc_stat.elsXmitLOGO++; + elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_LOGO_SND; + ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; + spin_unlock_irq(&ndlp->lock); + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + goto err; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue LOGO: did:x%x refcnt %d", + ndlp->nlp_DID, kref_read(&ndlp->kref), 0); + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + goto err; + } + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_prev_state = ndlp->nlp_state; + spin_unlock_irq(&ndlp->lock); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); + return 0; + +err: + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_LOGO_SND; + spin_unlock_irq(&ndlp->lock); + return 1; +} + +/** + * lpfc_cmpl_els_cmd - Completion callback function for generic els command + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine is a generic completion callback function for ELS commands. + * Specifically, it is the callback function which does not need to perform + * any command specific operations. It is currently used by the ELS command + * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel + * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). + * Other than certain debug loggings, this callback function simply invokes the + * lpfc_els_chk_latt() routine to check whether link went down during the + * discovery process. 
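+ * It then frees the command IOCB and drops the ndlp reference that was
+ * taken when the command was issued.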
+ **/ +static void +lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct lpfc_nodelist *free_ndlp; + IOCB_t *irsp; + u32 ulp_status, ulp_word4, tmo, did, iotag; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + ulp_word4 = get_job_word4(phba, rspiocb); + did = get_job_els_rsp64_did(phba, cmdiocb); + + if (phba->sli_rev == LPFC_SLI_REV4) { + tmo = get_wqe_tmo(cmdiocb); + iotag = get_wqe_reqtag(cmdiocb); + } else { + irsp = &rspiocb->iocb; + tmo = irsp->ulpTimeout; + iotag = irsp->ulpIoTag; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "ELS cmd cmpl: status:x%x/x%x did:x%x", + ulp_status, ulp_word4, did); + + /* ELS cmd tag completes */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", + iotag, ulp_status, ulp_word4, tmo); + + /* Check to see if link went down during discovery */ + lpfc_els_chk_latt(vport); + + free_ndlp = cmdiocb->ndlp; + + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(free_ndlp); +} + +/** + * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. + * @vport: pointer to lpfc_vport data structure. + * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. + * + * This routine registers the rpi assigned to the fabric controller + * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED + * state triggering a registration with the SCSI transport. + * + * This routine is single out because the fabric controller node + * does not receive a PLOGI. This routine is consumed by the + * SCR and RDF ELS commands. Callers are expected to qualify + * with SLI4 first. + **/ +static int +lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) +{ + int rc = 0; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ns_ndlp; + LPFC_MBOXQ_t *mbox; + + if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) + return rc; + + ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (!ns_ndlp) + return -ENODEV; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", + __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, + ns_ndlp->nlp_state); + if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) + return -ENODEV; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, + "0936 %s: no memory for reg_login " + "Data: x%x x%x x%x x%x\n", __func__, + fc_ndlp->nlp_DID, fc_ndlp->nlp_state, + fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); + return -ENOMEM; + } + rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, + (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); + if (rc) { + rc = -EACCES; + goto out; + } + + fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; + mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); + if (!mbox->ctx_ndlp) { + rc = -ENOMEM; + goto out; + } + + mbox->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + rc = -ENODEV; + lpfc_nlp_put(fc_ndlp); + goto out; + } + /* Success path. Exit. 
*/ + lpfc_nlp_set_state(vport, fc_ndlp, + NLP_STE_REG_LOGIN_ISSUE); + return 0; + + out: + lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, + "0938 %s: failed to format reg_login " + "Data: x%x x%x x%x x%x\n", __func__, + fc_ndlp->nlp_DID, fc_ndlp->nlp_state, + fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); + return rc; +} + +/** + * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine is a generic completion callback function for Discovery ELS cmd. + * Currently used by the ELS command issuing routines for the ELS State Change + * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). + * These commands will be retried once only for ELS timeout errors. + **/ +static void +lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + IOCB_t *irsp; + struct lpfc_els_rdf_rsp *prdf; + struct lpfc_dmabuf *pcmd, *prsp; + u32 *pdata; + u32 cmd; + struct lpfc_nodelist *ndlp = cmdiocb->ndlp; + u32 ulp_status, ulp_word4, tmo, did, iotag; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + ulp_word4 = get_job_word4(phba, rspiocb); + did = get_job_els_rsp64_did(phba, cmdiocb); + + if (phba->sli_rev == LPFC_SLI_REV4) { + tmo = get_wqe_tmo(cmdiocb); + iotag = get_wqe_reqtag(cmdiocb); + } else { + irsp = &rspiocb->iocb; + tmo = irsp->ulpTimeout; + iotag = irsp->ulpIoTag; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "ELS cmd cmpl: status:x%x/x%x did:x%x", + ulp_status, ulp_word4, did); + + /* ELS cmd tag completes */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, + "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n", + iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry); + + pcmd = cmdiocb->cmd_dmabuf; + if (!pcmd) + goto out; + + pdata = (u32 *)pcmd->virt; + if (!pdata) + goto out; + cmd = *pdata; + + /* Only 1 retry for ELS Timeout only */ + if (ulp_status == IOSTAT_LOCAL_REJECT && + ((ulp_word4 & IOERR_PARAM_MASK) == + IOERR_SEQUENCE_TIMEOUT)) { + cmdiocb->retry++; + if (cmdiocb->retry <= 1) { + switch (cmd) { + case ELS_CMD_SCR: + lpfc_issue_els_scr(vport, cmdiocb->retry); + break; + case ELS_CMD_EDC: + lpfc_issue_els_edc(vport, cmdiocb->retry); + break; + case ELS_CMD_RDF: + lpfc_issue_els_rdf(vport, cmdiocb->retry); + break; + } + goto out; + } + phba->fc_stat.elsRetryExceeded++; + } + if (cmd == ELS_CMD_EDC) { + /* must be called before checking uplStatus and returning */ + lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); + return; + } + if (ulp_status) { + /* ELS discovery cmd completes with error */ + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, + "4203 ELS cmd x%x error: x%x x%X\n", cmd, + ulp_status, ulp_word4); + goto out; + } + + /* The RDF response doesn't have any impact on the running driver + * but the notification descriptors are dumped here for support. 
+ */ + if (cmd == ELS_CMD_RDF) { + int i; + + prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); + if (!prsp) + goto out; + + prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; + if (!prdf) + goto out; + + for (i = 0; i < ELS_RDF_REG_TAG_CNT && + i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) + lpfc_printf_vlog(vport, KERN_INFO, + LOG_ELS | LOG_CGN_MGMT, + "4677 Fabric RDF Notification Grant " + "Data: 0x%08x Reg: %x %x\n", + be32_to_cpu( + prdf->reg_d1.desc_tags[i]), + phba->cgn_reg_signal, + phba->cgn_reg_fpin); + } + +out: + /* Check to see if link went down during discovery */ + lpfc_els_chk_latt(vport); + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); + return; +} + +/** + * lpfc_issue_els_scr - Issue a scr to an node on a vport + * @vport: pointer to a host virtual N_Port data structure. + * @retry: retry counter for the command IOCB. + * + * This routine issues a State Change Request (SCR) to a fabric node + * on a @vport. The remote node is Fabric Controller (0xfffffd). It + * first search the @vport node list to find the matching ndlp. If no such + * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An + * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() + * routine is invoked to send the SCR IOCB. + * + * Note that the ndlp reference count will be incremented by 1 for holding the + * ndlp and the reference to ndlp will be stored into the ndlp field of + * the IOCB for the completion callback function to the SCR ELS command. + * + * Return code + * 0 - Successfully issued scr command + * 1 - Failed to issue scr command + **/ +int +lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) +{ + int rc = 0; + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + uint16_t cmdsize; + struct lpfc_nodelist *ndlp; + + cmdsize = (sizeof(uint32_t) + sizeof(SCR)); + + ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); + if (!ndlp) { + ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); + if (!ndlp) + return 1; + lpfc_enqueue_node(vport, ndlp); + } + + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_SCR); + if (!elsiocb) + return 1; + + if (phba->sli_rev == LPFC_SLI_REV4) { + rc = lpfc_reg_fab_ctrl_node(vport, ndlp); + if (rc) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, + "0937 %s: Failed to reg fc node, rc %d\n", + __func__, rc); + return 1; + } + } + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + + *((uint32_t *) (pcmd)) = ELS_CMD_SCR; + pcmd += sizeof(uint32_t); + + /* For SCR, remainder of payload is SCR parameter page */ + memset(pcmd, 0, sizeof(SCR)); + ((SCR *) pcmd)->Function = SCR_FUNC_FULL; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue SCR: did:x%x", + ndlp->nlp_DID, 0, 0); + + phba->fc_stat.elsXmitSCR++; + elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue SCR: did:x%x refcnt %d", + ndlp->nlp_DID, kref_read(&ndlp->kref), 0); + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return 1; + } + + return 0; +} + +/** + * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) + * or the other nport (pt2pt). + * @vport: pointer to a host virtual N_Port data structure. + * @retry: number of retries to the command IOCB. 
+ * + * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) + * when connected to a fabric, or to the remote port when connected + * in point-to-point mode. When sent to the Fabric Controller, it will + * replay the RSCN to registered recipients. + * + * Note that the ndlp reference count will be incremented by 1 for holding the + * ndlp and the reference to ndlp will be stored into the ndlp field of + * the IOCB for the completion callback function to the RSCN ELS command. + * + * Return code + * 0 - Successfully issued RSCN command + * 1 - Failed to issue RSCN command + **/ +int +lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) +{ + int rc = 0; + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + struct lpfc_nodelist *ndlp; + struct { + struct fc_els_rscn rscn; + struct fc_els_rscn_page portid; + } *event; + uint32_t nportid; + uint16_t cmdsize = sizeof(*event); + + /* Not supported for private loop */ + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && + !(vport->fc_flag & FC_PUBLIC_LOOP)) + return 1; + + if (vport->fc_flag & FC_PT2PT) { + /* find any mapped nport - that would be the other nport */ + ndlp = lpfc_findnode_mapped(vport); + if (!ndlp) + return 1; + } else { + nportid = FC_FID_FCTRL; + /* find the fabric controller node */ + ndlp = lpfc_findnode_did(vport, nportid); + if (!ndlp) { + /* if one didn't exist, make one */ + ndlp = lpfc_nlp_init(vport, nportid); + if (!ndlp) + return 1; + lpfc_enqueue_node(vport, ndlp); + } + } + + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_RSCN_XMT); + + if (!elsiocb) + return 1; + + event = elsiocb->cmd_dmabuf->virt; + + event->rscn.rscn_cmd = ELS_RSCN; + event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); + event->rscn.rscn_plen = cpu_to_be16(cmdsize); + + nportid = vport->fc_myDID; + /* appears that page flags must be 0 for fabric to broadcast RSCN */ + event->portid.rscn_page_flags = 0; + event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; + event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; + event->portid.rscn_fid[2] = nportid & 0x000000FF; + + phba->fc_stat.elsXmitRSCN++; + elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue RSCN: did:x%x", + ndlp->nlp_DID, 0, 0); + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return 1; + } + + return 0; +} + +/** + * lpfc_issue_els_farpr - Issue a farp to an node on a vport + * @vport: pointer to a host virtual N_Port data structure. + * @nportid: N_Port identifier to the remote node. + * @retry: number of retries to the command IOCB. + * + * This routine issues a Fibre Channel Address Resolution Response + * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) + * is passed into the function. It first search the @vport node list to find + * the matching ndlp. If no such ndlp is found, a new ndlp shall be created + * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the + * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. + * + * Note that the ndlp reference count will be incremented by 1 for holding the + * ndlp and the reference to ndlp will be stored into the ndlp field of + * the IOCB for the completion callback function to the FARPR ELS command. 
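+ * If a node matching @nportid already exists on the @vport, its port and
+ * node names are copied into the FARPR payload.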
+ * + * Return code + * 0 - Successfully issued farpr command + * 1 - Failed to issue farpr command + **/ +static int +lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) +{ + int rc = 0; + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + FARP *fp; + uint8_t *pcmd; + uint32_t *lp; + uint16_t cmdsize; + struct lpfc_nodelist *ondlp; + struct lpfc_nodelist *ndlp; + + cmdsize = (sizeof(uint32_t) + sizeof(FARP)); + + ndlp = lpfc_findnode_did(vport, nportid); + if (!ndlp) { + ndlp = lpfc_nlp_init(vport, nportid); + if (!ndlp) + return 1; + lpfc_enqueue_node(vport, ndlp); + } + + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_FARPR); + if (!elsiocb) + return 1; + + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + + *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; + pcmd += sizeof(uint32_t); + + /* Fill in FARPR payload */ + fp = (FARP *) (pcmd); + memset(fp, 0, sizeof(FARP)); + lp = (uint32_t *) pcmd; + *lp++ = be32_to_cpu(nportid); + *lp++ = be32_to_cpu(vport->fc_myDID); + fp->Rflags = 0; + fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); + + memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); + memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); + ondlp = lpfc_findnode_did(vport, nportid); + if (ondlp) { + memcpy(&fp->OportName, &ondlp->nlp_portname, + sizeof(struct lpfc_name)); + memcpy(&fp->OnodeName, &ondlp->nlp_nodename, + sizeof(struct lpfc_name)); + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue FARPR: did:x%x", + ndlp->nlp_DID, 0, 0); + + phba->fc_stat.elsXmitFARPR++; + elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + /* The additional lpfc_nlp_put will cause the following + * lpfc_els_free_iocb routine to trigger the release of + * the node. + */ + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return 1; + } + /* This will cause the callback-function lpfc_cmpl_els_cmd to + * trigger the release of the node. + */ + /* Don't release reference count as RDF is likely outstanding */ + return 0; +} + +/** + * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. + * @vport: pointer to a host virtual N_Port data structure. + * @retry: retry counter for the command IOCB. + * + * This routine issues an ELS RDF to the Fabric Controller to register + * for diagnostic functions. + * + * Note that the ndlp reference count will be incremented by 1 for holding the + * ndlp and the reference to ndlp will be stored into the ndlp field of + * the IOCB for the completion callback function to the RDF ELS command. + * + * Return code + * 0 - Successfully issued rdf command + * 1 - Failed to issue rdf command + **/ +int +lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + struct lpfc_els_rdf_req *prdf; + struct lpfc_nodelist *ndlp; + uint16_t cmdsize; + int rc; + + cmdsize = sizeof(*prdf); + + ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); + if (!ndlp) { + ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); + if (!ndlp) + return -ENODEV; + lpfc_enqueue_node(vport, ndlp); + } + + /* RDF ELS is not required on an NPIV VN_Port. 
*/ + if (vport->port_type == LPFC_NPIV_PORT) + return -EACCES; + + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_RDF); + if (!elsiocb) + return -ENOMEM; + + /* Configure the payload for the supported FPIN events. */ + prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt; + memset(prdf, 0, cmdsize); + prdf->rdf.fpin_cmd = ELS_RDF; + prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - + sizeof(struct fc_els_rdf)); + prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); + prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( + FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); + prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); + prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); + prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); + prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); + prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, + "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n", + ndlp->nlp_DID, phba->cgn_reg_signal, + phba->cgn_reg_fpin); + + phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; + elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return -EIO; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue RDF: did:x%x refcnt %d", + ndlp->nlp_DID, kref_read(&ndlp->kref), 0); + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return -EIO; + } + return 0; +} + + /** + * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * A received RDF implies a possible change to fabric supported diagnostic + * functions. This routine sends LS_ACC and then has the Nx_Port issue a new + * RDF request to reregister for supported diagnostic functions. + * + * Return code + * 0 - Success + * -EIO - Failed to process received RDF + **/ +static int +lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + /* Send LS_ACC */ + if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, + "1623 Failed to RDF_ACC from x%x for x%x\n", + ndlp->nlp_DID, vport->fc_myDID); + return -EIO; + } + + /* Issue new RDF for reregistering */ + if (lpfc_issue_els_rdf(vport, 0)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, + "2623 Failed to re register RDF for x%x\n", + vport->fc_myDID); + return -EIO; + } + + return 0; +} + +/** + * lpfc_least_capable_settings - helper function for EDC rsp processing + * @phba: pointer to lpfc hba data structure. + * @pcgd: pointer to congestion detection descriptor in EDC rsp. + * + * This helper routine determines the least capable setting for + * congestion signals, signal freq, including scale, from the + * congestion detection descriptor in the EDC rsp. The routine + * sets @phba values in preparation for a set_featues mailbox. + **/ +static void +lpfc_least_capable_settings(struct lpfc_hba *phba, + struct fc_diag_cg_sig_desc *pcgd) +{ + u32 rsp_sig_cap = 0, drv_sig_cap = 0; + u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0; + + /* Get rsp signal and frequency capabilities. 
*/ + rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability); + rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count); + rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units); + + /* If the Fport does not support signals. Set FPIN only */ + if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED) + goto out_no_support; + + /* Apply the xmt scale to the xmt cycle to get the correct frequency. + * Adapter default is 100 millisSeconds. Convert all xmt cycle values + * to milliSeconds. + */ + switch (rsp_sig_freq_scale) { + case EDC_CG_SIGFREQ_SEC: + rsp_sig_freq_cyc *= MSEC_PER_SEC; + break; + case EDC_CG_SIGFREQ_MSEC: + rsp_sig_freq_cyc = 1; + break; + default: + goto out_no_support; + } + + /* Convenient shorthand. */ + drv_sig_cap = phba->cgn_reg_signal; + + /* Choose the least capable frequency. */ + if (rsp_sig_freq_cyc > phba->cgn_sig_freq) + phba->cgn_sig_freq = rsp_sig_freq_cyc; + + /* Should be some common signals support. Settle on least capable + * signal and adjust FPIN values. Initialize defaults to ease the + * decision. + */ + phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; + phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; + if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && + (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || + drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { + phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; + phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; + } + if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { + if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { + phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; + phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; + } + if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { + phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; + phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; + } + } + + /* We are NOT recording signal frequency in congestion info buffer */ + return; + +out_no_support: + phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; + phba->cgn_sig_freq = 0; + phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; +} + +DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, + FC_LS_TLV_DTAG_INIT); + +/** + * lpfc_cmpl_els_edc - Completion callback function for EDC + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine is the completion callback function for issuing the Exchange + * Diagnostic Capabilities (EDC) command. The driver issues an EDC to + * notify the FPort of its Congestion and Link Fault capabilities. This + * routine parses the FPort's response and decides on the least common + * values applicable to both FPort and NPort for Warnings and Alarms that + * are communicated via hardware signals. 
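+ * If no congestion signaling capability descriptor is found in the
+ * response, the driver falls back to registering for FPIN notifications
+ * only.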
+ **/ +static void +lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + IOCB_t *irsp_iocb; + struct fc_els_edc_resp *edc_rsp; + struct fc_tlv_desc *tlv; + struct fc_diag_cg_sig_desc *pcgd; + struct fc_diag_lnkflt_desc *plnkflt; + struct lpfc_dmabuf *pcmd, *prsp; + const char *dtag_nm; + u32 *pdata, dtag; + int desc_cnt = 0, bytes_remain; + bool rcv_cap_desc = false; + struct lpfc_nodelist *ndlp; + u32 ulp_status, ulp_word4, tmo, did, iotag; + + ndlp = cmdiocb->ndlp; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + ulp_word4 = get_job_word4(phba, rspiocb); + did = get_job_els_rsp64_did(phba, rspiocb); + + if (phba->sli_rev == LPFC_SLI_REV4) { + tmo = get_wqe_tmo(rspiocb); + iotag = get_wqe_reqtag(rspiocb); + } else { + irsp_iocb = &rspiocb->iocb; + tmo = irsp_iocb->ulpTimeout; + iotag = irsp_iocb->ulpIoTag; + } + + lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, + "EDC cmpl: status:x%x/x%x did:x%x", + ulp_status, ulp_word4, did); + + /* ELS cmd tag completes */ + lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, + "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", + iotag, ulp_status, ulp_word4, tmo); + + pcmd = cmdiocb->cmd_dmabuf; + if (!pcmd) + goto out; + + pdata = (u32 *)pcmd->virt; + if (!pdata) + goto out; + + /* Need to clear signal values, send features MB and RDF with FPIN. */ + if (ulp_status) + goto out; + + prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); + if (!prsp) + goto out; + + edc_rsp = prsp->virt; + if (!edc_rsp) + goto out; + + /* ELS cmd tag completes */ + lpfc_printf_log(phba, KERN_INFO, + LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, + "4676 Fabric EDC Rsp: " + "0x%02x, 0x%08x\n", + edc_rsp->acc_hdr.la_cmd, + be32_to_cpu(edc_rsp->desc_list_len)); + + /* + * Payload length in bytes is the response descriptor list + * length minus the 12 bytes of Link Service Request + * Information descriptor in the reply. 
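+	 * For example, a reply carrying only the congestion signaling
+	 * capability descriptor leaves sizeof(struct fc_diag_cg_sig_desc)
+	 * bytes of TLVs to walk.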
+ */ + bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - + sizeof(struct fc_els_lsri_desc); + if (bytes_remain <= 0) + goto out; + + tlv = edc_rsp->desc; + + /* + * cycle through EDC diagnostic descriptors to find the + * congestion signaling capability descriptor + */ + while (bytes_remain) { + if (bytes_remain < FC_TLV_DESC_HDR_SZ) { + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "6461 Truncated TLV hdr on " + "Diagnostic descriptor[%d]\n", + desc_cnt); + goto out; + } + + dtag = be32_to_cpu(tlv->desc_tag); + switch (dtag) { + case ELS_DTAG_LNK_FAULT_CAP: + if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || + FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != + sizeof(struct fc_diag_lnkflt_desc)) { + lpfc_printf_log(phba, KERN_WARNING, + LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, + "6462 Truncated Link Fault Diagnostic " + "descriptor[%d]: %d vs 0x%zx 0x%zx\n", + desc_cnt, bytes_remain, + FC_TLV_DESC_SZ_FROM_LENGTH(tlv), + sizeof(struct fc_diag_lnkflt_desc)); + goto out; + } + plnkflt = (struct fc_diag_lnkflt_desc *)tlv; + lpfc_printf_log(phba, KERN_INFO, + LOG_ELS | LOG_LDS_EVENT, + "4617 Link Fault Desc Data: 0x%08x 0x%08x " + "0x%08x 0x%08x 0x%08x\n", + be32_to_cpu(plnkflt->desc_tag), + be32_to_cpu(plnkflt->desc_len), + be32_to_cpu( + plnkflt->degrade_activate_threshold), + be32_to_cpu( + plnkflt->degrade_deactivate_threshold), + be32_to_cpu(plnkflt->fec_degrade_interval)); + break; + case ELS_DTAG_CG_SIGNAL_CAP: + if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || + FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != + sizeof(struct fc_diag_cg_sig_desc)) { + lpfc_printf_log( + phba, KERN_WARNING, LOG_CGN_MGMT, + "6463 Truncated Cgn Signal Diagnostic " + "descriptor[%d]: %d vs 0x%zx 0x%zx\n", + desc_cnt, bytes_remain, + FC_TLV_DESC_SZ_FROM_LENGTH(tlv), + sizeof(struct fc_diag_cg_sig_desc)); + goto out; + } + + pcgd = (struct fc_diag_cg_sig_desc *)tlv; + lpfc_printf_log( + phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, + "4616 CGN Desc Data: 0x%08x 0x%08x " + "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", + be32_to_cpu(pcgd->desc_tag), + be32_to_cpu(pcgd->desc_len), + be32_to_cpu(pcgd->xmt_signal_capability), + be16_to_cpu(pcgd->xmt_signal_frequency.count), + be16_to_cpu(pcgd->xmt_signal_frequency.units), + be32_to_cpu(pcgd->rcv_signal_capability), + be16_to_cpu(pcgd->rcv_signal_frequency.count), + be16_to_cpu(pcgd->rcv_signal_frequency.units)); + + /* Compare driver and Fport capabilities and choose + * least common. 
+ */ + lpfc_least_capable_settings(phba, pcgd); + rcv_cap_desc = true; + break; + default: + dtag_nm = lpfc_get_tlv_dtag_nm(dtag); + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "4919 unknown Diagnostic " + "Descriptor[%d]: tag x%x (%s)\n", + desc_cnt, dtag, dtag_nm); + } + + bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); + tlv = fc_tlv_next_desc(tlv); + desc_cnt++; + } + +out: + if (!rcv_cap_desc) { + phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; + phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; + phba->cgn_sig_freq = 0; + lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, + "4202 EDC rsp error - sending RDF " + "for FPIN only.\n"); + } + + lpfc_config_cgn_signal(phba); + + /* Check to see if link went down during discovery */ + lpfc_els_chk_latt(phba->pport); + lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, + "EDC Cmpl: did:x%x refcnt %d", + ndlp->nlp_DID, kref_read(&ndlp->kref), 0); + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); +} + +static void +lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) +{ + struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv; + + lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP); + lft->desc_len = cpu_to_be32( + FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc)); + + lft->degrade_activate_threshold = + cpu_to_be32(phba->degrade_activate_threshold); + lft->degrade_deactivate_threshold = + cpu_to_be32(phba->degrade_deactivate_threshold); + lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval); +} + +static void +lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) +{ + struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv; + + /* We are assuming cgd was zero'ed before calling this routine */ + + /* Configure the congestion detection capability */ + cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); + + /* Descriptor len doesn't include the tag or len fields. */ + cgd->desc_len = cpu_to_be32( + FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); + + /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. + * xmt_signal_frequency.count already set to 0. + * xmt_signal_frequency.units already set to 0. + */ + + if (phba->cmf_active_mode == LPFC_CFG_OFF) { + /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. + * rcv_signal_frequency.count already set to 0. + * rcv_signal_frequency.units already set to 0. + */ + phba->cgn_sig_freq = 0; + return; + } + switch (phba->cgn_reg_signal) { + case EDC_CG_SIG_WARN_ONLY: + cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); + break; + case EDC_CG_SIG_WARN_ALARM: + cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); + break; + default: + /* rcv_signal_capability left 0 thus no support */ + break; + } + + /* We start negotiation with lpfc_fabric_cgn_frequency, after + * the completion we settle on the higher frequency. 
+ */ + cgd->rcv_signal_frequency.count = + cpu_to_be16(lpfc_fabric_cgn_frequency); + cgd->rcv_signal_frequency.units = + cpu_to_be16(EDC_CG_SIGFREQ_MSEC); +} + +static bool +lpfc_link_is_lds_capable(struct lpfc_hba *phba) +{ + if (!(phba->lmt & LMT_64Gb)) + return false; + if (phba->sli_rev != LPFC_SLI_REV4) + return false; + + if (phba->sli4_hba.conf_trunk) { + if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G) + return true; + } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) { + return true; + } + return false; +} + + /** + * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. + * @vport: pointer to a host virtual N_Port data structure. + * @retry: retry counter for the command iocb. + * + * This routine issues an ELS EDC to the F-Port Controller to communicate + * this N_Port's support of hardware signals in its Congestion + * Capabilities Descriptor. + * + * Note: This routine does not check if one or more signals are + * set in the cgn_reg_signal parameter. The caller makes the + * decision to enforce cgn_reg_signal as nonzero or zero depending + * on the conditions. During Fabric requests, the driver + * requires cgn_reg_signals to be nonzero. But a dynamic request + * to set the congestion mode to OFF from Monitor or Manage + * would correctly issue an EDC with no signals enabled to + * turn off switch functionality and then update the FW. + * + * Return code + * 0 - Successfully issued edc command + * 1 - Failed to issue edc command + **/ +int +lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + struct fc_els_edc *edc_req; + struct fc_tlv_desc *tlv; + u16 cmdsize; + struct lpfc_nodelist *ndlp; + u8 *pcmd = NULL; + u32 cgn_desc_size, lft_desc_size; + int rc; + + if (vport->port_type == LPFC_NPIV_PORT) + return -EACCES; + + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) + return -ENODEV; + + cgn_desc_size = (phba->cgn_init_reg_signal) ? + sizeof(struct fc_diag_cg_sig_desc) : 0; + lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? + sizeof(struct fc_diag_lnkflt_desc) : 0; + cmdsize = cgn_desc_size + lft_desc_size; + + /* Skip EDC if no applicable descriptors */ + if (!cmdsize) + goto try_rdf; + + cmdsize += sizeof(struct fc_els_edc); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_EDC); + if (!elsiocb) + goto try_rdf; + + /* Configure the payload for the supported Diagnostics capabilities. 
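+	 * A congestion detection descriptor is included when
+	 * cgn_init_reg_signal is set, and a link fault capability descriptor
+	 * when the link is LDS capable (see lpfc_link_is_lds_capable()).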
*/ + pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; + memset(pcmd, 0, cmdsize); + edc_req = (struct fc_els_edc *)pcmd; + edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size); + edc_req->edc_cmd = ELS_EDC; + tlv = edc_req->desc; + + if (cgn_desc_size) { + lpfc_format_edc_cgn_desc(phba, tlv); + phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; + tlv = fc_tlv_next_desc(tlv); + } + + if (lft_desc_size) + lpfc_format_edc_lft_desc(phba, tlv); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, + "4623 Xmit EDC to remote " + "NPORT x%x reg_sig x%x reg_fpin:x%x\n", + ndlp->nlp_DID, phba->cgn_reg_signal, + phba->cgn_reg_fpin); + + elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return -EIO; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue EDC: did:x%x refcnt %d", + ndlp->nlp_DID, kref_read(&ndlp->kref), 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + /* The additional lpfc_nlp_put will cause the following + * lpfc_els_free_iocb routine to trigger the rlease of + * the node. + */ + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + goto try_rdf; + } + return 0; +try_rdf: + phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; + phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; + rc = lpfc_issue_els_rdf(vport, 0); + return rc; +} + +/** + * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry + * @vport: pointer to a host virtual N_Port data structure. + * @nlp: pointer to a node-list data structure. + * + * This routine cancels the timer with a delayed IOCB-command retry for + * a @vport's @ndlp. It stops the timer for the delayed function retrial and + * removes the ELS retry event if it presents. In addition, if the + * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB + * commands are sent for the @vport's nodes that require issuing discovery + * ADISC. + **/ +void +lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_work_evt *evtp; + + if (!(nlp->nlp_flag & NLP_DELAY_TMO)) + return; + spin_lock_irq(&nlp->lock); + nlp->nlp_flag &= ~NLP_DELAY_TMO; + spin_unlock_irq(&nlp->lock); + del_timer_sync(&nlp->nlp_delayfunc); + nlp->nlp_last_elscmd = 0; + if (!list_empty(&nlp->els_retry_evt.evt_listp)) { + list_del_init(&nlp->els_retry_evt.evt_listp); + /* Decrement nlp reference count held for the delayed retry */ + evtp = &nlp->els_retry_evt; + lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); + } + if (nlp->nlp_flag & NLP_NPR_2B_DISC) { + spin_lock_irq(&nlp->lock); + nlp->nlp_flag &= ~NLP_NPR_2B_DISC; + spin_unlock_irq(&nlp->lock); + if (vport->num_disc_nodes) { + if (vport->port_state < LPFC_VPORT_READY) { + /* Check if there are more ADISCs to be sent */ + lpfc_more_adisc(vport); + } else { + /* Check if there are more PLOGIs to be sent */ + lpfc_more_plogi(vport); + if (vport->num_disc_nodes == 0) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + lpfc_end_rscn(vport); + } + } + } + } + return; +} + +/** + * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer + * @t: pointer to the timer function associated data (ndlp). + * + * This routine is invoked by the ndlp delayed-function timer to check + * whether there is any pending ELS retry event(s) with the node. 
If not, it + * simply returns. Otherwise, if there is at least one ELS delayed event, it + * adds the delayed events to the HBA work list and invokes the + * lpfc_worker_wake_up() routine to wake up worker thread to process the + * event. Note that lpfc_nlp_get() is called before posting the event to + * the work list to hold reference count of ndlp so that it guarantees the + * reference to ndlp will still be available when the worker thread gets + * to the event associated with the ndlp. + **/ +void +lpfc_els_retry_delay(struct timer_list *t) +{ + struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); + struct lpfc_vport *vport = ndlp->vport; + struct lpfc_hba *phba = vport->phba; + unsigned long flags; + struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; + + spin_lock_irqsave(&phba->hbalock, flags); + if (!list_empty(&evtp->evt_listp)) { + spin_unlock_irqrestore(&phba->hbalock, flags); + return; + } + + /* We need to hold the node by incrementing the reference + * count until the queued work is done + */ + evtp->evt_arg1 = lpfc_nlp_get(ndlp); + if (evtp->evt_arg1) { + evtp->evt = LPFC_EVT_ELS_RETRY; + list_add_tail(&evtp->evt_listp, &phba->work_list); + lpfc_worker_wake_up(phba); + } + spin_unlock_irqrestore(&phba->hbalock, flags); + return; +} + +/** + * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function + * @ndlp: pointer to a node-list data structure. + * + * This routine is the worker-thread handler for processing the @ndlp delayed + * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves + * the last ELS command from the associated ndlp and invokes the proper ELS + * function according to the delayed ELS command to retry the command. + **/ +void +lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) +{ + struct lpfc_vport *vport = ndlp->vport; + uint32_t cmd, retry; + + spin_lock_irq(&ndlp->lock); + cmd = ndlp->nlp_last_elscmd; + ndlp->nlp_last_elscmd = 0; + + if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + spin_unlock_irq(&ndlp->lock); + return; + } + + ndlp->nlp_flag &= ~NLP_DELAY_TMO; + spin_unlock_irq(&ndlp->lock); + /* + * If a discovery event readded nlp_delayfunc after timer + * firing and before processing the timer, cancel the + * nlp_delayfunc. + */ + del_timer_sync(&ndlp->nlp_delayfunc); + retry = ndlp->nlp_retry; + ndlp->nlp_retry = 0; + + switch (cmd) { + case ELS_CMD_FLOGI: + lpfc_issue_els_flogi(vport, ndlp, retry); + break; + case ELS_CMD_PLOGI: + if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + } + break; + case ELS_CMD_ADISC: + if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); + } + break; + case ELS_CMD_PRLI: + case ELS_CMD_NVMEPRLI: + if (!lpfc_issue_els_prli(vport, ndlp, retry)) { + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); + } + break; + case ELS_CMD_LOGO: + if (!lpfc_issue_els_logo(vport, ndlp, retry)) { + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); + } + break; + case ELS_CMD_FDISC: + if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) + lpfc_issue_els_fdisc(vport, ndlp, retry); + break; + } + return; +} + +/** + * lpfc_link_reset - Issue link reset + * @vport: pointer to a virtual N_Port data structure. + * + * This routine performs link reset by sending INIT_LINK mailbox command. 
+ * For SLI-3 adapter, link attention interrupt is enabled before issuing + * INIT_LINK mailbox command. + * + * Return code + * 0 - Link reset initiated successfully + * 1 - Failed to initiate link reset + **/ +int +lpfc_link_reset(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mbox; + uint32_t control; + int rc; + + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2851 Attempt link reset\n"); + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2852 Failed to allocate mbox memory"); + return 1; + } + + /* Enable Link attention interrupts */ + if (phba->sli_rev <= LPFC_SLI_REV3) { + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag |= LPFC_PROCESS_LA; + control = readl(phba->HCregaddr); + control |= HC_LAINT_ENA; + writel(control, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + spin_unlock_irq(&phba->hbalock); + } + + lpfc_init_link(phba, mbox, phba->cfg_topology, + phba->cfg_link_speed); + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2853 Failed to issue INIT_LINK " + "mbox command, rc:x%x\n", rc); + mempool_free(mbox, phba->mbox_mem_pool); + return 1; + } + + return 0; +} + +/** + * lpfc_els_retry - Make retry decision on an els command iocb + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine makes a retry decision on an ELS command IOCB, which has + * failed. The following ELS IOCBs use this function for retrying the command + * when previously issued command responsed with error status: FLOGI, PLOGI, + * PRLI, ADISC and FDISC. Based on the ELS command type and the + * returned error status, it makes the decision whether a retry shall be + * issued for the command, and whether a retry shall be made immediately or + * delayed. In the former case, the corresponding ELS command issuing-function + * is called to retry the command. In the later case, the ELS command shall + * be posted to the ndlp delayed event and delayed function timer set to the + * ndlp for the delayed command issusing. + * + * Return code + * 0 - No retry of els command is made + * 1 - Immediate or delayed retry of els command is made + **/ +static int +lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + union lpfc_wqe128 *irsp = &rspiocb->wqe; + struct lpfc_nodelist *ndlp = cmdiocb->ndlp; + struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; + uint32_t *elscmd; + struct ls_rjt stat; + int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; + int logerr = 0; + uint32_t cmd = 0; + uint32_t did; + int link_reset = 0, rc; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + u32 ulp_word4 = get_job_word4(phba, rspiocb); + + + /* Note: cmd_dmabuf may be 0 for internal driver abort + * of delays ELS command. 
+ */ + + if (pcmd && pcmd->virt) { + elscmd = (uint32_t *) (pcmd->virt); + cmd = *elscmd++; + } + + if (ndlp) + did = ndlp->nlp_DID; + else { + /* We should only hit this case for retrying PLOGI */ + did = get_job_els_rsp64_did(phba, rspiocb); + ndlp = lpfc_findnode_did(vport, did); + if (!ndlp && (cmd != ELS_CMD_PLOGI)) + return 0; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Retry ELS: wd7:x%x wd4:x%x did:x%x", + *(((uint32_t *)irsp) + 7), ulp_word4, did); + + switch (ulp_status) { + case IOSTAT_FCP_RSP_ERROR: + break; + case IOSTAT_REMOTE_STOP: + if (phba->sli_rev == LPFC_SLI_REV4) { + /* This IO was aborted by the target, we don't + * know the rxid and because we did not send the + * ABTS we cannot generate and RRQ. + */ + lpfc_set_rrq_active(phba, ndlp, + cmdiocb->sli4_lxritag, 0, 0); + } + break; + case IOSTAT_LOCAL_REJECT: + switch ((ulp_word4 & IOERR_PARAM_MASK)) { + case IOERR_LOOP_OPEN_FAILURE: + if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) + delay = 1000; + retry = 1; + break; + + case IOERR_ILLEGAL_COMMAND: + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0124 Retry illegal cmd x%x " + "retry:x%x delay:x%x\n", + cmd, cmdiocb->retry, delay); + retry = 1; + /* All command's retry policy */ + maxretry = 8; + if (cmdiocb->retry > 2) + delay = 1000; + break; + + case IOERR_NO_RESOURCES: + logerr = 1; /* HBA out of resources */ + retry = 1; + if (cmdiocb->retry > 100) + delay = 100; + maxretry = 250; + break; + + case IOERR_ILLEGAL_FRAME: + delay = 100; + retry = 1; + break; + + case IOERR_INVALID_RPI: + if (cmd == ELS_CMD_PLOGI && + did == NameServer_DID) { + /* Continue forever if plogi to */ + /* the nameserver fails */ + maxretry = 0; + delay = 100; + } else if (cmd == ELS_CMD_PRLI && + ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { + /* State-command disagreement. The PRLI was + * failed with an invalid rpi meaning there + * some unexpected state change. Don't retry. + */ + maxretry = 0; + retry = 0; + break; + } + retry = 1; + break; + + case IOERR_SEQUENCE_TIMEOUT: + if (cmd == ELS_CMD_PLOGI && + did == NameServer_DID && + (cmdiocb->retry + 1) == maxretry) { + /* Reset the Link */ + link_reset = 1; + break; + } + retry = 1; + delay = 100; + break; + case IOERR_SLI_ABORTED: + /* Retry ELS PLOGI command? + * Possibly the rport just wasn't ready. + */ + if (cmd == ELS_CMD_PLOGI) { + /* No retry if state change */ + if (ndlp && + ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) + goto out_retry; + retry = 1; + maxretry = 2; + } + break; + } + break; + + case IOSTAT_NPORT_RJT: + case IOSTAT_FABRIC_RJT: + if (ulp_word4 & RJT_UNAVAIL_TEMP) { + retry = 1; + break; + } + break; + + case IOSTAT_NPORT_BSY: + case IOSTAT_FABRIC_BSY: + logerr = 1; /* Fabric / Remote NPort out of resources */ + retry = 1; + break; + + case IOSTAT_LS_RJT: + stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); + /* Added for Vendor specifc support + * Just keep retrying for these Rsn / Exp codes + */ + if ((vport->fc_flag & FC_PT2PT) && + cmd == ELS_CMD_NVMEPRLI) { + switch (stat.un.b.lsRjtRsnCode) { + case LSRJT_UNABLE_TPC: + case LSRJT_INVALID_CMD: + case LSRJT_LOGICAL_ERR: + case LSRJT_CMD_UNSUPPORTED: + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, + "0168 NVME PRLI LS_RJT " + "reason %x port doesn't " + "support NVME, disabling NVME\n", + stat.un.b.lsRjtRsnCode); + retry = 0; + vport->fc_flag |= FC_PT2PT_NO_NVME; + goto out_retry; + } + } + switch (stat.un.b.lsRjtRsnCode) { + case LSRJT_UNABLE_TPC: + /* Special case for PRLI LS_RJTs. 
Recall that lpfc + * uses a single routine to issue both PRLI FC4 types. + * If the PRLI is rejected because that FC4 type + * isn't really supported, don't retry and cause + * multiple transport registrations. Otherwise, parse + * the reason code/reason code explanation and take the + * appropriate action. + */ + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY | LOG_ELS | LOG_NODE, + "0153 ELS cmd x%x LS_RJT by x%x. " + "RsnCode x%x RsnCodeExp x%x\n", + cmd, did, stat.un.b.lsRjtRsnCode, + stat.un.b.lsRjtRsnCodeExp); + + switch (stat.un.b.lsRjtRsnCodeExp) { + case LSEXP_CANT_GIVE_DATA: + case LSEXP_CMD_IN_PROGRESS: + if (cmd == ELS_CMD_PLOGI) { + delay = 1000; + maxretry = 48; + } + retry = 1; + break; + case LSEXP_REQ_UNSUPPORTED: + case LSEXP_NO_RSRC_ASSIGN: + /* These explanation codes get no retry. */ + if (cmd == ELS_CMD_PRLI || + cmd == ELS_CMD_NVMEPRLI) + break; + fallthrough; + default: + /* Limit the delay and retry action to a limited + * cmd set. There are other ELS commands where + * a retry is not expected. + */ + if (cmd == ELS_CMD_PLOGI || + cmd == ELS_CMD_PRLI || + cmd == ELS_CMD_NVMEPRLI) { + delay = 1000; + maxretry = lpfc_max_els_tries + 1; + retry = 1; + } + break; + } + + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + (cmd == ELS_CMD_FDISC) && + (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0125 FDISC Failed (x%x). " + "Fabric out of resources\n", + stat.un.lsRjtError); + lpfc_vport_set_state(vport, + FC_VPORT_NO_FABRIC_RSCS); + } + break; + + case LSRJT_LOGICAL_BSY: + if ((cmd == ELS_CMD_PLOGI) || + (cmd == ELS_CMD_PRLI) || + (cmd == ELS_CMD_NVMEPRLI)) { + delay = 1000; + maxretry = 48; + } else if (cmd == ELS_CMD_FDISC) { + /* FDISC retry policy */ + maxretry = 48; + if (cmdiocb->retry >= 32) + delay = 1000; + } + retry = 1; + break; + + case LSRJT_LOGICAL_ERR: + /* There are some cases where switches return this + * error when they are not ready and should be returning + * Logical Busy. We should delay every time. + */ + if (cmd == ELS_CMD_FDISC && + stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { + maxretry = 3; + delay = 1000; + retry = 1; + } else if (cmd == ELS_CMD_FLOGI && + stat.un.b.lsRjtRsnCodeExp == + LSEXP_NOTHING_MORE) { + vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; + retry = 1; + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0820 FLOGI Failed (x%x). " + "BBCredit Not Supported\n", + stat.un.lsRjtError); + } + break; + + case LSRJT_PROTOCOL_ERR: + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + (cmd == ELS_CMD_FDISC) && + ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || + (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) + ) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0122 FDISC Failed (x%x). " + "Fabric Detected Bad WWN\n", + stat.un.lsRjtError); + lpfc_vport_set_state(vport, + FC_VPORT_FABRIC_REJ_WWN); + } + break; + case LSRJT_VENDOR_UNIQUE: + if ((stat.un.b.vendorUnique == 0x45) && + (cmd == ELS_CMD_FLOGI)) { + goto out_retry; + } + break; + case LSRJT_CMD_UNSUPPORTED: + /* lpfc nvmet returns this type of LS_RJT when it + * receives an FCP PRLI because lpfc nvmet only + * support NVME. ELS request is terminated for FCP4 + * on this rport. 
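+ * Jumping to out_retry with retry still clear means the FCP PRLI is
+ * simply not reissued to this node.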
+ */ + if (stat.un.b.lsRjtRsnCodeExp == + LSEXP_REQ_UNSUPPORTED) { + if (cmd == ELS_CMD_PRLI) + goto out_retry; + } + break; + } + break; + + case IOSTAT_INTERMED_RSP: + case IOSTAT_BA_RJT: + break; + + default: + break; + } + + if (link_reset) { + rc = lpfc_link_reset(vport); + if (rc) { + /* Do not give up. Retry PLOGI one more time and attempt + * link reset if PLOGI fails again. + */ + retry = 1; + delay = 100; + goto out_retry; + } + return 1; + } + + if (did == FDMI_DID) + retry = 1; + + if ((cmd == ELS_CMD_FLOGI) && + (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && + !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { + /* FLOGI retry policy */ + retry = 1; + /* retry FLOGI forever */ + if (phba->link_flag != LS_LOOPBACK_MODE) + maxretry = 0; + else + maxretry = 2; + + if (cmdiocb->retry >= 100) + delay = 5000; + else if (cmdiocb->retry >= 32) + delay = 1000; + } else if ((cmd == ELS_CMD_FDISC) && + !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { + /* retry FDISCs every second up to devloss */ + retry = 1; + maxretry = vport->cfg_devloss_tmo; + delay = 1000; + } + + cmdiocb->retry++; + if (maxretry && (cmdiocb->retry >= maxretry)) { + phba->fc_stat.elsRetryExceeded++; + retry = 0; + } + + if ((vport->load_flag & FC_UNLOADING) != 0) + retry = 0; + +out_retry: + if (retry) { + if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { + /* Stop retrying PLOGI and FDISC if in FCF discovery */ + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2849 Stop retry ELS command " + "x%x to remote NPORT x%x, " + "Data: x%x x%x\n", cmd, did, + cmdiocb->retry, delay); + return 0; + } + } + + /* Retry ELS command to remote NPORT */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0107 Retry ELS command x%x to remote " + "NPORT x%x Data: x%x x%x\n", + cmd, did, cmdiocb->retry, delay); + + if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && + ((ulp_status != IOSTAT_LOCAL_REJECT) || + ((ulp_word4 & IOERR_PARAM_MASK) != + IOERR_NO_RESOURCES))) { + /* Don't reset timer for no resources */ + + /* If discovery / RSCN timer is running, reset it */ + if (timer_pending(&vport->fc_disctmo) || + (vport->fc_flag & FC_RSCN_MODE)) + lpfc_set_disctmo(vport); + } + + phba->fc_stat.elsXmitRetry++; + if (ndlp && delay) { + phba->fc_stat.elsDelayRetry++; + ndlp->nlp_retry = cmdiocb->retry; + + /* delay is specified in milliseconds */ + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(delay)); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_DELAY_TMO; + spin_unlock_irq(&ndlp->lock); + + ndlp->nlp_prev_state = ndlp->nlp_state; + if ((cmd == ELS_CMD_PRLI) || + (cmd == ELS_CMD_NVMEPRLI)) + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_PRLI_ISSUE); + else if (cmd != ELS_CMD_ADISC) + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_NPR_NODE); + ndlp->nlp_last_elscmd = cmd; + + return 1; + } + switch (cmd) { + case ELS_CMD_FLOGI: + lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); + return 1; + case ELS_CMD_FDISC: + lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); + return 1; + case ELS_CMD_PLOGI: + if (ndlp) { + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_PLOGI_ISSUE); + } + lpfc_issue_els_plogi(vport, did, cmdiocb->retry); + return 1; + case ELS_CMD_ADISC: + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); + lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); + return 1; + case ELS_CMD_PRLI: + case ELS_CMD_NVMEPRLI: + ndlp->nlp_prev_state = ndlp->nlp_state; + 
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); + lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); + return 1; + case ELS_CMD_LOGO: + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); + lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); + return 1; + } + } + /* No retry ELS command to remote NPORT */ + if (logerr) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0137 No retry ELS command x%x to remote " + "NPORT x%x: Out of Resources: Error:x%x/%x\n", + cmd, did, ulp_status, + ulp_word4); + } + else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0108 No retry ELS command x%x to remote " + "NPORT x%x Retried:%d Error:x%x/%x\n", + cmd, did, cmdiocb->retry, ulp_status, + ulp_word4); + } + return 0; +} + +/** + * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb + * @phba: pointer to lpfc hba data structure. + * @buf_ptr1: pointer to the lpfc DMA buffer data structure. + * + * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) + * associated with a command IOCB back to the lpfc DMA buffer pool. It first + * checks to see whether there is a lpfc DMA buffer associated with the + * response of the command IOCB. If so, it will be released before releasing + * the lpfc DMA buffer associated with the IOCB itself. + * + * Return code + * 0 - Successfully released lpfc DMA buffer (currently, always return 0) + **/ +static int +lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) +{ + struct lpfc_dmabuf *buf_ptr; + + /* Free the response before processing the command. */ + if (!list_empty(&buf_ptr1->list)) { + list_remove_head(&buf_ptr1->list, buf_ptr, + struct lpfc_dmabuf, + list); + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); + kfree(buf_ptr); + } + lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); + kfree(buf_ptr1); + return 0; +} + +/** + * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl + * @phba: pointer to lpfc hba data structure. + * @buf_ptr: pointer to the lpfc dma buffer data structure. + * + * This routine releases the lpfc Direct Memory Access (DMA) buffer + * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer + * pool. + * + * Return code + * 0 - Successfully released lpfc DMA buffer (currently, always return 0) + **/ +static int +lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) +{ + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); + kfree(buf_ptr); + return 0; +} + +/** + * lpfc_els_free_iocb - Free a command iocb and its associated resources + * @phba: pointer to lpfc hba data structure. + * @elsiocb: pointer to lpfc els command iocb data structure. + * + * This routine frees a command IOCB and its associated resources. The + * command IOCB data structure contains the reference to various associated + * resources, these fields must be set to NULL if the associated reference + * not present: + * cmd_dmabuf - reference to cmd. + * cmd_dmabuf->next - reference to rsp + * rsp_dmabuf - unused + * bpl_dmabuf - reference to bpl + * + * It first properly decrements the reference count held on ndlp for the + * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not + * set, it invokes the lpfc_els_free_data() routine to release the Direct + * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it + * adds the DMA buffer the @phba data structure for the delayed release. 
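+ * In the delayed case the buffers are parked on the @phba elsbuf list
+ * and only freed after a heartbeat, once the firmware can no longer be
+ * DMAing into the payload.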
+ * If reference to the Buffer Pointer List (BPL) is present, the + * lpfc_els_free_bpl() routine is invoked to release the DMA memory + * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is + * invoked to release the IOCB data structure back to @phba IOCBQ list. + * + * Return code + * 0 - Success (currently, always return 0) + **/ +int +lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) +{ + struct lpfc_dmabuf *buf_ptr, *buf_ptr1; + + /* The I/O iocb is complete. Clear the node and first dmbuf */ + elsiocb->ndlp = NULL; + + /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */ + if (elsiocb->cmd_dmabuf) { + if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) { + /* Firmware could still be in progress of DMAing + * payload, so don't free data buffer till after + * a hbeat. + */ + elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE; + buf_ptr = elsiocb->cmd_dmabuf; + elsiocb->cmd_dmabuf = NULL; + if (buf_ptr) { + buf_ptr1 = NULL; + spin_lock_irq(&phba->hbalock); + if (!list_empty(&buf_ptr->list)) { + list_remove_head(&buf_ptr->list, + buf_ptr1, struct lpfc_dmabuf, + list); + INIT_LIST_HEAD(&buf_ptr1->list); + list_add_tail(&buf_ptr1->list, + &phba->elsbuf); + phba->elsbuf_cnt++; + } + INIT_LIST_HEAD(&buf_ptr->list); + list_add_tail(&buf_ptr->list, &phba->elsbuf); + phba->elsbuf_cnt++; + spin_unlock_irq(&phba->hbalock); + } + } else { + buf_ptr1 = elsiocb->cmd_dmabuf; + lpfc_els_free_data(phba, buf_ptr1); + elsiocb->cmd_dmabuf = NULL; + } + } + + if (elsiocb->bpl_dmabuf) { + buf_ptr = elsiocb->bpl_dmabuf; + lpfc_els_free_bpl(phba, buf_ptr); + elsiocb->bpl_dmabuf = NULL; + } + lpfc_sli_release_iocbq(phba, elsiocb); + return 0; +} + +/** + * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine is the completion callback function to the Logout (LOGO) + * Accept (ACC) Response ELS command. This routine is invoked to indicate + * the completion of the LOGO process. If the node has transitioned to NPR, + * this routine unregisters the RPI if it is still registered. The + * lpfc_els_free_iocb() is invoked to release the IOCB data structure. + **/ +static void +lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_nodelist *ndlp = cmdiocb->ndlp; + struct lpfc_vport *vport = cmdiocb->vport; + u32 ulp_status, ulp_word4; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + ulp_word4 = get_job_word4(phba, rspiocb); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "ACC LOGO cmpl: status:x%x/x%x did:x%x", + ulp_status, ulp_word4, ndlp->nlp_DID); + /* ACC to LOGO completes to NPort */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0109 ACC to LOGO completes to NPort x%x refcnt %d " + "Data: x%x x%x x%x\n", + ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, + ndlp->nlp_state, ndlp->nlp_rpi); + + /* This clause allows the LOGO ACC to complete and free resources + * for the Fabric Domain Controller. It does deliberately skip + * the unreg_rpi and release rpi because some fabrics send RDP + * requests after logging out from the initiator. + */ + if (ndlp->nlp_type & NLP_FABRIC && + ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) + goto out; + + if (ndlp->nlp_state == NLP_STE_NPR_NODE) { + /* If PLOGI is being retried, PLOGI completion will cleanup the + * node. 
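+ * A pending retry is recognized by NLP_DELAY_TMO being set together
+ * with nlp_last_elscmd == ELS_CMD_PLOGI, in which case the RPI is left
+ * registered.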
The NLP_NPR_2B_DISC flag needs to be retained to make + * progress on nodes discovered from last RSCN. + */ + if ((ndlp->nlp_flag & NLP_DELAY_TMO) && + (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) + goto out; + + if (ndlp->nlp_flag & NLP_RPI_REGISTERED) + lpfc_unreg_rpi(vport, ndlp); + + } + out: + /* + * The driver received a LOGO from the rport and has ACK'd it. + * At this point, the driver is done so release the IOCB + */ + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); +} + +/** + * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * This routine is the completion callback function for unregister default + * RPI (Remote Port Index) mailbox command to the @phba. It simply releases + * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and + * decrements the ndlp reference count held for this completion callback + * function. After that, it invokes the lpfc_drop_node to check + * whether it is appropriate to release the node. + **/ +void +lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; + u32 mbx_flag = pmb->mbox_flag; + u32 mbx_cmd = pmb->u.mb.mbxCommand; + + if (ndlp) { + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, + "0006 rpi x%x DID:%x flg:%x %d x%px " + "mbx_cmd x%x mbx_flag x%x x%px\n", + ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, + kref_read(&ndlp->kref), ndlp, mbx_cmd, + mbx_flag, pmb); + + /* This ends the default/temporary RPI cleanup logic for this + * ndlp and the node and rpi needs to be released. Free the rpi + * first on an UNREG_LOGIN and then release the final + * references. + */ + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + if (mbx_cmd == MBX_UNREG_LOGIN) + ndlp->nlp_flag &= ~NLP_UNREG_INP; + spin_unlock_irq(&ndlp->lock); + lpfc_nlp_put(ndlp); + lpfc_drop_node(ndlp->vport, ndlp); + } + + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); +} + +/** + * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine is the completion callback function for ELS Response IOCB + * command. In normal case, this callback function just properly sets the + * nlp_flag bitmap in the ndlp data structure, if the mbox command reference + * field in the command IOCB is not NULL, the referred mailbox command will + * be send out, and then invokes the lpfc_els_free_iocb() routine to release + * the IOCB. + **/ +static void +lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_nodelist *ndlp = cmdiocb->ndlp; + struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; + struct Scsi_Host *shost = vport ? 
lpfc_shost_from_vport(vport) : NULL; + IOCB_t *irsp; + LPFC_MBOXQ_t *mbox = NULL; + u32 ulp_status, ulp_word4, tmo, did, iotag; + + if (!vport) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3177 ELS response failed\n"); + goto out; + } + if (cmdiocb->context_un.mbox) + mbox = cmdiocb->context_un.mbox; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + ulp_word4 = get_job_word4(phba, rspiocb); + did = get_job_els_rsp64_did(phba, cmdiocb); + + if (phba->sli_rev == LPFC_SLI_REV4) { + tmo = get_wqe_tmo(cmdiocb); + iotag = get_wqe_reqtag(cmdiocb); + } else { + irsp = &rspiocb->iocb; + tmo = irsp->ulpTimeout; + iotag = irsp->ulpIoTag; + } + + /* Check to see if link went down during discovery */ + if (!ndlp || lpfc_els_chk_latt(vport)) { + if (mbox) + lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); + goto out; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "ELS rsp cmpl: status:x%x/x%x did:x%x", + ulp_status, ulp_word4, did); + /* ELS response tag completes */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0110 ELS response tag x%x completes " + "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", + iotag, ulp_status, ulp_word4, tmo, + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); + if (mbox) { + if (ulp_status == 0 + && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { + if (!lpfc_unreg_rpi(vport, ndlp) && + (!(vport->fc_flag & FC_PT2PT))) { + if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || + ndlp->nlp_state == + NLP_STE_REG_LOGIN_ISSUE) { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY, + "0314 PLOGI recov " + "DID x%x " + "Data: x%x x%x x%x\n", + ndlp->nlp_DID, + ndlp->nlp_state, + ndlp->nlp_rpi, + ndlp->nlp_flag); + goto out_free_mbox; + } + } + + /* Increment reference count to ndlp to hold the + * reference to ndlp for the callback function. + */ + mbox->ctx_ndlp = lpfc_nlp_get(ndlp); + if (!mbox->ctx_ndlp) + goto out_free_mbox; + + mbox->vport = vport; + if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { + mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; + mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; + } + else { + mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_REG_LOGIN_ISSUE); + } + + ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) + != MBX_NOT_FINISHED) + goto out; + + /* Decrement the ndlp reference count we + * set for this failed mailbox command. + */ + lpfc_nlp_put(ndlp); + ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + + /* ELS rsp: Cannot issue reg_login for */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0138 ELS rsp: Cannot issue reg_login for x%x " + "Data: x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi); + } +out_free_mbox: + lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); + } +out: + if (ndlp && shost) { + spin_lock_irq(&ndlp->lock); + if (mbox) + ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; + ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; + spin_unlock_irq(&ndlp->lock); + } + + /* An SLI4 NPIV instance wants to drop the node at this point under + * these conditions and release the RPI. 
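+ * The node must not be registered with the SCSI transport, and the RPI
+ * itself is freed only when the node is not in the PLOGI_ISSUE or
+ * REG_LOGIN_ISSUE state.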
+ */ + if (phba->sli_rev == LPFC_SLI_REV4 && + vport && vport->port_type == LPFC_NPIV_PORT && + !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { + if (ndlp->nlp_flag & NLP_RELEASE_RPI) { + if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && + ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { + lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + ndlp->nlp_flag &= ~NLP_RELEASE_RPI; + spin_unlock_irq(&ndlp->lock); + } + lpfc_drop_node(vport, ndlp); + } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && + ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && + ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { + /* Drop ndlp if there is no planned or outstanding + * issued PRLI. + * + * In cases when the ndlp is acting as both an initiator + * and target function, let our issued PRLI determine + * the final ndlp kref drop. + */ + lpfc_drop_node(vport, ndlp); + } + } + + /* Release the originating I/O reference. */ + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); + return; +} + +/** + * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command + * @vport: pointer to a host virtual N_Port data structure. + * @flag: the els command code to be accepted. + * @oldiocb: pointer to the original lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * @mbox: pointer to the driver internal queue element for mailbox command. + * + * This routine prepares and issues an Accept (ACC) response IOCB + * command. It uses the @flag to properly set up the IOCB field for the + * specific ACC response command to be issued and invokes the + * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a + * @mbox pointer is passed in, it will be put into the context_un.mbox + * field of the IOCB for the completion callback function to issue the + * mailbox command to the HBA later when callback is invoked. + * + * Note that the ndlp reference count will be incremented by 1 for holding the + * ndlp and the reference to ndlp will be stored into the ndlp field of + * the IOCB for the completion callback function to the corresponding + * response ELS IOCB command. 
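+ *
+ * The @flag value selects the payload that is built: ELS_CMD_ACC (a
+ * bare ACC), ELS_CMD_FLOGI and ELS_CMD_PLOGI (ACC carrying service
+ * parameters), ELS_CMD_PRLO, or ELS_CMD_RDF. As an illustration only,
+ * a handler of an unsolicited PLOGI might accept it with a call such
+ * as the following (variable names are the caller's own):
+ *
+ *	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);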
+ * + * Return code + * 0 - Successfully issued acc response + * 1 - Failed to issue acc response + **/ +int +lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, + struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, + LPFC_MBOXQ_t *mbox) +{ + struct lpfc_hba *phba = vport->phba; + IOCB_t *icmd; + IOCB_t *oldcmd; + union lpfc_wqe128 *wqe; + union lpfc_wqe128 *oldwqe = &oldiocb->wqe; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + struct serv_parm *sp; + uint16_t cmdsize; + int rc; + ELS_PKT *els_pkt_ptr; + struct fc_els_rdf_resp *rdf_resp; + + switch (flag) { + case ELS_CMD_ACC: + cmdsize = sizeof(uint32_t); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, + ndlp, ndlp->nlp_DID, ELS_CMD_ACC); + if (!elsiocb) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_LOGO_ACC; + spin_unlock_irq(&ndlp->lock); + return 1; + } + + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + /* XRI / rx_id */ + bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, + bf_get(wqe_ctxt_tag, + &oldwqe->xmit_els_rsp.wqe_com)); + + /* oxid */ + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + bf_get(wqe_rcvoxid, + &oldwqe->xmit_els_rsp.wqe_com)); + } else { + icmd = &elsiocb->iocb; + oldcmd = &oldiocb->iocb; + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = + oldcmd->unsli3.rcvsli3.ox_id; + } + + pcmd = elsiocb->cmd_dmabuf->virt; + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; + pcmd += sizeof(uint32_t); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue ACC: did:x%x flg:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, 0); + break; + case ELS_CMD_FLOGI: + case ELS_CMD_PLOGI: + cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, + ndlp, ndlp->nlp_DID, ELS_CMD_ACC); + if (!elsiocb) + return 1; + + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + /* XRI / rx_id */ + bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, + bf_get(wqe_ctxt_tag, + &oldwqe->xmit_els_rsp.wqe_com)); + + /* oxid */ + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + bf_get(wqe_rcvoxid, + &oldwqe->xmit_els_rsp.wqe_com)); + } else { + icmd = &elsiocb->iocb; + oldcmd = &oldiocb->iocb; + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = + oldcmd->unsli3.rcvsli3.ox_id; + } + + pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; + + if (mbox) + elsiocb->context_un.mbox = mbox; + + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; + pcmd += sizeof(uint32_t); + sp = (struct serv_parm *)pcmd; + + if (flag == ELS_CMD_FLOGI) { + /* Copy the received service parameters back */ + memcpy(sp, &phba->fc_fabparam, + sizeof(struct serv_parm)); + + /* Clear the F_Port bit */ + sp->cmn.fPort = 0; + + /* Mark all class service parameters as invalid */ + sp->cls1.classValid = 0; + sp->cls2.classValid = 0; + sp->cls3.classValid = 0; + sp->cls4.classValid = 0; + + /* Copy our worldwide names */ + memcpy(&sp->portName, &vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, + sizeof(struct lpfc_name)); + } else { + memcpy(pcmd, &vport->fc_sparam, + sizeof(struct serv_parm)); + + sp->cmn.valid_vendor_ver_level = 0; + memset(sp->un.vendorVersion, 0, + sizeof(sp->un.vendorVersion)); + sp->cmn.bbRcvSizeMsb &= 0xF; + + /* If our firmware supports this feature, convey that + * info to the target using the vendor specific field. 
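+ * Here the feature is the suppress-response capability: when
+ * LPFC_SLI_SUPPRESS_RSP is set, the vendor version words below carry
+ * LPFC_VV_EMLX_ID and LPFC_VV_SUPPRESS_RSP.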
+ */ + if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { + sp->cmn.valid_vendor_ver_level = 1; + sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); + sp->un.vv.flags = + cpu_to_be32(LPFC_VV_SUPPRESS_RSP); + } + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, 0); + break; + case ELS_CMD_PRLO: + cmdsize = sizeof(uint32_t) + sizeof(PRLO); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, + ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); + if (!elsiocb) + return 1; + + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + /* XRI / rx_id */ + bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, + bf_get(wqe_ctxt_tag, + &oldwqe->xmit_els_rsp.wqe_com)); + + /* oxid */ + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + bf_get(wqe_rcvoxid, + &oldwqe->xmit_els_rsp.wqe_com)); + } else { + icmd = &elsiocb->iocb; + oldcmd = &oldiocb->iocb; + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = + oldcmd->unsli3.rcvsli3.ox_id; + } + + pcmd = (u8 *) elsiocb->cmd_dmabuf->virt; + + memcpy(pcmd, oldiocb->cmd_dmabuf->virt, + sizeof(uint32_t) + sizeof(PRLO)); + *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; + els_pkt_ptr = (ELS_PKT *) pcmd; + els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue ACC PRLO: did:x%x flg:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, 0); + break; + case ELS_CMD_RDF: + cmdsize = sizeof(*rdf_resp); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, + ndlp, ndlp->nlp_DID, ELS_CMD_ACC); + if (!elsiocb) + return 1; + + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + /* XRI / rx_id */ + bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, + bf_get(wqe_ctxt_tag, + &oldwqe->xmit_els_rsp.wqe_com)); + + /* oxid */ + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + bf_get(wqe_rcvoxid, + &oldwqe->xmit_els_rsp.wqe_com)); + } else { + icmd = &elsiocb->iocb; + oldcmd = &oldiocb->iocb; + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = + oldcmd->unsli3.rcvsli3.ox_id; + } + + pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; + rdf_resp = (struct fc_els_rdf_resp *)pcmd; + memset(rdf_resp, 0, sizeof(*rdf_resp)); + rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; + + /* FC-LS-5 specifies desc_list_len shall be set to 12 */ + rdf_resp->desc_list_len = cpu_to_be32(12); + + /* FC-LS-5 specifies LS REQ Information descriptor */ + rdf_resp->lsri.desc_tag = cpu_to_be32(1); + rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); + rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; + break; + default: + return 1; + } + if (ndlp->nlp_flag & NLP_LOGO_ACC) { + spin_lock_irq(&ndlp->lock); + if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || + ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) + ndlp->nlp_flag &= ~NLP_LOGO_ACC; + spin_unlock_irq(&ndlp->lock); + elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; + } else { + elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; + } + + phba->fc_stat.elsXmitACC++; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return 1; + } + + /* Xmit ELS ACC response tag */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " + "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "RPI: x%x, fc_flag x%x refcnt %d\n", + rc, elsiocb->iotag, 
elsiocb->sli4_xritag, + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref)); + return 0; +} + +/** + * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command + * @vport: pointer to a virtual N_Port data structure. + * @rejectError: reject response to issue + * @oldiocb: pointer to the original lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * @mbox: pointer to the driver internal queue element for mailbox command. + * + * This routine prepares and issue an Reject (RJT) response IOCB + * command. If a @mbox pointer is passed in, it will be put into the + * context_un.mbox field of the IOCB for the completion callback function + * to issue to the HBA later. + * + * Note that the ndlp reference count will be incremented by 1 for holding the + * ndlp and the reference to ndlp will be stored into the ndlp field of + * the IOCB for the completion callback function to the reject response + * ELS IOCB command. + * + * Return code + * 0 - Successfully issued reject response + * 1 - Failed to issue reject response + **/ +int +lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, + struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, + LPFC_MBOXQ_t *mbox) +{ + int rc; + struct lpfc_hba *phba = vport->phba; + IOCB_t *icmd; + IOCB_t *oldcmd; + union lpfc_wqe128 *wqe; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + uint16_t cmdsize; + + cmdsize = 2 * sizeof(uint32_t); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, + ndlp->nlp_DID, ELS_CMD_LS_RJT); + if (!elsiocb) + return 1; + + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, + get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + get_job_rcvoxid(phba, oldiocb)); + } else { + icmd = &elsiocb->iocb; + oldcmd = &oldiocb->iocb; + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; + } + + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + + *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; + pcmd += sizeof(uint32_t); + *((uint32_t *) (pcmd)) = rejectError; + + if (mbox) + elsiocb->context_un.mbox = mbox; + + /* Xmit ELS RJT response tag */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0129 Xmit ELS RJT x%x response tag x%x " + "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " + "rpi x%x\n", + rejectError, elsiocb->iotag, + get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, + ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue LS_RJT: did:x%x flg:x%x err:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, rejectError); + + phba->fc_stat.elsXmitLSRJT++; + elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + + /* The NPIV instance is rejecting this unsolicited ELS. Make sure the + * node's assigned RPI gets released provided this node is not already + * registered with the transport. 
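+ * Setting NLP_RELEASE_RPI here defers the actual RPI free to the
+ * response completion path (lpfc_cmpl_els_rsp()).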
+ */ + if (phba->sli_rev == LPFC_SLI_REV4 && + vport->port_type == LPFC_NPIV_PORT && + !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_RELEASE_RPI; + spin_unlock_irq(&ndlp->lock); + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return 1; + } + + return 0; +} + + /** + * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to the original lpfc command iocb data structure. + * @ndlp: NPort to where rsp is directed + * + * This routine issues an EDC ACC RSP to the F-Port Controller to communicate + * this N_Port's support of hardware signals in its Congestion + * Capabilities Descriptor. + * + * Return code + * 0 - Successfully issued edc rsp command + * 1 - Failed to issue edc rsp command + **/ +static int +lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + struct fc_els_edc_resp *edc_rsp; + struct fc_tlv_desc *tlv; + struct lpfc_iocbq *elsiocb; + IOCB_t *icmd, *cmd; + union lpfc_wqe128 *wqe; + u32 cgn_desc_size, lft_desc_size; + u16 cmdsize; + uint8_t *pcmd; + int rc; + + cmdsize = sizeof(struct fc_els_edc_resp); + cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); + lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? + sizeof(struct fc_diag_lnkflt_desc) : 0; + cmdsize += cgn_desc_size + lft_desc_size; + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, + ndlp, ndlp->nlp_DID, ELS_CMD_ACC); + if (!elsiocb) + return 1; + + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, + get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */ + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + get_job_rcvoxid(phba, cmdiocb)); + } else { + icmd = &elsiocb->iocb; + cmd = &cmdiocb->iocb; + icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; + } + + pcmd = elsiocb->cmd_dmabuf->virt; + memset(pcmd, 0, cmdsize); + + edc_rsp = (struct fc_els_edc_resp *)pcmd; + edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC; + edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) + + cgn_desc_size + lft_desc_size); + edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); + edc_rsp->lsri.desc_len = cpu_to_be32( + FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); + edc_rsp->lsri.rqst_w0.cmd = ELS_EDC; + tlv = edc_rsp->desc; + lpfc_format_edc_cgn_desc(phba, tlv); + tlv = fc_tlv_next_desc(tlv); + if (lft_desc_size) + lpfc_format_edc_lft_desc(phba, tlv); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue EDC ACC: did:x%x flg:x%x refcnt %d", + ndlp->nlp_DID, ndlp->nlp_flag, + kref_read(&ndlp->kref)); + elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; + + phba->fc_stat.elsXmitACC++; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return 1; + } + + /* Xmit ELS ACC response tag */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " + "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "RPI: x%x, fc_flag x%x\n", + rc, elsiocb->iotag, elsiocb->sli4_xritag, + ndlp->nlp_DID, 
ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi, vport->fc_flag); + + return 0; +} + +/** + * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd + * @vport: pointer to a virtual N_Port data structure. + * @oldiocb: pointer to the original lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine prepares and issues an Accept (ACC) response to Address + * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB + * and invokes the lpfc_sli_issue_iocb() routine to send out the command. + * + * Note that the ndlp reference count will be incremented by 1 for holding the + * ndlp and the reference to ndlp will be stored into the ndlp field of + * the IOCB for the completion callback function to the ADISC Accept response + * ELS IOCB command. + * + * Return code + * 0 - Successfully issued acc adisc response + * 1 - Failed to issue adisc acc response + **/ +int +lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + ADISC *ap; + IOCB_t *icmd, *oldcmd; + union lpfc_wqe128 *wqe; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + uint16_t cmdsize; + int rc; + u32 ulp_context; + + cmdsize = sizeof(uint32_t) + sizeof(ADISC); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); + if (!elsiocb) + return 1; + + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + /* XRI / rx_id */ + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, + get_job_ulpcontext(phba, oldiocb)); + ulp_context = get_job_ulpcontext(phba, elsiocb); + /* oxid */ + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + get_job_rcvoxid(phba, oldiocb)); + } else { + icmd = &elsiocb->iocb; + oldcmd = &oldiocb->iocb; + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + ulp_context = elsiocb->iocb.ulpContext; + icmd->unsli3.rcvsli3.ox_id = + oldcmd->unsli3.rcvsli3.ox_id; + } + + /* Xmit ADISC ACC response tag */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0130 Xmit ADISC ACC response iotag x%x xri: " + "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", + elsiocb->iotag, ulp_context, + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi); + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; + pcmd += sizeof(uint32_t); + + ap = (ADISC *) (pcmd); + ap->hardAL_PA = phba->fc_pref_ALPA; + memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); + memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); + ap->DID = be32_to_cpu(vport->fc_myDID); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", + ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); + + phba->fc_stat.elsXmitACC++; + elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return 1; + } + + return 0; +} + +/** + * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd + * @vport: pointer to a virtual N_Port data structure. + * @oldiocb: pointer to the original lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine prepares and issues an Accept (ACC) response to Process + * Login (PRLI) ELS command. 
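+ * The accept is built as either an FCP or an NVME PRLI response, based
+ * on the FC4 type carried in word 1 of the received PRLI payload.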
It simply prepares the payload of the IOCB + * and invokes the lpfc_sli_issue_iocb() routine to send out the command. + * + * Note that the ndlp reference count will be incremented by 1 for holding the + * ndlp and the reference to ndlp will be stored into the ndlp field of + * the IOCB for the completion callback function to the PRLI Accept response + * ELS IOCB command. + * + * Return code + * 0 - Successfully issued acc prli response + * 1 - Failed to issue acc prli response + **/ +int +lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + PRLI *npr; + struct lpfc_nvme_prli *npr_nvme; + lpfc_vpd_t *vpd; + IOCB_t *icmd; + IOCB_t *oldcmd; + union lpfc_wqe128 *wqe; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + uint16_t cmdsize; + uint32_t prli_fc4_req, *req_payload; + struct lpfc_dmabuf *req_buf; + int rc; + u32 elsrspcmd, ulp_context; + + /* Need the incoming PRLI payload to determine if the ACC is for an + * FC4 or NVME PRLI type. The PRLI type is at word 1. + */ + req_buf = oldiocb->cmd_dmabuf; + req_payload = (((uint32_t *)req_buf->virt) + 1); + + /* PRLI type payload is at byte 3 for FCP or NVME. */ + prli_fc4_req = be32_to_cpu(*req_payload); + prli_fc4_req = (prli_fc4_req >> 24) & 0xff; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", + prli_fc4_req, *((uint32_t *)req_payload)); + + if (prli_fc4_req == PRLI_FCP_TYPE) { + cmdsize = sizeof(uint32_t) + sizeof(PRLI); + elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); + } else if (prli_fc4_req == PRLI_NVME_TYPE) { + cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); + elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); + } else { + return 1; + } + + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, + ndlp->nlp_DID, elsrspcmd); + if (!elsiocb) + return 1; + + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, + get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ + ulp_context = get_job_ulpcontext(phba, elsiocb); + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + get_job_rcvoxid(phba, oldiocb)); + } else { + icmd = &elsiocb->iocb; + oldcmd = &oldiocb->iocb; + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + ulp_context = elsiocb->iocb.ulpContext; + icmd->unsli3.rcvsli3.ox_id = + oldcmd->unsli3.rcvsli3.ox_id; + } + + /* Xmit PRLI ACC response tag */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0131 Xmit PRLI ACC response tag x%x xri x%x, " + "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", + elsiocb->iotag, ulp_context, + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi); + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + memset(pcmd, 0, cmdsize); + + *((uint32_t *)(pcmd)) = elsrspcmd; + pcmd += sizeof(uint32_t); + + /* For PRLI, remainder of payload is PRLI parameter page */ + vpd = &phba->vpd; + + if (prli_fc4_req == PRLI_FCP_TYPE) { + /* + * If the remote port is a target and our firmware version + * is 3.20 or later, set the following bits for FC-TAPE + * support. + */ + npr = (PRLI *) pcmd; + if ((ndlp->nlp_type & NLP_FCP_TARGET) && + (vpd->rev.feaLevelHigh >= 0x02)) { + npr->ConfmComplAllowed = 1; + npr->Retry = 1; + npr->TaskRetryIdReq = 1; + } + npr->acceptRspCode = PRLI_REQ_EXECUTED; + + /* Set image pair for complementary pairs only. 
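+ * estabImagePair is set only when the remote port is also an FCP
+ * target, i.e. the two ports form a complementary initiator/target
+ * pair.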
*/ + if (ndlp->nlp_type & NLP_FCP_TARGET) + npr->estabImagePair = 1; + else + npr->estabImagePair = 0; + npr->readXferRdyDis = 1; + npr->ConfmComplAllowed = 1; + npr->prliType = PRLI_FCP_TYPE; + npr->initiatorFunc = 1; + + /* Xmit PRLI ACC response tag */ + lpfc_printf_vlog(vport, KERN_INFO, + LOG_ELS | LOG_NODE | LOG_DISCOVERY, + "6014 FCP issue PRLI ACC imgpair %d " + "retry %d task %d\n", + npr->estabImagePair, + npr->Retry, npr->TaskRetryIdReq); + + } else if (prli_fc4_req == PRLI_NVME_TYPE) { + /* Respond with an NVME PRLI Type */ + npr_nvme = (struct lpfc_nvme_prli *) pcmd; + bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); + bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ + bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); + if (phba->nvmet_support) { + bf_set(prli_tgt, npr_nvme, 1); + bf_set(prli_disc, npr_nvme, 1); + if (phba->cfg_nvme_enable_fb) { + bf_set(prli_fba, npr_nvme, 1); + + /* TBD. Target mode needs to post buffers + * that support the configured first burst + * byte size. + */ + bf_set(prli_fb_sz, npr_nvme, + phba->cfg_nvmet_fb_size); + } + } else { + bf_set(prli_init, npr_nvme, 1); + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6015 NVME issue PRLI ACC word1 x%08x " + "word4 x%08x word5 x%08x flag x%x, " + "fcp_info x%x nlp_type x%x\n", + npr_nvme->word1, npr_nvme->word4, + npr_nvme->word5, ndlp->nlp_flag, + ndlp->nlp_fcp_info, ndlp->nlp_type); + npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); + npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); + npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); + } else + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", + prli_fc4_req, ndlp->nlp_fc4_type, + ndlp->nlp_DID); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue ACC PRLI: did:x%x flg:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); + + phba->fc_stat.elsXmitACC++; + elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return 1; + } + + return 0; +} + +/** + * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command + * @vport: pointer to a virtual N_Port data structure. + * @format: rnid command format. + * @oldiocb: pointer to the original lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine issues a Request Node Identification Data (RNID) Accept + * (ACC) response. It constructs the RNID ACC response command according to + * the proper @format and then calls the lpfc_sli_issue_iocb() routine to + * issue the response. + * + * Note that the ndlp reference count will be incremented by 1 for holding the + * ndlp and the reference to ndlp will be stored into the ndlp field of + * the IOCB for the completion callback function. 
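+ *
+ * A @format of zero returns only the common port and node name data;
+ * RNID_TOPOLOGY_DISC appends the topology discovery block
+ * (RNID_TOP_DISC); any other format results in zero-length common and
+ * specific data.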
+ * + * Return code + * 0 - Successfully issued acc rnid response + * 1 - Failed to issue acc rnid response + **/ +static int +lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, + struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + RNID *rn; + IOCB_t *icmd, *oldcmd; + union lpfc_wqe128 *wqe; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + uint16_t cmdsize; + int rc; + u32 ulp_context; + + cmdsize = sizeof(uint32_t) + sizeof(uint32_t) + + (2 * sizeof(struct lpfc_name)); + if (format) + cmdsize += sizeof(RNID_TOP_DISC); + + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); + if (!elsiocb) + return 1; + + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, + get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ + ulp_context = get_job_ulpcontext(phba, elsiocb); + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + get_job_rcvoxid(phba, oldiocb)); + } else { + icmd = &elsiocb->iocb; + oldcmd = &oldiocb->iocb; + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + ulp_context = elsiocb->iocb.ulpContext; + icmd->unsli3.rcvsli3.ox_id = + oldcmd->unsli3.rcvsli3.ox_id; + } + + /* Xmit RNID ACC response tag */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0132 Xmit RNID ACC response tag x%x xri x%x\n", + elsiocb->iotag, ulp_context); + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; + pcmd += sizeof(uint32_t); + + memset(pcmd, 0, sizeof(RNID)); + rn = (RNID *) (pcmd); + rn->Format = format; + rn->CommonLen = (2 * sizeof(struct lpfc_name)); + memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); + memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); + switch (format) { + case 0: + rn->SpecificLen = 0; + break; + case RNID_TOPOLOGY_DISC: + rn->SpecificLen = sizeof(RNID_TOP_DISC); + memcpy(&rn->un.topologyDisc.portName, + &vport->fc_portname, sizeof(struct lpfc_name)); + rn->un.topologyDisc.unitType = RNID_HBA; + rn->un.topologyDisc.physPort = 0; + rn->un.topologyDisc.attachedNodes = 0; + break; + default: + rn->CommonLen = 0; + rn->SpecificLen = 0; + break; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue ACC RNID: did:x%x flg:x%x refcnt %d", + ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); + + phba->fc_stat.elsXmitACC++; + elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return 1; + } + + return 0; +} + +/** + * lpfc_els_clear_rrq - Clear the rq that this rrq describes. + * @vport: pointer to a virtual N_Port data structure. + * @iocb: pointer to the lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. 
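+ *
+ * This routine parses the RRQ payload of the received @iocb, selects
+ * the exchange id to clear (the OXID when the RRQ SID is this vport's
+ * DID, otherwise the RXID) and clears the matching entry on the
+ * vport's active RRQ list.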
+ * + * Return + **/ +static void +lpfc_els_clear_rrq(struct lpfc_vport *vport, + struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + uint8_t *pcmd; + struct RRQ *rrq; + uint16_t rxid; + uint16_t xri; + struct lpfc_node_rrq *prrq; + + + pcmd = (uint8_t *)iocb->cmd_dmabuf->virt; + pcmd += sizeof(uint32_t); + rrq = (struct RRQ *)pcmd; + rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); + rxid = bf_get(rrq_rxid, rrq); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" + " x%x x%x\n", + be32_to_cpu(bf_get(rrq_did, rrq)), + bf_get(rrq_oxid, rrq), + rxid, + get_wqe_reqtag(iocb), + get_job_ulpcontext(phba, iocb)); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", + ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); + if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) + xri = bf_get(rrq_oxid, rrq); + else + xri = rxid; + prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); + if (prrq) + lpfc_clr_rrq_active(phba, xri, prrq); + return; +} + +/** + * lpfc_els_rsp_echo_acc - Issue echo acc response + * @vport: pointer to a virtual N_Port data structure. + * @data: pointer to echo data to return in the accept. + * @oldiocb: pointer to the original lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * Return code + * 0 - Successfully issued acc echo response + * 1 - Failed to issue acc echo response + **/ +static int +lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, + struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + IOCB_t *icmd, *oldcmd; + union lpfc_wqe128 *wqe; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + uint16_t cmdsize; + int rc; + u32 ulp_context; + + if (phba->sli_rev == LPFC_SLI_REV4) + cmdsize = oldiocb->wcqe_cmpl.total_data_placed; + else + cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; + + /* The accumulated length can exceed the BPL_SIZE. 
For + * now, use this as the limit + */ + if (cmdsize > LPFC_BPL_SIZE) + cmdsize = LPFC_BPL_SIZE; + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); + if (!elsiocb) + return 1; + + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, + get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ + ulp_context = get_job_ulpcontext(phba, elsiocb); + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + get_job_rcvoxid(phba, oldiocb)); + } else { + icmd = &elsiocb->iocb; + oldcmd = &oldiocb->iocb; + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + ulp_context = elsiocb->iocb.ulpContext; + icmd->unsli3.rcvsli3.ox_id = + oldcmd->unsli3.rcvsli3.ox_id; + } + + /* Xmit ECHO ACC response tag */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2876 Xmit ECHO ACC response tag x%x xri x%x\n", + elsiocb->iotag, ulp_context); + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; + pcmd += sizeof(uint32_t); + memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", + ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); + + phba->fc_stat.elsXmitACC++; + elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return 1; + } + + return 0; +} + +/** + * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine issues Address Discover (ADISC) ELS commands to those + * N_Ports which are in node port recovery state and ADISC has not been issued + * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the + * lpfc_issue_els_adisc() routine, the per @vport number of discover count + * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a + * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will + * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC + * IOCBs quit for later pick up. On the other hand, after walking through + * all the ndlps with the @vport and there is none ADISC IOCB issued, the + * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is + * no more ADISC need to be sent. + * + * Return code + * The number of N_Ports with adisc issued. + **/ +int +lpfc_els_disc_adisc(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp, *next_ndlp; + int sentadisc = 0; + + /* go thru NPR nodes and issue any remaining ELS ADISCs */ + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + + if (ndlp->nlp_state != NLP_STE_NPR_NODE || + !(ndlp->nlp_flag & NLP_NPR_ADISC)) + continue; + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(&ndlp->lock); + + if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + /* This node was marked for ADISC but was not picked + * for discovery. This is possible if the node was + * missing in gidft response. 
+ * + * At time of marking node for ADISC, we skipped unreg + * from backend + */ + lpfc_nlp_unreg_node(vport, ndlp); + lpfc_unreg_rpi(vport, ndlp); + continue; + } + + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); + lpfc_issue_els_adisc(vport, ndlp, 0); + sentadisc++; + vport->num_disc_nodes++; + if (vport->num_disc_nodes >= + vport->cfg_discovery_threads) { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_NLP_MORE; + spin_unlock_irq(shost->host_lock); + break; + } + + } + if (sentadisc == 0) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NLP_MORE; + spin_unlock_irq(shost->host_lock); + } + return sentadisc; +} + +/** + * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports + * which are in node port recovery state, with a @vport. Each time an ELS + * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, + * the per @vport number of discover count (num_disc_nodes) shall be + * incremented. If the num_disc_nodes reaches a pre-configured threshold + * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE + * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for + * later pick up. On the other hand, after walking through all the ndlps with + * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag + * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC + * PLOGI need to be sent. + * + * Return code + * The number of N_Ports with plogi issued. + **/ +int +lpfc_els_disc_plogi(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp, *next_ndlp; + int sentplogi = 0; + + /* go thru NPR nodes and issue any remaining ELS PLOGIs */ + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (ndlp->nlp_state == NLP_STE_NPR_NODE && + (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && + (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && + (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); + sentplogi++; + vport->num_disc_nodes++; + if (vport->num_disc_nodes >= + vport->cfg_discovery_threads) { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_NLP_MORE; + spin_unlock_irq(shost->host_lock); + break; + } + } + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6452 Discover PLOGI %d flag x%x\n", + sentplogi, vport->fc_flag); + + if (sentplogi) { + lpfc_set_disctmo(vport); + } + else { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NLP_MORE; + spin_unlock_irq(shost->host_lock); + } + return sentplogi; +} + +static uint32_t +lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, + uint32_t word0) +{ + + desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); + desc->payload.els_req = word0; + desc->length = cpu_to_be32(sizeof(desc->payload)); + + return sizeof(struct fc_rdp_link_service_desc); +} + +static uint32_t +lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, + uint8_t *page_a0, uint8_t *page_a2) +{ + uint16_t wavelength; + uint16_t temperature; + uint16_t rx_power; + uint16_t tx_bias; + uint16_t tx_power; + uint16_t vcc; + uint16_t flag = 0; + struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; + struct sff_trasnceiver_codes_byte5 
*trasn_code_byte5; + + desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); + + trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) + &page_a0[SSF_TRANSCEIVER_CODE_B4]; + trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) + &page_a0[SSF_TRANSCEIVER_CODE_B5]; + + if ((trasn_code_byte4->fc_sw_laser) || + (trasn_code_byte5->fc_sw_laser_sl) || + (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ + flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); + } else if (trasn_code_byte4->fc_lw_laser) { + wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | + page_a0[SSF_WAVELENGTH_B0]; + if (wavelength == SFP_WAVELENGTH_LC1310) + flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; + if (wavelength == SFP_WAVELENGTH_LL1550) + flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; + } + /* check if its SFP+ */ + flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? + SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) + << SFP_FLAG_CT_SHIFT; + + /* check if its OPTICAL */ + flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? + SFP_FLAG_IS_OPTICAL_PORT : 0) + << SFP_FLAG_IS_OPTICAL_SHIFT; + + temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | + page_a2[SFF_TEMPERATURE_B0]); + vcc = (page_a2[SFF_VCC_B1] << 8 | + page_a2[SFF_VCC_B0]); + tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | + page_a2[SFF_TXPOWER_B0]); + tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | + page_a2[SFF_TX_BIAS_CURRENT_B0]); + rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | + page_a2[SFF_RXPOWER_B0]); + desc->sfp_info.temperature = cpu_to_be16(temperature); + desc->sfp_info.rx_power = cpu_to_be16(rx_power); + desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); + desc->sfp_info.tx_power = cpu_to_be16(tx_power); + desc->sfp_info.vcc = cpu_to_be16(vcc); + + desc->sfp_info.flags = cpu_to_be16(flag); + desc->length = cpu_to_be32(sizeof(desc->sfp_info)); + + return sizeof(struct fc_rdp_sfp_desc); +} + +static uint32_t +lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, + READ_LNK_VAR *stat) +{ + uint32_t type; + + desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); + + type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; + + desc->info.port_type = cpu_to_be32(type); + + desc->info.link_status.link_failure_cnt = + cpu_to_be32(stat->linkFailureCnt); + desc->info.link_status.loss_of_synch_cnt = + cpu_to_be32(stat->lossSyncCnt); + desc->info.link_status.loss_of_signal_cnt = + cpu_to_be32(stat->lossSignalCnt); + desc->info.link_status.primitive_seq_proto_err = + cpu_to_be32(stat->primSeqErrCnt); + desc->info.link_status.invalid_trans_word = + cpu_to_be32(stat->invalidXmitWord); + desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); + + desc->length = cpu_to_be32(sizeof(desc->info)); + + return sizeof(struct fc_rdp_link_error_status_desc); +} + +static uint32_t +lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, + struct lpfc_vport *vport) +{ + uint32_t bbCredit; + + desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); + + bbCredit = vport->fc_sparam.cmn.bbCreditLsb | + (vport->fc_sparam.cmn.bbCreditMsb << 8); + desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); + if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { + bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | + (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); + desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); + } else { + desc->bbc_info.attached_port_bbc = 0; + } + + desc->bbc_info.rtt = 0; + desc->length = cpu_to_be32(sizeof(desc->bbc_info)); + + return sizeof(struct fc_rdp_bbc_desc); +} + +static uint32_t 
+lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, + struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) +{ + uint32_t flags = 0; + + desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); + + desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; + desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; + desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; + desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; + + if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) + flags |= RDP_OET_HIGH_ALARM; + if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) + flags |= RDP_OET_LOW_ALARM; + if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) + flags |= RDP_OET_HIGH_WARNING; + if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) + flags |= RDP_OET_LOW_WARNING; + + flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); + desc->oed_info.function_flags = cpu_to_be32(flags); + desc->length = cpu_to_be32(sizeof(desc->oed_info)); + return sizeof(struct fc_rdp_oed_sfp_desc); +} + +static uint32_t +lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, + struct fc_rdp_oed_sfp_desc *desc, + uint8_t *page_a2) +{ + uint32_t flags = 0; + + desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); + + desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; + desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; + desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; + desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; + + if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) + flags |= RDP_OET_HIGH_ALARM; + if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) + flags |= RDP_OET_LOW_ALARM; + if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) + flags |= RDP_OET_HIGH_WARNING; + if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) + flags |= RDP_OET_LOW_WARNING; + + flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); + desc->oed_info.function_flags = cpu_to_be32(flags); + desc->length = cpu_to_be32(sizeof(desc->oed_info)); + return sizeof(struct fc_rdp_oed_sfp_desc); +} + +static uint32_t +lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, + struct fc_rdp_oed_sfp_desc *desc, + uint8_t *page_a2) +{ + uint32_t flags = 0; + + desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); + + desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; + desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; + desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; + desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; + + if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) + flags |= RDP_OET_HIGH_ALARM; + if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) + flags |= RDP_OET_LOW_ALARM; + if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) + flags |= RDP_OET_HIGH_WARNING; + if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) + flags |= RDP_OET_LOW_WARNING; + + flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); + desc->oed_info.function_flags = cpu_to_be32(flags); + desc->length = cpu_to_be32(sizeof(desc->oed_info)); + return sizeof(struct fc_rdp_oed_sfp_desc); +} + +static uint32_t +lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, + struct fc_rdp_oed_sfp_desc *desc, + uint8_t *page_a2) +{ + uint32_t flags = 0; + + desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); + + desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; + desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; + desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; + desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; + + if (phba->sfp_alarm & 
LPFC_TRANSGRESSION_HIGH_TXPOWER) + flags |= RDP_OET_HIGH_ALARM; + if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) + flags |= RDP_OET_LOW_ALARM; + if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) + flags |= RDP_OET_HIGH_WARNING; + if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) + flags |= RDP_OET_LOW_WARNING; + + flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); + desc->oed_info.function_flags = cpu_to_be32(flags); + desc->length = cpu_to_be32(sizeof(desc->oed_info)); + return sizeof(struct fc_rdp_oed_sfp_desc); +} + + +static uint32_t +lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, + struct fc_rdp_oed_sfp_desc *desc, + uint8_t *page_a2) +{ + uint32_t flags = 0; + + desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); + + desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; + desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; + desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; + desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; + + if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) + flags |= RDP_OET_HIGH_ALARM; + if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) + flags |= RDP_OET_LOW_ALARM; + if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) + flags |= RDP_OET_HIGH_WARNING; + if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) + flags |= RDP_OET_LOW_WARNING; + + flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); + desc->oed_info.function_flags = cpu_to_be32(flags); + desc->length = cpu_to_be32(sizeof(desc->oed_info)); + return sizeof(struct fc_rdp_oed_sfp_desc); +} + +static uint32_t +lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, + uint8_t *page_a0, struct lpfc_vport *vport) +{ + desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); + memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); + memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); + memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); + memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); + memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); + desc->length = cpu_to_be32(sizeof(desc->opd_info)); + return sizeof(struct fc_rdp_opd_sfp_desc); +} + +static uint32_t +lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) +{ + if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) + return 0; + desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); + + desc->info.CorrectedBlocks = + cpu_to_be32(stat->fecCorrBlkCount); + desc->info.UncorrectableBlocks = + cpu_to_be32(stat->fecUncorrBlkCount); + + desc->length = cpu_to_be32(sizeof(desc->info)); + + return sizeof(struct fc_fec_rdp_desc); +} + +static uint32_t +lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) +{ + uint16_t rdp_cap = 0; + uint16_t rdp_speed; + + desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); + + switch (phba->fc_linkspeed) { + case LPFC_LINK_SPEED_1GHZ: + rdp_speed = RDP_PS_1GB; + break; + case LPFC_LINK_SPEED_2GHZ: + rdp_speed = RDP_PS_2GB; + break; + case LPFC_LINK_SPEED_4GHZ: + rdp_speed = RDP_PS_4GB; + break; + case LPFC_LINK_SPEED_8GHZ: + rdp_speed = RDP_PS_8GB; + break; + case LPFC_LINK_SPEED_10GHZ: + rdp_speed = RDP_PS_10GB; + break; + case LPFC_LINK_SPEED_16GHZ: + rdp_speed = RDP_PS_16GB; + break; + case LPFC_LINK_SPEED_32GHZ: + rdp_speed = RDP_PS_32GB; + break; + case LPFC_LINK_SPEED_64GHZ: + rdp_speed = RDP_PS_64GB; + break; + case LPFC_LINK_SPEED_128GHZ: + rdp_speed = RDP_PS_128GB; + break; + case LPFC_LINK_SPEED_256GHZ: + rdp_speed = RDP_PS_256GB; + break; + default: + rdp_speed = 
RDP_PS_UNKNOWN; + break; + } + + desc->info.port_speed.speed = cpu_to_be16(rdp_speed); + + if (phba->lmt & LMT_256Gb) + rdp_cap |= RDP_PS_256GB; + if (phba->lmt & LMT_128Gb) + rdp_cap |= RDP_PS_128GB; + if (phba->lmt & LMT_64Gb) + rdp_cap |= RDP_PS_64GB; + if (phba->lmt & LMT_32Gb) + rdp_cap |= RDP_PS_32GB; + if (phba->lmt & LMT_16Gb) + rdp_cap |= RDP_PS_16GB; + if (phba->lmt & LMT_10Gb) + rdp_cap |= RDP_PS_10GB; + if (phba->lmt & LMT_8Gb) + rdp_cap |= RDP_PS_8GB; + if (phba->lmt & LMT_4Gb) + rdp_cap |= RDP_PS_4GB; + if (phba->lmt & LMT_2Gb) + rdp_cap |= RDP_PS_2GB; + if (phba->lmt & LMT_1Gb) + rdp_cap |= RDP_PS_1GB; + + if (rdp_cap == 0) + rdp_cap = RDP_CAP_UNKNOWN; + if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) + rdp_cap |= RDP_CAP_USER_CONFIGURED; + + desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); + desc->length = cpu_to_be32(sizeof(desc->info)); + return sizeof(struct fc_rdp_port_speed_desc); +} + +static uint32_t +lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, + struct lpfc_vport *vport) +{ + + desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); + + memcpy(desc->port_names.wwnn, &vport->fc_nodename, + sizeof(desc->port_names.wwnn)); + + memcpy(desc->port_names.wwpn, &vport->fc_portname, + sizeof(desc->port_names.wwpn)); + + desc->length = cpu_to_be32(sizeof(desc->port_names)); + return sizeof(struct fc_rdp_port_name_desc); +} + +static uint32_t +lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, + struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + + desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); + if (vport->fc_flag & FC_FABRIC) { + memcpy(desc->port_names.wwnn, &vport->fabric_nodename, + sizeof(desc->port_names.wwnn)); + + memcpy(desc->port_names.wwpn, &vport->fabric_portname, + sizeof(desc->port_names.wwpn)); + } else { /* Point to Point */ + memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, + sizeof(desc->port_names.wwnn)); + + memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, + sizeof(desc->port_names.wwpn)); + } + + desc->length = cpu_to_be32(sizeof(desc->port_names)); + return sizeof(struct fc_rdp_port_name_desc); +} + +static void +lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, + int status) +{ + struct lpfc_nodelist *ndlp = rdp_context->ndlp; + struct lpfc_vport *vport = ndlp->vport; + struct lpfc_iocbq *elsiocb; + struct ulp_bde64 *bpl; + IOCB_t *icmd; + union lpfc_wqe128 *wqe; + uint8_t *pcmd; + struct ls_rjt *stat; + struct fc_rdp_res_frame *rdp_res; + uint32_t cmdsize, len; + uint16_t *flag_ptr; + int rc; + u32 ulp_context; + + if (status != SUCCESS) + goto error; + + /* This will change once we know the true size of the RDP payload */ + cmdsize = sizeof(struct fc_rdp_res_frame); + + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, + lpfc_max_els_tries, rdp_context->ndlp, + rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); + if (!elsiocb) + goto free_rdp_context; + + ulp_context = get_job_ulpcontext(phba, elsiocb); + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + /* ox-id of the frame */ + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + rdp_context->ox_id); + bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, + rdp_context->rx_id); + } else { + icmd = &elsiocb->iocb; + icmd->ulpContext = rdp_context->rx_id; + icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2171 Xmit RDP response tag x%x xri x%x, " + "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", + elsiocb->iotag, ulp_context, + ndlp->nlp_DID, ndlp->nlp_flag, 
ndlp->nlp_state, + ndlp->nlp_rpi); + rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt; + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; + + /* Update Alarm and Warning */ + flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); + phba->sfp_alarm |= *flag_ptr; + flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); + phba->sfp_warning |= *flag_ptr; + + /* For RDP payload */ + len = 8; + len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) + (len + pcmd), ELS_CMD_RDP); + + len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), + rdp_context->page_a0, rdp_context->page_a2); + len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), + phba); + len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) + (len + pcmd), &rdp_context->link_stat); + len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) + (len + pcmd), vport); + len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) + (len + pcmd), vport, ndlp); + len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), + &rdp_context->link_stat); + len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), + &rdp_context->link_stat, vport); + len += lpfc_rdp_res_oed_temp_desc(phba, + (struct fc_rdp_oed_sfp_desc *)(len + pcmd), + rdp_context->page_a2); + len += lpfc_rdp_res_oed_voltage_desc(phba, + (struct fc_rdp_oed_sfp_desc *)(len + pcmd), + rdp_context->page_a2); + len += lpfc_rdp_res_oed_txbias_desc(phba, + (struct fc_rdp_oed_sfp_desc *)(len + pcmd), + rdp_context->page_a2); + len += lpfc_rdp_res_oed_txpower_desc(phba, + (struct fc_rdp_oed_sfp_desc *)(len + pcmd), + rdp_context->page_a2); + len += lpfc_rdp_res_oed_rxpower_desc(phba, + (struct fc_rdp_oed_sfp_desc *)(len + pcmd), + rdp_context->page_a2); + len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), + rdp_context->page_a0, vport); + + rdp_res->length = cpu_to_be32(len - 8); + elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; + + /* Now that we know the true size of the payload, update the BPL */ + bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt; + bpl->tus.f.bdeSize = len; + bpl->tus.f.bdeFlags = 0; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + + phba->fc_stat.elsXmitACC++; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + goto free_rdp_context; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + } + + goto free_rdp_context; + +error: + cmdsize = 2 * sizeof(uint32_t); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, + ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); + if (!elsiocb) + goto free_rdp_context; + + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + /* ox-id of the frame */ + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + rdp_context->ox_id); + bf_set(wqe_ctxt_tag, + &wqe->xmit_els_rsp.wqe_com, + rdp_context->rx_id); + } else { + icmd = &elsiocb->iocb; + icmd->ulpContext = rdp_context->rx_id; + icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; + } + + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + + *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; + stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); + stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + + phba->fc_stat.elsXmitLSRJT++; + elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if 
(!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + goto free_rdp_context; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + } + +free_rdp_context: + /* This reference put is for the original unsolicited RDP. If the + * prep failed, there is no reference to remove. + */ + lpfc_nlp_put(ndlp); + kfree(rdp_context); +} + +static int +lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) +{ + LPFC_MBOXQ_t *mbox = NULL; + int rc; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, + "7105 failed to allocate mailbox memory"); + return 1; + } + + if (lpfc_sli4_dump_page_a0(phba, mbox)) + goto rdp_fail; + mbox->vport = rdp_context->ndlp->vport; + mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; + mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); + return 1; + } + + return 0; + +rdp_fail: + mempool_free(mbox, phba->mbox_mem_pool); + return 1; +} + +int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, + struct lpfc_rdp_context *rdp_context) +{ + LPFC_MBOXQ_t *mbox = NULL; + int rc; + struct lpfc_dmabuf *mp; + struct lpfc_dmabuf *mpsave; + void *virt; + MAILBOX_t *mb; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, + "7205 failed to allocate mailbox memory"); + return 1; + } + + if (lpfc_sli4_dump_page_a0(phba, mbox)) + goto sfp_fail; + mp = mbox->ctx_buf; + mpsave = mp; + virt = mp->virt; + if (phba->sli_rev < LPFC_SLI_REV4) { + mb = &mbox->u.mb; + mb->un.varDmp.cv = 1; + mb->un.varDmp.co = 1; + mb->un.varWords[2] = 0; + mb->un.varWords[3] = DMP_SFF_PAGE_A0_SIZE / 4; + mb->un.varWords[4] = 0; + mb->un.varWords[5] = 0; + mb->un.varWords[6] = 0; + mb->un.varWords[7] = 0; + mb->un.varWords[8] = 0; + mb->un.varWords[9] = 0; + mb->un.varWords[10] = 0; + mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; + mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; + mbox->mbox_offset_word = 5; + mbox->ctx_buf = virt; + } else { + bf_set(lpfc_mbx_memory_dump_type3_length, + &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); + mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); + mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); + } + mbox->vport = phba->pport; + mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; + + rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); + if (rc == MBX_NOT_FINISHED) { + rc = 1; + goto error; + } + + if (phba->sli_rev == LPFC_SLI_REV4) + mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); + else + mp = mpsave; + + if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { + rc = 1; + goto error; + } + + lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0, + DMP_SFF_PAGE_A0_SIZE); + + memset(mbox, 0, sizeof(*mbox)); + memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE); + INIT_LIST_HEAD(&mp->list); + + /* save address for completion */ + mbox->ctx_buf = mp; + mbox->vport = phba->pport; + + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); + bf_set(lpfc_mbx_memory_dump_type3_type, + &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); + bf_set(lpfc_mbx_memory_dump_type3_link, + &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); + bf_set(lpfc_mbx_memory_dump_type3_page_no, + &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); + if (phba->sli_rev < LPFC_SLI_REV4) 
{ + mb = &mbox->u.mb; + mb->un.varDmp.cv = 1; + mb->un.varDmp.co = 1; + mb->un.varWords[2] = 0; + mb->un.varWords[3] = DMP_SFF_PAGE_A2_SIZE / 4; + mb->un.varWords[4] = 0; + mb->un.varWords[5] = 0; + mb->un.varWords[6] = 0; + mb->un.varWords[7] = 0; + mb->un.varWords[8] = 0; + mb->un.varWords[9] = 0; + mb->un.varWords[10] = 0; + mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; + mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; + mbox->mbox_offset_word = 5; + mbox->ctx_buf = virt; + } else { + bf_set(lpfc_mbx_memory_dump_type3_length, + &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE); + mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); + mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); + } + + mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; + rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); + if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { + rc = 1; + goto error; + } + rc = 0; + + lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2, + DMP_SFF_PAGE_A2_SIZE); + +error: + mbox->ctx_buf = mpsave; + lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); + + return rc; + +sfp_fail: + mempool_free(mbox, phba->mbox_mem_pool); + return 1; +} + +/* + * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine processes an unsolicited RDP(Read Diagnostic Parameters) + * IOCB. First, the payload of the unsolicited RDP is checked. + * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 + * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, + * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl + * gather all data and send RDP response. + * + * Return code + * 0 - Sent the acc response + * 1 - Sent the reject response. 
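[Editor's illustrative sketch — not part of the upstream lpfc patch. The RDP completion path above (lpfc_els_rdp_cmpl) builds its response by calling one lpfc_rdp_res_*() helper per descriptor, each of which writes at a running offset and returns its own length, then patches the total length in afterwards. The stand-alone C below demonstrates that accumulation pattern; all demo_* names and tag values are invented, and htonl() merely stands in for cpu_to_be32().]

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl(), used here in place of cpu_to_be32() */

/* Write one tag/length/payload descriptor at 'buf' and return its size so
 * the caller can chain descriptors by advancing a running offset. */
static uint32_t demo_build_desc(uint8_t *buf, uint32_t tag,
				const void *payload, uint32_t payload_len)
{
	uint32_t be;

	be = htonl(tag);
	memcpy(buf, &be, 4);			/* descriptor tag */
	be = htonl(payload_len);
	memcpy(buf + 4, &be, 4);		/* descriptor length */
	memcpy(buf + 8, payload, payload_len);	/* descriptor payload */
	return 8 + payload_len;
}

/* Assemble a response: reserve 8 bytes for the command word and the overall
 * length word, append descriptors, then back-patch the total length. */
static uint32_t demo_build_response(uint8_t *rsp, size_t rsp_size)
{
	uint32_t len = 8;			/* command word + length word */
	uint32_t word = htonl(0x42424242);	/* arbitrary demo payload */
	uint32_t total;

	(void)rsp_size;		/* a real builder would bounds-check here */
	len += demo_build_desc(rsp + len, 0x0001, &word, sizeof(word));
	len += demo_build_desc(rsp + len, 0x0002, &word, sizeof(word));

	total = htonl(len - 8);			/* exclude the header words */
	memcpy(rsp + 4, &total, 4);
	return len;
}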
+ */ +static int +lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_dmabuf *pcmd; + uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; + struct fc_rdp_req_frame *rdp_req; + struct lpfc_rdp_context *rdp_context; + union lpfc_wqe128 *cmd = NULL; + struct ls_rjt stat; + + if (phba->sli_rev < LPFC_SLI_REV4 || + bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < + LPFC_SLI_INTF_IF_TYPE_2) { + rjt_err = LSRJT_UNABLE_TPC; + rjt_expl = LSEXP_REQ_UNSUPPORTED; + goto error; + } + + if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { + rjt_err = LSRJT_UNABLE_TPC; + rjt_expl = LSEXP_REQ_UNSUPPORTED; + goto error; + } + + pcmd = cmdiocb->cmd_dmabuf; + rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2422 ELS RDP Request " + "dec len %d tag x%x port_id %d len %d\n", + be32_to_cpu(rdp_req->rdp_des_length), + be32_to_cpu(rdp_req->nport_id_desc.tag), + be32_to_cpu(rdp_req->nport_id_desc.nport_id), + be32_to_cpu(rdp_req->nport_id_desc.length)); + + if (sizeof(struct fc_rdp_nport_desc) != + be32_to_cpu(rdp_req->rdp_des_length)) + goto rjt_logerr; + if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) + goto rjt_logerr; + if (RDP_NPORT_ID_SIZE != + be32_to_cpu(rdp_req->nport_id_desc.length)) + goto rjt_logerr; + rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); + if (!rdp_context) { + rjt_err = LSRJT_UNABLE_TPC; + goto error; + } + + cmd = &cmdiocb->wqe; + rdp_context->ndlp = lpfc_nlp_get(ndlp); + if (!rdp_context->ndlp) { + kfree(rdp_context); + rjt_err = LSRJT_UNABLE_TPC; + goto error; + } + rdp_context->ox_id = bf_get(wqe_rcvoxid, + &cmd->xmit_els_rsp.wqe_com); + rdp_context->rx_id = bf_get(wqe_ctxt_tag, + &cmd->xmit_els_rsp.wqe_com); + rdp_context->cmpl = lpfc_els_rdp_cmpl; + if (lpfc_get_rdp_info(phba, rdp_context)) { + lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, + "2423 Unable to send mailbox"); + kfree(rdp_context); + rjt_err = LSRJT_UNABLE_TPC; + lpfc_nlp_put(ndlp); + goto error; + } + + return 0; + +rjt_logerr: + rjt_err = LSRJT_LOGICAL_ERR; + +error: + memset(&stat, 0, sizeof(stat)); + stat.un.b.lsRjtRsnCode = rjt_err; + stat.un.b.lsRjtRsnCodeExp = rjt_expl; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return 1; +} + + +static void +lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb; + IOCB_t *icmd; + union lpfc_wqe128 *wqe; + uint8_t *pcmd; + struct lpfc_iocbq *elsiocb; + struct lpfc_nodelist *ndlp; + struct ls_rjt *stat; + union lpfc_sli4_cfg_shdr *shdr; + struct lpfc_lcb_context *lcb_context; + struct fc_lcb_res_frame *lcb_res; + uint32_t cmdsize, shdr_status, shdr_add_status; + int rc; + + mb = &pmb->u.mb; + lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; + ndlp = lcb_context->ndlp; + pmb->ctx_ndlp = NULL; + pmb->ctx_buf = NULL; + + shdr = (union lpfc_sli4_cfg_shdr *) + &pmb->u.mqe.un.beacon_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, + "0194 SET_BEACON_CONFIG mailbox " + "completed with status x%x add_status x%x," + " mbx status x%x\n", + shdr_status, shdr_add_status, mb->mbxStatus); + + if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || + (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || + (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 
+ mempool_free(pmb, phba->mbox_mem_pool); + goto error; + } + + mempool_free(pmb, phba->mbox_mem_pool); + cmdsize = sizeof(struct fc_lcb_res_frame); + elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, + lpfc_max_els_tries, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); + + /* Decrement the ndlp reference count from previous mbox command */ + lpfc_nlp_put(ndlp); + + if (!elsiocb) + goto free_lcb_context; + + lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt; + + memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); + + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + lcb_context->ox_id); + } else { + icmd = &elsiocb->iocb; + icmd->ulpContext = lcb_context->rx_id; + icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; + } + + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + *((uint32_t *)(pcmd)) = ELS_CMD_ACC; + lcb_res->lcb_sub_command = lcb_context->sub_command; + lcb_res->lcb_type = lcb_context->type; + lcb_res->capability = lcb_context->capability; + lcb_res->lcb_frequency = lcb_context->frequency; + lcb_res->lcb_duration = lcb_context->duration; + elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; + phba->fc_stat.elsXmitACC++; + + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + goto out; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + } + out: + kfree(lcb_context); + return; + +error: + cmdsize = sizeof(struct fc_lcb_res_frame); + elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, + lpfc_max_els_tries, ndlp, + ndlp->nlp_DID, ELS_CMD_LS_RJT); + lpfc_nlp_put(ndlp); + if (!elsiocb) + goto free_lcb_context; + + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + lcb_context->ox_id); + } else { + icmd = &elsiocb->iocb; + icmd->ulpContext = lcb_context->rx_id; + icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; + } + + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + + *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; + stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); + stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + + if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) + stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; + + elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; + phba->fc_stat.elsXmitLSRJT++; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + goto free_lcb_context; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + } +free_lcb_context: + kfree(lcb_context); +} + +static int +lpfc_sli4_set_beacon(struct lpfc_vport *vport, + struct lpfc_lcb_context *lcb_context, + uint32_t beacon_state) +{ + struct lpfc_hba *phba = vport->phba; + union lpfc_sli4_cfg_shdr *cfg_shdr; + LPFC_MBOXQ_t *mbox = NULL; + uint32_t len; + int rc; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return 1; + + cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; + len = sizeof(struct lpfc_mbx_set_beacon_config) - + sizeof(struct lpfc_sli4_cfg_mhdr); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, + LPFC_SLI4_MBX_EMBED); + mbox->ctx_ndlp = (void *)lcb_context; + mbox->vport = phba->pport; + 
mbox->mbox_cmpl = lpfc_els_lcb_rsp; + bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, + phba->sli4_hba.physical_port); + bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, + beacon_state); + mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ + + /* + * Check bv1s bit before issuing the mailbox + * if bv1s == 1, LCB V1 supported + * else, LCB V0 supported + */ + + if (phba->sli4_hba.pc_sli4_params.bv1s) { + /* COMMON_SET_BEACON_CONFIG_V1 */ + cfg_shdr->request.word9 = BEACON_VERSION_V1; + lcb_context->capability |= LCB_CAPABILITY_DURATION; + bf_set(lpfc_mbx_set_beacon_port_type, + &mbox->u.mqe.un.beacon_config, 0); + bf_set(lpfc_mbx_set_beacon_duration_v1, + &mbox->u.mqe.un.beacon_config, + be16_to_cpu(lcb_context->duration)); + } else { + /* COMMON_SET_BEACON_CONFIG_V0 */ + if (be16_to_cpu(lcb_context->duration) != 0) { + mempool_free(mbox, phba->mbox_mem_pool); + return 1; + } + cfg_shdr->request.word9 = BEACON_VERSION_V0; + lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); + bf_set(lpfc_mbx_set_beacon_state, + &mbox->u.mqe.un.beacon_config, beacon_state); + bf_set(lpfc_mbx_set_beacon_port_type, + &mbox->u.mqe.un.beacon_config, 1); + bf_set(lpfc_mbx_set_beacon_duration, + &mbox->u.mqe.un.beacon_config, + be16_to_cpu(lcb_context->duration)); + } + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + return 1; + } + + return 0; +} + + +/** + * lpfc_els_rcv_lcb - Process an unsolicited LCB + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. + * First, the payload of the unsolicited LCB is checked. + * Then based on Subcommand beacon will either turn on or off. + * + * Return code + * 0 - Sent the acc response + * 1 - Sent the reject response. + **/ +static int +lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_dmabuf *pcmd; + uint8_t *lp; + struct fc_lcb_request_frame *beacon; + struct lpfc_lcb_context *lcb_context; + u8 state, rjt_err = 0; + struct ls_rjt stat; + + pcmd = cmdiocb->cmd_dmabuf; + lp = (uint8_t *)pcmd->virt; + beacon = (struct fc_lcb_request_frame *)pcmd->virt; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " + "type x%x frequency %x duration x%x\n", + lp[0], lp[1], lp[2], + beacon->lcb_command, + beacon->lcb_sub_command, + beacon->lcb_type, + beacon->lcb_frequency, + be16_to_cpu(beacon->lcb_duration)); + + if (beacon->lcb_sub_command != LPFC_LCB_ON && + beacon->lcb_sub_command != LPFC_LCB_OFF) { + rjt_err = LSRJT_CMD_UNSUPPORTED; + goto rjt; + } + + if (phba->sli_rev < LPFC_SLI_REV4 || + phba->hba_flag & HBA_FCOE_MODE || + (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < + LPFC_SLI_INTF_IF_TYPE_2)) { + rjt_err = LSRJT_CMD_UNSUPPORTED; + goto rjt; + } + + lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); + if (!lcb_context) { + rjt_err = LSRJT_UNABLE_TPC; + goto rjt; + } + + state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 
1 : 0; + lcb_context->sub_command = beacon->lcb_sub_command; + lcb_context->capability = 0; + lcb_context->type = beacon->lcb_type; + lcb_context->frequency = beacon->lcb_frequency; + lcb_context->duration = beacon->lcb_duration; + lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb); + lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb); + lcb_context->ndlp = lpfc_nlp_get(ndlp); + if (!lcb_context->ndlp) { + rjt_err = LSRJT_UNABLE_TPC; + goto rjt_free; + } + + if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { + lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, + "0193 failed to send mail box"); + lpfc_nlp_put(ndlp); + rjt_err = LSRJT_UNABLE_TPC; + goto rjt_free; + } + return 0; + +rjt_free: + kfree(lcb_context); +rjt: + memset(&stat, 0, sizeof(stat)); + stat.un.b.lsRjtRsnCode = rjt_err; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return 1; +} + + +/** + * lpfc_els_flush_rscn - Clean up any rscn activities with a vport + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine cleans up any Registration State Change Notification + * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the + * @vport together with the host_lock is used to prevent multiple threads + * trying to access the RSCN array on the same @vport at the same time. + **/ +void +lpfc_els_flush_rscn(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + int i; + + spin_lock_irq(shost->host_lock); + if (vport->fc_rscn_flush) { + /* Another thread is walking fc_rscn_id_list on this vport */ + spin_unlock_irq(shost->host_lock); + return; + } + /* Indicate we are walking lpfc_els_flush_rscn on this vport */ + vport->fc_rscn_flush = 1; + spin_unlock_irq(shost->host_lock); + + for (i = 0; i < vport->fc_rscn_id_cnt; i++) { + lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); + vport->fc_rscn_id_list[i] = NULL; + } + spin_lock_irq(shost->host_lock); + vport->fc_rscn_id_cnt = 0; + vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + /* Indicate we are done walking this fc_rscn_id_list */ + vport->fc_rscn_flush = 0; +} + +/** + * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did + * @vport: pointer to a host virtual N_Port data structure. + * @did: remote destination port identifier. + * + * This routine checks whether there is any pending Registration State + * Change Notification (RSCN) to a @did on @vport.
+ * + * Return code + * Non-zero - The @did matched with a pending rscn + * 0 - not able to match @did with a pending rscn + **/ +int +lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) +{ + D_ID ns_did; + D_ID rscn_did; + uint32_t *lp; + uint32_t payload_len, i; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + ns_did.un.word = did; + + /* Never match fabric nodes for RSCNs */ + if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) + return 0; + + /* If we are doing a FULL RSCN rediscovery, match everything */ + if (vport->fc_flag & FC_RSCN_DISCOVERY) + return did; + + spin_lock_irq(shost->host_lock); + if (vport->fc_rscn_flush) { + /* Another thread is walking fc_rscn_id_list on this vport */ + spin_unlock_irq(shost->host_lock); + return 0; + } + /* Indicate we are walking fc_rscn_id_list on this vport */ + vport->fc_rscn_flush = 1; + spin_unlock_irq(shost->host_lock); + for (i = 0; i < vport->fc_rscn_id_cnt; i++) { + lp = vport->fc_rscn_id_list[i]->virt; + payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); + payload_len -= sizeof(uint32_t); /* take off word 0 */ + while (payload_len) { + rscn_did.un.word = be32_to_cpu(*lp++); + payload_len -= sizeof(uint32_t); + switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { + case RSCN_ADDRESS_FORMAT_PORT: + if ((ns_did.un.b.domain == rscn_did.un.b.domain) + && (ns_did.un.b.area == rscn_did.un.b.area) + && (ns_did.un.b.id == rscn_did.un.b.id)) + goto return_did_out; + break; + case RSCN_ADDRESS_FORMAT_AREA: + if ((ns_did.un.b.domain == rscn_did.un.b.domain) + && (ns_did.un.b.area == rscn_did.un.b.area)) + goto return_did_out; + break; + case RSCN_ADDRESS_FORMAT_DOMAIN: + if (ns_did.un.b.domain == rscn_did.un.b.domain) + goto return_did_out; + break; + case RSCN_ADDRESS_FORMAT_FABRIC: + goto return_did_out; + } + } + } + /* Indicate we are done with walking fc_rscn_id_list on this vport */ + vport->fc_rscn_flush = 0; + return 0; +return_did_out: + /* Indicate we are done with walking fc_rscn_id_list on this vport */ + vport->fc_rscn_flush = 0; + return did; +} + +/** + * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine sends a recovery (NLP_EVT_DEVICE_RECOVERY) event to the + * state machine for each of a @vport's nodes that has a pending RSCN + * (Registration State Change Notification). + * + * Return code + * 0 - Successful (currently always return 0) + **/ +static int +lpfc_rscn_recovery_check(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp = NULL, *n; + + /* Move all nodes affected by pending RSCNs to NPR state. */ + list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) { + if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || + !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) + continue; + + /* NVME Target mode does not do RSCN Recovery. */ + if (vport->phba->nvmet_support) + continue; + + /* If we are in the process of doing discovery on this + * NPort, let it continue on its own. + */ + switch (ndlp->nlp_state) { + case NLP_STE_PLOGI_ISSUE: + case NLP_STE_ADISC_ISSUE: + case NLP_STE_REG_LOGIN_ISSUE: + case NLP_STE_PRLI_ISSUE: + case NLP_STE_LOGO_ISSUE: + continue; + } + + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RECOVERY); + lpfc_cancel_retry_delay_tmo(vport, ndlp); + } + return 0; +} + +/** + * lpfc_send_rscn_event - Send an RSCN event to management application + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure.
+ * + * lpfc_send_rscn_event sends an RSCN netlink event to management + * applications. + */ +static void +lpfc_send_rscn_event(struct lpfc_vport *vport, + struct lpfc_iocbq *cmdiocb) +{ + struct lpfc_dmabuf *pcmd; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + uint32_t *payload_ptr; + uint32_t payload_len; + struct lpfc_rscn_event_header *rscn_event_data; + + pcmd = cmdiocb->cmd_dmabuf; + payload_ptr = (uint32_t *) pcmd->virt; + payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); + + rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + + payload_len, GFP_KERNEL); + if (!rscn_event_data) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0147 Failed to allocate memory for RSCN event\n"); + return; + } + rscn_event_data->event_type = FC_REG_RSCN_EVENT; + rscn_event_data->payload_length = payload_len; + memcpy(rscn_event_data->rscn_payload, payload_ptr, + payload_len); + + fc_host_post_vendor_event(shost, + fc_get_event_number(), + sizeof(struct lpfc_rscn_event_header) + payload_len, + (char *)rscn_event_data, + LPFC_NL_VENDOR_ID); + + kfree(rscn_event_data); +} + +/** + * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine processes an unsolicited RSCN (Registration State Change + * Notification) IOCB. First, the payload of the unsolicited RSCN is walked + * to invoke fc_host_post_event() routine to the FC transport layer. If the + * discover state machine is about to begin discovery, it just accepts the + * RSCN and the discovery process will satisfy the RSCN. If this RSCN only + * contains N_Port IDs for other vports on this HBA, it just accepts the + * RSCN and ignore processing it. If the state machine is in the recovery + * state, the fc_rscn_id_list of this @vport is walked and the + * lpfc_rscn_recovery_check() routine is invoked to send recovery event for + * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() + * routine is invoked to handle the RSCN event. 
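[Editor's illustrative sketch — not part of the upstream lpfc patch. The RSCN handling described here ultimately matches each node's D_ID against the RSCN entries at the granularity named by the entry's address format, as done in lpfc_rscn_payload_check() above. The stand-alone C below shows only that masking idea; the demo_* names and format codes are invented stand-ins, not the driver's RSCN_ADDRESS_FORMAT_* values.]

#include <stdbool.h>
#include <stdint.h>

/* Stand-in address-format codes; a 24-bit FC N_Port ID is domain (bits
 * 23..16), area (bits 15..8) and port (bits 7..0). */
enum demo_rscn_format {
	DEMO_FMT_PORT   = 0,	/* match the full 24-bit N_Port ID */
	DEMO_FMT_AREA   = 1,	/* match domain + area only        */
	DEMO_FMT_DOMAIN = 2,	/* match domain only               */
	DEMO_FMT_FABRIC = 3,	/* match every port                */
};

/* Mask the local DID down to the granularity named by the RSCN entry
 * before comparing, mirroring the switch in lpfc_rscn_payload_check(). */
static bool demo_rscn_matches(uint32_t did, uint32_t rscn_did,
			      enum demo_rscn_format fmt)
{
	switch (fmt) {
	case DEMO_FMT_PORT:
		return (did & 0xFFFFFF) == (rscn_did & 0xFFFFFF);
	case DEMO_FMT_AREA:
		return (did & 0xFFFF00) == (rscn_did & 0xFFFF00);
	case DEMO_FMT_DOMAIN:
		return (did & 0xFF0000) == (rscn_did & 0xFF0000);
	case DEMO_FMT_FABRIC:
		return true;
	}
	return false;
}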
+ * + * Return code + * 0 - Just sent the acc response + * 1 - Sent the acc response and waited for name server completion + **/ +static int +lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + struct lpfc_dmabuf *pcmd; + uint32_t *lp, *datap; + uint32_t payload_len, length, nportid, *cmd; + int rscn_cnt; + int rscn_id = 0, hba_id = 0; + int i, tmo; + + pcmd = cmdiocb->cmd_dmabuf; + lp = (uint32_t *) pcmd->virt; + + payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); + payload_len -= sizeof(uint32_t); /* take off word 0 */ + /* RSCN received */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0214 RSCN received Data: x%x x%x x%x x%x\n", + vport->fc_flag, payload_len, *lp, + vport->fc_rscn_id_cnt); + + /* Send an RSCN event to the management application */ + lpfc_send_rscn_event(vport, cmdiocb); + + for (i = 0; i < payload_len/sizeof(uint32_t); i++) + fc_host_post_event(shost, fc_get_event_number(), + FCH_EVT_RSCN, lp[i]); + + /* Check if RSCN is coming from a direct-connected remote NPort */ + if (vport->fc_flag & FC_PT2PT) { + /* If so, just ACC it, no other action needed for now */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2024 pt2pt RSCN %08x Data: x%x x%x\n", + *lp, vport->fc_flag, payload_len); + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + + /* Check to see if we need to NVME rescan this target + * remoteport. + */ + if (ndlp->nlp_fc4_type & NLP_FC4_NVME && + ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) + lpfc_nvme_rescan_port(vport, ndlp); + return 0; + } + + /* If we are about to begin discovery, just ACC the RSCN. + * Discovery processing will satisfy it. + */ + if (vport->port_state <= LPFC_NS_QRY) { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", + ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); + + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + return 0; + } + + /* If this RSCN just contains NPortIDs for other vports on this HBA, + * just ACC and ignore it. 
+ */ + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + !(vport->cfg_peer_port_login)) { + i = payload_len; + datap = lp; + while (i > 0) { + nportid = *datap++; + nportid = ((be32_to_cpu(nportid)) & Mask_DID); + i -= sizeof(uint32_t); + rscn_id++; + if (lpfc_find_vport_by_did(phba, nportid)) + hba_id++; + } + if (rscn_id == hba_id) { + /* ALL NPortIDs in RSCN are on HBA */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0219 Ignore RSCN " + "Data: x%x x%x x%x x%x\n", + vport->fc_flag, payload_len, + *lp, vport->fc_rscn_id_cnt); + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", + ndlp->nlp_DID, vport->port_state, + ndlp->nlp_flag); + + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, + ndlp, NULL); + /* Restart disctmo if its already running */ + if (vport->fc_flag & FC_DISC_TMO) { + tmo = ((phba->fc_ratov * 3) + 3); + mod_timer(&vport->fc_disctmo, + jiffies + + msecs_to_jiffies(1000 * tmo)); + } + return 0; + } + } + + spin_lock_irq(shost->host_lock); + if (vport->fc_rscn_flush) { + /* Another thread is walking fc_rscn_id_list on this vport */ + vport->fc_flag |= FC_RSCN_DISCOVERY; + spin_unlock_irq(shost->host_lock); + /* Send back ACC */ + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + return 0; + } + /* Indicate we are walking fc_rscn_id_list on this vport */ + vport->fc_rscn_flush = 1; + spin_unlock_irq(shost->host_lock); + /* Get the array count after successfully have the token */ + rscn_cnt = vport->fc_rscn_id_cnt; + /* If we are already processing an RSCN, save the received + * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later. + */ + if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", + ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); + + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_RSCN_DEFERRED; + + /* Restart disctmo if its already running */ + if (vport->fc_flag & FC_DISC_TMO) { + tmo = ((phba->fc_ratov * 3) + 3); + mod_timer(&vport->fc_disctmo, + jiffies + msecs_to_jiffies(1000 * tmo)); + } + if ((rscn_cnt < FC_MAX_HOLD_RSCN) && + !(vport->fc_flag & FC_RSCN_DISCOVERY)) { + vport->fc_flag |= FC_RSCN_MODE; + spin_unlock_irq(shost->host_lock); + if (rscn_cnt) { + cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; + length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); + } + if ((rscn_cnt) && + (payload_len + length <= LPFC_BPL_SIZE)) { + *cmd &= ELS_CMD_MASK; + *cmd |= cpu_to_be32(payload_len + length); + memcpy(((uint8_t *)cmd) + length, lp, + payload_len); + } else { + vport->fc_rscn_id_list[rscn_cnt] = pcmd; + vport->fc_rscn_id_cnt++; + /* If we zero, cmdiocb->cmd_dmabuf, the calling + * routine will not try to free it. 
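[Editor's illustrative sketch — not part of the upstream lpfc patch. The deferred-RSCN path above appends a newly received payload onto the last saved buffer only when the combined size still fits the buffer (the "payload_len + length <= LPFC_BPL_SIZE" test); otherwise the new payload is kept as a separate list entry. The C below loosely models that append-if-it-fits decision with simplified bookkeeping; all demo_* names and DEMO_BUF_SIZE are invented.]

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define DEMO_BUF_SIZE 1024	/* stands in for LPFC_BPL_SIZE */

/* One saved RSCN payload and how many bytes of it are currently in use. */
struct demo_rscn_buf {
	uint8_t data[DEMO_BUF_SIZE];
	uint32_t len;
};

/* Fold a newly received payload (new_len bytes, including its 4-byte
 * command word) into an already-saved buffer when it fits; the caller
 * falls back to saving a separate buffer when this returns false. */
static bool demo_rscn_append(struct demo_rscn_buf *saved,
			     const uint8_t *new_words, uint32_t new_len)
{
	if (new_len < 4 || saved->len + new_len > DEMO_BUF_SIZE)
		return false;

	/* Skip the new payload's command word; keep only its port IDs. */
	memcpy(saved->data + saved->len, new_words + 4, new_len - 4);
	saved->len += new_len - 4;
	return true;
}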
+ */ + cmdiocb->cmd_dmabuf = NULL; + } + /* Deferred RSCN */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0235 Deferred RSCN " + "Data: x%x x%x x%x\n", + vport->fc_rscn_id_cnt, vport->fc_flag, + vport->port_state); + } else { + vport->fc_flag |= FC_RSCN_DISCOVERY; + spin_unlock_irq(shost->host_lock); + /* ReDiscovery RSCN */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0234 ReDiscovery RSCN " + "Data: x%x x%x x%x\n", + vport->fc_rscn_id_cnt, vport->fc_flag, + vport->port_state); + } + /* Indicate we are done walking fc_rscn_id_list on this vport */ + vport->fc_rscn_flush = 0; + /* Send back ACC */ + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + /* send RECOVERY event for ALL nodes that match RSCN payload */ + lpfc_rscn_recovery_check(vport); + return 0; + } + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RSCN: did:x%x/ste:x%x flg:x%x", + ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); + + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_RSCN_MODE; + spin_unlock_irq(shost->host_lock); + vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; + /* Indicate we are done walking fc_rscn_id_list on this vport */ + vport->fc_rscn_flush = 0; + /* + * If we zero, cmdiocb->cmd_dmabuf, the calling routine will + * not try to free it. + */ + cmdiocb->cmd_dmabuf = NULL; + lpfc_set_disctmo(vport); + /* Send back ACC */ + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + /* send RECOVERY event for ALL nodes that match RSCN payload */ + lpfc_rscn_recovery_check(vport); + return lpfc_els_handle_rscn(vport); +} + +/** + * lpfc_els_handle_rscn - Handle rscn for a vport + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine handles the Registration State Configuration Notification + * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall + * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, + * if the ndlp to NameServer exists, a Common Transport (CT) command to the + * NameServer shall be issued. If CT command to the NameServer fails to be + * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any + * RSCN activities with the @vport. + * + * Return code + * 0 - Cleaned up rscn on the @vport + * 1 - Wait for plogi to name server before proceed + **/ +int +lpfc_els_handle_rscn(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp; + struct lpfc_hba *phba = vport->phba; + + /* Ignore RSCN if the port is being torn down. */ + if (vport->load_flag & FC_UNLOADING) { + lpfc_els_flush_rscn(vport); + return 0; + } + + /* Start timer for RSCN processing */ + lpfc_set_disctmo(vport); + + /* RSCN processed */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", + vport->fc_flag, 0, vport->fc_rscn_id_cnt, + vport->port_state, vport->num_disc_nodes, + vport->gidft_inp); + + /* To process RSCN, first compare RSCN data with NameServer */ + vport->fc_ns_retry = 0; + vport->num_disc_nodes = 0; + + ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { + /* Good ndlp, issue CT Request to NameServer. Need to + * know how many gidfts were issued. If none, then just + * flush the RSCN. Otherwise, the outstanding requests + * need to complete. 
+ */ + if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) { + if (lpfc_issue_gidft(vport) > 0) + return 1; + } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) { + if (lpfc_issue_gidpt(vport) > 0) + return 1; + } else { + return 1; + } + } else { + /* Nameserver login in question. Revalidate. */ + if (ndlp) { + ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + } else { + ndlp = lpfc_nlp_init(vport, NameServer_DID); + if (!ndlp) { + lpfc_els_flush_rscn(vport); + return 0; + } + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + } + ndlp->nlp_type |= NLP_FABRIC; + lpfc_issue_els_plogi(vport, NameServer_DID, 0); + /* Wait for NameServer login cmpl before we can + * continue + */ + return 1; + } + + lpfc_els_flush_rscn(vport); + return 0; +} + +/** + * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine processes Fabric Login (FLOGI) IOCB received as an ELS + * unsolicited event. An unsolicited FLOGI can be received in a point-to- + * point topology. As an unsolicited FLOGI should not be received in a loop + * mode, any unsolicited FLOGI received in loop mode shall be ignored. The + * lpfc_check_sparm() routine is invoked to check the parameters in the + * unsolicited FLOGI. If parameters validation failed, the routine + * lpfc_els_rsp_reject() shall be called with reject reason code set to + * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the + * FLOGI shall be compared with the Port WWN of the @vport to determine who + * will initiate PLOGI. The higher lexicographical value party shall has + * higher priority (as the winning port) and will initiate PLOGI and + * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result + * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI + * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI. + * + * Return code + * 0 - Successfully processed the unsolicited flogi + * 1 - Failed to process the unsolicited flogi + **/ +static int +lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; + uint32_t *lp = (uint32_t *) pcmd->virt; + union lpfc_wqe128 *wqe = &cmdiocb->wqe; + struct serv_parm *sp; + LPFC_MBOXQ_t *mbox; + uint32_t cmd, did; + int rc; + uint32_t fc_flag = 0; + uint32_t port_state = 0; + + /* Clear external loopback plug detected flag */ + phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; + + cmd = *lp++; + sp = (struct serv_parm *) lp; + + /* FLOGI received */ + + lpfc_set_disctmo(vport); + + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { + /* We should never receive a FLOGI in loop mode, ignore it */ + did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest); + + /* An FLOGI ELS command was received from DID in + Loop Mode */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0113 An FLOGI ELS command x%x was " + "received from DID x%x in Loop Mode\n", + cmd, did); + return 1; + } + + (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); + + /* + * If our portname is greater than the remote portname, + * then we initiate Nport login. 
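[Editor's illustrative sketch — not part of the upstream lpfc patch. In the point-to-point FLOGI handling that follows, lpfc_els_rcv_flogi() decides roles with a memcmp() of the local port name against the port name in the received FLOGI: equal means our own FLOGI looped back, higher means we initiate PLOGI and assign IDs, lower means we wait for the peer. The stand-alone C below shows just that comparison; the demo_* names are invented.]

#include <stdint.h>
#include <string.h>

/* A Fibre Channel world-wide port name is 8 bytes; comparing the raw
 * bytes with memcmp() orders them from the most significant byte down,
 * which matches their big-endian on-the-wire representation. */
struct demo_wwpn { uint8_t b[8]; };

enum demo_pt2pt_role {
	DEMO_ROLE_LOOPBACK,	/* identical WWPNs: our own FLOGI came back */
	DEMO_ROLE_INITIATOR,	/* higher WWPN: we send PLOGI and pick IDs  */
	DEMO_ROLE_RESPONDER,	/* lower WWPN: wait for the peer's PLOGI    */
};

/* Decide the point-to-point role from the two port names. */
static enum demo_pt2pt_role demo_pt2pt_role(const struct demo_wwpn *mine,
					    const struct demo_wwpn *peer)
{
	int rc = memcmp(mine->b, peer->b, sizeof(mine->b));

	if (rc == 0)
		return DEMO_ROLE_LOOPBACK;
	return rc > 0 ? DEMO_ROLE_INITIATOR : DEMO_ROLE_RESPONDER;
}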
+ */ + + rc = memcmp(&vport->fc_portname, &sp->portName, + sizeof(struct lpfc_name)); + + if (!rc) { + if (phba->sli_rev < LPFC_SLI_REV4) { + mbox = mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!mbox) + return 1; + lpfc_linkdown(phba); + lpfc_init_link(phba, mbox, + phba->cfg_topology, + phba->cfg_link_speed); + mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mbox, + MBX_NOWAIT); + lpfc_set_loopback_flag(phba); + if (rc == MBX_NOT_FINISHED) + mempool_free(mbox, phba->mbox_mem_pool); + return 1; + } + + /* External loopback plug insertion detected */ + phba->link_flag |= LS_EXTERNAL_LOOPBACK; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC, + "1119 External Loopback plug detected\n"); + + /* abort the flogi coming back to ourselves + * due to external loopback on the port. + */ + lpfc_els_abort_flogi(phba); + return 0; + + } else if (rc > 0) { /* greater than */ + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_PT2PT_PLOGI; + spin_unlock_irq(shost->host_lock); + + /* If we have the high WWPN we can assign our own + * myDID; otherwise, we have to WAIT for a PLOGI + * from the remote NPort to find out what it + * will be. + */ + vport->fc_myDID = PT2PT_LocalID; + } else { + vport->fc_myDID = PT2PT_RemoteID; + } + + /* + * The vport state should go to LPFC_FLOGI only + * AFTER we issue a FLOGI, not receive one. + */ + spin_lock_irq(shost->host_lock); + fc_flag = vport->fc_flag; + port_state = vport->port_state; + vport->fc_flag |= FC_PT2PT; + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + + /* Acking an unsol FLOGI. Count 1 for link bounce + * work-around. + */ + vport->rcv_flogi_cnt++; + spin_unlock_irq(shost->host_lock); + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3311 Rcv Flogi PS x%x new PS x%x " + "fc_flag x%x new fc_flag x%x\n", + port_state, vport->port_state, + fc_flag, vport->fc_flag); + + /* + * We temporarily set fc_myDID to make it look like we are + * a Fabric. This is done just so we end up with the right + * did / sid on the FLOGI ACC rsp. + */ + did = vport->fc_myDID; + vport->fc_myDID = Fabric_DID; + + memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); + + /* Defer ACC response until AFTER we issue a FLOGI */ + if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { + phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, + &wqe->xmit_els_rsp.wqe_com); + phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, + &wqe->xmit_els_rsp.wqe_com); + + vport->fc_myDID = did; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3344 Deferring FLOGI ACC: rx_id: x%x," + " ox_id: x%x, hba_flag x%x\n", + phba->defer_flogi_acc_rx_id, + phba->defer_flogi_acc_ox_id, phba->hba_flag); + + phba->defer_flogi_acc_flag = true; + + return 0; + } + + /* Send back ACC */ + lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); + + /* Now lets put fc_myDID back to what its supposed to be */ + vport->fc_myDID = did; + + return 0; +} + +/** + * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine processes Request Node Identification Data (RNID) IOCB + * received as an ELS unsolicited event. 
Only when the RNID specified format + * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) + * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to + * Accept (ACC) the RNID ELS command. All the other RNID formats are + * rejected by invoking the lpfc_els_rsp_reject() routine. + * + * Return code + * 0 - Successfully processed rnid iocb (currently always return 0) + **/ +static int +lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_dmabuf *pcmd; + uint32_t *lp; + RNID *rn; + struct ls_rjt stat; + + pcmd = cmdiocb->cmd_dmabuf; + lp = (uint32_t *) pcmd->virt; + + lp++; + rn = (RNID *) lp; + + /* RNID received */ + + switch (rn->Format) { + case 0: + case RNID_TOPOLOGY_DISC: + /* Send back ACC */ + lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); + break; + default: + /* Reject this request because format not supported */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; + stat.un.b.vendorUnique = 0; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, + NULL); + } + return 0; +} + +/** + * lpfc_els_rcv_echo - Process an unsolicited echo iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * Return code + * 0 - Successfully processed echo iocb (currently always return 0) + **/ +static int +lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + uint8_t *pcmd; + + pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt; + + /* skip over first word of echo command to find echo data */ + pcmd += sizeof(uint32_t); + + lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); + return 0; +} + +/** + * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine processes a Link Incident Report Registration(LIRR) IOCB + * received as an ELS unsolicited event. Currently, this function just invokes + * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. + * + * Return code + * 0 - Successfully processed lirr iocb (currently always return 0) + **/ +static int +lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct ls_rjt stat; + + /* For now, unconditionally reject this command */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; + stat.un.b.vendorUnique = 0; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return 0; +} + +/** + * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB + * received as an ELS unsolicited event. A request to RRQ shall only + * be accepted if the Originator Nx_Port N_Port_ID or the Responder + * Nx_Port N_Port_ID of the target Exchange is the same as the + * N_Port_ID of the Nx_Port that makes the request. 
If the RRQ is + * not accepted, an LS_RJT with reason code "Unable to perform + * command request" and reason code explanation "Invalid Originator + * S_ID" shall be returned. For now, we just unconditionally accept + * RRQ from the target. + **/ +static void +lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + if (vport->phba->sli_rev == LPFC_SLI_REV4) + lpfc_els_clear_rrq(vport, cmdiocb, ndlp); +} + +/** + * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * This routine is the completion callback function for the MBX_READ_LNK_STAT + * mailbox command. This callback function is to actually send the Accept + * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It + * collects the link statistics from the completion of the MBX_READ_LNK_STAT + * mailbox command, constructs the RLS response with the link statistics + * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC + * response to the RLS. + * + * Note that the ndlp reference count will be incremented by 1 for holding the + * ndlp and the reference to ndlp will be stored into the ndlp field of + * the IOCB for the completion callback function to the RLS Accept Response + * ELS IOCB command. + * + **/ +static void +lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + int rc = 0; + MAILBOX_t *mb; + IOCB_t *icmd; + union lpfc_wqe128 *wqe; + struct RLS_RSP *rls_rsp; + uint8_t *pcmd; + struct lpfc_iocbq *elsiocb; + struct lpfc_nodelist *ndlp; + uint16_t oxid; + uint16_t rxid; + uint32_t cmdsize; + u32 ulp_context; + + mb = &pmb->u.mb; + + ndlp = pmb->ctx_ndlp; + rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); + oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); + pmb->ctx_buf = NULL; + pmb->ctx_ndlp = NULL; + + if (mb->mbxStatus) { + mempool_free(pmb, phba->mbox_mem_pool); + return; + } + + cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); + elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, + lpfc_max_els_tries, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); + + /* Decrement the ndlp reference count from previous mbox command */ + lpfc_nlp_put(ndlp); + + if (!elsiocb) { + mempool_free(pmb, phba->mbox_mem_pool); + return; + } + + ulp_context = get_job_ulpcontext(phba, elsiocb); + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + /* Xri / rx_id */ + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid); + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid); + } else { + icmd = &elsiocb->iocb; + icmd->ulpContext = rxid; + icmd->unsli3.rcvsli3.ox_id = oxid; + } + + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; + pcmd += sizeof(uint32_t); /* Skip past command */ + rls_rsp = (struct RLS_RSP *)pcmd; + + rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); + rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); + rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); + rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); + rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); + rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); + mempool_free(pmb, phba->mbox_mem_pool); + /* Xmit ELS RLS ACC response tag */ + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, + "2874 Xmit ELS RLS 
ACC response tag x%x xri x%x, "
+ "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+ elsiocb->iotag, ulp_context,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ phba->fc_stat.elsXmitACC++;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return;
+ }
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ }
+ return;
+}
+
+/**
+ * lpfc_els_rcv_rls - Process an unsolicited rls iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Read Link Status (RLS) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
+ * for reading the HBA link statistics. The callback function,
+ * lpfc_els_rsp_rls_acc(), set for the MBX_READ_LNK_STAT mailbox command,
+ * actually sends out the RLS Accept (ACC) response.
+ *
+ * Return codes
+ * 0 - Successfully processed rls iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ struct ls_rjt stat;
+ u32 ctx = get_job_ulpcontext(phba, cmdiocb);
+ u32 ox_id = get_job_rcvoxid(phba, cmdiocb);
+
+ if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+ /* reject the unsolicited RLS request and done with it */
+ goto reject_out;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+ if (mbox) {
+ lpfc_read_lnk_stat(phba, mbox);
+ mbox->ctx_buf = (void *)((unsigned long)
+ (ox_id << 16 | ctx));
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp)
+ goto node_err;
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+ != MBX_NOT_FINISHED)
+ /* Mbox completion will send ELS Response */
+ return 0;
+ /* Decrement reference count used for the failed mbox
+ * command.
+ */
+ lpfc_nlp_put(ndlp);
+node_err:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+reject_out:
+ /* issue rejection response */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return 0;
+}
+
+/**
+ * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Read Timeout Value (RTV) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout
+ * Value (RTV) unsolicited IOCB event.
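+ * The ACC payload reports R_A_TOV scaled to milliseconds (fc_ratov *
+ * 1000) along with E_D_TOV and the E_D_TOV resolution bit taken from
+ * fc_edtovResol.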
+ * + * Note that the ndlp reference count will be incremented by 1 for holding the + * ndlp and the reference to ndlp will be stored into the ndlp field of + * the IOCB for the completion callback function to the RTV Accept Response + * ELS IOCB command. + * + * Return codes + * 0 - Successfully processed rtv iocb (currently always return 0) + **/ +static int +lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + int rc = 0; + IOCB_t *icmd; + union lpfc_wqe128 *wqe; + struct lpfc_hba *phba = vport->phba; + struct ls_rjt stat; + struct RTV_RSP *rtv_rsp; + uint8_t *pcmd; + struct lpfc_iocbq *elsiocb; + uint32_t cmdsize; + u32 ulp_context; + + if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) + /* reject the unsolicited RTV request and done with it */ + goto reject_out; + + cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); + elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, + lpfc_max_els_tries, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); + + if (!elsiocb) + return 1; + + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; + pcmd += sizeof(uint32_t); /* Skip past command */ + + ulp_context = get_job_ulpcontext(phba, elsiocb); + /* use the command's xri in the response */ + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, + get_job_ulpcontext(phba, cmdiocb)); + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + get_job_rcvoxid(phba, cmdiocb)); + } else { + icmd = &elsiocb->iocb; + icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb); + icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb); + } + + rtv_rsp = (struct RTV_RSP *)pcmd; + + /* populate RTV payload */ + rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ + rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); + bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); + bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ + rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); + + /* Xmit ELS RLS ACC response tag */ + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, + "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " + "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " + "Data: x%x x%x x%x\n", + elsiocb->iotag, ulp_context, + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi, + rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); + elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; + phba->fc_stat.elsXmitACC++; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return 0; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + } + return 0; + +reject_out: + /* issue rejection response */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; + stat.un.b.vendorUnique = 0; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return 0; +} + +/* lpfc_issue_els_rrq - Process an unsolicited rrq iocb + * @vport: pointer to a host virtual N_Port data structure. + * @ndlp: pointer to a node-list data structure. + * @did: DID of the target. + * @rrq: Pointer to the rrq struct. + * + * Build a ELS RRQ command and send it to the target. If the issue_iocb is + * successful, the completion handler will clear the RRQ. + * + * Return codes + * 0 - Successfully sent rrq els iocb. + * 1 - Failed to send rrq els iocb. 
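+ *
+ * The RRQ payload carries the originator exchange ID (translated
+ * through the driver's xri_ids table), the responder exchange ID from
+ * the rrq node, and this port's own DID.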
+ **/ +static int +lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + uint32_t did, struct lpfc_node_rrq *rrq) +{ + struct lpfc_hba *phba = vport->phba; + struct RRQ *els_rrq; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + uint16_t cmdsize; + int ret; + + if (!ndlp) + return 1; + + /* If ndlp is not NULL, we will bump the reference count on it */ + cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, + ELS_CMD_RRQ); + if (!elsiocb) + return 1; + + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + + /* For RRQ request, remainder of payload is Exchange IDs */ + *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; + pcmd += sizeof(uint32_t); + els_rrq = (struct RRQ *) pcmd; + + bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); + bf_set(rrq_rxid, els_rrq, rrq->rxid); + bf_set(rrq_did, els_rrq, vport->fc_myDID); + els_rrq->rrq = cpu_to_be32(els_rrq->rrq); + els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); + + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue RRQ: did:x%x", + did, rrq->xritag, rrq->rxid); + elsiocb->context_un.rrq = rrq; + elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq; + + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) + goto io_err; + + ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (ret == IOCB_ERROR) { + lpfc_nlp_put(ndlp); + goto io_err; + } + return 0; + + io_err: + lpfc_els_free_iocb(phba, elsiocb); + return 1; +} + +/** + * lpfc_send_rrq - Sends ELS RRQ if needed. + * @phba: pointer to lpfc hba data structure. + * @rrq: pointer to the active rrq. + * + * This routine will call the lpfc_issue_els_rrq if the rrq is + * still active for the xri. If this function returns a failure then + * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. + * + * Returns 0 Success. + * 1 Failure. + **/ +int +lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) +{ + struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, + rrq->nlp_DID); + if (!ndlp) + return 1; + + if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) + return lpfc_issue_els_rrq(rrq->vport, ndlp, + rrq->nlp_DID, rrq); + else + return 1; +} + +/** + * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command + * @vport: pointer to a host virtual N_Port data structure. + * @cmdsize: size of the ELS command. + * @oldiocb: pointer to the original lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. + * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. + * + * Note that the ndlp reference count will be incremented by 1 for holding the + * ndlp and the reference to ndlp will be stored into the ndlp field of + * the IOCB for the completion callback function to the RPL Accept Response + * ELS command. 
+ * + * Return code + * 0 - Successfully issued ACC RPL ELS command + * 1 - Failed to issue ACC RPL ELS command + **/ +static int +lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, + struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) +{ + int rc = 0; + struct lpfc_hba *phba = vport->phba; + IOCB_t *icmd; + union lpfc_wqe128 *wqe; + RPL_RSP rpl_rsp; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + u32 ulp_context; + + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); + + if (!elsiocb) + return 1; + + ulp_context = get_job_ulpcontext(phba, elsiocb); + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + /* Xri / rx_id */ + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, + get_job_ulpcontext(phba, oldiocb)); + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + get_job_rcvoxid(phba, oldiocb)); + } else { + icmd = &elsiocb->iocb; + icmd->ulpContext = get_job_ulpcontext(phba, oldiocb); + icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb); + } + + pcmd = elsiocb->cmd_dmabuf->virt; + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; + pcmd += sizeof(uint16_t); + *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); + pcmd += sizeof(uint16_t); + + /* Setup the RPL ACC payload */ + rpl_rsp.listLen = be32_to_cpu(1); + rpl_rsp.index = 0; + rpl_rsp.port_num_blk.portNum = 0; + rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); + memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, + sizeof(struct lpfc_name)); + memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); + /* Xmit ELS RPL ACC response tag */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0120 Xmit ELS RPL ACC response tag x%x " + "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " + "rpi x%x\n", + elsiocb->iotag, ulp_context, + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi); + elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; + phba->fc_stat.elsXmitACC++; + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return 1; + } + + return 0; +} + +/** + * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine processes Read Port List (RPL) IOCB received as an ELS + * unsolicited event. It first checks the remote port state. If the remote + * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it + * invokes the lpfc_els_rsp_reject() routine to send reject response. + * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine + * to accept the RPL. 
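+ * The size of the ACC is derived from the request: a zero-index RPL
+ * with a maxsize of zero, or a maxsize large enough to hold an
+ * RPL_RSP, gets a full single-entry RPL_RSP; otherwise the response is
+ * limited to the requested maxsize words.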
+ * + * Return code + * 0 - Successfully processed rpl iocb (currently always return 0) + **/ +static int +lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_dmabuf *pcmd; + uint32_t *lp; + uint32_t maxsize; + uint16_t cmdsize; + RPL *rpl; + struct ls_rjt stat; + + if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { + /* issue rejection response */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; + stat.un.b.vendorUnique = 0; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, + NULL); + /* rejected the unsolicited RPL request and done with it */ + return 0; + } + + pcmd = cmdiocb->cmd_dmabuf; + lp = (uint32_t *) pcmd->virt; + rpl = (RPL *) (lp + 1); + maxsize = be32_to_cpu(rpl->maxsize); + + /* We support only one port */ + if ((rpl->index == 0) && + ((maxsize == 0) || + ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { + cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); + } else { + cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); + } + lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); + + return 0; +} + +/** + * lpfc_els_rcv_farp - Process an unsolicited farp request els command + * @vport: pointer to a virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine processes Fibre Channel Address Resolution Protocol + * (FARP) Request IOCB received as an ELS unsolicited event. Currently, + * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, + * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the + * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the + * remote PortName is compared against the FC PortName stored in the @vport + * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is + * compared against the FC NodeName stored in the @vport data structure. + * If any of these matches and the FARP_REQUEST_FARPR flag is set in the + * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is + * invoked to send out FARP Response to the remote node. Before sending the + * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP + * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() + * routine is invoked to log into the remote port first. 
+ * + * Return code + * 0 - Either the FARP Match Mode not supported or successfully processed + **/ +static int +lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_dmabuf *pcmd; + uint32_t *lp; + FARP *fp; + uint32_t cnt, did; + + did = get_job_els_rsp64_did(vport->phba, cmdiocb); + pcmd = cmdiocb->cmd_dmabuf; + lp = (uint32_t *) pcmd->virt; + + lp++; + fp = (FARP *) lp; + /* FARP-REQ received from DID */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0601 FARP-REQ received from DID x%x\n", did); + /* We will only support match on WWPN or WWNN */ + if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { + return 0; + } + + cnt = 0; + /* If this FARP command is searching for my portname */ + if (fp->Mflags & FARP_MATCH_PORT) { + if (memcmp(&fp->RportName, &vport->fc_portname, + sizeof(struct lpfc_name)) == 0) + cnt = 1; + } + + /* If this FARP command is searching for my nodename */ + if (fp->Mflags & FARP_MATCH_NODE) { + if (memcmp(&fp->RnodeName, &vport->fc_nodename, + sizeof(struct lpfc_name)) == 0) + cnt = 1; + } + + if (cnt) { + if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || + (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { + /* Log back into the node before sending the FARP. */ + if (fp->Rflags & FARP_REQUEST_PLOGI) { + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_PLOGI_ISSUE); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); + } + + /* Send a FARP response to that node */ + if (fp->Rflags & FARP_REQUEST_FARPR) + lpfc_issue_els_farpr(vport, did, 0); + } + } + return 0; +} + +/** + * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine processes Fibre Channel Address Resolution Protocol + * Response (FARPR) IOCB received as an ELS unsolicited event. It simply + * invokes the lpfc_els_rsp_acc() routine to the remote node to accept + * the FARP response request. + * + * Return code + * 0 - Successfully processed FARPR IOCB (currently always return 0) + **/ +static int +lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + uint32_t did; + + did = get_job_els_rsp64_did(vport->phba, cmdiocb); + + /* FARP-RSP received from DID */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0600 FARP-RSP received from DID x%x\n", did); + /* ACCEPT the Farp resp request */ + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + + return 0; +} + +/** + * lpfc_els_rcv_fan - Process an unsolicited fan iocb command + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @fan_ndlp: pointer to a node-list data structure. + * + * This routine processes a Fabric Address Notification (FAN) IOCB + * command received as an ELS unsolicited event. The FAN ELS command will + * only be processed on a physical port (i.e., the @vport represents the + * physical port). The fabric NodeName and PortName from the FAN IOCB are + * compared against those in the phba data structure. If any of those is + * different, the lpfc_initial_flogi() routine is invoked to initialize + * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise, + * if both of those are identical, the lpfc_issue_fabric_reglogin() routine + * is invoked to register login to the fabric. 
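+ * In the current implementation a changed fabric kicks off rediscovery
+ * through lpfc_issue_init_vfi(), while an unchanged fabric registers
+ * via lpfc_issue_fabric_reglogin() on SLI-3 or lpfc_issue_reg_vfi() on
+ * SLI-4.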
+ * + * Return code + * 0 - Successfully processed fan iocb (currently always return 0). + **/ +static int +lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *fan_ndlp) +{ + struct lpfc_hba *phba = vport->phba; + uint32_t *lp; + FAN *fp; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); + lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt; + fp = (FAN *) ++lp; + /* FAN received; Fan does not have a reply sequence */ + if ((vport == phba->pport) && + (vport->port_state == LPFC_LOCAL_CFG_LINK)) { + if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, + sizeof(struct lpfc_name))) || + (memcmp(&phba->fc_fabparam.portName, &fp->FportName, + sizeof(struct lpfc_name)))) { + /* This port has switched fabrics. FLOGI is required */ + lpfc_issue_init_vfi(vport); + } else { + /* FAN verified - skip FLOGI */ + vport->fc_myDID = vport->fc_prevDID; + if (phba->sli_rev < LPFC_SLI_REV4) + lpfc_issue_fabric_reglogin(vport); + else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3138 Need register VFI: (x%x/%x)\n", + vport->fc_prevDID, vport->fc_myDID); + lpfc_issue_reg_vfi(vport); + } + } + } + return 0; +} + +/** + * lpfc_els_rcv_edc - Process an unsolicited EDC iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * Return code + * 0 - Successfully processed echo iocb (currently always return 0) + **/ +static int +lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + struct fc_els_edc *edc_req; + struct fc_tlv_desc *tlv; + uint8_t *payload; + uint32_t *ptr, dtag; + const char *dtag_nm; + int desc_cnt = 0, bytes_remain; + struct fc_diag_lnkflt_desc *plnkflt; + + payload = cmdiocb->cmd_dmabuf->virt; + + edc_req = (struct fc_els_edc *)payload; + bytes_remain = be32_to_cpu(edc_req->desc_len); + + ptr = (uint32_t *)payload; + lpfc_printf_vlog(vport, KERN_INFO, + LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, + "3319 Rcv EDC payload len %d: x%x x%x x%x\n", + bytes_remain, be32_to_cpu(*ptr), + be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); + + /* No signal support unless there is a congestion descriptor */ + phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; + phba->cgn_sig_freq = 0; + phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; + + if (bytes_remain <= 0) + goto out; + + tlv = edc_req->desc; + + /* + * cycle through EDC diagnostic descriptors to find the + * congestion signaling capability descriptor + */ + while (bytes_remain) { + if (bytes_remain < FC_TLV_DESC_HDR_SZ) { + lpfc_printf_log(phba, KERN_WARNING, + LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, + "6464 Truncated TLV hdr on " + "Diagnostic descriptor[%d]\n", + desc_cnt); + goto out; + } + + dtag = be32_to_cpu(tlv->desc_tag); + switch (dtag) { + case ELS_DTAG_LNK_FAULT_CAP: + if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || + FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != + sizeof(struct fc_diag_lnkflt_desc)) { + lpfc_printf_log(phba, KERN_WARNING, + LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, + "6465 Truncated Link Fault Diagnostic " + "descriptor[%d]: %d vs 0x%zx 0x%zx\n", + desc_cnt, bytes_remain, + FC_TLV_DESC_SZ_FROM_LENGTH(tlv), + sizeof(struct fc_diag_lnkflt_desc)); + goto out; + } + plnkflt = (struct fc_diag_lnkflt_desc *)tlv; + lpfc_printf_log(phba, KERN_INFO, + LOG_ELS | LOG_LDS_EVENT, + "4626 Link Fault Desc Data: x%08x len x%x " + "da x%x dd x%x interval x%x\n", + 
be32_to_cpu(plnkflt->desc_tag), + be32_to_cpu(plnkflt->desc_len), + be32_to_cpu( + plnkflt->degrade_activate_threshold), + be32_to_cpu( + plnkflt->degrade_deactivate_threshold), + be32_to_cpu(plnkflt->fec_degrade_interval)); + break; + case ELS_DTAG_CG_SIGNAL_CAP: + if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || + FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != + sizeof(struct fc_diag_cg_sig_desc)) { + lpfc_printf_log( + phba, KERN_WARNING, LOG_CGN_MGMT, + "6466 Truncated cgn signal Diagnostic " + "descriptor[%d]: %d vs 0x%zx 0x%zx\n", + desc_cnt, bytes_remain, + FC_TLV_DESC_SZ_FROM_LENGTH(tlv), + sizeof(struct fc_diag_cg_sig_desc)); + goto out; + } + + phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; + phba->cgn_reg_signal = phba->cgn_init_reg_signal; + + /* We start negotiation with lpfc_fabric_cgn_frequency. + * When we process the EDC, we will settle on the + * higher frequency. + */ + phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; + + lpfc_least_capable_settings( + phba, (struct fc_diag_cg_sig_desc *)tlv); + break; + default: + dtag_nm = lpfc_get_tlv_dtag_nm(dtag); + lpfc_printf_log(phba, KERN_WARNING, + LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, + "6467 unknown Diagnostic " + "Descriptor[%d]: tag x%x (%s)\n", + desc_cnt, dtag, dtag_nm); + } + bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); + tlv = fc_tlv_next_desc(tlv); + desc_cnt++; + } +out: + /* Need to send back an ACC */ + lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp); + + lpfc_config_cgn_signal(phba); + return 0; +} + +/** + * lpfc_els_timeout - Handler funciton to the els timer + * @t: timer context used to obtain the vport. + * + * This routine is invoked by the ELS timer after timeout. It posts the ELS + * timer timeout event by setting the WORKER_ELS_TMO bit to the work port + * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake + * up the worker thread. It is for the worker thread to invoke the routine + * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. + **/ +void +lpfc_els_timeout(struct timer_list *t) +{ + struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); + struct lpfc_hba *phba = vport->phba; + uint32_t tmo_posted; + unsigned long iflag; + + spin_lock_irqsave(&vport->work_port_lock, iflag); + tmo_posted = vport->work_port_events & WORKER_ELS_TMO; + if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) + vport->work_port_events |= WORKER_ELS_TMO; + spin_unlock_irqrestore(&vport->work_port_lock, iflag); + + if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) + lpfc_worker_wake_up(phba); + return; +} + + +/** + * lpfc_els_timeout_handler - Process an els timeout event + * @vport: pointer to a virtual N_Port data structure. + * + * This routine is the actual handler function that processes an ELS timeout + * event. It walks the ELS ring to get and abort all the IOCBs (except the + * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by + * invoking the lpfc_sli_issue_abort_iotag() routine. 
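+ * IOCBs whose driver timeout has not yet expired only have the elapsed
+ * interval subtracted from drvrTimeout and are left for a later timer
+ * pass.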
+ **/ +void +lpfc_els_timeout_handler(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *tmp_iocb, *piocb; + IOCB_t *cmd = NULL; + struct lpfc_dmabuf *pcmd; + uint32_t els_command = 0; + uint32_t timeout; + uint32_t remote_ID = 0xffffffff; + LIST_HEAD(abort_list); + u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0; + + + timeout = (uint32_t)(phba->fc_ratov << 1); + + pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return; + + if (phba->pport->load_flag & FC_UNLOADING) + return; + + spin_lock_irq(&phba->hbalock); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock(&pring->ring_lock); + + list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { + ulp_command = get_job_cmnd(phba, piocb); + ulp_context = get_job_ulpcontext(phba, piocb); + did = get_job_els_rsp64_did(phba, piocb); + + if (phba->sli_rev == LPFC_SLI_REV4) { + iotag = get_wqe_reqtag(piocb); + } else { + cmd = &piocb->iocb; + iotag = cmd->ulpIoTag; + } + + if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 || + ulp_command == CMD_ABORT_XRI_CX || + ulp_command == CMD_ABORT_XRI_CN || + ulp_command == CMD_CLOSE_XRI_CN) + continue; + + if (piocb->vport != vport) + continue; + + pcmd = piocb->cmd_dmabuf; + if (pcmd) + els_command = *(uint32_t *) (pcmd->virt); + + if (els_command == ELS_CMD_FARP || + els_command == ELS_CMD_FARPR || + els_command == ELS_CMD_FDISC) + continue; + + if (piocb->drvrTimeout > 0) { + if (piocb->drvrTimeout >= timeout) + piocb->drvrTimeout -= timeout; + else + piocb->drvrTimeout = 0; + continue; + } + + remote_ID = 0xffffffff; + if (ulp_command != CMD_GEN_REQUEST64_CR) { + remote_ID = did; + } else { + struct lpfc_nodelist *ndlp; + ndlp = __lpfc_findnode_rpi(vport, ulp_context); + if (ndlp) + remote_ID = ndlp->nlp_DID; + } + list_add_tail(&piocb->dlist, &abort_list); + } + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); + spin_unlock_irq(&phba->hbalock); + + list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0127 ELS timeout Data: x%x x%x x%x " + "x%x\n", els_command, + remote_ID, ulp_command, iotag); + + spin_lock_irq(&phba->hbalock); + list_del_init(&piocb->dlist); + lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); + spin_unlock_irq(&phba->hbalock); + } + + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + + if (!list_empty(&pring->txcmplq)) + if (!(phba->pport->load_flag & FC_UNLOADING)) + mod_timer(&vport->els_tmofunc, + jiffies + msecs_to_jiffies(1000 * timeout)); +} + +/** + * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine is used to clean up all the outstanding ELS commands on a + * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() + * routine. After that, it walks the ELS transmit queue to remove all the + * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For + * the IOCBs with a non-NULL completion callback function, the callback + * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and + * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion + * callback function, the IOCB will simply be released. 
Finally, it walks + * the ELS transmit completion queue to issue an abort IOCB to any transmit + * completion queue IOCB that is associated with the @vport and is not + * an IOCB from libdfc (i.e., the management plane IOCBs that are not + * part of the discovery state machine) out to HBA by invoking the + * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the + * abort IOCB to any transmit completion queueed IOCB, it does not guarantee + * the IOCBs are aborted when this function returns. + **/ +void +lpfc_els_flush_cmd(struct lpfc_vport *vport) +{ + LIST_HEAD(abort_list); + LIST_HEAD(cancel_list); + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *tmp_iocb, *piocb; + u32 ulp_command; + unsigned long iflags = 0; + bool mbx_tmo_err; + + lpfc_fabric_abort_vport(vport); + + /* + * For SLI3, only the hbalock is required. But SLI4 needs to coordinate + * with the ring insert operation. Because lpfc_sli_issue_abort_iotag + * ultimately grabs the ring_lock, the driver must splice the list into + * a working list and release the locks before calling the abort. + */ + spin_lock_irqsave(&phba->hbalock, iflags); + pring = lpfc_phba_elsring(phba); + + /* Bail out if we've no ELS wq, like in PCI error recovery case. */ + if (unlikely(!pring)) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + return; + } + + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock(&pring->ring_lock); + + mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags); + /* First we need to issue aborts to outstanding cmds on txcmpl */ + list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { + if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err) + continue; + + if (piocb->vport != vport) + continue; + + if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err) + continue; + + /* On the ELS ring we can have ELS_REQUESTs or + * GEN_REQUESTs waiting for a response. + */ + ulp_command = get_job_cmnd(phba, piocb); + if (ulp_command == CMD_ELS_REQUEST64_CR) { + list_add_tail(&piocb->dlist, &abort_list); + + /* If the link is down when flushing ELS commands + * the firmware will not complete them till after + * the link comes back up. This may confuse + * discovery for the new link up, so we need to + * change the compl routine to just clean up the iocb + * and avoid any retry logic. + */ + if (phba->link_state == LPFC_LINK_DOWN) + piocb->cmd_cmpl = lpfc_cmpl_els_link_down; + } else if (ulp_command == CMD_GEN_REQUEST64_CR || + mbx_tmo_err) + list_add_tail(&piocb->dlist, &abort_list); + } + + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + /* Abort each txcmpl iocb on aborted list and remove the dlist links. 
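+ * When the flush is driven by a mailbox timeout (mbx_tmo_err), the
+ * iocbs are moved to a local cancel_list and completed with
+ * IOERR_SLI_ABORTED instead of being aborted to the HBA.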
*/ + list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { + spin_lock_irqsave(&phba->hbalock, iflags); + list_del_init(&piocb->dlist); + if (mbx_tmo_err) + list_move_tail(&piocb->list, &cancel_list); + else + lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); + + spin_unlock_irqrestore(&phba->hbalock, iflags); + } + if (!list_empty(&cancel_list)) + lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); + else + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + + if (!list_empty(&abort_list)) + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "3387 abort list for txq not empty\n"); + INIT_LIST_HEAD(&abort_list); + + spin_lock_irqsave(&phba->hbalock, iflags); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock(&pring->ring_lock); + + /* No need to abort the txq list, + * just queue them up for lpfc_sli_cancel_iocbs + */ + list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { + ulp_command = get_job_cmnd(phba, piocb); + + if (piocb->cmd_flag & LPFC_IO_LIBDFC) + continue; + + /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ + if (ulp_command == CMD_QUE_RING_BUF_CN || + ulp_command == CMD_QUE_RING_BUF64_CN || + ulp_command == CMD_CLOSE_XRI_CN || + ulp_command == CMD_ABORT_XRI_CN || + ulp_command == CMD_ABORT_XRI_CX) + continue; + + if (piocb->vport != vport) + continue; + + list_del_init(&piocb->list); + list_add_tail(&piocb->list, &abort_list); + } + + /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ + if (vport == phba->pport) { + list_for_each_entry_safe(piocb, tmp_iocb, + &phba->fabric_iocb_list, list) { + list_del_init(&piocb->list); + list_add_tail(&piocb->list, &abort_list); + } + } + + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &abort_list, + IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); + + return; +} + +/** + * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA + * @phba: pointer to lpfc hba data structure. + * + * This routine is used to clean up all the outstanding ELS commands on a + * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() + * routine. After that, it walks the ELS transmit queue to remove all the + * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For + * the IOCBs with the completion callback function associated, the callback + * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and + * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion + * callback function associated, the IOCB will simply be released. Finally, + * it walks the ELS transmit completion queue to issue an abort IOCB to any + * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the + * management plane IOCBs that are not part of the discovery state machine) + * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. + **/ +void +lpfc_els_flush_all_cmd(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport; + + spin_lock_irq(&phba->port_list_lock); + list_for_each_entry(vport, &phba->port_list, listentry) + lpfc_els_flush_cmd(vport); + spin_unlock_irq(&phba->port_list_lock); + + return; +} + +/** + * lpfc_send_els_failure_event - Posts an ELS command failure event + * @phba: Pointer to hba context object. + * @cmdiocbp: Pointer to command iocb which reported error. + * @rspiocbp: Pointer to response iocb which reported error. 
+ * + * This function sends an event when there is an ELS command + * failure. + **/ +void +lpfc_send_els_failure_event(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocbp, + struct lpfc_iocbq *rspiocbp) +{ + struct lpfc_vport *vport = cmdiocbp->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_lsrjt_event lsrjt_event; + struct lpfc_fabric_event_header fabric_event; + struct ls_rjt stat; + struct lpfc_nodelist *ndlp; + uint32_t *pcmd; + u32 ulp_status, ulp_word4; + + ndlp = cmdiocbp->ndlp; + if (!ndlp) + return; + + ulp_status = get_job_ulpstatus(phba, rspiocbp); + ulp_word4 = get_job_word4(phba, rspiocbp); + + if (ulp_status == IOSTAT_LS_RJT) { + lsrjt_event.header.event_type = FC_REG_ELS_EVENT; + lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; + memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, + sizeof(struct lpfc_name)); + memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, + sizeof(struct lpfc_name)); + pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt; + lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; + stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); + lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; + lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; + fc_host_post_vendor_event(shost, + fc_get_event_number(), + sizeof(lsrjt_event), + (char *)&lsrjt_event, + LPFC_NL_VENDOR_ID); + return; + } + if (ulp_status == IOSTAT_NPORT_BSY || + ulp_status == IOSTAT_FABRIC_BSY) { + fabric_event.event_type = FC_REG_FABRIC_EVENT; + if (ulp_status == IOSTAT_NPORT_BSY) + fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; + else + fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; + memcpy(fabric_event.wwpn, &ndlp->nlp_portname, + sizeof(struct lpfc_name)); + memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, + sizeof(struct lpfc_name)); + fc_host_post_vendor_event(shost, + fc_get_event_number(), + sizeof(fabric_event), + (char *)&fabric_event, + LPFC_NL_VENDOR_ID); + return; + } + +} + +/** + * lpfc_send_els_event - Posts unsolicited els event + * @vport: Pointer to vport object. + * @ndlp: Pointer FC node object. + * @payload: ELS command code type. + * + * This function posts an event when there is an incoming + * unsolicited ELS command. 
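+ * LOGO events are posted with the larger lpfc_logo_event so the WWPN
+ * from the LOGO payload can be carried; all other ELS commands use the
+ * common lpfc_els_event_header.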
+ **/ +static void +lpfc_send_els_event(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + uint32_t *payload) +{ + struct lpfc_els_event_header *els_data = NULL; + struct lpfc_logo_event *logo_data = NULL; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (*payload == ELS_CMD_LOGO) { + logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); + if (!logo_data) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0148 Failed to allocate memory " + "for LOGO event\n"); + return; + } + els_data = &logo_data->header; + } else { + els_data = kmalloc(sizeof(struct lpfc_els_event_header), + GFP_KERNEL); + if (!els_data) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0149 Failed to allocate memory " + "for ELS event\n"); + return; + } + } + els_data->event_type = FC_REG_ELS_EVENT; + switch (*payload) { + case ELS_CMD_PLOGI: + els_data->subcategory = LPFC_EVENT_PLOGI_RCV; + break; + case ELS_CMD_PRLO: + els_data->subcategory = LPFC_EVENT_PRLO_RCV; + break; + case ELS_CMD_ADISC: + els_data->subcategory = LPFC_EVENT_ADISC_RCV; + break; + case ELS_CMD_LOGO: + els_data->subcategory = LPFC_EVENT_LOGO_RCV; + /* Copy the WWPN in the LOGO payload */ + memcpy(logo_data->logo_wwpn, &payload[2], + sizeof(struct lpfc_name)); + break; + default: + kfree(els_data); + return; + } + memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); + memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); + if (*payload == ELS_CMD_LOGO) { + fc_host_post_vendor_event(shost, + fc_get_event_number(), + sizeof(struct lpfc_logo_event), + (char *)logo_data, + LPFC_NL_VENDOR_ID); + kfree(logo_data); + } else { + fc_host_post_vendor_event(shost, + fc_get_event_number(), + sizeof(struct lpfc_els_event_header), + (char *)els_data, + LPFC_NL_VENDOR_ID); + kfree(els_data); + } + + return; +} + + +DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, + FC_FPIN_LI_EVT_TYPES_INIT); + +DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, + FC_FPIN_DELI_EVT_TYPES_INIT); + +DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, + FC_FPIN_CONGN_EVT_TYPES_INIT); + +DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, + fc_fpin_congn_severity_types, + FC_FPIN_CONGN_SEVERITY_INIT); + + +/** + * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port + * @phba: Pointer to phba object. + * @wwnlist: Pointer to list of WWPNs in FPIN payload + * @cnt: count of WWPNs in FPIN payload + * + * This routine is called by LI and PC descriptors. + * Limit the number of WWPNs displayed to 6 log messages, 6 per log message + */ +static void +lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) +{ + char buf[LPFC_FPIN_WWPN_LINE_SZ]; + __be64 wwn; + u64 wwpn; + int i, len; + int line = 0; + int wcnt = 0; + bool endit = false; + + len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); + for (i = 0; i < cnt; i++) { + /* Are we on the last WWPN */ + if (i == (cnt - 1)) + endit = true; + + /* Extract the next WWPN from the payload */ + wwn = *wwnlist++; + wwpn = be64_to_cpu(wwn); + len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, + " %016llx", wwpn); + + /* Log a message if we are on the last WWPN + * or if we hit the max allowed per message. 
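+ * At most LPFC_FPIN_WWPN_LINE_CNT WWPNs go into one message and at
+ * most LPFC_FPIN_WWPN_NUM_LINE messages are logged per FPIN; any
+ * remaining WWPNs are reported only as a truncation count.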
+ */ + wcnt++; + if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { + buf[len] = 0; + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "4686 %s\n", buf); + + /* Check if we reached the last WWPN */ + if (endit) + return; + + /* Limit the number of log message displayed per FPIN */ + line++; + if (line == LPFC_FPIN_WWPN_NUM_LINE) { + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "4687 %d WWPNs Truncated\n", + cnt - i - 1); + return; + } + + /* Start over with next log message */ + wcnt = 0; + len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, + "Additional WWPNs:"); + } + } +} + +/** + * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. + * @phba: Pointer to phba object. + * @tlv: Pointer to the Link Integrity Notification Descriptor. + * + * This function processes a Link Integrity FPIN event by logging a message. + **/ +static void +lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) +{ + struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; + const char *li_evt_str; + u32 li_evt, cnt; + + li_evt = be16_to_cpu(li->event_type); + li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); + cnt = be32_to_cpu(li->pname_count); + + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "4680 FPIN Link Integrity %s (x%x) " + "Detecting PN x%016llx Attached PN x%016llx " + "Duration %d mSecs Count %d Port Cnt %d\n", + li_evt_str, li_evt, + be64_to_cpu(li->detecting_wwpn), + be64_to_cpu(li->attached_wwpn), + be32_to_cpu(li->event_threshold), + be32_to_cpu(li->event_count), cnt); + + lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); +} + +/** + * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. + * @phba: Pointer to hba object. + * @tlv: Pointer to the Delivery Notification Descriptor TLV + * + * This function processes a Delivery FPIN event by logging a message. + **/ +static void +lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) +{ + struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; + const char *del_rsn_str; + u32 del_rsn; + __be32 *frame; + + del_rsn = be16_to_cpu(del->deli_reason_code); + del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); + + /* Skip over desc_tag/desc_len header to payload */ + frame = (__be32 *)(del + 1); + + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "4681 FPIN Delivery %s (x%x) " + "Detecting PN x%016llx Attached PN x%016llx " + "DiscHdr0 x%08x " + "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " + "DiscHdr4 x%08x DiscHdr5 x%08x\n", + del_rsn_str, del_rsn, + be64_to_cpu(del->detecting_wwpn), + be64_to_cpu(del->attached_wwpn), + be32_to_cpu(frame[0]), + be32_to_cpu(frame[1]), + be32_to_cpu(frame[2]), + be32_to_cpu(frame[3]), + be32_to_cpu(frame[4]), + be32_to_cpu(frame[5])); +} + +/** + * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. + * @phba: Pointer to hba object. + * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV + * + * This function processes a Peer Congestion FPIN event by logging a message. 
+ **/ +static void +lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) +{ + struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; + const char *pc_evt_str; + u32 pc_evt, cnt; + + pc_evt = be16_to_cpu(pc->event_type); + pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); + cnt = be32_to_cpu(pc->pname_count); + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, + "4684 FPIN Peer Congestion %s (x%x) " + "Duration %d mSecs " + "Detecting PN x%016llx Attached PN x%016llx " + "Impacted Port Cnt %d\n", + pc_evt_str, pc_evt, + be32_to_cpu(pc->event_period), + be64_to_cpu(pc->detecting_wwpn), + be64_to_cpu(pc->attached_wwpn), + cnt); + + lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); +} + +/** + * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification + * @phba: Pointer to hba object. + * @tlv: Pointer to the Congestion Notification Descriptor TLV + * + * This function processes an FPIN Congestion Notifiction. The notification + * could be an Alarm or Warning. This routine feeds that data into driver's + * running congestion algorithm. It also processes the FPIN by + * logging a message. It returns 1 to indicate deliver this message + * to the upper layer or 0 to indicate don't deliver it. + **/ +static int +lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) +{ + struct lpfc_cgn_info *cp; + struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; + const char *cgn_evt_str; + u32 cgn_evt; + const char *cgn_sev_str; + u32 cgn_sev; + uint16_t value; + u32 crc; + bool nm_log = false; + int rc = 1; + + cgn_evt = be16_to_cpu(cgn->event_type); + cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); + cgn_sev = cgn->severity; + cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); + + /* The driver only takes action on a Credit Stall or Oversubscription + * event type to engage the IO algorithm. The driver prints an + * unmaskable message only for Lost Credit and Credit Stall. + * TODO: Still need to have definition of host action on clear, + * lost credit and device specific event types. + */ + switch (cgn_evt) { + case FPIN_CONGN_LOST_CREDIT: + nm_log = true; + break; + case FPIN_CONGN_CREDIT_STALL: + nm_log = true; + fallthrough; + case FPIN_CONGN_OVERSUBSCRIPTION: + if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) + nm_log = false; + switch (cgn_sev) { + case FPIN_CONGN_SEVERITY_ERROR: + /* Take action here for an Alarm event */ + if (phba->cmf_active_mode != LPFC_CFG_OFF) { + if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { + /* Track of alarm cnt for SYNC_WQE */ + atomic_inc(&phba->cgn_sync_alarm_cnt); + } + /* Track alarm cnt for cgn_info regardless + * of whether CMF is configured for Signals + * or FPINs. + */ + atomic_inc(&phba->cgn_fabric_alarm_cnt); + goto cleanup; + } + break; + case FPIN_CONGN_SEVERITY_WARNING: + /* Take action here for a Warning event */ + if (phba->cmf_active_mode != LPFC_CFG_OFF) { + if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { + /* Track of warning cnt for SYNC_WQE */ + atomic_inc(&phba->cgn_sync_warn_cnt); + } + /* Track warning cnt and freq for cgn_info + * regardless of whether CMF is configured for + * Signals or FPINs. 
+ */ + atomic_inc(&phba->cgn_fabric_warn_cnt); +cleanup: + /* Save frequency in ms */ + phba->cgn_fpin_frequency = + be32_to_cpu(cgn->event_period); + value = phba->cgn_fpin_frequency; + if (phba->cgn_i) { + cp = (struct lpfc_cgn_info *) + phba->cgn_i->virt; + cp->cgn_alarm_freq = + cpu_to_le16(value); + cp->cgn_warn_freq = + cpu_to_le16(value); + crc = lpfc_cgn_calc_crc32 + (cp, + LPFC_CGN_INFO_SZ, + LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(crc); + } + + /* Don't deliver to upper layer since + * driver took action on this tlv. + */ + rc = 0; + } + break; + } + break; + } + + /* Change the log level to unmaskable for the following event types. */ + lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO), + LOG_CGN_MGMT | LOG_ELS, + "4683 FPIN CONGESTION %s type %s (x%x) Event " + "Duration %d mSecs\n", + cgn_sev_str, cgn_evt_str, cgn_evt, + be32_to_cpu(cgn->event_period)); + return rc; +} + +void +lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) +{ + struct lpfc_hba *phba = vport->phba; + struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; + struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; + const char *dtag_nm; + int desc_cnt = 0, bytes_remain, cnt; + u32 dtag, deliver = 0; + int len; + + /* FPINs handled only if we are in the right discovery state */ + if (vport->port_state < LPFC_DISC_AUTH) + return; + + /* make sure there is the full fpin header */ + if (fpin_length < sizeof(struct fc_els_fpin)) + return; + + /* Sanity check descriptor length. The desc_len value does not + * include space for the ELS command and the desc_len fields. + */ + len = be32_to_cpu(fpin->desc_len); + if (fpin_length < len + sizeof(struct fc_els_fpin)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "4671 Bad ELS FPIN length %d: %d\n", + len, fpin_length); + return; + } + + tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; + first_tlv = tlv; + bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); + bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); + + /* process each descriptor separately */ + while (bytes_remain >= FC_TLV_DESC_HDR_SZ && + bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { + dtag = be32_to_cpu(tlv->desc_tag); + switch (dtag) { + case ELS_DTAG_LNK_INTEGRITY: + lpfc_els_rcv_fpin_li(phba, tlv); + deliver = 1; + break; + case ELS_DTAG_DELIVERY: + lpfc_els_rcv_fpin_del(phba, tlv); + deliver = 1; + break; + case ELS_DTAG_PEER_CONGEST: + lpfc_els_rcv_fpin_peer_cgn(phba, tlv); + deliver = 1; + break; + case ELS_DTAG_CONGESTION: + deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); + break; + default: + dtag_nm = lpfc_get_tlv_dtag_nm(dtag); + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "4678 unknown FPIN descriptor[%d]: " + "tag x%x (%s)\n", + desc_cnt, dtag, dtag_nm); + + /* If descriptor is bad, drop the rest of the data */ + return; + } + lpfc_cgn_update_stat(phba, dtag); + cnt = be32_to_cpu(tlv->desc_len); + + /* Sanity check descriptor length. The desc_len value does not + * include space for the desc_tag and the desc_len fields. + */ + len -= (cnt + sizeof(struct fc_tlv_desc)); + if (len < 0) { + dtag_nm = lpfc_get_tlv_dtag_nm(dtag); + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "4672 Bad FPIN descriptor TLV length " + "%d: %d %d %s\n", + cnt, len, fpin_length, dtag_nm); + return; + } + + current_tlv = tlv; + bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); + tlv = fc_tlv_next_desc(tlv); + + /* Format payload such that the FPIN delivered to the + * upper layer is a single descriptor FPIN. 
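+ * After the first descriptor, each subsequent TLV is copied over the
+ * first descriptor slot and desc_len is rewritten so that
+ * fc_host_fpin_rcv() always sees a one-descriptor FPIN.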
+ */ + if (desc_cnt) + memcpy(first_tlv, current_tlv, + (cnt + sizeof(struct fc_els_fpin))); + + /* Adjust the length so that it only reflects a + * single descriptor FPIN. + */ + fpin_length = cnt + sizeof(struct fc_els_fpin); + fpin->desc_len = cpu_to_be32(fpin_length); + fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ + + /* Send every descriptor individually to the upper layer */ + if (deliver) + fc_host_fpin_rcv(lpfc_shost_from_vport(vport), + fpin_length, (char *)fpin, 0); + desc_cnt++; + } +} + +/** + * lpfc_els_unsol_buffer - Process an unsolicited event data buffer + * @phba: pointer to lpfc hba data structure. + * @pring: pointer to a SLI ring. + * @vport: pointer to a host virtual N_Port data structure. + * @elsiocb: pointer to lpfc els command iocb data structure. + * + * This routine is used for processing the IOCB associated with a unsolicited + * event. It first determines whether there is an existing ndlp that matches + * the DID from the unsolicited IOCB. If not, it will create a new one with + * the DID from the unsolicited IOCB. The ELS command from the unsolicited + * IOCB is then used to invoke the proper routine and to set up proper state + * of the discovery state machine. + **/ +static void +lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) +{ + struct lpfc_nodelist *ndlp; + struct ls_rjt stat; + u32 *payload, payload_len; + u32 cmd = 0, did = 0, newnode, status = 0; + uint8_t rjt_exp, rjt_err = 0, init_link = 0; + struct lpfc_wcqe_complete *wcqe_cmpl = NULL; + LPFC_MBOXQ_t *mbox; + + if (!vport || !elsiocb->cmd_dmabuf) + goto dropit; + + newnode = 0; + wcqe_cmpl = &elsiocb->wcqe_cmpl; + payload = elsiocb->cmd_dmabuf->virt; + if (phba->sli_rev == LPFC_SLI_REV4) + payload_len = wcqe_cmpl->total_data_placed; + else + payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; + status = get_job_ulpstatus(phba, elsiocb); + cmd = *payload; + if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) + lpfc_sli3_post_buffer(phba, pring, 1); + + did = get_job_els_rsp64_did(phba, elsiocb); + if (status) { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV Unsol ELS: status:x%x/x%x did:x%x", + status, get_job_word4(phba, elsiocb), did); + goto dropit; + } + + /* Check to see if link went down during discovery */ + if (lpfc_els_chk_latt(vport)) + goto dropit; + + /* Ignore traffic received during vport shutdown. 
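/*
 * A self-contained sketch of the descriptor walk performed by
 * lpfc_els_rcv_fpin() above: each TLV is validated against the bytes that
 * remain in the frame before it is dispatched, and a malformed length stops
 * the walk.  The struct layout and helper below are simplified assumptions
 * that only mirror the idea, not the FC ELS definitions.
 */
#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>		/* ntohl() stands in for be32_to_cpu() */

struct tlv_hdr {
	uint32_t tag;		/* big endian on the wire */
	uint32_t len;		/* value length, excludes this header */
	uint8_t  value[];
};

typedef void (*tlv_handler)(uint32_t tag, const uint8_t *val, uint32_t len);

static void walk_tlvs(const uint8_t *buf, size_t buf_len, tlv_handler cb)
{
	size_t off = 0;

	while (buf_len - off >= sizeof(struct tlv_hdr)) {
		const struct tlv_hdr *tlv = (const void *)(buf + off);
		uint32_t vlen = ntohl(tlv->len);
		size_t sz = sizeof(*tlv) + vlen;

		if (sz > buf_len - off)	/* truncated descriptor: stop */
			return;
		cb(ntohl(tlv->tag), tlv->value, vlen);
		off += sz;
	}
}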
*/ + if (vport->load_flag & FC_UNLOADING) + goto dropit; + + /* If NPort discovery is delayed drop incoming ELS */ + if ((vport->fc_flag & FC_DISC_DELAYED) && + (cmd != ELS_CMD_PLOGI)) + goto dropit; + + ndlp = lpfc_findnode_did(vport, did); + if (!ndlp) { + /* Cannot find existing Fabric ndlp, so allocate a new one */ + ndlp = lpfc_nlp_init(vport, did); + if (!ndlp) + goto dropit; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + newnode = 1; + if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) + ndlp->nlp_type |= NLP_FABRIC; + } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + newnode = 1; + } + + phba->fc_stat.elsRcvFrame++; + + /* + * Do not process any unsolicited ELS commands + * if the ndlp is in DEV_LOSS + */ + spin_lock_irq(&ndlp->lock); + if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { + spin_unlock_irq(&ndlp->lock); + if (newnode) + lpfc_nlp_put(ndlp); + goto dropit; + } + spin_unlock_irq(&ndlp->lock); + + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) + goto dropit; + elsiocb->vport = vport; + + if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { + cmd &= ELS_CMD_MASK; + } + /* ELS command received from NPORT */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0112 ELS command x%x received from NPORT x%x " + "refcnt %d Data: x%x x%x x%x x%x\n", + cmd, did, kref_read(&ndlp->kref), vport->port_state, + vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); + + /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ + if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && + (cmd != ELS_CMD_FLOGI) && + !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { + rjt_err = LSRJT_LOGICAL_BSY; + rjt_exp = LSEXP_NOTHING_MORE; + goto lsrjt; + } + + switch (cmd) { + case ELS_CMD_PLOGI: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV PLOGI: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvPLOGI++; + ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); + if (phba->sli_rev == LPFC_SLI_REV4 && + (phba->pport->fc_flag & FC_PT2PT)) { + vport->fc_prevDID = vport->fc_myDID; + /* Our DID needs to be updated before registering + * the vfi. This is done in lpfc_rcv_plogi but + * that is called after the reg_vfi. + */ + vport->fc_myDID = + bf_get(els_rsp64_sid, + &elsiocb->wqe.xmit_els_rsp); + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3312 Remote port assigned DID x%x " + "%x\n", vport->fc_myDID, + vport->fc_prevDID); + } + + lpfc_send_els_event(vport, ndlp, payload); + + /* If Nport discovery is delayed, reject PLOGIs */ + if (vport->fc_flag & FC_DISC_DELAYED) { + rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_NOTHING_MORE; + break; + } + + if (vport->port_state < LPFC_DISC_AUTH) { + if (!(phba->pport->fc_flag & FC_PT2PT) || + (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { + rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_NOTHING_MORE; + break; + } + } + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; + spin_unlock_irq(&ndlp->lock); + + lpfc_disc_state_machine(vport, ndlp, elsiocb, + NLP_EVT_RCV_PLOGI); + + break; + case ELS_CMD_FLOGI: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV FLOGI: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvFLOGI++; + + /* If the driver believes fabric discovery is done and is ready, + * bounce the link. There is some descrepancy. 
+ */ + if (vport->port_state >= LPFC_LOCAL_CFG_LINK && + vport->fc_flag & FC_PT2PT && + vport->rcv_flogi_cnt >= 1) { + rjt_err = LSRJT_LOGICAL_BSY; + rjt_exp = LSEXP_NOTHING_MORE; + init_link++; + goto lsrjt; + } + + lpfc_els_rcv_flogi(vport, elsiocb, ndlp); + /* retain node if our response is deferred */ + if (phba->defer_flogi_acc_flag) + break; + if (newnode) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + break; + case ELS_CMD_LOGO: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV LOGO: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvLOGO++; + lpfc_send_els_event(vport, ndlp, payload); + if (vport->port_state < LPFC_DISC_AUTH) { + rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_NOTHING_MORE; + break; + } + lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); + if (newnode) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + break; + case ELS_CMD_PRLO: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV PRLO: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvPRLO++; + lpfc_send_els_event(vport, ndlp, payload); + if (vport->port_state < LPFC_DISC_AUTH) { + rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_NOTHING_MORE; + break; + } + lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); + break; + case ELS_CMD_LCB: + phba->fc_stat.elsRcvLCB++; + lpfc_els_rcv_lcb(vport, elsiocb, ndlp); + break; + case ELS_CMD_RDP: + phba->fc_stat.elsRcvRDP++; + lpfc_els_rcv_rdp(vport, elsiocb, ndlp); + break; + case ELS_CMD_RSCN: + phba->fc_stat.elsRcvRSCN++; + lpfc_els_rcv_rscn(vport, elsiocb, ndlp); + if (newnode) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + break; + case ELS_CMD_ADISC: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV ADISC: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + lpfc_send_els_event(vport, ndlp, payload); + phba->fc_stat.elsRcvADISC++; + if (vport->port_state < LPFC_DISC_AUTH) { + rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_NOTHING_MORE; + break; + } + lpfc_disc_state_machine(vport, ndlp, elsiocb, + NLP_EVT_RCV_ADISC); + break; + case ELS_CMD_PDISC: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV PDISC: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvPDISC++; + if (vport->port_state < LPFC_DISC_AUTH) { + rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_NOTHING_MORE; + break; + } + lpfc_disc_state_machine(vport, ndlp, elsiocb, + NLP_EVT_RCV_PDISC); + break; + case ELS_CMD_FARPR: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV FARPR: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvFARPR++; + lpfc_els_rcv_farpr(vport, elsiocb, ndlp); + break; + case ELS_CMD_FARP: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV FARP: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvFARP++; + lpfc_els_rcv_farp(vport, elsiocb, ndlp); + break; + case ELS_CMD_FAN: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV FAN: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvFAN++; + lpfc_els_rcv_fan(vport, elsiocb, ndlp); + break; + case ELS_CMD_PRLI: + case ELS_CMD_NVMEPRLI: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV PRLI: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvPRLI++; + if 
((vport->port_state < LPFC_DISC_AUTH) && + (vport->fc_flag & FC_FABRIC)) { + rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_NOTHING_MORE; + break; + } + lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); + break; + case ELS_CMD_LIRR: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV LIRR: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvLIRR++; + lpfc_els_rcv_lirr(vport, elsiocb, ndlp); + if (newnode) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + break; + case ELS_CMD_RLS: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RLS: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvRLS++; + lpfc_els_rcv_rls(vport, elsiocb, ndlp); + if (newnode) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + break; + case ELS_CMD_RPL: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RPL: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvRPL++; + lpfc_els_rcv_rpl(vport, elsiocb, ndlp); + if (newnode) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + break; + case ELS_CMD_RNID: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RNID: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvRNID++; + lpfc_els_rcv_rnid(vport, elsiocb, ndlp); + if (newnode) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + break; + case ELS_CMD_RTV: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RTV: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvRTV++; + lpfc_els_rcv_rtv(vport, elsiocb, ndlp); + if (newnode) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + break; + case ELS_CMD_RRQ: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RRQ: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvRRQ++; + lpfc_els_rcv_rrq(vport, elsiocb, ndlp); + if (newnode) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + break; + case ELS_CMD_ECHO: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV ECHO: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvECHO++; + lpfc_els_rcv_echo(vport, elsiocb, ndlp); + if (newnode) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + break; + case ELS_CMD_REC: + /* receive this due to exchange closed */ + rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_INVALID_OX_RX; + break; + case ELS_CMD_FPIN: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV FPIN: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, + payload_len); + + /* There are no replies, so no rjt codes */ + break; + case ELS_CMD_EDC: + lpfc_els_rcv_edc(vport, elsiocb, ndlp); + break; + case ELS_CMD_RDF: + phba->fc_stat.elsRcvRDF++; + /* Accept RDF only from fabric controller */ + if (did != Fabric_Cntl_DID) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, + "1115 Received RDF from invalid DID " + "x%x\n", did); + rjt_err = LSRJT_PROTOCOL_ERR; + rjt_exp = LSEXP_NOTHING_MORE; + goto lsrjt; + } + + lpfc_els_rcv_rdf(vport, elsiocb, ndlp); + break; + default: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", + cmd, did, vport->port_state); + + /* Unsupported ELS command, reject */ + rjt_err = 
LSRJT_CMD_UNSUPPORTED; + rjt_exp = LSEXP_NOTHING_MORE; + + /* Unknown ELS command received from NPORT */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0115 Unknown ELS command x%x " + "received from NPORT x%x\n", cmd, did); + if (newnode) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + break; + } + +lsrjt: + /* check if need to LS_RJT received ELS cmd */ + if (rjt_err) { + memset(&stat, 0, sizeof(stat)); + stat.un.b.lsRjtRsnCode = rjt_err; + stat.un.b.lsRjtRsnCodeExp = rjt_exp; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, + NULL); + /* Remove the reference from above for new nodes. */ + if (newnode) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + } + + /* Release the reference on this elsiocb, not the ndlp. */ + lpfc_nlp_put(elsiocb->ndlp); + elsiocb->ndlp = NULL; + + /* Special case. Driver received an unsolicited command that + * unsupportable given the driver's current state. Reset the + * link and start over. + */ + if (init_link) { + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return; + lpfc_linkdown(phba); + lpfc_init_link(phba, mbox, + phba->cfg_topology, + phba->cfg_link_speed); + mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == + MBX_NOT_FINISHED) + mempool_free(mbox, phba->mbox_mem_pool); + } + + return; + +dropit: + if (vport && !(vport->load_flag & FC_UNLOADING)) + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0111 Dropping received ELS cmd " + "Data: x%x x%x x%x x%x\n", + cmd, status, get_job_word4(phba, elsiocb), did); + + phba->fc_stat.elsRcvDrop++; +} + +/** + * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring + * @phba: pointer to lpfc hba data structure. + * @pring: pointer to a SLI ring. + * @elsiocb: pointer to lpfc els iocb data structure. + * + * This routine is used to process an unsolicited event received from a SLI + * (Service Level Interface) ring. The actual processing of the data buffer + * associated with the unsolicited event is done by invoking the routine + * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the + * SLI ring on which the unsolicited event was received. 
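/*
 * Sketch of the dispatch pattern used by lpfc_els_unsol_buffer() above: the
 * first payload word selects a handler, and unsupported commands fall back to
 * an LS_RJT with a "command not supported" reason.  This table-driven variant
 * is only an illustration; the driver itself uses a switch statement, and the
 * command values below are placeholders.
 */
#include <stdint.h>
#include <stddef.h>

#define ELS_PLOGI		0x03	/* placeholder values */
#define ELS_LOGO		0x05
#define LS_RJT_UNSUPPORTED	0x0b

struct els_frame { uint32_t cmd; /* payload follows */ };

typedef int (*els_handler)(const struct els_frame *);

static int rcv_plogi(const struct els_frame *f) { (void)f; return 0; }
static int rcv_logo(const struct els_frame *f)  { (void)f; return 0; }

static const struct { uint32_t cmd; els_handler fn; } els_tbl[] = {
	{ ELS_PLOGI, rcv_plogi },
	{ ELS_LOGO,  rcv_logo  },
};

/* Returns 0 on success or an LS_RJT reason code for unsupported commands. */
static int els_dispatch(const struct els_frame *f)
{
	for (size_t i = 0; i < sizeof(els_tbl) / sizeof(els_tbl[0]); i++)
		if (els_tbl[i].cmd == f->cmd)
			return els_tbl[i].fn(f);
	return LS_RJT_UNSUPPORTED;
}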
+ **/ +void +lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *elsiocb) +{ + struct lpfc_vport *vport = elsiocb->vport; + u32 ulp_command, status, parameter, bde_count = 0; + IOCB_t *icmd; + struct lpfc_wcqe_complete *wcqe_cmpl = NULL; + struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf; + struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf; + dma_addr_t paddr; + + elsiocb->cmd_dmabuf = NULL; + elsiocb->rsp_dmabuf = NULL; + elsiocb->bpl_dmabuf = NULL; + + wcqe_cmpl = &elsiocb->wcqe_cmpl; + ulp_command = get_job_cmnd(phba, elsiocb); + status = get_job_ulpstatus(phba, elsiocb); + parameter = get_job_word4(phba, elsiocb); + if (phba->sli_rev == LPFC_SLI_REV4) + bde_count = wcqe_cmpl->word3; + else + bde_count = elsiocb->iocb.ulpBdeCount; + + if (status == IOSTAT_NEED_BUFFER) { + lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); + } else if (status == IOSTAT_LOCAL_REJECT && + (parameter & IOERR_PARAM_MASK) == + IOERR_RCV_BUFFER_WAITING) { + phba->fc_stat.NoRcvBuf++; + /* Not enough posted buffers; Try posting more buffers */ + if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) + lpfc_sli3_post_buffer(phba, pring, 0); + return; + } + + if (phba->sli_rev == LPFC_SLI_REV3) { + icmd = &elsiocb->iocb; + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + (ulp_command == CMD_IOCB_RCV_ELS64_CX || + ulp_command == CMD_IOCB_RCV_SEQ64_CX)) { + if (icmd->unsli3.rcvsli3.vpi == 0xffff) + vport = phba->pport; + else + vport = lpfc_find_vport_by_vpid(phba, + icmd->unsli3.rcvsli3.vpi); + } + } + + /* If there are no BDEs associated + * with this IOCB, there is nothing to do. + */ + if (bde_count == 0) + return; + + /* Account for SLI2 or SLI3 and later unsolicited buffering */ + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + elsiocb->cmd_dmabuf = bdeBuf1; + if (bde_count == 2) + elsiocb->bpl_dmabuf = bdeBuf2; + } else { + icmd = &elsiocb->iocb; + paddr = getPaddr(icmd->un.cont64[0].addrHigh, + icmd->un.cont64[0].addrLow); + elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, + paddr); + if (bde_count == 2) { + paddr = getPaddr(icmd->un.cont64[1].addrHigh, + icmd->un.cont64[1].addrLow); + elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, + pring, + paddr); + } + } + + lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); + /* + * The different unsolicited event handlers would tell us + * if they are done with "mp" by setting cmd_dmabuf to NULL. + */ + if (elsiocb->cmd_dmabuf) { + lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf); + elsiocb->cmd_dmabuf = NULL; + } + + if (elsiocb->bpl_dmabuf) { + lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf); + elsiocb->bpl_dmabuf = NULL; + } + +} + +static void +lpfc_start_fdmi(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp; + + /* If this is the first time, allocate an ndlp and initialize + * it. Otherwise, make sure the node is enabled and then do the + * login. + */ + ndlp = lpfc_findnode_did(vport, FDMI_DID); + if (!ndlp) { + ndlp = lpfc_nlp_init(vport, FDMI_DID); + if (ndlp) { + ndlp->nlp_type |= NLP_FABRIC; + } else { + return; + } + } + + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); +} + +/** + * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr + * @phba: pointer to lpfc hba data structure. + * @vport: pointer to a virtual N_Port data structure. + * + * This routine issues a Port Login (PLOGI) to the Name Server with + * State Change Request (SCR) for a @vport. 
This routine will create an + * ndlp for the Name Server associated to the @vport if such node does + * not already exist. The PLOGI to Name Server is issued by invoking the + * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface + * (FDMI) is configured to the @vport, a FDMI node will be created and + * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine. + **/ +void +lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + /* + * If lpfc_delay_discovery parameter is set and the clean address + * bit is cleared and fc fabric parameters chenged, delay FC NPort + * discovery. + */ + spin_lock_irq(shost->host_lock); + if (vport->fc_flag & FC_DISC_DELAYED) { + spin_unlock_irq(shost->host_lock); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "3334 Delay fc port discovery for %d secs\n", + phba->fc_ratov); + mod_timer(&vport->delayed_disc_tmo, + jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); + return; + } + spin_unlock_irq(shost->host_lock); + + ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (!ndlp) { + ndlp = lpfc_nlp_init(vport, NameServer_DID); + if (!ndlp) { + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { + lpfc_disc_start(vport); + return; + } + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0251 NameServer login: no memory\n"); + return; + } + } + + ndlp->nlp_type |= NLP_FABRIC; + + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + + if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0252 Cannot issue NameServer login\n"); + return; + } + + if ((phba->cfg_enable_SmartSAN || + (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && + (vport->load_flag & FC_ALLOW_FDMI)) + lpfc_start_fdmi(vport); +} + +/** + * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * This routine is the completion callback function to register new vport + * mailbox command. If the new vport mailbox command completes successfully, + * the fabric registration login shall be performed on physical port (the + * new vport created is actually a physical port, with VPI 0) or the port + * login to Name Server for State Change Request (SCR) will be performed + * on virtual port (real virtual port, with VPI greater than 0). 
+ **/ +static void +lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; + MAILBOX_t *mb = &pmb->u.mb; + int rc; + + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); + + if (mb->mbxStatus) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0915 Register VPI failed : Status: x%x" + " upd bit: x%x \n", mb->mbxStatus, + mb->un.varRegVpi.upd); + if (phba->sli_rev == LPFC_SLI_REV4 && + mb->un.varRegVpi.upd) + goto mbox_err_exit ; + + switch (mb->mbxStatus) { + case 0x11: /* unsupported feature */ + case 0x9603: /* max_vpi exceeded */ + case 0x9602: /* Link event since CLEAR_LA */ + /* giving up on vport registration */ + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + break; + /* If reg_vpi fail with invalid VPI status, re-init VPI */ + case 0x20: + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); + lpfc_init_vpi(phba, pmb, vport->vpi); + pmb->vport = vport; + pmb->mbox_cmpl = lpfc_init_vpi_cmpl; + rc = lpfc_sli_issue_mbox(phba, pmb, + MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "2732 Failed to issue INIT_VPI" + " mailbox command\n"); + } else { + lpfc_nlp_put(ndlp); + return; + } + fallthrough; + default: + /* Try to recover from this error */ + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(vport); + lpfc_mbx_unreg_vpi(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); + if (mb->mbxStatus == MBX_NOT_FINISHED) + break; + if ((vport->port_type == LPFC_PHYSICAL_PORT) && + !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_issue_init_vfi(vport); + else + lpfc_initial_flogi(vport); + } else { + lpfc_initial_fdisc(vport); + } + break; + } + } else { + spin_lock_irq(shost->host_lock); + vport->vpi_state |= LPFC_VPI_REGISTERED; + spin_unlock_irq(shost->host_lock); + if (vport == phba->pport) { + if (phba->sli_rev < LPFC_SLI_REV4) + lpfc_issue_fabric_reglogin(vport); + else { + /* + * If the physical port is instantiated using + * FDISC, do not start vport discovery. + */ + if (vport->port_state != LPFC_FDISC) + lpfc_start_fdiscs(phba); + lpfc_do_scr_ns_plogi(phba, vport); + } + } else { + lpfc_do_scr_ns_plogi(phba, vport); + } + } +mbox_err_exit: + /* Now, we decrement the ndlp reference count held for this + * callback function + */ + lpfc_nlp_put(ndlp); + + mempool_free(pmb, phba->mbox_mem_pool); + return; +} + +/** + * lpfc_register_new_vport - Register a new vport with a HBA + * @phba: pointer to lpfc hba data structure. + * @vport: pointer to a host virtual N_Port data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine registers the @vport as a new virtual port with a HBA. + * It is done through a registering vpi mailbox command. 
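/*
 * Sketch of the REG_VPI completion policy implemented in
 * lpfc_cmpl_reg_new_vport() above: a few mailbox status codes mean the vport
 * registration should be abandoned, an invalid-VPI status triggers an
 * INIT_VPI retry, and anything else falls back to unregistering and redoing
 * fabric discovery.  The enum below is an illustrative stand-in for the
 * resulting action, not a driver interface.
 */
enum reg_vpi_action { VPORT_GIVE_UP, VPORT_REINIT_VPI, VPORT_REDO_DISCOVERY };

static enum reg_vpi_action reg_vpi_error_policy(unsigned int mbx_status)
{
	switch (mbx_status) {
	case 0x11:	/* unsupported feature */
	case 0x9602:	/* link event since CLEAR_LA */
	case 0x9603:	/* max_vpi exceeded */
		return VPORT_GIVE_UP;
	case 0x20:	/* invalid VPI: re-init and retry */
		return VPORT_REINIT_VPI;
	default:
		return VPORT_REDO_DISCOVERY;
	}
}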
+ **/ +void +lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + LPFC_MBOXQ_t *mbox; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mbox) { + lpfc_reg_vpi(vport, mbox); + mbox->vport = vport; + mbox->ctx_ndlp = lpfc_nlp_get(ndlp); + if (!mbox->ctx_ndlp) { + mempool_free(mbox, phba->mbox_mem_pool); + goto mbox_err_exit; + } + + mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) + == MBX_NOT_FINISHED) { + /* mailbox command not success, decrement ndlp + * reference count for this command + */ + lpfc_nlp_put(ndlp); + mempool_free(mbox, phba->mbox_mem_pool); + + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0253 Register VPI: Can't send mbox\n"); + goto mbox_err_exit; + } + } else { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0254 Register VPI: no memory\n"); + goto mbox_err_exit; + } + return; + +mbox_err_exit: + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); + return; +} + +/** + * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer + * @phba: pointer to lpfc hba data structure. + * + * This routine cancels the retry delay timers to all the vports. + **/ +void +lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + struct lpfc_nodelist *ndlp; + uint32_t link_state; + int i; + + /* Treat this failure as linkdown for all vports */ + link_state = phba->link_state; + lpfc_linkdown(phba); + phba->link_state = link_state; + + vports = lpfc_create_vport_work_array(phba); + + if (vports) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + ndlp = lpfc_findnode_did(vports[i], Fabric_DID); + if (ndlp) + lpfc_cancel_retry_delay_tmo(vports[i], ndlp); + lpfc_els_flush_cmd(vports[i]); + } + lpfc_destroy_vport_work_array(phba, vports); + } +} + +/** + * lpfc_retry_pport_discovery - Start timer to retry FLOGI. + * @phba: pointer to lpfc hba data structure. + * + * This routine abort all pending discovery commands and + * start a timer to retry FLOGI for the physical port + * discovery. + **/ +void +lpfc_retry_pport_discovery(struct lpfc_hba *phba) +{ + struct lpfc_nodelist *ndlp; + + /* Cancel the all vports retry delay retry timers */ + lpfc_cancel_all_vport_retry_delay_timer(phba); + + /* If fabric require FLOGI, then re-instantiate physical login */ + ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); + if (!ndlp) + return; + + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_DELAY_TMO; + spin_unlock_irq(&ndlp->lock); + ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; + phba->pport->port_state = LPFC_FLOGI; + return; +} + +/** + * lpfc_fabric_login_reqd - Check if FLOGI required. + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to FDISC command iocb. + * @rspiocb: pointer to FDISC response iocb. + * + * This routine checks if a FLOGI is reguired for FDISC + * to succeed. 
+ **/ +static int +lpfc_fabric_login_reqd(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + u32 ulp_word4 = get_job_word4(phba, rspiocb); + + if (ulp_status != IOSTAT_FABRIC_RJT || + ulp_word4 != RJT_LOGIN_REQUIRED) + return 0; + else + return 1; +} + +/** + * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine is the completion callback function to a Fabric Discover + * (FDISC) ELS command. Since all the FDISC ELS commands are issued + * single threaded, each FDISC completion callback function will reset + * the discovery timer for all vports such that the timers will not get + * unnecessary timeout. The function checks the FDISC IOCB status. If error + * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the + * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID + * assigned to the vport has been changed with the completion of the FDISC + * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) + * are unregistered from the HBA, and then the lpfc_register_new_vport() + * routine is invoked to register new vport with the HBA. Otherwise, the + * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name + * Server for State Change Request (SCR). + **/ +static void +lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp = cmdiocb->ndlp; + struct lpfc_nodelist *np; + struct lpfc_nodelist *next_np; + struct lpfc_iocbq *piocb; + struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp; + struct serv_parm *sp; + uint8_t fabric_param_changed; + u32 ulp_status, ulp_word4; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + ulp_word4 = get_job_word4(phba, rspiocb); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0123 FDISC completes. x%x/x%x prevDID: x%x\n", + ulp_status, ulp_word4, + vport->fc_prevDID); + /* Since all FDISCs are being single threaded, we + * must reset the discovery timer for ALL vports + * waiting to send FDISC when one completes. + */ + list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { + lpfc_set_disctmo(piocb->vport); + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "FDISC cmpl: status:x%x/x%x prevdid:x%x", + ulp_status, ulp_word4, vport->fc_prevDID); + + if (ulp_status) { + + if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { + lpfc_retry_pport_discovery(phba); + goto out; + } + + /* Check for retry */ + if (lpfc_els_retry(phba, cmdiocb, rspiocb)) + goto out; + /* FDISC failed */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0126 FDISC failed. 
(x%x/x%x)\n", + ulp_status, ulp_word4); + goto fdisc_failed; + } + + lpfc_check_nlp_post_devloss(vport, ndlp); + + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VPORT_CVL_RCVD; + vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; + vport->fc_flag |= FC_FABRIC; + if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) + vport->fc_flag |= FC_PUBLIC_LOOP; + spin_unlock_irq(shost->host_lock); + + vport->fc_myDID = ulp_word4 & Mask_DID; + lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); + prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); + if (!prsp) + goto out; + sp = prsp->virt + sizeof(uint32_t); + fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); + memcpy(&vport->fabric_portname, &sp->portName, + sizeof(struct lpfc_name)); + memcpy(&vport->fabric_nodename, &sp->nodeName, + sizeof(struct lpfc_name)); + if (fabric_param_changed && + !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { + /* If our NportID changed, we need to ensure all + * remaining NPORTs get unreg_login'ed so we can + * issue unreg_vpi. + */ + list_for_each_entry_safe(np, next_np, + &vport->fc_nodes, nlp_listp) { + if ((np->nlp_state != NLP_STE_NPR_NODE) || + !(np->nlp_flag & NLP_NPR_ADISC)) + continue; + spin_lock_irq(&ndlp->lock); + np->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(&ndlp->lock); + lpfc_unreg_rpi(vport, np); + } + lpfc_cleanup_pending_mbox(vport); + + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(vport); + + lpfc_mbx_unreg_vpi(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + if (phba->sli_rev == LPFC_SLI_REV4) + vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + else + vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; + spin_unlock_irq(shost->host_lock); + } else if ((phba->sli_rev == LPFC_SLI_REV4) && + !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { + /* + * Driver needs to re-reg VPI in order for f/w + * to update the MAC address. + */ + lpfc_register_new_vport(phba, vport, ndlp); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + goto out; + } + + if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) + lpfc_issue_init_vpi(vport); + else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) + lpfc_register_new_vport(phba, vport, ndlp); + else + lpfc_do_scr_ns_plogi(phba, vport); + + /* The FDISC completed successfully. Move the fabric ndlp to + * UNMAPPED state and register with the transport. + */ + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + goto out; + +fdisc_failed: + if (vport->fc_vport && + (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + /* Cancel discovery timer */ + lpfc_can_disctmo(vport); +out: + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); +} + +/** + * lpfc_issue_els_fdisc - Issue a fdisc iocb command + * @vport: pointer to a virtual N_Port data structure. + * @ndlp: pointer to a node-list data structure. + * @retry: number of retries to the command IOCB. + * + * This routine prepares and issues a Fabric Discover (FDISC) IOCB to + * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() + * routine to issue the IOCB, which makes sure only one outstanding fabric + * IOCB will be sent off HBA at any given time. + * + * Note that the ndlp reference count will be incremented by 1 for holding the + * ndlp and the reference to ndlp will be stored into the ndlp field of + * the IOCB for the completion callback function to the FDISC ELS command. 
+ * + * Return code + * 0 - Successfully issued fdisc iocb command + * 1 - Failed to issue fdisc iocb command + **/ +static int +lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + IOCB_t *icmd; + union lpfc_wqe128 *wqe = NULL; + struct lpfc_iocbq *elsiocb; + struct serv_parm *sp; + uint8_t *pcmd; + uint16_t cmdsize; + int did = ndlp->nlp_DID; + int rc; + + vport->port_state = LPFC_FDISC; + vport->fc_myDID = 0; + cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, + ELS_CMD_FDISC); + if (!elsiocb) { + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0255 Issue FDISC: no IOCB\n"); + return 1; + } + + if (phba->sli_rev == LPFC_SLI_REV4) { + wqe = &elsiocb->wqe; + bf_set(els_req64_sid, &wqe->els_req, 0); + bf_set(els_req64_sp, &wqe->els_req, 1); + } else { + icmd = &elsiocb->iocb; + icmd->un.elsreq64.myID = 0; + icmd->un.elsreq64.fl = 1; + icmd->ulpCt_h = 1; + icmd->ulpCt_l = 0; + } + + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; + pcmd += sizeof(uint32_t); /* CSP Word 1 */ + memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); + sp = (struct serv_parm *) pcmd; + /* Setup CSPs accordingly for Fabric */ + sp->cmn.e_d_tov = 0; + sp->cmn.w2.r_a_tov = 0; + sp->cmn.virtual_fabric_support = 0; + sp->cls1.classValid = 0; + sp->cls2.seqDelivery = 1; + sp->cls3.seqDelivery = 1; + + pcmd += sizeof(uint32_t); /* CSP Word 2 */ + pcmd += sizeof(uint32_t); /* CSP Word 3 */ + pcmd += sizeof(uint32_t); /* CSP Word 4 */ + pcmd += sizeof(uint32_t); /* Port Name */ + memcpy(pcmd, &vport->fc_portname, 8); + pcmd += sizeof(uint32_t); /* Node Name */ + pcmd += sizeof(uint32_t); /* Node Name */ + memcpy(pcmd, &vport->fc_nodename, 8); + sp->cmn.valid_vendor_ver_level = 0; + memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); + lpfc_set_disctmo(vport); + + phba->fc_stat.elsXmitFDISC++; + elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue FDISC: did:x%x", + did, 0, 0); + + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) + goto err_out; + + rc = lpfc_issue_fabric_iocb(phba, elsiocb); + if (rc == IOCB_ERROR) { + lpfc_nlp_put(ndlp); + goto err_out; + } + + lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); + return 0; + + err_out: + lpfc_els_free_iocb(phba, elsiocb); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0256 Issue FDISC: Cannot send IOCB\n"); + return 1; +} + +/** + * lpfc_cmpl_els_npiv_logo - Completion function with vport logo + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine is the completion callback function to the issuing of a LOGO + * ELS command off a vport. It frees the command IOCB and then decrement the + * reference count held on ndlp for this completion function, indicating that + * the reference to the ndlp is no long needed. Note that the + * lpfc_els_free_iocb() routine decrements the ndlp reference held for this + * callback function and an additional explicit ndlp reference decrementation + * will trigger the actual release of the ndlp. 
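/*
 * Sketch of the ELS payload layout built by lpfc_issue_els_fdisc() above:
 * one 32-bit command word followed by a copy of the physical port's service
 * parameters, with several common-service fields forced to the values a
 * fabric expects.  The structure below is a simplified assumption that keeps
 * only a few of the fields the code above touches.
 */
#include <stdint.h>
#include <string.h>

struct csp_sketch {		/* stand-in for struct serv_parm */
	uint16_t e_d_tov;
	uint16_t r_a_tov;
	uint8_t  cls1_valid;
	uint8_t  cls2_seq_delivery;
	uint8_t  cls3_seq_delivery;
	uint8_t  wwpn[8];
	uint8_t  wwnn[8];
};

static void build_fdisc_payload(uint8_t *buf, uint32_t els_cmd_fdisc,
				const struct csp_sketch *port_sparams)
{
	struct csp_sketch sp = *port_sparams;	/* start from the port's FLOGI params */

	/* Fabric-facing overrides, mirroring the code above. */
	sp.e_d_tov = 0;
	sp.r_a_tov = 0;
	sp.cls1_valid = 0;
	sp.cls2_seq_delivery = 1;
	sp.cls3_seq_delivery = 1;

	memcpy(buf, &els_cmd_fdisc, sizeof(uint32_t));	/* command word first */
	memcpy(buf + sizeof(uint32_t), &sp, sizeof(sp));
}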
+ **/ +static void +lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + IOCB_t *irsp; + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + u32 ulp_status, ulp_word4, did, tmo; + + ndlp = cmdiocb->ndlp; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + ulp_word4 = get_job_word4(phba, rspiocb); + + if (phba->sli_rev == LPFC_SLI_REV4) { + did = get_job_els_rsp64_did(phba, cmdiocb); + tmo = get_wqe_tmo(cmdiocb); + } else { + irsp = &rspiocb->iocb; + did = get_job_els_rsp64_did(phba, rspiocb); + tmo = irsp->ulpTimeout; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "LOGO npiv cmpl: status:x%x/x%x did:x%x", + ulp_status, ulp_word4, did); + + /* NPIV LOGO completes to NPort */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2928 NPIV LOGO completes to NPort x%x " + "Data: x%x x%x x%x x%x x%x x%x x%x\n", + ndlp->nlp_DID, ulp_status, ulp_word4, + tmo, vport->num_disc_nodes, + kref_read(&ndlp->kref), ndlp->nlp_flag, + ndlp->fc4_xpt_flags); + + if (ulp_status == IOSTAT_SUCCESS) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + vport->fc_flag &= ~FC_FABRIC; + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + } + + if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { + /* Wake up lpfc_vport_delete if waiting...*/ + if (ndlp->logo_waitq) + wake_up(ndlp->logo_waitq); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); + ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; + spin_unlock_irq(&ndlp->lock); + } + + /* Safe to release resources now. */ + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); +} + +/** + * lpfc_issue_els_npiv_logo - Issue a logo off a vport + * @vport: pointer to a virtual N_Port data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine issues a LOGO ELS command to an @ndlp off a @vport. + * + * Note that the ndlp reference count will be incremented by 1 for holding the + * ndlp and the reference to ndlp will be stored into the ndlp field of + * the IOCB for the completion callback function to the LOGO ELS command. 
+ * + * Return codes + * 0 - Successfully issued logo off the @vport + * 1 - Failed to issue logo off the @vport + **/ +int +lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + int rc = 0; + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + uint16_t cmdsize; + + cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, + ELS_CMD_LOGO); + if (!elsiocb) + return 1; + + pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; + *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; + pcmd += sizeof(uint32_t); + + /* Fill in LOGO payload */ + *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); + pcmd += sizeof(uint32_t); + memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue LOGO npiv did:x%x flg:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, 0); + + elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_LOGO_SND; + spin_unlock_irq(&ndlp->lock); + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(phba, elsiocb); + goto err; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + goto err; + } + return 0; + +err: + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_LOGO_SND; + spin_unlock_irq(&ndlp->lock); + return 1; +} + +/** + * lpfc_fabric_block_timeout - Handler function to the fabric block timer + * @t: timer context used to obtain the lpfc hba. + * + * This routine is invoked by the fabric iocb block timer after + * timeout. It posts the fabric iocb block timeout event by setting the + * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes + * lpfc_worker_wake_up() routine to wake up the worker thread. It is for + * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the + * posted event WORKER_FABRIC_BLOCK_TMO. + **/ +void +lpfc_fabric_block_timeout(struct timer_list *t) +{ + struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); + unsigned long iflags; + uint32_t tmo_posted; + + spin_lock_irqsave(&phba->pport->work_port_lock, iflags); + tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; + if (!tmo_posted) + phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; + spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); + + if (!tmo_posted) + lpfc_worker_wake_up(phba); + return; +} + +/** + * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list + * @phba: pointer to lpfc hba data structure. + * + * This routine issues one fabric iocb from the driver internal list to + * the HBA. It first checks whether it's ready to issue one fabric iocb to + * the HBA (whether there is no outstanding fabric iocb). If so, it shall + * remove one pending fabric iocb from the driver internal list and invokes + * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 
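/*
 * Sketch of the timer-to-worker hand-off used by lpfc_fabric_block_timeout()
 * above: the timer only sets an event bit under the lock and wakes the worker
 * if the bit was not already posted, so a burst of timeouts results in a
 * single wakeup.  The mutex and condition variable below are pthread
 * stand-ins for the driver's spinlock and worker thread, chosen only to keep
 * the sketch self-contained.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdbool.h>

#define WORK_FABRIC_BLOCK_TMO	(1u << 0)

struct work_port {
	pthread_mutex_t lock;
	pthread_cond_t  wake;
	uint32_t        events;
};

static void post_fabric_block_tmo(struct work_port *wp)
{
	bool already_posted;

	pthread_mutex_lock(&wp->lock);
	already_posted = wp->events & WORK_FABRIC_BLOCK_TMO;
	wp->events |= WORK_FABRIC_BLOCK_TMO;
	pthread_mutex_unlock(&wp->lock);

	if (!already_posted)
		pthread_cond_signal(&wp->wake);	/* wake the worker once */
}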
+ **/ +static void +lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) +{ + struct lpfc_iocbq *iocb; + unsigned long iflags; + int ret; + +repeat: + iocb = NULL; + spin_lock_irqsave(&phba->hbalock, iflags); + /* Post any pending iocb to the SLI layer */ + if (atomic_read(&phba->fabric_iocb_count) == 0) { + list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), + list); + if (iocb) + /* Increment fabric iocb count to hold the position */ + atomic_inc(&phba->fabric_iocb_count); + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + if (iocb) { + iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; + iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; + iocb->cmd_flag |= LPFC_IO_FABRIC; + + lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, + "Fabric sched1: ste:x%x", + iocb->vport->port_state, 0, 0); + + ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); + + if (ret == IOCB_ERROR) { + iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; + iocb->fabric_cmd_cmpl = NULL; + iocb->cmd_flag &= ~LPFC_IO_FABRIC; + set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT); + iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED; + iocb->cmd_cmpl(phba, iocb, iocb); + + atomic_dec(&phba->fabric_iocb_count); + goto repeat; + } + } +} + +/** + * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command + * @phba: pointer to lpfc hba data structure. + * + * This routine unblocks the issuing fabric iocb command. The function + * will clear the fabric iocb block bit and then invoke the routine + * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb + * from the driver internal fabric iocb list. + **/ +void +lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) +{ + clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); + + lpfc_resume_fabric_iocbs(phba); + return; +} + +/** + * lpfc_block_fabric_iocbs - Block issuing fabric iocb command + * @phba: pointer to lpfc hba data structure. + * + * This routine blocks the issuing fabric iocb for a specified amount of + * time (currently 100 ms). This is done by set the fabric iocb block bit + * and set up a timeout timer for 100ms. When the block bit is set, no more + * fabric iocb will be issued out of the HBA. + **/ +static void +lpfc_block_fabric_iocbs(struct lpfc_hba *phba) +{ + int blocked; + + blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); + /* Start a timer to unblock fabric iocbs after 100ms */ + if (!blocked) + mod_timer(&phba->fabric_block_timer, + jiffies + msecs_to_jiffies(100)); + + return; +} + +/** + * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine is the callback function that is put to the fabric iocb's + * callback function pointer (iocb->cmd_cmpl). The original iocb's callback + * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback + * function first restores and invokes the original iocb's callback function + * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next + * fabric bound iocb from the driver internal fabric iocb list onto the wire. 
+ **/ +static void +lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct ls_rjt stat; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + u32 ulp_word4 = get_job_word4(phba, rspiocb); + + WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC); + + switch (ulp_status) { + case IOSTAT_NPORT_RJT: + case IOSTAT_FABRIC_RJT: + if (ulp_word4 & RJT_UNAVAIL_TEMP) + lpfc_block_fabric_iocbs(phba); + break; + + case IOSTAT_NPORT_BSY: + case IOSTAT_FABRIC_BSY: + lpfc_block_fabric_iocbs(phba); + break; + + case IOSTAT_LS_RJT: + stat.un.ls_rjt_error_be = + cpu_to_be32(ulp_word4); + if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) || + (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY)) + lpfc_block_fabric_iocbs(phba); + break; + } + + BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0); + + cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl; + cmdiocb->fabric_cmd_cmpl = NULL; + cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; + cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb); + + atomic_dec(&phba->fabric_iocb_count); + if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { + /* Post any pending iocbs to HBA */ + lpfc_resume_fabric_iocbs(phba); + } +} + +/** + * lpfc_issue_fabric_iocb - Issue a fabric iocb command + * @phba: pointer to lpfc hba data structure. + * @iocb: pointer to lpfc command iocb data structure. + * + * This routine is used as the top-level API for issuing a fabric iocb command + * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver + * function makes sure that only one fabric bound iocb will be outstanding at + * any given time. As such, this function will first check to see whether there + * is already an outstanding fabric iocb on the wire. If so, it will put the + * newly issued iocb onto the driver internal fabric iocb list, waiting to be + * issued later. Otherwise, it will issue the iocb on the wire and update the + * fabric iocb count it indicate that there is one fabric iocb on the wire. + * + * Note, this implementation has a potential sending out fabric IOCBs out of + * order. The problem is caused by the construction of the "ready" boolen does + * not include the condition that the internal fabric IOCB list is empty. As + * such, it is possible a fabric IOCB issued by this routine might be "jump" + * ahead of the fabric IOCBs in the internal list. 
+ * + * Return code + * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully + * IOCB_ERROR - failed to issue fabric iocb + **/ +static int +lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) +{ + unsigned long iflags; + int ready; + int ret; + + BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); + + spin_lock_irqsave(&phba->hbalock, iflags); + ready = atomic_read(&phba->fabric_iocb_count) == 0 && + !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); + + if (ready) + /* Increment fabric iocb count to hold the position */ + atomic_inc(&phba->fabric_iocb_count); + spin_unlock_irqrestore(&phba->hbalock, iflags); + if (ready) { + iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; + iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; + iocb->cmd_flag |= LPFC_IO_FABRIC; + + lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, + "Fabric sched2: ste:x%x", + iocb->vport->port_state, 0, 0); + + ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); + + if (ret == IOCB_ERROR) { + iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; + iocb->fabric_cmd_cmpl = NULL; + iocb->cmd_flag &= ~LPFC_IO_FABRIC; + atomic_dec(&phba->fabric_iocb_count); + } + } else { + spin_lock_irqsave(&phba->hbalock, iflags); + list_add_tail(&iocb->list, &phba->fabric_iocb_list); + spin_unlock_irqrestore(&phba->hbalock, iflags); + ret = IOCB_SUCCESS; + } + return ret; +} + +/** + * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list + * @vport: pointer to a virtual N_Port data structure. + * + * This routine aborts all the IOCBs associated with a @vport from the + * driver internal fabric IOCB list. The list contains fabric IOCBs to be + * issued to the ELS IOCB ring. This abort function walks the fabric IOCB + * list, removes each IOCB associated with the @vport off the list, set the + * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function + * associated with the IOCB. + **/ +static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) +{ + LIST_HEAD(completions); + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *tmp_iocb, *piocb; + + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, + list) { + + if (piocb->vport != vport) + continue; + + list_move_tail(&piocb->list, &completions); + } + spin_unlock_irq(&phba->hbalock); + + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); +} + +/** + * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list + * @ndlp: pointer to a node-list data structure. + * + * This routine aborts all the IOCBs associated with an @ndlp from the + * driver internal fabric IOCB list. The list contains fabric IOCBs to be + * issued to the ELS IOCB ring. This abort function walks the fabric IOCB + * list, removes each IOCB associated with the @ndlp off the list, set the + * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function + * associated with the IOCB. 
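/*
 * Sketch of the single-outstanding gate described for
 * lpfc_issue_fabric_iocb() above: a command is sent only when nothing is
 * outstanding and the block bit is clear, otherwise it is queued; the
 * completion path later pulls the next entry off the queue.  As the note
 * above says, "ready" does not check whether the queue is already non-empty,
 * which is the ordering caveat being described.  Types and names here are
 * illustrative only; locking is omitted.
 */
#include <stdbool.h>

struct fabric_gate {
	int  outstanding;	/* 0 or 1 */
	bool blocked;		/* fabric responses asked us to back off */
	int  queued;		/* depth of the internal list */
};

/* Returns true if the caller may issue now, false if it must queue. */
static bool fabric_gate_try_issue(struct fabric_gate *g)
{
	bool ready = (g->outstanding == 0) && !g->blocked;

	if (ready)
		g->outstanding = 1;	/* hold the slot */
	else
		g->queued++;
	return ready;
}

/* Returns true if a queued command should now be issued. */
static bool fabric_gate_complete(struct fabric_gate *g)
{
	g->outstanding = 0;
	if (!g->blocked && g->queued > 0) {
		g->queued--;
		g->outstanding = 1;	/* resume one queued command */
		return true;
	}
	return false;
}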
+ **/ +void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) +{ + LIST_HEAD(completions); + struct lpfc_hba *phba = ndlp->phba; + struct lpfc_iocbq *tmp_iocb, *piocb; + struct lpfc_sli_ring *pring; + + pring = lpfc_phba_elsring(phba); + + if (unlikely(!pring)) + return; + + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, + list) { + if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { + + list_move_tail(&piocb->list, &completions); + } + } + spin_unlock_irq(&phba->hbalock); + + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); +} + +/** + * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list + * @phba: pointer to lpfc hba data structure. + * + * This routine aborts all the IOCBs currently on the driver internal + * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS + * IOCB ring. This function takes the entire IOCB list off the fabric IOCB + * list, removes IOCBs off the list, set the status field to + * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with + * the IOCB. + **/ +void lpfc_fabric_abort_hba(struct lpfc_hba *phba) +{ + LIST_HEAD(completions); + + spin_lock_irq(&phba->hbalock); + list_splice_init(&phba->fabric_iocb_list, &completions); + spin_unlock_irq(&phba->hbalock); + + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); +} + +/** + * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport + * @vport: pointer to lpfc vport data structure. + * + * This routine is invoked by the vport cleanup for deletions and the cleanup + * for an ndlp on removal. + **/ +void +lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + struct lpfc_nodelist *ndlp = NULL; + unsigned long iflag = 0; + + spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); + list_for_each_entry_safe(sglq_entry, sglq_next, + &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { + if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { + lpfc_nlp_put(sglq_entry->ndlp); + ndlp = sglq_entry->ndlp; + sglq_entry->ndlp = NULL; + + /* If the xri on the abts_els_sgl list is for the Fport + * node and the vport is unloading, the xri aborted wcqe + * likely isn't coming back. Just release the sgl. + */ + if ((vport->load_flag & FC_UNLOADING) && + ndlp->nlp_DID == Fabric_DID) { + list_del(&sglq_entry->list); + sglq_entry->state = SGL_FREED; + list_add_tail(&sglq_entry->list, + &phba->sli4_hba.lpfc_els_sgl_list); + } + } + } + spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); + return; +} + +/** + * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort + * @phba: pointer to lpfc hba data structure. + * @axri: pointer to the els xri abort wcqe structure. + * + * This routine is invoked by the worker thread to process a SLI4 slow-path + * ELS aborted xri. 
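/*
 * Sketch of the splice-and-cancel pattern used by lpfc_fabric_abort_hba()
 * and the other abort helpers above: entries are moved to a private list
 * while the lock is held, and the potentially slow completion callbacks run
 * only after the lock is dropped.  A minimal singly linked list and a pthread
 * mutex stand in for the driver's list_head and spinlock.
 */
#include <pthread.h>
#include <stddef.h>

struct req {
	struct req *next;
	void (*complete)(struct req *, int status);
};

static void abort_all(pthread_mutex_t *lock, struct req **pending, int status)
{
	struct req *batch, *r;

	pthread_mutex_lock(lock);
	batch = *pending;	/* splice the whole list away ... */
	*pending = NULL;
	pthread_mutex_unlock(lock);

	while ((r = batch) != NULL) {	/* ... then complete outside the lock */
		batch = r->next;
		r->complete(r, status);
	}
}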
+ **/ +void +lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri) +{ + uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); + uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); + uint16_t lxri = 0; + + struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + unsigned long iflag = 0; + struct lpfc_nodelist *ndlp; + struct lpfc_sli_ring *pring; + + pring = lpfc_phba_elsring(phba); + + spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); + list_for_each_entry_safe(sglq_entry, sglq_next, + &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { + if (sglq_entry->sli4_xritag == xri) { + list_del(&sglq_entry->list); + ndlp = sglq_entry->ndlp; + sglq_entry->ndlp = NULL; + list_add_tail(&sglq_entry->list, + &phba->sli4_hba.lpfc_els_sgl_list); + sglq_entry->state = SGL_FREED; + spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, + iflag); + + if (ndlp) { + lpfc_set_rrq_active(phba, ndlp, + sglq_entry->sli4_lxritag, + rxid, 1); + lpfc_nlp_put(ndlp); + } + + /* Check if TXQ queue needs to be serviced */ + if (pring && !list_empty(&pring->txq)) + lpfc_worker_wake_up(phba); + return; + } + } + spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); + lxri = lpfc_sli4_xri_inrange(phba, xri); + if (lxri == NO_XRI) + return; + + spin_lock_irqsave(&phba->hbalock, iflag); + sglq_entry = __lpfc_get_active_sglq(phba, lxri); + if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; + } + sglq_entry->state = SGL_XRI_ABORTED; + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; +} + +/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. + * @vport: pointer to virtual port object. + * @ndlp: nodelist pointer for the impacted node. + * + * The driver calls this routine in response to an SLI4 XRI ABORT CQE + * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, + * the driver is required to send a LOGO to the remote node before it + * attempts to recover its login to the remote node. + */ +void +lpfc_sli_abts_recover_port(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp) +{ + struct Scsi_Host *shost; + struct lpfc_hba *phba; + unsigned long flags = 0; + + shost = lpfc_shost_from_vport(vport); + phba = vport->phba; + if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { + lpfc_printf_log(phba, KERN_INFO, + LOG_SLI, "3093 No rport recovery needed. " + "rport in state 0x%x\n", ndlp->nlp_state); + return; + } + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3094 Start rport recovery on shost id 0x%x " + "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " + "flags 0x%x\n", + shost->host_no, ndlp->nlp_DID, + vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, + ndlp->nlp_flag); + /* + * The rport is not responding. Remove the FCP-2 flag to prevent + * an ADISC in the follow-up recovery code. 
+ */ + spin_lock_irqsave(&ndlp->lock, flags); + ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_flag |= NLP_ISSUE_LOGO; + spin_unlock_irqrestore(&ndlp->lock, flags); + lpfc_unreg_rpi(vport, ndlp); +} + +static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport) +{ + bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE); +} + +static void +lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max) +{ + u32 i; + + if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE)) + return; + + for (i = min; i <= max; i++) + set_bit(i, vport->vmid_priority_range); +} + +static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid) +{ + set_bit(ctcl_vmid, vport->vmid_priority_range); +} + +u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport) +{ + u32 i; + + i = find_first_bit(vport->vmid_priority_range, + LPFC_VMID_MAX_PRIORITY_RANGE); + + if (i == LPFC_VMID_MAX_PRIORITY_RANGE) + return 0; + + clear_bit(i, vport->vmid_priority_range); + return i; +} + +#define MAX_PRIORITY_DESC 255 + +static void +lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct priority_range_desc *desc; + struct lpfc_dmabuf *prsp = NULL; + struct lpfc_vmid_priority_range *vmid_range = NULL; + u32 *data; + struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + u32 ulp_word4 = get_job_word4(phba, rspiocb); + u8 *pcmd, max_desc; + u32 len, i; + struct lpfc_nodelist *ndlp = cmdiocb->ndlp; + + prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); + if (!prsp) + goto out; + + pcmd = prsp->virt; + data = (u32 *)pcmd; + if (data[0] == ELS_CMD_LS_RJT) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, + "3277 QFPA LS_RJT x%x x%x\n", + data[0], data[1]); + goto out; + } + if (ulp_status) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, + "6529 QFPA failed with status x%x x%x\n", + ulp_status, ulp_word4); + goto out; + } + + if (!vport->qfpa_res) { + max_desc = FCELSSIZE / sizeof(*vport->qfpa_res); + vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res), + GFP_KERNEL); + if (!vport->qfpa_res) + goto out; + } + + len = *((u32 *)(pcmd + 4)); + len = be32_to_cpu(len); + memcpy(vport->qfpa_res, pcmd, len + 8); + len = len / LPFC_PRIORITY_RANGE_DESC_SIZE; + + desc = (struct priority_range_desc *)(pcmd + 8); + vmid_range = vport->vmid_priority.vmid_range; + if (!vmid_range) { + vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range), + GFP_KERNEL); + if (!vmid_range) { + kfree(vport->qfpa_res); + goto out; + } + vport->vmid_priority.vmid_range = vmid_range; + } + vport->vmid_priority.num_descriptors = len; + + for (i = 0; i < len; i++, vmid_range++, desc++) { + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, + "6539 vmid values low=%d, high=%d, qos=%d, " + "local ve id=%d\n", desc->lo_range, + desc->hi_range, desc->qos_priority, + desc->local_ve_id); + + vmid_range->low = desc->lo_range << 1; + if (desc->local_ve_id == QFPA_ODD_ONLY) + vmid_range->low++; + if (desc->qos_priority) + vport->vmid_flag |= LPFC_VMID_QOS_ENABLED; + vmid_range->qos = desc->qos_priority; + + vmid_range->high = desc->hi_range << 1; + if ((desc->local_ve_id == QFPA_ODD_ONLY) || + (desc->local_ve_id == QFPA_EVEN_ODD)) + vmid_range->high++; + } + lpfc_init_cs_ctl_bitmap(vport); + for (i = 0; i < vport->vmid_priority.num_descriptors; i++) { + lpfc_vmid_set_cs_ctl_range(vport, + vport->vmid_priority.vmid_range[i].low, + vport->vmid_priority.vmid_range[i].high); + } 
+ + vport->vmid_flag |= LPFC_VMID_QFPA_CMPL; + out: + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); +} + +int lpfc_issue_els_qfpa(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp; + struct lpfc_iocbq *elsiocb; + u8 *pcmd; + int ret; + + ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); + if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) + return -ENXIO; + + elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp, + ndlp->nlp_DID, ELS_CMD_QFPA); + if (!elsiocb) + return -ENOMEM; + + pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; + + *((u32 *)(pcmd)) = ELS_CMD_QFPA; + pcmd += 4; + + elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa; + + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(vport->phba, elsiocb); + return -ENXIO; + } + + ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2); + if (ret != IOCB_SUCCESS) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return -EIO; + } + vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED; + return 0; +} + +int +lpfc_vmid_uvem(struct lpfc_vport *vport, + struct lpfc_vmid *vmid, bool instantiated) +{ + struct lpfc_vem_id_desc *vem_id_desc; + struct lpfc_nodelist *ndlp; + struct lpfc_iocbq *elsiocb; + struct instantiated_ve_desc *inst_desc; + struct lpfc_vmid_context *vmid_context; + u8 *pcmd; + u32 *len; + int ret = 0; + + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) + return -ENXIO; + + vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL); + if (!vmid_context) + return -ENOMEM; + elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2, + ndlp, Fabric_DID, ELS_CMD_UVEM); + if (!elsiocb) + goto out; + + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, + "3427 Host vmid %s %d\n", + vmid->host_vmid, instantiated); + vmid_context->vmp = vmid; + vmid_context->nlp = ndlp; + vmid_context->instantiated = instantiated; + elsiocb->vmid_tag.vmid_context = vmid_context; + pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; + + if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0, + sizeof(vport->lpfc_vmid_host_uuid))) + memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid, + sizeof(vport->lpfc_vmid_host_uuid)); + + *((u32 *)(pcmd)) = ELS_CMD_UVEM; + len = (u32 *)(pcmd + 4); + *len = cpu_to_be32(LPFC_UVEM_SIZE - 8); + + vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8); + vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG); + vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE); + memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid, + sizeof(vem_id_desc->vem_id)); + + inst_desc = (struct instantiated_ve_desc *)(pcmd + 32); + inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); + inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE); + memcpy(inst_desc->global_vem_id, vmid->host_vmid, + sizeof(inst_desc->global_vem_id)); + + bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID); + bf_set(lpfc_instantiated_local_id, inst_desc, + vmid->un.cs_ctl_vmid); + if (instantiated) { + inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); + } else { + inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG); + lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid); + } + inst_desc->word6 = cpu_to_be32(inst_desc->word6); + + elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem; + + elsiocb->ndlp = lpfc_nlp_get(ndlp); + if (!elsiocb->ndlp) { + lpfc_els_free_iocb(vport->phba, elsiocb); + goto out; + } + + ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0); + if (ret != IOCB_SUCCESS) { + lpfc_els_free_iocb(vport->phba, 
elsiocb); + lpfc_nlp_put(ndlp); + goto out; + } + + return 0; + out: + kfree(vmid_context); + return -EIO; +} + +static void +lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = icmdiocb->vport; + struct lpfc_dmabuf *prsp = NULL; + struct lpfc_vmid_context *vmid_context = + icmdiocb->vmid_tag.vmid_context; + struct lpfc_nodelist *ndlp = icmdiocb->ndlp; + u8 *pcmd; + u32 *data; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + u32 ulp_word4 = get_job_word4(phba, rspiocb); + struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf; + struct lpfc_vmid *vmid; + + vmid = vmid_context->vmp; + if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) + ndlp = NULL; + + prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); + if (!prsp) + goto out; + pcmd = prsp->virt; + data = (u32 *)pcmd; + if (data[0] == ELS_CMD_LS_RJT) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, + "4532 UVEM LS_RJT %x %x\n", data[0], data[1]); + goto out; + } + if (ulp_status) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, + "4533 UVEM error status %x: %x\n", + ulp_status, ulp_word4); + goto out; + } + spin_lock(&phba->hbalock); + /* Set IN USE flag */ + vport->vmid_flag |= LPFC_VMID_IN_USE; + phba->pport->vmid_flag |= LPFC_VMID_IN_USE; + spin_unlock(&phba->hbalock); + + if (vmid_context->instantiated) { + write_lock(&vport->vmid_lock); + vmid->flag |= LPFC_VMID_REGISTERED; + vmid->flag &= ~LPFC_VMID_REQ_REGISTER; + write_unlock(&vport->vmid_lock); + } + + out: + kfree(vmid_context); + lpfc_els_free_iocb(phba, icmdiocb); + lpfc_nlp_put(ndlp); +} diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c new file mode 100644 index 000000000..5154eeaee --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -0,0 +1,7319 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + *******************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" + +/* AlpaArray for assignment of scsid for scan-down and bind_method */ +static uint8_t lpfcAlpaArray[] = { + 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6, + 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA, + 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5, + 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, + 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97, + 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79, + 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B, + 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56, + 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, + 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35, + 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, + 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17, + 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01 +}; + +static void lpfc_disc_timeout_handler(struct lpfc_vport *); +static void lpfc_disc_flush_list(struct lpfc_vport *vport); +static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); +static int lpfc_fcf_inuse(struct lpfc_hba *); +static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *); +static void lpfc_check_inactive_vmid(struct lpfc_hba *phba); +static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba); + +static int +lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp) +{ + if (ndlp->nlp_fc4_type || + ndlp->nlp_type & NLP_FABRIC) + return 1; + return 0; +} +/* The source of a terminate rport I/O is either a dev_loss_tmo + * event or a call to fc_remove_host. While the rport should be + * valid during these downcalls, the transport can call twice + * in a single event. This routine provides somoe protection + * as the NDLP isn't really free, just released to the pool. 
+ */ +static int +lpfc_rport_invalid(struct fc_rport *rport) +{ + struct lpfc_rport_data *rdata; + struct lpfc_nodelist *ndlp; + + if (!rport) { + pr_err("**** %s: NULL rport, exit.\n", __func__); + return -EINVAL; + } + + rdata = rport->dd_data; + if (!rdata) { + pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n", + __func__, rport, rport->scsi_target_id); + return -EINVAL; + } + + ndlp = rdata->pnode; + if (!rdata->pnode) { + pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n", + __func__, rport, rport->scsi_target_id); + return -EINVAL; + } + + if (!ndlp->vport) { + pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px " + "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport, + rport->scsi_target_id); + return -EINVAL; + } + return 0; +} + +void +lpfc_terminate_rport_io(struct fc_rport *rport) +{ + struct lpfc_rport_data *rdata; + struct lpfc_nodelist *ndlp; + struct lpfc_vport *vport; + + if (lpfc_rport_invalid(rport)) + return; + + rdata = rport->dd_data; + ndlp = rdata->pnode; + vport = ndlp->vport; + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, + "rport terminate: sid:x%x did:x%x flg:x%x", + ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); + + if (ndlp->nlp_sid != NLP_NO_SID) + lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT); +} + +/* + * This function will be called when dev_loss_tmo fire. + */ +void +lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) +{ + struct lpfc_nodelist *ndlp; + struct lpfc_vport *vport; + struct lpfc_hba *phba; + struct lpfc_work_evt *evtp; + unsigned long iflags; + + ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode; + if (!ndlp) + return; + + vport = ndlp->vport; + phba = vport->phba; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, + "rport devlosscb: sid:x%x did:x%x flg:x%x", + ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); + + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, + "3181 dev_loss_callbk x%06x, rport x%px flg x%x " + "load_flag x%x refcnt %u state %d xpt x%x\n", + ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, + vport->load_flag, kref_read(&ndlp->kref), + ndlp->nlp_state, ndlp->fc4_xpt_flags); + + /* Don't schedule a worker thread event if the vport is going down. */ + if (vport->load_flag & FC_UNLOADING) { + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->rport = NULL; + + /* The scsi_transport is done with the rport so lpfc cannot + * call to unregister. Remove the scsi transport reference + * and clean up the SCSI transport node details. + */ + if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) { + ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD; + + /* NVME transport-registered rports need the + * NLP_XPT_REGD flag to complete an unregister. + */ + if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) + ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; + spin_unlock_irqrestore(&ndlp->lock, iflags); + lpfc_nlp_put(ndlp); + spin_lock_irqsave(&ndlp->lock, iflags); + } + + /* Only 1 thread can drop the initial node reference. If + * another thread has set NLP_DROPPED, this thread is done. 
+ */ + if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) && + !(ndlp->nlp_flag & NLP_DROPPED)) { + ndlp->nlp_flag |= NLP_DROPPED; + spin_unlock_irqrestore(&ndlp->lock, iflags); + lpfc_nlp_put(ndlp); + return; + } + + spin_unlock_irqrestore(&ndlp->lock, iflags); + return; + } + + if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) + return; + + if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6789 rport name %llx != node port name %llx", + rport->port_name, + wwn_to_u64(ndlp->nlp_portname.u.wwn)); + + evtp = &ndlp->dev_loss_evt; + + if (!list_empty(&evtp->evt_listp)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6790 rport name %llx dev_loss_evt pending\n", + rport->port_name); + return; + } + + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag |= NLP_IN_DEV_LOSS; + + /* If there is a PLOGI in progress, and we are in a + * NLP_NPR_2B_DISC state, don't turn off the flag. + */ + if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + + /* + * The backend does not expect any more calls associated with this + * rport. Remove the association between rport and ndlp. + */ + ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD; + ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL; + ndlp->rport = NULL; + spin_unlock_irqrestore(&ndlp->lock, iflags); + + if (phba->worker_thread) { + /* We need to hold the node by incrementing the reference + * count until this queued work is done + */ + evtp->evt_arg1 = lpfc_nlp_get(ndlp); + + spin_lock_irqsave(&phba->hbalock, iflags); + if (evtp->evt_arg1) { + evtp->evt = LPFC_EVT_DEV_LOSS; + list_add_tail(&evtp->evt_listp, &phba->work_list); + lpfc_worker_wake_up(phba); + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + } else { + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, + "3188 worker thread is stopped %s x%06x, " + " rport x%px flg x%x load_flag x%x refcnt " + "%d\n", __func__, ndlp->nlp_DID, + ndlp->rport, ndlp->nlp_flag, + vport->load_flag, kref_read(&ndlp->kref)); + if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) { + spin_lock_irqsave(&ndlp->lock, iflags); + /* Node is in dev loss. No further transaction. */ + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; + spin_unlock_irqrestore(&ndlp->lock, iflags); + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + } + + } + + return; +} + +/** + * lpfc_check_inactive_vmid_one - VMID inactivity checker for a vport + * @vport: Pointer to vport context object. + * + * This function checks for idle VMID entries related to a particular vport. If + * found unused/idle, free them accordingly. 
+ **/ +static void lpfc_check_inactive_vmid_one(struct lpfc_vport *vport) +{ + u16 keep; + u32 difftime = 0, r, bucket; + u64 *lta; + int cpu; + struct lpfc_vmid *vmp; + + write_lock(&vport->vmid_lock); + + if (!vport->cur_vmid_cnt) + goto out; + + /* iterate through the table */ + hash_for_each(vport->hash_table, bucket, vmp, hnode) { + keep = 0; + if (vmp->flag & LPFC_VMID_REGISTERED) { + /* check if the particular VMID is in use */ + /* for all available per cpu variable */ + for_each_possible_cpu(cpu) { + /* if last access time is less than timeout */ + lta = per_cpu_ptr(vmp->last_io_time, cpu); + if (!lta) + continue; + difftime = (jiffies) - (*lta); + if ((vport->vmid_inactivity_timeout * + JIFFIES_PER_HR) > difftime) { + keep = 1; + break; + } + } + + /* if none of the cpus have been used by the vm, */ + /* remove the entry if already registered */ + if (!keep) { + /* mark the entry for deregistration */ + vmp->flag = LPFC_VMID_DE_REGISTER; + write_unlock(&vport->vmid_lock); + if (vport->vmid_priority_tagging) + r = lpfc_vmid_uvem(vport, vmp, false); + else + r = lpfc_vmid_cmd(vport, + SLI_CTAS_DAPP_IDENT, + vmp); + + /* decrement number of active vms and mark */ + /* entry in slot as free */ + write_lock(&vport->vmid_lock); + if (!r) { + struct lpfc_vmid *ht = vmp; + + vport->cur_vmid_cnt--; + ht->flag = LPFC_VMID_SLOT_FREE; + free_percpu(ht->last_io_time); + ht->last_io_time = NULL; + hash_del(&ht->hnode); + } + } + } + } + out: + write_unlock(&vport->vmid_lock); +} + +/** + * lpfc_check_inactive_vmid - VMID inactivity checker + * @phba: Pointer to hba context object. + * + * This function is called from the worker thread to determine if an entry in + * the VMID table can be released since there was no I/O activity seen from that + * particular VM for the specified time. When this happens, the entry in the + * table is released and also the resources on the switch cleared. + **/ + +static void lpfc_check_inactive_vmid(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport; + struct lpfc_vport **vports; + int i; + + vports = lpfc_create_vport_work_array(phba); + if (!vports) + return; + + for (i = 0; i <= phba->max_vports; i++) { + if ((!vports[i]) && (i == 0)) + vport = phba->pport; + else + vport = vports[i]; + if (!vport) + break; + + lpfc_check_inactive_vmid_one(vport); + } + lpfc_destroy_vport_work_array(phba, vports); +} + +/** + * lpfc_check_nlp_post_devloss - Check to restore ndlp refcnt after devloss + * @vport: Pointer to vport object. + * @ndlp: Pointer to remote node object. + * + * If NLP_IN_RECOV_POST_DEV_LOSS flag was set due to outstanding recovery of + * node during dev_loss_tmo processing, then this function restores the nlp_put + * kref decrement from lpfc_dev_loss_tmo_handler. 
+ **/
+void
+lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
+			    struct lpfc_nodelist *ndlp)
+{
+	unsigned long iflags;
+
+	spin_lock_irqsave(&ndlp->lock, iflags);
+	if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
+		ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
+		spin_unlock_irqrestore(&ndlp->lock, iflags);
+		lpfc_nlp_get(ndlp);
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
+				 "8438 Devloss timeout reversed on DID x%x "
+				 "refcnt %d ndlp %p flag x%x "
+				 "port_state = x%x\n",
+				 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
+				 ndlp->nlp_flag, vport->port_state);
+		spin_lock_irqsave(&ndlp->lock, iflags);
+	}
+	spin_unlock_irqrestore(&ndlp->lock, iflags);
+}
+
+/**
+ * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
+ * @ndlp: Pointer to remote node object.
+ *
+ * This function is called from the worker thread when the devloss timeout
+ * timer expires. For an SLI4 host, this routine returns 1 when at least one
+ * remote node, including this @ndlp, is still using the FCF; otherwise, it
+ * returns 0 when no remote node is still using the FCF by the time the
+ * devloss timeout expired for this @ndlp.
+ **/
+static int
+lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_vport *vport;
+	struct lpfc_hba *phba;
+	uint8_t *name;
+	int warn_on = 0;
+	int fcf_inuse = 0;
+	bool recovering = false;
+	struct fc_vport *fc_vport = NULL;
+	unsigned long iflags;
+
+	vport = ndlp->vport;
+	name = (uint8_t *)&ndlp->nlp_portname;
+	phba = vport->phba;
+
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		fcf_inuse = lpfc_fcf_inuse(phba);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+			      "rport devlosstmo:did:x%x type:x%x id:x%x",
+			      ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);
+
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+			 "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n",
+			 __func__, ndlp->nlp_DID, ndlp->nlp_flag,
+			 ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));
+
+	/* If the driver is recovering the rport, ignore devloss. */
+	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0284 Devloss timeout Ignored on "
+				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
+				 "NPort x%x\n",
+				 *name, *(name+1), *(name+2), *(name+3),
+				 *(name+4), *(name+5), *(name+6), *(name+7),
+				 ndlp->nlp_DID);
+
+		spin_lock_irqsave(&ndlp->lock, iflags);
+		ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+		spin_unlock_irqrestore(&ndlp->lock, iflags);
+		return fcf_inuse;
+	}
+
+	/* Fabric nodes are done. */
+	if (ndlp->nlp_type & NLP_FABRIC) {
+		spin_lock_irqsave(&ndlp->lock, iflags);
+
+		/* The driver has to account for a race where a fabric node
+		 * may still be in recovery when dev_loss_tmo expires. When
+		 * this happens, the driver has to allow node recovery.
+		 */
+		switch (ndlp->nlp_DID) {
+		case Fabric_DID:
+			fc_vport = vport->fc_vport;
+			if (fc_vport) {
+				/* NPIV path. */
+				if (fc_vport->vport_state ==
+				    FC_VPORT_INITIALIZING)
+					recovering = true;
+			} else {
+				/* Physical port path. */
+				if (phba->hba_flag & HBA_FLOGI_OUTSTANDING)
+					recovering = true;
+			}
+			break;
+		case Fabric_Cntl_DID:
+			if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+				recovering = true;
+			break;
+		case FDMI_DID:
+			fallthrough;
+		case NameServer_DID:
+			if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
+			    ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
+				recovering = true;
+			break;
+		default:
+			/* Ensure the nlp_DID at least has the correct prefix.
+			 * The fabric domain controller's last three nibbles
+			 * vary so we handle it in the default case.
+ */ + if (ndlp->nlp_DID & Fabric_DID_MASK) { + if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && + ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) + recovering = true; + } + break; + } + spin_unlock_irqrestore(&ndlp->lock, iflags); + + /* Mark an NLP_IN_RECOV_POST_DEV_LOSS flag to know if reversing + * the following lpfc_nlp_put is necessary after fabric node is + * recovered. + */ + if (recovering) { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY | LOG_NODE, + "8436 Devloss timeout marked on " + "DID x%x refcnt %d ndlp %p " + "flag x%x port_state = x%x\n", + ndlp->nlp_DID, kref_read(&ndlp->kref), + ndlp, ndlp->nlp_flag, + vport->port_state); + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS; + spin_unlock_irqrestore(&ndlp->lock, iflags); + } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { + /* Fabric node fully recovered before this dev_loss_tmo + * queue work is processed. Thus, ignore the + * dev_loss_tmo event. + */ + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY | LOG_NODE, + "8437 Devloss timeout ignored on " + "DID x%x refcnt %d ndlp %p " + "flag x%x port_state = x%x\n", + ndlp->nlp_DID, kref_read(&ndlp->kref), + ndlp, ndlp->nlp_flag, + vport->port_state); + return fcf_inuse; + } + + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; + spin_unlock_irqrestore(&ndlp->lock, iflags); + lpfc_nlp_put(ndlp); + return fcf_inuse; + } + + if (ndlp->nlp_sid != NLP_NO_SID) { + warn_on = 1; + lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT); + } + + if (warn_on) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0203 Devloss timeout on " + "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " + "NPort x%06x Data: x%x x%x x%x refcnt %d\n", + *name, *(name+1), *(name+2), *(name+3), + *(name+4), *(name+5), *(name+6), *(name+7), + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, ndlp->nlp_rpi, + kref_read(&ndlp->kref)); + } else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT, + "0204 Devloss timeout on " + "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " + "NPort x%06x Data: x%x x%x x%x\n", + *name, *(name+1), *(name+2), *(name+3), + *(name+4), *(name+5), *(name+6), *(name+7), + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, ndlp->nlp_rpi); + } + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; + spin_unlock_irqrestore(&ndlp->lock, iflags); + + /* If we are devloss, but we are in the process of rediscovering the + * ndlp, don't issue a NLP_EVT_DEVICE_RM event. + */ + if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && + ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) { + return fcf_inuse; + } + + if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) + lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); + + return fcf_inuse; +} + +static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport; + struct lpfc_vport **vports; + int i; + + vports = lpfc_create_vport_work_array(phba); + if (!vports) + return; + + for (i = 0; i <= phba->max_vports; i++) { + if ((!vports[i]) && (i == 0)) + vport = phba->pport; + else + vport = vports[i]; + if (!vport) + break; + + if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) { + if (!lpfc_issue_els_qfpa(vport)) + vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA; + } + } + lpfc_destroy_vport_work_array(phba, vports); +} + +/** + * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler + * @phba: Pointer to hba context object. + * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler. 
+ * @nlp_did: remote node identifier with devloss timeout.
+ *
+ * This function is called from the worker thread after invoking the devloss
+ * timeout handler and releasing the reference count for the ndlp with
+ * which the devloss timeout was handled for an SLI4 host. When the devloss
+ * timeout fires for the last remote node that had been using the FCF, it is
+ * guaranteed by the time this routine is invoked that no remote node is
+ * still using the FCF. In that case, if the FIP engine is neither in the
+ * FCF table scan process nor in the roundrobin failover process, the in-use
+ * FCF shall be unregistered. If the FIP engine is in the FCF discovery
+ * process, the devloss timeout state shall be set for either the FCF table
+ * scan process or the roundrobin failover process to unregister the in-use
+ * FCF.
+ **/
+static void
+lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
+				    uint32_t nlp_did)
+{
+	/* If the devloss timeout happened to a remote node when the FCF
+	 * was no longer in use, do nothing.
+	 */
+	if (!fcf_inuse)
+		return;
+
+	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
+		spin_lock_irq(&phba->hbalock);
+		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+				spin_unlock_irq(&phba->hbalock);
+				return;
+			}
+			phba->hba_flag |= HBA_DEVLOSS_TMO;
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2847 Last remote node (x%x) using "
+					"FCF devloss tmo\n", nlp_did);
+		}
+		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2868 Devloss tmo to FCF rediscovery "
+					"in progress\n");
+			return;
+		}
+		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2869 Devloss tmo to idle FIP engine, "
+					"unreg in-use FCF and rescan.\n");
+			/* Unregister in-use FCF and rescan */
+			lpfc_unregister_fcf_rescan(phba);
+			return;
+		}
+		spin_unlock_irq(&phba->hbalock);
+		if (phba->hba_flag & FCF_TS_INPROG)
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2870 FCF table scan in progress\n");
+		if (phba->hba_flag & FCF_RR_INPROG)
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2871 FLOGI roundrobin FCF failover "
+					"in progress\n");
+	}
+	lpfc_unregister_unused_fcf(phba);
+}
+
+/**
+ * lpfc_alloc_fast_evt - Allocates data structure for posting event
+ * @phba: Pointer to hba context object.
+ *
+ * This function is called from functions which need to post
+ * events from interrupt context. It allocates the data
+ * structure required for posting an event. It also keeps track of
+ * the number of pending events and prevents an event storm when there
+ * are too many events.
+ **/
+struct lpfc_fast_path_event *
+lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
+	struct lpfc_fast_path_event *ret;
+
+	/* If there are a lot of fast events, do not exhaust memory due to this */
+	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
+		return NULL;
+
+	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
+			GFP_ATOMIC);
+	if (ret) {
+		atomic_inc(&phba->fast_event_count);
+		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
+		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+	}
+	return ret;
+}
+
+/**
+ * lpfc_free_fast_evt - Frees event data structure
+ * @phba: Pointer to hba context object.
+ * @evt: Event object which needs to be freed.
+ *
+ * This function frees the data structure required for posting
+ * events.
+ **/ +void +lpfc_free_fast_evt(struct lpfc_hba *phba, + struct lpfc_fast_path_event *evt) { + + atomic_dec(&phba->fast_event_count); + kfree(evt); +} + +/** + * lpfc_send_fastpath_evt - Posts events generated from fast path + * @phba: Pointer to hba context object. + * @evtp: Event data structure. + * + * This function is called from worker thread, when the interrupt + * context need to post an event. This function posts the event + * to fc transport netlink interface. + **/ +static void +lpfc_send_fastpath_evt(struct lpfc_hba *phba, + struct lpfc_work_evt *evtp) +{ + unsigned long evt_category, evt_sub_category; + struct lpfc_fast_path_event *fast_evt_data; + char *evt_data; + uint32_t evt_data_size; + struct Scsi_Host *shost; + + fast_evt_data = container_of(evtp, struct lpfc_fast_path_event, + work_evt); + + evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type; + evt_sub_category = (unsigned long) fast_evt_data->un. + fabric_evt.subcategory; + shost = lpfc_shost_from_vport(fast_evt_data->vport); + if (evt_category == FC_REG_FABRIC_EVENT) { + if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) { + evt_data = (char *) &fast_evt_data->un.read_check_error; + evt_data_size = sizeof(fast_evt_data->un. + read_check_error); + } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) || + (evt_sub_category == LPFC_EVENT_PORT_BUSY)) { + evt_data = (char *) &fast_evt_data->un.fabric_evt; + evt_data_size = sizeof(fast_evt_data->un.fabric_evt); + } else { + lpfc_free_fast_evt(phba, fast_evt_data); + return; + } + } else if (evt_category == FC_REG_SCSI_EVENT) { + switch (evt_sub_category) { + case LPFC_EVENT_QFULL: + case LPFC_EVENT_DEVBSY: + evt_data = (char *) &fast_evt_data->un.scsi_evt; + evt_data_size = sizeof(fast_evt_data->un.scsi_evt); + break; + case LPFC_EVENT_CHECK_COND: + evt_data = (char *) &fast_evt_data->un.check_cond_evt; + evt_data_size = sizeof(fast_evt_data->un. + check_cond_evt); + break; + case LPFC_EVENT_VARQUEDEPTH: + evt_data = (char *) &fast_evt_data->un.queue_depth_evt; + evt_data_size = sizeof(fast_evt_data->un. 
+ queue_depth_evt); + break; + default: + lpfc_free_fast_evt(phba, fast_evt_data); + return; + } + } else { + lpfc_free_fast_evt(phba, fast_evt_data); + return; + } + + if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME) + fc_host_post_vendor_event(shost, + fc_get_event_number(), + evt_data_size, + evt_data, + LPFC_NL_VENDOR_ID); + + lpfc_free_fast_evt(phba, fast_evt_data); + return; +} + +static void +lpfc_work_list_done(struct lpfc_hba *phba) +{ + struct lpfc_work_evt *evtp = NULL; + struct lpfc_nodelist *ndlp; + int free_evt; + int fcf_inuse; + uint32_t nlp_did; + bool hba_pci_err; + + spin_lock_irq(&phba->hbalock); + while (!list_empty(&phba->work_list)) { + list_remove_head((&phba->work_list), evtp, typeof(*evtp), + evt_listp); + spin_unlock_irq(&phba->hbalock); + hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); + free_evt = 1; + switch (evtp->evt) { + case LPFC_EVT_ELS_RETRY: + ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); + if (!hba_pci_err) { + lpfc_els_retry_delay_handler(ndlp); + free_evt = 0; /* evt is part of ndlp */ + } + /* decrement the node reference count held + * for this queued work + */ + lpfc_nlp_put(ndlp); + break; + case LPFC_EVT_DEV_LOSS: + ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); + fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp); + free_evt = 0; + /* decrement the node reference count held for + * this queued work + */ + nlp_did = ndlp->nlp_DID; + lpfc_nlp_put(ndlp); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_post_dev_loss_tmo_handler(phba, + fcf_inuse, + nlp_did); + break; + case LPFC_EVT_RECOVER_PORT: + ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); + if (!hba_pci_err) { + lpfc_sli_abts_recover_port(ndlp->vport, ndlp); + free_evt = 0; + } + /* decrement the node reference count held for + * this queued work + */ + lpfc_nlp_put(ndlp); + break; + case LPFC_EVT_ONLINE: + if (phba->link_state < LPFC_LINK_DOWN) + *(int *) (evtp->evt_arg1) = lpfc_online(phba); + else + *(int *) (evtp->evt_arg1) = 0; + complete((struct completion *)(evtp->evt_arg2)); + break; + case LPFC_EVT_OFFLINE_PREP: + if (phba->link_state >= LPFC_LINK_DOWN) + lpfc_offline_prep(phba, LPFC_MBX_WAIT); + *(int *)(evtp->evt_arg1) = 0; + complete((struct completion *)(evtp->evt_arg2)); + break; + case LPFC_EVT_OFFLINE: + lpfc_offline(phba); + lpfc_sli_brdrestart(phba); + *(int *)(evtp->evt_arg1) = + lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY); + lpfc_unblock_mgmt_io(phba); + complete((struct completion *)(evtp->evt_arg2)); + break; + case LPFC_EVT_WARM_START: + lpfc_offline(phba); + lpfc_reset_barrier(phba); + lpfc_sli_brdreset(phba); + lpfc_hba_down_post(phba); + *(int *)(evtp->evt_arg1) = + lpfc_sli_brdready(phba, HS_MBRDY); + lpfc_unblock_mgmt_io(phba); + complete((struct completion *)(evtp->evt_arg2)); + break; + case LPFC_EVT_KILL: + lpfc_offline(phba); + *(int *)(evtp->evt_arg1) + = (phba->pport->stopped) + ? 
0 : lpfc_sli_brdkill(phba); + lpfc_unblock_mgmt_io(phba); + complete((struct completion *)(evtp->evt_arg2)); + break; + case LPFC_EVT_FASTPATH_MGMT_EVT: + lpfc_send_fastpath_evt(phba, evtp); + free_evt = 0; + break; + case LPFC_EVT_RESET_HBA: + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_reset_hba(phba); + break; + } + if (free_evt) + kfree(evtp); + spin_lock_irq(&phba->hbalock); + } + spin_unlock_irq(&phba->hbalock); + +} + +static void +lpfc_work_done(struct lpfc_hba *phba) +{ + struct lpfc_sli_ring *pring; + uint32_t ha_copy, status, control, work_port_events; + struct lpfc_vport **vports; + struct lpfc_vport *vport; + int i; + bool hba_pci_err; + + hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); + spin_lock_irq(&phba->hbalock); + ha_copy = phba->work_ha; + phba->work_ha = 0; + spin_unlock_irq(&phba->hbalock); + if (hba_pci_err) + ha_copy = 0; + + /* First, try to post the next mailbox command to SLI4 device */ + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err) + lpfc_sli4_post_async_mbox(phba); + + if (ha_copy & HA_ERATT) { + /* Handle the error attention event */ + lpfc_handle_eratt(phba); + + if (phba->fw_dump_cmpl) { + complete(phba->fw_dump_cmpl); + phba->fw_dump_cmpl = NULL; + } + } + + if (ha_copy & HA_MBATT) + lpfc_sli_handle_mb_event(phba); + + if (ha_copy & HA_LATT) + lpfc_handle_latt(phba); + + /* Handle VMID Events */ + if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) { + if (phba->pport->work_port_events & + WORKER_CHECK_VMID_ISSUE_QFPA) { + lpfc_check_vmid_qfpa_issue(phba); + phba->pport->work_port_events &= + ~WORKER_CHECK_VMID_ISSUE_QFPA; + } + if (phba->pport->work_port_events & + WORKER_CHECK_INACTIVE_VMID) { + lpfc_check_inactive_vmid(phba); + phba->pport->work_port_events &= + ~WORKER_CHECK_INACTIVE_VMID; + } + } + + /* Process SLI4 events */ + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { + if (phba->hba_flag & HBA_RRQ_ACTIVE) + lpfc_handle_rrq_active(phba); + if (phba->hba_flag & ELS_XRI_ABORT_EVENT) + lpfc_sli4_els_xri_abort_event_proc(phba); + if (phba->hba_flag & ASYNC_EVENT) + lpfc_sli4_async_event_proc(phba); + if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER; + spin_unlock_irq(&phba->hbalock); + lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); + } + if (phba->fcf.fcf_flag & FCF_REDISC_EVT) + lpfc_sli4_fcf_redisc_event_proc(phba); + } + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports; i++) { + /* + * We could have no vports in array if unloading, so if + * this happens then just use the pport + */ + if (vports[i] == NULL && i == 0) + vport = phba->pport; + else + vport = vports[i]; + if (vport == NULL) + break; + spin_lock_irq(&vport->work_port_lock); + work_port_events = vport->work_port_events; + vport->work_port_events &= ~work_port_events; + spin_unlock_irq(&vport->work_port_lock); + if (hba_pci_err) + continue; + if (work_port_events & WORKER_DISC_TMO) + lpfc_disc_timeout_handler(vport); + if (work_port_events & WORKER_ELS_TMO) + lpfc_els_timeout_handler(vport); + if (work_port_events & WORKER_HB_TMO) + lpfc_hb_timeout_handler(phba); + if (work_port_events & WORKER_MBOX_TMO) + lpfc_mbox_timeout_handler(phba); + if (work_port_events & WORKER_FABRIC_BLOCK_TMO) + lpfc_unblock_fabric_iocbs(phba); + if (work_port_events & WORKER_RAMP_DOWN_QUEUE) + lpfc_ramp_down_queue_handler(phba); + if (work_port_events & WORKER_DELAYED_DISC_TMO) + lpfc_delayed_disc_timeout_handler(vport); + } + 
lpfc_destroy_vport_work_array(phba, vports); + + pring = lpfc_phba_elsring(phba); + status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); + status >>= (4*LPFC_ELS_RING); + if (pring && (status & HA_RXMASK || + pring->flag & LPFC_DEFERRED_RING_EVENT || + phba->hba_flag & HBA_SP_QUEUE_EVT)) { + if (pring->flag & LPFC_STOP_IOCB_EVENT) { + pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Preserve legacy behavior. */ + if (!(phba->hba_flag & HBA_SP_QUEUE_EVT)) + set_bit(LPFC_DATA_READY, &phba->data_flags); + } else { + /* Driver could have abort request completed in queue + * when link goes down. Allow for this transition. + */ + if (phba->link_state >= LPFC_LINK_DOWN || + phba->link_flag & LS_MDS_LOOPBACK) { + pring->flag &= ~LPFC_DEFERRED_RING_EVENT; + lpfc_sli_handle_slow_ring_event(phba, pring, + (status & + HA_RXMASK)); + } + } + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_drain_txq(phba); + /* + * Turn on Ring interrupts + */ + if (phba->sli_rev <= LPFC_SLI_REV3) { + spin_lock_irq(&phba->hbalock); + control = readl(phba->HCregaddr); + if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { + lpfc_debugfs_slow_ring_trc(phba, + "WRK Enable ring: cntl:x%x hacopy:x%x", + control, ha_copy, 0); + + control |= (HC_R0INT_ENA << LPFC_ELS_RING); + writel(control, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } else { + lpfc_debugfs_slow_ring_trc(phba, + "WRK Ring ok: cntl:x%x hacopy:x%x", + control, ha_copy, 0); + } + spin_unlock_irq(&phba->hbalock); + } + } + lpfc_work_list_done(phba); +} + +int +lpfc_do_work(void *p) +{ + struct lpfc_hba *phba = p; + int rc; + + set_user_nice(current, MIN_NICE); + current->flags |= PF_NOFREEZE; + phba->data_flags = 0; + + while (!kthread_should_stop()) { + /* wait and check worker queue activities */ + rc = wait_event_interruptible(phba->work_waitq, + (test_and_clear_bit(LPFC_DATA_READY, + &phba->data_flags) + || kthread_should_stop())); + /* Signal wakeup shall terminate the worker thread */ + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0433 Wakeup on signal: rc=x%x\n", rc); + break; + } + + /* Attend pending lpfc data processing */ + lpfc_work_done(phba); + } + phba->worker_thread = NULL; + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "0432 Worker thread stopped.\n"); + return 0; +} + +/* + * This is only called to handle FC worker events. Since this a rare + * occurrence, we allocate a struct lpfc_work_evt structure here instead of + * embedding it in the IOCB. 
+ */ +int +lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2, + uint32_t evt) +{ + struct lpfc_work_evt *evtp; + unsigned long flags; + + /* + * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will + * be queued to worker thread for processing + */ + evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC); + if (!evtp) + return 0; + + evtp->evt_arg1 = arg1; + evtp->evt_arg2 = arg2; + evtp->evt = evt; + + spin_lock_irqsave(&phba->hbalock, flags); + list_add_tail(&evtp->evt_listp, &phba->work_list); + spin_unlock_irqrestore(&phba->hbalock, flags); + + lpfc_worker_wake_up(phba); + + return 1; +} + +void +lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp, *next_ndlp; + + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || + ((vport->port_type == LPFC_NPIV_PORT) && + ((ndlp->nlp_DID == NameServer_DID) || + (ndlp->nlp_DID == FDMI_DID) || + (ndlp->nlp_DID == Fabric_Cntl_DID)))) + lpfc_unreg_rpi(vport, ndlp); + + /* Leave Fabric nodes alone on link down */ + if ((phba->sli_rev < LPFC_SLI_REV4) && + (!remove && ndlp->nlp_type & NLP_FABRIC)) + continue; + + /* Notify transport of connectivity loss to trigger cleanup. */ + if (phba->nvmet_support && + ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) + lpfc_nvmet_invalidate_host(phba, ndlp); + + lpfc_disc_state_machine(vport, ndlp, NULL, + remove + ? NLP_EVT_DEVICE_RM + : NLP_EVT_DEVICE_RECOVERY); + } + if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(vport); + lpfc_mbx_unreg_vpi(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); + } +} + +void +lpfc_port_link_failure(struct lpfc_vport *vport) +{ + lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); + + /* Cleanup any outstanding received buffers */ + lpfc_cleanup_rcv_buffers(vport); + + /* Cleanup any outstanding RSCN activity */ + lpfc_els_flush_rscn(vport); + + /* Cleanup any outstanding ELS commands */ + lpfc_els_flush_cmd(vport); + + lpfc_cleanup_rpis(vport, 0); + + /* Turn off discovery timer if its running */ + lpfc_can_disctmo(vport); +} + +void +lpfc_linkdown_port(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) + fc_host_post_event(shost, fc_get_event_number(), + FCH_EVT_LINKDOWN, 0); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Link Down: state:x%x rtry:x%x flg:x%x", + vport->port_state, vport->fc_ns_retry, vport->fc_flag); + + lpfc_port_link_failure(vport); + + /* Stop delayed Nport discovery */ + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_DISC_DELAYED; + spin_unlock_irq(shost->host_lock); + del_timer_sync(&vport->delayed_disc_tmo); + + if (phba->sli_rev == LPFC_SLI_REV4 && + vport->port_type == LPFC_PHYSICAL_PORT && + phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) { + /* Assume success on link up */ + phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC; + } +} + +int +lpfc_linkdown(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_vport **vports; + LPFC_MBOXQ_t *mb; + int i; + int offline; + + if (phba->link_state == LPFC_LINK_DOWN) + return 0; + + /* Block all SCSI stack 
I/Os */ + lpfc_scsi_dev_block(phba); + offline = pci_channel_offline(phba->pcidev); + + phba->defer_flogi_acc_flag = false; + + /* Clear external loopback plug detected flag */ + phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; + + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); + spin_unlock_irq(&phba->hbalock); + if (phba->link_state > LPFC_LINK_DOWN) { + phba->link_state = LPFC_LINK_DOWN; + if (phba->sli4_hba.conf_trunk) { + phba->trunk_link.link0.state = 0; + phba->trunk_link.link1.state = 0; + phba->trunk_link.link2.state = 0; + phba->trunk_link.link3.state = 0; + phba->trunk_link.phy_lnk_speed = + LPFC_LINK_SPEED_UNKNOWN; + phba->sli4_hba.link_state.logical_speed = + LPFC_LINK_SPEED_UNKNOWN; + } + spin_lock_irq(shost->host_lock); + phba->pport->fc_flag &= ~FC_LBIT; + spin_unlock_irq(shost->host_lock); + } + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + /* Issue a LINK DOWN event to all nodes */ + lpfc_linkdown_port(vports[i]); + + vports[i]->fc_myDID = 0; + + if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if (phba->nvmet_support) + lpfc_nvmet_update_targetport(phba); + else + lpfc_nvme_update_localport(vports[i]); + } + } + } + lpfc_destroy_vport_work_array(phba, vports); + + /* Clean up any SLI3 firmware default rpi's */ + if (phba->sli_rev > LPFC_SLI_REV3 || offline) + goto skip_unreg_did; + + mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mb) { + lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb); + mb->vport = vport; + mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) + == MBX_NOT_FINISHED) { + mempool_free(mb, phba->mbox_mem_pool); + } + } + + skip_unreg_did: + /* Setup myDID for link up if we are in pt2pt mode */ + if (phba->pport->fc_flag & FC_PT2PT) { + mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mb) { + lpfc_config_link(phba, mb); + mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mb->vport = vport; + if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) + == MBX_NOT_FINISHED) { + mempool_free(mb, phba->mbox_mem_pool); + } + } + spin_lock_irq(shost->host_lock); + phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); + phba->pport->rcv_flogi_cnt = 0; + spin_unlock_irq(shost->host_lock); + } + return 0; +} + +static void +lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp; + + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); + + if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) + continue; + if (ndlp->nlp_type & NLP_FABRIC) { + /* On Linkup its safe to clean up the ndlp + * from Fabric connections. + */ + if (ndlp->nlp_DID != Fabric_DID) + lpfc_unreg_rpi(vport, ndlp); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + /* Fail outstanding IO now since device is + * marked for PLOGI. 
+ */ + lpfc_unreg_rpi(vport, ndlp); + } + } +} + +static void +lpfc_linkup_port(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + + if ((vport->load_flag & FC_UNLOADING) != 0) + return; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Link Up: top:x%x speed:x%x flg:x%x", + phba->fc_topology, phba->fc_linkspeed, phba->link_flag); + + /* If NPIV is not enabled, only bring the physical port up */ + if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + (vport != phba->pport)) + return; + + if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) + fc_host_post_event(shost, fc_get_event_number(), + FCH_EVT_LINKUP, 0); + + spin_lock_irq(shost->host_lock); + if (phba->defer_flogi_acc_flag) + vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_RSCN_MODE | + FC_NLP_MORE | FC_RSCN_DISCOVERY); + else + vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | + FC_ABORT_DISCOVERY | FC_RSCN_MODE | + FC_NLP_MORE | FC_RSCN_DISCOVERY); + vport->fc_flag |= FC_NDISC_ACTIVE; + vport->fc_ns_retry = 0; + spin_unlock_irq(shost->host_lock); + lpfc_setup_fdmi_mask(vport); + + lpfc_linkup_cleanup_nodes(vport); +} + +static int +lpfc_linkup(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + int i; + struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); + + phba->link_state = LPFC_LINK_UP; + + /* Unblock fabric iocbs if they are blocked */ + clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); + del_timer_sync(&phba->fabric_block_timer); + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) + lpfc_linkup_port(vports[i]); + lpfc_destroy_vport_work_array(phba, vports); + + /* Clear the pport flogi counter in case the link down was + * absorbed without an ACQE. No lock here - in worker thread + * and discovery is synchronized. + */ + spin_lock_irq(shost->host_lock); + phba->pport->rcv_flogi_cnt = 0; + spin_unlock_irq(shost->host_lock); + + /* reinitialize initial HBA flag */ + phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL); + + return 0; +} + +/* + * This routine handles processing a CLEAR_LA mailbox + * command upon completion. It is setup in the LPFC_MBOXQ + * as the completion routine when the command is + * handed off to the SLI layer. SLI3 only. 
+ */ +static void +lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_sli *psli = &phba->sli; + MAILBOX_t *mb = &pmb->u.mb; + uint32_t control; + + /* Since we don't do discovery right now, turn these off here */ + psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT; + psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT; + + /* Check for error */ + if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { + /* CLEAR_LA mbox error state */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0320 CLEAR_LA mbxStatus error x%x hba " + "state x%x\n", + mb->mbxStatus, vport->port_state); + phba->link_state = LPFC_HBA_ERROR; + goto out; + } + + if (vport->port_type == LPFC_PHYSICAL_PORT) + phba->link_state = LPFC_HBA_READY; + + spin_lock_irq(&phba->hbalock); + psli->sli_flag |= LPFC_PROCESS_LA; + control = readl(phba->HCregaddr); + control |= HC_LAINT_ENA; + writel(control, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + spin_unlock_irq(&phba->hbalock); + mempool_free(pmb, phba->mbox_mem_pool); + return; + +out: + /* Device Discovery completes */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0225 Device Discovery completes\n"); + mempool_free(pmb, phba->mbox_mem_pool); + + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_ABORT_DISCOVERY; + spin_unlock_irq(shost->host_lock); + + lpfc_can_disctmo(vport); + + /* turn on Link Attention interrupts */ + + spin_lock_irq(&phba->hbalock); + psli->sli_flag |= LPFC_PROCESS_LA; + control = readl(phba->HCregaddr); + control |= HC_LAINT_ENA; + writel(control, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + spin_unlock_irq(&phba->hbalock); + + return; +} + +void +lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + LPFC_MBOXQ_t *sparam_mb; + u16 status = pmb->u.mb.mbxStatus; + int rc; + + mempool_free(pmb, phba->mbox_mem_pool); + + if (status) + goto out; + + /* don't perform discovery for SLI4 loopback diagnostic test */ + if ((phba->sli_rev == LPFC_SLI_REV4) && + !(phba->hba_flag & HBA_FCOE_MODE) && + (phba->link_flag & LS_LOOPBACK_MODE)) + return; + + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && + vport->fc_flag & FC_PUBLIC_LOOP && + !(vport->fc_flag & FC_LBIT)) { + /* Need to wait for FAN - use discovery timer + * for timeout. port_state is identically + * LPFC_LOCAL_CFG_LINK while waiting for FAN + */ + lpfc_set_disctmo(vport); + return; + } + + /* Start discovery by sending a FLOGI. port_state is identically + * LPFC_FLOGI while waiting for FLOGI cmpl. + */ + if (vport->port_state != LPFC_FLOGI) { + /* Issue MBX_READ_SPARAM to update CSPs before FLOGI if + * bb-credit recovery is in place. 
+		 */
+		if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
+		    !(phba->link_flag & LS_LOOPBACK_MODE)) {
+			sparam_mb = mempool_alloc(phba->mbox_mem_pool,
+						  GFP_KERNEL);
+			if (!sparam_mb)
+				goto sparam_out;
+
+			rc = lpfc_read_sparam(phba, sparam_mb, 0);
+			if (rc) {
+				mempool_free(sparam_mb, phba->mbox_mem_pool);
+				goto sparam_out;
+			}
+			sparam_mb->vport = vport;
+			sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
+			rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
+			if (rc == MBX_NOT_FINISHED) {
+				lpfc_mbox_rsrc_cleanup(phba, sparam_mb,
+						       MBOX_THD_UNLOCKED);
+				goto sparam_out;
+			}
+
+			phba->hba_flag |= HBA_DEFER_FLOGI;
+		} else {
+			lpfc_initial_flogi(vport);
+		}
+	} else {
+		if (vport->fc_flag & FC_PT2PT)
+			lpfc_disc_start(vport);
+	}
+	return;
+
+out:
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+			 "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
+			 status, vport->port_state);
+
+sparam_out:
+	lpfc_linkdown(phba);
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+			 "0200 CONFIG_LINK bad hba state x%x\n",
+			 vport->port_state);
+
+	lpfc_issue_clear_la(phba, vport);
+	return;
+}
+
+/**
+ * lpfc_sli4_clear_fcf_rr_bmask
+ * @phba: pointer to the struct lpfc_hba for this port.
+ *
+ * This function resets the round-robin bit mask and clears the
+ * fcf priority list. The list deletions are done while holding the
+ * hbalock. The ON_LIST and FLOGI_FAILED flags are cleared
+ * from each lpfc_fcf_pri record.
+ **/
+void
+lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
+{
+	struct lpfc_fcf_pri *fcf_pri;
+	struct lpfc_fcf_pri *next_fcf_pri;
+	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
+				 &phba->fcf.fcf_pri_list, list) {
+		list_del_init(&fcf_pri->list);
+		fcf_pri->fcf_rec.flag = 0;
+	}
+	spin_unlock_irq(&phba->hbalock);
+}
+static void
+lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+
+	if (mboxq->u.mb.mbxStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+				 "2017 REG_FCFI mbxStatus error x%x "
+				 "HBA state x%x\n", mboxq->u.mb.mbxStatus,
+				 vport->port_state);
+		goto fail_out;
+	}
+
+	/* Start FCoE discovery by sending a FLOGI. */
+	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
+	/* Set the FCFI registered flag */
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag |= FCF_REGISTERED;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* If there is a pending FCoE event, restart FCF table scan. */
+	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
+	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
+		goto fail_out;
+
+	/* Mark successful completion of FCF table scan */
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
+	phba->hba_flag &= ~FCF_TS_INPROG;
+	if (vport->port_state != LPFC_FLOGI) {
+		phba->hba_flag |= FCF_RR_INPROG;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_issue_init_vfi(vport);
+		goto out;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	goto out;
+
+fail_out:
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~FCF_RR_INPROG;
+	spin_unlock_irq(&phba->hbalock);
+out:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_fab_name_match - Check if the fcf fabric name matches.
+ * @fab_name: pointer to fabric name.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compares the fcf record's fabric name with the provided
+ * fabric name. If the fabric names are identical, this function
+ * returns 1; otherwise it returns 0.
+ **/ +static uint32_t +lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) +{ + if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) + return 0; + if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) + return 0; + if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) + return 0; + if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) + return 0; + if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) + return 0; + if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) + return 0; + if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) + return 0; + if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)) + return 0; + return 1; +} + +/** + * lpfc_sw_name_match - Check if the fcf switch name match. + * @sw_name: pointer to switch name. + * @new_fcf_record: pointer to fcf record. + * + * This routine compare the fcf record's switch name with provided + * switch name. If the switch name are identical this function + * returns 1 else return 0. + **/ +static uint32_t +lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record) +{ + if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) + return 0; + if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) + return 0; + if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) + return 0; + if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) + return 0; + if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) + return 0; + if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) + return 0; + if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) + return 0; + if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)) + return 0; + return 1; +} + +/** + * lpfc_mac_addr_match - Check if the fcf mac address match. + * @mac_addr: pointer to mac address. + * @new_fcf_record: pointer to fcf record. + * + * This routine compare the fcf record's mac address with HBA's + * FCF mac address. If the mac addresses are identical this function + * returns 1 else return 0. + **/ +static uint32_t +lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record) +{ + if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) + return 0; + if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) + return 0; + if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) + return 0; + if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) + return 0; + if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) + return 0; + if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record)) + return 0; + return 1; +} + +static bool +lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id) +{ + return (curr_vlan_id == new_vlan_id); +} + +/** + * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record. + * @phba: pointer to lpfc hba data structure. + * @fcf_index: Index for the lpfc_fcf_record. + * @new_fcf_record: pointer to hba fcf record. + * + * This routine updates the driver FCF priority record from the new HBA FCF + * record. The hbalock is asserted held in the code path calling this + * routine. 
+ **/ +static void +__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index, + struct fcf_record *new_fcf_record + ) +{ + struct lpfc_fcf_pri *fcf_pri; + + fcf_pri = &phba->fcf.fcf_pri[fcf_index]; + fcf_pri->fcf_rec.fcf_index = fcf_index; + /* FCF record priority */ + fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority; + +} + +/** + * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. + * @fcf_rec: pointer to driver fcf record. + * @new_fcf_record: pointer to fcf record. + * + * This routine copies the FCF information from the FCF + * record to lpfc_hba data structure. + **/ +static void +lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec, + struct fcf_record *new_fcf_record) +{ + /* Fabric name */ + fcf_rec->fabric_name[0] = + bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record); + fcf_rec->fabric_name[1] = + bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record); + fcf_rec->fabric_name[2] = + bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record); + fcf_rec->fabric_name[3] = + bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record); + fcf_rec->fabric_name[4] = + bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record); + fcf_rec->fabric_name[5] = + bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record); + fcf_rec->fabric_name[6] = + bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record); + fcf_rec->fabric_name[7] = + bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record); + /* Mac address */ + fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record); + fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record); + fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record); + fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record); + fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record); + fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record); + /* FCF record index */ + fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); + /* FCF record priority */ + fcf_rec->priority = new_fcf_record->fip_priority; + /* Switch name */ + fcf_rec->switch_name[0] = + bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record); + fcf_rec->switch_name[1] = + bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record); + fcf_rec->switch_name[2] = + bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record); + fcf_rec->switch_name[3] = + bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record); + fcf_rec->switch_name[4] = + bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record); + fcf_rec->switch_name[5] = + bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record); + fcf_rec->switch_name[6] = + bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record); + fcf_rec->switch_name[7] = + bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record); +} + +/** + * __lpfc_update_fcf_record - Update driver fcf record + * @phba: pointer to lpfc hba data structure. + * @fcf_rec: pointer to driver fcf record. + * @new_fcf_record: pointer to hba fcf record. + * @addr_mode: address mode to be set to the driver fcf record. + * @vlan_id: vlan tag to be set to the driver fcf record. + * @flag: flag bits to be set to the driver fcf record. + * + * This routine updates the driver FCF record from the new HBA FCF record + * together with the address mode, vlan_id, and other informations. This + * routine is called with the hbalock held. 
+ **/ +static void +__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec, + struct fcf_record *new_fcf_record, uint32_t addr_mode, + uint16_t vlan_id, uint32_t flag) +{ + lockdep_assert_held(&phba->hbalock); + + /* Copy the fields from the HBA's FCF record */ + lpfc_copy_fcf_record(fcf_rec, new_fcf_record); + /* Update other fields of driver FCF record */ + fcf_rec->addr_mode = addr_mode; + fcf_rec->vlan_id = vlan_id; + fcf_rec->flag |= (flag | RECORD_VALID); + __lpfc_update_fcf_record_pri(phba, + bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), + new_fcf_record); +} + +/** + * lpfc_register_fcf - Register the FCF with hba. + * @phba: pointer to lpfc hba data structure. + * + * This routine issues a register fcfi mailbox command to register + * the fcf with HBA. + **/ +static void +lpfc_register_fcf(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *fcf_mbxq; + int rc; + + spin_lock_irq(&phba->hbalock); + /* If the FCF is not available do nothing. */ + if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); + spin_unlock_irq(&phba->hbalock); + return; + } + + /* The FCF is already registered, start discovery */ + if (phba->fcf.fcf_flag & FCF_REGISTERED) { + phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); + phba->hba_flag &= ~FCF_TS_INPROG; + if (phba->pport->port_state != LPFC_FLOGI && + phba->pport->fc_flag & FC_FABRIC) { + phba->hba_flag |= FCF_RR_INPROG; + spin_unlock_irq(&phba->hbalock); + lpfc_initial_flogi(phba->pport); + return; + } + spin_unlock_irq(&phba->hbalock); + return; + } + spin_unlock_irq(&phba->hbalock); + + fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!fcf_mbxq) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); + spin_unlock_irq(&phba->hbalock); + return; + } + + lpfc_reg_fcfi(phba, fcf_mbxq); + fcf_mbxq->vport = phba->pport; + fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; + rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); + spin_unlock_irq(&phba->hbalock); + mempool_free(fcf_mbxq, phba->mbox_mem_pool); + } + + return; +} + +/** + * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery. + * @phba: pointer to lpfc hba data structure. + * @new_fcf_record: pointer to fcf record. + * @boot_flag: Indicates if this record used by boot bios. + * @addr_mode: The address mode to be used by this FCF + * @vlan_id: The vlan id to be used as vlan tagging by this FCF. + * + * This routine compare the fcf record with connect list obtained from the + * config region to decide if this FCF can be used for SAN discovery. It returns + * 1 if this record can be used for SAN discovery else return zero. If this FCF + * record can be used for SAN discovery, the boot_flag will indicate if this FCF + * is used by boot bios and addr_mode will indicate the addressing mode to be + * used for this FCF when the function returns. + * If the FCF record need to be used with a particular vlan id, the vlan is + * set in the vlan_id on return of the function. 
If not VLAN tagging need to + * be used with the FCF vlan_id will be set to LPFC_FCOE_NULL_VID; + **/ +static int +lpfc_match_fcf_conn_list(struct lpfc_hba *phba, + struct fcf_record *new_fcf_record, + uint32_t *boot_flag, uint32_t *addr_mode, + uint16_t *vlan_id) +{ + struct lpfc_fcf_conn_entry *conn_entry; + int i, j, fcf_vlan_id = 0; + + /* Find the lowest VLAN id in the FCF record */ + for (i = 0; i < 512; i++) { + if (new_fcf_record->vlan_bitmap[i]) { + fcf_vlan_id = i * 8; + j = 0; + while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) { + j++; + fcf_vlan_id++; + } + break; + } + } + + /* FCF not valid/available or solicitation in progress */ + if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || + !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) || + bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record)) + return 0; + + if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { + *boot_flag = 0; + *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, + new_fcf_record); + if (phba->valid_vlan) + *vlan_id = phba->vlan_id; + else + *vlan_id = LPFC_FCOE_NULL_VID; + return 1; + } + + /* + * If there are no FCF connection table entry, driver connect to all + * FCFs. + */ + if (list_empty(&phba->fcf_conn_rec_list)) { + *boot_flag = 0; + *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, + new_fcf_record); + + /* + * When there are no FCF connect entries, use driver's default + * addressing mode - FPMA. + */ + if (*addr_mode & LPFC_FCF_FPMA) + *addr_mode = LPFC_FCF_FPMA; + + /* If FCF record report a vlan id use that vlan id */ + if (fcf_vlan_id) + *vlan_id = fcf_vlan_id; + else + *vlan_id = LPFC_FCOE_NULL_VID; + return 1; + } + + list_for_each_entry(conn_entry, + &phba->fcf_conn_rec_list, list) { + if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID)) + continue; + + if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) && + !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name, + new_fcf_record)) + continue; + if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) && + !lpfc_sw_name_match(conn_entry->conn_rec.switch_name, + new_fcf_record)) + continue; + if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) { + /* + * If the vlan bit map does not have the bit set for the + * vlan id to be used, then it is not a match. + */ + if (!(new_fcf_record->vlan_bitmap + [conn_entry->conn_rec.vlan_tag / 8] & + (1 << (conn_entry->conn_rec.vlan_tag % 8)))) + continue; + } + + /* + * If connection record does not support any addressing mode, + * skip the FCF record. + */ + if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record) + & (LPFC_FCF_FPMA | LPFC_FCF_SPMA))) + continue; + + /* + * Check if the connection record specifies a required + * addressing mode. + */ + if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && + !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) { + + /* + * If SPMA required but FCF not support this continue. + */ + if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && + !(bf_get(lpfc_fcf_record_mac_addr_prov, + new_fcf_record) & LPFC_FCF_SPMA)) + continue; + + /* + * If FPMA required but FCF not support this continue. + */ + if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && + !(bf_get(lpfc_fcf_record_mac_addr_prov, + new_fcf_record) & LPFC_FCF_FPMA)) + continue; + } + + /* + * This fcf record matches filtering criteria. 
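The loop at the top of lpfc_match_fcf_conn_list above finds the lowest VLAN ID advertised by the FCF: it scans the 512-byte vlan_bitmap for the first non-zero byte and then the first set bit inside it, mapping bit j of byte i to VLAN i * 8 + j. A standalone sketch of the same scan (note the driver itself simply leaves its fcf_vlan_id at 0 when no bit is set):

#include <stdint.h>

#define VLAN_BITMAP_BYTES 512

/* Return the lowest VLAN ID whose bit is set, or -1 when the bitmap is
 * empty. Bit j of byte i encodes VLAN i * 8 + j, as assumed above. */
static int lowest_vlan_id(const uint8_t bitmap[VLAN_BITMAP_BYTES])
{
        for (int i = 0; i < VLAN_BITMAP_BYTES; i++) {
                int j = 0;

                if (!bitmap[i])
                        continue;
                while (!((bitmap[i] >> j) & 1))
                        j++;
                return i * 8 + j;
        }
        return -1;
}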
+ */
+ if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
+ *boot_flag = 1;
+ else
+ *boot_flag = 0;
+
+ /*
+ * If the user did not specify any addressing mode, or if the
+ * preferred addressing mode specified by the user is not supported
+ * by the FCF, allow the fabric to pick the addressing mode.
+ */
+ *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+ new_fcf_record);
+ /*
+ * If the user specified a required address mode, assign that
+ * address mode.
+ */
+ if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+ (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
+ *addr_mode = (conn_entry->conn_rec.flags &
+ FCFCNCT_AM_SPMA) ?
+ LPFC_FCF_SPMA : LPFC_FCF_FPMA;
+ /*
+ * If the user specified a preferred address mode, use that
+ * mode only if the FCF supports it.
+ */
+ else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+ (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
+ (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+ (*addr_mode & LPFC_FCF_SPMA))
+ *addr_mode = LPFC_FCF_SPMA;
+ else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+ (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
+ !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+ (*addr_mode & LPFC_FCF_FPMA))
+ *addr_mode = LPFC_FCF_FPMA;
+
+ /* If the matching connect list entry has a vlan id, use it */
+ if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
+ *vlan_id = conn_entry->conn_rec.vlan_tag;
+ /*
+ * If no vlan id is specified in the connect list entry, use the
+ * vlan id in the FCF record
+ */
+ else if (fcf_vlan_id)
+ *vlan_id = fcf_vlan_id;
+ else
+ *vlan_id = LPFC_FCOE_NULL_VID;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_check_pending_fcoe_event - Check if there is a pending fcoe event.
+ * @phba: pointer to lpfc hba data structure.
+ * @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
+ *
+ * This function checks if there is any fcoe event pending while the driver
+ * scans FCF entries. If there is any pending event, it will restart the
+ * FCF scanning and return 1; otherwise it returns 0.
+ */
+int
+lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
+{
+ /*
+ * If the Link is up and there were no FCoE events while in
+ * FCF discovery, there is no need to restart FCF discovery.
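lpfc_check_pending_fcoe_event above decides whether the scan in progress is stale by comparing the running FCoE async event tag with the tag snapshotted when the scan started, and by checking that the link is still up. A simplified model of that bookkeeping; the structure and names here are illustrative, not the driver's:

#include <stdbool.h>
#include <stdint.h>

struct fcf_scan_state {
        uint32_t eventtag;              /* bumped on every FCoE async event */
        uint32_t eventtag_at_scan;      /* captured when the scan started */
        bool link_up;
};

/* Mirrors the return-1 condition above: the scan must be redone (or
 * abandoned) if the link dropped or new events arrived meanwhile. */
static bool scan_is_stale(const struct fcf_scan_state *s)
{
        return !s->link_up || s->eventtag != s->eventtag_at_scan;
}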
+ */
+ if ((phba->link_state >= LPFC_LINK_UP) &&
+ (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
+ return 0;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2768 Pending link or FCF event during current "
+ "handling of the previous event: link_state:x%x, "
+ "evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
+ phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
+ phba->fcoe_eventtag);
+
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
+ spin_unlock_irq(&phba->hbalock);
+
+ if (phba->link_state >= LPFC_LINK_UP) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+ "2780 Restart FCF table scan due to "
+ "pending FCF event:evt_tag_at_scan:x%x, "
+ "evt_tag_current:x%x\n",
+ phba->fcoe_eventtag_at_fcf_scan,
+ phba->fcoe_eventtag);
+ lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+ } else {
+ /*
+ * Do not continue FCF discovery and clear the FCF_TS_INPROG
+ * flag.
+ */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+ "2833 Stop FCF discovery process due to link "
+ "state change (x%x)\n", phba->link_state);
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
+ phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
+ spin_unlock_irq(&phba->hbalock);
+ }
+
+ /* Unregister the currently registered FCF if required */
+ if (unreg_fcf) {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_REGISTERED;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_unregister_fcf(phba);
+ }
+ return 1;
+}
+
+/**
+ * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_cnt: number of eligible fcf records seen so far.
+ *
+ * This function makes a running random selection decision on which FCF
+ * record to use through a sequence of @fcf_cnt eligible FCF records with
+ * equal probability. To allow integer manipulation of random numbers of
+ * size uint32_t, a 16-bit random number returned from get_random_u16() is
+ * taken as the random number generated.
+ *
+ * Returns true when the outcome is that the newly read FCF record should be
+ * chosen; otherwise, returns false to keep the previously chosen FCF record.
+ **/
+static bool
+lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
+{
+ uint32_t rand_num;
+
+ /* Get 16-bit uniform random number */
+ rand_num = get_random_u16();
+
+ /* Decision with probability 1/fcf_cnt */
+ if ((fcf_cnt * rand_num) < 0xFFFF)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ * @next_fcf_index: pointer to holder of next fcf index.
+ *
+ * This routine parses the non-embedded fcf mailbox command by performing the
+ * necessary error checking, non-embedded read FCF record mailbox command
+ * SGE parsing, and endianness swapping.
+ *
+ * Returns the pointer to the new FCF record in the non-embedded mailbox
+ * command DMA memory if successful, otherwise NULL.
+ */
+static struct fcf_record *
+lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+ uint16_t *next_fcf_index)
+{
+ void *virt_addr;
+ struct lpfc_mbx_sge sge;
+ struct lpfc_mbx_read_fcf_tbl *read_fcf;
+ uint32_t shdr_status, shdr_add_status, if_type;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct fcf_record *new_fcf_record;
+
+ /* Get the first SGE entry from the non-embedded DMA memory. This
+ * routine only uses a single SGE.
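lpfc_sli4_new_fcf_random_select above is reservoir sampling with a reservoir of one: the Nth equal-priority candidate replaces the current pick with probability roughly 1/N, so after the whole table has been walked each candidate has had the same chance of being the final choice. A userspace sketch of the same decision, with rand() standing in for the kernel's get_random_u16():

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Accept the latest of fcf_cnt equally good candidates with probability
 * ~1/fcf_cnt, using a 16-bit uniform random number as the driver does. */
static bool accept_new_candidate(uint32_t fcf_cnt)
{
        uint32_t rand16 = (uint32_t)(rand() & 0xFFFF);  /* get_random_u16() stand-in */

        /* fcf_cnt * rand16 < 0xFFFF holds for about 1 in fcf_cnt draws */
        return (fcf_cnt * rand16) < 0xFFFF;
}

Calling this once per equal-priority record read from the table, and replacing the current pick whenever it returns true, is how the scan completion handler further down breaks priority ties.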
+ */ + lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); + if (unlikely(!mboxq->sge_array)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2524 Failed to get the non-embedded SGE " + "virtual address\n"); + return NULL; + } + virt_addr = mboxq->sge_array->addr[0]; + + shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; + lpfc_sli_pcimem_bcopy(shdr, shdr, + sizeof(union lpfc_sli4_cfg_shdr)); + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status) { + if (shdr_status == STATUS_FCF_TABLE_EMPTY || + if_type == LPFC_SLI_INTF_IF_TYPE_2) + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "2726 READ_FCF_RECORD Indicates empty " + "FCF table.\n"); + else + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2521 READ_FCF_RECORD mailbox failed " + "with status x%x add_status x%x, " + "mbx\n", shdr_status, shdr_add_status); + return NULL; + } + + /* Interpreting the returned information of the FCF record */ + read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; + lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, + sizeof(struct lpfc_mbx_read_fcf_tbl)); + *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); + new_fcf_record = (struct fcf_record *)(virt_addr + + sizeof(struct lpfc_mbx_read_fcf_tbl)); + lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, + offsetof(struct fcf_record, vlan_bitmap)); + new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137); + new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138); + + return new_fcf_record; +} + +/** + * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record + * @phba: pointer to lpfc hba data structure. + * @fcf_record: pointer to the fcf record. + * @vlan_id: the lowest vlan identifier associated to this fcf record. + * @next_fcf_index: the index to the next fcf record in hba's fcf table. + * + * This routine logs the detailed FCF record if the LOG_FIP loggin is + * enabled. 
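lpfc_sli4_fcf_rec_mbox_parse above converts the DMA'd reply to host byte order in place: lpfc_sli_pcimem_bcopy handles the header and the bulk of the record, and the two trailing words (word137/word138) are converted with le32_to_cpu. As a rough userspace analogue of that last step, assuming le32toh() from <endian.h>:

#include <endian.h>
#include <stddef.h>
#include <stdint.h>

/* Convert an array of little-endian 32-bit words to host byte order in
 * place, the way the trailing FCF record words are handled above. */
static void le32_words_to_host(uint32_t *words, size_t count)
{
        for (size_t i = 0; i < count; i++)
                words[i] = le32toh(words[i]);
}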
+ **/ +static void +lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, + struct fcf_record *fcf_record, + uint16_t vlan_id, + uint16_t next_fcf_index) +{ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2764 READ_FCF_RECORD:\n" + "\tFCF_Index : x%x\n" + "\tFCF_Avail : x%x\n" + "\tFCF_Valid : x%x\n" + "\tFCF_SOL : x%x\n" + "\tFIP_Priority : x%x\n" + "\tMAC_Provider : x%x\n" + "\tLowest VLANID : x%x\n" + "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n" + "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" + "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" + "\tNext_FCF_Index: x%x\n", + bf_get(lpfc_fcf_record_fcf_index, fcf_record), + bf_get(lpfc_fcf_record_fcf_avail, fcf_record), + bf_get(lpfc_fcf_record_fcf_valid, fcf_record), + bf_get(lpfc_fcf_record_fcf_sol, fcf_record), + fcf_record->fip_priority, + bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), + vlan_id, + bf_get(lpfc_fcf_record_mac_0, fcf_record), + bf_get(lpfc_fcf_record_mac_1, fcf_record), + bf_get(lpfc_fcf_record_mac_2, fcf_record), + bf_get(lpfc_fcf_record_mac_3, fcf_record), + bf_get(lpfc_fcf_record_mac_4, fcf_record), + bf_get(lpfc_fcf_record_mac_5, fcf_record), + bf_get(lpfc_fcf_record_fab_name_0, fcf_record), + bf_get(lpfc_fcf_record_fab_name_1, fcf_record), + bf_get(lpfc_fcf_record_fab_name_2, fcf_record), + bf_get(lpfc_fcf_record_fab_name_3, fcf_record), + bf_get(lpfc_fcf_record_fab_name_4, fcf_record), + bf_get(lpfc_fcf_record_fab_name_5, fcf_record), + bf_get(lpfc_fcf_record_fab_name_6, fcf_record), + bf_get(lpfc_fcf_record_fab_name_7, fcf_record), + bf_get(lpfc_fcf_record_switch_name_0, fcf_record), + bf_get(lpfc_fcf_record_switch_name_1, fcf_record), + bf_get(lpfc_fcf_record_switch_name_2, fcf_record), + bf_get(lpfc_fcf_record_switch_name_3, fcf_record), + bf_get(lpfc_fcf_record_switch_name_4, fcf_record), + bf_get(lpfc_fcf_record_switch_name_5, fcf_record), + bf_get(lpfc_fcf_record_switch_name_6, fcf_record), + bf_get(lpfc_fcf_record_switch_name_7, fcf_record), + next_fcf_index); +} + +/** + * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF + * @phba: pointer to lpfc hba data structure. + * @fcf_rec: pointer to an existing FCF record. + * @new_fcf_record: pointer to a new FCF record. + * @new_vlan_id: vlan id from the new FCF record. + * + * This function performs matching test of a new FCF record against an existing + * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id + * will not be used as part of the FCF record matching criteria. + * + * Returns true if all the fields matching, otherwise returns false. + */ +static bool +lpfc_sli4_fcf_record_match(struct lpfc_hba *phba, + struct lpfc_fcf_rec *fcf_rec, + struct fcf_record *new_fcf_record, + uint16_t new_vlan_id) +{ + if (new_vlan_id != LPFC_FCOE_IGNORE_VID) + if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id)) + return false; + if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record)) + return false; + if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record)) + return false; + if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record)) + return false; + if (fcf_rec->priority != new_fcf_record->fip_priority) + return false; + return true; +} + +/** + * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf + * @vport: Pointer to vport object. + * @fcf_index: index to next fcf. + * + * This function processing the roundrobin fcf failover to next fcf index. + * When this function is invoked, there will be a current fcf registered + * for flogi. 
+ * Return: 0 for continue retrying flogi on currently registered fcf; + * 1 for stop flogi on currently registered fcf; + */ +int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) +{ + struct lpfc_hba *phba = vport->phba; + int rc; + + if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { + spin_lock_irq(&phba->hbalock); + if (phba->hba_flag & HBA_DEVLOSS_TMO) { + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2872 Devloss tmo with no eligible " + "FCF, unregister in-use FCF (x%x) " + "and rescan FCF table\n", + phba->fcf.current_rec.fcf_indx); + lpfc_unregister_fcf_rescan(phba); + goto stop_flogi_current_fcf; + } + /* Mark the end to FLOGI roundrobin failover */ + phba->hba_flag &= ~FCF_RR_INPROG; + /* Allow action to new fcf asynchronous event */ + phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2865 No FCF available, stop roundrobin FCF " + "failover and change port state:x%x/x%x\n", + phba->pport->port_state, LPFC_VPORT_UNKNOWN); + phba->pport->port_state = LPFC_VPORT_UNKNOWN; + + if (!phba->fcf.fcf_redisc_attempted) { + lpfc_unregister_fcf(phba); + + rc = lpfc_sli4_redisc_fcf_table(phba); + if (!rc) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "3195 Rediscover FCF table\n"); + phba->fcf.fcf_redisc_attempted = 1; + lpfc_sli4_clear_fcf_rr_bmask(phba); + } else { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "3196 Rediscover FCF table " + "failed. Status:x%x\n", rc); + } + } else { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "3197 Already rediscover FCF table " + "attempted. No more retry\n"); + } + goto stop_flogi_current_fcf; + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, + "2794 Try FLOGI roundrobin FCF failover to " + "(x%x)\n", fcf_index); + rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index); + if (rc) + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, + "2761 FLOGI roundrobin FCF failover " + "failed (rc:x%x) to read FCF (x%x)\n", + rc, phba->fcf.current_rec.fcf_indx); + else + goto stop_flogi_current_fcf; + } + return 0; + +stop_flogi_current_fcf: + lpfc_can_disctmo(vport); + return 1; +} + +/** + * lpfc_sli4_fcf_pri_list_del + * @phba: pointer to lpfc hba data structure. + * @fcf_index: the index of the fcf record to delete + * This routine checks the on list flag of the fcf_index to be deleted. + * If it is one the list then it is removed from the list, and the flag + * is cleared. This routine grab the hbalock before removing the fcf + * record from the list. + **/ +static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba, + uint16_t fcf_index) +{ + struct lpfc_fcf_pri *new_fcf_pri; + + new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "3058 deleting idx x%x pri x%x flg x%x\n", + fcf_index, new_fcf_pri->fcf_rec.priority, + new_fcf_pri->fcf_rec.flag); + spin_lock_irq(&phba->hbalock); + if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) { + if (phba->fcf.current_rec.priority == + new_fcf_pri->fcf_rec.priority) + phba->fcf.eligible_fcf_cnt--; + list_del_init(&new_fcf_pri->list); + new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST; + } + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli4_set_fcf_flogi_fail + * @phba: pointer to lpfc hba data structure. 
+ * @fcf_index: the index of the fcf record to update + * This routine acquires the hbalock and then set the LPFC_FCF_FLOGI_FAILED + * flag so the round robin selection for the particular priority level + * will try a different fcf record that does not have this bit set. + * If the fcf record is re-read for any reason this flag is cleared brfore + * adding it to the priority list. + **/ +void +lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index) +{ + struct lpfc_fcf_pri *new_fcf_pri; + new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; + spin_lock_irq(&phba->hbalock); + new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED; + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli4_fcf_pri_list_add + * @phba: pointer to lpfc hba data structure. + * @fcf_index: the index of the fcf record to add + * @new_fcf_record: pointer to a new FCF record. + * This routine checks the priority of the fcf_index to be added. + * If it is a lower priority than the current head of the fcf_pri list + * then it is added to the list in the right order. + * If it is the same priority as the current head of the list then it + * is added to the head of the list and its bit in the rr_bmask is set. + * If the fcf_index to be added is of a higher priority than the current + * head of the list then the rr_bmask is cleared, its bit is set in the + * rr_bmask and it is added to the head of the list. + * returns: + * 0=success 1=failure + **/ +static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, + uint16_t fcf_index, + struct fcf_record *new_fcf_record) +{ + uint16_t current_fcf_pri; + uint16_t last_index; + struct lpfc_fcf_pri *fcf_pri; + struct lpfc_fcf_pri *next_fcf_pri; + struct lpfc_fcf_pri *new_fcf_pri; + int ret; + + new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "3059 adding idx x%x pri x%x flg x%x\n", + fcf_index, new_fcf_record->fip_priority, + new_fcf_pri->fcf_rec.flag); + spin_lock_irq(&phba->hbalock); + if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) + list_del_init(&new_fcf_pri->list); + new_fcf_pri->fcf_rec.fcf_index = fcf_index; + new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority; + if (list_empty(&phba->fcf.fcf_pri_list)) { + list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); + ret = lpfc_sli4_fcf_rr_index_set(phba, + new_fcf_pri->fcf_rec.fcf_index); + goto out; + } + + last_index = find_first_bit(phba->fcf.fcf_rr_bmask, + LPFC_SLI4_FCF_TBL_INDX_MAX); + if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { + ret = 0; /* Empty rr list */ + goto out; + } + current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority; + if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) { + list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); + if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) { + memset(phba->fcf.fcf_rr_bmask, 0, + sizeof(*phba->fcf.fcf_rr_bmask)); + /* fcfs_at_this_priority_level = 1; */ + phba->fcf.eligible_fcf_cnt = 1; + } else + /* fcfs_at_this_priority_level++; */ + phba->fcf.eligible_fcf_cnt++; + ret = lpfc_sli4_fcf_rr_index_set(phba, + new_fcf_pri->fcf_rec.fcf_index); + goto out; + } + + list_for_each_entry_safe(fcf_pri, next_fcf_pri, + &phba->fcf.fcf_pri_list, list) { + if (new_fcf_pri->fcf_rec.priority <= + fcf_pri->fcf_rec.priority) { + if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list) + list_add(&new_fcf_pri->list, + &phba->fcf.fcf_pri_list); + else + list_add(&new_fcf_pri->list, + &((struct lpfc_fcf_pri *) + fcf_pri->list.prev)->list); + ret = 0; + goto out; + } else if (fcf_pri->list.next == 
&phba->fcf.fcf_pri_list + || new_fcf_pri->fcf_rec.priority < + next_fcf_pri->fcf_rec.priority) { + list_add(&new_fcf_pri->list, &fcf_pri->list); + ret = 0; + goto out; + } + if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority) + continue; + + } + ret = 1; +out: + /* we use = instead of |= to clear the FLOGI_FAILED flag. */ + new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST; + spin_unlock_irq(&phba->hbalock); + return ret; +} + +/** + * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox object. + * + * This function iterates through all the fcf records available in + * HBA and chooses the optimal FCF record for discovery. After finding + * the FCF for discovery it registers the FCF record and kicks start + * discovery. + * If FCF_IN_USE flag is set in currently used FCF, the routine tries to + * use an FCF record which matches fabric name and mac address of the + * currently used FCF record. + * If the driver supports only one FCF, it will try to use the FCF record + * used by BOOT_BIOS. + */ +void +lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct fcf_record *new_fcf_record; + uint32_t boot_flag, addr_mode; + uint16_t fcf_index, next_fcf_index; + struct lpfc_fcf_rec *fcf_rec = NULL; + uint16_t vlan_id = LPFC_FCOE_NULL_VID; + bool select_new_fcf; + int rc; + + /* If there is pending FCoE event restart FCF table scan */ + if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) { + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return; + } + + /* Parse the FCF record from the non-embedded mailbox command */ + new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, + &next_fcf_index); + if (!new_fcf_record) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2765 Mailbox command READ_FCF_RECORD " + "failed to retrieve a FCF record.\n"); + /* Let next new FCF event trigger fast failover */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~FCF_TS_INPROG; + spin_unlock_irq(&phba->hbalock); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return; + } + + /* Check the FCF record against the connection list */ + rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, + &addr_mode, &vlan_id); + + /* Log the FCF record information if turned on */ + lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, + next_fcf_index); + + /* + * If the fcf record does not match with connect list entries + * read the next entry; otherwise, this is an eligible FCF + * record for roundrobin FCF failover. 
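lpfc_sli4_fcf_pri_list_add above keeps fcf_pri_list sorted by ascending fip_priority (a lower value is preferred) and maintains eligible_fcf_cnt, the number of entries sharing the best priority: the count resets to 1 when a strictly better priority takes over the head and is bumped on a tie. The round-robin bitmask bookkeeping is omitted here and the types are illustrative, but the insertion policy itself reduces to this sketch:

#include <stdint.h>
#include <stdlib.h>

struct pri_node {
        uint16_t fcf_index;
        uint16_t priority;      /* lower value = higher preference */
        struct pri_node *next;
};

struct pri_list {
        struct pri_node *head;
        uint32_t eligible_cnt;  /* entries sharing the best priority */
};

static int pri_list_add(struct pri_list *l, uint16_t index, uint16_t prio)
{
        struct pri_node *n = calloc(1, sizeof(*n));
        struct pri_node **pos = &l->head;

        if (!n)
                return -1;
        n->fcf_index = index;
        n->priority = prio;

        if (!l->head || prio < l->head->priority)
                l->eligible_cnt = 1;            /* new best priority level */
        else if (prio == l->head->priority)
                l->eligible_cnt++;              /* another tie at the best level */

        /* walk to the first entry with a worse (larger) priority value */
        while (*pos && (*pos)->priority <= prio)
                pos = &(*pos)->next;
        n->next = *pos;
        *pos = n;
        return 0;
}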
+ */ + if (!rc) { + lpfc_sli4_fcf_pri_list_del(phba, + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record)); + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2781 FCF (x%x) failed connection " + "list check: (x%x/x%x/%x)\n", + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record), + bf_get(lpfc_fcf_record_fcf_avail, + new_fcf_record), + bf_get(lpfc_fcf_record_fcf_valid, + new_fcf_record), + bf_get(lpfc_fcf_record_fcf_sol, + new_fcf_record)); + if ((phba->fcf.fcf_flag & FCF_IN_USE) && + lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, + new_fcf_record, LPFC_FCOE_IGNORE_VID)) { + if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) != + phba->fcf.current_rec.fcf_indx) { + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "2862 FCF (x%x) matches property " + "of in-use FCF (x%x)\n", + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record), + phba->fcf.current_rec.fcf_indx); + goto read_next_fcf; + } + /* + * In case the current in-use FCF record becomes + * invalid/unavailable during FCF discovery that + * was not triggered by fast FCF failover process, + * treat it as fast FCF failover. + */ + if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) && + !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2835 Invalid in-use FCF " + "(x%x), enter FCF failover " + "table scan.\n", + phba->fcf.current_rec.fcf_indx); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag |= FCF_REDISC_FOV; + spin_unlock_irq(&phba->hbalock); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + lpfc_sli4_fcf_scan_read_fcf_rec(phba, + LPFC_FCOE_FCF_GET_FIRST); + return; + } + } + goto read_next_fcf; + } else { + fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); + rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, + new_fcf_record); + if (rc) + goto read_next_fcf; + } + + /* + * If this is not the first FCF discovery of the HBA, use last + * FCF record for the discovery. The condition that a rescan + * matches the in-use FCF record: fabric name, switch name, mac + * address, and vlan_id. + */ + spin_lock_irq(&phba->hbalock); + if (phba->fcf.fcf_flag & FCF_IN_USE) { + if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && + lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, + new_fcf_record, vlan_id)) { + if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == + phba->fcf.current_rec.fcf_indx) { + phba->fcf.fcf_flag |= FCF_AVAILABLE; + if (phba->fcf.fcf_flag & FCF_REDISC_PEND) + /* Stop FCF redisc wait timer */ + __lpfc_sli4_stop_fcf_redisc_wait_timer( + phba); + else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) + /* Fast failover, mark completed */ + phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2836 New FCF matches in-use " + "FCF (x%x), port_state:x%x, " + "fc_flag:x%x\n", + phba->fcf.current_rec.fcf_indx, + phba->pport->port_state, + phba->pport->fc_flag); + goto out; + } else + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2863 New FCF (x%x) matches " + "property of in-use FCF (x%x)\n", + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record), + phba->fcf.current_rec.fcf_indx); + } + /* + * Read next FCF record from HBA searching for the matching + * with in-use record only if not during the fast failover + * period. In case of fast failover period, it shall try to + * determine whether the FCF record just read should be the + * next candidate. 
+ */ + if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { + spin_unlock_irq(&phba->hbalock); + goto read_next_fcf; + } + } + /* + * Update on failover FCF record only if it's in FCF fast-failover + * period; otherwise, update on current FCF record. + */ + if (phba->fcf.fcf_flag & FCF_REDISC_FOV) + fcf_rec = &phba->fcf.failover_rec; + else + fcf_rec = &phba->fcf.current_rec; + + if (phba->fcf.fcf_flag & FCF_AVAILABLE) { + /* + * If the driver FCF record does not have boot flag + * set and new hba fcf record has boot flag set, use + * the new hba fcf record. + */ + if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) { + /* Choose this FCF record */ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2837 Update current FCF record " + "(x%x) with new FCF record (x%x)\n", + fcf_rec->fcf_indx, + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record)); + __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, + addr_mode, vlan_id, BOOT_ENABLE); + spin_unlock_irq(&phba->hbalock); + goto read_next_fcf; + } + /* + * If the driver FCF record has boot flag set and the + * new hba FCF record does not have boot flag, read + * the next FCF record. + */ + if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) { + spin_unlock_irq(&phba->hbalock); + goto read_next_fcf; + } + /* + * If the new hba FCF record has lower priority value + * than the driver FCF record, use the new record. + */ + if (new_fcf_record->fip_priority < fcf_rec->priority) { + /* Choose the new FCF record with lower priority */ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2838 Update current FCF record " + "(x%x) with new FCF record (x%x)\n", + fcf_rec->fcf_indx, + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record)); + __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, + addr_mode, vlan_id, 0); + /* Reset running random FCF selection count */ + phba->fcf.eligible_fcf_cnt = 1; + } else if (new_fcf_record->fip_priority == fcf_rec->priority) { + /* Update running random FCF selection count */ + phba->fcf.eligible_fcf_cnt++; + select_new_fcf = lpfc_sli4_new_fcf_random_select(phba, + phba->fcf.eligible_fcf_cnt); + if (select_new_fcf) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2839 Update current FCF record " + "(x%x) with new FCF record (x%x)\n", + fcf_rec->fcf_indx, + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record)); + /* Choose the new FCF by random selection */ + __lpfc_update_fcf_record(phba, fcf_rec, + new_fcf_record, + addr_mode, vlan_id, 0); + } + } + spin_unlock_irq(&phba->hbalock); + goto read_next_fcf; + } + /* + * This is the first suitable FCF record, choose this record for + * initial best-fit FCF. + */ + if (fcf_rec) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2840 Update initial FCF candidate " + "with FCF (x%x)\n", + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record)); + __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, + addr_mode, vlan_id, (boot_flag ? 
+ BOOT_ENABLE : 0)); + phba->fcf.fcf_flag |= FCF_AVAILABLE; + /* Setup initial running random FCF selection count */ + phba->fcf.eligible_fcf_cnt = 1; + } + spin_unlock_irq(&phba->hbalock); + goto read_next_fcf; + +read_next_fcf: + lpfc_sli4_mbox_cmd_free(phba, mboxq); + if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) { + if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { + /* + * Case of FCF fast failover scan + */ + + /* + * It has not found any suitable FCF record, cancel + * FCF scan inprogress, and do nothing + */ + if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2782 No suitable FCF found: " + "(x%x/x%x)\n", + phba->fcoe_eventtag_at_fcf_scan, + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record)); + spin_lock_irq(&phba->hbalock); + if (phba->hba_flag & HBA_DEVLOSS_TMO) { + phba->hba_flag &= ~FCF_TS_INPROG; + spin_unlock_irq(&phba->hbalock); + /* Unregister in-use FCF and rescan */ + lpfc_printf_log(phba, KERN_INFO, + LOG_FIP, + "2864 On devloss tmo " + "unreg in-use FCF and " + "rescan FCF table\n"); + lpfc_unregister_fcf_rescan(phba); + return; + } + /* + * Let next new FCF event trigger fast failover + */ + phba->hba_flag &= ~FCF_TS_INPROG; + spin_unlock_irq(&phba->hbalock); + return; + } + /* + * It has found a suitable FCF record that is not + * the same as in-use FCF record, unregister the + * in-use FCF record, replace the in-use FCF record + * with the new FCF record, mark FCF fast failover + * completed, and then start register the new FCF + * record. + */ + + /* Unregister the current in-use FCF record */ + lpfc_unregister_fcf(phba); + + /* Replace in-use record with the new record */ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2842 Replace in-use FCF (x%x) " + "with failover FCF (x%x)\n", + phba->fcf.current_rec.fcf_indx, + phba->fcf.failover_rec.fcf_indx); + memcpy(&phba->fcf.current_rec, + &phba->fcf.failover_rec, + sizeof(struct lpfc_fcf_rec)); + /* + * Mark the fast FCF failover rediscovery completed + * and the start of the first round of the roundrobin + * FCF failover. + */ + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; + spin_unlock_irq(&phba->hbalock); + /* Register to the new FCF record */ + lpfc_register_fcf(phba); + } else { + /* + * In case of transaction period to fast FCF failover, + * do nothing when search to the end of the FCF table. + */ + if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) || + (phba->fcf.fcf_flag & FCF_REDISC_PEND)) + return; + + if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && + phba->fcf.fcf_flag & FCF_IN_USE) { + /* + * In case the current in-use FCF record no + * longer existed during FCF discovery that + * was not triggered by fast FCF failover + * process, treat it as fast FCF failover. + */ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2841 In-use FCF record (x%x) " + "not reported, entering fast " + "FCF failover mode scanning.\n", + phba->fcf.current_rec.fcf_indx); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag |= FCF_REDISC_FOV; + spin_unlock_irq(&phba->hbalock); + lpfc_sli4_fcf_scan_read_fcf_rec(phba, + LPFC_FCOE_FCF_GET_FIRST); + return; + } + /* Register to the new FCF record */ + lpfc_register_fcf(phba); + } + } else + lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index); + return; + +out: + lpfc_sli4_mbox_cmd_free(phba, mboxq); + lpfc_register_fcf(phba); + + return; +} + +/** + * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler + * @phba: pointer to lpfc hba data structure. 
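Taken together, the scan completion handler above prefers a record flagged for the boot BIOS, then a lower fip_priority value, and finally falls back to the 1/N random tie-break among records of equal priority. The per-record decision against the current candidate can be condensed to the sketch below; the struct and the random_tiebreak callback are illustrative stand-ins (the driver uses lpfc_sli4_new_fcf_random_select with its running eligible_fcf_cnt):

#include <stdbool.h>
#include <stdint.h>

struct candidate {
        bool boot_enable;       /* record is used by the boot BIOS */
        uint8_t fip_priority;   /* lower value wins */
};

/* Returns true when the newly read record should replace the current one. */
static bool prefer_new_record(const struct candidate *cur,
                              const struct candidate *new_rec,
                              uint32_t seen_at_this_priority,
                              bool (*random_tiebreak)(uint32_t))
{
        if (new_rec->boot_enable != cur->boot_enable)
                return new_rec->boot_enable;    /* boot BIOS record wins */
        if (new_rec->fip_priority != cur->fip_priority)
                return new_rec->fip_priority < cur->fip_priority;
        return random_tiebreak(seen_at_this_priority);  /* equal: 1/N pick */
}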
+ * @mboxq: pointer to mailbox object. + * + * This is the callback function for FLOGI failure roundrobin FCF failover + * read FCF record mailbox command from the eligible FCF record bmask for + * performing the failover. If the FCF read back is not valid/available, it + * fails through to retrying FLOGI to the currently registered FCF again. + * Otherwise, if the FCF read back is valid and available, it will set the + * newly read FCF record to the failover FCF record, unregister currently + * registered FCF record, copy the failover FCF record to the current + * FCF record, and then register the current FCF record before proceeding + * to trying FLOGI on the new failover FCF. + */ +void +lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct fcf_record *new_fcf_record; + uint32_t boot_flag, addr_mode; + uint16_t next_fcf_index, fcf_index; + uint16_t current_fcf_index; + uint16_t vlan_id = LPFC_FCOE_NULL_VID; + int rc; + + /* If link state is not up, stop the roundrobin failover process */ + if (phba->link_state < LPFC_LINK_UP) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + phba->hba_flag &= ~FCF_RR_INPROG; + spin_unlock_irq(&phba->hbalock); + goto out; + } + + /* Parse the FCF record from the non-embedded mailbox command */ + new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, + &next_fcf_index); + if (!new_fcf_record) { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2766 Mailbox command READ_FCF_RECORD " + "failed to retrieve a FCF record. " + "hba_flg x%x fcf_flg x%x\n", phba->hba_flag, + phba->fcf.fcf_flag); + lpfc_unregister_fcf_rescan(phba); + goto out; + } + + /* Get the needed parameters from FCF record */ + rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, + &addr_mode, &vlan_id); + + /* Log the FCF record information if turned on */ + lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, + next_fcf_index); + + fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); + if (!rc) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2848 Remove ineligible FCF (x%x) from " + "from roundrobin bmask\n", fcf_index); + /* Clear roundrobin bmask bit for ineligible FCF */ + lpfc_sli4_fcf_rr_index_clear(phba, fcf_index); + /* Perform next round of roundrobin FCF failover */ + fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); + rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index); + if (rc) + goto out; + goto error_out; + } + + if (fcf_index == phba->fcf.current_rec.fcf_indx) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2760 Perform FLOGI roundrobin FCF failover: " + "FCF (x%x) back to FCF (x%x)\n", + phba->fcf.current_rec.fcf_indx, fcf_index); + /* Wait 500 ms before retrying FLOGI to current FCF */ + msleep(500); + lpfc_issue_init_vfi(phba->pport); + goto out; + } + + /* Upload new FCF record to the failover FCF record */ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2834 Update current FCF (x%x) with new FCF (x%x)\n", + phba->fcf.failover_rec.fcf_indx, fcf_index); + spin_lock_irq(&phba->hbalock); + __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, + new_fcf_record, addr_mode, vlan_id, + (boot_flag ? 
BOOT_ENABLE : 0)); + spin_unlock_irq(&phba->hbalock); + + current_fcf_index = phba->fcf.current_rec.fcf_indx; + + /* Unregister the current in-use FCF record */ + lpfc_unregister_fcf(phba); + + /* Replace in-use record with the new record */ + memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, + sizeof(struct lpfc_fcf_rec)); + + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2783 Perform FLOGI roundrobin FCF failover: FCF " + "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index); + +error_out: + lpfc_register_fcf(phba); +out: + lpfc_sli4_mbox_cmd_free(phba, mboxq); +} + +/** + * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler. + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox object. + * + * This is the callback function of read FCF record mailbox command for + * updating the eligible FCF bmask for FLOGI failure roundrobin FCF + * failover when a new FCF event happened. If the FCF read back is + * valid/available and it passes the connection list check, it updates + * the bmask for the eligible FCF record for roundrobin failover. + */ +void +lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct fcf_record *new_fcf_record; + uint32_t boot_flag, addr_mode; + uint16_t fcf_index, next_fcf_index; + uint16_t vlan_id = LPFC_FCOE_NULL_VID; + int rc; + + /* If link state is not up, no need to proceed */ + if (phba->link_state < LPFC_LINK_UP) + goto out; + + /* If FCF discovery period is over, no need to proceed */ + if (!(phba->fcf.fcf_flag & FCF_DISCOVERY)) + goto out; + + /* Parse the FCF record from the non-embedded mailbox command */ + new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, + &next_fcf_index); + if (!new_fcf_record) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2767 Mailbox command READ_FCF_RECORD " + "failed to retrieve a FCF record.\n"); + goto out; + } + + /* Check the connection list for eligibility */ + rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, + &addr_mode, &vlan_id); + + /* Log the FCF record information if turned on */ + lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, + next_fcf_index); + + if (!rc) + goto out; + + /* Update the eligible FCF record index bmask */ + fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); + + rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record); + +out: + lpfc_sli4_mbox_cmd_free(phba, mboxq); +} + +/** + * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command. + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox data structure. + * + * This function handles completion of init vfi mailbox command. + */ +static void +lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_vport *vport = mboxq->vport; + + /* + * VFI not supported on interface type 0, just do the flogi + * Also continue if the VFI is in use - just use the same one. + */ + if (mboxq->u.mb.mbxStatus && + (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != + LPFC_SLI_INTF_IF_TYPE_0) && + mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2891 Init VFI mailbox failed 0x%x\n", + mboxq->u.mb.mbxStatus); + mempool_free(mboxq, phba->mbox_mem_pool); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + return; + } + + lpfc_initial_flogi(vport); + mempool_free(mboxq, phba->mbox_mem_pool); + return; +} + +/** + * lpfc_issue_init_vfi - Issue init_vfi mailbox command. + * @vport: pointer to lpfc_vport data structure. 
+ * + * This function issue a init_vfi mailbox command to initialize the VFI and + * VPI for the physical port. + */ +void +lpfc_issue_init_vfi(struct lpfc_vport *vport) +{ + LPFC_MBOXQ_t *mboxq; + int rc; + struct lpfc_hba *phba = vport->phba; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "2892 Failed to allocate " + "init_vfi mailbox\n"); + return; + } + lpfc_init_vfi(mboxq, vport); + mboxq->mbox_cmpl = lpfc_init_vfi_cmpl; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2893 Failed to issue init_vfi mailbox\n"); + mempool_free(mboxq, vport->phba->mbox_mem_pool); + } +} + +/** + * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox data structure. + * + * This function handles completion of init vpi mailbox command. + */ +void +lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_vport *vport = mboxq->vport; + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (mboxq->u.mb.mbxStatus) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2609 Init VPI mailbox failed 0x%x\n", + mboxq->u.mb.mbxStatus); + mempool_free(mboxq, phba->mbox_mem_pool); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + return; + } + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; + spin_unlock_irq(shost->host_lock); + + /* If this port is physical port or FDISC is done, do reg_vpi */ + if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) { + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "2731 Cannot find fabric " + "controller node\n"); + else + lpfc_register_new_vport(phba, vport, ndlp); + mempool_free(mboxq, phba->mbox_mem_pool); + return; + } + + if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) + lpfc_initial_fdisc(vport); + else { + lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2606 No NPIV Fabric support\n"); + } + mempool_free(mboxq, phba->mbox_mem_pool); + return; +} + +/** + * lpfc_issue_init_vpi - Issue init_vpi mailbox command. + * @vport: pointer to lpfc_vport data structure. + * + * This function issue a init_vpi mailbox command to initialize + * VPI for the vport. + */ +void +lpfc_issue_init_vpi(struct lpfc_vport *vport) +{ + LPFC_MBOXQ_t *mboxq; + int rc, vpi; + + if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) { + vpi = lpfc_alloc_vpi(vport->phba); + if (!vpi) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "3303 Failed to obtain vport vpi\n"); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + return; + } + vport->vpi = vpi; + } + + mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "2607 Failed to allocate " + "init_vpi mailbox\n"); + return; + } + lpfc_init_vpi(vport->phba, mboxq, vport->vpi); + mboxq->vport = vport; + mboxq->mbox_cmpl = lpfc_init_vpi_cmpl; + rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2608 Failed to issue init_vpi mailbox\n"); + mempool_free(mboxq, vport->phba->mbox_mem_pool); + } +} + +/** + * lpfc_start_fdiscs - send fdiscs for each vports on this port. 
+ * @phba: pointer to lpfc hba data structure. + * + * This function loops through the list of vports on the @phba and issues an + * FDISC if possible. + */ +void +lpfc_start_fdiscs(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + int i; + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + if (vports[i]->port_type == LPFC_PHYSICAL_PORT) + continue; + /* There are no vpi for this vport */ + if (vports[i]->vpi > phba->max_vpi) { + lpfc_vport_set_state(vports[i], + FC_VPORT_FAILED); + continue; + } + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { + lpfc_vport_set_state(vports[i], + FC_VPORT_LINKDOWN); + continue; + } + if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { + lpfc_issue_init_vpi(vports[i]); + continue; + } + if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) + lpfc_initial_fdisc(vports[i]); + else { + lpfc_vport_set_state(vports[i], + FC_VPORT_NO_FABRIC_SUPP); + lpfc_printf_vlog(vports[i], KERN_ERR, + LOG_TRACE_EVENT, + "0259 No NPIV " + "Fabric support\n"); + } + } + } + lpfc_destroy_vport_work_array(phba, vports); +} + +void +lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_vport *vport = mboxq->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + /* + * VFI not supported for interface type 0, so ignore any mailbox + * error (except VFI in use) and continue with the discovery. + */ + if (mboxq->u.mb.mbxStatus && + (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != + LPFC_SLI_INTF_IF_TYPE_0) && + mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2018 REG_VFI mbxStatus error x%x " + "HBA state x%x\n", + mboxq->u.mb.mbxStatus, vport->port_state); + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { + /* FLOGI failed, use loop map to make discovery list */ + lpfc_disc_list_loopmap(vport); + /* Start discovery */ + lpfc_disc_start(vport); + goto out_free_mem; + } + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + goto out_free_mem; + } + + /* If the VFI is already registered, there is nothing else to do + * Unless this was a VFI update and we are in PT2PT mode, then + * we should drop through to set the port state to ready. + */ + if (vport->fc_flag & FC_VFI_REGISTERED) + if (!(phba->sli_rev == LPFC_SLI_REV4 && + vport->fc_flag & FC_PT2PT)) + goto out_free_mem; + + /* The VPI is implicitly registered when the VFI is registered */ + spin_lock_irq(shost->host_lock); + vport->vpi_state |= LPFC_VPI_REGISTERED; + vport->fc_flag |= FC_VFI_REGISTERED; + vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; + vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; + spin_unlock_irq(shost->host_lock); + + /* In case SLI4 FC loopback test, we are ready */ + if ((phba->sli_rev == LPFC_SLI_REV4) && + (phba->link_flag & LS_LOOPBACK_MODE)) { + phba->link_state = LPFC_HBA_READY; + goto out_free_mem; + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x " + "alpacnt:%d LinkState:%x topology:%x\n", + vport->port_state, vport->fc_flag, vport->fc_myDID, + vport->phba->alpa_map[0], + phba->link_state, phba->fc_topology); + + if (vport->port_state == LPFC_FABRIC_CFG_LINK) { + /* + * For private loop or for NPort pt2pt, + * just start discovery and we are done. 
+ */ + if ((vport->fc_flag & FC_PT2PT) || + ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && + !(vport->fc_flag & FC_PUBLIC_LOOP))) { + + /* Use loop map to make discovery list */ + lpfc_disc_list_loopmap(vport); + /* Start discovery */ + if (vport->fc_flag & FC_PT2PT) + vport->port_state = LPFC_VPORT_READY; + else + lpfc_disc_start(vport); + } else { + lpfc_start_fdiscs(phba); + lpfc_do_scr_ns_plogi(phba, vport); + } + } + +out_free_mem: + lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); +} + +static void +lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb = &pmb->u.mb; + struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf; + struct lpfc_vport *vport = pmb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct serv_parm *sp = &vport->fc_sparam; + uint32_t ed_tov; + + /* Check for error */ + if (mb->mbxStatus) { + /* READ_SPARAM mbox error state */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0319 READ_SPARAM mbxStatus error x%x " + "hba state x%x>\n", + mb->mbxStatus, vport->port_state); + lpfc_linkdown(phba); + goto out; + } + + memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt, + sizeof (struct serv_parm)); + + ed_tov = be32_to_cpu(sp->cmn.e_d_tov); + if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ + ed_tov = (ed_tov + 999999) / 1000000; + + phba->fc_edtov = ed_tov; + phba->fc_ratov = (2 * ed_tov) / 1000; + if (phba->fc_ratov < FF_DEF_RATOV) { + /* RA_TOV should be atleast 10sec for initial flogi */ + phba->fc_ratov = FF_DEF_RATOV; + } + + lpfc_update_vport_wwn(vport); + fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); + if (vport->port_type == LPFC_PHYSICAL_PORT) { + memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn)); + memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn)); + } + + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); + + /* Check if sending the FLOGI is being deferred to after we get + * up to date CSPs from MBX_READ_SPARAM. 
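+ * When HBA_DEFER_FLOGI is set, the FLOGI was held back until the
+ * service parameters were current; issue it now and clear the flag.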
+ */ + if (phba->hba_flag & HBA_DEFER_FLOGI) { + lpfc_initial_flogi(vport); + phba->hba_flag &= ~HBA_DEFER_FLOGI; + } + return; + +out: + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); + lpfc_issue_clear_la(phba, vport); +} + +static void +lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) +{ + struct lpfc_vport *vport = phba->pport; + LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL; + struct Scsi_Host *shost; + int i; + int rc; + struct fcf_record *fcf_record; + uint32_t fc_flags = 0; + unsigned long iflags; + + spin_lock_irqsave(&phba->hbalock, iflags); + phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la); + + if (!(phba->hba_flag & HBA_FCOE_MODE)) { + switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { + case LPFC_LINK_SPEED_1GHZ: + case LPFC_LINK_SPEED_2GHZ: + case LPFC_LINK_SPEED_4GHZ: + case LPFC_LINK_SPEED_8GHZ: + case LPFC_LINK_SPEED_10GHZ: + case LPFC_LINK_SPEED_16GHZ: + case LPFC_LINK_SPEED_32GHZ: + case LPFC_LINK_SPEED_64GHZ: + case LPFC_LINK_SPEED_128GHZ: + case LPFC_LINK_SPEED_256GHZ: + break; + default: + phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN; + break; + } + } + + if (phba->fc_topology && + phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3314 Toplogy changed was 0x%x is 0x%x\n", + phba->fc_topology, + bf_get(lpfc_mbx_read_top_topology, la)); + phba->fc_topology_changed = 1; + } + + phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); + phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA); + + shost = lpfc_shost_from_vport(vport); + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { + phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; + + /* if npiv is enabled and this adapter supports npiv log + * a message that npiv is not supported in this topology + */ + if (phba->cfg_enable_npiv && phba->max_vpi) + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, + "1309 Link Up Event npiv not supported in loop " + "topology\n"); + /* Get Loop Map information */ + if (bf_get(lpfc_mbx_read_top_il, la)) + fc_flags |= FC_LBIT; + + vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la); + i = la->lilpBde64.tus.f.bdeSize; + + if (i == 0) { + phba->alpa_map[0] = 0; + } else { + if (vport->cfg_log_verbose & LOG_LINK_EVENT) { + int numalpa, j, k; + union { + uint8_t pamap[16]; + struct { + uint32_t wd1; + uint32_t wd2; + uint32_t wd3; + uint32_t wd4; + } pa; + } un; + numalpa = phba->alpa_map[0]; + j = 0; + while (j < numalpa) { + memset(un.pamap, 0, 16); + for (k = 1; j < numalpa; k++) { + un.pamap[k - 1] = + phba->alpa_map[j + 1]; + j++; + if (k == 16) + break; + } + /* Link Up Event ALPA map */ + lpfc_printf_log(phba, + KERN_WARNING, + LOG_LINK_EVENT, + "1304 Link Up Event " + "ALPA map Data: x%x " + "x%x x%x x%x\n", + un.pa.wd1, un.pa.wd2, + un.pa.wd3, un.pa.wd4); + } + } + } + } else { + if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { + if (phba->max_vpi && phba->cfg_enable_npiv && + (phba->sli_rev >= LPFC_SLI_REV3)) + phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; + } + vport->fc_myDID = phba->fc_pref_DID; + fc_flags |= FC_LBIT; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + + if (fc_flags) { + spin_lock_irqsave(shost->host_lock, iflags); + vport->fc_flag |= fc_flags; + spin_unlock_irqrestore(shost->host_lock, iflags); + } + + lpfc_linkup(phba); + sparam_mbox = NULL; + + sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!sparam_mbox) + goto out; + + rc = lpfc_read_sparam(phba, sparam_mbox, 0); + if (rc) { + mempool_free(sparam_mbox, 
phba->mbox_mem_pool); + goto out; + } + sparam_mbox->vport = vport; + sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; + rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_mbox_rsrc_cleanup(phba, sparam_mbox, MBOX_THD_UNLOCKED); + goto out; + } + + if (!(phba->hba_flag & HBA_FCOE_MODE)) { + cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!cfglink_mbox) + goto out; + vport->port_state = LPFC_LOCAL_CFG_LINK; + lpfc_config_link(phba, cfglink_mbox); + cfglink_mbox->vport = vport; + cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; + rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mempool_free(cfglink_mbox, phba->mbox_mem_pool); + goto out; + } + } else { + vport->port_state = LPFC_VPORT_UNKNOWN; + /* + * Add the driver's default FCF record at FCF index 0 now. This + * is phase 1 implementation that support FCF index 0 and driver + * defaults. + */ + if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { + fcf_record = kzalloc(sizeof(struct fcf_record), + GFP_KERNEL); + if (unlikely(!fcf_record)) { + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "2554 Could not allocate memory for " + "fcf record\n"); + rc = -ENODEV; + goto out; + } + + lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, + LPFC_FCOE_FCF_DEF_INDEX); + rc = lpfc_sli4_add_fcf_record(phba, fcf_record); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "2013 Could not manually add FCF " + "record 0, status %d\n", rc); + rc = -ENODEV; + kfree(fcf_record); + goto out; + } + kfree(fcf_record); + } + /* + * The driver is expected to do FIP/FCF. Call the port + * and get the FCF Table. + */ + spin_lock_irqsave(&phba->hbalock, iflags); + if (phba->hba_flag & FCF_TS_INPROG) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + return; + } + /* This is the initial FCF discovery scan */ + phba->fcf.fcf_flag |= FCF_INIT_DISC; + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2778 Start FCF table scan at linkup\n"); + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, + LPFC_FCOE_FCF_GET_FIRST); + if (rc) { + spin_lock_irqsave(&phba->hbalock, iflags); + phba->fcf.fcf_flag &= ~FCF_INIT_DISC; + spin_unlock_irqrestore(&phba->hbalock, iflags); + goto out; + } + /* Reset FCF roundrobin bmask for new discovery */ + lpfc_sli4_clear_fcf_rr_bmask(phba); + } + + /* Prepare for LINK up registrations */ + memset(phba->os_host_name, 0, sizeof(phba->os_host_name)); + scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s", + init_utsname()->nodename); + return; +out: + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n", + vport->port_state, sparam_mbox, cfglink_mbox); + lpfc_issue_clear_la(phba, vport); + return; +} + +static void +lpfc_enable_la(struct lpfc_hba *phba) +{ + uint32_t control; + struct lpfc_sli *psli = &phba->sli; + spin_lock_irq(&phba->hbalock); + psli->sli_flag |= LPFC_PROCESS_LA; + if (phba->sli_rev <= LPFC_SLI_REV3) { + control = readl(phba->HCregaddr); + control |= HC_LAINT_ENA; + writel(control, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } + spin_unlock_irq(&phba->hbalock); +} + +static void +lpfc_mbx_issue_link_down(struct lpfc_hba *phba) +{ + lpfc_linkdown(phba); + lpfc_enable_la(phba); + lpfc_unregister_unused_fcf(phba); + /* turn on Link Attention interrupts - no CLEAR_LA needed */ +} + + +/* + * This routine 
handles processing a READ_TOPOLOGY mailbox + * command upon completion. It is setup in the LPFC_MBOXQ + * as the completion routine when the command is + * handed off to the SLI layer. SLI4 only. + */ +void +lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_mbx_read_top *la; + struct lpfc_sli_ring *pring; + MAILBOX_t *mb = &pmb->u.mb; + struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); + uint8_t attn_type; + unsigned long iflags; + + /* Unblock ELS traffic */ + pring = lpfc_phba_elsring(phba); + if (pring) + pring->flag &= ~LPFC_STOP_IOCB_EVENT; + + /* Check for error */ + if (mb->mbxStatus) { + lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, + "1307 READ_LA mbox error x%x state x%x\n", + mb->mbxStatus, vport->port_state); + lpfc_mbx_issue_link_down(phba); + phba->link_state = LPFC_HBA_ERROR; + goto lpfc_mbx_cmpl_read_topology_free_mbuf; + } + + la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; + attn_type = bf_get(lpfc_mbx_read_top_att_type, la); + + memcpy(&phba->alpa_map[0], mp->virt, 128); + + spin_lock_irqsave(shost->host_lock, iflags); + if (bf_get(lpfc_mbx_read_top_pb, la)) + vport->fc_flag |= FC_BYPASSED_MODE; + else + vport->fc_flag &= ~FC_BYPASSED_MODE; + spin_unlock_irqrestore(shost->host_lock, iflags); + + if (phba->fc_eventTag <= la->eventTag) { + phba->fc_stat.LinkMultiEvent++; + if (attn_type == LPFC_ATT_LINK_UP) + if (phba->fc_eventTag != 0) + lpfc_linkdown(phba); + } + + phba->fc_eventTag = la->eventTag; + phba->link_events++; + if (attn_type == LPFC_ATT_LINK_UP) { + phba->fc_stat.LinkUp++; + if (phba->link_flag & LS_LOOPBACK_MODE) { + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, + "1306 Link Up Event in loop back mode " + "x%x received Data: x%x x%x x%x x%x\n", + la->eventTag, phba->fc_eventTag, + bf_get(lpfc_mbx_read_top_alpa_granted, + la), + bf_get(lpfc_mbx_read_top_link_spd, la), + phba->alpa_map[0]); + } else { + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, + "1303 Link Up Event x%x received " + "Data: x%x x%x x%x x%x x%x\n", + la->eventTag, phba->fc_eventTag, + bf_get(lpfc_mbx_read_top_alpa_granted, + la), + bf_get(lpfc_mbx_read_top_link_spd, la), + phba->alpa_map[0], + bf_get(lpfc_mbx_read_top_fa, la)); + } + lpfc_mbx_process_link_up(phba, la); + + if (phba->cmf_active_mode != LPFC_CFG_OFF) + lpfc_cmf_signal_init(phba); + + if (phba->lmt & LMT_64Gb) + lpfc_read_lds_params(phba); + + } else if (attn_type == LPFC_ATT_LINK_DOWN || + attn_type == LPFC_ATT_UNEXP_WWPN) { + phba->fc_stat.LinkDown++; + if (phba->link_flag & LS_LOOPBACK_MODE) + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, + "1308 Link Down Event in loop back mode " + "x%x received " + "Data: x%x x%x x%x\n", + la->eventTag, phba->fc_eventTag, + phba->pport->port_state, vport->fc_flag); + else if (attn_type == LPFC_ATT_UNEXP_WWPN) + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, + "1313 Link Down Unexpected FA WWPN Event x%x " + "received Data: x%x x%x x%x x%x\n", + la->eventTag, phba->fc_eventTag, + phba->pport->port_state, vport->fc_flag, + bf_get(lpfc_mbx_read_top_fa, la)); + else + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, + "1305 Link Down Event x%x received " + "Data: x%x x%x x%x x%x\n", + la->eventTag, phba->fc_eventTag, + phba->pport->port_state, vport->fc_flag, + bf_get(lpfc_mbx_read_top_fa, la)); + lpfc_mbx_issue_link_down(phba); + } + + if ((phba->sli_rev < LPFC_SLI_REV4) && + bf_get(lpfc_mbx_read_top_fa, la)) + 
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, + "1311 fa %d\n", + bf_get(lpfc_mbx_read_top_fa, la)); + +lpfc_mbx_cmpl_read_topology_free_mbuf: + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); +} + +/* + * This routine handles processing a REG_LOGIN mailbox + * command upon completion. It is setup in the LPFC_MBOXQ + * as the completion routine when the command is + * handed off to the SLI layer. + */ +void +lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf; + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; + + /* The driver calls the state machine with the pmb pointer + * but wants to make sure a stale ctx_buf isn't acted on. + * The ctx_buf is restored later and cleaned up. + */ + pmb->ctx_buf = NULL; + pmb->ctx_ndlp = NULL; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY, + "0002 rpi:%x DID:%x flg:%x %d x%px\n", + ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, + kref_read(&ndlp->kref), + ndlp); + if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + + if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL || + ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { + /* We rcvd a rscn after issuing this + * mbox reg login, we may have cycled + * back through the state and be + * back at reg login state so this + * mbox needs to be ignored becase + * there is another reg login in + * process. + */ + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; + spin_unlock_irq(&ndlp->lock); + + /* + * We cannot leave the RPI registered because + * if we go thru discovery again for this ndlp + * a subsequent REG_RPI will fail. + */ + ndlp->nlp_flag |= NLP_RPI_REGISTERED; + lpfc_unreg_rpi(vport, ndlp); + } + + /* Call state machine */ + lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN); + pmb->ctx_buf = mp; + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); + + /* decrement the node reference count held for this callback + * function. 
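+ * This balances the reference taken when the ndlp was attached to
+ * the REG_LOGIN mailbox as its ctx_ndlp.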
+ */ + lpfc_nlp_put(ndlp); + + return; +} + +static void +lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb = &pmb->u.mb; + struct lpfc_vport *vport = pmb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + switch (mb->mbxStatus) { + case 0x0011: + case 0x0020: + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0911 cmpl_unreg_vpi, mb status = 0x%x\n", + mb->mbxStatus); + break; + /* If VPI is busy, reset the HBA */ + case 0x9700: + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n", + vport->vpi, mb->mbxStatus); + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_workq_post_event(phba, NULL, NULL, + LPFC_EVT_RESET_HBA); + } + spin_lock_irq(shost->host_lock); + vport->vpi_state &= ~LPFC_VPI_REGISTERED; + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); + mempool_free(pmb, phba->mbox_mem_pool); + lpfc_cleanup_vports_rrqs(vport, NULL); + /* + * This shost reference might have been taken at the beginning of + * lpfc_vport_delete() + */ + if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport)) + scsi_host_put(shost); +} + +int +lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mbox; + int rc; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return 1; + + lpfc_unreg_vpi(phba, vport->vpi, mbox); + mbox->vport = vport; + mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "1800 Could not issue unreg_vpi\n"); + mempool_free(mbox, phba->mbox_mem_pool); + return rc; + } + return 0; +} + +static void +lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + MAILBOX_t *mb = &pmb->u.mb; + + switch (mb->mbxStatus) { + case 0x0011: + case 0x9601: + case 0x9602: + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0912 cmpl_reg_vpi, mb status = 0x%x\n", + mb->mbxStatus); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + spin_unlock_irq(shost->host_lock); + vport->fc_myDID = 0; + + if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if (phba->nvmet_support) + lpfc_nvmet_update_targetport(phba); + else + lpfc_nvme_update_localport(vport); + } + goto out; + } + + spin_lock_irq(shost->host_lock); + vport->vpi_state |= LPFC_VPI_REGISTERED; + vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); + vport->num_disc_nodes = 0; + /* go thru NPR list and issue ELS PLOGIs */ + if (vport->fc_npr_cnt) + lpfc_els_disc_plogi(vport); + + if (!vport->num_disc_nodes) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + } + vport->port_state = LPFC_VPORT_READY; + +out: + mempool_free(pmb, phba->mbox_mem_pool); + return; +} + +/** + * lpfc_create_static_vport - Read HBA config region to create static vports. + * @phba: pointer to lpfc hba data structure. + * + * This routine issue a DUMP mailbox command for config region 22 to get + * the list of static vports to be created. The function create vports + * based on the information returned from the HBA. 
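+ * The region is read with repeated DUMP commands until the whole
+ * static_vport_info structure has been copied; an fc_vport is then
+ * created for every entry with a valid WWPN and WWNN.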
+ **/ +void +lpfc_create_static_vport(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *pmb = NULL; + MAILBOX_t *mb; + struct static_vport_info *vport_info; + int mbx_wait_rc = 0, i; + struct fc_vport_identifiers vport_id; + struct fc_vport *new_fc_vport; + struct Scsi_Host *shost; + struct lpfc_vport *vport; + uint16_t offset = 0; + uint8_t *vport_buff; + struct lpfc_dmabuf *mp; + uint32_t byte_count = 0; + + pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0542 lpfc_create_static_vport failed to" + " allocate mailbox memory\n"); + return; + } + memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); + mb = &pmb->u.mb; + + vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL); + if (!vport_info) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0543 lpfc_create_static_vport failed to" + " allocate vport_info\n"); + mempool_free(pmb, phba->mbox_mem_pool); + return; + } + + vport_buff = (uint8_t *) vport_info; + do { + /* While loop iteration forces a free dma buffer from + * the previous loop because the mbox is reused and + * the dump routine is a single-use construct. + */ + if (pmb->ctx_buf) { + mp = (struct lpfc_dmabuf *)pmb->ctx_buf; + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + pmb->ctx_buf = NULL; + } + if (lpfc_dump_static_vport(phba, pmb, offset)) + goto out; + + pmb->vport = phba->pport; + mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb, + LPFC_MBOX_TMO); + + if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0544 lpfc_create_static_vport failed to" + " issue dump mailbox command ret 0x%x " + "status 0x%x\n", + mbx_wait_rc, mb->mbxStatus); + goto out; + } + + if (phba->sli_rev == LPFC_SLI_REV4) { + byte_count = pmb->u.mqe.un.mb_words[5]; + mp = (struct lpfc_dmabuf *)pmb->ctx_buf; + if (byte_count > sizeof(struct static_vport_info) - + offset) + byte_count = sizeof(struct static_vport_info) + - offset; + memcpy(vport_buff + offset, mp->virt, byte_count); + offset += byte_count; + } else { + if (mb->un.varDmp.word_cnt > + sizeof(struct static_vport_info) - offset) + mb->un.varDmp.word_cnt = + sizeof(struct static_vport_info) + - offset; + byte_count = mb->un.varDmp.word_cnt; + lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, + vport_buff + offset, + byte_count); + + offset += byte_count; + } + + } while (byte_count && + offset < sizeof(struct static_vport_info)); + + + if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) || + ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK) + != VPORT_INFO_REV)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0545 lpfc_create_static_vport bad" + " information header 0x%x 0x%x\n", + le32_to_cpu(vport_info->signature), + le32_to_cpu(vport_info->rev) & + VPORT_INFO_REV_MASK); + + goto out; + } + + shost = lpfc_shost_from_vport(phba->pport); + + for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) { + memset(&vport_id, 0, sizeof(vport_id)); + vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn); + vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn); + if (!vport_id.port_name || !vport_id.node_name) + continue; + + vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; + vport_id.vport_type = FC_PORTTYPE_NPIV; + vport_id.disable = false; + new_fc_vport = fc_vport_create(shost, 0, &vport_id); + + if (!new_fc_vport) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0546 lpfc_create_static_vport failed to" + " create vport\n"); + continue; + } + + vport = *(struct lpfc_vport 
**)new_fc_vport->dd_data; + vport->vport_flag |= STATIC_VPORT; + } + +out: + kfree(vport_info); + if (mbx_wait_rc != MBX_TIMEOUT) + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); +} + +/* + * This routine handles processing a Fabric REG_LOGIN mailbox + * command upon completion. It is setup in the LPFC_MBOXQ + * as the completion routine when the command is + * handed off to the SLI layer. + */ +void +lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + MAILBOX_t *mb = &pmb->u.mb; + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; + struct Scsi_Host *shost; + + pmb->ctx_ndlp = NULL; + + if (mb->mbxStatus) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0258 Register Fabric login error: 0x%x\n", + mb->mbxStatus); + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { + /* FLOGI failed, use loop map to make discovery list */ + lpfc_disc_list_loopmap(vport); + + /* Start discovery */ + lpfc_disc_start(vport); + /* Decrement the reference count to ndlp after the + * reference to the ndlp are done. + */ + lpfc_nlp_put(ndlp); + return; + } + + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + /* Decrement the reference count to ndlp after the reference + * to the ndlp are done. + */ + lpfc_nlp_put(ndlp); + return; + } + + if (phba->sli_rev < LPFC_SLI_REV4) + ndlp->nlp_rpi = mb->un.varWords[0]; + ndlp->nlp_flag |= NLP_RPI_REGISTERED; + ndlp->nlp_type |= NLP_FABRIC; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + + if (vport->port_state == LPFC_FABRIC_CFG_LINK) { + /* when physical port receive logo donot start + * vport discovery */ + if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) + lpfc_start_fdiscs(phba); + else { + shost = lpfc_shost_from_vport(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ; + spin_unlock_irq(shost->host_lock); + } + lpfc_do_scr_ns_plogi(phba, vport); + } + + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); + + /* Drop the reference count from the mbox at the end after + * all the current reference to the ndlp have been done. + */ + lpfc_nlp_put(ndlp); + return; +} + + /* + * This routine will issue a GID_FT for each FC4 Type supported + * by the driver. ALL GID_FTs must complete before discovery is started. 
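+ * Returns the number of GID_FT requests now outstanding (gidft_inp);
+ * a return of 0 means none were issued and the caller should finish
+ * discovery itself.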
+ */ +int +lpfc_issue_gidft(struct lpfc_vport *vport) +{ + /* Good status, issue CT Request to NameServer */ + if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) { + if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) { + /* Cannot issue NameServer FCP Query, so finish up + * discovery + */ + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0604 %s FC TYPE %x %s\n", + "Failed to issue GID_FT to ", + FC_TYPE_FCP, + "Finishing discovery."); + return 0; + } + vport->gidft_inp++; + } + + if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) { + /* Cannot issue NameServer NVME Query, so finish up + * discovery + */ + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0605 %s FC_TYPE %x %s %d\n", + "Failed to issue GID_FT to ", + FC_TYPE_NVME, + "Finishing discovery: gidftinp ", + vport->gidft_inp); + if (vport->gidft_inp == 0) + return 0; + } else + vport->gidft_inp++; + } + return vport->gidft_inp; +} + +/** + * lpfc_issue_gidpt - issue a GID_PT for all N_Ports + * @vport: The virtual port for which this call is being executed. + * + * This routine will issue a GID_PT to get a list of all N_Ports + * + * Return value : + * 0 - Failure to issue a GID_PT + * 1 - GID_PT issued + **/ +int +lpfc_issue_gidpt(struct lpfc_vport *vport) +{ + /* Good status, issue CT Request to NameServer */ + if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) { + /* Cannot issue NameServer FCP Query, so finish up + * discovery + */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0606 %s Port TYPE %x %s\n", + "Failed to issue GID_PT to ", + GID_PT_N_PORT, + "Finishing discovery."); + return 0; + } + vport->gidft_inp++; + return 1; +} + +/* + * This routine handles processing a NameServer REG_LOGIN mailbox + * command upon completion. It is setup in the LPFC_MBOXQ + * as the completion routine when the command is + * handed off to the SLI layer. + */ +void +lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb = &pmb->u.mb; + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; + struct lpfc_vport *vport = pmb->vport; + int rc; + + pmb->ctx_ndlp = NULL; + vport->gidft_inp = 0; + + if (mb->mbxStatus) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0260 Register NameServer error: 0x%x\n", + mb->mbxStatus); + +out: + /* decrement the node reference count held for this + * callback function. + */ + lpfc_nlp_put(ndlp); + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); + + /* If the node is not registered with the scsi or nvme + * transport, remove the fabric node. The failed reg_login + * is terminal and forces the removal of the last node + * reference. 
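+ * Dropping that reference allows the node to be released once the
+ * last remaining reference is gone.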
+ */ + if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + spin_unlock_irq(&ndlp->lock); + lpfc_nlp_put(ndlp); + } + + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { + /* + * RegLogin failed, use loop map to make discovery + * list + */ + lpfc_disc_list_loopmap(vport); + + /* Start discovery */ + lpfc_disc_start(vport); + return; + } + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + return; + } + + if (phba->sli_rev < LPFC_SLI_REV4) + ndlp->nlp_rpi = mb->un.varWords[0]; + ndlp->nlp_flag |= NLP_RPI_REGISTERED; + ndlp->nlp_type |= NLP_FABRIC; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, + "0003 rpi:%x DID:%x flg:%x %d x%px\n", + ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, + kref_read(&ndlp->kref), + ndlp); + + if (vport->port_state < LPFC_VPORT_READY) { + /* Link up discovery requires Fabric registration. */ + lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0); + lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); + lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); + lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); + + if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) + lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP); + + if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) + lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, + FC_TYPE_NVME); + + /* Issue SCR just before NameServer GID_FT Query */ + lpfc_issue_els_scr(vport, 0); + + /* Link was bounced or a Fabric LOGO occurred. Start EDC + * with initial FW values provided the congestion mode is + * not off. Note that signals may or may not be supported + * by the adapter but FPIN is provided by default for 1 + * or both missing signals support. + */ + if (phba->cmf_active_mode != LPFC_CFG_OFF) { + phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; + phba->cgn_reg_signal = phba->cgn_init_reg_signal; + rc = lpfc_issue_els_edc(vport, 0); + lpfc_printf_log(phba, KERN_INFO, + LOG_INIT | LOG_ELS | LOG_DISCOVERY, + "4220 Issue EDC status x%x Data x%x\n", + rc, phba->cgn_init_reg_signal); + } else if (phba->lmt & LMT_64Gb) { + /* may send link fault capability descriptor */ + lpfc_issue_els_edc(vport, 0); + } else { + lpfc_issue_els_rdf(vport, 0); + } + } + + vport->fc_ns_retry = 0; + if (lpfc_issue_gidft(vport) == 0) + goto out; + + /* + * At this point in time we may need to wait for multiple + * SLI_CTNS_GID_FT CT commands to complete before we start discovery. + * + * decrement the node reference count held for this + * callback function. + */ + lpfc_nlp_put(ndlp); + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); + return; +} + +/* + * This routine handles processing a Fabric Controller REG_LOGIN mailbox + * command upon completion. It is setup in the LPFC_MBOXQ + * as the completion routine when the command is handed off to the SLI layer. 
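+ * On success the RPI is recorded, the node is marked NLP_FABRIC and
+ * moved to UNMAPPED state; on error the mailbox is simply cleaned up.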
+ */ +void +lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + MAILBOX_t *mb = &pmb->u.mb; + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; + + pmb->ctx_ndlp = NULL; + if (mb->mbxStatus) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0933 %s: Register FC login error: 0x%x\n", + __func__, mb->mbxStatus); + goto out; + } + + lpfc_check_nlp_post_devloss(vport, ndlp); + + if (phba->sli_rev < LPFC_SLI_REV4) + ndlp->nlp_rpi = mb->un.varWords[0]; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0934 %s: Complete FC x%x RegLogin rpi x%x ste x%x\n", + __func__, ndlp->nlp_DID, ndlp->nlp_rpi, + ndlp->nlp_state); + + ndlp->nlp_flag |= NLP_RPI_REGISTERED; + ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + ndlp->nlp_type |= NLP_FABRIC; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + + out: + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); + + /* Drop the reference count from the mbox at the end after + * all the current reference to the ndlp have been done. + */ + lpfc_nlp_put(ndlp); +} + +static void +lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct fc_rport *rport; + struct lpfc_rport_data *rdata; + struct fc_rport_identifiers rport_ids; + struct lpfc_hba *phba = vport->phba; + unsigned long flags; + + if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) + return; + + /* Remote port has reappeared. Re-register w/ FC transport */ + rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); + rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); + rport_ids.port_id = ndlp->nlp_DID; + rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; + + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, + "rport add: did:x%x flg:x%x type x%x", + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); + + /* Don't add the remote port if unloading. */ + if (vport->load_flag & FC_UNLOADING) + return; + + ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); + if (!rport) { + dev_printk(KERN_WARNING, &phba->pcidev->dev, + "Warning: fc_remote_port_add failed\n"); + return; + } + + /* Successful port add. Complete initializing node data */ + rport->maxframe_size = ndlp->nlp_maxframe; + rport->supported_classes = ndlp->nlp_class_sup; + rdata = rport->dd_data; + rdata->pnode = lpfc_nlp_get(ndlp); + if (!rdata->pnode) { + dev_warn(&phba->pcidev->dev, + "Warning - node ref failed. 
Unreg rport\n"); + fc_remote_port_delete(rport); + ndlp->rport = NULL; + return; + } + + spin_lock_irqsave(&ndlp->lock, flags); + ndlp->fc4_xpt_flags |= SCSI_XPT_REGD; + spin_unlock_irqrestore(&ndlp->lock, flags); + + if (ndlp->nlp_type & NLP_FCP_TARGET) + rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET; + if (ndlp->nlp_type & NLP_FCP_INITIATOR) + rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; + if (ndlp->nlp_type & NLP_NVME_INITIATOR) + rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR; + if (ndlp->nlp_type & NLP_NVME_TARGET) + rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET; + if (ndlp->nlp_type & NLP_NVME_DISCOVERY) + rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY; + + if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) + fc_remote_port_rolechg(rport, rport_ids.roles); + + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, + "3183 %s rport x%px DID x%x, role x%x refcnt %d\n", + __func__, rport, rport->port_id, rport->roles, + kref_read(&ndlp->kref)); + + if ((rport->scsi_target_id != -1) && + (rport->scsi_target_id < LPFC_MAX_TARGET)) { + ndlp->nlp_sid = rport->scsi_target_id; + } + + return; +} + +static void +lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) +{ + struct fc_rport *rport = ndlp->rport; + struct lpfc_vport *vport = ndlp->vport; + + if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) + return; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, + "rport delete: did:x%x flg:x%x type x%x", + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "3184 rport unregister x%06x, rport x%px " + "xptflg x%x refcnt %d\n", + ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags, + kref_read(&ndlp->kref)); + + fc_remote_port_delete(rport); + lpfc_nlp_put(ndlp); +} + +static void +lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + unsigned long iflags; + + spin_lock_irqsave(shost->host_lock, iflags); + switch (state) { + case NLP_STE_UNUSED_NODE: + vport->fc_unused_cnt += count; + break; + case NLP_STE_PLOGI_ISSUE: + vport->fc_plogi_cnt += count; + break; + case NLP_STE_ADISC_ISSUE: + vport->fc_adisc_cnt += count; + break; + case NLP_STE_REG_LOGIN_ISSUE: + vport->fc_reglogin_cnt += count; + break; + case NLP_STE_PRLI_ISSUE: + vport->fc_prli_cnt += count; + break; + case NLP_STE_UNMAPPED_NODE: + vport->fc_unmap_cnt += count; + break; + case NLP_STE_MAPPED_NODE: + vport->fc_map_cnt += count; + break; + case NLP_STE_NPR_NODE: + if (vport->fc_npr_cnt == 0 && count == -1) + vport->fc_npr_cnt = 0; + else + vport->fc_npr_cnt += count; + break; + } + spin_unlock_irqrestore(shost->host_lock, iflags); +} + +/* Register a node with backend if not already done */ +void +lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + unsigned long iflags; + + lpfc_check_nlp_post_devloss(vport, ndlp); + + spin_lock_irqsave(&ndlp->lock, iflags); + if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) { + /* Already registered with backend, trigger rescan */ + spin_unlock_irqrestore(&ndlp->lock, iflags); + + if (ndlp->fc4_xpt_flags & NVME_XPT_REGD && + ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) { + lpfc_nvme_rescan_port(vport, ndlp); + } + return; + } + + ndlp->fc4_xpt_flags |= NLP_XPT_REGD; + spin_unlock_irqrestore(&ndlp->lock, iflags); + + if (lpfc_valid_xpt_node(ndlp)) { + vport->phba->nport_event_cnt++; + /* + * Tell the fc transport about the port, if we haven't + * already. 
If we have, and it's a scsi entity, be + */ + lpfc_register_remote_port(vport, ndlp); + } + + /* We are done if we do not have any NVME remote node */ + if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME)) + return; + + /* Notify the NVME transport of this new rport. */ + if (vport->phba->sli_rev >= LPFC_SLI_REV4 && + ndlp->nlp_fc4_type & NLP_FC4_NVME) { + if (vport->phba->nvmet_support == 0) { + /* Register this rport with the transport. + * Only NVME Target Rports are registered with + * the transport. + */ + if (ndlp->nlp_type & NLP_NVME_TARGET) { + vport->phba->nport_event_cnt++; + lpfc_nvme_register_port(vport, ndlp); + } + } else { + /* Just take an NDLP ref count since the + * target does not register rports. + */ + lpfc_nlp_get(ndlp); + } + } +} + +/* Unregister a node with backend if not already done */ +void +lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + unsigned long iflags; + + spin_lock_irqsave(&ndlp->lock, iflags); + if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) { + spin_unlock_irqrestore(&ndlp->lock, iflags); + lpfc_printf_vlog(vport, KERN_INFO, + LOG_ELS | LOG_NODE | LOG_DISCOVERY, + "0999 %s Not regd: ndlp x%px rport x%px DID " + "x%x FLG x%x XPT x%x\n", + __func__, ndlp, ndlp->rport, ndlp->nlp_DID, + ndlp->nlp_flag, ndlp->fc4_xpt_flags); + return; + } + + ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; + spin_unlock_irqrestore(&ndlp->lock, iflags); + + if (ndlp->rport && + ndlp->fc4_xpt_flags & SCSI_XPT_REGD) { + vport->phba->nport_event_cnt++; + lpfc_unregister_remote_port(ndlp); + } else if (!ndlp->rport) { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_ELS | LOG_NODE | LOG_DISCOVERY, + "1999 %s NDLP in devloss x%px DID x%x FLG x%x" + " XPT x%x refcnt %u\n", + __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->fc4_xpt_flags, + kref_read(&ndlp->kref)); + } + + if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) { + vport->phba->nport_event_cnt++; + if (vport->phba->nvmet_support == 0) { + /* Start devloss if target. */ + if (ndlp->nlp_type & NLP_NVME_TARGET) + lpfc_nvme_unregister_port(vport, ndlp); + } else { + /* NVMET has no upcall. 
*/ + lpfc_nlp_put(ndlp); + } + } + +} + +/* + * Adisc state change handling + */ +static void +lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + int new_state) +{ + switch (new_state) { + /* + * Any state to ADISC_ISSUE + * Do nothing, adisc cmpl handling will trigger state changes + */ + case NLP_STE_ADISC_ISSUE: + break; + + /* + * ADISC_ISSUE to mapped states + * Trigger a registration with backend, it will be nop if + * already registered + */ + case NLP_STE_UNMAPPED_NODE: + ndlp->nlp_type |= NLP_FC_NODE; + fallthrough; + case NLP_STE_MAPPED_NODE: + ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + lpfc_nlp_reg_node(vport, ndlp); + break; + + /* + * ADISC_ISSUE to non-mapped states + * We are moving from ADISC_ISSUE to a non-mapped state because + * ADISC failed, we would have skipped unregistering with + * backend, attempt it now + */ + case NLP_STE_NPR_NODE: + ndlp->nlp_flag &= ~NLP_RCV_PLOGI; + fallthrough; + default: + lpfc_nlp_unreg_node(vport, ndlp); + break; + } + +} + +static void +lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + int old_state, int new_state) +{ + /* Trap ADISC changes here */ + if (new_state == NLP_STE_ADISC_ISSUE || + old_state == NLP_STE_ADISC_ISSUE) { + lpfc_handle_adisc_state(vport, ndlp, new_state); + return; + } + + if (new_state == NLP_STE_UNMAPPED_NODE) { + ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + ndlp->nlp_type |= NLP_FC_NODE; + } + if (new_state == NLP_STE_MAPPED_NODE) + ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + if (new_state == NLP_STE_NPR_NODE) + ndlp->nlp_flag &= ~NLP_RCV_PLOGI; + + /* Reg/Unreg for FCP and NVME Transport interface */ + if ((old_state == NLP_STE_MAPPED_NODE || + old_state == NLP_STE_UNMAPPED_NODE)) { + /* For nodes marked for ADISC, Handle unreg in ADISC cmpl + * if linkup. In linkdown do unreg_node + */ + if (!(ndlp->nlp_flag & NLP_NPR_ADISC) || + !lpfc_is_link_up(vport->phba)) + lpfc_nlp_unreg_node(vport, ndlp); + } + + if (new_state == NLP_STE_MAPPED_NODE || + new_state == NLP_STE_UNMAPPED_NODE) + lpfc_nlp_reg_node(vport, ndlp); + + /* + * If the node just added to Mapped list was an FCP target, + * but the remote port registration failed or assigned a target + * id outside the presentable range - move the node to the + * Unmapped List. 
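+ * NLP_TGT_NO_SCSIID is set to record that no usable SCSI target id
+ * was assigned to the remote port.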
+ */ + if ((new_state == NLP_STE_MAPPED_NODE) && + (ndlp->nlp_type & NLP_FCP_TARGET) && + (!ndlp->rport || + ndlp->rport->scsi_target_id == -1 || + ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; + spin_unlock_irq(&ndlp->lock); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + } +} + +static char * +lpfc_nlp_state_name(char *buffer, size_t size, int state) +{ + static char *states[] = { + [NLP_STE_UNUSED_NODE] = "UNUSED", + [NLP_STE_PLOGI_ISSUE] = "PLOGI", + [NLP_STE_ADISC_ISSUE] = "ADISC", + [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN", + [NLP_STE_PRLI_ISSUE] = "PRLI", + [NLP_STE_LOGO_ISSUE] = "LOGO", + [NLP_STE_UNMAPPED_NODE] = "UNMAPPED", + [NLP_STE_MAPPED_NODE] = "MAPPED", + [NLP_STE_NPR_NODE] = "NPR", + }; + + if (state < NLP_STE_MAX_STATE && states[state]) + strscpy(buffer, states[state], size); + else + snprintf(buffer, size, "unknown (%d)", state); + return buffer; +} + +void +lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + int state) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + int old_state = ndlp->nlp_state; + int node_dropped = ndlp->nlp_flag & NLP_DROPPED; + char name1[16], name2[16]; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0904 NPort state transition x%06x, %s -> %s\n", + ndlp->nlp_DID, + lpfc_nlp_state_name(name1, sizeof(name1), old_state), + lpfc_nlp_state_name(name2, sizeof(name2), state)); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, + "node statechg did:x%x old:%d ste:%d", + ndlp->nlp_DID, old_state, state); + + if (node_dropped && old_state == NLP_STE_UNUSED_NODE && + state != NLP_STE_UNUSED_NODE) { + ndlp->nlp_flag &= ~NLP_DROPPED; + lpfc_nlp_get(ndlp); + } + + if (old_state == NLP_STE_NPR_NODE && + state != NLP_STE_NPR_NODE) + lpfc_cancel_retry_delay_tmo(vport, ndlp); + if (old_state == NLP_STE_UNMAPPED_NODE) { + ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; + ndlp->nlp_type &= ~NLP_FC_NODE; + } + + if (list_empty(&ndlp->nlp_listp)) { + spin_lock_irq(shost->host_lock); + list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); + spin_unlock_irq(shost->host_lock); + } else if (old_state) + lpfc_nlp_counters(vport, old_state, -1); + + ndlp->nlp_state = state; + lpfc_nlp_counters(vport, state, 1); + lpfc_nlp_state_cleanup(vport, ndlp, old_state, state); +} + +void +lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (list_empty(&ndlp->nlp_listp)) { + spin_lock_irq(shost->host_lock); + list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); + spin_unlock_irq(shost->host_lock); + } +} + +void +lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + lpfc_cancel_retry_delay_tmo(vport, ndlp); + if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) + lpfc_nlp_counters(vport, ndlp->nlp_state, -1); + spin_lock_irq(shost->host_lock); + list_del_init(&ndlp->nlp_listp); + spin_unlock_irq(shost->host_lock); + lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, + NLP_STE_UNUSED_NODE); +} + +/** + * lpfc_initialize_node - Initialize all fields of node object + * @vport: Pointer to Virtual Port object. + * @ndlp: Pointer to FC node object. + * @did: FC_ID of the node. + * + * This function is always called when node object need to be initialized. + * It initializes all the fields of the node object. 
Although the reference + * to phba from @ndlp can be obtained indirectly through it's reference to + * @vport, a direct reference to phba is taken here by @ndlp. This is due + * to the life-span of the @ndlp might go beyond the existence of @vport as + * the final release of ndlp is determined by its reference count. And, the + * operation on @ndlp needs the reference to phba. + **/ +static inline void +lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + uint32_t did) +{ + INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); + INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); + timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0); + INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp); + + ndlp->nlp_DID = did; + ndlp->vport = vport; + ndlp->phba = vport->phba; + ndlp->nlp_sid = NLP_NO_SID; + ndlp->nlp_fc4_type = NLP_FC4_NONE; + kref_init(&ndlp->kref); + atomic_set(&ndlp->cmd_pending, 0); + ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; + ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; +} + +void +lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + /* + * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should + * be used when lpfc wants to remove the "last" lpfc_nlp_put() to + * release the ndlp from the vport when conditions are correct. + */ + if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) + return; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); + if (vport->phba->sli_rev == LPFC_SLI_REV4) { + lpfc_cleanup_vports_rrqs(vport, ndlp); + lpfc_unreg_rpi(vport, ndlp); + } + + /* NLP_DROPPED means another thread already removed the initial + * reference from lpfc_nlp_init. If set, don't drop it again and + * introduce an imbalance. + */ + spin_lock_irq(&ndlp->lock); + if (!(ndlp->nlp_flag & NLP_DROPPED)) { + ndlp->nlp_flag |= NLP_DROPPED; + spin_unlock_irq(&ndlp->lock); + lpfc_nlp_put(ndlp); + return; + } + spin_unlock_irq(&ndlp->lock); +} + +/* + * Start / ReStart rescue timer for Discovery / RSCN handling + */ +void +lpfc_set_disctmo(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + uint32_t tmo; + + if (vport->port_state == LPFC_LOCAL_CFG_LINK) { + /* For FAN, timeout should be greater than edtov */ + tmo = (((phba->fc_edtov + 999) / 1000) + 1); + } else { + /* Normal discovery timeout should be > than ELS/CT timeout + * FC spec states we need 3 * ratov for CT requests + */ + tmo = ((phba->fc_ratov * 3) + 3); + } + + + if (!timer_pending(&vport->fc_disctmo)) { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "set disc timer: tmo:x%x state:x%x flg:x%x", + tmo, vport->port_state, vport->fc_flag); + } + + mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo)); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_DISC_TMO; + spin_unlock_irq(shost->host_lock); + + /* Start Discovery Timer state */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0247 Start Discovery Timer state x%x " + "Data: x%x x%lx x%x x%x\n", + vport->port_state, tmo, + (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt, + vport->fc_adisc_cnt); + + return; +} + +/* + * Cancel rescue timer for Discovery / RSCN handling + */ +int +lpfc_can_disctmo(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + unsigned long iflags; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "can disc timer: state:x%x rtry:x%x flg:x%x", + vport->port_state, vport->fc_ns_retry, vport->fc_flag); + + /* Turn off discovery timer if its running */ + 
if (vport->fc_flag & FC_DISC_TMO || + timer_pending(&vport->fc_disctmo)) { + spin_lock_irqsave(shost->host_lock, iflags); + vport->fc_flag &= ~FC_DISC_TMO; + spin_unlock_irqrestore(shost->host_lock, iflags); + del_timer_sync(&vport->fc_disctmo); + spin_lock_irqsave(&vport->work_port_lock, iflags); + vport->work_port_events &= ~WORKER_DISC_TMO; + spin_unlock_irqrestore(&vport->work_port_lock, iflags); + } + + /* Cancel Discovery Timer state */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0248 Cancel Discovery Timer state x%x " + "Data: x%x x%x x%x\n", + vport->port_state, vport->fc_flag, + vport->fc_plogi_cnt, vport->fc_adisc_cnt); + return 0; +} + +/* + * Check specified ring for outstanding IOCB on the SLI queue + * Return true if iocb matches the specified nport + */ +int +lpfc_check_sli_ndlp(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + struct lpfc_iocbq *iocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_vport *vport = ndlp->vport; + u8 ulp_command; + u16 ulp_context; + u32 remote_id; + + if (iocb->vport != vport) + return 0; + + ulp_command = get_job_cmnd(phba, iocb); + ulp_context = get_job_ulpcontext(phba, iocb); + remote_id = get_job_els_rsp64_did(phba, iocb); + + if (pring->ringno == LPFC_ELS_RING) { + switch (ulp_command) { + case CMD_GEN_REQUEST64_CR: + if (iocb->ndlp == ndlp) + return 1; + fallthrough; + case CMD_ELS_REQUEST64_CR: + if (remote_id == ndlp->nlp_DID) + return 1; + fallthrough; + case CMD_XMIT_ELS_RSP64_CX: + if (iocb->ndlp == ndlp) + return 1; + } + } else if (pring->ringno == LPFC_FCP_RING) { + /* Skip match check if waiting to relogin to FCP target */ + if ((ndlp->nlp_type & NLP_FCP_TARGET) && + (ndlp->nlp_flag & NLP_DELAY_TMO)) { + return 0; + } + if (ulp_context == ndlp->nlp_rpi) + return 1; + } + return 0; +} + +static void +__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring, + struct list_head *dequeue_list) +{ + struct lpfc_iocbq *iocb, *next_iocb; + + list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { + /* Check to see if iocb matches the nport */ + if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) + /* match, dequeue */ + list_move_tail(&iocb->list, dequeue_list); + } +} + +static void +lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) +{ + struct lpfc_sli *psli = &phba->sli; + uint32_t i; + + spin_lock_irq(&phba->hbalock); + for (i = 0; i < psli->num_rings; i++) + __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i], + dequeue_list); + spin_unlock_irq(&phba->hbalock); +} + +static void +lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) +{ + struct lpfc_sli_ring *pring; + struct lpfc_queue *qp = NULL; + + spin_lock_irq(&phba->hbalock); + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; + spin_lock(&pring->ring_lock); + __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list); + spin_unlock(&pring->ring_lock); + } + spin_unlock_irq(&phba->hbalock); +} + +/* + * Free resources / clean up outstanding I/Os + * associated with nlp_rpi in the LPFC_NODELIST entry. + */ +static int +lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +{ + LIST_HEAD(completions); + + lpfc_fabric_abort_nport(ndlp); + + /* + * Everything that matches on txcmplq will be returned + * by firmware with a no rpi error. 
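+ * Only the txq entries are dequeued here; they are completed below
+ * with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.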
+ */ + if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (phba->sli_rev != LPFC_SLI_REV4) + lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions); + else + lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions); + } + + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); + + return 0; +} + +/** + * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO + * @phba: Pointer to HBA context object. + * @pmb: Pointer to mailbox object. + * + * This function will issue an ELS LOGO command after completing + * the UNREG_RPI. + **/ +static void +lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct lpfc_nodelist *ndlp; + + ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp); + if (!ndlp) + return; + lpfc_issue_els_logo(vport, ndlp, 0); + + /* Check to see if there are any deferred events to process */ + if ((ndlp->nlp_flag & NLP_UNREG_INP) && + (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "1434 UNREG cmpl deferred logo x%x " + "on NPort x%x Data: x%x x%px\n", + ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_defer_did, ndlp); + + ndlp->nlp_flag &= ~NLP_UNREG_INP; + ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); + } else { + /* NLP_RELEASE_RPI is only set for SLI4 ports. */ + if (ndlp->nlp_flag & NLP_RELEASE_RPI) { + lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_RELEASE_RPI; + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + spin_unlock_irq(&ndlp->lock); + } + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_UNREG_INP; + spin_unlock_irq(&ndlp->lock); + } + + /* The node has an outstanding reference for the unreg. Now + * that the LOGO action and cleanup are finished, release + * resources. + */ + lpfc_nlp_put(ndlp); + mempool_free(pmb, phba->mbox_mem_pool); +} + +/* + * Sets the mailbox completion handler to be used for the + * unreg_rpi command. The handler varies based on the state of + * the port and what will be happening to the rpi next. + */ +static void +lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) +{ + unsigned long iflags; + + /* Driver always gets a reference on the mailbox job + * in support of async jobs. + */ + mbox->ctx_ndlp = lpfc_nlp_get(ndlp); + if (!mbox->ctx_ndlp) + return; + + if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { + mbox->mbox_cmpl = lpfc_nlp_logo_unreg; + + } else if (phba->sli_rev == LPFC_SLI_REV4 && + (!(vport->load_flag & FC_UNLOADING)) && + (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= + LPFC_SLI_INTF_IF_TYPE_2) && + (kref_read(&ndlp->kref) > 0)) { + mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; + } else { + if (vport->load_flag & FC_UNLOADING) { + if (phba->sli_rev == LPFC_SLI_REV4) { + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag |= NLP_RELEASE_RPI; + spin_unlock_irqrestore(&ndlp->lock, iflags); + } + } + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + } +} + +/* + * Free rpi associated with LPFC_NODELIST entry. + * This routine is called from lpfc_freenode(), when we are removing + * a LPFC_NODELIST entry. It is also called if the driver initiates a + * LOGO that completes successfully, and we are waiting to PLOGI back + * to the remote NPort. 
In addition, it is called after we receive + * and unsolicated ELS cmd, send back a rsp, the rsp completes and + * we are waiting to PLOGI back to the remote NPort. + */ +int +lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mbox; + int rc, acc_plogi = 1; + uint16_t rpi; + + if (ndlp->nlp_flag & NLP_RPI_REGISTERED || + ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { + if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "3366 RPI x%x needs to be " + "unregistered nlp_flag x%x " + "did x%x\n", + ndlp->nlp_rpi, ndlp->nlp_flag, + ndlp->nlp_DID); + + /* If there is already an UNREG in progress for this ndlp, + * no need to queue up another one. + */ + if (ndlp->nlp_flag & NLP_UNREG_INP) { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "1436 unreg_rpi SKIP UNREG x%x on " + "NPort x%x deferred x%x flg x%x " + "Data: x%px\n", + ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_defer_did, + ndlp->nlp_flag, ndlp); + goto out; + } + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mbox) { + /* SLI4 ports require the physical rpi value. */ + rpi = ndlp->nlp_rpi; + if (phba->sli_rev == LPFC_SLI_REV4) + rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; + + lpfc_unreg_login(phba, vport->vpi, rpi, mbox); + mbox->vport = vport; + lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox); + if (!mbox->ctx_ndlp) { + mempool_free(mbox, phba->mbox_mem_pool); + return 1; + } + + if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr) + /* + * accept PLOGIs after unreg_rpi_cmpl + */ + acc_plogi = 0; + if (((ndlp->nlp_DID & Fabric_DID_MASK) != + Fabric_DID_MASK) && + (!(vport->fc_flag & FC_OFFLINE_MODE))) + ndlp->nlp_flag |= NLP_UNREG_INP; + + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "1433 unreg_rpi UNREG x%x on " + "NPort x%x deferred flg x%x " + "Data:x%px\n", + ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_flag, ndlp); + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + ndlp->nlp_flag &= ~NLP_UNREG_INP; + mempool_free(mbox, phba->mbox_mem_pool); + acc_plogi = 1; + lpfc_nlp_put(ndlp); + } + } else { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "1444 Failed to allocate mempool " + "unreg_rpi UNREG x%x, " + "DID x%x, flag x%x, " + "ndlp x%px\n", + ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_flag, ndlp); + + /* Because mempool_alloc failed, we + * will issue a LOGO here and keep the rpi alive if + * not unloading. + */ + if (!(vport->load_flag & FC_UNLOADING)) { + ndlp->nlp_flag &= ~NLP_UNREG_INP; + lpfc_issue_els_logo(vport, ndlp, 0); + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_NPR_NODE); + } + + return 1; + } + lpfc_no_rpi(phba, ndlp); +out: + if (phba->sli_rev != LPFC_SLI_REV4) + ndlp->nlp_rpi = 0; + ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; + ndlp->nlp_flag &= ~NLP_NPR_ADISC; + if (acc_plogi) + ndlp->nlp_flag &= ~NLP_LOGO_ACC; + return 1; + } + ndlp->nlp_flag &= ~NLP_LOGO_ACC; + return 0; +} + +/** + * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unregister all the currently registered RPIs + * to the HBA. 
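+ * It walks the vport work array and calls lpfc_unreg_rpi() for every
+ * node that still has NLP_RPI_REGISTERED set.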
+ **/ +void +lpfc_unreg_hba_rpis(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost; + int i; + + vports = lpfc_create_vport_work_array(phba); + if (!vports) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2884 Vport array allocation failed \n"); + return; + } + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + shost = lpfc_shost_from_vport(vports[i]); + spin_lock_irq(shost->host_lock); + list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { + if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + /* The mempool_alloc might sleep */ + spin_unlock_irq(shost->host_lock); + lpfc_unreg_rpi(vports[i], ndlp); + spin_lock_irq(shost->host_lock); + } + } + spin_unlock_irq(shost->host_lock); + } + lpfc_destroy_vport_work_array(phba, vports); +} + +void +lpfc_unreg_all_rpis(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mbox; + int rc; + + if (phba->sli_rev == LPFC_SLI_REV4) { + lpfc_sli4_unreg_all_rpis(vport); + return; + } + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mbox) { + lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT, + mbox); + mbox->vport = vport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->ctx_ndlp = NULL; + rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); + if (rc != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + + if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "1836 Could not issue " + "unreg_login(all_rpis) status %d\n", + rc); + } +} + +void +lpfc_unreg_default_rpis(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mbox; + int rc; + + /* Unreg DID is an SLI3 operation. */ + if (phba->sli_rev > LPFC_SLI_REV3) + return; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mbox) { + lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS, + mbox); + mbox->vport = vport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->ctx_ndlp = NULL; + rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); + if (rc != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + + if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "1815 Could not issue " + "unreg_did (default rpis) status %d\n", + rc); + } +} + +/* + * Free resources associated with LPFC_NODELIST entry + * so it can be freed. 
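+ * Any REG_LOGIN mailbox commands still referencing this node are
+ * disarmed or removed first so their completions cannot touch a
+ * released ndlp.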
+ */ +static int +lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mb, *nextmb; + + /* Cleanup node for NPort */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0900 Cleanup node for NPort x%x " + "Data: x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, ndlp->nlp_rpi); + lpfc_dequeue_node(vport, ndlp); + + /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */ + + /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ + if ((mb = phba->sli.mbox_active)) { + if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && + !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && + (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) { + mb->ctx_ndlp = NULL; + mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + } + } + + spin_lock_irq(&phba->hbalock); + /* Cleanup REG_LOGIN completions which are not yet processed */ + list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { + if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) || + (mb->mbox_flag & LPFC_MBX_IMED_UNREG) || + (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp)) + continue; + + mb->ctx_ndlp = NULL; + mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + } + + list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { + if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && + !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && + (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) { + list_del(&mb->list); + lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED); + + /* Don't invoke lpfc_nlp_put. The driver is in + * lpfc_nlp_release context. + */ + } + } + spin_unlock_irq(&phba->hbalock); + + lpfc_els_abort(phba, ndlp); + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_DELAY_TMO; + spin_unlock_irq(&ndlp->lock); + + ndlp->nlp_last_elscmd = 0; + del_timer_sync(&ndlp->nlp_delayfunc); + + list_del_init(&ndlp->els_retry_evt.evt_listp); + list_del_init(&ndlp->dev_loss_evt.evt_listp); + list_del_init(&ndlp->recovery_evt.evt_listp); + lpfc_cleanup_vports_rrqs(vport, ndlp); + + if (phba->sli_rev == LPFC_SLI_REV4) + ndlp->nlp_flag |= NLP_RELEASE_RPI; + + return 0; +} + +static int +lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + uint32_t did) +{ + D_ID mydid, ndlpdid, matchdid; + + if (did == Bcast_DID) + return 0; + + /* First check for Direct match */ + if (ndlp->nlp_DID == did) + return 1; + + /* Next check for area/domain identically equals 0 match */ + mydid.un.word = vport->fc_myDID; + if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) { + return 0; + } + + matchdid.un.word = did; + ndlpdid.un.word = ndlp->nlp_DID; + if (matchdid.un.b.id == ndlpdid.un.b.id) { + if ((mydid.un.b.domain == matchdid.un.b.domain) && + (mydid.un.b.area == matchdid.un.b.area)) { + /* This code is supposed to match the ID + * for a private loop device that is + * connect to fl_port. 
But we need to + * check that the port did not just go + * from pt2pt to fabric or we could end + * up matching ndlp->nlp_DID 000001 to + * fabric DID 0x20101 + */ + if ((ndlpdid.un.b.domain == 0) && + (ndlpdid.un.b.area == 0)) { + if (ndlpdid.un.b.id && + vport->phba->fc_topology == + LPFC_TOPOLOGY_LOOP) + return 1; + } + return 0; + } + + matchdid.un.word = ndlp->nlp_DID; + if ((mydid.un.b.domain == ndlpdid.un.b.domain) && + (mydid.un.b.area == ndlpdid.un.b.area)) { + if ((matchdid.un.b.domain == 0) && + (matchdid.un.b.area == 0)) { + if (matchdid.un.b.id) + return 1; + } + } + } + return 0; +} + +/* Search for a nodelist entry */ +static struct lpfc_nodelist * +__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) +{ + struct lpfc_nodelist *ndlp; + uint32_t data1; + + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + if (lpfc_matchdid(vport, ndlp, did)) { + data1 = (((uint32_t)ndlp->nlp_state << 24) | + ((uint32_t)ndlp->nlp_xri << 16) | + ((uint32_t)ndlp->nlp_type << 8) + ); + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0929 FIND node DID " + "Data: x%px x%x x%x x%x x%x x%px\n", + ndlp, ndlp->nlp_DID, + ndlp->nlp_flag, data1, ndlp->nlp_rpi, + ndlp->active_rrqs_xri_bitmap); + return ndlp; + } + } + + /* FIND node did NOT FOUND */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0932 FIND node did x%x NOT FOUND.\n", did); + return NULL; +} + +struct lpfc_nodelist * +lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp; + unsigned long iflags; + + spin_lock_irqsave(shost->host_lock, iflags); + ndlp = __lpfc_findnode_did(vport, did); + spin_unlock_irqrestore(shost->host_lock, iflags); + return ndlp; +} + +struct lpfc_nodelist * +lpfc_findnode_mapped(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp; + uint32_t data1; + unsigned long iflags; + + spin_lock_irqsave(shost->host_lock, iflags); + + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || + ndlp->nlp_state == NLP_STE_MAPPED_NODE) { + data1 = (((uint32_t)ndlp->nlp_state << 24) | + ((uint32_t)ndlp->nlp_xri << 16) | + ((uint32_t)ndlp->nlp_type << 8) | + ((uint32_t)ndlp->nlp_rpi & 0xff)); + spin_unlock_irqrestore(shost->host_lock, iflags); + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "2025 FIND node DID " + "Data: x%px x%x x%x x%x x%px\n", + ndlp, ndlp->nlp_DID, + ndlp->nlp_flag, data1, + ndlp->active_rrqs_xri_bitmap); + return ndlp; + } + } + spin_unlock_irqrestore(shost->host_lock, iflags); + + /* FIND node did NOT FOUND */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "2026 FIND mapped did NOT FOUND.\n"); + return NULL; +} + +struct lpfc_nodelist * +lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) +{ + struct lpfc_nodelist *ndlp; + + ndlp = lpfc_findnode_did(vport, did); + if (!ndlp) { + if (vport->phba->nvmet_support) + return NULL; + if ((vport->fc_flag & FC_RSCN_MODE) != 0 && + lpfc_rscn_payload_check(vport, did) == 0) + return NULL; + ndlp = lpfc_nlp_init(vport, did); + if (!ndlp) + return NULL; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6453 Setup New Node 2B_DISC x%x " + "Data:x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, vport->fc_flag); + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_NPR_2B_DISC; + spin_unlock_irq(&ndlp->lock); + return ndlp; + } + + /* The NVME Target does not want 
to actively manage an rport. + * The goal is to allow the target to reset its state and clear + * pending IO in preparation for the initiator to recover. + */ + if ((vport->fc_flag & FC_RSCN_MODE) && + !(vport->fc_flag & FC_NDISC_ACTIVE)) { + if (lpfc_rscn_payload_check(vport, did)) { + + /* Since this node is marked for discovery, + * delay timeout is not needed. + */ + lpfc_cancel_retry_delay_tmo(vport, ndlp); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6455 Setup RSCN Node 2B_DISC x%x " + "Data:x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, vport->fc_flag); + + /* NVME Target mode waits until rport is known to be + * impacted by the RSCN before it transitions. No + * active management - just go to NPR provided the + * node had a valid login. + */ + if (vport->phba->nvmet_support) + return ndlp; + + /* If we've already received a PLOGI from this NPort + * we don't need to try to discover it again. + */ + if (ndlp->nlp_flag & NLP_RCV_PLOGI && + !(ndlp->nlp_type & + (NLP_FCP_TARGET | NLP_NVME_TARGET))) + return NULL; + + if (ndlp->nlp_state > NLP_STE_UNUSED_NODE && + ndlp->nlp_state < NLP_STE_PRLI_ISSUE) { + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RECOVERY); + } + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_NPR_2B_DISC; + spin_unlock_irq(&ndlp->lock); + } else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6456 Skip Setup RSCN Node x%x " + "Data:x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, vport->fc_flag); + ndlp = NULL; + } + } else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6457 Setup Active Node 2B_DISC x%x " + "Data:x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, vport->fc_flag); + + /* If the initiator received a PLOGI from this NPort or if the + * initiator is already in the process of discovery on it, + * there's no need to try to discover it again. + */ + if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || + ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || + (!vport->phba->nvmet_support && + ndlp->nlp_flag & NLP_RCV_PLOGI)) + return NULL; + + if (vport->phba->nvmet_support) + return ndlp; + + /* Moving to NPR state clears unsolicited flags and + * allows for rediscovery + */ + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_NPR_2B_DISC; + spin_unlock_irq(&ndlp->lock); + } + return ndlp; +} + +/* Build a list of nodes to discover based on the loopmap */ +void +lpfc_disc_list_loopmap(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + int j; + uint32_t alpa, index; + + if (!lpfc_is_link_up(phba)) + return; + + if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) + return; + + /* Check for loop map present or not */ + if (phba->alpa_map[0]) { + for (j = 1; j <= phba->alpa_map[0]; j++) { + alpa = phba->alpa_map[j]; + if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0)) + continue; + lpfc_setup_disc_node(vport, alpa); + } + } else { + /* No alpamap, so try all alpa's */ + for (j = 0; j < FC_MAXLOOP; j++) { + /* If cfg_scan_down is set, start from highest + * ALPA (0xef) to lowest (0x1). 
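+ * lpfcAlpaArray[] lists the valid loop ALPAs from 0xef down to
+ * 0x01, so indexing it forward scans down while indexing it from
+ * the tail scans up.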
+ */ + if (vport->cfg_scan_down) + index = j; + else + index = FC_MAXLOOP - j - 1; + alpa = lpfcAlpaArray[index]; + if ((vport->fc_myDID & 0xff) == alpa) + continue; + lpfc_setup_disc_node(vport, alpa); + } + } + return; +} + +/* SLI3 only */ +void +lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) +{ + LPFC_MBOXQ_t *mbox; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING]; + struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING]; + int rc; + + /* + * if it's not a physical port or if we already send + * clear_la then don't send it. + */ + if ((phba->link_state >= LPFC_CLEAR_LA) || + (vport->port_type != LPFC_PHYSICAL_PORT) || + (phba->sli_rev == LPFC_SLI_REV4)) + return; + + /* Link up discovery */ + if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) { + phba->link_state = LPFC_CLEAR_LA; + lpfc_clear_la(phba, mbox); + mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; + mbox->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + lpfc_disc_flush_list(vport); + extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT; + fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT; + phba->link_state = LPFC_HBA_ERROR; + } + } +} + +/* Reg_vpi to tell firmware to resume normal operations */ +void +lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) +{ + LPFC_MBOXQ_t *regvpimbox; + + regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (regvpimbox) { + lpfc_reg_vpi(vport, regvpimbox); + regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; + regvpimbox->vport = vport; + if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) + == MBX_NOT_FINISHED) { + mempool_free(regvpimbox, phba->mbox_mem_pool); + } + } +} + +/* Start Link up / RSCN discovery on NPR nodes */ +void +lpfc_disc_start(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + uint32_t num_sent; + uint32_t clear_la_pending; + + if (!lpfc_is_link_up(phba)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3315 Link is not up %x\n", + phba->link_state); + return; + } + + if (phba->link_state == LPFC_CLEAR_LA) + clear_la_pending = 1; + else + clear_la_pending = 0; + + if (vport->port_state < LPFC_VPORT_READY) + vport->port_state = LPFC_DISC_AUTH; + + lpfc_set_disctmo(vport); + + vport->fc_prevDID = vport->fc_myDID; + vport->num_disc_nodes = 0; + + /* Start Discovery state */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0202 Start Discovery port state x%x " + "flg x%x Data: x%x x%x x%x\n", + vport->port_state, vport->fc_flag, vport->fc_plogi_cnt, + vport->fc_adisc_cnt, vport->fc_npr_cnt); + + /* First do ADISCs - if any */ + num_sent = lpfc_els_disc_adisc(vport); + + if (num_sent) + return; + + /* Register the VPI for SLI3, NPIV only. */ + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + !(vport->fc_flag & FC_PT2PT) && + !(vport->fc_flag & FC_RSCN_MODE) && + (phba->sli_rev < LPFC_SLI_REV4)) { + lpfc_issue_clear_la(phba, vport); + lpfc_issue_reg_vpi(phba, vport); + return; + } + + /* + * For SLI2, we need to set port_state to READY and continue + * discovery. 
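+ * (The SLI3 NPIV case returned above after kicking off CLEAR_LA
+ * and REG_VPI.)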
+ */ + if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) { + /* If we get here, there is nothing to ADISC */ + lpfc_issue_clear_la(phba, vport); + + if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { + vport->num_disc_nodes = 0; + /* go thru NPR nodes and issue ELS PLOGIs */ + if (vport->fc_npr_cnt) + lpfc_els_disc_plogi(vport); + + if (!vport->num_disc_nodes) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + } + } + vport->port_state = LPFC_VPORT_READY; + } else { + /* Next do PLOGIs - if any */ + num_sent = lpfc_els_disc_plogi(vport); + + if (num_sent) + return; + + if (vport->fc_flag & FC_RSCN_MODE) { + /* Check to see if more RSCNs came in while we + * were processing this one. + */ + if ((vport->fc_rscn_id_cnt == 0) && + (!(vport->fc_flag & FC_RSCN_DISCOVERY))) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_RSCN_MODE; + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + } else + lpfc_els_handle_rscn(vport); + } + } + return; +} + +/* + * Ignore completion for all IOCBs on tx and txcmpl queue for ELS + * ring the match the sppecified nodelist. + */ +static void +lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +{ + LIST_HEAD(completions); + struct lpfc_iocbq *iocb, *next_iocb; + struct lpfc_sli_ring *pring; + u32 ulp_command; + + pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return; + + /* Error matching iocb on txq or txcmplq + * First check the txq. + */ + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { + if (iocb->ndlp != ndlp) + continue; + + ulp_command = get_job_cmnd(phba, iocb); + + if (ulp_command == CMD_ELS_REQUEST64_CR || + ulp_command == CMD_XMIT_ELS_RSP64_CX) { + + list_move_tail(&iocb->list, &completions); + } + } + + /* Next check the txcmplq */ + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { + if (iocb->ndlp != ndlp) + continue; + + ulp_command = get_job_cmnd(phba, iocb); + + if (ulp_command == CMD_ELS_REQUEST64_CR || + ulp_command == CMD_XMIT_ELS_RSP64_CX) { + lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); + } + } + spin_unlock_irq(&phba->hbalock); + + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); +} + +static void +lpfc_disc_flush_list(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp, *next_ndlp; + struct lpfc_hba *phba = vport->phba; + + if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, + nlp_listp) { + if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || + ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { + lpfc_free_tx(phba, ndlp); + } + } + } +} + +/* + * lpfc_notify_xport_npr - notifies xport of node disappearance + * @vport: Pointer to Virtual Port object. + * + * Transitions all ndlps to NPR state. When lpfc_nlp_set_state + * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered + * and transport notified that the node is gone. 
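+ * It is only called when the PCI channel is offline; see
+ * lpfc_cleanup_discovery_resources().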
+ * Return Code: + * none + */ +static void +lpfc_notify_xport_npr(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp, *next_ndlp; + + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, + nlp_listp) { + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + } +} +void +lpfc_cleanup_discovery_resources(struct lpfc_vport *vport) +{ + lpfc_els_flush_rscn(vport); + lpfc_els_flush_cmd(vport); + lpfc_disc_flush_list(vport); + if (pci_channel_offline(vport->phba->pcidev)) + lpfc_notify_xport_npr(vport); +} + +/*****************************************************************************/ +/* + * NAME: lpfc_disc_timeout + * + * FUNCTION: Fibre Channel driver discovery timeout routine. + * + * EXECUTION ENVIRONMENT: interrupt only + * + * CALLED FROM: + * Timer function + * + * RETURNS: + * none + */ +/*****************************************************************************/ +void +lpfc_disc_timeout(struct timer_list *t) +{ + struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo); + struct lpfc_hba *phba = vport->phba; + uint32_t tmo_posted; + unsigned long flags = 0; + + if (unlikely(!phba)) + return; + + spin_lock_irqsave(&vport->work_port_lock, flags); + tmo_posted = vport->work_port_events & WORKER_DISC_TMO; + if (!tmo_posted) + vport->work_port_events |= WORKER_DISC_TMO; + spin_unlock_irqrestore(&vport->work_port_lock, flags); + + if (!tmo_posted) + lpfc_worker_wake_up(phba); + return; +} + +static void +lpfc_disc_timeout_handler(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_nodelist *ndlp, *next_ndlp; + LPFC_MBOXQ_t *initlinkmbox; + int rc, clrlaerr = 0; + + if (!(vport->fc_flag & FC_DISC_TMO)) + return; + + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_DISC_TMO; + spin_unlock_irq(shost->host_lock); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "disc timeout: state:x%x rtry:x%x flg:x%x", + vport->port_state, vport->fc_ns_retry, vport->fc_flag); + + switch (vport->port_state) { + + case LPFC_LOCAL_CFG_LINK: + /* + * port_state is identically LPFC_LOCAL_CFG_LINK while + * waiting for FAN timeout + */ + lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, + "0221 FAN timeout\n"); + + /* Start discovery by sending FLOGI, clean up old rpis */ + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, + nlp_listp) { + if (ndlp->nlp_state != NLP_STE_NPR_NODE) + continue; + if (ndlp->nlp_type & NLP_FABRIC) { + /* Clean up the ndlp on Fabric connections */ + lpfc_drop_node(vport, ndlp); + + } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + /* Fail outstanding IO now since device + * is marked for PLOGI. + */ + lpfc_unreg_rpi(vport, ndlp); + } + } + if (vport->port_state != LPFC_FLOGI) { + if (phba->sli_rev <= LPFC_SLI_REV3) + lpfc_initial_flogi(vport); + else + lpfc_issue_init_vfi(vport); + return; + } + break; + + case LPFC_FDISC: + case LPFC_FLOGI: + /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ + /* Initial FLOGI timeout */ + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0222 Initial %s timeout\n", + vport->vpi ? "FDISC" : "FLOGI"); + + /* Assume no Fabric and go on with discovery. + * Check for outstanding ELS FLOGI to abort. 
+ */ + + /* FLOGI failed, so just use loop map to make discovery list */ + lpfc_disc_list_loopmap(vport); + + /* Start discovery */ + lpfc_disc_start(vport); + break; + + case LPFC_FABRIC_CFG_LINK: + /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for + NameServer login */ + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0223 Timeout while waiting for " + "NameServer login\n"); + /* Next look for NameServer ndlp */ + ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (ndlp) + lpfc_els_abort(phba, ndlp); + + /* ReStart discovery */ + goto restart_disc; + + case LPFC_NS_QRY: + /* Check for wait for NameServer Rsp timeout */ + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0224 NameServer Query timeout " + "Data: x%x x%x\n", + vport->fc_ns_retry, LPFC_MAX_NS_RETRY); + + if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { + /* Try it one more time */ + vport->fc_ns_retry++; + vport->gidft_inp = 0; + rc = lpfc_issue_gidft(vport); + if (rc == 0) + break; + } + vport->fc_ns_retry = 0; + +restart_disc: + /* + * Discovery is over. + * set port_state to PORT_READY if SLI2. + * cmpl_reg_vpi will set port_state to READY for SLI3. + */ + if (phba->sli_rev < LPFC_SLI_REV4) { + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) + lpfc_issue_reg_vpi(phba, vport); + else { + lpfc_issue_clear_la(phba, vport); + vport->port_state = LPFC_VPORT_READY; + } + } + + /* Setup and issue mailbox INITIALIZE LINK command */ + initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!initlinkmbox) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0206 Device Discovery " + "completion error\n"); + phba->link_state = LPFC_HBA_ERROR; + break; + } + + lpfc_linkdown(phba); + lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, + phba->cfg_link_speed); + initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; + initlinkmbox->vport = vport; + initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); + lpfc_set_loopback_flag(phba); + if (rc == MBX_NOT_FINISHED) + mempool_free(initlinkmbox, phba->mbox_mem_pool); + + break; + + case LPFC_DISC_AUTH: + /* Node Authentication timeout */ + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0227 Node Authentication timeout\n"); + lpfc_disc_flush_list(vport); + + /* + * set port_state to PORT_READY if SLI2. + * cmpl_reg_vpi will set port_state to READY for SLI3. 
+ */ + if (phba->sli_rev < LPFC_SLI_REV4) { + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) + lpfc_issue_reg_vpi(phba, vport); + else { /* NPIV Not enabled */ + lpfc_issue_clear_la(phba, vport); + vport->port_state = LPFC_VPORT_READY; + } + } + break; + + case LPFC_VPORT_READY: + if (vport->fc_flag & FC_RSCN_MODE) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0231 RSCN timeout Data: x%x " + "x%x x%x x%x\n", + vport->fc_ns_retry, LPFC_MAX_NS_RETRY, + vport->port_state, vport->gidft_inp); + + /* Cleanup any outstanding ELS commands */ + lpfc_els_flush_cmd(vport); + + lpfc_els_flush_rscn(vport); + lpfc_disc_flush_list(vport); + } + break; + + default: + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0273 Unexpected discovery timeout, " + "vport State x%x\n", vport->port_state); + break; + } + + switch (phba->link_state) { + case LPFC_CLEAR_LA: + /* CLEAR LA timeout */ + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0228 CLEAR LA timeout\n"); + clrlaerr = 1; + break; + + case LPFC_LINK_UP: + lpfc_issue_clear_la(phba, vport); + fallthrough; + case LPFC_LINK_UNKNOWN: + case LPFC_WARM_START: + case LPFC_INIT_START: + case LPFC_INIT_MBX_CMDS: + case LPFC_LINK_DOWN: + case LPFC_HBA_ERROR: + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0230 Unexpected timeout, hba link " + "state x%x\n", phba->link_state); + clrlaerr = 1; + break; + + case LPFC_HBA_READY: + break; + } + + if (clrlaerr) { + lpfc_disc_flush_list(vport); + if (phba->sli_rev != LPFC_SLI_REV4) { + psli->sli3_ring[(LPFC_EXTRA_RING)].flag &= + ~LPFC_STOP_IOCB_EVENT; + psli->sli3_ring[LPFC_FCP_RING].flag &= + ~LPFC_STOP_IOCB_EVENT; + } + vport->port_state = LPFC_VPORT_READY; + } + return; +} + +/* + * This routine handles processing a NameServer REG_LOGIN mailbox + * command upon completion. It is setup in the LPFC_MBOXQ + * as the completion routine when the command is + * handed off to the SLI layer. + */ +void +lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb = &pmb->u.mb; + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; + struct lpfc_vport *vport = pmb->vport; + + pmb->ctx_ndlp = NULL; + + if (phba->sli_rev < LPFC_SLI_REV4) + ndlp->nlp_rpi = mb->un.varWords[0]; + ndlp->nlp_flag |= NLP_RPI_REGISTERED; + ndlp->nlp_type |= NLP_FABRIC; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, + "0004 rpi:%x DID:%x flg:%x %d x%px\n", + ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, + kref_read(&ndlp->kref), + ndlp); + /* + * Start issuing Fabric-Device Management Interface (FDMI) command to + * 0xfffffa (FDMI well known port). + * DHBA -> DPRT -> RHBA -> RPA (physical port) + * DPRT -> RPRT (vports) + */ + if (vport->port_type == LPFC_PHYSICAL_PORT) { + phba->link_flag &= ~LS_CT_VEN_RPA; /* For extra Vendor RPA */ + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); + } else { + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); + } + + + /* decrement the node reference count held for this callback + * function. 
+ */ + lpfc_nlp_put(ndlp); + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); + return; +} + +static int +lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param) +{ + uint16_t *rpi = param; + + return ndlp->nlp_rpi == *rpi; +} + +static int +lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param) +{ + return memcmp(&ndlp->nlp_portname, param, + sizeof(ndlp->nlp_portname)) == 0; +} + +static struct lpfc_nodelist * +__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) +{ + struct lpfc_nodelist *ndlp; + + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + if (filter(ndlp, param)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "3185 FIND node filter %ps DID " + "ndlp x%px did x%x flg x%x st x%x " + "xri x%x type x%x rpi x%x\n", + filter, ndlp, ndlp->nlp_DID, + ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_xri, ndlp->nlp_type, + ndlp->nlp_rpi); + return ndlp; + } + } + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "3186 FIND node filter %ps NOT FOUND.\n", filter); + return NULL; +} + +/* + * This routine looks up the ndlp lists for the given RPI. If rpi found it + * returns the node list element pointer else return NULL. + */ +struct lpfc_nodelist * +__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) +{ + return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi); +} + +/* + * This routine looks up the ndlp lists for the given WWPN. If WWPN found it + * returns the node element list pointer else return NULL. + */ +struct lpfc_nodelist * +lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp; + + spin_lock_irq(shost->host_lock); + ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn); + spin_unlock_irq(shost->host_lock); + return ndlp; +} + +/* + * This routine looks up the ndlp lists for the given RPI. If the rpi + * is found, the routine returns the node element list pointer else + * return NULL. + */ +struct lpfc_nodelist * +lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp; + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + ndlp = __lpfc_findnode_rpi(vport, rpi); + spin_unlock_irqrestore(shost->host_lock, flags); + return ndlp; +} + +/** + * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier + * @phba: pointer to lpfc hba data structure. + * @vpi: the physical host virtual N_Port identifier. + * + * This routine finds a vport on a HBA (referred by @phba) through a + * @vpi. The function walks the HBA's vport list and returns the address + * of the vport with the matching @vpi. + * + * Return code + * NULL - No vport with the matching @vpi found + * Otherwise - Address to the vport with the matching @vpi. + **/ +struct lpfc_vport * +lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) +{ + struct lpfc_vport *vport; + unsigned long flags; + int i = 0; + + /* The physical ports are always vpi 0 - translate is unnecessary. */ + if (vpi > 0) { + /* + * Translate the physical vpi to the logical vpi. The + * vport stores the logical vpi. 
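+ * phba->vpi_ids[] is indexed by the logical vpi and holds the
+ * physical vpi, so the matching index i is what gets compared
+ * against vport->vpi below.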
+ */ + for (i = 0; i <= phba->max_vpi; i++) { + if (vpi == phba->vpi_ids[i]) + break; + } + + if (i > phba->max_vpi) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2936 Could not find Vport mapped " + "to vpi %d\n", vpi); + return NULL; + } + } + + spin_lock_irqsave(&phba->port_list_lock, flags); + list_for_each_entry(vport, &phba->port_list, listentry) { + if (vport->vpi == i) { + spin_unlock_irqrestore(&phba->port_list_lock, flags); + return vport; + } + } + spin_unlock_irqrestore(&phba->port_list_lock, flags); + return NULL; +} + +struct lpfc_nodelist * +lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did) +{ + struct lpfc_nodelist *ndlp; + int rpi = LPFC_RPI_ALLOC_ERROR; + + if (vport->phba->sli_rev == LPFC_SLI_REV4) { + rpi = lpfc_sli4_alloc_rpi(vport->phba); + if (rpi == LPFC_RPI_ALLOC_ERROR) + return NULL; + } + + ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL); + if (!ndlp) { + if (vport->phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_free_rpi(vport->phba, rpi); + return NULL; + } + + memset(ndlp, 0, sizeof (struct lpfc_nodelist)); + + spin_lock_init(&ndlp->lock); + + lpfc_initialize_node(vport, ndlp, did); + INIT_LIST_HEAD(&ndlp->nlp_listp); + if (vport->phba->sli_rev == LPFC_SLI_REV4) { + ndlp->nlp_rpi = rpi; + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, + "0007 Init New ndlp x%px, rpi:x%x DID:%x " + "flg:x%x refcnt:%d\n", + ndlp, ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_flag, kref_read(&ndlp->kref)); + + ndlp->active_rrqs_xri_bitmap = + mempool_alloc(vport->phba->active_rrq_pool, + GFP_KERNEL); + if (ndlp->active_rrqs_xri_bitmap) + memset(ndlp->active_rrqs_xri_bitmap, 0, + ndlp->phba->cfg_rrq_xri_bitmap_sz); + } + + + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, + "node init: did:x%x", + ndlp->nlp_DID, 0, 0); + + return ndlp; +} + +/* This routine releases all resources associated with a specifc NPort's ndlp + * and mempool_free's the nodelist. + */ +static void +lpfc_nlp_release(struct kref *kref) +{ + struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, + kref); + struct lpfc_vport *vport = ndlp->vport; + + lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, + "node release: did:x%x flg:x%x type:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n", + __func__, ndlp, ndlp->nlp_DID, + kref_read(&ndlp->kref), ndlp->nlp_rpi); + + /* remove ndlp from action. */ + lpfc_cancel_retry_delay_tmo(vport, ndlp); + lpfc_cleanup_node(vport, ndlp); + + /* Not all ELS transactions have registered the RPI with the port. + * In these cases the rpi usage is temporary and the node is + * released when the WQE is completed. Catch this case to free the + * RPI to the pool. Because this node is in the release path, a lock + * is unnecessary. All references are gone and the node has been + * dequeued. + */ + if (ndlp->nlp_flag & NLP_RELEASE_RPI) { + if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR && + !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) { + lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + } + } + + /* The node is not freed back to memory, it is released to a pool so + * the node fields need to be cleaned up. 
+ */ + ndlp->vport = NULL; + ndlp->nlp_state = NLP_STE_FREED_NODE; + ndlp->nlp_flag = 0; + ndlp->fc4_xpt_flags = 0; + + /* free ndlp memory for final ndlp release */ + if (ndlp->phba->sli_rev == LPFC_SLI_REV4) + mempool_free(ndlp->active_rrqs_xri_bitmap, + ndlp->phba->active_rrq_pool); + mempool_free(ndlp, ndlp->phba->nlp_mem_pool); +} + +/* This routine bumps the reference count for a ndlp structure to ensure + * that one discovery thread won't free a ndlp while another discovery thread + * is using it. + */ +struct lpfc_nodelist * +lpfc_nlp_get(struct lpfc_nodelist *ndlp) +{ + unsigned long flags; + + if (ndlp) { + lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, + "node get: did:x%x flg:x%x refcnt:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, + kref_read(&ndlp->kref)); + + /* The check of ndlp usage to prevent incrementing the + * ndlp reference count that is in the process of being + * released. + */ + spin_lock_irqsave(&ndlp->lock, flags); + if (!kref_get_unless_zero(&ndlp->kref)) { + spin_unlock_irqrestore(&ndlp->lock, flags); + lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, + "0276 %s: ndlp:x%px refcnt:%d\n", + __func__, (void *)ndlp, kref_read(&ndlp->kref)); + return NULL; + } + spin_unlock_irqrestore(&ndlp->lock, flags); + } else { + WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__); + } + + return ndlp; +} + +/* This routine decrements the reference count for a ndlp structure. If the + * count goes to 0, this indicates the associated nodelist should be freed. + */ +int +lpfc_nlp_put(struct lpfc_nodelist *ndlp) +{ + if (ndlp) { + lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, + "node put: did:x%x flg:x%x refcnt:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, + kref_read(&ndlp->kref)); + } else { + WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__); + } + + return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0; +} + +/** + * lpfc_fcf_inuse - Check if FCF can be unregistered. + * @phba: Pointer to hba context object. + * + * This function iterate through all FC nodes associated + * will all vports to check if there is any node with + * fc_rports associated with it. If there is an fc_rport + * associated with the node, then the node is either in + * discovered state or its devloss_timer is pending. + */ +static int +lpfc_fcf_inuse(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + int i, ret = 0; + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost; + + vports = lpfc_create_vport_work_array(phba); + + /* If driver cannot allocate memory, indicate fcf is in use */ + if (!vports) + return 1; + + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + shost = lpfc_shost_from_vport(vports[i]); + spin_lock_irq(shost->host_lock); + /* + * IF the CVL_RCVD bit is not set then we have sent the + * flogi. + * If dev_loss fires while we are waiting we do not want to + * unreg the fcf. 
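+ * So when CVL has not been received, report the FCF as in use
+ * and stop scanning.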
+ */ + if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) { + spin_unlock_irq(shost->host_lock); + ret = 1; + goto out; + } + list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { + if (ndlp->rport && + (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { + ret = 1; + spin_unlock_irq(shost->host_lock); + goto out; + } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + ret = 1; + lpfc_printf_log(phba, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "2624 RPI %x DID %x flag %x " + "still logged in\n", + ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_flag); + } + } + spin_unlock_irq(shost->host_lock); + } +out: + lpfc_destroy_vport_work_array(phba, vports); + return ret; +} + +/** + * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi. + * @phba: Pointer to hba context object. + * @mboxq: Pointer to mailbox object. + * + * This function frees memory associated with the mailbox command. + */ +void +lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_vport *vport = mboxq->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (mboxq->u.mb.mbxStatus) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2555 UNREG_VFI mbxStatus error x%x " + "HBA state x%x\n", + mboxq->u.mb.mbxStatus, vport->port_state); + } + spin_lock_irq(shost->host_lock); + phba->pport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + mempool_free(mboxq, phba->mbox_mem_pool); + return; +} + +/** + * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi. + * @phba: Pointer to hba context object. + * @mboxq: Pointer to mailbox object. + * + * This function frees memory associated with the mailbox command. + */ +static void +lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_vport *vport = mboxq->vport; + + if (mboxq->u.mb.mbxStatus) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2550 UNREG_FCFI mbxStatus error x%x " + "HBA state x%x\n", + mboxq->u.mb.mbxStatus, vport->port_state); + } + mempool_free(mboxq, phba->mbox_mem_pool); + return; +} + +/** + * lpfc_unregister_fcf_prep - Unregister fcf record preparation + * @phba: Pointer to hba context object. + * + * This function prepare the HBA for unregistering the currently registered + * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and + * VFIs. 
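+ * The FCF record itself is left registered; callers follow up with
+ * lpfc_sli4_unregister_fcf() to remove it.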
+ */ +int +lpfc_unregister_fcf_prep(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost; + int i = 0, rc; + + /* Unregister RPIs */ + if (lpfc_fcf_inuse(phba)) + lpfc_unreg_hba_rpis(phba); + + /* At this point, all discovery is aborted */ + phba->pport->port_state = LPFC_VPORT_UNKNOWN; + + /* Unregister VPIs */ + vports = lpfc_create_vport_work_array(phba); + if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + /* Stop FLOGI/FDISC retries */ + ndlp = lpfc_findnode_did(vports[i], Fabric_DID); + if (ndlp) + lpfc_cancel_retry_delay_tmo(vports[i], ndlp); + lpfc_cleanup_pending_mbox(vports[i]); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(vports[i]); + lpfc_mbx_unreg_vpi(vports[i]); + shost = lpfc_shost_from_vport(vports[i]); + spin_lock_irq(shost->host_lock); + vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; + spin_unlock_irq(shost->host_lock); + } + lpfc_destroy_vport_work_array(phba, vports); + if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) { + ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); + if (ndlp) + lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); + lpfc_cleanup_pending_mbox(phba->pport); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(phba->pport); + lpfc_mbx_unreg_vpi(phba->pport); + shost = lpfc_shost_from_vport(phba->pport); + spin_lock_irq(shost->host_lock); + phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED; + spin_unlock_irq(shost->host_lock); + } + + /* Cleanup any outstanding ELS commands */ + lpfc_els_flush_all_cmd(phba); + + /* Unregister the physical port VFI */ + rc = lpfc_issue_unreg_vfi(phba->pport); + return rc; +} + +/** + * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record + * @phba: Pointer to hba context object. + * + * This function issues synchronous unregister FCF mailbox command to HBA to + * unregister the currently registered FCF record. The driver does not reset + * the driver FCF usage state flags. + * + * Return 0 if successfully issued, none-zero otherwise. + */ +int +lpfc_sli4_unregister_fcf(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mbox; + int rc; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2551 UNREG_FCFI mbox allocation failed" + "HBA state x%x\n", phba->pport->port_state); + return -ENOMEM; + } + lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2552 Unregister FCFI command failed rc x%x " + "HBA state x%x\n", + rc, phba->pport->port_state); + return -EINVAL; + } + return 0; +} + +/** + * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan + * @phba: Pointer to hba context object. + * + * This function unregisters the currently reigstered FCF. This function + * also tries to find another FCF for discovery by rescan the HBA FCF table. 
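+ * The rescan is issued with LPFC_FCOE_FCF_GET_FIRST, i.e. it is
+ * treated as an initial FCF discovery scan.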
+ */ +void +lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) +{ + int rc; + + /* Preparation for unregistering fcf */ + rc = lpfc_unregister_fcf_prep(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2748 Failed to prepare for unregistering " + "HBA's FCF record: rc=%d\n", rc); + return; + } + + /* Now, unregister FCF record and reset HBA FCF state */ + rc = lpfc_sli4_unregister_fcf(phba); + if (rc) + return; + /* Reset HBA FCF states after successful unregister FCF */ + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag = 0; + spin_unlock_irq(&phba->hbalock); + phba->fcf.current_rec.flag = 0; + + /* + * If driver is not unloading, check if there is any other + * FCF record that can be used for discovery. + */ + if ((phba->pport->load_flag & FC_UNLOADING) || + (phba->link_state < LPFC_LINK_UP)) + return; + + /* This is considered as the initial FCF discovery scan */ + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag |= FCF_INIT_DISC; + spin_unlock_irq(&phba->hbalock); + + /* Reset FCF roundrobin bmask for new discovery */ + lpfc_sli4_clear_fcf_rr_bmask(phba); + + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); + + if (rc) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_INIT_DISC; + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2553 lpfc_unregister_unused_fcf failed " + "to read FCF record HBA state x%x\n", + phba->pport->port_state); + } +} + +/** + * lpfc_unregister_fcf - Unregister the currently registered fcf record + * @phba: Pointer to hba context object. + * + * This function just unregisters the currently reigstered FCF. It does not + * try to find another FCF for discovery. + */ +void +lpfc_unregister_fcf(struct lpfc_hba *phba) +{ + int rc; + + /* Preparation for unregistering fcf */ + rc = lpfc_unregister_fcf_prep(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2749 Failed to prepare for unregistering " + "HBA's FCF record: rc=%d\n", rc); + return; + } + + /* Now, unregister FCF record and reset HBA FCF state */ + rc = lpfc_sli4_unregister_fcf(phba); + if (rc) + return; + /* Set proper HBA FCF states after successful unregister FCF */ + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_REGISTERED; + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected. + * @phba: Pointer to hba context object. + * + * This function check if there are any connected remote port for the FCF and + * if all the devices are disconnected, this function unregister FCFI. + * This function also tries to use another FCF for discovery. + */ +void +lpfc_unregister_unused_fcf(struct lpfc_hba *phba) +{ + /* + * If HBA is not running in FIP mode, if HBA does not support + * FCoE, if FCF discovery is ongoing, or if FCF has not been + * registered, do nothing. + */ + spin_lock_irq(&phba->hbalock); + if (!(phba->hba_flag & HBA_FCOE_MODE) || + !(phba->fcf.fcf_flag & FCF_REGISTERED) || + !(phba->hba_flag & HBA_FIP_SUPPORT) || + (phba->fcf.fcf_flag & FCF_DISCOVERY) || + (phba->pport->port_state == LPFC_FLOGI)) { + spin_unlock_irq(&phba->hbalock); + return; + } + spin_unlock_irq(&phba->hbalock); + + if (lpfc_fcf_inuse(phba)) + return; + + lpfc_unregister_fcf_rescan(phba); +} + +/** + * lpfc_read_fcf_conn_tbl - Create driver FCF connection table. + * @phba: Pointer to hba context object. + * @buff: Buffer containing the FCF connection table as in the config + * region. 
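+ * The table header length is given in 32-bit words and is converted
+ * to a connection-record count before the valid records are copied
+ * onto phba->fcf_conn_rec_list.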
+ * This function create driver data structure for the FCF connection + * record table read from config region 23. + */ +static void +lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, + uint8_t *buff) +{ + struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; + struct lpfc_fcf_conn_hdr *conn_hdr; + struct lpfc_fcf_conn_rec *conn_rec; + uint32_t record_count; + int i; + + /* Free the current connect table */ + list_for_each_entry_safe(conn_entry, next_conn_entry, + &phba->fcf_conn_rec_list, list) { + list_del_init(&conn_entry->list); + kfree(conn_entry); + } + + conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; + record_count = conn_hdr->length * sizeof(uint32_t)/ + sizeof(struct lpfc_fcf_conn_rec); + + conn_rec = (struct lpfc_fcf_conn_rec *) + (buff + sizeof(struct lpfc_fcf_conn_hdr)); + + for (i = 0; i < record_count; i++) { + if (!(conn_rec[i].flags & FCFCNCT_VALID)) + continue; + conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry), + GFP_KERNEL); + if (!conn_entry) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2566 Failed to allocate connection" + " table entry\n"); + return; + } + + memcpy(&conn_entry->conn_rec, &conn_rec[i], + sizeof(struct lpfc_fcf_conn_rec)); + list_add_tail(&conn_entry->list, + &phba->fcf_conn_rec_list); + } + + if (!list_empty(&phba->fcf_conn_rec_list)) { + i = 0; + list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, + list) { + conn_rec = &conn_entry->conn_rec; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3345 FCF connection list rec[%02d]: " + "flags:x%04x, vtag:x%04x, " + "fabric_name:x%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x, " + "switch_name:x%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x\n", i++, + conn_rec->flags, conn_rec->vlan_tag, + conn_rec->fabric_name[0], + conn_rec->fabric_name[1], + conn_rec->fabric_name[2], + conn_rec->fabric_name[3], + conn_rec->fabric_name[4], + conn_rec->fabric_name[5], + conn_rec->fabric_name[6], + conn_rec->fabric_name[7], + conn_rec->switch_name[0], + conn_rec->switch_name[1], + conn_rec->switch_name[2], + conn_rec->switch_name[3], + conn_rec->switch_name[4], + conn_rec->switch_name[5], + conn_rec->switch_name[6], + conn_rec->switch_name[7]); + } + } +} + +/** + * lpfc_read_fcoe_param - Read FCoe parameters from conf region.. + * @phba: Pointer to hba context object. + * @buff: Buffer containing the FCoE parameter data structure. + * + * This function update driver data structure with config + * parameters read from config region 23. + */ +static void +lpfc_read_fcoe_param(struct lpfc_hba *phba, + uint8_t *buff) +{ + struct lpfc_fip_param_hdr *fcoe_param_hdr; + struct lpfc_fcoe_params *fcoe_param; + + fcoe_param_hdr = (struct lpfc_fip_param_hdr *) + buff; + fcoe_param = (struct lpfc_fcoe_params *) + (buff + sizeof(struct lpfc_fip_param_hdr)); + + if ((fcoe_param_hdr->parm_version != FIPP_VERSION) || + (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) + return; + + if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { + phba->valid_vlan = 1; + phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & + 0xFFF; + } + + phba->fc_map[0] = fcoe_param->fc_map[0]; + phba->fc_map[1] = fcoe_param->fc_map[1]; + phba->fc_map[2] = fcoe_param->fc_map[2]; + return; +} + +/** + * lpfc_get_rec_conf23 - Get a record type in config region data. + * @buff: Buffer containing config region 23 data. + * @size: Size of the data buffer. + * @rec_type: Record type to be searched. + * + * This function searches config region data to find the beginning + * of the record specified by record_type. 
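+ * Each record is a TLV: byte 0 of the header word holds the record
+ * type, byte 1 holds the record length in 32-bit words, and the
+ * data words follow the header.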
If record found, this + * function return pointer to the record else return NULL. + */ +static uint8_t * +lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type) +{ + uint32_t offset = 0, rec_length; + + if ((buff[0] == LPFC_REGION23_LAST_REC) || + (size < sizeof(uint32_t))) + return NULL; + + rec_length = buff[offset + 1]; + + /* + * One TLV record has one word header and number of data words + * specified in the rec_length field of the record header. + */ + while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t)) + <= size) { + if (buff[offset] == rec_type) + return &buff[offset]; + + if (buff[offset] == LPFC_REGION23_LAST_REC) + return NULL; + + offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t); + rec_length = buff[offset + 1]; + } + return NULL; +} + +/** + * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23. + * @phba: Pointer to lpfc_hba data structure. + * @buff: Buffer containing config region 23 data. + * @size: Size of the data buffer. + * + * This function parses the FCoE config parameters in config region 23 and + * populate driver data structure with the parameters. + */ +void +lpfc_parse_fcoe_conf(struct lpfc_hba *phba, + uint8_t *buff, + uint32_t size) +{ + uint32_t offset = 0; + uint8_t *rec_ptr; + + /* + * If data size is less than 2 words signature and version cannot be + * verified. + */ + if (size < 2*sizeof(uint32_t)) + return; + + /* Check the region signature first */ + if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2567 Config region 23 has bad signature\n"); + return; + } + + offset += 4; + + /* Check the data structure version */ + if (buff[offset] != LPFC_REGION23_VERSION) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2568 Config region 23 has bad version\n"); + return; + } + offset += 4; + + /* Read FCoE param record */ + rec_ptr = lpfc_get_rec_conf23(&buff[offset], + size - offset, FCOE_PARAM_TYPE); + if (rec_ptr) + lpfc_read_fcoe_param(phba, rec_ptr); + + /* Read FCF connection table */ + rec_ptr = lpfc_get_rec_conf23(&buff[offset], + size - offset, FCOE_CONN_TBL_TYPE); + if (rec_ptr) + lpfc_read_fcf_conn_tbl(phba, rec_ptr); + +} + +/* + * lpfc_error_lost_link - IO failure from link event or FW reset check. + * + * @vport: Pointer to lpfc_vport data structure. + * @ulp_status: IO completion status. + * @ulp_word4: Reason code for the ulp_status. + * + * This function evaluates the ulp_status and ulp_word4 values + * for specific error values that indicate an internal link fault + * or fw reset event for the completing IO. Callers require this + * common data to decide next steps on the IO. + * + * Return: + * false - No link or reset error occurred. + * true - A link or reset error occurred. + */ +bool +lpfc_error_lost_link(struct lpfc_vport *vport, u32 ulp_status, u32 ulp_word4) +{ + /* Mask off the extra port data to get just the reason code. 
*/ + u32 rsn_code = IOERR_PARAM_MASK & ulp_word4; + + if (ulp_status == IOSTAT_LOCAL_REJECT && + (rsn_code == IOERR_SLI_ABORTED || + rsn_code == IOERR_LINK_DOWN || + rsn_code == IOERR_SLI_DOWN)) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI | LOG_ELS, + "0408 Report link error true: \n", + ulp_status, ulp_word4); + return true; + } + + return false; +} diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h new file mode 100644 index 000000000..2108b4cb7 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -0,0 +1,4440 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#define FDMI_DID 0xfffffaU +#define NameServer_DID 0xfffffcU +#define Fabric_Cntl_DID 0xfffffdU +#define Fabric_DID 0xfffffeU +#define Bcast_DID 0xffffffU +#define Mask_DID 0xffffffU +#define CT_DID_MASK 0xffff00U +#define Fabric_DID_MASK 0xfff000U +#define WELL_KNOWN_DID_MASK 0xfffff0U + +#define PT2PT_LocalID 1 +#define PT2PT_RemoteID 2 + +#define FF_DEF_EDTOV 2000 /* Default E_D_TOV (2000ms) */ +#define FF_DEF_ALTOV 15 /* Default AL_TIME (15ms) */ +#define FF_DEF_RATOV 10 /* Default RA_TOV (10s) */ +#define FF_DEF_ARBTOV 1900 /* Default ARB_TOV (1900ms) */ + +#define LPFC_BUF_RING0 64 /* Number of buffers to post to RING + 0 */ + +#define FCELSSIZE 1024 /* maximum ELS transfer size */ + +#define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */ +#define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */ +#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */ + +#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */ +#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */ +#define SLI2_IOCB_CMD_R1_ENTRIES 4 /* SLI-2 extra command ring entries */ +#define SLI2_IOCB_RSP_R1_ENTRIES 4 /* SLI-2 extra response ring entries */ +#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36 /* SLI-2 extra FCP cmd ring entries */ +#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52 /* SLI-2 extra FCP rsp ring entries */ +#define SLI2_IOCB_CMD_R2_ENTRIES 20 /* SLI-2 ELS command ring entries */ +#define SLI2_IOCB_RSP_R2_ENTRIES 20 /* SLI-2 ELS response ring entries */ +#define SLI2_IOCB_CMD_R3_ENTRIES 0 +#define SLI2_IOCB_RSP_R3_ENTRIES 0 +#define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24 +#define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32 + +#define SLI2_IOCB_CMD_SIZE 32 +#define SLI2_IOCB_RSP_SIZE 32 +#define SLI3_IOCB_CMD_SIZE 128 +#define SLI3_IOCB_RSP_SIZE 64 + +#define 
LPFC_UNREG_ALL_RPIS_VPORT 0xffff +#define LPFC_UNREG_ALL_DFLT_RPIS 0xffffffff + +/* vendor ID used in SCSI netlink calls */ +#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX) + +#define FW_REV_STR_SIZE 32 +/* Common Transport structures and definitions */ + +union CtRevisionId { + /* Structure is in Big Endian format */ + struct { + uint32_t Revision:8; + uint32_t InId:24; + } bits; + uint32_t word; +}; + +union CtCommandResponse { + /* Structure is in Big Endian format */ + struct { + __be16 CmdRsp; + __be16 Size; + } bits; + uint32_t word; +}; + +/* FC4 Feature bits for RFF_ID */ +#define FC4_FEATURE_TARGET 0x1 +#define FC4_FEATURE_INIT 0x2 +#define FC4_FEATURE_NVME_DISC 0x4 + +enum rft_word0 { + RFT_FCP_REG = (0x1 << 8), +}; + +enum rft_word1 { + RFT_NVME_REG = (0x1 << 8), +}; + +enum rft_word3 { + RFT_APP_SERV_REG = (0x1 << 0), +}; + +struct lpfc_sli_ct_request { + /* Structure is in Big Endian format */ + union CtRevisionId RevisionId; + uint8_t FsType; + uint8_t FsSubType; + uint8_t Options; + uint8_t Rsrvd1; + union CtCommandResponse CommandResponse; + uint8_t Rsrvd2; + uint8_t ReasonCode; + uint8_t Explanation; + uint8_t VendorUnique; +#define LPFC_CT_PREAMBLE 20 /* Size of CTReq + 4 up to here */ + + union { + __be32 PortID; + struct gid { + uint8_t PortType; /* for GID_PT requests */ +#define GID_PT_N_PORT 1 + uint8_t DomainScope; + uint8_t AreaScope; + uint8_t Fc4Type; /* for GID_FT requests */ + } gid; + struct gid_ff { + uint8_t Flags; + uint8_t DomainScope; + uint8_t AreaScope; + uint8_t rsvd1; + uint8_t rsvd2; + uint8_t rsvd3; + uint8_t Fc4FBits; + uint8_t Fc4Type; + } gid_ff; + struct rft { + __be32 port_id; /* For RFT_ID requests */ + + __be32 fcp_reg; /* rsvd 31:9, fcp_reg 8, rsvd 7:0 */ + __be32 nvme_reg; /* rsvd 31:9, nvme_reg 8, rsvd 7:0 */ + __be32 word2; + __be32 app_serv_reg; /* rsvd 31:1, app_serv_reg 0 */ + __be32 word[4]; + } rft; + struct rnn { + uint32_t PortId; /* For RNN_ID requests */ + uint8_t wwnn[8]; + } rnn; + struct rsnn { /* For RSNN_ID requests */ + uint8_t wwnn[8]; + uint8_t len; + uint8_t symbname[255]; + } rsnn; + struct da_id { /* For DA_ID requests */ + uint32_t port_id; + } da_id; + struct rspn { /* For RSPN_ID requests */ + uint32_t PortId; + uint8_t len; + uint8_t symbname[255]; + } rspn; + struct gff { + uint32_t PortId; + } gff; + struct gff_acc { + uint8_t fbits[128]; + } gff_acc; + struct gft { + uint32_t PortId; + } gft; + struct gft_acc { + uint32_t fc4_types[8]; + } gft_acc; +#define FCP_TYPE_FEATURE_OFFSET 7 + struct rff { + uint32_t PortId; + uint8_t reserved[2]; + uint8_t fbits; + uint8_t type_code; /* type=8 for FCP */ + } rff; + } un; +}; + +#define LPFC_MAX_CT_SIZE (60 * 4096) + +#define SLI_CT_REVISION 1 +#define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct gid)) +#define GIDFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct gid_ff)) +#define GFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct gff)) +#define GFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct gft)) +#define RFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct rft)) +#define RFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct rff)) +#define RNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct rnn)) +#define RSNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct rsnn)) +#define DA_ID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + 
sizeof(struct da_id)) +#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct rspn)) + +/* + * FsType Definitions + */ + +#define SLI_CT_MANAGEMENT_SERVICE 0xFA +#define SLI_CT_TIME_SERVICE 0xFB +#define SLI_CT_DIRECTORY_SERVICE 0xFC +#define SLI_CT_FABRIC_CONTROLLER_SERVICE 0xFD + +/* + * Directory Service Subtypes + */ + +#define SLI_CT_DIRECTORY_NAME_SERVER 0x02 + +/* + * Response Codes + */ + +#define SLI_CT_RESPONSE_FS_RJT 0x8001 +#define SLI_CT_RESPONSE_FS_ACC 0x8002 + +/* + * Reason Codes + */ + +#define SLI_CT_NO_ADDITIONAL_EXPL 0x0 +#define SLI_CT_INVALID_COMMAND 0x01 +#define SLI_CT_INVALID_VERSION 0x02 +#define SLI_CT_LOGICAL_ERROR 0x03 +#define SLI_CT_INVALID_IU_SIZE 0x04 +#define SLI_CT_LOGICAL_BUSY 0x05 +#define SLI_CT_PROTOCOL_ERROR 0x07 +#define SLI_CT_UNABLE_TO_PERFORM_REQ 0x09 +#define SLI_CT_REQ_NOT_SUPPORTED 0x0b +#define SLI_CT_HBA_INFO_NOT_REGISTERED 0x10 +#define SLI_CT_MULTIPLE_HBA_ATTR_OF_SAME_TYPE 0x11 +#define SLI_CT_INVALID_HBA_ATTR_BLOCK_LEN 0x12 +#define SLI_CT_HBA_ATTR_NOT_PRESENT 0x13 +#define SLI_CT_PORT_INFO_NOT_REGISTERED 0x20 +#define SLI_CT_MULTIPLE_PORT_ATTR_OF_SAME_TYPE 0x21 +#define SLI_CT_INVALID_PORT_ATTR_BLOCK_LEN 0x22 +#define SLI_CT_VENDOR_UNIQUE 0xff + +/* + * Name Server SLI_CT_UNABLE_TO_PERFORM_REQ Explanations + */ + +#define SLI_CT_NO_PORT_ID 0x01 +#define SLI_CT_NO_PORT_NAME 0x02 +#define SLI_CT_NO_NODE_NAME 0x03 +#define SLI_CT_NO_CLASS_OF_SERVICE 0x04 +#define SLI_CT_NO_IP_ADDRESS 0x05 +#define SLI_CT_NO_IPA 0x06 +#define SLI_CT_NO_FC4_TYPES 0x07 +#define SLI_CT_NO_SYMBOLIC_PORT_NAME 0x08 +#define SLI_CT_NO_SYMBOLIC_NODE_NAME 0x09 +#define SLI_CT_NO_PORT_TYPE 0x0A +#define SLI_CT_ACCESS_DENIED 0x10 +#define SLI_CT_INVALID_PORT_ID 0x11 +#define SLI_CT_DATABASE_EMPTY 0x12 +#define SLI_CT_APP_ID_NOT_AVAILABLE 0x40 + +/* + * Name Server Command Codes + */ + +#define SLI_CTNS_GA_NXT 0x0100 +#define SLI_CTNS_GPN_ID 0x0112 +#define SLI_CTNS_GNN_ID 0x0113 +#define SLI_CTNS_GCS_ID 0x0114 +#define SLI_CTNS_GFT_ID 0x0117 +#define SLI_CTNS_GSPN_ID 0x0118 +#define SLI_CTNS_GPT_ID 0x011A +#define SLI_CTNS_GFF_ID 0x011F +#define SLI_CTNS_GID_PN 0x0121 +#define SLI_CTNS_GID_NN 0x0131 +#define SLI_CTNS_GIP_NN 0x0135 +#define SLI_CTNS_GIPA_NN 0x0136 +#define SLI_CTNS_GSNN_NN 0x0139 +#define SLI_CTNS_GNN_IP 0x0153 +#define SLI_CTNS_GIPA_IP 0x0156 +#define SLI_CTNS_GID_FT 0x0171 +#define SLI_CTNS_GID_FF 0x01F1 +#define SLI_CTNS_GID_PT 0x01A1 +#define SLI_CTNS_RPN_ID 0x0212 +#define SLI_CTNS_RNN_ID 0x0213 +#define SLI_CTNS_RCS_ID 0x0214 +#define SLI_CTNS_RFT_ID 0x0217 +#define SLI_CTNS_RSPN_ID 0x0218 +#define SLI_CTNS_RPT_ID 0x021A +#define SLI_CTNS_RFF_ID 0x021F +#define SLI_CTNS_RIP_NN 0x0235 +#define SLI_CTNS_RIPA_NN 0x0236 +#define SLI_CTNS_RSNN_NN 0x0239 +#define SLI_CTNS_DA_ID 0x0300 + +/* + * Port Types + */ + +#define SLI_CTPT_N_PORT 0x01 +#define SLI_CTPT_NL_PORT 0x02 +#define SLI_CTPT_FNL_PORT 0x03 +#define SLI_CTPT_IP 0x04 +#define SLI_CTPT_FCP 0x08 +#define SLI_CTPT_NVME 0x28 +#define SLI_CTPT_NX_PORT 0x7F +#define SLI_CTPT_F_PORT 0x81 +#define SLI_CTPT_FL_PORT 0x82 +#define SLI_CTPT_E_PORT 0x84 + +#define SLI_CT_LAST_ENTRY 0x80000000 + +/* Fibre Channel Service Parameter definitions */ + +#define FC_PH_4_0 6 /* FC-PH version 4.0 */ +#define FC_PH_4_1 7 /* FC-PH version 4.1 */ +#define FC_PH_4_2 8 /* FC-PH version 4.2 */ +#define FC_PH_4_3 9 /* FC-PH version 4.3 */ + +#define FC_PH_LOW 8 /* Lowest supported FC-PH version */ +#define FC_PH_HIGH 9 /* Highest supported FC-PH version */ +#define FC_PH3 0x20 /* FC-PH-3 
version */ + +#define FF_FRAME_SIZE 2048 + +struct lpfc_name { + union { + struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t nameType:4; /* FC Word 0, bit 28:31 */ + uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit + 8:11 of IEEE ext */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit + 8:11 of IEEE ext */ + uint8_t nameType:4; /* FC Word 0, bit 28:31 */ +#endif + +#define NAME_IEEE 0x1 /* IEEE name - nameType */ +#define NAME_IEEE_EXT 0x2 /* IEEE extended name */ +#define NAME_FC_TYPE 0x3 /* FC native name type */ +#define NAME_IP_TYPE 0x4 /* IP address */ +#define NAME_CCITT_TYPE 0xC +#define NAME_CCITT_GR_TYPE 0xE + uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE + extended Lsb */ + uint8_t IEEE[6]; /* FC IEEE address */ + } s; + uint8_t wwn[8]; + uint64_t name __packed __aligned(4); + } u; +}; + +struct csp { + uint8_t fcphHigh; /* FC Word 0, byte 0 */ + uint8_t fcphLow; + uint8_t bbCreditMsb; + uint8_t bbCreditLsb; /* FC Word 0, byte 3 */ + +/* + * Word 1 Bit 31 in common service parameter is overloaded. + * Word 1 Bit 31 in FLOGI request is multiple NPort request + * Word 1 Bit 31 in FLOGI response is clean address bit + */ +#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */ +/* + * Word 1 Bit 30 in common service parameter is overloaded. + * Word 1 Bit 30 in FLOGI request is Virtual Fabrics + * Word 1 Bit 30 in PLOGI request is random offset + */ +#define virtual_fabric_support randomOffset /* Word 1, bit 30 */ +/* + * Word 1 Bit 29 in common service parameter is overloaded. + * Word 1 Bit 29 in FLOGI response is multiple NPort assignment + * Word 1 Bit 29 in FLOGI/PLOGI request is Valid Vendor Version Level + */ +#define valid_vendor_ver_level response_multiple_NPort /* Word 1, bit 29 */ +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */ + uint16_t randomOffset:1; /* FC Word 1, bit 30 */ + uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */ + uint16_t fPort:1; /* FC Word 1, bit 28 */ + uint16_t altBbCredit:1; /* FC Word 1, bit 27 */ + uint16_t edtovResolution:1; /* FC Word 1, bit 26 */ + uint16_t multicast:1; /* FC Word 1, bit 25 */ + uint16_t app_hdr_support:1; /* FC Word 1, bit 24 */ + + uint16_t priority_tagging:1; /* FC Word 1, bit 23 */ + uint16_t simplex:1; /* FC Word 1, bit 22 */ + uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */ + uint16_t dhd:1; /* FC Word 1, bit 18 */ + uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */ + uint16_t payloadlength:1; /* FC Word 1, bit 16 */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t app_hdr_support:1; /* FC Word 1, bit 24 */ + uint16_t multicast:1; /* FC Word 1, bit 25 */ + uint16_t edtovResolution:1; /* FC Word 1, bit 26 */ + uint16_t altBbCredit:1; /* FC Word 1, bit 27 */ + uint16_t fPort:1; /* FC Word 1, bit 28 */ + uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */ + uint16_t randomOffset:1; /* FC Word 1, bit 30 */ + uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */ + + uint16_t payloadlength:1; /* FC Word 1, bit 16 */ + uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */ + uint16_t dhd:1; /* FC Word 1, bit 18 */ + uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */ + uint16_t simplex:1; /* FC Word 1, bit 22 */ + uint16_t priority_tagging:1; /* FC Word 1, bit 23 */ +#endif + + uint8_t bbRcvSizeMsb; /* Upper nibble is reserved */ + uint8_t bbRcvSizeLsb; /* FC Word 1, byte 3 */ + union { + struct { + uint8_t word2Reserved1; /* FC Word 2 byte 0 */ + + uint8_t totalConcurrSeq; /* FC Word 2 byte 1 */ + 
uint8_t roByCategoryMsb; /* FC Word 2 byte 2 */ + + uint8_t roByCategoryLsb; /* FC Word 2 byte 3 */ + } nPort; + uint32_t r_a_tov; /* R_A_TOV must be in B.E. format */ + } w2; + + uint32_t e_d_tov; /* E_D_TOV must be in B.E. format */ +}; + +struct class_parms { +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t classValid:1; /* FC Word 0, bit 31 */ + uint8_t intermix:1; /* FC Word 0, bit 30 */ + uint8_t stackedXparent:1; /* FC Word 0, bit 29 */ + uint8_t stackedLockDown:1; /* FC Word 0, bit 28 */ + uint8_t seqDelivery:1; /* FC Word 0, bit 27 */ + uint8_t word0Reserved1:3; /* FC Word 0, bit 24:26 */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint8_t word0Reserved1:3; /* FC Word 0, bit 24:26 */ + uint8_t seqDelivery:1; /* FC Word 0, bit 27 */ + uint8_t stackedLockDown:1; /* FC Word 0, bit 28 */ + uint8_t stackedXparent:1; /* FC Word 0, bit 29 */ + uint8_t intermix:1; /* FC Word 0, bit 30 */ + uint8_t classValid:1; /* FC Word 0, bit 31 */ + +#endif + + uint8_t word0Reserved2; /* FC Word 0, bit 16:23 */ + +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t iCtlXidReAssgn:2; /* FC Word 0, Bit 14:15 */ + uint8_t iCtlInitialPa:2; /* FC Word 0, bit 12:13 */ + uint8_t iCtlAck0capable:1; /* FC Word 0, bit 11 */ + uint8_t iCtlAckNcapable:1; /* FC Word 0, bit 10 */ + uint8_t word0Reserved3:2; /* FC Word 0, bit 8: 9 */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint8_t word0Reserved3:2; /* FC Word 0, bit 8: 9 */ + uint8_t iCtlAckNcapable:1; /* FC Word 0, bit 10 */ + uint8_t iCtlAck0capable:1; /* FC Word 0, bit 11 */ + uint8_t iCtlInitialPa:2; /* FC Word 0, bit 12:13 */ + uint8_t iCtlXidReAssgn:2; /* FC Word 0, Bit 14:15 */ +#endif + + uint8_t word0Reserved4; /* FC Word 0, bit 0: 7 */ + +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t rCtlAck0capable:1; /* FC Word 1, bit 31 */ + uint8_t rCtlAckNcapable:1; /* FC Word 1, bit 30 */ + uint8_t rCtlXidInterlck:1; /* FC Word 1, bit 29 */ + uint8_t rCtlErrorPolicy:2; /* FC Word 1, bit 27:28 */ + uint8_t word1Reserved1:1; /* FC Word 1, bit 26 */ + uint8_t rCtlCatPerSeq:2; /* FC Word 1, bit 24:25 */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint8_t rCtlCatPerSeq:2; /* FC Word 1, bit 24:25 */ + uint8_t word1Reserved1:1; /* FC Word 1, bit 26 */ + uint8_t rCtlErrorPolicy:2; /* FC Word 1, bit 27:28 */ + uint8_t rCtlXidInterlck:1; /* FC Word 1, bit 29 */ + uint8_t rCtlAckNcapable:1; /* FC Word 1, bit 30 */ + uint8_t rCtlAck0capable:1; /* FC Word 1, bit 31 */ +#endif + + uint8_t word1Reserved2; /* FC Word 1, bit 16:23 */ + uint8_t rcvDataSizeMsb; /* FC Word 1, bit 8:15 */ + uint8_t rcvDataSizeLsb; /* FC Word 1, bit 0: 7 */ + + uint8_t concurrentSeqMsb; /* FC Word 2, bit 24:31 */ + uint8_t concurrentSeqLsb; /* FC Word 2, bit 16:23 */ + uint8_t EeCreditSeqMsb; /* FC Word 2, bit 8:15 */ + uint8_t EeCreditSeqLsb; /* FC Word 2, bit 0: 7 */ + + uint8_t openSeqPerXchgMsb; /* FC Word 3, bit 24:31 */ + uint8_t openSeqPerXchgLsb; /* FC Word 3, bit 16:23 */ + uint8_t word3Reserved1; /* Fc Word 3, bit 8:15 */ + uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */ +}; + +struct serv_parm { /* Structure is in Big Endian format */ + struct csp cmn; + struct lpfc_name portName; + struct lpfc_name nodeName; + struct class_parms cls1; + struct class_parms cls2; + struct class_parms cls3; + struct class_parms cls4; + union { + uint8_t vendorVersion[16]; + struct { + uint32_t vid; +#define LPFC_VV_EMLX_ID 0x454d4c58 /* EMLX */ + uint32_t flags; +#define LPFC_VV_SUPPRESS_RSP 1 + } vv; + } un; +}; + +/* + * Virtual Fabric Tagging Header + */ +struct fc_vft_header { + uint32_t word0; +#define fc_vft_hdr_r_ctl_SHIFT 24 +#define 
fc_vft_hdr_r_ctl_MASK 0xFF +#define fc_vft_hdr_r_ctl_WORD word0 +#define fc_vft_hdr_ver_SHIFT 22 +#define fc_vft_hdr_ver_MASK 0x3 +#define fc_vft_hdr_ver_WORD word0 +#define fc_vft_hdr_type_SHIFT 18 +#define fc_vft_hdr_type_MASK 0xF +#define fc_vft_hdr_type_WORD word0 +#define fc_vft_hdr_e_SHIFT 16 +#define fc_vft_hdr_e_MASK 0x1 +#define fc_vft_hdr_e_WORD word0 +#define fc_vft_hdr_priority_SHIFT 13 +#define fc_vft_hdr_priority_MASK 0x7 +#define fc_vft_hdr_priority_WORD word0 +#define fc_vft_hdr_vf_id_SHIFT 1 +#define fc_vft_hdr_vf_id_MASK 0xFFF +#define fc_vft_hdr_vf_id_WORD word0 + uint32_t word1; +#define fc_vft_hdr_hopct_SHIFT 24 +#define fc_vft_hdr_hopct_MASK 0xFF +#define fc_vft_hdr_hopct_WORD word1 +}; + +#include + +/* + * Extended Link Service LS_COMMAND codes (Payload Word 0) + */ +#ifdef __BIG_ENDIAN_BITFIELD +#define ELS_CMD_MASK 0xffff0000 +#define ELS_RSP_MASK 0xff000000 +#define ELS_CMD_LS_RJT 0x01000000 +#define ELS_CMD_ACC 0x02000000 +#define ELS_CMD_PLOGI 0x03000000 +#define ELS_CMD_FLOGI 0x04000000 +#define ELS_CMD_LOGO 0x05000000 +#define ELS_CMD_ABTX 0x06000000 +#define ELS_CMD_RCS 0x07000000 +#define ELS_CMD_RES 0x08000000 +#define ELS_CMD_RSS 0x09000000 +#define ELS_CMD_RSI 0x0A000000 +#define ELS_CMD_ESTS 0x0B000000 +#define ELS_CMD_ESTC 0x0C000000 +#define ELS_CMD_ADVC 0x0D000000 +#define ELS_CMD_RTV 0x0E000000 +#define ELS_CMD_RLS 0x0F000000 +#define ELS_CMD_ECHO 0x10000000 +#define ELS_CMD_TEST 0x11000000 +#define ELS_CMD_RRQ 0x12000000 +#define ELS_CMD_REC 0x13000000 +#define ELS_CMD_RDP 0x18000000 +#define ELS_CMD_RDF 0x19000000 +#define ELS_CMD_PRLI 0x20100014 +#define ELS_CMD_NVMEPRLI 0x20140018 +#define ELS_CMD_PRLO 0x21100014 +#define ELS_CMD_PRLO_ACC 0x02100014 +#define ELS_CMD_PDISC 0x50000000 +#define ELS_CMD_FDISC 0x51000000 +#define ELS_CMD_ADISC 0x52000000 +#define ELS_CMD_FARP 0x54000000 +#define ELS_CMD_FARPR 0x55000000 +#define ELS_CMD_RPL 0x57000000 +#define ELS_CMD_FAN 0x60000000 +#define ELS_CMD_RSCN 0x61040000 +#define ELS_CMD_RSCN_XMT 0x61040008 +#define ELS_CMD_SCR 0x62000000 +#define ELS_CMD_RNID 0x78000000 +#define ELS_CMD_LIRR 0x7A000000 +#define ELS_CMD_LCB 0x81000000 +#define ELS_CMD_FPIN 0x16000000 +#define ELS_CMD_EDC 0x17000000 +#define ELS_CMD_QFPA 0xB0000000 +#define ELS_CMD_UVEM 0xB1000000 +#else /* __LITTLE_ENDIAN_BITFIELD */ +#define ELS_CMD_MASK 0xffff +#define ELS_RSP_MASK 0xff +#define ELS_CMD_LS_RJT 0x01 +#define ELS_CMD_ACC 0x02 +#define ELS_CMD_PLOGI 0x03 +#define ELS_CMD_FLOGI 0x04 +#define ELS_CMD_LOGO 0x05 +#define ELS_CMD_ABTX 0x06 +#define ELS_CMD_RCS 0x07 +#define ELS_CMD_RES 0x08 +#define ELS_CMD_RSS 0x09 +#define ELS_CMD_RSI 0x0A +#define ELS_CMD_ESTS 0x0B +#define ELS_CMD_ESTC 0x0C +#define ELS_CMD_ADVC 0x0D +#define ELS_CMD_RTV 0x0E +#define ELS_CMD_RLS 0x0F +#define ELS_CMD_ECHO 0x10 +#define ELS_CMD_TEST 0x11 +#define ELS_CMD_RRQ 0x12 +#define ELS_CMD_REC 0x13 +#define ELS_CMD_RDP 0x18 +#define ELS_CMD_RDF 0x19 +#define ELS_CMD_PRLI 0x14001020 +#define ELS_CMD_NVMEPRLI 0x18001420 +#define ELS_CMD_PRLO 0x14001021 +#define ELS_CMD_PRLO_ACC 0x14001002 +#define ELS_CMD_PDISC 0x50 +#define ELS_CMD_FDISC 0x51 +#define ELS_CMD_ADISC 0x52 +#define ELS_CMD_FARP 0x54 +#define ELS_CMD_FARPR 0x55 +#define ELS_CMD_RPL 0x57 +#define ELS_CMD_FAN 0x60 +#define ELS_CMD_RSCN 0x0461 +#define ELS_CMD_RSCN_XMT 0x08000461 +#define ELS_CMD_SCR 0x62 +#define ELS_CMD_RNID 0x78 +#define ELS_CMD_LIRR 0x7A +#define ELS_CMD_LCB 0x81 +#define ELS_CMD_FPIN ELS_FPIN +#define ELS_CMD_EDC ELS_EDC +#define ELS_CMD_QFPA 0xB0 +#define ELS_CMD_UVEM 
0xB1 +#endif + +/* + * LS_RJT Payload Definition + */ + +struct ls_rjt { /* Structure is in Big Endian format */ + union { + __be32 ls_rjt_error_be; + uint32_t lsRjtError; + struct { + uint8_t lsRjtRsvd0; /* FC Word 0, bit 24:31 */ + + uint8_t lsRjtRsnCode; /* FC Word 0, bit 16:23 */ + /* LS_RJT reason codes */ +#define LSRJT_INVALID_CMD 0x01 +#define LSRJT_LOGICAL_ERR 0x03 +#define LSRJT_LOGICAL_BSY 0x05 +#define LSRJT_PROTOCOL_ERR 0x07 +#define LSRJT_UNABLE_TPC 0x09 /* Unable to perform command */ +#define LSRJT_CMD_UNSUPPORTED 0x0B +#define LSRJT_VENDOR_UNIQUE 0xFF /* See Byte 3 */ + + uint8_t lsRjtRsnCodeExp; /* FC Word 0, bit 8:15 */ + /* LS_RJT reason explanation */ +#define LSEXP_NOTHING_MORE 0x00 +#define LSEXP_SPARM_OPTIONS 0x01 +#define LSEXP_SPARM_ICTL 0x03 +#define LSEXP_SPARM_RCTL 0x05 +#define LSEXP_SPARM_RCV_SIZE 0x07 +#define LSEXP_SPARM_CONCUR_SEQ 0x09 +#define LSEXP_SPARM_CREDIT 0x0B +#define LSEXP_INVALID_PNAME 0x0D +#define LSEXP_INVALID_NNAME 0x0E +#define LSEXP_INVALID_CSP 0x0F +#define LSEXP_INVALID_ASSOC_HDR 0x11 +#define LSEXP_ASSOC_HDR_REQ 0x13 +#define LSEXP_INVALID_O_SID 0x15 +#define LSEXP_INVALID_OX_RX 0x17 +#define LSEXP_CMD_IN_PROGRESS 0x19 +#define LSEXP_PORT_LOGIN_REQ 0x1E +#define LSEXP_INVALID_NPORT_ID 0x1F +#define LSEXP_INVALID_SEQ_ID 0x21 +#define LSEXP_INVALID_XCHG 0x23 +#define LSEXP_INACTIVE_XCHG 0x25 +#define LSEXP_RQ_REQUIRED 0x27 +#define LSEXP_OUT_OF_RESOURCE 0x29 +#define LSEXP_CANT_GIVE_DATA 0x2A +#define LSEXP_REQ_UNSUPPORTED 0x2C +#define LSEXP_NO_RSRC_ASSIGN 0x52 + uint8_t vendorUnique; /* FC Word 0, bit 0: 7 */ + } b; + } un; +}; + +/* + * N_Port Login (FLOGO/PLOGO Request) Payload Definition + */ + +typedef struct _LOGO { /* Structure is in Big Endian format */ + union { + uint32_t nPortId32; /* Access nPortId as a word */ + struct { + uint8_t word1Reserved1; /* FC Word 1, bit 31:24 */ + uint8_t nPortIdByte0; /* N_port ID bit 16:23 */ + uint8_t nPortIdByte1; /* N_port ID bit 8:15 */ + uint8_t nPortIdByte2; /* N_port ID bit 0: 7 */ + } b; + } un; + struct lpfc_name portName; /* N_port name field */ +} LOGO; + +/* + * FCP Login (PRLI Request / ACC) Payload Definition + */ + +#define PRLX_PAGE_LEN 0x10 +#define TPRLO_PAGE_LEN 0x14 + +typedef struct _PRLI { /* Structure is in Big Endian format */ + uint8_t prliType; /* FC Parm Word 0, bit 24:31 */ + +#define PRLI_FCP_TYPE 0x08 +#define PRLI_NVME_TYPE 0x28 + uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */ + +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */ + uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */ + uint8_t estabImagePair:1; /* FC Parm Word 0, bit 13 */ + + /* ACC = imagePairEstablished */ + uint8_t word0Reserved2:1; /* FC Parm Word 0, bit 12 */ + uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */ + uint8_t word0Reserved2:1; /* FC Parm Word 0, bit 12 */ + uint8_t estabImagePair:1; /* FC Parm Word 0, bit 13 */ + uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */ + uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */ + /* ACC = imagePairEstablished */ +#endif + +#define PRLI_REQ_EXECUTED 0x1 /* acceptRspCode */ +#define PRLI_NO_RESOURCES 0x2 +#define PRLI_INIT_INCOMPLETE 0x3 +#define PRLI_NO_SUCH_PA 0x4 +#define PRLI_PREDEF_CONFIG 0x5 +#define PRLI_PARTIAL_SUCCESS 0x6 +#define PRLI_INVALID_PAGE_CNT 0x7 +#define PRLI_INV_SRV_PARM 0x8 + + uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */ + + uint32_t 
origProcAssoc; /* FC Parm Word 1, bit 0:31 */ + + uint32_t respProcAssoc; /* FC Parm Word 2, bit 0:31 */ + + uint8_t word3Reserved1; /* FC Parm Word 3, bit 24:31 */ + uint8_t word3Reserved2; /* FC Parm Word 3, bit 16:23 */ + +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t Word3bit15Resved:1; /* FC Parm Word 3, bit 15 */ + uint16_t Word3bit14Resved:1; /* FC Parm Word 3, bit 14 */ + uint16_t Word3bit13Resved:1; /* FC Parm Word 3, bit 13 */ + uint16_t Word3bit12Resved:1; /* FC Parm Word 3, bit 12 */ + uint16_t Word3bit11Resved:1; /* FC Parm Word 3, bit 11 */ + uint16_t Word3bit10Resved:1; /* FC Parm Word 3, bit 10 */ + uint16_t TaskRetryIdReq:1; /* FC Parm Word 3, bit 9 */ + uint16_t Retry:1; /* FC Parm Word 3, bit 8 */ + uint16_t ConfmComplAllowed:1; /* FC Parm Word 3, bit 7 */ + uint16_t dataOverLay:1; /* FC Parm Word 3, bit 6 */ + uint16_t initiatorFunc:1; /* FC Parm Word 3, bit 5 */ + uint16_t targetFunc:1; /* FC Parm Word 3, bit 4 */ + uint16_t cmdDataMixEna:1; /* FC Parm Word 3, bit 3 */ + uint16_t dataRspMixEna:1; /* FC Parm Word 3, bit 2 */ + uint16_t readXferRdyDis:1; /* FC Parm Word 3, bit 1 */ + uint16_t writeXferRdyDis:1; /* FC Parm Word 3, bit 0 */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t Retry:1; /* FC Parm Word 3, bit 8 */ + uint16_t TaskRetryIdReq:1; /* FC Parm Word 3, bit 9 */ + uint16_t Word3bit10Resved:1; /* FC Parm Word 3, bit 10 */ + uint16_t Word3bit11Resved:1; /* FC Parm Word 3, bit 11 */ + uint16_t Word3bit12Resved:1; /* FC Parm Word 3, bit 12 */ + uint16_t Word3bit13Resved:1; /* FC Parm Word 3, bit 13 */ + uint16_t Word3bit14Resved:1; /* FC Parm Word 3, bit 14 */ + uint16_t Word3bit15Resved:1; /* FC Parm Word 3, bit 15 */ + uint16_t writeXferRdyDis:1; /* FC Parm Word 3, bit 0 */ + uint16_t readXferRdyDis:1; /* FC Parm Word 3, bit 1 */ + uint16_t dataRspMixEna:1; /* FC Parm Word 3, bit 2 */ + uint16_t cmdDataMixEna:1; /* FC Parm Word 3, bit 3 */ + uint16_t targetFunc:1; /* FC Parm Word 3, bit 4 */ + uint16_t initiatorFunc:1; /* FC Parm Word 3, bit 5 */ + uint16_t dataOverLay:1; /* FC Parm Word 3, bit 6 */ + uint16_t ConfmComplAllowed:1; /* FC Parm Word 3, bit 7 */ +#endif +} PRLI; + +/* + * FCP Logout (PRLO Request / ACC) Payload Definition + */ + +typedef struct _PRLO { /* Structure is in Big Endian format */ + uint8_t prloType; /* FC Parm Word 0, bit 24:31 */ + +#define PRLO_FCP_TYPE 0x08 + uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */ + +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */ + uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */ + uint8_t word0Reserved2:2; /* FC Parm Word 0, bit 12:13 */ + uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */ + uint8_t word0Reserved2:2; /* FC Parm Word 0, bit 12:13 */ + uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */ + uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */ +#endif + +#define PRLO_REQ_EXECUTED 0x1 /* acceptRspCode */ +#define PRLO_NO_SUCH_IMAGE 0x4 +#define PRLO_INVALID_PAGE_CNT 0x7 + + uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */ + + uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */ + + uint32_t respProcAssoc; /* FC Parm Word 2, bit 0:31 */ + + uint32_t word3Reserved1; /* FC Parm Word 3, bit 0:31 */ +} PRLO; + +typedef struct _ADISC { /* Structure is in Big Endian format */ + uint32_t hardAL_PA; + struct lpfc_name portName; + struct lpfc_name nodeName; + uint32_t DID; +} ADISC; + +typedef struct _FARP { /* Structure is 
in Big Endian format */ + uint32_t Mflags:8; + uint32_t Odid:24; +#define FARP_NO_ACTION 0 /* FARP information enclosed, no + action */ +#define FARP_MATCH_PORT 0x1 /* Match on Responder Port Name */ +#define FARP_MATCH_NODE 0x2 /* Match on Responder Node Name */ +#define FARP_MATCH_IP 0x4 /* Match on IP address, not supported */ +#define FARP_MATCH_IPV4 0x5 /* Match on IPV4 address, not + supported */ +#define FARP_MATCH_IPV6 0x6 /* Match on IPV6 address, not + supported */ + uint32_t Rflags:8; + uint32_t Rdid:24; +#define FARP_REQUEST_PLOGI 0x1 /* Request for PLOGI */ +#define FARP_REQUEST_FARPR 0x2 /* Request for FARP Response */ + struct lpfc_name OportName; + struct lpfc_name OnodeName; + struct lpfc_name RportName; + struct lpfc_name RnodeName; + uint8_t Oipaddr[16]; + uint8_t Ripaddr[16]; +} FARP; + +typedef struct _FAN { /* Structure is in Big Endian format */ + uint32_t Fdid; + struct lpfc_name FportName; + struct lpfc_name FnodeName; +} FAN; + +typedef struct _SCR { /* Structure is in Big Endian format */ + uint8_t resvd1; + uint8_t resvd2; + uint8_t resvd3; + uint8_t Function; +#define SCR_FUNC_FABRIC 0x01 +#define SCR_FUNC_NPORT 0x02 +#define SCR_FUNC_FULL 0x03 +#define SCR_CLEAR 0xff +} SCR; + +typedef struct _RNID_TOP_DISC { + struct lpfc_name portName; + uint8_t resvd[8]; + uint32_t unitType; +#define RNID_HBA 0x7 +#define RNID_HOST 0xa +#define RNID_DRIVER 0xd + uint32_t physPort; + uint32_t attachedNodes; + uint16_t ipVersion; +#define RNID_IPV4 0x1 +#define RNID_IPV6 0x2 + uint16_t UDPport; + uint8_t ipAddr[16]; + uint16_t resvd1; + uint16_t flags; +#define RNID_TD_SUPPORT 0x1 +#define RNID_LP_VALID 0x2 +} RNID_TOP_DISC; + +typedef struct _RNID { /* Structure is in Big Endian format */ + uint8_t Format; +#define RNID_TOPOLOGY_DISC 0xdf + uint8_t CommonLen; + uint8_t resvd1; + uint8_t SpecificLen; + struct lpfc_name portName; + struct lpfc_name nodeName; + union { + RNID_TOP_DISC topologyDisc; /* topology disc (0xdf) */ + } un; +} RNID; + +struct RLS { /* Structure is in Big Endian format */ + uint32_t rls; +#define rls_rsvd_SHIFT 24 +#define rls_rsvd_MASK 0x000000ff +#define rls_rsvd_WORD rls +#define rls_did_SHIFT 0 +#define rls_did_MASK 0x00ffffff +#define rls_did_WORD rls +}; + +struct RLS_RSP { /* Structure is in Big Endian format */ + uint32_t linkFailureCnt; + uint32_t lossSyncCnt; + uint32_t lossSignalCnt; + uint32_t primSeqErrCnt; + uint32_t invalidXmitWord; + uint32_t crcCnt; +}; + +struct RRQ { /* Structure is in Big Endian format */ + uint32_t rrq; +#define rrq_rsvd_SHIFT 24 +#define rrq_rsvd_MASK 0x000000ff +#define rrq_rsvd_WORD rrq +#define rrq_did_SHIFT 0 +#define rrq_did_MASK 0x00ffffff +#define rrq_did_WORD rrq + uint32_t rrq_exchg; +#define rrq_oxid_SHIFT 16 +#define rrq_oxid_MASK 0xffff +#define rrq_oxid_WORD rrq_exchg +#define rrq_rxid_SHIFT 0 +#define rrq_rxid_MASK 0xffff +#define rrq_rxid_WORD rrq_exchg +}; + +#define LPFC_MAX_VFN_PER_PFN 255 /* Maximum VFs allowed per ARI */ +#define LPFC_DEF_VFN_PER_PFN 0 /* Default VFs due to platform limitation*/ + +struct RTV_RSP { /* Structure is in Big Endian format */ + uint32_t ratov; + uint32_t edtov; + uint32_t qtov; +#define qtov_rsvd0_SHIFT 28 +#define qtov_rsvd0_MASK 0x0000000f +#define qtov_rsvd0_WORD qtov /* reserved */ +#define qtov_edtovres_SHIFT 27 +#define qtov_edtovres_MASK 0x00000001 +#define qtov_edtovres_WORD qtov /* E_D_TOV Resolution */ +#define qtov__rsvd1_SHIFT 19 +#define qtov_rsvd1_MASK 0x0000003f +#define qtov_rsvd1_WORD qtov /* reserved */ +#define qtov_rttov_SHIFT 18 +#define 
qtov_rttov_MASK 0x00000001 +#define qtov_rttov_WORD qtov /* R_T_TOV value */ +#define qtov_rsvd2_SHIFT 0 +#define qtov_rsvd2_MASK 0x0003ffff +#define qtov_rsvd2_WORD qtov /* reserved */ +}; + + +typedef struct _RPL { /* Structure is in Big Endian format */ + uint32_t maxsize; + uint32_t index; +} RPL; + +typedef struct _PORT_NUM_BLK { + uint32_t portNum; + uint32_t portID; + struct lpfc_name portName; +} PORT_NUM_BLK; + +typedef struct _RPL_RSP { /* Structure is in Big Endian format */ + uint32_t listLen; + uint32_t index; + PORT_NUM_BLK port_num_blk; +} RPL_RSP; + +/* This is used for RSCN command */ +typedef struct _D_ID { /* Structure is in Big Endian format */ + union { + uint32_t word; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t resv; + uint8_t domain; + uint8_t area; + uint8_t id; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint8_t id; + uint8_t area; + uint8_t domain; + uint8_t resv; +#endif + } b; + } un; +} D_ID; + +#define RSCN_ADDRESS_FORMAT_PORT 0x0 +#define RSCN_ADDRESS_FORMAT_AREA 0x1 +#define RSCN_ADDRESS_FORMAT_DOMAIN 0x2 +#define RSCN_ADDRESS_FORMAT_FABRIC 0x3 +#define RSCN_ADDRESS_FORMAT_MASK 0x3 + +/* + * Structure to define all ELS Payload types + */ + +typedef struct _ELS_PKT { /* Structure is in Big Endian format */ + uint8_t elsCode; /* FC Word 0, bit 24:31 */ + uint8_t elsByte1; + uint8_t elsByte2; + uint8_t elsByte3; + union { + struct ls_rjt lsRjt; /* Payload for LS_RJT ELS response */ + struct serv_parm logi; /* Payload for PLOGI/FLOGI/PDISC/ACC */ + LOGO logo; /* Payload for PLOGO/FLOGO/ACC */ + PRLI prli; /* Payload for PRLI/ACC */ + PRLO prlo; /* Payload for PRLO/ACC */ + ADISC adisc; /* Payload for ADISC/ACC */ + FARP farp; /* Payload for FARP/ACC */ + FAN fan; /* Payload for FAN */ + SCR scr; /* Payload for SCR/ACC */ + RNID rnid; /* Payload for RNID */ + uint8_t pad[128 - 4]; /* Pad out to payload of 128 bytes */ + } un; +} ELS_PKT; + +/* + * Link Cable Beacon (LCB) ELS Frame + */ + +struct fc_lcb_request_frame { + uint32_t lcb_command; /* ELS command opcode (0x81) */ + uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */ +#define LPFC_LCB_ON 0x1 +#define LPFC_LCB_OFF 0x2 + uint8_t reserved[2]; + uint8_t capability; /* LCB Payload Word 1, bit 0:7 */ + uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */ +#define LPFC_LCB_GREEN 0x1 +#define LPFC_LCB_AMBER 0x2 + uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */ +#define LCB_CAPABILITY_DURATION 1 +#define BEACON_VERSION_V1 1 +#define BEACON_VERSION_V0 0 + uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */ +}; + +/* + * Link Cable Beacon (LCB) ELS Response Frame + */ +struct fc_lcb_res_frame { + uint32_t lcb_ls_acc; /* Acceptance of LCB request (0x02) */ + uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */ + uint8_t reserved[2]; + uint8_t capability; /* LCB Payload Word 1, bit 0:7 */ + uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */ + uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */ + uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */ +}; + +/* + * Read Diagnostic Parameters (RDP) ELS frame. 
+ */ +#define SFF_PG0_IDENT_SFP 0x3 + +#define SFP_FLAG_PT_OPTICAL 0x0 +#define SFP_FLAG_PT_SWLASER 0x01 +#define SFP_FLAG_PT_LWLASER_LC1310 0x02 +#define SFP_FLAG_PT_LWLASER_LL1550 0x03 +#define SFP_FLAG_PT_MASK 0x0F +#define SFP_FLAG_PT_SHIFT 0 + +#define SFP_FLAG_IS_OPTICAL_PORT 0x01 +#define SFP_FLAG_IS_OPTICAL_MASK 0x010 +#define SFP_FLAG_IS_OPTICAL_SHIFT 4 + +#define SFP_FLAG_IS_DESC_VALID 0x01 +#define SFP_FLAG_IS_DESC_VALID_MASK 0x020 +#define SFP_FLAG_IS_DESC_VALID_SHIFT 5 + +#define SFP_FLAG_CT_UNKNOWN 0x0 +#define SFP_FLAG_CT_SFP_PLUS 0x01 +#define SFP_FLAG_CT_MASK 0x3C +#define SFP_FLAG_CT_SHIFT 6 + +struct fc_rdp_port_name_info { + uint8_t wwnn[8]; + uint8_t wwpn[8]; +}; + + +/* + * Link Error Status Block Structure (FC-FS-3) for RDP + * This similar to RPS ELS + */ +struct fc_link_status { + uint32_t link_failure_cnt; + uint32_t loss_of_synch_cnt; + uint32_t loss_of_signal_cnt; + uint32_t primitive_seq_proto_err; + uint32_t invalid_trans_word; + uint32_t invalid_crc_cnt; + +}; + +#define RDP_PORT_NAMES_DESC_TAG 0x00010003 +struct fc_rdp_port_name_desc { + uint32_t tag; /* 0001 0003h */ + uint32_t length; /* set to size of payload struct */ + struct fc_rdp_port_name_info port_names; +}; + + +struct fc_rdp_fec_info { + uint32_t CorrectedBlocks; + uint32_t UncorrectableBlocks; +}; + +#define RDP_FEC_DESC_TAG 0x00010005 +struct fc_fec_rdp_desc { + uint32_t tag; + uint32_t length; + struct fc_rdp_fec_info info; +}; + +struct fc_rdp_link_error_status_payload_info { + struct fc_link_status link_status; /* 24 bytes */ + uint32_t port_type; /* bits 31-30 only */ +}; + +#define RDP_LINK_ERROR_STATUS_DESC_TAG 0x00010002 +struct fc_rdp_link_error_status_desc { + uint32_t tag; /* 0001 0002h */ + uint32_t length; /* set to size of payload struct */ + struct fc_rdp_link_error_status_payload_info info; +}; + +#define VN_PT_PHY_UNKNOWN 0x00 +#define VN_PT_PHY_PF_PORT 0x01 +#define VN_PT_PHY_ETH_MAC 0x10 +#define VN_PT_PHY_SHIFT 30 + +#define RDP_PS_1GB 0x8000 +#define RDP_PS_2GB 0x4000 +#define RDP_PS_4GB 0x2000 +#define RDP_PS_10GB 0x1000 +#define RDP_PS_8GB 0x0800 +#define RDP_PS_16GB 0x0400 +#define RDP_PS_32GB 0x0200 +#define RDP_PS_64GB 0x0100 +#define RDP_PS_128GB 0x0080 +#define RDP_PS_256GB 0x0040 + +#define RDP_CAP_USER_CONFIGURED 0x0002 +#define RDP_CAP_UNKNOWN 0x0001 +#define RDP_PS_UNKNOWN 0x0002 +#define RDP_PS_NOT_ESTABLISHED 0x0001 + +struct fc_rdp_port_speed { + uint16_t capabilities; + uint16_t speed; +}; + +struct fc_rdp_port_speed_info { + struct fc_rdp_port_speed port_speed; +}; + +#define RDP_PORT_SPEED_DESC_TAG 0x00010001 +struct fc_rdp_port_speed_desc { + uint32_t tag; /* 00010001h */ + uint32_t length; /* set to size of payload struct */ + struct fc_rdp_port_speed_info info; +}; + +#define RDP_NPORT_ID_SIZE 4 +#define RDP_N_PORT_DESC_TAG 0x00000003 +struct fc_rdp_nport_desc { + uint32_t tag; /* 0000 0003h, big endian */ + uint32_t length; /* size of RDP_N_PORT_ID struct */ + uint32_t nport_id : 12; + uint32_t reserved : 8; +}; + + +struct fc_rdp_link_service_info { + uint32_t els_req; /* Request payload word 0 value.*/ +}; + +#define RDP_LINK_SERVICE_DESC_TAG 0x00000001 +struct fc_rdp_link_service_desc { + uint32_t tag; /* Descriptor tag 1 */ + uint32_t length; /* set to size of payload struct. 
*/ + struct fc_rdp_link_service_info payload; + /* must be ELS req Word 0(0x18) */ +}; + +struct fc_rdp_sfp_info { + uint16_t temperature; + uint16_t vcc; + uint16_t tx_bias; + uint16_t tx_power; + uint16_t rx_power; + uint16_t flags; +}; + +#define RDP_SFP_DESC_TAG 0x00010000 +struct fc_rdp_sfp_desc { + uint32_t tag; + uint32_t length; /* set to size of sfp_info struct */ + struct fc_rdp_sfp_info sfp_info; +}; + +/* Buffer Credit Descriptor */ +struct fc_rdp_bbc_info { + uint32_t port_bbc; /* FC_Port buffer-to-buffer credit */ + uint32_t attached_port_bbc; + uint32_t rtt; /* Round trip time */ +}; +#define RDP_BBC_DESC_TAG 0x00010006 +struct fc_rdp_bbc_desc { + uint32_t tag; + uint32_t length; + struct fc_rdp_bbc_info bbc_info; +}; + +/* Optical Element Type Transgression Flags */ +#define RDP_OET_LOW_WARNING 0x1 +#define RDP_OET_HIGH_WARNING 0x2 +#define RDP_OET_LOW_ALARM 0x4 +#define RDP_OET_HIGH_ALARM 0x8 + +#define RDP_OED_TEMPERATURE 0x1 +#define RDP_OED_VOLTAGE 0x2 +#define RDP_OED_TXBIAS 0x3 +#define RDP_OED_TXPOWER 0x4 +#define RDP_OED_RXPOWER 0x5 + +#define RDP_OED_TYPE_SHIFT 28 +/* Optical Element Data descriptor */ +struct fc_rdp_oed_info { + uint16_t hi_alarm; + uint16_t lo_alarm; + uint16_t hi_warning; + uint16_t lo_warning; + uint32_t function_flags; +}; +#define RDP_OED_DESC_TAG 0x00010007 +struct fc_rdp_oed_sfp_desc { + uint32_t tag; + uint32_t length; + struct fc_rdp_oed_info oed_info; +}; + +/* Optical Product Data descriptor */ +struct fc_rdp_opd_sfp_info { + uint8_t vendor_name[16]; + uint8_t model_number[16]; + uint8_t serial_number[16]; + uint8_t revision[4]; + uint8_t date[8]; +}; + +#define RDP_OPD_DESC_TAG 0x00010008 +struct fc_rdp_opd_sfp_desc { + uint32_t tag; + uint32_t length; + struct fc_rdp_opd_sfp_info opd_info; +}; + +struct fc_rdp_req_frame { + uint32_t rdp_command; /* ELS command opcode (0x18)*/ + uint32_t rdp_des_length; /* RDP Payload Word 1 */ + struct fc_rdp_nport_desc nport_id_desc; /* RDP Payload Word 2 - 4 */ +}; + + +struct fc_rdp_res_frame { + uint32_t reply_sequence; /* FC word0 LS_ACC or LS_RJT */ + uint32_t length; /* FC Word 1 */ + struct fc_rdp_link_service_desc link_service_desc; /* Word 2 -4 */ + struct fc_rdp_sfp_desc sfp_desc; /* Word 5 -9 */ + struct fc_rdp_port_speed_desc portspeed_desc; /* Word 10 -12 */ + struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13 -21 */ + struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22 -27 */ + struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28 -33 */ + struct fc_fec_rdp_desc fec_desc; /* FC word 34-37*/ + struct fc_rdp_bbc_desc bbc_desc; /* FC Word 38-42*/ + struct fc_rdp_oed_sfp_desc oed_temp_desc; /* FC Word 43-47*/ + struct fc_rdp_oed_sfp_desc oed_voltage_desc; /* FC word 48-52*/ + struct fc_rdp_oed_sfp_desc oed_txbias_desc; /* FC word 53-57*/ + struct fc_rdp_oed_sfp_desc oed_txpower_desc; /* FC word 58-62*/ + struct fc_rdp_oed_sfp_desc oed_rxpower_desc; /* FC word 63-67*/ + struct fc_rdp_opd_sfp_desc opd_desc; /* FC word 68-84*/ +}; + + +/* UVEM */ + +#define LPFC_UVEM_SIZE 60 +#define LPFC_UVEM_VEM_ID_DESC_SIZE 16 +#define LPFC_UVEM_VE_MAP_DESC_SIZE 20 + +#define VEM_ID_DESC_TAG 0x0001000A +struct lpfc_vem_id_desc { + uint32_t tag; + uint32_t length; + uint8_t vem_id[16]; +}; + +#define LPFC_QFPA_SIZE 4 + +#define INSTANTIATED_VE_DESC_TAG 0x0001000B +struct instantiated_ve_desc { + uint32_t tag; + uint32_t length; + uint8_t global_vem_id[16]; + uint32_t word6; +#define lpfc_instantiated_local_id_SHIFT 0 +#define lpfc_instantiated_local_id_MASK 0x000000ff 
+#define lpfc_instantiated_local_id_WORD word6 +#define lpfc_instantiated_nport_id_SHIFT 8 +#define lpfc_instantiated_nport_id_MASK 0x00ffffff +#define lpfc_instantiated_nport_id_WORD word6 +}; + +#define DEINSTANTIATED_VE_DESC_TAG 0x0001000C +struct deinstantiated_ve_desc { + uint32_t tag; + uint32_t length; + uint8_t global_vem_id[16]; + uint32_t word6; +#define lpfc_deinstantiated_nport_id_SHIFT 0 +#define lpfc_deinstantiated_nport_id_MASK 0x000000ff +#define lpfc_deinstantiated_nport_id_WORD word6 +#define lpfc_deinstantiated_local_id_SHIFT 24 +#define lpfc_deinstantiated_local_id_MASK 0x00ffffff +#define lpfc_deinstantiated_local_id_WORD word6 +}; + +/* Query Fabric Priority Allocation Response */ +#define LPFC_PRIORITY_RANGE_DESC_SIZE 12 + +struct priority_range_desc { + uint32_t tag; + uint32_t length; + uint8_t lo_range; + uint8_t hi_range; + uint8_t qos_priority; + uint8_t local_ve_id; +}; + +struct fc_qfpa_res { + uint32_t reply_sequence; /* LS_ACC or LS_RJT */ + uint32_t length; /* FC Word 1 */ + struct priority_range_desc desc[1]; +}; + +/* Application Server command code */ +/* VMID */ + +#define SLI_CT_APP_SEV_Subtypes 0x20 /* Application Server subtype */ + +#define SLI_CTAS_GAPPIA_ENT 0x0100 /* Get Application Identifier */ +#define SLI_CTAS_GALLAPPIA 0x0101 /* Get All Application Identifier */ +#define SLI_CTAS_GALLAPPIA_ID 0x0102 /* Get All Application Identifier */ + /* for Nport */ +#define SLI_CTAS_GAPPIA_IDAPP 0x0103 /* Get Application Identifier */ + /* for Nport */ +#define SLI_CTAS_RAPP_IDENT 0x0200 /* Register Application Identifier */ +#define SLI_CTAS_DAPP_IDENT 0x0300 /* Deregister Application */ + /* Identifier */ +#define SLI_CTAS_DALLAPP_ID 0x0301 /* Deregister All Application */ + /* Identifier */ + +struct entity_id_object { + uint8_t entity_id_len; + uint8_t entity_id[255]; /* VM UUID */ +}; + +struct app_id_object { + __be32 port_id; + __be32 app_id; + struct entity_id_object obj; +}; + +struct lpfc_vmid_rapp_ident_list { + __be32 no_of_objects; + struct entity_id_object obj[]; +}; + +struct lpfc_vmid_dapp_ident_list { + __be32 no_of_objects; + struct entity_id_object obj[]; +}; + +#define GALLAPPIA_ID_LAST 0x80 +struct lpfc_vmid_gallapp_ident_list { + uint8_t control; + uint8_t reserved[3]; + struct app_id_object app_id; +}; + +#define RAPP_IDENT_OFFSET (offsetof(struct lpfc_sli_ct_request, un) + 4) +#define DAPP_IDENT_OFFSET (offsetof(struct lpfc_sli_ct_request, un) + 4) +#define GALLAPPIA_ID_SIZE (offsetof(struct lpfc_sli_ct_request, un) + 4) +#define DALLAPP_ID_SIZE (offsetof(struct lpfc_sli_ct_request, un) + 4) + +/******** FDMI ********/ + +/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */ +#define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */ + +/* Definitions for HBA / Port attribute entries */ + +/* Attribute Entry Structures */ + +struct lpfc_fdmi_attr_u32 { + __be16 type; + __be16 len; + __be32 value_u32; +}; + +struct lpfc_fdmi_attr_wwn { + __be16 type; + __be16 len; + + /* Keep as u8[8] instead of __be64 to avoid accidental zero padding + * by compiler + */ + u8 name[8]; +}; + +struct lpfc_fdmi_attr_fullwwn { + __be16 type; + __be16 len; + + /* Keep as u8[8] instead of __be64 to avoid accidental zero padding + * by compiler + */ + u8 nname[8]; + u8 pname[8]; +}; + +struct lpfc_fdmi_attr_fc4types { + __be16 type; + __be16 len; + u8 value_types[32]; +}; + +struct lpfc_fdmi_attr_string { + __be16 type; + __be16 len; + char value_string[256]; +}; + +/* Maximum FDMI attribute length is Type+Len (4 bytes) + 256 
byte string */ +#define FDMI_MAX_ATTRLEN sizeof(struct lpfc_fdmi_attr_string) + +/* + * HBA Attribute Block + */ +struct lpfc_fdmi_attr_block { + uint32_t EntryCnt; /* Number of HBA attribute entries */ + /* Variable Length Attribute Entry TLV's follow */ +}; + +/* + * Port Entry + */ +struct lpfc_fdmi_port_entry { + struct lpfc_name PortName; +}; + +/* + * HBA Identifier + */ +struct lpfc_fdmi_hba_ident { + struct lpfc_name PortName; +}; + +/* + * Registered Port List Format + */ +struct lpfc_fdmi_reg_port_list { + __be32 EntryCnt; + struct lpfc_fdmi_port_entry pe; +}; + +/* + * Register HBA(RHBA) + */ +struct lpfc_fdmi_reg_hba { + struct lpfc_fdmi_hba_ident hi; + struct lpfc_fdmi_reg_port_list rpl; +}; + +/******** MI MIB ********/ +#define SLI_CT_MIB_Subtypes 0x11 + +/* + * Register HBA Attributes (RHAT) + */ +struct lpfc_fdmi_reg_hbaattr { + struct lpfc_name HBA_PortName; + struct lpfc_fdmi_attr_block ab; +}; + +/* + * Register Port Attributes (RPA) + */ +struct lpfc_fdmi_reg_portattr { + struct lpfc_name PortName; + struct lpfc_fdmi_attr_block ab; +}; + +/* + * HBA MAnagement Operations Command Codes + */ +#define SLI_MGMT_GRHL 0x100 /* Get registered HBA list */ +#define SLI_MGMT_GHAT 0x101 /* Get HBA attributes */ +#define SLI_MGMT_GRPL 0x102 /* Get registered Port list */ +#define SLI_MGMT_GPAT 0x110 /* Get Port attributes */ +#define SLI_MGMT_GPAS 0x120 /* Get Port Statistics */ +#define SLI_MGMT_RHBA 0x200 /* Register HBA */ +#define SLI_MGMT_RHAT 0x201 /* Register HBA attributes */ +#define SLI_MGMT_RPRT 0x210 /* Register Port */ +#define SLI_MGMT_RPA 0x211 /* Register Port attributes */ +#define SLI_MGMT_DHBA 0x300 /* De-register HBA */ +#define SLI_MGMT_DHAT 0x301 /* De-register HBA attributes */ +#define SLI_MGMT_DPRT 0x310 /* De-register Port */ +#define SLI_MGMT_DPA 0x311 /* De-register Port attributes */ + +#define LPFC_FDMI_MAX_RETRY 3 /* Max retries for a FDMI command */ + +/* + * HBA Attribute Types + */ +#define RHBA_NODENAME 0x1 /* 8 byte WWNN */ +#define RHBA_MANUFACTURER 0x2 /* 4 to 64 byte ASCII string */ +#define RHBA_SERIAL_NUMBER 0x3 /* 4 to 64 byte ASCII string */ +#define RHBA_MODEL 0x4 /* 4 to 256 byte ASCII string */ +#define RHBA_MODEL_DESCRIPTION 0x5 /* 4 to 256 byte ASCII string */ +#define RHBA_HARDWARE_VERSION 0x6 /* 4 to 256 byte ASCII string */ +#define RHBA_DRIVER_VERSION 0x7 /* 4 to 256 byte ASCII string */ +#define RHBA_OPTION_ROM_VERSION 0x8 /* 4 to 256 byte ASCII string */ +#define RHBA_FIRMWARE_VERSION 0x9 /* 4 to 256 byte ASCII string */ +#define RHBA_OS_NAME_VERSION 0xa /* 4 to 256 byte ASCII string */ +#define RHBA_MAX_CT_PAYLOAD_LEN 0xb /* 32-bit unsigned int */ +#define RHBA_SYM_NODENAME 0xc /* 4 to 256 byte ASCII string */ +#define RHBA_VENDOR_INFO 0xd /* 32-bit unsigned int */ +#define RHBA_NUM_PORTS 0xe /* 32-bit unsigned int */ +#define RHBA_FABRIC_WWNN 0xf /* 8 byte WWNN */ +#define RHBA_BIOS_VERSION 0x10 /* 4 to 256 byte ASCII string */ +#define RHBA_BIOS_STATE 0x11 /* 32-bit unsigned int */ +#define RHBA_VENDOR_ID 0xe0 /* 8 byte ASCII string */ + +/* Bit mask for all individual HBA attributes */ +#define LPFC_FDMI_HBA_ATTR_wwnn 0x00000001 +#define LPFC_FDMI_HBA_ATTR_manufacturer 0x00000002 +#define LPFC_FDMI_HBA_ATTR_sn 0x00000004 +#define LPFC_FDMI_HBA_ATTR_model 0x00000008 +#define LPFC_FDMI_HBA_ATTR_description 0x00000010 +#define LPFC_FDMI_HBA_ATTR_hdw_ver 0x00000020 +#define LPFC_FDMI_HBA_ATTR_drvr_ver 0x00000040 +#define LPFC_FDMI_HBA_ATTR_rom_ver 0x00000080 +#define LPFC_FDMI_HBA_ATTR_fmw_ver 0x00000100 +#define 
LPFC_FDMI_HBA_ATTR_os_ver 0x00000200 +#define LPFC_FDMI_HBA_ATTR_ct_len 0x00000400 +#define LPFC_FDMI_HBA_ATTR_symbolic_name 0x00000800 +#define LPFC_FDMI_HBA_ATTR_vendor_info 0x00001000 /* Not used */ +#define LPFC_FDMI_HBA_ATTR_num_ports 0x00002000 +#define LPFC_FDMI_HBA_ATTR_fabric_wwnn 0x00004000 +#define LPFC_FDMI_HBA_ATTR_bios_ver 0x00008000 +#define LPFC_FDMI_HBA_ATTR_bios_state 0x00010000 /* Not used */ +#define LPFC_FDMI_HBA_ATTR_vendor_id 0x00020000 + +/* Bit mask for FDMI-1 defined HBA attributes */ +#define LPFC_FDMI1_HBA_ATTR 0x000007ff + +/* Bit mask for FDMI-2 defined HBA attributes */ +/* Skip vendor_info and bios_state */ +#define LPFC_FDMI2_HBA_ATTR 0x0002efff + +/* + * Port Attribute Types + */ +#define RPRT_SUPPORTED_FC4_TYPES 0x1 /* 32 byte binary array */ +#define RPRT_SUPPORTED_SPEED 0x2 /* 32-bit unsigned int */ +#define RPRT_PORT_SPEED 0x3 /* 32-bit unsigned int */ +#define RPRT_MAX_FRAME_SIZE 0x4 /* 32-bit unsigned int */ +#define RPRT_OS_DEVICE_NAME 0x5 /* 4 to 256 byte ASCII string */ +#define RPRT_HOST_NAME 0x6 /* 4 to 256 byte ASCII string */ +#define RPRT_NODENAME 0x7 /* 8 byte WWNN */ +#define RPRT_PORTNAME 0x8 /* 8 byte WWPN */ +#define RPRT_SYM_PORTNAME 0x9 /* 4 to 256 byte ASCII string */ +#define RPRT_PORT_TYPE 0xa /* 32-bit unsigned int */ +#define RPRT_SUPPORTED_CLASS 0xb /* 32-bit unsigned int */ +#define RPRT_FABRICNAME 0xc /* 8 byte Fabric WWPN */ +#define RPRT_ACTIVE_FC4_TYPES 0xd /* 32 byte binary array */ +#define RPRT_PORT_STATE 0x101 /* 32-bit unsigned int */ +#define RPRT_DISC_PORT 0x102 /* 32-bit unsigned int */ +#define RPRT_PORT_ID 0x103 /* 32-bit unsigned int */ +#define RPRT_VENDOR_MI 0xf047 /* vendor ascii string */ +#define RPRT_SMART_SERVICE 0xf100 /* 4 to 256 byte ASCII string */ +#define RPRT_SMART_GUID 0xf101 /* 8 byte WWNN + 8 byte WWPN */ +#define RPRT_SMART_VERSION 0xf102 /* 4 to 256 byte ASCII string */ +#define RPRT_SMART_MODEL 0xf103 /* 4 to 256 byte ASCII string */ +#define RPRT_SMART_PORT_INFO 0xf104 /* 32-bit unsigned int */ +#define RPRT_SMART_QOS 0xf105 /* 32-bit unsigned int */ +#define RPRT_SMART_SECURITY 0xf106 /* 32-bit unsigned int */ + +/* Bit mask for all individual PORT attributes */ +#define LPFC_FDMI_PORT_ATTR_fc4type 0x00000001 +#define LPFC_FDMI_PORT_ATTR_support_speed 0x00000002 +#define LPFC_FDMI_PORT_ATTR_speed 0x00000004 +#define LPFC_FDMI_PORT_ATTR_max_frame 0x00000008 +#define LPFC_FDMI_PORT_ATTR_os_devname 0x00000010 +#define LPFC_FDMI_PORT_ATTR_host_name 0x00000020 +#define LPFC_FDMI_PORT_ATTR_wwnn 0x00000040 +#define LPFC_FDMI_PORT_ATTR_wwpn 0x00000080 +#define LPFC_FDMI_PORT_ATTR_symbolic_name 0x00000100 +#define LPFC_FDMI_PORT_ATTR_port_type 0x00000200 +#define LPFC_FDMI_PORT_ATTR_class 0x00000400 +#define LPFC_FDMI_PORT_ATTR_fabric_wwpn 0x00000800 +#define LPFC_FDMI_PORT_ATTR_port_state 0x00001000 +#define LPFC_FDMI_PORT_ATTR_active_fc4type 0x00002000 +#define LPFC_FDMI_PORT_ATTR_num_disc 0x00004000 +#define LPFC_FDMI_PORT_ATTR_nportid 0x00008000 +#define LPFC_FDMI_SMART_ATTR_service 0x00010000 /* Vendor specific */ +#define LPFC_FDMI_SMART_ATTR_guid 0x00020000 /* Vendor specific */ +#define LPFC_FDMI_SMART_ATTR_version 0x00040000 /* Vendor specific */ +#define LPFC_FDMI_SMART_ATTR_model 0x00080000 /* Vendor specific */ +#define LPFC_FDMI_SMART_ATTR_port_info 0x00100000 /* Vendor specific */ +#define LPFC_FDMI_SMART_ATTR_qos 0x00200000 /* Vendor specific */ +#define LPFC_FDMI_SMART_ATTR_security 0x00400000 /* Vendor specific */ +#define LPFC_FDMI_VENDOR_ATTR_mi 0x00800000 /* Vendor specific */ + 
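The LPFC_FDMI_PORT_ATTR_* and LPFC_FDMI_SMART_ATTR_* values above are single-bit flags that get OR-ed into composite masks such as the FDMI-1/FDMI-2 port-attribute masks defined next; a registration path can then walk the chosen mask and emit one FDMI attribute per set bit. A minimal sketch of that walk follows (illustrative only, not taken from the patch; EXAMPLE_PORT_ATTR_MASK and the printf stand in for the driver's composite mask and its per-attribute builder functions):

/*
 * Illustrative only -- not part of the patch.  Walks a composite attribute
 * mask lowest set bit first, handling one FDMI attribute per iteration.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PORT_ATTR_MASK 0x0000ffffU	/* stand-in for LPFC_FDMI2_PORT_ATTR */

int main(void)
{
	uint32_t mask = EXAMPLE_PORT_ATTR_MASK;

	while (mask) {
		uint32_t bit = mask & (~mask + 1U);	/* isolate lowest set bit */

		/* A real driver would invoke its per-attribute builder here. */
		printf("emit FDMI port attribute for bit 0x%08" PRIx32 "\n", bit);
		mask &= ~bit;				/* done with this attribute */
	}
	return 0;
}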
+/* Bit mask for FDMI-1 defined PORT attributes */ +#define LPFC_FDMI1_PORT_ATTR 0x0000003f + +/* Bit mask for FDMI-2 defined PORT attributes */ +#define LPFC_FDMI2_PORT_ATTR 0x0000ffff + +/* Bit mask for Smart SAN defined PORT attributes */ +#define LPFC_FDMI2_SMART_ATTR 0x007fffff + +/* Defines for PORT port state attribute */ +#define LPFC_FDMI_PORTSTATE_UNKNOWN 1 +#define LPFC_FDMI_PORTSTATE_ONLINE 2 + +/* Defines for PORT port type attribute */ +#define LPFC_FDMI_PORTTYPE_UNKNOWN 0 +#define LPFC_FDMI_PORTTYPE_NPORT 1 +#define LPFC_FDMI_PORTTYPE_NLPORT 2 + +/* + * Begin HBA configuration parameters. + * The PCI configuration register BAR assignments are: + * BAR0, offset 0x10 - SLIM base memory address + * BAR1, offset 0x14 - SLIM base memory high address + * BAR2, offset 0x18 - REGISTER base memory address + * BAR3, offset 0x1c - REGISTER base memory high address + * BAR4, offset 0x20 - BIU I/O registers + * BAR5, offset 0x24 - REGISTER base io high address + */ + +/* Number of rings currently used and available. */ +#define MAX_SLI3_CONFIGURED_RINGS 3 +#define MAX_SLI3_RINGS 4 + +/* IOCB / Mailbox is owned by FireFly */ +#define OWN_CHIP 1 + +/* IOCB / Mailbox is owned by Host */ +#define OWN_HOST 0 + +/* Number of 4-byte words in an IOCB. */ +#define IOCB_WORD_SZ 8 + +/* network headers for Dfctl field */ +#define FC_NET_HDR 0x20 + +/* Start FireFly Register definitions */ +#define PCI_VENDOR_ID_EMULEX 0x10df +#define PCI_DEVICE_ID_FIREFLY 0x1ae5 +#define PCI_DEVICE_ID_PROTEUS_VF 0xe100 +#define PCI_DEVICE_ID_BALIUS 0xe131 +#define PCI_DEVICE_ID_PROTEUS_PF 0xe180 +#define PCI_DEVICE_ID_LANCER_FC 0xe200 +#define PCI_DEVICE_ID_LANCER_FC_VF 0xe208 +#define PCI_DEVICE_ID_LANCER_FCOE 0xe260 +#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268 +#define PCI_DEVICE_ID_LANCER_G6_FC 0xe300 +#define PCI_DEVICE_ID_LANCER_G7_FC 0xf400 +#define PCI_DEVICE_ID_LANCER_G7P_FC 0xf500 +#define PCI_DEVICE_ID_SAT_SMB 0xf011 +#define PCI_DEVICE_ID_SAT_MID 0xf015 +#define PCI_DEVICE_ID_RFLY 0xf095 +#define PCI_DEVICE_ID_PFLY 0xf098 +#define PCI_DEVICE_ID_LP101 0xf0a1 +#define PCI_DEVICE_ID_TFLY 0xf0a5 +#define PCI_DEVICE_ID_BSMB 0xf0d1 +#define PCI_DEVICE_ID_BMID 0xf0d5 +#define PCI_DEVICE_ID_ZSMB 0xf0e1 +#define PCI_DEVICE_ID_ZMID 0xf0e5 +#define PCI_DEVICE_ID_NEPTUNE 0xf0f5 +#define PCI_DEVICE_ID_NEPTUNE_SCSP 0xf0f6 +#define PCI_DEVICE_ID_NEPTUNE_DCSP 0xf0f7 +#define PCI_DEVICE_ID_SAT 0xf100 +#define PCI_DEVICE_ID_SAT_SCSP 0xf111 +#define PCI_DEVICE_ID_SAT_DCSP 0xf112 +#define PCI_DEVICE_ID_FALCON 0xf180 +#define PCI_DEVICE_ID_SUPERFLY 0xf700 +#define PCI_DEVICE_ID_DRAGONFLY 0xf800 +#define PCI_DEVICE_ID_CENTAUR 0xf900 +#define PCI_DEVICE_ID_PEGASUS 0xf980 +#define PCI_DEVICE_ID_THOR 0xfa00 +#define PCI_DEVICE_ID_VIPER 0xfb00 +#define PCI_DEVICE_ID_LP10000S 0xfc00 +#define PCI_DEVICE_ID_LP11000S 0xfc10 +#define PCI_DEVICE_ID_LPE11000S 0xfc20 +#define PCI_DEVICE_ID_SAT_S 0xfc40 +#define PCI_DEVICE_ID_PROTEUS_S 0xfc50 +#define PCI_DEVICE_ID_HELIOS 0xfd00 +#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11 +#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12 +#define PCI_DEVICE_ID_ZEPHYR 0xfe00 +#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 +#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 +#define PCI_VENDOR_ID_SERVERENGINE 0x19a2 +#define PCI_DEVICE_ID_TIGERSHARK 0x0704 +#define PCI_DEVICE_ID_TOMCAT 0x0714 +#define PCI_DEVICE_ID_SKYHAWK 0x0724 +#define PCI_DEVICE_ID_SKYHAWK_VF 0x072c +#define PCI_VENDOR_ID_ATTO 0x117c +#define PCI_DEVICE_ID_CLRY_16XE 0x0064 +#define PCI_DEVICE_ID_CLRY_161E 0x0063 +#define PCI_DEVICE_ID_CLRY_162E 0x0064 +#define 
PCI_DEVICE_ID_CLRY_164E 0x0065 +#define PCI_DEVICE_ID_CLRY_16XP 0x0094 +#define PCI_DEVICE_ID_CLRY_161P 0x00a0 +#define PCI_DEVICE_ID_CLRY_162P 0x0094 +#define PCI_DEVICE_ID_CLRY_164P 0x00a1 +#define PCI_DEVICE_ID_CLRY_32XE 0x0094 +#define PCI_DEVICE_ID_CLRY_321E 0x00a2 +#define PCI_DEVICE_ID_CLRY_322E 0x00a3 +#define PCI_DEVICE_ID_CLRY_324E 0x00ac +#define PCI_DEVICE_ID_CLRY_32XP 0x00bb +#define PCI_DEVICE_ID_CLRY_321P 0x00bc +#define PCI_DEVICE_ID_CLRY_322P 0x00bd +#define PCI_DEVICE_ID_CLRY_324P 0x00be +#define PCI_DEVICE_ID_TLFC_2 0x0064 +#define PCI_DEVICE_ID_TLFC_2XX2 0x4064 +#define PCI_DEVICE_ID_TLFC_3 0x0094 +#define PCI_DEVICE_ID_TLFC_3162 0x40a6 +#define PCI_DEVICE_ID_TLFC_3322 0x40a7 + +#define JEDEC_ID_ADDRESS 0x0080001c +#define FIREFLY_JEDEC_ID 0x1ACC +#define SUPERFLY_JEDEC_ID 0x0020 +#define DRAGONFLY_JEDEC_ID 0x0021 +#define DRAGONFLY_V2_JEDEC_ID 0x0025 +#define CENTAUR_2G_JEDEC_ID 0x0026 +#define CENTAUR_1G_JEDEC_ID 0x0028 +#define PEGASUS_ORION_JEDEC_ID 0x0036 +#define PEGASUS_JEDEC_ID 0x0038 +#define THOR_JEDEC_ID 0x0012 +#define HELIOS_JEDEC_ID 0x0364 +#define ZEPHYR_JEDEC_ID 0x0577 +#define VIPER_JEDEC_ID 0x4838 +#define SATURN_JEDEC_ID 0x1004 + +#define JEDEC_ID_MASK 0x0FFFF000 +#define JEDEC_ID_SHIFT 12 +#define FC_JEDEC_ID(id) ((id & JEDEC_ID_MASK) >> JEDEC_ID_SHIFT) + +typedef struct { /* FireFly BIU registers */ + uint32_t hostAtt; /* See definitions for Host Attention + register */ + uint32_t chipAtt; /* See definitions for Chip Attention + register */ + uint32_t hostStatus; /* See definitions for Host Status register */ + uint32_t hostControl; /* See definitions for Host Control register */ + uint32_t buiConfig; /* See definitions for BIU configuration + register */ +} FF_REGS; + +/* IO Register size in bytes */ +#define FF_REG_AREA_SIZE 256 + +/* Host Attention Register */ + +#define HA_REG_OFFSET 0 /* Byte offset from register base address */ + +#define HA_R0RE_REQ 0x00000001 /* Bit 0 */ +#define HA_R0CE_RSP 0x00000002 /* Bit 1 */ +#define HA_R0ATT 0x00000008 /* Bit 3 */ +#define HA_R1RE_REQ 0x00000010 /* Bit 4 */ +#define HA_R1CE_RSP 0x00000020 /* Bit 5 */ +#define HA_R1ATT 0x00000080 /* Bit 7 */ +#define HA_R2RE_REQ 0x00000100 /* Bit 8 */ +#define HA_R2CE_RSP 0x00000200 /* Bit 9 */ +#define HA_R2ATT 0x00000800 /* Bit 11 */ +#define HA_R3RE_REQ 0x00001000 /* Bit 12 */ +#define HA_R3CE_RSP 0x00002000 /* Bit 13 */ +#define HA_R3ATT 0x00008000 /* Bit 15 */ +#define HA_LATT 0x20000000 /* Bit 29 */ +#define HA_MBATT 0x40000000 /* Bit 30 */ +#define HA_ERATT 0x80000000 /* Bit 31 */ + +#define HA_RXRE_REQ 0x00000001 /* Bit 0 */ +#define HA_RXCE_RSP 0x00000002 /* Bit 1 */ +#define HA_RXATT 0x00000008 /* Bit 3 */ +#define HA_RXMASK 0x0000000f + +#define HA_R0_CLR_MSK (HA_R0RE_REQ | HA_R0CE_RSP | HA_R0ATT) +#define HA_R1_CLR_MSK (HA_R1RE_REQ | HA_R1CE_RSP | HA_R1ATT) +#define HA_R2_CLR_MSK (HA_R2RE_REQ | HA_R2CE_RSP | HA_R2ATT) +#define HA_R3_CLR_MSK (HA_R3RE_REQ | HA_R3CE_RSP | HA_R3ATT) + +#define HA_R0_POS 3 +#define HA_R1_POS 7 +#define HA_R2_POS 11 +#define HA_R3_POS 15 +#define HA_LE_POS 29 +#define HA_MB_POS 30 +#define HA_ER_POS 31 +/* Chip Attention Register */ + +#define CA_REG_OFFSET 4 /* Byte offset from register base address */ + +#define CA_R0CE_REQ 0x00000001 /* Bit 0 */ +#define CA_R0RE_RSP 0x00000002 /* Bit 1 */ +#define CA_R0ATT 0x00000008 /* Bit 3 */ +#define CA_R1CE_REQ 0x00000010 /* Bit 4 */ +#define CA_R1RE_RSP 0x00000020 /* Bit 5 */ +#define CA_R1ATT 0x00000080 /* Bit 7 */ +#define CA_R2CE_REQ 0x00000100 /* Bit 8 */ +#define CA_R2RE_RSP 
0x00000200 /* Bit 9 */ +#define CA_R2ATT 0x00000800 /* Bit 11 */ +#define CA_R3CE_REQ 0x00001000 /* Bit 12 */ +#define CA_R3RE_RSP 0x00002000 /* Bit 13 */ +#define CA_R3ATT 0x00008000 /* Bit 15 */ +#define CA_MBATT 0x40000000 /* Bit 30 */ + +/* Host Status Register */ + +#define HS_REG_OFFSET 8 /* Byte offset from register base address */ + +#define HS_MBRDY 0x00400000 /* Bit 22 */ +#define HS_FFRDY 0x00800000 /* Bit 23 */ +#define HS_FFER8 0x01000000 /* Bit 24 */ +#define HS_FFER7 0x02000000 /* Bit 25 */ +#define HS_FFER6 0x04000000 /* Bit 26 */ +#define HS_FFER5 0x08000000 /* Bit 27 */ +#define HS_FFER4 0x10000000 /* Bit 28 */ +#define HS_FFER3 0x20000000 /* Bit 29 */ +#define HS_FFER2 0x40000000 /* Bit 30 */ +#define HS_FFER1 0x80000000 /* Bit 31 */ +#define HS_CRIT_TEMP 0x00000100 /* Bit 8 */ +#define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */ +#define UNPLUG_ERR 0x00000001 /* Indicate pci hot unplug */ +/* Host Control Register */ + +#define HC_REG_OFFSET 12 /* Byte offset from register base address */ + +#define HC_MBINT_ENA 0x00000001 /* Bit 0 */ +#define HC_R0INT_ENA 0x00000002 /* Bit 1 */ +#define HC_R1INT_ENA 0x00000004 /* Bit 2 */ +#define HC_R2INT_ENA 0x00000008 /* Bit 3 */ +#define HC_R3INT_ENA 0x00000010 /* Bit 4 */ +#define HC_INITHBI 0x02000000 /* Bit 25 */ +#define HC_INITMB 0x04000000 /* Bit 26 */ +#define HC_INITFF 0x08000000 /* Bit 27 */ +#define HC_LAINT_ENA 0x20000000 /* Bit 29 */ +#define HC_ERINT_ENA 0x80000000 /* Bit 31 */ + +/* Message Signaled Interrupt eXtension (MSI-X) message identifiers */ +#define MSIX_DFLT_ID 0 +#define MSIX_RNG0_ID 0 +#define MSIX_RNG1_ID 1 +#define MSIX_RNG2_ID 2 +#define MSIX_RNG3_ID 3 + +#define MSIX_LINK_ID 4 +#define MSIX_MBOX_ID 5 + +#define MSIX_SPARE0_ID 6 +#define MSIX_SPARE1_ID 7 + +/* Mailbox Commands */ +#define MBX_SHUTDOWN 0x00 /* terminate testing */ +#define MBX_LOAD_SM 0x01 +#define MBX_READ_NV 0x02 +#define MBX_WRITE_NV 0x03 +#define MBX_RUN_BIU_DIAG 0x04 +#define MBX_INIT_LINK 0x05 +#define MBX_DOWN_LINK 0x06 +#define MBX_CONFIG_LINK 0x07 +#define MBX_CONFIG_RING 0x09 +#define MBX_RESET_RING 0x0A +#define MBX_READ_CONFIG 0x0B +#define MBX_READ_RCONFIG 0x0C +#define MBX_READ_SPARM 0x0D +#define MBX_READ_STATUS 0x0E +#define MBX_READ_RPI 0x0F +#define MBX_READ_XRI 0x10 +#define MBX_READ_REV 0x11 +#define MBX_READ_LNK_STAT 0x12 +#define MBX_REG_LOGIN 0x13 +#define MBX_UNREG_LOGIN 0x14 +#define MBX_CLEAR_LA 0x16 +#define MBX_DUMP_MEMORY 0x17 +#define MBX_DUMP_CONTEXT 0x18 +#define MBX_RUN_DIAGS 0x19 +#define MBX_RESTART 0x1A +#define MBX_UPDATE_CFG 0x1B +#define MBX_DOWN_LOAD 0x1C +#define MBX_DEL_LD_ENTRY 0x1D +#define MBX_RUN_PROGRAM 0x1E +#define MBX_SET_MASK 0x20 +#define MBX_SET_VARIABLE 0x21 +#define MBX_UNREG_D_ID 0x23 +#define MBX_KILL_BOARD 0x24 +#define MBX_CONFIG_FARP 0x25 +#define MBX_BEACON 0x2A +#define MBX_CONFIG_MSI 0x30 +#define MBX_HEARTBEAT 0x31 +#define MBX_WRITE_VPARMS 0x32 +#define MBX_ASYNCEVT_ENABLE 0x33 +#define MBX_READ_EVENT_LOG_STATUS 0x37 +#define MBX_READ_EVENT_LOG 0x38 +#define MBX_WRITE_EVENT_LOG 0x39 + +#define MBX_PORT_CAPABILITIES 0x3B +#define MBX_PORT_IOV_CONTROL 0x3C + +#define MBX_CONFIG_HBQ 0x7C +#define MBX_LOAD_AREA 0x81 +#define MBX_RUN_BIU_DIAG64 0x84 +#define MBX_CONFIG_PORT 0x88 +#define MBX_READ_SPARM64 0x8D +#define MBX_READ_RPI64 0x8F +#define MBX_REG_LOGIN64 0x93 +#define MBX_READ_TOPOLOGY 0x95 +#define MBX_REG_VPI 0x96 +#define MBX_UNREG_VPI 0x97 + +#define MBX_WRITE_WWN 0x98 +#define MBX_SET_DEBUG 0x99 +#define MBX_LOAD_EXP_ROM 0x9C +#define MBX_SLI4_CONFIG 
0x9B +#define MBX_SLI4_REQ_FTRS 0x9D +#define MBX_MAX_CMDS 0x9E +#define MBX_RESUME_RPI 0x9E +#define MBX_SLI2_CMD_MASK 0x80 +#define MBX_REG_VFI 0x9F +#define MBX_REG_FCFI 0xA0 +#define MBX_UNREG_VFI 0xA1 +#define MBX_UNREG_FCFI 0xA2 +#define MBX_INIT_VFI 0xA3 +#define MBX_INIT_VPI 0xA4 +#define MBX_ACCESS_VDATA 0xA5 +#define MBX_REG_FCFI_MRQ 0xAF + +#define MBX_AUTH_PORT 0xF8 +#define MBX_SECURITY_MGMT 0xF9 + +/* IOCB Commands */ + +#define CMD_RCV_SEQUENCE_CX 0x01 +#define CMD_XMIT_SEQUENCE_CR 0x02 +#define CMD_XMIT_SEQUENCE_CX 0x03 +#define CMD_XMIT_BCAST_CN 0x04 +#define CMD_XMIT_BCAST_CX 0x05 +#define CMD_QUE_RING_BUF_CN 0x06 +#define CMD_QUE_XRI_BUF_CX 0x07 +#define CMD_IOCB_CONTINUE_CN 0x08 +#define CMD_RET_XRI_BUF_CX 0x09 +#define CMD_ELS_REQUEST_CR 0x0A +#define CMD_ELS_REQUEST_CX 0x0B +#define CMD_RCV_ELS_REQ_CX 0x0D +#define CMD_ABORT_XRI_CN 0x0E +#define CMD_ABORT_XRI_CX 0x0F +#define CMD_CLOSE_XRI_CN 0x10 +#define CMD_CLOSE_XRI_CX 0x11 +#define CMD_CREATE_XRI_CR 0x12 +#define CMD_CREATE_XRI_CX 0x13 +#define CMD_GET_RPI_CN 0x14 +#define CMD_XMIT_ELS_RSP_CX 0x15 +#define CMD_GET_RPI_CR 0x16 +#define CMD_XRI_ABORTED_CX 0x17 +#define CMD_FCP_IWRITE_CR 0x18 +#define CMD_FCP_IWRITE_CX 0x19 +#define CMD_FCP_IREAD_CR 0x1A +#define CMD_FCP_IREAD_CX 0x1B +#define CMD_FCP_ICMND_CR 0x1C +#define CMD_FCP_ICMND_CX 0x1D +#define CMD_FCP_TSEND_CX 0x1F +#define CMD_FCP_TRECEIVE_CX 0x21 +#define CMD_FCP_TRSP_CX 0x23 +#define CMD_FCP_AUTO_TRSP_CX 0x29 + +#define CMD_ADAPTER_MSG 0x20 +#define CMD_ADAPTER_DUMP 0x22 + +/* SLI_2 IOCB Command Set */ + +#define CMD_ASYNC_STATUS 0x7C +#define CMD_RCV_SEQUENCE64_CX 0x81 +#define CMD_XMIT_SEQUENCE64_CR 0x82 +#define CMD_XMIT_SEQUENCE64_CX 0x83 +#define CMD_XMIT_BCAST64_CN 0x84 +#define CMD_XMIT_BCAST64_CX 0x85 +#define CMD_QUE_RING_BUF64_CN 0x86 +#define CMD_QUE_XRI_BUF64_CX 0x87 +#define CMD_IOCB_CONTINUE64_CN 0x88 +#define CMD_RET_XRI_BUF64_CX 0x89 +#define CMD_ELS_REQUEST64_CR 0x8A +#define CMD_ELS_REQUEST64_CX 0x8B +#define CMD_ABORT_MXRI64_CN 0x8C +#define CMD_RCV_ELS_REQ64_CX 0x8D +#define CMD_XMIT_ELS_RSP64_CX 0x95 +#define CMD_XMIT_BLS_RSP64_CX 0x97 +#define CMD_FCP_IWRITE64_CR 0x98 +#define CMD_FCP_IWRITE64_CX 0x99 +#define CMD_FCP_IREAD64_CR 0x9A +#define CMD_FCP_IREAD64_CX 0x9B +#define CMD_FCP_ICMND64_CR 0x9C +#define CMD_FCP_ICMND64_CX 0x9D +#define CMD_FCP_TSEND64_CX 0x9F +#define CMD_FCP_TRECEIVE64_CX 0xA1 +#define CMD_FCP_TRSP64_CX 0xA3 + +#define CMD_QUE_XRI64_CX 0xB3 +#define CMD_IOCB_RCV_SEQ64_CX 0xB5 +#define CMD_IOCB_RCV_ELS64_CX 0xB7 +#define CMD_IOCB_RET_XRI64_CX 0xB9 +#define CMD_IOCB_RCV_CONT64_CX 0xBB + +#define CMD_GEN_REQUEST64_CR 0xC2 +#define CMD_GEN_REQUEST64_CX 0xC3 + +/* Unhandled SLI-3 Commands */ +#define CMD_IOCB_XMIT_MSEQ64_CR 0xB0 +#define CMD_IOCB_XMIT_MSEQ64_CX 0xB1 +#define CMD_IOCB_RCV_SEQ_LIST64_CX 0xC1 +#define CMD_IOCB_RCV_ELS_LIST64_CX 0xCD +#define CMD_IOCB_CLOSE_EXTENDED_CN 0xB6 +#define CMD_IOCB_ABORT_EXTENDED_CN 0xBA +#define CMD_IOCB_RET_HBQE64_CN 0xCA +#define CMD_IOCB_FCP_IBIDIR64_CR 0xAC +#define CMD_IOCB_FCP_IBIDIR64_CX 0xAD +#define CMD_IOCB_FCP_ITASKMGT64_CX 0xAF +#define CMD_IOCB_LOGENTRY_CN 0x94 +#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 + +/* Data Security SLI Commands */ +#define DSSCMD_IWRITE64_CR 0xF8 +#define DSSCMD_IWRITE64_CX 0xF9 +#define DSSCMD_IREAD64_CR 0xFA +#define DSSCMD_IREAD64_CX 0xFB + +#define CMD_MAX_IOCB_CMD 0xFB +#define CMD_IOCB_MASK 0xff + +#define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG + iocb */ +#define LPFC_MAX_ADPTMSG 32 /* max msg data */ +/* + * Define 
Status + */ +#define MBX_SUCCESS 0 +#define MBXERR_NUM_RINGS 1 +#define MBXERR_NUM_IOCBS 2 +#define MBXERR_IOCBS_EXCEEDED 3 +#define MBXERR_BAD_RING_NUMBER 4 +#define MBXERR_MASK_ENTRIES_RANGE 5 +#define MBXERR_MASKS_EXCEEDED 6 +#define MBXERR_BAD_PROFILE 7 +#define MBXERR_BAD_DEF_CLASS 8 +#define MBXERR_BAD_MAX_RESPONDER 9 +#define MBXERR_BAD_MAX_ORIGINATOR 10 +#define MBXERR_RPI_REGISTERED 11 +#define MBXERR_RPI_FULL 12 +#define MBXERR_NO_RESOURCES 13 +#define MBXERR_BAD_RCV_LENGTH 14 +#define MBXERR_DMA_ERROR 15 +#define MBXERR_ERROR 16 +#define MBXERR_LINK_DOWN 0x33 +#define MBXERR_SEC_NO_PERMISSION 0xF02 +#define MBX_NOT_FINISHED 255 + +#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ +#define MBX_TIMEOUT 0xfffffe /* time-out expired waiting for */ + +#define TEMPERATURE_OFFSET 0xB0 /* Slim offset for critical temperature event */ + +/* + * return code Fail + */ +#define FAILURE 1 + +/* + * Begin Structure Definitions for Mailbox Commands + */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t tval; + uint8_t tmask; + uint8_t rval; + uint8_t rmask; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint8_t rmask; + uint8_t rval; + uint8_t tmask; + uint8_t tval; +#endif +} RR_REG; + +struct ulp_bde { + uint32_t bdeAddress; +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t bdeReserved:4; + uint32_t bdeAddrHigh:4; + uint32_t bdeSize:24; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t bdeSize:24; + uint32_t bdeAddrHigh:4; + uint32_t bdeReserved:4; +#endif +}; + +typedef struct ULP_BDL { /* SLI-2 */ +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t bdeFlags:8; /* BDL Flags */ + uint32_t bdeSize:24; /* Size of BDL array in host memory (bytes) */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t bdeSize:24; /* Size of BDL array in host memory (bytes) */ + uint32_t bdeFlags:8; /* BDL Flags */ +#endif + + uint32_t addrLow; /* Address 0:31 */ + uint32_t addrHigh; /* Address 32:63 */ + uint32_t ulpIoTag32; /* Can be used for 32 bit I/O Tag */ +} ULP_BDL; + +/* + * BlockGuard Definitions + */ + +enum lpfc_protgrp_type { + LPFC_PG_TYPE_INVALID = 0, /* used to indicate errors */ + LPFC_PG_TYPE_NO_DIF, /* no DIF data pointed to by prot grp */ + LPFC_PG_TYPE_EMBD_DIF, /* DIF is embedded (inline) with data */ + LPFC_PG_TYPE_DIF_BUF /* DIF has its own scatter/gather list */ +}; + +/* PDE Descriptors */ +#define LPFC_PDE5_DESCRIPTOR 0x85 +#define LPFC_PDE6_DESCRIPTOR 0x86 +#define LPFC_PDE7_DESCRIPTOR 0x87 + +/* BlockGuard Opcodes */ +#define BG_OP_IN_NODIF_OUT_CRC 0x0 +#define BG_OP_IN_CRC_OUT_NODIF 0x1 +#define BG_OP_IN_NODIF_OUT_CSUM 0x2 +#define BG_OP_IN_CSUM_OUT_NODIF 0x3 +#define BG_OP_IN_CRC_OUT_CRC 0x4 +#define BG_OP_IN_CSUM_OUT_CSUM 0x5 +#define BG_OP_IN_CRC_OUT_CSUM 0x6 +#define BG_OP_IN_CSUM_OUT_CRC 0x7 +#define BG_OP_RAW_MODE 0x8 + +struct lpfc_pde5 { + uint32_t word0; +#define pde5_type_SHIFT 24 +#define pde5_type_MASK 0x000000ff +#define pde5_type_WORD word0 +#define pde5_rsvd0_SHIFT 0 +#define pde5_rsvd0_MASK 0x00ffffff +#define pde5_rsvd0_WORD word0 + uint32_t reftag; /* Reference Tag Value */ + uint32_t reftagtr; /* Reference Tag Translation Value */ +}; + +struct lpfc_pde6 { + uint32_t word0; +#define pde6_type_SHIFT 24 +#define pde6_type_MASK 0x000000ff +#define pde6_type_WORD word0 +#define pde6_rsvd0_SHIFT 0 +#define pde6_rsvd0_MASK 0x00ffffff +#define pde6_rsvd0_WORD word0 + uint32_t word1; +#define pde6_rsvd1_SHIFT 26 +#define pde6_rsvd1_MASK 0x0000003f +#define pde6_rsvd1_WORD word1 +#define pde6_na_SHIFT 25 +#define pde6_na_MASK 0x00000001 +#define pde6_na_WORD word1 +#define 
pde6_rsvd2_SHIFT 16 +#define pde6_rsvd2_MASK 0x000001FF +#define pde6_rsvd2_WORD word1 +#define pde6_apptagtr_SHIFT 0 +#define pde6_apptagtr_MASK 0x0000ffff +#define pde6_apptagtr_WORD word1 + uint32_t word2; +#define pde6_optx_SHIFT 28 +#define pde6_optx_MASK 0x0000000f +#define pde6_optx_WORD word2 +#define pde6_oprx_SHIFT 24 +#define pde6_oprx_MASK 0x0000000f +#define pde6_oprx_WORD word2 +#define pde6_nr_SHIFT 23 +#define pde6_nr_MASK 0x00000001 +#define pde6_nr_WORD word2 +#define pde6_ce_SHIFT 22 +#define pde6_ce_MASK 0x00000001 +#define pde6_ce_WORD word2 +#define pde6_re_SHIFT 21 +#define pde6_re_MASK 0x00000001 +#define pde6_re_WORD word2 +#define pde6_ae_SHIFT 20 +#define pde6_ae_MASK 0x00000001 +#define pde6_ae_WORD word2 +#define pde6_ai_SHIFT 19 +#define pde6_ai_MASK 0x00000001 +#define pde6_ai_WORD word2 +#define pde6_bs_SHIFT 16 +#define pde6_bs_MASK 0x00000007 +#define pde6_bs_WORD word2 +#define pde6_apptagval_SHIFT 0 +#define pde6_apptagval_MASK 0x0000ffff +#define pde6_apptagval_WORD word2 +}; + +struct lpfc_pde7 { + uint32_t word0; +#define pde7_type_SHIFT 24 +#define pde7_type_MASK 0x000000ff +#define pde7_type_WORD word0 +#define pde7_rsvd0_SHIFT 0 +#define pde7_rsvd0_MASK 0x00ffffff +#define pde7_rsvd0_WORD word0 + uint32_t addrHigh; + uint32_t addrLow; +}; + +/* Structure for MB Command LOAD_SM and DOWN_LOAD */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd2:25; + uint32_t acknowledgment:1; + uint32_t version:1; + uint32_t erase_or_prog:1; + uint32_t update_flash:1; + uint32_t update_ram:1; + uint32_t method:1; + uint32_t load_cmplt:1; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t load_cmplt:1; + uint32_t method:1; + uint32_t update_ram:1; + uint32_t update_flash:1; + uint32_t erase_or_prog:1; + uint32_t version:1; + uint32_t acknowledgment:1; + uint32_t rsvd2:25; +#endif + + uint32_t dl_to_adr_low; + uint32_t dl_to_adr_high; + uint32_t dl_len; + union { + uint32_t dl_from_mbx_offset; + struct ulp_bde dl_from_bde; + struct ulp_bde64 dl_from_bde64; + } un; + +} LOAD_SM_VAR; + +/* Structure for MB Command READ_NVPARM (02) */ + +typedef struct { + uint32_t rsvd1[3]; /* Read as all one's */ + uint32_t rsvd2; /* Read as all zero's */ + uint32_t portname[2]; /* N_PORT name */ + uint32_t nodename[2]; /* NODE name */ + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t pref_DID:24; + uint32_t hardAL_PA:8; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t hardAL_PA:8; + uint32_t pref_DID:24; +#endif + + uint32_t rsvd3[21]; /* Read as all one's */ +} READ_NV_VAR; + +/* Structure for MB Command WRITE_NVPARMS (03) */ + +typedef struct { + uint32_t rsvd1[3]; /* Must be all one's */ + uint32_t rsvd2; /* Must be all zero's */ + uint32_t portname[2]; /* N_PORT name */ + uint32_t nodename[2]; /* NODE name */ + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t pref_DID:24; + uint32_t hardAL_PA:8; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t hardAL_PA:8; + uint32_t pref_DID:24; +#endif + + uint32_t rsvd3[21]; /* Must be all one's */ +} WRITE_NV_VAR; + +/* Structure for MB Command RUN_BIU_DIAG (04) */ +/* Structure for MB Command RUN_BIU_DIAG64 (0x84) */ + +typedef struct { + uint32_t rsvd1; + union { + struct { + struct ulp_bde xmit_bde; + struct ulp_bde rcv_bde; + } s1; + struct { + struct ulp_bde64 xmit_bde64; + struct ulp_bde64 rcv_bde64; + } s2; + } un; +} BIU_DIAG_VAR; + +/* Structure for MB command READ_EVENT_LOG (0x38) */ +struct READ_EVENT_LOG_VAR { + uint32_t word1; +#define lpfc_event_log_SHIFT 29 +#define lpfc_event_log_MASK 0x00000001 +#define lpfc_event_log_WORD 
word1 +#define USE_MAILBOX_RESPONSE 1 + uint32_t offset; + struct ulp_bde64 rcv_bde64; +}; + +/* Structure for MB Command INIT_LINK (05) */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd1:24; + uint32_t lipsr_AL_PA:8; /* AL_PA to issue Lip Selective Reset to */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t lipsr_AL_PA:8; /* AL_PA to issue Lip Selective Reset to */ + uint32_t rsvd1:24; +#endif + +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t fabric_AL_PA; /* If using a Fabric Assigned AL_PA */ + uint8_t rsvd2; + uint16_t link_flags; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t link_flags; + uint8_t rsvd2; + uint8_t fabric_AL_PA; /* If using a Fabric Assigned AL_PA */ +#endif + +#define FLAGS_TOPOLOGY_MODE_LOOP_PT 0x00 /* Attempt loop then pt-pt */ +#define FLAGS_LOCAL_LB 0x01 /* link_flags (=1) ENDEC loopback */ +#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */ +#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */ +#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */ +#define FLAGS_UNREG_LOGIN_ALL 0x08 /* UNREG_LOGIN all on link down */ +#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */ + +#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */ +#define FLAGS_LINK_SPEED 0x0800 /* Bit 11 */ +#define FLAGS_IMED_ABORT 0x04000 /* Bit 14 */ + + uint32_t link_speed; +#define LINK_SPEED_AUTO 0x0 /* Auto selection */ +#define LINK_SPEED_1G 0x1 /* 1 Gigabaud */ +#define LINK_SPEED_2G 0x2 /* 2 Gigabaud */ +#define LINK_SPEED_4G 0x4 /* 4 Gigabaud */ +#define LINK_SPEED_8G 0x8 /* 8 Gigabaud */ +#define LINK_SPEED_10G 0x10 /* 10 Gigabaud */ +#define LINK_SPEED_16G 0x11 /* 16 Gigabaud */ +#define LINK_SPEED_32G 0x14 /* 32 Gigabaud */ +#define LINK_SPEED_64G 0x17 /* 64 Gigabaud */ +#define LINK_SPEED_128G 0x1A /* 128 Gigabaud */ +#define LINK_SPEED_256G 0x1D /* 256 Gigabaud */ + +} INIT_LINK_VAR; + +/* Structure for MB Command DOWN_LINK (06) */ + +typedef struct { + uint32_t rsvd1; +} DOWN_LINK_VAR; + +/* Structure for MB Command CONFIG_LINK (07) */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t cr:1; + uint32_t ci:1; + uint32_t cr_delay:6; + uint32_t cr_count:8; + uint32_t rsvd1:8; + uint32_t MaxBBC:8; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t MaxBBC:8; + uint32_t rsvd1:8; + uint32_t cr_count:8; + uint32_t cr_delay:6; + uint32_t ci:1; + uint32_t cr:1; +#endif + + uint32_t myId; + uint32_t rsvd2; + uint32_t edtov; + uint32_t arbtov; + uint32_t ratov; + uint32_t rttov; + uint32_t altov; + uint32_t crtov; + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd4:19; + uint32_t cscn:1; + uint32_t bbscn:4; + uint32_t rsvd3:8; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t rsvd3:8; + uint32_t bbscn:4; + uint32_t cscn:1; + uint32_t rsvd4:19; +#endif + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rrq_enable:1; + uint32_t rrq_immed:1; + uint32_t rsvd5:29; + uint32_t ack0_enable:1; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t ack0_enable:1; + uint32_t rsvd5:29; + uint32_t rrq_immed:1; + uint32_t rrq_enable:1; +#endif +} CONFIG_LINK; + +/* Structure for MB Command PART_SLIM (08) + * will be removed since SLI1 is no longer supported! 
+ */ +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t offCiocb; + uint16_t numCiocb; + uint16_t offRiocb; + uint16_t numRiocb; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t numCiocb; + uint16_t offCiocb; + uint16_t numRiocb; + uint16_t offRiocb; +#endif +} RING_DEF; + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t unused1:24; + uint32_t numRing:8; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t numRing:8; + uint32_t unused1:24; +#endif + + RING_DEF ringdef[4]; + uint32_t hbainit; +} PART_SLIM_VAR; + +/* Structure for MB Command CONFIG_RING (09) */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t unused2:6; + uint32_t recvSeq:1; + uint32_t recvNotify:1; + uint32_t numMask:8; + uint32_t profile:8; + uint32_t unused1:4; + uint32_t ring:4; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t ring:4; + uint32_t unused1:4; + uint32_t profile:8; + uint32_t numMask:8; + uint32_t recvNotify:1; + uint32_t recvSeq:1; + uint32_t unused2:6; +#endif + +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t maxRespXchg; + uint16_t maxOrigXchg; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t maxOrigXchg; + uint16_t maxRespXchg; +#endif + + RR_REG rrRegs[6]; +} CONFIG_RING_VAR; + +/* Structure for MB Command RESET_RING (10) */ + +typedef struct { + uint32_t ring_no; +} RESET_RING_VAR; + +/* Structure for MB Command READ_CONFIG (11) */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t cr:1; + uint32_t ci:1; + uint32_t cr_delay:6; + uint32_t cr_count:8; + uint32_t InitBBC:8; + uint32_t MaxBBC:8; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t MaxBBC:8; + uint32_t InitBBC:8; + uint32_t cr_count:8; + uint32_t cr_delay:6; + uint32_t ci:1; + uint32_t cr:1; +#endif + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t topology:8; + uint32_t myDid:24; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t myDid:24; + uint32_t topology:8; +#endif + + /* Defines for topology (defined previously) */ +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t AR:1; + uint32_t IR:1; + uint32_t rsvd1:29; + uint32_t ack0:1; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t ack0:1; + uint32_t rsvd1:29; + uint32_t IR:1; + uint32_t AR:1; +#endif + + uint32_t edtov; + uint32_t arbtov; + uint32_t ratov; + uint32_t rttov; + uint32_t altov; + uint32_t lmt; +#define LMT_RESERVED 0x000 /* Not used */ +#define LMT_1Gb 0x004 +#define LMT_2Gb 0x008 +#define LMT_4Gb 0x040 +#define LMT_8Gb 0x080 +#define LMT_10Gb 0x100 +#define LMT_16Gb 0x200 +#define LMT_32Gb 0x400 +#define LMT_64Gb 0x800 +#define LMT_128Gb 0x1000 +#define LMT_256Gb 0x2000 + uint32_t rsvd2; + uint32_t rsvd3; + uint32_t max_xri; + uint32_t max_iocb; + uint32_t max_rpi; + uint32_t avail_xri; + uint32_t avail_iocb; + uint32_t avail_rpi; + uint32_t max_vpi; + uint32_t rsvd4; + uint32_t rsvd5; + uint32_t avail_vpi; +} READ_CONFIG_VAR; + +/* Structure for MB Command READ_RCONFIG (12) */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd2:7; + uint32_t recvNotify:1; + uint32_t numMask:8; + uint32_t profile:8; + uint32_t rsvd1:4; + uint32_t ring:4; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t ring:4; + uint32_t rsvd1:4; + uint32_t profile:8; + uint32_t numMask:8; + uint32_t recvNotify:1; + uint32_t rsvd2:7; +#endif + +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t maxResp; + uint16_t maxOrig; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t maxOrig; + uint16_t maxResp; +#endif + + RR_REG rrRegs[6]; + +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t cmdRingOffset; + uint16_t cmdEntryCnt; + uint16_t rspRingOffset; + uint16_t rspEntryCnt; + uint16_t nextCmdOffset; + uint16_t 
rsvd3; + uint16_t nextRspOffset; + uint16_t rsvd4; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t cmdEntryCnt; + uint16_t cmdRingOffset; + uint16_t rspEntryCnt; + uint16_t rspRingOffset; + uint16_t rsvd3; + uint16_t nextCmdOffset; + uint16_t rsvd4; + uint16_t nextRspOffset; +#endif +} READ_RCONF_VAR; + +/* Structure for MB Command READ_SPARM (13) */ +/* Structure for MB Command READ_SPARM64 (0x8D) */ + +typedef struct { + uint32_t rsvd1; + uint32_t rsvd2; + union { + struct ulp_bde sp; /* This BDE points to struct serv_parm + structure */ + struct ulp_bde64 sp64; + } un; +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t rsvd3; + uint16_t vpi; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t vpi; + uint16_t rsvd3; +#endif +} READ_SPARM_VAR; + +/* Structure for MB Command READ_STATUS (14) */ +enum read_status_word1 { + RD_ST_CC = 0x01, + RD_ST_XKB = 0x80, +}; + +enum read_status_word17 { + RD_ST_XMIT_XKB_MASK = 0x3fffff, +}; + +enum read_status_word18 { + RD_ST_RCV_XKB_MASK = 0x3fffff, +}; + +typedef struct { + u8 clear_counters; /* rsvd 7:1, cc 0 */ + u8 rsvd5; + u8 rsvd6; + u8 xkb; /* xkb 7, rsvd 6:0 */ + + u32 rsvd8; + + uint32_t xmitByteCnt; + uint32_t rcvByteCnt; + uint32_t xmitFrameCnt; + uint32_t rcvFrameCnt; + uint32_t xmitSeqCnt; + uint32_t rcvSeqCnt; + uint32_t totalOrigExchanges; + uint32_t totalRespExchanges; + uint32_t rcvPbsyCnt; + uint32_t rcvFbsyCnt; + + u32 drop_frame_no_rq; + u32 empty_rq; + u32 drop_frame_no_xri; + u32 empty_xri; + + u32 xmit_xkb; /* rsvd 31:22, xmit_xkb 21:0 */ + u32 rcv_xkb; /* rsvd 31:22, rcv_xkb 21:0 */ +} READ_STATUS_VAR; + +/* Structure for MB Command READ_RPI (15) */ +/* Structure for MB Command READ_RPI64 (0x8F) */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t nextRpi; + uint16_t reqRpi; + uint32_t rsvd2:8; + uint32_t DID:24; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t reqRpi; + uint16_t nextRpi; + uint32_t DID:24; + uint32_t rsvd2:8; +#endif + + union { + struct ulp_bde sp; + struct ulp_bde64 sp64; + } un; + +} READ_RPI_VAR; + +/* Structure for MB Command READ_XRI (16) */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t nextXri; + uint16_t reqXri; + uint16_t rsvd1; + uint16_t rpi; + uint32_t rsvd2:8; + uint32_t DID:24; + uint32_t rsvd3:8; + uint32_t SID:24; + uint32_t rsvd4; + uint8_t seqId; + uint8_t rsvd5; + uint16_t seqCount; + uint16_t oxId; + uint16_t rxId; + uint32_t rsvd6:30; + uint32_t si:1; + uint32_t exchOrig:1; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t reqXri; + uint16_t nextXri; + uint16_t rpi; + uint16_t rsvd1; + uint32_t DID:24; + uint32_t rsvd2:8; + uint32_t SID:24; + uint32_t rsvd3:8; + uint32_t rsvd4; + uint16_t seqCount; + uint8_t rsvd5; + uint8_t seqId; + uint16_t rxId; + uint16_t oxId; + uint32_t exchOrig:1; + uint32_t si:1; + uint32_t rsvd6:30; +#endif +} READ_XRI_VAR; + +/* Structure for MB Command READ_REV (17) */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t cv:1; + uint32_t rr:1; + uint32_t rsvd2:2; + uint32_t v3req:1; + uint32_t v3rsp:1; + uint32_t rsvd1:25; + uint32_t rv:1; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t rv:1; + uint32_t rsvd1:25; + uint32_t v3rsp:1; + uint32_t v3req:1; + uint32_t rsvd2:2; + uint32_t rr:1; + uint32_t cv:1; +#endif + + uint32_t biuRev; + uint32_t smRev; + union { + uint32_t smFwRev; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t ProgType; + uint8_t ProgId; + uint16_t ProgVer:4; + uint16_t ProgRev:4; + uint16_t ProgFixLvl:2; + uint16_t ProgDistType:2; + uint16_t DistCnt:4; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t DistCnt:4; + 
uint16_t ProgDistType:2; + uint16_t ProgFixLvl:2; + uint16_t ProgRev:4; + uint16_t ProgVer:4; + uint8_t ProgId; + uint8_t ProgType; +#endif + + } b; + } un; + uint32_t endecRev; +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t feaLevelHigh; + uint8_t feaLevelLow; + uint8_t fcphHigh; + uint8_t fcphLow; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint8_t fcphLow; + uint8_t fcphHigh; + uint8_t feaLevelLow; + uint8_t feaLevelHigh; +#endif + + uint32_t postKernRev; + uint32_t opFwRev; + uint8_t opFwName[16]; + uint32_t sli1FwRev; + uint8_t sli1FwName[16]; + uint32_t sli2FwRev; + uint8_t sli2FwName[16]; + uint32_t sli3Feat; + uint32_t RandomData[6]; +} READ_REV_VAR; + +/* Structure for MB Command READ_LINK_STAT (18) */ + +typedef struct { + uint32_t word0; + +#define lpfc_read_link_stat_rec_SHIFT 0 +#define lpfc_read_link_stat_rec_MASK 0x1 +#define lpfc_read_link_stat_rec_WORD word0 + +#define lpfc_read_link_stat_gec_SHIFT 1 +#define lpfc_read_link_stat_gec_MASK 0x1 +#define lpfc_read_link_stat_gec_WORD word0 + +#define lpfc_read_link_stat_w02oftow23of_SHIFT 2 +#define lpfc_read_link_stat_w02oftow23of_MASK 0x3FFFFF +#define lpfc_read_link_stat_w02oftow23of_WORD word0 + +#define lpfc_read_link_stat_rsvd_SHIFT 24 +#define lpfc_read_link_stat_rsvd_MASK 0x1F +#define lpfc_read_link_stat_rsvd_WORD word0 + +#define lpfc_read_link_stat_gec2_SHIFT 29 +#define lpfc_read_link_stat_gec2_MASK 0x1 +#define lpfc_read_link_stat_gec2_WORD word0 + +#define lpfc_read_link_stat_clrc_SHIFT 30 +#define lpfc_read_link_stat_clrc_MASK 0x1 +#define lpfc_read_link_stat_clrc_WORD word0 + +#define lpfc_read_link_stat_clof_SHIFT 31 +#define lpfc_read_link_stat_clof_MASK 0x1 +#define lpfc_read_link_stat_clof_WORD word0 + + uint32_t linkFailureCnt; + uint32_t lossSyncCnt; + uint32_t lossSignalCnt; + uint32_t primSeqErrCnt; + uint32_t invalidXmitWord; + uint32_t crcCnt; + uint32_t primSeqTimeout; + uint32_t elasticOverrun; + uint32_t arbTimeout; + uint32_t advRecBufCredit; + uint32_t curRecBufCredit; + uint32_t advTransBufCredit; + uint32_t curTransBufCredit; + uint32_t recEofCount; + uint32_t recEofdtiCount; + uint32_t recEofniCount; + uint32_t recSofcount; + uint32_t rsvd1; + uint32_t rsvd2; + uint32_t recDrpXriCount; + uint32_t fecCorrBlkCount; + uint32_t fecUncorrBlkCount; +} READ_LNK_VAR; + +/* Structure for MB Command REG_LOGIN (19) */ +/* Structure for MB Command REG_LOGIN64 (0x93) */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t rsvd1; + uint16_t rpi; + uint32_t rsvd2:8; + uint32_t did:24; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t rpi; + uint16_t rsvd1; + uint32_t did:24; + uint32_t rsvd2:8; +#endif + + union { + struct ulp_bde sp; + struct ulp_bde64 sp64; + } un; + +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t rsvd6; + uint16_t vpi; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t vpi; + uint16_t rsvd6; +#endif + +} REG_LOGIN_VAR; + +/* Word 30 contents for REG_LOGIN */ +typedef union { + struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t rsvd1:12; + uint16_t wd30_class:4; + uint16_t xri; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t xri; + uint16_t wd30_class:4; + uint16_t rsvd1:12; +#endif + } f; + uint32_t word; +} REG_WD30; + +/* Structure for MB Command UNREG_LOGIN (20) */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t rsvd1; + uint16_t rpi; + uint32_t rsvd2; + uint32_t rsvd3; + uint32_t rsvd4; + uint32_t rsvd5; + uint16_t rsvd6; + uint16_t vpi; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t rpi; + uint16_t rsvd1; + uint32_t rsvd2; + uint32_t rsvd3; + uint32_t rsvd4; + uint32_t rsvd5; + 
uint16_t vpi; + uint16_t rsvd6; +#endif +} UNREG_LOGIN_VAR; + +/* Structure for MB Command REG_VPI (0x96) */ +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd1; + uint32_t rsvd2:7; + uint32_t upd:1; + uint32_t sid:24; + uint32_t wwn[2]; + uint32_t rsvd5; + uint16_t vfi; + uint16_t vpi; +#else /* __LITTLE_ENDIAN */ + uint32_t rsvd1; + uint32_t sid:24; + uint32_t upd:1; + uint32_t rsvd2:7; + uint32_t wwn[2]; + uint32_t rsvd5; + uint16_t vpi; + uint16_t vfi; +#endif +} REG_VPI_VAR; + +/* Structure for MB Command UNREG_VPI (0x97) */ +typedef struct { + uint32_t rsvd1; +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t rsvd2; + uint16_t sli4_vpi; +#else /* __LITTLE_ENDIAN */ + uint16_t sli4_vpi; + uint16_t rsvd2; +#endif + uint32_t rsvd3; + uint32_t rsvd4; + uint32_t rsvd5; +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t rsvd6; + uint16_t vpi; +#else /* __LITTLE_ENDIAN */ + uint16_t vpi; + uint16_t rsvd6; +#endif +} UNREG_VPI_VAR; + +/* Structure for MB Command UNREG_D_ID (0x23) */ + +typedef struct { + uint32_t did; + uint32_t rsvd2; + uint32_t rsvd3; + uint32_t rsvd4; + uint32_t rsvd5; +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t rsvd6; + uint16_t vpi; +#else + uint16_t vpi; + uint16_t rsvd6; +#endif +} UNREG_D_ID_VAR; + +/* Structure for MB Command READ_TOPOLOGY (0x95) */ +struct lpfc_mbx_read_top { + uint32_t eventTag; /* Event tag */ + uint32_t word2; +#define lpfc_mbx_read_top_fa_SHIFT 12 +#define lpfc_mbx_read_top_fa_MASK 0x00000001 +#define lpfc_mbx_read_top_fa_WORD word2 +#define lpfc_mbx_read_top_mm_SHIFT 11 +#define lpfc_mbx_read_top_mm_MASK 0x00000001 +#define lpfc_mbx_read_top_mm_WORD word2 +#define lpfc_mbx_read_top_pb_SHIFT 9 +#define lpfc_mbx_read_top_pb_MASK 0X00000001 +#define lpfc_mbx_read_top_pb_WORD word2 +#define lpfc_mbx_read_top_il_SHIFT 8 +#define lpfc_mbx_read_top_il_MASK 0x00000001 +#define lpfc_mbx_read_top_il_WORD word2 +#define lpfc_mbx_read_top_att_type_SHIFT 0 +#define lpfc_mbx_read_top_att_type_MASK 0x000000FF +#define lpfc_mbx_read_top_att_type_WORD word2 +#define LPFC_ATT_RESERVED 0x00 /* Reserved - attType */ +#define LPFC_ATT_LINK_UP 0x01 /* Link is up */ +#define LPFC_ATT_LINK_DOWN 0x02 /* Link is down */ +#define LPFC_ATT_UNEXP_WWPN 0x06 /* Link is down Unexpected WWPN */ + uint32_t word3; +#define lpfc_mbx_read_top_alpa_granted_SHIFT 24 +#define lpfc_mbx_read_top_alpa_granted_MASK 0x000000FF +#define lpfc_mbx_read_top_alpa_granted_WORD word3 +#define lpfc_mbx_read_top_lip_alps_SHIFT 16 +#define lpfc_mbx_read_top_lip_alps_MASK 0x000000FF +#define lpfc_mbx_read_top_lip_alps_WORD word3 +#define lpfc_mbx_read_top_lip_type_SHIFT 8 +#define lpfc_mbx_read_top_lip_type_MASK 0x000000FF +#define lpfc_mbx_read_top_lip_type_WORD word3 +#define lpfc_mbx_read_top_topology_SHIFT 0 +#define lpfc_mbx_read_top_topology_MASK 0x000000FF +#define lpfc_mbx_read_top_topology_WORD word3 +#define LPFC_TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */ +#define LPFC_TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */ + /* store the LILP AL_PA position map into */ + struct ulp_bde64 lilpBde64; +#define LPFC_ALPA_MAP_SIZE 128 + uint32_t word7; +#define lpfc_mbx_read_top_ld_lu_SHIFT 31 +#define lpfc_mbx_read_top_ld_lu_MASK 0x00000001 +#define lpfc_mbx_read_top_ld_lu_WORD word7 +#define lpfc_mbx_read_top_ld_tf_SHIFT 30 +#define lpfc_mbx_read_top_ld_tf_MASK 0x00000001 +#define lpfc_mbx_read_top_ld_tf_WORD word7 +#define lpfc_mbx_read_top_ld_link_spd_SHIFT 8 +#define lpfc_mbx_read_top_ld_link_spd_MASK 0x000000FF +#define lpfc_mbx_read_top_ld_link_spd_WORD word7 +#define
lpfc_mbx_read_top_ld_nl_port_SHIFT 4 +#define lpfc_mbx_read_top_ld_nl_port_MASK 0x0000000F +#define lpfc_mbx_read_top_ld_nl_port_WORD word7 +#define lpfc_mbx_read_top_ld_tx_SHIFT 2 +#define lpfc_mbx_read_top_ld_tx_MASK 0x00000003 +#define lpfc_mbx_read_top_ld_tx_WORD word7 +#define lpfc_mbx_read_top_ld_rx_SHIFT 0 +#define lpfc_mbx_read_top_ld_rx_MASK 0x00000003 +#define lpfc_mbx_read_top_ld_rx_WORD word7 + uint32_t word8; +#define lpfc_mbx_read_top_lu_SHIFT 31 +#define lpfc_mbx_read_top_lu_MASK 0x00000001 +#define lpfc_mbx_read_top_lu_WORD word8 +#define lpfc_mbx_read_top_tf_SHIFT 30 +#define lpfc_mbx_read_top_tf_MASK 0x00000001 +#define lpfc_mbx_read_top_tf_WORD word8 +#define lpfc_mbx_read_top_link_spd_SHIFT 8 +#define lpfc_mbx_read_top_link_spd_MASK 0x000000FF +#define lpfc_mbx_read_top_link_spd_WORD word8 +#define lpfc_mbx_read_top_nl_port_SHIFT 4 +#define lpfc_mbx_read_top_nl_port_MASK 0x0000000F +#define lpfc_mbx_read_top_nl_port_WORD word8 +#define lpfc_mbx_read_top_tx_SHIFT 2 +#define lpfc_mbx_read_top_tx_MASK 0x00000003 +#define lpfc_mbx_read_top_tx_WORD word8 +#define lpfc_mbx_read_top_rx_SHIFT 0 +#define lpfc_mbx_read_top_rx_MASK 0x00000003 +#define lpfc_mbx_read_top_rx_WORD word8 +#define LPFC_LINK_SPEED_UNKNOWN 0x0 +#define LPFC_LINK_SPEED_1GHZ 0x04 +#define LPFC_LINK_SPEED_2GHZ 0x08 +#define LPFC_LINK_SPEED_4GHZ 0x10 +#define LPFC_LINK_SPEED_8GHZ 0x20 +#define LPFC_LINK_SPEED_10GHZ 0x40 +#define LPFC_LINK_SPEED_16GHZ 0x80 +#define LPFC_LINK_SPEED_32GHZ 0x90 +#define LPFC_LINK_SPEED_64GHZ 0xA0 +#define LPFC_LINK_SPEED_128GHZ 0xB0 +#define LPFC_LINK_SPEED_256GHZ 0xC0 +}; + +/* Structure for MB Command CLEAR_LA (22) */ + +typedef struct { + uint32_t eventTag; /* Event tag */ + uint32_t rsvd1; +} CLEAR_LA_VAR; + +/* Structure for MB Command DUMP */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd:25; + uint32_t ra:1; + uint32_t co:1; + uint32_t cv:1; + uint32_t type:4; + uint32_t entry_index:16; + uint32_t region_id:16; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t type:4; + uint32_t cv:1; + uint32_t co:1; + uint32_t ra:1; + uint32_t rsvd:25; + uint32_t region_id:16; + uint32_t entry_index:16; +#endif + + uint32_t sli4_length; + uint32_t word_cnt; + uint32_t resp_offset; +} DUMP_VAR; + +#define DMP_MEM_REG 0x1 +#define DMP_NV_PARAMS 0x2 +#define DMP_LMSD 0x3 /* Link Module Serial Data */ +#define DMP_WELL_KNOWN 0x4 + +#define DMP_REGION_VPD 0xe +#define DMP_VPD_SIZE 0x400 /* maximum amount of VPD */ +#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ +#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ + +#define DMP_REGION_VPORT 0x16 /* VPort info region */ +#define DMP_VPORT_REGION_SIZE 0x200 +#define DMP_MBOX_OFFSET_WORD 0x5 + +#define DMP_REGION_23 0x17 /* fcoe param and port state region */ +#define DMP_RGN23_SIZE 0x400 + +#define WAKE_UP_PARMS_REGION_ID 4 +#define WAKE_UP_PARMS_WORD_SIZE 15 + +struct vport_rec { + uint8_t wwpn[8]; + uint8_t wwnn[8]; +}; + +#define VPORT_INFO_SIG 0x32324752 +#define VPORT_INFO_REV_MASK 0xff +#define VPORT_INFO_REV 0x1 +#define MAX_STATIC_VPORT_COUNT 16 +struct static_vport_info { + uint32_t signature; + uint32_t rev; + struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT]; + uint32_t resvd[66]; +}; + +/* Option rom version structure */ +struct prog_id { +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t type; + uint8_t id; + uint32_t ver:4; /* Major Version */ + uint32_t rev:4; /* Revision */ + uint32_t lev:2; /* Level */ + uint32_t dist:2; /* Dist Type */ + uint32_t num:4; /* number after dist type */ 
+#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t num:4; /* number after dist type */ + uint32_t dist:2; /* Dist Type */ + uint32_t lev:2; /* Level */ + uint32_t rev:4; /* Revision */ + uint32_t ver:4; /* Major Version */ + uint8_t id; + uint8_t type; +#endif +}; + +/* Structure for MB Command UPDATE_CFG (0x1B) */ + +struct update_cfg_var { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd2:16; + uint32_t type:8; + uint32_t rsvd:1; + uint32_t ra:1; + uint32_t co:1; + uint32_t cv:1; + uint32_t req:4; + uint32_t entry_length:16; + uint32_t region_id:16; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t req:4; + uint32_t cv:1; + uint32_t co:1; + uint32_t ra:1; + uint32_t rsvd:1; + uint32_t type:8; + uint32_t rsvd2:16; + uint32_t region_id:16; + uint32_t entry_length:16; +#endif + + uint32_t resp_info; + uint32_t byte_cnt; + uint32_t data_offset; +}; + +struct hbq_mask { +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t tmatch; + uint8_t tmask; + uint8_t rctlmatch; + uint8_t rctlmask; +#else /* __LITTLE_ENDIAN */ + uint8_t rctlmask; + uint8_t rctlmatch; + uint8_t tmask; + uint8_t tmatch; +#endif +}; + + +/* Structure for MB Command CONFIG_HBQ (7c) */ + +struct config_hbq_var { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd1 :7; + uint32_t recvNotify :1; /* Receive Notification */ + uint32_t numMask :8; /* # Mask Entries */ + uint32_t profile :8; /* Selection Profile */ + uint32_t rsvd2 :8; +#else /* __LITTLE_ENDIAN */ + uint32_t rsvd2 :8; + uint32_t profile :8; /* Selection Profile */ + uint32_t numMask :8; /* # Mask Entries */ + uint32_t recvNotify :1; /* Receive Notification */ + uint32_t rsvd1 :7; +#endif + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t hbqId :16; + uint32_t rsvd3 :12; + uint32_t ringMask :4; +#else /* __LITTLE_ENDIAN */ + uint32_t ringMask :4; + uint32_t rsvd3 :12; + uint32_t hbqId :16; +#endif + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t entry_count :16; + uint32_t rsvd4 :8; + uint32_t headerLen :8; +#else /* __LITTLE_ENDIAN */ + uint32_t headerLen :8; + uint32_t rsvd4 :8; + uint32_t entry_count :16; +#endif + + uint32_t hbqaddrLow; + uint32_t hbqaddrHigh; + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd5 :31; + uint32_t logEntry :1; +#else /* __LITTLE_ENDIAN */ + uint32_t logEntry :1; + uint32_t rsvd5 :31; +#endif + + uint32_t rsvd6; /* w7 */ + uint32_t rsvd7; /* w8 */ + uint32_t rsvd8; /* w9 */ + + struct hbq_mask hbqMasks[6]; + + + union { + uint32_t allprofiles[12]; + + struct { + #ifdef __BIG_ENDIAN_BITFIELD + uint32_t seqlenoff :16; + uint32_t maxlen :16; + #else /* __LITTLE_ENDIAN */ + uint32_t maxlen :16; + uint32_t seqlenoff :16; + #endif + #ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd1 :28; + uint32_t seqlenbcnt :4; + #else /* __LITTLE_ENDIAN */ + uint32_t seqlenbcnt :4; + uint32_t rsvd1 :28; + #endif + uint32_t rsvd[10]; + } profile2; + + struct { + #ifdef __BIG_ENDIAN_BITFIELD + uint32_t seqlenoff :16; + uint32_t maxlen :16; + #else /* __LITTLE_ENDIAN */ + uint32_t maxlen :16; + uint32_t seqlenoff :16; + #endif + #ifdef __BIG_ENDIAN_BITFIELD + uint32_t cmdcodeoff :28; + uint32_t rsvd1 :12; + uint32_t seqlenbcnt :4; + #else /* __LITTLE_ENDIAN */ + uint32_t seqlenbcnt :4; + uint32_t rsvd1 :12; + uint32_t cmdcodeoff :28; + #endif + uint32_t cmdmatch[8]; + + uint32_t rsvd[2]; + } profile3; + + struct { + #ifdef __BIG_ENDIAN_BITFIELD + uint32_t seqlenoff :16; + uint32_t maxlen :16; + #else /* __LITTLE_ENDIAN */ + uint32_t maxlen :16; + uint32_t seqlenoff :16; + #endif + #ifdef __BIG_ENDIAN_BITFIELD + uint32_t cmdcodeoff :28; + uint32_t rsvd1 :12; + uint32_t seqlenbcnt :4; + #else /* 
__LITTLE_ENDIAN */ + uint32_t seqlenbcnt :4; + uint32_t rsvd1 :12; + uint32_t cmdcodeoff :28; + #endif + uint32_t cmdmatch[8]; + + uint32_t rsvd[2]; + } profile5; + + } profiles; + +}; + + + +/* Structure for MB Command CONFIG_PORT (0x88) */ +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t cBE : 1; + uint32_t cET : 1; + uint32_t cHpcb : 1; + uint32_t cMA : 1; + uint32_t sli_mode : 4; + uint32_t pcbLen : 24; /* bit 23:0 of memory based port + * config block */ +#else /* __LITTLE_ENDIAN */ + uint32_t pcbLen : 24; /* bit 23:0 of memory based port + * config block */ + uint32_t sli_mode : 4; + uint32_t cMA : 1; + uint32_t cHpcb : 1; + uint32_t cET : 1; + uint32_t cBE : 1; +#endif + + uint32_t pcbLow; /* bit 31:0 of memory based port config block */ + uint32_t pcbHigh; /* bit 63:32 of memory based port config block */ + uint32_t hbainit[5]; +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */ + uint32_t rsvd : 31; /* least significant 31 bits of word 9 */ +#else /* __LITTLE_ENDIAN */ + uint32_t rsvd : 31; /* least significant 31 bits of word 9 */ + uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */ +#endif + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd1 : 20; /* Reserved */ + uint32_t casabt : 1; /* Configure async abts status notice */ + uint32_t rsvd2 : 2; /* Reserved */ + uint32_t cbg : 1; /* Configure BlockGuard */ + uint32_t cmv : 1; /* Configure Max VPIs */ + uint32_t ccrp : 1; /* Config Command Ring Polling */ + uint32_t csah : 1; /* Configure Synchronous Abort Handling */ + uint32_t chbs : 1; /* Configure Host Backing store */ + uint32_t cinb : 1; /* Enable Interrupt Notification Block */ + uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */ + uint32_t cmx : 1; /* Configure Max XRIs */ + uint32_t cmr : 1; /* Configure Max RPIs */ +#else /* __LITTLE_ENDIAN */ + uint32_t cmr : 1; /* Configure Max RPIs */ + uint32_t cmx : 1; /* Configure Max XRIs */ + uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */ + uint32_t cinb : 1; /* Enable Interrupt Notification Block */ + uint32_t chbs : 1; /* Configure Host Backing store */ + uint32_t csah : 1; /* Configure Synchronous Abort Handling */ + uint32_t ccrp : 1; /* Config Command Ring Polling */ + uint32_t cmv : 1; /* Configure Max VPIs */ + uint32_t cbg : 1; /* Configure BlockGuard */ + uint32_t rsvd2 : 2; /* Reserved */ + uint32_t casabt : 1; /* Configure async abts status notice */ + uint32_t rsvd1 : 20; /* Reserved */ +#endif +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd3 : 20; /* Reserved */ + uint32_t gasabt : 1; /* Grant async abts status notice */ + uint32_t rsvd4 : 2; /* Reserved */ + uint32_t gbg : 1; /* Grant BlockGuard */ + uint32_t gmv : 1; /* Grant Max VPIs */ + uint32_t gcrp : 1; /* Grant Command Ring Polling */ + uint32_t gsah : 1; /* Grant Synchronous Abort Handling */ + uint32_t ghbs : 1; /* Grant Host Backing Store */ + uint32_t ginb : 1; /* Grant Interrupt Notification Block */ + uint32_t gerbm : 1; /* Grant ERBM Request */ + uint32_t gmx : 1; /* Grant Max XRIs */ + uint32_t gmr : 1; /* Grant Max RPIs */ +#else /* __LITTLE_ENDIAN */ + uint32_t gmr : 1; /* Grant Max RPIs */ + uint32_t gmx : 1; /* Grant Max XRIs */ + uint32_t gerbm : 1; /* Grant ERBM Request */ + uint32_t ginb : 1; /* Grant Interrupt Notification Block */ + uint32_t ghbs : 1; /* Grant Host Backing Store */ + uint32_t gsah : 1; /* Grant Synchronous Abort Handling */ + uint32_t gcrp : 1; /* Grant Command Ring Polling */ + uint32_t gmv : 1; /* Grant Max VPIs */ + uint32_t gbg : 1; /* Grant 
BlockGuard */ + uint32_t rsvd4 : 2; /* Reserved */ + uint32_t gasabt : 1; /* Grant async abts status notice */ + uint32_t rsvd3 : 20; /* Reserved */ +#endif + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t max_rpi : 16; /* Max RPIs Port should configure */ + uint32_t max_xri : 16; /* Max XRIs Port should configure */ +#else /* __LITTLE_ENDIAN */ + uint32_t max_xri : 16; /* Max XRIs Port should configure */ + uint32_t max_rpi : 16; /* Max RPIs Port should configure */ +#endif + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ + uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */ +#else /* __LITTLE_ENDIAN */ + uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */ + uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ +#endif + + uint32_t rsvd6; /* Reserved */ + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd7 : 16; + uint32_t max_vpi : 16; /* Max number of virt N-Ports */ +#else /* __LITTLE_ENDIAN */ + uint32_t max_vpi : 16; /* Max number of virt N-Ports */ + uint32_t rsvd7 : 16; +#endif + +} CONFIG_PORT_VAR; + +/* Structure for MB Command CONFIG_MSI (0x30) */ +struct config_msi_var { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t dfltMsgNum:8; /* Default message number */ + uint32_t rsvd1:11; /* Reserved */ + uint32_t NID:5; /* Number of secondary attention IDs */ + uint32_t rsvd2:5; /* Reserved */ + uint32_t dfltPresent:1; /* Default message number present */ + uint32_t addFlag:1; /* Add association flag */ + uint32_t reportFlag:1; /* Report association flag */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t reportFlag:1; /* Report association flag */ + uint32_t addFlag:1; /* Add association flag */ + uint32_t dfltPresent:1; /* Default message number present */ + uint32_t rsvd2:5; /* Reserved */ + uint32_t NID:5; /* Number of secondary attention IDs */ + uint32_t rsvd1:11; /* Reserved */ + uint32_t dfltMsgNum:8; /* Default message number */ +#endif + uint32_t attentionConditions[2]; + uint8_t attentionId[16]; + uint8_t messageNumberByHA[64]; + uint8_t messageNumberByID[16]; + uint32_t autoClearHA[2]; +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd3:16; + uint32_t autoClearID:16; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t autoClearID:16; + uint32_t rsvd3:16; +#endif + uint32_t rsvd4; +}; + +/* SLI-2 Port Control Block */ + +/* SLIM POINTER */ +#define SLIMOFF 0x30 /* WORD */ + +typedef struct _SLI2_RDSC { + uint32_t cmdEntries; + uint32_t cmdAddrLow; + uint32_t cmdAddrHigh; + + uint32_t rspEntries; + uint32_t rspAddrLow; + uint32_t rspAddrHigh; +} SLI2_RDSC; + +typedef struct _PCB { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t type:8; +#define TYPE_NATIVE_SLI2 0x01 + uint32_t feature:8; +#define FEATURE_INITIAL_SLI2 0x01 + uint32_t rsvd:12; + uint32_t maxRing:4; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t maxRing:4; + uint32_t rsvd:12; + uint32_t feature:8; +#define FEATURE_INITIAL_SLI2 0x01 + uint32_t type:8; +#define TYPE_NATIVE_SLI2 0x01 +#endif + + uint32_t mailBoxSize; + uint32_t mbAddrLow; + uint32_t mbAddrHigh; + + uint32_t hgpAddrLow; + uint32_t hgpAddrHigh; + + uint32_t pgpAddrLow; + uint32_t pgpAddrHigh; + SLI2_RDSC rdsc[MAX_SLI3_RINGS]; +} PCB_t; + +/* NEW_FEATURE */ +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd0:27; + uint32_t discardFarp:1; + uint32_t IPEnable:1; + uint32_t nodeName:1; + uint32_t portName:1; + uint32_t filterEnable:1; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t filterEnable:1; + uint32_t portName:1; + uint32_t nodeName:1; + uint32_t IPEnable:1; + uint32_t discardFarp:1; + 
uint32_t rsvd:27; +#endif + + uint8_t portname[8]; /* Used to be struct lpfc_name */ + uint8_t nodename[8]; + uint32_t rsvd1; + uint32_t rsvd2; + uint32_t rsvd3; + uint32_t IPAddress; +} CONFIG_FARP_VAR; + +/* Structure for MB Command MBX_ASYNCEVT_ENABLE (0x33) */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd:30; + uint32_t ring:2; /* Ring for ASYNC_EVENT iocb Bits 0-1*/ +#else /* __LITTLE_ENDIAN */ + uint32_t ring:2; /* Ring for ASYNC_EVENT iocb Bits 0-1*/ + uint32_t rsvd:30; +#endif +} ASYNCEVT_ENABLE_VAR; + +/* Union of all Mailbox Command types */ +#define MAILBOX_CMD_WSIZE 32 +#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t)) +/* ext_wsize times 4 bytes should not be greater than max xmit size */ +#define MAILBOX_EXT_WSIZE 512 +#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t)) +#define MAILBOX_HBA_EXT_OFFSET 0x100 +/* max mbox xmit size is a page size for sysfs IO operations */ +#define MAILBOX_SYSFS_MAX 4096 + +typedef union { + uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/ + * feature/max ring number + */ + LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */ + READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */ + WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */ + BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */ + INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */ + DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */ + CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */ + PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */ + CONFIG_RING_VAR varCfgRing; /* cmd = 9 (CONFIG_RING) */ + RESET_RING_VAR varRstRing; /* cmd = 10 (RESET_RING) */ + READ_CONFIG_VAR varRdConfig; /* cmd = 11 (READ_CONFIG) */ + READ_RCONF_VAR varRdRConfig; /* cmd = 12 (READ_RCONFIG) */ + READ_SPARM_VAR varRdSparm; /* cmd = 13 (READ_SPARM(64)) */ + READ_STATUS_VAR varRdStatus; /* cmd = 14 (READ_STATUS) */ + READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */ + READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */ + READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */ + READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */ + REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */ + UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */ + CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */ + DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */ + UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */ + CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP) + * NEW_FEATURE + */ + struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */ + struct update_cfg_var varUpdateCfg; /* cmd = 0x1B (UPDATE_CFG)*/ + CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ + struct lpfc_mbx_read_top varReadTop; /* cmd = 0x95 (READ_TOPOLOGY) */ + REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */ + UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */ + ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */ + struct READ_EVENT_LOG_VAR varRdEventLog; /* cmd = 0x38 + * (READ_EVENT_LOG) + */ + struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI) */ +} MAILVARIANTS; + +/* + * SLI-2 specific structures + */ + +struct lpfc_hgp { + __le32 cmdPutInx; + __le32 rspGetInx; +}; + +struct lpfc_pgp { + __le32 cmdGetInx; + __le32 rspPutInx; +}; + +struct sli2_desc { + uint32_t unused1[16]; + struct lpfc_hgp host[MAX_SLI3_RINGS]; + struct lpfc_pgp port[MAX_SLI3_RINGS]; +}; + +struct sli3_desc { + struct lpfc_hgp host[MAX_SLI3_RINGS]; + uint32_t reserved[8]; + uint32_t hbq_put[16]; +}; + +struct sli3_pgp { + struct lpfc_pgp port[MAX_SLI3_RINGS]; + uint32_t 
hbq_get[16]; +}; + +union sli_var { + struct sli2_desc s2; + struct sli3_desc s3; + struct sli3_pgp s3_pgp; +}; + +typedef struct { + struct_group_tagged(MAILBOX_word0, bits, + union { + struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t mbxStatus; + uint8_t mbxCommand; + uint8_t mbxReserved:6; + uint8_t mbxHc:1; + uint8_t mbxOwner:1; /* Low order bit first word */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint8_t mbxOwner:1; /* Low order bit first word */ + uint8_t mbxHc:1; + uint8_t mbxReserved:6; + uint8_t mbxCommand; + uint16_t mbxStatus; +#endif + }; + u32 word0; + }; + ); + + MAILVARIANTS un; + union sli_var us; +} MAILBOX_t; + +/* + * Begin Structure Definitions for IOCB Commands + */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t statAction; + uint8_t statRsn; + uint8_t statBaExp; + uint8_t statLocalError; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint8_t statLocalError; + uint8_t statBaExp; + uint8_t statRsn; + uint8_t statAction; +#endif + /* statRsn P/F_RJT reason codes */ +#define RJT_BAD_D_ID 0x01 /* Invalid D_ID field */ +#define RJT_BAD_S_ID 0x02 /* Invalid S_ID field */ +#define RJT_UNAVAIL_TEMP 0x03 /* N_Port unavailable temp. */ +#define RJT_UNAVAIL_PERM 0x04 /* N_Port unavailable perm. */ +#define RJT_UNSUP_CLASS 0x05 /* Class not supported */ +#define RJT_DELIM_ERR 0x06 /* Delimiter usage error */ +#define RJT_UNSUP_TYPE 0x07 /* Type not supported */ +#define RJT_BAD_CONTROL 0x08 /* Invalid link control */ +#define RJT_BAD_RCTL 0x09 /* R_CTL invalid */ +#define RJT_BAD_FCTL 0x0A /* F_CTL invalid */ +#define RJT_BAD_OXID 0x0B /* OX_ID invalid */ +#define RJT_BAD_RXID 0x0C /* RX_ID invalid */ +#define RJT_BAD_SEQID 0x0D /* SEQ_ID invalid */ +#define RJT_BAD_DFCTL 0x0E /* DF_CTL invalid */ +#define RJT_BAD_SEQCNT 0x0F /* SEQ_CNT invalid */ +#define RJT_BAD_PARM 0x10 /* Param. 
field invalid */ +#define RJT_XCHG_ERR 0x11 /* Exchange error */ +#define RJT_PROT_ERR 0x12 /* Protocol error */ +#define RJT_BAD_LENGTH 0x13 /* Invalid Length */ +#define RJT_UNEXPECTED_ACK 0x14 /* Unexpected ACK */ +#define RJT_LOGIN_REQUIRED 0x16 /* Login required */ +#define RJT_TOO_MANY_SEQ 0x17 /* Excessive sequences */ +#define RJT_XCHG_NOT_STRT 0x18 /* Exchange not started */ +#define RJT_UNSUP_SEC_HDR 0x19 /* Security hdr not supported */ +#define RJT_UNAVAIL_PATH 0x1A /* Fabric Path not available */ +#define RJT_VENDOR_UNIQUE 0xFF /* Vendor unique error */ + +#define IOERR_SUCCESS 0x00 /* statLocalError */ +#define IOERR_MISSING_CONTINUE 0x01 +#define IOERR_SEQUENCE_TIMEOUT 0x02 +#define IOERR_INTERNAL_ERROR 0x03 +#define IOERR_INVALID_RPI 0x04 +#define IOERR_NO_XRI 0x05 +#define IOERR_ILLEGAL_COMMAND 0x06 +#define IOERR_XCHG_DROPPED 0x07 +#define IOERR_ILLEGAL_FIELD 0x08 +#define IOERR_RPI_SUSPENDED 0x09 +#define IOERR_TOO_MANY_BUFFERS 0x0A +#define IOERR_RCV_BUFFER_WAITING 0x0B +#define IOERR_NO_CONNECTION 0x0C +#define IOERR_TX_DMA_FAILED 0x0D +#define IOERR_RX_DMA_FAILED 0x0E +#define IOERR_ILLEGAL_FRAME 0x0F +#define IOERR_EXTRA_DATA 0x10 +#define IOERR_NO_RESOURCES 0x11 +#define IOERR_RESERVED 0x12 +#define IOERR_ILLEGAL_LENGTH 0x13 +#define IOERR_UNSUPPORTED_FEATURE 0x14 +#define IOERR_ABORT_IN_PROGRESS 0x15 +#define IOERR_ABORT_REQUESTED 0x16 +#define IOERR_RECEIVE_BUFFER_TIMEOUT 0x17 +#define IOERR_LOOP_OPEN_FAILURE 0x18 +#define IOERR_RING_RESET 0x19 +#define IOERR_LINK_DOWN 0x1A +#define IOERR_CORRUPTED_DATA 0x1B +#define IOERR_CORRUPTED_RPI 0x1C +#define IOERR_OUT_OF_ORDER_DATA 0x1D +#define IOERR_OUT_OF_ORDER_ACK 0x1E +#define IOERR_DUP_FRAME 0x1F +#define IOERR_LINK_CONTROL_FRAME 0x20 /* ACK_N received */ +#define IOERR_BAD_HOST_ADDRESS 0x21 +#define IOERR_RCV_HDRBUF_WAITING 0x22 +#define IOERR_MISSING_HDR_BUFFER 0x23 +#define IOERR_MSEQ_CHAIN_CORRUPTED 0x24 +#define IOERR_ABORTMULT_REQUESTED 0x25 +#define IOERR_BUFFER_SHORTAGE 0x28 +#define IOERR_DEFAULT 0x29 +#define IOERR_CNT 0x2A +#define IOERR_SLER_FAILURE 0x46 +#define IOERR_SLER_CMD_RCV_FAILURE 0x47 +#define IOERR_SLER_REC_RJT_ERR 0x48 +#define IOERR_SLER_REC_SRR_RETRY_ERR 0x49 +#define IOERR_SLER_SRR_RJT_ERR 0x4A +#define IOERR_SLER_RRQ_RJT_ERR 0x4C +#define IOERR_SLER_RRQ_RETRY_ERR 0x4D +#define IOERR_SLER_ABTS_ERR 0x4E +#define IOERR_ELXSEC_KEY_UNWRAP_ERROR 0xF0 +#define IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR 0xF1 +#define IOERR_ELXSEC_CRYPTO_ERROR 0xF2 +#define IOERR_ELXSEC_CRYPTO_COMPARE_ERROR 0xF3 +#define IOERR_DRVR_MASK 0x100 +#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */ +#define IOERR_SLI_BRESET 0x102 +#define IOERR_SLI_ABORTED 0x103 +#define IOERR_PARAM_MASK 0x1ff +} PARM_ERR; + +typedef union { + struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t Rctl; /* R_CTL field */ + uint8_t Type; /* TYPE field */ + uint8_t Dfctl; /* DF_CTL field */ + uint8_t Fctl; /* Bits 0-7 of IOCB word 5 */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint8_t Fctl; /* Bits 0-7 of IOCB word 5 */ + uint8_t Dfctl; /* DF_CTL field */ + uint8_t Type; /* TYPE field */ + uint8_t Rctl; /* R_CTL field */ +#endif + +#define BC 0x02 /* Broadcast Received - Fctl */ +#define SI 0x04 /* Sequence Initiative */ +#define LA 0x08 /* Ignore Link Attention state */ +#define LS 0x80 /* Last Sequence */ + } hcsw; + uint32_t reserved; +} WORD5; + +/* IOCB Command template for a generic response */ +typedef struct { + uint32_t reserved[4]; + PARM_ERR perr; +} GENERIC_RSP; + +/* IOCB Command template for XMIT / XMIT_BCAST / RCV_SEQUENCE / 
XMIT_ELS */ +typedef struct { + struct ulp_bde xrsqbde[2]; + uint32_t xrsqRo; /* Starting Relative Offset */ + WORD5 w5; /* Header control/status word */ +} XR_SEQ_FIELDS; + +/* IOCB Command template for ELS_REQUEST */ +typedef struct { + struct ulp_bde elsReq; + struct ulp_bde elsRsp; + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t word4Rsvd:7; + uint32_t fl:1; + uint32_t myID:24; + uint32_t word5Rsvd:8; + uint32_t remoteID:24; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t myID:24; + uint32_t fl:1; + uint32_t word4Rsvd:7; + uint32_t remoteID:24; + uint32_t word5Rsvd:8; +#endif +} ELS_REQUEST; + +/* IOCB Command template for RCV_ELS_REQ */ +typedef struct { + struct ulp_bde elsReq[2]; + uint32_t parmRo; + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t word5Rsvd:8; + uint32_t remoteID:24; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t remoteID:24; + uint32_t word5Rsvd:8; +#endif +} RCV_ELS_REQ; + +/* IOCB Command template for ABORT / CLOSE_XRI */ +typedef struct { + uint32_t rsvd[3]; + uint32_t abortType; +#define ABORT_TYPE_ABTX 0x00000000 +#define ABORT_TYPE_ABTS 0x00000001 + uint32_t parm; +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t abortContextTag; /* ulpContext from command to abort/close */ + uint16_t abortIoTag; /* ulpIoTag from command to abort/close */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t abortIoTag; /* ulpIoTag from command to abort/close */ + uint16_t abortContextTag; /* ulpContext from command to abort/close */ +#endif +} AC_XRI; + +/* IOCB Command template for ABORT_MXRI64 */ +typedef struct { + uint32_t rsvd[3]; + uint32_t abortType; + uint32_t parm; + uint32_t iotag32; +} A_MXRI64; + +/* IOCB Command template for GET_RPI */ +typedef struct { + uint32_t rsvd[4]; + uint32_t parmRo; +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t word5Rsvd:8; + uint32_t remoteID:24; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t remoteID:24; + uint32_t word5Rsvd:8; +#endif +} GET_RPI; + +/* IOCB Command template for all FCP Initiator commands */ +typedef struct { + struct ulp_bde fcpi_cmnd; /* FCP_CMND payload descriptor */ + struct ulp_bde fcpi_rsp; /* Rcv buffer */ + uint32_t fcpi_parm; + uint32_t fcpi_XRdy; /* transfer ready for IWRITE */ +} FCPI_FIELDS; + +/* IOCB Command template for all FCP Target commands */ +typedef struct { + struct ulp_bde fcpt_Buffer[2]; /* FCP_CMND payload descriptor */ + uint32_t fcpt_Offset; + uint32_t fcpt_Length; /* transfer ready for IWRITE */ +} FCPT_FIELDS; + +/* SLI-2 IOCB structure definitions */ + +/* IOCB Command template for 64 bit XMIT / XMIT_BCAST / XMIT_ELS */ +typedef struct { + ULP_BDL bdl; + uint32_t xrsqRo; /* Starting Relative Offset */ + WORD5 w5; /* Header control/status word */ +} XMT_SEQ_FIELDS64; + +/* This word is remote ports D_ID for XMIT_ELS_RSP64 */ +#define xmit_els_remoteID xrsqRo + +/* IOCB Command template for 64 bit RCV_SEQUENCE64 */ +typedef struct { + struct ulp_bde64 rcvBde; + uint32_t rsvd1; + uint32_t xrsqRo; /* Starting Relative Offset */ + WORD5 w5; /* Header control/status word */ +} RCV_SEQ_FIELDS64; + +/* IOCB Command template for ELS_REQUEST64 */ +typedef struct { + ULP_BDL bdl; +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t word4Rsvd:7; + uint32_t fl:1; + uint32_t myID:24; + uint32_t word5Rsvd:8; + uint32_t remoteID:24; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t myID:24; + uint32_t fl:1; + uint32_t word4Rsvd:7; + uint32_t remoteID:24; + uint32_t word5Rsvd:8; +#endif +} ELS_REQUEST64; + +/* IOCB Command template for GEN_REQUEST64 */ +typedef struct { + ULP_BDL bdl; + uint32_t xrsqRo; /* Starting Relative Offset */ + WORD5 
w5; /* Header control/status word */ +} GEN_REQUEST64; + +/* IOCB Command template for RCV_ELS_REQ64 */ +typedef struct { + struct ulp_bde64 elsReq; + uint32_t rcvd1; + uint32_t parmRo; + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t word5Rsvd:8; + uint32_t remoteID:24; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t remoteID:24; + uint32_t word5Rsvd:8; +#endif +} RCV_ELS_REQ64; + +/* IOCB Command template for RCV_SEQ64 */ +struct rcv_seq64 { + struct ulp_bde64 elsReq; + uint32_t hbq_1; + uint32_t parmRo; +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rctl:8; + uint32_t type:8; + uint32_t dfctl:8; + uint32_t ls:1; + uint32_t fs:1; + uint32_t rsvd2:3; + uint32_t si:1; + uint32_t bc:1; + uint32_t rsvd3:1; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t rsvd3:1; + uint32_t bc:1; + uint32_t si:1; + uint32_t rsvd2:3; + uint32_t fs:1; + uint32_t ls:1; + uint32_t dfctl:8; + uint32_t type:8; + uint32_t rctl:8; +#endif +}; + +/* IOCB Command template for all 64 bit FCP Initiator commands */ +typedef struct { + ULP_BDL bdl; + uint32_t fcpi_parm; + uint32_t fcpi_XRdy; /* transfer ready for IWRITE */ +} FCPI_FIELDS64; + +/* IOCB Command template for all 64 bit FCP Target commands */ +typedef struct { + ULP_BDL bdl; + uint32_t fcpt_Offset; + uint32_t fcpt_Length; /* transfer ready for IWRITE */ +} FCPT_FIELDS64; + +/* IOCB Command template for Async Status iocb commands */ +typedef struct { + uint32_t rsvd[4]; + uint32_t param; +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t evt_code; /* High order bits word 5 */ + uint16_t sub_ctxt_tag; /* Low order bits word 5 */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t sub_ctxt_tag; /* High order bits word 5 */ + uint16_t evt_code; /* Low order bits word 5 */ +#endif +} ASYNCSTAT_FIELDS; +#define ASYNC_TEMP_WARN 0x100 +#define ASYNC_TEMP_SAFE 0x101 +#define ASYNC_STATUS_CN 0x102 + +/* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7) + or CMD_IOCB_RCV_SEQ64_CX (0xB5) */ + +struct rcv_sli3 { +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t ox_id; + uint16_t seq_cnt; + + uint16_t vpi; + uint16_t word9Rsvd; +#else /* __LITTLE_ENDIAN */ + uint16_t seq_cnt; + uint16_t ox_id; + + uint16_t word9Rsvd; + uint16_t vpi; +#endif + uint32_t word10Rsvd; + uint32_t acc_len; /* accumulated length */ + struct ulp_bde64 bde2; +}; + +/* Structure used for a single HBQ entry */ +struct lpfc_hbq_entry { + struct ulp_bde64 bde; + uint32_t buffer_tag; +}; + +/* IOCB Command template for QUE_XRI64_CX (0xB3) command */ +typedef struct { + struct lpfc_hbq_entry buff; + uint32_t rsvd; + uint32_t rsvd1; +} QUE_XRI64_CX_FIELDS; + +struct que_xri64cx_ext_fields { + uint32_t iotag64_low; + uint32_t iotag64_high; + uint32_t ebde_count; + uint32_t rsvd; + struct lpfc_hbq_entry buff[5]; +}; + +struct sli3_bg_fields { + uint32_t filler[6]; /* word 8-13 in IOCB */ + uint32_t bghm; /* word 14 - BlockGuard High Water Mark */ +/* Bitfields for bgstat (BlockGuard Status - word 15 of IOCB) */ +#define BGS_BIDIR_BG_PROF_MASK 0xff000000 +#define BGS_BIDIR_BG_PROF_SHIFT 24 +#define BGS_BIDIR_ERR_COND_FLAGS_MASK 0x003f0000 +#define BGS_BIDIR_ERR_COND_SHIFT 16 +#define BGS_BG_PROFILE_MASK 0x0000ff00 +#define BGS_BG_PROFILE_SHIFT 8 +#define BGS_INVALID_PROF_MASK 0x00000020 +#define BGS_INVALID_PROF_SHIFT 5 +#define BGS_UNINIT_DIF_BLOCK_MASK 0x00000010 +#define BGS_UNINIT_DIF_BLOCK_SHIFT 4 +#define BGS_HI_WATER_MARK_PRESENT_MASK 0x00000008 +#define BGS_HI_WATER_MARK_PRESENT_SHIFT 3 +#define BGS_REFTAG_ERR_MASK 0x00000004 +#define BGS_REFTAG_ERR_SHIFT 2 +#define BGS_APPTAG_ERR_MASK 0x00000002 +#define BGS_APPTAG_ERR_SHIFT 1 
+#define BGS_GUARD_ERR_MASK 0x00000001 +#define BGS_GUARD_ERR_SHIFT 0 + uint32_t bgstat; /* word 15 - BlockGuard Status */ +}; + +static inline uint32_t +lpfc_bgs_get_bidir_bg_prof(uint32_t bgstat) +{ + return (bgstat & BGS_BIDIR_BG_PROF_MASK) >> + BGS_BIDIR_BG_PROF_SHIFT; +} + +static inline uint32_t +lpfc_bgs_get_bidir_err_cond(uint32_t bgstat) +{ + return (bgstat & BGS_BIDIR_ERR_COND_FLAGS_MASK) >> + BGS_BIDIR_ERR_COND_SHIFT; +} + +static inline uint32_t +lpfc_bgs_get_bg_prof(uint32_t bgstat) +{ + return (bgstat & BGS_BG_PROFILE_MASK) >> + BGS_BG_PROFILE_SHIFT; +} + +static inline uint32_t +lpfc_bgs_get_invalid_prof(uint32_t bgstat) +{ + return (bgstat & BGS_INVALID_PROF_MASK) >> + BGS_INVALID_PROF_SHIFT; +} + +static inline uint32_t +lpfc_bgs_get_uninit_dif_block(uint32_t bgstat) +{ + return (bgstat & BGS_UNINIT_DIF_BLOCK_MASK) >> + BGS_UNINIT_DIF_BLOCK_SHIFT; +} + +static inline uint32_t +lpfc_bgs_get_hi_water_mark_present(uint32_t bgstat) +{ + return (bgstat & BGS_HI_WATER_MARK_PRESENT_MASK) >> + BGS_HI_WATER_MARK_PRESENT_SHIFT; +} + +static inline uint32_t +lpfc_bgs_get_reftag_err(uint32_t bgstat) +{ + return (bgstat & BGS_REFTAG_ERR_MASK) >> + BGS_REFTAG_ERR_SHIFT; +} + +static inline uint32_t +lpfc_bgs_get_apptag_err(uint32_t bgstat) +{ + return (bgstat & BGS_APPTAG_ERR_MASK) >> + BGS_APPTAG_ERR_SHIFT; +} + +static inline uint32_t +lpfc_bgs_get_guard_err(uint32_t bgstat) +{ + return (bgstat & BGS_GUARD_ERR_MASK) >> + BGS_GUARD_ERR_SHIFT; +} + +#define LPFC_EXT_DATA_BDE_COUNT 3 +struct fcp_irw_ext { + uint32_t io_tag64_low; + uint32_t io_tag64_high; +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t reserved1; + uint8_t reserved2; + uint8_t reserved3; + uint8_t ebde_count; +#else /* __LITTLE_ENDIAN */ + uint8_t ebde_count; + uint8_t reserved3; + uint8_t reserved2; + uint8_t reserved1; +#endif + uint32_t reserved4; + struct ulp_bde64 rbde; /* response bde */ + struct ulp_bde64 dbde[LPFC_EXT_DATA_BDE_COUNT]; /* data BDE or BPL */ + uint8_t icd[32]; /* immediate command data (32 bytes) */ +}; + +typedef struct _IOCB { /* IOCB structure */ + union { + GENERIC_RSP grsp; /* Generic response */ + XR_SEQ_FIELDS xrseq; /* XMIT / BCAST / RCV_SEQUENCE cmd */ + struct ulp_bde cont[3]; /* up to 3 continuation bdes */ + RCV_ELS_REQ rcvels; /* RCV_ELS_REQ template */ + AC_XRI acxri; /* ABORT / CLOSE_XRI template */ + A_MXRI64 amxri; /* abort multiple xri command overlay */ + GET_RPI getrpi; /* GET_RPI template */ + FCPI_FIELDS fcpi; /* FCP Initiator template */ + FCPT_FIELDS fcpt; /* FCP target template */ + + /* SLI-2 structures */ + + struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation + * bde_64s */ + ELS_REQUEST64 elsreq64; /* ELS_REQUEST template */ + GEN_REQUEST64 genreq64; /* GEN_REQUEST template */ + RCV_ELS_REQ64 rcvels64; /* RCV_ELS_REQ template */ + XMT_SEQ_FIELDS64 xseq64; /* XMIT / BCAST cmd */ + FCPI_FIELDS64 fcpi64; /* FCP 64 bit Initiator template */ + FCPT_FIELDS64 fcpt64; /* FCP 64 bit target template */ + ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ + QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ + struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ + struct sli4_bls_rsp bls_rsp; /* UNSOL ABTS BLS_RSP params */ + uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ + } un; + union { + struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t ulpContext; /* High order bits word 6 */ + uint16_t ulpIoTag; /* Low order bits word 6 */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t ulpIoTag; /* Low order bits word 6 */ + uint16_t ulpContext; /* High order bits 
word 6 */ +#endif + } t1; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t ulpContext; /* High order bits word 6 */ + uint16_t ulpIoTag1:2; /* Low order bits word 6 */ + uint16_t ulpIoTag0:14; /* Low order bits word 6 */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t ulpIoTag0:14; /* Low order bits word 6 */ + uint16_t ulpIoTag1:2; /* Low order bits word 6 */ + uint16_t ulpContext; /* High order bits word 6 */ +#endif + } t2; + } un1; +#define ulpContext un1.t1.ulpContext +#define ulpIoTag un1.t1.ulpIoTag +#define ulpIoTag0 un1.t2.ulpIoTag0 + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t ulpTimeout:8; + uint32_t ulpXS:1; + uint32_t ulpFCP2Rcvy:1; + uint32_t ulpPU:2; + uint32_t ulpIr:1; + uint32_t ulpClass:3; + uint32_t ulpCommand:8; + uint32_t ulpStatus:4; + uint32_t ulpBdeCount:2; + uint32_t ulpLe:1; + uint32_t ulpOwner:1; /* Low order bit word 7 */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t ulpOwner:1; /* Low order bit word 7 */ + uint32_t ulpLe:1; + uint32_t ulpBdeCount:2; + uint32_t ulpStatus:4; + uint32_t ulpCommand:8; + uint32_t ulpClass:3; + uint32_t ulpIr:1; + uint32_t ulpPU:2; + uint32_t ulpFCP2Rcvy:1; + uint32_t ulpXS:1; + uint32_t ulpTimeout:8; +#endif + + union { + struct rcv_sli3 rcvsli3; /* words 8 - 15 */ + + /* words 8-31 used for que_xri_cx iocb */ + struct que_xri64cx_ext_fields que_xri64cx_ext_words; + struct fcp_irw_ext fcp_ext; + uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */ + + /* words 8-15 for BlockGuard */ + struct sli3_bg_fields sli3_bg; + } unsli3; + +#define ulpCt_h ulpXS +#define ulpCt_l ulpFCP2Rcvy + +#define IOCB_FCP 1 /* IOCB is used for FCP ELS cmds-ulpRsvByte */ +#define IOCB_IP 2 /* IOCB is used for IP ELS cmds */ +#define PARM_UNUSED 0 /* PU field (Word 4) not used */ +#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */ +#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */ +#define PARM_NPIV_DID 3 +#define CLASS1 0 /* Class 1 */ +#define CLASS2 1 /* Class 2 */ +#define CLASS3 2 /* Class 3 */ +#define CLASS_FCP_INTERMIX 7 /* FCP Data->Cls 1, all else->Cls 2 */ + +#define IOSTAT_SUCCESS 0x0 /* ulpStatus - HBA defined */ +#define IOSTAT_FCP_RSP_ERROR 0x1 +#define IOSTAT_REMOTE_STOP 0x2 +#define IOSTAT_LOCAL_REJECT 0x3 +#define IOSTAT_NPORT_RJT 0x4 +#define IOSTAT_FABRIC_RJT 0x5 +#define IOSTAT_NPORT_BSY 0x6 +#define IOSTAT_FABRIC_BSY 0x7 +#define IOSTAT_INTERMED_RSP 0x8 +#define IOSTAT_LS_RJT 0x9 +#define IOSTAT_BA_RJT 0xA +#define IOSTAT_RSVD1 0xB +#define IOSTAT_RSVD2 0xC +#define IOSTAT_RSVD3 0xD +#define IOSTAT_RSVD4 0xE +#define IOSTAT_NEED_BUFFER 0xF +#define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */ +#define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */ +#define IOSTAT_CNT 0x11 + +} IOCB_t; + + +#define SLI1_SLIM_SIZE (4 * 1024) + +/* Up to 498 IOCBs will fit into 16k + * 256 (MAILBOX_t) + 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384 + */ +#define SLI2_SLIM_SIZE (64 * 1024) + +/* Maximum IOCBs that will fit in SLI2 slim */ +#define MAX_SLI2_IOCB 498 +#define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \ + (sizeof(MAILBOX_t) + sizeof(PCB_t) + \ + sizeof(uint32_t) * MAILBOX_EXT_WSIZE)) + +/* HBQ entries are 4 words each = 4k */ +#define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \ + lpfc_sli_hbq_count()) + +struct lpfc_sli2_slim { + MAILBOX_t mbx; + uint32_t mbx_ext_words[MAILBOX_EXT_WSIZE]; + PCB_t pcb; + IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE]; +}; + +/* + * This function checks PCI device to allow special handling for LC HBAs. 
+ * + * Parameters: + * device : struct pci_dev 's device field + * + * return 1 => TRUE + * 0 => FALSE + */ +static inline int +lpfc_is_LC_HBA(unsigned short device) +{ + if ((device == PCI_DEVICE_ID_TFLY) || + (device == PCI_DEVICE_ID_PFLY) || + (device == PCI_DEVICE_ID_LP101) || + (device == PCI_DEVICE_ID_BMID) || + (device == PCI_DEVICE_ID_BSMB) || + (device == PCI_DEVICE_ID_ZMID) || + (device == PCI_DEVICE_ID_ZSMB) || + (device == PCI_DEVICE_ID_SAT_MID) || + (device == PCI_DEVICE_ID_SAT_SMB) || + (device == PCI_DEVICE_ID_RFLY)) + return 1; + else + return 0; +} + +#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */ diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h new file mode 100644 index 000000000..5d4f9f270 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -0,0 +1,5068 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2009-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include +#include + +/* Macros to deal with bit fields. Each bit field must have 3 #defines + * associated with it (_SHIFT, _MASK, and _WORD). + * EG. For a bit field that is in the 7th bit of the "field4" field of a + * structure and is 2 bits in size the following #defines must exist: + * struct temp { + * uint32_t field1; + * uint32_t field2; + * uint32_t field3; + * uint32_t field4; + * #define example_bit_field_SHIFT 7 + * #define example_bit_field_MASK 0x03 + * #define example_bit_field_WORD field4 + * uint32_t field5; + * }; + * Then the macros below may be used to get or set the value of that field. + * EG. 
To get the value of the bit field from the above example: + * struct temp t1; + * value = bf_get(example_bit_field, &t1); + * And then to set that bit field: + * bf_set(example_bit_field, &t1, 2); + * Or clear that bit field: + * bf_set(example_bit_field, &t1, 0); + */ +#define bf_get_be32(name, ptr) \ + ((be32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK) +#define bf_get_le32(name, ptr) \ + ((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK) +#define bf_get(name, ptr) \ + (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK) +#define bf_set_le32(name, ptr, value) \ + ((ptr)->name##_WORD = cpu_to_le32(((((value) & \ + name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \ + ~(name##_MASK << name##_SHIFT))))) +#define bf_set(name, ptr, value) \ + ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \ + ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))) + +#define get_wqe_reqtag(x) (((x)->wqe.words[9] >> 0) & 0xFFFF) +#define get_wqe_tmo(x) (((x)->wqe.words[7] >> 24) & 0x00FF) + +#define get_job_ulpword(x, y) ((x)->iocb.un.ulpWord[y]) + +#define set_job_ulpstatus(x, y) bf_set(lpfc_wcqe_c_status, &(x)->wcqe_cmpl, y) +#define set_job_ulpword4(x, y) ((&(x)->wcqe_cmpl)->parameter = y) + +struct dma_address { + uint32_t addr_lo; + uint32_t addr_hi; +}; + +struct lpfc_sli_intf { + uint32_t word0; +#define lpfc_sli_intf_valid_SHIFT 29 +#define lpfc_sli_intf_valid_MASK 0x00000007 +#define lpfc_sli_intf_valid_WORD word0 +#define LPFC_SLI_INTF_VALID 6 +#define lpfc_sli_intf_sli_hint2_SHIFT 24 +#define lpfc_sli_intf_sli_hint2_MASK 0x0000001F +#define lpfc_sli_intf_sli_hint2_WORD word0 +#define LPFC_SLI_INTF_SLI_HINT2_NONE 0 +#define lpfc_sli_intf_sli_hint1_SHIFT 16 +#define lpfc_sli_intf_sli_hint1_MASK 0x000000FF +#define lpfc_sli_intf_sli_hint1_WORD word0 +#define LPFC_SLI_INTF_SLI_HINT1_NONE 0 +#define LPFC_SLI_INTF_SLI_HINT1_1 1 +#define LPFC_SLI_INTF_SLI_HINT1_2 2 +#define lpfc_sli_intf_if_type_SHIFT 12 +#define lpfc_sli_intf_if_type_MASK 0x0000000F +#define lpfc_sli_intf_if_type_WORD word0 +#define LPFC_SLI_INTF_IF_TYPE_0 0 +#define LPFC_SLI_INTF_IF_TYPE_1 1 +#define LPFC_SLI_INTF_IF_TYPE_2 2 +#define LPFC_SLI_INTF_IF_TYPE_6 6 +#define lpfc_sli_intf_sli_family_SHIFT 8 +#define lpfc_sli_intf_sli_family_MASK 0x0000000F +#define lpfc_sli_intf_sli_family_WORD word0 +#define LPFC_SLI_INTF_FAMILY_BE2 0x0 +#define LPFC_SLI_INTF_FAMILY_BE3 0x1 +#define LPFC_SLI_INTF_FAMILY_LNCR_A0 0xa +#define LPFC_SLI_INTF_FAMILY_LNCR_B0 0xb +#define LPFC_SLI_INTF_FAMILY_G6 0xc +#define LPFC_SLI_INTF_FAMILY_G7 0xd +#define LPFC_SLI_INTF_FAMILY_G7P 0xe +#define lpfc_sli_intf_slirev_SHIFT 4 +#define lpfc_sli_intf_slirev_MASK 0x0000000F +#define lpfc_sli_intf_slirev_WORD word0 +#define LPFC_SLI_INTF_REV_SLI3 3 +#define LPFC_SLI_INTF_REV_SLI4 4 +#define lpfc_sli_intf_func_type_SHIFT 0 +#define lpfc_sli_intf_func_type_MASK 0x00000001 +#define lpfc_sli_intf_func_type_WORD word0 +#define LPFC_SLI_INTF_IF_TYPE_PHYS 0 +#define LPFC_SLI_INTF_IF_TYPE_VIRT 1 +}; + +#define LPFC_SLI4_MBX_EMBED true +#define LPFC_SLI4_MBX_NEMBED false + +#define LPFC_SLI4_MB_WORD_COUNT 64 +#define LPFC_MAX_MQ_PAGE 8 +#define LPFC_MAX_WQ_PAGE_V0 4 +#define LPFC_MAX_WQ_PAGE 8 +#define LPFC_MAX_RQ_PAGE 8 +#define LPFC_MAX_CQ_PAGE 4 +#define LPFC_MAX_EQ_PAGE 8 + +#define LPFC_VIR_FUNC_MAX 32 /* Maximum number of virtual functions */ +#define LPFC_PCI_FUNC_MAX 5 /* Maximum number of PCI functions */ +#define LPFC_VFR_PAGE_SIZE 0x1000 /* 4KB BAR2 per-VF register page size */ + +/* Define SLI4 
Alignment requirements. */ +#define LPFC_ALIGN_16_BYTE 16 +#define LPFC_ALIGN_64_BYTE 64 +#define SLI4_PAGE_SIZE 4096 + +/* Define SLI4 specific definitions. */ +#define LPFC_MQ_CQE_BYTE_OFFSET 256 +#define LPFC_MBX_CMD_HDR_LENGTH 16 +#define LPFC_MBX_ERROR_RANGE 0x4000 +#define LPFC_BMBX_BIT1_ADDR_HI 0x2 +#define LPFC_BMBX_BIT1_ADDR_LO 0 +#define LPFC_RPI_HDR_COUNT 64 +#define LPFC_HDR_TEMPLATE_SIZE 4096 +#define LPFC_RPI_ALLOC_ERROR 0xFFFF +#define LPFC_FCF_RECORD_WD_CNT 132 +#define LPFC_ENTIRE_FCF_DATABASE 0 +#define LPFC_DFLT_FCF_INDEX 0 + +/* Virtual function numbers */ +#define LPFC_VF0 0 +#define LPFC_VF1 1 +#define LPFC_VF2 2 +#define LPFC_VF3 3 +#define LPFC_VF4 4 +#define LPFC_VF5 5 +#define LPFC_VF6 6 +#define LPFC_VF7 7 +#define LPFC_VF8 8 +#define LPFC_VF9 9 +#define LPFC_VF10 10 +#define LPFC_VF11 11 +#define LPFC_VF12 12 +#define LPFC_VF13 13 +#define LPFC_VF14 14 +#define LPFC_VF15 15 +#define LPFC_VF16 16 +#define LPFC_VF17 17 +#define LPFC_VF18 18 +#define LPFC_VF19 19 +#define LPFC_VF20 20 +#define LPFC_VF21 21 +#define LPFC_VF22 22 +#define LPFC_VF23 23 +#define LPFC_VF24 24 +#define LPFC_VF25 25 +#define LPFC_VF26 26 +#define LPFC_VF27 27 +#define LPFC_VF28 28 +#define LPFC_VF29 29 +#define LPFC_VF30 30 +#define LPFC_VF31 31 + +/* PCI function numbers */ +#define LPFC_PCI_FUNC0 0 +#define LPFC_PCI_FUNC1 1 +#define LPFC_PCI_FUNC2 2 +#define LPFC_PCI_FUNC3 3 +#define LPFC_PCI_FUNC4 4 + +/* SLI4 interface type-2 PDEV_CTL register */ +#define LPFC_CTL_PDEV_CTL_OFFSET 0x414 +#define LPFC_CTL_PDEV_CTL_DRST 0x00000001 +#define LPFC_CTL_PDEV_CTL_FRST 0x00000002 +#define LPFC_CTL_PDEV_CTL_DD 0x00000004 +#define LPFC_CTL_PDEV_CTL_LC 0x00000008 +#define LPFC_CTL_PDEV_CTL_FRL_ALL 0x00 +#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE 0x10 +#define LPFC_CTL_PDEV_CTL_FRL_NIC 0x20 +#define LPFC_CTL_PDEV_CTL_DDL_RAS 0x1000000 + +#define LPFC_FW_DUMP_REQUEST (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST) + +/* Active interrupt test count */ +#define LPFC_ACT_INTR_CNT 4 + +/* Algorithms for scheduling FCP commands to WQs */ +#define LPFC_FCP_SCHED_BY_HDWQ 0 +#define LPFC_FCP_SCHED_BY_CPU 1 + +/* Algorithms for NameServer Query after RSCN */ +#define LPFC_NS_QUERY_GID_FT 0 +#define LPFC_NS_QUERY_GID_PT 1 + +/* Delay Multiplier constant */ +#define LPFC_DMULT_CONST 651042 +#define LPFC_DMULT_MAX 1023 + +/* Configuration of Interrupts / sec for entire HBA port */ +#define LPFC_MIN_IMAX 5000 +#define LPFC_MAX_IMAX 5000000 +#define LPFC_DEF_IMAX 0 + +#define LPFC_MAX_AUTO_EQ_DELAY 120 +#define LPFC_EQ_DELAY_STEP 15 +#define LPFC_EQD_ISR_TRIGGER 20000 +/* 1s intervals */ +#define LPFC_EQ_DELAY_MSECS 1000 + +#define LPFC_MIN_CPU_MAP 0 +#define LPFC_MAX_CPU_MAP 1 +#define LPFC_HBA_CPU_MAP 1 + +/* PORT_CAPABILITIES constants.
*/ +#define LPFC_MAX_SUPPORTED_PAGES 8 + +enum ulp_bde64_word3 { + ULP_BDE64_SIZE_MASK = 0xffffff, + + ULP_BDE64_TYPE_SHIFT = 24, + ULP_BDE64_TYPE_MASK = (0xff << ULP_BDE64_TYPE_SHIFT), + + /* BDE (Host_resident) */ + ULP_BDE64_TYPE_BDE_64 = (0x00 << ULP_BDE64_TYPE_SHIFT), + /* Immediate Data BDE */ + ULP_BDE64_TYPE_BDE_IMMED = (0x01 << ULP_BDE64_TYPE_SHIFT), + /* BDE (Port-resident) */ + ULP_BDE64_TYPE_BDE_64P = (0x02 << ULP_BDE64_TYPE_SHIFT), + /* Input BDE (Host-resident) */ + ULP_BDE64_TYPE_BDE_64I = (0x08 << ULP_BDE64_TYPE_SHIFT), + /* Input BDE (Port-resident) */ + ULP_BDE64_TYPE_BDE_64IP = (0x0A << ULP_BDE64_TYPE_SHIFT), + /* BLP (Host-resident) */ + ULP_BDE64_TYPE_BLP_64 = (0x40 << ULP_BDE64_TYPE_SHIFT), + /* BLP (Port-resident) */ + ULP_BDE64_TYPE_BLP_64P = (0x42 << ULP_BDE64_TYPE_SHIFT), +}; + +struct ulp_bde64_le { + __le32 type_size; /* type 31:24, size 23:0 */ + __le32 addr_low; + __le32 addr_high; +}; + +struct ulp_bde64 { + union ULP_BDE_TUS { + uint32_t w; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED + VALUE !! */ + uint32_t bdeSize:24; /* Size of buffer (in bytes) */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t bdeSize:24; /* Size of buffer (in bytes) */ + uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED + VALUE !! */ +#endif +#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */ +#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */ +#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */ +#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */ +#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */ +#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */ +#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */ + } f; + } tus; + uint32_t addrLow; + uint32_t addrHigh; +}; + +/* Maximum size of immediate data that can fit into a 128 byte WQE */ +#define LPFC_MAX_BDE_IMM_SIZE 64 + +struct lpfc_sli4_flags { + uint32_t word0; +#define lpfc_idx_rsrc_rdy_SHIFT 0 +#define lpfc_idx_rsrc_rdy_MASK 0x00000001 +#define lpfc_idx_rsrc_rdy_WORD word0 +#define LPFC_IDX_RSRC_RDY 1 +#define lpfc_rpi_rsrc_rdy_SHIFT 1 +#define lpfc_rpi_rsrc_rdy_MASK 0x00000001 +#define lpfc_rpi_rsrc_rdy_WORD word0 +#define LPFC_RPI_RSRC_RDY 1 +#define lpfc_vpi_rsrc_rdy_SHIFT 2 +#define lpfc_vpi_rsrc_rdy_MASK 0x00000001 +#define lpfc_vpi_rsrc_rdy_WORD word0 +#define LPFC_VPI_RSRC_RDY 1 +#define lpfc_vfi_rsrc_rdy_SHIFT 3 +#define lpfc_vfi_rsrc_rdy_MASK 0x00000001 +#define lpfc_vfi_rsrc_rdy_WORD word0 +#define LPFC_VFI_RSRC_RDY 1 +#define lpfc_ftr_ashdr_SHIFT 4 +#define lpfc_ftr_ashdr_MASK 0x00000001 +#define lpfc_ftr_ashdr_WORD word0 +}; + +struct sli4_bls_rsp { + uint32_t word0_rsvd; /* Word0 must be reserved */ + uint32_t word1; +#define lpfc_abts_orig_SHIFT 0 +#define lpfc_abts_orig_MASK 0x00000001 +#define lpfc_abts_orig_WORD word1 +#define LPFC_ABTS_UNSOL_RSP 1 +#define LPFC_ABTS_UNSOL_INT 0 + uint32_t word2; +#define lpfc_abts_rxid_SHIFT 0 +#define lpfc_abts_rxid_MASK 0x0000FFFF +#define lpfc_abts_rxid_WORD word2 +#define lpfc_abts_oxid_SHIFT 16 +#define lpfc_abts_oxid_MASK 0x0000FFFF +#define lpfc_abts_oxid_WORD word2 + uint32_t word3; +#define lpfc_vndr_code_SHIFT 0 +#define lpfc_vndr_code_MASK 0x000000FF +#define lpfc_vndr_code_WORD word3 +#define lpfc_rsn_expln_SHIFT 8 +#define lpfc_rsn_expln_MASK 0x000000FF +#define lpfc_rsn_expln_WORD word3 +#define lpfc_rsn_code_SHIFT 16 +#define lpfc_rsn_code_MASK 0x000000FF +#define lpfc_rsn_code_WORD word3 + + uint32_t word4; + uint32_t word5_rsvd; /* Word5 must be reserved */ +}; + 
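Every register and queue-entry structure above follows the same _SHIFT/_MASK/_WORD convention, so the bf_get()/bf_set() macros defined earlier in this header are the only accessors the driver needs. The sketch below shows the pattern in a minimal standalone form; the demo_reg structure and its demo_slirev field are hypothetical stand-ins (laid out like the slirev field of struct lpfc_sli_intf), not definitions from the driver.

#include <stdint.h>
#include <stdio.h>

/* Host-endian accessor macros, same shape as bf_get()/bf_set() above. */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

/* Hypothetical register: a 4-bit "slirev" field in bits 7:4 of word0. */
struct demo_reg {
	uint32_t word0;
#define demo_slirev_SHIFT	4
#define demo_slirev_MASK	0x0000000F
#define demo_slirev_WORD	word0
};

int main(void)
{
	struct demo_reg r = { .word0 = 0 };

	bf_set(demo_slirev, &r, 4);	/* sets bits 7:4, word0 becomes 0x40 */
	printf("word0=0x%08x slirev=%u\n", r.word0, bf_get(demo_slirev, &r));
	return 0;
}

Because the macros paste the field name onto _SHIFT, _MASK, and _WORD, adding a new hardware field to this header only requires the three #defines next to the word that holds it; no per-field accessor functions are needed.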
+/* event queue entry structure */ +struct lpfc_eqe { + uint32_t word0; +#define lpfc_eqe_resource_id_SHIFT 16 +#define lpfc_eqe_resource_id_MASK 0x0000FFFF +#define lpfc_eqe_resource_id_WORD word0 +#define lpfc_eqe_minor_code_SHIFT 4 +#define lpfc_eqe_minor_code_MASK 0x00000FFF +#define lpfc_eqe_minor_code_WORD word0 +#define lpfc_eqe_major_code_SHIFT 1 +#define lpfc_eqe_major_code_MASK 0x00000007 +#define lpfc_eqe_major_code_WORD word0 +#define lpfc_eqe_valid_SHIFT 0 +#define lpfc_eqe_valid_MASK 0x00000001 +#define lpfc_eqe_valid_WORD word0 +}; + +/* completion queue entry structure (common fields for all cqe types) */ +struct lpfc_cqe { + uint32_t reserved0; + uint32_t reserved1; + uint32_t reserved2; + uint32_t word3; +#define lpfc_cqe_valid_SHIFT 31 +#define lpfc_cqe_valid_MASK 0x00000001 +#define lpfc_cqe_valid_WORD word3 +#define lpfc_cqe_code_SHIFT 16 +#define lpfc_cqe_code_MASK 0x000000FF +#define lpfc_cqe_code_WORD word3 +}; + +/* Completion Queue Entry Status Codes */ +#define CQE_STATUS_SUCCESS 0x0 +#define CQE_STATUS_FCP_RSP_FAILURE 0x1 +#define CQE_STATUS_REMOTE_STOP 0x2 +#define CQE_STATUS_LOCAL_REJECT 0x3 +#define CQE_STATUS_NPORT_RJT 0x4 +#define CQE_STATUS_FABRIC_RJT 0x5 +#define CQE_STATUS_NPORT_BSY 0x6 +#define CQE_STATUS_FABRIC_BSY 0x7 +#define CQE_STATUS_INTERMED_RSP 0x8 +#define CQE_STATUS_LS_RJT 0x9 +#define CQE_STATUS_CMD_REJECT 0xb +#define CQE_STATUS_FCP_TGT_LENCHECK 0xc +#define CQE_STATUS_NEED_BUFF_ENTRY 0xf +#define CQE_STATUS_DI_ERROR 0x16 + +/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */ +#define CQE_HW_STATUS_NO_ERR 0x0 +#define CQE_HW_STATUS_UNDERRUN 0x1 +#define CQE_HW_STATUS_OVERRUN 0x2 + +/* Completion Queue Entry Codes */ +#define CQE_CODE_COMPL_WQE 0x1 +#define CQE_CODE_RELEASE_WQE 0x2 +#define CQE_CODE_RECEIVE 0x4 +#define CQE_CODE_XRI_ABORTED 0x5 +#define CQE_CODE_RECEIVE_V1 0x9 +#define CQE_CODE_NVME_ERSP 0xd + +/* + * Define mask value for xri_aborted and wcqe completed CQE extended status. + * Currently, extended status is limited to 9 bits (0x0 -> 0x103) . 
+ */ +#define WCQE_PARAM_MASK 0x1FF + +/* completion queue entry for wqe completions */ +struct lpfc_wcqe_complete { + uint32_t word0; +#define lpfc_wcqe_c_request_tag_SHIFT 16 +#define lpfc_wcqe_c_request_tag_MASK 0x0000FFFF +#define lpfc_wcqe_c_request_tag_WORD word0 +#define lpfc_wcqe_c_status_SHIFT 8 +#define lpfc_wcqe_c_status_MASK 0x000000FF +#define lpfc_wcqe_c_status_WORD word0 +#define lpfc_wcqe_c_hw_status_SHIFT 0 +#define lpfc_wcqe_c_hw_status_MASK 0x000000FF +#define lpfc_wcqe_c_hw_status_WORD word0 +#define lpfc_wcqe_c_ersp0_SHIFT 0 +#define lpfc_wcqe_c_ersp0_MASK 0x0000FFFF +#define lpfc_wcqe_c_ersp0_WORD word0 + uint32_t total_data_placed; +#define lpfc_wcqe_c_cmf_cg_SHIFT 31 +#define lpfc_wcqe_c_cmf_cg_MASK 0x00000001 +#define lpfc_wcqe_c_cmf_cg_WORD total_data_placed +#define lpfc_wcqe_c_cmf_bw_SHIFT 0 +#define lpfc_wcqe_c_cmf_bw_MASK 0x0FFFFFFF +#define lpfc_wcqe_c_cmf_bw_WORD total_data_placed + uint32_t parameter; +#define lpfc_wcqe_c_bg_edir_SHIFT 5 +#define lpfc_wcqe_c_bg_edir_MASK 0x00000001 +#define lpfc_wcqe_c_bg_edir_WORD parameter +#define lpfc_wcqe_c_bg_tdpv_SHIFT 3 +#define lpfc_wcqe_c_bg_tdpv_MASK 0x00000001 +#define lpfc_wcqe_c_bg_tdpv_WORD parameter +#define lpfc_wcqe_c_bg_re_SHIFT 2 +#define lpfc_wcqe_c_bg_re_MASK 0x00000001 +#define lpfc_wcqe_c_bg_re_WORD parameter +#define lpfc_wcqe_c_bg_ae_SHIFT 1 +#define lpfc_wcqe_c_bg_ae_MASK 0x00000001 +#define lpfc_wcqe_c_bg_ae_WORD parameter +#define lpfc_wcqe_c_bg_ge_SHIFT 0 +#define lpfc_wcqe_c_bg_ge_MASK 0x00000001 +#define lpfc_wcqe_c_bg_ge_WORD parameter + uint32_t word3; +#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_wcqe_c_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_wcqe_c_xb_SHIFT 28 +#define lpfc_wcqe_c_xb_MASK 0x00000001 +#define lpfc_wcqe_c_xb_WORD word3 +#define lpfc_wcqe_c_pv_SHIFT 27 +#define lpfc_wcqe_c_pv_MASK 0x00000001 +#define lpfc_wcqe_c_pv_WORD word3 +#define lpfc_wcqe_c_priority_SHIFT 24 +#define lpfc_wcqe_c_priority_MASK 0x00000007 +#define lpfc_wcqe_c_priority_WORD word3 +#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK +#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD +#define lpfc_wcqe_c_sqhead_SHIFT 0 +#define lpfc_wcqe_c_sqhead_MASK 0x0000FFFF +#define lpfc_wcqe_c_sqhead_WORD word3 +}; + +/* completion queue entry for wqe release */ +struct lpfc_wcqe_release { + uint32_t reserved0; + uint32_t reserved1; + uint32_t word2; +#define lpfc_wcqe_r_wq_id_SHIFT 16 +#define lpfc_wcqe_r_wq_id_MASK 0x0000FFFF +#define lpfc_wcqe_r_wq_id_WORD word2 +#define lpfc_wcqe_r_wqe_index_SHIFT 0 +#define lpfc_wcqe_r_wqe_index_MASK 0x0000FFFF +#define lpfc_wcqe_r_wqe_index_WORD word2 + uint32_t word3; +#define lpfc_wcqe_r_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_wcqe_r_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_wcqe_r_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_wcqe_r_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_wcqe_r_code_MASK lpfc_cqe_code_MASK +#define lpfc_wcqe_r_code_WORD lpfc_cqe_code_WORD +}; + +struct sli4_wcqe_xri_aborted { + uint32_t word0; +#define lpfc_wcqe_xa_status_SHIFT 8 +#define lpfc_wcqe_xa_status_MASK 0x000000FF +#define lpfc_wcqe_xa_status_WORD word0 + uint32_t parameter; + uint32_t word2; +#define lpfc_wcqe_xa_remote_xid_SHIFT 16 +#define lpfc_wcqe_xa_remote_xid_MASK 0x0000FFFF +#define lpfc_wcqe_xa_remote_xid_WORD word2 +#define lpfc_wcqe_xa_xri_SHIFT 0 +#define lpfc_wcqe_xa_xri_MASK 0x0000FFFF +#define lpfc_wcqe_xa_xri_WORD word2 + uint32_t word3; 
+#define lpfc_wcqe_xa_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_wcqe_xa_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_wcqe_xa_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_wcqe_xa_ia_SHIFT 30 +#define lpfc_wcqe_xa_ia_MASK 0x00000001 +#define lpfc_wcqe_xa_ia_WORD word3 +#define CQE_XRI_ABORTED_IA_REMOTE 0 +#define CQE_XRI_ABORTED_IA_LOCAL 1 +#define lpfc_wcqe_xa_br_SHIFT 29 +#define lpfc_wcqe_xa_br_MASK 0x00000001 +#define lpfc_wcqe_xa_br_WORD word3 +#define CQE_XRI_ABORTED_BR_BA_ACC 0 +#define CQE_XRI_ABORTED_BR_BA_RJT 1 +#define lpfc_wcqe_xa_eo_SHIFT 28 +#define lpfc_wcqe_xa_eo_MASK 0x00000001 +#define lpfc_wcqe_xa_eo_WORD word3 +#define CQE_XRI_ABORTED_EO_REMOTE 0 +#define CQE_XRI_ABORTED_EO_LOCAL 1 +#define lpfc_wcqe_xa_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_wcqe_xa_code_MASK lpfc_cqe_code_MASK +#define lpfc_wcqe_xa_code_WORD lpfc_cqe_code_WORD +}; + +/* completion queue entry structure for rqe completion */ +struct lpfc_rcqe { + uint32_t word0; +#define lpfc_rcqe_iv_SHIFT 31 +#define lpfc_rcqe_iv_MASK 0x00000001 +#define lpfc_rcqe_iv_WORD word0 +#define lpfc_rcqe_status_SHIFT 8 +#define lpfc_rcqe_status_MASK 0x000000FF +#define lpfc_rcqe_status_WORD word0 +#define FC_STATUS_RQ_SUCCESS 0x10 /* Async receive successful */ +#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */ +#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */ +#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */ +#define FC_STATUS_RQ_DMA_FAILURE 0x14 /* DMA failure */ + uint32_t word1; +#define lpfc_rcqe_fcf_id_v1_SHIFT 0 +#define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F +#define lpfc_rcqe_fcf_id_v1_WORD word1 + uint32_t word2; +#define lpfc_rcqe_length_SHIFT 16 +#define lpfc_rcqe_length_MASK 0x0000FFFF +#define lpfc_rcqe_length_WORD word2 +#define lpfc_rcqe_rq_id_SHIFT 6 +#define lpfc_rcqe_rq_id_MASK 0x000003FF +#define lpfc_rcqe_rq_id_WORD word2 +#define lpfc_rcqe_fcf_id_SHIFT 0 +#define lpfc_rcqe_fcf_id_MASK 0x0000003F +#define lpfc_rcqe_fcf_id_WORD word2 +#define lpfc_rcqe_rq_id_v1_SHIFT 0 +#define lpfc_rcqe_rq_id_v1_MASK 0x0000FFFF +#define lpfc_rcqe_rq_id_v1_WORD word2 + uint32_t word3; +#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_rcqe_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_rcqe_port_SHIFT 30 +#define lpfc_rcqe_port_MASK 0x00000001 +#define lpfc_rcqe_port_WORD word3 +#define lpfc_rcqe_hdr_length_SHIFT 24 +#define lpfc_rcqe_hdr_length_MASK 0x0000001F +#define lpfc_rcqe_hdr_length_WORD word3 +#define lpfc_rcqe_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_rcqe_code_MASK lpfc_cqe_code_MASK +#define lpfc_rcqe_code_WORD lpfc_cqe_code_WORD +#define lpfc_rcqe_eof_SHIFT 8 +#define lpfc_rcqe_eof_MASK 0x000000FF +#define lpfc_rcqe_eof_WORD word3 +#define FCOE_EOFn 0x41 +#define FCOE_EOFt 0x42 +#define FCOE_EOFni 0x49 +#define FCOE_EOFa 0x50 +#define lpfc_rcqe_sof_SHIFT 0 +#define lpfc_rcqe_sof_MASK 0x000000FF +#define lpfc_rcqe_sof_WORD word3 +#define FCOE_SOFi2 0x2d +#define FCOE_SOFi3 0x2e +#define FCOE_SOFn2 0x35 +#define FCOE_SOFn3 0x36 +}; + +struct lpfc_rqe { + uint32_t address_hi; + uint32_t address_lo; +}; + +/* buffer descriptors */ +struct lpfc_bde4 { + uint32_t addr_hi; + uint32_t addr_lo; + uint32_t word2; +#define lpfc_bde4_last_SHIFT 31 +#define lpfc_bde4_last_MASK 0x00000001 +#define lpfc_bde4_last_WORD word2 +#define lpfc_bde4_sge_offset_SHIFT 0 +#define lpfc_bde4_sge_offset_MASK 0x000003FF +#define lpfc_bde4_sge_offset_WORD word2 + uint32_t word3; +#define lpfc_bde4_length_SHIFT 0 
+#define lpfc_bde4_length_MASK 0x000000FF +#define lpfc_bde4_length_WORD word3 +}; + +struct lpfc_register { + uint32_t word0; +}; + +#define LPFC_PORT_SEM_UE_RECOVERABLE 0xE000 +#define LPFC_PORT_SEM_MASK 0xF000 +/* The following BAR0 Registers apply to SLI4 if_type 0 UCNAs. */ +#define LPFC_UERR_STATUS_HI 0x00A4 +#define LPFC_UERR_STATUS_LO 0x00A0 +#define LPFC_UE_MASK_HI 0x00AC +#define LPFC_UE_MASK_LO 0x00A8 + +/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */ +#define LPFC_SLI_INTF 0x0058 +#define LPFC_SLI_ASIC_VER 0x009C + +#define LPFC_CTL_PORT_SEM_OFFSET 0x400 +#define lpfc_port_smphr_perr_SHIFT 31 +#define lpfc_port_smphr_perr_MASK 0x1 +#define lpfc_port_smphr_perr_WORD word0 +#define lpfc_port_smphr_sfi_SHIFT 30 +#define lpfc_port_smphr_sfi_MASK 0x1 +#define lpfc_port_smphr_sfi_WORD word0 +#define lpfc_port_smphr_nip_SHIFT 29 +#define lpfc_port_smphr_nip_MASK 0x1 +#define lpfc_port_smphr_nip_WORD word0 +#define lpfc_port_smphr_ipc_SHIFT 28 +#define lpfc_port_smphr_ipc_MASK 0x1 +#define lpfc_port_smphr_ipc_WORD word0 +#define lpfc_port_smphr_scr1_SHIFT 27 +#define lpfc_port_smphr_scr1_MASK 0x1 +#define lpfc_port_smphr_scr1_WORD word0 +#define lpfc_port_smphr_scr2_SHIFT 26 +#define lpfc_port_smphr_scr2_MASK 0x1 +#define lpfc_port_smphr_scr2_WORD word0 +#define lpfc_port_smphr_host_scratch_SHIFT 16 +#define lpfc_port_smphr_host_scratch_MASK 0xFF +#define lpfc_port_smphr_host_scratch_WORD word0 +#define lpfc_port_smphr_port_status_SHIFT 0 +#define lpfc_port_smphr_port_status_MASK 0xFFFF +#define lpfc_port_smphr_port_status_WORD word0 + +#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000 +#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001 +#define LPFC_POST_STAGE_HOST_RDY 0x0002 +#define LPFC_POST_STAGE_BE_RESET 0x0003 +#define LPFC_POST_STAGE_SEEPROM_CS_START 0x0100 +#define LPFC_POST_STAGE_SEEPROM_CS_DONE 0x0101 +#define LPFC_POST_STAGE_DDR_CONFIG_START 0x0200 +#define LPFC_POST_STAGE_DDR_CONFIG_DONE 0x0201 +#define LPFC_POST_STAGE_DDR_CALIBRATE_START 0x0300 +#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE 0x0301 +#define LPFC_POST_STAGE_DDR_TEST_START 0x0400 +#define LPFC_POST_STAGE_DDR_TEST_DONE 0x0401 +#define LPFC_POST_STAGE_REDBOOT_INIT_START 0x0600 +#define LPFC_POST_STAGE_REDBOOT_INIT_DONE 0x0601 +#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START 0x0700 +#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE 0x0701 +#define LPFC_POST_STAGE_ARMFW_START 0x0800 +#define LPFC_POST_STAGE_DHCP_QUERY_START 0x0900 +#define LPFC_POST_STAGE_DHCP_QUERY_DONE 0x0901 +#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START 0x0A00 +#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE 0x0A01 +#define LPFC_POST_STAGE_RC_OPTION_SET 0x0B00 +#define LPFC_POST_STAGE_SWITCH_LINK 0x0B01 +#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE 0x0B02 +#define LPFC_POST_STAGE_PERFROM_TFTP 0x0B03 +#define LPFC_POST_STAGE_PARSE_XML 0x0B04 +#define LPFC_POST_STAGE_DOWNLOAD_IMAGE 0x0B05 +#define LPFC_POST_STAGE_FLASH_IMAGE 0x0B06 +#define LPFC_POST_STAGE_RC_DONE 0x0B07 +#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08 +#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00 +#define LPFC_POST_STAGE_PORT_READY 0xC000 +#define LPFC_POST_STAGE_PORT_UE 0xF000 + +#define LPFC_CTL_PORT_STA_OFFSET 0x404 +#define lpfc_sliport_status_err_SHIFT 31 +#define lpfc_sliport_status_err_MASK 0x1 +#define lpfc_sliport_status_err_WORD word0 +#define lpfc_sliport_status_end_SHIFT 30 +#define lpfc_sliport_status_end_MASK 0x1 +#define lpfc_sliport_status_end_WORD word0 +#define lpfc_sliport_status_oti_SHIFT 29 +#define lpfc_sliport_status_oti_MASK 0x1 +#define 
lpfc_sliport_status_oti_WORD word0 +#define lpfc_sliport_status_dip_SHIFT 25 +#define lpfc_sliport_status_dip_MASK 0x1 +#define lpfc_sliport_status_dip_WORD word0 +#define lpfc_sliport_status_rn_SHIFT 24 +#define lpfc_sliport_status_rn_MASK 0x1 +#define lpfc_sliport_status_rn_WORD word0 +#define lpfc_sliport_status_rdy_SHIFT 23 +#define lpfc_sliport_status_rdy_MASK 0x1 +#define lpfc_sliport_status_rdy_WORD word0 +#define lpfc_sliport_status_pldv_SHIFT 0 +#define lpfc_sliport_status_pldv_MASK 0x1 +#define lpfc_sliport_status_pldv_WORD word0 +#define CFG_PLD 0x3C +#define MAX_IF_TYPE_2_RESETS 6 + +#define LPFC_CTL_PORT_CTL_OFFSET 0x408 +#define lpfc_sliport_ctrl_end_SHIFT 30 +#define lpfc_sliport_ctrl_end_MASK 0x1 +#define lpfc_sliport_ctrl_end_WORD word0 +#define LPFC_SLIPORT_LITTLE_ENDIAN 0 +#define LPFC_SLIPORT_BIG_ENDIAN 1 +#define lpfc_sliport_ctrl_ip_SHIFT 27 +#define lpfc_sliport_ctrl_ip_MASK 0x1 +#define lpfc_sliport_ctrl_ip_WORD word0 +#define LPFC_SLIPORT_INIT_PORT 1 + +#define LPFC_CTL_PORT_ER1_OFFSET 0x40C +#define LPFC_CTL_PORT_ER2_OFFSET 0x410 + +#define LPFC_CTL_PORT_EQ_DELAY_OFFSET 0x418 +#define lpfc_sliport_eqdelay_delay_SHIFT 16 +#define lpfc_sliport_eqdelay_delay_MASK 0xffff +#define lpfc_sliport_eqdelay_delay_WORD word0 +#define lpfc_sliport_eqdelay_id_SHIFT 0 +#define lpfc_sliport_eqdelay_id_MASK 0xfff +#define lpfc_sliport_eqdelay_id_WORD word0 +#define LPFC_SEC_TO_USEC 1000000 +#define LPFC_SEC_TO_MSEC 1000 +#define LPFC_MSECS_TO_SECS(msecs) ((msecs) / 1000) + +/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically + * reside in BAR 2. + */ +#define LPFC_SLIPORT_IF0_SMPHR 0x00AC + +#define LPFC_IMR_MASK_ALL 0xFFFFFFFF +#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF + +#define LPFC_HST_ISR0 0x0C18 +#define LPFC_HST_ISR1 0x0C1C +#define LPFC_HST_ISR2 0x0C20 +#define LPFC_HST_ISR3 0x0C24 +#define LPFC_HST_ISR4 0x0C28 + +#define LPFC_HST_IMR0 0x0C48 +#define LPFC_HST_IMR1 0x0C4C +#define LPFC_HST_IMR2 0x0C50 +#define LPFC_HST_IMR3 0x0C54 +#define LPFC_HST_IMR4 0x0C58 + +#define LPFC_HST_ISCR0 0x0C78 +#define LPFC_HST_ISCR1 0x0C7C +#define LPFC_HST_ISCR2 0x0C80 +#define LPFC_HST_ISCR3 0x0C84 +#define LPFC_HST_ISCR4 0x0C88 + +#define LPFC_SLI4_INTR0 BIT0 +#define LPFC_SLI4_INTR1 BIT1 +#define LPFC_SLI4_INTR2 BIT2 +#define LPFC_SLI4_INTR3 BIT3 +#define LPFC_SLI4_INTR4 BIT4 +#define LPFC_SLI4_INTR5 BIT5 +#define LPFC_SLI4_INTR6 BIT6 +#define LPFC_SLI4_INTR7 BIT7 +#define LPFC_SLI4_INTR8 BIT8 +#define LPFC_SLI4_INTR9 BIT9 +#define LPFC_SLI4_INTR10 BIT10 +#define LPFC_SLI4_INTR11 BIT11 +#define LPFC_SLI4_INTR12 BIT12 +#define LPFC_SLI4_INTR13 BIT13 +#define LPFC_SLI4_INTR14 BIT14 +#define LPFC_SLI4_INTR15 BIT15 +#define LPFC_SLI4_INTR16 BIT16 +#define LPFC_SLI4_INTR17 BIT17 +#define LPFC_SLI4_INTR18 BIT18 +#define LPFC_SLI4_INTR19 BIT19 +#define LPFC_SLI4_INTR20 BIT20 +#define LPFC_SLI4_INTR21 BIT21 +#define LPFC_SLI4_INTR22 BIT22 +#define LPFC_SLI4_INTR23 BIT23 +#define LPFC_SLI4_INTR24 BIT24 +#define LPFC_SLI4_INTR25 BIT25 +#define LPFC_SLI4_INTR26 BIT26 +#define LPFC_SLI4_INTR27 BIT27 +#define LPFC_SLI4_INTR28 BIT28 +#define LPFC_SLI4_INTR29 BIT29 +#define LPFC_SLI4_INTR30 BIT30 +#define LPFC_SLI4_INTR31 BIT31 + +/* + * The Doorbell registers defined here exist in different BAR + * register sets depending on the UCNA Port's reported if_type + * value. For UCNA ports running SLI4 and if_type 0, they reside in + * BAR4. For UCNA ports running SLI4 and if_type 2, they reside in + * BAR0. For FC ports running SLI4 and if_type 6, they reside in + * BAR2. 
The offsets and base address are different, so the driver + * has to compute the register addresses accordingly + */ +#define LPFC_ULP0_RQ_DOORBELL 0x00A0 +#define LPFC_ULP1_RQ_DOORBELL 0x00C0 +#define LPFC_IF6_RQ_DOORBELL 0x0080 +#define lpfc_rq_db_list_fm_num_posted_SHIFT 24 +#define lpfc_rq_db_list_fm_num_posted_MASK 0x00FF +#define lpfc_rq_db_list_fm_num_posted_WORD word0 +#define lpfc_rq_db_list_fm_index_SHIFT 16 +#define lpfc_rq_db_list_fm_index_MASK 0x00FF +#define lpfc_rq_db_list_fm_index_WORD word0 +#define lpfc_rq_db_list_fm_id_SHIFT 0 +#define lpfc_rq_db_list_fm_id_MASK 0xFFFF +#define lpfc_rq_db_list_fm_id_WORD word0 +#define lpfc_rq_db_ring_fm_num_posted_SHIFT 16 +#define lpfc_rq_db_ring_fm_num_posted_MASK 0x3FFF +#define lpfc_rq_db_ring_fm_num_posted_WORD word0 +#define lpfc_rq_db_ring_fm_id_SHIFT 0 +#define lpfc_rq_db_ring_fm_id_MASK 0xFFFF +#define lpfc_rq_db_ring_fm_id_WORD word0 + +#define LPFC_ULP0_WQ_DOORBELL 0x0040 +#define LPFC_ULP1_WQ_DOORBELL 0x0060 +#define lpfc_wq_db_list_fm_num_posted_SHIFT 24 +#define lpfc_wq_db_list_fm_num_posted_MASK 0x00FF +#define lpfc_wq_db_list_fm_num_posted_WORD word0 +#define lpfc_wq_db_list_fm_index_SHIFT 16 +#define lpfc_wq_db_list_fm_index_MASK 0x00FF +#define lpfc_wq_db_list_fm_index_WORD word0 +#define lpfc_wq_db_list_fm_id_SHIFT 0 +#define lpfc_wq_db_list_fm_id_MASK 0xFFFF +#define lpfc_wq_db_list_fm_id_WORD word0 +#define lpfc_wq_db_ring_fm_num_posted_SHIFT 16 +#define lpfc_wq_db_ring_fm_num_posted_MASK 0x3FFF +#define lpfc_wq_db_ring_fm_num_posted_WORD word0 +#define lpfc_wq_db_ring_fm_id_SHIFT 0 +#define lpfc_wq_db_ring_fm_id_MASK 0xFFFF +#define lpfc_wq_db_ring_fm_id_WORD word0 + +#define LPFC_IF6_WQ_DOORBELL 0x0040 +#define lpfc_if6_wq_db_list_fm_num_posted_SHIFT 24 +#define lpfc_if6_wq_db_list_fm_num_posted_MASK 0x00FF +#define lpfc_if6_wq_db_list_fm_num_posted_WORD word0 +#define lpfc_if6_wq_db_list_fm_dpp_SHIFT 23 +#define lpfc_if6_wq_db_list_fm_dpp_MASK 0x0001 +#define lpfc_if6_wq_db_list_fm_dpp_WORD word0 +#define lpfc_if6_wq_db_list_fm_dpp_id_SHIFT 16 +#define lpfc_if6_wq_db_list_fm_dpp_id_MASK 0x001F +#define lpfc_if6_wq_db_list_fm_dpp_id_WORD word0 +#define lpfc_if6_wq_db_list_fm_id_SHIFT 0 +#define lpfc_if6_wq_db_list_fm_id_MASK 0xFFFF +#define lpfc_if6_wq_db_list_fm_id_WORD word0 + +#define LPFC_EQCQ_DOORBELL 0x0120 +#define lpfc_eqcq_doorbell_se_SHIFT 31 +#define lpfc_eqcq_doorbell_se_MASK 0x0001 +#define lpfc_eqcq_doorbell_se_WORD word0 +#define LPFC_EQCQ_SOLICIT_ENABLE_OFF 0 +#define LPFC_EQCQ_SOLICIT_ENABLE_ON 1 +#define lpfc_eqcq_doorbell_arm_SHIFT 29 +#define lpfc_eqcq_doorbell_arm_MASK 0x0001 +#define lpfc_eqcq_doorbell_arm_WORD word0 +#define lpfc_eqcq_doorbell_num_released_SHIFT 16 +#define lpfc_eqcq_doorbell_num_released_MASK 0x1FFF +#define lpfc_eqcq_doorbell_num_released_WORD word0 +#define lpfc_eqcq_doorbell_qt_SHIFT 10 +#define lpfc_eqcq_doorbell_qt_MASK 0x0001 +#define lpfc_eqcq_doorbell_qt_WORD word0 +#define LPFC_QUEUE_TYPE_COMPLETION 0 +#define LPFC_QUEUE_TYPE_EVENT 1 +#define lpfc_eqcq_doorbell_eqci_SHIFT 9 +#define lpfc_eqcq_doorbell_eqci_MASK 0x0001 +#define lpfc_eqcq_doorbell_eqci_WORD word0 +#define lpfc_eqcq_doorbell_cqid_lo_SHIFT 0 +#define lpfc_eqcq_doorbell_cqid_lo_MASK 0x03FF +#define lpfc_eqcq_doorbell_cqid_lo_WORD word0 +#define lpfc_eqcq_doorbell_cqid_hi_SHIFT 11 +#define lpfc_eqcq_doorbell_cqid_hi_MASK 0x001F +#define lpfc_eqcq_doorbell_cqid_hi_WORD word0 +#define lpfc_eqcq_doorbell_eqid_lo_SHIFT 0 +#define lpfc_eqcq_doorbell_eqid_lo_MASK 0x01FF +#define lpfc_eqcq_doorbell_eqid_lo_WORD 
word0 +#define lpfc_eqcq_doorbell_eqid_hi_SHIFT 11 +#define lpfc_eqcq_doorbell_eqid_hi_MASK 0x001F +#define lpfc_eqcq_doorbell_eqid_hi_WORD word0 +#define LPFC_CQID_HI_FIELD_SHIFT 10 +#define LPFC_EQID_HI_FIELD_SHIFT 9 + +#define LPFC_IF6_CQ_DOORBELL 0x00C0 +#define lpfc_if6_cq_doorbell_se_SHIFT 31 +#define lpfc_if6_cq_doorbell_se_MASK 0x0001 +#define lpfc_if6_cq_doorbell_se_WORD word0 +#define LPFC_IF6_CQ_SOLICIT_ENABLE_OFF 0 +#define LPFC_IF6_CQ_SOLICIT_ENABLE_ON 1 +#define lpfc_if6_cq_doorbell_arm_SHIFT 29 +#define lpfc_if6_cq_doorbell_arm_MASK 0x0001 +#define lpfc_if6_cq_doorbell_arm_WORD word0 +#define lpfc_if6_cq_doorbell_num_released_SHIFT 16 +#define lpfc_if6_cq_doorbell_num_released_MASK 0x1FFF +#define lpfc_if6_cq_doorbell_num_released_WORD word0 +#define lpfc_if6_cq_doorbell_cqid_SHIFT 0 +#define lpfc_if6_cq_doorbell_cqid_MASK 0xFFFF +#define lpfc_if6_cq_doorbell_cqid_WORD word0 + +#define LPFC_IF6_EQ_DOORBELL 0x0120 +#define lpfc_if6_eq_doorbell_io_SHIFT 31 +#define lpfc_if6_eq_doorbell_io_MASK 0x0001 +#define lpfc_if6_eq_doorbell_io_WORD word0 +#define LPFC_IF6_EQ_INTR_OVERRIDE_OFF 0 +#define LPFC_IF6_EQ_INTR_OVERRIDE_ON 1 +#define lpfc_if6_eq_doorbell_arm_SHIFT 29 +#define lpfc_if6_eq_doorbell_arm_MASK 0x0001 +#define lpfc_if6_eq_doorbell_arm_WORD word0 +#define lpfc_if6_eq_doorbell_num_released_SHIFT 16 +#define lpfc_if6_eq_doorbell_num_released_MASK 0x1FFF +#define lpfc_if6_eq_doorbell_num_released_WORD word0 +#define lpfc_if6_eq_doorbell_eqid_SHIFT 0 +#define lpfc_if6_eq_doorbell_eqid_MASK 0x0FFF +#define lpfc_if6_eq_doorbell_eqid_WORD word0 + +#define LPFC_BMBX 0x0160 +#define lpfc_bmbx_addr_SHIFT 2 +#define lpfc_bmbx_addr_MASK 0x3FFFFFFF +#define lpfc_bmbx_addr_WORD word0 +#define lpfc_bmbx_hi_SHIFT 1 +#define lpfc_bmbx_hi_MASK 0x0001 +#define lpfc_bmbx_hi_WORD word0 +#define lpfc_bmbx_rdy_SHIFT 0 +#define lpfc_bmbx_rdy_MASK 0x0001 +#define lpfc_bmbx_rdy_WORD word0 + +#define LPFC_MQ_DOORBELL 0x0140 +#define LPFC_IF6_MQ_DOORBELL 0x0160 +#define lpfc_mq_doorbell_num_posted_SHIFT 16 +#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF +#define lpfc_mq_doorbell_num_posted_WORD word0 +#define lpfc_mq_doorbell_id_SHIFT 0 +#define lpfc_mq_doorbell_id_MASK 0xFFFF +#define lpfc_mq_doorbell_id_WORD word0 + +struct lpfc_sli4_cfg_mhdr { + uint32_t word1; +#define lpfc_mbox_hdr_emb_SHIFT 0 +#define lpfc_mbox_hdr_emb_MASK 0x00000001 +#define lpfc_mbox_hdr_emb_WORD word1 +#define lpfc_mbox_hdr_sge_cnt_SHIFT 3 +#define lpfc_mbox_hdr_sge_cnt_MASK 0x0000001F +#define lpfc_mbox_hdr_sge_cnt_WORD word1 + uint32_t payload_length; + uint32_t tag_lo; + uint32_t tag_hi; + uint32_t reserved5; +}; + +union lpfc_sli4_cfg_shdr { + struct { + uint32_t word6; +#define lpfc_mbox_hdr_opcode_SHIFT 0 +#define lpfc_mbox_hdr_opcode_MASK 0x000000FF +#define lpfc_mbox_hdr_opcode_WORD word6 +#define lpfc_mbox_hdr_subsystem_SHIFT 8 +#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF +#define lpfc_mbox_hdr_subsystem_WORD word6 +#define lpfc_mbox_hdr_port_number_SHIFT 16 +#define lpfc_mbox_hdr_port_number_MASK 0x000000FF +#define lpfc_mbox_hdr_port_number_WORD word6 +#define lpfc_mbox_hdr_domain_SHIFT 24 +#define lpfc_mbox_hdr_domain_MASK 0x000000FF +#define lpfc_mbox_hdr_domain_WORD word6 + uint32_t timeout; + uint32_t request_length; + uint32_t word9; +#define lpfc_mbox_hdr_version_SHIFT 0 +#define lpfc_mbox_hdr_version_MASK 0x000000FF +#define lpfc_mbox_hdr_version_WORD word9 +#define lpfc_mbox_hdr_pf_num_SHIFT 16 +#define lpfc_mbox_hdr_pf_num_MASK 0x000000FF +#define lpfc_mbox_hdr_pf_num_WORD word9 +#define 
lpfc_mbox_hdr_vh_num_SHIFT 24 +#define lpfc_mbox_hdr_vh_num_MASK 0x000000FF +#define lpfc_mbox_hdr_vh_num_WORD word9 +#define LPFC_Q_CREATE_VERSION_2 2 +#define LPFC_Q_CREATE_VERSION_1 1 +#define LPFC_Q_CREATE_VERSION_0 0 +#define LPFC_OPCODE_VERSION_0 0 +#define LPFC_OPCODE_VERSION_1 1 + } request; + struct { + uint32_t word6; +#define lpfc_mbox_hdr_opcode_SHIFT 0 +#define lpfc_mbox_hdr_opcode_MASK 0x000000FF +#define lpfc_mbox_hdr_opcode_WORD word6 +#define lpfc_mbox_hdr_subsystem_SHIFT 8 +#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF +#define lpfc_mbox_hdr_subsystem_WORD word6 +#define lpfc_mbox_hdr_domain_SHIFT 24 +#define lpfc_mbox_hdr_domain_MASK 0x000000FF +#define lpfc_mbox_hdr_domain_WORD word6 + uint32_t word7; +#define lpfc_mbox_hdr_status_SHIFT 0 +#define lpfc_mbox_hdr_status_MASK 0x000000FF +#define lpfc_mbox_hdr_status_WORD word7 +#define lpfc_mbox_hdr_add_status_SHIFT 8 +#define lpfc_mbox_hdr_add_status_MASK 0x000000FF +#define lpfc_mbox_hdr_add_status_WORD word7 +#define LPFC_ADD_STATUS_INCOMPAT_OBJ 0xA2 +#define lpfc_mbox_hdr_add_status_2_SHIFT 16 +#define lpfc_mbox_hdr_add_status_2_MASK 0x000000FF +#define lpfc_mbox_hdr_add_status_2_WORD word7 +#define LPFC_ADD_STATUS_2_INCOMPAT_FLASH 0x01 +#define LPFC_ADD_STATUS_2_INCORRECT_ASIC 0x02 + uint32_t response_length; + uint32_t actual_response_length; + } response; +}; + +/* Mailbox Header structures. + * struct mbox_header is defined for first generation SLI4_CFG mailbox + * calls deployed for BE-based ports. + * + * struct sli4_mbox_header is defined for second generation SLI4 + * ports that don't deploy the SLI4_CFG mechanism. + */ +struct mbox_header { + struct lpfc_sli4_cfg_mhdr cfg_mhdr; + union lpfc_sli4_cfg_shdr cfg_shdr; +}; + +#define LPFC_EXTENT_LOCAL 0 +#define LPFC_TIMEOUT_DEFAULT 0 +#define LPFC_EXTENT_VERSION_DEFAULT 0 + +/* Subsystem Definitions */ +#define LPFC_MBOX_SUBSYSTEM_NA 0x0 +#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1 +#define LPFC_MBOX_SUBSYSTEM_LOWLEVEL 0xB +#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC + +/* Device Specific Definitions */ + +/* The HOST ENDIAN defines are in Big Endian format. 
*/ +#define HOST_ENDIAN_LOW_WORD0 0xFF3412FF +#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF + +/* Common Opcodes */ +#define LPFC_MBOX_OPCODE_NA 0x00 +#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C +#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D +#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15 +#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20 +#define LPFC_MBOX_OPCODE_NOP 0x21 +#define LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY 0x29 +#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 +#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 +#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 +#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A +#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D +#define LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG 0x3E +#define LPFC_MBOX_OPCODE_SET_BOOT_CONFIG 0x43 +#define LPFC_MBOX_OPCODE_SET_BEACON_CONFIG 0x45 +#define LPFC_MBOX_OPCODE_GET_BEACON_CONFIG 0x46 +#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D +#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A +#define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B +#define LPFC_MBOX_OPCODE_SET_HOST_DATA 0x5D +#define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73 +#define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74 +#define LPFC_MBOX_OPCODE_REG_CONGESTION_BUF 0x8E +#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A +#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B +#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C +#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D +#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0 +#define LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES 0xA1 +#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4 +#define LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG 0xA5 +#define LPFC_MBOX_OPCODE_GET_PROFILE_LIST 0xA6 +#define LPFC_MBOX_OPCODE_SET_ACT_PROFILE 0xA8 +#define LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG 0xA9 +#define LPFC_MBOX_OPCODE_READ_OBJECT 0xAB +#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC +#define LPFC_MBOX_OPCODE_READ_OBJECT_LIST 0xAD +#define LPFC_MBOX_OPCODE_DELETE_OBJECT 0xAE +#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5 +#define LPFC_MBOX_OPCODE_SET_FEATURES 0xBF + +/* FCoE Opcodes */ +#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01 +#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY 0x02 +#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES 0x03 +#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES 0x04 +#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE 0x05 +#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY 0x06 +#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE 0x08 +#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09 +#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A +#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B +#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10 +#define LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET 0x1D +#define LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS 0x21 +#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22 +#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23 +#define LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE 0x42 + +/* Low level Opcodes */ +#define LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION 0x37 + +/* Mailbox command structures */ +struct eq_context { + uint32_t word0; +#define lpfc_eq_context_size_SHIFT 31 +#define lpfc_eq_context_size_MASK 0x00000001 +#define lpfc_eq_context_size_WORD word0 +#define LPFC_EQE_SIZE_4 0x0 +#define LPFC_EQE_SIZE_16 0x1 +#define lpfc_eq_context_valid_SHIFT 29 +#define lpfc_eq_context_valid_MASK 0x00000001 +#define lpfc_eq_context_valid_WORD word0 +#define lpfc_eq_context_autovalid_SHIFT 28 +#define lpfc_eq_context_autovalid_MASK 0x00000001 +#define lpfc_eq_context_autovalid_WORD word0 + uint32_t word1; +#define lpfc_eq_context_count_SHIFT 26 +#define lpfc_eq_context_count_MASK 0x00000003 +#define lpfc_eq_context_count_WORD 
word1 +#define LPFC_EQ_CNT_256 0x0 +#define LPFC_EQ_CNT_512 0x1 +#define LPFC_EQ_CNT_1024 0x2 +#define LPFC_EQ_CNT_2048 0x3 +#define LPFC_EQ_CNT_4096 0x4 + uint32_t word2; +#define lpfc_eq_context_delay_multi_SHIFT 13 +#define lpfc_eq_context_delay_multi_MASK 0x000003FF +#define lpfc_eq_context_delay_multi_WORD word2 + uint32_t reserved3; +}; + +struct eq_delay_info { + uint32_t eq_id; + uint32_t phase; + uint32_t delay_multi; +}; +#define LPFC_MAX_EQ_DELAY_EQID_CNT 8 + +struct sgl_page_pairs { + uint32_t sgl_pg0_addr_lo; + uint32_t sgl_pg0_addr_hi; + uint32_t sgl_pg1_addr_lo; + uint32_t sgl_pg1_addr_hi; +}; + +struct lpfc_mbx_post_sgl_pages { + struct mbox_header header; + uint32_t word0; +#define lpfc_post_sgl_pages_xri_SHIFT 0 +#define lpfc_post_sgl_pages_xri_MASK 0x0000FFFF +#define lpfc_post_sgl_pages_xri_WORD word0 +#define lpfc_post_sgl_pages_xricnt_SHIFT 16 +#define lpfc_post_sgl_pages_xricnt_MASK 0x0000FFFF +#define lpfc_post_sgl_pages_xricnt_WORD word0 + struct sgl_page_pairs sgl_pg_pairs[1]; +}; + +/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */ +struct lpfc_mbx_post_uembed_sgl_page1 { + union lpfc_sli4_cfg_shdr cfg_shdr; + uint32_t word0; + struct sgl_page_pairs sgl_pg_pairs; +}; + +struct lpfc_mbx_sge { + uint32_t pa_lo; + uint32_t pa_hi; + uint32_t length; +}; + +struct lpfc_mbx_host_buf { + uint32_t length; + uint32_t pa_lo; + uint32_t pa_hi; +}; + +struct lpfc_mbx_nembed_cmd { + struct lpfc_sli4_cfg_mhdr cfg_mhdr; +#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19 + struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES]; +}; + +struct lpfc_mbx_nembed_sge_virt { + void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES]; +}; + +#define LPFC_MBX_OBJECT_NAME_LEN_DW 26 +struct lpfc_mbx_read_object { /* Version 0 */ + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_rd_object_rlen_SHIFT 0 +#define lpfc_mbx_rd_object_rlen_MASK 0x00FFFFFF +#define lpfc_mbx_rd_object_rlen_WORD word0 + uint32_t rd_object_offset; + __le32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW]; +#define LPFC_OBJ_NAME_SZ 104 /* 26 x sizeof(uint32_t) is 104. 
*/ + uint32_t rd_object_cnt; + struct lpfc_mbx_host_buf rd_object_hbuf[4]; + } request; + struct { + uint32_t rd_object_actual_rlen; + uint32_t word1; +#define lpfc_mbx_rd_object_eof_SHIFT 31 +#define lpfc_mbx_rd_object_eof_MASK 0x1 +#define lpfc_mbx_rd_object_eof_WORD word1 + } response; + } u; +}; + +struct lpfc_mbx_eq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_eq_create_num_pages_SHIFT 0 +#define lpfc_mbx_eq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_eq_create_num_pages_WORD word0 + struct eq_context context; + struct dma_address page[LPFC_MAX_EQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_eq_create_q_id_SHIFT 0 +#define lpfc_mbx_eq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_eq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_modify_eq_delay { + struct mbox_header header; + union { + struct { + uint32_t num_eq; + struct eq_delay_info eq[LPFC_MAX_EQ_DELAY_EQID_CNT]; + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct lpfc_mbx_eq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_eq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_eq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_eq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct lpfc_mbx_nop { + struct mbox_header header; + uint32_t context[2]; +}; + + + +struct lpfc_mbx_set_ras_fwlog { + struct mbox_header header; + union { + struct { + uint32_t word4; +#define lpfc_fwlog_enable_SHIFT 0 +#define lpfc_fwlog_enable_MASK 0x00000001 +#define lpfc_fwlog_enable_WORD word4 +#define lpfc_fwlog_loglvl_SHIFT 8 +#define lpfc_fwlog_loglvl_MASK 0x0000000F +#define lpfc_fwlog_loglvl_WORD word4 +#define lpfc_fwlog_ra_SHIFT 15 +#define lpfc_fwlog_ra_WORD 0x00000008 +#define lpfc_fwlog_buffcnt_SHIFT 16 +#define lpfc_fwlog_buffcnt_MASK 0x000000FF +#define lpfc_fwlog_buffcnt_WORD word4 +#define lpfc_fwlog_buffsz_SHIFT 24 +#define lpfc_fwlog_buffsz_MASK 0x000000FF +#define lpfc_fwlog_buffsz_WORD word4 + uint32_t word5; +#define lpfc_fwlog_acqe_SHIFT 0 +#define lpfc_fwlog_acqe_MASK 0x0000FFFF +#define lpfc_fwlog_acqe_WORD word5 +#define lpfc_fwlog_cqid_SHIFT 16 +#define lpfc_fwlog_cqid_MASK 0x0000FFFF +#define lpfc_fwlog_cqid_WORD word5 +#define LPFC_MAX_FWLOG_PAGE 16 + struct dma_address lwpd; + struct dma_address buff_fwlog[LPFC_MAX_FWLOG_PAGE]; + } request; + struct { + uint32_t word0; + } response; + } u; +}; + + +struct cq_context { + uint32_t word0; +#define lpfc_cq_context_event_SHIFT 31 +#define lpfc_cq_context_event_MASK 0x00000001 +#define lpfc_cq_context_event_WORD word0 +#define lpfc_cq_context_valid_SHIFT 29 +#define lpfc_cq_context_valid_MASK 0x00000001 +#define lpfc_cq_context_valid_WORD word0 +#define lpfc_cq_context_count_SHIFT 27 +#define lpfc_cq_context_count_MASK 0x00000003 +#define lpfc_cq_context_count_WORD word0 +#define LPFC_CQ_CNT_256 0x0 +#define LPFC_CQ_CNT_512 0x1 +#define LPFC_CQ_CNT_1024 0x2 +#define LPFC_CQ_CNT_WORD7 0x3 +#define lpfc_cq_context_autovalid_SHIFT 15 +#define lpfc_cq_context_autovalid_MASK 0x00000001 +#define lpfc_cq_context_autovalid_WORD word0 + uint32_t word1; +#define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */ +#define lpfc_cq_eq_id_MASK 0x000000FF +#define lpfc_cq_eq_id_WORD word1 +#define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */ +#define lpfc_cq_eq_id_2_MASK 0x0000FFFF +#define lpfc_cq_eq_id_2_WORD word1 + uint32_t lpfc_cq_context_count; /* Version 2 Only */ + uint32_t reserved1; +}; + 
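In the version-0 cq_context above, the CQE depth is not stored directly: the 2-bit count field carries one of the LPFC_CQ_CNT_* encodings, and the name LPFC_CQ_CNT_WORD7 together with the "Version 2 Only" count member suggests that larger depths are carried separately in the version-2 context. A hedged illustration of that mapping follows; demo_cq_cnt_encoding is a hypothetical helper, not a driver function.

#include <stdint.h>

/* Encodings as defined in cq_context above. */
#define LPFC_CQ_CNT_256		0x0
#define LPFC_CQ_CNT_512		0x1
#define LPFC_CQ_CNT_1024	0x2
#define LPFC_CQ_CNT_WORD7	0x3

/*
 * Illustrative helper: translate a requested completion-queue depth
 * into the 2-bit count encoding.  Depths other than the three fixed
 * sizes fall back to the WORD7 encoding, on the assumption that the
 * exact count is then supplied via the version-2 count member.
 */
static inline uint32_t demo_cq_cnt_encoding(uint32_t entries)
{
	switch (entries) {
	case 256:
		return LPFC_CQ_CNT_256;
	case 512:
		return LPFC_CQ_CNT_512;
	case 1024:
		return LPFC_CQ_CNT_1024;
	default:
		return LPFC_CQ_CNT_WORD7;
	}
}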
+struct lpfc_mbx_cq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_cq_create_page_size_SHIFT 16 /* Version 2 Only */ +#define lpfc_mbx_cq_create_page_size_MASK 0x000000FF +#define lpfc_mbx_cq_create_page_size_WORD word0 +#define lpfc_mbx_cq_create_num_pages_SHIFT 0 +#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_num_pages_WORD word0 + struct cq_context context; + struct dma_address page[LPFC_MAX_CQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_cq_create_q_id_SHIFT 0 +#define lpfc_mbx_cq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_cq_create_set { + union lpfc_sli4_cfg_shdr cfg_shdr; + union { + struct { + uint32_t word0; +#define lpfc_mbx_cq_create_set_page_size_SHIFT 16 /* Version 2 Only */ +#define lpfc_mbx_cq_create_set_page_size_MASK 0x000000FF +#define lpfc_mbx_cq_create_set_page_size_WORD word0 +#define lpfc_mbx_cq_create_set_num_pages_SHIFT 0 +#define lpfc_mbx_cq_create_set_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_num_pages_WORD word0 + uint32_t word1; +#define lpfc_mbx_cq_create_set_evt_SHIFT 31 +#define lpfc_mbx_cq_create_set_evt_MASK 0x00000001 +#define lpfc_mbx_cq_create_set_evt_WORD word1 +#define lpfc_mbx_cq_create_set_valid_SHIFT 29 +#define lpfc_mbx_cq_create_set_valid_MASK 0x00000001 +#define lpfc_mbx_cq_create_set_valid_WORD word1 +#define lpfc_mbx_cq_create_set_cqe_cnt_SHIFT 27 +#define lpfc_mbx_cq_create_set_cqe_cnt_MASK 0x00000003 +#define lpfc_mbx_cq_create_set_cqe_cnt_WORD word1 +#define lpfc_mbx_cq_create_set_cqe_size_SHIFT 25 +#define lpfc_mbx_cq_create_set_cqe_size_MASK 0x00000003 +#define lpfc_mbx_cq_create_set_cqe_size_WORD word1 +#define lpfc_mbx_cq_create_set_autovalid_SHIFT 15 +#define lpfc_mbx_cq_create_set_autovalid_MASK 0x0000001 +#define lpfc_mbx_cq_create_set_autovalid_WORD word1 +#define lpfc_mbx_cq_create_set_nodelay_SHIFT 14 +#define lpfc_mbx_cq_create_set_nodelay_MASK 0x00000001 +#define lpfc_mbx_cq_create_set_nodelay_WORD word1 +#define lpfc_mbx_cq_create_set_clswm_SHIFT 12 +#define lpfc_mbx_cq_create_set_clswm_MASK 0x00000003 +#define lpfc_mbx_cq_create_set_clswm_WORD word1 + uint32_t word2; +#define lpfc_mbx_cq_create_set_arm_SHIFT 31 +#define lpfc_mbx_cq_create_set_arm_MASK 0x00000001 +#define lpfc_mbx_cq_create_set_arm_WORD word2 +#define lpfc_mbx_cq_create_set_cq_cnt_SHIFT 16 +#define lpfc_mbx_cq_create_set_cq_cnt_MASK 0x00007FFF +#define lpfc_mbx_cq_create_set_cq_cnt_WORD word2 +#define lpfc_mbx_cq_create_set_num_cq_SHIFT 0 +#define lpfc_mbx_cq_create_set_num_cq_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_num_cq_WORD word2 + uint32_t word3; +#define lpfc_mbx_cq_create_set_eq_id1_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id1_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id1_WORD word3 +#define lpfc_mbx_cq_create_set_eq_id0_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id0_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id0_WORD word3 + uint32_t word4; +#define lpfc_mbx_cq_create_set_eq_id3_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id3_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id3_WORD word4 +#define lpfc_mbx_cq_create_set_eq_id2_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id2_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id2_WORD word4 + uint32_t word5; +#define lpfc_mbx_cq_create_set_eq_id5_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id5_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id5_WORD word5 +#define 
lpfc_mbx_cq_create_set_eq_id4_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id4_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id4_WORD word5 + uint32_t word6; +#define lpfc_mbx_cq_create_set_eq_id7_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id7_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id7_WORD word6 +#define lpfc_mbx_cq_create_set_eq_id6_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id6_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id6_WORD word6 + uint32_t word7; +#define lpfc_mbx_cq_create_set_eq_id9_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id9_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id9_WORD word7 +#define lpfc_mbx_cq_create_set_eq_id8_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id8_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id8_WORD word7 + uint32_t word8; +#define lpfc_mbx_cq_create_set_eq_id11_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id11_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id11_WORD word8 +#define lpfc_mbx_cq_create_set_eq_id10_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id10_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id10_WORD word8 + uint32_t word9; +#define lpfc_mbx_cq_create_set_eq_id13_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id13_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id13_WORD word9 +#define lpfc_mbx_cq_create_set_eq_id12_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id12_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id12_WORD word9 + uint32_t word10; +#define lpfc_mbx_cq_create_set_eq_id15_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id15_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id15_WORD word10 +#define lpfc_mbx_cq_create_set_eq_id14_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id14_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id14_WORD word10 + struct dma_address page[1]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_cq_create_set_num_alloc_SHIFT 16 +#define lpfc_mbx_cq_create_set_num_alloc_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_num_alloc_WORD word0 +#define lpfc_mbx_cq_create_set_base_id_SHIFT 0 +#define lpfc_mbx_cq_create_set_base_id_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_base_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_cq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_cq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_cq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_cq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct wq_context { + uint32_t reserved0; + uint32_t reserved1; + uint32_t reserved2; + uint32_t reserved3; +}; + +struct lpfc_mbx_wq_create { + struct mbox_header header; + union { + struct { /* Version 0 Request */ + uint32_t word0; +#define lpfc_mbx_wq_create_num_pages_SHIFT 0 +#define lpfc_mbx_wq_create_num_pages_MASK 0x000000FF +#define lpfc_mbx_wq_create_num_pages_WORD word0 +#define lpfc_mbx_wq_create_dua_SHIFT 8 +#define lpfc_mbx_wq_create_dua_MASK 0x00000001 +#define lpfc_mbx_wq_create_dua_WORD word0 +#define lpfc_mbx_wq_create_cq_id_SHIFT 16 +#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_cq_id_WORD word0 + struct dma_address page[LPFC_MAX_WQ_PAGE_V0]; + uint32_t word9; +#define lpfc_mbx_wq_create_bua_SHIFT 0 +#define lpfc_mbx_wq_create_bua_MASK 0x00000001 +#define lpfc_mbx_wq_create_bua_WORD word9 +#define lpfc_mbx_wq_create_ulp_num_SHIFT 8 +#define lpfc_mbx_wq_create_ulp_num_MASK 0x000000FF +#define lpfc_mbx_wq_create_ulp_num_WORD word9 + } request; + struct { /* Version 1 Request */ + uint32_t 
word0; /* Word 0 is the same as in v0 */ + uint32_t word1; +#define lpfc_mbx_wq_create_page_size_SHIFT 0 +#define lpfc_mbx_wq_create_page_size_MASK 0x000000FF +#define lpfc_mbx_wq_create_page_size_WORD word1 +#define LPFC_WQ_PAGE_SIZE_4096 0x1 +#define lpfc_mbx_wq_create_dpp_req_SHIFT 15 +#define lpfc_mbx_wq_create_dpp_req_MASK 0x00000001 +#define lpfc_mbx_wq_create_dpp_req_WORD word1 +#define lpfc_mbx_wq_create_doe_SHIFT 14 +#define lpfc_mbx_wq_create_doe_MASK 0x00000001 +#define lpfc_mbx_wq_create_doe_WORD word1 +#define lpfc_mbx_wq_create_toe_SHIFT 13 +#define lpfc_mbx_wq_create_toe_MASK 0x00000001 +#define lpfc_mbx_wq_create_toe_WORD word1 +#define lpfc_mbx_wq_create_wqe_size_SHIFT 8 +#define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F +#define lpfc_mbx_wq_create_wqe_size_WORD word1 +#define LPFC_WQ_WQE_SIZE_64 0x5 +#define LPFC_WQ_WQE_SIZE_128 0x6 +#define lpfc_mbx_wq_create_wqe_count_SHIFT 16 +#define lpfc_mbx_wq_create_wqe_count_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_wqe_count_WORD word1 + uint32_t word2; + struct dma_address page[LPFC_MAX_WQ_PAGE-1]; + } request_1; + struct { + uint32_t word0; +#define lpfc_mbx_wq_create_q_id_SHIFT 0 +#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_q_id_WORD word0 + uint32_t doorbell_offset; + uint32_t word2; +#define lpfc_mbx_wq_create_bar_set_SHIFT 0 +#define lpfc_mbx_wq_create_bar_set_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_bar_set_WORD word2 +#define WQ_PCI_BAR_0_AND_1 0x00 +#define WQ_PCI_BAR_2_AND_3 0x01 +#define WQ_PCI_BAR_4_AND_5 0x02 +#define lpfc_mbx_wq_create_db_format_SHIFT 16 +#define lpfc_mbx_wq_create_db_format_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_db_format_WORD word2 + } response; + struct { + uint32_t word0; +#define lpfc_mbx_wq_create_dpp_rsp_SHIFT 31 +#define lpfc_mbx_wq_create_dpp_rsp_MASK 0x00000001 +#define lpfc_mbx_wq_create_dpp_rsp_WORD word0 +#define lpfc_mbx_wq_create_v1_q_id_SHIFT 0 +#define lpfc_mbx_wq_create_v1_q_id_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_v1_q_id_WORD word0 + uint32_t word1; +#define lpfc_mbx_wq_create_v1_bar_set_SHIFT 0 +#define lpfc_mbx_wq_create_v1_bar_set_MASK 0x0000000F +#define lpfc_mbx_wq_create_v1_bar_set_WORD word1 + uint32_t doorbell_offset; + uint32_t word3; +#define lpfc_mbx_wq_create_dpp_id_SHIFT 16 +#define lpfc_mbx_wq_create_dpp_id_MASK 0x0000001F +#define lpfc_mbx_wq_create_dpp_id_WORD word3 +#define lpfc_mbx_wq_create_dpp_bar_SHIFT 0 +#define lpfc_mbx_wq_create_dpp_bar_MASK 0x0000000F +#define lpfc_mbx_wq_create_dpp_bar_WORD word3 + uint32_t dpp_offset; + } response_1; + } u; +}; + +struct lpfc_mbx_wq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_wq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_wq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_wq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +#define LPFC_HDR_BUF_SIZE 128 +#define LPFC_DATA_BUF_SIZE 2048 +#define LPFC_NVMET_DATA_BUF_SIZE 128 +struct rq_context { + uint32_t word0; +#define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */ +#define lpfc_rq_context_rqe_count_MASK 0x0000000F +#define lpfc_rq_context_rqe_count_WORD word0 +#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ +#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ +#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ +#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ +#define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1-2 Only */ +#define lpfc_rq_context_rqe_count_1_MASK 0x0000FFFF +#define 
lpfc_rq_context_rqe_count_1_WORD word0 +#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1-2 Only */ +#define lpfc_rq_context_rqe_size_MASK 0x0000000F +#define lpfc_rq_context_rqe_size_WORD word0 +#define LPFC_RQE_SIZE_8 2 +#define LPFC_RQE_SIZE_16 3 +#define LPFC_RQE_SIZE_32 4 +#define LPFC_RQE_SIZE_64 5 +#define LPFC_RQE_SIZE_128 6 +#define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */ +#define lpfc_rq_context_page_size_MASK 0x000000FF +#define lpfc_rq_context_page_size_WORD word0 +#define LPFC_RQ_PAGE_SIZE_4096 0x1 + uint32_t word1; +#define lpfc_rq_context_data_size_SHIFT 16 /* Version 2 Only */ +#define lpfc_rq_context_data_size_MASK 0x0000FFFF +#define lpfc_rq_context_data_size_WORD word1 +#define lpfc_rq_context_hdr_size_SHIFT 0 /* Version 2 Only */ +#define lpfc_rq_context_hdr_size_MASK 0x0000FFFF +#define lpfc_rq_context_hdr_size_WORD word1 + uint32_t word2; +#define lpfc_rq_context_cq_id_SHIFT 16 +#define lpfc_rq_context_cq_id_MASK 0x0000FFFF +#define lpfc_rq_context_cq_id_WORD word2 +#define lpfc_rq_context_buf_size_SHIFT 0 +#define lpfc_rq_context_buf_size_MASK 0x0000FFFF +#define lpfc_rq_context_buf_size_WORD word2 +#define lpfc_rq_context_base_cq_SHIFT 0 /* Version 2 Only */ +#define lpfc_rq_context_base_cq_MASK 0x0000FFFF +#define lpfc_rq_context_base_cq_WORD word2 + uint32_t buffer_size; /* Version 1 Only */ +}; + +struct lpfc_mbx_rq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_rq_create_num_pages_SHIFT 0 +#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_num_pages_WORD word0 +#define lpfc_mbx_rq_create_dua_SHIFT 16 +#define lpfc_mbx_rq_create_dua_MASK 0x00000001 +#define lpfc_mbx_rq_create_dua_WORD word0 +#define lpfc_mbx_rq_create_bqu_SHIFT 17 +#define lpfc_mbx_rq_create_bqu_MASK 0x00000001 +#define lpfc_mbx_rq_create_bqu_WORD word0 +#define lpfc_mbx_rq_create_ulp_num_SHIFT 24 +#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF +#define lpfc_mbx_rq_create_ulp_num_WORD word0 + struct rq_context context; + struct dma_address page[LPFC_MAX_RQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_rq_create_q_cnt_v2_SHIFT 16 +#define lpfc_mbx_rq_create_q_cnt_v2_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_q_cnt_v2_WORD word0 +#define lpfc_mbx_rq_create_q_id_SHIFT 0 +#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_q_id_WORD word0 + uint32_t doorbell_offset; + uint32_t word2; +#define lpfc_mbx_rq_create_bar_set_SHIFT 0 +#define lpfc_mbx_rq_create_bar_set_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_bar_set_WORD word2 +#define lpfc_mbx_rq_create_db_format_SHIFT 16 +#define lpfc_mbx_rq_create_db_format_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_db_format_WORD word2 + } response; + } u; +}; + +struct lpfc_mbx_rq_create_v2 { + union lpfc_sli4_cfg_shdr cfg_shdr; + union { + struct { + uint32_t word0; +#define lpfc_mbx_rq_create_num_pages_SHIFT 0 +#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_num_pages_WORD word0 +#define lpfc_mbx_rq_create_rq_cnt_SHIFT 16 +#define lpfc_mbx_rq_create_rq_cnt_MASK 0x000000FF +#define lpfc_mbx_rq_create_rq_cnt_WORD word0 +#define lpfc_mbx_rq_create_dua_SHIFT 16 +#define lpfc_mbx_rq_create_dua_MASK 0x00000001 +#define lpfc_mbx_rq_create_dua_WORD word0 +#define lpfc_mbx_rq_create_bqu_SHIFT 17 +#define lpfc_mbx_rq_create_bqu_MASK 0x00000001 +#define lpfc_mbx_rq_create_bqu_WORD word0 +#define lpfc_mbx_rq_create_ulp_num_SHIFT 24 +#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF +#define 
lpfc_mbx_rq_create_ulp_num_WORD word0 +#define lpfc_mbx_rq_create_dim_SHIFT 29 +#define lpfc_mbx_rq_create_dim_MASK 0x00000001 +#define lpfc_mbx_rq_create_dim_WORD word0 +#define lpfc_mbx_rq_create_dfd_SHIFT 30 +#define lpfc_mbx_rq_create_dfd_MASK 0x00000001 +#define lpfc_mbx_rq_create_dfd_WORD word0 +#define lpfc_mbx_rq_create_dnb_SHIFT 31 +#define lpfc_mbx_rq_create_dnb_MASK 0x00000001 +#define lpfc_mbx_rq_create_dnb_WORD word0 + struct rq_context context; + struct dma_address page[1]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_rq_create_q_cnt_v2_SHIFT 16 +#define lpfc_mbx_rq_create_q_cnt_v2_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_q_cnt_v2_WORD word0 +#define lpfc_mbx_rq_create_q_id_SHIFT 0 +#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_q_id_WORD word0 + uint32_t doorbell_offset; + uint32_t word2; +#define lpfc_mbx_rq_create_bar_set_SHIFT 0 +#define lpfc_mbx_rq_create_bar_set_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_bar_set_WORD word2 +#define lpfc_mbx_rq_create_db_format_SHIFT 16 +#define lpfc_mbx_rq_create_db_format_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_db_format_WORD word2 + } response; + } u; +}; + +struct lpfc_mbx_rq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_rq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_rq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_rq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct mq_context { + uint32_t word0; +#define lpfc_mq_context_cq_id_SHIFT 22 /* Version 0 Only */ +#define lpfc_mq_context_cq_id_MASK 0x000003FF +#define lpfc_mq_context_cq_id_WORD word0 +#define lpfc_mq_context_ring_size_SHIFT 16 +#define lpfc_mq_context_ring_size_MASK 0x0000000F +#define lpfc_mq_context_ring_size_WORD word0 +#define LPFC_MQ_RING_SIZE_16 0x5 +#define LPFC_MQ_RING_SIZE_32 0x6 +#define LPFC_MQ_RING_SIZE_64 0x7 +#define LPFC_MQ_RING_SIZE_128 0x8 + uint32_t word1; +#define lpfc_mq_context_valid_SHIFT 31 +#define lpfc_mq_context_valid_MASK 0x00000001 +#define lpfc_mq_context_valid_WORD word1 + uint32_t reserved2; + uint32_t reserved3; +}; + +struct lpfc_mbx_mq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_mq_create_num_pages_SHIFT 0 +#define lpfc_mbx_mq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_num_pages_WORD word0 + struct mq_context context; + struct dma_address page[LPFC_MAX_MQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_mq_create_q_id_SHIFT 0 +#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_mq_create_ext { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0 +#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_ext_num_pages_WORD word0 +#define lpfc_mbx_mq_create_ext_cq_id_SHIFT 16 /* Version 1 Only */ +#define lpfc_mbx_mq_create_ext_cq_id_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_ext_cq_id_WORD word0 + uint32_t async_evt_bmap; +#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK +#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 +#define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap +#define LPFC_EVT_CODE_LINK_NO_LINK 0x0 +#define LPFC_EVT_CODE_LINK_10_MBIT 0x1 +#define LPFC_EVT_CODE_LINK_100_MBIT 0x2 +#define LPFC_EVT_CODE_LINK_1_GBIT 0x3 +#define LPFC_EVT_CODE_LINK_10_GBIT 0x4 +#define 
lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT LPFC_TRAILER_CODE_FCOE +#define lpfc_mbx_mq_create_ext_async_evt_fip_MASK 0x00000001 +#define lpfc_mbx_mq_create_ext_async_evt_fip_WORD async_evt_bmap +#define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT LPFC_TRAILER_CODE_GRP5 +#define lpfc_mbx_mq_create_ext_async_evt_group5_MASK 0x00000001 +#define lpfc_mbx_mq_create_ext_async_evt_group5_WORD async_evt_bmap +#define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT LPFC_TRAILER_CODE_FC +#define lpfc_mbx_mq_create_ext_async_evt_fc_MASK 0x00000001 +#define lpfc_mbx_mq_create_ext_async_evt_fc_WORD async_evt_bmap +#define LPFC_EVT_CODE_FC_NO_LINK 0x0 +#define LPFC_EVT_CODE_FC_1_GBAUD 0x1 +#define LPFC_EVT_CODE_FC_2_GBAUD 0x2 +#define LPFC_EVT_CODE_FC_4_GBAUD 0x4 +#define LPFC_EVT_CODE_FC_8_GBAUD 0x8 +#define LPFC_EVT_CODE_FC_10_GBAUD 0xA +#define LPFC_EVT_CODE_FC_16_GBAUD 0x10 +#define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT LPFC_TRAILER_CODE_SLI +#define lpfc_mbx_mq_create_ext_async_evt_sli_MASK 0x00000001 +#define lpfc_mbx_mq_create_ext_async_evt_sli_WORD async_evt_bmap + struct mq_context context; + struct dma_address page[LPFC_MAX_MQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_mq_create_q_id_SHIFT 0 +#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_q_id_WORD word0 + } response; + } u; +#define LPFC_ASYNC_EVENT_LINK_STATE 0x2 +#define LPFC_ASYNC_EVENT_FCF_STATE 0x4 +#define LPFC_ASYNC_EVENT_GROUP5 0x20 +}; + +struct lpfc_mbx_mq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_mq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_mq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_mq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +/* Start Gen 2 SLI4 Mailbox definitions: */ + +/* Define allocate-ready Gen 2 SLI4 FCoE Resource Extent Types. 
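+ *
+ * A port reports, for each extent type, how many extents it has available
+ * and how many resource IDs one extent spans (GET_RSRC_EXTENT_INFO below);
+ * the driver then claims ranges with ALLOC_RSRC_EXTENTS.  A minimal sketch
+ * of filling the request and reading the reply, assuming the bf_set()/
+ * bf_get() accessors defined earlier in this header ("info" is an
+ * illustrative pointer to the GET_RSRC_EXTENT_INFO payload of a mailbox
+ * that is built and completed through the driver's normal mailbox path):
+ *
+ *	struct lpfc_mbx_get_rsrc_extent_info *info;
+ *	uint16_t ext_cnt, ext_size;
+ *
+ *	bf_set(lpfc_mbx_get_rsrc_extent_info_type, &info->u.req,
+ *	       LPFC_RSC_TYPE_FCOE_XRI);
+ *	(mailbox is issued and completes)
+ *	ext_cnt  = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,  &info->u.rsp);
+ *	ext_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, &info->u.rsp);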
*/ +#define LPFC_RSC_TYPE_FCOE_VFI 0x20 +#define LPFC_RSC_TYPE_FCOE_VPI 0x21 +#define LPFC_RSC_TYPE_FCOE_RPI 0x22 +#define LPFC_RSC_TYPE_FCOE_XRI 0x23 + +struct lpfc_mbx_get_rsrc_extent_info { + struct mbox_header header; + union { + struct { + uint32_t word4; +#define lpfc_mbx_get_rsrc_extent_info_type_SHIFT 0 +#define lpfc_mbx_get_rsrc_extent_info_type_MASK 0x0000FFFF +#define lpfc_mbx_get_rsrc_extent_info_type_WORD word4 + } req; + struct { + uint32_t word4; +#define lpfc_mbx_get_rsrc_extent_info_cnt_SHIFT 0 +#define lpfc_mbx_get_rsrc_extent_info_cnt_MASK 0x0000FFFF +#define lpfc_mbx_get_rsrc_extent_info_cnt_WORD word4 +#define lpfc_mbx_get_rsrc_extent_info_size_SHIFT 16 +#define lpfc_mbx_get_rsrc_extent_info_size_MASK 0x0000FFFF +#define lpfc_mbx_get_rsrc_extent_info_size_WORD word4 + } rsp; + } u; +}; + +struct lpfc_mbx_query_fw_config { + struct mbox_header header; + struct { + uint32_t config_number; +#define LPFC_FC_FCOE 0x00000007 + uint32_t asic_revision; + uint32_t physical_port; + uint32_t function_mode; +#define LPFC_FCOE_INI_MODE 0x00000040 +#define LPFC_FCOE_TGT_MODE 0x00000080 +#define LPFC_DUA_MODE 0x00000800 + uint32_t ulp0_mode; +#define LPFC_ULP_FCOE_INIT_MODE 0x00000040 +#define LPFC_ULP_FCOE_TGT_MODE 0x00000080 + uint32_t ulp0_nap_words[12]; + uint32_t ulp1_mode; + uint32_t ulp1_nap_words[12]; + uint32_t function_capabilities; + uint32_t cqid_base; + uint32_t cqid_tot; + uint32_t eqid_base; + uint32_t eqid_tot; + uint32_t ulp0_nap2_words[2]; + uint32_t ulp1_nap2_words[2]; + } rsp; +}; + +struct lpfc_mbx_set_beacon_config { + struct mbox_header header; + uint32_t word4; +#define lpfc_mbx_set_beacon_port_num_SHIFT 0 +#define lpfc_mbx_set_beacon_port_num_MASK 0x0000003F +#define lpfc_mbx_set_beacon_port_num_WORD word4 +#define lpfc_mbx_set_beacon_port_type_SHIFT 6 +#define lpfc_mbx_set_beacon_port_type_MASK 0x00000003 +#define lpfc_mbx_set_beacon_port_type_WORD word4 +#define lpfc_mbx_set_beacon_state_SHIFT 8 +#define lpfc_mbx_set_beacon_state_MASK 0x000000FF +#define lpfc_mbx_set_beacon_state_WORD word4 +#define lpfc_mbx_set_beacon_duration_SHIFT 16 +#define lpfc_mbx_set_beacon_duration_MASK 0x000000FF +#define lpfc_mbx_set_beacon_duration_WORD word4 + +/* COMMON_SET_BEACON_CONFIG_V1 */ +#define lpfc_mbx_set_beacon_duration_v1_SHIFT 16 +#define lpfc_mbx_set_beacon_duration_v1_MASK 0x0000FFFF +#define lpfc_mbx_set_beacon_duration_v1_WORD word4 + uint32_t word5; /* RESERVED */ +}; + +struct lpfc_id_range { + uint32_t word5; +#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0 +#define lpfc_mbx_rsrc_id_word4_0_MASK 0x0000FFFF +#define lpfc_mbx_rsrc_id_word4_0_WORD word5 +#define lpfc_mbx_rsrc_id_word4_1_SHIFT 16 +#define lpfc_mbx_rsrc_id_word4_1_MASK 0x0000FFFF +#define lpfc_mbx_rsrc_id_word4_1_WORD word5 +}; + +struct lpfc_mbx_set_link_diag_state { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_set_diag_state_diag_SHIFT 0 +#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001 +#define lpfc_mbx_set_diag_state_diag_WORD word0 +#define lpfc_mbx_set_diag_state_diag_bit_valid_SHIFT 2 +#define lpfc_mbx_set_diag_state_diag_bit_valid_MASK 0x00000001 +#define lpfc_mbx_set_diag_state_diag_bit_valid_WORD word0 +#define LPFC_DIAG_STATE_DIAG_BIT_VALID_NO_CHANGE 0 +#define LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE 1 +#define lpfc_mbx_set_diag_state_link_num_SHIFT 16 +#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F +#define lpfc_mbx_set_diag_state_link_num_WORD word0 +#define lpfc_mbx_set_diag_state_link_type_SHIFT 22 +#define 
lpfc_mbx_set_diag_state_link_type_MASK	0x00000003
+#define lpfc_mbx_set_diag_state_link_type_WORD	word0
+		} req;
+		struct {
+			uint32_t word0;
+		} rsp;
+	} u;
+};
+
+struct lpfc_mbx_set_link_diag_loopback {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_set_diag_lpbk_type_SHIFT	0
+#define lpfc_mbx_set_diag_lpbk_type_MASK	0x00000003
+#define lpfc_mbx_set_diag_lpbk_type_WORD	word0
+#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE		0x0
+#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL	0x1
+#define LPFC_DIAG_LOOPBACK_TYPE_SERDES		0x2
+#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED	0x3
+#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT	16
+#define lpfc_mbx_set_diag_lpbk_link_num_MASK	0x0000003F
+#define lpfc_mbx_set_diag_lpbk_link_num_WORD	word0
+#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT	22
+#define lpfc_mbx_set_diag_lpbk_link_type_MASK	0x00000003
+#define lpfc_mbx_set_diag_lpbk_link_type_WORD	word0
+		} req;
+		struct {
+			uint32_t word0;
+		} rsp;
+	} u;
+};
+
+struct lpfc_mbx_run_link_diag_test {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_run_diag_test_link_num_SHIFT	16
+#define lpfc_mbx_run_diag_test_link_num_MASK	0x0000003F
+#define lpfc_mbx_run_diag_test_link_num_WORD	word0
+#define lpfc_mbx_run_diag_test_link_type_SHIFT	22
+#define lpfc_mbx_run_diag_test_link_type_MASK	0x00000003
+#define lpfc_mbx_run_diag_test_link_type_WORD	word0
+			uint32_t word1;
+#define lpfc_mbx_run_diag_test_test_id_SHIFT	0
+#define lpfc_mbx_run_diag_test_test_id_MASK	0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_id_WORD	word1
+#define lpfc_mbx_run_diag_test_loops_SHIFT	16
+#define lpfc_mbx_run_diag_test_loops_MASK	0x0000FFFF
+#define lpfc_mbx_run_diag_test_loops_WORD	word1
+			uint32_t word2;
+#define lpfc_mbx_run_diag_test_test_ver_SHIFT	0
+#define lpfc_mbx_run_diag_test_test_ver_MASK	0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_ver_WORD	word2
+#define lpfc_mbx_run_diag_test_err_act_SHIFT	16
+#define lpfc_mbx_run_diag_test_err_act_MASK	0x000000FF
+#define lpfc_mbx_run_diag_test_err_act_WORD	word2
+		} req;
+		struct {
+			uint32_t word0;
+		} rsp;
+	} u;
+};
+
+/*
+ * struct lpfc_mbx_alloc_rsrc_extents:
+ * A mbox is generically 256 bytes long. An SLI4_CONFIG mailbox requires
+ * 6 words of header + 4 words of shared subcommand header +
+ * 1 word of Extent-Opcode-specific header = 11 words or 44 bytes total.
+ *
+ * An embedded version of SLI4_CONFIG therefore has 256 - 44 = 212 bytes
+ * for extents payload.
+ *
+ * 212/2 (bytes per extent) = 106 extents.
+ * 106/2 (extents per word) = 53 words.
+ * lpfc_id_range id is statically sized to 53.
+ *
+ * This mailbox definition is used for ALLOC or GET_ALLOCATED
+ * extent ranges. For ALLOC, the type and cnt are required.
+ * For GET_ALLOCATED, only the type is required.
+ */
+struct lpfc_mbx_alloc_rsrc_extents {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_alloc_rsrc_extents_type_SHIFT	0
+#define lpfc_mbx_alloc_rsrc_extents_type_MASK	0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_type_WORD	word4
+#define lpfc_mbx_alloc_rsrc_extents_cnt_SHIFT	16
+#define lpfc_mbx_alloc_rsrc_extents_cnt_MASK	0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_cnt_WORD	word4
+		} req;
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_rsrc_cnt_SHIFT	0
+#define lpfc_mbx_rsrc_cnt_MASK	0x0000FFFF
+#define lpfc_mbx_rsrc_cnt_WORD	word4
+			struct lpfc_id_range id[53];
+		} rsp;
+	} u;
+};
+
+/*
+ * This is the non-embedded version of ALLOC or GET RSRC_EXTENTS.
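+ * (Each 32-bit word of lpfc_id_range carries two 16-bit extent base IDs,
+ *  which is how 106 extents fit in the 53 words noted above.  A minimal
+ *  sketch of walking an embedded reply, assuming the bf_get() accessors
+ *  defined earlier in this header; the local names are illustrative only:
+ *
+ *	struct lpfc_id_range *id_array = &rsrc_ext->u.rsp.id[0];
+ *	uint16_t cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+ *
+ *	for (i = 0, j = 0; i < cnt; i++) {
+ *		if ((i % 2) == 0)
+ *			base_id[i] = bf_get(lpfc_mbx_rsrc_id_word4_0,
+ *					    &id_array[j]);
+ *		else
+ *			base_id[i] = bf_get(lpfc_mbx_rsrc_id_word4_1,
+ *					    &id_array[j++]);
+ *	}
+ * )
+ *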
Word4 in this + * structure shares the same SHIFT/MASK/WORD defines provided in the + * mbx_alloc_rsrc_extents and mbx_get_alloc_rsrc_extents, word4, provided in + * the structures defined above. This non-embedded structure provides for the + * maximum number of extents supported by the port. + */ +struct lpfc_mbx_nembed_rsrc_extent { + union lpfc_sli4_cfg_shdr cfg_shdr; + uint32_t word4; + struct lpfc_id_range id; +}; + +struct lpfc_mbx_dealloc_rsrc_extents { + struct mbox_header header; + struct { + uint32_t word4; +#define lpfc_mbx_dealloc_rsrc_extents_type_SHIFT 0 +#define lpfc_mbx_dealloc_rsrc_extents_type_MASK 0x0000FFFF +#define lpfc_mbx_dealloc_rsrc_extents_type_WORD word4 + } req; + +}; + +/* Start SLI4 FCoE specific mbox structures. */ + +struct lpfc_mbx_post_hdr_tmpl { + struct mbox_header header; + uint32_t word10; +#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT 0 +#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK 0x0000FFFF +#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD word10 +#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT 16 +#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK 0x0000FFFF +#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD word10 + uint32_t rpi_paddr_lo; + uint32_t rpi_paddr_hi; +}; + +struct sli4_sge { /* SLI-4 */ + uint32_t addr_hi; + uint32_t addr_lo; + + uint32_t word2; +#define lpfc_sli4_sge_offset_SHIFT 0 +#define lpfc_sli4_sge_offset_MASK 0x07FFFFFF +#define lpfc_sli4_sge_offset_WORD word2 +#define lpfc_sli4_sge_type_SHIFT 27 +#define lpfc_sli4_sge_type_MASK 0x0000000F +#define lpfc_sli4_sge_type_WORD word2 +#define LPFC_SGE_TYPE_DATA 0x0 +#define LPFC_SGE_TYPE_DIF 0x4 +#define LPFC_SGE_TYPE_LSP 0x5 +#define LPFC_SGE_TYPE_PEDIF 0x6 +#define LPFC_SGE_TYPE_PESEED 0x7 +#define LPFC_SGE_TYPE_DISEED 0x8 +#define LPFC_SGE_TYPE_ENC 0x9 +#define LPFC_SGE_TYPE_ATM 0xA +#define LPFC_SGE_TYPE_SKIP 0xC +#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets it */ +#define lpfc_sli4_sge_last_MASK 0x00000001 +#define lpfc_sli4_sge_last_WORD word2 + uint32_t sge_len; +}; + +struct sli4_hybrid_sgl { + struct list_head list_node; + struct sli4_sge *dma_sgl; + dma_addr_t dma_phys_sgl; +}; + +struct fcp_cmd_rsp_buf { + struct list_head list_node; + + /* for storing cmd/rsp dma alloc'ed virt_addr */ + struct fcp_cmnd *fcp_cmnd; + struct fcp_rsp *fcp_rsp; + + /* for storing this cmd/rsp's dma mapped phys addr from per CPU pool */ + dma_addr_t fcp_cmd_rsp_dma_handle; +}; + +struct sli4_sge_diseed { /* SLI-4 */ + uint32_t ref_tag; + uint32_t ref_tag_tran; + + uint32_t word2; +#define lpfc_sli4_sge_dif_apptran_SHIFT 0 +#define lpfc_sli4_sge_dif_apptran_MASK 0x0000FFFF +#define lpfc_sli4_sge_dif_apptran_WORD word2 +#define lpfc_sli4_sge_dif_af_SHIFT 24 +#define lpfc_sli4_sge_dif_af_MASK 0x00000001 +#define lpfc_sli4_sge_dif_af_WORD word2 +#define lpfc_sli4_sge_dif_na_SHIFT 25 +#define lpfc_sli4_sge_dif_na_MASK 0x00000001 +#define lpfc_sli4_sge_dif_na_WORD word2 +#define lpfc_sli4_sge_dif_hi_SHIFT 26 +#define lpfc_sli4_sge_dif_hi_MASK 0x00000001 +#define lpfc_sli4_sge_dif_hi_WORD word2 +#define lpfc_sli4_sge_dif_type_SHIFT 27 +#define lpfc_sli4_sge_dif_type_MASK 0x0000000F +#define lpfc_sli4_sge_dif_type_WORD word2 +#define lpfc_sli4_sge_dif_last_SHIFT 31 /* Last SEG in the SGL sets it */ +#define lpfc_sli4_sge_dif_last_MASK 0x00000001 +#define lpfc_sli4_sge_dif_last_WORD word2 + uint32_t word3; +#define lpfc_sli4_sge_dif_apptag_SHIFT 0 +#define lpfc_sli4_sge_dif_apptag_MASK 0x0000FFFF +#define lpfc_sli4_sge_dif_apptag_WORD word3 +#define lpfc_sli4_sge_dif_bs_SHIFT 16 +#define 
lpfc_sli4_sge_dif_bs_MASK 0x00000007 +#define lpfc_sli4_sge_dif_bs_WORD word3 +#define lpfc_sli4_sge_dif_ai_SHIFT 19 +#define lpfc_sli4_sge_dif_ai_MASK 0x00000001 +#define lpfc_sli4_sge_dif_ai_WORD word3 +#define lpfc_sli4_sge_dif_me_SHIFT 20 +#define lpfc_sli4_sge_dif_me_MASK 0x00000001 +#define lpfc_sli4_sge_dif_me_WORD word3 +#define lpfc_sli4_sge_dif_re_SHIFT 21 +#define lpfc_sli4_sge_dif_re_MASK 0x00000001 +#define lpfc_sli4_sge_dif_re_WORD word3 +#define lpfc_sli4_sge_dif_ce_SHIFT 22 +#define lpfc_sli4_sge_dif_ce_MASK 0x00000001 +#define lpfc_sli4_sge_dif_ce_WORD word3 +#define lpfc_sli4_sge_dif_nr_SHIFT 23 +#define lpfc_sli4_sge_dif_nr_MASK 0x00000001 +#define lpfc_sli4_sge_dif_nr_WORD word3 +#define lpfc_sli4_sge_dif_oprx_SHIFT 24 +#define lpfc_sli4_sge_dif_oprx_MASK 0x0000000F +#define lpfc_sli4_sge_dif_oprx_WORD word3 +#define lpfc_sli4_sge_dif_optx_SHIFT 28 +#define lpfc_sli4_sge_dif_optx_MASK 0x0000000F +#define lpfc_sli4_sge_dif_optx_WORD word3 +/* optx and oprx use BG_OP_IN defines in lpfc_hw.h */ +}; + +struct fcf_record { + uint32_t max_rcv_size; + uint32_t fka_adv_period; + uint32_t fip_priority; + uint32_t word3; +#define lpfc_fcf_record_mac_0_SHIFT 0 +#define lpfc_fcf_record_mac_0_MASK 0x000000FF +#define lpfc_fcf_record_mac_0_WORD word3 +#define lpfc_fcf_record_mac_1_SHIFT 8 +#define lpfc_fcf_record_mac_1_MASK 0x000000FF +#define lpfc_fcf_record_mac_1_WORD word3 +#define lpfc_fcf_record_mac_2_SHIFT 16 +#define lpfc_fcf_record_mac_2_MASK 0x000000FF +#define lpfc_fcf_record_mac_2_WORD word3 +#define lpfc_fcf_record_mac_3_SHIFT 24 +#define lpfc_fcf_record_mac_3_MASK 0x000000FF +#define lpfc_fcf_record_mac_3_WORD word3 + uint32_t word4; +#define lpfc_fcf_record_mac_4_SHIFT 0 +#define lpfc_fcf_record_mac_4_MASK 0x000000FF +#define lpfc_fcf_record_mac_4_WORD word4 +#define lpfc_fcf_record_mac_5_SHIFT 8 +#define lpfc_fcf_record_mac_5_MASK 0x000000FF +#define lpfc_fcf_record_mac_5_WORD word4 +#define lpfc_fcf_record_fcf_avail_SHIFT 16 +#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF +#define lpfc_fcf_record_fcf_avail_WORD word4 +#define lpfc_fcf_record_mac_addr_prov_SHIFT 24 +#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF +#define lpfc_fcf_record_mac_addr_prov_WORD word4 +#define LPFC_FCF_FPMA 1 /* Fabric Provided MAC Address */ +#define LPFC_FCF_SPMA 2 /* Server Provided MAC Address */ + uint32_t word5; +#define lpfc_fcf_record_fab_name_0_SHIFT 0 +#define lpfc_fcf_record_fab_name_0_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_0_WORD word5 +#define lpfc_fcf_record_fab_name_1_SHIFT 8 +#define lpfc_fcf_record_fab_name_1_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_1_WORD word5 +#define lpfc_fcf_record_fab_name_2_SHIFT 16 +#define lpfc_fcf_record_fab_name_2_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_2_WORD word5 +#define lpfc_fcf_record_fab_name_3_SHIFT 24 +#define lpfc_fcf_record_fab_name_3_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_3_WORD word5 + uint32_t word6; +#define lpfc_fcf_record_fab_name_4_SHIFT 0 +#define lpfc_fcf_record_fab_name_4_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_4_WORD word6 +#define lpfc_fcf_record_fab_name_5_SHIFT 8 +#define lpfc_fcf_record_fab_name_5_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_5_WORD word6 +#define lpfc_fcf_record_fab_name_6_SHIFT 16 +#define lpfc_fcf_record_fab_name_6_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_6_WORD word6 +#define lpfc_fcf_record_fab_name_7_SHIFT 24 +#define lpfc_fcf_record_fab_name_7_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_7_WORD word6 + uint32_t word7; 
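+/*
+ * The byte-wide fields of this record (mac_0..mac_5 and fab_name_0..7 above,
+ * fc_map_0..2 and switch_name_0..7 below) each carry one octet of a larger
+ * entity.  A minimal sketch of rebuilding the FCF MAC address, assuming the
+ * bf_get() accessors defined earlier in this header ("fcf_rec" is an
+ * illustrative pointer to a completed fcf_record):
+ *
+ *	uint8_t mac[6];
+ *
+ *	mac[0] = bf_get(lpfc_fcf_record_mac_0, fcf_rec);
+ *	mac[1] = bf_get(lpfc_fcf_record_mac_1, fcf_rec);
+ *	mac[2] = bf_get(lpfc_fcf_record_mac_2, fcf_rec);
+ *	mac[3] = bf_get(lpfc_fcf_record_mac_3, fcf_rec);
+ *	mac[4] = bf_get(lpfc_fcf_record_mac_4, fcf_rec);
+ *	mac[5] = bf_get(lpfc_fcf_record_mac_5, fcf_rec);
+ */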
+#define lpfc_fcf_record_fc_map_0_SHIFT 0 +#define lpfc_fcf_record_fc_map_0_MASK 0x000000FF +#define lpfc_fcf_record_fc_map_0_WORD word7 +#define lpfc_fcf_record_fc_map_1_SHIFT 8 +#define lpfc_fcf_record_fc_map_1_MASK 0x000000FF +#define lpfc_fcf_record_fc_map_1_WORD word7 +#define lpfc_fcf_record_fc_map_2_SHIFT 16 +#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF +#define lpfc_fcf_record_fc_map_2_WORD word7 +#define lpfc_fcf_record_fcf_valid_SHIFT 24 +#define lpfc_fcf_record_fcf_valid_MASK 0x00000001 +#define lpfc_fcf_record_fcf_valid_WORD word7 +#define lpfc_fcf_record_fcf_fc_SHIFT 25 +#define lpfc_fcf_record_fcf_fc_MASK 0x00000001 +#define lpfc_fcf_record_fcf_fc_WORD word7 +#define lpfc_fcf_record_fcf_sol_SHIFT 31 +#define lpfc_fcf_record_fcf_sol_MASK 0x00000001 +#define lpfc_fcf_record_fcf_sol_WORD word7 + uint32_t word8; +#define lpfc_fcf_record_fcf_index_SHIFT 0 +#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF +#define lpfc_fcf_record_fcf_index_WORD word8 +#define lpfc_fcf_record_fcf_state_SHIFT 16 +#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF +#define lpfc_fcf_record_fcf_state_WORD word8 + uint8_t vlan_bitmap[512]; + uint32_t word137; +#define lpfc_fcf_record_switch_name_0_SHIFT 0 +#define lpfc_fcf_record_switch_name_0_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_0_WORD word137 +#define lpfc_fcf_record_switch_name_1_SHIFT 8 +#define lpfc_fcf_record_switch_name_1_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_1_WORD word137 +#define lpfc_fcf_record_switch_name_2_SHIFT 16 +#define lpfc_fcf_record_switch_name_2_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_2_WORD word137 +#define lpfc_fcf_record_switch_name_3_SHIFT 24 +#define lpfc_fcf_record_switch_name_3_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_3_WORD word137 + uint32_t word138; +#define lpfc_fcf_record_switch_name_4_SHIFT 0 +#define lpfc_fcf_record_switch_name_4_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_4_WORD word138 +#define lpfc_fcf_record_switch_name_5_SHIFT 8 +#define lpfc_fcf_record_switch_name_5_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_5_WORD word138 +#define lpfc_fcf_record_switch_name_6_SHIFT 16 +#define lpfc_fcf_record_switch_name_6_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_6_WORD word138 +#define lpfc_fcf_record_switch_name_7_SHIFT 24 +#define lpfc_fcf_record_switch_name_7_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_7_WORD word138 +}; + +struct lpfc_mbx_read_fcf_tbl { + union lpfc_sli4_cfg_shdr cfg_shdr; + union { + struct { + uint32_t word10; +#define lpfc_mbx_read_fcf_tbl_indx_SHIFT 0 +#define lpfc_mbx_read_fcf_tbl_indx_MASK 0x0000FFFF +#define lpfc_mbx_read_fcf_tbl_indx_WORD word10 + } request; + struct { + uint32_t eventag; + } response; + } u; + uint32_t word11; +#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT 0 +#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK 0x0000FFFF +#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD word11 +}; + +struct lpfc_mbx_add_fcf_tbl_entry { + union lpfc_sli4_cfg_shdr cfg_shdr; + uint32_t word10; +#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT 0 +#define lpfc_mbx_add_fcf_tbl_fcfi_MASK 0x0000FFFF +#define lpfc_mbx_add_fcf_tbl_fcfi_WORD word10 + struct lpfc_mbx_sge fcf_sge; +}; + +struct lpfc_mbx_del_fcf_tbl_entry { + struct mbox_header header; + uint32_t word10; +#define lpfc_mbx_del_fcf_tbl_count_SHIFT 0 +#define lpfc_mbx_del_fcf_tbl_count_MASK 0x0000FFFF +#define lpfc_mbx_del_fcf_tbl_count_WORD word10 +#define lpfc_mbx_del_fcf_tbl_index_SHIFT 16 +#define lpfc_mbx_del_fcf_tbl_index_MASK 0x0000FFFF +#define 
lpfc_mbx_del_fcf_tbl_index_WORD word10 +}; + +struct lpfc_mbx_redisc_fcf_tbl { + struct mbox_header header; + uint32_t word10; +#define lpfc_mbx_redisc_fcf_count_SHIFT 0 +#define lpfc_mbx_redisc_fcf_count_MASK 0x0000FFFF +#define lpfc_mbx_redisc_fcf_count_WORD word10 + uint32_t resvd; + uint32_t word12; +#define lpfc_mbx_redisc_fcf_index_SHIFT 0 +#define lpfc_mbx_redisc_fcf_index_MASK 0x0000FFFF +#define lpfc_mbx_redisc_fcf_index_WORD word12 +}; + +/* Status field for embedded SLI_CONFIG mailbox command */ +#define STATUS_SUCCESS 0x0 +#define STATUS_FAILED 0x1 +#define STATUS_ILLEGAL_REQUEST 0x2 +#define STATUS_ILLEGAL_FIELD 0x3 +#define STATUS_INSUFFICIENT_BUFFER 0x4 +#define STATUS_UNAUTHORIZED_REQUEST 0x5 +#define STATUS_FLASHROM_SAVE_FAILED 0x17 +#define STATUS_FLASHROM_RESTORE_FAILED 0x18 +#define STATUS_ICCBINDEX_ALLOC_FAILED 0x1a +#define STATUS_IOCTLHANDLE_ALLOC_FAILED 0x1b +#define STATUS_INVALID_PHY_ADDR_FROM_OSM 0x1c +#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM 0x1d +#define STATUS_ASSERT_FAILED 0x1e +#define STATUS_INVALID_SESSION 0x1f +#define STATUS_INVALID_CONNECTION 0x20 +#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT 0x21 +#define STATUS_BTL_NO_FREE_SLOT_PATH 0x24 +#define STATUS_BTL_NO_FREE_SLOT_TGTID 0x25 +#define STATUS_OSM_DEVSLOT_NOT_FOUND 0x26 +#define STATUS_FLASHROM_READ_FAILED 0x27 +#define STATUS_POLL_IOCTL_TIMEOUT 0x28 +#define STATUS_ERROR_ACITMAIN 0x2a +#define STATUS_REBOOT_REQUIRED 0x2c +#define STATUS_FCF_IN_USE 0x3a +#define STATUS_FCF_TABLE_EMPTY 0x43 + +/* + * Additional status field for embedded SLI_CONFIG mailbox + * command. + */ +#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67 +#define ADD_STATUS_FW_NOT_SUPPORTED 0xEB +#define ADD_STATUS_INVALID_REQUEST 0x4B +#define ADD_STATUS_INVALID_OBJECT_NAME 0xA0 +#define ADD_STATUS_FW_DOWNLOAD_HW_DISABLED 0x58 + +struct lpfc_mbx_sli4_config { + struct mbox_header header; +}; + +struct lpfc_mbx_init_vfi { + uint32_t word1; +#define lpfc_init_vfi_vr_SHIFT 31 +#define lpfc_init_vfi_vr_MASK 0x00000001 +#define lpfc_init_vfi_vr_WORD word1 +#define lpfc_init_vfi_vt_SHIFT 30 +#define lpfc_init_vfi_vt_MASK 0x00000001 +#define lpfc_init_vfi_vt_WORD word1 +#define lpfc_init_vfi_vf_SHIFT 29 +#define lpfc_init_vfi_vf_MASK 0x00000001 +#define lpfc_init_vfi_vf_WORD word1 +#define lpfc_init_vfi_vp_SHIFT 28 +#define lpfc_init_vfi_vp_MASK 0x00000001 +#define lpfc_init_vfi_vp_WORD word1 +#define lpfc_init_vfi_vfi_SHIFT 0 +#define lpfc_init_vfi_vfi_MASK 0x0000FFFF +#define lpfc_init_vfi_vfi_WORD word1 + uint32_t word2; +#define lpfc_init_vfi_vpi_SHIFT 16 +#define lpfc_init_vfi_vpi_MASK 0x0000FFFF +#define lpfc_init_vfi_vpi_WORD word2 +#define lpfc_init_vfi_fcfi_SHIFT 0 +#define lpfc_init_vfi_fcfi_MASK 0x0000FFFF +#define lpfc_init_vfi_fcfi_WORD word2 + uint32_t word3; +#define lpfc_init_vfi_pri_SHIFT 13 +#define lpfc_init_vfi_pri_MASK 0x00000007 +#define lpfc_init_vfi_pri_WORD word3 +#define lpfc_init_vfi_vf_id_SHIFT 1 +#define lpfc_init_vfi_vf_id_MASK 0x00000FFF +#define lpfc_init_vfi_vf_id_WORD word3 + uint32_t word4; +#define lpfc_init_vfi_hop_count_SHIFT 24 +#define lpfc_init_vfi_hop_count_MASK 0x000000FF +#define lpfc_init_vfi_hop_count_WORD word4 +}; +#define MBX_VFI_IN_USE 0x9F02 + + +struct lpfc_mbx_reg_vfi { + uint32_t word1; +#define lpfc_reg_vfi_upd_SHIFT 29 +#define lpfc_reg_vfi_upd_MASK 0x00000001 +#define lpfc_reg_vfi_upd_WORD word1 +#define lpfc_reg_vfi_vp_SHIFT 28 +#define lpfc_reg_vfi_vp_MASK 0x00000001 +#define lpfc_reg_vfi_vp_WORD word1 +#define lpfc_reg_vfi_vfi_SHIFT 0 +#define lpfc_reg_vfi_vfi_MASK 0x0000FFFF 
+#define lpfc_reg_vfi_vfi_WORD word1 + uint32_t word2; +#define lpfc_reg_vfi_vpi_SHIFT 16 +#define lpfc_reg_vfi_vpi_MASK 0x0000FFFF +#define lpfc_reg_vfi_vpi_WORD word2 +#define lpfc_reg_vfi_fcfi_SHIFT 0 +#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF +#define lpfc_reg_vfi_fcfi_WORD word2 + uint32_t wwn[2]; + struct ulp_bde64 bde; + uint32_t e_d_tov; + uint32_t r_a_tov; + uint32_t word10; +#define lpfc_reg_vfi_nport_id_SHIFT 0 +#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF +#define lpfc_reg_vfi_nport_id_WORD word10 +#define lpfc_reg_vfi_bbcr_SHIFT 27 +#define lpfc_reg_vfi_bbcr_MASK 0x00000001 +#define lpfc_reg_vfi_bbcr_WORD word10 +#define lpfc_reg_vfi_bbscn_SHIFT 28 +#define lpfc_reg_vfi_bbscn_MASK 0x0000000F +#define lpfc_reg_vfi_bbscn_WORD word10 +}; + +struct lpfc_mbx_init_vpi { + uint32_t word1; +#define lpfc_init_vpi_vfi_SHIFT 16 +#define lpfc_init_vpi_vfi_MASK 0x0000FFFF +#define lpfc_init_vpi_vfi_WORD word1 +#define lpfc_init_vpi_vpi_SHIFT 0 +#define lpfc_init_vpi_vpi_MASK 0x0000FFFF +#define lpfc_init_vpi_vpi_WORD word1 +}; + +struct lpfc_mbx_read_vpi { + uint32_t word1_rsvd; + uint32_t word2; +#define lpfc_mbx_read_vpi_vnportid_SHIFT 0 +#define lpfc_mbx_read_vpi_vnportid_MASK 0x00FFFFFF +#define lpfc_mbx_read_vpi_vnportid_WORD word2 + uint32_t word3_rsvd; + uint32_t word4; +#define lpfc_mbx_read_vpi_acq_alpa_SHIFT 0 +#define lpfc_mbx_read_vpi_acq_alpa_MASK 0x000000FF +#define lpfc_mbx_read_vpi_acq_alpa_WORD word4 +#define lpfc_mbx_read_vpi_pb_SHIFT 15 +#define lpfc_mbx_read_vpi_pb_MASK 0x00000001 +#define lpfc_mbx_read_vpi_pb_WORD word4 +#define lpfc_mbx_read_vpi_spec_alpa_SHIFT 16 +#define lpfc_mbx_read_vpi_spec_alpa_MASK 0x000000FF +#define lpfc_mbx_read_vpi_spec_alpa_WORD word4 +#define lpfc_mbx_read_vpi_ns_SHIFT 30 +#define lpfc_mbx_read_vpi_ns_MASK 0x00000001 +#define lpfc_mbx_read_vpi_ns_WORD word4 +#define lpfc_mbx_read_vpi_hl_SHIFT 31 +#define lpfc_mbx_read_vpi_hl_MASK 0x00000001 +#define lpfc_mbx_read_vpi_hl_WORD word4 + uint32_t word5_rsvd; + uint32_t word6; +#define lpfc_mbx_read_vpi_vpi_SHIFT 0 +#define lpfc_mbx_read_vpi_vpi_MASK 0x0000FFFF +#define lpfc_mbx_read_vpi_vpi_WORD word6 + uint32_t word7; +#define lpfc_mbx_read_vpi_mac_0_SHIFT 0 +#define lpfc_mbx_read_vpi_mac_0_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_0_WORD word7 +#define lpfc_mbx_read_vpi_mac_1_SHIFT 8 +#define lpfc_mbx_read_vpi_mac_1_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_1_WORD word7 +#define lpfc_mbx_read_vpi_mac_2_SHIFT 16 +#define lpfc_mbx_read_vpi_mac_2_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_2_WORD word7 +#define lpfc_mbx_read_vpi_mac_3_SHIFT 24 +#define lpfc_mbx_read_vpi_mac_3_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_3_WORD word7 + uint32_t word8; +#define lpfc_mbx_read_vpi_mac_4_SHIFT 0 +#define lpfc_mbx_read_vpi_mac_4_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_4_WORD word8 +#define lpfc_mbx_read_vpi_mac_5_SHIFT 8 +#define lpfc_mbx_read_vpi_mac_5_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_5_WORD word8 +#define lpfc_mbx_read_vpi_vlan_tag_SHIFT 16 +#define lpfc_mbx_read_vpi_vlan_tag_MASK 0x00000FFF +#define lpfc_mbx_read_vpi_vlan_tag_WORD word8 +#define lpfc_mbx_read_vpi_vv_SHIFT 28 +#define lpfc_mbx_read_vpi_vv_MASK 0x0000001 +#define lpfc_mbx_read_vpi_vv_WORD word8 +}; + +struct lpfc_mbx_unreg_vfi { + uint32_t word1_rsvd; + uint32_t word2; +#define lpfc_unreg_vfi_vfi_SHIFT 0 +#define lpfc_unreg_vfi_vfi_MASK 0x0000FFFF +#define lpfc_unreg_vfi_vfi_WORD word2 +}; + +struct lpfc_mbx_resume_rpi { + uint32_t word1; +#define lpfc_resume_rpi_index_SHIFT 0 +#define 
lpfc_resume_rpi_index_MASK 0x0000FFFF +#define lpfc_resume_rpi_index_WORD word1 +#define lpfc_resume_rpi_ii_SHIFT 30 +#define lpfc_resume_rpi_ii_MASK 0x00000003 +#define lpfc_resume_rpi_ii_WORD word1 +#define RESUME_INDEX_RPI 0 +#define RESUME_INDEX_VPI 1 +#define RESUME_INDEX_VFI 2 +#define RESUME_INDEX_FCFI 3 + uint32_t event_tag; +}; + +#define REG_FCF_INVALID_QID 0xFFFF +struct lpfc_mbx_reg_fcfi { + uint32_t word1; +#define lpfc_reg_fcfi_info_index_SHIFT 0 +#define lpfc_reg_fcfi_info_index_MASK 0x0000FFFF +#define lpfc_reg_fcfi_info_index_WORD word1 +#define lpfc_reg_fcfi_fcfi_SHIFT 16 +#define lpfc_reg_fcfi_fcfi_MASK 0x0000FFFF +#define lpfc_reg_fcfi_fcfi_WORD word1 + uint32_t word2; +#define lpfc_reg_fcfi_rq_id1_SHIFT 0 +#define lpfc_reg_fcfi_rq_id1_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id1_WORD word2 +#define lpfc_reg_fcfi_rq_id0_SHIFT 16 +#define lpfc_reg_fcfi_rq_id0_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id0_WORD word2 + uint32_t word3; +#define lpfc_reg_fcfi_rq_id3_SHIFT 0 +#define lpfc_reg_fcfi_rq_id3_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id3_WORD word3 +#define lpfc_reg_fcfi_rq_id2_SHIFT 16 +#define lpfc_reg_fcfi_rq_id2_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id2_WORD word3 + uint32_t word4; +#define lpfc_reg_fcfi_type_match0_SHIFT 24 +#define lpfc_reg_fcfi_type_match0_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match0_WORD word4 +#define lpfc_reg_fcfi_type_mask0_SHIFT 16 +#define lpfc_reg_fcfi_type_mask0_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask0_WORD word4 +#define lpfc_reg_fcfi_rctl_match0_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match0_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match0_WORD word4 +#define lpfc_reg_fcfi_rctl_mask0_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask0_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask0_WORD word4 + uint32_t word5; +#define lpfc_reg_fcfi_type_match1_SHIFT 24 +#define lpfc_reg_fcfi_type_match1_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match1_WORD word5 +#define lpfc_reg_fcfi_type_mask1_SHIFT 16 +#define lpfc_reg_fcfi_type_mask1_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask1_WORD word5 +#define lpfc_reg_fcfi_rctl_match1_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match1_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match1_WORD word5 +#define lpfc_reg_fcfi_rctl_mask1_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask1_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask1_WORD word5 + uint32_t word6; +#define lpfc_reg_fcfi_type_match2_SHIFT 24 +#define lpfc_reg_fcfi_type_match2_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match2_WORD word6 +#define lpfc_reg_fcfi_type_mask2_SHIFT 16 +#define lpfc_reg_fcfi_type_mask2_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask2_WORD word6 +#define lpfc_reg_fcfi_rctl_match2_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match2_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match2_WORD word6 +#define lpfc_reg_fcfi_rctl_mask2_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask2_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask2_WORD word6 + uint32_t word7; +#define lpfc_reg_fcfi_type_match3_SHIFT 24 +#define lpfc_reg_fcfi_type_match3_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match3_WORD word7 +#define lpfc_reg_fcfi_type_mask3_SHIFT 16 +#define lpfc_reg_fcfi_type_mask3_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask3_WORD word7 +#define lpfc_reg_fcfi_rctl_match3_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match3_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match3_WORD word7 +#define lpfc_reg_fcfi_rctl_mask3_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask3_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask3_WORD word7 + uint32_t word8; +#define 
lpfc_reg_fcfi_mam_SHIFT 13 +#define lpfc_reg_fcfi_mam_MASK 0x00000003 +#define lpfc_reg_fcfi_mam_WORD word8 +#define LPFC_MAM_BOTH 0 /* Both SPMA and FPMA */ +#define LPFC_MAM_SPMA 1 /* Server Provided MAC Address */ +#define LPFC_MAM_FPMA 2 /* Fabric Provided MAC Address */ +#define lpfc_reg_fcfi_vv_SHIFT 12 +#define lpfc_reg_fcfi_vv_MASK 0x00000001 +#define lpfc_reg_fcfi_vv_WORD word8 +#define lpfc_reg_fcfi_vlan_tag_SHIFT 0 +#define lpfc_reg_fcfi_vlan_tag_MASK 0x00000FFF +#define lpfc_reg_fcfi_vlan_tag_WORD word8 +}; + +struct lpfc_mbx_reg_fcfi_mrq { + uint32_t word1; +#define lpfc_reg_fcfi_mrq_info_index_SHIFT 0 +#define lpfc_reg_fcfi_mrq_info_index_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_info_index_WORD word1 +#define lpfc_reg_fcfi_mrq_fcfi_SHIFT 16 +#define lpfc_reg_fcfi_mrq_fcfi_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_fcfi_WORD word1 + uint32_t word2; +#define lpfc_reg_fcfi_mrq_rq_id1_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rq_id1_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_rq_id1_WORD word2 +#define lpfc_reg_fcfi_mrq_rq_id0_SHIFT 16 +#define lpfc_reg_fcfi_mrq_rq_id0_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_rq_id0_WORD word2 + uint32_t word3; +#define lpfc_reg_fcfi_mrq_rq_id3_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rq_id3_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_rq_id3_WORD word3 +#define lpfc_reg_fcfi_mrq_rq_id2_SHIFT 16 +#define lpfc_reg_fcfi_mrq_rq_id2_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_rq_id2_WORD word3 + uint32_t word4; +#define lpfc_reg_fcfi_mrq_type_match0_SHIFT 24 +#define lpfc_reg_fcfi_mrq_type_match0_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_match0_WORD word4 +#define lpfc_reg_fcfi_mrq_type_mask0_SHIFT 16 +#define lpfc_reg_fcfi_mrq_type_mask0_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_mask0_WORD word4 +#define lpfc_reg_fcfi_mrq_rctl_match0_SHIFT 8 +#define lpfc_reg_fcfi_mrq_rctl_match0_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_match0_WORD word4 +#define lpfc_reg_fcfi_mrq_rctl_mask0_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rctl_mask0_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_mask0_WORD word4 + uint32_t word5; +#define lpfc_reg_fcfi_mrq_type_match1_SHIFT 24 +#define lpfc_reg_fcfi_mrq_type_match1_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_match1_WORD word5 +#define lpfc_reg_fcfi_mrq_type_mask1_SHIFT 16 +#define lpfc_reg_fcfi_mrq_type_mask1_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_mask1_WORD word5 +#define lpfc_reg_fcfi_mrq_rctl_match1_SHIFT 8 +#define lpfc_reg_fcfi_mrq_rctl_match1_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_match1_WORD word5 +#define lpfc_reg_fcfi_mrq_rctl_mask1_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rctl_mask1_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_mask1_WORD word5 + uint32_t word6; +#define lpfc_reg_fcfi_mrq_type_match2_SHIFT 24 +#define lpfc_reg_fcfi_mrq_type_match2_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_match2_WORD word6 +#define lpfc_reg_fcfi_mrq_type_mask2_SHIFT 16 +#define lpfc_reg_fcfi_mrq_type_mask2_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_mask2_WORD word6 +#define lpfc_reg_fcfi_mrq_rctl_match2_SHIFT 8 +#define lpfc_reg_fcfi_mrq_rctl_match2_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_match2_WORD word6 +#define lpfc_reg_fcfi_mrq_rctl_mask2_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rctl_mask2_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_mask2_WORD word6 + uint32_t word7; +#define lpfc_reg_fcfi_mrq_type_match3_SHIFT 24 +#define lpfc_reg_fcfi_mrq_type_match3_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_match3_WORD word7 +#define lpfc_reg_fcfi_mrq_type_mask3_SHIFT 16 +#define 
lpfc_reg_fcfi_mrq_type_mask3_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_mask3_WORD word7 +#define lpfc_reg_fcfi_mrq_rctl_match3_SHIFT 8 +#define lpfc_reg_fcfi_mrq_rctl_match3_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_match3_WORD word7 +#define lpfc_reg_fcfi_mrq_rctl_mask3_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rctl_mask3_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_mask3_WORD word7 + uint32_t word8; +#define lpfc_reg_fcfi_mrq_ptc7_SHIFT 31 +#define lpfc_reg_fcfi_mrq_ptc7_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc7_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc6_SHIFT 30 +#define lpfc_reg_fcfi_mrq_ptc6_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc6_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc5_SHIFT 29 +#define lpfc_reg_fcfi_mrq_ptc5_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc5_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc4_SHIFT 28 +#define lpfc_reg_fcfi_mrq_ptc4_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc4_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc3_SHIFT 27 +#define lpfc_reg_fcfi_mrq_ptc3_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc3_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc2_SHIFT 26 +#define lpfc_reg_fcfi_mrq_ptc2_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc2_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc1_SHIFT 25 +#define lpfc_reg_fcfi_mrq_ptc1_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc1_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc0_SHIFT 24 +#define lpfc_reg_fcfi_mrq_ptc0_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc0_WORD word8 +#define lpfc_reg_fcfi_mrq_pt7_SHIFT 23 +#define lpfc_reg_fcfi_mrq_pt7_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt7_WORD word8 +#define lpfc_reg_fcfi_mrq_pt6_SHIFT 22 +#define lpfc_reg_fcfi_mrq_pt6_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt6_WORD word8 +#define lpfc_reg_fcfi_mrq_pt5_SHIFT 21 +#define lpfc_reg_fcfi_mrq_pt5_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt5_WORD word8 +#define lpfc_reg_fcfi_mrq_pt4_SHIFT 20 +#define lpfc_reg_fcfi_mrq_pt4_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt4_WORD word8 +#define lpfc_reg_fcfi_mrq_pt3_SHIFT 19 +#define lpfc_reg_fcfi_mrq_pt3_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt3_WORD word8 +#define lpfc_reg_fcfi_mrq_pt2_SHIFT 18 +#define lpfc_reg_fcfi_mrq_pt2_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt2_WORD word8 +#define lpfc_reg_fcfi_mrq_pt1_SHIFT 17 +#define lpfc_reg_fcfi_mrq_pt1_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt1_WORD word8 +#define lpfc_reg_fcfi_mrq_pt0_SHIFT 16 +#define lpfc_reg_fcfi_mrq_pt0_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt0_WORD word8 +#define lpfc_reg_fcfi_mrq_xmv_SHIFT 15 +#define lpfc_reg_fcfi_mrq_xmv_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_xmv_WORD word8 +#define lpfc_reg_fcfi_mrq_mode_SHIFT 13 +#define lpfc_reg_fcfi_mrq_mode_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_mode_WORD word8 +#define lpfc_reg_fcfi_mrq_vv_SHIFT 12 +#define lpfc_reg_fcfi_mrq_vv_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_vv_WORD word8 +#define lpfc_reg_fcfi_mrq_vlan_tag_SHIFT 0 +#define lpfc_reg_fcfi_mrq_vlan_tag_MASK 0x00000FFF +#define lpfc_reg_fcfi_mrq_vlan_tag_WORD word8 + uint32_t word9; +#define lpfc_reg_fcfi_mrq_policy_SHIFT 12 +#define lpfc_reg_fcfi_mrq_policy_MASK 0x0000000F +#define lpfc_reg_fcfi_mrq_policy_WORD word9 +#define lpfc_reg_fcfi_mrq_filter_SHIFT 8 +#define lpfc_reg_fcfi_mrq_filter_MASK 0x0000000F +#define lpfc_reg_fcfi_mrq_filter_WORD word9 +#define lpfc_reg_fcfi_mrq_npairs_SHIFT 0 +#define lpfc_reg_fcfi_mrq_npairs_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_npairs_WORD word9 + uint32_t word10; + uint32_t word11; + uint32_t word12; + uint32_t word13; + uint32_t 
word14; + uint32_t word15; + uint32_t word16; +}; + +struct lpfc_mbx_unreg_fcfi { + uint32_t word1_rsv; + uint32_t word2; +#define lpfc_unreg_fcfi_SHIFT 0 +#define lpfc_unreg_fcfi_MASK 0x0000FFFF +#define lpfc_unreg_fcfi_WORD word2 +}; + +struct lpfc_mbx_read_rev { + uint32_t word1; +#define lpfc_mbx_rd_rev_sli_lvl_SHIFT 16 +#define lpfc_mbx_rd_rev_sli_lvl_MASK 0x0000000F +#define lpfc_mbx_rd_rev_sli_lvl_WORD word1 +#define lpfc_mbx_rd_rev_fcoe_SHIFT 20 +#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001 +#define lpfc_mbx_rd_rev_fcoe_WORD word1 +#define lpfc_mbx_rd_rev_cee_ver_SHIFT 21 +#define lpfc_mbx_rd_rev_cee_ver_MASK 0x00000003 +#define lpfc_mbx_rd_rev_cee_ver_WORD word1 +#define LPFC_PREDCBX_CEE_MODE 0 +#define LPFC_DCBX_CEE_MODE 1 +#define lpfc_mbx_rd_rev_vpd_SHIFT 29 +#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001 +#define lpfc_mbx_rd_rev_vpd_WORD word1 + uint32_t first_hw_rev; +#define LPFC_G7_ASIC_1 0xd + uint32_t second_hw_rev; + uint32_t word4_rsvd; + uint32_t third_hw_rev; + uint32_t word6; +#define lpfc_mbx_rd_rev_fcph_low_SHIFT 0 +#define lpfc_mbx_rd_rev_fcph_low_MASK 0x000000FF +#define lpfc_mbx_rd_rev_fcph_low_WORD word6 +#define lpfc_mbx_rd_rev_fcph_high_SHIFT 8 +#define lpfc_mbx_rd_rev_fcph_high_MASK 0x000000FF +#define lpfc_mbx_rd_rev_fcph_high_WORD word6 +#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT 16 +#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK 0x000000FF +#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD word6 +#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT 24 +#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK 0x000000FF +#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD word6 + uint32_t word7_rsvd; + uint32_t fw_id_rev; + uint8_t fw_name[16]; + uint32_t ulp_fw_id_rev; + uint8_t ulp_fw_name[16]; + uint32_t word18_47_rsvd[30]; + uint32_t word48; +#define lpfc_mbx_rd_rev_avail_len_SHIFT 0 +#define lpfc_mbx_rd_rev_avail_len_MASK 0x00FFFFFF +#define lpfc_mbx_rd_rev_avail_len_WORD word48 + uint32_t vpd_paddr_low; + uint32_t vpd_paddr_high; + uint32_t avail_vpd_len; + uint32_t rsvd_52_63[12]; +}; + +struct lpfc_mbx_read_config { + uint32_t word1; +#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31 +#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001 +#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1 +#define lpfc_mbx_rd_conf_fawwpn_SHIFT 30 +#define lpfc_mbx_rd_conf_fawwpn_MASK 0x00000001 +#define lpfc_mbx_rd_conf_fawwpn_WORD word1 +#define lpfc_mbx_rd_conf_wcs_SHIFT 28 /* warning signaling */ +#define lpfc_mbx_rd_conf_wcs_MASK 0x00000001 +#define lpfc_mbx_rd_conf_wcs_WORD word1 +#define lpfc_mbx_rd_conf_acs_SHIFT 27 /* alarm signaling */ +#define lpfc_mbx_rd_conf_acs_MASK 0x00000001 +#define lpfc_mbx_rd_conf_acs_WORD word1 + uint32_t word2; +#define lpfc_mbx_rd_conf_lnk_numb_SHIFT 0 +#define lpfc_mbx_rd_conf_lnk_numb_MASK 0x0000003F +#define lpfc_mbx_rd_conf_lnk_numb_WORD word2 +#define lpfc_mbx_rd_conf_lnk_type_SHIFT 6 +#define lpfc_mbx_rd_conf_lnk_type_MASK 0x00000003 +#define lpfc_mbx_rd_conf_lnk_type_WORD word2 +#define LPFC_LNK_TYPE_GE 0 +#define LPFC_LNK_TYPE_FC 1 +#define lpfc_mbx_rd_conf_lnk_ldv_SHIFT 8 +#define lpfc_mbx_rd_conf_lnk_ldv_MASK 0x00000001 +#define lpfc_mbx_rd_conf_lnk_ldv_WORD word2 +#define lpfc_mbx_rd_conf_trunk_SHIFT 12 +#define lpfc_mbx_rd_conf_trunk_MASK 0x0000000F +#define lpfc_mbx_rd_conf_trunk_WORD word2 +#define lpfc_mbx_rd_conf_pt_SHIFT 20 +#define lpfc_mbx_rd_conf_pt_MASK 0x00000003 +#define lpfc_mbx_rd_conf_pt_WORD word2 +#define lpfc_mbx_rd_conf_tf_SHIFT 22 +#define lpfc_mbx_rd_conf_tf_MASK 0x00000001 +#define lpfc_mbx_rd_conf_tf_WORD word2 +#define lpfc_mbx_rd_conf_ptv_SHIFT 23 
+#define lpfc_mbx_rd_conf_ptv_MASK 0x00000001 +#define lpfc_mbx_rd_conf_ptv_WORD word2 +#define lpfc_mbx_rd_conf_topology_SHIFT 24 +#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF +#define lpfc_mbx_rd_conf_topology_WORD word2 + uint32_t rsvd_3; + uint32_t word4; +#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_e_d_tov_WORD word4 + uint32_t rsvd_5; + uint32_t word6; +#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_r_a_tov_WORD word6 +#define lpfc_mbx_rd_conf_link_speed_SHIFT 16 +#define lpfc_mbx_rd_conf_link_speed_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_link_speed_WORD word6 + uint32_t rsvd_7; + uint32_t word8; +#define lpfc_mbx_rd_conf_bbscn_min_SHIFT 0 +#define lpfc_mbx_rd_conf_bbscn_min_MASK 0x0000000F +#define lpfc_mbx_rd_conf_bbscn_min_WORD word8 +#define lpfc_mbx_rd_conf_bbscn_max_SHIFT 4 +#define lpfc_mbx_rd_conf_bbscn_max_MASK 0x0000000F +#define lpfc_mbx_rd_conf_bbscn_max_WORD word8 +#define lpfc_mbx_rd_conf_bbscn_def_SHIFT 8 +#define lpfc_mbx_rd_conf_bbscn_def_MASK 0x0000000F +#define lpfc_mbx_rd_conf_bbscn_def_WORD word8 + uint32_t word9; +#define lpfc_mbx_rd_conf_lmt_SHIFT 0 +#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_lmt_WORD word9 + uint32_t rsvd_10; + uint32_t rsvd_11; + uint32_t word12; +#define lpfc_mbx_rd_conf_xri_base_SHIFT 0 +#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_xri_base_WORD word12 +#define lpfc_mbx_rd_conf_xri_count_SHIFT 16 +#define lpfc_mbx_rd_conf_xri_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_xri_count_WORD word12 + uint32_t word13; +#define lpfc_mbx_rd_conf_rpi_base_SHIFT 0 +#define lpfc_mbx_rd_conf_rpi_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_rpi_base_WORD word13 +#define lpfc_mbx_rd_conf_rpi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_rpi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_rpi_count_WORD word13 + uint32_t word14; +#define lpfc_mbx_rd_conf_vpi_base_SHIFT 0 +#define lpfc_mbx_rd_conf_vpi_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vpi_base_WORD word14 +#define lpfc_mbx_rd_conf_vpi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_vpi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vpi_count_WORD word14 + uint32_t word15; +#define lpfc_mbx_rd_conf_vfi_base_SHIFT 0 +#define lpfc_mbx_rd_conf_vfi_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vfi_base_WORD word15 +#define lpfc_mbx_rd_conf_vfi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vfi_count_WORD word15 + uint32_t word16; +#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_fcfi_count_WORD word16 + uint32_t word17; +#define lpfc_mbx_rd_conf_rq_count_SHIFT 0 +#define lpfc_mbx_rd_conf_rq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_rq_count_WORD word17 +#define lpfc_mbx_rd_conf_eq_count_SHIFT 16 +#define lpfc_mbx_rd_conf_eq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_eq_count_WORD word17 + uint32_t word18; +#define lpfc_mbx_rd_conf_wq_count_SHIFT 0 +#define lpfc_mbx_rd_conf_wq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_wq_count_WORD word18 +#define lpfc_mbx_rd_conf_cq_count_SHIFT 16 +#define lpfc_mbx_rd_conf_cq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_cq_count_WORD word18 +}; + +struct lpfc_mbx_request_features { + uint32_t word1; +#define lpfc_mbx_rq_ftr_qry_SHIFT 0 +#define lpfc_mbx_rq_ftr_qry_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_qry_WORD word1 + 
uint32_t word2; +#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT 0 +#define lpfc_mbx_rq_ftr_rq_iaab_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_iaab_WORD word2 +#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT 1 +#define lpfc_mbx_rq_ftr_rq_npiv_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_npiv_WORD word2 +#define lpfc_mbx_rq_ftr_rq_dif_SHIFT 2 +#define lpfc_mbx_rq_ftr_rq_dif_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_dif_WORD word2 +#define lpfc_mbx_rq_ftr_rq_vf_SHIFT 3 +#define lpfc_mbx_rq_ftr_rq_vf_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_vf_WORD word2 +#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT 4 +#define lpfc_mbx_rq_ftr_rq_fcpi_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_fcpi_WORD word2 +#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT 5 +#define lpfc_mbx_rq_ftr_rq_fcpt_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_fcpt_WORD word2 +#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT 6 +#define lpfc_mbx_rq_ftr_rq_fcpc_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_fcpc_WORD word2 +#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7 +#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2 +#define lpfc_mbx_rq_ftr_rq_iaar_SHIFT 9 +#define lpfc_mbx_rq_ftr_rq_iaar_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_iaar_WORD word2 +#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11 +#define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_perfh_WORD word2 +#define lpfc_mbx_rq_ftr_rq_mrqp_SHIFT 16 +#define lpfc_mbx_rq_ftr_rq_mrqp_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_mrqp_WORD word2 +#define lpfc_mbx_rq_ftr_rq_ashdr_SHIFT 17 +#define lpfc_mbx_rq_ftr_rq_ashdr_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_ashdr_WORD word2 + uint32_t word3; +#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0 +#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_iaab_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT 1 +#define lpfc_mbx_rq_ftr_rsp_npiv_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_npiv_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT 2 +#define lpfc_mbx_rq_ftr_rsp_dif_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_dif_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT 3 +#define lpfc_mbx_rq_ftr_rsp_vf__MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_vf_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT 4 +#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT 5 +#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT 6 +#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7 +#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11 +#define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_mrqp_SHIFT 16 +#define lpfc_mbx_rq_ftr_rsp_mrqp_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_mrqp_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_ashdr_SHIFT 17 +#define lpfc_mbx_rq_ftr_rsp_ashdr_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_ashdr_WORD word3 +}; + +struct lpfc_mbx_memory_dump_type3 { + uint32_t word1; +#define lpfc_mbx_memory_dump_type3_type_SHIFT 0 +#define lpfc_mbx_memory_dump_type3_type_MASK 0x0000000f +#define lpfc_mbx_memory_dump_type3_type_WORD word1 +#define lpfc_mbx_memory_dump_type3_link_SHIFT 24 +#define lpfc_mbx_memory_dump_type3_link_MASK 0x000000ff +#define lpfc_mbx_memory_dump_type3_link_WORD word1 + uint32_t word2; +#define 
lpfc_mbx_memory_dump_type3_page_no_SHIFT 0 +#define lpfc_mbx_memory_dump_type3_page_no_MASK 0x0000ffff +#define lpfc_mbx_memory_dump_type3_page_no_WORD word2 +#define lpfc_mbx_memory_dump_type3_offset_SHIFT 16 +#define lpfc_mbx_memory_dump_type3_offset_MASK 0x0000ffff +#define lpfc_mbx_memory_dump_type3_offset_WORD word2 + uint32_t word3; +#define lpfc_mbx_memory_dump_type3_length_SHIFT 0 +#define lpfc_mbx_memory_dump_type3_length_MASK 0x00ffffff +#define lpfc_mbx_memory_dump_type3_length_WORD word3 + uint32_t addr_lo; + uint32_t addr_hi; + uint32_t return_len; +}; + +#define DMP_PAGE_A0 0xa0 +#define DMP_PAGE_A2 0xa2 +#define DMP_SFF_PAGE_A0_SIZE 256 +#define DMP_SFF_PAGE_A2_SIZE 256 + +#define SFP_WAVELENGTH_LC1310 1310 +#define SFP_WAVELENGTH_LL1550 1550 + + +/* + * * SFF-8472 TABLE 3.4 + * */ +#define SFF_PG0_CONNECTOR_UNKNOWN 0x00 /* Unknown */ +#define SFF_PG0_CONNECTOR_SC 0x01 /* SC */ +#define SFF_PG0_CONNECTOR_FC_COPPER1 0x02 /* FC style 1 copper connector */ +#define SFF_PG0_CONNECTOR_FC_COPPER2 0x03 /* FC style 2 copper connector */ +#define SFF_PG0_CONNECTOR_BNC 0x04 /* BNC / TNC */ +#define SFF_PG0_CONNECTOR__FC_COAX 0x05 /* FC coaxial headers */ +#define SFF_PG0_CONNECTOR_FIBERJACK 0x06 /* FiberJack */ +#define SFF_PG0_CONNECTOR_LC 0x07 /* LC */ +#define SFF_PG0_CONNECTOR_MT 0x08 /* MT - RJ */ +#define SFF_PG0_CONNECTOR_MU 0x09 /* MU */ +#define SFF_PG0_CONNECTOR_SF 0x0A /* SG */ +#define SFF_PG0_CONNECTOR_OPTICAL_PIGTAIL 0x0B /* Optical pigtail */ +#define SFF_PG0_CONNECTOR_OPTICAL_PARALLEL 0x0C /* MPO Parallel Optic */ +#define SFF_PG0_CONNECTOR_HSSDC_II 0x20 /* HSSDC II */ +#define SFF_PG0_CONNECTOR_COPPER_PIGTAIL 0x21 /* Copper pigtail */ +#define SFF_PG0_CONNECTOR_RJ45 0x22 /* RJ45 */ + +/* SFF-8472 Table 3.1 Diagnostics: Data Fields Address/Page A0 */ + +#define SSF_IDENTIFIER 0 +#define SSF_EXT_IDENTIFIER 1 +#define SSF_CONNECTOR 2 +#define SSF_TRANSCEIVER_CODE_B0 3 +#define SSF_TRANSCEIVER_CODE_B1 4 +#define SSF_TRANSCEIVER_CODE_B2 5 +#define SSF_TRANSCEIVER_CODE_B3 6 +#define SSF_TRANSCEIVER_CODE_B4 7 +#define SSF_TRANSCEIVER_CODE_B5 8 +#define SSF_TRANSCEIVER_CODE_B6 9 +#define SSF_TRANSCEIVER_CODE_B7 10 +#define SSF_ENCODING 11 +#define SSF_BR_NOMINAL 12 +#define SSF_RATE_IDENTIFIER 13 +#define SSF_LENGTH_9UM_KM 14 +#define SSF_LENGTH_9UM 15 +#define SSF_LENGTH_50UM_OM2 16 +#define SSF_LENGTH_62UM_OM1 17 +#define SFF_LENGTH_COPPER 18 +#define SSF_LENGTH_50UM_OM3 19 +#define SSF_VENDOR_NAME 20 +#define SSF_TRANSCEIVER2 36 +#define SSF_VENDOR_OUI 37 +#define SSF_VENDOR_PN 40 +#define SSF_VENDOR_REV 56 +#define SSF_WAVELENGTH_B1 60 +#define SSF_WAVELENGTH_B0 61 +#define SSF_CC_BASE 63 +#define SSF_OPTIONS_B1 64 +#define SSF_OPTIONS_B0 65 +#define SSF_BR_MAX 66 +#define SSF_BR_MIN 67 +#define SSF_VENDOR_SN 68 +#define SSF_DATE_CODE 84 +#define SSF_MONITORING_TYPEDIAGNOSTIC 92 +#define SSF_ENHANCED_OPTIONS 93 +#define SFF_8472_COMPLIANCE 94 +#define SSF_CC_EXT 95 +#define SSF_A0_VENDOR_SPECIFIC 96 + +/* SFF-8472 Table 3.1a Diagnostics: Data Fields Address/Page A2 */ + +#define SSF_TEMP_HIGH_ALARM 0 +#define SSF_TEMP_LOW_ALARM 2 +#define SSF_TEMP_HIGH_WARNING 4 +#define SSF_TEMP_LOW_WARNING 6 +#define SSF_VOLTAGE_HIGH_ALARM 8 +#define SSF_VOLTAGE_LOW_ALARM 10 +#define SSF_VOLTAGE_HIGH_WARNING 12 +#define SSF_VOLTAGE_LOW_WARNING 14 +#define SSF_BIAS_HIGH_ALARM 16 +#define SSF_BIAS_LOW_ALARM 18 +#define SSF_BIAS_HIGH_WARNING 20 +#define SSF_BIAS_LOW_WARNING 22 +#define SSF_TXPOWER_HIGH_ALARM 24 +#define SSF_TXPOWER_LOW_ALARM 26 +#define SSF_TXPOWER_HIGH_WARNING 28 +#define 
SSF_TXPOWER_LOW_WARNING 30 +#define SSF_RXPOWER_HIGH_ALARM 32 +#define SSF_RXPOWER_LOW_ALARM 34 +#define SSF_RXPOWER_HIGH_WARNING 36 +#define SSF_RXPOWER_LOW_WARNING 38 +#define SSF_EXT_CAL_CONSTANTS 56 +#define SSF_CC_DMI 95 +#define SFF_TEMPERATURE_B1 96 +#define SFF_TEMPERATURE_B0 97 +#define SFF_VCC_B1 98 +#define SFF_VCC_B0 99 +#define SFF_TX_BIAS_CURRENT_B1 100 +#define SFF_TX_BIAS_CURRENT_B0 101 +#define SFF_TXPOWER_B1 102 +#define SFF_TXPOWER_B0 103 +#define SFF_RXPOWER_B1 104 +#define SFF_RXPOWER_B0 105 +#define SSF_STATUS_CONTROL 110 +#define SSF_ALARM_FLAGS 112 +#define SSF_WARNING_FLAGS 116 +#define SSF_EXT_TATUS_CONTROL_B1 118 +#define SSF_EXT_TATUS_CONTROL_B0 119 +#define SSF_A2_VENDOR_SPECIFIC 120 +#define SSF_USER_EEPROM 128 +#define SSF_VENDOR_CONTROL 148 + + +/* + * Tranceiver codes Fibre Channel SFF-8472 + * Table 3.5. + */ + +struct sff_trasnceiver_codes_byte0 { + uint8_t inifiband:4; + uint8_t teng_ethernet:4; +}; + +struct sff_trasnceiver_codes_byte1 { + uint8_t sonet:6; + uint8_t escon:2; +}; + +struct sff_trasnceiver_codes_byte2 { + uint8_t soNet:8; +}; + +struct sff_trasnceiver_codes_byte3 { + uint8_t ethernet:8; +}; + +struct sff_trasnceiver_codes_byte4 { + uint8_t fc_el_lo:1; + uint8_t fc_lw_laser:1; + uint8_t fc_sw_laser:1; + uint8_t fc_md_distance:1; + uint8_t fc_lg_distance:1; + uint8_t fc_int_distance:1; + uint8_t fc_short_distance:1; + uint8_t fc_vld_distance:1; +}; + +struct sff_trasnceiver_codes_byte5 { + uint8_t reserved1:1; + uint8_t reserved2:1; + uint8_t fc_sfp_active:1; /* Active cable */ + uint8_t fc_sfp_passive:1; /* Passive cable */ + uint8_t fc_lw_laser:1; /* Longwave laser */ + uint8_t fc_sw_laser_sl:1; + uint8_t fc_sw_laser_sn:1; + uint8_t fc_el_hi:1; /* Electrical enclosure high bit */ +}; + +struct sff_trasnceiver_codes_byte6 { + uint8_t fc_tm_sm:1; /* Single Mode */ + uint8_t reserved:1; + uint8_t fc_tm_m6:1; /* Multimode, 62.5um (M6) */ + uint8_t fc_tm_tv:1; /* Video Coax (TV) */ + uint8_t fc_tm_mi:1; /* Miniature Coax (MI) */ + uint8_t fc_tm_tp:1; /* Twisted Pair (TP) */ + uint8_t fc_tm_tw:1; /* Twin Axial Pair */ +}; + +struct sff_trasnceiver_codes_byte7 { + uint8_t fc_sp_100MB:1; /* 100 MB/sec */ + uint8_t speed_chk_ecc:1; + uint8_t fc_sp_200mb:1; /* 200 MB/sec */ + uint8_t fc_sp_3200MB:1; /* 3200 MB/sec */ + uint8_t fc_sp_400MB:1; /* 400 MB/sec */ + uint8_t fc_sp_1600MB:1; /* 1600 MB/sec */ + uint8_t fc_sp_800MB:1; /* 800 MB/sec */ + uint8_t fc_sp_1200MB:1; /* 1200 MB/sec */ +}; + +/* User writable non-volatile memory, SFF-8472 Table 3.20 */ +struct user_eeprom { + uint8_t vendor_name[16]; + uint8_t vendor_oui[3]; + uint8_t vendor_pn[816]; + uint8_t vendor_rev[4]; + uint8_t vendor_sn[16]; + uint8_t datecode[6]; + uint8_t lot_code[2]; + uint8_t reserved191[57]; +}; + +#define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \ + &(~((SLI4_PAGE_SIZE)-1))) + +struct lpfc_sli4_parameters { + uint32_t word0; +#define cfg_prot_type_SHIFT 0 +#define cfg_prot_type_MASK 0x000000FF +#define cfg_prot_type_WORD word0 + uint32_t word1; +#define cfg_ft_SHIFT 0 +#define cfg_ft_MASK 0x00000001 +#define cfg_ft_WORD word1 +#define cfg_sli_rev_SHIFT 4 +#define cfg_sli_rev_MASK 0x0000000f +#define cfg_sli_rev_WORD word1 +#define cfg_sli_family_SHIFT 8 +#define cfg_sli_family_MASK 0x0000000f +#define cfg_sli_family_WORD word1 +#define cfg_if_type_SHIFT 12 +#define cfg_if_type_MASK 0x0000000f +#define cfg_if_type_WORD word1 +#define cfg_sli_hint_1_SHIFT 16 +#define cfg_sli_hint_1_MASK 0x000000ff +#define cfg_sli_hint_1_WORD word1 +#define 
cfg_sli_hint_2_SHIFT 24 +#define cfg_sli_hint_2_MASK 0x0000001f +#define cfg_sli_hint_2_WORD word1 + uint32_t word2; +#define cfg_eqav_SHIFT 31 +#define cfg_eqav_MASK 0x00000001 +#define cfg_eqav_WORD word2 + uint32_t word3; + uint32_t word4; +#define cfg_cqv_SHIFT 14 +#define cfg_cqv_MASK 0x00000003 +#define cfg_cqv_WORD word4 +#define cfg_cqpsize_SHIFT 16 +#define cfg_cqpsize_MASK 0x000000ff +#define cfg_cqpsize_WORD word4 +#define cfg_cqav_SHIFT 31 +#define cfg_cqav_MASK 0x00000001 +#define cfg_cqav_WORD word4 + uint32_t word5; + uint32_t word6; +#define cfg_mqv_SHIFT 14 +#define cfg_mqv_MASK 0x00000003 +#define cfg_mqv_WORD word6 + uint32_t word7; + uint32_t word8; +#define cfg_wqpcnt_SHIFT 0 +#define cfg_wqpcnt_MASK 0x0000000f +#define cfg_wqpcnt_WORD word8 +#define cfg_wqsize_SHIFT 8 +#define cfg_wqsize_MASK 0x0000000f +#define cfg_wqsize_WORD word8 +#define cfg_wqv_SHIFT 14 +#define cfg_wqv_MASK 0x00000003 +#define cfg_wqv_WORD word8 +#define cfg_wqpsize_SHIFT 16 +#define cfg_wqpsize_MASK 0x000000ff +#define cfg_wqpsize_WORD word8 + uint32_t word9; + uint32_t word10; +#define cfg_rqv_SHIFT 14 +#define cfg_rqv_MASK 0x00000003 +#define cfg_rqv_WORD word10 + uint32_t word11; +#define cfg_rq_db_window_SHIFT 28 +#define cfg_rq_db_window_MASK 0x0000000f +#define cfg_rq_db_window_WORD word11 + uint32_t word12; +#define cfg_fcoe_SHIFT 0 +#define cfg_fcoe_MASK 0x00000001 +#define cfg_fcoe_WORD word12 +#define cfg_ext_SHIFT 1 +#define cfg_ext_MASK 0x00000001 +#define cfg_ext_WORD word12 +#define cfg_hdrr_SHIFT 2 +#define cfg_hdrr_MASK 0x00000001 +#define cfg_hdrr_WORD word12 +#define cfg_phwq_SHIFT 15 +#define cfg_phwq_MASK 0x00000001 +#define cfg_phwq_WORD word12 +#define cfg_oas_SHIFT 25 +#define cfg_oas_MASK 0x00000001 +#define cfg_oas_WORD word12 +#define cfg_loopbk_scope_SHIFT 28 +#define cfg_loopbk_scope_MASK 0x0000000f +#define cfg_loopbk_scope_WORD word12 + uint32_t sge_supp_len; + uint32_t word14; +#define cfg_sgl_page_cnt_SHIFT 0 +#define cfg_sgl_page_cnt_MASK 0x0000000f +#define cfg_sgl_page_cnt_WORD word14 +#define cfg_sgl_page_size_SHIFT 8 +#define cfg_sgl_page_size_MASK 0x000000ff +#define cfg_sgl_page_size_WORD word14 +#define cfg_sgl_pp_align_SHIFT 16 +#define cfg_sgl_pp_align_MASK 0x000000ff +#define cfg_sgl_pp_align_WORD word14 + uint32_t word15; + uint32_t word16; + uint32_t word17; + uint32_t word18; + uint32_t word19; +#define cfg_ext_embed_cb_SHIFT 0 +#define cfg_ext_embed_cb_MASK 0x00000001 +#define cfg_ext_embed_cb_WORD word19 +#define cfg_mds_diags_SHIFT 1 +#define cfg_mds_diags_MASK 0x00000001 +#define cfg_mds_diags_WORD word19 +#define cfg_nvme_SHIFT 3 +#define cfg_nvme_MASK 0x00000001 +#define cfg_nvme_WORD word19 +#define cfg_xib_SHIFT 4 +#define cfg_xib_MASK 0x00000001 +#define cfg_xib_WORD word19 +#define cfg_xpsgl_SHIFT 6 +#define cfg_xpsgl_MASK 0x00000001 +#define cfg_xpsgl_WORD word19 +#define cfg_eqdr_SHIFT 8 +#define cfg_eqdr_MASK 0x00000001 +#define cfg_eqdr_WORD word19 +#define cfg_nosr_SHIFT 9 +#define cfg_nosr_MASK 0x00000001 +#define cfg_nosr_WORD word19 +#define cfg_bv1s_SHIFT 10 +#define cfg_bv1s_MASK 0x00000001 +#define cfg_bv1s_WORD word19 + +#define cfg_nsler_SHIFT 12 +#define cfg_nsler_MASK 0x00000001 +#define cfg_nsler_WORD word19 +#define cfg_pvl_SHIFT 13 +#define cfg_pvl_MASK 0x00000001 +#define cfg_pvl_WORD word19 + +#define cfg_pbde_SHIFT 20 +#define cfg_pbde_MASK 0x00000001 +#define cfg_pbde_WORD word19 + + uint32_t word20; +#define cfg_max_tow_xri_SHIFT 0 +#define cfg_max_tow_xri_MASK 0x0000ffff +#define cfg_max_tow_xri_WORD word20 + + 
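(For reference: every field above is described by a _SHIFT/_MASK/_WORD macro triplet rather than a C bitfield. A minimal sketch of the access pattern follows; the sketch_bf_* names are illustrative only, though the driver's own bf_get()/bf_set() helpers elsewhere in this header follow the same token-pasting scheme.)

/* Sketch: generic accessors for a SHIFT/MASK/WORD macro triplet. */
#define sketch_bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define sketch_bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((ptr)->name##_WORD & \
		~(name##_MASK << name##_SHIFT)) | \
		(((value) & name##_MASK) << name##_SHIFT)))

/* Example usage (assuming a filled-in mailbox response):
 *	sli_rev = sketch_bf_get(cfg_sli_rev, &sli4_params);
 *	sketch_bf_set(cfg_cqv, &sli4_params, 2);
 */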
uint32_t word21; +#define cfg_mi_ver_SHIFT 0 +#define cfg_mi_ver_MASK 0x0000ffff +#define cfg_mi_ver_WORD word21 +#define cfg_cmf_SHIFT 24 +#define cfg_cmf_MASK 0x000000ff +#define cfg_cmf_WORD word21 + + uint32_t mib_size; + uint32_t word23; /* RESERVED */ + + uint32_t word24; +#define cfg_frag_field_offset_SHIFT 0 +#define cfg_frag_field_offset_MASK 0x0000ffff +#define cfg_frag_field_offset_WORD word24 + +#define cfg_frag_field_size_SHIFT 16 +#define cfg_frag_field_size_MASK 0x0000ffff +#define cfg_frag_field_size_WORD word24 + + uint32_t word25; +#define cfg_sgl_field_offset_SHIFT 0 +#define cfg_sgl_field_offset_MASK 0x0000ffff +#define cfg_sgl_field_offset_WORD word25 + +#define cfg_sgl_field_size_SHIFT 16 +#define cfg_sgl_field_size_MASK 0x0000ffff +#define cfg_sgl_field_size_WORD word25 + + uint32_t word26; /* Chain SGE initial value LOW */ + uint32_t word27; /* Chain SGE initial value HIGH */ +#define LPFC_NODELAY_MAX_IO 32 +}; + +#define LPFC_SET_UE_RECOVERY 0x10 +#define LPFC_SET_MDS_DIAGS 0x12 +#define LPFC_SET_DUAL_DUMP 0x1e +#define LPFC_SET_CGN_SIGNAL 0x1f +#define LPFC_SET_ENABLE_MI 0x21 +#define LPFC_SET_LD_SIGNAL 0x23 +#define LPFC_SET_ENABLE_CMF 0x24 +struct lpfc_mbx_set_feature { + struct mbox_header header; + uint32_t feature; + uint32_t param_len; + uint32_t word6; +#define lpfc_mbx_set_feature_UER_SHIFT 0 +#define lpfc_mbx_set_feature_UER_MASK 0x00000001 +#define lpfc_mbx_set_feature_UER_WORD word6 +#define lpfc_mbx_set_feature_mds_SHIFT 2 +#define lpfc_mbx_set_feature_mds_MASK 0x00000001 +#define lpfc_mbx_set_feature_mds_WORD word6 +#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1 +#define lpfc_mbx_set_feature_mds_deep_loopbk_MASK 0x00000001 +#define lpfc_mbx_set_feature_mds_deep_loopbk_WORD word6 +#define lpfc_mbx_set_feature_CGN_warn_freq_SHIFT 0 +#define lpfc_mbx_set_feature_CGN_warn_freq_MASK 0x0000ffff +#define lpfc_mbx_set_feature_CGN_warn_freq_WORD word6 +#define lpfc_mbx_set_feature_dd_SHIFT 0 +#define lpfc_mbx_set_feature_dd_MASK 0x00000001 +#define lpfc_mbx_set_feature_dd_WORD word6 +#define lpfc_mbx_set_feature_ddquery_SHIFT 1 +#define lpfc_mbx_set_feature_ddquery_MASK 0x00000001 +#define lpfc_mbx_set_feature_ddquery_WORD word6 +#define LPFC_DISABLE_DUAL_DUMP 0 +#define LPFC_ENABLE_DUAL_DUMP 1 +#define LPFC_QUERY_OP_DUAL_DUMP 2 +#define lpfc_mbx_set_feature_cmf_SHIFT 0 +#define lpfc_mbx_set_feature_cmf_MASK 0x00000001 +#define lpfc_mbx_set_feature_cmf_WORD word6 +#define lpfc_mbx_set_feature_lds_qry_SHIFT 0 +#define lpfc_mbx_set_feature_lds_qry_MASK 0x00000001 +#define lpfc_mbx_set_feature_lds_qry_WORD word6 +#define LPFC_QUERY_LDS_OP 1 +#define lpfc_mbx_set_feature_mi_SHIFT 0 +#define lpfc_mbx_set_feature_mi_MASK 0x0000ffff +#define lpfc_mbx_set_feature_mi_WORD word6 +#define lpfc_mbx_set_feature_milunq_SHIFT 16 +#define lpfc_mbx_set_feature_milunq_MASK 0x0000ffff +#define lpfc_mbx_set_feature_milunq_WORD word6 + u32 word7; +#define lpfc_mbx_set_feature_UERP_SHIFT 0 +#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff +#define lpfc_mbx_set_feature_UERP_WORD word7 +#define lpfc_mbx_set_feature_UESR_SHIFT 16 +#define lpfc_mbx_set_feature_UESR_MASK 0x0000ffff +#define lpfc_mbx_set_feature_UESR_WORD word7 +#define lpfc_mbx_set_feature_CGN_alarm_freq_SHIFT 0 +#define lpfc_mbx_set_feature_CGN_alarm_freq_MASK 0x0000ffff +#define lpfc_mbx_set_feature_CGN_alarm_freq_WORD word7 + u32 word8; +#define lpfc_mbx_set_feature_CGN_acqe_freq_SHIFT 0 +#define lpfc_mbx_set_feature_CGN_acqe_freq_MASK 0x000000ff +#define lpfc_mbx_set_feature_CGN_acqe_freq_WORD word8 + u32 
word9; + u32 word10; +}; + + +#define LPFC_SET_HOST_OS_DRIVER_VERSION 0x2 +#define LPFC_SET_HOST_DATE_TIME 0x4 + +struct lpfc_mbx_set_host_date_time { + uint32_t word6; +#define lpfc_mbx_set_host_month_WORD word6 +#define lpfc_mbx_set_host_month_SHIFT 16 +#define lpfc_mbx_set_host_month_MASK 0xFF +#define lpfc_mbx_set_host_day_WORD word6 +#define lpfc_mbx_set_host_day_SHIFT 8 +#define lpfc_mbx_set_host_day_MASK 0xFF +#define lpfc_mbx_set_host_year_WORD word6 +#define lpfc_mbx_set_host_year_SHIFT 0 +#define lpfc_mbx_set_host_year_MASK 0xFF + uint32_t word7; +#define lpfc_mbx_set_host_hour_WORD word7 +#define lpfc_mbx_set_host_hour_SHIFT 16 +#define lpfc_mbx_set_host_hour_MASK 0xFF +#define lpfc_mbx_set_host_min_WORD word7 +#define lpfc_mbx_set_host_min_SHIFT 8 +#define lpfc_mbx_set_host_min_MASK 0xFF +#define lpfc_mbx_set_host_sec_WORD word7 +#define lpfc_mbx_set_host_sec_SHIFT 0 +#define lpfc_mbx_set_host_sec_MASK 0xFF +}; + +struct lpfc_mbx_set_host_data { +#define LPFC_HOST_OS_DRIVER_VERSION_SIZE 48 + struct mbox_header header; + uint32_t param_id; + uint32_t param_len; + union { + uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE]; + struct lpfc_mbx_set_host_date_time tm; + } un; +}; + +struct lpfc_mbx_set_trunk_mode { + struct mbox_header header; + uint32_t word0; +#define lpfc_mbx_set_trunk_mode_WORD word0 +#define lpfc_mbx_set_trunk_mode_SHIFT 0 +#define lpfc_mbx_set_trunk_mode_MASK 0xFF + uint32_t word1; + uint32_t word2; +}; + +struct lpfc_mbx_get_sli4_parameters { + struct mbox_header header; + struct lpfc_sli4_parameters sli4_parameters; +}; + +struct lpfc_mbx_reg_congestion_buf { + struct mbox_header header; + uint32_t word0; +#define lpfc_mbx_reg_cgn_buf_type_WORD word0 +#define lpfc_mbx_reg_cgn_buf_type_SHIFT 0 +#define lpfc_mbx_reg_cgn_buf_type_MASK 0xFF +#define lpfc_mbx_reg_cgn_buf_cnt_WORD word0 +#define lpfc_mbx_reg_cgn_buf_cnt_SHIFT 16 +#define lpfc_mbx_reg_cgn_buf_cnt_MASK 0xFF + uint32_t word1; + uint32_t length; + uint32_t addr_lo; + uint32_t addr_hi; +}; + +struct lpfc_rscr_desc_generic { +#define LPFC_RSRC_DESC_WSIZE 22 + uint32_t desc[LPFC_RSRC_DESC_WSIZE]; +}; + +struct lpfc_rsrc_desc_pcie { + uint32_t word0; +#define lpfc_rsrc_desc_pcie_type_SHIFT 0 +#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff +#define lpfc_rsrc_desc_pcie_type_WORD word0 +#define LPFC_RSRC_DESC_TYPE_PCIE 0x40 +#define lpfc_rsrc_desc_pcie_length_SHIFT 8 +#define lpfc_rsrc_desc_pcie_length_MASK 0x000000ff +#define lpfc_rsrc_desc_pcie_length_WORD word0 + uint32_t word1; +#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0 +#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff +#define lpfc_rsrc_desc_pcie_pfnum_WORD word1 + uint32_t reserved; + uint32_t word3; +#define lpfc_rsrc_desc_pcie_sriov_sta_SHIFT 0 +#define lpfc_rsrc_desc_pcie_sriov_sta_MASK 0x000000ff +#define lpfc_rsrc_desc_pcie_sriov_sta_WORD word3 +#define lpfc_rsrc_desc_pcie_pf_sta_SHIFT 8 +#define lpfc_rsrc_desc_pcie_pf_sta_MASK 0x000000ff +#define lpfc_rsrc_desc_pcie_pf_sta_WORD word3 +#define lpfc_rsrc_desc_pcie_pf_type_SHIFT 16 +#define lpfc_rsrc_desc_pcie_pf_type_MASK 0x000000ff +#define lpfc_rsrc_desc_pcie_pf_type_WORD word3 + uint32_t word4; +#define lpfc_rsrc_desc_pcie_nr_virtfn_SHIFT 0 +#define lpfc_rsrc_desc_pcie_nr_virtfn_MASK 0x0000ffff +#define lpfc_rsrc_desc_pcie_nr_virtfn_WORD word4 +}; + +struct lpfc_rsrc_desc_fcfcoe { + uint32_t word0; +#define lpfc_rsrc_desc_fcfcoe_type_SHIFT 0 +#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff +#define lpfc_rsrc_desc_fcfcoe_type_WORD word0 +#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43 +#define 
lpfc_rsrc_desc_fcfcoe_length_SHIFT 8 +#define lpfc_rsrc_desc_fcfcoe_length_MASK 0x000000ff +#define lpfc_rsrc_desc_fcfcoe_length_WORD word0 +#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD 0 +#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH 72 +#define LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH 88 + uint32_t word1; +#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0 +#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff +#define lpfc_rsrc_desc_fcfcoe_vfnum_WORD word1 +#define lpfc_rsrc_desc_fcfcoe_pfnum_SHIFT 16 +#define lpfc_rsrc_desc_fcfcoe_pfnum_MASK 0x000007ff +#define lpfc_rsrc_desc_fcfcoe_pfnum_WORD word1 + uint32_t word2; +#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_SHIFT 0 +#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_MASK 0x0000ffff +#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_WORD word2 +#define lpfc_rsrc_desc_fcfcoe_xri_cnt_SHIFT 16 +#define lpfc_rsrc_desc_fcfcoe_xri_cnt_MASK 0x0000ffff +#define lpfc_rsrc_desc_fcfcoe_xri_cnt_WORD word2 + uint32_t word3; +#define lpfc_rsrc_desc_fcfcoe_wq_cnt_SHIFT 0 +#define lpfc_rsrc_desc_fcfcoe_wq_cnt_MASK 0x0000ffff +#define lpfc_rsrc_desc_fcfcoe_wq_cnt_WORD word3 +#define lpfc_rsrc_desc_fcfcoe_rq_cnt_SHIFT 16 +#define lpfc_rsrc_desc_fcfcoe_rq_cnt_MASK 0x0000ffff +#define lpfc_rsrc_desc_fcfcoe_rq_cnt_WORD word3 + uint32_t word4; +#define lpfc_rsrc_desc_fcfcoe_cq_cnt_SHIFT 0 +#define lpfc_rsrc_desc_fcfcoe_cq_cnt_MASK 0x0000ffff +#define lpfc_rsrc_desc_fcfcoe_cq_cnt_WORD word4 +#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_SHIFT 16 +#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_MASK 0x0000ffff +#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_WORD word4 + uint32_t word5; +#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_SHIFT 0 +#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_MASK 0x0000ffff +#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_WORD word5 +#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_SHIFT 16 +#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_MASK 0x0000ffff +#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_WORD word5 + uint32_t word6; + uint32_t word7; + uint32_t word8; + uint32_t word9; + uint32_t word10; + uint32_t word11; + uint32_t word12; + uint32_t word13; +#define lpfc_rsrc_desc_fcfcoe_lnk_nr_SHIFT 0 +#define lpfc_rsrc_desc_fcfcoe_lnk_nr_MASK 0x0000003f +#define lpfc_rsrc_desc_fcfcoe_lnk_nr_WORD word13 +#define lpfc_rsrc_desc_fcfcoe_lnk_tp_SHIFT 6 +#define lpfc_rsrc_desc_fcfcoe_lnk_tp_MASK 0x00000003 +#define lpfc_rsrc_desc_fcfcoe_lnk_tp_WORD word13 +#define lpfc_rsrc_desc_fcfcoe_lmc_SHIFT 8 +#define lpfc_rsrc_desc_fcfcoe_lmc_MASK 0x00000001 +#define lpfc_rsrc_desc_fcfcoe_lmc_WORD word13 +#define lpfc_rsrc_desc_fcfcoe_lld_SHIFT 9 +#define lpfc_rsrc_desc_fcfcoe_lld_MASK 0x00000001 +#define lpfc_rsrc_desc_fcfcoe_lld_WORD word13 +#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16 +#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff +#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13 +/* extended FC/FCoE Resource Descriptor when length = 88 bytes */ + uint32_t bw_min; + uint32_t bw_max; + uint32_t iops_min; + uint32_t iops_max; + uint32_t reserved[4]; +}; + +struct lpfc_func_cfg { +#define LPFC_RSRC_DESC_MAX_NUM 2 + uint32_t rsrc_desc_count; + struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM]; +}; + +struct lpfc_mbx_get_func_cfg { + struct mbox_header header; +#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0 +#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1 +#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2 + struct lpfc_func_cfg func_cfg; +}; + +struct lpfc_prof_cfg { +#define LPFC_RSRC_DESC_MAX_NUM 2 + uint32_t rsrc_desc_count; + struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM]; +}; + +struct lpfc_mbx_get_prof_cfg { + struct mbox_header header; +#define 
LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0 +#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1 +#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2 + union { + struct { + uint32_t word10; +#define lpfc_mbx_get_prof_cfg_prof_id_SHIFT 0 +#define lpfc_mbx_get_prof_cfg_prof_id_MASK 0x000000ff +#define lpfc_mbx_get_prof_cfg_prof_id_WORD word10 +#define lpfc_mbx_get_prof_cfg_prof_tp_SHIFT 8 +#define lpfc_mbx_get_prof_cfg_prof_tp_MASK 0x00000003 +#define lpfc_mbx_get_prof_cfg_prof_tp_WORD word10 + } request; + struct { + struct lpfc_prof_cfg prof_cfg; + } response; + } u; +}; + +struct lpfc_controller_attribute { + uint32_t version_string[8]; + uint32_t manufacturer_name[8]; + uint32_t supported_modes; + uint32_t word17; +#define lpfc_cntl_attr_eprom_ver_lo_SHIFT 0 +#define lpfc_cntl_attr_eprom_ver_lo_MASK 0x000000ff +#define lpfc_cntl_attr_eprom_ver_lo_WORD word17 +#define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8 +#define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff +#define lpfc_cntl_attr_eprom_ver_hi_WORD word17 +#define lpfc_cntl_attr_flash_id_SHIFT 16 +#define lpfc_cntl_attr_flash_id_MASK 0x000000ff +#define lpfc_cntl_attr_flash_id_WORD word17 + uint32_t mbx_da_struct_ver; + uint32_t ep_fw_da_struct_ver; + uint32_t ncsi_ver_str[3]; + uint32_t dflt_ext_timeout; + uint32_t model_number[8]; + uint32_t description[16]; + uint32_t serial_number[8]; + uint32_t ip_ver_str[8]; + uint32_t fw_ver_str[8]; + uint32_t bios_ver_str[8]; + uint32_t redboot_ver_str[8]; + uint32_t driver_ver_str[8]; + uint32_t flash_fw_ver_str[8]; + uint32_t functionality; + uint32_t word105; +#define lpfc_cntl_attr_max_cbd_len_SHIFT 0 +#define lpfc_cntl_attr_max_cbd_len_MASK 0x0000ffff +#define lpfc_cntl_attr_max_cbd_len_WORD word105 +#define lpfc_cntl_attr_asic_rev_SHIFT 16 +#define lpfc_cntl_attr_asic_rev_MASK 0x000000ff +#define lpfc_cntl_attr_asic_rev_WORD word105 +#define lpfc_cntl_attr_gen_guid0_SHIFT 24 +#define lpfc_cntl_attr_gen_guid0_MASK 0x000000ff +#define lpfc_cntl_attr_gen_guid0_WORD word105 + uint32_t gen_guid1_12[3]; + uint32_t word109; +#define lpfc_cntl_attr_gen_guid13_14_SHIFT 0 +#define lpfc_cntl_attr_gen_guid13_14_MASK 0x0000ffff +#define lpfc_cntl_attr_gen_guid13_14_WORD word109 +#define lpfc_cntl_attr_gen_guid15_SHIFT 16 +#define lpfc_cntl_attr_gen_guid15_MASK 0x000000ff +#define lpfc_cntl_attr_gen_guid15_WORD word109 +#define lpfc_cntl_attr_hba_port_cnt_SHIFT 24 +#define lpfc_cntl_attr_hba_port_cnt_MASK 0x000000ff +#define lpfc_cntl_attr_hba_port_cnt_WORD word109 + uint32_t word110; +#define lpfc_cntl_attr_dflt_lnk_tmo_SHIFT 0 +#define lpfc_cntl_attr_dflt_lnk_tmo_MASK 0x0000ffff +#define lpfc_cntl_attr_dflt_lnk_tmo_WORD word110 +#define lpfc_cntl_attr_multi_func_dev_SHIFT 24 +#define lpfc_cntl_attr_multi_func_dev_MASK 0x000000ff +#define lpfc_cntl_attr_multi_func_dev_WORD word110 + uint32_t word111; +#define lpfc_cntl_attr_cache_valid_SHIFT 0 +#define lpfc_cntl_attr_cache_valid_MASK 0x000000ff +#define lpfc_cntl_attr_cache_valid_WORD word111 +#define lpfc_cntl_attr_hba_status_SHIFT 8 +#define lpfc_cntl_attr_hba_status_MASK 0x000000ff +#define lpfc_cntl_attr_hba_status_WORD word111 +#define lpfc_cntl_attr_max_domain_SHIFT 16 +#define lpfc_cntl_attr_max_domain_MASK 0x000000ff +#define lpfc_cntl_attr_max_domain_WORD word111 +#define lpfc_cntl_attr_lnk_numb_SHIFT 24 +#define lpfc_cntl_attr_lnk_numb_MASK 0x0000003f +#define lpfc_cntl_attr_lnk_numb_WORD word111 +#define lpfc_cntl_attr_lnk_type_SHIFT 30 +#define lpfc_cntl_attr_lnk_type_MASK 0x00000003 +#define lpfc_cntl_attr_lnk_type_WORD word111 + uint32_t fw_post_status; + uint32_t 
hba_mtu[8]; + uint32_t word121; + uint32_t reserved1[3]; + uint32_t word125; +#define lpfc_cntl_attr_pci_vendor_id_SHIFT 0 +#define lpfc_cntl_attr_pci_vendor_id_MASK 0x0000ffff +#define lpfc_cntl_attr_pci_vendor_id_WORD word125 +#define lpfc_cntl_attr_pci_device_id_SHIFT 16 +#define lpfc_cntl_attr_pci_device_id_MASK 0x0000ffff +#define lpfc_cntl_attr_pci_device_id_WORD word125 + uint32_t word126; +#define lpfc_cntl_attr_pci_subvdr_id_SHIFT 0 +#define lpfc_cntl_attr_pci_subvdr_id_MASK 0x0000ffff +#define lpfc_cntl_attr_pci_subvdr_id_WORD word126 +#define lpfc_cntl_attr_pci_subsys_id_SHIFT 16 +#define lpfc_cntl_attr_pci_subsys_id_MASK 0x0000ffff +#define lpfc_cntl_attr_pci_subsys_id_WORD word126 + uint32_t word127; +#define lpfc_cntl_attr_pci_bus_num_SHIFT 0 +#define lpfc_cntl_attr_pci_bus_num_MASK 0x000000ff +#define lpfc_cntl_attr_pci_bus_num_WORD word127 +#define lpfc_cntl_attr_pci_dev_num_SHIFT 8 +#define lpfc_cntl_attr_pci_dev_num_MASK 0x000000ff +#define lpfc_cntl_attr_pci_dev_num_WORD word127 +#define lpfc_cntl_attr_pci_fnc_num_SHIFT 16 +#define lpfc_cntl_attr_pci_fnc_num_MASK 0x000000ff +#define lpfc_cntl_attr_pci_fnc_num_WORD word127 +#define lpfc_cntl_attr_inf_type_SHIFT 24 +#define lpfc_cntl_attr_inf_type_MASK 0x000000ff +#define lpfc_cntl_attr_inf_type_WORD word127 + uint32_t unique_id[2]; + uint32_t word130; +#define lpfc_cntl_attr_num_netfil_SHIFT 0 +#define lpfc_cntl_attr_num_netfil_MASK 0x000000ff +#define lpfc_cntl_attr_num_netfil_WORD word130 + uint32_t reserved2[4]; +}; + +struct lpfc_mbx_get_cntl_attributes { + union lpfc_sli4_cfg_shdr cfg_shdr; + struct lpfc_controller_attribute cntl_attr; +}; + +struct lpfc_mbx_get_port_name { + struct mbox_header header; + union { + struct { + uint32_t word4; +#define lpfc_mbx_get_port_name_lnk_type_SHIFT 0 +#define lpfc_mbx_get_port_name_lnk_type_MASK 0x00000003 +#define lpfc_mbx_get_port_name_lnk_type_WORD word4 + } request; + struct { + uint32_t word4; +#define lpfc_mbx_get_port_name_name0_SHIFT 0 +#define lpfc_mbx_get_port_name_name0_MASK 0x000000FF +#define lpfc_mbx_get_port_name_name0_WORD word4 +#define lpfc_mbx_get_port_name_name1_SHIFT 8 +#define lpfc_mbx_get_port_name_name1_MASK 0x000000FF +#define lpfc_mbx_get_port_name_name1_WORD word4 +#define lpfc_mbx_get_port_name_name2_SHIFT 16 +#define lpfc_mbx_get_port_name_name2_MASK 0x000000FF +#define lpfc_mbx_get_port_name_name2_WORD word4 +#define lpfc_mbx_get_port_name_name3_SHIFT 24 +#define lpfc_mbx_get_port_name_name3_MASK 0x000000FF +#define lpfc_mbx_get_port_name_name3_WORD word4 +#define LPFC_LINK_NUMBER_0 0 +#define LPFC_LINK_NUMBER_1 1 +#define LPFC_LINK_NUMBER_2 2 +#define LPFC_LINK_NUMBER_3 3 + } response; + } u; +}; + +/* Mailbox Completion Queue Error Messages */ +#define MB_CQE_STATUS_SUCCESS 0x0 +#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 +#define MB_CQE_STATUS_INVALID_PARAMETER 0x2 +#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3 +#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 +#define MB_CQE_STATUS_DMA_FAILED 0x5 + + +#define LPFC_MBX_WR_CONFIG_MAX_BDE 1 +struct lpfc_mbx_wr_object { + struct mbox_header header; + union { + struct { + uint32_t word4; +#define lpfc_wr_object_eof_SHIFT 31 +#define lpfc_wr_object_eof_MASK 0x00000001 +#define lpfc_wr_object_eof_WORD word4 +#define lpfc_wr_object_eas_SHIFT 29 +#define lpfc_wr_object_eas_MASK 0x00000001 +#define lpfc_wr_object_eas_WORD word4 +#define lpfc_wr_object_write_length_SHIFT 0 +#define lpfc_wr_object_write_length_MASK 0x00FFFFFF +#define lpfc_wr_object_write_length_WORD word4 + uint32_t write_offset; + 
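(Illustration: the GET_PORT_NAME response packs four name characters into word4; the hedged sketch below, with a hypothetical helper name, shows one way to unpack them using the macros defined above.)

/* Sketch: unpack the four port-name characters from the
 * GET_PORT_NAME response word4 (name0 is the least-significant byte).
 * Illustrative only; not the driver's own decode routine.
 */
static void sketch_decode_port_name(uint32_t word4, char name[4])
{
	name[0] = (word4 >> lpfc_mbx_get_port_name_name0_SHIFT) &
		   lpfc_mbx_get_port_name_name0_MASK;
	name[1] = (word4 >> lpfc_mbx_get_port_name_name1_SHIFT) &
		   lpfc_mbx_get_port_name_name1_MASK;
	name[2] = (word4 >> lpfc_mbx_get_port_name_name2_SHIFT) &
		   lpfc_mbx_get_port_name_name2_MASK;
	name[3] = (word4 >> lpfc_mbx_get_port_name_name3_SHIFT) &
		   lpfc_mbx_get_port_name_name3_MASK;
}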
uint32_t object_name[LPFC_MBX_OBJECT_NAME_LEN_DW]; + uint32_t bde_count; + struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE]; + } request; + struct { + uint32_t actual_write_length; + uint32_t word5; +#define lpfc_wr_object_change_status_SHIFT 0 +#define lpfc_wr_object_change_status_MASK 0x000000FF +#define lpfc_wr_object_change_status_WORD word5 +#define LPFC_CHANGE_STATUS_NO_RESET_NEEDED 0x00 +#define LPFC_CHANGE_STATUS_PHYS_DEV_RESET 0x01 +#define LPFC_CHANGE_STATUS_FW_RESET 0x02 +#define LPFC_CHANGE_STATUS_PORT_MIGRATION 0x04 +#define LPFC_CHANGE_STATUS_PCI_RESET 0x05 +#define lpfc_wr_object_csf_SHIFT 8 +#define lpfc_wr_object_csf_MASK 0x00000001 +#define lpfc_wr_object_csf_WORD word5 + } response; + } u; +}; + +/* mailbox queue entry structure */ +struct lpfc_mqe { + uint32_t word0; +#define lpfc_mqe_status_SHIFT 16 +#define lpfc_mqe_status_MASK 0x0000FFFF +#define lpfc_mqe_status_WORD word0 +#define lpfc_mqe_command_SHIFT 8 +#define lpfc_mqe_command_MASK 0x000000FF +#define lpfc_mqe_command_WORD word0 + union { + uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1]; + /* sli4 mailbox commands */ + struct lpfc_mbx_sli4_config sli4_config; + struct lpfc_mbx_init_vfi init_vfi; + struct lpfc_mbx_reg_vfi reg_vfi; + struct lpfc_mbx_reg_vfi unreg_vfi; + struct lpfc_mbx_init_vpi init_vpi; + struct lpfc_mbx_resume_rpi resume_rpi; + struct lpfc_mbx_read_fcf_tbl read_fcf_tbl; + struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry; + struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry; + struct lpfc_mbx_redisc_fcf_tbl redisc_fcf_tbl; + struct lpfc_mbx_reg_fcfi reg_fcfi; + struct lpfc_mbx_reg_fcfi_mrq reg_fcfi_mrq; + struct lpfc_mbx_unreg_fcfi unreg_fcfi; + struct lpfc_mbx_mq_create mq_create; + struct lpfc_mbx_mq_create_ext mq_create_ext; + struct lpfc_mbx_read_object read_object; + struct lpfc_mbx_eq_create eq_create; + struct lpfc_mbx_modify_eq_delay eq_delay; + struct lpfc_mbx_cq_create cq_create; + struct lpfc_mbx_cq_create_set cq_create_set; + struct lpfc_mbx_wq_create wq_create; + struct lpfc_mbx_rq_create rq_create; + struct lpfc_mbx_rq_create_v2 rq_create_v2; + struct lpfc_mbx_mq_destroy mq_destroy; + struct lpfc_mbx_eq_destroy eq_destroy; + struct lpfc_mbx_cq_destroy cq_destroy; + struct lpfc_mbx_wq_destroy wq_destroy; + struct lpfc_mbx_rq_destroy rq_destroy; + struct lpfc_mbx_get_rsrc_extent_info rsrc_extent_info; + struct lpfc_mbx_alloc_rsrc_extents alloc_rsrc_extents; + struct lpfc_mbx_dealloc_rsrc_extents dealloc_rsrc_extents; + struct lpfc_mbx_post_sgl_pages post_sgl_pages; + struct lpfc_mbx_nembed_cmd nembed_cmd; + struct lpfc_mbx_read_rev read_rev; + struct lpfc_mbx_read_vpi read_vpi; + struct lpfc_mbx_read_config rd_config; + struct lpfc_mbx_request_features req_ftrs; + struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; + struct lpfc_mbx_query_fw_config query_fw_cfg; + struct lpfc_mbx_set_beacon_config beacon_config; + struct lpfc_mbx_get_sli4_parameters get_sli4_parameters; + struct lpfc_mbx_reg_congestion_buf reg_congestion_buf; + struct lpfc_mbx_set_link_diag_state link_diag_state; + struct lpfc_mbx_set_link_diag_loopback link_diag_loopback; + struct lpfc_mbx_run_link_diag_test link_diag_test; + struct lpfc_mbx_get_func_cfg get_func_cfg; + struct lpfc_mbx_get_prof_cfg get_prof_cfg; + struct lpfc_mbx_wr_object wr_object; + struct lpfc_mbx_get_port_name get_port_name; + struct lpfc_mbx_set_feature set_feature; + struct lpfc_mbx_memory_dump_type3 mem_dump_type3; + struct lpfc_mbx_set_host_data set_host_data; + struct lpfc_mbx_set_trunk_mode set_trunk_mode; + struct lpfc_mbx_nop nop; + struct 
lpfc_mbx_set_ras_fwlog ras_fwlog; + } un; +}; + +struct lpfc_mcqe { + uint32_t word0; +#define lpfc_mcqe_status_SHIFT 0 +#define lpfc_mcqe_status_MASK 0x0000FFFF +#define lpfc_mcqe_status_WORD word0 +#define lpfc_mcqe_ext_status_SHIFT 16 +#define lpfc_mcqe_ext_status_MASK 0x0000FFFF +#define lpfc_mcqe_ext_status_WORD word0 + uint32_t mcqe_tag0; + uint32_t mcqe_tag1; + uint32_t trailer; +#define lpfc_trailer_valid_SHIFT 31 +#define lpfc_trailer_valid_MASK 0x00000001 +#define lpfc_trailer_valid_WORD trailer +#define lpfc_trailer_async_SHIFT 30 +#define lpfc_trailer_async_MASK 0x00000001 +#define lpfc_trailer_async_WORD trailer +#define lpfc_trailer_hpi_SHIFT 29 +#define lpfc_trailer_hpi_MASK 0x00000001 +#define lpfc_trailer_hpi_WORD trailer +#define lpfc_trailer_completed_SHIFT 28 +#define lpfc_trailer_completed_MASK 0x00000001 +#define lpfc_trailer_completed_WORD trailer +#define lpfc_trailer_consumed_SHIFT 27 +#define lpfc_trailer_consumed_MASK 0x00000001 +#define lpfc_trailer_consumed_WORD trailer +#define lpfc_trailer_type_SHIFT 16 +#define lpfc_trailer_type_MASK 0x000000FF +#define lpfc_trailer_type_WORD trailer +#define lpfc_trailer_code_SHIFT 8 +#define lpfc_trailer_code_MASK 0x000000FF +#define lpfc_trailer_code_WORD trailer +#define LPFC_TRAILER_CODE_LINK 0x1 +#define LPFC_TRAILER_CODE_FCOE 0x2 +#define LPFC_TRAILER_CODE_DCBX 0x3 +#define LPFC_TRAILER_CODE_GRP5 0x5 +#define LPFC_TRAILER_CODE_FC 0x10 +#define LPFC_TRAILER_CODE_SLI 0x11 +#define LPFC_TRAILER_CODE_CMSTAT 0x13 +}; + +struct lpfc_acqe_link { + uint32_t word0; +#define lpfc_acqe_link_speed_SHIFT 24 +#define lpfc_acqe_link_speed_MASK 0x000000FF +#define lpfc_acqe_link_speed_WORD word0 +#define LPFC_ASYNC_LINK_SPEED_ZERO 0x0 +#define LPFC_ASYNC_LINK_SPEED_10MBPS 0x1 +#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2 +#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3 +#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4 +#define LPFC_ASYNC_LINK_SPEED_20GBPS 0x5 +#define LPFC_ASYNC_LINK_SPEED_25GBPS 0x6 +#define LPFC_ASYNC_LINK_SPEED_40GBPS 0x7 +#define LPFC_ASYNC_LINK_SPEED_100GBPS 0x8 +#define lpfc_acqe_link_duplex_SHIFT 16 +#define lpfc_acqe_link_duplex_MASK 0x000000FF +#define lpfc_acqe_link_duplex_WORD word0 +#define LPFC_ASYNC_LINK_DUPLEX_NONE 0x0 +#define LPFC_ASYNC_LINK_DUPLEX_HALF 0x1 +#define LPFC_ASYNC_LINK_DUPLEX_FULL 0x2 +#define lpfc_acqe_link_status_SHIFT 8 +#define lpfc_acqe_link_status_MASK 0x000000FF +#define lpfc_acqe_link_status_WORD word0 +#define LPFC_ASYNC_LINK_STATUS_DOWN 0x0 +#define LPFC_ASYNC_LINK_STATUS_UP 0x1 +#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2 +#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3 +#define lpfc_acqe_link_type_SHIFT 6 +#define lpfc_acqe_link_type_MASK 0x00000003 +#define lpfc_acqe_link_type_WORD word0 +#define lpfc_acqe_link_number_SHIFT 0 +#define lpfc_acqe_link_number_MASK 0x0000003F +#define lpfc_acqe_link_number_WORD word0 + uint32_t word1; +#define lpfc_acqe_link_fault_SHIFT 0 +#define lpfc_acqe_link_fault_MASK 0x000000FF +#define lpfc_acqe_link_fault_WORD word1 +#define LPFC_ASYNC_LINK_FAULT_NONE 0x0 +#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1 +#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2 +#define LPFC_ASYNC_LINK_FAULT_LR_LRR 0x3 +#define lpfc_acqe_logical_link_speed_SHIFT 16 +#define lpfc_acqe_logical_link_speed_MASK 0x0000FFFF +#define lpfc_acqe_logical_link_speed_WORD word1 + uint32_t event_tag; + uint32_t trailer; +#define LPFC_LINK_EVENT_TYPE_PHYSICAL 0x0 +#define LPFC_LINK_EVENT_TYPE_VIRTUAL 0x1 +}; + +struct lpfc_acqe_fip { + uint32_t index; + uint32_t word1; +#define lpfc_acqe_fip_fcf_count_SHIFT 0 
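(Illustration: the trailer word of struct lpfc_mcqe distinguishes ordinary mailbox completions from asynchronous events; the sketch below is a standalone example of the decode, not the driver's actual completion handler.)

/* Sketch: return true when a mailbox CQE is a valid asynchronous
 * event rather than a normal mailbox completion.  Illustrative only.
 */
static bool sketch_mcqe_is_async(const struct lpfc_mcqe *mcqe)
{
	uint32_t t = mcqe->trailer;

	if (!((t >> lpfc_trailer_valid_SHIFT) & lpfc_trailer_valid_MASK))
		return false;		/* entry not valid yet */
	return ((t >> lpfc_trailer_async_SHIFT) & lpfc_trailer_async_MASK) != 0;
}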
+#define lpfc_acqe_fip_fcf_count_MASK 0x0000FFFF +#define lpfc_acqe_fip_fcf_count_WORD word1 +#define lpfc_acqe_fip_event_type_SHIFT 16 +#define lpfc_acqe_fip_event_type_MASK 0x0000FFFF +#define lpfc_acqe_fip_event_type_WORD word1 + uint32_t event_tag; + uint32_t trailer; +#define LPFC_FIP_EVENT_TYPE_NEW_FCF 0x1 +#define LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL 0x2 +#define LPFC_FIP_EVENT_TYPE_FCF_DEAD 0x3 +#define LPFC_FIP_EVENT_TYPE_CVL 0x4 +#define LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD 0x5 +}; + +struct lpfc_acqe_dcbx { + uint32_t tlv_ttl; + uint32_t reserved; + uint32_t event_tag; + uint32_t trailer; +}; + +struct lpfc_acqe_grp5 { + uint32_t word0; +#define lpfc_acqe_grp5_type_SHIFT 6 +#define lpfc_acqe_grp5_type_MASK 0x00000003 +#define lpfc_acqe_grp5_type_WORD word0 +#define lpfc_acqe_grp5_number_SHIFT 0 +#define lpfc_acqe_grp5_number_MASK 0x0000003F +#define lpfc_acqe_grp5_number_WORD word0 + uint32_t word1; +#define lpfc_acqe_grp5_llink_spd_SHIFT 16 +#define lpfc_acqe_grp5_llink_spd_MASK 0x0000FFFF +#define lpfc_acqe_grp5_llink_spd_WORD word1 + uint32_t event_tag; + uint32_t trailer; +}; + +extern const char *const trunk_errmsg[]; + +struct lpfc_acqe_fc_la { + uint32_t word0; +#define lpfc_acqe_fc_la_speed_SHIFT 24 +#define lpfc_acqe_fc_la_speed_MASK 0x000000FF +#define lpfc_acqe_fc_la_speed_WORD word0 +#define LPFC_FC_LA_SPEED_UNKNOWN 0x0 +#define LPFC_FC_LA_SPEED_1G 0x1 +#define LPFC_FC_LA_SPEED_2G 0x2 +#define LPFC_FC_LA_SPEED_4G 0x4 +#define LPFC_FC_LA_SPEED_8G 0x8 +#define LPFC_FC_LA_SPEED_10G 0xA +#define LPFC_FC_LA_SPEED_16G 0x10 +#define LPFC_FC_LA_SPEED_32G 0x20 +#define LPFC_FC_LA_SPEED_64G 0x21 +#define LPFC_FC_LA_SPEED_128G 0x22 +#define LPFC_FC_LA_SPEED_256G 0x23 +#define lpfc_acqe_fc_la_topology_SHIFT 16 +#define lpfc_acqe_fc_la_topology_MASK 0x000000FF +#define lpfc_acqe_fc_la_topology_WORD word0 +#define LPFC_FC_LA_TOP_UNKOWN 0x0 +#define LPFC_FC_LA_TOP_P2P 0x1 +#define LPFC_FC_LA_TOP_FCAL 0x2 +#define LPFC_FC_LA_TOP_INTERNAL_LOOP 0x3 +#define LPFC_FC_LA_TOP_SERDES_LOOP 0x4 +#define lpfc_acqe_fc_la_att_type_SHIFT 8 +#define lpfc_acqe_fc_la_att_type_MASK 0x000000FF +#define lpfc_acqe_fc_la_att_type_WORD word0 +#define LPFC_FC_LA_TYPE_LINK_UP 0x1 +#define LPFC_FC_LA_TYPE_LINK_DOWN 0x2 +#define LPFC_FC_LA_TYPE_NO_HARD_ALPA 0x3 +#define LPFC_FC_LA_TYPE_MDS_LINK_DOWN 0x4 +#define LPFC_FC_LA_TYPE_MDS_LOOPBACK 0x5 +#define LPFC_FC_LA_TYPE_UNEXP_WWPN 0x6 +#define LPFC_FC_LA_TYPE_TRUNKING_EVENT 0x7 +#define LPFC_FC_LA_TYPE_ACTIVATE_FAIL 0x8 +#define LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT 0x9 +#define lpfc_acqe_fc_la_port_type_SHIFT 6 +#define lpfc_acqe_fc_la_port_type_MASK 0x00000003 +#define lpfc_acqe_fc_la_port_type_WORD word0 +#define LPFC_LINK_TYPE_ETHERNET 0x0 +#define LPFC_LINK_TYPE_FC 0x1 +#define lpfc_acqe_fc_la_port_number_SHIFT 0 +#define lpfc_acqe_fc_la_port_number_MASK 0x0000003F +#define lpfc_acqe_fc_la_port_number_WORD word0 + +/* Attention Type is 0x07 (Trunking Event) word0 */ +#define lpfc_acqe_fc_la_trunk_link_status_port0_SHIFT 16 +#define lpfc_acqe_fc_la_trunk_link_status_port0_MASK 0x0000001 +#define lpfc_acqe_fc_la_trunk_link_status_port0_WORD word0 +#define lpfc_acqe_fc_la_trunk_link_status_port1_SHIFT 17 +#define lpfc_acqe_fc_la_trunk_link_status_port1_MASK 0x0000001 +#define lpfc_acqe_fc_la_trunk_link_status_port1_WORD word0 +#define lpfc_acqe_fc_la_trunk_link_status_port2_SHIFT 18 +#define lpfc_acqe_fc_la_trunk_link_status_port2_MASK 0x0000001 +#define lpfc_acqe_fc_la_trunk_link_status_port2_WORD word0 +#define lpfc_acqe_fc_la_trunk_link_status_port3_SHIFT 19 
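(Illustration: for a trunking attention event (attention type 0x7), word0 carries one link-status bit and one config bit per port; the hypothetical helper below simply folds the four status bits into a mask. A literal 0x1 is used because each field is a single bit.)

/* Sketch: gather the per-port trunk link-status bits of word0
 * (bits 16-19) into a 4-bit mask, port 0 in bit 0.  Illustrative only.
 */
static uint32_t sketch_trunk_link_mask(uint32_t word0)
{
	uint32_t mask = 0;

	mask |= ((word0 >> lpfc_acqe_fc_la_trunk_link_status_port0_SHIFT) & 0x1) << 0;
	mask |= ((word0 >> lpfc_acqe_fc_la_trunk_link_status_port1_SHIFT) & 0x1) << 1;
	mask |= ((word0 >> lpfc_acqe_fc_la_trunk_link_status_port2_SHIFT) & 0x1) << 2;
	mask |= ((word0 >> lpfc_acqe_fc_la_trunk_link_status_port3_SHIFT) & 0x1) << 3;
	return mask;
}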
+#define lpfc_acqe_fc_la_trunk_link_status_port3_MASK 0x0000001 +#define lpfc_acqe_fc_la_trunk_link_status_port3_WORD word0 +#define lpfc_acqe_fc_la_trunk_config_port0_SHIFT 20 +#define lpfc_acqe_fc_la_trunk_config_port0_MASK 0x0000001 +#define lpfc_acqe_fc_la_trunk_config_port0_WORD word0 +#define lpfc_acqe_fc_la_trunk_config_port1_SHIFT 21 +#define lpfc_acqe_fc_la_trunk_config_port1_MASK 0x0000001 +#define lpfc_acqe_fc_la_trunk_config_port1_WORD word0 +#define lpfc_acqe_fc_la_trunk_config_port2_SHIFT 22 +#define lpfc_acqe_fc_la_trunk_config_port2_MASK 0x0000001 +#define lpfc_acqe_fc_la_trunk_config_port2_WORD word0 +#define lpfc_acqe_fc_la_trunk_config_port3_SHIFT 23 +#define lpfc_acqe_fc_la_trunk_config_port3_MASK 0x0000001 +#define lpfc_acqe_fc_la_trunk_config_port3_WORD word0 + uint32_t word1; +#define lpfc_acqe_fc_la_llink_spd_SHIFT 16 +#define lpfc_acqe_fc_la_llink_spd_MASK 0x0000FFFF +#define lpfc_acqe_fc_la_llink_spd_WORD word1 +#define lpfc_acqe_fc_la_fault_SHIFT 0 +#define lpfc_acqe_fc_la_fault_MASK 0x000000FF +#define lpfc_acqe_fc_la_fault_WORD word1 +#define lpfc_acqe_fc_la_link_status_SHIFT 8 +#define lpfc_acqe_fc_la_link_status_MASK 0x0000007F +#define lpfc_acqe_fc_la_link_status_WORD word1 +#define lpfc_acqe_fc_la_trunk_fault_SHIFT 0 +#define lpfc_acqe_fc_la_trunk_fault_MASK 0x0000000F +#define lpfc_acqe_fc_la_trunk_fault_WORD word1 +#define lpfc_acqe_fc_la_trunk_linkmask_SHIFT 4 +#define lpfc_acqe_fc_la_trunk_linkmask_MASK 0x000000F +#define lpfc_acqe_fc_la_trunk_linkmask_WORD word1 +#define LPFC_FC_LA_FAULT_NONE 0x0 +#define LPFC_FC_LA_FAULT_LOCAL 0x1 +#define LPFC_FC_LA_FAULT_REMOTE 0x2 + uint32_t event_tag; + uint32_t trailer; +#define LPFC_FC_LA_EVENT_TYPE_FC_LINK 0x1 +#define LPFC_FC_LA_EVENT_TYPE_SHARED_LINK 0x2 +}; + +struct lpfc_acqe_misconfigured_event { + struct { + uint32_t word0; +#define lpfc_sli_misconfigured_port0_state_SHIFT 0 +#define lpfc_sli_misconfigured_port0_state_MASK 0x000000FF +#define lpfc_sli_misconfigured_port0_state_WORD word0 +#define lpfc_sli_misconfigured_port1_state_SHIFT 8 +#define lpfc_sli_misconfigured_port1_state_MASK 0x000000FF +#define lpfc_sli_misconfigured_port1_state_WORD word0 +#define lpfc_sli_misconfigured_port2_state_SHIFT 16 +#define lpfc_sli_misconfigured_port2_state_MASK 0x000000FF +#define lpfc_sli_misconfigured_port2_state_WORD word0 +#define lpfc_sli_misconfigured_port3_state_SHIFT 24 +#define lpfc_sli_misconfigured_port3_state_MASK 0x000000FF +#define lpfc_sli_misconfigured_port3_state_WORD word0 + uint32_t word1; +#define lpfc_sli_misconfigured_port0_op_SHIFT 0 +#define lpfc_sli_misconfigured_port0_op_MASK 0x00000001 +#define lpfc_sli_misconfigured_port0_op_WORD word1 +#define lpfc_sli_misconfigured_port0_severity_SHIFT 1 +#define lpfc_sli_misconfigured_port0_severity_MASK 0x00000003 +#define lpfc_sli_misconfigured_port0_severity_WORD word1 +#define lpfc_sli_misconfigured_port1_op_SHIFT 8 +#define lpfc_sli_misconfigured_port1_op_MASK 0x00000001 +#define lpfc_sli_misconfigured_port1_op_WORD word1 +#define lpfc_sli_misconfigured_port1_severity_SHIFT 9 +#define lpfc_sli_misconfigured_port1_severity_MASK 0x00000003 +#define lpfc_sli_misconfigured_port1_severity_WORD word1 +#define lpfc_sli_misconfigured_port2_op_SHIFT 16 +#define lpfc_sli_misconfigured_port2_op_MASK 0x00000001 +#define lpfc_sli_misconfigured_port2_op_WORD word1 +#define lpfc_sli_misconfigured_port2_severity_SHIFT 17 +#define lpfc_sli_misconfigured_port2_severity_MASK 0x00000003 +#define lpfc_sli_misconfigured_port2_severity_WORD word1 +#define 
lpfc_sli_misconfigured_port3_op_SHIFT 24 +#define lpfc_sli_misconfigured_port3_op_MASK 0x00000001 +#define lpfc_sli_misconfigured_port3_op_WORD word1 +#define lpfc_sli_misconfigured_port3_severity_SHIFT 25 +#define lpfc_sli_misconfigured_port3_severity_MASK 0x00000003 +#define lpfc_sli_misconfigured_port3_severity_WORD word1 + } theEvent; +#define LPFC_SLI_EVENT_STATUS_VALID 0x00 +#define LPFC_SLI_EVENT_STATUS_NOT_PRESENT 0x01 +#define LPFC_SLI_EVENT_STATUS_WRONG_TYPE 0x02 +#define LPFC_SLI_EVENT_STATUS_UNSUPPORTED 0x03 +#define LPFC_SLI_EVENT_STATUS_UNQUALIFIED 0x04 +#define LPFC_SLI_EVENT_STATUS_UNCERTIFIED 0x05 +}; + +struct lpfc_acqe_cgn_signal { + u32 word0; +#define lpfc_warn_acqe_SHIFT 0 +#define lpfc_warn_acqe_MASK 0x7FFFFFFF +#define lpfc_warn_acqe_WORD word0 +#define lpfc_imm_acqe_SHIFT 31 +#define lpfc_imm_acqe_MASK 0x1 +#define lpfc_imm_acqe_WORD word0 + u32 alarm_cnt; + u32 word2; + u32 trailer; +}; + +struct lpfc_acqe_sli { + uint32_t event_data1; + uint32_t event_data2; + uint32_t event_data3; + uint32_t trailer; +#define LPFC_SLI_EVENT_TYPE_PORT_ERROR 0x1 +#define LPFC_SLI_EVENT_TYPE_OVER_TEMP 0x2 +#define LPFC_SLI_EVENT_TYPE_NORM_TEMP 0x3 +#define LPFC_SLI_EVENT_TYPE_NVLOG_POST 0x4 +#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5 +#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9 +#define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA +#define LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG 0xE +#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF +#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10 +#define LPFC_SLI_EVENT_TYPE_CGN_SIGNAL 0x11 +#define LPFC_SLI_EVENT_TYPE_RD_SIGNAL 0x12 +}; + +/* + * Define the bootstrap mailbox (bmbx) region used to communicate + * mailbox command between the host and port. The mailbox consists + * of a payload area of 256 bytes and a completion queue of length + * 16 bytes. 
+ */ +struct lpfc_bmbx_create { + struct lpfc_mqe mqe; + struct lpfc_mcqe mcqe; +}; + +#define SGL_ALIGN_SZ 64 +#define SGL_PAGE_SIZE 4096 +/* align SGL addr on a size boundary - adjust address up */ +#define NO_XRI 0xffff + +struct wqe_common { + uint32_t word6; +#define wqe_xri_tag_SHIFT 0 +#define wqe_xri_tag_MASK 0x0000FFFF +#define wqe_xri_tag_WORD word6 +#define wqe_ctxt_tag_SHIFT 16 +#define wqe_ctxt_tag_MASK 0x0000FFFF +#define wqe_ctxt_tag_WORD word6 + uint32_t word7; +#define wqe_dif_SHIFT 0 +#define wqe_dif_MASK 0x00000003 +#define wqe_dif_WORD word7 +#define LPFC_WQE_DIF_PASSTHRU 1 +#define LPFC_WQE_DIF_STRIP 2 +#define LPFC_WQE_DIF_INSERT 3 +#define wqe_ct_SHIFT 2 +#define wqe_ct_MASK 0x00000003 +#define wqe_ct_WORD word7 +#define wqe_status_SHIFT 4 +#define wqe_status_MASK 0x0000000f +#define wqe_status_WORD word7 +#define wqe_cmnd_SHIFT 8 +#define wqe_cmnd_MASK 0x000000ff +#define wqe_cmnd_WORD word7 +#define wqe_class_SHIFT 16 +#define wqe_class_MASK 0x00000007 +#define wqe_class_WORD word7 +#define wqe_ar_SHIFT 19 +#define wqe_ar_MASK 0x00000001 +#define wqe_ar_WORD word7 +#define wqe_ag_SHIFT wqe_ar_SHIFT +#define wqe_ag_MASK wqe_ar_MASK +#define wqe_ag_WORD wqe_ar_WORD +#define wqe_pu_SHIFT 20 +#define wqe_pu_MASK 0x00000003 +#define wqe_pu_WORD word7 +#define wqe_erp_SHIFT 22 +#define wqe_erp_MASK 0x00000001 +#define wqe_erp_WORD word7 +#define wqe_conf_SHIFT wqe_erp_SHIFT +#define wqe_conf_MASK wqe_erp_MASK +#define wqe_conf_WORD wqe_erp_WORD +#define wqe_lnk_SHIFT 23 +#define wqe_lnk_MASK 0x00000001 +#define wqe_lnk_WORD word7 +#define wqe_tmo_SHIFT 24 +#define wqe_tmo_MASK 0x000000ff +#define wqe_tmo_WORD word7 + uint32_t abort_tag; /* word 8 in WQE */ + uint32_t word9; +#define wqe_reqtag_SHIFT 0 +#define wqe_reqtag_MASK 0x0000FFFF +#define wqe_reqtag_WORD word9 +#define wqe_temp_rpi_SHIFT 16 +#define wqe_temp_rpi_MASK 0x0000FFFF +#define wqe_temp_rpi_WORD word9 +#define wqe_rcvoxid_SHIFT 16 +#define wqe_rcvoxid_MASK 0x0000FFFF +#define wqe_rcvoxid_WORD word9 +#define wqe_sof_SHIFT 24 +#define wqe_sof_MASK 0x000000FF +#define wqe_sof_WORD word9 +#define wqe_eof_SHIFT 16 +#define wqe_eof_MASK 0x000000FF +#define wqe_eof_WORD word9 + uint32_t word10; +#define wqe_ebde_cnt_SHIFT 0 +#define wqe_ebde_cnt_MASK 0x0000000f +#define wqe_ebde_cnt_WORD word10 +#define wqe_xchg_SHIFT 4 +#define wqe_xchg_MASK 0x00000001 +#define wqe_xchg_WORD word10 +#define LPFC_SCSI_XCHG 0x0 +#define LPFC_NVME_XCHG 0x1 +#define wqe_appid_SHIFT 5 +#define wqe_appid_MASK 0x00000001 +#define wqe_appid_WORD word10 +#define wqe_oas_SHIFT 6 +#define wqe_oas_MASK 0x00000001 +#define wqe_oas_WORD word10 +#define wqe_lenloc_SHIFT 7 +#define wqe_lenloc_MASK 0x00000003 +#define wqe_lenloc_WORD word10 +#define LPFC_WQE_LENLOC_NONE 0 +#define LPFC_WQE_LENLOC_WORD3 1 +#define LPFC_WQE_LENLOC_WORD12 2 +#define LPFC_WQE_LENLOC_WORD4 3 +#define wqe_qosd_SHIFT 9 +#define wqe_qosd_MASK 0x00000001 +#define wqe_qosd_WORD word10 +#define wqe_xbl_SHIFT 11 +#define wqe_xbl_MASK 0x00000001 +#define wqe_xbl_WORD word10 +#define wqe_iod_SHIFT 13 +#define wqe_iod_MASK 0x00000001 +#define wqe_iod_WORD word10 +#define LPFC_WQE_IOD_NONE 0 +#define LPFC_WQE_IOD_WRITE 0 +#define LPFC_WQE_IOD_READ 1 +#define wqe_dbde_SHIFT 14 +#define wqe_dbde_MASK 0x00000001 +#define wqe_dbde_WORD word10 +#define wqe_wqes_SHIFT 15 +#define wqe_wqes_MASK 0x00000001 +#define wqe_wqes_WORD word10 +/* Note that this field overlaps above fields */ +#define wqe_wqid_SHIFT 1 +#define wqe_wqid_MASK 0x00007fff +#define wqe_wqid_WORD word10 +#define 
wqe_pri_SHIFT 16 +#define wqe_pri_MASK 0x00000007 +#define wqe_pri_WORD word10 +#define wqe_pv_SHIFT 19 +#define wqe_pv_MASK 0x00000001 +#define wqe_pv_WORD word10 +#define wqe_xc_SHIFT 21 +#define wqe_xc_MASK 0x00000001 +#define wqe_xc_WORD word10 +#define wqe_sr_SHIFT 22 +#define wqe_sr_MASK 0x00000001 +#define wqe_sr_WORD word10 +#define wqe_ccpe_SHIFT 23 +#define wqe_ccpe_MASK 0x00000001 +#define wqe_ccpe_WORD word10 +#define wqe_ccp_SHIFT 24 +#define wqe_ccp_MASK 0x000000ff +#define wqe_ccp_WORD word10 + uint32_t word11; +#define wqe_cmd_type_SHIFT 0 +#define wqe_cmd_type_MASK 0x0000000f +#define wqe_cmd_type_WORD word11 +#define wqe_els_id_SHIFT 4 +#define wqe_els_id_MASK 0x00000007 +#define wqe_els_id_WORD word11 +#define wqe_irsp_SHIFT 4 +#define wqe_irsp_MASK 0x00000001 +#define wqe_irsp_WORD word11 +#define wqe_pbde_SHIFT 5 +#define wqe_pbde_MASK 0x00000001 +#define wqe_pbde_WORD word11 +#define wqe_sup_SHIFT 6 +#define wqe_sup_MASK 0x00000001 +#define wqe_sup_WORD word11 +#define wqe_ffrq_SHIFT 6 +#define wqe_ffrq_MASK 0x00000001 +#define wqe_ffrq_WORD word11 +#define wqe_wqec_SHIFT 7 +#define wqe_wqec_MASK 0x00000001 +#define wqe_wqec_WORD word11 +#define wqe_irsplen_SHIFT 8 +#define wqe_irsplen_MASK 0x0000000f +#define wqe_irsplen_WORD word11 +#define wqe_cqid_SHIFT 16 +#define wqe_cqid_MASK 0x0000ffff +#define wqe_cqid_WORD word11 +#define LPFC_WQE_CQ_ID_DEFAULT 0xffff +}; + +struct wqe_did { + uint32_t word5; +#define wqe_els_did_SHIFT 0 +#define wqe_els_did_MASK 0x00FFFFFF +#define wqe_els_did_WORD word5 +#define wqe_xmit_bls_pt_SHIFT 28 +#define wqe_xmit_bls_pt_MASK 0x00000003 +#define wqe_xmit_bls_pt_WORD word5 +#define wqe_xmit_bls_ar_SHIFT 30 +#define wqe_xmit_bls_ar_MASK 0x00000001 +#define wqe_xmit_bls_ar_WORD word5 +#define wqe_xmit_bls_xo_SHIFT 31 +#define wqe_xmit_bls_xo_MASK 0x00000001 +#define wqe_xmit_bls_xo_WORD word5 +}; + +struct lpfc_wqe_generic{ + struct ulp_bde64 bde; + uint32_t word3; + uint32_t word4; + uint32_t word5; + struct wqe_common wqe_com; + uint32_t payload[4]; +}; + +enum els_request64_wqe_word11 { + LPFC_ELS_ID_DEFAULT, + LPFC_ELS_ID_LOGO, + LPFC_ELS_ID_FDISC, + LPFC_ELS_ID_FLOGI, + LPFC_ELS_ID_PLOGI, +}; + +struct els_request64_wqe { + struct ulp_bde64 bde; + uint32_t payload_len; + uint32_t word4; +#define els_req64_sid_SHIFT 0 +#define els_req64_sid_MASK 0x00FFFFFF +#define els_req64_sid_WORD word4 +#define els_req64_sp_SHIFT 24 +#define els_req64_sp_MASK 0x00000001 +#define els_req64_sp_WORD word4 +#define els_req64_vf_SHIFT 25 +#define els_req64_vf_MASK 0x00000001 +#define els_req64_vf_WORD word4 + struct wqe_did wqe_dest; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t word12; +#define els_req64_vfid_SHIFT 1 +#define els_req64_vfid_MASK 0x00000FFF +#define els_req64_vfid_WORD word12 +#define els_req64_pri_SHIFT 13 +#define els_req64_pri_MASK 0x00000007 +#define els_req64_pri_WORD word12 + uint32_t word13; +#define els_req64_hopcnt_SHIFT 24 +#define els_req64_hopcnt_MASK 0x000000ff +#define els_req64_hopcnt_WORD word13 + uint32_t word14; + uint32_t max_response_payload_len; +}; + +struct xmit_els_rsp64_wqe { + struct ulp_bde64 bde; + uint32_t response_payload_len; + uint32_t word4; +#define els_rsp64_sid_SHIFT 0 +#define els_rsp64_sid_MASK 0x00FFFFFF +#define els_rsp64_sid_WORD word4 +#define els_rsp64_sp_SHIFT 24 +#define els_rsp64_sp_MASK 0x00000001 +#define els_rsp64_sp_WORD word4 + struct wqe_did wqe_dest; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t word12; +#define wqe_rsp_temp_rpi_SHIFT 0 +#define 
wqe_rsp_temp_rpi_MASK 0x0000FFFF +#define wqe_rsp_temp_rpi_WORD word12 + uint32_t rsvd_13_15[3]; +}; + +struct xmit_bls_rsp64_wqe { + uint32_t payload0; +/* Payload0 for BA_ACC */ +#define xmit_bls_rsp64_acc_seq_id_SHIFT 16 +#define xmit_bls_rsp64_acc_seq_id_MASK 0x000000ff +#define xmit_bls_rsp64_acc_seq_id_WORD payload0 +#define xmit_bls_rsp64_acc_seq_id_vald_SHIFT 24 +#define xmit_bls_rsp64_acc_seq_id_vald_MASK 0x000000ff +#define xmit_bls_rsp64_acc_seq_id_vald_WORD payload0 +/* Payload0 for BA_RJT */ +#define xmit_bls_rsp64_rjt_vspec_SHIFT 0 +#define xmit_bls_rsp64_rjt_vspec_MASK 0x000000ff +#define xmit_bls_rsp64_rjt_vspec_WORD payload0 +#define xmit_bls_rsp64_rjt_expc_SHIFT 8 +#define xmit_bls_rsp64_rjt_expc_MASK 0x000000ff +#define xmit_bls_rsp64_rjt_expc_WORD payload0 +#define xmit_bls_rsp64_rjt_rsnc_SHIFT 16 +#define xmit_bls_rsp64_rjt_rsnc_MASK 0x000000ff +#define xmit_bls_rsp64_rjt_rsnc_WORD payload0 + uint32_t word1; +#define xmit_bls_rsp64_rxid_SHIFT 0 +#define xmit_bls_rsp64_rxid_MASK 0x0000ffff +#define xmit_bls_rsp64_rxid_WORD word1 +#define xmit_bls_rsp64_oxid_SHIFT 16 +#define xmit_bls_rsp64_oxid_MASK 0x0000ffff +#define xmit_bls_rsp64_oxid_WORD word1 + uint32_t word2; +#define xmit_bls_rsp64_seqcnthi_SHIFT 0 +#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff +#define xmit_bls_rsp64_seqcnthi_WORD word2 +#define xmit_bls_rsp64_seqcntlo_SHIFT 16 +#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff +#define xmit_bls_rsp64_seqcntlo_WORD word2 + uint32_t rsrvd3; + uint32_t rsrvd4; + struct wqe_did wqe_dest; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t word12; +#define xmit_bls_rsp64_temprpi_SHIFT 0 +#define xmit_bls_rsp64_temprpi_MASK 0x0000ffff +#define xmit_bls_rsp64_temprpi_WORD word12 + uint32_t rsvd_13_15[3]; +}; + +struct wqe_rctl_dfctl { + uint32_t word5; +#define wqe_si_SHIFT 2 +#define wqe_si_MASK 0x000000001 +#define wqe_si_WORD word5 +#define wqe_la_SHIFT 3 +#define wqe_la_MASK 0x000000001 +#define wqe_la_WORD word5 +#define wqe_xo_SHIFT 6 +#define wqe_xo_MASK 0x000000001 +#define wqe_xo_WORD word5 +#define wqe_ls_SHIFT 7 +#define wqe_ls_MASK 0x000000001 +#define wqe_ls_WORD word5 +#define wqe_dfctl_SHIFT 8 +#define wqe_dfctl_MASK 0x0000000ff +#define wqe_dfctl_WORD word5 +#define wqe_type_SHIFT 16 +#define wqe_type_MASK 0x0000000ff +#define wqe_type_WORD word5 +#define wqe_rctl_SHIFT 24 +#define wqe_rctl_MASK 0x0000000ff +#define wqe_rctl_WORD word5 +}; + +struct xmit_seq64_wqe { + struct ulp_bde64 bde; + uint32_t rsvd3; + uint32_t relative_offset; + struct wqe_rctl_dfctl wge_ctl; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t xmit_len; + uint32_t rsvd_12_15[3]; +}; +struct xmit_bcast64_wqe { + struct ulp_bde64 bde; + uint32_t seq_payload_len; + uint32_t rsvd4; + struct wqe_rctl_dfctl wge_ctl; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; +}; + +struct gen_req64_wqe { + struct ulp_bde64 bde; + uint32_t request_payload_len; + uint32_t relative_offset; + struct wqe_rctl_dfctl wge_ctl; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_14[3]; + uint32_t max_response_payload_len; +}; + +/* Define NVME PRLI request to fabric. NVME is a + * fabric-only protocol. 
+ * Updated to red-lined v1.08 on Sept 16, 2016 + */ +struct lpfc_nvme_prli { + uint32_t word1; + /* The Response Code is defined in the FCP PRLI lpfc_hw.h */ +#define prli_acc_rsp_code_SHIFT 8 +#define prli_acc_rsp_code_MASK 0x0000000f +#define prli_acc_rsp_code_WORD word1 +#define prli_estabImagePair_SHIFT 13 +#define prli_estabImagePair_MASK 0x00000001 +#define prli_estabImagePair_WORD word1 +#define prli_type_code_ext_SHIFT 16 +#define prli_type_code_ext_MASK 0x000000ff +#define prli_type_code_ext_WORD word1 +#define prli_type_code_SHIFT 24 +#define prli_type_code_MASK 0x000000ff +#define prli_type_code_WORD word1 + uint32_t word_rsvd2; + uint32_t word_rsvd3; + + uint32_t word4; +#define prli_fba_SHIFT 0 +#define prli_fba_MASK 0x00000001 +#define prli_fba_WORD word4 +#define prli_disc_SHIFT 3 +#define prli_disc_MASK 0x00000001 +#define prli_disc_WORD word4 +#define prli_tgt_SHIFT 4 +#define prli_tgt_MASK 0x00000001 +#define prli_tgt_WORD word4 +#define prli_init_SHIFT 5 +#define prli_init_MASK 0x00000001 +#define prli_init_WORD word4 +#define prli_conf_SHIFT 7 +#define prli_conf_MASK 0x00000001 +#define prli_conf_WORD word4 +#define prli_nsler_SHIFT 8 +#define prli_nsler_MASK 0x00000001 +#define prli_nsler_WORD word4 + uint32_t word5; +#define prli_fb_sz_SHIFT 0 +#define prli_fb_sz_MASK 0x0000ffff +#define prli_fb_sz_WORD word5 +#define LPFC_NVMET_FB_SZ_MAX 65536 /* Driver target mode only. */ +}; + +struct create_xri_wqe { + uint32_t rsrvd[5]; /* words 0-4 */ + struct wqe_did wqe_dest; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +#define T_REQUEST_TAG 3 +#define T_XRI_TAG 1 + +struct cmf_sync_wqe { + uint32_t rsrvd[3]; + uint32_t word3; +#define cmf_sync_interval_SHIFT 0 +#define cmf_sync_interval_MASK 0x00000ffff +#define cmf_sync_interval_WORD word3 +#define cmf_sync_afpin_SHIFT 16 +#define cmf_sync_afpin_MASK 0x000000001 +#define cmf_sync_afpin_WORD word3 +#define cmf_sync_asig_SHIFT 17 +#define cmf_sync_asig_MASK 0x000000001 +#define cmf_sync_asig_WORD word3 +#define cmf_sync_op_SHIFT 20 +#define cmf_sync_op_MASK 0x00000000f +#define cmf_sync_op_WORD word3 +#define cmf_sync_ver_SHIFT 24 +#define cmf_sync_ver_MASK 0x0000000ff +#define cmf_sync_ver_WORD word3 +#define LPFC_CMF_SYNC_VER 1 + uint32_t event_tag; + uint32_t word5; +#define cmf_sync_wsigmax_SHIFT 0 +#define cmf_sync_wsigmax_MASK 0x00000ffff +#define cmf_sync_wsigmax_WORD word5 +#define cmf_sync_wsigcnt_SHIFT 16 +#define cmf_sync_wsigcnt_MASK 0x00000ffff +#define cmf_sync_wsigcnt_WORD word5 + uint32_t word6; + uint32_t word7; +#define cmf_sync_cmnd_SHIFT 8 +#define cmf_sync_cmnd_MASK 0x0000000ff +#define cmf_sync_cmnd_WORD word7 + uint32_t word8; + uint32_t word9; +#define cmf_sync_reqtag_SHIFT 0 +#define cmf_sync_reqtag_MASK 0x00000ffff +#define cmf_sync_reqtag_WORD word9 +#define cmf_sync_wfpinmax_SHIFT 16 +#define cmf_sync_wfpinmax_MASK 0x0000000ff +#define cmf_sync_wfpinmax_WORD word9 +#define cmf_sync_wfpincnt_SHIFT 24 +#define cmf_sync_wfpincnt_MASK 0x0000000ff +#define cmf_sync_wfpincnt_WORD word9 + uint32_t word10; +#define cmf_sync_qosd_SHIFT 9 +#define cmf_sync_qosd_MASK 0x00000001 +#define cmf_sync_qosd_WORD word10 + uint32_t word11; +#define cmf_sync_cmd_type_SHIFT 0 +#define cmf_sync_cmd_type_MASK 0x0000000f +#define cmf_sync_cmd_type_WORD word11 +#define cmf_sync_wqec_SHIFT 7 +#define cmf_sync_wqec_MASK 0x00000001 +#define cmf_sync_wqec_WORD word11 +#define cmf_sync_cqid_SHIFT 16 +#define cmf_sync_cqid_MASK 0x0000ffff +#define cmf_sync_cqid_WORD 
word11 + uint32_t read_bytes; + uint32_t word13; +#define cmf_sync_period_SHIFT 24 +#define cmf_sync_period_MASK 0x000000ff +#define cmf_sync_period_WORD word13 + uint32_t word14; + uint32_t word15; +}; + +struct abort_cmd_wqe { + uint32_t rsrvd[3]; + uint32_t word3; +#define abort_cmd_ia_SHIFT 0 +#define abort_cmd_ia_MASK 0x000000001 +#define abort_cmd_ia_WORD word3 +#define abort_cmd_criteria_SHIFT 8 +#define abort_cmd_criteria_MASK 0x0000000ff +#define abort_cmd_criteria_WORD word3 + uint32_t rsrvd4; + uint32_t rsrvd5; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +struct fcp_iwrite64_wqe { + struct ulp_bde64 bde; + uint32_t word3; +#define cmd_buff_len_SHIFT 16 +#define cmd_buff_len_MASK 0x00000ffff +#define cmd_buff_len_WORD word3 +#define payload_offset_len_SHIFT 0 +#define payload_offset_len_MASK 0x0000ffff +#define payload_offset_len_WORD word3 + uint32_t total_xfer_len; + uint32_t initial_xfer_len; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsrvd12; + struct ulp_bde64 ph_bde; /* words 13-15 */ +}; + +struct fcp_iread64_wqe { + struct ulp_bde64 bde; + uint32_t word3; +#define cmd_buff_len_SHIFT 16 +#define cmd_buff_len_MASK 0x00000ffff +#define cmd_buff_len_WORD word3 +#define payload_offset_len_SHIFT 0 +#define payload_offset_len_MASK 0x0000ffff +#define payload_offset_len_WORD word3 + uint32_t total_xfer_len; /* word 4 */ + uint32_t rsrvd5; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsrvd12; + struct ulp_bde64 ph_bde; /* words 13-15 */ +}; + +struct fcp_icmnd64_wqe { + struct ulp_bde64 bde; /* words 0-2 */ + uint32_t word3; +#define cmd_buff_len_SHIFT 16 +#define cmd_buff_len_MASK 0x00000ffff +#define cmd_buff_len_WORD word3 +#define payload_offset_len_SHIFT 0 +#define payload_offset_len_MASK 0x0000ffff +#define payload_offset_len_WORD word3 + uint32_t rsrvd4; /* word 4 */ + uint32_t rsrvd5; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +struct fcp_trsp64_wqe { + struct ulp_bde64 bde; + uint32_t response_len; + uint32_t rsvd_4_5[2]; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +struct fcp_tsend64_wqe { + struct ulp_bde64 bde; + uint32_t payload_offset_len; + uint32_t relative_offset; + uint32_t reserved; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t fcp_data_len; /* word 12 */ + uint32_t rsvd_13_15[3]; /* word 13-15 */ +}; + +struct fcp_treceive64_wqe { + struct ulp_bde64 bde; + uint32_t payload_offset_len; + uint32_t relative_offset; + uint32_t reserved; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t fcp_data_len; /* word 12 */ + uint32_t rsvd_13_15[3]; /* word 13-15 */ +}; +#define TXRDY_PAYLOAD_LEN 12 + +#define CMD_SEND_FRAME 0xE1 + +struct send_frame_wqe { + struct ulp_bde64 bde; /* words 0-2 */ + uint32_t frame_len; /* word 3 */ + uint32_t fc_hdr_wd0; /* word 4 */ + uint32_t fc_hdr_wd1; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t fc_hdr_wd2; /* word 12 */ + uint32_t fc_hdr_wd3; /* word 13 */ + uint32_t fc_hdr_wd4; /* word 14 */ + uint32_t fc_hdr_wd5; /* word 15 */ +}; + +#define ELS_RDF_REG_TAG_CNT 4 +struct lpfc_els_rdf_reg_desc { + struct fc_df_desc_fpin_reg reg_desc; /* descriptor header */ + __be32 desc_tags[ELS_RDF_REG_TAG_CNT]; + /* tags in reg_desc */ +}; + +struct lpfc_els_rdf_req { + struct fc_els_rdf rdf; /* hdr up to descriptors */ + struct lpfc_els_rdf_reg_desc reg_d1; /* 1st descriptor */ +}; + +struct 
lpfc_els_rdf_rsp { + struct fc_els_rdf_resp rdf_resp; /* hdr up to descriptors */ + struct lpfc_els_rdf_reg_desc reg_d1; /* 1st descriptor */ +}; + +union lpfc_wqe { + uint32_t words[16]; + struct lpfc_wqe_generic generic; + struct fcp_icmnd64_wqe fcp_icmd; + struct fcp_iread64_wqe fcp_iread; + struct fcp_iwrite64_wqe fcp_iwrite; + struct abort_cmd_wqe abort_cmd; + struct cmf_sync_wqe cmf_sync; + struct create_xri_wqe create_xri; + struct xmit_bcast64_wqe xmit_bcast64; + struct xmit_seq64_wqe xmit_sequence; + struct xmit_bls_rsp64_wqe xmit_bls_rsp; + struct xmit_els_rsp64_wqe xmit_els_rsp; + struct els_request64_wqe els_req; + struct gen_req64_wqe gen_req; + struct fcp_trsp64_wqe fcp_trsp; + struct fcp_tsend64_wqe fcp_tsend; + struct fcp_treceive64_wqe fcp_treceive; + struct send_frame_wqe send_frame; +}; + +union lpfc_wqe128 { + uint32_t words[32]; + struct lpfc_wqe_generic generic; + struct fcp_icmnd64_wqe fcp_icmd; + struct fcp_iread64_wqe fcp_iread; + struct fcp_iwrite64_wqe fcp_iwrite; + struct abort_cmd_wqe abort_cmd; + struct cmf_sync_wqe cmf_sync; + struct create_xri_wqe create_xri; + struct xmit_bcast64_wqe xmit_bcast64; + struct xmit_seq64_wqe xmit_sequence; + struct xmit_bls_rsp64_wqe xmit_bls_rsp; + struct xmit_els_rsp64_wqe xmit_els_rsp; + struct els_request64_wqe els_req; + struct gen_req64_wqe gen_req; + struct fcp_trsp64_wqe fcp_trsp; + struct fcp_tsend64_wqe fcp_tsend; + struct fcp_treceive64_wqe fcp_treceive; + struct send_frame_wqe send_frame; +}; + +#define MAGIC_NUMBER_G6 0xFEAA0003 +#define MAGIC_NUMBER_G7 0xFEAA0005 +#define MAGIC_NUMBER_G7P 0xFEAA0020 + +struct lpfc_grp_hdr { + uint32_t size; + uint32_t magic_number; + uint32_t word2; +#define lpfc_grp_hdr_file_type_SHIFT 24 +#define lpfc_grp_hdr_file_type_MASK 0x000000FF +#define lpfc_grp_hdr_file_type_WORD word2 +#define lpfc_grp_hdr_id_SHIFT 16 +#define lpfc_grp_hdr_id_MASK 0x000000FF +#define lpfc_grp_hdr_id_WORD word2 + uint8_t rev_name[128]; + uint8_t date[12]; + uint8_t revision[32]; +}; + +/* Defines for WQE command type */ +#define FCP_COMMAND 0x0 +#define NVME_READ_CMD 0x0 +#define FCP_COMMAND_DATA_OUT 0x1 +#define NVME_WRITE_CMD 0x1 +#define COMMAND_DATA_IN 0x0 +#define COMMAND_DATA_OUT 0x1 +#define FCP_COMMAND_TRECEIVE 0x2 +#define FCP_COMMAND_TRSP 0x3 +#define FCP_COMMAND_TSEND 0x7 +#define OTHER_COMMAND 0x8 +#define CMF_SYNC_COMMAND 0xA +#define ELS_COMMAND_NON_FIP 0xC +#define ELS_COMMAND_FIP 0xD + +#define LPFC_NVME_EMBED_CMD 0x0 +#define LPFC_NVME_EMBED_WRITE 0x1 +#define LPFC_NVME_EMBED_READ 0x2 + +/* WQE Commands */ +#define CMD_ABORT_XRI_WQE 0x0F +#define CMD_XMIT_SEQUENCE64_WQE 0x82 +#define CMD_XMIT_BCAST64_WQE 0x84 +#define CMD_ELS_REQUEST64_WQE 0x8A +#define CMD_XMIT_ELS_RSP64_WQE 0x95 +#define CMD_XMIT_BLS_RSP64_WQE 0x97 +#define CMD_FCP_IWRITE64_WQE 0x98 +#define CMD_FCP_IREAD64_WQE 0x9A +#define CMD_FCP_ICMND64_WQE 0x9C +#define CMD_FCP_TSEND64_WQE 0x9F +#define CMD_FCP_TRECEIVE64_WQE 0xA1 +#define CMD_FCP_TRSP64_WQE 0xA3 +#define CMD_GEN_REQUEST64_WQE 0xC2 +#define CMD_CMF_SYNC_WQE 0xE8 + +#define CMD_WQE_MASK 0xff + + +#define LPFC_FW_DUMP 1 +#define LPFC_FW_RESET 2 +#define LPFC_DV_RESET 3 + +/* On some kernels, enum fc_ls_tlv_dtag does not have + * these 2 enums defined, on other kernels it does. + * To get aound this we need to add these 2 defines here. + */ +#ifndef ELS_DTAG_LNK_FAULT_CAP +#define ELS_DTAG_LNK_FAULT_CAP 0x0001000D +#endif +#ifndef ELS_DTAG_CG_SIGNAL_CAP +#define ELS_DTAG_CG_SIGNAL_CAP 0x0001000F +#endif + +/* + * Initializer useful for decoding FPIN string table. 
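 * (The table built from this initializer pairs each FPIN congestion
 *  severity value with a printable name, e.g. "Warning"/"Alarm", so an
 *  event decoder can log text instead of raw descriptor values; the
 *  exact consumer lives elsewhere in the driver.)
 *
 * A general note on the *_SHIFT/_MASK/_WORD macro triplets defined
 * throughout this header: they are consumed by the driver's bf_get()/
 * bf_set() helpers (defined earlier in lpfc_hw4.h). Roughly, as a
 * sketch of the idea rather than the exact macro text:
 *
 *   val = ((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK;    // bf_get
 *   (ptr)->name##_WORD = (((v) & name##_MASK) << name##_SHIFT) |
 *                        ((ptr)->name##_WORD &
 *                         ~(name##_MASK << name##_SHIFT));       // bf_set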
+ */ +#define FC_FPIN_CONGN_SEVERITY_INIT { \ + { FPIN_CONGN_SEVERITY_WARNING, "Warning" }, \ + { FPIN_CONGN_SEVERITY_ERROR, "Alarm" }, \ +} + +/* Used for logging FPIN messages */ +#define LPFC_FPIN_WWPN_LINE_SZ 128 +#define LPFC_FPIN_WWPN_LINE_CNT 6 +#define LPFC_FPIN_WWPN_NUM_LINE 6 diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h new file mode 100644 index 000000000..0b1616e93 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_ids.h @@ -0,0 +1,156 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include + +const struct pci_device_id lpfc_id_table[] = { + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 
+ PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G6_FC, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7_FC, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7P_FC, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE, + PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_161E, }, + {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE, + PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_162E, }, + {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE, + PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_164E, }, + {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP, + PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_161P, }, + {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP, + PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_162P, }, + {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP, + PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_164P, }, + {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE, + PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_321E, }, + {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE, + PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_322E, }, + {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE, + PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_324E, }, + {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP, + PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_321P, }, + {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP, + PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_322P, }, + {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP, + PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_324P, }, + {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_2, + PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_2XX2, }, + {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3, + PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3162, }, + {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3, + PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3322, }, + { 0 } 
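/*
 * How a table like this is typically consumed (a sketch, not the
 * driver's literal registration code; callback names below are
 * illustrative placeholders): the { 0 } entry terminates the list,
 * PCI_ANY_ID rows match any subvendor/subdevice, and the ATTO rows
 * additionally key on specific subsystem vendor/device IDs.
 *
 *   static struct pci_driver example_driver = {
 *       .name     = "lpfc",
 *       .id_table = lpfc_id_table,     // the table above
 *       .probe    = example_probe,     // placeholder
 *       .remove   = example_remove,    // placeholder
 *   };
 *   MODULE_DEVICE_TABLE(pci, lpfc_id_table);
 *   module_pci_driver(example_driver); // or pci_register_driver()
 */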
+}; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c new file mode 100644 index 000000000..9e59c0501 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -0,0 +1,15880 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_version.h" +#include "lpfc_ids.h" + +static enum cpuhp_state lpfc_cpuhp_state; +/* Used when mapping IRQ vectors in a driver centric manner */ +static uint32_t lpfc_present_cpu; +static bool lpfc_pldv_detect; + +static void __lpfc_cpuhp_remove(struct lpfc_hba *phba); +static void lpfc_cpuhp_remove(struct lpfc_hba *phba); +static void lpfc_cpuhp_add(struct lpfc_hba *phba); +static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); +static int lpfc_post_rcv_buf(struct lpfc_hba *); +static int lpfc_sli4_queue_verify(struct lpfc_hba *); +static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); +static int lpfc_setup_endian_order(struct lpfc_hba *); +static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); +static void lpfc_free_els_sgl_list(struct lpfc_hba *); +static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); +static void lpfc_init_sgl_list(struct lpfc_hba *); +static int lpfc_init_active_sgl_array(struct lpfc_hba *); +static void lpfc_free_active_sgl(struct lpfc_hba *); +static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); +static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); +static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); +static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); +static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); +static void lpfc_sli4_disable_intr(struct lpfc_hba *); +static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); +static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); +static 
uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); +static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); +static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *); +static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba); + +static struct scsi_transport_template *lpfc_transport_template = NULL; +static struct scsi_transport_template *lpfc_vport_transport_template = NULL; +static DEFINE_IDR(lpfc_hba_index); +#define LPFC_NVMET_BUF_POST 254 +static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport); +static void lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts); + +/** + * lpfc_config_port_prep - Perform lpfc initialization prior to config port + * @phba: pointer to lpfc hba data structure. + * + * This routine will do LPFC initialization prior to issuing the CONFIG_PORT + * mailbox command. It retrieves the revision information from the HBA and + * collects the Vital Product Data (VPD) about the HBA for preparing the + * configuration of the HBA. + * + * Return codes: + * 0 - success. + * -ERESTART - requests the SLI layer to reset the HBA and try again. + * Any other value - indicates an error. + **/ +int +lpfc_config_port_prep(struct lpfc_hba *phba) +{ + lpfc_vpd_t *vp = &phba->vpd; + int i = 0, rc; + LPFC_MBOXQ_t *pmb; + MAILBOX_t *mb; + char *lpfc_vpd_data = NULL; + uint16_t offset = 0; + static char licensed[56] = + "key unlock for use with gnu public licensed code only\0"; + static int init_key = 1; + + pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + phba->link_state = LPFC_HBA_ERROR; + return -ENOMEM; + } + + mb = &pmb->u.mb; + phba->link_state = LPFC_INIT_MBX_CMDS; + + if (lpfc_is_LC_HBA(phba->pcidev->device)) { + if (init_key) { + uint32_t *ptext = (uint32_t *) licensed; + + for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) + *ptext = cpu_to_be32(*ptext); + init_key = 0; + } + + lpfc_read_nv(phba, pmb); + memset((char*)mb->un.varRDnvp.rsvd3, 0, + sizeof (mb->un.varRDnvp.rsvd3)); + memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, + sizeof (licensed)); + + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0324 Config Port initialization " + "error, mbxCmd x%x READ_NVPARM, " + "mbxStatus x%x\n", + mb->mbxCommand, mb->mbxStatus); + mempool_free(pmb, phba->mbox_mem_pool); + return -ERESTART; + } + memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, + sizeof(phba->wwnn)); + memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, + sizeof(phba->wwpn)); + } + + /* + * Clear all option bits except LPFC_SLI3_BG_ENABLED, + * which was already set in lpfc_get_cfgparam() + */ + phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; + + /* Setup and issue mailbox READ REV command */ + lpfc_read_rev(phba, pmb); + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0439 Adapter failed to init, mbxCmd x%x " + "READ_REV, mbxStatus x%x\n", + mb->mbxCommand, mb->mbxStatus); + mempool_free( pmb, phba->mbox_mem_pool); + return -ERESTART; + } + + + /* + * The value of rr must be 1 since the driver set the cv field to 1. + * This setting requires the FW to set all revision fields. 
+ */ + if (mb->un.varRdRev.rr == 0) { + vp->rev.rBit = 0; + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0440 Adapter failed to init, READ_REV has " + "missing revision information.\n"); + mempool_free(pmb, phba->mbox_mem_pool); + return -ERESTART; + } + + if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { + mempool_free(pmb, phba->mbox_mem_pool); + return -EINVAL; + } + + /* Save information as VPD data */ + vp->rev.rBit = 1; + memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); + vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; + memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); + vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; + memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); + vp->rev.biuRev = mb->un.varRdRev.biuRev; + vp->rev.smRev = mb->un.varRdRev.smRev; + vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; + vp->rev.endecRev = mb->un.varRdRev.endecRev; + vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; + vp->rev.fcphLow = mb->un.varRdRev.fcphLow; + vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; + vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; + vp->rev.postKernRev = mb->un.varRdRev.postKernRev; + vp->rev.opFwRev = mb->un.varRdRev.opFwRev; + + /* If the sli feature level is less then 9, we must + * tear down all RPIs and VPIs on link down if NPIV + * is enabled. + */ + if (vp->rev.feaLevelHigh < 9) + phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; + + if (lpfc_is_LC_HBA(phba->pcidev->device)) + memcpy(phba->RandomData, (char *)&mb->un.varWords[24], + sizeof (phba->RandomData)); + + /* Get adapter VPD information */ + lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); + if (!lpfc_vpd_data) + goto out_free_mbox; + do { + lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0441 VPD not present on adapter, " + "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", + mb->mbxCommand, mb->mbxStatus); + mb->un.varDmp.word_cnt = 0; + } + /* dump mem may return a zero when finished or we got a + * mailbox error, either way we are done. + */ + if (mb->un.varDmp.word_cnt == 0) + break; + + if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) + mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; + lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, + lpfc_vpd_data + offset, + mb->un.varDmp.word_cnt); + offset += mb->un.varDmp.word_cnt; + } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); + + lpfc_parse_vpd(phba, lpfc_vpd_data, offset); + + kfree(lpfc_vpd_data); +out_free_mbox: + mempool_free(pmb, phba->mbox_mem_pool); + return 0; +} + +/** + * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd + * @phba: pointer to lpfc hba data structure. + * @pmboxq: pointer to the driver internal queue element for mailbox command. + * + * This is the completion handler for driver's configuring asynchronous event + * mailbox command to the device. If the mailbox command returns successfully, + * it will set internal async event support flag to 1; otherwise, it will + * set internal async event support flag to 0. + **/ +static void +lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) +{ + if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) + phba->temp_sensor_support = 1; + else + phba->temp_sensor_support = 0; + mempool_free(pmboxq, phba->mbox_mem_pool); + return; +} + +/** + * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler + * @phba: pointer to lpfc hba data structure. 
+ * @pmboxq: pointer to the driver internal queue element for mailbox command. + * + * This is the completion handler for dump mailbox command for getting + * wake up parameters. When this command complete, the response contain + * Option rom version of the HBA. This function translate the version number + * into a human readable string and store it in OptionROMVersion. + **/ +static void +lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) +{ + struct prog_id *prg; + uint32_t prog_id_word; + char dist = ' '; + /* character array used for decoding dist type. */ + char dist_char[] = "nabx"; + + if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { + mempool_free(pmboxq, phba->mbox_mem_pool); + return; + } + + prg = (struct prog_id *) &prog_id_word; + + /* word 7 contain option rom version */ + prog_id_word = pmboxq->u.mb.un.varWords[7]; + + /* Decode the Option rom version word to a readable string */ + dist = dist_char[prg->dist]; + + if ((prg->dist == 3) && (prg->num == 0)) + snprintf(phba->OptionROMVersion, 32, "%d.%d%d", + prg->ver, prg->rev, prg->lev); + else + snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d", + prg->ver, prg->rev, prg->lev, + dist, prg->num); + mempool_free(pmboxq, phba->mbox_mem_pool); + return; +} + +/** + * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname, + * @vport: pointer to lpfc vport data structure. + * + * + * Return codes + * None. + **/ +void +lpfc_update_vport_wwn(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + + /* + * If the name is empty or there exists a soft name + * then copy the service params name, otherwise use the fc name + */ + if (vport->fc_nodename.u.wwn[0] == 0) + memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, + sizeof(struct lpfc_name)); + else + memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, + sizeof(struct lpfc_name)); + + /* + * If the port name has changed, then set the Param changes flag + * to unreg the login + */ + if (vport->fc_portname.u.wwn[0] != 0 && + memcmp(&vport->fc_portname, &vport->fc_sparam.portName, + sizeof(struct lpfc_name))) { + vport->vport_flag |= FAWWPN_PARAM_CHG; + + if (phba->sli_rev == LPFC_SLI_REV4 && + vport->port_type == LPFC_PHYSICAL_PORT && + phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) { + if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)) + phba->sli4_hba.fawwpn_flag &= + ~LPFC_FAWWPN_FABRIC; + lpfc_printf_log(phba, KERN_INFO, + LOG_SLI | LOG_DISCOVERY | LOG_ELS, + "2701 FA-PWWN change WWPN from %llx to " + "%llx: vflag x%x fawwpn_flag x%x\n", + wwn_to_u64(vport->fc_portname.u.wwn), + wwn_to_u64 + (vport->fc_sparam.portName.u.wwn), + vport->vport_flag, + phba->sli4_hba.fawwpn_flag); + memcpy(&vport->fc_portname, &vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + } + } + + if (vport->fc_portname.u.wwn[0] == 0) + memcpy(&vport->fc_portname, &vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + else + memcpy(&vport->fc_sparam.portName, &vport->fc_portname, + sizeof(struct lpfc_name)); +} + +/** + * lpfc_config_port_post - Perform lpfc initialization after config port + * @phba: pointer to lpfc hba data structure. + * + * This routine will do LPFC initialization after the CONFIG_PORT mailbox + * command call. It performs all internal resource and state setups on the + * port: post IOCB buffers, enable appropriate host interrupt attentions, + * ELS ring timers, etc. + * + * Return codes + * 0 - success. + * Any other value - error. 
+ **/ +int +lpfc_config_port_post(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + LPFC_MBOXQ_t *pmb; + MAILBOX_t *mb; + struct lpfc_dmabuf *mp; + struct lpfc_sli *psli = &phba->sli; + uint32_t status, timeout; + int i, j; + int rc; + + spin_lock_irq(&phba->hbalock); + /* + * If the Config port completed correctly the HBA is not + * over heated any more. + */ + if (phba->over_temp_state == HBA_OVER_TEMP) + phba->over_temp_state = HBA_NORMAL_TEMP; + spin_unlock_irq(&phba->hbalock); + + pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + phba->link_state = LPFC_HBA_ERROR; + return -ENOMEM; + } + mb = &pmb->u.mb; + + /* Get login parameters for NID. */ + rc = lpfc_read_sparam(phba, pmb, 0); + if (rc) { + mempool_free(pmb, phba->mbox_mem_pool); + return -ENOMEM; + } + + pmb->vport = vport; + if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0448 Adapter failed init, mbxCmd x%x " + "READ_SPARM mbxStatus x%x\n", + mb->mbxCommand, mb->mbxStatus); + phba->link_state = LPFC_HBA_ERROR; + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); + return -EIO; + } + + mp = (struct lpfc_dmabuf *)pmb->ctx_buf; + + /* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no + * longer needed. Prevent unintended ctx_buf access as the mbox is + * reused. + */ + memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + pmb->ctx_buf = NULL; + lpfc_update_vport_wwn(vport); + + /* Update the fc_host data structures with new wwn. */ + fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); + fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); + fc_host_max_npiv_vports(shost) = phba->max_vpi; + + /* If no serial number in VPD data, use low 6 bytes of WWNN */ + /* This should be consolidated into parse_vpd ? 
- mr */ + if (phba->SerialNumber[0] == 0) { + uint8_t *outptr; + + outptr = &vport->fc_nodename.u.s.IEEE[0]; + for (i = 0; i < 12; i++) { + status = *outptr++; + j = ((status & 0xf0) >> 4); + if (j <= 9) + phba->SerialNumber[i] = + (char)((uint8_t) 0x30 + (uint8_t) j); + else + phba->SerialNumber[i] = + (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); + i++; + j = (status & 0xf); + if (j <= 9) + phba->SerialNumber[i] = + (char)((uint8_t) 0x30 + (uint8_t) j); + else + phba->SerialNumber[i] = + (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); + } + } + + lpfc_read_config(phba, pmb); + pmb->vport = vport; + if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0453 Adapter failed to init, mbxCmd x%x " + "READ_CONFIG, mbxStatus x%x\n", + mb->mbxCommand, mb->mbxStatus); + phba->link_state = LPFC_HBA_ERROR; + mempool_free( pmb, phba->mbox_mem_pool); + return -EIO; + } + + /* Check if the port is disabled */ + lpfc_sli_read_link_ste(phba); + + /* Reset the DFT_HBA_Q_DEPTH to the max xri */ + if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3359 HBA queue depth changed from %d to %d\n", + phba->cfg_hba_queue_depth, + mb->un.varRdConfig.max_xri); + phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri; + } + + phba->lmt = mb->un.varRdConfig.lmt; + + /* Get the default values for Model Name and Description */ + lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); + + phba->link_state = LPFC_LINK_DOWN; + + /* Only process IOCBs on ELS ring till hba_state is READY */ + if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) + psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; + if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) + psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; + + /* Post receive buffers for desired rings */ + if (phba->sli_rev != 3) + lpfc_post_rcv_buf(phba); + + /* + * Configure HBA MSI-X attention conditions to messages if MSI-X mode + */ + if (phba->intr_type == MSIX) { + rc = lpfc_config_msi(phba, pmb); + if (rc) { + mempool_free(pmb, phba->mbox_mem_pool); + return -EIO; + } + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0352 Config MSI mailbox command " + "failed, mbxCmd x%x, mbxStatus x%x\n", + pmb->u.mb.mbxCommand, + pmb->u.mb.mbxStatus); + mempool_free(pmb, phba->mbox_mem_pool); + return -EIO; + } + } + + spin_lock_irq(&phba->hbalock); + /* Initialize ERATT handling flag */ + phba->hba_flag &= ~HBA_ERATT_HANDLED; + + /* Enable appropriate host interrupts */ + if (lpfc_readl(phba->HCregaddr, &status)) { + spin_unlock_irq(&phba->hbalock); + return -EIO; + } + status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; + if (psli->num_rings > 0) + status |= HC_R0INT_ENA; + if (psli->num_rings > 1) + status |= HC_R1INT_ENA; + if (psli->num_rings > 2) + status |= HC_R2INT_ENA; + if (psli->num_rings > 3) + status |= HC_R3INT_ENA; + + if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && + (phba->cfg_poll & DISABLE_FCP_RING_INT)) + status &= ~(HC_R0INT_ENA); + + writel(status, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + spin_unlock_irq(&phba->hbalock); + + /* Set up ring-0 (ELS) timer */ + timeout = phba->fc_ratov * 2; + mod_timer(&vport->els_tmofunc, + jiffies + msecs_to_jiffies(1000 * timeout)); + /* Set up heart beat (HB) timer */ + mod_timer(&phba->hb_tmofunc, + jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); + 
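/*
 * Worked example of the timer arming above (the fc_ratov value is
 * illustrative only): with fc_ratov = 10, timeout = 20, so the ELS
 * ring timer fires after msecs_to_jiffies(1000 * 20), i.e. ~20 s,
 * while the heart beat timer is armed for LPFC_HB_MBOX_INTERVAL
 * (currently 5) seconds.
 */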
phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); + phba->last_completion_time = jiffies; + /* Set up error attention (ERATT) polling timer */ + mod_timer(&phba->eratt_poll, + jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); + + if (phba->hba_flag & LINK_DISABLED) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2598 Adapter Link is disabled.\n"); + lpfc_down_link(phba, pmb); + pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2599 Adapter failed to issue DOWN_LINK" + " mbox command rc 0x%x\n", rc); + + mempool_free(pmb, phba->mbox_mem_pool); + return -EIO; + } + } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { + mempool_free(pmb, phba->mbox_mem_pool); + rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); + if (rc) + return rc; + } + /* MBOX buffer will be freed in mbox compl */ + pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + phba->link_state = LPFC_HBA_ERROR; + return -ENOMEM; + } + + lpfc_config_async(phba, pmb, LPFC_ELS_RING); + pmb->mbox_cmpl = lpfc_config_async_cmpl; + pmb->vport = phba->pport; + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + + if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0456 Adapter failed to issue " + "ASYNCEVT_ENABLE mbox status x%x\n", + rc); + mempool_free(pmb, phba->mbox_mem_pool); + } + + /* Get Option rom version */ + pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + phba->link_state = LPFC_HBA_ERROR; + return -ENOMEM; + } + + lpfc_dump_wakeup_param(phba, pmb); + pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; + pmb->vport = phba->pport; + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + + if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0435 Adapter failed " + "to get Option ROM version status x%x\n", rc); + mempool_free(pmb, phba->mbox_mem_pool); + } + + return 0; +} + +/** + * lpfc_sli4_refresh_params - update driver copy of params. + * @phba: Pointer to HBA context object. + * + * This is called to refresh driver copy of dynamic fields from the + * common_get_sli4_parameters descriptor. + **/ +int +lpfc_sli4_refresh_params(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + struct lpfc_mqe *mqe; + struct lpfc_sli4_parameters *mbx_sli4_parameters; + int length, rc; + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + mqe = &mboxq->u.mqe; + /* Read the port's SLI4 Config Parameters */ + length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, + length, LPFC_SLI4_MBX_EMBED); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (unlikely(rc)) { + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; + } + mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; + phba->sli4_hba.pc_sli4_params.mi_cap = + bf_get(cfg_mi_ver, mbx_sli4_parameters); + + /* Are we forcing MI off via module parameter? 
*/ + if (phba->cfg_enable_mi) + phba->sli4_hba.pc_sli4_params.mi_ver = + bf_get(cfg_mi_ver, mbx_sli4_parameters); + else + phba->sli4_hba.pc_sli4_params.mi_ver = 0; + + phba->sli4_hba.pc_sli4_params.cmf = + bf_get(cfg_cmf, mbx_sli4_parameters); + phba->sli4_hba.pc_sli4_params.pls = + bf_get(cfg_pvl, mbx_sli4_parameters); + + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; +} + +/** + * lpfc_hba_init_link - Initialize the FC link + * @phba: pointer to lpfc hba data structure. + * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT + * + * This routine will issue the INIT_LINK mailbox command call. + * It is available to other drivers through the lpfc_hba data + * structure for use as a delayed link up mechanism with the + * module parameter lpfc_suppress_link_up. + * + * Return code + * 0 - success + * Any other value - error + **/ +static int +lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) +{ + return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); +} + +/** + * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology + * @phba: pointer to lpfc hba data structure. + * @fc_topology: desired fc topology. + * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT + * + * This routine will issue the INIT_LINK mailbox command call. + * It is available to other drivers through the lpfc_hba data + * structure for use as a delayed link up mechanism with the + * module parameter lpfc_suppress_link_up. + * + * Return code + * 0 - success + * Any other value - error + **/ +int +lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, + uint32_t flag) +{ + struct lpfc_vport *vport = phba->pport; + LPFC_MBOXQ_t *pmb; + MAILBOX_t *mb; + int rc; + + pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + phba->link_state = LPFC_HBA_ERROR; + return -ENOMEM; + } + mb = &pmb->u.mb; + pmb->vport = vport; + + if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || + ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && + !(phba->lmt & LMT_1Gb)) || + ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && + !(phba->lmt & LMT_2Gb)) || + ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && + !(phba->lmt & LMT_4Gb)) || + ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && + !(phba->lmt & LMT_8Gb)) || + ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && + !(phba->lmt & LMT_10Gb)) || + ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && + !(phba->lmt & LMT_16Gb)) || + ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && + !(phba->lmt & LMT_32Gb)) || + ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && + !(phba->lmt & LMT_64Gb))) { + /* Reset link speed to auto */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1302 Invalid speed for this board:%d " + "Reset link speed to auto.\n", + phba->cfg_link_speed); + phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; + } + lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); + pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + if (phba->sli_rev < LPFC_SLI_REV4) + lpfc_set_loopback_flag(phba); + rc = lpfc_sli_issue_mbox(phba, pmb, flag); + if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0498 Adapter failed to init, mbxCmd x%x " + "INIT_LINK, mbxStatus x%x\n", + mb->mbxCommand, mb->mbxStatus); + if (phba->sli_rev <= LPFC_SLI_REV3) { + /* Clear all interrupt enable conditions */ + writel(0, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + /* Clear all pending interrupts */ + 
writel(0xffffffff, phba->HAregaddr); + readl(phba->HAregaddr); /* flush */ + } + phba->link_state = LPFC_HBA_ERROR; + if (rc != MBX_BUSY || flag == MBX_POLL) + mempool_free(pmb, phba->mbox_mem_pool); + return -EIO; + } + phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; + if (flag == MBX_POLL) + mempool_free(pmb, phba->mbox_mem_pool); + + return 0; +} + +/** + * lpfc_hba_down_link - this routine downs the FC link + * @phba: pointer to lpfc hba data structure. + * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT + * + * This routine will issue the DOWN_LINK mailbox command call. + * It is available to other drivers through the lpfc_hba data + * structure for use to stop the link. + * + * Return code + * 0 - success + * Any other value - error + **/ +static int +lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) +{ + LPFC_MBOXQ_t *pmb; + int rc; + + pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + phba->link_state = LPFC_HBA_ERROR; + return -ENOMEM; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0491 Adapter Link is disabled.\n"); + lpfc_down_link(phba, pmb); + pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(phba, pmb, flag); + if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2522 Adapter failed to issue DOWN_LINK" + " mbox command rc 0x%x\n", rc); + + mempool_free(pmb, phba->mbox_mem_pool); + return -EIO; + } + if (flag == MBX_POLL) + mempool_free(pmb, phba->mbox_mem_pool); + + return 0; +} + +/** + * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will do LPFC uninitialization before the HBA is reset when + * bringing down the SLI Layer. + * + * Return codes + * 0 - success. + * Any other value - error. + **/ +int +lpfc_hba_down_prep(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + int i; + + if (phba->sli_rev <= LPFC_SLI_REV3) { + /* Disable interrupts */ + writel(0, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } + + if (phba->pport->load_flag & FC_UNLOADING) + lpfc_cleanup_discovery_resources(phba->pport); + else { + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && + vports[i] != NULL; i++) + lpfc_cleanup_discovery_resources(vports[i]); + lpfc_destroy_vport_work_array(phba, vports); + } + return 0; +} + +/** + * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free + * rspiocb which got deferred + * + * @phba: pointer to lpfc HBA data structure. + * + * This routine will cleanup completed slow path events after HBA is reset + * when bringing down the SLI Layer. + * + * + * Return codes + * void. 
+ **/ +static void +lpfc_sli4_free_sp_events(struct lpfc_hba *phba) +{ + struct lpfc_iocbq *rspiocbq; + struct hbq_dmabuf *dmabuf; + struct lpfc_cq_event *cq_event; + + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~HBA_SP_QUEUE_EVT; + spin_unlock_irq(&phba->hbalock); + + while (!list_empty(&phba->sli4_hba.sp_queue_event)) { + /* Get the response iocb from the head of work queue */ + spin_lock_irq(&phba->hbalock); + list_remove_head(&phba->sli4_hba.sp_queue_event, + cq_event, struct lpfc_cq_event, list); + spin_unlock_irq(&phba->hbalock); + + switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { + case CQE_CODE_COMPL_WQE: + rspiocbq = container_of(cq_event, struct lpfc_iocbq, + cq_event); + lpfc_sli_release_iocbq(phba, rspiocbq); + break; + case CQE_CODE_RECEIVE: + case CQE_CODE_RECEIVE_V1: + dmabuf = container_of(cq_event, struct hbq_dmabuf, + cq_event); + lpfc_in_buf_free(phba, &dmabuf->dbuf); + } + } +} + +/** + * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will cleanup posted ELS buffers after the HBA is reset + * when bringing down the SLI Layer. + * + * + * Return codes + * void. + **/ +static void +lpfc_hba_free_post_buf(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + struct lpfc_dmabuf *mp, *next_mp; + LIST_HEAD(buflist); + int count; + + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) + lpfc_sli_hbqbuf_free_all(phba); + else { + /* Cleanup preposted buffers on the ELS ring */ + pring = &psli->sli3_ring[LPFC_ELS_RING]; + spin_lock_irq(&phba->hbalock); + list_splice_init(&pring->postbufq, &buflist); + spin_unlock_irq(&phba->hbalock); + + count = 0; + list_for_each_entry_safe(mp, next_mp, &buflist, list) { + list_del(&mp->list); + count++; + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + } + + spin_lock_irq(&phba->hbalock); + pring->postbufq_cnt -= count; + spin_unlock_irq(&phba->hbalock); + } +} + +/** + * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will cleanup the txcmplq after the HBA is reset when bringing + * down the SLI Layer. + * + * Return codes + * void + **/ +static void +lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + struct lpfc_queue *qp = NULL; + struct lpfc_sli_ring *pring; + LIST_HEAD(completions); + int i; + struct lpfc_iocbq *piocb, *next_iocb; + + if (phba->sli_rev != LPFC_SLI_REV4) { + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + spin_lock_irq(&phba->hbalock); + /* At this point in time the HBA is either reset or DOA + * Nothing should be on txcmplq as it will + * NEVER complete. 
+ */ + list_splice_init(&pring->txcmplq, &completions); + pring->txcmplq_cnt = 0; + spin_unlock_irq(&phba->hbalock); + + lpfc_sli_abort_iocb_ring(phba, pring); + } + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &completions, + IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); + return; + } + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; + spin_lock_irq(&pring->ring_lock); + list_for_each_entry_safe(piocb, next_iocb, + &pring->txcmplq, list) + piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; + list_splice_init(&pring->txcmplq, &completions); + pring->txcmplq_cnt = 0; + spin_unlock_irq(&pring->ring_lock); + lpfc_sli_abort_iocb_ring(phba, pring); + } + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &completions, + IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); +} + +/** + * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will do uninitialization after the HBA is reset when bring + * down the SLI Layer. + * + * Return codes + * 0 - success. + * Any other value - error. + **/ +static int +lpfc_hba_down_post_s3(struct lpfc_hba *phba) +{ + lpfc_hba_free_post_buf(phba); + lpfc_hba_clean_txcmplq(phba); + return 0; +} + +/** + * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will do uninitialization after the HBA is reset when bring + * down the SLI Layer. + * + * Return codes + * 0 - success. + * Any other value - error. + **/ +static int +lpfc_hba_down_post_s4(struct lpfc_hba *phba) +{ + struct lpfc_io_buf *psb, *psb_next; + struct lpfc_async_xchg_ctx *ctxp, *ctxp_next; + struct lpfc_sli4_hdw_queue *qp; + LIST_HEAD(aborts); + LIST_HEAD(nvme_aborts); + LIST_HEAD(nvmet_aborts); + struct lpfc_sglq *sglq_entry = NULL; + int cnt, idx; + + + lpfc_sli_hbqbuf_free_all(phba); + lpfc_hba_clean_txcmplq(phba); + + /* At this point in time the HBA is either reset or DOA. Either + * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be + * on the lpfc_els_sgl_list so that it can either be freed if the + * driver is unloading or reposted if the driver is restarting + * the port. + */ + + /* sgl_list_lock required because worker thread uses this + * list. + */ + spin_lock_irq(&phba->sli4_hba.sgl_list_lock); + list_for_each_entry(sglq_entry, + &phba->sli4_hba.lpfc_abts_els_sgl_list, list) + sglq_entry->state = SGL_FREED; + + list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, + &phba->sli4_hba.lpfc_els_sgl_list); + + + spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); + + /* abts_xxxx_buf_list_lock required because worker thread uses this + * list. 
+ */ + spin_lock_irq(&phba->hbalock); + cnt = 0; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + + spin_lock(&qp->abts_io_buf_list_lock); + list_splice_init(&qp->lpfc_abts_io_buf_list, + &aborts); + + list_for_each_entry_safe(psb, psb_next, &aborts, list) { + psb->pCmd = NULL; + psb->status = IOSTAT_SUCCESS; + cnt++; + } + spin_lock(&qp->io_buf_list_put_lock); + list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); + qp->put_io_bufs += qp->abts_scsi_io_bufs; + qp->put_io_bufs += qp->abts_nvme_io_bufs; + qp->abts_scsi_io_bufs = 0; + qp->abts_nvme_io_bufs = 0; + spin_unlock(&qp->io_buf_list_put_lock); + spin_unlock(&qp->abts_io_buf_list_lock); + } + spin_unlock_irq(&phba->hbalock); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, + &nvmet_aborts); + spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); + list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { + ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP); + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); + } + } + + lpfc_sli4_free_sp_events(phba); + return cnt; +} + +/** + * lpfc_hba_down_post - Wrapper func for hba down post routine + * @phba: pointer to lpfc HBA data structure. + * + * This routine wraps the actual SLI3 or SLI4 routine for performing + * uninitialization after the HBA is reset when bring down the SLI Layer. + * + * Return codes + * 0 - success. + * Any other value - error. + **/ +int +lpfc_hba_down_post(struct lpfc_hba *phba) +{ + return (*phba->lpfc_hba_down_post)(phba); +} + +/** + * lpfc_hb_timeout - The HBA-timer timeout handler + * @t: timer context used to obtain the pointer to lpfc hba data structure. + * + * This is the HBA-timer timeout handler registered to the lpfc driver. When + * this timer fires, a HBA timeout event shall be posted to the lpfc driver + * work-port-events bitmap and the worker thread is notified. This timeout + * event will be used by the worker thread to invoke the actual timeout + * handler routine, lpfc_hb_timeout_handler. Any periodical operations will + * be performed in the timeout handler and the HBA timeout event bit shall + * be cleared by the worker thread after it has taken the event bitmap out. + **/ +static void +lpfc_hb_timeout(struct timer_list *t) +{ + struct lpfc_hba *phba; + uint32_t tmo_posted; + unsigned long iflag; + + phba = from_timer(phba, t, hb_tmofunc); + + /* Check for heart beat timeout conditions */ + spin_lock_irqsave(&phba->pport->work_port_lock, iflag); + tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; + if (!tmo_posted) + phba->pport->work_port_events |= WORKER_HB_TMO; + spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); + + /* Tell the worker thread there is work to do */ + if (!tmo_posted) + lpfc_worker_wake_up(phba); + return; +} + +/** + * lpfc_rrq_timeout - The RRQ-timer timeout handler + * @t: timer context used to obtain the pointer to lpfc hba data structure. + * + * This is the RRQ-timer timeout handler registered to the lpfc driver. When + * this timer fires, a RRQ timeout event shall be posted to the lpfc driver + * work-port-events bitmap and the worker thread is notified. This timeout + * event will be used by the worker thread to invoke the actual timeout + * handler routine, lpfc_rrq_handler. 
Any periodical operations will + * be performed in the timeout handler and the RRQ timeout event bit shall + * be cleared by the worker thread after it has taken the event bitmap out. + **/ +static void +lpfc_rrq_timeout(struct timer_list *t) +{ + struct lpfc_hba *phba; + unsigned long iflag; + + phba = from_timer(phba, t, rrq_tmr); + spin_lock_irqsave(&phba->pport->work_port_lock, iflag); + if (!(phba->pport->load_flag & FC_UNLOADING)) + phba->hba_flag |= HBA_RRQ_ACTIVE; + else + phba->hba_flag &= ~HBA_RRQ_ACTIVE; + spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); + + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_worker_wake_up(phba); +} + +/** + * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function + * @phba: pointer to lpfc hba data structure. + * @pmboxq: pointer to the driver internal queue element for mailbox command. + * + * This is the callback function to the lpfc heart-beat mailbox command. + * If configured, the lpfc driver issues the heart-beat mailbox command to + * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the + * heart-beat mailbox command is issued, the driver shall set up heart-beat + * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks + * heart-beat outstanding state. Once the mailbox command comes back and + * no error conditions detected, the heart-beat mailbox command timer is + * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding + * state is cleared for the next heart-beat. If the timer expired with the + * heart-beat outstanding state set, the driver will put the HBA offline. + **/ +static void +lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) +{ + unsigned long drvr_flag; + + spin_lock_irqsave(&phba->hbalock, drvr_flag); + phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + + /* Check and reset heart-beat timer if necessary */ + mempool_free(pmboxq, phba->mbox_mem_pool); + if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && + !(phba->link_state == LPFC_HBA_ERROR) && + !(phba->pport->load_flag & FC_UNLOADING)) + mod_timer(&phba->hb_tmofunc, + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); + return; +} + +/* + * lpfc_idle_stat_delay_work - idle_stat tracking + * + * This routine tracks per-eq idle_stat and determines polling decisions. + * + * Return codes: + * None + **/ +static void +lpfc_idle_stat_delay_work(struct work_struct *work) +{ + struct lpfc_hba *phba = container_of(to_delayed_work(work), + struct lpfc_hba, + idle_stat_delay_work); + struct lpfc_queue *eq; + struct lpfc_sli4_hdw_queue *hdwq; + struct lpfc_idle_stat *idle_stat; + u32 i, idle_percent; + u64 wall, wall_idle, diff_wall, diff_idle, busy_time; + + if (phba->pport->load_flag & FC_UNLOADING) + return; + + if (phba->link_state == LPFC_HBA_ERROR || + phba->pport->fc_flag & FC_OFFLINE_MODE || + phba->cmf_active_mode != LPFC_CFG_OFF) + goto requeue; + + for_each_present_cpu(i) { + hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; + eq = hdwq->hba_eq; + + /* Skip if we've already handled this eq's primary CPU */ + if (eq->chann != i) + continue; + + idle_stat = &phba->sli4_hba.idle_stat[i]; + + /* get_cpu_idle_time returns values as running counters. Thus, + * to know the amount for this period, the prior counter values + * need to be subtracted from the current counter values. + * From there, the idle time stat can be calculated as a + * percentage of 100 - the sum of the other consumption times. 
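 *
 * Worked example (illustrative numbers): if diff_wall is 1000 and
 * diff_idle is 850, busy_time is 150, the first assignment below
 * computes 100 * 150 / 1000 = 15, and idle_percent then becomes
 * 100 - 15 = 85; since 85 >= 15 the EQ is left in (or moved to)
 * LPFC_THREADED_IRQ polling rather than LPFC_QUEUE_WORK.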
+ */ + wall_idle = get_cpu_idle_time(i, &wall, 1); + diff_idle = wall_idle - idle_stat->prev_idle; + diff_wall = wall - idle_stat->prev_wall; + + if (diff_wall <= diff_idle) + busy_time = 0; + else + busy_time = diff_wall - diff_idle; + + idle_percent = div64_u64(100 * busy_time, diff_wall); + idle_percent = 100 - idle_percent; + + if (idle_percent < 15) + eq->poll_mode = LPFC_QUEUE_WORK; + else + eq->poll_mode = LPFC_THREADED_IRQ; + + idle_stat->prev_idle = wall_idle; + idle_stat->prev_wall = wall; + } + +requeue: + schedule_delayed_work(&phba->idle_stat_delay_work, + msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); +} + +static void +lpfc_hb_eq_delay_work(struct work_struct *work) +{ + struct lpfc_hba *phba = container_of(to_delayed_work(work), + struct lpfc_hba, eq_delay_work); + struct lpfc_eq_intr_info *eqi, *eqi_new; + struct lpfc_queue *eq, *eq_next; + unsigned char *ena_delay = NULL; + uint32_t usdelay; + int i; + + if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) + return; + + if (phba->link_state == LPFC_HBA_ERROR || + phba->pport->fc_flag & FC_OFFLINE_MODE) + goto requeue; + + ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay), + GFP_KERNEL); + if (!ena_delay) + goto requeue; + + for (i = 0; i < phba->cfg_irq_chann; i++) { + /* Get the EQ corresponding to the IRQ vector */ + eq = phba->sli4_hba.hba_eq_hdl[i].eq; + if (!eq) + continue; + if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) { + eq->q_flag &= ~HBA_EQ_DELAY_CHK; + ena_delay[eq->last_cpu] = 1; + } + } + + for_each_present_cpu(i) { + eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); + if (ena_delay[i]) { + usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP; + if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) + usdelay = LPFC_MAX_AUTO_EQ_DELAY; + } else { + usdelay = 0; + } + + eqi->icnt = 0; + + list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { + if (unlikely(eq->last_cpu != i)) { + eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, + eq->last_cpu); + list_move_tail(&eq->cpu_list, &eqi_new->list); + continue; + } + if (usdelay != eq->q_mode) + lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, + usdelay); + } + } + + kfree(ena_delay); + +requeue: + queue_delayed_work(phba->wq, &phba->eq_delay_work, + msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); +} + +/** + * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution + * @phba: pointer to lpfc hba data structure. + * + * For each heartbeat, this routine does some heuristic methods to adjust + * XRI distribution. The goal is to fully utilize free XRIs. + **/ +static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) +{ + u32 i; + u32 hwq_count; + + hwq_count = phba->cfg_hdw_queue; + for (i = 0; i < hwq_count; i++) { + /* Adjust XRIs in private pool */ + lpfc_adjust_pvt_pool_count(phba, i); + + /* Adjust high watermark */ + lpfc_adjust_high_watermark(phba, i); + +#ifdef LPFC_MXP_STAT + /* Snapshot pbl, pvt and busy count */ + lpfc_snapshot_mxp(phba, i); +#endif + } +} + +/** + * lpfc_issue_hb_mbox - Issues heart-beat mailbox command + * @phba: pointer to lpfc hba data structure. + * + * If a HB mbox is not already in progrees, this routine will allocate + * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command, + * and issue it. The HBA_HBEAT_INP flag means the command is in progress. 
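 *
 * Return codes (as implemented below)
 *   0 - success, or a heartbeat mailbox was already outstanding
 *   -ENOMEM - no mailbox memory available
 *   -ENXIO - the mailbox command could not be issued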
+ **/ +int +lpfc_issue_hb_mbox(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *pmboxq; + int retval; + + /* Is a Heartbeat mbox already in progress */ + if (phba->hba_flag & HBA_HBEAT_INP) + return 0; + + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) + return -ENOMEM; + + lpfc_heart_beat(phba, pmboxq); + pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; + pmboxq->vport = phba->pport; + retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); + + if (retval != MBX_BUSY && retval != MBX_SUCCESS) { + mempool_free(pmboxq, phba->mbox_mem_pool); + return -ENXIO; + } + phba->hba_flag |= HBA_HBEAT_INP; + + return 0; +} + +/** + * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command + * @phba: pointer to lpfc hba data structure. + * + * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO + * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless + * of the value of lpfc_enable_hba_heartbeat. + * If lpfc_enable_hba_heartbeat is set, the timeout routine will always + * try to issue a MBX_HEARTBEAT mbox command. + **/ +void +lpfc_issue_hb_tmo(struct lpfc_hba *phba) +{ + if (phba->cfg_enable_hba_heartbeat) + return; + phba->hba_flag |= HBA_HBEAT_TMO; +} + +/** + * lpfc_hb_timeout_handler - The HBA-timer timeout handler + * @phba: pointer to lpfc hba data structure. + * + * This is the actual HBA-timer timeout handler to be invoked by the worker + * thread whenever the HBA timer fired and HBA-timeout event posted. This + * handler performs any periodic operations needed for the device. If such + * periodic event has already been attended to either in the interrupt handler + * or by processing slow-ring or fast-ring events within the HBA-timer + * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets + * the timer for the next timeout period. If lpfc heart-beat mailbox command + * is configured and there is no heart-beat mailbox command outstanding, a + * heart-beat mailbox is issued and timer set properly. Otherwise, if there + * has been a heart-beat mailbox command outstanding, the HBA shall be put + * to offline. 
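+ *
+ * For illustration, apart from the early return taken when the port is
+ * offline, unloading, or in error, every path through this handler ends
+ * by re-arming the heart-beat timer:
+ *
+ *	tmo = 1000 * LPFC_HB_MBOX_INTERVAL;	// or LPFC_HB_MBOX_TIMEOUT
+ *	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
+ *
+ * so LPFC_HB_MBOX_INTERVAL (currently 5 seconds) is used between checks
+ * and LPFC_HB_MBOX_TIMEOUT (currently 30 seconds) is used while a
+ * heart-beat mailbox command is outstanding.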
+ **/ +void +lpfc_hb_timeout_handler(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + struct lpfc_dmabuf *buf_ptr; + int retval = 0; + int i, tmo; + struct lpfc_sli *psli = &phba->sli; + LIST_HEAD(completions); + + if (phba->cfg_xri_rebalancing) { + /* Multi-XRI pools handler */ + lpfc_hb_mxp_handler(phba); + } + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + lpfc_rcv_seq_check_edtov(vports[i]); + lpfc_fdmi_change_check(vports[i]); + } + lpfc_destroy_vport_work_array(phba, vports); + + if ((phba->link_state == LPFC_HBA_ERROR) || + (phba->pport->load_flag & FC_UNLOADING) || + (phba->pport->fc_flag & FC_OFFLINE_MODE)) + return; + + if (phba->elsbuf_cnt && + (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { + spin_lock_irq(&phba->hbalock); + list_splice_init(&phba->elsbuf, &completions); + phba->elsbuf_cnt = 0; + phba->elsbuf_prev_cnt = 0; + spin_unlock_irq(&phba->hbalock); + + while (!list_empty(&completions)) { + list_remove_head(&completions, buf_ptr, + struct lpfc_dmabuf, list); + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); + kfree(buf_ptr); + } + } + phba->elsbuf_prev_cnt = phba->elsbuf_cnt; + + /* If there is no heart beat outstanding, issue a heartbeat command */ + if (phba->cfg_enable_hba_heartbeat) { + /* If IOs are completing, no need to issue a MBX_HEARTBEAT */ + spin_lock_irq(&phba->pport->work_port_lock); + if (time_after(phba->last_completion_time + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), + jiffies)) { + spin_unlock_irq(&phba->pport->work_port_lock); + if (phba->hba_flag & HBA_HBEAT_INP) + tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); + else + tmo = (1000 * LPFC_HB_MBOX_INTERVAL); + goto out; + } + spin_unlock_irq(&phba->pport->work_port_lock); + + /* Check if a MBX_HEARTBEAT is already in progress */ + if (phba->hba_flag & HBA_HBEAT_INP) { + /* + * If heart beat timeout called with HBA_HBEAT_INP set + * we need to give the hb mailbox cmd a chance to + * complete or TMO. + */ + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0459 Adapter heartbeat still outstanding: " + "last compl time was %d ms.\n", + jiffies_to_msecs(jiffies + - phba->last_completion_time)); + tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); + } else { + if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && + (list_empty(&psli->mboxq))) { + + retval = lpfc_issue_hb_mbox(phba); + if (retval) { + tmo = (1000 * LPFC_HB_MBOX_INTERVAL); + goto out; + } + phba->skipped_hb = 0; + } else if (time_before_eq(phba->last_completion_time, + phba->skipped_hb)) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2857 Last completion time not " + " updated in %d ms\n", + jiffies_to_msecs(jiffies + - phba->last_completion_time)); + } else + phba->skipped_hb = jiffies; + + tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); + goto out; + } + } else { + /* Check to see if we want to force a MBX_HEARTBEAT */ + if (phba->hba_flag & HBA_HBEAT_TMO) { + retval = lpfc_issue_hb_mbox(phba); + if (retval) + tmo = (1000 * LPFC_HB_MBOX_INTERVAL); + else + tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); + goto out; + } + tmo = (1000 * LPFC_HB_MBOX_INTERVAL); + } +out: + mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo)); +} + +/** + * lpfc_offline_eratt - Bring lpfc offline on hardware error attention + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to bring the HBA offline when HBA hardware error + * other than Port Error 6 has been detected. 
+ **/ +static void +lpfc_offline_eratt(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); + + lpfc_offline(phba); + lpfc_reset_barrier(phba); + spin_lock_irq(&phba->hbalock); + lpfc_sli_brdreset(phba); + spin_unlock_irq(&phba->hbalock); + lpfc_hba_down_post(phba); + lpfc_sli_brdready(phba, HS_MBRDY); + lpfc_unblock_mgmt_io(phba); + phba->link_state = LPFC_HBA_ERROR; + return; +} + +/** + * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to bring a SLI4 HBA offline when HBA hardware error + * other than Port Error 6 has been detected. + **/ +void +lpfc_sli4_offline_eratt(struct lpfc_hba *phba) +{ + spin_lock_irq(&phba->hbalock); + if (phba->link_state == LPFC_HBA_ERROR && + test_bit(HBA_PCI_ERR, &phba->bit_flags)) { + spin_unlock_irq(&phba->hbalock); + return; + } + phba->link_state = LPFC_HBA_ERROR; + spin_unlock_irq(&phba->hbalock); + + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); + lpfc_sli_flush_io_rings(phba); + lpfc_offline(phba); + lpfc_hba_down_post(phba); + lpfc_unblock_mgmt_io(phba); +} + +/** + * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to handle the deferred HBA hardware error + * conditions. This type of error is indicated by HBA by setting ER1 + * and another ER bit in the host status register. The driver will + * wait until the ER1 bit clears before handling the error condition. + **/ +static void +lpfc_handle_deferred_eratt(struct lpfc_hba *phba) +{ + uint32_t old_host_status = phba->work_hs; + struct lpfc_sli *psli = &phba->sli; + + /* If the pci channel is offline, ignore possible errors, + * since we cannot communicate with the pci card anyway. + */ + if (pci_channel_offline(phba->pcidev)) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~DEFER_ERATT; + spin_unlock_irq(&phba->hbalock); + return; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0479 Deferred Adapter Hardware Error " + "Data: x%x x%x x%x\n", + phba->work_hs, phba->work_status[0], + phba->work_status[1]); + + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); + + + /* + * Firmware stops when it triggred erratt. That could cause the I/Os + * dropped by the firmware. Error iocb (I/O) on txcmplq and let the + * SCSI layer retry it after re-establishing link. + */ + lpfc_sli_abort_fcp_rings(phba); + + /* + * There was a firmware error. Take the hba offline and then + * attempt to restart it. + */ + lpfc_offline_prep(phba, LPFC_MBX_WAIT); + lpfc_offline(phba); + + /* Wait for the ER1 bit to clear.*/ + while (phba->work_hs & HS_FFER1) { + msleep(100); + if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { + phba->work_hs = UNPLUG_ERR ; + break; + } + /* If driver is unloading let the worker thread continue */ + if (phba->pport->load_flag & FC_UNLOADING) { + phba->work_hs = 0; + break; + } + } + + /* + * This is to ptrotect against a race condition in which + * first write to the host attention register clear the + * host status register. 
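+	 *
+	 * In outline (illustrative restatement): work_hs was sampled into
+	 * old_host_status on entry; the write to the host attention
+	 * register can clear the host status register, so the poll loop
+	 * above may read work_hs back as 0 even though the error has not
+	 * been handled.  In that case the saved value, minus HS_FFER1, is
+	 * restored below instead of treating the error as gone.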
+ */ + if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) + phba->work_hs = old_host_status & ~HS_FFER1; + + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~DEFER_ERATT; + spin_unlock_irq(&phba->hbalock); + phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); + phba->work_status[1] = readl(phba->MBslimaddr + 0xac); +} + +static void +lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) +{ + struct lpfc_board_event_header board_event; + struct Scsi_Host *shost; + + board_event.event_type = FC_REG_BOARD_EVENT; + board_event.subcategory = LPFC_EVENT_PORTINTERR; + shost = lpfc_shost_from_vport(phba->pport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(board_event), + (char *) &board_event, + LPFC_NL_VENDOR_ID); +} + +/** + * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to handle the following HBA hardware error + * conditions: + * 1 - HBA error attention interrupt + * 2 - DMA ring index out of range + * 3 - Mailbox command came back as unknown + **/ +static void +lpfc_handle_eratt_s3(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + struct lpfc_sli *psli = &phba->sli; + uint32_t event_data; + unsigned long temperature; + struct temp_event temp_event_data; + struct Scsi_Host *shost; + + /* If the pci channel is offline, ignore possible errors, + * since we cannot communicate with the pci card anyway. + */ + if (pci_channel_offline(phba->pcidev)) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~DEFER_ERATT; + spin_unlock_irq(&phba->hbalock); + return; + } + + /* If resets are disabled then leave the HBA alone and return */ + if (!phba->cfg_enable_hba_reset) + return; + + /* Send an internal error event to mgmt application */ + lpfc_board_errevt_to_mgmt(phba); + + if (phba->hba_flag & DEFER_ERATT) + lpfc_handle_deferred_eratt(phba); + + if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { + if (phba->work_hs & HS_FFER6) + /* Re-establishing Link */ + lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, + "1301 Re-establishing Link " + "Data: x%x x%x x%x\n", + phba->work_hs, phba->work_status[0], + phba->work_status[1]); + if (phba->work_hs & HS_FFER8) + /* Device Zeroization */ + lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, + "2861 Host Authentication device " + "zeroization Data:x%x x%x x%x\n", + phba->work_hs, phba->work_status[0], + phba->work_status[1]); + + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); + + /* + * Firmware stops when it triggled erratt with HS_FFER6. + * That could cause the I/Os dropped by the firmware. + * Error iocb (I/O) on txcmplq and let the SCSI layer + * retry it after re-establishing link. + */ + lpfc_sli_abort_fcp_rings(phba); + + /* + * There was a firmware error. Take the hba offline and then + * attempt to restart it. 
+ */ + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); + lpfc_offline(phba); + lpfc_sli_brdrestart(phba); + if (lpfc_online(phba) == 0) { /* Initialize the HBA */ + lpfc_unblock_mgmt_io(phba); + return; + } + lpfc_unblock_mgmt_io(phba); + } else if (phba->work_hs & HS_CRIT_TEMP) { + temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); + temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; + temp_event_data.event_code = LPFC_CRIT_TEMP; + temp_event_data.data = (uint32_t)temperature; + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0406 Adapter maximum temperature exceeded " + "(%ld), taking this port offline " + "Data: x%x x%x x%x\n", + temperature, phba->work_hs, + phba->work_status[0], phba->work_status[1]); + + shost = lpfc_shost_from_vport(phba->pport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(temp_event_data), + (char *) &temp_event_data, + SCSI_NL_VID_TYPE_PCI + | PCI_VENDOR_ID_EMULEX); + + spin_lock_irq(&phba->hbalock); + phba->over_temp_state = HBA_OVER_TEMP; + spin_unlock_irq(&phba->hbalock); + lpfc_offline_eratt(phba); + + } else { + /* The if clause above forces this code path when the status + * failure is a value other than FFER6. Do not call the offline + * twice. This is the adapter hardware error path. + */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0457 Adapter Hardware Error " + "Data: x%x x%x x%x\n", + phba->work_hs, + phba->work_status[0], phba->work_status[1]); + + event_data = FC_REG_DUMP_EVENT; + shost = lpfc_shost_from_vport(vport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(event_data), (char *) &event_data, + SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); + + lpfc_offline_eratt(phba); + } + return; +} + +/** + * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg + * @phba: pointer to lpfc hba data structure. + * @mbx_action: flag for mailbox shutdown action. + * @en_rn_msg: send reset/port recovery message. + * This routine is invoked to perform an SLI4 port PCI function reset in + * response to port status register polling attention. It waits for port + * status register (ERR, RDY, RN) bits before proceeding with function reset. + * During this process, interrupt vectors are freed and later requested + * for handling possible port resource change. + **/ +static int +lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, + bool en_rn_msg) +{ + int rc; + uint32_t intr_mode; + LPFC_MBOXQ_t *mboxq; + + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= + LPFC_SLI_INTF_IF_TYPE_2) { + /* + * On error status condition, driver need to wait for port + * ready before performing reset. + */ + rc = lpfc_sli4_pdev_status_reg_wait(phba); + if (rc) + return rc; + } + + /* need reset: attempt for port recovery */ + if (en_rn_msg) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2887 Reset Needed: Attempting Port " + "Recovery...\n"); + + /* If we are no wait, the HBA has been reset and is not + * functional, thus we should clear + * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags. 
+ */ + if (mbx_action == LPFC_MBX_NO_WAIT) { + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; + if (phba->sli.mbox_active) { + mboxq = phba->sli.mbox_active; + mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; + __lpfc_mbox_cmpl_put(phba, mboxq); + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + } + spin_unlock_irq(&phba->hbalock); + } + + lpfc_offline_prep(phba, mbx_action); + lpfc_sli_flush_io_rings(phba); + lpfc_offline(phba); + /* release interrupt for possible resource change */ + lpfc_sli4_disable_intr(phba); + rc = lpfc_sli_brdrestart(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6309 Failed to restart board\n"); + return rc; + } + /* request and enable interrupt */ + intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3175 Failed to enable interrupt\n"); + return -EIO; + } + phba->intr_mode = intr_mode; + rc = lpfc_online(phba); + if (rc == 0) + lpfc_unblock_mgmt_io(phba); + + return rc; +} + +/** + * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to handle the SLI4 HBA hardware error attention + * conditions. + **/ +static void +lpfc_handle_eratt_s4(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + uint32_t event_data; + struct Scsi_Host *shost; + uint32_t if_type; + struct lpfc_register portstat_reg = {0}; + uint32_t reg_err1, reg_err2; + uint32_t uerrlo_reg, uemasklo_reg; + uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; + bool en_rn_msg = true; + struct temp_event temp_event_data; + struct lpfc_register portsmphr_reg; + int rc, i; + + /* If the pci channel is offline, ignore possible errors, since + * we cannot communicate with the pci card anyway. 
+ */ + if (pci_channel_offline(phba->pcidev)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3166 pci channel is offline\n"); + lpfc_sli_flush_io_rings(phba); + return; + } + + memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + switch (if_type) { + case LPFC_SLI_INTF_IF_TYPE_0: + pci_rd_rc1 = lpfc_readl( + phba->sli4_hba.u.if_type0.UERRLOregaddr, + &uerrlo_reg); + pci_rd_rc2 = lpfc_readl( + phba->sli4_hba.u.if_type0.UEMASKLOregaddr, + &uemasklo_reg); + /* consider PCI bus read error as pci_channel_offline */ + if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) + return; + if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { + lpfc_sli4_offline_eratt(phba); + return; + } + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "7623 Checking UE recoverable"); + + for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { + if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, + &portsmphr_reg.word0)) + continue; + + smphr_port_status = bf_get(lpfc_port_smphr_port_status, + &portsmphr_reg); + if ((smphr_port_status & LPFC_PORT_SEM_MASK) == + LPFC_PORT_SEM_UE_RECOVERABLE) + break; + /*Sleep for 1Sec, before checking SEMAPHORE */ + msleep(1000); + } + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "4827 smphr_port_status x%x : Waited %dSec", + smphr_port_status, i); + + /* Recoverable UE, reset the HBA device */ + if ((smphr_port_status & LPFC_PORT_SEM_MASK) == + LPFC_PORT_SEM_UE_RECOVERABLE) { + for (i = 0; i < 20; i++) { + msleep(1000); + if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, + &portsmphr_reg.word0) && + (LPFC_POST_STAGE_PORT_READY == + bf_get(lpfc_port_smphr_port_status, + &portsmphr_reg))) { + rc = lpfc_sli4_port_sta_fn_reset(phba, + LPFC_MBX_NO_WAIT, en_rn_msg); + if (rc == 0) + return; + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "4215 Failed to recover UE"); + break; + } + } + } + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "7624 Firmware not ready: Failing UE recovery," + " waited %dSec", i); + phba->link_state = LPFC_HBA_ERROR; + break; + + case LPFC_SLI_INTF_IF_TYPE_2: + case LPFC_SLI_INTF_IF_TYPE_6: + pci_rd_rc1 = lpfc_readl( + phba->sli4_hba.u.if_type2.STATUSregaddr, + &portstat_reg.word0); + /* consider PCI bus read error as pci_channel_offline */ + if (pci_rd_rc1 == -EIO) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3151 PCI bus read access failure: x%x\n", + readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); + lpfc_sli4_offline_eratt(phba); + return; + } + reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); + reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); + if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2889 Port Overtemperature event, " + "taking port offline Data: x%x x%x\n", + reg_err1, reg_err2); + + phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; + temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; + temp_event_data.event_code = LPFC_CRIT_TEMP; + temp_event_data.data = 0xFFFFFFFF; + + shost = lpfc_shost_from_vport(phba->pport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(temp_event_data), + (char *)&temp_event_data, + SCSI_NL_VID_TYPE_PCI + | PCI_VENDOR_ID_EMULEX); + + spin_lock_irq(&phba->hbalock); + phba->over_temp_state = HBA_OVER_TEMP; + spin_unlock_irq(&phba->hbalock); + lpfc_sli4_offline_eratt(phba); + return; + } + if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && + reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3143 Port Down: 
Firmware Update " + "Detected\n"); + en_rn_msg = false; + } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && + reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3144 Port Down: Debug Dump\n"); + else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && + reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3145 Port Down: Provisioning\n"); + + /* If resets are disabled then leave the HBA alone and return */ + if (!phba->cfg_enable_hba_reset) + return; + + /* Check port status register for function reset */ + rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, + en_rn_msg); + if (rc == 0) { + /* don't report event on forced debug dump */ + if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && + reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) + return; + else + break; + } + /* fall through for not able to recover */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3152 Unrecoverable error\n"); + lpfc_sli4_offline_eratt(phba); + break; + case LPFC_SLI_INTF_IF_TYPE_1: + default: + break; + } + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3123 Report dump event to upper layer\n"); + /* Send an internal error event to mgmt application */ + lpfc_board_errevt_to_mgmt(phba); + + event_data = FC_REG_DUMP_EVENT; + shost = lpfc_shost_from_vport(vport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(event_data), (char *) &event_data, + SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); +} + +/** + * lpfc_handle_eratt - Wrapper func for handling hba error attention + * @phba: pointer to lpfc HBA data structure. + * + * This routine wraps the actual SLI3 or SLI4 hba error attention handling + * routine from the API jump table function pointer from the lpfc_hba struct. + * + * Return codes + * 0 - success. + * Any other value - error. + **/ +void +lpfc_handle_eratt(struct lpfc_hba *phba) +{ + (*phba->lpfc_handle_eratt)(phba); +} + +/** + * lpfc_handle_latt - The HBA link event handler + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked from the worker thread to handle a HBA host + * attention link event. SLI3 only. 
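+ *
+ * On failure the routine logs message 0300 with a Data: value taken from
+ * the body below:
+ *
+ *	rc == 1: mailbox allocation from mbox_mem_pool failed
+ *	rc == 2: lpfc_mbox_rsrc_prep() failed
+ *	rc == 4: lpfc_sli_issue_mbox() returned MBX_NOT_FINISHED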
+ **/ +void +lpfc_handle_latt(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *pmb; + volatile uint32_t control; + int rc = 0; + + pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + rc = 1; + goto lpfc_handle_latt_err_exit; + } + + rc = lpfc_mbox_rsrc_prep(phba, pmb); + if (rc) { + rc = 2; + mempool_free(pmb, phba->mbox_mem_pool); + goto lpfc_handle_latt_err_exit; + } + + /* Cleanup any outstanding ELS commands */ + lpfc_els_flush_all_cmd(phba); + psli->slistat.link_event++; + lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf); + pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; + pmb->vport = vport; + /* Block ELS IOCBs until we have processed this mbox command */ + phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; + rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + rc = 4; + goto lpfc_handle_latt_free_mbuf; + } + + /* Clear Link Attention in HA REG */ + spin_lock_irq(&phba->hbalock); + writel(HA_LATT, phba->HAregaddr); + readl(phba->HAregaddr); /* flush */ + spin_unlock_irq(&phba->hbalock); + + return; + +lpfc_handle_latt_free_mbuf: + phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); +lpfc_handle_latt_err_exit: + /* Enable Link attention interrupts */ + spin_lock_irq(&phba->hbalock); + psli->sli_flag |= LPFC_PROCESS_LA; + control = readl(phba->HCregaddr); + control |= HC_LAINT_ENA; + writel(control, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + + /* Clear Link Attention in HA REG */ + writel(HA_LATT, phba->HAregaddr); + readl(phba->HAregaddr); /* flush */ + spin_unlock_irq(&phba->hbalock); + lpfc_linkdown(phba); + phba->link_state = LPFC_HBA_ERROR; + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); + + return; +} + +static void +lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex) +{ + int i, j; + + while (length > 0) { + /* Look for Serial Number */ + if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) { + *pindex += 2; + i = vpd[*pindex]; + *pindex += 1; + j = 0; + length -= (3+i); + while (i--) { + phba->SerialNumber[j++] = vpd[(*pindex)++]; + if (j == 31) + break; + } + phba->SerialNumber[j] = 0; + continue; + } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) { + phba->vpd_flag |= VPD_MODEL_DESC; + *pindex += 2; + i = vpd[*pindex]; + *pindex += 1; + j = 0; + length -= (3+i); + while (i--) { + phba->ModelDesc[j++] = vpd[(*pindex)++]; + if (j == 255) + break; + } + phba->ModelDesc[j] = 0; + continue; + } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) { + phba->vpd_flag |= VPD_MODEL_NAME; + *pindex += 2; + i = vpd[*pindex]; + *pindex += 1; + j = 0; + length -= (3+i); + while (i--) { + phba->ModelName[j++] = vpd[(*pindex)++]; + if (j == 79) + break; + } + phba->ModelName[j] = 0; + continue; + } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) { + phba->vpd_flag |= VPD_PROGRAM_TYPE; + *pindex += 2; + i = vpd[*pindex]; + *pindex += 1; + j = 0; + length -= (3+i); + while (i--) { + phba->ProgramType[j++] = vpd[(*pindex)++]; + if (j == 255) + break; + } + phba->ProgramType[j] = 0; + continue; + } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) { + phba->vpd_flag |= VPD_PORT; + *pindex += 2; + i = vpd[*pindex]; + *pindex += 1; + j = 0; + length -= (3 + i); + while (i--) { + if ((phba->sli_rev == LPFC_SLI_REV4) && + 
(phba->sli4_hba.pport_name_sta == + LPFC_SLI4_PPNAME_GET)) { + j++; + (*pindex)++; + } else + phba->Port[j++] = vpd[(*pindex)++]; + if (j == 19) + break; + } + if ((phba->sli_rev != LPFC_SLI_REV4) || + (phba->sli4_hba.pport_name_sta == + LPFC_SLI4_PPNAME_NON)) + phba->Port[j] = 0; + continue; + } else { + *pindex += 2; + i = vpd[*pindex]; + *pindex += 1; + *pindex += i; + length -= (3 + i); + } + } +} + +/** + * lpfc_parse_vpd - Parse VPD (Vital Product Data) + * @phba: pointer to lpfc hba data structure. + * @vpd: pointer to the vital product data. + * @len: length of the vital product data in bytes. + * + * This routine parses the Vital Product Data (VPD). The VPD is treated as + * an array of characters. In this routine, the ModelName, ProgramType, and + * ModelDesc, etc. fields of the phba data structure will be populated. + * + * Return codes + * 0 - pointer to the VPD passed in is NULL + * 1 - success + **/ +int +lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) +{ + uint8_t lenlo, lenhi; + int Length; + int i; + int finished = 0; + int index = 0; + + if (!vpd) + return 0; + + /* Vital Product */ + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0455 Vital Product Data: x%x x%x x%x x%x\n", + (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], + (uint32_t) vpd[3]); + while (!finished && (index < (len - 4))) { + switch (vpd[index]) { + case 0x82: + case 0x91: + index += 1; + lenlo = vpd[index]; + index += 1; + lenhi = vpd[index]; + index += 1; + i = ((((unsigned short)lenhi) << 8) + lenlo); + index += i; + break; + case 0x90: + index += 1; + lenlo = vpd[index]; + index += 1; + lenhi = vpd[index]; + index += 1; + Length = ((((unsigned short)lenhi) << 8) + lenlo); + if (Length > len - index) + Length = len - index; + + lpfc_fill_vpd(phba, vpd, Length, &index); + finished = 0; + break; + case 0x78: + finished = 1; + break; + default: + index ++; + break; + } + } + + return(1); +} + +/** + * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description + * @phba: pointer to lpfc hba data structure. + * @mdp: pointer to the data structure to hold the derived model name. + * @descp: pointer to the data structure to hold the derived description. + * + * This routine retrieves HBA's description based on its registered PCI device + * ID. The @descp passed into this function points to an array of 256 chars. It + * shall be returned with the model name, maximum speed, and the host bus type. + * The @mdp passed into this function points to an array of 80 chars. When the + * function returns, the @mdp will be filled with the model name. 
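+ *
+ * For illustration (the port value is hypothetical): with subsystem
+ * device ID PCI_DEVICE_ID_TLFC_3162 and empty buffers passed in, the
+ * body below produces
+ *
+ *	mdp:   "3162"
+ *	descp: "ATTO ThunderLink FC 3162, Fibre Channel Adapter Initiator,
+ *	        Port 1"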
+ **/ +static void +lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) +{ + uint16_t sub_dev_id = phba->pcidev->subsystem_device; + char *model = ""; + int tbolt = 0; + + switch (sub_dev_id) { + case PCI_DEVICE_ID_CLRY_161E: + model = "161E"; + break; + case PCI_DEVICE_ID_CLRY_162E: + model = "162E"; + break; + case PCI_DEVICE_ID_CLRY_164E: + model = "164E"; + break; + case PCI_DEVICE_ID_CLRY_161P: + model = "161P"; + break; + case PCI_DEVICE_ID_CLRY_162P: + model = "162P"; + break; + case PCI_DEVICE_ID_CLRY_164P: + model = "164P"; + break; + case PCI_DEVICE_ID_CLRY_321E: + model = "321E"; + break; + case PCI_DEVICE_ID_CLRY_322E: + model = "322E"; + break; + case PCI_DEVICE_ID_CLRY_324E: + model = "324E"; + break; + case PCI_DEVICE_ID_CLRY_321P: + model = "321P"; + break; + case PCI_DEVICE_ID_CLRY_322P: + model = "322P"; + break; + case PCI_DEVICE_ID_CLRY_324P: + model = "324P"; + break; + case PCI_DEVICE_ID_TLFC_2XX2: + model = "2XX2"; + tbolt = 1; + break; + case PCI_DEVICE_ID_TLFC_3162: + model = "3162"; + tbolt = 1; + break; + case PCI_DEVICE_ID_TLFC_3322: + model = "3322"; + tbolt = 1; + break; + default: + model = "Unknown"; + break; + } + + if (mdp && mdp[0] == '\0') + snprintf(mdp, 79, "%s", model); + + if (descp && descp[0] == '\0') + snprintf(descp, 255, + "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s", + (tbolt) ? "ThunderLink FC " : "Celerity FC-", + model, + phba->Port); +} + +/** + * lpfc_get_hba_model_desc - Retrieve HBA device model name and description + * @phba: pointer to lpfc hba data structure. + * @mdp: pointer to the data structure to hold the derived model name. + * @descp: pointer to the data structure to hold the derived description. + * + * This routine retrieves HBA's description based on its registered PCI device + * ID. The @descp passed into this function points to an array of 256 chars. It + * shall be returned with the model name, maximum speed, and the host bus type. + * The @mdp passed into this function points to an array of 80 chars. When the + * function returns, the @mdp will be filled with the model name. 
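+ *
+ * For illustration (assuming the adapter reports LMT_32Gb and the buffers
+ * are passed in empty): with PCI device ID PCI_DEVICE_ID_LANCER_G6_FC the
+ * body below produces
+ *
+ *	mdp:   "LPe32000"
+ *	descp: "Emulex LPe32000 32Gb PCIe Fibre Channel Adapter"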
+ **/ +static void +lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) +{ + lpfc_vpd_t *vp; + uint16_t dev_id = phba->pcidev->device; + int max_speed; + int GE = 0; + int oneConnect = 0; /* default is not a oneConnect */ + struct { + char *name; + char *bus; + char *function; + } m = {"", "", ""}; + + if (mdp && mdp[0] != '\0' + && descp && descp[0] != '\0') + return; + + if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) { + lpfc_get_atto_model_desc(phba, mdp, descp); + return; + } + + if (phba->lmt & LMT_64Gb) + max_speed = 64; + else if (phba->lmt & LMT_32Gb) + max_speed = 32; + else if (phba->lmt & LMT_16Gb) + max_speed = 16; + else if (phba->lmt & LMT_10Gb) + max_speed = 10; + else if (phba->lmt & LMT_8Gb) + max_speed = 8; + else if (phba->lmt & LMT_4Gb) + max_speed = 4; + else if (phba->lmt & LMT_2Gb) + max_speed = 2; + else if (phba->lmt & LMT_1Gb) + max_speed = 1; + else + max_speed = 0; + + vp = &phba->vpd; + + switch (dev_id) { + case PCI_DEVICE_ID_FIREFLY: + m = (typeof(m)){"LP6000", "PCI", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_SUPERFLY: + if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) + m = (typeof(m)){"LP7000", "PCI", ""}; + else + m = (typeof(m)){"LP7000E", "PCI", ""}; + m.function = "Obsolete, Unsupported Fibre Channel Adapter"; + break; + case PCI_DEVICE_ID_DRAGONFLY: + m = (typeof(m)){"LP8000", "PCI", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_CENTAUR: + if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) + m = (typeof(m)){"LP9002", "PCI", ""}; + else + m = (typeof(m)){"LP9000", "PCI", ""}; + m.function = "Obsolete, Unsupported Fibre Channel Adapter"; + break; + case PCI_DEVICE_ID_RFLY: + m = (typeof(m)){"LP952", "PCI", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_PEGASUS: + m = (typeof(m)){"LP9802", "PCI-X", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_THOR: + m = (typeof(m)){"LP10000", "PCI-X", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_VIPER: + m = (typeof(m)){"LPX1000", "PCI-X", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_PFLY: + m = (typeof(m)){"LP982", "PCI-X", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_TFLY: + m = (typeof(m)){"LP1050", "PCI-X", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_HELIOS: + m = (typeof(m)){"LP11000", "PCI-X2", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_HELIOS_SCSP: + m = (typeof(m)){"LP11000-SP", "PCI-X2", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_HELIOS_DCSP: + m = (typeof(m)){"LP11002-SP", "PCI-X2", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_NEPTUNE: + m = (typeof(m)){"LPe1000", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_NEPTUNE_SCSP: + m = (typeof(m)){"LPe1000-SP", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_NEPTUNE_DCSP: + m = (typeof(m)){"LPe1002-SP", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_BMID: + m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_BSMB: + m = (typeof(m)){"LP111", "PCI-X2", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_ZEPHYR: + m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel 
Adapter"}; + break; + case PCI_DEVICE_ID_ZEPHYR_SCSP: + m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_ZEPHYR_DCSP: + m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; + GE = 1; + break; + case PCI_DEVICE_ID_ZMID: + m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_ZSMB: + m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_LP101: + m = (typeof(m)){"LP101", "PCI-X", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_LP10000S: + m = (typeof(m)){"LP10000-S", "PCI", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_LP11000S: + m = (typeof(m)){"LP11000-S", "PCI-X2", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_LPE11000S: + m = (typeof(m)){"LPe11000-S", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_SAT: + m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_SAT_MID: + m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_SAT_SMB: + m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_SAT_DCSP: + m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_SAT_SCSP: + m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_SAT_S: + m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_PROTEUS_VF: + m = (typeof(m)){"LPev12000", "PCIe IOV", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_PROTEUS_PF: + m = (typeof(m)){"LPev12000", "PCIe IOV", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_PROTEUS_S: + m = (typeof(m)){"LPemv12002-S", "PCIe IOV", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_TIGERSHARK: + oneConnect = 1; + m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; + break; + case PCI_DEVICE_ID_TOMCAT: + oneConnect = 1; + m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; + break; + case PCI_DEVICE_ID_FALCON: + m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", + "EmulexSecure Fibre"}; + break; + case PCI_DEVICE_ID_BALIUS: + m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_LANCER_FC: + m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_LANCER_FC_VF: + m = (typeof(m)){"LPe16000", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_LANCER_FCOE: + oneConnect = 1; + m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; + break; + case PCI_DEVICE_ID_LANCER_FCOE_VF: + oneConnect = 1; + m = (typeof(m)){"OCe15100", "PCIe", + "Obsolete, Unsupported FCoE"}; + break; + case PCI_DEVICE_ID_LANCER_G6_FC: + m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_LANCER_G7_FC: + m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_LANCER_G7P_FC: + m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"}; + break; + case PCI_DEVICE_ID_SKYHAWK: + case PCI_DEVICE_ID_SKYHAWK_VF: + oneConnect = 1; + m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; + break; + default: + m = (typeof(m)){"Unknown", "", ""}; + break; + } + + if (mdp && mdp[0] == '\0') + snprintf(mdp, 79,"%s", m.name); + /* + * oneConnect hba requires special processing, they 
are all initiators + * and we put the port number on the end + */ + if (descp && descp[0] == '\0') { + if (oneConnect) + snprintf(descp, 255, + "Emulex OneConnect %s, %s Initiator %s", + m.name, m.function, + phba->Port); + else if (max_speed == 0) + snprintf(descp, 255, + "Emulex %s %s %s", + m.name, m.bus, m.function); + else + snprintf(descp, 255, + "Emulex %s %d%s %s %s", + m.name, max_speed, (GE) ? "GE" : "Gb", + m.bus, m.function); + } +} + +/** + * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring + * @phba: pointer to lpfc hba data structure. + * @pring: pointer to a IOCB ring. + * @cnt: the number of IOCBs to be posted to the IOCB ring. + * + * This routine posts a given number of IOCBs with the associated DMA buffer + * descriptors specified by the cnt argument to the given IOCB ring. + * + * Return codes + * The number of IOCBs NOT able to be posted to the IOCB ring. + **/ +int +lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) +{ + IOCB_t *icmd; + struct lpfc_iocbq *iocb; + struct lpfc_dmabuf *mp1, *mp2; + + cnt += pring->missbufcnt; + + /* While there are buffers to post */ + while (cnt > 0) { + /* Allocate buffer for command iocb */ + iocb = lpfc_sli_get_iocbq(phba); + if (iocb == NULL) { + pring->missbufcnt = cnt; + return cnt; + } + icmd = &iocb->iocb; + + /* 2 buffers can be posted per command */ + /* Allocate buffer to post */ + mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); + if (mp1) + mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); + if (!mp1 || !mp1->virt) { + kfree(mp1); + lpfc_sli_release_iocbq(phba, iocb); + pring->missbufcnt = cnt; + return cnt; + } + + INIT_LIST_HEAD(&mp1->list); + /* Allocate buffer to post */ + if (cnt > 1) { + mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); + if (mp2) + mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, + &mp2->phys); + if (!mp2 || !mp2->virt) { + kfree(mp2); + lpfc_mbuf_free(phba, mp1->virt, mp1->phys); + kfree(mp1); + lpfc_sli_release_iocbq(phba, iocb); + pring->missbufcnt = cnt; + return cnt; + } + + INIT_LIST_HEAD(&mp2->list); + } else { + mp2 = NULL; + } + + icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); + icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); + icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; + icmd->ulpBdeCount = 1; + cnt--; + if (mp2) { + icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); + icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); + icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; + cnt--; + icmd->ulpBdeCount = 2; + } + + icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; + icmd->ulpLe = 1; + + if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == + IOCB_ERROR) { + lpfc_mbuf_free(phba, mp1->virt, mp1->phys); + kfree(mp1); + cnt++; + if (mp2) { + lpfc_mbuf_free(phba, mp2->virt, mp2->phys); + kfree(mp2); + cnt++; + } + lpfc_sli_release_iocbq(phba, iocb); + pring->missbufcnt = cnt; + return cnt; + } + lpfc_sli_ringpostbuf_put(phba, pring, mp1); + if (mp2) + lpfc_sli_ringpostbuf_put(phba, pring, mp2); + } + pring->missbufcnt = 0; + return 0; +} + +/** + * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring + * @phba: pointer to lpfc hba data structure. + * + * This routine posts initial receive IOCB buffers to the ELS ring. The + * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is + * set to 64 IOCBs. SLI3 only. 
+ * + * Return codes + * 0 - success (currently always success) + **/ +static int +lpfc_post_rcv_buf(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + + /* Ring 0, ELS / CT buffers */ + lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); + /* Ring 2 - FCP no buffers needed */ + + return 0; +} + +#define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) + +/** + * lpfc_sha_init - Set up initial array of hash table entries + * @HashResultPointer: pointer to an array as hash table. + * + * This routine sets up the initial values to the array of hash table entries + * for the LC HBAs. + **/ +static void +lpfc_sha_init(uint32_t * HashResultPointer) +{ + HashResultPointer[0] = 0x67452301; + HashResultPointer[1] = 0xEFCDAB89; + HashResultPointer[2] = 0x98BADCFE; + HashResultPointer[3] = 0x10325476; + HashResultPointer[4] = 0xC3D2E1F0; +} + +/** + * lpfc_sha_iterate - Iterate initial hash table with the working hash table + * @HashResultPointer: pointer to an initial/result hash table. + * @HashWorkingPointer: pointer to an working hash table. + * + * This routine iterates an initial hash table pointed by @HashResultPointer + * with the values from the working hash table pointeed by @HashWorkingPointer. + * The results are putting back to the initial hash table, returned through + * the @HashResultPointer as the result hash table. + **/ +static void +lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) +{ + int t; + uint32_t TEMP; + uint32_t A, B, C, D, E; + t = 16; + do { + HashWorkingPointer[t] = + S(1, + HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - + 8] ^ + HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); + } while (++t <= 79); + t = 0; + A = HashResultPointer[0]; + B = HashResultPointer[1]; + C = HashResultPointer[2]; + D = HashResultPointer[3]; + E = HashResultPointer[4]; + + do { + if (t < 20) { + TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; + } else if (t < 40) { + TEMP = (B ^ C ^ D) + 0x6ED9EBA1; + } else if (t < 60) { + TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; + } else { + TEMP = (B ^ C ^ D) + 0xCA62C1D6; + } + TEMP += S(5, A) + E + HashWorkingPointer[t]; + E = D; + D = C; + C = S(30, B); + B = A; + A = TEMP; + } while (++t <= 79); + + HashResultPointer[0] += A; + HashResultPointer[1] += B; + HashResultPointer[2] += C; + HashResultPointer[3] += D; + HashResultPointer[4] += E; + +} + +/** + * lpfc_challenge_key - Create challenge key based on WWPN of the HBA + * @RandomChallenge: pointer to the entry of host challenge random number array. + * @HashWorking: pointer to the entry of the working hash array. + * + * This routine calculates the working hash array referred by @HashWorking + * from the challenge random numbers associated with the host, referred by + * @RandomChallenge. The result is put into the entry of the working hash + * array and returned by reference through @HashWorking. + **/ +static void +lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) +{ + *HashWorking = (*RandomChallenge ^ *HashWorking); +} + +/** + * lpfc_hba_init - Perform special handling for LC HBA initialization + * @phba: pointer to lpfc hba data structure. + * @hbainit: pointer to an array of unsigned 32-bit integers. + * + * This routine performs the special handling for LC HBA initialization. 
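+ *
+ * The helpers above use the standard SHA-1 initial values and round
+ * constants.  In outline, the 80-entry working array is seeded as
+ *
+ *	HashWorking[0] = HashWorking[78] = first 32 bits of the WWNN;
+ *	HashWorking[1] = HashWorking[79] = next 32 bits of the WWNN;
+ *	HashWorking[t] ^= phba->RandomData[t]   for t = 0..6;
+ *
+ * before lpfc_sha_iterate() folds it into @hbainit.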
+ **/ +void +lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) +{ + int t; + uint32_t *HashWorking; + uint32_t *pwwnn = (uint32_t *) phba->wwnn; + + HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); + if (!HashWorking) + return; + + HashWorking[0] = HashWorking[78] = *pwwnn++; + HashWorking[1] = HashWorking[79] = *pwwnn; + + for (t = 0; t < 7; t++) + lpfc_challenge_key(phba->RandomData + t, HashWorking + t); + + lpfc_sha_init(hbainit); + lpfc_sha_iterate(hbainit, HashWorking); + kfree(HashWorking); +} + +/** + * lpfc_cleanup - Performs vport cleanups before deleting a vport + * @vport: pointer to a virtual N_Port data structure. + * + * This routine performs the necessary cleanups before deleting the @vport. + * It invokes the discovery state machine to perform necessary state + * transitions and to release the ndlps associated with the @vport. Note, + * the physical port is treated as @vport 0. + **/ +void +lpfc_cleanup(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp, *next_ndlp; + int i = 0; + + if (phba->link_state > LPFC_LINK_DOWN) + lpfc_port_link_failure(vport); + + /* Clean up VMID resources */ + if (lpfc_is_vmid_enabled(phba)) + lpfc_vmid_vport_cleanup(vport); + + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (vport->port_type != LPFC_PHYSICAL_PORT && + ndlp->nlp_DID == Fabric_DID) { + /* Just free up ndlp with Fabric_DID for vports */ + lpfc_nlp_put(ndlp); + continue; + } + + if (ndlp->nlp_DID == Fabric_Cntl_DID && + ndlp->nlp_state == NLP_STE_UNUSED_NODE) { + lpfc_nlp_put(ndlp); + continue; + } + + /* Fabric Ports not in UNMAPPED state are cleaned up in the + * DEVICE_RM event. + */ + if (ndlp->nlp_type & NLP_FABRIC && + ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RECOVERY); + + if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD))) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + } + + /* This is a special case flush to return all + * IOs before entering this loop. There are + * two points in the code where a flush is + * avoided if the FC_UNLOADING flag is set. + * one is in the multipool destroy, + * (this prevents a crash) and the other is + * in the nvme abort handler, ( also prevents + * a crash). Both of these exceptions are + * cases where the slot is still accessible. + * The flush here is only when the pci slot + * is offline. + */ + if (vport->load_flag & FC_UNLOADING && + pci_channel_offline(phba->pcidev)) + lpfc_sli_flush_io_rings(vport->phba); + + /* At this point, ALL ndlp's should be gone + * because of the previous NLP_EVT_DEVICE_RM. + * Lets wait for this to happen, if needed. + */ + while (!list_empty(&vport->fc_nodes)) { + if (i++ > 3000) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0233 Nodelist not empty\n"); + list_for_each_entry_safe(ndlp, next_ndlp, + &vport->fc_nodes, nlp_listp) { + lpfc_printf_vlog(ndlp->vport, KERN_ERR, + LOG_DISCOVERY, + "0282 did:x%x ndlp:x%px " + "refcnt:%d xflags x%x nflag x%x\n", + ndlp->nlp_DID, (void *)ndlp, + kref_read(&ndlp->kref), + ndlp->fc4_xpt_flags, + ndlp->nlp_flag); + } + break; + } + + /* Wait for any activity on ndlps to settle */ + msleep(10); + } + lpfc_cleanup_vports_rrqs(vport, NULL); +} + +/** + * lpfc_stop_vport_timers - Stop all the timers associated with a vport + * @vport: pointer to a virtual N_Port data structure. + * + * This routine stops all the timers associated with a @vport. 
This function
+ * is invoked before disabling or deleting a @vport. Note that the physical
+ * port is treated as @vport 0.
+ **/
+void
+lpfc_stop_vport_timers(struct lpfc_vport *vport)
+{
+	del_timer_sync(&vport->els_tmofunc);
+	del_timer_sync(&vport->delayed_disc_tmo);
+	lpfc_can_disctmo(vport);
+	return;
+}
+
+/**
+ * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscovery wait timer if it's on. The
+ * caller of this routine should already hold the host lock.
+ **/
+void
+__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+	/* Clear pending FCF rediscovery wait flag */
+	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+
+	/* Now, try to stop the timer */
+	del_timer(&phba->fcf.redisc_wait);
+}
+
+/**
+ * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscovery wait timer if it's on. It
+ * checks whether the FCF rediscovery wait timer is pending with the host
+ * lock held before proceeding with disabling the timer and clearing the
+ * wait timer pending flag.
+ **/
+void
+lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+	spin_lock_irq(&phba->hbalock);
+	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
+		/* FCF rediscovery timer already fired or stopped */
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+	/* Clear failover in progress flags */
+	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_cmf_stop - Stop CMF processing
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This is called when the link goes down or if CMF mode is turned OFF.
+ * It is also called when going offline or unloaded just before the
+ * congestion info buffer is unregistered.
+ **/ +void +lpfc_cmf_stop(struct lpfc_hba *phba) +{ + int cpu; + struct lpfc_cgn_stat *cgs; + + /* We only do something if CMF is enabled */ + if (!phba->sli4_hba.pc_sli4_params.cmf) + return; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6221 Stop CMF / Cancel Timer\n"); + + /* Cancel the CMF timer */ + hrtimer_cancel(&phba->cmf_stats_timer); + hrtimer_cancel(&phba->cmf_timer); + + /* Zero CMF counters */ + atomic_set(&phba->cmf_busy, 0); + for_each_present_cpu(cpu) { + cgs = per_cpu_ptr(phba->cmf_stat, cpu); + atomic64_set(&cgs->total_bytes, 0); + atomic64_set(&cgs->rcv_bytes, 0); + atomic_set(&cgs->rx_io_cnt, 0); + atomic64_set(&cgs->rx_latency, 0); + } + atomic_set(&phba->cmf_bw_wait, 0); + + /* Resume any blocked IO - Queue unblock on workqueue */ + queue_work(phba->wq, &phba->unblock_request_work); +} + +static inline uint64_t +lpfc_get_max_line_rate(struct lpfc_hba *phba) +{ + uint64_t rate = lpfc_sli_port_speed_get(phba); + + return ((((unsigned long)rate) * 1024 * 1024) / 10); +} + +void +lpfc_cmf_signal_init(struct lpfc_hba *phba) +{ + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6223 Signal CMF init\n"); + + /* Use the new fc_linkspeed to recalculate */ + phba->cmf_interval_rate = LPFC_CMF_INTERVAL; + phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba); + phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * + phba->cmf_interval_rate, 1000); + phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count; + + /* This is a signal to firmware to sync up CMF BW with link speed */ + lpfc_issue_cmf_sync_wqe(phba, 0, 0); +} + +/** + * lpfc_cmf_start - Start CMF processing + * @phba: pointer to lpfc hba data structure. + * + * This is called when the link comes up or if CMF mode is turned OFF + * to Monitor or Managed. + **/ +void +lpfc_cmf_start(struct lpfc_hba *phba) +{ + struct lpfc_cgn_stat *cgs; + int cpu; + + /* We only do something if CMF is enabled */ + if (!phba->sli4_hba.pc_sli4_params.cmf || + phba->cmf_active_mode == LPFC_CFG_OFF) + return; + + /* Reinitialize congestion buffer info */ + lpfc_init_congestion_buf(phba); + + atomic_set(&phba->cgn_fabric_warn_cnt, 0); + atomic_set(&phba->cgn_fabric_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_warn_cnt, 0); + + atomic_set(&phba->cmf_busy, 0); + for_each_present_cpu(cpu) { + cgs = per_cpu_ptr(phba->cmf_stat, cpu); + atomic64_set(&cgs->total_bytes, 0); + atomic64_set(&cgs->rcv_bytes, 0); + atomic_set(&cgs->rx_io_cnt, 0); + atomic64_set(&cgs->rx_latency, 0); + } + phba->cmf_latency.tv_sec = 0; + phba->cmf_latency.tv_nsec = 0; + + lpfc_cmf_signal_init(phba); + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6222 Start CMF / Timer\n"); + + phba->cmf_timer_cnt = 0; + hrtimer_start(&phba->cmf_timer, + ktime_set(0, LPFC_CMF_INTERVAL * NSEC_PER_MSEC), + HRTIMER_MODE_REL); + hrtimer_start(&phba->cmf_stats_timer, + ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC), + HRTIMER_MODE_REL); + /* Setup for latency check in IO cmpl routines */ + ktime_get_real_ts64(&phba->cmf_latency); + + atomic_set(&phba->cmf_bw_wait, 0); + atomic_set(&phba->cmf_stop_io, 0); +} + +/** + * lpfc_stop_hba_timers - Stop all the timers associated with an HBA + * @phba: pointer to lpfc hba data structure. + * + * This routine stops all the timers associated with a HBA. This function is + * invoked before either putting a HBA offline or unloading the driver. 
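+ *
+ * The timers here are stopped with del_timer_sync(), which also waits for
+ * a handler currently running on another CPU, for example
+ *
+ *	del_timer_sync(&phba->hb_tmofunc);
+ *
+ * whereas __lpfc_sli4_stop_fcf_redisc_wait_timer() uses plain del_timer(),
+ * presumably to avoid waiting on the timer handler while the host lock is
+ * already held by its caller.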
+ **/ +void +lpfc_stop_hba_timers(struct lpfc_hba *phba) +{ + if (phba->pport) + lpfc_stop_vport_timers(phba->pport); + cancel_delayed_work_sync(&phba->eq_delay_work); + cancel_delayed_work_sync(&phba->idle_stat_delay_work); + del_timer_sync(&phba->sli.mbox_tmo); + del_timer_sync(&phba->fabric_block_timer); + del_timer_sync(&phba->eratt_poll); + del_timer_sync(&phba->hb_tmofunc); + if (phba->sli_rev == LPFC_SLI_REV4) { + del_timer_sync(&phba->rrq_tmr); + phba->hba_flag &= ~HBA_RRQ_ACTIVE; + } + phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + /* Stop any LightPulse device specific driver timers */ + del_timer_sync(&phba->fcp_poll_timer); + break; + case LPFC_PCI_DEV_OC: + /* Stop any OneConnect device specific driver timers */ + lpfc_sli4_stop_fcf_redisc_wait_timer(phba); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0297 Invalid device group (x%x)\n", + phba->pci_dev_grp); + break; + } + return; +} + +/** + * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked + * @phba: pointer to lpfc hba data structure. + * @mbx_action: flag for mailbox no wait action. + * + * This routine marks a HBA's management interface as blocked. Once the HBA's + * management interface is marked as blocked, all the user space access to + * the HBA, whether they are from sysfs interface or libdfc interface will + * all be blocked. The HBA is set to block the management interface when the + * driver prepares the HBA interface for online or offline. + **/ +static void +lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) +{ + unsigned long iflag; + uint8_t actcmd = MBX_HEARTBEAT; + unsigned long timeout; + + spin_lock_irqsave(&phba->hbalock, iflag); + phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; + spin_unlock_irqrestore(&phba->hbalock, iflag); + if (mbx_action == LPFC_MBX_NO_WAIT) + return; + timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; + spin_lock_irqsave(&phba->hbalock, iflag); + if (phba->sli.mbox_active) { + actcmd = phba->sli.mbox_active->u.mb.mbxCommand; + /* Determine how long we might wait for the active mailbox + * command to be gracefully completed by firmware. + */ + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, + phba->sli.mbox_active) * 1000) + jiffies; + } + spin_unlock_irqrestore(&phba->hbalock, iflag); + + /* Wait for the outstnading mailbox command to complete */ + while (phba->sli.mbox_active) { + /* Check active mailbox complete status every 2ms */ + msleep(2); + if (time_after(jiffies, timeout)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2813 Mgmt IO is Blocked %x " + "- mbox cmd %x still active\n", + phba->sli.sli_flag, actcmd); + break; + } + } +} + +/** + * lpfc_sli4_node_prep - Assign RPIs for active nodes. + * @phba: pointer to lpfc hba data structure. + * + * Allocate RPIs for all active remote nodes. This is needed whenever + * an SLI4 adapter is reset and the driver is not unloading. Its purpose + * is to fixup the temporary rpi assignments. 
+ **/ +void +lpfc_sli4_node_prep(struct lpfc_hba *phba) +{ + struct lpfc_nodelist *ndlp, *next_ndlp; + struct lpfc_vport **vports; + int i, rpi; + + if (phba->sli_rev != LPFC_SLI_REV4) + return; + + vports = lpfc_create_vport_work_array(phba); + if (vports == NULL) + return; + + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + if (vports[i]->load_flag & FC_UNLOADING) + continue; + + list_for_each_entry_safe(ndlp, next_ndlp, + &vports[i]->fc_nodes, + nlp_listp) { + rpi = lpfc_sli4_alloc_rpi(phba); + if (rpi == LPFC_RPI_ALLOC_ERROR) { + /* TODO print log? */ + continue; + } + ndlp->nlp_rpi = rpi; + lpfc_printf_vlog(ndlp->vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "0009 Assign RPI x%x to ndlp x%px " + "DID:x%06x flg:x%x\n", + ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, + ndlp->nlp_flag); + } + } + lpfc_destroy_vport_work_array(phba, vports); +} + +/** + * lpfc_create_expedite_pool - create expedite pool + * @phba: pointer to lpfc hba data structure. + * + * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 + * to expedite pool. Mark them as expedite. + **/ +static void lpfc_create_expedite_pool(struct lpfc_hba *phba) +{ + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd_next; + struct lpfc_epd_pool *epd_pool; + unsigned long iflag; + + epd_pool = &phba->epd_pool; + qp = &phba->sli4_hba.hdwq[0]; + + spin_lock_init(&epd_pool->lock); + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); + spin_lock(&epd_pool->lock); + INIT_LIST_HEAD(&epd_pool->list); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &qp->lpfc_io_buf_list_put, list) { + list_move_tail(&lpfc_ncmd->list, &epd_pool->list); + lpfc_ncmd->expedite = true; + qp->put_io_bufs--; + epd_pool->count++; + if (epd_pool->count >= XRI_BATCH) + break; + } + spin_unlock(&epd_pool->lock); + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); +} + +/** + * lpfc_destroy_expedite_pool - destroy expedite pool + * @phba: pointer to lpfc hba data structure. + * + * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put + * of HWQ 0. Clear the mark. + **/ +static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba) +{ + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd_next; + struct lpfc_epd_pool *epd_pool; + unsigned long iflag; + + epd_pool = &phba->epd_pool; + qp = &phba->sli4_hba.hdwq[0]; + + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); + spin_lock(&epd_pool->lock); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &epd_pool->list, list) { + list_move_tail(&lpfc_ncmd->list, + &qp->lpfc_io_buf_list_put); + lpfc_ncmd->flags = false; + qp->put_io_bufs++; + epd_pool->count--; + } + spin_unlock(&epd_pool->lock); + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); +} + +/** + * lpfc_create_multixri_pools - create multi-XRI pools + * @phba: pointer to lpfc hba data structure. + * + * This routine initialize public, private per HWQ. Then, move XRIs from + * lpfc_io_buf_list_put to public pool. High and low watermark are also + * Initialized. 
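+ *
+ * For illustration (the numbers are hypothetical): with io_xri_cnt of
+ * 2048 and cfg_hdw_queue of 16, the body below gives each hardware queue
+ *
+ *	xri_limit                = 2048 / 16 = 128
+ *	pvt_pool high_watermark  = 128 / 2   = 64
+ *	pvt_pool low_watermark   = XRI_BATCH
+ *
+ * and the buffers on each put list start out on the public pbl_pool.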
+ **/ +void lpfc_create_multixri_pools(struct lpfc_hba *phba) +{ + u32 i, j; + u32 hwq_count; + u32 count_per_hwq; + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd_next; + unsigned long iflag; + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_pbl_pool *pbl_pool; + struct lpfc_pvt_pool *pvt_pool; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", + phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, + phba->sli4_hba.io_xri_cnt); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + lpfc_create_expedite_pool(phba); + + hwq_count = phba->cfg_hdw_queue; + count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; + + for (i = 0; i < hwq_count; i++) { + multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); + + if (!multixri_pool) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1238 Failed to allocate memory for " + "multixri_pool\n"); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + lpfc_destroy_expedite_pool(phba); + + j = 0; + while (j < i) { + qp = &phba->sli4_hba.hdwq[j]; + kfree(qp->p_multixri_pool); + j++; + } + phba->cfg_xri_rebalancing = 0; + return; + } + + qp = &phba->sli4_hba.hdwq[i]; + qp->p_multixri_pool = multixri_pool; + + multixri_pool->xri_limit = count_per_hwq; + multixri_pool->rrb_next_hwqid = i; + + /* Deal with public free xri pool */ + pbl_pool = &multixri_pool->pbl_pool; + spin_lock_init(&pbl_pool->lock); + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); + spin_lock(&pbl_pool->lock); + INIT_LIST_HEAD(&pbl_pool->list); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &qp->lpfc_io_buf_list_put, list) { + list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); + qp->put_io_bufs--; + pbl_pool->count++; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", + pbl_pool->count, i); + spin_unlock(&pbl_pool->lock); + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); + + /* Deal with private free xri pool */ + pvt_pool = &multixri_pool->pvt_pool; + pvt_pool->high_watermark = multixri_pool->xri_limit / 2; + pvt_pool->low_watermark = XRI_BATCH; + spin_lock_init(&pvt_pool->lock); + spin_lock_irqsave(&pvt_pool->lock, iflag); + INIT_LIST_HEAD(&pvt_pool->list); + pvt_pool->count = 0; + spin_unlock_irqrestore(&pvt_pool->lock, iflag); + } +} + +/** + * lpfc_destroy_multixri_pools - destroy multi-XRI pools + * @phba: pointer to lpfc hba data structure. + * + * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
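/*
 * Editor's aside (illustration only, not part of the upstream patch):
 * the multi-XRI pool setup above is, at heart, an even split of the IO
 * XRIs across hardware queues plus two watermarks for the private
 * pool.  A sketch of just that arithmetic; the struct and parameter
 * names are hypothetical.
 */
struct pool_limits {
        unsigned int xri_limit;         /* per-HWQ share of all IO XRIs */
        unsigned int high_watermark;    /* private pool may grow to half */
        unsigned int low_watermark;     /* refill private pool below this */
};

static struct pool_limits size_multixri_pool(unsigned int total_io_xri,
                                             unsigned int hwq_count,
                                             unsigned int batch)
{
        struct pool_limits lim = { 0 };

        if (!hwq_count)
                return lim;
        lim.xri_limit = total_io_xri / hwq_count;
        lim.high_watermark = lim.xri_limit / 2;
        lim.low_watermark = batch;      /* XRI_BATCH in the driver */
        return lim;
}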
+ **/ +static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) +{ + u32 i; + u32 hwq_count; + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd_next; + unsigned long iflag; + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_pbl_pool *pbl_pool; + struct lpfc_pvt_pool *pvt_pool; + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + lpfc_destroy_expedite_pool(phba); + + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_sli_flush_io_rings(phba); + + hwq_count = phba->cfg_hdw_queue; + + for (i = 0; i < hwq_count; i++) { + qp = &phba->sli4_hba.hdwq[i]; + multixri_pool = qp->p_multixri_pool; + if (!multixri_pool) + continue; + + qp->p_multixri_pool = NULL; + + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); + + /* Deal with public free xri pool */ + pbl_pool = &multixri_pool->pbl_pool; + spin_lock(&pbl_pool->lock); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", + pbl_pool->count, i); + + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &pbl_pool->list, list) { + list_move_tail(&lpfc_ncmd->list, + &qp->lpfc_io_buf_list_put); + qp->put_io_bufs++; + pbl_pool->count--; + } + + INIT_LIST_HEAD(&pbl_pool->list); + pbl_pool->count = 0; + + spin_unlock(&pbl_pool->lock); + + /* Deal with private free xri pool */ + pvt_pool = &multixri_pool->pvt_pool; + spin_lock(&pvt_pool->lock); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", + pvt_pool->count, i); + + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &pvt_pool->list, list) { + list_move_tail(&lpfc_ncmd->list, + &qp->lpfc_io_buf_list_put); + qp->put_io_bufs++; + pvt_pool->count--; + } + + INIT_LIST_HEAD(&pvt_pool->list); + pvt_pool->count = 0; + + spin_unlock(&pvt_pool->lock); + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); + + kfree(multixri_pool); + } +} + +/** + * lpfc_online - Initialize and bring a HBA online + * @phba: pointer to lpfc hba data structure. + * + * This routine initializes the HBA and brings a HBA online. During this + * process, the management interface is blocked to prevent user space access + * to the HBA interfering with the driver initialization. + * + * Return codes + * 0 - successful + * 1 - failed + **/ +int +lpfc_online(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport; + struct lpfc_vport **vports; + int i, error = 0; + bool vpis_cleared = false; + + if (!phba) + return 0; + vport = phba->pport; + + if (!(vport->fc_flag & FC_OFFLINE_MODE)) + return 0; + + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0458 Bring Adapter online\n"); + + lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); + + if (phba->sli_rev == LPFC_SLI_REV4) { + if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ + lpfc_unblock_mgmt_io(phba); + return 1; + } + spin_lock_irq(&phba->hbalock); + if (!phba->sli4_hba.max_cfg_param.vpi_used) + vpis_cleared = true; + spin_unlock_irq(&phba->hbalock); + + /* Reestablish the local initiator port. + * The offline process destroyed the previous lport. 
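/*
 * Editor's aside (illustration only, not part of the upstream patch):
 * lpfc_online() above follows a simple shape: block management I/O,
 * attempt the HBA setup, and unblock again on failure so user space is
 * never left locked out.  A stripped-down model of that control flow;
 * every name below is a hypothetical stand-in.
 */
struct hba_model {
        int offline;
        int mgmt_blocked;
        int (*setup)(struct hba_model *hba);    /* SLI3/SLI4 specific setup */
};

static int bring_online(struct hba_model *hba)
{
        if (!hba->offline)
                return 0;                       /* already online */

        hba->mgmt_blocked = 1;                  /* lpfc_block_mgmt_io() */
        if (hba->setup(hba)) {
                hba->mgmt_blocked = 0;          /* lpfc_unblock_mgmt_io() */
                return 1;                       /* setup failed */
        }
        hba->offline = 0;
        hba->mgmt_blocked = 0;                  /* success: unblock as well */
        return 0;
}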
+ */ + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && + !phba->nvmet_support) { + error = lpfc_nvme_create_localport(phba->pport); + if (error) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6132 NVME restore reg failed " + "on nvmei error x%x\n", error); + } + } else { + lpfc_sli_queue_init(phba); + if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ + lpfc_unblock_mgmt_io(phba); + return 1; + } + } + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + struct Scsi_Host *shost; + shost = lpfc_shost_from_vport(vports[i]); + spin_lock_irq(shost->host_lock); + vports[i]->fc_flag &= ~FC_OFFLINE_MODE; + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) + vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + if (phba->sli_rev == LPFC_SLI_REV4) { + vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + if ((vpis_cleared) && + (vports[i]->port_type != + LPFC_PHYSICAL_PORT)) + vports[i]->vpi = 0; + } + spin_unlock_irq(shost->host_lock); + } + } + lpfc_destroy_vport_work_array(phba, vports); + + if (phba->cfg_xri_rebalancing) + lpfc_create_multixri_pools(phba); + + lpfc_cpuhp_add(phba); + + lpfc_unblock_mgmt_io(phba); + return 0; +} + +/** + * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked + * @phba: pointer to lpfc hba data structure. + * + * This routine marks a HBA's management interface as not blocked. Once the + * HBA's management interface is marked as not blocked, all the user space + * access to the HBA, whether they are from sysfs interface or libdfc + * interface will be allowed. The HBA is set to block the management interface + * when the driver prepares the HBA interface for online or offline and then + * set to unblock the management interface afterwards. + **/ +void +lpfc_unblock_mgmt_io(struct lpfc_hba * phba) +{ + unsigned long iflag; + + spin_lock_irqsave(&phba->hbalock, iflag); + phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; + spin_unlock_irqrestore(&phba->hbalock, iflag); +} + +/** + * lpfc_offline_prep - Prepare a HBA to be brought offline + * @phba: pointer to lpfc hba data structure. + * @mbx_action: flag for mailbox shutdown action. + * + * This routine is invoked to prepare a HBA to be brought offline. It performs + * unregistration login to all the nodes on all vports and flushes the mailbox + * queue to make it ready to be brought offline. 
+ **/ +void +lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) +{ + struct lpfc_vport *vport = phba->pport; + struct lpfc_nodelist *ndlp, *next_ndlp; + struct lpfc_vport **vports; + struct Scsi_Host *shost; + int i; + int offline; + bool hba_pci_err; + + if (vport->fc_flag & FC_OFFLINE_MODE) + return; + + lpfc_block_mgmt_io(phba, mbx_action); + + lpfc_linkdown(phba); + + offline = pci_channel_offline(phba->pcidev); + hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); + + /* Issue an unreg_login to all nodes on all vports */ + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + if (vports[i]->load_flag & FC_UNLOADING) + continue; + shost = lpfc_shost_from_vport(vports[i]); + spin_lock_irq(shost->host_lock); + vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; + vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + vports[i]->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + + shost = lpfc_shost_from_vport(vports[i]); + list_for_each_entry_safe(ndlp, next_ndlp, + &vports[i]->fc_nodes, + nlp_listp) { + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(&ndlp->lock); + + if (offline || hba_pci_err) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~(NLP_UNREG_INP | + NLP_RPI_REGISTERED); + spin_unlock_irq(&ndlp->lock); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli_rpi_release(vports[i], + ndlp); + } else { + lpfc_unreg_rpi(vports[i], ndlp); + } + /* + * Whenever an SLI4 port goes offline, free the + * RPI. Get a new RPI when the adapter port + * comes back online. + */ + if (phba->sli_rev == LPFC_SLI_REV4) { + lpfc_printf_vlog(vports[i], KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "0011 Free RPI x%x on " + "ndlp: x%px did x%x\n", + ndlp->nlp_rpi, ndlp, + ndlp->nlp_DID); + lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + } + + if (ndlp->nlp_type & NLP_FABRIC) { + lpfc_disc_state_machine(vports[i], ndlp, + NULL, NLP_EVT_DEVICE_RECOVERY); + + /* Don't remove the node unless the node + * has been unregistered with the + * transport, and we're not in recovery + * before dev_loss_tmo triggered. + * Otherwise, let dev_loss take care of + * the node. + */ + if (!(ndlp->save_flags & + NLP_IN_RECOV_POST_DEV_LOSS) && + !(ndlp->fc4_xpt_flags & + (NVME_XPT_REGD | SCSI_XPT_REGD))) + lpfc_disc_state_machine + (vports[i], ndlp, + NULL, + NLP_EVT_DEVICE_RM); + } + } + } + } + lpfc_destroy_vport_work_array(phba, vports); + + lpfc_sli_mbox_sys_shutdown(phba, mbx_action); + + if (phba->wq) + flush_workqueue(phba->wq); +} + +/** + * lpfc_offline - Bring a HBA offline + * @phba: pointer to lpfc hba data structure. + * + * This routine actually brings a HBA offline. It stops all the timers + * associated with the HBA, brings down the SLI layer, and eventually + * marks the HBA as in offline state for the upper layer protocol. + **/ +void +lpfc_offline(struct lpfc_hba *phba) +{ + struct Scsi_Host *shost; + struct lpfc_vport **vports; + int i; + + if (phba->pport->fc_flag & FC_OFFLINE_MODE) + return; + + /* stop port and all timers associated with this hba */ + lpfc_stop_port(phba); + + /* Tear down the local and target port registrations. The + * nvme transports need to cleanup. 
+ */ + lpfc_nvmet_destroy_targetport(phba); + lpfc_nvme_destroy_localport(phba->pport); + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) + lpfc_stop_vport_timers(vports[i]); + lpfc_destroy_vport_work_array(phba, vports); + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0460 Bring Adapter offline\n"); + /* Bring down the SLI Layer and cleanup. The HBA is offline + now. */ + lpfc_sli_hba_down(phba); + spin_lock_irq(&phba->hbalock); + phba->work_ha = 0; + spin_unlock_irq(&phba->hbalock); + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + shost = lpfc_shost_from_vport(vports[i]); + spin_lock_irq(shost->host_lock); + vports[i]->work_port_events = 0; + vports[i]->fc_flag |= FC_OFFLINE_MODE; + spin_unlock_irq(shost->host_lock); + } + lpfc_destroy_vport_work_array(phba, vports); + /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled + * in hba_unset + */ + if (phba->pport->fc_flag & FC_OFFLINE_MODE) + __lpfc_cpuhp_remove(phba); + + if (phba->cfg_xri_rebalancing) + lpfc_destroy_multixri_pools(phba); +} + +/** + * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists + * @phba: pointer to lpfc hba data structure. + * + * This routine is to free all the SCSI buffers and IOCBs from the driver + * list back to kernel. It is called from lpfc_pci_remove_one to free + * the internal resources before the device is removed from the system. + **/ +static void +lpfc_scsi_free(struct lpfc_hba *phba) +{ + struct lpfc_io_buf *sb, *sb_next; + + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + return; + + spin_lock_irq(&phba->hbalock); + + /* Release all the lpfc_scsi_bufs maintained by this host. */ + + spin_lock(&phba->scsi_buf_list_put_lock); + list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, + list) { + list_del(&sb->list); + dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, + sb->dma_handle); + kfree(sb); + phba->total_scsi_bufs--; + } + spin_unlock(&phba->scsi_buf_list_put_lock); + + spin_lock(&phba->scsi_buf_list_get_lock); + list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, + list) { + list_del(&sb->list); + dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, + sb->dma_handle); + kfree(sb); + phba->total_scsi_bufs--; + } + spin_unlock(&phba->scsi_buf_list_get_lock); + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists + * @phba: pointer to lpfc hba data structure. + * + * This routine is to free all the IO buffers and IOCBs from the driver + * list back to kernel. It is called from lpfc_pci_remove_one to free + * the internal resources before the device is removed from the system. + **/ +void +lpfc_io_free(struct lpfc_hba *phba) +{ + struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; + struct lpfc_sli4_hdw_queue *qp; + int idx; + + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + /* Release all the lpfc_nvme_bufs maintained by this host. 
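/*
 * Editor's aside (illustration only, not part of the upstream patch):
 * lpfc_scsi_free()/lpfc_io_free() above walk the put and get lists,
 * unlink every buffer, return its DMA memory and drop the running
 * totals.  A userspace model of the same drain-and-free walk over a
 * singly linked list; struct iobuf and free() stand in for the real
 * buffer type and dma_pool_free().
 */
#include <stdlib.h>

struct iobuf {
        struct iobuf *next;
        void *data;
};

/* Free every buffer on *list and return how many were released. */
static unsigned int drain_and_free(struct iobuf **list)
{
        unsigned int released = 0;

        while (*list) {
                struct iobuf *b = *list;

                *list = b->next;        /* list_del() equivalent */
                free(b->data);          /* dma_pool_free() in the driver */
                free(b);
                released++;             /* total_scsi_bufs-- in the driver */
        }
        return released;
}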
*/ + spin_lock(&qp->io_buf_list_put_lock); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &qp->lpfc_io_buf_list_put, + list) { + list_del(&lpfc_ncmd->list); + qp->put_io_bufs--; + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + if (phba->cfg_xpsgl && !phba->nvmet_support) + lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); + lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); + kfree(lpfc_ncmd); + qp->total_io_bufs--; + } + spin_unlock(&qp->io_buf_list_put_lock); + + spin_lock(&qp->io_buf_list_get_lock); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &qp->lpfc_io_buf_list_get, + list) { + list_del(&lpfc_ncmd->list); + qp->get_io_bufs--; + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + if (phba->cfg_xpsgl && !phba->nvmet_support) + lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); + lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); + kfree(lpfc_ncmd); + qp->total_io_bufs--; + } + spin_unlock(&qp->io_buf_list_get_lock); + } +} + +/** + * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping + * @phba: pointer to lpfc hba data structure. + * + * This routine first calculates the sizes of the current els and allocated + * scsi sgl lists, and then goes through all sgls to updates the physical + * XRIs assigned due to port function reset. During port initialization, the + * current els and allocated scsi sgl lists are 0s. + * + * Return codes + * 0 - successful (for now, it always returns 0) + **/ +int +lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) +{ + struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; + uint16_t i, lxri, xri_cnt, els_xri_cnt; + LIST_HEAD(els_sgl_list); + int rc; + + /* + * update on pci function's els xri-sgl list + */ + els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); + + if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { + /* els xri-sgl expanded */ + xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3157 ELS xri-sgl count increased from " + "%d to %d\n", phba->sli4_hba.els_xri_cnt, + els_xri_cnt); + /* allocate the additional els sgls */ + for (i = 0; i < xri_cnt; i++) { + sglq_entry = kzalloc(sizeof(struct lpfc_sglq), + GFP_KERNEL); + if (sglq_entry == NULL) { + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "2562 Failure to allocate an " + "ELS sgl entry:%d\n", i); + rc = -ENOMEM; + goto out_free_mem; + } + sglq_entry->buff_type = GEN_BUFF_TYPE; + sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, + &sglq_entry->phys); + if (sglq_entry->virt == NULL) { + kfree(sglq_entry); + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "2563 Failure to allocate an " + "ELS mbuf:%d\n", i); + rc = -ENOMEM; + goto out_free_mem; + } + sglq_entry->sgl = sglq_entry->virt; + memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); + sglq_entry->state = SGL_FREED; + list_add_tail(&sglq_entry->list, &els_sgl_list); + } + spin_lock_irq(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&els_sgl_list, + &phba->sli4_hba.lpfc_els_sgl_list); + spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); + } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { + /* els xri-sgl shrinked */ + xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3158 ELS xri-sgl count decreased from " + "%d to %d\n", phba->sli4_hba.els_xri_cnt, + els_xri_cnt); + spin_lock_irq(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, + &els_sgl_list); + /* release extra els sgls from list */ + for (i = 0; i < xri_cnt; i++) { + 
list_remove_head(&els_sgl_list, + sglq_entry, struct lpfc_sglq, list); + if (sglq_entry) { + __lpfc_mbuf_free(phba, sglq_entry->virt, + sglq_entry->phys); + kfree(sglq_entry); + } + } + list_splice_init(&els_sgl_list, + &phba->sli4_hba.lpfc_els_sgl_list); + spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); + } else + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3163 ELS xri-sgl count unchanged: %d\n", + els_xri_cnt); + phba->sli4_hba.els_xri_cnt = els_xri_cnt; + + /* update xris to els sgls on the list */ + sglq_entry = NULL; + sglq_entry_next = NULL; + list_for_each_entry_safe(sglq_entry, sglq_entry_next, + &phba->sli4_hba.lpfc_els_sgl_list, list) { + lxri = lpfc_sli4_next_xritag(phba); + if (lxri == NO_XRI) { + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "2400 Failed to allocate xri for " + "ELS sgl\n"); + rc = -ENOMEM; + goto out_free_mem; + } + sglq_entry->sli4_lxritag = lxri; + sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + } + return 0; + +out_free_mem: + lpfc_free_els_sgl_list(phba); + return rc; +} + +/** + * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping + * @phba: pointer to lpfc hba data structure. + * + * This routine first calculates the sizes of the current els and allocated + * scsi sgl lists, and then goes through all sgls to updates the physical + * XRIs assigned due to port function reset. During port initialization, the + * current els and allocated scsi sgl lists are 0s. + * + * Return codes + * 0 - successful (for now, it always returns 0) + **/ +int +lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) +{ + struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; + uint16_t i, lxri, xri_cnt, els_xri_cnt; + uint16_t nvmet_xri_cnt; + LIST_HEAD(nvmet_sgl_list); + int rc; + + /* + * update on pci function's nvmet xri-sgl list + */ + els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); + + /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ + nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; + if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { + /* els xri-sgl expanded */ + xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "6302 NVMET xri-sgl cnt grew from %d to %d\n", + phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); + /* allocate the additional nvmet sgls */ + for (i = 0; i < xri_cnt; i++) { + sglq_entry = kzalloc(sizeof(struct lpfc_sglq), + GFP_KERNEL); + if (sglq_entry == NULL) { + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "6303 Failure to allocate an " + "NVMET sgl entry:%d\n", i); + rc = -ENOMEM; + goto out_free_mem; + } + sglq_entry->buff_type = NVMET_BUFF_TYPE; + sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, + &sglq_entry->phys); + if (sglq_entry->virt == NULL) { + kfree(sglq_entry); + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "6304 Failure to allocate an " + "NVMET buf:%d\n", i); + rc = -ENOMEM; + goto out_free_mem; + } + sglq_entry->sgl = sglq_entry->virt; + memset(sglq_entry->sgl, 0, + phba->cfg_sg_dma_buf_size); + sglq_entry->state = SGL_FREED; + list_add_tail(&sglq_entry->list, &nvmet_sgl_list); + } + spin_lock_irq(&phba->hbalock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&nvmet_sgl_list, + &phba->sli4_hba.lpfc_nvmet_sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + spin_unlock_irq(&phba->hbalock); + } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { + /* nvmet xri-sgl shrunk */ + xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + 
"6305 NVMET xri-sgl count decreased from " + "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, + nvmet_xri_cnt); + spin_lock_irq(&phba->hbalock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, + &nvmet_sgl_list); + /* release extra nvmet sgls from list */ + for (i = 0; i < xri_cnt; i++) { + list_remove_head(&nvmet_sgl_list, + sglq_entry, struct lpfc_sglq, list); + if (sglq_entry) { + lpfc_nvmet_buf_free(phba, sglq_entry->virt, + sglq_entry->phys); + kfree(sglq_entry); + } + } + list_splice_init(&nvmet_sgl_list, + &phba->sli4_hba.lpfc_nvmet_sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + spin_unlock_irq(&phba->hbalock); + } else + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "6306 NVMET xri-sgl count unchanged: %d\n", + nvmet_xri_cnt); + phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; + + /* update xris to nvmet sgls on the list */ + sglq_entry = NULL; + sglq_entry_next = NULL; + list_for_each_entry_safe(sglq_entry, sglq_entry_next, + &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { + lxri = lpfc_sli4_next_xritag(phba); + if (lxri == NO_XRI) { + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "6307 Failed to allocate xri for " + "NVMET sgl\n"); + rc = -ENOMEM; + goto out_free_mem; + } + sglq_entry->sli4_lxritag = lxri; + sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + } + return 0; + +out_free_mem: + lpfc_free_nvmet_sgl_list(phba); + return rc; +} + +int +lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) +{ + LIST_HEAD(blist); + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_io_buf *lpfc_cmd; + struct lpfc_io_buf *iobufp, *prev_iobufp; + int idx, cnt, xri, inserted; + + cnt = 0; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + spin_lock_irq(&qp->io_buf_list_get_lock); + spin_lock(&qp->io_buf_list_put_lock); + + /* Take everything off the get and put lists */ + list_splice_init(&qp->lpfc_io_buf_list_get, &blist); + list_splice(&qp->lpfc_io_buf_list_put, &blist); + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); + cnt += qp->get_io_bufs + qp->put_io_bufs; + qp->get_io_bufs = 0; + qp->put_io_bufs = 0; + qp->total_io_bufs = 0; + spin_unlock(&qp->io_buf_list_put_lock); + spin_unlock_irq(&qp->io_buf_list_get_lock); + } + + /* + * Take IO buffers off blist and put on cbuf sorted by XRI. + * This is because POST_SGL takes a sequential range of XRIs + * to post to the firmware. 
+ */ + for (idx = 0; idx < cnt; idx++) { + list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); + if (!lpfc_cmd) + return cnt; + if (idx == 0) { + list_add_tail(&lpfc_cmd->list, cbuf); + continue; + } + xri = lpfc_cmd->cur_iocbq.sli4_xritag; + inserted = 0; + prev_iobufp = NULL; + list_for_each_entry(iobufp, cbuf, list) { + if (xri < iobufp->cur_iocbq.sli4_xritag) { + if (prev_iobufp) + list_add(&lpfc_cmd->list, + &prev_iobufp->list); + else + list_add(&lpfc_cmd->list, cbuf); + inserted = 1; + break; + } + prev_iobufp = iobufp; + } + if (!inserted) + list_add_tail(&lpfc_cmd->list, cbuf); + } + return cnt; +} + +int +lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) +{ + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_io_buf *lpfc_cmd; + int idx, cnt; + unsigned long iflags; + + qp = phba->sli4_hba.hdwq; + cnt = 0; + while (!list_empty(cbuf)) { + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + list_remove_head(cbuf, lpfc_cmd, + struct lpfc_io_buf, list); + if (!lpfc_cmd) + return cnt; + cnt++; + qp = &phba->sli4_hba.hdwq[idx]; + lpfc_cmd->hdwq_no = idx; + lpfc_cmd->hdwq = qp; + lpfc_cmd->cur_iocbq.cmd_cmpl = NULL; + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags); + list_add_tail(&lpfc_cmd->list, + &qp->lpfc_io_buf_list_put); + qp->put_io_bufs++; + qp->total_io_bufs++; + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, + iflags); + } + } + return cnt; +} + +/** + * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping + * @phba: pointer to lpfc hba data structure. + * + * This routine first calculates the sizes of the current els and allocated + * scsi sgl lists, and then goes through all sgls to updates the physical + * XRIs assigned due to port function reset. During port initialization, the + * current els and allocated scsi sgl lists are 0s. 
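/*
 * Editor's aside (illustration only, not part of the upstream patch):
 * lpfc_io_buf_flush() above keeps @cbuf sorted by XRI so the later SGL
 * block post sees a sequential range.  A small singly linked version
 * of the same insert-in-order step; struct sbuf is an assumption for
 * the sketch.
 */
struct sbuf {
        struct sbuf *next;
        unsigned int xri;
};

/* Insert @node into the XRI-sorted list headed by *head. */
static void insert_by_xri(struct sbuf **head, struct sbuf *node)
{
        struct sbuf **pos = head;

        while (*pos && (*pos)->xri <= node->xri)
                pos = &(*pos)->next;    /* advance past smaller/equal XRIs */
        node->next = *pos;              /* splice in before the larger XRI */
        *pos = node;
}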
+ * + * Return codes + * 0 - successful (for now, it always returns 0) + **/ +int +lpfc_sli4_io_sgl_update(struct lpfc_hba *phba) +{ + struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; + uint16_t i, lxri, els_xri_cnt; + uint16_t io_xri_cnt, io_xri_max; + LIST_HEAD(io_sgl_list); + int rc, cnt; + + /* + * update on pci function's allocated nvme xri-sgl list + */ + + /* maximum number of xris available for nvme buffers */ + els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); + io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; + phba->sli4_hba.io_xri_max = io_xri_max; + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "6074 Current allocated XRI sgl count:%d, " + "maximum XRI count:%d els_xri_cnt:%d\n\n", + phba->sli4_hba.io_xri_cnt, + phba->sli4_hba.io_xri_max, + els_xri_cnt); + + cnt = lpfc_io_buf_flush(phba, &io_sgl_list); + + if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) { + /* max nvme xri shrunk below the allocated nvme buffers */ + io_xri_cnt = phba->sli4_hba.io_xri_cnt - + phba->sli4_hba.io_xri_max; + /* release the extra allocated nvme buffers */ + for (i = 0; i < io_xri_cnt; i++) { + list_remove_head(&io_sgl_list, lpfc_ncmd, + struct lpfc_io_buf, list); + if (lpfc_ncmd) { + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, + lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + } + } + phba->sli4_hba.io_xri_cnt -= io_xri_cnt; + } + + /* update xris associated to remaining allocated nvme buffers */ + lpfc_ncmd = NULL; + lpfc_ncmd_next = NULL; + phba->sli4_hba.io_xri_cnt = cnt; + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &io_sgl_list, list) { + lxri = lpfc_sli4_next_xritag(phba); + if (lxri == NO_XRI) { + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "6075 Failed to allocate xri for " + "nvme buffer\n"); + rc = -ENOMEM; + goto out_free_mem; + } + lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; + lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + } + cnt = lpfc_io_buf_replenish(phba, &io_sgl_list); + return 0; + +out_free_mem: + lpfc_io_free(phba); + return rc; +} + +/** + * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec + * @phba: Pointer to lpfc hba data structure. + * @num_to_alloc: The requested number of buffers to allocate. + * + * This routine allocates nvme buffers for device with SLI-4 interface spec, + * the nvme buffer contains all the necessary information needed to initiate + * an I/O. After allocating up to @num_to_allocate IO buffers and put + * them on a list, it post them to the port by using SGL block post. + * + * Return codes: + * int - number of IO buffers that were allocated and posted. + * 0 = failure, less than num_to_alloc is a partial failure. + **/ +int +lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) +{ + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_iocbq *pwqeq; + uint16_t iotag, lxri = 0; + int bcnt, num_posted; + LIST_HEAD(prep_nblist); + LIST_HEAD(post_nblist); + LIST_HEAD(nvme_nblist); + + phba->sli4_hba.io_xri_cnt = 0; + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { + lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL); + if (!lpfc_ncmd) + break; + /* + * Get memory from the pci pool to map the virt space to + * pci bus space for an I/O. The DMA buffer includes the + * number of SGE's necessary to support the sg_tablesize. 
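/*
 * Editor's aside (illustration only, not part of the upstream patch):
 * lpfc_io_buf_replenish() shown a little earlier hands the sorted
 * buffers back out one hardware queue at a time, so each HWQ ends up
 * with roughly the same share.  A tiny model of that round-robin
 * distribution over an array of per-queue counters.
 */

/* Distribute @nbufs buffers across @nqueues queues, round robin. */
static void replenish_round_robin(unsigned int *queue_depth,
                                  unsigned int nqueues,
                                  unsigned int nbufs)
{
        unsigned int i;

        if (!nqueues)
                return;
        for (i = 0; i < nbufs; i++)
                queue_depth[i % nqueues]++;     /* next HWQ gets this buffer */
}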
+ */ + lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, + GFP_KERNEL, + &lpfc_ncmd->dma_handle); + if (!lpfc_ncmd->data) { + kfree(lpfc_ncmd); + break; + } + + if (phba->cfg_xpsgl && !phba->nvmet_support) { + INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); + } else { + /* + * 4K Page alignment is CRITICAL to BlockGuard, double + * check to be sure. + */ + if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && + (((unsigned long)(lpfc_ncmd->data) & + (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "3369 Memory alignment err: " + "addr=%lx\n", + (unsigned long)lpfc_ncmd->data); + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, + lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + break; + } + } + + INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); + + lxri = lpfc_sli4_next_xritag(phba); + if (lxri == NO_XRI) { + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + break; + } + pwqeq = &lpfc_ncmd->cur_iocbq; + + /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ + iotag = lpfc_sli_next_iotag(phba, pwqeq); + if (iotag == 0) { + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6121 Failed to allocate IOTAG for" + " XRI:0x%x\n", lxri); + lpfc_sli4_free_xri(phba, lxri); + break; + } + pwqeq->sli4_lxritag = lxri; + pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + + /* Initialize local short-hand pointers. */ + lpfc_ncmd->dma_sgl = lpfc_ncmd->data; + lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; + lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd; + spin_lock_init(&lpfc_ncmd->buf_lock); + + /* add the nvme buffer to a post list */ + list_add_tail(&lpfc_ncmd->list, &post_nblist); + phba->sli4_hba.io_xri_cnt++; + } + lpfc_printf_log(phba, KERN_INFO, LOG_NVME, + "6114 Allocate %d out of %d requested new NVME " + "buffers of size x%zu bytes\n", bcnt, num_to_alloc, + sizeof(*lpfc_ncmd)); + + + /* post the list of nvme buffer sgls to port if available */ + if (!list_empty(&post_nblist)) + num_posted = lpfc_sli4_post_io_sgl_list( + phba, &post_nblist, bcnt); + else + num_posted = 0; + + return num_posted; +} + +static uint64_t +lpfc_get_wwpn(struct lpfc_hba *phba) +{ + uint64_t wwn; + int rc; + LPFC_MBOXQ_t *mboxq; + MAILBOX_t *mb; + + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!mboxq) + return (uint64_t)-1; + + /* First get WWN of HBA instance */ + lpfc_read_nv(phba, mboxq); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6019 Mailbox failed , mbxCmd x%x " + "READ_NV, mbxStatus x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + bf_get(lpfc_mqe_status, &mboxq->u.mqe)); + mempool_free(mboxq, phba->mbox_mem_pool); + return (uint64_t) -1; + } + mb = &mboxq->u.mb; + memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); + /* wwn is WWPN of HBA instance */ + mempool_free(mboxq, phba->mbox_mem_pool); + if (phba->sli_rev == LPFC_SLI_REV4) + return be64_to_cpu(wwn); + else + return rol64(wwn, 32); +} + +static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba) +{ + if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->cfg_xpsgl && !phba->nvmet_support) + return LPFC_MAX_SG_TABLESIZE; + else + return phba->cfg_scsi_seg_cnt; + else + return phba->cfg_sg_seg_cnt; +} + +/** + * lpfc_vmid_res_alloc - Allocates resources for VMID + * @phba: pointer to lpfc hba data 
structure. + * @vport: pointer to vport data structure + * + * This routine allocated the resources needed for the VMID. + * + * Return codes + * 0 on Success + * Non-0 on Failure + */ +static int +lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport) +{ + /* VMID feature is supported only on SLI4 */ + if (phba->sli_rev == LPFC_SLI_REV3) { + phba->cfg_vmid_app_header = 0; + phba->cfg_vmid_priority_tagging = 0; + } + + if (lpfc_is_vmid_enabled(phba)) { + vport->vmid = + kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid), + GFP_KERNEL); + if (!vport->vmid) + return -ENOMEM; + + rwlock_init(&vport->vmid_lock); + + /* Set the VMID parameters for the vport */ + vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging; + vport->vmid_inactivity_timeout = + phba->cfg_vmid_inactivity_timeout; + vport->max_vmid = phba->cfg_max_vmid; + vport->cur_vmid_cnt = 0; + + vport->vmid_priority_range = bitmap_zalloc + (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL); + + if (!vport->vmid_priority_range) { + kfree(vport->vmid); + return -ENOMEM; + } + + hash_init(vport->hash_table); + } + return 0; +} + +/** + * lpfc_create_port - Create an FC port + * @phba: pointer to lpfc hba data structure. + * @instance: a unique integer ID to this FC port. + * @dev: pointer to the device data structure. + * + * This routine creates a FC port for the upper layer protocol. The FC port + * can be created on top of either a physical port or a virtual port provided + * by the HBA. This routine also allocates a SCSI host data structure (shost) + * and associates the FC port created before adding the shost into the SCSI + * layer. + * + * Return codes + * @vport - pointer to the virtual N_Port data structure. + * NULL - port create failed. + **/ +struct lpfc_vport * +lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) +{ + struct lpfc_vport *vport; + struct Scsi_Host *shost = NULL; + struct scsi_host_template *template; + int error = 0; + int i; + uint64_t wwn; + bool use_no_reset_hba = false; + int rc; + + if (lpfc_no_hba_reset_cnt) { + if (phba->sli_rev < LPFC_SLI_REV4 && + dev == &phba->pcidev->dev) { + /* Reset the port first */ + lpfc_sli_brdrestart(phba); + rc = lpfc_sli_chipset_init(phba); + if (rc) + return NULL; + } + wwn = lpfc_get_wwpn(phba); + } + + for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { + if (wwn == lpfc_no_hba_reset[i]) { + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "6020 Setting use_no_reset port=%llx\n", + wwn); + use_no_reset_hba = true; + break; + } + } + + /* Seed template for SCSI host registration */ + if (dev == &phba->pcidev->dev) { + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { + /* Seed physical port template */ + template = &lpfc_template; + + if (use_no_reset_hba) + /* template is for a no reset SCSI Host */ + template->eh_host_reset_handler = NULL; + + /* Seed updated value of sg_tablesize */ + template->sg_tablesize = lpfc_get_sg_tablesize(phba); + } else { + /* NVMET is for physical port only */ + template = &lpfc_template_nvme; + } + } else { + /* Seed vport template */ + template = &lpfc_vport_template; + + /* Seed updated value of sg_tablesize */ + template->sg_tablesize = lpfc_get_sg_tablesize(phba); + } + + shost = scsi_host_alloc(template, sizeof(struct lpfc_vport)); + if (!shost) + goto out; + + vport = (struct lpfc_vport *) shost->hostdata; + vport->phba = phba; + vport->load_flag |= FC_LOADING; + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + vport->fc_rscn_flush = 0; + lpfc_get_vport_cfgparam(vport); + + /* Adjust value in vport */ 
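/*
 * Editor's aside (illustration only, not part of the upstream patch):
 * lpfc_vmid_res_alloc() above allocates the VMID array and then the
 * priority-range bitmap, and frees the first allocation if the second
 * one fails.  The same alloc-then-unwind pattern in plain C; the types
 * and sizes here are placeholders.
 */
#include <stdlib.h>

struct vmid_res {
        void *vmid_table;       /* kcalloc(max_vmid, ...) in the driver */
        void *priority_bitmap;  /* bitmap_zalloc(...) in the driver */
};

static int vmid_res_alloc(struct vmid_res *res, size_t nvmid)
{
        res->vmid_table = calloc(nvmid, 64);
        if (!res->vmid_table)
                return -1;

        res->priority_bitmap = calloc(1, 32);
        if (!res->priority_bitmap) {
                free(res->vmid_table);          /* unwind the first alloc */
                res->vmid_table = NULL;
                return -1;
        }
        return 0;
}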
+ vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; + + shost->unique_id = instance; + shost->max_id = LPFC_MAX_TARGET; + shost->max_lun = vport->cfg_max_luns; + shost->this_id = -1; + shost->max_cmd_len = 16; + + if (phba->sli_rev == LPFC_SLI_REV4) { + if (!phba->cfg_fcp_mq_threshold || + phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) + phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; + + shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), + phba->cfg_fcp_mq_threshold); + + shost->dma_boundary = + phba->sli4_hba.pc_sli4_params.sge_supp_len-1; + } else + /* SLI-3 has a limited number of hardware queues (3), + * thus there is only one for FCP processing. + */ + shost->nr_hw_queues = 1; + + /* + * Set initial can_queue value since 0 is no longer supported and + * scsi_add_host will fail. This will be adjusted later based on the + * max xri value determined in hba setup. + */ + shost->can_queue = phba->cfg_hba_queue_depth - 10; + if (dev != &phba->pcidev->dev) { + shost->transportt = lpfc_vport_transport_template; + vport->port_type = LPFC_NPIV_PORT; + } else { + shost->transportt = lpfc_transport_template; + vport->port_type = LPFC_PHYSICAL_PORT; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9081 CreatePort TMPLATE type %x TBLsize %d " + "SEGcnt %d/%d\n", + vport->port_type, shost->sg_tablesize, + phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); + + /* Allocate the resources for VMID */ + rc = lpfc_vmid_res_alloc(phba, vport); + + if (rc) + goto out_put_shost; + + /* Initialize all internally managed lists. */ + INIT_LIST_HEAD(&vport->fc_nodes); + INIT_LIST_HEAD(&vport->rcv_buffer_list); + spin_lock_init(&vport->work_port_lock); + + timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); + + timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); + + timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); + + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) + lpfc_setup_bg(phba, shost); + + error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); + if (error) + goto out_free_vmid; + + spin_lock_irq(&phba->port_list_lock); + list_add_tail(&vport->listentry, &phba->port_list); + spin_unlock_irq(&phba->port_list_lock); + return vport; + +out_free_vmid: + kfree(vport->vmid); + bitmap_free(vport->vmid_priority_range); +out_put_shost: + scsi_host_put(shost); +out: + return NULL; +} + +/** + * destroy_port - destroy an FC port + * @vport: pointer to an lpfc virtual N_Port data structure. + * + * This routine destroys a FC port from the upper layer protocol. All the + * resources associated with the port are released. + **/ +void +destroy_port(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + + lpfc_debugfs_terminate(vport); + fc_remove_host(shost); + scsi_remove_host(shost); + + spin_lock_irq(&phba->port_list_lock); + list_del_init(&vport->listentry); + spin_unlock_irq(&phba->port_list_lock); + + lpfc_cleanup(vport); + return; +} + +/** + * lpfc_get_instance - Get a unique integer ID + * + * This routine allocates a unique integer ID from lpfc_hba_index pool. It + * uses the kernel idr facility to perform the task. + * + * Return codes: + * instance - a unique integer ID allocated as the new instance. + * -1 - lpfc get instance failed. + **/ +int +lpfc_get_instance(void) +{ + int ret; + + ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); + return ret < 0 ? 
-1 : ret; +} + +/** + * lpfc_scan_finished - method for SCSI layer to detect whether scan is done + * @shost: pointer to SCSI host data structure. + * @time: elapsed time of the scan in jiffies. + * + * This routine is called by the SCSI layer with a SCSI host to determine + * whether the scan host is finished. + * + * Note: there is no scan_start function as adapter initialization will have + * asynchronously kicked off the link initialization. + * + * Return codes + * 0 - SCSI host scan is not over yet. + * 1 - SCSI host scan is over. + **/ +int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int stat = 0; + + spin_lock_irq(shost->host_lock); + + if (vport->load_flag & FC_UNLOADING) { + stat = 1; + goto finished; + } + if (time >= msecs_to_jiffies(30 * 1000)) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0461 Scanning longer than 30 " + "seconds. Continuing initialization\n"); + stat = 1; + goto finished; + } + if (time >= msecs_to_jiffies(15 * 1000) && + phba->link_state <= LPFC_LINK_DOWN) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0465 Link down longer than 15 " + "seconds. Continuing initialization\n"); + stat = 1; + goto finished; + } + + if (vport->port_state != LPFC_VPORT_READY) + goto finished; + if (vport->num_disc_nodes || vport->fc_prli_sent) + goto finished; + if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) + goto finished; + if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) + goto finished; + + stat = 1; + +finished: + spin_unlock_irq(shost->host_lock); + return stat; +} + +static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + fc_host_supported_speeds(shost) = 0; + /* + * Avoid reporting supported link speed for FCoE as it can't be + * controlled via FCoE. + */ + if (phba->hba_flag & HBA_FCOE_MODE) + return; + + if (phba->lmt & LMT_256Gb) + fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT; + if (phba->lmt & LMT_128Gb) + fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; + if (phba->lmt & LMT_64Gb) + fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; + if (phba->lmt & LMT_32Gb) + fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; + if (phba->lmt & LMT_16Gb) + fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; + if (phba->lmt & LMT_10Gb) + fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; + if (phba->lmt & LMT_8Gb) + fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; + if (phba->lmt & LMT_4Gb) + fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; + if (phba->lmt & LMT_2Gb) + fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; + if (phba->lmt & LMT_1Gb) + fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; +} + +/** + * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port + * @shost: pointer to SCSI host data structure. + * + * This routine initializes a given SCSI host attributes on a FC port. The + * SCSI host can be either on top of a physical port or a virtual port. + **/ +void lpfc_host_attrib_init(struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + /* + * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 
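/*
 * Editor's aside (illustration only, not part of the upstream patch):
 * lpfc_host_supported_speeds_set() above is a straight translation of
 * link-module-type bits into transport port-speed bits.  The same
 * mapping can be expressed as a table walk; the bit values below are
 * invented for the sketch, only the shape of the mapping matters.
 */
#include <stdint.h>
#include <stddef.h>

static const struct {
        uint32_t lmt_bit;       /* capability bit reported by the HBA */
        uint32_t portspeed_bit; /* bit exported to the FC transport */
} speed_map[] = {
        { 1u << 0, 1u << 10 },  /* e.g. LMT_1Gb -> FC_PORTSPEED_1GBIT */
        { 1u << 1, 1u << 11 },  /* e.g. LMT_2Gb -> FC_PORTSPEED_2GBIT */
        { 1u << 2, 1u << 12 },  /* e.g. LMT_4Gb -> FC_PORTSPEED_4GBIT */
        { 1u << 3, 1u << 13 },  /* e.g. LMT_8Gb -> FC_PORTSPEED_8GBIT */
};

static uint32_t supported_speeds(uint32_t lmt, int fcoe_mode)
{
        uint32_t mask = 0;
        size_t i;

        if (fcoe_mode)
                return 0;       /* speed is not controllable via FCoE */
        for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++)
                if (lmt & speed_map[i].lmt_bit)
                        mask |= speed_map[i].portspeed_bit;
        return mask;
}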
+ */ + + fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); + fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); + fc_host_supported_classes(shost) = FC_COS_CLASS3; + + memset(fc_host_supported_fc4s(shost), 0, + sizeof(fc_host_supported_fc4s(shost))); + fc_host_supported_fc4s(shost)[2] = 1; + fc_host_supported_fc4s(shost)[7] = 1; + + lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), + sizeof fc_host_symbolic_name(shost)); + + lpfc_host_supported_speeds_set(shost); + + fc_host_maxframe_size(shost) = + (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | + (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; + + fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; + + /* This value is also unchanging */ + memset(fc_host_active_fc4s(shost), 0, + sizeof(fc_host_active_fc4s(shost))); + fc_host_active_fc4s(shost)[2] = 1; + fc_host_active_fc4s(shost)[7] = 1; + + fc_host_max_npiv_vports(shost) = phba->max_vpi; + spin_lock_irq(shost->host_lock); + vport->load_flag &= ~FC_LOADING; + spin_unlock_irq(shost->host_lock); +} + +/** + * lpfc_stop_port_s3 - Stop SLI3 device port + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to stop an SLI3 device port, it stops the device + * from generating interrupts and stops the device driver's timers for the + * device. + **/ +static void +lpfc_stop_port_s3(struct lpfc_hba *phba) +{ + /* Clear all interrupt enable conditions */ + writel(0, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + /* Clear all pending interrupts */ + writel(0xffffffff, phba->HAregaddr); + readl(phba->HAregaddr); /* flush */ + + /* Reset some HBA SLI setup states */ + lpfc_stop_hba_timers(phba); + phba->pport->work_port_events = 0; +} + +/** + * lpfc_stop_port_s4 - Stop SLI4 device port + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to stop an SLI4 device port, it stops the device + * from generating interrupts and stops the device driver's timers for the + * device. + **/ +static void +lpfc_stop_port_s4(struct lpfc_hba *phba) +{ + /* Reset some HBA SLI4 setup states */ + lpfc_stop_hba_timers(phba); + if (phba->pport) + phba->pport->work_port_events = 0; + phba->sli4_hba.intr_enable = 0; +} + +/** + * lpfc_stop_port - Wrapper function for stopping hba port + * @phba: Pointer to HBA context object. + * + * This routine wraps the actual SLI3 or SLI4 hba stop port routine from + * the API jump table function pointer from the lpfc_hba struct. + **/ +void +lpfc_stop_port(struct lpfc_hba *phba) +{ + phba->lpfc_stop_port(phba); + + if (phba->wq) + flush_workqueue(phba->wq); +} + +/** + * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer + * @phba: Pointer to hba for which this call is being executed. + * + * This routine starts the timer waiting for the FCF rediscovery to complete. 
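/*
 * Editor's aside (illustration only, not part of the upstream patch):
 * lpfc_host_attrib_init() above rebuilds the max frame size from the
 * service parameters as a 12-bit value: the low nibble of the MSB byte
 * shifted up, OR-ed with the LSB byte.  The same composition as a
 * stand-alone helper.
 */
#include <stdint.h>

static uint32_t bb_rcv_size(uint8_t msb, uint8_t lsb)
{
        return ((uint32_t)(msb & 0x0F) << 8) | lsb;
}

/* Example: msb = 0x08, lsb = 0x00 gives 2048 bytes. */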
+ **/ +void +lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) +{ + unsigned long fcf_redisc_wait_tmo = + (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); + /* Start fcf rediscovery wait period timer */ + mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); + spin_lock_irq(&phba->hbalock); + /* Allow action to new fcf asynchronous event */ + phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); + /* Mark the FCF rediscovery pending state */ + phba->fcf.fcf_flag |= FCF_REDISC_PEND; + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout + * @t: Timer context used to obtain the pointer to lpfc hba data structure. + * + * This routine is invoked when waiting for FCF table rediscover has been + * timed out. If new FCF record(s) has (have) been discovered during the + * wait period, a new FCF event shall be added to the FCOE async event + * list, and then worker thread shall be waked up for processing from the + * worker thread context. + **/ +static void +lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) +{ + struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); + + /* Don't send FCF rediscovery event if timer cancelled */ + spin_lock_irq(&phba->hbalock); + if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { + spin_unlock_irq(&phba->hbalock); + return; + } + /* Clear FCF rediscovery timer pending flag */ + phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; + /* FCF rediscovery event to worker thread */ + phba->fcf.fcf_flag |= FCF_REDISC_EVT; + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2776 FCF rediscover quiescent timer expired\n"); + /* wake up worker thread */ + lpfc_worker_wake_up(phba); +} + +/** + * lpfc_vmid_poll - VMID timeout detection + * @t: Timer context used to obtain the pointer to lpfc hba data structure. + * + * This routine is invoked when there is no I/O on by a VM for the specified + * amount of time. When this situation is detected, the VMID has to be + * deregistered from the switch and all the local resources freed. The VMID + * will be reassigned to the VM once the I/O begins. + **/ +static void +lpfc_vmid_poll(struct timer_list *t) +{ + struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll); + u32 wake_up = 0; + + /* check if there is a need to issue QFPA */ + if (phba->pport->vmid_priority_tagging) { + wake_up = 1; + phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; + } + + /* Is the vmid inactivity timer enabled */ + if (phba->pport->vmid_inactivity_timeout || + phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) { + wake_up = 1; + phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID; + } + + if (wake_up) + lpfc_worker_wake_up(phba); + + /* restart the timer for the next iteration */ + mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 * + LPFC_VMID_TIMER)); +} + +/** + * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code + * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async link completion queue entry. + * + * This routine is to parse the SLI4 link-attention link fault code. 
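/*
 * Editor's aside (illustration only, not part of the upstream patch):
 * both timers above follow the usual self re-arming pattern: do the
 * periodic work, then mod_timer() to "now + period" so the next expiry
 * is scheduled relative to this one.  A userspace model using a plain
 * deadline variable; the structure and callback are hypothetical.
 */
struct periodic {
        unsigned long deadline;         /* next expiry, in ticks */
        unsigned long period;           /* ticks between expiries */
        void (*work)(void *arg);        /* what to do on each expiry */
};

static void periodic_expire(struct periodic *p, unsigned long now, void *arg)
{
        p->work(arg);                   /* e.g. wake the worker thread */
        p->deadline = now + p->period;  /* re-arm for the next interval */
}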
+ **/ +static void +lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, + struct lpfc_acqe_link *acqe_link) +{ + switch (bf_get(lpfc_acqe_fc_la_att_type, acqe_link)) { + case LPFC_FC_LA_TYPE_LINK_DOWN: + case LPFC_FC_LA_TYPE_TRUNKING_EVENT: + case LPFC_FC_LA_TYPE_ACTIVATE_FAIL: + case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT: + break; + default: + switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { + case LPFC_ASYNC_LINK_FAULT_NONE: + case LPFC_ASYNC_LINK_FAULT_LOCAL: + case LPFC_ASYNC_LINK_FAULT_REMOTE: + case LPFC_ASYNC_LINK_FAULT_LR_LRR: + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0398 Unknown link fault code: x%x\n", + bf_get(lpfc_acqe_link_fault, acqe_link)); + break; + } + break; + } +} + +/** + * lpfc_sli4_parse_latt_type - Parse sli4 link attention type + * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async link completion queue entry. + * + * This routine is to parse the SLI4 link attention type and translate it + * into the base driver's link attention type coding. + * + * Return: Link attention type in terms of base driver's coding. + **/ +static uint8_t +lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, + struct lpfc_acqe_link *acqe_link) +{ + uint8_t att_type; + + switch (bf_get(lpfc_acqe_link_status, acqe_link)) { + case LPFC_ASYNC_LINK_STATUS_DOWN: + case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: + att_type = LPFC_ATT_LINK_DOWN; + break; + case LPFC_ASYNC_LINK_STATUS_UP: + /* Ignore physical link up events - wait for logical link up */ + att_type = LPFC_ATT_RESERVED; + break; + case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: + att_type = LPFC_ATT_LINK_UP; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0399 Invalid link attention type: x%x\n", + bf_get(lpfc_acqe_link_status, acqe_link)); + att_type = LPFC_ATT_RESERVED; + break; + } + return att_type; +} + +/** + * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed + * @phba: pointer to lpfc hba data structure. + * + * This routine is to get an SLI3 FC port's link speed in Mbps. + * + * Return: link speed in terms of Mbps. + **/ +uint32_t +lpfc_sli_port_speed_get(struct lpfc_hba *phba) +{ + uint32_t link_speed; + + if (!lpfc_is_link_up(phba)) + return 0; + + if (phba->sli_rev <= LPFC_SLI_REV3) { + switch (phba->fc_linkspeed) { + case LPFC_LINK_SPEED_1GHZ: + link_speed = 1000; + break; + case LPFC_LINK_SPEED_2GHZ: + link_speed = 2000; + break; + case LPFC_LINK_SPEED_4GHZ: + link_speed = 4000; + break; + case LPFC_LINK_SPEED_8GHZ: + link_speed = 8000; + break; + case LPFC_LINK_SPEED_10GHZ: + link_speed = 10000; + break; + case LPFC_LINK_SPEED_16GHZ: + link_speed = 16000; + break; + default: + link_speed = 0; + } + } else { + if (phba->sli4_hba.link_state.logical_speed) + link_speed = + phba->sli4_hba.link_state.logical_speed; + else + link_speed = phba->sli4_hba.link_state.speed; + } + return link_speed; +} + +/** + * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed + * @phba: pointer to lpfc hba data structure. + * @evt_code: asynchronous event code. + * @speed_code: asynchronous event link speed code. + * + * This routine is to parse the giving SLI4 async event link speed code into + * value of Mbps for the link speed. + * + * Return: link speed in terms of Mbps. 
+ **/ +static uint32_t +lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, + uint8_t speed_code) +{ + uint32_t port_speed; + + switch (evt_code) { + case LPFC_TRAILER_CODE_LINK: + switch (speed_code) { + case LPFC_ASYNC_LINK_SPEED_ZERO: + port_speed = 0; + break; + case LPFC_ASYNC_LINK_SPEED_10MBPS: + port_speed = 10; + break; + case LPFC_ASYNC_LINK_SPEED_100MBPS: + port_speed = 100; + break; + case LPFC_ASYNC_LINK_SPEED_1GBPS: + port_speed = 1000; + break; + case LPFC_ASYNC_LINK_SPEED_10GBPS: + port_speed = 10000; + break; + case LPFC_ASYNC_LINK_SPEED_20GBPS: + port_speed = 20000; + break; + case LPFC_ASYNC_LINK_SPEED_25GBPS: + port_speed = 25000; + break; + case LPFC_ASYNC_LINK_SPEED_40GBPS: + port_speed = 40000; + break; + case LPFC_ASYNC_LINK_SPEED_100GBPS: + port_speed = 100000; + break; + default: + port_speed = 0; + } + break; + case LPFC_TRAILER_CODE_FC: + switch (speed_code) { + case LPFC_FC_LA_SPEED_UNKNOWN: + port_speed = 0; + break; + case LPFC_FC_LA_SPEED_1G: + port_speed = 1000; + break; + case LPFC_FC_LA_SPEED_2G: + port_speed = 2000; + break; + case LPFC_FC_LA_SPEED_4G: + port_speed = 4000; + break; + case LPFC_FC_LA_SPEED_8G: + port_speed = 8000; + break; + case LPFC_FC_LA_SPEED_10G: + port_speed = 10000; + break; + case LPFC_FC_LA_SPEED_16G: + port_speed = 16000; + break; + case LPFC_FC_LA_SPEED_32G: + port_speed = 32000; + break; + case LPFC_FC_LA_SPEED_64G: + port_speed = 64000; + break; + case LPFC_FC_LA_SPEED_128G: + port_speed = 128000; + break; + case LPFC_FC_LA_SPEED_256G: + port_speed = 256000; + break; + default: + port_speed = 0; + } + break; + default: + port_speed = 0; + } + return port_speed; +} + +/** + * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event + * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async link completion queue entry. + * + * This routine is to handle the SLI4 asynchronous FCoE link event. 
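/*
 * Editor's aside (illustration only, not part of the upstream patch):
 * lpfc_sli4_port_speed_parse() above is a pure code-to-Mbps mapping
 * and could equally be written as a lookup table, with 0 for anything
 * the table does not know.  The indexes below are illustrative, not
 * the real LPFC_FC_LA_SPEED_* values.
 */
#include <stdint.h>
#include <stddef.h>

static const uint32_t fc_speed_mbps[] = {
        0,              /* unknown */
        1000,           /* 1G   */
        2000,           /* 2G   */
        4000,           /* 4G   */
        8000,           /* 8G   */
        10000,          /* 10G  */
        16000,          /* 16G  */
        32000,          /* 32G  */
        64000,          /* 64G  */
        128000,         /* 128G */
        256000,         /* 256G */
};

static uint32_t speed_code_to_mbps(size_t code)
{
        if (code >= sizeof(fc_speed_mbps) / sizeof(fc_speed_mbps[0]))
                return 0;       /* default case of the switch */
        return fc_speed_mbps[code];
}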
+ **/ +static void +lpfc_sli4_async_link_evt(struct lpfc_hba *phba, + struct lpfc_acqe_link *acqe_link) +{ + LPFC_MBOXQ_t *pmb; + MAILBOX_t *mb; + struct lpfc_mbx_read_top *la; + uint8_t att_type; + int rc; + + att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); + if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) + return; + phba->fcoe_eventtag = acqe_link->event_tag; + pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0395 The mboxq allocation failed\n"); + return; + } + + rc = lpfc_mbox_rsrc_prep(phba, pmb); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0396 mailbox allocation failed\n"); + goto out_free_pmb; + } + + /* Cleanup any outstanding ELS commands */ + lpfc_els_flush_all_cmd(phba); + + /* Block ELS IOCBs until we have done process link event */ + phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; + + /* Update link event statistics */ + phba->sli.slistat.link_event++; + + /* Create lpfc_handle_latt mailbox command from link ACQE */ + lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf); + pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; + pmb->vport = phba->pport; + + /* Keep the link status for extra SLI4 state machine reference */ + phba->sli4_hba.link_state.speed = + lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, + bf_get(lpfc_acqe_link_speed, acqe_link)); + phba->sli4_hba.link_state.duplex = + bf_get(lpfc_acqe_link_duplex, acqe_link); + phba->sli4_hba.link_state.status = + bf_get(lpfc_acqe_link_status, acqe_link); + phba->sli4_hba.link_state.type = + bf_get(lpfc_acqe_link_type, acqe_link); + phba->sli4_hba.link_state.number = + bf_get(lpfc_acqe_link_number, acqe_link); + phba->sli4_hba.link_state.fault = + bf_get(lpfc_acqe_link_fault, acqe_link); + phba->sli4_hba.link_state.logical_speed = + bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2900 Async FC/FCoE Link event - Speed:%dGBit " + "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " + "Logical speed:%dMbps Fault:%d\n", + phba->sli4_hba.link_state.speed, + phba->sli4_hba.link_state.topology, + phba->sli4_hba.link_state.status, + phba->sli4_hba.link_state.type, + phba->sli4_hba.link_state.number, + phba->sli4_hba.link_state.logical_speed, + phba->sli4_hba.link_state.fault); + /* + * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch + * topology info. Note: Optional for non FC-AL ports. + */ + if (!(phba->hba_flag & HBA_FCOE_MODE)) { + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + goto out_free_pmb; + return; + } + /* + * For FCoE Mode: fill in all the topology information we need and call + * the READ_TOPOLOGY completion routine to continue without actually + * sending the READ_TOPOLOGY mailbox command to the port. 
+ */ + /* Initialize completion status */ + mb = &pmb->u.mb; + mb->mbxStatus = MBX_SUCCESS; + + /* Parse port fault information field */ + lpfc_sli4_parse_latt_fault(phba, acqe_link); + + /* Parse and translate link attention fields */ + la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; + la->eventTag = acqe_link->event_tag; + bf_set(lpfc_mbx_read_top_att_type, la, att_type); + bf_set(lpfc_mbx_read_top_link_spd, la, + (bf_get(lpfc_acqe_link_speed, acqe_link))); + + /* Fake the following irrelevant fields */ + bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); + bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); + bf_set(lpfc_mbx_read_top_il, la, 0); + bf_set(lpfc_mbx_read_top_pb, la, 0); + bf_set(lpfc_mbx_read_top_fa, la, 0); + bf_set(lpfc_mbx_read_top_mm, la, 0); + + /* Invoke the lpfc_handle_latt mailbox command callback function */ + lpfc_mbx_cmpl_read_topology(phba, pmb); + + return; + +out_free_pmb: + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); +} + +/** + * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read + * topology. + * @phba: pointer to lpfc hba data structure. + * @speed_code: asynchronous event link speed code. + * + * This routine is to parse the giving SLI4 async event link speed code into + * value of Read topology link speed. + * + * Return: link speed in terms of Read topology. + **/ +static uint8_t +lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code) +{ + uint8_t port_speed; + + switch (speed_code) { + case LPFC_FC_LA_SPEED_1G: + port_speed = LPFC_LINK_SPEED_1GHZ; + break; + case LPFC_FC_LA_SPEED_2G: + port_speed = LPFC_LINK_SPEED_2GHZ; + break; + case LPFC_FC_LA_SPEED_4G: + port_speed = LPFC_LINK_SPEED_4GHZ; + break; + case LPFC_FC_LA_SPEED_8G: + port_speed = LPFC_LINK_SPEED_8GHZ; + break; + case LPFC_FC_LA_SPEED_16G: + port_speed = LPFC_LINK_SPEED_16GHZ; + break; + case LPFC_FC_LA_SPEED_32G: + port_speed = LPFC_LINK_SPEED_32GHZ; + break; + case LPFC_FC_LA_SPEED_64G: + port_speed = LPFC_LINK_SPEED_64GHZ; + break; + case LPFC_FC_LA_SPEED_128G: + port_speed = LPFC_LINK_SPEED_128GHZ; + break; + case LPFC_FC_LA_SPEED_256G: + port_speed = LPFC_LINK_SPEED_256GHZ; + break; + default: + port_speed = 0; + break; + } + + return port_speed; +} + +void +lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba) +{ + if (!phba->rx_monitor) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "4411 Rx Monitor Info is empty.\n"); + } else { + lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0, + LPFC_MAX_RXMONITOR_DUMP); + } +} + +/** + * lpfc_cgn_update_stat - Save data into congestion stats buffer + * @phba: pointer to lpfc hba data structure. + * @dtag: FPIN descriptor received + * + * Increment the FPIN received counter/time when it happens. 
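+ * @dtag selects which notification counter and timestamp pair is updated
+ * (link integrity, delivery, peer congestion or congestion), and the
+ * congestion info buffer CRC is recomputed after the update.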
+ */ +void +lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag) +{ + struct lpfc_cgn_info *cp; + u32 value; + + /* Make sure we have a congestion info buffer */ + if (!phba->cgn_i) + return; + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + + /* Update congestion statistics */ + switch (dtag) { + case ELS_DTAG_LNK_INTEGRITY: + le32_add_cpu(&cp->link_integ_notification, 1); + lpfc_cgn_update_tstamp(phba, &cp->stat_lnk); + break; + case ELS_DTAG_DELIVERY: + le32_add_cpu(&cp->delivery_notification, 1); + lpfc_cgn_update_tstamp(phba, &cp->stat_delivery); + break; + case ELS_DTAG_PEER_CONGEST: + le32_add_cpu(&cp->cgn_peer_notification, 1); + lpfc_cgn_update_tstamp(phba, &cp->stat_peer); + break; + case ELS_DTAG_CONGESTION: + le32_add_cpu(&cp->cgn_notification, 1); + lpfc_cgn_update_tstamp(phba, &cp->stat_fpin); + } + if (phba->cgn_fpin_frequency && + phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { + value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; + cp->cgn_stat_npm = value; + } + + value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, + LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(value); +} + +/** + * lpfc_cgn_update_tstamp - Update cmf timestamp + * @phba: pointer to lpfc hba data structure. + * @ts: structure to write the timestamp to. + */ +void +lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts) +{ + struct timespec64 cur_time; + struct tm tm_val; + + ktime_get_real_ts64(&cur_time); + time64_to_tm(cur_time.tv_sec, 0, &tm_val); + + ts->month = tm_val.tm_mon + 1; + ts->day = tm_val.tm_mday; + ts->year = tm_val.tm_year - 100; + ts->hour = tm_val.tm_hour; + ts->minute = tm_val.tm_min; + ts->second = tm_val.tm_sec; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "2646 Updated CMF timestamp : " + "%u/%u/%u %u:%u:%u\n", + ts->day, ts->month, + ts->year, ts->hour, + ts->minute, ts->second); +} + +/** + * lpfc_cmf_stats_timer - Save data into registered congestion buffer + * @timer: Timer cookie to access lpfc private data + * + * Save the congestion event data every minute. + * On the hour collapse all the minute data into hour data. Every day + * collapse all the hour data into daily data. Separate driver + * and fabrc congestion event counters that will be saved out + * to the registered congestion buffer every minute. 
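+ * Minute samples are kept in a ring of LPFC_MIN_HOUR slots; every
+ * LPFC_MIN_HOUR minutes they are summed (averaged for the latency and
+ * bandwidth values) into an hour slot, and every LPFC_MIN_DAY minutes the
+ * hour slots are folded into a day slot, rotating after LPFC_MAX_CGN_DAYS.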
+ */ +static enum hrtimer_restart +lpfc_cmf_stats_timer(struct hrtimer *timer) +{ + struct lpfc_hba *phba; + struct lpfc_cgn_info *cp; + uint32_t i, index; + uint16_t value, mvalue; + uint64_t bps; + uint32_t mbps; + uint32_t dvalue, wvalue, lvalue, avalue; + uint64_t latsum; + __le16 *ptr; + __le32 *lptr; + __le16 *mptr; + + phba = container_of(timer, struct lpfc_hba, cmf_stats_timer); + /* Make sure we have a congestion info buffer */ + if (!phba->cgn_i) + return HRTIMER_NORESTART; + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + + phba->cgn_evt_timestamp = jiffies + + msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); + phba->cgn_evt_minute++; + + /* We should get to this point in the routine on 1 minute intervals */ + lpfc_cgn_update_tstamp(phba, &cp->base_time); + + if (phba->cgn_fpin_frequency && + phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { + value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; + cp->cgn_stat_npm = value; + } + + /* Read and clear the latency counters for this minute */ + lvalue = atomic_read(&phba->cgn_latency_evt_cnt); + latsum = atomic64_read(&phba->cgn_latency_evt); + atomic_set(&phba->cgn_latency_evt_cnt, 0); + atomic64_set(&phba->cgn_latency_evt, 0); + + /* We need to store MB/sec bandwidth in the congestion information. + * block_cnt is count of 512 byte blocks for the entire minute, + * bps will get bytes per sec before finally converting to MB/sec. + */ + bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512; + phba->rx_block_cnt = 0; + mvalue = bps / (1024 * 1024); /* convert to MB/sec */ + + /* Every minute */ + /* cgn parameters */ + cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; + cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; + cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; + cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; + + /* Fill in default LUN qdepth */ + value = (uint16_t)(phba->pport->cfg_lun_queue_depth); + cp->cgn_lunq = cpu_to_le16(value); + + /* Record congestion buffer info - every minute + * cgn_driver_evt_cnt (Driver events) + * cgn_fabric_warn_cnt (Congestion Warnings) + * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency) + * cgn_fabric_alarm_cnt (Congestion Alarms) + */ + index = ++cp->cgn_index_minute; + if (cp->cgn_index_minute == LPFC_MIN_HOUR) { + cp->cgn_index_minute = 0; + index = 0; + } + + /* Get the number of driver events in this sample and reset counter */ + dvalue = atomic_read(&phba->cgn_driver_evt_cnt); + atomic_set(&phba->cgn_driver_evt_cnt, 0); + + /* Get the number of warning events - FPIN and Signal for this minute */ + wvalue = 0; + if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) || + phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || + phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) + wvalue = atomic_read(&phba->cgn_fabric_warn_cnt); + atomic_set(&phba->cgn_fabric_warn_cnt, 0); + + /* Get the number of alarm events - FPIN and Signal for this minute */ + avalue = 0; + if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) || + phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) + avalue = atomic_read(&phba->cgn_fabric_alarm_cnt); + atomic_set(&phba->cgn_fabric_alarm_cnt, 0); + + /* Collect the driver, warning, alarm and latency counts for this + * minute into the driver congestion buffer. 
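+	 * The latency slot stores the average latency per IO for the minute
+	 * (latsum / lvalue) and the bandwidth slot stores MB/sec; as an
+	 * illustrative figure, 12,288,000 512-byte blocks received in one
+	 * minute works out to 104,857,600 bytes/sec and is recorded as
+	 * 100 MB/sec.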
+	 */
+	ptr = &cp->cgn_drvr_min[index];
+	value = (uint16_t)dvalue;
+	*ptr = cpu_to_le16(value);
+
+	ptr = &cp->cgn_warn_min[index];
+	value = (uint16_t)wvalue;
+	*ptr = cpu_to_le16(value);
+
+	ptr = &cp->cgn_alarm_min[index];
+	value = (uint16_t)avalue;
+	*ptr = cpu_to_le16(value);
+
+	lptr = &cp->cgn_latency_min[index];
+	if (lvalue) {
+		lvalue = (uint32_t)div_u64(latsum, lvalue);
+		*lptr = cpu_to_le32(lvalue);
+	} else {
+		*lptr = 0;
+	}
+
+	/* Collect the bandwidth value into the driver's congestion buffer. */
+	mptr = &cp->cgn_bw_min[index];
+	*mptr = cpu_to_le16(mvalue);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+			"2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
+			index, dvalue, wvalue, *lptr, mvalue, avalue);
+
+	/* Every hour */
+	if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
+		/* Record congestion buffer info - every hour
+		 * Collapse all minutes into an hour
+		 */
+		index = ++cp->cgn_index_hour;
+		if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
+			cp->cgn_index_hour = 0;
+			index = 0;
+		}
+
+		dvalue = 0;
+		wvalue = 0;
+		lvalue = 0;
+		avalue = 0;
+		mvalue = 0;
+		mbps = 0;
+		for (i = 0; i < LPFC_MIN_HOUR; i++) {
+			dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
+			wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
+			lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
+			mbps += le16_to_cpu(cp->cgn_bw_min[i]);
+			avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
+		}
+		if (lvalue)		/* Avg of latency averages */
+			lvalue /= LPFC_MIN_HOUR;
+		if (mbps)		/* Avg of Bandwidth averages */
+			mvalue = mbps / LPFC_MIN_HOUR;
+
+		lptr = &cp->cgn_drvr_hr[index];
+		*lptr = cpu_to_le32(dvalue);
+		lptr = &cp->cgn_warn_hr[index];
+		*lptr = cpu_to_le32(wvalue);
+		lptr = &cp->cgn_latency_hr[index];
+		*lptr = cpu_to_le32(lvalue);
+		mptr = &cp->cgn_bw_hr[index];
+		*mptr = cpu_to_le16(mvalue);
+		lptr = &cp->cgn_alarm_hr[index];
+		*lptr = cpu_to_le32(avalue);
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+				"2419 Congestion Info - hour "
+				"(%d): %d %d %d %d %d\n",
+				index, dvalue, wvalue, lvalue, mvalue, avalue);
+	}
+
+	/* Every day */
+	if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
+		/* Record congestion buffer info - every day
+		 * Collapse all hours into a day. Rotate days
+		 * after LPFC_MAX_CGN_DAYS.
+ */ + index = ++cp->cgn_index_day; + if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) { + cp->cgn_index_day = 0; + index = 0; + } + + dvalue = 0; + wvalue = 0; + lvalue = 0; + mvalue = 0; + mbps = 0; + avalue = 0; + for (i = 0; i < LPFC_HOUR_DAY; i++) { + dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); + wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); + lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); + mbps += le16_to_cpu(cp->cgn_bw_hr[i]); + avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); + } + if (lvalue) /* Avg of latency averages */ + lvalue /= LPFC_HOUR_DAY; + if (mbps) /* Avg of Bandwidth averages */ + mvalue = mbps / LPFC_HOUR_DAY; + + lptr = &cp->cgn_drvr_day[index]; + *lptr = cpu_to_le32(dvalue); + lptr = &cp->cgn_warn_day[index]; + *lptr = cpu_to_le32(wvalue); + lptr = &cp->cgn_latency_day[index]; + *lptr = cpu_to_le32(lvalue); + mptr = &cp->cgn_bw_day[index]; + *mptr = cpu_to_le16(mvalue); + lptr = &cp->cgn_alarm_day[index]; + *lptr = cpu_to_le32(avalue); + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "2420 Congestion Info - daily (%d): " + "%d %d %d %d %d\n", + index, dvalue, wvalue, lvalue, mvalue, avalue); + } + + /* Use the frequency found in the last rcv'ed FPIN */ + value = phba->cgn_fpin_frequency; + cp->cgn_warn_freq = cpu_to_le16(value); + cp->cgn_alarm_freq = cpu_to_le16(value); + + lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, + LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(lvalue); + + hrtimer_forward_now(timer, ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC)); + + return HRTIMER_RESTART; +} + +/** + * lpfc_calc_cmf_latency - latency from start of rxate timer interval + * @phba: The Hba for which this call is being executed. + * + * The routine calculates the latency from the beginning of the CMF timer + * interval to the current point in time. It is called from IO completion + * when we exceed our Bandwidth limitation for the time interval. + */ +uint32_t +lpfc_calc_cmf_latency(struct lpfc_hba *phba) +{ + struct timespec64 cmpl_time; + uint32_t msec = 0; + + ktime_get_real_ts64(&cmpl_time); + + /* This routine works on a ms granularity so sec and usec are + * converted accordingly. + */ + if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) { + msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) / + NSEC_PER_MSEC; + } else { + if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) { + msec = (cmpl_time.tv_sec - + phba->cmf_latency.tv_sec) * MSEC_PER_SEC; + msec += ((cmpl_time.tv_nsec - + phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC); + } else { + msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec - + 1) * MSEC_PER_SEC; + msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) + + cmpl_time.tv_nsec) / NSEC_PER_MSEC); + } + } + return msec; +} + +/** + * lpfc_cmf_timer - This is the timer function for one congestion + * rate interval. 
+ * @timer: Pointer to the high resolution timer that expired + */ +static enum hrtimer_restart +lpfc_cmf_timer(struct hrtimer *timer) +{ + struct lpfc_hba *phba = container_of(timer, struct lpfc_hba, + cmf_timer); + struct rx_info_entry entry; + uint32_t io_cnt; + uint32_t busy, max_read; + uint64_t total, rcv, lat, mbpi, extra, cnt; + int timer_interval = LPFC_CMF_INTERVAL; + uint32_t ms; + struct lpfc_cgn_stat *cgs; + int cpu; + + /* Only restart the timer if congestion mgmt is on */ + if (phba->cmf_active_mode == LPFC_CFG_OFF || + !phba->cmf_latency.tv_sec) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6224 CMF timer exit: %d %lld\n", + phba->cmf_active_mode, + (uint64_t)phba->cmf_latency.tv_sec); + return HRTIMER_NORESTART; + } + + /* If pport is not ready yet, just exit and wait for + * the next timer cycle to hit. + */ + if (!phba->pport) + goto skip; + + /* Do not block SCSI IO while in the timer routine since + * total_bytes will be cleared + */ + atomic_set(&phba->cmf_stop_io, 1); + + /* First we need to calculate the actual ms between + * the last timer interrupt and this one. We ask for + * LPFC_CMF_INTERVAL, however the actual time may + * vary depending on system overhead. + */ + ms = lpfc_calc_cmf_latency(phba); + + + /* Immediately after we calculate the time since the last + * timer interrupt, set the start time for the next + * interrupt + */ + ktime_get_real_ts64(&phba->cmf_latency); + + phba->cmf_link_byte_count = + div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000); + + /* Collect all the stats from the prior timer interval */ + total = 0; + io_cnt = 0; + lat = 0; + rcv = 0; + for_each_present_cpu(cpu) { + cgs = per_cpu_ptr(phba->cmf_stat, cpu); + total += atomic64_xchg(&cgs->total_bytes, 0); + io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0); + lat += atomic64_xchg(&cgs->rx_latency, 0); + rcv += atomic64_xchg(&cgs->rcv_bytes, 0); + } + + /* Before we issue another CMF_SYNC_WQE, retrieve the BW + * returned from the last CMF_SYNC_WQE issued, from + * cmf_last_sync_bw. This will be the target BW for + * this next timer interval. + */ + if (phba->cmf_active_mode == LPFC_CFG_MANAGED && + phba->link_state != LPFC_LINK_DOWN && + phba->hba_flag & HBA_SETUP) { + mbpi = phba->cmf_last_sync_bw; + phba->cmf_last_sync_bw = 0; + extra = 0; + + /* Calculate any extra bytes needed to account for the + * timer accuracy. If we are less than LPFC_CMF_INTERVAL + * calculate the adjustment needed for total to reflect + * a full LPFC_CMF_INTERVAL. + */ + if (ms && ms < LPFC_CMF_INTERVAL) { + cnt = div_u64(total, ms); /* bytes per ms */ + cnt *= LPFC_CMF_INTERVAL; /* what total should be */ + extra = cnt - total; + } + lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra); + } else { + /* For Monitor mode or link down we want mbpi + * to be the full link speed + */ + mbpi = phba->cmf_link_byte_count; + extra = 0; + } + phba->cmf_timer_cnt++; + + if (io_cnt) { + /* Update congestion info buffer latency in us */ + atomic_add(io_cnt, &phba->cgn_latency_evt_cnt); + atomic64_add(lat, &phba->cgn_latency_evt); + } + busy = atomic_xchg(&phba->cmf_busy, 0); + max_read = atomic_xchg(&phba->rx_max_read_cnt, 0); + + /* Calculate MBPI for the next timer interval */ + if (mbpi) { + if (mbpi > phba->cmf_link_byte_count || + phba->cmf_active_mode == LPFC_CFG_MONITOR) + mbpi = phba->cmf_link_byte_count; + + /* Change max_bytes_per_interval to what the prior + * CMF_SYNC_WQE cmpl indicated. 
+ */ + if (mbpi != phba->cmf_max_bytes_per_interval) + phba->cmf_max_bytes_per_interval = mbpi; + } + + /* Save rxmonitor information for debug */ + if (phba->rx_monitor) { + entry.total_bytes = total; + entry.cmf_bytes = total + extra; + entry.rcv_bytes = rcv; + entry.cmf_busy = busy; + entry.cmf_info = phba->cmf_active_info; + if (io_cnt) { + entry.avg_io_latency = div_u64(lat, io_cnt); + entry.avg_io_size = div_u64(rcv, io_cnt); + } else { + entry.avg_io_latency = 0; + entry.avg_io_size = 0; + } + entry.max_read_cnt = max_read; + entry.io_cnt = io_cnt; + entry.max_bytes_per_interval = mbpi; + if (phba->cmf_active_mode == LPFC_CFG_MANAGED) + entry.timer_utilization = phba->cmf_last_ts; + else + entry.timer_utilization = ms; + entry.timer_interval = ms; + phba->cmf_last_ts = 0; + + lpfc_rx_monitor_record(phba->rx_monitor, &entry); + } + + if (phba->cmf_active_mode == LPFC_CFG_MONITOR) { + /* If Monitor mode, check if we are oversubscribed + * against the full line rate. + */ + if (mbpi && total > mbpi) + atomic_inc(&phba->cgn_driver_evt_cnt); + } + phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */ + + /* Since total_bytes has already been zero'ed, its okay to unblock + * after max_bytes_per_interval is setup. + */ + if (atomic_xchg(&phba->cmf_bw_wait, 0)) + queue_work(phba->wq, &phba->unblock_request_work); + + /* SCSI IO is now unblocked */ + atomic_set(&phba->cmf_stop_io, 0); + +skip: + hrtimer_forward_now(timer, + ktime_set(0, timer_interval * NSEC_PER_MSEC)); + return HRTIMER_RESTART; +} + +#define trunk_link_status(__idx)\ + bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ + ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\ + "Link up" : "Link down") : "NA" +/* Did port __idx reported an error */ +#define trunk_port_fault(__idx)\ + bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ + (port_fault & (1 << __idx) ? "YES" : "NO") : "NA" + +static void +lpfc_update_trunk_link_status(struct lpfc_hba *phba, + struct lpfc_acqe_fc_la *acqe_fc) +{ + uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); + uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); + u8 cnt = 0; + + phba->sli4_hba.link_state.speed = + lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, + bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); + + phba->sli4_hba.link_state.logical_speed = + bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; + /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ + phba->fc_linkspeed = + lpfc_async_link_speed_to_read_top( + phba, + bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); + + if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { + phba->trunk_link.link0.state = + bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) + ? LPFC_LINK_UP : LPFC_LINK_DOWN; + phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; + cnt++; + } + if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { + phba->trunk_link.link1.state = + bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) + ? LPFC_LINK_UP : LPFC_LINK_DOWN; + phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; + cnt++; + } + if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { + phba->trunk_link.link2.state = + bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) + ? LPFC_LINK_UP : LPFC_LINK_DOWN; + phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; + cnt++; + } + if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { + phba->trunk_link.link3.state = + bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) + ? 
LPFC_LINK_UP : LPFC_LINK_DOWN; + phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; + cnt++; + } + + if (cnt) + phba->trunk_link.phy_lnk_speed = + phba->sli4_hba.link_state.logical_speed / (cnt * 1000); + else + phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN; + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2910 Async FC Trunking Event - Speed:%d\n" + "\tLogical speed:%d " + "port0: %s port1: %s port2: %s port3: %s\n", + phba->sli4_hba.link_state.speed, + phba->sli4_hba.link_state.logical_speed, + trunk_link_status(0), trunk_link_status(1), + trunk_link_status(2), trunk_link_status(3)); + + if (phba->cmf_active_mode != LPFC_CFG_OFF) + lpfc_cmf_signal_init(phba); + + if (port_fault) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3202 trunk error:0x%x (%s) seen on port0:%s " + /* + * SLI-4: We have only 0xA error codes + * defined as of now. print an appropriate + * message in case driver needs to be updated. + */ + "port1:%s port2:%s port3:%s\n", err, err > 0xA ? + "UNDEFINED. update driver." : trunk_errmsg[err], + trunk_port_fault(0), trunk_port_fault(1), + trunk_port_fault(2), trunk_port_fault(3)); +} + + +/** + * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event + * @phba: pointer to lpfc hba data structure. + * @acqe_fc: pointer to the async fc completion queue entry. + * + * This routine is to handle the SLI4 asynchronous FC event. It will simply log + * that the event was received and then issue a read_topology mailbox command so + * that the rest of the driver will treat it the same as SLI3. + **/ +static void +lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) +{ + LPFC_MBOXQ_t *pmb; + MAILBOX_t *mb; + struct lpfc_mbx_read_top *la; + char *log_level; + int rc; + + if (bf_get(lpfc_trailer_type, acqe_fc) != + LPFC_FC_LA_EVENT_TYPE_FC_LINK) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2895 Non FC link Event detected.(%d)\n", + bf_get(lpfc_trailer_type, acqe_fc)); + return; + } + + if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == + LPFC_FC_LA_TYPE_TRUNKING_EVENT) { + lpfc_update_trunk_link_status(phba, acqe_fc); + return; + } + + /* Keep the link status for extra SLI4 state machine reference */ + phba->sli4_hba.link_state.speed = + lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, + bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); + phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; + phba->sli4_hba.link_state.topology = + bf_get(lpfc_acqe_fc_la_topology, acqe_fc); + phba->sli4_hba.link_state.status = + bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); + phba->sli4_hba.link_state.type = + bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); + phba->sli4_hba.link_state.number = + bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); + phba->sli4_hba.link_state.fault = + bf_get(lpfc_acqe_link_fault, acqe_fc); + phba->sli4_hba.link_state.link_status = + bf_get(lpfc_acqe_fc_la_link_status, acqe_fc); + + /* + * Only select attention types need logical speed modification to what + * was previously set. 
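+	 * For example a LINK_DOWN attention forces logical_speed to 0, while
+	 * the other attention types in the link-up range take the logical
+	 * speed reported in the ACQE unless trunking is configured.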
+ */ + if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_LINK_UP && + phba->sli4_hba.link_state.status < LPFC_FC_LA_TYPE_ACTIVATE_FAIL) { + if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == + LPFC_FC_LA_TYPE_LINK_DOWN) + phba->sli4_hba.link_state.logical_speed = 0; + else if (!phba->sli4_hba.conf_trunk) + phba->sli4_hba.link_state.logical_speed = + bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2896 Async FC event - Speed:%dGBaud Topology:x%x " + "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" + "%dMbps Fault:x%x Link Status:x%x\n", + phba->sli4_hba.link_state.speed, + phba->sli4_hba.link_state.topology, + phba->sli4_hba.link_state.status, + phba->sli4_hba.link_state.type, + phba->sli4_hba.link_state.number, + phba->sli4_hba.link_state.logical_speed, + phba->sli4_hba.link_state.fault, + phba->sli4_hba.link_state.link_status); + + /* + * The following attention types are informational only, providing + * further details about link status. Overwrite the value of + * link_state.status appropriately. No further action is required. + */ + if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_ACTIVATE_FAIL) { + switch (phba->sli4_hba.link_state.status) { + case LPFC_FC_LA_TYPE_ACTIVATE_FAIL: + log_level = KERN_WARNING; + phba->sli4_hba.link_state.status = + LPFC_FC_LA_TYPE_LINK_DOWN; + break; + case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT: + /* + * During bb credit recovery establishment, receiving + * this attention type is normal. Link Up attention + * type is expected to occur before this informational + * attention type so keep the Link Up status. + */ + log_level = KERN_INFO; + phba->sli4_hba.link_state.status = + LPFC_FC_LA_TYPE_LINK_UP; + break; + default: + log_level = KERN_INFO; + break; + } + lpfc_log_msg(phba, log_level, LOG_SLI, + "2992 Async FC event - Informational Link " + "Attention Type x%x\n", + bf_get(lpfc_acqe_fc_la_att_type, acqe_fc)); + return; + } + + pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2897 The mboxq allocation failed\n"); + return; + } + rc = lpfc_mbox_rsrc_prep(phba, pmb); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2898 The mboxq prep failed\n"); + goto out_free_pmb; + } + + /* Cleanup any outstanding ELS commands */ + lpfc_els_flush_all_cmd(phba); + + /* Block ELS IOCBs until we have done process link event */ + phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; + + /* Update link event statistics */ + phba->sli.slistat.link_event++; + + /* Create lpfc_handle_latt mailbox command from link ACQE */ + lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf); + pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; + pmb->vport = phba->pport; + + if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { + phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); + + switch (phba->sli4_hba.link_state.status) { + case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: + phba->link_flag |= LS_MDS_LINK_DOWN; + break; + case LPFC_FC_LA_TYPE_MDS_LOOPBACK: + phba->link_flag |= LS_MDS_LOOPBACK; + break; + default: + break; + } + + /* Initialize completion status */ + mb = &pmb->u.mb; + mb->mbxStatus = MBX_SUCCESS; + + /* Parse port fault information field */ + lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); + + /* Parse and translate link attention fields */ + la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; + la->eventTag = acqe_fc->event_tag; + + if 
(phba->sli4_hba.link_state.status == + LPFC_FC_LA_TYPE_UNEXP_WWPN) { + bf_set(lpfc_mbx_read_top_att_type, la, + LPFC_FC_LA_TYPE_UNEXP_WWPN); + } else { + bf_set(lpfc_mbx_read_top_att_type, la, + LPFC_FC_LA_TYPE_LINK_DOWN); + } + /* Invoke the mailbox command callback function */ + lpfc_mbx_cmpl_read_topology(phba, pmb); + + return; + } + + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + goto out_free_pmb; + return; + +out_free_pmb: + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); +} + +/** + * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event + * @phba: pointer to lpfc hba data structure. + * @acqe_sli: pointer to the async SLI completion queue entry. + * + * This routine is to handle the SLI4 asynchronous SLI events. + **/ +static void +lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) +{ + char port_name; + char message[128]; + uint8_t status; + uint8_t evt_type; + uint8_t operational = 0; + struct temp_event temp_event_data; + struct lpfc_acqe_misconfigured_event *misconfigured; + struct lpfc_acqe_cgn_signal *cgn_signal; + struct Scsi_Host *shost; + struct lpfc_vport **vports; + int rc, i, cnt; + + evt_type = bf_get(lpfc_trailer_type, acqe_sli); + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2901 Async SLI event - Type:%d, Event Data: x%08x " + "x%08x x%08x x%08x\n", evt_type, + acqe_sli->event_data1, acqe_sli->event_data2, + acqe_sli->event_data3, acqe_sli->trailer); + + port_name = phba->Port[0]; + if (port_name == 0x00) + port_name = '?'; /* get port name is empty */ + + switch (evt_type) { + case LPFC_SLI_EVENT_TYPE_OVER_TEMP: + temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; + temp_event_data.event_code = LPFC_THRESHOLD_TEMP; + temp_event_data.data = (uint32_t)acqe_sli->event_data1; + + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3190 Over Temperature:%d Celsius- Port Name %c\n", + acqe_sli->event_data1, port_name); + + phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; + shost = lpfc_shost_from_vport(phba->pport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(temp_event_data), + (char *)&temp_event_data, + SCSI_NL_VID_TYPE_PCI + | PCI_VENDOR_ID_EMULEX); + break; + case LPFC_SLI_EVENT_TYPE_NORM_TEMP: + temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; + temp_event_data.event_code = LPFC_NORMAL_TEMP; + temp_event_data.data = (uint32_t)acqe_sli->event_data1; + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LDS_EVENT, + "3191 Normal Temperature:%d Celsius - Port Name %c\n", + acqe_sli->event_data1, port_name); + + shost = lpfc_shost_from_vport(phba->pport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(temp_event_data), + (char *)&temp_event_data, + SCSI_NL_VID_TYPE_PCI + | PCI_VENDOR_ID_EMULEX); + break; + case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: + misconfigured = (struct lpfc_acqe_misconfigured_event *) + &acqe_sli->event_data1; + + /* fetch the status for this port */ + switch (phba->sli4_hba.lnk_info.lnk_no) { + case LPFC_LINK_NUMBER_0: + status = bf_get(lpfc_sli_misconfigured_port0_state, + &misconfigured->theEvent); + operational = bf_get(lpfc_sli_misconfigured_port0_op, + &misconfigured->theEvent); + break; + case LPFC_LINK_NUMBER_1: + status = bf_get(lpfc_sli_misconfigured_port1_state, + &misconfigured->theEvent); + operational = bf_get(lpfc_sli_misconfigured_port1_op, + &misconfigured->theEvent); + break; + case LPFC_LINK_NUMBER_2: + status = bf_get(lpfc_sli_misconfigured_port2_state, + &misconfigured->theEvent); + 
operational = bf_get(lpfc_sli_misconfigured_port2_op, + &misconfigured->theEvent); + break; + case LPFC_LINK_NUMBER_3: + status = bf_get(lpfc_sli_misconfigured_port3_state, + &misconfigured->theEvent); + operational = bf_get(lpfc_sli_misconfigured_port3_op, + &misconfigured->theEvent); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3296 " + "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " + "event: Invalid link %d", + phba->sli4_hba.lnk_info.lnk_no); + return; + } + + /* Skip if optic state unchanged */ + if (phba->sli4_hba.lnk_info.optic_state == status) + return; + + switch (status) { + case LPFC_SLI_EVENT_STATUS_VALID: + sprintf(message, "Physical Link is functional"); + break; + case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: + sprintf(message, "Optics faulted/incorrectly " + "installed/not installed - Reseat optics, " + "if issue not resolved, replace."); + break; + case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: + sprintf(message, + "Optics of two types installed - Remove one " + "optic or install matching pair of optics."); + break; + case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: + sprintf(message, "Incompatible optics - Replace with " + "compatible optics for card to function."); + break; + case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: + sprintf(message, "Unqualified optics - Replace with " + "Avago optics for Warranty and Technical " + "Support - Link is%s operational", + (operational) ? " not" : ""); + break; + case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: + sprintf(message, "Uncertified optics - Replace with " + "Avago-certified optics to enable link " + "operation - Link is%s operational", + (operational) ? " not" : ""); + break; + default: + /* firmware is reporting a status we don't know about */ + sprintf(message, "Unknown event status x%02x", status); + break; + } + + /* Issue READ_CONFIG mbox command to refresh supported speeds */ + rc = lpfc_sli4_read_config(phba); + if (rc) { + phba->lmt = 0; + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "3194 Unable to retrieve supported " + "speeds, rc = 0x%x\n", rc); + } + rc = lpfc_sli4_refresh_params(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3174 Unable to update pls support, " + "rc x%x\n", rc); + } + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; + i++) { + shost = lpfc_shost_from_vport(vports[i]); + lpfc_host_supported_speeds_set(shost); + } + } + lpfc_destroy_vport_work_array(phba, vports); + + phba->sli4_hba.lnk_info.optic_state = status; + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3176 Port Name %c %s\n", port_name, message); + break; + case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3192 Remote DPort Test Initiated - " + "Event Data1:x%08x Event Data2: x%08x\n", + acqe_sli->event_data1, acqe_sli->event_data2); + break; + case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG: + /* Call FW to obtain active parms */ + lpfc_sli4_cgn_parm_chg_evt(phba); + break; + case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: + /* Misconfigured WWN. Reports that the SLI Port is configured + * to use FA-WWN, but the attached device doesn’t support it. + * Event Data1 - N.A, Event Data2 - N.A + * This event only happens on the physical port. 
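+		 * In response the driver clears the LPFC_FAWWPN_FABRIC flag
+		 * and zeroes the physical port's fc_portname.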
+ */ + lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY, + "2699 Misconfigured FA-PWWN - Attached device " + "does not support FA-PWWN\n"); + phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC; + memset(phba->pport->fc_portname.u.wwn, 0, + sizeof(struct lpfc_name)); + break; + case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: + /* EEPROM failure. No driver action is required */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2518 EEPROM failure - " + "Event Data1: x%08x Event Data2: x%08x\n", + acqe_sli->event_data1, acqe_sli->event_data2); + break; + case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL: + if (phba->cmf_active_mode == LPFC_CFG_OFF) + break; + cgn_signal = (struct lpfc_acqe_cgn_signal *) + &acqe_sli->event_data1; + phba->cgn_acqe_cnt++; + + cnt = bf_get(lpfc_warn_acqe, cgn_signal); + atomic64_add(cnt, &phba->cgn_acqe_stat.warn); + atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm); + + /* no threshold for CMF, even 1 signal will trigger an event */ + + /* Alarm overrides warning, so check that first */ + if (cgn_signal->alarm_cnt) { + if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { + /* Keep track of alarm cnt for CMF_SYNC_WQE */ + atomic_add(cgn_signal->alarm_cnt, + &phba->cgn_sync_alarm_cnt); + } + } else if (cnt) { + /* signal action needs to be taken */ + if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || + phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { + /* Keep track of warning cnt for CMF_SYNC_WQE */ + atomic_add(cnt, &phba->cgn_sync_warn_cnt); + } + } + break; + case LPFC_SLI_EVENT_TYPE_RD_SIGNAL: + /* May be accompanied by a temperature event */ + lpfc_printf_log(phba, KERN_INFO, + LOG_SLI | LOG_LINK_EVENT | LOG_LDS_EVENT, + "2902 Remote Degrade Signaling: x%08x x%08x " + "x%08x\n", + acqe_sli->event_data1, acqe_sli->event_data2, + acqe_sli->event_data3); + break; + default: + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3193 Unrecognized SLI event, type: 0x%x", + evt_type); + break; + } +} + +/** + * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport + * @vport: pointer to vport data structure. + * + * This routine is to perform Clear Virtual Link (CVL) on a vport in + * response to a CVL event. + * + * Return the pointer to the ndlp with the vport if successful, otherwise + * return NULL. 
+ **/ +static struct lpfc_nodelist * +lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost; + struct lpfc_hba *phba; + + if (!vport) + return NULL; + phba = vport->phba; + if (!phba) + return NULL; + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) { + /* Cannot find existing Fabric ndlp, so allocate a new one */ + ndlp = lpfc_nlp_init(vport, Fabric_DID); + if (!ndlp) + return NULL; + /* Set the node type */ + ndlp->nlp_type |= NLP_FABRIC; + /* Put ndlp onto node list */ + lpfc_enqueue_node(vport, ndlp); + } + if ((phba->pport->port_state < LPFC_FLOGI) && + (phba->pport->port_state != LPFC_VPORT_FAILED)) + return NULL; + /* If virtual link is not yet instantiated ignore CVL */ + if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) + && (vport->port_state != LPFC_VPORT_FAILED)) + return NULL; + shost = lpfc_shost_from_vport(vport); + if (!shost) + return NULL; + lpfc_linkdown_port(vport); + lpfc_cleanup_pending_mbox(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_CVL_RCVD; + spin_unlock_irq(shost->host_lock); + + return ndlp; +} + +/** + * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports + * @phba: pointer to lpfc hba data structure. + * + * This routine is to perform Clear Virtual Link (CVL) on all vports in + * response to a FCF dead event. + **/ +static void +lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + int i; + + vports = lpfc_create_vport_work_array(phba); + if (vports) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) + lpfc_sli4_perform_vport_cvl(vports[i]); + lpfc_destroy_vport_work_array(phba, vports); +} + +/** + * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event + * @phba: pointer to lpfc hba data structure. + * @acqe_fip: pointer to the async fcoe completion queue entry. + * + * This routine is to handle the SLI4 asynchronous fcoe event. + **/ +static void +lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, + struct lpfc_acqe_fip *acqe_fip) +{ + uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); + int rc; + struct lpfc_vport *vport; + struct lpfc_nodelist *ndlp; + int active_vlink_present; + struct lpfc_vport **vports; + int i; + + phba->fc_eventTag = acqe_fip->event_tag; + phba->fcoe_eventtag = acqe_fip->event_tag; + switch (event_type) { + case LPFC_FIP_EVENT_TYPE_NEW_FCF: + case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: + if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2546 New FCF event, evt_tag:x%x, " + "index:x%x\n", + acqe_fip->event_tag, + acqe_fip->index); + else + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | + LOG_DISCOVERY, + "2788 FCF param modified event, " + "evt_tag:x%x, index:x%x\n", + acqe_fip->event_tag, + acqe_fip->index); + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { + /* + * During period of FCF discovery, read the FCF + * table record indexed by the event to update + * FCF roundrobin failover eligible FCF bmask. + */ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | + LOG_DISCOVERY, + "2779 Read FCF (x%x) for updating " + "roundrobin FCF failover bmask\n", + acqe_fip->index); + rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); + } + + /* If the FCF discovery is in progress, do nothing. 
*/ + spin_lock_irq(&phba->hbalock); + if (phba->hba_flag & FCF_TS_INPROG) { + spin_unlock_irq(&phba->hbalock); + break; + } + /* If fast FCF failover rescan event is pending, do nothing */ + if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { + spin_unlock_irq(&phba->hbalock); + break; + } + + /* If the FCF has been in discovered state, do nothing. */ + if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { + spin_unlock_irq(&phba->hbalock); + break; + } + spin_unlock_irq(&phba->hbalock); + + /* Otherwise, scan the entire FCF table and re-discover SAN */ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2770 Start FCF table scan per async FCF " + "event, evt_tag:x%x, index:x%x\n", + acqe_fip->event_tag, acqe_fip->index); + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, + LPFC_FCOE_FCF_GET_FIRST); + if (rc) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2547 Issue FCF scan read FCF mailbox " + "command failed (x%x)\n", rc); + break; + + case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2548 FCF Table full count 0x%x tag 0x%x\n", + bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), + acqe_fip->event_tag); + break; + + case LPFC_FIP_EVENT_TYPE_FCF_DEAD: + phba->fcoe_cvl_eventtag = acqe_fip->event_tag; + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2549 FCF (x%x) disconnected from network, " + "tag:x%x\n", acqe_fip->index, + acqe_fip->event_tag); + /* + * If we are in the middle of FCF failover process, clear + * the corresponding FCF bit in the roundrobin bitmap. + */ + spin_lock_irq(&phba->hbalock); + if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && + (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { + spin_unlock_irq(&phba->hbalock); + /* Update FLOGI FCF failover eligible FCF bmask */ + lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); + break; + } + spin_unlock_irq(&phba->hbalock); + + /* If the event is not for currently used fcf do nothing */ + if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) + break; + + /* + * Otherwise, request the port to rediscover the entire FCF + * table for a fast recovery from case that the current FCF + * is no longer valid as we are not in the middle of FCF + * failover process already. + */ + spin_lock_irq(&phba->hbalock); + /* Mark the fast failover process in progress */ + phba->fcf.fcf_flag |= FCF_DEAD_DISC; + spin_unlock_irq(&phba->hbalock); + + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2771 Start FCF fast failover process due to " + "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " + "\n", acqe_fip->event_tag, acqe_fip->index); + rc = lpfc_sli4_redisc_fcf_table(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | + LOG_TRACE_EVENT, + "2772 Issue FCF rediscover mailbox " + "command failed, fail through to FCF " + "dead event\n"); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; + spin_unlock_irq(&phba->hbalock); + /* + * Last resort will fail over by treating this + * as a link down to FCF registration. + */ + lpfc_sli4_fcf_dead_failthrough(phba); + } else { + /* Reset FCF roundrobin bmask for new discovery */ + lpfc_sli4_clear_fcf_rr_bmask(phba); + /* + * Handling fast FCF failover to a DEAD FCF event is + * considered equalivant to receiving CVL to all vports. 
+ */ + lpfc_sli4_perform_all_vport_cvl(phba); + } + break; + case LPFC_FIP_EVENT_TYPE_CVL: + phba->fcoe_cvl_eventtag = acqe_fip->event_tag; + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "2718 Clear Virtual Link Received for VPI 0x%x" + " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); + + vport = lpfc_find_vport_by_vpid(phba, + acqe_fip->index); + ndlp = lpfc_sli4_perform_vport_cvl(vport); + if (!ndlp) + break; + active_vlink_present = 0; + + vports = lpfc_create_vport_work_array(phba); + if (vports) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; + i++) { + if ((!(vports[i]->fc_flag & + FC_VPORT_CVL_RCVD)) && + (vports[i]->port_state > LPFC_FDISC)) { + active_vlink_present = 1; + break; + } + } + lpfc_destroy_vport_work_array(phba, vports); + } + + /* + * Don't re-instantiate if vport is marked for deletion. + * If we are here first then vport_delete is going to wait + * for discovery to complete. + */ + if (!(vport->load_flag & FC_UNLOADING) && + active_vlink_present) { + /* + * If there are other active VLinks present, + * re-instantiate the Vlink using FDISC. + */ + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_DELAY_TMO; + spin_unlock_irq(&ndlp->lock); + ndlp->nlp_last_elscmd = ELS_CMD_FDISC; + vport->port_state = LPFC_FDISC; + } else { + /* + * Otherwise, we request port to rediscover + * the entire FCF table for a fast recovery + * from possible case that the current FCF + * is no longer valid if we are not already + * in the FCF failover process. + */ + spin_lock_irq(&phba->hbalock); + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { + spin_unlock_irq(&phba->hbalock); + break; + } + /* Mark the fast failover process in progress */ + phba->fcf.fcf_flag |= FCF_ACVL_DISC; + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | + LOG_DISCOVERY, + "2773 Start FCF failover per CVL, " + "evt_tag:x%x\n", acqe_fip->event_tag); + rc = lpfc_sli4_redisc_fcf_table(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | + LOG_TRACE_EVENT, + "2774 Issue FCF rediscover " + "mailbox command failed, " + "through to CVL event\n"); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; + spin_unlock_irq(&phba->hbalock); + /* + * Last resort will be re-try on the + * the current registered FCF entry. + */ + lpfc_retry_pport_discovery(phba); + } else + /* + * Reset FCF roundrobin bmask for new + * discovery. + */ + lpfc_sli4_clear_fcf_rr_bmask(phba); + } + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0288 Unknown FCoE event type 0x%x event tag " + "0x%x\n", event_type, acqe_fip->event_tag); + break; + } +} + +/** + * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event + * @phba: pointer to lpfc hba data structure. + * @acqe_dcbx: pointer to the async dcbx completion queue entry. + * + * This routine is to handle the SLI4 asynchronous dcbx event. + **/ +static void +lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, + struct lpfc_acqe_dcbx *acqe_dcbx) +{ + phba->fc_eventTag = acqe_dcbx->event_tag; + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0290 The SLI4 DCBX asynchronous event is not " + "handled yet\n"); +} + +/** + * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event + * @phba: pointer to lpfc hba data structure. + * @acqe_grp5: pointer to the async grp5 completion queue entry. + * + * This routine is to handle the SLI4 asynchronous grp5 event. 
A grp5 event + * is an asynchronous notified of a logical link speed change. The Port + * reports the logical link speed in units of 10Mbps. + **/ +static void +lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, + struct lpfc_acqe_grp5 *acqe_grp5) +{ + uint16_t prev_ll_spd; + + phba->fc_eventTag = acqe_grp5->event_tag; + phba->fcoe_eventtag = acqe_grp5->event_tag; + prev_ll_spd = phba->sli4_hba.link_state.logical_speed; + phba->sli4_hba.link_state.logical_speed = + (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2789 GRP5 Async Event: Updating logical link speed " + "from %dMbps to %dMbps\n", prev_ll_spd, + phba->sli4_hba.link_state.logical_speed); +} + +/** + * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event + * @phba: pointer to lpfc hba data structure. + * + * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event + * is an asynchronous notification of a request to reset CM stats. + **/ +static void +lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba) +{ + if (!phba->cgn_i) + return; + lpfc_init_congestion_stat(phba); +} + +/** + * lpfc_cgn_params_val - Validate FW congestion parameters. + * @phba: pointer to lpfc hba data structure. + * @p_cfg_param: pointer to FW provided congestion parameters. + * + * This routine validates the congestion parameters passed + * by the FW to the driver via an ACQE event. + **/ +static void +lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param) +{ + spin_lock_irq(&phba->hbalock); + + if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF, + LPFC_CFG_MONITOR)) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6225 CMF mode param out of range: %d\n", + p_cfg_param->cgn_param_mode); + p_cfg_param->cgn_param_mode = LPFC_CFG_OFF; + } + + spin_unlock_irq(&phba->hbalock); +} + +static const char * const lpfc_cmf_mode_to_str[] = { + "OFF", + "MANAGED", + "MONITOR", +}; + +/** + * lpfc_cgn_params_parse - Process a FW cong parm change event + * @phba: pointer to lpfc hba data structure. + * @p_cgn_param: pointer to a data buffer with the FW cong params. + * @len: the size of pdata in bytes. + * + * This routine validates the congestion management buffer signature + * from the FW, validates the contents and makes corrections for + * valid, in-range values. If the signature magic is correct and + * after parameter validation, the contents are copied to the driver's + * @phba structure. If the magic is incorrect, an error message is + * logged. + **/ +static void +lpfc_cgn_params_parse(struct lpfc_hba *phba, + struct lpfc_cgn_param *p_cgn_param, uint32_t len) +{ + struct lpfc_cgn_info *cp; + uint32_t crc, oldmode; + char acr_string[4] = {0}; + + /* Make sure the FW has encoded the correct magic number to + * validate the congestion parameter in FW memory. 
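+	 * Only a buffer whose cgn_param_magic equals LPFC_CFG_PARAM_MAGIC_NUM
+	 * is parsed; anything else is logged as having the wrong magic and
+	 * discarded.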
+ */ + if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, + "4668 FW cgn parm buffer data: " + "magic 0x%x version %d mode %d " + "level0 %d level1 %d " + "level2 %d byte13 %d " + "byte14 %d byte15 %d " + "byte11 %d byte12 %d activeMode %d\n", + p_cgn_param->cgn_param_magic, + p_cgn_param->cgn_param_version, + p_cgn_param->cgn_param_mode, + p_cgn_param->cgn_param_level0, + p_cgn_param->cgn_param_level1, + p_cgn_param->cgn_param_level2, + p_cgn_param->byte13, + p_cgn_param->byte14, + p_cgn_param->byte15, + p_cgn_param->byte11, + p_cgn_param->byte12, + phba->cmf_active_mode); + + oldmode = phba->cmf_active_mode; + + /* Any parameters out of range are corrected to defaults + * by this routine. No need to fail. + */ + lpfc_cgn_params_val(phba, p_cgn_param); + + /* Parameters are verified, move them into driver storage */ + spin_lock_irq(&phba->hbalock); + memcpy(&phba->cgn_p, p_cgn_param, + sizeof(struct lpfc_cgn_param)); + + /* Update parameters in congestion info buffer now */ + if (phba->cgn_i) { + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; + cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; + cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; + cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, + LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(crc); + } + spin_unlock_irq(&phba->hbalock); + + phba->cmf_active_mode = phba->cgn_p.cgn_param_mode; + + switch (oldmode) { + case LPFC_CFG_OFF: + if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) { + /* Turning CMF on */ + lpfc_cmf_start(phba); + + if (phba->link_state >= LPFC_LINK_UP) { + phba->cgn_reg_fpin = + phba->cgn_init_reg_fpin; + phba->cgn_reg_signal = + phba->cgn_init_reg_signal; + lpfc_issue_els_edc(phba->pport, 0); + } + } + break; + case LPFC_CFG_MANAGED: + switch (phba->cgn_p.cgn_param_mode) { + case LPFC_CFG_OFF: + /* Turning CMF off */ + lpfc_cmf_stop(phba); + if (phba->link_state >= LPFC_LINK_UP) + lpfc_issue_els_edc(phba->pport, 0); + break; + case LPFC_CFG_MONITOR: + phba->cmf_max_bytes_per_interval = + phba->cmf_link_byte_count; + + /* Resume blocked IO - unblock on workqueue */ + queue_work(phba->wq, + &phba->unblock_request_work); + break; + } + break; + case LPFC_CFG_MONITOR: + switch (phba->cgn_p.cgn_param_mode) { + case LPFC_CFG_OFF: + /* Turning CMF off */ + lpfc_cmf_stop(phba); + if (phba->link_state >= LPFC_LINK_UP) + lpfc_issue_els_edc(phba->pport, 0); + break; + case LPFC_CFG_MANAGED: + lpfc_cmf_signal_init(phba); + break; + } + break; + } + if (oldmode != LPFC_CFG_OFF || + oldmode != phba->cgn_p.cgn_param_mode) { + if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED) + scnprintf(acr_string, sizeof(acr_string), "%u", + phba->cgn_p.cgn_param_level0); + else + scnprintf(acr_string, sizeof(acr_string), "NA"); + + dev_info(&phba->pcidev->dev, "%d: " + "4663 CMF: Mode %s acr %s\n", + phba->brd_no, + lpfc_cmf_mode_to_str + [phba->cgn_p.cgn_param_mode], + acr_string); + } + } else { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "4669 FW cgn parm buf wrong magic 0x%x " + "version %d\n", p_cgn_param->cgn_param_magic, + p_cgn_param->cgn_param_version); + } +} + +/** + * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters. + * @phba: pointer to lpfc hba data structure. 
+ * + * This routine issues a read_object mailbox command to + * get the congestion management parameters from the FW + * parses it and updates the driver maintained values. + * + * Returns + * 0 if the object was empty + * -Eval if an error was encountered + * Count if bytes were read from object + **/ +int +lpfc_sli4_cgn_params_read(struct lpfc_hba *phba) +{ + int ret = 0; + struct lpfc_cgn_param *p_cgn_param = NULL; + u32 *pdata = NULL; + u32 len = 0; + + /* Find out if the FW has a new set of congestion parameters. */ + len = sizeof(struct lpfc_cgn_param); + pdata = kzalloc(len, GFP_KERNEL); + if (!pdata) + return -ENOMEM; + ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME, + pdata, len); + + /* 0 means no data. A negative means error. A positive means + * bytes were copied. + */ + if (!ret) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "4670 CGN RD OBJ returns no data\n"); + goto rd_obj_err; + } else if (ret < 0) { + /* Some error. Just exit and return it to the caller.*/ + goto rd_obj_err; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, + "6234 READ CGN PARAMS Successful %d\n", len); + + /* Parse data pointer over len and update the phba congestion + * parameters with values passed back. The receive rate values + * may have been altered in FW, but take no action here. + */ + p_cgn_param = (struct lpfc_cgn_param *)pdata; + lpfc_cgn_params_parse(phba, p_cgn_param, len); + + rd_obj_err: + kfree(pdata); + return ret; +} + +/** + * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event + * @phba: pointer to lpfc hba data structure. + * + * The FW generated Async ACQE SLI event calls this routine when + * the event type is an SLI Internal Port Event and the Event Code + * indicates a change to the FW maintained congestion parameters. + * + * This routine executes a Read_Object mailbox call to obtain the + * current congestion parameters maintained in FW and corrects + * the driver's active congestion parameters. + * + * The acqe event is not passed because there is no further data + * required. + * + * Returns nonzero error if event processing encountered an error. + * Zero otherwise for success. + **/ +static int +lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba) +{ + int ret = 0; + + if (!phba->sli4_hba.pc_sli4_params.cmf) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "4664 Cgn Evt when E2E off. Drop event\n"); + return -EACCES; + } + + /* If the event is claiming an empty object, it's ok. A write + * could have cleared it. Only error is a negative return + * status. + */ + ret = lpfc_sli4_cgn_params_read(phba); + if (ret < 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "4667 Error reading Cgn Params (%d)\n", + ret); + } else if (!ret) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "4673 CGN Event empty object.\n"); + } + return ret; +} + +/** + * lpfc_sli4_async_event_proc - Process all the pending asynchronous event + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked by the worker thread to process all the pending + * SLI4 asynchronous events. 
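+ * Events are drained from the sp_asynce_work_queue and dispatched by their
+ * trailer code (LINK, FCOE, DCBX, GRP5, FC, SLI or CMSTAT); each completed
+ * cq_event is then released back to the free pool.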
+ **/ +void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event; + unsigned long iflags; + + /* First, declare the async event has been handled */ + spin_lock_irqsave(&phba->hbalock, iflags); + phba->hba_flag &= ~ASYNC_EVENT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + + /* Now, handle all the async events */ + spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); + while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { + list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, + cq_event, struct lpfc_cq_event, list); + spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, + iflags); + + /* Process the asynchronous event */ + switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { + case LPFC_TRAILER_CODE_LINK: + lpfc_sli4_async_link_evt(phba, + &cq_event->cqe.acqe_link); + break; + case LPFC_TRAILER_CODE_FCOE: + lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); + break; + case LPFC_TRAILER_CODE_DCBX: + lpfc_sli4_async_dcbx_evt(phba, + &cq_event->cqe.acqe_dcbx); + break; + case LPFC_TRAILER_CODE_GRP5: + lpfc_sli4_async_grp5_evt(phba, + &cq_event->cqe.acqe_grp5); + break; + case LPFC_TRAILER_CODE_FC: + lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); + break; + case LPFC_TRAILER_CODE_SLI: + lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); + break; + case LPFC_TRAILER_CODE_CMSTAT: + lpfc_sli4_async_cmstat_evt(phba); + break; + default: + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "1804 Invalid asynchronous event code: " + "x%x\n", bf_get(lpfc_trailer_code, + &cq_event->cqe.mcqe_cmpl)); + break; + } + + /* Free the completion event processed to the free pool */ + lpfc_sli4_cq_event_release(phba, cq_event); + spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); + } + spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); +} + +/** + * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked by the worker thread to process FCF table + * rediscovery pending completion event. + **/ +void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) +{ + int rc; + + spin_lock_irq(&phba->hbalock); + /* Clear FCF rediscovery timeout event */ + phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; + /* Clear driver fast failover FCF record flag */ + phba->fcf.failover_rec.flag = 0; + /* Set state for FCF fast failover */ + phba->fcf.fcf_flag |= FCF_REDISC_FOV; + spin_unlock_irq(&phba->hbalock); + + /* Scan FCF table from the first entry to re-discover SAN */ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2777 Start post-quiescent FCF table scan\n"); + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); + if (rc) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2747 Issue FCF scan read FCF mailbox " + "command failed 0x%x\n", rc); +} + +/** + * lpfc_api_table_setup - Set up per hba pci-device group func api jump table + * @phba: pointer to lpfc hba data structure. + * @dev_grp: The HBA PCI-Device group number. + * + * This routine is invoked to set up the per HBA PCI-Device group function + * API jump table entries. 
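+ * The INIT, SCSI, SLI and MBOX API jump tables are set up in turn; a
+ * failure in any of them is returned as -ENODEV.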
+ * + * Return: 0 if success, otherwise -ENODEV + **/ +int +lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) +{ + int rc; + + /* Set up lpfc PCI-device group */ + phba->pci_dev_grp = dev_grp; + + /* The LPFC_PCI_DEV_OC uses SLI4 */ + if (dev_grp == LPFC_PCI_DEV_OC) + phba->sli_rev = LPFC_SLI_REV4; + + /* Set up device INIT API function jump table */ + rc = lpfc_init_api_table_setup(phba, dev_grp); + if (rc) + return -ENODEV; + /* Set up SCSI API function jump table */ + rc = lpfc_scsi_api_table_setup(phba, dev_grp); + if (rc) + return -ENODEV; + /* Set up SLI API function jump table */ + rc = lpfc_sli_api_table_setup(phba, dev_grp); + if (rc) + return -ENODEV; + /* Set up MBOX API function jump table */ + rc = lpfc_mbox_api_table_setup(phba, dev_grp); + if (rc) + return -ENODEV; + + return 0; +} + +/** + * lpfc_log_intr_mode - Log the active interrupt mode + * @phba: pointer to lpfc hba data structure. + * @intr_mode: active interrupt mode adopted. + * + * This routine it invoked to log the currently used active interrupt mode + * to the device. + **/ +static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) +{ + switch (intr_mode) { + case 0: + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0470 Enable INTx interrupt mode.\n"); + break; + case 1: + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0481 Enabled MSI interrupt mode.\n"); + break; + case 2: + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0480 Enabled MSI-X interrupt mode.\n"); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0482 Illegal interrupt mode.\n"); + break; + } + return; +} + +/** + * lpfc_enable_pci_dev - Enable a generic PCI device. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to enable the PCI device that is common to all + * PCI devices. + * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_enable_pci_dev(struct lpfc_hba *phba) +{ + struct pci_dev *pdev; + + /* Obtain PCI device reference */ + if (!phba->pcidev) + goto out_error; + else + pdev = phba->pcidev; + /* Enable PCI device */ + if (pci_enable_device_mem(pdev)) + goto out_error; + /* Request PCI resource for the device */ + if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) + goto out_disable_device; + /* Set up device as PCI master and save state for EEH */ + pci_set_master(pdev); + pci_try_set_mwi(pdev); + pci_save_state(pdev); + + /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ + if (pci_is_pcie(pdev)) + pdev->needs_freset = 1; + + return 0; + +out_disable_device: + pci_disable_device(pdev); +out_error: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1401 Failed to enable pci device\n"); + return -ENODEV; +} + +/** + * lpfc_disable_pci_dev - Disable a generic PCI device. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to disable the PCI device that is common to all + * PCI devices. + **/ +static void +lpfc_disable_pci_dev(struct lpfc_hba *phba) +{ + struct pci_dev *pdev; + + /* Obtain PCI device reference */ + if (!phba->pcidev) + return; + else + pdev = phba->pcidev; + /* Release PCI resource and disable PCI device */ + pci_release_mem_regions(pdev); + pci_disable_device(pdev); + + return; +} + +/** + * lpfc_reset_hba - Reset a hba + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to reset a hba device. It brings the HBA + * offline, performs a board restart, and then brings the board back + * online. 
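lpfc_enable_pci_dev() above is essentially the canonical probe-time bring-up: enable the device's memory resources, claim its BAR regions, enable bus mastering, and save config space for later error recovery. A hedged sketch of the same sequence with a hypothetical "demo" driver name (not lpfc code):

#include <linux/pci.h>

static int demo_enable_pci(struct pci_dev *pdev)
{
	int rc;

	rc = pci_enable_device_mem(pdev);	/* MEM resources only */
	if (rc)
		return rc;

	rc = pci_request_mem_regions(pdev, "demo");	/* claim MEM BARs */
	if (rc)
		goto out_disable;

	pci_set_master(pdev);		/* allow the device to DMA */
	pci_try_set_mwi(pdev);		/* best effort, may fail */
	pci_save_state(pdev);		/* needed for AER/EEH recovery */
	return 0;

out_disable:
	pci_disable_device(pdev);
	return rc;
}

The matching teardown mirrors lpfc_disable_pci_dev(): pci_release_mem_regions() followed by pci_disable_device().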
The lpfc_offline calls lpfc_sli_hba_down which will clean up
+ * any outstanding mailbox commands.
+ **/
+void
+lpfc_reset_hba(struct lpfc_hba *phba)
+{
+	int rc = 0;
+
+	/* If resets are disabled then set error state and return. */
+	if (!phba->cfg_enable_hba_reset) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return;
+	}
+
+	/* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
+	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
+		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+	} else {
+		if (test_bit(MBX_TMO_ERR, &phba->bit_flags)) {
+			/* Perform a PCI function reset to start from clean */
+			rc = lpfc_pci_function_reset(phba);
+			lpfc_els_flush_all_cmd(phba);
+		}
+		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+		lpfc_sli_flush_io_rings(phba);
+	}
+	lpfc_offline(phba);
+	clear_bit(MBX_TMO_ERR, &phba->bit_flags);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"8888 PCI function reset failed rc %x\n",
+				rc);
+	} else {
+		lpfc_sli_brdrestart(phba);
+		lpfc_online(phba);
+		lpfc_unblock_mgmt_io(phba);
+	}
+}
+
+/**
+ * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function reads the PCI SR-IOV extended capability to determine how
+ * many virtual functions the physical function can support. It returns 0
+ * if the device does not advertise the SR-IOV capability, otherwise the
+ * TotalVFs value reported by the device.
+ **/
+uint16_t
+lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev = phba->pcidev;
+	uint16_t nr_virtfn;
+	int pos;
+
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+	if (pos == 0)
+		return 0;
+
+	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
+	return nr_virtfn;
+}
+
+/**
+ * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ * @nr_vfn: number of virtual functions to be enabled.
+ *
+ * This function enables the PCI SR-IOV virtual functions to a physical
+ * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
+ * enable the number of virtual functions to the physical function. As
+ * not all devices support SR-IOV, the return code from the pci_enable_sriov()
+ * API call is not treated as an error condition for most devices.
+ **/
+int
+lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
+{
+	struct pci_dev *pdev = phba->pcidev;
+	uint16_t max_nr_vfn;
+	int rc;
+
+	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
+	if (nr_vfn > max_nr_vfn) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"3057 Requested vfs (%d) greater than "
+				"supported vfs (%d)", nr_vfn, max_nr_vfn);
+		return -EINVAL;
+	}
+
+	rc = pci_enable_sriov(pdev, nr_vfn);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2806 Failed to enable sriov on this device "
+				"with vfn number nr_vf:%d, rc:%d\n",
+				nr_vfn, rc);
+	} else
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2807 Successful enable sriov on this device "
+				"with vfn number nr_vf:%d\n", nr_vfn);
+	return rc;
+}
+
+static void
+lpfc_unblock_requests_work(struct work_struct *work)
+{
+	struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
+					     unblock_request_work);
+
+	lpfc_unblock_requests(phba);
+}
+
+/**
+ * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
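The two SR-IOV helpers above first read TotalVFs from the SR-IOV extended capability and then ask the PCI core to enable that many virtual functions. A compact sketch of those two steps, assuming hypothetical demo_* wrappers and a device that may or may not expose the capability:

#include <linux/errno.h>
#include <linux/pci.h>

/* TotalVFs from the SR-IOV capability, or 0 if the device has none. */
static u16 demo_total_vfs(struct pci_dev *pdev)
{
	u16 total = 0;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos)
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &total);
	return total;
}

static int demo_enable_vfs(struct pci_dev *pdev, int nr_vfn)
{
	if (nr_vfn > demo_total_vfs(pdev))
		return -EINVAL;
	return pci_enable_sriov(pdev, nr_vfn);	/* 0 on success */
}

The PCI core also provides pci_sriov_get_totalvfs(), which performs the same capability read.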
+ * + * This routine is invoked to set up the driver internal resources before the + * device specific resource setup to support the HBA device it attached to. + * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + + /* + * Driver resources common to all SLI revisions + */ + atomic_set(&phba->fast_event_count, 0); + atomic_set(&phba->dbg_log_idx, 0); + atomic_set(&phba->dbg_log_cnt, 0); + atomic_set(&phba->dbg_log_dmping, 0); + spin_lock_init(&phba->hbalock); + + /* Initialize port_list spinlock */ + spin_lock_init(&phba->port_list_lock); + INIT_LIST_HEAD(&phba->port_list); + + INIT_LIST_HEAD(&phba->work_list); + + /* Initialize the wait queue head for the kernel thread */ + init_waitqueue_head(&phba->work_waitq); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1403 Protocols supported %s %s %s\n", + ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? + "SCSI" : " "), + ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? + "NVME" : " "), + (phba->nvmet_support ? "NVMET" : " ")); + + /* Initialize the IO buffer list used by driver for SLI3 SCSI */ + spin_lock_init(&phba->scsi_buf_list_get_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); + spin_lock_init(&phba->scsi_buf_list_put_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + + /* Initialize the fabric iocb list */ + INIT_LIST_HEAD(&phba->fabric_iocb_list); + + /* Initialize list to save ELS buffers */ + INIT_LIST_HEAD(&phba->elsbuf); + + /* Initialize FCF connection rec list */ + INIT_LIST_HEAD(&phba->fcf_conn_rec_list); + + /* Initialize OAS configuration list */ + spin_lock_init(&phba->devicelock); + INIT_LIST_HEAD(&phba->luns); + + /* MBOX heartbeat timer */ + timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); + /* Fabric block timer */ + timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); + /* EA polling mode timer */ + timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); + /* Heartbeat timer */ + timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); + + INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); + + INIT_DELAYED_WORK(&phba->idle_stat_delay_work, + lpfc_idle_stat_delay_work); + INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work); + return 0; +} + +/** + * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the driver internal resources specific to + * support the SLI-3 HBA device it attached to. 
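Phase-1 setup above mostly wires static infrastructure: spinlocks, list heads, timers and deferred work. As a reminder of the API shapes it relies on, here is a small stand-alone sketch with hypothetical demo_* names (the callbacks and intervals are invented, not lpfc behavior):

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct timer_list hb_timer;
	struct delayed_work poll_work;
};

static void demo_poll_work(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx,
					    poll_work.work);
	/* process context: sleeping is allowed here */
	(void)ctx;
}

static void demo_hb_timeout(struct timer_list *t)
{
	struct demo_ctx *ctx = from_timer(ctx, t, hb_timer);

	/* timer callbacks run in atomic context, so defer the real work */
	schedule_delayed_work(&ctx->poll_work, msecs_to_jiffies(100));
}

static void demo_init(struct demo_ctx *ctx)
{
	timer_setup(&ctx->hb_timer, demo_hb_timeout, 0);
	INIT_DELAYED_WORK(&ctx->poll_work, demo_poll_work);
	mod_timer(&ctx->hb_timer, jiffies + msecs_to_jiffies(1000));
}

Teardown (not shown) would pair this with timer_shutdown_sync() and cancel_delayed_work_sync().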
+ * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) +{ + int rc, entry_sz; + + /* + * Initialize timers used by driver + */ + + /* FCP polling mode timer */ + timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); + + /* Host attention work mask setup */ + phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); + phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); + + /* Get all the module params for configuring this host */ + lpfc_get_cfgparam(phba); + /* Set up phase-1 common device driver resources */ + + rc = lpfc_setup_driver_resource_phase1(phba); + if (rc) + return -ENODEV; + + if (!phba->sli.sli3_ring) + phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, + sizeof(struct lpfc_sli_ring), + GFP_KERNEL); + if (!phba->sli.sli3_ring) + return -ENOMEM; + + /* + * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size + * used to create the sg_dma_buf_pool must be dynamically calculated. + */ + + if (phba->sli_rev == LPFC_SLI_REV4) + entry_sz = sizeof(struct sli4_sge); + else + entry_sz = sizeof(struct ulp_bde64); + + /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ + if (phba->cfg_enable_bg) { + /* + * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, + * the FCP rsp, and a BDE for each. Sice we have no control + * over how many protection data segments the SCSI Layer + * will hand us (ie: there could be one for every block + * in the IO), we just allocate enough BDEs to accomidate + * our max amount and we need to limit lpfc_sg_seg_cnt to + * minimize the risk of running out. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + (LPFC_MAX_SG_SEG_CNT * entry_sz); + + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) + phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; + + /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ + phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; + } else { + /* + * The scsi_buf for a regular I/O will hold the FCP cmnd, + * the FCP rsp, a BDE for each, and a BDE for up to + * cfg_sg_seg_cnt data segments. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * entry_sz); + + /* Total BDEs in BPL for scsi_sg_list */ + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, + phba->cfg_total_seg_cnt); + + phba->max_vpi = LPFC_MAX_VPI; + /* This will be set to correct value after config_port mbox */ + phba->max_vports = 0; + + /* + * Initialize the SLI Layer to run with lpfc HBAs. + */ + lpfc_sli_setup(phba); + lpfc_sli_queue_init(phba); + + /* Allocate device driver memory */ + if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) + return -ENOMEM; + + phba->lpfc_sg_dma_buf_pool = + dma_pool_create("lpfc_sg_dma_buf_pool", + &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, + BPL_ALIGN_SZ, 0); + + if (!phba->lpfc_sg_dma_buf_pool) + goto fail_free_mem; + + phba->lpfc_cmd_rsp_buf_pool = + dma_pool_create("lpfc_cmd_rsp_buf_pool", + &phba->pcidev->dev, + sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp), + BPL_ALIGN_SZ, 0); + + if (!phba->lpfc_cmd_rsp_buf_pool) + goto fail_free_dma_buf_pool; + + /* + * Enable sr-iov virtual functions if supported and configured + * through the module parameter. 
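The dma_pool_create() calls above build fixed-size pools whose element size is the cfg_sg_dma_buf_size computed just before. The general lifecycle of such a pool, sketched with placeholder sizes and names (not lpfc code):

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int demo_pool_roundtrip(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *buf;

	/* 512-byte elements, 512-byte aligned, no boundary restriction */
	pool = dma_pool_create("demo_pool", dev, 512, 512, 0);
	if (!pool)
		return -ENOMEM;

	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!buf) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* 'dma' is the address handed to the hardware, 'buf' the CPU view */
	dma_pool_free(pool, buf, dma);
	dma_pool_destroy(pool);
	return 0;
}

All outstanding elements must be returned with dma_pool_free() before the pool is destroyed.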
+ */ + if (phba->cfg_sriov_nr_virtfn > 0) { + rc = lpfc_sli_probe_sriov_nr_virtfn(phba, + phba->cfg_sriov_nr_virtfn); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "2808 Requested number of SR-IOV " + "virtual functions (%d) is not " + "supported\n", + phba->cfg_sriov_nr_virtfn); + phba->cfg_sriov_nr_virtfn = 0; + } + } + + return 0; + +fail_free_dma_buf_pool: + dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); + phba->lpfc_sg_dma_buf_pool = NULL; +fail_free_mem: + lpfc_mem_free(phba); + return -ENOMEM; +} + +/** + * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the driver internal resources set up + * specific for supporting the SLI-3 HBA device it attached to. + **/ +static void +lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) +{ + /* Free device driver memory allocated */ + lpfc_mem_free_all(phba); + + return; +} + +/** + * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the driver internal resources specific to + * support the SLI-4 HBA device it attached to. + * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + MAILBOX_t *mb; + int rc, i, max_buf_size; + int longs; + int extra; + uint64_t wwn; + u32 if_type; + u32 if_fam; + + phba->sli4_hba.num_present_cpu = lpfc_present_cpu; + phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; + phba->sli4_hba.curr_disp_cpu = 0; + + /* Get all the module params for configuring this host */ + lpfc_get_cfgparam(phba); + + /* Set up phase-1 common device driver resources */ + rc = lpfc_setup_driver_resource_phase1(phba); + if (rc) + return -ENODEV; + + /* Before proceed, wait for POST done and device ready */ + rc = lpfc_sli4_post_status_check(phba); + if (rc) + return -ENODEV; + + /* Allocate all driver workqueues here */ + + /* The lpfc_wq workqueue for deferred irq use */ + phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); + if (!phba->wq) + return -ENOMEM; + + /* + * Initialize timers used by driver + */ + + timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); + + /* FCF rediscover timer */ + timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); + + /* CMF congestion timer */ + hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + phba->cmf_timer.function = lpfc_cmf_timer; + /* CMF 1 minute stats collection timer */ + hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + phba->cmf_stats_timer.function = lpfc_cmf_stats_timer; + + /* + * Control structure for handling external multi-buffer mailbox + * command pass-through. + */ + memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, + sizeof(struct lpfc_mbox_ext_buf_ctx)); + INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); + + phba->max_vpi = LPFC_MAX_VPI; + + /* This will be set to correct value after the read_config mbox */ + phba->max_vports = 0; + + /* Program the default value of vlan_id and fc_map */ + phba->valid_vlan = 0; + phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; + phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; + phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; + + /* + * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands + * we will associate a new ring, for each EQ/CQ/WQ tuple. + * The WQ create will allocate the ring. 
+ */ + + /* Initialize buffer queue management fields */ + INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); + phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; + phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; + + /* for VMID idle timeout if VMID is enabled */ + if (lpfc_is_vmid_enabled(phba)) + timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0); + + /* + * Initialize the SLI Layer to run with lpfc SLI4 HBAs. + */ + /* Initialize the Abort buffer list used by driver */ + spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + /* Initialize the Abort nvme buffer list used by driver */ + spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); + spin_lock_init(&phba->sli4_hba.t_active_list_lock); + INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); + } + + /* This abort list used by worker thread */ + spin_lock_init(&phba->sli4_hba.sgl_list_lock); + spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); + spin_lock_init(&phba->sli4_hba.asynce_list_lock); + spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); + + /* + * Initialize driver internal slow-path work queues + */ + + /* Driver internel slow-path CQ Event pool */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); + /* Response IOCB work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); + /* Asynchronous event CQ Event work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); + /* Slow-path XRI aborted CQ Event work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); + /* Receive queue CQ Event work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); + + /* Initialize extent block lists. */ + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); + INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); + + /* Initialize mboxq lists. If the early init routines fail + * these lists need to be correctly initialized. + */ + INIT_LIST_HEAD(&phba->sli.mboxq); + INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); + + /* initialize optic_state to 0xFF */ + phba->sli4_hba.lnk_info.optic_state = 0xff; + + /* Allocate device driver memory */ + rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); + if (rc) + goto out_destroy_workqueue; + + /* IF Type 2 ports get initialized now. */ + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= + LPFC_SLI_INTF_IF_TYPE_2) { + rc = lpfc_pci_function_reset(phba); + if (unlikely(rc)) { + rc = -ENODEV; + goto out_free_mem; + } + phba->temp_sensor_support = 1; + } + + /* Create the bootstrap mailbox command */ + rc = lpfc_create_bootstrap_mbox(phba); + if (unlikely(rc)) + goto out_free_mem; + + /* Set up the host's endian order with the device. */ + rc = lpfc_setup_endian_order(phba); + if (unlikely(rc)) + goto out_free_bsmbx; + + /* Set up the hba's configuration parameters. */ + rc = lpfc_sli4_read_config(phba); + if (unlikely(rc)) + goto out_free_bsmbx; + + if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) { + /* Right now the link is down, if FA-PWWN is configured the + * firmware will try FLOGI before the driver gets a link up. + * If it fails, the driver should get a MISCONFIGURED async + * event which will clear this flag. 
The only notification + * the driver gets is if it fails, if it succeeds there is no + * notification given. Assume success. + */ + phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC; + } + + rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); + if (unlikely(rc)) + goto out_free_bsmbx; + + /* IF Type 0 ports get initialized now. */ + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_0) { + rc = lpfc_pci_function_reset(phba); + if (unlikely(rc)) + goto out_free_bsmbx; + } + + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!mboxq) { + rc = -ENOMEM; + goto out_free_bsmbx; + } + + /* Check for NVMET being configured */ + phba->nvmet_support = 0; + if (lpfc_enable_nvmet_cnt) { + + /* First get WWN of HBA instance */ + lpfc_read_nv(phba, mboxq); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "6016 Mailbox failed , mbxCmd x%x " + "READ_NV, mbxStatus x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + bf_get(lpfc_mqe_status, &mboxq->u.mqe)); + mempool_free(mboxq, phba->mbox_mem_pool); + rc = -EIO; + goto out_free_bsmbx; + } + mb = &mboxq->u.mb; + memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, + sizeof(uint64_t)); + wwn = cpu_to_be64(wwn); + phba->sli4_hba.wwnn.u.name = wwn; + memcpy(&wwn, (char *)mb->un.varRDnvp.portname, + sizeof(uint64_t)); + /* wwn is WWPN of HBA instance */ + wwn = cpu_to_be64(wwn); + phba->sli4_hba.wwpn.u.name = wwn; + + /* Check to see if it matches any module parameter */ + for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { + if (wwn == lpfc_enable_nvmet[i]) { +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + if (lpfc_nvmet_mem_alloc(phba)) + break; + + phba->nvmet_support = 1; /* a match */ + + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "6017 NVME Target %016llx\n", + wwn); +#else + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "6021 Can't enable NVME Target." + " NVME_TARGET_FC infrastructure" + " is not in kernel\n"); +#endif + /* Not supported for NVMET */ + phba->cfg_xri_rebalancing = 0; + if (phba->irq_chann_mode == NHT_MODE) { + phba->cfg_irq_chann = + phba->sli4_hba.num_present_cpu; + phba->cfg_hdw_queue = + phba->sli4_hba.num_present_cpu; + phba->irq_chann_mode = NORMAL_MODE; + } + break; + } + } + } + + lpfc_nvme_mod_param_dep(phba); + + /* + * Get sli4 parameters that override parameters from Port capabilities. + * If this call fails, it isn't critical unless the SLI4 parameters come + * back in conflict. + */ + rc = lpfc_get_sli4_parameters(phba, mboxq); + if (rc) { + if_type = bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf); + if_fam = bf_get(lpfc_sli_intf_sli_family, + &phba->sli4_hba.sli_intf); + if (phba->sli4_hba.extents_in_use && + phba->sli4_hba.rpi_hdrs_in_use) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2999 Unsupported SLI4 Parameters " + "Extents and RPI headers enabled.\n"); + if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && + if_fam == LPFC_SLI_INTF_FAMILY_BE2) { + mempool_free(mboxq, phba->mbox_mem_pool); + rc = -EIO; + goto out_free_bsmbx; + } + } + if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && + if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { + mempool_free(mboxq, phba->mbox_mem_pool); + rc = -EIO; + goto out_free_bsmbx; + } + } + + /* + * 1 for cmd, 1 for rsp, NVME adds an extra one + * for boundary conditions in its max_sgl_segment template. 
+ */ + extra = 2; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + extra++; + + /* + * It doesn't matter what family our adapter is in, we are + * limited to 2 Pages, 512 SGEs, for our SGL. + * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp + */ + max_buf_size = (2 * SLI4_PAGE_SIZE); + + /* + * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size + * used to create the sg_dma_buf_pool must be calculated. + */ + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { + /* Both cfg_enable_bg and cfg_external_dif code paths */ + + /* + * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, + * the FCP rsp, and a SGE. Sice we have no control + * over how many protection segments the SCSI Layer + * will hand us (ie: there could be one for every block + * in the IO), just allocate enough SGEs to accomidate + * our max amount and we need to limit lpfc_sg_seg_cnt + * to minimize the risk of running out. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + max_buf_size; + + /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ + phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; + + /* + * If supporting DIF, reduce the seg count for scsi to + * allow room for the DIF sges. + */ + if (phba->cfg_enable_bg && + phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) + phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; + else + phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; + + } else { + /* + * The scsi_buf for a regular I/O holds the FCP cmnd, + * the FCP rsp, a SGE for each, and a SGE for up to + * cfg_sg_seg_cnt data segments. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + extra) * + sizeof(struct sli4_sge)); + + /* Total SGEs for scsi_sg_list */ + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; + phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; + + /* + * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only + * need to post 1 page for the SGL. + */ + } + + if (phba->cfg_xpsgl && !phba->nvmet_support) + phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; + else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) + phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; + else + phba->cfg_sg_dma_buf_size = + SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); + + phba->border_sge_num = phba->cfg_sg_dma_buf_size / + sizeof(struct sli4_sge); + + /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. 
*/ + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { + lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, + "6300 Reducing NVME sg segment " + "cnt to %d\n", + LPFC_MAX_NVME_SEG_CNT); + phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; + } else + phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9087 sg_seg_cnt:%d dmabuf_size:%d " + "total:%d scsi:%d nvme:%d\n", + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, + phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, + phba->cfg_nvme_seg_cnt); + + if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) + i = phba->cfg_sg_dma_buf_size; + else + i = SLI4_PAGE_SIZE; + + phba->lpfc_sg_dma_buf_pool = + dma_pool_create("lpfc_sg_dma_buf_pool", + &phba->pcidev->dev, + phba->cfg_sg_dma_buf_size, + i, 0); + if (!phba->lpfc_sg_dma_buf_pool) { + rc = -ENOMEM; + goto out_free_bsmbx; + } + + phba->lpfc_cmd_rsp_buf_pool = + dma_pool_create("lpfc_cmd_rsp_buf_pool", + &phba->pcidev->dev, + sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp), + i, 0); + if (!phba->lpfc_cmd_rsp_buf_pool) { + rc = -ENOMEM; + goto out_free_sg_dma_buf; + } + + mempool_free(mboxq, phba->mbox_mem_pool); + + /* Verify OAS is supported */ + lpfc_sli4_oas_verify(phba); + + /* Verify RAS support on adapter */ + lpfc_sli4_ras_init(phba); + + /* Verify all the SLI4 queues */ + rc = lpfc_sli4_queue_verify(phba); + if (rc) + goto out_free_cmd_rsp_buf; + + /* Create driver internal CQE event pool */ + rc = lpfc_sli4_cq_event_pool_create(phba); + if (rc) + goto out_free_cmd_rsp_buf; + + /* Initialize sgl lists per host */ + lpfc_init_sgl_list(phba); + + /* Allocate and initialize active sgl array */ + rc = lpfc_init_active_sgl_array(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1430 Failed to initialize sgl list.\n"); + goto out_destroy_cq_event_pool; + } + rc = lpfc_sli4_init_rpi_hdrs(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1432 Failed to initialize rpi headers.\n"); + goto out_free_active_sgl; + } + + /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ + longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; + phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), + GFP_KERNEL); + if (!phba->fcf.fcf_rr_bmask) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2759 Failed allocate memory for FCF round " + "robin failover bmask\n"); + rc = -ENOMEM; + goto out_remove_rpi_hdrs; + } + + phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, + sizeof(struct lpfc_hba_eq_hdl), + GFP_KERNEL); + if (!phba->sli4_hba.hba_eq_hdl) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2572 Failed allocate memory for " + "fast-path per-EQ handle array\n"); + rc = -ENOMEM; + goto out_free_fcf_rr_bmask; + } + + phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, + sizeof(struct lpfc_vector_map_info), + GFP_KERNEL); + if (!phba->sli4_hba.cpu_map) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3327 Failed allocate memory for msi-x " + "interrupt vector mapping\n"); + rc = -ENOMEM; + goto out_free_hba_eq_hdl; + } + + phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); + if (!phba->sli4_hba.eq_info) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3321 Failed allocation for per_cpu stats\n"); + rc = -ENOMEM; + goto out_free_hba_cpu_map; + } + + phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu, + sizeof(*phba->sli4_hba.idle_stat), + 
GFP_KERNEL); + if (!phba->sli4_hba.idle_stat) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3390 Failed allocation for idle_stat\n"); + rc = -ENOMEM; + goto out_free_hba_eq_info; + } + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); + if (!phba->sli4_hba.c_stat) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3332 Failed allocating per cpu hdwq stats\n"); + rc = -ENOMEM; + goto out_free_hba_idle_stat; + } +#endif + + phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat); + if (!phba->cmf_stat) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3331 Failed allocating per cpu cgn stats\n"); + rc = -ENOMEM; + goto out_free_hba_hdwq_info; + } + + /* + * Enable sr-iov virtual functions if supported and configured + * through the module parameter. + */ + if (phba->cfg_sriov_nr_virtfn > 0) { + rc = lpfc_sli_probe_sriov_nr_virtfn(phba, + phba->cfg_sriov_nr_virtfn); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3020 Requested number of SR-IOV " + "virtual functions (%d) is not " + "supported\n", + phba->cfg_sriov_nr_virtfn); + phba->cfg_sriov_nr_virtfn = 0; + } + } + + return 0; + +out_free_hba_hdwq_info: +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + free_percpu(phba->sli4_hba.c_stat); +out_free_hba_idle_stat: +#endif + kfree(phba->sli4_hba.idle_stat); +out_free_hba_eq_info: + free_percpu(phba->sli4_hba.eq_info); +out_free_hba_cpu_map: + kfree(phba->sli4_hba.cpu_map); +out_free_hba_eq_hdl: + kfree(phba->sli4_hba.hba_eq_hdl); +out_free_fcf_rr_bmask: + kfree(phba->fcf.fcf_rr_bmask); +out_remove_rpi_hdrs: + lpfc_sli4_remove_rpi_hdrs(phba); +out_free_active_sgl: + lpfc_free_active_sgl(phba); +out_destroy_cq_event_pool: + lpfc_sli4_cq_event_pool_destroy(phba); +out_free_cmd_rsp_buf: + dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); + phba->lpfc_cmd_rsp_buf_pool = NULL; +out_free_sg_dma_buf: + dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); + phba->lpfc_sg_dma_buf_pool = NULL; +out_free_bsmbx: + lpfc_destroy_bootstrap_mbox(phba); +out_free_mem: + lpfc_mem_free(phba); +out_destroy_workqueue: + destroy_workqueue(phba->wq); + phba->wq = NULL; + return rc; +} + +/** + * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the driver internal resources set up + * specific for supporting the SLI-4 HBA device it attached to. + **/ +static void +lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) +{ + struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; + + free_percpu(phba->sli4_hba.eq_info); +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + free_percpu(phba->sli4_hba.c_stat); +#endif + free_percpu(phba->cmf_stat); + kfree(phba->sli4_hba.idle_stat); + + /* Free memory allocated for msi-x interrupt vector to CPU mapping */ + kfree(phba->sli4_hba.cpu_map); + phba->sli4_hba.num_possible_cpu = 0; + phba->sli4_hba.num_present_cpu = 0; + phba->sli4_hba.curr_disp_cpu = 0; + cpumask_clear(&phba->sli4_hba.irq_aff_mask); + + /* Free memory allocated for fast-path work queue handles */ + kfree(phba->sli4_hba.hba_eq_hdl); + + /* Free the allocated rpi headers. 
*/ + lpfc_sli4_remove_rpi_hdrs(phba); + lpfc_sli4_remove_rpis(phba); + + /* Free eligible FCF index bmask */ + kfree(phba->fcf.fcf_rr_bmask); + + /* Free the ELS sgl list */ + lpfc_free_active_sgl(phba); + lpfc_free_els_sgl_list(phba); + lpfc_free_nvmet_sgl_list(phba); + + /* Free the completion queue EQ event pool */ + lpfc_sli4_cq_event_release_all(phba); + lpfc_sli4_cq_event_pool_destroy(phba); + + /* Release resource identifiers. */ + lpfc_sli4_dealloc_resource_identifiers(phba); + + /* Free the bsmbx region. */ + lpfc_destroy_bootstrap_mbox(phba); + + /* Free the SLI Layer memory with SLI4 HBAs */ + lpfc_mem_free_all(phba); + + /* Free the current connect table */ + list_for_each_entry_safe(conn_entry, next_conn_entry, + &phba->fcf_conn_rec_list, list) { + list_del_init(&conn_entry->list); + kfree(conn_entry); + } + + return; +} + +/** + * lpfc_init_api_table_setup - Set up init api function jump table + * @phba: The hba struct for which this call is being executed. + * @dev_grp: The HBA PCI-Device group number. + * + * This routine sets up the device INIT interface API function jump table + * in @phba struct. + * + * Returns: 0 - success, -ENODEV - failure. + **/ +int +lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) +{ + phba->lpfc_hba_init_link = lpfc_hba_init_link; + phba->lpfc_hba_down_link = lpfc_hba_down_link; + phba->lpfc_selective_reset = lpfc_selective_reset; + switch (dev_grp) { + case LPFC_PCI_DEV_LP: + phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; + phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; + phba->lpfc_stop_port = lpfc_stop_port_s3; + break; + case LPFC_PCI_DEV_OC: + phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; + phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; + phba->lpfc_stop_port = lpfc_stop_port_s4; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1431 Invalid HBA PCI-device group: 0x%x\n", + dev_grp); + return -ENODEV; + } + return 0; +} + +/** + * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the driver internal resources after the + * device specific resource setup to support the HBA device it attached to. + * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) +{ + int error; + + /* Startup the kernel thread for this host adapter. */ + phba->worker_thread = kthread_run(lpfc_do_work, phba, + "lpfc_worker_%d", phba->brd_no); + if (IS_ERR(phba->worker_thread)) { + error = PTR_ERR(phba->worker_thread); + return error; + } + + return 0; +} + +/** + * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the driver internal resources set up after + * the device specific resource setup for supporting the HBA device it + * attached to. + **/ +static void +lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) +{ + if (phba->wq) { + destroy_workqueue(phba->wq); + phba->wq = NULL; + } + + /* Stop kernel worker thread */ + if (phba->worker_thread) + kthread_stop(phba->worker_thread); +} + +/** + * lpfc_free_iocb_list - Free iocb list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the driver's IOCB list and memory. 
+ **/ +void +lpfc_free_iocb_list(struct lpfc_hba *phba) +{ + struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; + + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(iocbq_entry, iocbq_next, + &phba->lpfc_iocb_list, list) { + list_del(&iocbq_entry->list); + kfree(iocbq_entry); + phba->total_iocbq_bufs--; + } + spin_unlock_irq(&phba->hbalock); + + return; +} + +/** + * lpfc_init_iocb_list - Allocate and initialize iocb list. + * @phba: pointer to lpfc hba data structure. + * @iocb_count: number of requested iocbs + * + * This routine is invoked to allocate and initizlize the driver's IOCB + * list and set up the IOCB tag array accordingly. + * + * Return codes + * 0 - successful + * other values - error + **/ +int +lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) +{ + struct lpfc_iocbq *iocbq_entry = NULL; + uint16_t iotag; + int i; + + /* Initialize and populate the iocb list per host. */ + INIT_LIST_HEAD(&phba->lpfc_iocb_list); + for (i = 0; i < iocb_count; i++) { + iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); + if (iocbq_entry == NULL) { + printk(KERN_ERR "%s: only allocated %d iocbs of " + "expected %d count. Unloading driver.\n", + __func__, i, iocb_count); + goto out_free_iocbq; + } + + iotag = lpfc_sli_next_iotag(phba, iocbq_entry); + if (iotag == 0) { + kfree(iocbq_entry); + printk(KERN_ERR "%s: failed to allocate IOTAG. " + "Unloading driver.\n", __func__); + goto out_free_iocbq; + } + iocbq_entry->sli4_lxritag = NO_XRI; + iocbq_entry->sli4_xritag = NO_XRI; + + spin_lock_irq(&phba->hbalock); + list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); + phba->total_iocbq_bufs++; + spin_unlock_irq(&phba->hbalock); + } + + return 0; + +out_free_iocbq: + lpfc_free_iocb_list(phba); + + return -ENOMEM; +} + +/** + * lpfc_free_sgl_list - Free a given sgl list. + * @phba: pointer to lpfc hba data structure. + * @sglq_list: pointer to the head of sgl list. + * + * This routine is invoked to free a give sgl list and memory. + **/ +void +lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) +{ + struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + + list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { + list_del(&sglq_entry->list); + lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); + kfree(sglq_entry); + } +} + +/** + * lpfc_free_els_sgl_list - Free els sgl list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the driver's els sgl list and memory. + **/ +static void +lpfc_free_els_sgl_list(struct lpfc_hba *phba) +{ + LIST_HEAD(sglq_list); + + /* Retrieve all els sgls from driver list */ + spin_lock_irq(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); + spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); + + /* Now free the sgl list */ + lpfc_free_sgl_list(phba, &sglq_list); +} + +/** + * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the driver's nvmet sgl list and memory. 
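The free routines above (and several below) rely on list_for_each_entry_safe(), which caches the next pointer so the current entry can be unlinked and freed inside the loop. A minimal illustration with a generic node type (hypothetical, not an lpfc structure):

#include <linux/list.h>
#include <linux/slab.h>

struct demo_node {
	struct list_head list;
};

static void demo_free_all(struct list_head *head)
{
	struct demo_node *n, *tmp;

	/* 'tmp' keeps the walk valid while 'n' is unlinked and freed */
	list_for_each_entry_safe(n, tmp, head, list) {
		list_del(&n->list);
		kfree(n);
	}
}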
+ **/ +static void +lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) +{ + struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + LIST_HEAD(sglq_list); + + /* Retrieve all nvmet sgls from driver list */ + spin_lock_irq(&phba->hbalock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + spin_unlock_irq(&phba->hbalock); + + /* Now free the sgl list */ + list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { + list_del(&sglq_entry->list); + lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); + kfree(sglq_entry); + } + + /* Update the nvmet_xri_cnt to reflect no current sgls. + * The next initialization cycle sets the count and allocates + * the sgls over again. + */ + phba->sli4_hba.nvmet_xri_cnt = 0; +} + +/** + * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate the driver's active sgl memory. + * This array will hold the sglq_entry's for active IOs. + **/ +static int +lpfc_init_active_sgl_array(struct lpfc_hba *phba) +{ + int size; + size = sizeof(struct lpfc_sglq *); + size *= phba->sli4_hba.max_cfg_param.max_xri; + + phba->sli4_hba.lpfc_sglq_active_list = + kzalloc(size, GFP_KERNEL); + if (!phba->sli4_hba.lpfc_sglq_active_list) + return -ENOMEM; + return 0; +} + +/** + * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to walk through the array of active sglq entries + * and free all of the resources. + * This is just a place holder for now. + **/ +static void +lpfc_free_active_sgl(struct lpfc_hba *phba) +{ + kfree(phba->sli4_hba.lpfc_sglq_active_list); +} + +/** + * lpfc_init_sgl_list - Allocate and initialize sgl list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate and initizlize the driver's sgl + * list and set up the sgl xritag tag array accordingly. + * + **/ +static void +lpfc_init_sgl_list(struct lpfc_hba *phba) +{ + /* Initialize and populate the sglq list per host/VF. */ + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); + + /* els xri-sgl book keeping */ + phba->sli4_hba.els_xri_cnt = 0; + + /* nvme xri-buffer book keeping */ + phba->sli4_hba.io_xri_cnt = 0; +} + +/** + * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to post rpi header templates to the + * port for those SLI4 ports that do not support extents. This routine + * posts a PAGE_SIZE memory region to the port to hold up to + * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine + * and should be called only when interrupts are disabled. + * + * Return codes + * 0 - successful + * -ERROR - otherwise. 
+ **/ +int +lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) +{ + int rc = 0; + struct lpfc_rpi_hdr *rpi_hdr; + + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); + if (!phba->sli4_hba.rpi_hdrs_in_use) + return rc; + if (phba->sli4_hba.extents_in_use) + return -EIO; + + rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); + if (!rpi_hdr) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0391 Error during rpi post operation\n"); + lpfc_sli4_remove_rpis(phba); + rc = -ENODEV; + } + + return rc; +} + +/** + * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate a single 4KB memory region to + * support rpis and stores them in the phba. This single region + * provides support for up to 64 rpis. The region is used globally + * by the device. + * + * Returns: + * A valid rpi hdr on success. + * A NULL pointer on any failure. + **/ +struct lpfc_rpi_hdr * +lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) +{ + uint16_t rpi_limit, curr_rpi_range; + struct lpfc_dmabuf *dmabuf; + struct lpfc_rpi_hdr *rpi_hdr; + + /* + * If the SLI4 port supports extents, posting the rpi header isn't + * required. Set the expected maximum count and let the actual value + * get set when extents are fully allocated. + */ + if (!phba->sli4_hba.rpi_hdrs_in_use) + return NULL; + if (phba->sli4_hba.extents_in_use) + return NULL; + + /* The limit on the logical index is just the max_rpi count. */ + rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; + + spin_lock_irq(&phba->hbalock); + /* + * Establish the starting RPI in this header block. The starting + * rpi is normalized to a zero base because the physical rpi is + * port based. + */ + curr_rpi_range = phba->sli4_hba.next_rpi; + spin_unlock_irq(&phba->hbalock); + + /* Reached full RPI range */ + if (curr_rpi_range == rpi_limit) + return NULL; + + /* + * First allocate the protocol header region for the port. The + * port expects a 4KB DMA-mapped memory region that is 4K aligned. + */ + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) + return NULL; + + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + LPFC_HDR_TEMPLATE_SIZE, + &dmabuf->phys, GFP_KERNEL); + if (!dmabuf->virt) { + rpi_hdr = NULL; + goto err_free_dmabuf; + } + + if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { + rpi_hdr = NULL; + goto err_free_coherent; + } + + /* Save the rpi header data for cleanup later. */ + rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); + if (!rpi_hdr) + goto err_free_coherent; + + rpi_hdr->dmabuf = dmabuf; + rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; + rpi_hdr->page_count = 1; + spin_lock_irq(&phba->hbalock); + + /* The rpi_hdr stores the logical index only. */ + rpi_hdr->start_rpi = curr_rpi_range; + rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; + list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); + + spin_unlock_irq(&phba->hbalock); + return rpi_hdr; + + err_free_coherent: + dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, + dmabuf->virt, dmabuf->phys); + err_free_dmabuf: + kfree(dmabuf); + return NULL; +} + +/** + * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to remove all memory resources allocated + * to support rpis for SLI4 ports not supporting extents. 
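lpfc_sli4_create_rpi_hdr() above needs a DMA region that is naturally aligned to its own size and simply rejects the buffer if dma_alloc_coherent() returns one that is not. A compact sketch of that check, assuming a hypothetical 4 KB template size:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

#define DEMO_TEMPLATE_SIZE 4096	/* assumed size and required alignment */

static void *demo_alloc_aligned(struct device *dev, dma_addr_t *phys)
{
	void *virt;

	virt = dma_alloc_coherent(dev, DEMO_TEMPLATE_SIZE, phys, GFP_KERNEL);
	if (!virt)
		return NULL;

	if (!IS_ALIGNED(*phys, DEMO_TEMPLATE_SIZE)) {
		/* the port requires natural alignment; give the buffer back */
		dma_free_coherent(dev, DEMO_TEMPLATE_SIZE, virt, *phys);
		return NULL;
	}
	return virt;
}

In practice dma_alloc_coherent() returns memory aligned to at least a page, so for a 4 KB region the check is defensive rather than expected to fire.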
This routine + * presumes the caller has released all rpis consumed by fabric or port + * logins and is prepared to have the header pages removed. + **/ +void +lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) +{ + struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; + + if (!phba->sli4_hba.rpi_hdrs_in_use) + goto exit; + + list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, + &phba->sli4_hba.lpfc_rpi_hdr_list, list) { + list_del(&rpi_hdr->list); + dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, + rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); + kfree(rpi_hdr->dmabuf); + kfree(rpi_hdr); + } + exit: + /* There are no rpis available to the port now. */ + phba->sli4_hba.next_rpi = 0; +} + +/** + * lpfc_hba_alloc - Allocate driver hba data structure for a device. + * @pdev: pointer to pci device data structure. + * + * This routine is invoked to allocate the driver hba data structure for an + * HBA device. If the allocation is successful, the phba reference to the + * PCI device data structure is set. + * + * Return codes + * pointer to @phba - successful + * NULL - error + **/ +static struct lpfc_hba * +lpfc_hba_alloc(struct pci_dev *pdev) +{ + struct lpfc_hba *phba; + + /* Allocate memory for HBA structure */ + phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); + if (!phba) { + dev_err(&pdev->dev, "failed to allocate hba struct\n"); + return NULL; + } + + /* Set reference to PCI device in HBA structure */ + phba->pcidev = pdev; + + /* Assign an unused board number */ + phba->brd_no = lpfc_get_instance(); + if (phba->brd_no < 0) { + kfree(phba); + return NULL; + } + phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; + + spin_lock_init(&phba->ct_ev_lock); + INIT_LIST_HEAD(&phba->ct_ev_waiters); + + return phba; +} + +/** + * lpfc_hba_free - Free driver hba data structure with a device. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the driver hba data structure with an + * HBA device. + **/ +static void +lpfc_hba_free(struct lpfc_hba *phba) +{ + if (phba->sli_rev == LPFC_SLI_REV4) + kfree(phba->sli4_hba.hdwq); + + /* Release the driver assigned board number */ + idr_remove(&lpfc_hba_index, phba->brd_no); + + /* Free memory allocated with sli3 rings */ + kfree(phba->sli.sli3_ring); + phba->sli.sli3_ring = NULL; + + kfree(phba); + return; +} + +/** + * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes + * @vport: pointer to lpfc vport data structure. + * + * This routine is will setup initial FDMI attribute masks for + * FDMI2 or SmartSAN depending on module parameters. The driver will attempt + * to get these attributes first before falling back, the attribute + * fallback hierarchy is SmartSAN -> FDMI2 -> FMDI1 + **/ +void +lpfc_setup_fdmi_mask(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + + vport->load_flag |= FC_ALLOW_FDMI; + if (phba->cfg_enable_SmartSAN || + phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) { + /* Setup appropriate attribute masks */ + vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; + if (phba->cfg_enable_SmartSAN) + vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; + else + vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "6077 Setup FDMI mask: hba x%x port x%x\n", + vport->fdmi_hba_mask, vport->fdmi_port_mask); +} + +/** + * lpfc_create_shost - Create hba physical port with associated scsi host. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to create HBA physical port and associate a SCSI + * host with it. 
+ * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_create_shost(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport; + struct Scsi_Host *shost; + + /* Initialize HBA FC structure */ + phba->fc_edtov = FF_DEF_EDTOV; + phba->fc_ratov = FF_DEF_RATOV; + phba->fc_altov = FF_DEF_ALTOV; + phba->fc_arbtov = FF_DEF_ARBTOV; + + atomic_set(&phba->sdev_cnt, 0); + vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); + if (!vport) + return -ENODEV; + + shost = lpfc_shost_from_vport(vport); + phba->pport = vport; + + if (phba->nvmet_support) { + /* Only 1 vport (pport) will support NVME target */ + phba->targetport = NULL; + phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, + "6076 NVME Target Found\n"); + } + + lpfc_debugfs_initialize(vport); + /* Put reference to SCSI host to driver's device private data */ + pci_set_drvdata(phba->pcidev, shost); + + lpfc_setup_fdmi_mask(vport); + + /* + * At this point we are fully registered with PSA. In addition, + * any initial discovery should be completed. + */ + return 0; +} + +/** + * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to destroy HBA physical port and the associated + * SCSI host. + **/ +static void +lpfc_destroy_shost(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + + /* Destroy physical port that associated with the SCSI host */ + destroy_port(vport); + + return; +} + +/** + * lpfc_setup_bg - Setup Block guard structures and debug areas. + * @phba: pointer to lpfc hba data structure. + * @shost: the shost to be used to detect Block guard settings. + * + * This routine sets up the local Block guard protocol settings for @shost. + * This routine also allocates memory for debugging bg buffers. + **/ +static void +lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) +{ + uint32_t old_mask; + uint32_t old_guard; + + if (phba->cfg_prot_mask && phba->cfg_prot_guard) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1478 Registering BlockGuard with the " + "SCSI layer\n"); + + old_mask = phba->cfg_prot_mask; + old_guard = phba->cfg_prot_guard; + + /* Only allow supported values */ + phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | + SHOST_DIX_TYPE0_PROTECTION | + SHOST_DIX_TYPE1_PROTECTION); + phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | + SHOST_DIX_GUARD_CRC); + + /* DIF Type 1 protection for profiles AST1/C1 is end to end */ + if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) + phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; + + if (phba->cfg_prot_mask && phba->cfg_prot_guard) { + if ((old_mask != phba->cfg_prot_mask) || + (old_guard != phba->cfg_prot_guard)) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1475 Registering BlockGuard with the " + "SCSI layer: mask %d guard %d\n", + phba->cfg_prot_mask, + phba->cfg_prot_guard); + + scsi_host_set_prot(shost, phba->cfg_prot_mask); + scsi_host_set_guard(shost, phba->cfg_prot_guard); + } else + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1479 Not Registering BlockGuard with the SCSI " + "layer, Bad protection parameters: %d %d\n", + old_mask, old_guard); + } +} + +/** + * lpfc_post_init_setup - Perform necessary device post initialization setup. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to perform all the necessary post initialization + * setup for the device. 
+ **/ +static void +lpfc_post_init_setup(struct lpfc_hba *phba) +{ + struct Scsi_Host *shost; + struct lpfc_adapter_event_header adapter_event; + + /* Get the default values for Model Name and Description */ + lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); + + /* + * hba setup may have changed the hba_queue_depth so we need to + * adjust the value of can_queue. + */ + shost = pci_get_drvdata(phba->pcidev); + shost->can_queue = phba->cfg_hba_queue_depth - 10; + + lpfc_host_attrib_init(shost); + + if (phba->cfg_poll & DISABLE_FCP_RING_INT) { + spin_lock_irq(shost->host_lock); + lpfc_poll_start_timer(phba); + spin_unlock_irq(shost->host_lock); + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0428 Perform SCSI scan\n"); + /* Send board arrival event to upper layer */ + adapter_event.event_type = FC_REG_ADAPTER_EVENT; + adapter_event.subcategory = LPFC_EVENT_ARRIVAL; + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(adapter_event), + (char *) &adapter_event, + LPFC_NL_VENDOR_ID); + return; +} + +/** + * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the PCI device memory space for device + * with SLI-3 interface spec. + * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) +{ + struct pci_dev *pdev = phba->pcidev; + unsigned long bar0map_len, bar2map_len; + int i, hbq_count; + void *ptr; + int error; + + if (!pdev) + return -ENODEV; + + /* Set the device DMA mask size */ + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (error) + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (error) + return error; + error = -ENODEV; + + /* Get the bus address of Bar0 and Bar2 and the number of bytes + * required by each mapping. + */ + phba->pci_bar0_map = pci_resource_start(pdev, 0); + bar0map_len = pci_resource_len(pdev, 0); + + phba->pci_bar2_map = pci_resource_start(pdev, 2); + bar2map_len = pci_resource_len(pdev, 2); + + /* Map HBA SLIM to a kernel virtual address. */ + phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); + if (!phba->slim_memmap_p) { + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for SLIM memory.\n"); + goto out; + } + + /* Map HBA Control Registers to a kernel virtual address. 
*/ + phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); + if (!phba->ctrl_regs_memmap_p) { + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for HBA control registers.\n"); + goto out_iounmap_slim; + } + + /* Allocate memory for SLI-2 structures */ + phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, + &phba->slim2p.phys, GFP_KERNEL); + if (!phba->slim2p.virt) + goto out_iounmap; + + phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); + phba->mbox_ext = (phba->slim2p.virt + + offsetof(struct lpfc_sli2_slim, mbx_ext_words)); + phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); + phba->IOCBs = (phba->slim2p.virt + + offsetof(struct lpfc_sli2_slim, IOCBs)); + + phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, + lpfc_sli_hbq_size(), + &phba->hbqslimp.phys, + GFP_KERNEL); + if (!phba->hbqslimp.virt) + goto out_free_slim; + + hbq_count = lpfc_sli_hbq_count(); + ptr = phba->hbqslimp.virt; + for (i = 0; i < hbq_count; ++i) { + phba->hbqs[i].hbq_virt = ptr; + INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); + ptr += (lpfc_hbq_defs[i]->entry_count * + sizeof(struct lpfc_hbq_entry)); + } + phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; + phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; + + memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); + + phba->MBslimaddr = phba->slim_memmap_p; + phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; + phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; + phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; + phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; + + return 0; + +out_free_slim: + dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, + phba->slim2p.virt, phba->slim2p.phys); +out_iounmap: + iounmap(phba->ctrl_regs_memmap_p); +out_iounmap_slim: + iounmap(phba->slim_memmap_p); +out: + return error; +} + +/** + * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the PCI device memory space for device + * with SLI-3 interface spec. + **/ +static void +lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) +{ + struct pci_dev *pdev; + + /* Obtain PCI device reference */ + if (!phba->pcidev) + return; + else + pdev = phba->pcidev; + + /* Free coherent DMA memory allocated */ + dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), + phba->hbqslimp.virt, phba->hbqslimp.phys); + dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, + phba->slim2p.virt, phba->slim2p.phys); + + /* I/O memory unmap */ + iounmap(phba->ctrl_regs_memmap_p); + iounmap(phba->slim_memmap_p); + + return; +} + +/** + * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to wait for SLI4 device Power On Self Test (POST) + * done and check status. + * + * Return 0 if successful, otherwise -ENODEV. 
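lpfc_sli_pci_mem_setup() above maps two BARs with the usual pci_resource_start()/pci_resource_len()/ioremap() pairing before carving up the SLIM areas. Reduced to its core, and with a hypothetical helper name:

#include <linux/io.h>
#include <linux/pci.h>

/* Map BAR @bar of @pdev; caller must iounmap() the result. */
static void __iomem *demo_map_bar(struct pci_dev *pdev, int bar)
{
	resource_size_t start = pci_resource_start(pdev, bar);
	resource_size_t len = pci_resource_len(pdev, bar);

	if (!start || !len)
		return NULL;

	return ioremap(start, len);
}

The PCI core also offers pci_ioremap_bar() and the managed pcim_iomap(), which wrap the same checks.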
+ **/ +int +lpfc_sli4_post_status_check(struct lpfc_hba *phba) +{ + struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; + struct lpfc_register reg_data; + int i, port_error = 0; + uint32_t if_type; + + memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); + memset(®_data, 0, sizeof(reg_data)); + if (!phba->sli4_hba.PSMPHRregaddr) + return -ENODEV; + + /* Wait up to 30 seconds for the SLI Port POST done and ready */ + for (i = 0; i < 3000; i++) { + if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, + &portsmphr_reg.word0) || + (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { + /* Port has a fatal POST error, break out */ + port_error = -ENODEV; + break; + } + if (LPFC_POST_STAGE_PORT_READY == + bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) + break; + msleep(10); + } + + /* + * If there was a port error during POST, then don't proceed with + * other register reads as the data may not be valid. Just exit. + */ + if (port_error) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1408 Port Failed POST - portsmphr=0x%x, " + "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " + "scr2=x%x, hscratch=x%x, pstatus=x%x\n", + portsmphr_reg.word0, + bf_get(lpfc_port_smphr_perr, &portsmphr_reg), + bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), + bf_get(lpfc_port_smphr_nip, &portsmphr_reg), + bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), + bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), + bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), + bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), + bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2534 Device Info: SLIFamily=0x%x, " + "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " + "SLIHint_2=0x%x, FT=0x%x\n", + bf_get(lpfc_sli_intf_sli_family, + &phba->sli4_hba.sli_intf), + bf_get(lpfc_sli_intf_slirev, + &phba->sli4_hba.sli_intf), + bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf), + bf_get(lpfc_sli_intf_sli_hint1, + &phba->sli4_hba.sli_intf), + bf_get(lpfc_sli_intf_sli_hint2, + &phba->sli4_hba.sli_intf), + bf_get(lpfc_sli_intf_func_type, + &phba->sli4_hba.sli_intf)); + /* + * Check for other Port errors during the initialization + * process. Fail the load if the port did not come up + * correctly. + */ + if_type = bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf); + switch (if_type) { + case LPFC_SLI_INTF_IF_TYPE_0: + phba->sli4_hba.ue_mask_lo = + readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); + phba->sli4_hba.ue_mask_hi = + readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); + uerrlo_reg.word0 = + readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); + uerrhi_reg.word0 = + readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); + if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || + (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "1422 Unrecoverable Error " + "Detected during POST " + "uerr_lo_reg=0x%x, " + "uerr_hi_reg=0x%x, " + "ue_mask_lo_reg=0x%x, " + "ue_mask_hi_reg=0x%x\n", + uerrlo_reg.word0, + uerrhi_reg.word0, + phba->sli4_hba.ue_mask_lo, + phba->sli4_hba.ue_mask_hi); + port_error = -ENODEV; + } + break; + case LPFC_SLI_INTF_IF_TYPE_2: + case LPFC_SLI_INTF_IF_TYPE_6: + /* Final checks. The port status should be clean. */ + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + ®_data.word0) || + lpfc_sli4_unrecoverable_port(®_data)) { + phba->work_status[0] = + readl(phba->sli4_hba.u.if_type2. + ERR1regaddr); + phba->work_status[1] = + readl(phba->sli4_hba.u.if_type2. 
+ ERR2regaddr); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2888 Unrecoverable port error " + "following POST: port status reg " + "0x%x, port_smphr reg 0x%x, " + "error 1=0x%x, error 2=0x%x\n", + reg_data.word0, + portsmphr_reg.word0, + phba->work_status[0], + phba->work_status[1]); + port_error = -ENODEV; + break; + } + + if (lpfc_pldv_detect && + bf_get(lpfc_sli_intf_sli_family, + &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_FAMILY_G6) + pci_write_config_byte(phba->pcidev, + LPFC_SLI_INTF, CFG_PLD); + break; + case LPFC_SLI_INTF_IF_TYPE_1: + default: + break; + } + } + return port_error; +} + +/** + * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. + * @phba: pointer to lpfc hba data structure. + * @if_type: The SLI4 interface type getting configured. + * + * This routine is invoked to set up SLI4 BAR0 PCI config space register + * memory map. + **/ +static void +lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) +{ + switch (if_type) { + case LPFC_SLI_INTF_IF_TYPE_0: + phba->sli4_hba.u.if_type0.UERRLOregaddr = + phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; + phba->sli4_hba.u.if_type0.UERRHIregaddr = + phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; + phba->sli4_hba.u.if_type0.UEMASKLOregaddr = + phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; + phba->sli4_hba.u.if_type0.UEMASKHIregaddr = + phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; + phba->sli4_hba.SLIINTFregaddr = + phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; + break; + case LPFC_SLI_INTF_IF_TYPE_2: + phba->sli4_hba.u.if_type2.EQDregaddr = + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_EQ_DELAY_OFFSET; + phba->sli4_hba.u.if_type2.ERR1regaddr = + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_ER1_OFFSET; + phba->sli4_hba.u.if_type2.ERR2regaddr = + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_ER2_OFFSET; + phba->sli4_hba.u.if_type2.CTRLregaddr = + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_CTL_OFFSET; + phba->sli4_hba.u.if_type2.STATUSregaddr = + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_STA_OFFSET; + phba->sli4_hba.SLIINTFregaddr = + phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; + phba->sli4_hba.PSMPHRregaddr = + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_SEM_OFFSET; + phba->sli4_hba.RQDBregaddr = + phba->sli4_hba.conf_regs_memmap_p + + LPFC_ULP0_RQ_DOORBELL; + phba->sli4_hba.WQDBregaddr = + phba->sli4_hba.conf_regs_memmap_p + + LPFC_ULP0_WQ_DOORBELL; + phba->sli4_hba.CQDBregaddr = + phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; + phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; + phba->sli4_hba.MQDBregaddr = + phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; + phba->sli4_hba.BMBXregaddr = + phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; + break; + case LPFC_SLI_INTF_IF_TYPE_6: + phba->sli4_hba.u.if_type2.EQDregaddr = + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_EQ_DELAY_OFFSET; + phba->sli4_hba.u.if_type2.ERR1regaddr = + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_ER1_OFFSET; + phba->sli4_hba.u.if_type2.ERR2regaddr = + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_ER2_OFFSET; + phba->sli4_hba.u.if_type2.CTRLregaddr = + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_CTL_OFFSET; + phba->sli4_hba.u.if_type2.STATUSregaddr = + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_STA_OFFSET; + phba->sli4_hba.PSMPHRregaddr = + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_SEM_OFFSET; + phba->sli4_hba.BMBXregaddr = + 
phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; + break; + case LPFC_SLI_INTF_IF_TYPE_1: + default: + dev_printk(KERN_ERR, &phba->pcidev->dev, + "FATAL - unsupported SLI4 interface type - %d\n", + if_type); + break; + } +} + +/** + * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. + * @phba: pointer to lpfc hba data structure. + * @if_type: sli if type to operate on. + * + * This routine is invoked to set up SLI4 BAR1 register memory map. + **/ +static void +lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type) +{ + switch (if_type) { + case LPFC_SLI_INTF_IF_TYPE_0: + phba->sli4_hba.PSMPHRregaddr = + phba->sli4_hba.ctrl_regs_memmap_p + + LPFC_SLIPORT_IF0_SMPHR; + phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + + LPFC_HST_ISR0; + phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + + LPFC_HST_IMR0; + phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + + LPFC_HST_ISCR0; + break; + case LPFC_SLI_INTF_IF_TYPE_6: + phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + + LPFC_IF6_RQ_DOORBELL; + phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + + LPFC_IF6_WQ_DOORBELL; + phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + + LPFC_IF6_CQ_DOORBELL; + phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + + LPFC_IF6_EQ_DOORBELL; + phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + + LPFC_IF6_MQ_DOORBELL; + break; + case LPFC_SLI_INTF_IF_TYPE_2: + case LPFC_SLI_INTF_IF_TYPE_1: + default: + dev_err(&phba->pcidev->dev, + "FATAL - unsupported SLI4 interface type - %d\n", + if_type); + break; + } +} + +/** + * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. + * @phba: pointer to lpfc hba data structure. + * @vf: virtual function number + * + * This routine is invoked to set up SLI4 BAR2 doorbell register memory map + * based on the given viftual function number, @vf. + * + * Return 0 if successful, otherwise -ENODEV. + **/ +static int +lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) +{ + if (vf > LPFC_VIR_FUNC_MAX) + return -ENODEV; + + phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + + LPFC_ULP0_RQ_DOORBELL); + phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + + LPFC_ULP0_WQ_DOORBELL); + phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + + LPFC_EQCQ_DOORBELL); + phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; + phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); + phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); + return 0; +} + +/** + * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to create the bootstrap mailbox + * region consistent with the SLI-4 interface spec. This + * routine allocates all memory necessary to communicate + * mailbox commands to the port and sets up all alignment + * needs. No locks are expected to be held when calling + * this routine. + * + * Return codes + * 0 - successful + * -ENOMEM - could not allocated memory. 
+ **/ +static int +lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) +{ + uint32_t bmbx_size; + struct lpfc_dmabuf *dmabuf; + struct dma_address *dma_address; + uint32_t pa_addr; + uint64_t phys_addr; + + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + /* + * The bootstrap mailbox region is comprised of 2 parts + * plus an alignment restriction of 16 bytes. + */ + bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, + &dmabuf->phys, GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + return -ENOMEM; + } + + /* + * Initialize the bootstrap mailbox pointers now so that the register + * operations are simple later. The mailbox dma address is required + * to be 16-byte aligned. Also align the virtual memory as each + * maibox is copied into the bmbx mailbox region before issuing the + * command to the port. + */ + phba->sli4_hba.bmbx.dmabuf = dmabuf; + phba->sli4_hba.bmbx.bmbx_size = bmbx_size; + + phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, + LPFC_ALIGN_16_BYTE); + phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, + LPFC_ALIGN_16_BYTE); + + /* + * Set the high and low physical addresses now. The SLI4 alignment + * requirement is 16 bytes and the mailbox is posted to the port + * as two 30-bit addresses. The other data is a bit marking whether + * the 30-bit address is the high or low address. + * Upcast bmbx aphys to 64bits so shift instruction compiles + * clean on 32 bit machines. + */ + dma_address = &phba->sli4_hba.bmbx.dma_address; + phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; + pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); + dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | + LPFC_BMBX_BIT1_ADDR_HI); + + pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); + dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | + LPFC_BMBX_BIT1_ADDR_LO); + return 0; +} + +/** + * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to teardown the bootstrap mailbox + * region and release all host resources. This routine requires + * the caller to ensure all mailbox commands recovered, no + * additional mailbox comands are sent, and interrupts are disabled + * before calling this routine. + * + **/ +static void +lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) +{ + dma_free_coherent(&phba->pcidev->dev, + phba->sli4_hba.bmbx.bmbx_size, + phba->sli4_hba.bmbx.dmabuf->virt, + phba->sli4_hba.bmbx.dmabuf->phys); + + kfree(phba->sli4_hba.bmbx.dmabuf); + memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); +} + +static const char * const lpfc_topo_to_str[] = { + "Loop then P2P", + "Loopback", + "P2P Only", + "Unsupported", + "Loop Only", + "Unsupported", + "P2P then Loop", +}; + +#define LINK_FLAGS_DEF 0x0 +#define LINK_FLAGS_P2P 0x1 +#define LINK_FLAGS_LOOP 0x2 +/** + * lpfc_map_topology - Map the topology read from READ_CONFIG + * @phba: pointer to lpfc hba data structure. + * @rd_config: pointer to read config data + * + * This routine is invoked to map the topology values as read + * from the read config mailbox command. 
If the persistent + * topology feature is supported, the firmware will provide the + * saved topology information to be used in INIT_LINK + **/ +static void +lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) +{ + u8 ptv, tf, pt; + + ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); + tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); + pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", + ptv, tf, pt); + if (!ptv) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2019 FW does not support persistent topology " + "Using driver parameter defined value [%s]", + lpfc_topo_to_str[phba->cfg_topology]); + return; + } + /* FW supports persistent topology - override module parameter value */ + phba->hba_flag |= HBA_PERSISTENT_TOPO; + + /* if ASIC_GEN_NUM >= 0xC) */ + if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_6) || + (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_FAMILY_G6)) { + if (!tf) { + phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) + ? FLAGS_TOPOLOGY_MODE_LOOP + : FLAGS_TOPOLOGY_MODE_PT_PT); + } else { + phba->hba_flag &= ~HBA_PERSISTENT_TOPO; + } + } else { /* G5 */ + if (tf) { + /* If topology failover set - pt is '0' or '1' */ + phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : + FLAGS_TOPOLOGY_MODE_LOOP_PT); + } else { + phba->cfg_topology = ((pt == LINK_FLAGS_P2P) + ? FLAGS_TOPOLOGY_MODE_PT_PT + : FLAGS_TOPOLOGY_MODE_LOOP); + } + } + if (phba->hba_flag & HBA_PERSISTENT_TOPO) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2020 Using persistent topology value [%s]", + lpfc_topo_to_str[phba->cfg_topology]); + } else { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2021 Invalid topology values from FW " + "Using driver parameter defined value [%s]", + lpfc_topo_to_str[phba->cfg_topology]); + } +} + +/** + * lpfc_sli4_read_config - Get the config parameters. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to read the configuration parameters from the HBA. + * The configuration parameters are used to set the base and maximum values + * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource + * allocation for the port. + * + * Return codes + * 0 - successful + * -ENOMEM - No available memory + * -EIO - The mailbox failed to complete successfully. 
+ **/ +int +lpfc_sli4_read_config(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *pmb; + struct lpfc_mbx_read_config *rd_config; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t shdr_status, shdr_add_status; + struct lpfc_mbx_get_func_cfg *get_func_cfg; + struct lpfc_rsrc_desc_fcfcoe *desc; + char *pdesc_0; + uint16_t forced_link_speed; + uint32_t if_type, qmin, fawwpn; + int length, i, rc = 0, rc2; + + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2011 Unable to allocate memory for issuing " + "SLI_CONFIG_SPECIAL mailbox command\n"); + return -ENOMEM; + } + + lpfc_read_config(phba, pmb); + + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2012 Mailbox failed , mbxCmd x%x " + "READ_CONFIG, mbxStatus x%x\n", + bf_get(lpfc_mqe_command, &pmb->u.mqe), + bf_get(lpfc_mqe_status, &pmb->u.mqe)); + rc = -EIO; + } else { + rd_config = &pmb->u.mqe.un.rd_config; + if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { + phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; + phba->sli4_hba.lnk_info.lnk_tp = + bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); + phba->sli4_hba.lnk_info.lnk_no = + bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3081 lnk_type:%d, lnk_numb:%d\n", + phba->sli4_hba.lnk_info.lnk_tp, + phba->sli4_hba.lnk_info.lnk_no); + } else + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3082 Mailbox (x%x) returned ldv:x0\n", + bf_get(lpfc_mqe_command, &pmb->u.mqe)); + if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { + phba->bbcredit_support = 1; + phba->sli4_hba.bbscn_params.word0 = rd_config->word8; + } + + fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config); + + if (fawwpn) { + lpfc_printf_log(phba, KERN_INFO, + LOG_INIT | LOG_DISCOVERY, + "2702 READ_CONFIG: FA-PWWN is " + "configured on\n"); + phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG; + } else { + /* Clear FW configured flag, preserve driver flag */ + phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG; + } + + phba->sli4_hba.conf_trunk = + bf_get(lpfc_mbx_rd_conf_trunk, rd_config); + phba->sli4_hba.extents_in_use = + bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); + + phba->sli4_hba.max_cfg_param.max_xri = + bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); + /* Reduce resource usage in kdump environment */ + if (is_kdump_kernel() && + phba->sli4_hba.max_cfg_param.max_xri > 512) + phba->sli4_hba.max_cfg_param.max_xri = 512; + phba->sli4_hba.max_cfg_param.xri_base = + bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); + phba->sli4_hba.max_cfg_param.max_vpi = + bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); + /* Limit the max we support */ + if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) + phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; + phba->sli4_hba.max_cfg_param.vpi_base = + bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); + phba->sli4_hba.max_cfg_param.max_rpi = + bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); + phba->sli4_hba.max_cfg_param.rpi_base = + bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); + phba->sli4_hba.max_cfg_param.max_vfi = + bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); + phba->sli4_hba.max_cfg_param.vfi_base = + bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); + phba->sli4_hba.max_cfg_param.max_fcfi = + bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); + phba->sli4_hba.max_cfg_param.max_eq = + bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); + phba->sli4_hba.max_cfg_param.max_rq = + 
bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); + phba->sli4_hba.max_cfg_param.max_wq = + bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); + phba->sli4_hba.max_cfg_param.max_cq = + bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); + phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); + phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; + phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; + phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; + phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? + (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; + phba->max_vports = phba->max_vpi; + + /* Next decide on FPIN or Signal E2E CGN support + * For congestion alarms and warnings valid combination are: + * 1. FPIN alarms / FPIN warnings + * 2. Signal alarms / Signal warnings + * 3. FPIN alarms / Signal warnings + * 4. Signal alarms / FPIN warnings + * + * Initialize the adapter frequency to 100 mSecs + */ + phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; + phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; + phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; + + if (lpfc_use_cgn_signal) { + if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) { + phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; + phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; + } + if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) { + /* MUST support both alarm and warning + * because EDC does not support alarm alone. + */ + if (phba->cgn_reg_signal != + EDC_CG_SIG_WARN_ONLY) { + /* Must support both or none */ + phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; + phba->cgn_reg_signal = + EDC_CG_SIG_NOTSUPPORTED; + } else { + phba->cgn_reg_signal = + EDC_CG_SIG_WARN_ALARM; + phba->cgn_reg_fpin = + LPFC_CGN_FPIN_NONE; + } + } + } + + /* Set the congestion initial signal and fpin values. */ + phba->cgn_init_reg_fpin = phba->cgn_reg_fpin; + phba->cgn_init_reg_signal = phba->cgn_reg_signal; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n", + phba->cgn_reg_signal, phba->cgn_reg_fpin); + + lpfc_map_topology(phba, rd_config); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2003 cfg params Extents? %d " + "XRI(B:%d M:%d), " + "VPI(B:%d M:%d) " + "VFI(B:%d M:%d) " + "RPI(B:%d M:%d) " + "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n", + phba->sli4_hba.extents_in_use, + phba->sli4_hba.max_cfg_param.xri_base, + phba->sli4_hba.max_cfg_param.max_xri, + phba->sli4_hba.max_cfg_param.vpi_base, + phba->sli4_hba.max_cfg_param.max_vpi, + phba->sli4_hba.max_cfg_param.vfi_base, + phba->sli4_hba.max_cfg_param.max_vfi, + phba->sli4_hba.max_cfg_param.rpi_base, + phba->sli4_hba.max_cfg_param.max_rpi, + phba->sli4_hba.max_cfg_param.max_fcfi, + phba->sli4_hba.max_cfg_param.max_eq, + phba->sli4_hba.max_cfg_param.max_cq, + phba->sli4_hba.max_cfg_param.max_wq, + phba->sli4_hba.max_cfg_param.max_rq, + phba->lmt); + + /* + * Calculate queue resources based on how + * many WQ/CQ/EQs are available. + */ + qmin = phba->sli4_hba.max_cfg_param.max_wq; + if (phba->sli4_hba.max_cfg_param.max_cq < qmin) + qmin = phba->sli4_hba.max_cfg_param.max_cq; + /* + * Reserve 4 (ELS, NVME LS, MBOX, plus one extra) and + * the remainder can be used for NVME / FCP. 
+ */ + qmin -= 4; + if (phba->sli4_hba.max_cfg_param.max_eq < qmin) + qmin = phba->sli4_hba.max_cfg_param.max_eq; + + /* Check to see if there is enough for default cfg */ + if ((phba->cfg_irq_chann > qmin) || + (phba->cfg_hdw_queue > qmin)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2005 Reducing Queues - " + "FW resource limitation: " + "WQ %d CQ %d EQ %d: min %d: " + "IRQ %d HDWQ %d\n", + phba->sli4_hba.max_cfg_param.max_wq, + phba->sli4_hba.max_cfg_param.max_cq, + phba->sli4_hba.max_cfg_param.max_eq, + qmin, phba->cfg_irq_chann, + phba->cfg_hdw_queue); + + if (phba->cfg_irq_chann > qmin) + phba->cfg_irq_chann = qmin; + if (phba->cfg_hdw_queue > qmin) + phba->cfg_hdw_queue = qmin; + } + } + + if (rc) + goto read_cfg_out; + + /* Update link speed if forced link speed is supported */ + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { + forced_link_speed = + bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); + if (forced_link_speed) { + phba->hba_flag |= HBA_FORCED_LINK_SPEED; + + switch (forced_link_speed) { + case LINK_SPEED_1G: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_1G; + break; + case LINK_SPEED_2G: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_2G; + break; + case LINK_SPEED_4G: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_4G; + break; + case LINK_SPEED_8G: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_8G; + break; + case LINK_SPEED_10G: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_10G; + break; + case LINK_SPEED_16G: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_16G; + break; + case LINK_SPEED_32G: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_32G; + break; + case LINK_SPEED_64G: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_64G; + break; + case 0xffff: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_AUTO; + break; + default: + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "0047 Unrecognized link " + "speed : %d\n", + forced_link_speed); + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_AUTO; + } + } + } + + /* Reset the DFT_HBA_Q_DEPTH to the max xri */ + length = phba->sli4_hba.max_cfg_param.max_xri - + lpfc_sli4_get_els_iocb_cnt(phba); + if (phba->cfg_hba_queue_depth > length) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3361 HBA queue depth changed from %d to %d\n", + phba->cfg_hba_queue_depth, length); + phba->cfg_hba_queue_depth = length; + } + + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < + LPFC_SLI_INTF_IF_TYPE_2) + goto read_cfg_out; + + /* get the pf# and vf# for SLI4 if_type 2 port */ + length = (sizeof(struct lpfc_mbx_get_func_cfg) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, + length, LPFC_SLI4_MBX_EMBED); + + rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) + &pmb->u.mqe.un.sli4_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (rc2 || shdr_status || shdr_add_status) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3026 Mailbox failed , mbxCmd x%x " + "GET_FUNCTION_CONFIG, mbxStatus x%x\n", + bf_get(lpfc_mqe_command, &pmb->u.mqe), + bf_get(lpfc_mqe_status, &pmb->u.mqe)); + goto read_cfg_out; + } + + /* search for fc_fcoe resrouce descriptor */ + get_func_cfg = &pmb->u.mqe.un.get_func_cfg; + + pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; + desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; + length 
= bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); + if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) + length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; + else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) + goto read_cfg_out; + + for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { + desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); + if (LPFC_RSRC_DESC_TYPE_FCFCOE == + bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { + phba->sli4_hba.iov.pf_number = + bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); + phba->sli4_hba.iov.vf_number = + bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); + break; + } + } + + if (i < LPFC_RSRC_DESC_MAX_NUM) + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3027 GET_FUNCTION_CONFIG: pf_number:%d, " + "vf_number:%d\n", phba->sli4_hba.iov.pf_number, + phba->sli4_hba.iov.vf_number); + else + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3028 GET_FUNCTION_CONFIG: failed to find " + "Resource Descriptor:x%x\n", + LPFC_RSRC_DESC_TYPE_FCFCOE); + +read_cfg_out: + mempool_free(pmb, phba->mbox_mem_pool); + return rc; +} + +/** + * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to setup the port-side endian order when + * the port if_type is 0. This routine has no function for other + * if_types. + * + * Return codes + * 0 - successful + * -ENOMEM - No available memory + * -EIO - The mailbox failed to complete successfully. + **/ +static int +lpfc_setup_endian_order(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + uint32_t if_type, rc = 0; + uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, + HOST_ENDIAN_HIGH_WORD1}; + + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + switch (if_type) { + case LPFC_SLI_INTF_IF_TYPE_0: + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0492 Unable to allocate memory for " + "issuing SLI_CONFIG_SPECIAL mailbox " + "command\n"); + return -ENOMEM; + } + + /* + * The SLI4_CONFIG_SPECIAL mailbox command requires the first + * two words to contain special data values and no other data. + */ + memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); + memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0493 SLI_CONFIG_SPECIAL mailbox " + "failed with status x%x\n", + rc); + rc = -EIO; + } + mempool_free(mboxq, phba->mbox_mem_pool); + break; + case LPFC_SLI_INTF_IF_TYPE_6: + case LPFC_SLI_INTF_IF_TYPE_2: + case LPFC_SLI_INTF_IF_TYPE_1: + default: + break; + } + return rc; +} + +/** + * lpfc_sli4_queue_verify - Verify and update EQ counts + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to check the user settable queue counts for EQs. + * After this routine is called the counts will be set to valid values that + * adhere to the constraints of the system's interrupt vectors and the port's + * queue resources. 
+ * + * Return codes + * 0 - successful + * -ENOMEM - No available memory + **/ +static int +lpfc_sli4_queue_verify(struct lpfc_hba *phba) +{ + /* + * Sanity check for configured queue parameters against the run-time + * device parameters + */ + + if (phba->nvmet_support) { + if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) + phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; + if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) + phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", + phba->cfg_hdw_queue, phba->cfg_irq_chann, + phba->cfg_nvmet_mrq); + + /* Get EQ depth from module parameter, fake the default for now */ + phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; + phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; + + /* Get CQ depth from module parameter, fake the default for now */ + phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; + phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; + return 0; +} + +static int +lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) +{ + struct lpfc_queue *qdesc; + u32 wqesize; + int cpu; + + cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); + /* Create Fast Path IO CQs */ + if (phba->enab_exp_wqcq_pages) + /* Increase the CQ size when WQEs contain an embedded cdb */ + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, + phba->sli4_hba.cq_esize, + LPFC_CQE_EXP_COUNT, cpu); + + else + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, + phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount, cpu); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0499 Failed allocate fast-path IO CQ (%d)\n", + idx); + return 1; + } + qdesc->qe_valid = 1; + qdesc->hdwq = idx; + qdesc->chann = cpu; + phba->sli4_hba.hdwq[idx].io_cq = qdesc; + + /* Create Fast Path IO WQs */ + if (phba->enab_exp_wqcq_pages) { + /* Increase the WQ size when WQEs contain an embedded cdb */ + wqesize = (phba->fcp_embed_io) ? + LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, + wqesize, + LPFC_WQE_EXP_COUNT, cpu); + } else + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, + phba->sli4_hba.wq_esize, + phba->sli4_hba.wq_ecount, cpu); + + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0503 Failed allocate fast-path IO WQ (%d)\n", + idx); + return 1; + } + qdesc->hdwq = idx; + qdesc->chann = cpu; + phba->sli4_hba.hdwq[idx].io_wq = qdesc; + list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); + return 0; +} + +/** + * lpfc_sli4_queue_create - Create all the SLI4 queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA + * operation. For each SLI4 queue type, the parameters such as queue entry + * count (queue depth) shall be taken from the module parameter. For now, + * we just use some constant number as place holder. + * + * Return codes + * 0 - successful + * -ENOMEM - No availble memory + * -EIO - The mailbox failed to complete successfully. + **/ +int +lpfc_sli4_queue_create(struct lpfc_hba *phba) +{ + struct lpfc_queue *qdesc; + int idx, cpu, eqcpu; + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_vector_map_info *cpup; + struct lpfc_vector_map_info *eqcpup; + struct lpfc_eq_intr_info *eqi; + + /* + * Create HBA Record arrays. 
+ * Both NVME and FCP will share that same vectors / EQs + */ + phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; + phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; + phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; + phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; + phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; + phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; + phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; + phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; + phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; + phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; + + if (!phba->sli4_hba.hdwq) { + phba->sli4_hba.hdwq = kcalloc( + phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), + GFP_KERNEL); + if (!phba->sli4_hba.hdwq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6427 Failed allocate memory for " + "fast-path Hardware Queue array\n"); + goto out_error; + } + /* Prepare hardware queues to take IO buffers */ + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + spin_lock_init(&qp->io_buf_list_get_lock); + spin_lock_init(&qp->io_buf_list_put_lock); + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); + qp->get_io_bufs = 0; + qp->put_io_bufs = 0; + qp->total_io_bufs = 0; + spin_lock_init(&qp->abts_io_buf_list_lock); + INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); + qp->abts_scsi_io_bufs = 0; + qp->abts_nvme_io_bufs = 0; + INIT_LIST_HEAD(&qp->sgl_list); + INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); + spin_lock_init(&qp->hdwq_lock); + } + } + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + if (phba->nvmet_support) { + phba->sli4_hba.nvmet_cqset = kcalloc( + phba->cfg_nvmet_mrq, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.nvmet_cqset) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3121 Fail allocate memory for " + "fast-path CQ set array\n"); + goto out_error; + } + phba->sli4_hba.nvmet_mrq_hdr = kcalloc( + phba->cfg_nvmet_mrq, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.nvmet_mrq_hdr) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3122 Fail allocate memory for " + "fast-path RQ set hdr array\n"); + goto out_error; + } + phba->sli4_hba.nvmet_mrq_data = kcalloc( + phba->cfg_nvmet_mrq, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.nvmet_mrq_data) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3124 Fail allocate memory for " + "fast-path RQ set data array\n"); + goto out_error; + } + } + } + + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); + + /* Create HBA Event Queues (EQs) */ + for_each_present_cpu(cpu) { + /* We only want to create 1 EQ per vector, even though + * multiple CPUs might be using that vector. so only + * selects the CPUs that are LPFC_CPU_FIRST_IRQ. 
+ */ + cpup = &phba->sli4_hba.cpu_map[cpu]; + if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) + continue; + + /* Get a ptr to the Hardware Queue associated with this CPU */ + qp = &phba->sli4_hba.hdwq[cpup->hdwq]; + + /* Allocate an EQ */ + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, + phba->sli4_hba.eq_esize, + phba->sli4_hba.eq_ecount, cpu); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0497 Failed allocate EQ (%d)\n", + cpup->hdwq); + goto out_error; + } + qdesc->qe_valid = 1; + qdesc->hdwq = cpup->hdwq; + qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ + qdesc->last_cpu = qdesc->chann; + + /* Save the allocated EQ in the Hardware Queue */ + qp->hba_eq = qdesc; + + eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); + list_add(&qdesc->cpu_list, &eqi->list); + } + + /* Now we need to populate the other Hardware Queues, that share + * an IRQ vector, with the associated EQ ptr. + */ + for_each_present_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + + /* Check for EQ already allocated in previous loop */ + if (cpup->flag & LPFC_CPU_FIRST_IRQ) + continue; + + /* Check for multiple CPUs per hdwq */ + qp = &phba->sli4_hba.hdwq[cpup->hdwq]; + if (qp->hba_eq) + continue; + + /* We need to share an EQ for this hdwq */ + eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); + eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; + qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; + } + + /* Allocate IO Path SLI4 CQ/WQs */ + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + if (lpfc_alloc_io_wq_cq(phba, idx)) + goto out_error; + } + + if (phba->nvmet_support) { + for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { + cpu = lpfc_find_cpu_handle(phba, idx, + LPFC_FIND_BY_HDWQ); + qdesc = lpfc_sli4_queue_alloc(phba, + LPFC_DEFAULT_PAGE_SIZE, + phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount, + cpu); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3142 Failed allocate NVME " + "CQ Set (%d)\n", idx); + goto out_error; + } + qdesc->qe_valid = 1; + qdesc->hdwq = idx; + qdesc->chann = cpu; + phba->sli4_hba.nvmet_cqset[idx] = qdesc; + } + } + + /* + * Create Slow Path Completion Queues (CQs) + */ + + cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); + /* Create slow-path Mailbox Command Complete Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, + phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount, cpu); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0500 Failed allocate slow-path mailbox CQ\n"); + goto out_error; + } + qdesc->qe_valid = 1; + phba->sli4_hba.mbx_cq = qdesc; + + /* Create slow-path ELS Complete Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, + phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount, cpu); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0501 Failed allocate slow-path ELS CQ\n"); + goto out_error; + } + qdesc->qe_valid = 1; + qdesc->chann = cpu; + phba->sli4_hba.els_cq = qdesc; + + + /* + * Create Slow Path Work Queues (WQs) + */ + + /* Create Mailbox Command Queue */ + + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, + phba->sli4_hba.mq_esize, + phba->sli4_hba.mq_ecount, cpu); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0505 Failed allocate slow-path MQ\n"); + goto out_error; + } + qdesc->chann = cpu; + phba->sli4_hba.mbx_wq = qdesc; + + /* + * Create ELS Work Queues + */ + + /* Create slow-path ELS Work Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, 
LPFC_DEFAULT_PAGE_SIZE, + phba->sli4_hba.wq_esize, + phba->sli4_hba.wq_ecount, cpu); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0504 Failed allocate slow-path ELS WQ\n"); + goto out_error; + } + qdesc->chann = cpu; + phba->sli4_hba.els_wq = qdesc; + list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + /* Create NVME LS Complete Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, + phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount, cpu); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6079 Failed allocate NVME LS CQ\n"); + goto out_error; + } + qdesc->chann = cpu; + qdesc->qe_valid = 1; + phba->sli4_hba.nvmels_cq = qdesc; + + /* Create NVME LS Work Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, + phba->sli4_hba.wq_esize, + phba->sli4_hba.wq_ecount, cpu); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6080 Failed allocate NVME LS WQ\n"); + goto out_error; + } + qdesc->chann = cpu; + phba->sli4_hba.nvmels_wq = qdesc; + list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); + } + + /* + * Create Receive Queue (RQ) + */ + + /* Create Receive Queue for header */ + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, + phba->sli4_hba.rq_esize, + phba->sli4_hba.rq_ecount, cpu); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0506 Failed allocate receive HRQ\n"); + goto out_error; + } + phba->sli4_hba.hdr_rq = qdesc; + + /* Create Receive Queue for data */ + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, + phba->sli4_hba.rq_esize, + phba->sli4_hba.rq_ecount, cpu); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0507 Failed allocate receive DRQ\n"); + goto out_error; + } + phba->sli4_hba.dat_rq = qdesc; + + if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && + phba->nvmet_support) { + for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { + cpu = lpfc_find_cpu_handle(phba, idx, + LPFC_FIND_BY_HDWQ); + /* Create NVMET Receive Queue for header */ + qdesc = lpfc_sli4_queue_alloc(phba, + LPFC_DEFAULT_PAGE_SIZE, + phba->sli4_hba.rq_esize, + LPFC_NVMET_RQE_DEF_COUNT, + cpu); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3146 Failed allocate " + "receive HRQ\n"); + goto out_error; + } + qdesc->hdwq = idx; + phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; + + /* Only needed for header of RQ pair */ + qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), + GFP_KERNEL, + cpu_to_node(cpu)); + if (qdesc->rqbp == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6131 Failed allocate " + "Header RQBP\n"); + goto out_error; + } + + /* Put list in known state in case driver load fails. 
*/ + INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); + + /* Create NVMET Receive Queue for data */ + qdesc = lpfc_sli4_queue_alloc(phba, + LPFC_DEFAULT_PAGE_SIZE, + phba->sli4_hba.rq_esize, + LPFC_NVMET_RQE_DEF_COUNT, + cpu); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3156 Failed allocate " + "receive DRQ\n"); + goto out_error; + } + qdesc->hdwq = idx; + phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; + } + } + + /* Clear NVME stats */ + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, + sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); + } + } + + /* Clear SCSI stats */ + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, + sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); + } + } + + return 0; + +out_error: + lpfc_sli4_queue_destroy(phba); + return -ENOMEM; +} + +static inline void +__lpfc_sli4_release_queue(struct lpfc_queue **qp) +{ + if (*qp != NULL) { + lpfc_sli4_queue_free(*qp); + *qp = NULL; + } +} + +static inline void +lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) +{ + int idx; + + if (*qs == NULL) + return; + + for (idx = 0; idx < max; idx++) + __lpfc_sli4_release_queue(&(*qs)[idx]); + + kfree(*qs); + *qs = NULL; +} + +static inline void +lpfc_sli4_release_hdwq(struct lpfc_hba *phba) +{ + struct lpfc_sli4_hdw_queue *hdwq; + struct lpfc_queue *eq; + uint32_t idx; + + hdwq = phba->sli4_hba.hdwq; + + /* Loop thru all Hardware Queues */ + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + /* Free the CQ/WQ corresponding to the Hardware Queue */ + lpfc_sli4_queue_free(hdwq[idx].io_cq); + lpfc_sli4_queue_free(hdwq[idx].io_wq); + hdwq[idx].hba_eq = NULL; + hdwq[idx].io_cq = NULL; + hdwq[idx].io_wq = NULL; + if (phba->cfg_xpsgl && !phba->nvmet_support) + lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); + lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); + } + /* Loop thru all IRQ vectors */ + for (idx = 0; idx < phba->cfg_irq_chann; idx++) { + /* Free the EQ corresponding to the IRQ vector */ + eq = phba->sli4_hba.hba_eq_hdl[idx].eq; + lpfc_sli4_queue_free(eq); + phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; + } +} + +/** + * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to release all the SLI4 queues with the FCoE HBA + * operation. + * + * Return codes + * 0 - successful + * -ENOMEM - No available memory + * -EIO - The mailbox failed to complete successfully. + **/ +void +lpfc_sli4_queue_destroy(struct lpfc_hba *phba) +{ + /* + * Set FREE_INIT before beginning to free the queues. + * Wait until the users of queues to acknowledge to + * release queues by clearing FREE_WAIT. 
+ */ + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; + while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { + spin_unlock_irq(&phba->hbalock); + msleep(20); + spin_lock_irq(&phba->hbalock); + } + spin_unlock_irq(&phba->hbalock); + + lpfc_sli4_cleanup_poll_list(phba); + + /* Release HBA eqs */ + if (phba->sli4_hba.hdwq) + lpfc_sli4_release_hdwq(phba); + + if (phba->nvmet_support) { + lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, + phba->cfg_nvmet_mrq); + + lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, + phba->cfg_nvmet_mrq); + lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, + phba->cfg_nvmet_mrq); + } + + /* Release mailbox command work queue */ + __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); + + /* Release ELS work queue */ + __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); + + /* Release ELS work queue */ + __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); + + /* Release unsolicited receive queue */ + __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); + __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); + + /* Release ELS complete queue */ + __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); + + /* Release NVME LS complete queue */ + __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); + + /* Release mailbox command complete queue */ + __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); + + /* Everything on this list has been freed */ + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); + + /* Done with freeing the queues */ + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; + spin_unlock_irq(&phba->hbalock); +} + +int +lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) +{ + struct lpfc_rqb *rqbp; + struct lpfc_dmabuf *h_buf; + struct rqb_dmabuf *rqb_buffer; + + rqbp = rq->rqbp; + while (!list_empty(&rqbp->rqb_buffer_list)) { + list_remove_head(&rqbp->rqb_buffer_list, h_buf, + struct lpfc_dmabuf, list); + + rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); + (rqbp->rqb_free_buffer)(phba, rqb_buffer); + rqbp->buffer_count--; + } + return 1; +} + +static int +lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, + struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, + int qidx, uint32_t qtype) +{ + struct lpfc_sli_ring *pring; + int rc; + + if (!eq || !cq || !wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6085 Fast-path %s (%d) not allocated\n", + ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); + return -ENOMEM; + } + + /* create the Cq first */ + rc = lpfc_cq_create(phba, cq, eq, + (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6086 Failed setup of CQ (%d), rc = 0x%x\n", + qidx, (uint32_t)rc); + return rc; + } + + if (qtype != LPFC_MBOX) { + /* Setup cq_map for fast lookup */ + if (cq_map) + *cq_map = cq->queue_id; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", + qidx, cq->queue_id, qidx, eq->queue_id); + + /* create the wq */ + rc = lpfc_wq_create(phba, wq, cq, qtype); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", + qidx, (uint32_t)rc); + /* no need to tear down cq - caller will do so */ + return rc; + } + + /* Bind this CQ/WQ to the NVME ring */ + pring = wq->pring; + pring->sli.sli4.wqp = (void *)wq; + cq->pring = pring; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", + qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); + } else { + rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0539 Failed setup of slow-path MQ: " + "rc = 0x%x\n", rc); + /* no need to tear down cq - caller will do so */ + return rc; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", + phba->sli4_hba.mbx_wq->queue_id, + phba->sli4_hba.mbx_cq->queue_id); + } + + return 0; +} + +/** + * lpfc_setup_cq_lookup - Setup the CQ lookup table + * @phba: pointer to lpfc hba data structure. + * + * This routine will populate the cq_lookup table by all + * available CQ queue_id's. + **/ +static void +lpfc_setup_cq_lookup(struct lpfc_hba *phba) +{ + struct lpfc_queue *eq, *childq; + int qidx; + + memset(phba->sli4_hba.cq_lookup, 0, + (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); + /* Loop thru all IRQ vectors */ + for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { + /* Get the EQ corresponding to the IRQ vector */ + eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; + if (!eq) + continue; + /* Loop through all CQs associated with that EQ */ + list_for_each_entry(childq, &eq->child_list, list) { + if (childq->queue_id > phba->sli4_hba.cq_max) + continue; + if (childq->subtype == LPFC_IO) + phba->sli4_hba.cq_lookup[childq->queue_id] = + childq; + } + } +} + +/** + * lpfc_sli4_queue_setup - Set up all the SLI4 queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up all the SLI4 queues for the FCoE HBA + * operation. + * + * Return codes + * 0 - successful + * -ENOMEM - No available memory + * -EIO - The mailbox failed to complete successfully. 
+ **/ +int +lpfc_sli4_queue_setup(struct lpfc_hba *phba) +{ + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + struct lpfc_vector_map_info *cpup; + struct lpfc_sli4_hdw_queue *qp; + LPFC_MBOXQ_t *mboxq; + int qidx, cpu; + uint32_t length, usdelay; + int rc = -ENOMEM; + + /* Check for dual-ULP support */ + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3249 Unable to allocate memory for " + "QUERY_FW_CFG mailbox command\n"); + return -ENOMEM; + } + length = (sizeof(struct lpfc_mbx_query_fw_config) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_QUERY_FW_CFG, + length, LPFC_SLI4_MBX_EMBED); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + + shdr = (union lpfc_sli4_cfg_shdr *) + &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3250 QUERY_FW_CFG mailbox failed with status " + "x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + mempool_free(mboxq, phba->mbox_mem_pool); + rc = -ENXIO; + goto out_error; + } + + phba->sli4_hba.fw_func_mode = + mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; + phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; + phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; + phba->sli4_hba.physical_port = + mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " + "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, + phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); + + mempool_free(mboxq, phba->mbox_mem_pool); + + /* + * Set up HBA Event Queues (EQs) + */ + qp = phba->sli4_hba.hdwq; + + /* Set up HBA event queue */ + if (!qp) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3147 Fast-path EQs not allocated\n"); + rc = -ENOMEM; + goto out_error; + } + + /* Loop thru all IRQ vectors */ + for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { + /* Create HBA Event Queues (EQs) in order */ + for_each_present_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + + /* Look for the CPU thats using that vector with + * LPFC_CPU_FIRST_IRQ set. 
+ */ + if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) + continue; + if (qidx != cpup->eq) + continue; + + /* Create an EQ for that vector */ + rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, + phba->cfg_fcp_imax); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0523 Failed setup of fast-path" + " EQ (%d), rc = 0x%x\n", + cpup->eq, (uint32_t)rc); + goto out_destroy; + } + + /* Save the EQ for that vector in the hba_eq_hdl */ + phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = + qp[cpup->hdwq].hba_eq; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2584 HBA EQ setup: queue[%d]-id=%d\n", + cpup->eq, + qp[cpup->hdwq].hba_eq->queue_id); + } + } + + /* Loop thru all Hardware Queues */ + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); + cpup = &phba->sli4_hba.cpu_map[cpu]; + + /* Create the CQ/WQ corresponding to the Hardware Queue */ + rc = lpfc_create_wq_cq(phba, + phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, + qp[qidx].io_cq, + qp[qidx].io_wq, + &phba->sli4_hba.hdwq[qidx].io_cq_map, + qidx, + LPFC_IO); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0535 Failed to setup fastpath " + "IO WQ/CQ (%d), rc = 0x%x\n", + qidx, (uint32_t)rc); + goto out_destroy; + } + } + + /* + * Set up Slow Path Complete Queues (CQs) + */ + + /* Set up slow-path MBOX CQ/MQ */ + + if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0528 %s not allocated\n", + phba->sli4_hba.mbx_cq ? + "Mailbox WQ" : "Mailbox CQ"); + rc = -ENOMEM; + goto out_destroy; + } + + rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, + phba->sli4_hba.mbx_cq, + phba->sli4_hba.mbx_wq, + NULL, 0, LPFC_MBOX); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", + (uint32_t)rc); + goto out_destroy; + } + if (phba->nvmet_support) { + if (!phba->sli4_hba.nvmet_cqset) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3165 Fast-path NVME CQ Set " + "array not allocated\n"); + rc = -ENOMEM; + goto out_destroy; + } + if (phba->cfg_nvmet_mrq > 1) { + rc = lpfc_cq_create_set(phba, + phba->sli4_hba.nvmet_cqset, + qp, + LPFC_WCQ, LPFC_NVMET); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3164 Failed setup of NVME CQ " + "Set, rc = 0x%x\n", + (uint32_t)rc); + goto out_destroy; + } + } else { + /* Set up NVMET Receive Complete Queue */ + rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], + qp[0].hba_eq, + LPFC_WCQ, LPFC_NVMET); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6089 Failed setup NVMET CQ: " + "rc = 0x%x\n", (uint32_t)rc); + goto out_destroy; + } + phba->sli4_hba.nvmet_cqset[0]->chann = 0; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "6090 NVMET CQ setup: cq-id=%d, " + "parent eq-id=%d\n", + phba->sli4_hba.nvmet_cqset[0]->queue_id, + qp[0].hba_eq->queue_id); + } + } + + /* Set up slow-path ELS WQ/CQ */ + if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0530 ELS %s not allocated\n", + phba->sli4_hba.els_cq ? 
"WQ" : "CQ"); + rc = -ENOMEM; + goto out_destroy; + } + rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, + phba->sli4_hba.els_cq, + phba->sli4_hba.els_wq, + NULL, 0, LPFC_ELS); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", + (uint32_t)rc); + goto out_destroy; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", + phba->sli4_hba.els_wq->queue_id, + phba->sli4_hba.els_cq->queue_id); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + /* Set up NVME LS Complete Queue */ + if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6091 LS %s not allocated\n", + phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); + rc = -ENOMEM; + goto out_destroy; + } + rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, + phba->sli4_hba.nvmels_cq, + phba->sli4_hba.nvmels_wq, + NULL, 0, LPFC_NVME_LS); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0526 Failed setup of NVVME LS WQ/CQ: " + "rc = 0x%x\n", (uint32_t)rc); + goto out_destroy; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "6096 ELS WQ setup: wq-id=%d, " + "parent cq-id=%d\n", + phba->sli4_hba.nvmels_wq->queue_id, + phba->sli4_hba.nvmels_cq->queue_id); + } + + /* + * Create NVMET Receive Queue (RQ) + */ + if (phba->nvmet_support) { + if ((!phba->sli4_hba.nvmet_cqset) || + (!phba->sli4_hba.nvmet_mrq_hdr) || + (!phba->sli4_hba.nvmet_mrq_data)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6130 MRQ CQ Queues not " + "allocated\n"); + rc = -ENOMEM; + goto out_destroy; + } + if (phba->cfg_nvmet_mrq > 1) { + rc = lpfc_mrq_create(phba, + phba->sli4_hba.nvmet_mrq_hdr, + phba->sli4_hba.nvmet_mrq_data, + phba->sli4_hba.nvmet_cqset, + LPFC_NVMET); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6098 Failed setup of NVMET " + "MRQ: rc = 0x%x\n", + (uint32_t)rc); + goto out_destroy; + } + + } else { + rc = lpfc_rq_create(phba, + phba->sli4_hba.nvmet_mrq_hdr[0], + phba->sli4_hba.nvmet_mrq_data[0], + phba->sli4_hba.nvmet_cqset[0], + LPFC_NVMET); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6057 Failed setup of NVMET " + "Receive Queue: rc = 0x%x\n", + (uint32_t)rc); + goto out_destroy; + } + + lpfc_printf_log( + phba, KERN_INFO, LOG_INIT, + "6099 NVMET RQ setup: hdr-rq-id=%d, " + "dat-rq-id=%d parent cq-id=%d\n", + phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, + phba->sli4_hba.nvmet_mrq_data[0]->queue_id, + phba->sli4_hba.nvmet_cqset[0]->queue_id); + + } + } + + if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0540 Receive Queue not allocated\n"); + rc = -ENOMEM; + goto out_destroy; + } + + rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, + phba->sli4_hba.els_cq, LPFC_USOL); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0541 Failed setup of Receive Queue: " + "rc = 0x%x\n", (uint32_t)rc); + goto out_destroy; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " + "parent cq-id=%d\n", + phba->sli4_hba.hdr_rq->queue_id, + phba->sli4_hba.dat_rq->queue_id, + phba->sli4_hba.els_cq->queue_id); + + if (phba->cfg_fcp_imax) + usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; + else + usdelay = 0; + + for (qidx = 0; qidx < phba->cfg_irq_chann; + qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) + lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, + usdelay); + + if (phba->sli4_hba.cq_max) { + 
kfree(phba->sli4_hba.cq_lookup); + phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), + sizeof(struct lpfc_queue *), GFP_KERNEL); + if (!phba->sli4_hba.cq_lookup) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0549 Failed setup of CQ Lookup table: " + "size 0x%x\n", phba->sli4_hba.cq_max); + rc = -ENOMEM; + goto out_destroy; + } + lpfc_setup_cq_lookup(phba); + } + return 0; + +out_destroy: + lpfc_sli4_queue_unset(phba); +out_error: + return rc; +} + +/** + * lpfc_sli4_queue_unset - Unset all the SLI4 queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset all the SLI4 queues with the FCoE HBA + * operation. + * + * Return codes + * 0 - successful + * -ENOMEM - No available memory + * -EIO - The mailbox failed to complete successfully. + **/ +void +lpfc_sli4_queue_unset(struct lpfc_hba *phba) +{ + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_queue *eq; + int qidx; + + /* Unset mailbox command work queue */ + if (phba->sli4_hba.mbx_wq) + lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); + + /* Unset NVME LS work queue */ + if (phba->sli4_hba.nvmels_wq) + lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); + + /* Unset ELS work queue */ + if (phba->sli4_hba.els_wq) + lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); + + /* Unset unsolicited receive queue */ + if (phba->sli4_hba.hdr_rq) + lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, + phba->sli4_hba.dat_rq); + + /* Unset mailbox command complete queue */ + if (phba->sli4_hba.mbx_cq) + lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); + + /* Unset ELS complete queue */ + if (phba->sli4_hba.els_cq) + lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); + + /* Unset NVME LS complete queue */ + if (phba->sli4_hba.nvmels_cq) + lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); + + if (phba->nvmet_support) { + /* Unset NVMET MRQ queue */ + if (phba->sli4_hba.nvmet_mrq_hdr) { + for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) + lpfc_rq_destroy( + phba, + phba->sli4_hba.nvmet_mrq_hdr[qidx], + phba->sli4_hba.nvmet_mrq_data[qidx]); + } + + /* Unset NVMET CQ Set complete queue */ + if (phba->sli4_hba.nvmet_cqset) { + for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) + lpfc_cq_destroy( + phba, phba->sli4_hba.nvmet_cqset[qidx]); + } + } + + /* Unset fast-path SLI4 queues */ + if (phba->sli4_hba.hdwq) { + /* Loop thru all Hardware Queues */ + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + /* Destroy the CQ/WQ corresponding to Hardware Queue */ + qp = &phba->sli4_hba.hdwq[qidx]; + lpfc_wq_destroy(phba, qp->io_wq); + lpfc_cq_destroy(phba, qp->io_cq); + } + /* Loop thru all IRQ vectors */ + for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { + /* Destroy the EQ corresponding to the IRQ vector */ + eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; + lpfc_eq_destroy(phba, eq); + } + } + + kfree(phba->sli4_hba.cq_lookup); + phba->sli4_hba.cq_lookup = NULL; + phba->sli4_hba.cq_max = 0; +} + +/** + * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate and set up a pool of completion queue + * events. The body of the completion queue event is a completion queue entry + * CQE. For now, this pool is used for the interrupt service routine to queue + * the following HBA completion queue events for the worker thread to process: + * - Mailbox asynchronous events + * - Receive queue completion unsolicited events + * Later, this can be used for all the slow-path events. 
+ * + * Return codes + * 0 - successful + * -ENOMEM - No available memory + **/ +static int +lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event; + int i; + + for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { + cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); + if (!cq_event) + goto out_pool_create_fail; + list_add_tail(&cq_event->list, + &phba->sli4_hba.sp_cqe_event_pool); + } + return 0; + +out_pool_create_fail: + lpfc_sli4_cq_event_pool_destroy(phba); + return -ENOMEM; +} + +/** + * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the pool of completion queue events at + * driver unload time. Note that, it is the responsibility of the driver + * cleanup routine to free all the outstanding completion-queue events + * allocated from this pool back into the pool before invoking this routine + * to destroy the pool. + **/ +static void +lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event, *next_cq_event; + + list_for_each_entry_safe(cq_event, next_cq_event, + &phba->sli4_hba.sp_cqe_event_pool, list) { + list_del(&cq_event->list); + kfree(cq_event); + } +} + +/** + * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool + * @phba: pointer to lpfc hba data structure. + * + * This routine is the lock free version of the API invoked to allocate a + * completion-queue event from the free pool. + * + * Return: Pointer to the newly allocated completion-queue event if successful + * NULL otherwise. + **/ +struct lpfc_cq_event * +__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event = NULL; + + list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, + struct lpfc_cq_event, list); + return cq_event; +} + +/** + * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool + * @phba: pointer to lpfc hba data structure. + * + * This routine is the lock version of the API invoked to allocate a + * completion-queue event from the free pool. + * + * Return: Pointer to the newly allocated completion-queue event if successful + * NULL otherwise. + **/ +struct lpfc_cq_event * +lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event; + unsigned long iflags; + + spin_lock_irqsave(&phba->hbalock, iflags); + cq_event = __lpfc_sli4_cq_event_alloc(phba); + spin_unlock_irqrestore(&phba->hbalock, iflags); + return cq_event; +} + +/** + * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool + * @phba: pointer to lpfc hba data structure. + * @cq_event: pointer to the completion queue event to be freed. + * + * This routine is the lock free version of the API invoked to release a + * completion-queue event back into the free pool. + **/ +void +__lpfc_sli4_cq_event_release(struct lpfc_hba *phba, + struct lpfc_cq_event *cq_event) +{ + list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); +} + +/** + * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool + * @phba: pointer to lpfc hba data structure. + * @cq_event: pointer to the completion queue event to be freed. + * + * This routine is the lock version of the API invoked to release a + * completion-queue event back into the free pool. 
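The pool routines above follow a common kernel idiom: a double-underscore variant that assumes the caller already holds hbalock, and a thin wrapper that takes and releases the lock around it. A compile-only user-space analogue of that split, using a pthread mutex in place of the spinlock (all names below are illustrative, not the driver's):

#include <pthread.h>

/* A preallocated free list of events, drained and refilled under a lock. */
struct evt {
	struct evt *next;
};

static struct evt *pool_head;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Lock-free variant: the caller is expected to hold pool_lock. */
static struct evt *__evt_alloc(void)
{
	struct evt *e = pool_head;

	if (e)
		pool_head = e->next;
	return e;
}

/* Locked wrapper around the variant above. */
static struct evt *evt_alloc(void)
{
	struct evt *e;

	pthread_mutex_lock(&pool_lock);
	e = __evt_alloc();
	pthread_mutex_unlock(&pool_lock);
	return e;
}

/* Release follows the same split in reverse. */
static void __evt_release(struct evt *e)
{
	e->next = pool_head;
	pool_head = e;
}

static void evt_release(struct evt *e)
{
	pthread_mutex_lock(&pool_lock);
	__evt_release(e);
	pthread_mutex_unlock(&pool_lock);
}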
+ **/ +void +lpfc_sli4_cq_event_release(struct lpfc_hba *phba, + struct lpfc_cq_event *cq_event) +{ + unsigned long iflags; + spin_lock_irqsave(&phba->hbalock, iflags); + __lpfc_sli4_cq_event_release(phba, cq_event); + spin_unlock_irqrestore(&phba->hbalock, iflags); +} + +/** + * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool + * @phba: pointer to lpfc hba data structure. + * + * This routine is to free all the pending completion-queue events to the + * back into the free pool for device reset. + **/ +static void +lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) +{ + LIST_HEAD(cq_event_list); + struct lpfc_cq_event *cq_event; + unsigned long iflags; + + /* Retrieve all the pending WCQEs from pending WCQE lists */ + + /* Pending ELS XRI abort events */ + spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); + list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, + &cq_event_list); + spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); + + /* Pending asynnc events */ + spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); + list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, + &cq_event_list); + spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); + + while (!list_empty(&cq_event_list)) { + list_remove_head(&cq_event_list, cq_event, + struct lpfc_cq_event, list); + lpfc_sli4_cq_event_release(phba, cq_event); + } +} + +/** + * lpfc_pci_function_reset - Reset pci function. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to request a PCI function reset. It will destroys + * all resources assigned to the PCI function which originates this request. + * + * Return codes + * 0 - successful + * -ENOMEM - No available memory + * -EIO - The mailbox failed to complete successfully. + **/ +int +lpfc_pci_function_reset(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + uint32_t rc = 0, if_type; + uint32_t shdr_status, shdr_add_status; + uint32_t rdy_chk; + uint32_t port_reset = 0; + union lpfc_sli4_cfg_shdr *shdr; + struct lpfc_register reg_data; + uint16_t devid; + + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + switch (if_type) { + case LPFC_SLI_INTF_IF_TYPE_0: + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0494 Unable to allocate memory for " + "issuing SLI_FUNCTION_RESET mailbox " + "command\n"); + return -ENOMEM; + } + + /* Setup PCI function reset mailbox-ioctl command */ + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, + LPFC_SLI4_MBX_EMBED); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) + &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, + &shdr->response); + mempool_free(mboxq, phba->mbox_mem_pool); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0495 SLI_FUNCTION_RESET mailbox " + "failed with status x%x add_status x%x," + " mbx status x%x\n", + shdr_status, shdr_add_status, rc); + rc = -ENXIO; + } + break; + case LPFC_SLI_INTF_IF_TYPE_2: + case LPFC_SLI_INTF_IF_TYPE_6: +wait: + /* + * Poll the Port Status Register and wait for RDY for + * up to 30 seconds. If the port doesn't respond, treat + * it as an error. 
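The comment above describes the readiness wait that follows: read the port status register every 20 ms and give up after roughly 30 seconds (1500 attempts). A compile-only sketch of that bounded-poll shape, with check() standing in for the register read (a hypothetical helper, not a driver function):

#include <stdbool.h>
#include <unistd.h>

/* Poll a readiness predicate every 20 ms, up to 1500 tries (~30 s). */
static bool wait_for_ready(bool (*check)(void *arg), void *arg)
{
	int tries;

	for (tries = 0; tries < 1500; tries++) {
		if (check(arg))
			return true;
		usleep(20 * 1000);	/* 20 ms between polls */
	}
	return false;			/* caller maps this to -ENODEV */
}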
+ */ + for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { + if (lpfc_readl(phba->sli4_hba.u.if_type2. + STATUSregaddr, ®_data.word0)) { + rc = -ENODEV; + goto out; + } + if (bf_get(lpfc_sliport_status_rdy, ®_data)) + break; + msleep(20); + } + + if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { + phba->work_status[0] = readl( + phba->sli4_hba.u.if_type2.ERR1regaddr); + phba->work_status[1] = readl( + phba->sli4_hba.u.if_type2.ERR2regaddr); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2890 Port not ready, port status reg " + "0x%x error 1=0x%x, error 2=0x%x\n", + reg_data.word0, + phba->work_status[0], + phba->work_status[1]); + rc = -ENODEV; + goto out; + } + + if (bf_get(lpfc_sliport_status_pldv, ®_data)) + lpfc_pldv_detect = true; + + if (!port_reset) { + /* + * Reset the port now + */ + reg_data.word0 = 0; + bf_set(lpfc_sliport_ctrl_end, ®_data, + LPFC_SLIPORT_LITTLE_ENDIAN); + bf_set(lpfc_sliport_ctrl_ip, ®_data, + LPFC_SLIPORT_INIT_PORT); + writel(reg_data.word0, phba->sli4_hba.u.if_type2. + CTRLregaddr); + /* flush */ + pci_read_config_word(phba->pcidev, + PCI_DEVICE_ID, &devid); + + port_reset = 1; + msleep(20); + goto wait; + } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { + rc = -ENODEV; + goto out; + } + break; + + case LPFC_SLI_INTF_IF_TYPE_1: + default: + break; + } + +out: + /* Catch the not-ready port failure after a port reset. */ + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3317 HBA not functional: IP Reset Failed " + "try: echo fw_reset > board_mode\n"); + rc = -ENODEV; + } + + return rc; +} + +/** + * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the PCI device memory space for device + * with SLI-4 interface spec. + * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) +{ + struct pci_dev *pdev = phba->pcidev; + unsigned long bar0map_len, bar1map_len, bar2map_len; + int error; + uint32_t if_type; + + if (!pdev) + return -ENODEV; + + /* Set the device DMA mask size */ + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (error) + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (error) + return error; + + /* + * The BARs and register set definitions and offset locations are + * dependent on the if_type. + */ + if (pci_read_config_dword(pdev, LPFC_SLI_INTF, + &phba->sli4_hba.sli_intf.word0)) { + return -ENODEV; + } + + /* There is no SLI3 failback for SLI4 devices. */ + if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != + LPFC_SLI_INTF_VALID) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2894 SLI_INTF reg contents invalid " + "sli_intf reg 0x%x\n", + phba->sli4_hba.sli_intf.word0); + return -ENODEV; + } + + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + /* + * Get the bus address of SLI4 device Bar regions and the + * number of bytes required by each mapping. The mapping of the + * particular PCI BARs regions is dependent on the type of + * SLI4 device. 
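Before mapping the BARs, lpfc_sli4_pci_mem_setup() above negotiates the DMA mask: try 64-bit first and quietly fall back to 32-bit if the platform rejects it. A minimal kernel-context sketch of just that step (the helper name is a stand-in, not a driver symbol):

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Prefer 64-bit DMA, fall back to 32-bit; non-zero means neither works. */
static int set_dma_masks(struct device *dev)
{
	int err;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return err;
}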
+ */ + if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { + phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); + bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); + + /* + * Map SLI4 PCI Config Space Register base to a kernel virtual + * addr + */ + phba->sli4_hba.conf_regs_memmap_p = + ioremap(phba->pci_bar0_map, bar0map_len); + if (!phba->sli4_hba.conf_regs_memmap_p) { + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for SLI4 PCI config " + "registers.\n"); + return -ENODEV; + } + phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; + /* Set up BAR0 PCI config space register memory map */ + lpfc_sli4_bar0_register_memmap(phba, if_type); + } else { + phba->pci_bar0_map = pci_resource_start(pdev, 1); + bar0map_len = pci_resource_len(pdev, 1); + if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { + dev_printk(KERN_ERR, &pdev->dev, + "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); + return -ENODEV; + } + phba->sli4_hba.conf_regs_memmap_p = + ioremap(phba->pci_bar0_map, bar0map_len); + if (!phba->sli4_hba.conf_regs_memmap_p) { + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for SLI4 PCI config " + "registers.\n"); + return -ENODEV; + } + lpfc_sli4_bar0_register_memmap(phba, if_type); + } + + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { + if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { + /* + * Map SLI4 if type 0 HBA Control Register base to a + * kernel virtual address and setup the registers. + */ + phba->pci_bar1_map = pci_resource_start(pdev, + PCI_64BIT_BAR2); + bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); + phba->sli4_hba.ctrl_regs_memmap_p = + ioremap(phba->pci_bar1_map, + bar1map_len); + if (!phba->sli4_hba.ctrl_regs_memmap_p) { + dev_err(&pdev->dev, + "ioremap failed for SLI4 HBA " + "control registers.\n"); + error = -ENOMEM; + goto out_iounmap_conf; + } + phba->pci_bar2_memmap_p = + phba->sli4_hba.ctrl_regs_memmap_p; + lpfc_sli4_bar1_register_memmap(phba, if_type); + } else { + error = -ENOMEM; + goto out_iounmap_conf; + } + } + + if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && + (pci_resource_start(pdev, PCI_64BIT_BAR2))) { + /* + * Map SLI4 if type 6 HBA Doorbell Register base to a kernel + * virtual address and setup the registers. + */ + phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); + bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); + phba->sli4_hba.drbl_regs_memmap_p = + ioremap(phba->pci_bar1_map, bar1map_len); + if (!phba->sli4_hba.drbl_regs_memmap_p) { + dev_err(&pdev->dev, + "ioremap failed for SLI4 HBA doorbell registers.\n"); + error = -ENOMEM; + goto out_iounmap_conf; + } + phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; + lpfc_sli4_bar1_register_memmap(phba, if_type); + } + + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { + if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { + /* + * Map SLI4 if type 0 HBA Doorbell Register base to + * a kernel virtual address and setup the registers. 
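Each branch above repeats the same three-step mapping: read the BAR's bus address and length, then ioremap it into kernel virtual space. A condensed kernel-context sketch of that step (the helper name is illustrative; the caller still owns the matching iounmap on teardown):

#include <linux/pci.h>
#include <linux/io.h>

/* Map one PCI BAR; returns NULL if the BAR is absent or ioremap fails. */
static void __iomem *map_one_bar(struct pci_dev *pdev, int bar)
{
	resource_size_t start = pci_resource_start(pdev, bar);
	resource_size_t len = pci_resource_len(pdev, bar);

	if (!start || !len)
		return NULL;
	return ioremap(start, len);
}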
+ */ + phba->pci_bar2_map = pci_resource_start(pdev, + PCI_64BIT_BAR4); + bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); + phba->sli4_hba.drbl_regs_memmap_p = + ioremap(phba->pci_bar2_map, + bar2map_len); + if (!phba->sli4_hba.drbl_regs_memmap_p) { + dev_err(&pdev->dev, + "ioremap failed for SLI4 HBA" + " doorbell registers.\n"); + error = -ENOMEM; + goto out_iounmap_ctrl; + } + phba->pci_bar4_memmap_p = + phba->sli4_hba.drbl_regs_memmap_p; + error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); + if (error) + goto out_iounmap_all; + } else { + error = -ENOMEM; + goto out_iounmap_ctrl; + } + } + + if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && + pci_resource_start(pdev, PCI_64BIT_BAR4)) { + /* + * Map SLI4 if type 6 HBA DPP Register base to a kernel + * virtual address and setup the registers. + */ + phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); + bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); + phba->sli4_hba.dpp_regs_memmap_p = + ioremap(phba->pci_bar2_map, bar2map_len); + if (!phba->sli4_hba.dpp_regs_memmap_p) { + dev_err(&pdev->dev, + "ioremap failed for SLI4 HBA dpp registers.\n"); + error = -ENOMEM; + goto out_iounmap_all; + } + phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; + } + + /* Set up the EQ/CQ register handeling functions now */ + switch (if_type) { + case LPFC_SLI_INTF_IF_TYPE_0: + case LPFC_SLI_INTF_IF_TYPE_2: + phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; + phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; + phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; + break; + case LPFC_SLI_INTF_IF_TYPE_6: + phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; + phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; + phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; + break; + default: + break; + } + + return 0; + +out_iounmap_all: + if (phba->sli4_hba.drbl_regs_memmap_p) + iounmap(phba->sli4_hba.drbl_regs_memmap_p); +out_iounmap_ctrl: + if (phba->sli4_hba.ctrl_regs_memmap_p) + iounmap(phba->sli4_hba.ctrl_regs_memmap_p); +out_iounmap_conf: + iounmap(phba->sli4_hba.conf_regs_memmap_p); + + return error; +} + +/** + * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the PCI device memory space for device + * with SLI-4 interface spec. + **/ +static void +lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) +{ + uint32_t if_type; + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + + switch (if_type) { + case LPFC_SLI_INTF_IF_TYPE_0: + iounmap(phba->sli4_hba.drbl_regs_memmap_p); + iounmap(phba->sli4_hba.ctrl_regs_memmap_p); + iounmap(phba->sli4_hba.conf_regs_memmap_p); + break; + case LPFC_SLI_INTF_IF_TYPE_2: + iounmap(phba->sli4_hba.conf_regs_memmap_p); + break; + case LPFC_SLI_INTF_IF_TYPE_6: + iounmap(phba->sli4_hba.drbl_regs_memmap_p); + iounmap(phba->sli4_hba.conf_regs_memmap_p); + if (phba->sli4_hba.dpp_regs_memmap_p) + iounmap(phba->sli4_hba.dpp_regs_memmap_p); + break; + case LPFC_SLI_INTF_IF_TYPE_1: + break; + default: + dev_printk(KERN_ERR, &phba->pcidev->dev, + "FATAL - unsupported SLI4 interface type - %d\n", + if_type); + break; + } +} + +/** + * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to enable the MSI-X interrupt vectors to device + * with SLI-3 interface specs. 
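The switch at the end of lpfc_sli4_pci_mem_setup() above installs per-if_type EQ/CQ doorbell helpers as function pointers, so the interrupt hot path never re-tests the interface type. A compile-only sketch of that dispatch shape (the types, names, and if_type values here are illustrative only):

#include <stdbool.h>

typedef void (*db_write_fn)(unsigned int count, bool arm);

static void write_db_legacy(unsigned int count, bool arm) { (void)count; (void)arm; }
static void write_db_if6(unsigned int count, bool arm)    { (void)count; (void)arm; }

struct db_ops {
	db_write_fn write_eq_db;
	db_write_fn write_cq_db;
};

/* Chosen once at probe time, then used unconditionally on the hot path. */
static struct db_ops pick_db_ops(int if_type)
{
	struct db_ops ops;

	if (if_type == 6) {
		ops.write_eq_db = write_db_if6;
		ops.write_cq_db = write_db_if6;
	} else {
		ops.write_eq_db = write_db_legacy;
		ops.write_cq_db = write_db_legacy;
	}
	return ops;
}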
+ * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_sli_enable_msix(struct lpfc_hba *phba) +{ + int rc; + LPFC_MBOXQ_t *pmb; + + /* Set up MSI-X multi-message vectors */ + rc = pci_alloc_irq_vectors(phba->pcidev, + LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); + if (rc < 0) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0420 PCI enable MSI-X failed (%d)\n", rc); + goto vec_fail_out; + } + + /* + * Assign MSI-X vectors to interrupt handlers + */ + + /* vector-0 is associated to slow-path handler */ + rc = request_irq(pci_irq_vector(phba->pcidev, 0), + &lpfc_sli_sp_intr_handler, 0, + LPFC_SP_DRIVER_HANDLER_NAME, phba); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0421 MSI-X slow-path request_irq failed " + "(%d)\n", rc); + goto msi_fail_out; + } + + /* vector-1 is associated to fast-path handler */ + rc = request_irq(pci_irq_vector(phba->pcidev, 1), + &lpfc_sli_fp_intr_handler, 0, + LPFC_FP_DRIVER_HANDLER_NAME, phba); + + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0429 MSI-X fast-path request_irq failed " + "(%d)\n", rc); + goto irq_fail_out; + } + + /* + * Configure HBA MSI-X attention conditions to messages + */ + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + + if (!pmb) { + rc = -ENOMEM; + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0474 Unable to allocate memory for issuing " + "MBOX_CONFIG_MSI command\n"); + goto mem_fail_out; + } + rc = lpfc_config_msi(phba, pmb); + if (rc) + goto mbx_fail_out; + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, + "0351 Config MSI mailbox command failed, " + "mbxCmd x%x, mbxStatus x%x\n", + pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); + goto mbx_fail_out; + } + + /* Free memory allocated for mailbox command */ + mempool_free(pmb, phba->mbox_mem_pool); + return rc; + +mbx_fail_out: + /* Free memory allocated for mailbox command */ + mempool_free(pmb, phba->mbox_mem_pool); + +mem_fail_out: + /* free the irq already requested */ + free_irq(pci_irq_vector(phba->pcidev, 1), phba); + +irq_fail_out: + /* free the irq already requested */ + free_irq(pci_irq_vector(phba->pcidev, 0), phba); + +msi_fail_out: + /* Unconfigure MSI-X capability structure */ + pci_free_irq_vectors(phba->pcidev); + +vec_fail_out: + return rc; +} + +/** + * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to enable the MSI interrupt mode to device with + * SLI-3 interface spec. The kernel function pci_enable_msi() is called to + * enable the MSI vector. The device driver is responsible for calling the + * request_irq() to register MSI vector with a interrupt the handler, which + * is done in this function. 
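lpfc_sli_enable_msix() above is a textbook example of goto-based unwinding: each failure label releases only what was acquired before the jump, with the labels falling through in reverse acquisition order. A compile-only skeleton of that shape using placeholder helpers (none of these names exist in the driver):

static int acquire_vectors(void)  { return 0; }	/* placeholder */
static int request_slow_irq(void) { return 0; }	/* placeholder */
static int request_fast_irq(void) { return 0; }	/* placeholder */
static void free_slow_irq(void)   { }
static void release_vectors(void) { }

static int setup_msix_like(void)
{
	int rc;

	rc = acquire_vectors();
	if (rc)
		goto out;
	rc = request_slow_irq();
	if (rc)
		goto free_vecs;
	rc = request_fast_irq();
	if (rc)
		goto free_slow;
	return 0;

free_slow:
	free_slow_irq();
free_vecs:
	release_vectors();
out:
	return rc;
}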
+ * + * Return codes + * 0 - successful + * other values - error + */ +static int +lpfc_sli_enable_msi(struct lpfc_hba *phba) +{ + int rc; + + rc = pci_enable_msi(phba->pcidev); + if (!rc) + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0012 PCI enable MSI mode success.\n"); + else { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0471 PCI enable MSI mode failed (%d)\n", rc); + return rc; + } + + rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, + 0, LPFC_DRIVER_NAME, phba); + if (rc) { + pci_disable_msi(phba->pcidev); + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0478 MSI request_irq failed (%d)\n", rc); + } + return rc; +} + +/** + * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. + * @phba: pointer to lpfc hba data structure. + * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). + * + * This routine is invoked to enable device interrupt and associate driver's + * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface + * spec. Depends on the interrupt mode configured to the driver, the driver + * will try to fallback from the configured interrupt mode to an interrupt + * mode which is supported by the platform, kernel, and device in the order + * of: + * MSI-X -> MSI -> IRQ. + * + * Return codes + * 0 - successful + * other values - error + **/ +static uint32_t +lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) +{ + uint32_t intr_mode = LPFC_INTR_ERROR; + int retval; + + /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ + retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); + if (retval) + return intr_mode; + phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; + + if (cfg_mode == 2) { + /* Now, try to enable MSI-X interrupt mode */ + retval = lpfc_sli_enable_msix(phba); + if (!retval) { + /* Indicate initialization to MSI-X mode */ + phba->intr_type = MSIX; + intr_mode = 2; + } + } + + /* Fallback to MSI if MSI-X initialization failed */ + if (cfg_mode >= 1 && phba->intr_type == NONE) { + retval = lpfc_sli_enable_msi(phba); + if (!retval) { + /* Indicate initialization to MSI mode */ + phba->intr_type = MSI; + intr_mode = 1; + } + } + + /* Fallback to INTx if both MSI-X/MSI initalization failed */ + if (phba->intr_type == NONE) { + retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, + IRQF_SHARED, LPFC_DRIVER_NAME, phba); + if (!retval) { + /* Indicate initialization to INTx mode */ + phba->intr_type = INTx; + intr_mode = 0; + } + } + return intr_mode; +} + +/** + * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to disable device interrupt and disassociate the + * driver's interrupt handler(s) from interrupt vector(s) to device with + * SLI-3 interface spec. Depending on the interrupt mode, the driver will + * release the interrupt vector(s) for the message signaled interrupt. + **/ +static void +lpfc_sli_disable_intr(struct lpfc_hba *phba) +{ + int nr_irqs, i; + + if (phba->intr_type == MSIX) + nr_irqs = LPFC_MSIX_VECTORS; + else + nr_irqs = 1; + + for (i = 0; i < nr_irqs; i++) + free_irq(pci_irq_vector(phba->pcidev, i), phba); + pci_free_irq_vectors(phba->pcidev); + + /* Reset interrupt management states */ + phba->intr_type = NONE; + phba->sli.slistat.sli_intr = 0; +} + +/** + * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue + * @phba: pointer to lpfc hba data structure. 
+ * @id: EQ vector index or Hardware Queue index + * @match: LPFC_FIND_BY_EQ = match by EQ + * LPFC_FIND_BY_HDWQ = match by Hardware Queue + * Return the CPU that matches the selection criteria + */ +static uint16_t +lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) +{ + struct lpfc_vector_map_info *cpup; + int cpu; + + /* Loop through all CPUs */ + for_each_present_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + + /* If we are matching by EQ, there may be multiple CPUs using + * using the same vector, so select the one with + * LPFC_CPU_FIRST_IRQ set. + */ + if ((match == LPFC_FIND_BY_EQ) && + (cpup->flag & LPFC_CPU_FIRST_IRQ) && + (cpup->eq == id)) + return cpu; + + /* If matching by HDWQ, select the first CPU that matches */ + if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) + return cpu; + } + return 0; +} + +#ifdef CONFIG_X86 +/** + * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded + * @phba: pointer to lpfc hba data structure. + * @cpu: CPU map index + * @phys_id: CPU package physical id + * @core_id: CPU core id + */ +static int +lpfc_find_hyper(struct lpfc_hba *phba, int cpu, + uint16_t phys_id, uint16_t core_id) +{ + struct lpfc_vector_map_info *cpup; + int idx; + + for_each_present_cpu(idx) { + cpup = &phba->sli4_hba.cpu_map[idx]; + /* Does the cpup match the one we are looking for */ + if ((cpup->phys_id == phys_id) && + (cpup->core_id == core_id) && + (cpu != idx)) + return 1; + } + return 0; +} +#endif + +/* + * lpfc_assign_eq_map_info - Assigns eq for vector_map structure + * @phba: pointer to lpfc hba data structure. + * @eqidx: index for eq and irq vector + * @flag: flags to set for vector_map structure + * @cpu: cpu used to index vector_map structure + * + * The routine assigns eq info into vector_map structure + */ +static inline void +lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, + unsigned int cpu) +{ + struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; + struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); + + cpup->eq = eqidx; + cpup->flag |= flag; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", + cpu, eqhdl->irq, cpup->eq, cpup->flag); +} + +/** + * lpfc_cpu_map_array_init - Initialize cpu_map structure + * @phba: pointer to lpfc hba data structure. + * + * The routine initializes the cpu_map array structure + */ +static void +lpfc_cpu_map_array_init(struct lpfc_hba *phba) +{ + struct lpfc_vector_map_info *cpup; + struct lpfc_eq_intr_info *eqi; + int cpu; + + for_each_possible_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; + cpup->core_id = LPFC_VECTOR_MAP_EMPTY; + cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; + cpup->eq = LPFC_VECTOR_MAP_EMPTY; + cpup->flag = 0; + eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); + INIT_LIST_HEAD(&eqi->list); + eqi->icnt = 0; + } +} + +/** + * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure + * @phba: pointer to lpfc hba data structure. + * + * The routine initializes the hba_eq_hdl array structure + */ +static void +lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) +{ + struct lpfc_hba_eq_hdl *eqhdl; + int i; + + for (i = 0; i < phba->cfg_irq_chann; i++) { + eqhdl = lpfc_get_eq_hdl(i); + eqhdl->irq = LPFC_IRQ_EMPTY; + eqhdl->phba = phba; + } +} + +/** + * lpfc_cpu_affinity_check - Check vector CPU affinity mappings + * @phba: pointer to lpfc hba data structure. + * @vectors: number of msix vectors allocated. 
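lpfc_find_cpu_handle() above scans the per-CPU map for either the first CPU flagged as owning an EQ's IRQ or the first CPU bound to a given hardware queue. A reduced, compile-only illustration of the EQ-match case over a plain array (the field and flag names are stand-ins for the driver's cpu_map):

#include <stdint.h>

#define FLAG_FIRST_IRQ	0x1

struct cpu_map_entry {
	uint16_t eq;
	uint16_t hdwq;
	uint16_t flag;
};

/* First CPU marked as owning the IRQ for this EQ; CPU 0 as a fallback. */
static int find_cpu_by_eq(const struct cpu_map_entry *map, int ncpu, uint16_t eq)
{
	int cpu;

	for (cpu = 0; cpu < ncpu; cpu++)
		if ((map[cpu].flag & FLAG_FIRST_IRQ) && map[cpu].eq == eq)
			return cpu;
	return 0;
}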
+ * + * The routine will figure out the CPU affinity assignment for every + * MSI-X vector allocated for the HBA. + * In addition, the CPU to IO channel mapping will be calculated + * and the phba->sli4_hba.cpu_map array will reflect this. + */ +static void +lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) +{ + int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; + int max_phys_id, min_phys_id; + int max_core_id, min_core_id; + struct lpfc_vector_map_info *cpup; + struct lpfc_vector_map_info *new_cpup; +#ifdef CONFIG_X86 + struct cpuinfo_x86 *cpuinfo; +#endif +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + struct lpfc_hdwq_stat *c_stat; +#endif + + max_phys_id = 0; + min_phys_id = LPFC_VECTOR_MAP_EMPTY; + max_core_id = 0; + min_core_id = LPFC_VECTOR_MAP_EMPTY; + + /* Update CPU map with physical id and core id of each CPU */ + for_each_present_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; +#ifdef CONFIG_X86 + cpuinfo = &cpu_data(cpu); + cpup->phys_id = cpuinfo->phys_proc_id; + cpup->core_id = cpuinfo->cpu_core_id; + if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id)) + cpup->flag |= LPFC_CPU_MAP_HYPER; +#else + /* No distinction between CPUs for other platforms */ + cpup->phys_id = 0; + cpup->core_id = cpu; +#endif + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3328 CPU %d physid %d coreid %d flag x%x\n", + cpu, cpup->phys_id, cpup->core_id, cpup->flag); + + if (cpup->phys_id > max_phys_id) + max_phys_id = cpup->phys_id; + if (cpup->phys_id < min_phys_id) + min_phys_id = cpup->phys_id; + + if (cpup->core_id > max_core_id) + max_core_id = cpup->core_id; + if (cpup->core_id < min_core_id) + min_core_id = cpup->core_id; + } + + /* After looking at each irq vector assigned to this pcidev, its + * possible to see that not ALL CPUs have been accounted for. + * Next we will set any unassigned (unaffinitized) cpu map + * entries to a IRQ on the same phys_id. + */ + first_cpu = cpumask_first(cpu_present_mask); + start_cpu = first_cpu; + + for_each_present_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + + /* Is this CPU entry unassigned */ + if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { + /* Mark CPU as IRQ not assigned by the kernel */ + cpup->flag |= LPFC_CPU_MAP_UNASSIGN; + + /* If so, find a new_cpup that is on the SAME + * phys_id as cpup. start_cpu will start where we + * left off so all unassigned entries don't get assgined + * the IRQ of the first entry. + */ + new_cpu = start_cpu; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; + if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && + (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) && + (new_cpup->phys_id == cpup->phys_id)) + goto found_same; + new_cpu = lpfc_next_present_cpu(new_cpu); + } + /* At this point, we leave the CPU as unassigned */ + continue; +found_same: + /* We found a matching phys_id, so copy the IRQ info */ + cpup->eq = new_cpup->eq; + + /* Bump start_cpu to the next slot to minmize the + * chance of having multiple unassigned CPU entries + * selecting the same IRQ. 
+ */ + start_cpu = lpfc_next_present_cpu(new_cpu); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3337 Set Affinity: CPU %d " + "eq %d from peer cpu %d same " + "phys_id (%d)\n", + cpu, cpup->eq, new_cpu, + cpup->phys_id); + } + } + + /* Set any unassigned cpu map entries to a IRQ on any phys_id */ + start_cpu = first_cpu; + + for_each_present_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + + /* Is this entry unassigned */ + if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { + /* Mark it as IRQ not assigned by the kernel */ + cpup->flag |= LPFC_CPU_MAP_UNASSIGN; + + /* If so, find a new_cpup thats on ANY phys_id + * as the cpup. start_cpu will start where we + * left off so all unassigned entries don't get + * assigned the IRQ of the first entry. + */ + new_cpu = start_cpu; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; + if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && + (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY)) + goto found_any; + new_cpu = lpfc_next_present_cpu(new_cpu); + } + /* We should never leave an entry unassigned */ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3339 Set Affinity: CPU %d " + "eq %d UNASSIGNED\n", + cpup->hdwq, cpup->eq); + continue; +found_any: + /* We found an available entry, copy the IRQ info */ + cpup->eq = new_cpup->eq; + + /* Bump start_cpu to the next slot to minmize the + * chance of having multiple unassigned CPU entries + * selecting the same IRQ. + */ + start_cpu = lpfc_next_present_cpu(new_cpu); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3338 Set Affinity: CPU %d " + "eq %d from peer cpu %d (%d/%d)\n", + cpu, cpup->eq, new_cpu, + new_cpup->phys_id, new_cpup->core_id); + } + } + + /* Assign hdwq indices that are unique across all cpus in the map + * that are also FIRST_CPUs. + */ + idx = 0; + for_each_present_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + + /* Only FIRST IRQs get a hdwq index assignment. */ + if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) + continue; + + /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */ + cpup->hdwq = idx; + idx++; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3333 Set Affinity: CPU %d (phys %d core %d): " + "hdwq %d eq %d flg x%x\n", + cpu, cpup->phys_id, cpup->core_id, + cpup->hdwq, cpup->eq, cpup->flag); + } + /* Associate a hdwq with each cpu_map entry + * This will be 1 to 1 - hdwq to cpu, unless there are less + * hardware queues then CPUs. For that case we will just round-robin + * the available hardware queues as they get assigned to CPUs. + * The next_idx is the idx from the FIRST_CPU loop above to account + * for irq_chann < hdwq. The idx is used for round-robin assignments + * and needs to start at 0. + */ + next_idx = idx; + start_cpu = 0; + idx = 0; + for_each_present_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + + /* FIRST cpus are already mapped. */ + if (cpup->flag & LPFC_CPU_FIRST_IRQ) + continue; + + /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq + * of the unassigned cpus to the next idx so that all + * hdw queues are fully utilized. + */ + if (next_idx < phba->cfg_hdw_queue) { + cpup->hdwq = next_idx; + next_idx++; + continue; + } + + /* Not a First CPU and all hdw_queues are used. Reuse a + * Hardware Queue for another CPU, so be smart about it + * and pick one that has its IRQ/EQ mapped to the same phys_id + * (CPU package) and core_id. 
+ */ + new_cpu = start_cpu; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; + if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && + new_cpup->phys_id == cpup->phys_id && + new_cpup->core_id == cpup->core_id) { + goto found_hdwq; + } + new_cpu = lpfc_next_present_cpu(new_cpu); + } + + /* If we can't match both phys_id and core_id, + * settle for just a phys_id match. + */ + new_cpu = start_cpu; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; + if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && + new_cpup->phys_id == cpup->phys_id) + goto found_hdwq; + new_cpu = lpfc_next_present_cpu(new_cpu); + } + + /* Otherwise just round robin on cfg_hdw_queue */ + cpup->hdwq = idx % phba->cfg_hdw_queue; + idx++; + goto logit; + found_hdwq: + /* We found an available entry, copy the IRQ info */ + start_cpu = lpfc_next_present_cpu(new_cpu); + cpup->hdwq = new_cpup->hdwq; + logit: + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3335 Set Affinity: CPU %d (phys %d core %d): " + "hdwq %d eq %d flg x%x\n", + cpu, cpup->phys_id, cpup->core_id, + cpup->hdwq, cpup->eq, cpup->flag); + } + + /* + * Initialize the cpu_map slots for not-present cpus in case + * a cpu is hot-added. Perform a simple hdwq round robin assignment. + */ + idx = 0; + for_each_possible_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu); + c_stat->hdwq_no = cpup->hdwq; +#endif + if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) + continue; + + cpup->hdwq = idx++ % phba->cfg_hdw_queue; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + c_stat->hdwq_no = cpup->hdwq; +#endif + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3340 Set Affinity: not present " + "CPU %d hdwq %d\n", + cpu, cpup->hdwq); + } + + /* The cpu_map array will be used later during initialization + * when EQ / CQ / WQs are allocated and configured. + */ + return; +} + +/** + * lpfc_cpuhp_get_eq + * + * @phba: pointer to lpfc hba data structure. + * @cpu: cpu going offline + * @eqlist: eq list to append to + */ +static int +lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu, + struct list_head *eqlist) +{ + const struct cpumask *maskp; + struct lpfc_queue *eq; + struct cpumask *tmp; + u16 idx; + + tmp = kzalloc(cpumask_size(), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + for (idx = 0; idx < phba->cfg_irq_chann; idx++) { + maskp = pci_irq_get_affinity(phba->pcidev, idx); + if (!maskp) + continue; + /* + * if irq is not affinitized to the cpu going + * then we don't need to poll the eq attached + * to it. + */ + if (!cpumask_and(tmp, maskp, cpumask_of(cpu))) + continue; + /* get the cpus that are online and are affini- + * tized to this irq vector. If the count is + * more than 1 then cpuhp is not going to shut- + * down this vector. Since this cpu has not + * gone offline yet, we need >1. + */ + cpumask_and(tmp, maskp, cpu_online_mask); + if (cpumask_weight(tmp) > 1) + continue; + + /* Now that we have an irq to shutdown, get the eq + * mapped to this irq. 
Note: multiple hdwq's in + * the software can share an eq, but eventually + * only eq will be mapped to this vector + */ + eq = phba->sli4_hba.hba_eq_hdl[idx].eq; + list_add(&eq->_poll_list, eqlist); + } + kfree(tmp); + return 0; +} + +static void __lpfc_cpuhp_remove(struct lpfc_hba *phba) +{ + if (phba->sli_rev != LPFC_SLI_REV4) + return; + + cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state, + &phba->cpuhp); + /* + * unregistering the instance doesn't stop the polling + * timer. Wait for the poll timer to retire. + */ + synchronize_rcu(); + del_timer_sync(&phba->cpuhp_poll_timer); +} + +static void lpfc_cpuhp_remove(struct lpfc_hba *phba) +{ + if (phba->pport && (phba->pport->fc_flag & FC_OFFLINE_MODE)) + return; + + __lpfc_cpuhp_remove(phba); +} + +static void lpfc_cpuhp_add(struct lpfc_hba *phba) +{ + if (phba->sli_rev != LPFC_SLI_REV4) + return; + + rcu_read_lock(); + + if (!list_empty(&phba->poll_list)) + mod_timer(&phba->cpuhp_poll_timer, + jiffies + msecs_to_jiffies(LPFC_POLL_HB)); + + rcu_read_unlock(); + + cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, + &phba->cpuhp); +} + +static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) +{ + if (phba->pport->load_flag & FC_UNLOADING) { + *retval = -EAGAIN; + return true; + } + + if (phba->sli_rev != LPFC_SLI_REV4) { + *retval = 0; + return true; + } + + /* proceed with the hotplug */ + return false; +} + +/** + * lpfc_irq_set_aff - set IRQ affinity + * @eqhdl: EQ handle + * @cpu: cpu to set affinity + * + **/ +static inline void +lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) +{ + cpumask_clear(&eqhdl->aff_mask); + cpumask_set_cpu(cpu, &eqhdl->aff_mask); + irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); + irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask); +} + +/** + * lpfc_irq_clear_aff - clear IRQ affinity + * @eqhdl: EQ handle + * + **/ +static inline void +lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) +{ + cpumask_clear(&eqhdl->aff_mask); + irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); +} + +/** + * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event + * @phba: pointer to HBA context object. + * @cpu: cpu going offline/online + * @offline: true, cpu is going offline. false, cpu is coming online. + * + * If cpu is going offline, we'll try our best effort to find the next + * online cpu on the phba's original_mask and migrate all offlining IRQ + * affinities. + * + * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu. + * + * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on + * PCI_IRQ_AFFINITY to auto-manage IRQ affinity. 
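lpfc_irq_set_aff() above pins a vector to a single CPU: build a one-CPU mask, opt the IRQ out of balancing, and apply the mask. A kernel-context sketch of those three calls in isolation (the helper name is illustrative; the mask storage is caller-provided, as in the driver's eq handle):

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

/* Pin one IRQ line to one CPU and keep irqbalance from moving it. */
static void pin_irq_to_cpu(unsigned int irq, unsigned int cpu,
			   struct cpumask *mask)
{
	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);
	irq_set_status_flags(irq, IRQ_NO_BALANCING);
	irq_set_affinity(irq, mask);
}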
+ * + **/ +static void +lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) +{ + struct lpfc_vector_map_info *cpup; + struct cpumask *aff_mask; + unsigned int cpu_select, cpu_next, idx; + const struct cpumask *orig_mask; + + if (phba->irq_chann_mode == NORMAL_MODE) + return; + + orig_mask = &phba->sli4_hba.irq_aff_mask; + + if (!cpumask_test_cpu(cpu, orig_mask)) + return; + + cpup = &phba->sli4_hba.cpu_map[cpu]; + + if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) + return; + + if (offline) { + /* Find next online CPU on original mask */ + cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); + cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next); + + /* Found a valid CPU */ + if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { + /* Go through each eqhdl and ensure offlining + * cpu aff_mask is migrated + */ + for (idx = 0; idx < phba->cfg_irq_chann; idx++) { + aff_mask = lpfc_get_aff_mask(idx); + + /* Migrate affinity */ + if (cpumask_test_cpu(cpu, aff_mask)) + lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), + cpu_select); + } + } else { + /* Rely on irqbalance if no online CPUs left on NUMA */ + for (idx = 0; idx < phba->cfg_irq_chann; idx++) + lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); + } + } else { + /* Migrate affinity back to this CPU */ + lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); + } +} + +static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) +{ + struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); + struct lpfc_queue *eq, *next; + LIST_HEAD(eqlist); + int retval; + + if (!phba) { + WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); + return 0; + } + + if (__lpfc_cpuhp_checks(phba, &retval)) + return retval; + + lpfc_irq_rebalance(phba, cpu, true); + + retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); + if (retval) + return retval; + + /* start polling on these eq's */ + list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { + list_del_init(&eq->_poll_list); + lpfc_sli4_start_polling(eq); + } + + return 0; +} + +static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) +{ + struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); + struct lpfc_queue *eq, *next; + unsigned int n; + int retval; + + if (!phba) { + WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); + return 0; + } + + if (__lpfc_cpuhp_checks(phba, &retval)) + return retval; + + lpfc_irq_rebalance(phba, cpu, false); + + list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { + n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); + if (n == cpu) + lpfc_sli4_stop_polling(eq); + } + + return 0; +} + +/** + * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to enable the MSI-X interrupt vectors to device + * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them + * to cpus on the system. + * + * When cfg_irq_numa is enabled, the adapter will only allocate vectors for + * the number of cpus on the same numa node as this adapter. The vectors are + * allocated without requesting OS affinity mapping. A vector will be + * allocated and assigned to each online and offline cpu. If the cpu is + * online, then affinity will be set to that cpu. If the cpu is offline, then + * affinity will be set to the nearest peer cpu within the numa node that is + * online. If there are no online cpus within the numa node, affinity is not + * assigned and the OS may do as it pleases. 
Note: cpu vector affinity mapping + * is consistent with the way cpu online/offline is handled when cfg_irq_numa is + * configured. + * + * If numa mode is not enabled and there is more than 1 vector allocated, then + * the driver relies on the managed irq interface where the OS assigns vector to + * cpu affinity. The driver will then use that affinity mapping to setup its + * cpu mapping table. + * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_sli4_enable_msix(struct lpfc_hba *phba) +{ + int vectors, rc, index; + char *name; + const struct cpumask *aff_mask = NULL; + unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; + struct lpfc_vector_map_info *cpup; + struct lpfc_hba_eq_hdl *eqhdl; + const struct cpumask *maskp; + unsigned int flags = PCI_IRQ_MSIX; + + /* Set up MSI-X multi-message vectors */ + vectors = phba->cfg_irq_chann; + + if (phba->irq_chann_mode != NORMAL_MODE) + aff_mask = &phba->sli4_hba.irq_aff_mask; + + if (aff_mask) { + cpu_cnt = cpumask_weight(aff_mask); + vectors = min(phba->cfg_irq_chann, cpu_cnt); + + /* cpu: iterates over aff_mask including offline or online + * cpu_select: iterates over online aff_mask to set affinity + */ + cpu = cpumask_first(aff_mask); + cpu_select = lpfc_next_online_cpu(aff_mask, cpu); + } else { + flags |= PCI_IRQ_AFFINITY; + } + + rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); + if (rc < 0) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0484 PCI enable MSI-X failed (%d)\n", rc); + goto vec_fail_out; + } + vectors = rc; + + /* Assign MSI-X vectors to interrupt handlers */ + for (index = 0; index < vectors; index++) { + eqhdl = lpfc_get_eq_hdl(index); + name = eqhdl->handler_name; + memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); + snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, + LPFC_DRIVER_HANDLER_NAME"%d", index); + + eqhdl->idx = index; + rc = pci_irq_vector(phba->pcidev, index); + if (rc < 0) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0489 MSI-X fast-path (%d) " + "pci_irq_vec failed (%d)\n", index, rc); + goto cfg_fail_out; + } + eqhdl->irq = rc; + + rc = request_threaded_irq(eqhdl->irq, + &lpfc_sli4_hba_intr_handler, + &lpfc_sli4_hba_intr_handler_th, + IRQF_ONESHOT, name, eqhdl); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0486 MSI-X fast-path (%d) " + "request_irq failed (%d)\n", index, rc); + goto cfg_fail_out; + } + + if (aff_mask) { + /* If found a neighboring online cpu, set affinity */ + if (cpu_select < nr_cpu_ids) + lpfc_irq_set_aff(eqhdl, cpu_select); + + /* Assign EQ to cpu_map */ + lpfc_assign_eq_map_info(phba, index, + LPFC_CPU_FIRST_IRQ, + cpu); + + /* Iterate to next offline or online cpu in aff_mask */ + cpu = cpumask_next(cpu, aff_mask); + + /* Find next online cpu in aff_mask to set affinity */ + cpu_select = lpfc_next_online_cpu(aff_mask, cpu); + } else if (vectors == 1) { + cpu = cpumask_first(cpu_present_mask); + lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, + cpu); + } else { + maskp = pci_irq_get_affinity(phba->pcidev, index); + + /* Loop through all CPUs associated with vector index */ + for_each_cpu_and(cpu, maskp, cpu_present_mask) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + + /* If this is the first CPU thats assigned to + * this vector, set LPFC_CPU_FIRST_IRQ. + * + * With certain platforms its possible that irq + * vectors are affinitized to all the cpu's. + * This can result in each cpu_map.eq to be set + * to the last vector, resulting in overwrite + * of all the previous cpu_map.eq. 
Ensure that + * each vector receives a place in cpu_map. + * Later call to lpfc_cpu_affinity_check will + * ensure we are nicely balanced out. + */ + if (cpup->eq != LPFC_VECTOR_MAP_EMPTY) + continue; + lpfc_assign_eq_map_info(phba, index, + LPFC_CPU_FIRST_IRQ, + cpu); + break; + } + } + } + + if (vectors != phba->cfg_irq_chann) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3238 Reducing IO channels to match number of " + "MSI-X vectors, requested %d got %d\n", + phba->cfg_irq_chann, vectors); + if (phba->cfg_irq_chann > vectors) + phba->cfg_irq_chann = vectors; + } + + return rc; + +cfg_fail_out: + /* free the irq already requested */ + for (--index; index >= 0; index--) { + eqhdl = lpfc_get_eq_hdl(index); + lpfc_irq_clear_aff(eqhdl); + free_irq(eqhdl->irq, eqhdl); + } + + /* Unconfigure MSI-X capability structure */ + pci_free_irq_vectors(phba->pcidev); + +vec_fail_out: + return rc; +} + +/** + * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to enable the MSI interrupt mode to device with + * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is + * called to enable the MSI vector. The device driver is responsible for + * calling the request_irq() to register MSI vector with a interrupt the + * handler, which is done in this function. + * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_sli4_enable_msi(struct lpfc_hba *phba) +{ + int rc, index; + unsigned int cpu; + struct lpfc_hba_eq_hdl *eqhdl; + + rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, + PCI_IRQ_MSI | PCI_IRQ_AFFINITY); + if (rc > 0) + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0487 PCI enable MSI mode success.\n"); + else { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0488 PCI enable MSI mode failed (%d)\n", rc); + return rc ? rc : -1; + } + + rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, + 0, LPFC_DRIVER_NAME, phba); + if (rc) { + pci_free_irq_vectors(phba->pcidev); + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0490 MSI request_irq failed (%d)\n", rc); + return rc; + } + + eqhdl = lpfc_get_eq_hdl(0); + rc = pci_irq_vector(phba->pcidev, 0); + if (rc < 0) { + pci_free_irq_vectors(phba->pcidev); + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0496 MSI pci_irq_vec failed (%d)\n", rc); + return rc; + } + eqhdl->irq = rc; + + cpu = cpumask_first(cpu_present_mask); + lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); + + for (index = 0; index < phba->cfg_irq_chann; index++) { + eqhdl = lpfc_get_eq_hdl(index); + eqhdl->idx = index; + } + + return 0; +} + +/** + * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device + * @phba: pointer to lpfc hba data structure. + * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). + * + * This routine is invoked to enable device interrupt and associate driver's + * interrupt handler(s) to interrupt vector(s) to device with SLI-4 + * interface spec. Depends on the interrupt mode configured to the driver, + * the driver will try to fallback from the configured interrupt mode to an + * interrupt mode which is supported by the platform, kernel, and device in + * the order of: + * MSI-X -> MSI -> IRQ. 
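The fallback order stated above (MSI-X, then MSI, then legacy INTx) is the same ladder both the SLI-3 and SLI-4 enable paths walk, reporting 2/1/0 for whichever mode stuck. A compile-only reduction of that control flow with placeholder enable helpers (not driver functions):

enum intr_mode { INTR_ERROR = -1, INTR_INTX = 0, INTR_MSI = 1, INTR_MSIX = 2 };

static int enable_msix(void) { return -1; }	/* placeholder: pretend MSI-X fails */
static int enable_msi(void)  { return 0; }	/* placeholder */
static int enable_intx(void) { return 0; }	/* placeholder */

static enum intr_mode enable_interrupts(int cfg_mode)
{
	if (cfg_mode == 2 && !enable_msix())
		return INTR_MSIX;
	if (cfg_mode >= 1 && !enable_msi())
		return INTR_MSI;
	if (!enable_intx())
		return INTR_INTX;
	return INTR_ERROR;
}

int main(void)
{
	return enable_interrupts(2) != INTR_MSI;	/* MSI-X "fails", MSI sticks */
}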
+ * + * Return codes + * Interrupt mode (2, 1, 0) - successful + * LPFC_INTR_ERROR - error + **/ +static uint32_t +lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) +{ + uint32_t intr_mode = LPFC_INTR_ERROR; + int retval, idx; + + if (cfg_mode == 2) { + /* Preparation before conf_msi mbox cmd */ + retval = 0; + if (!retval) { + /* Now, try to enable MSI-X interrupt mode */ + retval = lpfc_sli4_enable_msix(phba); + if (!retval) { + /* Indicate initialization to MSI-X mode */ + phba->intr_type = MSIX; + intr_mode = 2; + } + } + } + + /* Fallback to MSI if MSI-X initialization failed */ + if (cfg_mode >= 1 && phba->intr_type == NONE) { + retval = lpfc_sli4_enable_msi(phba); + if (!retval) { + /* Indicate initialization to MSI mode */ + phba->intr_type = MSI; + intr_mode = 1; + } + } + + /* Fallback to INTx if both MSI-X/MSI initalization failed */ + if (phba->intr_type == NONE) { + retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, + IRQF_SHARED, LPFC_DRIVER_NAME, phba); + if (!retval) { + struct lpfc_hba_eq_hdl *eqhdl; + unsigned int cpu; + + /* Indicate initialization to INTx mode */ + phba->intr_type = INTx; + intr_mode = 0; + + eqhdl = lpfc_get_eq_hdl(0); + retval = pci_irq_vector(phba->pcidev, 0); + if (retval < 0) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0502 INTR pci_irq_vec failed (%d)\n", + retval); + return LPFC_INTR_ERROR; + } + eqhdl->irq = retval; + + cpu = cpumask_first(cpu_present_mask); + lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, + cpu); + for (idx = 0; idx < phba->cfg_irq_chann; idx++) { + eqhdl = lpfc_get_eq_hdl(idx); + eqhdl->idx = idx; + } + } + } + return intr_mode; +} + +/** + * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to disable device interrupt and disassociate + * the driver's interrupt handler(s) from interrupt vector(s) to device + * with SLI-4 interface spec. Depending on the interrupt mode, the driver + * will release the interrupt vector(s) for the message signaled interrupt. + **/ +static void +lpfc_sli4_disable_intr(struct lpfc_hba *phba) +{ + /* Disable the currently initialized interrupt mode */ + if (phba->intr_type == MSIX) { + int index; + struct lpfc_hba_eq_hdl *eqhdl; + + /* Free up MSI-X multi-message vectors */ + for (index = 0; index < phba->cfg_irq_chann; index++) { + eqhdl = lpfc_get_eq_hdl(index); + lpfc_irq_clear_aff(eqhdl); + free_irq(eqhdl->irq, eqhdl); + } + } else { + free_irq(phba->pcidev->irq, phba); + } + + pci_free_irq_vectors(phba->pcidev); + + /* Reset interrupt management states */ + phba->intr_type = NONE; + phba->sli.slistat.sli_intr = 0; +} + +/** + * lpfc_unset_hba - Unset SLI3 hba device initialization + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the HBA device initialization steps to + * a device with SLI-3 interface spec. + **/ +static void +lpfc_unset_hba(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + spin_lock_irq(shost->host_lock); + vport->load_flag |= FC_UNLOADING; + spin_unlock_irq(shost->host_lock); + + kfree(phba->vpi_bmask); + kfree(phba->vpi_ids); + + lpfc_stop_hba_timers(phba); + + phba->pport->work_port_events = 0; + + lpfc_sli_hba_down(phba); + + lpfc_sli_brdrestart(phba); + + lpfc_sli_disable_intr(phba); + + return; +} + +/** + * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy + * @phba: Pointer to HBA context object. 
+ * + * This function is called in the SLI4 code path to wait for completion + * of device's XRIs exchange busy. It will check the XRI exchange busy + * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after + * that, it will check the XRI exchange busy on outstanding FCP and ELS + * I/Os every 30 seconds, log error message, and wait forever. Only when + * all XRI exchange busy complete, the driver unload shall proceed with + * invoking the function reset ioctl mailbox command to the CNA and the + * the rest of the driver unload resource release. + **/ +static void +lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) +{ + struct lpfc_sli4_hdw_queue *qp; + int idx, ccnt; + int wait_time = 0; + int io_xri_cmpl = 1; + int nvmet_xri_cmpl = 1; + int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); + + /* Driver just aborted IOs during the hba_unset process. Pause + * here to give the HBA time to complete the IO and get entries + * into the abts lists. + */ + msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); + + /* Wait for NVME pending IO to flush back to transport. */ + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + lpfc_nvme_wait_for_io_drain(phba); + + ccnt = 0; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); + if (!io_xri_cmpl) /* if list is NOT empty */ + ccnt++; + } + if (ccnt) + io_xri_cmpl = 0; + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + nvmet_xri_cmpl = + list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); + } + + while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { + if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { + if (!nvmet_xri_cmpl) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6424 NVMET XRI exchange busy " + "wait time: %d seconds.\n", + wait_time/1000); + if (!io_xri_cmpl) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6100 IO XRI exchange busy " + "wait time: %d seconds.\n", + wait_time/1000); + if (!els_xri_cmpl) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2878 ELS XRI exchange busy " + "wait time: %d seconds.\n", + wait_time/1000); + msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); + wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; + } else { + msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); + wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; + } + + ccnt = 0; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + io_xri_cmpl = list_empty( + &qp->lpfc_abts_io_buf_list); + if (!io_xri_cmpl) /* if list is NOT empty */ + ccnt++; + } + if (ccnt) + io_xri_cmpl = 0; + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + nvmet_xri_cmpl = list_empty( + &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); + } + els_xri_cmpl = + list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); + + } +} + +/** + * lpfc_sli4_hba_unset - Unset the fcoe hba + * @phba: Pointer to HBA context object. + * + * This function is called in the SLI4 code path to reset the HBA's FCoE + * function. The caller is not required to hold any lock. This routine + * issues PCI function reset mailbox command to reset the FCoE function. + * At the end of the function, it calls lpfc_hba_down_post function to + * free any pending commands. 
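The XRI busy-wait above polls quickly for the first stretch, then drops to a slow poll with an error log and keeps waiting until the outstanding exchanges drain. A compile-only user-space sketch of that policy; the 10 ms / 10 s / 30 s figures mirror the T1, timeout, and T2 values described in the comment:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Fast poll until warn_after_ms, then slow poll with a warning, forever. */
static void wait_for_quiesce(bool (*busy)(void))
{
	const unsigned int fast_ms = 10, slow_ms = 30000, warn_after_ms = 10000;
	unsigned int waited_ms = 0;

	while (busy()) {
		if (waited_ms > warn_after_ms) {
			fprintf(stderr, "still busy after %u s\n",
				waited_ms / 1000);
			sleep(slow_ms / 1000);		/* 30 s slow poll */
			waited_ms += slow_ms;
		} else {
			usleep(fast_ms * 1000);		/* 10 ms fast poll */
			waited_ms += fast_ms;
		}
	}
}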
+ **/ +static void +lpfc_sli4_hba_unset(struct lpfc_hba *phba) +{ + int wait_cnt = 0; + LPFC_MBOXQ_t *mboxq; + struct pci_dev *pdev = phba->pcidev; + + lpfc_stop_hba_timers(phba); + hrtimer_cancel(&phba->cmf_stats_timer); + hrtimer_cancel(&phba->cmf_timer); + + if (phba->pport) + phba->sli4_hba.intr_enable = 0; + + /* + * Gracefully wait out the potential current outstanding asynchronous + * mailbox command. + */ + + /* First, block any pending async mailbox command from posted */ + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; + spin_unlock_irq(&phba->hbalock); + /* Now, trying to wait it out if we can */ + while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { + msleep(10); + if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) + break; + } + /* Forcefully release the outstanding mailbox command if timed out */ + if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { + spin_lock_irq(&phba->hbalock); + mboxq = phba->sli.mbox_active; + mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; + __lpfc_mbox_cmpl_put(phba, mboxq); + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + spin_unlock_irq(&phba->hbalock); + } + + /* Abort all iocbs associated with the hba */ + lpfc_sli_hba_iocb_abort(phba); + + if (!pci_channel_offline(phba->pcidev)) + /* Wait for completion of device XRI exchange busy */ + lpfc_sli4_xri_exchange_busy_wait(phba); + + /* per-phba callback de-registration for hotplug event */ + if (phba->pport) + lpfc_cpuhp_remove(phba); + + /* Disable PCI subsystem interrupt */ + lpfc_sli4_disable_intr(phba); + + /* Disable SR-IOV if enabled */ + if (phba->cfg_sriov_nr_virtfn) + pci_disable_sriov(pdev); + + /* Stop kthread signal shall trigger work_done one more time */ + kthread_stop(phba->worker_thread); + + /* Disable FW logging to host memory */ + lpfc_ras_stop_fwlog(phba); + + /* Reset SLI4 HBA FCoE function */ + lpfc_pci_function_reset(phba); + + /* release all queue allocated resources. */ + lpfc_sli4_queue_destroy(phba); + + /* Free RAS DMA memory */ + if (phba->ras_fwlog.ras_enabled) + lpfc_sli4_ras_dma_free(phba); + + /* Stop the SLI4 device port */ + if (phba->pport) + phba->pport->work_port_events = 0; +} + +static uint32_t +lpfc_cgn_crc32(uint32_t crc, u8 byte) +{ + uint32_t msb = 0; + uint32_t bit; + + for (bit = 0; bit < 8; bit++) { + msb = (crc >> 31) & 1; + crc <<= 1; + + if (msb ^ (byte & 1)) { + crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER; + crc |= 1; + } + byte >>= 1; + } + return crc; +} + +static uint32_t +lpfc_cgn_reverse_bits(uint32_t wd) +{ + uint32_t result = 0; + uint32_t i; + + for (i = 0; i < 32; i++) { + result <<= 1; + result |= (1 & (wd >> i)); + } + return result; +} + +/* + * The routine corresponds with the algorithm the HBA firmware + * uses to validate the data integrity. 
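+ * The CRC is accumulated one byte at a time, low-order bit first, using + * LPFC_CGN_CRC32_MAGIC_NUMBER as the feedback term; lpfc_cgn_calc_crc32() + * then bit-reverses and inverts the accumulated value, e.g. + * crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);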
+ */ +uint32_t +lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc) +{ + uint32_t i; + uint32_t result; + uint8_t *data = (uint8_t *)ptr; + + for (i = 0; i < byteLen; ++i) + crc = lpfc_cgn_crc32(crc, data[i]); + + result = ~lpfc_cgn_reverse_bits(crc); + return result; +} + +void +lpfc_init_congestion_buf(struct lpfc_hba *phba) +{ + struct lpfc_cgn_info *cp; + uint16_t size; + uint32_t crc; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6235 INIT Congestion Buffer %p\n", phba->cgn_i); + + if (!phba->cgn_i) + return; + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + + atomic_set(&phba->cgn_fabric_warn_cnt, 0); + atomic_set(&phba->cgn_fabric_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_warn_cnt, 0); + + atomic_set(&phba->cgn_driver_evt_cnt, 0); + atomic_set(&phba->cgn_latency_evt_cnt, 0); + atomic64_set(&phba->cgn_latency_evt, 0); + phba->cgn_evt_minute = 0; + + memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat)); + cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ); + cp->cgn_info_version = LPFC_CGN_INFO_V4; + + /* cgn parameters */ + cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; + cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; + cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; + cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; + + lpfc_cgn_update_tstamp(phba, &cp->base_time); + + /* Fill in default LUN qdepth */ + if (phba->pport) { + size = (uint16_t)(phba->pport->cfg_lun_queue_depth); + cp->cgn_lunq = cpu_to_le16(size); + } + + /* last used Index initialized to 0xff already */ + + cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); + cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(crc); + + phba->cgn_evt_timestamp = jiffies + + msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); +} + +void +lpfc_init_congestion_stat(struct lpfc_hba *phba) +{ + struct lpfc_cgn_info *cp; + uint32_t crc; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6236 INIT Congestion Stat %p\n", phba->cgn_i); + + if (!phba->cgn_i) + return; + + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat)); + + lpfc_cgn_update_tstamp(phba, &cp->stat_start); + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(crc); +} + +/** + * __lpfc_reg_congestion_buf - register congestion info buffer with HBA + * @phba: Pointer to hba context object. + * @reg: flag to determine register or unregister. 
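+ * + * Issues a REG_CONGESTION_BUF mailbox command (polled) to register + * (@reg > 0) or unregister (@reg == 0) the congestion info buffer at + * phba->cgn_i with the port. + * + * Return codes + * 0 - successful + * -ENOMEM - mailbox allocation failed + * -ENXIO - no congestion buffer or mailbox command failed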
+ */ +static int +__lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg) +{ + struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t shdr_status, shdr_add_status; + LPFC_MBOXQ_t *mboxq; + int length, rc; + + if (!phba->cgn_i) + return -ENXIO; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2641 REG_CONGESTION_BUF mbox allocation fail: " + "HBA state x%x reg %d\n", + phba->pport->port_state, reg); + return -ENOMEM; + } + + length = (sizeof(struct lpfc_mbx_reg_congestion_buf) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length, + LPFC_SLI4_MBX_EMBED); + reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf; + bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1); + if (reg > 0) + bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1); + else + bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0); + reg_congestion_buf->length = sizeof(struct lpfc_cgn_info); + reg_congestion_buf->addr_lo = + putPaddrLow(phba->cgn_i->phys); + reg_congestion_buf->addr_hi = + putPaddrHigh(phba->cgn_i->phys); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) + &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, + &shdr->response); + mempool_free(mboxq, phba->mbox_mem_pool); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2642 REG_CONGESTION_BUF mailbox " + "failed with status x%x add_status x%x," + " mbx status x%x reg %d\n", + shdr_status, shdr_add_status, rc, reg); + return -ENXIO; + } + return 0; +} + +int +lpfc_unreg_congestion_buf(struct lpfc_hba *phba) +{ + lpfc_cmf_stop(phba); + return __lpfc_reg_congestion_buf(phba, 0); +} + +int +lpfc_reg_congestion_buf(struct lpfc_hba *phba) +{ + return __lpfc_reg_congestion_buf(phba, 1); +} + +/** + * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. + * @phba: Pointer to HBA context object. + * @mboxq: Pointer to the mailboxq memory for the mailbox command response. + * + * This function is called in the SLI4 code path to read the port's + * sli4 capabilities. + * + * This function may be called from any context that can block-wait + * for the completion. The expectation is that this routine is called + * typically from probe_one or from the online routine. + **/ +int +lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + int rc; + struct lpfc_mqe *mqe = &mboxq->u.mqe; + struct lpfc_pc_sli4_params *sli4_params; + uint32_t mbox_tmo; + int length; + bool exp_wqcq_pages = true; + struct lpfc_sli4_parameters *mbx_sli4_parameters; + + /* + * By default, the driver assumes the SLI4 port requires RPI + * header postings. The SLI4_PARAM response will correct this + * assumption.
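+ * The cfg_hdrr bit in the SLI4_PARAMETERS response read below overrides + * this default.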
+ */ + phba->sli4_hba.rpi_hdrs_in_use = 1; + + /* Read the port's SLI4 Config Parameters */ + length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, + length, LPFC_SLI4_MBX_EMBED); + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); + rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); + } + if (unlikely(rc)) + return rc; + sli4_params = &phba->sli4_hba.pc_sli4_params; + mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; + sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); + sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); + sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); + sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, + mbx_sli4_parameters); + sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, + mbx_sli4_parameters); + if (bf_get(cfg_phwq, mbx_sli4_parameters)) + phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; + else + phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; + sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; + sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope, + mbx_sli4_parameters); + sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); + sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); + sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); + sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); + sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); + sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); + sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); + sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); + sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); + sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); + sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, + mbx_sli4_parameters); + sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); + sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, + mbx_sli4_parameters); + phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); + phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); + sli4_params->mi_cap = bf_get(cfg_mi_ver, mbx_sli4_parameters); + + /* Check for Extended Pre-Registered SGL support */ + phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); + + /* Check for firmware nvme support */ + rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && + bf_get(cfg_xib, mbx_sli4_parameters)); + + if (rc) { + /* Save this to indicate the Firmware supports NVME */ + sli4_params->nvme = 1; + + /* Firmware NVME support, check driver FC4 NVME support */ + if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, + "6133 Disabling NVME support: " + "FC4 type not supported: x%x\n", + phba->cfg_enable_fc4_type); + goto fcponly; + } + } else { + /* No firmware NVME support, check driver FC4 NVME support */ + sli4_params->nvme = 0; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, + "6101 Disabling NVME support: Not " + "supported by firmware (%d %d) x%x\n", + bf_get(cfg_nvme, mbx_sli4_parameters), + bf_get(cfg_xib, mbx_sli4_parameters), + phba->cfg_enable_fc4_type); +fcponly: + phba->nvmet_support = 0; + phba->cfg_nvmet_mrq = 0; + phba->cfg_nvme_seg_cnt = 0; + + /* If no FC4 type support, move to just SCSI 
support */ + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + return -ENODEV; + phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; + } + } + + /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to + * accommodate 512K and 1M IOs in a single nvme buf. + */ + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; + + /* Enable embedded Payload BDE if support is indicated */ + if (bf_get(cfg_pbde, mbx_sli4_parameters)) + phba->cfg_enable_pbde = 1; + else + phba->cfg_enable_pbde = 0; + + /* + * To support Suppress Response feature we must satisfy 3 conditions. + * lpfc_suppress_rsp module parameter must be set (default). + * In SLI4-Parameters Descriptor: + * Extended Inline Buffers (XIB) must be supported. + * Suppress Response IU Not Supported (SRIUNS) must NOT be supported + * (double negative). + */ + if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && + !(bf_get(cfg_nosr, mbx_sli4_parameters))) + phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; + else + phba->cfg_suppress_rsp = 0; + + if (bf_get(cfg_eqdr, mbx_sli4_parameters)) + phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; + + /* Make sure that sge_supp_len can be handled by the driver */ + if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) + sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; + + rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "6400 Can't set dma maximum segment size\n"); + return rc; + } + + /* + * Check whether the adapter supports an embedded copy of the + * FCP CMD IU within the WQE for FCP_Ixxx commands. In order + * to use this option, 128-byte WQEs must be used. + */ + if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) + phba->fcp_embed_io = 1; + else + phba->fcp_embed_io = 0; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, + "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", + bf_get(cfg_xib, mbx_sli4_parameters), + phba->cfg_enable_pbde, + phba->fcp_embed_io, sli4_params->nvme, + phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); + + if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_2) && + (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_FAMILY_LNCR_A0)) + exp_wqcq_pages = false; + + if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && + (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && + exp_wqcq_pages && + (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) + phba->enab_exp_wqcq_pages = 1; + else + phba->enab_exp_wqcq_pages = 0; + /* + * Check if the SLI port supports MDS Diagnostics + */ + if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) + phba->mds_diags_support = 1; + else + phba->mds_diags_support = 0; + + /* + * Check if the SLI port supports NSLER + */ + if (bf_get(cfg_nsler, mbx_sli4_parameters)) + phba->nsler = 1; + else + phba->nsler = 0; + + return 0; +} + +/** + * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. + * @pdev: pointer to PCI device + * @pid: pointer to PCI device identifier + * + * This routine is to be called to attach a device with SLI-3 interface spec + * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is + * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific + * information of the device and driver to see if the driver state that it can + * support this kind of device. If the match is successful, the driver core + * invokes this routine. 
If this routine determines it can claim the HBA, it + * does all the initialization that it needs to do to handle the HBA properly. + * + * Return code + * 0 - driver can claim the device + * negative value - driver can not claim the device + **/ +static int +lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct lpfc_hba *phba; + struct lpfc_vport *vport = NULL; + struct Scsi_Host *shost = NULL; + int error; + uint32_t cfg_mode, intr_mode; + + /* Allocate memory for HBA structure */ + phba = lpfc_hba_alloc(pdev); + if (!phba) + return -ENOMEM; + + /* Perform generic PCI device enabling operation */ + error = lpfc_enable_pci_dev(phba); + if (error) + goto out_free_phba; + + /* Set up SLI API function jump table for PCI-device group-0 HBAs */ + error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); + if (error) + goto out_disable_pci_dev; + + /* Set up SLI-3 specific device PCI memory space */ + error = lpfc_sli_pci_mem_setup(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1402 Failed to set up pci memory space.\n"); + goto out_disable_pci_dev; + } + + /* Set up SLI-3 specific device driver resources */ + error = lpfc_sli_driver_resource_setup(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1404 Failed to set up driver resource.\n"); + goto out_unset_pci_mem_s3; + } + + /* Initialize and populate the iocb list per host */ + + error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1405 Failed to initialize iocb list.\n"); + goto out_unset_driver_resource_s3; + } + + /* Set up common device driver resources */ + error = lpfc_setup_driver_resource_phase2(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1406 Failed to set up driver resource.\n"); + goto out_free_iocb_list; + } + + /* Get the default values for Model Name and Description */ + lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); + + /* Create SCSI host to the physical port */ + error = lpfc_create_shost(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1407 Failed to create scsi host.\n"); + goto out_unset_driver_resource; + } + + /* Configure sysfs attributes */ + vport = phba->pport; + error = lpfc_alloc_sysfs_attr(vport); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1476 Failed to allocate sysfs attr\n"); + goto out_destroy_shost; + } + + shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ + /* Now, trying to enable interrupt and bring up the device */ + cfg_mode = phba->cfg_use_msi; + while (true) { + /* Put device to a known state before enabling interrupt */ + lpfc_stop_port(phba); + /* Configure and enable interrupt */ + intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0431 Failed to enable interrupt.\n"); + error = -ENODEV; + goto out_free_sysfs_attr; + } + /* SLI-3 HBA setup */ + if (lpfc_sli_hba_setup(phba)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1477 Failed to set up hba\n"); + error = -ENODEV; + goto out_remove_device; + } + + /* Wait 50ms for the interrupts of previous mailbox commands */ + msleep(50); + /* Check active interrupts on message signaled interrupts */ + if (intr_mode == 0 || + phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { + /* Log the current active interrupt mode */ + phba->intr_mode = intr_mode; + lpfc_log_intr_mode(phba, intr_mode); + break; + } else { + 
lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0447 Configure interrupt mode (%d) " + "failed active interrupt test.\n", + intr_mode); + /* Disable the current interrupt mode */ + lpfc_sli_disable_intr(phba); + /* Try next level of interrupt mode */ + cfg_mode = --intr_mode; + } + } + + /* Perform post initialization setup */ + lpfc_post_init_setup(phba); + + /* Check if there are static vports to be created. */ + lpfc_create_static_vport(phba); + + return 0; + +out_remove_device: + lpfc_unset_hba(phba); +out_free_sysfs_attr: + lpfc_free_sysfs_attr(vport); +out_destroy_shost: + lpfc_destroy_shost(phba); +out_unset_driver_resource: + lpfc_unset_driver_resource_phase2(phba); +out_free_iocb_list: + lpfc_free_iocb_list(phba); +out_unset_driver_resource_s3: + lpfc_sli_driver_resource_unset(phba); +out_unset_pci_mem_s3: + lpfc_sli_pci_mem_unset(phba); +out_disable_pci_dev: + lpfc_disable_pci_dev(phba); + if (shost) + scsi_host_put(shost); +out_free_phba: + lpfc_hba_free(phba); + return error; +} + +/** + * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. + * @pdev: pointer to PCI device + * + * This routine is to be called to detach a device with SLI-3 interface + * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is + * removed from PCI bus, it performs all the necessary cleanup for the HBA + * device to be removed from the PCI subsystem properly. + **/ +static void +lpfc_pci_remove_one_s3(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_vport **vports; + struct lpfc_hba *phba = vport->phba; + int i; + + spin_lock_irq(&phba->hbalock); + vport->load_flag |= FC_UNLOADING; + spin_unlock_irq(&phba->hbalock); + + lpfc_free_sysfs_attr(vport); + + /* Release all the vports against this physical port */ + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + if (vports[i]->port_type == LPFC_PHYSICAL_PORT) + continue; + fc_vport_terminate(vports[i]->fc_vport); + } + lpfc_destroy_vport_work_array(phba, vports); + + /* Remove FC host with the physical port */ + fc_remove_host(shost); + scsi_remove_host(shost); + + /* Clean up all nodes, mailboxes and IOs. */ + lpfc_cleanup(vport); + + /* + * Bring down the SLI Layer. This step disables all interrupts, + * clears the rings, discards all mailbox commands, and resets + * the HBA. + */ + + /* HBA interrupt will be disabled after this call */ + lpfc_sli_hba_down(phba); + /* Stop kthread signal shall trigger work_done one more time */ + kthread_stop(phba->worker_thread); + /* Final cleanup of txcmplq and reset the HBA */ + lpfc_sli_brdrestart(phba); + + kfree(phba->vpi_bmask); + kfree(phba->vpi_ids); + + lpfc_stop_hba_timers(phba); + spin_lock_irq(&phba->port_list_lock); + list_del_init(&vport->listentry); + spin_unlock_irq(&phba->port_list_lock); + + lpfc_debugfs_terminate(vport); + + /* Disable SR-IOV if enabled */ + if (phba->cfg_sriov_nr_virtfn) + pci_disable_sriov(pdev); + + /* Disable interrupt */ + lpfc_sli_disable_intr(phba); + + scsi_host_put(shost); + + /* + * Call scsi_free before mem_free since scsi bufs are released to their + * corresponding pools here.
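+ * The remaining teardown then frees the iocb list and driver memory pools, + * releases the HBQ and SLIM2 DMA areas, unmaps the SLIM and control + * register BARs, and finally releases the PCI regions and disables the + * PCI device.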
+ */ + lpfc_scsi_free(phba); + lpfc_free_iocb_list(phba); + + lpfc_mem_free_all(phba); + + dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), + phba->hbqslimp.virt, phba->hbqslimp.phys); + + /* Free resources associated with SLI2 interface */ + dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, + phba->slim2p.virt, phba->slim2p.phys); + + /* unmap adapter SLIM and Control Registers */ + iounmap(phba->ctrl_regs_memmap_p); + iounmap(phba->slim_memmap_p); + + lpfc_hba_free(phba); + + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +/** + * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt + * @dev_d: pointer to device + * + * This routine is to be called from the kernel's PCI subsystem to support + * system Power Management (PM) to device with SLI-3 interface spec. When + * PM invokes this method, it quiesces the device by stopping the driver's + * worker thread for the device, turning off device's interrupt and DMA, + * and bring the device offline. Note that as the driver implements the + * minimum PM requirements to a power-aware driver's PM support for the + * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) + * to the suspend() method call will be treated as SUSPEND and the driver will + * fully reinitialize its device during resume() method call, the driver will + * set device to PCI_D3hot state in PCI config space instead of setting it + * according to the @msg provided by the PM. + * + * Return code + * 0 - driver suspended the device + * Error otherwise + **/ +static int __maybe_unused +lpfc_pci_suspend_one_s3(struct device *dev_d) +{ + struct Scsi_Host *shost = dev_get_drvdata(dev_d); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0473 PCI device Power Management suspend.\n"); + + /* Bring down the device */ + lpfc_offline_prep(phba, LPFC_MBX_WAIT); + lpfc_offline(phba); + kthread_stop(phba->worker_thread); + + /* Disable interrupt from device */ + lpfc_sli_disable_intr(phba); + + return 0; +} + +/** + * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt + * @dev_d: pointer to device + * + * This routine is to be called from the kernel's PCI subsystem to support + * system Power Management (PM) to device with SLI-3 interface spec. When PM + * invokes this method, it restores the device's PCI config space state and + * fully reinitializes the device and brings it online. Note that as the + * driver implements the minimum PM requirements to a power-aware driver's + * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, + * FREEZE) to the suspend() method call will be treated as SUSPEND and the + * driver will fully reinitialize its device during resume() method call, + * the device will be set to PCI_D0 directly in PCI config space before + * restoring the state. + * + * Return code + * 0 - driver suspended the device + * Error otherwise + **/ +static int __maybe_unused +lpfc_pci_resume_one_s3(struct device *dev_d) +{ + struct Scsi_Host *shost = dev_get_drvdata(dev_d); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + uint32_t intr_mode; + int error; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0452 PCI device Power Management resume.\n"); + + /* Startup the kernel thread for this host adapter. 
*/ + phba->worker_thread = kthread_run(lpfc_do_work, phba, + "lpfc_worker_%d", phba->brd_no); + if (IS_ERR(phba->worker_thread)) { + error = PTR_ERR(phba->worker_thread); + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0434 PM resume failed to start worker " + "thread: error=x%x.\n", error); + return error; + } + + /* Init cpu_map array */ + lpfc_cpu_map_array_init(phba); + /* Init hba_eq_hdl array */ + lpfc_hba_eq_hdl_array_init(phba); + /* Configure and enable interrupt */ + intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0430 PM resume Failed to enable interrupt\n"); + return -EIO; + } else + phba->intr_mode = intr_mode; + + /* Restart HBA and bring it online */ + lpfc_sli_brdrestart(phba); + lpfc_online(phba); + + /* Log the current active interrupt mode */ + lpfc_log_intr_mode(phba, phba->intr_mode); + + return 0; +} + +/** + * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to prepare the SLI3 device for PCI slot recover. It + * aborts all the outstanding SCSI I/Os to the pci device. + **/ +static void +lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) +{ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2723 PCI channel I/O abort preparing for recovery\n"); + + /* + * There may be errored I/Os through HBA, abort all I/Os on txcmplq + * and let the SCSI mid-layer to retry them to recover. + */ + lpfc_sli_abort_fcp_rings(phba); +} + +/** + * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to prepare the SLI3 device for PCI slot reset. It + * disables the device interrupt and pci device, and aborts the internal FCP + * pending I/Os. + **/ +static void +lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) +{ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2710 PCI channel disable preparing for reset\n"); + + /* Block any management I/Os to the device */ + lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); + + /* Block all SCSI devices' I/Os on the host */ + lpfc_scsi_dev_block(phba); + + /* Flush all driver's outstanding SCSI I/Os as we are to reset */ + lpfc_sli_flush_io_rings(phba); + + /* stop all timers */ + lpfc_stop_hba_timers(phba); + + /* Disable interrupt and pci device */ + lpfc_sli_disable_intr(phba); + pci_disable_device(phba->pcidev); +} + +/** + * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to prepare the SLI3 device for PCI slot permanently + * disabling. It blocks the SCSI transport layer traffic and flushes the FCP + * pending I/Os. + **/ +static void +lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) +{ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2711 PCI channel permanent disable for failure\n"); + /* Block all SCSI devices' I/Os on the host */ + lpfc_scsi_dev_block(phba); + lpfc_sli4_prep_dev_for_reset(phba); + + /* stop all timers */ + lpfc_stop_hba_timers(phba); + + /* Clean up all driver's outstanding SCSI I/Os */ + lpfc_sli_flush_io_rings(phba); +} + +/** + * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error + * @pdev: pointer to PCI device. + * @state: the current PCI connection state. + * + * This routine is called from the PCI subsystem for I/O error handling to + * device with SLI-3 interface spec. 
This function is called by the PCI + * subsystem after a PCI bus error affecting this device has been detected. + * When this function is invoked, it will need to stop all the I/Os and + * interrupt(s) to the device. Once that is done, it will return + * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery + * as desired. + * + * Return codes + * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link + * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + **/ +static pci_ers_result_t +lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + switch (state) { + case pci_channel_io_normal: + /* Non-fatal error, prepare for recovery */ + lpfc_sli_prep_dev_for_recover(phba); + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + /* Fatal error, prepare for slot reset */ + lpfc_sli_prep_dev_for_reset(phba); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + /* Permanent failure, prepare for device down */ + lpfc_sli_prep_dev_for_perm_failure(phba); + return PCI_ERS_RESULT_DISCONNECT; + default: + /* Unknown state, prepare and request slot reset */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0472 Unknown PCI error state: x%x\n", state); + lpfc_sli_prep_dev_for_reset(phba); + return PCI_ERS_RESULT_NEED_RESET; + } +} + +/** + * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. + * @pdev: pointer to PCI device. + * + * This routine is called from the PCI subsystem for error handling to + * device with SLI-3 interface spec. This is called after PCI bus has been + * reset to restart the PCI card from scratch, as if from a cold-boot. + * During the PCI subsystem error recovery, after driver returns + * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error + * recovery and then call this routine before calling the .resume method + * to recover the device. This function will initialize the HBA device, + * enable the interrupt, but it will just put the HBA to offline state + * without passing any I/O traffic. + * + * Return codes + * PCI_ERS_RESULT_RECOVERED - the device has been recovered + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + */ +static pci_ers_result_t +lpfc_io_slot_reset_s3(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + struct lpfc_sli *psli = &phba->sli; + uint32_t intr_mode; + + dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); + if (pci_enable_device_mem(pdev)) { + printk(KERN_ERR "lpfc: Cannot re-enable " + "PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_restore_state(pdev); + + /* + * As the new kernel behavior of pci_restore_state() API call clears + * device saved_state flag, need to save the restored state again. 
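+ * Re-saving with pci_save_state() below keeps a valid saved copy + * available for any later restore.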
+ */ + pci_save_state(pdev); + + if (pdev->is_busmaster) + pci_set_master(pdev); + + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); + + /* Configure and enable interrupt */ + intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0427 Cannot re-enable interrupt after " + "slot reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } else + phba->intr_mode = intr_mode; + + /* Take device offline, it will perform cleanup */ + lpfc_offline_prep(phba, LPFC_MBX_WAIT); + lpfc_offline(phba); + lpfc_sli_brdrestart(phba); + + /* Log the current active interrupt mode */ + lpfc_log_intr_mode(phba, phba->intr_mode); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. + * @pdev: pointer to PCI device + * + * This routine is called from the PCI subsystem for error handling to device + * with SLI-3 interface spec. It is called when kernel error recovery tells + * the lpfc driver that it is ok to resume normal PCI operation after PCI bus + * error recovery. After this call, traffic can start to flow from this device + * again. + */ +static void +lpfc_io_resume_s3(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + /* Bring device online, it will be no-op for non-fatal error resume */ + lpfc_online(phba); +} + +/** + * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve + * @phba: pointer to lpfc hba data structure. + * + * returns the number of ELS/CT IOCBs to reserve + **/ +int +lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) +{ + int max_xri = phba->sli4_hba.max_cfg_param.max_xri; + + if (phba->sli_rev == LPFC_SLI_REV4) { + if (max_xri <= 100) + return 10; + else if (max_xri <= 256) + return 25; + else if (max_xri <= 512) + return 50; + else if (max_xri <= 1024) + return 100; + else if (max_xri <= 1536) + return 150; + else if (max_xri <= 2048) + return 200; + else + return 250; + } else + return 0; +} + +/** + * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve + * @phba: pointer to lpfc hba data structure. + * + * returns the number of ELS/CT + NVMET IOCBs to reserve + **/ +int +lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) +{ + int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); + + if (phba->nvmet_support) + max_xri += LPFC_NVMET_BUF_POST; + return max_xri; +} + + +static int +lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, + uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, + const struct firmware *fw) +{ + int rc; + u8 sli_family; + + sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); + /* Three cases: (1) FW was not supported on the detected adapter. + * (2) FW update has been locked out administratively. + * (3) Some other error during FW update. + * In each case, an unmaskable message is written to the console + * for admin diagnosis. + */ + if (offset == ADD_STATUS_FW_NOT_SUPPORTED || + (sli_family == LPFC_SLI_INTF_FAMILY_G6 && + magic_number != MAGIC_NUMBER_G6) || + (sli_family == LPFC_SLI_INTF_FAMILY_G7 && + magic_number != MAGIC_NUMBER_G7) || + (sli_family == LPFC_SLI_INTF_FAMILY_G7P && + magic_number != MAGIC_NUMBER_G7P)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3030 This firmware version is not supported on" + " this HBA model. 
Device:%x Magic:%x Type:%x " + "ID:%x Size %d %zd\n", + phba->pcidev->device, magic_number, ftype, fid, + fsize, fw->size); + rc = -EINVAL; + } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3021 Firmware downloads have been prohibited " + "by a system configuration setting on " + "Device:%x Magic:%x Type:%x ID:%x Size %d " + "%zd\n", + phba->pcidev->device, magic_number, ftype, fid, + fsize, fw->size); + rc = -EACCES; + } else { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3022 FW Download failed. Add Status x%x " + "Device:%x Magic:%x Type:%x ID:%x Size %d " + "%zd\n", + offset, phba->pcidev->device, magic_number, + ftype, fid, fsize, fw->size); + rc = -EIO; + } + return rc; +} + +/** + * lpfc_write_firmware - attempt to write a firmware image to the port + * @fw: pointer to firmware image returned from request_firmware. + * @context: pointer to firmware image returned from request_firmware. + * + **/ +static void +lpfc_write_firmware(const struct firmware *fw, void *context) +{ + struct lpfc_hba *phba = (struct lpfc_hba *)context; + char fwrev[FW_REV_STR_SIZE]; + struct lpfc_grp_hdr *image; + struct list_head dma_buffer_list; + int i, rc = 0; + struct lpfc_dmabuf *dmabuf, *next; + uint32_t offset = 0, temp_offset = 0; + uint32_t magic_number, ftype, fid, fsize; + + /* It can be null in no-wait mode, sanity check */ + if (!fw) { + rc = -ENXIO; + goto out; + } + image = (struct lpfc_grp_hdr *)fw->data; + + magic_number = be32_to_cpu(image->magic_number); + ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); + fid = bf_get_be32(lpfc_grp_hdr_id, image); + fsize = be32_to_cpu(image->size); + + INIT_LIST_HEAD(&dma_buffer_list); + lpfc_decode_firmware_rev(phba, fwrev, 1); + if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { + lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI, + "3023 Updating Firmware, Current Version:%s " + "New Version:%s\n", + fwrev, image->revision); + for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), + GFP_KERNEL); + if (!dmabuf) { + rc = -ENOMEM; + goto release_out; + } + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + SLI4_PAGE_SIZE, + &dmabuf->phys, + GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + rc = -ENOMEM; + goto release_out; + } + list_add_tail(&dmabuf->list, &dma_buffer_list); + } + while (offset < fw->size) { + temp_offset = offset; + list_for_each_entry(dmabuf, &dma_buffer_list, list) { + if (temp_offset + SLI4_PAGE_SIZE > fw->size) { + memcpy(dmabuf->virt, + fw->data + temp_offset, + fw->size - temp_offset); + temp_offset = fw->size; + break; + } + memcpy(dmabuf->virt, fw->data + temp_offset, + SLI4_PAGE_SIZE); + temp_offset += SLI4_PAGE_SIZE; + } + rc = lpfc_wr_object(phba, &dma_buffer_list, + (fw->size - offset), &offset); + if (rc) { + rc = lpfc_log_write_firmware_error(phba, offset, + magic_number, + ftype, + fid, + fsize, + fw); + goto release_out; + } + } + rc = offset; + } else + lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI, + "3029 Skipped Firmware update, Current " + "Version:%s New Version:%s\n", + fwrev, image->revision); + +release_out: + list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { + list_del(&dmabuf->list); + dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, + dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + } + release_firmware(fw); +out: + if (rc < 0) + lpfc_log_msg(phba, KERN_ERR, LOG_INIT | LOG_SLI, + "3062 Firmware update error, status %d.\n", rc); + else + 
lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI, + "3024 Firmware update success: size %d.\n", rc); +} + +/** + * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade + * @phba: pointer to lpfc hba data structure. + * @fw_upgrade: which firmware to update. + * + * This routine is called to perform Linux generic firmware upgrade on device + * that supports such feature. + **/ +int +lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) +{ + uint8_t file_name[ELX_MODEL_NAME_SIZE]; + int ret; + const struct firmware *fw; + + /* Only supported on SLI4 interface type 2 for now */ + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < + LPFC_SLI_INTF_IF_TYPE_2) + return -EPERM; + + snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); + + if (fw_upgrade == INT_FW_UPGRADE) { + ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, + file_name, &phba->pcidev->dev, + GFP_KERNEL, (void *)phba, + lpfc_write_firmware); + } else if (fw_upgrade == RUN_FW_UPGRADE) { + ret = request_firmware(&fw, file_name, &phba->pcidev->dev); + if (!ret) + lpfc_write_firmware(fw, (void *)phba); + } else { + ret = -EINVAL; + } + + return ret; +} + +/** + * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys + * @pdev: pointer to PCI device + * @pid: pointer to PCI device identifier + * + * This routine is called from the kernel's PCI subsystem to device with + * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is + * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific + * information of the device and driver to see if the driver state that it + * can support this kind of device. If the match is successful, the driver + * core invokes this routine. If this routine determines it can claim the HBA, + * it does all the initialization that it needs to do to handle the HBA + * properly. 
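+ * For the SLI-4 path the interrupt mode is configured before the SCSI host + * is created, and NVME local port registration and any requested firmware + * update are performed after lpfc_sli4_hba_setup() completes.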
+ * + * Return code + * 0 - driver can claim the device + * negative value - driver can not claim the device + **/ +static int +lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct lpfc_hba *phba; + struct lpfc_vport *vport = NULL; + struct Scsi_Host *shost = NULL; + int error; + uint32_t cfg_mode, intr_mode; + + /* Allocate memory for HBA structure */ + phba = lpfc_hba_alloc(pdev); + if (!phba) + return -ENOMEM; + + INIT_LIST_HEAD(&phba->poll_list); + + /* Perform generic PCI device enabling operation */ + error = lpfc_enable_pci_dev(phba); + if (error) + goto out_free_phba; + + /* Set up SLI API function jump table for PCI-device group-1 HBAs */ + error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); + if (error) + goto out_disable_pci_dev; + + /* Set up SLI-4 specific device PCI memory space */ + error = lpfc_sli4_pci_mem_setup(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1410 Failed to set up pci memory space.\n"); + goto out_disable_pci_dev; + } + + /* Set up SLI-4 Specific device driver resources */ + error = lpfc_sli4_driver_resource_setup(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1412 Failed to set up driver resource.\n"); + goto out_unset_pci_mem_s4; + } + + INIT_LIST_HEAD(&phba->active_rrq_list); + INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); + + /* Set up common device driver resources */ + error = lpfc_setup_driver_resource_phase2(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1414 Failed to set up driver resource.\n"); + goto out_unset_driver_resource_s4; + } + + /* Get the default values for Model Name and Description */ + lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); + + /* Now, trying to enable interrupt and bring up the device */ + cfg_mode = phba->cfg_use_msi; + + /* Put device to a known state before enabling interrupt */ + phba->pport = NULL; + lpfc_stop_port(phba); + + /* Init cpu_map array */ + lpfc_cpu_map_array_init(phba); + + /* Init hba_eq_hdl array */ + lpfc_hba_eq_hdl_array_init(phba); + + /* Configure and enable interrupt */ + intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0426 Failed to enable interrupt.\n"); + error = -ENODEV; + goto out_unset_driver_resource; + } + /* Default to single EQ for non-MSI-X */ + if (phba->intr_type != MSIX) { + phba->cfg_irq_chann = 1; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + if (phba->nvmet_support) + phba->cfg_nvmet_mrq = 1; + } + } + lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); + + /* Create SCSI host to the physical port */ + error = lpfc_create_shost(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1415 Failed to create scsi host.\n"); + goto out_disable_intr; + } + vport = phba->pport; + shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ + + /* Configure sysfs attributes */ + error = lpfc_alloc_sysfs_attr(vport); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1416 Failed to allocate sysfs attr\n"); + goto out_destroy_shost; + } + + /* Set up SLI-4 HBA */ + if (lpfc_sli4_hba_setup(phba)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1421 Failed to set up hba\n"); + error = -ENODEV; + goto out_free_sysfs_attr; + } + + /* Log the current active interrupt mode */ + phba->intr_mode = intr_mode; + lpfc_log_intr_mode(phba, intr_mode); + + /* Perform post initialization setup */ + lpfc_post_init_setup(phba); + + /* NVME support 
in FW earlier in the driver load corrects the + * FC4 type making a check for nvme_support unnecessary. + */ + if (phba->nvmet_support == 0) { + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + /* Create NVME binding with nvme_fc_transport. This + * ensures the vport is initialized. If the localport + * create fails, it should not unload the driver to + * support field issues. + */ + error = lpfc_nvme_create_localport(vport); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6004 NVME registration " + "failed, error x%x\n", + error); + } + } + } + + /* check for firmware upgrade or downgrade */ + if (phba->cfg_request_firmware_upgrade) + lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); + + /* Check if there are static vports to be created. */ + lpfc_create_static_vport(phba); + + timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); + cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); + + return 0; + +out_free_sysfs_attr: + lpfc_free_sysfs_attr(vport); +out_destroy_shost: + lpfc_destroy_shost(phba); +out_disable_intr: + lpfc_sli4_disable_intr(phba); +out_unset_driver_resource: + lpfc_unset_driver_resource_phase2(phba); +out_unset_driver_resource_s4: + lpfc_sli4_driver_resource_unset(phba); +out_unset_pci_mem_s4: + lpfc_sli4_pci_mem_unset(phba); +out_disable_pci_dev: + lpfc_disable_pci_dev(phba); + if (shost) + scsi_host_put(shost); +out_free_phba: + lpfc_hba_free(phba); + return error; +} + +/** + * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem + * @pdev: pointer to PCI device + * + * This routine is called from the kernel's PCI subsystem to device with + * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is + * removed from PCI bus, it performs all the necessary cleanup for the HBA + * device to be removed from the PCI subsystem properly. + **/ +static void +lpfc_pci_remove_one_s4(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_vport **vports; + struct lpfc_hba *phba = vport->phba; + int i; + + /* Mark the device unloading flag */ + spin_lock_irq(&phba->hbalock); + vport->load_flag |= FC_UNLOADING; + spin_unlock_irq(&phba->hbalock); + if (phba->cgn_i) + lpfc_unreg_congestion_buf(phba); + + lpfc_free_sysfs_attr(vport); + + /* Release all the vports against this physical port */ + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + if (vports[i]->port_type == LPFC_PHYSICAL_PORT) + continue; + fc_vport_terminate(vports[i]->fc_vport); + } + lpfc_destroy_vport_work_array(phba, vports); + + /* Remove FC host with the physical port */ + fc_remove_host(shost); + scsi_remove_host(shost); + + /* Perform ndlp cleanup on the physical port. The nvme and nvmet + * localports are destroyed after to cleanup all transport memory. + */ + lpfc_cleanup(vport); + lpfc_nvmet_destroy_targetport(phba); + lpfc_nvme_destroy_localport(vport); + + /* De-allocate multi-XRI pools */ + if (phba->cfg_xri_rebalancing) + lpfc_destroy_multixri_pools(phba); + + /* + * Bring down the SLI Layer. This step disables all interrupts, + * clears the rings, discards all mailbox commands, and resets + * the HBA FCoE function. 
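+ * For the SLI-4 path most of this teardown is performed by + * lpfc_sli4_hba_unset() below, once the debugfs entries, timers, and the + * I/O and iocb buffers have been released.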
+ */ + lpfc_debugfs_terminate(vport); + + lpfc_stop_hba_timers(phba); + spin_lock_irq(&phba->port_list_lock); + list_del_init(&vport->listentry); + spin_unlock_irq(&phba->port_list_lock); + + /* Perform scsi free before driver resource_unset since scsi + * buffers are released to their corresponding pools here. + */ + lpfc_io_free(phba); + lpfc_free_iocb_list(phba); + lpfc_sli4_hba_unset(phba); + + lpfc_unset_driver_resource_phase2(phba); + lpfc_sli4_driver_resource_unset(phba); + + /* Unmap adapter Control and Doorbell registers */ + lpfc_sli4_pci_mem_unset(phba); + + /* Release PCI resources and disable device's PCI function */ + scsi_host_put(shost); + lpfc_disable_pci_dev(phba); + + /* Finally, free the driver's device data structure */ + lpfc_hba_free(phba); + + return; +} + +/** + * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt + * @dev_d: pointer to device + * + * This routine is called from the kernel's PCI subsystem to support system + * Power Management (PM) to device with SLI-4 interface spec. When PM invokes + * this method, it quiesces the device by stopping the driver's worker + * thread for the device, turning off device's interrupt and DMA, and bringing + * the device offline. Note that as the driver implements the minimum PM + * requirements to a power-aware driver's PM support for suspend/resume -- all + * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() + * method call will be treated as SUSPEND and the driver will fully + * reinitialize its device during resume() method call, the driver will set + * device to PCI_D3hot state in PCI config space instead of setting it + * according to the @msg provided by the PM. + * + * Return code + * 0 - driver suspended the device + * Error otherwise + **/ +static int __maybe_unused +lpfc_pci_suspend_one_s4(struct device *dev_d) +{ + struct Scsi_Host *shost = dev_get_drvdata(dev_d); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2843 PCI device Power Management suspend.\n"); + + /* Bring down the device */ + lpfc_offline_prep(phba, LPFC_MBX_WAIT); + lpfc_offline(phba); + kthread_stop(phba->worker_thread); + + /* Disable interrupt from device */ + lpfc_sli4_disable_intr(phba); + lpfc_sli4_queue_destroy(phba); + + return 0; +} + +/** + * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt + * @dev_d: pointer to device + * + * This routine is called from the kernel's PCI subsystem to support system + * Power Management (PM) to device with SLI-4 interface spec. When PM invokes + * this method, it restores the device's PCI config space state and fully + * reinitializes the device and brings it online. Note that as the driver + * implements the minimum PM requirements to a power-aware driver's PM for + * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) + * to the suspend() method call will be treated as SUSPEND and the driver + * will fully reinitialize its device during resume() method call, the device + * will be set to PCI_D0 directly in PCI config space before restoring the + * state.
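+ * The worker thread is restarted and interrupts are re-enabled through + * lpfc_sli4_enable_intr() before the HBA is restarted and brought back + * online.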
+ * + * Return code + * 0 - driver suspended the device + * Error otherwise + **/ +static int __maybe_unused +lpfc_pci_resume_one_s4(struct device *dev_d) +{ + struct Scsi_Host *shost = dev_get_drvdata(dev_d); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + uint32_t intr_mode; + int error; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0292 PCI device Power Management resume.\n"); + + /* Startup the kernel thread for this host adapter. */ + phba->worker_thread = kthread_run(lpfc_do_work, phba, + "lpfc_worker_%d", phba->brd_no); + if (IS_ERR(phba->worker_thread)) { + error = PTR_ERR(phba->worker_thread); + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0293 PM resume failed to start worker " + "thread: error=x%x.\n", error); + return error; + } + + /* Configure and enable interrupt */ + intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0294 PM resume Failed to enable interrupt\n"); + return -EIO; + } else + phba->intr_mode = intr_mode; + + /* Restart HBA and bring it online */ + lpfc_sli_brdrestart(phba); + lpfc_online(phba); + + /* Log the current active interrupt mode */ + lpfc_log_intr_mode(phba, phba->intr_mode); + + return 0; +} + +/** + * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to prepare the SLI4 device for PCI slot recover. It + * aborts all the outstanding SCSI I/Os to the pci device. + **/ +static void +lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) +{ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2828 PCI channel I/O abort preparing for recovery\n"); + /* + * There may be errored I/Os through HBA, abort all I/Os on txcmplq + * and let the SCSI mid-layer to retry them to recover. + */ + lpfc_sli_abort_fcp_rings(phba); +} + +/** + * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to prepare the SLI4 device for PCI slot reset. It + * disables the device interrupt and pci device, and aborts the internal FCP + * pending I/Os. + **/ +static void +lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) +{ + int offline = pci_channel_offline(phba->pcidev); + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2826 PCI channel disable preparing for reset offline" + " %d\n", offline); + + /* Block any management I/Os to the device */ + lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); + + + /* HBA_PCI_ERR was set in io_error_detect */ + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); + /* Flush all driver's outstanding I/Os as we are to reset */ + lpfc_sli_flush_io_rings(phba); + lpfc_offline(phba); + + /* stop all timers */ + lpfc_stop_hba_timers(phba); + + lpfc_sli4_queue_destroy(phba); + /* Disable interrupt and pci device */ + lpfc_sli4_disable_intr(phba); + pci_disable_device(phba->pcidev); +} + +/** + * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to prepare the SLI4 device for PCI slot permanently + * disabling. It blocks the SCSI transport layer traffic and flushes the FCP + * pending I/Os. 
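+ * Unlike lpfc_sli4_prep_dev_for_reset(), interrupts and the PCI device + * itself are not touched here.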
+ **/ +static void +lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) +{ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2827 PCI channel permanent disable for failure\n"); + + /* Block all SCSI devices' I/Os on the host */ + lpfc_scsi_dev_block(phba); + + /* stop all timers */ + lpfc_stop_hba_timers(phba); + + /* Clean up all driver's outstanding I/Os */ + lpfc_sli_flush_io_rings(phba); +} + +/** + * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device + * @pdev: pointer to PCI device. + * @state: the current PCI connection state. + * + * This routine is called from the PCI subsystem for error handling to device + * with SLI-4 interface spec. This function is called by the PCI subsystem + * after a PCI bus error affecting this device has been detected. When this + * function is invoked, it will need to stop all the I/Os and interrupt(s) + * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET + * for the PCI subsystem to perform proper recovery as desired. + * + * Return codes + * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + **/ +static pci_ers_result_t +lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + bool hba_pci_err; + + switch (state) { + case pci_channel_io_normal: + /* Non-fatal error, prepare for recovery */ + lpfc_sli4_prep_dev_for_recover(phba); + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); + /* Fatal error, prepare for slot reset */ + if (!hba_pci_err) + lpfc_sli4_prep_dev_for_reset(phba); + else + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2832 Already handling PCI error " + "state: x%x\n", state); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + set_bit(HBA_PCI_ERR, &phba->bit_flags); + /* Permanent failure, prepare for device down */ + lpfc_sli4_prep_dev_for_perm_failure(phba); + return PCI_ERS_RESULT_DISCONNECT; + default: + hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); + if (!hba_pci_err) + lpfc_sli4_prep_dev_for_reset(phba); + /* Unknown state, prepare and request slot reset */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2825 Unknown PCI error state: x%x\n", state); + lpfc_sli4_prep_dev_for_reset(phba); + return PCI_ERS_RESULT_NEED_RESET; + } +} + +/** + * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch + * @pdev: pointer to PCI device. + * + * This routine is called from the PCI subsystem for error handling to device + * with SLI-4 interface spec. It is called after PCI bus has been reset to + * restart the PCI card from scratch, as if from a cold-boot. During the + * PCI subsystem error recovery, after the driver returns + * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error + * recovery and then call this routine before calling the .resume method to + * recover the device. This function will initialize the HBA device, enable + * the interrupt, but it will just put the HBA to offline state without + * passing any I/O traffic. 
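+ * The HBA_PCI_ERR flag set by lpfc_io_error_detected_s4() is cleared here + * before the interrupt mode is reconfigured.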
+ * + * Return codes + * PCI_ERS_RESULT_RECOVERED - the device has been recovered + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + */ +static pci_ers_result_t +lpfc_io_slot_reset_s4(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + struct lpfc_sli *psli = &phba->sli; + uint32_t intr_mode; + bool hba_pci_err; + + dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); + if (pci_enable_device_mem(pdev)) { + printk(KERN_ERR "lpfc: Cannot re-enable " + "PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_restore_state(pdev); + + hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags); + if (!hba_pci_err) + dev_info(&pdev->dev, + "hba_pci_err was not set, recovering slot reset.\n"); + /* + * As the new kernel behavior of pci_restore_state() API call clears + * device saved_state flag, need to save the restored state again. + */ + pci_save_state(pdev); + + if (pdev->is_busmaster) + pci_set_master(pdev); + + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); + + /* Init cpu_map array */ + lpfc_cpu_map_array_init(phba); + /* Configure and enable interrupt */ + intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2824 Cannot re-enable interrupt after " + "slot reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } else + phba->intr_mode = intr_mode; + lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); + + /* Log the current active interrupt mode */ + lpfc_log_intr_mode(phba, phba->intr_mode); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device + * @pdev: pointer to PCI device + * + * This routine is called from the PCI subsystem for error handling to device + * with SLI-4 interface spec. It is called when kernel error recovery tells + * the lpfc driver that it is ok to resume normal PCI operation after PCI bus + * error recovery. After this call, traffic can start to flow from this device + * again. + **/ +static void +lpfc_io_resume_s4(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + /* + * In case of slot reset, as function reset is performed through + * mailbox command which needs DMA to be enabled, this operation + * has to be moved to the io resume phase. Taking device offline + * will perform the necessary cleanup. + */ + if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { + /* Perform device reset */ + lpfc_sli_brdrestart(phba); + /* Bring the device back online */ + lpfc_online(phba); + } +} + +/** + * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem + * @pdev: pointer to PCI device + * @pid: pointer to PCI device identifier + * + * This routine is to be registered to the kernel's PCI subsystem. When an + * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks + * at PCI device-specific information of the device and driver to see if the + * driver state that it can support this kind of device. If the match is + * successful, the driver core invokes this routine. This routine dispatches + * the action to the proper SLI-3 or SLI-4 device probing routine, which will + * do all the initialization that it needs to do to handle the HBA device + * properly. 
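The error_detected/slot_reset/resume trio above follows the standard PCI AER callback shape: map the channel state to a recovery verdict, re-enable and restore the function after the bus reset, then let traffic restart. Below is a minimal, hedged sketch of that shape for a hypothetical driver; the mydrv_* names are illustrative and not part of lpfc, and it shows only the generic PCI calls, not the HBA-specific teardown and bring-up the lpfc handlers perform.

#include <linux/pci.h>

static pci_ers_result_t mydrv_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal: quiesce outstanding I/O, keep the device up */
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal: stop interrupts and DMA, ask for a slot reset */
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Device cannot be recovered */
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

static pci_ers_result_t mydrv_slot_reset(struct pci_dev *pdev)
{
	/* Bring the function back up after the link/slot reset */
	if (pci_enable_device_mem(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_restore_state(pdev);
	pci_save_state(pdev);	/* restore clears saved_state, so save again */
	pci_set_master(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void mydrv_io_resume(struct pci_dev *pdev)
{
	/* Normal I/O may be restarted from this point on */
}

static const struct pci_error_handlers mydrv_err_handler = {
	.error_detected	= mydrv_error_detected,
	.slot_reset	= mydrv_slot_reset,
	.resume		= mydrv_io_resume,
};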
+ * + * Return code + * 0 - driver can claim the device + * negative value - driver can not claim the device + **/ +static int +lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + int rc; + struct lpfc_sli_intf intf; + + if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) + return -ENODEV; + + if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && + (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) + rc = lpfc_pci_probe_one_s4(pdev, pid); + else + rc = lpfc_pci_probe_one_s3(pdev, pid); + + return rc; +} + +/** + * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem + * @pdev: pointer to PCI device + * + * This routine is to be registered to the kernel's PCI subsystem. When an + * Emulex HBA is removed from PCI bus, the driver core invokes this routine. + * This routine dispatches the action to the proper SLI-3 or SLI-4 device + * remove routine, which will perform all the necessary cleanup for the + * device to be removed from the PCI subsystem properly. + **/ +static void +lpfc_pci_remove_one(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + lpfc_pci_remove_one_s3(pdev); + break; + case LPFC_PCI_DEV_OC: + lpfc_pci_remove_one_s4(pdev); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1424 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return; +} + +/** + * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management + * @dev: pointer to device + * + * This routine is to be registered to the kernel's PCI subsystem to support + * system Power Management (PM). When PM invokes this method, it dispatches + * the action to the proper SLI-3 or SLI-4 device suspend routine, which will + * suspend the device. + * + * Return code + * 0 - driver suspended the device + * Error otherwise + **/ +static int __maybe_unused +lpfc_pci_suspend_one(struct device *dev) +{ + struct Scsi_Host *shost = dev_get_drvdata(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + int rc = -ENODEV; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + rc = lpfc_pci_suspend_one_s3(dev); + break; + case LPFC_PCI_DEV_OC: + rc = lpfc_pci_suspend_one_s4(dev); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1425 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return rc; +} + +/** + * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management + * @dev: pointer to device + * + * This routine is to be registered to the kernel's PCI subsystem to support + * system Power Management (PM). When PM invokes this method, it dispatches + * the action to the proper SLI-3 or SLI-4 device resume routine, which will + * resume the device. 
+ * + * Return code + * 0 - driver suspended the device + * Error otherwise + **/ +static int __maybe_unused +lpfc_pci_resume_one(struct device *dev) +{ + struct Scsi_Host *shost = dev_get_drvdata(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + int rc = -ENODEV; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + rc = lpfc_pci_resume_one_s3(dev); + break; + case LPFC_PCI_DEV_OC: + rc = lpfc_pci_resume_one_s4(dev); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1426 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return rc; +} + +/** + * lpfc_io_error_detected - lpfc method for handling PCI I/O error + * @pdev: pointer to PCI device. + * @state: the current PCI connection state. + * + * This routine is registered to the PCI subsystem for error handling. This + * function is called by the PCI subsystem after a PCI bus error affecting + * this device has been detected. When this routine is invoked, it dispatches + * the action to the proper SLI-3 or SLI-4 device error detected handling + * routine, which will perform the proper error detected operation. + * + * Return codes + * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + **/ +static pci_ers_result_t +lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + + if (phba->link_state == LPFC_HBA_ERROR && + phba->hba_flag & HBA_IOQ_FLUSH) + return PCI_ERS_RESULT_NEED_RESET; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + rc = lpfc_io_error_detected_s3(pdev, state); + break; + case LPFC_PCI_DEV_OC: + rc = lpfc_io_error_detected_s4(pdev, state); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1427 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return rc; +} + +/** + * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch + * @pdev: pointer to PCI device. + * + * This routine is registered to the PCI subsystem for error handling. This + * function is called after PCI bus has been reset to restart the PCI card + * from scratch, as if from a cold-boot. When this routine is invoked, it + * dispatches the action to the proper SLI-3 or SLI-4 device reset handling + * routine, which will perform the proper device reset. + * + * Return codes + * PCI_ERS_RESULT_RECOVERED - the device has been recovered + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + **/ +static pci_ers_result_t +lpfc_io_slot_reset(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + rc = lpfc_io_slot_reset_s3(pdev); + break; + case LPFC_PCI_DEV_OC: + rc = lpfc_io_slot_reset_s4(pdev); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1428 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return rc; +} + +/** + * lpfc_io_resume - lpfc method for resuming PCI I/O operation + * @pdev: pointer to PCI device + * + * This routine is registered to the PCI subsystem for error handling. 
It + * is called when kernel error recovery tells the lpfc driver that it is + * OK to resume normal PCI operation after PCI bus error recovery. When + * this routine is invoked, it dispatches the action to the proper SLI-3 + * or SLI-4 device io_resume routine, which will resume the device operation. + **/ +static void +lpfc_io_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + lpfc_io_resume_s3(pdev); + break; + case LPFC_PCI_DEV_OC: + lpfc_io_resume_s4(pdev); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1429 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return; +} + +/** + * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter + * @phba: pointer to lpfc hba data structure. + * + * This routine checks to see if OAS is supported for this adapter. If + * supported, the configure Flash Optimized Fabric flag is set. Otherwise, + * the enable oas flag is cleared and the pool created for OAS device data + * is destroyed. + * + **/ +static void +lpfc_sli4_oas_verify(struct lpfc_hba *phba) +{ + + if (!phba->cfg_EnableXLane) + return; + + if (phba->sli4_hba.pc_sli4_params.oas_supported) { + phba->cfg_fof = 1; + } else { + phba->cfg_fof = 0; + mempool_destroy(phba->device_data_mem_pool); + phba->device_data_mem_pool = NULL; + } + + return; +} + +/** + * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter + * @phba: pointer to lpfc hba data structure. + * + * This routine checks to see if RAS is supported by the adapter. Check the + * function through which RAS support enablement is to be done. + **/ +void +lpfc_sli4_ras_init(struct lpfc_hba *phba) +{ + /* if ASIC_GEN_NUM >= 0xC) */ + if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_6) || + (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_FAMILY_G6)) { + phba->ras_fwlog.ras_hwsupport = true; + if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && + phba->cfg_ras_fwlog_buffsize) + phba->ras_fwlog.ras_enabled = true; + else + phba->ras_fwlog.ras_enabled = false; + } else { + phba->ras_fwlog.ras_hwsupport = false; + } +} + + +MODULE_DEVICE_TABLE(pci, lpfc_id_table); + +static const struct pci_error_handlers lpfc_err_handler = { + .error_detected = lpfc_io_error_detected, + .slot_reset = lpfc_io_slot_reset, + .resume = lpfc_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one, + lpfc_pci_suspend_one, + lpfc_pci_resume_one); + +static struct pci_driver lpfc_driver = { + .name = LPFC_DRIVER_NAME, + .id_table = lpfc_id_table, + .probe = lpfc_pci_probe_one, + .remove = lpfc_pci_remove_one, + .shutdown = lpfc_pci_remove_one, + .driver.pm = &lpfc_pci_pm_ops_one, + .err_handler = &lpfc_err_handler, +}; + +static const struct file_operations lpfc_mgmt_fop = { + .owner = THIS_MODULE, +}; + +static struct miscdevice lpfc_mgmt_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "lpfcmgmt", + .fops = &lpfc_mgmt_fop, +}; + +/** + * lpfc_init - lpfc module initialization routine + * + * This routine is to be invoked when the lpfc module is loaded into the + * kernel. The special kernel macro module_init() is used to indicate the + * role of this routine to the kernel as lpfc module entry point. 
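The registration block above is the generic pattern for tying these pieces together: a dev_pm_ops built with SIMPLE_DEV_PM_OPS() is attached through .driver.pm, the AER callbacks through .err_handler, and the driver is handed to the PCI core. A compact sketch for a hypothetical device follows; the mydrv_* names and the placeholder vendor/device IDs are illustrative only.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused mydrv_suspend(struct device *dev)
{
	/* Quiesce the device; the PCI core handles the power transition */
	return 0;
}

static int __maybe_unused mydrv_resume(struct device *dev)
{
	/* Re-enable interrupts and bring the device back online */
	return 0;
}

static SIMPLE_DEV_PM_OPS(mydrv_pm_ops, mydrv_suspend, mydrv_resume);

static const struct pci_device_id mydrv_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* placeholder IDs */
	{ }
};
MODULE_DEVICE_TABLE(pci, mydrv_ids);

static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device_mem(pdev);
}

static void mydrv_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver mydrv_driver = {
	.name		= "mydrv",
	.id_table	= mydrv_ids,
	.probe		= mydrv_probe,
	.remove		= mydrv_remove,
	.driver.pm	= &mydrv_pm_ops,
	/* .err_handler would point at a struct pci_error_handlers table
	 * such as the one sketched after the SLI-4 handlers above.
	 */
};
module_pci_driver(mydrv_driver);

MODULE_DESCRIPTION("Minimal PCI driver registration sketch");
MODULE_LICENSE("GPL");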
+ * + * Return codes + * 0 - successful + * -ENOMEM - FC attach transport failed + * all others - failed + */ +static int __init +lpfc_init(void) +{ + int error = 0; + + pr_info(LPFC_MODULE_DESC "\n"); + pr_info(LPFC_COPYRIGHT "\n"); + + error = misc_register(&lpfc_mgmt_dev); + if (error) + printk(KERN_ERR "Could not register lpfcmgmt device, " + "misc_register returned with status %d", error); + + error = -ENOMEM; + lpfc_transport_functions.vport_create = lpfc_vport_create; + lpfc_transport_functions.vport_delete = lpfc_vport_delete; + lpfc_transport_template = + fc_attach_transport(&lpfc_transport_functions); + if (lpfc_transport_template == NULL) + goto unregister; + lpfc_vport_transport_template = + fc_attach_transport(&lpfc_vport_transport_functions); + if (lpfc_vport_transport_template == NULL) { + fc_release_transport(lpfc_transport_template); + goto unregister; + } + lpfc_wqe_cmd_template(); + lpfc_nvmet_cmd_template(); + + /* Initialize in case vector mapping is needed */ + lpfc_present_cpu = num_present_cpus(); + + lpfc_pldv_detect = false; + + error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "lpfc/sli4:online", + lpfc_cpu_online, lpfc_cpu_offline); + if (error < 0) + goto cpuhp_failure; + lpfc_cpuhp_state = error; + + error = pci_register_driver(&lpfc_driver); + if (error) + goto unwind; + + return error; + +unwind: + cpuhp_remove_multi_state(lpfc_cpuhp_state); +cpuhp_failure: + fc_release_transport(lpfc_transport_template); + fc_release_transport(lpfc_vport_transport_template); +unregister: + misc_deregister(&lpfc_mgmt_dev); + + return error; +} + +void lpfc_dmp_dbg(struct lpfc_hba *phba) +{ + unsigned int start_idx; + unsigned int dbg_cnt; + unsigned int temp_idx; + int i; + int j = 0; + unsigned long rem_nsec; + + if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) + return; + + start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ; + dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt); + if (!dbg_cnt) + goto out; + temp_idx = start_idx; + if (dbg_cnt >= DBG_LOG_SZ) { + dbg_cnt = DBG_LOG_SZ; + temp_idx -= 1; + } else { + if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) { + temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ; + } else { + if (start_idx < dbg_cnt) + start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx); + else + start_idx -= dbg_cnt; + } + } + dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n", + start_idx, temp_idx, dbg_cnt); + + for (i = 0; i < dbg_cnt; i++) { + if ((start_idx + i) < DBG_LOG_SZ) + temp_idx = (start_idx + i) % DBG_LOG_SZ; + else + temp_idx = j++; + rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC); + dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s", + temp_idx, + (unsigned long)phba->dbg_log[temp_idx].t_ns, + rem_nsec / 1000, + phba->dbg_log[temp_idx].log); + } +out: + atomic_set(&phba->dbg_log_cnt, 0); + atomic_set(&phba->dbg_log_dmping, 0); +} + +__printf(2, 3) +void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...) 
+{ + unsigned int idx; + va_list args; + int dbg_dmping = atomic_read(&phba->dbg_log_dmping); + struct va_format vaf; + + + va_start(args, fmt); + if (unlikely(dbg_dmping)) { + vaf.fmt = fmt; + vaf.va = &args; + dev_info(&phba->pcidev->dev, "%pV", &vaf); + va_end(args); + return; + } + idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) % + DBG_LOG_SZ; + + atomic_inc(&phba->dbg_log_cnt); + + vscnprintf(phba->dbg_log[idx].log, + sizeof(phba->dbg_log[idx].log), fmt, args); + va_end(args); + + phba->dbg_log[idx].t_ns = local_clock(); +} + +/** + * lpfc_exit - lpfc module removal routine + * + * This routine is invoked when the lpfc module is removed from the kernel. + * The special kernel macro module_exit() is used to indicate the role of + * this routine to the kernel as lpfc module exit point. + */ +static void __exit +lpfc_exit(void) +{ + misc_deregister(&lpfc_mgmt_dev); + pci_unregister_driver(&lpfc_driver); + cpuhp_remove_multi_state(lpfc_cpuhp_state); + fc_release_transport(lpfc_transport_template); + fc_release_transport(lpfc_vport_transport_template); + idr_destroy(&lpfc_hba_index); +} + +module_init(lpfc_init); +module_exit(lpfc_exit); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION(LPFC_MODULE_DESC); +MODULE_AUTHOR("Broadcom"); +MODULE_VERSION("0:" LPFC_DRIVER_VERSION); diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h new file mode 100644 index 000000000..f896ec610 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -0,0 +1,99 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + *******************************************************************/ + +#define LOG_ELS 0x00000001 /* ELS events */ +#define LOG_DISCOVERY 0x00000002 /* Link discovery events */ +#define LOG_MBOX 0x00000004 /* Mailbox events */ +#define LOG_INIT 0x00000008 /* Initialization events */ +#define LOG_LINK_EVENT 0x00000010 /* Link events */ +#define LOG_IP 0x00000020 /* IP traffic history */ +#define LOG_FCP 0x00000040 /* FCP traffic history */ +#define LOG_NODE 0x00000080 /* Node table events */ +#define LOG_TEMP 0x00000100 /* Temperature sensor events */ +#define LOG_BG 0x00000200 /* BlockGuard events */ +#define LOG_MISC 0x00000400 /* Miscellaneous events */ +#define LOG_SLI 0x00000800 /* SLI events */ +#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */ +#define LOG_LIBDFC 0x00002000 /* Libdfc events */ +#define LOG_VPORT 0x00004000 /* NPIV events */ +#define LOG_LDS_EVENT 0x00008000 /* Link Degrade Signaling events */ +#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ +#define LOG_FIP 0x00020000 /* FIP events */ +#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */ +#define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */ +#define LOG_NVME 0x00100000 /* NVME general events. */ +#define LOG_NVME_DISC 0x00200000 /* NVME Discovery/Connect events. */ +#define LOG_NVME_ABTS 0x00400000 /* NVME ABTS events. */ +#define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */ +#define LOG_RSVD1 0x01000000 /* Reserved */ +#define LOG_RSVD2 0x02000000 /* Reserved */ +#define LOG_CGN_MGMT 0x04000000 /* Congestion Mgmt events */ +#define LOG_TRACE_EVENT 0x80000000 /* Dmp the DBG log on this err */ +#define LOG_ALL_MSG 0x7fffffff /* LOG all messages */ + +void lpfc_dmp_dbg(struct lpfc_hba *phba); +void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...); + +/* generate message by verbose log setting or severity */ +#define lpfc_vlog_msg(vport, level, mask, fmt, arg...) \ +{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '5')) \ + dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ + fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } + +#define lpfc_log_msg(phba, level, mask, fmt, arg...) \ +do { \ + { uint32_t log_verbose = (phba)->pport ? \ + (phba)->pport->cfg_log_verbose : \ + (phba)->cfg_log_verbose; \ + if (((mask) & log_verbose) || (level[1] <= '5')) \ + dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ + fmt, phba->brd_no, ##arg); \ + } \ +} while (0) + +#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ +do { \ + { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) { \ + if ((mask) & LOG_TRACE_EVENT && !(vport)->cfg_log_verbose) \ + lpfc_dmp_dbg((vport)->phba); \ + dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ + fmt, (vport)->phba->brd_no, vport->vpi, ##arg); \ + } else if (!(vport)->cfg_log_verbose) \ + lpfc_dbg_print((vport)->phba, "%d:(%d):" fmt, \ + (vport)->phba->brd_no, (vport)->vpi, ##arg); \ + } \ +} while (0) + +#define lpfc_printf_log(phba, level, mask, fmt, arg...) \ +do { \ + { uint32_t log_verbose = (phba)->pport ? 
\ + (phba)->pport->cfg_log_verbose : \ + (phba)->cfg_log_verbose; \ + if (((mask) & log_verbose) || (level[1] <= '3')) { \ + if ((mask) & LOG_TRACE_EVENT && !log_verbose) \ + lpfc_dmp_dbg(phba); \ + dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ + fmt, phba->brd_no, ##arg); \ + } else if (!log_verbose)\ + lpfc_dbg_print(phba, "%d:" fmt, phba->brd_no, ##arg); \ + } \ +} while (0) diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c new file mode 100644 index 000000000..0dfdc0c4c --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -0,0 +1,2671 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc_scsi.h" +#include "lpfc.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_compat.h" + +/** + * lpfc_mbox_rsrc_prep - Prepare a mailbox with DMA buffer memory. + * @phba: pointer to lpfc hba data structure. + * @mbox: pointer to the driver internal queue element for mailbox command. + * + * A mailbox command consists of the pool memory for the command, @mbox, and + * one or more DMA buffers for the data transfer. This routine provides + * a standard framework for allocating the dma buffer and assigning to the + * @mbox. Callers should cleanup the mbox with a call to + * lpfc_mbox_rsrc_cleanup. + * + * The lpfc_mbuf_alloc routine acquires the hbalock so the caller is + * responsible to ensure the hbalock is released. Also note that the + * driver design is a single dmabuf/mbuf per mbox in the ctx_buf. + * + **/ +int +lpfc_mbox_rsrc_prep(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) +{ + struct lpfc_dmabuf *mp; + + mp = kmalloc(sizeof(*mp), GFP_KERNEL); + if (!mp) + return -ENOMEM; + + mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); + if (!mp->virt) { + kfree(mp); + return -ENOMEM; + } + + memset(mp->virt, 0, LPFC_BPL_SIZE); + + /* Initialization only. Driver does not use a list of dmabufs. */ + INIT_LIST_HEAD(&mp->list); + mbox->ctx_buf = mp; + return 0; +} + +/** + * lpfc_mbox_rsrc_cleanup - Free the mailbox DMA buffer and virtual memory. + * @phba: pointer to lpfc hba data structure. 
+ * @mbox: pointer to the driver internal queue element for mailbox command. + * @locked: value that indicates if the hbalock is held (1) or not (0). + * + * A mailbox command consists of the pool memory for the command, @mbox, and + * possibly a DMA buffer for the data transfer. This routine provides + * a standard framework for releasing any dma buffers and freeing all + * memory resources in it as well as releasing the @mbox back to the @phba pool. + * Callers should use this routine for cleanup for all mailboxes prepped with + * lpfc_mbox_rsrc_prep. + * + **/ +void +lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, + enum lpfc_mbox_ctx locked) +{ + struct lpfc_dmabuf *mp; + + mp = (struct lpfc_dmabuf *)mbox->ctx_buf; + mbox->ctx_buf = NULL; + + /* Release the generic BPL buffer memory. */ + if (mp) { + if (locked == MBOX_THD_LOCKED) + __lpfc_mbuf_free(phba, mp->virt, mp->phys); + else + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + } + + mempool_free(mbox, phba->mbox_mem_pool); +} + +/** + * lpfc_dump_static_vport - Dump HBA's static vport information. + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * @offset: offset for dumping vport info. + * + * The dump mailbox command provides a method for the device driver to obtain + * various types of information from the HBA device. + * + * This routine prepares the mailbox command for dumping list of static + * vports to be created. + **/ +int +lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, + uint16_t offset) +{ + MAILBOX_t *mb; + struct lpfc_dmabuf *mp; + int rc; + + mb = &pmb->u.mb; + + /* Setup to dump vport info region */ + memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); + mb->mbxCommand = MBX_DUMP_MEMORY; + mb->un.varDmp.type = DMP_NV_PARAMS; + mb->un.varDmp.entry_index = offset; + mb->un.varDmp.region_id = DMP_REGION_VPORT; + mb->mbxOwner = OWN_HOST; + + /* For SLI3 HBAs data is embedded in mailbox */ + if (phba->sli_rev != LPFC_SLI_REV4) { + mb->un.varDmp.cv = 1; + mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t); + return 0; + } + + rc = lpfc_mbox_rsrc_prep(phba, pmb); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2605 %s: memory allocation failed\n", + __func__); + return 1; + } + + mp = pmb->ctx_buf; + mb->un.varWords[3] = putPaddrLow(mp->phys); + mb->un.varWords[4] = putPaddrHigh(mp->phys); + mb->un.varDmp.sli4_length = sizeof(struct static_vport_info); + + return 0; +} + +/** + * lpfc_down_link - Bring down HBAs link. + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * This routine prepares a mailbox command to bring down HBA link. + **/ +void +lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb; + memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); + mb = &pmb->u.mb; + mb->mbxCommand = MBX_DOWN_LINK; + mb->mbxOwner = OWN_HOST; +} + +/** + * lpfc_dump_mem - Prepare a mailbox command for reading a region. + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * @offset: offset into the region. + * @region_id: config region id. + * + * The dump mailbox command provides a method for the device driver to obtain + * various types of information from the HBA device. + * + * This routine prepares the mailbox command for dumping HBA's config region. 
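As the lpfc_mbox_rsrc_prep()/lpfc_mbox_rsrc_cleanup() comments above describe, a caller allocates the mailbox from the driver's pool, lets the command builder attach the DMA buffer, issues the command, and releases everything through the cleanup helper. Below is a hedged caller-side sketch of that lifecycle, with error handling trimmed; it assumes the lpfc headers included at the top of this file, that MBOX_THD_UNLOCKED is the "hbalock not held" counterpart of MBOX_THD_LOCKED, and that MBX_POLL/MBX_SUCCESS are the usual lpfc_sli_issue_mbox() polling flag and success status.

/* Sketch only: issue READ_SPARM64 synchronously and release resources. */
static int example_read_sparam_sync(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* The builder calls lpfc_mbox_rsrc_prep() to attach the dmabuf */
	if (lpfc_read_sparam(phba, mbox, 0)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	/* Poll for completion; MBX_NOWAIT plus a cmpl handler is the
	 * asynchronous alternative.
	 */
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* A real caller would copy the service parameters out of
	 * mbox->ctx_buf before releasing it.
	 */
	lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);

	return rc == MBX_SUCCESS ? 0 : -EIO;
}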
+ **/ +void +lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset, + uint16_t region_id) +{ + MAILBOX_t *mb; + void *ctx; + + mb = &pmb->u.mb; + ctx = pmb->ctx_buf; + + /* Setup to dump VPD region */ + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + mb->mbxCommand = MBX_DUMP_MEMORY; + mb->un.varDmp.cv = 1; + mb->un.varDmp.type = DMP_NV_PARAMS; + mb->un.varDmp.entry_index = offset; + mb->un.varDmp.region_id = region_id; + mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t)); + mb->un.varDmp.co = 0; + mb->un.varDmp.resp_offset = 0; + pmb->ctx_buf = ctx; + mb->mbxOwner = OWN_HOST; + return; +} + +/** + * lpfc_dump_wakeup_param - Prepare mailbox command for retrieving wakeup params + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * This function create a dump memory mailbox command to dump wake up + * parameters. + */ +void +lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb; + void *ctx; + + mb = &pmb->u.mb; + /* Save context so that we can restore after memset */ + ctx = pmb->ctx_buf; + + /* Setup to dump VPD region */ + memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); + mb->mbxCommand = MBX_DUMP_MEMORY; + mb->mbxOwner = OWN_HOST; + mb->un.varDmp.cv = 1; + mb->un.varDmp.type = DMP_NV_PARAMS; + if (phba->sli_rev < LPFC_SLI_REV4) + mb->un.varDmp.entry_index = 0; + mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID; + mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE; + mb->un.varDmp.co = 0; + mb->un.varDmp.resp_offset = 0; + pmb->ctx_buf = ctx; + return; +} + +/** + * lpfc_read_nv - Prepare a mailbox command for reading HBA's NVRAM param + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * The read NVRAM mailbox command returns the HBA's non-volatile parameters + * that are used as defaults when the Fibre Channel link is brought on-line. + * + * This routine prepares the mailbox command for reading information stored + * in the HBA's NVRAM. Specifically, the HBA's WWNN and WWPN. + **/ +void +lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +{ + MAILBOX_t *mb; + + mb = &pmb->u.mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + mb->mbxCommand = MBX_READ_NV; + mb->mbxOwner = OWN_HOST; + return; +} + +/** + * lpfc_config_async - Prepare a mailbox command for enabling HBA async event + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * @ring: ring number for the asynchronous event to be configured. + * + * The asynchronous event enable mailbox command is used to enable the + * asynchronous event posting via the ASYNC_STATUS_CN IOCB response and + * specifies the default ring to which events are posted. + * + * This routine prepares the mailbox command for enabling HBA asynchronous + * event support on a IOCB ring. + **/ +void +lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, + uint32_t ring) +{ + MAILBOX_t *mb; + + mb = &pmb->u.mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + mb->mbxCommand = MBX_ASYNCEVT_ENABLE; + mb->un.varCfgAsyncEvent.ring = ring; + mb->mbxOwner = OWN_HOST; + return; +} + +/** + * lpfc_heart_beat - Prepare a mailbox command for heart beat + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. 
+ * + * The heart beat mailbox command is used to detect an unresponsive HBA, which + * is defined as any device where no error attention is sent and both mailbox + * and rings are not processed. + * + * This routine prepares the mailbox command for issuing a heart beat in the + * form of mailbox command to the HBA. The timely completion of the heart + * beat mailbox command indicates the health of the HBA. + **/ +void +lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +{ + MAILBOX_t *mb; + + mb = &pmb->u.mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + mb->mbxCommand = MBX_HEARTBEAT; + mb->mbxOwner = OWN_HOST; + return; +} + +/** + * lpfc_read_topology - Prepare a mailbox command for reading HBA topology + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * @mp: DMA buffer memory for reading the link attention information into. + * + * The read topology mailbox command is issued to read the link topology + * information indicated by the HBA port when the Link Event bit of the Host + * Attention (HSTATT) register is set to 1 (For SLI-3) or when an FC Link + * Attention ACQE is received from the port (For SLI-4). A Link Event + * Attention occurs based on an exception detected at the Fibre Channel link + * interface. + * + * This routine prepares the mailbox command for reading HBA link topology + * information. A DMA memory has been set aside and address passed to the + * HBA through @mp for the HBA to DMA link attention information into the + * memory as part of the execution of the mailbox command. + * + * Return codes + * 0 - Success (currently always return 0) + **/ +int +lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, + struct lpfc_dmabuf *mp) +{ + MAILBOX_t *mb; + + mb = &pmb->u.mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + + INIT_LIST_HEAD(&mp->list); + mb->mbxCommand = MBX_READ_TOPOLOGY; + mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE; + mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys); + mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys); + + /* Save address for later completion and set the owner to host so that + * the FW knows this mailbox is available for processing. + */ + pmb->ctx_buf = (uint8_t *)mp; + mb->mbxOwner = OWN_HOST; + return (0); +} + +/** + * lpfc_clear_la - Prepare a mailbox command for clearing HBA link attention + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * The clear link attention mailbox command is issued to clear the link event + * attention condition indicated by the Link Event bit of the Host Attention + * (HSTATT) register. The link event attention condition is cleared only if + * the event tag specified matches that of the current link event counter. + * The current event tag is read using the read link attention event mailbox + * command. + * + * This routine prepares the mailbox command for clearing HBA link attention + * information. + **/ +void +lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +{ + MAILBOX_t *mb; + + mb = &pmb->u.mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + + mb->un.varClearLA.eventTag = phba->fc_eventTag; + mb->mbxCommand = MBX_CLEAR_LA; + mb->mbxOwner = OWN_HOST; + return; +} + +/** + * lpfc_config_link - Prepare a mailbox command for configuring link on a HBA + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. 
+ * + * The configure link mailbox command is used before the initialize link + * mailbox command to override default value and to configure link-oriented + * parameters such as DID address and various timers. Typically, this + * command would be used after an F_Port login to set the returned DID address + * and the fabric timeout values. This command is not valid before a configure + * port command has configured the HBA port. + * + * This routine prepares the mailbox command for configuring link on a HBA. + **/ +void +lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +{ + struct lpfc_vport *vport = phba->pport; + MAILBOX_t *mb = &pmb->u.mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + + /* NEW_FEATURE + * SLI-2, Coalescing Response Feature. + */ + if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) { + mb->un.varCfgLnk.cr = 1; + mb->un.varCfgLnk.ci = 1; + mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay; + mb->un.varCfgLnk.cr_count = phba->cfg_cr_count; + } + + mb->un.varCfgLnk.myId = vport->fc_myDID; + mb->un.varCfgLnk.edtov = phba->fc_edtov; + mb->un.varCfgLnk.arbtov = phba->fc_arbtov; + mb->un.varCfgLnk.ratov = phba->fc_ratov; + mb->un.varCfgLnk.rttov = phba->fc_rttov; + mb->un.varCfgLnk.altov = phba->fc_altov; + mb->un.varCfgLnk.crtov = phba->fc_crtov; + mb->un.varCfgLnk.cscn = 0; + if (phba->bbcredit_support && phba->cfg_enable_bbcr) { + mb->un.varCfgLnk.cscn = 1; + mb->un.varCfgLnk.bbscn = bf_get(lpfc_bbscn_def, + &phba->sli4_hba.bbscn_params); + } + + if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4)) + mb->un.varCfgLnk.ack0_enable = 1; + + mb->mbxCommand = MBX_CONFIG_LINK; + mb->mbxOwner = OWN_HOST; + return; +} + +/** + * lpfc_config_msi - Prepare a mailbox command for configuring msi-x + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * The configure MSI-X mailbox command is used to configure the HBA's SLI-3 + * MSI-X multi-message interrupt vector association to interrupt attention + * conditions. + * + * Return codes + * 0 - Success + * -EINVAL - Failure + **/ +int +lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb = &pmb->u.mb; + uint32_t attentionConditions[2]; + + /* Sanity check */ + if (phba->cfg_use_msi != 2) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0475 Not configured for supporting MSI-X " + "cfg_use_msi: 0x%x\n", phba->cfg_use_msi); + return -EINVAL; + } + + if (phba->sli_rev < 3) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0476 HBA not supporting SLI-3 or later " + "SLI Revision: 0x%x\n", phba->sli_rev); + return -EINVAL; + } + + /* Clear mailbox command fields */ + memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); + + /* + * SLI-3, Message Signaled Interrupt Feature. 
+ */ + + /* Multi-message attention configuration */ + attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT | + HA_LATT | HA_MBATT); + attentionConditions[1] = 0; + + mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0]; + mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1]; + + /* + * Set up message number to HA bit association + */ +#ifdef __BIG_ENDIAN_BITFIELD + /* RA0 (FCP Ring) */ + mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1; + /* RA1 (Other Protocol Extra Ring) */ + mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1; +#else /* __LITTLE_ENDIAN_BITFIELD */ + /* RA0 (FCP Ring) */ + mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1; + /* RA1 (Other Protocol Extra Ring) */ + mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1; +#endif + /* Multi-message interrupt autoclear configuration*/ + mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0]; + mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1]; + + /* For now, HBA autoclear does not work reliably, disable it */ + mb->un.varCfgMSI.autoClearHA[0] = 0; + mb->un.varCfgMSI.autoClearHA[1] = 0; + + /* Set command and owner bit */ + mb->mbxCommand = MBX_CONFIG_MSI; + mb->mbxOwner = OWN_HOST; + + return 0; +} + +/** + * lpfc_init_link - Prepare a mailbox command for initialize link on a HBA + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * @topology: the link topology for the link to be initialized to. + * @linkspeed: the link speed for the link to be initialized to. + * + * The initialize link mailbox command is used to initialize the Fibre + * Channel link. This command must follow a configure port command that + * establishes the mode of operation. + * + * This routine prepares the mailbox command for initializing link on a HBA + * with the specified link topology and speed. 
+ **/ +void +lpfc_init_link(struct lpfc_hba * phba, + LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed) +{ + lpfc_vpd_t *vpd; + MAILBOX_t *mb; + + mb = &pmb->u.mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + + switch (topology) { + case FLAGS_TOPOLOGY_MODE_LOOP_PT: + mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; + mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; + break; + case FLAGS_TOPOLOGY_MODE_PT_PT: + mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; + break; + case FLAGS_TOPOLOGY_MODE_LOOP: + mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; + break; + case FLAGS_TOPOLOGY_MODE_PT_LOOP: + mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; + mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; + break; + case FLAGS_LOCAL_LB: + mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB; + break; + } + + /* Topology handling for ASIC_GEN_NUM 0xC and later */ + if ((phba->sli4_hba.pc_sli4_params.sli_family == LPFC_SLI_INTF_FAMILY_G6 || + phba->sli4_hba.pc_sli4_params.if_type == LPFC_SLI_INTF_IF_TYPE_6) && + !(phba->sli4_hba.pc_sli4_params.pls) && + mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) { + mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; + phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT; + } + + /* Enable asynchronous ABTS responses from firmware */ + if (phba->sli_rev == LPFC_SLI_REV3 && !phba->cfg_fcp_wait_abts_rsp) + mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT; + + /* NEW_FEATURE + * Setting up the link speed + */ + vpd = &phba->vpd; + if (vpd->rev.feaLevelHigh >= 0x02){ + switch(linkspeed){ + case LPFC_USER_LINK_SPEED_1G: + mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; + mb->un.varInitLnk.link_speed = LINK_SPEED_1G; + break; + case LPFC_USER_LINK_SPEED_2G: + mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; + mb->un.varInitLnk.link_speed = LINK_SPEED_2G; + break; + case LPFC_USER_LINK_SPEED_4G: + mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; + mb->un.varInitLnk.link_speed = LINK_SPEED_4G; + break; + case LPFC_USER_LINK_SPEED_8G: + mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; + mb->un.varInitLnk.link_speed = LINK_SPEED_8G; + break; + case LPFC_USER_LINK_SPEED_10G: + mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; + mb->un.varInitLnk.link_speed = LINK_SPEED_10G; + break; + case LPFC_USER_LINK_SPEED_16G: + mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; + mb->un.varInitLnk.link_speed = LINK_SPEED_16G; + break; + case LPFC_USER_LINK_SPEED_32G: + mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; + mb->un.varInitLnk.link_speed = LINK_SPEED_32G; + break; + case LPFC_USER_LINK_SPEED_64G: + mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; + mb->un.varInitLnk.link_speed = LINK_SPEED_64G; + break; + case LPFC_USER_LINK_SPEED_AUTO: + default: + mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO; + break; + } + + } + else + mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO; + + mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK; + mb->mbxOwner = OWN_HOST; + mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA; + return; +} + +/** + * lpfc_read_sparam - Prepare a mailbox command for reading HBA parameters + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * @vpi: virtual N_Port identifier. + * + * The read service parameter mailbox command is used to read the HBA port + * service parameters. The service parameters are read into the buffer + * specified directly by a BDE in the mailbox command. 
These service + * parameters may then be used to build the payload of an N_Port/F_POrt + * login request and reply (LOGI/ACC). + * + * This routine prepares the mailbox command for reading HBA port service + * parameters. The DMA memory is allocated in this function and the addresses + * are populated into the mailbox command for the HBA to DMA the service + * parameters into. + * + * Return codes + * 0 - Success + * 1 - DMA memory allocation failed + **/ +int +lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) +{ + struct lpfc_dmabuf *mp; + MAILBOX_t *mb; + int rc; + + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + + /* Get a buffer to hold the HBAs Service Parameters */ + rc = lpfc_mbox_rsrc_prep(phba, pmb); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, + "0301 READ_SPARAM: no buffers\n"); + return 1; + } + + mp = pmb->ctx_buf; + mb = &pmb->u.mb; + mb->mbxOwner = OWN_HOST; + mb->mbxCommand = MBX_READ_SPARM64; + mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); + mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); + mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); + if (phba->sli_rev >= LPFC_SLI_REV3) + mb->un.varRdSparm.vpi = phba->vpi_ids[vpi]; + + return (0); +} + +/** + * lpfc_unreg_did - Prepare a mailbox command for unregistering DID + * @phba: pointer to lpfc hba data structure. + * @vpi: virtual N_Port identifier. + * @did: remote port identifier. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * The unregister DID mailbox command is used to unregister an N_Port/F_Port + * login for an unknown RPI by specifying the DID of a remote port. This + * command frees an RPI context in the HBA port. This has the effect of + * performing an implicit N_Port/F_Port logout. + * + * This routine prepares the mailbox command for unregistering a remote + * N_Port/F_Port (DID) login. + **/ +void +lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did, + LPFC_MBOXQ_t * pmb) +{ + MAILBOX_t *mb; + + mb = &pmb->u.mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + + mb->un.varUnregDID.did = did; + mb->un.varUnregDID.vpi = vpi; + if ((vpi != 0xffff) && + (phba->sli_rev == LPFC_SLI_REV4)) + mb->un.varUnregDID.vpi = phba->vpi_ids[vpi]; + + mb->mbxCommand = MBX_UNREG_D_ID; + mb->mbxOwner = OWN_HOST; + return; +} + +/** + * lpfc_read_config - Prepare a mailbox command for reading HBA configuration + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * The read configuration mailbox command is used to read the HBA port + * configuration parameters. This mailbox command provides a method for + * seeing any parameters that may have changed via various configuration + * mailbox commands. + * + * This routine prepares the mailbox command for reading out HBA configuration + * parameters. + **/ +void +lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +{ + MAILBOX_t *mb; + + mb = &pmb->u.mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + + mb->mbxCommand = MBX_READ_CONFIG; + mb->mbxOwner = OWN_HOST; + return; +} + +/** + * lpfc_read_lnk_stat - Prepare a mailbox command for reading HBA link stats + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * The read link status mailbox command is used to read the link status from + * the HBA. Link status includes all link-related error counters. 
These + * counters are maintained by the HBA and originated in the link hardware + * unit. Note that all of these counters wrap. + * + * This routine prepares the mailbox command for reading out HBA link status. + **/ +void +lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +{ + MAILBOX_t *mb; + + mb = &pmb->u.mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + + mb->mbxCommand = MBX_READ_LNK_STAT; + mb->mbxOwner = OWN_HOST; + return; +} + +/** + * lpfc_reg_rpi - Prepare a mailbox command for registering remote login + * @phba: pointer to lpfc hba data structure. + * @vpi: virtual N_Port identifier. + * @did: remote port identifier. + * @param: pointer to memory holding the server parameters. + * @pmb: pointer to the driver internal queue element for mailbox command. + * @rpi: the rpi to use in the registration (usually only used for SLI4. + * + * The registration login mailbox command is used to register an N_Port or + * F_Port login. This registration allows the HBA to cache the remote N_Port + * service parameters internally and thereby make the appropriate FC-2 + * decisions. The remote port service parameters are handed off by the driver + * to the HBA using a descriptor entry that directly identifies a buffer in + * host memory. In exchange, the HBA returns an RPI identifier. + * + * This routine prepares the mailbox command for registering remote port login. + * The function allocates DMA buffer for passing the service parameters to the + * HBA with the mailbox command. + * + * Return codes + * 0 - Success + * 1 - DMA memory allocation failed + **/ +int +lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, + uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi) +{ + MAILBOX_t *mb = &pmb->u.mb; + uint8_t *sparam; + struct lpfc_dmabuf *mp; + int rc; + + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + + mb->un.varRegLogin.rpi = 0; + if (phba->sli_rev == LPFC_SLI_REV4) + mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi]; + if (phba->sli_rev >= LPFC_SLI_REV3) + mb->un.varRegLogin.vpi = phba->vpi_ids[vpi]; + mb->un.varRegLogin.did = did; + mb->mbxOwner = OWN_HOST; + + /* Get a buffer to hold NPorts Service Parameters */ + rc = lpfc_mbox_rsrc_prep(phba, pmb); + if (rc) { + mb->mbxCommand = MBX_REG_LOGIN64; + /* REG_LOGIN: no buffers */ + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, + "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, " + "rpi x%x\n", vpi, did, rpi); + return 1; + } + + /* Copy param's into a new buffer */ + mp = pmb->ctx_buf; + sparam = mp->virt; + memcpy(sparam, param, sizeof (struct serv_parm)); + + /* Finish initializing the mailbox. */ + mb->mbxCommand = MBX_REG_LOGIN64; + mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); + mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys); + mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys); + + return 0; +} + +/** + * lpfc_unreg_login - Prepare a mailbox command for unregistering remote login + * @phba: pointer to lpfc hba data structure. + * @vpi: virtual N_Port identifier. + * @rpi: remote port identifier + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * The unregistration login mailbox command is used to unregister an N_Port + * or F_Port login. This command frees an RPI context in the HBA. It has the + * effect of performing an implicit N_Port/F_Port logout. + * + * This routine prepares the mailbox command for unregistering remote port + * login. 
+ * + * For SLI4 ports, the rpi passed to this function must be the physical + * rpi value, not the logical index. + **/ +void +lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, + LPFC_MBOXQ_t * pmb) +{ + MAILBOX_t *mb; + + mb = &pmb->u.mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + + mb->un.varUnregLogin.rpi = rpi; + mb->un.varUnregLogin.rsvd1 = 0; + if (phba->sli_rev >= LPFC_SLI_REV3) + mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi]; + + mb->mbxCommand = MBX_UNREG_LOGIN; + mb->mbxOwner = OWN_HOST; + + return; +} + +/** + * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA. + * @vport: pointer to a vport object. + * + * This routine sends mailbox command to unregister all active RPIs for + * a vport. + **/ +void +lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mbox; + int rc; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mbox) { + /* + * For SLI4 functions, the rpi field is overloaded for + * the vport context unreg all. This routine passes + * 0 for the rpi field in lpfc_unreg_login for compatibility + * with SLI3 and then overrides the rpi field with the + * expected value for SLI4. + */ + lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi], + mbox); + mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000; + mbox->vport = vport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->ctx_ndlp = NULL; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + mempool_free(mbox, phba->mbox_mem_pool); + } +} + +/** + * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier + * @vport: pointer to a vport object. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * The registration vport identifier mailbox command is used to activate a + * virtual N_Port after it has acquired an N_Port_ID. The HBA validates the + * N_Port_ID against the information in the selected virtual N_Port context + * block and marks it active to allow normal processing of IOCB commands and + * received unsolicited exchanges. + * + * This routine prepares the mailbox command for registering a virtual N_Port. + **/ +void +lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb = &pmb->u.mb; + struct lpfc_hba *phba = vport->phba; + + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + /* + * Set the re-reg VPI bit for f/w to update the MAC address. + */ + if ((phba->sli_rev == LPFC_SLI_REV4) && + !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) + mb->un.varRegVpi.upd = 1; + + mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi]; + mb->un.varRegVpi.sid = vport->fc_myDID; + if (phba->sli_rev == LPFC_SLI_REV4) + mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi]; + else + mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; + memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname, + sizeof(struct lpfc_name)); + mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]); + mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]); + + mb->mbxCommand = MBX_REG_VPI; + mb->mbxOwner = OWN_HOST; + return; + +} + +/** + * lpfc_unreg_vpi - Prepare a mailbox command for unregistering vport id + * @phba: pointer to lpfc hba data structure. + * @vpi: virtual N_Port identifier. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * The unregistration vport identifier mailbox command is used to inactivate + * a virtual N_Port. 
The driver must have logged out and unregistered all + * remote N_Ports to abort any activity on the virtual N_Port. The HBA will + * unregisters any default RPIs associated with the specified vpi, aborting + * any active exchanges. The HBA will post the mailbox response after making + * the virtual N_Port inactive. + * + * This routine prepares the mailbox command for unregistering a virtual + * N_Port. + **/ +void +lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb = &pmb->u.mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + + if (phba->sli_rev == LPFC_SLI_REV3) + mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi]; + else if (phba->sli_rev >= LPFC_SLI_REV4) + mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi]; + + mb->mbxCommand = MBX_UNREG_VPI; + mb->mbxOwner = OWN_HOST; + return; + +} + +/** + * lpfc_config_pcb_setup - Set up IOCB rings in the Port Control Block (PCB) + * @phba: pointer to lpfc hba data structure. + * + * This routine sets up and initializes the IOCB rings in the Port Control + * Block (PCB). + **/ +static void +lpfc_config_pcb_setup(struct lpfc_hba * phba) +{ + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + PCB_t *pcbp = phba->pcb; + dma_addr_t pdma_addr; + uint32_t offset; + uint32_t iocbCnt = 0; + int i; + + pcbp->maxRing = (psli->num_rings - 1); + + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + + pring->sli.sli3.sizeCiocb = + phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE : + SLI2_IOCB_CMD_SIZE; + pring->sli.sli3.sizeRiocb = + phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE : + SLI2_IOCB_RSP_SIZE; + /* A ring MUST have both cmd and rsp entries defined to be + valid */ + if ((pring->sli.sli3.numCiocb == 0) || + (pring->sli.sli3.numRiocb == 0)) { + pcbp->rdsc[i].cmdEntries = 0; + pcbp->rdsc[i].rspEntries = 0; + pcbp->rdsc[i].cmdAddrHigh = 0; + pcbp->rdsc[i].rspAddrHigh = 0; + pcbp->rdsc[i].cmdAddrLow = 0; + pcbp->rdsc[i].rspAddrLow = 0; + pring->sli.sli3.cmdringaddr = NULL; + pring->sli.sli3.rspringaddr = NULL; + continue; + } + /* Command ring setup for ring */ + pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt]; + pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb; + + offset = (uint8_t *) &phba->IOCBs[iocbCnt] - + (uint8_t *) phba->slim2p.virt; + pdma_addr = phba->slim2p.phys + offset; + pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr); + pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr); + iocbCnt += pring->sli.sli3.numCiocb; + + /* Response ring setup for ring */ + pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt]; + + pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb; + offset = (uint8_t *)&phba->IOCBs[iocbCnt] - + (uint8_t *)phba->slim2p.virt; + pdma_addr = phba->slim2p.phys + offset; + pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr); + pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr); + iocbCnt += pring->sli.sli3.numRiocb; + } +} + +/** + * lpfc_read_rev - Prepare a mailbox command for reading HBA revision + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * The read revision mailbox command is used to read the revision levels of + * the HBA components. These components include hardware units, resident + * firmware, and available firmware. HBAs that supports SLI-3 mode of + * operation provide different response information depending on the version + * requested by the driver. + * + * This routine prepares the mailbox command for reading HBA revision + * information. 
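lpfc_config_pcb_setup() above converts a pointer into the slim2p coherent block to a device-visible address by taking the pointer's byte offset from the block's virtual base and adding it to the block's physical base. A self-contained sketch of that offset arithmetic, with hypothetical names and a plain integer standing in for dma_addr_t:

#include <stddef.h>
#include <stdint.h>

struct dma_block {
	void	 *virt;		/* CPU mapping of the coherent allocation */
	uint64_t  phys;		/* device-visible base of the same memory */
};

/* Device-visible address of @obj, which must lie inside @blk */
static uint64_t obj_bus_addr(const struct dma_block *blk, const void *obj)
{
	size_t offset = (const uint8_t *)obj - (const uint8_t *)blk->virt;

	return blk->phys + offset;
}

int main(void)
{
	static uint8_t backing[256];
	struct dma_block blk = { .virt = backing, .phys = 0x80000000ULL };

	/* An object 64 bytes into the block is reported at phys + 64 */
	return obj_bus_addr(&blk, backing + 64) == 0x80000040ULL ? 0 : 1;
}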
+ **/ +void +lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +{ + MAILBOX_t *mb = &pmb->u.mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + mb->un.varRdRev.cv = 1; + mb->un.varRdRev.v3req = 1; /* Request SLI3 info */ + mb->mbxCommand = MBX_READ_REV; + mb->mbxOwner = OWN_HOST; + return; +} + +void +lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb = &pmb->u.mb; + struct lpfc_mqe *mqe; + + switch (mb->mbxCommand) { + case MBX_READ_REV: + mqe = &pmb->u.mqe; + lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name, + mqe->un.read_rev.fw_name, 16); + lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name, + mqe->un.read_rev.ulp_fw_name, 16); + break; + default: + break; + } + return; +} + +/** + * lpfc_build_hbq_profile2 - Set up the HBQ Selection Profile 2 + * @hbqmb: pointer to the HBQ configuration data structure in mailbox command. + * @hbq_desc: pointer to the HBQ selection profile descriptor. + * + * The Host Buffer Queue (HBQ) Selection Profile 2 specifies that the HBA + * tests the incoming frames' R_CTL/TYPE fields with works 10:15 and performs + * the Sequence Length Test using the fields in the Selection Profile 2 + * extension in words 20:31. + **/ +static void +lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb, + struct lpfc_hbq_init *hbq_desc) +{ + hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt; + hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen; + hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff; +} + +/** + * lpfc_build_hbq_profile3 - Set up the HBQ Selection Profile 3 + * @hbqmb: pointer to the HBQ configuration data structure in mailbox command. + * @hbq_desc: pointer to the HBQ selection profile descriptor. + * + * The Host Buffer Queue (HBQ) Selection Profile 3 specifies that the HBA + * tests the incoming frame's R_CTL/TYPE fields with words 10:15 and performs + * the Sequence Length Test and Byte Field Test using the fields in the + * Selection Profile 3 extension in words 20:31. + **/ +static void +lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb, + struct lpfc_hbq_init *hbq_desc) +{ + hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt; + hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen; + hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff; + hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff; + memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch, + sizeof(hbqmb->profiles.profile3.cmdmatch)); +} + +/** + * lpfc_build_hbq_profile5 - Set up the HBQ Selection Profile 5 + * @hbqmb: pointer to the HBQ configuration data structure in mailbox command. + * @hbq_desc: pointer to the HBQ selection profile descriptor. + * + * The Host Buffer Queue (HBQ) Selection Profile 5 specifies a header HBQ. The + * HBA tests the initial frame of an incoming sequence using the frame's + * R_CTL/TYPE fields with words 10:15 and performs the Sequence Length Test + * and Byte Field Test using the fields in the Selection Profile 5 extension + * words 20:31. 
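+ *
+ * For illustration only, a hypothetical descriptor that would select this
+ * profile; the values are placeholders and are not taken from the driver's
+ * real HBQ tables:
+ *
+ *   struct lpfc_hbq_init hbq_prof5_example = {
+ *           .profile     = 5,             // header HBQ selection profile
+ *           .rn          = 1,             // receive notification enabled
+ *           .entry_count = 256,
+ *           .mask_count  = 1,
+ *           .ring_mask   = (1 << 0),      // bind to ring 0 (b0001)
+ *           .headerLen   = 32,
+ *           .seqlenoff   = 2,
+ *           .seqlenbcnt  = 1,
+ *   };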
+ **/ +static void +lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb, + struct lpfc_hbq_init *hbq_desc) +{ + hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt; + hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen; + hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff; + hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff; + memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch, + sizeof(hbqmb->profiles.profile5.cmdmatch)); +} + +/** + * lpfc_config_hbq - Prepare a mailbox command for configuring an HBQ + * @phba: pointer to lpfc hba data structure. + * @id: HBQ identifier. + * @hbq_desc: pointer to the HBA descriptor data structure. + * @hbq_entry_index: index of the HBQ entry data structures. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * The configure HBQ (Host Buffer Queue) mailbox command is used to configure + * an HBQ. The configuration binds events that require buffers to a particular + * ring and HBQ based on a selection profile. + * + * This routine prepares the mailbox command for configuring an HBQ. + **/ +void +lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id, + struct lpfc_hbq_init *hbq_desc, + uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb) +{ + int i; + MAILBOX_t *mb = &pmb->u.mb; + struct config_hbq_var *hbqmb = &mb->un.varCfgHbq; + + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + hbqmb->hbqId = id; + hbqmb->entry_count = hbq_desc->entry_count; /* # entries in HBQ */ + hbqmb->recvNotify = hbq_desc->rn; /* Receive + * Notification */ + hbqmb->numMask = hbq_desc->mask_count; /* # R_CTL/TYPE masks + * # in words 0-19 */ + hbqmb->profile = hbq_desc->profile; /* Selection profile: + * 0 = all, + * 7 = logentry */ + hbqmb->ringMask = hbq_desc->ring_mask; /* Binds HBQ to a ring + * e.g. Ring0=b0001, + * ring2=b0100 */ + hbqmb->headerLen = hbq_desc->headerLen; /* 0 if not profile 4 + * or 5 */ + hbqmb->logEntry = hbq_desc->logEntry; /* Set to 1 if this + * HBQ will be used + * for LogEntry + * buffers */ + hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) + + hbq_entry_index * sizeof(struct lpfc_hbq_entry); + hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys); + + mb->mbxCommand = MBX_CONFIG_HBQ; + mb->mbxOwner = OWN_HOST; + + /* Copy info for profiles 2,3,5. Other + * profiles this area is reserved + */ + if (hbq_desc->profile == 2) + lpfc_build_hbq_profile2(hbqmb, hbq_desc); + else if (hbq_desc->profile == 3) + lpfc_build_hbq_profile3(hbqmb, hbq_desc); + else if (hbq_desc->profile == 5) + lpfc_build_hbq_profile5(hbqmb, hbq_desc); + + /* Return if no rctl / type masks for this HBQ */ + if (!hbq_desc->mask_count) + return; + + /* Otherwise we setup specific rctl / type masks for this HBQ */ + for (i = 0; i < hbq_desc->mask_count; i++) { + hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch; + hbqmb->hbqMasks[i].tmask = hbq_desc->hbqMasks[i].tmask; + hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch; + hbqmb->hbqMasks[i].rctlmask = hbq_desc->hbqMasks[i].rctlmask; + } + + return; +} + +/** + * lpfc_config_ring - Prepare a mailbox command for configuring an IOCB ring + * @phba: pointer to lpfc hba data structure. + * @ring: ring number/index + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * The configure ring mailbox command is used to configure an IOCB ring. This + * configuration binds from one to six of HBA RC_CTL/TYPE mask entries to the + * ring. 
This is used to map incoming sequences to a particular ring whose + * RC_CTL/TYPE mask entry matches that of the sequence. The driver should not + * attempt to configure a ring whose number is greater than the number + * specified in the Port Control Block (PCB). It is an error to issue the + * configure ring command more than once with the same ring number. The HBA + * returns an error if the driver attempts this. + * + * This routine prepares the mailbox command for configuring IOCB ring. + **/ +void +lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) +{ + int i; + MAILBOX_t *mb = &pmb->u.mb; + struct lpfc_sli *psli; + struct lpfc_sli_ring *pring; + + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + + mb->un.varCfgRing.ring = ring; + mb->un.varCfgRing.maxOrigXchg = 0; + mb->un.varCfgRing.maxRespXchg = 0; + mb->un.varCfgRing.recvNotify = 1; + + psli = &phba->sli; + pring = &psli->sli3_ring[ring]; + mb->un.varCfgRing.numMask = pring->num_mask; + mb->mbxCommand = MBX_CONFIG_RING; + mb->mbxOwner = OWN_HOST; + + /* Is this ring configured for a specific profile */ + if (pring->prt[0].profile) { + mb->un.varCfgRing.profile = pring->prt[0].profile; + return; + } + + /* Otherwise we setup specific rctl / type masks for this ring */ + for (i = 0; i < pring->num_mask; i++) { + mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl; + if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ) + mb->un.varCfgRing.rrRegs[i].rmask = 0xff; + else + mb->un.varCfgRing.rrRegs[i].rmask = 0xfe; + mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type; + mb->un.varCfgRing.rrRegs[i].tmask = 0xff; + } + + return; +} + +/** + * lpfc_config_port - Prepare a mailbox command for configuring port + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * The configure port mailbox command is used to identify the Port Control + * Block (PCB) in the driver memory. After this command is issued, the + * driver must not access the mailbox in the HBA without first resetting + * the HBA. The HBA may copy the PCB information to internal storage for + * subsequent use; the driver can not change the PCB information unless it + * resets the HBA. + * + * This routine prepares the mailbox command for configuring port. 
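+ *
+ * A simplified setup-time sketch; the polled issue flag MBX_POLL and the
+ * MBX_SUCCESS return code are assumed here and error handling is trimmed:
+ *
+ *   LPFC_MBOXQ_t *pmb;
+ *   int rc;
+ *
+ *   pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ *   if (pmb) {
+ *           lpfc_config_port(phba, pmb);
+ *           rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ *           if (rc != MBX_SUCCESS)
+ *                   ;                     // retry in a lower SLI mode
+ *           mempool_free(pmb, phba->mbox_mem_pool);
+ *   }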
+ **/ +void +lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr; + MAILBOX_t *mb = &pmb->u.mb; + dma_addr_t pdma_addr; + uint32_t bar_low, bar_high; + size_t offset; + struct lpfc_hgp hgp; + int i; + uint32_t pgp_offset; + + memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); + mb->mbxCommand = MBX_CONFIG_PORT; + mb->mbxOwner = OWN_HOST; + + mb->un.varCfgPort.pcbLen = sizeof(PCB_t); + + offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt; + pdma_addr = phba->slim2p.phys + offset; + mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr); + mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr); + + /* Always Host Group Pointer is in SLIM */ + mb->un.varCfgPort.hps = 1; + + /* If HBA supports SLI=3 ask for it */ + + if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) { + if (phba->cfg_enable_bg) + mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ + mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ + mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ + mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); + if (phba->max_vpi && phba->cfg_enable_npiv && + phba->vpd.sli3Feat.cmv) { + mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI; + mb->un.varCfgPort.cmv = 1; + } else + mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; + } else + phba->sli_rev = LPFC_SLI_REV2; + mb->un.varCfgPort.sli_mode = phba->sli_rev; + + /* If this is an SLI3 port, configure async status notification. */ + if (phba->sli_rev == LPFC_SLI_REV3) + mb->un.varCfgPort.casabt = 1; + + /* Now setup pcb */ + phba->pcb->type = TYPE_NATIVE_SLI2; + phba->pcb->feature = FEATURE_INITIAL_SLI2; + + /* Setup Mailbox pointers */ + phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE; + offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt; + pdma_addr = phba->slim2p.phys + offset; + phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr); + phba->pcb->mbAddrLow = putPaddrLow(pdma_addr); + + /* + * Setup Host Group ring pointer. + * + * For efficiency reasons, the ring get/put pointers can be + * placed in adapter memory (SLIM) rather than in host memory. + * This allows firmware to avoid PCI reads/writes when updating + * and checking pointers. + * + * The firmware recognizes the use of SLIM memory by comparing + * the address of the get/put pointers structure with that of + * the SLIM BAR (BAR0). + * + * Caution: be sure to use the PCI config space value of BAR0/BAR1 + * (the hardware's view of the base address), not the OS's + * value of pci_resource_start() as the OS value may be a cookie + * for ioremap/iomap. + */ + + + pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low); + pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high); + + /* + * Set up HGP - Port Memory + * + * The port expects the host get/put pointers to reside in memory + * following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes) + * area of SLIM. In SLI-2 mode, there's an additional 16 reserved + * words (0x40 bytes). This area is not reserved if HBQs are + * configured in SLI-3. + * + * CR0Put - SLI2(no HBQs) = 0xc0, With HBQs = 0x80 + * RR0Get 0xc4 0x84 + * CR1Put 0xc8 0x88 + * RR1Get 0xcc 0x8c + * CR2Put 0xd0 0x90 + * RR2Get 0xd4 0x94 + * CR3Put 0xd8 0x98 + * RR3Get 0xdc 0x9c + * + * Reserved 0xa0-0xbf + * If HBQs configured: + * HBQ 0 Put ptr 0xc0 + * HBQ 1 Put ptr 0xc4 + * HBQ 2 Put ptr 0xc8 + * ...... 
+ * HBQ(M-1)Put Pointer 0xc0+(M-1)*4 + * + */ + + if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) { + phba->host_gp = (struct lpfc_hgp __iomem *) + &phba->mbox->us.s2.host[0]; + phba->hbq_put = NULL; + offset = (uint8_t *)&phba->mbox->us.s2.host - + (uint8_t *)phba->slim2p.virt; + pdma_addr = phba->slim2p.phys + offset; + phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr); + phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr); + } else { + /* Always Host Group Pointer is in SLIM */ + mb->un.varCfgPort.hps = 1; + + if (phba->sli_rev == 3) { + phba->host_gp = &mb_slim->us.s3.host[0]; + phba->hbq_put = &mb_slim->us.s3.hbq_put[0]; + } else { + phba->host_gp = &mb_slim->us.s2.host[0]; + phba->hbq_put = NULL; + } + + /* mask off BAR0's flag bits 0 - 3 */ + phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) + + (void __iomem *)phba->host_gp - + (void __iomem *)phba->MBslimaddr; + if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64) + phba->pcb->hgpAddrHigh = bar_high; + else + phba->pcb->hgpAddrHigh = 0; + /* write HGP data to SLIM at the required longword offset */ + memset(&hgp, 0, sizeof(struct lpfc_hgp)); + + for (i = 0; i < phba->sli.num_rings; i++) { + lpfc_memcpy_to_slim(phba->host_gp + i, &hgp, + sizeof(*phba->host_gp)); + } + } + + /* Setup Port Group offset */ + if (phba->sli_rev == 3) + pgp_offset = offsetof(struct lpfc_sli2_slim, + mbx.us.s3_pgp.port); + else + pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port); + pdma_addr = phba->slim2p.phys + pgp_offset; + phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr); + phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr); + + /* Use callback routine to setp rings in the pcb */ + lpfc_config_pcb_setup(phba); + + /* special handling for LC HBAs */ + if (lpfc_is_LC_HBA(phba->pcidev->device)) { + uint32_t hbainit[5]; + + lpfc_hba_init(phba, hbainit); + + memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20); + } + + /* Swap PCB if needed */ + lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t)); +} + +/** + * lpfc_kill_board - Prepare a mailbox command for killing board + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * The kill board mailbox command is used to tell firmware to perform a + * graceful shutdown of a channel on a specified board to prepare for reset. + * When the kill board mailbox command is received, the ER3 bit is set to 1 + * in the Host Status register and the ER Attention bit is set to 1 in the + * Host Attention register of the HBA function that received the kill board + * command. + * + * This routine prepares the mailbox command for killing the board in + * preparation for a graceful shutdown. + **/ +void +lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +{ + MAILBOX_t *mb = &pmb->u.mb; + + memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); + mb->mbxCommand = MBX_KILL_BOARD; + mb->mbxOwner = OWN_HOST; + return; +} + +/** + * lpfc_mbox_put - Put a mailbox cmd into the tail of driver's mailbox queue + * @phba: pointer to lpfc hba data structure. + * @mbq: pointer to the driver internal queue element for mailbox command. + * + * Driver maintains a internal mailbox command queue implemented as a linked + * list. When a mailbox command is issued, it shall be put into the mailbox + * command queue such that they shall be processed orderly as HBA can process + * one mailbox command at a time. 
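+ *
+ * Conceptually the queue is a simple FIFO; a rough sketch of the pairing
+ * with lpfc_mbox_get, which callers are expected to do while holding
+ * phba->hbalock:
+ *
+ *   unsigned long iflag;
+ *   LPFC_MBOXQ_t *next;
+ *
+ *   spin_lock_irqsave(&phba->hbalock, iflag);
+ *   lpfc_mbox_put(phba, mbq);            // queue a prepared command
+ *   next = lpfc_mbox_get(phba);          // later: next command to issue
+ *   spin_unlock_irqrestore(&phba->hbalock, iflag);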
+ **/ +void +lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) +{ + struct lpfc_sli *psli; + + psli = &phba->sli; + + list_add_tail(&mbq->list, &psli->mboxq); + + psli->mboxq_cnt++; + + return; +} + +/** + * lpfc_mbox_get - Remove a mailbox cmd from the head of driver's mailbox queue + * @phba: pointer to lpfc hba data structure. + * + * Driver maintains a internal mailbox command queue implemented as a linked + * list. When a mailbox command is issued, it shall be put into the mailbox + * command queue such that they shall be processed orderly as HBA can process + * one mailbox command at a time. After HBA finished processing a mailbox + * command, the driver will remove a pending mailbox command from the head of + * the mailbox command queue and send to the HBA for processing. + * + * Return codes + * pointer to the driver internal queue element for mailbox command. + **/ +LPFC_MBOXQ_t * +lpfc_mbox_get(struct lpfc_hba * phba) +{ + LPFC_MBOXQ_t *mbq = NULL; + struct lpfc_sli *psli = &phba->sli; + + list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list); + if (mbq) + psli->mboxq_cnt--; + + return mbq; +} + +/** + * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list + * @phba: pointer to lpfc hba data structure. + * @mbq: pointer to the driver internal queue element for mailbox command. + * + * This routine put the completed mailbox command into the mailbox command + * complete list. This is the unlocked version of the routine. The mailbox + * complete list is used by the driver worker thread to process mailbox + * complete callback functions outside the driver interrupt handler. + **/ +void +__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq) +{ + list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); +} + +/** + * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list + * @phba: pointer to lpfc hba data structure. + * @mbq: pointer to the driver internal queue element for mailbox command. + * + * This routine put the completed mailbox command into the mailbox command + * complete list. This is the locked version of the routine. The mailbox + * complete list is used by the driver worker thread to process mailbox + * complete callback functions outside the driver interrupt handler. + **/ +void +lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq) +{ + unsigned long iflag; + + /* This function expects to be called from interrupt context */ + spin_lock_irqsave(&phba->hbalock, iflag); + __lpfc_mbox_cmpl_put(phba, mbq); + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; +} + +/** + * lpfc_mbox_cmd_check - Check the validality of a mailbox command + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to the driver internal queue element for mailbox command. + * + * This routine is to check whether a mailbox command is valid to be issued. + * This check will be performed by both the mailbox issue API when a client + * is to issue a mailbox command to the mailbox transport. + * + * Return 0 - pass the check, -ENODEV - fail the check + **/ +int +lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + /* Mailbox command that have a completion handler must also have a + * vport specified. 
+ */ + if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl && + mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) { + if (!mboxq->vport) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT, + "1814 Mbox x%x failed, no vport\n", + mboxq->u.mb.mbxCommand); + dump_stack(); + return -ENODEV; + } + } + return 0; +} + +/** + * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command + * @phba: pointer to lpfc hba data structure. + * + * This routine is to check whether the HBA device is ready for posting a + * mailbox command. It is used by the mailbox transport API at the time the + * to post a mailbox command to the device. + * + * Return 0 - pass the check, -ENODEV - fail the check + **/ +int +lpfc_mbox_dev_check(struct lpfc_hba *phba) +{ + /* If the PCI channel is in offline state, do not issue mbox */ + if (unlikely(pci_channel_offline(phba->pcidev))) + return -ENODEV; + + /* If the HBA is in error state, do not issue mbox */ + if (phba->link_state == LPFC_HBA_ERROR) + return -ENODEV; + + return 0; +} + +/** + * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to the driver internal queue element for mailbox command. + * + * This routine retrieves the proper timeout value according to the mailbox + * command code. + * + * Return codes + * Timeout value to be used for the given mailbox command + **/ +int +lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + MAILBOX_t *mbox = &mboxq->u.mb; + uint8_t subsys, opcode; + + switch (mbox->mbxCommand) { + case MBX_WRITE_NV: /* 0x03 */ + case MBX_DUMP_MEMORY: /* 0x17 */ + case MBX_UPDATE_CFG: /* 0x1B */ + case MBX_DOWN_LOAD: /* 0x1C */ + case MBX_DEL_LD_ENTRY: /* 0x1D */ + case MBX_WRITE_VPARMS: /* 0x32 */ + case MBX_LOAD_AREA: /* 0x81 */ + case MBX_WRITE_WWN: /* 0x98 */ + case MBX_LOAD_EXP_ROM: /* 0x9C */ + case MBX_ACCESS_VDATA: /* 0xA5 */ + return LPFC_MBOX_TMO_FLASH_CMD; + case MBX_SLI4_CONFIG: /* 0x9b */ + subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq); + opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq); + if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) { + switch (opcode) { + case LPFC_MBOX_OPCODE_READ_OBJECT: + case LPFC_MBOX_OPCODE_WRITE_OBJECT: + case LPFC_MBOX_OPCODE_READ_OBJECT_LIST: + case LPFC_MBOX_OPCODE_DELETE_OBJECT: + case LPFC_MBOX_OPCODE_GET_PROFILE_LIST: + case LPFC_MBOX_OPCODE_SET_ACT_PROFILE: + case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG: + case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG: + case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG: + case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES: + case LPFC_MBOX_OPCODE_SEND_ACTIVATION: + case LPFC_MBOX_OPCODE_RESET_LICENSES: + case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG: + case LPFC_MBOX_OPCODE_GET_VPD_DATA: + case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG: + return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO; + } + } + if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) { + switch (opcode) { + case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS: + return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO; + } + } + return LPFC_MBOX_SLI4_CONFIG_TMO; + } + return LPFC_MBOX_TMO; +} + +/** + * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command + * @mbox: pointer to lpfc mbox command. + * @sgentry: sge entry index. + * @phyaddr: physical address for the sge + * @length: Length of the sge. + * + * This routine sets up an entry in the non-embedded mailbox command at the sge + * index location. 
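+ *
+ * For example, a hand-built non-embedded command might map one DMA page
+ * into entry 0, mirroring what lpfc_sli4_config does internally (sketch
+ * only, error handling trimmed):
+ *
+ *   void *viraddr;
+ *   dma_addr_t phyaddr;
+ *
+ *   viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+ *                                &phyaddr, GFP_KERNEL);
+ *   if (viraddr)
+ *           lpfc_sli4_mbx_sge_set(mbox, 0, phyaddr, SLI4_PAGE_SIZE);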
+ **/ +void +lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry, + dma_addr_t phyaddr, uint32_t length) +{ + struct lpfc_mbx_nembed_cmd *nembed_sge; + + nembed_sge = (struct lpfc_mbx_nembed_cmd *) + &mbox->u.mqe.un.nembed_cmd; + nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr); + nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr); + nembed_sge->sge[sgentry].length = length; +} + +/** + * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command + * @mbox: pointer to lpfc mbox command. + * @sgentry: sge entry index. + * @sge: pointer to lpfc mailbox sge to load into. + * + * This routine gets an entry from the non-embedded mailbox command at the sge + * index location. + **/ +void +lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry, + struct lpfc_mbx_sge *sge) +{ + struct lpfc_mbx_nembed_cmd *nembed_sge; + + nembed_sge = (struct lpfc_mbx_nembed_cmd *) + &mbox->u.mqe.un.nembed_cmd; + sge->pa_lo = nembed_sge->sge[sgentry].pa_lo; + sge->pa_hi = nembed_sge->sge[sgentry].pa_hi; + sge->length = nembed_sge->sge[sgentry].length; +} + +/** + * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command + * @phba: pointer to lpfc hba data structure. + * @mbox: pointer to lpfc mbox command. + * + * This routine cleans up and releases an SLI4 mailbox command that was + * configured using lpfc_sli4_config. It accounts for the embedded and + * non-embedded config types. + **/ +void +lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox) +{ + struct lpfc_mbx_sli4_config *sli4_cfg; + struct lpfc_mbx_sge sge; + dma_addr_t phyaddr; + uint32_t sgecount, sgentry; + + sli4_cfg = &mbox->u.mqe.un.sli4_config; + + /* For embedded mbox command, just free the mbox command */ + if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) { + mempool_free(mbox, phba->mbox_mem_pool); + return; + } + + /* For non-embedded mbox command, we need to free the pages first */ + sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr); + /* There is nothing we can do if there is no sge address array */ + if (unlikely(!mbox->sge_array)) { + mempool_free(mbox, phba->mbox_mem_pool); + return; + } + /* Each non-embedded DMA memory was allocated in the length of a page */ + for (sgentry = 0; sgentry < sgecount; sgentry++) { + lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge); + phyaddr = getPaddr(sge.pa_hi, sge.pa_lo); + dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, + mbox->sge_array->addr[sgentry], phyaddr); + } + /* Free the sge address array memory */ + kfree(mbox->sge_array); + /* Finally, free the mailbox command itself */ + mempool_free(mbox, phba->mbox_mem_pool); +} + +/** + * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command + * @phba: pointer to lpfc hba data structure. + * @mbox: pointer to lpfc mbox command. + * @subsystem: The sli4 config sub mailbox subsystem. + * @opcode: The sli4 config sub mailbox command opcode. + * @length: Length of the sli4 config mailbox command (including sub-header). + * @emb: True if embedded mbox command should be setup. + * + * This routine sets up the header fields of SLI4 specific mailbox command + * for sending IOCTL command. + * + * Return: the actual length of the mbox command allocated (mostly useful + * for none embedded mailbox command). 
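+ *
+ * A sketch of the embedded form; the opcode and payload length below are
+ * chosen only for illustration:
+ *
+ *   uint32_t req_len;
+ *
+ *   req_len = sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
+ *   lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ *                    LPFC_MBOX_OPCODE_GET_PROFILE_LIST, req_len,
+ *                    LPFC_SLI4_MBX_EMBED);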
+ **/ +int +lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, + uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb) +{ + struct lpfc_mbx_sli4_config *sli4_config; + union lpfc_sli4_cfg_shdr *cfg_shdr = NULL; + uint32_t alloc_len; + uint32_t resid_len; + uint32_t pagen, pcount; + void *viraddr; + dma_addr_t phyaddr; + + /* Set up SLI4 mailbox command header fields */ + memset(mbox, 0, sizeof(*mbox)); + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG); + + /* Set up SLI4 ioctl command header fields */ + sli4_config = &mbox->u.mqe.un.sli4_config; + + /* Setup for the embedded mbox command */ + if (emb) { + /* Set up main header fields */ + bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1); + sli4_config->header.cfg_mhdr.payload_length = length; + /* Set up sub-header fields following main header */ + bf_set(lpfc_mbox_hdr_opcode, + &sli4_config->header.cfg_shdr.request, opcode); + bf_set(lpfc_mbox_hdr_subsystem, + &sli4_config->header.cfg_shdr.request, subsystem); + sli4_config->header.cfg_shdr.request.request_length = + length - LPFC_MBX_CMD_HDR_LENGTH; + return length; + } + + /* Setup for the non-embedded mbox command */ + pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE; + pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ? + LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount; + /* Allocate record for keeping SGE virtual addresses */ + mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt), + GFP_KERNEL); + if (!mbox->sge_array) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2527 Failed to allocate non-embedded SGE " + "array.\n"); + return 0; + } + for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) { + /* The DMA memory is always allocated in the length of a + * page even though the last SGE might not fill up to a + * page, this is used as a priori size of SLI4_PAGE_SIZE for + * the later DMA memory free. + */ + viraddr = dma_alloc_coherent(&phba->pcidev->dev, + SLI4_PAGE_SIZE, &phyaddr, + GFP_KERNEL); + /* In case of malloc fails, proceed with whatever we have */ + if (!viraddr) + break; + mbox->sge_array->addr[pagen] = viraddr; + /* Keep the first page for later sub-header construction */ + if (pagen == 0) + cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr; + resid_len = length - alloc_len; + if (resid_len > SLI4_PAGE_SIZE) { + lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr, + SLI4_PAGE_SIZE); + alloc_len += SLI4_PAGE_SIZE; + } else { + lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr, + resid_len); + alloc_len = length; + } + } + + /* Set up main header fields in mailbox command */ + sli4_config->header.cfg_mhdr.payload_length = alloc_len; + bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen); + + /* Set up sub-header fields into the first page */ + if (pagen > 0) { + bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode); + bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem); + cfg_shdr->request.request_length = + alloc_len - sizeof(union lpfc_sli4_cfg_shdr); + } + /* The sub-header is in DMA memory, which needs endian converstion */ + if (cfg_shdr) + lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr, + sizeof(union lpfc_sli4_cfg_shdr)); + return alloc_len; +} + +/** + * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent. + * @phba: pointer to lpfc hba data structure. + * @mbox: pointer to an allocated lpfc mbox resource. + * @exts_count: the number of extents, if required, to allocate. + * @rsrc_type: the resource extent type. + * @emb: true if LPFC_SLI4_MBX_EMBED. false if LPFC_SLI4_MBX_NEMBED. 
+ * + * This routine completes the subcommand header for SLI4 resource extent + * mailbox commands. It is called after lpfc_sli4_config. The caller must + * pass an allocated mailbox and the attributes required to initialize the + * mailbox correctly. + * + * Return: the actual length of the mbox command allocated. + **/ +int +lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox, + uint16_t exts_count, uint16_t rsrc_type, bool emb) +{ + uint8_t opcode = 0; + struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL; + void *virtaddr = NULL; + + /* Set up SLI4 ioctl command header fields */ + if (emb == LPFC_SLI4_MBX_NEMBED) { + /* Get the first SGE entry from the non-embedded DMA memory */ + virtaddr = mbox->sge_array->addr[0]; + if (virtaddr == NULL) + return 1; + n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; + } + + /* + * The resource type is common to all extent Opcodes and resides in the + * same position. + */ + if (emb == LPFC_SLI4_MBX_EMBED) + bf_set(lpfc_mbx_alloc_rsrc_extents_type, + &mbox->u.mqe.un.alloc_rsrc_extents.u.req, + rsrc_type); + else { + /* This is DMA data. Byteswap is required. */ + bf_set(lpfc_mbx_alloc_rsrc_extents_type, + n_rsrc_extnt, rsrc_type); + lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4, + &n_rsrc_extnt->word4, + sizeof(uint32_t)); + } + + /* Complete the initialization for the particular Opcode. */ + opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox); + switch (opcode) { + case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT: + if (emb == LPFC_SLI4_MBX_EMBED) + bf_set(lpfc_mbx_alloc_rsrc_extents_cnt, + &mbox->u.mqe.un.alloc_rsrc_extents.u.req, + exts_count); + else + bf_set(lpfc_mbx_alloc_rsrc_extents_cnt, + n_rsrc_extnt, exts_count); + break; + case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT: + case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO: + case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT: + /* Initialization is complete.*/ + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2929 Resource Extent Opcode x%x is " + "unsupported\n", opcode); + return 1; + } + + return 0; +} + +/** + * lpfc_sli_config_mbox_subsys_get - Get subsystem from a sli_config mbox cmd + * @phba: pointer to lpfc hba data structure. + * @mbox: pointer to lpfc mbox command queue entry. + * + * This routine gets the subsystem from a SLI4 specific SLI_CONFIG mailbox + * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if the + * sub-header is not present, subsystem LPFC_MBOX_SUBSYSTEM_NA (0x0) shall + * be returned. + **/ +uint8_t +lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) +{ + struct lpfc_mbx_sli4_config *sli4_cfg; + union lpfc_sli4_cfg_shdr *cfg_shdr; + + if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG) + return LPFC_MBOX_SUBSYSTEM_NA; + sli4_cfg = &mbox->u.mqe.un.sli4_config; + + /* For embedded mbox command, get opcode from embedded sub-header*/ + if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) { + cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; + return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request); + } + + /* For non-embedded mbox command, get opcode from first dma page */ + if (unlikely(!mbox->sge_array)) + return LPFC_MBOX_SUBSYSTEM_NA; + cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0]; + return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request); +} + +/** + * lpfc_sli_config_mbox_opcode_get - Get opcode from a sli_config mbox cmd + * @phba: pointer to lpfc hba data structure. + * @mbox: pointer to lpfc mbox command queue entry. 
+ * + * This routine gets the opcode from a SLI4 specific SLI_CONFIG mailbox + * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if + * the sub-header is not present, opcode LPFC_MBOX_OPCODE_NA (0x0) be + * returned. + **/ +uint8_t +lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) +{ + struct lpfc_mbx_sli4_config *sli4_cfg; + union lpfc_sli4_cfg_shdr *cfg_shdr; + + if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG) + return LPFC_MBOX_OPCODE_NA; + sli4_cfg = &mbox->u.mqe.un.sli4_config; + + /* For embedded mbox command, get opcode from embedded sub-header*/ + if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) { + cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; + return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request); + } + + /* For non-embedded mbox command, get opcode from first dma page */ + if (unlikely(!mbox->sge_array)) + return LPFC_MBOX_OPCODE_NA; + cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0]; + return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request); +} + +/** + * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to lpfc mbox command. + * @fcf_index: index to fcf table. + * + * This routine routine allocates and constructs non-embedded mailbox command + * for reading a FCF table entry referred by @fcf_index. + * + * Return: pointer to the mailbox command constructed if successful, otherwise + * NULL. + **/ +int +lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba, + struct lpfcMboxq *mboxq, + uint16_t fcf_index) +{ + void *virt_addr; + uint8_t *bytep; + struct lpfc_mbx_sge sge; + uint32_t alloc_len, req_len; + struct lpfc_mbx_read_fcf_tbl *read_fcf; + + if (!mboxq) + return -ENOMEM; + + req_len = sizeof(struct fcf_record) + + sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t); + + /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */ + alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len, + LPFC_SLI4_MBX_NEMBED); + + if (alloc_len < req_len) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "0291 Allocated DMA memory size (x%x) is " + "less than the requested DMA memory " + "size (x%x)\n", alloc_len, req_len); + return -ENOMEM; + } + + /* Get the first SGE entry from the non-embedded DMA memory. This + * routine only uses a single SGE. + */ + lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); + virt_addr = mboxq->sge_array->addr[0]; + read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; + + /* Set up command fields */ + bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index); + /* Perform necessary endian conversion */ + bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); + lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t)); + + return 0; +} + +/** + * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to lpfc mbox command. + * + * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES + * mailbox command. + **/ +void +lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) +{ + /* Set up SLI4 mailbox command header fields */ + memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); + bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS); + + /* Set up host requested features. 
*/ + bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); + bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1); + + /* Enable DIF (block guard) only if configured to do so. */ + if (phba->cfg_enable_bg) + bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1); + + /* Enable NPIV only if configured to do so. */ + if (phba->max_vpi && phba->cfg_enable_npiv) + bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1); + + if (phba->nvmet_support) { + bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1); + /* iaab/iaar NOT set for now */ + bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0); + bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0); + } + + /* Enable Application Services Header for appheader VMID */ + if (phba->cfg_vmid_app_header) { + bf_set(lpfc_mbx_rq_ftr_rq_ashdr, &mboxq->u.mqe.un.req_ftrs, 1); + bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 1); + } + return; +} + +/** + * lpfc_init_vfi - Initialize the INIT_VFI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @vport: Vport associated with the VF. + * + * This routine initializes @mbox to all zeros and then fills in the mailbox + * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI + * in the context of an FCF. The driver issues this command to setup a VFI + * before issuing a FLOGI to login to the VSAN. The driver should also issue a + * REG_VFI after a successful VSAN login. + **/ +void +lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport) +{ + struct lpfc_mbx_init_vfi *init_vfi; + + memset(mbox, 0, sizeof(*mbox)); + mbox->vport = vport; + init_vfi = &mbox->u.mqe.un.init_vfi; + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI); + bf_set(lpfc_init_vfi_vr, init_vfi, 1); + bf_set(lpfc_init_vfi_vt, init_vfi, 1); + bf_set(lpfc_init_vfi_vp, init_vfi, 1); + bf_set(lpfc_init_vfi_vfi, init_vfi, + vport->phba->sli4_hba.vfi_ids[vport->vfi]); + bf_set(lpfc_init_vfi_vpi, init_vfi, + vport->phba->vpi_ids[vport->vpi]); + bf_set(lpfc_init_vfi_fcfi, init_vfi, + vport->phba->fcf.fcfi); +} + +/** + * lpfc_reg_vfi - Initialize the REG_VFI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @vport: vport associated with the VF. + * @phys: BDE DMA bus address used to send the service parameters to the HBA. + * + * This routine initializes @mbox to all zeros and then fills in the mailbox + * fields from @vport, and uses @buf as a DMAable buffer to send the vport's + * fc service parameters to the HBA for this VFI. REG_VFI configures virtual + * fabrics identified by VFI in the context of an FCF. 
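+ *
+ * A rough caller sketch; dmabuf stands for a DMA buffer holding the
+ * vport's service parameters and reg_vfi_cmpl_fn for whatever completion
+ * handler the caller installs (both names are illustrative):
+ *
+ *   lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+ *   mboxq->vport = vport;
+ *   mboxq->mbox_cmpl = reg_vfi_cmpl_fn;
+ *   if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
+ *           mempool_free(mboxq, phba->mbox_mem_pool);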
+ **/ +void +lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) +{ + struct lpfc_mbx_reg_vfi *reg_vfi; + struct lpfc_hba *phba = vport->phba; + uint8_t bbscn_fabric = 0, bbscn_max = 0, bbscn_def = 0; + + memset(mbox, 0, sizeof(*mbox)); + reg_vfi = &mbox->u.mqe.un.reg_vfi; + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI); + bf_set(lpfc_reg_vfi_vp, reg_vfi, 1); + bf_set(lpfc_reg_vfi_vfi, reg_vfi, + phba->sli4_hba.vfi_ids[vport->vfi]); + bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi); + bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]); + memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name)); + reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]); + reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); + reg_vfi->e_d_tov = phba->fc_edtov; + reg_vfi->r_a_tov = phba->fc_ratov; + if (phys) { + reg_vfi->bde.addrHigh = putPaddrHigh(phys); + reg_vfi->bde.addrLow = putPaddrLow(phys); + reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); + reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + } + bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID); + + /* Only FC supports upd bit */ + if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) && + (vport->fc_flag & FC_VFI_REGISTERED) && + (!phba->fc_topology_changed)) + bf_set(lpfc_reg_vfi_upd, reg_vfi, 1); + + bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 0); + bf_set(lpfc_reg_vfi_bbscn, reg_vfi, 0); + bbscn_fabric = (phba->fc_fabparam.cmn.bbRcvSizeMsb >> 4) & 0xF; + + if (phba->bbcredit_support && phba->cfg_enable_bbcr && + bbscn_fabric != 0) { + bbscn_max = bf_get(lpfc_bbscn_max, + &phba->sli4_hba.bbscn_params); + if (bbscn_fabric <= bbscn_max) { + bbscn_def = bf_get(lpfc_bbscn_def, + &phba->sli4_hba.bbscn_params); + + if (bbscn_fabric > bbscn_def) + bf_set(lpfc_reg_vfi_bbscn, reg_vfi, + bbscn_fabric); + else + bf_set(lpfc_reg_vfi_bbscn, reg_vfi, bbscn_def); + + bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 1); + } + } + lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX, + "3134 Register VFI, mydid:x%x, fcfi:%d, " + " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x" + " port_state:x%x topology chg:%d bbscn_fabric :%d\n", + vport->fc_myDID, + phba->fcf.fcfi, + phba->sli4_hba.vfi_ids[vport->vfi], + phba->vpi_ids[vport->vpi], + reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag, + vport->port_state, phba->fc_topology_changed, + bbscn_fabric); +} + +/** + * lpfc_init_vpi - Initialize the INIT_VPI mailbox command + * @phba: pointer to the hba structure to init the VPI for. + * @mbox: pointer to lpfc mbox command to initialize. + * @vpi: VPI to be initialized. + * + * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the + * command to activate a virtual N_Port. The HBA assigns a MAC address to use + * with the virtual N Port. The SLI Host issues this command before issuing a + * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a + * successful virtual NPort login. + **/ +void +lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi) +{ + memset(mbox, 0, sizeof(*mbox)); + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI); + bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, + phba->vpi_ids[vpi]); + bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi, + phba->sli4_hba.vfi_ids[phba->pport->vfi]); +} + +/** + * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @vport: vport associated with the VF. 
+ * + * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric + * (logical NPort) into the inactive state. The SLI Host must have logged out + * and unregistered all remote N_Ports to abort any activity on the virtual + * fabric. The SLI Port posts the mailbox response after marking the virtual + * fabric inactive. + **/ +void +lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport) +{ + memset(mbox, 0, sizeof(*mbox)); + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI); + bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, + vport->phba->sli4_hba.vfi_ids[vport->vfi]); +} + +/** + * lpfc_sli4_dump_cfg_rg23 - Dump sli4 port config region 23 + * @phba: pointer to the hba structure containing. + * @mbox: pointer to lpfc mbox command to initialize. + * + * This function create a SLI4 dump mailbox command to dump configure + * region 23. + **/ +int +lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox) +{ + struct lpfc_dmabuf *mp = NULL; + MAILBOX_t *mb; + int rc; + + memset(mbox, 0, sizeof(*mbox)); + mb = &mbox->u.mb; + + rc = lpfc_mbox_rsrc_prep(phba, mbox); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, + "2569 %s: memory allocation failed\n", + __func__); + return 1; + } + + mb->mbxCommand = MBX_DUMP_MEMORY; + mb->un.varDmp.type = DMP_NV_PARAMS; + mb->un.varDmp.region_id = DMP_REGION_23; + mb->un.varDmp.sli4_length = DMP_RGN23_SIZE; + mp = mbox->ctx_buf; + mb->un.varWords[3] = putPaddrLow(mp->phys); + mb->un.varWords[4] = putPaddrHigh(mp->phys); + return 0; +} + +static void +lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + MAILBOX_t *mb; + int rc = FAILURE; + struct lpfc_rdp_context *rdp_context = + (struct lpfc_rdp_context *)(mboxq->ctx_ndlp); + + mb = &mboxq->u.mb; + if (mb->mbxStatus) + goto mbx_failed; + + memcpy(&rdp_context->link_stat, &mb->un.varRdLnk, sizeof(READ_LNK_VAR)); + + rc = SUCCESS; + +mbx_failed: + lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); + rdp_context->cmpl(phba, rdp_context, rc); +} + +static void +lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) +{ + struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->ctx_buf; + struct lpfc_rdp_context *rdp_context = + (struct lpfc_rdp_context *)(mbox->ctx_ndlp); + + if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) + goto error_mbox_free; + + lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2, + DMP_SFF_PAGE_A2_SIZE); + + lpfc_read_lnk_stat(phba, mbox); + mbox->vport = rdp_context->ndlp->vport; + + /* Save the dma buffer for cleanup in the final completion. 
*/ + mbox->ctx_buf = mp; + mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat; + mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) + goto error_mbox_free; + + return; + +error_mbox_free: + lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); + rdp_context->cmpl(phba, rdp_context, FAILURE); +} + +void +lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) +{ + int rc; + struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); + struct lpfc_rdp_context *rdp_context = + (struct lpfc_rdp_context *)(mbox->ctx_ndlp); + + if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) + goto error; + + lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0, + DMP_SFF_PAGE_A0_SIZE); + + memset(mbox, 0, sizeof(*mbox)); + + memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE); + INIT_LIST_HEAD(&mp->list); + + /* save address for completion */ + mbox->ctx_buf = mp; + mbox->vport = rdp_context->ndlp->vport; + + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); + bf_set(lpfc_mbx_memory_dump_type3_type, + &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); + bf_set(lpfc_mbx_memory_dump_type3_link, + &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); + bf_set(lpfc_mbx_memory_dump_type3_page_no, + &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); + bf_set(lpfc_mbx_memory_dump_type3_length, + &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE); + mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); + mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); + + mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2; + mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + goto error; + + return; + +error: + lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); + rdp_context->cmpl(phba, rdp_context, FAILURE); +} + + +/* + * lpfc_sli4_dump_page_a0 - Dump sli4 read SFP Diagnostic. + * @phba: pointer to the hba structure containing. + * @mbox: pointer to lpfc mbox command to initialize. + * + * This function create a SLI4 dump mailbox command to dump configure + * type 3 page 0xA0. + */ +int +lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox) +{ + int rc; + struct lpfc_dmabuf *mp = NULL; + + memset(mbox, 0, sizeof(*mbox)); + + rc = lpfc_mbox_rsrc_prep(phba, mbox); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, + "3569 dump type 3 page 0xA0 allocation failed\n"); + return 1; + } + + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); + bf_set(lpfc_mbx_memory_dump_type3_type, + &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); + bf_set(lpfc_mbx_memory_dump_type3_link, + &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); + bf_set(lpfc_mbx_memory_dump_type3_page_no, + &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0); + bf_set(lpfc_mbx_memory_dump_type3_length, + &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); + + mp = mbox->ctx_buf; + mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); + mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); + + return 0; +} + +/** + * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command + * @phba: pointer to the hba structure containing the FCF index and RQ ID. + * @mbox: pointer to lpfc mbox command to initialize. + * + * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The + * SLI Host uses the command to activate an FCF after it has acquired FCF + * information via a READ_FCF mailbox command. 
This mailbox command also is used + * to indicate where received unsolicited frames from this FCF will be sent. By + * default this routine will set up the FCF to forward all unsolicited frames + * to the RQ ID passed in the @phba. This can be overridden by the caller for + * more complicated setups. + **/ +void +lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox) +{ + struct lpfc_mbx_reg_fcfi *reg_fcfi; + + memset(mbox, 0, sizeof(*mbox)); + reg_fcfi = &mbox->u.mqe.un.reg_fcfi; + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI); + if (phba->nvmet_support == 0) { + bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, + phba->sli4_hba.hdr_rq->queue_id); + /* Match everything - rq_id0 */ + bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_rctl_mask0, reg_fcfi, 0); + + bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); + + /* addr mode is bit wise inverted value of fcf addr_mode */ + bf_set(lpfc_reg_fcfi_mam, reg_fcfi, + (~phba->fcf.addr_mode) & 0x3); + } else { + /* This is ONLY for NVMET MRQ == 1 */ + if (phba->cfg_nvmet_mrq != 1) + return; + + bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, + phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id); + /* Match type FCP - rq_id0 */ + bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, FC_TYPE_FCP); + bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0xff); + bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi, + FC_RCTL_DD_UNSOL_CMD); + + bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, + phba->sli4_hba.hdr_rq->queue_id); + /* Match everything else - rq_id1 */ + bf_set(lpfc_reg_fcfi_type_match1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_type_mask1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_rctl_match1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_rctl_mask1, reg_fcfi, 0); + } + bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); + bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); + bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, + phba->fcf.current_rec.fcf_indx); + if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) { + bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); + bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, + phba->fcf.current_rec.vlan_id); + } +} + +/** + * lpfc_reg_fcfi_mrq - Initialize the REG_FCFI_MRQ mailbox command + * @phba: pointer to the hba structure containing the FCF index and RQ ID. + * @mbox: pointer to lpfc mbox command to initialize. + * @mode: 0 to register FCFI, 1 to register MRQs + * + * The REG_FCFI_MRQ mailbox command supports Fibre Channel Forwarders (FCFs). + * The SLI Host uses the command to activate an FCF after it has acquired FCF + * information via a READ_FCF mailbox command. This mailbox command also is used + * to indicate where received unsolicited frames from this FCF will be sent. By + * default this routine will set up the FCF to forward all unsolicited frames + * to the RQ ID passed in the @phba. This can be overridden by the caller for + * more complicated setups. 
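+ *
+ * Because @mode selects which portion of the command is filled in, NVMET
+ * setup is expected to prepare and issue the command twice, roughly:
+ *
+ *   lpfc_reg_fcfi_mrq(phba, mboxq, 0);   // pass 1: FCFI / VLAN fields
+ *   // issue the mailbox and wait for completion
+ *   lpfc_reg_fcfi_mrq(phba, mboxq, 1);   // pass 2: MRQ filter fields
+ *   // issue the mailbox again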
+ **/ +void +lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode) +{ + struct lpfc_mbx_reg_fcfi_mrq *reg_fcfi; + + /* This is ONLY for MRQ */ + if (phba->cfg_nvmet_mrq <= 1) + return; + + memset(mbox, 0, sizeof(*mbox)); + reg_fcfi = &mbox->u.mqe.un.reg_fcfi_mrq; + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI_MRQ); + if (mode == 0) { + bf_set(lpfc_reg_fcfi_mrq_info_index, reg_fcfi, + phba->fcf.current_rec.fcf_indx); + if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) { + bf_set(lpfc_reg_fcfi_mrq_vv, reg_fcfi, 1); + bf_set(lpfc_reg_fcfi_mrq_vlan_tag, reg_fcfi, + phba->fcf.current_rec.vlan_id); + } + return; + } + + bf_set(lpfc_reg_fcfi_mrq_rq_id0, reg_fcfi, + phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id); + /* Match NVME frames of type FCP (protocol NVME) - rq_id0 */ + bf_set(lpfc_reg_fcfi_mrq_type_match0, reg_fcfi, FC_TYPE_FCP); + bf_set(lpfc_reg_fcfi_mrq_type_mask0, reg_fcfi, 0xff); + bf_set(lpfc_reg_fcfi_mrq_rctl_match0, reg_fcfi, FC_RCTL_DD_UNSOL_CMD); + bf_set(lpfc_reg_fcfi_mrq_rctl_mask0, reg_fcfi, 0xff); + bf_set(lpfc_reg_fcfi_mrq_ptc0, reg_fcfi, 1); + bf_set(lpfc_reg_fcfi_mrq_pt0, reg_fcfi, 1); + + bf_set(lpfc_reg_fcfi_mrq_policy, reg_fcfi, 3); /* NVME connection id */ + bf_set(lpfc_reg_fcfi_mrq_mode, reg_fcfi, 1); + bf_set(lpfc_reg_fcfi_mrq_filter, reg_fcfi, 1); /* rq_id0 */ + bf_set(lpfc_reg_fcfi_mrq_npairs, reg_fcfi, phba->cfg_nvmet_mrq); + + bf_set(lpfc_reg_fcfi_mrq_rq_id1, reg_fcfi, + phba->sli4_hba.hdr_rq->queue_id); + /* Match everything - rq_id1 */ + bf_set(lpfc_reg_fcfi_mrq_type_match1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_mrq_type_mask1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_mrq_rctl_match1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_mrq_rctl_mask1, reg_fcfi, 0); + + bf_set(lpfc_reg_fcfi_mrq_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); + bf_set(lpfc_reg_fcfi_mrq_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); +} + +/** + * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @fcfi: FCFI to be unregistered. + * + * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). + * The SLI Host uses the command to inactivate an FCFI. + **/ +void +lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi) +{ + memset(mbox, 0, sizeof(*mbox)); + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI); + bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi); +} + +/** + * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @ndlp: The nodelist structure that describes the RPI to resume. + * + * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a + * link event. + **/ +void +lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = ndlp->phba; + struct lpfc_mbx_resume_rpi *resume_rpi; + + memset(mbox, 0, sizeof(*mbox)); + resume_rpi = &mbox->u.mqe.un.resume_rpi; + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI); + bf_set(lpfc_resume_rpi_index, resume_rpi, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI); + resume_rpi->event_tag = ndlp->phba->fc_eventTag; +} + diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c new file mode 100644 index 000000000..89cbeba06 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -0,0 +1,756 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. 
* + * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_crtn.h" +#include "lpfc_logmsg.h" + +#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ +#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ +#define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */ +#define LPFC_RRQ_POOL_SIZE 256 /* max elements in non-DMA pool */ +#define LPFC_MBX_POOL_SIZE 256 /* max elements in MBX non-DMA pool */ + +int +lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) { + size_t bytes; + int max_xri = phba->sli4_hba.max_cfg_param.max_xri; + + if (max_xri <= 0) + return -ENOMEM; + bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) * + sizeof(unsigned long); + phba->cfg_rrq_xri_bitmap_sz = bytes; + phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, + bytes); + if (!phba->active_rrq_pool) + return -ENOMEM; + else + return 0; +} + +/** + * lpfc_mem_alloc - create and allocate all PCI and memory pools + * @phba: HBA to allocate pools for + * @align: alignment requirement for blocks; must be a power of two + * + * Description: Creates and allocates PCI pools lpfc_mbuf_pool, + * lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools + * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. + * + * Notes: Not interrupt-safe. Must be called with no locks held. If any + * allocation fails, frees all successfully allocated memory before returning. 
+ * + * Returns: + * 0 on success + * -ENOMEM on failure (if any memory allocations fail) + **/ +int +lpfc_mem_alloc(struct lpfc_hba *phba, int align) +{ + struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; + int i; + + + phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev, + LPFC_BPL_SIZE, + align, 0); + if (!phba->lpfc_mbuf_pool) + goto fail; + + pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE, + sizeof(struct lpfc_dmabuf), + GFP_KERNEL); + if (!pool->elements) + goto fail_free_lpfc_mbuf_pool; + + pool->max_count = 0; + pool->current_count = 0; + for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) { + pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool, + GFP_KERNEL, &pool->elements[i].phys); + if (!pool->elements[i].virt) + goto fail_free_mbuf_pool; + pool->max_count++; + pool->current_count++; + } + + phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MBX_POOL_SIZE, + sizeof(LPFC_MBOXQ_t)); + if (!phba->mbox_mem_pool) + goto fail_free_mbuf_pool; + + phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, + sizeof(struct lpfc_nodelist)); + if (!phba->nlp_mem_pool) + goto fail_free_mbox_pool; + + if (phba->sli_rev == LPFC_SLI_REV4) { + phba->rrq_pool = + mempool_create_kmalloc_pool(LPFC_RRQ_POOL_SIZE, + sizeof(struct lpfc_node_rrq)); + if (!phba->rrq_pool) + goto fail_free_nlp_mem_pool; + phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool", + &phba->pcidev->dev, + LPFC_HDR_BUF_SIZE, align, 0); + if (!phba->lpfc_hrb_pool) + goto fail_free_rrq_mem_pool; + + phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool", + &phba->pcidev->dev, + LPFC_DATA_BUF_SIZE, align, 0); + if (!phba->lpfc_drb_pool) + goto fail_free_hrb_pool; + phba->lpfc_hbq_pool = NULL; + } else { + phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool", + &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0); + if (!phba->lpfc_hbq_pool) + goto fail_free_nlp_mem_pool; + phba->lpfc_hrb_pool = NULL; + phba->lpfc_drb_pool = NULL; + } + + if (phba->cfg_EnableXLane) { + phba->device_data_mem_pool = mempool_create_kmalloc_pool( + LPFC_DEVICE_DATA_POOL_SIZE, + sizeof(struct lpfc_device_data)); + if (!phba->device_data_mem_pool) + goto fail_free_drb_pool; + } else { + phba->device_data_mem_pool = NULL; + } + + return 0; +fail_free_drb_pool: + dma_pool_destroy(phba->lpfc_drb_pool); + phba->lpfc_drb_pool = NULL; + fail_free_hrb_pool: + dma_pool_destroy(phba->lpfc_hrb_pool); + phba->lpfc_hrb_pool = NULL; + fail_free_rrq_mem_pool: + mempool_destroy(phba->rrq_pool); + phba->rrq_pool = NULL; + fail_free_nlp_mem_pool: + mempool_destroy(phba->nlp_mem_pool); + phba->nlp_mem_pool = NULL; + fail_free_mbox_pool: + mempool_destroy(phba->mbox_mem_pool); + phba->mbox_mem_pool = NULL; + fail_free_mbuf_pool: + while (i--) + dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, + pool->elements[i].phys); + kfree(pool->elements); + fail_free_lpfc_mbuf_pool: + dma_pool_destroy(phba->lpfc_mbuf_pool); + phba->lpfc_mbuf_pool = NULL; + fail: + return -ENOMEM; +} + +int +lpfc_nvmet_mem_alloc(struct lpfc_hba *phba) +{ + phba->lpfc_nvmet_drb_pool = + dma_pool_create("lpfc_nvmet_drb_pool", + &phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE, + SGL_ALIGN_SZ, 0); + if (!phba->lpfc_nvmet_drb_pool) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6024 Can't enable NVME Target - no memory\n"); + return -ENOMEM; + } + return 0; +} + +/** + * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc + * @phba: HBA to free memory for + * + * Description: Free the memory allocated by lpfc_mem_alloc routine. 
This + * routine is a the counterpart of lpfc_mem_alloc. + * + * Returns: None + **/ +void +lpfc_mem_free(struct lpfc_hba *phba) +{ + int i; + struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; + struct lpfc_device_data *device_data; + + /* Free HBQ pools */ + lpfc_sli_hbqbuf_free_all(phba); + dma_pool_destroy(phba->lpfc_nvmet_drb_pool); + phba->lpfc_nvmet_drb_pool = NULL; + + dma_pool_destroy(phba->lpfc_drb_pool); + phba->lpfc_drb_pool = NULL; + + dma_pool_destroy(phba->lpfc_hrb_pool); + phba->lpfc_hrb_pool = NULL; + + dma_pool_destroy(phba->lpfc_hbq_pool); + phba->lpfc_hbq_pool = NULL; + + mempool_destroy(phba->rrq_pool); + phba->rrq_pool = NULL; + + /* Free NLP memory pool */ + mempool_destroy(phba->nlp_mem_pool); + phba->nlp_mem_pool = NULL; + if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) { + mempool_destroy(phba->active_rrq_pool); + phba->active_rrq_pool = NULL; + } + + /* Free mbox memory pool */ + mempool_destroy(phba->mbox_mem_pool); + phba->mbox_mem_pool = NULL; + + /* Free MBUF memory pool */ + for (i = 0; i < pool->current_count; i++) + dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, + pool->elements[i].phys); + kfree(pool->elements); + + dma_pool_destroy(phba->lpfc_mbuf_pool); + phba->lpfc_mbuf_pool = NULL; + + /* Free Device Data memory pool */ + if (phba->device_data_mem_pool) { + /* Ensure all objects have been returned to the pool */ + while (!list_empty(&phba->luns)) { + device_data = list_first_entry(&phba->luns, + struct lpfc_device_data, + listentry); + list_del(&device_data->listentry); + mempool_free(device_data, phba->device_data_mem_pool); + } + mempool_destroy(phba->device_data_mem_pool); + } + phba->device_data_mem_pool = NULL; + return; +} + +/** + * lpfc_mem_free_all - Frees all PCI and driver memory + * @phba: HBA to free memory for + * + * Description: Free memory from PCI and driver memory pools and also those + * used : lpfc_sg_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees + * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees + * the VPI bitmask. 
+ * + * Returns: None + **/ +void +lpfc_mem_free_all(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *mbox, *next_mbox; + struct lpfc_dmabuf *mp; + + /* Free memory used in mailbox queue back to mailbox memory pool */ + list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { + mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); + if (mp) { + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + } + list_del(&mbox->list); + mempool_free(mbox, phba->mbox_mem_pool); + } + /* Free memory used in mailbox cmpl list back to mailbox memory pool */ + list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { + mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); + if (mp) { + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + } + list_del(&mbox->list); + mempool_free(mbox, phba->mbox_mem_pool); + } + /* Free the active mailbox command back to the mailbox memory pool */ + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irq(&phba->hbalock); + if (psli->mbox_active) { + mbox = psli->mbox_active; + mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); + if (mp) { + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + } + mempool_free(mbox, phba->mbox_mem_pool); + psli->mbox_active = NULL; + } + + /* Free and destroy all the allocated memory pools */ + lpfc_mem_free(phba); + + /* Free DMA buffer memory pool */ + dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); + phba->lpfc_sg_dma_buf_pool = NULL; + + dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); + phba->lpfc_cmd_rsp_buf_pool = NULL; + + /* Free Congestion Data buffer */ + if (phba->cgn_i) { + dma_free_coherent(&phba->pcidev->dev, + sizeof(struct lpfc_cgn_info), + phba->cgn_i->virt, phba->cgn_i->phys); + kfree(phba->cgn_i); + phba->cgn_i = NULL; + } + + /* Free RX Monitor */ + if (phba->rx_monitor) { + lpfc_rx_monitor_destroy_ring(phba->rx_monitor); + kfree(phba->rx_monitor); + phba->rx_monitor = NULL; + } + + /* Free the iocb lookup array */ + kfree(psli->iocbq_lookup); + psli->iocbq_lookup = NULL; + + return; +} + +/** + * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool + * @phba: HBA which owns the pool to allocate from + * @mem_flags: indicates if this is a priority (MEM_PRI) allocation + * @handle: used to return the DMA-mapped address of the mbuf + * + * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool. + * Allocates from generic dma_pool_alloc function first and if that fails and + * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the + * HBA's pool. + * + * Notes: Not interrupt-safe. Must be called with no locks held. Takes + * phba->hbalock. 
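+ *
+ * Example (illustrative sketch only, not upstream text): allocate a
+ * priority mbuf and return it with lpfc_mbuf_free() when finished:
+ *
+ *     dma_addr_t phys;
+ *     void *virt = lpfc_mbuf_alloc(phba, MEM_PRI, &phys);
+ *
+ *     if (virt) {
+ *             ... use the LPFC_BPL_SIZE buffer at virt / phys ...
+ *             lpfc_mbuf_free(phba, virt, phys);
+ *     }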
+ * + * Returns: + * pointer to the allocated mbuf on success + * NULL on failure + **/ +void * +lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) +{ + struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; + unsigned long iflags; + void *ret; + + ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle); + + spin_lock_irqsave(&phba->hbalock, iflags); + if (!ret && (mem_flags & MEM_PRI) && pool->current_count) { + pool->current_count--; + ret = pool->elements[pool->current_count].virt; + *handle = pool->elements[pool->current_count].phys; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + return ret; +} + +/** + * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked) + * @phba: HBA which owns the pool to return to + * @virt: mbuf to free + * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed + * + * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if + * it is below its max_count, frees the mbuf otherwise. + * + * Notes: Must be called with phba->hbalock held to synchronize access to + * lpfc_mbuf_safety_pool. + * + * Returns: None + **/ +void +__lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) +{ + struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; + + if (pool->current_count < pool->max_count) { + pool->elements[pool->current_count].virt = virt; + pool->elements[pool->current_count].phys = dma; + pool->current_count++; + } else { + dma_pool_free(phba->lpfc_mbuf_pool, virt, dma); + } + return; +} + +/** + * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked) + * @phba: HBA which owns the pool to return to + * @virt: mbuf to free + * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed + * + * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if + * it is below its max_count, frees the mbuf otherwise. + * + * Notes: Takes phba->hbalock. Can be called with or without other locks held. + * + * Returns: None + **/ +void +lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) +{ + unsigned long iflags; + + spin_lock_irqsave(&phba->hbalock, iflags); + __lpfc_mbuf_free(phba, virt, dma); + spin_unlock_irqrestore(&phba->hbalock, iflags); + return; +} + +/** + * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the + * lpfc_sg_dma_buf_pool PCI pool + * @phba: HBA which owns the pool to allocate from + * @mem_flags: indicates if this is a priority (MEM_PRI) allocation + * @handle: used to return the DMA-mapped address of the nvmet_buf + * + * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool + * PCI pool. Allocates from generic dma_pool_alloc function. 
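+ *
+ * Example (illustrative sketch only, not upstream text): paired with
+ * lpfc_nvmet_buf_free() below; mem_flags is not consulted by this routine:
+ *
+ *     dma_addr_t phys;
+ *     void *buf = lpfc_nvmet_buf_alloc(phba, 0, &phys);
+ *
+ *     if (buf)
+ *             lpfc_nvmet_buf_free(phba, buf, phys);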
+ * + * Returns: + * pointer to the allocated nvmet_buf on success + * NULL on failure + **/ +void * +lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) +{ + void *ret; + + ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle); + return ret; +} + +/** + * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool + * PCI pool + * @phba: HBA which owns the pool to return to + * @virt: nvmet_buf to free + * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed + * + * Returns: None + **/ +void +lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma) +{ + dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma); +} + +/** + * lpfc_els_hbq_alloc - Allocate an HBQ buffer + * @phba: HBA to allocate HBQ buffer for + * + * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI + * pool along a non-DMA-mapped container for it. + * + * Notes: Not interrupt-safe. Must be called with no locks held. + * + * Returns: + * pointer to HBQ on success + * NULL on failure + **/ +struct hbq_dmabuf * +lpfc_els_hbq_alloc(struct lpfc_hba *phba) +{ + struct hbq_dmabuf *hbqbp; + + hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); + if (!hbqbp) + return NULL; + + hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, + &hbqbp->dbuf.phys); + if (!hbqbp->dbuf.virt) { + kfree(hbqbp); + return NULL; + } + hbqbp->total_size = LPFC_BPL_SIZE; + return hbqbp; +} + +/** + * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc + * @phba: HBA buffer was allocated for + * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc + * + * Description: Frees both the container and the DMA-mapped buffer returned by + * lpfc_els_hbq_alloc. + * + * Notes: Can be called with or without locks held. + * + * Returns: None + **/ +void +lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) +{ + dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); + kfree(hbqbp); + return; +} + +/** + * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer + * @phba: HBA to allocate a receive buffer for + * + * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI + * pool along a non-DMA-mapped container for it. + * + * Notes: Not interrupt-safe. Must be called with no locks held. + * + * Returns: + * pointer to HBQ on success + * NULL on failure + **/ +struct hbq_dmabuf * +lpfc_sli4_rb_alloc(struct lpfc_hba *phba) +{ + struct hbq_dmabuf *dma_buf; + + dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); + if (!dma_buf) + return NULL; + + dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, + &dma_buf->hbuf.phys); + if (!dma_buf->hbuf.virt) { + kfree(dma_buf); + return NULL; + } + dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, + &dma_buf->dbuf.phys); + if (!dma_buf->dbuf.virt) { + dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, + dma_buf->hbuf.phys); + kfree(dma_buf); + return NULL; + } + dma_buf->total_size = LPFC_DATA_BUF_SIZE; + return dma_buf; +} + +/** + * lpfc_sli4_rb_free - Frees a receive buffer + * @phba: HBA buffer was allocated for + * @dmab: DMA Buffer container returned by lpfc_sli4_hbq_alloc + * + * Description: Frees both the container and the DMA-mapped buffers returned by + * lpfc_sli4_rb_alloc. + * + * Notes: Can be called with or without locks held. 
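+ *
+ * Example (illustrative sketch only, not upstream text): tear down a
+ * receive buffer obtained from lpfc_sli4_rb_alloc() above:
+ *
+ *     struct hbq_dmabuf *rb = lpfc_sli4_rb_alloc(phba);
+ *
+ *     if (rb)
+ *             lpfc_sli4_rb_free(phba, rb);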
+ * + * Returns: None + **/ +void +lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab) +{ + dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); + dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); + kfree(dmab); +} + +/** + * lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer + * @phba: HBA to allocate a receive buffer for + * + * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI + * pool along a non-DMA-mapped container for it. + * + * Returns: + * pointer to HBQ on success + * NULL on failure + **/ +struct rqb_dmabuf * +lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) +{ + struct rqb_dmabuf *dma_buf; + + dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL); + if (!dma_buf) + return NULL; + + dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, + &dma_buf->hbuf.phys); + if (!dma_buf->hbuf.virt) { + kfree(dma_buf); + return NULL; + } + dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool, + GFP_KERNEL, &dma_buf->dbuf.phys); + if (!dma_buf->dbuf.virt) { + dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, + dma_buf->hbuf.phys); + kfree(dma_buf); + return NULL; + } + dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE; + return dma_buf; +} + +/** + * lpfc_sli4_nvmet_free - Frees a receive buffer + * @phba: HBA buffer was allocated for + * @dmab: DMA Buffer container returned by lpfc_sli4_rbq_alloc + * + * Description: Frees both the container and the DMA-mapped buffers returned by + * lpfc_sli4_nvmet_alloc. + * + * Notes: Can be called with or without locks held. + * + * Returns: None + **/ +void +lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab) +{ + dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); + dma_pool_free(phba->lpfc_nvmet_drb_pool, + dmab->dbuf.virt, dmab->dbuf.phys); + kfree(dmab); +} + +/** + * lpfc_in_buf_free - Free a DMA buffer + * @phba: HBA buffer is associated with + * @mp: Buffer to free + * + * Description: Frees the given DMA buffer in the appropriate way given if the + * HBA is running in SLI3 mode with HBQs enabled. + * + * Notes: Takes phba->hbalock. Can be called with or without other locks held. + * + * Returns: None + **/ +void +lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) +{ + struct hbq_dmabuf *hbq_entry; + unsigned long flags; + + if (!mp) + return; + + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); + /* Check whether HBQ is still in use */ + spin_lock_irqsave(&phba->hbalock, flags); + if (!phba->hbq_in_use) { + spin_unlock_irqrestore(&phba->hbalock, flags); + return; + } + list_del(&hbq_entry->dbuf.list); + if (hbq_entry->tag == -1) { + (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) + (phba, hbq_entry); + } else { + lpfc_sli_free_hbq(phba, hbq_entry); + } + spin_unlock_irqrestore(&phba->hbalock, flags); + } else { + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + } + return; +} + +/** + * lpfc_rq_buf_free - Free a RQ DMA buffer + * @phba: HBA buffer is associated with + * @mp: Buffer to free + * + * Description: Frees the given DMA buffer in the appropriate way given by + * reposting it to its associated RQ so it can be reused. + * + * Notes: Takes phba->hbalock. Can be called with or without other locks held. 
+ * + * Returns: None + **/ +void +lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) +{ + struct lpfc_rqb *rqbp; + struct lpfc_rqe hrqe; + struct lpfc_rqe drqe; + struct rqb_dmabuf *rqb_entry; + unsigned long flags; + int rc; + + if (!mp) + return; + + rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf); + rqbp = rqb_entry->hrq->rqbp; + + spin_lock_irqsave(&phba->hbalock, flags); + list_del(&rqb_entry->hbuf.list); + hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys); + hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys); + drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys); + drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys); + rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); + if (rc < 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6409 Cannot post to HRQ %d: %x %x %x " + "DRQ %x %x\n", + rqb_entry->hrq->queue_id, + rqb_entry->hrq->host_index, + rqb_entry->hrq->hba_index, + rqb_entry->hrq->entry_count, + rqb_entry->drq->host_index, + rqb_entry->drq->hba_index); + (rqbp->rqb_free_buffer)(phba, rqb_entry); + } else { + list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); + rqbp->buffer_count++; + } + + spin_unlock_irqrestore(&phba->hbalock, flags); +} diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h new file mode 100644 index 000000000..95d60ab5e --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_nl.h @@ -0,0 +1,181 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2010 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + *******************************************************************/ + +/* Event definitions for RegisterForEvent */ +#define FC_REG_LINK_EVENT 0x0001 /* link up / down events */ +#define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */ +#define FC_REG_CT_EVENT 0x0004 /* CT request events */ +#define FC_REG_DUMP_EVENT 0x0010 /* Dump events */ +#define FC_REG_TEMPERATURE_EVENT 0x0020 /* temperature events */ +#define FC_REG_VPORTRSCN_EVENT 0x0040 /* Vport RSCN events */ +#define FC_REG_ELS_EVENT 0x0080 /* lpfc els events */ +#define FC_REG_FABRIC_EVENT 0x0100 /* lpfc fabric events */ +#define FC_REG_SCSI_EVENT 0x0200 /* lpfc scsi events */ +#define FC_REG_BOARD_EVENT 0x0400 /* lpfc board events */ +#define FC_REG_ADAPTER_EVENT 0x0800 /* lpfc adapter events */ +#define FC_REG_EVENT_MASK (FC_REG_LINK_EVENT | \ + FC_REG_RSCN_EVENT | \ + FC_REG_CT_EVENT | \ + FC_REG_DUMP_EVENT | \ + FC_REG_TEMPERATURE_EVENT | \ + FC_REG_VPORTRSCN_EVENT | \ + FC_REG_ELS_EVENT | \ + FC_REG_FABRIC_EVENT | \ + FC_REG_SCSI_EVENT | \ + FC_REG_BOARD_EVENT | \ + FC_REG_ADAPTER_EVENT) +/* Temperature events */ +#define LPFC_CRIT_TEMP 0x1 +#define LPFC_THRESHOLD_TEMP 0x2 +#define LPFC_NORMAL_TEMP 0x3 +/* + * All net link event payloads will begin with and event type + * and subcategory. The event type must come first. + * The subcategory further defines the data that follows in the rest + * of the payload. Each category will have its own unique header plus + * any additional data unique to the subcategory. + * The payload sent via the fc transport is one-way driver->application. + */ + +/* RSCN event header */ +struct lpfc_rscn_event_header { + uint32_t event_type; + uint32_t payload_length; /* RSCN data length in bytes */ + uint32_t rscn_payload[]; +}; + +/* els event header */ +struct lpfc_els_event_header { + uint32_t event_type; + uint32_t subcategory; + uint8_t wwpn[8]; + uint8_t wwnn[8]; +}; + +/* subcategory codes for FC_REG_ELS_EVENT */ +#define LPFC_EVENT_PLOGI_RCV 0x01 +#define LPFC_EVENT_PRLO_RCV 0x02 +#define LPFC_EVENT_ADISC_RCV 0x04 +#define LPFC_EVENT_LSRJT_RCV 0x08 +#define LPFC_EVENT_LOGO_RCV 0x10 + +/* special els lsrjt event */ +struct lpfc_lsrjt_event { + struct lpfc_els_event_header header; + uint32_t command; + uint32_t reason_code; + uint32_t explanation; +}; + +/* special els logo event */ +struct lpfc_logo_event { + struct lpfc_els_event_header header; + uint8_t logo_wwpn[8]; +}; + +/* fabric event header */ +struct lpfc_fabric_event_header { + uint32_t event_type; + uint32_t subcategory; + uint8_t wwpn[8]; + uint8_t wwnn[8]; +}; + +/* subcategory codes for FC_REG_FABRIC_EVENT */ +#define LPFC_EVENT_FABRIC_BUSY 0x01 +#define LPFC_EVENT_PORT_BUSY 0x02 +#define LPFC_EVENT_FCPRDCHKERR 0x04 + +/* special case fabric fcprdchkerr event */ +struct lpfc_fcprdchkerr_event { + struct lpfc_fabric_event_header header; + uint32_t lun; + uint32_t opcode; + uint32_t fcpiparam; +}; + + +/* scsi event header */ +struct lpfc_scsi_event_header { + uint32_t event_type; + uint32_t subcategory; + uint32_t lun; + uint8_t wwpn[8]; + uint8_t wwnn[8]; +}; + +/* subcategory codes for FC_REG_SCSI_EVENT */ +#define LPFC_EVENT_QFULL 0x0001 +#define LPFC_EVENT_DEVBSY 0x0002 +#define LPFC_EVENT_CHECK_COND 0x0004 +#define LPFC_EVENT_LUNRESET 0x0008 +#define LPFC_EVENT_TGTRESET 0x0010 +#define LPFC_EVENT_BUSRESET 0x0020 +#define LPFC_EVENT_VARQUEDEPTH 0x0040 + +/* special case scsi varqueuedepth event */ +struct lpfc_scsi_varqueuedepth_event { + struct lpfc_scsi_event_header scsi_event; + uint32_t oldval; + uint32_t newval; +}; + 
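+/*
+ * Illustrative sketch only (not part of this header): because every event
+ * payload starts with the common event_type/subcategory pair, a consumer of
+ * the fc transport events can dispatch on those two words before casting to
+ * the subcategory-specific structure, e.g.
+ *
+ *     struct lpfc_scsi_event_header *hdr = payload;
+ *
+ *     if (hdr->event_type == FC_REG_SCSI_EVENT &&
+ *         hdr->subcategory == LPFC_EVENT_VARQUEDEPTH)
+ *             handle_qdepth((struct lpfc_scsi_varqueuedepth_event *)payload);
+ *
+ * where "payload" and "handle_qdepth" are hypothetical names.
+ */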
+/* special case scsi check condition event */ +struct lpfc_scsi_check_condition_event { + struct lpfc_scsi_event_header scsi_event; + uint8_t opcode; + uint8_t sense_key; + uint8_t asc; + uint8_t ascq; +}; + +/* event codes for FC_REG_BOARD_EVENT */ +#define LPFC_EVENT_PORTINTERR 0x01 + +/* board event header */ +struct lpfc_board_event_header { + uint32_t event_type; + uint32_t subcategory; +}; + + +/* event codes for FC_REG_ADAPTER_EVENT */ +#define LPFC_EVENT_ARRIVAL 0x01 + +/* adapter event header */ +struct lpfc_adapter_event_header { + uint32_t event_type; + uint32_t subcategory; +}; + + +/* event codes for temp_event */ +#define LPFC_CRIT_TEMP 0x1 +#define LPFC_THRESHOLD_TEMP 0x2 +#define LPFC_NORMAL_TEMP 0x3 + +struct temp_event { + uint32_t event_type; + uint32_t event_code; + uint32_t data; +}; + diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c new file mode 100644 index 000000000..1eb7f7e60 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -0,0 +1,3153 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" + + +/* Called to verify a rcv'ed ADISC was intended for us. */ +static int +lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct lpfc_name *nn, struct lpfc_name *pn) +{ + /* First, we MUST have a RPI registered */ + if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) + return 0; + + /* Compare the ADISC rsp WWNN / WWPN matches our internal node + * table entry for that node. 
+ */ + if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name))) + return 0; + + if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name))) + return 0; + + /* we match, return success */ + return 1; +} + +int +lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct serv_parm *sp, uint32_t class, int flogi) +{ + volatile struct serv_parm *hsp = &vport->fc_sparam; + uint16_t hsp_value, ssp_value = 0; + + /* + * The receive data field size and buffer-to-buffer receive data field + * size entries are 16 bits but are represented as two 8-bit fields in + * the driver data structure to account for rsvd bits and other control + * bits. Reconstruct and compare the fields as a 16-bit values before + * correcting the byte values. + */ + if (sp->cls1.classValid) { + if (!flogi) { + hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) | + hsp->cls1.rcvDataSizeLsb); + ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) | + sp->cls1.rcvDataSizeLsb); + if (!ssp_value) + goto bad_service_param; + if (ssp_value > hsp_value) { + sp->cls1.rcvDataSizeLsb = + hsp->cls1.rcvDataSizeLsb; + sp->cls1.rcvDataSizeMsb = + hsp->cls1.rcvDataSizeMsb; + } + } + } else if (class == CLASS1) + goto bad_service_param; + if (sp->cls2.classValid) { + if (!flogi) { + hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) | + hsp->cls2.rcvDataSizeLsb); + ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) | + sp->cls2.rcvDataSizeLsb); + if (!ssp_value) + goto bad_service_param; + if (ssp_value > hsp_value) { + sp->cls2.rcvDataSizeLsb = + hsp->cls2.rcvDataSizeLsb; + sp->cls2.rcvDataSizeMsb = + hsp->cls2.rcvDataSizeMsb; + } + } + } else if (class == CLASS2) + goto bad_service_param; + if (sp->cls3.classValid) { + if (!flogi) { + hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) | + hsp->cls3.rcvDataSizeLsb); + ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) | + sp->cls3.rcvDataSizeLsb); + if (!ssp_value) + goto bad_service_param; + if (ssp_value > hsp_value) { + sp->cls3.rcvDataSizeLsb = + hsp->cls3.rcvDataSizeLsb; + sp->cls3.rcvDataSizeMsb = + hsp->cls3.rcvDataSizeMsb; + } + } + } else if (class == CLASS3) + goto bad_service_param; + + /* + * Preserve the upper four bits of the MSB from the PLOGI response. + * These bits contain the Buffer-to-Buffer State Change Number + * from the target and need to be passed to the FW. + */ + hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb; + ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb; + if (ssp_value > hsp_value) { + sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb; + sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) | + (hsp->cmn.bbRcvSizeMsb & 0x0F); + } + + memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name)); + memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name)); + return 1; +bad_service_param: + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0207 Device %x " + "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent " + "invalid service parameters. 
Ignoring device.\n", + ndlp->nlp_DID, + sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1], + sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3], + sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5], + sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]); + return 0; +} + +static void * +lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_dmabuf *pcmd, *prsp; + uint32_t *lp; + void *ptr = NULL; + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + + pcmd = cmdiocb->cmd_dmabuf; + + /* For lpfc_els_abort, cmd_dmabuf could be zero'ed to delay + * freeing associated memory till after ABTS completes. + */ + if (pcmd) { + prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, + list); + if (prsp) { + lp = (uint32_t *) prsp->virt; + ptr = (void *)((uint8_t *)lp + sizeof(uint32_t)); + } + } else { + /* Force ulp_status error since we are returning NULL ptr */ + if (!(ulp_status)) { + if (phba->sli_rev == LPFC_SLI_REV4) { + bf_set(lpfc_wcqe_c_status, &rspiocb->wcqe_cmpl, + IOSTAT_LOCAL_REJECT); + rspiocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED; + } else { + rspiocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; + rspiocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; + } + } + ptr = NULL; + } + return ptr; +} + + + +/* + * Free resources / clean up outstanding I/Os + * associated with a LPFC_NODELIST entry. This + * routine effectively results in a "software abort". + */ +void +lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +{ + LIST_HEAD(abort_list); + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *iocb, *next_iocb; + + pring = lpfc_phba_elsring(phba); + + /* In case of error recovery path, we might have a NULL pring here */ + if (unlikely(!pring)) + return; + + /* Abort outstanding I/O on NPort */ + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, + "2819 Abort outstanding I/O on NPort x%x " + "Data: x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi); + /* Clean up all fabric IOs first.*/ + lpfc_fabric_abort_nport(ndlp); + + /* + * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list + * of all ELS IOs that need an ABTS. The IOs need to stay on the + * txcmplq so that the abort operation completes them successfully. + */ + spin_lock_irq(&phba->hbalock); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock(&pring->ring_lock); + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { + /* Add to abort_list on on NDLP match. */ + if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) + list_add_tail(&iocb->dlist, &abort_list); + } + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); + spin_unlock_irq(&phba->hbalock); + + /* Abort the targeted IOs and remove them from the abort list. 
*/ + list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) { + spin_lock_irq(&phba->hbalock); + list_del_init(&iocb->dlist); + lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); + spin_unlock_irq(&phba->hbalock); + } + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + + INIT_LIST_HEAD(&abort_list); + + /* Now process the txq */ + spin_lock_irq(&phba->hbalock); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock(&pring->ring_lock); + + list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { + /* Check to see if iocb matches the nport we are looking for */ + if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { + list_del_init(&iocb->list); + list_add_tail(&iocb->list, &abort_list); + } + } + + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); + spin_unlock_irq(&phba->hbalock); + + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &abort_list, + IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); + + lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); +} + +/* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes + * @phba: pointer to lpfc hba data structure. + * @login_mbox: pointer to REG_RPI mailbox object + * + * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes + */ +static void +lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox) +{ + struct lpfc_iocbq *save_iocb; + struct lpfc_nodelist *ndlp; + MAILBOX_t *mb = &login_mbox->u.mb; + + int rc; + + ndlp = login_mbox->ctx_ndlp; + save_iocb = login_mbox->context3; + + if (mb->mbxStatus == MBX_SUCCESS) { + /* Now that REG_RPI completed successfully, + * we can now proceed with sending the PLOGI ACC. + */ + rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI, + save_iocb, ndlp, NULL); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "4576 PLOGI ACC fails pt2pt discovery: " + "DID %x Data: %x\n", ndlp->nlp_DID, rc); + } + } + + /* Now process the REG_RPI cmpl */ + lpfc_mbx_cmpl_reg_login(phba, login_mbox); + ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; + kfree(save_iocb); +} + +static int +lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct lpfc_iocbq *cmdiocb) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_dmabuf *pcmd; + uint64_t nlp_portwwn = 0; + uint32_t *lp; + union lpfc_wqe128 *wqe; + IOCB_t *icmd; + struct serv_parm *sp; + uint32_t ed_tov; + LPFC_MBOXQ_t *link_mbox; + LPFC_MBOXQ_t *login_mbox; + struct lpfc_iocbq *save_iocb; + struct ls_rjt stat; + uint32_t vid, flag; + int rc; + u32 remote_did; + + memset(&stat, 0, sizeof (struct ls_rjt)); + pcmd = cmdiocb->cmd_dmabuf; + lp = (uint32_t *) pcmd->virt; + sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); + if (wwn_to_u64(sp->portName.u.wwn) == 0) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0140 PLOGI Reject: invalid pname\n"); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, + NULL); + return 0; + } + if (wwn_to_u64(sp->nodeName.u.wwn) == 0) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0141 PLOGI Reject: invalid nname\n"); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, + NULL); + return 0; + } + + nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn); + if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) { + /* Reject this request because invalid parameters */ + 
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, + NULL); + return 0; + } + + if (phba->sli_rev == LPFC_SLI_REV4) + wqe = &cmdiocb->wqe; + else + icmd = &cmdiocb->iocb; + + /* PLOGI chkparm OK */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0114 PLOGI chkparm OK Data: x%x x%x x%x " + "x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, + ndlp->nlp_rpi, vport->port_state, + vport->fc_flag); + + if (vport->cfg_fcp_class == 2 && sp->cls2.classValid) + ndlp->nlp_fcp_info |= CLASS2; + else + ndlp->nlp_fcp_info |= CLASS3; + + ndlp->nlp_class_sup = 0; + if (sp->cls1.classValid) + ndlp->nlp_class_sup |= FC_COS_CLASS1; + if (sp->cls2.classValid) + ndlp->nlp_class_sup |= FC_COS_CLASS2; + if (sp->cls3.classValid) + ndlp->nlp_class_sup |= FC_COS_CLASS3; + if (sp->cls4.classValid) + ndlp->nlp_class_sup |= FC_COS_CLASS4; + ndlp->nlp_maxframe = + ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; + /* if already logged in, do implicit logout */ + switch (ndlp->nlp_state) { + case NLP_STE_NPR_NODE: + if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) + break; + fallthrough; + case NLP_STE_REG_LOGIN_ISSUE: + case NLP_STE_PRLI_ISSUE: + case NLP_STE_UNMAPPED_NODE: + case NLP_STE_MAPPED_NODE: + /* For initiators, lpfc_plogi_confirm_nport skips fabric did. + * For target mode, execute implicit logo. + * Fabric nodes go into NPR. + */ + if (!(ndlp->nlp_type & NLP_FABRIC) && + !(phba->nvmet_support)) { + /* Clear ndlp info, since follow up PRLI may have + * updated ndlp information + */ + ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); + ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); + ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; + ndlp->nlp_flag &= ~NLP_FIRSTBURST; + + lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, + ndlp, NULL); + return 1; + } + if (nlp_portwwn != 0 && + nlp_portwwn != wwn_to_u64(sp->portName.u.wwn)) + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0143 PLOGI recv'd from DID: x%x " + "WWPN changed: old %llx new %llx\n", + ndlp->nlp_DID, + (unsigned long long)nlp_portwwn, + (unsigned long long) + wwn_to_u64(sp->portName.u.wwn)); + + /* Notify transport of connectivity loss to trigger cleanup. */ + if (phba->nvmet_support && + ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) + lpfc_nvmet_invalidate_host(phba, ndlp); + + ndlp->nlp_prev_state = ndlp->nlp_state; + /* rport needs to be unregistered first */ + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + break; + } + + ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); + ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); + ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; + ndlp->nlp_flag &= ~NLP_FIRSTBURST; + + login_mbox = NULL; + link_mbox = NULL; + save_iocb = NULL; + + /* Check for Nport to NPort pt2pt protocol */ + if ((vport->fc_flag & FC_PT2PT) && + !(vport->fc_flag & FC_PT2PT_PLOGI)) { + /* rcv'ed PLOGI decides what our NPortId will be */ + if (phba->sli_rev == LPFC_SLI_REV4) { + vport->fc_myDID = bf_get(els_rsp64_sid, + &cmdiocb->wqe.xmit_els_rsp); + } else { + vport->fc_myDID = icmd->un.rcvels.parmRo; + } + + /* If there is an outstanding FLOGI, abort it now. + * The remote NPort is not going to ACC our FLOGI + * if its already issuing a PLOGI for pt2pt mode. + * This indicates our FLOGI was dropped; however, we + * must have ACCed the remote NPorts FLOGI to us + * to make it here. 
+ */ + if (phba->hba_flag & HBA_FLOGI_OUTSTANDING) + lpfc_els_abort_flogi(phba); + + ed_tov = be32_to_cpu(sp->cmn.e_d_tov); + if (sp->cmn.edtovResolution) { + /* E_D_TOV ticks are in nanoseconds */ + ed_tov = (phba->fc_edtov + 999999) / 1000000; + } + + /* + * For pt-to-pt, use the larger EDTOV + * RATOV = 2 * EDTOV + */ + if (ed_tov > phba->fc_edtov) + phba->fc_edtov = ed_tov; + phba->fc_ratov = (2 * phba->fc_edtov) / 1000; + + memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); + + /* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4, + * to account for updated TOV's / parameters + */ + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_issue_reg_vfi(vport); + else { + link_mbox = mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!link_mbox) + goto out; + lpfc_config_link(phba, link_mbox); + link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + link_mbox->vport = vport; + + /* The default completion handling for CONFIG_LINK + * does not require the ndlp so no reference is needed. + */ + link_mbox->ctx_ndlp = ndlp; + + rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mempool_free(link_mbox, phba->mbox_mem_pool); + goto out; + } + } + + lpfc_can_disctmo(vport); + } + + ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && + sp->cmn.valid_vendor_ver_level) { + vid = be32_to_cpu(sp->un.vv.vid); + flag = be32_to_cpu(sp->un.vv.flags); + if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) + ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + } + + login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!login_mbox) + goto out; + + save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL); + if (!save_iocb) + goto out; + + /* Save info from cmd IOCB to be used in rsp after all mbox completes */ + memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb, + sizeof(struct lpfc_iocbq)); + + /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */ + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_unreg_rpi(vport, ndlp); + + /* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will + * always be deferring the ACC. + */ + if (phba->sli_rev == LPFC_SLI_REV4) + remote_did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest); + else + remote_did = icmd->un.rcvels.remoteID; + rc = lpfc_reg_rpi(phba, vport->vpi, remote_did, + (uint8_t *)sp, login_mbox, ndlp->nlp_rpi); + if (rc) + goto out; + + login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; + login_mbox->vport = vport; + + /* + * If there is an outstanding PLOGI issued, abort it before + * sending ACC rsp for received PLOGI. If pending plogi + * is not canceled here, the plogi will be rejected by + * remote port and will be retried. On a configuration with + * single discovery thread, this will cause a huge delay in + * discovery. Also this will cause multiple state machines + * running in parallel for this node. + * This only applies to a fabric environment. + */ + if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) && + (vport->fc_flag & FC_FABRIC)) { + /* software abort outstanding PLOGI */ + lpfc_els_abort(phba, ndlp); + } + + if ((vport->port_type == LPFC_NPIV_PORT && + vport->cfg_restrict_login)) { + + /* no deferred ACC */ + kfree(save_iocb); + + /* This is an NPIV SLI4 instance that does not need to register + * a default RPI. 
+ */ + if (phba->sli_rev == LPFC_SLI_REV4) { + lpfc_mbox_rsrc_cleanup(phba, login_mbox, + MBOX_THD_UNLOCKED); + login_mbox = NULL; + } else { + /* In order to preserve RPIs, we want to cleanup + * the default RPI the firmware created to rcv + * this ELS request. The only way to do this is + * to register, then unregister the RPI. + */ + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN | + NLP_RCV_PLOGI); + spin_unlock_irq(&ndlp->lock); + } + + stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, + ndlp, login_mbox); + if (rc && login_mbox) + lpfc_mbox_rsrc_cleanup(phba, login_mbox, + MBOX_THD_UNLOCKED); + return 1; + } + + /* So the order here should be: + * SLI3 pt2pt + * Issue CONFIG_LINK mbox + * CONFIG_LINK cmpl + * SLI4 pt2pt + * Issue REG_VFI mbox + * REG_VFI cmpl + * SLI4 + * Issue UNREG RPI mbx + * UNREG RPI cmpl + * Issue REG_RPI mbox + * REG RPI cmpl + * Issue PLOGI ACC + * PLOGI ACC cmpl + */ + login_mbox->mbox_cmpl = lpfc_defer_plogi_acc; + login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp); + if (!login_mbox->ctx_ndlp) + goto out; + + login_mbox->context3 = save_iocb; /* For PLOGI ACC */ + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); + spin_unlock_irq(&ndlp->lock); + + /* Start the ball rolling by issuing REG_LOGIN here */ + rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_nlp_put(ndlp); + goto out; + } + lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); + + return 1; +out: + kfree(save_iocb); + if (login_mbox) + mempool_free(login_mbox, phba->mbox_mem_pool); + + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return 0; +} + +/** + * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox object + * + * This routine is invoked to issue a completion to a rcv'ed + * ADISC or PDISC after the paused RPI has been resumed. 
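+ *
+ * Illustrative note (not upstream text): the ADISC/PDISC receive path below
+ * arms this completion roughly as
+ *
+ *     lpfc_sli4_resume_rpi(ndlp, lpfc_mbx_cmpl_resume_rpi, elsiocb);
+ *
+ * so the deferred ACC is only issued once the RESUME_RPI mailbox completes.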
+ **/ +static void +lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_vport *vport; + struct lpfc_iocbq *elsiocb; + struct lpfc_nodelist *ndlp; + uint32_t cmd; + + elsiocb = (struct lpfc_iocbq *)mboxq->ctx_buf; + ndlp = (struct lpfc_nodelist *)mboxq->ctx_ndlp; + vport = mboxq->vport; + cmd = elsiocb->drvrTimeout; + + if (cmd == ELS_CMD_ADISC) { + lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp); + } else { + lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb, + ndlp, NULL); + } + + /* This nlp_put pairs with lpfc_sli4_resume_rpi */ + lpfc_nlp_put(ndlp); + + kfree(elsiocb); + mempool_free(mboxq, phba->mbox_mem_pool); +} + +static int +lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct lpfc_iocbq *cmdiocb) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + struct lpfc_dmabuf *pcmd; + struct serv_parm *sp; + struct lpfc_name *pnn, *ppn; + struct ls_rjt stat; + ADISC *ap; + uint32_t *lp; + uint32_t cmd; + + pcmd = cmdiocb->cmd_dmabuf; + lp = (uint32_t *) pcmd->virt; + + cmd = *lp++; + if (cmd == ELS_CMD_ADISC) { + ap = (ADISC *) lp; + pnn = (struct lpfc_name *) & ap->nodeName; + ppn = (struct lpfc_name *) & ap->portName; + } else { + sp = (struct serv_parm *) lp; + pnn = (struct lpfc_name *) & sp->nodeName; + ppn = (struct lpfc_name *) & sp->portName; + } + + if (get_job_ulpstatus(phba, cmdiocb) == 0 && + lpfc_check_adisc(vport, ndlp, pnn, ppn)) { + + /* + * As soon as we send ACC, the remote NPort can + * start sending us data. Thus, for SLI4 we must + * resume the RPI before the ACC goes out. + */ + if (vport->phba->sli_rev == LPFC_SLI_REV4) { + elsiocb = kmalloc(sizeof(struct lpfc_iocbq), + GFP_KERNEL); + if (elsiocb) { + /* Save info from cmd IOCB used in rsp */ + memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb, + sizeof(struct lpfc_iocbq)); + + /* Save the ELS cmd */ + elsiocb->drvrTimeout = cmd; + + lpfc_sli4_resume_rpi(ndlp, + lpfc_mbx_cmpl_resume_rpi, elsiocb); + goto out; + } + } + + if (cmd == ELS_CMD_ADISC) { + lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp); + } else { + lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, + ndlp, NULL); + } +out: + /* If we are authenticated, move to the proper state. + * It is possible an ADISC arrived and the remote nport + * is already in MAPPED or UNMAPPED state. Catch this + * condition and don't set the nlp_state again because + * it causes an unnecessary transport unregister/register. 
+ * + * Nodes marked for ADISC will move MAPPED or UNMAPPED state + * after issuing ADISC + */ + if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { + if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && + !(ndlp->nlp_flag & NLP_NPR_ADISC)) + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_MAPPED_NODE); + } + + return 1; + } + /* Reject this request because invalid parameters */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; + stat.un.b.vendorUnique = 0; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + + /* 1 sec timeout */ + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_DELAY_TMO; + spin_unlock_irq(&ndlp->lock); + ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + return 0; +} + +static int +lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct lpfc_iocbq *cmdiocb, uint32_t els_cmd) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + struct lpfc_vport **vports; + int i, active_vlink_present = 0 ; + + /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */ + /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary + * PLOGIs during LOGO storms from a device. + */ + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_LOGO_ACC; + spin_unlock_irq(&ndlp->lock); + if (els_cmd == ELS_CMD_PRLO) + lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); + else + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + + /* This clause allows the initiator to ACC the LOGO back to the + * Fabric Domain Controller. It does deliberately skip all other + * steps because some fabrics send RDP requests after logging out + * from the initiator. + */ + if (ndlp->nlp_type & NLP_FABRIC && + ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) + return 0; + + /* Notify transport of connectivity loss to trigger cleanup. */ + if (phba->nvmet_support && + ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) + lpfc_nvmet_invalidate_host(phba, ndlp); + + if (ndlp->nlp_DID == Fabric_DID) { + if (vport->port_state <= LPFC_FDISC || + vport->fc_flag & FC_PT2PT) + goto out; + lpfc_linkdown_port(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_LOGO_RCVD; + spin_unlock_irq(shost->host_lock); + vports = lpfc_create_vport_work_array(phba); + if (vports) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; + i++) { + if ((!(vports[i]->fc_flag & + FC_VPORT_LOGO_RCVD)) && + (vports[i]->port_state > LPFC_FDISC)) { + active_vlink_present = 1; + break; + } + } + lpfc_destroy_vport_work_array(phba, vports); + } + + /* + * Don't re-instantiate if vport is marked for deletion. + * If we are here first then vport_delete is going to wait + * for discovery to complete. + */ + if (!(vport->load_flag & FC_UNLOADING) && + active_vlink_present) { + /* + * If there are other active VLinks present, + * re-instantiate the Vlink using FDISC. 
+ */ + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_DELAY_TMO; + spin_unlock_irq(&ndlp->lock); + ndlp->nlp_last_elscmd = ELS_CMD_FDISC; + vport->port_state = LPFC_FDISC; + } else { + spin_lock_irq(shost->host_lock); + phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG; + spin_unlock_irq(shost->host_lock); + lpfc_retry_pport_discovery(phba); + } + } else { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NODE | LOG_ELS | LOG_DISCOVERY, + "3203 LOGO recover nport x%06x state x%x " + "ntype x%x fc_flag x%x\n", + ndlp->nlp_DID, ndlp->nlp_state, + ndlp->nlp_type, vport->fc_flag); + + /* Special cases for rports that recover post LOGO. */ + if ((!(ndlp->nlp_type == NLP_FABRIC) && + (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) || + vport->fc_flag & FC_PT2PT)) || + (ndlp->nlp_state >= NLP_STE_ADISC_ISSUE || + ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) { + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_DELAY_TMO; + spin_unlock_irq(&ndlp->lock); + ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NODE | LOG_ELS | LOG_DISCOVERY, + "3204 Start nlpdelay on DID x%06x " + "nflag x%x lastels x%x ref cnt %u", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_last_elscmd, + kref_read(&ndlp->kref)); + } + } +out: + /* Unregister from backend, could have been skipped due to ADISC */ + lpfc_nlp_unreg_node(vport, ndlp); + + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(&ndlp->lock); + /* The driver has to wait until the ACC completes before it continues + * processing the LOGO. The action will resume in + * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an + * unreg_login, the driver waits so the ACC does not get aborted. + */ + return 0; +} + +static uint32_t +lpfc_rcv_prli_support_check(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + struct lpfc_iocbq *cmdiocb) +{ + struct ls_rjt stat; + uint32_t *payload; + uint32_t cmd; + + payload = cmdiocb->cmd_dmabuf->virt; + cmd = *payload; + if (vport->phba->nvmet_support) { + /* Must be a NVME PRLI */ + if (cmd == ELS_CMD_PRLI) + goto out; + } else { + /* Initiator mode. 
*/ + if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI)) + goto out; + } + return 1; +out: + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC, + "6115 Rcv PRLI (%x) check failed: ndlp rpi %d " + "state x%x flags x%x\n", + cmd, ndlp->nlp_rpi, ndlp->nlp_state, + ndlp->nlp_flag); + memset(&stat, 0, sizeof(struct ls_rjt)); + stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED; + stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, + ndlp, NULL); + return 0; +} + +static void +lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct lpfc_iocbq *cmdiocb) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_dmabuf *pcmd; + uint32_t *lp; + PRLI *npr; + struct fc_rport *rport = ndlp->rport; + u32 roles; + + pcmd = cmdiocb->cmd_dmabuf; + lp = (uint32_t *)pcmd->virt; + npr = (PRLI *)((uint8_t *)lp + sizeof(uint32_t)); + + if ((npr->prliType == PRLI_FCP_TYPE) || + (npr->prliType == PRLI_NVME_TYPE)) { + if (npr->initiatorFunc) { + if (npr->prliType == PRLI_FCP_TYPE) + ndlp->nlp_type |= NLP_FCP_INITIATOR; + if (npr->prliType == PRLI_NVME_TYPE) + ndlp->nlp_type |= NLP_NVME_INITIATOR; + } + if (npr->targetFunc) { + if (npr->prliType == PRLI_FCP_TYPE) + ndlp->nlp_type |= NLP_FCP_TARGET; + if (npr->prliType == PRLI_NVME_TYPE) + ndlp->nlp_type |= NLP_NVME_TARGET; + if (npr->writeXferRdyDis) + ndlp->nlp_flag |= NLP_FIRSTBURST; + } + if (npr->Retry && ndlp->nlp_type & + (NLP_FCP_INITIATOR | NLP_FCP_TARGET)) + ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; + + if (npr->Retry && phba->nsler && + ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET)) + ndlp->nlp_nvme_info |= NLP_NVME_NSLER; + + + /* If this driver is in nvme target mode, set the ndlp's fc4 + * type to NVME provided the PRLI response claims NVME FC4 + * type. Target mode does not issue gft_id so doesn't get + * the fc4 type set until now. + */ + if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) { + ndlp->nlp_fc4_type |= NLP_FC4_NVME; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + } + + /* Fabric Controllers send FCP PRLI as an initiator but should + * not get recognized as FCP type and registered with transport. 
+ */ + if (npr->prliType == PRLI_FCP_TYPE && + !(ndlp->nlp_type & NLP_FABRIC)) + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + } + if (rport) { + /* We need to update the rport role values */ + roles = FC_RPORT_ROLE_UNKNOWN; + if (ndlp->nlp_type & NLP_FCP_INITIATOR) + roles |= FC_RPORT_ROLE_FCP_INITIATOR; + if (ndlp->nlp_type & NLP_FCP_TARGET) + roles |= FC_RPORT_ROLE_FCP_TARGET; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, + "rport rolechg: role:x%x did:x%x flg:x%x", + roles, ndlp->nlp_DID, ndlp->nlp_flag); + + if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) + fc_remote_port_rolechg(rport, roles); + } +} + +static uint32_t +lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(&ndlp->lock); + return 0; + } + + if (!(vport->fc_flag & FC_PT2PT)) { + /* Check config parameter use-adisc or FCP-2 */ + if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) || + ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) && + (ndlp->nlp_type & NLP_FCP_TARGET)))) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_NPR_ADISC; + spin_unlock_irq(&ndlp->lock); + return 1; + } + } + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(&ndlp->lock); + lpfc_unreg_rpi(vport, ndlp); + return 0; +} + +/** + * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd. + * @phba : Pointer to lpfc_hba structure. + * @vport: Pointer to lpfc_vport structure. + * @ndlp: Pointer to lpfc_nodelist structure. + * @rpi : rpi to be release. + * + * This function will send a unreg_login mailbox command to the firmware + * to release a rpi. + **/ +static void +lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, uint16_t rpi) +{ + LPFC_MBOXQ_t *pmb; + int rc; + + /* If there is already an UNREG in progress for this ndlp, + * no need to queue up another one. 
+ */ + if (ndlp->nlp_flag & NLP_UNREG_INP) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "1435 release_rpi SKIP UNREG x%x on " + "NPort x%x deferred x%x flg x%x " + "Data: x%px\n", + ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_defer_did, + ndlp->nlp_flag, ndlp); + return; + } + + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!pmb) + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2796 mailbox memory allocation failed \n"); + else { + lpfc_unreg_login(phba, vport->vpi, rpi, pmb); + pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + pmb->vport = vport; + pmb->ctx_ndlp = lpfc_nlp_get(ndlp); + if (!pmb->ctx_ndlp) { + mempool_free(pmb, phba->mbox_mem_pool); + return; + } + + if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && + (!(vport->fc_flag & FC_OFFLINE_MODE))) + ndlp->nlp_flag |= NLP_UNREG_INP; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "1437 release_rpi UNREG x%x " + "on NPort x%x flg x%x\n", + ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag); + + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_nlp_put(ndlp); + mempool_free(pmb, phba->mbox_mem_pool); + } + } +} + +static uint32_t +lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_hba *phba; + LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; + uint16_t rpi; + + phba = vport->phba; + /* Release the RPI if reglogin completing */ + if (!(phba->pport->load_flag & FC_UNLOADING) && + (evt == NLP_EVT_CMPL_REG_LOGIN) && + (!pmb->u.mb.mbxStatus)) { + rpi = pmb->u.mb.un.varWords[0]; + lpfc_release_rpi(phba, vport, ndlp, rpi); + } + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0271 Illegal State Transition: node x%x " + "event x%x, state x%x Data: x%x x%x\n", + ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, + ndlp->nlp_flag); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + /* This transition is only legal if we previously + * rcv'ed a PLOGI. Since we don't want 2 discovery threads + * working on the same NPortID, do nothing for this thread + * to stop it. 
+ */ + if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0272 Illegal State Transition: node x%x " + "event x%x, state x%x Data: x%x x%x\n", + ndlp->nlp_DID, evt, ndlp->nlp_state, + ndlp->nlp_rpi, ndlp->nlp_flag); + } + return ndlp->nlp_state; +} + +/* Start of Discovery State Machine routines */ + +static uint32_t +lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb; + + cmdiocb = (struct lpfc_iocbq *) arg; + + if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { + return ndlp->nlp_state; + } + return NLP_STE_FREED_NODE; +} + +static uint32_t +lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + lpfc_issue_els_logo(vport, ndlp, 0); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_LOGO_ACC; + spin_unlock_irq(&ndlp->lock); + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + + return ndlp->nlp_state; +} + +static uint32_t +lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + return NLP_STE_FREED_NODE; +} + +static uint32_t +lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + return NLP_STE_FREED_NODE; +} + +static uint32_t +lpfc_device_recov_unused_node(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb = arg; + struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; + uint32_t *lp = (uint32_t *) pcmd->virt; + struct serv_parm *sp = (struct serv_parm *) (lp + 1); + struct ls_rjt stat; + int port_cmp; + + memset(&stat, 0, sizeof (struct ls_rjt)); + + /* For a PLOGI, we only accept if our portname is less + * than the remote portname. 
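+	 * (Editor's note, not part of the original driver comment: the check
+	 * below is a byte-wise memcmp() of our WWPN against the one in the
+	 * inbound PLOGI payload; when ours sorts greater than or equal to the
+	 * remote name (port_cmp >= 0) the request is rejected with
+	 * LSRJT_UNABLE_TPC on the assumption that the remote side will accept
+	 * our own outstanding PLOGI instead.)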
+ */ + phba->fc_stat.elsLogiCol++; + port_cmp = memcmp(&vport->fc_portname, &sp->portName, + sizeof(struct lpfc_name)); + + if (port_cmp >= 0) { + /* Reject this request because the remote node will accept + ours */ + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, + NULL); + } else { + if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) && + (ndlp->nlp_flag & NLP_NPR_2B_DISC) && + (vport->num_disc_nodes)) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + spin_unlock_irq(&ndlp->lock); + /* Check if there are more PLOGIs to be sent */ + lpfc_more_plogi(vport); + if (vport->num_disc_nodes == 0) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + lpfc_end_rscn(vport); + } + } + } /* If our portname was less */ + + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + struct ls_rjt stat; + + memset(&stat, 0, sizeof (struct ls_rjt)); + stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + /* Retrieve RPI from LOGO IOCB. RPI is used for CMD_ABORT_XRI_CN */ + if (vport->phba->sli_rev == LPFC_SLI_REV3) + ndlp->nlp_rpi = cmdiocb->iocb.ulpIoTag; + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + /* software abort outstanding PLOGI */ + lpfc_els_abort(phba, ndlp); + + if (evt == NLP_EVT_RCV_LOGO) { + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + } else { + lpfc_issue_els_logo(vport, ndlp, 0); + } + + /* Put ndlp in npr state set plogi timer for 1 sec */ + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_DELAY_TMO; + spin_unlock_irq(&ndlp->lock); + ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; + ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + + return ndlp->nlp_state; +} + +static uint32_t +lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb, *rspiocb; + struct lpfc_dmabuf *pcmd, *prsp; + uint32_t *lp; + uint32_t vid, flag; + struct serv_parm *sp; + uint32_t ed_tov; + LPFC_MBOXQ_t *mbox; + int rc; + u32 ulp_status; + u32 did; + + cmdiocb = (struct lpfc_iocbq *) arg; + rspiocb = cmdiocb->rsp_iocb; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + + if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { + /* Recovery from PLOGI collision logic */ + return ndlp->nlp_state; + } + + if (ulp_status) + goto out; + + pcmd = cmdiocb->cmd_dmabuf; + + prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); + if (!prsp) + goto 
out; + + lp = (uint32_t *) prsp->virt; + sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); + + /* Some switches have FDMI servers returning 0 for WWN */ + if ((ndlp->nlp_DID != FDMI_DID) && + (wwn_to_u64(sp->portName.u.wwn) == 0 || + wwn_to_u64(sp->nodeName.u.wwn) == 0)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0142 PLOGI RSP: Invalid WWN.\n"); + goto out; + } + if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0)) + goto out; + /* PLOGI chkparm OK */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_state, + ndlp->nlp_flag, ndlp->nlp_rpi); + if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid)) + ndlp->nlp_fcp_info |= CLASS2; + else + ndlp->nlp_fcp_info |= CLASS3; + + ndlp->nlp_class_sup = 0; + if (sp->cls1.classValid) + ndlp->nlp_class_sup |= FC_COS_CLASS1; + if (sp->cls2.classValid) + ndlp->nlp_class_sup |= FC_COS_CLASS2; + if (sp->cls3.classValid) + ndlp->nlp_class_sup |= FC_COS_CLASS3; + if (sp->cls4.classValid) + ndlp->nlp_class_sup |= FC_COS_CLASS4; + ndlp->nlp_maxframe = + ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; + + if ((vport->fc_flag & FC_PT2PT) && + (vport->fc_flag & FC_PT2PT_PLOGI)) { + ed_tov = be32_to_cpu(sp->cmn.e_d_tov); + if (sp->cmn.edtovResolution) { + /* E_D_TOV ticks are in nanoseconds */ + ed_tov = (phba->fc_edtov + 999999) / 1000000; + } + + ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && + sp->cmn.valid_vendor_ver_level) { + vid = be32_to_cpu(sp->un.vv.vid); + flag = be32_to_cpu(sp->un.vv.flags); + if ((vid == LPFC_VV_EMLX_ID) && + (flag & LPFC_VV_SUPPRESS_RSP)) + ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + } + + /* + * Use the larger EDTOV + * RATOV = 2 * EDTOV for pt-to-pt + */ + if (ed_tov > phba->fc_edtov) + phba->fc_edtov = ed_tov; + phba->fc_ratov = (2 * phba->fc_edtov) / 1000; + + memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); + + /* Issue config_link / reg_vfi to account for updated TOV's */ + if (phba->sli_rev == LPFC_SLI_REV4) { + lpfc_issue_reg_vfi(vport); + } else { + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "0133 PLOGI: no memory " + "for config_link " + "Data: x%x x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_state, + ndlp->nlp_flag, ndlp->nlp_rpi); + goto out; + } + + lpfc_config_link(phba, mbox); + + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + goto out; + } + } + } + + lpfc_unreg_rpi(vport, ndlp); + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0018 PLOGI: no memory for reg_login " + "Data: x%x x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_state, + ndlp->nlp_flag, ndlp->nlp_rpi); + goto out; + } + + did = get_job_els_rsp64_did(phba, cmdiocb); + + if (lpfc_reg_rpi(phba, vport->vpi, did, + (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) { + switch (ndlp->nlp_DID) { + case NameServer_DID: + mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login; + /* Fabric Controller Node needs these parameters. 
*/ + memcpy(&ndlp->fc_sparam, sp, sizeof(struct serv_parm)); + break; + case FDMI_DID: + mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login; + break; + default: + ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; + } + + mbox->ctx_ndlp = lpfc_nlp_get(ndlp); + if (!mbox->ctx_ndlp) + goto out; + + mbox->vport = vport; + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) + != MBX_NOT_FINISHED) { + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_REG_LOGIN_ISSUE); + return ndlp->nlp_state; + } + if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + /* decrement node reference count to the failed mbox + * command + */ + lpfc_nlp_put(ndlp); + lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0134 PLOGI: cannot issue reg_login " + "Data: x%x x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_state, + ndlp->nlp_flag, ndlp->nlp_rpi); + } else { + mempool_free(mbox, phba->mbox_mem_pool); + + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0135 PLOGI: cannot format reg_login " + "Data: x%x x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_state, + ndlp->nlp_flag, ndlp->nlp_rpi); + } + + +out: + if (ndlp->nlp_DID == NameServer_DID) { + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0261 Cannot Register NameServer login\n"); + } + + /* + ** In case the node reference counter does not go to zero, ensure that + ** the stale state for the node is not processed. + */ + + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + return NLP_STE_FREED_NODE; +} + +static uint32_t +lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + return ndlp->nlp_state; +} + +static uint32_t +lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) +{ + struct lpfc_hba *phba; + LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; + MAILBOX_t *mb = &pmb->u.mb; + uint16_t rpi; + + phba = vport->phba; + /* Release the RPI */ + if (!(phba->pport->load_flag & FC_UNLOADING) && + !mb->mbxStatus) { + rpi = pmb->u.mb.un.varWords[0]; + lpfc_release_rpi(phba, vport, ndlp, rpi); + } + return ndlp->nlp_state; +} + +static uint32_t +lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_NODEV_REMOVE; + spin_unlock_irq(&ndlp->lock); + return ndlp->nlp_state; + } else { + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; + } +} + +static uint32_t +lpfc_device_recov_plogi_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) +{ + struct lpfc_hba *phba = vport->phba; + + /* Don't do anything that will mess up processing of the + * previous RSCN. 
+ */ + if (vport->fc_flag & FC_RSCN_DEFERRED) + return ndlp->nlp_state; + + /* software abort outstanding PLOGI */ + lpfc_els_abort(phba, ndlp); + + ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + spin_unlock_irq(&ndlp->lock); + + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb; + + /* software abort outstanding ADISC */ + lpfc_els_abort(phba, ndlp); + + cmdiocb = (struct lpfc_iocbq *) arg; + + if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + spin_unlock_irq(&ndlp->lock); + if (vport->num_disc_nodes) + lpfc_more_adisc(vport); + } + return ndlp->nlp_state; + } + ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) + lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb; + + cmdiocb = (struct lpfc_iocbq *) arg; + + /* software abort outstanding ADISC */ + lpfc_els_abort(phba, ndlp); + + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb; + + cmdiocb = (struct lpfc_iocbq *) arg; + + lpfc_rcv_padisc(vport, ndlp, cmdiocb); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb; + + cmdiocb = (struct lpfc_iocbq *) arg; + + /* Treat like rcv logo */ + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb, *rspiocb; + ADISC *ap; + int rc; + u32 ulp_status; + + cmdiocb = (struct lpfc_iocbq *) arg; + rspiocb = cmdiocb->rsp_iocb; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + + ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb); + + if ((ulp_status) || + (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) { + /* 1 sec timeout */ + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_DELAY_TMO; + spin_unlock_irq(&ndlp->lock); + ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; + + ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + lpfc_unreg_rpi(vport, ndlp); + return ndlp->nlp_state; + } + + if (phba->sli_rev == LPFC_SLI_REV4) { + rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL); + if (rc) { + /* Stay in state and retry. 
*/ + ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; + return ndlp->nlp_state; + } + } + + if (ndlp->nlp_type & NLP_FCP_TARGET) + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + + if (ndlp->nlp_type & NLP_NVME_TARGET) + ndlp->nlp_fc4_type |= NLP_FC4_NVME; + + if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { + ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); + } else { + ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + } + + return ndlp->nlp_state; +} + +static uint32_t +lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_NODEV_REMOVE; + spin_unlock_irq(&ndlp->lock); + return ndlp->nlp_state; + } else { + /* software abort outstanding ADISC */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; + } +} + +static uint32_t +lpfc_device_recov_adisc_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) +{ + struct lpfc_hba *phba = vport->phba; + + /* Don't do anything that will mess up processing of the + * previous RSCN. + */ + if (vport->fc_flag & FC_RSCN_DEFERRED) + return ndlp->nlp_state; + + /* software abort outstanding ADISC */ + lpfc_els_abort(phba, ndlp); + + ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + spin_unlock_irq(&ndlp->lock); + lpfc_disc_set_adisc(vport, ndlp); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + lpfc_rcv_plogi(vport, ndlp, cmdiocb); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + struct ls_rjt stat; + + if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) { + return ndlp->nlp_state; + } + if (vport->phba->nvmet_support) { + /* NVME Target mode. Handle and respond to the PRLI and + * transition to UNMAPPED provided the RPI has completed + * registration. + */ + if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + lpfc_rcv_prli(vport, ndlp, cmdiocb); + lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); + } else { + /* RPI registration has not completed. Reject the PRLI + * to prevent an illegal state transition when the + * rpi registration does complete. + */ + memset(&stat, 0, sizeof(struct ls_rjt)); + stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, + ndlp, NULL); + return ndlp->nlp_state; + } + } else { + /* Initiator mode. 
*/ + lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); + } + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + LPFC_MBOXQ_t *mb; + LPFC_MBOXQ_t *nextmb; + + cmdiocb = (struct lpfc_iocbq *) arg; + + /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ + if ((mb = phba->sli.mbox_active)) { + if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && + (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) { + ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + lpfc_nlp_put(ndlp); + mb->ctx_ndlp = NULL; + mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + } + } + + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { + if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && + (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) { + ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + lpfc_nlp_put(ndlp); + list_del(&mb->list); + phba->sli.mboxq_cnt--; + lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED); + } + } + spin_unlock_irq(&phba->hbalock); + + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + lpfc_rcv_padisc(vport, ndlp, cmdiocb); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb; + + cmdiocb = (struct lpfc_iocbq *) arg; + lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; + MAILBOX_t *mb = &pmb->u.mb; + uint32_t did = mb->un.varWords[1]; + + if (mb->mbxStatus) { + /* RegLogin failed */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0246 RegLogin failed Data: x%x x%x x%x x%x " + "x%x\n", + did, mb->mbxStatus, vport->port_state, + mb->un.varRegLogin.vpi, + mb->un.varRegLogin.rpi); + /* + * If RegLogin failed due to lack of HBA resources do not + * retry discovery. + */ + if (mb->mbxStatus == MBXERR_RPI_FULL) { + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + return ndlp->nlp_state; + } + + /* Put ndlp in npr state set plogi timer for 1 sec */ + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_DELAY_TMO; + spin_unlock_irq(&ndlp->lock); + ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; + + lpfc_issue_els_logo(vport, ndlp, 0); + return ndlp->nlp_state; + } + + /* SLI4 ports have preallocated logical rpis. */ + if (phba->sli_rev < LPFC_SLI_REV4) + ndlp->nlp_rpi = mb->un.varWords[0]; + + ndlp->nlp_flag |= NLP_RPI_REGISTERED; + + /* Only if we are not a fabric nport do we issue PRLI */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "3066 RegLogin Complete on x%x x%x x%x\n", + did, ndlp->nlp_type, ndlp->nlp_fc4_type); + if (!(ndlp->nlp_type & NLP_FABRIC) && + (phba->nvmet_support == 0)) { + /* The driver supports FCP and NVME concurrently. If the + * ndlp's nlp_fc4_type is still zero, the driver doesn't + * know what PRLI to send yet. 
Figure that out now and + * call PRLI depending on the outcome. + */ + if (vport->fc_flag & FC_PT2PT) { + /* If we are pt2pt, there is no Fabric to determine + * the FC4 type of the remote nport. So if NVME + * is configured try it. + */ + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) && + (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || + vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + ndlp->nlp_fc4_type |= NLP_FC4_NVME; + /* We need to update the localport also */ + lpfc_nvme_update_localport(vport); + } + + } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + + } else if (ndlp->nlp_fc4_type == 0) { + /* If we are only configured for FCP, the driver + * should just issue PRLI for FCP. Otherwise issue + * GFT_ID to determine if remote port supports NVME. + */ + if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) { + lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID, 0, + ndlp->nlp_DID); + return ndlp->nlp_state; + } + ndlp->nlp_fc4_type = NLP_FC4_FCP; + } + + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); + if (lpfc_issue_els_prli(vport, ndlp, 0)) { + lpfc_issue_els_logo(vport, ndlp, 0); + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + } + } else { + if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support) + phba->targetport->port_id = vport->fc_myDID; + + /* Only Fabric ports should transition. NVME target + * must complete PRLI. + */ + if (ndlp->nlp_type & NLP_FABRIC) { + ndlp->nlp_fc4_type &= ~NLP_FC4_FCP; + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + } + } + return ndlp->nlp_state; +} + +static uint32_t +lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) +{ + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_NODEV_REMOVE; + spin_unlock_irq(&ndlp->lock); + return ndlp->nlp_state; + } else { + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; + } +} + +static uint32_t +lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) +{ + /* Don't do anything that will mess up processing of the + * previous RSCN. + */ + if (vport->fc_flag & FC_RSCN_DEFERRED) + return ndlp->nlp_state; + + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(&ndlp->lock); + + /* If we are a target we won't immediately transition into PRLI, + * so if REG_LOGIN already completed we don't need to ignore it. 
+ */ + if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) || + !vport->phba->nvmet_support) + ndlp->nlp_flag |= NLP_IGNR_REG_CMPL; + + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + spin_unlock_irq(&ndlp->lock); + lpfc_disc_set_adisc(vport, ndlp); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb; + + cmdiocb = (struct lpfc_iocbq *) arg; + + lpfc_rcv_plogi(vport, ndlp, cmdiocb); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) + return ndlp->nlp_state; + lpfc_rcv_prli(vport, ndlp, cmdiocb); + lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + /* Software abort outstanding PRLI before sending acc */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + lpfc_rcv_padisc(vport, ndlp, cmdiocb); + return ndlp->nlp_state; +} + +/* This routine is envoked when we rcv a PRLO request from a nport + * we are logged into. We should send back a PRLO rsp setting the + * appropriate bits. + * NEXT STATE = PRLI_ISSUE + */ +static uint32_t +lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb, *rspiocb; + struct lpfc_hba *phba = vport->phba; + PRLI *npr; + struct lpfc_nvme_prli *nvpr; + void *temp_ptr; + u32 ulp_status; + bool acc_imode_sps = false; + + cmdiocb = (struct lpfc_iocbq *) arg; + rspiocb = cmdiocb->rsp_iocb; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + + /* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp + * format is different so NULL the two PRLI types so that the + * driver correctly gets the correct context. 
+ */ + npr = NULL; + nvpr = NULL; + temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb); + if (cmdiocb->cmd_flag & LPFC_PRLI_FCP_REQ) + npr = (PRLI *) temp_ptr; + else if (cmdiocb->cmd_flag & LPFC_PRLI_NVME_REQ) + nvpr = (struct lpfc_nvme_prli *) temp_ptr; + + if (ulp_status) { + if ((vport->port_type == LPFC_NPIV_PORT) && + vport->cfg_restrict_login) { + goto out; + } + + /* Adjust the nlp_type accordingly if the PRLI failed */ + if (npr) + ndlp->nlp_fc4_type &= ~NLP_FC4_FCP; + if (nvpr) + ndlp->nlp_fc4_type &= ~NLP_FC4_NVME; + + /* We can't set the DSM state till BOTH PRLIs complete */ + goto out_err; + } + + if (npr && npr->prliType == PRLI_FCP_TYPE) { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_ELS | LOG_NODE | LOG_DISCOVERY, + "6028 FCP NPR PRLI Cmpl Init %d Target %d " + "EIP %d AccCode x%x\n", + npr->initiatorFunc, npr->targetFunc, + npr->estabImagePair, npr->acceptRspCode); + + if (npr->acceptRspCode == PRLI_INV_SRV_PARM) { + /* Strict initiators don't establish an image pair. */ + if (npr->initiatorFunc && !npr->targetFunc && + !npr->estabImagePair) + acc_imode_sps = true; + } + + if (npr->acceptRspCode == PRLI_REQ_EXECUTED || acc_imode_sps) { + if (npr->initiatorFunc) + ndlp->nlp_type |= NLP_FCP_INITIATOR; + if (npr->targetFunc) { + ndlp->nlp_type |= NLP_FCP_TARGET; + if (npr->writeXferRdyDis) + ndlp->nlp_flag |= NLP_FIRSTBURST; + } + if (npr->Retry) + ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; + } + } else if (nvpr && + (bf_get_be32(prli_acc_rsp_code, nvpr) == + PRLI_REQ_EXECUTED) && + (bf_get_be32(prli_type_code, nvpr) == + PRLI_NVME_TYPE)) { + + /* Complete setting up the remote ndlp personality. */ + if (bf_get_be32(prli_init, nvpr)) + ndlp->nlp_type |= NLP_NVME_INITIATOR; + + if (phba->nsler && bf_get_be32(prli_nsler, nvpr) && + bf_get_be32(prli_conf, nvpr)) + + ndlp->nlp_nvme_info |= NLP_NVME_NSLER; + else + ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; + + /* Target driver cannot solicit NVME FB. */ + if (bf_get_be32(prli_tgt, nvpr)) { + /* Complete the nvme target roles. The transport + * needs to know if the rport is capable of + * discovery in addition to its role. + */ + ndlp->nlp_type |= NLP_NVME_TARGET; + if (bf_get_be32(prli_disc, nvpr)) + ndlp->nlp_type |= NLP_NVME_DISCOVERY; + + /* + * If prli_fba is set, the Target supports FirstBurst. + * If prli_fb_sz is 0, the FirstBurst size is unlimited, + * otherwise it defines the actual size supported by + * the NVME Target. + */ + if ((bf_get_be32(prli_fba, nvpr) == 1) && + (phba->cfg_nvme_enable_fb) && + (!phba->nvmet_support)) { + /* Both sides support FB. The target's first + * burst size is a 512 byte encoded value. 
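+				 * (Editor's illustration, not part of the
+				 * original driver comment: because the value
+				 * is carried in 512-byte units, a reported
+				 * prli_fb_sz of 8 works out to 8 * 512 = 4096
+				 * bytes once shifted by LPFC_NVME_FB_SHIFT
+				 * below, while a value of 0 falls back to
+				 * LPFC_NVME_MAX_FB.)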
+ */ + ndlp->nlp_flag |= NLP_FIRSTBURST; + ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz, + nvpr); + + /* Expressed in units of 512 bytes */ + if (ndlp->nvme_fb_size) + ndlp->nvme_fb_size <<= + LPFC_NVME_FB_SHIFT; + else + ndlp->nvme_fb_size = LPFC_NVME_MAX_FB; + } + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6029 NVME PRLI Cmpl w1 x%08x " + "w4 x%08x w5 x%08x flag x%x, " + "fcp_info x%x nlp_type x%x\n", + be32_to_cpu(nvpr->word1), + be32_to_cpu(nvpr->word4), + be32_to_cpu(nvpr->word5), + ndlp->nlp_flag, ndlp->nlp_fcp_info, + ndlp->nlp_type); + } + if (!(ndlp->nlp_type & NLP_FCP_TARGET) && + (vport->port_type == LPFC_NPIV_PORT) && + vport->cfg_restrict_login) { +out: + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_TARGET_REMOVE; + spin_unlock_irq(&ndlp->lock); + lpfc_issue_els_logo(vport, ndlp, 0); + + ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + return ndlp->nlp_state; + } + +out_err: + /* The ndlp state cannot move to MAPPED or UNMAPPED before all PRLIs + * are complete. + */ + if (ndlp->fc4_prli_sent == 0) { + ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; + if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) + lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); + else if (ndlp->nlp_type & + (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR)) + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + } else + lpfc_printf_vlog(vport, + KERN_INFO, LOG_ELS, + "3067 PRLI's still outstanding " + "on x%06x - count %d, Pend Node Mode " + "transition...\n", + ndlp->nlp_DID, ndlp->fc4_prli_sent); + + return ndlp->nlp_state; +} + +/*! lpfc_device_rm_prli_issue + * + * \pre + * \post + * \param phba + * \param ndlp + * \param arg + * \param evt + * \return uint32_t + * + * \b Description: + * This routine is envoked when we a request to remove a nport we are in the + * process of PRLIing. We should software abort outstanding prli, unreg + * login, send a logout. We will change node state to UNUSED_NODE, put it + * on plogi list so it can be freed when LOGO completes. + * + */ + +static uint32_t +lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_NODEV_REMOVE; + spin_unlock_irq(&ndlp->lock); + return ndlp->nlp_state; + } else { + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; + } +} + + +/*! lpfc_device_recov_prli_issue + * + * \pre + * \post + * \param phba + * \param ndlp + * \param arg + * \param evt + * \return uint32_t + * + * \b Description: + * The routine is envoked when the state of a device is unknown, like + * during a link down. We should remove the nodelist entry from the + * unmapped list, issue a UNREG_LOGIN, do a software abort of the + * outstanding PRLI command, then free the node entry. + */ +static uint32_t +lpfc_device_recov_prli_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) +{ + struct lpfc_hba *phba = vport->phba; + + /* Don't do anything that will mess up processing of the + * previous RSCN. 
+ */ + if (vport->fc_flag & FC_RSCN_DEFERRED) + return ndlp->nlp_state; + + /* software abort outstanding PRLI */ + lpfc_els_abort(phba, ndlp); + + ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + spin_unlock_irq(&ndlp->lock); + lpfc_disc_set_adisc(vport, ndlp); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; + struct ls_rjt stat; + + memset(&stat, 0, sizeof(struct ls_rjt)); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; + struct ls_rjt stat; + + memset(&stat, 0, sizeof(struct ls_rjt)); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_LOGO_ACC; + spin_unlock_irq(&ndlp->lock); + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; + struct ls_rjt stat; + + memset(&stat, 0, sizeof(struct ls_rjt)); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; + struct ls_rjt stat; + + memset(&stat, 0, sizeof(struct ls_rjt)); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + spin_unlock_irq(&ndlp->lock); + lpfc_disc_set_adisc(vport, ndlp); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + /* + * DevLoss has timed out and is calling for Device Remove. 
+ * In this case, abort the LOGO and cleanup the ndlp + */ + + lpfc_unreg_rpi(vport, ndlp); + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; +} + +static uint32_t +lpfc_device_recov_logo_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + /* + * Device Recovery events have no meaning for a node with a LOGO + * outstanding. The LOGO has to complete first and handle the + * node from that point. + */ + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + lpfc_rcv_plogi(vport, ndlp, cmdiocb); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) + return ndlp->nlp_state; + + lpfc_rcv_prli(vport, ndlp, cmdiocb); + lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + lpfc_rcv_padisc(vport, ndlp, cmdiocb); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_device_rm_unmap_node(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) +{ + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; +} + +static uint32_t +lpfc_device_recov_unmap_node(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) +{ + ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); + spin_unlock_irq(&ndlp->lock); + lpfc_disc_set_adisc(vport, ndlp); + + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + lpfc_rcv_plogi(vport, ndlp, cmdiocb); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) + return ndlp->nlp_state; + lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 
+ return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + lpfc_rcv_padisc(vport, ndlp, cmdiocb); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + /* flush the target */ + lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT); + + /* Treat like rcv logo */ + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_device_recov_mapped_node(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) +{ + lpfc_disc_set_adisc(vport, ndlp); + + ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); + spin_unlock_irq(&ndlp->lock); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + /* Ignore PLOGI if we have an outstanding LOGO */ + if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) + return ndlp->nlp_state; + if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { + lpfc_cancel_retry_delay_tmo(vport, ndlp); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC); + spin_unlock_irq(&ndlp->lock); + } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + /* send PLOGI immediately, move to PLOGI issue state */ + if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + ndlp->nlp_prev_state = NLP_STE_NPR_NODE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); + } + } + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + struct ls_rjt stat; + + memset(&stat, 0, sizeof (struct ls_rjt)); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + + if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + /* + * ADISC nodes will be handled in regular discovery path after + * receiving response from NS. + * + * For other nodes, Send PLOGI to trigger an implicit LOGO. + */ + if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + ndlp->nlp_prev_state = NLP_STE_NPR_NODE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); + } + } + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + lpfc_rcv_padisc(vport, ndlp, cmdiocb); + /* + * Do not start discovery if discovery is about to start + * or discovery in progress for this node. 
Starting discovery + * here will affect the counting of discovery threads. + */ + if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && + !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + /* + * ADISC nodes will be handled in regular discovery path after + * receiving response from NS. + * + * For other nodes, Send PLOGI to trigger an implicit LOGO. + */ + if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + ndlp->nlp_prev_state = NLP_STE_NPR_NODE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); + } + } + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_LOGO_ACC; + spin_unlock_irq(&ndlp->lock); + + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + + if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_DELAY_TMO; + ndlp->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(&ndlp->lock); + ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; + } else { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(&ndlp->lock); + } + return ndlp->nlp_state; +} + +static uint32_t +lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb, *rspiocb; + u32 ulp_status; + + cmdiocb = (struct lpfc_iocbq *) arg; + rspiocb = cmdiocb->rsp_iocb; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + + if (ulp_status) + return NLP_STE_FREED_NODE; + + return ndlp->nlp_state; +} + +static uint32_t +lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb, *rspiocb; + u32 ulp_status; + + cmdiocb = (struct lpfc_iocbq *) arg; + rspiocb = cmdiocb->rsp_iocb; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + + if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; + } + return ndlp->nlp_state; +} + +static uint32_t +lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + /* For the fabric port just clear the fc flags. */ + if (ndlp->nlp_DID == Fabric_DID) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + spin_unlock_irq(shost->host_lock); + } + lpfc_unreg_rpi(vport, ndlp); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb, *rspiocb; + u32 ulp_status; + + cmdiocb = (struct lpfc_iocbq *) arg; + rspiocb = cmdiocb->rsp_iocb; + + ulp_status = get_job_ulpstatus(phba, rspiocb); + + if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; + } + return ndlp->nlp_state; +} + +static uint32_t +lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; + MAILBOX_t *mb = &pmb->u.mb; + + if (!mb->mbxStatus) { + /* SLI4 ports have preallocated logical rpis. 
*/ + if (vport->phba->sli_rev < LPFC_SLI_REV4) + ndlp->nlp_rpi = mb->un.varWords[0]; + ndlp->nlp_flag |= NLP_RPI_REGISTERED; + if (ndlp->nlp_flag & NLP_LOGO_ACC) { + lpfc_unreg_rpi(vport, ndlp); + } + } else { + if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; + } + } + return ndlp->nlp_state; +} + +static uint32_t +lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_NODEV_REMOVE; + spin_unlock_irq(&ndlp->lock); + return ndlp->nlp_state; + } + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; +} + +static uint32_t +lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + /* Don't do anything that will mess up processing of the + * previous RSCN. + */ + if (vport->fc_flag & FC_RSCN_DEFERRED) + return ndlp->nlp_state; + + lpfc_cancel_retry_delay_tmo(vport, ndlp); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); + spin_unlock_irq(&ndlp->lock); + return ndlp->nlp_state; +} + + +/* This next section defines the NPort Discovery State Machine */ + +/* There are 4 different double linked lists nodelist entries can reside on. + * The plogi list and adisc list are used when Link Up discovery or RSCN + * processing is needed. Each list holds the nodes that we will send PLOGI + * or ADISC on. These lists will keep track of what nodes will be effected + * by an RSCN, or a Link Up (Typically, all nodes are effected on Link Up). + * The unmapped_list will contain all nodes that we have successfully logged + * into at the Fibre Channel level. The mapped_list will contain all nodes + * that are mapped FCP targets. + */ +/* + * The bind list is a list of undiscovered (potentially non-existent) nodes + * that we have saved binding information on. This information is used when + * nodes transition from the unmapped to the mapped list. + */ +/* For UNUSED_NODE state, the node has just been allocated . + * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on + * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list + * and put on the unmapped list. For ADISC processing, the node is taken off + * the ADISC list and placed on either the mapped or unmapped list (depending + * on its previous state). Once on the unmapped list, a PRLI is issued and the + * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is + * changed to UNMAPPED_NODE. If the completion indicates a mapped + * node, the node is taken off the unmapped list. The binding list is checked + * for a valid binding, or a binding is automatically assigned. If binding + * assignment is unsuccessful, the node is left on the unmapped list. If + * binding assignment is successful, the associated binding list entry (if + * any) is removed, and the node is placed on the mapped list. + */ +/* + * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped + * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers + * expire, all effected nodes will receive a DEVICE_RM event. + */ +/* + * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists + * to either the ADISC or PLOGI list. 
After a Nameserver query or ALPA loopmap + * check, additional nodes may be added or removed (via DEVICE_RM) to / from + * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated, + * we will first process the ADISC list. 32 entries are processed initially and + * ADISC is initited for each one. Completions / Events for each node are + * funnelled thru the state machine. As each node finishes ADISC processing, it + * starts ADISC for any nodes waiting for ADISC processing. If no nodes are + * waiting, and the ADISC list count is identically 0, then we are done. For + * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we + * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI + * list. 32 entries are processed initially and PLOGI is initited for each one. + * Completions / Events for each node are funnelled thru the state machine. As + * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting + * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is + * indentically 0, then we are done. We have now completed discovery / RSCN + * handling. Upon completion, ALL nodes should be on either the mapped or + * unmapped lists. + */ + +static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT]) + (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = { + /* Action routine Event Current State */ + lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */ + lpfc_rcv_els_unused_node, /* RCV_PRLI */ + lpfc_rcv_logo_unused_node, /* RCV_LOGO */ + lpfc_rcv_els_unused_node, /* RCV_ADISC */ + lpfc_rcv_els_unused_node, /* RCV_PDISC */ + lpfc_rcv_els_unused_node, /* RCV_PRLO */ + lpfc_disc_illegal, /* CMPL_PLOGI */ + lpfc_disc_illegal, /* CMPL_PRLI */ + lpfc_cmpl_logo_unused_node, /* CMPL_LOGO */ + lpfc_disc_illegal, /* CMPL_ADISC */ + lpfc_disc_illegal, /* CMPL_REG_LOGIN */ + lpfc_device_rm_unused_node, /* DEVICE_RM */ + lpfc_device_recov_unused_node, /* DEVICE_RECOVERY */ + + lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */ + lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */ + lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */ + lpfc_rcv_els_plogi_issue, /* RCV_ADISC */ + lpfc_rcv_els_plogi_issue, /* RCV_PDISC */ + lpfc_rcv_els_plogi_issue, /* RCV_PRLO */ + lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */ + lpfc_disc_illegal, /* CMPL_PRLI */ + lpfc_cmpl_logo_plogi_issue, /* CMPL_LOGO */ + lpfc_disc_illegal, /* CMPL_ADISC */ + lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN */ + lpfc_device_rm_plogi_issue, /* DEVICE_RM */ + lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */ + + lpfc_rcv_plogi_adisc_issue, /* RCV_PLOGI ADISC_ISSUE */ + lpfc_rcv_prli_adisc_issue, /* RCV_PRLI */ + lpfc_rcv_logo_adisc_issue, /* RCV_LOGO */ + lpfc_rcv_padisc_adisc_issue, /* RCV_ADISC */ + lpfc_rcv_padisc_adisc_issue, /* RCV_PDISC */ + lpfc_rcv_prlo_adisc_issue, /* RCV_PRLO */ + lpfc_disc_illegal, /* CMPL_PLOGI */ + lpfc_disc_illegal, /* CMPL_PRLI */ + lpfc_disc_illegal, /* CMPL_LOGO */ + lpfc_cmpl_adisc_adisc_issue, /* CMPL_ADISC */ + lpfc_disc_illegal, /* CMPL_REG_LOGIN */ + lpfc_device_rm_adisc_issue, /* DEVICE_RM */ + lpfc_device_recov_adisc_issue, /* DEVICE_RECOVERY */ + + lpfc_rcv_plogi_reglogin_issue, /* RCV_PLOGI REG_LOGIN_ISSUE */ + lpfc_rcv_prli_reglogin_issue, /* RCV_PLOGI */ + lpfc_rcv_logo_reglogin_issue, /* RCV_LOGO */ + lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */ + lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */ + lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */ + lpfc_cmpl_plogi_illegal, /* 
CMPL_PLOGI */ + lpfc_disc_illegal, /* CMPL_PRLI */ + lpfc_disc_illegal, /* CMPL_LOGO */ + lpfc_disc_illegal, /* CMPL_ADISC */ + lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */ + lpfc_device_rm_reglogin_issue, /* DEVICE_RM */ + lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */ + + lpfc_rcv_plogi_prli_issue, /* RCV_PLOGI PRLI_ISSUE */ + lpfc_rcv_prli_prli_issue, /* RCV_PRLI */ + lpfc_rcv_logo_prli_issue, /* RCV_LOGO */ + lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */ + lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */ + lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */ + lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */ + lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */ + lpfc_disc_illegal, /* CMPL_LOGO */ + lpfc_disc_illegal, /* CMPL_ADISC */ + lpfc_disc_illegal, /* CMPL_REG_LOGIN */ + lpfc_device_rm_prli_issue, /* DEVICE_RM */ + lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */ + + lpfc_rcv_plogi_logo_issue, /* RCV_PLOGI LOGO_ISSUE */ + lpfc_rcv_prli_logo_issue, /* RCV_PRLI */ + lpfc_rcv_logo_logo_issue, /* RCV_LOGO */ + lpfc_rcv_padisc_logo_issue, /* RCV_ADISC */ + lpfc_rcv_padisc_logo_issue, /* RCV_PDISC */ + lpfc_rcv_prlo_logo_issue, /* RCV_PRLO */ + lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */ + lpfc_disc_illegal, /* CMPL_PRLI */ + lpfc_cmpl_logo_logo_issue, /* CMPL_LOGO */ + lpfc_disc_illegal, /* CMPL_ADISC */ + lpfc_disc_illegal, /* CMPL_REG_LOGIN */ + lpfc_device_rm_logo_issue, /* DEVICE_RM */ + lpfc_device_recov_logo_issue, /* DEVICE_RECOVERY */ + + lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */ + lpfc_rcv_prli_unmap_node, /* RCV_PRLI */ + lpfc_rcv_logo_unmap_node, /* RCV_LOGO */ + lpfc_rcv_padisc_unmap_node, /* RCV_ADISC */ + lpfc_rcv_padisc_unmap_node, /* RCV_PDISC */ + lpfc_rcv_prlo_unmap_node, /* RCV_PRLO */ + lpfc_disc_illegal, /* CMPL_PLOGI */ + lpfc_disc_illegal, /* CMPL_PRLI */ + lpfc_disc_illegal, /* CMPL_LOGO */ + lpfc_disc_illegal, /* CMPL_ADISC */ + lpfc_disc_illegal, /* CMPL_REG_LOGIN */ + lpfc_device_rm_unmap_node, /* DEVICE_RM */ + lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */ + + lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */ + lpfc_rcv_prli_mapped_node, /* RCV_PRLI */ + lpfc_rcv_logo_mapped_node, /* RCV_LOGO */ + lpfc_rcv_padisc_mapped_node, /* RCV_ADISC */ + lpfc_rcv_padisc_mapped_node, /* RCV_PDISC */ + lpfc_rcv_prlo_mapped_node, /* RCV_PRLO */ + lpfc_disc_illegal, /* CMPL_PLOGI */ + lpfc_disc_illegal, /* CMPL_PRLI */ + lpfc_disc_illegal, /* CMPL_LOGO */ + lpfc_disc_illegal, /* CMPL_ADISC */ + lpfc_disc_illegal, /* CMPL_REG_LOGIN */ + lpfc_disc_illegal, /* DEVICE_RM */ + lpfc_device_recov_mapped_node, /* DEVICE_RECOVERY */ + + lpfc_rcv_plogi_npr_node, /* RCV_PLOGI NPR_NODE */ + lpfc_rcv_prli_npr_node, /* RCV_PRLI */ + lpfc_rcv_logo_npr_node, /* RCV_LOGO */ + lpfc_rcv_padisc_npr_node, /* RCV_ADISC */ + lpfc_rcv_padisc_npr_node, /* RCV_PDISC */ + lpfc_rcv_prlo_npr_node, /* RCV_PRLO */ + lpfc_cmpl_plogi_npr_node, /* CMPL_PLOGI */ + lpfc_cmpl_prli_npr_node, /* CMPL_PRLI */ + lpfc_cmpl_logo_npr_node, /* CMPL_LOGO */ + lpfc_cmpl_adisc_npr_node, /* CMPL_ADISC */ + lpfc_cmpl_reglogin_npr_node, /* CMPL_REG_LOGIN */ + lpfc_device_rm_npr_node, /* DEVICE_RM */ + lpfc_device_recov_npr_node, /* DEVICE_RECOVERY */ +}; + +int +lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + uint32_t cur_state, rc; + uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *, + uint32_t); + uint32_t got_ndlp = 0; + uint32_t data1; + + if (lpfc_nlp_get(ndlp)) + got_ndlp = 1; + + cur_state = ndlp->nlp_state; + + data1 = 
(((uint32_t)ndlp->nlp_fc4_type << 16) | + ((uint32_t)ndlp->nlp_type)); + /* DSM in event on NPort in state */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0211 DSM in event x%x on NPort x%x in " + "state %d rpi x%x Data: x%x x%x\n", + evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi, + ndlp->nlp_flag, data1); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, + "DSM in: evt:%d ste:%d did:x%x", + evt, cur_state, ndlp->nlp_DID); + + func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt]; + rc = (func) (vport, ndlp, arg, evt); + + /* DSM out state on NPort */ + if (got_ndlp) { + data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) | + ((uint32_t)ndlp->nlp_type)); + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0212 DSM out state %d on NPort x%x " + "rpi x%x Data: x%x x%x\n", + rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag, + data1); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, + "DSM out: ste:%d did:x%x flg:x%x", + rc, ndlp->nlp_DID, ndlp->nlp_flag); + /* Decrement the ndlp reference count held for this function */ + lpfc_nlp_put(ndlp); + } else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0213 DSM out state %d on NPort free\n", rc); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, + "DSM out: ste:%d did:x%x flg:x%x", + rc, 0, 0); + } + + return rc; +} diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c new file mode 100644 index 000000000..96e11a26c --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -0,0 +1,2846 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + ********************************************************************/ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "lpfc_version.h" +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_nvme.h" +#include "lpfc_scsi.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" + +/* NVME initiator-based functions */ + +static struct lpfc_io_buf * +lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, + int idx, int expedite); + +static void +lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *); + +static struct nvme_fc_port_template lpfc_nvme_template; + +/** + * lpfc_nvme_create_queue - + * @pnvme_lport: Transport localport that LS is to be issued from + * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. + * @qsize: Size of the queue in bytes + * @handle: An opaque driver handle used in follow-up calls. + * + * Driver registers this routine to preallocate and initialize any + * internal data structures to bind the @qidx to its internal IO queues. + * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ. + * + * Return value : + * 0 - Success + * -EINVAL - Unsupported input value. + * -ENOMEM - Could not alloc necessary memory + **/ +static int +lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, + unsigned int qidx, u16 qsize, + void **handle) +{ + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + struct lpfc_nvme_qhandle *qhandle; + char *str; + + if (!pnvme_lport->private) + return -ENOMEM; + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + vport = lport->vport; + + if (!vport || vport->load_flag & FC_UNLOADING || + vport->phba->hba_flag & HBA_IOQ_FLUSH) + return -ENODEV; + + qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL); + if (qhandle == NULL) + return -ENOMEM; + + qhandle->cpu_id = raw_smp_processor_id(); + qhandle->qidx = qidx; + /* + * NVME qidx == 0 is the admin queue, so both admin queue + * and first IO queue will use MSI-X vector and associated + * EQ/CQ/WQ at index 0. After that they are sequentially assigned. + */ + if (qidx) { + str = "IO "; /* IO queue */ + qhandle->index = ((qidx - 1) % + lpfc_nvme_template.max_hw_queues); + } else { + str = "ADM"; /* Admin queue */ + qhandle->index = qidx; + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, + "6073 Binding %s HdwQueue %d (cpu %d) to " + "hdw_queue %d qhandle x%px\n", str, + qidx, qhandle->cpu_id, qhandle->index, qhandle); + *handle = (void *)qhandle; + return 0; +} + +/** + * lpfc_nvme_delete_queue - + * @pnvme_lport: Transport localport that LS is to be issued from + * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. + * @handle: An opaque driver handle from lpfc_nvme_create_queue + * + * Driver registers this routine to free + * any internal data structures to bind the @qidx to its internal + * IO queues. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. 
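+ *
+ * The @handle freed here is the qhandle allocated by lpfc_nvme_create_queue.
+ * As a rough sketch (illustrative only), that routine binds the transport
+ * queue index to a driver hardware queue as:
+ *
+ *     index = qidx ? (qidx - 1) % lpfc_nvme_template.max_hw_queues : 0;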
+ **/ +static void +lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport, + unsigned int qidx, + void *handle) +{ + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + + if (!pnvme_lport->private) + return; + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + vport = lport->vport; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, + "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n", + lport, qidx, handle); + kfree(handle); +} + +static void +lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport) +{ + struct lpfc_nvme_lport *lport = localport->private; + + lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME, + "6173 localport x%px delete complete\n", + lport); + + /* release any threads waiting for the unreg to complete */ + if (lport->vport->localport) + complete(lport->lport_unreg_cmp); +} + +/* lpfc_nvme_remoteport_delete + * + * @remoteport: Pointer to an nvme transport remoteport instance. + * + * This is a template downcall. NVME transport calls this function + * when it has completed the unregistration of a previously + * registered remoteport. + * + * Return value : + * None + */ +static void +lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) +{ + struct lpfc_nvme_rport *rport = remoteport->private; + struct lpfc_vport *vport; + struct lpfc_nodelist *ndlp; + u32 fc4_xpt_flags; + + ndlp = rport->ndlp; + if (!ndlp) { + pr_err("**** %s: NULL ndlp on rport x%px remoteport x%px\n", + __func__, rport, remoteport); + goto rport_err; + } + + vport = ndlp->vport; + if (!vport) { + pr_err("**** %s: Null vport on ndlp x%px, ste x%x rport x%px\n", + __func__, ndlp, ndlp->nlp_state, rport); + goto rport_err; + } + + fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD; + + /* Remove this rport from the lport's list - memory is owned by the + * transport. Remove the ndlp reference for the NVME transport before + * calling state machine to remove the node. + */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6146 remoteport delete of remoteport x%px, ndlp x%px " + "DID x%x xflags x%x\n", + remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags); + spin_lock_irq(&ndlp->lock); + + /* The register rebind might have occurred before the delete + * downcall. Guard against this race. + */ + if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT) + ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD); + + spin_unlock_irq(&ndlp->lock); + + /* On a devloss timeout event, one more put is executed provided the + * NVME and SCSI rport unregister requests are complete. + */ + if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags)) + lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); + + rport_err: + return; +} + +/** + * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request + * @phba: pointer to lpfc hba data structure. + * @axchg: pointer to exchange context for the NVME LS request + * + * This routine is used for processing an asychronously received NVME LS + * request. Any remaining validation is done and the LS is then forwarded + * to the nvme-fc transport via nvme_fc_rcv_ls_req(). + * + * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing) + * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done. + * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg. 
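+ *
+ * A condensed sketch of the checks performed below (illustrative only, not
+ * a contract); validation failures return -EINVAL rather than 1, but any
+ * nonzero result means the LS was not delivered:
+ *
+ *     if (!axchg->ndlp->nrport || !vport->localport ||
+ *         vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ *             return -EINVAL;
+ *     rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp,
+ *                             axchg->payload, axchg->size);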
+ * + * Returns 0 if LS was handled and delivered to the transport + * Returns 1 if LS failed to be handled and should be dropped + */ +int +lpfc_nvme_handle_lsreq(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *axchg) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + struct lpfc_vport *vport; + struct lpfc_nvme_rport *lpfc_rport; + struct nvme_fc_remote_port *remoteport; + struct lpfc_nvme_lport *lport; + uint32_t *payload = axchg->payload; + int rc; + + vport = axchg->ndlp->vport; + lpfc_rport = axchg->ndlp->nrport; + if (!lpfc_rport) + return -EINVAL; + + remoteport = lpfc_rport->remoteport; + if (!vport->localport || + vport->phba->hba_flag & HBA_IOQ_FLUSH) + return -EINVAL; + + lport = vport->localport->private; + if (!lport) + return -EINVAL; + + rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload, + axchg->size); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x " + "%08x %08x %08x\n", + axchg->size, rc, + *payload, *(payload+1), *(payload+2), + *(payload+3), *(payload+4), *(payload+5)); + + if (!rc) + return 0; +#endif + return 1; +} + +/** + * __lpfc_nvme_ls_req_cmp - Generic completion handler for a NVME + * LS request. + * @phba: Pointer to HBA context object + * @vport: The local port that issued the LS + * @cmdwqe: Pointer to driver command WQE object. + * @wcqe: Pointer to driver response CQE object. + * + * This function is the generic completion handler for NVME LS requests. + * The function updates any states and statistics, calls the transport + * ls_req done() routine, then tears down the command and buffers used + * for the LS request. + **/ +void +__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport, + struct lpfc_iocbq *cmdwqe, + struct lpfc_wcqe_complete *wcqe) +{ + struct nvmefc_ls_req *pnvme_lsreq; + struct lpfc_dmabuf *buf_ptr; + struct lpfc_nodelist *ndlp; + int status; + + pnvme_lsreq = cmdwqe->context_un.nvme_lsreq; + ndlp = cmdwqe->ndlp; + buf_ptr = cmdwqe->bpl_dmabuf; + + status = bf_get(lpfc_wcqe_c_status, wcqe); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x " + "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px " + "ndlp:x%px\n", + pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, + cmdwqe->sli4_xritag, status, + (wcqe->parameter & 0xffff), + cmdwqe, pnvme_lsreq, cmdwqe->bpl_dmabuf, + ndlp); + + lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n", + cmdwqe->sli4_xritag, status, wcqe->parameter); + + if (buf_ptr) { + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); + kfree(buf_ptr); + cmdwqe->bpl_dmabuf = NULL; + } + if (pnvme_lsreq->done) { + if (status != CQE_STATUS_SUCCESS) + status = -ENXIO; + pnvme_lsreq->done(pnvme_lsreq, status); + } else { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6046 NVMEx cmpl without done call back? " + "Data x%px DID %x Xri: %x status %x\n", + pnvme_lsreq, ndlp ? 
ndlp->nlp_DID : 0, + cmdwqe->sli4_xritag, status); + } + if (ndlp) { + lpfc_nlp_put(ndlp); + cmdwqe->ndlp = NULL; + } + lpfc_sli_release_iocbq(phba, cmdwqe); +} + +static void +lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe) +{ + struct lpfc_vport *vport = cmdwqe->vport; + struct lpfc_nvme_lport *lport; + uint32_t status; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; + + status = bf_get(lpfc_wcqe_c_status, wcqe); + + if (vport->localport) { + lport = (struct lpfc_nvme_lport *)vport->localport->private; + if (lport) { + atomic_inc(&lport->fc4NvmeLsCmpls); + if (status) { + if (bf_get(lpfc_wcqe_c_xb, wcqe)) + atomic_inc(&lport->cmpl_ls_xb); + atomic_inc(&lport->cmpl_ls_err); + } + } + } + + __lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe); +} + +static int +lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, + struct lpfc_dmabuf *inp, + struct nvmefc_ls_req *pnvme_lsreq, + void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *), + struct lpfc_nodelist *ndlp, uint32_t num_entry, + uint32_t tmo, uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + union lpfc_wqe128 *wqe; + struct lpfc_iocbq *genwqe; + struct ulp_bde64 *bpl; + struct ulp_bde64 bde; + int i, rc, xmit_len, first_len; + + /* Allocate buffer for command WQE */ + genwqe = lpfc_sli_get_iocbq(phba); + if (genwqe == NULL) + return 1; + + wqe = &genwqe->wqe; + /* Initialize only 64 bytes */ + memset(wqe, 0, sizeof(union lpfc_wqe)); + + genwqe->bpl_dmabuf = bmp; + genwqe->cmd_flag |= LPFC_IO_NVME_LS; + + /* Save for completion so we can release these resources */ + genwqe->ndlp = lpfc_nlp_get(ndlp); + if (!genwqe->ndlp) { + dev_warn(&phba->pcidev->dev, + "Warning: Failed node ref, not sending LS_REQ\n"); + lpfc_sli_release_iocbq(phba, genwqe); + return 1; + } + + genwqe->context_un.nvme_lsreq = pnvme_lsreq; + /* Fill in payload, bp points to frame payload */ + + if (!tmo) + /* FC spec states we need 3 * ratov for CT requests */ + tmo = (3 * phba->fc_ratov); + + /* For this command calculate the xmit length of the request bde. 
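+ * Only BUFF_TYPE_BDE_64 entries are summed; the first entry's size is kept
+ * separately because it seeds the embedded BDE (words 0-2) and the
+ * request_payload_len (word 3) of the GEN_REQUEST64 WQE. For example, with
+ * the two-entry BPL built by __lpfc_nvme_ls_req the walk stops at the
+ * response entry (BUFF_TYPE_BDE_64I), so xmit_len == first_len == rqstlen.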
*/ + xmit_len = 0; + first_len = 0; + bpl = (struct ulp_bde64 *)bmp->virt; + for (i = 0; i < num_entry; i++) { + bde.tus.w = bpl[i].tus.w; + if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) + break; + xmit_len += bde.tus.f.bdeSize; + if (i == 0) + first_len = xmit_len; + } + + genwqe->num_bdes = num_entry; + genwqe->hba_wqidx = 0; + + /* Words 0 - 2 */ + wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + wqe->generic.bde.tus.f.bdeSize = first_len; + wqe->generic.bde.addrLow = bpl[0].addrLow; + wqe->generic.bde.addrHigh = bpl[0].addrHigh; + + /* Word 3 */ + wqe->gen_req.request_payload_len = first_len; + + /* Word 4 */ + + /* Word 5 */ + bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0); + bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1); + bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1); + bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ); + bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME); + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag); + + /* Word 7 */ + bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo); + bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3); + bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE); + bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI); + + /* Word 8 */ + wqe->gen_req.wqe_com.abort_tag = genwqe->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag); + + /* Word 10 */ + bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); + bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); + bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); + bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); + bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND); + + + /* Issue GEN REQ WQE for NPORT */ + genwqe->cmd_cmpl = cmpl; + genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT; + genwqe->vport = vport; + genwqe->retry = retry; + + lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n", + genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID); + + rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe); + if (rc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6045 Issue GEN REQ WQE to NPORT x%x " + "Data: x%x x%x rc x%x\n", + ndlp->nlp_DID, genwqe->iotag, + vport->port_state, rc); + lpfc_nlp_put(ndlp); + lpfc_sli_release_iocbq(phba, genwqe); + return 1; + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS, + "6050 Issue GEN REQ WQE to NPORT x%x " + "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px " + "bmp:x%px xmit:%d 1st:%d\n", + ndlp->nlp_DID, genwqe->sli4_xritag, + vport->port_state, + genwqe, pnvme_lsreq, bmp, xmit_len, first_len); + return 0; +} + + +/** + * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request + * @vport: The local port issuing the LS + * @ndlp: The remote port to send the LS to + * @pnvme_lsreq: Pointer to LS request structure from the transport + * @gen_req_cmp: Completion call-back + * + * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST + * WQE to perform the LS operation. 
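+ *
+ * Rough shape of the buffer list this routine builds (two ulp_bde64
+ * entries behind one lpfc_dmabuf; sketch only):
+ *
+ *     bpl[0]: rqstdma / rqstlen, bdeFlags 0                  (LS payload)
+ *     bpl[1]: rspdma  / rsplen,  bdeFlags BUFF_TYPE_BDE_64I  (LS response)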
+ * + * Return value : + * 0 - Success + * non-zero: various error codes, in form of -Exxx + **/ +int +__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct nvmefc_ls_req *pnvme_lsreq, + void (*gen_req_cmp)(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe)) +{ + struct lpfc_dmabuf *bmp; + struct ulp_bde64 *bpl; + int ret; + uint16_t ntype, nstate; + + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6051 NVMEx LS REQ: Bad NDLP x%px, Failing " + "LS Req\n", + ndlp); + return -ENODEV; + } + + ntype = ndlp->nlp_type; + nstate = ndlp->nlp_state; + if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) || + (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6088 NVMEx LS REQ: Fail DID x%06x not " + "ready for IO. Type x%x, State x%x\n", + ndlp->nlp_DID, ntype, nstate); + return -ENODEV; + } + if (vport->phba->hba_flag & HBA_IOQ_FLUSH) + return -ENODEV; + + if (!vport->phba->sli4_hba.nvmels_wq) + return -ENOMEM; + + /* + * there are two dma buf in the request, actually there is one and + * the second one is just the start address + cmd size. + * Before calling lpfc_nvme_gen_req these buffers need to be wrapped + * in a lpfc_dmabuf struct. When freeing we just free the wrapper + * because the nvem layer owns the data bufs. + * We do not have to break these packets open, we don't care what is + * in them. And we do not have to look at the resonse data, we only + * care that we got a response. All of the caring is going to happen + * in the nvme-fc layer. + */ + + bmp = kmalloc(sizeof(*bmp), GFP_KERNEL); + if (!bmp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6044 NVMEx LS REQ: Could not alloc LS buf " + "for DID %x\n", + ndlp->nlp_DID); + return -ENOMEM; + } + + bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys)); + if (!bmp->virt) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6042 NVMEx LS REQ: Could not alloc mbuf " + "for DID %x\n", + ndlp->nlp_DID); + kfree(bmp); + return -ENOMEM; + } + + INIT_LIST_HEAD(&bmp->list); + + bpl = (struct ulp_bde64 *)bmp->virt; + bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma)); + bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma)); + bpl->tus.f.bdeFlags = 0; + bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + bpl++; + + bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma)); + bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma)); + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + bpl->tus.f.bdeSize = pnvme_lsreq->rsplen; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, " + "rqstlen:%d rsplen:%d %pad %pad\n", + ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen, + pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, + &pnvme_lsreq->rspdma); + + ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr, + pnvme_lsreq, gen_req_cmp, ndlp, 2, + pnvme_lsreq->timeout, 0); + if (ret != WQE_SUCCESS) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6052 NVMEx REQ: EXIT. issue ls wqe failed " + "lsreq x%px Status %x DID %x\n", + pnvme_lsreq, ret, ndlp->nlp_DID); + lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys); + kfree(bmp); + return -EIO; + } + + return 0; +} + +/** + * lpfc_nvme_ls_req - Issue an NVME Link Service request + * @pnvme_lport: Transport localport that LS is to be issued from. 
+ * @pnvme_rport: Transport remoteport that LS is to be sent to. + * @pnvme_lsreq: the transport nvme_ls_req structure for the LS + * + * Driver registers this routine to handle any link service request + * from the nvme_fc transport to a remote nvme-aware port. + * + * Return value : + * 0 - Success + * non-zero: various error codes, in form of -Exxx + **/ +static int +lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, + struct nvme_fc_remote_port *pnvme_rport, + struct nvmefc_ls_req *pnvme_lsreq) +{ + struct lpfc_nvme_lport *lport; + struct lpfc_nvme_rport *rport; + struct lpfc_vport *vport; + int ret; + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + rport = (struct lpfc_nvme_rport *)pnvme_rport->private; + if (unlikely(!lport) || unlikely(!rport)) + return -EINVAL; + + vport = lport->vport; + if (vport->load_flag & FC_UNLOADING || + vport->phba->hba_flag & HBA_IOQ_FLUSH) + return -ENODEV; + + atomic_inc(&lport->fc4NvmeLsRequests); + + ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq, + lpfc_nvme_ls_req_cmp); + if (ret) + atomic_inc(&lport->xmt_ls_err); + + return ret; +} + +/** + * __lpfc_nvme_ls_abort - Generic service routine to abort a prior + * NVME LS request + * @vport: The local port that issued the LS + * @ndlp: The remote port the LS was sent to + * @pnvme_lsreq: Pointer to LS request structure from the transport + * + * The driver validates the ndlp, looks for the LS, and aborts the + * LS if found. + * + * Returns: + * 0 : if LS found and aborted + * non-zero: various error conditions in form -Exxx + **/ +int +__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct nvmefc_ls_req *pnvme_lsreq) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *wqe, *next_wqe; + bool foundit = false; + + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID " + "x%06x, Failing LS Req\n", + ndlp, ndlp ? ndlp->nlp_DID : 0); + return -EINVAL; + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS, + "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq " + "x%px rqstlen:%d rsplen:%d %pad %pad\n", + pnvme_lsreq, pnvme_lsreq->rqstlen, + pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, + &pnvme_lsreq->rspdma); + + /* + * Lock the ELS ring txcmplq and look for the wqe that matches + * this ELS. If found, issue an abort on the wqe. 
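+ * The match is done purely on the nvme_lsreq pointer saved in context_un
+ * at issue time, roughly (sketch of the loop below):
+ *
+ *     list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list)
+ *             if (wqe->context_un.nvme_lsreq == pnvme_lsreq)
+ *                     break;   /* mark LPFC_DRIVER_ABORTED, then abort */
+ *
+ * Note the lock order used here: hbalock first, then the ring_lock.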
+ */ + pring = phba->sli4_hba.nvmels_wq->pring; + spin_lock_irq(&phba->hbalock); + spin_lock(&pring->ring_lock); + list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) { + if (wqe->context_un.nvme_lsreq == pnvme_lsreq) { + wqe->cmd_flag |= LPFC_DRIVER_ABORTED; + foundit = true; + break; + } + } + spin_unlock(&pring->ring_lock); + + if (foundit) + lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL); + spin_unlock_irq(&phba->hbalock); + + if (foundit) + return 0; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS, + "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n", + pnvme_lsreq); + return -EINVAL; +} + +static int +lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport, + struct nvme_fc_remote_port *remoteport, + struct nvmefc_ls_rsp *ls_rsp) +{ + struct lpfc_async_xchg_ctx *axchg = + container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp); + struct lpfc_nvme_lport *lport; + int rc; + + if (axchg->phba->pport->load_flag & FC_UNLOADING) + return -ENODEV; + + lport = (struct lpfc_nvme_lport *)localport->private; + + rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp); + + if (rc) { + /* + * unless the failure is due to having already sent + * the response, an abort will be generated for the + * exchange if the rsp can't be sent. + */ + if (rc != -EALREADY) + atomic_inc(&lport->xmt_ls_abort); + return rc; + } + + return 0; +} + +/** + * lpfc_nvme_ls_abort - Abort a prior NVME LS request + * @pnvme_lport: Transport localport that LS is to be issued from. + * @pnvme_rport: Transport remoteport that LS is to be sent to. + * @pnvme_lsreq: the transport nvme_ls_req structure for the LS + * + * Driver registers this routine to abort a NVME LS request that is + * in progress (from the transports perspective). + **/ +static void +lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport, + struct nvme_fc_remote_port *pnvme_rport, + struct nvmefc_ls_req *pnvme_lsreq) +{ + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + struct lpfc_nodelist *ndlp; + int ret; + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + if (unlikely(!lport)) + return; + vport = lport->vport; + + if (vport->load_flag & FC_UNLOADING) + return; + + ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); + + ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq); + if (!ret) + atomic_inc(&lport->xmt_ls_abort); +} + +/* Fix up the existing sgls for NVME IO. */ +static inline void +lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport, + struct lpfc_io_buf *lpfc_ncmd, + struct nvmefc_fcp_req *nCmd) +{ + struct lpfc_hba *phba = vport->phba; + struct sli4_sge *sgl; + union lpfc_wqe128 *wqe; + uint32_t *wptr, *dptr; + + /* + * Get a local pointer to the built-in wqe and correct + * the cmd size to match NVME's 96 bytes and fix + * the dma address. + */ + + wqe = &lpfc_ncmd->cur_iocbq.wqe; + + /* + * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to + * match NVME. NVME sends 96 bytes. Also, use the + * nvme commands command and response dma addresses + * rather than the virtual memory to ease the restore + * operation. 
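+ *
+ * Two layouts result from this fixup (sketch): with cfg_nvme_embed_cmd the
+ * CMD IU is copied into WQE words 16-30 as an immediate BDE, otherwise the
+ * first BDE/SGE simply points at nCmd->cmddma for the full cmdlen.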
+ */ + sgl = lpfc_ncmd->dma_sgl; + sgl->sge_len = cpu_to_le32(nCmd->cmdlen); + if (phba->cfg_nvme_embed_cmd) { + sgl->addr_hi = 0; + sgl->addr_lo = 0; + + /* Word 0-2 - NVME CMND IU (embedded payload) */ + wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED; + wqe->generic.bde.tus.f.bdeSize = 56; + wqe->generic.bde.addrHigh = 0; + wqe->generic.bde.addrLow = 64; /* Word 16 */ + + /* Word 10 - dbde is 0, wqes is 1 in template */ + + /* + * Embed the payload in the last half of the WQE + * WQE words 16-30 get the NVME CMD IU payload + * + * WQE words 16-19 get payload Words 1-4 + * WQE words 20-21 get payload Words 6-7 + * WQE words 22-29 get payload Words 16-23 + */ + wptr = &wqe->words[16]; /* WQE ptr */ + dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */ + dptr++; /* Skip Word 0 in payload */ + + *wptr++ = *dptr++; /* Word 1 */ + *wptr++ = *dptr++; /* Word 2 */ + *wptr++ = *dptr++; /* Word 3 */ + *wptr++ = *dptr++; /* Word 4 */ + dptr++; /* Skip Word 5 in payload */ + *wptr++ = *dptr++; /* Word 6 */ + *wptr++ = *dptr++; /* Word 7 */ + dptr += 8; /* Skip Words 8-15 in payload */ + *wptr++ = *dptr++; /* Word 16 */ + *wptr++ = *dptr++; /* Word 17 */ + *wptr++ = *dptr++; /* Word 18 */ + *wptr++ = *dptr++; /* Word 19 */ + *wptr++ = *dptr++; /* Word 20 */ + *wptr++ = *dptr++; /* Word 21 */ + *wptr++ = *dptr++; /* Word 22 */ + *wptr = *dptr; /* Word 23 */ + } else { + sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma)); + + /* Word 0-2 - NVME CMND IU Inline BDE */ + wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen; + wqe->generic.bde.addrHigh = sgl->addr_hi; + wqe->generic.bde.addrLow = sgl->addr_lo; + + /* Word 10 */ + bf_set(wqe_dbde, &wqe->generic.wqe_com, 1); + bf_set(wqe_wqes, &wqe->generic.wqe_com, 0); + } + + sgl++; + + /* Setup the physical region for the FCP RSP */ + sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma)); + sgl->word2 = le32_to_cpu(sgl->word2); + if (nCmd->sg_cnt) + bf_set(lpfc_sli4_sge_last, sgl, 0); + else + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(nCmd->rsplen); +} + + +/* + * lpfc_nvme_io_cmd_cmpl - Complete an NVME-over-FCP IO + * + * Driver registers this routine as it io request handler. This + * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq + * data structure to the rport indicated in @lpfc_nvme_rport. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. + **/ +static void +lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, + struct lpfc_iocbq *pwqeOut) +{ + struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf; + struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl; + struct lpfc_vport *vport = pwqeIn->vport; + struct nvmefc_fcp_req *nCmd; + struct nvme_fc_ersp_iu *ep; + struct nvme_fc_cmd_iu *cp; + struct lpfc_nodelist *ndlp; + struct lpfc_nvme_fcpreq_priv *freqpriv; + struct lpfc_nvme_lport *lport; + uint32_t code, status, idx; + uint16_t cid, sqhd, data; + uint32_t *ptr; + uint32_t lat; + bool call_done = false; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + int cpu; +#endif + int offline = 0; + + /* Sanity check on return of outstanding command */ + if (!lpfc_ncmd) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6071 Null lpfc_ncmd pointer. 
No " + "release, skip completion\n"); + return; + } + + /* Guard against abort handler being called at same time */ + spin_lock(&lpfc_ncmd->buf_lock); + + if (!lpfc_ncmd->nvmeCmd) { + spin_unlock(&lpfc_ncmd->buf_lock); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6066 Missing cmpl ptrs: lpfc_ncmd x%px, " + "nvmeCmd x%px\n", + lpfc_ncmd, lpfc_ncmd->nvmeCmd); + + /* Release the lpfc_ncmd regardless of the missing elements. */ + lpfc_release_nvme_buf(phba, lpfc_ncmd); + return; + } + nCmd = lpfc_ncmd->nvmeCmd; + status = bf_get(lpfc_wcqe_c_status, wcqe); + + idx = lpfc_ncmd->cur_iocbq.hba_wqidx; + phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++; + + if (unlikely(status && vport->localport)) { + lport = (struct lpfc_nvme_lport *)vport->localport->private; + if (lport) { + if (bf_get(lpfc_wcqe_c_xb, wcqe)) + atomic_inc(&lport->cmpl_fcp_xb); + atomic_inc(&lport->cmpl_fcp_err); + } + } + + lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag, + status, wcqe->parameter); + /* + * Catch race where our node has transitioned, but the + * transport is still transitioning. + */ + ndlp = lpfc_ncmd->ndlp; + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6062 Ignoring NVME cmpl. No ndlp\n"); + goto out_err; + } + + code = bf_get(lpfc_wcqe_c_code, wcqe); + if (code == CQE_CODE_NVME_ERSP) { + /* For this type of CQE, we need to rebuild the rsp */ + ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr; + + /* + * Get Command Id from cmd to plug into response. This + * code is not needed in the next NVME Transport drop. + */ + cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr; + cid = cp->sqe.common.command_id; + + /* + * RSN is in CQE word 2 + * SQHD is in CQE Word 3 bits 15:0 + * Cmd Specific info is in CQE Word 1 + * and in CQE Word 0 bits 15:0 + */ + sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe); + + /* Now lets build the NVME ERSP IU */ + ep->iu_len = cpu_to_be16(8); + ep->rsn = wcqe->parameter; + ep->xfrd_len = cpu_to_be32(nCmd->payload_length); + ep->rsvd12 = 0; + ptr = (uint32_t *)&ep->cqe.result.u64; + *ptr++ = wcqe->total_data_placed; + data = bf_get(lpfc_wcqe_c_ersp0, wcqe); + *ptr = (uint32_t)data; + ep->cqe.sq_head = sqhd; + ep->cqe.sq_id = nCmd->sqid; + ep->cqe.command_id = cid; + ep->cqe.status = 0; + + lpfc_ncmd->status = IOSTAT_SUCCESS; + lpfc_ncmd->result = 0; + nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN; + nCmd->transferred_length = nCmd->payload_length; + } else { + lpfc_ncmd->status = status; + lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK); + + /* For NVME, the only failure path that results in an + * IO error is when the adapter rejects it. All other + * conditions are a success case and resolved by the + * transport. + * IOSTAT_FCP_RSP_ERROR means: + * 1. Length of data received doesn't match total + * transfer length in WQE + * 2. If the RSP payload does NOT match these cases: + * a. RSP length 12/24 bytes and all zeros + * b. NVME ERSP + */ + switch (lpfc_ncmd->status) { + case IOSTAT_SUCCESS: + nCmd->transferred_length = wcqe->total_data_placed; + nCmd->rcv_rsplen = 0; + nCmd->status = 0; + break; + case IOSTAT_FCP_RSP_ERROR: + nCmd->transferred_length = wcqe->total_data_placed; + nCmd->rcv_rsplen = wcqe->parameter; + nCmd->status = 0; + + /* Get the NVME cmd details for this unique error. 
*/ + cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr; + ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr; + + /* Check if this is really an ERSP */ + if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) { + lpfc_ncmd->status = IOSTAT_SUCCESS; + lpfc_ncmd->result = 0; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, + "6084 NVME FCP_ERR ERSP: " + "xri %x placed x%x opcode x%x cmd_id " + "x%x cqe_status x%x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag, + wcqe->total_data_placed, + cp->sqe.common.opcode, + cp->sqe.common.command_id, + ep->cqe.status); + break; + } + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6081 NVME Completion Protocol Error: " + "xri %x status x%x result x%x " + "placed x%x opcode x%x cmd_id x%x, " + "cqe_status x%x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag, + lpfc_ncmd->status, lpfc_ncmd->result, + wcqe->total_data_placed, + cp->sqe.common.opcode, + cp->sqe.common.command_id, + ep->cqe.status); + break; + case IOSTAT_LOCAL_REJECT: + /* Let fall through to set command final state. */ + if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED) + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NVME_IOERR, + "6032 Delay Aborted cmd x%px " + "nvme cmd x%px, xri x%x, " + "xb %d\n", + lpfc_ncmd, nCmd, + lpfc_ncmd->cur_iocbq.sli4_xritag, + bf_get(lpfc_wcqe_c_xb, wcqe)); + fallthrough; + default: +out_err: + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, + "6072 NVME Completion Error: xri %x " + "status x%x result x%x [x%x] " + "placed x%x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag, + lpfc_ncmd->status, lpfc_ncmd->result, + wcqe->parameter, + wcqe->total_data_placed); + nCmd->transferred_length = 0; + nCmd->rcv_rsplen = 0; + nCmd->status = NVME_SC_INTERNAL; + offline = pci_channel_offline(vport->phba->pcidev); + } + } + + /* pick up SLI4 exhange busy condition */ + if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline) + lpfc_ncmd->flags |= LPFC_SBUF_XBUSY; + else + lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; + + /* Update stats and complete the IO. There is + * no need for dma unprep because the nvme_transport + * owns the dma address. + */ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (lpfc_ncmd->ts_cmd_start) { + lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp; + lpfc_ncmd->ts_data_io = ktime_get_ns(); + phba->ktime_last_cmd = lpfc_ncmd->ts_data_io; + lpfc_io_ktime(phba, lpfc_ncmd); + } + if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) { + cpu = raw_smp_processor_id(); + this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); + if (lpfc_ncmd->cpu != cpu) + lpfc_printf_vlog(vport, + KERN_INFO, LOG_NVME_IOERR, + "6701 CPU Check cmpl: " + "cpu %d expect %d\n", + cpu, lpfc_ncmd->cpu); + } +#endif + + /* NVME targets need completion held off until the abort exchange + * completes unless the NVME Rport is getting unregistered. + */ + + if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { + freqpriv = nCmd->private; + freqpriv->nvme_buf = NULL; + lpfc_ncmd->nvmeCmd = NULL; + call_done = true; + } + spin_unlock(&lpfc_ncmd->buf_lock); + + /* Check if IO qualified for CMF */ + if (phba->cmf_active_mode != LPFC_CFG_OFF && + nCmd->io_dir == NVMEFC_FCP_READ && + nCmd->payload_length) { + /* Used when calculating average latency */ + lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start; + lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL); + } + + if (call_done) + nCmd->done(nCmd); + + /* Call release with XB=1 to queue the IO into the abort list. 
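+ * lpfc_release_nvme_buf keys off the LPFC_SBUF_XBUSY flag set above: an
+ * XBUSY buffer is parked on the hdwq lpfc_abts_io_buf_list instead of
+ * being made available for immediate reuse.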
*/ + lpfc_release_nvme_buf(phba, lpfc_ncmd); +} + + +/** + * lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO + * @vport: pointer to a host virtual N_Port data structure + * @lpfc_ncmd: Pointer to lpfc scsi command + * @pnode: pointer to a node-list data structure + * @cstat: pointer to the control status structure + * + * Driver registers this routine as it io request handler. This + * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq + * data structure to the rport indicated in @lpfc_nvme_rport. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. + **/ +static int +lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, + struct lpfc_io_buf *lpfc_ncmd, + struct lpfc_nodelist *pnode, + struct lpfc_fc4_ctrl_stat *cstat) +{ + struct lpfc_hba *phba = vport->phba; + struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; + struct nvme_common_command *sqe; + struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq; + union lpfc_wqe128 *wqe = &pwqeq->wqe; + uint32_t req_len; + + /* + * There are three possibilities here - use scatter-gather segment, use + * the single mapping, or neither. + */ + if (nCmd->sg_cnt) { + if (nCmd->io_dir == NVMEFC_FCP_WRITE) { + /* From the iwrite template, initialize words 7 - 11 */ + memcpy(&wqe->words[7], + &lpfc_iwrite_cmd_template.words[7], + sizeof(uint32_t) * 5); + + /* Word 4 */ + wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length; + + /* Word 5 */ + if ((phba->cfg_nvme_enable_fb) && + (pnode->nlp_flag & NLP_FIRSTBURST)) { + req_len = lpfc_ncmd->nvmeCmd->payload_length; + if (req_len < pnode->nvme_fb_size) + wqe->fcp_iwrite.initial_xfer_len = + req_len; + else + wqe->fcp_iwrite.initial_xfer_len = + pnode->nvme_fb_size; + } else { + wqe->fcp_iwrite.initial_xfer_len = 0; + } + cstat->output_requests++; + } else { + /* From the iread template, initialize words 7 - 11 */ + memcpy(&wqe->words[7], + &lpfc_iread_cmd_template.words[7], + sizeof(uint32_t) * 5); + + /* Word 4 */ + wqe->fcp_iread.total_xfer_len = nCmd->payload_length; + + /* Word 5 */ + wqe->fcp_iread.rsrvd5 = 0; + + /* For a CMF Managed port, iod must be zero'ed */ + if (phba->cmf_active_mode == LPFC_CFG_MANAGED) + bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, + LPFC_WQE_IOD_NONE); + cstat->input_requests++; + } + } else { + /* From the icmnd template, initialize words 4 - 11 */ + memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], + sizeof(uint32_t) * 8); + cstat->control_requests++; + } + + if (pnode->nlp_nvme_info & NLP_NVME_NSLER) { + bf_set(wqe_erp, &wqe->generic.wqe_com, 1); + sqe = &((struct nvme_fc_cmd_iu *) + nCmd->cmdaddr)->sqe.common; + if (sqe->opcode == nvme_admin_async_event) + bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1); + } + + /* + * Finish initializing those WQE fields that are independent + * of the nvme_cmnd request_buffer + */ + + /* Word 3 */ + bf_set(payload_offset_len, &wqe->fcp_icmd, + (nCmd->rsplen + nCmd->cmdlen)); + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, + phba->sli4_hba.rpi_ids[pnode->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); + + /* Word 8 */ + wqe->generic.wqe_com.abort_tag = pwqeq->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); + + /* Word 10 */ + bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG); + + /* Words 13 14 15 are for PBDE support */ + + /* add the VMID tags as per switch response */ + if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) { + if (phba->pport->vmid_priority_tagging) { + bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); + 
bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, + lpfc_ncmd->cur_iocbq.vmid_tag.cs_ctl_vmid); + } else { + bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1); + bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); + wqe->words[31] = lpfc_ncmd->cur_iocbq.vmid_tag.app_id; + } + } + + pwqeq->vport = vport; + return 0; +} + + +/** + * lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO + * @vport: pointer to a host virtual N_Port data structure + * @lpfc_ncmd: Pointer to lpfc scsi command + * + * Driver registers this routine as it io request handler. This + * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq + * data structure to the rport indicated in @lpfc_nvme_rport. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. + **/ +static int +lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, + struct lpfc_io_buf *lpfc_ncmd) +{ + struct lpfc_hba *phba = vport->phba; + struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; + union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe; + struct sli4_sge *sgl = lpfc_ncmd->dma_sgl; + struct sli4_hybrid_sgl *sgl_xtra = NULL; + struct scatterlist *data_sg; + struct sli4_sge *first_data_sgl; + struct ulp_bde64 *bde; + dma_addr_t physaddr = 0; + uint32_t dma_len = 0; + uint32_t dma_offset = 0; + int nseg, i, j; + bool lsp_just_set = false; + + /* Fix up the command and response DMA stuff. */ + lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd); + + /* + * There are three possibilities here - use scatter-gather segment, use + * the single mapping, or neither. + */ + if (nCmd->sg_cnt) { + /* + * Jump over the cmd and rsp SGEs. The fix routine + * has already adjusted for this. + */ + sgl += 2; + + first_data_sgl = sgl; + lpfc_ncmd->seg_cnt = nCmd->sg_cnt; + if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6058 Too many sg segments from " + "NVME Transport. Max %d, " + "nvmeIO sg_cnt %d\n", + phba->cfg_nvme_seg_cnt + 1, + lpfc_ncmd->seg_cnt); + lpfc_ncmd->seg_cnt = 0; + return 1; + } + + /* + * The driver established a maximum scatter-gather segment count + * during probe that limits the number of sg elements in any + * single nvme command. Just run through the seg_cnt and format + * the sge's. 
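+ *
+ * When an SGL page is about to overflow, an LPFC_SGE_TYPE_LSP entry is
+ * written in place of a data SGE; it points at an extra per-hdwq SGL page
+ * obtained from lpfc_get_sgl_per_hdwq, so long lists chain across pages.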
+ */ + nseg = nCmd->sg_cnt; + data_sg = nCmd->first_sgl; + + /* for tracking the segment boundaries */ + j = 2; + for (i = 0; i < nseg; i++) { + if (data_sg == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6059 dptr err %d, nseg %d\n", + i, nseg); + lpfc_ncmd->seg_cnt = 0; + return 1; + } + + sgl->word2 = 0; + if (nseg == 1) { + bf_set(lpfc_sli4_sge_last, sgl, 1); + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); + } else { + bf_set(lpfc_sli4_sge_last, sgl, 0); + + /* expand the segment */ + if (!lsp_just_set && + !((j + 1) % phba->border_sge_num) && + ((nseg - 1) != i)) { + /* set LSP type */ + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_LSP); + + sgl_xtra = lpfc_get_sgl_per_hdwq( + phba, lpfc_ncmd); + + if (unlikely(!sgl_xtra)) { + lpfc_ncmd->seg_cnt = 0; + return 1; + } + sgl->addr_lo = cpu_to_le32(putPaddrLow( + sgl_xtra->dma_phys_sgl)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh( + sgl_xtra->dma_phys_sgl)); + + } else { + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); + } + } + + if (!(bf_get(lpfc_sli4_sge_type, sgl) & + LPFC_SGE_TYPE_LSP)) { + if ((nseg - 1) == i) + bf_set(lpfc_sli4_sge_last, sgl, 1); + + physaddr = sg_dma_address(data_sg); + dma_len = sg_dma_len(data_sg); + sgl->addr_lo = cpu_to_le32( + putPaddrLow(physaddr)); + sgl->addr_hi = cpu_to_le32( + putPaddrHigh(physaddr)); + + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(dma_len); + + dma_offset += dma_len; + data_sg = sg_next(data_sg); + + sgl++; + + lsp_just_set = false; + } else { + sgl->word2 = cpu_to_le32(sgl->word2); + + sgl->sge_len = cpu_to_le32( + phba->cfg_sg_dma_buf_size); + + sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; + i = i - 1; + + lsp_just_set = true; + } + + j++; + } + + /* PBDE support for first data SGE only */ + if (nseg == 1 && phba->cfg_enable_pbde) { + /* Words 13-15 */ + bde = (struct ulp_bde64 *) + &wqe->words[13]; + bde->addrLow = first_data_sgl->addr_lo; + bde->addrHigh = first_data_sgl->addr_hi; + bde->tus.f.bdeSize = + le32_to_cpu(first_data_sgl->sge_len); + bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bde->tus.w = cpu_to_le32(bde->tus.w); + + /* Word 11 - set PBDE bit */ + bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); + } else { + memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); + /* Word 11 - PBDE bit disabled by default template */ + } + + } else { + lpfc_ncmd->seg_cnt = 0; + + /* For this clause to be valid, the payload_length + * and sg_cnt must zero. + */ + if (nCmd->payload_length != 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6063 NVME DMA Prep Err: sg_cnt %d " + "payload_length x%x\n", + nCmd->sg_cnt, nCmd->payload_length); + return 1; + } + } + return 0; +} + +/** + * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO + * @pnvme_lport: Pointer to the driver's local port data + * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue + * @pnvme_fcreq: IO request from nvme fc to driver. + * + * Driver registers this routine as it io request handler. This + * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq + * data structure to the rport indicated in @lpfc_nvme_rport. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. 
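+ *
+ * Rough order of operations in the body below (sketch only):
+ *
+ *     validate lport/rport/ndlp state and pending-count qdepth
+ *     idx = qhandle index or the running CPU's hdwq mapping
+ *     lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
+ *     lpfc_nvme_prep_io_cmd(...); lpfc_nvme_prep_io_dma(...);
+ *     lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);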
+ **/ +static int +lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, + struct nvme_fc_remote_port *pnvme_rport, + void *hw_queue_handle, + struct nvmefc_fcp_req *pnvme_fcreq) +{ + int ret = 0; + int expedite = 0; + int idx, cpu; + struct lpfc_nvme_lport *lport; + struct lpfc_fc4_ctrl_stat *cstat; + struct lpfc_vport *vport; + struct lpfc_hba *phba; + struct lpfc_nodelist *ndlp; + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_nvme_rport *rport; + struct lpfc_nvme_qhandle *lpfc_queue_info; + struct lpfc_nvme_fcpreq_priv *freqpriv; + struct nvme_common_command *sqe; + uint64_t start = 0; +#if (IS_ENABLED(CONFIG_NVME_FC)) + u8 *uuid = NULL; + int err; + enum dma_data_direction iodir; +#endif + + /* Validate pointers. LLDD fault handling with transport does + * have timing races. + */ + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + if (unlikely(!lport)) { + ret = -EINVAL; + goto out_fail; + } + + vport = lport->vport; + + if (unlikely(!hw_queue_handle)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, + "6117 Fail IO, NULL hw_queue_handle\n"); + atomic_inc(&lport->xmt_fcp_err); + ret = -EBUSY; + goto out_fail; + } + + phba = vport->phba; + + if ((unlikely(vport->load_flag & FC_UNLOADING)) || + phba->hba_flag & HBA_IOQ_FLUSH) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, + "6124 Fail IO, Driver unload\n"); + atomic_inc(&lport->xmt_fcp_err); + ret = -ENODEV; + goto out_fail; + } + + freqpriv = pnvme_fcreq->private; + if (unlikely(!freqpriv)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, + "6158 Fail IO, NULL request data\n"); + atomic_inc(&lport->xmt_fcp_err); + ret = -EINVAL; + goto out_fail; + } + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) + start = ktime_get_ns(); +#endif + rport = (struct lpfc_nvme_rport *)pnvme_rport->private; + lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle; + + /* + * Catch race where our node has transitioned, but the + * transport is still transitioning. + */ + ndlp = rport->ndlp; + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, + "6053 Busy IO, ndlp not ready: rport x%px " + "ndlp x%px, DID x%06x\n", + rport, ndlp, pnvme_rport->port_id); + atomic_inc(&lport->xmt_fcp_err); + ret = -EBUSY; + goto out_fail; + } + + /* The remote node has to be a mapped target or it's an error. */ + if ((ndlp->nlp_type & NLP_NVME_TARGET) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, + "6036 Fail IO, DID x%06x not ready for " + "IO. State x%x, Type x%x Flg x%x\n", + pnvme_rport->port_id, + ndlp->nlp_state, ndlp->nlp_type, + ndlp->fc4_xpt_flags); + atomic_inc(&lport->xmt_fcp_bad_ndlp); + ret = -EBUSY; + goto out_fail; + + } + + /* Currently only NVME Keep alive commands should be expedited + * if the driver runs out of a resource. 
These should only be + * issued on the admin queue, qidx 0 + */ + if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) { + sqe = &((struct nvme_fc_cmd_iu *) + pnvme_fcreq->cmdaddr)->sqe.common; + if (sqe->opcode == nvme_admin_keep_alive) + expedite = 1; + } + + /* Check if IO qualifies for CMF */ + if (phba->cmf_active_mode != LPFC_CFG_OFF && + pnvme_fcreq->io_dir == NVMEFC_FCP_READ && + pnvme_fcreq->payload_length) { + ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length); + if (ret) { + ret = -EBUSY; + goto out_fail; + } + /* Get start time for IO latency */ + start = ktime_get_ns(); + } + + /* The node is shared with FCP IO, make sure the IO pending count does + * not exceed the programmed depth. + */ + if (lpfc_ndlp_check_qdepth(phba, ndlp)) { + if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) && + !expedite) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, + "6174 Fail IO, ndlp qdepth exceeded: " + "idx %d DID %x pend %d qdepth %d\n", + lpfc_queue_info->index, ndlp->nlp_DID, + atomic_read(&ndlp->cmd_pending), + ndlp->cmd_qdepth); + atomic_inc(&lport->xmt_fcp_qdepth); + ret = -EBUSY; + goto out_fail1; + } + } + + /* Lookup Hardware Queue index based on fcp_io_sched module parameter */ + if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { + idx = lpfc_queue_info->index; + } else { + cpu = raw_smp_processor_id(); + idx = phba->sli4_hba.cpu_map[cpu].hdwq; + } + + lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite); + if (lpfc_ncmd == NULL) { + atomic_inc(&lport->xmt_fcp_noxri); + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, + "6065 Fail IO, driver buffer pool is empty: " + "idx %d DID %x\n", + lpfc_queue_info->index, ndlp->nlp_DID); + ret = -EBUSY; + goto out_fail1; + } +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (start) { + lpfc_ncmd->ts_cmd_start = start; + lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd; + } else { + lpfc_ncmd->ts_cmd_start = 0; + } +#endif + lpfc_ncmd->rx_cmd_start = start; + + /* + * Store the data needed by the driver to issue, abort, and complete + * an IO. + * Do not let the IO hang out forever. There is no midlayer issuing + * an abort so inform the FW of the maximum IO pending time. + */ + freqpriv->nvme_buf = lpfc_ncmd; + lpfc_ncmd->nvmeCmd = pnvme_fcreq; + lpfc_ncmd->ndlp = ndlp; + lpfc_ncmd->qidx = lpfc_queue_info->qidx; + +#if (IS_ENABLED(CONFIG_NVME_FC)) + /* check the necessary and sufficient condition to support VMID */ + if (lpfc_is_vmid_enabled(phba) && + (ndlp->vmid_support || + phba->pport->vmid_priority_tagging == + LPFC_VMID_PRIO_TAG_ALL_TARGETS)) { + /* is the I/O generated by a VM, get the associated virtual */ + /* entity id */ + uuid = nvme_fc_io_getuuid(pnvme_fcreq); + + if (uuid) { + if (pnvme_fcreq->io_dir == NVMEFC_FCP_WRITE) + iodir = DMA_TO_DEVICE; + else if (pnvme_fcreq->io_dir == NVMEFC_FCP_READ) + iodir = DMA_FROM_DEVICE; + else + iodir = DMA_NONE; + + err = lpfc_vmid_get_appid(vport, uuid, iodir, + (union lpfc_vmid_io_tag *) + &lpfc_ncmd->cur_iocbq.vmid_tag); + if (!err) + lpfc_ncmd->cur_iocbq.cmd_flag |= LPFC_IO_VMID; + } + } +#endif + + /* + * Issue the IO on the WQ indicated by index in the hw_queue_handle. + * This identfier was create in our hardware queue create callback + * routine. The driver now is dependent on the IO queue steering from + * the transport. We are trusting the upper NVME layers know which + * index to use and that they have affinitized a CPU to this hardware + * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ. 
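+ * Here idx came either from the qhandle (LPFC_FCP_SCHED_BY_HDWQ) or from
+ * the running CPU's entry in phba->sli4_hba.cpu_map, as selected above.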
+ */ + lpfc_ncmd->cur_iocbq.hba_wqidx = idx; + cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat; + + lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat); + ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd); + if (ret) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, + "6175 Fail IO, Prep DMA: " + "idx %d DID %x\n", + lpfc_queue_info->index, ndlp->nlp_DID); + atomic_inc(&lport->xmt_fcp_err); + ret = -ENOMEM; + goto out_free_nvme_buf; + } + + lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag, + lpfc_queue_info->index, ndlp->nlp_DID); + + ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq); + if (ret) { + atomic_inc(&lport->xmt_fcp_wqerr); + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, + "6113 Fail IO, Could not issue WQE err %x " + "sid: x%x did: x%x oxid: x%x\n", + ret, vport->fc_myDID, ndlp->nlp_DID, + lpfc_ncmd->cur_iocbq.sli4_xritag); + goto out_free_nvme_buf; + } + + if (phba->cfg_xri_rebalancing) + lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no); + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (lpfc_ncmd->ts_cmd_start) + lpfc_ncmd->ts_cmd_wqput = ktime_get_ns(); + + if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) { + cpu = raw_smp_processor_id(); + this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); + lpfc_ncmd->cpu = cpu; + if (idx != cpu) + lpfc_printf_vlog(vport, + KERN_INFO, LOG_NVME_IOERR, + "6702 CPU Check cmd: " + "cpu %d wq %d\n", + lpfc_ncmd->cpu, + lpfc_queue_info->index); + } +#endif + return 0; + + out_free_nvme_buf: + if (lpfc_ncmd->nvmeCmd->sg_cnt) { + if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE) + cstat->output_requests--; + else + cstat->input_requests--; + } else + cstat->control_requests--; + lpfc_release_nvme_buf(phba, lpfc_ncmd); + out_fail1: + lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, + pnvme_fcreq->payload_length, NULL); + out_fail: + return ret; +} + +/** + * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request. + * @phba: Pointer to HBA context object + * @cmdiocb: Pointer to command iocb object. + * @rspiocb: Pointer to response iocb object. + * + * This is the callback function for any NVME FCP IO that was aborted. + * + * Return value: + * None + **/ +void +lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME, + "6145 ABORT_XRI_CN completing on rpi x%x " + "original iotag x%x, abort cmd iotag x%x " + "req_tag x%x, status x%x, hwstatus x%x\n", + bf_get(wqe_ctxt_tag, &cmdiocb->wqe.generic.wqe_com), + get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag, + bf_get(lpfc_wcqe_c_request_tag, abts_cmpl), + bf_get(lpfc_wcqe_c_status, abts_cmpl), + bf_get(lpfc_wcqe_c_hw_status, abts_cmpl)); + lpfc_sli_release_iocbq(phba, cmdiocb); +} + +/** + * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS + * @pnvme_lport: Pointer to the driver's local port data + * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue + * @pnvme_fcreq: IO request from nvme fc to driver. + * + * Driver registers this routine as its nvme request io abort handler. This + * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq + * data structure to the rport indicated in @lpfc_nvme_rport. This routine + * is executed asynchronously - one the target is validated as "MAPPED" and + * ready for IO, the driver issues the abort request and returns. 
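+ *
+ * Before issuing the ABTS the routine makes three checks (sketch of the
+ * body below): the lpfc_nbuf looked up from freqpriv must still point at
+ * this pnvme_fcreq, the WQE must still be on the txcmplq
+ * (LPFC_IO_ON_TXCMPLQ), and no abort may already be outstanding
+ * (LPFC_DRIVER_ABORTED).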
+ * + * Return value: + * None + **/ +static void +lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, + struct nvme_fc_remote_port *pnvme_rport, + void *hw_queue_handle, + struct nvmefc_fcp_req *pnvme_fcreq) +{ + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + struct lpfc_hba *phba; + struct lpfc_io_buf *lpfc_nbuf; + struct lpfc_iocbq *nvmereq_wqe; + struct lpfc_nvme_fcpreq_priv *freqpriv; + unsigned long flags; + int ret_val; + + /* Validate pointers. LLDD fault handling with transport does + * have timing races. + */ + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + if (unlikely(!lport)) + return; + + vport = lport->vport; + + if (unlikely(!hw_queue_handle)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, + "6129 Fail Abort, HW Queue Handle NULL.\n"); + return; + } + + phba = vport->phba; + freqpriv = pnvme_fcreq->private; + + if (unlikely(!freqpriv)) + return; + if (vport->load_flag & FC_UNLOADING) + return; + + /* Announce entry to new IO submit field. */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, + "6002 Abort Request to rport DID x%06x " + "for nvme_fc_req x%px\n", + pnvme_rport->port_id, + pnvme_fcreq); + + lpfc_nbuf = freqpriv->nvme_buf; + if (!lpfc_nbuf) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6140 NVME IO req has no matching lpfc nvme " + "io buffer. Skipping abort req.\n"); + return; + } else if (!lpfc_nbuf->nvmeCmd) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6141 lpfc NVME IO req has no nvme_fcreq " + "io buffer. Skipping abort req.\n"); + return; + } + + /* Guard against IO completion being called at same time */ + spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags); + + /* If the hba is getting reset, this flag is set. It is + * cleared when the reset is complete and rings reestablished. + */ + spin_lock(&phba->hbalock); + /* driver queued commands are in process of being flushed */ + if (phba->hba_flag & HBA_IOQ_FLUSH) { + spin_unlock(&phba->hbalock); + spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6139 Driver in reset cleanup - flushing " + "NVME Req now. hba_flag x%x\n", + phba->hba_flag); + return; + } + + nvmereq_wqe = &lpfc_nbuf->cur_iocbq; + + /* + * The lpfc_nbuf and the mapped nvme_fcreq in the driver's + * state must match the nvme_fcreq passed by the nvme + * transport. If they don't match, it is likely the driver + * has already completed the NVME IO and the nvme transport + * has not seen it yet. + */ + if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6143 NVME req mismatch: " + "lpfc_nbuf x%px nvmeCmd x%px, " + "pnvme_fcreq x%px. Skipping Abort xri x%x\n", + lpfc_nbuf, lpfc_nbuf->nvmeCmd, + pnvme_fcreq, nvmereq_wqe->sli4_xritag); + goto out_unlock; + } + + /* Don't abort IOs no longer on the pending queue. 
*/ + if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6142 NVME IO req x%px not queued - skipping " + "abort req xri x%x\n", + pnvme_fcreq, nvmereq_wqe->sli4_xritag); + goto out_unlock; + } + + atomic_inc(&lport->xmt_fcp_abort); + lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n", + nvmereq_wqe->sli4_xritag, + nvmereq_wqe->hba_wqidx, pnvme_rport->port_id); + + /* Outstanding abort is in progress */ + if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6144 Outstanding NVME I/O Abort Request " + "still pending on nvme_fcreq x%px, " + "lpfc_ncmd x%px xri x%x\n", + pnvme_fcreq, lpfc_nbuf, + nvmereq_wqe->sli4_xritag); + goto out_unlock; + } + + ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe, + lpfc_nvme_abort_fcreq_cmpl); + + spin_unlock(&phba->hbalock); + spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); + + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + + if (ret_val != WQE_SUCCESS) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6137 Failed abts issue_wqe with status x%x " + "for nvme_fcreq x%px.\n", + ret_val, pnvme_fcreq); + return; + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, + "6138 Transport Abort NVME Request Issued for " + "ox_id x%x\n", + nvmereq_wqe->sli4_xritag); + return; + +out_unlock: + spin_unlock(&phba->hbalock); + spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); + return; +} + +/* Declare and initialization an instance of the FC NVME template. */ +static struct nvme_fc_port_template lpfc_nvme_template = { + /* initiator-based functions */ + .localport_delete = lpfc_nvme_localport_delete, + .remoteport_delete = lpfc_nvme_remoteport_delete, + .create_queue = lpfc_nvme_create_queue, + .delete_queue = lpfc_nvme_delete_queue, + .ls_req = lpfc_nvme_ls_req, + .fcp_io = lpfc_nvme_fcp_io_submit, + .ls_abort = lpfc_nvme_ls_abort, + .fcp_abort = lpfc_nvme_fcp_abort, + .xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp, + + .max_hw_queues = 1, + .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS, + .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS, + .dma_boundary = 0xFFFFFFFF, + + /* Sizes of additional private data for data structures. + * No use for the last two sizes at this time. + */ + .local_priv_sz = sizeof(struct lpfc_nvme_lport), + .remote_priv_sz = sizeof(struct lpfc_nvme_rport), + .lsrqst_priv_sz = 0, + .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv), +}; + +/* + * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA + * + * This routine removes a nvme buffer from head of @hdwq io_buf_list + * and returns to caller. + * + * Return codes: + * NULL - Error + * Pointer to lpfc_nvme_buf - Success + **/ +static struct lpfc_io_buf * +lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, + int idx, int expedite) +{ + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_sli4_hdw_queue *qp; + struct sli4_sge *sgl; + struct lpfc_iocbq *pwqeq; + union lpfc_wqe128 *wqe; + + lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite); + + if (lpfc_ncmd) { + pwqeq = &(lpfc_ncmd->cur_iocbq); + wqe = &pwqeq->wqe; + + /* Setup key fields in buffer that may have been changed + * if other protocols used this buffer. + */ + pwqeq->cmd_flag = LPFC_IO_NVME; + pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl; + lpfc_ncmd->start_time = jiffies; + lpfc_ncmd->flags = 0; + + /* Rsp SGE will be filled in when we rcv an IO + * from the NVME Layer to be sent. + * The cmd is going to be embedded so we need a SKIP SGE. 
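/*
 * Editor's note: illustrative sketch, not part of the upstream patch.
 * It models the template idea used by lpfc_nvme_template above: a table
 * of callbacks plus declared private-area sizes lets a core layer
 * allocate one object with driver-private storage appended (compare
 * local_priv_sz / remote_priv_sz). All names below are hypothetical and
 * this is plain userspace C, not the nvme_fc transport API.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_port;                       /* core-owned object                */

struct demo_template {
        int  (*create_queue)(struct demo_port *p, unsigned int qidx);
        void (*delete_queue)(struct demo_port *p, unsigned int qidx);
        size_t priv_sz;                 /* driver-private bytes to append   */
};

struct demo_port {
        const struct demo_template *tmpl;
        unsigned char priv[];           /* driver-private area follows      */
};

static struct demo_port *demo_register_port(const struct demo_template *t)
{
        struct demo_port *p = calloc(1, sizeof(*p) + t->priv_sz);

        if (p)
                p->tmpl = t;            /* driver later uses p->priv        */
        return p;
}

static int demo_create_queue(struct demo_port *p, unsigned int qidx)
{
        (void)p;
        printf("create queue %u\n", qidx);
        return 0;
}

static void demo_delete_queue(struct demo_port *p, unsigned int qidx)
{
        (void)p;
        printf("delete queue %u\n", qidx);
}

int main(void)
{
        static const struct demo_template tmpl = {
                .create_queue = demo_create_queue,
                .delete_queue = demo_delete_queue,
                .priv_sz      = 64,     /* like local_priv_sz in the template */
        };
        struct demo_port *port = demo_register_port(&tmpl);

        if (!port)
                return 1;
        port->tmpl->create_queue(port, 0);
        port->tmpl->delete_queue(port, 0);
        free(port);
        return 0;
}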
+ */ + sgl = lpfc_ncmd->dma_sgl; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + /* Fill in word 3 / sgl_len during cmd submission */ + + /* Initialize 64 bytes only */ + memset(wqe, 0, sizeof(union lpfc_wqe)); + + if (lpfc_ndlp_check_qdepth(phba, ndlp)) { + atomic_inc(&ndlp->cmd_pending); + lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH; + } + + } else { + qp = &phba->sli4_hba.hdwq[idx]; + qp->empty_io_bufs++; + } + + return lpfc_ncmd; +} + +/** + * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list. + * @phba: The Hba for which this call is being executed. + * @lpfc_ncmd: The nvme buffer which is being released. + * + * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba + * lpfc_io_buf_list list. For SLI4 XRI's are tied to the nvme buffer + * and cannot be reused for at least RA_TOV amount of time if it was + * aborted. + **/ +static void +lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd) +{ + struct lpfc_sli4_hdw_queue *qp; + unsigned long iflag = 0; + + if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp) + atomic_dec(&lpfc_ncmd->ndlp->cmd_pending); + + lpfc_ncmd->ndlp = NULL; + lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH; + + qp = lpfc_ncmd->hdwq; + if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6310 XB release deferred for " + "ox_id x%x on reqtag x%x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag, + lpfc_ncmd->cur_iocbq.iotag); + + spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); + list_add_tail(&lpfc_ncmd->list, + &qp->lpfc_abts_io_buf_list); + qp->abts_nvme_io_bufs++; + spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); + } else + lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp); +} + +/** + * lpfc_nvme_create_localport - Create/Bind an nvme localport instance. + * @vport: the lpfc_vport instance requesting a localport. + * + * This routine is invoked to create an nvme localport instance to bind + * to the nvme_fc_transport. It is called once during driver load + * like lpfc_create_shost after all other services are initialized. + * It requires a vport, vpi, and wwns at call time. Other localport + * parameters are modified as the driver's FCID and the Fabric WWN + * are established. + * + * Return codes + * 0 - successful + * -ENOMEM - no heap memory available + * other values - from nvme registration upcall + **/ +int +lpfc_nvme_create_localport(struct lpfc_vport *vport) +{ + int ret = 0; + struct lpfc_hba *phba = vport->phba; + struct nvme_fc_port_info nfcp_info; + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + + /* Initialize this localport instance. The vport wwn usage ensures + * that NPIV is accounted for. + */ + memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info)); + nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR; + nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); + nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn); + + /* We need to tell the transport layer + 1 because it takes page + * alignment into account. When space for the SGL is allocated we + * allocate + 3, one for cmd, one for rsp and one for this alignment + */ + lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; + + /* Advertise how many hw queues we support based on cfg_hdw_queue, + * which will not exceed cpu count. 
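/*
 * Editor's note: illustrative sketch, not part of the upstream patch.
 * It models the release policy in lpfc_release_nvme_buf() above: a
 * buffer whose exchange is still marked busy (XB) is parked on a
 * deferred list instead of being freed, and only returned to the pool
 * once the aborted XRI is released. Hypothetical names; the driver's
 * locking and list handling are more involved.
 */
#include <stdio.h>
#include <stdlib.h>

struct buf {
        int xbusy;                      /* exchange still busy on the wire  */
        struct buf *next;
};

static struct buf *deferred;            /* stand-in for the abts buf list   */

static void release_buf(struct buf *b)
{
        if (b->xbusy) {
                b->next = deferred;     /* defer until XRI_ABORTED arrives  */
                deferred = b;
                printf("release deferred (XB set)\n");
        } else {
                free(b);                /* safe to reuse immediately        */
                printf("released to free pool\n");
        }
}

int main(void)
{
        struct buf *a = calloc(1, sizeof(*a));
        struct buf *b = calloc(1, sizeof(*b));

        b->xbusy = 1;
        release_buf(a);                 /* freed                            */
        release_buf(b);                 /* parked on the deferred list      */
        free(deferred);                 /* demo cleanup                     */
        return 0;
}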
+ */ + lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue; + + if (!IS_ENABLED(CONFIG_NVME_FC)) + return ret; + + /* localport is allocated from the stack, but the registration + * call allocates heap memory as well as the private area. + */ + + ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, + &vport->phba->pcidev->dev, &localport); + if (!ret) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, + "6005 Successfully registered local " + "NVME port num %d, localP x%px, private " + "x%px, sg_seg %d\n", + localport->port_num, localport, + localport->private, + lpfc_nvme_template.max_sgl_segments); + + /* Private is our lport size declared in the template. */ + lport = (struct lpfc_nvme_lport *)localport->private; + vport->localport = localport; + lport->vport = vport; + vport->nvmei_support = 1; + + atomic_set(&lport->xmt_fcp_noxri, 0); + atomic_set(&lport->xmt_fcp_bad_ndlp, 0); + atomic_set(&lport->xmt_fcp_qdepth, 0); + atomic_set(&lport->xmt_fcp_err, 0); + atomic_set(&lport->xmt_fcp_wqerr, 0); + atomic_set(&lport->xmt_fcp_abort, 0); + atomic_set(&lport->xmt_ls_abort, 0); + atomic_set(&lport->xmt_ls_err, 0); + atomic_set(&lport->cmpl_fcp_xb, 0); + atomic_set(&lport->cmpl_fcp_err, 0); + atomic_set(&lport->cmpl_ls_xb, 0); + atomic_set(&lport->cmpl_ls_err, 0); + + atomic_set(&lport->fc4NvmeLsRequests, 0); + atomic_set(&lport->fc4NvmeLsCmpls, 0); + } + + return ret; +} + +#if (IS_ENABLED(CONFIG_NVME_FC)) +/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg. + * + * The driver has to wait for the host nvme transport to callback + * indicating the localport has successfully unregistered all + * resources. Since this is an uninterruptible wait, loop every ten + * seconds and print a message indicating no progress. + * + * An uninterruptible wait is used because of the risk of transport-to- + * driver state mismatch. + */ +static void +lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, + struct lpfc_nvme_lport *lport, + struct completion *lport_unreg_cmp) +{ + u32 wait_tmo; + int ret, i, pending = 0; + struct lpfc_sli_ring *pring; + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli4_hdw_queue *qp; + int abts_scsi, abts_nvme; + + /* Host transport has to clean up and confirm requiring an indefinite + * wait. Print a message if a 10 second wait expires and renew the + * wait. This is unexpected. + */ + wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000); + while (true) { + ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); + if (unlikely(!ret)) { + pending = 0; + abts_scsi = 0; + abts_nvme = 0; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + qp = &phba->sli4_hba.hdwq[i]; + if (!vport->localport || !qp || !qp->io_wq) + return; + + pring = qp->io_wq->pring; + if (!pring) + continue; + pending += pring->txcmplq_cnt; + abts_scsi += qp->abts_scsi_io_bufs; + abts_nvme += qp->abts_nvme_io_bufs; + } + if (!vport->localport || + test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) || + phba->link_state == LPFC_HBA_ERROR || + vport->load_flag & FC_UNLOADING) + return; + + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6176 Lport x%px Localport x%px wait " + "timed out. Pending %d [%d:%d]. " + "Renewing.\n", + lport, vport->localport, pending, + abts_scsi, abts_nvme); + continue; + } + break; + } + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, + "6177 Lport x%px Localport x%px Complete Success\n", + lport, vport->localport); +} +#endif + +/** + * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport. 
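/*
 * Editor's note: illustrative sketch, not part of the upstream patch.
 * It models the unbounded-but-noisy wait in lpfc_nvme_lport_unreg_wait()
 * above: block for a fixed timeout and, if the completion has not fired,
 * report progress and renew the wait instead of giving up. Hypothetical
 * names; the fake wait below just counts calls where the driver uses
 * wait_for_completion_timeout().
 */
#include <stdio.h>

static int calls_until_done = 3;        /* pretend unreg finishes on 3rd wait */

/* Returns nonzero once the simulated completion fires within the timeout. */
static int fake_wait_for_completion_timeout(unsigned int tmo_secs)
{
        (void)tmo_secs;
        return --calls_until_done <= 0;
}

static void wait_for_unreg(void)
{
        const unsigned int tmo = 10;    /* like LPFC_NVME_WAIT_TMO seconds  */
        int pending = 42;               /* stand-in for txcmplq_cnt totals  */

        for (;;) {
                if (fake_wait_for_completion_timeout(tmo))
                        break;          /* localport teardown signalled us  */
                /* Timed out: log state and renew rather than abandon it. */
                printf("unreg wait timed out, pending %d, renewing\n",
                       pending);
        }
        printf("unregister complete\n");
}

int main(void)
{
        wait_for_unreg();
        return 0;
}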
+ * @vport: pointer to a host virtual N_Port data structure + * + * This routine is invoked to destroy all lports bound to the phba. + * The lport memory was allocated by the nvme fc transport and is + * released there. This routine ensures all rports bound to the + * lport have been disconnected. + * + **/ +void +lpfc_nvme_destroy_localport(struct lpfc_vport *vport) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + int ret; + DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp); + + if (vport->nvmei_support == 0) + return; + + localport = vport->localport; + if (!localport) + return; + lport = (struct lpfc_nvme_lport *)localport->private; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, + "6011 Destroying NVME localport x%px\n", + localport); + + /* lport's rport list is clear. Unregister + * lport and release resources. + */ + lport->lport_unreg_cmp = &lport_unreg_cmp; + ret = nvme_fc_unregister_localport(localport); + + /* Wait for completion. This either blocks + * indefinitely or succeeds + */ + lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp); + vport->localport = NULL; + + /* Regardless of the unregister upcall response, clear + * nvmei_support. All rports are unregistered and the + * driver will clean up. + */ + vport->nvmei_support = 0; + if (ret == 0) { + lpfc_printf_vlog(vport, + KERN_INFO, LOG_NVME_DISC, + "6009 Unregistered lport Success\n"); + } else { + lpfc_printf_vlog(vport, + KERN_INFO, LOG_NVME_DISC, + "6010 Unregistered lport " + "Failed, status x%x\n", + ret); + } +#endif +} + +void +lpfc_nvme_update_localport(struct lpfc_vport *vport) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + + localport = vport->localport; + if (!localport) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, + "6710 Update NVME fail. No localport\n"); + return; + } + lport = (struct lpfc_nvme_lport *)localport->private; + if (!lport) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, + "6171 Update NVME fail. localP x%px, No lport\n", + localport); + return; + } + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, + "6012 Update NVME lport x%px did x%x\n", + localport, vport->fc_myDID); + + localport->port_id = vport->fc_myDID; + if (localport->port_id == 0) + localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY; + else + localport->port_role = FC_PORT_ROLE_NVME_INITIATOR; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6030 bound lport x%px to DID x%06x\n", + lport, localport->port_id); +#endif +} + +int +lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + int ret = 0; + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + struct lpfc_nvme_rport *rport; + struct lpfc_nvme_rport *oldrport; + struct nvme_fc_remote_port *remote_port; + struct nvme_fc_port_info rpinfo; + struct lpfc_nodelist *prev_ndlp = NULL; + struct fc_rport *srport = ndlp->rport; + + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, + "6006 Register NVME PORT. DID x%06x nlptype x%x\n", + ndlp->nlp_DID, ndlp->nlp_type); + + localport = vport->localport; + if (!localport) + return 0; + + lport = (struct lpfc_nvme_lport *)localport->private; + + /* NVME rports are not preserved across devloss. + * Just register this instance. Note, rpinfo->dev_loss_tmo + * is left 0 to indicate accept transport defaults. The + * driver communicates port role capabilities consistent + * with the PRLI response data. 
+ */ + memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info)); + rpinfo.port_id = ndlp->nlp_DID; + if (ndlp->nlp_type & NLP_NVME_TARGET) + rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET; + if (ndlp->nlp_type & NLP_NVME_INITIATOR) + rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR; + + if (ndlp->nlp_type & NLP_NVME_DISCOVERY) + rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY; + + rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); + rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); + if (srport) + rpinfo.dev_loss_tmo = srport->dev_loss_tmo; + else + rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo; + + spin_lock_irq(&ndlp->lock); + + /* If an oldrport exists, so does the ndlp reference. If not + * a new reference is needed because either the node has never + * been registered or it's been unregistered and getting deleted. + */ + oldrport = lpfc_ndlp_get_nrport(ndlp); + if (oldrport) { + prev_ndlp = oldrport->ndlp; + spin_unlock_irq(&ndlp->lock); + } else { + spin_unlock_irq(&ndlp->lock); + if (!lpfc_nlp_get(ndlp)) { + dev_warn(&vport->phba->pcidev->dev, + "Warning - No node ref - exit register\n"); + return 0; + } + } + + ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port); + if (!ret) { + /* If the ndlp already has an nrport, this is just + * a resume of the existing rport. Else this is a + * new rport. + */ + /* Guard against an unregister/reregister + * race that leaves the WAIT flag set. + */ + spin_lock_irq(&ndlp->lock); + ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT; + ndlp->fc4_xpt_flags |= NVME_XPT_REGD; + spin_unlock_irq(&ndlp->lock); + rport = remote_port->private; + if (oldrport) { + + /* Sever the ndlp<->rport association + * before dropping the ndlp ref from + * register. + */ + spin_lock_irq(&ndlp->lock); + ndlp->nrport = NULL; + ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT; + spin_unlock_irq(&ndlp->lock); + rport->ndlp = NULL; + rport->remoteport = NULL; + + /* Reference only removed if previous NDLP is no longer + * active. It might be just a swap and removing the + * reference would cause a premature cleanup. + */ + if (prev_ndlp && prev_ndlp != ndlp) { + if (!prev_ndlp->nrport) + lpfc_nlp_put(prev_ndlp); + } + } + + /* Clean bind the rport to the ndlp. */ + rport->remoteport = remote_port; + rport->lport = lport; + rport->ndlp = ndlp; + spin_lock_irq(&ndlp->lock); + ndlp->nrport = rport; + spin_unlock_irq(&ndlp->lock); + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NVME_DISC | LOG_NODE, + "6022 Bind lport x%px to remoteport x%px " + "rport x%px WWNN 0x%llx, " + "Rport WWPN 0x%llx DID " + "x%06x Role x%x, ndlp %p prev_ndlp x%px\n", + lport, remote_port, rport, + rpinfo.node_name, rpinfo.port_name, + rpinfo.port_id, rpinfo.port_role, + ndlp, prev_ndlp); + } else { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, + "6031 RemotePort Registration failed " + "err: %d, DID x%06x ref %u\n", + ret, ndlp->nlp_DID, kref_read(&ndlp->kref)); + lpfc_nlp_put(ndlp); + } + + return ret; +#else + return 0; +#endif +} + +/* + * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport + * + * If the ndlp represents an NVME Target, that we are logged into, + * ping the NVME FC Transport layer to initiate a device rescan + * on this remote NPort. 
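/*
 * Editor's note: illustrative sketch, not part of the upstream patch.
 * It models the reference rule in the registration path above: a
 * remote-port binding owns exactly one node reference, a rebind to the
 * same node reuses the existing reference, and a swap to a new node
 * drops the old node's reference only after the old node loses its
 * binding. Hypothetical names and a trivial non-atomic refcount.
 */
#include <stdio.h>
#include <stdlib.h>

struct rport;

struct node {
        int refs;
        struct rport *binding;          /* like ndlp->nrport                */
};

struct rport {
        struct node *node;
};

static void node_put(struct node *n)
{
        if (--n->refs == 0) {
                printf("node freed\n");
                free(n);
        }
}

static void rebind(struct rport *rp, struct node *newn)
{
        struct node *prev = rp->node;

        if (prev != newn)
                newn->refs++;           /* new binding takes its own ref    */
        rp->node = newn;
        newn->binding = rp;
        if (prev && prev != newn) {
                prev->binding = NULL;   /* sever the old association        */
                node_put(prev);         /* old node no longer bound         */
        }
}

int main(void)
{
        struct node *a = calloc(1, sizeof(*a));
        struct node *b = calloc(1, sizeof(*b));
        struct rport rp = { 0 };

        a->refs = 1;                    /* ref held by discovery            */
        b->refs = 1;
        rebind(&rp, a);                 /* a->refs == 2                     */
        rebind(&rp, b);                 /* swap: a's binding ref dropped    */
        node_put(a);                    /* discovery logout: a is freed     */
        node_put(b);                    /* drop binding ref                 */
        node_put(b);                    /* discovery logout: b is freed     */
        return 0;
}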
+ */ +void +lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + struct lpfc_nvme_rport *nrport; + struct nvme_fc_remote_port *remoteport = NULL; + + spin_lock_irq(&ndlp->lock); + nrport = lpfc_ndlp_get_nrport(ndlp); + if (nrport) + remoteport = nrport->remoteport; + spin_unlock_irq(&ndlp->lock); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6170 Rescan NPort DID x%06x type x%x " + "state x%x nrport x%px remoteport x%px\n", + ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, + nrport, remoteport); + + if (!nrport || !remoteport) + goto rescan_exit; + + /* Rescan an NVME target in MAPPED state with DISCOVERY role set */ + if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY && + ndlp->nlp_state == NLP_STE_MAPPED_NODE) { + nvme_fc_rescan_remoteport(remoteport); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6172 NVME rescanned DID x%06x " + "port_state x%x\n", + ndlp->nlp_DID, remoteport->port_state); + } + return; + rescan_exit: + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6169 Skip NVME Rport Rescan, NVME remoteport " + "unregistered\n"); +#endif +} + +/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport. + * + * There is no notion of Devloss or rport recovery from the current + * nvme_transport perspective. Loss of an rport just means IO cannot + * be sent and recovery is completely up to the initator. + * For now, the driver just unbinds the DID and port_role so that + * no further IO can be issued. + */ +void +lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + int ret; + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + struct lpfc_nvme_rport *rport; + struct nvme_fc_remote_port *remoteport = NULL; + + localport = vport->localport; + + /* This is fundamental error. The localport is always + * available until driver unload. Just exit. + */ + if (!localport) + return; + + lport = (struct lpfc_nvme_lport *)localport->private; + if (!lport) + goto input_err; + + spin_lock_irq(&ndlp->lock); + rport = lpfc_ndlp_get_nrport(ndlp); + if (rport) + remoteport = rport->remoteport; + spin_unlock_irq(&ndlp->lock); + if (!remoteport) + goto input_err; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6033 Unreg nvme remoteport x%px, portname x%llx, " + "port_id x%06x, portstate x%x port type x%x " + "refcnt %d\n", + remoteport, remoteport->port_name, + remoteport->port_id, remoteport->port_state, + ndlp->nlp_type, kref_read(&ndlp->kref)); + + /* Sanity check ndlp type. Only call for NVME ports. Don't + * clear any rport state until the transport calls back. + */ + + if (ndlp->nlp_type & NLP_NVME_TARGET) { + /* No concern about the role change on the nvme remoteport. + * The transport will update it. + */ + spin_lock_irq(&vport->phba->hbalock); + ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT; + spin_unlock_irq(&vport->phba->hbalock); + + /* Don't let the host nvme transport keep sending keep-alives + * on this remoteport. Vport is unloading, no recovery. The + * return values is ignored. The upcall is a courtesy to the + * transport. + */ + if (vport->load_flag & FC_UNLOADING || + unlikely(vport->phba->link_state == LPFC_HBA_ERROR)) + (void)nvme_fc_set_remoteport_devloss(remoteport, 0); + + ret = nvme_fc_unregister_remoteport(remoteport); + + /* The driver no longer knows if the nrport memory is valid. + * because the controller teardown process has begun and + * is asynchronous. 
Break the binding in the ndlp. Also + * remove the register ndlp reference to setup node release. + */ + ndlp->nrport = NULL; + lpfc_nlp_put(ndlp); + if (ret != 0) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6167 NVME unregister failed %d " + "port_state x%x\n", + ret, remoteport->port_state); + + if (vport->load_flag & FC_UNLOADING) { + /* Only 1 thread can drop the initial node + * reference. Check if another thread has set + * NLP_DROPPED. + */ + spin_lock_irq(&ndlp->lock); + if (!(ndlp->nlp_flag & NLP_DROPPED)) { + ndlp->nlp_flag |= NLP_DROPPED; + spin_unlock_irq(&ndlp->lock); + lpfc_nlp_put(ndlp); + return; + } + spin_unlock_irq(&ndlp->lock); + } + } + } + return; + + input_err: +#endif + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6168 State error: lport x%px, rport x%px FCID x%06x\n", + vport->localport, ndlp->rport, ndlp->nlp_DID); +} + +/** + * lpfc_sli4_nvme_pci_offline_aborted - Fast-path process of NVME xri abort + * @phba: pointer to lpfc hba data structure. + * @lpfc_ncmd: The nvme job structure for the request being aborted. + * + * This routine is invoked by the worker thread to process a SLI4 fast-path + * NVME aborted xri. Aborted NVME IO commands are completed to the transport + * here. + **/ +void +lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba, + struct lpfc_io_buf *lpfc_ncmd) +{ + struct nvmefc_fcp_req *nvme_cmd = NULL; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6533 %s nvme_cmd %p tag x%x abort complete and " + "xri released\n", __func__, + lpfc_ncmd->nvmeCmd, + lpfc_ncmd->cur_iocbq.iotag); + + /* Aborted NVME commands are required to not complete + * before the abort exchange command fully completes. + * Once completed, it is available via the put list. + */ + if (lpfc_ncmd->nvmeCmd) { + nvme_cmd = lpfc_ncmd->nvmeCmd; + nvme_cmd->transferred_length = 0; + nvme_cmd->rcv_rsplen = 0; + nvme_cmd->status = NVME_SC_INTERNAL; + nvme_cmd->done(nvme_cmd); + lpfc_ncmd->nvmeCmd = NULL; + } + lpfc_release_nvme_buf(phba, lpfc_ncmd); +} + +/** + * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort + * @phba: pointer to lpfc hba data structure. + * @axri: pointer to the fcp xri abort wcqe structure. + * @lpfc_ncmd: The nvme job structure for the request being aborted. + * + * This routine is invoked by the worker thread to process a SLI4 fast-path + * NVME aborted xri. Aborted NVME IO commands are completed to the transport + * here. + **/ +void +lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri, + struct lpfc_io_buf *lpfc_ncmd) +{ + uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); + struct nvmefc_fcp_req *nvme_cmd = NULL; + struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp; + + + if (ndlp) + lpfc_sli4_abts_err_handler(phba, ndlp, axri); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6311 nvme_cmd %p xri x%x tag x%x abort complete and " + "xri released\n", + lpfc_ncmd->nvmeCmd, xri, + lpfc_ncmd->cur_iocbq.iotag); + + /* Aborted NVME commands are required to not complete + * before the abort exchange command fully completes. + * Once completed, it is available via the put list. + */ + if (lpfc_ncmd->nvmeCmd) { + nvme_cmd = lpfc_ncmd->nvmeCmd; + nvme_cmd->done(nvme_cmd); + lpfc_ncmd->nvmeCmd = NULL; + } + lpfc_release_nvme_buf(phba, lpfc_ncmd); +} + +/** + * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete + * @phba: Pointer to HBA context object. + * + * This function flushes all wqes in the nvme rings and frees all resources + * in the txcmplq. 
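/*
 * Editor's note: illustrative sketch, not part of the upstream patch.
 * It models the "only one thread may drop the initial reference" guard
 * used with the NLP_DROPPED flag above: test-and-set the flag under a
 * lock so exactly one caller performs the final put. Hypothetical names,
 * POSIX threads and a mutex in place of the driver's spinlock.
 */
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int dropped;                     /* like the NLP_DROPPED flag        */
static int refs = 1;                    /* the initial node reference       */

static void drop_initial_ref_once(const char *who)
{
        pthread_mutex_lock(&lock);
        if (dropped) {                  /* another thread already did it    */
                pthread_mutex_unlock(&lock);
                return;
        }
        dropped = 1;
        pthread_mutex_unlock(&lock);
        refs--;                         /* the single final put             */
        printf("%s dropped the initial reference (refs=%d)\n", who, refs);
}

static void *racer(void *name)
{
        drop_initial_ref_once((const char *)name);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, racer, "unreg path");
        pthread_create(&t2, NULL, racer, "unload path");
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;                       /* refs reaches 0 exactly once      */
}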
This function does not issue abort wqes for the IO + * commands in txcmplq, they will just be returned with + * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI + * slot has been permanently disabled. + **/ +void +lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) +{ + struct lpfc_sli_ring *pring; + u32 i, wait_cnt = 0; + + if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq) + return; + + /* Cycle through all IO rings and make sure all outstanding + * WQEs have been removed from the txcmplqs. + */ + for (i = 0; i < phba->cfg_hdw_queue; i++) { + if (!phba->sli4_hba.hdwq[i].io_wq) + continue; + pring = phba->sli4_hba.hdwq[i].io_wq->pring; + + if (!pring) + continue; + + /* Retrieve everything on the txcmplq */ + while (!list_empty(&pring->txcmplq)) { + msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); + wait_cnt++; + + /* The sleep is 10mS. Every ten seconds, + * dump a message. Something is wrong. + */ + if ((wait_cnt % 1000) == 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6178 NVME IO not empty, " + "cnt %d\n", wait_cnt); + } + } + } + + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + +} + +void +lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, + uint32_t stat, uint32_t param) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + struct lpfc_io_buf *lpfc_ncmd; + struct nvmefc_fcp_req *nCmd; + struct lpfc_wcqe_complete wcqe; + struct lpfc_wcqe_complete *wcqep = &wcqe; + + lpfc_ncmd = pwqeIn->io_buf; + if (!lpfc_ncmd) { + lpfc_sli_release_iocbq(phba, pwqeIn); + return; + } + /* For abort iocb just return, IO iocb will do a done call */ + if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) == + CMD_ABORT_XRI_CX) { + lpfc_sli_release_iocbq(phba, pwqeIn); + return; + } + + spin_lock(&lpfc_ncmd->buf_lock); + nCmd = lpfc_ncmd->nvmeCmd; + if (!nCmd) { + spin_unlock(&lpfc_ncmd->buf_lock); + lpfc_release_nvme_buf(phba, lpfc_ncmd); + return; + } + spin_unlock(&lpfc_ncmd->buf_lock); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6194 NVME Cancel xri %x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag); + + wcqep->word0 = 0; + bf_set(lpfc_wcqe_c_status, wcqep, stat); + wcqep->parameter = param; + wcqep->total_data_placed = 0; + wcqep->word3 = 0; /* xb is 0 */ + + /* Call release with XB=1 to queue the IO into the abort list. */ + if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) + bf_set(lpfc_wcqe_c_xb, wcqep, 1); + + memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep)); + (pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn); +#endif +} diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h new file mode 100644 index 000000000..733c27794 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_nvme.h @@ -0,0 +1,253 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. 
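/*
 * Editor's note: illustrative sketch, not part of the upstream patch.
 * It models the drain loop in lpfc_nvme_wait_for_io_drain() above: poll
 * an outstanding-IO count, sleep a short interval between polls, and
 * warn roughly every ten seconds of waiting (1000 polls x 10 ms).
 * Hypothetical names; the simulated count decays so the demo terminates.
 */
#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

#define POLL_MS         10              /* like LPFC_XRI_EXCH_BUSY_WAIT_T1  */
#define WARN_EVERY      1000            /* ~10 seconds of polling           */

static int outstanding = 25;            /* stand-in for txcmplq entries     */

static void drain_outstanding(void)
{
        struct timespec ts = { 0, POLL_MS * 1000000L };
        unsigned int wait_cnt = 0;

        while (outstanding > 0) {
                nanosleep(&ts, NULL);
                wait_cnt++;
                if ((wait_cnt % WARN_EVERY) == 0)
                        printf("IO not drained yet, polls %u, left %d\n",
                               wait_cnt, outstanding);
                outstanding--;          /* completions trickle in           */
        }
        printf("drained after %u polls\n", wait_cnt);
}

int main(void)
{
        drain_outstanding();
        return 0;
}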
* + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + ********************************************************************/ + +#include +#include +#include + +#define LPFC_NVME_DEFAULT_SEGS (64 + 1) /* 256K IOs */ + +#define LPFC_NVME_ERSP_LEN 0x20 + +#define LPFC_NVME_WAIT_TMO 10 +#define LPFC_NVME_EXPEDITE_XRICNT 8 +#define LPFC_NVME_FB_SHIFT 9 +#define LPFC_NVME_MAX_FB (1 << 20) /* 1M */ + +#define lpfc_ndlp_get_nrport(ndlp) \ + ((!ndlp->nrport || (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT))\ + ? NULL : ndlp->nrport) + +struct lpfc_nvme_qhandle { + uint32_t index; /* WQ index to use */ + uint32_t qidx; /* queue index passed to create */ + uint32_t cpu_id; /* current cpu id at time of create */ +}; + +/* Declare nvme-based local and remote port definitions. */ +struct lpfc_nvme_lport { + struct lpfc_vport *vport; + struct completion *lport_unreg_cmp; + /* Add stats counters here */ + atomic_t fc4NvmeLsRequests; + atomic_t fc4NvmeLsCmpls; + atomic_t xmt_fcp_noxri; + atomic_t xmt_fcp_bad_ndlp; + atomic_t xmt_fcp_qdepth; + atomic_t xmt_fcp_wqerr; + atomic_t xmt_fcp_err; + atomic_t xmt_fcp_abort; + atomic_t xmt_ls_abort; + atomic_t xmt_ls_err; + atomic_t cmpl_fcp_xb; + atomic_t cmpl_fcp_err; + atomic_t cmpl_ls_xb; + atomic_t cmpl_ls_err; +}; + +struct lpfc_nvme_rport { + struct lpfc_nvme_lport *lport; + struct nvme_fc_remote_port *remoteport; + struct lpfc_nodelist *ndlp; + struct completion rport_unreg_done; +}; + +struct lpfc_nvme_fcpreq_priv { + struct lpfc_io_buf *nvme_buf; +}; + +/* + * set NVME LS request timeouts to 30s. It is larger than the 2*R_A_TOV + * set by the spec, which appears to have issues with some devices. 
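/*
 * Editor's note: illustrative sketch, not part of the upstream patch.
 * It models the per-lport statistics pattern in struct lpfc_nvme_lport
 * above: plain atomic counters bumped on the submit/abort paths and read
 * lock-free for reporting. Hypothetical names, C11 <stdatomic.h> in
 * place of the kernel's atomic_t.
 */
#include <stdio.h>
#include <stdatomic.h>

struct demo_lport_stats {
        atomic_int xmt_fcp_err;
        atomic_int xmt_fcp_abort;
        atomic_int cmpl_fcp_err;
};

static void submit_failed(struct demo_lport_stats *s)
{
        atomic_fetch_add(&s->xmt_fcp_err, 1);   /* like atomic_inc()        */
}

int main(void)
{
        struct demo_lport_stats stats;

        atomic_init(&stats.xmt_fcp_err, 0);     /* like the atomic_set(..,0)
                                                 * block in create_localport */
        atomic_init(&stats.xmt_fcp_abort, 0);
        atomic_init(&stats.cmpl_fcp_err, 0);

        submit_failed(&stats);
        printf("xmt_fcp_err=%d\n", atomic_load(&stats.xmt_fcp_err));
        return 0;
}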
+ */ +#define LPFC_NVME_LS_TIMEOUT 30 + + +#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */ +#define LPFC_NVMET_RQE_MIN_POST 128 +#define LPFC_NVMET_RQE_DEF_POST 512 +#define LPFC_NVMET_RQE_DEF_COUNT 2048 +#define LPFC_NVMET_SUCCESS_LEN 12 + +#define LPFC_NVMET_MRQ_AUTO 0 +#define LPFC_NVMET_MRQ_MAX 16 + +#define LPFC_NVMET_WAIT_TMO (5 * MSEC_PER_SEC) + +/* Used for NVME Target */ +#define LPFC_NVMET_INV_HOST_ACTIVE 1 + +struct lpfc_nvmet_tgtport { + struct lpfc_hba *phba; + struct completion *tport_unreg_cmp; + atomic_t state; /* tracks nvmet hosthandle invalidation */ + + /* Stats counters - lpfc_nvmet_unsol_ls_buffer */ + atomic_t rcv_ls_req_in; + atomic_t rcv_ls_req_out; + atomic_t rcv_ls_req_drop; + atomic_t xmt_ls_abort; + atomic_t xmt_ls_abort_cmpl; + + /* Stats counters - lpfc_nvmet_xmt_ls_rsp */ + atomic_t xmt_ls_rsp; + atomic_t xmt_ls_drop; + + /* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */ + atomic_t xmt_ls_rsp_error; + atomic_t xmt_ls_rsp_aborted; + atomic_t xmt_ls_rsp_xb_set; + atomic_t xmt_ls_rsp_cmpl; + + /* Stats counters - lpfc_nvmet_unsol_fcp_buffer */ + atomic_t rcv_fcp_cmd_in; + atomic_t rcv_fcp_cmd_out; + atomic_t rcv_fcp_cmd_drop; + atomic_t rcv_fcp_cmd_defer; + atomic_t xmt_fcp_release; + + /* Stats counters - lpfc_nvmet_xmt_fcp_op */ + atomic_t xmt_fcp_drop; + atomic_t xmt_fcp_read_rsp; + atomic_t xmt_fcp_read; + atomic_t xmt_fcp_write; + atomic_t xmt_fcp_rsp; + + /* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */ + atomic_t xmt_fcp_rsp_xb_set; + atomic_t xmt_fcp_rsp_cmpl; + atomic_t xmt_fcp_rsp_error; + atomic_t xmt_fcp_rsp_aborted; + atomic_t xmt_fcp_rsp_drop; + + /* Stats counters - lpfc_nvmet_xmt_fcp_abort */ + atomic_t xmt_fcp_xri_abort_cqe; + atomic_t xmt_fcp_abort; + atomic_t xmt_fcp_abort_cmpl; + atomic_t xmt_abort_sol; + atomic_t xmt_abort_unsol; + atomic_t xmt_abort_rsp; + atomic_t xmt_abort_rsp_error; + + /* Stats counters - defer IO */ + atomic_t defer_ctx; + atomic_t defer_fod; + atomic_t defer_wqfull; +}; + +struct lpfc_nvmet_ctx_info { + struct list_head nvmet_ctx_list; + spinlock_t nvmet_ctx_list_lock; /* lock per CPU */ + struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu; + struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu; + uint16_t nvmet_ctx_list_cnt; + char pad[16]; /* pad to a cache-line */ +}; + +/* This retrieves the context info associated with the specified cpu / mrq */ +#define lpfc_get_ctx_list(phba, cpu, mrq) \ + (phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq)) + +/* Values for state field of struct lpfc_async_xchg_ctx */ +#define LPFC_NVME_STE_LS_RCV 1 +#define LPFC_NVME_STE_LS_ABORT 2 +#define LPFC_NVME_STE_LS_RSP 3 +#define LPFC_NVME_STE_RCV 4 +#define LPFC_NVME_STE_DATA 5 +#define LPFC_NVME_STE_ABORT 6 +#define LPFC_NVME_STE_DONE 7 +#define LPFC_NVME_STE_FREE 0xff + +/* Values for flag field of struct lpfc_async_xchg_ctx */ +#define LPFC_NVME_IO_INP 0x1 /* IO is in progress on exchange */ +#define LPFC_NVME_ABORT_OP 0x2 /* Abort WQE issued on exchange */ +#define LPFC_NVME_XBUSY 0x4 /* XB bit set on IO cmpl */ +#define LPFC_NVME_CTX_RLS 0x8 /* ctx free requested */ +#define LPFC_NVME_ABTS_RCV 0x10 /* ABTS received on exchange */ +#define LPFC_NVME_CTX_REUSE_WQ 0x20 /* ctx reused via WQ */ +#define LPFC_NVME_DEFER_WQFULL 0x40 /* Waiting on a free WQE */ +#define LPFC_NVME_TNOTIFY 0x80 /* notify transport of abts */ + +struct lpfc_async_xchg_ctx { + union { + struct nvmefc_tgt_fcp_req fcp_req; + } hdlrctx; + struct list_head list; + struct lpfc_hba *phba; + struct lpfc_nodelist *ndlp; + struct nvmefc_ls_req 
*ls_req; + struct nvmefc_ls_rsp ls_rsp; + struct lpfc_iocbq *wqeq; + struct lpfc_iocbq *abort_wqeq; + spinlock_t ctxlock; /* protect flag access */ + uint32_t sid; + uint32_t offset; + uint16_t oxid; + uint16_t size; + uint16_t entry_cnt; + uint16_t cpu; + uint16_t idx; + uint16_t state; + uint16_t flag; + void *payload; + struct rqb_dmabuf *rqb_buffer; + struct lpfc_nvmet_ctxbuf *ctxbuf; + struct lpfc_sli4_hdw_queue *hdwq; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint64_t ts_isr_cmd; + uint64_t ts_cmd_nvme; + uint64_t ts_nvme_data; + uint64_t ts_data_wqput; + uint64_t ts_isr_data; + uint64_t ts_data_nvme; + uint64_t ts_nvme_status; + uint64_t ts_status_wqput; + uint64_t ts_isr_status; + uint64_t ts_status_nvme; +#endif +}; + + +/* routines found in lpfc_nvme.c */ +int __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct nvmefc_ls_req *pnvme_lsreq, + void (*gen_req_cmp)(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe)); +void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport, + struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe); +int __lpfc_nvme_ls_abort(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, struct nvmefc_ls_req *pnvme_lsreq); + +/* routines found in lpfc_nvmet.c */ +int lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *ctxp, uint32_t sid, + uint16_t xri); +int __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg, + struct nvmefc_ls_rsp *ls_rsp, + void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe)); +void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe); diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c new file mode 100644 index 000000000..425328d9c --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -0,0 +1,3642 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + ********************************************************************/ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "lpfc_version.h" +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" + +static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *, + struct lpfc_async_xchg_ctx *, + dma_addr_t rspbuf, + uint16_t rspsize); +static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *, + struct lpfc_async_xchg_ctx *); +static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *, + struct lpfc_async_xchg_ctx *, + uint32_t, uint16_t); +static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *, + struct lpfc_async_xchg_ctx *, + uint32_t, uint16_t); +static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_async_xchg_ctx *); +static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *); + +static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf); + +static union lpfc_wqe128 lpfc_tsend_cmd_template; +static union lpfc_wqe128 lpfc_treceive_cmd_template; +static union lpfc_wqe128 lpfc_trsp_cmd_template; + +/* Setup WQE templates for NVME IOs */ +void +lpfc_nvmet_cmd_template(void) +{ + union lpfc_wqe128 *wqe; + + /* TSEND template */ + wqe = &lpfc_tsend_cmd_template; + memset(wqe, 0, sizeof(union lpfc_wqe128)); + + /* Word 0, 1, 2 - BDE is variable */ + + /* Word 3 - payload_offset_len is zero */ + + /* Word 4 - relative_offset is variable */ + + /* Word 5 - is zero */ + + /* Word 6 - ctxt_tag, xri_tag is variable */ + + /* Word 7 - wqe_ar is variable */ + bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE); + bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF); + bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3); + bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI); + bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1); + + /* Word 8 - abort_tag is variable */ + + /* Word 9 - reqtag, rcvoxid is variable */ + + /* Word 10 - wqes, xc is variable */ + bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG); + bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12); + + /* Word 11 - sup, irsp, irsplen is variable */ + bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND); + bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0); + + /* Word 12 - fcp_data_len is variable */ + + /* Word 13, 14, 15 - PBDE is zero */ + + /* TRECEIVE template */ + wqe = &lpfc_treceive_cmd_template; + memset(wqe, 0, sizeof(union lpfc_wqe128)); + + /* Word 0, 1, 2 - BDE is variable */ + + /* Word 3 */ + wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN; + + /* Word 4 - relative_offset is variable */ + + /* Word 5 - is zero */ + + /* Word 6 - ctxt_tag, xri_tag is variable */ + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, 
CMD_FCP_TRECEIVE64_WQE); + bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF); + bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3); + bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI); + bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0); + + /* Word 8 - abort_tag is variable */ + + /* Word 9 - reqtag, rcvoxid is variable */ + + /* Word 10 - xc is variable */ + bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1); + bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0); + bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG); + bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ); + bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12); + bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1); + + /* Word 11 - pbde is variable */ + bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE); + bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0); + bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0); + bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0); + bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1); + + /* Word 12 - fcp_data_len is variable */ + + /* Word 13, 14, 15 - PBDE is variable */ + + /* TRSP template */ + wqe = &lpfc_trsp_cmd_template; + memset(wqe, 0, sizeof(union lpfc_wqe128)); + + /* Word 0, 1, 2 - BDE is variable */ + + /* Word 3 - response_len is variable */ + + /* Word 4, 5 - is zero */ + + /* Word 6 - ctxt_tag, xri_tag is variable */ + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE); + bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED); + bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3); + bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI); + bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */ + + /* Word 8 - abort_tag is variable */ + + /* Word 9 - reqtag is variable */ + + /* Word 10 wqes, xc is variable */ + bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1); + bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG); + bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0); + bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0); + bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE); + bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3); + + /* Word 11 irsp, irsplen is variable */ + bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP); + bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0); + bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0); + bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0); + bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0); + + /* Word 12, 13, 14, 15 - is zero */ +} + +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) +static struct lpfc_async_xchg_ctx * +lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri) +{ + struct lpfc_async_xchg_ctx *ctxp; + unsigned long iflag; + bool found = false; + + spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag); + list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) { + if (ctxp->ctxbuf->sglq->sli4_xritag != xri) + continue; + + found = true; + break; + } + spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); + if (found) + return ctxp; + + return NULL; +} + +static struct lpfc_async_xchg_ctx * +lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid) +{ + struct lpfc_async_xchg_ctx *ctxp; + unsigned long iflag; + bool found = false; + + spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag); + list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) { + if (ctxp->oxid != oxid 
|| ctxp->sid != sid) + continue; + + found = true; + break; + } + spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); + if (found) + return ctxp; + + return NULL; +} +#endif + +static void +lpfc_nvmet_defer_release(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *ctxp) +{ + lockdep_assert_held(&ctxp->ctxlock); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6313 NVMET Defer ctx release oxid x%x flg x%x\n", + ctxp->oxid, ctxp->flag); + + if (ctxp->flag & LPFC_NVME_CTX_RLS) + return; + + ctxp->flag |= LPFC_NVME_CTX_RLS; + spin_lock(&phba->sli4_hba.t_active_list_lock); + list_del(&ctxp->list); + spin_unlock(&phba->sli4_hba.t_active_list_lock); + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); +} + +/** + * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the + * transmission of an NVME LS response. + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @rspwqe: Pointer to driver response WQE object. + * + * The function is called from SLI ring event handler with no + * lock held. The function frees memory resources used for the command + * used to send the NVME LS RSP. + **/ +void +__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe) +{ + struct lpfc_async_xchg_ctx *axchg = cmdwqe->context_un.axchg; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; + struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp; + uint32_t status, result; + + status = bf_get(lpfc_wcqe_c_status, wcqe); + result = wcqe->parameter; + + if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6410 NVMEx LS cmpl state mismatch IO x%x: " + "%d %d\n", + axchg->oxid, axchg->state, axchg->entry_cnt); + } + + lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n", + axchg->oxid, status, result); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n", + status, result, axchg->oxid); + + lpfc_nlp_put(cmdwqe->ndlp); + cmdwqe->context_un.axchg = NULL; + cmdwqe->bpl_dmabuf = NULL; + lpfc_sli_release_iocbq(phba, cmdwqe); + ls_rsp->done(ls_rsp); + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6200 NVMEx LS rsp cmpl done status %d oxid x%x\n", + status, axchg->oxid); + kfree(axchg); +} + +/** + * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @rspwqe: Pointer to driver response WQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME LS commands + * The function updates any states and statistics, then calls the + * generic completion handler to free resources. 
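/*
 * Editor's note: illustrative sketch, not part of the upstream patch.
 * It models lpfc_nvmet_defer_release() above: a context is moved from
 * the active list to the aborted/deferred list at most once, with a
 * flag recording that the move already happened. Hypothetical names;
 * the driver performs this under two spinlocks, omitted here.
 */
#include <stdio.h>

struct ctx {
        int deferred;                   /* like the LPFC_NVME_CTX_RLS flag  */
        struct ctx *next;
};

static struct ctx *active_list, *abts_list;

static void defer_release(struct ctx *c)
{
        struct ctx **pp;

        if (c->deferred)                /* already parked: nothing to do    */
                return;
        c->deferred = 1;

        for (pp = &active_list; *pp; pp = &(*pp)->next)   /* unlink         */
                if (*pp == c) {
                        *pp = c->next;
                        break;
                }
        c->next = abts_list;            /* park until the XRI is released   */
        abts_list = c;
}

int main(void)
{
        static struct ctx c1, c2;

        c1.next = &c2;
        active_list = &c1;
        defer_release(&c1);
        defer_release(&c1);             /* second call is a no-op           */
        printf("active head: %p, abts head: %p\n",
               (void *)active_list, (void *)abts_list);
        return 0;
}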
+ **/ +static void +lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe) +{ + struct lpfc_nvmet_tgtport *tgtp; + uint32_t status, result; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; + + if (!phba->targetport) + goto finish; + + status = bf_get(lpfc_wcqe_c_status, wcqe); + result = wcqe->parameter; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (tgtp) { + if (status) { + atomic_inc(&tgtp->xmt_ls_rsp_error); + if (result == IOERR_ABORT_REQUESTED) + atomic_inc(&tgtp->xmt_ls_rsp_aborted); + if (bf_get(lpfc_wcqe_c_xb, wcqe)) + atomic_inc(&tgtp->xmt_ls_rsp_xb_set); + } else { + atomic_inc(&tgtp->xmt_ls_rsp_cmpl); + } + } + +finish: + __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, rspwqe); +} + +/** + * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context + * @phba: HBA buffer is associated with + * @ctx_buf: ctx buffer context + * + * Description: Frees the given DMA buffer in the appropriate way given by + * reposting it to its associated RQ so it can be reused. + * + * Notes: Takes phba->hbalock. Can be called with or without other locks held. + * + * Returns: None + **/ +void +lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context; + struct lpfc_nvmet_tgtport *tgtp; + struct fc_frame_header *fc_hdr; + struct rqb_dmabuf *nvmebuf; + struct lpfc_nvmet_ctx_info *infop; + uint32_t size, oxid, sid; + int cpu; + unsigned long iflag; + + if (ctxp->state == LPFC_NVME_STE_FREE) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6411 NVMET free, already free IO x%x: %d %d\n", + ctxp->oxid, ctxp->state, ctxp->entry_cnt); + } + + if (ctxp->rqb_buffer) { + spin_lock_irqsave(&ctxp->ctxlock, iflag); + nvmebuf = ctxp->rqb_buffer; + /* check if freed in another path whilst acquiring lock */ + if (nvmebuf) { + ctxp->rqb_buffer = NULL; + if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) { + ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + nvmebuf->hrq->rqbp->rqb_free_buffer(phba, + nvmebuf); + } else { + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + /* repost */ + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); + } + } else { + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + } + } + ctxp->state = LPFC_NVME_STE_FREE; + + spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); + if (phba->sli4_hba.nvmet_io_wait_cnt) { + list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list, + nvmebuf, struct rqb_dmabuf, + hbuf.list); + phba->sli4_hba.nvmet_io_wait_cnt--; + spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, + iflag); + + fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); + oxid = be16_to_cpu(fc_hdr->fh_ox_id); + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + size = nvmebuf->bytes_recv; + sid = sli4_sid_from_fc_hdr(fc_hdr); + + ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context; + ctxp->wqeq = NULL; + ctxp->offset = 0; + ctxp->phba = phba; + ctxp->size = size; + ctxp->oxid = oxid; + ctxp->sid = sid; + ctxp->state = LPFC_NVME_STE_RCV; + ctxp->entry_cnt = 1; + ctxp->flag = 0; + ctxp->ctxbuf = ctx_buf; + ctxp->rqb_buffer = (void *)nvmebuf; + spin_lock_init(&ctxp->ctxlock); + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + /* NOTE: isr time stamp is stale when context is re-assigned*/ + if (ctxp->ts_isr_cmd) { + ctxp->ts_cmd_nvme = 0; + ctxp->ts_nvme_data = 0; + ctxp->ts_data_wqput = 0; + ctxp->ts_isr_data = 0; + ctxp->ts_data_nvme 
= 0; + ctxp->ts_nvme_status = 0; + ctxp->ts_status_wqput = 0; + ctxp->ts_isr_status = 0; + ctxp->ts_status_nvme = 0; + } +#endif + atomic_inc(&tgtp->rcv_fcp_cmd_in); + + /* Indicate that a replacement buffer has been posted */ + spin_lock_irqsave(&ctxp->ctxlock, iflag); + ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + + if (!queue_work(phba->wq, &ctx_buf->defer_work)) { + atomic_inc(&tgtp->rcv_fcp_cmd_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6181 Unable to queue deferred work " + "for oxid x%x. " + "FCP Drop IO [x%x x%x x%x]\n", + ctxp->oxid, + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out), + atomic_read(&tgtp->xmt_fcp_release)); + + spin_lock_irqsave(&ctxp->ctxlock, iflag); + lpfc_nvmet_defer_release(phba, ctxp); + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); + } + return; + } + spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag); + + /* + * Use the CPU context list, from the MRQ the IO was received on + * (ctxp->idx), to save context structure. + */ + spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag); + list_del_init(&ctxp->list); + spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); + cpu = raw_smp_processor_id(); + infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx); + spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag); + list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list); + infop->nvmet_ctx_list_cnt++; + spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag); +#endif +} + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +static void +lpfc_nvmet_ktime(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *ctxp) +{ + uint64_t seg1, seg2, seg3, seg4, seg5; + uint64_t seg6, seg7, seg8, seg9, seg10; + uint64_t segsum; + + if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme || + !ctxp->ts_nvme_data || !ctxp->ts_data_wqput || + !ctxp->ts_isr_data || !ctxp->ts_data_nvme || + !ctxp->ts_nvme_status || !ctxp->ts_status_wqput || + !ctxp->ts_isr_status || !ctxp->ts_status_nvme) + return; + + if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd) + return; + if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme) + return; + if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data) + return; + if (ctxp->ts_nvme_data > ctxp->ts_data_wqput) + return; + if (ctxp->ts_data_wqput > ctxp->ts_isr_data) + return; + if (ctxp->ts_isr_data > ctxp->ts_data_nvme) + return; + if (ctxp->ts_data_nvme > ctxp->ts_nvme_status) + return; + if (ctxp->ts_nvme_status > ctxp->ts_status_wqput) + return; + if (ctxp->ts_status_wqput > ctxp->ts_isr_status) + return; + if (ctxp->ts_isr_status > ctxp->ts_status_nvme) + return; + /* + * Segment 1 - Time from FCP command received by MSI-X ISR + * to FCP command is passed to NVME Layer. + * Segment 2 - Time from FCP command payload handed + * off to NVME Layer to Driver receives a Command op + * from NVME Layer. + * Segment 3 - Time from Driver receives a Command op + * from NVME Layer to Command is put on WQ. + * Segment 4 - Time from Driver WQ put is done + * to MSI-X ISR for Command cmpl. + * Segment 5 - Time from MSI-X ISR for Command cmpl to + * Command cmpl is passed to NVME Layer. + * Segment 6 - Time from Command cmpl is passed to NVME + * Layer to Driver receives a RSP op from NVME Layer. + * Segment 7 - Time from Driver receives a RSP op from + * NVME Layer to WQ put is done on TRSP FCP Status. + * Segment 8 - Time from Driver WQ put is done on TRSP + * FCP Status to MSI-X ISR for TRSP cmpl. 
+ * Segment 9 - Time from MSI-X ISR for TRSP cmpl to + * TRSP cmpl is passed to NVME Layer. + * Segment 10 - Time from FCP command received by + * MSI-X ISR to command is completed on wire. + * (Segments 1 thru 8) for READDATA / WRITEDATA + * (Segments 1 thru 4) for READDATA_RSP + */ + seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd; + segsum = seg1; + + seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd; + if (segsum > seg2) + return; + seg2 -= segsum; + segsum += seg2; + + seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd; + if (segsum > seg3) + return; + seg3 -= segsum; + segsum += seg3; + + seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd; + if (segsum > seg4) + return; + seg4 -= segsum; + segsum += seg4; + + seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd; + if (segsum > seg5) + return; + seg5 -= segsum; + segsum += seg5; + + + /* For auto rsp commands seg6 thru seg10 will be 0 */ + if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) { + seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd; + if (segsum > seg6) + return; + seg6 -= segsum; + segsum += seg6; + + seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd; + if (segsum > seg7) + return; + seg7 -= segsum; + segsum += seg7; + + seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd; + if (segsum > seg8) + return; + seg8 -= segsum; + segsum += seg8; + + seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd; + if (segsum > seg9) + return; + seg9 -= segsum; + segsum += seg9; + + if (ctxp->ts_isr_status < ctxp->ts_isr_cmd) + return; + seg10 = (ctxp->ts_isr_status - + ctxp->ts_isr_cmd); + } else { + if (ctxp->ts_isr_data < ctxp->ts_isr_cmd) + return; + seg6 = 0; + seg7 = 0; + seg8 = 0; + seg9 = 0; + seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd); + } + + phba->ktime_seg1_total += seg1; + if (seg1 < phba->ktime_seg1_min) + phba->ktime_seg1_min = seg1; + else if (seg1 > phba->ktime_seg1_max) + phba->ktime_seg1_max = seg1; + + phba->ktime_seg2_total += seg2; + if (seg2 < phba->ktime_seg2_min) + phba->ktime_seg2_min = seg2; + else if (seg2 > phba->ktime_seg2_max) + phba->ktime_seg2_max = seg2; + + phba->ktime_seg3_total += seg3; + if (seg3 < phba->ktime_seg3_min) + phba->ktime_seg3_min = seg3; + else if (seg3 > phba->ktime_seg3_max) + phba->ktime_seg3_max = seg3; + + phba->ktime_seg4_total += seg4; + if (seg4 < phba->ktime_seg4_min) + phba->ktime_seg4_min = seg4; + else if (seg4 > phba->ktime_seg4_max) + phba->ktime_seg4_max = seg4; + + phba->ktime_seg5_total += seg5; + if (seg5 < phba->ktime_seg5_min) + phba->ktime_seg5_min = seg5; + else if (seg5 > phba->ktime_seg5_max) + phba->ktime_seg5_max = seg5; + + phba->ktime_data_samples++; + if (!seg6) + goto out; + + phba->ktime_seg6_total += seg6; + if (seg6 < phba->ktime_seg6_min) + phba->ktime_seg6_min = seg6; + else if (seg6 > phba->ktime_seg6_max) + phba->ktime_seg6_max = seg6; + + phba->ktime_seg7_total += seg7; + if (seg7 < phba->ktime_seg7_min) + phba->ktime_seg7_min = seg7; + else if (seg7 > phba->ktime_seg7_max) + phba->ktime_seg7_max = seg7; + + phba->ktime_seg8_total += seg8; + if (seg8 < phba->ktime_seg8_min) + phba->ktime_seg8_min = seg8; + else if (seg8 > phba->ktime_seg8_max) + phba->ktime_seg8_max = seg8; + + phba->ktime_seg9_total += seg9; + if (seg9 < phba->ktime_seg9_min) + phba->ktime_seg9_min = seg9; + else if (seg9 > phba->ktime_seg9_max) + phba->ktime_seg9_max = seg9; +out: + phba->ktime_seg10_total += seg10; + if (seg10 < phba->ktime_seg10_min) + phba->ktime_seg10_min = seg10; + else if (seg10 > phba->ktime_seg10_max) + phba->ktime_seg10_max = seg10; + phba->ktime_status_samples++; +} +#endif + +/** + * 
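/*
 * Editor's note: illustrative sketch, not part of the upstream patch.
 * It models the latency-segment accounting in lpfc_nvmet_ktime() above:
 * each segment is the delta from the first timestamp minus the sum of
 * the earlier segments, discarded if timestamps run backwards, then
 * folded into total/min/max accumulators. Hypothetical names, only two
 * segments for brevity.
 */
#include <stdio.h>
#include <stdint.h>

struct seg_stats { uint64_t total, min, max, samples; };

static void seg_update(struct seg_stats *s, uint64_t v)
{
        s->total += v;
        if (!s->samples || v < s->min)
                s->min = v;
        if (v > s->max)
                s->max = v;
        s->samples++;
}

/* ts[0] = cmd ISR, ts[1] = handed to upper layer, ts[2] = queued to HW */
static int account(const uint64_t ts[3], struct seg_stats st[2])
{
        uint64_t seg, sum = 0;
        int i;

        for (i = 1; i < 3; i++) {
                if (ts[i] < ts[i - 1])  /* out-of-order timestamps: discard */
                        return -1;
                seg = (ts[i] - ts[0]) - sum;    /* delta past prior segments */
                sum += seg;
                seg_update(&st[i - 1], seg);
        }
        return 0;
}

int main(void)
{
        struct seg_stats st[2] = { { 0 } };
        uint64_t ts[3] = { 100, 140, 215 };     /* fake ns timestamps       */

        if (!account(ts, st))
                printf("seg1=%llu seg2=%llu\n",
                       (unsigned long long)st[0].total,
                       (unsigned long long)st[1].total);
        return 0;
}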
lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @rspwqe: Pointer to driver response WQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME FCP commands + * The function frees memory resources used for the NVME commands. + **/ +static void +lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct nvmefc_tgt_fcp_req *rsp; + struct lpfc_async_xchg_ctx *ctxp; + uint32_t status, result, op, logerr; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + int id; +#endif + + ctxp = cmdwqe->context_un.axchg; + ctxp->flag &= ~LPFC_NVME_IO_INP; + + rsp = &ctxp->hdlrctx.fcp_req; + op = rsp->op; + + status = bf_get(lpfc_wcqe_c_status, wcqe); + result = wcqe->parameter; + + if (phba->targetport) + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + else + tgtp = NULL; + + lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n", + ctxp->oxid, op, status); + + if (status) { + rsp->fcp_error = NVME_SC_DATA_XFER_ERROR; + rsp->transferred_length = 0; + if (tgtp) { + atomic_inc(&tgtp->xmt_fcp_rsp_error); + if (result == IOERR_ABORT_REQUESTED) + atomic_inc(&tgtp->xmt_fcp_rsp_aborted); + } + + logerr = LOG_NVME_IOERR; + + /* pick up SLI4 exhange busy condition */ + if (bf_get(lpfc_wcqe_c_xb, wcqe)) { + ctxp->flag |= LPFC_NVME_XBUSY; + logerr |= LOG_NVME_ABTS; + if (tgtp) + atomic_inc(&tgtp->xmt_fcp_rsp_xb_set); + + } else { + ctxp->flag &= ~LPFC_NVME_XBUSY; + } + + lpfc_printf_log(phba, KERN_INFO, logerr, + "6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x " + "XBUSY:x%x\n", + ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag, + status, result, ctxp->flag); + + } else { + rsp->fcp_error = NVME_SC_SUCCESS; + if (op == NVMET_FCOP_RSP) + rsp->transferred_length = rsp->rsplen; + else + rsp->transferred_length = rsp->transfer_length; + if (tgtp) + atomic_inc(&tgtp->xmt_fcp_rsp_cmpl); + } + + if ((op == NVMET_FCOP_READDATA_RSP) || + (op == NVMET_FCOP_RSP)) { + /* Sanity check */ + ctxp->state = LPFC_NVME_STE_DONE; + ctxp->entry_cnt++; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (ctxp->ts_cmd_nvme) { + if (rsp->op == NVMET_FCOP_READDATA_RSP) { + ctxp->ts_isr_data = + cmdwqe->isr_timestamp; + ctxp->ts_data_nvme = + ktime_get_ns(); + ctxp->ts_nvme_status = + ctxp->ts_data_nvme; + ctxp->ts_status_wqput = + ctxp->ts_data_nvme; + ctxp->ts_isr_status = + ctxp->ts_data_nvme; + ctxp->ts_status_nvme = + ctxp->ts_data_nvme; + } else { + ctxp->ts_isr_status = + cmdwqe->isr_timestamp; + ctxp->ts_status_nvme = + ktime_get_ns(); + } + } +#endif + rsp->done(rsp); +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (ctxp->ts_cmd_nvme) + lpfc_nvmet_ktime(phba, ctxp); +#endif + /* lpfc_nvmet_xmt_fcp_release() will recycle the context */ + } else { + ctxp->entry_cnt++; + memset_startat(cmdwqe, 0, cmd_flag); +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (ctxp->ts_cmd_nvme) { + ctxp->ts_isr_data = cmdwqe->isr_timestamp; + ctxp->ts_data_nvme = ktime_get_ns(); + } +#endif + rsp->done(rsp); + } +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { + id = raw_smp_processor_id(); + this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); + if (ctxp->cpu != id) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6704 CPU Check cmdcmpl: " + "cpu %d expect %d\n", + id, ctxp->cpu); + } +#endif +} + +/** + * 
__lpfc_nvme_xmt_ls_rsp - Generic service routine to issue transmit + * an NVME LS rsp for a prior NVME LS request that was received. + * @axchg: pointer to exchange context for the NVME LS request the response + * is for. + * @ls_rsp: pointer to the transport LS RSP that is to be sent + * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done + * + * This routine is used to format and send a WQE to transmit a NVME LS + * Response. The response is for a prior NVME LS request that was + * received and posted to the transport. + * + * Returns: + * 0 : if response successfully transmit + * non-zero : if response failed to transmit, of the form -Exxx. + **/ +int +__lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg, + struct nvmefc_ls_rsp *ls_rsp, + void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe)) +{ + struct lpfc_hba *phba = axchg->phba; + struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer; + struct lpfc_iocbq *nvmewqeq; + struct lpfc_dmabuf dmabuf; + struct ulp_bde64 bpl; + int rc; + + if (phba->pport->load_flag & FC_UNLOADING) + return -ENODEV; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6023 NVMEx LS rsp oxid x%x\n", axchg->oxid); + + if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6412 NVMEx LS rsp state mismatch " + "oxid x%x: %d %d\n", + axchg->oxid, axchg->state, axchg->entry_cnt); + return -EALREADY; + } + axchg->state = LPFC_NVME_STE_LS_RSP; + axchg->entry_cnt++; + + nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma, + ls_rsp->rsplen); + if (nvmewqeq == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6150 NVMEx LS Drop Rsp x%x: Prep\n", + axchg->oxid); + rc = -ENOMEM; + goto out_free_buf; + } + + /* Save numBdes for bpl2sgl */ + nvmewqeq->num_bdes = 1; + nvmewqeq->hba_wqidx = 0; + nvmewqeq->bpl_dmabuf = &dmabuf; + dmabuf.virt = &bpl; + bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow; + bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh; + bpl.tus.f.bdeSize = ls_rsp->rsplen; + bpl.tus.f.bdeFlags = 0; + bpl.tus.w = le32_to_cpu(bpl.tus.w); + /* + * Note: although we're using stack space for the dmabuf, the + * call to lpfc_sli4_issue_wqe is synchronous, so it will not + * be referenced after it returns back to this routine. + */ + + nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp; + nvmewqeq->context_un.axchg = axchg; + + lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n", + axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen); + + rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq); + + /* clear to be sure there's no reference */ + nvmewqeq->bpl_dmabuf = NULL; + + if (rc == WQE_SUCCESS) { + /* + * Okay to repost buffer here, but wait till cmpl + * before freeing ctxp and iocbq. + */ + lpfc_in_buf_free(phba, &nvmebuf->dbuf); + return 0; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6151 NVMEx LS RSP x%x: failed to transmit %d\n", + axchg->oxid, rc); + + rc = -ENXIO; + + lpfc_nlp_put(nvmewqeq->ndlp); + +out_free_buf: + /* Give back resources */ + lpfc_in_buf_free(phba, &nvmebuf->dbuf); + + /* + * As transport doesn't track completions of responses, if the rsp + * fails to send, the transport will effectively ignore the rsp + * and consider the LS done. However, the driver has an active + * exchange open for the LS - so be sure to abort the exchange + * if the response isn't sent. 
+ */ + lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid); + return rc; +} + +/** + * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response + * @tgtport: pointer to target port that NVME LS is to be transmit from. + * @ls_rsp: pointer to the transport LS RSP that is to be sent + * + * Driver registers this routine to transmit responses for received NVME + * LS requests. + * + * This routine is used to format and send a WQE to transmit a NVME LS + * Response. The ls_rsp is used to reverse-map the LS to the original + * NVME LS request sequence, which provides addressing information for + * the remote port the LS to be sent to, as well as the exchange id + * that is the LS is bound to. + * + * Returns: + * 0 : if response successfully transmit + * non-zero : if response failed to transmit, of the form -Exxx. + **/ +static int +lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, + struct nvmefc_ls_rsp *ls_rsp) +{ + struct lpfc_async_xchg_ctx *axchg = + container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp); + struct lpfc_nvmet_tgtport *nvmep = tgtport->private; + int rc; + + if (axchg->phba->pport->load_flag & FC_UNLOADING) + return -ENODEV; + + rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp); + + if (rc) { + atomic_inc(&nvmep->xmt_ls_drop); + /* + * unless the failure is due to having already sent + * the response, an abort will be generated for the + * exchange if the rsp can't be sent. + */ + if (rc != -EALREADY) + atomic_inc(&nvmep->xmt_ls_abort); + return rc; + } + + atomic_inc(&nvmep->xmt_ls_rsp); + return 0; +} + +static int +lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *rsp) +{ + struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; + struct lpfc_async_xchg_ctx *ctxp = + container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req); + struct lpfc_hba *phba = ctxp->phba; + struct lpfc_queue *wq; + struct lpfc_iocbq *nvmewqeq; + struct lpfc_sli_ring *pring; + unsigned long iflags; + int rc; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + int id; +#endif + + if (phba->pport->load_flag & FC_UNLOADING) { + rc = -ENODEV; + goto aerr; + } + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (ctxp->ts_cmd_nvme) { + if (rsp->op == NVMET_FCOP_RSP) + ctxp->ts_nvme_status = ktime_get_ns(); + else + ctxp->ts_nvme_data = ktime_get_ns(); + } + + /* Setup the hdw queue if not already set */ + if (!ctxp->hdwq) + ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid]; + + if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { + id = raw_smp_processor_id(); + this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); + if (rsp->hwqid != id) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6705 CPU Check OP: " + "cpu %d expect %d\n", + id, rsp->hwqid); + ctxp->cpu = id; /* Setup cpu for cmpl check */ + } +#endif + + /* Sanity check */ + if ((ctxp->flag & LPFC_NVME_ABTS_RCV) || + (ctxp->state == LPFC_NVME_STE_ABORT)) { + atomic_inc(&lpfc_nvmep->xmt_fcp_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6102 IO oxid x%x aborted\n", + ctxp->oxid); + rc = -ENXIO; + goto aerr; + } + + nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp); + if (nvmewqeq == NULL) { + atomic_inc(&lpfc_nvmep->xmt_fcp_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6152 FCP Drop IO x%x: Prep\n", + ctxp->oxid); + rc = -ENXIO; + goto aerr; + } + + nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp; + nvmewqeq->context_un.axchg = ctxp; + nvmewqeq->cmd_flag |= LPFC_IO_NVMET; + ctxp->wqeq->hba_wqidx = rsp->hwqid; + + lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len 
x%x\n", + ctxp->oxid, rsp->op, rsp->rsplen); + + ctxp->flag |= LPFC_NVME_IO_INP; + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); + if (rc == WQE_SUCCESS) { +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (!ctxp->ts_cmd_nvme) + return 0; + if (rsp->op == NVMET_FCOP_RSP) + ctxp->ts_status_wqput = ktime_get_ns(); + else + ctxp->ts_data_wqput = ktime_get_ns(); +#endif + return 0; + } + + if (rc == -EBUSY) { + /* + * WQ was full, so queue nvmewqeq to be sent after + * WQE release CQE + */ + ctxp->flag |= LPFC_NVME_DEFER_WQFULL; + wq = ctxp->hdwq->io_wq; + pring = wq->pring; + spin_lock_irqsave(&pring->ring_lock, iflags); + list_add_tail(&nvmewqeq->list, &wq->wqfull_list); + wq->q_flag |= HBA_NVMET_WQFULL; + spin_unlock_irqrestore(&pring->ring_lock, iflags); + atomic_inc(&lpfc_nvmep->defer_wqfull); + return 0; + } + + /* Give back resources */ + atomic_inc(&lpfc_nvmep->xmt_fcp_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6153 FCP Drop IO x%x: Issue: %d\n", + ctxp->oxid, rc); + + ctxp->wqeq->hba_wqidx = 0; + nvmewqeq->context_un.axchg = NULL; + nvmewqeq->bpl_dmabuf = NULL; + rc = -EBUSY; +aerr: + return rc; +} + +static void +lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport) +{ + struct lpfc_nvmet_tgtport *tport = targetport->private; + + /* release any threads waiting for the unreg to complete */ + if (tport->phba->targetport) + complete(tport->tport_unreg_cmp); +} + +static void +lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *req) +{ + struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; + struct lpfc_async_xchg_ctx *ctxp = + container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req); + struct lpfc_hba *phba = ctxp->phba; + struct lpfc_queue *wq; + unsigned long flags; + + if (phba->pport->load_flag & FC_UNLOADING) + return; + + if (!ctxp->hdwq) + ctxp->hdwq = &phba->sli4_hba.hdwq[0]; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6103 NVMET Abort op: oxid x%x flg x%x ste %d\n", + ctxp->oxid, ctxp->flag, ctxp->state); + + lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n", + ctxp->oxid, ctxp->flag, ctxp->state); + + atomic_inc(&lpfc_nvmep->xmt_fcp_abort); + + spin_lock_irqsave(&ctxp->ctxlock, flags); + + /* Since iaab/iaar are NOT set, we need to check + * if the firmware is in process of aborting IO + */ + if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) { + spin_unlock_irqrestore(&ctxp->ctxlock, flags); + return; + } + ctxp->flag |= LPFC_NVME_ABORT_OP; + + if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) { + spin_unlock_irqrestore(&ctxp->ctxlock, flags); + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, + ctxp->oxid); + wq = ctxp->hdwq->io_wq; + lpfc_nvmet_wqfull_flush(phba, wq, ctxp); + return; + } + spin_unlock_irqrestore(&ctxp->ctxlock, flags); + + /* A state of LPFC_NVME_STE_RCV means we have just received + * the NVME command and have not started processing it. 
+ * (by issuing any IO WQEs on this exchange yet) + */ + if (ctxp->state == LPFC_NVME_STE_RCV) + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, + ctxp->oxid); + else + lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, + ctxp->oxid); +} + +static void +lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *rsp) +{ + struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; + struct lpfc_async_xchg_ctx *ctxp = + container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req); + struct lpfc_hba *phba = ctxp->phba; + unsigned long flags; + bool aborting = false; + + spin_lock_irqsave(&ctxp->ctxlock, flags); + if (ctxp->flag & LPFC_NVME_XBUSY) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6027 NVMET release with XBUSY flag x%x" + " oxid x%x\n", + ctxp->flag, ctxp->oxid); + else if (ctxp->state != LPFC_NVME_STE_DONE && + ctxp->state != LPFC_NVME_STE_ABORT) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6413 NVMET release bad state %d %d oxid x%x\n", + ctxp->state, ctxp->entry_cnt, ctxp->oxid); + + if ((ctxp->flag & LPFC_NVME_ABORT_OP) || + (ctxp->flag & LPFC_NVME_XBUSY)) { + aborting = true; + /* let the abort path do the real release */ + lpfc_nvmet_defer_release(phba, ctxp); + } + spin_unlock_irqrestore(&ctxp->ctxlock, flags); + + lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid, + ctxp->state, aborting); + + atomic_inc(&lpfc_nvmep->xmt_fcp_release); + ctxp->flag &= ~LPFC_NVME_TNOTIFY; + + if (aborting) + return; + + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); +} + +static void +lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *rsp) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_async_xchg_ctx *ctxp = + container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req); + struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; + struct lpfc_hba *phba = ctxp->phba; + unsigned long iflag; + + + lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", + ctxp->oxid, ctxp->size, raw_smp_processor_id()); + + if (!nvmebuf) { + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6425 Defer rcv: no buffer oxid x%x: " + "flg %x ste %x\n", + ctxp->oxid, ctxp->flag, ctxp->state); + return; + } + + tgtp = phba->targetport->private; + if (tgtp) + atomic_inc(&tgtp->rcv_fcp_cmd_defer); + + /* Free the nvmebuf since a new buffer already replaced it */ + nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); + spin_lock_irqsave(&ctxp->ctxlock, iflag); + ctxp->rqb_buffer = NULL; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); +} + +/** + * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request + * @phba: Pointer to HBA context object + * @cmdwqe: Pointer to driver command WQE object. + * @rspwqe: Pointer to driver response WQE object. + * + * This function is the completion handler for NVME LS requests. + * The function updates any states and statistics, then calls the + * generic completion handler to finish completion of the request. + **/ +static void +lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe) +{ + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; + __lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe); +} + +/** + * lpfc_nvmet_ls_req - Issue an Link Service request + * @targetport: pointer to target instance registered with nvmet transport. + * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv. + * Driver sets this value to the ndlp pointer. 
+ * @pnvme_lsreq: the transport nvme_ls_req structure for the LS + * + * Driver registers this routine to handle any link service request + * from the nvme_fc transport to a remote nvme-aware port. + * + * Return value : + * 0 - Success + * non-zero: various error codes, in form of -Exxx + **/ +static int +lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport, + void *hosthandle, + struct nvmefc_ls_req *pnvme_lsreq) +{ + struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private; + struct lpfc_hba *phba; + struct lpfc_nodelist *ndlp; + int ret; + u32 hstate; + + if (!lpfc_nvmet) + return -EINVAL; + + phba = lpfc_nvmet->phba; + if (phba->pport->load_flag & FC_UNLOADING) + return -EINVAL; + + hstate = atomic_read(&lpfc_nvmet->state); + if (hstate == LPFC_NVMET_INV_HOST_ACTIVE) + return -EACCES; + + ndlp = (struct lpfc_nodelist *)hosthandle; + + ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq, + lpfc_nvmet_ls_req_cmp); + + return ret; +} + +/** + * lpfc_nvmet_ls_abort - Abort a prior NVME LS request + * @targetport: Transport targetport, that LS was issued from. + * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv. + * Driver sets this value to the ndlp pointer. + * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted + * + * Driver registers this routine to abort an NVME LS request that is + * in progress (from the transports perspective). + **/ +static void +lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport, + void *hosthandle, + struct nvmefc_ls_req *pnvme_lsreq) +{ + struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private; + struct lpfc_hba *phba; + struct lpfc_nodelist *ndlp; + int ret; + + phba = lpfc_nvmet->phba; + if (phba->pport->load_flag & FC_UNLOADING) + return; + + ndlp = (struct lpfc_nodelist *)hosthandle; + + ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq); + if (!ret) + atomic_inc(&lpfc_nvmet->xmt_ls_abort); +} + +static void +lpfc_nvmet_host_release(void *hosthandle) +{ + struct lpfc_nodelist *ndlp = hosthandle; + struct lpfc_hba *phba = ndlp->phba; + struct lpfc_nvmet_tgtport *tgtp; + + if (!phba->targetport || !phba->targetport->private) + return; + + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6202 NVMET XPT releasing hosthandle x%px " + "DID x%x xflags x%x refcnt %d\n", + hosthandle, ndlp->nlp_DID, ndlp->fc4_xpt_flags, + kref_read(&ndlp->kref)); + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + spin_lock_irq(&ndlp->lock); + ndlp->fc4_xpt_flags &= ~NLP_XPT_HAS_HH; + spin_unlock_irq(&ndlp->lock); + lpfc_nlp_put(ndlp); + atomic_set(&tgtp->state, 0); +} + +static void +lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_hba *phba; + uint32_t rc; + + tgtp = tgtport->private; + phba = tgtp->phba; + + rc = lpfc_issue_els_rscn(phba->pport, 0); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6420 NVMET subsystem change: Notification %s\n", + (rc) ? 
"Failed" : "Sent"); +} + +static struct nvmet_fc_target_template lpfc_tgttemplate = { + .targetport_delete = lpfc_nvmet_targetport_delete, + .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, + .fcp_op = lpfc_nvmet_xmt_fcp_op, + .fcp_abort = lpfc_nvmet_xmt_fcp_abort, + .fcp_req_release = lpfc_nvmet_xmt_fcp_release, + .defer_rcv = lpfc_nvmet_defer_rcv, + .discovery_event = lpfc_nvmet_discovery_event, + .ls_req = lpfc_nvmet_ls_req, + .ls_abort = lpfc_nvmet_ls_abort, + .host_release = lpfc_nvmet_host_release, + + .max_hw_queues = 1, + .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, + .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, + .dma_boundary = 0xFFFFFFFF, + + /* optional features */ + .target_features = 0, + /* sizes of additional private data for data structures */ + .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport), + .lsrqst_priv_sz = 0, +}; + +static void +__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba, + struct lpfc_nvmet_ctx_info *infop) +{ + struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf; + unsigned long flags; + + spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags); + list_for_each_entry_safe(ctx_buf, next_ctx_buf, + &infop->nvmet_ctx_list, list) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + list_del_init(&ctx_buf->list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + + spin_lock(&phba->hbalock); + __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag); + spin_unlock(&phba->hbalock); + + ctx_buf->sglq->state = SGL_FREED; + ctx_buf->sglq->ndlp = NULL; + + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_add_tail(&ctx_buf->sglq->list, + &phba->sli4_hba.lpfc_nvmet_sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + + lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); + kfree(ctx_buf->context); + } + spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags); +} + +static void +lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba) +{ + struct lpfc_nvmet_ctx_info *infop; + int i, j; + + /* The first context list, MRQ 0 CPU 0 */ + infop = phba->sli4_hba.nvmet_ctx_info; + if (!infop) + return; + + /* Cycle the entire CPU context list for every MRQ */ + for (i = 0; i < phba->cfg_nvmet_mrq; i++) { + for_each_present_cpu(j) { + infop = lpfc_get_ctx_list(phba, j, i); + __lpfc_nvmet_clean_io_for_cpu(phba, infop); + } + } + kfree(phba->sli4_hba.nvmet_ctx_info); + phba->sli4_hba.nvmet_ctx_info = NULL; +} + +static int +lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) +{ + struct lpfc_nvmet_ctxbuf *ctx_buf; + struct lpfc_iocbq *nvmewqe; + union lpfc_wqe128 *wqe; + struct lpfc_nvmet_ctx_info *last_infop; + struct lpfc_nvmet_ctx_info *infop; + int i, j, idx, cpu; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME, + "6403 Allocate NVMET resources for %d XRIs\n", + phba->sli4_hba.nvmet_xri_cnt); + + phba->sli4_hba.nvmet_ctx_info = kcalloc( + phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq, + sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL); + if (!phba->sli4_hba.nvmet_ctx_info) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6419 Failed allocate memory for " + "nvmet context lists\n"); + return -ENOMEM; + } + + /* + * Assuming X CPUs in the system, and Y MRQs, allocate some + * lpfc_nvmet_ctx_info structures as follows: + * + * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0 + * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1 + * ... + * cpuX/mrqY cpuX/mrqY ... cpuX/mrqY + * + * Each line represents a MRQ "silo" containing an entry for + * every CPU. 
+ * + * MRQ X is initially assumed to be associated with CPU X, thus + * contexts are initially distributed across all MRQs using + * the MRQ index (N) as follows cpuN/mrqN. When contexts are + * freed, the are freed to the MRQ silo based on the CPU number + * of the IO completion. Thus a context that was allocated for MRQ A + * whose IO completed on CPU B will be freed to cpuB/mrqA. + */ + for_each_possible_cpu(i) { + for (j = 0; j < phba->cfg_nvmet_mrq; j++) { + infop = lpfc_get_ctx_list(phba, i, j); + INIT_LIST_HEAD(&infop->nvmet_ctx_list); + spin_lock_init(&infop->nvmet_ctx_list_lock); + infop->nvmet_ctx_list_cnt = 0; + } + } + + /* + * Setup the next CPU context info ptr for each MRQ. + * MRQ 0 will cycle thru CPUs 0 - X separately from + * MRQ 1 cycling thru CPUs 0 - X, and so on. + */ + for (j = 0; j < phba->cfg_nvmet_mrq; j++) { + last_infop = lpfc_get_ctx_list(phba, + cpumask_first(cpu_present_mask), + j); + for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) { + infop = lpfc_get_ctx_list(phba, i, j); + infop->nvmet_ctx_next_cpu = last_infop; + last_infop = infop; + } + } + + /* For all nvmet xris, allocate resources needed to process a + * received command on a per xri basis. + */ + idx = 0; + cpu = cpumask_first(cpu_present_mask); + for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) { + ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL); + if (!ctx_buf) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6404 Ran out of memory for NVMET\n"); + return -ENOMEM; + } + + ctx_buf->context = kzalloc(sizeof(*ctx_buf->context), + GFP_KERNEL); + if (!ctx_buf->context) { + kfree(ctx_buf); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6405 Ran out of NVMET " + "context memory\n"); + return -ENOMEM; + } + ctx_buf->context->ctxbuf = ctx_buf; + ctx_buf->context->state = LPFC_NVME_STE_FREE; + + ctx_buf->iocbq = lpfc_sli_get_iocbq(phba); + if (!ctx_buf->iocbq) { + kfree(ctx_buf->context); + kfree(ctx_buf); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6406 Ran out of NVMET iocb/WQEs\n"); + return -ENOMEM; + } + ctx_buf->iocbq->cmd_flag = LPFC_IO_NVMET; + nvmewqe = ctx_buf->iocbq; + wqe = &nvmewqe->wqe; + + /* Initialize WQE */ + memset(wqe, 0, sizeof(union lpfc_wqe)); + + ctx_buf->iocbq->cmd_dmabuf = NULL; + spin_lock(&phba->sli4_hba.sgl_list_lock); + ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + if (!ctx_buf->sglq) { + lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); + kfree(ctx_buf->context); + kfree(ctx_buf); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6407 Ran out of NVMET XRIs\n"); + return -ENOMEM; + } + INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work); + + /* + * Add ctx to MRQidx context list. Our initial assumption + * is MRQidx will be associated with CPUidx. This association + * can change on the fly. 
+ */ + infop = lpfc_get_ctx_list(phba, cpu, idx); + spin_lock(&infop->nvmet_ctx_list_lock); + list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list); + infop->nvmet_ctx_list_cnt++; + spin_unlock(&infop->nvmet_ctx_list_lock); + + /* Spread ctx structures evenly across all MRQs */ + idx++; + if (idx >= phba->cfg_nvmet_mrq) { + idx = 0; + cpu = cpumask_first(cpu_present_mask); + continue; + } + cpu = lpfc_next_present_cpu(cpu); + } + + for_each_present_cpu(i) { + for (j = 0; j < phba->cfg_nvmet_mrq; j++) { + infop = lpfc_get_ctx_list(phba, i, j); + lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, + "6408 TOTAL NVMET ctx for CPU %d " + "MRQ %d: cnt %d nextcpu x%px\n", + i, j, infop->nvmet_ctx_list_cnt, + infop->nvmet_ctx_next_cpu); + } + } + return 0; +} + +int +lpfc_nvmet_create_targetport(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + struct lpfc_nvmet_tgtport *tgtp; + struct nvmet_fc_port_info pinfo; + int error; + + if (phba->targetport) + return 0; + + error = lpfc_nvmet_setup_io_context(phba); + if (error) + return error; + + memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info)); + pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); + pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); + pinfo.port_id = vport->fc_myDID; + + /* We need to tell the transport layer + 1 because it takes page + * alignment into account. When space for the SGL is allocated we + * allocate + 3, one for cmd, one for rsp and one for this alignment + */ + lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; + lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue; + lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP; + +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, + &phba->pcidev->dev, + &phba->targetport); +#else + error = -ENOENT; +#endif + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6025 Cannot register NVME targetport x%x: " + "portnm %llx nodenm %llx segs %d qs %d\n", + error, + pinfo.port_name, pinfo.node_name, + lpfc_tgttemplate.max_sgl_segments, + lpfc_tgttemplate.max_hw_queues); + phba->targetport = NULL; + phba->nvmet_support = 0; + + lpfc_nvmet_cleanup_io_context(phba); + + } else { + tgtp = (struct lpfc_nvmet_tgtport *) + phba->targetport->private; + tgtp->phba = phba; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6026 Registered NVME " + "targetport: x%px, private x%px " + "portnm %llx nodenm %llx segs %d qs %d\n", + phba->targetport, tgtp, + pinfo.port_name, pinfo.node_name, + lpfc_tgttemplate.max_sgl_segments, + lpfc_tgttemplate.max_hw_queues); + + atomic_set(&tgtp->rcv_ls_req_in, 0); + atomic_set(&tgtp->rcv_ls_req_out, 0); + atomic_set(&tgtp->rcv_ls_req_drop, 0); + atomic_set(&tgtp->xmt_ls_abort, 0); + atomic_set(&tgtp->xmt_ls_abort_cmpl, 0); + atomic_set(&tgtp->xmt_ls_rsp, 0); + atomic_set(&tgtp->xmt_ls_drop, 0); + atomic_set(&tgtp->xmt_ls_rsp_error, 0); + atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0); + atomic_set(&tgtp->xmt_ls_rsp_aborted, 0); + atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0); + atomic_set(&tgtp->rcv_fcp_cmd_in, 0); + atomic_set(&tgtp->rcv_fcp_cmd_out, 0); + atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); + atomic_set(&tgtp->xmt_fcp_drop, 0); + atomic_set(&tgtp->xmt_fcp_read_rsp, 0); + atomic_set(&tgtp->xmt_fcp_read, 0); + atomic_set(&tgtp->xmt_fcp_write, 0); + atomic_set(&tgtp->xmt_fcp_rsp, 0); + atomic_set(&tgtp->xmt_fcp_release, 0); + atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); + atomic_set(&tgtp->xmt_fcp_rsp_error, 0); + 
atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0); + atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0); + atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); + atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0); + atomic_set(&tgtp->xmt_fcp_abort, 0); + atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0); + atomic_set(&tgtp->xmt_abort_unsol, 0); + atomic_set(&tgtp->xmt_abort_sol, 0); + atomic_set(&tgtp->xmt_abort_rsp, 0); + atomic_set(&tgtp->xmt_abort_rsp_error, 0); + atomic_set(&tgtp->defer_ctx, 0); + atomic_set(&tgtp->defer_fod, 0); + atomic_set(&tgtp->defer_wqfull, 0); + } + return error; +} + +int +lpfc_nvmet_update_targetport(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + + if (!phba->targetport) + return 0; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, + "6007 Update NVMET port x%px did x%x\n", + phba->targetport, vport->fc_myDID); + + phba->targetport->port_id = vport->fc_myDID; + return 0; +} + +/** + * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort + * @phba: pointer to lpfc hba data structure. + * @axri: pointer to the nvmet xri abort wcqe structure. + * + * This routine is invoked by the worker thread to process a SLI4 fast-path + * NVMET aborted xri. + **/ +void +lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); + uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); + struct lpfc_async_xchg_ctx *ctxp, *next_ctxp; + struct lpfc_nvmet_tgtport *tgtp; + struct nvmefc_tgt_fcp_req *req = NULL; + struct lpfc_nodelist *ndlp; + unsigned long iflag = 0; + int rrq_empty = 0; + bool released = false; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6317 XB aborted xri x%x rxid x%x\n", xri, rxid); + + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) + return; + + if (phba->targetport) { + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe); + } + + spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); + list_for_each_entry_safe(ctxp, next_ctxp, + &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, + list) { + if (ctxp->ctxbuf->sglq->sli4_xritag != xri) + continue; + + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, + iflag); + + spin_lock_irqsave(&ctxp->ctxlock, iflag); + /* Check if we already received a free context call + * and we have completed processing an abort situation. 
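+ * If so (LPFC_NVME_CTX_RLS is set and no LPFC_NVME_ABORT_OP is still
+ * pending), the context no longer needs to stay on the ABTS list: it is
+ * unlinked here and, once XBUSY is cleared, recycled further down via
+ * lpfc_nvmet_ctxbuf_post().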
+ */ + if (ctxp->flag & LPFC_NVME_CTX_RLS && + !(ctxp->flag & LPFC_NVME_ABORT_OP)) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + list_del_init(&ctxp->list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + released = true; + } + ctxp->flag &= ~LPFC_NVME_XBUSY; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + + rrq_empty = list_empty(&phba->active_rrq_list); + ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); + if (ndlp && + (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || + ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { + lpfc_set_rrq_active(phba, ndlp, + ctxp->ctxbuf->sglq->sli4_lxritag, + rxid, 1); + lpfc_sli4_abts_err_handler(phba, ndlp, axri); + } + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6318 XB aborted oxid x%x flg x%x (%x)\n", + ctxp->oxid, ctxp->flag, released); + if (released) + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); + + if (rrq_empty) + lpfc_worker_wake_up(phba); + return; + } + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); + ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri); + if (ctxp) { + /* + * Abort already done by FW, so BA_ACC sent. + * However, the transport may be unaware. + */ + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6323 NVMET Rcv ABTS xri x%x ctxp state x%x " + "flag x%x oxid x%x rxid x%x\n", + xri, ctxp->state, ctxp->flag, ctxp->oxid, + rxid); + + spin_lock_irqsave(&ctxp->ctxlock, iflag); + ctxp->flag |= LPFC_NVME_ABTS_RCV; + ctxp->state = LPFC_NVME_STE_ABORT; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + + lpfc_nvmeio_data(phba, + "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", + xri, raw_smp_processor_id(), 0); + + req = &ctxp->hdlrctx.fcp_req; + if (req) + nvmet_fc_rcv_fcp_abort(phba->targetport, req); + } +#endif +} + +int +lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, + struct fc_frame_header *fc_hdr) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_hba *phba = vport->phba; + struct lpfc_async_xchg_ctx *ctxp, *next_ctxp; + struct nvmefc_tgt_fcp_req *rsp; + uint32_t sid; + uint16_t oxid, xri; + unsigned long iflag = 0; + + sid = sli4_sid_from_fc_hdr(fc_hdr); + oxid = be16_to_cpu(fc_hdr->fh_ox_id); + + spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); + list_for_each_entry_safe(ctxp, next_ctxp, + &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, + list) { + if (ctxp->oxid != oxid || ctxp->sid != sid) + continue; + + xri = ctxp->ctxbuf->sglq->sli4_xritag; + + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, + iflag); + spin_lock_irqsave(&ctxp->ctxlock, iflag); + ctxp->flag |= LPFC_NVME_ABTS_RCV; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + + lpfc_nvmeio_data(phba, + "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", + xri, raw_smp_processor_id(), 0); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6319 NVMET Rcv ABTS:acc xri x%x\n", xri); + + rsp = &ctxp->hdlrctx.fcp_req; + nvmet_fc_rcv_fcp_abort(phba->targetport, rsp); + + /* Respond with BA_ACC accordingly */ + lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); + return 0; + } + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); + /* check the wait list */ + if (phba->sli4_hba.nvmet_io_wait_cnt) { + struct rqb_dmabuf *nvmebuf; + struct fc_frame_header *fc_hdr_tmp; + u32 sid_tmp; + u16 oxid_tmp; + bool found = false; + + spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); + + /* match by oxid and s_id */ + list_for_each_entry(nvmebuf, + &phba->sli4_hba.lpfc_nvmet_io_wait_list, + hbuf.list) { + fc_hdr_tmp = (struct fc_frame_header *) + (nvmebuf->hbuf.virt); + oxid_tmp = 
be16_to_cpu(fc_hdr_tmp->fh_ox_id); + sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp); + if (oxid_tmp != oxid || sid_tmp != sid) + continue; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6321 NVMET Rcv ABTS oxid x%x from x%x " + "is waiting for a ctxp\n", + oxid, sid); + + list_del_init(&nvmebuf->hbuf.list); + phba->sli4_hba.nvmet_io_wait_cnt--; + found = true; + break; + } + spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, + iflag); + + /* free buffer since already posted a new DMA buffer to RQ */ + if (found) { + nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); + /* Respond with BA_ACC accordingly */ + lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); + return 0; + } + } + + /* check active list */ + ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid); + if (ctxp) { + xri = ctxp->ctxbuf->sglq->sli4_xritag; + + spin_lock_irqsave(&ctxp->ctxlock, iflag); + ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP); + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + + lpfc_nvmeio_data(phba, + "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", + xri, raw_smp_processor_id(), 0); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x " + "flag x%x state x%x\n", + ctxp->oxid, xri, ctxp->flag, ctxp->state); + + if (ctxp->flag & LPFC_NVME_TNOTIFY) { + /* Notify the transport */ + nvmet_fc_rcv_fcp_abort(phba->targetport, + &ctxp->hdlrctx.fcp_req); + } else { + cancel_work_sync(&ctxp->ctxbuf->defer_work); + spin_lock_irqsave(&ctxp->ctxlock, iflag); + lpfc_nvmet_defer_release(phba, ctxp); + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + } + lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, + ctxp->oxid); + + lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); + return 0; + } + + lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n", + oxid, raw_smp_processor_id(), 1); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid); + + /* Respond with BA_RJT accordingly */ + lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0); +#endif + return 0; +} + +static void +lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq, + struct lpfc_async_xchg_ctx *ctxp) +{ + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *nvmewqeq; + struct lpfc_iocbq *next_nvmewqeq; + unsigned long iflags; + struct lpfc_wcqe_complete wcqe; + struct lpfc_wcqe_complete *wcqep; + + pring = wq->pring; + wcqep = &wcqe; + + /* Fake an ABORT error code back to cmpl routine */ + memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete)); + bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT); + wcqep->parameter = IOERR_ABORT_REQUESTED; + + spin_lock_irqsave(&pring->ring_lock, iflags); + list_for_each_entry_safe(nvmewqeq, next_nvmewqeq, + &wq->wqfull_list, list) { + if (ctxp) { + /* Checking for a specific IO to flush */ + if (nvmewqeq->context_un.axchg == ctxp) { + list_del(&nvmewqeq->list); + spin_unlock_irqrestore(&pring->ring_lock, + iflags); + memcpy(&nvmewqeq->wcqe_cmpl, wcqep, + sizeof(*wcqep)); + lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, + nvmewqeq); + return; + } + continue; + } else { + /* Flush all IOs */ + list_del(&nvmewqeq->list); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + memcpy(&nvmewqeq->wcqe_cmpl, wcqep, sizeof(*wcqep)); + lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, nvmewqeq); + spin_lock_irqsave(&pring->ring_lock, iflags); + } + } + if (!ctxp) + wq->q_flag &= ~HBA_NVMET_WQFULL; + spin_unlock_irqrestore(&pring->ring_lock, iflags); +} + +void +lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, + struct lpfc_queue *wq) +{ +#if 
(IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *nvmewqeq; + struct lpfc_async_xchg_ctx *ctxp; + unsigned long iflags; + int rc; + + /* + * Some WQE slots are available, so try to re-issue anything + * on the WQ wqfull_list. + */ + pring = wq->pring; + spin_lock_irqsave(&pring->ring_lock, iflags); + while (!list_empty(&wq->wqfull_list)) { + list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq, + list); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + ctxp = nvmewqeq->context_un.axchg; + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); + spin_lock_irqsave(&pring->ring_lock, iflags); + if (rc == -EBUSY) { + /* WQ was full again, so put it back on the list */ + list_add(&nvmewqeq->list, &wq->wqfull_list); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return; + } + if (rc == WQE_SUCCESS) { +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (ctxp->ts_cmd_nvme) { + if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP) + ctxp->ts_status_wqput = ktime_get_ns(); + else + ctxp->ts_data_wqput = ktime_get_ns(); + } +#endif + } else { + WARN_ON(rc); + } + } + wq->q_flag &= ~HBA_NVMET_WQFULL; + spin_unlock_irqrestore(&pring->ring_lock, iflags); + +#endif +} + +void +lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_queue *wq; + uint32_t qidx; + DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp); + + if (phba->nvmet_support == 0) + return; + if (phba->targetport) { + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + wq = phba->sli4_hba.hdwq[qidx].io_wq; + lpfc_nvmet_wqfull_flush(phba, wq, NULL); + } + tgtp->tport_unreg_cmp = &tport_unreg_cmp; + nvmet_fc_unregister_targetport(phba->targetport); + if (!wait_for_completion_timeout(&tport_unreg_cmp, + msecs_to_jiffies(LPFC_NVMET_WAIT_TMO))) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6179 Unreg targetport x%px timeout " + "reached.\n", phba->targetport); + lpfc_nvmet_cleanup_io_context(phba); + } + phba->targetport = NULL; +#endif +} + +/** + * lpfc_nvmet_handle_lsreq - Process an NVME LS request + * @phba: pointer to lpfc hba data structure. + * @axchg: pointer to exchange context for the NVME LS request + * + * This routine is used for processing an asychronously received NVME LS + * request. Any remaining validation is done and the LS is then forwarded + * to the nvmet-fc transport via nvmet_fc_rcv_ls_req(). + * + * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing) + * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done. + * lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg. + * + * Returns 0 if LS was handled and delivered to the transport + * Returns 1 if LS failed to be handled and should be dropped + */ +int +lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *axchg) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private; + uint32_t *payload = axchg->payload; + int rc; + + atomic_inc(&tgtp->rcv_ls_req_in); + + /* + * Driver passes the ndlp as the hosthandle argument allowing + * the transport to generate LS requests for any associateions + * that are created. 
+ */ + rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp, + axchg->payload, axchg->size); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x " + "%08x %08x %08x\n", axchg->size, rc, + *payload, *(payload+1), *(payload+2), + *(payload+3), *(payload+4), *(payload+5)); + + if (!rc) { + atomic_inc(&tgtp->rcv_ls_req_out); + return 0; + } + + atomic_inc(&tgtp->rcv_ls_req_drop); +#endif + return 1; +} + +static void +lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context; + struct lpfc_hba *phba = ctxp->phba; + struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; + struct lpfc_nvmet_tgtport *tgtp; + uint32_t *payload, qno; + uint32_t rc; + unsigned long iflags; + + if (!nvmebuf) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6159 process_rcv_fcp_req, nvmebuf is NULL, " + "oxid: x%x flg: x%x state: x%x\n", + ctxp->oxid, ctxp->flag, ctxp->state); + spin_lock_irqsave(&ctxp->ctxlock, iflags); + lpfc_nvmet_defer_release(phba, ctxp); + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, + ctxp->oxid); + return; + } + + if (ctxp->flag & LPFC_NVME_ABTS_RCV) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6324 IO oxid x%x aborted\n", + ctxp->oxid); + return; + } + + payload = (uint32_t *)(nvmebuf->dbuf.virt); + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + ctxp->flag |= LPFC_NVME_TNOTIFY; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (ctxp->ts_isr_cmd) + ctxp->ts_cmd_nvme = ktime_get_ns(); +#endif + /* + * The calling sequence should be: + * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done + * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. + * When we return from nvmet_fc_rcv_fcp_req, all relevant info + * the NVME command / FC header is stored. + * A buffer has already been reposted for this IO, so just free + * the nvmebuf. 
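+ *
+ * Outcomes handled below: rc == 0 means the transport accepted the
+ * command, so the rcv buffer is reposted via lpfc_rq_buf_free() unless
+ * it was already replaced; rc == -EOVERFLOW means the transport
+ * deferred the command, so a replacement RQ buffer is posted and the
+ * original is kept until the .defer_rcv callback; any other rc drops
+ * the IO and aborts the exchange.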
+ */ + rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req, + payload, ctxp->size); + /* Process FCP command */ + if (rc == 0) { + atomic_inc(&tgtp->rcv_fcp_cmd_out); + spin_lock_irqsave(&ctxp->ctxlock, iflags); + if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) || + (nvmebuf != ctxp->rqb_buffer)) { + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); + return; + } + ctxp->rqb_buffer = NULL; + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ + return; + } + + /* Processing of FCP command is deferred */ + if (rc == -EOVERFLOW) { + lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d " + "from %06x\n", + ctxp->oxid, ctxp->size, ctxp->sid); + atomic_inc(&tgtp->rcv_fcp_cmd_out); + atomic_inc(&tgtp->defer_fod); + spin_lock_irqsave(&ctxp->ctxlock, iflags); + if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) { + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); + return; + } + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); + /* + * Post a replacement DMA buffer to RQ and defer + * freeing rcv buffer till .defer_rcv callback + */ + qno = nvmebuf->idx; + lpfc_post_rq_buffer( + phba, phba->sli4_hba.nvmet_mrq_hdr[qno], + phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); + return; + } + ctxp->flag &= ~LPFC_NVME_TNOTIFY; + atomic_inc(&tgtp->rcv_fcp_cmd_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", + ctxp->oxid, rc, + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out), + atomic_read(&tgtp->xmt_fcp_release)); + lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", + ctxp->oxid, ctxp->size, ctxp->sid); + spin_lock_irqsave(&ctxp->ctxlock, iflags); + lpfc_nvmet_defer_release(phba, ctxp); + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); +#endif +} + +static void +lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_nvmet_ctxbuf *ctx_buf = + container_of(work, struct lpfc_nvmet_ctxbuf, defer_work); + + lpfc_nvmet_process_rcv_fcp_req(ctx_buf); +#endif +} + +static struct lpfc_nvmet_ctxbuf * +lpfc_nvmet_replenish_context(struct lpfc_hba *phba, + struct lpfc_nvmet_ctx_info *current_infop) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_nvmet_ctxbuf *ctx_buf = NULL; + struct lpfc_nvmet_ctx_info *get_infop; + int i; + + /* + * The current_infop for the MRQ a NVME command IU was received + * on is empty. Our goal is to replenish this MRQs context + * list from a another CPUs. + * + * First we need to pick a context list to start looking on. + * nvmet_ctx_start_cpu has available context the last time + * we needed to replenish this CPU where nvmet_ctx_next_cpu + * is just the next sequential CPU for this MRQ. 
+ */ + if (current_infop->nvmet_ctx_start_cpu) + get_infop = current_infop->nvmet_ctx_start_cpu; + else + get_infop = current_infop->nvmet_ctx_next_cpu; + + for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) { + if (get_infop == current_infop) { + get_infop = get_infop->nvmet_ctx_next_cpu; + continue; + } + spin_lock(&get_infop->nvmet_ctx_list_lock); + + /* Just take the entire context list, if there are any */ + if (get_infop->nvmet_ctx_list_cnt) { + list_splice_init(&get_infop->nvmet_ctx_list, + ¤t_infop->nvmet_ctx_list); + current_infop->nvmet_ctx_list_cnt = + get_infop->nvmet_ctx_list_cnt - 1; + get_infop->nvmet_ctx_list_cnt = 0; + spin_unlock(&get_infop->nvmet_ctx_list_lock); + + current_infop->nvmet_ctx_start_cpu = get_infop; + list_remove_head(¤t_infop->nvmet_ctx_list, + ctx_buf, struct lpfc_nvmet_ctxbuf, + list); + return ctx_buf; + } + + /* Otherwise, move on to the next CPU for this MRQ */ + spin_unlock(&get_infop->nvmet_ctx_list_lock); + get_infop = get_infop->nvmet_ctx_next_cpu; + } + +#endif + /* Nothing found, all contexts for the MRQ are in-flight */ + return NULL; +} + +/** + * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer + * @phba: pointer to lpfc hba data structure. + * @idx: relative index of MRQ vector + * @nvmebuf: pointer to lpfc nvme command HBQ data structure. + * @isr_timestamp: in jiffies. + * @cqflag: cq processing information regarding workload. + * + * This routine is used for processing the WQE associated with a unsolicited + * event. It first determines whether there is an existing ndlp that matches + * the DID from the unsolicited WQE. If not, it will create a new one with + * the DID from the unsolicited WQE. The ELS command from the unsolicited + * WQE is then used to invoke the proper routine and to set up proper state + * of the discovery state machine. + **/ +static void +lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, + uint32_t idx, + struct rqb_dmabuf *nvmebuf, + uint64_t isr_timestamp, + uint8_t cqflag) +{ + struct lpfc_async_xchg_ctx *ctxp; + struct lpfc_nvmet_tgtport *tgtp; + struct fc_frame_header *fc_hdr; + struct lpfc_nvmet_ctxbuf *ctx_buf; + struct lpfc_nvmet_ctx_info *current_infop; + uint32_t size, oxid, sid, qno; + unsigned long iflag; + int current_cpu; + + if (!IS_ENABLED(CONFIG_NVME_TARGET_FC)) + return; + + ctx_buf = NULL; + if (!nvmebuf || !phba->targetport) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6157 NVMET FCP Drop IO\n"); + if (nvmebuf) + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); + return; + } + + /* + * Get a pointer to the context list for this MRQ based on + * the CPU this MRQ IRQ is associated with. If the CPU association + * changes from our initial assumption, the context list could + * be empty, thus it would need to be replenished with the + * context list from another CPU for this MRQ. 
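+ * (That replenish step is what lpfc_nvmet_replenish_context() above
+ * handles; it is only attempted below when the local per-CPU list for
+ * this MRQ is empty.)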
+ */ + current_cpu = raw_smp_processor_id(); + current_infop = lpfc_get_ctx_list(phba, current_cpu, idx); + spin_lock_irqsave(¤t_infop->nvmet_ctx_list_lock, iflag); + if (current_infop->nvmet_ctx_list_cnt) { + list_remove_head(¤t_infop->nvmet_ctx_list, + ctx_buf, struct lpfc_nvmet_ctxbuf, list); + current_infop->nvmet_ctx_list_cnt--; + } else { + ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop); + } + spin_unlock_irqrestore(¤t_infop->nvmet_ctx_list_lock, iflag); + + fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); + oxid = be16_to_cpu(fc_hdr->fh_ox_id); + size = nvmebuf->bytes_recv; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { + this_cpu_inc(phba->sli4_hba.c_stat->rcv_io); + if (idx != current_cpu) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6703 CPU Check rcv: " + "cpu %d expect %d\n", + current_cpu, idx); + } +#endif + + lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n", + oxid, size, raw_smp_processor_id()); + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + + if (!ctx_buf) { + /* Queue this NVME IO to process later */ + spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); + list_add_tail(&nvmebuf->hbuf.list, + &phba->sli4_hba.lpfc_nvmet_io_wait_list); + phba->sli4_hba.nvmet_io_wait_cnt++; + phba->sli4_hba.nvmet_io_wait_total++; + spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, + iflag); + + /* Post a brand new DMA buffer to RQ */ + qno = nvmebuf->idx; + lpfc_post_rq_buffer( + phba, phba->sli4_hba.nvmet_mrq_hdr[qno], + phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); + + atomic_inc(&tgtp->defer_ctx); + return; + } + + sid = sli4_sid_from_fc_hdr(fc_hdr); + + ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context; + spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag); + list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list); + spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); + if (ctxp->state != LPFC_NVME_STE_FREE) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6414 NVMET Context corrupt %d %d oxid x%x\n", + ctxp->state, ctxp->entry_cnt, ctxp->oxid); + } + ctxp->wqeq = NULL; + ctxp->offset = 0; + ctxp->phba = phba; + ctxp->size = size; + ctxp->oxid = oxid; + ctxp->sid = sid; + ctxp->idx = idx; + ctxp->state = LPFC_NVME_STE_RCV; + ctxp->entry_cnt = 1; + ctxp->flag = 0; + ctxp->ctxbuf = ctx_buf; + ctxp->rqb_buffer = (void *)nvmebuf; + ctxp->hdwq = NULL; + spin_lock_init(&ctxp->ctxlock); + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (isr_timestamp) + ctxp->ts_isr_cmd = isr_timestamp; + ctxp->ts_cmd_nvme = 0; + ctxp->ts_nvme_data = 0; + ctxp->ts_data_wqput = 0; + ctxp->ts_isr_data = 0; + ctxp->ts_data_nvme = 0; + ctxp->ts_nvme_status = 0; + ctxp->ts_status_wqput = 0; + ctxp->ts_isr_status = 0; + ctxp->ts_status_nvme = 0; +#endif + + atomic_inc(&tgtp->rcv_fcp_cmd_in); + /* check for cq processing load */ + if (!cqflag) { + lpfc_nvmet_process_rcv_fcp_req(ctx_buf); + return; + } + + if (!queue_work(phba->wq, &ctx_buf->defer_work)) { + atomic_inc(&tgtp->rcv_fcp_cmd_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6325 Unable to queue work for oxid x%x. 
" + "FCP Drop IO [x%x x%x x%x]\n", + ctxp->oxid, + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out), + atomic_read(&tgtp->xmt_fcp_release)); + + spin_lock_irqsave(&ctxp->ctxlock, iflag); + lpfc_nvmet_defer_release(phba, ctxp); + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); + } +} + +/** + * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport + * @phba: pointer to lpfc hba data structure. + * @idx: relative index of MRQ vector + * @nvmebuf: pointer to received nvme data structure. + * @isr_timestamp: in jiffies. + * @cqflag: cq processing information regarding workload. + * + * This routine is used to process an unsolicited event received from a SLI + * (Service Level Interface) ring. The actual processing of the data buffer + * associated with the unsolicited event is done by invoking the routine + * lpfc_nvmet_unsol_fcp_buffer() after properly set up the buffer from the + * SLI RQ on which the unsolicited event was received. + **/ +void +lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, + uint32_t idx, + struct rqb_dmabuf *nvmebuf, + uint64_t isr_timestamp, + uint8_t cqflag) +{ + if (!nvmebuf) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3167 NVMET FCP Drop IO\n"); + return; + } + if (phba->nvmet_support == 0) { + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); + return; + } + lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag); +} + +/** + * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure + * @phba: pointer to a host N_Port data structure. + * @ctxp: Context info for NVME LS Request + * @rspbuf: DMA buffer of NVME command. + * @rspsize: size of the NVME command. + * + * This routine is used for allocating a lpfc-WQE data structure from + * the driver lpfc-WQE free-list and prepare the WQE with the parameters + * passed into the routine for discovery state machine to issue an Extended + * Link Service (NVME) commands. It is a generic lpfc-WQE allocation + * and preparation routine that is used by all the discovery state machine + * routines and the NVME command-specific fields will be later set up by + * the individual discovery machine routines after calling this routine + * allocating and preparing a generic WQE data structure. It fills in the + * Buffer Descriptor Entries (BDEs), allocates buffers for both command + * payload and response payload (if expected). The reference count on the + * ndlp is incremented by 1 and the reference to the ndlp is put into + * context1 of the WQE data structure for this WQE to hold the ndlp + * reference for the command's callback function to access later. 
+ * + * Return code + * Pointer to the newly allocated/prepared nvme wqe data structure + * NULL - when nvme wqe data structure allocation/preparation failed + **/ +static struct lpfc_iocbq * +lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *ctxp, + dma_addr_t rspbuf, uint16_t rspsize) +{ + struct lpfc_nodelist *ndlp; + struct lpfc_iocbq *nvmewqe; + union lpfc_wqe128 *wqe; + + if (!lpfc_is_link_up(phba)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6104 NVMET prep LS wqe: link err: " + "NPORT x%x oxid:x%x ste %d\n", + ctxp->sid, ctxp->oxid, ctxp->state); + return NULL; + } + + /* Allocate buffer for command wqe */ + nvmewqe = lpfc_sli_get_iocbq(phba); + if (nvmewqe == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6105 NVMET prep LS wqe: No WQE: " + "NPORT x%x oxid x%x ste %d\n", + ctxp->sid, ctxp->oxid, ctxp->state); + return NULL; + } + + ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); + if (!ndlp || + ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6106 NVMET prep LS wqe: No ndlp: " + "NPORT x%x oxid x%x ste %d\n", + ctxp->sid, ctxp->oxid, ctxp->state); + goto nvme_wqe_free_wqeq_exit; + } + ctxp->wqeq = nvmewqe; + + /* prevent preparing wqe with NULL ndlp reference */ + nvmewqe->ndlp = lpfc_nlp_get(ndlp); + if (!nvmewqe->ndlp) + goto nvme_wqe_free_wqeq_exit; + nvmewqe->context_un.axchg = ctxp; + + wqe = &nvmewqe->wqe; + memset(wqe, 0, sizeof(union lpfc_wqe)); + + /* Words 0 - 2 */ + wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize; + wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf)); + wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf)); + + /* Word 3 */ + + /* Word 4 */ + + /* Word 5 */ + bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0); + bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1); + bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0); + bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP); + bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME); + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag); + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com, + CMD_XMIT_SEQUENCE64_WQE); + bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI); + bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3); + bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); + + /* Word 8 */ + wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag); + /* Needs to be set by caller */ + bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid); + + /* Word 10 */ + bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); + bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, + LPFC_WQE_LENLOC_WORD12); + bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com, + LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com, + OTHER_COMMAND); + + /* Word 12 */ + wqe->xmit_sequence.xmit_len = rspsize; + + nvmewqe->retry = 1; + nvmewqe->vport = phba->pport; + nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; + nvmewqe->cmd_flag |= LPFC_IO_NVME_LS; + + /* Xmit NVMET response to remote NPORT */ + 
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6039 Xmit NVMET LS response to remote " + "NPORT x%x iotag:x%x oxid:x%x size:x%x\n", + ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid, + rspsize); + return nvmewqe; + +nvme_wqe_free_wqeq_exit: + nvmewqe->context_un.axchg = NULL; + nvmewqe->ndlp = NULL; + nvmewqe->bpl_dmabuf = NULL; + lpfc_sli_release_iocbq(phba, nvmewqe); + return NULL; +} + + +static struct lpfc_iocbq * +lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *ctxp) +{ + struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req; + struct lpfc_nvmet_tgtport *tgtp; + struct sli4_sge *sgl; + struct lpfc_nodelist *ndlp; + struct lpfc_iocbq *nvmewqe; + struct scatterlist *sgel; + union lpfc_wqe128 *wqe; + struct ulp_bde64 *bde; + dma_addr_t physaddr; + int i, cnt, nsegs; + bool use_pbde = false; + int xc = 1; + + if (!lpfc_is_link_up(phba)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6107 NVMET prep FCP wqe: link err:" + "NPORT x%x oxid x%x ste %d\n", + ctxp->sid, ctxp->oxid, ctxp->state); + return NULL; + } + + ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); + if (!ndlp || + ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6108 NVMET prep FCP wqe: no ndlp: " + "NPORT x%x oxid x%x ste %d\n", + ctxp->sid, ctxp->oxid, ctxp->state); + return NULL; + } + + if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6109 NVMET prep FCP wqe: seg cnt err: " + "NPORT x%x oxid x%x ste %d cnt %d\n", + ctxp->sid, ctxp->oxid, ctxp->state, + phba->cfg_nvme_seg_cnt); + return NULL; + } + nsegs = rsp->sg_cnt; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + nvmewqe = ctxp->wqeq; + if (nvmewqe == NULL) { + /* Allocate buffer for command wqe */ + nvmewqe = ctxp->ctxbuf->iocbq; + if (nvmewqe == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6110 NVMET prep FCP wqe: No " + "WQE: NPORT x%x oxid x%x ste %d\n", + ctxp->sid, ctxp->oxid, ctxp->state); + return NULL; + } + ctxp->wqeq = nvmewqe; + xc = 0; /* create new XRI */ + nvmewqe->sli4_lxritag = NO_XRI; + nvmewqe->sli4_xritag = NO_XRI; + } + + /* Sanity check */ + if (((ctxp->state == LPFC_NVME_STE_RCV) && + (ctxp->entry_cnt == 1)) || + (ctxp->state == LPFC_NVME_STE_DATA)) { + wqe = &nvmewqe->wqe; + } else { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6111 Wrong state NVMET FCP: %d cnt %d\n", + ctxp->state, ctxp->entry_cnt); + return NULL; + } + + sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl; + switch (rsp->op) { + case NVMET_FCOP_READDATA: + case NVMET_FCOP_READDATA_RSP: + /* From the tsend template, initialize words 7 - 11 */ + memcpy(&wqe->words[7], + &lpfc_tsend_cmd_template.words[7], + sizeof(uint32_t) * 5); + + /* Words 0 - 2 : The first sg segment */ + sgel = &rsp->sg[0]; + physaddr = sg_dma_address(sgel); + wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel); + wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr)); + wqe->fcp_tsend.bde.addrHigh = + cpu_to_le32(putPaddrHigh(physaddr)); + + /* Word 3 */ + wqe->fcp_tsend.payload_offset_len = 0; + + /* Word 4 */ + wqe->fcp_tsend.relative_offset = ctxp->offset; + + /* Word 5 */ + wqe->fcp_tsend.reserved = 0; + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, + nvmewqe->sli4_xritag); + + /* Word 7 - set ar 
later */ + + /* Word 8 */ + wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag); + bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid); + + /* Word 10 - set wqes later, in template xc=1 */ + if (!xc) + bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0); + + /* Word 12 */ + wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; + + /* Setup 2 SKIP SGEs */ + sgl->addr_hi = 0; + sgl->addr_lo = 0; + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = 0; + sgl++; + sgl->addr_hi = 0; + sgl->addr_lo = 0; + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = 0; + sgl++; + if (rsp->op == NVMET_FCOP_READDATA_RSP) { + atomic_inc(&tgtp->xmt_fcp_read_rsp); + + /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */ + + if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) { + if (ndlp->nlp_flag & NLP_SUPPRESS_RSP) + bf_set(wqe_sup, + &wqe->fcp_tsend.wqe_com, 1); + } else { + bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, + ((rsp->rsplen >> 2) - 1)); + memcpy(&wqe->words[16], rsp->rspaddr, + rsp->rsplen); + } + } else { + atomic_inc(&tgtp->xmt_fcp_read); + + /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */ + bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0); + } + break; + + case NVMET_FCOP_WRITEDATA: + /* From the treceive template, initialize words 3 - 11 */ + memcpy(&wqe->words[3], + &lpfc_treceive_cmd_template.words[3], + sizeof(uint32_t) * 9); + + /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */ + wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP; + wqe->fcp_treceive.bde.tus.f.bdeSize = 0; + wqe->fcp_treceive.bde.addrLow = 0; + wqe->fcp_treceive.bde.addrHigh = 0; + + /* Word 4 */ + wqe->fcp_treceive.relative_offset = ctxp->offset; + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com, + nvmewqe->sli4_xritag); + + /* Word 7 */ + + /* Word 8 */ + wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag); + bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid); + + /* Word 10 - in template xc=1 */ + if (!xc) + bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0); + + /* Word 11 - check for pbde */ + if (nsegs == 1 && phba->cfg_enable_pbde) { + use_pbde = true; + /* Word 11 - PBDE bit already preset by template */ + } else { + /* Overwrite default template setting */ + bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0); + } + + /* Word 12 */ + wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; + + /* Setup 2 SKIP SGEs */ + sgl->addr_hi = 0; + sgl->addr_lo = 0; + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = 0; + sgl++; + sgl->addr_hi = 0; + sgl->addr_lo = 0; + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = 0; + sgl++; + atomic_inc(&tgtp->xmt_fcp_write); + break; + + case NVMET_FCOP_RSP: + /* From the treceive template, initialize words 4 - 11 */ + memcpy(&wqe->words[4], + &lpfc_trsp_cmd_template.words[4], + sizeof(uint32_t) * 8); + + /* Words 0 - 2 */ + physaddr = rsp->rspdma; + wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + 
wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen; + wqe->fcp_trsp.bde.addrLow = + cpu_to_le32(putPaddrLow(physaddr)); + wqe->fcp_trsp.bde.addrHigh = + cpu_to_le32(putPaddrHigh(physaddr)); + + /* Word 3 */ + wqe->fcp_trsp.response_len = rsp->rsplen; + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com, + nvmewqe->sli4_xritag); + + /* Word 7 */ + + /* Word 8 */ + wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag); + bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid); + + /* Word 10 */ + if (xc) + bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1); + + /* Word 11 */ + /* In template wqes=0 irsp=0 irsplen=0 - good response */ + if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) { + /* Bad response - embed it */ + bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1); + bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1); + bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, + ((rsp->rsplen >> 2) - 1)); + memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen); + } + + /* Word 12 */ + wqe->fcp_trsp.rsvd_12_15[0] = 0; + + /* Use rspbuf, NOT sg list */ + nsegs = 0; + sgl->word2 = 0; + atomic_inc(&tgtp->xmt_fcp_rsp); + break; + + default: + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6064 Unknown Rsp Op %d\n", + rsp->op); + return NULL; + } + + nvmewqe->retry = 1; + nvmewqe->vport = phba->pport; + nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; + nvmewqe->ndlp = ndlp; + + for_each_sg(rsp->sg, sgel, nsegs, i) { + physaddr = sg_dma_address(sgel); + cnt = sg_dma_len(sgel); + sgl->addr_hi = putPaddrHigh(physaddr); + sgl->addr_lo = putPaddrLow(physaddr); + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset); + if ((i+1) == rsp->sg_cnt) + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(cnt); + sgl++; + ctxp->offset += cnt; + } + + bde = (struct ulp_bde64 *)&wqe->words[13]; + if (use_pbde) { + /* decrement sgl ptr backwards once to first data sge */ + sgl--; + + /* Words 13-15 (PBDE) */ + bde->addrLow = sgl->addr_lo; + bde->addrHigh = sgl->addr_hi; + bde->tus.f.bdeSize = le32_to_cpu(sgl->sge_len); + bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bde->tus.w = cpu_to_le32(bde->tus.w); + } else { + memset(bde, 0, sizeof(struct ulp_bde64)); + } + ctxp->state = LPFC_NVME_STE_DATA; + ctxp->entry_cnt++; + return nvmewqe; +} + +/** + * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @rspwqe: Pointer to driver response WQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME ABTS for FCP cmds + * The function frees memory resources used for the NVME commands. 
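The abort completion handlers that follow recycle the exchange context only when a release has already been requested (LPFC_NVME_CTX_RLS set) and the hardware no longer holds the exchange busy (LPFC_NVME_XBUSY clear). A toy sketch of that flag-gated deferred-release pattern, with the flag names and type below invented for the illustration:

#include <stdbool.h>
#include <stdio.h>

/* Illustration only: stand-in flags, not the driver's definitions. */
#define DEMO_CTX_RLS    0x01    /* upper layer asked for release    */
#define DEMO_CTX_XBUSY  0x02    /* hardware still owns the exchange */

struct demo_ctx {
        unsigned int flags;
};

/* Recycle only when both conditions are met. */
static bool demo_can_release(const struct demo_ctx *ctx)
{
        return (ctx->flags & DEMO_CTX_RLS) && !(ctx->flags & DEMO_CTX_XBUSY);
}

int main(void)
{
        struct demo_ctx ctx = { .flags = DEMO_CTX_RLS | DEMO_CTX_XBUSY };

        printf("release now? %d\n", demo_can_release(&ctx));  /* 0 */
        ctx.flags &= ~DEMO_CTX_XBUSY;   /* XRI-aborted event arrived */
        printf("release now? %d\n", demo_can_release(&ctx));  /* 1 */
        return 0;
}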
+ **/ +static void +lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe) +{ + struct lpfc_async_xchg_ctx *ctxp; + struct lpfc_nvmet_tgtport *tgtp; + uint32_t result; + unsigned long flags; + bool released = false; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; + + ctxp = cmdwqe->context_un.axchg; + result = wcqe->parameter; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (ctxp->flag & LPFC_NVME_ABORT_OP) + atomic_inc(&tgtp->xmt_fcp_abort_cmpl); + + spin_lock_irqsave(&ctxp->ctxlock, flags); + ctxp->state = LPFC_NVME_STE_DONE; + + /* Check if we already received a free context call + * and we have completed processing an abort situation. + */ + if ((ctxp->flag & LPFC_NVME_CTX_RLS) && + !(ctxp->flag & LPFC_NVME_XBUSY)) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + list_del_init(&ctxp->list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + released = true; + } + ctxp->flag &= ~LPFC_NVME_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); + atomic_inc(&tgtp->xmt_abort_rsp); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6165 ABORT cmpl: oxid x%x flg x%x (%d) " + "WCQE: %08x %08x %08x %08x\n", + ctxp->oxid, ctxp->flag, released, + wcqe->word0, wcqe->total_data_placed, + result, wcqe->word3); + + cmdwqe->rsp_dmabuf = NULL; + cmdwqe->bpl_dmabuf = NULL; + /* + * if transport has released ctx, then can reuse it. Otherwise, + * will be recycled by transport release call. + */ + if (released) + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); + + /* This is the iocbq for the abort, not the command */ + lpfc_sli_release_iocbq(phba, cmdwqe); + + /* Since iaab/iaar are NOT set, there is no work left. + * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted + * should have been called already. + */ +} + +/** + * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @rspwqe: Pointer to driver response WQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME ABTS for FCP cmds + * The function frees memory resources used for the NVME commands. + **/ +static void +lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe) +{ + struct lpfc_async_xchg_ctx *ctxp; + struct lpfc_nvmet_tgtport *tgtp; + unsigned long flags; + uint32_t result; + bool released = false; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; + + ctxp = cmdwqe->context_un.axchg; + result = wcqe->parameter; + + if (!ctxp) { + /* if context is clear, related io alrady complete */ + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n", + wcqe->word0, wcqe->total_data_placed, + result, wcqe->word3); + return; + } + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + spin_lock_irqsave(&ctxp->ctxlock, flags); + if (ctxp->flag & LPFC_NVME_ABORT_OP) + atomic_inc(&tgtp->xmt_fcp_abort_cmpl); + + /* Sanity check */ + if (ctxp->state != LPFC_NVME_STE_ABORT) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6112 ABTS Wrong state:%d oxid x%x\n", + ctxp->state, ctxp->oxid); + } + + /* Check if we already received a free context call + * and we have completed processing an abort situation. 
+ */ + ctxp->state = LPFC_NVME_STE_DONE; + if ((ctxp->flag & LPFC_NVME_CTX_RLS) && + !(ctxp->flag & LPFC_NVME_XBUSY)) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + list_del_init(&ctxp->list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + released = true; + } + ctxp->flag &= ~LPFC_NVME_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); + atomic_inc(&tgtp->xmt_abort_rsp); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6316 ABTS cmpl oxid x%x flg x%x (%x) " + "WCQE: %08x %08x %08x %08x\n", + ctxp->oxid, ctxp->flag, released, + wcqe->word0, wcqe->total_data_placed, + result, wcqe->word3); + + cmdwqe->rsp_dmabuf = NULL; + cmdwqe->bpl_dmabuf = NULL; + /* + * if transport has released ctx, then can reuse it. Otherwise, + * will be recycled by transport release call. + */ + if (released) + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); + + /* Since iaab/iaar are NOT set, there is no work left. + * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted + * should have been called already. + */ +} + +/** + * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @rspwqe: Pointer to driver response WQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME ABTS for LS cmds + * The function frees memory resources used for the NVME commands. + **/ +static void +lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe) +{ + struct lpfc_async_xchg_ctx *ctxp; + struct lpfc_nvmet_tgtport *tgtp; + uint32_t result; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; + + ctxp = cmdwqe->context_un.axchg; + result = wcqe->parameter; + + if (phba->nvmet_support) { + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + atomic_inc(&tgtp->xmt_ls_abort_cmpl); + } + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n", + ctxp, wcqe->word0, wcqe->total_data_placed, + result, wcqe->word3); + + if (!ctxp) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6415 NVMET LS Abort No ctx: WCQE: " + "%08x %08x %08x %08x\n", + wcqe->word0, wcqe->total_data_placed, + result, wcqe->word3); + + lpfc_sli_release_iocbq(phba, cmdwqe); + return; + } + + if (ctxp->state != LPFC_NVME_STE_LS_ABORT) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6416 NVMET LS abort cmpl state mismatch: " + "oxid x%x: %d %d\n", + ctxp->oxid, ctxp->state, ctxp->entry_cnt); + } + + cmdwqe->rsp_dmabuf = NULL; + cmdwqe->bpl_dmabuf = NULL; + lpfc_sli_release_iocbq(phba, cmdwqe); + kfree(ctxp); +} + +static int +lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *ctxp, + uint32_t sid, uint16_t xri) +{ + struct lpfc_nvmet_tgtport *tgtp = NULL; + struct lpfc_iocbq *abts_wqeq; + union lpfc_wqe128 *wqe_abts; + struct lpfc_nodelist *ndlp; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6067 ABTS: sid %x xri x%x/x%x\n", + sid, xri, ctxp->wqeq->sli4_xritag); + + if (phba->nvmet_support && phba->targetport) + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + + ndlp = lpfc_findnode_did(phba->pport, sid); + if (!ndlp || + ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { + if (tgtp) + atomic_inc(&tgtp->xmt_abort_rsp_error); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6134 Drop ABTS - wrong NDLP state x%x.\n", + (ndlp) ? 
ndlp->nlp_state : NLP_STE_MAX_STATE); + + /* No failure to an ABTS request. */ + return 0; + } + + abts_wqeq = ctxp->wqeq; + wqe_abts = &abts_wqeq->wqe; + + /* + * Since we zero the whole WQE, we need to ensure we set the WQE fields + * that were initialized in lpfc_sli4_nvmet_alloc. + */ + memset(wqe_abts, 0, sizeof(union lpfc_wqe)); + + /* Word 5 */ + bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0); + bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1); + bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0); + bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS); + bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS); + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com, + abts_wqeq->sli4_xritag); + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com, + CMD_XMIT_SEQUENCE64_WQE); + bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI); + bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3); + bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0); + + /* Word 8 */ + wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag); + /* Needs to be set by caller */ + bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri); + + /* Word 10 */ + bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com, + LPFC_WQE_LENLOC_WORD12); + bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0); + bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com, + LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com, + OTHER_COMMAND); + + abts_wqeq->vport = phba->pport; + abts_wqeq->ndlp = ndlp; + abts_wqeq->context_un.axchg = ctxp; + abts_wqeq->bpl_dmabuf = NULL; + abts_wqeq->num_bdes = 0; + /* hba_wqidx should already be setup from command we are aborting */ + abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR; + abts_wqeq->iocb.ulpLe = 1; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6069 Issue ABTS to xri x%x reqtag x%x\n", + xri, abts_wqeq->iotag); + return 1; +} + +static int +lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *ctxp, + uint32_t sid, uint16_t xri) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_iocbq *abts_wqeq; + struct lpfc_nodelist *ndlp; + unsigned long flags; + bool ia; + int rc; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (!ctxp->wqeq) { + ctxp->wqeq = ctxp->ctxbuf->iocbq; + ctxp->wqeq->hba_wqidx = 0; + } + + ndlp = lpfc_findnode_did(phba->pport, sid); + if (!ndlp || + ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { + atomic_inc(&tgtp->xmt_abort_rsp_error); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6160 Drop ABORT - wrong NDLP state x%x.\n", + (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); + + /* No failure to an ABTS request. 
*/ + spin_lock_irqsave(&ctxp->ctxlock, flags); + ctxp->flag &= ~LPFC_NVME_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); + return 0; + } + + /* Issue ABTS for this WQE based on iotag */ + ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); + spin_lock_irqsave(&ctxp->ctxlock, flags); + if (!ctxp->abort_wqeq) { + atomic_inc(&tgtp->xmt_abort_rsp_error); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6161 ABORT failed: No wqeqs: " + "xri: x%x\n", ctxp->oxid); + /* No failure to an ABTS request. */ + ctxp->flag &= ~LPFC_NVME_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); + return 0; + } + abts_wqeq = ctxp->abort_wqeq; + ctxp->state = LPFC_NVME_STE_ABORT; + ia = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? true : false; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); + + /* Announce entry to new IO submit field. */ + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6162 ABORT Request to rport DID x%06x " + "for xri x%x x%x\n", + ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag); + + /* If the hba is getting reset, this flag is set. It is + * cleared when the reset is complete and rings reestablished. + */ + spin_lock_irqsave(&phba->hbalock, flags); + /* driver queued commands are in process of being flushed */ + if (phba->hba_flag & HBA_IOQ_FLUSH) { + spin_unlock_irqrestore(&phba->hbalock, flags); + atomic_inc(&tgtp->xmt_abort_rsp_error); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6163 Driver in reset cleanup - flushing " + "NVME Req now. hba_flag x%x oxid x%x\n", + phba->hba_flag, ctxp->oxid); + lpfc_sli_release_iocbq(phba, abts_wqeq); + spin_lock_irqsave(&ctxp->ctxlock, flags); + ctxp->flag &= ~LPFC_NVME_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); + return 0; + } + + /* Outstanding abort is in progress */ + if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) { + spin_unlock_irqrestore(&phba->hbalock, flags); + atomic_inc(&tgtp->xmt_abort_rsp_error); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6164 Outstanding NVME I/O Abort Request " + "still pending on oxid x%x\n", + ctxp->oxid); + lpfc_sli_release_iocbq(phba, abts_wqeq); + spin_lock_irqsave(&ctxp->ctxlock, flags); + ctxp->flag &= ~LPFC_NVME_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); + return 0; + } + + /* Ready - mark outstanding as aborted by driver. 
*/ + abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED; + + lpfc_sli_prep_abort_xri(phba, abts_wqeq, ctxp->wqeq->sli4_xritag, + abts_wqeq->iotag, CLASS3, + LPFC_WQE_CQ_ID_DEFAULT, ia, true); + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx; + abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp; + abts_wqeq->cmd_flag |= LPFC_IO_NVME; + abts_wqeq->context_un.axchg = ctxp; + abts_wqeq->vport = phba->pport; + if (!ctxp->hdwq) + ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; + + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (rc == WQE_SUCCESS) { + atomic_inc(&tgtp->xmt_abort_sol); + return 0; + } + + atomic_inc(&tgtp->xmt_abort_rsp_error); + spin_lock_irqsave(&ctxp->ctxlock, flags); + ctxp->flag &= ~LPFC_NVME_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); + lpfc_sli_release_iocbq(phba, abts_wqeq); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6166 Failed ABORT issue_wqe with status x%x " + "for oxid x%x.\n", + rc, ctxp->oxid); + return 1; +} + +static int +lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *ctxp, + uint32_t sid, uint16_t xri) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_iocbq *abts_wqeq; + unsigned long flags; + bool released = false; + int rc; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (!ctxp->wqeq) { + ctxp->wqeq = ctxp->ctxbuf->iocbq; + ctxp->wqeq->hba_wqidx = 0; + } + + if (ctxp->state == LPFC_NVME_STE_FREE) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6417 NVMET ABORT ctx freed %d %d oxid x%x\n", + ctxp->state, ctxp->entry_cnt, ctxp->oxid); + rc = WQE_BUSY; + goto aerr; + } + ctxp->state = LPFC_NVME_STE_ABORT; + ctxp->entry_cnt++; + rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); + if (rc == 0) + goto aerr; + + spin_lock_irqsave(&phba->hbalock, flags); + abts_wqeq = ctxp->wqeq; + abts_wqeq->cmd_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp; + abts_wqeq->cmd_flag |= LPFC_IO_NVMET; + if (!ctxp->hdwq) + ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; + + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (rc == WQE_SUCCESS) { + return 0; + } + +aerr: + spin_lock_irqsave(&ctxp->ctxlock, flags); + if (ctxp->flag & LPFC_NVME_CTX_RLS) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + list_del_init(&ctxp->list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + released = true; + } + ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS); + spin_unlock_irqrestore(&ctxp->ctxlock, flags); + + atomic_inc(&tgtp->xmt_abort_rsp_error); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6135 Failed to Issue ABTS for oxid x%x. Status x%x " + "(%x)\n", + ctxp->oxid, rc, released); + if (released) + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); + return 1; +} + +/** + * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received + * via async frame receive where the frame is not handled. + * @phba: pointer to adapter structure + * @ctxp: pointer to the asynchronously received received sequence + * @sid: address of the remote port to send the ABTS to + * @xri: oxid value to for the ABTS (other side's exchange id). 
+ **/ +int +lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *ctxp, + uint32_t sid, uint16_t xri) +{ + struct lpfc_nvmet_tgtport *tgtp = NULL; + struct lpfc_iocbq *abts_wqeq; + unsigned long flags; + int rc; + + if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) || + (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) { + ctxp->state = LPFC_NVME_STE_LS_ABORT; + ctxp->entry_cnt++; + } else { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6418 NVMET LS abort state mismatch " + "IO x%x: %d %d\n", + ctxp->oxid, ctxp->state, ctxp->entry_cnt); + ctxp->state = LPFC_NVME_STE_LS_ABORT; + } + + if (phba->nvmet_support && phba->targetport) + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + + if (!ctxp->wqeq) { + /* Issue ABTS for this WQE based on iotag */ + ctxp->wqeq = lpfc_sli_get_iocbq(phba); + if (!ctxp->wqeq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6068 Abort failed: No wqeqs: " + "xri: x%x\n", xri); + /* No failure to an ABTS request. */ + kfree(ctxp); + return 0; + } + } + abts_wqeq = ctxp->wqeq; + + if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) { + rc = WQE_BUSY; + goto out; + } + + spin_lock_irqsave(&phba->hbalock, flags); + abts_wqeq->cmd_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; + abts_wqeq->cmd_flag |= LPFC_IO_NVME_LS; + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (rc == WQE_SUCCESS) { + if (tgtp) + atomic_inc(&tgtp->xmt_abort_unsol); + return 0; + } +out: + if (tgtp) + atomic_inc(&tgtp->xmt_abort_rsp_error); + abts_wqeq->rsp_dmabuf = NULL; + abts_wqeq->bpl_dmabuf = NULL; + lpfc_sli_release_iocbq(phba, abts_wqeq); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6056 Failed to Issue ABTS. Status x%x\n", rc); + return 1; +} + +/** + * lpfc_nvmet_invalidate_host + * + * @phba: pointer to the driver instance bound to an adapter port. + * @ndlp: pointer to an lpfc_nodelist type + * + * This routine upcalls the nvmet transport to invalidate an NVME + * host to which this target instance had active connections. + */ +void +lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +{ + u32 ndlp_has_hh; + struct lpfc_nvmet_tgtport *tgtp; + + lpfc_printf_log(phba, KERN_INFO, + LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC, + "6203 Invalidating hosthandle x%px\n", + ndlp); + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE); + + spin_lock_irq(&ndlp->lock); + ndlp_has_hh = ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH; + spin_unlock_irq(&ndlp->lock); + + /* Do not invalidate any nodes that do not have a hosthandle. + * The host_release callbk will cause a node reference + * count imbalance and a crash. + */ + if (!ndlp_has_hh) { + lpfc_printf_log(phba, KERN_INFO, + LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC, + "6204 Skip invalidate on node x%px DID x%x\n", + ndlp, ndlp->nlp_DID); + return; + } + +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + /* Need to get the nvmet_fc_target_port pointer here.*/ + nvmet_fc_invalidate_host(phba->targetport, ndlp); +#endif +} diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c new file mode 100644 index 000000000..d26941b13 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -0,0 +1,6801 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. 
All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "lpfc_version.h" +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" + +#define LPFC_RESET_WAIT 2 +#define LPFC_ABORT_WAIT 2 + +static char *dif_op_str[] = { + "PROT_NORMAL", + "PROT_READ_INSERT", + "PROT_WRITE_STRIP", + "PROT_READ_STRIP", + "PROT_WRITE_INSERT", + "PROT_READ_PASS", + "PROT_WRITE_PASS", +}; + +struct scsi_dif_tuple { + __be16 guard_tag; /* Checksum */ + __be16 app_tag; /* Opaque storage */ + __be32 ref_tag; /* Target LBA or indirect LBA */ +}; + +static struct lpfc_rport_data * +lpfc_rport_data_from_scsi_device(struct scsi_device *sdev) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata; + + if (vport->phba->cfg_fof) + return ((struct lpfc_device_data *)sdev->hostdata)->rport_data; + else + return (struct lpfc_rport_data *)sdev->hostdata; +} + +static void +lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb); +static void +lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb); +static int +lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc); + +/** + * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge. + * @phba: Pointer to HBA object. + * @lpfc_cmd: lpfc scsi command object pointer. + * + * This function is called from the lpfc_prep_task_mgmt_cmd function to + * set the last bit in the response sge entry. + **/ +static void +lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba, + struct lpfc_io_buf *lpfc_cmd) +{ + struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; + if (sgl) { + sgl += 1; + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + } +} + +/** + * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread + * @phba: The Hba for which this call is being executed. + * + * This routine is called when there is resource error in driver or firmware. + * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine + * posts at most 1 event each second. This routine wakes up worker thread of + * @phba to process WORKER_RAM_DOWN_EVENT event. 
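As described above, the ramp-down path posts at most one event per second by remembering when it last posted and comparing that against an interval, the same time_after()-style check the routine applies to jiffies. A standalone sketch of that throttle, with a plain tick counter standing in for jiffies and a demo interval value:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_RAMP_DOWN_INTERVAL 1000    /* ticks between posts (demo value) */

static unsigned long demo_last_post;    /* tick of the most recent post */

/* Return true when enough time has passed to post another event. */
static bool demo_may_post(unsigned long now)
{
        if (now - demo_last_post < DEMO_RAMP_DOWN_INTERVAL)
                return false;
        demo_last_post = now;
        return true;
}

int main(void)
{
        printf("%d\n", demo_may_post(1000));    /* 1: first post allowed */
        printf("%d\n", demo_may_post(1500));    /* 0: throttled          */
        printf("%d\n", demo_may_post(2200));    /* 1: interval elapsed   */
        return 0;
}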
+ * + * This routine should be called with no lock held. + **/ +void +lpfc_rampdown_queue_depth(struct lpfc_hba *phba) +{ + unsigned long flags; + uint32_t evt_posted; + unsigned long expires; + + spin_lock_irqsave(&phba->hbalock, flags); + atomic_inc(&phba->num_rsrc_err); + phba->last_rsrc_error_time = jiffies; + + expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL; + if (time_after(expires, jiffies)) { + spin_unlock_irqrestore(&phba->hbalock, flags); + return; + } + + phba->last_ramp_down_time = jiffies; + + spin_unlock_irqrestore(&phba->hbalock, flags); + + spin_lock_irqsave(&phba->pport->work_port_lock, flags); + evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE; + if (!evt_posted) + phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE; + spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); + + if (!evt_posted) + lpfc_worker_wake_up(phba); + return; +} + +/** + * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler + * @phba: The Hba for which this call is being executed. + * + * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker + * thread.This routine reduces queue depth for all scsi device on each vport + * associated with @phba. + **/ +void +lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + struct Scsi_Host *shost; + struct scsi_device *sdev; + unsigned long new_queue_depth; + unsigned long num_rsrc_err, num_cmd_success; + int i; + + num_rsrc_err = atomic_read(&phba->num_rsrc_err); + num_cmd_success = atomic_read(&phba->num_cmd_success); + + /* + * The error and success command counters are global per + * driver instance. If another handler has already + * operated on this error event, just exit. + */ + if (num_rsrc_err == 0) + return; + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + shost = lpfc_shost_from_vport(vports[i]); + shost_for_each_device(sdev, shost) { + new_queue_depth = + sdev->queue_depth * num_rsrc_err / + (num_rsrc_err + num_cmd_success); + if (!new_queue_depth) + new_queue_depth = sdev->queue_depth - 1; + else + new_queue_depth = sdev->queue_depth - + new_queue_depth; + scsi_change_queue_depth(sdev, new_queue_depth); + } + } + lpfc_destroy_vport_work_array(phba, vports); + atomic_set(&phba->num_rsrc_err, 0); + atomic_set(&phba->num_cmd_success, 0); +} + +/** + * lpfc_scsi_dev_block - set all scsi hosts to block state + * @phba: Pointer to HBA context object. + * + * This function walks vport list and set each SCSI host to block state + * by invoking fc_remote_port_delete() routine. This function is invoked + * with EEH when device's PCI slot has been permanently disabled. + **/ +void +lpfc_scsi_dev_block(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + struct Scsi_Host *shost; + struct scsi_device *sdev; + struct fc_rport *rport; + int i; + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + shost = lpfc_shost_from_vport(vports[i]); + shost_for_each_device(sdev, shost) { + rport = starget_to_rport(scsi_target(sdev)); + fc_remote_port_delete(rport); + } + } + lpfc_destroy_vport_work_array(phba, vports); +} + +/** + * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec + * @vport: The virtual port for which this call being executed. + * @num_to_alloc: The requested number of buffers to allocate. 
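lpfc_ramp_down_queue_handler() above shrinks each device's queue depth in proportion to the fraction of recent commands that hit a resource error, and always steps down by at least one. A standalone rendering of just that arithmetic, using hypothetical depth and counter values:

#include <stdio.h>

/* Mirror of the ramp-down arithmetic: cut the depth by
 * depth * errors / (errors + successes), but by at least 1.
 * The caller is assumed to have checked that errors > 0. */
static unsigned long demo_ramp_down(unsigned long depth,
                                    unsigned long num_rsrc_err,
                                    unsigned long num_cmd_success)
{
        unsigned long cut = depth * num_rsrc_err /
                            (num_rsrc_err + num_cmd_success);

        return cut ? depth - cut : depth - 1;
}

int main(void)
{
        /* depth 32, 3 resource errors vs 29 completions -> 29 */
        printf("new depth = %lu\n", demo_ramp_down(32, 3, 29));
        /* a tiny error fraction still steps down by one -> 63 */
        printf("new depth = %lu\n", demo_ramp_down(64, 1, 1000));
        return 0;
}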
+ * + * This routine allocates a scsi buffer for device with SLI-3 interface spec, + * the scsi buffer contains all the necessary information needed to initiate + * a SCSI I/O. The non-DMAable buffer region contains information to build + * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP, + * and the initial BPL. In addition to allocating memory, the FCP CMND and + * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB. + * + * Return codes: + * int - number of scsi buffers that were allocated. + * 0 = failure, less than num_to_alloc is a partial failure. + **/ +static int +lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_io_buf *psb; + struct ulp_bde64 *bpl; + IOCB_t *iocb; + dma_addr_t pdma_phys_fcp_cmd; + dma_addr_t pdma_phys_fcp_rsp; + dma_addr_t pdma_phys_sgl; + uint16_t iotag; + int bcnt, bpl_size; + + bpl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", + num_to_alloc, phba->cfg_sg_dma_buf_size, + (int)sizeof(struct fcp_cmnd), + (int)sizeof(struct fcp_rsp), bpl_size); + + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { + psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL); + if (!psb) + break; + + /* + * Get memory from the pci pool to map the virt space to pci + * bus space for an I/O. The DMA buffer includes space for the + * struct fcp_cmnd, struct fcp_rsp and the number of bde's + * necessary to support the sg_tablesize. + */ + psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, + GFP_KERNEL, &psb->dma_handle); + if (!psb->data) { + kfree(psb); + break; + } + + + /* Allocate iotag for psb->cur_iocbq. */ + iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); + if (iotag == 0) { + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + psb->data, psb->dma_handle); + kfree(psb); + break; + } + psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP; + + psb->fcp_cmnd = psb->data; + psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); + psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp); + + /* Initialize local short-hand pointers. */ + bpl = (struct ulp_bde64 *)psb->dma_sgl; + pdma_phys_fcp_cmd = psb->dma_handle; + pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); + pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp); + + /* + * The first two bdes are the FCP_CMD and FCP_RSP. The balance + * are sg list bdes. Initialize the first two and leave the + * rest for queuecommand. + */ + bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); + bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); + bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); + bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); + + /* Setup the physical region for the FCP RSP */ + bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); + bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); + bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); + bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); + + /* + * Since the IOCB for the FCP I/O is built into this + * lpfc_scsi_buf, initialize it with all known data now. 
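lpfc_new_scsi_buf_s3() above carves one DMA-coherent allocation into three back-to-back regions, the FCP_CMND, the FCP_RSP, and then the BPL, purely by offset arithmetic on the DMA handle. A small sketch of that carving, with stand-in sizes instead of the real struct fcp_cmnd/fcp_rsp sizes:

#include <stdint.h>
#include <stdio.h>

#define DEMO_FCP_CMND_SZ 32     /* stand-in for sizeof(struct fcp_cmnd) */
#define DEMO_FCP_RSP_SZ  96     /* stand-in for sizeof(struct fcp_rsp)  */

int main(void)
{
        uint64_t dma_handle = 0x12340000ULL;    /* start of the DMA block */

        uint64_t phys_fcp_cmd = dma_handle;
        uint64_t phys_fcp_rsp = dma_handle + DEMO_FCP_CMND_SZ;
        uint64_t phys_bpl     = dma_handle + DEMO_FCP_CMND_SZ + DEMO_FCP_RSP_SZ;

        printf("FCP_CMND @ 0x%llx\n", (unsigned long long)phys_fcp_cmd);
        printf("FCP_RSP  @ 0x%llx\n", (unsigned long long)phys_fcp_rsp);
        printf("BPL      @ 0x%llx\n", (unsigned long long)phys_bpl);
        return 0;
}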
+ */ + iocb = &psb->cur_iocbq.iocb; + iocb->un.fcpi64.bdl.ulpIoTag32 = 0; + if ((phba->sli_rev == 3) && + !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { + /* fill in immediate fcp command BDE */ + iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; + iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); + iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, + unsli3.fcp_ext.icd); + iocb->un.fcpi64.bdl.addrHigh = 0; + iocb->ulpBdeCount = 0; + iocb->ulpLe = 0; + /* fill in response BDE */ + iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = + BUFF_TYPE_BDE_64; + iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = + sizeof(struct fcp_rsp); + iocb->unsli3.fcp_ext.rbde.addrLow = + putPaddrLow(pdma_phys_fcp_rsp); + iocb->unsli3.fcp_ext.rbde.addrHigh = + putPaddrHigh(pdma_phys_fcp_rsp); + } else { + iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; + iocb->un.fcpi64.bdl.bdeSize = + (2 * sizeof(struct ulp_bde64)); + iocb->un.fcpi64.bdl.addrLow = + putPaddrLow(pdma_phys_sgl); + iocb->un.fcpi64.bdl.addrHigh = + putPaddrHigh(pdma_phys_sgl); + iocb->ulpBdeCount = 1; + iocb->ulpLe = 1; + } + iocb->ulpClass = CLASS3; + psb->status = IOSTAT_SUCCESS; + /* Put it back into the SCSI buffer list */ + psb->cur_iocbq.io_buf = psb; + spin_lock_init(&psb->buf_lock); + lpfc_release_scsi_buf_s3(phba, psb); + + } + + return bcnt; +} + +/** + * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport + * @vport: pointer to lpfc vport data structure. + * + * This routine is invoked by the vport cleanup for deletions and the cleanup + * for an ndlp on removal. + **/ +void +lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_io_buf *psb, *next_psb; + struct lpfc_sli4_hdw_queue *qp; + unsigned long iflag = 0; + int idx; + + if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + return; + + spin_lock_irqsave(&phba->hbalock, iflag); + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + + spin_lock(&qp->abts_io_buf_list_lock); + list_for_each_entry_safe(psb, next_psb, + &qp->lpfc_abts_io_buf_list, list) { + if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) + continue; + + if (psb->rdata && psb->rdata->pnode && + psb->rdata->pnode->vport == vport) + psb->rdata = NULL; + } + spin_unlock(&qp->abts_io_buf_list_lock); + } + spin_unlock_irqrestore(&phba->hbalock, iflag); +} + +/** + * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort + * @phba: pointer to lpfc hba data structure. + * @axri: pointer to the fcp xri abort wcqe structure. + * @idx: index into hdwq + * + * This routine is invoked by the worker thread to process a SLI4 fast-path + * FCP or NVME aborted xri. 
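The XRI-aborted handler that follows walks the hardware queue's aborted-buffer list looking for the exchange tag reported in the completion and, on a match, clears the buffer's busy flag so it can be reused. A toy, lock-free version of that scan over an array, with the field names below invented for the illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_XBUSY 0x1          /* demo flag, not the driver's */

struct demo_buf {
        uint16_t xritag;
        unsigned int flags;
};

/* Find the buffer that owned the aborted exchange and mark it reusable. */
static bool demo_xri_aborted(struct demo_buf *bufs, int n, uint16_t xri)
{
        for (int i = 0; i < n; i++) {
                if ((bufs[i].flags & DEMO_XBUSY) && bufs[i].xritag == xri) {
                        bufs[i].flags &= ~DEMO_XBUSY;
                        return true;
                }
        }
        return false;
}

int main(void)
{
        struct demo_buf bufs[] = {
                { .xritag = 0x10, .flags = DEMO_XBUSY },
                { .xritag = 0x11, .flags = DEMO_XBUSY },
        };

        printf("matched: %d\n", demo_xri_aborted(bufs, 2, 0x11));
        printf("buf[1] still busy: %d\n", !!(bufs[1].flags & DEMO_XBUSY));
        return 0;
}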
+ **/ +void +lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri, int idx) +{ + u16 xri = 0; + u16 rxid = 0; + struct lpfc_io_buf *psb, *next_psb; + struct lpfc_sli4_hdw_queue *qp; + unsigned long iflag = 0; + struct lpfc_iocbq *iocbq; + int i; + struct lpfc_nodelist *ndlp; + int rrq_empty = 0; + struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring; + struct scsi_cmnd *cmd; + int offline = 0; + + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + return; + offline = pci_channel_offline(phba->pcidev); + if (!offline) { + xri = bf_get(lpfc_wcqe_xa_xri, axri); + rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); + } + qp = &phba->sli4_hba.hdwq[idx]; + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&qp->abts_io_buf_list_lock); + list_for_each_entry_safe(psb, next_psb, + &qp->lpfc_abts_io_buf_list, list) { + if (offline) + xri = psb->cur_iocbq.sli4_xritag; + if (psb->cur_iocbq.sli4_xritag == xri) { + list_del_init(&psb->list); + psb->flags &= ~LPFC_SBUF_XBUSY; + psb->status = IOSTAT_SUCCESS; + if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) { + qp->abts_nvme_io_bufs--; + spin_unlock(&qp->abts_io_buf_list_lock); + spin_unlock_irqrestore(&phba->hbalock, iflag); + if (!offline) { + lpfc_sli4_nvme_xri_aborted(phba, axri, + psb); + return; + } + lpfc_sli4_nvme_pci_offline_aborted(phba, psb); + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&qp->abts_io_buf_list_lock); + continue; + } + qp->abts_scsi_io_bufs--; + spin_unlock(&qp->abts_io_buf_list_lock); + + if (psb->rdata && psb->rdata->pnode) + ndlp = psb->rdata->pnode; + else + ndlp = NULL; + + rrq_empty = list_empty(&phba->active_rrq_list); + spin_unlock_irqrestore(&phba->hbalock, iflag); + if (ndlp && !offline) { + lpfc_set_rrq_active(phba, ndlp, + psb->cur_iocbq.sli4_lxritag, rxid, 1); + lpfc_sli4_abts_err_handler(phba, ndlp, axri); + } + + if (phba->cfg_fcp_wait_abts_rsp || offline) { + spin_lock_irqsave(&psb->buf_lock, iflag); + cmd = psb->pCmd; + psb->pCmd = NULL; + spin_unlock_irqrestore(&psb->buf_lock, iflag); + + /* The sdev is not guaranteed to be valid post + * scsi_done upcall. + */ + if (cmd) + scsi_done(cmd); + + /* + * We expect there is an abort thread waiting + * for command completion wake up the thread. + */ + spin_lock_irqsave(&psb->buf_lock, iflag); + psb->cur_iocbq.cmd_flag &= + ~LPFC_DRIVER_ABORTED; + if (psb->waitq) + wake_up(psb->waitq); + spin_unlock_irqrestore(&psb->buf_lock, iflag); + } + + lpfc_release_scsi_buf_s4(phba, psb); + if (rrq_empty) + lpfc_worker_wake_up(phba); + if (!offline) + return; + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&qp->abts_io_buf_list_lock); + continue; + } + } + spin_unlock(&qp->abts_io_buf_list_lock); + if (!offline) { + for (i = 1; i <= phba->sli.last_iotag; i++) { + iocbq = phba->sli.iocbq_lookup[i]; + + if (!(iocbq->cmd_flag & LPFC_IO_FCP) || + (iocbq->cmd_flag & LPFC_IO_LIBDFC)) + continue; + if (iocbq->sli4_xritag != xri) + continue; + psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); + psb->flags &= ~LPFC_SBUF_XBUSY; + spin_unlock_irqrestore(&phba->hbalock, iflag); + if (!list_empty(&pring->txq)) + lpfc_worker_wake_up(phba); + return; + } + } + spin_unlock_irqrestore(&phba->hbalock, iflag); +} + +/** + * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA + * @phba: The HBA for which this call is being executed. + * @ndlp: pointer to a node-list data structure. + * @cmnd: Pointer to scsi_cmnd data structure. 
+ * + * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list + * and returns to caller. + * + * Return codes: + * NULL - Error + * Pointer to lpfc_scsi_buf - Success + **/ +static struct lpfc_io_buf * +lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, + struct scsi_cmnd *cmnd) +{ + struct lpfc_io_buf *lpfc_cmd = NULL; + struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; + unsigned long iflag = 0; + + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); + list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf, + list); + if (!lpfc_cmd) { + spin_lock(&phba->scsi_buf_list_put_lock); + list_splice(&phba->lpfc_scsi_buf_list_put, + &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + list_remove_head(scsi_buf_list_get, lpfc_cmd, + struct lpfc_io_buf, list); + spin_unlock(&phba->scsi_buf_list_put_lock); + } + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); + + if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) { + atomic_inc(&ndlp->cmd_pending); + lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; + } + return lpfc_cmd; +} +/** + * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA + * @phba: The HBA for which this call is being executed. + * @ndlp: pointer to a node-list data structure. + * @cmnd: Pointer to scsi_cmnd data structure. + * + * This routine removes a scsi buffer from head of @hdwq io_buf_list + * and returns to caller. + * + * Return codes: + * NULL - Error + * Pointer to lpfc_scsi_buf - Success + **/ +static struct lpfc_io_buf * +lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, + struct scsi_cmnd *cmnd) +{ + struct lpfc_io_buf *lpfc_cmd; + struct lpfc_sli4_hdw_queue *qp; + struct sli4_sge *sgl; + dma_addr_t pdma_phys_fcp_rsp; + dma_addr_t pdma_phys_fcp_cmd; + uint32_t cpu, idx; + int tag; + struct fcp_cmd_rsp_buf *tmp = NULL; + + cpu = raw_smp_processor_id(); + if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { + tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)); + idx = blk_mq_unique_tag_to_hwq(tag); + } else { + idx = phba->sli4_hba.cpu_map[cpu].hdwq; + } + + lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx, + !phba->cfg_xri_rebalancing); + if (!lpfc_cmd) { + qp = &phba->sli4_hba.hdwq[idx]; + qp->empty_io_bufs++; + return NULL; + } + + /* Setup key fields in buffer that may have been changed + * if other protocols used this buffer. + */ + lpfc_cmd->cur_iocbq.cmd_flag = LPFC_IO_FCP; + lpfc_cmd->prot_seg_cnt = 0; + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->timeout = 0; + lpfc_cmd->flags = 0; + lpfc_cmd->start_time = jiffies; + lpfc_cmd->waitq = NULL; + lpfc_cmd->cpu = cpu; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + lpfc_cmd->prot_data_type = 0; +#endif + tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd); + if (!tmp) { + lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq); + return NULL; + } + + lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd; + lpfc_cmd->fcp_rsp = tmp->fcp_rsp; + + /* + * The first two SGEs are the FCP_CMD and FCP_RSP. + * The balance are sg list bdes. Initialize the + * first two and leave the rest for queuecommand. 
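The SGE setup that follows programs SLI4 scatter/gather entries: a 64-bit DMA address split into addr_hi/addr_lo, a length, and a word2 whose "last" bit terminates the list, all stored little-endian. A compact sketch of filling one such entry; glibc's htole32() stands in for the kernel's cpu_to_le32(), and the bit position chosen for "last" is a demo value, not the hardware layout:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_SGE_LAST (1u << 31)        /* demo bit position only */

struct demo_sge {
        uint32_t addr_hi;
        uint32_t addr_lo;
        uint32_t word2;         /* flags, including the 'last entry' bit */
        uint32_t sge_len;
};

static void demo_fill_sge(struct demo_sge *sge, uint64_t dma,
                          uint32_t len, int last)
{
        sge->addr_hi = htole32((uint32_t)(dma >> 32));
        sge->addr_lo = htole32((uint32_t)dma);
        sge->word2   = htole32(last ? DEMO_SGE_LAST : 0);
        sge->sge_len = htole32(len);
}

int main(void)
{
        struct demo_sge sge;

        demo_fill_sge(&sge, 0x123456789abcULL, 4096, 1);
        printf("addr_hi=0x%08x addr_lo=0x%08x len=%u\n",
               le32toh(sge.addr_hi), le32toh(sge.addr_lo),
               le32toh(sge.sge_len));
        return 0;
}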
+ */ + sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; + pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle; + sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); + sgl++; + + /* Setup the physical region for the FCP RSP */ + pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); + sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp)); + + if (lpfc_ndlp_check_qdepth(phba, ndlp)) { + atomic_inc(&ndlp->cmd_pending); + lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; + } + return lpfc_cmd; +} +/** + * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA + * @phba: The HBA for which this call is being executed. + * @ndlp: pointer to a node-list data structure. + * @cmnd: Pointer to scsi_cmnd data structure. + * + * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list + * and returns to caller. + * + * Return codes: + * NULL - Error + * Pointer to lpfc_scsi_buf - Success + **/ +static struct lpfc_io_buf* +lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, + struct scsi_cmnd *cmnd) +{ + return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd); +} + +/** + * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list + * @phba: The Hba for which this call is being executed. + * @psb: The scsi buffer which is being released. + * + * This routine releases @psb scsi buffer by adding it to tail of @phba + * lpfc_scsi_buf_list list. + **/ +static void +lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb) +{ + unsigned long iflag = 0; + + psb->seg_cnt = 0; + psb->prot_seg_cnt = 0; + + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); + psb->pCmd = NULL; + psb->cur_iocbq.cmd_flag = LPFC_IO_FCP; + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); +} + +/** + * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list. + * @phba: The Hba for which this call is being executed. + * @psb: The scsi buffer which is being released. + * + * This routine releases @psb scsi buffer by adding it to tail of @hdwq + * io_buf_list list. For SLI4 XRI's are tied to the scsi buffer + * and cannot be reused for at least RA_TOV amount of time if it was + * aborted. + **/ +static void +lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb) +{ + struct lpfc_sli4_hdw_queue *qp; + unsigned long iflag = 0; + + psb->seg_cnt = 0; + psb->prot_seg_cnt = 0; + + qp = psb->hdwq; + if (psb->flags & LPFC_SBUF_XBUSY) { + spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); + if (!phba->cfg_fcp_wait_abts_rsp) + psb->pCmd = NULL; + list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list); + qp->abts_scsi_io_bufs++; + spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); + } else { + lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp); + } +} + +/** + * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list. + * @phba: The Hba for which this call is being executed. + * @psb: The scsi buffer which is being released. 
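lpfc_get_scsi_buf_s3() above uses the classic two-list pool trick: allocations pop from a "get" list, frees push onto a separate "put" list, and only when the get list runs dry is the put list spliced over, so producers and consumers rarely contend on the same lock. A toy single-threaded rendering of the splice-on-empty idea (no locking, invented types):

#include <stddef.h>
#include <stdio.h>

struct demo_node { struct demo_node *next; int id; };

struct demo_pool {
        struct demo_node *get_list;     /* consumers pop here             */
        struct demo_node *put_list;     /* producers push freed bufs here */
};

static struct demo_node *demo_pool_get(struct demo_pool *p)
{
        struct demo_node *n;

        if (!p->get_list) {             /* refill: splice put -> get */
                p->get_list = p->put_list;
                p->put_list = NULL;
        }
        n = p->get_list;
        if (n)
                p->get_list = n->next;
        return n;
}

static void demo_pool_put(struct demo_pool *p, struct demo_node *n)
{
        n->next = p->put_list;
        p->put_list = n;
}

int main(void)
{
        struct demo_node a = { .id = 1 }, b = { .id = 2 };
        struct demo_pool p = { 0 };

        demo_pool_put(&p, &a);
        demo_pool_put(&p, &b);
        printf("got %d\n", demo_pool_get(&p)->id);      /* splices, pops 2 */
        printf("got %d\n", demo_pool_get(&p)->id);      /* pops 1          */
        return 0;
}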
+ * + * This routine releases @psb scsi buffer by adding it to tail of @phba + * lpfc_scsi_buf_list list. + **/ +static void +lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) +{ + if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp) + atomic_dec(&psb->ndlp->cmd_pending); + + psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH; + phba->lpfc_release_scsi_buf(phba, psb); +} + +/** + * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB + * @data: A pointer to the immediate command data portion of the IOCB. + * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. + * + * The routine copies the entire FCP command from @fcp_cmnd to @data while + * byte swapping the data to big endian format for transmission on the wire. + **/ +static void +lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd) +{ + int i, j; + + for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); + i += sizeof(uint32_t), j++) { + ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); + } +} + +/** + * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be mapped. + * + * This routine does the pci dma mapping for scatter-gather list of scsi cmnd + * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans + * through sg elements and format the bde. This routine also initializes all + * IOCB fields which are dependent on scsi command request buffer. + * + * Return codes: + * 1 - Error + * 0 - Success + **/ +static int +lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) +{ + struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; + struct scatterlist *sgel = NULL; + struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; + struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; + struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq; + IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; + struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; + dma_addr_t physaddr; + uint32_t num_bde = 0; + int nseg, datadir = scsi_cmnd->sc_data_direction; + + /* + * There are three possibilities here - use scatter-gather segment, use + * the single mapping, or neither. Start the lpfc command prep by + * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first + * data bde entry. + */ + bpl += 2; + if (scsi_sg_count(scsi_cmnd)) { + /* + * The driver stores the segment count returned from dma_map_sg + * because this a count of dma-mappings used to map the use_sg + * pages. They are not guaranteed to be the same for those + * architectures that implement an IOMMU. + */ + + nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd), + scsi_sg_count(scsi_cmnd), datadir); + if (unlikely(!nseg)) + return 1; + + lpfc_cmd->seg_cnt = nseg; + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9064 BLKGRD: %s: Too many sg segments" + " from dma_map_sg. Config %d, seg_cnt" + " %d\n", __func__, phba->cfg_sg_seg_cnt, + lpfc_cmd->seg_cnt); + WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); + lpfc_cmd->seg_cnt = 0; + scsi_dma_unmap(scsi_cmnd); + return 2; + } + + /* + * The driver established a maximum scatter-gather segment count + * during probe that limits the number of sg elements in any + * single scsi command. Just run through the seg_cnt and format + * the bde's. + * When using SLI-3 the driver will try to fit all the BDEs into + * the IOCB. 
If it can't then the BDEs get added to a BPL as it + * does for SLI-2 mode. + */ + scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { + physaddr = sg_dma_address(sgel); + if (phba->sli_rev == 3 && + !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && + !(iocbq->cmd_flag & DSS_SECURITY_OP) && + nseg <= LPFC_EXT_DATA_BDE_COUNT) { + data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + data_bde->tus.f.bdeSize = sg_dma_len(sgel); + data_bde->addrLow = putPaddrLow(physaddr); + data_bde->addrHigh = putPaddrHigh(physaddr); + data_bde++; + } else { + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bpl->tus.f.bdeSize = sg_dma_len(sgel); + bpl->tus.w = le32_to_cpu(bpl->tus.w); + bpl->addrLow = + le32_to_cpu(putPaddrLow(physaddr)); + bpl->addrHigh = + le32_to_cpu(putPaddrHigh(physaddr)); + bpl++; + } + } + } + + /* + * Finish initializing those IOCB fields that are dependent on the + * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is + * explicitly reinitialized and for SLI-3 the extended bde count is + * explicitly reinitialized since all iocb memory resources are reused. + */ + if (phba->sli_rev == 3 && + !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && + !(iocbq->cmd_flag & DSS_SECURITY_OP)) { + if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { + /* + * The extended IOCB format can only fit 3 BDE or a BPL. + * This I/O has more than 3 BDE so the 1st data bde will + * be a BPL that is filled in here. + */ + physaddr = lpfc_cmd->dma_handle; + data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64; + data_bde->tus.f.bdeSize = (num_bde * + sizeof(struct ulp_bde64)); + physaddr += (sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + (2 * sizeof(struct ulp_bde64))); + data_bde->addrHigh = putPaddrHigh(physaddr); + data_bde->addrLow = putPaddrLow(physaddr); + /* ebde count includes the response bde and data bpl */ + iocb_cmd->unsli3.fcp_ext.ebde_count = 2; + } else { + /* ebde count includes the response bde and data bdes */ + iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); + } + } else { + iocb_cmd->un.fcpi64.bdl.bdeSize = + ((num_bde + 2) * sizeof(struct ulp_bde64)); + iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); + } + fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); + + /* + * Due to difference in data length between DIF/non-DIF paths, + * we need to set word 4 of IOCB here + */ + iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); + lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); + return 0; +} + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + +/* Return BG_ERR_INIT if error injection is detected by Initiator */ +#define BG_ERR_INIT 0x1 +/* Return BG_ERR_TGT if error injection is detected by Target */ +#define BG_ERR_TGT 0x2 +/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */ +#define BG_ERR_SWAP 0x10 +/* + * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for + * error injection + */ +#define BG_ERR_CHECK 0x20 + +/** + * lpfc_bg_err_inject - Determine if we should inject an error + * @phba: The Hba for which this call is being executed. 
+ * @sc: The SCSI command to examine + * @reftag: (out) BlockGuard reference tag for transmitted data + * @apptag: (out) BlockGuard application tag for transmitted data + * @new_guard: (in) Value to replace CRC with if needed + * + * Returns BG_ERR_* bit mask or 0 if request ignored + **/ +static int +lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, + uint32_t *reftag, uint16_t *apptag, uint32_t new_guard) +{ + struct scatterlist *sgpe; /* s/g prot entry */ + struct lpfc_io_buf *lpfc_cmd = NULL; + struct scsi_dif_tuple *src = NULL; + struct lpfc_nodelist *ndlp; + struct lpfc_rport_data *rdata; + uint32_t op = scsi_get_prot_op(sc); + uint32_t blksize; + uint32_t numblks; + u32 lba; + int rc = 0; + int blockoff = 0; + + if (op == SCSI_PROT_NORMAL) + return 0; + + sgpe = scsi_prot_sglist(sc); + lba = scsi_prot_ref_tag(sc); + + /* First check if we need to match the LBA */ + if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) { + blksize = scsi_prot_interval(sc); + numblks = (scsi_bufflen(sc) + blksize - 1) / blksize; + + /* Make sure we have the right LBA if one is specified */ + if (phba->lpfc_injerr_lba < (u64)lba || + (phba->lpfc_injerr_lba >= (u64)(lba + numblks))) + return 0; + if (sgpe) { + blockoff = phba->lpfc_injerr_lba - (u64)lba; + numblks = sg_dma_len(sgpe) / + sizeof(struct scsi_dif_tuple); + if (numblks < blockoff) + blockoff = numblks; + } + } + + /* Next check if we need to match the remote NPortID or WWPN */ + rdata = lpfc_rport_data_from_scsi_device(sc->device); + if (rdata && rdata->pnode) { + ndlp = rdata->pnode; + + /* Make sure we have the right NPortID if one is specified */ + if (phba->lpfc_injerr_nportid && + (phba->lpfc_injerr_nportid != ndlp->nlp_DID)) + return 0; + + /* + * Make sure we have the right WWPN if one is specified. + * wwn[0] should be a non-zero NAA in a good WWPN. + */ + if (phba->lpfc_injerr_wwpn.u.wwn[0] && + (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn, + sizeof(struct lpfc_name)) != 0)) + return 0; + } + + /* Setup a ptr to the protection data if the SCSI host provides it */ + if (sgpe) { + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + src += blockoff; + lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble; + } + + /* Should we change the Reference Tag */ + if (reftag) { + if (phba->lpfc_injerr_wref_cnt) { + switch (op) { + case SCSI_PROT_WRITE_PASS: + if (src) { + /* + * For WRITE_PASS, force the error + * to be sent on the wire. It should + * be detected by the Target. + * If blockoff != 0 error will be + * inserted in middle of the IO. + */ + + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "9076 BLKGRD: Injecting reftag error: " + "write lba x%lx + x%x oldrefTag x%x\n", + (unsigned long)lba, blockoff, + be32_to_cpu(src->ref_tag)); + + /* + * Save the old ref_tag so we can + * restore it on completion. + */ + if (lpfc_cmd) { + lpfc_cmd->prot_data_type = + LPFC_INJERR_REFTAG; + lpfc_cmd->prot_data_segment = + src; + lpfc_cmd->prot_data = + src->ref_tag; + } + src->ref_tag = cpu_to_be32(0xDEADBEEF); + phba->lpfc_injerr_wref_cnt--; + if (phba->lpfc_injerr_wref_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_TGT | BG_ERR_CHECK; + + break; + } + fallthrough; + case SCSI_PROT_WRITE_INSERT: + /* + * For WRITE_INSERT, force the error + * to be sent on the wire. It should be + * detected by the Target. 
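[Editorial sketch, not part of the patch] A compact, hypothetically named model of the LBA-window test at the top of lpfc_bg_err_inject() above: injection proceeds only when the armed error LBA falls inside [lba, lba + numblks), and the block offset is remembered so the corruption can be placed mid-I/O. The clamp against the first protection segment's tuple count is omitted here for brevity.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INJERR_LBA_OFF UINT64_MAX   /* stand-in for LPFC_INJERR_LBA_OFF */

static bool injerr_lba_match(uint64_t injerr_lba, uint32_t lba,
                             uint32_t bufflen, uint32_t blksize,
                             uint32_t *blockoff)
{
    uint32_t numblks = (bufflen + blksize - 1) / blksize;

    if (injerr_lba == INJERR_LBA_OFF)
        return true;                /* no LBA filter armed: always inject */
    if (injerr_lba < lba || injerr_lba >= (uint64_t)lba + numblks)
        return false;               /* this I/O does not cover the armed LBA */
    *blockoff = (uint32_t)(injerr_lba - lba);
    return true;
}

int main(void)
{
    uint32_t off = 0;

    /* an 8-block write starting at LBA 100, error armed for LBA 103 */
    if (injerr_lba_match(103, 100, 8 * 512, 512, &off))
        printf("inject at block offset %u\n", off);   /* prints 3 */
    return 0;
}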
+ */ + /* DEADBEEF will be the reftag on the wire */ + *reftag = 0xDEADBEEF; + phba->lpfc_injerr_wref_cnt--; + if (phba->lpfc_injerr_wref_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_TGT | BG_ERR_CHECK; + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9078 BLKGRD: Injecting reftag error: " + "write lba x%lx\n", (unsigned long)lba); + break; + case SCSI_PROT_WRITE_STRIP: + /* + * For WRITE_STRIP and WRITE_PASS, + * force the error on data + * being copied from SLI-Host to SLI-Port. + */ + *reftag = 0xDEADBEEF; + phba->lpfc_injerr_wref_cnt--; + if (phba->lpfc_injerr_wref_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_INIT; + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9077 BLKGRD: Injecting reftag error: " + "write lba x%lx\n", (unsigned long)lba); + break; + } + } + if (phba->lpfc_injerr_rref_cnt) { + switch (op) { + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_READ_PASS: + /* + * For READ_STRIP and READ_PASS, force the + * error on data being read off the wire. It + * should force an IO error to the driver. + */ + *reftag = 0xDEADBEEF; + phba->lpfc_injerr_rref_cnt--; + if (phba->lpfc_injerr_rref_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_INIT; + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9079 BLKGRD: Injecting reftag error: " + "read lba x%lx\n", (unsigned long)lba); + break; + } + } + } + + /* Should we change the Application Tag */ + if (apptag) { + if (phba->lpfc_injerr_wapp_cnt) { + switch (op) { + case SCSI_PROT_WRITE_PASS: + if (src) { + /* + * For WRITE_PASS, force the error + * to be sent on the wire. It should + * be detected by the Target. + * If blockoff != 0 error will be + * inserted in middle of the IO. + */ + + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "9080 BLKGRD: Injecting apptag error: " + "write lba x%lx + x%x oldappTag x%x\n", + (unsigned long)lba, blockoff, + be16_to_cpu(src->app_tag)); + + /* + * Save the old app_tag so we can + * restore it on completion. + */ + if (lpfc_cmd) { + lpfc_cmd->prot_data_type = + LPFC_INJERR_APPTAG; + lpfc_cmd->prot_data_segment = + src; + lpfc_cmd->prot_data = + src->app_tag; + } + src->app_tag = cpu_to_be16(0xDEAD); + phba->lpfc_injerr_wapp_cnt--; + if (phba->lpfc_injerr_wapp_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_TGT | BG_ERR_CHECK; + break; + } + fallthrough; + case SCSI_PROT_WRITE_INSERT: + /* + * For WRITE_INSERT, force the + * error to be sent on the wire. It should be + * detected by the Target. 
+ */ + /* DEAD will be the apptag on the wire */ + *apptag = 0xDEAD; + phba->lpfc_injerr_wapp_cnt--; + if (phba->lpfc_injerr_wapp_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_TGT | BG_ERR_CHECK; + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0813 BLKGRD: Injecting apptag error: " + "write lba x%lx\n", (unsigned long)lba); + break; + case SCSI_PROT_WRITE_STRIP: + /* + * For WRITE_STRIP and WRITE_PASS, + * force the error on data + * being copied from SLI-Host to SLI-Port. + */ + *apptag = 0xDEAD; + phba->lpfc_injerr_wapp_cnt--; + if (phba->lpfc_injerr_wapp_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_INIT; + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0812 BLKGRD: Injecting apptag error: " + "write lba x%lx\n", (unsigned long)lba); + break; + } + } + if (phba->lpfc_injerr_rapp_cnt) { + switch (op) { + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_READ_PASS: + /* + * For READ_STRIP and READ_PASS, force the + * error on data being read off the wire. It + * should force an IO error to the driver. + */ + *apptag = 0xDEAD; + phba->lpfc_injerr_rapp_cnt--; + if (phba->lpfc_injerr_rapp_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_INIT; + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0814 BLKGRD: Injecting apptag error: " + "read lba x%lx\n", (unsigned long)lba); + break; + } + } + } + + + /* Should we change the Guard Tag */ + if (new_guard) { + if (phba->lpfc_injerr_wgrd_cnt) { + switch (op) { + case SCSI_PROT_WRITE_PASS: + rc = BG_ERR_CHECK; + fallthrough; + + case SCSI_PROT_WRITE_INSERT: + /* + * For WRITE_INSERT, force the + * error to be sent on the wire. It should be + * detected by the Target. + */ + phba->lpfc_injerr_wgrd_cnt--; + if (phba->lpfc_injerr_wgrd_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + + rc |= BG_ERR_TGT | BG_ERR_SWAP; + /* Signals the caller to swap CRC->CSUM */ + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0817 BLKGRD: Injecting guard error: " + "write lba x%lx\n", (unsigned long)lba); + break; + case SCSI_PROT_WRITE_STRIP: + /* + * For WRITE_STRIP and WRITE_PASS, + * force the error on data + * being copied from SLI-Host to SLI-Port. + */ + phba->lpfc_injerr_wgrd_cnt--; + if (phba->lpfc_injerr_wgrd_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + + rc = BG_ERR_INIT | BG_ERR_SWAP; + /* Signals the caller to swap CRC->CSUM */ + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0816 BLKGRD: Injecting guard error: " + "write lba x%lx\n", (unsigned long)lba); + break; + } + } + if (phba->lpfc_injerr_rgrd_cnt) { + switch (op) { + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_READ_PASS: + /* + * For READ_STRIP and READ_PASS, force the + * error on data being read off the wire. It + * should force an IO error to the driver. 
+ */ + phba->lpfc_injerr_rgrd_cnt--; + if (phba->lpfc_injerr_rgrd_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + + rc = BG_ERR_INIT | BG_ERR_SWAP; + /* Signals the caller to swap CRC->CSUM */ + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0818 BLKGRD: Injecting guard error: " + "read lba x%lx\n", (unsigned long)lba); + } + } + } + + return rc; +} +#endif + +/** + * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with + * the specified SCSI command. + * @phba: The Hba for which this call is being executed. + * @sc: The SCSI command to examine + * @txop: (out) BlockGuard operation for transmitted data + * @rxop: (out) BlockGuard operation for received data + * + * Returns: zero on success; non-zero if tx and/or rx op cannot be determined + * + **/ +static int +lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, + uint8_t *txop, uint8_t *rxop) +{ + uint8_t ret = 0; + + if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { + switch (scsi_get_prot_op(sc)) { + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + *rxop = BG_OP_IN_NODIF_OUT_CSUM; + *txop = BG_OP_IN_CSUM_OUT_NODIF; + break; + + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + *rxop = BG_OP_IN_CRC_OUT_NODIF; + *txop = BG_OP_IN_NODIF_OUT_CRC; + break; + + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + *rxop = BG_OP_IN_CRC_OUT_CSUM; + *txop = BG_OP_IN_CSUM_OUT_CRC; + break; + + case SCSI_PROT_NORMAL: + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9063 BLKGRD: Bad op/guard:%d/IP combination\n", + scsi_get_prot_op(sc)); + ret = 1; + break; + + } + } else { + switch (scsi_get_prot_op(sc)) { + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + *rxop = BG_OP_IN_CRC_OUT_NODIF; + *txop = BG_OP_IN_NODIF_OUT_CRC; + break; + + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + *rxop = BG_OP_IN_CRC_OUT_CRC; + *txop = BG_OP_IN_CRC_OUT_CRC; + break; + + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + *rxop = BG_OP_IN_NODIF_OUT_CRC; + *txop = BG_OP_IN_CRC_OUT_NODIF; + break; + + case SCSI_PROT_NORMAL: + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9075 BLKGRD: Bad op/guard:%d/CRC combination\n", + scsi_get_prot_op(sc)); + ret = 1; + break; + } + } + + return ret; +} + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +/** + * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with + * the specified SCSI command in order to force a guard tag error. + * @phba: The Hba for which this call is being executed. 
+ * @sc: The SCSI command to examine + * @txop: (out) BlockGuard operation for transmitted data + * @rxop: (out) BlockGuard operation for received data + * + * Returns: zero on success; non-zero if tx and/or rx op cannot be determined + * + **/ +static int +lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, + uint8_t *txop, uint8_t *rxop) +{ + + if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { + switch (scsi_get_prot_op(sc)) { + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + *rxop = BG_OP_IN_NODIF_OUT_CRC; + *txop = BG_OP_IN_CRC_OUT_NODIF; + break; + + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + *rxop = BG_OP_IN_CSUM_OUT_NODIF; + *txop = BG_OP_IN_NODIF_OUT_CSUM; + break; + + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + *rxop = BG_OP_IN_CSUM_OUT_CRC; + *txop = BG_OP_IN_CRC_OUT_CSUM; + break; + + case SCSI_PROT_NORMAL: + default: + break; + + } + } else { + switch (scsi_get_prot_op(sc)) { + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + *rxop = BG_OP_IN_CSUM_OUT_NODIF; + *txop = BG_OP_IN_NODIF_OUT_CSUM; + break; + + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + *rxop = BG_OP_IN_CSUM_OUT_CSUM; + *txop = BG_OP_IN_CSUM_OUT_CSUM; + break; + + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + *rxop = BG_OP_IN_NODIF_OUT_CSUM; + *txop = BG_OP_IN_CSUM_OUT_NODIF; + break; + + case SCSI_PROT_NORMAL: + default: + break; + } + } + + return 0; +} +#endif + +/** + * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data + * @phba: The Hba for which this call is being executed. + * @sc: pointer to scsi command we're working on + * @bpl: pointer to buffer list for protection groups + * @datasegcnt: number of segments of data that have been dma mapped + * + * This function sets up BPL buffer list for protection groups of + * type LPFC_PG_TYPE_NO_DIF + * + * This is usually used when the HBA is instructed to generate + * DIFs and insert them into data stream (or strip DIF from + * incoming data stream) + * + * The buffer list consists of just one protection group described + * below: + * +-------------------------+ + * start of prot group --> | PDE_5 | + * +-------------------------+ + * | PDE_6 | + * +-------------------------+ + * | Data BDE | + * +-------------------------+ + * |more Data BDE's ... (opt)| + * +-------------------------+ + * + * + * Note: Data s/g buffers have been dma mapped + * + * Returns the number of BDEs added to the BPL. 
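[Editorial sketch, not part of the patch] The BDE accounting implied by the layout diagram above, reduced to a one-line helper: an LPFC_PG_TYPE_NO_DIF group costs one PDE_5, one PDE_6 and one data BDE per mapped data segment, which is the count lpfc_bg_setup_bpl() returns to its caller. The numbers are only an example.

#include <stdio.h>

static int bg_nodif_bde_count(int datasegcnt)
{
    return 2 + datasegcnt;   /* PDE_5 + PDE_6 + one BDE per data segment */
}

int main(void)
{
    /* e.g. four mapped data segments need six BDEs in the BPL */
    printf("%d\n", bg_nodif_bde_count(4));
    return 0;
}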
+ **/ +static int +lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, + struct ulp_bde64 *bpl, int datasegcnt) +{ + struct scatterlist *sgde = NULL; /* s/g data entry */ + struct lpfc_pde5 *pde5 = NULL; + struct lpfc_pde6 *pde6 = NULL; + dma_addr_t physaddr; + int i = 0, num_bde = 0, status; + int datadir = sc->sc_data_direction; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t rc; +#endif + uint32_t checking = 1; + uint32_t reftag; + uint8_t txop, rxop; + + status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); + if (status) + goto out; + + /* extract some info from the scsi command for pde*/ + reftag = scsi_prot_ref_tag(sc); + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); + if (rc) { + if (rc & BG_ERR_SWAP) + lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); + if (rc & BG_ERR_CHECK) + checking = 0; + } +#endif + + /* setup PDE5 with what we have */ + pde5 = (struct lpfc_pde5 *) bpl; + memset(pde5, 0, sizeof(struct lpfc_pde5)); + bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); + + /* Endianness conversion if necessary for PDE5 */ + pde5->word0 = cpu_to_le32(pde5->word0); + pde5->reftag = cpu_to_le32(reftag); + + /* advance bpl and increment bde count */ + num_bde++; + bpl++; + pde6 = (struct lpfc_pde6 *) bpl; + + /* setup PDE6 with the rest of the info */ + memset(pde6, 0, sizeof(struct lpfc_pde6)); + bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); + bf_set(pde6_optx, pde6, txop); + bf_set(pde6_oprx, pde6, rxop); + + /* + * We only need to check the data on READs, for WRITEs + * protection data is automatically generated, not checked. + */ + if (datadir == DMA_FROM_DEVICE) { + if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) + bf_set(pde6_ce, pde6, checking); + else + bf_set(pde6_ce, pde6, 0); + + if (sc->prot_flags & SCSI_PROT_REF_CHECK) + bf_set(pde6_re, pde6, checking); + else + bf_set(pde6_re, pde6, 0); + } + bf_set(pde6_ai, pde6, 1); + bf_set(pde6_ae, pde6, 0); + bf_set(pde6_apptagval, pde6, 0); + + /* Endianness conversion if necessary for PDE6 */ + pde6->word0 = cpu_to_le32(pde6->word0); + pde6->word1 = cpu_to_le32(pde6->word1); + pde6->word2 = cpu_to_le32(pde6->word2); + + /* advance bpl and increment bde count */ + num_bde++; + bpl++; + + /* assumption: caller has already run dma_map_sg on command data */ + scsi_for_each_sg(sc, sgde, datasegcnt, i) { + physaddr = sg_dma_address(sgde); + bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); + bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); + bpl->tus.f.bdeSize = sg_dma_len(sgde); + if (datadir == DMA_TO_DEVICE) + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + else + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + bpl++; + num_bde++; + } + +out: + return num_bde; +} + +/** + * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data + * @phba: The Hba for which this call is being executed. + * @sc: pointer to scsi command we're working on + * @bpl: pointer to buffer list for protection groups + * @datacnt: number of segments of data that have been dma mapped + * @protcnt: number of segment of protection data that have been dma mapped + * + * This function sets up BPL buffer list for protection groups of + * type LPFC_PG_TYPE_DIF + * + * This is usually used when DIFs are in their own buffers, + * separate from the data. The HBA can then by instructed + * to place the DIFs in the outgoing stream. For read operations, + * The HBA could extract the DIFs and place it in DIF buffers. 
+ * + * The buffer list for this type consists of one or more of the + * protection groups described below: + * +-------------------------+ + * start of first prot group --> | PDE_5 | + * +-------------------------+ + * | PDE_6 | + * +-------------------------+ + * | PDE_7 (Prot BDE) | + * +-------------------------+ + * | Data BDE | + * +-------------------------+ + * |more Data BDE's ... (opt)| + * +-------------------------+ + * start of new prot group --> | PDE_5 | + * +-------------------------+ + * | ... | + * +-------------------------+ + * + * Note: It is assumed that both data and protection s/g buffers have been + * mapped for DMA + * + * Returns the number of BDEs added to the BPL. + **/ +static int +lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, + struct ulp_bde64 *bpl, int datacnt, int protcnt) +{ + struct scatterlist *sgde = NULL; /* s/g data entry */ + struct scatterlist *sgpe = NULL; /* s/g prot entry */ + struct lpfc_pde5 *pde5 = NULL; + struct lpfc_pde6 *pde6 = NULL; + struct lpfc_pde7 *pde7 = NULL; + dma_addr_t dataphysaddr, protphysaddr; + unsigned short curr_prot = 0; + unsigned int split_offset; + unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; + unsigned int protgrp_blks, protgrp_bytes; + unsigned int remainder, subtotal; + int status; + int datadir = sc->sc_data_direction; + unsigned char pgdone = 0, alldone = 0; + unsigned blksize; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t rc; +#endif + uint32_t checking = 1; + uint32_t reftag; + uint8_t txop, rxop; + int num_bde = 0; + + sgpe = scsi_prot_sglist(sc); + sgde = scsi_sglist(sc); + + if (!sgpe || !sgde) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9020 Invalid s/g entry: data=x%px prot=x%px\n", + sgpe, sgde); + return 0; + } + + status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); + if (status) + goto out; + + /* extract some info from the scsi command */ + blksize = scsi_prot_interval(sc); + reftag = scsi_prot_ref_tag(sc); + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); + if (rc) { + if (rc & BG_ERR_SWAP) + lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); + if (rc & BG_ERR_CHECK) + checking = 0; + } +#endif + + split_offset = 0; + do { + /* Check to see if we ran out of space */ + if (num_bde >= (phba->cfg_total_seg_cnt - 2)) + return num_bde + 3; + + /* setup PDE5 with what we have */ + pde5 = (struct lpfc_pde5 *) bpl; + memset(pde5, 0, sizeof(struct lpfc_pde5)); + bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); + + /* Endianness conversion if necessary for PDE5 */ + pde5->word0 = cpu_to_le32(pde5->word0); + pde5->reftag = cpu_to_le32(reftag); + + /* advance bpl and increment bde count */ + num_bde++; + bpl++; + pde6 = (struct lpfc_pde6 *) bpl; + + /* setup PDE6 with the rest of the info */ + memset(pde6, 0, sizeof(struct lpfc_pde6)); + bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); + bf_set(pde6_optx, pde6, txop); + bf_set(pde6_oprx, pde6, rxop); + + if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) + bf_set(pde6_ce, pde6, checking); + else + bf_set(pde6_ce, pde6, 0); + + if (sc->prot_flags & SCSI_PROT_REF_CHECK) + bf_set(pde6_re, pde6, checking); + else + bf_set(pde6_re, pde6, 0); + + bf_set(pde6_ai, pde6, 1); + bf_set(pde6_ae, pde6, 0); + bf_set(pde6_apptagval, pde6, 0); + + /* Endianness conversion if necessary for PDE6 */ + pde6->word0 = cpu_to_le32(pde6->word0); + pde6->word1 = cpu_to_le32(pde6->word1); + pde6->word2 = cpu_to_le32(pde6->word2); + + /* advance bpl and increment bde count */ + num_bde++; + bpl++; + + /* 
setup the first BDE that points to protection buffer */ + protphysaddr = sg_dma_address(sgpe) + protgroup_offset; + protgroup_len = sg_dma_len(sgpe) - protgroup_offset; + + /* must be integer multiple of the DIF block length */ + BUG_ON(protgroup_len % 8); + + pde7 = (struct lpfc_pde7 *) bpl; + memset(pde7, 0, sizeof(struct lpfc_pde7)); + bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); + + pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); + pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); + + protgrp_blks = protgroup_len / 8; + protgrp_bytes = protgrp_blks * blksize; + + /* check if this pde is crossing the 4K boundary; if so split */ + if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { + protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); + protgroup_offset += protgroup_remainder; + protgrp_blks = protgroup_remainder / 8; + protgrp_bytes = protgrp_blks * blksize; + } else { + protgroup_offset = 0; + curr_prot++; + } + + num_bde++; + + /* setup BDE's for data blocks associated with DIF data */ + pgdone = 0; + subtotal = 0; /* total bytes processed for current prot grp */ + while (!pgdone) { + /* Check to see if we ran out of space */ + if (num_bde >= phba->cfg_total_seg_cnt) + return num_bde + 1; + + if (!sgde) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9065 BLKGRD:%s Invalid data segment\n", + __func__); + return 0; + } + bpl++; + dataphysaddr = sg_dma_address(sgde) + split_offset; + bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr)); + bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr)); + + remainder = sg_dma_len(sgde) - split_offset; + + if ((subtotal + remainder) <= protgrp_bytes) { + /* we can use this whole buffer */ + bpl->tus.f.bdeSize = remainder; + split_offset = 0; + + if ((subtotal + remainder) == protgrp_bytes) + pgdone = 1; + } else { + /* must split this buffer with next prot grp */ + bpl->tus.f.bdeSize = protgrp_bytes - subtotal; + split_offset += bpl->tus.f.bdeSize; + } + + subtotal += bpl->tus.f.bdeSize; + + if (datadir == DMA_TO_DEVICE) + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + else + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + + num_bde++; + + if (split_offset) + break; + + /* Move to the next s/g segment if possible */ + sgde = sg_next(sgde); + + } + + if (protgroup_offset) { + /* update the reference tag */ + reftag += protgrp_blks; + bpl++; + continue; + } + + /* are we done ? */ + if (curr_prot == protcnt) { + alldone = 1; + } else if (curr_prot < protcnt) { + /* advance to next prot buffer */ + sgpe = sg_next(sgpe); + bpl++; + + /* update the reference tag */ + reftag += protgrp_blks; + } else { + /* if we're here, we have a bug */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9054 BLKGRD: bug in %s\n", __func__); + } + + } while (!alldone); +out: + + return num_bde; +} + +/** + * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data + * @phba: The Hba for which this call is being executed. + * @sc: pointer to scsi command we're working on + * @sgl: pointer to buffer list for protection groups + * @datasegcnt: number of segments of data that have been dma mapped + * @lpfc_cmd: lpfc scsi command object pointer. 
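[Editorial sketch, not part of the patch] A standalone model of the 4 KiB boundary handling used when building the protection BDE in lpfc_bg_setup_bpl_prot() above: if the protection buffer would cross a 4 KiB boundary, only the bytes up to the boundary are described now and the remainder is carried into the next protection group; DIF tuples are 8 bytes, so block counts come from dividing by 8. Names and values are illustrative.

#include <stdint.h>
#include <stdio.h>

static uint32_t prot_bytes_this_group(uint64_t protphysaddr,
                                      uint32_t protgroup_len,
                                      uint32_t blksize,
                                      uint32_t *carry_offset)
{
    uint32_t low = (uint32_t)(protphysaddr & 0xfff);
    uint32_t len = protgroup_len;

    if (low + protgroup_len > 0x1000) {
        len = 0x1000 - low;      /* only up to the 4 KiB boundary for now */
        *carry_offset += len;    /* resume from here on the next iteration */
    } else {
        *carry_offset = 0;       /* whole buffer used; advance to next sgpe */
    }
    return (len / 8) * blksize;  /* data bytes covered by this prot group */
}

int main(void)
{
    uint32_t carry = 0;

    /* 1024 bytes of 8-byte DIF tuples starting 256 bytes before a boundary */
    printf("data bytes covered: %u\n",
           prot_bytes_this_group(0x10000f00ULL, 1024, 512, &carry));
    printf("carry offset: %u\n", carry);
    return 0;
}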
+ * + * This function sets up SGL buffer list for protection groups of + * type LPFC_PG_TYPE_NO_DIF + * + * This is usually used when the HBA is instructed to generate + * DIFs and insert them into data stream (or strip DIF from + * incoming data stream) + * + * The buffer list consists of just one protection group described + * below: + * +-------------------------+ + * start of prot group --> | DI_SEED | + * +-------------------------+ + * | Data SGE | + * +-------------------------+ + * |more Data SGE's ... (opt)| + * +-------------------------+ + * + * + * Note: Data s/g buffers have been dma mapped + * + * Returns the number of SGEs added to the SGL. + **/ +static int +lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, + struct sli4_sge *sgl, int datasegcnt, + struct lpfc_io_buf *lpfc_cmd) +{ + struct scatterlist *sgde = NULL; /* s/g data entry */ + struct sli4_sge_diseed *diseed = NULL; + dma_addr_t physaddr; + int i = 0, num_sge = 0, status; + uint32_t reftag; + uint8_t txop, rxop; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t rc; +#endif + uint32_t checking = 1; + uint32_t dma_len; + uint32_t dma_offset = 0; + struct sli4_hybrid_sgl *sgl_xtra = NULL; + int j; + bool lsp_just_set = false; + + status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); + if (status) + goto out; + + /* extract some info from the scsi command for pde*/ + reftag = scsi_prot_ref_tag(sc); + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); + if (rc) { + if (rc & BG_ERR_SWAP) + lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); + if (rc & BG_ERR_CHECK) + checking = 0; + } +#endif + + /* setup DISEED with what we have */ + diseed = (struct sli4_sge_diseed *) sgl; + memset(diseed, 0, sizeof(struct sli4_sge_diseed)); + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); + + /* Endianness conversion if necessary */ + diseed->ref_tag = cpu_to_le32(reftag); + diseed->ref_tag_tran = diseed->ref_tag; + + /* + * We only need to check the data on READs, for WRITEs + * protection data is automatically generated, not checked. 
+ */ + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) + bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); + + if (sc->prot_flags & SCSI_PROT_REF_CHECK) + bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_re, diseed, 0); + } + + /* setup DISEED with the rest of the info */ + bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); + bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); + + bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); + bf_set(lpfc_sli4_sge_dif_me, diseed, 0); + + /* Endianness conversion if necessary for DISEED */ + diseed->word2 = cpu_to_le32(diseed->word2); + diseed->word3 = cpu_to_le32(diseed->word3); + + /* advance bpl and increment sge count */ + num_sge++; + sgl++; + + /* assumption: caller has already run dma_map_sg on command data */ + sgde = scsi_sglist(sc); + j = 3; + for (i = 0; i < datasegcnt; i++) { + /* clear it */ + sgl->word2 = 0; + + /* do we need to expand the segment */ + if (!lsp_just_set && !((j + 1) % phba->border_sge_num) && + ((datasegcnt - 1) != i)) { + /* set LSP type */ + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); + + sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); + + if (unlikely(!sgl_xtra)) { + lpfc_cmd->seg_cnt = 0; + return 0; + } + sgl->addr_lo = cpu_to_le32(putPaddrLow( + sgl_xtra->dma_phys_sgl)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh( + sgl_xtra->dma_phys_sgl)); + + } else { + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + } + + if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) { + if ((datasegcnt - 1) == i) + bf_set(lpfc_sli4_sge_last, sgl, 1); + physaddr = sg_dma_address(sgde); + dma_len = sg_dma_len(sgde); + sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); + + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(dma_len); + + dma_offset += dma_len; + sgde = sg_next(sgde); + + sgl++; + num_sge++; + lsp_just_set = false; + + } else { + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); + + sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; + i = i - 1; + + lsp_just_set = true; + } + + j++; + + } + +out: + return num_sge; +} + +/** + * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data + * @phba: The Hba for which this call is being executed. + * @sc: pointer to scsi command we're working on + * @sgl: pointer to buffer list for protection groups + * @datacnt: number of segments of data that have been dma mapped + * @protcnt: number of segment of protection data that have been dma mapped + * @lpfc_cmd: lpfc scsi command object pointer. + * + * This function sets up SGL buffer list for protection groups of + * type LPFC_PG_TYPE_DIF + * + * This is usually used when DIFs are in their own buffers, + * separate from the data. The HBA can then by instructed + * to place the DIFs in the outgoing stream. For read operations, + * The HBA could extract the DIFs and place it in DIF buffers. + * + * The buffer list for this type consists of one or more of the + * protection groups described below: + * +-------------------------+ + * start of first prot group --> | DISEED | + * +-------------------------+ + * | DIF (Prot SGE) | + * +-------------------------+ + * | Data SGE | + * +-------------------------+ + * |more Data SGE's ... 
(opt)| + * +-------------------------+ + * start of new prot group --> | DISEED | + * +-------------------------+ + * | ... | + * +-------------------------+ + * + * Note: It is assumed that both data and protection s/g buffers have been + * mapped for DMA + * + * Returns the number of SGEs added to the SGL. + **/ +static int +lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, + struct sli4_sge *sgl, int datacnt, int protcnt, + struct lpfc_io_buf *lpfc_cmd) +{ + struct scatterlist *sgde = NULL; /* s/g data entry */ + struct scatterlist *sgpe = NULL; /* s/g prot entry */ + struct sli4_sge_diseed *diseed = NULL; + dma_addr_t dataphysaddr, protphysaddr; + unsigned short curr_prot = 0; + unsigned int split_offset; + unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; + unsigned int protgrp_blks, protgrp_bytes; + unsigned int remainder, subtotal; + int status; + unsigned char pgdone = 0, alldone = 0; + unsigned blksize; + uint32_t reftag; + uint8_t txop, rxop; + uint32_t dma_len; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t rc; +#endif + uint32_t checking = 1; + uint32_t dma_offset = 0; + int num_sge = 0, j = 2; + struct sli4_hybrid_sgl *sgl_xtra = NULL; + + sgpe = scsi_prot_sglist(sc); + sgde = scsi_sglist(sc); + + if (!sgpe || !sgde) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9082 Invalid s/g entry: data=x%px prot=x%px\n", + sgpe, sgde); + return 0; + } + + status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); + if (status) + goto out; + + /* extract some info from the scsi command */ + blksize = scsi_prot_interval(sc); + reftag = scsi_prot_ref_tag(sc); + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); + if (rc) { + if (rc & BG_ERR_SWAP) + lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); + if (rc & BG_ERR_CHECK) + checking = 0; + } +#endif + + split_offset = 0; + do { + /* Check to see if we ran out of space */ + if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) && + !(phba->cfg_xpsgl)) + return num_sge + 3; + + /* DISEED and DIF have to be together */ + if (!((j + 1) % phba->border_sge_num) || + !((j + 2) % phba->border_sge_num) || + !((j + 3) % phba->border_sge_num)) { + sgl->word2 = 0; + + /* set LSP type */ + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); + + sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); + + if (unlikely(!sgl_xtra)) { + goto out; + } else { + sgl->addr_lo = cpu_to_le32(putPaddrLow( + sgl_xtra->dma_phys_sgl)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh( + sgl_xtra->dma_phys_sgl)); + } + + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); + + sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; + j = 0; + } + + /* setup DISEED with what we have */ + diseed = (struct sli4_sge_diseed *) sgl; + memset(diseed, 0, sizeof(struct sli4_sge_diseed)); + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); + + /* Endianness conversion if necessary */ + diseed->ref_tag = cpu_to_le32(reftag); + diseed->ref_tag_tran = diseed->ref_tag; + + if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) { + bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); + } else { + bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); + /* + * When in this mode, the hardware will replace + * the guard tag from the host with a + * newly generated good CRC for the wire. + * Switch to raw mode here to avoid this + * behavior. What the host sends gets put on the wire. 
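[Editorial sketch, not part of the patch] A small model of the "DISEED and DIF have to be together" check earlier in this function: before starting a new protection group, the code verifies that none of the next few slots lands on the SGL page position reserved for chaining (my reading of the modulo tests above); if one would, it links to a fresh SGL first so the DISEED and DIF descriptors stay on the same page. border_sge_num here is an assumed entries-per-page value.

#include <stdbool.h>
#include <stdio.h>

static bool need_new_sgl_for_group(int j, int border_sge_num)
{
    return ((j + 1) % border_sge_num) == 0 ||
           ((j + 2) % border_sge_num) == 0 ||
           ((j + 3) % border_sge_num) == 0;
}

int main(void)
{
    int border = 64, j;

    for (j = 60; j < 66; j++)
        printf("j=%d -> %s\n", j,
               need_new_sgl_for_group(j, border) ? "chain first" : "ok");
    return 0;
}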
+ */ + if (txop == BG_OP_IN_CRC_OUT_CRC) { + txop = BG_OP_RAW_MODE; + rxop = BG_OP_RAW_MODE; + } + } + + + if (sc->prot_flags & SCSI_PROT_REF_CHECK) + bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_re, diseed, 0); + + /* setup DISEED with the rest of the info */ + bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); + bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); + + bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); + bf_set(lpfc_sli4_sge_dif_me, diseed, 0); + + /* Endianness conversion if necessary for DISEED */ + diseed->word2 = cpu_to_le32(diseed->word2); + diseed->word3 = cpu_to_le32(diseed->word3); + + /* advance sgl and increment bde count */ + num_sge++; + + sgl++; + j++; + + /* setup the first BDE that points to protection buffer */ + protphysaddr = sg_dma_address(sgpe) + protgroup_offset; + protgroup_len = sg_dma_len(sgpe) - protgroup_offset; + + /* must be integer multiple of the DIF block length */ + BUG_ON(protgroup_len % 8); + + /* Now setup DIF SGE */ + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF); + sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr)); + sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr)); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = 0; + + protgrp_blks = protgroup_len / 8; + protgrp_bytes = protgrp_blks * blksize; + + /* check if DIF SGE is crossing the 4K boundary; if so split */ + if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) { + protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff); + protgroup_offset += protgroup_remainder; + protgrp_blks = protgroup_remainder / 8; + protgrp_bytes = protgrp_blks * blksize; + } else { + protgroup_offset = 0; + curr_prot++; + } + + num_sge++; + + /* setup SGE's for data blocks associated with DIF data */ + pgdone = 0; + subtotal = 0; /* total bytes processed for current prot grp */ + + sgl++; + j++; + + while (!pgdone) { + /* Check to see if we ran out of space */ + if ((num_sge >= phba->cfg_total_seg_cnt) && + !phba->cfg_xpsgl) + return num_sge + 1; + + if (!sgde) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9086 BLKGRD:%s Invalid data segment\n", + __func__); + return 0; + } + + if (!((j + 1) % phba->border_sge_num)) { + sgl->word2 = 0; + + /* set LSP type */ + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_LSP); + + sgl_xtra = lpfc_get_sgl_per_hdwq(phba, + lpfc_cmd); + + if (unlikely(!sgl_xtra)) { + goto out; + } else { + sgl->addr_lo = cpu_to_le32( + putPaddrLow(sgl_xtra->dma_phys_sgl)); + sgl->addr_hi = cpu_to_le32( + putPaddrHigh(sgl_xtra->dma_phys_sgl)); + } + + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32( + phba->cfg_sg_dma_buf_size); + + sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; + } else { + dataphysaddr = sg_dma_address(sgde) + + split_offset; + + remainder = sg_dma_len(sgde) - split_offset; + + if ((subtotal + remainder) <= protgrp_bytes) { + /* we can use this whole buffer */ + dma_len = remainder; + split_offset = 0; + + if ((subtotal + remainder) == + protgrp_bytes) + pgdone = 1; + } else { + /* must split this buffer with next + * prot grp + */ + dma_len = protgrp_bytes - subtotal; + split_offset += dma_len; + } + + subtotal += dma_len; + + sgl->word2 = 0; + sgl->addr_lo = cpu_to_le32(putPaddrLow( + dataphysaddr)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh( + dataphysaddr)); + bf_set(lpfc_sli4_sge_last, sgl, 0); + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); + + sgl->sge_len = cpu_to_le32(dma_len); + dma_offset += dma_len; + + num_sge++; + + if 
(split_offset) { + sgl++; + j++; + break; + } + + /* Move to the next s/g segment if possible */ + sgde = sg_next(sgde); + + sgl++; + } + + j++; + } + + if (protgroup_offset) { + /* update the reference tag */ + reftag += protgrp_blks; + continue; + } + + /* are we done ? */ + if (curr_prot == protcnt) { + /* mark the last SGL */ + sgl--; + bf_set(lpfc_sli4_sge_last, sgl, 1); + alldone = 1; + } else if (curr_prot < protcnt) { + /* advance to next prot buffer */ + sgpe = sg_next(sgpe); + + /* update the reference tag */ + reftag += protgrp_blks; + } else { + /* if we're here, we have a bug */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9085 BLKGRD: bug in %s\n", __func__); + } + + } while (!alldone); + +out: + + return num_sge; +} + +/** + * lpfc_prot_group_type - Get prtotection group type of SCSI command + * @phba: The Hba for which this call is being executed. + * @sc: pointer to scsi command we're working on + * + * Given a SCSI command that supports DIF, determine composition of protection + * groups involved in setting up buffer lists + * + * Returns: Protection group type (with or without DIF) + * + **/ +static int +lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) +{ + int ret = LPFC_PG_TYPE_INVALID; + unsigned char op = scsi_get_prot_op(sc); + + switch (op) { + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + ret = LPFC_PG_TYPE_NO_DIF; + break; + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + ret = LPFC_PG_TYPE_DIF_BUF; + break; + default: + if (phba) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9021 Unsupported protection op:%d\n", + op); + break; + } + return ret; +} + +/** + * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be adjusted. + * + * Adjust the data length to account for how much data + * is actually on the wire. + * + * returns the adjusted data length + **/ +static int +lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, + struct lpfc_io_buf *lpfc_cmd) +{ + struct scsi_cmnd *sc = lpfc_cmd->pCmd; + int fcpdl; + + fcpdl = scsi_bufflen(sc); + + /* Check if there is protection data on the wire */ + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + /* Read check for protection data */ + if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) + return fcpdl; + + } else { + /* Write check for protection data */ + if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) + return fcpdl; + } + + /* + * If we are in DIF Type 1 mode every data block has a 8 byte + * DIF (trailer) attached to it. Must ajust FCP data length + * to account for the protection data. + */ + fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8; + + return fcpdl; +} + +/** + * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be prep'ed. + * + * This is the protection/DIF aware version of + * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the + * two functions eventually, but for now, it's here. + * RETURNS 0 - SUCCESS, + * 1 - Failed DMA map, retry. + * 2 - Invalid scsi cmd or prot-type. Do not rety. 
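[Editorial sketch, not part of the patch] A worked example of the wire-length math in lpfc_bg_scsi_adjust_dl() above: with Type 1 protection every block carries an 8-byte DIF trailer, so the FCP data length grows by 8 bytes per block unless the HBA strips or inserts the protection itself (the READ_INSERT / WRITE_STRIP early returns, summarized here by a single flag).

#include <stdbool.h>
#include <stdio.h>

static unsigned int dif_adjust_dl(unsigned int bufflen, unsigned int blksize,
                                  bool prot_on_wire)
{
    if (!prot_on_wire)
        return bufflen;              /* HBA inserts/strips DIF internally */
    return bufflen + (bufflen / blksize) * 8;
}

int main(void)
{
    /* 64 KiB transfer, 512-byte blocks: 128 blocks * 8 = 1024 extra bytes */
    printf("fcpdl = %u\n", dif_adjust_dl(65536, 512, true));   /* 66560 */
    return 0;
}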
+ **/ +static int +lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, + struct lpfc_io_buf *lpfc_cmd) +{ + struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; + struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; + struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; + IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; + uint32_t num_bde = 0; + int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; + int prot_group_type = 0; + int fcpdl; + int ret = 1; + struct lpfc_vport *vport = phba->pport; + + /* + * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd + * fcp_rsp regions to the first data bde entry + */ + bpl += 2; + if (scsi_sg_count(scsi_cmnd)) { + /* + * The driver stores the segment count returned from dma_map_sg + * because this a count of dma-mappings used to map the use_sg + * pages. They are not guaranteed to be the same for those + * architectures that implement an IOMMU. + */ + datasegcnt = dma_map_sg(&phba->pcidev->dev, + scsi_sglist(scsi_cmnd), + scsi_sg_count(scsi_cmnd), datadir); + if (unlikely(!datasegcnt)) + return 1; + + lpfc_cmd->seg_cnt = datasegcnt; + + /* First check if data segment count from SCSI Layer is good */ + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { + WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); + ret = 2; + goto err; + } + + prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); + + switch (prot_group_type) { + case LPFC_PG_TYPE_NO_DIF: + + /* Here we need to add a PDE5 and PDE6 to the count */ + if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) { + ret = 2; + goto err; + } + + num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, + datasegcnt); + /* we should have 2 or more entries in buffer list */ + if (num_bde < 2) { + ret = 2; + goto err; + } + break; + + case LPFC_PG_TYPE_DIF_BUF: + /* + * This type indicates that protection buffers are + * passed to the driver, so that needs to be prepared + * for DMA + */ + protsegcnt = dma_map_sg(&phba->pcidev->dev, + scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), datadir); + if (unlikely(!protsegcnt)) { + scsi_dma_unmap(scsi_cmnd); + return 1; + } + + lpfc_cmd->prot_seg_cnt = protsegcnt; + + /* + * There is a minimun of 4 BPLs used for every + * protection data segment. + */ + if ((lpfc_cmd->prot_seg_cnt * 4) > + (phba->cfg_total_seg_cnt - 2)) { + ret = 2; + goto err; + } + + num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, + datasegcnt, protsegcnt); + /* we should have 3 or more entries in buffer list */ + if ((num_bde < 3) || + (num_bde > phba->cfg_total_seg_cnt)) { + ret = 2; + goto err; + } + break; + + case LPFC_PG_TYPE_INVALID: + default: + scsi_dma_unmap(scsi_cmnd); + lpfc_cmd->seg_cnt = 0; + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9022 Unexpected protection group %i\n", + prot_group_type); + return 2; + } + } + + /* + * Finish initializing those IOCB fields that are dependent on the + * scsi_cmnd request_buffer. Note that the bdeSize is explicitly + * reinitialized since all iocb memory resources are used many times + * for transmit, receive, and continuation bpl's. 
+ */ + iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); + iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64)); + iocb_cmd->ulpBdeCount = 1; + iocb_cmd->ulpLe = 1; + + fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); + fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); + + /* + * Due to difference in data length between DIF/non-DIF paths, + * we need to set word 4 of IOCB here + */ + iocb_cmd->un.fcpi.fcpi_parm = fcpdl; + + /* + * For First burst, we may need to adjust the initial transfer + * length for DIF + */ + if (iocb_cmd->un.fcpi.fcpi_XRdy && + (fcpdl < vport->cfg_first_burst_size)) + iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl; + + return 0; +err: + if (lpfc_cmd->seg_cnt) + scsi_dma_unmap(scsi_cmnd); + if (lpfc_cmd->prot_seg_cnt) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + scsi_cmnd->sc_data_direction); + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9023 Cannot setup S/G List for HBA" + "IO segs %d/%d BPL %d SCSI %d: %d %d\n", + lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, + phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, + prot_group_type, num_bde); + + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->prot_seg_cnt = 0; + return ret; +} + +/* + * This function calcuates the T10 DIF guard tag + * on the specified data using a CRC algorithmn + * using crc_t10dif. + */ +static uint16_t +lpfc_bg_crc(uint8_t *data, int count) +{ + uint16_t crc = 0; + uint16_t x; + + crc = crc_t10dif(data, count); + x = cpu_to_be16(crc); + return x; +} + +/* + * This function calcuates the T10 DIF guard tag + * on the specified data using a CSUM algorithmn + * using ip_compute_csum. + */ +static uint16_t +lpfc_bg_csum(uint8_t *data, int count) +{ + uint16_t ret; + + ret = ip_compute_csum(data, count); + return ret; +} + +/* + * This function examines the protection data to try to determine + * what type of T10-DIF error occurred. + */ +static void +lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) +{ + struct scatterlist *sgpe; /* s/g prot entry */ + struct scatterlist *sgde; /* s/g data entry */ + struct scsi_cmnd *cmd = lpfc_cmd->pCmd; + struct scsi_dif_tuple *src = NULL; + uint8_t *data_src = NULL; + uint16_t guard_tag; + uint16_t start_app_tag, app_tag; + uint32_t start_ref_tag, ref_tag; + int prot, protsegcnt; + int err_type, len, data_len; + int chk_ref, chk_app, chk_guard; + uint16_t sum; + unsigned blksize; + + err_type = BGS_GUARD_ERR_MASK; + sum = 0; + guard_tag = 0; + + /* First check to see if there is protection data to examine */ + prot = scsi_get_prot_op(cmd); + if ((prot == SCSI_PROT_READ_STRIP) || + (prot == SCSI_PROT_WRITE_INSERT) || + (prot == SCSI_PROT_NORMAL)) + goto out; + + /* Currently the driver just supports ref_tag and guard_tag checking */ + chk_ref = 1; + chk_app = 0; + chk_guard = 0; + + /* Setup a ptr to the protection data provided by the SCSI host */ + sgpe = scsi_prot_sglist(cmd); + protsegcnt = lpfc_cmd->prot_seg_cnt; + + if (sgpe && protsegcnt) { + + /* + * We will only try to verify guard tag if the segment + * data length is a multiple of the blksize. 
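[Editorial sketch, not part of the patch] A self-contained model of the guard-tag math behind lpfc_bg_crc() above: a plain bitwise CRC-16/T10-DIF (polynomial 0x8bb7, MSB first, zero initial value) standing in for the kernel's crc_t10dif(). lpfc_calc_bg_err() recomputes this per data block and compares it with the guard tag stored in the matching DIF tuple; the catalogue check value for this CRC over "123456789" is commonly given as 0xd0db.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t crc_t10dif_sketch(const uint8_t *data, size_t len)
{
    uint16_t crc = 0;
    size_t i;
    int bit;

    for (i = 0; i < len; i++) {
        crc ^= (uint16_t)data[i] << 8;
        for (bit = 0; bit < 8; bit++)
            crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8bb7)
                                 : (uint16_t)(crc << 1);
    }
    return crc;
}

int main(void)
{
    const char *check = "123456789";

    printf("crc = 0x%04x\n",
           crc_t10dif_sketch((const uint8_t *)check, strlen(check)));
    return 0;
}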
+ */ + sgde = scsi_sglist(cmd); + blksize = scsi_prot_interval(cmd); + data_src = (uint8_t *)sg_virt(sgde); + data_len = sgde->length; + if ((data_len & (blksize - 1)) == 0) + chk_guard = 1; + + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + start_ref_tag = scsi_prot_ref_tag(cmd); + start_app_tag = src->app_tag; + len = sgpe->length; + while (src && protsegcnt) { + while (len) { + + /* + * First check to see if a protection data + * check is valid + */ + if ((src->ref_tag == T10_PI_REF_ESCAPE) || + (src->app_tag == T10_PI_APP_ESCAPE)) { + start_ref_tag++; + goto skipit; + } + + /* First Guard Tag checking */ + if (chk_guard) { + guard_tag = src->guard_tag; + if (cmd->prot_flags + & SCSI_PROT_IP_CHECKSUM) + sum = lpfc_bg_csum(data_src, + blksize); + else + sum = lpfc_bg_crc(data_src, + blksize); + if ((guard_tag != sum)) { + err_type = BGS_GUARD_ERR_MASK; + goto out; + } + } + + /* Reference Tag checking */ + ref_tag = be32_to_cpu(src->ref_tag); + if (chk_ref && (ref_tag != start_ref_tag)) { + err_type = BGS_REFTAG_ERR_MASK; + goto out; + } + start_ref_tag++; + + /* App Tag checking */ + app_tag = src->app_tag; + if (chk_app && (app_tag != start_app_tag)) { + err_type = BGS_APPTAG_ERR_MASK; + goto out; + } +skipit: + len -= sizeof(struct scsi_dif_tuple); + if (len < 0) + len = 0; + src++; + + data_src += blksize; + data_len -= blksize; + + /* + * Are we at the end of the Data segment? + * The data segment is only used for Guard + * tag checking. + */ + if (chk_guard && (data_len == 0)) { + chk_guard = 0; + sgde = sg_next(sgde); + if (!sgde) + goto out; + + data_src = (uint8_t *)sg_virt(sgde); + data_len = sgde->length; + if ((data_len & (blksize - 1)) == 0) + chk_guard = 1; + } + } + + /* Goto the next Protection data segment */ + sgpe = sg_next(sgpe); + if (sgpe) { + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + len = sgpe->length; + } else { + src = NULL; + } + protsegcnt--; + } + } +out: + if (err_type == BGS_GUARD_ERR_MASK) { + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); + set_host_byte(cmd, DID_ABORT); + phba->bg_guard_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9069 BLKGRD: reftag %x grd_tag err %x != %x\n", + scsi_prot_ref_tag(cmd), + sum, guard_tag); + + } else if (err_type == BGS_REFTAG_ERR_MASK) { + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); + set_host_byte(cmd, DID_ABORT); + + phba->bg_reftag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9066 BLKGRD: reftag %x ref_tag err %x != %x\n", + scsi_prot_ref_tag(cmd), + ref_tag, start_ref_tag); + + } else if (err_type == BGS_APPTAG_ERR_MASK) { + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); + set_host_byte(cmd, DID_ABORT); + + phba->bg_apptag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9041 BLKGRD: reftag %x app_tag err %x != %x\n", + scsi_prot_ref_tag(cmd), + app_tag, start_app_tag); + } +} + +/* + * This function checks for BlockGuard errors detected by + * the HBA. In case of errors, the ASC/ASCQ fields in the + * sense buffer will be set accordingly, paired with + * ILLEGAL_REQUEST to signal to the kernel that the HBA + * detected corruption. 
+ * + * Returns: + * 0 - No error found + * 1 - BlockGuard error found + * -1 - Internal error (bad profile, ...etc) + */ +static int +lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, + struct lpfc_iocbq *pIocbOut) +{ + struct scsi_cmnd *cmd = lpfc_cmd->pCmd; + struct sli3_bg_fields *bgf; + int ret = 0; + struct lpfc_wcqe_complete *wcqe; + u32 status; + u32 bghm = 0; + u32 bgstat = 0; + u64 failing_sector = 0; + + if (phba->sli_rev == LPFC_SLI_REV4) { + wcqe = &pIocbOut->wcqe_cmpl; + status = bf_get(lpfc_wcqe_c_status, wcqe); + + if (status == CQE_STATUS_DI_ERROR) { + /* Guard Check failed */ + if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) + bgstat |= BGS_GUARD_ERR_MASK; + + /* AppTag Check failed */ + if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) + bgstat |= BGS_APPTAG_ERR_MASK; + + /* RefTag Check failed */ + if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) + bgstat |= BGS_REFTAG_ERR_MASK; + + /* Check to see if there was any good data before the + * error + */ + if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { + bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK; + bghm = wcqe->total_data_placed; + } + + /* + * Set ALL the error bits to indicate we don't know what + * type of error it is. + */ + if (!bgstat) + bgstat |= (BGS_REFTAG_ERR_MASK | + BGS_APPTAG_ERR_MASK | + BGS_GUARD_ERR_MASK); + } + + } else { + bgf = &pIocbOut->iocb.unsli3.sli3_bg; + bghm = bgf->bghm; + bgstat = bgf->bgstat; + } + + if (lpfc_bgs_get_invalid_prof(bgstat)) { + cmd->result = DID_ERROR << 16; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9072 BLKGRD: Invalid BG Profile in cmd " + "0x%x reftag 0x%x blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + scsi_prot_ref_tag(cmd), + scsi_logical_block_count(cmd), bgstat, bghm); + ret = (-1); + goto out; + } + + if (lpfc_bgs_get_uninit_dif_block(bgstat)) { + cmd->result = DID_ERROR << 16; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9073 BLKGRD: Invalid BG PDIF Block in cmd " + "0x%x reftag 0x%x blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + scsi_prot_ref_tag(cmd), + scsi_logical_block_count(cmd), bgstat, bghm); + ret = (-1); + goto out; + } + + if (lpfc_bgs_get_guard_err(bgstat)) { + ret = 1; + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); + set_host_byte(cmd, DID_ABORT); + phba->bg_guard_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9055 BLKGRD: Guard Tag error in cmd " + "0x%x reftag 0x%x blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + scsi_prot_ref_tag(cmd), + scsi_logical_block_count(cmd), bgstat, bghm); + } + + if (lpfc_bgs_get_reftag_err(bgstat)) { + ret = 1; + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); + set_host_byte(cmd, DID_ABORT); + phba->bg_reftag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9056 BLKGRD: Ref Tag error in cmd " + "0x%x reftag 0x%x blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + scsi_prot_ref_tag(cmd), + scsi_logical_block_count(cmd), bgstat, bghm); + } + + if (lpfc_bgs_get_apptag_err(bgstat)) { + ret = 1; + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); + set_host_byte(cmd, DID_ABORT); + phba->bg_apptag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9061 BLKGRD: App Tag error in cmd " + "0x%x reftag 0x%x blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + scsi_prot_ref_tag(cmd), + scsi_logical_block_count(cmd), bgstat, bghm); + } + + if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { + /* + * setup sense data descriptor 0 per SPC-4 as an information + * field, and put the failing LBA in it. 
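[Editorial sketch, not part of the patch] How lpfc_parse_bg_err() above folds the SLI-4 completion bits into a single bgstat bitmask, modeled with illustrative stand-in values for the driver's BGS_*_MASK definitions: each detected tag error sets its bit, the TDPV bit records that some data was placed before the error, and a DI error with no detail bits is treated as "could be any of the three".

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BGS_GUARD_ERR  0x1u   /* stand-ins, not the driver's values */
#define BGS_APPTAG_ERR 0x2u
#define BGS_REFTAG_ERR 0x4u
#define BGS_HI_WATER   0x8u

static uint32_t build_bgstat(bool guard_err, bool app_err, bool ref_err,
                             bool tdpv)
{
    uint32_t bgstat = 0;

    if (guard_err)
        bgstat |= BGS_GUARD_ERR;
    if (app_err)
        bgstat |= BGS_APPTAG_ERR;
    if (ref_err)
        bgstat |= BGS_REFTAG_ERR;
    if (tdpv)
        bgstat |= BGS_HI_WATER;   /* some data was placed before the error */
    if (!bgstat)                  /* DI error with no detail bits: flag all */
        bgstat |= BGS_GUARD_ERR | BGS_APPTAG_ERR | BGS_REFTAG_ERR;
    return bgstat;
}

int main(void)
{
    printf("guard only: 0x%x\n", build_bgstat(true, false, false, false));
    printf("no detail : 0x%x\n", build_bgstat(false, false, false, false));
    return 0;
}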
+ * This code assumes there was also a guard/app/ref tag error + * indication. + */ + cmd->sense_buffer[7] = 0xc; /* Additional sense length */ + cmd->sense_buffer[8] = 0; /* Information descriptor type */ + cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ + cmd->sense_buffer[10] = 0x80; /* Validity bit */ + + /* bghm is a "on the wire" FC frame based count */ + switch (scsi_get_prot_op(cmd)) { + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + bghm /= cmd->device->sector_size; + break; + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + bghm /= (cmd->device->sector_size + + sizeof(struct scsi_dif_tuple)); + break; + } + + failing_sector = scsi_get_lba(cmd); + failing_sector += bghm; + + /* Descriptor Information */ + put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]); + } + + if (!ret) { + /* No error was reported - problem in FW? */ + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9057 BLKGRD: Unknown error in cmd " + "0x%x reftag 0x%x blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + scsi_prot_ref_tag(cmd), + scsi_logical_block_count(cmd), bgstat, bghm); + + /* Calculate what type of error it was */ + lpfc_calc_bg_err(phba, lpfc_cmd); + } +out: + return ret; +} + +/** + * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be mapped. + * + * This routine does the pci dma mapping for scatter-gather list of scsi cmnd + * field of @lpfc_cmd for device with SLI-4 interface spec. + * + * Return codes: + * 2 - Error - Do not retry + * 1 - Error - Retry + * 0 - Success + **/ +static int +lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) +{ + struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; + struct scatterlist *sgel = NULL; + struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; + struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; + struct sli4_sge *first_data_sgl; + struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; + struct lpfc_vport *vport = phba->pport; + union lpfc_wqe128 *wqe = &pwqeq->wqe; + dma_addr_t physaddr; + uint32_t dma_len; + uint32_t dma_offset = 0; + int nseg, i, j; + struct ulp_bde64 *bde; + bool lsp_just_set = false; + struct sli4_hybrid_sgl *sgl_xtra = NULL; + + /* + * There are three possibilities here - use scatter-gather segment, use + * the single mapping, or neither. Start the lpfc command prep by + * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first + * data bde entry. + */ + if (scsi_sg_count(scsi_cmnd)) { + /* + * The driver stores the segment count returned from dma_map_sg + * because this a count of dma-mappings used to map the use_sg + * pages. They are not guaranteed to be the same for those + * architectures that implement an IOMMU. + */ + + nseg = scsi_dma_map(scsi_cmnd); + if (unlikely(nseg <= 0)) + return 1; + sgl += 1; + /* clear the last flag in the fcp_rsp map entry */ + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl += 1; + first_data_sgl = sgl; + lpfc_cmd->seg_cnt = nseg; + if (!phba->cfg_xpsgl && + lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9074 BLKGRD:" + " %s: Too many sg segments from " + "dma_map_sg. 
Config %d, seg_cnt %d\n", + __func__, phba->cfg_sg_seg_cnt, + lpfc_cmd->seg_cnt); + WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); + lpfc_cmd->seg_cnt = 0; + scsi_dma_unmap(scsi_cmnd); + return 2; + } + + /* + * The driver established a maximum scatter-gather segment count + * during probe that limits the number of sg elements in any + * single scsi command. Just run through the seg_cnt and format + * the sge's. + * When using SLI-3 the driver will try to fit all the BDEs into + * the IOCB. If it can't then the BDEs get added to a BPL as it + * does for SLI-2 mode. + */ + + /* for tracking segment boundaries */ + sgel = scsi_sglist(scsi_cmnd); + j = 2; + for (i = 0; i < nseg; i++) { + sgl->word2 = 0; + if (nseg == 1) { + bf_set(lpfc_sli4_sge_last, sgl, 1); + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); + } else { + bf_set(lpfc_sli4_sge_last, sgl, 0); + + /* do we need to expand the segment */ + if (!lsp_just_set && + !((j + 1) % phba->border_sge_num) && + ((nseg - 1) != i)) { + /* set LSP type */ + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_LSP); + + sgl_xtra = lpfc_get_sgl_per_hdwq( + phba, lpfc_cmd); + + if (unlikely(!sgl_xtra)) { + lpfc_cmd->seg_cnt = 0; + scsi_dma_unmap(scsi_cmnd); + return 1; + } + sgl->addr_lo = cpu_to_le32(putPaddrLow( + sgl_xtra->dma_phys_sgl)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh( + sgl_xtra->dma_phys_sgl)); + + } else { + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); + } + } + + if (!(bf_get(lpfc_sli4_sge_type, sgl) & + LPFC_SGE_TYPE_LSP)) { + if ((nseg - 1) == i) + bf_set(lpfc_sli4_sge_last, sgl, 1); + + physaddr = sg_dma_address(sgel); + dma_len = sg_dma_len(sgel); + sgl->addr_lo = cpu_to_le32(putPaddrLow( + physaddr)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh( + physaddr)); + + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(dma_len); + + dma_offset += dma_len; + sgel = sg_next(sgel); + + sgl++; + lsp_just_set = false; + + } else { + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32( + phba->cfg_sg_dma_buf_size); + + sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; + i = i - 1; + + lsp_just_set = true; + } + + j++; + } + + /* PBDE support for first data SGE only. + * For FCoE, we key off Performance Hints. + * For FC, we key off lpfc_enable_pbde. + */ + if (nseg == 1 && + ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || + phba->cfg_enable_pbde)) { + /* Words 13-15 */ + bde = (struct ulp_bde64 *) + &wqe->words[13]; + bde->addrLow = first_data_sgl->addr_lo; + bde->addrHigh = first_data_sgl->addr_hi; + bde->tus.f.bdeSize = + le32_to_cpu(first_data_sgl->sge_len); + bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bde->tus.w = cpu_to_le32(bde->tus.w); + + /* Word 11 - set PBDE bit */ + bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); + } else { + memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); + /* Word 11 - PBDE bit disabled by default template */ + } + } else { + sgl += 1; + /* set the last flag in the fcp_rsp map entry */ + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + + if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || + phba->cfg_enable_pbde) { + bde = (struct ulp_bde64 *) + &wqe->words[13]; + memset(bde, 0, (sizeof(uint32_t) * 3)); + } + } + + /* + * Finish initializing those IOCB fields that are dependent on the + * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is + * explicitly reinitialized. + * all iocb memory resources are reused. 
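[Editorial aside: the LSP chaining above is easier to see in isolation. The net effect is that every SGL page except the last donates one slot to a link (LSP) entry pointing at the next page. Below is a rough stand-alone model of that accounting with a hypothetical helper; it ignores the fcp_cmnd/fcp_rsp entries that occupy the first slots in the real driver and assumes per_page is at least 2.]

#include <stdio.h>

/*
 * How many SGL pages does a list of 'nseg' data segments need if each
 * page holds 'per_page' entries and every page except the last must
 * reserve one entry as a link to the next page?
 */
static unsigned int sgl_pages_needed(unsigned int nseg, unsigned int per_page)
{
	unsigned int pages = 1, room = per_page;

	while (nseg > room) {
		nseg -= (room - 1);	/* one slot becomes the link entry */
		room = per_page;
		pages++;
	}
	return pages;
}

int main(void)
{
	/* e.g. 300 segments with 64-entry pages: 63 + 63 + 63 + 63 + 48 */
	printf("%u pages\n", sgl_pages_needed(300, 64));
	return 0;
}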
+ */ + fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); + /* Set first-burst provided it was successfully negotiated */ + if (!(phba->hba_flag & HBA_FCOE_MODE) && + vport->cfg_first_burst_size && + scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { + u32 init_len, total_len; + + total_len = be32_to_cpu(fcp_cmnd->fcpDl); + init_len = min(total_len, vport->cfg_first_burst_size); + + /* Word 4 & 5 */ + wqe->fcp_iwrite.initial_xfer_len = init_len; + wqe->fcp_iwrite.total_xfer_len = total_len; + } else { + /* Word 4 */ + wqe->fcp_iwrite.total_xfer_len = + be32_to_cpu(fcp_cmnd->fcpDl); + } + + /* + * If the OAS driver feature is enabled and the lun is enabled for + * OAS, set the oas iocb related flags. + */ + if ((phba->cfg_fof) && ((struct lpfc_device_data *) + scsi_cmnd->device->hostdata)->oas_enabled) { + lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); + lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *) + scsi_cmnd->device->hostdata)->priority; + + /* Word 10 */ + bf_set(wqe_oas, &wqe->generic.wqe_com, 1); + bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); + + if (lpfc_cmd->cur_iocbq.priority) + bf_set(wqe_ccp, &wqe->generic.wqe_com, + (lpfc_cmd->cur_iocbq.priority << 1)); + else + bf_set(wqe_ccp, &wqe->generic.wqe_com, + (phba->cfg_XLanePriority << 1)); + } + + return 0; +} + +/** + * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be mapped. + * + * This is the protection/DIF aware version of + * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the + * two functions eventually, but for now, it's here + * Return codes: + * 2 - Error - Do not retry + * 1 - Error - Retry + * 0 - Success + **/ +static int +lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, + struct lpfc_io_buf *lpfc_cmd) +{ + struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; + struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; + struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl); + struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; + union lpfc_wqe128 *wqe = &pwqeq->wqe; + uint32_t num_sge = 0; + int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; + int prot_group_type = 0; + int fcpdl; + int ret = 1; + struct lpfc_vport *vport = phba->pport; + + /* + * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd + * fcp_rsp regions to the first data sge entry + */ + if (scsi_sg_count(scsi_cmnd)) { + /* + * The driver stores the segment count returned from dma_map_sg + * because this a count of dma-mappings used to map the use_sg + * pages. They are not guaranteed to be the same for those + * architectures that implement an IOMMU. 
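[Editorial aside: the first-burst handling just above reduces to clamping the negotiated burst size against the full FCP_DL, and only for writes. A minimal sketch of that clamp, using a hypothetical struct rather than the real WQE word layout:]

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for the two write-WQE length fields set above. */
struct iwrite_len {
	uint32_t initial_xfer_len;	/* first burst, 0 = not used */
	uint32_t total_xfer_len;	/* full FCP_DL */
};

static void set_first_burst(struct iwrite_len *w, uint32_t fcp_dl,
			    uint32_t first_burst_cfg, int is_write)
{
	w->total_xfer_len = fcp_dl;
	/* Only writes get a first burst, and only if one was negotiated. */
	if (is_write && first_burst_cfg)
		w->initial_xfer_len = fcp_dl < first_burst_cfg ?
				      fcp_dl : first_burst_cfg;
	else
		w->initial_xfer_len = 0;
}

int main(void)
{
	struct iwrite_len w;

	set_first_burst(&w, 1 << 20, 64 * 1024, 1);
	printf("initial %u total %u\n", w.initial_xfer_len, w.total_xfer_len);
	return 0;
}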
+ */ + datasegcnt = dma_map_sg(&phba->pcidev->dev, + scsi_sglist(scsi_cmnd), + scsi_sg_count(scsi_cmnd), datadir); + if (unlikely(!datasegcnt)) + return 1; + + sgl += 1; + /* clear the last flag in the fcp_rsp map entry */ + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + + sgl += 1; + lpfc_cmd->seg_cnt = datasegcnt; + + /* First check if data segment count from SCSI Layer is good */ + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt && + !phba->cfg_xpsgl) { + WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); + ret = 2; + goto err; + } + + prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); + + switch (prot_group_type) { + case LPFC_PG_TYPE_NO_DIF: + /* Here we need to add a DISEED to the count */ + if (((lpfc_cmd->seg_cnt + 1) > + phba->cfg_total_seg_cnt) && + !phba->cfg_xpsgl) { + ret = 2; + goto err; + } + + num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, + datasegcnt, lpfc_cmd); + + /* we should have 2 or more entries in buffer list */ + if (num_sge < 2) { + ret = 2; + goto err; + } + break; + + case LPFC_PG_TYPE_DIF_BUF: + /* + * This type indicates that protection buffers are + * passed to the driver, so that needs to be prepared + * for DMA + */ + protsegcnt = dma_map_sg(&phba->pcidev->dev, + scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), datadir); + if (unlikely(!protsegcnt)) { + scsi_dma_unmap(scsi_cmnd); + return 1; + } + + lpfc_cmd->prot_seg_cnt = protsegcnt; + /* + * There is a minimun of 3 SGEs used for every + * protection data segment. + */ + if (((lpfc_cmd->prot_seg_cnt * 3) > + (phba->cfg_total_seg_cnt - 2)) && + !phba->cfg_xpsgl) { + ret = 2; + goto err; + } + + num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, + datasegcnt, protsegcnt, lpfc_cmd); + + /* we should have 3 or more entries in buffer list */ + if (num_sge < 3 || + (num_sge > phba->cfg_total_seg_cnt && + !phba->cfg_xpsgl)) { + ret = 2; + goto err; + } + break; + + case LPFC_PG_TYPE_INVALID: + default: + scsi_dma_unmap(scsi_cmnd); + lpfc_cmd->seg_cnt = 0; + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9083 Unexpected protection group %i\n", + prot_group_type); + return 2; + } + } + + switch (scsi_get_prot_op(scsi_cmnd)) { + case SCSI_PROT_WRITE_STRIP: + case SCSI_PROT_READ_STRIP: + lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_STRIP; + break; + case SCSI_PROT_WRITE_INSERT: + case SCSI_PROT_READ_INSERT: + lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_INSERT; + break; + case SCSI_PROT_WRITE_PASS: + case SCSI_PROT_READ_PASS: + lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_PASS; + break; + } + + fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); + fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); + + /* Set first-burst provided it was successfully negotiated */ + if (!(phba->hba_flag & HBA_FCOE_MODE) && + vport->cfg_first_burst_size && + scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { + u32 init_len, total_len; + + total_len = be32_to_cpu(fcp_cmnd->fcpDl); + init_len = min(total_len, vport->cfg_first_burst_size); + + /* Word 4 & 5 */ + wqe->fcp_iwrite.initial_xfer_len = init_len; + wqe->fcp_iwrite.total_xfer_len = total_len; + } else { + /* Word 4 */ + wqe->fcp_iwrite.total_xfer_len = + be32_to_cpu(fcp_cmnd->fcpDl); + } + + /* + * If the OAS driver feature is enabled and the lun is enabled for + * OAS, set the oas iocb related flags. 
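[Editorial aside: the two capacity checks in the block above budget SGEs differently depending on whether separate protection buffers are present. The sketch below restates that arithmetic with a hypothetical helper: one extra DISEED descriptor for the no-DIF case, and roughly three SGEs per protection segment on top of the two cmd/rsp entries otherwise. It is an approximation for illustration, not the driver's exact sizing rules.]

#include <stdio.h>

/* 'total' is the SGE budget available to one command. */
static int bg_sgl_fits(unsigned int data_segs, unsigned int prot_segs,
		       unsigned int total)
{
	if (prot_segs == 0)
		/* data SGEs plus one DISEED descriptor */
		return (data_segs + 1) <= total;

	/* separate protection buffers: ~3 SGEs per protection segment,
	 * after reserving 2 entries for fcp_cmnd/fcp_rsp */
	return (prot_segs * 3) <= (total - 2);
}

int main(void)
{
	printf("%d\n", bg_sgl_fits(64, 0, 128));	/* 1: 65 <= 128 */
	printf("%d\n", bg_sgl_fits(64, 50, 128));	/* 0: 150 > 126 */
	return 0;
}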
+ */ + if ((phba->cfg_fof) && ((struct lpfc_device_data *) + scsi_cmnd->device->hostdata)->oas_enabled) { + lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); + + /* Word 10 */ + bf_set(wqe_oas, &wqe->generic.wqe_com, 1); + bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); + bf_set(wqe_ccp, &wqe->generic.wqe_com, + (phba->cfg_XLanePriority << 1)); + } + + /* Word 7. DIF Flags */ + if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_PASS) + bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); + else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_STRIP) + bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); + else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_INSERT) + bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); + + lpfc_cmd->cur_iocbq.cmd_flag &= ~(LPFC_IO_DIF_PASS | + LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT); + + return 0; +err: + if (lpfc_cmd->seg_cnt) + scsi_dma_unmap(scsi_cmnd); + if (lpfc_cmd->prot_seg_cnt) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + scsi_cmnd->sc_data_direction); + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9084 Cannot setup S/G List for HBA " + "IO segs %d/%d SGL %d SCSI %d: %d %d %d\n", + lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, + phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, + prot_group_type, num_sge, ret); + + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->prot_seg_cnt = 0; + return ret; +} + +/** + * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be mapped. + * + * This routine wraps the actual DMA mapping function pointer from the + * lpfc_hba struct. + * + * Return codes: + * 1 - Error + * 0 - Success + **/ +static inline int +lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) +{ + return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); +} + +/** + * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer + * using BlockGuard. + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be mapped. + * + * This routine wraps the actual DMA mapping function pointer from the + * lpfc_hba struct. + * + * Return codes: + * 1 - Error + * 0 - Success + **/ +static inline int +lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) +{ + return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); +} + +/** + * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi + * buffer + * @vport: Pointer to vport object. + * @lpfc_cmd: The scsi buffer which is going to be mapped. + * @tmo: Timeout value for IO + * + * This routine initializes IOCB/WQE data structure from scsi command + * + * Return codes: + * 1 - Error + * 0 - Success + **/ +static inline int +lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, + uint8_t tmo) +{ + return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo); +} + +/** + * lpfc_send_scsi_error_event - Posts an event when there is SCSI error + * @phba: Pointer to hba context object. + * @vport: Pointer to vport object. + * @lpfc_cmd: Pointer to lpfc scsi command which reported the error. + * @fcpi_parm: FCP Initiator parameter. + * + * This function posts an event when there is a SCSI command reporting + * error from the scsi device. 
+ **/ +static void +lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, + struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) { + struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; + struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; + uint32_t resp_info = fcprsp->rspStatus2; + uint32_t scsi_status = fcprsp->rspStatus3; + struct lpfc_fast_path_event *fast_path_evt = NULL; + struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; + unsigned long flags; + + if (!pnode) + return; + + /* If there is queuefull or busy condition send a scsi event */ + if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || + (cmnd->result == SAM_STAT_BUSY)) { + fast_path_evt = lpfc_alloc_fast_evt(phba); + if (!fast_path_evt) + return; + fast_path_evt->un.scsi_evt.event_type = + FC_REG_SCSI_EVENT; + fast_path_evt->un.scsi_evt.subcategory = + (cmnd->result == SAM_STAT_TASK_SET_FULL) ? + LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY; + fast_path_evt->un.scsi_evt.lun = cmnd->device->lun; + memcpy(&fast_path_evt->un.scsi_evt.wwpn, + &pnode->nlp_portname, sizeof(struct lpfc_name)); + memcpy(&fast_path_evt->un.scsi_evt.wwnn, + &pnode->nlp_nodename, sizeof(struct lpfc_name)); + } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen && + ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) { + fast_path_evt = lpfc_alloc_fast_evt(phba); + if (!fast_path_evt) + return; + fast_path_evt->un.check_cond_evt.scsi_event.event_type = + FC_REG_SCSI_EVENT; + fast_path_evt->un.check_cond_evt.scsi_event.subcategory = + LPFC_EVENT_CHECK_COND; + fast_path_evt->un.check_cond_evt.scsi_event.lun = + cmnd->device->lun; + memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn, + &pnode->nlp_portname, sizeof(struct lpfc_name)); + memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn, + &pnode->nlp_nodename, sizeof(struct lpfc_name)); + fast_path_evt->un.check_cond_evt.sense_key = + cmnd->sense_buffer[2] & 0xf; + fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12]; + fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13]; + } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && + fcpi_parm && + ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) || + ((scsi_status == SAM_STAT_GOOD) && + !(resp_info & (RESID_UNDER | RESID_OVER))))) { + /* + * If status is good or resid does not match with fcp_param and + * there is valid fcpi_parm, then there is a read_check error + */ + fast_path_evt = lpfc_alloc_fast_evt(phba); + if (!fast_path_evt) + return; + fast_path_evt->un.read_check_error.header.event_type = + FC_REG_FABRIC_EVENT; + fast_path_evt->un.read_check_error.header.subcategory = + LPFC_EVENT_FCPRDCHKERR; + memcpy(&fast_path_evt->un.read_check_error.header.wwpn, + &pnode->nlp_portname, sizeof(struct lpfc_name)); + memcpy(&fast_path_evt->un.read_check_error.header.wwnn, + &pnode->nlp_nodename, sizeof(struct lpfc_name)); + fast_path_evt->un.read_check_error.lun = cmnd->device->lun; + fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0]; + fast_path_evt->un.read_check_error.fcpiparam = + fcpi_parm; + } else + return; + + fast_path_evt->vport = vport; + spin_lock_irqsave(&phba->hbalock, flags); + list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_worker_wake_up(phba); + return; +} + +/** + * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev + * @phba: The HBA for which this call is being executed. + * @psb: The scsi buffer which is going to be un-mapped. 
+ * + * This routine does DMA un-mapping of scatter gather list of scsi command + * field of @lpfc_cmd for device with SLI-3 interface spec. + **/ +static void +lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) +{ + /* + * There are only two special cases to consider. (1) the scsi command + * requested scatter-gather usage or (2) the scsi command allocated + * a request buffer, but did not request use_sg. There is a third + * case, but it does not require resource deallocation. + */ + if (psb->seg_cnt > 0) + scsi_dma_unmap(psb->pCmd); + if (psb->prot_seg_cnt > 0) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd), + scsi_prot_sg_count(psb->pCmd), + psb->pCmd->sc_data_direction); +} + +/** + * lpfc_unblock_requests - allow further commands to be queued. + * @phba: pointer to phba object + * + * For single vport, just call scsi_unblock_requests on physical port. + * For multiple vports, send scsi_unblock_requests for all the vports. + */ +void +lpfc_unblock_requests(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + struct Scsi_Host *shost; + int i; + + if (phba->sli_rev == LPFC_SLI_REV4 && + !phba->sli4_hba.max_cfg_param.vpi_used) { + shost = lpfc_shost_from_vport(phba->pport); + scsi_unblock_requests(shost); + return; + } + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + shost = lpfc_shost_from_vport(vports[i]); + scsi_unblock_requests(shost); + } + lpfc_destroy_vport_work_array(phba, vports); +} + +/** + * lpfc_block_requests - prevent further commands from being queued. + * @phba: pointer to phba object + * + * For single vport, just call scsi_block_requests on physical port. + * For multiple vports, send scsi_block_requests for all the vports. + */ +void +lpfc_block_requests(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + struct Scsi_Host *shost; + int i; + + if (atomic_read(&phba->cmf_stop_io)) + return; + + if (phba->sli_rev == LPFC_SLI_REV4 && + !phba->sli4_hba.max_cfg_param.vpi_used) { + shost = lpfc_shost_from_vport(phba->pport); + scsi_block_requests(shost); + return; + } + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + shost = lpfc_shost_from_vport(vports[i]); + scsi_block_requests(shost); + } + lpfc_destroy_vport_work_array(phba, vports); +} + +/** + * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion + * @phba: The HBA for which this call is being executed. + * @time: The latency of the IO that completed (in ns) + * @size: The size of the IO that completed + * @shost: SCSI host the IO completed on (NULL for a NVME IO) + * + * The routine adjusts the various Burst and Bandwidth counters used in + * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT, + * that means the IO was never issued to the HBA, so this routine is + * just being called to cleanup the counter from a previous + * lpfc_update_cmf_cmd call. 
+ */ +int +lpfc_update_cmf_cmpl(struct lpfc_hba *phba, + uint64_t time, uint32_t size, struct Scsi_Host *shost) +{ + struct lpfc_cgn_stat *cgs; + + if (time != LPFC_CGN_NOT_SENT) { + /* lat is ns coming in, save latency in us */ + if (time < 1000) + time = 1; + else + time = div_u64(time + 500, 1000); /* round it */ + + cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id()); + atomic64_add(size, &cgs->rcv_bytes); + atomic64_add(time, &cgs->rx_latency); + atomic_inc(&cgs->rx_io_cnt); + } + return 0; +} + +/** + * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission + * @phba: The HBA for which this call is being executed. + * @size: The size of the IO that will be issued + * + * The routine adjusts the various Burst and Bandwidth counters used in + * Congestion management and E2E. + */ +int +lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size) +{ + uint64_t total; + struct lpfc_cgn_stat *cgs; + int cpu; + + /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */ + if (phba->cmf_active_mode == LPFC_CFG_MANAGED && + phba->cmf_max_bytes_per_interval) { + total = 0; + for_each_present_cpu(cpu) { + cgs = per_cpu_ptr(phba->cmf_stat, cpu); + total += atomic64_read(&cgs->total_bytes); + } + if (total >= phba->cmf_max_bytes_per_interval) { + if (!atomic_xchg(&phba->cmf_bw_wait, 1)) { + lpfc_block_requests(phba); + phba->cmf_last_ts = + lpfc_calc_cmf_latency(phba); + } + atomic_inc(&phba->cmf_busy); + return -EBUSY; + } + if (size > atomic_read(&phba->rx_max_read_cnt)) + atomic_set(&phba->rx_max_read_cnt, size); + } + + cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id()); + atomic64_add(size, &cgs->total_bytes); + return 0; +} + +/** + * lpfc_handle_fcp_err - FCP response handler + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: Pointer to lpfc_io_buf data structure. + * @fcpi_parm: FCP Initiator parameter. + * + * This routine is called to process response IOCB with status field + * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command + * based upon SCSI and FCP error. + **/ +static void +lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, + uint32_t fcpi_parm) +{ + struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; + struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; + struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; + uint32_t resp_info = fcprsp->rspStatus2; + uint32_t scsi_status = fcprsp->rspStatus3; + uint32_t *lp; + uint32_t host_status = DID_OK; + uint32_t rsplen = 0; + uint32_t fcpDl; + uint32_t logit = LOG_FCP | LOG_FCP_ERROR; + + + /* + * If this is a task management command, there is no + * scsi packet associated with this lpfc_cmd. The driver + * consumes it. 
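[Editorial aside: the two CMF routines above do two small pieces of arithmetic, a nanosecond-to-microsecond latency conversion with round-to-nearest and a floor of 1, and an admission check that sums per-CPU byte counters against the interval budget. The stand-alone sketch below mirrors both, using a plain array in place of the per-CPU atomics and -1 in place of -EBUSY; names are hypothetical.]

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 4

/* Plain array standing in for the per-CPU congestion counters. */
static uint64_t total_bytes[NR_CPUS];

/* Latency is stored in microseconds: round to nearest, floor of 1 us. */
static uint64_t ns_to_us_rounded(uint64_t ns)
{
	return ns < 1000 ? 1 : (ns + 500) / 1000;
}

/* Admission check: refuse the IO once the interval budget is spent,
 * otherwise charge its size against this CPU's counter. */
static int cmf_admit(uint32_t size, uint64_t max_per_interval, int cpu)
{
	uint64_t sum = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++)
		sum += total_bytes[i];
	if (sum >= max_per_interval)
		return -1;		/* caller blocks further requests */

	total_bytes[cpu] += size;
	return 0;
}

int main(void)
{
	printf("%llu us\n", (unsigned long long)ns_to_us_rounded(1499));
	total_bytes[0] = 1200;
	printf("admit: %d\n", cmf_admit(200, 1000, 1));	/* -1: over budget */
	printf("admit: %d\n", cmf_admit(200, 4096, 1));	/*  0: charged */
	return 0;
}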
+ */ + if (fcpcmd->fcpCntl2) { + scsi_status = 0; + goto out; + } + + if (resp_info & RSP_LEN_VALID) { + rsplen = be32_to_cpu(fcprsp->rspRspLen); + if (rsplen != 0 && rsplen != 4 && rsplen != 8) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2719 Invalid response length: " + "tgt x%x lun x%llx cmnd x%x rsplen " + "x%x\n", cmnd->device->id, + cmnd->device->lun, cmnd->cmnd[0], + rsplen); + host_status = DID_ERROR; + goto out; + } + if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2757 Protocol failure detected during " + "processing of FCP I/O op: " + "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n", + cmnd->device->id, + cmnd->device->lun, cmnd->cmnd[0], + fcprsp->rspInfo3); + host_status = DID_ERROR; + goto out; + } + } + + if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { + uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); + if (snslen > SCSI_SENSE_BUFFERSIZE) + snslen = SCSI_SENSE_BUFFERSIZE; + + if (resp_info & RSP_LEN_VALID) + rsplen = be32_to_cpu(fcprsp->rspRspLen); + memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen); + } + lp = (uint32_t *)cmnd->sense_buffer; + + /* special handling for under run conditions */ + if (!scsi_status && (resp_info & RESID_UNDER)) { + /* don't log under runs if fcp set... */ + if (vport->cfg_log_verbose & LOG_FCP) + logit = LOG_FCP_ERROR; + /* unless operator says so */ + if (vport->cfg_log_verbose & LOG_FCP_UNDER) + logit = LOG_FCP_UNDER; + } + + lpfc_printf_vlog(vport, KERN_WARNING, logit, + "9024 FCP command x%x failed: x%x SNS x%x x%x " + "Data: x%x x%x x%x x%x x%x\n", + cmnd->cmnd[0], scsi_status, + be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, + be32_to_cpu(fcprsp->rspResId), + be32_to_cpu(fcprsp->rspSnsLen), + be32_to_cpu(fcprsp->rspRspLen), + fcprsp->rspInfo3); + + scsi_set_resid(cmnd, 0); + fcpDl = be32_to_cpu(fcpcmd->fcpDl); + if (resp_info & RESID_UNDER) { + scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER, + "9025 FCP Underrun, expected %d, " + "residual %d Data: x%x x%x x%x\n", + fcpDl, + scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], + cmnd->underflow); + + /* + * If there is an under run, check if under run reported by + * storage array is same as the under run reported by HBA. + * If this is not same, there is a dropped frame. + */ + if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) { + lpfc_printf_vlog(vport, KERN_WARNING, + LOG_FCP | LOG_FCP_ERROR, + "9026 FCP Read Check Error " + "and Underrun Data: x%x x%x x%x x%x\n", + fcpDl, + scsi_get_resid(cmnd), fcpi_parm, + cmnd->cmnd[0]); + scsi_set_resid(cmnd, scsi_bufflen(cmnd)); + host_status = DID_ERROR; + } + /* + * The cmnd->underflow is the minimum number of bytes that must + * be transferred for this command. Provided a sense condition + * is not present, make sure the actual amount transferred is at + * least the underflow value or fail. + */ + if (!(resp_info & SNS_LEN_VALID) && + (scsi_status == SAM_STAT_GOOD) && + (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) + < cmnd->underflow)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9027 FCP command x%x residual " + "underrun converted to error " + "Data: x%x x%x x%x\n", + cmnd->cmnd[0], scsi_bufflen(cmnd), + scsi_get_resid(cmnd), cmnd->underflow); + host_status = DID_ERROR; + } + } else if (resp_info & RESID_OVER) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "9028 FCP command x%x residual overrun error. 
" + "Data: x%x x%x\n", cmnd->cmnd[0], + scsi_bufflen(cmnd), scsi_get_resid(cmnd)); + host_status = DID_ERROR; + + /* + * Check SLI validation that all the transfer was actually done + * (fcpi_parm should be zero). Apply check only to reads. + */ + } else if (fcpi_parm) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, + "9029 FCP %s Check Error Data: " + "x%x x%x x%x x%x x%x\n", + ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ? + "Read" : "Write"), + fcpDl, be32_to_cpu(fcprsp->rspResId), + fcpi_parm, cmnd->cmnd[0], scsi_status); + + /* There is some issue with the LPe12000 that causes it + * to miscalculate the fcpi_parm and falsely trip this + * recovery logic. Detect this case and don't error when true. + */ + if (fcpi_parm > fcpDl) + goto out; + + switch (scsi_status) { + case SAM_STAT_GOOD: + case SAM_STAT_CHECK_CONDITION: + /* Fabric dropped a data frame. Fail any successful + * command in which we detected dropped frames. + * A status of good or some check conditions could + * be considered a successful command. + */ + host_status = DID_ERROR; + break; + } + scsi_set_resid(cmnd, scsi_bufflen(cmnd)); + } + + out: + cmnd->result = host_status << 16 | scsi_status; + lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm); +} + +/** + * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO + * @phba: The hba for which this call is being executed. + * @pwqeIn: The command WQE for the scsi cmnd. + * @pwqeOut: Pointer to driver response WQE object. + * + * This routine assigns scsi command result by looking into response WQE + * status field appropriately. This routine handles QUEUE FULL condition as + * well by ramping down device queue depth. + **/ +static void +lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, + struct lpfc_iocbq *pwqeOut) +{ + struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf; + struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl; + struct lpfc_vport *vport = pwqeIn->vport; + struct lpfc_rport_data *rdata; + struct lpfc_nodelist *ndlp; + struct scsi_cmnd *cmd; + unsigned long flags; + struct lpfc_fast_path_event *fast_path_evt; + struct Scsi_Host *shost; + u32 logit = LOG_FCP; + u32 idx; + u32 lat; + u8 wait_xb_clr = 0; + + /* Sanity check on return of outstanding command */ + if (!lpfc_cmd) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "9032 Null lpfc_cmd pointer. 
No " + "release, skip completion\n"); + return; + } + + rdata = lpfc_cmd->rdata; + ndlp = rdata->pnode; + + /* Sanity check on return of outstanding command */ + cmd = lpfc_cmd->pCmd; + if (!cmd) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "9042 I/O completion: Not an active IO\n"); + lpfc_release_scsi_buf(phba, lpfc_cmd); + return; + } + /* Guard against abort handler being called at same time */ + spin_lock(&lpfc_cmd->buf_lock); + idx = lpfc_cmd->cur_iocbq.hba_wqidx; + if (phba->sli4_hba.hdwq) + phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) + this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); +#endif + shost = cmd->device->host; + + lpfc_cmd->status = bf_get(lpfc_wcqe_c_status, wcqe); + lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK); + + lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; + if (bf_get(lpfc_wcqe_c_xb, wcqe)) { + lpfc_cmd->flags |= LPFC_SBUF_XBUSY; + if (phba->cfg_fcp_wait_abts_rsp) + wait_xb_clr = 1; + } + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (lpfc_cmd->prot_data_type) { + struct scsi_dif_tuple *src = NULL; + + src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; + /* + * Used to restore any changes to protection + * data for error injection. + */ + switch (lpfc_cmd->prot_data_type) { + case LPFC_INJERR_REFTAG: + src->ref_tag = + lpfc_cmd->prot_data; + break; + case LPFC_INJERR_APPTAG: + src->app_tag = + (uint16_t)lpfc_cmd->prot_data; + break; + case LPFC_INJERR_GUARD: + src->guard_tag = + (uint16_t)lpfc_cmd->prot_data; + break; + default: + break; + } + + lpfc_cmd->prot_data = 0; + lpfc_cmd->prot_data_type = 0; + lpfc_cmd->prot_data_segment = NULL; + } +#endif + if (unlikely(lpfc_cmd->status)) { + if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && + !lpfc_cmd->fcp_rsp->rspStatus3 && + (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && + !(vport->cfg_log_verbose & LOG_FCP_UNDER)) + logit = 0; + else + logit = LOG_FCP | LOG_FCP_UNDER; + lpfc_printf_vlog(vport, KERN_WARNING, logit, + "9034 FCP cmd x%x failed <%d/%lld> " + "status: x%x result: x%x " + "sid: x%x did: x%x oxid: x%x " + "Data: x%x x%x x%x\n", + cmd->cmnd[0], + cmd->device ? cmd->device->id : 0xffff, + cmd->device ? cmd->device->lun : 0xffff, + lpfc_cmd->status, lpfc_cmd->result, + vport->fc_myDID, + (ndlp) ? ndlp->nlp_DID : 0, + lpfc_cmd->cur_iocbq.sli4_xritag, + wcqe->parameter, wcqe->total_data_placed, + lpfc_cmd->cur_iocbq.iotag); + } + + switch (lpfc_cmd->status) { + case CQE_STATUS_SUCCESS: + cmd->result = DID_OK << 16; + break; + case CQE_STATUS_FCP_RSP_FAILURE: + lpfc_handle_fcp_err(vport, lpfc_cmd, + pwqeIn->wqe.fcp_iread.total_xfer_len - + wcqe->total_data_placed); + break; + case CQE_STATUS_NPORT_BSY: + case CQE_STATUS_FABRIC_BSY: + cmd->result = DID_TRANSPORT_DISRUPTED << 16; + fast_path_evt = lpfc_alloc_fast_evt(phba); + if (!fast_path_evt) + break; + fast_path_evt->un.fabric_evt.event_type = + FC_REG_FABRIC_EVENT; + fast_path_evt->un.fabric_evt.subcategory = + (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 
+ LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; + if (ndlp) { + memcpy(&fast_path_evt->un.fabric_evt.wwpn, + &ndlp->nlp_portname, + sizeof(struct lpfc_name)); + memcpy(&fast_path_evt->un.fabric_evt.wwnn, + &ndlp->nlp_nodename, + sizeof(struct lpfc_name)); + } + fast_path_evt->vport = vport; + fast_path_evt->work_evt.evt = + LPFC_EVT_FASTPATH_MGMT_EVT; + spin_lock_irqsave(&phba->hbalock, flags); + list_add_tail(&fast_path_evt->work_evt.evt_listp, + &phba->work_list); + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_worker_wake_up(phba); + lpfc_printf_vlog(vport, KERN_WARNING, logit, + "9035 Fabric/Node busy FCP cmd x%x failed" + " <%d/%lld> " + "status: x%x result: x%x " + "sid: x%x did: x%x oxid: x%x " + "Data: x%x x%x x%x\n", + cmd->cmnd[0], + cmd->device ? cmd->device->id : 0xffff, + cmd->device ? cmd->device->lun : 0xffff, + lpfc_cmd->status, lpfc_cmd->result, + vport->fc_myDID, + (ndlp) ? ndlp->nlp_DID : 0, + lpfc_cmd->cur_iocbq.sli4_xritag, + wcqe->parameter, + wcqe->total_data_placed, + lpfc_cmd->cur_iocbq.iocb.ulpIoTag); + break; + case CQE_STATUS_DI_ERROR: + if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) + lpfc_cmd->result = IOERR_RX_DMA_FAILED; + else + lpfc_cmd->result = IOERR_TX_DMA_FAILED; + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_BG, + "9048 DI Error xri x%x status x%x DI ext " + "status x%x data placed x%x\n", + lpfc_cmd->cur_iocbq.sli4_xritag, + lpfc_cmd->status, wcqe->parameter, + wcqe->total_data_placed); + if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { + /* BG enabled cmd. Parse BG error */ + lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut); + break; + } + cmd->result = DID_ERROR << 16; + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + "9040 DI Error on unprotected cmd\n"); + break; + case CQE_STATUS_REMOTE_STOP: + if (ndlp) { + /* This I/O was aborted by the target, we don't + * know the rxid and because we did not send the + * ABTS we cannot generate and RRQ. + */ + lpfc_set_rrq_active(phba, ndlp, + lpfc_cmd->cur_iocbq.sli4_lxritag, + 0, 0); + } + fallthrough; + case CQE_STATUS_LOCAL_REJECT: + if (lpfc_cmd->result & IOERR_DRVR_MASK) + lpfc_cmd->status = IOSTAT_DRIVER_REJECT; + if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || + lpfc_cmd->result == + IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || + lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || + lpfc_cmd->result == + IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { + cmd->result = DID_NO_CONNECT << 16; + break; + } + if (lpfc_cmd->result == IOERR_INVALID_RPI || + lpfc_cmd->result == IOERR_LINK_DOWN || + lpfc_cmd->result == IOERR_NO_RESOURCES || + lpfc_cmd->result == IOERR_ABORT_REQUESTED || + lpfc_cmd->result == IOERR_RPI_SUSPENDED || + lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { + cmd->result = DID_TRANSPORT_DISRUPTED << 16; + break; + } + lpfc_printf_vlog(vport, KERN_WARNING, logit, + "9036 Local Reject FCP cmd x%x failed" + " <%d/%lld> " + "status: x%x result: x%x " + "sid: x%x did: x%x oxid: x%x " + "Data: x%x x%x x%x\n", + cmd->cmnd[0], + cmd->device ? cmd->device->id : 0xffff, + cmd->device ? cmd->device->lun : 0xffff, + lpfc_cmd->status, lpfc_cmd->result, + vport->fc_myDID, + (ndlp) ? 
ndlp->nlp_DID : 0, + lpfc_cmd->cur_iocbq.sli4_xritag, + wcqe->parameter, + wcqe->total_data_placed, + lpfc_cmd->cur_iocbq.iocb.ulpIoTag); + fallthrough; + default: + cmd->result = DID_ERROR << 16; + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9037 FCP Completion Error: xri %x " + "status x%x result x%x [x%x] " + "placed x%x\n", + lpfc_cmd->cur_iocbq.sli4_xritag, + lpfc_cmd->status, lpfc_cmd->result, + wcqe->parameter, + wcqe->total_data_placed); + } + if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { + u32 *lp = (u32 *)cmd->sense_buffer; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9039 Iodone <%d/%llu> cmd x%px, error " + "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n", + cmd->device->id, cmd->device->lun, cmd, + cmd->result, *lp, *(lp + 3), + (cmd->device->sector_size) ? + (u64)scsi_get_lba(cmd) : 0, + cmd->retries, scsi_get_resid(cmd)); + } + + if (vport->cfg_max_scsicmpl_time && + time_after(jiffies, lpfc_cmd->start_time + + msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { + spin_lock_irqsave(shost->host_lock, flags); + if (ndlp) { + if (ndlp->cmd_qdepth > + atomic_read(&ndlp->cmd_pending) && + (atomic_read(&ndlp->cmd_pending) > + LPFC_MIN_TGT_QDEPTH) && + (cmd->cmnd[0] == READ_10 || + cmd->cmnd[0] == WRITE_10)) + ndlp->cmd_qdepth = + atomic_read(&ndlp->cmd_pending); + + ndlp->last_change_time = jiffies; + } + spin_unlock_irqrestore(shost->host_lock, flags); + } + lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (lpfc_cmd->ts_cmd_start) { + lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp; + lpfc_cmd->ts_data_io = ktime_get_ns(); + phba->ktime_last_cmd = lpfc_cmd->ts_data_io; + lpfc_io_ktime(phba, lpfc_cmd); + } +#endif + if (likely(!wait_xb_clr)) + lpfc_cmd->pCmd = NULL; + spin_unlock(&lpfc_cmd->buf_lock); + + /* Check if IO qualified for CMF */ + if (phba->cmf_active_mode != LPFC_CFG_OFF && + cmd->sc_data_direction == DMA_FROM_DEVICE && + (scsi_sg_count(cmd))) { + /* Used when calculating average latency */ + lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start; + lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost); + } + + if (wait_xb_clr) + goto out; + + /* The sdev is not guaranteed to be valid post scsi_done upcall. */ + scsi_done(cmd); + + /* + * If there is an abort thread waiting for command completion + * wake up the thread. + */ + spin_lock(&lpfc_cmd->buf_lock); + lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED; + if (lpfc_cmd->waitq) + wake_up(lpfc_cmd->waitq); + spin_unlock(&lpfc_cmd->buf_lock); +out: + lpfc_release_scsi_buf(phba, lpfc_cmd); +} + +/** + * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine + * @phba: The Hba for which this call is being executed. + * @pIocbIn: The command IOCBQ for the scsi cmnd. + * @pIocbOut: The response IOCBQ for the scsi cmnd. + * + * This routine assigns scsi command result by looking into response IOCB + * status field appropriately. This routine handles QUEUE FULL condition as + * well by ramping down device queue depth. 
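[Editorial aside: stripped of logging, event posting and the FCP_RSP/LOCAL_REJECT subcode parsing, the completion switches above boil down to mapping a firmware completion status onto a SCSI midlayer host byte. A condensed, hypothetical restatement follows; the enum is invented for illustration and only the outer shape of the mapping is shown, with DID_* values mirroring include/scsi/scsi_status.h.]

#include <stdio.h>

#define DID_OK                  0x00
#define DID_ERROR               0x07
#define DID_TRANSPORT_DISRUPTED 0x0e

enum fw_status { FW_SUCCESS, FW_PORT_BUSY, FW_FABRIC_BUSY, FW_OTHER };

static int host_byte_for(enum fw_status s)
{
	switch (s) {
	case FW_SUCCESS:
		return DID_OK;
	case FW_PORT_BUSY:
	case FW_FABRIC_BUSY:
		/* retryable transport hiccup, not a device error */
		return DID_TRANSPORT_DISRUPTED;
	default:
		return DID_ERROR;
	}
}

int main(void)
{
	/* the host byte occupies bits 16-23 of scmd->result */
	printf("result = 0x%x\n", host_byte_for(FW_FABRIC_BUSY) << 16);
	return 0;
}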
+ **/ +static void +lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, + struct lpfc_iocbq *pIocbOut) +{ + struct lpfc_io_buf *lpfc_cmd = + (struct lpfc_io_buf *) pIocbIn->io_buf; + struct lpfc_vport *vport = pIocbIn->vport; + struct lpfc_rport_data *rdata = lpfc_cmd->rdata; + struct lpfc_nodelist *pnode = rdata->pnode; + struct scsi_cmnd *cmd; + unsigned long flags; + struct lpfc_fast_path_event *fast_path_evt; + struct Scsi_Host *shost; + int idx; + uint32_t logit = LOG_FCP; + + /* Guard against abort handler being called at same time */ + spin_lock(&lpfc_cmd->buf_lock); + + /* Sanity check on return of outstanding command */ + cmd = lpfc_cmd->pCmd; + if (!cmd || !phba) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2621 IO completion: Not an active IO\n"); + spin_unlock(&lpfc_cmd->buf_lock); + return; + } + + idx = lpfc_cmd->cur_iocbq.hba_wqidx; + if (phba->sli4_hba.hdwq) + phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) + this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); +#endif + shost = cmd->device->host; + + lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); + lpfc_cmd->status = pIocbOut->iocb.ulpStatus; + /* pick up SLI4 exchange busy status from HBA */ + lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; + if (pIocbOut->cmd_flag & LPFC_EXCHANGE_BUSY) + lpfc_cmd->flags |= LPFC_SBUF_XBUSY; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (lpfc_cmd->prot_data_type) { + struct scsi_dif_tuple *src = NULL; + + src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; + /* + * Used to restore any changes to protection + * data for error injection. + */ + switch (lpfc_cmd->prot_data_type) { + case LPFC_INJERR_REFTAG: + src->ref_tag = + lpfc_cmd->prot_data; + break; + case LPFC_INJERR_APPTAG: + src->app_tag = + (uint16_t)lpfc_cmd->prot_data; + break; + case LPFC_INJERR_GUARD: + src->guard_tag = + (uint16_t)lpfc_cmd->prot_data; + break; + default: + break; + } + + lpfc_cmd->prot_data = 0; + lpfc_cmd->prot_data_type = 0; + lpfc_cmd->prot_data_segment = NULL; + } +#endif + + if (unlikely(lpfc_cmd->status)) { + if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && + (lpfc_cmd->result & IOERR_DRVR_MASK)) + lpfc_cmd->status = IOSTAT_DRIVER_REJECT; + else if (lpfc_cmd->status >= IOSTAT_CNT) + lpfc_cmd->status = IOSTAT_DEFAULT; + if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && + !lpfc_cmd->fcp_rsp->rspStatus3 && + (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && + !(vport->cfg_log_verbose & LOG_FCP_UNDER)) + logit = 0; + else + logit = LOG_FCP | LOG_FCP_UNDER; + lpfc_printf_vlog(vport, KERN_WARNING, logit, + "9030 FCP cmd x%x failed <%d/%lld> " + "status: x%x result: x%x " + "sid: x%x did: x%x oxid: x%x " + "Data: x%x x%x\n", + cmd->cmnd[0], + cmd->device ? cmd->device->id : 0xffff, + cmd->device ? cmd->device->lun : 0xffff, + lpfc_cmd->status, lpfc_cmd->result, + vport->fc_myDID, + (pnode) ? pnode->nlp_DID : 0, + phba->sli_rev == LPFC_SLI_REV4 ? 
+ lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, + pIocbOut->iocb.ulpContext, + lpfc_cmd->cur_iocbq.iocb.ulpIoTag); + + switch (lpfc_cmd->status) { + case IOSTAT_FCP_RSP_ERROR: + /* Call FCP RSP handler to determine result */ + lpfc_handle_fcp_err(vport, lpfc_cmd, + pIocbOut->iocb.un.fcpi.fcpi_parm); + break; + case IOSTAT_NPORT_BSY: + case IOSTAT_FABRIC_BSY: + cmd->result = DID_TRANSPORT_DISRUPTED << 16; + fast_path_evt = lpfc_alloc_fast_evt(phba); + if (!fast_path_evt) + break; + fast_path_evt->un.fabric_evt.event_type = + FC_REG_FABRIC_EVENT; + fast_path_evt->un.fabric_evt.subcategory = + (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? + LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; + if (pnode) { + memcpy(&fast_path_evt->un.fabric_evt.wwpn, + &pnode->nlp_portname, + sizeof(struct lpfc_name)); + memcpy(&fast_path_evt->un.fabric_evt.wwnn, + &pnode->nlp_nodename, + sizeof(struct lpfc_name)); + } + fast_path_evt->vport = vport; + fast_path_evt->work_evt.evt = + LPFC_EVT_FASTPATH_MGMT_EVT; + spin_lock_irqsave(&phba->hbalock, flags); + list_add_tail(&fast_path_evt->work_evt.evt_listp, + &phba->work_list); + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_worker_wake_up(phba); + break; + case IOSTAT_LOCAL_REJECT: + case IOSTAT_REMOTE_STOP: + if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || + lpfc_cmd->result == + IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || + lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || + lpfc_cmd->result == + IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { + cmd->result = DID_NO_CONNECT << 16; + break; + } + if (lpfc_cmd->result == IOERR_INVALID_RPI || + lpfc_cmd->result == IOERR_NO_RESOURCES || + lpfc_cmd->result == IOERR_ABORT_REQUESTED || + lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { + cmd->result = DID_TRANSPORT_DISRUPTED << 16; + break; + } + if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || + lpfc_cmd->result == IOERR_TX_DMA_FAILED) && + pIocbOut->iocb.unsli3.sli3_bg.bgstat) { + if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { + /* + * This is a response for a BG enabled + * cmd. Parse BG error + */ + lpfc_parse_bg_err(phba, lpfc_cmd, + pIocbOut); + break; + } else { + lpfc_printf_vlog(vport, KERN_WARNING, + LOG_BG, + "9031 non-zero BGSTAT " + "on unprotected cmd\n"); + } + } + if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP) + && (phba->sli_rev == LPFC_SLI_REV4) + && pnode) { + /* This IO was aborted by the target, we don't + * know the rxid and because we did not send the + * ABTS we cannot generate and RRQ. 
+ */ + lpfc_set_rrq_active(phba, pnode, + lpfc_cmd->cur_iocbq.sli4_lxritag, + 0, 0); + } + fallthrough; + default: + cmd->result = DID_ERROR << 16; + break; + } + + if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) + cmd->result = DID_TRANSPORT_DISRUPTED << 16 | + SAM_STAT_BUSY; + } else + cmd->result = DID_OK << 16; + + if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { + uint32_t *lp = (uint32_t *)cmd->sense_buffer; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0710 Iodone <%d/%llu> cmd x%px, error " + "x%x SNS x%x x%x Data: x%x x%x\n", + cmd->device->id, cmd->device->lun, cmd, + cmd->result, *lp, *(lp + 3), cmd->retries, + scsi_get_resid(cmd)); + } + + if (vport->cfg_max_scsicmpl_time && + time_after(jiffies, lpfc_cmd->start_time + + msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { + spin_lock_irqsave(shost->host_lock, flags); + if (pnode) { + if (pnode->cmd_qdepth > + atomic_read(&pnode->cmd_pending) && + (atomic_read(&pnode->cmd_pending) > + LPFC_MIN_TGT_QDEPTH) && + ((cmd->cmnd[0] == READ_10) || + (cmd->cmnd[0] == WRITE_10))) + pnode->cmd_qdepth = + atomic_read(&pnode->cmd_pending); + + pnode->last_change_time = jiffies; + } + spin_unlock_irqrestore(shost->host_lock, flags); + } + lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); + + lpfc_cmd->pCmd = NULL; + spin_unlock(&lpfc_cmd->buf_lock); + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (lpfc_cmd->ts_cmd_start) { + lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp; + lpfc_cmd->ts_data_io = ktime_get_ns(); + phba->ktime_last_cmd = lpfc_cmd->ts_data_io; + lpfc_io_ktime(phba, lpfc_cmd); + } +#endif + + /* The sdev is not guaranteed to be valid post scsi_done upcall. */ + scsi_done(cmd); + + /* + * If there is an abort thread waiting for command completion + * wake up the thread. + */ + spin_lock(&lpfc_cmd->buf_lock); + lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED; + if (lpfc_cmd->waitq) + wake_up(lpfc_cmd->waitq); + spin_unlock(&lpfc_cmd->buf_lock); + + lpfc_release_scsi_buf(phba, lpfc_cmd); +} + +/** + * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO + * @vport: Pointer to vport object. + * @lpfc_cmd: The scsi buffer which is going to be prep'ed. + * @tmo: timeout value for the IO + * + * Based on the data-direction of the command, initialize IOCB + * in the I/O buffer. Fill in the IOCB fields which are independent + * of the scsi buffer + * + * RETURNS 0 - SUCCESS, + **/ +static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport, + struct lpfc_io_buf *lpfc_cmd, + uint8_t tmo) +{ + IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; + struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq; + struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; + struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; + struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; + int datadir = scsi_cmnd->sc_data_direction; + u32 fcpdl; + + piocbq->iocb.un.fcpi.fcpi_XRdy = 0; + + /* + * There are three possibilities here - use scatter-gather segment, use + * the single mapping, or neither. Start the lpfc command prep by + * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first + * data bde entry. 
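[Editorial aside: the cmd_qdepth adjustment above implements a simple ramp-down rule. The sketch below restates it with hypothetical names and drops the READ_10/WRITE_10 opcode filter the driver applies: if a completion took longer than the configured limit while more than a minimum number of IOs were outstanding, the target's queue depth is cut to the current outstanding count.]

#include <stdio.h>

#define MIN_TGT_QDEPTH 2	/* hypothetical floor, see LPFC_MIN_TGT_QDEPTH */

static unsigned int trim_qdepth(unsigned int cur_qdepth,
				unsigned int outstanding,
				int completion_was_slow)
{
	if (completion_was_slow &&
	    cur_qdepth > outstanding &&
	    outstanding > MIN_TGT_QDEPTH)
		return outstanding;	/* new, lower queue depth */
	return cur_qdepth;		/* leave it alone */
}

int main(void)
{
	printf("%u\n", trim_qdepth(64, 17, 1));	/* slow IO: trim to 17 */
	printf("%u\n", trim_qdepth(64, 17, 0));	/* fast IO: stays 64 */
	return 0;
}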
+ */ + if (scsi_sg_count(scsi_cmnd)) { + if (datadir == DMA_TO_DEVICE) { + iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; + iocb_cmd->ulpPU = PARM_READ_CHECK; + if (vport->cfg_first_burst_size && + (pnode->nlp_flag & NLP_FIRSTBURST)) { + u32 xrdy_len; + + fcpdl = scsi_bufflen(scsi_cmnd); + xrdy_len = min(fcpdl, + vport->cfg_first_burst_size); + piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len; + } + fcp_cmnd->fcpCntl3 = WRITE_DATA; + } else { + iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; + iocb_cmd->ulpPU = PARM_READ_CHECK; + fcp_cmnd->fcpCntl3 = READ_DATA; + } + } else { + iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; + iocb_cmd->un.fcpi.fcpi_parm = 0; + iocb_cmd->ulpPU = 0; + fcp_cmnd->fcpCntl3 = 0; + } + + /* + * Finish initializing those IOCB fields that are independent + * of the scsi_cmnd request_buffer + */ + piocbq->iocb.ulpContext = pnode->nlp_rpi; + if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) + piocbq->iocb.ulpFCP2Rcvy = 1; + else + piocbq->iocb.ulpFCP2Rcvy = 0; + + piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); + piocbq->io_buf = lpfc_cmd; + if (!piocbq->cmd_cmpl) + piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl; + piocbq->iocb.ulpTimeout = tmo; + piocbq->vport = vport; + return 0; +} + +/** + * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO + * @vport: Pointer to vport object. + * @lpfc_cmd: The scsi buffer which is going to be prep'ed. + * @tmo: timeout value for the IO + * + * Based on the data-direction of the command copy WQE template + * to I/O buffer WQE. Fill in the WQE fields which are independent + * of the scsi buffer + * + * RETURNS 0 - SUCCESS, + **/ +static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport, + struct lpfc_io_buf *lpfc_cmd, + uint8_t tmo) +{ + struct lpfc_hba *phba = vport->phba; + struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; + struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; + struct lpfc_sli4_hdw_queue *hdwq = NULL; + struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; + struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; + union lpfc_wqe128 *wqe = &pwqeq->wqe; + u16 idx = lpfc_cmd->hdwq_no; + int datadir = scsi_cmnd->sc_data_direction; + + hdwq = &phba->sli4_hba.hdwq[idx]; + + /* Initialize 64 bytes only */ + memset(wqe, 0, sizeof(union lpfc_wqe128)); + + /* + * There are three possibilities here - use scatter-gather segment, use + * the single mapping, or neither. 
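[Editorial aside: stripped of the IOCB plumbing, the SLI-3 direction handling just shown picks one of three opcodes and, for writes on a login that negotiated first burst, primes how much data may flow before the target's XFER_RDY. A stand-alone sketch with string opcodes and a hypothetical struct in place of the real CMD_FCP_* values and NLP_FIRSTBURST flag:]

#include <stdio.h>
#include <stdint.h>

enum dir { DIR_NONE, DIR_TO_DEVICE, DIR_FROM_DEVICE };

struct io_setup {
	const char *opcode;
	uint32_t    xrdy_len;	/* first burst for writes, else 0 */
};

static struct io_setup prep(enum dir d, uint32_t buflen,
			    uint32_t first_burst, int first_burst_ok)
{
	struct io_setup s = { "FCP_ICMND64", 0 };	/* no data phase */

	if (d == DIR_TO_DEVICE) {
		s.opcode = "FCP_IWRITE64";
		if (first_burst && first_burst_ok)
			s.xrdy_len = buflen < first_burst ? buflen : first_burst;
	} else if (d == DIR_FROM_DEVICE) {
		s.opcode = "FCP_IREAD64";
	}
	return s;
}

int main(void)
{
	struct io_setup s = prep(DIR_TO_DEVICE, 8192, 65536, 1);

	printf("%s xrdy=%u\n", s.opcode, s.xrdy_len);
	return 0;
}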
+ */ + if (scsi_sg_count(scsi_cmnd)) { + if (datadir == DMA_TO_DEVICE) { + /* From the iwrite template, initialize words 7 - 11 */ + memcpy(&wqe->words[7], + &lpfc_iwrite_cmd_template.words[7], + sizeof(uint32_t) * 5); + + fcp_cmnd->fcpCntl3 = WRITE_DATA; + if (hdwq) + hdwq->scsi_cstat.output_requests++; + } else { + /* From the iread template, initialize words 7 - 11 */ + memcpy(&wqe->words[7], + &lpfc_iread_cmd_template.words[7], + sizeof(uint32_t) * 5); + + /* Word 7 */ + bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo); + + fcp_cmnd->fcpCntl3 = READ_DATA; + if (hdwq) + hdwq->scsi_cstat.input_requests++; + + /* For a CMF Managed port, iod must be zero'ed */ + if (phba->cmf_active_mode == LPFC_CFG_MANAGED) + bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, + LPFC_WQE_IOD_NONE); + } + } else { + /* From the icmnd template, initialize words 4 - 11 */ + memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], + sizeof(uint32_t) * 8); + + /* Word 7 */ + bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo); + + fcp_cmnd->fcpCntl3 = 0; + if (hdwq) + hdwq->scsi_cstat.control_requests++; + } + + /* + * Finish initializing those WQE fields that are independent + * of the request_buffer + */ + + /* Word 3 */ + bf_set(payload_offset_len, &wqe->fcp_icmd, + sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, + phba->sli4_hba.rpi_ids[pnode->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); + + /* Word 7*/ + if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) + bf_set(wqe_erp, &wqe->generic.wqe_com, 1); + + bf_set(wqe_class, &wqe->generic.wqe_com, + (pnode->nlp_fcp_info & 0x0f)); + + /* Word 8 */ + wqe->generic.wqe_com.abort_tag = pwqeq->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); + + pwqeq->vport = vport; + pwqeq->io_buf = lpfc_cmd; + pwqeq->hba_wqidx = lpfc_cmd->hdwq_no; + pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl; + + return 0; +} + +/** + * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: The scsi command which needs to send. + * @pnode: Pointer to lpfc_nodelist. + * + * This routine initializes fcp_cmnd and iocb data structure from scsi command + * to transfer for device with SLI3 interface spec. + **/ +static int +lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, + struct lpfc_nodelist *pnode) +{ + struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; + struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; + u8 *ptr; + + if (!pnode) + return 0; + + lpfc_cmd->fcp_rsp->rspSnsLen = 0; + /* clear task management bits */ + lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; + + int_to_scsilun(lpfc_cmd->pCmd->device->lun, + &lpfc_cmd->fcp_cmnd->fcp_lun); + + ptr = &fcp_cmnd->fcpCdb[0]; + memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); + if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) { + ptr += scsi_cmnd->cmd_len; + memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len)); + } + + fcp_cmnd->fcpCntl1 = SIMPLE_Q; + + lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout); + + return 0; +} + +/** + * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: Pointer to lpfc_io_buf data structure. + * @lun: Logical unit number. + * @task_mgmt_cmd: SCSI task management command. + * + * This routine creates FCP information unit corresponding to @task_mgmt_cmd + * for device with SLI-3 interface spec. 
+ * + * Return codes: + * 0 - Error + * 1 - Success + **/ +static int +lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, + struct lpfc_io_buf *lpfc_cmd, + u64 lun, u8 task_mgmt_cmd) +{ + struct lpfc_iocbq *piocbq; + IOCB_t *piocb; + struct fcp_cmnd *fcp_cmnd; + struct lpfc_rport_data *rdata = lpfc_cmd->rdata; + struct lpfc_nodelist *ndlp = rdata->pnode; + + if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) + return 0; + + piocbq = &(lpfc_cmd->cur_iocbq); + piocbq->vport = vport; + + piocb = &piocbq->iocb; + + fcp_cmnd = lpfc_cmd->fcp_cmnd; + /* Clear out any old data in the FCP command area */ + memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); + int_to_scsilun(lun, &fcp_cmnd->fcp_lun); + fcp_cmnd->fcpCntl2 = task_mgmt_cmd; + if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED)) + lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); + piocb->ulpCommand = CMD_FCP_ICMND64_CR; + piocb->ulpContext = ndlp->nlp_rpi; + piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0; + piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); + piocb->ulpPU = 0; + piocb->un.fcpi.fcpi_parm = 0; + + /* ulpTimeout is only one byte */ + if (lpfc_cmd->timeout > 0xff) { + /* + * Do not timeout the command at the firmware level. + * The driver will provide the timeout mechanism. + */ + piocb->ulpTimeout = 0; + } else + piocb->ulpTimeout = lpfc_cmd->timeout; + + return 1; +} + +/** + * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: Pointer to lpfc_io_buf data structure. + * @lun: Logical unit number. + * @task_mgmt_cmd: SCSI task management command. + * + * This routine creates FCP information unit corresponding to @task_mgmt_cmd + * for device with SLI-4 interface spec. + * + * Return codes: + * 0 - Error + * 1 - Success + **/ +static int +lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport, + struct lpfc_io_buf *lpfc_cmd, + u64 lun, u8 task_mgmt_cmd) +{ + struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; + union lpfc_wqe128 *wqe = &pwqeq->wqe; + struct fcp_cmnd *fcp_cmnd; + struct lpfc_rport_data *rdata = lpfc_cmd->rdata; + struct lpfc_nodelist *ndlp = rdata->pnode; + + if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) + return 0; + + pwqeq->vport = vport; + /* Initialize 64 bytes only */ + memset(wqe, 0, sizeof(union lpfc_wqe128)); + + /* From the icmnd template, initialize words 4 - 11 */ + memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], + sizeof(uint32_t) * 8); + + fcp_cmnd = lpfc_cmd->fcp_cmnd; + /* Clear out any old data in the FCP command area */ + memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); + int_to_scsilun(lun, &fcp_cmnd->fcp_lun); + fcp_cmnd->fcpCntl3 = 0; + fcp_cmnd->fcpCntl2 = task_mgmt_cmd; + + bf_set(payload_offset_len, &wqe->fcp_icmd, + sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + bf_set(cmd_buff_len, &wqe->fcp_icmd, 0); + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, /* ulpContext */ + vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, + ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0)); + bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, + (ndlp->nlp_fcp_info & 0x0f)); + + /* ulpTimeout is only one byte */ + if (lpfc_cmd->timeout > 0xff) { + /* + * Do not timeout the command at the firmware level. + * The driver will provide the timeout mechanism. 
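[Editorial aside: both task-management prep routines above squeeze the driver timeout into a one-byte firmware field, writing 0 (firmware never times the command out) whenever the value will not fit, so the driver's own timer governs instead. Stand-alone equivalent with a hypothetical helper name:]

#include <stdio.h>
#include <stdint.h>

static uint8_t fw_timeout(unsigned int driver_timeout_secs)
{
	/* 0 means "no firmware timeout, driver timer takes over" */
	return driver_timeout_secs > 0xff ? 0 : (uint8_t)driver_timeout_secs;
}

int main(void)
{
	printf("%d %d %d\n", fw_timeout(30), fw_timeout(255), fw_timeout(600));
	return 0;
}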
+ */ + bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, 0); + } else { + bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, lpfc_cmd->timeout); + } + + lpfc_prep_embed_io(vport->phba, lpfc_cmd); + bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); + wqe->generic.wqe_com.abort_tag = pwqeq->iotag; + bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); + + lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd); + + return 1; +} + +/** + * lpfc_scsi_api_table_setup - Set up scsi api function jump table + * @phba: The hba struct for which this call is being executed. + * @dev_grp: The HBA PCI-Device group number. + * + * This routine sets up the SCSI interface API function jump table in @phba + * struct. + * Returns: 0 - success, -ENODEV - failure. + **/ +int +lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) +{ + + phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf; + + switch (dev_grp) { + case LPFC_PCI_DEV_LP: + phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; + phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3; + phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; + phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3; + phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3; + phba->lpfc_scsi_prep_task_mgmt_cmd = + lpfc_scsi_prep_task_mgmt_cmd_s3; + break; + case LPFC_PCI_DEV_OC: + phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; + phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4; + phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; + phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4; + phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4; + phba->lpfc_scsi_prep_task_mgmt_cmd = + lpfc_scsi_prep_task_mgmt_cmd_s4; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1418 Invalid HBA PCI-device group: 0x%x\n", + dev_grp); + return -ENODEV; + } + phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; + return 0; +} + +/** + * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command + * @phba: The Hba for which this call is being executed. + * @cmdiocbq: Pointer to lpfc_iocbq data structure. + * @rspiocbq: Pointer to lpfc_iocbq data structure. + * + * This routine is IOCB completion routine for device reset and target reset + * routine. This routine release scsi buffer associated with lpfc_cmd. + **/ +static void +lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocbq, + struct lpfc_iocbq *rspiocbq) +{ + struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf; + if (lpfc_cmd) + lpfc_release_scsi_buf(phba, lpfc_cmd); + return; +} + +/** + * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check + * if issuing a pci_bus_reset is possibly unsafe + * @phba: lpfc_hba pointer. + * + * Description: + * Walks the bus_list to ensure only PCI devices with Emulex + * vendor id, device ids that support hot reset, and only one occurrence + * of function 0. 
+ * + * Returns: + * -EBADSLT, detected invalid device + * 0, successful + */ +int +lpfc_check_pci_resettable(struct lpfc_hba *phba) +{ + const struct pci_dev *pdev = phba->pcidev; + struct pci_dev *ptr = NULL; + u8 counter = 0; + + /* Walk the list of devices on the pci_dev's bus */ + list_for_each_entry(ptr, &pdev->bus->devices, bus_list) { + /* Check for Emulex Vendor ID */ + if (ptr->vendor != PCI_VENDOR_ID_EMULEX) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "8346 Non-Emulex vendor found: " + "0x%04x\n", ptr->vendor); + return -EBADSLT; + } + + /* Check for valid Emulex Device ID */ + if (phba->sli_rev != LPFC_SLI_REV4 || + phba->hba_flag & HBA_FCOE_MODE) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "8347 Incapable PCI reset device: " + "0x%04x\n", ptr->device); + return -EBADSLT; + } + + /* Check for only one function 0 ID to ensure only one HBA on + * secondary bus + */ + if (ptr->devfn == 0) { + if (++counter > 1) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "8348 More than one device on " + "secondary bus found\n"); + return -EBADSLT; + } + } + } + + return 0; +} + +/** + * lpfc_info - Info entry point of scsi_host_template data structure + * @host: The scsi host for which this call is being executed. + * + * This routine provides module information about hba. + * + * Reutrn code: + * Pointer to char - Success. + **/ +const char * +lpfc_info(struct Scsi_Host *host) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; + struct lpfc_hba *phba = vport->phba; + int link_speed = 0; + static char lpfcinfobuf[384]; + char tmp[384] = {0}; + + memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf)); + if (phba && phba->pcidev){ + /* Model Description */ + scnprintf(tmp, sizeof(tmp), phba->ModelDesc); + if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= + sizeof(lpfcinfobuf)) + goto buffer_done; + + /* PCI Info */ + scnprintf(tmp, sizeof(tmp), + " on PCI bus %02x device %02x irq %d", + phba->pcidev->bus->number, phba->pcidev->devfn, + phba->pcidev->irq); + if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= + sizeof(lpfcinfobuf)) + goto buffer_done; + + /* Port Number */ + if (phba->Port[0]) { + scnprintf(tmp, sizeof(tmp), " port %s", phba->Port); + if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= + sizeof(lpfcinfobuf)) + goto buffer_done; + } + + /* Link Speed */ + link_speed = lpfc_sli_port_speed_get(phba); + if (link_speed != 0) { + scnprintf(tmp, sizeof(tmp), + " Logical Link Speed: %d Mbps", link_speed); + if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= + sizeof(lpfcinfobuf)) + goto buffer_done; + } + + /* PCI resettable */ + if (!lpfc_check_pci_resettable(phba)) { + scnprintf(tmp, sizeof(tmp), " PCI resettable"); + strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)); + } + } + +buffer_done: + return lpfcinfobuf; +} + +/** + * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba + * @phba: The Hba for which this call is being executed. + * + * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo. + * The default value of cfg_poll_tmo is 10 milliseconds. + **/ +static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) +{ + unsigned long poll_tmo_expires = + (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); + + if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq)) + mod_timer(&phba->fcp_poll_timer, + poll_tmo_expires); +} + +/** + * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA + * @phba: The Hba for which this call is being executed. + * + * This routine starts the fcp_poll_timer of @phba. 
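lpfc_info() above assembles its banner by formatting each fragment with scnprintf() and appending it with strlcat(), jumping to buffer_done as soon as an append would overflow the fixed 384-byte buffer. The sketch below shows the same bounded-append idea using only the standard C library (strlcat() is not in glibc); the fragment contents are hypothetical.

    #include <stdarg.h>
    #include <stdio.h>

    /* Append formatted text; returns 0 on success, -1 once the buffer is full. */
    static int append(char *buf, size_t size, size_t *off, const char *fmt, ...)
    {
        va_list ap;
        int n;

        if (*off >= size)
            return -1;

        va_start(ap, fmt);
        n = vsnprintf(buf + *off, size - *off, fmt, ap);
        va_end(ap);

        if (n < 0 || (size_t)n >= size - *off)
            return -1;            /* output was truncated: stop appending */
        *off += (size_t)n;
        return 0;
    }

    int main(void)
    {
        char info[64];
        size_t off = 0;

        info[0] = '\0';
        if (append(info, sizeof(info), &off, "%s", "Example HBA") ||
            append(info, sizeof(info), &off, " on PCI bus %02x device %02x", 2, 5) ||
            append(info, sizeof(info), &off, " irq %d", 33))
            ; /* ran out of room: keep whatever fit, as the driver does */

        puts(info);
        return 0;
    }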
+ **/ +void lpfc_poll_start_timer(struct lpfc_hba * phba) +{ + lpfc_poll_rearm_timer(phba); +} + +/** + * lpfc_poll_timeout - Restart polling timer + * @t: Timer construct where lpfc_hba data structure pointer is obtained. + * + * This routine restarts fcp_poll timer, when FCP ring polling is enable + * and FCP Ring interrupt is disable. + **/ +void lpfc_poll_timeout(struct timer_list *t) +{ + struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer); + + if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { + lpfc_sli_handle_fast_ring_event(phba, + &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); + + if (phba->cfg_poll & DISABLE_FCP_RING_INT) + lpfc_poll_rearm_timer(phba); + } +} + +/* + * lpfc_is_command_vm_io - get the UUID from blk cgroup + * @cmd: Pointer to scsi_cmnd data structure + * Returns UUID if present, otherwise NULL + */ +static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd) +{ + struct bio *bio = scsi_cmd_to_rq(cmd)->bio; + + if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio) + return NULL; + return blkcg_get_fc_appid(bio); +} + +/** + * lpfc_queuecommand - scsi_host_template queuecommand entry point + * @shost: kernel scsi host pointer. + * @cmnd: Pointer to scsi_cmnd data structure. + * + * Driver registers this routine to scsi midlayer to submit a @cmd to process. + * This routine prepares an IOCB from scsi command and provides to firmware. + * The @done callback is invoked after driver finished processing the command. + * + * Return value : + * 0 - Success + * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. + **/ +static int +lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cur_iocbq = NULL; + struct lpfc_rport_data *rdata; + struct lpfc_nodelist *ndlp; + struct lpfc_io_buf *lpfc_cmd; + struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + int err, idx; + u8 *uuid = NULL; + uint64_t start; + + start = ktime_get_ns(); + rdata = lpfc_rport_data_from_scsi_device(cmnd->device); + + /* sanity check on references */ + if (unlikely(!rdata) || unlikely(!rport)) + goto out_fail_command; + + err = fc_remote_port_chkready(rport); + if (err) { + cmnd->result = err; + goto out_fail_command; + } + ndlp = rdata->pnode; + + if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && + (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) { + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" + " op:%02x str=%s without registering for" + " BlockGuard - Rejecting command\n", + cmnd->cmnd[0], scsi_get_prot_op(cmnd), + dif_op_str[scsi_get_prot_op(cmnd)]); + goto out_fail_command; + } + + /* + * Catch race where our node has transitioned, but the + * transport is still transitioning. 
+ */ + if (!ndlp) + goto out_tgt_busy1; + + /* Check if IO qualifies for CMF */ + if (phba->cmf_active_mode != LPFC_CFG_OFF && + cmnd->sc_data_direction == DMA_FROM_DEVICE && + (scsi_sg_count(cmnd))) { + /* Latency start time saved in rx_cmd_start later in routine */ + err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd)); + if (err) + goto out_tgt_busy1; + } + + if (lpfc_ndlp_check_qdepth(phba, ndlp)) { + if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, + "3377 Target Queue Full, scsi Id:%d " + "Qdepth:%d Pending command:%d" + " WWNN:%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x, " + " WWPN:%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x", + ndlp->nlp_sid, ndlp->cmd_qdepth, + atomic_read(&ndlp->cmd_pending), + ndlp->nlp_nodename.u.wwn[0], + ndlp->nlp_nodename.u.wwn[1], + ndlp->nlp_nodename.u.wwn[2], + ndlp->nlp_nodename.u.wwn[3], + ndlp->nlp_nodename.u.wwn[4], + ndlp->nlp_nodename.u.wwn[5], + ndlp->nlp_nodename.u.wwn[6], + ndlp->nlp_nodename.u.wwn[7], + ndlp->nlp_portname.u.wwn[0], + ndlp->nlp_portname.u.wwn[1], + ndlp->nlp_portname.u.wwn[2], + ndlp->nlp_portname.u.wwn[3], + ndlp->nlp_portname.u.wwn[4], + ndlp->nlp_portname.u.wwn[5], + ndlp->nlp_portname.u.wwn[6], + ndlp->nlp_portname.u.wwn[7]); + goto out_tgt_busy2; + } + } + + lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd); + if (lpfc_cmd == NULL) { + lpfc_rampdown_queue_depth(phba); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, + "0707 driver's buffer pool is empty, " + "IO busied\n"); + goto out_host_busy; + } + lpfc_cmd->rx_cmd_start = start; + + cur_iocbq = &lpfc_cmd->cur_iocbq; + /* + * Store the midlayer's command structure for the completion phase + * and complete the command initialization. + */ + lpfc_cmd->pCmd = cmnd; + lpfc_cmd->rdata = rdata; + lpfc_cmd->ndlp = ndlp; + cur_iocbq->cmd_cmpl = NULL; + cmnd->host_scribble = (unsigned char *)lpfc_cmd; + + err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); + if (err) + goto out_host_busy_release_buf; + + if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { + if (vport->phba->cfg_enable_bg) { + lpfc_printf_vlog(vport, + KERN_INFO, LOG_SCSI_CMD, + "9033 BLKGRD: rcvd %s cmd:x%x " + "reftag x%x cnt %u pt %x\n", + dif_op_str[scsi_get_prot_op(cmnd)], + cmnd->cmnd[0], + scsi_prot_ref_tag(cmnd), + scsi_logical_block_count(cmnd), + (cmnd->cmnd[1]>>5)); + } + err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); + } else { + if (vport->phba->cfg_enable_bg) { + lpfc_printf_vlog(vport, + KERN_INFO, LOG_SCSI_CMD, + "9038 BLKGRD: rcvd PROT_NORMAL cmd: " + "x%x reftag x%x cnt %u pt %x\n", + cmnd->cmnd[0], + scsi_prot_ref_tag(cmnd), + scsi_logical_block_count(cmnd), + (cmnd->cmnd[1]>>5)); + } + err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); + } + + if (unlikely(err)) { + if (err == 2) { + cmnd->result = DID_ERROR << 16; + goto out_fail_command_release_buf; + } + goto out_host_busy_free_buf; + } + + /* check the necessary and sufficient condition to support VMID */ + if (lpfc_is_vmid_enabled(phba) && + (ndlp->vmid_support || + phba->pport->vmid_priority_tagging == + LPFC_VMID_PRIO_TAG_ALL_TARGETS)) { + /* is the I/O generated by a VM, get the associated virtual */ + /* entity id */ + uuid = lpfc_is_command_vm_io(cmnd); + + if (uuid) { + err = lpfc_vmid_get_appid(vport, uuid, + cmnd->sc_data_direction, + (union lpfc_vmid_io_tag *) + &cur_iocbq->vmid_tag); + if (!err) + cur_iocbq->cmd_flag |= LPFC_IO_VMID; + } + } + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) + 
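The queue-full branch above compares the target's outstanding-command count against its advertised queue depth and returns a busy status so the midlayer retries later. A compact userspace analogue of that gate, using a C11 atomic counter and invented names; like the driver's read of cmd_pending, the check is a soft limit rather than a hard reservation.

    #include <stdatomic.h>
    #include <stdio.h>

    #define TARGET_BUSY 1   /* stand-in for SCSI_MLQUEUE_TARGET_BUSY */

    struct target {
        atomic_int cmd_pending;
        int cmd_qdepth;
    };

    static int submit(struct target *tgt)
    {
        if (atomic_load(&tgt->cmd_pending) >= tgt->cmd_qdepth)
            return TARGET_BUSY;          /* caller should retry later */

        atomic_fetch_add(&tgt->cmd_pending, 1);
        /* ... issue the command; the completion path decrements ... */
        return 0;
    }

    static void complete(struct target *tgt)
    {
        atomic_fetch_sub(&tgt->cmd_pending, 1);
    }

    int main(void)
    {
        struct target tgt = { .cmd_pending = 0, .cmd_qdepth = 2 };
        int i;

        for (i = 0; i < 3; i++)
            printf("submit -> %d\n", submit(&tgt));   /* third one reports busy */
        complete(&tgt);
        printf("submit -> %d\n", submit(&tgt));       /* room again */
        return 0;
    }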
this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); +#endif + /* Issue I/O to adapter */ + err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq, + SLI_IOCB_RET_IOCB); +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (start) { + lpfc_cmd->ts_cmd_start = start; + lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd; + lpfc_cmd->ts_cmd_wqput = ktime_get_ns(); + } else { + lpfc_cmd->ts_cmd_start = 0; + } +#endif + if (err) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "3376 FCP could not issue iocb err %x " + "FCP cmd x%x <%d/%llu> " + "sid: x%x did: x%x oxid: x%x " + "Data: x%x x%x x%x x%x\n", + err, cmnd->cmnd[0], + cmnd->device ? cmnd->device->id : 0xffff, + cmnd->device ? cmnd->device->lun : (u64)-1, + vport->fc_myDID, ndlp->nlp_DID, + phba->sli_rev == LPFC_SLI_REV4 ? + cur_iocbq->sli4_xritag : 0xffff, + phba->sli_rev == LPFC_SLI_REV4 ? + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] : + cur_iocbq->iocb.ulpContext, + cur_iocbq->iotag, + phba->sli_rev == LPFC_SLI_REV4 ? + bf_get(wqe_tmo, + &cur_iocbq->wqe.generic.wqe_com) : + cur_iocbq->iocb.ulpTimeout, + (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000)); + + goto out_host_busy_free_buf; + } + + if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { + lpfc_sli_handle_fast_ring_event(phba, + &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); + + if (phba->cfg_poll & DISABLE_FCP_RING_INT) + lpfc_poll_rearm_timer(phba); + } + + if (phba->cfg_xri_rebalancing) + lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no); + + return 0; + + out_host_busy_free_buf: + idx = lpfc_cmd->hdwq_no; + lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); + if (phba->sli4_hba.hdwq) { + switch (lpfc_cmd->fcp_cmnd->fcpCntl3) { + case WRITE_DATA: + phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--; + break; + case READ_DATA: + phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--; + break; + default: + phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--; + } + } + out_host_busy_release_buf: + lpfc_release_scsi_buf(phba, lpfc_cmd); + out_host_busy: + lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), + shost); + return SCSI_MLQUEUE_HOST_BUSY; + + out_tgt_busy2: + lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), + shost); + out_tgt_busy1: + return SCSI_MLQUEUE_TARGET_BUSY; + + out_fail_command_release_buf: + lpfc_release_scsi_buf(phba, lpfc_cmd); + lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), + shost); + + out_fail_command: + scsi_done(cmnd); + return 0; +} + +/* + * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport + * @vport: The virtual port for which this call is being executed. + */ +void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport) +{ + u32 bucket; + struct lpfc_vmid *cur; + + if (vport->port_type == LPFC_PHYSICAL_PORT) + del_timer_sync(&vport->phba->inactive_vmid_poll); + + kfree(vport->qfpa_res); + kfree(vport->vmid_priority.vmid_range); + kfree(vport->vmid); + + if (!hash_empty(vport->hash_table)) + hash_for_each(vport->hash_table, bucket, cur, hnode) + hash_del(&cur->hnode); + + vport->qfpa_res = NULL; + vport->vmid_priority.vmid_range = NULL; + vport->vmid = NULL; + vport->cur_vmid_cnt = 0; +} + +/** + * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point + * @cmnd: Pointer to scsi_cmnd data structure. + * + * This routine aborts @cmnd pending in base driver. 
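lpfc_queuecommand() unwinds its failures through a ladder of labels (out_host_busy_free_buf, out_host_busy_release_buf, out_host_busy, out_fail_command, ...), each releasing exactly the resources acquired before the corresponding failure point. The pattern in isolation, with placeholder resources instead of the driver's buffers:

    #include <stdio.h>
    #include <stdlib.h>

    static int issue_io(void)
    {
        char *cmd_buf = NULL, *dma_buf = NULL;
        int hw_busy = 1;    /* pretend the hardware rejected the command */
        int err = -1;

        cmd_buf = malloc(64);
        if (!cmd_buf)
            goto out;

        dma_buf = malloc(4096);
        if (!dma_buf)
            goto out_free_cmd;

        if (hw_busy) {
            err = -5;
            goto out_free_dma;
        }
        return 0;

    out_free_dma:
        free(dma_buf);      /* undo in reverse order of acquisition */
    out_free_cmd:
        free(cmd_buf);
    out:
        return err;
    }

    int main(void)
    {
        printf("issue_io() = %d\n", issue_io());
        return 0;
    }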
+ * + * Return code : + * 0x2003 - Error + * 0x2002 - Success + **/ +static int +lpfc_abort_handler(struct scsi_cmnd *cmnd) +{ + struct Scsi_Host *shost = cmnd->device->host; + struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *iocb; + struct lpfc_io_buf *lpfc_cmd; + int ret = SUCCESS, status = 0; + struct lpfc_sli_ring *pring_s4 = NULL; + struct lpfc_sli_ring *pring = NULL; + int ret_val; + unsigned long flags; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); + + status = fc_block_rport(rport); + if (status != 0 && status != SUCCESS) + return status; + + lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble; + if (!lpfc_cmd) + return ret; + + /* Guard against IO completion being called at same time */ + spin_lock_irqsave(&lpfc_cmd->buf_lock, flags); + + spin_lock(&phba->hbalock); + /* driver queued commands are in process of being flushed */ + if (phba->hba_flag & HBA_IOQ_FLUSH) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "3168 SCSI Layer abort requested I/O has been " + "flushed by LLD.\n"); + ret = FAILED; + goto out_unlock_hba; + } + + if (!lpfc_cmd->pCmd) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "2873 SCSI Layer I/O Abort Request IO CMPL Status " + "x%x ID %d LUN %llu\n", + SUCCESS, cmnd->device->id, cmnd->device->lun); + goto out_unlock_hba; + } + + iocb = &lpfc_cmd->cur_iocbq; + if (phba->sli_rev == LPFC_SLI_REV4) { + pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; + if (!pring_s4) { + ret = FAILED; + goto out_unlock_hba; + } + spin_lock(&pring_s4->ring_lock); + } + /* the command is in process of being cancelled */ + if (!(iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "3169 SCSI Layer abort requested I/O has been " + "cancelled by LLD.\n"); + ret = FAILED; + goto out_unlock_ring; + } + /* + * If pCmd field of the corresponding lpfc_io_buf structure + * points to a different SCSI command, then the driver has + * already completed this command, but the midlayer did not + * see the completion before the eh fired. Just return SUCCESS. + */ + if (lpfc_cmd->pCmd != cmnd) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "3170 SCSI Layer abort requested I/O has been " + "completed by LLD.\n"); + goto out_unlock_ring; + } + + WARN_ON(iocb->io_buf != lpfc_cmd); + + /* abort issued in recovery is still in progress */ + if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "3389 SCSI Layer I/O Abort Request is pending\n"); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring_s4->ring_lock); + spin_unlock(&phba->hbalock); + spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags); + goto wait_for_cmpl; + } + + lpfc_cmd->waitq = &waitq; + if (phba->sli_rev == LPFC_SLI_REV4) { + spin_unlock(&pring_s4->ring_lock); + ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb, + lpfc_sli_abort_fcp_cmpl); + } else { + pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; + ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb, + lpfc_sli_abort_fcp_cmpl); + } + + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + + if (ret_val != IOCB_SUCCESS) { + /* Indicate the IO is not being aborted by the driver. 
*/ + lpfc_cmd->waitq = NULL; + ret = FAILED; + goto out_unlock_hba; + } + + /* no longer need the lock after this point */ + spin_unlock(&phba->hbalock); + spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags); + + if (phba->cfg_poll & DISABLE_FCP_RING_INT) + lpfc_sli_handle_fast_ring_event(phba, + &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); + +wait_for_cmpl: + /* + * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait + * for abort to complete. + */ + wait_event_timeout(waitq, + (lpfc_cmd->pCmd != cmnd), + msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); + + spin_lock(&lpfc_cmd->buf_lock); + + if (lpfc_cmd->pCmd == cmnd) { + ret = FAILED; + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0748 abort handler timed out waiting " + "for aborting I/O (xri:x%x) to complete: " + "ret %#x, ID %d, LUN %llu\n", + iocb->sli4_xritag, ret, + cmnd->device->id, cmnd->device->lun); + } + + lpfc_cmd->waitq = NULL; + + spin_unlock(&lpfc_cmd->buf_lock); + goto out; + +out_unlock_ring: + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring_s4->ring_lock); +out_unlock_hba: + spin_unlock(&phba->hbalock); + spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags); +out: + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "0749 SCSI Layer I/O Abort Request Status x%x ID %d " + "LUN %llu\n", ret, cmnd->device->id, + cmnd->device->lun); + return ret; +} + +static char * +lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) +{ + switch (task_mgmt_cmd) { + case FCP_ABORT_TASK_SET: + return "ABORT_TASK_SET"; + case FCP_CLEAR_TASK_SET: + return "FCP_CLEAR_TASK_SET"; + case FCP_BUS_RESET: + return "FCP_BUS_RESET"; + case FCP_LUN_RESET: + return "FCP_LUN_RESET"; + case FCP_TARGET_RESET: + return "FCP_TARGET_RESET"; + case FCP_CLEAR_ACA: + return "FCP_CLEAR_ACA"; + case FCP_TERMINATE_TASK: + return "FCP_TERMINATE_TASK"; + default: + return "unknown"; + } +} + + +/** + * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: Pointer to lpfc_io_buf data structure. + * + * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded + * + * Return code : + * 0x2003 - Error + * 0x2002 - Success + **/ +static int +lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd) +{ + struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; + uint32_t rsp_info; + uint32_t rsp_len; + uint8_t rsp_info_code; + int ret = FAILED; + + + if (fcprsp == NULL) + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0703 fcp_rsp is missing\n"); + else { + rsp_info = fcprsp->rspStatus2; + rsp_len = be32_to_cpu(fcprsp->rspRspLen); + rsp_info_code = fcprsp->rspInfo3; + + + lpfc_printf_vlog(vport, KERN_INFO, + LOG_FCP, + "0706 fcp_rsp valid 0x%x," + " rsp len=%d code 0x%x\n", + rsp_info, + rsp_len, rsp_info_code); + + /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN + * field specifies the number of valid bytes of FCP_RSP_INFO. 
+ * The FCP_RSP_LEN field shall be set to 0x04 or 0x08 + */ + if ((fcprsp->rspStatus2 & RSP_LEN_VALID) && + ((rsp_len == 8) || (rsp_len == 4))) { + switch (rsp_info_code) { + case RSP_NO_FAILURE: + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0715 Task Mgmt No Failure\n"); + ret = SUCCESS; + break; + case RSP_TM_NOT_SUPPORTED: /* TM rejected */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0716 Task Mgmt Target " + "reject\n"); + break; + case RSP_TM_NOT_COMPLETED: /* TM failed */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0717 Task Mgmt Target " + "failed TM\n"); + break; + case RSP_TM_INVALID_LU: /* TM to invalid LU! */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0718 Task Mgmt to invalid " + "LUN\n"); + break; + } + } + } + return ret; +} + + +/** + * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler + * @vport: The virtual port for which this call is being executed. + * @rport: Pointer to remote port + * @tgt_id: Target ID of remote device. + * @lun_id: Lun number for the TMF + * @task_mgmt_cmd: type of TMF to send + * + * This routine builds and sends a TMF (SCSI Task Mgmt Function) to + * a remote port. + * + * Return Code: + * 0x2003 - Error + * 0x2002 - Success. + **/ +static int +lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport, + unsigned int tgt_id, uint64_t lun_id, + uint8_t task_mgmt_cmd) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_io_buf *lpfc_cmd; + struct lpfc_iocbq *iocbq; + struct lpfc_iocbq *iocbqrsp; + struct lpfc_rport_data *rdata; + struct lpfc_nodelist *pnode; + int ret; + int status; + + rdata = rport->dd_data; + if (!rdata || !rdata->pnode) + return FAILED; + pnode = rdata->pnode; + + lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL); + if (lpfc_cmd == NULL) + return FAILED; + lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; + lpfc_cmd->rdata = rdata; + lpfc_cmd->pCmd = NULL; + lpfc_cmd->ndlp = pnode; + + status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, + task_mgmt_cmd); + if (!status) { + lpfc_release_scsi_buf(phba, lpfc_cmd); + return FAILED; + } + + iocbq = &lpfc_cmd->cur_iocbq; + iocbqrsp = lpfc_sli_get_iocbq(phba); + if (iocbqrsp == NULL) { + lpfc_release_scsi_buf(phba, lpfc_cmd); + return FAILED; + } + iocbq->cmd_cmpl = lpfc_tskmgmt_def_cmpl; + iocbq->vport = vport; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0702 Issue %s to TGT %d LUN %llu " + "rpi x%x nlp_flag x%x Data: x%x x%x\n", + lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, + pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, + iocbq->cmd_flag); + + status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, + iocbq, iocbqrsp, lpfc_cmd->timeout); + if ((status != IOCB_SUCCESS) || + (get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) { + if (status != IOCB_SUCCESS || + get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR) + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0727 TMF %s to TGT %d LUN %llu " + "failed (%d, %d) cmd_flag x%x\n", + lpfc_taskmgmt_name(task_mgmt_cmd), + tgt_id, lun_id, + get_job_ulpstatus(phba, iocbqrsp), + get_job_word4(phba, iocbqrsp), + iocbq->cmd_flag); + /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ + if (status == IOCB_SUCCESS) { + if (get_job_ulpstatus(phba, iocbqrsp) == + IOSTAT_FCP_RSP_ERROR) + /* Something in the FCP_RSP was invalid. 
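lpfc_check_fcp_rsp() above only trusts the response code when the RSP_LEN_VALID bit is set and FCP_RSP_LEN is 4 or 8, and then treats RSP_NO_FAILURE as the only success case. A condensed standalone sketch of that validation, reusing the constants defined in lpfc_scsi.h later in this patch:

    #include <stdint.h>
    #include <stdio.h>

    #define RSP_LEN_VALID        0x01
    #define RSP_NO_FAILURE       0x00
    #define RSP_TM_NOT_SUPPORTED 0x04

    static int tm_response_ok(uint8_t status_flags, uint32_t rsp_len,
                              uint8_t rsp_code)
    {
        if (!(status_flags & RSP_LEN_VALID))
            return 0;                        /* no response info to trust */
        if (rsp_len != 4 && rsp_len != 8)
            return 0;                        /* malformed FCP_RSP_LEN */
        return rsp_code == RSP_NO_FAILURE;   /* anything else is a TM failure */
    }

    int main(void)
    {
        printf("%d\n", tm_response_ok(RSP_LEN_VALID, 8, RSP_NO_FAILURE));       /* 1 */
        printf("%d\n", tm_response_ok(RSP_LEN_VALID, 8, RSP_TM_NOT_SUPPORTED)); /* 0 */
        printf("%d\n", tm_response_ok(0, 8, RSP_NO_FAILURE));                   /* 0 */
        return 0;
    }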
+ * Check conditions */ + ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); + else + ret = FAILED; + } else if ((status == IOCB_TIMEDOUT) || + (status == IOCB_ABORTED)) { + ret = TIMEOUT_ERROR; + } else { + ret = FAILED; + } + } else + ret = SUCCESS; + + lpfc_sli_release_iocbq(phba, iocbqrsp); + + if (status != IOCB_TIMEDOUT) + lpfc_release_scsi_buf(phba, lpfc_cmd); + + return ret; +} + +/** + * lpfc_chk_tgt_mapped - + * @vport: The virtual port to check on + * @rport: Pointer to fc_rport data structure. + * + * This routine delays until the scsi target (aka rport) for the + * command exists (is present and logged in) or we declare it non-existent. + * + * Return code : + * 0x2003 - Error + * 0x2002 - Success + **/ +static int +lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport) +{ + struct lpfc_rport_data *rdata; + struct lpfc_nodelist *pnode = NULL; + unsigned long later; + + rdata = rport->dd_data; + if (!rdata) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0797 Tgt Map rport failure: rdata x%px\n", rdata); + return FAILED; + } + pnode = rdata->pnode; + + /* + * If target is not in a MAPPED state, delay until + * target is rediscovered or devloss timeout expires. + */ + later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; + while (time_after(later, jiffies)) { + if (!pnode) + return FAILED; + if (pnode->nlp_state == NLP_STE_MAPPED_NODE) + return SUCCESS; + schedule_timeout_uninterruptible(msecs_to_jiffies(500)); + rdata = rport->dd_data; + if (!rdata) + return FAILED; + pnode = rdata->pnode; + } + if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) + return FAILED; + return SUCCESS; +} + +/** + * lpfc_reset_flush_io_context - + * @vport: The virtual port (scsi_host) for the flush context + * @tgt_id: If aborting by Target contect - specifies the target id + * @lun_id: If aborting by Lun context - specifies the lun id + * @context: specifies the context level to flush at. + * + * After a reset condition via TMF, we need to flush orphaned i/o + * contexts from the adapter. This routine aborts any contexts + * outstanding, then waits for their completions. The wait is + * bounded by devloss_tmo though. + * + * Return code : + * 0x2003 - Error + * 0x2002 - Success + **/ +static int +lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, + uint64_t lun_id, lpfc_ctx_cmd context) +{ + struct lpfc_hba *phba = vport->phba; + unsigned long later; + int cnt; + + cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); + if (cnt) + lpfc_sli_abort_taskmgmt(vport, + &phba->sli.sli3_ring[LPFC_FCP_RING], + tgt_id, lun_id, context); + later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; + while (time_after(later, jiffies) && cnt) { + schedule_timeout_uninterruptible(msecs_to_jiffies(20)); + cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); + } + if (cnt) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0724 I/O flush failure for context %s : cnt x%x\n", + ((context == LPFC_CTX_LUN) ? "LUN" : + ((context == LPFC_CTX_TGT) ? "TGT" : + ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))), + cnt); + return FAILED; + } + return SUCCESS; +} + +/** + * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point + * @cmnd: Pointer to scsi_cmnd data structure. + * + * This routine does a device reset by sending a LUN_RESET task management + * command. 
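Both lpfc_chk_tgt_mapped() and lpfc_reset_flush_io_context() above have the same shape: compute a deadline of twice the devloss timeout, then re-test the condition after a short sleep until it holds or the deadline passes. A userspace sketch of that deadline-bounded poll; check_ready() is a stand-in for "target is mapped" or "no I/O left outstanding".

    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static int check_ready(void)
    {
        static int calls;
        return ++calls >= 3;        /* pretend it becomes true on the 3rd poll */
    }

    static int wait_until_ready(unsigned int timeout_secs)
    {
        time_t deadline = time(NULL) + timeout_secs;

        while (time(NULL) < deadline) {
            if (check_ready())
                return 0;           /* SUCCESS */
            usleep(20 * 1000);      /* back off between polls, like the 20 ms sleep */
        }
        return -1;                  /* FAILED: condition never came true */
    }

    int main(void)
    {
        printf("wait_until_ready -> %d\n", wait_until_ready(5));
        return 0;
    }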
+ * + * Return code : + * 0x2003 - Error + * 0x2002 - Success + **/ +static int +lpfc_device_reset_handler(struct scsi_cmnd *cmnd) +{ + struct Scsi_Host *shost = cmnd->device->host; + struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_rport_data *rdata; + struct lpfc_nodelist *pnode; + unsigned tgt_id = cmnd->device->id; + uint64_t lun_id = cmnd->device->lun; + struct lpfc_scsi_event_header scsi_event; + int status; + u32 logit = LOG_FCP; + + if (!rport) + return FAILED; + + rdata = rport->dd_data; + if (!rdata || !rdata->pnode) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0798 Device Reset rdata failure: rdata x%px\n", + rdata); + return FAILED; + } + pnode = rdata->pnode; + status = fc_block_rport(rport); + if (status != 0 && status != SUCCESS) + return status; + + status = lpfc_chk_tgt_mapped(vport, rport); + if (status == FAILED) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0721 Device Reset rport failure: rdata x%px\n", rdata); + return FAILED; + } + + scsi_event.event_type = FC_REG_SCSI_EVENT; + scsi_event.subcategory = LPFC_EVENT_LUNRESET; + scsi_event.lun = lun_id; + memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); + memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); + + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); + + status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id, + FCP_LUN_RESET); + if (status != SUCCESS) + logit = LOG_TRACE_EVENT; + + lpfc_printf_vlog(vport, KERN_ERR, logit, + "0713 SCSI layer issued Device Reset (%d, %llu) " + "return x%x\n", tgt_id, lun_id, status); + + /* + * We have to clean up i/o as : they may be orphaned by the TMF; + * or if the TMF failed, they may be in an indeterminate state. + * So, continue on. + * We will report success if all the i/o aborts successfully. + */ + if (status == SUCCESS) + status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, + LPFC_CTX_LUN); + + return status; +} + +/** + * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point + * @cmnd: Pointer to scsi_cmnd data structure. + * + * This routine does a target reset by sending a TARGET_RESET task management + * command. 
+ * + * Return code : + * 0x2003 - Error + * 0x2002 - Success + **/ +static int +lpfc_target_reset_handler(struct scsi_cmnd *cmnd) +{ + struct Scsi_Host *shost = cmnd->device->host; + struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_rport_data *rdata; + struct lpfc_nodelist *pnode; + unsigned tgt_id = cmnd->device->id; + uint64_t lun_id = cmnd->device->lun; + struct lpfc_scsi_event_header scsi_event; + int status; + u32 logit = LOG_FCP; + u32 dev_loss_tmo = vport->cfg_devloss_tmo; + unsigned long flags; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); + + if (!rport) + return FAILED; + + rdata = rport->dd_data; + if (!rdata || !rdata->pnode) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0799 Target Reset rdata failure: rdata x%px\n", + rdata); + return FAILED; + } + pnode = rdata->pnode; + status = fc_block_rport(rport); + if (status != 0 && status != SUCCESS) + return status; + + status = lpfc_chk_tgt_mapped(vport, rport); + if (status == FAILED) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0722 Target Reset rport failure: rdata x%px\n", rdata); + if (pnode) { + spin_lock_irqsave(&pnode->lock, flags); + pnode->nlp_flag &= ~NLP_NPR_ADISC; + pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + spin_unlock_irqrestore(&pnode->lock, flags); + } + lpfc_reset_flush_io_context(vport, tgt_id, lun_id, + LPFC_CTX_TGT); + return FAST_IO_FAIL; + } + + scsi_event.event_type = FC_REG_SCSI_EVENT; + scsi_event.subcategory = LPFC_EVENT_TGTRESET; + scsi_event.lun = 0; + memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); + memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); + + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); + + status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id, + FCP_TARGET_RESET); + if (status != SUCCESS) { + logit = LOG_TRACE_EVENT; + + /* Issue LOGO, if no LOGO is outstanding */ + spin_lock_irqsave(&pnode->lock, flags); + if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) && + !pnode->logo_waitq) { + pnode->logo_waitq = &waitq; + pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + pnode->nlp_flag |= NLP_ISSUE_LOGO; + pnode->save_flags |= NLP_WAIT_FOR_LOGO; + spin_unlock_irqrestore(&pnode->lock, flags); + lpfc_unreg_rpi(vport, pnode); + wait_event_timeout(waitq, + (!(pnode->save_flags & + NLP_WAIT_FOR_LOGO)), + msecs_to_jiffies(dev_loss_tmo * + 1000)); + + if (pnode->save_flags & NLP_WAIT_FOR_LOGO) { + lpfc_printf_vlog(vport, KERN_ERR, logit, + "0725 SCSI layer TGTRST " + "failed & LOGO TMO (%d, %llu) " + "return x%x\n", + tgt_id, lun_id, status); + spin_lock_irqsave(&pnode->lock, flags); + pnode->save_flags &= ~NLP_WAIT_FOR_LOGO; + } else { + spin_lock_irqsave(&pnode->lock, flags); + } + pnode->logo_waitq = NULL; + spin_unlock_irqrestore(&pnode->lock, flags); + status = SUCCESS; + + } else { + spin_unlock_irqrestore(&pnode->lock, flags); + status = FAILED; + } + } + + lpfc_printf_vlog(vport, KERN_ERR, logit, + "0723 SCSI layer issued Target Reset (%d, %llu) " + "return x%x\n", tgt_id, lun_id, status); + + /* + * We have to clean up i/o as : they may be orphaned by the TMF; + * or if the TMF failed, they may be in an indeterminate state. + * So, continue on. + * We will report success if all the i/o aborts successfully. 
+ */ + if (status == SUCCESS) + status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, + LPFC_CTX_TGT); + return status; +} + +/** + * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt + * @cmnd: Pointer to scsi_cmnd data structure. + * + * This routine does host reset to the adaptor port. It brings the HBA + * offline, performs a board restart, and then brings the board back online. + * The lpfc_offline calls lpfc_sli_hba_down which will abort and local + * reject all outstanding SCSI commands to the host and error returned + * back to SCSI mid-level. As this will be SCSI mid-level's last resort + * of error handling, it will only return error if resetting of the adapter + * is not successful; in all other cases, will return success. + * + * Return code : + * 0x2003 - Error + * 0x2002 - Success + **/ +static int +lpfc_host_reset_handler(struct scsi_cmnd *cmnd) +{ + struct Scsi_Host *shost = cmnd->device->host; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int rc, ret = SUCCESS; + + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "3172 SCSI layer issued Host Reset Data:\n"); + + lpfc_offline_prep(phba, LPFC_MBX_WAIT); + lpfc_offline(phba); + rc = lpfc_sli_brdrestart(phba); + if (rc) + goto error; + + /* Wait for successful restart of adapter */ + if (phba->sli_rev < LPFC_SLI_REV4) { + rc = lpfc_sli_chipset_init(phba); + if (rc) + goto error; + } + + rc = lpfc_online(phba); + if (rc) + goto error; + + lpfc_unblock_mgmt_io(phba); + + return ret; +error: + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "3323 Failed host reset\n"); + lpfc_unblock_mgmt_io(phba); + return FAILED; +} + +/** + * lpfc_slave_alloc - scsi_host_template slave_alloc entry point + * @sdev: Pointer to scsi_device. + * + * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's + * globally available list of scsi buffers. This routine also makes sure scsi + * buffer is not allocated more than HBA limit conveyed to midlayer. This list + * of scsi buffer exists for the lifetime of the driver. + * + * Return codes: + * non-0 - Error + * 0 - Success + **/ +static int +lpfc_slave_alloc(struct scsi_device *sdev) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; + struct lpfc_hba *phba = vport->phba; + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + uint32_t total = 0; + uint32_t num_to_alloc = 0; + int num_allocated = 0; + uint32_t sdev_cnt; + struct lpfc_device_data *device_data; + unsigned long flags; + struct lpfc_name target_wwpn; + + if (!rport || fc_remote_port_chkready(rport)) + return -ENXIO; + + if (phba->cfg_fof) { + + /* + * Check to see if the device data structure for the lun + * exists. If not, create one. 
+ */ + + u64_to_wwn(rport->port_name, target_wwpn.u.wwn); + spin_lock_irqsave(&phba->devicelock, flags); + device_data = __lpfc_get_device_data(phba, + &phba->luns, + &vport->fc_portname, + &target_wwpn, + sdev->lun); + if (!device_data) { + spin_unlock_irqrestore(&phba->devicelock, flags); + device_data = lpfc_create_device_data(phba, + &vport->fc_portname, + &target_wwpn, + sdev->lun, + phba->cfg_XLanePriority, + true); + if (!device_data) + return -ENOMEM; + spin_lock_irqsave(&phba->devicelock, flags); + list_add_tail(&device_data->listentry, &phba->luns); + } + device_data->rport_data = rport->dd_data; + device_data->available = true; + spin_unlock_irqrestore(&phba->devicelock, flags); + sdev->hostdata = device_data; + } else { + sdev->hostdata = rport->dd_data; + } + sdev_cnt = atomic_inc_return(&phba->sdev_cnt); + + /* For SLI4, all IO buffers are pre-allocated */ + if (phba->sli_rev == LPFC_SLI_REV4) + return 0; + + /* This code path is now ONLY for SLI3 adapters */ + + /* + * Populate the cmds_per_lun count scsi_bufs into this host's globally + * available list of scsi buffers. Don't allocate more than the + * HBA limit conveyed to the midlayer via the host structure. The + * formula accounts for the lun_queue_depth + error handlers + 1 + * extra. This list of scsi bufs exists for the lifetime of the driver. + */ + total = phba->total_scsi_bufs; + num_to_alloc = vport->cfg_lun_queue_depth + 2; + + /* If allocated buffers are enough do nothing */ + if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total) + return 0; + + /* Allow some exchanges to be available always to complete discovery */ + if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "0704 At limitation of %d preallocated " + "command buffers\n", total); + return 0; + /* Allow some exchanges to be available always to complete discovery */ + } else if (total + num_to_alloc > + phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "0705 Allocation request of %d " + "command buffers will exceed max of %d. " + "Reducing allocation request to %d.\n", + num_to_alloc, phba->cfg_hba_queue_depth, + (phba->cfg_hba_queue_depth - total)); + num_to_alloc = phba->cfg_hba_queue_depth - total; + } + num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc); + if (num_to_alloc != num_allocated) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0708 Allocation request of %d " + "command buffers did not succeed. " + "Allocated %d buffers.\n", + num_to_alloc, num_allocated); + } + if (num_allocated > 0) + phba->total_scsi_bufs += num_allocated; + return 0; +} + +/** + * lpfc_slave_configure - scsi_host_template slave_configure entry point + * @sdev: Pointer to scsi_device. + * + * This routine configures following items + * - Tag command queuing support for @sdev if supported. + * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. 
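For SLI-3 adapters, lpfc_slave_alloc() above sizes its pre-allocation as lun_queue_depth + 2 per device, but never lets the running total climb past hba_queue_depth minus a reserve kept for discovery exchanges, trimming the request when it would. The arithmetic in isolation; the reserve value here is only an example, not the driver's LPFC_DISC_IOCB_BUFF_COUNT.

    #include <stdio.h>

    #define DISC_RESERVE 20   /* example reserve for discovery exchanges */

    static unsigned int buffers_to_alloc(unsigned int lun_queue_depth,
                                         unsigned int total_allocated,
                                         unsigned int hba_queue_depth)
    {
        unsigned int want = lun_queue_depth + 2;   /* queue depth + EH + 1 extra */
        unsigned int limit = hba_queue_depth - DISC_RESERVE;

        if (total_allocated >= limit)
            return 0;                              /* already at the cap */
        if (total_allocated + want > limit)
            want = hba_queue_depth - total_allocated;  /* trim the request */
        return want;
    }

    int main(void)
    {
        printf("%u\n", buffers_to_alloc(30, 0, 512));    /* 32 */
        printf("%u\n", buffers_to_alloc(30, 500, 512));  /* 0: at the limit */
        printf("%u\n", buffers_to_alloc(30, 470, 512));  /* trimmed to 42 */
        return 0;
    }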
+ * + * Return codes: + * 0 - Success + **/ +static int +lpfc_slave_configure(struct scsi_device *sdev) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; + struct lpfc_hba *phba = vport->phba; + + scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth); + + if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { + lpfc_sli_handle_fast_ring_event(phba, + &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); + if (phba->cfg_poll & DISABLE_FCP_RING_INT) + lpfc_poll_rearm_timer(phba); + } + + return 0; +} + +/** + * lpfc_slave_destroy - slave_destroy entry point of SHT data structure + * @sdev: Pointer to scsi_device. + * + * This routine sets @sdev hostatdata filed to null. + **/ +static void +lpfc_slave_destroy(struct scsi_device *sdev) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; + struct lpfc_hba *phba = vport->phba; + unsigned long flags; + struct lpfc_device_data *device_data = sdev->hostdata; + + atomic_dec(&phba->sdev_cnt); + if ((phba->cfg_fof) && (device_data)) { + spin_lock_irqsave(&phba->devicelock, flags); + device_data->available = false; + if (!device_data->oas_enabled) + lpfc_delete_device_data(phba, device_data); + spin_unlock_irqrestore(&phba->devicelock, flags); + } + sdev->hostdata = NULL; + return; +} + +/** + * lpfc_create_device_data - creates and initializes device data structure for OAS + * @phba: Pointer to host bus adapter structure. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun on target + * @pri: Priority + * @atomic_create: Flag to indicate if memory should be allocated using the + * GFP_ATOMIC flag or not. + * + * This routine creates a device data structure which will contain identifying + * information for the device (host wwpn, target wwpn, lun), state of OAS, + * whether or not the corresponding lun is available by the system, + * and pointer to the rport data. + * + * Return codes: + * NULL - Error + * Pointer to lpfc_device_data - Success + **/ +struct lpfc_device_data* +lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, + struct lpfc_name *target_wwpn, uint64_t lun, + uint32_t pri, bool atomic_create) +{ + + struct lpfc_device_data *lun_info; + int memory_flags; + + if (unlikely(!phba) || !vport_wwpn || !target_wwpn || + !(phba->cfg_fof)) + return NULL; + + /* Attempt to create the device data to contain lun info */ + + if (atomic_create) + memory_flags = GFP_ATOMIC; + else + memory_flags = GFP_KERNEL; + lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags); + if (!lun_info) + return NULL; + INIT_LIST_HEAD(&lun_info->listentry); + lun_info->rport_data = NULL; + memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn, + sizeof(struct lpfc_name)); + memcpy(&lun_info->device_id.target_wwpn, target_wwpn, + sizeof(struct lpfc_name)); + lun_info->device_id.lun = lun; + lun_info->oas_enabled = false; + lun_info->priority = pri; + lun_info->available = false; + return lun_info; +} + +/** + * lpfc_delete_device_data - frees a device data structure for OAS + * @phba: Pointer to host bus adapter structure. + * @lun_info: Pointer to device data structure to free. + * + * This routine frees the previously allocated device data structure passed. 
+ * + **/ +void +lpfc_delete_device_data(struct lpfc_hba *phba, + struct lpfc_device_data *lun_info) +{ + + if (unlikely(!phba) || !lun_info || + !(phba->cfg_fof)) + return; + + if (!list_empty(&lun_info->listentry)) + list_del(&lun_info->listentry); + mempool_free(lun_info, phba->device_data_mem_pool); + return; +} + +/** + * __lpfc_get_device_data - returns the device data for the specified lun + * @phba: Pointer to host bus adapter structure. + * @list: Point to list to search. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun on target + * + * This routine searches the list passed for the specified lun's device data. + * This function does not hold locks, it is the responsibility of the caller + * to ensure the proper lock is held before calling the function. + * + * Return codes: + * NULL - Error + * Pointer to lpfc_device_data - Success + **/ +struct lpfc_device_data* +__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list, + struct lpfc_name *vport_wwpn, + struct lpfc_name *target_wwpn, uint64_t lun) +{ + + struct lpfc_device_data *lun_info; + + if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || + !phba->cfg_fof) + return NULL; + + /* Check to see if the lun is already enabled for OAS. */ + + list_for_each_entry(lun_info, list, listentry) { + if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, + sizeof(struct lpfc_name)) == 0) && + (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, + sizeof(struct lpfc_name)) == 0) && + (lun_info->device_id.lun == lun)) + return lun_info; + } + + return NULL; +} + +/** + * lpfc_find_next_oas_lun - searches for the next oas lun + * @phba: Pointer to host bus adapter structure. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @starting_lun: Pointer to the lun to start searching for + * @found_vport_wwpn: Pointer to the found lun's vport wwpn information + * @found_target_wwpn: Pointer to the found lun's target wwpn information + * @found_lun: Pointer to the found lun. + * @found_lun_status: Pointer to status of the found lun. + * @found_lun_pri: Pointer to priority of the found lun. + * + * This routine searches the luns list for the specified lun + * or the first lun for the vport/target. If the vport wwpn contains + * a zero value then a specific vport is not specified. In this case + * any vport which contains the lun will be considered a match. If the + * target wwpn contains a zero value then a specific target is not specified. + * In this case any target which contains the lun will be considered a + * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status + * are returned. The function will also return the next lun if available. + * If the next lun is not found, starting_lun parameter will be set to + * NO_MORE_OAS_LUN. 
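__lpfc_get_device_data() above walks the lun list and matches on the full (vport WWPN, target WWPN, LUN) triple with memcmp(), leaving locking to the caller. A simplified standalone version, with 64-bit WWPNs and a plain singly linked list standing in for struct lpfc_name and list_head:

    #include <stdint.h>
    #include <stdio.h>

    struct dev_data {
        uint64_t vport_wwpn;
        uint64_t target_wwpn;
        uint64_t lun;
        struct dev_data *next;
    };

    /* Caller must hold whatever lock protects the list, as in the driver. */
    static struct dev_data *find_device(struct dev_data *head,
                                        uint64_t vport_wwpn,
                                        uint64_t target_wwpn, uint64_t lun)
    {
        struct dev_data *p;

        for (p = head; p; p = p->next)
            if (p->vport_wwpn == vport_wwpn &&
                p->target_wwpn == target_wwpn && p->lun == lun)
                return p;
        return NULL;
    }

    int main(void)
    {
        struct dev_data b = { 0x20000090fa000002ULL, 0x5000c50000000001ULL, 3, NULL };
        struct dev_data a = { 0x20000090fa000001ULL, 0x5000c50000000001ULL, 0, &b };

        printf("%s\n", find_device(&a, b.vport_wwpn, b.target_wwpn, 3) ? "hit" : "miss");
        printf("%s\n", find_device(&a, b.vport_wwpn, b.target_wwpn, 9) ? "hit" : "miss");
        return 0;
    }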
+ * + * Return codes: + * non-0 - Error + * 0 - Success + **/ +bool +lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, + struct lpfc_name *target_wwpn, uint64_t *starting_lun, + struct lpfc_name *found_vport_wwpn, + struct lpfc_name *found_target_wwpn, + uint64_t *found_lun, + uint32_t *found_lun_status, + uint32_t *found_lun_pri) +{ + + unsigned long flags; + struct lpfc_device_data *lun_info; + struct lpfc_device_id *device_id; + uint64_t lun; + bool found = false; + + if (unlikely(!phba) || !vport_wwpn || !target_wwpn || + !starting_lun || !found_vport_wwpn || + !found_target_wwpn || !found_lun || !found_lun_status || + (*starting_lun == NO_MORE_OAS_LUN) || + !phba->cfg_fof) + return false; + + lun = *starting_lun; + *found_lun = NO_MORE_OAS_LUN; + *starting_lun = NO_MORE_OAS_LUN; + + /* Search for lun or the lun closet in value */ + + spin_lock_irqsave(&phba->devicelock, flags); + list_for_each_entry(lun_info, &phba->luns, listentry) { + if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) || + (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, + sizeof(struct lpfc_name)) == 0)) && + ((wwn_to_u64(target_wwpn->u.wwn) == 0) || + (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, + sizeof(struct lpfc_name)) == 0)) && + (lun_info->oas_enabled)) { + device_id = &lun_info->device_id; + if ((!found) && + ((lun == FIND_FIRST_OAS_LUN) || + (device_id->lun == lun))) { + *found_lun = device_id->lun; + memcpy(found_vport_wwpn, + &device_id->vport_wwpn, + sizeof(struct lpfc_name)); + memcpy(found_target_wwpn, + &device_id->target_wwpn, + sizeof(struct lpfc_name)); + if (lun_info->available) + *found_lun_status = + OAS_LUN_STATUS_EXISTS; + else + *found_lun_status = 0; + *found_lun_pri = lun_info->priority; + if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT) + memset(vport_wwpn, 0x0, + sizeof(struct lpfc_name)); + if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET) + memset(target_wwpn, 0x0, + sizeof(struct lpfc_name)); + found = true; + } else if (found) { + *starting_lun = device_id->lun; + memcpy(vport_wwpn, &device_id->vport_wwpn, + sizeof(struct lpfc_name)); + memcpy(target_wwpn, &device_id->target_wwpn, + sizeof(struct lpfc_name)); + break; + } + } + } + spin_unlock_irqrestore(&phba->devicelock, flags); + return found; +} + +/** + * lpfc_enable_oas_lun - enables a lun for OAS operations + * @phba: Pointer to host bus adapter structure. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun + * @pri: Priority + * + * This routine enables a lun for oas operations. The routines does so by + * doing the following : + * + * 1) Checks to see if the device data for the lun has been created. + * 2) If found, sets the OAS enabled flag if not set and returns. + * 3) Otherwise, creates a device data structure. + * 4) If successfully created, indicates the device data is for an OAS lun, + * indicates the lun is not available and add to the list of luns. 
+ * + * Return codes: + * false - Error + * true - Success + **/ +bool +lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, + struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) +{ + + struct lpfc_device_data *lun_info; + unsigned long flags; + + if (unlikely(!phba) || !vport_wwpn || !target_wwpn || + !phba->cfg_fof) + return false; + + spin_lock_irqsave(&phba->devicelock, flags); + + /* Check to see if the device data for the lun has been created */ + lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn, + target_wwpn, lun); + if (lun_info) { + if (!lun_info->oas_enabled) + lun_info->oas_enabled = true; + lun_info->priority = pri; + spin_unlock_irqrestore(&phba->devicelock, flags); + return true; + } + + /* Create an lun info structure and add to list of luns */ + lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun, + pri, true); + if (lun_info) { + lun_info->oas_enabled = true; + lun_info->priority = pri; + lun_info->available = false; + list_add_tail(&lun_info->listentry, &phba->luns); + spin_unlock_irqrestore(&phba->devicelock, flags); + return true; + } + spin_unlock_irqrestore(&phba->devicelock, flags); + return false; +} + +/** + * lpfc_disable_oas_lun - disables a lun for OAS operations + * @phba: Pointer to host bus adapter structure. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun + * @pri: Priority + * + * This routine disables a lun for oas operations. The routines does so by + * doing the following : + * + * 1) Checks to see if the device data for the lun is created. + * 2) If present, clears the flag indicating this lun is for OAS. + * 3) If the lun is not available by the system, the device data is + * freed. + * + * Return codes: + * false - Error + * true - Success + **/ +bool +lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, + struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) +{ + + struct lpfc_device_data *lun_info; + unsigned long flags; + + if (unlikely(!phba) || !vport_wwpn || !target_wwpn || + !phba->cfg_fof) + return false; + + spin_lock_irqsave(&phba->devicelock, flags); + + /* Check to see if the lun is available. 
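lpfc_enable_oas_lun() above is a find-or-create under a lock: look the LUN up while holding the device-list spinlock, flag it if it already exists, otherwise allocate a new entry (with GFP_ATOMIC, precisely because the lock is still held) and add it before unlocking. A userspace sketch with a mutex and malloc() standing in for the spinlock and mempool, hypothetical names throughout:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct lun_entry {
        unsigned long lun;
        int oas_enabled;
        struct lun_entry *next;
    };

    static struct lun_entry *luns;
    static pthread_mutex_t luns_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Returns 1 on success (found or created), 0 on allocation failure. */
    static int enable_lun(unsigned long lun)
    {
        struct lun_entry *p;

        pthread_mutex_lock(&luns_lock);
        for (p = luns; p; p = p->next) {
            if (p->lun == lun) {
                p->oas_enabled = 1;          /* already tracked: just flag it */
                pthread_mutex_unlock(&luns_lock);
                return 1;
            }
        }

        p = malloc(sizeof(*p));              /* new entry, added under the lock */
        if (!p) {
            pthread_mutex_unlock(&luns_lock);
            return 0;
        }
        p->lun = lun;
        p->oas_enabled = 1;
        p->next = luns;
        luns = p;
        pthread_mutex_unlock(&luns_lock);
        return 1;
    }

    int main(void)
    {
        printf("%d\n", enable_lun(4));   /* creates the entry */
        printf("%d\n", enable_lun(4));   /* finds the existing entry */
        return 0;
    }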
*/ + lun_info = __lpfc_get_device_data(phba, + &phba->luns, vport_wwpn, + target_wwpn, lun); + if (lun_info) { + lun_info->oas_enabled = false; + lun_info->priority = pri; + if (!lun_info->available) + lpfc_delete_device_data(phba, lun_info); + spin_unlock_irqrestore(&phba->devicelock, flags); + return true; + } + + spin_unlock_irqrestore(&phba->devicelock, flags); + return false; +} + +static int +lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) +{ + return SCSI_MLQUEUE_HOST_BUSY; +} + +static int +lpfc_no_slave(struct scsi_device *sdev) +{ + return -ENODEV; +} + +struct scsi_host_template lpfc_template_nvme = { + .module = THIS_MODULE, + .name = LPFC_DRIVER_NAME, + .proc_name = LPFC_DRIVER_NAME, + .info = lpfc_info, + .queuecommand = lpfc_no_command, + .slave_alloc = lpfc_no_slave, + .slave_configure = lpfc_no_slave, + .scan_finished = lpfc_scan_finished, + .this_id = -1, + .sg_tablesize = 1, + .cmd_per_lun = 1, + .shost_groups = lpfc_hba_groups, + .max_sectors = 0xFFFFFFFF, + .vendor_id = LPFC_NL_VENDOR_ID, + .track_queue_depth = 0, +}; + +struct scsi_host_template lpfc_template = { + .module = THIS_MODULE, + .name = LPFC_DRIVER_NAME, + .proc_name = LPFC_DRIVER_NAME, + .info = lpfc_info, + .queuecommand = lpfc_queuecommand, + .eh_timed_out = fc_eh_timed_out, + .eh_should_retry_cmd = fc_eh_should_retry_cmd, + .eh_abort_handler = lpfc_abort_handler, + .eh_device_reset_handler = lpfc_device_reset_handler, + .eh_target_reset_handler = lpfc_target_reset_handler, + .eh_host_reset_handler = lpfc_host_reset_handler, + .slave_alloc = lpfc_slave_alloc, + .slave_configure = lpfc_slave_configure, + .slave_destroy = lpfc_slave_destroy, + .scan_finished = lpfc_scan_finished, + .this_id = -1, + .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, + .cmd_per_lun = LPFC_CMD_PER_LUN, + .shost_groups = lpfc_hba_groups, + .max_sectors = 0xFFFFFFFF, + .vendor_id = LPFC_NL_VENDOR_ID, + .change_queue_depth = scsi_change_queue_depth, + .track_queue_depth = 1, +}; + +struct scsi_host_template lpfc_vport_template = { + .module = THIS_MODULE, + .name = LPFC_DRIVER_NAME, + .proc_name = LPFC_DRIVER_NAME, + .info = lpfc_info, + .queuecommand = lpfc_queuecommand, + .eh_timed_out = fc_eh_timed_out, + .eh_should_retry_cmd = fc_eh_should_retry_cmd, + .eh_abort_handler = lpfc_abort_handler, + .eh_device_reset_handler = lpfc_device_reset_handler, + .eh_target_reset_handler = lpfc_target_reset_handler, + .eh_bus_reset_handler = NULL, + .eh_host_reset_handler = NULL, + .slave_alloc = lpfc_slave_alloc, + .slave_configure = lpfc_slave_configure, + .slave_destroy = lpfc_slave_destroy, + .scan_finished = lpfc_scan_finished, + .this_id = -1, + .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, + .cmd_per_lun = LPFC_CMD_PER_LUN, + .shost_groups = lpfc_vport_groups, + .max_sectors = 0xFFFFFFFF, + .vendor_id = 0, + .change_queue_depth = scsi_change_queue_depth, + .track_queue_depth = 1, +}; diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h new file mode 100644 index 000000000..eae56944f --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -0,0 +1,149 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. 
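The three scsi_host_template definitions above rely on C99 designated initializers: members that are left out (the NVMe template names no eh_* handlers at all) default to zero or NULL, and the vport template spells its NULL reset handlers out explicitly for clarity. A tiny compilable illustration of that style, using an invented template structure rather than the real scsi_host_template:

    #include <stdio.h>

    struct host_template {
        const char *name;
        int (*queuecommand)(int cmd);
        int (*host_reset)(void);
        int sg_tablesize;
        int track_queue_depth;
    };

    static int do_queuecommand(int cmd) { return cmd; }
    static int do_host_reset(void)      { return 0; }

    static const struct host_template physical_template = {
        .name = "example-fc",
        .queuecommand = do_queuecommand,
        .host_reset = do_host_reset,
        .sg_tablesize = 64,
        .track_queue_depth = 1,
    };

    static const struct host_template vport_template = {
        .name = "example-fc-vport",
        .queuecommand = do_queuecommand,
        /* .host_reset intentionally left NULL for virtual ports */
        .sg_tablesize = 64,
        .track_queue_depth = 1,
    };

    int main(void)
    {
        printf("%s reset handler? %s\n", physical_template.name,
               physical_template.host_reset ? "yes" : "no");
        printf("%s reset handler? %s\n", vport_template.name,
               vport_template.host_reset ? "yes" : "no");
        return 0;
    }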
* + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include + +struct lpfc_hba; +#define LPFC_FCP_CDB_LEN 16 + +#define list_remove_head(list, entry, type, member) \ + do { \ + entry = NULL; \ + if (!list_empty(list)) { \ + entry = list_entry((list)->next, type, member); \ + list_del_init(&entry->member); \ + } \ + } while(0) + +#define list_get_first(list, type, member) \ + (list_empty(list)) ? NULL : \ + list_entry((list)->next, type, member) + +/* per-port data that is allocated in the FC transport for us */ +struct lpfc_rport_data { + struct lpfc_nodelist *pnode; /* Pointer to the node structure. */ +}; + +struct lpfc_device_id { + struct lpfc_name vport_wwpn; + struct lpfc_name target_wwpn; + uint64_t lun; +}; + +struct lpfc_device_data { + struct list_head listentry; + struct lpfc_rport_data *rport_data; + struct lpfc_device_id device_id; + uint8_t priority; + bool oas_enabled; + bool available; +}; + +struct fcp_rsp { + uint32_t rspRsvd1; /* FC Word 0, byte 0:3 */ + uint32_t rspRsvd2; /* FC Word 1, byte 0:3 */ + + uint8_t rspStatus0; /* FCP_STATUS byte 0 (reserved) */ + uint8_t rspStatus1; /* FCP_STATUS byte 1 (reserved) */ + uint8_t rspStatus2; /* FCP_STATUS byte 2 field validity */ +#define RSP_LEN_VALID 0x01 /* bit 0 */ +#define SNS_LEN_VALID 0x02 /* bit 1 */ +#define RESID_OVER 0x04 /* bit 2 */ +#define RESID_UNDER 0x08 /* bit 3 */ + uint8_t rspStatus3; /* FCP_STATUS byte 3 SCSI status byte */ + + uint32_t rspResId; /* Residual xfer if residual count field set in + fcpStatus2 */ + /* Received in Big Endian format */ + uint32_t rspSnsLen; /* Length of sense data in fcpSnsInfo */ + /* Received in Big Endian format */ + uint32_t rspRspLen; /* Length of FCP response data in fcpRspInfo */ + /* Received in Big Endian format */ + + uint8_t rspInfo0; /* FCP_RSP_INFO byte 0 (reserved) */ + uint8_t rspInfo1; /* FCP_RSP_INFO byte 1 (reserved) */ + uint8_t rspInfo2; /* FCP_RSP_INFO byte 2 (reserved) */ + uint8_t rspInfo3; /* FCP_RSP_INFO RSP_CODE byte 3 */ + +#define RSP_NO_FAILURE 0x00 +#define RSP_DATA_BURST_ERR 0x01 +#define RSP_CMD_FIELD_ERR 0x02 +#define RSP_RO_MISMATCH_ERR 0x03 +#define RSP_TM_NOT_SUPPORTED 0x04 /* Task mgmt function not supported */ +#define RSP_TM_NOT_COMPLETED 0x05 /* Task mgmt function not performed */ +#define RSP_TM_INVALID_LU 0x09 /* Task mgmt function to invalid LU */ + + uint32_t rspInfoRsvd; /* FCP_RSP_INFO bytes 4-7 (reserved) */ + + uint8_t rspSnsInfo[128]; +#define SNS_ILLEGAL_REQ 0x05 /* sense key is byte 3 ([2]) */ +#define SNSCOD_BADCMD 0x20 /* sense code is byte 13 ([12]) */ +}; + +struct fcp_cmnd { + struct scsi_lun fcp_lun; + + uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */ + uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */ +#define SIMPLE_Q 0x00 +#define HEAD_OF_Q 0x01 +#define ORDERED_Q 0x02 
+#define ACA_Q 0x04 +#define UNTAGGED 0x05 + uint8_t fcpCntl2; /* FCP_CTL byte 2 task management codes */ +#define FCP_ABORT_TASK_SET 0x02 /* Bit 1 */ +#define FCP_CLEAR_TASK_SET 0x04 /* bit 2 */ +#define FCP_BUS_RESET 0x08 /* bit 3 */ +#define FCP_LUN_RESET 0x10 /* bit 4 */ +#define FCP_TARGET_RESET 0x20 /* bit 5 */ +#define FCP_CLEAR_ACA 0x40 /* bit 6 */ +#define FCP_TERMINATE_TASK 0x80 /* bit 7 */ + uint8_t fcpCntl3; +#define WRITE_DATA 0x01 /* Bit 0 */ +#define READ_DATA 0x02 /* Bit 1 */ + + uint8_t fcpCdb[LPFC_FCP_CDB_LEN]; /* SRB cdb field is copied here */ + uint32_t fcpDl; /* Total transfer length */ + +}; + +#define LPFC_SCSI_DMA_EXT_SIZE 264 +#define LPFC_BPL_SIZE 1024 +#define MDAC_DIRECT_CMD 0x22 + +#define FIND_FIRST_OAS_LUN 0 +#define NO_MORE_OAS_LUN -1 +#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN + +#ifndef FC_PORTSPEED_128GBIT +#define FC_PORTSPEED_128GBIT 0x2000 +#endif + +#ifndef FC_PORTSPEED_256GBIT +#define FC_PORTSPEED_256GBIT 0x4000 +#endif + +#define TXRDY_PAYLOAD_LEN 12 + +/* For sysfs/debugfs tmp string max len */ +#define LPFC_MAX_SCSI_INFO_TMP_LEN 79 + diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c new file mode 100644 index 000000000..4dfadf254 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -0,0 +1,22736 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_X86 +#include +#endif + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_crtn.h" +#include "lpfc_logmsg.h" +#include "lpfc_compat.h" +#include "lpfc_debugfs.h" +#include "lpfc_vport.h" +#include "lpfc_version.h" + +/* There are only four IOCB completion types. */ +typedef enum _lpfc_iocb_type { + LPFC_UNKNOWN_IOCB, + LPFC_UNSOL_IOCB, + LPFC_SOL_IOCB, + LPFC_ABORT_IOCB +} lpfc_iocb_type; + + +/* Provide function prototypes local to this module. 
*/ +static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, + uint32_t); +static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, + uint8_t *, uint32_t *); +static struct lpfc_iocbq * +lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba, + struct lpfc_iocbq *rspiocbq); +static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, + struct hbq_dmabuf *); +static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, + struct hbq_dmabuf *dmabuf); +static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, + struct lpfc_queue *cq, struct lpfc_cqe *cqe); +static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, + int); +static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, + struct lpfc_queue *eq, + struct lpfc_eqe *eqe, + enum lpfc_poll_mode poll_mode); +static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); +static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); +static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q); +static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, + struct lpfc_queue *cq, + struct lpfc_cqe *cqe); +static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, + struct lpfc_iocbq *pwqeq, + struct lpfc_sglq *sglq); + +union lpfc_wqe128 lpfc_iread_cmd_template; +union lpfc_wqe128 lpfc_iwrite_cmd_template; +union lpfc_wqe128 lpfc_icmnd_cmd_template; + +/* Setup WQE templates for IOs */ +void lpfc_wqe_cmd_template(void) +{ + union lpfc_wqe128 *wqe; + + /* IREAD template */ + wqe = &lpfc_iread_cmd_template; + memset(wqe, 0, sizeof(union lpfc_wqe128)); + + /* Word 0, 1, 2 - BDE is variable */ + + /* Word 3 - cmd_buff_len, payload_offset_len is zero */ + + /* Word 4 - total_xfer_len is variable */ + + /* Word 5 - is zero */ + + /* Word 6 - ctxt_tag, xri_tag is variable */ + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE); + bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK); + bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3); + bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI); + + /* Word 8 - abort_tag is variable */ + + /* Word 9 - reqtag is variable */ + + /* Word 10 - dbde, wqes is variable */ + bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0); + bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); + bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4); + bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0); + bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1); + + /* Word 11 - pbde is variable */ + bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN); + bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); + + /* Word 12 - is zero */ + + /* Word 13, 14, 15 - PBDE is variable */ + + /* IWRITE template */ + wqe = &lpfc_iwrite_cmd_template; + memset(wqe, 0, sizeof(union lpfc_wqe128)); + + /* Word 0, 1, 2 - BDE is variable */ + + /* Word 3 - cmd_buff_len, payload_offset_len is zero */ + + /* Word 4 - total_xfer_len is variable */ + + /* Word 5 - initial_xfer_len is variable */ + + /* Word 6 - ctxt_tag, xri_tag is variable */ + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE); + bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK); + bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3); + bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI); + + /* Word 8 - abort_tag is variable */ + + /* Word 9 - reqtag is variable */ + + /* Word 10 - dbde, wqes is variable */ + bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0); + 
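/*
 * A minimal standalone sketch (hypothetical names and word layout, not driver
 * code) of the copy-then-patch pattern used by lpfc_wqe_cmd_template() above:
 * the constant words of a work queue entry are filled in once at init time,
 * and the per-IO fast path only copies the template and patches the few
 * variable words.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct fake_wqe { uint32_t word[16]; };     /* stand-in for union lpfc_wqe128 */

static struct fake_wqe iread_template;

static void template_init(void)
{
        memset(&iread_template, 0, sizeof(iread_template));
        iread_template.word[7]  = 0x00000123;   /* constant command/class bits */
        iread_template.word[10] = 0x00000040;   /* constant iod/lenloc bits */
}

/* Per-IO path: copy the template, then set only the variable words. */
static void build_iread(struct fake_wqe *wqe, uint32_t xri, uint32_t reqtag,
                        uint32_t xfer_len)
{
        memcpy(wqe, &iread_template, sizeof(*wqe));
        wqe->word[6] = xri;          /* ctxt_tag/xri_tag are per-IO */
        wqe->word[9] = reqtag;
        wqe->word[4] = xfer_len;
}

int main(void)
{
        struct fake_wqe w;

        template_init();
        build_iread(&w, 0x10, 0x20, 4096);
        printf("w7=0x%x w6=0x%x w4=%u\n",
               (unsigned)w.word[7], (unsigned)w.word[6], (unsigned)w.word[4]);
        return 0;
}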
bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4); + bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0); + bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); + + /* Word 11 - pbde is variable */ + bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT); + bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); + + /* Word 12 - is zero */ + + /* Word 13, 14, 15 - PBDE is variable */ + + /* ICMND template */ + wqe = &lpfc_icmnd_cmd_template; + memset(wqe, 0, sizeof(union lpfc_wqe128)); + + /* Word 0, 1, 2 - BDE is variable */ + + /* Word 3 - payload_offset_len is variable */ + + /* Word 4, 5 - is zero */ + + /* Word 6 - ctxt_tag, xri_tag is variable */ + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE); + bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); + bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3); + bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI); + + /* Word 8 - abort_tag is variable */ + + /* Word 9 - reqtag is variable */ + + /* Word 10 - dbde, wqes is variable */ + bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); + bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE); + bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE); + bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0); + bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1); + + /* Word 11 */ + bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN); + bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0); + + /* Word 12, 13, 14, 15 - is zero */ +} + +#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN) +/** + * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function + * @srcp: Source memory pointer. + * @destp: Destination memory pointer. + * @cnt: Number of words required to be copied. + * Must be a multiple of sizeof(uint64_t) + * + * This function is used for copying data between driver memory + * and the SLI WQ. This function also changes the endianness + * of each word if native endianness is different from SLI + * endianness. This function can be called with or without + * lock. + **/ +static void +lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) +{ + uint64_t *src = srcp; + uint64_t *dest = destp; + int i; + + for (i = 0; i < (int)cnt; i += sizeof(uint64_t)) + *dest++ = *src++; +} +#else +#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c) +#endif + +/** + * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue + * @q: The Work Queue to operate on. + * @wqe: The work Queue Entry to put on the Work queue. + * + * This routine will copy the contents of @wqe to the next available entry on + * the @q. This function will then ring the Work Queue Doorbell to signal the + * HBA to start processing the Work Queue Entry. This function returns 0 if + * successful. If no entries are available on @q then this function will return + * -ENOMEM. + * The caller is expected to hold the hbalock when calling this routine. 
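/*
 * A minimal standalone sketch of the 8-byte stride copy used by
 * lpfc_sli4_pcimem_bcopy() above: the loop advances the index by
 * sizeof(uint64_t) per copied word, so the count is effectively a byte
 * length that must be a multiple of 8.
 */
#include <stdint.h>
#include <stdio.h>

static void bcopy64(void *srcp, void *destp, uint32_t cnt)
{
        uint64_t *src = srcp;
        uint64_t *dest = destp;
        uint32_t i;

        for (i = 0; i < cnt; i += sizeof(uint64_t))
                *dest++ = *src++;
}

int main(void)
{
        uint64_t src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint64_t dst[8] = { 0 };

        bcopy64(src, dst, sizeof(src));          /* 64 bytes, 8 iterations */
        printf("%llu %llu\n", (unsigned long long)dst[0],
               (unsigned long long)dst[7]);
        return 0;
}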
+ **/ +static int +lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) +{ + union lpfc_wqe *temp_wqe; + struct lpfc_register doorbell; + uint32_t host_index; + uint32_t idx; + uint32_t i = 0; + uint8_t *tmp; + u32 if_type; + + /* sanity check on queue memory */ + if (unlikely(!q)) + return -ENOMEM; + + temp_wqe = lpfc_sli4_qe(q, q->host_index); + + /* If the host has not yet processed the next entry then we are done */ + idx = ((q->host_index + 1) % q->entry_count); + if (idx == q->hba_index) { + q->WQ_overflow++; + return -EBUSY; + } + q->WQ_posted++; + /* set consumption flag every once in a while */ + if (!((q->host_index + 1) % q->notify_interval)) + bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); + else + bf_set(wqe_wqec, &wqe->generic.wqe_com, 0); + if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) + bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); + lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size); + if (q->dpp_enable && q->phba->cfg_enable_dpp) { + /* write to DPP aperture taking advatage of Combined Writes */ + tmp = (uint8_t *)temp_wqe; +#ifdef __raw_writeq + for (i = 0; i < q->entry_size; i += sizeof(uint64_t)) + __raw_writeq(*((uint64_t *)(tmp + i)), + q->dpp_regaddr + i); +#else + for (i = 0; i < q->entry_size; i += sizeof(uint32_t)) + __raw_writel(*((uint32_t *)(tmp + i)), + q->dpp_regaddr + i); +#endif + } + /* ensure WQE bcopy and DPP flushed before doorbell write */ + wmb(); + + /* Update the host index before invoking device */ + host_index = q->host_index; + + q->host_index = idx; + + /* Ring Doorbell */ + doorbell.word0 = 0; + if (q->db_format == LPFC_DB_LIST_FORMAT) { + if (q->dpp_enable && q->phba->cfg_enable_dpp) { + bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1); + bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1); + bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell, + q->dpp_id); + bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell, + q->queue_id); + } else { + bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1); + bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); + + /* Leave bits <23:16> clear for if_type 6 dpp */ + if_type = bf_get(lpfc_sli_intf_if_type, + &q->phba->sli4_hba.sli_intf); + if (if_type != LPFC_SLI_INTF_IF_TYPE_6) + bf_set(lpfc_wq_db_list_fm_index, &doorbell, + host_index); + } + } else if (q->db_format == LPFC_DB_RING_FORMAT) { + bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1); + bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id); + } else { + return -EINVAL; + } + writel(doorbell.word0, q->db_regaddr); + + return 0; +} + +/** + * lpfc_sli4_wq_release - Updates internal hba index for WQ + * @q: The Work Queue to operate on. + * @index: The index to advance the hba index to. + * + * This routine will update the HBA index of a queue to reflect consumption of + * Work Queue Entries by the HBA. When the HBA indicates that it has consumed + * an entry the host calls this function to update the queue's internal + * pointers. + **/ +static void +lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index) +{ + /* sanity check on queue memory */ + if (unlikely(!q)) + return; + + q->hba_index = index; +} + +/** + * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue + * @q: The Mailbox Queue to operate on. + * @mqe: The Mailbox Queue Entry to put on the Work queue. + * + * This routine will copy the contents of @mqe to the next available entry on + * the @q. This function will then ring the Work Queue Doorbell to signal the + * HBA to start processing the Work Queue Entry. This function returns 0 if + * successful. 
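/*
 * A minimal standalone sketch (hypothetical names, not driver code) of the
 * producer/consumer index math used by the WQ and MQ put routines here: the
 * host owns host_index, the hardware owns hba_index, and the queue is treated
 * as full when advancing host_index would collide with hba_index, so one slot
 * is always left empty.
 */
#include <stdio.h>
#include <stdint.h>

#define ENTRY_COUNT 4

struct ring {
        uint32_t host_index;    /* producer (host) */
        uint32_t hba_index;     /* consumer (hardware) */
        int entries[ENTRY_COUNT];
};

static int ring_put(struct ring *q, int v)
{
        uint32_t idx = (q->host_index + 1) % ENTRY_COUNT;

        if (idx == q->hba_index)
                return -1;                    /* would overrun unconsumed work */
        q->entries[q->host_index] = v;
        q->host_index = idx;
        return 0;
}

static void ring_release(struct ring *q, uint32_t index)
{
        q->hba_index = index;                 /* hardware reports consumption */
}

int main(void)
{
        struct ring q = { 0 };
        int i;

        for (i = 0; i < 5; i++)
                printf("put %d -> %s\n", i, ring_put(&q, i) ? "EBUSY" : "ok");
        ring_release(&q, 2);                  /* HBA consumed two entries */
        printf("after release: %s\n", ring_put(&q, 9) ? "EBUSY" : "ok");
        return 0;
}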
If no entries are available on @q then this function will return + * -ENOMEM. + * The caller is expected to hold the hbalock when calling this routine. + **/ +static uint32_t +lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) +{ + struct lpfc_mqe *temp_mqe; + struct lpfc_register doorbell; + + /* sanity check on queue memory */ + if (unlikely(!q)) + return -ENOMEM; + temp_mqe = lpfc_sli4_qe(q, q->host_index); + + /* If the host has not yet processed the next entry then we are done */ + if (((q->host_index + 1) % q->entry_count) == q->hba_index) + return -ENOMEM; + lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size); + /* Save off the mailbox pointer for completion */ + q->phba->mbox = (MAILBOX_t *)temp_mqe; + + /* Update the host index before invoking device */ + q->host_index = ((q->host_index + 1) % q->entry_count); + + /* Ring Doorbell */ + doorbell.word0 = 0; + bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1); + bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id); + writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr); + return 0; +} + +/** + * lpfc_sli4_mq_release - Updates internal hba index for MQ + * @q: The Mailbox Queue to operate on. + * + * This routine will update the HBA index of a queue to reflect consumption of + * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed + * an entry the host calls this function to update the queue's internal + * pointers. This routine returns the number of entries that were consumed by + * the HBA. + **/ +static uint32_t +lpfc_sli4_mq_release(struct lpfc_queue *q) +{ + /* sanity check on queue memory */ + if (unlikely(!q)) + return 0; + + /* Clear the mailbox pointer for completion */ + q->phba->mbox = NULL; + q->hba_index = ((q->hba_index + 1) % q->entry_count); + return 1; +} + +/** + * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ + * @q: The Event Queue to get the first valid EQE from + * + * This routine will get the first valid Event Queue Entry from @q, update + * the queue's internal hba index, and return the EQE. If no valid EQEs are in + * the Queue (no more work to do), or the Queue is full of EQEs that have been + * processed, but not popped back to the HBA then this routine will return NULL. + **/ +static struct lpfc_eqe * +lpfc_sli4_eq_get(struct lpfc_queue *q) +{ + struct lpfc_eqe *eqe; + + /* sanity check on queue memory */ + if (unlikely(!q)) + return NULL; + eqe = lpfc_sli4_qe(q, q->host_index); + + /* If the next EQE is not valid then we are done */ + if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid) + return NULL; + + /* + * insert barrier for instruction interlock : data from the hardware + * must have the valid bit checked before it can be copied and acted + * upon. Speculative instructions were allowing a bcopy at the start + * of lpfc_sli4_fp_handle_wcqe(), which is called immediately + * after our return, to copy data before the valid bit check above + * was done. As such, some of the copied data was stale. The barrier + * ensures the check is before any data is copied. 
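/*
 * A minimal standalone sketch (userspace C11 atomics, hypothetical names, not
 * driver code) of the interlock described in the comment above: the valid
 * flag must be observed before any of the entry payload is read. The kernel
 * code uses mb() after bf_get_le32(); an acquire load expresses the same
 * ordering requirement here.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct entry {
        uint32_t payload;
        atomic_uint valid;      /* producer sets this last, with release */
};

static int read_entry(struct entry *e, unsigned int phase, uint32_t *out)
{
        /* Acquire: no payload read may be hoisted above this check. */
        if (atomic_load_explicit(&e->valid, memory_order_acquire) != phase)
                return 0;       /* not posted yet: do not touch the payload */
        *out = e->payload;
        return 1;
}

int main(void)
{
        struct entry e = { 0, 0 };
        uint32_t v = 0;
        int ok;

        printf("before post: %d\n", read_entry(&e, 1, &v));
        e.payload = 42;
        atomic_store_explicit(&e.valid, 1, memory_order_release);
        ok = read_entry(&e, 1, &v);
        printf("after post:  %d (payload %u)\n", ok, v);
        return 0;
}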
+ */ + mb(); + return eqe; +} + +/** + * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ + * @q: The Event Queue to disable interrupts + * + **/ +void +lpfc_sli4_eq_clr_intr(struct lpfc_queue *q) +{ + struct lpfc_register doorbell; + + doorbell.word0 = 0; + bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); + bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); + bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, + (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); + bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); + writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); +} + +/** + * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ + * @q: The Event Queue to disable interrupts + * + **/ +void +lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q) +{ + struct lpfc_register doorbell; + + doorbell.word0 = 0; + bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); + writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); +} + +/** + * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state + * @phba: adapter with EQ + * @q: The Event Queue that the host has completed processing for. + * @count: Number of elements that have been consumed + * @arm: Indicates whether the host wants to arms this CQ. + * + * This routine will notify the HBA, by ringing the doorbell, that count + * number of EQEs have been processed. The @arm parameter indicates whether + * the queue should be rearmed when ringing the doorbell. + **/ +void +lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm) +{ + struct lpfc_register doorbell; + + /* sanity check on queue memory */ + if (unlikely(!q || (count == 0 && !arm))) + return; + + /* ring doorbell for number popped */ + doorbell.word0 = 0; + if (arm) { + bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); + bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); + } + bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count); + bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); + bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, + (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); + bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); + writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); + /* PCI read to flush PCI pipeline on re-arming for INTx mode */ + if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) + readl(q->phba->sli4_hba.EQDBregaddr); +} + +/** + * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state + * @phba: adapter with EQ + * @q: The Event Queue that the host has completed processing for. + * @count: Number of elements that have been consumed + * @arm: Indicates whether the host wants to arms this CQ. + * + * This routine will notify the HBA, by ringing the doorbell, that count + * number of EQEs have been processed. The @arm parameter indicates whether + * the queue should be rearmed when ringing the doorbell. 
+ **/ +void +lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm) +{ + struct lpfc_register doorbell; + + /* sanity check on queue memory */ + if (unlikely(!q || (count == 0 && !arm))) + return; + + /* ring doorbell for number popped */ + doorbell.word0 = 0; + if (arm) + bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1); + bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count); + bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); + writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); + /* PCI read to flush PCI pipeline on re-arming for INTx mode */ + if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) + readl(q->phba->sli4_hba.EQDBregaddr); +} + +static void +__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, + struct lpfc_eqe *eqe) +{ + if (!phba->sli4_hba.pc_sli4_params.eqav) + bf_set_le32(lpfc_eqe_valid, eqe, 0); + + eq->host_index = ((eq->host_index + 1) % eq->entry_count); + + /* if the index wrapped around, toggle the valid bit */ + if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index) + eq->qe_valid = (eq->qe_valid) ? 0 : 1; +} + +static void +lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) +{ + struct lpfc_eqe *eqe = NULL; + u32 eq_count = 0, cq_count = 0; + struct lpfc_cqe *cqe = NULL; + struct lpfc_queue *cq = NULL, *childq = NULL; + int cqid = 0; + + /* walk all the EQ entries and drop on the floor */ + eqe = lpfc_sli4_eq_get(eq); + while (eqe) { + /* Get the reference to the corresponding CQ */ + cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); + cq = NULL; + + list_for_each_entry(childq, &eq->child_list, list) { + if (childq->queue_id == cqid) { + cq = childq; + break; + } + } + /* If CQ is valid, iterate through it and drop all the CQEs */ + if (cq) { + cqe = lpfc_sli4_cq_get(cq); + while (cqe) { + __lpfc_sli4_consume_cqe(phba, cq, cqe); + cq_count++; + cqe = lpfc_sli4_cq_get(cq); + } + /* Clear and re-arm the CQ */ + phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count, + LPFC_QUEUE_REARM); + cq_count = 0; + } + __lpfc_sli4_consume_eqe(phba, eq, eqe); + eq_count++; + eqe = lpfc_sli4_eq_get(eq); + } + + /* Clear and re-arm the EQ */ + phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM); +} + +static int +lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq, + u8 rearm, enum lpfc_poll_mode poll_mode) +{ + struct lpfc_eqe *eqe; + int count = 0, consumed = 0; + + if (cmpxchg(&eq->queue_claimed, 0, 1) != 0) + goto rearm_and_exit; + + eqe = lpfc_sli4_eq_get(eq); + while (eqe) { + lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode); + __lpfc_sli4_consume_eqe(phba, eq, eqe); + + consumed++; + if (!(++count % eq->max_proc_limit)) + break; + + if (!(count % eq->notify_interval)) { + phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, + LPFC_QUEUE_NOARM); + consumed = 0; + } + + eqe = lpfc_sli4_eq_get(eq); + } + eq->EQ_processed += count; + + /* Track the max number of EQEs processed in 1 intr */ + if (count > eq->EQ_max_eqe) + eq->EQ_max_eqe = count; + + xchg(&eq->queue_claimed, 0); + +rearm_and_exit: + /* Always clear the EQ. */ + phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm); + + return count; +} + +/** + * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ + * @q: The Completion Queue to get the first valid CQE from + * + * This routine will get the first valid Completion Queue Entry from @q, update + * the queue's internal hba index, and return the CQE. 
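/*
 * A minimal standalone sketch (hypothetical names, not driver code) of the
 * wrap-and-toggle bookkeeping in __lpfc_sli4_consume_eqe() above for queues
 * that use a phase ("valid") bit: each time the host index wraps to zero, the
 * phase the host expects is flipped, so entries left over from the previous
 * pass are no longer mistaken for new work.
 */
#include <stdio.h>
#include <stdint.h>

#define ENTRY_COUNT 4

struct eq {
        uint32_t host_index;
        uint32_t qe_valid;      /* phase the host currently treats as valid */
};

static void consume_entry(struct eq *q)
{
        q->host_index = (q->host_index + 1) % ENTRY_COUNT;
        if (!q->host_index)
                q->qe_valid = q->qe_valid ? 0 : 1;
}

int main(void)
{
        struct eq q = { 0, 1 };
        int i;

        for (i = 0; i < 9; i++) {
                consume_entry(&q);
                printf("idx=%u expect_valid=%u\n", q.host_index, q.qe_valid);
        }
        return 0;
}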
If no valid CQEs are in + * the Queue (no more work to do), or the Queue is full of CQEs that have been + * processed, but not popped back to the HBA then this routine will return NULL. + **/ +static struct lpfc_cqe * +lpfc_sli4_cq_get(struct lpfc_queue *q) +{ + struct lpfc_cqe *cqe; + + /* sanity check on queue memory */ + if (unlikely(!q)) + return NULL; + cqe = lpfc_sli4_qe(q, q->host_index); + + /* If the next CQE is not valid then we are done */ + if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid) + return NULL; + + /* + * insert barrier for instruction interlock : data from the hardware + * must have the valid bit checked before it can be copied and acted + * upon. Given what was seen in lpfc_sli4_cq_get() of speculative + * instructions allowing action on content before valid bit checked, + * add barrier here as well. May not be needed as "content" is a + * single 32-bit entity here (vs multi word structure for cq's). + */ + mb(); + return cqe; +} + +static void +__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_cqe *cqe) +{ + if (!phba->sli4_hba.pc_sli4_params.cqav) + bf_set_le32(lpfc_cqe_valid, cqe, 0); + + cq->host_index = ((cq->host_index + 1) % cq->entry_count); + + /* if the index wrapped around, toggle the valid bit */ + if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index) + cq->qe_valid = (cq->qe_valid) ? 0 : 1; +} + +/** + * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state. + * @phba: the adapter with the CQ + * @q: The Completion Queue that the host has completed processing for. + * @count: the number of elements that were consumed + * @arm: Indicates whether the host wants to arms this CQ. + * + * This routine will notify the HBA, by ringing the doorbell, that the + * CQEs have been processed. The @arm parameter specifies whether the + * queue should be rearmed when ringing the doorbell. + **/ +void +lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm) +{ + struct lpfc_register doorbell; + + /* sanity check on queue memory */ + if (unlikely(!q || (count == 0 && !arm))) + return; + + /* ring doorbell for number popped */ + doorbell.word0 = 0; + if (arm) + bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); + bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count); + bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION); + bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell, + (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT)); + bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id); + writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr); +} + +/** + * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state. + * @phba: the adapter with the CQ + * @q: The Completion Queue that the host has completed processing for. + * @count: the number of elements that were consumed + * @arm: Indicates whether the host wants to arms this CQ. + * + * This routine will notify the HBA, by ringing the doorbell, that the + * CQEs have been processed. The @arm parameter specifies whether the + * queue should be rearmed when ringing the doorbell. 
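/*
 * A minimal standalone sketch (hypothetical names and intervals, not driver
 * code) of the doorbell batching seen in lpfc_sli4_process_eq() above:
 * consumed-entry credit is returned every notify_interval entries without
 * re-arming, and a single final doorbell write releases the remainder and
 * re-arms the queue.
 */
#include <stdio.h>

#define NOTIFY_INTERVAL 4

/* Pretend doorbell write: report how many entries are released and
 * whether the queue is re-armed for further interrupts. */
static void write_db(int count, int arm)
{
        printf("doorbell: released=%d arm=%d\n", count, arm);
}

int main(void)
{
        int processed = 0, consumed = 0, total = 10;

        while (processed < total) {
                processed++;
                consumed++;
                if (!(processed % NOTIFY_INTERVAL)) {
                        write_db(consumed, 0);   /* partial credit, no arm */
                        consumed = 0;
                }
        }
        write_db(consumed, 1);   /* final: release remainder and re-arm */
        return 0;
}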
+ **/ +void +lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm) +{ + struct lpfc_register doorbell; + + /* sanity check on queue memory */ + if (unlikely(!q || (count == 0 && !arm))) + return; + + /* ring doorbell for number popped */ + doorbell.word0 = 0; + if (arm) + bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1); + bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count); + bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id); + writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr); +} + +/* + * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue + * + * This routine will copy the contents of @wqe to the next available entry on + * the @q. This function will then ring the Receive Queue Doorbell to signal the + * HBA to start processing the Receive Queue Entry. This function returns the + * index that the rqe was copied to if successful. If no entries are available + * on @q then this function will return -ENOMEM. + * The caller is expected to hold the hbalock when calling this routine. + **/ +int +lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, + struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) +{ + struct lpfc_rqe *temp_hrqe; + struct lpfc_rqe *temp_drqe; + struct lpfc_register doorbell; + int hq_put_index; + int dq_put_index; + + /* sanity check on queue memory */ + if (unlikely(!hq) || unlikely(!dq)) + return -ENOMEM; + hq_put_index = hq->host_index; + dq_put_index = dq->host_index; + temp_hrqe = lpfc_sli4_qe(hq, hq_put_index); + temp_drqe = lpfc_sli4_qe(dq, dq_put_index); + + if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) + return -EINVAL; + if (hq_put_index != dq_put_index) + return -EINVAL; + /* If the host has not yet processed the next entry then we are done */ + if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index) + return -EBUSY; + lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); + lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); + + /* Update the host index to point to the next slot */ + hq->host_index = ((hq_put_index + 1) % hq->entry_count); + dq->host_index = ((dq_put_index + 1) % dq->entry_count); + hq->RQ_buf_posted++; + + /* Ring The Header Receive Queue Doorbell */ + if (!(hq->host_index % hq->notify_interval)) { + doorbell.word0 = 0; + if (hq->db_format == LPFC_DB_RING_FORMAT) { + bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell, + hq->notify_interval); + bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id); + } else if (hq->db_format == LPFC_DB_LIST_FORMAT) { + bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell, + hq->notify_interval); + bf_set(lpfc_rq_db_list_fm_index, &doorbell, + hq->host_index); + bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id); + } else { + return -EINVAL; + } + writel(doorbell.word0, hq->db_regaddr); + } + return hq_put_index; +} + +/* + * lpfc_sli4_rq_release - Updates internal hba index for RQ + * + * This routine will update the HBA index of a queue to reflect consumption of + * one Receive Queue Entry by the HBA. When the HBA indicates that it has + * consumed an entry the host calls this function to update the queue's + * internal pointers. This routine returns the number of entries that were + * consumed by the HBA. 
+ **/ +static uint32_t +lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) +{ + /* sanity check on queue memory */ + if (unlikely(!hq) || unlikely(!dq)) + return 0; + + if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) + return 0; + hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); + dq->hba_index = ((dq->hba_index + 1) % dq->entry_count); + return 1; +} + +/** + * lpfc_cmd_iocb - Get next command iocb entry in the ring + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * + * This function returns pointer to next command iocb entry + * in the command ring. The caller must hold hbalock to prevent + * other threads consume the next command iocb. + * SLI-2/SLI-3 provide different sized iocbs. + **/ +static inline IOCB_t * +lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) +{ + return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) + + pring->sli.sli3.cmdidx * phba->iocb_cmd_size); +} + +/** + * lpfc_resp_iocb - Get next response iocb entry in the ring + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * + * This function returns pointer to next response iocb entry + * in the response ring. The caller must hold hbalock to make sure + * that no other thread consume the next response iocb. + * SLI-2/SLI-3 provide different sized iocbs. + **/ +static inline IOCB_t * +lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) +{ + return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) + + pring->sli.sli3.rspidx * phba->iocb_rsp_size); +} + +/** + * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool + * @phba: Pointer to HBA context object. + * + * This function is called with hbalock held. This function + * allocates a new driver iocb object from the iocb pool. If the + * allocation is successful, it returns pointer to the newly + * allocated iocb object else it returns NULL. + **/ +struct lpfc_iocbq * +__lpfc_sli_get_iocbq(struct lpfc_hba *phba) +{ + struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; + struct lpfc_iocbq * iocbq = NULL; + + lockdep_assert_held(&phba->hbalock); + + list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); + if (iocbq) + phba->iocb_cnt++; + if (phba->iocb_cnt > phba->iocb_max) + phba->iocb_max = phba->iocb_cnt; + return iocbq; +} + +/** + * __lpfc_clear_active_sglq - Remove the active sglq for this XRI. + * @phba: Pointer to HBA context object. + * @xritag: XRI value. + * + * This function clears the sglq pointer from the array of active + * sglq's. The xritag that is passed in is used to index into the + * array. Before the xritag can be used it needs to be adjusted + * by subtracting the xribase. + * + * Returns sglq ponter = success, NULL = Failure. + **/ +struct lpfc_sglq * +__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) +{ + struct lpfc_sglq *sglq; + + sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; + phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL; + return sglq; +} + +/** + * __lpfc_get_active_sglq - Get the active sglq for this XRI. + * @phba: Pointer to HBA context object. + * @xritag: XRI value. + * + * This function returns the sglq pointer from the array of active + * sglq's. The xritag that is passed in is used to index into the + * array. Before the xritag can be used it needs to be adjusted + * by subtracting the xribase. + * + * Returns sglq ponter = success, NULL = Failure. 
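/*
 * A minimal standalone sketch (hypothetical names and array size, not driver
 * code) of the XRI-indexed active array used by __lpfc_clear_active_sglq()
 * and __lpfc_get_active_sglq() above: clearing a slot returns whatever was
 * stored there and leaves NULL behind, so a second lookup reports that the
 * sglq is no longer active.
 */
#include <stdio.h>
#include <stddef.h>

#define MAX_XRI 8

struct sglq { int id; };

static struct sglq *active[MAX_XRI];

static struct sglq *clear_active(unsigned int xritag)
{
        struct sglq *s = active[xritag];

        active[xritag] = NULL;
        return s;
}

int main(void)
{
        struct sglq s = { 7 };
        struct sglq *got;

        active[3] = &s;                 /* stored when the IO was issued */
        got = clear_active(3);
        printf("first:  %d\n", got ? got->id : -1);   /* 7 */
        got = clear_active(3);
        printf("second: %d\n", got ? got->id : -1);   /* -1: already cleared */
        return 0;
}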
+ **/ +struct lpfc_sglq * +__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) +{ + struct lpfc_sglq *sglq; + + sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; + return sglq; +} + +/** + * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. + * @phba: Pointer to HBA context object. + * @xritag: xri used in this exchange. + * @rrq: The RRQ to be cleared. + * + **/ +void +lpfc_clr_rrq_active(struct lpfc_hba *phba, + uint16_t xritag, + struct lpfc_node_rrq *rrq) +{ + struct lpfc_nodelist *ndlp = NULL; + + /* Lookup did to verify if did is still active on this vport */ + if (rrq->vport) + ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); + + if (!ndlp) + goto out; + + if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) { + rrq->send_rrq = 0; + rrq->xritag = 0; + rrq->rrq_stop_time = 0; + } +out: + mempool_free(rrq, phba->rrq_pool); +} + +/** + * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV. + * @phba: Pointer to HBA context object. + * + * This function is called with hbalock held. This function + * Checks if stop_time (ratov from setting rrq active) has + * been reached, if it has and the send_rrq flag is set then + * it will call lpfc_send_rrq. If the send_rrq flag is not set + * then it will just call the routine to clear the rrq and + * free the rrq resource. + * The timer is set to the next rrq that is going to expire before + * leaving the routine. + * + **/ +void +lpfc_handle_rrq_active(struct lpfc_hba *phba) +{ + struct lpfc_node_rrq *rrq; + struct lpfc_node_rrq *nextrrq; + unsigned long next_time; + unsigned long iflags; + LIST_HEAD(send_rrq); + + spin_lock_irqsave(&phba->hbalock, iflags); + phba->hba_flag &= ~HBA_RRQ_ACTIVE; + next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); + list_for_each_entry_safe(rrq, nextrrq, + &phba->active_rrq_list, list) { + if (time_after(jiffies, rrq->rrq_stop_time)) + list_move(&rrq->list, &send_rrq); + else if (time_before(rrq->rrq_stop_time, next_time)) + next_time = rrq->rrq_stop_time; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + if ((!list_empty(&phba->active_rrq_list)) && + (!(phba->pport->load_flag & FC_UNLOADING))) + mod_timer(&phba->rrq_tmr, next_time); + list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { + list_del(&rrq->list); + if (!rrq->send_rrq) { + /* this call will free the rrq */ + lpfc_clr_rrq_active(phba, rrq->xritag, rrq); + } else if (lpfc_send_rrq(phba, rrq)) { + /* if we send the rrq then the completion handler + * will clear the bit in the xribitmap. + */ + lpfc_clr_rrq_active(phba, rrq->xritag, + rrq); + } + } +} + +/** + * lpfc_get_active_rrq - Get the active RRQ for this exchange. + * @vport: Pointer to vport context object. + * @xri: The xri used in the exchange. + * @did: The targets DID for this exchange. + * + * returns NULL = rrq not found in the phba->active_rrq_list. + * rrq = rrq for this xri and target. 
+ **/ +struct lpfc_node_rrq * +lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_node_rrq *rrq; + struct lpfc_node_rrq *nextrrq; + unsigned long iflags; + + if (phba->sli_rev != LPFC_SLI_REV4) + return NULL; + spin_lock_irqsave(&phba->hbalock, iflags); + list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { + if (rrq->vport == vport && rrq->xritag == xri && + rrq->nlp_DID == did){ + list_del(&rrq->list); + spin_unlock_irqrestore(&phba->hbalock, iflags); + return rrq; + } + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + return NULL; +} + +/** + * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. + * @vport: Pointer to vport context object. + * @ndlp: Pointer to the lpfc_node_list structure. + * If ndlp is NULL Remove all active RRQs for this vport from the + * phba->active_rrq_list and clear the rrq. + * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. + **/ +void +lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) + +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_node_rrq *rrq; + struct lpfc_node_rrq *nextrrq; + unsigned long iflags; + LIST_HEAD(rrq_list); + + if (phba->sli_rev != LPFC_SLI_REV4) + return; + if (!ndlp) { + lpfc_sli4_vport_delete_els_xri_aborted(vport); + lpfc_sli4_vport_delete_fcp_xri_aborted(vport); + } + spin_lock_irqsave(&phba->hbalock, iflags); + list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { + if (rrq->vport != vport) + continue; + + if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID)) + list_move(&rrq->list, &rrq_list); + + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + + list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { + list_del(&rrq->list); + lpfc_clr_rrq_active(phba, rrq->xritag, rrq); + } +} + +/** + * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. + * @phba: Pointer to HBA context object. + * @ndlp: Targets nodelist pointer for this exchange. + * @xritag: the xri in the bitmap to test. + * + * This function returns: + * 0 = rrq not active for this xri + * 1 = rrq is valid for this xri. + **/ +int +lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, + uint16_t xritag) +{ + if (!ndlp) + return 0; + if (!ndlp->active_rrqs_xri_bitmap) + return 0; + if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap)) + return 1; + else + return 0; +} + +/** + * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap. + * @phba: Pointer to HBA context object. + * @ndlp: nodelist pointer for this target. + * @xritag: xri used in this exchange. + * @rxid: Remote Exchange ID. + * @send_rrq: Flag used to determine if we should send rrq els cmd. + * + * This function takes the hbalock. + * The active bit is always set in the active rrq xri_bitmap even + * if there is no slot avaiable for the other rrq information. + * + * returns 0 rrq actived for this xri + * < 0 No memory or invalid ndlp. 
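/*
 * A minimal standalone, non-atomic sketch (hypothetical names, not driver
 * code) of the XRI bitmap bookkeeping used by lpfc_set_rrq_active() and
 * lpfc_clr_rrq_active(): test_and_set_bit() reports whether an RRQ was
 * already active for that XRI, and test_and_clear_bit() whether there was
 * anything to clear. The kernel primitives are atomic; this sketch is not.
 */
#include <stdio.h>
#include <stdint.h>

static int test_and_set(uint64_t *map, unsigned int bit)
{
        uint64_t mask = 1ULL << bit;
        int old = !!(*map & mask);

        *map |= mask;
        return old;
}

static int test_and_clear(uint64_t *map, unsigned int bit)
{
        uint64_t mask = 1ULL << bit;
        int old = !!(*map & mask);

        *map &= ~mask;
        return old;
}

int main(void)
{
        uint64_t rrq_bitmap = 0;

        printf("set xri 5:   was %d\n", test_and_set(&rrq_bitmap, 5));   /* 0 */
        printf("set xri 5:   was %d\n", test_and_set(&rrq_bitmap, 5));   /* 1: already active */
        printf("clear xri 5: was %d\n", test_and_clear(&rrq_bitmap, 5)); /* 1 */
        printf("clear xri 5: was %d\n", test_and_clear(&rrq_bitmap, 5)); /* 0 */
        return 0;
}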
+ **/ +int +lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, + uint16_t xritag, uint16_t rxid, uint16_t send_rrq) +{ + unsigned long iflags; + struct lpfc_node_rrq *rrq; + int empty; + + if (!ndlp) + return -EINVAL; + + if (!phba->cfg_enable_rrq) + return -EINVAL; + + spin_lock_irqsave(&phba->hbalock, iflags); + if (phba->pport->load_flag & FC_UNLOADING) { + phba->hba_flag &= ~HBA_RRQ_ACTIVE; + goto out; + } + + if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) + goto out; + + if (!ndlp->active_rrqs_xri_bitmap) + goto out; + + if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap)) + goto out; + + spin_unlock_irqrestore(&phba->hbalock, iflags); + rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC); + if (!rrq) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" + " DID:0x%x Send:%d\n", + xritag, rxid, ndlp->nlp_DID, send_rrq); + return -EINVAL; + } + if (phba->cfg_enable_rrq == 1) + rrq->send_rrq = send_rrq; + else + rrq->send_rrq = 0; + rrq->xritag = xritag; + rrq->rrq_stop_time = jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); + rrq->nlp_DID = ndlp->nlp_DID; + rrq->vport = ndlp->vport; + rrq->rxid = rxid; + spin_lock_irqsave(&phba->hbalock, iflags); + empty = list_empty(&phba->active_rrq_list); + list_add_tail(&rrq->list, &phba->active_rrq_list); + phba->hba_flag |= HBA_RRQ_ACTIVE; + if (empty) + lpfc_worker_wake_up(phba); + spin_unlock_irqrestore(&phba->hbalock, iflags); + return 0; +out: + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2921 Can't set rrq active xri:0x%x rxid:0x%x" + " DID:0x%x Send:%d\n", + xritag, rxid, ndlp->nlp_DID, send_rrq); + return -EINVAL; +} + +/** + * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool + * @phba: Pointer to HBA context object. + * @piocbq: Pointer to the iocbq. + * + * The driver calls this function with either the nvme ls ring lock + * or the fc els ring lock held depending on the iocb usage. This function + * gets a new driver sglq object from the sglq list. If the list is not empty + * then it is successful, it returns pointer to the newly allocated sglq + * object else it returns NULL. + **/ +static struct lpfc_sglq * +__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) +{ + struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list; + struct lpfc_sglq *sglq = NULL; + struct lpfc_sglq *start_sglq = NULL; + struct lpfc_io_buf *lpfc_cmd; + struct lpfc_nodelist *ndlp; + int found = 0; + u8 cmnd; + + cmnd = get_job_cmnd(phba, piocbq); + + if (piocbq->cmd_flag & LPFC_IO_FCP) { + lpfc_cmd = piocbq->io_buf; + ndlp = lpfc_cmd->rdata->pnode; + } else if ((cmnd == CMD_GEN_REQUEST64_CR) && + !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) { + ndlp = piocbq->ndlp; + } else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) { + if (piocbq->cmd_flag & LPFC_IO_LOOPBACK) + ndlp = NULL; + else + ndlp = piocbq->ndlp; + } else { + ndlp = piocbq->ndlp; + } + + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list); + start_sglq = sglq; + while (!found) { + if (!sglq) + break; + if (ndlp && ndlp->active_rrqs_xri_bitmap && + test_bit(sglq->sli4_lxritag, + ndlp->active_rrqs_xri_bitmap)) { + /* This xri has an rrq outstanding for this DID. + * put it back in the list and get another xri. 
+ */ + list_add_tail(&sglq->list, lpfc_els_sgl_list); + sglq = NULL; + list_remove_head(lpfc_els_sgl_list, sglq, + struct lpfc_sglq, list); + if (sglq == start_sglq) { + list_add_tail(&sglq->list, lpfc_els_sgl_list); + sglq = NULL; + break; + } else + continue; + } + sglq->ndlp = ndlp; + found = 1; + phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; + sglq->state = SGL_ALLOCATED; + } + spin_unlock(&phba->sli4_hba.sgl_list_lock); + return sglq; +} + +/** + * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool + * @phba: Pointer to HBA context object. + * @piocbq: Pointer to the iocbq. + * + * This function is called with the sgl_list lock held. This function + * gets a new driver sglq object from the sglq list. If the + * list is not empty then it is successful, it returns pointer to the newly + * allocated sglq object else it returns NULL. + **/ +struct lpfc_sglq * +__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) +{ + struct list_head *lpfc_nvmet_sgl_list; + struct lpfc_sglq *sglq = NULL; + + lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list; + + lockdep_assert_held(&phba->sli4_hba.sgl_list_lock); + + list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list); + if (!sglq) + return NULL; + phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; + sglq->state = SGL_ALLOCATED; + return sglq; +} + +/** + * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool + * @phba: Pointer to HBA context object. + * + * This function is called with no lock held. This function + * allocates a new driver iocb object from the iocb pool. If the + * allocation is successful, it returns pointer to the newly + * allocated iocb object else it returns NULL. + **/ +struct lpfc_iocbq * +lpfc_sli_get_iocbq(struct lpfc_hba *phba) +{ + struct lpfc_iocbq * iocbq = NULL; + unsigned long iflags; + + spin_lock_irqsave(&phba->hbalock, iflags); + iocbq = __lpfc_sli_get_iocbq(phba); + spin_unlock_irqrestore(&phba->hbalock, iflags); + return iocbq; +} + +/** + * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool + * @phba: Pointer to HBA context object. + * @iocbq: Pointer to driver iocb object. + * + * This function is called to release the driver iocb object + * to the iocb pool. The iotag in the iocb object + * does not change for each use of the iocb object. This function + * clears all other fields of the iocb object when it is freed. + * The sqlq structure that holds the xritag and phys and virtual + * mappings for the scatter gather list is retrieved from the + * active array of sglq. The get of the sglq pointer also clears + * the entry in the array. If the status of the IO indiactes that + * this IO was aborted then the sglq entry it put on the + * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the + * IO has good status or fails for any other reason then the sglq + * entry is added to the free list (lpfc_els_sgl_list). The hbalock is + * asserted held in the code path calling this routine. 
+ **/ +static void +__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +{ + struct lpfc_sglq *sglq; + unsigned long iflag = 0; + struct lpfc_sli_ring *pring; + + if (iocbq->sli4_xritag == NO_XRI) + sglq = NULL; + else + sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); + + + if (sglq) { + if (iocbq->cmd_flag & LPFC_IO_NVMET) { + spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, + iflag); + sglq->state = SGL_FREED; + sglq->ndlp = NULL; + list_add_tail(&sglq->list, + &phba->sli4_hba.lpfc_nvmet_sgl_list); + spin_unlock_irqrestore( + &phba->sli4_hba.sgl_list_lock, iflag); + goto out; + } + + if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) && + (!(unlikely(pci_channel_offline(phba->pcidev)))) && + sglq->state != SGL_XRI_ABORTED) { + spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, + iflag); + + /* Check if we can get a reference on ndlp */ + if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp)) + sglq->ndlp = NULL; + + list_add(&sglq->list, + &phba->sli4_hba.lpfc_abts_els_sgl_list); + spin_unlock_irqrestore( + &phba->sli4_hba.sgl_list_lock, iflag); + } else { + spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, + iflag); + sglq->state = SGL_FREED; + sglq->ndlp = NULL; + list_add_tail(&sglq->list, + &phba->sli4_hba.lpfc_els_sgl_list); + spin_unlock_irqrestore( + &phba->sli4_hba.sgl_list_lock, iflag); + pring = lpfc_phba_elsring(phba); + /* Check if TXQ queue needs to be serviced */ + if (pring && (!list_empty(&pring->txq))) + lpfc_worker_wake_up(phba); + } + } + +out: + /* + * Clean all volatile data fields, preserve iotag and node struct. + */ + memset_startat(iocbq, 0, wqe); + iocbq->sli4_lxritag = NO_XRI; + iocbq->sli4_xritag = NO_XRI; + iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF | + LPFC_IO_NVME_LS); + list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); +} + + +/** + * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool + * @phba: Pointer to HBA context object. + * @iocbq: Pointer to driver iocb object. + * + * This function is called to release the driver iocb object to the + * iocb pool. The iotag in the iocb object does not change for each + * use of the iocb object. This function clears all other fields of + * the iocb object when it is freed. The hbalock is asserted held in + * the code path calling this routine. + **/ +static void +__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +{ + + /* + * Clean all volatile data fields, preserve iotag and node struct. + */ + memset_startat(iocbq, 0, iocb); + iocbq->sli4_xritag = NO_XRI; + list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); +} + +/** + * __lpfc_sli_release_iocbq - Release iocb to the iocb pool + * @phba: Pointer to HBA context object. + * @iocbq: Pointer to driver iocb object. + * + * This function is called with hbalock held to release driver + * iocb object to the iocb pool. The iotag in the iocb object + * does not change for each use of the iocb object. This function + * clears all other fields of the iocb object when it is freed. + **/ +static void +__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +{ + lockdep_assert_held(&phba->hbalock); + + phba->__lpfc_sli_release_iocbq(phba, iocbq); + phba->iocb_cnt--; +} + +/** + * lpfc_sli_release_iocbq - Release iocb to the iocb pool + * @phba: Pointer to HBA context object. + * @iocbq: Pointer to driver iocb object. + * + * This function is called with no lock held to release the iocb to + * iocb pool. 
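/*
 * A minimal standalone sketch (hypothetical names, not driver code) of the
 * per-adapter dispatch used by __lpfc_sli_release_iocbq() above, which calls
 * through phba->__lpfc_sli_release_iocbq so the same caller works for both
 * the SLI-3 and SLI-4 release variants installed at init time.
 */
#include <stdio.h>

struct adapter;
typedef void (*release_fn)(struct adapter *ad, int tag);

struct adapter {
        release_fn release;     /* chosen once, per SLI revision */
};

static void release_s3(struct adapter *ad, int tag)
{
        (void)ad;
        printf("SLI-3 style release of iotag %d\n", tag);
}

static void release_s4(struct adapter *ad, int tag)
{
        (void)ad;
        printf("SLI-4 style release of iotag %d (sglq handled too)\n", tag);
}

int main(void)
{
        struct adapter a3 = { release_s3 };
        struct adapter a4 = { release_s4 };

        a3.release(&a3, 10);
        a4.release(&a4, 11);
        return 0;
}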
+ **/ +void +lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +{ + unsigned long iflags; + + /* + * Clean all volatile data fields, preserve iotag and node struct. + */ + spin_lock_irqsave(&phba->hbalock, iflags); + __lpfc_sli_release_iocbq(phba, iocbq); + spin_unlock_irqrestore(&phba->hbalock, iflags); +} + +/** + * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list. + * @phba: Pointer to HBA context object. + * @iocblist: List of IOCBs. + * @ulpstatus: ULP status in IOCB command field. + * @ulpWord4: ULP word-4 in IOCB command field. + * + * This function is called with a list of IOCBs to cancel. It cancels the IOCB + * on the list by invoking the complete callback function associated with the + * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond + * fields. + **/ +void +lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, + uint32_t ulpstatus, uint32_t ulpWord4) +{ + struct lpfc_iocbq *piocb; + + while (!list_empty(iocblist)) { + list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); + if (piocb->cmd_cmpl) { + if (piocb->cmd_flag & LPFC_IO_NVME) { + lpfc_nvme_cancel_iocb(phba, piocb, + ulpstatus, ulpWord4); + } else { + if (phba->sli_rev == LPFC_SLI_REV4) { + bf_set(lpfc_wcqe_c_status, + &piocb->wcqe_cmpl, ulpstatus); + piocb->wcqe_cmpl.parameter = ulpWord4; + } else { + piocb->iocb.ulpStatus = ulpstatus; + piocb->iocb.un.ulpWord[4] = ulpWord4; + } + (piocb->cmd_cmpl) (phba, piocb, piocb); + } + } else { + lpfc_sli_release_iocbq(phba, piocb); + } + } + return; +} + +/** + * lpfc_sli_iocb_cmd_type - Get the iocb type + * @iocb_cmnd: iocb command code. + * + * This function is called by ring event handler function to get the iocb type. + * This function translates the iocb command to an iocb command type used to + * decide the final disposition of each completed IOCB. + * The function returns + * LPFC_UNKNOWN_IOCB if it is an unsupported iocb + * LPFC_SOL_IOCB if it is a solicited iocb completion + * LPFC_ABORT_IOCB if it is an abort iocb + * LPFC_UNSOL_IOCB if it is an unsolicited iocb + * + * The caller is not required to hold any lock. 
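/*
 * A minimal standalone sketch (hypothetical names and status value, not
 * driver code) of the shape of lpfc_sli_cancel_iocbs(): drain the pending
 * entries, complete each one through its callback with a forced status, and
 * simply free any entry that has no completion handler.
 */
#include <stdio.h>

struct req {
        int tag;
        void (*cmpl)(struct req *r, int status);
};

static void my_cmpl(struct req *r, int status)
{
        printf("req %d completed with status %d\n", r->tag, status);
}

int main(void)
{
        struct req reqs[3] = { { 1, my_cmpl }, { 2, NULL }, { 3, my_cmpl } };
        int i;

        for (i = 0; i < 3; i++) {
                if (reqs[i].cmpl)
                        reqs[i].cmpl(&reqs[i], -5 /* forced cancel status */);
                else
                        printf("req %d has no completion: freed directly\n",
                               reqs[i].tag);
        }
        return 0;
}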
+ **/ +static lpfc_iocb_type +lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) +{ + lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; + + if (iocb_cmnd > CMD_MAX_IOCB_CMD) + return 0; + + switch (iocb_cmnd) { + case CMD_XMIT_SEQUENCE_CR: + case CMD_XMIT_SEQUENCE_CX: + case CMD_XMIT_BCAST_CN: + case CMD_XMIT_BCAST_CX: + case CMD_ELS_REQUEST_CR: + case CMD_ELS_REQUEST_CX: + case CMD_CREATE_XRI_CR: + case CMD_CREATE_XRI_CX: + case CMD_GET_RPI_CN: + case CMD_XMIT_ELS_RSP_CX: + case CMD_GET_RPI_CR: + case CMD_FCP_IWRITE_CR: + case CMD_FCP_IWRITE_CX: + case CMD_FCP_IREAD_CR: + case CMD_FCP_IREAD_CX: + case CMD_FCP_ICMND_CR: + case CMD_FCP_ICMND_CX: + case CMD_FCP_TSEND_CX: + case CMD_FCP_TRSP_CX: + case CMD_FCP_TRECEIVE_CX: + case CMD_FCP_AUTO_TRSP_CX: + case CMD_ADAPTER_MSG: + case CMD_ADAPTER_DUMP: + case CMD_XMIT_SEQUENCE64_CR: + case CMD_XMIT_SEQUENCE64_CX: + case CMD_XMIT_BCAST64_CN: + case CMD_XMIT_BCAST64_CX: + case CMD_ELS_REQUEST64_CR: + case CMD_ELS_REQUEST64_CX: + case CMD_FCP_IWRITE64_CR: + case CMD_FCP_IWRITE64_CX: + case CMD_FCP_IREAD64_CR: + case CMD_FCP_IREAD64_CX: + case CMD_FCP_ICMND64_CR: + case CMD_FCP_ICMND64_CX: + case CMD_FCP_TSEND64_CX: + case CMD_FCP_TRSP64_CX: + case CMD_FCP_TRECEIVE64_CX: + case CMD_GEN_REQUEST64_CR: + case CMD_GEN_REQUEST64_CX: + case CMD_XMIT_ELS_RSP64_CX: + case DSSCMD_IWRITE64_CR: + case DSSCMD_IWRITE64_CX: + case DSSCMD_IREAD64_CR: + case DSSCMD_IREAD64_CX: + case CMD_SEND_FRAME: + type = LPFC_SOL_IOCB; + break; + case CMD_ABORT_XRI_CN: + case CMD_ABORT_XRI_CX: + case CMD_CLOSE_XRI_CN: + case CMD_CLOSE_XRI_CX: + case CMD_XRI_ABORTED_CX: + case CMD_ABORT_MXRI64_CN: + case CMD_XMIT_BLS_RSP64_CX: + type = LPFC_ABORT_IOCB; + break; + case CMD_RCV_SEQUENCE_CX: + case CMD_RCV_ELS_REQ_CX: + case CMD_RCV_SEQUENCE64_CX: + case CMD_RCV_ELS_REQ64_CX: + case CMD_ASYNC_STATUS: + case CMD_IOCB_RCV_SEQ64_CX: + case CMD_IOCB_RCV_ELS64_CX: + case CMD_IOCB_RCV_CONT64_CX: + case CMD_IOCB_RET_XRI64_CX: + type = LPFC_UNSOL_IOCB; + break; + case CMD_IOCB_XMIT_MSEQ64_CR: + case CMD_IOCB_XMIT_MSEQ64_CX: + case CMD_IOCB_RCV_SEQ_LIST64_CX: + case CMD_IOCB_RCV_ELS_LIST64_CX: + case CMD_IOCB_CLOSE_EXTENDED_CN: + case CMD_IOCB_ABORT_EXTENDED_CN: + case CMD_IOCB_RET_HBQE64_CN: + case CMD_IOCB_FCP_IBIDIR64_CR: + case CMD_IOCB_FCP_IBIDIR64_CX: + case CMD_IOCB_FCP_ITASKMGT64_CX: + case CMD_IOCB_LOGENTRY_CN: + case CMD_IOCB_LOGENTRY_ASYNC_CN: + printk("%s - Unhandled SLI-3 Command x%x\n", + __func__, iocb_cmnd); + type = LPFC_UNKNOWN_IOCB; + break; + default: + type = LPFC_UNKNOWN_IOCB; + break; + } + + return type; +} + +/** + * lpfc_sli_ring_map - Issue config_ring mbox for all rings + * @phba: Pointer to HBA context object. + * + * This function is called from SLI initialization code + * to configure every ring of the HBA's SLI interface. The + * caller is not required to hold any lock. This function issues + * a config_ring mailbox command for each ring. + * This function returns zero if successful else returns a negative + * error code. 
+ **/ +static int +lpfc_sli_ring_map(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *pmb; + MAILBOX_t *pmbox; + int i, rc, ret = 0; + + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) + return -ENOMEM; + pmbox = &pmb->u.mb; + phba->link_state = LPFC_INIT_MBX_CMDS; + for (i = 0; i < psli->num_rings; i++) { + lpfc_config_ring(phba, i, pmb); + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0446 Adapter failed to init (%d), " + "mbxCmd x%x CFG_RING, mbxStatus x%x, " + "ring %d\n", + rc, pmbox->mbxCommand, + pmbox->mbxStatus, i); + phba->link_state = LPFC_HBA_ERROR; + ret = -ENXIO; + break; + } + } + mempool_free(pmb, phba->mbox_mem_pool); + return ret; +} + +/** + * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @piocb: Pointer to the driver iocb object. + * + * The driver calls this function with the hbalock held for SLI3 ports or + * the ring lock held for SLI4 ports. The function adds the + * new iocb to txcmplq of the given ring. This function always returns + * 0. If this function is called for ELS ring, this function checks if + * there is a vport associated with the ELS command. This function also + * starts els_tmofunc timer if this is an ELS command. + **/ +static int +lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *piocb) +{ + u32 ulp_command = 0; + + BUG_ON(!piocb); + ulp_command = get_job_cmnd(phba, piocb); + + list_add_tail(&piocb->list, &pring->txcmplq); + piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ; + pring->txcmplq_cnt++; + if ((unlikely(pring->ringno == LPFC_ELS_RING)) && + (ulp_command != CMD_ABORT_XRI_WQE) && + (ulp_command != CMD_ABORT_XRI_CN) && + (ulp_command != CMD_CLOSE_XRI_CN)) { + BUG_ON(!piocb->vport); + if (!(piocb->vport->load_flag & FC_UNLOADING)) + mod_timer(&piocb->vport->els_tmofunc, + jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); + } + + return 0; +} + +/** + * lpfc_sli_ringtx_get - Get first element of the txq + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * + * This function is called with hbalock held to get next + * iocb in txq of the given ring. If there is any iocb in + * the txq, the function returns first iocb in the list after + * removing the iocb from the list, else it returns NULL. + **/ +struct lpfc_iocbq * +lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) +{ + struct lpfc_iocbq *cmd_iocb; + + lockdep_assert_held(&phba->hbalock); + + list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); + return cmd_iocb; +} + +/** + * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl + * @phba: Pointer to HBA context object. + * @cmdiocb: Pointer to driver command iocb object. + * @rspiocb: Pointer to driver response iocb object. + * + * This routine will inform the driver of any BW adjustments we need + * to make. These changes will be picked up during the next CMF + * timer interrupt. In addition, any BW changes will be logged + * with LOG_CGN_MGMT. 
+ **/ +static void +lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + union lpfc_wqe128 *wqe; + uint32_t status, info; + struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl; + uint64_t bw, bwdif, slop; + uint64_t pcent, bwpcent; + int asig, afpin, sigcnt, fpincnt; + int wsigmax, wfpinmax, cg, tdp; + char *s; + + /* First check for error */ + status = bf_get(lpfc_wcqe_c_status, wcqe); + if (status) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6211 CMF_SYNC_WQE Error " + "req_tag x%x status x%x hwstatus x%x " + "tdatap x%x parm x%x\n", + bf_get(lpfc_wcqe_c_request_tag, wcqe), + bf_get(lpfc_wcqe_c_status, wcqe), + bf_get(lpfc_wcqe_c_hw_status, wcqe), + wcqe->total_data_placed, + wcqe->parameter); + goto out; + } + + /* Gather congestion information on a successful cmpl */ + info = wcqe->parameter; + phba->cmf_active_info = info; + + /* See if firmware info count is valid or has changed */ + if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info) + info = 0; + else + phba->cmf_info_per_interval = info; + + tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe); + cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe); + + /* Get BW requirement from firmware */ + bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE; + if (!bw) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6212 CMF_SYNC_WQE x%x: NULL bw\n", + bf_get(lpfc_wcqe_c_request_tag, wcqe)); + goto out; + } + + /* Gather information needed for logging if a BW change is required */ + wqe = &cmdiocb->wqe; + asig = bf_get(cmf_sync_asig, &wqe->cmf_sync); + afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync); + fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync); + sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync); + if (phba->cmf_max_bytes_per_interval != bw || + (asig || afpin || sigcnt || fpincnt)) { + /* Are we increasing or decreasing BW */ + if (phba->cmf_max_bytes_per_interval < bw) { + bwdif = bw - phba->cmf_max_bytes_per_interval; + s = "Increase"; + } else { + bwdif = phba->cmf_max_bytes_per_interval - bw; + s = "Decrease"; + } + + /* What is the change percentage */ + slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/ + pcent = div64_u64(bwdif * 100 + slop, + phba->cmf_link_byte_count); + bwpcent = div64_u64(bw * 100 + slop, + phba->cmf_link_byte_count); + /* Because of bytes adjustment due to shorter timer in + * lpfc_cmf_timer() the cmf_link_byte_count can be shorter and + * may seem like BW is above 100%. 
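/*
 * A minimal standalone sketch (hypothetical byte counts, not driver code) of
 * the integer percent math in the bandwidth logging above: everything stays
 * in 64-bit integer arithmetic, and a small bias ("slop", whole/200) is added
 * to the numerator before the truncating divide.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t pct(uint64_t part, uint64_t whole)
{
        uint64_t slop = whole / 200;

        return (part * 100 + slop) / whole;
}

int main(void)
{
        uint64_t link_bytes = 1000000;   /* hypothetical interval byte budget */

        printf("%llu\n", (unsigned long long)pct(950000, link_bytes));  /* 95 */
        printf("%llu\n", (unsigned long long)pct(999996, link_bytes));  /* 100 */
        return 0;
}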
+ */ + if (bwpcent > 100) + bwpcent = 100; + + if (phba->cmf_max_bytes_per_interval < bw && + bwpcent > 95) + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6208 Congestion bandwidth " + "limits removed\n"); + else if ((phba->cmf_max_bytes_per_interval > bw) && + ((bwpcent + pcent) <= 100) && ((bwpcent + pcent) > 95)) + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6209 Congestion bandwidth " + "limits in effect\n"); + + if (asig) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6237 BW Threshold %lld%% (%lld): " + "%lld%% %s: Signal Alarm: cg:%d " + "Info:%u\n", + bwpcent, bw, pcent, s, cg, + phba->cmf_active_info); + } else if (afpin) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6238 BW Threshold %lld%% (%lld): " + "%lld%% %s: FPIN Alarm: cg:%d " + "Info:%u\n", + bwpcent, bw, pcent, s, cg, + phba->cmf_active_info); + } else if (sigcnt) { + wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync); + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6239 BW Threshold %lld%% (%lld): " + "%lld%% %s: Signal Warning: " + "Cnt %d Max %d: cg:%d Info:%u\n", + bwpcent, bw, pcent, s, sigcnt, + wsigmax, cg, phba->cmf_active_info); + } else if (fpincnt) { + wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync); + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6240 BW Threshold %lld%% (%lld): " + "%lld%% %s: FPIN Warning: " + "Cnt %d Max %d: cg:%d Info:%u\n", + bwpcent, bw, pcent, s, fpincnt, + wfpinmax, cg, phba->cmf_active_info); + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6241 BW Threshold %lld%% (%lld): " + "CMF %lld%% %s: cg:%d Info:%u\n", + bwpcent, bw, pcent, s, cg, + phba->cmf_active_info); + } + } else if (info) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6246 Info Threshold %u\n", info); + } + + /* Save BW change to be picked up during next timer interrupt */ + phba->cmf_last_sync_bw = bw; +out: + lpfc_sli_release_iocbq(phba, cmdiocb); +} + +/** + * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE + * @phba: Pointer to HBA context object. + * @ms: ms to set in WQE interval, 0 means use init op + * @total: Total rcv bytes for this interval + * + * This routine is called every CMF timer interrupt. Its purpose is + * to issue a CMF_SYNC_WQE to the firmware to inform it of any events + * that may indicate we have congestion (FPINs or Signals). Upon + * completion, the firmware will indicate any BW restrictions the + * driver may need to take. + **/ +int +lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) +{ + union lpfc_wqe128 *wqe; + struct lpfc_iocbq *sync_buf; + unsigned long iflags; + u32 ret_val; + u32 atot, wtot, max; + u8 warn_sync_period = 0; + + /* First address any alarm / warning activity */ + atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0); + wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0); + + /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */ + if (phba->cmf_active_mode != LPFC_CFG_MANAGED || + phba->link_state == LPFC_LINK_DOWN) + return 0; + + spin_lock_irqsave(&phba->hbalock, iflags); + sync_buf = __lpfc_sli_get_iocbq(phba); + if (!sync_buf) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6244 No available WQEs for CMF_SYNC_WQE\n"); + ret_val = ENOMEM; + goto out_unlock; + } + + wqe = &sync_buf->wqe; + + /* WQEs are reused. 
Clear stale data and set key fields to zero */ + memset(wqe, 0, sizeof(*wqe)); + + /* If this is the very first CMF_SYNC_WQE, issue an init operation */ + if (!ms) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6441 CMF Init %d - CMF_SYNC_WQE\n", + phba->fc_eventTag); + bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */ + bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL); + goto initpath; + } + + bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */ + bf_set(cmf_sync_interval, &wqe->cmf_sync, ms); + + /* Check for alarms / warnings */ + if (atot) { + if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { + /* We hit an Signal alarm condition */ + bf_set(cmf_sync_asig, &wqe->cmf_sync, 1); + } else { + /* We hit a FPIN alarm condition */ + bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1); + } + } else if (wtot) { + if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || + phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { + /* We hit an Signal warning condition */ + max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency * + lpfc_acqe_cgn_frequency; + bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max); + bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot); + warn_sync_period = lpfc_acqe_cgn_frequency; + } else { + /* We hit a FPIN warning condition */ + bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1); + bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1); + if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) + warn_sync_period = + LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency); + } + } + + /* Update total read blocks during previous timer interval */ + wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE); + +initpath: + bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER); + wqe->cmf_sync.event_tag = phba->fc_eventTag; + bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE); + + /* Setup reqtag to match the wqe completion. */ + bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag); + + bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1); + bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period); + + bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND); + bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1); + bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT); + + sync_buf->vport = phba->pport; + sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl; + sync_buf->cmd_dmabuf = NULL; + sync_buf->rsp_dmabuf = NULL; + sync_buf->bpl_dmabuf = NULL; + sync_buf->sli4_xritag = NO_XRI; + + sync_buf->cmd_flag |= LPFC_IO_CMF; + ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf); + if (ret_val) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6214 Cannot issue CMF_SYNC_WQE: x%x\n", + ret_val); + __lpfc_sli_release_iocbq(phba, sync_buf); + } +out_unlock: + spin_unlock_irqrestore(&phba->hbalock, iflags); + return ret_val; +} + +/** + * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * + * This function is called with hbalock held and the caller must post the + * iocb without releasing the lock. If the caller releases the lock, + * iocb slot returned by the function is not guaranteed to be available. + * The function returns pointer to the next available iocb slot if there + * is available slot in the ring, else it returns NULL. + * If the get index of the ring is ahead of the put index, the function + * will post an error attention event to the worker thread to take the + * HBA to offline state. 
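+ * Illustrative full/free check, assuming a small 4-entry command ring
+ * (example values only):
+ *   next_cmdidx 3, port cmdGetInx 3   - ring full, NULL is returned
+ *   next_cmdidx 3, port cmdGetInx 0   - a command slot is available
+ *   port cmdGetInx 7 (>= numCiocb)    - corrupted index, HA_ERATT is
+ *                                       posted and the HBA goes offline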
+ **/ +static IOCB_t * +lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) +{ + struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; + uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; + + lockdep_assert_held(&phba->hbalock); + + if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && + (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) + pring->sli.sli3.next_cmdidx = 0; + + if (unlikely(pring->sli.sli3.local_getidx == + pring->sli.sli3.next_cmdidx)) { + + pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); + + if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0315 Ring %d issue: portCmdGet %d " + "is bigger than cmd ring %d\n", + pring->ringno, + pring->sli.sli3.local_getidx, + max_cmd_idx); + + phba->link_state = LPFC_HBA_ERROR; + /* + * All error attention handlers are posted to + * worker thread + */ + phba->work_ha |= HA_ERATT; + phba->work_hs = HS_FFER3; + + lpfc_worker_wake_up(phba); + + return NULL; + } + + if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx) + return NULL; + } + + return lpfc_cmd_iocb(phba, pring); +} + +/** + * lpfc_sli_next_iotag - Get an iotag for the iocb + * @phba: Pointer to HBA context object. + * @iocbq: Pointer to driver iocb object. + * + * This function gets an iotag for the iocb. If there is no unused iotag and + * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup + * array and assigns a new iotag. + * The function returns the allocated iotag if successful, else returns zero. + * Zero is not a valid iotag. + * The caller is not required to hold any lock. + **/ +uint16_t +lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +{ + struct lpfc_iocbq **new_arr; + struct lpfc_iocbq **old_arr; + size_t new_len; + struct lpfc_sli *psli = &phba->sli; + uint16_t iotag; + + spin_lock_irq(&phba->hbalock); + iotag = psli->last_iotag; + if(++iotag < psli->iocbq_lookup_len) { + psli->last_iotag = iotag; + psli->iocbq_lookup[iotag] = iocbq; + spin_unlock_irq(&phba->hbalock); + iocbq->iotag = iotag; + return iotag; + } else if (psli->iocbq_lookup_len < (0xffff + - LPFC_IOCBQ_LOOKUP_INCREMENT)) { + new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; + spin_unlock_irq(&phba->hbalock); + new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *), + GFP_KERNEL); + if (new_arr) { + spin_lock_irq(&phba->hbalock); + old_arr = psli->iocbq_lookup; + if (new_len <= psli->iocbq_lookup_len) { + /* highly unprobable case */ + kfree(new_arr); + iotag = psli->last_iotag; + if(++iotag < psli->iocbq_lookup_len) { + psli->last_iotag = iotag; + psli->iocbq_lookup[iotag] = iocbq; + spin_unlock_irq(&phba->hbalock); + iocbq->iotag = iotag; + return iotag; + } + spin_unlock_irq(&phba->hbalock); + return 0; + } + if (psli->iocbq_lookup) + memcpy(new_arr, old_arr, + ((psli->last_iotag + 1) * + sizeof (struct lpfc_iocbq *))); + psli->iocbq_lookup = new_arr; + psli->iocbq_lookup_len = new_len; + psli->last_iotag = iotag; + psli->iocbq_lookup[iotag] = iocbq; + spin_unlock_irq(&phba->hbalock); + iocbq->iotag = iotag; + kfree(old_arr); + return iotag; + } + } else + spin_unlock_irq(&phba->hbalock); + + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0318 Failed to allocate IOTAG.last IOTAG is %d\n", + psli->last_iotag); + + return 0; +} + +/** + * lpfc_sli_submit_iocb - Submit an iocb to the firmware + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @iocb: Pointer to iocb slot in the ring. 
+ * @nextiocb: Pointer to driver iocb object which need to be + * posted to firmware. + * + * This function is called to post a new iocb to the firmware. This + * function copies the new iocb to ring iocb slot and updates the + * ring pointers. It adds the new iocb to txcmplq if there is + * a completion call back for this iocb else the function will free the + * iocb object. The hbalock is asserted held in the code path calling + * this routine. + **/ +static void +lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + IOCB_t *iocb, struct lpfc_iocbq *nextiocb) +{ + /* + * Set up an iotag + */ + nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0; + + + if (pring->ringno == LPFC_ELS_RING) { + lpfc_debugfs_slow_ring_trc(phba, + "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", + *(((uint32_t *) &nextiocb->iocb) + 4), + *(((uint32_t *) &nextiocb->iocb) + 6), + *(((uint32_t *) &nextiocb->iocb) + 7)); + } + + /* + * Issue iocb command to adapter + */ + lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); + wmb(); + pring->stats.iocb_cmd++; + + /* + * If there is no completion routine to call, we can release the + * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, + * that have no rsp ring completion, cmd_cmpl MUST be NULL. + */ + if (nextiocb->cmd_cmpl) + lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); + else + __lpfc_sli_release_iocbq(phba, nextiocb); + + /* + * Let the HBA know what IOCB slot will be the next one the + * driver will put a command into. + */ + pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; + writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); +} + +/** + * lpfc_sli_update_full_ring - Update the chip attention register + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * + * The caller is not required to hold any lock for calling this function. + * This function updates the chip attention bits for the ring to inform firmware + * that there are pending work to be done for this ring and requests an + * interrupt when there is space available in the ring. This function is + * called when the driver is unable to post more iocbs to the ring due + * to unavailability of space in the ring. + **/ +static void +lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) +{ + int ringno = pring->ringno; + + pring->flag |= LPFC_CALL_RING_AVAILABLE; + + wmb(); + + /* + * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. + * The HBA will tell us when an IOCB entry is available. + */ + writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); + readl(phba->CAregaddr); /* flush */ + + pring->stats.iocb_cmd_full++; +} + +/** + * lpfc_sli_update_ring - Update chip attention register + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * + * This function updates the chip attention register bit for the + * given ring to inform HBA that there is more work to be done + * in this ring. The caller is not required to hold any lock. + **/ +static void +lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) +{ + int ringno = pring->ringno; + + /* + * Tell the HBA that there is work to do in this ring. + */ + if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { + wmb(); + writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); + readl(phba->CAregaddr); /* flush */ + } +} + +/** + * lpfc_sli_resume_iocb - Process iocbs in the txq + * @phba: Pointer to HBA context object. 
+ * @pring: Pointer to driver SLI ring object. + * + * This function is called with hbalock held to post pending iocbs + * in the txq to the firmware. This function is called when driver + * detects space available in the ring. + **/ +static void +lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) +{ + IOCB_t *iocb; + struct lpfc_iocbq *nextiocb; + + lockdep_assert_held(&phba->hbalock); + + /* + * Check to see if: + * (a) there is anything on the txq to send + * (b) link is up + * (c) link attention events can be processed (fcp ring only) + * (d) IOCB processing is not blocked by the outstanding mbox command. + */ + + if (lpfc_is_link_up(phba) && + (!list_empty(&pring->txq)) && + (pring->ringno != LPFC_FCP_RING || + phba->sli.sli_flag & LPFC_PROCESS_LA)) { + + while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && + (nextiocb = lpfc_sli_ringtx_get(phba, pring))) + lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); + + if (iocb) + lpfc_sli_update_ring(phba, pring); + else + lpfc_sli_update_full_ring(phba, pring); + } + + return; +} + +/** + * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ + * @phba: Pointer to HBA context object. + * @hbqno: HBQ number. + * + * This function is called with hbalock held to get the next + * available slot for the given HBQ. If there is free slot + * available for the HBQ it will return pointer to the next available + * HBQ entry else it will return NULL. + **/ +static struct lpfc_hbq_entry * +lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) +{ + struct hbq_s *hbqp = &phba->hbqs[hbqno]; + + lockdep_assert_held(&phba->hbalock); + + if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && + ++hbqp->next_hbqPutIdx >= hbqp->entry_count) + hbqp->next_hbqPutIdx = 0; + + if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { + uint32_t raw_index = phba->hbq_get[hbqno]; + uint32_t getidx = le32_to_cpu(raw_index); + + hbqp->local_hbqGetIdx = getidx; + + if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1802 HBQ %d: local_hbqGetIdx " + "%u is > than hbqp->entry_count %u\n", + hbqno, hbqp->local_hbqGetIdx, + hbqp->entry_count); + + phba->link_state = LPFC_HBA_ERROR; + return NULL; + } + + if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) + return NULL; + } + + return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + + hbqp->hbqPutIdx; +} + +/** + * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers + * @phba: Pointer to HBA context object. + * + * This function is called with no lock held to free all the + * hbq buffers while uninitializing the SLI interface. It also + * frees the HBQ buffers returned by the firmware but not yet + * processed by the upper layers. 
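+ * Clearing hbq_in_use while holding hbalock is what keeps a concurrent
+ * lpfc_sli_hbqbuf_fill_hbqs(), which re-checks hbq_in_use under the same
+ * lock, from posting freshly allocated buffers once this teardown has run.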
+ **/ +void +lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) +{ + struct lpfc_dmabuf *dmabuf, *next_dmabuf; + struct hbq_dmabuf *hbq_buf; + unsigned long flags; + int i, hbq_count; + + hbq_count = lpfc_sli_hbq_count(); + /* Return all memory used by all HBQs */ + spin_lock_irqsave(&phba->hbalock, flags); + for (i = 0; i < hbq_count; ++i) { + list_for_each_entry_safe(dmabuf, next_dmabuf, + &phba->hbqs[i].hbq_buffer_list, list) { + hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); + list_del(&hbq_buf->dbuf.list); + (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); + } + phba->hbqs[i].buffer_count = 0; + } + + /* Mark the HBQs not in use */ + phba->hbq_in_use = 0; + spin_unlock_irqrestore(&phba->hbalock, flags); +} + +/** + * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware + * @phba: Pointer to HBA context object. + * @hbqno: HBQ number. + * @hbq_buf: Pointer to HBQ buffer. + * + * This function is called with the hbalock held to post a + * hbq buffer to the firmware. If the function finds an empty + * slot in the HBQ, it will post the buffer. The function will return + * pointer to the hbq entry if it successfully post the buffer + * else it will return NULL. + **/ +static int +lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, + struct hbq_dmabuf *hbq_buf) +{ + lockdep_assert_held(&phba->hbalock); + return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); +} + +/** + * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware + * @phba: Pointer to HBA context object. + * @hbqno: HBQ number. + * @hbq_buf: Pointer to HBQ buffer. + * + * This function is called with the hbalock held to post a hbq buffer to the + * firmware. If the function finds an empty slot in the HBQ, it will post the + * buffer and place it on the hbq_buffer_list. The function will return zero if + * it successfully post the buffer else it will return an error. + **/ +static int +lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, + struct hbq_dmabuf *hbq_buf) +{ + struct lpfc_hbq_entry *hbqe; + dma_addr_t physaddr = hbq_buf->dbuf.phys; + + lockdep_assert_held(&phba->hbalock); + /* Get next HBQ entry slot to use */ + hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); + if (hbqe) { + struct hbq_s *hbqp = &phba->hbqs[hbqno]; + + hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); + hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); + hbqe->bde.tus.f.bdeSize = hbq_buf->total_size; + hbqe->bde.tus.f.bdeFlags = 0; + hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); + hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); + /* Sync SLIM */ + hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; + writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); + /* flush */ + readl(phba->hbq_put + hbqno); + list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); + return 0; + } else + return -ENOMEM; +} + +/** + * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware + * @phba: Pointer to HBA context object. + * @hbqno: HBQ number. + * @hbq_buf: Pointer to HBQ buffer. + * + * This function is called with the hbalock held to post an RQE to the SLI4 + * firmware. If able to post the RQE to the RQ it will queue the hbq entry to + * the hbq_buffer_list and return zero, otherwise it will return an error. 
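+ * The tag stored here encodes both the HBQ number and the RQE index
+ * returned by lpfc_sli4_rq_put():
+ *   tag = rc | (hbqno << 16)
+ * e.g. (illustrative values) index 0x2a for HBQ 0 gives tag 0x0000002a,
+ * while the same index for HBQ 1 would give 0x0001002a.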
+ **/ +static int +lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, + struct hbq_dmabuf *hbq_buf) +{ + int rc; + struct lpfc_rqe hrqe; + struct lpfc_rqe drqe; + struct lpfc_queue *hrq; + struct lpfc_queue *drq; + + if (hbqno != LPFC_ELS_HBQ) + return 1; + hrq = phba->sli4_hba.hdr_rq; + drq = phba->sli4_hba.dat_rq; + + lockdep_assert_held(&phba->hbalock); + hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); + hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); + drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); + drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); + rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); + if (rc < 0) + return rc; + hbq_buf->tag = (rc | (hbqno << 16)); + list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); + return 0; +} + +/* HBQ for ELS and CT traffic. */ +static struct lpfc_hbq_init lpfc_els_hbq = { + .rn = 1, + .entry_count = 256, + .mask_count = 0, + .profile = 0, + .ring_mask = (1 << LPFC_ELS_RING), + .buffer_count = 0, + .init_count = 40, + .add_count = 40, +}; + +/* Array of HBQs */ +struct lpfc_hbq_init *lpfc_hbq_defs[] = { + &lpfc_els_hbq, +}; + +/** + * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ + * @phba: Pointer to HBA context object. + * @hbqno: HBQ number. + * @count: Number of HBQ buffers to be posted. + * + * This function is called with no lock held to post more hbq buffers to the + * given HBQ. The function returns the number of HBQ buffers successfully + * posted. + **/ +static int +lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) +{ + uint32_t i, posted = 0; + unsigned long flags; + struct hbq_dmabuf *hbq_buffer; + LIST_HEAD(hbq_buf_list); + if (!phba->hbqs[hbqno].hbq_alloc_buffer) + return 0; + + if ((phba->hbqs[hbqno].buffer_count + count) > + lpfc_hbq_defs[hbqno]->entry_count) + count = lpfc_hbq_defs[hbqno]->entry_count - + phba->hbqs[hbqno].buffer_count; + if (!count) + return 0; + /* Allocate HBQ entries */ + for (i = 0; i < count; i++) { + hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); + if (!hbq_buffer) + break; + list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); + } + /* Check whether HBQ is still in use */ + spin_lock_irqsave(&phba->hbalock, flags); + if (!phba->hbq_in_use) + goto err; + while (!list_empty(&hbq_buf_list)) { + list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, + dbuf.list); + hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | + (hbqno << 16)); + if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { + phba->hbqs[hbqno].buffer_count++; + posted++; + } else + (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); + } + spin_unlock_irqrestore(&phba->hbalock, flags); + return posted; +err: + spin_unlock_irqrestore(&phba->hbalock, flags); + while (!list_empty(&hbq_buf_list)) { + list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, + dbuf.list); + (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); + } + return 0; +} + +/** + * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware + * @phba: Pointer to HBA context object. + * @qno: HBQ number. + * + * This function posts more buffers to the HBQ. This function + * is called with no lock held. The function returns the number of HBQ entries + * successfully allocated. 
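+ * For the ELS/CT HBQ described by lpfc_els_hbq above (entry_count 256,
+ * add_count 40) each call tops the queue up by at most 40 buffers, with
+ * the fill routine capping buffer_count at entry_count; on SLI4 ports
+ * this is a no-op that returns 0.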
+ **/ +int +lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) +{ + if (phba->sli_rev == LPFC_SLI_REV4) + return 0; + else + return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, + lpfc_hbq_defs[qno]->add_count); +} + +/** + * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ + * @phba: Pointer to HBA context object. + * @qno: HBQ queue number. + * + * This function is called from SLI initialization code path with + * no lock held to post initial HBQ buffers to firmware. The + * function returns the number of HBQ entries successfully allocated. + **/ +static int +lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) +{ + if (phba->sli_rev == LPFC_SLI_REV4) + return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, + lpfc_hbq_defs[qno]->entry_count); + else + return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, + lpfc_hbq_defs[qno]->init_count); +} + +/* + * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list + * + * This function removes the first hbq buffer on an hbq list and returns a + * pointer to that buffer. If it finds no buffers on the list it returns NULL. + **/ +static struct hbq_dmabuf * +lpfc_sli_hbqbuf_get(struct list_head *rb_list) +{ + struct lpfc_dmabuf *d_buf; + + list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); + if (!d_buf) + return NULL; + return container_of(d_buf, struct hbq_dmabuf, dbuf); +} + +/** + * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list + * @phba: Pointer to HBA context object. + * @hrq: HBQ number. + * + * This function removes the first RQ buffer on an RQ buffer list and returns a + * pointer to that buffer. If it finds no buffers on the list it returns NULL. + **/ +static struct rqb_dmabuf * +lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq) +{ + struct lpfc_dmabuf *h_buf; + struct lpfc_rqb *rqbp; + + rqbp = hrq->rqbp; + list_remove_head(&rqbp->rqb_buffer_list, h_buf, + struct lpfc_dmabuf, list); + if (!h_buf) + return NULL; + rqbp->buffer_count--; + return container_of(h_buf, struct rqb_dmabuf, hbuf); +} + +/** + * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag + * @phba: Pointer to HBA context object. + * @tag: Tag of the hbq buffer. + * + * This function searches for the hbq buffer associated with the given tag in + * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer + * otherwise it returns NULL. + **/ +static struct hbq_dmabuf * +lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) +{ + struct lpfc_dmabuf *d_buf; + struct hbq_dmabuf *hbq_buf; + uint32_t hbqno; + + hbqno = tag >> 16; + if (hbqno >= LPFC_MAX_HBQS) + return NULL; + + spin_lock_irq(&phba->hbalock); + list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { + hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); + if (hbq_buf->tag == tag) { + spin_unlock_irq(&phba->hbalock); + return hbq_buf; + } + } + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1803 Bad hbq tag. Data: x%x x%x\n", + tag, phba->hbqs[tag >> 16].buffer_count); + return NULL; +} + +/** + * lpfc_sli_free_hbq - Give back the hbq buffer to firmware + * @phba: Pointer to HBA context object. + * @hbq_buffer: Pointer to HBQ buffer. + * + * This function is called with hbalock. This function gives back + * the hbq buffer to firmware. If the HBQ does not have space to + * post the buffer, it will free the buffer. 
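+ * The owning HBQ is recovered from the tag assigned when the buffer was
+ * posted:
+ *   hbqno = hbq_buffer->tag >> 16
+ * e.g. a tag of 0x0001002a (illustrative) belongs to HBQ 1, so the buffer
+ * is re-posted there or handed to that HBQ's hbq_free_buffer on failure.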
+ **/ +void +lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) +{ + uint32_t hbqno; + + if (hbq_buffer) { + hbqno = hbq_buffer->tag >> 16; + if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) + (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); + } +} + +/** + * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox + * @mbxCommand: mailbox command code. + * + * This function is called by the mailbox event handler function to verify + * that the completed mailbox command is a legitimate mailbox command. If the + * completed mailbox is not known to the function, it will return MBX_SHUTDOWN + * and the mailbox event handler will take the HBA offline. + **/ +static int +lpfc_sli_chk_mbx_command(uint8_t mbxCommand) +{ + uint8_t ret; + + switch (mbxCommand) { + case MBX_LOAD_SM: + case MBX_READ_NV: + case MBX_WRITE_NV: + case MBX_WRITE_VPARMS: + case MBX_RUN_BIU_DIAG: + case MBX_INIT_LINK: + case MBX_DOWN_LINK: + case MBX_CONFIG_LINK: + case MBX_CONFIG_RING: + case MBX_RESET_RING: + case MBX_READ_CONFIG: + case MBX_READ_RCONFIG: + case MBX_READ_SPARM: + case MBX_READ_STATUS: + case MBX_READ_RPI: + case MBX_READ_XRI: + case MBX_READ_REV: + case MBX_READ_LNK_STAT: + case MBX_REG_LOGIN: + case MBX_UNREG_LOGIN: + case MBX_CLEAR_LA: + case MBX_DUMP_MEMORY: + case MBX_DUMP_CONTEXT: + case MBX_RUN_DIAGS: + case MBX_RESTART: + case MBX_UPDATE_CFG: + case MBX_DOWN_LOAD: + case MBX_DEL_LD_ENTRY: + case MBX_RUN_PROGRAM: + case MBX_SET_MASK: + case MBX_SET_VARIABLE: + case MBX_UNREG_D_ID: + case MBX_KILL_BOARD: + case MBX_CONFIG_FARP: + case MBX_BEACON: + case MBX_LOAD_AREA: + case MBX_RUN_BIU_DIAG64: + case MBX_CONFIG_PORT: + case MBX_READ_SPARM64: + case MBX_READ_RPI64: + case MBX_REG_LOGIN64: + case MBX_READ_TOPOLOGY: + case MBX_WRITE_WWN: + case MBX_SET_DEBUG: + case MBX_LOAD_EXP_ROM: + case MBX_ASYNCEVT_ENABLE: + case MBX_REG_VPI: + case MBX_UNREG_VPI: + case MBX_HEARTBEAT: + case MBX_PORT_CAPABILITIES: + case MBX_PORT_IOV_CONTROL: + case MBX_SLI4_CONFIG: + case MBX_SLI4_REQ_FTRS: + case MBX_REG_FCFI: + case MBX_UNREG_FCFI: + case MBX_REG_VFI: + case MBX_UNREG_VFI: + case MBX_INIT_VPI: + case MBX_INIT_VFI: + case MBX_RESUME_RPI: + case MBX_READ_EVENT_LOG_STATUS: + case MBX_READ_EVENT_LOG: + case MBX_SECURITY_MGMT: + case MBX_AUTH_PORT: + case MBX_ACCESS_VDATA: + ret = mbxCommand; + break; + default: + ret = MBX_SHUTDOWN; + break; + } + return ret; +} + +/** + * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler + * @phba: Pointer to HBA context object. + * @pmboxq: Pointer to mailbox command. + * + * This is completion handler function for mailbox commands issued from + * lpfc_sli_issue_mbox_wait function. This function is called by the + * mailbox event handler function with no lock held. This function + * will wake up thread waiting on the wait queue pointed by context1 + * of the mailbox. + **/ +void +lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) +{ + unsigned long drvr_flag; + struct completion *pmbox_done; + + /* + * If pmbox_done is empty, the driver thread gave up waiting and + * continued running. 
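+	 * Taking hbalock around the context3 read and the complete() call
+	 * keeps this completion from racing a waiter that timed out;
+	 * presumably the waiter clears context3 under the same lock before
+	 * returning (see lpfc_sli_issue_mbox_wait).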
+ */ + pmboxq->mbox_flag |= LPFC_MBX_WAKE; + spin_lock_irqsave(&phba->hbalock, drvr_flag); + pmbox_done = (struct completion *)pmboxq->context3; + if (pmbox_done) + complete(pmbox_done); + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + return; +} + +static void +__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + unsigned long iflags; + + if (ndlp->nlp_flag & NLP_RELEASE_RPI) { + lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag &= ~NLP_RELEASE_RPI; + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + spin_unlock_irqrestore(&ndlp->lock, iflags); + } + ndlp->nlp_flag &= ~NLP_UNREG_INP; +} + +void +lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + __lpfc_sli_rpi_release(vport, ndlp); +} + +/** + * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler + * @phba: Pointer to HBA context object. + * @pmb: Pointer to mailbox object. + * + * This function is the default mailbox completion handler. It + * frees the memory resources associated with the completed mailbox + * command. If the completed command is a REG_LOGIN mailbox command, + * this function will issue a UREG_LOGIN to re-claim the RPI. + **/ +void +lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct lpfc_dmabuf *mp; + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost; + uint16_t rpi, vpi; + int rc; + + /* + * If a REG_LOGIN succeeded after node is destroyed or node + * is in re-discovery driver need to cleanup the RPI. + */ + if (!(phba->pport->load_flag & FC_UNLOADING) && + pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && + !pmb->u.mb.mbxStatus) { + mp = (struct lpfc_dmabuf *)pmb->ctx_buf; + if (mp) { + pmb->ctx_buf = NULL; + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + } + rpi = pmb->u.mb.un.varWords[0]; + vpi = pmb->u.mb.un.varRegLogin.vpi; + if (phba->sli_rev == LPFC_SLI_REV4) + vpi -= phba->sli4_hba.max_cfg_param.vpi_base; + lpfc_unreg_login(phba, vpi, rpi, pmb); + pmb->vport = vport; + pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + if (rc != MBX_NOT_FINISHED) + return; + } + + if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && + !(phba->pport->load_flag & FC_UNLOADING) && + !pmb->u.mb.mbxStatus) { + shost = lpfc_shost_from_vport(vport); + spin_lock_irq(shost->host_lock); + vport->vpi_state |= LPFC_VPI_REGISTERED; + vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); + } + + if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { + ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; + lpfc_nlp_put(ndlp); + } + + if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { + ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; + + /* Check to see if there are any deferred events to process */ + if (ndlp) { + lpfc_printf_vlog( + vport, + KERN_INFO, LOG_MBOX | LOG_DISCOVERY, + "1438 UNREG cmpl deferred mbox x%x " + "on NPort x%x Data: x%x x%x x%px x%x x%x\n", + ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_flag, ndlp->nlp_defer_did, + ndlp, vport->load_flag, kref_read(&ndlp->kref)); + + if ((ndlp->nlp_flag & NLP_UNREG_INP) && + (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { + ndlp->nlp_flag &= ~NLP_UNREG_INP; + ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); + } else { + __lpfc_sli_rpi_release(vport, ndlp); + } + + /* The unreg_login mailbox is complete and had a + * reference that has to be released. The PLOGI + * got its own ref. 
+ */ + lpfc_nlp_put(ndlp); + pmb->ctx_ndlp = NULL; + } + } + + /* This nlp_put pairs with lpfc_sli4_resume_rpi */ + if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) { + ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; + lpfc_nlp_put(ndlp); + } + + /* Check security permission status on INIT_LINK mailbox command */ + if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && + (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2860 SLI authentication is required " + "for INIT_LINK but has not done yet\n"); + + if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) + lpfc_sli4_mbox_cmd_free(phba, pmb); + else + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); +} + /** + * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler + * @phba: Pointer to HBA context object. + * @pmb: Pointer to mailbox object. + * + * This function is the unreg rpi mailbox completion handler. It + * frees the memory resources associated with the completed mailbox + * command. An additional reference is put on the ndlp to prevent + * lpfc_nlp_release from freeing the rpi bit in the bitmask before + * the unreg mailbox command completes, this routine puts the + * reference back. + * + **/ +void +lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct lpfc_nodelist *ndlp; + + ndlp = pmb->ctx_ndlp; + if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { + if (phba->sli_rev == LPFC_SLI_REV4 && + (bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf) >= + LPFC_SLI_INTF_IF_TYPE_2)) { + if (ndlp) { + lpfc_printf_vlog( + vport, KERN_INFO, LOG_MBOX | LOG_SLI, + "0010 UNREG_LOGIN vpi:%x " + "rpi:%x DID:%x defer x%x flg x%x " + "x%px\n", + vport->vpi, ndlp->nlp_rpi, + ndlp->nlp_DID, ndlp->nlp_defer_did, + ndlp->nlp_flag, + ndlp); + ndlp->nlp_flag &= ~NLP_LOGO_ACC; + + /* Check to see if there are any deferred + * events to process + */ + if ((ndlp->nlp_flag & NLP_UNREG_INP) && + (ndlp->nlp_defer_did != + NLP_EVT_NOTHING_PENDING)) { + lpfc_printf_vlog( + vport, KERN_INFO, LOG_DISCOVERY, + "4111 UNREG cmpl deferred " + "clr x%x on " + "NPort x%x Data: x%x x%px\n", + ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_defer_did, ndlp); + ndlp->nlp_flag &= ~NLP_UNREG_INP; + ndlp->nlp_defer_did = + NLP_EVT_NOTHING_PENDING; + lpfc_issue_els_plogi( + vport, ndlp->nlp_DID, 0); + } else { + __lpfc_sli_rpi_release(vport, ndlp); + } + lpfc_nlp_put(ndlp); + } + } + } + + mempool_free(pmb, phba->mbox_mem_pool); +} + +/** + * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware + * @phba: Pointer to HBA context object. + * + * This function is called with no lock held. This function processes all + * the completed mailbox commands and gives it to upper layers. The interrupt + * service routine processes mailbox completion interrupt and adds completed + * mailbox commands to the mboxq_cmpl queue and signals the worker thread. + * Worker thread call lpfc_sli_handle_mb_event, which will return the + * completed mailbox commands in mboxq_cmpl queue to the upper layers. This + * function returns the mailbox commands to the upper layer by calling the + * completion handler function of each mailbox. 
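+ * Per-mailbox handling, as implemented below:
+ *   - completed mailboxes are spliced from mboxq_cmpl under hbalock
+ *   - an unknown command sets LPFC_HBA_ERROR and calls lpfc_handle_eratt()
+ *   - an MBXERR_NO_RESOURCES status is cleared and the command is
+ *     re-issued with MBX_NOWAIT
+ *   - otherwise the registered mbox_cmpl handler, if any, is invoked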
+ **/ +int +lpfc_sli_handle_mb_event(struct lpfc_hba *phba) +{ + MAILBOX_t *pmbox; + LPFC_MBOXQ_t *pmb; + int rc; + LIST_HEAD(cmplq); + + phba->sli.slistat.mbox_event++; + + /* Get all completed mailboxe buffers into the cmplq */ + spin_lock_irq(&phba->hbalock); + list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); + spin_unlock_irq(&phba->hbalock); + + /* Get a Mailbox buffer to setup mailbox commands for callback */ + do { + list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); + if (pmb == NULL) + break; + + pmbox = &pmb->u.mb; + + if (pmbox->mbxCommand != MBX_HEARTBEAT) { + if (pmb->vport) { + lpfc_debugfs_disc_trc(pmb->vport, + LPFC_DISC_TRC_MBOX_VPORT, + "MBOX cmpl vport: cmd:x%x mb:x%x x%x", + (uint32_t)pmbox->mbxCommand, + pmbox->un.varWords[0], + pmbox->un.varWords[1]); + } + else { + lpfc_debugfs_disc_trc(phba->pport, + LPFC_DISC_TRC_MBOX, + "MBOX cmpl: cmd:x%x mb:x%x x%x", + (uint32_t)pmbox->mbxCommand, + pmbox->un.varWords[0], + pmbox->un.varWords[1]); + } + } + + /* + * It is a fatal error if unknown mbox command completion. + */ + if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == + MBX_SHUTDOWN) { + /* Unknown mailbox command compl */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "(%d):0323 Unknown Mailbox command " + "x%x (x%x/x%x) Cmpl\n", + pmb->vport ? pmb->vport->vpi : + LPFC_VPORT_UNKNOWN, + pmbox->mbxCommand, + lpfc_sli_config_mbox_subsys_get(phba, + pmb), + lpfc_sli_config_mbox_opcode_get(phba, + pmb)); + phba->link_state = LPFC_HBA_ERROR; + phba->work_hs = HS_FFER3; + lpfc_handle_eratt(phba); + continue; + } + + if (pmbox->mbxStatus) { + phba->sli.slistat.mbox_stat_err++; + if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { + /* Mbox cmd cmpl error - RETRYing */ + lpfc_printf_log(phba, KERN_INFO, + LOG_MBOX | LOG_SLI, + "(%d):0305 Mbox cmd cmpl " + "error - RETRYing Data: x%x " + "(x%x/x%x) x%x x%x x%x\n", + pmb->vport ? pmb->vport->vpi : + LPFC_VPORT_UNKNOWN, + pmbox->mbxCommand, + lpfc_sli_config_mbox_subsys_get(phba, + pmb), + lpfc_sli_config_mbox_opcode_get(phba, + pmb), + pmbox->mbxStatus, + pmbox->un.varWords[0], + pmb->vport ? pmb->vport->port_state : + LPFC_VPORT_UNKNOWN); + pmbox->mbxStatus = 0; + pmbox->mbxOwner = OWN_HOST; + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + if (rc != MBX_NOT_FINISHED) + continue; + } + } + + /* Mailbox cmd Cmpl */ + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps " + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "x%x x%x x%x\n", + pmb->vport ? pmb->vport->vpi : 0, + pmbox->mbxCommand, + lpfc_sli_config_mbox_subsys_get(phba, pmb), + lpfc_sli_config_mbox_opcode_get(phba, pmb), + pmb->mbox_cmpl, + *((uint32_t *) pmbox), + pmbox->un.varWords[0], + pmbox->un.varWords[1], + pmbox->un.varWords[2], + pmbox->un.varWords[3], + pmbox->un.varWords[4], + pmbox->un.varWords[5], + pmbox->un.varWords[6], + pmbox->un.varWords[7], + pmbox->un.varWords[8], + pmbox->un.varWords[9], + pmbox->un.varWords[10]); + + if (pmb->mbox_cmpl) + pmb->mbox_cmpl(phba,pmb); + } while (1); + return 0; +} + +/** + * lpfc_sli_get_buff - Get the buffer associated with the buffer tag + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @tag: buffer tag. + * + * This function is called with no lock held. When QUE_BUFTAG_BIT bit + * is set in the tag the buffer is posted for a particular exchange, + * the function will return the buffer without replacing the buffer. 
+ * If the buffer is for unsolicited ELS or CT traffic, this function + * returns the buffer and also posts another buffer to the firmware. + **/ +static struct lpfc_dmabuf * +lpfc_sli_get_buff(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + uint32_t tag) +{ + struct hbq_dmabuf *hbq_entry; + + if (tag & QUE_BUFTAG_BIT) + return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); + hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); + if (!hbq_entry) + return NULL; + return &hbq_entry->dbuf; +} + +/** + * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer + * containing a NVME LS request. + * @phba: pointer to lpfc hba data structure. + * @piocb: pointer to the iocbq struct representing the sequence starting + * frame. + * + * This routine initially validates the NVME LS, validates there is a login + * with the port that sent the LS, and then calls the appropriate nvme host + * or target LS request handler. + **/ +static void +lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) +{ + struct lpfc_nodelist *ndlp; + struct lpfc_dmabuf *d_buf; + struct hbq_dmabuf *nvmebuf; + struct fc_frame_header *fc_hdr; + struct lpfc_async_xchg_ctx *axchg = NULL; + char *failwhy = NULL; + uint32_t oxid, sid, did, fctl, size; + int ret = 1; + + d_buf = piocb->cmd_dmabuf; + + nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf); + fc_hdr = nvmebuf->hbuf.virt; + oxid = be16_to_cpu(fc_hdr->fh_ox_id); + sid = sli4_sid_from_fc_hdr(fc_hdr); + did = sli4_did_from_fc_hdr(fc_hdr); + fctl = (fc_hdr->fh_f_ctl[0] << 16 | + fc_hdr->fh_f_ctl[1] << 8 | + fc_hdr->fh_f_ctl[2]); + size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); + + lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n", + oxid, size, sid); + + if (phba->pport->load_flag & FC_UNLOADING) { + failwhy = "Driver Unloading"; + } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { + failwhy = "NVME FC4 Disabled"; + } else if (!phba->nvmet_support && !phba->pport->localport) { + failwhy = "No Localport"; + } else if (phba->nvmet_support && !phba->targetport) { + failwhy = "No Targetport"; + } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) { + failwhy = "Bad NVME LS R_CTL"; + } else if (unlikely((fctl & 0x00FF0000) != + (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) { + failwhy = "Bad NVME LS F_CTL"; + } else { + axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC); + if (!axchg) + failwhy = "No CTX memory"; + } + + if (unlikely(failwhy)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6154 Drop NVME LS: SID %06X OXID x%X: %s\n", + sid, oxid, failwhy); + goto out_fail; + } + + /* validate the source of the LS is logged in */ + ndlp = lpfc_findnode_did(phba->pport, sid); + if (!ndlp || + ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + "6216 NVME Unsol rcv: No ndlp: " + "NPort_ID x%x oxid x%x\n", + sid, oxid); + goto out_fail; + } + + axchg->phba = phba; + axchg->ndlp = ndlp; + axchg->size = size; + axchg->oxid = oxid; + axchg->sid = sid; + axchg->wqeq = NULL; + axchg->state = LPFC_NVME_STE_LS_RCV; + axchg->entry_cnt = 1; + axchg->rqb_buffer = (void *)nvmebuf; + axchg->hdwq = &phba->sli4_hba.hdwq[0]; + axchg->payload = nvmebuf->dbuf.virt; + INIT_LIST_HEAD(&axchg->list); + + if (phba->nvmet_support) { + ret = lpfc_nvmet_handle_lsreq(phba, axchg); + spin_lock_irq(&ndlp->lock); + if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) { + ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH; + 
spin_unlock_irq(&ndlp->lock); + + /* This reference is a single occurrence to hold the + * node valid until the nvmet transport calls + * host_release. + */ + if (!lpfc_nlp_get(ndlp)) + goto out_fail; + + lpfc_printf_log(phba, KERN_ERR, LOG_NODE, + "6206 NVMET unsol ls_req ndlp x%px " + "DID x%x xflags x%x refcnt %d\n", + ndlp, ndlp->nlp_DID, + ndlp->fc4_xpt_flags, + kref_read(&ndlp->kref)); + } else { + spin_unlock_irq(&ndlp->lock); + } + } else { + ret = lpfc_nvme_handle_lsreq(phba, axchg); + } + + /* if zero, LS was successfully handled. If non-zero, LS not handled */ + if (!ret) + return; + +out_fail: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X " + "NVMe%s handler failed %d\n", + did, sid, oxid, + (phba->nvmet_support) ? "T" : "I", ret); + + /* recycle receive buffer */ + lpfc_in_buf_free(phba, &nvmebuf->dbuf); + + /* If start of new exchange, abort it */ + if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX))) + ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid); + + if (ret) + kfree(axchg); +} + +/** + * lpfc_complete_unsol_iocb - Complete an unsolicited sequence + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @saveq: Pointer to the iocbq struct representing the sequence starting frame. + * @fch_r_ctl: the r_ctl for the first frame of the sequence. + * @fch_type: the type for the first frame of the sequence. + * + * This function is called with no lock held. This function uses the r_ctl and + * type of the received sequence to find the correct callback function to call + * to process the sequence. + **/ +static int +lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, + uint32_t fch_type) +{ + int i; + + switch (fch_type) { + case FC_TYPE_NVME: + lpfc_nvme_unsol_ls_handler(phba, saveq); + return 1; + default: + break; + } + + /* unSolicited Responses */ + if (pring->prt[0].profile) { + if (pring->prt[0].lpfc_sli_rcv_unsol_event) + (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, + saveq); + return 1; + } + /* We must search, based on rctl / type + for the right routine */ + for (i = 0; i < pring->num_mask; i++) { + if ((pring->prt[i].rctl == fch_r_ctl) && + (pring->prt[i].type == fch_type)) { + if (pring->prt[i].lpfc_sli_rcv_unsol_event) + (pring->prt[i].lpfc_sli_rcv_unsol_event) + (phba, pring, saveq); + return 1; + } + } + return 0; +} + +static void +lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba, + struct lpfc_iocbq *saveq) +{ + IOCB_t *irsp; + union lpfc_wqe128 *wqe; + u16 i = 0; + + irsp = &saveq->iocb; + wqe = &saveq->wqe; + + /* Fill wcqe with the IOCB status fields */ + bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus); + saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount; + saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4]; + saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len; + + /* Source ID */ + bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo); + + /* rx-id of the response frame */ + bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext); + + /* ox-id of the frame */ + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + irsp->unsli3.rcvsli3.ox_id); + + /* DID */ + bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, + irsp->un.rcvels.remoteID); + + /* unsol data len */ + for (i = 0; i < irsp->ulpBdeCount; i++) { + struct lpfc_hbq_entry *hbqe = NULL; + + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + if (i == 0) { + hbqe = 
(struct lpfc_hbq_entry *) + &irsp->un.ulpWord[0]; + saveq->wqe.gen_req.bde.tus.f.bdeSize = + hbqe->bde.tus.f.bdeSize; + } else if (i == 1) { + hbqe = (struct lpfc_hbq_entry *) + &irsp->unsli3.sli3Words[4]; + saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize; + } + } + } +} + +/** + * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @saveq: Pointer to the unsolicited iocb. + * + * This function is called with no lock held by the ring event handler + * when there is an unsolicited iocb posted to the response ring by the + * firmware. This function gets the buffer associated with the iocbs + * and calls the event handler for the ring. This function handles both + * qring buffers and hbq buffers. + * When the function returns 1 the caller can free the iocb object otherwise + * upper layer functions will free the iocb objects. + **/ +static int +lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *saveq) +{ + IOCB_t * irsp; + WORD5 * w5p; + dma_addr_t paddr; + uint32_t Rctl, Type; + struct lpfc_iocbq *iocbq; + struct lpfc_dmabuf *dmzbuf; + + irsp = &saveq->iocb; + saveq->vport = phba->pport; + + if (irsp->ulpCommand == CMD_ASYNC_STATUS) { + if (pring->lpfc_sli_rcv_async_status) + pring->lpfc_sli_rcv_async_status(phba, pring, saveq); + else + lpfc_printf_log(phba, + KERN_WARNING, + LOG_SLI, + "0316 Ring %d handler: unexpected " + "ASYNC_STATUS iocb received evt_code " + "0x%x\n", + pring->ringno, + irsp->un.asyncstat.evt_code); + return 1; + } + + if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && + (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { + if (irsp->ulpBdeCount > 0) { + dmzbuf = lpfc_sli_get_buff(phba, pring, + irsp->un.ulpWord[3]); + lpfc_in_buf_free(phba, dmzbuf); + } + + if (irsp->ulpBdeCount > 1) { + dmzbuf = lpfc_sli_get_buff(phba, pring, + irsp->unsli3.sli3Words[3]); + lpfc_in_buf_free(phba, dmzbuf); + } + + if (irsp->ulpBdeCount > 2) { + dmzbuf = lpfc_sli_get_buff(phba, pring, + irsp->unsli3.sli3Words[7]); + lpfc_in_buf_free(phba, dmzbuf); + } + + return 1; + } + + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + if (irsp->ulpBdeCount != 0) { + saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring, + irsp->un.ulpWord[3]); + if (!saveq->cmd_dmabuf) + lpfc_printf_log(phba, + KERN_ERR, + LOG_SLI, + "0341 Ring %d Cannot find buffer for " + "an unsolicited iocb. tag 0x%x\n", + pring->ringno, + irsp->un.ulpWord[3]); + } + if (irsp->ulpBdeCount == 2) { + saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring, + irsp->unsli3.sli3Words[7]); + if (!saveq->bpl_dmabuf) + lpfc_printf_log(phba, + KERN_ERR, + LOG_SLI, + "0342 Ring %d Cannot find buffer for an" + " unsolicited iocb. tag 0x%x\n", + pring->ringno, + irsp->unsli3.sli3Words[7]); + } + list_for_each_entry(iocbq, &saveq->list, list) { + irsp = &iocbq->iocb; + if (irsp->ulpBdeCount != 0) { + iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba, + pring, + irsp->un.ulpWord[3]); + if (!iocbq->cmd_dmabuf) + lpfc_printf_log(phba, + KERN_ERR, + LOG_SLI, + "0343 Ring %d Cannot find " + "buffer for an unsolicited iocb" + ". tag 0x%x\n", pring->ringno, + irsp->un.ulpWord[3]); + } + if (irsp->ulpBdeCount == 2) { + iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba, + pring, + irsp->unsli3.sli3Words[7]); + if (!iocbq->bpl_dmabuf) + lpfc_printf_log(phba, + KERN_ERR, + LOG_SLI, + "0344 Ring %d Cannot find " + "buffer for an unsolicited " + "iocb. 
tag 0x%x\n", + pring->ringno, + irsp->unsli3.sli3Words[7]); + } + } + } else { + paddr = getPaddr(irsp->un.cont64[0].addrHigh, + irsp->un.cont64[0].addrLow); + saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, + paddr); + if (irsp->ulpBdeCount == 2) { + paddr = getPaddr(irsp->un.cont64[1].addrHigh, + irsp->un.cont64[1].addrLow); + saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, + pring, + paddr); + } + } + + if (irsp->ulpBdeCount != 0 && + (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || + irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { + int found = 0; + + /* search continue save q for same XRI */ + list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { + if (iocbq->iocb.unsli3.rcvsli3.ox_id == + saveq->iocb.unsli3.rcvsli3.ox_id) { + list_add_tail(&saveq->list, &iocbq->list); + found = 1; + break; + } + } + if (!found) + list_add_tail(&saveq->clist, + &pring->iocb_continue_saveq); + + if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { + list_del_init(&iocbq->clist); + saveq = iocbq; + irsp = &saveq->iocb; + } else { + return 0; + } + } + if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || + (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || + (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { + Rctl = FC_RCTL_ELS_REQ; + Type = FC_TYPE_ELS; + } else { + w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); + Rctl = w5p->hcsw.Rctl; + Type = w5p->hcsw.Type; + + /* Firmware Workaround */ + if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && + (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || + irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { + Rctl = FC_RCTL_ELS_REQ; + Type = FC_TYPE_ELS; + w5p->hcsw.Rctl = Rctl; + w5p->hcsw.Type = Type; + } + } + + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX || + irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { + if (irsp->unsli3.rcvsli3.vpi == 0xffff) + saveq->vport = phba->pport; + else + saveq->vport = lpfc_find_vport_by_vpid(phba, + irsp->unsli3.rcvsli3.vpi); + } + + /* Prepare WQE with Unsol frame */ + lpfc_sli_prep_unsol_wqe(phba, saveq); + + if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0313 Ring %d handler: unexpected Rctl x%x " + "Type x%x received\n", + pring->ringno, Rctl, Type); + + return 1; +} + +/** + * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @prspiocb: Pointer to response iocb object. + * + * This function looks up the iocb_lookup table to get the command iocb + * corresponding to the given response iocb using the iotag of the + * response iocb. The driver calls this function with the hbalock held + * for SLI3 ports or the ring lock held for SLI4 ports. + * This function returns the command iocb object if it finds the command + * iocb else returns NULL. 
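+ * Lookup sketch: the response carries the iotag assigned at submit time
+ * (get_wqe_reqtag() on SLI4, iocb.ulpIoTag on SLI3), which directly
+ * indexes phba->sli.iocbq_lookup[]. Only entries still flagged
+ * LPFC_IO_ON_TXCMPLQ are returned, and the lookup unlinks them from the
+ * txcmplq.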
+ **/ +static struct lpfc_iocbq * +lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + struct lpfc_iocbq *prspiocb) +{ + struct lpfc_iocbq *cmd_iocb = NULL; + u16 iotag; + + if (phba->sli_rev == LPFC_SLI_REV4) + iotag = get_wqe_reqtag(prspiocb); + else + iotag = prspiocb->iocb.ulpIoTag; + + if (iotag != 0 && iotag <= phba->sli.last_iotag) { + cmd_iocb = phba->sli.iocbq_lookup[iotag]; + if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) { + /* remove from txcmpl queue list */ + list_del_init(&cmd_iocb->list); + cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; + pring->txcmplq_cnt--; + return cmd_iocb; + } + } + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0317 iotag x%x is out of " + "range: max iotag x%x\n", + iotag, phba->sli.last_iotag); + return NULL; +} + +/** + * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @iotag: IOCB tag. + * + * This function looks up the iocb_lookup table to get the command iocb + * corresponding to the given iotag. The driver calls this function with + * the ring lock held because this function is an SLI4 port only helper. + * This function returns the command iocb object if it finds the command + * iocb else returns NULL. + **/ +static struct lpfc_iocbq * +lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, uint16_t iotag) +{ + struct lpfc_iocbq *cmd_iocb = NULL; + + if (iotag != 0 && iotag <= phba->sli.last_iotag) { + cmd_iocb = phba->sli.iocbq_lookup[iotag]; + if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) { + /* remove from txcmpl queue list */ + list_del_init(&cmd_iocb->list); + cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; + pring->txcmplq_cnt--; + return cmd_iocb; + } + } + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0372 iotag x%x lookup error: max iotag (x%x) " + "cmd_flag x%x\n", + iotag, phba->sli.last_iotag, + cmd_iocb ? cmd_iocb->cmd_flag : 0xffff); + return NULL; +} + +/** + * lpfc_sli_process_sol_iocb - process solicited iocb completion + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @saveq: Pointer to the response iocb to be processed. + * + * This function is called by the ring event handler for non-fcp + * rings when there is a new response iocb in the response ring. + * The caller is not required to hold any locks. This function + * gets the command iocb associated with the response iocb and + * calls the completion handler for the command iocb. If there + * is no completion handler, the function will free the resources + * associated with command iocb. If the response iocb is for + * an already aborted command iocb, the status of the completion + * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. + * This function always returns 1. 
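+ * For an aborted ELS command the SLI3 rewrite below looks like:
+ *   saveq->iocb.ulpStatus     = IOSTAT_LOCAL_REJECT;
+ *   saveq->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
+ *   saveq->cmd_flag          |= LPFC_DELAY_MEM_FREE;
+ * with the last flag deferring the data buffer free in case the firmware
+ * is still DMAing the payload.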
+ **/ +static int +lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *saveq) +{ + struct lpfc_iocbq *cmdiocbp; + unsigned long iflag; + u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag; + + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock_irqsave(&pring->ring_lock, iflag); + else + spin_lock_irqsave(&phba->hbalock, iflag); + cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock_irqrestore(&pring->ring_lock, iflag); + else + spin_unlock_irqrestore(&phba->hbalock, iflag); + + ulp_command = get_job_cmnd(phba, saveq); + ulp_status = get_job_ulpstatus(phba, saveq); + ulp_word4 = get_job_word4(phba, saveq); + ulp_context = get_job_ulpcontext(phba, saveq); + if (phba->sli_rev == LPFC_SLI_REV4) + iotag = get_wqe_reqtag(saveq); + else + iotag = saveq->iocb.ulpIoTag; + + if (cmdiocbp) { + ulp_command = get_job_cmnd(phba, cmdiocbp); + if (cmdiocbp->cmd_cmpl) { + /* + * If an ELS command failed send an event to mgmt + * application. + */ + if (ulp_status && + (pring->ringno == LPFC_ELS_RING) && + (ulp_command == CMD_ELS_REQUEST64_CR)) + lpfc_send_els_failure_event(phba, + cmdiocbp, saveq); + + /* + * Post all ELS completions to the worker thread. + * All other are passed to the completion callback. + */ + if (pring->ringno == LPFC_ELS_RING) { + if ((phba->sli_rev < LPFC_SLI_REV4) && + (cmdiocbp->cmd_flag & + LPFC_DRIVER_ABORTED)) { + spin_lock_irqsave(&phba->hbalock, + iflag); + cmdiocbp->cmd_flag &= + ~LPFC_DRIVER_ABORTED; + spin_unlock_irqrestore(&phba->hbalock, + iflag); + saveq->iocb.ulpStatus = + IOSTAT_LOCAL_REJECT; + saveq->iocb.un.ulpWord[4] = + IOERR_SLI_ABORTED; + + /* Firmware could still be in progress + * of DMAing payload, so don't free data + * buffer till after a hbeat. + */ + spin_lock_irqsave(&phba->hbalock, + iflag); + saveq->cmd_flag |= LPFC_DELAY_MEM_FREE; + spin_unlock_irqrestore(&phba->hbalock, + iflag); + } + if (phba->sli_rev == LPFC_SLI_REV4) { + if (saveq->cmd_flag & + LPFC_EXCHANGE_BUSY) { + /* Set cmdiocb flag for the + * exchange busy so sgl (xri) + * will not be released until + * the abort xri is received + * from hba. + */ + spin_lock_irqsave( + &phba->hbalock, iflag); + cmdiocbp->cmd_flag |= + LPFC_EXCHANGE_BUSY; + spin_unlock_irqrestore( + &phba->hbalock, iflag); + } + if (cmdiocbp->cmd_flag & + LPFC_DRIVER_ABORTED) { + /* + * Clear LPFC_DRIVER_ABORTED + * bit in case it was driver + * initiated abort. + */ + spin_lock_irqsave( + &phba->hbalock, iflag); + cmdiocbp->cmd_flag &= + ~LPFC_DRIVER_ABORTED; + spin_unlock_irqrestore( + &phba->hbalock, iflag); + set_job_ulpstatus(cmdiocbp, + IOSTAT_LOCAL_REJECT); + set_job_ulpword4(cmdiocbp, + IOERR_ABORT_REQUESTED); + /* + * For SLI4, irspiocb contains + * NO_XRI in sli_xritag, it + * shall not affect releasing + * sgl (xri) process. + */ + set_job_ulpstatus(saveq, + IOSTAT_LOCAL_REJECT); + set_job_ulpword4(saveq, + IOERR_SLI_ABORTED); + spin_lock_irqsave( + &phba->hbalock, iflag); + saveq->cmd_flag |= + LPFC_DELAY_MEM_FREE; + spin_unlock_irqrestore( + &phba->hbalock, iflag); + } + } + } + cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq); + } else + lpfc_sli_release_iocbq(phba, cmdiocbp); + } else { + /* + * Unknown initiating command based on the response iotag. + * This could be the case on the ELS ring because of + * lpfc_els_abort(). 
+ */ + if (pring->ringno != LPFC_ELS_RING) { + /* + * Ring handler: unexpected completion IoTag + * + */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0322 Ring %d handler: " + "unexpected completion IoTag x%x " + "Data: x%x x%x x%x x%x\n", + pring->ringno, iotag, ulp_status, + ulp_word4, ulp_command, ulp_context); + } + } + + return 1; +} + +/** + * lpfc_sli_rsp_pointers_error - Response ring pointer error handler + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * + * This function is called from the iocb ring event handlers when + * put pointer is ahead of the get pointer for a ring. This function signal + * an error attention condition to the worker thread and the worker + * thread will transition the HBA to offline state. + **/ +static void +lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) +{ + struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; + /* + * Ring handler: portRspPut is bigger than + * rsp ring + */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0312 Ring %d handler: portRspPut %d " + "is bigger than rsp ring %d\n", + pring->ringno, le32_to_cpu(pgp->rspPutInx), + pring->sli.sli3.numRiocb); + + phba->link_state = LPFC_HBA_ERROR; + + /* + * All error attention handlers are posted to + * worker thread + */ + phba->work_ha |= HA_ERATT; + phba->work_hs = HS_FFER3; + + lpfc_worker_wake_up(phba); + + return; +} + +/** + * lpfc_poll_eratt - Error attention polling timer timeout handler + * @t: Context to fetch pointer to address of HBA context object from. + * + * This function is invoked by the Error Attention polling timer when the + * timer times out. It will check the SLI Error Attention register for + * possible attention events. If so, it will post an Error Attention event + * and wake up worker thread to process it. Otherwise, it will set up the + * Error Attention polling timer for the next poll. + **/ +void lpfc_poll_eratt(struct timer_list *t) +{ + struct lpfc_hba *phba; + uint32_t eratt = 0; + uint64_t sli_intr, cnt; + + phba = from_timer(phba, t, eratt_poll); + if (!(phba->hba_flag & HBA_SETUP)) + return; + + /* Here we will also keep track of interrupts per sec of the hba */ + sli_intr = phba->sli.slistat.sli_intr; + + if (phba->sli.slistat.sli_prev_intr > sli_intr) + cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + + sli_intr); + else + cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); + + /* 64-bit integer division not supported on 32-bit x86 - use do_div */ + do_div(cnt, phba->eratt_poll_interval); + phba->sli.slistat.sli_ips = cnt; + + phba->sli.slistat.sli_prev_intr = sli_intr; + + /* Check chip HA register for error event */ + eratt = lpfc_sli_check_eratt(phba); + + if (eratt) + /* Tell the worker thread there is work to do */ + lpfc_worker_wake_up(phba); + else + /* Restart the timer for next eratt poll */ + mod_timer(&phba->eratt_poll, + jiffies + + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); + return; +} + + +/** + * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @mask: Host attention register mask for this ring. + * + * This function is called from the interrupt context when there is a ring + * event for the fcp ring. The caller does not hold any lock. + * The function processes each response iocb in the response ring until it + * finds an iocb with LE bit set and chains all the iocbs up to the iocb with + * LE bit set. 
The function will call the completion handler of the command iocb + * if the response iocb indicates a completion for a command iocb or it is + * an abort completion. The function will call lpfc_sli_process_unsol_iocb + * function if this is an unsolicited iocb. + * This routine presumes LPFC_FCP_RING handling and doesn't bother + * to check it explicitly. + */ +int +lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, uint32_t mask) +{ + struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; + IOCB_t *irsp = NULL; + IOCB_t *entry = NULL; + struct lpfc_iocbq *cmdiocbq = NULL; + struct lpfc_iocbq rspiocbq; + uint32_t status; + uint32_t portRspPut, portRspMax; + int rc = 1; + lpfc_iocb_type type; + unsigned long iflag; + uint32_t rsp_cmpl = 0; + + spin_lock_irqsave(&phba->hbalock, iflag); + pring->stats.iocb_event++; + + /* + * The next available response entry should never exceed the maximum + * entries. If it does, treat it as an adapter hardware error. + */ + portRspMax = pring->sli.sli3.numRiocb; + portRspPut = le32_to_cpu(pgp->rspPutInx); + if (unlikely(portRspPut >= portRspMax)) { + lpfc_sli_rsp_pointers_error(phba, pring); + spin_unlock_irqrestore(&phba->hbalock, iflag); + return 1; + } + if (phba->fcp_ring_in_use) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + return 1; + } else + phba->fcp_ring_in_use = 1; + + rmb(); + while (pring->sli.sli3.rspidx != portRspPut) { + /* + * Fetch an entry off the ring and copy it into a local data + * structure. The copy involves a byte-swap since the + * network byte order and pci byte orders are different. + */ + entry = lpfc_resp_iocb(phba, pring); + phba->last_completion_time = jiffies; + + if (++pring->sli.sli3.rspidx >= portRspMax) + pring->sli.sli3.rspidx = 0; + + lpfc_sli_pcimem_bcopy((uint32_t *) entry, + (uint32_t *) &rspiocbq.iocb, + phba->iocb_rsp_size); + INIT_LIST_HEAD(&(rspiocbq.list)); + irsp = &rspiocbq.iocb; + + type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); + pring->stats.iocb_rsp++; + rsp_cmpl++; + + if (unlikely(irsp->ulpStatus)) { + /* + * If resource errors reported from HBA, reduce + * queuedepths of the SCSI device. + */ + if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && + ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == + IOERR_NO_RESOURCES)) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + phba->lpfc_rampdown_queue_depth(phba); + spin_lock_irqsave(&phba->hbalock, iflag); + } + + /* Rsp ring error: IOCB */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0336 Rsp Ring %d error: IOCB Data: " + "x%x x%x x%x x%x x%x x%x x%x x%x\n", + pring->ringno, + irsp->un.ulpWord[0], + irsp->un.ulpWord[1], + irsp->un.ulpWord[2], + irsp->un.ulpWord[3], + irsp->un.ulpWord[4], + irsp->un.ulpWord[5], + *(uint32_t *)&irsp->un1, + *((uint32_t *)&irsp->un1 + 1)); + } + + switch (type) { + case LPFC_ABORT_IOCB: + case LPFC_SOL_IOCB: + /* + * Idle exchange closed via ABTS from port. No iocb + * resources need to be recovered. + */ + if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0333 IOCB cmd 0x%x" + " processed. 
Skipping" + " completion\n", + irsp->ulpCommand); + break; + } + + cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, + &rspiocbq); + if (unlikely(!cmdiocbq)) + break; + if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) + cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; + if (cmdiocbq->cmd_cmpl) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq); + spin_lock_irqsave(&phba->hbalock, iflag); + } + break; + case LPFC_UNSOL_IOCB: + spin_unlock_irqrestore(&phba->hbalock, iflag); + lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); + spin_lock_irqsave(&phba->hbalock, iflag); + break; + default: + if (irsp->ulpCommand == CMD_ADAPTER_MSG) { + char adaptermsg[LPFC_MAX_ADPTMSG]; + memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); + memcpy(&adaptermsg[0], (uint8_t *) irsp, + MAX_MSG_DATA); + dev_warn(&((phba->pcidev)->dev), + "lpfc%d: %s\n", + phba->brd_no, adaptermsg); + } else { + /* Unknown IOCB command */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0334 Unknown IOCB command " + "Data: x%x, x%x x%x x%x x%x\n", + type, irsp->ulpCommand, + irsp->ulpStatus, + irsp->ulpIoTag, + irsp->ulpContext); + } + break; + } + + /* + * The response IOCB has been processed. Update the ring + * pointer in SLIM. If the port response put pointer has not + * been updated, sync the pgp->rspPutInx and fetch the new port + * response put pointer. + */ + writel(pring->sli.sli3.rspidx, + &phba->host_gp[pring->ringno].rspGetInx); + + if (pring->sli.sli3.rspidx == portRspPut) + portRspPut = le32_to_cpu(pgp->rspPutInx); + } + + if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { + pring->stats.iocb_rsp_full++; + status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); + writel(status, phba->CAregaddr); + readl(phba->CAregaddr); + } + if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { + pring->flag &= ~LPFC_CALL_RING_AVAILABLE; + pring->stats.iocb_cmd_empty++; + + /* Force update of the local copy of cmdGetInx */ + pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); + lpfc_sli_resume_iocb(phba, pring); + + if ((pring->lpfc_sli_cmd_available)) + (pring->lpfc_sli_cmd_available) (phba, pring); + + } + + phba->fcp_ring_in_use = 0; + spin_unlock_irqrestore(&phba->hbalock, iflag); + return rc; +} + +/** + * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @rspiocbp: Pointer to driver response IOCB object. + * + * This function is called from the worker thread when there is a slow-path + * response IOCB to process. This function chains all the response iocbs until + * seeing the iocb with the LE bit set. The function will call + * lpfc_sli_process_sol_iocb function if the response iocb indicates a + * completion of a command iocb. The function will call the + * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. + * The function frees the resources or calls the completion handler if this + * iocb is an abort completion. The function returns NULL when the response + * iocb has the LE bit set and all the chained iocbs are processed, otherwise + * this function shall chain the iocb on to the iocb_continueq and return the + * response iocb passed in. 
+ **/ +static struct lpfc_iocbq * +lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *rspiocbp) +{ + struct lpfc_iocbq *saveq; + struct lpfc_iocbq *cmdiocb; + struct lpfc_iocbq *next_iocb; + IOCB_t *irsp; + uint32_t free_saveq; + u8 cmd_type; + lpfc_iocb_type type; + unsigned long iflag; + u32 ulp_status = get_job_ulpstatus(phba, rspiocbp); + u32 ulp_word4 = get_job_word4(phba, rspiocbp); + u32 ulp_command = get_job_cmnd(phba, rspiocbp); + int rc; + + spin_lock_irqsave(&phba->hbalock, iflag); + /* First add the response iocb to the countinueq list */ + list_add_tail(&rspiocbp->list, &pring->iocb_continueq); + pring->iocb_continueq_cnt++; + + /* + * By default, the driver expects to free all resources + * associated with this iocb completion. + */ + free_saveq = 1; + saveq = list_get_first(&pring->iocb_continueq, + struct lpfc_iocbq, list); + list_del_init(&pring->iocb_continueq); + pring->iocb_continueq_cnt = 0; + + pring->stats.iocb_rsp++; + + /* + * If resource errors reported from HBA, reduce + * queuedepths of the SCSI device. + */ + if (ulp_status == IOSTAT_LOCAL_REJECT && + ((ulp_word4 & IOERR_PARAM_MASK) == + IOERR_NO_RESOURCES)) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + phba->lpfc_rampdown_queue_depth(phba); + spin_lock_irqsave(&phba->hbalock, iflag); + } + + if (ulp_status) { + /* Rsp ring error: IOCB */ + if (phba->sli_rev < LPFC_SLI_REV4) { + irsp = &rspiocbp->iocb; + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0328 Rsp Ring %d error: ulp_status x%x " + "IOCB Data: " + "x%08x x%08x x%08x x%08x " + "x%08x x%08x x%08x x%08x " + "x%08x x%08x x%08x x%08x " + "x%08x x%08x x%08x x%08x\n", + pring->ringno, ulp_status, + get_job_ulpword(rspiocbp, 0), + get_job_ulpword(rspiocbp, 1), + get_job_ulpword(rspiocbp, 2), + get_job_ulpword(rspiocbp, 3), + get_job_ulpword(rspiocbp, 4), + get_job_ulpword(rspiocbp, 5), + *(((uint32_t *)irsp) + 6), + *(((uint32_t *)irsp) + 7), + *(((uint32_t *)irsp) + 8), + *(((uint32_t *)irsp) + 9), + *(((uint32_t *)irsp) + 10), + *(((uint32_t *)irsp) + 11), + *(((uint32_t *)irsp) + 12), + *(((uint32_t *)irsp) + 13), + *(((uint32_t *)irsp) + 14), + *(((uint32_t *)irsp) + 15)); + } else { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0321 Rsp Ring %d error: " + "IOCB Data: " + "x%x x%x x%x x%x\n", + pring->ringno, + rspiocbp->wcqe_cmpl.word0, + rspiocbp->wcqe_cmpl.total_data_placed, + rspiocbp->wcqe_cmpl.parameter, + rspiocbp->wcqe_cmpl.word3); + } + } + + + /* + * Fetch the iocb command type and call the correct completion + * routine. Solicited and Unsolicited IOCBs on the ELS ring + * get freed back to the lpfc_iocb_list by the discovery + * kernel thread. 
+ */ + cmd_type = ulp_command & CMD_IOCB_MASK; + type = lpfc_sli_iocb_cmd_type(cmd_type); + switch (type) { + case LPFC_SOL_IOCB: + spin_unlock_irqrestore(&phba->hbalock, iflag); + rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); + spin_lock_irqsave(&phba->hbalock, iflag); + break; + case LPFC_UNSOL_IOCB: + spin_unlock_irqrestore(&phba->hbalock, iflag); + rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); + spin_lock_irqsave(&phba->hbalock, iflag); + if (!rc) + free_saveq = 0; + break; + case LPFC_ABORT_IOCB: + cmdiocb = NULL; + if (ulp_command != CMD_XRI_ABORTED_CX) + cmdiocb = lpfc_sli_iocbq_lookup(phba, pring, + saveq); + if (cmdiocb) { + /* Call the specified completion routine */ + if (cmdiocb->cmd_cmpl) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + cmdiocb->cmd_cmpl(phba, cmdiocb, saveq); + spin_lock_irqsave(&phba->hbalock, iflag); + } else { + __lpfc_sli_release_iocbq(phba, cmdiocb); + } + } + break; + case LPFC_UNKNOWN_IOCB: + if (ulp_command == CMD_ADAPTER_MSG) { + char adaptermsg[LPFC_MAX_ADPTMSG]; + + memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); + memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe, + MAX_MSG_DATA); + dev_warn(&((phba->pcidev)->dev), + "lpfc%d: %s\n", + phba->brd_no, adaptermsg); + } else { + /* Unknown command */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0335 Unknown IOCB " + "command Data: x%x " + "x%x x%x x%x\n", + ulp_command, + ulp_status, + get_wqe_reqtag(rspiocbp), + get_job_ulpcontext(phba, rspiocbp)); + } + break; + } + + if (free_saveq) { + list_for_each_entry_safe(rspiocbp, next_iocb, + &saveq->list, list) { + list_del_init(&rspiocbp->list); + __lpfc_sli_release_iocbq(phba, rspiocbp); + } + __lpfc_sli_release_iocbq(phba, saveq); + } + rspiocbp = NULL; + spin_unlock_irqrestore(&phba->hbalock, iflag); + return rspiocbp; +} + +/** + * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @mask: Host attention register mask for this ring. + * + * This routine wraps the actual slow_ring event process routine from the + * API jump table function pointer from the lpfc_hba struct. + **/ +void +lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, uint32_t mask) +{ + phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); +} + +/** + * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @mask: Host attention register mask for this ring. + * + * This function is called from the worker thread when there is a ring event + * for non-fcp rings. The caller does not hold any lock. The function will + * remove each response iocb in the response ring and calls the handle + * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. + **/ +static void +lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, uint32_t mask) +{ + struct lpfc_pgp *pgp; + IOCB_t *entry; + IOCB_t *irsp = NULL; + struct lpfc_iocbq *rspiocbp = NULL; + uint32_t portRspPut, portRspMax; + unsigned long iflag; + uint32_t status; + + pgp = &phba->port_gp[pring->ringno]; + spin_lock_irqsave(&phba->hbalock, iflag); + pring->stats.iocb_event++; + + /* + * The next available response entry should never exceed the maximum + * entries. If it does, treat it as an adapter hardware error. 
+ */ + portRspMax = pring->sli.sli3.numRiocb; + portRspPut = le32_to_cpu(pgp->rspPutInx); + if (portRspPut >= portRspMax) { + /* + * Ring handler: portRspPut is bigger than + * rsp ring + */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0303 Ring %d handler: portRspPut %d " + "is bigger than rsp ring %d\n", + pring->ringno, portRspPut, portRspMax); + + phba->link_state = LPFC_HBA_ERROR; + spin_unlock_irqrestore(&phba->hbalock, iflag); + + phba->work_hs = HS_FFER3; + lpfc_handle_eratt(phba); + + return; + } + + rmb(); + while (pring->sli.sli3.rspidx != portRspPut) { + /* + * Build a completion list and call the appropriate handler. + * The process is to get the next available response iocb, get + * a free iocb from the list, copy the response data into the + * free iocb, insert to the continuation list, and update the + * next response index to slim. This process makes response + * iocb's in the ring available to DMA as fast as possible but + * pays a penalty for a copy operation. Since the iocb is + * only 32 bytes, this penalty is considered small relative to + * the PCI reads for register values and a slim write. When + * the ulpLe field is set, the entire Command has been + * received. + */ + entry = lpfc_resp_iocb(phba, pring); + + phba->last_completion_time = jiffies; + rspiocbp = __lpfc_sli_get_iocbq(phba); + if (rspiocbp == NULL) { + printk(KERN_ERR "%s: out of buffers! Failing " + "completion.\n", __func__); + break; + } + + lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, + phba->iocb_rsp_size); + irsp = &rspiocbp->iocb; + + if (++pring->sli.sli3.rspidx >= portRspMax) + pring->sli.sli3.rspidx = 0; + + if (pring->ringno == LPFC_ELS_RING) { + lpfc_debugfs_slow_ring_trc(phba, + "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", + *(((uint32_t *) irsp) + 4), + *(((uint32_t *) irsp) + 6), + *(((uint32_t *) irsp) + 7)); + } + + writel(pring->sli.sli3.rspidx, + &phba->host_gp[pring->ringno].rspGetInx); + + spin_unlock_irqrestore(&phba->hbalock, iflag); + /* Handle the response IOCB */ + rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); + spin_lock_irqsave(&phba->hbalock, iflag); + + /* + * If the port response put pointer has not been updated, sync + * the pgp->rspPutInx in the MAILBOX_tand fetch the new port + * response put pointer. + */ + if (pring->sli.sli3.rspidx == portRspPut) { + portRspPut = le32_to_cpu(pgp->rspPutInx); + } + } /* while (pring->sli.sli3.rspidx != portRspPut) */ + + if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { + /* At least one response entry has been freed */ + pring->stats.iocb_rsp_full++; + /* SET RxRE_RSP in Chip Att register */ + status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); + writel(status, phba->CAregaddr); + readl(phba->CAregaddr); /* flush */ + } + if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { + pring->flag &= ~LPFC_CALL_RING_AVAILABLE; + pring->stats.iocb_cmd_empty++; + + /* Force update of the local copy of cmdGetInx */ + pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); + lpfc_sli_resume_iocb(phba, pring); + + if ((pring->lpfc_sli_cmd_available)) + (pring->lpfc_sli_cmd_available) (phba, pring); + + } + + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; +} + +/** + * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @mask: Host attention register mask for this ring. 
+ * + * This function is called from the worker thread when there is a pending + * ELS response iocb on the driver internal slow-path response iocb worker + * queue. The caller does not hold any lock. The function will remove each + * response iocb from the response worker queue and calls the handle + * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. + **/ +static void +lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, uint32_t mask) +{ + struct lpfc_iocbq *irspiocbq; + struct hbq_dmabuf *dmabuf; + struct lpfc_cq_event *cq_event; + unsigned long iflag; + int count = 0; + + spin_lock_irqsave(&phba->hbalock, iflag); + phba->hba_flag &= ~HBA_SP_QUEUE_EVT; + spin_unlock_irqrestore(&phba->hbalock, iflag); + while (!list_empty(&phba->sli4_hba.sp_queue_event)) { + /* Get the response iocb from the head of work queue */ + spin_lock_irqsave(&phba->hbalock, iflag); + list_remove_head(&phba->sli4_hba.sp_queue_event, + cq_event, struct lpfc_cq_event, list); + spin_unlock_irqrestore(&phba->hbalock, iflag); + + switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { + case CQE_CODE_COMPL_WQE: + irspiocbq = container_of(cq_event, struct lpfc_iocbq, + cq_event); + /* Translate ELS WCQE to response IOCBQ */ + irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba, + irspiocbq); + if (irspiocbq) + lpfc_sli_sp_handle_rspiocb(phba, pring, + irspiocbq); + count++; + break; + case CQE_CODE_RECEIVE: + case CQE_CODE_RECEIVE_V1: + dmabuf = container_of(cq_event, struct hbq_dmabuf, + cq_event); + lpfc_sli4_handle_received_buffer(phba, dmabuf); + count++; + break; + default: + break; + } + + /* Limit the number of events to 64 to avoid soft lockups */ + if (count == 64) + break; + } +} + +/** + * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * + * This function aborts all iocbs in the given ring and frees all the iocb + * objects in txq. This function issues an abort iocb for all the iocb commands + * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before + * the return of this function. The caller is not required to hold any locks. + **/ +void +lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) +{ + LIST_HEAD(tx_completions); + LIST_HEAD(txcmplq_completions); + struct lpfc_iocbq *iocb, *next_iocb; + int offline; + + if (pring->ringno == LPFC_ELS_RING) { + lpfc_fabric_abort_hba(phba); + } + offline = pci_channel_offline(phba->pcidev); + + /* Error everything on txq and txcmplq + * First do the txq. 
+ */ + if (phba->sli_rev >= LPFC_SLI_REV4) { + spin_lock_irq(&pring->ring_lock); + list_splice_init(&pring->txq, &tx_completions); + pring->txq_cnt = 0; + + if (offline) { + list_splice_init(&pring->txcmplq, + &txcmplq_completions); + } else { + /* Next issue ABTS for everything on the txcmplq */ + list_for_each_entry_safe(iocb, next_iocb, + &pring->txcmplq, list) + lpfc_sli_issue_abort_iotag(phba, pring, + iocb, NULL); + } + spin_unlock_irq(&pring->ring_lock); + } else { + spin_lock_irq(&phba->hbalock); + list_splice_init(&pring->txq, &tx_completions); + pring->txq_cnt = 0; + + if (offline) { + list_splice_init(&pring->txcmplq, &txcmplq_completions); + } else { + /* Next issue ABTS for everything on the txcmplq */ + list_for_each_entry_safe(iocb, next_iocb, + &pring->txcmplq, list) + lpfc_sli_issue_abort_iotag(phba, pring, + iocb, NULL); + } + spin_unlock_irq(&phba->hbalock); + } + + if (offline) { + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &txcmplq_completions, + IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); + } else { + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + } + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); +} + +/** + * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings + * @phba: Pointer to HBA context object. + * + * This function aborts all iocbs in FCP rings and frees all the iocb + * objects in txq. This function issues an abort iocb for all the iocb commands + * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before + * the return of this function. The caller is not required to hold any locks. + **/ +void +lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + uint32_t i; + + /* Look on all the FCP Rings for the iotag */ + if (phba->sli_rev >= LPFC_SLI_REV4) { + for (i = 0; i < phba->cfg_hdw_queue; i++) { + pring = phba->sli4_hba.hdwq[i].io_wq->pring; + lpfc_sli_abort_iocb_ring(phba, pring); + } + } else { + pring = &psli->sli3_ring[LPFC_FCP_RING]; + lpfc_sli_abort_iocb_ring(phba, pring); + } +} + +/** + * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring + * @phba: Pointer to HBA context object. + * + * This function flushes all iocbs in the IO ring and frees all the iocb + * objects in txq and txcmplq. This function will not issue abort iocbs + * for all the iocb commands in txcmplq, they will just be returned with + * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI + * slot has been permanently disabled. 
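+ *
+ * Unlike lpfc_sli_abort_iocb_ring(), no ABTS is issued here: everything on
+ * txq and txcmplq is simply completed with IOSTAT_LOCAL_REJECT /
+ * IOERR_SLI_DOWN. Illustrative call site (a sketch only):
+ *
+ *	if (pci_channel_offline(phba->pcidev))
+ *		lpfc_sli_flush_io_rings(phba);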
+ **/
+void
+lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
+{
+	LIST_HEAD(txq);
+	LIST_HEAD(txcmplq);
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+	uint32_t i;
+	struct lpfc_iocbq *piocb, *next_iocb;
+
+	spin_lock_irq(&phba->hbalock);
+	/* Indicate the I/O queues are flushed */
+	phba->hba_flag |= HBA_IOQ_FLUSH;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Look on all the FCP Rings for the iotag */
+	if (phba->sli_rev >= LPFC_SLI_REV4) {
+		for (i = 0; i < phba->cfg_hdw_queue; i++) {
+			pring = phba->sli4_hba.hdwq[i].io_wq->pring;
+
+			spin_lock_irq(&pring->ring_lock);
+			/* Retrieve everything on txq */
+			list_splice_init(&pring->txq, &txq);
+			list_for_each_entry_safe(piocb, next_iocb,
+						 &pring->txcmplq, list)
+				piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
+			/* Retrieve everything on the txcmplq */
+			list_splice_init(&pring->txcmplq, &txcmplq);
+			pring->txq_cnt = 0;
+			pring->txcmplq_cnt = 0;
+			spin_unlock_irq(&pring->ring_lock);
+
+			/* Flush the txq */
+			lpfc_sli_cancel_iocbs(phba, &txq,
+					      IOSTAT_LOCAL_REJECT,
+					      IOERR_SLI_DOWN);
+			/* Flush the txcmplq */
+			lpfc_sli_cancel_iocbs(phba, &txcmplq,
+					      IOSTAT_LOCAL_REJECT,
+					      IOERR_SLI_DOWN);
+			if (unlikely(pci_channel_offline(phba->pcidev)))
+				lpfc_sli4_io_xri_aborted(phba, NULL, 0);
+		}
+	} else {
+		pring = &psli->sli3_ring[LPFC_FCP_RING];
+
+		spin_lock_irq(&phba->hbalock);
+		/* Retrieve everything on txq */
+		list_splice_init(&pring->txq, &txq);
+		list_for_each_entry_safe(piocb, next_iocb,
+					 &pring->txcmplq, list)
+			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
+		/* Retrieve everything on the txcmplq */
+		list_splice_init(&pring->txcmplq, &txcmplq);
+		pring->txq_cnt = 0;
+		pring->txcmplq_cnt = 0;
+		spin_unlock_irq(&phba->hbalock);
+
+		/* Flush the txq */
+		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
+				      IOERR_SLI_DOWN);
+		/* Flush the txcmplq */
+		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
+				      IOERR_SLI_DOWN);
+	}
+}
+
+/**
+ * lpfc_sli_brdready_s3 - Check for sli3 host ready status
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This function reads the host status register and compares it with the
+ * provided bit mask to check if the HBA completed the restart. It will
+ * wait in a loop for the HBA to complete the restart. If the HBA does not
+ * restart within 15 iterations, the function will reset the HBA again.
+ * The function returns 1 when the HBA fails to restart, otherwise it
+ * returns zero.
+ **/
+static int
+lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
+{
+	uint32_t status;
+	int i = 0;
+	int retval = 0;
+
+	/* Read the HBA Host Status Register */
+	if (lpfc_readl(phba->HSregaddr, &status))
+		return 1;
+
+	phba->hba_flag |= HBA_NEEDS_CFG_PORT;
+
+	/*
+	 * Check status register every 100ms for 5 retries, then every
+	 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
+	 * check every 2.5 sec for 4.
+	 * Break out of the loop if errors occurred during init.
+	 */
+	while (((status & mask) != mask) &&
+	       !(status & HS_FFERM) &&
+	       i++ < 20) {
+
+		if (i <= 5)
+			msleep(10);
+		else if (i <= 10)
+			msleep(500);
+		else
+			msleep(2500);
+
+		if (i == 15) {
+			/* Do post */
+			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+			lpfc_sli_brdrestart(phba);
+		}
+		/* Read the HBA Host Status Register */
+		if (lpfc_readl(phba->HSregaddr, &status)) {
+			retval = 1;
+			break;
+		}
+	}
+
+	/* Check to see if any errors occurred during init */
+	if ((status & HS_FFERM) || (i >= 20)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"2751 Adapter failed to restart, "
+				"status reg x%x, FW Data: A8 x%x AC x%x\n",
+				status,
+				readl(phba->MBslimaddr + 0xa8),
+				readl(phba->MBslimaddr + 0xac));
+		phba->link_state = LPFC_HBA_ERROR;
+		retval = 1;
+	}
+
+	return retval;
+}
+
+/**
+ * lpfc_sli_brdready_s4 - Check for sli4 host ready status
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This function checks the host status register to see if the HBA is
+ * ready and waits in a loop for it to become ready. If the HBA is not
+ * ready, the function will reset the HBA PCI function again. The function
+ * returns 1 when the HBA fails to become ready, otherwise it returns zero.
+ **/
+static int
+lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
+{
+	uint32_t status;
+	int retval = 0;
+
+	/* Read the HBA Host Status Register */
+	status = lpfc_sli4_post_status_check(phba);
+
+	if (status) {
+		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+		lpfc_sli_brdrestart(phba);
+		status = lpfc_sli4_post_status_check(phba);
+	}
+
+	/* Check to see if any errors occurred during init */
+	if (status) {
+		phba->link_state = LPFC_HBA_ERROR;
+		retval = 1;
+	} else
+		phba->sli4_hba.intr_enable = 0;
+
+	phba->hba_flag &= ~HBA_SETUP;
+	return retval;
+}
+
+/**
+ * lpfc_sli_brdready - Wrapper func for checking the hba readiness
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
+ * from the API jump table function pointer from the lpfc_hba struct.
+ **/
+int
+lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
+{
+	return phba->lpfc_sli_brdready(phba, mask);
+}
+
+#define BARRIER_TEST_PATTERN (0xdeadbeef)
+
+/**
+ * lpfc_reset_barrier - Make HBA ready for HBA reset
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called before resetting an HBA. It is called with
+ * hbalock held and requests the HBA to quiesce DMAs before a reset.
+ **/
+void lpfc_reset_barrier(struct lpfc_hba *phba)
+{
+	uint32_t __iomem *resp_buf;
+	uint32_t __iomem *mbox_buf;
+	volatile struct MAILBOX_word0 mbox;
+	uint32_t hc_copy, ha_copy, resp_data;
+	int i;
+	uint8_t hdrtype;
+
+	lockdep_assert_held(&phba->hbalock);
+
+	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
+	if (hdrtype != 0x80 ||
+	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
+	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
+		return;
+
+	/*
+	 * Tell the other part of the chip to suspend temporarily all
+	 * its DMA activity.
+ */ + resp_buf = phba->MBslimaddr; + + /* Disable the error attention */ + if (lpfc_readl(phba->HCregaddr, &hc_copy)) + return; + writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + phba->link_flag |= LS_IGNORE_ERATT; + + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return; + if (ha_copy & HA_ERATT) { + /* Clear Chip error bit */ + writel(HA_ERATT, phba->HAregaddr); + phba->pport->stopped = 1; + } + + mbox.word0 = 0; + mbox.mbxCommand = MBX_KILL_BOARD; + mbox.mbxOwner = OWN_CHIP; + + writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); + mbox_buf = phba->MBslimaddr; + writel(mbox.word0, mbox_buf); + + for (i = 0; i < 50; i++) { + if (lpfc_readl((resp_buf + 1), &resp_data)) + return; + if (resp_data != ~(BARRIER_TEST_PATTERN)) + mdelay(1); + else + break; + } + resp_data = 0; + if (lpfc_readl((resp_buf + 1), &resp_data)) + return; + if (resp_data != ~(BARRIER_TEST_PATTERN)) { + if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || + phba->pport->stopped) + goto restore_hc; + else + goto clear_errat; + } + + mbox.mbxOwner = OWN_HOST; + resp_data = 0; + for (i = 0; i < 500; i++) { + if (lpfc_readl(resp_buf, &resp_data)) + return; + if (resp_data != mbox.word0) + mdelay(1); + else + break; + } + +clear_errat: + + while (++i < 500) { + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return; + if (!(ha_copy & HA_ERATT)) + mdelay(1); + else + break; + } + + if (readl(phba->HAregaddr) & HA_ERATT) { + writel(HA_ERATT, phba->HAregaddr); + phba->pport->stopped = 1; + } + +restore_hc: + phba->link_flag &= ~LS_IGNORE_ERATT; + writel(hc_copy, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ +} + +/** + * lpfc_sli_brdkill - Issue a kill_board mailbox command + * @phba: Pointer to HBA context object. + * + * This function issues a kill_board mailbox command and waits for + * the error attention interrupt. This function is called for stopping + * the firmware processing. The caller is not required to hold any + * locks. This function calls lpfc_hba_down_post function to free + * any pending commands after the kill. The function will return 1 when it + * fails to kill the board else will return 0. 
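+ *
+ * Note: there is no mailbox completion for KILL_BOARD; after issuing it the
+ * routine polls the HA register for ERATT every 100ms for up to 30
+ * iterations (about 3 seconds) and returns 0 only if ERATT was raised,
+ * otherwise 1.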
+ **/ +int +lpfc_sli_brdkill(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli; + LPFC_MBOXQ_t *pmb; + uint32_t status; + uint32_t ha_copy; + int retval; + int i = 0; + + psli = &phba->sli; + + /* Kill HBA */ + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0329 Kill HBA Data: x%x x%x\n", + phba->pport->port_state, psli->sli_flag); + + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) + return 1; + + /* Disable the error attention */ + spin_lock_irq(&phba->hbalock); + if (lpfc_readl(phba->HCregaddr, &status)) { + spin_unlock_irq(&phba->hbalock); + mempool_free(pmb, phba->mbox_mem_pool); + return 1; + } + status &= ~HC_ERINT_ENA; + writel(status, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + phba->link_flag |= LS_IGNORE_ERATT; + spin_unlock_irq(&phba->hbalock); + + lpfc_kill_board(phba, pmb); + pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + + if (retval != MBX_SUCCESS) { + if (retval != MBX_BUSY) + mempool_free(pmb, phba->mbox_mem_pool); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2752 KILL_BOARD command failed retval %d\n", + retval); + spin_lock_irq(&phba->hbalock); + phba->link_flag &= ~LS_IGNORE_ERATT; + spin_unlock_irq(&phba->hbalock); + return 1; + } + + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); + + mempool_free(pmb, phba->mbox_mem_pool); + + /* There is no completion for a KILL_BOARD mbox cmd. Check for an error + * attention every 100ms for 3 seconds. If we don't get ERATT after + * 3 seconds we still set HBA_ERROR state because the status of the + * board is now undefined. + */ + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return 1; + while ((i++ < 30) && !(ha_copy & HA_ERATT)) { + mdelay(100); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return 1; + } + + del_timer_sync(&psli->mbox_tmo); + if (ha_copy & HA_ERATT) { + writel(HA_ERATT, phba->HAregaddr); + phba->pport->stopped = 1; + } + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + psli->mbox_active = NULL; + phba->link_flag &= ~LS_IGNORE_ERATT; + spin_unlock_irq(&phba->hbalock); + + lpfc_hba_down_post(phba); + phba->link_state = LPFC_HBA_ERROR; + + return ha_copy & HA_ERATT ? 0 : 1; +} + +/** + * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA + * @phba: Pointer to HBA context object. + * + * This function resets the HBA by writing HC_INITFF to the control + * register. After the HBA resets, this function resets all the iocb ring + * indices. This function disables PCI layer parity checking during + * the reset. + * This function returns 0 always. + * The caller is not required to hold any locks. + **/ +int +lpfc_sli_brdreset(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli; + struct lpfc_sli_ring *pring; + uint16_t cfg_value; + int i; + + psli = &phba->sli; + + /* Reset HBA */ + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0325 Reset HBA Data: x%x x%x\n", + (phba->pport) ? 
phba->pport->port_state : 0, + psli->sli_flag); + + /* perform board reset */ + phba->fc_eventTag = 0; + phba->link_events = 0; + phba->hba_flag |= HBA_NEEDS_CFG_PORT; + if (phba->pport) { + phba->pport->fc_myDID = 0; + phba->pport->fc_prevDID = 0; + } + + /* Turn off parity checking and serr during the physical reset */ + if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) + return -EIO; + + pci_write_config_word(phba->pcidev, PCI_COMMAND, + (cfg_value & + ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); + + psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); + + /* Now toggle INITFF bit in the Host Control Register */ + writel(HC_INITFF, phba->HCregaddr); + mdelay(1); + readl(phba->HCregaddr); /* flush */ + writel(0, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + + /* Restore PCI cmd register */ + pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); + + /* Initialize relevant SLI info */ + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + pring->flag = 0; + pring->sli.sli3.rspidx = 0; + pring->sli.sli3.next_cmdidx = 0; + pring->sli.sli3.local_getidx = 0; + pring->sli.sli3.cmdidx = 0; + pring->missbufcnt = 0; + } + + phba->link_state = LPFC_WARM_START; + return 0; +} + +/** + * lpfc_sli4_brdreset - Reset a sli-4 HBA + * @phba: Pointer to HBA context object. + * + * This function resets a SLI4 HBA. This function disables PCI layer parity + * checking during resets the device. The caller is not required to hold + * any locks. + * + * This function returns 0 on success else returns negative error code. + **/ +int +lpfc_sli4_brdreset(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + uint16_t cfg_value; + int rc = 0; + + /* Reset HBA */ + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0295 Reset HBA Data: x%x x%x x%x\n", + phba->pport->port_state, psli->sli_flag, + phba->hba_flag); + + /* perform board reset */ + phba->fc_eventTag = 0; + phba->link_events = 0; + phba->pport->fc_myDID = 0; + phba->pport->fc_prevDID = 0; + phba->hba_flag &= ~HBA_SETUP; + + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~(LPFC_PROCESS_LA); + phba->fcf.fcf_flag = 0; + spin_unlock_irq(&phba->hbalock); + + /* Now physically reset the device */ + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0389 Performing PCI function reset!\n"); + + /* Turn off parity checking and serr during the physical reset */ + if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3205 PCI read Config failed\n"); + return -EIO; + } + + pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & + ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); + + /* Perform FCoE PCI function reset before freeing queue memory */ + rc = lpfc_pci_function_reset(phba); + + /* Restore PCI cmd register */ + pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); + + return rc; +} + +/** + * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba + * @phba: Pointer to HBA context object. + * + * This function is called in the SLI initialization code path to + * restart the HBA. The caller is not required to hold any lock. + * This function writes MBX_RESTART mailbox command to the SLIM and + * resets the HBA. At the end of the function, it calls lpfc_hba_down_post + * function to free any pending commands. The function enables + * POST only during the first initialization. The function returns zero. + * The function does not guarantee completion of MBX_RESTART mailbox + * command before the return of this function. 
+ **/ +static int +lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) +{ + volatile struct MAILBOX_word0 mb; + struct lpfc_sli *psli; + void __iomem *to_slim; + + spin_lock_irq(&phba->hbalock); + + psli = &phba->sli; + + /* Restart HBA */ + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0337 Restart HBA Data: x%x x%x\n", + (phba->pport) ? phba->pport->port_state : 0, + psli->sli_flag); + + mb.word0 = 0; + mb.mbxCommand = MBX_RESTART; + mb.mbxHc = 1; + + lpfc_reset_barrier(phba); + + to_slim = phba->MBslimaddr; + writel(mb.word0, to_slim); + readl(to_slim); /* flush */ + + /* Only skip post after fc_ffinit is completed */ + if (phba->pport && phba->pport->port_state) + mb.word0 = 1; /* This is really setting up word1 */ + else + mb.word0 = 0; /* This is really setting up word1 */ + to_slim = phba->MBslimaddr + sizeof (uint32_t); + writel(mb.word0, to_slim); + readl(to_slim); /* flush */ + + lpfc_sli_brdreset(phba); + if (phba->pport) + phba->pport->stopped = 0; + phba->link_state = LPFC_INIT_START; + phba->hba_flag = 0; + spin_unlock_irq(&phba->hbalock); + + memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); + psli->stats_start = ktime_get_seconds(); + + /* Give the INITFF and Post time to settle. */ + mdelay(100); + + lpfc_hba_down_post(phba); + + return 0; +} + +/** + * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba + * @phba: Pointer to HBA context object. + * + * This function is called in the SLI initialization code path to restart + * a SLI4 HBA. The caller is not required to hold any lock. + * At the end of the function, it calls lpfc_hba_down_post function to + * free any pending commands. + **/ +static int +lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + int rc; + + /* Restart HBA */ + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0296 Restart HBA Data: x%x x%x\n", + phba->pport->port_state, psli->sli_flag); + + rc = lpfc_sli4_brdreset(phba); + if (rc) { + phba->link_state = LPFC_HBA_ERROR; + goto hba_down_queue; + } + + spin_lock_irq(&phba->hbalock); + phba->pport->stopped = 0; + phba->link_state = LPFC_INIT_START; + phba->hba_flag = 0; + /* Preserve FA-PWWN expectation */ + phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC; + spin_unlock_irq(&phba->hbalock); + + memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); + psli->stats_start = ktime_get_seconds(); + +hba_down_queue: + lpfc_hba_down_post(phba); + lpfc_sli4_queue_destroy(phba); + + return rc; +} + +/** + * lpfc_sli_brdrestart - Wrapper func for restarting hba + * @phba: Pointer to HBA context object. + * + * This routine wraps the actual SLI3 or SLI4 hba restart routine from the + * API jump table function pointer from the lpfc_hba struct. +**/ +int +lpfc_sli_brdrestart(struct lpfc_hba *phba) +{ + return phba->lpfc_sli_brdrestart(phba); +} + +/** + * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart + * @phba: Pointer to HBA context object. + * + * This function is called after a HBA restart to wait for successful + * restart of the HBA. Successful restart of the HBA is indicated by + * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15 + * iteration, the function will restart the HBA again. The function returns + * zero if HBA successfully restarted else returns negative error code. 
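+ *
+ * Rough timing, derived from the retry loop below (assuming msleep() takes
+ * about its nominal time): 10 x 10ms + 90 x 100ms + 50 x 1s, i.e. close to
+ * 60 seconds, elapse before the mid-loop board restart; up to 50 further
+ * 1-second polls follow before the loop gives up and returns -ETIMEDOUT.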
+ **/
+int
+lpfc_sli_chipset_init(struct lpfc_hba *phba)
+{
+	uint32_t status, i = 0;
+
+	/* Read the HBA Host Status Register */
+	if (lpfc_readl(phba->HSregaddr, &status))
+		return -EIO;
+
+	/* Check status register to see what current state is */
+	i = 0;
+	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
+
+		/* Check every 10ms for 10 retries, then every 100ms for 90
+		 * retries, then every 1 sec for 50 retries, a total of
+		 * ~60 seconds, before resetting the board again and then
+		 * checking every 1 sec for another 50 retries. The wait of
+		 * up to 60 seconds before the board is ready is required
+		 * for the Falcon FIPS zeroization to complete; any board
+		 * reset in between would restart zeroization and further
+		 * delay board readiness.
+		 */
+		if (i++ >= 200) {
+			/* Adapter failed to init, timeout, status reg
+			 */
+			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+					"0436 Adapter failed to init, "
+					"timeout, status reg x%x, "
+					"FW Data: A8 x%x AC x%x\n", status,
+					readl(phba->MBslimaddr + 0xa8),
+					readl(phba->MBslimaddr + 0xac));
+			phba->link_state = LPFC_HBA_ERROR;
+			return -ETIMEDOUT;
+		}
+
+		/* Check to see if any errors occurred during init */
+		if (status & HS_FFERM) {
+			/* ERROR: During chipset initialization */
+			/* Adapter failed to init, chipset, status reg
+			 */
+			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+					"0437 Adapter failed to init, "
+					"chipset, status reg x%x, "
+					"FW Data: A8 x%x AC x%x\n", status,
+					readl(phba->MBslimaddr + 0xa8),
+					readl(phba->MBslimaddr + 0xac));
+			phba->link_state = LPFC_HBA_ERROR;
+			return -EIO;
+		}
+
+		if (i <= 10)
+			msleep(10);
+		else if (i <= 100)
+			msleep(100);
+		else
+			msleep(1000);
+
+		if (i == 150) {
+			/* Do post */
+			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+			lpfc_sli_brdrestart(phba);
+		}
+		/* Read the HBA Host Status Register */
+		if (lpfc_readl(phba->HSregaddr, &status))
+			return -EIO;
+	}
+
+	/* Check to see if any errors occurred during init */
+	if (status & HS_FFERM) {
+		/* ERROR: During chipset initialization */
+		/* Adapter failed to init, chipset, status reg */
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"0438 Adapter failed to init, chipset, "
+				"status reg x%x, "
+				"FW Data: A8 x%x AC x%x\n", status,
+				readl(phba->MBslimaddr + 0xa8),
+				readl(phba->MBslimaddr + 0xac));
+		phba->link_state = LPFC_HBA_ERROR;
+		return -EIO;
+	}
+
+	phba->hba_flag |= HBA_NEEDS_CFG_PORT;
+
+	/* Clear all interrupt enable conditions */
+	writel(0, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+
+	/* setup host attn register */
+	writel(0xffffffff, phba->HAregaddr);
+	readl(phba->HAregaddr); /* flush */
+	return 0;
+}
+
+/**
+ * lpfc_sli_hbq_count - Get the number of HBQs to be configured
+ *
+ * This function calculates and returns the number of HBQs required to be
+ * configured.
+ **/
+int
+lpfc_sli_hbq_count(void)
+{
+	return ARRAY_SIZE(lpfc_hbq_defs);
+}
+
+/**
+ * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
+ *
+ * This function adds the number of hbq entries in every HBQ to get
+ * the total number of hbq entries required for the HBA and returns
+ * the total count.
+ **/
+static int
+lpfc_sli_hbq_entry_count(void)
+{
+	int hbq_count = lpfc_sli_hbq_count();
+	int count = 0;
+	int i;
+
+	for (i = 0; i < hbq_count; ++i)
+		count += lpfc_hbq_defs[i]->entry_count;
+	return count;
+}
+
+/**
+ * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
+ *
+ * This function calculates the amount of memory required for all hbq
+ * entries to be configured and returns the total memory required.
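+ *
+ * For example (hypothetical numbers), with two HBQ definitions whose
+ * entry_count values are 256 and 128, the routine would return
+ * (256 + 128) * sizeof(struct lpfc_hbq_entry).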
+ **/ +int +lpfc_sli_hbq_size(void) +{ + return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); +} + +/** + * lpfc_sli_hbq_setup - configure and initialize HBQs + * @phba: Pointer to HBA context object. + * + * This function is called during the SLI initialization to configure + * all the HBQs and post buffers to the HBQ. The caller is not + * required to hold any locks. This function will return zero if successful + * else it will return negative error code. + **/ +static int +lpfc_sli_hbq_setup(struct lpfc_hba *phba) +{ + int hbq_count = lpfc_sli_hbq_count(); + LPFC_MBOXQ_t *pmb; + MAILBOX_t *pmbox; + uint32_t hbqno; + uint32_t hbq_entry_index; + + /* Get a Mailbox buffer to setup mailbox + * commands for HBA initialization + */ + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + + if (!pmb) + return -ENOMEM; + + pmbox = &pmb->u.mb; + + /* Initialize the struct lpfc_sli_hbq structure for each hbq */ + phba->link_state = LPFC_INIT_MBX_CMDS; + phba->hbq_in_use = 1; + + hbq_entry_index = 0; + for (hbqno = 0; hbqno < hbq_count; ++hbqno) { + phba->hbqs[hbqno].next_hbqPutIdx = 0; + phba->hbqs[hbqno].hbqPutIdx = 0; + phba->hbqs[hbqno].local_hbqGetIdx = 0; + phba->hbqs[hbqno].entry_count = + lpfc_hbq_defs[hbqno]->entry_count; + lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], + hbq_entry_index, pmb); + hbq_entry_index += phba->hbqs[hbqno].entry_count; + + if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { + /* Adapter failed to init, mbxCmd CFG_RING, + mbxStatus , ring */ + + lpfc_printf_log(phba, KERN_ERR, + LOG_SLI | LOG_VPORT, + "1805 Adapter failed to init. " + "Data: x%x x%x x%x\n", + pmbox->mbxCommand, + pmbox->mbxStatus, hbqno); + + phba->link_state = LPFC_HBA_ERROR; + mempool_free(pmb, phba->mbox_mem_pool); + return -ENXIO; + } + } + phba->hbq_count = hbq_count; + + mempool_free(pmb, phba->mbox_mem_pool); + + /* Initially populate or replenish the HBQs */ + for (hbqno = 0; hbqno < hbq_count; ++hbqno) + lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); + return 0; +} + +/** + * lpfc_sli4_rb_setup - Initialize and post RBs to HBA + * @phba: Pointer to HBA context object. + * + * This function is called during the SLI initialization to configure + * all the HBQs and post buffers to the HBQ. The caller is not + * required to hold any locks. This function will return zero if successful + * else it will return negative error code. + **/ +static int +lpfc_sli4_rb_setup(struct lpfc_hba *phba) +{ + phba->hbq_in_use = 1; + /** + * Specific case when the MDS diagnostics is enabled and supported. + * The receive buffer count is truncated to manage the incoming + * traffic. + **/ + if (phba->cfg_enable_mds_diags && phba->mds_diags_support) + phba->hbqs[LPFC_ELS_HBQ].entry_count = + lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1; + else + phba->hbqs[LPFC_ELS_HBQ].entry_count = + lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; + phba->hbq_count = 1; + lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); + /* Initially populate or replenish the HBQs */ + return 0; +} + +/** + * lpfc_sli_config_port - Issue config port mailbox command + * @phba: Pointer to HBA context object. + * @sli_mode: sli mode - 2/3 + * + * This function is called by the sli initialization code path + * to issue config_port mailbox command. This function restarts the + * HBA firmware and issues a config_port mailbox command to configure + * the SLI interface in the sli mode specified by sli_mode + * variable. The caller is not required to hold any locks. 
+ * The function returns 0 if successful, else returns negative error + * code. + **/ +int +lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) +{ + LPFC_MBOXQ_t *pmb; + uint32_t resetcount = 0, rc = 0, done = 0; + + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + phba->link_state = LPFC_HBA_ERROR; + return -ENOMEM; + } + + phba->sli_rev = sli_mode; + while (resetcount < 2 && !done) { + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irq(&phba->hbalock); + phba->pport->port_state = LPFC_VPORT_UNKNOWN; + lpfc_sli_brdrestart(phba); + rc = lpfc_sli_chipset_init(phba); + if (rc) + break; + + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irq(&phba->hbalock); + resetcount++; + + /* Call pre CONFIG_PORT mailbox command initialization. A + * value of 0 means the call was successful. Any other + * nonzero value is a failure, but if ERESTART is returned, + * the driver may reset the HBA and try again. + */ + rc = lpfc_config_port_prep(phba); + if (rc == -ERESTART) { + phba->link_state = LPFC_LINK_UNKNOWN; + continue; + } else if (rc) + break; + + phba->link_state = LPFC_INIT_MBX_CMDS; + lpfc_config_port(phba, pmb); + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | + LPFC_SLI3_HBQ_ENABLED | + LPFC_SLI3_CRP_ENABLED | + LPFC_SLI3_DSS_ENABLED); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0442 Adapter failed to init, mbxCmd x%x " + "CONFIG_PORT, mbxStatus x%x Data: x%x\n", + pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); + rc = -ENXIO; + } else { + /* Allow asynchronous mailbox command to go through */ + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; + spin_unlock_irq(&phba->hbalock); + done = 1; + + if ((pmb->u.mb.un.varCfgPort.casabt == 1) && + (pmb->u.mb.un.varCfgPort.gasabt == 0)) + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3110 Port did not grant ASABT\n"); + } + } + if (!done) { + rc = -EINVAL; + goto do_prep_failed; + } + if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { + if (!pmb->u.mb.un.varCfgPort.cMA) { + rc = -ENXIO; + goto do_prep_failed; + } + if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { + phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; + phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; + phba->max_vports = (phba->max_vpi > phba->max_vports) ? + phba->max_vpi : phba->max_vports; + + } else + phba->max_vpi = 0; + if (pmb->u.mb.un.varCfgPort.gerbm) + phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; + if (pmb->u.mb.un.varCfgPort.gcrp) + phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; + + phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; + phba->port_gp = phba->mbox->us.s3_pgp.port; + + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { + if (pmb->u.mb.un.varCfgPort.gbg == 0) { + phba->cfg_enable_bg = 0; + phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0443 Adapter did not grant " + "BlockGuard\n"); + } + } + } else { + phba->hbq_get = NULL; + phba->port_gp = phba->mbox->us.s2.port; + phba->max_vpi = 0; + } +do_prep_failed: + mempool_free(pmb, phba->mbox_mem_pool); + return rc; +} + + +/** + * lpfc_sli_hba_setup - SLI initialization function + * @phba: Pointer to HBA context object. + * + * This function is the main SLI initialization function. 
This function + * is called by the HBA initialization code, HBA reset code and HBA + * error attention handler code. Caller is not required to hold any + * locks. This function issues config_port mailbox command to configure + * the SLI, setup iocb rings and HBQ rings. In the end the function + * calls the config_port_post function to issue init_link mailbox + * command and to start the discovery. The function will return zero + * if successful, else it will return negative error code. + **/ +int +lpfc_sli_hba_setup(struct lpfc_hba *phba) +{ + uint32_t rc; + int i; + int longs; + + /* Enable ISR already does config_port because of config_msi mbx */ + if (phba->hba_flag & HBA_NEEDS_CFG_PORT) { + rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3); + if (rc) + return -EIO; + phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; + } + phba->fcp_embed_io = 0; /* SLI4 FC support only */ + + if (phba->sli_rev == 3) { + phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; + phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; + } else { + phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; + phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; + phba->sli3_options = 0; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0444 Firmware in SLI %x mode. Max_vpi %d\n", + phba->sli_rev, phba->max_vpi); + rc = lpfc_sli_ring_map(phba); + + if (rc) + goto lpfc_sli_hba_setup_error; + + /* Initialize VPIs. */ + if (phba->sli_rev == LPFC_SLI_REV3) { + /* + * The VPI bitmask and physical ID array are allocated + * and initialized once only - at driver load. A port + * reset doesn't need to reinitialize this memory. + */ + if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { + longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; + phba->vpi_bmask = kcalloc(longs, + sizeof(unsigned long), + GFP_KERNEL); + if (!phba->vpi_bmask) { + rc = -ENOMEM; + goto lpfc_sli_hba_setup_error; + } + + phba->vpi_ids = kcalloc(phba->max_vpi + 1, + sizeof(uint16_t), + GFP_KERNEL); + if (!phba->vpi_ids) { + kfree(phba->vpi_bmask); + rc = -ENOMEM; + goto lpfc_sli_hba_setup_error; + } + for (i = 0; i < phba->max_vpi; i++) + phba->vpi_ids[i] = i; + } + } + + /* Init HBQs */ + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + rc = lpfc_sli_hbq_setup(phba); + if (rc) + goto lpfc_sli_hba_setup_error; + } + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag |= LPFC_PROCESS_LA; + spin_unlock_irq(&phba->hbalock); + + rc = lpfc_config_port_post(phba); + if (rc) + goto lpfc_sli_hba_setup_error; + + return rc; + +lpfc_sli_hba_setup_error: + phba->link_state = LPFC_HBA_ERROR; + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0445 Firmware initialization failed\n"); + return rc; +} + +/** + * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region + * @phba: Pointer to HBA context object. + * + * This function issue a dump mailbox command to read config region + * 23 and parse the records in the region and populate driver + * data structure. 
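+ *
+ * Before the dump is issued, default vlan_id and fc_map values are
+ * programmed; when the mailbox succeeds, the region 23 records are handed
+ * to lpfc_parse_fcoe_conf(). Returns 0 on success, -ENOMEM when the mailbox
+ * or its config-region buffer cannot be set up, or -EIO on a mailbox
+ * failure or an out-of-range region length.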
+ **/ +static int +lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + struct lpfc_dmabuf *mp; + struct lpfc_mqe *mqe; + uint32_t data_length; + int rc; + + /* Program the default value of vlan_id and fc_map */ + phba->valid_vlan = 0; + phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; + phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; + phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + mqe = &mboxq->u.mqe; + if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { + rc = -ENOMEM; + goto out_free_mboxq; + } + + mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):2571 Mailbox cmd x%x Status x%x " + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "CQ: x%x x%x x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + bf_get(lpfc_mqe_command, mqe), + bf_get(lpfc_mqe_status, mqe), + mqe->un.mb_words[0], mqe->un.mb_words[1], + mqe->un.mb_words[2], mqe->un.mb_words[3], + mqe->un.mb_words[4], mqe->un.mb_words[5], + mqe->un.mb_words[6], mqe->un.mb_words[7], + mqe->un.mb_words[8], mqe->un.mb_words[9], + mqe->un.mb_words[10], mqe->un.mb_words[11], + mqe->un.mb_words[12], mqe->un.mb_words[13], + mqe->un.mb_words[14], mqe->un.mb_words[15], + mqe->un.mb_words[16], mqe->un.mb_words[50], + mboxq->mcqe.word0, + mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, + mboxq->mcqe.trailer); + + if (rc) { + rc = -EIO; + goto out_free_mboxq; + } + data_length = mqe->un.mb_words[5]; + if (data_length > DMP_RGN23_SIZE) { + rc = -EIO; + goto out_free_mboxq; + } + + lpfc_parse_fcoe_conf(phba, mp->virt, data_length); + rc = 0; + +out_free_mboxq: + lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); + return rc; +} + +/** + * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to the LPFC_MBOXQ_t structure. + * @vpd: pointer to the memory to hold resulting port vpd data. + * @vpd_size: On input, the number of bytes allocated to @vpd. + * On output, the number of data bytes in @vpd. + * + * This routine executes a READ_REV SLI4 mailbox command. In + * addition, this routine gets the port vpd data. + * + * Return codes + * 0 - successful + * -ENOMEM - could not allocated memory. + **/ +static int +lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, + uint8_t *vpd, uint32_t *vpd_size) +{ + int rc = 0; + uint32_t dma_size; + struct lpfc_dmabuf *dmabuf; + struct lpfc_mqe *mqe; + + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + /* + * Get a DMA buffer for the vpd data resulting from the READ_REV + * mailbox command. + */ + dma_size = *vpd_size; + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size, + &dmabuf->phys, GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + return -ENOMEM; + } + + /* + * The SLI4 implementation of READ_REV conflicts at word1, + * bits 31:16 and SLI4 adds vpd functionality not present + * in SLI3. This code corrects the conflicts. 
+ */ + lpfc_read_rev(phba, mboxq); + mqe = &mboxq->u.mqe; + mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); + mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); + mqe->un.read_rev.word1 &= 0x0000FFFF; + bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); + bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc) { + dma_free_coherent(&phba->pcidev->dev, dma_size, + dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + return -EIO; + } + + /* + * The available vpd length cannot be bigger than the + * DMA buffer passed to the port. Catch the less than + * case and update the caller's size. + */ + if (mqe->un.read_rev.avail_vpd_len < *vpd_size) + *vpd_size = mqe->un.read_rev.avail_vpd_len; + + memcpy(vpd, dmabuf->virt, *vpd_size); + + dma_free_coherent(&phba->pcidev->dev, dma_size, + dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + return 0; +} + +/** + * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes + * @phba: pointer to lpfc hba data structure. + * + * This routine retrieves SLI4 device physical port name this PCI function + * is attached to. + * + * Return codes + * 0 - successful + * otherwise - failed to retrieve controller attributes + **/ +static int +lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; + struct lpfc_controller_attribute *cntl_attr; + void *virtaddr = NULL; + uint32_t alloclen, reqlen; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + int rc; + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */ + reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); + alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, + LPFC_SLI4_MBX_NEMBED); + + if (alloclen < reqlen) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3084 Allocated DMA memory size (%d) is " + "less than the requested DMA memory size " + "(%d)\n", alloclen, reqlen); + rc = -ENOMEM; + goto out_free_mboxq; + } + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + virtaddr = mboxq->sge_array->addr[0]; + mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; + shdr = &mbx_cntl_attr->cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3085 Mailbox x%x (x%x/x%x) failed, " + "rc:x%x, status:x%x, add_status:x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + lpfc_sli_config_mbox_subsys_get(phba, mboxq), + lpfc_sli_config_mbox_opcode_get(phba, mboxq), + rc, shdr_status, shdr_add_status); + rc = -ENXIO; + goto out_free_mboxq; + } + + cntl_attr = &mbx_cntl_attr->cntl_attr; + phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; + phba->sli4_hba.lnk_info.lnk_tp = + bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); + phba->sli4_hba.lnk_info.lnk_no = + bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); + phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr); + phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr); + + memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); + strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, + sizeof(phba->BIOSVersion)); + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3086 lnk_type:%d, lnk_numb:%d, 
bios_ver:%s, " + "flash_id: x%02x, asic_rev: x%02x\n", + phba->sli4_hba.lnk_info.lnk_tp, + phba->sli4_hba.lnk_info.lnk_no, + phba->BIOSVersion, phba->sli4_hba.flash_id, + phba->sli4_hba.asic_rev); +out_free_mboxq: + if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) + lpfc_sli4_mbox_cmd_free(phba, mboxq); + else + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; +} + +/** + * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name + * @phba: pointer to lpfc hba data structure. + * + * This routine retrieves SLI4 device physical port name this PCI function + * is attached to. + * + * Return codes + * 0 - successful + * otherwise - failed to retrieve physical port name + **/ +static int +lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + struct lpfc_mbx_get_port_name *get_port_name; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + char cport_name = 0; + int rc; + + /* We assume nothing at this point */ + phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; + phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + /* obtain link type and link number via READ_CONFIG */ + phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; + lpfc_sli4_read_config(phba); + + if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) + phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC; + + if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) + goto retrieve_ppname; + + /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ + rc = lpfc_sli4_get_ctl_attr(phba); + if (rc) + goto out_free_mboxq; + +retrieve_ppname: + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_GET_PORT_NAME, + sizeof(struct lpfc_mbx_get_port_name) - + sizeof(struct lpfc_sli4_cfg_mhdr), + LPFC_SLI4_MBX_EMBED); + get_port_name = &mboxq->u.mqe.un.get_port_name; + shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; + bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); + bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, + phba->sli4_hba.lnk_info.lnk_tp); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3087 Mailbox x%x (x%x/x%x) failed: " + "rc:x%x, status:x%x, add_status:x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + lpfc_sli_config_mbox_subsys_get(phba, mboxq), + lpfc_sli_config_mbox_opcode_get(phba, mboxq), + rc, shdr_status, shdr_add_status); + rc = -ENXIO; + goto out_free_mboxq; + } + switch (phba->sli4_hba.lnk_info.lnk_no) { + case LPFC_LINK_NUMBER_0: + cport_name = bf_get(lpfc_mbx_get_port_name_name0, + &get_port_name->u.response); + phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; + break; + case LPFC_LINK_NUMBER_1: + cport_name = bf_get(lpfc_mbx_get_port_name_name1, + &get_port_name->u.response); + phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; + break; + case LPFC_LINK_NUMBER_2: + cport_name = bf_get(lpfc_mbx_get_port_name_name2, + &get_port_name->u.response); + phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; + break; + case LPFC_LINK_NUMBER_3: + cport_name = bf_get(lpfc_mbx_get_port_name_name3, + &get_port_name->u.response); + phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; + break; + default: + 
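+		/* Unknown link number: pport_name_sta stays LPFC_SLI4_PPNAME_NON */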
break; + } + + if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { + phba->Port[0] = cport_name; + phba->Port[1] = '\0'; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3091 SLI get port name: %s\n", phba->Port); + } + +out_free_mboxq: + if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) + lpfc_sli4_mbox_cmd_free(phba, mboxq); + else + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; +} + +/** + * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to explicitly arm the SLI4 device's completion and + * event queues + **/ +static void +lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) +{ + int qidx; + struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_queue *eq; + + sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM); + sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM); + if (sli4_hba->nvmels_cq) + sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0, + LPFC_QUEUE_REARM); + + if (sli4_hba->hdwq) { + /* Loop thru all Hardware Queues */ + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + qp = &sli4_hba->hdwq[qidx]; + /* ARM the corresponding CQ */ + sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0, + LPFC_QUEUE_REARM); + } + + /* Loop thru all IRQ vectors */ + for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { + eq = sli4_hba->hba_eq_hdl[qidx].eq; + /* ARM the corresponding EQ */ + sli4_hba->sli4_write_eq_db(phba, eq, + 0, LPFC_QUEUE_REARM); + } + } + + if (phba->nvmet_support) { + for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { + sli4_hba->sli4_write_cq_db(phba, + sli4_hba->nvmet_cqset[qidx], 0, + LPFC_QUEUE_REARM); + } + } +} + +/** + * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. + * @phba: Pointer to HBA context object. + * @type: The resource extent type. + * @extnt_count: buffer to hold port available extent count. + * @extnt_size: buffer to hold element count per extent. + * + * This function calls the port and retrievs the number of available + * extents and their size for a particular extent type. + * + * Returns: 0 if successful. Nonzero otherwise. + **/ +int +lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, + uint16_t *extnt_count, uint16_t *extnt_size) +{ + int rc = 0; + uint32_t length; + uint32_t mbox_tmo; + struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; + LPFC_MBOXQ_t *mbox; + + *extnt_count = 0; + *extnt_size = 0; + + mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + /* Find out how many extents are available for this resource type */ + length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, + length, LPFC_SLI4_MBX_EMBED); + + /* Send an extents count of 0 - the GET doesn't use it. 
*/ + rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, + LPFC_SLI4_MBX_EMBED); + if (unlikely(rc)) { + rc = -EIO; + goto err_exit; + } + + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + if (unlikely(rc)) { + rc = -EIO; + goto err_exit; + } + + rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; + if (bf_get(lpfc_mbox_hdr_status, + &rsrc_info->header.cfg_shdr.response)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2930 Failed to get resource extents " + "Status 0x%x Add'l Status 0x%x\n", + bf_get(lpfc_mbox_hdr_status, + &rsrc_info->header.cfg_shdr.response), + bf_get(lpfc_mbox_hdr_add_status, + &rsrc_info->header.cfg_shdr.response)); + rc = -EIO; + goto err_exit; + } + + *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, + &rsrc_info->u.rsp); + *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, + &rsrc_info->u.rsp); + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3162 Retrieved extents type-%d from port: count:%d, " + "size:%d\n", type, *extnt_count, *extnt_size); + +err_exit: + mempool_free(mbox, phba->mbox_mem_pool); + return rc; +} + +/** + * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. + * @phba: Pointer to HBA context object. + * @type: The extent type to check. + * + * This function reads the current available extents from the port and checks + * if the extent count or extent size has changed since the last access. + * Callers use this routine post port reset to understand if there is a + * extent reprovisioning requirement. + * + * Returns: + * -Error: error indicates problem. + * 1: Extent count or size has changed. + * 0: No changes. + **/ +static int +lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) +{ + uint16_t curr_ext_cnt, rsrc_ext_cnt; + uint16_t size_diff, rsrc_ext_size; + int rc = 0; + struct lpfc_rsrc_blks *rsrc_entry; + struct list_head *rsrc_blk_list = NULL; + + size_diff = 0; + curr_ext_cnt = 0; + rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, + &rsrc_ext_cnt, + &rsrc_ext_size); + if (unlikely(rc)) + return -EIO; + + switch (type) { + case LPFC_RSC_TYPE_FCOE_RPI: + rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; + break; + case LPFC_RSC_TYPE_FCOE_VPI: + rsrc_blk_list = &phba->lpfc_vpi_blk_list; + break; + case LPFC_RSC_TYPE_FCOE_XRI: + rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; + break; + case LPFC_RSC_TYPE_FCOE_VFI: + rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; + break; + default: + break; + } + + list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { + curr_ext_cnt++; + if (rsrc_entry->rsrc_size != rsrc_ext_size) + size_diff++; + } + + if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) + rc = 1; + + return rc; +} + +/** + * lpfc_sli4_cfg_post_extnts - + * @phba: Pointer to HBA context object. + * @extnt_cnt: number of available extents. + * @type: the extent type (rpi, xri, vfi, vpi). + * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation. + * @mbox: pointer to the caller's allocated mailbox structure. + * + * This function executes the extents allocation request. It also + * takes care of the amount of memory needed to allocate or get the + * allocated extents. It is the caller's responsibility to evaluate + * the response. + * + * Returns: + * -Error: Error value describes the condition found. 
+ * 0: if successful + **/ +static int +lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, + uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) +{ + int rc = 0; + uint32_t req_len; + uint32_t emb_len; + uint32_t alloc_len, mbox_tmo; + + /* Calculate the total requested length of the dma memory */ + req_len = extnt_cnt * sizeof(uint16_t); + + /* + * Calculate the size of an embedded mailbox. The uint32_t + * accounts for extents-specific word. + */ + emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - + sizeof(uint32_t); + + /* + * Presume the allocation and response will fit into an embedded + * mailbox. If not true, reconfigure to a non-embedded mailbox. + */ + *emb = LPFC_SLI4_MBX_EMBED; + if (req_len > emb_len) { + req_len = extnt_cnt * sizeof(uint16_t) + + sizeof(union lpfc_sli4_cfg_shdr) + + sizeof(uint32_t); + *emb = LPFC_SLI4_MBX_NEMBED; + } + + alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, + req_len, *emb); + if (alloc_len < req_len) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2982 Allocated DMA memory size (x%x) is " + "less than the requested DMA memory " + "size (x%x)\n", alloc_len, req_len); + return -ENOMEM; + } + rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); + if (unlikely(rc)) + return -EIO; + + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + + if (unlikely(rc)) + rc = -EIO; + return rc; +} + +/** + * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. + * @phba: Pointer to HBA context object. + * @type: The resource extent type to allocate. + * + * This function allocates the number of elements for the specified + * resource type. + **/ +static int +lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) +{ + bool emb = false; + uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; + uint16_t rsrc_id, rsrc_start, j, k; + uint16_t *ids; + int i, rc; + unsigned long longs; + unsigned long *bmask; + struct lpfc_rsrc_blks *rsrc_blks; + LPFC_MBOXQ_t *mbox; + uint32_t length; + struct lpfc_id_range *id_array = NULL; + void *virtaddr = NULL; + struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; + struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; + struct list_head *ext_blk_list; + + rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, + &rsrc_cnt, + &rsrc_size); + if (unlikely(rc)) + return -EIO; + + if ((rsrc_cnt == 0) || (rsrc_size == 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3009 No available Resource Extents " + "for resource type 0x%x: Count: 0x%x, " + "Size 0x%x\n", type, rsrc_cnt, + rsrc_size); + return -ENOMEM; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, + "2903 Post resource extents type-0x%x: " + "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); + + mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); + if (unlikely(rc)) { + rc = -EIO; + goto err_exit; + } + + /* + * Figure out where the response is located. Then get local pointers + * to the response data. The port does not guarantee to respond to + * all extents counts request so update the local variable with the + * allocated count from the port. 
+ */ + if (emb == LPFC_SLI4_MBX_EMBED) { + rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; + id_array = &rsrc_ext->u.rsp.id[0]; + rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); + } else { + virtaddr = mbox->sge_array->addr[0]; + n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; + rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); + id_array = &n_rsrc->id; + } + + longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; + rsrc_id_cnt = rsrc_cnt * rsrc_size; + + /* + * Based on the resource size and count, correct the base and max + * resource values. + */ + length = sizeof(struct lpfc_rsrc_blks); + switch (type) { + case LPFC_RSC_TYPE_FCOE_RPI: + phba->sli4_hba.rpi_bmask = kcalloc(longs, + sizeof(unsigned long), + GFP_KERNEL); + if (unlikely(!phba->sli4_hba.rpi_bmask)) { + rc = -ENOMEM; + goto err_exit; + } + phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt, + sizeof(uint16_t), + GFP_KERNEL); + if (unlikely(!phba->sli4_hba.rpi_ids)) { + kfree(phba->sli4_hba.rpi_bmask); + rc = -ENOMEM; + goto err_exit; + } + + /* + * The next_rpi was initialized with the maximum available + * count but the port may allocate a smaller number. Catch + * that case and update the next_rpi. + */ + phba->sli4_hba.next_rpi = rsrc_id_cnt; + + /* Initialize local ptrs for common extent processing later. */ + bmask = phba->sli4_hba.rpi_bmask; + ids = phba->sli4_hba.rpi_ids; + ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; + break; + case LPFC_RSC_TYPE_FCOE_VPI: + phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), + GFP_KERNEL); + if (unlikely(!phba->vpi_bmask)) { + rc = -ENOMEM; + goto err_exit; + } + phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), + GFP_KERNEL); + if (unlikely(!phba->vpi_ids)) { + kfree(phba->vpi_bmask); + rc = -ENOMEM; + goto err_exit; + } + + /* Initialize local ptrs for common extent processing later. */ + bmask = phba->vpi_bmask; + ids = phba->vpi_ids; + ext_blk_list = &phba->lpfc_vpi_blk_list; + break; + case LPFC_RSC_TYPE_FCOE_XRI: + phba->sli4_hba.xri_bmask = kcalloc(longs, + sizeof(unsigned long), + GFP_KERNEL); + if (unlikely(!phba->sli4_hba.xri_bmask)) { + rc = -ENOMEM; + goto err_exit; + } + phba->sli4_hba.max_cfg_param.xri_used = 0; + phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt, + sizeof(uint16_t), + GFP_KERNEL); + if (unlikely(!phba->sli4_hba.xri_ids)) { + kfree(phba->sli4_hba.xri_bmask); + rc = -ENOMEM; + goto err_exit; + } + + /* Initialize local ptrs for common extent processing later. */ + bmask = phba->sli4_hba.xri_bmask; + ids = phba->sli4_hba.xri_ids; + ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; + break; + case LPFC_RSC_TYPE_FCOE_VFI: + phba->sli4_hba.vfi_bmask = kcalloc(longs, + sizeof(unsigned long), + GFP_KERNEL); + if (unlikely(!phba->sli4_hba.vfi_bmask)) { + rc = -ENOMEM; + goto err_exit; + } + phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt, + sizeof(uint16_t), + GFP_KERNEL); + if (unlikely(!phba->sli4_hba.vfi_ids)) { + kfree(phba->sli4_hba.vfi_bmask); + rc = -ENOMEM; + goto err_exit; + } + + /* Initialize local ptrs for common extent processing later. */ + bmask = phba->sli4_hba.vfi_bmask; + ids = phba->sli4_hba.vfi_ids; + ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; + break; + default: + /* Unsupported Opcode. Fail call. */ + id_array = NULL; + bmask = NULL; + ids = NULL; + ext_blk_list = NULL; + goto err_exit; + } + + /* + * Complete initializing the extent configuration with the + * allocated ids assigned to this function. The bitmask serves + * as an index into the array and manages the available ids. 
The + * array just stores the ids communicated to the port via the wqes. + */ + for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { + if ((i % 2) == 0) + rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, + &id_array[k]); + else + rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, + &id_array[k]); + + rsrc_blks = kzalloc(length, GFP_KERNEL); + if (unlikely(!rsrc_blks)) { + rc = -ENOMEM; + kfree(bmask); + kfree(ids); + goto err_exit; + } + rsrc_blks->rsrc_start = rsrc_id; + rsrc_blks->rsrc_size = rsrc_size; + list_add_tail(&rsrc_blks->list, ext_blk_list); + rsrc_start = rsrc_id; + if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { + phba->sli4_hba.io_xri_start = rsrc_start + + lpfc_sli4_get_iocb_cnt(phba); + } + + while (rsrc_id < (rsrc_start + rsrc_size)) { + ids[j] = rsrc_id; + rsrc_id++; + j++; + } + /* Entire word processed. Get next word.*/ + if ((i % 2) == 1) + k++; + } + err_exit: + lpfc_sli4_mbox_cmd_free(phba, mbox); + return rc; +} + + + +/** + * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. + * @phba: Pointer to HBA context object. + * @type: the extent's type. + * + * This function deallocates all extents of a particular resource type. + * SLI4 does not allow for deallocating a particular extent range. It + * is the caller's responsibility to release all kernel memory resources. + **/ +static int +lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) +{ + int rc; + uint32_t length, mbox_tmo = 0; + LPFC_MBOXQ_t *mbox; + struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; + struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; + + mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + /* + * This function sends an embedded mailbox because it only sends the + * the resource type. All extents of this type are released by the + * port. + */ + length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, + length, LPFC_SLI4_MBX_EMBED); + + /* Send an extents count of 0 - the dealloc doesn't use it. */ + rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, + LPFC_SLI4_MBX_EMBED); + if (unlikely(rc)) { + rc = -EIO; + goto out_free_mbox; + } + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + if (unlikely(rc)) { + rc = -EIO; + goto out_free_mbox; + } + + dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; + if (bf_get(lpfc_mbox_hdr_status, + &dealloc_rsrc->header.cfg_shdr.response)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2919 Failed to release resource extents " + "for type %d - Status 0x%x Add'l Status 0x%x. " + "Resource memory not released.\n", + type, + bf_get(lpfc_mbox_hdr_status, + &dealloc_rsrc->header.cfg_shdr.response), + bf_get(lpfc_mbox_hdr_add_status, + &dealloc_rsrc->header.cfg_shdr.response)); + rc = -EIO; + goto out_free_mbox; + } + + /* Release kernel memory resources for the specific type. 
*/ + switch (type) { + case LPFC_RSC_TYPE_FCOE_VPI: + kfree(phba->vpi_bmask); + kfree(phba->vpi_ids); + bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); + list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, + &phba->lpfc_vpi_blk_list, list) { + list_del_init(&rsrc_blk->list); + kfree(rsrc_blk); + } + phba->sli4_hba.max_cfg_param.vpi_used = 0; + break; + case LPFC_RSC_TYPE_FCOE_XRI: + kfree(phba->sli4_hba.xri_bmask); + kfree(phba->sli4_hba.xri_ids); + list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, + &phba->sli4_hba.lpfc_xri_blk_list, list) { + list_del_init(&rsrc_blk->list); + kfree(rsrc_blk); + } + break; + case LPFC_RSC_TYPE_FCOE_VFI: + kfree(phba->sli4_hba.vfi_bmask); + kfree(phba->sli4_hba.vfi_ids); + bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); + list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, + &phba->sli4_hba.lpfc_vfi_blk_list, list) { + list_del_init(&rsrc_blk->list); + kfree(rsrc_blk); + } + break; + case LPFC_RSC_TYPE_FCOE_RPI: + /* RPI bitmask and physical id array are cleaned up earlier. */ + list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, + &phba->sli4_hba.lpfc_rpi_blk_list, list) { + list_del_init(&rsrc_blk->list); + kfree(rsrc_blk); + } + break; + default: + break; + } + + bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); + + out_free_mbox: + mempool_free(mbox, phba->mbox_mem_pool); + return rc; +} + +static void +lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, + uint32_t feature) +{ + uint32_t len; + u32 sig_freq = 0; + + len = sizeof(struct lpfc_mbx_set_feature) - + sizeof(struct lpfc_sli4_cfg_mhdr); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_SET_FEATURES, len, + LPFC_SLI4_MBX_EMBED); + + switch (feature) { + case LPFC_SET_UE_RECOVERY: + bf_set(lpfc_mbx_set_feature_UER, + &mbox->u.mqe.un.set_feature, 1); + mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; + mbox->u.mqe.un.set_feature.param_len = 8; + break; + case LPFC_SET_MDS_DIAGS: + bf_set(lpfc_mbx_set_feature_mds, + &mbox->u.mqe.un.set_feature, 1); + bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, + &mbox->u.mqe.un.set_feature, 1); + mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; + mbox->u.mqe.un.set_feature.param_len = 8; + break; + case LPFC_SET_CGN_SIGNAL: + if (phba->cmf_active_mode == LPFC_CFG_OFF) + sig_freq = 0; + else + sig_freq = phba->cgn_sig_freq; + + if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { + bf_set(lpfc_mbx_set_feature_CGN_alarm_freq, + &mbox->u.mqe.un.set_feature, sig_freq); + bf_set(lpfc_mbx_set_feature_CGN_warn_freq, + &mbox->u.mqe.un.set_feature, sig_freq); + } + + if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY) + bf_set(lpfc_mbx_set_feature_CGN_warn_freq, + &mbox->u.mqe.un.set_feature, sig_freq); + + if (phba->cmf_active_mode == LPFC_CFG_OFF || + phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED) + sig_freq = 0; + else + sig_freq = lpfc_acqe_cgn_frequency; + + bf_set(lpfc_mbx_set_feature_CGN_acqe_freq, + &mbox->u.mqe.un.set_feature, sig_freq); + + mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL; + mbox->u.mqe.un.set_feature.param_len = 12; + break; + case LPFC_SET_DUAL_DUMP: + bf_set(lpfc_mbx_set_feature_dd, + &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP); + bf_set(lpfc_mbx_set_feature_ddquery, + &mbox->u.mqe.un.set_feature, 0); + mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP; + mbox->u.mqe.un.set_feature.param_len = 4; + break; + case LPFC_SET_ENABLE_MI: + mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI; + mbox->u.mqe.un.set_feature.param_len = 4; + 
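+		/* Fill in the LUN queue depth and MI version for the request */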
bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature, + phba->pport->cfg_lun_queue_depth); + bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature, + phba->sli4_hba.pc_sli4_params.mi_ver); + break; + case LPFC_SET_LD_SIGNAL: + mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL; + mbox->u.mqe.un.set_feature.param_len = 16; + bf_set(lpfc_mbx_set_feature_lds_qry, + &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP); + break; + case LPFC_SET_ENABLE_CMF: + mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF; + mbox->u.mqe.un.set_feature.param_len = 4; + bf_set(lpfc_mbx_set_feature_cmf, + &mbox->u.mqe.un.set_feature, 1); + break; + } + return; +} + +/** + * lpfc_ras_stop_fwlog: Disable FW logging by the adapter + * @phba: Pointer to HBA context object. + * + * Disable FW logging into host memory on the adapter. To + * be done before reading logs from the host memory. + **/ +void +lpfc_ras_stop_fwlog(struct lpfc_hba *phba) +{ + struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; + + spin_lock_irq(&phba->hbalock); + ras_fwlog->state = INACTIVE; + spin_unlock_irq(&phba->hbalock); + + /* Disable FW logging to host memory */ + writel(LPFC_CTL_PDEV_CTL_DDL_RAS, + phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); + + /* Wait 10ms for firmware to stop using DMA buffer */ + usleep_range(10 * 1000, 20 * 1000); +} + +/** + * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging. + * @phba: Pointer to HBA context object. + * + * This function is called to free memory allocated for RAS FW logging + * support in the driver. + **/ +void +lpfc_sli4_ras_dma_free(struct lpfc_hba *phba) +{ + struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; + struct lpfc_dmabuf *dmabuf, *next; + + if (!list_empty(&ras_fwlog->fwlog_buff_list)) { + list_for_each_entry_safe(dmabuf, next, + &ras_fwlog->fwlog_buff_list, + list) { + list_del(&dmabuf->list); + dma_free_coherent(&phba->pcidev->dev, + LPFC_RAS_MAX_ENTRY_SIZE, + dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + } + } + + if (ras_fwlog->lwpd.virt) { + dma_free_coherent(&phba->pcidev->dev, + sizeof(uint32_t) * 2, + ras_fwlog->lwpd.virt, + ras_fwlog->lwpd.phys); + ras_fwlog->lwpd.virt = NULL; + } + + spin_lock_irq(&phba->hbalock); + ras_fwlog->state = INACTIVE; + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support + * @phba: Pointer to HBA context object. + * @fwlog_buff_count: Count of buffers to be created. + * + * This routine DMA memory for Log Write Position Data[LPWD] and buffer + * to update FW log is posted to the adapter. + * Buffer count is calculated based on module param ras_fwlog_buffsize + * Size of each buffer posted to FW is 64K. 
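+ *
+ * Return: 0 on success, -ENOMEM if the LWPD or a log buffer cannot be
+ * allocated.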
+ **/ + +static int +lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba, + uint32_t fwlog_buff_count) +{ + struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; + struct lpfc_dmabuf *dmabuf; + int rc = 0, i = 0; + + /* Initialize List */ + INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list); + + /* Allocate memory for the LWPD */ + ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev, + sizeof(uint32_t) * 2, + &ras_fwlog->lwpd.phys, + GFP_KERNEL); + if (!ras_fwlog->lwpd.virt) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6185 LWPD Memory Alloc Failed\n"); + + return -ENOMEM; + } + + ras_fwlog->fw_buffcount = fwlog_buff_count; + for (i = 0; i < ras_fwlog->fw_buffcount; i++) { + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), + GFP_KERNEL); + if (!dmabuf) { + rc = -ENOMEM; + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "6186 Memory Alloc failed FW logging"); + goto free_mem; + } + + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + LPFC_RAS_MAX_ENTRY_SIZE, + &dmabuf->phys, GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + rc = -ENOMEM; + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "6187 DMA Alloc Failed FW logging"); + goto free_mem; + } + dmabuf->buffer_tag = i; + list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list); + } + +free_mem: + if (rc) + lpfc_sli4_ras_dma_free(phba); + + return rc; +} + +/** + * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * Completion handler for driver's RAS MBX command to the device. + **/ +static void +lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t shdr_status, shdr_add_status; + struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; + + mb = &pmb->u.mb; + + shdr = (union lpfc_sli4_cfg_shdr *) + &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + + if (mb->mbxStatus != MBX_SUCCESS || shdr_status) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6188 FW LOG mailbox " + "completed with status x%x add_status x%x," + " mbx status x%x\n", + shdr_status, shdr_add_status, mb->mbxStatus); + + ras_fwlog->ras_hwsupport = false; + goto disable_ras; + } + + spin_lock_irq(&phba->hbalock); + ras_fwlog->state = ACTIVE; + spin_unlock_irq(&phba->hbalock); + mempool_free(pmb, phba->mbox_mem_pool); + + return; + +disable_ras: + /* Free RAS DMA memory */ + lpfc_sli4_ras_dma_free(phba); + mempool_free(pmb, phba->mbox_mem_pool); +} + +/** + * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command + * @phba: pointer to lpfc hba data structure. + * @fwlog_level: Logging verbosity level. + * @fwlog_enable: Enable/Disable logging. + * + * Initialize memory and post mailbox command to enable FW logging in host + * memory. 
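+ *
+ * Return: 0 on success, negative errno if buffer allocation or the mailbox
+ * command fails.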
+ **/ +int +lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, + uint32_t fwlog_level, + uint32_t fwlog_enable) +{ + struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; + struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; + int rc = 0; + + spin_lock_irq(&phba->hbalock); + ras_fwlog->state = INACTIVE; + spin_unlock_irq(&phba->hbalock); + + fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * + phba->cfg_ras_fwlog_buffsize); + fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE); + + /* + * If re-enabling FW logging support use earlier allocated + * DMA buffers while posting MBX command. + **/ + if (!ras_fwlog->lwpd.virt) { + rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "6189 FW Log Memory Allocation Failed"); + return rc; + } + } + + /* Setup Mailbox command */ + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6190 RAS MBX Alloc Failed"); + rc = -ENOMEM; + goto mem_free; + } + + ras_fwlog->fw_loglevel = fwlog_level; + len = (sizeof(struct lpfc_mbx_set_ras_fwlog) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL, + LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION, + len, LPFC_SLI4_MBX_EMBED); + + mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog; + bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request, + fwlog_enable); + bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request, + ras_fwlog->fw_loglevel); + bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request, + ras_fwlog->fw_buffcount); + bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request, + LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE); + + /* Update DMA buffer address */ + list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) { + memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE); + + mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + + mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } + + /* Update LPWD address */ + mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); + mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); + + spin_lock_irq(&phba->hbalock); + ras_fwlog->state = REG_INPROGRESS; + spin_unlock_irq(&phba->hbalock); + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6191 FW-Log Mailbox failed. " + "status %d mbxStatus : x%x", rc, + bf_get(lpfc_mqe_status, &mbox->u.mqe)); + mempool_free(mbox, phba->mbox_mem_pool); + rc = -EIO; + goto mem_free; + } else + rc = 0; +mem_free: + if (rc) + lpfc_sli4_ras_dma_free(phba); + + return rc; +} + +/** + * lpfc_sli4_ras_setup - Check if RAS supported on the adapter + * @phba: Pointer to HBA context object. + * + * Check if RAS is supported on the adapter and initialize it. + **/ +void +lpfc_sli4_ras_setup(struct lpfc_hba *phba) +{ + /* Check RAS FW Log needs to be enabled or not */ + if (lpfc_check_fwlog_support(phba)) + return; + + lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, + LPFC_RAS_ENABLE_LOGGING); +} + +/** + * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. + * @phba: Pointer to HBA context object. + * + * This function allocates all SLI4 resource identifiers. 
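+ *
+ * Return: 0 on success, negative errno on failure.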
+ **/ +int +lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) +{ + int i, rc, error = 0; + uint16_t count, base; + unsigned long longs; + + if (!phba->sli4_hba.rpi_hdrs_in_use) + phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; + if (phba->sli4_hba.extents_in_use) { + /* + * The port supports resource extents. The XRI, VPI, VFI, RPI + * resource extent count must be read and allocated before + * provisioning the resource id arrays. + */ + if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == + LPFC_IDX_RSRC_RDY) { + /* + * Extent-based resources are set - the driver could + * be in a port reset. Figure out if any corrective + * actions need to be taken. + */ + rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, + LPFC_RSC_TYPE_FCOE_VFI); + if (rc != 0) + error++; + rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, + LPFC_RSC_TYPE_FCOE_VPI); + if (rc != 0) + error++; + rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, + LPFC_RSC_TYPE_FCOE_XRI); + if (rc != 0) + error++; + rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, + LPFC_RSC_TYPE_FCOE_RPI); + if (rc != 0) + error++; + + /* + * It's possible that the number of resources + * provided to this port instance changed between + * resets. Detect this condition and reallocate + * resources. Otherwise, there is no action. + */ + if (error) { + lpfc_printf_log(phba, KERN_INFO, + LOG_MBOX | LOG_INIT, + "2931 Detected extent resource " + "change. Reallocating all " + "extents.\n"); + rc = lpfc_sli4_dealloc_extent(phba, + LPFC_RSC_TYPE_FCOE_VFI); + rc = lpfc_sli4_dealloc_extent(phba, + LPFC_RSC_TYPE_FCOE_VPI); + rc = lpfc_sli4_dealloc_extent(phba, + LPFC_RSC_TYPE_FCOE_XRI); + rc = lpfc_sli4_dealloc_extent(phba, + LPFC_RSC_TYPE_FCOE_RPI); + } else + return 0; + } + + rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); + if (unlikely(rc)) + goto err_exit; + + rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); + if (unlikely(rc)) + goto err_exit; + + rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); + if (unlikely(rc)) + goto err_exit; + + rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); + if (unlikely(rc)) + goto err_exit; + bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, + LPFC_IDX_RSRC_RDY); + return rc; + } else { + /* + * The port does not support resource extents. The XRI, VPI, + * VFI, RPI resource ids were determined from READ_CONFIG. + * Just allocate the bitmasks and provision the resource id + * arrays. If a port reset is active, the resources don't + * need any action - just exit. + */ + if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == + LPFC_IDX_RSRC_RDY) { + lpfc_sli4_dealloc_resource_identifiers(phba); + lpfc_sli4_remove_rpis(phba); + } + /* RPIs. */ + count = phba->sli4_hba.max_cfg_param.max_rpi; + if (count <= 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3279 Invalid provisioning of " + "rpi:%d\n", count); + rc = -EINVAL; + goto err_exit; + } + base = phba->sli4_hba.max_cfg_param.rpi_base; + longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; + phba->sli4_hba.rpi_bmask = kcalloc(longs, + sizeof(unsigned long), + GFP_KERNEL); + if (unlikely(!phba->sli4_hba.rpi_bmask)) { + rc = -ENOMEM; + goto err_exit; + } + phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t), + GFP_KERNEL); + if (unlikely(!phba->sli4_hba.rpi_ids)) { + rc = -ENOMEM; + goto free_rpi_bmask; + } + + for (i = 0; i < count; i++) + phba->sli4_hba.rpi_ids[i] = base + i; + + /* VPIs. 
*/ + count = phba->sli4_hba.max_cfg_param.max_vpi; + if (count <= 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3280 Invalid provisioning of " + "vpi:%d\n", count); + rc = -EINVAL; + goto free_rpi_ids; + } + base = phba->sli4_hba.max_cfg_param.vpi_base; + longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; + phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), + GFP_KERNEL); + if (unlikely(!phba->vpi_bmask)) { + rc = -ENOMEM; + goto free_rpi_ids; + } + phba->vpi_ids = kcalloc(count, sizeof(uint16_t), + GFP_KERNEL); + if (unlikely(!phba->vpi_ids)) { + rc = -ENOMEM; + goto free_vpi_bmask; + } + + for (i = 0; i < count; i++) + phba->vpi_ids[i] = base + i; + + /* XRIs. */ + count = phba->sli4_hba.max_cfg_param.max_xri; + if (count <= 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3281 Invalid provisioning of " + "xri:%d\n", count); + rc = -EINVAL; + goto free_vpi_ids; + } + base = phba->sli4_hba.max_cfg_param.xri_base; + longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; + phba->sli4_hba.xri_bmask = kcalloc(longs, + sizeof(unsigned long), + GFP_KERNEL); + if (unlikely(!phba->sli4_hba.xri_bmask)) { + rc = -ENOMEM; + goto free_vpi_ids; + } + phba->sli4_hba.max_cfg_param.xri_used = 0; + phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t), + GFP_KERNEL); + if (unlikely(!phba->sli4_hba.xri_ids)) { + rc = -ENOMEM; + goto free_xri_bmask; + } + + for (i = 0; i < count; i++) + phba->sli4_hba.xri_ids[i] = base + i; + + /* VFIs. */ + count = phba->sli4_hba.max_cfg_param.max_vfi; + if (count <= 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3282 Invalid provisioning of " + "vfi:%d\n", count); + rc = -EINVAL; + goto free_xri_ids; + } + base = phba->sli4_hba.max_cfg_param.vfi_base; + longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; + phba->sli4_hba.vfi_bmask = kcalloc(longs, + sizeof(unsigned long), + GFP_KERNEL); + if (unlikely(!phba->sli4_hba.vfi_bmask)) { + rc = -ENOMEM; + goto free_xri_ids; + } + phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t), + GFP_KERNEL); + if (unlikely(!phba->sli4_hba.vfi_ids)) { + rc = -ENOMEM; + goto free_vfi_bmask; + } + + for (i = 0; i < count; i++) + phba->sli4_hba.vfi_ids[i] = base + i; + + /* + * Mark all resources ready. An HBA reset doesn't need + * to reset the initialization. + */ + bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, + LPFC_IDX_RSRC_RDY); + return 0; + } + + free_vfi_bmask: + kfree(phba->sli4_hba.vfi_bmask); + phba->sli4_hba.vfi_bmask = NULL; + free_xri_ids: + kfree(phba->sli4_hba.xri_ids); + phba->sli4_hba.xri_ids = NULL; + free_xri_bmask: + kfree(phba->sli4_hba.xri_bmask); + phba->sli4_hba.xri_bmask = NULL; + free_vpi_ids: + kfree(phba->vpi_ids); + phba->vpi_ids = NULL; + free_vpi_bmask: + kfree(phba->vpi_bmask); + phba->vpi_bmask = NULL; + free_rpi_ids: + kfree(phba->sli4_hba.rpi_ids); + phba->sli4_hba.rpi_ids = NULL; + free_rpi_bmask: + kfree(phba->sli4_hba.rpi_bmask); + phba->sli4_hba.rpi_bmask = NULL; + err_exit: + return rc; +} + +/** + * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. + * @phba: Pointer to HBA context object. + * + * This function allocates the number of elements for the specified + * resource type. 
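+ *
+ * Return: Always 0.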
+ **/ +int +lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) +{ + if (phba->sli4_hba.extents_in_use) { + lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); + lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); + lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); + lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); + } else { + kfree(phba->vpi_bmask); + phba->sli4_hba.max_cfg_param.vpi_used = 0; + kfree(phba->vpi_ids); + bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); + kfree(phba->sli4_hba.xri_bmask); + kfree(phba->sli4_hba.xri_ids); + kfree(phba->sli4_hba.vfi_bmask); + kfree(phba->sli4_hba.vfi_ids); + bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); + bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); + } + + return 0; +} + +/** + * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. + * @phba: Pointer to HBA context object. + * @type: The resource extent type. + * @extnt_cnt: buffer to hold port extent count response + * @extnt_size: buffer to hold port extent size response. + * + * This function calls the port to read the host allocated extents + * for a particular type. + **/ +int +lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, + uint16_t *extnt_cnt, uint16_t *extnt_size) +{ + bool emb; + int rc = 0; + uint16_t curr_blks = 0; + uint32_t req_len, emb_len; + uint32_t alloc_len, mbox_tmo; + struct list_head *blk_list_head; + struct lpfc_rsrc_blks *rsrc_blk; + LPFC_MBOXQ_t *mbox; + void *virtaddr = NULL; + struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; + struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; + union lpfc_sli4_cfg_shdr *shdr; + + switch (type) { + case LPFC_RSC_TYPE_FCOE_VPI: + blk_list_head = &phba->lpfc_vpi_blk_list; + break; + case LPFC_RSC_TYPE_FCOE_XRI: + blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; + break; + case LPFC_RSC_TYPE_FCOE_VFI: + blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; + break; + case LPFC_RSC_TYPE_FCOE_RPI: + blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; + break; + default: + return -EIO; + } + + /* Count the number of extents currently allocatd for this type. */ + list_for_each_entry(rsrc_blk, blk_list_head, list) { + if (curr_blks == 0) { + /* + * The GET_ALLOCATED mailbox does not return the size, + * just the count. The size should be just the size + * stored in the current allocated block and all sizes + * for an extent type are the same so set the return + * value now. + */ + *extnt_size = rsrc_blk->rsrc_size; + } + curr_blks++; + } + + /* + * Calculate the size of an embedded mailbox. The uint32_t + * accounts for extents-specific word. + */ + emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - + sizeof(uint32_t); + + /* + * Presume the allocation and response will fit into an embedded + * mailbox. If not true, reconfigure to a non-embedded mailbox. 
+ */ + emb = LPFC_SLI4_MBX_EMBED; + req_len = emb_len; + if (req_len > emb_len) { + req_len = curr_blks * sizeof(uint16_t) + + sizeof(union lpfc_sli4_cfg_shdr) + + sizeof(uint32_t); + emb = LPFC_SLI4_MBX_NEMBED; + } + + mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); + + alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, + req_len, emb); + if (alloc_len < req_len) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2983 Allocated DMA memory size (x%x) is " + "less than the requested DMA memory " + "size (x%x)\n", alloc_len, req_len); + rc = -ENOMEM; + goto err_exit; + } + rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); + if (unlikely(rc)) { + rc = -EIO; + goto err_exit; + } + + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + + if (unlikely(rc)) { + rc = -EIO; + goto err_exit; + } + + /* + * Figure out where the response is located. Then get local pointers + * to the response data. The port does not guarantee to respond to + * all extents counts request so update the local variable with the + * allocated count from the port. + */ + if (emb == LPFC_SLI4_MBX_EMBED) { + rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; + shdr = &rsrc_ext->header.cfg_shdr; + *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); + } else { + virtaddr = mbox->sge_array->addr[0]; + n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; + shdr = &n_rsrc->cfg_shdr; + *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); + } + + if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2984 Failed to read allocated resources " + "for type %d - Status 0x%x Add'l Status 0x%x.\n", + type, + bf_get(lpfc_mbox_hdr_status, &shdr->response), + bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); + rc = -EIO; + goto err_exit; + } + err_exit: + lpfc_sli4_mbox_cmd_free(phba, mbox); + return rc; +} + +/** + * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block + * @phba: pointer to lpfc hba data structure. + * @sgl_list: linked link of sgl buffers to post + * @cnt: number of linked list buffers + * + * This routine walks the list of buffers that have been allocated and + * repost them to the port by using SGL block post. This is needed after a + * pci_function_reset/warm_start or start. It attempts to construct blocks + * of buffer sgls which contains contiguous xris and uses the non-embedded + * SGL block post mailbox commands to post them to the port. For single + * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post + * mailbox command for posting. + * + * Returns: 0 = success, non-zero failure. 
+ **/ +static int +lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, + struct list_head *sgl_list, int cnt) +{ + struct lpfc_sglq *sglq_entry = NULL; + struct lpfc_sglq *sglq_entry_next = NULL; + struct lpfc_sglq *sglq_entry_first = NULL; + int status, total_cnt; + int post_cnt = 0, num_posted = 0, block_cnt = 0; + int last_xritag = NO_XRI; + LIST_HEAD(prep_sgl_list); + LIST_HEAD(blck_sgl_list); + LIST_HEAD(allc_sgl_list); + LIST_HEAD(post_sgl_list); + LIST_HEAD(free_sgl_list); + + spin_lock_irq(&phba->hbalock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(sgl_list, &allc_sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + spin_unlock_irq(&phba->hbalock); + + total_cnt = cnt; + list_for_each_entry_safe(sglq_entry, sglq_entry_next, + &allc_sgl_list, list) { + list_del_init(&sglq_entry->list); + block_cnt++; + if ((last_xritag != NO_XRI) && + (sglq_entry->sli4_xritag != last_xritag + 1)) { + /* a hole in xri block, form a sgl posting block */ + list_splice_init(&prep_sgl_list, &blck_sgl_list); + post_cnt = block_cnt - 1; + /* prepare list for next posting block */ + list_add_tail(&sglq_entry->list, &prep_sgl_list); + block_cnt = 1; + } else { + /* prepare list for next posting block */ + list_add_tail(&sglq_entry->list, &prep_sgl_list); + /* enough sgls for non-embed sgl mbox command */ + if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { + list_splice_init(&prep_sgl_list, + &blck_sgl_list); + post_cnt = block_cnt; + block_cnt = 0; + } + } + num_posted++; + + /* keep track of last sgl's xritag */ + last_xritag = sglq_entry->sli4_xritag; + + /* end of repost sgl list condition for buffers */ + if (num_posted == total_cnt) { + if (post_cnt == 0) { + list_splice_init(&prep_sgl_list, + &blck_sgl_list); + post_cnt = block_cnt; + } else if (block_cnt == 1) { + status = lpfc_sli4_post_sgl(phba, + sglq_entry->phys, 0, + sglq_entry->sli4_xritag); + if (!status) { + /* successful, put sgl to posted list */ + list_add_tail(&sglq_entry->list, + &post_sgl_list); + } else { + /* Failure, put sgl to free list */ + lpfc_printf_log(phba, KERN_WARNING, + LOG_SLI, + "3159 Failed to post " + "sgl, xritag:x%x\n", + sglq_entry->sli4_xritag); + list_add_tail(&sglq_entry->list, + &free_sgl_list); + total_cnt--; + } + } + } + + /* continue until a nembed page worth of sgls */ + if (post_cnt == 0) + continue; + + /* post the buffer list sgls as a block */ + status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, + post_cnt); + + if (!status) { + /* success, put sgl list to posted sgl list */ + list_splice_init(&blck_sgl_list, &post_sgl_list); + } else { + /* Failure, put sgl list to free sgl list */ + sglq_entry_first = list_first_entry(&blck_sgl_list, + struct lpfc_sglq, + list); + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3160 Failed to post sgl-list, " + "xritag:x%x-x%x\n", + sglq_entry_first->sli4_xritag, + (sglq_entry_first->sli4_xritag + + post_cnt - 1)); + list_splice_init(&blck_sgl_list, &free_sgl_list); + total_cnt -= post_cnt; + } + + /* don't reset xirtag due to hole in xri block */ + if (block_cnt == 0) + last_xritag = NO_XRI; + + /* reset sgl post count for next round of posting */ + post_cnt = 0; + } + + /* free the sgls failed to post */ + lpfc_free_sgl_list(phba, &free_sgl_list); + + /* push sgls posted to the available list */ + if (!list_empty(&post_sgl_list)) { + spin_lock_irq(&phba->hbalock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&post_sgl_list, sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + spin_unlock_irq(&phba->hbalock); + } else { + 
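+		/* Nothing was posted successfully; log the failure */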
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3161 Failure to post sgl to port,status %x " + "blkcnt %d totalcnt %d postcnt %d\n", + status, block_cnt, total_cnt, post_cnt); + return -EIO; + } + + /* return the number of XRIs actually posted */ + return total_cnt; +} + +/** + * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls + * @phba: pointer to lpfc hba data structure. + * + * This routine walks the list of nvme buffers that have been allocated and + * repost them to the port by using SGL block post. This is needed after a + * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine + * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list + * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers. + * + * Returns: 0 = success, non-zero failure. + **/ +static int +lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba) +{ + LIST_HEAD(post_nblist); + int num_posted, rc = 0; + + /* get all NVME buffers need to repost to a local list */ + lpfc_io_buf_flush(phba, &post_nblist); + + /* post the list of nvme buffer sgls to port if available */ + if (!list_empty(&post_nblist)) { + num_posted = lpfc_sli4_post_io_sgl_list( + phba, &post_nblist, phba->sli4_hba.io_xri_cnt); + /* failed to post any nvme buffer, return error */ + if (num_posted == 0) + rc = -EIO; + } + return rc; +} + +static void +lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) +{ + uint32_t len; + + len = sizeof(struct lpfc_mbx_set_host_data) - + sizeof(struct lpfc_sli4_cfg_mhdr); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_SET_HOST_DATA, len, + LPFC_SLI4_MBX_EMBED); + + mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; + mbox->u.mqe.un.set_host_data.param_len = + LPFC_HOST_OS_DRIVER_VERSION_SIZE; + snprintf(mbox->u.mqe.un.set_host_data.un.data, + LPFC_HOST_OS_DRIVER_VERSION_SIZE, + "Linux %s v"LPFC_DRIVER_VERSION, + (phba->hba_flag & HBA_FCOE_MODE) ? 
"FCoE" : "FC"); +} + +int +lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, + struct lpfc_queue *drq, int count, int idx) +{ + int rc, i; + struct lpfc_rqe hrqe; + struct lpfc_rqe drqe; + struct lpfc_rqb *rqbp; + unsigned long flags; + struct rqb_dmabuf *rqb_buffer; + LIST_HEAD(rqb_buf_list); + + rqbp = hrq->rqbp; + for (i = 0; i < count; i++) { + spin_lock_irqsave(&phba->hbalock, flags); + /* IF RQ is already full, don't bother */ + if (rqbp->buffer_count + i >= rqbp->entry_count - 1) { + spin_unlock_irqrestore(&phba->hbalock, flags); + break; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + + rqb_buffer = rqbp->rqb_alloc_buffer(phba); + if (!rqb_buffer) + break; + rqb_buffer->hrq = hrq; + rqb_buffer->drq = drq; + rqb_buffer->idx = idx; + list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); + } + + spin_lock_irqsave(&phba->hbalock, flags); + while (!list_empty(&rqb_buf_list)) { + list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, + hbuf.list); + + hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); + hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); + drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); + drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); + rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); + if (rc < 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6421 Cannot post to HRQ %d: %x %x %x " + "DRQ %x %x\n", + hrq->queue_id, + hrq->host_index, + hrq->hba_index, + hrq->entry_count, + drq->host_index, + drq->hba_index); + rqbp->rqb_free_buffer(phba, rqb_buffer); + } else { + list_add_tail(&rqb_buffer->hbuf.list, + &rqbp->rqb_buffer_list); + rqbp->buffer_count++; + } + } + spin_unlock_irqrestore(&phba->hbalock, flags); + return 1; +} + +static void +lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + union lpfc_sli4_cfg_shdr *shdr; + u32 shdr_status, shdr_add_status; + + shdr = (union lpfc_sli4_cfg_shdr *) + &pmb->u.mqe.un.sli4_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) { + lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX, + "4622 SET_FEATURE (x%x) mbox failed, " + "status x%x add_status x%x, mbx status x%x\n", + LPFC_SET_LD_SIGNAL, shdr_status, + shdr_add_status, pmb->u.mb.mbxStatus); + phba->degrade_activate_threshold = 0; + phba->degrade_deactivate_threshold = 0; + phba->fec_degrade_interval = 0; + goto out; + } + + phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7; + phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8; + phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10; + + lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT, + "4624 Success: da x%x dd x%x interval x%x\n", + phba->degrade_activate_threshold, + phba->degrade_deactivate_threshold, + phba->fec_degrade_interval); +out: + mempool_free(pmb, phba->mbox_mem_pool); +} + +int +lpfc_read_lds_params(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + int rc; + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL); + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mempool_free(mboxq, phba->mbox_mem_pool); + return -EIO; + } + return 0; +} + +static void +lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba 
*phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + union lpfc_sli4_cfg_shdr *shdr; + u32 shdr_status, shdr_add_status; + u32 sig, acqe; + + /* Two outcomes. (1) Set featurs was successul and EDC negotiation + * is done. (2) Mailbox failed and send FPIN support only. + */ + shdr = (union lpfc_sli4_cfg_shdr *) + &pmb->u.mqe.un.sli4_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT, + "2516 CGN SET_FEATURE mbox failed with " + "status x%x add_status x%x, mbx status x%x " + "Reset Congestion to FPINs only\n", + shdr_status, shdr_add_status, + pmb->u.mb.mbxStatus); + /* If there is a mbox error, move on to RDF */ + phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; + phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; + goto out; + } + + /* Zero out Congestion Signal ACQE counter */ + phba->cgn_acqe_cnt = 0; + + acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq, + &pmb->u.mqe.un.set_feature); + sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq, + &pmb->u.mqe.un.set_feature); + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "4620 SET_FEATURES Success: Freq: %ds %dms " + " Reg: x%x x%x\n", acqe, sig, + phba->cgn_reg_signal, phba->cgn_reg_fpin); +out: + mempool_free(pmb, phba->mbox_mem_pool); + + /* Register for FPIN events from the fabric now that the + * EDC common_set_features has completed. + */ + lpfc_issue_els_rdf(vport, 0); +} + +int +lpfc_config_cgn_signal(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + u32 rc; + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + goto out_rdf; + + lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL); + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "4621 SET_FEATURES: FREQ sig x%x acqe x%x: " + "Reg: x%x x%x\n", + phba->cgn_sig_freq, lpfc_acqe_cgn_frequency, + phba->cgn_reg_signal, phba->cgn_reg_fpin); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + goto out; + return 0; + +out: + mempool_free(mboxq, phba->mbox_mem_pool); +out_rdf: + /* If there is a mbox error, move on to RDF */ + phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; + phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; + lpfc_issue_els_rdf(phba->pport, 0); + return -EIO; +} + +/** + * lpfc_init_idle_stat_hb - Initialize idle_stat tracking + * @phba: pointer to lpfc hba data structure. + * + * This routine initializes the per-eq idle_stat to dynamically dictate + * polling decisions. 
+ * + * Return codes: + * None + **/ +static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba) +{ + int i; + struct lpfc_sli4_hdw_queue *hdwq; + struct lpfc_queue *eq; + struct lpfc_idle_stat *idle_stat; + u64 wall; + + for_each_present_cpu(i) { + hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; + eq = hdwq->hba_eq; + + /* Skip if we've already handled this eq's primary CPU */ + if (eq->chann != i) + continue; + + idle_stat = &phba->sli4_hba.idle_stat[i]; + + idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1); + idle_stat->prev_wall = wall; + + if (phba->nvmet_support || + phba->cmf_active_mode != LPFC_CFG_OFF || + phba->intr_type != MSIX) + eq->poll_mode = LPFC_QUEUE_WORK; + else + eq->poll_mode = LPFC_THREADED_IRQ; + } + + if (!phba->nvmet_support && phba->intr_type == MSIX) + schedule_delayed_work(&phba->idle_stat_delay_work, + msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); +} + +static void lpfc_sli4_dip(struct lpfc_hba *phba) +{ + uint32_t if_type; + + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + if (if_type == LPFC_SLI_INTF_IF_TYPE_2 || + if_type == LPFC_SLI_INTF_IF_TYPE_6) { + struct lpfc_register reg_data; + + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + ®_data.word0)) + return; + + if (bf_get(lpfc_sliport_status_dip, ®_data)) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2904 Firmware Dump Image Present" + " on Adapter"); + } +} + +/** + * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor + * @rx_monitor: Pointer to lpfc_rx_info_monitor object + * @entries: Number of rx_info_entry objects to allocate in ring + * + * Return: + * 0 - Success + * ENOMEM - Failure to kmalloc + **/ +int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor, + u32 entries) +{ + rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry), + GFP_KERNEL); + if (!rx_monitor->ring) + return -ENOMEM; + + rx_monitor->head_idx = 0; + rx_monitor->tail_idx = 0; + spin_lock_init(&rx_monitor->lock); + rx_monitor->entries = entries; + + return 0; +} + +/** + * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor + * @rx_monitor: Pointer to lpfc_rx_info_monitor object + * + * Called after cancellation of cmf_timer. + **/ +void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor) +{ + kfree(rx_monitor->ring); + rx_monitor->ring = NULL; + rx_monitor->entries = 0; + rx_monitor->head_idx = 0; + rx_monitor->tail_idx = 0; +} + +/** + * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring + * @rx_monitor: Pointer to lpfc_rx_info_monitor object + * @entry: Pointer to rx_info_entry + * + * Used to insert an rx_info_entry into rx_monitor's ring. Note that this is a + * deep copy of rx_info_entry not a shallow copy of the rx_info_entry ptr. + * + * This is called from lpfc_cmf_timer, which is in timer/softirq context. + * + * In cases of old data overflow, we do a best effort of FIFO order. 
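+ * If the write fills the ring (tail_idx catches up with head_idx), the oldest + * entry is overwritten and head_idx is advanced so the newest samples are kept.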
+ **/ +void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor, + struct rx_info_entry *entry) +{ + struct rx_info_entry *ring = rx_monitor->ring; + u32 *head_idx = &rx_monitor->head_idx; + u32 *tail_idx = &rx_monitor->tail_idx; + spinlock_t *ring_lock = &rx_monitor->lock; + u32 ring_size = rx_monitor->entries; + + spin_lock(ring_lock); + memcpy(&ring[*tail_idx], entry, sizeof(*entry)); + *tail_idx = (*tail_idx + 1) % ring_size; + + /* Best effort of FIFO saved data */ + if (*tail_idx == *head_idx) + *head_idx = (*head_idx + 1) % ring_size; + + spin_unlock(ring_lock); +} + +/** + * lpfc_rx_monitor_report - Read out rx_monitor's ring + * @phba: Pointer to lpfc_hba object + * @rx_monitor: Pointer to lpfc_rx_info_monitor object + * @buf: Pointer to char buffer that will contain rx monitor info data + * @buf_len: Length buf including null char + * @max_read_entries: Maximum number of entries to read out of ring + * + * Used to dump/read what's in rx_monitor's ring buffer. + * + * If buf is NULL || buf_len == 0, then it is implied that we want to log the + * information to kmsg instead of filling out buf. + * + * Return: + * Number of entries read out of the ring + **/ +u32 lpfc_rx_monitor_report(struct lpfc_hba *phba, + struct lpfc_rx_info_monitor *rx_monitor, char *buf, + u32 buf_len, u32 max_read_entries) +{ + struct rx_info_entry *ring = rx_monitor->ring; + struct rx_info_entry *entry; + u32 *head_idx = &rx_monitor->head_idx; + u32 *tail_idx = &rx_monitor->tail_idx; + spinlock_t *ring_lock = &rx_monitor->lock; + u32 ring_size = rx_monitor->entries; + u32 cnt = 0; + char tmp[DBG_LOG_STR_SZ] = {0}; + bool log_to_kmsg = (!buf || !buf_len) ? true : false; + + if (!log_to_kmsg) { + /* clear the buffer to be sure */ + memset(buf, 0, buf_len); + + scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s" + "%-8s%-8s%-8s%-16s\n", + "MaxBPI", "Tot_Data_CMF", + "Tot_Data_Cmd", "Tot_Data_Cmpl", + "Lat(us)", "Avg_IO", "Max_IO", "Bsy", + "IO_cnt", "Info", "BWutil(ms)"); + } + + /* Needs to be _irq because record is called from timer interrupt + * context + */ + spin_lock_irq(ring_lock); + while (*head_idx != *tail_idx) { + entry = &ring[*head_idx]; + + /* Read out this entry's data. */ + if (!log_to_kmsg) { + /* If !log_to_kmsg, then store to buf. */ + scnprintf(tmp, sizeof(tmp), + "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu" + "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n", + *head_idx, entry->max_bytes_per_interval, + entry->cmf_bytes, entry->total_bytes, + entry->rcv_bytes, entry->avg_io_latency, + entry->avg_io_size, entry->max_read_cnt, + entry->cmf_busy, entry->io_cnt, + entry->cmf_info, entry->timer_utilization, + entry->timer_interval); + + /* Check for buffer overflow */ + if ((strlen(buf) + strlen(tmp)) >= buf_len) + break; + + /* Append entry's data to buffer */ + strlcat(buf, tmp, buf_len); + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "4410 %02u: MBPI %llu Xmit %llu " + "Cmpl %llu Lat %llu ASz %llu Info %02u " + "BWUtil %u Int %u slot %u\n", + cnt, entry->max_bytes_per_interval, + entry->total_bytes, entry->rcv_bytes, + entry->avg_io_latency, + entry->avg_io_size, entry->cmf_info, + entry->timer_utilization, + entry->timer_interval, *head_idx); + } + + *head_idx = (*head_idx + 1) % ring_size; + + /* Don't feed more than max_read_entries */ + cnt++; + if (cnt >= max_read_entries) + break; + } + spin_unlock_irq(ring_lock); + + return cnt; +} + +/** + * lpfc_cmf_setup - Initialize idle_stat tracking + * @phba: Pointer to HBA context object. 
+ * + * This is called from HBA setup during driver load or when the HBA + * comes online. this does all the initialization to support CMF and MI. + **/ +static int +lpfc_cmf_setup(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + struct lpfc_dmabuf *mp; + struct lpfc_pc_sli4_params *sli4_params; + int rc, cmf, mi_ver; + + rc = lpfc_sli4_refresh_params(phba); + if (unlikely(rc)) + return rc; + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + sli4_params = &phba->sli4_hba.pc_sli4_params; + + /* Always try to enable MI feature if we can */ + if (sli4_params->mi_ver) { + lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + mi_ver = bf_get(lpfc_mbx_set_feature_mi, + &mboxq->u.mqe.un.set_feature); + + if (rc == MBX_SUCCESS) { + if (mi_ver) { + lpfc_printf_log(phba, + KERN_WARNING, LOG_CGN_MGMT, + "6215 MI is enabled\n"); + sli4_params->mi_ver = mi_ver; + } else { + lpfc_printf_log(phba, + KERN_WARNING, LOG_CGN_MGMT, + "6338 MI is disabled\n"); + sli4_params->mi_ver = 0; + } + } else { + /* mi_ver is already set from GET_SLI4_PARAMETERS */ + lpfc_printf_log(phba, KERN_INFO, + LOG_CGN_MGMT | LOG_INIT, + "6245 Enable MI Mailbox x%x (x%x/x%x) " + "failed, rc:x%x mi:x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + lpfc_sli_config_mbox_subsys_get + (phba, mboxq), + lpfc_sli_config_mbox_opcode_get + (phba, mboxq), + rc, sli4_params->mi_ver); + } + } else { + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "6217 MI is disabled\n"); + } + + /* Ensure FDMI is enabled for MI if enable_mi is set */ + if (sli4_params->mi_ver) + phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT; + + /* Always try to enable CMF feature if we can */ + if (sli4_params->cmf) { + lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + cmf = bf_get(lpfc_mbx_set_feature_cmf, + &mboxq->u.mqe.un.set_feature); + if (rc == MBX_SUCCESS && cmf) { + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "6218 CMF is enabled: mode %d\n", + phba->cmf_active_mode); + } else { + lpfc_printf_log(phba, KERN_WARNING, + LOG_CGN_MGMT | LOG_INIT, + "6219 Enable CMF Mailbox x%x (x%x/x%x) " + "failed, rc:x%x dd:x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + lpfc_sli_config_mbox_subsys_get + (phba, mboxq), + lpfc_sli_config_mbox_opcode_get + (phba, mboxq), + rc, cmf); + sli4_params->cmf = 0; + phba->cmf_active_mode = LPFC_CFG_OFF; + goto no_cmf; + } + + /* Allocate Congestion Information Buffer */ + if (!phba->cgn_i) { + mp = kmalloc(sizeof(*mp), GFP_KERNEL); + if (mp) + mp->virt = dma_alloc_coherent + (&phba->pcidev->dev, + sizeof(struct lpfc_cgn_info), + &mp->phys, GFP_KERNEL); + if (!mp || !mp->virt) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2640 Failed to alloc memory " + "for Congestion Info\n"); + kfree(mp); + sli4_params->cmf = 0; + phba->cmf_active_mode = LPFC_CFG_OFF; + goto no_cmf; + } + phba->cgn_i = mp; + + /* initialize congestion buffer info */ + lpfc_init_congestion_buf(phba); + lpfc_init_congestion_stat(phba); + + /* Zero out Congestion Signal counters */ + atomic64_set(&phba->cgn_acqe_stat.alarm, 0); + atomic64_set(&phba->cgn_acqe_stat.warn, 0); + } + + rc = lpfc_sli4_cgn_params_read(phba); + if (rc < 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "6242 Error reading Cgn Params (%d)\n", + rc); + /* Ensure CGN Mode is off */ + sli4_params->cmf = 0; + } else if (!rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "6243 CGN Event empty 
object.\n"); + /* Ensure CGN Mode is off */ + sli4_params->cmf = 0; + } + } else { +no_cmf: + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "6220 CMF is disabled\n"); + } + + /* Only register congestion buffer with firmware if BOTH + * CMF and E2E are enabled. + */ + if (sli4_params->cmf && sli4_params->mi_ver) { + rc = lpfc_reg_congestion_buf(phba); + if (rc) { + dma_free_coherent(&phba->pcidev->dev, + sizeof(struct lpfc_cgn_info), + phba->cgn_i->virt, phba->cgn_i->phys); + kfree(phba->cgn_i); + phba->cgn_i = NULL; + /* Ensure CGN Mode is off */ + phba->cmf_active_mode = LPFC_CFG_OFF; + sli4_params->cmf = 0; + return 0; + } + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "6470 Setup MI version %d CMF %d mode %d\n", + sli4_params->mi_ver, sli4_params->cmf, + phba->cmf_active_mode); + + mempool_free(mboxq, phba->mbox_mem_pool); + + /* Initialize atomic counters */ + atomic_set(&phba->cgn_fabric_warn_cnt, 0); + atomic_set(&phba->cgn_fabric_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_warn_cnt, 0); + atomic_set(&phba->cgn_driver_evt_cnt, 0); + atomic_set(&phba->cgn_latency_evt_cnt, 0); + atomic64_set(&phba->cgn_latency_evt, 0); + + phba->cmf_interval_rate = LPFC_CMF_INTERVAL; + + /* Allocate RX Monitor Buffer */ + if (!phba->rx_monitor) { + phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor), + GFP_KERNEL); + + if (!phba->rx_monitor) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2644 Failed to alloc memory " + "for RX Monitor Buffer\n"); + return -ENOMEM; + } + + /* Instruct the rx_monitor object to instantiate its ring */ + if (lpfc_rx_monitor_create_ring(phba->rx_monitor, + LPFC_MAX_RXMONITOR_ENTRY)) { + kfree(phba->rx_monitor); + phba->rx_monitor = NULL; + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2645 Failed to alloc memory " + "for RX Monitor's Ring\n"); + return -ENOMEM; + } + } + + return 0; +} + +static int +lpfc_set_host_tm(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + uint32_t len, rc; + struct timespec64 cur_time; + struct tm broken; + uint32_t month, day, year; + uint32_t hour, minute, second; + struct lpfc_mbx_set_host_date_time *tm; + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + len = sizeof(struct lpfc_mbx_set_host_data) - + sizeof(struct lpfc_sli4_cfg_mhdr); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_SET_HOST_DATA, len, + LPFC_SLI4_MBX_EMBED); + + mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME; + mboxq->u.mqe.un.set_host_data.param_len = + sizeof(struct lpfc_mbx_set_host_date_time); + tm = &mboxq->u.mqe.un.set_host_data.un.tm; + ktime_get_real_ts64(&cur_time); + time64_to_tm(cur_time.tv_sec, 0, &broken); + month = broken.tm_mon + 1; + day = broken.tm_mday; + year = broken.tm_year - 100; + hour = broken.tm_hour; + minute = broken.tm_min; + second = broken.tm_sec; + bf_set(lpfc_mbx_set_host_month, tm, month); + bf_set(lpfc_mbx_set_host_day, tm, day); + bf_set(lpfc_mbx_set_host_year, tm, year); + bf_set(lpfc_mbx_set_host_hour, tm, hour); + bf_set(lpfc_mbx_set_host_min, tm, minute); + bf_set(lpfc_mbx_set_host_sec, tm, second); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; +} + +/** + * lpfc_sli4_hba_setup - SLI4 device initialization PCI function + * @phba: Pointer to HBA context object. + * + * This function is the main SLI4 device initialization PCI function. 
This + * function is called by the HBA initialization code, HBA reset code and + * HBA error attention handler code. Caller is not required to hold any + * locks. + **/ +int +lpfc_sli4_hba_setup(struct lpfc_hba *phba) +{ + int rc, i, cnt, len, dd; + LPFC_MBOXQ_t *mboxq; + struct lpfc_mqe *mqe; + uint8_t *vpd; + uint32_t vpd_size; + uint32_t ftr_rsp = 0; + struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); + struct lpfc_vport *vport = phba->pport; + struct lpfc_dmabuf *mp; + struct lpfc_rqb *rqbp; + u32 flg; + + /* Perform a PCI function reset to start from clean */ + rc = lpfc_pci_function_reset(phba); + if (unlikely(rc)) + return -ENODEV; + + /* Check the HBA Host Status Register for readyness */ + rc = lpfc_sli4_post_status_check(phba); + if (unlikely(rc)) + return -ENODEV; + else { + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag |= LPFC_SLI_ACTIVE; + flg = phba->sli.sli_flag; + spin_unlock_irq(&phba->hbalock); + /* Allow a little time after setting SLI_ACTIVE for any polled + * MBX commands to complete via BSG. + */ + for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) { + msleep(20); + spin_lock_irq(&phba->hbalock); + flg = phba->sli.sli_flag; + spin_unlock_irq(&phba->hbalock); + } + } + phba->hba_flag &= ~HBA_SETUP; + + lpfc_sli4_dip(phba); + + /* + * Allocate a single mailbox container for initializing the + * port. + */ + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + /* Issue READ_REV to collect vpd and FW information. */ + vpd_size = SLI4_PAGE_SIZE; + vpd = kzalloc(vpd_size, GFP_KERNEL); + if (!vpd) { + rc = -ENOMEM; + goto out_free_mbox; + } + + rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); + if (unlikely(rc)) { + kfree(vpd); + goto out_free_mbox; + } + + mqe = &mboxq->u.mqe; + phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); + if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { + phba->hba_flag |= HBA_FCOE_MODE; + phba->fcp_embed_io = 0; /* SLI4 FC support only */ + } else { + phba->hba_flag &= ~HBA_FCOE_MODE; + } + + if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == + LPFC_DCBX_CEE_MODE) + phba->hba_flag |= HBA_FIP_SUPPORT; + else + phba->hba_flag &= ~HBA_FIP_SUPPORT; + + phba->hba_flag &= ~HBA_IOQ_FLUSH; + + if (phba->sli_rev != LPFC_SLI_REV4) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0376 READ_REV Error. SLI Level %d " + "FCoE enabled %d\n", + phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); + rc = -EIO; + kfree(vpd); + goto out_free_mbox; + } + + rc = lpfc_set_host_tm(phba); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, + "6468 Set host date / time: Status x%x:\n", rc); + + /* + * Continue initialization with default values even if driver failed + * to read FCoE param config regions, only read parameters if the + * board is FCoE + */ + if (phba->hba_flag & HBA_FCOE_MODE && + lpfc_sli4_read_fcoe_params(phba)) + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, + "2570 Failed to read FCoE parameters\n"); + + /* + * Retrieve sli4 device physical port name, failure of doing it + * is considered as non-fatal. + */ + rc = lpfc_sli4_retrieve_pport_name(phba); + if (!rc) + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "3080 Successful retrieving SLI4 device " + "physical port name: %s.\n", phba->Port); + + rc = lpfc_sli4_get_ctl_attr(phba); + if (!rc) + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "8351 Successful retrieving SLI4 device " + "CTL ATTR\n"); + + /* + * Evaluate the read rev and vpd data. 
Populate the driver + * state with the results. If this routine fails, the failure + * is not fatal as the driver will use generic values. + */ + rc = lpfc_parse_vpd(phba, vpd, vpd_size); + if (unlikely(!rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0377 Error %d parsing vpd. " + "Using defaults.\n", rc); + rc = 0; + } + kfree(vpd); + + /* Save information as VPD data */ + phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; + phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; + + /* + * This is because first G7 ASIC doesn't support the standard + * 0x5a NVME cmd descriptor type/subtype + */ + if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_6) && + (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) && + (phba->vpd.rev.smRev == 0) && + (phba->cfg_nvme_embed_cmd == 1)) + phba->cfg_nvme_embed_cmd = 0; + + phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; + phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, + &mqe->un.read_rev); + phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, + &mqe->un.read_rev); + phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, + &mqe->un.read_rev); + phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, + &mqe->un.read_rev); + phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; + memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); + phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; + memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); + phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; + memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0380 READ_REV Status x%x " + "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + bf_get(lpfc_mqe_status, mqe), + phba->vpd.rev.opFwName, + phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, + phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); + + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_0) { + lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc == MBX_SUCCESS) { + phba->hba_flag |= HBA_RECOVERABLE_UE; + /* Set 1Sec interval to detect UE */ + phba->eratt_poll_interval = 1; + phba->sli4_hba.ue_to_sr = bf_get( + lpfc_mbx_set_feature_UESR, + &mboxq->u.mqe.un.set_feature); + phba->sli4_hba.ue_to_rp = bf_get( + lpfc_mbx_set_feature_UERP, + &mboxq->u.mqe.un.set_feature); + } + } + + if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { + /* Enable MDS Diagnostics only if the SLI Port supports it */ + lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) + phba->mds_diags_support = 0; + } + + /* + * Discover the port's supported feature set and match it against the + * hosts requests. + */ + lpfc_request_features(phba, mboxq); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (unlikely(rc)) { + rc = -EIO; + goto out_free_mbox; + } + + /* Disable VMID if app header is not supported */ + if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr, + &mqe->un.req_ftrs))) { + bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0); + phba->cfg_vmid_app_header = 0; + lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI, + "1242 vmid feature not supported\n"); + } + + /* + * The port must support FCP initiator mode as this is the + * only mode running in the host. 
+ */ + if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "0378 No support for fcpi mode.\n"); + ftr_rsp++; + } + + /* Performance Hints are ONLY for FCoE */ + if (phba->hba_flag & HBA_FCOE_MODE) { + if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) + phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; + else + phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; + } + + /* + * If the port cannot support the host's requested features + * then turn off the global config parameters to disable the + * feature in the driver. This is not a fatal error. + */ + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { + if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) { + phba->cfg_enable_bg = 0; + phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; + ftr_rsp++; + } + } + + if (phba->max_vpi && phba->cfg_enable_npiv && + !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) + ftr_rsp++; + + if (ftr_rsp) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "0379 Feature Mismatch Data: x%08x %08x " + "x%x x%x x%x\n", mqe->un.req_ftrs.word2, + mqe->un.req_ftrs.word3, phba->cfg_enable_bg, + phba->cfg_enable_npiv, phba->max_vpi); + if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) + phba->cfg_enable_bg = 0; + if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) + phba->cfg_enable_npiv = 0; + } + + /* These SLI3 features are assumed in SLI4 */ + spin_lock_irq(&phba->hbalock); + phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); + spin_unlock_irq(&phba->hbalock); + + /* Always try to enable dual dump feature if we can */ + lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature); + if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP)) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6448 Dual Dump is enabled\n"); + else + lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT, + "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, " + "rc:x%x dd:x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + lpfc_sli_config_mbox_subsys_get( + phba, mboxq), + lpfc_sli_config_mbox_opcode_get( + phba, mboxq), + rc, dd); + /* + * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent + * calls depends on these resources to complete port setup. + */ + rc = lpfc_sli4_alloc_resource_identifiers(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2920 Failed to alloc Resource IDs " + "rc = x%x\n", rc); + goto out_free_mbox; + } + + lpfc_set_host_data(phba, mboxq); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "2134 Failed to set host os driver version %x", + rc); + } + + /* Read the port's service parameters. */ + rc = lpfc_read_sparam(phba, mboxq, vport->vpi); + if (rc) { + phba->link_state = LPFC_HBA_ERROR; + rc = -ENOMEM; + goto out_free_mbox; + } + + mboxq->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; + if (rc == MBX_SUCCESS) { + memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); + rc = 0; + } + + /* + * This memory was allocated by the lpfc_read_sparam routine but is + * no longer needed. It is released and ctx_buf NULLed to prevent + * unintended pointer access as the mbox is reused. 
+ */ + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + mboxq->ctx_buf = NULL; + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0382 READ_SPARAM command failed " + "status %d, mbxStatus x%x\n", + rc, bf_get(lpfc_mqe_status, mqe)); + phba->link_state = LPFC_HBA_ERROR; + rc = -EIO; + goto out_free_mbox; + } + + lpfc_update_vport_wwn(vport); + + /* Update the fc_host data structures with new wwn. */ + fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); + fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); + + /* Create all the SLI4 queues */ + rc = lpfc_sli4_queue_create(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3089 Failed to allocate queues\n"); + rc = -ENODEV; + goto out_free_mbox; + } + /* Set up all the queues to the device */ + rc = lpfc_sli4_queue_setup(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0381 Error %d during queue setup.\n ", rc); + goto out_stop_timers; + } + /* Initialize the driver internal SLI layer lists. */ + lpfc_sli4_setup(phba); + lpfc_sli4_queue_init(phba); + + /* update host els xri-sgl sizes and mappings */ + rc = lpfc_sli4_els_sgl_update(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1400 Failed to update xri-sgl size and " + "mapping: %d\n", rc); + goto out_destroy_queue; + } + + /* register the els sgl pool to the port */ + rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, + phba->sli4_hba.els_xri_cnt); + if (unlikely(rc < 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0582 Error %d during els sgl post " + "operation\n", rc); + rc = -ENODEV; + goto out_destroy_queue; + } + phba->sli4_hba.els_xri_cnt = rc; + + if (phba->nvmet_support) { + /* update host nvmet xri-sgl sizes and mappings */ + rc = lpfc_sli4_nvmet_sgl_update(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6308 Failed to update nvmet-sgl size " + "and mapping: %d\n", rc); + goto out_destroy_queue; + } + + /* register the nvmet sgl pool to the port */ + rc = lpfc_sli4_repost_sgl_list( + phba, + &phba->sli4_hba.lpfc_nvmet_sgl_list, + phba->sli4_hba.nvmet_xri_cnt); + if (unlikely(rc < 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3117 Error %d during nvmet " + "sgl post\n", rc); + rc = -ENODEV; + goto out_destroy_queue; + } + phba->sli4_hba.nvmet_xri_cnt = rc; + + /* We allocate an iocbq for every receive context SGL. + * The additional allocation is for abort and ls handling. + */ + cnt = phba->sli4_hba.nvmet_xri_cnt + + phba->sli4_hba.max_cfg_param.max_xri; + } else { + /* update host common xri-sgl sizes and mappings */ + rc = lpfc_sli4_io_sgl_update(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6082 Failed to update nvme-sgl size " + "and mapping: %d\n", rc); + goto out_destroy_queue; + } + + /* register the allocated common sgl pool to the port */ + rc = lpfc_sli4_repost_io_sgl_list(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6116 Error %d during nvme sgl post " + "operation\n", rc); + /* Some NVME buffers were moved to abort nvme list */ + /* A pci function reset will repost them */ + rc = -ENODEV; + goto out_destroy_queue; + } + /* Each lpfc_io_buf job structure has an iocbq element. + * This cnt provides for abort, els, ct and ls requests. 
+ */ + cnt = phba->sli4_hba.max_cfg_param.max_xri; + } + + if (!phba->sli.iocbq_lookup) { + /* Initialize and populate the iocb list per host */ + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2821 initialize iocb list with %d entries\n", + cnt); + rc = lpfc_init_iocb_list(phba, cnt); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1413 Failed to init iocb list.\n"); + goto out_destroy_queue; + } + } + + if (phba->nvmet_support) + lpfc_nvmet_create_targetport(phba); + + if (phba->nvmet_support && phba->cfg_nvmet_mrq) { + /* Post initial buffers to all RQs created */ + for (i = 0; i < phba->cfg_nvmet_mrq; i++) { + rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; + INIT_LIST_HEAD(&rqbp->rqb_buffer_list); + rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; + rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; + rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; + rqbp->buffer_count = 0; + + lpfc_post_rq_buffer( + phba, phba->sli4_hba.nvmet_mrq_hdr[i], + phba->sli4_hba.nvmet_mrq_data[i], + phba->cfg_nvmet_mrq_post, i); + } + } + + /* Post the rpi header region to the device. */ + rc = lpfc_sli4_post_all_rpi_hdrs(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0393 Error %d during rpi post operation\n", + rc); + rc = -ENODEV; + goto out_free_iocblist; + } + lpfc_sli4_node_prep(phba); + + if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { + /* + * The FC Port needs to register FCFI (index 0) + */ + lpfc_reg_fcfi(phba, mboxq); + mboxq->vport = phba->pport; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) + goto out_unset_queue; + rc = 0; + phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, + &mboxq->u.mqe.un.reg_fcfi); + } else { + /* We are a NVME Target mode with MRQ > 1 */ + + /* First register the FCFI */ + lpfc_reg_fcfi_mrq(phba, mboxq, 0); + mboxq->vport = phba->pport; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) + goto out_unset_queue; + rc = 0; + phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, + &mboxq->u.mqe.un.reg_fcfi_mrq); + + /* Next register the MRQs */ + lpfc_reg_fcfi_mrq(phba, mboxq, 1); + mboxq->vport = phba->pport; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) + goto out_unset_queue; + rc = 0; + } + /* Check if the port is configured to be disabled */ + lpfc_sli_read_link_ste(phba); + } + + /* Don't post more new bufs if repost already recovered + * the nvme sgls. 
+ */ + if (phba->nvmet_support == 0) { + if (phba->sli4_hba.io_xri_cnt == 0) { + len = lpfc_new_io_buf( + phba, phba->sli4_hba.io_xri_max); + if (len == 0) { + rc = -ENOMEM; + goto out_unset_queue; + } + + if (phba->cfg_xri_rebalancing) + lpfc_create_multixri_pools(phba); + } + } else { + phba->cfg_xri_rebalancing = 0; + } + + /* Allow asynchronous mailbox command to go through */ + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; + spin_unlock_irq(&phba->hbalock); + + /* Post receive buffers to the device */ + lpfc_sli4_rb_setup(phba); + + /* Reset HBA FCF states after HBA reset */ + phba->fcf.fcf_flag = 0; + phba->fcf.current_rec.flag = 0; + + /* Start the ELS watchdog timer */ + mod_timer(&vport->els_tmofunc, + jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); + + /* Start heart beat timer */ + mod_timer(&phba->hb_tmofunc, + jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); + phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); + phba->last_completion_time = jiffies; + + /* start eq_delay heartbeat */ + if (phba->cfg_auto_imax) + queue_delayed_work(phba->wq, &phba->eq_delay_work, + msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); + + /* start per phba idle_stat_delay heartbeat */ + lpfc_init_idle_stat_hb(phba); + + /* Start error attention (ERATT) polling timer */ + mod_timer(&phba->eratt_poll, + jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); + + /* + * The port is ready, set the host's link state to LINK_DOWN + * in preparation for link interrupts. + */ + spin_lock_irq(&phba->hbalock); + phba->link_state = LPFC_LINK_DOWN; + + /* Check if physical ports are trunked */ + if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)) + phba->trunk_link.link0.state = LPFC_LINK_DOWN; + if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)) + phba->trunk_link.link1.state = LPFC_LINK_DOWN; + if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)) + phba->trunk_link.link2.state = LPFC_LINK_DOWN; + if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)) + phba->trunk_link.link3.state = LPFC_LINK_DOWN; + spin_unlock_irq(&phba->hbalock); + + /* Arm the CQs and then EQs on device */ + lpfc_sli4_arm_cqeq_intr(phba); + + /* Indicate device interrupt mode */ + phba->sli4_hba.intr_enable = 1; + + /* Setup CMF after HBA is initialized */ + lpfc_cmf_setup(phba); + + if (!(phba->hba_flag & HBA_FCOE_MODE) && + (phba->hba_flag & LINK_DISABLED)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3103 Adapter Link is disabled.\n"); + lpfc_down_link(phba, mboxq); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3104 Adapter failed to issue " + "DOWN_LINK mbox cmd, rc:x%x\n", rc); + goto out_io_buff_free; + } + } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { + /* don't perform init_link on SLI4 FC port loopback test */ + if (!(phba->link_flag & LS_LOOPBACK_MODE)) { + rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); + if (rc) + goto out_io_buff_free; + } + } + mempool_free(mboxq, phba->mbox_mem_pool); + + /* Enable RAS FW log support */ + lpfc_sli4_ras_setup(phba); + + phba->hba_flag |= HBA_SETUP; + return rc; + +out_io_buff_free: + /* Free allocated IO Buffers */ + lpfc_io_free(phba); +out_unset_queue: + /* Unset all the queues set up in this routine when error out */ + lpfc_sli4_queue_unset(phba); +out_free_iocblist: + lpfc_free_iocb_list(phba); +out_destroy_queue: + lpfc_sli4_queue_destroy(phba); +out_stop_timers: + lpfc_stop_hba_timers(phba); +out_free_mbox: + 
mempool_free(mboxq, phba->mbox_mem_pool); + return rc; +} + +/** + * lpfc_mbox_timeout - Timeout call back function for mbox timer + * @t: Context to fetch pointer to hba structure from. + * + * This is the callback function for the mailbox timer. The mailbox + * timer is armed when a new mailbox command is issued and the timer + * is deleted when the mailbox completes. The function is called by + * the kernel timer code when a mailbox does not complete within + * expected time. This function wakes up the worker thread to + * process the mailbox timeout and returns. All the processing is + * done by the worker thread function lpfc_mbox_timeout_handler. + **/ +void +lpfc_mbox_timeout(struct timer_list *t) +{ + struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo); + unsigned long iflag; + uint32_t tmo_posted; + + spin_lock_irqsave(&phba->pport->work_port_lock, iflag); + tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; + if (!tmo_posted) + phba->pport->work_port_events |= WORKER_MBOX_TMO; + spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); + + if (!tmo_posted) + lpfc_worker_wake_up(phba); + return; +} + +/** + * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions + * are pending + * @phba: Pointer to HBA context object. + * + * This function checks if any mailbox completions are present on the mailbox + * completion queue. + **/ +static bool +lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) +{ + + uint32_t idx; + struct lpfc_queue *mcq; + struct lpfc_mcqe *mcqe; + bool pending_completions = false; + uint8_t qe_valid; + + if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) + return false; + + /* Check for completions on mailbox completion queue */ + + mcq = phba->sli4_hba.mbx_cq; + idx = mcq->hba_index; + qe_valid = mcq->qe_valid; + while (bf_get_le32(lpfc_cqe_valid, + (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) { + mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx)); + if (bf_get_le32(lpfc_trailer_completed, mcqe) && + (!bf_get_le32(lpfc_trailer_async, mcqe))) { + pending_completions = true; + break; + } + idx = (idx + 1) % mcq->entry_count; + if (mcq->hba_index == idx) + break; + + /* if the index wrapped around, toggle the valid bit */ + if (phba->sli4_hba.pc_sli4_params.cqav && !idx) + qe_valid = (qe_valid) ? 0 : 1; + } + return pending_completions; + +} + +/** + * lpfc_sli4_process_missed_mbox_completions - process mbox completions + * that were missed. + * @phba: Pointer to HBA context object. + * + * For sli4, it is possible to miss an interrupt. As such mbox completions + * may be missed, causing erroneous mailbox timeouts to occur. This function + * checks to see if mbox completions are on the mailbox completion queue + * and will process all the completions associated with the eq for the + * mailbox completion queue.
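+ * + * Return: true if a mailbox completion was pending and the associated EQ was + * processed, false otherwise.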
+ **/ +static bool +lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) +{ + struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; + uint32_t eqidx; + struct lpfc_queue *fpeq = NULL; + struct lpfc_queue *eq; + bool mbox_pending; + + if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) + return false; + + /* Find the EQ associated with the mbox CQ */ + if (sli4_hba->hdwq) { + for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) { + eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq; + if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) { + fpeq = eq; + break; + } + } + } + if (!fpeq) + return false; + + /* Turn off interrupts from this EQ */ + + sli4_hba->sli4_eq_clr_intr(fpeq); + + /* Check to see if a mbox completion is pending */ + + mbox_pending = lpfc_sli4_mbox_completions_pending(phba); + + /* + * If a mbox completion is pending, process all the events on EQ + * associated with the mbox completion queue (this could include + * mailbox commands, async events, els commands, receive queue data + * and fcp commands) + */ + + if (mbox_pending) + /* process and rearm the EQ */ + lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM, + LPFC_QUEUE_WORK); + else + /* Always clear and re-arm the EQ */ + sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); + + return mbox_pending; + +} + +/** + * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout + * @phba: Pointer to HBA context object. + * + * This function is called from worker thread when a mailbox command times out. + * The caller is not required to hold any locks. This function will reset the + * HBA and recover all the pending commands. + **/ +void +lpfc_mbox_timeout_handler(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; + MAILBOX_t *mb = NULL; + + struct lpfc_sli *psli = &phba->sli; + + /* If the mailbox completed, process the completion */ + lpfc_sli4_process_missed_mbox_completions(phba); + + if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) + return; + + if (pmbox != NULL) + mb = &pmbox->u.mb; + /* Check the pmbox pointer first. There is a race condition + * between the mbox timeout handler getting executed in the + * worklist and the mailbox actually completing. When this + * race condition occurs, the mbox_active will be NULL. + */ + spin_lock_irq(&phba->hbalock); + if (pmbox == NULL) { + lpfc_printf_log(phba, KERN_WARNING, + LOG_MBOX | LOG_SLI, + "0353 Active Mailbox cleared - mailbox timeout " + "exiting\n"); + spin_unlock_irq(&phba->hbalock); + return; + } + + /* Mbox cmd timeout */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n", + mb->mbxCommand, + phba->pport->port_state, + phba->sli.sli_flag, + phba->sli.mbox_active); + spin_unlock_irq(&phba->hbalock); + + /* Setting state unknown so lpfc_sli_abort_iocb_ring + * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing + * it to fail all outstanding SCSI IO. 
+ */ + set_bit(MBX_TMO_ERR, &phba->bit_flags); + spin_lock_irq(&phba->pport->work_port_lock); + phba->pport->work_port_events &= ~WORKER_MBOX_TMO; + spin_unlock_irq(&phba->pport->work_port_lock); + spin_lock_irq(&phba->hbalock); + phba->link_state = LPFC_LINK_UNKNOWN; + psli->sli_flag &= ~LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); + + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0345 Resetting board due to mailbox timeout\n"); + + /* Reset the HBA device */ + lpfc_reset_hba(phba); +} + +/** + * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware + * @phba: Pointer to HBA context object. + * @pmbox: Pointer to mailbox object. + * @flag: Flag indicating how the mailbox need to be processed. + * + * This function is called by discovery code and HBA management code + * to submit a mailbox command to firmware with SLI-3 interface spec. This + * function gets the hbalock to protect the data structures. + * The mailbox command can be submitted in polling mode, in which case + * this function will wait in a polling loop for the completion of the + * mailbox. + * If the mailbox is submitted in no_wait mode (not polling) the + * function will submit the command and returns immediately without waiting + * for the mailbox completion. The no_wait is supported only when HBA + * is in SLI2/SLI3 mode - interrupts are enabled. + * The SLI interface allows only one mailbox pending at a time. If the + * mailbox is issued in polling mode and there is already a mailbox + * pending, then the function will return an error. If the mailbox is issued + * in NO_WAIT mode and there is a mailbox pending already, the function + * will return MBX_BUSY after queuing the mailbox into mailbox queue. + * The sli layer owns the mailbox object until the completion of mailbox + * command if this function return MBX_BUSY or MBX_SUCCESS. For all other + * return codes the caller owns the mailbox command after the return of + * the function. + **/ +static int +lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, + uint32_t flag) +{ + MAILBOX_t *mbx; + struct lpfc_sli *psli = &phba->sli; + uint32_t status, evtctr; + uint32_t ha_copy, hc_copy; + int i; + unsigned long timeout; + unsigned long drvr_flag = 0; + uint32_t word0, ldata; + void __iomem *to_slim; + int processing_queue = 0; + + spin_lock_irqsave(&phba->hbalock, drvr_flag); + if (!pmbox) { + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + /* processing mbox queue from intr_handler */ + if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + return MBX_SUCCESS; + } + processing_queue = 1; + pmbox = lpfc_mbox_get(phba); + if (!pmbox) { + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + return MBX_SUCCESS; + } + } + + if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && + pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { + if(!pmbox->vport) { + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + lpfc_printf_log(phba, KERN_ERR, + LOG_MBOX | LOG_VPORT, + "1806 Mbox x%x failed. No vport\n", + pmbox->u.mb.mbxCommand); + dump_stack(); + goto out_not_finished; + } + } + + /* If the PCI channel is in offline state, do not post mbox. */ + if (unlikely(pci_channel_offline(phba->pcidev))) { + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + goto out_not_finished; + } + + /* If HBA has a deferred error attention, fail the iocb. 
*/ + if (unlikely(phba->hba_flag & DEFER_ERATT)) { + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + goto out_not_finished; + } + + psli = &phba->sli; + + mbx = &pmbox->u.mb; + status = MBX_SUCCESS; + + if (phba->link_state == LPFC_HBA_ERROR) { + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + + /* Mbox command cannot issue */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "(%d):0311 Mailbox command x%x cannot " + "issue Data: x%x x%x\n", + pmbox->vport ? pmbox->vport->vpi : 0, + pmbox->u.mb.mbxCommand, psli->sli_flag, flag); + goto out_not_finished; + } + + if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { + if (lpfc_readl(phba->HCregaddr, &hc_copy) || + !(hc_copy & HC_MBINT_ENA)) { + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "(%d):2528 Mailbox command x%x cannot " + "issue Data: x%x x%x\n", + pmbox->vport ? pmbox->vport->vpi : 0, + pmbox->u.mb.mbxCommand, psli->sli_flag, flag); + goto out_not_finished; + } + } + + if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { + /* Polling for a mbox command when another one is already active + * is not allowed in SLI. Also, the driver must have established + * SLI2 mode to queue and process multiple mbox commands. + */ + + if (flag & MBX_POLL) { + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + + /* Mbox command cannot issue */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "(%d):2529 Mailbox command x%x " + "cannot issue Data: x%x x%x\n", + pmbox->vport ? pmbox->vport->vpi : 0, + pmbox->u.mb.mbxCommand, + psli->sli_flag, flag); + goto out_not_finished; + } + + if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + /* Mbox command cannot issue */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "(%d):2530 Mailbox command x%x " + "cannot issue Data: x%x x%x\n", + pmbox->vport ? pmbox->vport->vpi : 0, + pmbox->u.mb.mbxCommand, + psli->sli_flag, flag); + goto out_not_finished; + } + + /* Another mailbox command is still being processed, queue this + * command to be processed later. + */ + lpfc_mbox_put(phba, pmbox); + + /* Mbox cmd issue - BUSY */ + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0308 Mbox cmd issue - BUSY Data: " + "x%x x%x x%x x%x\n", + pmbox->vport ? pmbox->vport->vpi : 0xffffff, + mbx->mbxCommand, + phba->pport ? phba->pport->port_state : 0xff, + psli->sli_flag, flag); + + psli->slistat.mbox_busy++; + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + + if (pmbox->vport) { + lpfc_debugfs_disc_trc(pmbox->vport, + LPFC_DISC_TRC_MBOX_VPORT, + "MBOX Bsy vport: cmd:x%x mb:x%x x%x", + (uint32_t)mbx->mbxCommand, + mbx->un.varWords[0], mbx->un.varWords[1]); + } + else { + lpfc_debugfs_disc_trc(phba->pport, + LPFC_DISC_TRC_MBOX, + "MBOX Bsy: cmd:x%x mb:x%x x%x", + (uint32_t)mbx->mbxCommand, + mbx->un.varWords[0], mbx->un.varWords[1]); + } + + return MBX_BUSY; + } + + psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; + + /* If we are not polling, we MUST be in SLI2 mode */ + if (flag != MBX_POLL) { + if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && + (mbx->mbxCommand != MBX_KILL_BOARD)) { + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + /* Mbox command cannot issue */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "(%d):2531 Mailbox command x%x " + "cannot issue Data: x%x x%x\n", + pmbox->vport ? 
pmbox->vport->vpi : 0, + pmbox->u.mb.mbxCommand, + psli->sli_flag, flag); + goto out_not_finished; + } + /* timeout active mbox command */ + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * + 1000); + mod_timer(&psli->mbox_tmo, jiffies + timeout); + } + + /* Mailbox cmd issue */ + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " + "x%x\n", + pmbox->vport ? pmbox->vport->vpi : 0, + mbx->mbxCommand, + phba->pport ? phba->pport->port_state : 0xff, + psli->sli_flag, flag); + + if (mbx->mbxCommand != MBX_HEARTBEAT) { + if (pmbox->vport) { + lpfc_debugfs_disc_trc(pmbox->vport, + LPFC_DISC_TRC_MBOX_VPORT, + "MBOX Send vport: cmd:x%x mb:x%x x%x", + (uint32_t)mbx->mbxCommand, + mbx->un.varWords[0], mbx->un.varWords[1]); + } + else { + lpfc_debugfs_disc_trc(phba->pport, + LPFC_DISC_TRC_MBOX, + "MBOX Send: cmd:x%x mb:x%x x%x", + (uint32_t)mbx->mbxCommand, + mbx->un.varWords[0], mbx->un.varWords[1]); + } + } + + psli->slistat.mbox_cmd++; + evtctr = psli->slistat.mbox_event; + + /* next set own bit for the adapter and copy over command word */ + mbx->mbxOwner = OWN_CHIP; + + if (psli->sli_flag & LPFC_SLI_ACTIVE) { + /* Populate mbox extension offset word. */ + if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { + *(((uint32_t *)mbx) + pmbox->mbox_offset_word) + = (uint8_t *)phba->mbox_ext + - (uint8_t *)phba->mbox; + } + + /* Copy the mailbox extension data */ + if (pmbox->in_ext_byte_len && pmbox->ctx_buf) { + lpfc_sli_pcimem_bcopy(pmbox->ctx_buf, + (uint8_t *)phba->mbox_ext, + pmbox->in_ext_byte_len); + } + /* Copy command data to host SLIM area */ + lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); + } else { + /* Populate mbox extension offset word. */ + if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) + *(((uint32_t *)mbx) + pmbox->mbox_offset_word) + = MAILBOX_HBA_EXT_OFFSET; + + /* Copy the mailbox extension data */ + if (pmbox->in_ext_byte_len && pmbox->ctx_buf) + lpfc_memcpy_to_slim(phba->MBslimaddr + + MAILBOX_HBA_EXT_OFFSET, + pmbox->ctx_buf, pmbox->in_ext_byte_len); + + if (mbx->mbxCommand == MBX_CONFIG_PORT) + /* copy command data into host mbox for cmpl */ + lpfc_sli_pcimem_bcopy(mbx, phba->mbox, + MAILBOX_CMD_SIZE); + + /* First copy mbox command data to HBA SLIM, skip past first + word */ + to_slim = phba->MBslimaddr + sizeof (uint32_t); + lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], + MAILBOX_CMD_SIZE - sizeof (uint32_t)); + + /* Next copy over first word, with mbxOwner set */ + ldata = *((uint32_t *)mbx); + to_slim = phba->MBslimaddr; + writel(ldata, to_slim); + readl(to_slim); /* flush */ + + if (mbx->mbxCommand == MBX_CONFIG_PORT) + /* switch over to host mailbox */ + psli->sli_flag |= LPFC_SLI_ACTIVE; + } + + wmb(); + + switch (flag) { + case MBX_NOWAIT: + /* Set up reference to mailbox command */ + psli->mbox_active = pmbox; + /* Interrupt board to do it */ + writel(CA_MBATT, phba->CAregaddr); + readl(phba->CAregaddr); /* flush */ + /* Don't wait for it to finish, just return */ + break; + + case MBX_POLL: + /* Set up null reference to mailbox command */ + psli->mbox_active = NULL; + /* Interrupt board to do it */ + writel(CA_MBATT, phba->CAregaddr); + readl(phba->CAregaddr); /* flush */ + + if (psli->sli_flag & LPFC_SLI_ACTIVE) { + /* First read mbox status word */ + word0 = *((uint32_t *)phba->mbox); + word0 = le32_to_cpu(word0); + } else { + /* First read mbox status word */ + if (lpfc_readl(phba->MBslimaddr, &word0)) { + spin_unlock_irqrestore(&phba->hbalock, + drvr_flag); + goto 
out_not_finished; + } + } + + /* Read the HBA Host Attention Register */ + if (lpfc_readl(phba->HAregaddr, &ha_copy)) { + spin_unlock_irqrestore(&phba->hbalock, + drvr_flag); + goto out_not_finished; + } + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * + 1000) + jiffies; + i = 0; + /* Wait for command to complete */ + while (((word0 & OWN_CHIP) == OWN_CHIP) || + (!(ha_copy & HA_MBATT) && + (phba->link_state > LPFC_WARM_START))) { + if (time_after(jiffies, timeout)) { + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irqrestore(&phba->hbalock, + drvr_flag); + goto out_not_finished; + } + + /* Check if we took a mbox interrupt while we were + polling */ + if (((word0 & OWN_CHIP) != OWN_CHIP) + && (evtctr != psli->slistat.mbox_event)) + break; + + if (i++ > 10) { + spin_unlock_irqrestore(&phba->hbalock, + drvr_flag); + msleep(1); + spin_lock_irqsave(&phba->hbalock, drvr_flag); + } + + if (psli->sli_flag & LPFC_SLI_ACTIVE) { + /* First copy command data */ + word0 = *((uint32_t *)phba->mbox); + word0 = le32_to_cpu(word0); + if (mbx->mbxCommand == MBX_CONFIG_PORT) { + MAILBOX_t *slimmb; + uint32_t slimword0; + /* Check real SLIM for any errors */ + slimword0 = readl(phba->MBslimaddr); + slimmb = (MAILBOX_t *) & slimword0; + if (((slimword0 & OWN_CHIP) != OWN_CHIP) + && slimmb->mbxStatus) { + psli->sli_flag &= + ~LPFC_SLI_ACTIVE; + word0 = slimword0; + } + } + } else { + /* First copy command data */ + word0 = readl(phba->MBslimaddr); + } + /* Read the HBA Host Attention Register */ + if (lpfc_readl(phba->HAregaddr, &ha_copy)) { + spin_unlock_irqrestore(&phba->hbalock, + drvr_flag); + goto out_not_finished; + } + } + + if (psli->sli_flag & LPFC_SLI_ACTIVE) { + /* copy results back to user */ + lpfc_sli_pcimem_bcopy(phba->mbox, mbx, + MAILBOX_CMD_SIZE); + /* Copy the mailbox extension data */ + if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { + lpfc_sli_pcimem_bcopy(phba->mbox_ext, + pmbox->ctx_buf, + pmbox->out_ext_byte_len); + } + } else { + /* First copy command data */ + lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, + MAILBOX_CMD_SIZE); + /* Copy the mailbox extension data */ + if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { + lpfc_memcpy_from_slim( + pmbox->ctx_buf, + phba->MBslimaddr + + MAILBOX_HBA_EXT_OFFSET, + pmbox->out_ext_byte_len); + } + } + + writel(HA_MBATT, phba->HAregaddr); + readl(phba->HAregaddr); /* flush */ + + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + status = mbx->mbxStatus; + } + + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + return status; + +out_not_finished: + if (processing_queue) { + pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; + lpfc_mbox_cmpl_put(phba, pmbox); + } + return MBX_NOT_FINISHED; +} + +/** + * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command + * @phba: Pointer to HBA context object. + * + * The function blocks the posting of SLI4 asynchronous mailbox commands from + * the driver internal pending mailbox queue. It will then try to wait out the + * possible outstanding mailbox command before return. + * + * Returns: + * 0 - the outstanding mailbox command completed; otherwise, the wait for + * the outstanding mailbox command timed out. 
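+ * On timeout the LPFC_SLI_ASYNC_MBX_BLK flag is cleared again before + * returning, since the block could not be established cleanly.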
+ **/ +static int +lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *mboxq; + int rc = 0; + unsigned long timeout = 0; + u32 sli_flag; + u8 cmd, subsys, opcode; + + /* Mark the asynchronous mailbox command posting as blocked */ + spin_lock_irq(&phba->hbalock); + psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; + /* Determine how long we might wait for the active mailbox + * command to be gracefully completed by firmware. + */ + if (phba->sli.mbox_active) + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, + phba->sli.mbox_active) * + 1000) + jiffies; + spin_unlock_irq(&phba->hbalock); + + /* Make sure the mailbox is really active */ + if (timeout) + lpfc_sli4_process_missed_mbox_completions(phba); + + /* Wait for the outstanding mailbox command to complete */ + while (phba->sli.mbox_active) { + /* Check active mailbox complete status every 2ms */ + msleep(2); + if (time_after(jiffies, timeout)) { + /* Timeout, mark the outstanding cmd not complete */ + + /* Sanity check sli.mbox_active has not completed or + * cancelled from another context during last 2ms sleep, + * so take hbalock to be sure before logging. + */ + spin_lock_irq(&phba->hbalock); + if (phba->sli.mbox_active) { + mboxq = phba->sli.mbox_active; + cmd = mboxq->u.mb.mbxCommand; + subsys = lpfc_sli_config_mbox_subsys_get(phba, + mboxq); + opcode = lpfc_sli_config_mbox_opcode_get(phba, + mboxq); + sli_flag = psli->sli_flag; + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2352 Mailbox command x%x " + "(x%x/x%x) sli_flag x%x could " + "not complete\n", + cmd, subsys, opcode, + sli_flag); + } else { + spin_unlock_irq(&phba->hbalock); + } + + rc = 1; + break; + } + } + + /* Cannot cleanly block the async mailbox command, so fail it */ + if (rc) { + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; + spin_unlock_irq(&phba->hbalock); + } + return rc; +} + +/** + * lpfc_sli4_async_mbox_unblock - Unblock posting of SLI4 async mailbox commands + * @phba: Pointer to HBA context object. + * + * The function unblocks and resumes posting of SLI4 asynchronous mailbox + * commands from the driver internal pending mailbox queue. It makes sure + * that there is no outstanding mailbox command before resuming posting + * asynchronous mailbox commands. If, for any reason, there is an outstanding + * mailbox command, it will try to wait it out before resuming asynchronous + * mailbox command posting. + **/ +static void +lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + + spin_lock_irq(&phba->hbalock); + if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { + /* Asynchronous mailbox posting is not blocked, do nothing */ + spin_unlock_irq(&phba->hbalock); + return; + } + + /* Outstanding synchronous mailbox command is guaranteed to be done, + * successful or timed out; after timing out, the outstanding mailbox + * command shall always be removed, so just unblock posting of async + * mailbox commands and resume + */ + psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; + spin_unlock_irq(&phba->hbalock); + + /* wake up worker thread to post asynchronous mailbox command */ + lpfc_worker_wake_up(phba); +} + +/** + * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready + * @phba: Pointer to HBA context object. + * @mboxq: Pointer to mailbox object. + * + * The function waits for the bootstrap mailbox register ready bit from the + * port for twice the regular mailbox command timeout value.
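+ * The ready bit is polled with a 2ms delay between reads until it is set or + * the timeout expires.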
+ * + * 0 - no timeout on waiting for bootstrap mailbox register ready. + * MBXERR_ERROR - wait for bootstrap mailbox register timed out or port + * is in an unrecoverable state. + **/ +static int +lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + uint32_t db_ready; + unsigned long timeout; + struct lpfc_register bmbx_reg; + struct lpfc_register portstat_reg = {-1}; + + /* Sanity check - there is no point in waiting if the port is in an + * unrecoverable state. + */ + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= + LPFC_SLI_INTF_IF_TYPE_2) { + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + &portstat_reg.word0) || + lpfc_sli4_unrecoverable_port(&portstat_reg)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3858 Skipping bmbx ready because " + "Port Status x%x\n", + portstat_reg.word0); + return MBXERR_ERROR; + } + } + + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) + * 1000) + jiffies; + + do { + bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); + db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); + if (!db_ready) + mdelay(2); + + if (time_after(jiffies, timeout)) + return MBXERR_ERROR; + } while (!db_ready); + + return 0; +} + +/** + * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox + * @phba: Pointer to HBA context object. + * @mboxq: Pointer to mailbox object. + * + * The function posts a mailbox to the port. The mailbox is expected + * to be completely filled in and ready for the port to operate on it. + * This routine executes a synchronous completion operation on the + * mailbox by polling for its completion. + * + * The caller must not be holding any locks when calling this routine. + * + * Returns: + * MBX_SUCCESS - mailbox posted successfully + * Any of the MBX error values. + **/ +static int +lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + int rc = MBX_SUCCESS; + unsigned long iflag; + uint32_t mcqe_status; + uint32_t mbx_cmnd; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_mqe *mb = &mboxq->u.mqe; + struct lpfc_bmbx_create *mbox_rgn; + struct dma_address *dma_address; + + /* + * Only one mailbox can be active to the bootstrap mailbox region + * at a time and there is no queueing provided. + */ + spin_lock_irqsave(&phba->hbalock, iflag); + if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "(%d):2532 Mailbox command x%x (x%x/x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli_config_mbox_subsys_get(phba, mboxq), + lpfc_sli_config_mbox_opcode_get(phba, mboxq), + psli->sli_flag, MBX_POLL); + return MBXERR_ERROR; + } + /* The server grabs the token and owns it until release */ + psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = mboxq; + spin_unlock_irqrestore(&phba->hbalock, iflag); + + /* wait for bootstrap mbox register for readiness */ + rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); + if (rc) + goto exit; + /* + * Initialize the bootstrap memory region to avoid stale data areas + * in the mailbox post. Then copy the caller's mailbox contents to + * the bmbx mailbox region. + */ + mbx_cmnd = bf_get(lpfc_mqe_command, mb); + memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); + lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, + sizeof(struct lpfc_mqe)); + + /* Post the high mailbox dma address to the port and wait for ready.
*/ + dma_address = &phba->sli4_hba.bmbx.dma_address; + writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); + + /* wait for bootstrap mbox register for hi-address write done */ + rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); + if (rc) + goto exit; + + /* Post the low mailbox dma address to the port. */ + writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); + + /* wait for bootstrap mbox register for low address write done */ + rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); + if (rc) + goto exit; + + /* + * Read the CQ to ensure the mailbox has completed. + * If so, update the mailbox status so that the upper layers + * can complete the request normally. + */ + lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, + sizeof(struct lpfc_mqe)); + mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; + lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, + sizeof(struct lpfc_mcqe)); + mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); + /* + * When the CQE status indicates a failure and the mailbox status + * indicates success then copy the CQE status into the mailbox status + * (and prefix it with x4000). + */ + if (mcqe_status != MB_CQE_STATUS_SUCCESS) { + if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) + bf_set(lpfc_mqe_status, mb, + (LPFC_MBX_ERROR_RANGE | mcqe_status)); + rc = MBXERR_ERROR; + } else + lpfc_sli4_swap_str(phba, mboxq); + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" + " x%x x%x CQ: x%x x%x x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, + lpfc_sli_config_mbox_subsys_get(phba, mboxq), + lpfc_sli_config_mbox_opcode_get(phba, mboxq), + bf_get(lpfc_mqe_status, mb), + mb->un.mb_words[0], mb->un.mb_words[1], + mb->un.mb_words[2], mb->un.mb_words[3], + mb->un.mb_words[4], mb->un.mb_words[5], + mb->un.mb_words[6], mb->un.mb_words[7], + mb->un.mb_words[8], mb->un.mb_words[9], + mb->un.mb_words[10], mb->un.mb_words[11], + mb->un.mb_words[12], mboxq->mcqe.word0, + mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, + mboxq->mcqe.trailer); +exit: + /* We are holding the token, no needed for lock when release */ + spin_lock_irqsave(&phba->hbalock, iflag); + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + spin_unlock_irqrestore(&phba->hbalock, iflag); + return rc; +} + +/** + * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware + * @phba: Pointer to HBA context object. + * @mboxq: Pointer to mailbox object. + * @flag: Flag indicating how the mailbox need to be processed. + * + * This function is called by discovery code and HBA management code to submit + * a mailbox command to firmware with SLI-4 interface spec. + * + * Return codes the caller owns the mailbox command after the return of the + * function. + **/ +static int +lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, + uint32_t flag) +{ + struct lpfc_sli *psli = &phba->sli; + unsigned long iflags; + int rc; + + /* dump from issue mailbox command if setup */ + lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); + + rc = lpfc_mbox_dev_check(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "(%d):2544 Mailbox command x%x (x%x/x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? 
mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli_config_mbox_subsys_get(phba, mboxq), + lpfc_sli_config_mbox_opcode_get(phba, mboxq), + psli->sli_flag, flag); + goto out_not_finished; + } + + /* Detect polling mode and jump to a handler */ + if (!phba->sli4_hba.intr_enable) { + if (flag == MBX_POLL) + rc = lpfc_sli4_post_sync_mbox(phba, mboxq); + else + rc = -EIO; + if (rc != MBX_SUCCESS) + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "(%d):2541 Mailbox command x%x " + "(x%x/x%x) failure: " + "mqe_sta: x%x mcqe_sta: x%x/x%x " + "Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli_config_mbox_subsys_get(phba, + mboxq), + lpfc_sli_config_mbox_opcode_get(phba, + mboxq), + bf_get(lpfc_mqe_status, &mboxq->u.mqe), + bf_get(lpfc_mcqe_status, &mboxq->mcqe), + bf_get(lpfc_mcqe_ext_status, + &mboxq->mcqe), + psli->sli_flag, flag); + return rc; + } else if (flag == MBX_POLL) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "(%d):2542 Try to issue mailbox command " + "x%x (x%x/x%x) synchronously ahead of async " + "mailbox command queue: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli_config_mbox_subsys_get(phba, mboxq), + lpfc_sli_config_mbox_opcode_get(phba, mboxq), + psli->sli_flag, flag); + /* Try to block the asynchronous mailbox posting */ + rc = lpfc_sli4_async_mbox_block(phba); + if (!rc) { + /* Successfully blocked, now issue sync mbox cmd */ + rc = lpfc_sli4_post_sync_mbox(phba, mboxq); + if (rc != MBX_SUCCESS) + lpfc_printf_log(phba, KERN_WARNING, + LOG_MBOX | LOG_SLI, + "(%d):2597 Sync Mailbox command " + "x%x (x%x/x%x) failure: " + "mqe_sta: x%x mcqe_sta: x%x/x%x " + "Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli_config_mbox_subsys_get(phba, + mboxq), + lpfc_sli_config_mbox_opcode_get(phba, + mboxq), + bf_get(lpfc_mqe_status, &mboxq->u.mqe), + bf_get(lpfc_mcqe_status, &mboxq->mcqe), + bf_get(lpfc_mcqe_ext_status, + &mboxq->mcqe), + psli->sli_flag, flag); + /* Unblock the async mailbox posting afterward */ + lpfc_sli4_async_mbox_unblock(phba); + } + return rc; + } + + /* Now, interrupt mode asynchronous mailbox command */ + rc = lpfc_mbox_cmd_check(phba, mboxq); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "(%d):2543 Mailbox command x%x (x%x/x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli_config_mbox_subsys_get(phba, mboxq), + lpfc_sli_config_mbox_opcode_get(phba, mboxq), + psli->sli_flag, flag); + goto out_not_finished; + } + + /* Put the mailbox command to the driver internal FIFO */ + psli->slistat.mbox_busy++; + spin_lock_irqsave(&phba->hbalock, iflags); + lpfc_mbox_put(phba, mboxq); + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0354 Mbox cmd issue - Enqueue Data: " + "x%x (x%x/x%x) x%x x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0xffffff, + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + lpfc_sli_config_mbox_subsys_get(phba, mboxq), + lpfc_sli_config_mbox_opcode_get(phba, mboxq), + phba->pport->port_state, + psli->sli_flag, MBX_NOWAIT); + /* Wake up worker thread to transport mailbox command from head */ + lpfc_worker_wake_up(phba); + + return MBX_BUSY; + +out_not_finished: + return MBX_NOT_FINISHED; +} + +/** + * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device + * @phba: Pointer to HBA context object. 
+ * + * This function is called by worker thread to send a mailbox command to + * SLI4 HBA firmware. + * + **/ +int +lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *mboxq; + int rc = MBX_SUCCESS; + unsigned long iflags; + struct lpfc_mqe *mqe; + uint32_t mbx_cmnd; + + /* Check interrupt mode before post async mailbox command */ + if (unlikely(!phba->sli4_hba.intr_enable)) + return MBX_NOT_FINISHED; + + /* Check for mailbox command service token */ + spin_lock_irqsave(&phba->hbalock, iflags); + if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + return MBX_NOT_FINISHED; + } + if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + return MBX_NOT_FINISHED; + } + if (unlikely(phba->sli.mbox_active)) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0384 There is pending active mailbox cmd\n"); + return MBX_NOT_FINISHED; + } + /* Take the mailbox command service token */ + psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; + + /* Get the next mailbox command from head of queue */ + mboxq = lpfc_mbox_get(phba); + + /* If no more mailbox command waiting for post, we're done */ + if (!mboxq) { + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irqrestore(&phba->hbalock, iflags); + return MBX_SUCCESS; + } + phba->sli.mbox_active = mboxq; + spin_unlock_irqrestore(&phba->hbalock, iflags); + + /* Check device readiness for posting mailbox command */ + rc = lpfc_mbox_dev_check(phba); + if (unlikely(rc)) + /* Driver clean routine will clean up pending mailbox */ + goto out_not_finished; + + /* Prepare the mbox command to be posted */ + mqe = &mboxq->u.mqe; + mbx_cmnd = bf_get(lpfc_mqe_command, mqe); + + /* Start timer for the mbox_tmo and log some mailbox post messages */ + mod_timer(&psli->mbox_tmo, (jiffies + + msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " + "x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, + lpfc_sli_config_mbox_subsys_get(phba, mboxq), + lpfc_sli_config_mbox_opcode_get(phba, mboxq), + phba->pport->port_state, psli->sli_flag); + + if (mbx_cmnd != MBX_HEARTBEAT) { + if (mboxq->vport) { + lpfc_debugfs_disc_trc(mboxq->vport, + LPFC_DISC_TRC_MBOX_VPORT, + "MBOX Send vport: cmd:x%x mb:x%x x%x", + mbx_cmnd, mqe->un.mb_words[0], + mqe->un.mb_words[1]); + } else { + lpfc_debugfs_disc_trc(phba->pport, + LPFC_DISC_TRC_MBOX, + "MBOX Send: cmd:x%x mb:x%x x%x", + mbx_cmnd, mqe->un.mb_words[0], + mqe->un.mb_words[1]); + } + } + psli->slistat.mbox_cmd++; + + /* Post the mailbox command to the port */ + rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "(%d):2533 Mailbox command x%x (x%x/x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? 
mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli_config_mbox_subsys_get(phba, mboxq), + lpfc_sli_config_mbox_opcode_get(phba, mboxq), + psli->sli_flag, MBX_NOWAIT); + goto out_not_finished; + } + + return rc; + +out_not_finished: + spin_lock_irqsave(&phba->hbalock, iflags); + if (phba->sli.mbox_active) { + mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; + __lpfc_mbox_cmpl_put(phba, mboxq); + /* Release the token */ + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + + return MBX_NOT_FINISHED; +} + +/** + * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command + * @phba: Pointer to HBA context object. + * @pmbox: Pointer to mailbox object. + * @flag: Flag indicating how the mailbox need to be processed. + * + * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from + * the API jump table function pointer from the lpfc_hba struct. + * + * Return codes the caller owns the mailbox command after the return of the + * function. + **/ +int +lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) +{ + return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); +} + +/** + * lpfc_mbox_api_table_setup - Set up mbox api function jump table + * @phba: The hba struct for which this call is being executed. + * @dev_grp: The HBA PCI-Device group number. + * + * This routine sets up the mbox interface API function jump table in @phba + * struct. + * Returns: 0 - success, -ENODEV - failure. + **/ +int +lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) +{ + + switch (dev_grp) { + case LPFC_PCI_DEV_LP: + phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; + phba->lpfc_sli_handle_slow_ring_event = + lpfc_sli_handle_slow_ring_event_s3; + phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; + phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; + phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; + break; + case LPFC_PCI_DEV_OC: + phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; + phba->lpfc_sli_handle_slow_ring_event = + lpfc_sli_handle_slow_ring_event_s4; + phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; + phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; + phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1420 Invalid HBA PCI-device group: 0x%x\n", + dev_grp); + return -ENODEV; + } + return 0; +} + +/** + * __lpfc_sli_ringtx_put - Add an iocb to the txq + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @piocb: Pointer to address of newly added command iocb. + * + * This function is called with hbalock held for SLI3 ports or + * the ring lock held for SLI4 ports to add a command + * iocb to the txq when SLI layer cannot submit the command iocb + * to the ring. + **/ +void +__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *piocb) +{ + if (phba->sli_rev == LPFC_SLI_REV4) + lockdep_assert_held(&pring->ring_lock); + else + lockdep_assert_held(&phba->hbalock); + /* Insert the caller's iocb in the txq tail for later processing. */ + list_add_tail(&piocb->list, &pring->txq); +} + +/** + * lpfc_sli_next_iocb - Get the next iocb in the txq + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @piocb: Pointer to address of newly added command iocb. + * + * This function is called with hbalock held before a new + * iocb is submitted to the firmware. 
This function checks + * txq to flush the iocbs in txq to Firmware before + * submitting new iocbs to the Firmware. + * If there are iocbs in the txq which need to be submitted + * to firmware, lpfc_sli_next_iocb returns the first element + * of the txq after dequeuing it from txq. + * If there is no iocb in the txq then the function will return + * *piocb and *piocb is set to NULL. Caller needs to check + * *piocb to find if there are more commands in the txq. + **/ +static struct lpfc_iocbq * +lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq **piocb) +{ + struct lpfc_iocbq * nextiocb; + + lockdep_assert_held(&phba->hbalock); + + nextiocb = lpfc_sli_ringtx_get(phba, pring); + if (!nextiocb) { + nextiocb = *piocb; + *piocb = NULL; + } + + return nextiocb; +} + +/** + * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb + * @phba: Pointer to HBA context object. + * @ring_number: SLI ring number to issue iocb on. + * @piocb: Pointer to command iocb. + * @flag: Flag indicating if this command can be put into txq. + * + * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue + * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is + * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT + * flag is turned on, the function returns IOCB_ERROR. When the link is down, + * this function allows only iocbs for posting buffers. This function finds + * next available slot in the command ring and posts the command to the + * available slot and writes the port attention register to request HBA start + * processing new iocb. If there is no slot available in the ring and + * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise + * the function returns IOCB_BUSY. + * + * This function is called with hbalock held. The function will return success + * after it successfully submit the iocb to firmware or after adding to the + * txq. + **/ +static int +__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *piocb, uint32_t flag) +{ + struct lpfc_iocbq *nextiocb; + IOCB_t *iocb; + struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; + + lockdep_assert_held(&phba->hbalock); + + if (piocb->cmd_cmpl && (!piocb->vport) && + (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && + (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1807 IOCB x%x failed. No vport\n", + piocb->iocb.ulpCommand); + dump_stack(); + return IOCB_ERROR; + } + + + /* If the PCI channel is in offline state, do not post iocbs. */ + if (unlikely(pci_channel_offline(phba->pcidev))) + return IOCB_ERROR; + + /* If HBA has a deferred error attention, fail the iocb. */ + if (unlikely(phba->hba_flag & DEFER_ERATT)) + return IOCB_ERROR; + + /* + * We should never get an IOCB if we are in a < LINK_DOWN state + */ + if (unlikely(phba->link_state < LPFC_LINK_DOWN)) + return IOCB_ERROR; + + /* + * Check to see if we are blocking IOCB processing because of a + * outstanding event. + */ + if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) + goto iocb_busy; + + if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { + /* + * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF + * can be issued if the link is not up. + */ + switch (piocb->iocb.ulpCommand) { + case CMD_QUE_RING_BUF_CN: + case CMD_QUE_RING_BUF64_CN: + /* + * For IOCBs, like QUE_RING_BUF, that have no rsp ring + * completion, cmd_cmpl MUST be 0. 
+ */ + if (piocb->cmd_cmpl) + piocb->cmd_cmpl = NULL; + fallthrough; + case CMD_CREATE_XRI_CR: + case CMD_CLOSE_XRI_CN: + case CMD_CLOSE_XRI_CX: + break; + default: + goto iocb_busy; + } + + /* + * For FCP commands, we must be in a state where we can process link + * attention events. + */ + } else if (unlikely(pring->ringno == LPFC_FCP_RING && + !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { + goto iocb_busy; + } + + while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && + (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) + lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); + + if (iocb) + lpfc_sli_update_ring(phba, pring); + else + lpfc_sli_update_full_ring(phba, pring); + + if (!piocb) + return IOCB_SUCCESS; + + goto out_busy; + + iocb_busy: + pring->stats.iocb_cmd_delay++; + + out_busy: + + if (!(flag & SLI_IOCB_RET_IOCB)) { + __lpfc_sli_ringtx_put(phba, pring, piocb); + return IOCB_SUCCESS; + } + + return IOCB_BUSY; +} + +/** + * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb + * @phba: Pointer to HBA context object. + * @ring_number: SLI ring number to issue wqe on. + * @piocb: Pointer to command iocb. + * @flag: Flag indicating if this command can be put into txq. + * + * __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to + * send an iocb command to an HBA with SLI-3 interface spec. + * + * This function takes the hbalock before invoking the lockless version. + * The function will return success after it successfully submit the wqe to + * firmware or after adding to the txq. + **/ +static int +__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *piocb, uint32_t flag) +{ + unsigned long iflags; + int rc; + + spin_lock_irqsave(&phba->hbalock, iflags); + rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + return rc; +} + +/** + * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe + * @phba: Pointer to HBA context object. + * @ring_number: SLI ring number to issue wqe on. + * @piocb: Pointer to command iocb. + * @flag: Flag indicating if this command can be put into txq. + * + * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue + * an wqe command to an HBA with SLI-4 interface spec. + * + * This function is a lockless version. The function will return success + * after it successfully submit the wqe to firmware or after adding to the + * txq. 
+ **/ +static int +__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *piocb, uint32_t flag) +{ + struct lpfc_io_buf *lpfc_cmd = piocb->io_buf; + + lpfc_prep_embed_io(phba, lpfc_cmd); + return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb); +} + +void +lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) +{ + struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq; + union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe; + struct sli4_sge *sgl; + + /* 128 byte wqe support here */ + sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; + + if (phba->fcp_embed_io) { + struct fcp_cmnd *fcp_cmnd; + u32 *ptr; + + fcp_cmnd = lpfc_cmd->fcp_cmnd; + + /* Word 0-2 - FCP_CMND */ + wqe->generic.bde.tus.f.bdeFlags = + BUFF_TYPE_BDE_IMMED; + wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; + wqe->generic.bde.addrHigh = 0; + wqe->generic.bde.addrLow = 88; /* Word 22 */ + + bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); + bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0); + + /* Word 22-29 FCP CMND Payload */ + ptr = &wqe->words[22]; + memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); + } else { + /* Word 0-2 - Inline BDE */ + wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd); + wqe->generic.bde.addrHigh = sgl->addr_hi; + wqe->generic.bde.addrLow = sgl->addr_lo; + + /* Word 10 */ + bf_set(wqe_dbde, &wqe->generic.wqe_com, 1); + bf_set(wqe_wqes, &wqe->generic.wqe_com, 0); + } + + /* add the VMID tags as per switch response */ + if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) { + if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) { + bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); + bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, + (piocb->vmid_tag.cs_ctl_vmid)); + } else if (phba->cfg_vmid_app_header) { + bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1); + bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); + wqe->words[31] = piocb->vmid_tag.app_id; + } + } +} + +/** + * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb + * @phba: Pointer to HBA context object. + * @ring_number: SLI ring number to issue iocb on. + * @piocb: Pointer to command iocb. + * @flag: Flag indicating if this command can be put into txq. + * + * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue + * an iocb command to an HBA with SLI-4 interface spec. + * + * This function is called with ringlock held. The function will return success + * after it successfully submit the iocb to firmware or after adding to the + * txq. 
+ **/ +static int +__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *piocb, uint32_t flag) +{ + struct lpfc_sglq *sglq; + union lpfc_wqe128 *wqe; + struct lpfc_queue *wq; + struct lpfc_sli_ring *pring; + u32 ulp_command = get_job_cmnd(phba, piocb); + + /* Get the WQ */ + if ((piocb->cmd_flag & LPFC_IO_FCP) || + (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) { + wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq; + } else { + wq = phba->sli4_hba.els_wq; + } + + /* Get corresponding ring */ + pring = wq->pring; + + /* + * The WQE can be either 64 or 128 bytes, + */ + + lockdep_assert_held(&pring->ring_lock); + wqe = &piocb->wqe; + if (piocb->sli4_xritag == NO_XRI) { + if (ulp_command == CMD_ABORT_XRI_CX) + sglq = NULL; + else { + sglq = __lpfc_sli_get_els_sglq(phba, piocb); + if (!sglq) { + if (!(flag & SLI_IOCB_RET_IOCB)) { + __lpfc_sli_ringtx_put(phba, + pring, + piocb); + return IOCB_SUCCESS; + } else { + return IOCB_BUSY; + } + } + } + } else if (piocb->cmd_flag & LPFC_IO_FCP) { + /* These IO's already have an XRI and a mapped sgl. */ + sglq = NULL; + } + else { + /* + * This is a continuation of a commandi,(CX) so this + * sglq is on the active list + */ + sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag); + if (!sglq) + return IOCB_ERROR; + } + + if (sglq) { + piocb->sli4_lxritag = sglq->sli4_lxritag; + piocb->sli4_xritag = sglq->sli4_xritag; + + /* ABTS sent by initiator to CT exchange, the + * RX_ID field will be filled with the newly + * allocated responder XRI. + */ + if (ulp_command == CMD_XMIT_BLS_RSP64_CX && + piocb->abort_bls == LPFC_ABTS_UNSOL_INT) + bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, + piocb->sli4_xritag); + + bf_set(wqe_xri_tag, &wqe->generic.wqe_com, + piocb->sli4_xritag); + + if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI) + return IOCB_ERROR; + } + + if (lpfc_sli4_wq_put(wq, wqe)) + return IOCB_ERROR; + + lpfc_sli_ringtxcmpl_put(phba, pring, piocb); + + return 0; +} + +/* + * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o + * + * This routine wraps the actual fcp i/o function for issusing WQE for sli-4 + * or IOCB for sli-3 function. + * pointer from the lpfc_hba struct. + * + * Return codes: + * IOCB_ERROR - Error + * IOCB_SUCCESS - Success + * IOCB_BUSY - Busy + **/ +int +lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *piocb, uint32_t flag) +{ + return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag); +} + +/* + * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb + * + * This routine wraps the actual lockless version for issusing IOCB function + * pointer from the lpfc_hba struct. 
+ * + * Return codes: + * IOCB_ERROR - Error + * IOCB_SUCCESS - Success + * IOCB_BUSY - Busy + **/ +int +__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *piocb, uint32_t flag) +{ + return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); +} + +static void +__lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq, + struct lpfc_vport *vport, + struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did, + u32 elscmd, u8 tmo, u8 expect_rsp) +{ + struct lpfc_hba *phba = vport->phba; + IOCB_t *cmd; + + cmd = &cmdiocbq->iocb; + memset(cmd, 0, sizeof(*cmd)); + + cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); + cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys); + cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; + + if (expect_rsp) { + cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); + cmd->un.elsreq64.remoteID = did; /* DID */ + cmd->ulpCommand = CMD_ELS_REQUEST64_CR; + cmd->ulpTimeout = tmo; + } else { + cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64); + cmd->un.genreq64.xmit_els_remoteID = did; /* DID */ + cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; + cmd->ulpPU = PARM_NPIV_DID; + } + cmd->ulpBdeCount = 1; + cmd->ulpLe = 1; + cmd->ulpClass = CLASS3; + + /* If we have NPIV enabled, we want to send ELS traffic by VPI. */ + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { + if (expect_rsp) { + cmd->un.elsreq64.myID = vport->fc_myDID; + + /* For ELS_REQUEST64_CR, use the VPI by default */ + cmd->ulpContext = phba->vpi_ids[vport->vpi]; + } + + cmd->ulpCt_h = 0; + /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ + if (elscmd == ELS_CMD_ECHO) + cmd->ulpCt_l = 0; /* context = invalid RPI */ + else + cmd->ulpCt_l = 1; /* context = VPI */ + } +} + +static void +__lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq, + struct lpfc_vport *vport, + struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did, + u32 elscmd, u8 tmo, u8 expect_rsp) +{ + struct lpfc_hba *phba = vport->phba; + union lpfc_wqe128 *wqe; + struct ulp_bde64_le *bde; + u8 els_id; + + wqe = &cmdiocbq->wqe; + memset(wqe, 0, sizeof(*wqe)); + + /* Word 0 - 2 BDE */ + bde = (struct ulp_bde64_le *)&wqe->generic.bde; + bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys)); + bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys)); + bde->type_size = cpu_to_le32(cmd_size); + bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); + + if (expect_rsp) { + bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE); + + /* Transfer length */ + wqe->els_req.payload_len = cmd_size; + wqe->els_req.max_response_payload_len = FCELSSIZE; + + /* DID */ + bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did); + + /* Word 11 - ELS_ID */ + switch (elscmd) { + case ELS_CMD_PLOGI: + els_id = LPFC_ELS_ID_PLOGI; + break; + case ELS_CMD_FLOGI: + els_id = LPFC_ELS_ID_FLOGI; + break; + case ELS_CMD_LOGO: + els_id = LPFC_ELS_ID_LOGO; + break; + case ELS_CMD_FDISC: + if (!vport->fc_myDID) { + els_id = LPFC_ELS_ID_FDISC; + break; + } + fallthrough; + default: + els_id = LPFC_ELS_ID_DEFAULT; + break; + } + + bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); + } else { + /* DID */ + bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did); + + /* Transfer length */ + wqe->xmit_els_rsp.response_payload_len = cmd_size; + + bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com, + CMD_XMIT_ELS_RSP64_WQE); + } + + bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo); + bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag); + bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3); + + /* If we have NPIV enabled, we want to send 
ELS traffic by VPI. + * For SLI4, since the driver controls VPIs we also want to include + * all ELS pt2pt protocol traffic as well. + */ + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) || + (vport->fc_flag & FC_PT2PT)) { + if (expect_rsp) { + bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID); + + /* For ELS_REQUEST64_WQE, use the VPI by default */ + bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, + phba->vpi_ids[vport->vpi]); + } + + /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ + if (elscmd == ELS_CMD_ECHO) + bf_set(wqe_ct, &wqe->generic.wqe_com, 0); + else + bf_set(wqe_ct, &wqe->generic.wqe_com, 1); + } +} + +void +lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, + struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, + u16 cmd_size, u32 did, u32 elscmd, u8 tmo, + u8 expect_rsp) +{ + phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did, + elscmd, tmo, expect_rsp); +} + +static void +__lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp, + u16 rpi, u32 num_entry, u8 tmo) +{ + IOCB_t *cmd; + + cmd = &cmdiocbq->iocb; + memset(cmd, 0, sizeof(*cmd)); + + cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); + cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); + cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; + cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64); + + cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; + cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT; + cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); + + cmd->ulpContext = rpi; + cmd->ulpClass = CLASS3; + cmd->ulpCommand = CMD_GEN_REQUEST64_CR; + cmd->ulpBdeCount = 1; + cmd->ulpLe = 1; + cmd->ulpOwner = OWN_CHIP; + cmd->ulpTimeout = tmo; +} + +static void +__lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp, + u16 rpi, u32 num_entry, u8 tmo) +{ + union lpfc_wqe128 *cmdwqe; + struct ulp_bde64_le *bde, *bpl; + u32 xmit_len = 0, total_len = 0, size, type, i; + + cmdwqe = &cmdiocbq->wqe; + memset(cmdwqe, 0, sizeof(*cmdwqe)); + + /* Calculate total_len and xmit_len */ + bpl = (struct ulp_bde64_le *)bmp->virt; + for (i = 0; i < num_entry; i++) { + size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK; + total_len += size; + } + for (i = 0; i < num_entry; i++) { + size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK; + type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK; + if (type != ULP_BDE64_TYPE_BDE_64) + break; + xmit_len += size; + } + + /* Words 0 - 2 */ + bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde; + bde->addr_low = bpl->addr_low; + bde->addr_high = bpl->addr_high; + bde->type_size = cpu_to_le32(xmit_len); + bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); + + /* Word 3 */ + cmdwqe->gen_req.request_payload_len = xmit_len; + + /* Word 5 */ + bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT); + bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL); + bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1); + bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1); + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi); + + /* Word 7 */ + bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo); + bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3); + bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR); + bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI); + + /* Word 12 */ + cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len; +} + +void +lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, + struct lpfc_dmabuf *bmp, u16 
rpi, u32 num_entry, u8 tmo) +{ + phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo); +} + +static void +__lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq, + struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, + u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd) +{ + IOCB_t *icmd; + + icmd = &cmdiocbq->iocb; + memset(icmd, 0, sizeof(*icmd)); + + icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys); + icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys); + icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; + icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64)); + icmd->un.xseq64.w5.hcsw.Fctl = LA; + if (last_seq) + icmd->un.xseq64.w5.hcsw.Fctl |= LS; + icmd->un.xseq64.w5.hcsw.Dfctl = 0; + icmd->un.xseq64.w5.hcsw.Rctl = rctl; + icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; + + icmd->ulpBdeCount = 1; + icmd->ulpLe = 1; + icmd->ulpClass = CLASS3; + + switch (cr_cx_cmd) { + case CMD_XMIT_SEQUENCE64_CR: + icmd->ulpContext = rpi; + icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR; + break; + case CMD_XMIT_SEQUENCE64_CX: + icmd->ulpContext = ox_id; + icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; + break; + default: + break; + } +} + +static void +__lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq, + struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, + u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd) +{ + union lpfc_wqe128 *wqe; + struct ulp_bde64 *bpl; + + wqe = &cmdiocbq->wqe; + memset(wqe, 0, sizeof(*wqe)); + + /* Words 0 - 2 */ + bpl = (struct ulp_bde64 *)bmp->virt; + wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh; + wqe->xmit_sequence.bde.addrLow = bpl->addrLow; + wqe->xmit_sequence.bde.tus.w = bpl->tus.w; + + /* Word 5 */ + bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq); + bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1); + bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0); + bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl); + bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT); + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi); + + bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com, + CMD_XMIT_SEQUENCE64_WQE); + + /* Word 7 */ + bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3); + + /* Word 9 */ + bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id); + + /* Word 12 */ + if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) + wqe->xmit_sequence.xmit_len = full_size; + else + wqe->xmit_sequence.xmit_len = + wqe->xmit_sequence.bde.tus.f.bdeSize; +} + +void +lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, + struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, + u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd) +{ + phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry, + rctl, last_seq, cr_cx_cmd); +} + +static void +__lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context, + u16 iotag, u8 ulp_class, u16 cqid, bool ia, + bool wqec) +{ + IOCB_t *icmd = NULL; + + icmd = &cmdiocbq->iocb; + memset(icmd, 0, sizeof(*icmd)); + + /* Word 5 */ + icmd->un.acxri.abortContextTag = ulp_context; + icmd->un.acxri.abortIoTag = iotag; + + if (ia) { + /* Word 7 */ + icmd->ulpCommand = CMD_CLOSE_XRI_CN; + } else { + /* Word 3 */ + icmd->un.acxri.abortType = ABORT_TYPE_ABTS; + + /* Word 7 */ + icmd->ulpClass = ulp_class; + icmd->ulpCommand = CMD_ABORT_XRI_CN; + } + + /* Word 7 */ + icmd->ulpLe = 1; +} + +static void +__lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context, + u16 iotag, u8 ulp_class, u16 cqid, bool ia, + bool wqec) +{ + union lpfc_wqe128 *wqe; + + wqe = 
&cmdiocbq->wqe; + memset(wqe, 0, sizeof(*wqe)); + + /* Word 3 */ + bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); + if (ia) + bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); + else + bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE); + + /* Word 8 */ + wqe->abort_cmd.wqe_com.abort_tag = ulp_context; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag); + + /* Word 10 */ + bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); + + /* Word 11 */ + if (wqec) + bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid); + bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND); +} + +void +lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, + u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid, + bool ia, bool wqec) +{ + phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class, + cqid, ia, wqec); +} + +/** + * lpfc_sli_api_table_setup - Set up sli api function jump table + * @phba: The hba struct for which this call is being executed. + * @dev_grp: The HBA PCI-Device group number. + * + * This routine sets up the SLI interface API function jump table in @phba + * struct. + * Returns: 0 - success, -ENODEV - failure. + **/ +int +lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) +{ + + switch (dev_grp) { + case LPFC_PCI_DEV_LP: + phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; + phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; + phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3; + phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3; + phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3; + phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3; + phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3; + break; + case LPFC_PCI_DEV_OC: + phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; + phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; + phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4; + phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4; + phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4; + phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4; + phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1419 Invalid HBA PCI-device group: 0x%x\n", + dev_grp); + return -ENODEV; + } + return 0; +} + +/** + * lpfc_sli4_calc_ring - Calculates which ring to use + * @phba: Pointer to HBA context object. + * @piocb: Pointer to command iocb. + * + * For SLI4 only, FCP IO can deferred to one fo many WQs, based on + * hba_wqidx, thus we need to calculate the corresponding ring. + * Since ABORTS must go on the same WQ of the command they are + * aborting, we use command's hba_wqidx. + */ +struct lpfc_sli_ring * +lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) +{ + struct lpfc_io_buf *lpfc_cmd; + + if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { + if (unlikely(!phba->sli4_hba.hdwq)) + return NULL; + /* + * for abort iocb hba_wqidx should already + * be setup based on what work queue we used. 
+ */ + if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) { + lpfc_cmd = piocb->io_buf; + piocb->hba_wqidx = lpfc_cmd->hdwq_no; + } + return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring; + } else { + if (unlikely(!phba->sli4_hba.els_wq)) + return NULL; + piocb->hba_wqidx = 0; + return phba->sli4_hba.els_wq->pring; + } +} + +inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq) +{ + struct lpfc_hba *phba = eq->phba; + + /* + * Unlocking an irq is one of the entry point to check + * for re-schedule, but we are good for io submission + * path as midlayer does a get_cpu to glue us in. Flush + * out the invalidate queue so we can see the updated + * value for flag. + */ + smp_rmb(); + + if (READ_ONCE(eq->mode) == LPFC_EQ_POLL) + /* We will not likely get the completion for the caller + * during this iteration but i guess that's fine. + * Future io's coming on this eq should be able to + * pick it up. As for the case of single io's, they + * will be handled through a sched from polling timer + * function which is currently triggered every 1msec. + */ + lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM, + LPFC_QUEUE_WORK); +} + +/** + * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb + * @phba: Pointer to HBA context object. + * @ring_number: Ring number + * @piocb: Pointer to command iocb. + * @flag: Flag indicating if this command can be put into txq. + * + * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb + * function. This function gets the hbalock and calls + * __lpfc_sli_issue_iocb function and will return the error returned + * by __lpfc_sli_issue_iocb function. This wrapper is used by + * functions which do not hold hbalock. + **/ +int +lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *piocb, uint32_t flag) +{ + struct lpfc_sli_ring *pring; + struct lpfc_queue *eq; + unsigned long iflags; + int rc; + + /* If the PCI channel is in offline state, do not post iocbs. */ + if (unlikely(pci_channel_offline(phba->pcidev))) + return IOCB_ERROR; + + if (phba->sli_rev == LPFC_SLI_REV4) { + lpfc_sli_prep_wqe(phba, piocb); + + eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq; + + pring = lpfc_sli4_calc_ring(phba, piocb); + if (unlikely(pring == NULL)) + return IOCB_ERROR; + + spin_lock_irqsave(&pring->ring_lock, iflags); + rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + + lpfc_sli4_poll_eq(eq); + } else { + /* For now, SLI2/3 will still use hbalock */ + spin_lock_irqsave(&phba->hbalock, iflags); + rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); + spin_unlock_irqrestore(&phba->hbalock, iflags); + } + return rc; +} + +/** + * lpfc_extra_ring_setup - Extra ring setup function + * @phba: Pointer to HBA context object. + * + * This function is called while driver attaches with the + * HBA to setup the extra ring. The extra ring is used + * only when driver needs to support target mode functionality + * or IP over FC functionalities. + * + * This function is called with no lock held. SLI3 only. 
+ **/ +static int +lpfc_extra_ring_setup( struct lpfc_hba *phba) +{ + struct lpfc_sli *psli; + struct lpfc_sli_ring *pring; + + psli = &phba->sli; + + /* Adjust cmd/rsp ring iocb entries more evenly */ + + /* Take some away from the FCP ring */ + pring = &psli->sli3_ring[LPFC_FCP_RING]; + pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; + pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; + pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; + pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; + + /* and give them to the extra ring */ + pring = &psli->sli3_ring[LPFC_EXTRA_RING]; + + pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; + pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; + pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; + pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; + + /* Setup default profile for this ring */ + pring->iotag_max = 4096; + pring->num_mask = 1; + pring->prt[0].profile = 0; /* Mask 0 */ + pring->prt[0].rctl = phba->cfg_multi_ring_rctl; + pring->prt[0].type = phba->cfg_multi_ring_type; + pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; + return 0; +} + +static void +lpfc_sli_post_recovery_event(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp) +{ + unsigned long iflags; + struct lpfc_work_evt *evtp = &ndlp->recovery_evt; + + spin_lock_irqsave(&phba->hbalock, iflags); + if (!list_empty(&evtp->evt_listp)) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + return; + } + + /* Incrementing the reference count until the queued work is done. */ + evtp->evt_arg1 = lpfc_nlp_get(ndlp); + if (!evtp->evt_arg1) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + return; + } + evtp->evt = LPFC_EVT_RECOVER_PORT; + list_add_tail(&evtp->evt_listp, &phba->work_list); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + lpfc_worker_wake_up(phba); +} + +/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. + * @phba: Pointer to HBA context object. + * @iocbq: Pointer to iocb object. + * + * The async_event handler calls this routine when it receives + * an ASYNC_STATUS_CN event from the port. The port generates + * this event when an Abort Sequence request to an rport fails + * twice in succession. The abort could be originated by the + * driver or by the port. The ABTS could have been for an ELS + * or FCP IO. The port only generates this event when an ABTS + * fails to complete after one retry. + */ +static void +lpfc_sli_abts_err_handler(struct lpfc_hba *phba, + struct lpfc_iocbq *iocbq) +{ + struct lpfc_nodelist *ndlp = NULL; + uint16_t rpi = 0, vpi = 0; + struct lpfc_vport *vport = NULL; + + /* The rpi in the ulpContext is vport-sensitive. */ + vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; + rpi = iocbq->iocb.ulpContext; + + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3092 Port generated ABTS async event " + "on vpi %d rpi %d status 0x%x\n", + vpi, rpi, iocbq->iocb.ulpStatus); + + vport = lpfc_find_vport_by_vpid(phba, vpi); + if (!vport) + goto err_exit; + ndlp = lpfc_findnode_rpi(vport, rpi); + if (!ndlp) + goto err_exit; + + if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) + lpfc_sli_abts_recover_port(vport, ndlp); + return; + + err_exit: + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3095 Event Context not found, no " + "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", + vpi, rpi, iocbq->iocb.ulpStatus, + iocbq->iocb.ulpContext); +} + +/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. + * @phba: pointer to HBA context object. 
+ * @ndlp: nodelist pointer for the impacted rport. + * @axri: pointer to the wcqe containing the failed exchange. + * + * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the + * port. The port generates this event when an abort exchange request to an + * rport fails twice in succession with no reply. The abort could be originated + * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. + */ +void +lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, + struct sli4_wcqe_xri_aborted *axri) +{ + uint32_t ext_status = 0; + + if (!ndlp) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3115 Node Context not found, driver " + "ignoring abts err event\n"); + return; + } + + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3116 Port generated FCP XRI ABORT event on " + "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", + ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], + bf_get(lpfc_wcqe_xa_xri, axri), + bf_get(lpfc_wcqe_xa_status, axri), + axri->parameter); + + /* + * Catch the ABTS protocol failure case. Older OCe FW releases returned + * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and + * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. + */ + ext_status = axri->parameter & IOERR_PARAM_MASK; + if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && + ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) + lpfc_sli_post_recovery_event(phba, ndlp); +} + +/** + * lpfc_sli_async_event_handler - ASYNC iocb handler function + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @iocbq: Pointer to iocb object. + * + * This function is called by the slow ring event handler + * function when there is an ASYNC event iocb in the ring. + * This function is called with no lock held. + * Currently this function handles only temperature related + * ASYNC events. The function decodes the temperature sensor + * event message and posts events for the management applications. + **/ +static void +lpfc_sli_async_event_handler(struct lpfc_hba * phba, + struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) +{ + IOCB_t *icmd; + uint16_t evt_code; + struct temp_event temp_event_data; + struct Scsi_Host *shost; + uint32_t *iocb_w; + + icmd = &iocbq->iocb; + evt_code = icmd->un.asyncstat.evt_code; + + switch (evt_code) { + case ASYNC_TEMP_WARN: + case ASYNC_TEMP_SAFE: + temp_event_data.data = (uint32_t) icmd->ulpContext; + temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; + if (evt_code == ASYNC_TEMP_WARN) { + temp_event_data.event_code = LPFC_THRESHOLD_TEMP; + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0347 Adapter is very hot, please take " + "corrective action. temperature : %d Celsius\n", + (uint32_t) icmd->ulpContext); + } else { + temp_event_data.event_code = LPFC_NORMAL_TEMP; + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0340 Adapter temperature is OK now. 
" + "temperature : %d Celsius\n", + (uint32_t) icmd->ulpContext); + } + + /* Send temperature change event to applications */ + shost = lpfc_shost_from_vport(phba->pport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(temp_event_data), (char *) &temp_event_data, + LPFC_NL_VENDOR_ID); + break; + case ASYNC_STATUS_CN: + lpfc_sli_abts_err_handler(phba, iocbq); + break; + default: + iocb_w = (uint32_t *) icmd; + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0346 Ring %d handler: unexpected ASYNC_STATUS" + " evt_code 0x%x\n" + "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" + "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" + "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" + "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", + pring->ringno, icmd->un.asyncstat.evt_code, + iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], + iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], + iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], + iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); + + break; + } +} + + +/** + * lpfc_sli4_setup - SLI ring setup function + * @phba: Pointer to HBA context object. + * + * lpfc_sli_setup sets up rings of the SLI interface with + * number of iocbs per ring and iotags. This function is + * called while driver attach to the HBA and before the + * interrupts are enabled. So there is no need for locking. + * + * This function always returns 0. + **/ +int +lpfc_sli4_setup(struct lpfc_hba *phba) +{ + struct lpfc_sli_ring *pring; + + pring = phba->sli4_hba.els_wq->pring; + pring->num_mask = LPFC_MAX_RING_MASK; + pring->prt[0].profile = 0; /* Mask 0 */ + pring->prt[0].rctl = FC_RCTL_ELS_REQ; + pring->prt[0].type = FC_TYPE_ELS; + pring->prt[0].lpfc_sli_rcv_unsol_event = + lpfc_els_unsol_event; + pring->prt[1].profile = 0; /* Mask 1 */ + pring->prt[1].rctl = FC_RCTL_ELS_REP; + pring->prt[1].type = FC_TYPE_ELS; + pring->prt[1].lpfc_sli_rcv_unsol_event = + lpfc_els_unsol_event; + pring->prt[2].profile = 0; /* Mask 2 */ + /* NameServer Inquiry */ + pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; + /* NameServer */ + pring->prt[2].type = FC_TYPE_CT; + pring->prt[2].lpfc_sli_rcv_unsol_event = + lpfc_ct_unsol_event; + pring->prt[3].profile = 0; /* Mask 3 */ + /* NameServer response */ + pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; + /* NameServer */ + pring->prt[3].type = FC_TYPE_CT; + pring->prt[3].lpfc_sli_rcv_unsol_event = + lpfc_ct_unsol_event; + return 0; +} + +/** + * lpfc_sli_setup - SLI ring setup function + * @phba: Pointer to HBA context object. + * + * lpfc_sli_setup sets up rings of the SLI interface with + * number of iocbs per ring and iotags. This function is + * called while driver attach to the HBA and before the + * interrupts are enabled. So there is no need for locking. + * + * This function always returns 0. SLI3 only. 
+ **/ +int +lpfc_sli_setup(struct lpfc_hba *phba) +{ + int i, totiocbsize = 0; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + + psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; + psli->sli_flag = 0; + + psli->iocbq_lookup = NULL; + psli->iocbq_lookup_len = 0; + psli->last_iotag = 0; + + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + switch (i) { + case LPFC_FCP_RING: /* ring 0 - FCP */ + /* numCiocb and numRiocb are used in config_port */ + pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; + pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; + pring->sli.sli3.numCiocb += + SLI2_IOCB_CMD_R1XTRA_ENTRIES; + pring->sli.sli3.numRiocb += + SLI2_IOCB_RSP_R1XTRA_ENTRIES; + pring->sli.sli3.numCiocb += + SLI2_IOCB_CMD_R3XTRA_ENTRIES; + pring->sli.sli3.numRiocb += + SLI2_IOCB_RSP_R3XTRA_ENTRIES; + pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? + SLI3_IOCB_CMD_SIZE : + SLI2_IOCB_CMD_SIZE; + pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? + SLI3_IOCB_RSP_SIZE : + SLI2_IOCB_RSP_SIZE; + pring->iotag_ctr = 0; + pring->iotag_max = + (phba->cfg_hba_queue_depth * 2); + pring->fast_iotag = pring->iotag_max; + pring->num_mask = 0; + break; + case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ + /* numCiocb and numRiocb are used in config_port */ + pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; + pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; + pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? + SLI3_IOCB_CMD_SIZE : + SLI2_IOCB_CMD_SIZE; + pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? + SLI3_IOCB_RSP_SIZE : + SLI2_IOCB_RSP_SIZE; + pring->iotag_max = phba->cfg_hba_queue_depth; + pring->num_mask = 0; + break; + case LPFC_ELS_RING: /* ring 2 - ELS / CT */ + /* numCiocb and numRiocb are used in config_port */ + pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; + pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; + pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? + SLI3_IOCB_CMD_SIZE : + SLI2_IOCB_CMD_SIZE; + pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 
+ SLI3_IOCB_RSP_SIZE : + SLI2_IOCB_RSP_SIZE; + pring->fast_iotag = 0; + pring->iotag_ctr = 0; + pring->iotag_max = 4096; + pring->lpfc_sli_rcv_async_status = + lpfc_sli_async_event_handler; + pring->num_mask = LPFC_MAX_RING_MASK; + pring->prt[0].profile = 0; /* Mask 0 */ + pring->prt[0].rctl = FC_RCTL_ELS_REQ; + pring->prt[0].type = FC_TYPE_ELS; + pring->prt[0].lpfc_sli_rcv_unsol_event = + lpfc_els_unsol_event; + pring->prt[1].profile = 0; /* Mask 1 */ + pring->prt[1].rctl = FC_RCTL_ELS_REP; + pring->prt[1].type = FC_TYPE_ELS; + pring->prt[1].lpfc_sli_rcv_unsol_event = + lpfc_els_unsol_event; + pring->prt[2].profile = 0; /* Mask 2 */ + /* NameServer Inquiry */ + pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; + /* NameServer */ + pring->prt[2].type = FC_TYPE_CT; + pring->prt[2].lpfc_sli_rcv_unsol_event = + lpfc_ct_unsol_event; + pring->prt[3].profile = 0; /* Mask 3 */ + /* NameServer response */ + pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; + /* NameServer */ + pring->prt[3].type = FC_TYPE_CT; + pring->prt[3].lpfc_sli_rcv_unsol_event = + lpfc_ct_unsol_event; + break; + } + totiocbsize += (pring->sli.sli3.numCiocb * + pring->sli.sli3.sizeCiocb) + + (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb); + } + if (totiocbsize > MAX_SLIM_IOCB_SIZE) { + /* Too many cmd / rsp ring entries in SLI2 SLIM */ + printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " + "SLI2 SLIM Data: x%x x%lx\n", + phba->brd_no, totiocbsize, + (unsigned long) MAX_SLIM_IOCB_SIZE); + } + if (phba->cfg_multi_ring_support == 2) + lpfc_extra_ring_setup(phba); + + return 0; +} + +/** + * lpfc_sli4_queue_init - Queue initialization function + * @phba: Pointer to HBA context object. + * + * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each + * ring. This function also initializes ring indices of each ring. + * This function is called during the initialization of the SLI + * interface of an HBA. + * This function is called with no lock held and always returns + * 1. + **/ +void +lpfc_sli4_queue_init(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli; + struct lpfc_sli_ring *pring; + int i; + + psli = &phba->sli; + spin_lock_irq(&phba->hbalock); + INIT_LIST_HEAD(&psli->mboxq); + INIT_LIST_HEAD(&psli->mboxq_cmpl); + /* Initialize list headers for txq and txcmplq as double linked lists */ + for (i = 0; i < phba->cfg_hdw_queue; i++) { + pring = phba->sli4_hba.hdwq[i].io_wq->pring; + pring->flag = 0; + pring->ringno = LPFC_FCP_RING; + pring->txcmplq_cnt = 0; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); + INIT_LIST_HEAD(&pring->iocb_continueq); + spin_lock_init(&pring->ring_lock); + } + pring = phba->sli4_hba.els_wq->pring; + pring->flag = 0; + pring->ringno = LPFC_ELS_RING; + pring->txcmplq_cnt = 0; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); + INIT_LIST_HEAD(&pring->iocb_continueq); + spin_lock_init(&pring->ring_lock); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + pring = phba->sli4_hba.nvmels_wq->pring; + pring->flag = 0; + pring->ringno = LPFC_ELS_RING; + pring->txcmplq_cnt = 0; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); + INIT_LIST_HEAD(&pring->iocb_continueq); + spin_lock_init(&pring->ring_lock); + } + + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli_queue_init - Queue initialization function + * @phba: Pointer to HBA context object. + * + * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each + * ring. This function also initializes ring indices of each ring. 
+ * This function is called during the initialization of the SLI + * interface of an HBA. + * This function is called with no lock held and always returns + * 1. + **/ +void +lpfc_sli_queue_init(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli; + struct lpfc_sli_ring *pring; + int i; + + psli = &phba->sli; + spin_lock_irq(&phba->hbalock); + INIT_LIST_HEAD(&psli->mboxq); + INIT_LIST_HEAD(&psli->mboxq_cmpl); + /* Initialize list headers for txq and txcmplq as double linked lists */ + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + pring->ringno = i; + pring->sli.sli3.next_cmdidx = 0; + pring->sli.sli3.local_getidx = 0; + pring->sli.sli3.cmdidx = 0; + INIT_LIST_HEAD(&pring->iocb_continueq); + INIT_LIST_HEAD(&pring->iocb_continue_saveq); + INIT_LIST_HEAD(&pring->postbufq); + pring->flag = 0; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); + spin_lock_init(&pring->ring_lock); + } + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system + * @phba: Pointer to HBA context object. + * + * This routine flushes the mailbox command subsystem. It will unconditionally + * flush all the mailbox commands in the three possible stages in the mailbox + * command sub-system: pending mailbox command queue; the outstanding mailbox + * command; and completed mailbox command queue. It is caller's responsibility + * to make sure that the driver is in the proper state to flush the mailbox + * command sub-system. Namely, the posting of mailbox commands into the + * pending mailbox command queue from the various clients must be stopped; + * either the HBA is in a state that it will never works on the outstanding + * mailbox command (such as in EEH or ERATT conditions) or the outstanding + * mailbox command has been completed. + **/ +static void +lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) +{ + LIST_HEAD(completions); + struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *pmb; + unsigned long iflag; + + /* Disable softirqs, including timers from obtaining phba->hbalock */ + local_bh_disable(); + + /* Flush all the mailbox commands in the mbox system */ + spin_lock_irqsave(&phba->hbalock, iflag); + + /* The pending mailbox command queue */ + list_splice_init(&phba->sli.mboxq, &completions); + /* The outstanding active mailbox command */ + if (psli->mbox_active) { + list_add_tail(&psli->mbox_active->list, &completions); + psli->mbox_active = NULL; + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + } + /* The completed mailbox command queue */ + list_splice_init(&phba->sli.mboxq_cmpl, &completions); + spin_unlock_irqrestore(&phba->hbalock, iflag); + + /* Enable softirqs again, done with phba->hbalock */ + local_bh_enable(); + + /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ + while (!list_empty(&completions)) { + list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); + pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; + if (pmb->mbox_cmpl) + pmb->mbox_cmpl(phba, pmb); + } +} + +/** + * lpfc_sli_host_down - Vport cleanup function + * @vport: Pointer to virtual port object. + * + * lpfc_sli_host_down is called to clean up the resources + * associated with a vport before destroying virtual + * port data structures. + * This function does following operations: + * - Free discovery resources associated with this virtual + * port. + * - Free iocbs associated with this virtual port in + * the txq. + * - Send abort for all iocb commands associated with this + * vport in txcmplq. 
+ * + * This function is called with no lock held and always returns 1. + **/ +int +lpfc_sli_host_down(struct lpfc_vport *vport) +{ + LIST_HEAD(completions); + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_queue *qp = NULL; + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *iocb, *next_iocb; + int i; + unsigned long flags = 0; + uint16_t prev_pring_flag; + + lpfc_cleanup_discovery_resources(vport); + + spin_lock_irqsave(&phba->hbalock, flags); + + /* + * Error everything on the txq since these iocbs + * have not been given to the FW yet. + * Also issue ABTS for everything on the txcmplq + */ + if (phba->sli_rev != LPFC_SLI_REV4) { + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + prev_pring_flag = pring->flag; + /* Only slow rings */ + if (pring->ringno == LPFC_ELS_RING) { + pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + } + list_for_each_entry_safe(iocb, next_iocb, + &pring->txq, list) { + if (iocb->vport != vport) + continue; + list_move_tail(&iocb->list, &completions); + } + list_for_each_entry_safe(iocb, next_iocb, + &pring->txcmplq, list) { + if (iocb->vport != vport) + continue; + lpfc_sli_issue_abort_iotag(phba, pring, iocb, + NULL); + } + pring->flag = prev_pring_flag; + } + } else { + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; + if (pring == phba->sli4_hba.els_wq->pring) { + pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + } + prev_pring_flag = pring->flag; + spin_lock(&pring->ring_lock); + list_for_each_entry_safe(iocb, next_iocb, + &pring->txq, list) { + if (iocb->vport != vport) + continue; + list_move_tail(&iocb->list, &completions); + } + spin_unlock(&pring->ring_lock); + list_for_each_entry_safe(iocb, next_iocb, + &pring->txcmplq, list) { + if (iocb->vport != vport) + continue; + lpfc_sli_issue_abort_iotag(phba, pring, iocb, + NULL); + } + pring->flag = prev_pring_flag; + } + } + spin_unlock_irqrestore(&phba->hbalock, flags); + + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + return 1; +} + +/** + * lpfc_sli_hba_down - Resource cleanup function for the HBA + * @phba: Pointer to HBA context object. + * + * This function cleans up all iocb, buffers, mailbox commands + * while shutting down the HBA. This function is called with no + * lock held and always returns 1. + * This function does the following to cleanup driver resources: + * - Free discovery resources for each virtual port + * - Cleanup any pending fabric iocbs + * - Iterate through the iocb txq and free each entry + * in the list. + * - Free up any buffer posted to the HBA + * - Free mailbox commands in the mailbox queue. 
+ **/ +int +lpfc_sli_hba_down(struct lpfc_hba *phba) +{ + LIST_HEAD(completions); + struct lpfc_sli *psli = &phba->sli; + struct lpfc_queue *qp = NULL; + struct lpfc_sli_ring *pring; + struct lpfc_dmabuf *buf_ptr; + unsigned long flags = 0; + int i; + + /* Shutdown the mailbox command sub-system */ + lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); + + lpfc_hba_down_prep(phba); + + /* Disable softirqs, including timers from obtaining phba->hbalock */ + local_bh_disable(); + + lpfc_fabric_abort_hba(phba); + + spin_lock_irqsave(&phba->hbalock, flags); + + /* + * Error everything on the txq since these iocbs + * have not been given to the FW yet. + */ + if (phba->sli_rev != LPFC_SLI_REV4) { + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + /* Only slow rings */ + if (pring->ringno == LPFC_ELS_RING) { + pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + } + list_splice_init(&pring->txq, &completions); + } + } else { + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; + spin_lock(&pring->ring_lock); + list_splice_init(&pring->txq, &completions); + spin_unlock(&pring->ring_lock); + if (pring == phba->sli4_hba.els_wq->pring) { + pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + } + } + } + spin_unlock_irqrestore(&phba->hbalock, flags); + + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + + spin_lock_irqsave(&phba->hbalock, flags); + list_splice_init(&phba->elsbuf, &completions); + phba->elsbuf_cnt = 0; + phba->elsbuf_prev_cnt = 0; + spin_unlock_irqrestore(&phba->hbalock, flags); + + while (!list_empty(&completions)) { + list_remove_head(&completions, buf_ptr, + struct lpfc_dmabuf, list); + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); + kfree(buf_ptr); + } + + /* Enable softirqs again, done with phba->hbalock */ + local_bh_enable(); + + /* Return any active mbox cmds */ + del_timer_sync(&psli->mbox_tmo); + + spin_lock_irqsave(&phba->pport->work_port_lock, flags); + phba->pport->work_port_events &= ~WORKER_MBOX_TMO; + spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); + + return 1; +} + +/** + * lpfc_sli_pcimem_bcopy - SLI memory copy function + * @srcp: Source memory pointer. + * @destp: Destination memory pointer. + * @cnt: Number of words required to be copied. + * + * This function is used for copying data between driver memory + * and the SLI memory. This function also changes the endianness + * of each word if native endianness is different from SLI + * endianness. This function can be called with or without + * lock. + **/ +void +lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) +{ + uint32_t *src = srcp; + uint32_t *dest = destp; + uint32_t ldata; + int i; + + for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { + ldata = *src; + ldata = le32_to_cpu(ldata); + *dest = ldata; + src++; + dest++; + } +} + + +/** + * lpfc_sli_bemem_bcopy - SLI memory copy function + * @srcp: Source memory pointer. + * @destp: Destination memory pointer. + * @cnt: Number of words required to be copied. + * + * This function is used for copying data between a data structure + * with big endian representation to local endianness. + * This function can be called with or without lock. 
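+ *
+ * For example, a caller converting a big endian firmware record and a
+ * little endian SLI2 mailbox image into host order might do the
+ * following (an illustrative sketch only; fw_rec and sli2_mbox are
+ * hypothetical source buffers). Note that, as in lpfc_sli_pcimem_bcopy
+ * above, @cnt is consumed as a byte count: the copy loop advances by
+ * sizeof(uint32_t) until it reaches @cnt.
+ *
+ *	uint32_t host_rec[16];
+ *	uint32_t host_mbox[MAILBOX_CMD_SIZE / sizeof(uint32_t)];
+ *
+ *	lpfc_sli_bemem_bcopy(fw_rec, host_rec, sizeof(host_rec));
+ *	lpfc_sli_pcimem_bcopy(sli2_mbox, host_mbox, MAILBOX_CMD_SIZE);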
+ **/ +void +lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) +{ + uint32_t *src = srcp; + uint32_t *dest = destp; + uint32_t ldata; + int i; + + for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { + ldata = *src; + ldata = be32_to_cpu(ldata); + *dest = ldata; + src++; + dest++; + } +} + +/** + * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @mp: Pointer to driver buffer object. + * + * This function is called with no lock held. + * It always return zero after adding the buffer to the postbufq + * buffer list. + **/ +int +lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_dmabuf *mp) +{ + /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up + later */ + spin_lock_irq(&phba->hbalock); + list_add_tail(&mp->list, &pring->postbufq); + pring->postbufq_cnt++; + spin_unlock_irq(&phba->hbalock); + return 0; +} + +/** + * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer + * @phba: Pointer to HBA context object. + * + * When HBQ is enabled, buffers are searched based on tags. This function + * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The + * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag + * does not conflict with tags of buffer posted for unsolicited events. + * The function returns the allocated tag. The function is called with + * no locks held. + **/ +uint32_t +lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) +{ + spin_lock_irq(&phba->hbalock); + phba->buffer_tag_count++; + /* + * Always set the QUE_BUFTAG_BIT to distiguish between + * a tag assigned by HBQ. + */ + phba->buffer_tag_count |= QUE_BUFTAG_BIT; + spin_unlock_irq(&phba->hbalock); + return phba->buffer_tag_count; +} + +/** + * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @tag: Buffer tag. + * + * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq + * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX + * iocb is posted to the response ring with the tag of the buffer. + * This function searches the pring->postbufq list using the tag + * to find buffer associated with CMD_IOCB_RET_XRI64_CX + * iocb. If the buffer is found then lpfc_dmabuf object of the + * buffer is returned to the caller else NULL is returned. + * This function is called with no lock held. + **/ +struct lpfc_dmabuf * +lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + uint32_t tag) +{ + struct lpfc_dmabuf *mp, *next_mp; + struct list_head *slp = &pring->postbufq; + + /* Search postbufq, from the beginning, looking for a match on tag */ + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { + if (mp->buffer_tag == tag) { + list_del_init(&mp->list); + pring->postbufq_cnt--; + spin_unlock_irq(&phba->hbalock); + return mp; + } + } + + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0402 Cannot find virtual addr for buffer tag on " + "ring %d Data x%lx x%px x%px x%x\n", + pring->ringno, (unsigned long) tag, + slp->next, slp->prev, pring->postbufq_cnt); + + return NULL; +} + +/** + * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. 
+ * @phys: DMA address of the buffer. + * + * This function searches the buffer list using the dma_address + * of unsolicited event to find the driver's lpfc_dmabuf object + * corresponding to the dma_address. The function returns the + * lpfc_dmabuf object if a buffer is found else it returns NULL. + * This function is called by the ct and els unsolicited event + * handlers to get the buffer associated with the unsolicited + * event. + * + * This function is called with no lock held. + **/ +struct lpfc_dmabuf * +lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + dma_addr_t phys) +{ + struct lpfc_dmabuf *mp, *next_mp; + struct list_head *slp = &pring->postbufq; + + /* Search postbufq, from the beginning, looking for a match on phys */ + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { + if (mp->phys == phys) { + list_del_init(&mp->list); + pring->postbufq_cnt--; + spin_unlock_irq(&phba->hbalock); + return mp; + } + } + + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0410 Cannot find virtual addr for mapped buf on " + "ring %d Data x%llx x%px x%px x%x\n", + pring->ringno, (unsigned long long)phys, + slp->next, slp->prev, pring->postbufq_cnt); + return NULL; +} + +/** + * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs + * @phba: Pointer to HBA context object. + * @cmdiocb: Pointer to driver command iocb object. + * @rspiocb: Pointer to driver response iocb object. + * + * This function is the completion handler for the abort iocbs for + * ELS commands. This function is called from the ELS ring event + * handler with no lock held. This function frees memory resources + * associated with the abort iocb. + **/ +static void +lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + u32 ulp_word4 = get_job_word4(phba, rspiocb); + u8 cmnd = get_job_cmnd(phba, cmdiocb); + + if (ulp_status) { + /* + * Assume that the port already completed and returned, or + * will return the iocb. Just Log the message. + */ + if (phba->sli_rev < LPFC_SLI_REV4) { + if (cmnd == CMD_ABORT_XRI_CX && + ulp_status == IOSTAT_LOCAL_REJECT && + ulp_word4 == IOERR_ABORT_REQUESTED) { + goto release_iocb; + } + } + + lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, + "0327 Cannot abort els iocb x%px " + "with io cmd xri %x abort tag : x%x, " + "abort status %x abort code %x\n", + cmdiocb, get_job_abtsiotag(phba, cmdiocb), + (phba->sli_rev == LPFC_SLI_REV4) ? + get_wqe_reqtag(cmdiocb) : + cmdiocb->iocb.un.acxri.abortContextTag, + ulp_status, ulp_word4); + + } +release_iocb: + lpfc_sli_release_iocbq(phba, cmdiocb); + return; +} + +/** + * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command + * @phba: Pointer to HBA context object. + * @cmdiocb: Pointer to driver command iocb object. + * @rspiocb: Pointer to driver response iocb object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for ELS commands + * which are aborted. The function frees memory resources used for + * the aborted ELS commands. 
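+ *
+ * For example, the abort path further below simply repoints a command's
+ * completion handler at this routine while the port is unloading, so
+ * that when the command finally completes only its resources are freed
+ * (fragment as used in lpfc_sli_issue_abort_iotag):
+ *
+ *	if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
+ *		cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
+ *	else
+ *		cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;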
+ **/ +void +lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_nodelist *ndlp = cmdiocb->ndlp; + IOCB_t *irsp; + LPFC_MBOXQ_t *mbox; + u32 ulp_command, ulp_status, ulp_word4, iotag; + + ulp_command = get_job_cmnd(phba, cmdiocb); + ulp_status = get_job_ulpstatus(phba, rspiocb); + ulp_word4 = get_job_word4(phba, rspiocb); + + if (phba->sli_rev == LPFC_SLI_REV4) { + iotag = get_wqe_reqtag(cmdiocb); + } else { + irsp = &rspiocb->iocb; + iotag = irsp->ulpIoTag; + + /* It is possible a PLOGI_RJT for NPIV ports to get aborted. + * The MBX_REG_LOGIN64 mbox command is freed back to the + * mbox_mem_pool here. + */ + if (cmdiocb->context_un.mbox) { + mbox = cmdiocb->context_un.mbox; + lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); + cmdiocb->context_un.mbox = NULL; + } + } + + /* ELS cmd tag completes */ + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "0139 Ignoring ELS cmd code x%x completion Data: " + "x%x x%x x%x x%px\n", + ulp_command, ulp_status, ulp_word4, iotag, + cmdiocb->ndlp); + /* + * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp + * if exchange is busy. + */ + if (ulp_command == CMD_GEN_REQUEST64_CR) + lpfc_ct_free_iocb(phba, cmdiocb); + else + lpfc_els_free_iocb(phba, cmdiocb); + + lpfc_nlp_put(ndlp); +} + +/** + * lpfc_sli_issue_abort_iotag - Abort function for a command iocb + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @cmdiocb: Pointer to driver command iocb object. + * @cmpl: completion function. + * + * This function issues an abort iocb for the provided command iocb. In case + * of unloading, the abort iocb will not be issued to commands on the ELS + * ring. Instead, the callback function shall be changed to those commands + * so that nothing happens when them finishes. This function is called with + * hbalock held andno ring_lock held (SLI4). The function returns IOCB_SUCCESS + * when the command iocb is an abort request. + * + **/ +int +lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *cmdiocb, void *cmpl) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct lpfc_iocbq *abtsiocbp; + int retval = IOCB_ERROR; + unsigned long iflags; + struct lpfc_nodelist *ndlp = NULL; + u32 ulp_command = get_job_cmnd(phba, cmdiocb); + u16 ulp_context, iotag; + bool ia; + + /* + * There are certain command types we don't want to abort. And we + * don't want to abort commands that are already in the process of + * being aborted. + */ + if (ulp_command == CMD_ABORT_XRI_WQE || + ulp_command == CMD_ABORT_XRI_CN || + ulp_command == CMD_CLOSE_XRI_CN || + cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED) + return IOCB_ABORTING; + + if (!pring) { + if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) + cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl; + else + cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl; + return retval; + } + + /* + * If we're unloading, don't abort iocb on the ELS ring, but change + * the callback so that nothing happens when it finishes. 
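+ * No abort request is built in that case: the routine just swaps
+ * lpfc_ignore_els_cmpl into cmd_cmpl (or into fabric_cmd_cmpl for
+ * fabric I/O) and returns IOCB_ERROR to the caller.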
+ */ + if ((vport->load_flag & FC_UNLOADING) && + pring->ringno == LPFC_ELS_RING) { + if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) + cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl; + else + cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl; + return retval; + } + + /* issue ABTS for this IOCB based on iotag */ + abtsiocbp = __lpfc_sli_get_iocbq(phba); + if (abtsiocbp == NULL) + return IOCB_NORESOURCE; + + /* This signals the response to set the correct status + * before calling the completion handler + */ + cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED; + + if (phba->sli_rev == LPFC_SLI_REV4) { + ulp_context = cmdiocb->sli4_xritag; + iotag = abtsiocbp->iotag; + } else { + iotag = cmdiocb->iocb.ulpIoTag; + if (pring->ringno == LPFC_ELS_RING) { + ndlp = cmdiocb->ndlp; + ulp_context = ndlp->nlp_rpi; + } else { + ulp_context = cmdiocb->iocb.ulpContext; + } + } + + if (phba->link_state < LPFC_LINK_UP || + (phba->sli_rev == LPFC_SLI_REV4 && + phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) || + (phba->link_flag & LS_EXTERNAL_LOOPBACK)) + ia = true; + else + ia = false; + + lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag, + cmdiocb->iocb.ulpClass, + LPFC_WQE_CQ_ID_DEFAULT, ia, false); + + abtsiocbp->vport = vport; + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; + if (cmdiocb->cmd_flag & LPFC_IO_FCP) + abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX); + + if (cmdiocb->cmd_flag & LPFC_IO_FOF) + abtsiocbp->cmd_flag |= LPFC_IO_FOF; + + if (cmpl) + abtsiocbp->cmd_cmpl = cmpl; + else + abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl; + abtsiocbp->vport = vport; + + if (phba->sli_rev == LPFC_SLI_REV4) { + pring = lpfc_sli4_calc_ring(phba, abtsiocbp); + if (unlikely(pring == NULL)) + goto abort_iotag_exit; + /* Note: both hbalock and ring_lock need to be set here */ + spin_lock_irqsave(&pring->ring_lock, iflags); + retval = __lpfc_sli_issue_iocb(phba, pring->ringno, + abtsiocbp, 0); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + } else { + retval = __lpfc_sli_issue_iocb(phba, pring->ringno, + abtsiocbp, 0); + } + +abort_iotag_exit: + + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "0339 Abort IO XRI x%x, Original iotag x%x, " + "abort tag x%x Cmdjob : x%px Abortjob : x%px " + "retval x%x\n", + ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ? + cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp, + retval); + if (retval) { + cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED; + __lpfc_sli_release_iocbq(phba, abtsiocbp); + } + + /* + * Caller to this routine should check for IOCB_ERROR + * and handle it properly. This routine no longer removes + * iocb off txcmplq and call compl in case of IOCB_ERROR. + */ + return retval; +} + +/** + * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. + * @phba: pointer to lpfc HBA data structure. + * + * This routine will abort all pending and outstanding iocbs to an HBA. + **/ +void +lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + struct lpfc_queue *qp = NULL; + int i; + + if (phba->sli_rev != LPFC_SLI_REV4) { + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + lpfc_sli_abort_iocb_ring(phba, pring); + } + return; + } + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; + lpfc_sli_abort_iocb_ring(phba, pring); + } +} + +/** + * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts + * @iocbq: Pointer to iocb object. 
+ * @vport: Pointer to driver virtual port object. + * + * This function acts as an iocb filter for functions which abort FCP iocbs. + * + * Return values + * -ENODEV, if a null iocb or vport ptr is encountered + * -EINVAL, if the iocb is not an FCP I/O, not on the TX cmpl queue, premarked as + * driver already started the abort process, or is an abort iocb itself + * 0, passes criteria for aborting the FCP I/O iocb + **/ +static int +lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq, + struct lpfc_vport *vport) +{ + u8 ulp_command; + + /* No null ptr vports */ + if (!iocbq || iocbq->vport != vport) + return -ENODEV; + + /* iocb must be for FCP IO, already exists on the TX cmpl queue, + * can't be premarked as driver aborted, nor be an ABORT iocb itself + */ + ulp_command = get_job_cmnd(vport->phba, iocbq); + if (!(iocbq->cmd_flag & LPFC_IO_FCP) || + !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) || + (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) || + (ulp_command == CMD_ABORT_XRI_CN || + ulp_command == CMD_CLOSE_XRI_CN || + ulp_command == CMD_ABORT_XRI_WQE)) + return -EINVAL; + + return 0; +} + +/** + * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target + * @iocbq: Pointer to driver iocb object. + * @vport: Pointer to driver virtual port object. + * @tgt_id: SCSI ID of the target. + * @lun_id: LUN ID of the scsi device. + * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST + * + * This function acts as an iocb filter for validating a lun/SCSI target/SCSI + * host. + * + * It will return + * 0 if the filtering criteria is met for the given iocb and will return + * 1 if the filtering criteria is not met. + * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the + * given iocb is for the SCSI device specified by vport, tgt_id and + * lun_id parameter. + * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the + * given iocb is for the SCSI target specified by vport and tgt_id + * parameters. + * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the + * given iocb is for the SCSI host associated with the given vport. + * This function is called with no locks held. + **/ +static int +lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, + uint16_t tgt_id, uint64_t lun_id, + lpfc_ctx_cmd ctx_cmd) +{ + struct lpfc_io_buf *lpfc_cmd; + int rc = 1; + + lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); + + if (lpfc_cmd->pCmd == NULL) + return rc; + + switch (ctx_cmd) { + case LPFC_CTX_LUN: + if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && + (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && + (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) + rc = 0; + break; + case LPFC_CTX_TGT: + if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && + (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) + rc = 0; + break; + case LPFC_CTX_HOST: + rc = 0; + break; + default: + printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", + __func__, ctx_cmd); + break; + } + + return rc; +} + +/** + * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending + * @vport: Pointer to virtual port. + * @tgt_id: SCSI ID of the target. + * @lun_id: LUN ID of the scsi device. + * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. + * + * This function returns number of FCP commands pending for the vport. + * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP + * commands pending on the vport associated with SCSI device specified + * by tgt_id and lun_id parameters. 
+ * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP + * commands pending on the vport associated with SCSI target specified + * by tgt_id parameter. + * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP + * commands pending on the vport. + * This function returns the number of iocbs which satisfy the filter. + * This function is called without any lock held. + **/ +int +lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, + lpfc_ctx_cmd ctx_cmd) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *iocbq; + int sum, i; + unsigned long iflags; + u8 ulp_command; + + spin_lock_irqsave(&phba->hbalock, iflags); + for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { + iocbq = phba->sli.iocbq_lookup[i]; + + if (!iocbq || iocbq->vport != vport) + continue; + if (!(iocbq->cmd_flag & LPFC_IO_FCP) || + !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) + continue; + + /* Include counting outstanding aborts */ + ulp_command = get_job_cmnd(phba, iocbq); + if (ulp_command == CMD_ABORT_XRI_CN || + ulp_command == CMD_CLOSE_XRI_CN || + ulp_command == CMD_ABORT_XRI_WQE) { + sum++; + continue; + } + + if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, + ctx_cmd) == 0) + sum++; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + + return sum; +} + +/** + * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs + * @phba: Pointer to HBA context object + * @cmdiocb: Pointer to command iocb object. + * @rspiocb: Pointer to response iocb object. + * + * This function is called when an aborted FCP iocb completes. This + * function is called by the ring event handler with no lock held. + * This function frees the iocb. + **/ +void +lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3096 ABORT_XRI_CX completing on rpi x%x " + "original iotag x%x, abort cmd iotag x%x " + "status 0x%x, reason 0x%x\n", + (phba->sli_rev == LPFC_SLI_REV4) ? + cmdiocb->sli4_xritag : + cmdiocb->iocb.un.acxri.abortContextTag, + get_job_abtsiotag(phba, cmdiocb), + cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb), + get_job_word4(phba, rspiocb)); + lpfc_sli_release_iocbq(phba, cmdiocb); + return; +} + +/** + * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN + * @vport: Pointer to virtual port. + * @tgt_id: SCSI ID of the target. + * @lun_id: LUN ID of the scsi device. + * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. + * + * This function sends an abort command for every SCSI command + * associated with the given virtual port pending on the ring + * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then + * lpfc_sli_validate_fcp_iocb function. The ordering for validation before + * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort + * followed by lpfc_sli_validate_fcp_iocb. + * + * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the + * FCP iocbs associated with lun specified by tgt_id and lun_id + * parameters + * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the + * FCP iocbs associated with SCSI target specified by tgt_id parameter. + * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all + * FCP iocbs associated with virtual port. + * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4 + * lpfc_sli4_calc_ring is used. + * This function returns number of iocbs it failed to abort. + * This function is called with no locks held. 
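+ *
+ * For example, a target level recovery path could count what is still
+ * pending and then request aborts for all of it (an illustrative
+ * sketch; vport and tgt_id are supplied by the caller, and the LUN
+ * argument is ignored for LPFC_CTX_TGT):
+ *
+ *	int pending = lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT);
+ *	int not_aborted = lpfc_sli_abort_iocb(vport, tgt_id, 0,
+ *					      LPFC_CTX_TGT);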
+ **/ +int +lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id, + lpfc_ctx_cmd abort_cmd) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli_ring *pring = NULL; + struct lpfc_iocbq *iocbq; + int errcnt = 0, ret_val = 0; + unsigned long iflags; + int i; + + /* all I/Os are in process of being flushed */ + if (phba->hba_flag & HBA_IOQ_FLUSH) + return errcnt; + + for (i = 1; i <= phba->sli.last_iotag; i++) { + iocbq = phba->sli.iocbq_lookup[i]; + + if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport)) + continue; + + if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, + abort_cmd) != 0) + continue; + + spin_lock_irqsave(&phba->hbalock, iflags); + if (phba->sli_rev == LPFC_SLI_REV3) { + pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; + } else if (phba->sli_rev == LPFC_SLI_REV4) { + pring = lpfc_sli4_calc_ring(phba, iocbq); + } + ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq, + lpfc_sli_abort_fcp_cmpl); + spin_unlock_irqrestore(&phba->hbalock, iflags); + if (ret_val != IOCB_SUCCESS) + errcnt++; + } + + return errcnt; +} + +/** + * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN + * @vport: Pointer to virtual port. + * @pring: Pointer to driver SLI ring object. + * @tgt_id: SCSI ID of the target. + * @lun_id: LUN ID of the scsi device. + * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. + * + * This function sends an abort command for every SCSI command + * associated with the given virtual port pending on the ring + * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then + * lpfc_sli_validate_fcp_iocb function. The ordering for validation before + * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort + * followed by lpfc_sli_validate_fcp_iocb. + * + * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the + * FCP iocbs associated with lun specified by tgt_id and lun_id + * parameters + * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the + * FCP iocbs associated with SCSI target specified by tgt_id parameter. + * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all + * FCP iocbs associated with virtual port. + * This function returns number of iocbs it aborted . + * This function is called with no locks held right after a taskmgmt + * command is sent. 
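+ *
+ * For example, after sending a LUN reset a caller might issue ABTS for
+ * whatever remains outstanding on that LUN (an illustrative sketch; the
+ * pring argument is only dereferenced on the non-SLI-4 path, while
+ * SLI-4 looks up the work queue ring per I/O):
+ *
+ *	int aborted = lpfc_sli_abort_taskmgmt(vport,
+ *				&phba->sli.sli3_ring[LPFC_FCP_RING],
+ *				tgt_id, lun_id, LPFC_CTX_LUN);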
+ **/ +int +lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, + uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_io_buf *lpfc_cmd; + struct lpfc_iocbq *abtsiocbq; + struct lpfc_nodelist *ndlp = NULL; + struct lpfc_iocbq *iocbq; + int sum, i, ret_val; + unsigned long iflags; + struct lpfc_sli_ring *pring_s4 = NULL; + u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT; + bool ia; + + spin_lock_irqsave(&phba->hbalock, iflags); + + /* all I/Os are in process of being flushed */ + if (phba->hba_flag & HBA_IOQ_FLUSH) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + return 0; + } + sum = 0; + + for (i = 1; i <= phba->sli.last_iotag; i++) { + iocbq = phba->sli.iocbq_lookup[i]; + + if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport)) + continue; + + if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, + cmd) != 0) + continue; + + /* Guard against IO completion being called at same time */ + lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); + spin_lock(&lpfc_cmd->buf_lock); + + if (!lpfc_cmd->pCmd) { + spin_unlock(&lpfc_cmd->buf_lock); + continue; + } + + if (phba->sli_rev == LPFC_SLI_REV4) { + pring_s4 = + phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring; + if (!pring_s4) { + spin_unlock(&lpfc_cmd->buf_lock); + continue; + } + /* Note: both hbalock and ring_lock must be set here */ + spin_lock(&pring_s4->ring_lock); + } + + /* + * If the iocbq is already being aborted, don't take a second + * action, but do count it. + */ + if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) || + !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring_s4->ring_lock); + spin_unlock(&lpfc_cmd->buf_lock); + continue; + } + + /* issue ABTS for this IOCB based on iotag */ + abtsiocbq = __lpfc_sli_get_iocbq(phba); + if (!abtsiocbq) { + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring_s4->ring_lock); + spin_unlock(&lpfc_cmd->buf_lock); + continue; + } + + if (phba->sli_rev == LPFC_SLI_REV4) { + iotag = abtsiocbq->iotag; + ulp_context = iocbq->sli4_xritag; + cqid = lpfc_cmd->hdwq->io_cq_map; + } else { + iotag = iocbq->iocb.ulpIoTag; + if (pring->ringno == LPFC_ELS_RING) { + ndlp = iocbq->ndlp; + ulp_context = ndlp->nlp_rpi; + } else { + ulp_context = iocbq->iocb.ulpContext; + } + } + + ndlp = lpfc_cmd->rdata->pnode; + + if (lpfc_is_link_up(phba) && + (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) && + !(phba->link_flag & LS_EXTERNAL_LOOPBACK)) + ia = false; + else + ia = true; + + lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag, + iocbq->iocb.ulpClass, cqid, + ia, false); + + abtsiocbq->vport = vport; + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abtsiocbq->hba_wqidx = iocbq->hba_wqidx; + if (iocbq->cmd_flag & LPFC_IO_FCP) + abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX; + if (iocbq->cmd_flag & LPFC_IO_FOF) + abtsiocbq->cmd_flag |= LPFC_IO_FOF; + + /* Setup callback routine and issue the command. */ + abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl; + + /* + * Indicate the IO is being aborted by the driver and set + * the caller's flag into the aborted IO. 
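+ * Once LPFC_DRIVER_ABORTED is set here, the "already being aborted"
+ * check earlier in this loop will skip the same I/O on any later pass.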
+ */ + iocbq->cmd_flag |= LPFC_DRIVER_ABORTED; + + if (phba->sli_rev == LPFC_SLI_REV4) { + ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, + abtsiocbq, 0); + spin_unlock(&pring_s4->ring_lock); + } else { + ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, + abtsiocbq, 0); + } + + spin_unlock(&lpfc_cmd->buf_lock); + + if (ret_val == IOCB_ERROR) + __lpfc_sli_release_iocbq(phba, abtsiocbq); + else + sum++; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + return sum; +} + +/** + * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler + * @phba: Pointer to HBA context object. + * @cmdiocbq: Pointer to command iocb. + * @rspiocbq: Pointer to response iocb. + * + * This function is the completion handler for iocbs issued using + * lpfc_sli_issue_iocb_wait function. This function is called by the + * ring event handler function without any lock held. This function + * can be called from both worker thread context and interrupt + * context. This function also can be called from other thread which + * cleans up the SLI layer objects. + * This function copy the contents of the response iocb to the + * response iocb memory object provided by the caller of + * lpfc_sli_issue_iocb_wait and then wakes up the thread which + * sleeps for the iocb completion. + **/ +static void +lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocbq, + struct lpfc_iocbq *rspiocbq) +{ + wait_queue_head_t *pdone_q; + unsigned long iflags; + struct lpfc_io_buf *lpfc_cmd; + size_t offset = offsetof(struct lpfc_iocbq, wqe); + + spin_lock_irqsave(&phba->hbalock, iflags); + if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) { + + /* + * A time out has occurred for the iocb. If a time out + * completion handler has been supplied, call it. Otherwise, + * just free the iocbq. + */ + + spin_unlock_irqrestore(&phba->hbalock, iflags); + cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl; + cmdiocbq->wait_cmd_cmpl = NULL; + if (cmdiocbq->cmd_cmpl) + cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL); + else + lpfc_sli_release_iocbq(phba, cmdiocbq); + return; + } + + /* Copy the contents of the local rspiocb into the caller's buffer. */ + cmdiocbq->cmd_flag |= LPFC_IO_WAKE; + if (cmdiocbq->rsp_iocb && rspiocbq) + memcpy((char *)cmdiocbq->rsp_iocb + offset, + (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset); + + /* Set the exchange busy flag for task management commands */ + if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) && + !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) { + lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, + cur_iocbq); + if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY)) + lpfc_cmd->flags |= LPFC_SBUF_XBUSY; + else + lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; + } + + pdone_q = cmdiocbq->context_un.wait_queue; + if (pdone_q) + wake_up(pdone_q); + spin_unlock_irqrestore(&phba->hbalock, iflags); + return; +} + +/** + * lpfc_chk_iocb_flg - Test IOCB flag with lock held. + * @phba: Pointer to HBA context object.. + * @piocbq: Pointer to command iocb. + * @flag: Flag to test. + * + * This routine grabs the hbalock and then test the cmd_flag to + * see if the passed in flag is set. + * Returns: + * 1 if flag is set. + * 0 if flag is not set. 
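+ *
+ * This is the predicate the synchronous issue path sleeps on, for
+ * example:
+ *
+ *	wait_event_timeout(done_q,
+ *			   lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
+ *			   timeout_req);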
+ **/ +static int +lpfc_chk_iocb_flg(struct lpfc_hba *phba, + struct lpfc_iocbq *piocbq, uint32_t flag) +{ + unsigned long iflags; + int ret; + + spin_lock_irqsave(&phba->hbalock, iflags); + ret = piocbq->cmd_flag & flag; + spin_unlock_irqrestore(&phba->hbalock, iflags); + return ret; + +} + +/** + * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands + * @phba: Pointer to HBA context object.. + * @ring_number: Ring number + * @piocb: Pointer to command iocb. + * @prspiocbq: Pointer to response iocb. + * @timeout: Timeout in number of seconds. + * + * This function issues the iocb to firmware and waits for the + * iocb to complete. The cmd_cmpl field of the shall be used + * to handle iocbs which time out. If the field is NULL, the + * function shall free the iocbq structure. If more clean up is + * needed, the caller is expected to provide a completion function + * that will provide the needed clean up. If the iocb command is + * not completed within timeout seconds, the function will either + * free the iocbq structure (if cmd_cmpl == NULL) or execute the + * completion function set in the cmd_cmpl field and then return + * a status of IOCB_TIMEDOUT. The caller should not free the iocb + * resources if this function returns IOCB_TIMEDOUT. + * The function waits for the iocb completion using an + * non-interruptible wait. + * This function will sleep while waiting for iocb completion. + * So, this function should not be called from any context which + * does not allow sleeping. Due to the same reason, this function + * cannot be called with interrupt disabled. + * This function assumes that the iocb completions occur while + * this function sleep. So, this function cannot be called from + * the thread which process iocb completion for this ring. + * This function clears the cmd_flag of the iocb object before + * issuing the iocb and the iocb completion handler sets this + * flag and wakes this thread when the iocb completes. + * The contents of the response iocb will be copied to prspiocbq + * by the completion handler when the command completes. + * This function returns IOCB_SUCCESS when success. + * This function is called with no lock held. + **/ +int +lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, + uint32_t ring_number, + struct lpfc_iocbq *piocb, + struct lpfc_iocbq *prspiocbq, + uint32_t timeout) +{ + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); + long timeleft, timeout_req = 0; + int retval = IOCB_SUCCESS; + uint32_t creg_val; + struct lpfc_iocbq *iocb; + int txq_cnt = 0; + int txcmplq_cnt = 0; + struct lpfc_sli_ring *pring; + unsigned long iflags; + bool iocb_completed = true; + + if (phba->sli_rev >= LPFC_SLI_REV4) { + lpfc_sli_prep_wqe(phba, piocb); + + pring = lpfc_sli4_calc_ring(phba, piocb); + } else + pring = &phba->sli.sli3_ring[ring_number]; + /* + * If the caller has provided a response iocbq buffer, then rsp_iocb + * is NULL or its an error. 
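+ * In other words, piocb->rsp_iocb must still be NULL at this point; it
+ * is then loaned out to hold the caller's prspiocbq so the wake handler
+ * can copy the completed response into it.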
+ */ + if (prspiocbq) { + if (piocb->rsp_iocb) + return IOCB_ERROR; + piocb->rsp_iocb = prspiocbq; + } + + piocb->wait_cmd_cmpl = piocb->cmd_cmpl; + piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait; + piocb->context_un.wait_queue = &done_q; + piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); + + if (phba->cfg_poll & DISABLE_FCP_RING_INT) { + if (lpfc_readl(phba->HCregaddr, &creg_val)) + return IOCB_ERROR; + creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); + writel(creg_val, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } + + retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, + SLI_IOCB_RET_IOCB); + if (retval == IOCB_SUCCESS) { + timeout_req = msecs_to_jiffies(timeout * 1000); + timeleft = wait_event_timeout(done_q, + lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), + timeout_req); + spin_lock_irqsave(&phba->hbalock, iflags); + if (!(piocb->cmd_flag & LPFC_IO_WAKE)) { + + /* + * IOCB timed out. Inform the wake iocb wait + * completion function and set local status + */ + + iocb_completed = false; + piocb->cmd_flag |= LPFC_IO_WAKE_TMO; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + if (iocb_completed) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0331 IOCB wake signaled\n"); + /* Note: we are not indicating if the IOCB has a success + * status or not - that's for the caller to check. + * IOCB_SUCCESS means just that the command was sent and + * completed. Not that it completed successfully. + * */ + } else if (timeleft == 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0338 IOCB wait timeout error - no " + "wake response Data x%x\n", timeout); + retval = IOCB_TIMEDOUT; + } else { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0330 IOCB wake NOT set, " + "Data x%x x%lx\n", + timeout, (timeleft / jiffies)); + retval = IOCB_TIMEDOUT; + } + } else if (retval == IOCB_BUSY) { + if (phba->cfg_log_verbose & LOG_SLI) { + list_for_each_entry(iocb, &pring->txq, list) { + txq_cnt++; + } + list_for_each_entry(iocb, &pring->txcmplq, list) { + txcmplq_cnt++; + } + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", + phba->iocb_cnt, txq_cnt, txcmplq_cnt); + } + return retval; + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0332 IOCB wait issue failed, Data x%x\n", + retval); + retval = IOCB_ERROR; + } + + if (phba->cfg_poll & DISABLE_FCP_RING_INT) { + if (lpfc_readl(phba->HCregaddr, &creg_val)) + return IOCB_ERROR; + creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); + writel(creg_val, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } + + if (prspiocbq) + piocb->rsp_iocb = NULL; + + piocb->context_un.wait_queue = NULL; + piocb->cmd_cmpl = NULL; + return retval; +} + +/** + * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox + * @phba: Pointer to HBA context object. + * @pmboxq: Pointer to driver mailbox object. + * @timeout: Timeout in number of seconds. + * + * This function issues the mailbox to firmware and waits for the + * mailbox command to complete. If the mailbox command is not + * completed within timeout seconds, it returns MBX_TIMEOUT. + * The function waits for the mailbox completion using an + * interruptible wait. If the thread is woken up due to a + * signal, MBX_TIMEOUT error is returned to the caller. Caller + * should not free the mailbox resources, if this function returns + * MBX_TIMEOUT. + * This function will sleep while waiting for mailbox completion. + * So, this function should not be called from any context which + * does not allow sleeping. 
Due to the same reason, this function + * cannot be called with interrupt disabled. + * This function assumes that the mailbox completion occurs while + * this function sleep. So, this function cannot be called from + * the worker thread which processes mailbox completion. + * This function is called in the context of HBA management + * applications. + * This function returns MBX_SUCCESS when successful. + * This function is called with no lock held. + **/ +int +lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, + uint32_t timeout) +{ + struct completion mbox_done; + int retval; + unsigned long flag; + + pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; + /* setup wake call as IOCB callback */ + pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; + + /* setup context3 field to pass wait_queue pointer to wake function */ + init_completion(&mbox_done); + pmboxq->context3 = &mbox_done; + /* now issue the command */ + retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); + if (retval == MBX_BUSY || retval == MBX_SUCCESS) { + wait_for_completion_timeout(&mbox_done, + msecs_to_jiffies(timeout * 1000)); + + spin_lock_irqsave(&phba->hbalock, flag); + pmboxq->context3 = NULL; + /* + * if LPFC_MBX_WAKE flag is set the mailbox is completed + * else do not free the resources. + */ + if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { + retval = MBX_SUCCESS; + } else { + retval = MBX_TIMEOUT; + pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + } + spin_unlock_irqrestore(&phba->hbalock, flag); + } + return retval; +} + +/** + * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system + * @phba: Pointer to HBA context. + * @mbx_action: Mailbox shutdown options. + * + * This function is called to shutdown the driver's mailbox sub-system. + * It first marks the mailbox sub-system is in a block state to prevent + * the asynchronous mailbox command from issued off the pending mailbox + * command queue. If the mailbox command sub-system shutdown is due to + * HBA error conditions such as EEH or ERATT, this routine shall invoke + * the mailbox sub-system flush routine to forcefully bring down the + * mailbox sub-system. Otherwise, if it is due to normal condition (such + * as with offline or HBA function reset), this routine will wait for the + * outstanding mailbox command to complete before invoking the mailbox + * sub-system flush routine to gracefully bring down mailbox sub-system. + **/ +void +lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) +{ + struct lpfc_sli *psli = &phba->sli; + unsigned long timeout; + + if (mbx_action == LPFC_MBX_NO_WAIT) { + /* delay 100ms for port state */ + msleep(100); + lpfc_sli_mbox_sys_flush(phba); + return; + } + timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; + + /* Disable softirqs, including timers from obtaining phba->hbalock */ + local_bh_disable(); + + spin_lock_irq(&phba->hbalock); + psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; + + if (psli->sli_flag & LPFC_SLI_ACTIVE) { + /* Determine how long we might wait for the active mailbox + * command to be gracefully completed by firmware. 
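+ * The bound comes from lpfc_mbox_tmo_val() for the command that is
+ * active; the loop below then polls mbox_active every 2 ms until it
+ * clears or the deadline passes, after which lpfc_sli_mbox_sys_flush()
+ * reclaims it forcefully.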
+ */ + if (phba->sli.mbox_active) + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, + phba->sli.mbox_active) * + 1000) + jiffies; + spin_unlock_irq(&phba->hbalock); + + /* Enable softirqs again, done with phba->hbalock */ + local_bh_enable(); + + while (phba->sli.mbox_active) { + /* Check active mailbox complete status every 2ms */ + msleep(2); + if (time_after(jiffies, timeout)) + /* Timeout, let the mailbox flush routine to + * forcefully release active mailbox command + */ + break; + } + } else { + spin_unlock_irq(&phba->hbalock); + + /* Enable softirqs again, done with phba->hbalock */ + local_bh_enable(); + } + + lpfc_sli_mbox_sys_flush(phba); +} + +/** + * lpfc_sli_eratt_read - read sli-3 error attention events + * @phba: Pointer to HBA context. + * + * This function is called to read the SLI3 device error attention registers + * for possible error attention events. The caller must hold the hostlock + * with spin_lock_irq(). + * + * This function returns 1 when there is Error Attention in the Host Attention + * Register and returns 0 otherwise. + **/ +static int +lpfc_sli_eratt_read(struct lpfc_hba *phba) +{ + uint32_t ha_copy; + + /* Read chip Host Attention (HA) register */ + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + goto unplug_err; + + if (ha_copy & HA_ERATT) { + /* Read host status register to retrieve error event */ + if (lpfc_sli_read_hs(phba)) + goto unplug_err; + + /* Check if there is a deferred error condition is active */ + if ((HS_FFER1 & phba->work_hs) && + ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | + HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { + phba->hba_flag |= DEFER_ERATT; + /* Clear all interrupt enable conditions */ + writel(0, phba->HCregaddr); + readl(phba->HCregaddr); + } + + /* Set the driver HA work bitmap */ + phba->work_ha |= HA_ERATT; + /* Indicate polling handles this ERATT */ + phba->hba_flag |= HBA_ERATT_HANDLED; + return 1; + } + return 0; + +unplug_err: + /* Set the driver HS work bitmap */ + phba->work_hs |= UNPLUG_ERR; + /* Set the driver HA work bitmap */ + phba->work_ha |= HA_ERATT; + /* Indicate polling handles this ERATT */ + phba->hba_flag |= HBA_ERATT_HANDLED; + return 1; +} + +/** + * lpfc_sli4_eratt_read - read sli-4 error attention events + * @phba: Pointer to HBA context. + * + * This function is called to read the SLI4 device error attention registers + * for possible error attention events. The caller must hold the hostlock + * with spin_lock_irq(). + * + * This function returns 1 when there is Error Attention in the Host Attention + * Register and returns 0 otherwise. + **/ +static int +lpfc_sli4_eratt_read(struct lpfc_hba *phba) +{ + uint32_t uerr_sta_hi, uerr_sta_lo; + uint32_t if_type, portsmphr; + struct lpfc_register portstat_reg; + u32 logmask; + + /* + * For now, use the SLI4 device internal unrecoverable error + * registers for error attention. This can be changed later. 
+ */ + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + switch (if_type) { + case LPFC_SLI_INTF_IF_TYPE_0: + if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, + &uerr_sta_lo) || + lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, + &uerr_sta_hi)) { + phba->work_hs |= UNPLUG_ERR; + phba->work_ha |= HA_ERATT; + phba->hba_flag |= HBA_ERATT_HANDLED; + return 1; + } + if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || + (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1423 HBA Unrecoverable error: " + "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " + "ue_mask_lo_reg=0x%x, " + "ue_mask_hi_reg=0x%x\n", + uerr_sta_lo, uerr_sta_hi, + phba->sli4_hba.ue_mask_lo, + phba->sli4_hba.ue_mask_hi); + phba->work_status[0] = uerr_sta_lo; + phba->work_status[1] = uerr_sta_hi; + phba->work_ha |= HA_ERATT; + phba->hba_flag |= HBA_ERATT_HANDLED; + return 1; + } + break; + case LPFC_SLI_INTF_IF_TYPE_2: + case LPFC_SLI_INTF_IF_TYPE_6: + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + &portstat_reg.word0) || + lpfc_readl(phba->sli4_hba.PSMPHRregaddr, + &portsmphr)){ + phba->work_hs |= UNPLUG_ERR; + phba->work_ha |= HA_ERATT; + phba->hba_flag |= HBA_ERATT_HANDLED; + return 1; + } + if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { + phba->work_status[0] = + readl(phba->sli4_hba.u.if_type2.ERR1regaddr); + phba->work_status[1] = + readl(phba->sli4_hba.u.if_type2.ERR2regaddr); + logmask = LOG_TRACE_EVENT; + if (phba->work_status[0] == + SLIPORT_ERR1_REG_ERR_CODE_2 && + phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART) + logmask = LOG_SLI; + lpfc_printf_log(phba, KERN_ERR, logmask, + "2885 Port Status Event: " + "port status reg 0x%x, " + "port smphr reg 0x%x, " + "error 1=0x%x, error 2=0x%x\n", + portstat_reg.word0, + portsmphr, + phba->work_status[0], + phba->work_status[1]); + phba->work_ha |= HA_ERATT; + phba->hba_flag |= HBA_ERATT_HANDLED; + return 1; + } + break; + case LPFC_SLI_INTF_IF_TYPE_1: + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2886 HBA Error Attention on unsupported " + "if type %d.", if_type); + return 1; + } + + return 0; +} + +/** + * lpfc_sli_check_eratt - check error attention events + * @phba: Pointer to HBA context. + * + * This function is called from timer soft interrupt context to check HBA's + * error attention register bit for error attention events. + * + * This function returns 1 when there is Error Attention in the Host Attention + * Register and returns 0 otherwise. + **/ +int +lpfc_sli_check_eratt(struct lpfc_hba *phba) +{ + uint32_t ha_copy; + + /* If somebody is waiting to handle an eratt, don't process it + * here. The brdkill function will do this. 
+ */ + if (phba->link_flag & LS_IGNORE_ERATT) + return 0; + + /* Check if interrupt handler handles this ERATT */ + spin_lock_irq(&phba->hbalock); + if (phba->hba_flag & HBA_ERATT_HANDLED) { + /* Interrupt handler has handled ERATT */ + spin_unlock_irq(&phba->hbalock); + return 0; + } + + /* + * If there is deferred error attention, do not check for error + * attention + */ + if (unlikely(phba->hba_flag & DEFER_ERATT)) { + spin_unlock_irq(&phba->hbalock); + return 0; + } + + /* If PCI channel is offline, don't process it */ + if (unlikely(pci_channel_offline(phba->pcidev))) { + spin_unlock_irq(&phba->hbalock); + return 0; + } + + switch (phba->sli_rev) { + case LPFC_SLI_REV2: + case LPFC_SLI_REV3: + /* Read chip Host Attention (HA) register */ + ha_copy = lpfc_sli_eratt_read(phba); + break; + case LPFC_SLI_REV4: + /* Read device Uncoverable Error (UERR) registers */ + ha_copy = lpfc_sli4_eratt_read(phba); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0299 Invalid SLI revision (%d)\n", + phba->sli_rev); + ha_copy = 0; + break; + } + spin_unlock_irq(&phba->hbalock); + + return ha_copy; +} + +/** + * lpfc_intr_state_check - Check device state for interrupt handling + * @phba: Pointer to HBA context. + * + * This inline routine checks whether a device or its PCI slot is in a state + * that the interrupt should be handled. + * + * This function returns 0 if the device or the PCI slot is in a state that + * interrupt should be handled, otherwise -EIO. + */ +static inline int +lpfc_intr_state_check(struct lpfc_hba *phba) +{ + /* If the pci channel is offline, ignore all the interrupts */ + if (unlikely(pci_channel_offline(phba->pcidev))) + return -EIO; + + /* Update device level interrupt statistics */ + phba->sli.slistat.sli_intr++; + + /* Ignore all interrupts during initialization. */ + if (unlikely(phba->link_state < LPFC_LINK_DOWN)) + return -EIO; + + return 0; +} + +/** + * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device + * @irq: Interrupt number. + * @dev_id: The device context pointer. + * + * This function is directly called from the PCI layer as an interrupt + * service routine when device with SLI-3 interface spec is enabled with + * MSI-X multi-message interrupt mode and there are slow-path events in + * the HBA. However, when the device is enabled with either MSI or Pin-IRQ + * interrupt mode, this function is called as part of the device-level + * interrupt handler. When the PCI slot is in error recovery or the HBA + * is undergoing initialization, the interrupt handler will not process + * the interrupt. The link attention and ELS ring attention events are + * handled by the worker thread. The interrupt handler signals the worker + * thread and returns for these events. This function is called without + * any lock held. It gets the hbalock to access and update SLI data + * structures. + * + * This function returns IRQ_HANDLED when interrupt is handled else it + * returns IRQ_NONE. + **/ +irqreturn_t +lpfc_sli_sp_intr_handler(int irq, void *dev_id) +{ + struct lpfc_hba *phba; + uint32_t ha_copy, hc_copy; + uint32_t work_ha_copy; + unsigned long status; + unsigned long iflag; + uint32_t control; + + MAILBOX_t *mbox, *pmbox; + struct lpfc_vport *vport; + struct lpfc_nodelist *ndlp; + struct lpfc_dmabuf *mp; + LPFC_MBOXQ_t *pmb; + int rc; + + /* + * Get the driver's phba structure from the dev_id and + * assume the HBA is not interrupting. 
+ */ + phba = (struct lpfc_hba *)dev_id; + + if (unlikely(!phba)) + return IRQ_NONE; + + /* + * Stuff needs to be attented to when this function is invoked as an + * individual interrupt handler in MSI-X multi-message interrupt mode + */ + if (phba->intr_type == MSIX) { + /* Check device state for handling interrupt */ + if (lpfc_intr_state_check(phba)) + return IRQ_NONE; + /* Need to read HA REG for slow-path events */ + spin_lock_irqsave(&phba->hbalock, iflag); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + goto unplug_error; + /* If somebody is waiting to handle an eratt don't process it + * here. The brdkill function will do this. + */ + if (phba->link_flag & LS_IGNORE_ERATT) + ha_copy &= ~HA_ERATT; + /* Check the need for handling ERATT in interrupt handler */ + if (ha_copy & HA_ERATT) { + if (phba->hba_flag & HBA_ERATT_HANDLED) + /* ERATT polling has handled ERATT */ + ha_copy &= ~HA_ERATT; + else + /* Indicate interrupt handler handles ERATT */ + phba->hba_flag |= HBA_ERATT_HANDLED; + } + + /* + * If there is deferred error attention, do not check for any + * interrupt. + */ + if (unlikely(phba->hba_flag & DEFER_ERATT)) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + return IRQ_NONE; + } + + /* Clear up only attention source related to slow-path */ + if (lpfc_readl(phba->HCregaddr, &hc_copy)) + goto unplug_error; + + writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | + HC_LAINT_ENA | HC_ERINT_ENA), + phba->HCregaddr); + writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), + phba->HAregaddr); + writel(hc_copy, phba->HCregaddr); + readl(phba->HAregaddr); /* flush */ + spin_unlock_irqrestore(&phba->hbalock, iflag); + } else + ha_copy = phba->ha_copy; + + work_ha_copy = ha_copy & phba->work_ha_mask; + + if (work_ha_copy) { + if (work_ha_copy & HA_LATT) { + if (phba->sli.sli_flag & LPFC_PROCESS_LA) { + /* + * Turn off Link Attention interrupts + * until CLEAR_LA done + */ + spin_lock_irqsave(&phba->hbalock, iflag); + phba->sli.sli_flag &= ~LPFC_PROCESS_LA; + if (lpfc_readl(phba->HCregaddr, &control)) + goto unplug_error; + control &= ~HC_LAINT_ENA; + writel(control, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + spin_unlock_irqrestore(&phba->hbalock, iflag); + } + else + work_ha_copy &= ~HA_LATT; + } + + if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { + /* + * Turn off Slow Rings interrupts, LPFC_ELS_RING is + * the only slow ring. 
+ */ + status = (work_ha_copy & + (HA_RXMASK << (4*LPFC_ELS_RING))); + status >>= (4*LPFC_ELS_RING); + if (status & HA_RXMASK) { + spin_lock_irqsave(&phba->hbalock, iflag); + if (lpfc_readl(phba->HCregaddr, &control)) + goto unplug_error; + + lpfc_debugfs_slow_ring_trc(phba, + "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", + control, status, + (uint32_t)phba->sli.slistat.sli_intr); + + if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { + lpfc_debugfs_slow_ring_trc(phba, + "ISR Disable ring:" + "pwork:x%x hawork:x%x wait:x%x", + phba->work_ha, work_ha_copy, + (uint32_t)((unsigned long) + &phba->work_waitq)); + + control &= + ~(HC_R0INT_ENA << LPFC_ELS_RING); + writel(control, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } + else { + lpfc_debugfs_slow_ring_trc(phba, + "ISR slow ring: pwork:" + "x%x hawork:x%x wait:x%x", + phba->work_ha, work_ha_copy, + (uint32_t)((unsigned long) + &phba->work_waitq)); + } + spin_unlock_irqrestore(&phba->hbalock, iflag); + } + } + spin_lock_irqsave(&phba->hbalock, iflag); + if (work_ha_copy & HA_ERATT) { + if (lpfc_sli_read_hs(phba)) + goto unplug_error; + /* + * Check if there is a deferred error condition + * is active + */ + if ((HS_FFER1 & phba->work_hs) && + ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | + HS_FFER6 | HS_FFER7 | HS_FFER8) & + phba->work_hs)) { + phba->hba_flag |= DEFER_ERATT; + /* Clear all interrupt enable conditions */ + writel(0, phba->HCregaddr); + readl(phba->HCregaddr); + } + } + + if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { + pmb = phba->sli.mbox_active; + pmbox = &pmb->u.mb; + mbox = phba->mbox; + vport = pmb->vport; + + /* First check out the status word */ + lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); + if (pmbox->mbxOwner != OWN_HOST) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + /* + * Stray Mailbox Interrupt, mbxCommand + * mbxStatus + */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "(%d):0304 Stray Mailbox " + "Interrupt mbxCommand x%x " + "mbxStatus x%x\n", + (vport ? vport->vpi : 0), + pmbox->mbxCommand, + pmbox->mbxStatus); + /* clear mailbox attention bit */ + work_ha_copy &= ~HA_MBATT; + } else { + phba->sli.mbox_active = NULL; + spin_unlock_irqrestore(&phba->hbalock, iflag); + phba->last_completion_time = jiffies; + del_timer(&phba->sli.mbox_tmo); + if (pmb->mbox_cmpl) { + lpfc_sli_pcimem_bcopy(mbox, pmbox, + MAILBOX_CMD_SIZE); + if (pmb->out_ext_byte_len && + pmb->ctx_buf) + lpfc_sli_pcimem_bcopy( + phba->mbox_ext, + pmb->ctx_buf, + pmb->out_ext_byte_len); + } + if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { + pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; + + lpfc_debugfs_disc_trc(vport, + LPFC_DISC_TRC_MBOX_VPORT, + "MBOX dflt rpi: : " + "status:x%x rpi:x%x", + (uint32_t)pmbox->mbxStatus, + pmbox->un.varWords[0], 0); + + if (!pmbox->mbxStatus) { + mp = (struct lpfc_dmabuf *) + (pmb->ctx_buf); + ndlp = (struct lpfc_nodelist *) + pmb->ctx_ndlp; + + /* Reg_LOGIN of dflt RPI was + * successful. new lets get + * rid of the RPI using the + * same mbox buffer. 
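+ * That is, the just-completed REG_LOGIN mailbox is recycled to carry an
+ * UNREG_LOGIN for the default RPI, with lpfc_mbx_cmpl_dflt_rpi installed
+ * as its completion handler before the command is reissued with
+ * MBX_NOWAIT.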
+ */ + lpfc_unreg_login(phba, + vport->vpi, + pmbox->un.varWords[0], + pmb); + pmb->mbox_cmpl = + lpfc_mbx_cmpl_dflt_rpi; + pmb->ctx_buf = mp; + pmb->ctx_ndlp = ndlp; + pmb->vport = vport; + rc = lpfc_sli_issue_mbox(phba, + pmb, + MBX_NOWAIT); + if (rc != MBX_BUSY) + lpfc_printf_log(phba, + KERN_ERR, + LOG_TRACE_EVENT, + "0350 rc should have" + "been MBX_BUSY\n"); + if (rc != MBX_NOT_FINISHED) + goto send_current_mbox; + } + } + spin_lock_irqsave( + &phba->pport->work_port_lock, + iflag); + phba->pport->work_port_events &= + ~WORKER_MBOX_TMO; + spin_unlock_irqrestore( + &phba->pport->work_port_lock, + iflag); + + /* Do NOT queue MBX_HEARTBEAT to the worker + * thread for processing. + */ + if (pmbox->mbxCommand == MBX_HEARTBEAT) { + /* Process mbox now */ + phba->sli.mbox_active = NULL; + phba->sli.sli_flag &= + ~LPFC_SLI_MBOX_ACTIVE; + if (pmb->mbox_cmpl) + pmb->mbox_cmpl(phba, pmb); + } else { + /* Queue to worker thread to process */ + lpfc_mbox_cmpl_put(phba, pmb); + } + } + } else + spin_unlock_irqrestore(&phba->hbalock, iflag); + + if ((work_ha_copy & HA_MBATT) && + (phba->sli.mbox_active == NULL)) { +send_current_mbox: + /* Process next mailbox command if there is one */ + do { + rc = lpfc_sli_issue_mbox(phba, NULL, + MBX_NOWAIT); + } while (rc == MBX_NOT_FINISHED); + if (rc != MBX_SUCCESS) + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "0349 rc should be " + "MBX_SUCCESS\n"); + } + + spin_lock_irqsave(&phba->hbalock, iflag); + phba->work_ha |= work_ha_copy; + spin_unlock_irqrestore(&phba->hbalock, iflag); + lpfc_worker_wake_up(phba); + } + return IRQ_HANDLED; +unplug_error: + spin_unlock_irqrestore(&phba->hbalock, iflag); + return IRQ_HANDLED; + +} /* lpfc_sli_sp_intr_handler */ + +/** + * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. + * @irq: Interrupt number. + * @dev_id: The device context pointer. + * + * This function is directly called from the PCI layer as an interrupt + * service routine when device with SLI-3 interface spec is enabled with + * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB + * ring event in the HBA. However, when the device is enabled with either + * MSI or Pin-IRQ interrupt mode, this function is called as part of the + * device-level interrupt handler. When the PCI slot is in error recovery + * or the HBA is undergoing initialization, the interrupt handler will not + * process the interrupt. The SCSI FCP fast-path ring event are handled in + * the intrrupt context. This function is called without any lock held. + * It gets the hbalock to access and update SLI data structures. + * + * This function returns IRQ_HANDLED when interrupt is handled else it + * returns IRQ_NONE. + **/ +irqreturn_t +lpfc_sli_fp_intr_handler(int irq, void *dev_id) +{ + struct lpfc_hba *phba; + uint32_t ha_copy; + unsigned long status; + unsigned long iflag; + struct lpfc_sli_ring *pring; + + /* Get the driver's phba structure from the dev_id and + * assume the HBA is not interrupting. 
+ */ + phba = (struct lpfc_hba *) dev_id; + + if (unlikely(!phba)) + return IRQ_NONE; + + /* + * Stuff needs to be attented to when this function is invoked as an + * individual interrupt handler in MSI-X multi-message interrupt mode + */ + if (phba->intr_type == MSIX) { + /* Check device state for handling interrupt */ + if (lpfc_intr_state_check(phba)) + return IRQ_NONE; + /* Need to read HA REG for FCP ring and other ring events */ + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return IRQ_HANDLED; + /* Clear up only attention source related to fast-path */ + spin_lock_irqsave(&phba->hbalock, iflag); + /* + * If there is deferred error attention, do not check for + * any interrupt. + */ + if (unlikely(phba->hba_flag & DEFER_ERATT)) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + return IRQ_NONE; + } + writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), + phba->HAregaddr); + readl(phba->HAregaddr); /* flush */ + spin_unlock_irqrestore(&phba->hbalock, iflag); + } else + ha_copy = phba->ha_copy; + + /* + * Process all events on FCP ring. Take the optimized path for FCP IO. + */ + ha_copy &= ~(phba->work_ha_mask); + + status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); + status >>= (4*LPFC_FCP_RING); + pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; + if (status & HA_RXMASK) + lpfc_sli_handle_fast_ring_event(phba, pring, status); + + if (phba->cfg_multi_ring_support == 2) { + /* + * Process all events on extra ring. Take the optimized path + * for extra ring IO. + */ + status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); + status >>= (4*LPFC_EXTRA_RING); + if (status & HA_RXMASK) { + lpfc_sli_handle_fast_ring_event(phba, + &phba->sli.sli3_ring[LPFC_EXTRA_RING], + status); + } + } + return IRQ_HANDLED; +} /* lpfc_sli_fp_intr_handler */ + +/** + * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device + * @irq: Interrupt number. + * @dev_id: The device context pointer. + * + * This function is the HBA device-level interrupt handler to device with + * SLI-3 interface spec, called from the PCI layer when either MSI or + * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which + * requires driver attention. This function invokes the slow-path interrupt + * attention handling function and fast-path interrupt attention handling + * function in turn to process the relevant HBA attention events. This + * function is called without any lock held. It gets the hbalock to access + * and update SLI data structures. + * + * This function returns IRQ_HANDLED when interrupt is handled, else it + * returns IRQ_NONE. + **/ +irqreturn_t +lpfc_sli_intr_handler(int irq, void *dev_id) +{ + struct lpfc_hba *phba; + irqreturn_t sp_irq_rc, fp_irq_rc; + unsigned long status1, status2; + uint32_t hc_copy; + + /* + * Get the driver's phba structure from the dev_id and + * assume the HBA is not interrupting. 
+ */ + phba = (struct lpfc_hba *) dev_id; + + if (unlikely(!phba)) + return IRQ_NONE; + + /* Check device state for handling interrupt */ + if (lpfc_intr_state_check(phba)) + return IRQ_NONE; + + spin_lock(&phba->hbalock); + if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { + spin_unlock(&phba->hbalock); + return IRQ_HANDLED; + } + + if (unlikely(!phba->ha_copy)) { + spin_unlock(&phba->hbalock); + return IRQ_NONE; + } else if (phba->ha_copy & HA_ERATT) { + if (phba->hba_flag & HBA_ERATT_HANDLED) + /* ERATT polling has handled ERATT */ + phba->ha_copy &= ~HA_ERATT; + else + /* Indicate interrupt handler handles ERATT */ + phba->hba_flag |= HBA_ERATT_HANDLED; + } + + /* + * If there is deferred error attention, do not check for any interrupt. + */ + if (unlikely(phba->hba_flag & DEFER_ERATT)) { + spin_unlock(&phba->hbalock); + return IRQ_NONE; + } + + /* Clear attention sources except link and error attentions */ + if (lpfc_readl(phba->HCregaddr, &hc_copy)) { + spin_unlock(&phba->hbalock); + return IRQ_HANDLED; + } + writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA + | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), + phba->HCregaddr); + writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); + writel(hc_copy, phba->HCregaddr); + readl(phba->HAregaddr); /* flush */ + spin_unlock(&phba->hbalock); + + /* + * Invokes slow-path host attention interrupt handling as appropriate. + */ + + /* status of events with mailbox and link attention */ + status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); + + /* status of events with ELS ring */ + status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); + status2 >>= (4*LPFC_ELS_RING); + + if (status1 || (status2 & HA_RXMASK)) + sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); + else + sp_irq_rc = IRQ_NONE; + + /* + * Invoke fast-path host attention interrupt handling as appropriate. + */ + + /* status of events with FCP ring */ + status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); + status1 >>= (4*LPFC_FCP_RING); + + /* status of events with extra ring */ + if (phba->cfg_multi_ring_support == 2) { + status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); + status2 >>= (4*LPFC_EXTRA_RING); + } else + status2 = 0; + + if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) + fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); + else + fp_irq_rc = IRQ_NONE; + + /* Return device-level interrupt handling status */ + return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; +} /* lpfc_sli_intr_handler */ + +/** + * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked by the worker thread to process all the pending + * SLI4 els abort xri events. 
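+ *
+ * A minimal sketch of the producer side (mirroring
+ * lpfc_sli4_sp_handle_abort_xri_wcqe() below), which queues the event
+ * under the abort-list lock and returns true so its caller wakes the
+ * worker thread:
+ *
+ *   spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
+ *   list_add_tail(&cq_event->list,
+ *                 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
+ *   phba->hba_flag |= ELS_XRI_ABORT_EVENT;
+ *   spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
+ *                          iflags);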
+ **/ +void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event; + unsigned long iflags; + + /* First, declare the els xri abort event has been handled */ + spin_lock_irqsave(&phba->hbalock, iflags); + phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + + /* Now, handle all the els xri abort events */ + spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); + while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { + /* Get the first event from the head of the event queue */ + list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, + cq_event, struct lpfc_cq_event, list); + spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, + iflags); + /* Notify aborted XRI for ELS work queue */ + lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); + + /* Free the event processed back to the free pool */ + lpfc_sli4_cq_event_release(phba, cq_event); + spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, + iflags); + } + spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); +} + +/** + * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe + * @phba: Pointer to HBA context object. + * @irspiocbq: Pointer to work-queue completion queue entry. + * + * This routine handles an ELS work-queue completion event and construct + * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common + * discovery engine to handle. + * + * Return: Pointer to the receive IOCBQ, NULL otherwise. + **/ +static struct lpfc_iocbq * +lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba, + struct lpfc_iocbq *irspiocbq) +{ + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *cmdiocbq; + struct lpfc_wcqe_complete *wcqe; + unsigned long iflags; + + pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return NULL; + + wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; + spin_lock_irqsave(&pring->ring_lock, iflags); + pring->stats.iocb_event++; + /* Look up the ELS command IOCB and create pseudo response IOCB */ + cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, + bf_get(lpfc_wcqe_c_request_tag, wcqe)); + if (unlikely(!cmdiocbq)) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0386 ELS complete with no corresponding " + "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", + wcqe->word0, wcqe->total_data_placed, + wcqe->parameter, wcqe->word3); + lpfc_sli_release_iocbq(phba, irspiocbq); + return NULL; + } + + memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128)); + memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe)); + + /* Put the iocb back on the txcmplq */ + lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + + if (bf_get(lpfc_wcqe_c_xb, wcqe)) { + spin_lock_irqsave(&phba->hbalock, iflags); + irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; + spin_unlock_irqrestore(&phba->hbalock, iflags); + } + + return irspiocbq; +} + +inline struct lpfc_cq_event * +lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) +{ + struct lpfc_cq_event *cq_event; + + /* Allocate a new internal CQ_EVENT entry */ + cq_event = lpfc_sli4_cq_event_alloc(phba); + if (!cq_event) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0602 Failed to alloc CQ_EVENT entry\n"); + return NULL; + } + + /* Move the CQE into the event */ + memcpy(&cq_event->cqe, entry, size); + return cq_event; +} + +/** + * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event + * @phba: Pointer 
to HBA context object. + * @mcqe: Pointer to mailbox completion queue entry. + * + * This routine process a mailbox completion queue entry with asynchronous + * event. + * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) +{ + struct lpfc_cq_event *cq_event; + unsigned long iflags; + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0392 Async Event: word0:x%x, word1:x%x, " + "word2:x%x, word3:x%x\n", mcqe->word0, + mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); + + cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe)); + if (!cq_event) + return false; + + spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); + list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); + spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); + + /* Set the async event flag */ + spin_lock_irqsave(&phba->hbalock, iflags); + phba->hba_flag |= ASYNC_EVENT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + + return true; +} + +/** + * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event + * @phba: Pointer to HBA context object. + * @mcqe: Pointer to mailbox completion queue entry. + * + * This routine process a mailbox completion queue entry with mailbox + * completion event. + * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) +{ + uint32_t mcqe_status; + MAILBOX_t *mbox, *pmbox; + struct lpfc_mqe *mqe; + struct lpfc_vport *vport; + struct lpfc_nodelist *ndlp; + struct lpfc_dmabuf *mp; + unsigned long iflags; + LPFC_MBOXQ_t *pmb; + bool workposted = false; + int rc; + + /* If not a mailbox complete MCQE, out by checking mailbox consume */ + if (!bf_get(lpfc_trailer_completed, mcqe)) + goto out_no_mqe_complete; + + /* Get the reference to the active mbox command */ + spin_lock_irqsave(&phba->hbalock, iflags); + pmb = phba->sli.mbox_active; + if (unlikely(!pmb)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1832 No pending MBOX command to handle\n"); + spin_unlock_irqrestore(&phba->hbalock, iflags); + goto out_no_mqe_complete; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + mqe = &pmb->u.mqe; + pmbox = (MAILBOX_t *)&pmb->u.mqe; + mbox = phba->mbox; + vport = pmb->vport; + + /* Reset heartbeat timer */ + phba->last_completion_time = jiffies; + del_timer(&phba->sli.mbox_tmo); + + /* Move mbox data to caller's mailbox region, do endian swapping */ + if (pmb->mbox_cmpl && mbox) + lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); + + /* + * For mcqe errors, conditionally move a modified error code to + * the mbox so that the error will not be missed. + */ + mcqe_status = bf_get(lpfc_mcqe_status, mcqe); + if (mcqe_status != MB_CQE_STATUS_SUCCESS) { + if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS) + bf_set(lpfc_mqe_status, mqe, + (LPFC_MBX_ERROR_RANGE | mcqe_status)); + } + if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { + pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, + "MBOX dflt rpi: status:x%x rpi:x%x", + mcqe_status, + pmbox->un.varWords[0], 0); + if (mcqe_status == MB_CQE_STATUS_SUCCESS) { + mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); + ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; + + /* Reg_LOGIN of dflt RPI was successful. 
Mark the + * node as having an UNREG_LOGIN in progress to stop + * an unsolicited PLOGI from the same NPortId from + * starting another mailbox transaction. + */ + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag |= NLP_UNREG_INP; + spin_unlock_irqrestore(&ndlp->lock, iflags); + lpfc_unreg_login(phba, vport->vpi, + pmbox->un.varWords[0], pmb); + pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; + pmb->ctx_buf = mp; + + /* No reference taken here. This is a default + * RPI reg/immediate unreg cycle. The reference was + * taken in the reg rpi path and is released when + * this mailbox completes. + */ + pmb->ctx_ndlp = ndlp; + pmb->vport = vport; + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + if (rc != MBX_BUSY) + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "0385 rc should " + "have been MBX_BUSY\n"); + if (rc != MBX_NOT_FINISHED) + goto send_current_mbox; + } + } + spin_lock_irqsave(&phba->pport->work_port_lock, iflags); + phba->pport->work_port_events &= ~WORKER_MBOX_TMO; + spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); + + /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */ + if (pmbox->mbxCommand == MBX_HEARTBEAT) { + spin_lock_irqsave(&phba->hbalock, iflags); + /* Release the mailbox command posting token */ + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + if (bf_get(lpfc_trailer_consumed, mcqe)) + lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + /* Post the next mbox command, if there is one */ + lpfc_sli4_post_async_mbox(phba); + + /* Process cmpl now */ + if (pmb->mbox_cmpl) + pmb->mbox_cmpl(phba, pmb); + return false; + } + + /* There is mailbox completion work to queue to the worker thread */ + spin_lock_irqsave(&phba->hbalock, iflags); + __lpfc_mbox_cmpl_put(phba, pmb); + phba->work_ha |= HA_MBATT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + workposted = true; + +send_current_mbox: + spin_lock_irqsave(&phba->hbalock, iflags); + /* Release the mailbox command posting token */ + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + /* Setting active mailbox pointer need to be in sync to flag clear */ + phba->sli.mbox_active = NULL; + if (bf_get(lpfc_trailer_consumed, mcqe)) + lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); + spin_unlock_irqrestore(&phba->hbalock, iflags); + /* Wake up worker thread to post the next pending mailbox command */ + lpfc_worker_wake_up(phba); + return workposted; + +out_no_mqe_complete: + spin_lock_irqsave(&phba->hbalock, iflags); + if (bf_get(lpfc_trailer_consumed, mcqe)) + lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); + spin_unlock_irqrestore(&phba->hbalock, iflags); + return false; +} + +/** + * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry + * @phba: Pointer to HBA context object. + * @cq: Pointer to associated CQ + * @cqe: Pointer to mailbox completion queue entry. + * + * This routine process a mailbox completion queue entry, it invokes the + * proper mailbox complete handling or asynchronous event handling routine + * according to the MCQE's async bit. + * + * Return: true if work posted to worker thread, otherwise false. 
+ **/ +static bool +lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_cqe *cqe) +{ + struct lpfc_mcqe mcqe; + bool workposted; + + cq->CQ_mbox++; + + /* Copy the mailbox MCQE and convert endian order as needed */ + lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); + + /* Invoke the proper event handling routine */ + if (!bf_get(lpfc_trailer_async, &mcqe)) + workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); + else + workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); + return workposted; +} + +/** + * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event + * @phba: Pointer to HBA context object. + * @cq: Pointer to associated CQ + * @wcqe: Pointer to work-queue completion queue entry. + * + * This routine handles an ELS work-queue completion event. + * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_iocbq *irspiocbq; + unsigned long iflags; + struct lpfc_sli_ring *pring = cq->pring; + int txq_cnt = 0; + int txcmplq_cnt = 0; + + /* Check for response status */ + if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { + /* Log the error status */ + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0357 ELS CQE error: status=x%x: " + "CQE: %08x %08x %08x %08x\n", + bf_get(lpfc_wcqe_c_status, wcqe), + wcqe->word0, wcqe->total_data_placed, + wcqe->parameter, wcqe->word3); + } + + /* Get an irspiocbq for later ELS response processing use */ + irspiocbq = lpfc_sli_get_iocbq(phba); + if (!irspiocbq) { + if (!list_empty(&pring->txq)) + txq_cnt++; + if (!list_empty(&pring->txcmplq)) + txcmplq_cnt++; + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " + "els_txcmplq_cnt=%d\n", + txq_cnt, phba->iocb_cnt, + txcmplq_cnt); + return false; + } + + /* Save off the slow-path queue event for work thread to process */ + memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); + spin_lock_irqsave(&phba->hbalock, iflags); + list_add_tail(&irspiocbq->cq_event.list, + &phba->sli4_hba.sp_queue_event); + phba->hba_flag |= HBA_SP_QUEUE_EVT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + + return true; +} + +/** + * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event + * @phba: Pointer to HBA context object. + * @wcqe: Pointer to work-queue completion queue entry. + * + * This routine handles slow-path WQ entry consumed event by invoking the + * proper WQ release routine to the slow-path WQ. + **/ +static void +lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, + struct lpfc_wcqe_release *wcqe) +{ + /* sanity check on queue memory */ + if (unlikely(!phba->sli4_hba.els_wq)) + return; + /* Check for the slow-path ELS work queue */ + if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) + lpfc_sli4_wq_release(phba->sli4_hba.els_wq, + bf_get(lpfc_wcqe_r_wqe_index, wcqe)); + else + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2579 Slow-path wqe consume event carries " + "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", + bf_get(lpfc_wcqe_r_wqe_index, wcqe), + phba->sli4_hba.els_wq->queue_id); +} + +/** + * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event + * @phba: Pointer to HBA context object. + * @cq: Pointer to a WQ completion queue. + * @wcqe: Pointer to work-queue completion queue entry. + * + * This routine handles an XRI abort event. 
+ * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, + struct lpfc_queue *cq, + struct sli4_wcqe_xri_aborted *wcqe) +{ + bool workposted = false; + struct lpfc_cq_event *cq_event; + unsigned long iflags; + + switch (cq->subtype) { + case LPFC_IO: + lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq); + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + /* Notify aborted XRI for NVME work queue */ + if (phba->nvmet_support) + lpfc_sli4_nvmet_xri_aborted(phba, wcqe); + } + workposted = false; + break; + case LPFC_NVME_LS: /* NVME LS uses ELS resources */ + case LPFC_ELS: + cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe)); + if (!cq_event) { + workposted = false; + break; + } + cq_event->hdwq = cq->hdwq; + spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, + iflags); + list_add_tail(&cq_event->list, + &phba->sli4_hba.sp_els_xri_aborted_work_queue); + /* Set the els xri abort event flag */ + phba->hba_flag |= ELS_XRI_ABORT_EVENT; + spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, + iflags); + workposted = true; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0603 Invalid CQ subtype %d: " + "%08x %08x %08x %08x\n", + cq->subtype, wcqe->word0, wcqe->parameter, + wcqe->word2, wcqe->word3); + workposted = false; + break; + } + return workposted; +} + +#define FC_RCTL_MDS_DIAGS 0xF4 + +/** + * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry + * @phba: Pointer to HBA context object. + * @rcqe: Pointer to receive-queue completion queue entry. + * + * This routine process a receive-queue completion queue entry. + * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) +{ + bool workposted = false; + struct fc_frame_header *fc_hdr; + struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; + struct lpfc_queue *drq = phba->sli4_hba.dat_rq; + struct lpfc_nvmet_tgtport *tgtp; + struct hbq_dmabuf *dma_buf; + uint32_t status, rq_id; + unsigned long iflags; + + /* sanity check on queue memory */ + if (unlikely(!hrq) || unlikely(!drq)) + return workposted; + + if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) + rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); + else + rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); + if (rq_id != hrq->queue_id) + goto out; + + status = bf_get(lpfc_rcqe_status, rcqe); + switch (status) { + case FC_STATUS_RQ_BUF_LEN_EXCEEDED: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2537 Receive Frame Truncated!!\n"); + fallthrough; + case FC_STATUS_RQ_SUCCESS: + spin_lock_irqsave(&phba->hbalock, iflags); + lpfc_sli4_rq_release(hrq, drq); + dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); + if (!dma_buf) { + hrq->RQ_no_buf_found++; + spin_unlock_irqrestore(&phba->hbalock, iflags); + goto out; + } + hrq->RQ_rcv_buf++; + hrq->RQ_buf_posted--; + memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); + + fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; + + if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || + fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + /* Handle MDS Loopback frames */ + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_sli4_handle_mds_loopback(phba->pport, + dma_buf); + else + lpfc_in_buf_free(phba, &dma_buf->dbuf); + break; + } + + /* save off the frame for the work thread to process */ + list_add_tail(&dma_buf->cq_event.list, + 
&phba->sli4_hba.sp_queue_event); + /* Frame received */ + phba->hba_flag |= HBA_SP_QUEUE_EVT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + workposted = true; + break; + case FC_STATUS_INSUFF_BUF_FRM_DISC: + if (phba->nvmet_support) { + tgtp = phba->targetport->private; + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6402 RQE Error x%x, posted %d err_cnt " + "%d: %x %x %x\n", + status, hrq->RQ_buf_posted, + hrq->RQ_no_posted_buf, + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out), + atomic_read(&tgtp->xmt_fcp_release)); + } + fallthrough; + + case FC_STATUS_INSUFF_BUF_NEED_BUF: + hrq->RQ_no_posted_buf++; + /* Post more buffers if possible */ + spin_lock_irqsave(&phba->hbalock, iflags); + phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; + spin_unlock_irqrestore(&phba->hbalock, iflags); + workposted = true; + break; + case FC_STATUS_RQ_DMA_FAILURE: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2564 RQE DMA Error x%x, x%08x x%08x x%08x " + "x%08x\n", + status, rcqe->word0, rcqe->word1, + rcqe->word2, rcqe->word3); + + /* If IV set, no further recovery */ + if (bf_get(lpfc_rcqe_iv, rcqe)) + break; + + /* recycle consumed resource */ + spin_lock_irqsave(&phba->hbalock, iflags); + lpfc_sli4_rq_release(hrq, drq); + dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); + if (!dma_buf) { + hrq->RQ_no_buf_found++; + spin_unlock_irqrestore(&phba->hbalock, iflags); + break; + } + hrq->RQ_rcv_buf++; + hrq->RQ_buf_posted--; + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_in_buf_free(phba, &dma_buf->dbuf); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2565 Unexpected RQE Status x%x, w0-3 x%08x " + "x%08x x%08x x%08x\n", + status, rcqe->word0, rcqe->word1, + rcqe->word2, rcqe->word3); + break; + } +out: + return workposted; +} + +/** + * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry + * @phba: Pointer to HBA context object. + * @cq: Pointer to the completion queue. + * @cqe: Pointer to a completion queue entry. + * + * This routine process a slow-path work-queue or receive queue completion queue + * entry. + * + * Return: true if work posted to worker thread, otherwise false. 
+ **/ +static bool +lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_cqe *cqe) +{ + struct lpfc_cqe cqevt; + bool workposted = false; + + /* Copy the work queue CQE and convert endian order if needed */ + lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); + + /* Check and process for different type of WCQE and dispatch */ + switch (bf_get(lpfc_cqe_code, &cqevt)) { + case CQE_CODE_COMPL_WQE: + /* Process the WQ/RQ complete event */ + phba->last_completion_time = jiffies; + workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, + (struct lpfc_wcqe_complete *)&cqevt); + break; + case CQE_CODE_RELEASE_WQE: + /* Process the WQ release event */ + lpfc_sli4_sp_handle_rel_wcqe(phba, + (struct lpfc_wcqe_release *)&cqevt); + break; + case CQE_CODE_XRI_ABORTED: + /* Process the WQ XRI abort event */ + phba->last_completion_time = jiffies; + workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, + (struct sli4_wcqe_xri_aborted *)&cqevt); + break; + case CQE_CODE_RECEIVE: + case CQE_CODE_RECEIVE_V1: + /* Process the RQ event */ + phba->last_completion_time = jiffies; + workposted = lpfc_sli4_sp_handle_rcqe(phba, + (struct lpfc_rcqe *)&cqevt); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0388 Not a valid WCQE code: x%x\n", + bf_get(lpfc_cqe_code, &cqevt)); + break; + } + return workposted; +} + +/** + * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry + * @phba: Pointer to HBA context object. + * @eqe: Pointer to fast-path event queue entry. + * @speq: Pointer to slow-path event queue. + * + * This routine process a event queue entry from the slow-path event queue. + * It will check the MajorCode and MinorCode to determine this is for a + * completion event on a completion queue, if not, an error shall be logged + * and just return. Otherwise, it will get to the corresponding completion + * queue and process all the entries on that completion queue, rearm the + * completion queue, and then return. + * + **/ +static void +lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, + struct lpfc_queue *speq) +{ + struct lpfc_queue *cq = NULL, *childq; + uint16_t cqid; + int ret = 0; + + /* Get the reference to the corresponding CQ */ + cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); + + list_for_each_entry(childq, &speq->child_list, list) { + if (childq->queue_id == cqid) { + cq = childq; + break; + } + } + if (unlikely(!cq)) { + if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0365 Slow-path CQ identifier " + "(%d) does not exist\n", cqid); + return; + } + + /* Save EQ associated with this CQ */ + cq->assoc_qp = speq; + + if (is_kdump_kernel()) + ret = queue_work(phba->wq, &cq->spwork); + else + ret = queue_work_on(cq->chann, phba->wq, &cq->spwork); + + if (!ret) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0390 Cannot schedule queue work " + "for CQ eqcqid=%d, cqid=%d on CPU %d\n", + cqid, cq->queue_id, raw_smp_processor_id()); +} + +/** + * __lpfc_sli4_process_cq - Process elements of a CQ + * @phba: Pointer to HBA context object. + * @cq: Pointer to CQ to be processed + * @handler: Routine to process each cqe + * @delay: Pointer to usdelay to set in case of rescheduling of the handler + * + * This routine processes completion queue entries in a CQ. While a valid + * queue element is found, the handler is called. During processing checks + * are made for periodic doorbell writes to let the hardware know of + * element consumption. 
+ * + * If the max limit on cqes to process is hit, or there are no more valid + * entries, the loop stops. If we processed a sufficient number of elements, + * meaning there is sufficient load, rather than rearming and generating + * another interrupt, a cq rescheduling delay will be set. A delay of 0 + * indicates no rescheduling. + * + * Returns True if work scheduled, False otherwise. + **/ +static bool +__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, + bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_cqe *), unsigned long *delay) +{ + struct lpfc_cqe *cqe; + bool workposted = false; + int count = 0, consumed = 0; + bool arm = true; + + /* default - no reschedule */ + *delay = 0; + + if (cmpxchg(&cq->queue_claimed, 0, 1) != 0) + goto rearm_and_exit; + + /* Process all the entries to the CQ */ + cq->q_flag = 0; + cqe = lpfc_sli4_cq_get(cq); + while (cqe) { + workposted |= handler(phba, cq, cqe); + __lpfc_sli4_consume_cqe(phba, cq, cqe); + + consumed++; + if (!(++count % cq->max_proc_limit)) + break; + + if (!(count % cq->notify_interval)) { + phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, + LPFC_QUEUE_NOARM); + consumed = 0; + cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK; + } + + if (count == LPFC_NVMET_CQ_NOTIFY) + cq->q_flag |= HBA_NVMET_CQ_NOTIFY; + + cqe = lpfc_sli4_cq_get(cq); + } + if (count >= phba->cfg_cq_poll_threshold) { + *delay = 1; + arm = false; + } + + /* Track the max number of CQEs processed in 1 EQ */ + if (count > cq->CQ_max_cqe) + cq->CQ_max_cqe = count; + + cq->assoc_qp->EQ_cqe_cnt += count; + + /* Catch the no cq entry condition */ + if (unlikely(count == 0)) + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0369 No entry from completion queue " + "qid=%d\n", cq->queue_id); + + xchg(&cq->queue_claimed, 0); + +rearm_and_exit: + phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, + arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM); + + return workposted; +} + +/** + * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry + * @cq: pointer to CQ to process + * + * This routine calls the cq processing routine with a handler specific + * to the type of queue bound to it. + * + * The CQ routine returns two values: the first is the calling status, + * which indicates whether work was queued to the background discovery + * thread. If true, the routine should wakeup the discovery thread; + * the second is the delay parameter. If non-zero, rather than rearming + * the CQ and yet another interrupt, the CQ handler should be queued so + * that it is processed in a subsequent polling action. The value of + * the delay indicates when to reschedule it. 
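+ *
+ * A condensed sketch of that contract for the slow-path WCQ case
+ * (the body below also handles MCQ and IO queues and may use the
+ * queue_delayed_work_on() variant):
+ *
+ *   workposted = __lpfc_sli4_process_cq(phba, cq,
+ *                                       lpfc_sli4_sp_handle_cqe, &delay);
+ *   if (delay)
+ *           queue_delayed_work(phba->wq, &cq->sched_spwork, delay);
+ *   if (workposted)
+ *           lpfc_worker_wake_up(phba);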
+ **/ +static void +__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) +{ + struct lpfc_hba *phba = cq->phba; + unsigned long delay; + bool workposted = false; + int ret = 0; + + /* Process and rearm the CQ */ + switch (cq->type) { + case LPFC_MCQ: + workposted |= __lpfc_sli4_process_cq(phba, cq, + lpfc_sli4_sp_handle_mcqe, + &delay); + break; + case LPFC_WCQ: + if (cq->subtype == LPFC_IO) + workposted |= __lpfc_sli4_process_cq(phba, cq, + lpfc_sli4_fp_handle_cqe, + &delay); + else + workposted |= __lpfc_sli4_process_cq(phba, cq, + lpfc_sli4_sp_handle_cqe, + &delay); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0370 Invalid completion queue type (%d)\n", + cq->type); + return; + } + + if (delay) { + if (is_kdump_kernel()) + ret = queue_delayed_work(phba->wq, &cq->sched_spwork, + delay); + else + ret = queue_delayed_work_on(cq->chann, phba->wq, + &cq->sched_spwork, delay); + if (!ret) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0394 Cannot schedule queue work " + "for cqid=%d on CPU %d\n", + cq->queue_id, cq->chann); + } + + /* wake up worker thread if there are works to be done */ + if (workposted) + lpfc_worker_wake_up(phba); +} + +/** + * lpfc_sli4_sp_process_cq - slow-path work handler when started by + * interrupt + * @work: pointer to work element + * + * translates from the work handler and calls the slow-path handler. + **/ +static void +lpfc_sli4_sp_process_cq(struct work_struct *work) +{ + struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork); + + __lpfc_sli4_sp_process_cq(cq); +} + +/** + * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer + * @work: pointer to work element + * + * translates from the work handler and calls the slow-path handler. + **/ +static void +lpfc_sli4_dly_sp_process_cq(struct work_struct *work) +{ + struct lpfc_queue *cq = container_of(to_delayed_work(work), + struct lpfc_queue, sched_spwork); + + __lpfc_sli4_sp_process_cq(cq); +} + +/** + * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry + * @phba: Pointer to HBA context object. + * @cq: Pointer to associated CQ + * @wcqe: Pointer to work-queue completion queue entry. + * + * This routine process a fast-path work queue completion entry from fast-path + * event queue for FCP command response completion. + **/ +static void +lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_sli_ring *pring = cq->pring; + struct lpfc_iocbq *cmdiocbq; + unsigned long iflags; + + /* Check for response status */ + if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { + /* If resource errors reported from HBA, reduce queue + * depth of the SCSI device. 
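+ * (IOSTAT_LOCAL_REJECT together with IOERR_NO_RESOURCES in the WCQE
+ * parameter word is what triggers lpfc_rampdown_queue_depth() below.)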
+ */ + if (((bf_get(lpfc_wcqe_c_status, wcqe) == + IOSTAT_LOCAL_REJECT)) && + ((wcqe->parameter & IOERR_PARAM_MASK) == + IOERR_NO_RESOURCES)) + phba->lpfc_rampdown_queue_depth(phba); + + /* Log the cmpl status */ + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0373 FCP CQE cmpl: status=x%x: " + "CQE: %08x %08x %08x %08x\n", + bf_get(lpfc_wcqe_c_status, wcqe), + wcqe->word0, wcqe->total_data_placed, + wcqe->parameter, wcqe->word3); + } + + /* Look up the FCP command IOCB and create pseudo response IOCB */ + spin_lock_irqsave(&pring->ring_lock, iflags); + pring->stats.iocb_event++; + cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, + bf_get(lpfc_wcqe_c_request_tag, wcqe)); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + if (unlikely(!cmdiocbq)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0374 FCP complete with no corresponding " + "cmdiocb: iotag (%d)\n", + bf_get(lpfc_wcqe_c_request_tag, wcqe)); + return; + } +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + cmdiocbq->isr_timestamp = cq->isr_timestamp; +#endif + if (bf_get(lpfc_wcqe_c_xb, wcqe)) { + spin_lock_irqsave(&phba->hbalock, iflags); + cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; + spin_unlock_irqrestore(&phba->hbalock, iflags); + } + + if (cmdiocbq->cmd_cmpl) { + /* For FCP the flag is cleared in cmd_cmpl */ + if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) && + cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) { + spin_lock_irqsave(&phba->hbalock, iflags); + cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; + spin_unlock_irqrestore(&phba->hbalock, iflags); + } + + /* Pass the cmd_iocb and the wcqe to the upper layer */ + memcpy(&cmdiocbq->wcqe_cmpl, wcqe, + sizeof(struct lpfc_wcqe_complete)); + cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq); + } else { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0375 FCP cmdiocb not callback function " + "iotag: (%d)\n", + bf_get(lpfc_wcqe_c_request_tag, wcqe)); + } +} + +/** + * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event + * @phba: Pointer to HBA context object. + * @cq: Pointer to completion queue. + * @wcqe: Pointer to work-queue completion queue entry. + * + * This routine handles an fast-path WQ entry consumed event by invoking the + * proper WQ release routine to the slow-path WQ. + **/ +static void +lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_wcqe_release *wcqe) +{ + struct lpfc_queue *childwq; + bool wqid_matched = false; + uint16_t hba_wqid; + + /* Check for fast-path FCP work queue release */ + hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); + list_for_each_entry(childwq, &cq->child_list, list) { + if (childwq->queue_id == hba_wqid) { + lpfc_sli4_wq_release(childwq, + bf_get(lpfc_wcqe_r_wqe_index, wcqe)); + if (childwq->q_flag & HBA_NVMET_WQFULL) + lpfc_nvmet_wqfull_process(phba, childwq); + wqid_matched = true; + break; + } + } + /* Report warning log message if no match found */ + if (wqid_matched != true) + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2580 Fast-path wqe consume event carries " + "miss-matched qid: wcqe-qid=x%x\n", hba_wqid); +} + +/** + * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry + * @phba: Pointer to HBA context object. + * @cq: Pointer to completion queue. + * @rcqe: Pointer to receive-queue completion queue entry. + * + * This routine process a receive-queue completion queue entry. + * + * Return: true if work posted to worker thread, otherwise false. 
+ **/ +static bool +lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_rcqe *rcqe) +{ + bool workposted = false; + struct lpfc_queue *hrq; + struct lpfc_queue *drq; + struct rqb_dmabuf *dma_buf; + struct fc_frame_header *fc_hdr; + struct lpfc_nvmet_tgtport *tgtp; + uint32_t status, rq_id; + unsigned long iflags; + uint32_t fctl, idx; + + if ((phba->nvmet_support == 0) || + (phba->sli4_hba.nvmet_cqset == NULL)) + return workposted; + + idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id; + hrq = phba->sli4_hba.nvmet_mrq_hdr[idx]; + drq = phba->sli4_hba.nvmet_mrq_data[idx]; + + /* sanity check on queue memory */ + if (unlikely(!hrq) || unlikely(!drq)) + return workposted; + + if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) + rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); + else + rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); + + if ((phba->nvmet_support == 0) || + (rq_id != hrq->queue_id)) + return workposted; + + status = bf_get(lpfc_rcqe_status, rcqe); + switch (status) { + case FC_STATUS_RQ_BUF_LEN_EXCEEDED: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6126 Receive Frame Truncated!!\n"); + fallthrough; + case FC_STATUS_RQ_SUCCESS: + spin_lock_irqsave(&phba->hbalock, iflags); + lpfc_sli4_rq_release(hrq, drq); + dma_buf = lpfc_sli_rqbuf_get(phba, hrq); + if (!dma_buf) { + hrq->RQ_no_buf_found++; + spin_unlock_irqrestore(&phba->hbalock, iflags); + goto out; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + hrq->RQ_rcv_buf++; + hrq->RQ_buf_posted--; + fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; + + /* Just some basic sanity checks on FCP Command frame */ + fctl = (fc_hdr->fh_f_ctl[0] << 16 | + fc_hdr->fh_f_ctl[1] << 8 | + fc_hdr->fh_f_ctl[2]); + if (((fctl & + (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) != + (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) || + (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */ + goto drop; + + if (fc_hdr->fh_type == FC_TYPE_FCP) { + dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); + lpfc_nvmet_unsol_fcp_event( + phba, idx, dma_buf, cq->isr_timestamp, + cq->q_flag & HBA_NVMET_CQ_NOTIFY); + return false; + } +drop: + lpfc_rq_buf_free(phba, &dma_buf->hbuf); + break; + case FC_STATUS_INSUFF_BUF_FRM_DISC: + if (phba->nvmet_support) { + tgtp = phba->targetport->private; + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6401 RQE Error x%x, posted %d err_cnt " + "%d: %x %x %x\n", + status, hrq->RQ_buf_posted, + hrq->RQ_no_posted_buf, + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out), + atomic_read(&tgtp->xmt_fcp_release)); + } + fallthrough; + + case FC_STATUS_INSUFF_BUF_NEED_BUF: + hrq->RQ_no_posted_buf++; + /* Post more buffers if possible */ + break; + case FC_STATUS_RQ_DMA_FAILURE: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2575 RQE DMA Error x%x, x%08x x%08x x%08x " + "x%08x\n", + status, rcqe->word0, rcqe->word1, + rcqe->word2, rcqe->word3); + + /* If IV set, no further recovery */ + if (bf_get(lpfc_rcqe_iv, rcqe)) + break; + + /* recycle consumed resource */ + spin_lock_irqsave(&phba->hbalock, iflags); + lpfc_sli4_rq_release(hrq, drq); + dma_buf = lpfc_sli_rqbuf_get(phba, hrq); + if (!dma_buf) { + hrq->RQ_no_buf_found++; + spin_unlock_irqrestore(&phba->hbalock, iflags); + break; + } + hrq->RQ_rcv_buf++; + hrq->RQ_buf_posted--; + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_rq_buf_free(phba, &dma_buf->hbuf); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2576 Unexpected RQE Status x%x, w0-3 
x%08x " + "x%08x x%08x x%08x\n", + status, rcqe->word0, rcqe->word1, + rcqe->word2, rcqe->word3); + break; + } +out: + return workposted; +} + +/** + * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry + * @phba: adapter with cq + * @cq: Pointer to the completion queue. + * @cqe: Pointer to fast-path completion queue entry. + * + * This routine process a fast-path work queue completion entry from fast-path + * event queue for FCP command response completion. + * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_cqe *cqe) +{ + struct lpfc_wcqe_release wcqe; + bool workposted = false; + + /* Copy the work queue CQE and convert endian order if needed */ + lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); + + /* Check and process for different type of WCQE and dispatch */ + switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { + case CQE_CODE_COMPL_WQE: + case CQE_CODE_NVME_ERSP: + cq->CQ_wq++; + /* Process the WQ complete event */ + phba->last_completion_time = jiffies; + if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS) + lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, + (struct lpfc_wcqe_complete *)&wcqe); + break; + case CQE_CODE_RELEASE_WQE: + cq->CQ_release_wqe++; + /* Process the WQ release event */ + lpfc_sli4_fp_handle_rel_wcqe(phba, cq, + (struct lpfc_wcqe_release *)&wcqe); + break; + case CQE_CODE_XRI_ABORTED: + cq->CQ_xri_aborted++; + /* Process the WQ XRI abort event */ + phba->last_completion_time = jiffies; + workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, + (struct sli4_wcqe_xri_aborted *)&wcqe); + break; + case CQE_CODE_RECEIVE_V1: + case CQE_CODE_RECEIVE: + phba->last_completion_time = jiffies; + if (cq->subtype == LPFC_NVMET) { + workposted = lpfc_sli4_nvmet_handle_rcqe( + phba, cq, (struct lpfc_rcqe *)&wcqe); + } + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0144 Not a valid CQE code: x%x\n", + bf_get(lpfc_wcqe_c_code, &wcqe)); + break; + } + return workposted; +} + +/** + * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry + * @cq: Pointer to CQ to be processed + * + * This routine calls the cq processing routine with the handler for + * fast path CQEs. + * + * The CQ routine returns two values: the first is the calling status, + * which indicates whether work was queued to the background discovery + * thread. If true, the routine should wakeup the discovery thread; + * the second is the delay parameter. If non-zero, rather than rearming + * the CQ and yet another interrupt, the CQ handler should be queued so + * that it is processed in a subsequent polling action. The value of + * the delay indicates when to reschedule it. 
+ **/ +static void +__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq) +{ + struct lpfc_hba *phba = cq->phba; + unsigned long delay; + bool workposted = false; + int ret; + + /* process and rearm the CQ */ + workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe, + &delay); + + if (delay) { + if (is_kdump_kernel()) + ret = queue_delayed_work(phba->wq, &cq->sched_irqwork, + delay); + else + ret = queue_delayed_work_on(cq->chann, phba->wq, + &cq->sched_irqwork, delay); + if (!ret) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0367 Cannot schedule queue work " + "for cqid=%d on CPU %d\n", + cq->queue_id, cq->chann); + } + + /* wake up worker thread if there are works to be done */ + if (workposted) + lpfc_worker_wake_up(phba); +} + +/** + * lpfc_sli4_hba_process_cq - fast-path work handler when started by + * interrupt + * @work: pointer to work element + * + * translates from the work handler and calls the fast-path handler. + **/ +static void +lpfc_sli4_hba_process_cq(struct work_struct *work) +{ + struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork); + + __lpfc_sli4_hba_process_cq(cq); +} + +/** + * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry + * @phba: Pointer to HBA context object. + * @eq: Pointer to the queue structure. + * @eqe: Pointer to fast-path event queue entry. + * @poll_mode: poll_mode to execute processing the cq. + * + * This routine process a event queue entry from the fast-path event queue. + * It will check the MajorCode and MinorCode to determine this is for a + * completion event on a completion queue, if not, an error shall be logged + * and just return. Otherwise, it will get to the corresponding completion + * queue and process all the entries on the completion queue, rearm the + * completion queue, and then return. 
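+ * The CQ is located by trying the cq_lookup[] table first, then the
+ * NVMET CQ set range, then the NVME LS CQ; anything else is handed to
+ * lpfc_sli4_sp_handle_eqe() as a slow-path event.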
+ **/ +static void +lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, + struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode) +{ + struct lpfc_queue *cq = NULL; + uint32_t qidx = eq->hdwq; + uint16_t cqid, id; + int ret; + + if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0366 Not a valid completion " + "event: majorcode=x%x, minorcode=x%x\n", + bf_get_le32(lpfc_eqe_major_code, eqe), + bf_get_le32(lpfc_eqe_minor_code, eqe)); + return; + } + + /* Get the reference to the corresponding CQ */ + cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); + + /* Use the fast lookup method first */ + if (cqid <= phba->sli4_hba.cq_max) { + cq = phba->sli4_hba.cq_lookup[cqid]; + if (cq) + goto work_cq; + } + + /* Next check for NVMET completion */ + if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { + id = phba->sli4_hba.nvmet_cqset[0]->queue_id; + if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { + /* Process NVMET unsol rcv */ + cq = phba->sli4_hba.nvmet_cqset[cqid - id]; + goto process_cq; + } + } + + if (phba->sli4_hba.nvmels_cq && + (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { + /* Process NVME unsol rcv */ + cq = phba->sli4_hba.nvmels_cq; + } + + /* Otherwise this is a Slow path event */ + if (cq == NULL) { + lpfc_sli4_sp_handle_eqe(phba, eqe, + phba->sli4_hba.hdwq[qidx].hba_eq); + return; + } + +process_cq: + if (unlikely(cqid != cq->queue_id)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0368 Miss-matched fast-path completion " + "queue identifier: eqcqid=%d, fcpcqid=%d\n", + cqid, cq->queue_id); + return; + } + +work_cq: +#if defined(CONFIG_SCSI_LPFC_DEBUG_FS) + if (phba->ktime_on) + cq->isr_timestamp = ktime_get_ns(); + else + cq->isr_timestamp = 0; +#endif + + switch (poll_mode) { + case LPFC_THREADED_IRQ: + __lpfc_sli4_hba_process_cq(cq); + break; + case LPFC_QUEUE_WORK: + default: + if (is_kdump_kernel()) + ret = queue_work(phba->wq, &cq->irqwork); + else + ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork); + if (!ret) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0383 Cannot schedule queue work " + "for CQ eqcqid=%d, cqid=%d on CPU %d\n", + cqid, cq->queue_id, + raw_smp_processor_id()); + break; + } +} + +/** + * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer + * @work: pointer to work element + * + * translates from the work handler and calls the fast-path handler. + **/ +static void +lpfc_sli4_dly_hba_process_cq(struct work_struct *work) +{ + struct lpfc_queue *cq = container_of(to_delayed_work(work), + struct lpfc_queue, sched_irqwork); + + __lpfc_sli4_hba_process_cq(cq); +} + +/** + * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device + * @irq: Interrupt number. + * @dev_id: The device context pointer. + * + * This function is directly called from the PCI layer as an interrupt + * service routine when device with SLI-4 interface spec is enabled with + * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB + * ring event in the HBA. However, when the device is enabled with either + * MSI or Pin-IRQ interrupt mode, this function is called as part of the + * device-level interrupt handler. When the PCI slot is in error recovery + * or the HBA is undergoing initialization, the interrupt handler will not + * process the interrupt. The SCSI FCP fast-path ring event are handled in + * the intrrupt context. This function is called without any lock held. 
+ * It gets the hbalock to access and update SLI data structures. Note that, + * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is + * equal to that of FCP CQ index. + * + * The link attention and ELS ring attention events are handled + * by the worker thread. The interrupt handler signals the worker thread + * and returns for these events. This function is called without any lock + * held. It gets the hbalock to access and update SLI data structures. + * + * This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD + * when interrupt is scheduled to be handled from a threaded irq context, or + * else returns IRQ_NONE. + **/ +irqreturn_t +lpfc_sli4_hba_intr_handler(int irq, void *dev_id) +{ + struct lpfc_hba *phba; + struct lpfc_hba_eq_hdl *hba_eq_hdl; + struct lpfc_queue *fpeq; + unsigned long iflag; + int hba_eqidx; + int ecount = 0; + struct lpfc_eq_intr_info *eqi; + + /* Get the driver's phba structure from the dev_id */ + hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; + phba = hba_eq_hdl->phba; + hba_eqidx = hba_eq_hdl->idx; + + if (unlikely(!phba)) + return IRQ_NONE; + if (unlikely(!phba->sli4_hba.hdwq)) + return IRQ_NONE; + + /* Get to the EQ struct associated with this vector */ + fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq; + if (unlikely(!fpeq)) + return IRQ_NONE; + + /* Check device state for handling interrupt */ + if (unlikely(lpfc_intr_state_check(phba))) { + /* Check again for link_state with lock held */ + spin_lock_irqsave(&phba->hbalock, iflag); + if (phba->link_state < LPFC_LINK_DOWN) + /* Flush, clear interrupt, and rearm the EQ */ + lpfc_sli4_eqcq_flush(phba, fpeq); + spin_unlock_irqrestore(&phba->hbalock, iflag); + return IRQ_NONE; + } + + switch (fpeq->poll_mode) { + case LPFC_THREADED_IRQ: + /* CGN mgmt is mutually exclusive from irq processing */ + if (phba->cmf_active_mode == LPFC_CFG_OFF) + return IRQ_WAKE_THREAD; + fallthrough; + case LPFC_QUEUE_WORK: + default: + eqi = this_cpu_ptr(phba->sli4_hba.eq_info); + eqi->icnt++; + + fpeq->last_cpu = raw_smp_processor_id(); + + if (eqi->icnt > LPFC_EQD_ISR_TRIGGER && + fpeq->q_flag & HBA_EQ_DELAY_CHK && + phba->cfg_auto_imax && + fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && + phba->sli.sli_flag & LPFC_SLI_USE_EQDR) + lpfc_sli4_mod_hba_eq_delay(phba, fpeq, + LPFC_MAX_AUTO_EQ_DELAY); + + /* process and rearm the EQ */ + ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM, + LPFC_QUEUE_WORK); + + if (unlikely(ecount == 0)) { + fpeq->EQ_no_entry++; + if (phba->intr_type == MSIX) + /* MSI-X treated interrupt served as no EQ share INT */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0358 MSI-X interrupt with no EQE\n"); + else + /* Non MSI-X treated on interrupt as EQ share INT */ + return IRQ_NONE; + } + } + + return IRQ_HANDLED; +} /* lpfc_sli4_hba_intr_handler */ + +/** + * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device + * @irq: Interrupt number. + * @dev_id: The device context pointer. + * + * This function is the device-level interrupt handler to device with SLI-4 + * interface spec, called from the PCI layer when either MSI or Pin-IRQ + * interrupt mode is enabled and there is an event in the HBA which requires + * driver attention. This function invokes the slow-path interrupt attention + * handling function and fast-path interrupt attention handling function in + * turn to process the relevant HBA attention events. This function is called + * without any lock held. It gets the hbalock to access and update SLI data + * structures. 
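+ *
+ * A minimal registration sketch for the two interrupt modes; the
+ * threaded bottom-half name, IRQ names, and flags here are assumptions
+ * for illustration, not the driver's actual setup code:
+ *
+ *   if (phba->intr_type == MSIX)
+ *           rc = request_threaded_irq(pci_irq_vector(phba->pcidev, idx),
+ *                                     lpfc_sli4_hba_intr_handler,
+ *                                     hypothetical_eq_thread_fn, 0,
+ *                                     "lpfc-eq",
+ *                                     &phba->sli4_hba.hba_eq_hdl[idx]);
+ *   else
+ *           rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+ *                            IRQF_SHARED, "lpfc", phba);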
+ * + * This function returns IRQ_HANDLED when interrupt is handled, else it + * returns IRQ_NONE. + **/ +irqreturn_t +lpfc_sli4_intr_handler(int irq, void *dev_id) +{ + struct lpfc_hba *phba; + irqreturn_t hba_irq_rc; + bool hba_handled = false; + int qidx; + + /* Get the driver's phba structure from the dev_id */ + phba = (struct lpfc_hba *)dev_id; + + if (unlikely(!phba)) + return IRQ_NONE; + + /* + * Invoke fast-path host attention interrupt handling as appropriate. + */ + for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { + hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, + &phba->sli4_hba.hba_eq_hdl[qidx]); + if (hba_irq_rc == IRQ_HANDLED) + hba_handled |= true; + } + + return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; +} /* lpfc_sli4_intr_handler */ + +void lpfc_sli4_poll_hbtimer(struct timer_list *t) +{ + struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer); + struct lpfc_queue *eq; + + rcu_read_lock(); + + list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list) + lpfc_sli4_poll_eq(eq); + if (!list_empty(&phba->poll_list)) + mod_timer(&phba->cpuhp_poll_timer, + jiffies + msecs_to_jiffies(LPFC_POLL_HB)); + + rcu_read_unlock(); +} + +static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq) +{ + struct lpfc_hba *phba = eq->phba; + + /* kickstart slowpath processing if needed */ + if (list_empty(&phba->poll_list)) + mod_timer(&phba->cpuhp_poll_timer, + jiffies + msecs_to_jiffies(LPFC_POLL_HB)); + + list_add_rcu(&eq->_poll_list, &phba->poll_list); + synchronize_rcu(); +} + +static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq) +{ + struct lpfc_hba *phba = eq->phba; + + /* Disable slowpath processing for this eq. Kick start the eq + * by RE-ARMING the eq's ASAP + */ + list_del_rcu(&eq->_poll_list); + synchronize_rcu(); + + if (list_empty(&phba->poll_list)) + del_timer_sync(&phba->cpuhp_poll_timer); +} + +void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba) +{ + struct lpfc_queue *eq, *next; + + list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) + list_del(&eq->_poll_list); + + INIT_LIST_HEAD(&phba->poll_list); + synchronize_rcu(); +} + +static inline void +__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode) +{ + if (mode == eq->mode) + return; + /* + * currently this function is only called during a hotplug + * event and the cpu on which this function is executing + * is going offline. By now the hotplug has instructed + * the scheduler to remove this cpu from cpu active mask. + * So we don't need to work about being put aside by the + * scheduler for a high priority process. Yes, the inte- + * rrupts could come but they are known to retire ASAP. + */ + + /* Disable polling in the fastpath */ + WRITE_ONCE(eq->mode, mode); + /* flush out the store buffer */ + smp_wmb(); + + /* + * Add this eq to the polling list and start polling. For + * a grace period both interrupt handler and poller will + * try to process the eq _but_ that's fine. We have a + * synchronization mechanism in place (queue_claimed) to + * deal with it. This is just a draining phase for int- + * errupt handler (not eq's) as we have guranteed through + * barrier that all the CPUs have seen the new CQ_POLLED + * state. which will effectively disable the REARMING of + * the EQ. The whole idea is eq's die off eventually as + * we are not rearming EQ's anymore. + */ + mode ? 
lpfc_sli4_add_to_poll_list(eq) : + lpfc_sli4_remove_from_poll_list(eq); +} + +void lpfc_sli4_start_polling(struct lpfc_queue *eq) +{ + __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL); +} + +void lpfc_sli4_stop_polling(struct lpfc_queue *eq) +{ + struct lpfc_hba *phba = eq->phba; + + __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT); + + /* Kick start for the pending io's in h/w. + * Once we switch back to interrupt processing on a eq + * the io path completion will only arm eq's when it + * receives a completion. But since eq's are in disa- + * rmed state it doesn't receive a completion. This + * creates a deadlock scenaro. + */ + phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM); +} + +/** + * lpfc_sli4_queue_free - free a queue structure and associated memory + * @queue: The queue structure to free. + * + * This function frees a queue structure and the DMAable memory used for + * the host resident queue. This function must be called after destroying the + * queue on the HBA. + **/ +void +lpfc_sli4_queue_free(struct lpfc_queue *queue) +{ + struct lpfc_dmabuf *dmabuf; + + if (!queue) + return; + + if (!list_empty(&queue->wq_list)) + list_del(&queue->wq_list); + + while (!list_empty(&queue->page_list)) { + list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, + list); + dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size, + dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + } + if (queue->rqbp) { + lpfc_free_rq_buffer(queue->phba, queue); + kfree(queue->rqbp); + } + + if (!list_empty(&queue->cpu_list)) + list_del(&queue->cpu_list); + + kfree(queue); + return; +} + +/** + * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure + * @phba: The HBA that this queue is being created on. + * @page_size: The size of a queue page + * @entry_size: The size of each queue entry for this queue. + * @entry_count: The number of entries that this queue will handle. + * @cpu: The cpu that will primarily utilize this queue. + * + * This function allocates a queue structure and the DMAable memory used for + * the host resident queue. This function must be called before creating the + * queue on the HBA. + **/ +struct lpfc_queue * +lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, + uint32_t entry_size, uint32_t entry_count, int cpu) +{ + struct lpfc_queue *queue; + struct lpfc_dmabuf *dmabuf; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + uint16_t x, pgcnt; + + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = page_size; + + pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size; + + /* If needed, Adjust page count to match the max the adapter supports */ + if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt) + pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt; + + queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt), + GFP_KERNEL, cpu_to_node(cpu)); + if (!queue) + return NULL; + + INIT_LIST_HEAD(&queue->list); + INIT_LIST_HEAD(&queue->_poll_list); + INIT_LIST_HEAD(&queue->wq_list); + INIT_LIST_HEAD(&queue->wqfull_list); + INIT_LIST_HEAD(&queue->page_list); + INIT_LIST_HEAD(&queue->child_list); + INIT_LIST_HEAD(&queue->cpu_list); + + /* Set queue parameters now. If the system cannot provide memory + * resources, the free routine needs to know what was allocated. 
+ */ + queue->page_count = pgcnt; + queue->q_pgs = (void **)&queue[1]; + queue->entry_cnt_per_pg = hw_page_size / entry_size; + queue->entry_size = entry_size; + queue->entry_count = entry_count; + queue->page_size = hw_page_size; + queue->phba = phba; + + for (x = 0; x < queue->page_count; x++) { + dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL, + dev_to_node(&phba->pcidev->dev)); + if (!dmabuf) + goto out_fail; + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + hw_page_size, &dmabuf->phys, + GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + goto out_fail; + } + dmabuf->buffer_tag = x; + list_add_tail(&dmabuf->list, &queue->page_list); + /* use lpfc_sli4_qe to index a paritcular entry in this page */ + queue->q_pgs[x] = dmabuf->virt; + } + INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq); + INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq); + INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq); + INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq); + + /* notify_interval will be set during q creation */ + + return queue; +out_fail: + lpfc_sli4_queue_free(queue); + return NULL; +} + +/** + * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory + * @phba: HBA structure that indicates port to create a queue on. + * @pci_barset: PCI BAR set flag. + * + * This function shall perform iomap of the specified PCI BAR address to host + * memory address if not already done so and return it. The returned host + * memory address can be NULL. + */ +static void __iomem * +lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) +{ + if (!phba->pcidev) + return NULL; + + switch (pci_barset) { + case WQ_PCI_BAR_0_AND_1: + return phba->pci_bar0_memmap_p; + case WQ_PCI_BAR_2_AND_3: + return phba->pci_bar2_memmap_p; + case WQ_PCI_BAR_4_AND_5: + return phba->pci_bar4_memmap_p; + default: + break; + } + return NULL; +} + +/** + * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs + * @phba: HBA structure that EQs are on. + * @startq: The starting EQ index to modify + * @numq: The number of EQs (consecutive indexes) to modify + * @usdelay: amount of delay + * + * This function revises the EQ delay on 1 or more EQs. The EQ delay + * is set either by writing to a register (if supported by the SLI Port) + * or by mailbox command. The mailbox command allows several EQs to be + * updated at once. + * + * The @phba struct is used to send a mailbox command to HBA. The @startq + * is used to get the starting EQ index to change. The @numq value is + * used to specify how many consecutive EQ indexes, starting at EQ index, + * are to be changed. This function is asynchronous and will wait for any + * mailbox commands to finish before returning. + * + * On success this function will return a zero. If unable to allocate + * enough memory this function will return -ENOMEM. If a mailbox command + * fails this function will return -ENXIO. Note: on ENXIO, some EQs may + * have had their delay multipler changed. + **/ +void +lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, + uint32_t numq, uint32_t usdelay) +{ + struct lpfc_mbx_modify_eq_delay *eq_delay; + LPFC_MBOXQ_t *mbox; + struct lpfc_queue *eq; + int cnt = 0, rc, length; + uint32_t shdr_status, shdr_add_status; + uint32_t dmult; + int qidx; + union lpfc_sli4_cfg_shdr *shdr; + + if (startq >= phba->cfg_irq_chann) + return; + + if (usdelay > 0xFFFF) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME, + "6429 usdelay %d too large. 
Scaled down to " + "0xFFFF.\n", usdelay); + usdelay = 0xFFFF; + } + + /* set values by EQ_DELAY register if supported */ + if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { + for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { + eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; + if (!eq) + continue; + + lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay); + + if (++cnt >= numq) + break; + } + return; + } + + /* Otherwise, set values by mailbox cmd */ + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6428 Failed allocating mailbox cmd buffer." + " EQ delay was not set.\n"); + return; + } + length = (sizeof(struct lpfc_mbx_modify_eq_delay) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, + length, LPFC_SLI4_MBX_EMBED); + eq_delay = &mbox->u.mqe.un.eq_delay; + + /* Calculate delay multiper from maximum interrupt per second */ + dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC; + if (dmult) + dmult--; + if (dmult > LPFC_DMULT_MAX) + dmult = LPFC_DMULT_MAX; + + for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { + eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; + if (!eq) + continue; + eq->q_mode = usdelay; + eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; + eq_delay->u.request.eq[cnt].phase = 0; + eq_delay->u.request.eq[cnt].delay_multi = dmult; + + if (++cnt >= numq) + break; + } + eq_delay->u.request.num_eq = cnt; + + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->ctx_ndlp = NULL; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2512 MODIFY_EQ_DELAY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + } + mempool_free(mbox, phba->mbox_mem_pool); + return; +} + +/** + * lpfc_eq_create - Create an Event Queue on the HBA + * @phba: HBA structure that indicates port to create a queue on. + * @eq: The queue structure to use to create the event queue. + * @imax: The maximum interrupt per second limit. + * + * This function creates an event queue, as detailed in @eq, on a port, + * described by @phba by sending an EQ_CREATE mailbox command to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @eq struct + * is used to get the entry count and entry size that are necessary to + * determine the number of pages to allocate and use for this queue. This + * function will send the EQ_CREATE mailbox command to the HBA to setup the + * event queue. This function is asynchronous and will wait for the mailbox + * command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. 
+ **/ +int +lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) +{ + struct lpfc_mbx_eq_create *eq_create; + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + struct lpfc_dmabuf *dmabuf; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + uint16_t dmult; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + + /* sanity check on queue memory */ + if (!eq) + return -ENODEV; + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = SLI4_PAGE_SIZE; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_eq_create) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_EQ_CREATE, + length, LPFC_SLI4_MBX_EMBED); + eq_create = &mbox->u.mqe.un.eq_create; + shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; + bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, + eq->page_count); + bf_set(lpfc_eq_context_size, &eq_create->u.request.context, + LPFC_EQE_SIZE); + bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); + + /* Use version 2 of CREATE_EQ if eqav is set */ + if (phba->sli4_hba.pc_sli4_params.eqav) { + bf_set(lpfc_mbox_hdr_version, &shdr->request, + LPFC_Q_CREATE_VERSION_2); + bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context, + phba->sli4_hba.pc_sli4_params.eqav); + } + + /* don't setup delay multiplier using EQ_CREATE */ + dmult = 0; + bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, + dmult); + switch (eq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0360 Unsupported EQ count. (%d)\n", + eq->entry_count); + if (eq->entry_count < 256) { + status = -EINVAL; + goto out; + } + fallthrough; /* otherwise default to smallest count */ + case 256: + bf_set(lpfc_eq_context_count, &eq_create->u.request.context, + LPFC_EQ_CNT_256); + break; + case 512: + bf_set(lpfc_eq_context_count, &eq_create->u.request.context, + LPFC_EQ_CNT_512); + break; + case 1024: + bf_set(lpfc_eq_context_count, &eq_create->u.request.context, + LPFC_EQ_CNT_1024); + break; + case 2048: + bf_set(lpfc_eq_context_count, &eq_create->u.request.context, + LPFC_EQ_CNT_2048); + break; + case 4096: + bf_set(lpfc_eq_context_count, &eq_create->u.request.context, + LPFC_EQ_CNT_4096); + break; + } + list_for_each_entry(dmabuf, &eq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); + eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->ctx_buf = NULL; + mbox->ctx_ndlp = NULL; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2500 EQ_CREATE mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + eq->type = LPFC_EQ; + eq->subtype = LPFC_NONE; + eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); + if (eq->queue_id == 0xFFFF) + status = -ENXIO; + eq->host_index = 0; + eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL; + eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT; +out: + mempool_free(mbox, phba->mbox_mem_pool); + 
return status; +} + +/** + * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler + * @irq: Interrupt number. + * @dev_id: The device context pointer. + * + * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within + * threaded irq context. + * + * Returns + * IRQ_HANDLED - interrupt is handled + * IRQ_NONE - otherwise + **/ +irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id) +{ + struct lpfc_hba *phba; + struct lpfc_hba_eq_hdl *hba_eq_hdl; + struct lpfc_queue *fpeq; + int ecount = 0; + int hba_eqidx; + struct lpfc_eq_intr_info *eqi; + + /* Get the driver's phba structure from the dev_id */ + hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; + phba = hba_eq_hdl->phba; + hba_eqidx = hba_eq_hdl->idx; + + if (unlikely(!phba)) + return IRQ_NONE; + if (unlikely(!phba->sli4_hba.hdwq)) + return IRQ_NONE; + + /* Get to the EQ struct associated with this vector */ + fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq; + if (unlikely(!fpeq)) + return IRQ_NONE; + + eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id()); + eqi->icnt++; + + fpeq->last_cpu = raw_smp_processor_id(); + + if (eqi->icnt > LPFC_EQD_ISR_TRIGGER && + fpeq->q_flag & HBA_EQ_DELAY_CHK && + phba->cfg_auto_imax && + fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && + phba->sli.sli_flag & LPFC_SLI_USE_EQDR) + lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); + + /* process and rearm the EQ */ + ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM, + LPFC_THREADED_IRQ); + + if (unlikely(ecount == 0)) { + fpeq->EQ_no_entry++; + if (phba->intr_type == MSIX) + /* MSI-X treated interrupt served as no EQ share INT */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3358 MSI-X interrupt with no EQE\n"); + else + /* Non MSI-X treated on interrupt as EQ share INT */ + return IRQ_NONE; + } + return IRQ_HANDLED; +} + +/** + * lpfc_cq_create - Create a Completion Queue on the HBA + * @phba: HBA structure that indicates port to create a queue on. + * @cq: The queue structure to use to create the completion queue. + * @eq: The event queue to bind this completion queue to. + * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc). + * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc). + * + * This function creates a completion queue, as detailed in @wq, on a port, + * described by @phba by sending a CQ_CREATE mailbox command to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @cq struct + * is used to get the entry count and entry size that are necessary to + * determine the number of pages to allocate and use for this queue. The @eq + * is used to indicate which event queue to bind this completion queue to. This + * function will send the CQ_CREATE mailbox command to the HBA to setup the + * completion queue. This function is asynchronous and will wait for the mailbox + * command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. 
+ **/ +int +lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_queue *eq, uint32_t type, uint32_t subtype) +{ + struct lpfc_mbx_cq_create *cq_create; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + /* sanity check on queue memory */ + if (!cq || !eq) + return -ENODEV; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_cq_create) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_CQ_CREATE, + length, LPFC_SLI4_MBX_EMBED); + cq_create = &mbox->u.mqe.un.cq_create; + shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; + bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, + cq->page_count); + bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); + bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); + bf_set(lpfc_mbox_hdr_version, &shdr->request, + phba->sli4_hba.pc_sli4_params.cqv); + if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { + bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, + (cq->page_size / SLI4_PAGE_SIZE)); + bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, + eq->queue_id); + bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context, + phba->sli4_hba.pc_sli4_params.cqav); + } else { + bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, + eq->queue_id); + } + switch (cq->entry_count) { + case 2048: + case 4096: + if (phba->sli4_hba.pc_sli4_params.cqv == + LPFC_Q_CREATE_VERSION_2) { + cq_create->u.request.context.lpfc_cq_context_count = + cq->entry_count; + bf_set(lpfc_cq_context_count, + &cq_create->u.request.context, + LPFC_CQ_CNT_WORD7); + break; + } + fallthrough; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0361 Unsupported CQ count: " + "entry cnt %d sz %d pg cnt %d\n", + cq->entry_count, cq->entry_size, + cq->page_count); + if (cq->entry_count < 256) { + status = -EINVAL; + goto out; + } + fallthrough; /* otherwise default to smallest count */ + case 256: + bf_set(lpfc_cq_context_count, &cq_create->u.request.context, + LPFC_CQ_CNT_256); + break; + case 512: + bf_set(lpfc_cq_context_count, &cq_create->u.request.context, + LPFC_CQ_CNT_512); + break; + case 1024: + bf_set(lpfc_cq_context_count, &cq_create->u.request.context, + LPFC_CQ_CNT_1024); + break; + } + list_for_each_entry(dmabuf, &cq->page_list, list) { + memset(dmabuf->virt, 0, cq->page_size); + cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + + /* The IOCTL status is embedded in the mailbox subheader. 
*/ + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2501 CQ_CREATE mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + goto out; + } + cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); + if (cq->queue_id == 0xFFFF) { + status = -ENXIO; + goto out; + } + /* link the cq onto the parent eq child list */ + list_add_tail(&cq->list, &eq->child_list); + /* Set up completion queue's type and subtype */ + cq->type = type; + cq->subtype = subtype; + cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); + cq->assoc_qid = eq->queue_id; + cq->assoc_qp = eq; + cq->host_index = 0; + cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; + cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count); + + if (cq->queue_id > phba->sli4_hba.cq_max) + phba->sli4_hba.cq_max = cq->queue_id; +out: + mempool_free(mbox, phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ + * @phba: HBA structure that indicates port to create a queue on. + * @cqp: The queue structure array to use to create the completion queues. + * @hdwq: The hardware queue array with the EQ to bind completion queues to. + * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc). + * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc). + * + * This function creates a set of completion queue, s to support MRQ + * as detailed in @cqp, on a port, + * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @cq struct + * is used to get the entry count and entry size that are necessary to + * determine the number of pages to allocate and use for this queue. The @eq + * is used to indicate which event queue to bind this completion queue to. This + * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the + * completion queue. This function is asynchronous and will wait for the mailbox + * command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. 
+ **/ +int +lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, + struct lpfc_sli4_hdw_queue *hdwq, uint32_t type, + uint32_t subtype) +{ + struct lpfc_queue *cq; + struct lpfc_queue *eq; + struct lpfc_mbx_cq_create_set *cq_set; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + int rc, length, alloclen, status = 0; + int cnt, idx, numcq, page_idx = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + + /* sanity check on queue memory */ + numcq = phba->cfg_nvmet_mrq; + if (!cqp || !hdwq || !numcq) + return -ENODEV; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + length = sizeof(struct lpfc_mbx_cq_create_set); + length += ((numcq * cqp[0]->page_count) * + sizeof(struct dma_address)); + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, + LPFC_SLI4_MBX_NEMBED); + if (alloclen < length) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3098 Allocated DMA memory size (%d) is " + "less than the requested DMA memory size " + "(%d)\n", alloclen, length); + status = -ENOMEM; + goto out; + } + cq_set = mbox->sge_array->addr[0]; + shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; + bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); + + for (idx = 0; idx < numcq; idx++) { + cq = cqp[idx]; + eq = hdwq[idx].hba_eq; + if (!cq || !eq) { + status = -ENOMEM; + goto out; + } + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = cq->page_size; + + switch (idx) { + case 0: + bf_set(lpfc_mbx_cq_create_set_page_size, + &cq_set->u.request, + (hw_page_size / SLI4_PAGE_SIZE)); + bf_set(lpfc_mbx_cq_create_set_num_pages, + &cq_set->u.request, cq->page_count); + bf_set(lpfc_mbx_cq_create_set_evt, + &cq_set->u.request, 1); + bf_set(lpfc_mbx_cq_create_set_valid, + &cq_set->u.request, 1); + bf_set(lpfc_mbx_cq_create_set_cqe_size, + &cq_set->u.request, 0); + bf_set(lpfc_mbx_cq_create_set_num_cq, + &cq_set->u.request, numcq); + bf_set(lpfc_mbx_cq_create_set_autovalid, + &cq_set->u.request, + phba->sli4_hba.pc_sli4_params.cqav); + switch (cq->entry_count) { + case 2048: + case 4096: + if (phba->sli4_hba.pc_sli4_params.cqv == + LPFC_Q_CREATE_VERSION_2) { + bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + &cq_set->u.request, + cq->entry_count); + bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + &cq_set->u.request, + LPFC_CQ_CNT_WORD7); + break; + } + fallthrough; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3118 Bad CQ count. 
(%d)\n", + cq->entry_count); + if (cq->entry_count < 256) { + status = -EINVAL; + goto out; + } + fallthrough; /* otherwise default to smallest */ + case 256: + bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + &cq_set->u.request, LPFC_CQ_CNT_256); + break; + case 512: + bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + &cq_set->u.request, LPFC_CQ_CNT_512); + break; + case 1024: + bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + &cq_set->u.request, LPFC_CQ_CNT_1024); + break; + } + bf_set(lpfc_mbx_cq_create_set_eq_id0, + &cq_set->u.request, eq->queue_id); + break; + case 1: + bf_set(lpfc_mbx_cq_create_set_eq_id1, + &cq_set->u.request, eq->queue_id); + break; + case 2: + bf_set(lpfc_mbx_cq_create_set_eq_id2, + &cq_set->u.request, eq->queue_id); + break; + case 3: + bf_set(lpfc_mbx_cq_create_set_eq_id3, + &cq_set->u.request, eq->queue_id); + break; + case 4: + bf_set(lpfc_mbx_cq_create_set_eq_id4, + &cq_set->u.request, eq->queue_id); + break; + case 5: + bf_set(lpfc_mbx_cq_create_set_eq_id5, + &cq_set->u.request, eq->queue_id); + break; + case 6: + bf_set(lpfc_mbx_cq_create_set_eq_id6, + &cq_set->u.request, eq->queue_id); + break; + case 7: + bf_set(lpfc_mbx_cq_create_set_eq_id7, + &cq_set->u.request, eq->queue_id); + break; + case 8: + bf_set(lpfc_mbx_cq_create_set_eq_id8, + &cq_set->u.request, eq->queue_id); + break; + case 9: + bf_set(lpfc_mbx_cq_create_set_eq_id9, + &cq_set->u.request, eq->queue_id); + break; + case 10: + bf_set(lpfc_mbx_cq_create_set_eq_id10, + &cq_set->u.request, eq->queue_id); + break; + case 11: + bf_set(lpfc_mbx_cq_create_set_eq_id11, + &cq_set->u.request, eq->queue_id); + break; + case 12: + bf_set(lpfc_mbx_cq_create_set_eq_id12, + &cq_set->u.request, eq->queue_id); + break; + case 13: + bf_set(lpfc_mbx_cq_create_set_eq_id13, + &cq_set->u.request, eq->queue_id); + break; + case 14: + bf_set(lpfc_mbx_cq_create_set_eq_id14, + &cq_set->u.request, eq->queue_id); + break; + case 15: + bf_set(lpfc_mbx_cq_create_set_eq_id15, + &cq_set->u.request, eq->queue_id); + break; + } + + /* link the cq onto the parent eq child list */ + list_add_tail(&cq->list, &eq->child_list); + /* Set up completion queue's type and subtype */ + cq->type = type; + cq->subtype = subtype; + cq->assoc_qid = eq->queue_id; + cq->assoc_qp = eq; + cq->host_index = 0; + cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; + cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, + cq->entry_count); + cq->chann = idx; + + rc = 0; + list_for_each_entry(dmabuf, &cq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); + cnt = page_idx + dmabuf->buffer_tag; + cq_set->u.request.page[cnt].addr_lo = + putPaddrLow(dmabuf->phys); + cq_set->u.request.page[cnt].addr_hi = + putPaddrHigh(dmabuf->phys); + rc++; + } + page_idx += rc; + } + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + + /* The IOCTL status is embedded in the mailbox subheader. 
*/ + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3119 CQ_CREATE_SET mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + goto out; + } + rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response); + if (rc == 0xFFFF) { + status = -ENXIO; + goto out; + } + + for (idx = 0; idx < numcq; idx++) { + cq = cqp[idx]; + cq->queue_id = rc + idx; + if (cq->queue_id > phba->sli4_hba.cq_max) + phba->sli4_hba.cq_max = cq->queue_id; + } + +out: + lpfc_sli4_mbox_cmd_free(phba, mbox); + return status; +} + +/** + * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration + * @phba: HBA structure that indicates port to create a queue on. + * @mq: The queue structure to use to create the mailbox queue. + * @mbox: An allocated pointer to type LPFC_MBOXQ_t + * @cq: The completion queue to associate with this cq. + * + * This function provides failback (fb) functionality when the + * mq_create_ext fails on older FW generations. It's purpose is identical + * to mq_create_ext otherwise. + * + * This routine cannot fail as all attributes were previously accessed and + * initialized in mq_create_ext. + **/ +static void +lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, + LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) +{ + struct lpfc_mbx_mq_create *mq_create; + struct lpfc_dmabuf *dmabuf; + int length; + + length = (sizeof(struct lpfc_mbx_mq_create) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_MQ_CREATE, + length, LPFC_SLI4_MBX_EMBED); + mq_create = &mbox->u.mqe.un.mq_create; + bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, + mq->page_count); + bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, + cq->queue_id); + bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); + switch (mq->entry_count) { + case 16: + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, + LPFC_MQ_RING_SIZE_16); + break; + case 32: + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, + LPFC_MQ_RING_SIZE_32); + break; + case 64: + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, + LPFC_MQ_RING_SIZE_64); + break; + case 128: + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, + LPFC_MQ_RING_SIZE_128); + break; + } + list_for_each_entry(dmabuf, &mq->page_list, list) { + mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } +} + +/** + * lpfc_mq_create - Create a mailbox Queue on the HBA + * @phba: HBA structure that indicates port to create a queue on. + * @mq: The queue structure to use to create the mailbox queue. + * @cq: The completion queue to associate with this cq. + * @subtype: The queue's subtype. + * + * This function creates a mailbox queue, as detailed in @mq, on a port, + * described by @phba by sending a MQ_CREATE mailbox command to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @cq struct + * is used to get the entry count and entry size that are necessary to + * determine the number of pages to allocate and use for this queue. 
This + * function will send the MQ_CREATE mailbox command to the HBA to setup the + * mailbox queue. This function is asynchronous and will wait for the mailbox + * command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. + **/ +int32_t +lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, + struct lpfc_queue *cq, uint32_t subtype) +{ + struct lpfc_mbx_mq_create *mq_create; + struct lpfc_mbx_mq_create_ext *mq_create_ext; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + + /* sanity check on queue memory */ + if (!mq || !cq) + return -ENODEV; + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = SLI4_PAGE_SIZE; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_mq_create_ext) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_MQ_CREATE_EXT, + length, LPFC_SLI4_MBX_EMBED); + + mq_create_ext = &mbox->u.mqe.un.mq_create_ext; + shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; + bf_set(lpfc_mbx_mq_create_ext_num_pages, + &mq_create_ext->u.request, mq->page_count); + bf_set(lpfc_mbx_mq_create_ext_async_evt_link, + &mq_create_ext->u.request, 1); + bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, + &mq_create_ext->u.request, 1); + bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, + &mq_create_ext->u.request, 1); + bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, + &mq_create_ext->u.request, 1); + bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, + &mq_create_ext->u.request, 1); + bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); + bf_set(lpfc_mbox_hdr_version, &shdr->request, + phba->sli4_hba.pc_sli4_params.mqv); + if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) + bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, + cq->queue_id); + else + bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, + cq->queue_id); + switch (mq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0362 Unsupported MQ count. 
(%d)\n", + mq->entry_count); + if (mq->entry_count < 16) { + status = -EINVAL; + goto out; + } + fallthrough; /* otherwise default to smallest count */ + case 16: + bf_set(lpfc_mq_context_ring_size, + &mq_create_ext->u.request.context, + LPFC_MQ_RING_SIZE_16); + break; + case 32: + bf_set(lpfc_mq_context_ring_size, + &mq_create_ext->u.request.context, + LPFC_MQ_RING_SIZE_32); + break; + case 64: + bf_set(lpfc_mq_context_ring_size, + &mq_create_ext->u.request.context, + LPFC_MQ_RING_SIZE_64); + break; + case 128: + bf_set(lpfc_mq_context_ring_size, + &mq_create_ext->u.request.context, + LPFC_MQ_RING_SIZE_128); + break; + } + list_for_each_entry(dmabuf, &mq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); + mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, + &mq_create_ext->u.response); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2795 MQ_CREATE_EXT failed with " + "status x%x. Failback to MQ_CREATE.\n", + rc); + lpfc_mq_create_fb_init(phba, mq, mbox, cq); + mq_create = &mbox->u.mqe.un.mq_create; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; + mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, + &mq_create->u.response); + } + + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2502 MQ_CREATE mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + goto out; + } + if (mq->queue_id == 0xFFFF) { + status = -ENXIO; + goto out; + } + mq->type = LPFC_MQ; + mq->assoc_qid = cq->queue_id; + mq->subtype = subtype; + mq->host_index = 0; + mq->hba_index = 0; + + /* link the mq onto the parent cq child list */ + list_add_tail(&mq->list, &cq->child_list); +out: + mempool_free(mbox, phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_wq_create - Create a Work Queue on the HBA + * @phba: HBA structure that indicates port to create a queue on. + * @wq: The queue structure to use to create the work queue. + * @cq: The completion queue to bind this work queue to. + * @subtype: The subtype of the work queue indicating its functionality. + * + * This function creates a work queue, as detailed in @wq, on a port, described + * by @phba by sending a WQ_CREATE mailbox command to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @wq struct + * is used to get the entry count and entry size that are necessary to + * determine the number of pages to allocate and use for this queue. The @cq + * is used to indicate which completion queue to bind this work queue to. This + * function will send the WQ_CREATE mailbox command to the HBA to setup the + * work queue. This function is asynchronous and will wait for the mailbox + * command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. 
+ **/ +int +lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, + struct lpfc_queue *cq, uint32_t subtype) +{ + struct lpfc_mbx_wq_create *wq_create; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + struct dma_address *page; + void __iomem *bar_memmap_p; + uint32_t db_offset; + uint16_t pci_barset; + uint8_t dpp_barset; + uint32_t dpp_offset; + uint8_t wq_create_version; +#ifdef CONFIG_X86 + unsigned long pg_addr; +#endif + + /* sanity check on queue memory */ + if (!wq || !cq) + return -ENODEV; + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = wq->page_size; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_wq_create) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, + length, LPFC_SLI4_MBX_EMBED); + wq_create = &mbox->u.mqe.un.wq_create; + shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; + bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, + wq->page_count); + bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, + cq->queue_id); + + /* wqv is the earliest version supported, NOT the latest */ + bf_set(lpfc_mbox_hdr_version, &shdr->request, + phba->sli4_hba.pc_sli4_params.wqv); + + if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) || + (wq->page_size > SLI4_PAGE_SIZE)) + wq_create_version = LPFC_Q_CREATE_VERSION_1; + else + wq_create_version = LPFC_Q_CREATE_VERSION_0; + + switch (wq_create_version) { + case LPFC_Q_CREATE_VERSION_1: + bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, + wq->entry_count); + bf_set(lpfc_mbox_hdr_version, &shdr->request, + LPFC_Q_CREATE_VERSION_1); + + switch (wq->entry_size) { + default: + case 64: + bf_set(lpfc_mbx_wq_create_wqe_size, + &wq_create->u.request_1, + LPFC_WQ_WQE_SIZE_64); + break; + case 128: + bf_set(lpfc_mbx_wq_create_wqe_size, + &wq_create->u.request_1, + LPFC_WQ_WQE_SIZE_128); + break; + } + /* Request DPP by default */ + bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1); + bf_set(lpfc_mbx_wq_create_page_size, + &wq_create->u.request_1, + (wq->page_size / SLI4_PAGE_SIZE)); + page = wq_create->u.request_1.page; + break; + default: + page = wq_create->u.request.page; + break; + } + + list_for_each_entry(dmabuf, &wq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); + page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); + page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); + } + + if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) + bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. 
*/ + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2503 WQ_CREATE mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + goto out; + } + + if (wq_create_version == LPFC_Q_CREATE_VERSION_0) + wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, + &wq_create->u.response); + else + wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id, + &wq_create->u.response_1); + + if (wq->queue_id == 0xFFFF) { + status = -ENXIO; + goto out; + } + + wq->db_format = LPFC_DB_LIST_FORMAT; + if (wq_create_version == LPFC_Q_CREATE_VERSION_0) { + if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { + wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, + &wq_create->u.response); + if ((wq->db_format != LPFC_DB_LIST_FORMAT) && + (wq->db_format != LPFC_DB_RING_FORMAT)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3265 WQ[%d] doorbell format " + "not supported: x%x\n", + wq->queue_id, wq->db_format); + status = -EINVAL; + goto out; + } + pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, + &wq_create->u.response); + bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, + pci_barset); + if (!bar_memmap_p) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3263 WQ[%d] failed to memmap " + "pci barset:x%x\n", + wq->queue_id, pci_barset); + status = -ENOMEM; + goto out; + } + db_offset = wq_create->u.response.doorbell_offset; + if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && + (db_offset != LPFC_ULP1_WQ_DOORBELL)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3252 WQ[%d] doorbell offset " + "not supported: x%x\n", + wq->queue_id, db_offset); + status = -EINVAL; + goto out; + } + wq->db_regaddr = bar_memmap_p + db_offset; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3264 WQ[%d]: barset:x%x, offset:x%x, " + "format:x%x\n", wq->queue_id, + pci_barset, db_offset, wq->db_format); + } else + wq->db_regaddr = phba->sli4_hba.WQDBregaddr; + } else { + /* Check if DPP was honored by the firmware */ + wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp, + &wq_create->u.response_1); + if (wq->dpp_enable) { + pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set, + &wq_create->u.response_1); + bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, + pci_barset); + if (!bar_memmap_p) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3267 WQ[%d] failed to memmap " + "pci barset:x%x\n", + wq->queue_id, pci_barset); + status = -ENOMEM; + goto out; + } + db_offset = wq_create->u.response_1.doorbell_offset; + wq->db_regaddr = bar_memmap_p + db_offset; + wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id, + &wq_create->u.response_1); + dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar, + &wq_create->u.response_1); + bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, + dpp_barset); + if (!bar_memmap_p) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3268 WQ[%d] failed to memmap " + "pci barset:x%x\n", + wq->queue_id, dpp_barset); + status = -ENOMEM; + goto out; + } + dpp_offset = wq_create->u.response_1.dpp_offset; + wq->dpp_regaddr = bar_memmap_p + dpp_offset; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3271 WQ[%d]: barset:x%x, offset:x%x, " + "dpp_id:x%x dpp_barset:x%x " + "dpp_offset:x%x\n", + wq->queue_id, pci_barset, db_offset, + wq->dpp_id, dpp_barset, dpp_offset); + +#ifdef CONFIG_X86 + /* Enable combined writes for DPP aperture */ + pg_addr = (unsigned long)(wq->dpp_regaddr) 
& PAGE_MASK; + rc = set_memory_wc(pg_addr, 1); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3272 Cannot setup Combined " + "Write on WQ[%d] - disable DPP\n", + wq->queue_id); + phba->cfg_enable_dpp = 0; + } +#else + phba->cfg_enable_dpp = 0; +#endif + } else + wq->db_regaddr = phba->sli4_hba.WQDBregaddr; + } + wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); + if (wq->pring == NULL) { + status = -ENOMEM; + goto out; + } + wq->type = LPFC_WQ; + wq->assoc_qid = cq->queue_id; + wq->subtype = subtype; + wq->host_index = 0; + wq->hba_index = 0; + wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL; + + /* link the wq onto the parent cq child list */ + list_add_tail(&wq->list, &cq->child_list); +out: + mempool_free(mbox, phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_rq_create - Create a Receive Queue on the HBA + * @phba: HBA structure that indicates port to create a queue on. + * @hrq: The queue structure to use to create the header receive queue. + * @drq: The queue structure to use to create the data receive queue. + * @cq: The completion queue to bind this work queue to. + * @subtype: The subtype of the work queue indicating its functionality. + * + * This function creates a receive buffer queue pair , as detailed in @hrq and + * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command + * to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq + * struct is used to get the entry count that is necessary to determine the + * number of pages to use for this queue. The @cq is used to indicate which + * completion queue to bind received buffers that are posted to these queues to. + * This function will send the RQ_CREATE mailbox command to the HBA to setup the + * receive queue pair. This function is asynchronous and will wait for the + * mailbox command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. 
+ **/ +int +lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, + struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) +{ + struct lpfc_mbx_rq_create *rq_create; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + void __iomem *bar_memmap_p; + uint32_t db_offset; + uint16_t pci_barset; + + /* sanity check on queue memory */ + if (!hrq || !drq || !cq) + return -ENODEV; + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = SLI4_PAGE_SIZE; + + if (hrq->entry_count != drq->entry_count) + return -EINVAL; + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_rq_create) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, + length, LPFC_SLI4_MBX_EMBED); + rq_create = &mbox->u.mqe.un.rq_create; + shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; + bf_set(lpfc_mbox_hdr_version, &shdr->request, + phba->sli4_hba.pc_sli4_params.rqv); + if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { + bf_set(lpfc_rq_context_rqe_count_1, + &rq_create->u.request.context, + hrq->entry_count); + rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; + bf_set(lpfc_rq_context_rqe_size, + &rq_create->u.request.context, + LPFC_RQE_SIZE_8); + bf_set(lpfc_rq_context_page_size, + &rq_create->u.request.context, + LPFC_RQ_PAGE_SIZE_4096); + } else { + switch (hrq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2535 Unsupported RQ count. (%d)\n", + hrq->entry_count); + if (hrq->entry_count < 512) { + status = -EINVAL; + goto out; + } + fallthrough; /* otherwise default to smallest count */ + case 512: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_512); + break; + case 1024: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_1024); + break; + case 2048: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_2048); + break; + case 4096: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_4096); + break; + } + bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, + LPFC_HDR_BUF_SIZE); + } + bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, + cq->queue_id); + bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, + hrq->page_count); + list_for_each_entry(dmabuf, &hrq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); + rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } + if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) + bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. 
*/ + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2504 RQ_CREATE mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + goto out; + } + hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); + if (hrq->queue_id == 0xFFFF) { + status = -ENXIO; + goto out; + } + + if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { + hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, + &rq_create->u.response); + if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && + (hrq->db_format != LPFC_DB_RING_FORMAT)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3262 RQ [%d] doorbell format not " + "supported: x%x\n", hrq->queue_id, + hrq->db_format); + status = -EINVAL; + goto out; + } + + pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, + &rq_create->u.response); + bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); + if (!bar_memmap_p) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3269 RQ[%d] failed to memmap pci " + "barset:x%x\n", hrq->queue_id, + pci_barset); + status = -ENOMEM; + goto out; + } + + db_offset = rq_create->u.response.doorbell_offset; + if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && + (db_offset != LPFC_ULP1_RQ_DOORBELL)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3270 RQ[%d] doorbell offset not " + "supported: x%x\n", hrq->queue_id, + db_offset); + status = -EINVAL; + goto out; + } + hrq->db_regaddr = bar_memmap_p + db_offset; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " + "format:x%x\n", hrq->queue_id, pci_barset, + db_offset, hrq->db_format); + } else { + hrq->db_format = LPFC_DB_RING_FORMAT; + hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; + } + hrq->type = LPFC_HRQ; + hrq->assoc_qid = cq->queue_id; + hrq->subtype = subtype; + hrq->host_index = 0; + hrq->hba_index = 0; + hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; + + /* now create the data queue */ + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbox_hdr_version, &shdr->request, + phba->sli4_hba.pc_sli4_params.rqv); + if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { + bf_set(lpfc_rq_context_rqe_count_1, + &rq_create->u.request.context, hrq->entry_count); + if (subtype == LPFC_NVMET) + rq_create->u.request.context.buffer_size = + LPFC_NVMET_DATA_BUF_SIZE; + else + rq_create->u.request.context.buffer_size = + LPFC_DATA_BUF_SIZE; + bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, + LPFC_RQE_SIZE_8); + bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, + (PAGE_SIZE/SLI4_PAGE_SIZE)); + } else { + switch (drq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2536 Unsupported RQ count. 
(%d)\n", + drq->entry_count); + if (drq->entry_count < 512) { + status = -EINVAL; + goto out; + } + fallthrough; /* otherwise default to smallest count */ + case 512: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_512); + break; + case 1024: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_1024); + break; + case 2048: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_2048); + break; + case 4096: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_4096); + break; + } + if (subtype == LPFC_NVMET) + bf_set(lpfc_rq_context_buf_size, + &rq_create->u.request.context, + LPFC_NVMET_DATA_BUF_SIZE); + else + bf_set(lpfc_rq_context_buf_size, + &rq_create->u.request.context, + LPFC_DATA_BUF_SIZE); + } + bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, + cq->queue_id); + bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, + drq->page_count); + list_for_each_entry(dmabuf, &drq->page_list, list) { + rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } + if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) + bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + status = -ENXIO; + goto out; + } + drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); + if (drq->queue_id == 0xFFFF) { + status = -ENXIO; + goto out; + } + drq->type = LPFC_DRQ; + drq->assoc_qid = cq->queue_id; + drq->subtype = subtype; + drq->host_index = 0; + drq->hba_index = 0; + drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; + + /* link the header and data RQs onto the parent cq child list */ + list_add_tail(&hrq->list, &cq->child_list); + list_add_tail(&drq->list, &cq->child_list); + +out: + mempool_free(mbox, phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_mrq_create - Create MRQ Receive Queues on the HBA + * @phba: HBA structure that indicates port to create a queue on. + * @hrqp: The queue structure array to use to create the header receive queues. + * @drqp: The queue structure array to use to create the data receive queues. + * @cqp: The completion queue array to bind these receive queues to. + * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc). + * + * This function creates a receive buffer queue pair , as detailed in @hrq and + * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command + * to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq + * struct is used to get the entry count that is necessary to determine the + * number of pages to use for this queue. The @cq is used to indicate which + * completion queue to bind received buffers that are posted to these queues to. + * This function will send the RQ_CREATE mailbox command to the HBA to setup the + * receive queue pair. This function is asynchronous and will wait for the + * mailbox command to finish before continuing. + * + * On success this function will return a zero. 
If unable to allocate enough + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. + **/ +int +lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, + struct lpfc_queue **drqp, struct lpfc_queue **cqp, + uint32_t subtype) +{ + struct lpfc_queue *hrq, *drq, *cq; + struct lpfc_mbx_rq_create_v2 *rq_create; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + int rc, length, alloclen, status = 0; + int cnt, idx, numrq, page_idx = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + + numrq = phba->cfg_nvmet_mrq; + /* sanity check on array memory */ + if (!hrqp || !drqp || !cqp || !numrq) + return -ENODEV; + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = SLI4_PAGE_SIZE; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + length = sizeof(struct lpfc_mbx_rq_create_v2); + length += ((2 * numrq * hrqp[0]->page_count) * + sizeof(struct dma_address)); + + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, + LPFC_SLI4_MBX_NEMBED); + if (alloclen < length) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3099 Allocated DMA memory size (%d) is " + "less than the requested DMA memory size " + "(%d)\n", alloclen, length); + status = -ENOMEM; + goto out; + } + + + + rq_create = mbox->sge_array->addr[0]; + shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; + + bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); + cnt = 0; + + for (idx = 0; idx < numrq; idx++) { + hrq = hrqp[idx]; + drq = drqp[idx]; + cq = cqp[idx]; + + /* sanity check on queue memory */ + if (!hrq || !drq || !cq) { + status = -ENODEV; + goto out; + } + + if (hrq->entry_count != drq->entry_count) { + status = -EINVAL; + goto out; + } + + if (idx == 0) { + bf_set(lpfc_mbx_rq_create_num_pages, + &rq_create->u.request, + hrq->page_count); + bf_set(lpfc_mbx_rq_create_rq_cnt, + &rq_create->u.request, (numrq * 2)); + bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, + 1); + bf_set(lpfc_rq_context_base_cq, + &rq_create->u.request.context, + cq->queue_id); + bf_set(lpfc_rq_context_data_size, + &rq_create->u.request.context, + LPFC_NVMET_DATA_BUF_SIZE); + bf_set(lpfc_rq_context_hdr_size, + &rq_create->u.request.context, + LPFC_HDR_BUF_SIZE); + bf_set(lpfc_rq_context_rqe_count_1, + &rq_create->u.request.context, + hrq->entry_count); + bf_set(lpfc_rq_context_rqe_size, + &rq_create->u.request.context, + LPFC_RQE_SIZE_8); + bf_set(lpfc_rq_context_page_size, + &rq_create->u.request.context, + (PAGE_SIZE/SLI4_PAGE_SIZE)); + } + rc = 0; + list_for_each_entry(dmabuf, &hrq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); + cnt = page_idx + dmabuf->buffer_tag; + rq_create->u.request.page[cnt].addr_lo = + putPaddrLow(dmabuf->phys); + rq_create->u.request.page[cnt].addr_hi = + putPaddrHigh(dmabuf->phys); + rc++; + } + page_idx += rc; + + rc = 0; + list_for_each_entry(dmabuf, &drq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); + cnt = page_idx + dmabuf->buffer_tag; + rq_create->u.request.page[cnt].addr_lo = + putPaddrLow(dmabuf->phys); + rq_create->u.request.page[cnt].addr_hi = + putPaddrHigh(dmabuf->phys); + rc++; + } + page_idx += rc; + + hrq->db_format = LPFC_DB_RING_FORMAT; + hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; + hrq->type = LPFC_HRQ; + hrq->assoc_qid = cq->queue_id; + hrq->subtype = subtype; + 
hrq->host_index = 0; + hrq->hba_index = 0; + hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; + + drq->db_format = LPFC_DB_RING_FORMAT; + drq->db_regaddr = phba->sli4_hba.RQDBregaddr; + drq->type = LPFC_DRQ; + drq->assoc_qid = cq->queue_id; + drq->subtype = subtype; + drq->host_index = 0; + drq->hba_index = 0; + drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; + + list_add_tail(&hrq->list, &cq->child_list); + list_add_tail(&drq->list, &cq->child_list); + } + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3120 RQ_CREATE mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + goto out; + } + rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); + if (rc == 0xFFFF) { + status = -ENXIO; + goto out; + } + + /* Initialize all RQs with associated queue id */ + for (idx = 0; idx < numrq; idx++) { + hrq = hrqp[idx]; + hrq->queue_id = rc + (2 * idx); + drq = drqp[idx]; + drq->queue_id = rc + (2 * idx) + 1; + } + +out: + lpfc_sli4_mbox_cmd_free(phba, mbox); + return status; +} + +/** + * lpfc_eq_destroy - Destroy an event Queue on the HBA + * @phba: HBA structure that indicates port to destroy a queue on. + * @eq: The queue structure associated with the queue to destroy. + * + * This function destroys a queue, as detailed in @eq by sending an mailbox + * command, specific to the type of queue, to the HBA. + * + * The @eq struct is used to get the queue ID of the queue to destroy. + * + * On success this function will return a zero. If the queue destroy mailbox + * command fails this function will return -ENXIO. + **/ +int +lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) +{ + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + /* sanity check on queue memory */ + if (!eq) + return -ENODEV; + + mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_eq_destroy) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_EQ_DESTROY, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, + eq->queue_id); + mbox->vport = eq->phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + + rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2505 EQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + + /* Remove eq from any list */ + list_del_init(&eq->list); + mempool_free(mbox, eq->phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_cq_destroy - Destroy a Completion Queue on the HBA + * @phba: HBA structure that indicates port to destroy a queue on. 
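/*
 * Aside: a standalone sketch of the queue-id assignment performed above.
 * The port returns a single base queue id for the whole MRQ set; the
 * header and data queues of pair idx then take the two consecutive ids
 * base + 2*idx and base + 2*idx + 1. Names here are illustrative.
 */
#include <stdint.h>

struct rq_pair_ids {
	uint16_t hrq_id;
	uint16_t drq_id;
};

static inline struct rq_pair_ids mrq_pair_ids(uint16_t base_qid, int idx)
{
	struct rq_pair_ids ids;

	ids.hrq_id = (uint16_t)(base_qid + 2 * idx);     /* header RQ */
	ids.drq_id = (uint16_t)(base_qid + 2 * idx + 1); /* data RQ */
	return ids;
}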
+ * @cq: The queue structure associated with the queue to destroy. + * + * This function destroys a queue, as detailed in @cq by sending an mailbox + * command, specific to the type of queue, to the HBA. + * + * The @cq struct is used to get the queue ID of the queue to destroy. + * + * On success this function will return a zero. If the queue destroy mailbox + * command fails this function will return -ENXIO. + **/ +int +lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) +{ + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + /* sanity check on queue memory */ + if (!cq) + return -ENODEV; + mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_cq_destroy) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_CQ_DESTROY, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, + cq->queue_id); + mbox->vport = cq->phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.wq_create.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2506 CQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + /* Remove cq from any list */ + list_del_init(&cq->list); + mempool_free(mbox, cq->phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA + * @phba: HBA structure that indicates port to destroy a queue on. + * @mq: The queue structure associated with the queue to destroy. + * + * This function destroys a queue, as detailed in @mq by sending an mailbox + * command, specific to the type of queue, to the HBA. + * + * The @mq struct is used to get the queue ID of the queue to destroy. + * + * On success this function will return a zero. If the queue destroy mailbox + * command fails this function will return -ENXIO. + **/ +int +lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) +{ + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + /* sanity check on queue memory */ + if (!mq) + return -ENODEV; + mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_mq_destroy) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_MQ_DESTROY, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, + mq->queue_id); + mbox->vport = mq->phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. 
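/*
 * Aside: the create/destroy paths above all repeat the same check on the
 * mailbox subheader: any nonzero subheader status, additional status, or
 * mailbox return code is collapsed into -ENXIO. A tiny standalone helper
 * showing the shape of that check (names are illustrative, not lpfc
 * symbols):
 */
#include <errno.h>
#include <stdint.h>

static inline int mbox_shdr_to_errno(uint32_t shdr_status,
				     uint32_t shdr_add_status, int mbx_rc)
{
	if (shdr_status || shdr_add_status || mbx_rc)
		return -ENXIO;
	return 0;
}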
*/ + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2507 MQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + /* Remove mq from any list */ + list_del_init(&mq->list); + mempool_free(mbox, mq->phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_wq_destroy - Destroy a Work Queue on the HBA + * @phba: HBA structure that indicates port to destroy a queue on. + * @wq: The queue structure associated with the queue to destroy. + * + * This function destroys a queue, as detailed in @wq by sending an mailbox + * command, specific to the type of queue, to the HBA. + * + * The @wq struct is used to get the queue ID of the queue to destroy. + * + * On success this function will return a zero. If the queue destroy mailbox + * command fails this function will return -ENXIO. + **/ +int +lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) +{ + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + /* sanity check on queue memory */ + if (!wq) + return -ENODEV; + mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_wq_destroy) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, + wq->queue_id); + mbox->vport = wq->phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2508 WQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + /* Remove wq from any list */ + list_del_init(&wq->list); + kfree(wq->pring); + wq->pring = NULL; + mempool_free(mbox, wq->phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_rq_destroy - Destroy a Receive Queue on the HBA + * @phba: HBA structure that indicates port to destroy a queue on. + * @hrq: The queue structure associated with the queue to destroy. + * @drq: The queue structure associated with the queue to destroy. + * + * This function destroys a queue, as detailed in @rq by sending an mailbox + * command, specific to the type of queue, to the HBA. + * + * The @rq struct is used to get the queue ID of the queue to destroy. + * + * On success this function will return a zero. If the queue destroy mailbox + * command fails this function will return -ENXIO. 
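/*
 * Aside: each embedded destroy command above computes its payload length
 * as the command structure size minus the common config header that
 * precedes it. A standalone illustration with made-up structure layouts
 * (the real lpfc structures differ):
 */
#include <stddef.h>
#include <stdint.h>

struct cfg_hdr {		/* stands in for the common config header */
	uint32_t word[4];
};

struct q_destroy_cmd {		/* stands in for an embedded destroy command */
	struct cfg_hdr hdr;
	uint32_t request[2];
};

/* payload length passed to the config setup: command minus common header */
static const size_t q_destroy_payload_len =
	sizeof(struct q_destroy_cmd) - sizeof(struct cfg_hdr);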
+ **/ +int +lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, + struct lpfc_queue *drq) +{ + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + /* sanity check on queue memory */ + if (!hrq || !drq) + return -ENODEV; + mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_rq_destroy) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, + hrq->queue_id); + mbox->vport = hrq->phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2509 RQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + mempool_free(mbox, hrq->phba->mbox_mem_pool); + return -ENXIO; + } + bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, + drq->queue_id); + rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2510 RQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + list_del_init(&hrq->list); + list_del_init(&drq->list); + mempool_free(mbox, hrq->phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA + * @phba: The virtual port for which this call being executed. + * @pdma_phys_addr0: Physical address of the 1st SGL page. + * @pdma_phys_addr1: Physical address of the 2nd SGL page. + * @xritag: the xritag that ties this io to the SGL pages. + * + * This routine will post the sgl pages for the IO that has the xritag + * that is in the iocbq structure. The xritag is assigned during iocbq + * creation and persists for as long as the driver is loaded. + * if the caller has fewer than 256 scatter gather segments to map then + * pdma_phys_addr1 should be 0. + * If the caller needs to map more than 256 scatter gather segment then + * pdma_phys_addr1 should be a valid physical address. + * physical address for SGLs must be 64 byte aligned. + * If you are going to map 2 SGL's then the first one must have 256 entries + * the second sgl can have between 1 and 256 entries. 
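/*
 * Aside: a standalone sketch of the two-page SGL rule described just
 * above: the second page address is only used when the scatter-gather
 * list spills past the first page, otherwise 0 is passed for it. The
 * per-page entry count and names below are illustrative assumptions.
 */
#include <stdint.h>

#define SGL_ENTRIES_PER_PAGE 256	/* illustrative per-page capacity */

static inline uint64_t sgl_second_page(uint64_t sgl_phys, int sge_count,
				       uint64_t page_size)
{
	/* the whole list fits in the first page: no second page */
	if (sge_count <= SGL_ENTRIES_PER_PAGE)
		return 0;
	/* otherwise the second page starts one page past the first */
	return sgl_phys + page_size;
}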
+ * + * Return codes: + * 0 - Success + * -ENXIO, -ENOMEM - Failure + **/ +int +lpfc_sli4_post_sgl(struct lpfc_hba *phba, + dma_addr_t pdma_phys_addr0, + dma_addr_t pdma_phys_addr1, + uint16_t xritag) +{ + struct lpfc_mbx_post_sgl_pages *post_sgl_pages; + LPFC_MBOXQ_t *mbox; + int rc; + uint32_t shdr_status, shdr_add_status; + uint32_t mbox_tmo; + union lpfc_sli4_cfg_shdr *shdr; + + if (xritag == NO_XRI) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0364 Invalid param:\n"); + return -EINVAL; + } + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, + sizeof(struct lpfc_mbx_post_sgl_pages) - + sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); + + post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) + &mbox->u.mqe.un.post_sgl_pages; + bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); + bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); + + post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = + cpu_to_le32(putPaddrLow(pdma_phys_addr0)); + post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = + cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); + + post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = + cpu_to_le32(putPaddrLow(pdma_phys_addr1)); + post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = + cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (!phba->sli4_hba.intr_enable) + mempool_free(mbox, phba->mbox_mem_pool); + else if (rc != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2511 POST_SGL mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + } + return 0; +} + +/** + * lpfc_sli4_alloc_xri - Get an available rpi in the device's range + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to post rpi header templates to the + * HBA consistent with the SLI-4 interface spec. This routine + * posts a SLI4_PAGE_SIZE memory region to the port to hold up to + * SLI4_PAGE_SIZE modulo 64 rpi context headers. + * + * Returns + * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful + * LPFC_RPI_ALLOC_ERROR if no rpis are available. + **/ +static uint16_t +lpfc_sli4_alloc_xri(struct lpfc_hba *phba) +{ + unsigned long xri; + + /* + * Fetch the next logical xri. Because this index is logical, + * the driver starts at 0 each time. + */ + spin_lock_irq(&phba->hbalock); + xri = find_first_zero_bit(phba->sli4_hba.xri_bmask, + phba->sli4_hba.max_cfg_param.max_xri); + if (xri >= phba->sli4_hba.max_cfg_param.max_xri) { + spin_unlock_irq(&phba->hbalock); + return NO_XRI; + } else { + set_bit(xri, phba->sli4_hba.xri_bmask); + phba->sli4_hba.max_cfg_param.xri_used++; + } + spin_unlock_irq(&phba->hbalock); + return xri; +} + +/** + * __lpfc_sli4_free_xri - Release an xri for reuse. + * @phba: pointer to lpfc hba data structure. + * @xri: xri to release. 
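/*
 * Aside: a minimal userspace model of the XRI bookkeeping used by the
 * alloc/free helpers here: a bitmap of max_xri bits, allocation finds the
 * first clear bit and sets it, free clears it again. The real driver does
 * this under phba->hbalock; locking is omitted and all names below are
 * illustrative.
 */
#include <stdint.h>

#define MAX_XRI   1024
#define NO_XRI_ID 0xffff

static uint64_t xri_bmask[MAX_XRI / 64];
static unsigned int xri_used;

static uint16_t xri_alloc(void)
{
	unsigned int i;

	for (i = 0; i < MAX_XRI; i++) {
		if (!(xri_bmask[i / 64] & (1ULL << (i % 64)))) {
			xri_bmask[i / 64] |= 1ULL << (i % 64);
			xri_used++;
			return (uint16_t)i;
		}
	}
	return NO_XRI_ID;	/* exhausted */
}

static void xri_free(uint16_t xri)
{
	if (xri < MAX_XRI && (xri_bmask[xri / 64] & (1ULL << (xri % 64)))) {
		xri_bmask[xri / 64] &= ~(1ULL << (xri % 64));
		xri_used--;
	}
}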
+ * + * This routine is invoked to release an xri to the pool of + * available rpis maintained by the driver. + **/ +static void +__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) +{ + if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { + phba->sli4_hba.max_cfg_param.xri_used--; + } +} + +/** + * lpfc_sli4_free_xri - Release an xri for reuse. + * @phba: pointer to lpfc hba data structure. + * @xri: xri to release. + * + * This routine is invoked to release an xri to the pool of + * available rpis maintained by the driver. + **/ +void +lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) +{ + spin_lock_irq(&phba->hbalock); + __lpfc_sli4_free_xri(phba, xri); + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli4_next_xritag - Get an xritag for the io + * @phba: Pointer to HBA context object. + * + * This function gets an xritag for the iocb. If there is no unused xritag + * it will return 0xffff. + * The function returns the allocated xritag if successful, else returns zero. + * Zero is not a valid xritag. + * The caller is not required to hold any lock. + **/ +uint16_t +lpfc_sli4_next_xritag(struct lpfc_hba *phba) +{ + uint16_t xri_index; + + xri_index = lpfc_sli4_alloc_xri(phba); + if (xri_index == NO_XRI) + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2004 Failed to allocate XRI.last XRITAG is %d" + " Max XRI is %d, Used XRI is %d\n", + xri_index, + phba->sli4_hba.max_cfg_param.max_xri, + phba->sli4_hba.max_cfg_param.xri_used); + return xri_index; +} + +/** + * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port. + * @phba: pointer to lpfc hba data structure. + * @post_sgl_list: pointer to els sgl entry list. + * @post_cnt: number of els sgl entries on the list. + * + * This routine is invoked to post a block of driver's sgl pages to the + * HBA using non-embedded mailbox command. No Lock is held. This routine + * is only called when the driver is loading and after all IO has been + * stopped. 
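/*
 * Aside: the SGL block-post routines that follow size their non-embedded
 * mailbox payload as (entry count * page-pair size) plus the config
 * sub-header and one trailing word, and refuse any request larger than a
 * single SLI4 page. A standalone sketch of that bound check; the sizes
 * below are illustrative, not the real lpfc layouts.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SLI4_PAGE_BYTES 4096		/* illustrative SLI4 page size */

struct pg_pair { uint32_t w[4]; };	/* stands in for sgl_page_pairs */

static bool sgl_block_fits(size_t pair_cnt)
{
	size_t reqlen = pair_cnt * sizeof(struct pg_pair)
			+ 16			/* config sub-header, assumed */
			+ sizeof(uint32_t);	/* trailing word */

	return reqlen <= SLI4_PAGE_BYTES;
}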
+ **/ +static int +lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, + struct list_head *post_sgl_list, + int post_cnt) +{ + struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + struct lpfc_mbx_post_uembed_sgl_page1 *sgl; + struct sgl_page_pairs *sgl_pg_pairs; + void *viraddr; + LPFC_MBOXQ_t *mbox; + uint32_t reqlen, alloclen, pg_pairs; + uint32_t mbox_tmo; + uint16_t xritag_start = 0; + int rc = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + reqlen = post_cnt * sizeof(struct sgl_page_pairs) + + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); + if (reqlen > SLI4_PAGE_SIZE) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2559 Block sgl registration required DMA " + "size (%d) great than a page\n", reqlen); + return -ENOMEM; + } + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + /* Allocate DMA memory and set up the non-embedded mailbox command */ + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, + LPFC_SLI4_MBX_NEMBED); + + if (alloclen < reqlen) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0285 Allocated DMA memory size (%d) is " + "less than the requested DMA memory " + "size (%d)\n", alloclen, reqlen); + lpfc_sli4_mbox_cmd_free(phba, mbox); + return -ENOMEM; + } + /* Set up the SGL pages in the non-embedded DMA pages */ + viraddr = mbox->sge_array->addr[0]; + sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; + sgl_pg_pairs = &sgl->sgl_pg_pairs; + + pg_pairs = 0; + list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { + /* Set up the sge entry */ + sgl_pg_pairs->sgl_pg0_addr_lo = + cpu_to_le32(putPaddrLow(sglq_entry->phys)); + sgl_pg_pairs->sgl_pg0_addr_hi = + cpu_to_le32(putPaddrHigh(sglq_entry->phys)); + sgl_pg_pairs->sgl_pg1_addr_lo = + cpu_to_le32(putPaddrLow(0)); + sgl_pg_pairs->sgl_pg1_addr_hi = + cpu_to_le32(putPaddrHigh(0)); + + /* Keep the first xritag on the list */ + if (pg_pairs == 0) + xritag_start = sglq_entry->sli4_xritag; + sgl_pg_pairs++; + pg_pairs++; + } + + /* Complete initialization and perform endian conversion. */ + bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); + bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); + sgl->word0 = cpu_to_le32(sgl->word0); + + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (!phba->sli4_hba.intr_enable) + lpfc_sli4_mbox_cmd_free(phba, mbox); + else if (rc != MBX_TIMEOUT) + lpfc_sli4_mbox_cmd_free(phba, mbox); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2513 POST_SGL_BLOCK mailbox command failed " + "status x%x add_status x%x mbx status x%x\n", + shdr_status, shdr_add_status, rc); + rc = -ENXIO; + } + return rc; +} + +/** + * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware + * @phba: pointer to lpfc hba data structure. + * @nblist: pointer to nvme buffer list. + * @count: number of scsi buffers on the list. + * + * This routine is invoked to post a block of @count scsi sgl pages from a + * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. + * No Lock is held. 
+ * + **/ +static int +lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, + int count) +{ + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_mbx_post_uembed_sgl_page1 *sgl; + struct sgl_page_pairs *sgl_pg_pairs; + void *viraddr; + LPFC_MBOXQ_t *mbox; + uint32_t reqlen, alloclen, pg_pairs; + uint32_t mbox_tmo; + uint16_t xritag_start = 0; + int rc = 0; + uint32_t shdr_status, shdr_add_status; + dma_addr_t pdma_phys_bpl1; + union lpfc_sli4_cfg_shdr *shdr; + + /* Calculate the requested length of the dma memory */ + reqlen = count * sizeof(struct sgl_page_pairs) + + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); + if (reqlen > SLI4_PAGE_SIZE) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "6118 Block sgl registration required DMA " + "size (%d) great than a page\n", reqlen); + return -ENOMEM; + } + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6119 Failed to allocate mbox cmd memory\n"); + return -ENOMEM; + } + + /* Allocate DMA memory and set up the non-embedded mailbox command */ + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, + reqlen, LPFC_SLI4_MBX_NEMBED); + + if (alloclen < reqlen) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6120 Allocated DMA memory size (%d) is " + "less than the requested DMA memory " + "size (%d)\n", alloclen, reqlen); + lpfc_sli4_mbox_cmd_free(phba, mbox); + return -ENOMEM; + } + + /* Get the first SGE entry from the non-embedded DMA memory */ + viraddr = mbox->sge_array->addr[0]; + + /* Set up the SGL pages in the non-embedded DMA pages */ + sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; + sgl_pg_pairs = &sgl->sgl_pg_pairs; + + pg_pairs = 0; + list_for_each_entry(lpfc_ncmd, nblist, list) { + /* Set up the sge entry */ + sgl_pg_pairs->sgl_pg0_addr_lo = + cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); + sgl_pg_pairs->sgl_pg0_addr_hi = + cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); + if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) + pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + + SGL_PAGE_SIZE; + else + pdma_phys_bpl1 = 0; + sgl_pg_pairs->sgl_pg1_addr_lo = + cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); + sgl_pg_pairs->sgl_pg1_addr_hi = + cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); + /* Keep the first xritag on the list */ + if (pg_pairs == 0) + xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; + sgl_pg_pairs++; + pg_pairs++; + } + bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); + bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); + /* Perform endian conversion if necessary */ + sgl->word0 = cpu_to_le32(sgl->word0); + + if (!phba->sli4_hba.intr_enable) { + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + } else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (!phba->sli4_hba.intr_enable) + lpfc_sli4_mbox_cmd_free(phba, mbox); + else if (rc != MBX_TIMEOUT) + lpfc_sli4_mbox_cmd_free(phba, mbox); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6125 POST_SGL_BLOCK mailbox command failed " + "status x%x add_status x%x mbx status x%x\n", + shdr_status, shdr_add_status, rc); + rc = -ENXIO; + } + return rc; +} + +/** + * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from 
a list + * @phba: pointer to lpfc hba data structure. + * @post_nblist: pointer to the nvme buffer list. + * @sb_count: number of nvme buffers. + * + * This routine walks a list of nvme buffers that was passed in. It attempts + * to construct blocks of nvme buffer sgls which contains contiguous xris and + * uses the non-embedded SGL block post mailbox commands to post to the port. + * For single NVME buffer sgl with non-contiguous xri, if any, it shall use + * embedded SGL post mailbox command for posting. The @post_nblist passed in + * must be local list, thus no lock is needed when manipulate the list. + * + * Returns: 0 = failure, non-zero number of successfully posted buffers. + **/ +int +lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, + struct list_head *post_nblist, int sb_count) +{ + struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; + int status, sgl_size; + int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; + dma_addr_t pdma_phys_sgl1; + int last_xritag = NO_XRI; + int cur_xritag; + LIST_HEAD(prep_nblist); + LIST_HEAD(blck_nblist); + LIST_HEAD(nvme_nblist); + + /* sanity check */ + if (sb_count <= 0) + return -EINVAL; + + sgl_size = phba->cfg_sg_dma_buf_size; + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { + list_del_init(&lpfc_ncmd->list); + block_cnt++; + if ((last_xritag != NO_XRI) && + (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { + /* a hole in xri block, form a sgl posting block */ + list_splice_init(&prep_nblist, &blck_nblist); + post_cnt = block_cnt - 1; + /* prepare list for next posting block */ + list_add_tail(&lpfc_ncmd->list, &prep_nblist); + block_cnt = 1; + } else { + /* prepare list for next posting block */ + list_add_tail(&lpfc_ncmd->list, &prep_nblist); + /* enough sgls for non-embed sgl mbox command */ + if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { + list_splice_init(&prep_nblist, &blck_nblist); + post_cnt = block_cnt; + block_cnt = 0; + } + } + num_posting++; + last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; + + /* end of repost sgl list condition for NVME buffers */ + if (num_posting == sb_count) { + if (post_cnt == 0) { + /* last sgl posting block */ + list_splice_init(&prep_nblist, &blck_nblist); + post_cnt = block_cnt; + } else if (block_cnt == 1) { + /* last single sgl with non-contiguous xri */ + if (sgl_size > SGL_PAGE_SIZE) + pdma_phys_sgl1 = + lpfc_ncmd->dma_phys_sgl + + SGL_PAGE_SIZE; + else + pdma_phys_sgl1 = 0; + cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; + status = lpfc_sli4_post_sgl( + phba, lpfc_ncmd->dma_phys_sgl, + pdma_phys_sgl1, cur_xritag); + if (status) { + /* Post error. Buffer unavailable. */ + lpfc_ncmd->flags |= + LPFC_SBUF_NOT_POSTED; + } else { + /* Post success. Bffer available. */ + lpfc_ncmd->flags &= + ~LPFC_SBUF_NOT_POSTED; + lpfc_ncmd->status = IOSTAT_SUCCESS; + num_posted++; + } + /* success, put on NVME buffer sgl list */ + list_add_tail(&lpfc_ncmd->list, &nvme_nblist); + } + } + + /* continue until a nembed page worth of sgls */ + if (post_cnt == 0) + continue; + + /* post block of NVME buffer list sgls */ + status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist, + post_cnt); + + /* don't reset xirtag due to hole in xri block */ + if (block_cnt == 0) + last_xritag = NO_XRI; + + /* reset NVME buffer post count for next round of posting */ + post_cnt = 0; + + /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ + while (!list_empty(&blck_nblist)) { + list_remove_head(&blck_nblist, lpfc_ncmd, + struct lpfc_io_buf, list); + if (status) { + /* Post error. 
Mark buffer unavailable. */ + lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED; + } else { + /* Post success, Mark buffer available. */ + lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED; + lpfc_ncmd->status = IOSTAT_SUCCESS; + num_posted++; + } + list_add_tail(&lpfc_ncmd->list, &nvme_nblist); + } + } + /* Push NVME buffers with sgl posted to the available list */ + lpfc_io_buf_replenish(phba, &nvme_nblist); + + return num_posted; +} + +/** + * lpfc_fc_frame_check - Check that this frame is a valid frame to handle + * @phba: pointer to lpfc_hba struct that the frame was received on + * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) + * + * This function checks the fields in the @fc_hdr to see if the FC frame is a + * valid type of frame that the LPFC driver will handle. This function will + * return a zero if the frame is a valid frame or a non zero value when the + * frame does not pass the check. + **/ +static int +lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) +{ + /* make rctl_names static to save stack space */ + struct fc_vft_header *fc_vft_hdr; + uint32_t *header = (uint32_t *) fc_hdr; + +#define FC_RCTL_MDS_DIAGS 0xF4 + + switch (fc_hdr->fh_r_ctl) { + case FC_RCTL_DD_UNCAT: /* uncategorized information */ + case FC_RCTL_DD_SOL_DATA: /* solicited data */ + case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ + case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ + case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ + case FC_RCTL_DD_DATA_DESC: /* data descriptor */ + case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ + case FC_RCTL_DD_CMD_STATUS: /* command status */ + case FC_RCTL_ELS_REQ: /* extended link services request */ + case FC_RCTL_ELS_REP: /* extended link services reply */ + case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ + case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ + case FC_RCTL_BA_ABTS: /* basic link service abort */ + case FC_RCTL_BA_RMC: /* remove connection */ + case FC_RCTL_BA_ACC: /* basic accept */ + case FC_RCTL_BA_RJT: /* basic reject */ + case FC_RCTL_BA_PRMT: + case FC_RCTL_ACK_1: /* acknowledge_1 */ + case FC_RCTL_ACK_0: /* acknowledge_0 */ + case FC_RCTL_P_RJT: /* port reject */ + case FC_RCTL_F_RJT: /* fabric reject */ + case FC_RCTL_P_BSY: /* port busy */ + case FC_RCTL_F_BSY: /* fabric busy to data frame */ + case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ + case FC_RCTL_LCR: /* link credit reset */ + case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ + case FC_RCTL_END: /* end */ + break; + case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ + fc_vft_hdr = (struct fc_vft_header *)fc_hdr; + fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; + return lpfc_fc_frame_check(phba, fc_hdr); + case FC_RCTL_BA_NOP: /* basic link service NOP */ + default: + goto drop; + } + + switch (fc_hdr->fh_type) { + case FC_TYPE_BLS: + case FC_TYPE_ELS: + case FC_TYPE_FCP: + case FC_TYPE_CT: + case FC_TYPE_NVME: + break; + case FC_TYPE_IP: + case FC_TYPE_ILS: + default: + goto drop; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "2538 Received frame rctl:x%x, type:x%x, " + "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", + fc_hdr->fh_r_ctl, fc_hdr->fh_type, + be32_to_cpu(header[0]), be32_to_cpu(header[1]), + be32_to_cpu(header[2]), be32_to_cpu(header[3]), + be32_to_cpu(header[4]), be32_to_cpu(header[5]), + be32_to_cpu(header[6])); + return 0; +drop: + lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, + "2539 Dropped frame rctl:x%x type:x%x\n", + fc_hdr->fh_r_ctl, fc_hdr->fh_type); + return 1; +} + +/** + * lpfc_fc_hdr_get_vfi 
- Get the VFI from an FC frame + * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) + * + * This function processes the FC header to retrieve the VFI from the VF + * header, if one exists. This function will return the VFI if one exists + * or 0 if no VSAN Header exists. + **/ +static uint32_t +lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) +{ + struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; + + if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) + return 0; + return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); +} + +/** + * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to + * @phba: Pointer to the HBA structure to search for the vport on + * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) + * @fcfi: The FC Fabric ID that the frame came from + * @did: Destination ID to match against + * + * This function searches the @phba for a vport that matches the content of the + * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the + * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function + * returns the matching vport pointer or NULL if unable to match frame to a + * vport. + **/ +static struct lpfc_vport * +lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, + uint16_t fcfi, uint32_t did) +{ + struct lpfc_vport **vports; + struct lpfc_vport *vport = NULL; + int i; + + if (did == Fabric_DID) + return phba->pport; + if ((phba->pport->fc_flag & FC_PT2PT) && + !(phba->link_state == LPFC_HBA_READY)) + return phba->pport; + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) { + for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + if (phba->fcf.fcfi == fcfi && + vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && + vports[i]->fc_myDID == did) { + vport = vports[i]; + break; + } + } + } + lpfc_destroy_vport_work_array(phba, vports); + return vport; +} + +/** + * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp + * @vport: The vport to work on. + * + * This function updates the receive sequence time stamp for this vport. The + * receive sequence time stamp indicates the time that the last frame of the + * the sequence that has been idle for the longest amount of time was received. + * the driver uses this time stamp to indicate if any received sequences have + * timed out. + **/ +static void +lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) +{ + struct lpfc_dmabuf *h_buf; + struct hbq_dmabuf *dmabuf = NULL; + + /* get the oldest sequence on the rcv list */ + h_buf = list_get_first(&vport->rcv_buffer_list, + struct lpfc_dmabuf, list); + if (!h_buf) + return; + dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); + vport->rcv_buffer_time_stamp = dmabuf->time_stamp; +} + +/** + * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. + * @vport: The vport that the received sequences were sent to. + * + * This function cleans up all outstanding received sequences. This is called + * by the driver when a link event or user action invalidates all the received + * sequences. 
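/*
 * Aside: a standalone model of the vport lookup above: the frame's FCF
 * index must match the one the HBA is logged in with, and the VFI taken
 * from the frame (0 when no VFT header is present) plus the destination
 * id are matched against each vport. Types and field names below are
 * illustrative, not the driver's structures.
 */
#include <stddef.h>
#include <stdint.h>

struct vport_info {
	uint32_t vfi;
	uint32_t my_did;
};

static const struct vport_info *
frame_to_vport(const struct vport_info *vports, size_t nvports,
	       uint16_t hba_fcfi, uint16_t frame_fcfi,
	       uint32_t frame_vfi, uint32_t did)
{
	size_t i;

	if (frame_fcfi != hba_fcfi)
		return NULL;
	for (i = 0; i < nvports; i++)
		if (vports[i].vfi == frame_vfi && vports[i].my_did == did)
			return &vports[i];
	return NULL;	/* no match: caller drops the frame */
}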
+ **/ +void +lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) +{ + struct lpfc_dmabuf *h_buf, *hnext; + struct lpfc_dmabuf *d_buf, *dnext; + struct hbq_dmabuf *dmabuf = NULL; + + /* start with the oldest sequence on the rcv list */ + list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { + dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); + list_del_init(&dmabuf->hbuf.list); + list_for_each_entry_safe(d_buf, dnext, + &dmabuf->dbuf.list, list) { + list_del_init(&d_buf->list); + lpfc_in_buf_free(vport->phba, d_buf); + } + lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); + } +} + +/** + * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. + * @vport: The vport that the received sequences were sent to. + * + * This function determines whether any received sequences have timed out by + * first checking the vport's rcv_buffer_time_stamp. If this time_stamp + * indicates that there is at least one timed out sequence this routine will + * go through the received sequences one at a time from most inactive to most + * active to determine which ones need to be cleaned up. Once it has determined + * that a sequence needs to be cleaned up it will simply free up the resources + * without sending an abort. + **/ +void +lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) +{ + struct lpfc_dmabuf *h_buf, *hnext; + struct lpfc_dmabuf *d_buf, *dnext; + struct hbq_dmabuf *dmabuf = NULL; + unsigned long timeout; + int abort_count = 0; + + timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + + vport->rcv_buffer_time_stamp); + if (list_empty(&vport->rcv_buffer_list) || + time_before(jiffies, timeout)) + return; + /* start with the oldest sequence on the rcv list */ + list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { + dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); + timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + + dmabuf->time_stamp); + if (time_before(jiffies, timeout)) + break; + abort_count++; + list_del_init(&dmabuf->hbuf.list); + list_for_each_entry_safe(d_buf, dnext, + &dmabuf->dbuf.list, list) { + list_del_init(&d_buf->list); + lpfc_in_buf_free(vport->phba, d_buf); + } + lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); + } + if (abort_count) + lpfc_update_rcv_time_stamp(vport); +} + +/** + * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences + * @vport: pointer to a vitural port + * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame + * + * This function searches through the existing incomplete sequences that have + * been sent to this @vport. If the frame matches one of the incomplete + * sequences then the dbuf in the @dmabuf is added to the list of frames that + * make up that sequence. If no sequence is found that matches this frame then + * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list + * This function returns a pointer to the first dmabuf in the sequence list that + * the frame was linked to. 
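/*
 * Aside: a standalone sketch of the E_D_TOV sweep above: sequences are
 * kept oldest-first, each carries the tick at which its newest frame
 * arrived, and the walk stops at the first entry that has not yet aged
 * past the timeout. The tick arithmetic and names are illustrative (the
 * driver uses jiffies and msecs_to_jiffies()).
 */
#include <stddef.h>
#include <stdint.h>

struct rcv_seq {
	uint64_t last_rx_tick;	/* when the newest frame of the seq arrived */
};

/* returns how many leading (oldest) entries have timed out */
static size_t count_timed_out(const struct rcv_seq *seqs, size_t nseqs,
			      uint64_t now, uint64_t edtov_ticks)
{
	size_t n = 0;

	while (n < nseqs && now - seqs[n].last_rx_tick >= edtov_ticks)
		n++;	/* older than E_D_TOV: would be freed */
	return n;
}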
+ **/ +static struct hbq_dmabuf * +lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) +{ + struct fc_frame_header *new_hdr; + struct fc_frame_header *temp_hdr; + struct lpfc_dmabuf *d_buf; + struct lpfc_dmabuf *h_buf; + struct hbq_dmabuf *seq_dmabuf = NULL; + struct hbq_dmabuf *temp_dmabuf = NULL; + uint8_t found = 0; + + INIT_LIST_HEAD(&dmabuf->dbuf.list); + dmabuf->time_stamp = jiffies; + new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; + + /* Use the hdr_buf to find the sequence that this frame belongs to */ + list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { + temp_hdr = (struct fc_frame_header *)h_buf->virt; + if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || + (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || + (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) + continue; + /* found a pending sequence that matches this frame */ + seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); + break; + } + if (!seq_dmabuf) { + /* + * This indicates first frame received for this sequence. + * Queue the buffer on the vport's rcv_buffer_list. + */ + list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); + lpfc_update_rcv_time_stamp(vport); + return dmabuf; + } + temp_hdr = seq_dmabuf->hbuf.virt; + if (be16_to_cpu(new_hdr->fh_seq_cnt) < + be16_to_cpu(temp_hdr->fh_seq_cnt)) { + list_del_init(&seq_dmabuf->hbuf.list); + list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); + list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); + lpfc_update_rcv_time_stamp(vport); + return dmabuf; + } + /* move this sequence to the tail to indicate a young sequence */ + list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); + seq_dmabuf->time_stamp = jiffies; + lpfc_update_rcv_time_stamp(vport); + if (list_empty(&seq_dmabuf->dbuf.list)) { + list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); + return seq_dmabuf; + } + /* find the correct place in the sequence to insert this frame */ + d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list); + while (!found) { + temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); + temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; + /* + * If the frame's sequence count is greater than the frame on + * the list then insert the frame right after this frame + */ + if (be16_to_cpu(new_hdr->fh_seq_cnt) > + be16_to_cpu(temp_hdr->fh_seq_cnt)) { + list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); + found = 1; + break; + } + + if (&d_buf->list == &seq_dmabuf->dbuf.list) + break; + d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list); + } + + if (found) + return seq_dmabuf; + return NULL; +} + +/** + * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence + * @vport: pointer to a vitural port + * @dmabuf: pointer to a dmabuf that describes the FC sequence + * + * This function tries to abort from the partially assembed sequence, described + * by the information from basic abbort @dmabuf. It checks to see whether such + * partially assembled sequence held by the driver. If so, it shall free up all + * the frames from the partially assembled sequence. 
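/*
 * Aside: a minimal standalone sketch of the reassembly order used above:
 * frames belonging to one sequence are kept sorted by SEQ_CNT so the
 * sequence can later be checked for holes and handed up in order. The
 * list type below is illustrative, not the driver's dmabuf lists.
 */
#include <stdint.h>

struct seq_frame {
	uint16_t seq_cnt;	/* host-order SEQ_CNT of this frame */
	struct seq_frame *next;
};

/* insert a frame into a singly linked list kept in ascending SEQ_CNT order */
static void seq_insert_sorted(struct seq_frame **head, struct seq_frame *f)
{
	struct seq_frame **pp = head;

	while (*pp && (*pp)->seq_cnt < f->seq_cnt)
		pp = &(*pp)->next;
	f->next = *pp;
	*pp = f;
}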
+ * + * Return + * true -- if there is matching partially assembled sequence present and all + * the frames freed with the sequence; + * false -- if there is no matching partially assembled sequence present so + * nothing got aborted in the lower layer driver + **/ +static bool +lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, + struct hbq_dmabuf *dmabuf) +{ + struct fc_frame_header *new_hdr; + struct fc_frame_header *temp_hdr; + struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; + struct hbq_dmabuf *seq_dmabuf = NULL; + + /* Use the hdr_buf to find the sequence that matches this frame */ + INIT_LIST_HEAD(&dmabuf->dbuf.list); + INIT_LIST_HEAD(&dmabuf->hbuf.list); + new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; + list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { + temp_hdr = (struct fc_frame_header *)h_buf->virt; + if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || + (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || + (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) + continue; + /* found a pending sequence that matches this frame */ + seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); + break; + } + + /* Free up all the frames from the partially assembled sequence */ + if (seq_dmabuf) { + list_for_each_entry_safe(d_buf, n_buf, + &seq_dmabuf->dbuf.list, list) { + list_del_init(&d_buf->list); + lpfc_in_buf_free(vport->phba, d_buf); + } + return true; + } + return false; +} + +/** + * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp + * @vport: pointer to a vitural port + * @dmabuf: pointer to a dmabuf that describes the FC sequence + * + * This function tries to abort from the assembed sequence from upper level + * protocol, described by the information from basic abbort @dmabuf. It + * checks to see whether such pending context exists at upper level protocol. + * If so, it shall clean up the pending context. + * + * Return + * true -- if there is matching pending context of the sequence cleaned + * at ulp; + * false -- if there is no matching pending context of the sequence present + * at ulp. + **/ +static bool +lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) +{ + struct lpfc_hba *phba = vport->phba; + int handled; + + /* Accepting abort at ulp with SLI4 only */ + if (phba->sli_rev < LPFC_SLI_REV4) + return false; + + /* Register all caring upper level protocols to attend abort */ + handled = lpfc_ct_handle_unsol_abort(phba, dmabuf); + if (handled) + return true; + + return false; +} + +/** + * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler + * @phba: Pointer to HBA context object. + * @cmd_iocbq: pointer to the command iocbq structure. + * @rsp_iocbq: pointer to the response iocbq structure. + * + * This function handles the sequence abort response iocb command complete + * event. It properly releases the memory allocated to the sequence abort + * accept iocb. + **/ +static void +lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, + struct lpfc_iocbq *cmd_iocbq, + struct lpfc_iocbq *rsp_iocbq) +{ + if (cmd_iocbq) { + lpfc_nlp_put(cmd_iocbq->ndlp); + lpfc_sli_release_iocbq(phba, cmd_iocbq); + } + + /* Failure means BLS ABORT RSP did not get delivered to remote node*/ + if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3154 BLS ABORT RSP failed, data: x%x/x%x\n", + get_job_ulpstatus(phba, rsp_iocbq), + get_job_word4(phba, rsp_iocbq)); +} + +/** + * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 
+ * @phba: Pointer to HBA context object. + * @xri: xri id in transaction. + * + * This function validates the xri maps to the known range of XRIs allocated an + * used by the driver. + **/ +uint16_t +lpfc_sli4_xri_inrange(struct lpfc_hba *phba, + uint16_t xri) +{ + uint16_t i; + + for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { + if (xri == phba->sli4_hba.xri_ids[i]) + return i; + } + return NO_XRI; +} + +/** + * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort + * @vport: pointer to a virtual port. + * @fc_hdr: pointer to a FC frame header. + * @aborted: was the partially assembled receive sequence successfully aborted + * + * This function sends a basic response to a previous unsol sequence abort + * event after aborting the sequence handling. + **/ +void +lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, + struct fc_frame_header *fc_hdr, bool aborted) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *ctiocb = NULL; + struct lpfc_nodelist *ndlp; + uint16_t oxid, rxid, xri, lxri; + uint32_t sid, fctl; + union lpfc_wqe128 *icmd; + int rc; + + if (!lpfc_is_link_up(phba)) + return; + + sid = sli4_sid_from_fc_hdr(fc_hdr); + oxid = be16_to_cpu(fc_hdr->fh_ox_id); + rxid = be16_to_cpu(fc_hdr->fh_rx_id); + + ndlp = lpfc_findnode_did(vport, sid); + if (!ndlp) { + ndlp = lpfc_nlp_init(vport, sid); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, + "1268 Failed to allocate ndlp for " + "oxid:x%x SID:x%x\n", oxid, sid); + return; + } + /* Put ndlp onto pport node list */ + lpfc_enqueue_node(vport, ndlp); + } + + /* Allocate buffer for rsp iocb */ + ctiocb = lpfc_sli_get_iocbq(phba); + if (!ctiocb) + return; + + icmd = &ctiocb->wqe; + + /* Extract the F_CTL field from FC_HDR */ + fctl = sli4_fctl_from_fc_hdr(fc_hdr); + + ctiocb->ndlp = lpfc_nlp_get(ndlp); + if (!ctiocb->ndlp) { + lpfc_sli_release_iocbq(phba, ctiocb); + return; + } + + ctiocb->vport = phba->pport; + ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; + ctiocb->sli4_lxritag = NO_XRI; + ctiocb->sli4_xritag = NO_XRI; + ctiocb->abort_rctl = FC_RCTL_BA_ACC; + + if (fctl & FC_FC_EX_CTX) + /* Exchange responder sent the abort so we + * own the oxid. + */ + xri = oxid; + else + xri = rxid; + lxri = lpfc_sli4_xri_inrange(phba, xri); + if (lxri != NO_XRI) + lpfc_set_rrq_active(phba, ndlp, lxri, + (xri == oxid) ? rxid : oxid, 0); + /* For BA_ABTS from exchange responder, if the logical xri with + * the oxid maps to the FCP XRI range, the port no longer has + * that exchange context, send a BLS_RJT. Override the IOCB for + * a BA_RJT. + */ + if ((fctl & FC_FC_EX_CTX) && + (lxri > lpfc_sli4_get_iocb_cnt(phba))) { + ctiocb->abort_rctl = FC_RCTL_BA_RJT; + bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0); + bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp, + FC_BA_RJT_INV_XID); + bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp, + FC_BA_RJT_UNABLE); + } + + /* If BA_ABTS failed to abort a partially assembled receive sequence, + * the driver no longer has that exchange, send a BLS_RJT. Override + * the IOCB for a BA_RJT. + */ + if (aborted == false) { + ctiocb->abort_rctl = FC_RCTL_BA_RJT; + bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0); + bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp, + FC_BA_RJT_INV_XID); + bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp, + FC_BA_RJT_UNABLE); + } + + if (fctl & FC_FC_EX_CTX) { + /* ABTS sent by responder to CT exchange, construction + * of BA_ACC will use OX_ID from ABTS for the XRI_TAG + * field and RX_ID from ABTS for RX_ID field. 
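/*
 * Aside: a one-line standalone restatement of the exchange-context rule
 * used above: when the ABTS carries the Exchange Context F_CTL bit (sent
 * by the exchange responder), the local port owns the originator exchange
 * id, otherwise the responder id. The bit position shown is the commonly
 * defined F_CTL bit 23; treat it as an assumption of this sketch.
 */
#include <stdint.h>

#define FCTL_EX_CTX (1u << 23)	/* F_CTL Exchange Context bit */

static inline uint16_t abts_local_xri(uint32_t fctl,
				      uint16_t oxid, uint16_t rxid)
{
	return (fctl & FCTL_EX_CTX) ? oxid : rxid;
}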
+ */ + ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP; + bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid); + } else { + /* ABTS sent by initiator to CT exchange, construction + * of BA_ACC will need to allocate a new XRI as for the + * XRI_TAG field. + */ + ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT; + } + + /* OX_ID is invariable to who sent ABTS to CT exchange */ + bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid); + bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, rxid); + + /* Use CT=VPI */ + bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest, + ndlp->nlp_DID); + bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX); + + /* Xmit CT abts response on exchange */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", + ctiocb->abort_rctl, oxid, phba->link_state); + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2925 Failed to issue CT ABTS RSP x%x on " + "xri x%x, Data x%x\n", + ctiocb->abort_rctl, oxid, + phba->link_state); + lpfc_nlp_put(ndlp); + ctiocb->ndlp = NULL; + lpfc_sli_release_iocbq(phba, ctiocb); + } +} + +/** + * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event + * @vport: Pointer to the vport on which this sequence was received + * @dmabuf: pointer to a dmabuf that describes the FC sequence + * + * This function handles an SLI-4 unsolicited abort event. If the unsolicited + * receive sequence is only partially assembed by the driver, it shall abort + * the partially assembled frames for the sequence. Otherwise, if the + * unsolicited receive sequence has been completely assembled and passed to + * the Upper Layer Protocol (ULP), it then mark the per oxid status for the + * unsolicited sequence has been aborted. After that, it will issue a basic + * accept to accept the abort. + **/ +static void +lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, + struct hbq_dmabuf *dmabuf) +{ + struct lpfc_hba *phba = vport->phba; + struct fc_frame_header fc_hdr; + uint32_t fctl; + bool aborted; + + /* Make a copy of fc_hdr before the dmabuf being released */ + memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); + fctl = sli4_fctl_from_fc_hdr(&fc_hdr); + + if (fctl & FC_FC_EX_CTX) { + /* ABTS by responder to exchange, no cleanup needed */ + aborted = true; + } else { + /* ABTS by initiator to exchange, need to do cleanup */ + aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf); + if (aborted == false) + aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf); + } + lpfc_in_buf_free(phba, &dmabuf->dbuf); + + if (phba->nvmet_support) { + lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr); + return; + } + + /* Respond with BA_ACC or BA_RJT accordingly */ + lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted); +} + +/** + * lpfc_seq_complete - Indicates if a sequence is complete + * @dmabuf: pointer to a dmabuf that describes the FC sequence + * + * This function checks the sequence, starting with the frame described by + * @dmabuf, to see if all the frames associated with this sequence are present. + * the frames associated with this sequence are linked to the @dmabuf using the + * dbuf list. This function looks for two major things. 1) That the first frame + * has a sequence count of zero. 2) There is a frame with last frame of sequence + * set. 3) That there are no holes in the sequence count. 
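/*
 * Aside: a standalone sketch of the completeness test described here,
 * applied to frames already sorted by SEQ_CNT: the first count must be
 * zero, counts must increase by exactly one, and some frame must carry
 * the end-of-sequence F_CTL bit. The constant and types below are
 * illustrative assumptions, not lpfc definitions.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define FCTL_END_SEQ (1u << 19)	/* F_CTL End_Sequence bit */

struct rx_frame {
	uint16_t seq_cnt;
	uint32_t f_ctl;
};

static bool seq_is_complete(const struct rx_frame *frames, size_t nframes)
{
	size_t i;

	for (i = 0; i < nframes; i++) {
		if (frames[i].seq_cnt != i)
			return false;		/* hole or bad first count */
		if (frames[i].f_ctl & FCTL_END_SEQ)
			return true;		/* last frame seen, no holes */
	}
	return false;				/* no end-of-sequence yet */
}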
The function will + * return 1 when the sequence is complete, otherwise it will return 0. + **/ +static int +lpfc_seq_complete(struct hbq_dmabuf *dmabuf) +{ + struct fc_frame_header *hdr; + struct lpfc_dmabuf *d_buf; + struct hbq_dmabuf *seq_dmabuf; + uint32_t fctl; + int seq_count = 0; + + hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; + /* make sure first fame of sequence has a sequence count of zero */ + if (hdr->fh_seq_cnt != seq_count) + return 0; + fctl = (hdr->fh_f_ctl[0] << 16 | + hdr->fh_f_ctl[1] << 8 | + hdr->fh_f_ctl[2]); + /* If last frame of sequence we can return success. */ + if (fctl & FC_FC_END_SEQ) + return 1; + list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { + seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); + hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; + /* If there is a hole in the sequence count then fail. */ + if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) + return 0; + fctl = (hdr->fh_f_ctl[0] << 16 | + hdr->fh_f_ctl[1] << 8 | + hdr->fh_f_ctl[2]); + /* If last frame of sequence we can return success. */ + if (fctl & FC_FC_END_SEQ) + return 1; + } + return 0; +} + +/** + * lpfc_prep_seq - Prep sequence for ULP processing + * @vport: Pointer to the vport on which this sequence was received + * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence + * + * This function takes a sequence, described by a list of frames, and creates + * a list of iocbq structures to describe the sequence. This iocbq list will be + * used to issue to the generic unsolicited sequence handler. This routine + * returns a pointer to the first iocbq in the list. If the function is unable + * to allocate an iocbq then it throw out the received frames that were not + * able to be described and return a pointer to the first iocbq. If unable to + * allocate any iocbqs (including the first) this function will return NULL. + **/ +static struct lpfc_iocbq * +lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) +{ + struct hbq_dmabuf *hbq_buf; + struct lpfc_dmabuf *d_buf, *n_buf; + struct lpfc_iocbq *first_iocbq, *iocbq; + struct fc_frame_header *fc_hdr; + uint32_t sid; + uint32_t len, tot_len; + + fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; + /* remove from receive buffer list */ + list_del_init(&seq_dmabuf->hbuf.list); + lpfc_update_rcv_time_stamp(vport); + /* get the Remote Port's SID */ + sid = sli4_sid_from_fc_hdr(fc_hdr); + tot_len = 0; + /* Get an iocbq struct to fill in. */ + first_iocbq = lpfc_sli_get_iocbq(vport->phba); + if (first_iocbq) { + /* Initialize the first IOCB. 
*/ + first_iocbq->wcqe_cmpl.total_data_placed = 0; + bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl, + IOSTAT_SUCCESS); + first_iocbq->vport = vport; + + /* Check FC Header to see what TYPE of frame we are rcv'ing */ + if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { + bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp, + sli4_did_from_fc_hdr(fc_hdr)); + } + + bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com, + NO_XRI); + bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com, + be16_to_cpu(fc_hdr->fh_ox_id)); + + /* put the first buffer into the first iocb */ + tot_len = bf_get(lpfc_rcqe_length, + &seq_dmabuf->cq_event.cqe.rcqe_cmpl); + + first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf; + first_iocbq->bpl_dmabuf = NULL; + /* Keep track of the BDE count */ + first_iocbq->wcqe_cmpl.word3 = 1; + + if (tot_len > LPFC_DATA_BUF_SIZE) + first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = + LPFC_DATA_BUF_SIZE; + else + first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len; + + first_iocbq->wcqe_cmpl.total_data_placed = tot_len; + bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest, + sid); + } + iocbq = first_iocbq; + /* + * Each IOCBq can have two Buffers assigned, so go through the list + * of buffers for this sequence and save two buffers in each IOCBq + */ + list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { + if (!iocbq) { + lpfc_in_buf_free(vport->phba, d_buf); + continue; + } + if (!iocbq->bpl_dmabuf) { + iocbq->bpl_dmabuf = d_buf; + iocbq->wcqe_cmpl.word3++; + /* We need to get the size out of the right CQE */ + hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); + len = bf_get(lpfc_rcqe_length, + &hbq_buf->cq_event.cqe.rcqe_cmpl); + iocbq->unsol_rcv_len = len; + iocbq->wcqe_cmpl.total_data_placed += len; + tot_len += len; + } else { + iocbq = lpfc_sli_get_iocbq(vport->phba); + if (!iocbq) { + if (first_iocbq) { + bf_set(lpfc_wcqe_c_status, + &first_iocbq->wcqe_cmpl, + IOSTAT_SUCCESS); + first_iocbq->wcqe_cmpl.parameter = + IOERR_NO_RESOURCES; + } + lpfc_in_buf_free(vport->phba, d_buf); + continue; + } + /* We need to get the size out of the right CQE */ + hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); + len = bf_get(lpfc_rcqe_length, + &hbq_buf->cq_event.cqe.rcqe_cmpl); + iocbq->cmd_dmabuf = d_buf; + iocbq->bpl_dmabuf = NULL; + iocbq->wcqe_cmpl.word3 = 1; + + if (len > LPFC_DATA_BUF_SIZE) + iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize = + LPFC_DATA_BUF_SIZE; + else + iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize = + len; + + tot_len += len; + iocbq->wcqe_cmpl.total_data_placed = tot_len; + bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest, + sid); + list_add_tail(&iocbq->list, &first_iocbq->list); + } + } + /* Free the sequence's header buffer */ + if (!first_iocbq) + lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf); + + return first_iocbq; +} + +static void +lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, + struct hbq_dmabuf *seq_dmabuf) +{ + struct fc_frame_header *fc_hdr; + struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; + struct lpfc_hba *phba = vport->phba; + + fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; + iocbq = lpfc_prep_seq(vport, seq_dmabuf); + if (!iocbq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2707 Ring %d handler: Failed to allocate " + "iocb Rctl x%x Type x%x received\n", + LPFC_ELS_RING, + fc_hdr->fh_r_ctl, fc_hdr->fh_type); + return; + } + if (!lpfc_complete_unsol_iocb(phba, + phba->sli4_hba.els_wq->pring, + iocbq, fc_hdr->fh_r_ctl, + fc_hdr->fh_type)) { + lpfc_printf_log(phba, KERN_ERR, 
LOG_TRACE_EVENT, + "2540 Ring %d handler: unexpected Rctl " + "x%x Type x%x received\n", + LPFC_ELS_RING, + fc_hdr->fh_r_ctl, fc_hdr->fh_type); + lpfc_in_buf_free(phba, &seq_dmabuf->dbuf); + } + + /* Free iocb created in lpfc_prep_seq */ + list_for_each_entry_safe(curr_iocb, next_iocb, + &iocbq->list, list) { + list_del_init(&curr_iocb->list); + lpfc_sli_release_iocbq(phba, curr_iocb); + } + lpfc_sli_release_iocbq(phba, iocbq); +} + +static void +lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; + + if (pcmd && pcmd->virt) + dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); + kfree(pcmd); + lpfc_sli_release_iocbq(phba, cmdiocb); + lpfc_drain_txq(phba); +} + +static void +lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, + struct hbq_dmabuf *dmabuf) +{ + struct fc_frame_header *fc_hdr; + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *iocbq = NULL; + union lpfc_wqe128 *pwqe; + struct lpfc_dmabuf *pcmd = NULL; + uint32_t frame_len; + int rc; + unsigned long iflags; + + fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; + frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); + + /* Send the received frame back */ + iocbq = lpfc_sli_get_iocbq(phba); + if (!iocbq) { + /* Queue cq event and wakeup worker thread to process it */ + spin_lock_irqsave(&phba->hbalock, iflags); + list_add_tail(&dmabuf->cq_event.list, + &phba->sli4_hba.sp_queue_event); + phba->hba_flag |= HBA_SP_QUEUE_EVT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_worker_wake_up(phba); + return; + } + + /* Allocate buffer for command payload */ + pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (pcmd) + pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, + &pcmd->phys); + if (!pcmd || !pcmd->virt) + goto exit; + + INIT_LIST_HEAD(&pcmd->list); + + /* copyin the payload */ + memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); + + iocbq->cmd_dmabuf = pcmd; + iocbq->vport = vport; + iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK; + iocbq->cmd_flag |= LPFC_USE_FCPWQIDX; + iocbq->num_bdes = 0; + + pwqe = &iocbq->wqe; + /* fill in BDE's for command */ + pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys); + pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys); + pwqe->gen_req.bde.tus.f.bdeSize = frame_len; + pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + + pwqe->send_frame.frame_len = frame_len; + pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr)); + pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1)); + pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2)); + pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3)); + pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4)); + pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5)); + + pwqe->generic.wqe_com.word7 = 0; + pwqe->generic.wqe_com.word10 = 0; + + bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME); + bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */ + bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */ + bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1); + bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1); + bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1); + bf_set(wqe_xc, &pwqe->generic.wqe_com, 1); + bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA); + bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag); + bf_set(wqe_reqtag, 
&pwqe->generic.wqe_com, iocbq->iotag); + bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3); + pwqe->generic.wqe_com.abort_tag = iocbq->iotag; + + iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl; + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); + if (rc == IOCB_ERROR) + goto exit; + + lpfc_in_buf_free(phba, &dmabuf->dbuf); + return; + +exit: + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2023 Unable to process MDS loopback frame\n"); + if (pcmd && pcmd->virt) + dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); + kfree(pcmd); + if (iocbq) + lpfc_sli_release_iocbq(phba, iocbq); + lpfc_in_buf_free(phba, &dmabuf->dbuf); +} + +/** + * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware + * @phba: Pointer to HBA context object. + * @dmabuf: Pointer to a dmabuf that describes the FC sequence. + * + * This function is called with no lock held. This function processes all + * the received buffers and gives it to upper layers when a received buffer + * indicates that it is the final frame in the sequence. The interrupt + * service routine processes received buffers at interrupt contexts. + * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the + * appropriate receive function when the final frame in a sequence is received. + **/ +void +lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, + struct hbq_dmabuf *dmabuf) +{ + struct hbq_dmabuf *seq_dmabuf; + struct fc_frame_header *fc_hdr; + struct lpfc_vport *vport; + uint32_t fcfi; + uint32_t did; + + /* Process each received buffer */ + fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; + + if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || + fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { + vport = phba->pport; + /* Handle MDS Loopback frames */ + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_sli4_handle_mds_loopback(vport, dmabuf); + else + lpfc_in_buf_free(phba, &dmabuf->dbuf); + return; + } + + /* check to see if this a valid type of frame */ + if (lpfc_fc_frame_check(phba, fc_hdr)) { + lpfc_in_buf_free(phba, &dmabuf->dbuf); + return; + } + + if ((bf_get(lpfc_cqe_code, + &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) + fcfi = bf_get(lpfc_rcqe_fcf_id_v1, + &dmabuf->cq_event.cqe.rcqe_cmpl); + else + fcfi = bf_get(lpfc_rcqe_fcf_id, + &dmabuf->cq_event.cqe.rcqe_cmpl); + + if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) { + vport = phba->pport; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2023 MDS Loopback %d bytes\n", + bf_get(lpfc_rcqe_length, + &dmabuf->cq_event.cqe.rcqe_cmpl)); + /* Handle MDS Loopback frames */ + lpfc_sli4_handle_mds_loopback(vport, dmabuf); + return; + } + + /* d_id this frame is directed to */ + did = sli4_did_from_fc_hdr(fc_hdr); + + vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); + if (!vport) { + /* throw out the frame */ + lpfc_in_buf_free(phba, &dmabuf->dbuf); + return; + } + + /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ + if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && + (did != Fabric_DID)) { + /* + * Throw out the frame if we are not pt2pt. + * The pt2pt protocol allows for discovery frames + * to be received without a registered VPI. 
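+ * In pt2pt mode the discovery exchange can arrive before the VPI is
+ * registered, so such frames are only dropped here once the link has
+ * reached LPFC_HBA_READY.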
+ */ + if (!(vport->fc_flag & FC_PT2PT) || + (phba->link_state == LPFC_HBA_READY)) { + lpfc_in_buf_free(phba, &dmabuf->dbuf); + return; + } + } + + /* Handle the basic abort sequence (BA_ABTS) event */ + if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { + lpfc_sli4_handle_unsol_abort(vport, dmabuf); + return; + } + + /* Link this frame */ + seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); + if (!seq_dmabuf) { + /* unable to add frame to vport - throw it out */ + lpfc_in_buf_free(phba, &dmabuf->dbuf); + return; + } + /* If not last frame in sequence continue processing frames. */ + if (!lpfc_seq_complete(seq_dmabuf)) + return; + + /* Send the complete sequence to the upper layer protocol */ + lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); +} + +/** + * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to post rpi header templates to the + * HBA consistent with the SLI-4 interface spec. This routine + * posts a SLI4_PAGE_SIZE memory region to the port to hold up to + * SLI4_PAGE_SIZE modulo 64 rpi context headers. + * + * This routine does not require any locks. It's usage is expected + * to be driver load or reset recovery when the driver is + * sequential. + * + * Return codes + * 0 - successful + * -EIO - The mailbox failed to complete successfully. + * When this error occurs, the driver is not guaranteed + * to have any rpi regions posted to the device and + * must either attempt to repost the regions or take a + * fatal error. + **/ +int +lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) +{ + struct lpfc_rpi_hdr *rpi_page; + uint32_t rc = 0; + uint16_t lrpi = 0; + + /* SLI4 ports that support extents do not require RPI headers. */ + if (!phba->sli4_hba.rpi_hdrs_in_use) + goto exit; + if (phba->sli4_hba.extents_in_use) + return -EIO; + + list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { + /* + * Assign the rpi headers a physical rpi only if the driver + * has not initialized those resources. A port reset only + * needs the headers posted. + */ + if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != + LPFC_RPI_RSRC_RDY) + rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; + + rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2008 Error %d posting all rpi " + "headers\n", rc); + rc = -EIO; + break; + } + } + + exit: + bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, + LPFC_RPI_RSRC_RDY); + return rc; +} + +/** + * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port + * @phba: pointer to lpfc hba data structure. + * @rpi_page: pointer to the rpi memory region. + * + * This routine is invoked to post a single rpi header to the + * HBA consistent with the SLI-4 interface spec. This memory region + * maps up to 64 rpi context regions. + * + * Return codes + * 0 - successful + * -ENOMEM - No available memory + * -EIO - The mailbox failed to complete successfully. + **/ +int +lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) +{ + LPFC_MBOXQ_t *mboxq; + struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; + uint32_t rc = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + /* SLI4 ports that support extents do not require RPI headers. */ + if (!phba->sli4_hba.rpi_hdrs_in_use) + return rc; + if (phba->sli4_hba.extents_in_use) + return -EIO; + + /* The port is notified of the header region via a mailbox command. 
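+ * (FCOE POST_HDR_TEMPLATE); the starting rpi offset, the page count and
+ * the region's physical address are filled in below and the mailbox is
+ * issued with MBX_POLL.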
*/ + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2001 Unable to allocate memory for issuing " + "SLI_CONFIG_SPECIAL mailbox command\n"); + return -ENOMEM; + } + + /* Post all rpi memory regions to the port. */ + hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, + sizeof(struct lpfc_mbx_post_hdr_tmpl) - + sizeof(struct lpfc_sli4_cfg_mhdr), + LPFC_SLI4_MBX_EMBED); + + + /* Post the physical rpi to the port for this rpi header. */ + bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, + rpi_page->start_rpi); + bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, + hdr_tmpl, rpi_page->page_count); + + hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); + hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + mempool_free(mboxq, phba->mbox_mem_pool); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2514 POST_RPI_HDR mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + rc = -ENXIO; + } else { + /* + * The next_rpi stores the next logical module-64 rpi value used + * to post physical rpis in subsequent rpi postings. + */ + spin_lock_irq(&phba->hbalock); + phba->sli4_hba.next_rpi = rpi_page->next_rpi; + spin_unlock_irq(&phba->hbalock); + } + return rc; +} + +/** + * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to post rpi header templates to the + * HBA consistent with the SLI-4 interface spec. This routine + * posts a SLI4_PAGE_SIZE memory region to the port to hold up to + * SLI4_PAGE_SIZE modulo 64 rpi context headers. + * + * Returns + * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful + * LPFC_RPI_ALLOC_ERROR if no rpis are available. + **/ +int +lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) +{ + unsigned long rpi; + uint16_t max_rpi, rpi_limit; + uint16_t rpi_remaining, lrpi = 0; + struct lpfc_rpi_hdr *rpi_hdr; + unsigned long iflag; + + /* + * Fetch the next logical rpi. Because this index is logical, + * the driver starts at 0 each time. + */ + spin_lock_irqsave(&phba->hbalock, iflag); + max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; + rpi_limit = phba->sli4_hba.next_rpi; + + rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit); + if (rpi >= rpi_limit) + rpi = LPFC_RPI_ALLOC_ERROR; + else { + set_bit(rpi, phba->sli4_hba.rpi_bmask); + phba->sli4_hba.max_cfg_param.rpi_used++; + phba->sli4_hba.rpi_count++; + } + lpfc_printf_log(phba, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "0001 Allocated rpi:x%x max:x%x lim:x%x\n", + (int) rpi, max_rpi, rpi_limit); + + /* + * Don't try to allocate more rpi header regions if the device limit + * has been exhausted. + */ + if ((rpi == LPFC_RPI_ALLOC_ERROR) && + (phba->sli4_hba.rpi_count >= max_rpi)) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + return rpi; + } + + /* + * RPI header postings are not required for SLI4 ports capable of + * extents. 
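+ * When headers are in use and the number of remaining logical rpis
+ * drops below LPFC_RPI_LOW_WATER_MARK, another rpi header page is
+ * created and posted below before the allocated rpi is returned.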
+ */ + if (!phba->sli4_hba.rpi_hdrs_in_use) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + return rpi; + } + + /* + * If the driver is running low on rpi resources, allocate another + * page now. Note that the next_rpi value is used because + * it represents how many are actually in use whereas max_rpi notes + * how many are supported max by the device. + */ + rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; + spin_unlock_irqrestore(&phba->hbalock, iflag); + if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { + rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); + if (!rpi_hdr) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2002 Error Could not grow rpi " + "count\n"); + } else { + lrpi = rpi_hdr->start_rpi; + rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; + lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); + } + } + + return rpi; +} + +/** + * __lpfc_sli4_free_rpi - Release an rpi for reuse. + * @phba: pointer to lpfc hba data structure. + * @rpi: rpi to free + * + * This routine is invoked to release an rpi to the pool of + * available rpis maintained by the driver. + **/ +static void +__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) +{ + /* + * if the rpi value indicates a prior unreg has already + * been done, skip the unreg. + */ + if (rpi == LPFC_RPI_ALLOC_ERROR) + return; + + if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { + phba->sli4_hba.rpi_count--; + phba->sli4_hba.max_cfg_param.rpi_used--; + } else { + lpfc_printf_log(phba, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "2016 rpi %x not inuse\n", + rpi); + } +} + +/** + * lpfc_sli4_free_rpi - Release an rpi for reuse. + * @phba: pointer to lpfc hba data structure. + * @rpi: rpi to free + * + * This routine is invoked to release an rpi to the pool of + * available rpis maintained by the driver. + **/ +void +lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) +{ + spin_lock_irq(&phba->hbalock); + __lpfc_sli4_free_rpi(phba, rpi); + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli4_remove_rpis - Remove the rpi bitmask region + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to remove the memory region that + * provided rpi via a bitmask. + **/ +void +lpfc_sli4_remove_rpis(struct lpfc_hba *phba) +{ + kfree(phba->sli4_hba.rpi_bmask); + kfree(phba->sli4_hba.rpi_ids); + bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); +} + +/** + * lpfc_sli4_resume_rpi - Remove the rpi bitmask region + * @ndlp: pointer to lpfc nodelist data structure. + * @cmpl: completion call-back. + * @arg: data to load as MBox 'caller buffer information' + * + * This routine is invoked to remove the memory region that + * provided rpi via a bitmask. + **/ +int +lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, + void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg) +{ + LPFC_MBOXQ_t *mboxq; + struct lpfc_hba *phba = ndlp->phba; + int rc; + + /* The port is notified of the header region via a mailbox command. */ + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + /* If cmpl assigned, then this nlp_get pairs with + * lpfc_mbx_cmpl_resume_rpi. + * + * Else cmpl is NULL, then this nlp_get pairs with + * lpfc_sli_def_mbox_cmpl. + */ + if (!lpfc_nlp_get(ndlp)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2122 %s: Failed to get nlp ref\n", + __func__); + mempool_free(mboxq, phba->mbox_mem_pool); + return -EIO; + } + + /* Post all rpi memory regions to the port. 
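+ * lpfc_resume_rpi() builds the RESUME_RPI mailbox, which is issued
+ * with MBX_NOWAIT; if it cannot be issued, the node reference taken
+ * above is released and the mailbox is returned to the pool.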
*/ + lpfc_resume_rpi(mboxq, ndlp); + if (cmpl) { + mboxq->mbox_cmpl = cmpl; + mboxq->ctx_buf = arg; + } else + mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mboxq->ctx_ndlp = ndlp; + mboxq->vport = ndlp->vport; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2010 Resume RPI Mailbox failed " + "status %d, mbxStatus x%x\n", rc, + bf_get(lpfc_mqe_status, &mboxq->u.mqe)); + lpfc_nlp_put(ndlp); + mempool_free(mboxq, phba->mbox_mem_pool); + return -EIO; + } + return 0; +} + +/** + * lpfc_sli4_init_vpi - Initialize a vpi with the port + * @vport: Pointer to the vport for which the vpi is being initialized + * + * This routine is invoked to activate a vpi with the port. + * + * Returns: + * 0 success + * -Evalue otherwise + **/ +int +lpfc_sli4_init_vpi(struct lpfc_vport *vport) +{ + LPFC_MBOXQ_t *mboxq; + int rc = 0; + int retval = MBX_SUCCESS; + uint32_t mbox_tmo; + struct lpfc_hba *phba = vport->phba; + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + lpfc_init_vpi(phba, mboxq, vport->vpi); + mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); + rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); + if (rc != MBX_SUCCESS) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2022 INIT VPI Mailbox failed " + "status %d, mbxStatus x%x\n", rc, + bf_get(lpfc_mqe_status, &mboxq->u.mqe)); + retval = -EIO; + } + if (rc != MBX_TIMEOUT) + mempool_free(mboxq, vport->phba->mbox_mem_pool); + + return retval; +} + +/** + * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. + * @phba: pointer to lpfc hba data structure. + * @mboxq: Pointer to mailbox object. + * + * This routine is invoked to manually add a single FCF record. The caller + * must pass a completely initialized FCF_Record. This routine takes + * care of the nonembedded mailbox operations. + **/ +static void +lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + void *virt_addr; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t shdr_status, shdr_add_status; + + virt_addr = mboxq->sge_array->addr[0]; + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + + if ((shdr_status || shdr_add_status) && + (shdr_status != STATUS_FCF_IN_USE)) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2558 ADD_FCF_RECORD mailbox failed with " + "status x%x add_status x%x\n", + shdr_status, shdr_add_status); + + lpfc_sli4_mbox_cmd_free(phba, mboxq); +} + +/** + * lpfc_sli4_add_fcf_record - Manually add an FCF Record. + * @phba: pointer to lpfc hba data structure. + * @fcf_record: pointer to the initialized fcf record to add. + * + * This routine is invoked to manually add a single FCF record. The caller + * must pass a completely initialized FCF_Record. This routine takes + * care of the nonembedded mailbox operations. 
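+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory for the mailbox or its DMA payload
+ * -EIO - The mailbox could not be issued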
+ **/ +int +lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) +{ + int rc = 0; + LPFC_MBOXQ_t *mboxq; + uint8_t *bytep; + void *virt_addr; + struct lpfc_mbx_sge sge; + uint32_t alloc_len, req_len; + uint32_t fcfindex; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2009 Failed to allocate mbox for ADD_FCF cmd\n"); + return -ENOMEM; + } + + req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + + sizeof(uint32_t); + + /* Allocate DMA memory and set up the non-embedded mailbox command */ + alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_ADD_FCF, + req_len, LPFC_SLI4_MBX_NEMBED); + if (alloc_len < req_len) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2523 Allocated DMA memory size (x%x) is " + "less than the requested DMA memory " + "size (x%x)\n", alloc_len, req_len); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return -ENOMEM; + } + + /* + * Get the first SGE entry from the non-embedded DMA memory. This + * routine only uses a single SGE. + */ + lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); + virt_addr = mboxq->sge_array->addr[0]; + /* + * Configure the FCF record for FCFI 0. This is the driver's + * hardcoded default and gets used in nonFIP mode. + */ + fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); + bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); + lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); + + /* + * Copy the fcf_index and the FCF Record Data. The data starts after + * the FCoE header plus word10. The data copy needs to be endian + * correct. + */ + bytep += sizeof(uint32_t); + lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2515 ADD_FCF_RECORD mailbox failed with " + "status 0x%x\n", rc); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + rc = -EIO; + } else + rc = 0; + + return rc; +} + +/** + * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. + * @phba: pointer to lpfc hba data structure. + * @fcf_record: pointer to the fcf record to write the default data. + * @fcf_index: FCF table entry index. + * + * This routine is invoked to build the driver's default FCF record. The + * values used are hardcoded. This routine handles memory initialization. 
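+ * The record is zeroed first, the FC-MAP bytes from @phba seed both
+ * the FCF MAC prefix and the FC map fields, and the entry is marked
+ * valid and available with FPMA/SPMA MAC address provisioning.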
+ * + **/ +void +lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, + struct fcf_record *fcf_record, + uint16_t fcf_index) +{ + memset(fcf_record, 0, sizeof(struct fcf_record)); + fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; + fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; + fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; + bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); + bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); + bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); + bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); + bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); + bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); + bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); + bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); + bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); + bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); + bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); + bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); + bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, + LPFC_FCF_FPMA | LPFC_FCF_SPMA); + /* Set the VLAN bit map */ + if (phba->valid_vlan) { + fcf_record->vlan_bitmap[phba->vlan_id / 8] + = 1 << (phba->vlan_id % 8); + } +} + +/** + * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. + * @phba: pointer to lpfc hba data structure. + * @fcf_index: FCF table entry offset. + * + * This routine is invoked to scan the entire FCF table by reading FCF + * record and processing it one at a time starting from the @fcf_index + * for initial FCF discovery or fast FCF failover rediscovery. + * + * Return 0 if the mailbox command is submitted successfully, none 0 + * otherwise. + **/ +int +lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) +{ + int rc = 0, error; + LPFC_MBOXQ_t *mboxq; + + phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; + phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2000 Failed to allocate mbox for " + "READ_FCF cmd\n"); + error = -ENOMEM; + goto fail_fcf_scan; + } + /* Construct the read FCF record mailbox command */ + rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); + if (rc) { + error = -EINVAL; + goto fail_fcf_scan; + } + /* Issue the mailbox command asynchronously */ + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; + + spin_lock_irq(&phba->hbalock); + phba->hba_flag |= FCF_TS_INPROG; + spin_unlock_irq(&phba->hbalock); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + error = -EIO; + else { + /* Reset eligible FCF count for new scan */ + if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) + phba->fcf.eligible_fcf_cnt = 0; + error = 0; + } +fail_fcf_scan: + if (error) { + if (mboxq) + lpfc_sli4_mbox_cmd_free(phba, mboxq); + /* FCF scan failed, clear FCF_TS_INPROG flag */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~FCF_TS_INPROG; + spin_unlock_irq(&phba->hbalock); + } + return error; +} + +/** + * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. + * @phba: pointer to lpfc hba data structure. + * @fcf_index: FCF table entry offset. + * + * This routine is invoked to read an FCF record indicated by @fcf_index + * and to use it for FLOGI roundrobin FCF failover. 
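+ * The read FCF record mailbox is issued with MBX_NOWAIT and completes
+ * asynchronously in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec.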
+ * + * Return 0 if the mailbox command is submitted successfully, none 0 + * otherwise. + **/ +int +lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) +{ + int rc = 0, error; + LPFC_MBOXQ_t *mboxq; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, + "2763 Failed to allocate mbox for " + "READ_FCF cmd\n"); + error = -ENOMEM; + goto fail_fcf_read; + } + /* Construct the read FCF record mailbox command */ + rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); + if (rc) { + error = -EINVAL; + goto fail_fcf_read; + } + /* Issue the mailbox command asynchronously */ + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + error = -EIO; + else + error = 0; + +fail_fcf_read: + if (error && mboxq) + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return error; +} + +/** + * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. + * @phba: pointer to lpfc hba data structure. + * @fcf_index: FCF table entry offset. + * + * This routine is invoked to read an FCF record indicated by @fcf_index to + * determine whether it's eligible for FLOGI roundrobin failover list. + * + * Return 0 if the mailbox command is submitted successfully, none 0 + * otherwise. + **/ +int +lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) +{ + int rc = 0, error; + LPFC_MBOXQ_t *mboxq; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, + "2758 Failed to allocate mbox for " + "READ_FCF cmd\n"); + error = -ENOMEM; + goto fail_fcf_read; + } + /* Construct the read FCF record mailbox command */ + rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); + if (rc) { + error = -EINVAL; + goto fail_fcf_read; + } + /* Issue the mailbox command asynchronously */ + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + error = -EIO; + else + error = 0; + +fail_fcf_read: + if (error && mboxq) + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return error; +} + +/** + * lpfc_check_next_fcf_pri_level + * @phba: pointer to the lpfc_hba struct for this port. + * This routine is called from the lpfc_sli4_fcf_rr_next_index_get + * routine when the rr_bmask is empty. The FCF indecies are put into the + * rr_bmask based on their priority level. Starting from the highest priority + * to the lowest. The most likely FCF candidate will be in the highest + * priority group. When this routine is called it searches the fcf_pri list for + * next lowest priority group and repopulates the rr_bmask with only those + * fcf_indexes. 
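+ * Illustration (hypothetical indexes and priorities): with fcf_pri
+ * entries {index 0 pri 2 FLOGI-failed, index 1 pri 2 FLOGI-failed,
+ * index 5 pri 4}, the priority-2 entries are skipped and only index 5
+ * is set in rr_bmask; if every entry has failed FLOGI, the failed
+ * flags are cleared and the highest priority group is reused.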
+ * returns: + * 1=success 0=failure + **/ +static int +lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) +{ + uint16_t next_fcf_pri; + uint16_t last_index; + struct lpfc_fcf_pri *fcf_pri; + int rc; + int ret = 0; + + last_index = find_first_bit(phba->fcf.fcf_rr_bmask, + LPFC_SLI4_FCF_TBL_INDX_MAX); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "3060 Last IDX %d\n", last_index); + + /* Verify the priority list has 2 or more entries */ + spin_lock_irq(&phba->hbalock); + if (list_empty(&phba->fcf.fcf_pri_list) || + list_is_singular(&phba->fcf.fcf_pri_list)) { + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + "3061 Last IDX %d\n", last_index); + return 0; /* Empty rr list */ + } + spin_unlock_irq(&phba->hbalock); + + next_fcf_pri = 0; + /* + * Clear the rr_bmask and set all of the bits that are at this + * priority. + */ + memset(phba->fcf.fcf_rr_bmask, 0, + sizeof(*phba->fcf.fcf_rr_bmask)); + spin_lock_irq(&phba->hbalock); + list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { + if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) + continue; + /* + * the 1st priority that has not FLOGI failed + * will be the highest. + */ + if (!next_fcf_pri) + next_fcf_pri = fcf_pri->fcf_rec.priority; + spin_unlock_irq(&phba->hbalock); + if (fcf_pri->fcf_rec.priority == next_fcf_pri) { + rc = lpfc_sli4_fcf_rr_index_set(phba, + fcf_pri->fcf_rec.fcf_index); + if (rc) + return 0; + } + spin_lock_irq(&phba->hbalock); + } + /* + * if next_fcf_pri was not set above and the list is not empty then + * we have failed flogis on all of them. So reset flogi failed + * and start at the beginning. + */ + if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { + list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { + fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; + /* + * the 1st priority that has not FLOGI failed + * will be the highest. + */ + if (!next_fcf_pri) + next_fcf_pri = fcf_pri->fcf_rec.priority; + spin_unlock_irq(&phba->hbalock); + if (fcf_pri->fcf_rec.priority == next_fcf_pri) { + rc = lpfc_sli4_fcf_rr_index_set(phba, + fcf_pri->fcf_rec.fcf_index); + if (rc) + return 0; + } + spin_lock_irq(&phba->hbalock); + } + } else + ret = 1; + spin_unlock_irq(&phba->hbalock); + + return ret; +} +/** + * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index + * @phba: pointer to lpfc hba data structure. + * + * This routine is to get the next eligible FCF record index in a round + * robin fashion. If the next eligible FCF record index equals to the + * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) + * shall be returned, otherwise, the next eligible FCF record's index + * shall be returned. + **/ +uint16_t +lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) +{ + uint16_t next_fcf_index; + +initial_priority: + /* Search start from next bit of currently registered FCF index */ + next_fcf_index = phba->fcf.current_rec.fcf_indx; + +next_priority: + /* Determine the next fcf index to check */ + next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; + next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, + LPFC_SLI4_FCF_TBL_INDX_MAX, + next_fcf_index); + + /* Wrap around condition on phba->fcf.fcf_rr_bmask */ + if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { + /* + * If we have wrapped then we need to clear the bits that + * have been tested so that we can detect when we should + * change the priority level. 
+ */ + next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask, + LPFC_SLI4_FCF_TBL_INDX_MAX); + } + + + /* Check roundrobin failover list empty condition */ + if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || + next_fcf_index == phba->fcf.current_rec.fcf_indx) { + /* + * If next fcf index is not found check if there are lower + * Priority level fcf's in the fcf_priority list. + * Set up the rr_bmask with all of the avaiable fcf bits + * at that level and continue the selection process. + */ + if (lpfc_check_next_fcf_pri_level(phba)) + goto initial_priority; + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2844 No roundrobin failover FCF available\n"); + + return LPFC_FCOE_FCF_NEXT_NONE; + } + + if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && + phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & + LPFC_FCF_FLOGI_FAILED) { + if (list_is_singular(&phba->fcf.fcf_pri_list)) + return LPFC_FCOE_FCF_NEXT_NONE; + + goto next_priority; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2845 Get next roundrobin failover FCF (x%x)\n", + next_fcf_index); + + return next_fcf_index; +} + +/** + * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index + * @phba: pointer to lpfc hba data structure. + * @fcf_index: index into the FCF table to 'set' + * + * This routine sets the FCF record index in to the eligible bmask for + * roundrobin failover search. It checks to make sure that the index + * does not go beyond the range of the driver allocated bmask dimension + * before setting the bit. + * + * Returns 0 if the index bit successfully set, otherwise, it returns + * -EINVAL. + **/ +int +lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) +{ + if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + "2610 FCF (x%x) reached driver's book " + "keeping dimension:x%x\n", + fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); + return -EINVAL; + } + /* Set the eligible FCF record index bmask */ + set_bit(fcf_index, phba->fcf.fcf_rr_bmask); + + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2790 Set FCF (x%x) to roundrobin FCF failover " + "bmask\n", fcf_index); + + return 0; +} + +/** + * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index + * @phba: pointer to lpfc hba data structure. + * @fcf_index: index into the FCF table to 'clear' + * + * This routine clears the FCF record index from the eligible bmask for + * roundrobin failover search. It checks to make sure that the index + * does not go beyond the range of the driver allocated bmask dimension + * before clearing the bit. + **/ +void +lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) +{ + struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; + if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + "2762 FCF (x%x) reached driver's book " + "keeping dimension:x%x\n", + fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); + return; + } + /* Clear the eligible FCF record index bmask */ + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, + list) { + if (fcf_pri->fcf_rec.fcf_index == fcf_index) { + list_del_init(&fcf_pri->list); + break; + } + } + spin_unlock_irq(&phba->hbalock); + clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); + + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2791 Clear FCF (x%x) from roundrobin failover " + "bmask\n", fcf_index); +} + +/** + * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table + * @phba: pointer to lpfc hba data structure. 
+ * @mbox: An allocated pointer to type LPFC_MBOXQ_t + * + * This routine is the completion routine for the rediscover FCF table mailbox + * command. If the mailbox command returned failure, it will try to stop the + * FCF rediscover wait timer. + **/ +static void +lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) +{ + struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; + uint32_t shdr_status, shdr_add_status; + + redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; + + shdr_status = bf_get(lpfc_mbox_hdr_status, + &redisc_fcf->header.cfg_shdr.response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, + &redisc_fcf->header.cfg_shdr.response); + if (shdr_status || shdr_add_status) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + "2746 Requesting for FCF rediscovery failed " + "status x%x add_status x%x\n", + shdr_status, shdr_add_status); + if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; + spin_unlock_irq(&phba->hbalock); + /* + * CVL event triggered FCF rediscover request failed, + * last resort to re-try current registered FCF entry. + */ + lpfc_retry_pport_discovery(phba); + } else { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; + spin_unlock_irq(&phba->hbalock); + /* + * DEAD FCF event triggered FCF rediscover request + * failed, last resort to fail over as a link down + * to FCF registration. + */ + lpfc_sli4_fcf_dead_failthrough(phba); + } + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2775 Start FCF rediscover quiescent timer\n"); + /* + * Start FCF rediscovery wait timer for pending FCF + * before rescan FCF record table. + */ + lpfc_fcf_redisc_wait_start_timer(phba); + } + + mempool_free(mbox, phba->mbox_mem_pool); +} + +/** + * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to request for rediscovery of the entire FCF table + * by the port. + **/ +int +lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mbox; + struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; + int rc, length; + + /* Cancel retry delay timers to all vports before FCF rediscover */ + lpfc_cancel_all_vport_retry_delay_timer(phba); + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2745 Failed to allocate mbox for " + "requesting FCF rediscover.\n"); + return -ENOMEM; + } + + length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, + length, LPFC_SLI4_MBX_EMBED); + + redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; + /* Set count to 0 for invalidating the entire FCF database */ + bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); + + /* Issue the mailbox command asynchronously */ + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + + if (rc == MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + return -EIO; + } + return 0; +} + +/** + * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event + * @phba: pointer to lpfc hba data structure. + * + * This function is the failover routine as a last resort to the FCF DEAD + * event when driver failed to perform fast FCF failover. 
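+ * The port is taken through lpfc_linkdown() while the previously saved
+ * link state is restored, and an FCF with no connected devices is then
+ * unregistered.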
+ **/ +void +lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) +{ + uint32_t link_state; + + /* + * Last resort as FCF DEAD event failover will treat this as + * a link down, but save the link state because we don't want + * it to be changed to Link Down unless it is already down. + */ + link_state = phba->link_state; + lpfc_linkdown(phba); + phba->link_state = link_state; + + /* Unregister FCF if no devices connected to it */ + lpfc_unregister_unused_fcf(phba); +} + +/** + * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. + * @phba: pointer to lpfc hba data structure. + * @rgn23_data: pointer to configure region 23 data. + * + * This function gets SLI3 port configure region 23 data through memory dump + * mailbox command. When it successfully retrieves data, the size of the data + * will be returned, otherwise, 0 will be returned. + **/ +static uint32_t +lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) +{ + LPFC_MBOXQ_t *pmb = NULL; + MAILBOX_t *mb; + uint32_t offset = 0; + int rc; + + if (!rgn23_data) + return 0; + + pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2600 failed to allocate mailbox memory\n"); + return 0; + } + mb = &pmb->u.mb; + + do { + lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2601 failed to read config " + "region 23, rc 0x%x Status 0x%x\n", + rc, mb->mbxStatus); + mb->un.varDmp.word_cnt = 0; + } + /* + * dump mem may return a zero when finished or we got a + * mailbox error, either way we are done. + */ + if (mb->un.varDmp.word_cnt == 0) + break; + + if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) + mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; + + lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, + rgn23_data + offset, + mb->un.varDmp.word_cnt); + offset += mb->un.varDmp.word_cnt; + } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); + + mempool_free(pmb, phba->mbox_mem_pool); + return offset; +} + +/** + * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. + * @phba: pointer to lpfc hba data structure. + * @rgn23_data: pointer to configure region 23 data. + * + * This function gets SLI4 port configure region 23 data through memory dump + * mailbox command. When it successfully retrieves data, the size of the data + * will be returned, otherwise, 0 will be returned. + **/ +static uint32_t +lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) +{ + LPFC_MBOXQ_t *mboxq = NULL; + struct lpfc_dmabuf *mp = NULL; + struct lpfc_mqe *mqe; + uint32_t data_length = 0; + int rc; + + if (!rgn23_data) + return 0; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3105 failed to allocate mailbox memory\n"); + return 0; + } + + if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) + goto out; + mqe = &mboxq->u.mqe; + mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc) + goto out; + data_length = mqe->un.mb_words[5]; + if (data_length == 0) + goto out; + if (data_length > DMP_RGN23_SIZE) { + data_length = 0; + goto out; + } + lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length); +out: + lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); + return data_length; +} + +/** + * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 
+ * @phba: pointer to lpfc hba data structure. + * + * This function read region 23 and parse TLV for port status to + * decide if the user disaled the port. If the TLV indicates the + * port is disabled, the hba_flag is set accordingly. + **/ +void +lpfc_sli_read_link_ste(struct lpfc_hba *phba) +{ + uint8_t *rgn23_data = NULL; + uint32_t if_type, data_size, sub_tlv_len, tlv_offset; + uint32_t offset = 0; + + /* Get adapter Region 23 data */ + rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); + if (!rgn23_data) + goto out; + + if (phba->sli_rev < LPFC_SLI_REV4) + data_size = lpfc_sli_get_config_region23(phba, rgn23_data); + else { + if_type = bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf); + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) + goto out; + data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); + } + + if (!data_size) + goto out; + + /* Check the region signature first */ + if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2619 Config region 23 has bad signature\n"); + goto out; + } + offset += 4; + + /* Check the data structure version */ + if (rgn23_data[offset] != LPFC_REGION23_VERSION) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2620 Config region 23 has bad version\n"); + goto out; + } + offset += 4; + + /* Parse TLV entries in the region */ + while (offset < data_size) { + if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) + break; + /* + * If the TLV is not driver specific TLV or driver id is + * not linux driver id, skip the record. + */ + if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || + (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || + (rgn23_data[offset + 3] != 0)) { + offset += rgn23_data[offset + 1] * 4 + 4; + continue; + } + + /* Driver found a driver specific TLV in the config region */ + sub_tlv_len = rgn23_data[offset + 1] * 4; + offset += 4; + tlv_offset = 0; + + /* + * Search for configured port state sub-TLV. + */ + while ((offset < data_size) && + (tlv_offset < sub_tlv_len)) { + if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { + offset += 4; + tlv_offset += 4; + break; + } + if (rgn23_data[offset] != PORT_STE_TYPE) { + offset += rgn23_data[offset + 1] * 4 + 4; + tlv_offset += rgn23_data[offset + 1] * 4 + 4; + continue; + } + + /* This HBA contains PORT_STE configured */ + if (!rgn23_data[offset + 2]) + phba->hba_flag |= LINK_DISABLED; + + goto out; + } + } + +out: + kfree(rgn23_data); + return; +} + +/** + * lpfc_log_fw_write_cmpl - logs firmware write completion status + * @phba: pointer to lpfc hba data structure + * @shdr_status: wr_object rsp's status field + * @shdr_add_status: wr_object rsp's add_status field + * @shdr_add_status_2: wr_object rsp's add_status_2 field + * @shdr_change_status: wr_object rsp's change_status field + * @shdr_csf: wr_object rsp's csf bit + * + * This routine is intended to be called after a firmware write completes. + * It will log next action items to be performed by the user to instantiate + * the newly downloaded firmware or reason for incompatibility. 
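+ * Incompatible images (add_status LPFC_ADD_STATUS_INCOMPAT_OBJ) are
+ * reported against the flash id or ASIC revision; otherwise the
+ * change_status value selects whether a reboot, firmware reset, port
+ * migration or PCI reset is required to instantiate the new image.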
+ **/ +static void +lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status, + u32 shdr_add_status, u32 shdr_add_status_2, + u32 shdr_change_status, u32 shdr_csf) +{ + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "4198 %s: flash_id x%02x, asic_rev x%02x, " + "status x%02x, add_status x%02x, add_status_2 x%02x, " + "change_status x%02x, csf %01x\n", __func__, + phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev, + shdr_status, shdr_add_status, shdr_add_status_2, + shdr_change_status, shdr_csf); + + if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) { + switch (shdr_add_status_2) { + case LPFC_ADD_STATUS_2_INCOMPAT_FLASH: + lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4199 Firmware write failed: " + "image incompatible with flash x%02x\n", + phba->sli4_hba.flash_id); + break; + case LPFC_ADD_STATUS_2_INCORRECT_ASIC: + lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4200 Firmware write failed: " + "image incompatible with ASIC " + "architecture x%02x\n", + phba->sli4_hba.asic_rev); + break; + default: + lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4210 Firmware write failed: " + "add_status_2 x%02x\n", + shdr_add_status_2); + break; + } + } else if (!shdr_status && !shdr_add_status) { + if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET || + shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) { + if (shdr_csf) + shdr_change_status = + LPFC_CHANGE_STATUS_PCI_RESET; + } + + switch (shdr_change_status) { + case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): + lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, + "3198 Firmware write complete: System " + "reboot required to instantiate\n"); + break; + case (LPFC_CHANGE_STATUS_FW_RESET): + lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, + "3199 Firmware write complete: " + "Firmware reset required to " + "instantiate\n"); + break; + case (LPFC_CHANGE_STATUS_PORT_MIGRATION): + lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, + "3200 Firmware write complete: Port " + "Migration or PCI Reset required to " + "instantiate\n"); + break; + case (LPFC_CHANGE_STATUS_PCI_RESET): + lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, + "3201 Firmware write complete: PCI " + "Reset required to instantiate\n"); + break; + default: + break; + } + } +} + +/** + * lpfc_wr_object - write an object to the firmware + * @phba: HBA structure that indicates port to create a queue on. + * @dmabuf_list: list of dmabufs to write to the port. + * @size: the total byte value of the objects to write to the port. + * @offset: the current offset to be used to start the transfer. + * + * This routine will create a wr_object mailbox command to send to the port. + * the mailbox command will be constructed using the dma buffers described in + * @dmabuf_list to create a list of BDEs. This routine will fill in as many + * BDEs that the imbedded mailbox can support. The @offset variable will be + * used to indicate the starting offset of the transfer and will also return + * the offset after the write object mailbox has completed. @size is used to + * determine the end of the object and whether the eof bit should be set. + * + * Return 0 is successful and offset will contain the new offset to use + * for the next write. + * Return negative value for error cases. 
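+ * Illustration (assuming a 4 KB SLI4_PAGE_SIZE): each BDE in the
+ * embedded mailbox covers one SLI4_PAGE_SIZE chunk of @dmabuf_list,
+ * except the final chunk of the object, which is trimmed to the
+ * remaining byte count and carries the eof/eas bits so that the
+ * change_status in the response can be evaluated.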
+ **/ +int +lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, + uint32_t size, uint32_t *offset) +{ + struct lpfc_mbx_wr_object *wr_object; + LPFC_MBOXQ_t *mbox; + int rc = 0, i = 0; + int mbox_status = 0; + uint32_t shdr_status, shdr_add_status, shdr_add_status_2; + uint32_t shdr_change_status = 0, shdr_csf = 0; + uint32_t mbox_tmo; + struct lpfc_dmabuf *dmabuf; + uint32_t written = 0; + bool check_change_status = false; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_WRITE_OBJECT, + sizeof(struct lpfc_mbx_wr_object) - + sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); + + wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object; + wr_object->u.request.write_offset = *offset; + sprintf((uint8_t *)wr_object->u.request.object_name, "/"); + wr_object->u.request.object_name[0] = + cpu_to_le32(wr_object->u.request.object_name[0]); + bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0); + list_for_each_entry(dmabuf, dmabuf_list, list) { + if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size) + break; + wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); + wr_object->u.request.bde[i].addrHigh = + putPaddrHigh(dmabuf->phys); + if (written + SLI4_PAGE_SIZE >= size) { + wr_object->u.request.bde[i].tus.f.bdeSize = + (size - written); + written += (size - written); + bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); + bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1); + check_change_status = true; + } else { + wr_object->u.request.bde[i].tus.f.bdeSize = + SLI4_PAGE_SIZE; + written += SLI4_PAGE_SIZE; + } + i++; + } + wr_object->u.request.bde_count = i; + bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written); + if (!phba->sli4_hba.intr_enable) + mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); + mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + + /* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */ + rc = mbox_status; + + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr_status = bf_get(lpfc_mbox_hdr_status, + &wr_object->header.cfg_shdr.response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, + &wr_object->header.cfg_shdr.response); + shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2, + &wr_object->header.cfg_shdr.response); + if (check_change_status) { + shdr_change_status = bf_get(lpfc_wr_object_change_status, + &wr_object->u.response); + shdr_csf = bf_get(lpfc_wr_object_csf, + &wr_object->u.response); + } + + if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3025 Write Object mailbox failed with " + "status x%x add_status x%x, add_status_2 x%x, " + "mbx status x%x\n", + shdr_status, shdr_add_status, shdr_add_status_2, + rc); + rc = -ENXIO; + *offset = shdr_add_status; + } else { + *offset += wr_object->u.response.actual_write_length; + } + + if (rc || check_change_status) + lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status, + shdr_add_status_2, shdr_change_status, + shdr_csf); + + if (!phba->sli4_hba.intr_enable) + mempool_free(mbox, phba->mbox_mem_pool); + else if (mbox_status != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + + return rc; +} + +/** + * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. + * @vport: pointer to vport data structure. 
+ * + * This function iterate through the mailboxq and clean up all REG_LOGIN + * and REG_VPI mailbox commands associated with the vport. This function + * is called when driver want to restart discovery of the vport due to + * a Clear Virtual Link event. + **/ +void +lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mb, *nextmb; + struct lpfc_nodelist *ndlp; + struct lpfc_nodelist *act_mbx_ndlp = NULL; + LIST_HEAD(mbox_cmd_list); + uint8_t restart_loop; + + /* Clean up internally queued mailbox commands with the vport */ + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { + if (mb->vport != vport) + continue; + + if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && + (mb->u.mb.mbxCommand != MBX_REG_VPI)) + continue; + + list_move_tail(&mb->list, &mbox_cmd_list); + } + /* Clean up active mailbox command with the vport */ + mb = phba->sli.mbox_active; + if (mb && (mb->vport == vport)) { + if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || + (mb->u.mb.mbxCommand == MBX_REG_VPI)) + mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { + act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; + + /* This reference is local to this routine. The + * reference is removed at routine exit. + */ + act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); + + /* Unregister the RPI when mailbox complete */ + mb->mbox_flag |= LPFC_MBX_IMED_UNREG; + } + } + /* Cleanup any mailbox completions which are not yet processed */ + do { + restart_loop = 0; + list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { + /* + * If this mailox is already processed or it is + * for another vport ignore it. + */ + if ((mb->vport != vport) || + (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) + continue; + + if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && + (mb->u.mb.mbxCommand != MBX_REG_VPI)) + continue; + + mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { + ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; + /* Unregister the RPI when mailbox complete */ + mb->mbox_flag |= LPFC_MBX_IMED_UNREG; + restart_loop = 1; + spin_unlock_irq(&phba->hbalock); + spin_lock(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; + spin_unlock(&ndlp->lock); + spin_lock_irq(&phba->hbalock); + break; + } + } + } while (restart_loop); + + spin_unlock_irq(&phba->hbalock); + + /* Release the cleaned-up mailbox commands */ + while (!list_empty(&mbox_cmd_list)) { + list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); + if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { + ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; + mb->ctx_ndlp = NULL; + if (ndlp) { + spin_lock(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; + spin_unlock(&ndlp->lock); + lpfc_nlp_put(ndlp); + } + } + lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED); + } + + /* Release the ndlp with the cleaned-up active mailbox command */ + if (act_mbx_ndlp) { + spin_lock(&act_mbx_ndlp->lock); + act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; + spin_unlock(&act_mbx_ndlp->lock); + lpfc_nlp_put(act_mbx_ndlp); + } +} + +/** + * lpfc_drain_txq - Drain the txq + * @phba: Pointer to HBA context object. + * + * This function attempt to submit IOCBs on the txq + * to the adapter. For SLI4 adapters, the txq contains + * ELS IOCBs that have been deferred because the there + * are no SGLs. This congestion can occur with large + * vport counts during node discovery. 
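+ * In MDS diagnostic loopback the deferred WQEs are posted to the first
+ * hardware I/O WQ instead of the ELS WQ. Entries that still cannot be
+ * issued are completed with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.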
+ **/ + +uint32_t +lpfc_drain_txq(struct lpfc_hba *phba) +{ + LIST_HEAD(completions); + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *piocbq = NULL; + unsigned long iflags = 0; + char *fail_msg = NULL; + uint32_t txq_cnt = 0; + struct lpfc_queue *wq; + int ret = 0; + + if (phba->link_flag & LS_MDS_LOOPBACK) { + /* MDS WQE are posted only to first WQ*/ + wq = phba->sli4_hba.hdwq[0].io_wq; + if (unlikely(!wq)) + return 0; + pring = wq->pring; + } else { + wq = phba->sli4_hba.els_wq; + if (unlikely(!wq)) + return 0; + pring = lpfc_phba_elsring(phba); + } + + if (unlikely(!pring) || list_empty(&pring->txq)) + return 0; + + spin_lock_irqsave(&pring->ring_lock, iflags); + list_for_each_entry(piocbq, &pring->txq, list) { + txq_cnt++; + } + + if (txq_cnt > pring->txq_max) + pring->txq_max = txq_cnt; + + spin_unlock_irqrestore(&pring->ring_lock, iflags); + + while (!list_empty(&pring->txq)) { + spin_lock_irqsave(&pring->ring_lock, iflags); + + piocbq = lpfc_sli_ringtx_get(phba, pring); + if (!piocbq) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2823 txq empty and txq_cnt is %d\n ", + txq_cnt); + break; + } + txq_cnt--; + + ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0); + + if (ret && ret != IOCB_BUSY) { + fail_msg = " - Cannot send IO "; + piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; + } + if (fail_msg) { + piocbq->cmd_flag |= LPFC_DRIVER_ABORTED; + /* Failed means we can't issue and need to cancel */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2822 IOCB failed %s iotag 0x%x " + "xri 0x%x %d flg x%x\n", + fail_msg, piocbq->iotag, + piocbq->sli4_xritag, ret, + piocbq->cmd_flag); + list_add_tail(&piocbq->list, &completions); + fail_msg = NULL; + } + spin_unlock_irqrestore(&pring->ring_lock, iflags); + if (txq_cnt == 0 || ret == IOCB_BUSY) + break; + } + /* Cancel all the IOCBs that cannot be issued */ + lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); + + return txq_cnt; +} + +/** + * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl. + * @phba: Pointer to HBA context object. + * @pwqeq: Pointer to command WQE. + * @sglq: Pointer to the scatter gather queue object. + * + * This routine converts the bpl or bde that is in the WQE + * to a sgl list for the sli4 hardware. The physical address + * of the bpl/bde is converted back to a virtual address. + * If the WQE contains a BPL then the list of BDE's is + * converted to sli4_sge's. If the WQE contains a single + * BDE then it is converted to a single sli_sge. + * The WQE is still in cpu endianness so the contents of + * the bpl can be used without byte swapping. + * + * Returns valid XRI = Success, NO_XRI = Failure. 
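+ * For XMIT_BLS_RSP WQEs the sglq XRI is returned without converting
+ * any BDEs. For GEN_REQUEST64 the SGE offset restarts at the first
+ * inbound BDE so the request and reply scatter lists are accumulated
+ * separately.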
+ */ +static uint16_t +lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, + struct lpfc_sglq *sglq) +{ + uint16_t xritag = NO_XRI; + struct ulp_bde64 *bpl = NULL; + struct ulp_bde64 bde; + struct sli4_sge *sgl = NULL; + struct lpfc_dmabuf *dmabuf; + union lpfc_wqe128 *wqe; + int numBdes = 0; + int i = 0; + uint32_t offset = 0; /* accumulated offset in the sg request list */ + int inbound = 0; /* number of sg reply entries inbound from firmware */ + uint32_t cmd; + + if (!pwqeq || !sglq) + return xritag; + + sgl = (struct sli4_sge *)sglq->sgl; + wqe = &pwqeq->wqe; + pwqeq->iocb.ulpIoTag = pwqeq->iotag; + + cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com); + if (cmd == CMD_XMIT_BLS_RSP64_WQE) + return sglq->sli4_xritag; + numBdes = pwqeq->num_bdes; + if (numBdes) { + /* The addrHigh and addrLow fields within the WQE + * have not been byteswapped yet so there is no + * need to swap them back. + */ + if (pwqeq->bpl_dmabuf) + dmabuf = pwqeq->bpl_dmabuf; + else + return xritag; + + bpl = (struct ulp_bde64 *)dmabuf->virt; + if (!bpl) + return xritag; + + for (i = 0; i < numBdes; i++) { + /* Should already be byte swapped. */ + sgl->addr_hi = bpl->addrHigh; + sgl->addr_lo = bpl->addrLow; + + sgl->word2 = le32_to_cpu(sgl->word2); + if ((i+1) == numBdes) + bf_set(lpfc_sli4_sge_last, sgl, 1); + else + bf_set(lpfc_sli4_sge_last, sgl, 0); + /* swap the size field back to the cpu so we + * can assign it to the sgl. + */ + bde.tus.w = le32_to_cpu(bpl->tus.w); + sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); + /* The offsets in the sgl need to be accumulated + * separately for the request and reply lists. + * The request is always first, the reply follows. + */ + switch (cmd) { + case CMD_GEN_REQUEST64_WQE: + /* add up the reply sg entries */ + if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) + inbound++; + /* first inbound? reset the offset */ + if (inbound == 1) + offset = 0; + bf_set(lpfc_sli4_sge_offset, sgl, offset); + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); + offset += bde.tus.f.bdeSize; + break; + case CMD_FCP_TRSP64_WQE: + bf_set(lpfc_sli4_sge_offset, sgl, 0); + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); + break; + case CMD_FCP_TSEND64_WQE: + case CMD_FCP_TRECEIVE64_WQE: + bf_set(lpfc_sli4_sge_type, sgl, + bpl->tus.f.bdeFlags); + if (i < 3) + offset = 0; + else + offset += bde.tus.f.bdeSize; + bf_set(lpfc_sli4_sge_offset, sgl, offset); + break; + } + sgl->word2 = cpu_to_le32(sgl->word2); + bpl++; + sgl++; + } + } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) { + /* The addrHigh and addrLow fields of the BDE have not + * been byteswapped yet so they need to be swapped + * before putting them in the sgl. + */ + sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh); + sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow); + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize); + } + return sglq->sli4_xritag; +} + +/** + * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) + * @phba: Pointer to HBA context object. + * @qp: Pointer to HDW queue. + * @pwqe: Pointer to command WQE. 
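/*
 * Illustrative sketch only: lpfc_wqe_bpl2sgl above edits the SGE word2
 * bit fields by converting the little-endian word to CPU order, setting
 * the field, and converting back. The helper and field layout below are
 * hypothetical; only the round-trip pattern is the point.
 */
#include <stdint.h>

#define SGE_LAST_SHIFT	31
#define SGE_LAST_MASK	0x1u

static inline uint32_t le32_to_host(uint32_t v)
{
	const union { uint16_t u16; uint8_t u8[2]; } probe = { .u16 = 1 };

	return probe.u8[0] ? v :			/* little-endian host */
	       ((v >> 24) | ((v >> 8) & 0xff00u) |
		((v << 8) & 0xff0000u) | (v << 24));	/* byte swap on BE */
}
#define host_to_le32 le32_to_host			/* swap is symmetric */

static void mark_last_sge(uint32_t *word2_le, int is_last)
{
	uint32_t w = le32_to_host(*word2_le);		/* wire -> CPU order */

	w &= ~(SGE_LAST_MASK << SGE_LAST_SHIFT);	/* clear the field */
	w |= ((uint32_t)(is_last ? 1 : 0)) << SGE_LAST_SHIFT;
	*word2_le = host_to_le32(w);			/* CPU -> wire order */
}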
+ **/ +int +lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, + struct lpfc_iocbq *pwqe) +{ + union lpfc_wqe128 *wqe = &pwqe->wqe; + struct lpfc_async_xchg_ctx *ctxp; + struct lpfc_queue *wq; + struct lpfc_sglq *sglq; + struct lpfc_sli_ring *pring; + unsigned long iflags; + uint32_t ret = 0; + + /* NVME_LS and NVME_LS ABTS requests. */ + if (pwqe->cmd_flag & LPFC_IO_NVME_LS) { + pring = phba->sli4_hba.nvmels_wq->pring; + lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, + qp, wq_access); + sglq = __lpfc_sli_get_els_sglq(phba, pwqe); + if (!sglq) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return WQE_BUSY; + } + pwqe->sli4_lxritag = sglq->sli4_lxritag; + pwqe->sli4_xritag = sglq->sli4_xritag; + if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return WQE_ERROR; + } + bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, + pwqe->sli4_xritag); + ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe); + if (ret) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return ret; + } + + lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + + lpfc_sli4_poll_eq(qp->hba_eq); + return 0; + } + + /* NVME_FCREQ and NVME_ABTS requests */ + if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) { + /* Get the IO distribution (hba_wqidx) for WQ assignment. */ + wq = qp->io_wq; + pring = wq->pring; + + bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); + + lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, + qp, wq_access); + ret = lpfc_sli4_wq_put(wq, wqe); + if (ret) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return ret; + } + lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + + lpfc_sli4_poll_eq(qp->hba_eq); + return 0; + } + + /* NVMET requests */ + if (pwqe->cmd_flag & LPFC_IO_NVMET) { + /* Get the IO distribution (hba_wqidx) for WQ assignment. */ + wq = qp->io_wq; + pring = wq->pring; + + ctxp = pwqe->context_un.axchg; + sglq = ctxp->ctxbuf->sglq; + if (pwqe->sli4_xritag == NO_XRI) { + pwqe->sli4_lxritag = sglq->sli4_lxritag; + pwqe->sli4_xritag = sglq->sli4_xritag; + } + bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, + pwqe->sli4_xritag); + bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); + + lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, + qp, wq_access); + ret = lpfc_sli4_wq_put(wq, wqe); + if (ret) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return ret; + } + lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + + lpfc_sli4_poll_eq(qp->hba_eq); + return 0; + } + return WQE_ERROR; +} + +/** + * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort + * @phba: Pointer to HBA context object. + * @cmdiocb: Pointer to driver command iocb object. + * @cmpl: completion function. + * + * Fill the appropriate fields for the abort WQE and call + * internal routine lpfc_sli4_issue_wqe to send the WQE + * This function is called with hbalock held and no ring_lock held. 
+ * + * RETURNS 0 - SUCCESS + **/ + +int +lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + void *cmpl) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct lpfc_iocbq *abtsiocb = NULL; + union lpfc_wqe128 *abtswqe; + struct lpfc_io_buf *lpfc_cmd; + int retval = IOCB_ERROR; + u16 xritag = cmdiocb->sli4_xritag; + + /* + * The scsi command can not be in txq and it is in flight because the + * pCmd is still pointing at the SCSI command we have to abort. There + * is no need to search the txcmplq. Just send an abort to the FW. + */ + + abtsiocb = __lpfc_sli_get_iocbq(phba); + if (!abtsiocb) + return WQE_NORESOURCE; + + /* Indicate the IO is being aborted by the driver. */ + cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED; + + abtswqe = &abtsiocb->wqe; + memset(abtswqe, 0, sizeof(*abtswqe)); + + if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK)) + bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1); + bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG); + abtswqe->abort_cmd.rsrvd5 = 0; + abtswqe->abort_cmd.wqe_com.abort_tag = xritag; + bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag); + bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); + bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0); + bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1); + bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); + bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND); + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abtsiocb->hba_wqidx = cmdiocb->hba_wqidx; + abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX; + if (cmdiocb->cmd_flag & LPFC_IO_FCP) + abtsiocb->cmd_flag |= LPFC_IO_FCP; + if (cmdiocb->cmd_flag & LPFC_IO_NVME) + abtsiocb->cmd_flag |= LPFC_IO_NVME; + if (cmdiocb->cmd_flag & LPFC_IO_FOF) + abtsiocb->cmd_flag |= LPFC_IO_FOF; + abtsiocb->vport = vport; + abtsiocb->cmd_cmpl = cmpl; + + lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq); + retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP, + "0359 Abort xri x%x, original iotag x%x, " + "abort cmd iotag x%x retval x%x\n", + xritag, cmdiocb->iotag, abtsiocb->iotag, retval); + + if (retval) { + cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED; + __lpfc_sli_release_iocbq(phba, abtsiocb); + } + + return retval; +} + +#ifdef LPFC_MXP_STAT +/** + * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count + * @phba: pointer to lpfc hba data structure. + * @hwqid: belong to which HWQ. + * + * The purpose of this routine is to take a snapshot of pbl, pvt and busy count + * 15 seconds after a test case is running. + * + * The user should call lpfc_debugfs_multixripools_write before running a test + * case to clear stat_snapshot_taken. Then the user starts a test case. During + * test case is running, stat_snapshot_taken is incremented by 1 every time when + * this routine is called from heartbeat timer. When stat_snapshot_taken is + * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken. 
+ **/ +void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid) +{ + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_pvt_pool *pvt_pool; + struct lpfc_pbl_pool *pbl_pool; + u32 txcmplq_cnt; + + qp = &phba->sli4_hba.hdwq[hwqid]; + multixri_pool = qp->p_multixri_pool; + if (!multixri_pool) + return; + + if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) { + pvt_pool = &qp->p_multixri_pool->pvt_pool; + pbl_pool = &qp->p_multixri_pool->pbl_pool; + txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; + + multixri_pool->stat_pbl_count = pbl_pool->count; + multixri_pool->stat_pvt_count = pvt_pool->count; + multixri_pool->stat_busy_count = txcmplq_cnt; + } + + multixri_pool->stat_snapshot_taken++; +} +#endif + +/** + * lpfc_adjust_pvt_pool_count - Adjust private pool count + * @phba: pointer to lpfc hba data structure. + * @hwqid: belong to which HWQ. + * + * This routine moves some XRIs from private to public pool when private pool + * is not busy. + **/ +void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid) +{ + struct lpfc_multixri_pool *multixri_pool; + u32 io_req_count; + u32 prev_io_req_count; + + multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; + if (!multixri_pool) + return; + io_req_count = multixri_pool->io_req_count; + prev_io_req_count = multixri_pool->prev_io_req_count; + + if (prev_io_req_count != io_req_count) { + /* Private pool is busy */ + multixri_pool->prev_io_req_count = io_req_count; + } else { + /* Private pool is not busy. + * Move XRIs from private to public pool. + */ + lpfc_move_xri_pvt_to_pbl(phba, hwqid); + } +} + +/** + * lpfc_adjust_high_watermark - Adjust high watermark + * @phba: pointer to lpfc hba data structure. + * @hwqid: belong to which HWQ. + * + * This routine sets high watermark as number of outstanding XRIs, + * but make sure the new value is between xri_limit/2 and xri_limit. + **/ +void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid) +{ + u32 new_watermark; + u32 watermark_max; + u32 watermark_min; + u32 xri_limit; + u32 txcmplq_cnt; + u32 abts_io_bufs; + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_sli4_hdw_queue *qp; + + qp = &phba->sli4_hba.hdwq[hwqid]; + multixri_pool = qp->p_multixri_pool; + if (!multixri_pool) + return; + xri_limit = multixri_pool->xri_limit; + + watermark_max = xri_limit; + watermark_min = xri_limit / 2; + + txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; + abts_io_bufs = qp->abts_scsi_io_bufs; + abts_io_bufs += qp->abts_nvme_io_bufs; + + new_watermark = txcmplq_cnt + abts_io_bufs; + new_watermark = min(watermark_max, new_watermark); + new_watermark = max(watermark_min, new_watermark); + multixri_pool->pvt_pool.high_watermark = new_watermark; + +#ifdef LPFC_MXP_STAT + multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm, + new_watermark); +#endif +} + +/** + * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool + * @phba: pointer to lpfc hba data structure. + * @hwqid: belong to which HWQ. + * + * This routine is called from hearbeat timer when pvt_pool is idle. + * All free XRIs are moved from private to public pool on hwqid with 2 steps. + * The first step moves (all - low_watermark) amount of XRIs. + * The second step moves the rest of XRIs. 
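/*
 * Illustrative sketch only: lpfc_adjust_high_watermark above sets the
 * private-pool high watermark to the number of XRIs currently in flight
 * (txcmplq_cnt + abts_io_bufs), clamped to the range
 * [xri_limit/2, xri_limit]. A standalone model with made-up numbers:
 */
#include <stdio.h>

static unsigned int clamp_watermark(unsigned int in_flight,
				    unsigned int xri_limit)
{
	unsigned int lo = xri_limit / 2, hi = xri_limit;
	unsigned int wm = in_flight;

	if (wm > hi)
		wm = hi;
	if (wm < lo)
		wm = lo;
	return wm;
}

int main(void)
{
	/* With xri_limit = 512: 100 in flight -> 256, 300 -> 300, 900 -> 512 */
	printf("%u %u %u\n",
	       clamp_watermark(100, 512),
	       clamp_watermark(300, 512),
	       clamp_watermark(900, 512));
	return 0;
}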
+ **/ +void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid) +{ + struct lpfc_pbl_pool *pbl_pool; + struct lpfc_pvt_pool *pvt_pool; + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd_next; + unsigned long iflag; + struct list_head tmp_list; + u32 tmp_count; + + qp = &phba->sli4_hba.hdwq[hwqid]; + pbl_pool = &qp->p_multixri_pool->pbl_pool; + pvt_pool = &qp->p_multixri_pool->pvt_pool; + tmp_count = 0; + + lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool); + lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool); + + if (pvt_pool->count > pvt_pool->low_watermark) { + /* Step 1: move (all - low_watermark) from pvt_pool + * to pbl_pool + */ + + /* Move low watermark of bufs from pvt_pool to tmp_list */ + INIT_LIST_HEAD(&tmp_list); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &pvt_pool->list, list) { + list_move_tail(&lpfc_ncmd->list, &tmp_list); + tmp_count++; + if (tmp_count >= pvt_pool->low_watermark) + break; + } + + /* Move all bufs from pvt_pool to pbl_pool */ + list_splice_init(&pvt_pool->list, &pbl_pool->list); + + /* Move all bufs from tmp_list to pvt_pool */ + list_splice(&tmp_list, &pvt_pool->list); + + pbl_pool->count += (pvt_pool->count - tmp_count); + pvt_pool->count = tmp_count; + } else { + /* Step 2: move the rest from pvt_pool to pbl_pool */ + list_splice_init(&pvt_pool->list, &pbl_pool->list); + pbl_pool->count += pvt_pool->count; + pvt_pool->count = 0; + } + + spin_unlock(&pvt_pool->lock); + spin_unlock_irqrestore(&pbl_pool->lock, iflag); +} + +/** + * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool + * @phba: pointer to lpfc hba data structure + * @qp: pointer to HDW queue + * @pbl_pool: specified public free XRI pool + * @pvt_pool: specified private free XRI pool + * @count: number of XRIs to move + * + * This routine tries to move some free common bufs from the specified pbl_pool + * to the specified pvt_pool. It might move less than count XRIs if there's not + * enough in public pool. + * + * Return: + * true - if XRIs are successfully moved from the specified pbl_pool to the + * specified pvt_pool + * false - if the specified pbl_pool is empty or locked by someone else + **/ +static bool +_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, + struct lpfc_pbl_pool *pbl_pool, + struct lpfc_pvt_pool *pvt_pool, u32 count) +{ + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd_next; + unsigned long iflag; + int ret; + + ret = spin_trylock_irqsave(&pbl_pool->lock, iflag); + if (ret) { + if (pbl_pool->count) { + /* Move a batch of XRIs from public to private pool */ + lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool); + list_for_each_entry_safe(lpfc_ncmd, + lpfc_ncmd_next, + &pbl_pool->list, + list) { + list_move_tail(&lpfc_ncmd->list, + &pvt_pool->list); + pvt_pool->count++; + pbl_pool->count--; + count--; + if (count == 0) + break; + } + + spin_unlock(&pvt_pool->lock); + spin_unlock_irqrestore(&pbl_pool->lock, iflag); + return true; + } + spin_unlock_irqrestore(&pbl_pool->lock, iflag); + } + + return false; +} + +/** + * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool + * @phba: pointer to lpfc hba data structure. + * @hwqid: belong to which HWQ. + * @count: number of XRIs to move + * + * This routine tries to find some free common bufs in one of public pools with + * Round Robin method. The search always starts from local hwqid, then the next + * HWQ which was found last time (rrb_next_hwqid). 
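/*
 * Illustrative sketch only: _lpfc_move_xri_pbl_to_pvt above uses
 * spin_trylock_irqsave on the public pool, so a CPU that finds another
 * hardware queue's pool momentarily locked gives up and moves on to the
 * next queue instead of spinning. A userspace model of that "steal or
 * skip" decision (counts only, destination assumed private to caller):
 */
#include <pthread.h>
#include <stdbool.h>

struct pool {
	pthread_mutex_t lock;
	unsigned int count;
};

static bool try_steal(struct pool *src, unsigned int *dst_count,
		      unsigned int want)
{
	if (pthread_mutex_trylock(&src->lock) != 0)
		return false;		/* contended: caller tries the next pool */

	bool moved = false;
	if (src->count > 0) {
		unsigned int n = src->count < want ? src->count : want;

		src->count -= n;
		*dst_count += n;
		moved = true;
	}
	pthread_mutex_unlock(&src->lock);
	return moved;
}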
Once a public pool is found, + * a batch of free common bufs are moved to private pool on hwqid. + * It might move less than count XRIs if there's not enough in public pool. + **/ +void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count) +{ + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_multixri_pool *next_multixri_pool; + struct lpfc_pvt_pool *pvt_pool; + struct lpfc_pbl_pool *pbl_pool; + struct lpfc_sli4_hdw_queue *qp; + u32 next_hwqid; + u32 hwq_count; + int ret; + + qp = &phba->sli4_hba.hdwq[hwqid]; + multixri_pool = qp->p_multixri_pool; + pvt_pool = &multixri_pool->pvt_pool; + pbl_pool = &multixri_pool->pbl_pool; + + /* Check if local pbl_pool is available */ + ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count); + if (ret) { +#ifdef LPFC_MXP_STAT + multixri_pool->local_pbl_hit_count++; +#endif + return; + } + + hwq_count = phba->cfg_hdw_queue; + + /* Get the next hwqid which was found last time */ + next_hwqid = multixri_pool->rrb_next_hwqid; + + do { + /* Go to next hwq */ + next_hwqid = (next_hwqid + 1) % hwq_count; + + next_multixri_pool = + phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool; + pbl_pool = &next_multixri_pool->pbl_pool; + + /* Check if the public free xri pool is available */ + ret = _lpfc_move_xri_pbl_to_pvt( + phba, qp, pbl_pool, pvt_pool, count); + + /* Exit while-loop if success or all hwqid are checked */ + } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid); + + /* Starting point for the next time */ + multixri_pool->rrb_next_hwqid = next_hwqid; + + if (!ret) { + /* stats: all public pools are empty*/ + multixri_pool->pbl_empty_count++; + } + +#ifdef LPFC_MXP_STAT + if (ret) { + if (next_hwqid == hwqid) + multixri_pool->local_pbl_hit_count++; + else + multixri_pool->other_pbl_hit_count++; + } +#endif +} + +/** + * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark + * @phba: pointer to lpfc hba data structure. + * @hwqid: belong to which HWQ. + * + * This routine get a batch of XRIs from pbl_pool if pvt_pool is less than + * low watermark. + **/ +void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid) +{ + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_pvt_pool *pvt_pool; + + multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; + pvt_pool = &multixri_pool->pvt_pool; + + if (pvt_pool->count < pvt_pool->low_watermark) + lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); +} + +/** + * lpfc_release_io_buf - Return one IO buf back to free pool + * @phba: pointer to lpfc hba data structure. + * @lpfc_ncmd: IO buf to be returned. + * @qp: belong to which HWQ. + * + * This routine returns one IO buf back to free pool. If this is an urgent IO, + * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1, + * the IO buf is returned to pbl_pool or pvt_pool based on watermark and + * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to + * lpfc_io_buf_list_put. 
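/*
 * Illustrative sketch only: the search above tries the local public pool
 * first, then walks the remaining hardware queues round-robin starting
 * just past rrb_next_hwqid, stopping after one full lap or on the first
 * hit; the cursor is saved so the next request resumes where this one
 * left off. try_queue() is a hypothetical stand-in for the per-queue
 * steal attempt.
 */
#include <stdbool.h>

extern bool try_queue(unsigned int q);	/* true if queue q donated XRIs */

static bool find_donor(unsigned int local, unsigned int nr_queues,
		       unsigned int *rrb_next)
{
	if (try_queue(local))
		return true;			/* fast path: local pool hit */

	unsigned int start = *rrb_next, q = start;
	bool found = false;

	do {
		q = (q + 1) % nr_queues;	/* advance the round-robin cursor */
		found = try_queue(q);
	} while (!found && q != start);		/* stop after one full lap */

	*rrb_next = q;				/* resume here next time */
	return found;
}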
+ **/ +void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, + struct lpfc_sli4_hdw_queue *qp) +{ + unsigned long iflag; + struct lpfc_pbl_pool *pbl_pool; + struct lpfc_pvt_pool *pvt_pool; + struct lpfc_epd_pool *epd_pool; + u32 txcmplq_cnt; + u32 xri_owned; + u32 xri_limit; + u32 abts_io_bufs; + + /* MUST zero fields if buffer is reused by another protocol */ + lpfc_ncmd->nvmeCmd = NULL; + lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL; + + if (phba->cfg_xpsgl && !phba->nvmet_support && + !list_empty(&lpfc_ncmd->dma_sgl_xtra_list)) + lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); + + if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list)) + lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); + + if (phba->cfg_xri_rebalancing) { + if (lpfc_ncmd->expedite) { + /* Return to expedite pool */ + epd_pool = &phba->epd_pool; + spin_lock_irqsave(&epd_pool->lock, iflag); + list_add_tail(&lpfc_ncmd->list, &epd_pool->list); + epd_pool->count++; + spin_unlock_irqrestore(&epd_pool->lock, iflag); + return; + } + + /* Avoid invalid access if an IO sneaks in and is being rejected + * just _after_ xri pools are destroyed in lpfc_offline. + * Nothing much can be done at this point. + */ + if (!qp->p_multixri_pool) + return; + + pbl_pool = &qp->p_multixri_pool->pbl_pool; + pvt_pool = &qp->p_multixri_pool->pvt_pool; + + txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; + abts_io_bufs = qp->abts_scsi_io_bufs; + abts_io_bufs += qp->abts_nvme_io_bufs; + + xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs; + xri_limit = qp->p_multixri_pool->xri_limit; + +#ifdef LPFC_MXP_STAT + if (xri_owned <= xri_limit) + qp->p_multixri_pool->below_limit_count++; + else + qp->p_multixri_pool->above_limit_count++; +#endif + + /* XRI goes to either public or private free xri pool + * based on watermark and xri_limit + */ + if ((pvt_pool->count < pvt_pool->low_watermark) || + (xri_owned < xri_limit && + pvt_pool->count < pvt_pool->high_watermark)) { + lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, + qp, free_pvt_pool); + list_add_tail(&lpfc_ncmd->list, + &pvt_pool->list); + pvt_pool->count++; + spin_unlock_irqrestore(&pvt_pool->lock, iflag); + } else { + lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, + qp, free_pub_pool); + list_add_tail(&lpfc_ncmd->list, + &pbl_pool->list); + pbl_pool->count++; + spin_unlock_irqrestore(&pbl_pool->lock, iflag); + } + } else { + lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag, + qp, free_xri); + list_add_tail(&lpfc_ncmd->list, + &qp->lpfc_io_buf_list_put); + qp->put_io_bufs++; + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, + iflag); + } +} + +/** + * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool + * @phba: pointer to lpfc hba data structure. + * @qp: pointer to HDW queue + * @pvt_pool: pointer to private pool data structure. + * @ndlp: pointer to lpfc nodelist data structure. + * + * This routine tries to get one free IO buf from private pool. 
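/*
 * Illustrative sketch only: the placement decision in lpfc_release_io_buf
 * above, reduced to a pure predicate. A freed buffer stays in the
 * private pool while that pool is below its low watermark, or while the
 * queue owns fewer XRIs than its limit and the private pool is still
 * below the high watermark; otherwise it is donated to the public pool.
 */
#include <stdbool.h>

static bool keep_in_private_pool(unsigned int pvt_count,
				 unsigned int low_wm, unsigned int high_wm,
				 unsigned int in_flight,   /* txcmplq + aborts */
				 unsigned int xri_limit)
{
	unsigned int xri_owned = pvt_count + in_flight;

	return pvt_count < low_wm ||
	       (xri_owned < xri_limit && pvt_count < high_wm);
}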
+ * + * Return: + * pointer to one free IO buf - if private pool is not empty + * NULL - if private pool is empty + **/ +static struct lpfc_io_buf * +lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba, + struct lpfc_sli4_hdw_queue *qp, + struct lpfc_pvt_pool *pvt_pool, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd_next; + unsigned long iflag; + + lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &pvt_pool->list, list) { + if (lpfc_test_rrq_active( + phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag)) + continue; + list_del(&lpfc_ncmd->list); + pvt_pool->count--; + spin_unlock_irqrestore(&pvt_pool->lock, iflag); + return lpfc_ncmd; + } + spin_unlock_irqrestore(&pvt_pool->lock, iflag); + + return NULL; +} + +/** + * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool + * @phba: pointer to lpfc hba data structure. + * + * This routine tries to get one free IO buf from expedite pool. + * + * Return: + * pointer to one free IO buf - if expedite pool is not empty + * NULL - if expedite pool is empty + **/ +static struct lpfc_io_buf * +lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba) +{ + struct lpfc_io_buf *lpfc_ncmd = NULL, *iter; + struct lpfc_io_buf *lpfc_ncmd_next; + unsigned long iflag; + struct lpfc_epd_pool *epd_pool; + + epd_pool = &phba->epd_pool; + + spin_lock_irqsave(&epd_pool->lock, iflag); + if (epd_pool->count > 0) { + list_for_each_entry_safe(iter, lpfc_ncmd_next, + &epd_pool->list, list) { + list_del(&iter->list); + epd_pool->count--; + lpfc_ncmd = iter; + break; + } + } + spin_unlock_irqrestore(&epd_pool->lock, iflag); + + return lpfc_ncmd; +} + +/** + * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs + * @phba: pointer to lpfc hba data structure. + * @ndlp: pointer to lpfc nodelist data structure. + * @hwqid: belong to which HWQ + * @expedite: 1 means this request is urgent. + * + * This routine will do the following actions and then return a pointer to + * one free IO buf. + * + * 1. If private free xri count is empty, move some XRIs from public to + * private pool. + * 2. Get one XRI from private free xri pool. + * 3. If we fail to get one from pvt_pool and this is an expedite request, + * get one free xri from expedite pool. + * + * Note: ndlp is only used on SCSI side for RRQ testing. + * The caller should pass NULL for ndlp on NVME side. 
+ * + * Return: + * pointer to one free IO buf - if private pool is not empty + * NULL - if private pool is empty + **/ +static struct lpfc_io_buf * +lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, + int hwqid, int expedite) +{ + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_pvt_pool *pvt_pool; + struct lpfc_io_buf *lpfc_ncmd; + + qp = &phba->sli4_hba.hdwq[hwqid]; + lpfc_ncmd = NULL; + if (!qp) { + lpfc_printf_log(phba, KERN_INFO, + LOG_SLI | LOG_NVME_ABTS | LOG_FCP, + "5556 NULL qp for hwqid x%x\n", hwqid); + return lpfc_ncmd; + } + multixri_pool = qp->p_multixri_pool; + if (!multixri_pool) { + lpfc_printf_log(phba, KERN_INFO, + LOG_SLI | LOG_NVME_ABTS | LOG_FCP, + "5557 NULL multixri for hwqid x%x\n", hwqid); + return lpfc_ncmd; + } + pvt_pool = &multixri_pool->pvt_pool; + if (!pvt_pool) { + lpfc_printf_log(phba, KERN_INFO, + LOG_SLI | LOG_NVME_ABTS | LOG_FCP, + "5558 NULL pvt_pool for hwqid x%x\n", hwqid); + return lpfc_ncmd; + } + multixri_pool->io_req_count++; + + /* If pvt_pool is empty, move some XRIs from public to private pool */ + if (pvt_pool->count == 0) + lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); + + /* Get one XRI from private free xri pool */ + lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp); + + if (lpfc_ncmd) { + lpfc_ncmd->hdwq = qp; + lpfc_ncmd->hdwq_no = hwqid; + } else if (expedite) { + /* If we fail to get one from pvt_pool and this is an expedite + * request, get one free xri from expedite pool. + */ + lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba); + } + + return lpfc_ncmd; +} + +static inline struct lpfc_io_buf * +lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx) +{ + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next; + + qp = &phba->sli4_hba.hdwq[idx]; + list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, + &qp->lpfc_io_buf_list_get, list) { + if (lpfc_test_rrq_active(phba, ndlp, + lpfc_cmd->cur_iocbq.sli4_lxritag)) + continue; + + if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED) + continue; + + list_del_init(&lpfc_cmd->list); + qp->get_io_bufs--; + lpfc_cmd->hdwq = qp; + lpfc_cmd->hdwq_no = idx; + return lpfc_cmd; + } + return NULL; +} + +/** + * lpfc_get_io_buf - Get one IO buffer from free pool + * @phba: The HBA for which this call is being executed. + * @ndlp: pointer to lpfc nodelist data structure. + * @hwqid: belong to which HWQ + * @expedite: 1 means this request is urgent. + * + * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1, + * removes a IO buffer from multiXRI pools. If cfg_xri_rebalancing==0, removes + * a IO buffer from head of @hdwq io_buf_list and returns to caller. + * + * Note: ndlp is only used on SCSI side for RRQ testing. + * The caller should pass NULL for ndlp on NVME side. 
+ * + * Return codes: + * NULL - Error + * Pointer to lpfc_io_buf - Success + **/ +struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, + u32 hwqid, int expedite) +{ + struct lpfc_sli4_hdw_queue *qp; + unsigned long iflag; + struct lpfc_io_buf *lpfc_cmd; + + qp = &phba->sli4_hba.hdwq[hwqid]; + lpfc_cmd = NULL; + if (!qp) { + lpfc_printf_log(phba, KERN_WARNING, + LOG_SLI | LOG_NVME_ABTS | LOG_FCP, + "5555 NULL qp for hwqid x%x\n", hwqid); + return lpfc_cmd; + } + + if (phba->cfg_xri_rebalancing) + lpfc_cmd = lpfc_get_io_buf_from_multixri_pools( + phba, ndlp, hwqid, expedite); + else { + lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag, + qp, alloc_xri_get); + if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite) + lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid); + if (!lpfc_cmd) { + lpfc_qp_spin_lock(&qp->io_buf_list_put_lock, + qp, alloc_xri_put); + list_splice(&qp->lpfc_io_buf_list_put, + &qp->lpfc_io_buf_list_get); + qp->get_io_bufs += qp->put_io_bufs; + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); + qp->put_io_bufs = 0; + spin_unlock(&qp->io_buf_list_put_lock); + if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || + expedite) + lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid); + } + spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag); + } + + return lpfc_cmd; +} + +/** + * lpfc_read_object - Retrieve object data from HBA + * @phba: The HBA for which this call is being executed. + * @rdobject: Pathname of object data we want to read. + * @datap: Pointer to where data will be copied to. + * @datasz: size of data area + * + * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less. + * The data will be truncated if datasz is not large enough. + * Version 1 is not supported with Embedded mbox cmd, so we must use version 0. + * Returns the actual bytes read from the object. 
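/*
 * Illustrative sketch only: with XRI rebalancing disabled, lpfc_get_io_buf
 * above uses a classic two-list pool -- allocations come from a "get"
 * list, frees go to a separate "put" list, and only when the get list
 * runs dry is the whole put list spliced over under both locks. A
 * miniature userspace version with hypothetical types:
 */
#include <pthread.h>
#include <stddef.h>

struct buf { struct buf *next; };

struct two_list_pool {
	pthread_mutex_t get_lock, put_lock;
	struct buf *get_head, *put_head;
};

static struct buf *pool_get(struct two_list_pool *p)
{
	pthread_mutex_lock(&p->get_lock);
	if (!p->get_head) {
		/* Refill: steal the entire put list in one splice. */
		pthread_mutex_lock(&p->put_lock);
		p->get_head = p->put_head;
		p->put_head = NULL;
		pthread_mutex_unlock(&p->put_lock);
	}
	struct buf *b = p->get_head;
	if (b)
		p->get_head = b->next;
	pthread_mutex_unlock(&p->get_lock);
	return b;
}

static void pool_put(struct two_list_pool *p, struct buf *b)
{
	pthread_mutex_lock(&p->put_lock);
	b->next = p->put_head;
	p->put_head = b;
	pthread_mutex_unlock(&p->put_lock);
}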
+ */ +int +lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap, + uint32_t datasz) +{ + struct lpfc_mbx_read_object *read_object; + LPFC_MBOXQ_t *mbox; + int rc, length, eof, j, byte_cnt = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + struct lpfc_dmabuf *pcmd; + u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0}; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_read_object) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_READ_OBJECT, + length, LPFC_SLI4_MBX_EMBED); + read_object = &mbox->u.mqe.un.read_object; + shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr; + + bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0); + bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz); + read_object->u.request.rd_object_offset = 0; + read_object->u.request.rd_object_cnt = 1; + + memset((void *)read_object->u.request.rd_object_name, 0, + LPFC_OBJ_NAME_SZ); + scnprintf((char *)rd_object_name, sizeof(rd_object_name), rdobject); + for (j = 0; j < strlen(rdobject); j++) + read_object->u.request.rd_object_name[j] = + cpu_to_le32(rd_object_name[j]); + + pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); + if (pcmd) + pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); + if (!pcmd || !pcmd->virt) { + kfree(pcmd); + mempool_free(mbox, phba->mbox_mem_pool); + return -ENOMEM; + } + memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE); + read_object->u.request.rd_object_hbuf[0].pa_lo = + putPaddrLow(pcmd->phys); + read_object->u.request.rd_object_hbuf[0].pa_hi = + putPaddrHigh(pcmd->phys); + read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE; + + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->ctx_ndlp = NULL; + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + + if (shdr_status == STATUS_FAILED && + shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT, + "4674 No port cfg file in FW.\n"); + byte_cnt = -ENOENT; + } else if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT, + "2625 READ_OBJECT mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + byte_cnt = -ENXIO; + } else { + /* Success */ + length = read_object->u.response.rd_object_actual_rlen; + eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response); + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT, + "2626 READ_OBJECT Success len %d:%d, EOF %d\n", + length, datasz, eof); + + /* Detect the port config file exists but is empty */ + if (!length && eof) { + byte_cnt = 0; + goto exit; + } + + byte_cnt = length; + lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt); + } + + exit: + /* This is an embedded SLI4 mailbox with an external buffer allocated. + * Free the pcmd and then cleanup with the correct routine. + */ + lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); + kfree(pcmd); + lpfc_sli4_mbox_cmd_free(phba, mbox); + return byte_cnt; +} + +/** + * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool + * @phba: The HBA for which this call is being executed. 
+ * @lpfc_buf: IO buf structure to append the SGL chunk + * + * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool, + * and will allocate an SGL chunk if the pool is empty. + * + * Return codes: + * NULL - Error + * Pointer to sli4_hybrid_sgl - Success + **/ +struct sli4_hybrid_sgl * +lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) +{ + struct sli4_hybrid_sgl *list_entry = NULL; + struct sli4_hybrid_sgl *tmp = NULL; + struct sli4_hybrid_sgl *allocated_sgl = NULL; + struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; + struct list_head *buf_list = &hdwq->sgl_list; + unsigned long iflags; + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + + if (likely(!list_empty(buf_list))) { + /* break off 1 chunk from the sgl_list */ + list_for_each_entry_safe(list_entry, tmp, + buf_list, list_node) { + list_move_tail(&list_entry->list_node, + &lpfc_buf->dma_sgl_xtra_list); + break; + } + } else { + /* allocate more */ + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); + tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC, + cpu_to_node(hdwq->io_wq->chann)); + if (!tmp) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "8353 error kmalloc memory for HDWQ " + "%d %s\n", + lpfc_buf->hdwq_no, __func__); + return NULL; + } + + tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, + GFP_ATOMIC, &tmp->dma_phys_sgl); + if (!tmp->dma_sgl) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "8354 error pool_alloc memory for HDWQ " + "%d %s\n", + lpfc_buf->hdwq_no, __func__); + kfree(tmp); + return NULL; + } + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list); + } + + allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list, + struct sli4_hybrid_sgl, + list_node); + + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); + + return allocated_sgl; +} + +/** + * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool + * @phba: The HBA for which this call is being executed. + * @lpfc_buf: IO buf structure with the SGL chunk + * + * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool. + * + * Return codes: + * 0 - Success + * -EINVAL - Error + **/ +int +lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) +{ + int rc = 0; + struct sli4_hybrid_sgl *list_entry = NULL; + struct sli4_hybrid_sgl *tmp = NULL; + struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; + struct list_head *buf_list = &hdwq->sgl_list; + unsigned long iflags; + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + + if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) { + list_for_each_entry_safe(list_entry, tmp, + &lpfc_buf->dma_sgl_xtra_list, + list_node) { + list_move_tail(&list_entry->list_node, + buf_list); + } + } else { + rc = -EINVAL; + } + + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); + return rc; +} + +/** + * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool + * @phba: phba object + * @hdwq: hdwq to cleanup sgl buff resources on + * + * This routine frees all SGL chunks of hdwq SGL chunk pool. 
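/*
 * Illustrative sketch only: lpfc_get_sgl_per_hdwq/lpfc_put_sgl_per_hdwq
 * above implement a per-queue chunk cache -- "get" reuses a cached chunk
 * when one exists and only falls back to the allocator (after dropping
 * the lock) when the cache is empty, while "put" simply returns the
 * chunk to the cache. A hypothetical miniature version:
 */
#include <pthread.h>
#include <stdlib.h>

struct chunk { struct chunk *next; /* payload omitted */ };

struct chunk_cache {
	pthread_mutex_t lock;
	struct chunk *free_list;
};

static struct chunk *cache_get(struct chunk_cache *c)
{
	pthread_mutex_lock(&c->lock);
	struct chunk *ch = c->free_list;
	if (ch) {
		c->free_list = ch->next;
		pthread_mutex_unlock(&c->lock);
		return ch;
	}
	/* Cache empty: drop the lock before the (possibly slow) allocation. */
	pthread_mutex_unlock(&c->lock);
	return calloc(1, sizeof(*ch));
}

static void cache_put(struct chunk_cache *c, struct chunk *ch)
{
	pthread_mutex_lock(&c->lock);
	ch->next = c->free_list;
	c->free_list = ch;
	pthread_mutex_unlock(&c->lock);
}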
+ * + * Return codes: + * None + **/ +void +lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba, + struct lpfc_sli4_hdw_queue *hdwq) +{ + struct list_head *buf_list = &hdwq->sgl_list; + struct sli4_hybrid_sgl *list_entry = NULL; + struct sli4_hybrid_sgl *tmp = NULL; + unsigned long iflags; + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + + /* Free sgl pool */ + list_for_each_entry_safe(list_entry, tmp, + buf_list, list_node) { + list_del(&list_entry->list_node); + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + list_entry->dma_sgl, + list_entry->dma_phys_sgl); + kfree(list_entry); + } + + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); +} + +/** + * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq + * @phba: The HBA for which this call is being executed. + * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer + * + * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool, + * and will allocate an CMD/RSP buffer if the pool is empty. + * + * Return codes: + * NULL - Error + * Pointer to fcp_cmd_rsp_buf - Success + **/ +struct fcp_cmd_rsp_buf * +lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, + struct lpfc_io_buf *lpfc_buf) +{ + struct fcp_cmd_rsp_buf *list_entry = NULL; + struct fcp_cmd_rsp_buf *tmp = NULL; + struct fcp_cmd_rsp_buf *allocated_buf = NULL; + struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; + struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; + unsigned long iflags; + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + + if (likely(!list_empty(buf_list))) { + /* break off 1 chunk from the list */ + list_for_each_entry_safe(list_entry, tmp, + buf_list, + list_node) { + list_move_tail(&list_entry->list_node, + &lpfc_buf->dma_cmd_rsp_list); + break; + } + } else { + /* allocate more */ + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); + tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC, + cpu_to_node(hdwq->io_wq->chann)); + if (!tmp) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "8355 error kmalloc memory for HDWQ " + "%d %s\n", + lpfc_buf->hdwq_no, __func__); + return NULL; + } + + tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool, + GFP_ATOMIC, + &tmp->fcp_cmd_rsp_dma_handle); + + if (!tmp->fcp_cmnd) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "8356 error pool_alloc memory for HDWQ " + "%d %s\n", + lpfc_buf->hdwq_no, __func__); + kfree(tmp); + return NULL; + } + + tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd + + sizeof(struct fcp_cmnd)); + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list); + } + + allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list, + struct fcp_cmd_rsp_buf, + list_node); + + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); + + return allocated_buf; +} + +/** + * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool + * @phba: The HBA for which this call is being executed. + * @lpfc_buf: IO buf structure with the CMD/RSP buf + * + * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool. 
+ * + * Return codes: + * 0 - Success + * -EINVAL - Error + **/ +int +lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, + struct lpfc_io_buf *lpfc_buf) +{ + int rc = 0; + struct fcp_cmd_rsp_buf *list_entry = NULL; + struct fcp_cmd_rsp_buf *tmp = NULL; + struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; + struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; + unsigned long iflags; + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + + if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) { + list_for_each_entry_safe(list_entry, tmp, + &lpfc_buf->dma_cmd_rsp_list, + list_node) { + list_move_tail(&list_entry->list_node, + buf_list); + } + } else { + rc = -EINVAL; + } + + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); + return rc; +} + +/** + * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool + * @phba: phba object + * @hdwq: hdwq to cleanup cmd rsp buff resources on + * + * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool. + * + * Return codes: + * None + **/ +void +lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, + struct lpfc_sli4_hdw_queue *hdwq) +{ + struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; + struct fcp_cmd_rsp_buf *list_entry = NULL; + struct fcp_cmd_rsp_buf *tmp = NULL; + unsigned long iflags; + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + + /* Free cmd_rsp buf pool */ + list_for_each_entry_safe(list_entry, tmp, + buf_list, + list_node) { + list_del(&list_entry->list_node); + dma_pool_free(phba->lpfc_cmd_rsp_buf_pool, + list_entry->fcp_cmnd, + list_entry->fcp_cmd_rsp_dma_handle); + kfree(list_entry); + } + + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); +} + +/** + * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted + * @phba: phba object + * @job: job entry of the command to be posted. + * + * Fill the common fields of the wqe for each of the command. 
+ * + * Return codes: + * None + **/ +void +lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job) +{ + u8 cmnd; + u32 *pcmd; + u32 if_type = 0; + u32 fip, abort_tag; + struct lpfc_nodelist *ndlp = NULL; + union lpfc_wqe128 *wqe = &job->wqe; + u8 command_type = ELS_COMMAND_NON_FIP; + + fip = phba->hba_flag & HBA_FIP_SUPPORT; + /* The fcp commands will set command type */ + if (job->cmd_flag & LPFC_IO_FCP) + command_type = FCP_COMMAND; + else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK)) + command_type = ELS_COMMAND_FIP; + else + command_type = ELS_COMMAND_NON_FIP; + + abort_tag = job->iotag; + cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com); + + switch (cmnd) { + case CMD_ELS_REQUEST64_WQE: + ndlp = job->ndlp; + + if_type = bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf); + if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { + pcmd = (u32 *)job->cmd_dmabuf->virt; + if (pcmd && (*pcmd == ELS_CMD_FLOGI || + *pcmd == ELS_CMD_SCR || + *pcmd == ELS_CMD_RDF || + *pcmd == ELS_CMD_EDC || + *pcmd == ELS_CMD_RSCN_XMT || + *pcmd == ELS_CMD_FDISC || + *pcmd == ELS_CMD_LOGO || + *pcmd == ELS_CMD_QFPA || + *pcmd == ELS_CMD_UVEM || + *pcmd == ELS_CMD_PLOGI)) { + bf_set(els_req64_sp, &wqe->els_req, 1); + bf_set(els_req64_sid, &wqe->els_req, + job->vport->fc_myDID); + + if ((*pcmd == ELS_CMD_FLOGI) && + !(phba->fc_topology == + LPFC_TOPOLOGY_LOOP)) + bf_set(els_req64_sid, &wqe->els_req, 0); + + bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); + bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, + phba->vpi_ids[job->vport->vpi]); + } else if (pcmd) { + bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); + bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + } + } + + bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + + bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); + bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); + bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); + bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); + bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); + break; + case CMD_XMIT_ELS_RSP64_WQE: + ndlp = job->ndlp; + + /* word4 */ + wqe->xmit_els_rsp.word4 = 0; + + if_type = bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf); + if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { + if (job->vport->fc_flag & FC_PT2PT) { + bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); + bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, + job->vport->fc_myDID); + if (job->vport->fc_myDID == Fabric_DID) { + bf_set(wqe_els_did, + &wqe->xmit_els_rsp.wqe_dest, 0); + } + } + } + + bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); + bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); + bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, + LPFC_WQE_LENLOC_WORD3); + bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); + + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { + bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); + bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, + job->vport->fc_myDID); + bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); + } + + if (phba->sli_rev == LPFC_SLI_REV4) { + bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + + if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com)) + bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, + phba->vpi_ids[job->vport->vpi]); + } + command_type = OTHER_COMMAND; + break; + case CMD_GEN_REQUEST64_WQE: + /* Word 10 */ + bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); + bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); + 
bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); + bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); + bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); + command_type = OTHER_COMMAND; + break; + case CMD_XMIT_SEQUENCE64_WQE: + if (phba->link_flag & LS_LOOPBACK_MODE) + bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); + + wqe->xmit_sequence.rsvd3 = 0; + bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); + bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); + bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, + LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, + LPFC_WQE_LENLOC_WORD12); + bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); + command_type = OTHER_COMMAND; + break; + case CMD_XMIT_BLS_RSP64_WQE: + bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); + bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); + bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); + bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, + phba->vpi_ids[phba->pport->vpi]); + bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); + bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, + LPFC_WQE_LENLOC_NONE); + /* Overwrite the pre-set comnd type with OTHER_COMMAND */ + command_type = OTHER_COMMAND; + break; + case CMD_FCP_ICMND64_WQE: /* task mgmt commands */ + case CMD_ABORT_XRI_WQE: /* abort iotag */ + case CMD_SEND_FRAME: /* mds loopback */ + /* cases already formatted for sli4 wqe - no chgs necessary */ + return; + default: + dump_stack(); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6207 Invalid command 0x%x\n", + cmnd); + break; + } + + wqe->generic.wqe_com.abort_tag = abort_tag; + bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag); + bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); + bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); +} diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h new file mode 100644 index 000000000..cd33dfec7 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -0,0 +1,483 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS) +#define CONFIG_SCSI_LPFC_DEBUG_FS +#endif + +/* forward declaration for LPFC_IOCB_t's use */ +struct lpfc_hba; +struct lpfc_vport; + +/* Define the context types that SLI handles for abort and sums. 
*/ +typedef enum _lpfc_ctx_cmd { + LPFC_CTX_LUN, + LPFC_CTX_TGT, + LPFC_CTX_HOST +} lpfc_ctx_cmd; + +/* Enumeration to describe the thread lock context. */ +enum lpfc_mbox_ctx { + MBOX_THD_UNLOCKED, + MBOX_THD_LOCKED +}; + +union lpfc_vmid_tag { + uint32_t app_id; + uint8_t cs_ctl_vmid; + struct lpfc_vmid_context *vmid_context; /* UVEM context information */ +}; + +struct lpfc_cq_event { + struct list_head list; + uint16_t hdwq; + union { + struct lpfc_mcqe mcqe_cmpl; + struct lpfc_acqe_link acqe_link; + struct lpfc_acqe_fip acqe_fip; + struct lpfc_acqe_dcbx acqe_dcbx; + struct lpfc_acqe_grp5 acqe_grp5; + struct lpfc_acqe_fc_la acqe_fc; + struct lpfc_acqe_sli acqe_sli; + struct lpfc_rcqe rcqe_cmpl; + struct sli4_wcqe_xri_aborted wcqe_axri; + struct lpfc_wcqe_complete wcqe_cmpl; + } cqe; +}; + +/* This structure is used to handle IOCB requests / responses */ +struct lpfc_iocbq { + /* lpfc_iocbqs are used in double linked lists */ + struct list_head list; + struct list_head clist; + struct list_head dlist; + uint16_t iotag; /* pre-assigned IO tag */ + uint16_t sli4_lxritag; /* logical pre-assigned XRI. */ + uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ + uint16_t hba_wqidx; /* index to HBA work queue */ + struct lpfc_cq_event cq_event; + uint64_t isr_timestamp; + + union lpfc_wqe128 wqe; /* SLI-4 */ + IOCB_t iocb; /* SLI-3 */ + struct lpfc_wcqe_complete wcqe_cmpl; /* WQE cmpl */ + + u32 unsol_rcv_len; /* Receive len in usol path */ + + /* Pack the u8's together and make them module-4. */ + u8 num_bdes; /* Number of BDEs */ + u8 abort_bls; /* ABTS by initiator or responder */ + u8 abort_rctl; /* ACC or RJT flag */ + u8 priority; /* OAS priority */ + u8 retry; /* retry counter for IOCB cmd - if needed */ + u8 rsvd1; /* Pad for u32 */ + u8 rsvd2; /* Pad for u32 */ + u8 rsvd3; /* Pad for u32 */ + + u32 cmd_flag; +#define LPFC_IO_LIBDFC 1 /* libdfc iocb */ +#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */ +#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */ +#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ +#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ +#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ +#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ +#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */ +#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */ +#define DSS_SECURITY_OP 0x100 /* security IO */ +#define LPFC_IO_ON_TXCMPLQ 0x200 /* The IO is still on the TXCMPLQ */ +#define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */ +#define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */ +#define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */ +#define LPFC_IO_CMD_OUTSTANDING 0x2000 /* timeout handler abort window */ + +#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ +#define LPFC_FIP_ELS_ID_SHIFT 14 + +#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */ +#define LPFC_IO_FOF 0x20000 /* FOF FCP IO */ +#define LPFC_IO_LOOPBACK 0x40000 /* Loopback IO */ +#define LPFC_PRLI_NVME_REQ 0x80000 /* This is an NVME PRLI. */ +#define LPFC_PRLI_FCP_REQ 0x100000 /* This is an NVME PRLI. 
*/ +#define LPFC_IO_NVME 0x200000 /* NVME FCP command */ +#define LPFC_IO_NVME_LS 0x400000 /* NVME LS command */ +#define LPFC_IO_NVMET 0x800000 /* NVMET command */ +#define LPFC_IO_VMID 0x1000000 /* VMID tagged IO */ +#define LPFC_IO_CMF 0x4000000 /* CMF command */ + + uint32_t drvrTimeout; /* driver timeout in seconds */ + struct lpfc_vport *vport;/* virtual port pointer */ + struct lpfc_dmabuf *cmd_dmabuf; + struct lpfc_dmabuf *rsp_dmabuf; + struct lpfc_dmabuf *bpl_dmabuf; + uint32_t event_tag; /* LA Event tag */ + union { + wait_queue_head_t *wait_queue; + struct lpfcMboxq *mbox; + struct lpfc_node_rrq *rrq; + struct nvmefc_ls_req *nvme_lsreq; + struct lpfc_async_xchg_ctx *axchg; + struct bsg_job_data *dd_data; + } context_un; + + struct lpfc_io_buf *io_buf; + struct lpfc_iocbq *rsp_iocb; + struct lpfc_nodelist *ndlp; + union lpfc_vmid_tag vmid_tag; + void (*fabric_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd, + struct lpfc_iocbq *rsp); + void (*wait_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd, + struct lpfc_iocbq *rsp); + void (*cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd, + struct lpfc_iocbq *rsp); +}; + +#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ + +#define IOCB_SUCCESS 0 +#define IOCB_BUSY 1 +#define IOCB_ERROR 2 +#define IOCB_TIMEDOUT 3 +#define IOCB_ABORTED 4 +#define IOCB_ABORTING 5 +#define IOCB_NORESOURCE 6 + +#define SLI_WQE_RET_WQE 1 /* Return WQE if cmd ring full */ + +#define WQE_SUCCESS 0 +#define WQE_BUSY 1 +#define WQE_ERROR 2 +#define WQE_TIMEDOUT 3 +#define WQE_ABORTED 4 +#define WQE_ABORTING 5 +#define WQE_NORESOURCE 6 + +#define LPFC_MBX_WAKE 1 +#define LPFC_MBX_IMED_UNREG 2 + +typedef struct lpfcMboxq { + /* MBOXQs are used in single linked lists */ + struct list_head list; /* ptr to next mailbox command */ + union { + MAILBOX_t mb; /* Mailbox cmd */ + struct lpfc_mqe mqe; + } u; + struct lpfc_vport *vport; /* virtual port pointer */ + void *ctx_ndlp; /* caller ndlp information */ + void *ctx_buf; /* caller buffer information */ + void *context3; + + void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); + uint8_t mbox_flag; + uint16_t in_ext_byte_len; + uint16_t out_ext_byte_len; + uint8_t mbox_offset_word; + struct lpfc_mcqe mcqe; + struct lpfc_mbx_nembed_sge_virt *sge_array; +} LPFC_MBOXQ_t; + +#define MBX_POLL 1 /* poll mailbox till command done, then + return */ +#define MBX_NOWAIT 2 /* issue command then return immediately */ + +#define LPFC_MAX_RING_MASK 5 /* max num of rctl/type masks allowed per + ring */ +#define LPFC_SLI3_MAX_RING 4 /* Max num of SLI3 rings used by driver. + For SLI4, an additional ring for each + FCP WQ will be allocated. 
*/ + +struct lpfc_sli_ring; + +struct lpfc_sli_ring_mask { + uint8_t profile; /* profile associated with ring */ + uint8_t rctl; /* rctl / type pair configured for ring */ + uint8_t type; /* rctl / type pair configured for ring */ + uint8_t rsvd; + /* rcv'd unsol event */ + void (*lpfc_sli_rcv_unsol_event) (struct lpfc_hba *, + struct lpfc_sli_ring *, + struct lpfc_iocbq *); +}; + + +/* Structure used to hold SLI statistical counters and info */ +struct lpfc_sli_ring_stat { + uint64_t iocb_event; /* IOCB event counters */ + uint64_t iocb_cmd; /* IOCB cmd issued */ + uint64_t iocb_rsp; /* IOCB rsp received */ + uint64_t iocb_cmd_delay; /* IOCB cmd ring delay */ + uint64_t iocb_cmd_full; /* IOCB cmd ring full */ + uint64_t iocb_cmd_empty; /* IOCB cmd ring is now empty */ + uint64_t iocb_rsp_full; /* IOCB rsp ring full */ +}; + +struct lpfc_sli3_ring { + uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */ + uint32_t next_cmdidx; /* next_cmd index */ + uint32_t rspidx; /* current index in response ring */ + uint32_t cmdidx; /* current index in command ring */ + uint16_t numCiocb; /* number of command iocb's per ring */ + uint16_t numRiocb; /* number of rsp iocb's per ring */ + uint16_t sizeCiocb; /* Size of command iocb's in this ring */ + uint16_t sizeRiocb; /* Size of response iocb's in this ring */ + uint32_t *cmdringaddr; /* virtual address for cmd rings */ + uint32_t *rspringaddr; /* virtual address for rsp rings */ +}; + +struct lpfc_sli4_ring { + struct lpfc_queue *wqp; /* Pointer to associated WQ */ +}; + + +/* Structure used to hold SLI ring information */ +struct lpfc_sli_ring { + uint16_t flag; /* ring flags */ +#define LPFC_DEFERRED_RING_EVENT 0x001 /* Deferred processing a ring event */ +#define LPFC_CALL_RING_AVAILABLE 0x002 /* indicates cmd was full */ +#define LPFC_STOP_IOCB_EVENT 0x020 /* Stop processing IOCB cmds event */ + uint16_t abtsiotag; /* tracks next iotag to use for ABTS */ + + uint8_t rsvd; + uint8_t ringno; /* ring number */ + + spinlock_t ring_lock; /* lock for issuing commands */ + + uint32_t fast_iotag; /* max fastlookup based iotag */ + uint32_t iotag_ctr; /* keeps track of the next iotag to use */ + uint32_t iotag_max; /* max iotag value to use */ + struct list_head txq; + uint16_t txq_cnt; /* current length of queue */ + uint16_t txq_max; /* max length */ + struct list_head txcmplq; + uint16_t txcmplq_cnt; /* current length of queue */ + uint16_t txcmplq_max; /* max length */ + uint32_t missbufcnt; /* keep track of buffers to post */ + struct list_head postbufq; + uint16_t postbufq_cnt; /* current length of queue */ + uint16_t postbufq_max; /* max length */ + struct list_head iocb_continueq; + uint16_t iocb_continueq_cnt; /* current length of queue */ + uint16_t iocb_continueq_max; /* max length */ + struct list_head iocb_continue_saveq; + + struct lpfc_sli_ring_mask prt[LPFC_MAX_RING_MASK]; + uint32_t num_mask; /* number of mask entries in prt array */ + void (*lpfc_sli_rcv_async_status) (struct lpfc_hba *, + struct lpfc_sli_ring *, struct lpfc_iocbq *); + + struct lpfc_sli_ring_stat stats; /* SLI statistical info */ + + /* cmd ring available */ + void (*lpfc_sli_cmd_available) (struct lpfc_hba *, + struct lpfc_sli_ring *); + union { + struct lpfc_sli3_ring sli3; + struct lpfc_sli4_ring sli4; + } sli; +}; + +/* Structure used for configuring rings to a specific profile or rctl / type */ +struct lpfc_hbq_init { + uint32_t rn; /* Receive buffer notification */ + uint32_t entry_count; /* max # of entries in HBQ */ + uint32_t headerLen; /* 0 
if not profile 4 or 5 */ + uint32_t logEntry; /* Set to 1 if this HBQ used for LogEntry */ + uint32_t profile; /* Selection profile 0=all, 7=logentry */ + uint32_t ring_mask; /* Binds HBQ to a ring e.g. Ring0=b0001, + * ring2=b0100 */ + uint32_t hbq_index; /* index of this hbq in ring .HBQs[] */ + + uint32_t seqlenoff; + uint32_t maxlen; + uint32_t seqlenbcnt; + uint32_t cmdcodeoff; + uint32_t cmdmatch[8]; + uint32_t mask_count; /* number of mask entries in prt array */ + struct hbq_mask hbqMasks[6]; + + /* Non-config rings fields to keep track of buffer allocations */ + uint32_t buffer_count; /* number of buffers allocated */ + uint32_t init_count; /* number to allocate when initialized */ + uint32_t add_count; /* number to allocate when starved */ +} ; + +/* Structure used to hold SLI statistical counters and info */ +struct lpfc_sli_stat { + uint64_t mbox_stat_err; /* Mbox cmds completed status error */ + uint64_t mbox_cmd; /* Mailbox commands issued */ + uint64_t sli_intr; /* Count of Host Attention interrupts */ + uint64_t sli_prev_intr; /* Previous cnt of Host Attention interrupts */ + uint64_t sli_ips; /* Host Attention interrupts per sec */ + uint32_t err_attn_event; /* Error Attn event counters */ + uint32_t link_event; /* Link event counters */ + uint32_t mbox_event; /* Mailbox event counters */ + uint32_t mbox_busy; /* Mailbox cmd busy */ +}; + +/* Structure to store link status values when port stats are reset */ +struct lpfc_lnk_stat { + uint32_t link_failure_count; + uint32_t loss_of_sync_count; + uint32_t loss_of_signal_count; + uint32_t prim_seq_protocol_err_count; + uint32_t invalid_tx_word_count; + uint32_t invalid_crc_count; + uint32_t error_frames; + uint32_t link_events; +}; + +/* Structure used to hold SLI information */ +struct lpfc_sli { + uint32_t num_rings; + uint32_t sli_flag; + + /* Additional sli_flags */ +#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */ +#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */ +#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ +#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ +#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */ +#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */ +#define LPFC_SLI_USE_EQDR 0x8000 /* EQ Delay Register is supported */ +#define LPFC_QUEUE_FREE_INIT 0x10000 /* Queue freeing is in progress */ +#define LPFC_QUEUE_FREE_WAIT 0x20000 /* Hold Queue free as it is being + * used outside worker thread + */ + + struct lpfc_sli_ring *sli3_ring; + + struct lpfc_sli_stat slistat; /* SLI statistical info */ + struct list_head mboxq; + uint16_t mboxq_cnt; /* current length of queue */ + uint16_t mboxq_max; /* max length */ + LPFC_MBOXQ_t *mbox_active; /* active mboxq information */ + struct list_head mboxq_cmpl; + + struct timer_list mbox_tmo; /* Hold clk to timeout active mbox + cmd */ + +#define LPFC_IOCBQ_LOOKUP_INCREMENT 1024 + struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ + size_t iocbq_lookup_len; /* current lengs of the array */ + uint16_t last_iotag; /* last allocated IOTAG */ + time64_t stats_start; /* in seconds */ + struct lpfc_lnk_stat lnk_stat_offsets; +}; + +/* Timeout for normal outstanding mbox command (Seconds) */ +#define LPFC_MBOX_TMO 30 +/* Timeout for non-flash-based outstanding sli_config mbox command (Seconds) */ +#define LPFC_MBOX_SLI4_CONFIG_TMO 60 +/* Timeout for flash-based outstanding sli_config mbox command (Seconds) */ +#define 
LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO 300 +/* Timeout for other flash-based outstanding mbox command (Seconds) */ +#define LPFC_MBOX_TMO_FLASH_CMD 300 + +struct lpfc_io_buf { + /* Common fields */ + struct list_head list; + void *data; + + dma_addr_t dma_handle; + dma_addr_t dma_phys_sgl; + + struct sli4_sge *dma_sgl; /* initial segment chunk */ + + /* linked list of extra sli4_hybrid_sge */ + struct list_head dma_sgl_xtra_list; + + /* list head for fcp_cmd_rsp buf */ + struct list_head dma_cmd_rsp_list; + + struct lpfc_iocbq cur_iocbq; + struct lpfc_sli4_hdw_queue *hdwq; + uint16_t hdwq_no; + uint16_t cpu; + + struct lpfc_nodelist *ndlp; + uint32_t timeout; + uint16_t flags; +#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ +#define LPFC_SBUF_BUMP_QDEPTH 0x2 /* bumped queue depth counter */ + /* External DIF device IO conversions */ +#define LPFC_SBUF_NORMAL_DIF 0x4 /* normal mode to insert/strip */ +#define LPFC_SBUF_PASS_DIF 0x8 /* insert/strip mode to passthru */ +#define LPFC_SBUF_NOT_POSTED 0x10 /* SGL failed post to FW. */ + uint16_t status; /* From IOCB Word 7- ulpStatus */ + uint32_t result; /* From IOCB Word 4. */ + + uint32_t seg_cnt; /* Number of scatter-gather segments returned by + * dma_map_sg. The driver needs this for calls + * to dma_unmap_sg. + */ + unsigned long start_time; + spinlock_t buf_lock; /* lock used in case of simultaneous abort */ + bool expedite; /* this is an expedite io_buf */ + + union { + /* SCSI specific fields */ + struct { + struct scsi_cmnd *pCmd; + struct lpfc_rport_data *rdata; + uint32_t prot_seg_cnt; /* seg_cnt's counterpart for + * protection data + */ + + /* + * data and dma_handle are the kernel virtual and bus + * address of the dma-able buffer containing the + * fcp_cmd, fcp_rsp and a scatter gather bde list that + * supports the sg_tablesize value. + */ + struct fcp_cmnd *fcp_cmnd; + struct fcp_rsp *fcp_rsp; + + wait_queue_head_t *waitq; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + /* Used to restore any changes to protection data for + * error injection + */ + void *prot_data_segment; + uint32_t prot_data; + uint32_t prot_data_type; +#define LPFC_INJERR_REFTAG 1 +#define LPFC_INJERR_APPTAG 2 +#define LPFC_INJERR_GUARD 3 +#endif + }; + + /* NVME specific fields */ + struct { + struct nvmefc_fcp_req *nvmeCmd; + uint16_t qidx; + }; + }; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint64_t ts_cmd_start; + uint64_t ts_last_cmd; + uint64_t ts_cmd_wqput; + uint64_t ts_isr_cmpl; + uint64_t ts_data_io; +#endif + uint64_t rx_cmd_start; +}; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h new file mode 100644 index 000000000..2541a8fba --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -0,0 +1,1199 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2009-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. 
* + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include +#include + +#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS) +#define CONFIG_SCSI_LPFC_DEBUG_FS +#endif + +#define LPFC_ACTIVE_MBOX_WAIT_CNT 100 +#define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000 +#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10 +#define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000 +#define LPFC_RPI_LOW_WATER_MARK 10 + +#define LPFC_UNREG_FCF 1 +#define LPFC_SKIP_UNREG_FCF 0 + +/* Amount of time in seconds for waiting FCF rediscovery to complete */ +#define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */ + +/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */ +#define LPFC_NEMBED_MBOX_SGL_CNT 254 + +/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */ +#define LPFC_HBA_HDWQ_MIN 0 +#define LPFC_HBA_HDWQ_MAX 256 +#define LPFC_HBA_HDWQ_DEF LPFC_HBA_HDWQ_MIN + +/* irq_chann range, values */ +#define LPFC_IRQ_CHANN_MIN 0 +#define LPFC_IRQ_CHANN_MAX 256 +#define LPFC_IRQ_CHANN_DEF LPFC_IRQ_CHANN_MIN + +/* FCP MQ queue count limiting */ +#define LPFC_FCP_MQ_THRESHOLD_MIN 0 +#define LPFC_FCP_MQ_THRESHOLD_MAX 256 +#define LPFC_FCP_MQ_THRESHOLD_DEF 8 + +/* + * Provide the default FCF Record attributes used by the driver + * when nonFIP mode is configured and there is no other default + * FCF Record attributes. + */ +#define LPFC_FCOE_FCF_DEF_INDEX 0 +#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF +#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF + +#define LPFC_FCOE_NULL_VID 0xFFF +#define LPFC_FCOE_IGNORE_VID 0xFFFF + +/* First 3 bytes of default FCF MAC is specified by FC_MAP */ +#define LPFC_FCOE_FCF_MAC3 0xFF +#define LPFC_FCOE_FCF_MAC4 0xFF +#define LPFC_FCOE_FCF_MAC5 0xFE +#define LPFC_FCOE_FCF_MAP0 0x0E +#define LPFC_FCOE_FCF_MAP1 0xFC +#define LPFC_FCOE_FCF_MAP2 0x00 +#define LPFC_FCOE_MAX_RCV_SIZE 0x800 +#define LPFC_FCOE_FKA_ADV_PER 0 +#define LPFC_FCOE_FIP_PRIORITY 0x80 + +#define sli4_sid_from_fc_hdr(fc_hdr) \ + ((fc_hdr)->fh_s_id[0] << 16 | \ + (fc_hdr)->fh_s_id[1] << 8 | \ + (fc_hdr)->fh_s_id[2]) + +#define sli4_did_from_fc_hdr(fc_hdr) \ + ((fc_hdr)->fh_d_id[0] << 16 | \ + (fc_hdr)->fh_d_id[1] << 8 | \ + (fc_hdr)->fh_d_id[2]) + +#define sli4_fctl_from_fc_hdr(fc_hdr) \ + ((fc_hdr)->fh_f_ctl[0] << 16 | \ + (fc_hdr)->fh_f_ctl[1] << 8 | \ + (fc_hdr)->fh_f_ctl[2]) + +#define sli4_type_from_fc_hdr(fc_hdr) \ + ((fc_hdr)->fh_type) + +#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000 + +#define INT_FW_UPGRADE 0 +#define RUN_FW_UPGRADE 1 + +enum lpfc_sli4_queue_type { + LPFC_EQ, + LPFC_GCQ, + LPFC_MCQ, + LPFC_WCQ, + LPFC_RCQ, + LPFC_MQ, + LPFC_WQ, + LPFC_HRQ, + LPFC_DRQ +}; + +/* The queue sub-type defines the functional purpose of the queue */ +enum lpfc_sli4_queue_subtype { + LPFC_NONE, + LPFC_MBOX, + LPFC_IO, + LPFC_ELS, + LPFC_NVMET, + LPFC_NVME_LS, + LPFC_USOL +}; + +/* RQ buffer list */ +struct lpfc_rqb { + uint16_t entry_count; /* Current number of RQ slots */ + uint16_t buffer_count; /* Current number of buffers posted */ + struct list_head rqb_buffer_list; /* buffers assigned to this HBQ */ + /* Callback for HBQ buffer allocation */ + struct rqb_dmabuf 
*(*rqb_alloc_buffer)(struct lpfc_hba *); + /* Callback for HBQ buffer free */ + void (*rqb_free_buffer)(struct lpfc_hba *, + struct rqb_dmabuf *); +}; + +enum lpfc_poll_mode { + LPFC_QUEUE_WORK, + LPFC_THREADED_IRQ, +}; + +struct lpfc_idle_stat { + u64 prev_idle; + u64 prev_wall; +}; + +struct lpfc_queue { + struct list_head list; + struct list_head wq_list; + + /* + * If interrupts are in effect on _all_ the eq's the footprint + * of polling code is zero (except mode). This memory is chec- + * ked for every io to see if the io needs to be polled and + * while completion to check if the eq's needs to be rearmed. + * Keep in same cacheline as the queue ptr to avoid cpu fetch + * stalls. Using 1B memory will leave us with 7B hole. Fill + * it with other frequently used members. + */ + uint16_t last_cpu; /* most recent cpu */ + uint16_t hdwq; + uint8_t qe_valid; + uint8_t mode; /* interrupt or polling */ +#define LPFC_EQ_INTERRUPT 0 +#define LPFC_EQ_POLL 1 + + struct list_head wqfull_list; + enum lpfc_sli4_queue_type type; + enum lpfc_sli4_queue_subtype subtype; + struct lpfc_hba *phba; + struct list_head child_list; + struct list_head page_list; + struct list_head sgl_list; + struct list_head cpu_list; + uint32_t entry_count; /* Number of entries to support on the queue */ + uint32_t entry_size; /* Size of each queue entry. */ + uint32_t entry_cnt_per_pg; + uint32_t notify_interval; /* Queue Notification Interval + * For chip->host queues (EQ, CQ, RQ): + * specifies the interval (number of + * entries) where the doorbell is rung to + * notify the chip of entry consumption. + * For host->chip queues (WQ): + * specifies the interval (number of + * entries) where consumption CQE is + * requested to indicate WQ entries + * consumed by the chip. + * Not used on an MQ. + */ +#define LPFC_EQ_NOTIFY_INTRVL 16 +#define LPFC_CQ_NOTIFY_INTRVL 16 +#define LPFC_WQ_NOTIFY_INTRVL 16 +#define LPFC_RQ_NOTIFY_INTRVL 16 + uint32_t max_proc_limit; /* Queue Processing Limit + * For chip->host queues (EQ, CQ): + * specifies the maximum number of + * entries to be consumed in one + * processing iteration sequence. Queue + * will be rearmed after each iteration. + * Not used on an MQ, RQ or WQ. 
+ */ +#define LPFC_EQ_MAX_PROC_LIMIT 256 +#define LPFC_CQ_MIN_PROC_LIMIT 64 +#define LPFC_CQ_MAX_PROC_LIMIT LPFC_CQE_EXP_COUNT // 4096 +#define LPFC_CQ_DEF_MAX_PROC_LIMIT LPFC_CQE_DEF_COUNT // 1024 +#define LPFC_CQ_MIN_THRESHOLD_TO_POLL 64 +#define LPFC_CQ_MAX_THRESHOLD_TO_POLL LPFC_CQ_DEF_MAX_PROC_LIMIT +#define LPFC_CQ_DEF_THRESHOLD_TO_POLL LPFC_CQ_DEF_MAX_PROC_LIMIT + uint32_t queue_claimed; /* indicates queue is being processed */ + uint32_t queue_id; /* Queue ID assigned by the hardware */ + uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ + uint32_t host_index; /* The host's index for putting or getting */ + uint32_t hba_index; /* The last known hba index for get or put */ + uint32_t q_mode; + + struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */ + struct lpfc_rqb *rqbp; /* ptr to RQ buffers */ + + uint16_t page_count; /* Number of pages allocated for this queue */ + uint16_t page_size; /* size of page allocated for this queue */ +#define LPFC_EXPANDED_PAGE_SIZE 16384 +#define LPFC_DEFAULT_PAGE_SIZE 4096 + uint16_t chann; /* Hardware Queue association WQ/CQ */ + /* CPU affinity for EQ */ +#define LPFC_FIND_BY_EQ 0 +#define LPFC_FIND_BY_HDWQ 1 + uint8_t db_format; +#define LPFC_DB_RING_FORMAT 0x01 +#define LPFC_DB_LIST_FORMAT 0x02 + uint8_t q_flag; +#define HBA_NVMET_WQFULL 0x1 /* We hit WQ Full condition for NVMET */ +#define HBA_NVMET_CQ_NOTIFY 0x1 /* LPFC_NVMET_CQ_NOTIFY CQEs this EQE */ +#define HBA_EQ_DELAY_CHK 0x2 /* EQ is a candidate for coalescing */ +#define LPFC_NVMET_CQ_NOTIFY 4 + void __iomem *db_regaddr; + uint16_t dpp_enable; + uint16_t dpp_id; + void __iomem *dpp_regaddr; + + /* For q stats */ + uint32_t q_cnt_1; + uint32_t q_cnt_2; + uint32_t q_cnt_3; + uint64_t q_cnt_4; +/* defines for EQ stats */ +#define EQ_max_eqe q_cnt_1 +#define EQ_no_entry q_cnt_2 +#define EQ_cqe_cnt q_cnt_3 +#define EQ_processed q_cnt_4 + +/* defines for CQ stats */ +#define CQ_mbox q_cnt_1 +#define CQ_max_cqe q_cnt_1 +#define CQ_release_wqe q_cnt_2 +#define CQ_xri_aborted q_cnt_3 +#define CQ_wq q_cnt_4 + +/* defines for WQ stats */ +#define WQ_overflow q_cnt_1 +#define WQ_posted q_cnt_4 + +/* defines for RQ stats */ +#define RQ_no_posted_buf q_cnt_1 +#define RQ_no_buf_found q_cnt_2 +#define RQ_buf_posted q_cnt_3 +#define RQ_rcv_buf q_cnt_4 + + struct work_struct irqwork; + struct work_struct spwork; + struct delayed_work sched_irqwork; + struct delayed_work sched_spwork; + + uint64_t isr_timestamp; + struct lpfc_queue *assoc_qp; + struct list_head _poll_list; + void **q_pgs; /* array to index entries per page */ + + enum lpfc_poll_mode poll_mode; +}; + +struct lpfc_sli4_link { + uint32_t speed; + uint8_t duplex; + uint8_t status; + uint8_t type; + uint8_t number; + uint8_t fault; + uint8_t link_status; + uint16_t topology; + uint32_t logical_speed; +}; + +struct lpfc_fcf_rec { + uint8_t fabric_name[8]; + uint8_t switch_name[8]; + uint8_t mac_addr[6]; + uint16_t fcf_indx; + uint32_t priority; + uint16_t vlan_id; + uint32_t addr_mode; + uint32_t flag; +#define BOOT_ENABLE 0x01 +#define RECORD_VALID 0x02 +}; + +struct lpfc_fcf_pri_rec { + uint16_t fcf_index; +#define LPFC_FCF_ON_PRI_LIST 0x0001 +#define LPFC_FCF_FLOGI_FAILED 0x0002 + uint16_t flag; + uint32_t priority; +}; + +struct lpfc_fcf_pri { + struct list_head list; + struct lpfc_fcf_pri_rec fcf_rec; +}; + +/* + * Maximum FCF table index, it is for driver internal book keeping, it + * just needs to be no less than the supported HBA's FCF table size. 
+ */ +#define LPFC_SLI4_FCF_TBL_INDX_MAX 32 + +struct lpfc_fcf { + uint16_t fcfi; + uint32_t fcf_flag; +#define FCF_AVAILABLE 0x01 /* FCF available for discovery */ +#define FCF_REGISTERED 0x02 /* FCF registered with FW */ +#define FCF_SCAN_DONE 0x04 /* FCF table scan done */ +#define FCF_IN_USE 0x08 /* Atleast one discovery completed */ +#define FCF_INIT_DISC 0x10 /* Initial FCF discovery */ +#define FCF_DEAD_DISC 0x20 /* FCF DEAD fast FCF failover discovery */ +#define FCF_ACVL_DISC 0x40 /* All CVL fast FCF failover discovery */ +#define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC) +#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */ +#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ +#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ +#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT) + uint16_t fcf_redisc_attempted; + uint32_t addr_mode; + uint32_t eligible_fcf_cnt; + struct lpfc_fcf_rec current_rec; + struct lpfc_fcf_rec failover_rec; + struct list_head fcf_pri_list; + struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX]; + uint32_t current_fcf_scan_pri; + struct timer_list redisc_wait; + unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */ +}; + + +#define LPFC_REGION23_SIGNATURE "RG23" +#define LPFC_REGION23_VERSION 1 +#define LPFC_REGION23_LAST_REC 0xff +#define DRIVER_SPECIFIC_TYPE 0xA2 +#define LINUX_DRIVER_ID 0x20 +#define PORT_STE_TYPE 0x1 + +struct lpfc_fip_param_hdr { + uint8_t type; +#define FCOE_PARAM_TYPE 0xA0 + uint8_t length; +#define FCOE_PARAM_LENGTH 2 + uint8_t parm_version; +#define FIPP_VERSION 0x01 + uint8_t parm_flags; +#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6 +#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3 +#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags +#define FIPP_MODE_ON 0x1 +#define FIPP_MODE_OFF 0x0 +#define FIPP_VLAN_VALID 0x1 +}; + +struct lpfc_fcoe_params { + uint8_t fc_map[3]; + uint8_t reserved1; + uint16_t vlan_tag; + uint8_t reserved[2]; +}; + +struct lpfc_fcf_conn_hdr { + uint8_t type; +#define FCOE_CONN_TBL_TYPE 0xA1 + uint8_t length; /* words */ + uint8_t reserved[2]; +}; + +struct lpfc_fcf_conn_rec { + uint16_t flags; +#define FCFCNCT_VALID 0x0001 +#define FCFCNCT_BOOT 0x0002 +#define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */ +#define FCFCNCT_FBNM_VALID 0x0008 +#define FCFCNCT_SWNM_VALID 0x0010 +#define FCFCNCT_VLAN_VALID 0x0020 +#define FCFCNCT_AM_VALID 0x0040 +#define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */ +#define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */ + + uint16_t vlan_tag; + uint8_t fabric_name[8]; + uint8_t switch_name[8]; +}; + +struct lpfc_fcf_conn_entry { + struct list_head list; + struct lpfc_fcf_conn_rec conn_rec; +}; + +/* + * Define the host's bootstrap mailbox. This structure contains + * the member attributes needed to create, use, and destroy the + * bootstrap mailbox region. + * + * The macro definitions for the bmbx data structure are defined + * in lpfc_hw4.h with the register definition. 
+ */ +struct lpfc_bmbx { + struct lpfc_dmabuf *dmabuf; + struct dma_address dma_address; + void *avirt; + dma_addr_t aphys; + uint32_t bmbx_size; +}; + +#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4 + +#define LPFC_EQE_SIZE_4B 4 +#define LPFC_EQE_SIZE_16B 16 +#define LPFC_CQE_SIZE 16 +#define LPFC_WQE_SIZE 64 +#define LPFC_WQE128_SIZE 128 +#define LPFC_MQE_SIZE 256 +#define LPFC_RQE_SIZE 8 + +#define LPFC_EQE_DEF_COUNT 1024 +#define LPFC_CQE_DEF_COUNT 1024 +#define LPFC_CQE_EXP_COUNT 4096 +#define LPFC_WQE_DEF_COUNT 256 +#define LPFC_WQE_EXP_COUNT 1024 +#define LPFC_MQE_DEF_COUNT 16 +#define LPFC_RQE_DEF_COUNT 512 + +#define LPFC_QUEUE_NOARM false +#define LPFC_QUEUE_REARM true + + +/* + * SLI4 CT field defines + */ +#define SLI4_CT_RPI 0 +#define SLI4_CT_VPI 1 +#define SLI4_CT_VFI 2 +#define SLI4_CT_FCFI 3 + +/* + * SLI4 specific data structures + */ +struct lpfc_max_cfg_param { + uint16_t max_xri; + uint16_t xri_base; + uint16_t xri_used; + uint16_t max_rpi; + uint16_t rpi_base; + uint16_t rpi_used; + uint16_t max_vpi; + uint16_t vpi_base; + uint16_t vpi_used; + uint16_t max_vfi; + uint16_t vfi_base; + uint16_t vfi_used; + uint16_t max_fcfi; + uint16_t fcfi_used; + uint16_t max_eq; + uint16_t max_rq; + uint16_t max_cq; + uint16_t max_wq; +}; + +struct lpfc_hba; +/* SLI4 HBA multi-fcp queue handler struct */ +#define LPFC_SLI4_HANDLER_NAME_SZ 16 +struct lpfc_hba_eq_hdl { + uint32_t idx; + int irq; + char handler_name[LPFC_SLI4_HANDLER_NAME_SZ]; + struct lpfc_hba *phba; + struct lpfc_queue *eq; + struct cpumask aff_mask; +}; + +#define lpfc_get_eq_hdl(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx]) +#define lpfc_get_aff_mask(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx].aff_mask) +#define lpfc_get_irq(eqidx) (phba->sli4_hba.hba_eq_hdl[eqidx].irq) + +/*BB Credit recovery value*/ +struct lpfc_bbscn_params { + uint32_t word0; +#define lpfc_bbscn_min_SHIFT 0 +#define lpfc_bbscn_min_MASK 0x0000000F +#define lpfc_bbscn_min_WORD word0 +#define lpfc_bbscn_max_SHIFT 4 +#define lpfc_bbscn_max_MASK 0x0000000F +#define lpfc_bbscn_max_WORD word0 +#define lpfc_bbscn_def_SHIFT 8 +#define lpfc_bbscn_def_MASK 0x0000000F +#define lpfc_bbscn_def_WORD word0 +}; + +/* Port Capabilities for SLI4 Parameters */ +struct lpfc_pc_sli4_params { + uint32_t supported; + uint32_t if_type; + uint32_t sli_rev; + uint32_t sli_family; + uint32_t featurelevel_1; + uint32_t featurelevel_2; + uint32_t proto_types; +#define LPFC_SLI4_PROTO_FCOE 0x0000001 +#define LPFC_SLI4_PROTO_FC 0x0000002 +#define LPFC_SLI4_PROTO_NIC 0x0000004 +#define LPFC_SLI4_PROTO_ISCSI 0x0000008 +#define LPFC_SLI4_PROTO_RDMA 0x0000010 + uint32_t sge_supp_len; + uint32_t if_page_sz; + uint32_t rq_db_window; + uint32_t loopbk_scope; + uint32_t oas_supported; + uint32_t eq_pages_max; + uint32_t eqe_size; + uint32_t cq_pages_max; + uint32_t cqe_size; + uint32_t mq_pages_max; + uint32_t mqe_size; + uint32_t mq_elem_cnt; + uint32_t wq_pages_max; + uint32_t wqe_size; + uint32_t rq_pages_max; + uint32_t rqe_size; + uint32_t hdr_pages_max; + uint32_t hdr_size; + uint32_t hdr_pp_align; + uint32_t sgl_pages_max; + uint32_t sgl_pp_align; + uint32_t mib_size; + uint16_t mi_ver; +#define LPFC_MIB1_SUPPORT 1 +#define LPFC_MIB2_SUPPORT 2 +#define LPFC_MIB3_SUPPORT 3 + uint16_t mi_value; +#define LPFC_DFLT_MIB_VAL 2 + uint8_t mi_cap; + uint8_t mib_bde_cnt; + uint8_t cmf; + uint8_t cqv; + uint8_t mqv; + uint8_t wqv; + uint8_t rqv; + uint8_t eqav; + uint8_t cqav; + uint8_t wqsize; + uint8_t bv1s; + uint8_t pls; +#define LPFC_WQ_SZ64_SUPPORT 1 +#define LPFC_WQ_SZ128_SUPPORT 2 + uint8_t 
wqpcnt; + uint8_t nvme; +}; + +#define LPFC_CQ_4K_PAGE_SZ 0x1 +#define LPFC_CQ_16K_PAGE_SZ 0x4 +#define LPFC_WQ_4K_PAGE_SZ 0x1 +#define LPFC_WQ_16K_PAGE_SZ 0x4 + +struct lpfc_iov { + uint32_t pf_number; + uint32_t vf_number; +}; + +struct lpfc_sli4_lnk_info { + uint8_t lnk_dv; +#define LPFC_LNK_DAT_INVAL 0 +#define LPFC_LNK_DAT_VAL 1 + uint8_t lnk_tp; +#define LPFC_LNK_GE 0x0 /* FCoE */ +#define LPFC_LNK_FC 0x1 /* FC */ +#define LPFC_LNK_FC_TRUNKED 0x2 /* FC_Trunked */ + uint8_t lnk_no; + uint8_t optic_state; +}; + +#define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \ + LPFC_FOF_IO_CHAN_NUM) + +/* Used for tracking CPU mapping attributes */ +struct lpfc_vector_map_info { + uint16_t phys_id; + uint16_t core_id; + uint16_t eq; + uint16_t hdwq; + uint16_t flag; +#define LPFC_CPU_MAP_HYPER 0x1 +#define LPFC_CPU_MAP_UNASSIGN 0x2 +#define LPFC_CPU_FIRST_IRQ 0x4 +}; +#define LPFC_VECTOR_MAP_EMPTY 0xffff + +#define LPFC_IRQ_EMPTY 0xffffffff + +/* Multi-XRI pool */ +#define XRI_BATCH 8 + +struct lpfc_pbl_pool { + struct list_head list; + u32 count; + spinlock_t lock; /* lock for pbl_pool*/ +}; + +struct lpfc_pvt_pool { + u32 low_watermark; + u32 high_watermark; + + struct list_head list; + u32 count; + spinlock_t lock; /* lock for pvt_pool */ +}; + +struct lpfc_multixri_pool { + u32 xri_limit; + + /* Starting point when searching a pbl_pool with round-robin method */ + u32 rrb_next_hwqid; + + /* Used by lpfc_adjust_pvt_pool_count. + * io_req_count is incremented by 1 during IO submission. The heartbeat + * handler uses these two variables to determine if pvt_pool is idle or + * busy. + */ + u32 prev_io_req_count; + u32 io_req_count; + + /* statistics */ + u32 pbl_empty_count; +#ifdef LPFC_MXP_STAT + u32 above_limit_count; + u32 below_limit_count; + u32 local_pbl_hit_count; + u32 other_pbl_hit_count; + u32 stat_max_hwm; + +#define LPFC_MXP_SNAPSHOT_TAKEN 3 /* snapshot is taken at 3rd heartbeats */ + u32 stat_pbl_count; + u32 stat_pvt_count; + u32 stat_busy_count; + u32 stat_snapshot_taken; +#endif + + /* TODO: Separate pvt_pool into get and put list */ + struct lpfc_pbl_pool pbl_pool; /* Public free XRI pool */ + struct lpfc_pvt_pool pvt_pool; /* Private free XRI pool */ +}; + +struct lpfc_fc4_ctrl_stat { + u32 input_requests; + u32 output_requests; + u32 control_requests; + u32 io_cmpls; +}; + +#ifdef LPFC_HDWQ_LOCK_STAT +struct lpfc_lock_stat { + uint32_t alloc_xri_get; + uint32_t alloc_xri_put; + uint32_t free_xri; + uint32_t wq_access; + uint32_t alloc_pvt_pool; + uint32_t mv_from_pvt_pool; + uint32_t mv_to_pub_pool; + uint32_t mv_to_pvt_pool; + uint32_t free_pub_pool; + uint32_t free_pvt_pool; +}; +#endif + +struct lpfc_eq_intr_info { + struct list_head list; + uint32_t icnt; +}; + +/* SLI4 HBA data structure entries */ +struct lpfc_sli4_hdw_queue { + /* Pointers to the constructed SLI4 queues */ + struct lpfc_queue *hba_eq; /* Event queues for HBA */ + struct lpfc_queue *io_cq; /* Fast-path FCP & NVME compl queue */ + struct lpfc_queue *io_wq; /* Fast-path FCP & NVME work queue */ + uint16_t io_cq_map; + + /* Keep track of IO buffers for this hardware queue */ + spinlock_t io_buf_list_get_lock; /* Common buf alloc list lock */ + struct list_head lpfc_io_buf_list_get; + spinlock_t io_buf_list_put_lock; /* Common buf free list lock */ + struct list_head lpfc_io_buf_list_put; + spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */ + struct list_head lpfc_abts_io_buf_list; + uint32_t total_io_bufs; + uint32_t get_io_bufs; + uint32_t put_io_bufs; + uint32_t empty_io_bufs; + uint32_t 
abts_scsi_io_bufs; + uint32_t abts_nvme_io_bufs; + + /* Multi-XRI pool per HWQ */ + struct lpfc_multixri_pool *p_multixri_pool; + + /* FC-4 Stats counters */ + struct lpfc_fc4_ctrl_stat nvme_cstat; + struct lpfc_fc4_ctrl_stat scsi_cstat; +#ifdef LPFC_HDWQ_LOCK_STAT + struct lpfc_lock_stat lock_conflict; +#endif + + /* Per HDWQ pool resources */ + struct list_head sgl_list; + struct list_head cmd_rsp_buf_list; + + /* Lock for syncing Per HDWQ pool resources */ + spinlock_t hdwq_lock; +}; + +#ifdef LPFC_HDWQ_LOCK_STAT +/* compile time trylock stats */ +#define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \ + { \ + int only_once = 1; \ + while (spin_trylock_irqsave(lock, flag) == 0) { \ + if (only_once) { \ + only_once = 0; \ + qp->lock_conflict.lstat++; \ + } \ + } \ + } +#define lpfc_qp_spin_lock(lock, qp, lstat) \ + { \ + int only_once = 1; \ + while (spin_trylock(lock) == 0) { \ + if (only_once) { \ + only_once = 0; \ + qp->lock_conflict.lstat++; \ + } \ + } \ + } +#else +#define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \ + spin_lock_irqsave(lock, flag) +#define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock) +#endif + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +struct lpfc_hdwq_stat { + u32 hdwq_no; + u32 rcv_io; + u32 xmt_io; + u32 cmpl_io; +}; +#endif + +struct lpfc_sli4_hba { + void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for + * config space registers + */ + void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for + * control registers + */ + void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for + * doorbell registers + */ + void __iomem *dpp_regs_memmap_p; /* Kernel memory mapped address for + * dpp registers + */ + union { + struct { + /* IF Type 0, BAR 0 PCI cfg space reg mem map */ + void __iomem *UERRLOregaddr; + void __iomem *UERRHIregaddr; + void __iomem *UEMASKLOregaddr; + void __iomem *UEMASKHIregaddr; + } if_type0; + struct { + /* IF Type 2, BAR 0 PCI cfg space reg mem map. */ + void __iomem *STATUSregaddr; + void __iomem *CTRLregaddr; + void __iomem *ERR1regaddr; +#define SLIPORT_ERR1_REG_ERR_CODE_1 0x1 +#define SLIPORT_ERR1_REG_ERR_CODE_2 0x2 + void __iomem *ERR2regaddr; +#define SLIPORT_ERR2_REG_FW_RESTART 0x0 +#define SLIPORT_ERR2_REG_FUNC_PROVISON 0x1 +#define SLIPORT_ERR2_REG_FORCED_DUMP 0x2 +#define SLIPORT_ERR2_REG_FAILURE_EQ 0x3 +#define SLIPORT_ERR2_REG_FAILURE_CQ 0x4 +#define SLIPORT_ERR2_REG_FAILURE_BUS 0x5 +#define SLIPORT_ERR2_REG_FAILURE_RQ 0x6 + void __iomem *EQDregaddr; + } if_type2; + } u; + + /* IF type 0, BAR1 and if type 2, Bar 0 CSR register memory map */ + void __iomem *PSMPHRregaddr; + + /* Well-known SLI INTF register memory map. 
*/ + void __iomem *SLIINTFregaddr; + + /* IF type 0, BAR 1 function CSR register memory map */ + void __iomem *ISRregaddr; /* HST_ISR register */ + void __iomem *IMRregaddr; /* HST_IMR register */ + void __iomem *ISCRregaddr; /* HST_ISCR register */ + /* IF type 0, BAR 0 and if type 2, BAR 0 doorbell register memory map */ + void __iomem *RQDBregaddr; /* RQ_DOORBELL register */ + void __iomem *WQDBregaddr; /* WQ_DOORBELL register */ + void __iomem *CQDBregaddr; /* CQ_DOORBELL register */ + void __iomem *EQDBregaddr; /* EQ_DOORBELL register */ + void __iomem *MQDBregaddr; /* MQ_DOORBELL register */ + void __iomem *BMBXregaddr; /* BootStrap MBX register */ + + uint32_t ue_mask_lo; + uint32_t ue_mask_hi; + uint32_t ue_to_sr; + uint32_t ue_to_rp; + struct lpfc_register sli_intf; + struct lpfc_pc_sli4_params pc_sli4_params; + struct lpfc_bbscn_params bbscn_params; + struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */ + + void (*sli4_eq_clr_intr)(struct lpfc_queue *q); + void (*sli4_write_eq_db)(struct lpfc_hba *phba, struct lpfc_queue *eq, + uint32_t count, bool arm); + void (*sli4_write_cq_db)(struct lpfc_hba *phba, struct lpfc_queue *cq, + uint32_t count, bool arm); + + /* Pointers to the constructed SLI4 queues */ + struct lpfc_sli4_hdw_queue *hdwq; + struct list_head lpfc_wq_list; + + /* Pointers to the constructed SLI4 queues for NVMET */ + struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */ + struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */ + struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */ + + struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ + struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ + struct lpfc_queue *nvmels_cq; /* NVME LS complete queue */ + struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */ + struct lpfc_queue *els_wq; /* Slow-path ELS work queue */ + struct lpfc_queue *nvmels_wq; /* NVME LS work queue */ + struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ + struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ + + struct lpfc_name wwnn; + struct lpfc_name wwpn; + + uint32_t fw_func_mode; /* FW function protocol mode */ + uint32_t ulp0_mode; /* ULP0 protocol mode */ + uint32_t ulp1_mode; /* ULP1 protocol mode */ + + /* Optimized Access Storage specific queues/structures */ + uint64_t oas_next_lun; + uint8_t oas_next_tgt_wwpn[8]; + uint8_t oas_next_vpt_wwpn[8]; + + /* Setup information for various queue parameters */ + int eq_esize; + int eq_ecount; + int cq_esize; + int cq_ecount; + int wq_esize; + int wq_ecount; + int mq_esize; + int mq_ecount; + int rq_esize; + int rq_ecount; +#define LPFC_SP_EQ_MAX_INTR_SEC 10000 +#define LPFC_FP_EQ_MAX_INTR_SEC 10000 + + uint32_t intr_enable; + struct lpfc_bmbx bmbx; + struct lpfc_max_cfg_param max_cfg_param; + uint16_t extents_in_use; /* must allocate resource extents. */ + uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. 
*/ + uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */ + uint16_t next_rpi; + uint16_t io_xri_max; + uint16_t io_xri_cnt; + uint16_t io_xri_start; + uint16_t els_xri_cnt; + uint16_t nvmet_xri_cnt; + uint16_t nvmet_io_wait_cnt; + uint16_t nvmet_io_wait_total; + uint16_t cq_max; + struct lpfc_queue **cq_lookup; + struct list_head lpfc_els_sgl_list; + struct list_head lpfc_abts_els_sgl_list; + spinlock_t abts_io_buf_list_lock; /* list of aborted SCSI IOs */ + struct list_head lpfc_abts_io_buf_list; + struct list_head lpfc_nvmet_sgl_list; + spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */ + struct list_head lpfc_abts_nvmet_ctx_list; + spinlock_t t_active_list_lock; /* list of active NVMET IOs */ + struct list_head t_active_ctx_list; + struct list_head lpfc_nvmet_io_wait_list; + struct lpfc_nvmet_ctx_info *nvmet_ctx_info; + struct lpfc_sglq **lpfc_sglq_active_list; + struct list_head lpfc_rpi_hdr_list; + unsigned long *rpi_bmask; + uint16_t *rpi_ids; + uint16_t rpi_count; + struct list_head lpfc_rpi_blk_list; + unsigned long *xri_bmask; + uint16_t *xri_ids; + struct list_head lpfc_xri_blk_list; + unsigned long *vfi_bmask; + uint16_t *vfi_ids; + uint16_t vfi_count; + struct list_head lpfc_vfi_blk_list; + struct lpfc_sli4_flags sli4_flags; + struct list_head sp_queue_event; + struct list_head sp_cqe_event_pool; + struct list_head sp_asynce_work_queue; + spinlock_t asynce_list_lock; /* protect sp_asynce_work_queue list */ + struct list_head sp_els_xri_aborted_work_queue; + spinlock_t els_xri_abrt_list_lock; /* protect els_xri_aborted list */ + struct list_head sp_unsol_work_queue; + struct lpfc_sli4_link link_state; + struct lpfc_sli4_lnk_info lnk_info; + uint32_t pport_name_sta; +#define LPFC_SLI4_PPNAME_NON 0 +#define LPFC_SLI4_PPNAME_GET 1 + struct lpfc_iov iov; + spinlock_t sgl_list_lock; /* list of aborted els IOs */ + spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */ + uint32_t physical_port; + + /* CPU to vector mapping information */ + struct lpfc_vector_map_info *cpu_map; + uint16_t num_possible_cpu; + uint16_t num_present_cpu; + struct cpumask irq_aff_mask; + uint16_t curr_disp_cpu; + struct lpfc_eq_intr_info __percpu *eq_info; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + struct lpfc_hdwq_stat __percpu *c_stat; +#endif + struct lpfc_idle_stat *idle_stat; + uint32_t conf_trunk; +#define lpfc_conf_trunk_port0_WORD conf_trunk +#define lpfc_conf_trunk_port0_SHIFT 0 +#define lpfc_conf_trunk_port0_MASK 0x1 +#define lpfc_conf_trunk_port1_WORD conf_trunk +#define lpfc_conf_trunk_port1_SHIFT 1 +#define lpfc_conf_trunk_port1_MASK 0x1 +#define lpfc_conf_trunk_port2_WORD conf_trunk +#define lpfc_conf_trunk_port2_SHIFT 2 +#define lpfc_conf_trunk_port2_MASK 0x1 +#define lpfc_conf_trunk_port3_WORD conf_trunk +#define lpfc_conf_trunk_port3_SHIFT 3 +#define lpfc_conf_trunk_port3_MASK 0x1 +#define lpfc_conf_trunk_port0_nd_WORD conf_trunk +#define lpfc_conf_trunk_port0_nd_SHIFT 4 +#define lpfc_conf_trunk_port0_nd_MASK 0x1 +#define lpfc_conf_trunk_port1_nd_WORD conf_trunk +#define lpfc_conf_trunk_port1_nd_SHIFT 5 +#define lpfc_conf_trunk_port1_nd_MASK 0x1 +#define lpfc_conf_trunk_port2_nd_WORD conf_trunk +#define lpfc_conf_trunk_port2_nd_SHIFT 6 +#define lpfc_conf_trunk_port2_nd_MASK 0x1 +#define lpfc_conf_trunk_port3_nd_WORD conf_trunk +#define lpfc_conf_trunk_port3_nd_SHIFT 7 +#define lpfc_conf_trunk_port3_nd_MASK 0x1 + uint8_t flash_id; + uint8_t asic_rev; + uint16_t fawwpn_flag; /* FA-WWPN support state */ +#define LPFC_FAWWPN_CONFIG 0x1 /* FA-PWWN is 
configured */ +#define LPFC_FAWWPN_FABRIC 0x2 /* FA-PWWN success with Fabric */ +}; + +enum lpfc_sge_type { + GEN_BUFF_TYPE, + SCSI_BUFF_TYPE, + NVMET_BUFF_TYPE +}; + +enum lpfc_sgl_state { + SGL_FREED, + SGL_ALLOCATED, + SGL_XRI_ABORTED +}; + +struct lpfc_sglq { + /* lpfc_sglqs are used in double linked lists */ + struct list_head list; + struct list_head clist; + enum lpfc_sge_type buff_type; /* is this a scsi sgl */ + enum lpfc_sgl_state state; + struct lpfc_nodelist *ndlp; /* ndlp associated with IO */ + uint16_t iotag; /* pre-assigned IO tag */ + uint16_t sli4_lxritag; /* logical pre-assigned xri. */ + uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ + struct sli4_sge *sgl; /* pre-assigned SGL */ + void *virt; /* virtual address. */ + dma_addr_t phys; /* physical address */ +}; + +struct lpfc_rpi_hdr { + struct list_head list; + uint32_t len; + struct lpfc_dmabuf *dmabuf; + uint32_t page_count; + uint32_t start_rpi; + uint16_t next_rpi; +}; + +struct lpfc_rsrc_blks { + struct list_head list; + uint16_t rsrc_start; + uint16_t rsrc_size; + uint16_t rsrc_used; +}; + +struct lpfc_rdp_context { + struct lpfc_nodelist *ndlp; + uint16_t ox_id; + uint16_t rx_id; + READ_LNK_VAR link_stat; + uint8_t page_a0[DMP_SFF_PAGE_A0_SIZE]; + uint8_t page_a2[DMP_SFF_PAGE_A2_SIZE]; + void (*cmpl)(struct lpfc_hba *, struct lpfc_rdp_context*, int); +}; + +struct lpfc_lcb_context { + uint8_t sub_command; + uint8_t type; + uint8_t capability; + uint8_t frequency; + uint16_t duration; + uint16_t ox_id; + uint16_t rx_id; + struct lpfc_nodelist *ndlp; +}; + + +/* + * SLI4 specific function prototypes + */ +int lpfc_pci_function_reset(struct lpfc_hba *); +int lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *); +int lpfc_sli4_hba_setup(struct lpfc_hba *); +int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t, + uint8_t, uint32_t, bool); +void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *); +void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); +void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, + struct lpfc_mbx_sge *); +int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *, + uint16_t); + +void lpfc_sli4_hba_reset(struct lpfc_hba *); +struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *phba, + uint32_t page_size, + uint32_t entry_size, + uint32_t entry_count, int cpu); +void lpfc_sli4_queue_free(struct lpfc_queue *); +int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t); +void lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, + uint32_t numq, uint32_t usdelay); +int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, uint32_t, uint32_t); +int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, + struct lpfc_sli4_hdw_queue *hdwq, uint32_t type, + uint32_t subtype); +int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, uint32_t); +int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, uint32_t); +int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, struct lpfc_queue *, uint32_t); +int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, + struct lpfc_queue **drqp, struct lpfc_queue **cqp, + uint32_t subtype); +int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); +int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); +int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *); +int lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *); 
+int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *); +int lpfc_sli4_queue_setup(struct lpfc_hba *); +void lpfc_sli4_queue_unset(struct lpfc_hba *); +int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); +int lpfc_repost_io_sgl_list(struct lpfc_hba *phba); +uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); +void lpfc_sli4_free_xri(struct lpfc_hba *, int); +int lpfc_sli4_post_async_mbox(struct lpfc_hba *); +struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); +struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *); +void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); +void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); +int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *); +int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *); +int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *); +struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *); +void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *); +int lpfc_sli4_alloc_rpi(struct lpfc_hba *); +void lpfc_sli4_free_rpi(struct lpfc_hba *, int); +void lpfc_sli4_remove_rpis(struct lpfc_hba *); +void lpfc_sli4_async_event_proc(struct lpfc_hba *); +void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *); +int lpfc_sli4_resume_rpi(struct lpfc_nodelist *, + void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *); +void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba); +void lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba, + struct lpfc_io_buf *lpfc_ncmd); +void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri, + struct lpfc_io_buf *lpfc_ncmd); +void lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri, int idx); +void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri); +void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, + struct sli4_wcqe_xri_aborted *); +void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *); +void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *); +int lpfc_sli4_brdreset(struct lpfc_hba *); +int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *); +void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *); +int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *); +int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba); +int lpfc_sli4_init_vpi(struct lpfc_vport *); +void lpfc_sli4_eq_clr_intr(struct lpfc_queue *); +void lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm); +void lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm); +void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q); +void lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm); +void lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm); +void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); +int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t); +int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t); +int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t); +void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_sli4_unregister_fcf(struct lpfc_hba *); +int lpfc_sli4_post_status_check(struct lpfc_hba *); +uint8_t 
lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *); +uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba); +struct sli4_hybrid_sgl *lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, + struct lpfc_io_buf *buf); +struct fcp_cmd_rsp_buf *lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, + struct lpfc_io_buf *buf); +int lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *buf); +int lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, + struct lpfc_io_buf *buf); +void lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba, + struct lpfc_sli4_hdw_queue *hdwq); +void lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, + struct lpfc_sli4_hdw_queue *hdwq); +static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx) +{ + return q->q_pgs[idx / q->entry_cnt_per_pg] + + (q->entry_size * (idx % q->entry_cnt_per_pg)); +} + +/** + * lpfc_sli4_unrecoverable_port - Check ERR and RN bits in portstat_reg + * @portstat_reg: portstat_reg pointer containing portstat_reg contents + * + * Description: + * Use only for SLI4 interface type-2 or later. If ERR is set && RN is 0, then + * port is deemed unrecoverable. + * + * Returns: + * true - ERR && !RN + * false - otherwise + */ +static inline bool +lpfc_sli4_unrecoverable_port(struct lpfc_register *portstat_reg) +{ + return bf_get(lpfc_sliport_status_err, portstat_reg) && + !bf_get(lpfc_sliport_status_rn, portstat_reg); +} diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h new file mode 100644 index 000000000..13a547277 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -0,0 +1,37 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#define LPFC_DRIVER_VERSION "14.2.0.14" +#define LPFC_DRIVER_NAME "lpfc" + +/* Used for SLI 2/3 */ +#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" +#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" + +/* Used for SLI4 */ +#define LPFC_DRIVER_HANDLER_NAME "lpfc:" + +#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ + LPFC_DRIVER_VERSION +#define LPFC_COPYRIGHT "Copyright (C) 2017-2023 Broadcom. All Rights " \ + "Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \ + "and/or its subsidiaries." 
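The lpfc_sli4_qe() helper defined above resolves a queue entry from the q_pgs page-pointer array: the entry index is split into a page number (idx / entry_cnt_per_pg) and a byte offset within that page (entry_size * (idx % entry_cnt_per_pg)). The short user-space sketch below illustrates only that addressing arithmetic; the names demo_qe, PAGE_BYTES, ENTRY_SIZE and ENTRIES_PER_PAGE are stand-ins chosen for the illustration and are not part of the driver or of this patch.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_BYTES        4096                     /* cf. LPFC_DEFAULT_PAGE_SIZE */
#define ENTRY_SIZE        64                       /* cf. LPFC_WQE_SIZE */
#define ENTRIES_PER_PAGE  (PAGE_BYTES / ENTRY_SIZE)

/* Mirror of the lpfc_sli4_qe() computation on a plain page-pointer array:
 * pages[idx / entries_per_page] + entry_size * (idx % entries_per_page)
 */
static void *demo_qe(void **pages, unsigned int idx)
{
	return (char *)pages[idx / ENTRIES_PER_PAGE] +
	       ENTRY_SIZE * (idx % ENTRIES_PER_PAGE);
}

int main(void)
{
	void *pages[2];
	unsigned int idx = 70;	/* 64 entries fill page 0, so this is entry 6 of page 1 */
	void *entry;

	pages[0] = calloc(1, PAGE_BYTES);
	pages[1] = calloc(1, PAGE_BYTES);
	if (!pages[0] || !pages[1])
		return 1;

	entry = demo_qe(pages, idx);
	printf("entry %u -> page %u, byte offset %u (%p)\n", idx,
	       idx / ENTRIES_PER_PAGE, ENTRY_SIZE * (idx % ENTRIES_PER_PAGE),
	       entry);

	free(pages[0]);
	free(pages[1]);
	return 0;
}

Splitting a queue across page-sized chunks this way lets entry_count grow without requiring one large contiguous allocation; each element of q_pgs indexes entries within one page from the queue's page_list.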
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c new file mode 100644 index 000000000..cf8ba840d --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_vmid.c @@ -0,0 +1,325 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include +#include + +#include + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_crtn.h" + + +/* + * lpfc_get_vmid_from_hashtable - search the UUID in the hash table + * @vport: The virtual port for which this call is being executed. + * @hash: calculated hash value + * @buf: uuid associated with the VE + * Return the VMID entry associated with the UUID + * Make sure to acquire the appropriate lock before invoking this routine. + */ +struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport, + u32 hash, u8 *buf) +{ + struct lpfc_vmid *vmp; + + hash_for_each_possible(vport->hash_table, vmp, hnode, hash) { + if (memcmp(&vmp->host_vmid[0], buf, 16) == 0) + return vmp; + } + return NULL; +} + +/* + * lpfc_put_vmid_in_hashtable - put the VMID in the hash table + * @vport: The virtual port for which this call is being executed. + * @hash - calculated hash value + * @vmp: Pointer to a VMID entry representing a VM sending I/O + * + * This routine will insert the newly acquired VMID entity in the hash table. + * Make sure to acquire the appropriate lock before invoking this routine. + */ +static void +lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash, + struct lpfc_vmid *vmp) +{ + hash_add(vport->hash_table, &vmp->hnode, hash); +} + +/* + * lpfc_vmid_hash_fn - create a hash value of the UUID + * @vmid: uuid associated with the VE + * @len: length of the VMID string + * Returns the calculated hash value + */ +int lpfc_vmid_hash_fn(const char *vmid, int len) +{ + int c; + int hash = 0; + + if (len == 0) + return 0; + while (len--) { + c = *vmid++; + if (c >= 'A' && c <= 'Z') + c += 'a' - 'A'; + + hash = (hash + (c << LPFC_VMID_HASH_SHIFT) + + (c >> LPFC_VMID_HASH_SHIFT)) * 19; + } + + return hash & LPFC_VMID_HASH_MASK; +} + +/* + * lpfc_vmid_update_entry - update the vmid entry in the hash table + * @vport: The virtual port for which this call is being executed. 
+ * @iodir: io direction + * @vmp: Pointer to a VMID entry representing a VM sending I/O + * @tag: VMID tag + */ +static void lpfc_vmid_update_entry(struct lpfc_vport *vport, + enum dma_data_direction iodir, + struct lpfc_vmid *vmp, + union lpfc_vmid_io_tag *tag) +{ + u64 *lta; + + if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) + tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid; + else if (vport->phba->cfg_vmid_app_header) + tag->app_id = vmp->un.app_id; + + if (iodir == DMA_TO_DEVICE) + vmp->io_wr_cnt++; + else if (iodir == DMA_FROM_DEVICE) + vmp->io_rd_cnt++; + + /* update the last access timestamp in the table */ + lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id()); + *lta = jiffies; +} + +static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport, + struct lpfc_vmid *vmid) +{ + u32 hash; + struct lpfc_vmid *pvmid; + + if (vport->port_type == LPFC_PHYSICAL_PORT) { + vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport); + } else { + hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len); + pvmid = + lpfc_get_vmid_from_hashtable(vport->phba->pport, hash, + vmid->host_vmid); + if (pvmid) + vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid; + else + vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport); + } +} + +/* + * lpfc_vmid_get_appid - get the VMID associated with the UUID + * @vport: The virtual port for which this call is being executed. + * @uuid: UUID associated with the VE + * @cmd: address of scsi_cmd descriptor + * @iodir: io direction + * @tag: VMID tag + * Returns status of the function + */ +int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, + enum dma_data_direction iodir, + union lpfc_vmid_io_tag *tag) +{ + struct lpfc_vmid *vmp = NULL; + int hash, len, rc = -EPERM, i; + + /* check if QFPA is complete */ + if (lpfc_vmid_is_type_priority_tag(vport) && + !(vport->vmid_flag & LPFC_VMID_QFPA_CMPL) && + (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA)) { + vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; + return -EAGAIN; + } + + /* search if the UUID has already been mapped to the VMID */ + len = strlen(uuid); + hash = lpfc_vmid_hash_fn(uuid, len); + + /* search for the VMID in the table */ + read_lock(&vport->vmid_lock); + vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid); + + /* if found, check if its already registered */ + if (vmp && vmp->flag & LPFC_VMID_REGISTERED) { + read_unlock(&vport->vmid_lock); + lpfc_vmid_update_entry(vport, iodir, vmp, tag); + rc = 0; + } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER || + vmp->flag & LPFC_VMID_DE_REGISTER)) { + /* else if register or dereg request has already been sent */ + /* Hence VMID tag will not be added for this I/O */ + read_unlock(&vport->vmid_lock); + rc = -EBUSY; + } else { + /* The VMID was not found in the hashtable. 
At this point, */ + /* drop the read lock first before proceeding further */ + read_unlock(&vport->vmid_lock); + /* start the process to obtain one as per the */ + /* type of the VMID indicated */ + write_lock(&vport->vmid_lock); + vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid); + + /* while the read lock was released, in case the entry was */ + /* added by other context or is in process of being added */ + if (vmp && vmp->flag & LPFC_VMID_REGISTERED) { + lpfc_vmid_update_entry(vport, iodir, vmp, tag); + write_unlock(&vport->vmid_lock); + return 0; + } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) { + write_unlock(&vport->vmid_lock); + return -EBUSY; + } + + /* else search and allocate a free slot in the hash table */ + if (vport->cur_vmid_cnt < vport->max_vmid) { + for (i = 0; i < vport->max_vmid; i++) { + vmp = vport->vmid + i; + if (vmp->flag == LPFC_VMID_SLOT_FREE) + break; + } + if (i == vport->max_vmid) + vmp = NULL; + } else { + vmp = NULL; + } + + if (!vmp) { + write_unlock(&vport->vmid_lock); + return -ENOMEM; + } + + /* Add the vmid and register */ + lpfc_put_vmid_in_hashtable(vport, hash, vmp); + vmp->vmid_len = len; + memcpy(vmp->host_vmid, uuid, vmp->vmid_len); + vmp->io_rd_cnt = 0; + vmp->io_wr_cnt = 0; + vmp->flag = LPFC_VMID_SLOT_USED; + + vmp->delete_inactive = + vport->vmid_inactivity_timeout ? 1 : 0; + + /* if type priority tag, get next available VMID */ + if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) + lpfc_vmid_assign_cs_ctl(vport, vmp); + + /* allocate the per cpu variable for holding */ + /* the last access time stamp only if VMID is enabled */ + if (!vmp->last_io_time) + vmp->last_io_time = alloc_percpu_gfp(u64, GFP_ATOMIC); + if (!vmp->last_io_time) { + hash_del(&vmp->hnode); + vmp->flag = LPFC_VMID_SLOT_FREE; + write_unlock(&vport->vmid_lock); + return -EIO; + } + + write_unlock(&vport->vmid_lock); + + /* complete transaction with switch */ + if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) + rc = lpfc_vmid_uvem(vport, vmp, true); + else if (vport->phba->cfg_vmid_app_header) + rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp); + if (!rc) { + write_lock(&vport->vmid_lock); + vport->cur_vmid_cnt++; + vmp->flag |= LPFC_VMID_REQ_REGISTER; + write_unlock(&vport->vmid_lock); + } else { + write_lock(&vport->vmid_lock); + hash_del(&vmp->hnode); + vmp->flag = LPFC_VMID_SLOT_FREE; + free_percpu(vmp->last_io_time); + write_unlock(&vport->vmid_lock); + return -EIO; + } + + /* finally, enable the idle timer once */ + if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) { + mod_timer(&vport->phba->inactive_vmid_poll, + jiffies + + msecs_to_jiffies(1000 * LPFC_VMID_TIMER)); + vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD; + } + } + return rc; +} + +/* + * lpfc_reinit_vmid - reinitializes the vmid data structure + * @vport: pointer to vport data structure + * + * This routine reinitializes the vmid post flogi completion + * + * Return codes + * None + */ +void +lpfc_reinit_vmid(struct lpfc_vport *vport) +{ + u32 bucket, i, cpu; + struct lpfc_vmid *cur; + struct lpfc_vmid *vmp = NULL; + struct hlist_node *tmp; + + write_lock(&vport->vmid_lock); + vport->cur_vmid_cnt = 0; + + for (i = 0; i < vport->max_vmid; i++) { + vmp = &vport->vmid[i]; + vmp->flag = LPFC_VMID_SLOT_FREE; + memset(vmp->host_vmid, 0, sizeof(vmp->host_vmid)); + vmp->io_rd_cnt = 0; + vmp->io_wr_cnt = 0; + + if (vmp->last_io_time) + for_each_possible_cpu(cpu) + *per_cpu_ptr(vmp->last_io_time, cpu) = 0; + } + + /* for all elements in the hash table */ + if 
(!hash_empty(vport->hash_table)) + hash_for_each_safe(vport->hash_table, bucket, tmp, cur, hnode) + hash_del(&cur->hnode); + write_unlock(&vport->vmid_lock); +} diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c new file mode 100644 index 000000000..6c7559cf1 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -0,0 +1,801 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc_scsi.h" +#include "lpfc.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_version.h" +#include "lpfc_vport.h" + +inline void lpfc_vport_set_state(struct lpfc_vport *vport, + enum fc_vport_state new_state) +{ + struct fc_vport *fc_vport = vport->fc_vport; + + if (fc_vport) { + /* + * When the transport defines fc_vport_set state we will replace + * this code with the following line + */ + /* fc_vport_set_state(fc_vport, new_state); */ + if (new_state != FC_VPORT_INITIALIZING) + fc_vport->vport_last_state = fc_vport->vport_state; + fc_vport->vport_state = new_state; + } + + /* for all the error states we will set the invternal state to FAILED */ + switch (new_state) { + case FC_VPORT_NO_FABRIC_SUPP: + case FC_VPORT_NO_FABRIC_RSCS: + case FC_VPORT_FABRIC_LOGOUT: + case FC_VPORT_FABRIC_REJ_WWN: + case FC_VPORT_FAILED: + vport->port_state = LPFC_VPORT_FAILED; + break; + case FC_VPORT_LINKDOWN: + vport->port_state = LPFC_VPORT_UNKNOWN; + break; + default: + /* do nothing */ + break; + } +} + +int +lpfc_alloc_vpi(struct lpfc_hba *phba) +{ + unsigned long vpi; + + spin_lock_irq(&phba->hbalock); + /* Start at bit 1 because vpi zero is reserved for the physical port */ + vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1); + if (vpi > phba->max_vpi) + vpi = 0; + else + set_bit(vpi, phba->vpi_bmask); + if (phba->sli_rev == LPFC_SLI_REV4) + phba->sli4_hba.max_cfg_param.vpi_used++; + spin_unlock_irq(&phba->hbalock); + return vpi; +} + +static void +lpfc_free_vpi(struct lpfc_hba *phba, int vpi) +{ + if (vpi == 0) + return; + spin_lock_irq(&phba->hbalock); + clear_bit(vpi, 
phba->vpi_bmask); + if (phba->sli_rev == LPFC_SLI_REV4) + phba->sli4_hba.max_cfg_param.vpi_used--; + spin_unlock_irq(&phba->hbalock); +} + +static int +lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport) +{ + LPFC_MBOXQ_t *pmb; + MAILBOX_t *mb; + struct lpfc_dmabuf *mp; + int rc; + + pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + return -ENOMEM; + } + mb = &pmb->u.mb; + + rc = lpfc_read_sparam(phba, pmb, vport->vpi); + if (rc) { + mempool_free(pmb, phba->mbox_mem_pool); + return -ENOMEM; + } + + /* + * Wait for the read_sparams mailbox to complete. Driver needs + * this per vport to start the FDISC. If the mailbox fails, + * just cleanup and return an error unless the failure is a + * mailbox timeout. For MBX_TIMEOUT, allow the default + * mbox completion handler to take care of the cleanup. This + * is safe as the mailbox command isn't one that triggers + * another mailbox. + */ + pmb->vport = vport; + rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2); + if (rc != MBX_SUCCESS) { + if (signal_pending(current)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "1830 Signal aborted mbxCmd x%x\n", + mb->mbxCommand); + if (rc != MBX_TIMEOUT) + lpfc_mbox_rsrc_cleanup(phba, pmb, + MBOX_THD_UNLOCKED); + return -EINTR; + } else { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "1818 VPort failed init, mbxCmd x%x " + "READ_SPARM mbxStatus x%x, rc = x%x\n", + mb->mbxCommand, mb->mbxStatus, rc); + if (rc != MBX_TIMEOUT) + lpfc_mbox_rsrc_cleanup(phba, pmb, + MBOX_THD_UNLOCKED); + return -EIO; + } + } + + mp = (struct lpfc_dmabuf *)pmb->ctx_buf; + memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); + memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, + sizeof (struct lpfc_name)); + memcpy(&vport->fc_portname, &vport->fc_sparam.portName, + sizeof (struct lpfc_name)); + lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); + return 0; +} + +static int +lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn, + const char *name_type) +{ + /* ensure that IEEE format 1 addresses + * contain zeros in bits 59-48 + */ + if (!((wwn->u.wwn[0] >> 4) == 1 && + ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0))) + return 1; + + lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + "1822 Invalid %s: %02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x\n", + name_type, + wwn->u.wwn[0], wwn->u.wwn[1], + wwn->u.wwn[2], wwn->u.wwn[3], + wwn->u.wwn[4], wwn->u.wwn[5], + wwn->u.wwn[6], wwn->u.wwn[7]); + return 0; +} + +static int +lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport) +{ + struct lpfc_vport *vport; + unsigned long flags; + + spin_lock_irqsave(&phba->port_list_lock, flags); + list_for_each_entry(vport, &phba->port_list, listentry) { + if (vport == new_vport) + continue; + /* If they match, return not unique */ + if (memcmp(&vport->fc_sparam.portName, + &new_vport->fc_sparam.portName, + sizeof(struct lpfc_name)) == 0) { + spin_unlock_irqrestore(&phba->port_list_lock, flags); + return 0; + } + } + spin_unlock_irqrestore(&phba->port_list_lock, flags); + return 1; +} + +/** + * lpfc_discovery_wait - Wait for driver discovery to quiesce + * @vport: The virtual port for which this call is being executed. + * + * This driver calls this routine specifically from lpfc_vport_delete + * to enforce a synchronous execution of vport + * delete relative to discovery activities. The + * lpfc_vport_delete routine should not return until it + * can reasonably guarantee that discovery has quiesced. 
+ * Post FDISC LOGO, the driver must wait until its SAN teardown is + * complete and all resources recovered before allowing + * cleanup. + * + * This routine does not require any locks held. + **/ +static void lpfc_discovery_wait(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + uint32_t wait_flags = 0; + unsigned long wait_time_max; + unsigned long start_time; + + wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE | + FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO; + + /* + * The time constraint on this loop is a balance between the + * fabric RA_TOV value and dev_loss tmo. The driver's + * devloss_tmo is 10 giving this loop a 3x multiplier minimally. + */ + wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000); + wait_time_max += jiffies; + start_time = jiffies; + while (time_before(jiffies, wait_time_max)) { + if ((vport->num_disc_nodes > 0) || + (vport->fc_flag & wait_flags) || + ((vport->port_state > LPFC_VPORT_FAILED) && + (vport->port_state < LPFC_VPORT_READY))) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, + "1833 Vport discovery quiesce Wait:" + " state x%x fc_flags x%x" + " num_nodes x%x, waiting 1000 msecs" + " total wait msecs x%x\n", + vport->port_state, vport->fc_flag, + vport->num_disc_nodes, + jiffies_to_msecs(jiffies - start_time)); + msleep(1000); + } else { + /* Base case. Wait variants satisfied. Break out */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, + "1834 Vport discovery quiesced:" + " state x%x fc_flags x%x" + " wait msecs x%x\n", + vport->port_state, vport->fc_flag, + jiffies_to_msecs(jiffies + - start_time)); + break; + } + } + + if (time_after(jiffies, wait_time_max)) + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "1835 Vport discovery quiesce failed:" + " state x%x fc_flags x%x wait msecs x%x\n", + vport->port_state, vport->fc_flag, + jiffies_to_msecs(jiffies - start_time)); +} + +int +lpfc_vport_create(struct fc_vport *fc_vport, bool disable) +{ + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost = fc_vport->shost; + struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = pport->phba; + struct lpfc_vport *vport = NULL; + int instance; + int vpi; + int rc = VPORT_ERROR; + int status; + + if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1808 Create VPORT failed: " + "NPIV is not enabled: SLImode:%d\n", + phba->sli_rev); + rc = VPORT_INVAL; + goto error_out; + } + + /* NPIV is not supported if HBA has NVME Target enabled */ + if (phba->nvmet_support) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3189 Create VPORT failed: " + "NPIV is not supported on NVME Target\n"); + rc = VPORT_INVAL; + goto error_out; + } + + vpi = lpfc_alloc_vpi(phba); + if (vpi == 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1809 Create VPORT failed: " + "Max VPORTs (%d) exceeded\n", + phba->max_vpi); + rc = VPORT_NORESOURCES; + goto error_out; + } + + /* Assign an unused board number */ + if ((instance = lpfc_get_instance()) < 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1810 Create VPORT failed: Cannot get " + "instance number\n"); + lpfc_free_vpi(phba, vpi); + rc = VPORT_NORESOURCES; + goto error_out; + } + + vport = lpfc_create_port(phba, instance, &fc_vport->dev); + if (!vport) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1811 Create VPORT failed: vpi x%x\n", vpi); + lpfc_free_vpi(phba, vpi); + rc = VPORT_NORESOURCES; + goto error_out; + } + + vport->vpi = vpi; + 
lpfc_debugfs_initialize(vport); + + if ((status = lpfc_vport_sparm(phba, vport))) { + if (status == -EINTR) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "1831 Create VPORT Interrupted.\n"); + rc = VPORT_ERROR; + } else { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "1813 Create VPORT failed. " + "Cannot get sparam\n"); + rc = VPORT_NORESOURCES; + } + lpfc_free_vpi(phba, vpi); + destroy_port(vport); + goto error_out; + } + + u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn); + u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn); + + memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8); + memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8); + + if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") || + !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "1821 Create VPORT failed. " + "Invalid WWN format\n"); + lpfc_free_vpi(phba, vpi); + destroy_port(vport); + rc = VPORT_INVAL; + goto error_out; + } + + if (!lpfc_unique_wwpn(phba, vport)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "1823 Create VPORT failed. " + "Duplicate WWN on HBA\n"); + lpfc_free_vpi(phba, vpi); + destroy_port(vport); + rc = VPORT_INVAL; + goto error_out; + } + + /* Create binary sysfs attribute for vport */ + lpfc_alloc_sysfs_attr(vport); + + /* Set the DFT_LUN_Q_DEPTH accordingly */ + vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth; + + /* Only the physical port can support NVME for now */ + vport->cfg_enable_fc4_type = LPFC_ENABLE_FCP; + + *(struct lpfc_vport **)fc_vport->dd_data = vport; + vport->fc_vport = fc_vport; + + /* At this point we are fully registered with SCSI Layer. */ + vport->load_flag |= FC_ALLOW_FDMI; + if (phba->cfg_enable_SmartSAN || + (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { + /* Setup appropriate attribute masks */ + vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask; + vport->fdmi_port_mask = phba->pport->fdmi_port_mask; + } + + /* + * In SLI4, the vpi must be activated before it can be used + * by the port. + */ + if ((phba->sli_rev == LPFC_SLI_REV4) && + (pport->fc_flag & FC_VFI_REGISTERED)) { + rc = lpfc_sli4_init_vpi(vport); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1838 Failed to INIT_VPI on vpi %d " + "status %d\n", vpi, rc); + rc = VPORT_NORESOURCES; + lpfc_free_vpi(phba, vpi); + goto error_out; + } + } else if (phba->sli_rev == LPFC_SLI_REV4) { + /* + * Driver cannot INIT_VPI now. Set the flags to + * init_vpi when reg_vfi complete. + */ + vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); + rc = VPORT_OK; + goto out; + } + + if ((phba->link_state < LPFC_LINK_UP) || + (pport->port_state < LPFC_FABRIC_CFG_LINK) || + (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) { + lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); + rc = VPORT_OK; + goto out; + } + + if (disable) { + lpfc_vport_set_state(vport, FC_VPORT_DISABLED); + rc = VPORT_OK; + goto out; + } + + /* Use the Physical nodes Fabric NDLP to determine if the link is + * up and ready to FDISC. 
+ */ + ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); + if (ndlp && + ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { + if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { + lpfc_set_disctmo(vport); + lpfc_initial_fdisc(vport); + } else { + lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0262 No NPIV Fabric support\n"); + } + } else { + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + } + rc = VPORT_OK; + +out: + lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + "1825 Vport Created.\n"); + lpfc_host_attrib_init(lpfc_shost_from_vport(vport)); +error_out: + return rc; +} + +static int +lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + int rc; + struct lpfc_hba *phba = vport->phba; + + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); + + spin_lock_irq(&ndlp->lock); + if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO) && + !ndlp->logo_waitq) { + ndlp->logo_waitq = &waitq; + ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_flag |= NLP_ISSUE_LOGO; + ndlp->save_flags |= NLP_WAIT_FOR_LOGO; + } + spin_unlock_irq(&ndlp->lock); + rc = lpfc_issue_els_npiv_logo(vport, ndlp); + if (!rc) { + wait_event_timeout(waitq, + (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)), + msecs_to_jiffies(phba->fc_ratov * 2000)); + + if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)) + goto logo_cmpl; + /* LOGO wait failed. Correct status. */ + rc = -EINTR; + } else { + rc = -EIO; + } + + /* Error - clean up node flags. */ + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; + ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; + spin_unlock_irq(&ndlp->lock); + + logo_cmpl: + lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, + "1824 Issue LOGO completes with status %d\n", + rc); + spin_lock_irq(&ndlp->lock); + ndlp->logo_waitq = NULL; + spin_unlock_irq(&ndlp->lock); + return rc; +} + +static int +disable_vport(struct fc_vport *fc_vport) +{ + struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp = NULL; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + /* Can't disable during an outstanding delete. */ + if (vport->load_flag & FC_UNLOADING) + return 0; + + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (ndlp && phba->link_state >= LPFC_LINK_UP) + (void)lpfc_send_npiv_logo(vport, ndlp); + + lpfc_sli_host_down(vport); + lpfc_cleanup_rpis(vport, 0); + + lpfc_stop_vport_timers(vport); + lpfc_unreg_all_rpis(vport); + lpfc_unreg_default_rpis(vport); + /* + * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the + * scsi_host_put() to release the vport. 
+ */ + lpfc_mbx_unreg_vpi(vport); + if (phba->sli_rev == LPFC_SLI_REV4) { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + spin_unlock_irq(shost->host_lock); + } + + lpfc_vport_set_state(vport, FC_VPORT_DISABLED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + "1826 Vport Disabled.\n"); + return VPORT_OK; +} + +static int +enable_vport(struct fc_vport *fc_vport) +{ + struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp = NULL; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if ((phba->link_state < LPFC_LINK_UP) || + (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) { + lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); + return VPORT_OK; + } + + spin_lock_irq(shost->host_lock); + vport->load_flag |= FC_LOADING; + if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { + spin_unlock_irq(shost->host_lock); + lpfc_issue_init_vpi(vport); + goto out; + } + + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); + + /* Use the Physical nodes Fabric NDLP to determine if the link is + * up and ready to FDISC. + */ + ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); + if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { + if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { + lpfc_set_disctmo(vport); + lpfc_initial_fdisc(vport); + } else { + lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0264 No NPIV Fabric support\n"); + } + } else { + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + } + +out: + lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + "1827 Vport Enabled.\n"); + return VPORT_OK; +} + +int +lpfc_vport_disable(struct fc_vport *fc_vport, bool disable) +{ + if (disable) + return disable_vport(fc_vport); + else + return enable_vport(fc_vport); +} + +int +lpfc_vport_delete(struct fc_vport *fc_vport) +{ + struct lpfc_nodelist *ndlp = NULL; + struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + int rc; + + if (vport->port_type == LPFC_PHYSICAL_PORT) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "1812 vport_delete failed: Cannot delete " + "physical host\n"); + return VPORT_ERROR; + } + + /* If the vport is a static vport fail the deletion. */ + if ((vport->vport_flag & STATIC_VPORT) && + !(phba->pport->load_flag & FC_UNLOADING)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "1837 vport_delete failed: Cannot delete " + "static vport.\n"); + return VPORT_ERROR; + } + + spin_lock_irq(&phba->hbalock); + vport->load_flag |= FC_UNLOADING; + spin_unlock_irq(&phba->hbalock); + + /* + * If we are not unloading the driver then prevent the vport_delete + * from happening until after this vport's discovery is finished. + */ + if (!(phba->pport->load_flag & FC_UNLOADING)) { + int check_count = 0; + while (check_count < ((phba->fc_ratov * 3) + 3) && + vport->port_state > LPFC_VPORT_FAILED && + vport->port_state < LPFC_VPORT_READY) { + check_count++; + msleep(1000); + } + if (vport->port_state > LPFC_VPORT_FAILED && + vport->port_state < LPFC_VPORT_READY) + return -EAGAIN; + } + + /* + * Take early refcount for outstanding I/O requests we schedule during + * delete processing for unreg_vpi. Always keep this before + * scsi_remove_host() as we can no longer obtain a reference through + * scsi_host_get() after scsi_host_remove as shost is set to SHOST_DEL. 
+ */ + if (!scsi_host_get(shost)) + return VPORT_INVAL; + + lpfc_free_sysfs_attr(vport); + lpfc_debugfs_terminate(vport); + + /* Remove FC host to break driver binding. */ + fc_remove_host(shost); + scsi_remove_host(shost); + + /* Send the DA_ID and Fabric LOGO to cleanup Nameserver entries. */ + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) + goto skip_logo; + + if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && + phba->link_state >= LPFC_LINK_UP && + phba->fc_topology != LPFC_TOPOLOGY_LOOP) { + if (vport->cfg_enable_da_id) { + /* Send DA_ID and wait for a completion. */ + rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0); + if (rc) { + lpfc_printf_log(vport->phba, KERN_WARNING, + LOG_VPORT, + "1829 CT command failed to " + "delete objects on fabric, " + "rc %d\n", rc); + } + } + + /* + * If the vpi is not registered, then a valid FDISC doesn't + * exist and there is no need for a ELS LOGO. Just cleanup + * the ndlp. + */ + if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) + goto skip_logo; + + /* Issue a Fabric LOGO to cleanup fabric resources. */ + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) + goto skip_logo; + + rc = lpfc_send_npiv_logo(vport, ndlp); + if (rc) + goto skip_logo; + } + + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_discovery_wait(vport); + +skip_logo: + + lpfc_cleanup(vport); + + /* Remove scsi host now. The nodes are cleaned up. */ + lpfc_sli_host_down(vport); + lpfc_stop_vport_timers(vport); + + if (!(phba->pport->load_flag & FC_UNLOADING)) { + lpfc_unreg_all_rpis(vport); + lpfc_unreg_default_rpis(vport); + /* + * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) + * does the scsi_host_put() to release the vport. + */ + if (!(vport->vpi_state & LPFC_VPI_REGISTERED) || + lpfc_mbx_unreg_vpi(vport)) + scsi_host_put(shost); + } else { + scsi_host_put(shost); + } + + lpfc_free_vpi(phba, vport->vpi); + vport->work_port_events = 0; + spin_lock_irq(&phba->port_list_lock); + list_del_init(&vport->listentry); + spin_unlock_irq(&phba->port_list_lock); + lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + "1828 Vport Deleted.\n"); + scsi_host_put(shost); + return VPORT_OK; +} + +struct lpfc_vport ** +lpfc_create_vport_work_array(struct lpfc_hba *phba) +{ + struct lpfc_vport *port_iterator; + struct lpfc_vport **vports; + int index = 0; + vports = kcalloc(phba->max_vports + 1, sizeof(struct lpfc_vport *), + GFP_KERNEL); + if (vports == NULL) + return NULL; + spin_lock_irq(&phba->port_list_lock); + list_for_each_entry(port_iterator, &phba->port_list, listentry) { + if (port_iterator->load_flag & FC_UNLOADING) + continue; + if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) { + lpfc_printf_vlog(port_iterator, KERN_ERR, + LOG_TRACE_EVENT, + "1801 Create vport work array FAILED: " + "cannot do scsi_host_get\n"); + continue; + } + vports[index++] = port_iterator; + } + spin_unlock_irq(&phba->port_list_lock); + return vports; +} + +void +lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports) +{ + int i; + if (vports == NULL) + return; + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) + scsi_host_put(lpfc_shost_from_vport(vports[i])); + kfree(vports); +} + diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h new file mode 100644 index 000000000..fa60c146c --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_vport.h @@ -0,0 +1,118 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. 
* + * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2006 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#ifndef _H_LPFC_VPORT +#define _H_LPFC_VPORT + +/* API version values (each will be an individual bit) */ +#define VPORT_API_VERSION_1 0x01 + +/* Values returned via lpfc_vport_getinfo() */ +struct vport_info { + + uint32_t api_versions; + uint8_t linktype; +#define VPORT_TYPE_PHYSICAL 0 +#define VPORT_TYPE_VIRTUAL 1 + + uint8_t state; +#define VPORT_STATE_OFFLINE 0 +#define VPORT_STATE_ACTIVE 1 +#define VPORT_STATE_FAILED 2 + + uint8_t fail_reason; + uint8_t prev_fail_reason; +#define VPORT_FAIL_UNKNOWN 0 +#define VPORT_FAIL_LINKDOWN 1 +#define VPORT_FAIL_FAB_UNSUPPORTED 2 +#define VPORT_FAIL_FAB_NORESOURCES 3 +#define VPORT_FAIL_FAB_LOGOUT 4 +#define VPORT_FAIL_ADAP_NORESOURCES 5 + + uint8_t node_name[8]; /* WWNN */ + uint8_t port_name[8]; /* WWPN */ + + struct Scsi_Host *shost; + +/* Following values are valid only on physical links */ + uint32_t vports_max; + uint32_t vports_inuse; + uint32_t rpi_max; + uint32_t rpi_inuse; +#define VPORT_CNT_INVALID 0xFFFFFFFF +}; + +/* data used in link creation */ +struct vport_data { + uint32_t api_version; + + uint32_t options; +#define VPORT_OPT_AUTORETRY 0x01 + + uint8_t node_name[8]; /* WWNN */ + uint8_t port_name[8]; /* WWPN */ + +/* + * Upon successful creation, vport_shost will point to the new Scsi_Host + * structure for the new virtual link. + */ + struct Scsi_Host *vport_shost; +}; + +/* API function return codes */ +#define VPORT_OK 0 +#define VPORT_ERROR -1 +#define VPORT_INVAL -2 +#define VPORT_NOMEM -3 +#define VPORT_NORESOURCES -4 + +int lpfc_vport_create(struct fc_vport *, bool); +int lpfc_vport_delete(struct fc_vport *); +int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *); +int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint); +struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *); +void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **); +int lpfc_alloc_vpi(struct lpfc_hba *phba); + +/* + * queuecommand VPORT-specific return codes. Specified in the host byte code. + * Returned when the virtual link has failed or is not active. 
+ */ +#define DID_VPORT_ERROR 0x0f + +#define VPORT_INFO 0x1 +#define VPORT_CREATE 0x2 +#define VPORT_DELETE 0x4 + +struct vport_cmd_tag { + uint32_t cmd; + struct vport_data cdata; + struct vport_info cinfo; + void *vport; + int vport_num; +}; + +void lpfc_vport_set_state(struct lpfc_vport *vport, + enum fc_vport_state new_state); + +#endif /* H_LPFC_VPORT */ diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c new file mode 100644 index 000000000..6a0191321 --- /dev/null +++ b/drivers/scsi/mac53c94.c @@ -0,0 +1,570 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SCSI low-level driver for the 53c94 SCSI bus adaptor found + * on Power Macintosh computers, controlling the external SCSI chain. + * We assume the 53c94 is connected to a DBDMA (descriptor-based DMA) + * controller. + * + * Paul Mackerras, August 1996. + * Copyright (C) 1996 Paul Mackerras. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mac53c94.h" + +enum fsc_phase { + idle, + selecting, + dataing, + completing, + busfreeing, +}; + +struct fsc_state { + struct mac53c94_regs __iomem *regs; + int intr; + struct dbdma_regs __iomem *dma; + int dmaintr; + int clk_freq; + struct Scsi_Host *host; + struct scsi_cmnd *request_q; + struct scsi_cmnd *request_qtail; + struct scsi_cmnd *current_req; /* req we're currently working on */ + enum fsc_phase phase; /* what we're currently trying to do */ + struct dbdma_cmd *dma_cmds; /* space for dbdma commands, aligned */ + void *dma_cmd_space; + struct pci_dev *pdev; + dma_addr_t dma_addr; + struct macio_dev *mdev; +}; + +static void mac53c94_init(struct fsc_state *); +static void mac53c94_start(struct fsc_state *); +static void mac53c94_interrupt(int, void *); +static irqreturn_t do_mac53c94_interrupt(int, void *); +static void cmd_done(struct fsc_state *, int result); +static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *); + +static int mac53c94_queue_lck(struct scsi_cmnd *cmd) +{ + struct fsc_state *state; + +#if 0 + if (cmd->sc_data_direction == DMA_TO_DEVICE) { + int i; + printk(KERN_DEBUG "mac53c94_queue %p: command is", cmd); + for (i = 0; i < cmd->cmd_len; ++i) + printk(KERN_CONT " %.2x", cmd->cmnd[i]); + printk(KERN_CONT "\n"); + printk(KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n", + scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd)); + } +#endif + + cmd->host_scribble = NULL; + + state = (struct fsc_state *) cmd->device->host->hostdata; + + if (state->request_q == NULL) + state->request_q = cmd; + else + state->request_qtail->host_scribble = (void *) cmd; + state->request_qtail = cmd; + + if (state->phase == idle) + mac53c94_start(state); + + return 0; +} + +static DEF_SCSI_QCMD(mac53c94_queue) + +static int mac53c94_host_reset(struct scsi_cmnd *cmd) +{ + struct fsc_state *state = (struct fsc_state *) cmd->device->host->hostdata; + struct mac53c94_regs __iomem *regs = state->regs; + struct dbdma_regs __iomem *dma = state->dma; + unsigned long flags; + + spin_lock_irqsave(cmd->device->host->host_lock, flags); + + writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control); + writeb(CMD_SCSI_RESET, &regs->command); /* assert RST */ + udelay(100); /* leave it on for a while (>= 25us) */ + writeb(CMD_RESET, &regs->command); + udelay(20); + mac53c94_init(state); + writeb(CMD_NOP, &regs->command); + + spin_unlock_irqrestore(cmd->device->host->host_lock, flags); + return SUCCESS;
+} + +static void mac53c94_init(struct fsc_state *state) +{ + struct mac53c94_regs __iomem *regs = state->regs; + struct dbdma_regs __iomem *dma = state->dma; + + writeb(state->host->this_id | CF1_PAR_ENABLE, &regs->config1); + writeb(TIMO_VAL(250), &regs->sel_timeout); /* 250ms */ + writeb(CLKF_VAL(state->clk_freq), &regs->clk_factor); + writeb(CF2_FEATURE_EN, &regs->config2); + writeb(0, &regs->config3); + writeb(0, &regs->sync_period); + writeb(0, &regs->sync_offset); + (void)readb(&regs->interrupt); + writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control); +} + +/* + * Start the next command for a 53C94. + * Should be called with interrupts disabled. + */ +static void mac53c94_start(struct fsc_state *state) +{ + struct scsi_cmnd *cmd; + struct mac53c94_regs __iomem *regs = state->regs; + int i; + + if (state->phase != idle || state->current_req != NULL) + panic("inappropriate mac53c94_start (state=%p)", state); + if (state->request_q == NULL) + return; + state->current_req = cmd = state->request_q; + state->request_q = (struct scsi_cmnd *) cmd->host_scribble; + + /* Off we go */ + writeb(0, &regs->count_lo); + writeb(0, &regs->count_mid); + writeb(0, &regs->count_hi); + writeb(CMD_NOP + CMD_DMA_MODE, &regs->command); + udelay(1); + writeb(CMD_FLUSH, &regs->command); + udelay(1); + writeb(cmd->device->id, &regs->dest_id); + writeb(0, &regs->sync_period); + writeb(0, &regs->sync_offset); + + /* load the command into the FIFO */ + for (i = 0; i < cmd->cmd_len; ++i) + writeb(cmd->cmnd[i], &regs->fifo); + + /* do select without ATN XXX */ + writeb(CMD_SELECT, &regs->command); + state->phase = selecting; + + set_dma_cmds(state, cmd); +} + +static irqreturn_t do_mac53c94_interrupt(int irq, void *dev_id) +{ + unsigned long flags; + struct Scsi_Host *dev = ((struct fsc_state *) dev_id)->current_req->device->host; + + spin_lock_irqsave(dev->host_lock, flags); + mac53c94_interrupt(irq, dev_id); + spin_unlock_irqrestore(dev->host_lock, flags); + return IRQ_HANDLED; +} + +static void mac53c94_interrupt(int irq, void *dev_id) +{ + struct fsc_state *state = (struct fsc_state *) dev_id; + struct mac53c94_regs __iomem *regs = state->regs; + struct dbdma_regs __iomem *dma = state->dma; + struct scsi_cmnd *const cmd = state->current_req; + struct mac53c94_cmd_priv *const mcmd = mac53c94_priv(cmd); + int nb, stat, seq, intr; + static int mac53c94_errors; + + /* + * Apparently, reading the interrupt register unlatches + * the status and sequence step registers. + */ + seq = readb(&regs->seqstep); + stat = readb(&regs->status); + intr = readb(&regs->interrupt); + +#if 0 + printk(KERN_DEBUG "mac53c94_intr, intr=%x stat=%x seq=%x phase=%d\n", + intr, stat, seq, state->phase); +#endif + + if (intr & INTR_RESET) { + /* SCSI bus was reset */ + printk(KERN_INFO "external SCSI bus reset detected\n"); + writeb(CMD_NOP, &regs->command); + writel(RUN << 16, &dma->control); /* stop dma */ + cmd_done(state, DID_RESET << 16); + return; + } + if (intr & INTR_ILL_CMD) { + printk(KERN_ERR "53c94: invalid cmd, intr=%x stat=%x seq=%x phase=%d\n", + intr, stat, seq, state->phase); + cmd_done(state, DID_ERROR << 16); + return; + } + if (stat & STAT_ERROR) { +#if 0 + /* XXX these seem to be harmless?
*/ + printk("53c94: bad error, intr=%x stat=%x seq=%x phase=%d\n", + intr, stat, seq, state->phase); +#endif + ++mac53c94_errors; + writeb(CMD_NOP + CMD_DMA_MODE, ®s->command); + } + if (!cmd) { + printk(KERN_DEBUG "53c94: interrupt with no command active?\n"); + return; + } + if (stat & STAT_PARITY) { + printk(KERN_ERR "mac53c94: parity error\n"); + cmd_done(state, DID_PARITY << 16); + return; + } + switch (state->phase) { + case selecting: + if (intr & INTR_DISCONNECT) { + /* selection timed out */ + cmd_done(state, DID_BAD_TARGET << 16); + return; + } + if (intr != INTR_BUS_SERV + INTR_DONE) { + printk(KERN_DEBUG "got intr %x during selection\n", intr); + cmd_done(state, DID_ERROR << 16); + return; + } + if ((seq & SS_MASK) != SS_DONE) { + printk(KERN_DEBUG "seq step %x after command\n", seq); + cmd_done(state, DID_ERROR << 16); + return; + } + writeb(CMD_NOP, ®s->command); + /* set DMA controller going if any data to transfer */ + if ((stat & (STAT_MSG|STAT_CD)) == 0 + && (scsi_sg_count(cmd) > 0 || scsi_bufflen(cmd))) { + nb = mcmd->this_residual; + if (nb > 0xfff0) + nb = 0xfff0; + mcmd->this_residual -= nb; + writeb(nb, ®s->count_lo); + writeb(nb >> 8, ®s->count_mid); + writeb(CMD_DMA_MODE + CMD_NOP, ®s->command); + writel(virt_to_phys(state->dma_cmds), &dma->cmdptr); + writel((RUN << 16) | RUN, &dma->control); + writeb(CMD_DMA_MODE + CMD_XFER_DATA, ®s->command); + state->phase = dataing; + break; + } else if ((stat & STAT_PHASE) == STAT_CD + STAT_IO) { + /* up to status phase already */ + writeb(CMD_I_COMPLETE, ®s->command); + state->phase = completing; + } else { + printk(KERN_DEBUG "in unexpected phase %x after cmd\n", + stat & STAT_PHASE); + cmd_done(state, DID_ERROR << 16); + return; + } + break; + + case dataing: + if (intr != INTR_BUS_SERV) { + printk(KERN_DEBUG "got intr %x before status\n", intr); + cmd_done(state, DID_ERROR << 16); + return; + } + if (mcmd->this_residual != 0 + && (stat & (STAT_MSG|STAT_CD)) == 0) { + /* Set up the count regs to transfer more */ + nb = mcmd->this_residual; + if (nb > 0xfff0) + nb = 0xfff0; + mcmd->this_residual -= nb; + writeb(nb, ®s->count_lo); + writeb(nb >> 8, ®s->count_mid); + writeb(CMD_DMA_MODE + CMD_NOP, ®s->command); + writeb(CMD_DMA_MODE + CMD_XFER_DATA, ®s->command); + break; + } + if ((stat & STAT_PHASE) != STAT_CD + STAT_IO) { + printk(KERN_DEBUG "intr %x before data xfer complete\n", intr); + } + writel(RUN << 16, &dma->control); /* stop dma */ + scsi_dma_unmap(cmd); + /* should check dma status */ + writeb(CMD_I_COMPLETE, ®s->command); + state->phase = completing; + break; + case completing: + if (intr != INTR_DONE) { + printk(KERN_DEBUG "got intr %x on completion\n", intr); + cmd_done(state, DID_ERROR << 16); + return; + } + mcmd->status = readb(®s->fifo); + mcmd->message = readb(®s->fifo); + writeb(CMD_ACCEPT_MSG, ®s->command); + state->phase = busfreeing; + break; + case busfreeing: + if (intr != INTR_DISCONNECT) { + printk(KERN_DEBUG "got intr %x when expected disconnect\n", intr); + } + cmd_done(state, (DID_OK << 16) + (mcmd->message << 8) + mcmd->status); + break; + default: + printk(KERN_DEBUG "don't know about phase %d\n", state->phase); + } +} + +static void cmd_done(struct fsc_state *state, int result) +{ + struct scsi_cmnd *cmd; + + cmd = state->current_req; + if (cmd) { + cmd->result = result; + scsi_done(cmd); + state->current_req = NULL; + } + state->phase = idle; + mac53c94_start(state); +} + +/* + * Set up DMA commands for transferring data. 
+ */ +static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd) +{ + int i, dma_cmd, total, nseg; + struct scatterlist *scl; + struct dbdma_cmd *dcmds; + dma_addr_t dma_addr; + u32 dma_len; + + nseg = scsi_dma_map(cmd); + BUG_ON(nseg < 0); + if (!nseg) + return; + + dma_cmd = cmd->sc_data_direction == DMA_TO_DEVICE ? + OUTPUT_MORE : INPUT_MORE; + dcmds = state->dma_cmds; + total = 0; + + scsi_for_each_sg(cmd, scl, nseg, i) { + dma_addr = sg_dma_address(scl); + dma_len = sg_dma_len(scl); + if (dma_len > 0xffff) + panic("mac53c94: scatterlist element >= 64k"); + total += dma_len; + dcmds->req_count = cpu_to_le16(dma_len); + dcmds->command = cpu_to_le16(dma_cmd); + dcmds->phy_addr = cpu_to_le32(dma_addr); + dcmds->xfer_status = 0; + ++dcmds; + } + + dma_cmd += OUTPUT_LAST - OUTPUT_MORE; + dcmds[-1].command = cpu_to_le16(dma_cmd); + dcmds->command = cpu_to_le16(DBDMA_STOP); + mac53c94_priv(cmd)->this_residual = total; +} + +static const struct scsi_host_template mac53c94_template = { + .proc_name = "53c94", + .name = "53C94", + .queuecommand = mac53c94_queue, + .eh_host_reset_handler = mac53c94_host_reset, + .can_queue = 1, + .this_id = 7, + .sg_tablesize = SG_ALL, + .max_segment_size = 65535, + .cmd_size = sizeof(struct mac53c94_cmd_priv), +}; + +static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match) +{ + struct device_node *node = macio_get_of_node(mdev); + struct pci_dev *pdev = macio_get_pci_dev(mdev); + struct fsc_state *state; + struct Scsi_Host *host; + void *dma_cmd_space; + const unsigned char *clkprop; + int proplen, rc = -ENODEV; + + if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) { + printk(KERN_ERR "mac53c94: expected 2 addrs and intrs" + " (got %d/%d)\n", + macio_resource_count(mdev), macio_irq_count(mdev)); + return -ENODEV; + } + + if (macio_request_resources(mdev, "mac53c94") != 0) { + printk(KERN_ERR "mac53c94: unable to request memory resources"); + return -EBUSY; + } + + host = scsi_host_alloc(&mac53c94_template, sizeof(struct fsc_state)); + if (host == NULL) { + printk(KERN_ERR "mac53c94: couldn't register host"); + rc = -ENOMEM; + goto out_release; + } + + state = (struct fsc_state *) host->hostdata; + macio_set_drvdata(mdev, state); + state->host = host; + state->pdev = pdev; + state->mdev = mdev; + + state->regs = (struct mac53c94_regs __iomem *) + ioremap(macio_resource_start(mdev, 0), 0x1000); + state->intr = macio_irq(mdev, 0); + state->dma = (struct dbdma_regs __iomem *) + ioremap(macio_resource_start(mdev, 1), 0x1000); + state->dmaintr = macio_irq(mdev, 1); + if (state->regs == NULL || state->dma == NULL) { + printk(KERN_ERR "mac53c94: ioremap failed for %pOF\n", node); + goto out_free; + } + + clkprop = of_get_property(node, "clock-frequency", &proplen); + if (clkprop == NULL || proplen != sizeof(int)) { + printk(KERN_ERR "%pOF: can't get clock frequency, " + "assuming 25MHz\n", node); + state->clk_freq = 25000000; + } else + state->clk_freq = *(int *)clkprop; + + /* Space for dma command list: +1 for stop command, + * +1 to allow for aligning. 
+ * XXX FIXME: Use DMA consistent routines + */ + dma_cmd_space = kmalloc_array(host->sg_tablesize + 2, + sizeof(struct dbdma_cmd), + GFP_KERNEL); + if (!dma_cmd_space) { + printk(KERN_ERR "mac53c94: couldn't allocate dma " + "command space for %pOF\n", node); + rc = -ENOMEM; + goto out_free; + } + + state->dma_cmds = (struct dbdma_cmd *)DBDMA_ALIGN(dma_cmd_space); + memset(state->dma_cmds, 0, (host->sg_tablesize + 1) + * sizeof(struct dbdma_cmd)); + state->dma_cmd_space = dma_cmd_space; + + mac53c94_init(state); + + if (request_irq(state->intr, do_mac53c94_interrupt, 0, "53C94",state)) { + printk(KERN_ERR "mac53C94: can't get irq %d for %pOF\n", + state->intr, node); + goto out_free_dma; + } + + rc = scsi_add_host(host, &mdev->ofdev.dev); + if (rc != 0) + goto out_release_irq; + + scsi_scan_host(host); + return 0; + + out_release_irq: + free_irq(state->intr, state); + out_free_dma: + kfree(state->dma_cmd_space); + out_free: + if (state->dma != NULL) + iounmap(state->dma); + if (state->regs != NULL) + iounmap(state->regs); + scsi_host_put(host); + out_release: + macio_release_resources(mdev); + + return rc; +} + +static int mac53c94_remove(struct macio_dev *mdev) +{ + struct fsc_state *fp = (struct fsc_state *)macio_get_drvdata(mdev); + struct Scsi_Host *host = fp->host; + + scsi_remove_host(host); + + free_irq(fp->intr, fp); + + if (fp->regs) + iounmap(fp->regs); + if (fp->dma) + iounmap(fp->dma); + kfree(fp->dma_cmd_space); + + scsi_host_put(host); + + macio_release_resources(mdev); + + return 0; +} + + +static struct of_device_id mac53c94_match[] = +{ + { + .name = "53c94", + }, + {}, +}; +MODULE_DEVICE_TABLE (of, mac53c94_match); + +static struct macio_driver mac53c94_driver = +{ + .driver = { + .name = "mac53c94", + .owner = THIS_MODULE, + .of_match_table = mac53c94_match, + }, + .probe = mac53c94_probe, + .remove = mac53c94_remove, +}; + + +static int __init init_mac53c94(void) +{ + return macio_register_driver(&mac53c94_driver); +} + +static void __exit exit_mac53c94(void) +{ + return macio_unregister_driver(&mac53c94_driver); +} + +module_init(init_mac53c94); +module_exit(exit_mac53c94); + +MODULE_DESCRIPTION("PowerMac 53c94 SCSI driver"); +MODULE_AUTHOR("Paul Mackerras "); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/mac53c94.h b/drivers/scsi/mac53c94.h new file mode 100644 index 000000000..b4093027f --- /dev/null +++ b/drivers/scsi/mac53c94.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * mac53c94.h: definitions for the driver for the 53c94 SCSI bus adaptor + * found on Power Macintosh computers, controlling the external SCSI chain. + * + * Copyright (C) 1996 Paul Mackerras. + */ +#ifndef _MAC53C94_H +#define _MAC53C94_H + +/* + * Registers in the 53C94 controller. + */ + +struct mac53c94_regs { + unsigned char count_lo; + char pad0[15]; + unsigned char count_mid; + char pad1[15]; + unsigned char fifo; + char pad2[15]; + unsigned char command; + char pad3[15]; + unsigned char status; + char pad4[15]; + unsigned char interrupt; + char pad5[15]; + unsigned char seqstep; + char pad6[15]; + unsigned char flags; + char pad7[15]; + unsigned char config1; + char pad8[15]; + unsigned char clk_factor; + char pad9[15]; + unsigned char test; + char pad10[15]; + unsigned char config2; + char pad11[15]; + unsigned char config3; + char pad12[15]; + unsigned char config4; + char pad13[15]; + unsigned char count_hi; + char pad14[15]; + unsigned char fifo_res; + char pad15[15]; +}; + +/* + * Alternate functions for some registers. 
+ */ +#define dest_id status +#define sel_timeout interrupt +#define sync_period seqstep +#define sync_offset flags + +/* + * Bits in command register. + */ +#define CMD_DMA_MODE 0x80 +#define CMD_MODE_MASK 0x70 +#define CMD_MODE_INIT 0x10 +#define CMD_MODE_TARG 0x20 +#define CMD_MODE_DISC 0x40 + +#define CMD_NOP 0 +#define CMD_FLUSH 1 +#define CMD_RESET 2 +#define CMD_SCSI_RESET 3 + +#define CMD_XFER_DATA 0x10 +#define CMD_I_COMPLETE 0x11 +#define CMD_ACCEPT_MSG 0x12 +#define CMD_XFER_PAD 0x18 +#define CMD_SET_ATN 0x1a +#define CMD_CLR_ATN 0x1b + +#define CMD_SEND_MSG 0x20 +#define CMD_SEND_STATUS 0x21 +#define CMD_SEND_DATA 0x22 +#define CMD_DISC_SEQ 0x23 +#define CMD_TERMINATE 0x24 +#define CMD_T_COMPLETE 0x25 +#define CMD_DISCONNECT 0x27 +#define CMD_RECV_MSG 0x28 +#define CMD_RECV_CDB 0x29 +#define CMD_RECV_DATA 0x2a +#define CMD_RECV_CMD 0x2b +#define CMD_ABORT_DMA 0x04 + +#define CMD_RESELECT 0x40 +#define CMD_SELECT 0x41 +#define CMD_SELECT_ATN 0x42 +#define CMD_SELATN_STOP 0x43 +#define CMD_ENABLE_SEL 0x44 +#define CMD_DISABLE_SEL 0x45 +#define CMD_SEL_ATN3 0x46 +#define CMD_RESEL_ATN3 0x47 + +/* + * Bits in status register. + */ +#define STAT_IRQ 0x80 +#define STAT_ERROR 0x40 +#define STAT_PARITY 0x20 +#define STAT_TC_ZERO 0x10 +#define STAT_DONE 0x08 +#define STAT_PHASE 0x07 +#define STAT_MSG 0x04 +#define STAT_CD 0x02 +#define STAT_IO 0x01 + +/* + * Bits in interrupt register. + */ +#define INTR_RESET 0x80 /* SCSI bus was reset */ +#define INTR_ILL_CMD 0x40 /* illegal command */ +#define INTR_DISCONNECT 0x20 /* we got disconnected */ +#define INTR_BUS_SERV 0x10 /* bus service requested */ +#define INTR_DONE 0x08 /* function completed */ +#define INTR_RESELECTED 0x04 /* we were reselected */ +#define INTR_SEL_ATN 0x02 /* we were selected, ATN asserted */ +#define INTR_SELECT 0x01 /* we were selected, ATN negated */ + +/* + * Encoding for the select timeout. + */ +#define TIMO_VAL(x) ((x) * 5000 / 7682) + +/* + * Bits in sequence step register. + */ +#define SS_MASK 7 +#define SS_ARB_SEL 0 /* Selection & arbitration complete */ +#define SS_MSG_SENT 1 /* One message byte sent */ +#define SS_NOT_CMD 2 /* Not in command phase */ +#define SS_PHASE_CHG 3 /* Early phase change, cmd bytes lost */ +#define SS_DONE 4 /* Command was sent OK */ + +/* + * Encoding for sync transfer period. + */ +#define SYNCP_MASK 0x1f +#define SYNCP_MIN 4 +#define SYNCP_MAX 31 + +/* + * Bits in flags register. + */ +#define FLAGS_FIFO_LEV 0x1f +#define FLAGS_SEQ_STEP 0xe0 + +/* + * Encoding for sync offset. + */ +#define SYNCO_MASK 0x0f +#define SYNCO_ASS_CTRL 0x30 /* REQ/ACK assertion control */ +#define SYNCO_NEG_CTRL 0xc0 /* REQ/ACK negation control */ + +/* + * Bits in config1 register. + */ +#define CF1_SLOW_CABLE 0x80 /* Slow cable mode */ +#define CF1_NO_RES_REP 0x40 /* Disable SCSI reset reports */ +#define CF1_PAR_TEST 0x20 /* Parity test mode enable */ +#define CF1_PAR_ENABLE 0x10 /* Enable parity checks */ +#define CF1_TEST 0x08 /* Chip tests */ +#define CF1_MY_ID 0x07 /* Controller's address on bus */ + +/* + * Encoding for clk_factor register. + */ +#define CLKF_MASK 7 +#define CLKF_VAL(freq) ((((freq) + 4999999) / 5000000) & CLKF_MASK) + +/* + * Bits in test mode register. + */ +#define TEST_TARGET 1 /* target test mode */ +#define TEST_INITIATOR 2 /* initiator test mode */ +#define TEST_TRISTATE 4 /* tristate (hi-z) test mode */ + +/* + * Bits in config2 register. 
+ */ +#define CF2_RFB 0x80 +#define CF2_FEATURE_EN 0x40 /* enable features / phase latch */ +#define CF2_BYTECTRL 0x20 +#define CF2_DREQ_HIZ 0x10 +#define CF2_SCSI2 0x08 +#define CF2_PAR_ABORT 0x04 /* bad parity target abort */ +#define CF2_REG_PARERR 0x02 /* register parity error */ +#define CF2_DMA_PARERR 0x01 /* DMA parity error */ + +/* + * Bits in the config3 register. + */ +#define CF3_ID_MSG_CHK 0x80 +#define CF3_3B_MSGS 0x40 +#define CF3_CDB10 0x20 +#define CF3_FASTSCSI 0x10 /* enable fast SCSI support */ +#define CF3_FASTCLOCK 0x08 +#define CF3_SAVERESID 0x04 +#define CF3_ALT_DMA 0x02 +#define CF3_THRESH_8 0x01 + +/* + * Bits in the config4 register. + */ +#define CF4_EAN 0x04 +#define CF4_TEST 0x02 +#define CF4_BBTE 0x01 + +struct mac53c94_cmd_priv { + int this_residual; + int status; + int message; +}; + +static inline struct mac53c94_cmd_priv *mac53c94_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +#endif /* _MAC53C94_H */ diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c new file mode 100644 index 000000000..3f0061b00 --- /dev/null +++ b/drivers/scsi/mac_esp.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* mac_esp.c: ESP front-end for Macintosh Quadra systems. + * + * Adapted from jazz_esp.c and the old mac_esp.c. + * + * The pseudo DMA algorithm is based on the one used in NetBSD. + * See sys/arch/mac68k/obio/esp.c for some background information. + * + * Copyright (C) 2007-2008 Finn Thain + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include "esp_scsi.h" + +#define DRV_MODULE_NAME "mac_esp" +#define PFX DRV_MODULE_NAME ": " +#define DRV_VERSION "1.000" +#define DRV_MODULE_RELDATE "Sept 15, 2007" + +#define MAC_ESP_IO_BASE 0x50F00000 +#define MAC_ESP_REGS_QUADRA (MAC_ESP_IO_BASE + 0x10000) +#define MAC_ESP_REGS_QUADRA2 (MAC_ESP_IO_BASE + 0xF000) +#define MAC_ESP_REGS_QUADRA3 (MAC_ESP_IO_BASE + 0x18000) +#define MAC_ESP_REGS_SPACING 0x402 +#define MAC_ESP_PDMA_REG 0xF9800024 +#define MAC_ESP_PDMA_REG_SPACING 0x4 +#define MAC_ESP_PDMA_IO_OFFSET 0x100 + +#define esp_read8(REG) mac_esp_read8(esp, REG) +#define esp_write8(VAL, REG) mac_esp_write8(esp, VAL, REG) + +struct mac_esp_priv { + struct esp *esp; + void __iomem *pdma_regs; + void __iomem *pdma_io; +}; +static struct esp *esp_chips[2]; +static DEFINE_SPINLOCK(esp_chips_lock); + +#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \ + dev_get_drvdata((esp)->dev)) + +static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg) +{ + nubus_writeb(val, esp->regs + reg * 16); +} + +static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg) +{ + return nubus_readb(esp->regs + reg * 16); +} + +static void mac_esp_reset_dma(struct esp *esp) +{ + /* Nothing to do. */ +} + +static void mac_esp_dma_drain(struct esp *esp) +{ + /* Nothing to do. */ +} + +static void mac_esp_dma_invalidate(struct esp *esp) +{ + /* Nothing to do. 
*/ +} + +static int mac_esp_dma_error(struct esp *esp) +{ + return esp->send_cmd_error; +} + +static inline int mac_esp_wait_for_empty_fifo(struct esp *esp) +{ + int i = 500000; + + do { + if (!(esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES)) + return 0; + + if (esp_read8(ESP_STATUS) & ESP_STAT_INTR) + return 1; + + udelay(2); + } while (--i); + + printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n", + esp_read8(ESP_STATUS)); + esp->send_cmd_error = 1; + return 1; +} + +static inline int mac_esp_wait_for_dreq(struct esp *esp) +{ + struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); + int i = 500000; + + do { + if (mep->pdma_regs == NULL) { + if (via2_scsi_drq_pending()) + return 0; + } else { + if (nubus_readl(mep->pdma_regs) & 0x200) + return 0; + } + + if (esp_read8(ESP_STATUS) & ESP_STAT_INTR) + return 1; + + udelay(2); + } while (--i); + + printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n", + esp_read8(ESP_STATUS)); + esp->send_cmd_error = 1; + return 1; +} + +#define MAC_ESP_PDMA_LOOP(operands) \ + asm volatile ( \ + " tstw %1 \n" \ + " jbeq 20f \n" \ + "1: movew " operands " \n" \ + "2: movew " operands " \n" \ + "3: movew " operands " \n" \ + "4: movew " operands " \n" \ + "5: movew " operands " \n" \ + "6: movew " operands " \n" \ + "7: movew " operands " \n" \ + "8: movew " operands " \n" \ + "9: movew " operands " \n" \ + "10: movew " operands " \n" \ + "11: movew " operands " \n" \ + "12: movew " operands " \n" \ + "13: movew " operands " \n" \ + "14: movew " operands " \n" \ + "15: movew " operands " \n" \ + "16: movew " operands " \n" \ + " subqw #1,%1 \n" \ + " jbne 1b \n" \ + "20: tstw %2 \n" \ + " jbeq 30f \n" \ + "21: movew " operands " \n" \ + " subqw #1,%2 \n" \ + " jbne 21b \n" \ + "30: tstw %3 \n" \ + " jbeq 40f \n" \ + "31: moveb " operands " \n" \ + "32: nop \n" \ + "40: \n" \ + " \n" \ + " .section __ex_table,\"a\" \n" \ + " .align 4 \n" \ + " .long 1b,40b \n" \ + " .long 2b,40b \n" \ + " .long 3b,40b \n" \ + " .long 4b,40b \n" \ + " .long 5b,40b \n" \ + " .long 6b,40b \n" \ + " .long 7b,40b \n" \ + " .long 8b,40b \n" \ + " .long 9b,40b \n" \ + " .long 10b,40b \n" \ + " .long 11b,40b \n" \ + " .long 12b,40b \n" \ + " .long 13b,40b \n" \ + " .long 14b,40b \n" \ + " .long 15b,40b \n" \ + " .long 16b,40b \n" \ + " .long 21b,40b \n" \ + " .long 31b,40b \n" \ + " .long 32b,40b \n" \ + " .previous \n" \ + : "+a" (addr), "+r" (count32), "+r" (count2) \ + : "g" (count1), "a" (mep->pdma_io)) + +static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count, + u32 dma_count, int write, u8 cmd) +{ + struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); + + esp->send_cmd_error = 0; + + if (!write) + scsi_esp_cmd(esp, ESP_CMD_FLUSH); + + esp_write8((esp_count >> 0) & 0xFF, ESP_TCLOW); + esp_write8((esp_count >> 8) & 0xFF, ESP_TCMED); + + scsi_esp_cmd(esp, cmd); + + do { + unsigned int count32 = esp_count >> 5; + unsigned int count2 = (esp_count & 0x1F) >> 1; + unsigned int count1 = esp_count & 1; + unsigned int start_addr = addr; + + if (mac_esp_wait_for_dreq(esp)) + break; + + if (write) { + MAC_ESP_PDMA_LOOP("%4@,%0@+"); + + esp_count -= addr - start_addr; + } else { + unsigned int n; + + MAC_ESP_PDMA_LOOP("%0@+,%4@"); + + if (mac_esp_wait_for_empty_fifo(esp)) + break; + + n = (esp_read8(ESP_TCMED) << 8) + esp_read8(ESP_TCLOW); + addr = start_addr + esp_count - n; + esp_count = n; + } + } while (esp_count); +} + +static int mac_esp_irq_pending(struct esp *esp) +{ + if (esp_read8(ESP_STATUS) & ESP_STAT_INTR) + return 1; + return 0; +} + +static u32 mac_esp_dma_length_limit(struct 
esp *esp, u32 dma_addr, u32 dma_len) +{ + return dma_len > 0xFFFF ? 0xFFFF : dma_len; +} + +static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id) +{ + int got_intr; + + /* + * This is an edge triggered IRQ, so we have to be careful to + * avoid missing a transition when it is shared by two ESP devices. + */ + + do { + got_intr = 0; + if (esp_chips[0] && + (mac_esp_read8(esp_chips[0], ESP_STATUS) & ESP_STAT_INTR)) { + (void)scsi_esp_intr(irq, esp_chips[0]); + got_intr = 1; + } + if (esp_chips[1] && + (mac_esp_read8(esp_chips[1], ESP_STATUS) & ESP_STAT_INTR)) { + (void)scsi_esp_intr(irq, esp_chips[1]); + got_intr = 1; + } + } while (got_intr); + + return IRQ_HANDLED; +} + +static struct esp_driver_ops mac_esp_ops = { + .esp_write8 = mac_esp_write8, + .esp_read8 = mac_esp_read8, + .irq_pending = mac_esp_irq_pending, + .dma_length_limit = mac_esp_dma_length_limit, + .reset_dma = mac_esp_reset_dma, + .dma_drain = mac_esp_dma_drain, + .dma_invalidate = mac_esp_dma_invalidate, + .send_dma_cmd = mac_esp_send_pdma_cmd, + .dma_error = mac_esp_dma_error, +}; + +static int esp_mac_probe(struct platform_device *dev) +{ + const struct scsi_host_template *tpnt = &scsi_esp_template; + struct Scsi_Host *host; + struct esp *esp; + int err; + struct mac_esp_priv *mep; + + if (!MACH_IS_MAC) + return -ENODEV; + + if (dev->id > 1) + return -ENODEV; + + host = scsi_host_alloc(tpnt, sizeof(struct esp)); + + err = -ENOMEM; + if (!host) + goto fail; + + host->max_id = 8; + host->dma_boundary = PAGE_SIZE - 1; + esp = shost_priv(host); + + esp->host = host; + esp->dev = &dev->dev; + + esp->command_block = kzalloc(16, GFP_KERNEL); + if (!esp->command_block) + goto fail_unlink; + esp->command_block_dma = (dma_addr_t)esp->command_block; + + esp->scsi_id = 7; + host->this_id = esp->scsi_id; + esp->scsi_id_mask = 1 << esp->scsi_id; + + mep = kzalloc(sizeof(struct mac_esp_priv), GFP_KERNEL); + if (!mep) + goto fail_free_command_block; + mep->esp = esp; + platform_set_drvdata(dev, mep); + + switch (macintosh_config->scsi_type) { + case MAC_SCSI_QUADRA: + esp->cfreq = 16500000; + esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA; + mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET; + mep->pdma_regs = NULL; + break; + case MAC_SCSI_QUADRA2: + esp->cfreq = 25000000; + esp->regs = (void __iomem *)(MAC_ESP_REGS_QUADRA2 + + dev->id * MAC_ESP_REGS_SPACING); + mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET; + mep->pdma_regs = (void __iomem *)(MAC_ESP_PDMA_REG + + dev->id * MAC_ESP_PDMA_REG_SPACING); + nubus_writel(0x1d1, mep->pdma_regs); + break; + case MAC_SCSI_QUADRA3: + /* These quadras have a real DMA controller (the PSC) but we + * don't know how to drive it so we must use PIO instead. + */ + esp->cfreq = 25000000; + esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA3; + mep->pdma_io = NULL; + mep->pdma_regs = NULL; + break; + } + esp->fifo_reg = esp->regs + ESP_FDATA * 16; + + esp->ops = &mac_esp_ops; + esp->flags = ESP_FLAG_NO_DMA_MAP; + if (mep->pdma_io == NULL) { + printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id); + esp_write8(0, ESP_TCLOW); + esp_write8(0, ESP_TCMED); + esp->flags |= ESP_FLAG_DISABLE_SYNC; + mac_esp_ops.send_dma_cmd = esp_send_pio_cmd; + } else { + printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id); + } + + host->irq = IRQ_MAC_SCSI; + + /* The request_irq() call is intended to succeed for the first device + * and fail for the second device. 
+ */ + err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL); + spin_lock(&esp_chips_lock); + if (err < 0 && esp_chips[!dev->id] == NULL) { + spin_unlock(&esp_chips_lock); + goto fail_free_priv; + } + esp_chips[dev->id] = esp; + spin_unlock(&esp_chips_lock); + + err = scsi_esp_register(esp); + if (err) + goto fail_free_irq; + + return 0; + +fail_free_irq: + spin_lock(&esp_chips_lock); + esp_chips[dev->id] = NULL; + if (esp_chips[!dev->id] == NULL) { + spin_unlock(&esp_chips_lock); + free_irq(host->irq, NULL); + } else + spin_unlock(&esp_chips_lock); +fail_free_priv: + kfree(mep); +fail_free_command_block: + kfree(esp->command_block); +fail_unlink: + scsi_host_put(host); +fail: + return err; +} + +static int esp_mac_remove(struct platform_device *dev) +{ + struct mac_esp_priv *mep = platform_get_drvdata(dev); + struct esp *esp = mep->esp; + unsigned int irq = esp->host->irq; + + scsi_esp_unregister(esp); + + spin_lock(&esp_chips_lock); + esp_chips[dev->id] = NULL; + if (esp_chips[!dev->id] == NULL) { + spin_unlock(&esp_chips_lock); + free_irq(irq, NULL); + } else + spin_unlock(&esp_chips_lock); + + kfree(mep); + + kfree(esp->command_block); + + scsi_host_put(esp->host); + + return 0; +} + +static struct platform_driver esp_mac_driver = { + .probe = esp_mac_probe, + .remove = esp_mac_remove, + .driver = { + .name = DRV_MODULE_NAME, + }, +}; +module_platform_driver(esp_mac_driver); + +MODULE_DESCRIPTION("Mac ESP SCSI driver"); +MODULE_AUTHOR("Finn Thain"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_ALIAS("platform:" DRV_MODULE_NAME); diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c new file mode 100644 index 000000000..2e511697f --- /dev/null +++ b/drivers/scsi/mac_scsi.c @@ -0,0 +1,548 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic Macintosh NCR5380 driver + * + * Copyright 1998, Michael Schmitz + * + * Copyright 2019 Finn Thain + * + * derived in part from: + */ +/* + * Generic Generic NCR5380 driver + * + * Copyright 1995, Russell King + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +/* Definitions for the core NCR5380 driver. 
*/ + +#define NCR5380_implementation_fields int pdma_residual + +#define NCR5380_read(reg) in_8(hostdata->io + ((reg) << 4)) +#define NCR5380_write(reg, value) out_8(hostdata->io + ((reg) << 4), value) + +#define NCR5380_dma_xfer_len macscsi_dma_xfer_len +#define NCR5380_dma_recv_setup macscsi_pread +#define NCR5380_dma_send_setup macscsi_pwrite +#define NCR5380_dma_residual macscsi_dma_residual + +#define NCR5380_intr macscsi_intr +#define NCR5380_queue_command macscsi_queue_command +#define NCR5380_abort macscsi_abort +#define NCR5380_host_reset macscsi_host_reset +#define NCR5380_info macscsi_info + +#include "NCR5380.h" + +static int setup_can_queue = -1; +module_param(setup_can_queue, int, 0); +static int setup_cmd_per_lun = -1; +module_param(setup_cmd_per_lun, int, 0); +static int setup_sg_tablesize = -1; +module_param(setup_sg_tablesize, int, 0); +static int setup_use_pdma = 512; +module_param(setup_use_pdma, int, 0); +static int setup_hostid = -1; +module_param(setup_hostid, int, 0); +static int setup_toshiba_delay = -1; +module_param(setup_toshiba_delay, int, 0); + +#ifndef MODULE +static int __init mac_scsi_setup(char *str) +{ + int ints[8]; + + (void)get_options(str, ARRAY_SIZE(ints), ints); + + if (ints[0] < 1) { + pr_err("Usage: mac5380=[,[,[,[,[,[,]]]]]]\n"); + return 0; + } + if (ints[0] >= 1) + setup_can_queue = ints[1]; + if (ints[0] >= 2) + setup_cmd_per_lun = ints[2]; + if (ints[0] >= 3) + setup_sg_tablesize = ints[3]; + if (ints[0] >= 4) + setup_hostid = ints[4]; + /* ints[5] (use_tagged_queuing) is ignored */ + if (ints[0] >= 6) + setup_use_pdma = ints[6]; + if (ints[0] >= 7) + setup_toshiba_delay = ints[7]; + return 1; +} + +__setup("mac5380=", mac_scsi_setup); +#endif /* !MODULE */ + +/* + * According to "Inside Macintosh: Devices", Mac OS requires disk drivers to + * specify the number of bytes between the delays expected from a SCSI target. + * This allows the operating system to "prevent bus errors when a target fails + * to deliver the next byte within the processor bus error timeout period." + * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets + * so bus errors are unavoidable. + * + * If a MOVE.B instruction faults, we assume that zero bytes were transferred + * and simply retry. That assumption probably depends on target behaviour but + * seems to hold up okay. The NOP provides synchronization: without it the + * fault can sometimes occur after the program counter has moved past the + * offending instruction. Post-increment addressing can't be used. + */ + +#define MOVE_BYTE(operands) \ + asm volatile ( \ + "1: moveb " operands " \n" \ + "11: nop \n" \ + " addq #1,%0 \n" \ + " subq #1,%1 \n" \ + "40: \n" \ + " \n" \ + ".section .fixup,\"ax\" \n" \ + ".even \n" \ + "90: movel #1, %2 \n" \ + " jra 40b \n" \ + ".previous \n" \ + " \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 4 \n" \ + ".long 1b,90b \n" \ + ".long 11b,90b \n" \ + ".previous \n" \ + : "+a" (addr), "+r" (n), "+r" (result) : "a" (io)) + +/* + * If a MOVE.W (or MOVE.L) instruction faults, it cannot be retried because + * the residual byte count would be uncertain. In that situation the MOVE_WORD + * macro clears n in the fixup section to abort the transfer. 
+ */ + +#define MOVE_WORD(operands) \ + asm volatile ( \ + "1: movew " operands " \n" \ + "11: nop \n" \ + " subq #2,%1 \n" \ + "40: \n" \ + " \n" \ + ".section .fixup,\"ax\" \n" \ + ".even \n" \ + "90: movel #0, %1 \n" \ + " movel #2, %2 \n" \ + " jra 40b \n" \ + ".previous \n" \ + " \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 4 \n" \ + ".long 1b,90b \n" \ + ".long 11b,90b \n" \ + ".previous \n" \ + : "+a" (addr), "+r" (n), "+r" (result) : "a" (io)) + +#define MOVE_16_WORDS(operands) \ + asm volatile ( \ + "1: movew " operands " \n" \ + "2: movew " operands " \n" \ + "3: movew " operands " \n" \ + "4: movew " operands " \n" \ + "5: movew " operands " \n" \ + "6: movew " operands " \n" \ + "7: movew " operands " \n" \ + "8: movew " operands " \n" \ + "9: movew " operands " \n" \ + "10: movew " operands " \n" \ + "11: movew " operands " \n" \ + "12: movew " operands " \n" \ + "13: movew " operands " \n" \ + "14: movew " operands " \n" \ + "15: movew " operands " \n" \ + "16: movew " operands " \n" \ + "17: nop \n" \ + " subl #32,%1 \n" \ + "40: \n" \ + " \n" \ + ".section .fixup,\"ax\" \n" \ + ".even \n" \ + "90: movel #0, %1 \n" \ + " movel #2, %2 \n" \ + " jra 40b \n" \ + ".previous \n" \ + " \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 4 \n" \ + ".long 1b,90b \n" \ + ".long 2b,90b \n" \ + ".long 3b,90b \n" \ + ".long 4b,90b \n" \ + ".long 5b,90b \n" \ + ".long 6b,90b \n" \ + ".long 7b,90b \n" \ + ".long 8b,90b \n" \ + ".long 9b,90b \n" \ + ".long 10b,90b \n" \ + ".long 11b,90b \n" \ + ".long 12b,90b \n" \ + ".long 13b,90b \n" \ + ".long 14b,90b \n" \ + ".long 15b,90b \n" \ + ".long 16b,90b \n" \ + ".long 17b,90b \n" \ + ".previous \n" \ + : "+a" (addr), "+r" (n), "+r" (result) : "a" (io)) + +#define MAC_PDMA_DELAY 32 + +static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n) +{ + unsigned char *addr = start; + int result = 0; + + if (n >= 1) { + MOVE_BYTE("%3@,%0@"); + if (result) + goto out; + } + if (n >= 1 && ((unsigned long)addr & 1)) { + MOVE_BYTE("%3@,%0@"); + if (result) + goto out; + } + while (n >= 32) + MOVE_16_WORDS("%3@,%0@+"); + while (n >= 2) + MOVE_WORD("%3@,%0@+"); + if (result) + return start - addr; /* Negated to indicate uncertain length */ + if (n == 1) + MOVE_BYTE("%3@,%0@"); +out: + return addr - start; +} + +static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n) +{ + unsigned char *addr = start; + int result = 0; + + if (n >= 1) { + MOVE_BYTE("%0@,%3@"); + if (result) + goto out; + } + if (n >= 1 && ((unsigned long)addr & 1)) { + MOVE_BYTE("%0@,%3@"); + if (result) + goto out; + } + while (n >= 32) + MOVE_16_WORDS("%0@+,%3@"); + while (n >= 2) + MOVE_WORD("%0@+,%3@"); + if (result) + return start - addr; /* Negated to indicate uncertain length */ + if (n == 1) + MOVE_BYTE("%0@,%3@"); +out: + return addr - start; +} + +/* The "SCSI DMA" chip on the IIfx implements this register. 
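+ * Only two control bits are used here: CTRL_INTERRUPTS_ENABLE and
+ * CTRL_HANDSHAKE_MODE. The PDMA routines below set both for the duration
+ * of a transfer on the IIfx and restore interrupts-only mode on exit.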
*/ +#define CTRL_REG 0x8 +#define CTRL_INTERRUPTS_ENABLE BIT(1) +#define CTRL_HANDSHAKE_MODE BIT(3) + +static inline void write_ctrl_reg(struct NCR5380_hostdata *hostdata, u32 value) +{ + out_be32(hostdata->io + (CTRL_REG << 4), value); +} + +static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, + unsigned char *dst, int len) +{ + u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4); + unsigned char *d = dst; + int result = 0; + + hostdata->pdma_residual = len; + + while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, + BASR_DRQ | BASR_PHASE_MATCH, + BASR_DRQ | BASR_PHASE_MATCH, 0)) { + int bytes; + + if (macintosh_config->ident == MAC_MODEL_IIFX) + write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE | + CTRL_INTERRUPTS_ENABLE); + + bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512)); + + if (bytes > 0) { + d += bytes; + hostdata->pdma_residual -= bytes; + } + + if (hostdata->pdma_residual == 0) + goto out; + + if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, + BUS_AND_STATUS_REG, BASR_ACK, + BASR_ACK, 0) < 0) + scmd_printk(KERN_DEBUG, hostdata->connected, + "%s: !REQ and !ACK\n", __func__); + if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) + goto out; + + if (bytes == 0) + udelay(MAC_PDMA_DELAY); + + if (bytes >= 0) + continue; + + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, + "%s: bus error (%d/%d)\n", __func__, d - dst, len); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + result = -1; + goto out; + } + + scmd_printk(KERN_ERR, hostdata->connected, + "%s: phase mismatch or !DRQ\n", __func__); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + result = -1; +out: + if (macintosh_config->ident == MAC_MODEL_IIFX) + write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); + return result; +} + +static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, + unsigned char *src, int len) +{ + unsigned char *s = src; + u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4); + int result = 0; + + hostdata->pdma_residual = len; + + while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, + BASR_DRQ | BASR_PHASE_MATCH, + BASR_DRQ | BASR_PHASE_MATCH, 0)) { + int bytes; + + if (macintosh_config->ident == MAC_MODEL_IIFX) + write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE | + CTRL_INTERRUPTS_ENABLE); + + bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512)); + + if (bytes > 0) { + s += bytes; + hostdata->pdma_residual -= bytes; + } + + if (hostdata->pdma_residual == 0) { + if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG, + TCR_LAST_BYTE_SENT, + TCR_LAST_BYTE_SENT, + 0) < 0) { + scmd_printk(KERN_ERR, hostdata->connected, + "%s: Last Byte Sent timeout\n", __func__); + result = -1; + } + goto out; + } + + if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, + BUS_AND_STATUS_REG, BASR_ACK, + BASR_ACK, 0) < 0) + scmd_printk(KERN_DEBUG, hostdata->connected, + "%s: !REQ and !ACK\n", __func__); + if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) + goto out; + + if (bytes == 0) + udelay(MAC_PDMA_DELAY); + + if (bytes >= 0) + continue; + + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, + "%s: bus error (%d/%d)\n", __func__, s - src, len); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + result = -1; + goto out; + } + + scmd_printk(KERN_ERR, hostdata->connected, + "%s: phase mismatch or !DRQ\n", __func__); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + result = -1; +out: + if (macintosh_config->ident == MAC_MODEL_IIFX) + write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); + return result; +} + 
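Both PDMA routines above follow the same pattern: wait until DRQ is asserted with the phase still matching, move at most 512 bytes per iteration, and track the outstanding byte count in pdma_residual so the core can query it through macscsi_dma_residual(). The standalone userspace sketch below models only that chunking and residual bookkeeping; fake_pdma_copy and CHUNK are illustrative names, not driver symbols, and the hardware polling and error paths are reduced to return-value conventions.

#include <stdio.h>
#include <string.h>

#define CHUNK 512	/* per-iteration transfer limit, as in the driver */

/*
 * Stand-in for mac_pdma_recv()/mac_pdma_send(): returns the number of
 * bytes moved, 0 for "target not ready, retry", or a negative value to
 * signal a bus error. This fake always succeeds in full.
 */
static int fake_pdma_copy(unsigned char *dst, const unsigned char *src, int n)
{
	memcpy(dst, src, n);
	return n;
}

int main(void)
{
	unsigned char src[1300], dst[1300];
	unsigned char *d = dst;
	int residual = (int)sizeof(dst);

	memset(src, 0xa5, sizeof(src));

	while (residual > 0) {
		int n = residual < CHUNK ? residual : CHUNK;
		int bytes = fake_pdma_copy(d, src + (d - dst), n);

		if (bytes < 0)
			break;		/* bus error: give up, residual reports what is left */
		if (bytes == 0)
			continue;	/* stalled target: the driver delays, then retries */
		d += bytes;
		residual -= bytes;
	}

	printf("copied %ld bytes, residual %d\n", (long)(d - dst), residual);
	return 0;
}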
+static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, + struct scsi_cmnd *cmd) +{ + int resid = NCR5380_to_ncmd(cmd)->this_residual; + + if (hostdata->flags & FLAG_NO_PSEUDO_DMA || resid < setup_use_pdma) + return 0; + + return resid; +} + +static int macscsi_dma_residual(struct NCR5380_hostdata *hostdata) +{ + return hostdata->pdma_residual; +} + +#include "NCR5380.c" + +#define DRV_MODULE_NAME "mac_scsi" +#define PFX DRV_MODULE_NAME ": " + +static struct scsi_host_template mac_scsi_template = { + .module = THIS_MODULE, + .proc_name = DRV_MODULE_NAME, + .name = "Macintosh NCR5380 SCSI", + .info = macscsi_info, + .queuecommand = macscsi_queue_command, + .eh_abort_handler = macscsi_abort, + .eh_host_reset_handler = macscsi_host_reset, + .can_queue = 16, + .this_id = 7, + .sg_tablesize = 1, + .cmd_per_lun = 2, + .dma_boundary = PAGE_SIZE - 1, + .cmd_size = sizeof(struct NCR5380_cmd), + .max_sectors = 128, +}; + +static int __init mac_scsi_probe(struct platform_device *pdev) +{ + struct Scsi_Host *instance; + struct NCR5380_hostdata *hostdata; + int error; + int host_flags = 0; + struct resource *irq, *pio_mem, *pdma_mem = NULL; + + pio_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!pio_mem) + return -ENODEV; + + pdma_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + + if (!hwreg_present((unsigned char *)pio_mem->start + + (STATUS_REG << 4))) { + pr_info(PFX "no device detected at %pap\n", &pio_mem->start); + return -ENODEV; + } + + if (setup_can_queue > 0) + mac_scsi_template.can_queue = setup_can_queue; + if (setup_cmd_per_lun > 0) + mac_scsi_template.cmd_per_lun = setup_cmd_per_lun; + if (setup_sg_tablesize > 0) + mac_scsi_template.sg_tablesize = setup_sg_tablesize; + if (setup_hostid >= 0) + mac_scsi_template.this_id = setup_hostid & 7; + + instance = scsi_host_alloc(&mac_scsi_template, + sizeof(struct NCR5380_hostdata)); + if (!instance) + return -ENOMEM; + + if (irq) + instance->irq = irq->start; + else + instance->irq = NO_IRQ; + + hostdata = shost_priv(instance); + hostdata->base = pio_mem->start; + hostdata->io = (u8 __iomem *)pio_mem->start; + + if (pdma_mem && setup_use_pdma) + hostdata->pdma_io = (u8 __iomem *)pdma_mem->start; + else + host_flags |= FLAG_NO_PSEUDO_DMA; + + host_flags |= setup_toshiba_delay > 0 ? 
FLAG_TOSHIBA_DELAY : 0; + + error = NCR5380_init(instance, host_flags | FLAG_LATE_DMA_SETUP); + if (error) + goto fail_init; + + if (instance->irq != NO_IRQ) { + error = request_irq(instance->irq, macscsi_intr, IRQF_SHARED, + "NCR5380", instance); + if (error) + goto fail_irq; + } + + NCR5380_maybe_reset_bus(instance); + + error = scsi_add_host(instance, NULL); + if (error) + goto fail_host; + + platform_set_drvdata(pdev, instance); + + scsi_scan_host(instance); + return 0; + +fail_host: + if (instance->irq != NO_IRQ) + free_irq(instance->irq, instance); +fail_irq: + NCR5380_exit(instance); +fail_init: + scsi_host_put(instance); + return error; +} + +static int __exit mac_scsi_remove(struct platform_device *pdev) +{ + struct Scsi_Host *instance = platform_get_drvdata(pdev); + + scsi_remove_host(instance); + if (instance->irq != NO_IRQ) + free_irq(instance->irq, instance); + NCR5380_exit(instance); + scsi_host_put(instance); + return 0; +} + +static struct platform_driver mac_scsi_driver = { + .remove = __exit_p(mac_scsi_remove), + .driver = { + .name = DRV_MODULE_NAME, + }, +}; + +module_platform_driver_probe(mac_scsi_driver, mac_scsi_probe); + +MODULE_ALIAS("platform:" DRV_MODULE_NAME); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c new file mode 100644 index 000000000..e92f1a73c --- /dev/null +++ b/drivers/scsi/megaraid.c @@ -0,0 +1,4636 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Linux MegaRAID device driver + * + * Copyright (c) 2002 LSI Logic Corporation. + * + * Copyright (c) 2002 Red Hat, Inc. All rights reserved. + * - fixes + * - speed-ups (list handling fixes, issued_list, optimizations.) + * - lots of cleanups. + * + * Copyright (c) 2003 Christoph Hellwig + * - new-style, hotplug-aware pci probing and scsi registration + * + * Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju + * + * + * Description: Linux device driver for LSI Logic MegaRAID controller + * + * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493 + * 518, 520, 531, 532 + * + * This driver is supported by LSI Logic, with assistance from Red Hat, Dell, + * and others. Please send updates to the mailing list + * linux-scsi@vger.kernel.org . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "megaraid.h" + +#define MEGARAID_MODULE_VERSION "2.00.4" + +MODULE_AUTHOR ("sju@lsil.com"); +MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver"); +MODULE_LICENSE ("GPL"); +MODULE_VERSION(MEGARAID_MODULE_VERSION); + +static DEFINE_MUTEX(megadev_mutex); +static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN; +module_param(max_cmd_per_lun, uint, 0); +MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)"); + +static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO; +module_param(max_sectors_per_io, ushort, 0); +MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)"); + + +static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT; +module_param(max_mbox_busy_wait, ushort, 0); +MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)"); + +#define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20) +#define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C) +#define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20) +#define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C) + +/* + * Global variables + */ + +static int hba_count; +static adapter_t *hba_soft_state[MAX_CONTROLLERS]; +static struct proc_dir_entry *mega_proc_dir_entry; + +/* For controller re-ordering */ +static struct mega_hbas mega_hbas[MAX_CONTROLLERS]; + +static long +megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); + +/* + * The File Operations structure for the serial/ioctl interface of the driver + */ +static const struct file_operations megadev_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = megadev_unlocked_ioctl, + .open = megadev_open, + .llseek = noop_llseek, +}; + +/* + * Array to structures for storing the information about the controllers. This + * information is sent to the user level applications, when they do an ioctl + * for this information. + */ +static struct mcontroller mcontroller[MAX_CONTROLLERS]; + +/* The current driver version */ +static u32 driver_ver = 0x02000000; + +/* major number used by the device for character interface */ +static int major; + +#define IS_RAID_CH(hba, ch) (((hba)->mega_ch_class >> (ch)) & 0x01) + + +/* + * Debug variable to print some diagnostic messages + */ +static int trace_level; + +/** + * mega_setup_mailbox() + * @adapter: pointer to our soft state + * + * Allocates a 8 byte aligned memory for the handshake mailbox. 
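+ * (The mbox pointer is rounded up to a 16-byte boundary inside the
+ * coherent buffer, the 64-bit mailbox header is then placed 8 bytes below
+ * the aligned mbox, and mbox_dma is set to the matching bus address.)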
+ */ +static int +mega_setup_mailbox(adapter_t *adapter) +{ + unsigned long align; + + adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev, + sizeof(mbox64_t), + &adapter->una_mbox64_dma, + GFP_KERNEL); + + if( !adapter->una_mbox64 ) return -1; + + adapter->mbox = &adapter->una_mbox64->mbox; + + adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) & + (~0UL ^ 0xFUL)); + + adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8); + + align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox); + + adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align; + + /* + * Register the mailbox if the controller is an io-mapped controller + */ + if( adapter->flag & BOARD_IOMAP ) { + + outb(adapter->mbox_dma & 0xFF, + adapter->host->io_port + MBOX_PORT0); + + outb((adapter->mbox_dma >> 8) & 0xFF, + adapter->host->io_port + MBOX_PORT1); + + outb((adapter->mbox_dma >> 16) & 0xFF, + adapter->host->io_port + MBOX_PORT2); + + outb((adapter->mbox_dma >> 24) & 0xFF, + adapter->host->io_port + MBOX_PORT3); + + outb(ENABLE_MBOX_BYTE, + adapter->host->io_port + ENABLE_MBOX_REGION); + + irq_ack(adapter); + + irq_enable(adapter); + } + + return 0; +} + + +/* + * mega_query_adapter() + * @adapter - pointer to our soft state + * + * Issue the adapter inquiry commands to the controller and find out + * information and parameter about the devices attached + */ +static int +mega_query_adapter(adapter_t *adapter) +{ + dma_addr_t prod_info_dma_handle; + mega_inquiry3 *inquiry3; + struct mbox_out mbox; + u8 *raw_mbox = (u8 *)&mbox; + int retval; + + /* Initialize adapter inquiry mailbox */ + + memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); + memset(&mbox, 0, sizeof(mbox)); + + /* + * Try to issue Inquiry3 command + * if not succeeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and + * update enquiry3 structure + */ + mbox.xferaddr = (u32)adapter->buf_dma_handle; + + inquiry3 = (mega_inquiry3 *)adapter->mega_buffer; + + raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */ + raw_mbox[2] = NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */ + raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 0x02 */ + + /* Issue a blocking command to the card */ + if ((retval = issue_scb_block(adapter, raw_mbox))) { + /* the adapter does not support 40ld */ + + mraid_ext_inquiry *ext_inq; + mraid_inquiry *inq; + dma_addr_t dma_handle; + + ext_inq = dma_alloc_coherent(&adapter->dev->dev, + sizeof(mraid_ext_inquiry), + &dma_handle, GFP_KERNEL); + + if( ext_inq == NULL ) return -1; + + inq = &ext_inq->raid_inq; + + mbox.xferaddr = (u32)dma_handle; + + /*issue old 0x04 command to adapter */ + mbox.cmd = MEGA_MBOXCMD_ADPEXTINQ; + + issue_scb_block(adapter, raw_mbox); + + /* + * update Enquiry3 and ProductInfo structures with + * mraid_inquiry structure + */ + mega_8_to_40ld(inq, inquiry3, + (mega_product_info *)&adapter->product_info); + + dma_free_coherent(&adapter->dev->dev, + sizeof(mraid_ext_inquiry), ext_inq, + dma_handle); + + } else { /*adapter supports 40ld */ + adapter->flag |= BOARD_40LD; + + /* + * get product_info, which is static information and will be + * unchanged + */ + prod_info_dma_handle = dma_map_single(&adapter->dev->dev, + (void *)&adapter->product_info, + sizeof(mega_product_info), + DMA_FROM_DEVICE); + + mbox.xferaddr = prod_info_dma_handle; + + raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */ + raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 
0x0E */ + + if ((retval = issue_scb_block(adapter, raw_mbox))) + dev_warn(&adapter->dev->dev, + "Product_info cmd failed with error: %d\n", + retval); + + dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle, + sizeof(mega_product_info), DMA_FROM_DEVICE); + } + + + /* + * kernel scans the channels from 0 to <= max_channel + */ + adapter->host->max_channel = + adapter->product_info.nchannels + NVIRT_CHAN -1; + + adapter->host->max_id = 16; /* max targets per channel */ + + adapter->host->max_lun = 7; /* Up to 7 luns for non disk devices */ + + adapter->host->cmd_per_lun = max_cmd_per_lun; + + adapter->numldrv = inquiry3->num_ldrv; + + adapter->max_cmds = adapter->product_info.max_commands; + + if(adapter->max_cmds > MAX_COMMANDS) + adapter->max_cmds = MAX_COMMANDS; + + adapter->host->can_queue = adapter->max_cmds - 1; + + /* + * Get the maximum number of scatter-gather elements supported by this + * firmware + */ + mega_get_max_sgl(adapter); + + adapter->host->sg_tablesize = adapter->sglen; + + /* use HP firmware and bios version encoding + Note: fw_version[0|1] and bios_version[0|1] were originally shifted + right 8 bits making them zero. This 0 value was hardcoded to fix + sparse warnings. */ + if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) { + snprintf(adapter->fw_version, sizeof(adapter->fw_version), + "%c%d%d.%d%d", + adapter->product_info.fw_version[2], + 0, + adapter->product_info.fw_version[1] & 0x0f, + 0, + adapter->product_info.fw_version[0] & 0x0f); + snprintf(adapter->bios_version, sizeof(adapter->fw_version), + "%c%d%d.%d%d", + adapter->product_info.bios_version[2], + 0, + adapter->product_info.bios_version[1] & 0x0f, + 0, + adapter->product_info.bios_version[0] & 0x0f); + } else { + memcpy(adapter->fw_version, + (char *)adapter->product_info.fw_version, 4); + adapter->fw_version[4] = 0; + + memcpy(adapter->bios_version, + (char *)adapter->product_info.bios_version, 4); + + adapter->bios_version[4] = 0; + } + + dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n", + adapter->fw_version, adapter->bios_version, adapter->numldrv); + + /* + * Do we support extended (>10 bytes) cdbs + */ + adapter->support_ext_cdb = mega_support_ext_cdb(adapter); + if (adapter->support_ext_cdb) + dev_notice(&adapter->dev->dev, "supports extended CDBs\n"); + + + return 0; +} + +/** + * mega_runpendq() + * @adapter: pointer to our soft state + * + * Runs through the list of pending requests. + */ +static inline void +mega_runpendq(adapter_t *adapter) +{ + if(!list_empty(&adapter->pending_list)) + __mega_runpendq(adapter); +} + +/* + * megaraid_queue() + * @scmd - Issue this scsi command + * @done - the callback hook into the scsi mid-layer + * + * The command queuing entry point for the mid-layer. + */ +static int megaraid_queue_lck(struct scsi_cmnd *scmd) +{ + adapter_t *adapter; + scb_t *scb; + int busy=0; + unsigned long flags; + + adapter = (adapter_t *)scmd->device->host->hostdata; + + /* + * Allocate and build a SCB request + * busy flag will be set if mega_build_cmd() command could not + * allocate scb. We will return non-zero status in that case. + * NOTE: scb can be null even though certain commands completed + * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, we would + * return 0 in that case. 
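+ * A non-zero return value from the queuecommand entry point causes the
+ * mid-layer to requeue the command and retry it later.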
+ */ + + spin_lock_irqsave(&adapter->lock, flags); + scb = mega_build_cmd(adapter, scmd, &busy); + if (!scb) + goto out; + + scb->state |= SCB_PENDQ; + list_add_tail(&scb->list, &adapter->pending_list); + + /* + * Check if the HBA is in quiescent state, e.g., during a + * delete logical drive opertion. If it is, don't run + * the pending_list. + */ + if (atomic_read(&adapter->quiescent) == 0) + mega_runpendq(adapter); + + busy = 0; + out: + spin_unlock_irqrestore(&adapter->lock, flags); + return busy; +} + +static DEF_SCSI_QCMD(megaraid_queue) + +/** + * mega_allocate_scb() + * @adapter: pointer to our soft state + * @cmd: scsi command from the mid-layer + * + * Allocate a SCB structure. This is the central structure for controller + * commands. + */ +static inline scb_t * +mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd) +{ + struct list_head *head = &adapter->free_list; + scb_t *scb; + + /* Unlink command from Free List */ + if( !list_empty(head) ) { + + scb = list_entry(head->next, scb_t, list); + + list_del_init(head->next); + + scb->state = SCB_ACTIVE; + scb->cmd = cmd; + scb->dma_type = MEGA_DMA_TYPE_NONE; + + return scb; + } + + return NULL; +} + +/** + * mega_get_ldrv_num() + * @adapter: pointer to our soft state + * @cmd: scsi mid layer command + * @channel: channel on the controller + * + * Calculate the logical drive number based on the information in scsi command + * and the channel number. + */ +static inline int +mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel) +{ + int tgt; + int ldrv_num; + + tgt = cmd->device->id; + + if ( tgt > adapter->this_id ) + tgt--; /* we do not get inquires for initiator id */ + + ldrv_num = (channel * 15) + tgt; + + + /* + * If we have a logical drive with boot enabled, project it first + */ + if( adapter->boot_ldrv_enabled ) { + if( ldrv_num == 0 ) { + ldrv_num = adapter->boot_ldrv; + } + else { + if( ldrv_num <= adapter->boot_ldrv ) { + ldrv_num--; + } + } + } + + /* + * If "delete logical drive" feature is enabled on this controller. + * Do only if at least one delete logical drive operation was done. + * + * Also, after logical drive deletion, instead of logical drive number, + * the value returned should be 0x80+logical drive id. + * + * These is valid only for IO commands. + */ + + if (adapter->support_random_del && adapter->read_ldidmap ) + switch (cmd->cmnd[0]) { + case READ_6: + case WRITE_6: + case READ_10: + case WRITE_10: + ldrv_num += 0x80; + } + + return ldrv_num; +} + +/** + * mega_build_cmd() + * @adapter: pointer to our soft state + * @cmd: Prepare using this scsi command + * @busy: busy flag if no resources + * + * Prepares a command and scatter gather list for the controller. This routine + * also finds out if the commands is intended for a logical drive or a + * physical device and prepares the controller command accordingly. + * + * We also re-order the logical drives and physical devices based on their + * boot settings. 
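+ *
+ * A NULL return means either that the command was completed (or failed)
+ * inline and scsi_done() has already been called, or, if *busy was set,
+ * that no free SCB was available and the command must be retried.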
+ */ +static scb_t * +mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy) +{ + mega_passthru *pthru; + scb_t *scb; + mbox_t *mbox; + u32 seg; + char islogical; + int max_ldrv_num; + int channel = 0; + int target = 0; + int ldrv_num = 0; /* logical drive number */ + + /* + * We know what channels our logical drives are on - mega_find_card() + */ + islogical = adapter->logdrv_chan[cmd->device->channel]; + + /* + * The theory: If physical drive is chosen for boot, all the physical + * devices are exported before the logical drives, otherwise physical + * devices are pushed after logical drives, in which case - Kernel sees + * the physical devices on virtual channel which is obviously converted + * to actual channel on the HBA. + */ + if( adapter->boot_pdrv_enabled ) { + if( islogical ) { + /* logical channel */ + channel = cmd->device->channel - + adapter->product_info.nchannels; + } + else { + /* this is physical channel */ + channel = cmd->device->channel; + target = cmd->device->id; + + /* + * boot from a physical disk, that disk needs to be + * exposed first IF both the channels are SCSI, then + * booting from the second channel is not allowed. + */ + if( target == 0 ) { + target = adapter->boot_pdrv_tgt; + } + else if( target == adapter->boot_pdrv_tgt ) { + target = 0; + } + } + } + else { + if( islogical ) { + /* this is the logical channel */ + channel = cmd->device->channel; + } + else { + /* physical channel */ + channel = cmd->device->channel - NVIRT_CHAN; + target = cmd->device->id; + } + } + + + if(islogical) { + + /* have just LUN 0 for each target on virtual channels */ + if (cmd->device->lun) { + cmd->result = (DID_BAD_TARGET << 16); + scsi_done(cmd); + return NULL; + } + + ldrv_num = mega_get_ldrv_num(adapter, cmd, channel); + + + max_ldrv_num = (adapter->flag & BOARD_40LD) ? + MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD; + + /* + * max_ldrv_num increases by 0x80 if some logical drive was + * deleted. 
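+ * (mega_get_ldrv_num() adds the same 0x80 offset to the drive number of
+ * I/O commands in that case, so the limit check below must allow for it.)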
+ */ + if(adapter->read_ldidmap) + max_ldrv_num += 0x80; + + if(ldrv_num > max_ldrv_num ) { + cmd->result = (DID_BAD_TARGET << 16); + scsi_done(cmd); + return NULL; + } + + } + else { + if( cmd->device->lun > 7) { + /* + * Do not support lun >7 for physically accessed + * devices + */ + cmd->result = (DID_BAD_TARGET << 16); + scsi_done(cmd); + return NULL; + } + } + + /* + * + * Logical drive commands + * + */ + if(islogical) { + switch (cmd->cmnd[0]) { + case TEST_UNIT_READY: +#if MEGA_HAVE_CLUSTERING + /* + * Do we support clustering and is the support enabled + * If no, return success always + */ + if( !adapter->has_cluster ) { + cmd->result = (DID_OK << 16); + scsi_done(cmd); + return NULL; + } + + if(!(scb = mega_allocate_scb(adapter, cmd))) { + *busy = 1; + return NULL; + } + + scb->raw_mbox[0] = MEGA_CLUSTER_CMD; + scb->raw_mbox[2] = MEGA_RESERVATION_STATUS; + scb->raw_mbox[3] = ldrv_num; + + scb->dma_direction = DMA_NONE; + + return scb; +#else + cmd->result = (DID_OK << 16); + scsi_done(cmd); + return NULL; +#endif + + case MODE_SENSE: { + char *buf; + struct scatterlist *sg; + + sg = scsi_sglist(cmd); + buf = kmap_atomic(sg_page(sg)) + sg->offset; + + memset(buf, 0, cmd->cmnd[4]); + kunmap_atomic(buf - sg->offset); + + cmd->result = (DID_OK << 16); + scsi_done(cmd); + return NULL; + } + + case READ_CAPACITY: + case INQUIRY: + + if(!(adapter->flag & (1L << cmd->device->channel))) { + + dev_notice(&adapter->dev->dev, + "scsi%d: scanning scsi channel %d " + "for logical drives\n", + adapter->host->host_no, + cmd->device->channel); + + adapter->flag |= (1L << cmd->device->channel); + } + + /* Allocate a SCB and initialize passthru */ + if(!(scb = mega_allocate_scb(adapter, cmd))) { + *busy = 1; + return NULL; + } + pthru = scb->pthru; + + mbox = (mbox_t *)scb->raw_mbox; + memset(mbox, 0, sizeof(scb->raw_mbox)); + memset(pthru, 0, sizeof(mega_passthru)); + + pthru->timeout = 0; + pthru->ars = 1; + pthru->reqsenselen = 14; + pthru->islogical = 1; + pthru->logdrv = ldrv_num; + pthru->cdblen = cmd->cmd_len; + memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len); + + if( adapter->has_64bit_addr ) { + mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64; + } + else { + mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU; + } + + scb->dma_direction = DMA_FROM_DEVICE; + + pthru->numsgelements = mega_build_sglist(adapter, scb, + &pthru->dataxferaddr, &pthru->dataxferlen); + + mbox->m_out.xferaddr = scb->pthru_dma_addr; + + return scb; + + case READ_6: + case WRITE_6: + case READ_10: + case WRITE_10: + case READ_12: + case WRITE_12: + + /* Allocate a SCB and initialize mailbox */ + if(!(scb = mega_allocate_scb(adapter, cmd))) { + *busy = 1; + return NULL; + } + mbox = (mbox_t *)scb->raw_mbox; + + memset(mbox, 0, sizeof(scb->raw_mbox)); + mbox->m_out.logdrv = ldrv_num; + + /* + * A little hack: 2nd bit is zero for all scsi read + * commands and is set for all scsi write commands + */ + if( adapter->has_64bit_addr ) { + mbox->m_out.cmd = (*cmd->cmnd & 0x02) ? + MEGA_MBOXCMD_LWRITE64: + MEGA_MBOXCMD_LREAD64 ; + } + else { + mbox->m_out.cmd = (*cmd->cmnd & 0x02) ? 
+ MEGA_MBOXCMD_LWRITE: + MEGA_MBOXCMD_LREAD ; + } + + /* + * 6-byte READ(0x08) or WRITE(0x0A) cdb + */ + if( cmd->cmd_len == 6 ) { + mbox->m_out.numsectors = (u32) cmd->cmnd[4]; + mbox->m_out.lba = + ((u32)cmd->cmnd[1] << 16) | + ((u32)cmd->cmnd[2] << 8) | + (u32)cmd->cmnd[3]; + + mbox->m_out.lba &= 0x1FFFFF; + +#if MEGA_HAVE_STATS + /* + * Take modulo 0x80, since the logical drive + * number increases by 0x80 when a logical + * drive was deleted + */ + if (*cmd->cmnd == READ_6) { + adapter->nreads[ldrv_num%0x80]++; + adapter->nreadblocks[ldrv_num%0x80] += + mbox->m_out.numsectors; + } else { + adapter->nwrites[ldrv_num%0x80]++; + adapter->nwriteblocks[ldrv_num%0x80] += + mbox->m_out.numsectors; + } +#endif + } + + /* + * 10-byte READ(0x28) or WRITE(0x2A) cdb + */ + if( cmd->cmd_len == 10 ) { + mbox->m_out.numsectors = + (u32)cmd->cmnd[8] | + ((u32)cmd->cmnd[7] << 8); + mbox->m_out.lba = + ((u32)cmd->cmnd[2] << 24) | + ((u32)cmd->cmnd[3] << 16) | + ((u32)cmd->cmnd[4] << 8) | + (u32)cmd->cmnd[5]; + +#if MEGA_HAVE_STATS + if (*cmd->cmnd == READ_10) { + adapter->nreads[ldrv_num%0x80]++; + adapter->nreadblocks[ldrv_num%0x80] += + mbox->m_out.numsectors; + } else { + adapter->nwrites[ldrv_num%0x80]++; + adapter->nwriteblocks[ldrv_num%0x80] += + mbox->m_out.numsectors; + } +#endif + } + + /* + * 12-byte READ(0xA8) or WRITE(0xAA) cdb + */ + if( cmd->cmd_len == 12 ) { + mbox->m_out.lba = + ((u32)cmd->cmnd[2] << 24) | + ((u32)cmd->cmnd[3] << 16) | + ((u32)cmd->cmnd[4] << 8) | + (u32)cmd->cmnd[5]; + + mbox->m_out.numsectors = + ((u32)cmd->cmnd[6] << 24) | + ((u32)cmd->cmnd[7] << 16) | + ((u32)cmd->cmnd[8] << 8) | + (u32)cmd->cmnd[9]; + +#if MEGA_HAVE_STATS + if (*cmd->cmnd == READ_12) { + adapter->nreads[ldrv_num%0x80]++; + adapter->nreadblocks[ldrv_num%0x80] += + mbox->m_out.numsectors; + } else { + adapter->nwrites[ldrv_num%0x80]++; + adapter->nwriteblocks[ldrv_num%0x80] += + mbox->m_out.numsectors; + } +#endif + } + + /* + * If it is a read command + */ + if( (*cmd->cmnd & 0x0F) == 0x08 ) { + scb->dma_direction = DMA_FROM_DEVICE; + } + else { + scb->dma_direction = DMA_TO_DEVICE; + } + + /* Calculate Scatter-Gather info */ + mbox->m_out.numsgelements = mega_build_sglist(adapter, scb, + (u32 *)&mbox->m_out.xferaddr, &seg); + + return scb; + +#if MEGA_HAVE_CLUSTERING + case RESERVE: + case RELEASE: + + /* + * Do we support clustering and is the support enabled + */ + if( ! adapter->has_cluster ) { + + cmd->result = (DID_BAD_TARGET << 16); + scsi_done(cmd); + return NULL; + } + + /* Allocate a SCB and initialize mailbox */ + if(!(scb = mega_allocate_scb(adapter, cmd))) { + *busy = 1; + return NULL; + } + + scb->raw_mbox[0] = MEGA_CLUSTER_CMD; + scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ? 
+ MEGA_RESERVE_LD : MEGA_RELEASE_LD; + + scb->raw_mbox[3] = ldrv_num; + + scb->dma_direction = DMA_NONE; + + return scb; +#endif + + default: + cmd->result = (DID_BAD_TARGET << 16); + scsi_done(cmd); + return NULL; + } + } + + /* + * Passthru drive commands + */ + else { + /* Allocate a SCB and initialize passthru */ + if(!(scb = mega_allocate_scb(adapter, cmd))) { + *busy = 1; + return NULL; + } + + mbox = (mbox_t *)scb->raw_mbox; + memset(mbox, 0, sizeof(scb->raw_mbox)); + + if( adapter->support_ext_cdb ) { + + mega_prepare_extpassthru(adapter, scb, cmd, + channel, target); + + mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU; + + mbox->m_out.xferaddr = scb->epthru_dma_addr; + + } + else { + + pthru = mega_prepare_passthru(adapter, scb, cmd, + channel, target); + + /* Initialize mailbox */ + if( adapter->has_64bit_addr ) { + mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64; + } + else { + mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU; + } + + mbox->m_out.xferaddr = scb->pthru_dma_addr; + + } + return scb; + } + return NULL; +} + + +/** + * mega_prepare_passthru() + * @adapter: pointer to our soft state + * @scb: our scsi control block + * @cmd: scsi command from the mid-layer + * @channel: actual channel on the controller + * @target: actual id on the controller. + * + * prepare a command for the scsi physical devices. + */ +static mega_passthru * +mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd, + int channel, int target) +{ + mega_passthru *pthru; + + pthru = scb->pthru; + memset(pthru, 0, sizeof (mega_passthru)); + + /* 0=6sec/1=60sec/2=10min/3=3hrs */ + pthru->timeout = 2; + + pthru->ars = 1; + pthru->reqsenselen = 14; + pthru->islogical = 0; + + pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel; + + pthru->target = (adapter->flag & BOARD_40LD) ? + (channel << 4) | target : target; + + pthru->cdblen = cmd->cmd_len; + pthru->logdrv = cmd->device->lun; + + memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len); + + /* Not sure about the direction */ + scb->dma_direction = DMA_BIDIRECTIONAL; + + /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */ + switch (cmd->cmnd[0]) { + case INQUIRY: + case READ_CAPACITY: + if(!(adapter->flag & (1L << cmd->device->channel))) { + + dev_notice(&adapter->dev->dev, + "scsi%d: scanning scsi channel %d [P%d] " + "for physical devices\n", + adapter->host->host_no, + cmd->device->channel, channel); + + adapter->flag |= (1L << cmd->device->channel); + } + fallthrough; + default: + pthru->numsgelements = mega_build_sglist(adapter, scb, + &pthru->dataxferaddr, &pthru->dataxferlen); + break; + } + return pthru; +} + + +/** + * mega_prepare_extpassthru() + * @adapter: pointer to our soft state + * @scb: our scsi control block + * @cmd: scsi command from the mid-layer + * @channel: actual channel on the controller + * @target: actual id on the controller. + * + * prepare a command for the scsi physical devices. This rountine prepares + * commands for devices which can take extended CDBs (>10 bytes) + */ +static mega_ext_passthru * +mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, + struct scsi_cmnd *cmd, + int channel, int target) +{ + mega_ext_passthru *epthru; + + epthru = scb->epthru; + memset(epthru, 0, sizeof(mega_ext_passthru)); + + /* 0=6sec/1=60sec/2=10min/3=3hrs */ + epthru->timeout = 2; + + epthru->ars = 1; + epthru->reqsenselen = 14; + epthru->islogical = 0; + + epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel; + epthru->target = (adapter->flag & BOARD_40LD) ? 
+ (channel << 4) | target : target; + + epthru->cdblen = cmd->cmd_len; + epthru->logdrv = cmd->device->lun; + + memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len); + + /* Not sure about the direction */ + scb->dma_direction = DMA_BIDIRECTIONAL; + + switch(cmd->cmnd[0]) { + case INQUIRY: + case READ_CAPACITY: + if(!(adapter->flag & (1L << cmd->device->channel))) { + + dev_notice(&adapter->dev->dev, + "scsi%d: scanning scsi channel %d [P%d] " + "for physical devices\n", + adapter->host->host_no, + cmd->device->channel, channel); + + adapter->flag |= (1L << cmd->device->channel); + } + fallthrough; + default: + epthru->numsgelements = mega_build_sglist(adapter, scb, + &epthru->dataxferaddr, &epthru->dataxferlen); + break; + } + + return epthru; +} + +static void +__mega_runpendq(adapter_t *adapter) +{ + scb_t *scb; + struct list_head *pos, *next; + + /* Issue any pending commands to the card */ + list_for_each_safe(pos, next, &adapter->pending_list) { + + scb = list_entry(pos, scb_t, list); + + if( !(scb->state & SCB_ISSUED) ) { + + if( issue_scb(adapter, scb) != 0 ) + return; + } + } + + return; +} + + +/** + * issue_scb() + * @adapter: pointer to our soft state + * @scb: scsi control block + * + * Post a command to the card if the mailbox is available, otherwise return + * busy. We also take the scb from the pending list if the mailbox is + * available. + */ +static int +issue_scb(adapter_t *adapter, scb_t *scb) +{ + volatile mbox64_t *mbox64 = adapter->mbox64; + volatile mbox_t *mbox = adapter->mbox; + unsigned int i = 0; + + if(unlikely(mbox->m_in.busy)) { + do { + udelay(1); + i++; + } while( mbox->m_in.busy && (i < max_mbox_busy_wait) ); + + if(mbox->m_in.busy) return -1; + } + + /* Copy mailbox data into host structure */ + memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox, + sizeof(struct mbox_out)); + + mbox->m_out.cmdid = scb->idx; /* Set cmdid */ + mbox->m_in.busy = 1; /* Set busy */ + + + /* + * Increment the pending queue counter + */ + atomic_inc(&adapter->pend_cmds); + + switch (mbox->m_out.cmd) { + case MEGA_MBOXCMD_LREAD64: + case MEGA_MBOXCMD_LWRITE64: + case MEGA_MBOXCMD_PASSTHRU64: + case MEGA_MBOXCMD_EXTPTHRU: + mbox64->xfer_segment_lo = mbox->m_out.xferaddr; + mbox64->xfer_segment_hi = 0; + mbox->m_out.xferaddr = 0xFFFFFFFF; + break; + default: + mbox64->xfer_segment_lo = 0; + mbox64->xfer_segment_hi = 0; + } + + /* + * post the command + */ + scb->state |= SCB_ISSUED; + + if( likely(adapter->flag & BOARD_MEMMAP) ) { + mbox->m_in.poll = 0; + mbox->m_in.ack = 0; + WRINDOOR(adapter, adapter->mbox_dma | 0x1); + } + else { + irq_enable(adapter); + issue_command(adapter); + } + + return 0; +} + +/* + * Wait until the controller's mailbox is available + */ +static inline int +mega_busywait_mbox (adapter_t *adapter) +{ + if (adapter->mbox->m_in.busy) + return __mega_busywait_mbox(adapter); + return 0; +} + +/** + * issue_scb_block() + * @adapter: pointer to our soft state + * @raw_mbox: the mailbox + * + * Issue a scb in synchronous and non-interrupt mode + */ +static int +issue_scb_block(adapter_t *adapter, u_char *raw_mbox) +{ + volatile mbox64_t *mbox64 = adapter->mbox64; + volatile mbox_t *mbox = adapter->mbox; + u8 byte; + + /* Wait until mailbox is free */ + if(mega_busywait_mbox (adapter)) + goto bug_blocked_mailbox; + + /* Copy mailbox data into host structure */ + memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out)); + mbox->m_out.cmdid = 0xFE; + mbox->m_in.busy = 1; + + switch (raw_mbox[0]) { + case MEGA_MBOXCMD_LREAD64: + case MEGA_MBOXCMD_LWRITE64: + case 
MEGA_MBOXCMD_PASSTHRU64: + case MEGA_MBOXCMD_EXTPTHRU: + mbox64->xfer_segment_lo = mbox->m_out.xferaddr; + mbox64->xfer_segment_hi = 0; + mbox->m_out.xferaddr = 0xFFFFFFFF; + break; + default: + mbox64->xfer_segment_lo = 0; + mbox64->xfer_segment_hi = 0; + } + + if( likely(adapter->flag & BOARD_MEMMAP) ) { + mbox->m_in.poll = 0; + mbox->m_in.ack = 0; + mbox->m_in.numstatus = 0xFF; + mbox->m_in.status = 0xFF; + WRINDOOR(adapter, adapter->mbox_dma | 0x1); + + while((volatile u8)mbox->m_in.numstatus == 0xFF) + cpu_relax(); + + mbox->m_in.numstatus = 0xFF; + + while( (volatile u8)mbox->m_in.poll != 0x77 ) + cpu_relax(); + + mbox->m_in.poll = 0; + mbox->m_in.ack = 0x77; + + WRINDOOR(adapter, adapter->mbox_dma | 0x2); + + while(RDINDOOR(adapter) & 0x2) + cpu_relax(); + } + else { + irq_disable(adapter); + issue_command(adapter); + + while (!((byte = irq_state(adapter)) & INTR_VALID)) + cpu_relax(); + + set_irq_state(adapter, byte); + irq_enable(adapter); + irq_ack(adapter); + } + + return mbox->m_in.status; + +bug_blocked_mailbox: + dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n"); + udelay (1000); + return -1; +} + + +/** + * megaraid_isr_iomapped() + * @irq: irq + * @devp: pointer to our soft state + * + * Interrupt service routine for io-mapped controllers. + * Find out if our device is interrupting. If yes, acknowledge the interrupt + * and service the completed commands. + */ +static irqreturn_t +megaraid_isr_iomapped(int irq, void *devp) +{ + adapter_t *adapter = devp; + unsigned long flags; + u8 status; + u8 nstatus; + u8 completed[MAX_FIRMWARE_STATUS]; + u8 byte; + int handled = 0; + + + /* + * loop till F/W has more commands for us to complete. + */ + spin_lock_irqsave(&adapter->lock, flags); + + do { + /* Check if a valid interrupt is pending */ + byte = irq_state(adapter); + if( (byte & VALID_INTR_BYTE) == 0 ) { + /* + * No more pending commands + */ + goto out_unlock; + } + set_irq_state(adapter, byte); + + while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus) + == 0xFF) + cpu_relax(); + adapter->mbox->m_in.numstatus = 0xFF; + + status = adapter->mbox->m_in.status; + + /* + * decrement the pending queue counter + */ + atomic_sub(nstatus, &adapter->pend_cmds); + + memcpy(completed, (void *)adapter->mbox->m_in.completed, + nstatus); + + /* Acknowledge interrupt */ + irq_ack(adapter); + + mega_cmd_done(adapter, completed, nstatus, status); + + mega_rundoneq(adapter); + + handled = 1; + + /* Loop through any pending requests */ + if(atomic_read(&adapter->quiescent) == 0) { + mega_runpendq(adapter); + } + + } while(1); + + out_unlock: + + spin_unlock_irqrestore(&adapter->lock, flags); + + return IRQ_RETVAL(handled); +} + + +/** + * megaraid_isr_memmapped() + * @irq: irq + * @devp: pointer to our soft state + * + * Interrupt service routine for memory-mapped controllers. + * Find out if our device is interrupting. If yes, acknowledge the interrupt + * and service the completed commands. + */ +static irqreturn_t +megaraid_isr_memmapped(int irq, void *devp) +{ + adapter_t *adapter = devp; + unsigned long flags; + u8 status; + u32 dword = 0; + u8 nstatus; + u8 completed[MAX_FIRMWARE_STATUS]; + int handled = 0; + + + /* + * loop till F/W has more commands for us to complete. 
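+ *
+ * The handshake is: spot 0x10001234 in the outbound doorbell, pull
+ * numstatus/status and the completed[] command ids from the mailbox,
+ * then write 0x2 to the inbound doorbell and wait for it to clear.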
+ */ + spin_lock_irqsave(&adapter->lock, flags); + + do { + /* Check if a valid interrupt is pending */ + dword = RDOUTDOOR(adapter); + if(dword != 0x10001234) { + /* + * No more pending commands + */ + goto out_unlock; + } + WROUTDOOR(adapter, 0x10001234); + + while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus) + == 0xFF) { + cpu_relax(); + } + adapter->mbox->m_in.numstatus = 0xFF; + + status = adapter->mbox->m_in.status; + + /* + * decrement the pending queue counter + */ + atomic_sub(nstatus, &adapter->pend_cmds); + + memcpy(completed, (void *)adapter->mbox->m_in.completed, + nstatus); + + /* Acknowledge interrupt */ + WRINDOOR(adapter, 0x2); + + handled = 1; + + while( RDINDOOR(adapter) & 0x02 ) + cpu_relax(); + + mega_cmd_done(adapter, completed, nstatus, status); + + mega_rundoneq(adapter); + + /* Loop through any pending requests */ + if(atomic_read(&adapter->quiescent) == 0) { + mega_runpendq(adapter); + } + + } while(1); + + out_unlock: + + spin_unlock_irqrestore(&adapter->lock, flags); + + return IRQ_RETVAL(handled); +} +/** + * mega_cmd_done() + * @adapter: pointer to our soft state + * @completed: array of ids of completed commands + * @nstatus: number of completed commands + * @status: status of the last command completed + * + * Complete the commands and call the scsi mid-layer callback hooks. + */ +static void +mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status) +{ + mega_ext_passthru *epthru = NULL; + struct scatterlist *sgl; + struct scsi_cmnd *cmd = NULL; + mega_passthru *pthru = NULL; + mbox_t *mbox = NULL; + u8 c; + scb_t *scb; + int islogical; + int cmdid; + int i; + + /* + * for all the commands completed, call the mid-layer callback routine + * and free the scb. + */ + for( i = 0; i < nstatus; i++ ) { + + cmdid = completed[i]; + + /* + * Only free SCBs for the commands coming down from the + * mid-layer, not for which were issued internally + * + * For internal command, restore the status returned by the + * firmware so that user can interpret it. + */ + if (cmdid == CMDID_INT_CMDS) { + scb = &adapter->int_scb; + cmd = scb->cmd; + + list_del_init(&scb->list); + scb->state = SCB_FREE; + + adapter->int_status = status; + complete(&adapter->int_waitq); + } else { + scb = &adapter->scb_list[cmdid]; + + /* + * Make sure f/w has completed a valid command + */ + if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) { + dev_crit(&adapter->dev->dev, "invalid command " + "Id %d, scb->state:%x, scsi cmd:%p\n", + cmdid, scb->state, scb->cmd); + + continue; + } + + /* + * Was a abort issued for this command + */ + if( scb->state & SCB_ABORT ) { + + dev_warn(&adapter->dev->dev, + "aborted cmd [%x] complete\n", + scb->idx); + + scb->cmd->result = (DID_ABORT << 16); + + list_add_tail(SCSI_LIST(scb->cmd), + &adapter->completed_list); + + mega_free_scb(adapter, scb); + + continue; + } + + /* + * Was a reset issued for this command + */ + if( scb->state & SCB_RESET ) { + + dev_warn(&adapter->dev->dev, + "reset cmd [%x] complete\n", + scb->idx); + + scb->cmd->result = (DID_RESET << 16); + + list_add_tail(SCSI_LIST(scb->cmd), + &adapter->completed_list); + + mega_free_scb (adapter, scb); + + continue; + } + + cmd = scb->cmd; + pthru = scb->pthru; + epthru = scb->epthru; + mbox = (mbox_t *)scb->raw_mbox; + +#if MEGA_HAVE_STATS + { + + int logdrv = mbox->m_out.logdrv; + + islogical = adapter->logdrv_chan[cmd->channel]; + /* + * Maintain an error counter for the logical drive. 
+ * Some application like SNMP agent need such + * statistics + */ + if( status && islogical && (cmd->cmnd[0] == READ_6 || + cmd->cmnd[0] == READ_10 || + cmd->cmnd[0] == READ_12)) { + /* + * Logical drive number increases by 0x80 when + * a logical drive is deleted + */ + adapter->rd_errors[logdrv%0x80]++; + } + + if( status && islogical && (cmd->cmnd[0] == WRITE_6 || + cmd->cmnd[0] == WRITE_10 || + cmd->cmnd[0] == WRITE_12)) { + /* + * Logical drive number increases by 0x80 when + * a logical drive is deleted + */ + adapter->wr_errors[logdrv%0x80]++; + } + + } +#endif + } + + /* + * Do not return the presence of hard disk on the channel so, + * inquiry sent, and returned data==hard disk or removable + * hard disk and not logical, request should return failure! - + * PJ + */ + islogical = adapter->logdrv_chan[cmd->device->channel]; + if( cmd->cmnd[0] == INQUIRY && !islogical ) { + + sgl = scsi_sglist(cmd); + if( sg_page(sgl) ) { + c = *(unsigned char *) sg_virt(&sgl[0]); + } else { + dev_warn(&adapter->dev->dev, "invalid sg\n"); + c = 0; + } + + if(IS_RAID_CH(adapter, cmd->device->channel) && + ((c & 0x1F ) == TYPE_DISK)) { + status = 0xF0; + } + } + + /* clear result; otherwise, success returns corrupt value */ + cmd->result = 0; + + /* Convert MegaRAID status to Linux error code */ + switch (status) { + case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */ + cmd->result |= (DID_OK << 16); + break; + + case 0x02: /* ERROR_ABORTED, i.e. + SCSI_STATUS_CHECK_CONDITION */ + + /* set sense_buffer and result fields */ + if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU || + mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) { + + memcpy(cmd->sense_buffer, pthru->reqsensearea, + 14); + + cmd->result = SAM_STAT_CHECK_CONDITION; + } + else { + if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) { + + memcpy(cmd->sense_buffer, + epthru->reqsensearea, 14); + + cmd->result = SAM_STAT_CHECK_CONDITION; + } else + scsi_build_sense(cmd, 0, + ABORTED_COMMAND, 0, 0); + } + break; + + case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e. + SCSI_STATUS_BUSY */ + cmd->result |= (DID_BUS_BUSY << 16) | status; + break; + + default: +#if MEGA_HAVE_CLUSTERING + /* + * If TEST_UNIT_READY fails, we know + * MEGA_RESERVATION_STATUS failed + */ + if( cmd->cmnd[0] == TEST_UNIT_READY ) { + cmd->result |= (DID_ERROR << 16) | + SAM_STAT_RESERVATION_CONFLICT; + } + else + /* + * Error code returned is 1 if Reserve or Release + * failed or the input parameter is invalid + */ + if( status == 1 && + (cmd->cmnd[0] == RESERVE || + cmd->cmnd[0] == RELEASE) ) { + + cmd->result |= (DID_ERROR << 16) | + SAM_STAT_RESERVATION_CONFLICT; + } + else +#endif + cmd->result |= (DID_BAD_TARGET << 16)|status; + } + + mega_free_scb(adapter, scb); + + /* Add Scsi_Command to end of completed queue */ + list_add_tail(SCSI_LIST(cmd), &adapter->completed_list); + } +} + + +/* + * mega_runpendq() + * + * Run through the list of completed requests and finish it + */ +static void +mega_rundoneq (adapter_t *adapter) +{ + struct megaraid_cmd_priv *cmd_priv; + + list_for_each_entry(cmd_priv, &adapter->completed_list, entry) + scsi_done(megaraid_to_scsi_cmd(cmd_priv)); + + INIT_LIST_HEAD(&adapter->completed_list); +} + + +/* + * Free a SCB structure + * Note: We assume the scsi commands associated with this scb is not free yet. 
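+ * Any scatter-gather mapping is torn down, the scb is unlinked from the
+ * pending list and then put back on the adapter's free list.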
+ */ +static void +mega_free_scb(adapter_t *adapter, scb_t *scb) +{ + switch( scb->dma_type ) { + + case MEGA_DMA_TYPE_NONE: + break; + + case MEGA_SGLIST: + scsi_dma_unmap(scb->cmd); + break; + default: + break; + } + + /* + * Remove from the pending list + */ + list_del_init(&scb->list); + + /* Link the scb back into free list */ + scb->state = SCB_FREE; + scb->cmd = NULL; + + list_add(&scb->list, &adapter->free_list); +} + + +static int +__mega_busywait_mbox (adapter_t *adapter) +{ + volatile mbox_t *mbox = adapter->mbox; + long counter; + + for (counter = 0; counter < 10000; counter++) { + if (!mbox->m_in.busy) + return 0; + udelay(100); + cond_resched(); + } + return -1; /* give up after 1 second */ +} + +/* + * Copies data to SGLIST + * Note: For 64 bit cards, we need a minimum of one SG element for read/write + */ +static int +mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len) +{ + struct scatterlist *sg; + struct scsi_cmnd *cmd; + int sgcnt; + int idx; + + cmd = scb->cmd; + + /* + * Copy Scatter-Gather list info into controller structure. + * + * The number of sg elements returned must not exceed our limit + */ + sgcnt = scsi_dma_map(cmd); + + scb->dma_type = MEGA_SGLIST; + + BUG_ON(sgcnt > adapter->sglen || sgcnt < 0); + + *len = 0; + + if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) { + sg = scsi_sglist(cmd); + scb->dma_h_bulkdata = sg_dma_address(sg); + *buf = (u32)scb->dma_h_bulkdata; + *len = sg_dma_len(sg); + return 0; + } + + scsi_for_each_sg(cmd, sg, sgcnt, idx) { + if (adapter->has_64bit_addr) { + scb->sgl64[idx].address = sg_dma_address(sg); + *len += scb->sgl64[idx].length = sg_dma_len(sg); + } else { + scb->sgl[idx].address = sg_dma_address(sg); + *len += scb->sgl[idx].length = sg_dma_len(sg); + } + } + + /* Reset pointer and length fields */ + *buf = scb->sgl_dma_addr; + + /* Return count of SG requests */ + return sgcnt; +} + + +/* + * mega_8_to_40ld() + * + * takes all info in AdapterInquiry structure and puts it into ProductInfo and + * Enquiry3 structures for later use + */ +static void +mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3, + mega_product_info *product_info) +{ + int i; + + product_info->max_commands = inquiry->adapter_info.max_commands; + enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate; + product_info->nchannels = inquiry->adapter_info.nchannels; + + for (i = 0; i < 4; i++) { + product_info->fw_version[i] = + inquiry->adapter_info.fw_version[i]; + + product_info->bios_version[i] = + inquiry->adapter_info.bios_version[i]; + } + enquiry3->cache_flush_interval = + inquiry->adapter_info.cache_flush_interval; + + product_info->dram_size = inquiry->adapter_info.dram_size; + + enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv; + + for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) { + enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i]; + enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i]; + enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i]; + } + + for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++) + enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i]; +} + +static inline void +mega_free_sgl(adapter_t *adapter) +{ + scb_t *scb; + int i; + + for(i = 0; i < adapter->max_cmds; i++) { + + scb = &adapter->scb_list[i]; + + if( scb->sgl64 ) { + dma_free_coherent(&adapter->dev->dev, + sizeof(mega_sgl64) * adapter->sglen, + scb->sgl64, scb->sgl_dma_addr); + + scb->sgl64 = NULL; + } + + if( scb->pthru ) { + dma_free_coherent(&adapter->dev->dev, + sizeof(mega_passthru), scb->pthru, + 
scb->pthru_dma_addr); + + scb->pthru = NULL; + } + + if( scb->epthru ) { + dma_free_coherent(&adapter->dev->dev, + sizeof(mega_ext_passthru), + scb->epthru, scb->epthru_dma_addr); + + scb->epthru = NULL; + } + + } +} + + +/* + * Get information about the card/driver + */ +const char * +megaraid_info(struct Scsi_Host *host) +{ + static char buffer[512]; + adapter_t *adapter; + + adapter = (adapter_t *)host->hostdata; + + sprintf (buffer, + "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns", + adapter->fw_version, adapter->product_info.max_commands, + adapter->host->max_id, adapter->host->max_channel, + (u32)adapter->host->max_lun); + return buffer; +} + +/* + * Abort a previous SCSI request. Only commands on the pending list can be + * aborted. All the commands issued to the F/W must complete. + */ +static int +megaraid_abort(struct scsi_cmnd *cmd) +{ + adapter_t *adapter; + int rval; + + adapter = (adapter_t *)cmd->device->host->hostdata; + + rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT); + + /* + * This is required here to complete any completed requests + * to be communicated over to the mid layer. + */ + mega_rundoneq(adapter); + + return rval; +} + + +static int +megaraid_reset(struct scsi_cmnd *cmd) +{ + adapter_t *adapter; + megacmd_t mc; + int rval; + + adapter = (adapter_t *)cmd->device->host->hostdata; + +#if MEGA_HAVE_CLUSTERING + mc.cmd = MEGA_CLUSTER_CMD; + mc.opcode = MEGA_RESET_RESERVATIONS; + + if( mega_internal_command(adapter, &mc, NULL) != 0 ) { + dev_warn(&adapter->dev->dev, "reservation reset failed\n"); + } + else { + dev_info(&adapter->dev->dev, "reservation reset\n"); + } +#endif + + spin_lock_irq(&adapter->lock); + + rval = megaraid_abort_and_reset(adapter, cmd, SCB_RESET); + + /* + * This is required here to complete any completed requests + * to be communicated over to the mid layer. + */ + mega_rundoneq(adapter); + spin_unlock_irq(&adapter->lock); + + return rval; +} + +/** + * megaraid_abort_and_reset() + * @adapter: megaraid soft state + * @cmd: scsi command to be aborted or reset + * @aor: abort or reset flag + * + * Try to locate the scsi command in the pending queue. If found and is not + * issued to the controller, abort/reset it. Otherwise return failure + */ +static int +megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor) +{ + struct list_head *pos, *next; + scb_t *scb; + + dev_warn(&adapter->dev->dev, "%s cmd=%x \n", + (aor == SCB_ABORT)? "ABORTING":"RESET", + cmd->cmnd[0], cmd->device->channel, + cmd->device->id, (u32)cmd->device->lun); + + if(list_empty(&adapter->pending_list)) + return FAILED; + + list_for_each_safe(pos, next, &adapter->pending_list) { + + scb = list_entry(pos, scb_t, list); + + if (scb->cmd == cmd) { /* Found command */ + + scb->state |= aor; + + /* + * Check if this command has firmware ownership. If + * yes, we cannot reset this command. Whenever f/w + * completes this command, we will return appropriate + * status from ISR. + */ + if( scb->state & SCB_ISSUED ) { + + dev_warn(&adapter->dev->dev, + "%s[%x], fw owner\n", + (aor==SCB_ABORT) ? "ABORTING":"RESET", + scb->idx); + + return FAILED; + } + else { + + /* + * Not yet issued! Remove from the pending + * list + */ + dev_warn(&adapter->dev->dev, + "%s-[%x], driver owner\n", + (aor==SCB_ABORT) ? 
"ABORTING":"RESET", + scb->idx); + + mega_free_scb(adapter, scb); + + if( aor == SCB_ABORT ) { + cmd->result = (DID_ABORT << 16); + } + else { + cmd->result = (DID_RESET << 16); + } + + list_add_tail(SCSI_LIST(cmd), + &adapter->completed_list); + + return SUCCESS; + } + } + } + + return FAILED; +} + +static inline int +make_local_pdev(adapter_t *adapter, struct pci_dev **pdev) +{ + *pdev = pci_alloc_dev(NULL); + + if( *pdev == NULL ) return -1; + + memcpy(*pdev, adapter->dev, sizeof(struct pci_dev)); + + if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) { + kfree(*pdev); + return -1; + } + + return 0; +} + +static inline void +free_local_pdev(struct pci_dev *pdev) +{ + kfree(pdev); +} + +/** + * mega_allocate_inquiry() + * @dma_handle: handle returned for dma address + * @pdev: handle to pci device + * + * allocates memory for inquiry structure + */ +static inline void * +mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev) +{ + return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3), + dma_handle, GFP_KERNEL); +} + + +static inline void +mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev) +{ + dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry, + dma_handle); +} + + +#ifdef CONFIG_PROC_FS +/* Following code handles /proc fs */ + +/** + * proc_show_config() + * @m: Synthetic file construction data + * @v: File iterator + * + * Display configuration information about the controller. + */ +static int +proc_show_config(struct seq_file *m, void *v) +{ + + adapter_t *adapter = m->private; + + seq_puts(m, MEGARAID_VERSION); + if(adapter->product_info.product_name[0]) + seq_printf(m, "%s\n", adapter->product_info.product_name); + + seq_puts(m, "Controller Type: "); + + if( adapter->flag & BOARD_MEMMAP ) + seq_puts(m, "438/466/467/471/493/518/520/531/532\n"); + else + seq_puts(m, "418/428/434\n"); + + if(adapter->flag & BOARD_40LD) + seq_puts(m, "Controller Supports 40 Logical Drives\n"); + + if(adapter->flag & BOARD_64BIT) + seq_puts(m, "Controller capable of 64-bit memory addressing\n"); + if( adapter->has_64bit_addr ) + seq_puts(m, "Controller using 64-bit memory addressing\n"); + else + seq_puts(m, "Controller is not using 64-bit memory addressing\n"); + + seq_printf(m, "Base = %08lx, Irq = %d, ", + adapter->base, adapter->host->irq); + + seq_printf(m, "Logical Drives = %d, Channels = %d\n", + adapter->numldrv, adapter->product_info.nchannels); + + seq_printf(m, "Version =%s:%s, DRAM = %dMb\n", + adapter->fw_version, adapter->bios_version, + adapter->product_info.dram_size); + + seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n", + adapter->product_info.max_commands, adapter->max_cmds); + + seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb); + seq_printf(m, "support_random_del = %d\n", adapter->support_random_del); + seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled); + seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv); + seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled); + seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch); + seq_printf(m, "boot_pdrv_tgt = %d\n", adapter->boot_pdrv_tgt); + seq_printf(m, "quiescent = %d\n", + atomic_read(&adapter->quiescent)); + seq_printf(m, "has_cluster = %d\n", adapter->has_cluster); + + seq_puts(m, "\nModule Parameters:\n"); + seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun); + seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io); + return 0; +} + +/** + * proc_show_stat() + * @m: Synthetic 
file construction data + * @v: File iterator + * + * Display statistical information about the I/O activity. + */ +static int +proc_show_stat(struct seq_file *m, void *v) +{ + adapter_t *adapter = m->private; +#if MEGA_HAVE_STATS + int i; +#endif + + seq_puts(m, "Statistical Information for this controller\n"); + seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds)); +#if MEGA_HAVE_STATS + for(i = 0; i < adapter->numldrv; i++) { + seq_printf(m, "Logical Drive %d:\n", i); + seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n", + adapter->nreads[i], adapter->nwrites[i]); + seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n", + adapter->nreadblocks[i], adapter->nwriteblocks[i]); + seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n", + adapter->rd_errors[i], adapter->wr_errors[i]); + } +#else + seq_puts(m, "IO and error counters not compiled in driver.\n"); +#endif + return 0; +} + + +/** + * proc_show_mbox() + * @m: Synthetic file construction data + * @v: File iterator + * + * Display mailbox information for the last command issued. This information + * is good for debugging. + */ +static int +proc_show_mbox(struct seq_file *m, void *v) +{ + adapter_t *adapter = m->private; + volatile mbox_t *mbox = adapter->mbox; + + seq_puts(m, "Contents of Mail Box Structure\n"); + seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd); + seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid); + seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors); + seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba); + seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr); + seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv); + seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements); + seq_printf(m, " Busy = %01x\n", mbox->m_in.busy); + seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status); + return 0; +} + + +/** + * proc_show_rebuild_rate() + * @m: Synthetic file construction data + * @v: File iterator + * + * Display current rebuild rate + */ +static int +proc_show_rebuild_rate(struct seq_file *m, void *v) +{ + adapter_t *adapter = m->private; + dma_addr_t dma_handle; + caddr_t inquiry; + struct pci_dev *pdev; + + if( make_local_pdev(adapter, &pdev) != 0 ) + return 0; + + if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) + goto free_pdev; + + if( mega_adapinq(adapter, dma_handle) != 0 ) { + seq_puts(m, "Adapter inquiry failed.\n"); + dev_warn(&adapter->dev->dev, "inquiry failed\n"); + goto free_inquiry; + } + + if( adapter->flag & BOARD_40LD ) + seq_printf(m, "Rebuild Rate: [%d%%]\n", + ((mega_inquiry3 *)inquiry)->rebuild_rate); + else + seq_printf(m, "Rebuild Rate: [%d%%]\n", + ((mraid_ext_inquiry *) + inquiry)->raid_inq.adapter_info.rebuild_rate); + +free_inquiry: + mega_free_inquiry(inquiry, dma_handle, pdev); +free_pdev: + free_local_pdev(pdev); + return 0; +} + + +/** + * proc_show_battery() + * @m: Synthetic file construction data + * @v: File iterator + * + * Display information about the battery module on the controller. 
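/*
 * Illustrative sketch only -- not part of the upstream patch.  It factors
 * out the allocate/inquire/free pattern that the CONFIG_PROC_FS show
 * routines in this section repeat: clone a throw-away 32-bit pci_dev with
 * make_local_pdev() so the inquiry buffer is allocated below 4GB, run the
 * adapter inquiry (mega_adapinq(), defined further down), then release
 * everything.  "my_adapter" is a placeholder for a real adapter_t soft
 * state.
 */
static int example_inquiry_roundtrip(adapter_t *my_adapter)
{
	struct pci_dev *pdev;
	dma_addr_t dma_handle;
	void *inquiry;
	int rval = -1;

	if (make_local_pdev(my_adapter, &pdev) != 0)
		return -1;

	inquiry = mega_allocate_inquiry(&dma_handle, pdev);
	if (inquiry == NULL)
		goto out_pdev;

	/* mega_adapinq() fills the buffer through an internal FW command */
	rval = mega_adapinq(my_adapter, dma_handle);

	mega_free_inquiry(inquiry, dma_handle, pdev);
out_pdev:
	free_local_pdev(pdev);
	return rval;
}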
+ */ +static int +proc_show_battery(struct seq_file *m, void *v) +{ + adapter_t *adapter = m->private; + dma_addr_t dma_handle; + caddr_t inquiry; + struct pci_dev *pdev; + u8 battery_status; + + if( make_local_pdev(adapter, &pdev) != 0 ) + return 0; + + if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) + goto free_pdev; + + if( mega_adapinq(adapter, dma_handle) != 0 ) { + seq_puts(m, "Adapter inquiry failed.\n"); + dev_warn(&adapter->dev->dev, "inquiry failed\n"); + goto free_inquiry; + } + + if( adapter->flag & BOARD_40LD ) { + battery_status = ((mega_inquiry3 *)inquiry)->battery_status; + } + else { + battery_status = ((mraid_ext_inquiry *)inquiry)-> + raid_inq.adapter_info.battery_status; + } + + /* + * Decode the battery status + */ + seq_printf(m, "Battery Status:[%d]", battery_status); + + if(battery_status == MEGA_BATT_CHARGE_DONE) + seq_puts(m, " Charge Done"); + + if(battery_status & MEGA_BATT_MODULE_MISSING) + seq_puts(m, " Module Missing"); + + if(battery_status & MEGA_BATT_LOW_VOLTAGE) + seq_puts(m, " Low Voltage"); + + if(battery_status & MEGA_BATT_TEMP_HIGH) + seq_puts(m, " Temperature High"); + + if(battery_status & MEGA_BATT_PACK_MISSING) + seq_puts(m, " Pack Missing"); + + if(battery_status & MEGA_BATT_CHARGE_INPROG) + seq_puts(m, " Charge In-progress"); + + if(battery_status & MEGA_BATT_CHARGE_FAIL) + seq_puts(m, " Charge Fail"); + + if(battery_status & MEGA_BATT_CYCLES_EXCEEDED) + seq_puts(m, " Cycles Exceeded"); + + seq_putc(m, '\n'); + +free_inquiry: + mega_free_inquiry(inquiry, dma_handle, pdev); +free_pdev: + free_local_pdev(pdev); + return 0; +} + + +/* + * Display scsi inquiry + */ +static void +mega_print_inquiry(struct seq_file *m, char *scsi_inq) +{ + int i; + + seq_puts(m, " Vendor: "); + seq_write(m, scsi_inq + 8, 8); + seq_puts(m, " Model: "); + seq_write(m, scsi_inq + 16, 16); + seq_puts(m, " Rev: "); + seq_write(m, scsi_inq + 32, 4); + seq_putc(m, '\n'); + + i = scsi_inq[0] & 0x1f; + seq_printf(m, " Type: %s ", scsi_device_type(i)); + + seq_printf(m, " ANSI SCSI revision: %02x", + scsi_inq[2] & 0x07); + + if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 ) + seq_puts(m, " CCS\n"); + else + seq_putc(m, '\n'); +} + +/** + * proc_show_pdrv() + * @m: Synthetic file construction data + * @adapter: pointer to our soft state + * @channel: channel + * + * Display information about the physical drives. 
+ */ +static int +proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel) +{ + dma_addr_t dma_handle; + char *scsi_inq; + dma_addr_t scsi_inq_dma_handle; + caddr_t inquiry; + struct pci_dev *pdev; + u8 *pdrv_state; + u8 state; + int tgt; + int max_channels; + int i; + + if( make_local_pdev(adapter, &pdev) != 0 ) + return 0; + + if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) + goto free_pdev; + + if( mega_adapinq(adapter, dma_handle) != 0 ) { + seq_puts(m, "Adapter inquiry failed.\n"); + dev_warn(&adapter->dev->dev, "inquiry failed\n"); + goto free_inquiry; + } + + + scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle, + GFP_KERNEL); + if( scsi_inq == NULL ) { + seq_puts(m, "memory not available for scsi inq.\n"); + goto free_inquiry; + } + + if( adapter->flag & BOARD_40LD ) { + pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state; + } + else { + pdrv_state = ((mraid_ext_inquiry *)inquiry)-> + raid_inq.pdrv_info.pdrv_state; + } + + max_channels = adapter->product_info.nchannels; + + if( channel >= max_channels ) { + goto free_pci; + } + + for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) { + + i = channel*16 + tgt; + + state = *(pdrv_state + i); + switch( state & 0x0F ) { + case PDRV_ONLINE: + seq_printf(m, "Channel:%2d Id:%2d State: Online", + channel, tgt); + break; + + case PDRV_FAILED: + seq_printf(m, "Channel:%2d Id:%2d State: Failed", + channel, tgt); + break; + + case PDRV_RBLD: + seq_printf(m, "Channel:%2d Id:%2d State: Rebuild", + channel, tgt); + break; + + case PDRV_HOTSPARE: + seq_printf(m, "Channel:%2d Id:%2d State: Hot spare", + channel, tgt); + break; + + default: + seq_printf(m, "Channel:%2d Id:%2d State: Un-configured", + channel, tgt); + break; + } + + /* + * This interface displays inquiries for disk drives + * only. Inquries for logical drives and non-disk + * devices are available through /proc/scsi/scsi + */ + memset(scsi_inq, 0, 256); + if( mega_internal_dev_inquiry(adapter, channel, tgt, + scsi_inq_dma_handle) || + (scsi_inq[0] & 0x1F) != TYPE_DISK ) { + continue; + } + + /* + * Check for overflow. We print less than 240 + * characters for inquiry + */ + seq_puts(m, ".\n"); + mega_print_inquiry(m, scsi_inq); + } + +free_pci: + dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle); +free_inquiry: + mega_free_inquiry(inquiry, dma_handle, pdev); +free_pdev: + free_local_pdev(pdev); + return 0; +} + +/** + * proc_show_pdrv_ch0() + * @m: Synthetic file construction data + * @v: File iterator + * + * Display information about the physical drives on physical channel 0. + */ +static int +proc_show_pdrv_ch0(struct seq_file *m, void *v) +{ + return proc_show_pdrv(m, m->private, 0); +} + + +/** + * proc_show_pdrv_ch1() + * @m: Synthetic file construction data + * @v: File iterator + * + * Display information about the physical drives on physical channel 1. + */ +static int +proc_show_pdrv_ch1(struct seq_file *m, void *v) +{ + return proc_show_pdrv(m, m->private, 1); +} + + +/** + * proc_show_pdrv_ch2() + * @m: Synthetic file construction data + * @v: File iterator + * + * Display information about the physical drives on physical channel 2. + */ +static int +proc_show_pdrv_ch2(struct seq_file *m, void *v) +{ + return proc_show_pdrv(m, m->private, 2); +} + + +/** + * proc_show_pdrv_ch3() + * @m: Synthetic file construction data + * @v: File iterator + * + * Display information about the physical drives on physical channel 3. 
+ */ +static int +proc_show_pdrv_ch3(struct seq_file *m, void *v) +{ + return proc_show_pdrv(m, m->private, 3); +} + + +/** + * proc_show_rdrv() + * @m: Synthetic file construction data + * @adapter: pointer to our soft state + * @start: starting logical drive to display + * @end: ending logical drive to display + * + * We do not print the inquiry information since its already available through + * /proc/scsi/scsi interface + */ +static int +proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end ) +{ + dma_addr_t dma_handle; + logdrv_param *lparam; + megacmd_t mc; + char *disk_array; + dma_addr_t disk_array_dma_handle; + caddr_t inquiry; + struct pci_dev *pdev; + u8 *rdrv_state; + int num_ldrv; + u32 array_sz; + int i; + + if( make_local_pdev(adapter, &pdev) != 0 ) + return 0; + + if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) + goto free_pdev; + + if( mega_adapinq(adapter, dma_handle) != 0 ) { + seq_puts(m, "Adapter inquiry failed.\n"); + dev_warn(&adapter->dev->dev, "inquiry failed\n"); + goto free_inquiry; + } + + memset(&mc, 0, sizeof(megacmd_t)); + + if( adapter->flag & BOARD_40LD ) { + array_sz = sizeof(disk_array_40ld); + + rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state; + + num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv; + } + else { + array_sz = sizeof(disk_array_8ld); + + rdrv_state = ((mraid_ext_inquiry *)inquiry)-> + raid_inq.logdrv_info.ldrv_state; + + num_ldrv = ((mraid_ext_inquiry *)inquiry)-> + raid_inq.logdrv_info.num_ldrv; + } + + disk_array = dma_alloc_coherent(&pdev->dev, array_sz, + &disk_array_dma_handle, GFP_KERNEL); + + if( disk_array == NULL ) { + seq_puts(m, "memory not available.\n"); + goto free_inquiry; + } + + mc.xferaddr = (u32)disk_array_dma_handle; + + if( adapter->flag & BOARD_40LD ) { + mc.cmd = FC_NEW_CONFIG; + mc.opcode = OP_DCMD_READ_CONFIG; + + if( mega_internal_command(adapter, &mc, NULL) ) { + seq_puts(m, "40LD read config failed.\n"); + goto free_pci; + } + + } + else { + mc.cmd = NEW_READ_CONFIG_8LD; + + if( mega_internal_command(adapter, &mc, NULL) ) { + mc.cmd = READ_CONFIG_8LD; + if( mega_internal_command(adapter, &mc, NULL) ) { + seq_puts(m, "8LD read config failed.\n"); + goto free_pci; + } + } + } + + for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) { + + if( adapter->flag & BOARD_40LD ) { + lparam = + &((disk_array_40ld *)disk_array)->ldrv[i].lparam; + } + else { + lparam = + &((disk_array_8ld *)disk_array)->ldrv[i].lparam; + } + + /* + * Check for overflow. We print less than 240 characters for + * information about each logical drive. + */ + seq_printf(m, "Logical drive:%2d:, ", i); + + switch( rdrv_state[i] & 0x0F ) { + case RDRV_OFFLINE: + seq_puts(m, "state: offline"); + break; + case RDRV_DEGRADED: + seq_puts(m, "state: degraded"); + break; + case RDRV_OPTIMAL: + seq_puts(m, "state: optimal"); + break; + case RDRV_DELETED: + seq_puts(m, "state: deleted"); + break; + default: + seq_puts(m, "state: unknown"); + break; + } + + /* + * Check if check consistency or initialization is going on + * for this logical drive. + */ + if( (rdrv_state[i] & 0xF0) == 0x20 ) + seq_puts(m, ", check-consistency in progress"); + else if( (rdrv_state[i] & 0xF0) == 0x10 ) + seq_puts(m, ", initialization in progress"); + + seq_putc(m, '\n'); + + seq_printf(m, "Span depth:%3d, ", lparam->span_depth); + seq_printf(m, "RAID level:%3d, ", lparam->level); + seq_printf(m, "Stripe size:%3d, ", + lparam->stripe_sz ? 
lparam->stripe_sz/2: 128); + seq_printf(m, "Row size:%3d\n", lparam->row_size); + + seq_puts(m, "Read Policy: "); + switch(lparam->read_ahead) { + case NO_READ_AHEAD: + seq_puts(m, "No read ahead, "); + break; + case READ_AHEAD: + seq_puts(m, "Read ahead, "); + break; + case ADAP_READ_AHEAD: + seq_puts(m, "Adaptive, "); + break; + + } + + seq_puts(m, "Write Policy: "); + switch(lparam->write_mode) { + case WRMODE_WRITE_THRU: + seq_puts(m, "Write thru, "); + break; + case WRMODE_WRITE_BACK: + seq_puts(m, "Write back, "); + break; + } + + seq_puts(m, "Cache Policy: "); + switch(lparam->direct_io) { + case CACHED_IO: + seq_puts(m, "Cached IO\n\n"); + break; + case DIRECT_IO: + seq_puts(m, "Direct IO\n\n"); + break; + } + } + +free_pci: + dma_free_coherent(&pdev->dev, array_sz, disk_array, + disk_array_dma_handle); +free_inquiry: + mega_free_inquiry(inquiry, dma_handle, pdev); +free_pdev: + free_local_pdev(pdev); + return 0; +} + +/** + * proc_show_rdrv_10() + * @m: Synthetic file construction data + * @v: File iterator + * + * Display real time information about the logical drives 0 through 9. + */ +static int +proc_show_rdrv_10(struct seq_file *m, void *v) +{ + return proc_show_rdrv(m, m->private, 0, 9); +} + + +/** + * proc_show_rdrv_20() + * @m: Synthetic file construction data + * @v: File iterator + * + * Display real time information about the logical drives 0 through 9. + */ +static int +proc_show_rdrv_20(struct seq_file *m, void *v) +{ + return proc_show_rdrv(m, m->private, 10, 19); +} + + +/** + * proc_show_rdrv_30() + * @m: Synthetic file construction data + * @v: File iterator + * + * Display real time information about the logical drives 0 through 9. + */ +static int +proc_show_rdrv_30(struct seq_file *m, void *v) +{ + return proc_show_rdrv(m, m->private, 20, 29); +} + + +/** + * proc_show_rdrv_40() + * @m: Synthetic file construction data + * @v: File iterator + * + * Display real time information about the logical drives 0 through 9. + */ +static int +proc_show_rdrv_40(struct seq_file *m, void *v) +{ + return proc_show_rdrv(m, m->private, 30, 39); +} + +/** + * mega_create_proc_entry() + * @index: index in soft state array + * @parent: parent node for this /proc entry + * + * Creates /proc entries for our controllers. 
+ */ +static void +mega_create_proc_entry(int index, struct proc_dir_entry *parent) +{ + adapter_t *adapter = hba_soft_state[index]; + struct proc_dir_entry *dir; + u8 string[16]; + + sprintf(string, "hba%d", adapter->host->host_no); + dir = proc_mkdir_data(string, 0, parent, adapter); + if (!dir) { + dev_warn(&adapter->dev->dev, "proc_mkdir failed\n"); + return; + } + + proc_create_single_data("config", S_IRUSR, dir, + proc_show_config, adapter); + proc_create_single_data("stat", S_IRUSR, dir, + proc_show_stat, adapter); + proc_create_single_data("mailbox", S_IRUSR, dir, + proc_show_mbox, adapter); +#if MEGA_HAVE_ENH_PROC + proc_create_single_data("rebuild-rate", S_IRUSR, dir, + proc_show_rebuild_rate, adapter); + proc_create_single_data("battery-status", S_IRUSR, dir, + proc_show_battery, adapter); + proc_create_single_data("diskdrives-ch0", S_IRUSR, dir, + proc_show_pdrv_ch0, adapter); + proc_create_single_data("diskdrives-ch1", S_IRUSR, dir, + proc_show_pdrv_ch1, adapter); + proc_create_single_data("diskdrives-ch2", S_IRUSR, dir, + proc_show_pdrv_ch2, adapter); + proc_create_single_data("diskdrives-ch3", S_IRUSR, dir, + proc_show_pdrv_ch3, adapter); + proc_create_single_data("raiddrives-0-9", S_IRUSR, dir, + proc_show_rdrv_10, adapter); + proc_create_single_data("raiddrives-10-19", S_IRUSR, dir, + proc_show_rdrv_20, adapter); + proc_create_single_data("raiddrives-20-29", S_IRUSR, dir, + proc_show_rdrv_30, adapter); + proc_create_single_data("raiddrives-30-39", S_IRUSR, dir, + proc_show_rdrv_40, adapter); +#endif +} + +#else +static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent) +{ +} +#endif + + +/* + * megaraid_biosparam() + * + * Return the disk geometry for a particular disk + */ +static int +megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int geom[]) +{ + adapter_t *adapter; + int heads; + int sectors; + int cylinders; + + /* Get pointer to host config structure */ + adapter = (adapter_t *)sdev->host->hostdata; + + if (IS_RAID_CH(adapter, sdev->channel)) { + /* Default heads (64) & sectors (32) */ + heads = 64; + sectors = 32; + cylinders = (ulong)capacity / (heads * sectors); + + /* + * Handle extended translation size for logical drives + * > 1Gb + */ + if ((ulong)capacity >= 0x200000) { + heads = 255; + sectors = 63; + cylinders = (ulong)capacity / (heads * sectors); + } + + /* return result */ + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + } + else { + if (scsi_partsize(bdev, capacity, geom)) + return 0; + + dev_info(&adapter->dev->dev, + "invalid partition on this disk on channel %d\n", + sdev->channel); + + /* Default heads (64) & sectors (32) */ + heads = 64; + sectors = 32; + cylinders = (ulong)capacity / (heads * sectors); + + /* Handle extended translation size for logical drives > 1Gb */ + if ((ulong)capacity >= 0x200000) { + heads = 255; + sectors = 63; + cylinders = (ulong)capacity / (heads * sectors); + } + + /* return result */ + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + } + + return 0; +} + +/** + * mega_init_scb() + * @adapter: pointer to our soft state + * + * Allocate memory for the various pointers in the scb structures: + * scatter-gather list pointer, passthru and extended passthru structure + * pointers. 
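/*
 * Worked example for megaraid_biosparam() above (illustrative only, not
 * part of the upstream patch), assuming 512-byte sectors:
 *
 *   - a 16777216-sector (8 GiB) logical drive is >= 0x200000 sectors, so
 *     the extended translation applies: heads = 255, sectors = 63,
 *     cylinders = 16777216 / (255 * 63) = 1044;
 *
 *   - a 1048576-sector (512 MiB) drive stays below the 1 GiB threshold:
 *     heads = 64, sectors = 32, cylinders = 1048576 / (64 * 32) = 512.
 *
 * For non-RAID channels these defaults are used only when scsi_partsize()
 * cannot read a geometry from the partition table.
 */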
+ */ +static int +mega_init_scb(adapter_t *adapter) +{ + scb_t *scb; + int i; + + for( i = 0; i < adapter->max_cmds; i++ ) { + + scb = &adapter->scb_list[i]; + + scb->sgl64 = NULL; + scb->sgl = NULL; + scb->pthru = NULL; + scb->epthru = NULL; + } + + for( i = 0; i < adapter->max_cmds; i++ ) { + + scb = &adapter->scb_list[i]; + + scb->idx = i; + + scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev, + sizeof(mega_sgl64) * adapter->sglen, + &scb->sgl_dma_addr, GFP_KERNEL); + + scb->sgl = (mega_sglist *)scb->sgl64; + + if( !scb->sgl ) { + dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n"); + mega_free_sgl(adapter); + return -1; + } + + scb->pthru = dma_alloc_coherent(&adapter->dev->dev, + sizeof(mega_passthru), + &scb->pthru_dma_addr, GFP_KERNEL); + + if( !scb->pthru ) { + dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n"); + mega_free_sgl(adapter); + return -1; + } + + scb->epthru = dma_alloc_coherent(&adapter->dev->dev, + sizeof(mega_ext_passthru), + &scb->epthru_dma_addr, GFP_KERNEL); + + if( !scb->epthru ) { + dev_warn(&adapter->dev->dev, + "Can't allocate extended passthru\n"); + mega_free_sgl(adapter); + return -1; + } + + + scb->dma_type = MEGA_DMA_TYPE_NONE; + + /* + * Link to free list + * lock not required since we are loading the driver, so no + * commands possible right now. + */ + scb->state = SCB_FREE; + scb->cmd = NULL; + list_add(&scb->list, &adapter->free_list); + } + + return 0; +} + + +/** + * megadev_open() + * @inode: unused + * @filep: unused + * + * Routines for the character/ioctl interface to the driver. Find out if this + * is a valid open. + */ +static int +megadev_open (struct inode *inode, struct file *filep) +{ + /* + * Only allow superuser to access private ioctl interface + */ + if( !capable(CAP_SYS_ADMIN) ) return -EACCES; + + return 0; +} + + +/** + * megadev_ioctl() + * @filep: Our device file + * @cmd: ioctl command + * @arg: user buffer + * + * ioctl entry point for our private ioctl interface. We move the data in from + * the user space, prepare the command (if necessary, convert the old MIMD + * ioctl to new ioctl command), and issue a synchronous command to the + * controller. + */ +static int +megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) +{ + adapter_t *adapter; + nitioctl_t uioc; + int adapno; + int rval; + mega_passthru __user *upthru; /* user address for passthru */ + mega_passthru *pthru; /* copy user passthru here */ + dma_addr_t pthru_dma_hndl; + void *data = NULL; /* data to be transferred */ + dma_addr_t data_dma_hndl; /* dma handle for data xfer area */ + megacmd_t mc; +#if MEGA_HAVE_STATS + megastat_t __user *ustats = NULL; + int num_ldrv = 0; +#endif + u32 uxferaddr = 0; + struct pci_dev *pdev; + + /* + * Make sure only USCSICMD are issued through this interface. + * MIMD application would still fire different command. + */ + if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) { + return -EINVAL; + } + + /* + * Check and convert a possible MIMD command to NIT command. + * mega_m_to_n() copies the data from the user space, so we do not + * have to do it here. + * NOTE: We will need some user address to copyout the data, therefore + * the inteface layer will also provide us with the required user + * addresses. 
+ */ + memset(&uioc, 0, sizeof(nitioctl_t)); + if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 ) + return rval; + + + switch( uioc.opcode ) { + + case GET_DRIVER_VER: + if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) ) + return (-EFAULT); + + break; + + case GET_N_ADAP: + if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) ) + return (-EFAULT); + + /* + * Shucks. MIMD interface returns a positive value for number + * of adapters. TODO: Change it to return 0 when there is no + * applicatio using mimd interface. + */ + return hba_count; + + case GET_ADAP_INFO: + + /* + * Which adapter + */ + if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) + return (-ENODEV); + + if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno, + sizeof(struct mcontroller)) ) + return (-EFAULT); + break; + +#if MEGA_HAVE_STATS + + case GET_STATS: + /* + * Which adapter + */ + if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) + return (-ENODEV); + + adapter = hba_soft_state[adapno]; + + ustats = uioc.uioc_uaddr; + + if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) ) + return (-EFAULT); + + /* + * Check for the validity of the logical drive number + */ + if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL; + + if( copy_to_user(ustats->nreads, adapter->nreads, + num_ldrv*sizeof(u32)) ) + return -EFAULT; + + if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks, + num_ldrv*sizeof(u32)) ) + return -EFAULT; + + if( copy_to_user(ustats->nwrites, adapter->nwrites, + num_ldrv*sizeof(u32)) ) + return -EFAULT; + + if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks, + num_ldrv*sizeof(u32)) ) + return -EFAULT; + + if( copy_to_user(ustats->rd_errors, adapter->rd_errors, + num_ldrv*sizeof(u32)) ) + return -EFAULT; + + if( copy_to_user(ustats->wr_errors, adapter->wr_errors, + num_ldrv*sizeof(u32)) ) + return -EFAULT; + + return 0; + +#endif + case MBOX_CMD: + + /* + * Which adapter + */ + if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) + return (-ENODEV); + + adapter = hba_soft_state[adapno]; + + /* + * Deletion of logical drive is a special case. The adapter + * should be quiescent before this command is issued. + */ + if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV && + uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) { + + /* + * Do we support this feature + */ + if( !adapter->support_random_del ) { + dev_warn(&adapter->dev->dev, "logdrv " + "delete on non-supporting F/W\n"); + + return (-EINVAL); + } + + rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] ); + + if( rval == 0 ) { + memset(&mc, 0, sizeof(megacmd_t)); + + mc.status = rval; + + rval = mega_n_to_m((void __user *)arg, &mc); + } + + return rval; + } + /* + * This interface only support the regular passthru commands. 
+ * Reject extended passthru and 64-bit passthru + */ + if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 || + uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) { + + dev_warn(&adapter->dev->dev, "rejected passthru\n"); + + return (-EINVAL); + } + + /* + * For all internal commands, the buffer must be allocated in + * <4GB address range + */ + if( make_local_pdev(adapter, &pdev) != 0 ) + return -EIO; + + /* Is it a passthru command or a DCMD */ + if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) { + /* Passthru commands */ + + pthru = dma_alloc_coherent(&pdev->dev, + sizeof(mega_passthru), + &pthru_dma_hndl, GFP_KERNEL); + + if( pthru == NULL ) { + free_local_pdev(pdev); + return (-ENOMEM); + } + + /* + * The user passthru structure + */ + upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr; + + /* + * Copy in the user passthru here. + */ + if( copy_from_user(pthru, upthru, + sizeof(mega_passthru)) ) { + + dma_free_coherent(&pdev->dev, + sizeof(mega_passthru), + pthru, pthru_dma_hndl); + + free_local_pdev(pdev); + + return (-EFAULT); + } + + /* + * Is there a data transfer + */ + if( pthru->dataxferlen ) { + data = dma_alloc_coherent(&pdev->dev, + pthru->dataxferlen, + &data_dma_hndl, + GFP_KERNEL); + + if( data == NULL ) { + dma_free_coherent(&pdev->dev, + sizeof(mega_passthru), + pthru, + pthru_dma_hndl); + + free_local_pdev(pdev); + + return (-ENOMEM); + } + + /* + * Save the user address and point the kernel + * address at just allocated memory + */ + uxferaddr = pthru->dataxferaddr; + pthru->dataxferaddr = data_dma_hndl; + } + + + /* + * Is data coming down-stream + */ + if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) { + /* + * Get the user data + */ + if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, + pthru->dataxferlen) ) { + rval = (-EFAULT); + goto freemem_and_return; + } + } + + memset(&mc, 0, sizeof(megacmd_t)); + + mc.cmd = MEGA_MBOXCMD_PASSTHRU; + mc.xferaddr = (u32)pthru_dma_hndl; + + /* + * Issue the command + */ + mega_internal_command(adapter, &mc, pthru); + + rval = mega_n_to_m((void __user *)arg, &mc); + + if( rval ) goto freemem_and_return; + + + /* + * Is data going up-stream + */ + if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) { + if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, + pthru->dataxferlen) ) { + rval = (-EFAULT); + } + } + + /* + * Send the request sense data also, irrespective of + * whether the user has asked for it or not. 
+ */ + if (copy_to_user(upthru->reqsensearea, + pthru->reqsensearea, 14)) + rval = -EFAULT; + +freemem_and_return: + if( pthru->dataxferlen ) { + dma_free_coherent(&pdev->dev, + pthru->dataxferlen, data, + data_dma_hndl); + } + + dma_free_coherent(&pdev->dev, sizeof(mega_passthru), + pthru, pthru_dma_hndl); + + free_local_pdev(pdev); + + return rval; + } + else { + /* DCMD commands */ + + /* + * Is there a data transfer + */ + if( uioc.xferlen ) { + data = dma_alloc_coherent(&pdev->dev, + uioc.xferlen, + &data_dma_hndl, + GFP_KERNEL); + + if( data == NULL ) { + free_local_pdev(pdev); + return (-ENOMEM); + } + + uxferaddr = MBOX(uioc)->xferaddr; + } + + /* + * Is data coming down-stream + */ + if( uioc.xferlen && (uioc.flags & UIOC_WR) ) { + /* + * Get the user data + */ + if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, + uioc.xferlen) ) { + + dma_free_coherent(&pdev->dev, + uioc.xferlen, data, + data_dma_hndl); + + free_local_pdev(pdev); + + return (-EFAULT); + } + } + + memcpy(&mc, MBOX(uioc), sizeof(megacmd_t)); + + mc.xferaddr = (u32)data_dma_hndl; + + /* + * Issue the command + */ + mega_internal_command(adapter, &mc, NULL); + + rval = mega_n_to_m((void __user *)arg, &mc); + + if( rval ) { + if( uioc.xferlen ) { + dma_free_coherent(&pdev->dev, + uioc.xferlen, data, + data_dma_hndl); + } + + free_local_pdev(pdev); + + return rval; + } + + /* + * Is data going up-stream + */ + if( uioc.xferlen && (uioc.flags & UIOC_RD) ) { + if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, + uioc.xferlen) ) { + + rval = (-EFAULT); + } + } + + if( uioc.xferlen ) { + dma_free_coherent(&pdev->dev, uioc.xferlen, + data, data_dma_hndl); + } + + free_local_pdev(pdev); + + return rval; + } + + default: + return (-EINVAL); + } + + return 0; +} + +static long +megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) +{ + int ret; + + mutex_lock(&megadev_mutex); + ret = megadev_ioctl(filep, cmd, arg); + mutex_unlock(&megadev_mutex); + + return ret; +} + +/** + * mega_m_to_n() + * @arg: user address + * @uioc: new ioctl structure + * + * A thin layer to convert older mimd interface ioctl structure to NIT ioctl + * structure + * + * Converts the older mimd ioctl structure to newer NIT structure + */ +static int +mega_m_to_n(void __user *arg, nitioctl_t *uioc) +{ + struct uioctl_t uioc_mimd; + char signature[8] = {0}; + u8 opcode; + u8 subopcode; + + + /* + * check is the application conforms to NIT. We do not have to do much + * in that case. + * We exploit the fact that the signature is stored in the very + * beginning of the structure. + */ + + if( copy_from_user(signature, arg, 7) ) + return (-EFAULT); + + if( memcmp(signature, "MEGANIT", 7) == 0 ) { + + /* + * NOTE NOTE: The nit ioctl is still under flux because of + * change of mailbox definition, in HPE. No applications yet + * use this interface and let's not have applications use this + * interface till the new specifitions are in place. + */ + return -EINVAL; +#if 0 + if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) ) + return (-EFAULT); + return 0; +#endif + } + + /* + * Else assume we have mimd uioctl_t as arg. 
Convert to nitioctl_t + * + * Get the user ioctl structure + */ + if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) ) + return (-EFAULT); + + + /* + * Get the opcode and subopcode for the commands + */ + opcode = uioc_mimd.ui.fcs.opcode; + subopcode = uioc_mimd.ui.fcs.subopcode; + + switch (opcode) { + case 0x82: + + switch (subopcode) { + + case MEGAIOC_QDRVRVER: /* Query driver version */ + uioc->opcode = GET_DRIVER_VER; + uioc->uioc_uaddr = uioc_mimd.data; + break; + + case MEGAIOC_QNADAP: /* Get # of adapters */ + uioc->opcode = GET_N_ADAP; + uioc->uioc_uaddr = uioc_mimd.data; + break; + + case MEGAIOC_QADAPINFO: /* Get adapter information */ + uioc->opcode = GET_ADAP_INFO; + uioc->adapno = uioc_mimd.ui.fcs.adapno; + uioc->uioc_uaddr = uioc_mimd.data; + break; + + default: + return(-EINVAL); + } + + break; + + + case 0x81: + + uioc->opcode = MBOX_CMD; + uioc->adapno = uioc_mimd.ui.fcs.adapno; + + memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); + + uioc->xferlen = uioc_mimd.ui.fcs.length; + + if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; + if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; + + break; + + case 0x80: + + uioc->opcode = MBOX_CMD; + uioc->adapno = uioc_mimd.ui.fcs.adapno; + + memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); + + /* + * Choose the xferlen bigger of input and output data + */ + uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ? + uioc_mimd.outlen : uioc_mimd.inlen; + + if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; + if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; + + break; + + default: + return (-EINVAL); + + } + + return 0; +} + +/* + * mega_n_to_m() + * @arg: user address + * @mc: mailbox command + * + * Updates the status information to the application, depending on application + * conforms to older mimd ioctl interface or newer NIT ioctl interface + */ +static int +mega_n_to_m(void __user *arg, megacmd_t *mc) +{ + nitioctl_t __user *uiocp; + megacmd_t __user *umc; + mega_passthru __user *upthru; + struct uioctl_t __user *uioc_mimd; + char signature[8] = {0}; + + /* + * check is the application conforms to NIT. + */ + if( copy_from_user(signature, arg, 7) ) + return -EFAULT; + + if( memcmp(signature, "MEGANIT", 7) == 0 ) { + + uiocp = arg; + + if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) ) + return (-EFAULT); + + if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { + + umc = MBOX_P(uiocp); + + if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) + return -EFAULT; + + if( put_user(mc->status, (u8 __user *)&upthru->scsistatus)) + return (-EFAULT); + } + } + else { + uioc_mimd = arg; + + if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) ) + return (-EFAULT); + + if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { + + umc = (megacmd_t __user *)uioc_mimd->mbox; + + if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) + return (-EFAULT); + + if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) ) + return (-EFAULT); + } + } + + return 0; +} + + +/* + * MEGARAID 'FW' commands. 
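/*
 * Summary of the MIMD -> NIT translation performed by mega_m_to_n()
 * above (descriptive aside, not part of the upstream patch):
 *
 *   opcode 0x82 / MEGAIOC_QDRVRVER  -> GET_DRIVER_VER, user buffer from .data
 *   opcode 0x82 / MEGAIOC_QNADAP    -> GET_N_ADAP,     user buffer from .data
 *   opcode 0x82 / MEGAIOC_QADAPINFO -> GET_ADAP_INFO,  plus adapter number
 *   opcode 0x81                     -> MBOX_CMD, 18-byte mailbox copied,
 *                                      xferlen taken from ui.fcs.length
 *   opcode 0x80                     -> MBOX_CMD, 18-byte mailbox copied,
 *                                      xferlen = max(outlen, inlen)
 *
 * For 0x80/0x81 a non-zero outlen sets UIOC_RD and a non-zero inlen sets
 * UIOC_WR, which megadev_ioctl() uses to decide the copy direction of the
 * data buffer.
 */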
+ */ + +/** + * mega_is_bios_enabled() + * @adapter: pointer to our soft state + * + * issue command to find out if the BIOS is enabled for this controller + */ +static int +mega_is_bios_enabled(adapter_t *adapter) +{ + struct mbox_out mbox; + unsigned char *raw_mbox = (u8 *)&mbox; + + memset(&mbox, 0, sizeof(mbox)); + + memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); + + mbox.xferaddr = (u32)adapter->buf_dma_handle; + + raw_mbox[0] = IS_BIOS_ENABLED; + raw_mbox[2] = GET_BIOS; + + issue_scb_block(adapter, raw_mbox); + + return *(char *)adapter->mega_buffer; +} + + +/** + * mega_enum_raid_scsi() + * @adapter: pointer to our soft state + * + * Find out what channels are RAID/SCSI. This information is used to + * differentiate the virtual channels and physical channels and to support + * ROMB feature and non-disk devices. + */ +static void +mega_enum_raid_scsi(adapter_t *adapter) +{ + struct mbox_out mbox; + unsigned char *raw_mbox = (u8 *)&mbox; + int i; + + memset(&mbox, 0, sizeof(mbox)); + + /* + * issue command to find out what channels are raid/scsi + */ + raw_mbox[0] = CHNL_CLASS; + raw_mbox[2] = GET_CHNL_CLASS; + + memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); + + mbox.xferaddr = (u32)adapter->buf_dma_handle; + + /* + * Non-ROMB firmware fail this command, so all channels + * must be shown RAID + */ + adapter->mega_ch_class = 0xFF; + + if(!issue_scb_block(adapter, raw_mbox)) { + adapter->mega_ch_class = *((char *)adapter->mega_buffer); + + } + + for( i = 0; i < adapter->product_info.nchannels; i++ ) { + if( (adapter->mega_ch_class >> i) & 0x01 ) { + dev_info(&adapter->dev->dev, "channel[%d] is raid\n", + i); + } + else { + dev_info(&adapter->dev->dev, "channel[%d] is scsi\n", + i); + } + } + + return; +} + + +/** + * mega_get_boot_drv() + * @adapter: pointer to our soft state + * + * Find out which device is the boot device. Note, any logical drive or any + * phyical device (e.g., a CDROM) can be designated as a boot device. 
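/*
 * Illustrative sketch, not part of the upstream patch: the recurring
 * pattern shared by the small firmware query helpers in this section
 * (mega_is_bios_enabled, mega_enum_raid_scsi, mega_get_boot_drv,
 * mega_support_random_del, ...).  The mailbox is zeroed, byte 0 carries
 * the command, byte 2 the sub-command, and xferaddr points at the
 * pre-allocated adapter->mega_buffer bounce buffer.  The callers treat a
 * zero return from issue_scb_block() as success.
 */
static int example_fw_query(adapter_t *adapter, u8 cmd, u8 subcmd)
{
	struct mbox_out mbox;
	unsigned char *raw_mbox = (u8 *)&mbox;

	memset(&mbox, 0, sizeof(mbox));
	memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);

	mbox.xferaddr = (u32)adapter->buf_dma_handle;
	raw_mbox[0] = cmd;
	raw_mbox[2] = subcmd;

	/* any result data lands in adapter->mega_buffer */
	return issue_scb_block(adapter, raw_mbox);
}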
+ */ +static void +mega_get_boot_drv(adapter_t *adapter) +{ + struct private_bios_data *prv_bios_data; + struct mbox_out mbox; + unsigned char *raw_mbox = (u8 *)&mbox; + u16 cksum = 0; + u8 *cksum_p; + u8 boot_pdrv; + int i; + + memset(&mbox, 0, sizeof(mbox)); + + raw_mbox[0] = BIOS_PVT_DATA; + raw_mbox[2] = GET_BIOS_PVT_DATA; + + memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); + + mbox.xferaddr = (u32)adapter->buf_dma_handle; + + adapter->boot_ldrv_enabled = 0; + adapter->boot_ldrv = 0; + + adapter->boot_pdrv_enabled = 0; + adapter->boot_pdrv_ch = 0; + adapter->boot_pdrv_tgt = 0; + + if(issue_scb_block(adapter, raw_mbox) == 0) { + prv_bios_data = + (struct private_bios_data *)adapter->mega_buffer; + + cksum = 0; + cksum_p = (char *)prv_bios_data; + for (i = 0; i < 14; i++ ) { + cksum += (u16)(*cksum_p++); + } + + if (prv_bios_data->cksum == (u16)(0-cksum) ) { + + /* + * If MSB is set, a physical drive is set as boot + * device + */ + if( prv_bios_data->boot_drv & 0x80 ) { + adapter->boot_pdrv_enabled = 1; + boot_pdrv = prv_bios_data->boot_drv & 0x7F; + adapter->boot_pdrv_ch = boot_pdrv / 16; + adapter->boot_pdrv_tgt = boot_pdrv % 16; + } + else { + adapter->boot_ldrv_enabled = 1; + adapter->boot_ldrv = prv_bios_data->boot_drv; + } + } + } + +} + +/** + * mega_support_random_del() + * @adapter: pointer to our soft state + * + * Find out if this controller supports random deletion and addition of + * logical drives + */ +static int +mega_support_random_del(adapter_t *adapter) +{ + struct mbox_out mbox; + unsigned char *raw_mbox = (u8 *)&mbox; + int rval; + + memset(&mbox, 0, sizeof(mbox)); + + /* + * issue command + */ + raw_mbox[0] = FC_DEL_LOGDRV; + raw_mbox[2] = OP_SUP_DEL_LOGDRV; + + rval = issue_scb_block(adapter, raw_mbox); + + return !rval; +} + + +/** + * mega_support_ext_cdb() + * @adapter: pointer to our soft state + * + * Find out if this firmware support cdblen > 10 + */ +static int +mega_support_ext_cdb(adapter_t *adapter) +{ + struct mbox_out mbox; + unsigned char *raw_mbox = (u8 *)&mbox; + int rval; + + memset(&mbox, 0, sizeof(mbox)); + /* + * issue command to find out if controller supports extended CDBs. + */ + raw_mbox[0] = 0xA4; + raw_mbox[2] = 0x16; + + rval = issue_scb_block(adapter, raw_mbox); + + return !rval; +} + + +/** + * mega_del_logdrv() + * @adapter: pointer to our soft state + * @logdrv: logical drive to be deleted + * + * Delete the specified logical drive. It is the responsibility of the user + * app to let the OS know about this operation. + */ +static int +mega_del_logdrv(adapter_t *adapter, int logdrv) +{ + unsigned long flags; + scb_t *scb; + int rval; + + /* + * Stop sending commands to the controller, queue them internally. + * When deletion is complete, ISR will flush the queue. + */ + atomic_set(&adapter->quiescent, 1); + + /* + * Wait till all the issued commands are complete and there are no + * commands in the pending queue + */ + while (atomic_read(&adapter->pend_cmds) > 0 || + !list_empty(&adapter->pending_list)) + msleep(1000); /* sleep for 1s */ + + rval = mega_do_del_logdrv(adapter, logdrv); + + spin_lock_irqsave(&adapter->lock, flags); + + /* + * If delete operation was successful, add 0x80 to the logical drive + * ids for commands in the pending queue. 
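/*
 * Worked example for mega_get_boot_drv() above (illustrative only, not
 * part of the upstream patch).  The private BIOS data is accepted when
 * the stored checksum equals the 16-bit two's complement of the sum of
 * its first 14 bytes.  The boot_drv byte then encodes the boot device:
 * e.g. boot_drv = 0x92 has the MSB set, so a physical drive is the boot
 * device; the low 7 bits give 0x12 = 18, i.e. channel 18 / 16 = 1 and
 * target 18 % 16 = 2.  With the MSB clear, boot_drv is simply the boot
 * logical drive number.
 */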
+ */ + if (adapter->read_ldidmap) { + struct list_head *pos; + list_for_each(pos, &adapter->pending_list) { + scb = list_entry(pos, scb_t, list); + if (scb->pthru->logdrv < 0x80 ) + scb->pthru->logdrv += 0x80; + } + } + + atomic_set(&adapter->quiescent, 0); + + mega_runpendq(adapter); + + spin_unlock_irqrestore(&adapter->lock, flags); + + return rval; +} + + +static int +mega_do_del_logdrv(adapter_t *adapter, int logdrv) +{ + megacmd_t mc; + int rval; + + memset( &mc, 0, sizeof(megacmd_t)); + + mc.cmd = FC_DEL_LOGDRV; + mc.opcode = OP_DEL_LOGDRV; + mc.subopcode = logdrv; + + rval = mega_internal_command(adapter, &mc, NULL); + + /* log this event */ + if(rval) { + dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv); + return rval; + } + + /* + * After deleting first logical drive, the logical drives must be + * addressed by adding 0x80 to the logical drive id. + */ + adapter->read_ldidmap = 1; + + return rval; +} + + +/** + * mega_get_max_sgl() + * @adapter: pointer to our soft state + * + * Find out the maximum number of scatter-gather elements supported by this + * version of the firmware + */ +static void +mega_get_max_sgl(adapter_t *adapter) +{ + struct mbox_out mbox; + unsigned char *raw_mbox = (u8 *)&mbox; + + memset(&mbox, 0, sizeof(mbox)); + + memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); + + mbox.xferaddr = (u32)adapter->buf_dma_handle; + + raw_mbox[0] = MAIN_MISC_OPCODE; + raw_mbox[2] = GET_MAX_SG_SUPPORT; + + + if( issue_scb_block(adapter, raw_mbox) ) { + /* + * f/w does not support this command. Choose the default value + */ + adapter->sglen = MIN_SGLIST; + } + else { + adapter->sglen = *((char *)adapter->mega_buffer); + + /* + * Make sure this is not more than the resources we are + * planning to allocate + */ + if ( adapter->sglen > MAX_SGLIST ) + adapter->sglen = MAX_SGLIST; + } + + return; +} + + +/** + * mega_support_cluster() + * @adapter: pointer to our soft state + * + * Find out if this firmware support cluster calls. + */ +static int +mega_support_cluster(adapter_t *adapter) +{ + struct mbox_out mbox; + unsigned char *raw_mbox = (u8 *)&mbox; + + memset(&mbox, 0, sizeof(mbox)); + + memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); + + mbox.xferaddr = (u32)adapter->buf_dma_handle; + + /* + * Try to get the initiator id. This command will succeed iff the + * clustering is available on this HBA. + */ + raw_mbox[0] = MEGA_GET_TARGET_ID; + + if( issue_scb_block(adapter, raw_mbox) == 0 ) { + + /* + * Cluster support available. Get the initiator target id. + * Tell our id to mid-layer too. + */ + adapter->this_id = *(u32 *)adapter->mega_buffer; + adapter->host->this_id = adapter->this_id; + + return 1; + } + + return 0; +} + +#ifdef CONFIG_PROC_FS +/** + * mega_adapinq() + * @adapter: pointer to our soft state + * @dma_handle: DMA address of the buffer + * + * Issue internal commands while interrupts are available. + * We only issue direct mailbox commands from within the driver. ioctl() + * interface using these routines can issue passthru commands. 
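/*
 * Illustrative helper, not part of the upstream patch: the +0x80 logical
 * drive remapping that mega_del_logdrv()/mega_do_del_logdrv() above
 * establish.  Once adapter->read_ldidmap is set (after the first
 * successful delete), the firmware expects logical drive ids offset by
 * 0x80.
 */
static inline u8 example_map_logdrv_id(const adapter_t *adapter, u8 logdrv)
{
	if (adapter->read_ldidmap && logdrv < 0x80)
		return logdrv + 0x80;
	return logdrv;
}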
+ */ +static int +mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle) +{ + megacmd_t mc; + + memset(&mc, 0, sizeof(megacmd_t)); + + if( adapter->flag & BOARD_40LD ) { + mc.cmd = FC_NEW_CONFIG; + mc.opcode = NC_SUBOP_ENQUIRY3; + mc.subopcode = ENQ3_GET_SOLICITED_FULL; + } + else { + mc.cmd = MEGA_MBOXCMD_ADPEXTINQ; + } + + mc.xferaddr = (u32)dma_handle; + + if ( mega_internal_command(adapter, &mc, NULL) != 0 ) { + return -1; + } + + return 0; +} + + +/** + * mega_internal_dev_inquiry() + * @adapter: pointer to our soft state + * @ch: channel for this device + * @tgt: ID of this device + * @buf_dma_handle: DMA address of the buffer + * + * Issue the scsi inquiry for the specified device. + */ +static int +mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt, + dma_addr_t buf_dma_handle) +{ + mega_passthru *pthru; + dma_addr_t pthru_dma_handle; + megacmd_t mc; + int rval; + struct pci_dev *pdev; + + + /* + * For all internal commands, the buffer must be allocated in <4GB + * address range + */ + if( make_local_pdev(adapter, &pdev) != 0 ) return -1; + + pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru), + &pthru_dma_handle, GFP_KERNEL); + + if( pthru == NULL ) { + free_local_pdev(pdev); + return -1; + } + + pthru->timeout = 2; + pthru->ars = 1; + pthru->reqsenselen = 14; + pthru->islogical = 0; + + pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch; + + pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt; + + pthru->cdblen = 6; + + pthru->cdb[0] = INQUIRY; + pthru->cdb[1] = 0; + pthru->cdb[2] = 0; + pthru->cdb[3] = 0; + pthru->cdb[4] = 255; + pthru->cdb[5] = 0; + + + pthru->dataxferaddr = (u32)buf_dma_handle; + pthru->dataxferlen = 256; + + memset(&mc, 0, sizeof(megacmd_t)); + + mc.cmd = MEGA_MBOXCMD_PASSTHRU; + mc.xferaddr = (u32)pthru_dma_handle; + + rval = mega_internal_command(adapter, &mc, pthru); + + dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru, + pthru_dma_handle); + + free_local_pdev(pdev); + + return rval; +} +#endif + +/** + * mega_internal_command() + * @adapter: pointer to our soft state + * @mc: the mailbox command + * @pthru: Passthru structure for DCDB commands + * + * Issue the internal commands in interrupt mode. + * The last argument is the address of the passthru structure if the command + * to be fired is a passthru command + * + * Note: parameter 'pthru' is null for non-passthru commands. + */ +static int +mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) +{ + unsigned long flags; + scb_t *scb; + int rval; + + /* + * The internal commands share one command id and hence are + * serialized. This is so because we want to reserve maximum number of + * available command ids for the I/O commands. + */ + mutex_lock(&adapter->int_mtx); + + scb = &adapter->int_scb; + memset(scb, 0, sizeof(scb_t)); + + scb->idx = CMDID_INT_CMDS; + scb->state |= SCB_ACTIVE | SCB_PENDQ; + + memcpy(scb->raw_mbox, mc, sizeof(megacmd_t)); + + /* + * Is it a passthru command + */ + if (mc->cmd == MEGA_MBOXCMD_PASSTHRU) + scb->pthru = pthru; + + spin_lock_irqsave(&adapter->lock, flags); + list_add_tail(&scb->list, &adapter->pending_list); + /* + * Check if the HBA is in quiescent state, e.g., during a + * delete logical drive opertion. If it is, don't run + * the pending_list. 
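/*
 * Worked example for mega_internal_dev_inquiry() above (illustrative
 * only, not part of the upstream patch).  On a BOARD_40LD controller the
 * channel is folded into the target field: for ch = 2, tgt = 5 the
 * passthru is built with channel = 0 and target = (2 << 4) | 5 = 0x25,
 * while an 8LD board keeps channel = 2, target = 5.  The CDB is a plain
 * 6-byte INQUIRY (allocation length 255) into a 256-byte buffer, with a
 * 14-byte auto request sense and the 10-minute passthru timeout
 * (timeout field = 2).
 */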
+ */ + if (atomic_read(&adapter->quiescent) == 0) + mega_runpendq(adapter); + spin_unlock_irqrestore(&adapter->lock, flags); + + wait_for_completion(&adapter->int_waitq); + + mc->status = rval = adapter->int_status; + + /* + * Print a debug message for all failed commands. Applications can use + * this information. + */ + if (rval && trace_level) { + dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n", + mc->cmd, mc->opcode, mc->subopcode, rval); + } + + mutex_unlock(&adapter->int_mtx); + return rval; +} + +static const struct scsi_host_template megaraid_template = { + .module = THIS_MODULE, + .name = "MegaRAID", + .proc_name = "megaraid_legacy", + .info = megaraid_info, + .queuecommand = megaraid_queue, + .bios_param = megaraid_biosparam, + .max_sectors = MAX_SECTORS_PER_IO, + .can_queue = MAX_COMMANDS, + .this_id = DEFAULT_INITIATOR_ID, + .sg_tablesize = MAX_SGLIST, + .cmd_per_lun = DEF_CMD_PER_LUN, + .eh_abort_handler = megaraid_abort, + .eh_device_reset_handler = megaraid_reset, + .eh_bus_reset_handler = megaraid_reset, + .eh_host_reset_handler = megaraid_reset, + .no_write_same = 1, + .cmd_size = sizeof(struct megaraid_cmd_priv), +}; + +static int +megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct Scsi_Host *host; + adapter_t *adapter; + unsigned long mega_baseport, tbase, flag = 0; + u16 subsysid, subsysvid; + u8 pci_bus, pci_dev_func; + int irq, i, j; + int error = -ENODEV; + + if (hba_count >= MAX_CONTROLLERS) + goto out; + + if (pci_enable_device(pdev)) + goto out; + pci_set_master(pdev); + + pci_bus = pdev->bus->number; + pci_dev_func = pdev->devfn; + + /* + * The megaraid3 stuff reports the ID of the Intel part which is not + * remotely specific to the megaraid + */ + if (pdev->vendor == PCI_VENDOR_ID_INTEL) { + u16 magic; + /* + * Don't fall over the Compaq management cards using the same + * PCI identifier + */ + if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ && + pdev->subsystem_device == 0xC000) + goto out_disable_device; + /* Now check the magic signature byte */ + pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic); + if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE) + goto out_disable_device; + /* Ok it is probably a megaraid */ + } + + /* + * For these vendor and device ids, signature offsets are not + * valid and 64 bit is implicit + */ + if (id->driver_data & BOARD_64BIT) + flag |= BOARD_64BIT; + else { + u32 magic64; + + pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64); + if (magic64 == HBA_SIGNATURE_64BIT) + flag |= BOARD_64BIT; + } + + subsysvid = pdev->subsystem_vendor; + subsysid = pdev->subsystem_device; + + dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n", + id->vendor, id->device); + + /* Read the base port and IRQ from PCI */ + mega_baseport = pci_resource_start(pdev, 0); + irq = pdev->irq; + + tbase = mega_baseport; + if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) { + flag |= BOARD_MEMMAP; + + if (!request_mem_region(mega_baseport, 128, "megaraid")) { + dev_warn(&pdev->dev, "mem region busy!\n"); + goto out_disable_device; + } + + mega_baseport = (unsigned long)ioremap(mega_baseport, 128); + if (!mega_baseport) { + dev_warn(&pdev->dev, "could not map hba memory\n"); + goto out_release_region; + } + } else { + flag |= BOARD_IOMAP; + mega_baseport += 0x10; + + if (!request_region(mega_baseport, 16, "megaraid")) + goto out_disable_device; + } + + /* Initialize SCSI Host structure */ + host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t)); + if (!host) + goto out_iounmap; + + adapter = 
(adapter_t *)host->hostdata; + memset(adapter, 0, sizeof(adapter_t)); + + dev_notice(&pdev->dev, + "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n", + host->host_no, mega_baseport, irq); + + adapter->base = mega_baseport; + if (flag & BOARD_MEMMAP) + adapter->mmio_base = (void __iomem *) mega_baseport; + + INIT_LIST_HEAD(&adapter->free_list); + INIT_LIST_HEAD(&adapter->pending_list); + INIT_LIST_HEAD(&adapter->completed_list); + + adapter->flag = flag; + spin_lock_init(&adapter->lock); + + host->cmd_per_lun = max_cmd_per_lun; + host->max_sectors = max_sectors_per_io; + + adapter->dev = pdev; + adapter->host = host; + + adapter->host->irq = irq; + + if (flag & BOARD_MEMMAP) + adapter->host->base = tbase; + else { + adapter->host->io_port = tbase; + adapter->host->n_io_port = 16; + } + + adapter->host->unique_id = (pci_bus << 8) | pci_dev_func; + + /* + * Allocate buffer to issue internal commands. + */ + adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev, + MEGA_BUFFER_SIZE, + &adapter->buf_dma_handle, + GFP_KERNEL); + if (!adapter->mega_buffer) { + dev_warn(&pdev->dev, "out of RAM\n"); + goto out_host_put; + } + + adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t), + GFP_KERNEL); + if (!adapter->scb_list) { + dev_warn(&pdev->dev, "out of RAM\n"); + goto out_free_cmd_buffer; + } + + if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ? + megaraid_isr_memmapped : megaraid_isr_iomapped, + IRQF_SHARED, "megaraid", adapter)) { + dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq); + goto out_free_scb_list; + } + + if (mega_setup_mailbox(adapter)) + goto out_free_irq; + + if (mega_query_adapter(adapter)) + goto out_free_mbox; + + /* + * Have checks for some buggy f/w + */ + if ((subsysid == 0x1111) && (subsysvid == 0x1111)) { + /* + * Which firmware + */ + if (!strcmp(adapter->fw_version, "3.00") || + !strcmp(adapter->fw_version, "3.01")) { + + dev_warn(&pdev->dev, + "Your card is a Dell PERC " + "2/SC RAID controller with " + "firmware\nmegaraid: 3.00 or 3.01. " + "This driver is known to have " + "corruption issues\nmegaraid: with " + "those firmware versions on this " + "specific card. In order\nmegaraid: " + "to protect your data, please upgrade " + "your firmware to version\nmegaraid: " + "3.10 or later, available from the " + "Dell Technical Support web\n" + "megaraid: site at\nhttp://support." + "dell.com/us/en/filelib/download/" + "index.asp?fileid=2940\n" + ); + } + } + + /* + * If we have a HP 1M(0x60E7)/2M(0x60E8) controller with + * firmware H.01.07, H.01.08, and H.01.09 disable 64 bit + * support, since this firmware cannot handle 64 bit + * addressing + */ + if ((subsysvid == PCI_VENDOR_ID_HP) && + ((subsysid == 0x60E7) || (subsysid == 0x60E8))) { + /* + * which firmware + */ + if (!strcmp(adapter->fw_version, "H01.07") || + !strcmp(adapter->fw_version, "H01.08") || + !strcmp(adapter->fw_version, "H01.09") ) { + dev_warn(&pdev->dev, + "Firmware H.01.07, " + "H.01.08, and H.01.09 on 1M/2M " + "controllers\n" + "do not support 64 bit " + "addressing.\nDISABLING " + "64 bit support.\n"); + adapter->flag &= ~BOARD_64BIT; + } + } + + if (mega_is_bios_enabled(adapter)) + mega_hbas[hba_count].is_bios_enabled = 1; + mega_hbas[hba_count].hostdata_addr = adapter; + + /* + * Find out which channel is raid and which is scsi. This is + * for ROMB support. + */ + mega_enum_raid_scsi(adapter); + + /* + * Find out if a logical drive is set as the boot drive. If + * there is one, will make that as the first logical drive. 
+ * ROMB: Do we have to boot from a physical drive. Then all + * the physical drives would appear before the logical disks. + * Else, all the physical drives would be exported to the mid + * layer after logical drives. + */ + mega_get_boot_drv(adapter); + + if (adapter->boot_pdrv_enabled) { + j = adapter->product_info.nchannels; + for( i = 0; i < j; i++ ) + adapter->logdrv_chan[i] = 0; + for( i = j; i < NVIRT_CHAN + j; i++ ) + adapter->logdrv_chan[i] = 1; + } else { + for (i = 0; i < NVIRT_CHAN; i++) + adapter->logdrv_chan[i] = 1; + for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++) + adapter->logdrv_chan[i] = 0; + adapter->mega_ch_class <<= NVIRT_CHAN; + } + + /* + * Do we support random deletion and addition of logical + * drives + */ + adapter->read_ldidmap = 0; /* set it after first logdrv + delete cmd */ + adapter->support_random_del = mega_support_random_del(adapter); + + /* Initialize SCBs */ + if (mega_init_scb(adapter)) + goto out_free_mbox; + + /* + * Reset the pending commands counter + */ + atomic_set(&adapter->pend_cmds, 0); + + /* + * Reset the adapter quiescent flag + */ + atomic_set(&adapter->quiescent, 0); + + hba_soft_state[hba_count] = adapter; + + /* + * Fill in the structure which needs to be passed back to the + * application when it does an ioctl() for controller related + * information. + */ + i = hba_count; + + mcontroller[i].base = mega_baseport; + mcontroller[i].irq = irq; + mcontroller[i].numldrv = adapter->numldrv; + mcontroller[i].pcibus = pci_bus; + mcontroller[i].pcidev = id->device; + mcontroller[i].pcifun = PCI_FUNC (pci_dev_func); + mcontroller[i].pciid = -1; + mcontroller[i].pcivendor = id->vendor; + mcontroller[i].pcislot = PCI_SLOT(pci_dev_func); + mcontroller[i].uid = (pci_bus << 8) | pci_dev_func; + + + /* Set the Mode of addressing to 64 bit if we can */ + if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) { + dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + adapter->has_64bit_addr = 1; + } else { + dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + adapter->has_64bit_addr = 0; + } + + mutex_init(&adapter->int_mtx); + init_completion(&adapter->int_waitq); + + adapter->this_id = DEFAULT_INITIATOR_ID; + adapter->host->this_id = DEFAULT_INITIATOR_ID; + +#if MEGA_HAVE_CLUSTERING + /* + * Is cluster support enabled on this controller + * Note: In a cluster the HBAs ( the initiators ) will have + * different target IDs and we cannot assume it to be 7. 
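/*
 * Worked example for the channel map set up above (illustrative only,
 * not part of the upstream patch).  With product_info.nchannels = 2 and
 * a physical boot device, logdrv_chan[] begins {0, 0, 1, 1, 1, 1}: the
 * two physical channels are exported first, followed by the NVIRT_CHAN
 * (4) virtual channels that carry the logical drives.  Without a
 * physical boot device the four virtual channels come first and
 * mega_ch_class is shifted left by NVIRT_CHAN so its RAID/SCSI bits
 * still line up with the shifted physical channel numbers.
 */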
Call + * to mega_support_cluster() will get the target ids also if + * the cluster support is available + */ + adapter->has_cluster = mega_support_cluster(adapter); + if (adapter->has_cluster) { + dev_notice(&pdev->dev, + "Cluster driver, initiator id:%d\n", + adapter->this_id); + } +#endif + + pci_set_drvdata(pdev, host); + + mega_create_proc_entry(hba_count, mega_proc_dir_entry); + + error = scsi_add_host(host, &pdev->dev); + if (error) + goto out_free_mbox; + + scsi_scan_host(host); + hba_count++; + return 0; + + out_free_mbox: + dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t), + adapter->una_mbox64, adapter->una_mbox64_dma); + out_free_irq: + free_irq(adapter->host->irq, adapter); + out_free_scb_list: + kfree(adapter->scb_list); + out_free_cmd_buffer: + dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE, + adapter->mega_buffer, adapter->buf_dma_handle); + out_host_put: + scsi_host_put(host); + out_iounmap: + if (flag & BOARD_MEMMAP) + iounmap((void *)mega_baseport); + out_release_region: + if (flag & BOARD_MEMMAP) + release_mem_region(tbase, 128); + else + release_region(mega_baseport, 16); + out_disable_device: + pci_disable_device(pdev); + out: + return error; +} + +static void +__megaraid_shutdown(adapter_t *adapter) +{ + u_char raw_mbox[sizeof(struct mbox_out)]; + mbox_t *mbox = (mbox_t *)raw_mbox; + int i; + + /* Flush adapter cache */ + memset(&mbox->m_out, 0, sizeof(raw_mbox)); + raw_mbox[0] = FLUSH_ADAPTER; + + free_irq(adapter->host->irq, adapter); + + /* Issue a blocking (interrupts disabled) command to the card */ + issue_scb_block(adapter, raw_mbox); + + /* Flush disks cache */ + memset(&mbox->m_out, 0, sizeof(raw_mbox)); + raw_mbox[0] = FLUSH_SYSTEM; + + /* Issue a blocking (interrupts disabled) command to the card */ + issue_scb_block(adapter, raw_mbox); + + if (atomic_read(&adapter->pend_cmds) > 0) + dev_warn(&adapter->dev->dev, "pending commands!!\n"); + + /* + * Have a delibrate delay to make sure all the caches are + * actually flushed. 
+ */ + for (i = 0; i <= 10; i++) + mdelay(1000); +} + +static void +megaraid_remove_one(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + adapter_t *adapter = (adapter_t *)host->hostdata; + char buf[12] = { 0 }; + + scsi_remove_host(host); + + __megaraid_shutdown(adapter); + + /* Free our resources */ + if (adapter->flag & BOARD_MEMMAP) { + iounmap((void *)adapter->base); + release_mem_region(adapter->host->base, 128); + } else + release_region(adapter->base, 16); + + mega_free_sgl(adapter); + + sprintf(buf, "hba%d", adapter->host->host_no); + remove_proc_subtree(buf, mega_proc_dir_entry); + + dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE, + adapter->mega_buffer, adapter->buf_dma_handle); + kfree(adapter->scb_list); + dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t), + adapter->una_mbox64, adapter->una_mbox64_dma); + + scsi_host_put(host); + pci_disable_device(pdev); + + hba_count--; +} + +static void +megaraid_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + adapter_t *adapter = (adapter_t *)host->hostdata; + + __megaraid_shutdown(adapter); +} + +static struct pci_device_id megaraid_pci_tbl[] = { + {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl); + +static struct pci_driver megaraid_pci_driver = { + .name = "megaraid_legacy", + .id_table = megaraid_pci_tbl, + .probe = megaraid_probe_one, + .remove = megaraid_remove_one, + .shutdown = megaraid_shutdown, +}; + +static int __init megaraid_init(void) +{ + int error; + + if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN)) + max_cmd_per_lun = MAX_CMD_PER_LUN; + if (max_mbox_busy_wait > MBOX_BUSY_WAIT) + max_mbox_busy_wait = MBOX_BUSY_WAIT; + +#ifdef CONFIG_PROC_FS + mega_proc_dir_entry = proc_mkdir("megaraid", NULL); + if (!mega_proc_dir_entry) { + printk(KERN_WARNING + "megaraid: failed to create megaraid root\n"); + } +#endif + error = pci_register_driver(&megaraid_pci_driver); + if (error) { +#ifdef CONFIG_PROC_FS + remove_proc_entry("megaraid", NULL); +#endif + return error; + } + + /* + * Register the driver as a character device, for applications + * to access it for ioctls. + * First argument (major) to register_chrdev implies a dynamic + * major number allocation. + */ + major = register_chrdev(0, "megadev_legacy", &megadev_fops); + if (major < 0) { + printk(KERN_WARNING + "megaraid: failed to register char device\n"); + } + + return 0; +} + +static void __exit megaraid_exit(void) +{ + /* + * Unregister the character device interface to the driver. + */ + unregister_chrdev(major, "megadev_legacy"); + + pci_unregister_driver(&megaraid_pci_driver); + +#ifdef CONFIG_PROC_FS + remove_proc_entry("megaraid", NULL); +#endif +} + +module_init(megaraid_init); +module_exit(megaraid_exit); + +/* vi: set ts=8 sw=8 tw=78: */ diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h new file mode 100644 index 000000000..013fbfb91 --- /dev/null +++ b/drivers/scsi/megaraid.h @@ -0,0 +1,1020 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __MEGARAID_H__ +#define __MEGARAID_H__ + +#include +#include +#include + +#define MEGARAID_VERSION \ + "v2.00.4 (Release Date: Thu Feb 9 08:51:30 EST 2006)\n" + +/* + * Driver features - change the values to enable or disable features in the + * driver. 
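/*
 * Illustrative user-space sketch, not part of the upstream patch: reading
 * one of the per-controller /proc entries.  megaraid_init() above creates
 * the "megaraid" root directory and mega_create_proc_entry() populates a
 * "hba<host_no>" subdirectory; the path below assumes the first adapter
 * registered as hba0.  The entries are created with mode S_IRUSR, so this
 * needs to run as root.
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/megaraid/hba0/config", "r");

	if (!f) {
		perror("fopen /proc/megaraid/hba0/config");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}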
+ */ + +/* + * Command coalescing - This feature allows the driver to be able to combine + * two or more commands and issue as one command in order to boost I/O + * performance. Useful if the nature of the I/O is sequential. It is not very + * useful for random natured I/Os. + */ +#define MEGA_HAVE_COALESCING 0 + +/* + * Clustering support - Set this flag if you are planning to use the + * clustering services provided by the megaraid controllers and planning to + * setup a cluster + */ +#define MEGA_HAVE_CLUSTERING 1 + +/* + * Driver statistics - Set this flag if you are interested in statics about + * number of I/O completed on each logical drive and how many interrupts + * generated. If enabled, this information is available through /proc + * interface and through the private ioctl. Setting this flag has a + * performance penalty. + */ +#define MEGA_HAVE_STATS 0 + +/* + * Enhanced /proc interface - This feature will allow you to have a more + * detailed /proc interface for megaraid driver. E.g., a real time update of + * the status of the logical drives, battery status, physical drives etc. + */ +#define MEGA_HAVE_ENH_PROC 1 + +#define MAX_DEV_TYPE 32 + +#define PCI_DEVICE_ID_DISCOVERY 0x000E +#define PCI_DEVICE_ID_PERC4_DI 0x000F +#define PCI_DEVICE_ID_PERC4_QC_VERDE 0x0407 + +#define HBA_SIGNATURE 0x3344 +#define HBA_SIGNATURE_471 0xCCCC +#define HBA_SIGNATURE_64BIT 0x0299 + +#define MBOX_BUSY_WAIT 10 /* wait for up to 10 usec for + mailbox to be free */ +#define DEFAULT_INITIATOR_ID 7 + +#define MAX_SGLIST 64 /* max supported in f/w */ +#define MIN_SGLIST 26 /* guaranteed to support these many */ +#define MAX_COMMANDS 126 +#define CMDID_INT_CMDS MAX_COMMANDS+1 /* make sure CMDID_INT_CMDS + is less than max commands + supported by any f/w */ + +#define MAX_CDB_LEN 10 +#define MAX_EXT_CDB_LEN 16 /* we support cdb length up to 16 */ + +#define DEF_CMD_PER_LUN 63 +#define MAX_CMD_PER_LUN MAX_COMMANDS +#define MAX_FIRMWARE_STATUS 46 +#define MAX_XFER_PER_CMD (64*1024) +#define MAX_SECTORS_PER_IO 128 + +#define MAX_LOGICAL_DRIVES_40LD 40 +#define FC_MAX_PHYSICAL_DEVICES 256 +#define MAX_LOGICAL_DRIVES_8LD 8 +#define MAX_CHANNELS 5 +#define MAX_TARGET 15 +#define MAX_PHYSICAL_DRIVES MAX_CHANNELS*MAX_TARGET +#define MAX_ROW_SIZE_40LD 32 +#define MAX_ROW_SIZE_8LD 8 +#define MAX_SPAN_DEPTH 8 + +#define NVIRT_CHAN 4 /* # of virtual channels to represent + up to 60 logical drives */ +struct mbox_out { + /* 0x0 */ u8 cmd; + /* 0x1 */ u8 cmdid; + /* 0x2 */ u16 numsectors; + /* 0x4 */ u32 lba; + /* 0x8 */ u32 xferaddr; + /* 0xC */ u8 logdrv; + /* 0xD */ u8 numsgelements; + /* 0xE */ u8 resvd; +} __attribute__ ((packed)); + +struct mbox_in { + /* 0xF */ volatile u8 busy; + /* 0x10 */ volatile u8 numstatus; + /* 0x11 */ volatile u8 status; + /* 0x12 */ volatile u8 completed[MAX_FIRMWARE_STATUS]; + volatile u8 poll; + volatile u8 ack; +} __attribute__ ((packed)); + +typedef struct { + struct mbox_out m_out; + struct mbox_in m_in; +} __attribute__ ((packed)) mbox_t; + +typedef struct { + u32 xfer_segment_lo; + u32 xfer_segment_hi; + mbox_t mbox; +} __attribute__ ((packed)) mbox64_t; + + +/* + * Passthru definitions + */ +#define MAX_REQ_SENSE_LEN 0x20 + +typedef struct { + u8 timeout:3; /* 0=6sec/1=60sec/2=10min/3=3hrs */ + u8 ars:1; + u8 reserved:3; + u8 islogical:1; + u8 logdrv; /* if islogical == 1 */ + u8 channel; /* if islogical == 0 */ + u8 target; /* if islogical == 0 */ + u8 queuetag; /* unused */ + u8 queueaction; /* unused */ + u8 cdb[MAX_CDB_LEN]; + u8 cdblen; + u8 reqsenselen; + u8 
reqsensearea[MAX_REQ_SENSE_LEN]; + u8 numsgelements; + u8 scsistatus; + u32 dataxferaddr; + u32 dataxferlen; +} __attribute__ ((packed)) mega_passthru; + + +/* + * Extended passthru: support CDB > 10 bytes + */ +typedef struct { + u8 timeout:3; /* 0=6sec/1=60sec/2=10min/3=3hrs */ + u8 ars:1; + u8 rsvd1:1; + u8 cd_rom:1; + u8 rsvd2:1; + u8 islogical:1; + u8 logdrv; /* if islogical == 1 */ + u8 channel; /* if islogical == 0 */ + u8 target; /* if islogical == 0 */ + u8 queuetag; /* unused */ + u8 queueaction; /* unused */ + u8 cdblen; + u8 rsvd3; + u8 cdb[MAX_EXT_CDB_LEN]; + u8 numsgelements; + u8 status; + u8 reqsenselen; + u8 reqsensearea[MAX_REQ_SENSE_LEN]; + u8 rsvd4; + u32 dataxferaddr; + u32 dataxferlen; +} __attribute__ ((packed)) mega_ext_passthru; + +typedef struct { + u64 address; + u32 length; +} __attribute__ ((packed)) mega_sgl64; + +typedef struct { + u32 address; + u32 length; +} __attribute__ ((packed)) mega_sglist; + + +/* Queued command data */ +typedef struct { + int idx; + u32 state; + struct list_head list; + u8 raw_mbox[66]; + u32 dma_type; + u32 dma_direction; + + struct scsi_cmnd *cmd; + dma_addr_t dma_h_bulkdata; + dma_addr_t dma_h_sgdata; + + mega_sglist *sgl; + mega_sgl64 *sgl64; + dma_addr_t sgl_dma_addr; + + mega_passthru *pthru; + dma_addr_t pthru_dma_addr; + mega_ext_passthru *epthru; + dma_addr_t epthru_dma_addr; +} scb_t; + +/* + * Flags to follow the scb as it transitions between various stages + */ +#define SCB_FREE 0x0000 /* on the free list */ +#define SCB_ACTIVE 0x0001 /* off the free list */ +#define SCB_PENDQ 0x0002 /* on the pending queue */ +#define SCB_ISSUED 0x0004 /* issued - owner f/w */ +#define SCB_ABORT 0x0008 /* Got an abort for this one */ +#define SCB_RESET 0x0010 /* Got a reset for this one */ + +/* + * Utilities declare this strcture size as 1024 bytes. So more fields can + * be added in future. + */ +typedef struct { + u32 data_size; /* current size in bytes (not including resvd) */ + + u32 config_signature; + /* Current value is 0x00282008 + * 0x28=MAX_LOGICAL_DRIVES, + * 0x20=Number of stripes and + * 0x08=Number of spans */ + + u8 fw_version[16]; /* printable ASCI string */ + u8 bios_version[16]; /* printable ASCI string */ + u8 product_name[80]; /* printable ASCI string */ + + u8 max_commands; /* Max. 
concurrent commands supported */ + u8 nchannels; /* Number of SCSI Channels detected */ + u8 fc_loop_present; /* Number of Fibre Loops detected */ + u8 mem_type; /* EDO, FPM, SDRAM etc */ + + u32 signature; + u16 dram_size; /* In terms of MB */ + u16 subsysid; + + u16 subsysvid; + u8 notify_counters; + u8 pad1k[889]; /* 135 + 889 resvd = 1024 total size */ +} __attribute__ ((packed)) mega_product_info; + +struct notify { + u32 global_counter; /* Any change increments this counter */ + + u8 param_counter; /* Indicates any params changed */ + u8 param_id; /* Param modified - defined below */ + u16 param_val; /* New val of last param modified */ + + u8 write_config_counter; /* write config occurred */ + u8 write_config_rsvd[3]; + + u8 ldrv_op_counter; /* Indicates ldrv op started/completed */ + u8 ldrv_opid; /* ldrv num */ + u8 ldrv_opcmd; /* ldrv operation - defined below */ + u8 ldrv_opstatus; /* status of the operation */ + + u8 ldrv_state_counter; /* Indicates change of ldrv state */ + u8 ldrv_state_id; /* ldrv num */ + u8 ldrv_state_new; /* New state */ + u8 ldrv_state_old; /* old state */ + + u8 pdrv_state_counter; /* Indicates change of ldrv state */ + u8 pdrv_state_id; /* pdrv id */ + u8 pdrv_state_new; /* New state */ + u8 pdrv_state_old; /* old state */ + + u8 pdrv_fmt_counter; /* Indicates pdrv format started/over */ + u8 pdrv_fmt_id; /* pdrv id */ + u8 pdrv_fmt_val; /* format started/over */ + u8 pdrv_fmt_rsvd; + + u8 targ_xfer_counter; /* Indicates SCSI-2 Xfer rate change */ + u8 targ_xfer_id; /* pdrv Id */ + u8 targ_xfer_val; /* new Xfer params of last pdrv */ + u8 targ_xfer_rsvd; + + u8 fcloop_id_chg_counter; /* Indicates loopid changed */ + u8 fcloopid_pdrvid; /* pdrv id */ + u8 fcloop_id0; /* loopid on fc loop 0 */ + u8 fcloop_id1; /* loopid on fc loop 1 */ + + u8 fcloop_state_counter; /* Indicates loop state changed */ + u8 fcloop_state0; /* state of fc loop 0 */ + u8 fcloop_state1; /* state of fc loop 1 */ + u8 fcloop_state_rsvd; +} __attribute__ ((packed)); + +#define MAX_NOTIFY_SIZE 0x80 +#define CUR_NOTIFY_SIZE sizeof(struct notify) + +typedef struct { + u32 data_size; /* current size in bytes (not including resvd) */ + + struct notify notify; + + u8 notify_rsvd[MAX_NOTIFY_SIZE - CUR_NOTIFY_SIZE]; + + u8 rebuild_rate; /* Rebuild rate (0% - 100%) */ + u8 cache_flush_interval; /* In terms of Seconds */ + u8 sense_alert; + u8 drive_insert_count; /* drive insertion count */ + + u8 battery_status; + u8 num_ldrv; /* No. of Log Drives configured */ + u8 recon_state[MAX_LOGICAL_DRIVES_40LD / 8]; /* State of + reconstruct */ + u16 ldrv_op_status[MAX_LOGICAL_DRIVES_40LD / 8]; /* logdrv + Status */ + + u32 ldrv_size[MAX_LOGICAL_DRIVES_40LD];/* Size of each log drv */ + u8 ldrv_prop[MAX_LOGICAL_DRIVES_40LD]; + u8 ldrv_state[MAX_LOGICAL_DRIVES_40LD];/* State of log drives */ + u8 pdrv_state[FC_MAX_PHYSICAL_DEVICES];/* State of phys drvs. 
*/ + u16 pdrv_format[FC_MAX_PHYSICAL_DEVICES / 16]; + + u8 targ_xfer[80]; /* phys device transfer rate */ + u8 pad1k[263]; /* 761 + 263reserved = 1024 bytes total size */ +} __attribute__ ((packed)) mega_inquiry3; + + +/* Structures */ +typedef struct { + u8 max_commands; /* Max concurrent commands supported */ + u8 rebuild_rate; /* Rebuild rate - 0% thru 100% */ + u8 max_targ_per_chan; /* Max targ per channel */ + u8 nchannels; /* Number of channels on HBA */ + u8 fw_version[4]; /* Firmware version */ + u16 age_of_flash; /* Number of times FW has been flashed */ + u8 chip_set_value; /* Contents of 0xC0000832 */ + u8 dram_size; /* In MB */ + u8 cache_flush_interval; /* in seconds */ + u8 bios_version[4]; + u8 board_type; + u8 sense_alert; + u8 write_config_count; /* Increase with every configuration + change */ + u8 drive_inserted_count; /* Increase with every drive inserted + */ + u8 inserted_drive; /* Channel:Id of inserted drive */ + u8 battery_status; /* + * BIT 0: battery module missing + * BIT 1: VBAD + * BIT 2: temperature high + * BIT 3: battery pack missing + * BIT 4,5: + * 00 - charge complete + * 01 - fast charge in progress + * 10 - fast charge fail + * 11 - undefined + * Bit 6: counter > 1000 + * Bit 7: Undefined + */ + u8 dec_fault_bus_info; +} __attribute__ ((packed)) mega_adp_info; + + +typedef struct { + u8 num_ldrv; /* Number of logical drives configured */ + u8 rsvd[3]; + u32 ldrv_size[MAX_LOGICAL_DRIVES_8LD]; + u8 ldrv_prop[MAX_LOGICAL_DRIVES_8LD]; + u8 ldrv_state[MAX_LOGICAL_DRIVES_8LD]; +} __attribute__ ((packed)) mega_ldrv_info; + +typedef struct { + u8 pdrv_state[MAX_PHYSICAL_DRIVES]; + u8 rsvd; +} __attribute__ ((packed)) mega_pdrv_info; + +/* RAID inquiry: Mailbox command 0x05*/ +typedef struct { + mega_adp_info adapter_info; + mega_ldrv_info logdrv_info; + mega_pdrv_info pdrv_info; +} __attribute__ ((packed)) mraid_inquiry; + + +/* RAID extended inquiry: Mailbox command 0x04*/ +typedef struct { + mraid_inquiry raid_inq; + u16 phys_drv_format[MAX_CHANNELS]; + u8 stack_attn; + u8 modem_status; + u8 rsvd[2]; +} __attribute__ ((packed)) mraid_ext_inquiry; + + +typedef struct { + u8 channel; + u8 target; +}__attribute__ ((packed)) adp_device; + +typedef struct { + u32 start_blk; /* starting block */ + u32 num_blks; /* # of blocks */ + adp_device device[MAX_ROW_SIZE_40LD]; +}__attribute__ ((packed)) adp_span_40ld; + +typedef struct { + u32 start_blk; /* starting block */ + u32 num_blks; /* # of blocks */ + adp_device device[MAX_ROW_SIZE_8LD]; +}__attribute__ ((packed)) adp_span_8ld; + +typedef struct { + u8 span_depth; /* Total # of spans */ + u8 level; /* RAID level */ + u8 read_ahead; /* read ahead, no read ahead, adaptive read + ahead */ + u8 stripe_sz; /* Encoded stripe size */ + u8 status; /* Status of the logical drive */ + u8 write_mode; /* write mode, write_through/write_back */ + u8 direct_io; /* direct io or through cache */ + u8 row_size; /* Number of stripes in a row */ +} __attribute__ ((packed)) logdrv_param; + +typedef struct { + logdrv_param lparam; + adp_span_40ld span[MAX_SPAN_DEPTH]; +}__attribute__ ((packed)) logdrv_40ld; + +typedef struct { + logdrv_param lparam; + adp_span_8ld span[MAX_SPAN_DEPTH]; +}__attribute__ ((packed)) logdrv_8ld; + +typedef struct { + u8 type; /* Type of the device */ + u8 cur_status; /* current status of the device */ + u8 tag_depth; /* Level of tagging */ + u8 sync_neg; /* sync negotiation - ENABLE or DISABLE */ + u32 size; /* configurable size in terms of 512 byte + blocks */ +}__attribute__ ((packed)) phys_drv; + 
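[Editorial note] The nesting of the configuration structures above is easiest to see in code. The short function below is an illustrative sketch only, not part of the driver or of this patch: the function name and message strings are invented, and it merely walks one 40LD logical drive using the logdrv_40ld, adp_span_40ld and adp_device layouts just defined, bounded by MAX_SPAN_DEPTH and MAX_ROW_SIZE_40LD from this header.

	static void example_dump_logdrv_40ld(const logdrv_40ld *ld)
	{
		const logdrv_param *lp = &ld->lparam;
		int s, d;

		/* RAID level, span count and row size come from logdrv_param */
		printk(KERN_INFO "example: RAID%d, %d span(s), row size %d\n",
				lp->level, lp->span_depth, lp->row_size);

		for (s = 0; s < lp->span_depth && s < MAX_SPAN_DEPTH; s++) {
			const adp_span_40ld *sp = &ld->span[s];

			printk(KERN_INFO "example: span %d: start block %u, %u blocks\n",
					s, sp->start_blk, sp->num_blks);

			/* each row slot names a physical disk by channel:target */
			for (d = 0; d < lp->row_size && d < MAX_ROW_SIZE_40LD; d++)
				printk(KERN_INFO "example:  disk %d:%d\n",
						sp->device[d].channel,
						sp->device[d].target);
		}
	}

An 8LD drive (logdrv_8ld) walks the same way, with MAX_ROW_SIZE_8LD as the row bound.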
+typedef struct { + u8 nlog_drives; /* number of logical drives */ + u8 resvd[3]; + logdrv_40ld ldrv[MAX_LOGICAL_DRIVES_40LD]; + phys_drv pdrv[MAX_PHYSICAL_DRIVES]; +}__attribute__ ((packed)) disk_array_40ld; + +typedef struct { + u8 nlog_drives; /* number of logical drives */ + u8 resvd[3]; + logdrv_8ld ldrv[MAX_LOGICAL_DRIVES_8LD]; + phys_drv pdrv[MAX_PHYSICAL_DRIVES]; +}__attribute__ ((packed)) disk_array_8ld; + + +/* + * User ioctl structure. + * This structure will be used for Traditional Method ioctl interface + * commands (0x80),Alternate Buffer Method (0x81) ioctl commands and the + * Driver ioctls. + * The Driver ioctl interface handles the commands at the driver level, + * without being sent to the card. + */ +/* system call imposed limit. Change accordingly */ +#define IOCTL_MAX_DATALEN 4096 + +struct uioctl_t { + u32 inlen; + u32 outlen; + union { + u8 fca[16]; + struct { + u8 opcode; + u8 subopcode; + u16 adapno; +#if BITS_PER_LONG == 32 + u8 *buffer; + u8 pad[4]; +#endif +#if BITS_PER_LONG == 64 + u8 *buffer; +#endif + u32 length; + } __attribute__ ((packed)) fcs; + } __attribute__ ((packed)) ui; + u8 mbox[18]; /* 16 bytes + 2 status bytes */ + mega_passthru pthru; +#if BITS_PER_LONG == 32 + char __user *data; /* buffer <= 4096 for 0x80 commands */ + char pad[4]; +#endif +#if BITS_PER_LONG == 64 + char __user *data; +#endif +} __attribute__ ((packed)); + +/* + * struct mcontroller is used to pass information about the controllers in the + * system. Its up to the application how to use the information. We are passing + * as much info about the cards as possible and useful. Before issuing the + * call to find information about the cards, the application needs to issue a + * ioctl first to find out the number of controllers in the system. + */ +#define MAX_CONTROLLERS 32 + +struct mcontroller { + u64 base; + u8 irq; + u8 numldrv; + u8 pcibus; + u16 pcidev; + u8 pcifun; + u16 pciid; + u16 pcivendor; + u8 pcislot; + u32 uid; +}; + +/* + * mailbox structure used for internal commands + */ +typedef struct { + u8 cmd; + u8 cmdid; + u8 opcode; + u8 subopcode; + u32 lba; + u32 xferaddr; + u8 logdrv; + u8 rsvd[3]; + u8 numstatus; + u8 status; +} __attribute__ ((packed)) megacmd_t; + +/* + * Defines for Driver IOCTL interface + */ +#define MEGAIOC_MAGIC 'm' + +#define MEGAIOC_QNADAP 'm' /* Query # of adapters */ +#define MEGAIOC_QDRVRVER 'e' /* Query driver version */ +#define MEGAIOC_QADAPINFO 'g' /* Query adapter information */ +#define MKADAP(adapno) (MEGAIOC_MAGIC << 8 | (adapno) ) +#define GETADAP(mkadap) ( (mkadap) ^ MEGAIOC_MAGIC << 8 ) + +/* + * Definition for the new ioctl interface (NIT) + */ + +/* + * Vendor specific Group-7 commands + */ +#define VENDOR_SPECIFIC_COMMANDS 0xE0 +#define MEGA_INTERNAL_CMD VENDOR_SPECIFIC_COMMANDS + 0x01 + +/* + * The ioctl command. No other command shall be used for this interface + */ +#define USCSICMD VENDOR_SPECIFIC_COMMANDS + +/* + * Data direction flags + */ +#define UIOC_RD 0x00001 +#define UIOC_WR 0x00002 + +/* + * ioctl opcodes + */ +#define MBOX_CMD 0x00000 /* DCMD or passthru command */ +#define GET_DRIVER_VER 0x10000 /* Get driver version */ +#define GET_N_ADAP 0x20000 /* Get number of adapters */ +#define GET_ADAP_INFO 0x30000 /* Get information about a adapter */ +#define GET_CAP 0x40000 /* Get ioctl capabilities */ +#define GET_STATS 0x50000 /* Get statistics, including error info */ + + +/* + * The ioctl structure. 
+ * MBOX macro converts a nitioctl_t structure to megacmd_t pointer and + * MBOX_P macro converts a nitioctl_t pointer to megacmd_t pointer. + */ +typedef struct { + char signature[8]; /* Must contain "MEGANIT" */ + u32 opcode; /* opcode for the command */ + u32 adapno; /* adapter number */ + union { + u8 __raw_mbox[18]; + void __user *__uaddr; /* xferaddr for non-mbox cmds */ + }__ua; + +#define uioc_rmbox __ua.__raw_mbox +#define MBOX(uioc) ((megacmd_t *)&((uioc).__ua.__raw_mbox[0])) +#define MBOX_P(uioc) ((megacmd_t __user *)&((uioc)->__ua.__raw_mbox[0])) +#define uioc_uaddr __ua.__uaddr + + u32 xferlen; /* xferlen for DCMD and non-mbox + commands */ + u32 flags; /* data direction flags */ +}nitioctl_t; + + +/* + * I/O statistics for some applications like SNMP agent. The caller must + * provide the number of logical drives for which status should be reported. + */ +typedef struct { + int num_ldrv; /* Number for logical drives for which the + status should be reported. */ + u32 nreads[MAX_LOGICAL_DRIVES_40LD]; /* number of reads for + each logical drive */ + u32 nreadblocks[MAX_LOGICAL_DRIVES_40LD]; /* number of blocks + read for each logical + drive */ + u32 nwrites[MAX_LOGICAL_DRIVES_40LD]; /* number of writes + for each logical + drive */ + u32 nwriteblocks[MAX_LOGICAL_DRIVES_40LD]; /* number of blocks + writes for each + logical drive */ + u32 rd_errors[MAX_LOGICAL_DRIVES_40LD]; /* number of read + errors for each + logical drive */ + u32 wr_errors[MAX_LOGICAL_DRIVES_40LD]; /* number of write + errors for each + logical drive */ +}megastat_t; + + +struct private_bios_data { + u8 geometry:4; /* + * bits 0-3 - BIOS geometry + * 0x0001 - 1GB + * 0x0010 - 2GB + * 0x1000 - 8GB + * Others values are invalid + */ + u8 unused:4; /* bits 4-7 are unused */ + u8 boot_drv; /* + * logical drive set as boot drive + * 0..7 - for 8LD cards + * 0..39 - for 40LD cards + */ + u8 rsvd[12]; + u16 cksum; /* 0-(sum of first 13 bytes of this structure) */ +} __attribute__ ((packed)); + + + + +/* + * Mailbox and firmware commands and subopcodes used in this driver. 
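 *
 * Editorial sketch, not driver code: an internal enquiry-3 request built
 * with these opcodes and the megacmd_t layout above might be put together
 * roughly as follows, where adapter is the controller's soft state and
 * dma_handle is an assumed DMA address of a large enough reply buffer:
 *
 *	megacmd_t mc = {0};
 *
 *	mc.cmd = FC_NEW_CONFIG;
 *	mc.opcode = NC_SUBOP_ENQUIRY3;
 *	mc.subopcode = ENQ3_GET_SOLICITED_FULL;
 *	mc.xferaddr = (u32)dma_handle;	// 32-bit reply buffer address
 *	mega_internal_command(adapter, &mc, NULL);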
+ */ + +#define MEGA_MBOXCMD_LREAD 0x01 +#define MEGA_MBOXCMD_LWRITE 0x02 +#define MEGA_MBOXCMD_PASSTHRU 0x03 +#define MEGA_MBOXCMD_ADPEXTINQ 0x04 +#define MEGA_MBOXCMD_ADAPTERINQ 0x05 +#define MEGA_MBOXCMD_LREAD64 0xA7 +#define MEGA_MBOXCMD_LWRITE64 0xA8 +#define MEGA_MBOXCMD_PASSTHRU64 0xC3 +#define MEGA_MBOXCMD_EXTPTHRU 0xE3 + +#define MAIN_MISC_OPCODE 0xA4 /* f/w misc opcode */ +#define GET_MAX_SG_SUPPORT 0x01 /* get max sg len supported by f/w */ + +#define FC_NEW_CONFIG 0xA1 +#define NC_SUBOP_PRODUCT_INFO 0x0E +#define NC_SUBOP_ENQUIRY3 0x0F +#define ENQ3_GET_SOLICITED_FULL 0x02 +#define OP_DCMD_READ_CONFIG 0x04 +#define NEW_READ_CONFIG_8LD 0x67 +#define READ_CONFIG_8LD 0x07 +#define FLUSH_ADAPTER 0x0A +#define FLUSH_SYSTEM 0xFE + +/* + * Command for random deletion of logical drives + */ +#define FC_DEL_LOGDRV 0xA4 /* f/w command */ +#define OP_SUP_DEL_LOGDRV 0x2A /* is feature supported */ +#define OP_GET_LDID_MAP 0x18 /* get ldid and logdrv number map */ +#define OP_DEL_LOGDRV 0x1C /* delete logical drive */ + +/* + * BIOS commands + */ +#define IS_BIOS_ENABLED 0x62 +#define GET_BIOS 0x01 +#define CHNL_CLASS 0xA9 +#define GET_CHNL_CLASS 0x00 +#define SET_CHNL_CLASS 0x01 +#define CH_RAID 0x01 +#define CH_SCSI 0x00 +#define BIOS_PVT_DATA 0x40 +#define GET_BIOS_PVT_DATA 0x00 + + +/* + * Commands to support clustering + */ +#define MEGA_GET_TARGET_ID 0x7D +#define MEGA_CLUSTER_OP 0x70 +#define MEGA_GET_CLUSTER_MODE 0x02 +#define MEGA_CLUSTER_CMD 0x6E +#define MEGA_RESERVE_LD 0x01 +#define MEGA_RELEASE_LD 0x02 +#define MEGA_RESET_RESERVATIONS 0x03 +#define MEGA_RESERVATION_STATUS 0x04 +#define MEGA_RESERVE_PD 0x05 +#define MEGA_RELEASE_PD 0x06 + + +/* + * Module battery status + */ +#define MEGA_BATT_MODULE_MISSING 0x01 +#define MEGA_BATT_LOW_VOLTAGE 0x02 +#define MEGA_BATT_TEMP_HIGH 0x04 +#define MEGA_BATT_PACK_MISSING 0x08 +#define MEGA_BATT_CHARGE_MASK 0x30 +#define MEGA_BATT_CHARGE_DONE 0x00 +#define MEGA_BATT_CHARGE_INPROG 0x10 +#define MEGA_BATT_CHARGE_FAIL 0x20 +#define MEGA_BATT_CYCLES_EXCEEDED 0x40 + +/* + * Physical drive states. + */ +#define PDRV_UNCNF 0 +#define PDRV_ONLINE 3 +#define PDRV_FAILED 4 +#define PDRV_RBLD 5 +#define PDRV_HOTSPARE 6 + + +/* + * Raid logical drive states. 
+ */ +#define RDRV_OFFLINE 0 +#define RDRV_DEGRADED 1 +#define RDRV_OPTIMAL 2 +#define RDRV_DELETED 3 + +/* + * Read, write and cache policies + */ +#define NO_READ_AHEAD 0 +#define READ_AHEAD 1 +#define ADAP_READ_AHEAD 2 +#define WRMODE_WRITE_THRU 0 +#define WRMODE_WRITE_BACK 1 +#define CACHED_IO 0 +#define DIRECT_IO 1 + +struct megaraid_cmd_priv { + struct list_head entry; +}; + +#define SCSI_LIST(scp) \ + (&((struct megaraid_cmd_priv *)scsi_cmd_priv(scp))->entry) + +struct scsi_cmd_and_priv { + struct scsi_cmnd cmd; + struct megaraid_cmd_priv priv; +}; + +static inline struct scsi_cmnd * +megaraid_to_scsi_cmd(struct megaraid_cmd_priv *cmd_priv) +{ + /* See also scsi_mq_setup_tags() */ + BUILD_BUG_ON(sizeof(struct scsi_cmd_and_priv) != + sizeof(struct scsi_cmnd) + + sizeof(struct megaraid_cmd_priv)); + + return &container_of(cmd_priv, struct scsi_cmd_and_priv, priv)->cmd; +} + +/* + * Each controller's soft state + */ +typedef struct { + int this_id; /* our id, may set to different than 7 if + clustering is available */ + u32 flag; + + unsigned long base; + void __iomem *mmio_base; + + /* mbox64 with mbox not aligned on 16-byte boundary */ + mbox64_t *una_mbox64; + dma_addr_t una_mbox64_dma; + + volatile mbox64_t *mbox64;/* ptr to 64-bit mailbox */ + volatile mbox_t *mbox; /* ptr to standard mailbox */ + dma_addr_t mbox_dma; + + struct pci_dev *dev; + + struct list_head free_list; + struct list_head pending_list; + struct list_head completed_list; + + struct Scsi_Host *host; + +#define MEGA_BUFFER_SIZE (2*1024) + u8 *mega_buffer; + dma_addr_t buf_dma_handle; + + mega_product_info product_info; + + u8 max_cmds; + scb_t *scb_list; + + atomic_t pend_cmds; /* maintain a counter for pending + commands in firmware */ + +#if MEGA_HAVE_STATS + u32 nreads[MAX_LOGICAL_DRIVES_40LD]; + u32 nreadblocks[MAX_LOGICAL_DRIVES_40LD]; + u32 nwrites[MAX_LOGICAL_DRIVES_40LD]; + u32 nwriteblocks[MAX_LOGICAL_DRIVES_40LD]; + u32 rd_errors[MAX_LOGICAL_DRIVES_40LD]; + u32 wr_errors[MAX_LOGICAL_DRIVES_40LD]; +#endif + + /* Host adapter parameters */ + u8 numldrv; + u8 fw_version[7]; + u8 bios_version[7]; + +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *controller_proc_dir_entry; +#endif + + int has_64bit_addr; /* are we using 64-bit addressing */ + int support_ext_cdb; + int boot_ldrv_enabled; + int boot_ldrv; + int boot_pdrv_enabled; /* boot from physical drive */ + int boot_pdrv_ch; /* boot physical drive channel */ + int boot_pdrv_tgt; /* boot physical drive target */ + + + int support_random_del; /* Do we support random deletion of + logdrvs */ + int read_ldidmap; /* set after logical drive deltion. The + logical drive number must be read from the + map */ + atomic_t quiescent; /* a stage reached when delete logical + drive needs to be done. Stop + sending requests to the hba till + delete operation is completed */ + spinlock_t lock; + + u8 logdrv_chan[MAX_CHANNELS+NVIRT_CHAN]; /* logical drive are on + what channels. */ + int mega_ch_class; + + u8 sglen; /* f/w supported scatter-gather list length */ + + scb_t int_scb; + struct mutex int_mtx; /* To synchronize the internal + commands */ + int int_status; /* status of internal cmd */ + struct completion int_waitq; /* wait queue for internal + cmds */ + + int has_cluster; /* cluster support on this HBA */ +}adapter_t; + + +struct mega_hbas { + int is_bios_enabled; + adapter_t *hostdata_addr; +}; + + +/* + * For state flag. Do not use LSB(8 bits) which are + * reserved for storing info about channels. 
+ */ +#define IN_ABORT 0x80000000L +#define IN_RESET 0x40000000L +#define BOARD_MEMMAP 0x20000000L +#define BOARD_IOMAP 0x10000000L +#define BOARD_40LD 0x08000000L +#define BOARD_64BIT 0x04000000L + +#define INTR_VALID 0x40 + +#define PCI_CONF_AMISIG 0xa0 +#define PCI_CONF_AMISIG64 0xa4 + + +#define MEGA_DMA_TYPE_NONE 0xFFFF +#define MEGA_BULK_DATA 0x0001 +#define MEGA_SGLIST 0x0002 + +/* + * Parameters for the io-mapped controllers + */ + +/* I/O Port offsets */ +#define CMD_PORT 0x00 +#define ACK_PORT 0x00 +#define TOGGLE_PORT 0x01 +#define INTR_PORT 0x0a + +#define MBOX_BUSY_PORT 0x00 +#define MBOX_PORT0 0x04 +#define MBOX_PORT1 0x05 +#define MBOX_PORT2 0x06 +#define MBOX_PORT3 0x07 +#define ENABLE_MBOX_REGION 0x0B + +/* I/O Port Values */ +#define ISSUE_BYTE 0x10 +#define ACK_BYTE 0x08 +#define ENABLE_INTR_BYTE 0xc0 +#define DISABLE_INTR_BYTE 0x00 +#define VALID_INTR_BYTE 0x40 +#define MBOX_BUSY_BYTE 0x10 +#define ENABLE_MBOX_BYTE 0x00 + + +/* Setup some port macros here */ +#define issue_command(adapter) \ + outb_p(ISSUE_BYTE, (adapter)->base + CMD_PORT) + +#define irq_state(adapter) inb_p((adapter)->base + INTR_PORT) + +#define set_irq_state(adapter, value) \ + outb_p((value), (adapter)->base + INTR_PORT) + +#define irq_ack(adapter) \ + outb_p(ACK_BYTE, (adapter)->base + ACK_PORT) + +#define irq_enable(adapter) \ + outb_p(ENABLE_INTR_BYTE, (adapter)->base + TOGGLE_PORT) + +#define irq_disable(adapter) \ + outb_p(DISABLE_INTR_BYTE, (adapter)->base + TOGGLE_PORT) + + +/* + * This is our SYSDEP area. All kernel specific detail should be placed here - + * as much as possible + */ + +/* + * End of SYSDEP area + */ + +const char *megaraid_info (struct Scsi_Host *); + +static int mega_query_adapter(adapter_t *); +static int issue_scb(adapter_t *, scb_t *); +static int mega_setup_mailbox(adapter_t *); + +static int megaraid_queue (struct Scsi_Host *, struct scsi_cmnd *); +static scb_t * mega_build_cmd(adapter_t *, struct scsi_cmnd *, int *); +static void __mega_runpendq(adapter_t *); +static int issue_scb_block(adapter_t *, u_char *); + +static irqreturn_t megaraid_isr_memmapped(int, void *); +static irqreturn_t megaraid_isr_iomapped(int, void *); + +static void mega_free_scb(adapter_t *, scb_t *); + +static int megaraid_abort(struct scsi_cmnd *); +static int megaraid_reset(struct scsi_cmnd *); +static int megaraid_abort_and_reset(adapter_t *, struct scsi_cmnd *, int); +static int megaraid_biosparam(struct scsi_device *, struct block_device *, + sector_t, int []); + +static int mega_build_sglist (adapter_t *adapter, scb_t *scb, + u32 *buffer, u32 *length); +static int __mega_busywait_mbox (adapter_t *); +static void mega_rundoneq (adapter_t *); +static void mega_cmd_done(adapter_t *, u8 [], int, int); +static inline void mega_free_sgl (adapter_t *adapter); +static void mega_8_to_40ld (mraid_inquiry *inquiry, + mega_inquiry3 *enquiry3, mega_product_info *); + +static int megadev_open (struct inode *, struct file *); +static int megadev_ioctl (struct file *, unsigned int, unsigned long); +static int mega_m_to_n(void __user *, nitioctl_t *); +static int mega_n_to_m(void __user *, megacmd_t *); + +static int mega_init_scb (adapter_t *); + +static int mega_is_bios_enabled (adapter_t *); + +#ifdef CONFIG_PROC_FS +static void mega_create_proc_entry(int, struct proc_dir_entry *); +static int mega_adapinq(adapter_t *, dma_addr_t); +static int mega_internal_dev_inquiry(adapter_t *, u8, u8, dma_addr_t); +#endif + +static int mega_support_ext_cdb(adapter_t *); +static mega_passthru* 
mega_prepare_passthru(adapter_t *, scb_t *, + struct scsi_cmnd *, int, int); +static mega_ext_passthru* mega_prepare_extpassthru(adapter_t *, + scb_t *, struct scsi_cmnd *, int, int); +static void mega_enum_raid_scsi(adapter_t *); +static void mega_get_boot_drv(adapter_t *); +static int mega_support_random_del(adapter_t *); +static int mega_del_logdrv(adapter_t *, int); +static int mega_do_del_logdrv(adapter_t *, int); +static void mega_get_max_sgl(adapter_t *); +static int mega_internal_command(adapter_t *, megacmd_t *, mega_passthru *); +static int mega_support_cluster(adapter_t *); +#endif + +/* vi: set ts=8 sw=8 tw=78: */ diff --git a/drivers/scsi/megaraid/Kconfig.megaraid b/drivers/scsi/megaraid/Kconfig.megaraid new file mode 100644 index 000000000..3f2ce1eb0 --- /dev/null +++ b/drivers/scsi/megaraid/Kconfig.megaraid @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: GPL-2.0-only +config MEGARAID_NEWGEN + bool "LSI Logic New Generation RAID Device Drivers" + depends on PCI && HAS_IOPORT && SCSI + help + LSI Logic RAID Device Drivers + +config MEGARAID_MM + tristate "LSI Logic Management Module (New Driver)" + depends on PCI && HAS_IOPORT && SCSI && MEGARAID_NEWGEN + help + Management Module provides ioctl, sysfs support for LSI Logic + RAID controllers. + To compile this driver as a module, choose M here: the + module will be called megaraid_mm + + +config MEGARAID_MAILBOX + tristate "LSI Logic MegaRAID Driver (New Driver)" + depends on PCI && SCSI && MEGARAID_MM + help + List of supported controllers + + OEM Product Name VID :DID :SVID:SSID + --- ------------ ---- ---- ---- ---- + Dell PERC3/QC 101E:1960:1028:0471 + Dell PERC3/DC 101E:1960:1028:0493 + Dell PERC3/SC 101E:1960:1028:0475 + Dell PERC3/Di 1028:000E:1028:0123 + Dell PERC4/SC 1000:1960:1028:0520 + Dell PERC4/DC 1000:1960:1028:0518 + Dell PERC4/QC 1000:0407:1028:0531 + Dell PERC4/Di 1028:000F:1028:014A + Dell PERC 4e/Si 1028:0013:1028:016c + Dell PERC 4e/Di 1028:0013:1028:016d + Dell PERC 4e/Di 1028:0013:1028:016e + Dell PERC 4e/Di 1028:0013:1028:016f + Dell PERC 4e/Di 1028:0013:1028:0170 + Dell PERC 4e/DC 1000:0408:1028:0002 + Dell PERC 4e/SC 1000:0408:1028:0001 + LSI MegaRAID SCSI 320-0 1000:1960:1000:A520 + LSI MegaRAID SCSI 320-1 1000:1960:1000:0520 + LSI MegaRAID SCSI 320-2 1000:1960:1000:0518 + LSI MegaRAID SCSI 320-0X 1000:0407:1000:0530 + LSI MegaRAID SCSI 320-2X 1000:0407:1000:0532 + LSI MegaRAID SCSI 320-4X 1000:0407:1000:0531 + LSI MegaRAID SCSI 320-1E 1000:0408:1000:0001 + LSI MegaRAID SCSI 320-2E 1000:0408:1000:0002 + LSI MegaRAID SATA 150-4 1000:1960:1000:4523 + LSI MegaRAID SATA 150-6 1000:1960:1000:0523 + LSI MegaRAID SATA 300-4X 1000:0409:1000:3004 + LSI MegaRAID SATA 300-8X 1000:0409:1000:3008 + INTEL RAID Controller SRCU42X 1000:0407:8086:0532 + INTEL RAID Controller SRCS16 1000:1960:8086:0523 + INTEL RAID Controller SRCU42E 1000:0408:8086:0002 + INTEL RAID Controller SRCZCRX 1000:0407:8086:0530 + INTEL RAID Controller SRCS28X 1000:0409:8086:3008 + INTEL RAID Controller SROMBU42E 1000:0408:8086:3431 + INTEL RAID Controller SROMBU42E 1000:0408:8086:3499 + INTEL RAID Controller SRCU51L 1000:1960:8086:0520 + FSC MegaRAID PCI Express ROMB 1000:0408:1734:1065 + ACER MegaRAID ROMB-2E 1000:0408:1025:004D + NEC MegaRAID PCI Express ROMB 1000:0408:1033:8287 + + To compile this driver as a module, choose M here: the + module will be called megaraid_mbox + +config MEGARAID_LEGACY + tristate "LSI Logic Legacy MegaRAID Driver" + depends on PCI && HAS_IOPORT && SCSI + help + This driver supports the LSI MegaRAID 418, 428, 
438, 466, 762, 490 + and 467 SCSI host adapters. This driver also support the all U320 + RAID controllers + + To compile this driver as a module, choose M here: the + module will be called megaraid + +config MEGARAID_SAS + tristate "LSI Logic MegaRAID SAS RAID Module" + depends on PCI && SCSI + select IRQ_POLL + help + Module for LSI Logic's SAS based RAID controllers. + To compile this driver as a module, choose 'm' here. + Module will be called megaraid_sas + diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile new file mode 100644 index 000000000..12177e4ca --- /dev/null +++ b/drivers/scsi/megaraid/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o +obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o +obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o +megaraid_sas-objs := megaraid_sas_base.o megaraid_sas_fusion.o \ + megaraid_sas_fp.o megaraid_sas_debugfs.o diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h new file mode 100644 index 000000000..f0ef8f7f8 --- /dev/null +++ b/drivers/scsi/megaraid/mbox_defs.h @@ -0,0 +1,783 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * + * Linux MegaRAID Unified device driver + * + * Copyright (c) 2003-2004 LSI Logic Corporation. + * + * FILE : mbox_defs.h + */ +#ifndef _MRAID_MBOX_DEFS_H_ +#define _MRAID_MBOX_DEFS_H_ + +#include + +/* + * Commands and states for mailbox based controllers + */ + +#define MBOXCMD_LREAD 0x01 +#define MBOXCMD_LWRITE 0x02 +#define MBOXCMD_PASSTHRU 0x03 +#define MBOXCMD_ADPEXTINQ 0x04 +#define MBOXCMD_ADAPTERINQ 0x05 +#define MBOXCMD_LREAD64 0xA7 +#define MBOXCMD_LWRITE64 0xA8 +#define MBOXCMD_PASSTHRU64 0xC3 +#define MBOXCMD_EXTPTHRU 0xE3 + +#define MAIN_MISC_OPCODE 0xA4 +#define GET_MAX_SG_SUPPORT 0x01 +#define SUPPORT_EXT_CDB 0x16 + +#define FC_NEW_CONFIG 0xA1 +#define NC_SUBOP_PRODUCT_INFO 0x0E +#define NC_SUBOP_ENQUIRY3 0x0F +#define ENQ3_GET_SOLICITED_FULL 0x02 +#define OP_DCMD_READ_CONFIG 0x04 +#define NEW_READ_CONFIG_8LD 0x67 +#define READ_CONFIG_8LD 0x07 +#define FLUSH_ADAPTER 0x0A +#define FLUSH_SYSTEM 0xFE + +/* + * Command for random deletion of logical drives + */ +#define FC_DEL_LOGDRV 0xA4 +#define OP_SUP_DEL_LOGDRV 0x2A +#define OP_GET_LDID_MAP 0x18 +#define OP_DEL_LOGDRV 0x1C + +/* + * BIOS commands + */ +#define IS_BIOS_ENABLED 0x62 +#define GET_BIOS 0x01 +#define CHNL_CLASS 0xA9 +#define GET_CHNL_CLASS 0x00 +#define SET_CHNL_CLASS 0x01 +#define CH_RAID 0x01 +#define CH_SCSI 0x00 +#define BIOS_PVT_DATA 0x40 +#define GET_BIOS_PVT_DATA 0x00 + + +/* + * Commands to support clustering + */ +#define GET_TARGET_ID 0x7D +#define CLUSTER_OP 0x70 +#define GET_CLUSTER_MODE 0x02 +#define CLUSTER_CMD 0x6E +#define RESERVE_LD 0x01 +#define RELEASE_LD 0x02 +#define RESET_RESERVATIONS 0x03 +#define RESERVATION_STATUS 0x04 +#define RESERVE_PD 0x05 +#define RELEASE_PD 0x06 + + +/* + * Module battery status + */ +#define BATTERY_MODULE_MISSING 0x01 +#define BATTERY_LOW_VOLTAGE 0x02 +#define BATTERY_TEMP_HIGH 0x04 +#define BATTERY_PACK_MISSING 0x08 +#define BATTERY_CHARGE_MASK 0x30 +#define BATTERY_CHARGE_DONE 0x00 +#define BATTERY_CHARGE_INPROG 0x10 +#define BATTERY_CHARGE_FAIL 0x20 +#define BATTERY_CYCLES_EXCEEDED 0x40 + +/* + * Physical drive states. + */ +#define PDRV_UNCNF 0 +#define PDRV_ONLINE 3 +#define PDRV_FAILED 4 +#define PDRV_RBLD 5 +#define PDRV_HOTSPARE 6 + + +/* + * Raid logical drive states. 
+ */ +#define RDRV_OFFLINE 0 +#define RDRV_DEGRADED 1 +#define RDRV_OPTIMAL 2 +#define RDRV_DELETED 3 + +/* + * Read, write and cache policies + */ +#define NO_READ_AHEAD 0 +#define READ_AHEAD 1 +#define ADAP_READ_AHEAD 2 +#define WRMODE_WRITE_THRU 0 +#define WRMODE_WRITE_BACK 1 +#define CACHED_IO 0 +#define DIRECT_IO 1 + +#define MAX_LOGICAL_DRIVES_8LD 8 +#define MAX_LOGICAL_DRIVES_40LD 40 +#define FC_MAX_PHYSICAL_DEVICES 256 +#define MAX_MBOX_CHANNELS 5 +#define MAX_MBOX_TARGET 15 +#define MBOX_MAX_PHYSICAL_DRIVES MAX_MBOX_CHANNELS*MAX_MBOX_TARGET +#define MAX_ROW_SIZE_40LD 32 +#define MAX_ROW_SIZE_8LD 8 +#define SPAN_DEPTH_8_SPANS 8 +#define SPAN_DEPTH_4_SPANS 4 +#define MAX_REQ_SENSE_LEN 0x20 + + + +/** + * struct mbox_t - Driver and f/w handshake structure. + * @cmd : firmware command + * @cmdid : command id + * @numsectors : number of sectors to be transferred + * @lba : Logical Block Address on LD + * @xferaddr : DMA address for data transfer + * @logdrv : logical drive number + * @numsge : number of scatter gather elements in sg list + * @resvd : reserved + * @busy : f/w busy, must wait to issue more commands. + * @numstatus : number of commands completed. + * @status : status of the commands completed + * @completed : array of completed command ids. + * @poll : poll and ack sequence + * @ack : poll and ack sequence + * + * The central handshake structure between the driver and the firmware. This + * structure must be allocated by the driver and aligned at 8-byte boundary. + */ +#define MBOX_MAX_FIRMWARE_STATUS 46 +typedef struct { + uint8_t cmd; + uint8_t cmdid; + uint16_t numsectors; + uint32_t lba; + uint32_t xferaddr; + uint8_t logdrv; + uint8_t numsge; + uint8_t resvd; + uint8_t busy; + uint8_t numstatus; + uint8_t status; + uint8_t completed[MBOX_MAX_FIRMWARE_STATUS]; + uint8_t poll; + uint8_t ack; +} __attribute__ ((packed)) mbox_t; + + +/** + * mbox64_t - 64-bit extension for the mailbox + * @segment_lo : the low 32-bits of the address of the scatter-gather list + * @segment_hi : the upper 32-bits of the address of the scatter-gather list + * @mbox : 32-bit mailbox, whose xferadder field must be set to + * 0xFFFFFFFF + * + * This is the extension of the 32-bit mailbox to be able to perform DMA + * beyond 4GB address range. + */ +typedef struct { + uint32_t xferaddr_lo; + uint32_t xferaddr_hi; + mbox_t mbox32; +} __attribute__ ((packed)) mbox64_t; + +/* + * mailbox structure used for internal commands + */ +typedef struct { + u8 cmd; + u8 cmdid; + u8 opcode; + u8 subopcode; + u32 lba; + u32 xferaddr; + u8 logdrv; + u8 rsvd[3]; + u8 numstatus; + u8 status; +} __attribute__ ((packed)) int_mbox_t; + +/** + * mraid_passthru_t - passthru structure to issue commands to physical devices + * @timeout : command timeout, 0=6sec, 1=60sec, 2=10min, 3=3hr + * @ars : set if ARS required after check condition + * @islogical : set if command meant for logical devices + * @logdrv : logical drive number if command for LD + * @channel : Channel on which physical device is located + * @target : SCSI target of the device + * @queuetag : unused + * @queueaction : unused + * @cdb : SCSI CDB + * @cdblen : length of the CDB + * @reqsenselen : amount of request sense data to be returned + * @reqsensearea : Sense information buffer + * @numsge : number of scatter-gather elements in the sg list + * @scsistatus : SCSI status of the command completed. + * @dataxferaddr : DMA data transfer address + * @dataxferlen : amount of the data to be transferred. 
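 *
 * Editorial sketch, not driver code: filling the structure for a 6-byte
 * INQUIRY to an assumed physical device at channel 0, target 3. buf_dma
 * stands for a DMA handle of a 96 byte reply buffer obtained elsewhere,
 * and every value below is made up for illustration:
 *
 *	mraid_passthru_t pthru = {0};
 *
 *	pthru.timeout		= 1;	// 60 second class timeout
 *	pthru.ars		= 1;	// auto request sense on check condition
 *	pthru.islogical		= 0;	// physical device, not a logical drive
 *	pthru.channel		= 0;
 *	pthru.target		= 3;
 *	pthru.cdb[0]		= 0x12;	// INQUIRY
 *	pthru.cdb[4]		= 96;	// allocation length
 *	pthru.cdblen		= 6;
 *	pthru.reqsenselen	= MAX_REQ_SENSE_LEN;
 *	pthru.dataxferaddr	= (uint32_t)buf_dma;
 *	pthru.dataxferlen	= 96;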
+ */ +typedef struct { + uint8_t timeout :3; + uint8_t ars :1; + uint8_t reserved :3; + uint8_t islogical :1; + uint8_t logdrv; + uint8_t channel; + uint8_t target; + uint8_t queuetag; + uint8_t queueaction; + uint8_t cdb[10]; + uint8_t cdblen; + uint8_t reqsenselen; + uint8_t reqsensearea[MAX_REQ_SENSE_LEN]; + uint8_t numsge; + uint8_t scsistatus; + uint32_t dataxferaddr; + uint32_t dataxferlen; +} __attribute__ ((packed)) mraid_passthru_t; + +typedef struct { + + uint32_t dataxferaddr_lo; + uint32_t dataxferaddr_hi; + mraid_passthru_t pthru32; + +} __attribute__ ((packed)) mega_passthru64_t; + +/** + * mraid_epassthru_t - passthru structure to issue commands to physical devices + * @timeout : command timeout, 0=6sec, 1=60sec, 2=10min, 3=3hr + * @ars : set if ARS required after check condition + * @rsvd1 : reserved field + * @cd_rom : (?) + * @rsvd2 : reserved field + * @islogical : set if command meant for logical devices + * @logdrv : logical drive number if command for LD + * @channel : Channel on which physical device is located + * @target : SCSI target of the device + * @queuetag : unused + * @queueaction : unused + * @cdblen : length of the CDB + * @rsvd3 : reserved field + * @cdb : SCSI CDB + * @numsge : number of scatter-gather elements in the sg list + * @status : SCSI status of the command completed. + * @reqsenselen : amount of request sense data to be returned + * @reqsensearea : Sense information buffer + * @rsvd4 : reserved field + * @dataxferaddr : DMA data transfer address + * @dataxferlen : amount of the data to be transferred. + */ +typedef struct { + uint8_t timeout :3; + uint8_t ars :1; + uint8_t rsvd1 :1; + uint8_t cd_rom :1; + uint8_t rsvd2 :1; + uint8_t islogical :1; + uint8_t logdrv; + uint8_t channel; + uint8_t target; + uint8_t queuetag; + uint8_t queueaction; + uint8_t cdblen; + uint8_t rsvd3; + uint8_t cdb[16]; + uint8_t numsge; + uint8_t status; + uint8_t reqsenselen; + uint8_t reqsensearea[MAX_REQ_SENSE_LEN]; + uint8_t rsvd4; + uint32_t dataxferaddr; + uint32_t dataxferlen; +} __attribute__ ((packed)) mraid_epassthru_t; + + +/** + * mraid_pinfo_t - product info, static information about the controller + * @data_size : current size in bytes (not including resvd) + * @config_signature : Current value is 0x00282008 + * @fw_version : Firmware version + * @bios_version : version of the BIOS + * @product_name : Name given to the controller + * @max_commands : Maximum concurrent commands supported + * @nchannels : Number of SCSI Channels detected + * @fc_loop_present : Number of Fibre Loops detected + * @mem_type : EDO, FPM, SDRAM etc + * @signature : + * @dram_size : In terms of MB + * @subsysid : device PCI subsystem ID + * @subsysvid : device PCI subsystem vendor ID + * @notify_counters : + * @pad1k : 135 + 889 resvd = 1024 total size + * + * This structures holds the information about the controller which is not + * expected to change dynamically. 
+ * + * The current value of config signature is 0x00282008: + * 0x28 = MAX_LOGICAL_DRIVES, + * 0x20 = Number of stripes and + * 0x08 = Number of spans + */ +typedef struct { + uint32_t data_size; + uint32_t config_signature; + uint8_t fw_version[16]; + uint8_t bios_version[16]; + uint8_t product_name[80]; + uint8_t max_commands; + uint8_t nchannels; + uint8_t fc_loop_present; + uint8_t mem_type; + uint32_t signature; + uint16_t dram_size; + uint16_t subsysid; + uint16_t subsysvid; + uint8_t notify_counters; + uint8_t pad1k[889]; +} __attribute__ ((packed)) mraid_pinfo_t; + + +/** + * mraid_notify_t - the notification structure + * @global_counter : Any change increments this counter + * @param_counter : Indicates any params changed + * @param_id : Param modified - defined below + * @param_val : New val of last param modified + * @write_config_counter : write config occurred + * @write_config_rsvd : + * @ldrv_op_counter : Indicates ldrv op started/completed + * @ldrv_opid : ldrv num + * @ldrv_opcmd : ldrv operation - defined below + * @ldrv_opstatus : status of the operation + * @ldrv_state_counter : Indicates change of ldrv state + * @ldrv_state_id : ldrv num + * @ldrv_state_new : New state + * @ldrv_state_old : old state + * @pdrv_state_counter : Indicates change of ldrv state + * @pdrv_state_id : pdrv id + * @pdrv_state_new : New state + * @pdrv_state_old : old state + * @pdrv_fmt_counter : Indicates pdrv format started/over + * @pdrv_fmt_id : pdrv id + * @pdrv_fmt_val : format started/over + * @pdrv_fmt_rsvd : + * @targ_xfer_counter : Indicates SCSI-2 Xfer rate change + * @targ_xfer_id : pdrv Id + * @targ_xfer_val : new Xfer params of last pdrv + * @targ_xfer_rsvd : + * @fcloop_id_chg_counter : Indicates loopid changed + * @fcloopid_pdrvid : pdrv id + * @fcloop_id0 : loopid on fc loop 0 + * @fcloop_id1 : loopid on fc loop 1 + * @fcloop_state_counter : Indicates loop state changed + * @fcloop_state0 : state of fc loop 0 + * @fcloop_state1 : state of fc loop 1 + * @fcloop_state_rsvd : + */ +typedef struct { + uint32_t global_counter; + uint8_t param_counter; + uint8_t param_id; + uint16_t param_val; + uint8_t write_config_counter; + uint8_t write_config_rsvd[3]; + uint8_t ldrv_op_counter; + uint8_t ldrv_opid; + uint8_t ldrv_opcmd; + uint8_t ldrv_opstatus; + uint8_t ldrv_state_counter; + uint8_t ldrv_state_id; + uint8_t ldrv_state_new; + uint8_t ldrv_state_old; + uint8_t pdrv_state_counter; + uint8_t pdrv_state_id; + uint8_t pdrv_state_new; + uint8_t pdrv_state_old; + uint8_t pdrv_fmt_counter; + uint8_t pdrv_fmt_id; + uint8_t pdrv_fmt_val; + uint8_t pdrv_fmt_rsvd; + uint8_t targ_xfer_counter; + uint8_t targ_xfer_id; + uint8_t targ_xfer_val; + uint8_t targ_xfer_rsvd; + uint8_t fcloop_id_chg_counter; + uint8_t fcloopid_pdrvid; + uint8_t fcloop_id0; + uint8_t fcloop_id1; + uint8_t fcloop_state_counter; + uint8_t fcloop_state0; + uint8_t fcloop_state1; + uint8_t fcloop_state_rsvd; +} __attribute__ ((packed)) mraid_notify_t; + + +/** + * mraid_inquiry3_t - enquiry for device information + * + * @data_size : current size in bytes (not including resvd) + * @notify : + * @notify_rsvd : + * @rebuild_rate : rebuild rate (0% - 100%) + * @cache_flush_int : cache flush interval in seconds + * @sense_alert : + * @drive_insert_count : drive insertion count + * @battery_status : + * @num_ldrv : no. 
of Log Drives configured + * @recon_state : state of reconstruct + * @ldrv_op_status : logdrv Status + * @ldrv_size : size of each log drv + * @ldrv_prop : + * @ldrv_state : state of log drives + * @pdrv_state : state of phys drvs. + * @pdrv_format : + * @targ_xfer : phys device transfer rate + * @pad1k : 761 + 263reserved = 1024 bytes total size + */ +#define MAX_NOTIFY_SIZE 0x80 +#define CUR_NOTIFY_SIZE sizeof(mraid_notify_t) + +typedef struct { + uint32_t data_size; + + mraid_notify_t notify; + + uint8_t notify_rsvd[MAX_NOTIFY_SIZE - CUR_NOTIFY_SIZE]; + + uint8_t rebuild_rate; + uint8_t cache_flush_int; + uint8_t sense_alert; + uint8_t drive_insert_count; + + uint8_t battery_status; + uint8_t num_ldrv; + uint8_t recon_state[MAX_LOGICAL_DRIVES_40LD / 8]; + uint16_t ldrv_op_status[MAX_LOGICAL_DRIVES_40LD / 8]; + + uint32_t ldrv_size[MAX_LOGICAL_DRIVES_40LD]; + uint8_t ldrv_prop[MAX_LOGICAL_DRIVES_40LD]; + uint8_t ldrv_state[MAX_LOGICAL_DRIVES_40LD]; + uint8_t pdrv_state[FC_MAX_PHYSICAL_DEVICES]; + uint16_t pdrv_format[FC_MAX_PHYSICAL_DEVICES / 16]; + + uint8_t targ_xfer[80]; + uint8_t pad1k[263]; +} __attribute__ ((packed)) mraid_inquiry3_t; + + +/** + * mraid_adapinfo_t - information about the adapter + * @max_commands : max concurrent commands supported + * @rebuild_rate : rebuild rate - 0% thru 100% + * @max_targ_per_chan : max targ per channel + * @nchannels : number of channels on HBA + * @fw_version : firmware version + * @age_of_flash : number of times FW has been flashed + * @chip_set_value : contents of 0xC0000832 + * @dram_size : in MB + * @cache_flush_interval : in seconds + * @bios_version : + * @board_type : + * @sense_alert : + * @write_config_count : increase with every configuration change + * @drive_inserted_count : increase with every drive inserted + * @inserted_drive : channel:Id of inserted drive + * @battery_status : bit 0: battery module missing + * bit 1: VBAD + * bit 2: temperature high + * bit 3: battery pack missing + * bit 4,5: + * 00 - charge complete + * 01 - fast charge in progress + * 10 - fast charge fail + * 11 - undefined + * bit 6: counter > 1000 + * bit 7: Undefined + * @dec_fault_bus_info : + */ +typedef struct { + uint8_t max_commands; + uint8_t rebuild_rate; + uint8_t max_targ_per_chan; + uint8_t nchannels; + uint8_t fw_version[4]; + uint16_t age_of_flash; + uint8_t chip_set_value; + uint8_t dram_size; + uint8_t cache_flush_interval; + uint8_t bios_version[4]; + uint8_t board_type; + uint8_t sense_alert; + uint8_t write_config_count; + uint8_t battery_status; + uint8_t dec_fault_bus_info; +} __attribute__ ((packed)) mraid_adapinfo_t; + + +/** + * mraid_ldrv_info_t - information about the logical drives + * @nldrv : Number of logical drives configured + * @rsvd : + * @size : size of each logical drive + * @prop : + * @state : state of each logical drive + */ +typedef struct { + uint8_t nldrv; + uint8_t rsvd[3]; + uint32_t size[MAX_LOGICAL_DRIVES_8LD]; + uint8_t prop[MAX_LOGICAL_DRIVES_8LD]; + uint8_t state[MAX_LOGICAL_DRIVES_8LD]; +} __attribute__ ((packed)) mraid_ldrv_info_t; + + +/** + * mraid_pdrv_info_t - information about the physical drives + * @pdrv_state : state of each physical drive + */ +typedef struct { + uint8_t pdrv_state[MBOX_MAX_PHYSICAL_DRIVES]; + uint8_t rsvd; +} __attribute__ ((packed)) mraid_pdrv_info_t; + + +/** + * mraid_inquiry_t - RAID inquiry, mailbox command 0x05 + * @mraid_adapinfo_t : adapter information + * @mraid_ldrv_info_t : logical drives information + * @mraid_pdrv_info_t : physical drives information + */ +typedef 
struct { + mraid_adapinfo_t adapter_info; + mraid_ldrv_info_t logdrv_info; + mraid_pdrv_info_t pdrv_info; +} __attribute__ ((packed)) mraid_inquiry_t; + + +/** + * mraid_extinq_t - RAID extended inquiry, mailbox command 0x04 + * + * @raid_inq : raid inquiry + * @phys_drv_format : + * @stack_attn : + * @modem_status : + * @rsvd : + */ +typedef struct { + mraid_inquiry_t raid_inq; + uint16_t phys_drv_format[MAX_MBOX_CHANNELS]; + uint8_t stack_attn; + uint8_t modem_status; + uint8_t rsvd[2]; +} __attribute__ ((packed)) mraid_extinq_t; + + +/** + * adap_device_t - device information + * @channel : channel fpor the device + * @target : target ID of the device + */ +typedef struct { + uint8_t channel; + uint8_t target; +}__attribute__ ((packed)) adap_device_t; + + +/** + * adap_span_40ld_t - 40LD span + * @start_blk : starting block + * @num_blks : number of blocks + */ +typedef struct { + uint32_t start_blk; + uint32_t num_blks; + adap_device_t device[MAX_ROW_SIZE_40LD]; +}__attribute__ ((packed)) adap_span_40ld_t; + + +/** + * adap_span_8ld_t - 8LD span + * @start_blk : starting block + * @num_blks : number of blocks + */ +typedef struct { + uint32_t start_blk; + uint32_t num_blks; + adap_device_t device[MAX_ROW_SIZE_8LD]; +}__attribute__ ((packed)) adap_span_8ld_t; + + +/** + * logdrv_param_t - logical drives parameters + * + * @span_depth : total number of spans + * @level : RAID level + * @read_ahead : read ahead, no read ahead, adaptive read ahead + * @stripe_sz : encoded stripe size + * @status : status of the logical drive + * @write_mode : write mode, write_through/write_back + * @direct_io : direct io or through cache + * @row_size : number of stripes in a row + */ +typedef struct { + uint8_t span_depth; + uint8_t level; + uint8_t read_ahead; + uint8_t stripe_sz; + uint8_t status; + uint8_t write_mode; + uint8_t direct_io; + uint8_t row_size; +} __attribute__ ((packed)) logdrv_param_t; + + +/** + * logdrv_40ld_t - logical drive definition for 40LD controllers + * @lparam : logical drives parameters + * @span : span + */ +typedef struct { + logdrv_param_t lparam; + adap_span_40ld_t span[SPAN_DEPTH_8_SPANS]; +}__attribute__ ((packed)) logdrv_40ld_t; + + +/** + * logdrv_8ld_span8_t - logical drive definition for 8LD controllers + * @lparam : logical drives parameters + * @span : span + * + * 8-LD logical drive with up to 8 spans + */ +typedef struct { + logdrv_param_t lparam; + adap_span_8ld_t span[SPAN_DEPTH_8_SPANS]; +}__attribute__ ((packed)) logdrv_8ld_span8_t; + + +/** + * logdrv_8ld_span4_t - logical drive definition for 8LD controllers + * @lparam : logical drives parameters + * @span : span + * + * 8-LD logical drive with up to 4 spans + */ +typedef struct { + logdrv_param_t lparam; + adap_span_8ld_t span[SPAN_DEPTH_4_SPANS]; +}__attribute__ ((packed)) logdrv_8ld_span4_t; + + +/** + * phys_drive_t - physical device information + * @type : Type of the device + * @cur_status : current status of the device + * @tag_depth : Level of tagging + * @sync_neg : sync negotiation - ENABLE or DISABLE + * @size : configurable size in terms of 512 byte + */ +typedef struct { + uint8_t type; + uint8_t cur_status; + uint8_t tag_depth; + uint8_t sync_neg; + uint32_t size; +}__attribute__ ((packed)) phys_drive_t; + + +/** + * disk_array_40ld_t - disk array for 40LD controllers + * @numldrv : number of logical drives + * @resvd : + * @ldrv : logical drives information + * @pdrv : physical drives information + */ +typedef struct { + uint8_t numldrv; + uint8_t resvd[3]; + logdrv_40ld_t 
ldrv[MAX_LOGICAL_DRIVES_40LD]; + phys_drive_t pdrv[MBOX_MAX_PHYSICAL_DRIVES]; +}__attribute__ ((packed)) disk_array_40ld_t; + + +/** + * disk_array_8ld_span8_t - disk array for 8LD controllers + * @numldrv : number of logical drives + * @resvd : + * @ldrv : logical drives information + * @pdrv : physical drives information + * + * Disk array for 8LD logical drives with up to 8 spans + */ +typedef struct { + uint8_t numldrv; + uint8_t resvd[3]; + logdrv_8ld_span8_t ldrv[MAX_LOGICAL_DRIVES_8LD]; + phys_drive_t pdrv[MBOX_MAX_PHYSICAL_DRIVES]; +}__attribute__ ((packed)) disk_array_8ld_span8_t; + + +/** + * disk_array_8ld_span4_t - disk array for 8LD controllers + * @numldrv : number of logical drives + * @resvd : + * @ldrv : logical drives information + * @pdrv : physical drives information + * + * Disk array for 8LD logical drives with up to 4 spans + */ +typedef struct { + uint8_t numldrv; + uint8_t resvd[3]; + logdrv_8ld_span4_t ldrv[MAX_LOGICAL_DRIVES_8LD]; + phys_drive_t pdrv[MBOX_MAX_PHYSICAL_DRIVES]; +}__attribute__ ((packed)) disk_array_8ld_span4_t; + + +/** + * struct private_bios_data - bios private data for boot devices + * @geometry : bits 0-3 - BIOS geometry, 0x0001 - 1GB, 0x0010 - 2GB, + * 0x1000 - 8GB, Others values are invalid + * @unused : bits 4-7 are unused + * @boot_drv : logical drive set as boot drive, 0..7 - for 8LD cards, + * 0..39 - for 40LD cards + * @cksum : 0-(sum of first 13 bytes of this structure) + */ +struct private_bios_data { + uint8_t geometry :4; + uint8_t unused :4; + uint8_t boot_drv; + uint8_t rsvd[12]; + uint16_t cksum; +} __attribute__ ((packed)); + + +/** + * mbox_sgl64 - 64-bit scatter list for mailbox based controllers + * @address : address of the buffer + * @length : data transfer length + */ +typedef struct { + uint64_t address; + uint32_t length; +} __attribute__ ((packed)) mbox_sgl64; + +/** + * mbox_sgl32 - 32-bit scatter list for mailbox based controllers + * @address : address of the buffer + * @length : data transfer length + */ +typedef struct { + uint32_t address; + uint32_t length; +} __attribute__ ((packed)) mbox_sgl32; + +#endif // _MRAID_MBOX_DEFS_H_ diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h new file mode 100644 index 000000000..2ad0aa2f8 --- /dev/null +++ b/drivers/scsi/megaraid/mega_common.h @@ -0,0 +1,284 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * + * Linux MegaRAID device driver + * + * Copyright (c) 2003-2004 LSI Logic Corporation. 
+ * + * FILE : mega_common.h + * + * Libaray of common routine used by all low-level megaraid drivers + */ + +#ifndef _MEGA_COMMON_H_ +#define _MEGA_COMMON_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define LSI_MAX_CHANNELS 16 +#define LSI_MAX_LOGICAL_DRIVES_64LD (64+1) + +#define HBA_SIGNATURE_64_BIT 0x299 +#define PCI_CONF_AMISIG64 0xa4 + +#define MEGA_SCSI_INQ_EVPD 1 +#define MEGA_INVALID_FIELD_IN_CDB 0x24 + + +/** + * scb_t - scsi command control block + * @ccb : command control block for individual driver + * @list : list of control blocks + * @gp : general purpose field for LLDs + * @sno : all SCBs have a serial number + * @scp : associated scsi command + * @state : current state of scb + * @dma_dir : direction of data transfer + * @dma_type : transfer with sg list, buffer, or no data transfer + * @dev_channel : actual channel on the device + * @dev_target : actual target on the device + * @status : completion status + * + * This is our central data structure to issue commands the each driver. + * Driver specific data structures are maintained in the ccb field. + * scb provides a field 'gp', which can be used by LLD for its own purposes + * + * dev_channel and dev_target must be initialized with the actual channel and + * target on the controller. + */ +typedef struct { + caddr_t ccb; + struct list_head list; + unsigned long gp; + unsigned int sno; + struct scsi_cmnd *scp; + uint32_t state; + uint32_t dma_direction; + uint32_t dma_type; + uint16_t dev_channel; + uint16_t dev_target; + uint32_t status; +} scb_t; + +/* + * SCB states as it transitions from one state to another + */ +#define SCB_FREE 0x0000 /* on the free list */ +#define SCB_ACTIVE 0x0001 /* off the free list */ +#define SCB_PENDQ 0x0002 /* on the pending queue */ +#define SCB_ISSUED 0x0004 /* issued - owner f/w */ +#define SCB_ABORT 0x0008 /* Got an abort for this one */ +#define SCB_RESET 0x0010 /* Got a reset for this one */ + +/* + * DMA types for scb + */ +#define MRAID_DMA_NONE 0x0000 /* no data transfer for this command */ +#define MRAID_DMA_WSG 0x0001 /* data transfer using a sg list */ +#define MRAID_DMA_WBUF 0x0002 /* data transfer using a contiguous buffer */ + + +/** + * struct adapter_t - driver's initialization structure + * @aram dpc_h : tasklet handle + * @pdev : pci configuration pointer for kernel + * @host : pointer to host structure of mid-layer + * @lock : synchronization lock for mid-layer and driver + * @quiescent : driver is quiescent for now. + * @outstanding_cmds : number of commands pending in the driver + * @kscb_list : pointer to the bulk of SCBs pointers for IO + * @kscb_pool : pool of free scbs for IO + * @kscb_pool_lock : lock for pool of free scbs + * @pend_list : pending commands list + * @pend_list_lock : exclusion lock for pending commands list + * @completed_list : list of completed commands + * @completed_list_lock : exclusion lock for list of completed commands + * @sglen : max sg elements supported + * @device_ids : to convert kernel device addr to our devices. 
+ * @raid_device : raid adapter specific pointer + * @max_channel : maximum channel number supported - inclusive + * @max_target : max target supported - inclusive + * @max_lun : max lun supported - inclusive + * @unique_id : unique identifier for each adapter + * @irq : IRQ for this adapter + * @ito : internal timeout value, (-1) means no timeout + * @ibuf : buffer to issue internal commands + * @ibuf_dma_h : dma handle for the above buffer + * @uscb_list : SCB pointers for user cmds, common mgmt module + * @uscb_pool : pool of SCBs for user commands + * @uscb_pool_lock : exclusion lock for these SCBs + * @max_cmds : max outstanding commands + * @fw_version : firmware version + * @bios_version : bios version + * @max_cdb_sz : biggest CDB size supported. + * @ha : is high availability present - clustering + * @init_id : initiator ID, the default value should be 7 + * @max_sectors : max sectors per request + * @cmd_per_lun : max outstanding commands per LUN + * @being_detached : set when unloading, no more mgmt calls + * + * + * mraid_setup_device_map() can be called anytime after the device map is + * available and MRAID_GET_DEVICE_MAP() can be called whenever the mapping is + * required, usually from LLD's queue entry point. The formar API sets up the + * MRAID_IS_LOGICAL(adapter_t *, struct scsi_cmnd *) to find out if the + * device in question is a logical drive. + * + * quiescent flag should be set by the driver if it is not accepting more + * commands + * + * NOTE: The fields of this structures are placed to minimize cache misses + */ + +// amount of space required to store the bios and firmware version strings +#define VERSION_SIZE 16 + +typedef struct { + struct tasklet_struct dpc_h; + struct pci_dev *pdev; + struct Scsi_Host *host; + spinlock_t lock; + uint8_t quiescent; + int outstanding_cmds; + scb_t *kscb_list; + struct list_head kscb_pool; + spinlock_t kscb_pool_lock; + struct list_head pend_list; + spinlock_t pend_list_lock; + struct list_head completed_list; + spinlock_t completed_list_lock; + uint16_t sglen; + int device_ids[LSI_MAX_CHANNELS] + [LSI_MAX_LOGICAL_DRIVES_64LD]; + caddr_t raid_device; + uint8_t max_channel; + uint16_t max_target; + uint8_t max_lun; + + uint32_t unique_id; + int irq; + uint8_t ito; + caddr_t ibuf; + dma_addr_t ibuf_dma_h; + scb_t *uscb_list; + struct list_head uscb_pool; + spinlock_t uscb_pool_lock; + int max_cmds; + uint8_t fw_version[VERSION_SIZE]; + uint8_t bios_version[VERSION_SIZE]; + uint8_t max_cdb_sz; + uint8_t ha; + uint16_t init_id; + uint16_t max_sectors; + uint16_t cmd_per_lun; + atomic_t being_detached; +} adapter_t; + +#define SCSI_FREE_LIST_LOCK(adapter) (&adapter->kscb_pool_lock) +#define USER_FREE_LIST_LOCK(adapter) (&adapter->uscb_pool_lock) +#define PENDING_LIST_LOCK(adapter) (&adapter->pend_list_lock) +#define COMPLETED_LIST_LOCK(adapter) (&adapter->completed_list_lock) + + +// conversion from scsi command +#define SCP2HOST(scp) (scp)->device->host // to host +#define SCP2HOSTDATA(scp) SCP2HOST(scp)->hostdata // to soft state +#define SCP2CHANNEL(scp) (scp)->device->channel // to channel +#define SCP2TARGET(scp) (scp)->device->id // to target +#define SCP2LUN(scp) (u32)(scp)->device->lun // to LUN + +// generic macro to convert scsi command and host to controller's soft state +#define SCSIHOST2ADAP(host) (((caddr_t *)(host->hostdata))[0]) +#define SCP2ADAPTER(scp) (adapter_t *)SCSIHOST2ADAP(SCP2HOST(scp)) + + +#define MRAID_IS_LOGICAL(adp, scp) \ + (SCP2CHANNEL(scp) == (adp)->max_channel) ? 
1 : 0 + +#define MRAID_IS_LOGICAL_SDEV(adp, sdev) \ + (sdev->channel == (adp)->max_channel) ? 1 : 0 + +/** + * MRAID_GET_DEVICE_MAP - device ids + * @adp : adapter's soft state + * @scp : mid-layer scsi command pointer + * @p_chan : physical channel on the controller + * @target : target id of the device or logical drive number + * @islogical : set if the command is for the logical drive + * + * Macro to retrieve information about device class, logical or physical and + * the corresponding physical channel and target or logical drive number + */ +#define MRAID_GET_DEVICE_MAP(adp, scp, p_chan, target, islogical) \ + /* \ + * Is the request coming for the virtual channel \ + */ \ + islogical = MRAID_IS_LOGICAL(adp, scp); \ + \ + /* \ + * Get an index into our table of drive ids mapping \ + */ \ + if (islogical) { \ + p_chan = 0xFF; \ + target = \ + (adp)->device_ids[(adp)->max_channel][SCP2TARGET(scp)]; \ + } \ + else { \ + p_chan = ((adp)->device_ids[SCP2CHANNEL(scp)] \ + [SCP2TARGET(scp)] >> 8) & 0xFF; \ + target = ((adp)->device_ids[SCP2CHANNEL(scp)] \ + [SCP2TARGET(scp)] & 0xFF); \ + } + +/* + * ### Helper routines ### + */ +#define LSI_DBGLVL mraid_debug_level // each LLD must define a global + // mraid_debug_level + +#ifdef DEBUG +#if defined (_ASSERT_PANIC) +#define ASSERT_ACTION panic +#else +#define ASSERT_ACTION printk +#endif + +#define ASSERT(expression) \ + if (!(expression)) { \ + ASSERT_ACTION("assertion failed:(%s), file: %s, line: %d:%s\n", \ + #expression, __FILE__, __LINE__, __func__); \ + } +#else +#define ASSERT(expression) +#endif + +/** + * struct mraid_pci_blk - structure holds DMA memory block info + * @vaddr : virtual address to a memory block + * @dma_addr : DMA handle to a memory block + * + * This structure is filled up for the caller. It is the responsibilty of the + * caller to allocate this array big enough to store addresses for all + * requested elements + */ +struct mraid_pci_blk { + caddr_t vaddr; + dma_addr_t dma_addr; +}; + +#endif // _MEGA_COMMON_H_ diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h new file mode 100644 index 000000000..ae9c2ff7e --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_ioctl.h @@ -0,0 +1,302 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * + * Linux MegaRAID device driver + * + * Copyright (c) 2003-2004 LSI Logic Corporation. + * + * FILE : megaraid_ioctl.h + * + * Definitions to interface with user level applications + */ + +#ifndef _MEGARAID_IOCTL_H_ +#define _MEGARAID_IOCTL_H_ + +#include +#include +#include + +#include "mbox_defs.h" + +/* + * console messages debug levels + */ +#define CL_ANN 0 /* print unconditionally, announcements */ +#define CL_DLEVEL1 1 /* debug level 1, informative */ +#define CL_DLEVEL2 2 /* debug level 2, verbose */ +#define CL_DLEVEL3 3 /* debug level 3, very verbose */ + +/** + * con_log() - console log routine + * @level : indicates the severity of the message. + * @fmt : format string + * + * con_log displays the error messages on the console based on the current + * debug level. Also it attaches the appropriate kernel severity level with + * the message. 
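+ *
+ * Note that @fmt must be a fully parenthesized printk() argument list,
+ * because the macro expands to "printk fmt". A typical, purely
+ * illustrative call:
+ *
+ *	con_log(CL_DLEVEL1, (KERN_INFO "megaraid: %d commands pending\n",
+ *		adapter->outstanding_cmds));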
+ */ +#define con_log(level, fmt) if (LSI_DBGLVL >= level) printk fmt; + +/* + * Definitions & Declarations needed to use common management module + */ + +#define MEGAIOC_MAGIC 'm' +#define MEGAIOCCMD _IOWR(MEGAIOC_MAGIC, 0, mimd_t) + +#define MEGAIOC_QNADAP 'm' /* Query # of adapters */ +#define MEGAIOC_QDRVRVER 'e' /* Query driver version */ +#define MEGAIOC_QADAPINFO 'g' /* Query adapter information */ + +#define USCSICMD 0x80 +#define UIOC_RD 0x00001 +#define UIOC_WR 0x00002 + +#define MBOX_CMD 0x00000 +#define GET_DRIVER_VER 0x10000 +#define GET_N_ADAP 0x20000 +#define GET_ADAP_INFO 0x30000 +#define GET_CAP 0x40000 +#define GET_STATS 0x50000 +#define GET_IOCTL_VERSION 0x01 + +#define EXT_IOCTL_SIGN_SZ 16 +#define EXT_IOCTL_SIGN "$$_EXTD_IOCTL_$$" + +#define MBOX_LEGACY 0x00 /* ioctl has legacy mbox*/ +#define MBOX_HPE 0x01 /* ioctl has hpe mbox */ + +#define APPTYPE_MIMD 0x00 /* old existing apps */ +#define APPTYPE_UIOC 0x01 /* new apps using uioc */ + +#define IOCTL_ISSUE 0x00000001 /* Issue ioctl */ +#define IOCTL_ABORT 0x00000002 /* Abort previous ioctl */ + +#define DRVRTYPE_MBOX 0x00000001 /* regular mbox driver */ +#define DRVRTYPE_HPE 0x00000002 /* new hpe driver */ + +#define MKADAP(adapno) (MEGAIOC_MAGIC << 8 | (adapno) ) +#define GETADAP(mkadap) ((mkadap) ^ MEGAIOC_MAGIC << 8) + +#define MAX_DMA_POOLS 5 /* 4k, 8k, 16k, 32k, 64k*/ + + +/** + * struct uioc_t - the common ioctl packet structure + * + * @signature : Must be "$$_EXTD_IOCTL_$$" + * @mb_type : Type of the mail box (MB_LEGACY or MB_HPE) + * @app_type : Type of the issuing application (existing or new) + * @opcode : Opcode of the command + * @adapno : Adapter number + * @cmdbuf : Pointer to buffer - can point to mbox or plain data buffer + * @xferlen : xferlen for DCMD and non mailbox commands + * @data_dir : Direction of the data transfer + * @status : Status from the driver + * @reserved : reserved bytes for future expansion + * + * @user_data : user data transfer address is saved in this + * @user_data_len: length of the data buffer sent by user app + * @user_pthru : user passthru address is saves in this (null if DCMD) + * @pthru32 : kernel address passthru (allocated per kioc) + * @pthru32_h : physicall address of @pthru32 + * @list : for kioc free pool list maintenance + * @done : call back routine for llds to call when kioc is completed + * @buf_vaddr : dma pool buffer attached to kioc for data transfer + * @buf_paddr : physical address of the dma pool buffer + * @pool_index : index of the dma pool that @buf_vaddr is taken from + * @free_buf : indicates if buffer needs to be freed after kioc completes + * + * Note : All LSI drivers understand only this packet. Any other + * : format sent by applications would be converted to this. + */ +typedef struct uioc { + +/* User Apps: */ + + uint8_t signature[EXT_IOCTL_SIGN_SZ]; + uint16_t mb_type; + uint16_t app_type; + uint32_t opcode; + uint32_t adapno; + uint64_t cmdbuf; + uint32_t xferlen; + uint32_t data_dir; + int32_t status; + uint8_t reserved[128]; + +/* Driver Data: */ + void __user * user_data; + uint32_t user_data_len; + + /* 64bit alignment */ + uint32_t pad_for_64bit_align; + + mraid_passthru_t __user *user_pthru; + + mraid_passthru_t *pthru32; + dma_addr_t pthru32_h; + + struct list_head list; + void (*done)(struct uioc*); + + caddr_t buf_vaddr; + dma_addr_t buf_paddr; + int8_t pool_index; + uint8_t free_buf; + + uint8_t timedout; + +} __attribute__ ((aligned(1024),packed)) uioc_t; + +/* For on-stack uioc timers. 
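+ *
+ * An LLD typically embeds this on the stack, points @uioc at the packet
+ * being timed and arms @timer before issuing the ioctl. A minimal,
+ * illustrative sketch (lld_timedout() is a hypothetical callback taking
+ * a struct timer_list pointer):
+ *
+ *	struct uioc_timeout timeout;
+ *
+ *	timeout.uioc = kioc;
+ *	timer_setup_on_stack(&timeout.timer, lld_timedout, 0);
+ *	timeout.timer.expires = jiffies + adp->timeout * HZ;
+ *	add_timer(&timeout.timer);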
*/ +struct uioc_timeout { + struct timer_list timer; + uioc_t *uioc; +}; + +/** + * struct mraid_hba_info - information about the controller + * + * @pci_vendor_id : PCI vendor id + * @pci_device_id : PCI device id + * @subsystem_vendor_id : PCI subsystem vendor id + * @subsystem_device_id : PCI subsystem device id + * @baseport : base port of hba memory + * @pci_bus : PCI bus + * @pci_dev_fn : PCI device/function values + * @irq : interrupt vector for the device + * + * Extended information of 256 bytes about the controller. Align on the single + * byte boundary so that 32-bit applications can be run on 64-bit platform + * drivers withoug re-compilation. + * NOTE: reduce the number of reserved bytes whenever new field are added, so + * that total size of the structure remains 256 bytes. + */ +typedef struct mraid_hba_info { + + uint16_t pci_vendor_id; + uint16_t pci_device_id; + uint16_t subsys_vendor_id; + uint16_t subsys_device_id; + + uint64_t baseport; + uint8_t pci_bus; + uint8_t pci_dev_fn; + uint8_t pci_slot; + uint8_t irq; + + uint32_t unique_id; + uint32_t host_no; + + uint8_t num_ldrv; +} __attribute__ ((aligned(256), packed)) mraid_hba_info_t; + + +/** + * mcontroller : adapter info structure for old mimd_t apps + * + * @base : base address + * @irq : irq number + * @numldrv : number of logical drives + * @pcibus : pci bus + * @pcidev : pci device + * @pcifun : pci function + * @pciid : pci id + * @pcivendor : vendor id + * @pcislot : slot number + * @uid : unique id + */ +typedef struct mcontroller { + + uint64_t base; + uint8_t irq; + uint8_t numldrv; + uint8_t pcibus; + uint16_t pcidev; + uint8_t pcifun; + uint16_t pciid; + uint16_t pcivendor; + uint8_t pcislot; + uint32_t uid; + +} __attribute__ ((packed)) mcontroller_t; + + +/** + * mm_dmapool_t : Represents one dma pool with just one buffer + * + * @vaddr : Virtual address + * @paddr : DMA physicall address + * @bufsize : In KB - 4 = 4k, 8 = 8k etc. 
+ * @handle : Handle to the dma pool + * @lock : lock to synchronize access to the pool + * @in_use : If pool already in use, attach new block + */ +typedef struct mm_dmapool { + caddr_t vaddr; + dma_addr_t paddr; + uint32_t buf_size; + struct dma_pool *handle; + spinlock_t lock; + uint8_t in_use; +} mm_dmapool_t; + + +/** + * mraid_mmadp_t: Structure that drivers pass during (un)registration + * + * @unique_id : Any unique id (usually PCI bus+dev+fn) + * @drvr_type : megaraid or hpe (DRVRTYPE_MBOX or DRVRTYPE_HPE) + * @drv_data : Driver specific; not touched by the common module + * @timeout : timeout for issued kiocs + * @max_kioc : Maximum ioctl packets acceptable by the lld + * @pdev : pci dev; used for allocating dma'ble memory + * @issue_uioc : Driver supplied routine to issue uioc_t commands + * : issue_uioc(drvr_data, kioc, ISSUE/ABORT, uioc_done) + * @quiescent : flag to indicate if ioctl can be issued to this adp + * @list : attach with the global list of adapters + * @kioc_list : block of mem for @max_kioc number of kiocs + * @kioc_pool : pool of free kiocs + * @kioc_pool_lock : protection for free pool + * @kioc_semaphore : so as not to exceed @max_kioc parallel ioctls + * @mbox_list : block of mem for @max_kioc number of mboxes + * @pthru_dma_pool : DMA pool to allocate passthru packets + * @dma_pool_list : array of dma pools + */ + +typedef struct mraid_mmadp { + +/* Filled by driver */ + + uint32_t unique_id; + uint32_t drvr_type; + unsigned long drvr_data; + uint16_t timeout; + uint8_t max_kioc; + + struct pci_dev *pdev; + + int(*issue_uioc)(unsigned long, uioc_t *, uint32_t); + +/* Maintained by common module */ + uint32_t quiescent; + + struct list_head list; + uioc_t *kioc_list; + struct list_head kioc_pool; + spinlock_t kioc_pool_lock; + struct semaphore kioc_semaphore; + + mbox64_t *mbox_list; + struct dma_pool *pthru_dma_pool; + mm_dmapool_t dma_pool_list[MAX_DMA_POOLS]; + +} mraid_mmadp_t; + +int mraid_mm_register_adp(mraid_mmadp_t *); +int mraid_mm_unregister_adp(uint32_t); +uint32_t mraid_mm_adapter_app_handle(uint32_t); + +#endif /* _MEGARAID_IOCTL_H_ */ diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c new file mode 100644 index 000000000..bc867da65 --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -0,0 +1,4060 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Linux MegaRAID device driver + * + * Copyright (c) 2003-2004 LSI Logic Corporation. 
+ * + * FILE : megaraid_mbox.c + * Version : v2.20.5.1 (Nov 16 2006) + * + * Authors: + * Atul Mukker + * Sreenivas Bagalkote + * Manoj Jose + * Seokmann Ju + * + * List of supported controllers + * + * OEM Product Name VID DID SSVID SSID + * --- ------------ --- --- ---- ---- + * Dell PERC3/QC 101E 1960 1028 0471 + * Dell PERC3/DC 101E 1960 1028 0493 + * Dell PERC3/SC 101E 1960 1028 0475 + * Dell PERC3/Di 1028 1960 1028 0123 + * Dell PERC4/SC 1000 1960 1028 0520 + * Dell PERC4/DC 1000 1960 1028 0518 + * Dell PERC4/QC 1000 0407 1028 0531 + * Dell PERC4/Di 1028 000F 1028 014A + * Dell PERC 4e/Si 1028 0013 1028 016c + * Dell PERC 4e/Di 1028 0013 1028 016d + * Dell PERC 4e/Di 1028 0013 1028 016e + * Dell PERC 4e/Di 1028 0013 1028 016f + * Dell PERC 4e/Di 1028 0013 1028 0170 + * Dell PERC 4e/DC 1000 0408 1028 0002 + * Dell PERC 4e/SC 1000 0408 1028 0001 + * + * LSI MegaRAID SCSI 320-0 1000 1960 1000 A520 + * LSI MegaRAID SCSI 320-1 1000 1960 1000 0520 + * LSI MegaRAID SCSI 320-2 1000 1960 1000 0518 + * LSI MegaRAID SCSI 320-0X 1000 0407 1000 0530 + * LSI MegaRAID SCSI 320-2X 1000 0407 1000 0532 + * LSI MegaRAID SCSI 320-4X 1000 0407 1000 0531 + * LSI MegaRAID SCSI 320-1E 1000 0408 1000 0001 + * LSI MegaRAID SCSI 320-2E 1000 0408 1000 0002 + * LSI MegaRAID SATA 150-4 1000 1960 1000 4523 + * LSI MegaRAID SATA 150-6 1000 1960 1000 0523 + * LSI MegaRAID SATA 300-4X 1000 0409 1000 3004 + * LSI MegaRAID SATA 300-8X 1000 0409 1000 3008 + * + * INTEL RAID Controller SRCU42X 1000 0407 8086 0532 + * INTEL RAID Controller SRCS16 1000 1960 8086 0523 + * INTEL RAID Controller SRCU42E 1000 0408 8086 0002 + * INTEL RAID Controller SRCZCRX 1000 0407 8086 0530 + * INTEL RAID Controller SRCS28X 1000 0409 8086 3008 + * INTEL RAID Controller SROMBU42E 1000 0408 8086 3431 + * INTEL RAID Controller SROMBU42E 1000 0408 8086 3499 + * INTEL RAID Controller SRCU51L 1000 1960 8086 0520 + * + * FSC MegaRAID PCI Express ROMB 1000 0408 1734 1065 + * + * ACER MegaRAID ROMB-2E 1000 0408 1025 004D + * + * NEC MegaRAID PCI Express ROMB 1000 0408 1033 8287 + * + * For history of changes, see Documentation/scsi/ChangeLog.megaraid + */ + +#include +#include +#include "megaraid_mbox.h" + +static int megaraid_init(void); +static void megaraid_exit(void); + +static int megaraid_probe_one(struct pci_dev*, const struct pci_device_id *); +static void megaraid_detach_one(struct pci_dev *); +static void megaraid_mbox_shutdown(struct pci_dev *); + +static int megaraid_io_attach(adapter_t *); +static void megaraid_io_detach(adapter_t *); + +static int megaraid_init_mbox(adapter_t *); +static void megaraid_fini_mbox(adapter_t *); + +static int megaraid_alloc_cmd_packets(adapter_t *); +static void megaraid_free_cmd_packets(adapter_t *); + +static int megaraid_mbox_setup_dma_pools(adapter_t *); +static void megaraid_mbox_teardown_dma_pools(adapter_t *); + +static int megaraid_sysfs_alloc_resources(adapter_t *); +static void megaraid_sysfs_free_resources(adapter_t *); + +static int megaraid_abort_handler(struct scsi_cmnd *); +static int megaraid_reset_handler(struct scsi_cmnd *); + +static int mbox_post_sync_cmd(adapter_t *, uint8_t []); +static int mbox_post_sync_cmd_fast(adapter_t *, uint8_t []); +static int megaraid_busywait_mbox(mraid_device_t *); +static int megaraid_mbox_product_info(adapter_t *); +static int megaraid_mbox_extended_cdb(adapter_t *); +static int megaraid_mbox_support_ha(adapter_t *, uint16_t *); +static int megaraid_mbox_support_random_del(adapter_t *); +static int megaraid_mbox_get_max_sg(adapter_t *); +static void 
megaraid_mbox_enum_raid_scsi(adapter_t *); +static void megaraid_mbox_flush_cache(adapter_t *); +static int megaraid_mbox_fire_sync_cmd(adapter_t *); + +static void megaraid_mbox_display_scb(adapter_t *, scb_t *); +static void megaraid_mbox_setup_device_map(adapter_t *); + +static int megaraid_queue_command(struct Scsi_Host *, struct scsi_cmnd *); +static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *); +static void megaraid_mbox_runpendq(adapter_t *, scb_t *); +static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *, + struct scsi_cmnd *); +static void megaraid_mbox_prepare_epthru(adapter_t *, scb_t *, + struct scsi_cmnd *); + +static irqreturn_t megaraid_isr(int, void *); + +static void megaraid_mbox_dpc(unsigned long); + +static ssize_t megaraid_mbox_app_hndl_show(struct device *, struct device_attribute *attr, char *); +static ssize_t megaraid_mbox_ld_show(struct device *, struct device_attribute *attr, char *); + +static int megaraid_cmm_register(adapter_t *); +static int megaraid_cmm_unregister(adapter_t *); +static int megaraid_mbox_mm_handler(unsigned long, uioc_t *, uint32_t); +static int megaraid_mbox_mm_command(adapter_t *, uioc_t *); +static void megaraid_mbox_mm_done(adapter_t *, scb_t *); +static int gather_hbainfo(adapter_t *, mraid_hba_info_t *); +static int wait_till_fw_empty(adapter_t *); + + + +MODULE_AUTHOR("megaraidlinux@lsi.com"); +MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(MEGARAID_VERSION); + +/* + * ### modules parameters for driver ### + */ + +/* + * Set to enable driver to expose unconfigured disk to kernel + */ +static int megaraid_expose_unconf_disks = 0; +module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0); +MODULE_PARM_DESC(unconf_disks, + "Set to expose unconfigured disks to kernel (default=0)"); + +/* + * driver wait time if the adapter's mailbox is busy + */ +static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT; +module_param_named(busy_wait, max_mbox_busy_wait, int, 0); +MODULE_PARM_DESC(busy_wait, + "Max wait for mailbox in microseconds if busy (default=10)"); + +/* + * number of sectors per IO command + */ +static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS; +module_param_named(max_sectors, megaraid_max_sectors, int, 0); +MODULE_PARM_DESC(max_sectors, + "Maximum number of sectors per IO command (default=128)"); + +/* + * number of commands per logical unit + */ +static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN; +module_param_named(cmd_per_lun, megaraid_cmd_per_lun, int, 0); +MODULE_PARM_DESC(cmd_per_lun, + "Maximum number of commands per logical unit (default=64)"); + + +/* + * Fast driver load option, skip scanning for physical devices during load. + * This would result in non-disk devices being skipped during driver load + * time. These can be later added though, using /proc/scsi/scsi + */ +static unsigned int megaraid_fast_load; +module_param_named(fast_load, megaraid_fast_load, int, 0); +MODULE_PARM_DESC(fast_load, + "Faster loading of the driver, skips physical devices! (default=0)"); + + +/* + * mraid_debug level - threshold for amount of information to be displayed by + * the driver. This level can be changed through modules parameters, ioctl or + * sysfs/proc interface. By default, print the announcement messages only. 
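+ *
+ * A more verbose level can be requested at module load time, for
+ * example (illustrative invocation):
+ *
+ *	modprobe megaraid_mbox debug_level=2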
+ */ +int mraid_debug_level = CL_ANN; +module_param_named(debug_level, mraid_debug_level, int, 0); +MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)"); + +/* + * PCI table for all supported controllers. + */ +static struct pci_device_id pci_id_table_g[] = { + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_PERC4_DI_DISCOVERY, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4_DI_DISCOVERY, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_PERC4_SC, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4_SC, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_PERC4_DC, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4_DC, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_VERDE, + PCI_ANY_ID, + PCI_ANY_ID, + }, + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_PERC4_DI_EVERGLADES, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4_DI_EVERGLADES, + }, + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_PERC4E_SI_BIGBEND, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4E_SI_BIGBEND, + }, + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_PERC4E_DI_KOBUK, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4E_DI_KOBUK, + }, + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_PERC4E_DI_CORVETTE, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4E_DI_CORVETTE, + }, + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_PERC4E_DI_EXPEDITION, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION, + }, + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_PERC4E_DI_GUADALUPE, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_DOBSON, + PCI_ANY_ID, + PCI_ANY_ID, + }, + { + PCI_VENDOR_ID_AMI, + PCI_DEVICE_ID_AMI_MEGARAID3, + PCI_ANY_ID, + PCI_ANY_ID, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_AMI_MEGARAID3, + PCI_ANY_ID, + PCI_ANY_ID, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_LINDSAY, + PCI_ANY_ID, + PCI_ANY_ID, + }, + {0} /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(pci, pci_id_table_g); + + +static struct pci_driver megaraid_pci_driver = { + .name = "megaraid", + .id_table = pci_id_table_g, + .probe = megaraid_probe_one, + .remove = megaraid_detach_one, + .shutdown = megaraid_mbox_shutdown, +}; + + + +// definitions for the device attributes for exporting logical drive number +// for a scsi address (Host, Channel, Id, Lun) + +static DEVICE_ATTR_ADMIN_RO(megaraid_mbox_app_hndl); + +// Host template initializer for megaraid mbox sysfs device attributes +static struct attribute *megaraid_shost_attrs[] = { + &dev_attr_megaraid_mbox_app_hndl.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(megaraid_shost); + +static DEVICE_ATTR_ADMIN_RO(megaraid_mbox_ld); + +// Host template initializer for megaraid mbox sysfs device attributes +static struct attribute *megaraid_sdev_attrs[] = { + &dev_attr_megaraid_mbox_ld.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(megaraid_sdev); + +/* + * Scsi host template for megaraid unified driver + */ +static const struct scsi_host_template megaraid_template_g = { + .module = THIS_MODULE, + .name = "LSI Logic MegaRAID driver", + .proc_name = "megaraid", + .queuecommand = megaraid_queue_command, + .eh_abort_handler = megaraid_abort_handler, + .eh_host_reset_handler = megaraid_reset_handler, + .change_queue_depth = scsi_change_queue_depth, + .no_write_same = 1, + .sdev_groups = megaraid_sdev_groups, + .shost_groups = megaraid_shost_groups, +}; + + +/** + * megaraid_init - module load hook + * + * We register ourselves as hotplug enabled module and let PCI subsystem + * discover our adapters. 
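+ *
+ * Return: 0 on success, or the negative error code returned by
+ * pci_register_driver() when registration fails.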
+ */ +static int __init +megaraid_init(void) +{ + int rval; + + // Announce the driver version + con_log(CL_ANN, (KERN_INFO "megaraid: %s %s\n", MEGARAID_VERSION, + MEGARAID_EXT_VERSION)); + + // check validity of module parameters + if (megaraid_cmd_per_lun > MBOX_MAX_SCSI_CMDS) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid mailbox: max commands per lun reset to %d\n", + MBOX_MAX_SCSI_CMDS)); + + megaraid_cmd_per_lun = MBOX_MAX_SCSI_CMDS; + } + + + // register as a PCI hot-plug driver module + rval = pci_register_driver(&megaraid_pci_driver); + if (rval < 0) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: could not register hotplug support.\n")); + } + + return rval; +} + + +/** + * megaraid_exit - driver unload entry point + * + * We simply unwrap the megaraid_init routine here. + */ +static void __exit +megaraid_exit(void) +{ + con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n")); + + // unregister as PCI hotplug driver + pci_unregister_driver(&megaraid_pci_driver); + + return; +} + + +/** + * megaraid_probe_one - PCI hotplug entry point + * @pdev : handle to this controller's PCI configuration space + * @id : pci device id of the class of controllers + * + * This routine should be called whenever a new adapter is detected by the + * PCI hotplug susbsystem. + */ +static int +megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + adapter_t *adapter; + + + // detected a new controller + con_log(CL_ANN, (KERN_INFO + "megaraid: probe new device %#4.04x:%#4.04x:%#4.04x:%#4.04x: ", + pdev->vendor, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device)); + + con_log(CL_ANN, ("bus %d:slot %d:func %d\n", pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn))); + + if (pci_enable_device(pdev)) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: pci_enable_device failed\n")); + + return -ENODEV; + } + + // Enable bus-mastering on this controller + pci_set_master(pdev); + + // Allocate the per driver initialization structure + adapter = kzalloc(sizeof(adapter_t), GFP_KERNEL); + + if (adapter == NULL) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d.\n", __func__, __LINE__)); + + goto out_probe_one; + } + + + // set up PCI related soft state and other pre-known parameters + adapter->unique_id = pci_dev_id(pdev); + adapter->irq = pdev->irq; + adapter->pdev = pdev; + + atomic_set(&adapter->being_detached, 0); + + // Setup the default DMA mask. This would be changed later on + // depending on hardware capabilities + if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(32))) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: dma_set_mask failed:%d\n", __LINE__)); + + goto out_free_adapter; + } + + + // Initialize the synchronization lock for kernel and LLD + spin_lock_init(&adapter->lock); + + // Initialize the command queues: the list of free SCBs and the list + // of pending SCBs. 
+ INIT_LIST_HEAD(&adapter->kscb_pool); + spin_lock_init(SCSI_FREE_LIST_LOCK(adapter)); + + INIT_LIST_HEAD(&adapter->pend_list); + spin_lock_init(PENDING_LIST_LOCK(adapter)); + + INIT_LIST_HEAD(&adapter->completed_list); + spin_lock_init(COMPLETED_LIST_LOCK(adapter)); + + + // Start the mailbox based controller + if (megaraid_init_mbox(adapter) != 0) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: mailbox adapter did not initialize\n")); + + goto out_free_adapter; + } + + // Register with LSI Common Management Module + if (megaraid_cmm_register(adapter) != 0) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: could not register with management module\n")); + + goto out_fini_mbox; + } + + // setup adapter handle in PCI soft state + pci_set_drvdata(pdev, adapter); + + // attach with scsi mid-layer + if (megaraid_io_attach(adapter) != 0) { + + con_log(CL_ANN, (KERN_WARNING "megaraid: io attach failed\n")); + + goto out_cmm_unreg; + } + + return 0; + +out_cmm_unreg: + megaraid_cmm_unregister(adapter); +out_fini_mbox: + megaraid_fini_mbox(adapter); +out_free_adapter: + kfree(adapter); +out_probe_one: + pci_disable_device(pdev); + + return -ENODEV; +} + + +/** + * megaraid_detach_one - release framework resources and call LLD release routine + * @pdev : handle for our PCI configuration space + * + * This routine is called during driver unload. We free all the allocated + * resources and call the corresponding LLD so that it can also release all + * its resources. + * + * This routine is also called from the PCI hotplug system. + */ +static void +megaraid_detach_one(struct pci_dev *pdev) +{ + adapter_t *adapter; + struct Scsi_Host *host; + + + // Start a rollback on this adapter + adapter = pci_get_drvdata(pdev); + + if (!adapter) { + con_log(CL_ANN, (KERN_CRIT + "megaraid: Invalid detach on %#4.04x:%#4.04x:%#4.04x:%#4.04x\n", + pdev->vendor, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device)); + + return; + } + else { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: detaching device %#4.04x:%#4.04x:%#4.04x:%#4.04x\n", + pdev->vendor, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device)); + } + + + host = adapter->host; + + // do not allow any more requests from the management module for this + // adapter. + // FIXME: How do we account for the request which might still be + // pending with us? + atomic_set(&adapter->being_detached, 1); + + // detach from the IO sub-system + megaraid_io_detach(adapter); + + // Unregister from common management module + // + // FIXME: this must return success or failure for conditions if there + // is a command pending with LLD or not. + megaraid_cmm_unregister(adapter); + + // finalize the mailbox based controller and release all resources + megaraid_fini_mbox(adapter); + + kfree(adapter); + + scsi_host_put(host); + + pci_disable_device(pdev); + + return; +} + + +/** + * megaraid_mbox_shutdown - PCI shutdown for megaraid HBA + * @pdev : generic driver model device + * + * Shutdown notification, perform flush cache. 
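+ * Flushing here ensures that data held in the controller's write cache
+ * reaches the disks before the system powers off or reboots.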
+ */ +static void +megaraid_mbox_shutdown(struct pci_dev *pdev) +{ + adapter_t *adapter = pci_get_drvdata(pdev); + static int counter; + + if (!adapter) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: null device in shutdown\n")); + return; + } + + // flush caches now + con_log(CL_ANN, (KERN_INFO "megaraid: flushing adapter %d...", + counter++)); + + megaraid_mbox_flush_cache(adapter); + + con_log(CL_ANN, ("done\n")); +} + + +/** + * megaraid_io_attach - attach a device with the IO subsystem + * @adapter : controller's soft state + * + * Attach this device with the IO subsystem. + */ +static int +megaraid_io_attach(adapter_t *adapter) +{ + struct Scsi_Host *host; + + // Initialize SCSI Host structure + host = scsi_host_alloc(&megaraid_template_g, 8); + if (!host) { + con_log(CL_ANN, (KERN_WARNING + "megaraid mbox: scsi_register failed\n")); + + return -1; + } + + SCSIHOST2ADAP(host) = (caddr_t)adapter; + adapter->host = host; + + host->irq = adapter->irq; + host->unique_id = adapter->unique_id; + host->can_queue = adapter->max_cmds; + host->this_id = adapter->init_id; + host->sg_tablesize = adapter->sglen; + host->max_sectors = adapter->max_sectors; + host->cmd_per_lun = adapter->cmd_per_lun; + host->max_channel = adapter->max_channel; + host->max_id = adapter->max_target; + host->max_lun = adapter->max_lun; + + + // notify mid-layer about the new controller + if (scsi_add_host(host, &adapter->pdev->dev)) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid mbox: scsi_add_host failed\n")); + + scsi_host_put(host); + + return -1; + } + + scsi_scan_host(host); + + return 0; +} + + +/** + * megaraid_io_detach - detach a device from the IO subsystem + * @adapter : controller's soft state + * + * Detach this device from the IO subsystem. + */ +static void +megaraid_io_detach(adapter_t *adapter) +{ + struct Scsi_Host *host; + + con_log(CL_DLEVEL1, (KERN_INFO "megaraid: io detach\n")); + + host = adapter->host; + + scsi_remove_host(host); + + return; +} + + +/* + * START: Mailbox Low Level Driver + * + * This is section specific to the single mailbox based controllers + */ + +/** + * megaraid_init_mbox - initialize controller + * @adapter : our soft state + * + * - Allocate 16-byte aligned mailbox memory for firmware handshake + * - Allocate controller's memory resources + * - Find out all initialization data + * - Allocate memory required for all the commands + * - Use internal library of FW routines, build up complete soft state + */ +static int +megaraid_init_mbox(adapter_t *adapter) +{ + struct pci_dev *pdev; + mraid_device_t *raid_dev; + int i; + uint32_t magic64; + + + adapter->ito = MBOX_TIMEOUT; + pdev = adapter->pdev; + + /* + * Allocate and initialize the init data structure for mailbox + * controllers + */ + raid_dev = kzalloc(sizeof(mraid_device_t), GFP_KERNEL); + if (raid_dev == NULL) return -1; + + + /* + * Attach the adapter soft state to raid device soft state + */ + adapter->raid_device = (caddr_t)raid_dev; + raid_dev->fast_load = megaraid_fast_load; + + + // our baseport + raid_dev->baseport = pci_resource_start(pdev, 0); + + if (pci_request_regions(pdev, "MegaRAID: LSI Logic Corporation") != 0) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: mem region busy\n")); + + goto out_free_raid_dev; + } + + raid_dev->baseaddr = ioremap(raid_dev->baseport, 128); + + if (!raid_dev->baseaddr) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: could not map hba memory\n") ); + + goto out_release_regions; + } + + /* initialize the mutual exclusion lock for the mailbox */ + 
spin_lock_init(&raid_dev->mailbox_lock); + + /* allocate memory required for commands */ + if (megaraid_alloc_cmd_packets(adapter) != 0) + goto out_iounmap; + + /* + * Issue SYNC cmd to flush the pending cmds in the adapter + * and initialize its internal state + */ + + if (megaraid_mbox_fire_sync_cmd(adapter)) + con_log(CL_ANN, ("megaraid: sync cmd failed\n")); + + /* + * Setup the rest of the soft state using the library of + * FW routines + */ + + /* request IRQ and register the interrupt service routine */ + if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid", + adapter)) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: Couldn't register IRQ %d!\n", adapter->irq)); + goto out_alloc_cmds; + + } + + // Product info + if (megaraid_mbox_product_info(adapter) != 0) + goto out_free_irq; + + // Do we support extended CDBs + adapter->max_cdb_sz = 10; + if (megaraid_mbox_extended_cdb(adapter) == 0) { + adapter->max_cdb_sz = 16; + } + + /* + * Do we support cluster environment, if we do, what is the initiator + * id. + * NOTE: In a non-cluster aware firmware environment, the LLD should + * return 7 as initiator id. + */ + adapter->ha = 0; + adapter->init_id = -1; + if (megaraid_mbox_support_ha(adapter, &adapter->init_id) == 0) { + adapter->ha = 1; + } + + /* + * Prepare the device ids array to have the mapping between the kernel + * device address and megaraid device address. + * We export the physical devices on their actual addresses. The + * logical drives are exported on a virtual SCSI channel + */ + megaraid_mbox_setup_device_map(adapter); + + // If the firmware supports random deletion, update the device id map + if (megaraid_mbox_support_random_del(adapter)) { + + // Change the logical drives numbers in device_ids array one + // slot in device_ids is reserved for target id, that's why + // "<=" below + for (i = 0; i <= MAX_LOGICAL_DRIVES_40LD; i++) { + adapter->device_ids[adapter->max_channel][i] += 0x80; + } + adapter->device_ids[adapter->max_channel][adapter->init_id] = + 0xFF; + + raid_dev->random_del_supported = 1; + } + + /* + * find out the maximum number of scatter-gather elements supported by + * this firmware + */ + adapter->sglen = megaraid_mbox_get_max_sg(adapter); + + // enumerate RAID and SCSI channels so that all devices on SCSI + // channels can later be exported, including disk devices + megaraid_mbox_enum_raid_scsi(adapter); + + /* + * Other parameters required by upper layer + * + * maximum number of sectors per IO command + */ + adapter->max_sectors = megaraid_max_sectors; + + /* + * number of queued commands per LUN. + */ + adapter->cmd_per_lun = megaraid_cmd_per_lun; + + /* + * Allocate resources required to issue FW calls, when sysfs is + * accessed + */ + if (megaraid_sysfs_alloc_resources(adapter) != 0) + goto out_free_irq; + + // Set the DMA mask to 64-bit. 
All supported controllers as capable of + // DMA in this range + pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64); + + if (((magic64 == HBA_SIGNATURE_64_BIT) && + ((adapter->pdev->subsystem_device != + PCI_SUBSYS_ID_MEGARAID_SATA_150_6) && + (adapter->pdev->subsystem_device != + PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) || + (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && + adapter->pdev->device == PCI_DEVICE_ID_VERDE) || + (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && + adapter->pdev->device == PCI_DEVICE_ID_DOBSON) || + (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && + adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) || + (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && + adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) || + (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && + adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) { + if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(64))) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: DMA mask for 64-bit failed\n")); + + if (dma_set_mask(&adapter->pdev->dev, + DMA_BIT_MASK(32))) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: 32-bit DMA mask failed\n")); + goto out_free_sysfs_res; + } + } + } + + // setup tasklet for DPC + tasklet_init(&adapter->dpc_h, megaraid_mbox_dpc, + (unsigned long)adapter); + + con_log(CL_DLEVEL1, (KERN_INFO + "megaraid mbox hba successfully initialized\n")); + + return 0; + +out_free_sysfs_res: + megaraid_sysfs_free_resources(adapter); +out_free_irq: + free_irq(adapter->irq, adapter); +out_alloc_cmds: + megaraid_free_cmd_packets(adapter); +out_iounmap: + iounmap(raid_dev->baseaddr); +out_release_regions: + pci_release_regions(pdev); +out_free_raid_dev: + kfree(raid_dev); + + return -1; +} + + +/** + * megaraid_fini_mbox - undo controller initialization + * @adapter : our soft state + */ +static void +megaraid_fini_mbox(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + + // flush all caches + megaraid_mbox_flush_cache(adapter); + + tasklet_kill(&adapter->dpc_h); + + megaraid_sysfs_free_resources(adapter); + + megaraid_free_cmd_packets(adapter); + + free_irq(adapter->irq, adapter); + + iounmap(raid_dev->baseaddr); + + pci_release_regions(adapter->pdev); + + kfree(raid_dev); + + return; +} + + +/** + * megaraid_alloc_cmd_packets - allocate shared mailbox + * @adapter : soft state of the raid controller + * + * Allocate and align the shared mailbox. This mailbox is used to issue + * all the commands. For IO based controllers, the mailbox is also registered + * with the FW. Allocate memory for all commands as well. + * This is our big allocator. + */ +static int +megaraid_alloc_cmd_packets(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + struct pci_dev *pdev; + unsigned long align; + scb_t *scb; + mbox_ccb_t *ccb; + struct mraid_pci_blk *epthru_pci_blk; + struct mraid_pci_blk *sg_pci_blk; + struct mraid_pci_blk *mbox_pci_blk; + int i; + + pdev = adapter->pdev; + + /* + * Setup the mailbox + * Allocate the common 16-byte aligned memory for the handshake + * mailbox. 
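+ *
+ * The 32-bit mailbox embedded in mbox64_t does not necessarily start on
+ * a 16-byte boundary, so its address is rounded up by hand and the same
+ * offset is applied to the DMA handle. Conceptually (illustrative only):
+ *
+ *	aligned = (addr + 15) & ~0xFUL;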
+ */ + raid_dev->una_mbox64 = dma_alloc_coherent(&adapter->pdev->dev, + sizeof(mbox64_t), + &raid_dev->una_mbox64_dma, + GFP_KERNEL); + + if (!raid_dev->una_mbox64) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d\n", __func__, + __LINE__)); + return -1; + } + + /* + * Align the mailbox at 16-byte boundary + */ + raid_dev->mbox = &raid_dev->una_mbox64->mbox32; + + raid_dev->mbox = (mbox_t *)((((unsigned long)raid_dev->mbox) + 15) & + (~0UL ^ 0xFUL)); + + raid_dev->mbox64 = (mbox64_t *)(((unsigned long)raid_dev->mbox) - 8); + + align = ((void *)raid_dev->mbox - + ((void *)&raid_dev->una_mbox64->mbox32)); + + raid_dev->mbox_dma = (unsigned long)raid_dev->una_mbox64_dma + 8 + + align; + + // Allocate memory for commands issued internally + adapter->ibuf = dma_alloc_coherent(&pdev->dev, MBOX_IBUF_SIZE, + &adapter->ibuf_dma_h, GFP_KERNEL); + if (!adapter->ibuf) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d\n", __func__, + __LINE__)); + + goto out_free_common_mbox; + } + + // Allocate memory for our SCSI Command Blocks and their associated + // memory + + /* + * Allocate memory for the base list of scb. Later allocate memory for + * CCBs and embedded components of each CCB and point the pointers in + * scb to the allocated components + * NOTE: The code to allocate SCB will be duplicated in all the LLD + * since the calling routine does not yet know the number of available + * commands. + */ + adapter->kscb_list = kcalloc(MBOX_MAX_SCSI_CMDS, sizeof(scb_t), GFP_KERNEL); + + if (adapter->kscb_list == NULL) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d\n", __func__, + __LINE__)); + goto out_free_ibuf; + } + + // memory allocation for our command packets + if (megaraid_mbox_setup_dma_pools(adapter) != 0) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d\n", __func__, + __LINE__)); + goto out_free_scb_list; + } + + // Adjust the scb pointers and link in the free pool + epthru_pci_blk = raid_dev->epthru_pool; + sg_pci_blk = raid_dev->sg_pool; + mbox_pci_blk = raid_dev->mbox_pool; + + for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { + scb = adapter->kscb_list + i; + ccb = raid_dev->ccb_list + i; + + ccb->mbox = (mbox_t *)(mbox_pci_blk[i].vaddr + 16); + ccb->raw_mbox = (uint8_t *)ccb->mbox; + ccb->mbox64 = (mbox64_t *)(mbox_pci_blk[i].vaddr + 8); + ccb->mbox_dma_h = (unsigned long)mbox_pci_blk[i].dma_addr + 16; + + // make sure the mailbox is aligned properly + if (ccb->mbox_dma_h & 0x0F) { + con_log(CL_ANN, (KERN_CRIT + "megaraid mbox: not aligned on 16-bytes\n")); + + goto out_teardown_dma_pools; + } + + ccb->epthru = (mraid_epassthru_t *) + epthru_pci_blk[i].vaddr; + ccb->epthru_dma_h = epthru_pci_blk[i].dma_addr; + ccb->pthru = (mraid_passthru_t *)ccb->epthru; + ccb->pthru_dma_h = ccb->epthru_dma_h; + + + ccb->sgl64 = (mbox_sgl64 *)sg_pci_blk[i].vaddr; + ccb->sgl_dma_h = sg_pci_blk[i].dma_addr; + ccb->sgl32 = (mbox_sgl32 *)ccb->sgl64; + + scb->ccb = (caddr_t)ccb; + scb->gp = 0; + + scb->sno = i; // command index + + scb->scp = NULL; + scb->state = SCB_FREE; + scb->dma_direction = DMA_NONE; + scb->dma_type = MRAID_DMA_NONE; + scb->dev_channel = -1; + scb->dev_target = -1; + + // put scb in the free pool + list_add_tail(&scb->list, &adapter->kscb_pool); + } + + return 0; + +out_teardown_dma_pools: + megaraid_mbox_teardown_dma_pools(adapter); +out_free_scb_list: + kfree(adapter->kscb_list); +out_free_ibuf: + dma_free_coherent(&pdev->dev, MBOX_IBUF_SIZE, (void *)adapter->ibuf, + adapter->ibuf_dma_h); +out_free_common_mbox: + 
dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t), + (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma); + + return -1; +} + + +/** + * megaraid_free_cmd_packets - free memory + * @adapter : soft state of the raid controller + * + * Release memory resources allocated for commands. + */ +static void +megaraid_free_cmd_packets(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + + megaraid_mbox_teardown_dma_pools(adapter); + + kfree(adapter->kscb_list); + + dma_free_coherent(&adapter->pdev->dev, MBOX_IBUF_SIZE, + (void *)adapter->ibuf, adapter->ibuf_dma_h); + + dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t), + (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma); + return; +} + + +/** + * megaraid_mbox_setup_dma_pools - setup dma pool for command packets + * @adapter : HBA soft state + * + * Setup the dma pools for mailbox, passthru and extended passthru structures, + * and scatter-gather lists. + */ +static int +megaraid_mbox_setup_dma_pools(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + struct mraid_pci_blk *epthru_pci_blk; + struct mraid_pci_blk *sg_pci_blk; + struct mraid_pci_blk *mbox_pci_blk; + int i; + + + + // Allocate memory for 16-bytes aligned mailboxes + raid_dev->mbox_pool_handle = dma_pool_create("megaraid mbox pool", + &adapter->pdev->dev, + sizeof(mbox64_t) + 16, + 16, 0); + + if (raid_dev->mbox_pool_handle == NULL) { + goto fail_setup_dma_pool; + } + + mbox_pci_blk = raid_dev->mbox_pool; + for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { + mbox_pci_blk[i].vaddr = dma_pool_alloc( + raid_dev->mbox_pool_handle, + GFP_KERNEL, + &mbox_pci_blk[i].dma_addr); + if (!mbox_pci_blk[i].vaddr) { + goto fail_setup_dma_pool; + } + } + + /* + * Allocate memory for each embedded passthru strucuture pointer + * Request for a 128 bytes aligned structure for each passthru command + * structure + * Since passthru and extended passthru commands are exclusive, they + * share common memory pool. Passthru structures piggyback on memory + * allocated to extended passthru since passthru is smaller of the two + */ + raid_dev->epthru_pool_handle = dma_pool_create("megaraid mbox pthru", + &adapter->pdev->dev, sizeof(mraid_epassthru_t), 128, 0); + + if (raid_dev->epthru_pool_handle == NULL) { + goto fail_setup_dma_pool; + } + + epthru_pci_blk = raid_dev->epthru_pool; + for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { + epthru_pci_blk[i].vaddr = dma_pool_alloc( + raid_dev->epthru_pool_handle, + GFP_KERNEL, + &epthru_pci_blk[i].dma_addr); + if (!epthru_pci_blk[i].vaddr) { + goto fail_setup_dma_pool; + } + } + + + // Allocate memory for each scatter-gather list. Request for 512 bytes + // alignment for each sg list + raid_dev->sg_pool_handle = dma_pool_create("megaraid mbox sg", + &adapter->pdev->dev, + sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE, + 512, 0); + + if (raid_dev->sg_pool_handle == NULL) { + goto fail_setup_dma_pool; + } + + sg_pci_blk = raid_dev->sg_pool; + for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { + sg_pci_blk[i].vaddr = dma_pool_alloc( + raid_dev->sg_pool_handle, + GFP_KERNEL, + &sg_pci_blk[i].dma_addr); + if (!sg_pci_blk[i].vaddr) { + goto fail_setup_dma_pool; + } + } + + return 0; + +fail_setup_dma_pool: + megaraid_mbox_teardown_dma_pools(adapter); + return -1; +} + + +/** + * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets + * @adapter : HBA soft state + * + * Teardown the dma pool for mailbox, passthru and extended passthru + * structures, and scatter-gather lists. 
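+ *
+ * Safe to call on partially set up pools: each loop stops at the first
+ * block without a virtual address and dma_pool_destroy() ignores a NULL
+ * pool handle, so the setup error path can reuse this routine.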
+ */ +static void +megaraid_mbox_teardown_dma_pools(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + struct mraid_pci_blk *epthru_pci_blk; + struct mraid_pci_blk *sg_pci_blk; + struct mraid_pci_blk *mbox_pci_blk; + int i; + + + sg_pci_blk = raid_dev->sg_pool; + for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) { + dma_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr, + sg_pci_blk[i].dma_addr); + } + dma_pool_destroy(raid_dev->sg_pool_handle); + + + epthru_pci_blk = raid_dev->epthru_pool; + for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) { + dma_pool_free(raid_dev->epthru_pool_handle, + epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr); + } + dma_pool_destroy(raid_dev->epthru_pool_handle); + + + mbox_pci_blk = raid_dev->mbox_pool; + for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) { + dma_pool_free(raid_dev->mbox_pool_handle, + mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr); + } + dma_pool_destroy(raid_dev->mbox_pool_handle); + + return; +} + + +/** + * megaraid_alloc_scb - detach and return a scb from the free list + * @adapter : controller's soft state + * @scp : pointer to the scsi command to be executed + * + * Return the scb from the head of the free list. %NULL if there are none + * available. + */ +static scb_t * +megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp) +{ + struct list_head *head = &adapter->kscb_pool; + scb_t *scb = NULL; + unsigned long flags; + + // detach scb from free pool + spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags); + + if (list_empty(head)) { + spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags); + return NULL; + } + + scb = list_entry(head->next, scb_t, list); + list_del_init(&scb->list); + + spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags); + + scb->state = SCB_ACTIVE; + scb->scp = scp; + scb->dma_type = MRAID_DMA_NONE; + + return scb; +} + + +/** + * megaraid_dealloc_scb - return the scb to the free pool + * @adapter : controller's soft state + * @scb : scb to be freed + * + * Return the scb back to the free list of scbs. The caller must 'flush' the + * SCB before calling us. E.g., performing pci_unamp and/or pci_sync etc. + * NOTE NOTE: Make sure the scb is not on any list before calling this + * routine. + */ +static inline void +megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb) +{ + unsigned long flags; + + // put scb in the free pool + scb->state = SCB_FREE; + scb->scp = NULL; + spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags); + + list_add(&scb->list, &adapter->kscb_pool); + + spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags); + + return; +} + + +/** + * megaraid_mbox_mksgl - make the scatter-gather list + * @adapter : controller's soft state + * @scb : scsi control block + * + * Prepare the scatter-gather list. 
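+ *
+ * Return: the number of scatter-gather entries mapped and copied into
+ * the CCB's 64-bit SG list, or 0 when the command carries no data.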
+ */ +static int +megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb) +{ + struct scatterlist *sgl; + mbox_ccb_t *ccb; + struct scsi_cmnd *scp; + int sgcnt; + int i; + + + scp = scb->scp; + ccb = (mbox_ccb_t *)scb->ccb; + + sgcnt = scsi_dma_map(scp); + BUG_ON(sgcnt < 0 || sgcnt > adapter->sglen); + + // no mapping required if no data to be transferred + if (!sgcnt) + return 0; + + scb->dma_type = MRAID_DMA_WSG; + + scsi_for_each_sg(scp, sgl, sgcnt, i) { + ccb->sgl64[i].address = sg_dma_address(sgl); + ccb->sgl64[i].length = sg_dma_len(sgl); + } + + // Return count of SG nodes + return sgcnt; +} + + +/** + * mbox_post_cmd - issue a mailbox command + * @adapter : controller's soft state + * @scb : command to be issued + * + * Post the command to the controller if mailbox is available. + */ +static int +mbox_post_cmd(adapter_t *adapter, scb_t *scb) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + mbox64_t *mbox64; + mbox_t *mbox; + mbox_ccb_t *ccb; + unsigned long flags; + unsigned int i = 0; + + + ccb = (mbox_ccb_t *)scb->ccb; + mbox = raid_dev->mbox; + mbox64 = raid_dev->mbox64; + + /* + * Check for busy mailbox. If it is, return failure - the caller + * should retry later. + */ + spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags); + + if (unlikely(mbox->busy)) { + do { + udelay(1); + i++; + rmb(); + } while(mbox->busy && (i < max_mbox_busy_wait)); + + if (mbox->busy) { + + spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags); + + return -1; + } + } + + + // Copy this command's mailbox data into "adapter's" mailbox + memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22); + mbox->cmdid = scb->sno; + + adapter->outstanding_cmds++; + + mbox->busy = 1; // Set busy + mbox->poll = 0; + mbox->ack = 0; + wmb(); + + WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); + + spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags); + + return 0; +} + + +/** + * megaraid_queue_command_lck - generic queue entry point for all LLDs + * @scp : pointer to the scsi command to be executed + * + * Queue entry point for mailbox based controllers. + */ +static int megaraid_queue_command_lck(struct scsi_cmnd *scp) +{ + void (*done)(struct scsi_cmnd *) = scsi_done; + adapter_t *adapter; + scb_t *scb; + int if_busy; + + adapter = SCP2ADAPTER(scp); + scp->result = 0; + + /* + * Allocate and build a SCB request + * if_busy flag will be set if megaraid_mbox_build_cmd() command could + * not allocate scb. We will return non-zero status in that case. + * NOTE: scb can be null even though certain commands completed + * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, it would + * return 0 in that case, and we would do the callback right away. + */ + if_busy = 0; + scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy); + if (!scb) { // command already completed + done(scp); + return 0; + } + + megaraid_mbox_runpendq(adapter, scb); + return if_busy; +} + +static DEF_SCSI_QCMD(megaraid_queue_command) + +/** + * megaraid_mbox_build_cmd - transform the mid-layer scsi commands + * @adapter : controller's soft state + * @scp : mid-layer scsi command pointer + * @busy : set if request could not be completed because of lack of + * resources + * + * Transform the mid-layer scsi command to megaraid firmware lingua. + * Convert the command issued by mid-layer to format understood by megaraid + * firmware. We also complete certain commands without sending them to firmware. 
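+ *
+ * Return: the scb to be posted to the firmware, or NULL when no command
+ * is sent. In the NULL case scp->result is already set; @busy is
+ * additionally set to 1 when the failure was the lack of a free scb.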
+ */ +static scb_t * +megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) +{ + mraid_device_t *rdev = ADAP2RAIDDEV(adapter); + int channel; + int target; + int islogical; + mbox_ccb_t *ccb; + mraid_passthru_t *pthru; + mbox64_t *mbox64; + mbox_t *mbox; + scb_t *scb; + char skip[] = "skipping"; + char scan[] = "scanning"; + char *ss; + + + /* + * Get the appropriate device map for the device this command is + * intended for + */ + MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical); + + /* + * Logical drive commands + */ + if (islogical) { + switch (scp->cmnd[0]) { + case TEST_UNIT_READY: + /* + * Do we support clustering and is the support enabled + * If no, return success always + */ + if (!adapter->ha) { + scp->result = (DID_OK << 16); + return NULL; + } + + if (!(scb = megaraid_alloc_scb(adapter, scp))) { + scp->result = (DID_ERROR << 16); + *busy = 1; + return NULL; + } + + scb->dma_direction = scp->sc_data_direction; + scb->dev_channel = 0xFF; + scb->dev_target = target; + ccb = (mbox_ccb_t *)scb->ccb; + + /* + * The command id will be provided by the command + * issuance routine + */ + ccb->raw_mbox[0] = CLUSTER_CMD; + ccb->raw_mbox[2] = RESERVATION_STATUS; + ccb->raw_mbox[3] = target; + + return scb; + + case MODE_SENSE: + { + struct scatterlist *sgl; + caddr_t vaddr; + + sgl = scsi_sglist(scp); + if (sg_page(sgl)) { + vaddr = (caddr_t) sg_virt(&sgl[0]); + + memset(vaddr, 0, scp->cmnd[4]); + } + else { + con_log(CL_ANN, (KERN_WARNING + "megaraid mailbox: invalid sg:%d\n", + __LINE__)); + } + } + scp->result = (DID_OK << 16); + return NULL; + + case INQUIRY: + /* + * Display the channel scan for logical drives + * Do not display scan for a channel if already done. + */ + if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) { + + con_log(CL_ANN, (KERN_INFO + "scsi[%d]: scanning scsi channel %d", + adapter->host->host_no, + SCP2CHANNEL(scp))); + + con_log(CL_ANN, ( + " [virtual] for logical drives\n")); + + rdev->last_disp |= (1L << SCP2CHANNEL(scp)); + } + + if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) { + scsi_build_sense(scp, 0, ILLEGAL_REQUEST, + MEGA_INVALID_FIELD_IN_CDB, 0); + return NULL; + } + + fallthrough; + + case READ_CAPACITY: + /* + * Do not allow LUN > 0 for logical drives and + * requests for more than 40 logical drives + */ + if (SCP2LUN(scp)) { + scp->result = (DID_BAD_TARGET << 16); + return NULL; + } + if ((target % 0x80) >= MAX_LOGICAL_DRIVES_40LD) { + scp->result = (DID_BAD_TARGET << 16); + return NULL; + } + + + /* Allocate a SCB and initialize passthru */ + if (!(scb = megaraid_alloc_scb(adapter, scp))) { + scp->result = (DID_ERROR << 16); + *busy = 1; + return NULL; + } + + ccb = (mbox_ccb_t *)scb->ccb; + scb->dev_channel = 0xFF; + scb->dev_target = target; + pthru = ccb->pthru; + mbox = ccb->mbox; + mbox64 = ccb->mbox64; + + pthru->timeout = 0; + pthru->ars = 1; + pthru->reqsenselen = 14; + pthru->islogical = 1; + pthru->logdrv = target; + pthru->cdblen = scp->cmd_len; + memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); + + mbox->cmd = MBOXCMD_PASSTHRU64; + scb->dma_direction = scp->sc_data_direction; + + pthru->dataxferlen = scsi_bufflen(scp); + pthru->dataxferaddr = ccb->sgl_dma_h; + pthru->numsge = megaraid_mbox_mksgl(adapter, + scb); + + mbox->xferaddr = 0xFFFFFFFF; + mbox64->xferaddr_lo = (uint32_t )ccb->pthru_dma_h; + mbox64->xferaddr_hi = 0; + + return scb; + + case READ_6: + case WRITE_6: + case READ_10: + case WRITE_10: + case READ_12: + case WRITE_12: + + /* + * Allocate a SCB and initialize mailbox + */ + if (!(scb = 
megaraid_alloc_scb(adapter, scp))) { + scp->result = (DID_ERROR << 16); + *busy = 1; + return NULL; + } + ccb = (mbox_ccb_t *)scb->ccb; + scb->dev_channel = 0xFF; + scb->dev_target = target; + mbox = ccb->mbox; + mbox64 = ccb->mbox64; + mbox->logdrv = target; + + /* + * A little HACK: 2nd bit is zero for all scsi read + * commands and is set for all scsi write commands + */ + mbox->cmd = (scp->cmnd[0] & 0x02) ? MBOXCMD_LWRITE64: + MBOXCMD_LREAD64 ; + + /* + * 6-byte READ(0x08) or WRITE(0x0A) cdb + */ + if (scp->cmd_len == 6) { + mbox->numsectors = (uint32_t)scp->cmnd[4]; + mbox->lba = + ((uint32_t)scp->cmnd[1] << 16) | + ((uint32_t)scp->cmnd[2] << 8) | + (uint32_t)scp->cmnd[3]; + + mbox->lba &= 0x1FFFFF; + } + + /* + * 10-byte READ(0x28) or WRITE(0x2A) cdb + */ + else if (scp->cmd_len == 10) { + mbox->numsectors = + (uint32_t)scp->cmnd[8] | + ((uint32_t)scp->cmnd[7] << 8); + mbox->lba = + ((uint32_t)scp->cmnd[2] << 24) | + ((uint32_t)scp->cmnd[3] << 16) | + ((uint32_t)scp->cmnd[4] << 8) | + (uint32_t)scp->cmnd[5]; + } + + /* + * 12-byte READ(0xA8) or WRITE(0xAA) cdb + */ + else if (scp->cmd_len == 12) { + mbox->lba = + ((uint32_t)scp->cmnd[2] << 24) | + ((uint32_t)scp->cmnd[3] << 16) | + ((uint32_t)scp->cmnd[4] << 8) | + (uint32_t)scp->cmnd[5]; + + mbox->numsectors = + ((uint32_t)scp->cmnd[6] << 24) | + ((uint32_t)scp->cmnd[7] << 16) | + ((uint32_t)scp->cmnd[8] << 8) | + (uint32_t)scp->cmnd[9]; + } + else { + con_log(CL_ANN, (KERN_WARNING + "megaraid: unsupported CDB length\n")); + + megaraid_dealloc_scb(adapter, scb); + + scp->result = (DID_ERROR << 16); + return NULL; + } + + scb->dma_direction = scp->sc_data_direction; + + // Calculate Scatter-Gather info + mbox64->xferaddr_lo = (uint32_t )ccb->sgl_dma_h; + mbox->numsge = megaraid_mbox_mksgl(adapter, + scb); + mbox->xferaddr = 0xFFFFFFFF; + mbox64->xferaddr_hi = 0; + + return scb; + + case RESERVE: + case RELEASE: + /* + * Do we support clustering and is the support enabled + */ + if (!adapter->ha) { + scp->result = (DID_BAD_TARGET << 16); + return NULL; + } + + /* + * Allocate a SCB and initialize mailbox + */ + if (!(scb = megaraid_alloc_scb(adapter, scp))) { + scp->result = (DID_ERROR << 16); + *busy = 1; + return NULL; + } + + ccb = (mbox_ccb_t *)scb->ccb; + scb->dev_channel = 0xFF; + scb->dev_target = target; + ccb->raw_mbox[0] = CLUSTER_CMD; + ccb->raw_mbox[2] = (scp->cmnd[0] == RESERVE) ? + RESERVE_LD : RELEASE_LD; + + ccb->raw_mbox[3] = target; + scb->dma_direction = scp->sc_data_direction; + + return scb; + + default: + scp->result = (DID_BAD_TARGET << 16); + return NULL; + } + } + else { // Passthru device commands + + // Do not allow access to target id > 15 or LUN > 7 + if (target > 15 || SCP2LUN(scp) > 7) { + scp->result = (DID_BAD_TARGET << 16); + return NULL; + } + + // if fast load option was set and scan for last device is + // over, reset the fast_load flag so that during a possible + // next scan, devices can be made available + if (rdev->fast_load && (target == 15) && + (SCP2CHANNEL(scp) == adapter->max_channel -1)) { + + con_log(CL_ANN, (KERN_INFO + "megaraid[%d]: physical device scan re-enabled\n", + adapter->host->host_no)); + rdev->fast_load = 0; + } + + /* + * Display the channel scan for physical devices + */ + if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) { + + ss = rdev->fast_load ? 
skip : scan; + + con_log(CL_ANN, (KERN_INFO + "scsi[%d]: %s scsi channel %d [Phy %d]", + adapter->host->host_no, ss, SCP2CHANNEL(scp), + channel)); + + con_log(CL_ANN, ( + " for non-raid devices\n")); + + rdev->last_disp |= (1L << SCP2CHANNEL(scp)); + } + + // disable channel sweep if fast load option given + if (rdev->fast_load) { + scp->result = (DID_BAD_TARGET << 16); + return NULL; + } + + // Allocate a SCB and initialize passthru + if (!(scb = megaraid_alloc_scb(adapter, scp))) { + scp->result = (DID_ERROR << 16); + *busy = 1; + return NULL; + } + + ccb = (mbox_ccb_t *)scb->ccb; + scb->dev_channel = channel; + scb->dev_target = target; + scb->dma_direction = scp->sc_data_direction; + mbox = ccb->mbox; + mbox64 = ccb->mbox64; + + // Does this firmware support extended CDBs + if (adapter->max_cdb_sz == 16) { + mbox->cmd = MBOXCMD_EXTPTHRU; + + megaraid_mbox_prepare_epthru(adapter, scb, scp); + + mbox64->xferaddr_lo = (uint32_t)ccb->epthru_dma_h; + mbox64->xferaddr_hi = 0; + mbox->xferaddr = 0xFFFFFFFF; + } + else { + mbox->cmd = MBOXCMD_PASSTHRU64; + + megaraid_mbox_prepare_pthru(adapter, scb, scp); + + mbox64->xferaddr_lo = (uint32_t)ccb->pthru_dma_h; + mbox64->xferaddr_hi = 0; + mbox->xferaddr = 0xFFFFFFFF; + } + return scb; + } + + // NOT REACHED +} + + +/** + * megaraid_mbox_runpendq - execute commands queued in the pending queue + * @adapter : controller's soft state + * @scb_q : SCB to be queued in the pending list + * + * Scan the pending list for commands which are not yet issued and try to + * post to the controller. The SCB can be a null pointer, which would indicate + * no SCB to be queue, just try to execute the ones in the pending list. + * + * NOTE: We do not actually traverse the pending list. The SCBs are plucked + * out from the head of the pending list. If it is successfully issued, the + * next SCB is at the head now. + */ +static void +megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q) +{ + scb_t *scb; + unsigned long flags; + + spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); + + if (scb_q) { + scb_q->state = SCB_PENDQ; + list_add_tail(&scb_q->list, &adapter->pend_list); + } + + // if the adapter in not in quiescent mode, post the commands to FW + if (adapter->quiescent) { + spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); + return; + } + + while (!list_empty(&adapter->pend_list)) { + + assert_spin_locked(PENDING_LIST_LOCK(adapter)); + + scb = list_entry(adapter->pend_list.next, scb_t, list); + + // remove the scb from the pending list and try to + // issue. If we are unable to issue it, put back in + // the pending list and return + + list_del_init(&scb->list); + + spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); + + // if mailbox was busy, return SCB back to pending + // list. 
Make sure to add at the head, since that's + // where it would have been removed from + + scb->state = SCB_ISSUED; + + if (mbox_post_cmd(adapter, scb) != 0) { + + spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); + + scb->state = SCB_PENDQ; + + list_add(&scb->list, &adapter->pend_list); + + spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), + flags); + + return; + } + + spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); + } + + spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); + + + return; +} + + +/** + * megaraid_mbox_prepare_pthru - prepare a command for physical devices + * @adapter : pointer to controller's soft state + * @scb : scsi control block + * @scp : scsi command from the mid-layer + * + * Prepare a command for the scsi physical devices. + */ +static void +megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb, + struct scsi_cmnd *scp) +{ + mbox_ccb_t *ccb; + mraid_passthru_t *pthru; + uint8_t channel; + uint8_t target; + + ccb = (mbox_ccb_t *)scb->ccb; + pthru = ccb->pthru; + channel = scb->dev_channel; + target = scb->dev_target; + + // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout + pthru->timeout = 4; + pthru->ars = 1; + pthru->islogical = 0; + pthru->channel = 0; + pthru->target = (channel << 4) | target; + pthru->logdrv = SCP2LUN(scp); + pthru->reqsenselen = 14; + pthru->cdblen = scp->cmd_len; + + memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); + + if (scsi_bufflen(scp)) { + pthru->dataxferlen = scsi_bufflen(scp); + pthru->dataxferaddr = ccb->sgl_dma_h; + pthru->numsge = megaraid_mbox_mksgl(adapter, scb); + } + else { + pthru->dataxferaddr = 0; + pthru->dataxferlen = 0; + pthru->numsge = 0; + } + return; +} + + +/** + * megaraid_mbox_prepare_epthru - prepare a command for physical devices + * @adapter : pointer to controller's soft state + * @scb : scsi control block + * @scp : scsi command from the mid-layer + * + * Prepare a command for the scsi physical devices. This routine prepares + * commands for devices which can take extended CDBs (>10 bytes). + */ +static void +megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb, + struct scsi_cmnd *scp) +{ + mbox_ccb_t *ccb; + mraid_epassthru_t *epthru; + uint8_t channel; + uint8_t target; + + ccb = (mbox_ccb_t *)scb->ccb; + epthru = ccb->epthru; + channel = scb->dev_channel; + target = scb->dev_target; + + // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout + epthru->timeout = 4; + epthru->ars = 1; + epthru->islogical = 0; + epthru->channel = 0; + epthru->target = (channel << 4) | target; + epthru->logdrv = SCP2LUN(scp); + epthru->reqsenselen = 14; + epthru->cdblen = scp->cmd_len; + + memcpy(epthru->cdb, scp->cmnd, scp->cmd_len); + + if (scsi_bufflen(scp)) { + epthru->dataxferlen = scsi_bufflen(scp); + epthru->dataxferaddr = ccb->sgl_dma_h; + epthru->numsge = megaraid_mbox_mksgl(adapter, scb); + } + else { + epthru->dataxferaddr = 0; + epthru->dataxferlen = 0; + epthru->numsge = 0; + } + return; +} + + +/** + * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs + * @adapter : controller's soft state + * + * Interrupt acknowledgement sequence for memory mapped HBAs. Find out the + * completed command and put them on the completed list for later processing. 
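+ *
+ * The handshake implemented below reads the outdoor register and, when it
+ * carries the 0x10001234 signature, writes the value back to force the
+ * interrupt line low. The mailbox then supplies numstatus completed
+ * command ids in mbox->completed[]; each id is mapped back to a kernel
+ * SCB (kscb_list) or a management SCB (uscb_list), its status recorded,
+ * and the firmware acknowledged by writing 0x02 to the indoor register
+ * before the next iteration of the loop.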
+ * + * Returns: 1 if the interrupt is valid, 0 otherwise + */ +static int +megaraid_ack_sequence(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + mbox_t *mbox; + scb_t *scb; + uint8_t nstatus; + uint8_t completed[MBOX_MAX_FIRMWARE_STATUS]; + struct list_head clist; + int handled; + uint32_t dword; + unsigned long flags; + int i, j; + + + mbox = raid_dev->mbox; + + // move the SCBs from the firmware completed array to our local list + INIT_LIST_HEAD(&clist); + + // loop till F/W has more commands for us to complete + handled = 0; + spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags); + do { + /* + * Check if a valid interrupt is pending. If found, force the + * interrupt line low. + */ + dword = RDOUTDOOR(raid_dev); + if (dword != 0x10001234) break; + + handled = 1; + + WROUTDOOR(raid_dev, 0x10001234); + + nstatus = 0; + // wait for valid numstatus to post + for (i = 0; i < 0xFFFFF; i++) { + if (mbox->numstatus != 0xFF) { + nstatus = mbox->numstatus; + break; + } + rmb(); + } + mbox->numstatus = 0xFF; + + adapter->outstanding_cmds -= nstatus; + + for (i = 0; i < nstatus; i++) { + + // wait for valid command index to post + for (j = 0; j < 0xFFFFF; j++) { + if (mbox->completed[i] != 0xFF) break; + rmb(); + } + completed[i] = mbox->completed[i]; + mbox->completed[i] = 0xFF; + + if (completed[i] == 0xFF) { + con_log(CL_ANN, (KERN_CRIT + "megaraid: command posting timed out\n")); + + BUG(); + continue; + } + + // Get SCB associated with this command id + if (completed[i] >= MBOX_MAX_SCSI_CMDS) { + // a cmm command + scb = adapter->uscb_list + (completed[i] - + MBOX_MAX_SCSI_CMDS); + } + else { + // an os command + scb = adapter->kscb_list + completed[i]; + } + + scb->status = mbox->status; + list_add_tail(&scb->list, &clist); + } + + // Acknowledge interrupt + WRINDOOR(raid_dev, 0x02); + + } while(1); + + spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags); + + + // put the completed commands in the completed list. DPC would + // complete these commands later + spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags); + + list_splice(&clist, &adapter->completed_list); + + spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags); + + + // schedule the DPC if there is some work for it + if (handled) + tasklet_schedule(&adapter->dpc_h); + + return handled; +} + + +/** + * megaraid_isr - isr for memory based mailbox based controllers + * @irq : irq + * @devp : pointer to our soft state + * + * Interrupt service routine for memory-mapped mailbox controllers. + */ +static irqreturn_t +megaraid_isr(int irq, void *devp) +{ + adapter_t *adapter = devp; + int handled; + + handled = megaraid_ack_sequence(adapter); + + /* Loop through any pending requests */ + if (!adapter->quiescent) { + megaraid_mbox_runpendq(adapter, NULL); + } + + return IRQ_RETVAL(handled); +} + + +/** + * megaraid_mbox_dpc - the tasklet to complete the commands from completed list + * @devp : pointer to HBA soft state + * + * Pick up the commands from the completed list and send back to the owners. + * This is a reentrant function and does not assume any locks are held while + * it is being called. 
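+ *
+ * Completed SCBs are first spliced onto a local list under the completed
+ * list lock, so new completions can keep arriving while earlier ones are
+ * processed. Management packets are finished through
+ * megaraid_mbox_mm_done(); SCSI commands have their firmware status
+ * translated into a SCSI result (0x00 -> DID_OK, 0x02 -> check condition
+ * with sense data, 0x08 -> DID_BUS_BUSY), are DMA-unmapped, returned to
+ * the free SCB pool and completed with scsi_done().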
+ */ +static void +megaraid_mbox_dpc(unsigned long devp) +{ + adapter_t *adapter = (adapter_t *)devp; + mraid_device_t *raid_dev; + struct list_head clist; + struct scatterlist *sgl; + scb_t *scb; + scb_t *tmp; + struct scsi_cmnd *scp; + mraid_passthru_t *pthru; + mraid_epassthru_t *epthru; + mbox_ccb_t *ccb; + int islogical; + int pdev_index; + int pdev_state; + mbox_t *mbox; + unsigned long flags; + uint8_t c; + int status; + uioc_t *kioc; + + + if (!adapter) return; + + raid_dev = ADAP2RAIDDEV(adapter); + + // move the SCBs from the completed list to our local list + INIT_LIST_HEAD(&clist); + + spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags); + + list_splice_init(&adapter->completed_list, &clist); + + spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags); + + + list_for_each_entry_safe(scb, tmp, &clist, list) { + + status = scb->status; + scp = scb->scp; + ccb = (mbox_ccb_t *)scb->ccb; + pthru = ccb->pthru; + epthru = ccb->epthru; + mbox = ccb->mbox; + + // Make sure f/w has completed a valid command + if (scb->state != SCB_ISSUED) { + con_log(CL_ANN, (KERN_CRIT + "megaraid critical err: invalid command %d:%d:%p\n", + scb->sno, scb->state, scp)); + BUG(); + continue; // Must never happen! + } + + // check for the management command and complete it right away + if (scb->sno >= MBOX_MAX_SCSI_CMDS) { + scb->state = SCB_FREE; + scb->status = status; + + // remove from local clist + list_del_init(&scb->list); + + kioc = (uioc_t *)scb->gp; + kioc->status = 0; + + megaraid_mbox_mm_done(adapter, scb); + + continue; + } + + // Was an abort issued for this command earlier + if (scb->state & SCB_ABORT) { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: aborted cmd [%x] completed\n", + scb->sno)); + } + + /* + * If the inquiry came of a disk drive which is not part of + * any RAID array, expose it to the kernel. For this to be + * enabled, user must set the "megaraid_expose_unconf_disks" + * flag to 1 by specifying it on module parameter list. + * This would enable data migration off drives from other + * configurations. 
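+ *
+ * The test below inspects the peripheral device type in the low five
+ * bits of the first INQUIRY byte. If the device is a disk and is either
+ * already part of a configuration (online, failed, rebuilding or a hot
+ * spare) or exposing unconfigured disks is disabled, the status is
+ * overridden with 0xF0 so the command falls through to the default case
+ * and the disk is reported as DID_BAD_TARGET, i.e. stays hidden.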
+ */ + islogical = MRAID_IS_LOGICAL(adapter, scp); + if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0 + && IS_RAID_CH(raid_dev, scb->dev_channel)) { + + sgl = scsi_sglist(scp); + if (sg_page(sgl)) { + c = *(unsigned char *) sg_virt(&sgl[0]); + } else { + con_log(CL_ANN, (KERN_WARNING + "megaraid mailbox: invalid sg:%d\n", + __LINE__)); + c = 0; + } + + if ((c & 0x1F ) == TYPE_DISK) { + pdev_index = (scb->dev_channel * 16) + + scb->dev_target; + pdev_state = + raid_dev->pdrv_state[pdev_index] & 0x0F; + + if (pdev_state == PDRV_ONLINE || + pdev_state == PDRV_FAILED || + pdev_state == PDRV_RBLD || + pdev_state == PDRV_HOTSPARE || + megaraid_expose_unconf_disks == 0) { + + status = 0xF0; + } + } + } + + // Convert MegaRAID status to Linux error code + switch (status) { + + case 0x00: + + scp->result = (DID_OK << 16); + break; + + case 0x02: + + /* set sense_buffer and result fields */ + if (mbox->cmd == MBOXCMD_PASSTHRU || + mbox->cmd == MBOXCMD_PASSTHRU64) { + + memcpy(scp->sense_buffer, pthru->reqsensearea, + 14); + + scp->result = SAM_STAT_CHECK_CONDITION; + } + else { + if (mbox->cmd == MBOXCMD_EXTPTHRU) { + + memcpy(scp->sense_buffer, + epthru->reqsensearea, 14); + + scp->result = SAM_STAT_CHECK_CONDITION; + } else + scsi_build_sense(scp, 0, + ABORTED_COMMAND, 0, 0); + } + break; + + case 0x08: + + scp->result = DID_BUS_BUSY << 16 | status; + break; + + default: + + /* + * If TEST_UNIT_READY fails, we know RESERVATION_STATUS + * failed + */ + if (scp->cmnd[0] == TEST_UNIT_READY) { + scp->result = DID_ERROR << 16 | + SAM_STAT_RESERVATION_CONFLICT; + } + else + /* + * Error code returned is 1 if Reserve or Release + * failed or the input parameter is invalid + */ + if (status == 1 && (scp->cmnd[0] == RESERVE || + scp->cmnd[0] == RELEASE)) { + + scp->result = DID_ERROR << 16 | + SAM_STAT_RESERVATION_CONFLICT; + } + else { + scp->result = DID_BAD_TARGET << 16 | status; + } + } + + // print a debug message for all failed commands + if (status) { + megaraid_mbox_display_scb(adapter, scb); + } + + scsi_dma_unmap(scp); + + // remove from local clist + list_del_init(&scb->list); + + // put back in free list + megaraid_dealloc_scb(adapter, scb); + + // send the scsi packet back to kernel + scsi_done(scp); + } + + return; +} + + +/** + * megaraid_abort_handler - abort the scsi command + * @scp : command to be aborted + * + * Abort a previous SCSI request. Only commands on the pending list can be + * aborted. All the commands issued to the F/W must complete. + **/ +static int +megaraid_abort_handler(struct scsi_cmnd *scp) +{ + adapter_t *adapter; + mraid_device_t *raid_dev; + scb_t *scb; + scb_t *tmp; + int found; + unsigned long flags; + int i; + + + adapter = SCP2ADAPTER(scp); + raid_dev = ADAP2RAIDDEV(adapter); + + con_log(CL_ANN, (KERN_WARNING + "megaraid: aborting cmd=%x \n", + scp->cmnd[0], SCP2CHANNEL(scp), + SCP2TARGET(scp), SCP2LUN(scp))); + + // If FW has stopped responding, simply return failure + if (raid_dev->hw_error) { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: hw error, not aborting\n")); + return FAILED; + } + + // There might a race here, where the command was completed by the + // firmware and now it is on the completed list. Before we could + // complete the command to the kernel in dpc, the abort came. + // Find out if this is the case to avoid the race. 
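+ // The search therefore proceeds in three stages: the completed list
+ // first, then the pending list (still driver owned and safe to abort),
+ // and finally the adapter's kscb_list to recognize commands owned by
+ // the firmware, which cannot be aborted here and make us return FAILED.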
+ scb = NULL; + spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags); + list_for_each_entry_safe(scb, tmp, &adapter->completed_list, list) { + + if (scb->scp == scp) { // Found command + + list_del_init(&scb->list); // from completed list + + con_log(CL_ANN, (KERN_WARNING + "megaraid: %d[%d:%d], abort from completed list\n", + scb->sno, scb->dev_channel, scb->dev_target)); + + scp->result = (DID_ABORT << 16); + scsi_done(scp); + + megaraid_dealloc_scb(adapter, scb); + + spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), + flags); + + return SUCCESS; + } + } + spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags); + + + // Find out if this command is still on the pending list. If it is and + // was never issued, abort and return success. If the command is owned + // by the firmware, we must wait for it to complete by the FW. + spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); + list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) { + + if (scb->scp == scp) { // Found command + + list_del_init(&scb->list); // from pending list + + ASSERT(!(scb->state & SCB_ISSUED)); + + con_log(CL_ANN, (KERN_WARNING + "megaraid abort: [%d:%d], driver owner\n", + scb->dev_channel, scb->dev_target)); + + scp->result = (DID_ABORT << 16); + scsi_done(scp); + + megaraid_dealloc_scb(adapter, scb); + + spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), + flags); + + return SUCCESS; + } + } + spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); + + + // Check do we even own this command, in which case this would be + // owned by the firmware. The only way to locate the FW scb is to + // traverse through the list of all SCB, since driver does not + // maintain these SCBs on any list + found = 0; + spin_lock_irq(&adapter->lock); + for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { + scb = adapter->kscb_list + i; + + if (scb->scp == scp) { + + found = 1; + + if (!(scb->state & SCB_ISSUED)) { + con_log(CL_ANN, (KERN_WARNING + "megaraid abort: %d[%d:%d], invalid state\n", + scb->sno, scb->dev_channel, scb->dev_target)); + BUG(); + } + else { + con_log(CL_ANN, (KERN_WARNING + "megaraid abort: %d[%d:%d], fw owner\n", + scb->sno, scb->dev_channel, scb->dev_target)); + } + } + } + spin_unlock_irq(&adapter->lock); + + if (!found) { + con_log(CL_ANN, (KERN_WARNING "megaraid abort: do now own\n")); + + // FIXME: Should there be a callback for this command? + return SUCCESS; + } + + // We cannot actually abort a command owned by firmware, return + // failure and wait for reset. In host reset handler, we will find out + // if the HBA is still live + return FAILED; +} + +/** + * megaraid_reset_handler - device reset handler for mailbox based driver + * @scp : reference command + * + * Reset handler for the mailbox based controller. First try to find out if + * the FW is still live, in which case the outstanding commands counter mut go + * down to 0. If that happens, also issue the reservation reset command to + * relinquish (possible) reservations on the logical drives connected to this + * host. 
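+ *
+ * The handler first completes every command still owned by the driver on
+ * the pending list (SCSI packets with DID_RESET, management packets with
+ * -EFAULT), then polls for up to MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT
+ * seconds, running megaraid_ack_sequence() once a second, until
+ * outstanding_cmds drains to zero. If it does not, the adapter is marked
+ * with hw_error and FAILED is returned; otherwise, on clustering capable
+ * firmware, a RESET_RESERVATIONS cluster command is issued as well.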
+ **/ +static int +megaraid_reset_handler(struct scsi_cmnd *scp) +{ + adapter_t *adapter; + scb_t *scb; + scb_t *tmp; + mraid_device_t *raid_dev; + unsigned long flags; + uint8_t raw_mbox[sizeof(mbox_t)]; + int rval; + int recovery_window; + int i; + uioc_t *kioc; + + adapter = SCP2ADAPTER(scp); + raid_dev = ADAP2RAIDDEV(adapter); + + // return failure if adapter is not responding + if (raid_dev->hw_error) { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: hw error, cannot reset\n")); + return FAILED; + } + + // Under exceptional conditions, FW can take up to 3 minutes to + // complete command processing. Wait for additional 2 minutes for the + // pending commands counter to go down to 0. If it doesn't, let the + // controller be marked offline + // Also, reset all the commands currently owned by the driver + spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); + list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) { + list_del_init(&scb->list); // from pending list + + if (scb->sno >= MBOX_MAX_SCSI_CMDS) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: IOCTL packet with %d[%d:%d] being reset\n", + scb->sno, scb->dev_channel, scb->dev_target)); + + scb->status = -1; + + kioc = (uioc_t *)scb->gp; + kioc->status = -EFAULT; + + megaraid_mbox_mm_done(adapter, scb); + } else { + if (scb->scp == scp) { // Found command + con_log(CL_ANN, (KERN_WARNING + "megaraid: %d[%d:%d], reset from pending list\n", + scb->sno, scb->dev_channel, scb->dev_target)); + } else { + con_log(CL_ANN, (KERN_WARNING + "megaraid: IO packet with %d[%d:%d] being reset\n", + scb->sno, scb->dev_channel, scb->dev_target)); + } + + scb->scp->result = (DID_RESET << 16); + scsi_done(scb->scp); + + megaraid_dealloc_scb(adapter, scb); + } + } + spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); + + if (adapter->outstanding_cmds) { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: %d outstanding commands. 
Max wait %d sec\n", + adapter->outstanding_cmds, + (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT))); + } + + recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; + + for (i = 0; i < recovery_window; i++) { + + megaraid_ack_sequence(adapter); + + // print a message once every 5 seconds only + if (!(i % 5)) { + con_log(CL_ANN, ( + "megaraid mbox: Wait for %d commands to complete:%d\n", + adapter->outstanding_cmds, + (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i)); + } + + // bailout if no recovery happened in reset time + if (adapter->outstanding_cmds == 0) { + break; + } + + msleep(1000); + } + + spin_lock(&adapter->lock); + + // If still outstanding commands, bail out + if (adapter->outstanding_cmds) { + con_log(CL_ANN, (KERN_WARNING + "megaraid mbox: critical hardware error!\n")); + + raid_dev->hw_error = 1; + + rval = FAILED; + goto out; + } + else { + con_log(CL_ANN, (KERN_NOTICE + "megaraid mbox: reset sequence completed successfully\n")); + } + + + // If the controller supports clustering, reset reservations + if (!adapter->ha) { + rval = SUCCESS; + goto out; + } + + // clear reservations if any + raw_mbox[0] = CLUSTER_CMD; + raw_mbox[2] = RESET_RESERVATIONS; + + rval = SUCCESS; + if (mbox_post_sync_cmd_fast(adapter, raw_mbox) == 0) { + con_log(CL_ANN, + (KERN_INFO "megaraid: reservation reset\n")); + } + else { + rval = FAILED; + con_log(CL_ANN, (KERN_WARNING + "megaraid: reservation reset failed\n")); + } + + out: + spin_unlock(&adapter->lock); + return rval; +} + +/* + * START: internal commands library + * + * This section of the driver has the common routine used by the driver and + * also has all the FW routines + */ + +/** + * mbox_post_sync_cmd() - blocking command to the mailbox based controllers + * @adapter : controller's soft state + * @raw_mbox : the mailbox + * + * Issue a scb in synchronous and non-interrupt mode for mailbox based + * controllers. + */ +static int +mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[]) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + mbox_t *mbox; + uint8_t status; + int i; + + mbox = raid_dev->mbox; + + /* + * Wait until mailbox is free + */ + if (megaraid_busywait_mbox(raid_dev) != 0) + goto blocked_mailbox; + + /* + * Copy mailbox data into host structure + */ + memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16); + mbox->cmdid = 0xFE; + mbox->busy = 1; + mbox->poll = 0; + mbox->ack = 0; + mbox->numstatus = 0xFF; + mbox->status = 0xFF; + + wmb(); + WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); + + // wait for maximum 1 second for status to post. 
If the status is not + // available within 1 second, assume FW is initializing and wait + // for an extended amount of time + if (mbox->numstatus == 0xFF) { // status not yet available + udelay(25); + + for (i = 0; mbox->numstatus == 0xFF && i < 1000; i++) { + rmb(); + msleep(1); + } + + + if (i == 1000) { + con_log(CL_ANN, (KERN_NOTICE + "megaraid mailbox: wait for FW to boot ")); + + for (i = 0; (mbox->numstatus == 0xFF) && + (i < MBOX_RESET_WAIT); i++) { + rmb(); + con_log(CL_ANN, ("\b\b\b\b\b[%03d]", + MBOX_RESET_WAIT - i)); + msleep(1000); + } + + if (i == MBOX_RESET_WAIT) { + + con_log(CL_ANN, ( + "\nmegaraid mailbox: status not available\n")); + + return -1; + } + con_log(CL_ANN, ("\b\b\b\b\b[ok] \n")); + } + } + + // wait for maximum 1 second for poll semaphore + if (mbox->poll != 0x77) { + udelay(25); + + for (i = 0; (mbox->poll != 0x77) && (i < 1000); i++) { + rmb(); + msleep(1); + } + + if (i == 1000) { + con_log(CL_ANN, (KERN_WARNING + "megaraid mailbox: could not get poll semaphore\n")); + return -1; + } + } + + WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2); + wmb(); + + // wait for maximum 1 second for acknowledgement + if (RDINDOOR(raid_dev) & 0x2) { + udelay(25); + + for (i = 0; (RDINDOOR(raid_dev) & 0x2) && (i < 1000); i++) { + rmb(); + msleep(1); + } + + if (i == 1000) { + con_log(CL_ANN, (KERN_WARNING + "megaraid mailbox: could not acknowledge\n")); + return -1; + } + } + mbox->poll = 0; + mbox->ack = 0x77; + + status = mbox->status; + + // invalidate the completed command id array. After command + // completion, firmware would write the valid id. + mbox->numstatus = 0xFF; + mbox->status = 0xFF; + for (i = 0; i < MBOX_MAX_FIRMWARE_STATUS; i++) { + mbox->completed[i] = 0xFF; + } + + return status; + +blocked_mailbox: + + con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n") ); + return -1; +} + + +/** + * mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers + * @adapter : controller's soft state + * @raw_mbox : the mailbox + * + * Issue a scb in synchronous and non-interrupt mode for mailbox based + * controllers. This is a faster version of the synchronous command and + * therefore can be called in interrupt-context as well. + */ +static int +mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[]) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + mbox_t *mbox; + long i; + + + mbox = raid_dev->mbox; + + // return immediately if the mailbox is busy + if (mbox->busy) return -1; + + // Copy mailbox data into host structure + memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 14); + mbox->cmdid = 0xFE; + mbox->busy = 1; + mbox->poll = 0; + mbox->ack = 0; + mbox->numstatus = 0xFF; + mbox->status = 0xFF; + + wmb(); + WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); + + for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) { + if (mbox->numstatus != 0xFF) break; + rmb(); + udelay(MBOX_SYNC_DELAY_200); + } + + if (i == MBOX_SYNC_WAIT_CNT) { + // We may need to re-calibrate the counter + con_log(CL_ANN, (KERN_CRIT + "megaraid: fast sync command timed out\n")); + } + + WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2); + wmb(); + + return mbox->status; +} + + +/** + * megaraid_busywait_mbox() - Wait until the controller's mailbox is available + * @raid_dev : RAID device (HBA) soft state + * + * Wait until the controller's mailbox is available to accept more commands. + * Wait for at most 1 second. 
+ */ +static int +megaraid_busywait_mbox(mraid_device_t *raid_dev) +{ + mbox_t *mbox = raid_dev->mbox; + int i = 0; + + if (mbox->busy) { + udelay(25); + for (i = 0; mbox->busy && i < 1000; i++) + msleep(1); + } + + if (i < 1000) return 0; + else return -1; +} + + +/** + * megaraid_mbox_product_info - some static information about the controller + * @adapter : our soft state + * + * Issue commands to the controller to grab some parameters required by our + * caller. + */ +static int +megaraid_mbox_product_info(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + mbox_t *mbox; + uint8_t raw_mbox[sizeof(mbox_t)]; + mraid_pinfo_t *pinfo; + dma_addr_t pinfo_dma_h; + mraid_inquiry3_t *mraid_inq3; + int i; + + + memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox)); + mbox = (mbox_t *)raw_mbox; + + /* + * Issue an ENQUIRY3 command to find out certain adapter parameters, + * e.g., max channels, max commands etc. + */ + pinfo = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), + &pinfo_dma_h, GFP_KERNEL); + if (pinfo == NULL) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d\n", __func__, + __LINE__)); + + return -1; + } + + mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; + memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); + + raw_mbox[0] = FC_NEW_CONFIG; + raw_mbox[2] = NC_SUBOP_ENQUIRY3; + raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; + + // Issue the command + if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { + + con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n")); + + dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), + pinfo, pinfo_dma_h); + + return -1; + } + + /* + * Collect information about state of each physical drive + * attached to the controller. We will expose all the disks + * which are not part of RAID + */ + mraid_inq3 = (mraid_inquiry3_t *)adapter->ibuf; + for (i = 0; i < MBOX_MAX_PHYSICAL_DRIVES; i++) { + raid_dev->pdrv_state[i] = mraid_inq3->pdrv_state[i]; + } + + /* + * Get product info for information like number of channels, + * maximum commands supported. + */ + memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox)); + mbox->xferaddr = (uint32_t)pinfo_dma_h; + + raw_mbox[0] = FC_NEW_CONFIG; + raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; + + if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: product info failed\n")); + + dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), + pinfo, pinfo_dma_h); + + return -1; + } + + /* + * Setup some parameters for host, as required by our caller + */ + adapter->max_channel = pinfo->nchannels; + + /* + * we will export all the logical drives on a single channel. 
+ * Add 1 since inquires do not come for inititor ID + */ + adapter->max_target = MAX_LOGICAL_DRIVES_40LD + 1; + adapter->max_lun = 8; // up to 8 LUNs for non-disk devices + + /* + * These are the maximum outstanding commands for the scsi-layer + */ + adapter->max_cmds = MBOX_MAX_SCSI_CMDS; + + memset(adapter->fw_version, 0, VERSION_SIZE); + memset(adapter->bios_version, 0, VERSION_SIZE); + + memcpy(adapter->fw_version, pinfo->fw_version, 4); + adapter->fw_version[4] = 0; + + memcpy(adapter->bios_version, pinfo->bios_version, 4); + adapter->bios_version[4] = 0; + + con_log(CL_ANN, (KERN_NOTICE + "megaraid: fw version:[%s] bios version:[%s]\n", + adapter->fw_version, adapter->bios_version)); + + dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), pinfo, + pinfo_dma_h); + + return 0; +} + + + +/** + * megaraid_mbox_extended_cdb - check for support for extended CDBs + * @adapter : soft state for the controller + * + * This routine check whether the controller in question supports extended + * ( > 10 bytes ) CDBs. + */ +static int +megaraid_mbox_extended_cdb(adapter_t *adapter) +{ + mbox_t *mbox; + uint8_t raw_mbox[sizeof(mbox_t)]; + int rval; + + mbox = (mbox_t *)raw_mbox; + + memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox)); + mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; + + memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); + + raw_mbox[0] = MAIN_MISC_OPCODE; + raw_mbox[2] = SUPPORT_EXT_CDB; + + /* + * Issue the command + */ + rval = 0; + if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { + rval = -1; + } + + return rval; +} + + +/** + * megaraid_mbox_support_ha - Do we support clustering + * @adapter : soft state for the controller + * @init_id : ID of the initiator + * + * Determine if the firmware supports clustering and the ID of the initiator. + */ +static int +megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id) +{ + mbox_t *mbox; + uint8_t raw_mbox[sizeof(mbox_t)]; + int rval; + + + mbox = (mbox_t *)raw_mbox; + + memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox)); + + mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; + + memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); + + raw_mbox[0] = GET_TARGET_ID; + + // Issue the command + *init_id = 7; + rval = -1; + if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { + + *init_id = *(uint8_t *)adapter->ibuf; + + con_log(CL_ANN, (KERN_INFO + "megaraid: cluster firmware, initiator ID: %d\n", + *init_id)); + + rval = 0; + } + + return rval; +} + + +/** + * megaraid_mbox_support_random_del - Do we support random deletion + * @adapter : soft state for the controller + * + * Determine if the firmware supports random deletion. + * Return: 1 is operation supported, 0 otherwise + */ +static int +megaraid_mbox_support_random_del(adapter_t *adapter) +{ + uint8_t raw_mbox[sizeof(mbox_t)]; + int rval; + + /* + * Newer firmware on Dell CERC expect a different + * random deletion handling, so disable it. 
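+ *
+ * The check below matches AMI MEGARAID3 boards carrying the Dell CERC
+ * ATA100 4-channel subsystem ID whose firmware version string compares
+ * later than "6.61"; the comparison is done character by character, so a
+ * version such as "6.62" (fw_version[0] == '6', fw_version[2] == '6',
+ * fw_version[3] == '2', assuming the second character is the dot) is
+ * treated as newer firmware and random deletion is left disabled.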
+ */ + if (adapter->pdev->vendor == PCI_VENDOR_ID_AMI && + adapter->pdev->device == PCI_DEVICE_ID_AMI_MEGARAID3 && + adapter->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && + adapter->pdev->subsystem_device == PCI_SUBSYS_ID_CERC_ATA100_4CH && + (adapter->fw_version[0] > '6' || + (adapter->fw_version[0] == '6' && + adapter->fw_version[2] > '6') || + (adapter->fw_version[0] == '6' + && adapter->fw_version[2] == '6' + && adapter->fw_version[3] > '1'))) { + con_log(CL_DLEVEL1, ("megaraid: disable random deletion\n")); + return 0; + } + + memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); + + raw_mbox[0] = FC_DEL_LOGDRV; + raw_mbox[2] = OP_SUP_DEL_LOGDRV; + + // Issue the command + rval = 0; + if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { + + con_log(CL_DLEVEL1, ("megaraid: supports random deletion\n")); + + rval = 1; + } + + return rval; +} + + +/** + * megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware + * @adapter : soft state for the controller + * + * Find out the maximum number of scatter-gather elements supported by the + * firmware. + */ +static int +megaraid_mbox_get_max_sg(adapter_t *adapter) +{ + mbox_t *mbox; + uint8_t raw_mbox[sizeof(mbox_t)]; + int nsg; + + + mbox = (mbox_t *)raw_mbox; + + memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); + + mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; + + memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); + + raw_mbox[0] = MAIN_MISC_OPCODE; + raw_mbox[2] = GET_MAX_SG_SUPPORT; + + // Issue the command + if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { + nsg = *(uint8_t *)adapter->ibuf; + } + else { + nsg = MBOX_DEFAULT_SG_SIZE; + } + + if (nsg > MBOX_MAX_SG_SIZE) nsg = MBOX_MAX_SG_SIZE; + + return nsg; +} + + +/** + * megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels + * @adapter : soft state for the controller + * + * Enumerate the RAID and SCSI channels for ROMB platforms so that channels + * can be exported as regular SCSI channels. + */ +static void +megaraid_mbox_enum_raid_scsi(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + mbox_t *mbox; + uint8_t raw_mbox[sizeof(mbox_t)]; + + + mbox = (mbox_t *)raw_mbox; + + memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); + + mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; + + memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); + + raw_mbox[0] = CHNL_CLASS; + raw_mbox[2] = GET_CHNL_CLASS; + + // Issue the command. If the command fails, all channels are RAID + // channels + raid_dev->channel_class = 0xFF; + if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { + raid_dev->channel_class = *(uint8_t *)adapter->ibuf; + } + + return; +} + + +/** + * megaraid_mbox_flush_cache - flush adapter and disks cache + * @adapter : soft state for the controller + * + * Flush adapter cache followed by disks cache. + */ +static void +megaraid_mbox_flush_cache(adapter_t *adapter) +{ + uint8_t raw_mbox[sizeof(mbox_t)]; + + memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); + + raw_mbox[0] = FLUSH_ADAPTER; + + if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { + con_log(CL_ANN, ("megaraid: flush adapter failed\n")); + } + + raw_mbox[0] = FLUSH_SYSTEM; + + if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { + con_log(CL_ANN, ("megaraid: flush disks cache failed\n")); + } + + return; +} + + +/** + * megaraid_mbox_fire_sync_cmd - fire the sync cmd + * @adapter : soft state for the controller + * + * Clears the pending cmds in FW and reinits its RAID structs. 
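+ *
+ * The routine writes opcode 0xFF straight into the mailbox and busy-waits
+ * for roughly a minute: firmware that supports the command clears
+ * mbox->cmd, firmware that does not sets numstatus to 1. Any pending
+ * interrupt is then cleared by writing the outdoor register value back
+ * and acknowledging through the indoor register. Returns 0 on success
+ * and 1 if the mailbox was blocked, the command is unsupported, or the
+ * wait timed out.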
+ */ +static int +megaraid_mbox_fire_sync_cmd(adapter_t *adapter) +{ + mbox_t *mbox; + uint8_t raw_mbox[sizeof(mbox_t)]; + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + int status = 0; + int i; + uint32_t dword; + + memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); + + raw_mbox[0] = 0xFF; + + mbox = raid_dev->mbox; + + /* Wait until mailbox is free */ + if (megaraid_busywait_mbox(raid_dev) != 0) { + status = 1; + goto blocked_mailbox; + } + + /* Copy mailbox data into host structure */ + memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16); + mbox->cmdid = 0xFE; + mbox->busy = 1; + mbox->poll = 0; + mbox->ack = 0; + mbox->numstatus = 0; + mbox->status = 0; + + wmb(); + WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); + + /* Wait for maximum 1 min for status to post. + * If the Firmware SUPPORTS the ABOVE COMMAND, + * mbox->cmd will be set to 0 + * else + * the firmware will reject the command with + * mbox->numstatus set to 1 + */ + + i = 0; + status = 0; + while (!mbox->numstatus && mbox->cmd == 0xFF) { + rmb(); + msleep(1); + i++; + if (i > 1000 * 60) { + status = 1; + break; + } + } + if (mbox->numstatus == 1) + status = 1; /*cmd not supported*/ + + /* Check for interrupt line */ + dword = RDOUTDOOR(raid_dev); + WROUTDOOR(raid_dev, dword); + WRINDOOR(raid_dev,2); + + return status; + +blocked_mailbox: + con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n")); + return status; +} + +/** + * megaraid_mbox_display_scb - display SCB information, mostly debug purposes + * @adapter : controller's soft state + * @scb : SCB to be displayed + * + * Diplay information about the given SCB iff the current debug level is + * verbose. + */ +static void +megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb) +{ + mbox_ccb_t *ccb; + struct scsi_cmnd *scp; + mbox_t *mbox; + int level; + int i; + + + ccb = (mbox_ccb_t *)scb->ccb; + scp = scb->scp; + mbox = ccb->mbox; + + level = CL_DLEVEL3; + + con_log(level, (KERN_NOTICE + "megaraid mailbox: status:%#x cmd:%#x id:%#x ", scb->status, + mbox->cmd, scb->sno)); + + con_log(level, ("sec:%#x lba:%#x addr:%#x ld:%d sg:%d\n", + mbox->numsectors, mbox->lba, mbox->xferaddr, mbox->logdrv, + mbox->numsge)); + + if (!scp) return; + + con_log(level, (KERN_NOTICE "scsi cmnd: ")); + + for (i = 0; i < scp->cmd_len; i++) { + con_log(level, ("%#2.02x ", scp->cmnd[i])); + } + + con_log(level, ("\n")); + + return; +} + + +/** + * megaraid_mbox_setup_device_map - manage device ids + * @adapter : Driver's soft state + * + * Manage the device ids to have an appropriate mapping between the kernel + * scsi addresses and megaraid scsi and logical drive addresses. We export + * scsi devices on their actual addresses, whereas the logical drives are + * exported on a virtual scsi channel. + */ +static void +megaraid_mbox_setup_device_map(adapter_t *adapter) +{ + uint8_t c; + uint8_t t; + + /* + * First fill the values on the logical drive channel + */ + for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++) + adapter->device_ids[adapter->max_channel][t] = + (t < adapter->init_id) ? 
t : t - 1; + + adapter->device_ids[adapter->max_channel][adapter->init_id] = 0xFF; + + /* + * Fill the values on the physical devices channels + */ + for (c = 0; c < adapter->max_channel; c++) + for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++) + adapter->device_ids[c][t] = (c << 8) | t; +} + + +/* + * END: internal commands library + */ + +/* + * START: Interface for the common management module + * + * This is the module, which interfaces with the common management module to + * provide support for ioctl and sysfs + */ + +/** + * megaraid_cmm_register - register with the management module + * @adapter : HBA soft state + * + * Register with the management module, which allows applications to issue + * ioctl calls to the drivers. This interface is used by the management module + * to setup sysfs support as well. + */ +static int +megaraid_cmm_register(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + mraid_mmadp_t adp; + scb_t *scb; + mbox_ccb_t *ccb; + int rval; + int i; + + // Allocate memory for the base list of scb for management module. + adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL); + + if (adapter->uscb_list == NULL) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d\n", __func__, + __LINE__)); + return -1; + } + + + // Initialize the synchronization parameters for resources for + // commands for management module + INIT_LIST_HEAD(&adapter->uscb_pool); + + spin_lock_init(USER_FREE_LIST_LOCK(adapter)); + + + + // link all the packets. Note, CCB for commands, coming from the + // commom management module, mailbox physical address are already + // setup by it. We just need placeholder for that in our local command + // control blocks + for (i = 0; i < MBOX_MAX_USER_CMDS; i++) { + + scb = adapter->uscb_list + i; + ccb = raid_dev->uccb_list + i; + + scb->ccb = (caddr_t)ccb; + ccb->mbox64 = raid_dev->umbox64 + i; + ccb->mbox = &ccb->mbox64->mbox32; + ccb->raw_mbox = (uint8_t *)ccb->mbox; + + scb->gp = 0; + + // COMMAND ID 0 - (MBOX_MAX_SCSI_CMDS-1) ARE RESERVED FOR + // COMMANDS COMING FROM IO SUBSYSTEM (MID-LAYER) + scb->sno = i + MBOX_MAX_SCSI_CMDS; + + scb->scp = NULL; + scb->state = SCB_FREE; + scb->dma_direction = DMA_NONE; + scb->dma_type = MRAID_DMA_NONE; + scb->dev_channel = -1; + scb->dev_target = -1; + + // put scb in the free pool + list_add_tail(&scb->list, &adapter->uscb_pool); + } + + adp.unique_id = adapter->unique_id; + adp.drvr_type = DRVRTYPE_MBOX; + adp.drvr_data = (unsigned long)adapter; + adp.pdev = adapter->pdev; + adp.issue_uioc = megaraid_mbox_mm_handler; + adp.timeout = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; + adp.max_kioc = MBOX_MAX_USER_CMDS; + + if ((rval = mraid_mm_register_adp(&adp)) != 0) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid mbox: did not register with CMM\n")); + + kfree(adapter->uscb_list); + } + + return rval; +} + + +/** + * megaraid_cmm_unregister - un-register with the management module + * @adapter : HBA soft state + * + * Un-register with the management module. + * FIXME: mgmt module must return failure for unregister if it has pending + * commands in LLD. 
+ */ +static int +megaraid_cmm_unregister(adapter_t *adapter) +{ + kfree(adapter->uscb_list); + mraid_mm_unregister_adp(adapter->unique_id); + return 0; +} + + +/** + * megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD + * @drvr_data : LLD specific data + * @kioc : CMM interface packet + * @action : command action + * + * This routine is invoked whenever the Common Management Module (CMM) has a + * command for us. The 'action' parameter specifies if this is a new command + * or otherwise. + */ +static int +megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action) +{ + adapter_t *adapter; + + if (action != IOCTL_ISSUE) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: unsupported management action:%#2x\n", + action)); + return (-ENOTSUPP); + } + + adapter = (adapter_t *)drvr_data; + + // make sure this adapter is not being detached right now. + if (atomic_read(&adapter->being_detached)) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: reject management request, detaching\n")); + return (-ENODEV); + } + + switch (kioc->opcode) { + + case GET_ADAP_INFO: + + kioc->status = gather_hbainfo(adapter, (mraid_hba_info_t *) + (unsigned long)kioc->buf_vaddr); + + kioc->done(kioc); + + return kioc->status; + + case MBOX_CMD: + + return megaraid_mbox_mm_command(adapter, kioc); + + default: + kioc->status = (-EINVAL); + kioc->done(kioc); + return (-EINVAL); + } + + return 0; // not reached +} + +/** + * megaraid_mbox_mm_command - issues commands routed through CMM + * @adapter : HBA soft state + * @kioc : management command packet + * + * Issues commands, which are routed through the management module. + */ +static int +megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc) +{ + struct list_head *head = &adapter->uscb_pool; + mbox64_t *mbox64; + uint8_t *raw_mbox; + scb_t *scb; + mbox_ccb_t *ccb; + unsigned long flags; + + // detach one scb from free pool + spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags); + + if (list_empty(head)) { // should never happen because of CMM + + con_log(CL_ANN, (KERN_WARNING + "megaraid mbox: bug in cmm handler, lost resources\n")); + + spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags); + + return (-EINVAL); + } + + scb = list_entry(head->next, scb_t, list); + list_del_init(&scb->list); + + spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags); + + scb->state = SCB_ACTIVE; + scb->dma_type = MRAID_DMA_NONE; + scb->dma_direction = DMA_NONE; + + ccb = (mbox_ccb_t *)scb->ccb; + mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; + raw_mbox = (uint8_t *)&mbox64->mbox32; + + memcpy(ccb->mbox64, mbox64, sizeof(mbox64_t)); + + scb->gp = (unsigned long)kioc; + + /* + * If it is a logdrv random delete operation, we have to wait till + * there are no outstanding cmds at the fw and then issue it directly + */ + if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) { + + if (wait_till_fw_empty(adapter)) { + con_log(CL_ANN, (KERN_NOTICE + "megaraid mbox: LD delete, timed out\n")); + + kioc->status = -ETIME; + + scb->status = -1; + + megaraid_mbox_mm_done(adapter, scb); + + return (-ETIME); + } + + INIT_LIST_HEAD(&scb->list); + + scb->state = SCB_ISSUED; + if (mbox_post_cmd(adapter, scb) != 0) { + + con_log(CL_ANN, (KERN_NOTICE + "megaraid mbox: LD delete, mailbox busy\n")); + + kioc->status = -EBUSY; + + scb->status = -1; + + megaraid_mbox_mm_done(adapter, scb); + + return (-EBUSY); + } + + return 0; + } + + // put the command on the pending list and execute + megaraid_mbox_runpendq(adapter, scb); + + return 0; +} + 
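+/*
+ * Illustrative sketch, not part of the original code flow documentation:
+ * a management application's mailbox reaches this driver roughly as
+ *
+ *     application ioctl -> megaraid_mm (CMM) -> megaraid_mbox_mm_handler()
+ *                       -> megaraid_mbox_mm_command() -> pending queue
+ *
+ * The one exception is a logical drive delete, i.e. a mailbox with
+ *
+ *     raw_mbox[0] == FC_DEL_LOGDRV and raw_mbox[2] == OP_DEL_LOGDRV
+ *
+ * which is held back by wait_till_fw_empty() below until the firmware has
+ * drained, then posted directly with mbox_post_cmd(); either way the
+ * completion is reported through megaraid_mbox_mm_done() -> kioc->done().
+ */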
+ +static int +wait_till_fw_empty(adapter_t *adapter) +{ + unsigned long flags = 0; + int i; + + + /* + * Set the quiescent flag to stop issuing cmds to FW. + */ + spin_lock_irqsave(&adapter->lock, flags); + adapter->quiescent++; + spin_unlock_irqrestore(&adapter->lock, flags); + + /* + * Wait till there are no more cmds outstanding at FW. Try for at most + * 60 seconds + */ + for (i = 0; i < 60 && adapter->outstanding_cmds; i++) { + con_log(CL_DLEVEL1, (KERN_INFO + "megaraid: FW has %d pending commands\n", + adapter->outstanding_cmds)); + + msleep(1000); + } + + return adapter->outstanding_cmds; +} + + +/** + * megaraid_mbox_mm_done - callback for CMM commands + * @adapter : HBA soft state + * @scb : completed command + * + * Callback routine for internal commands originated from the management + * module. + */ +static void +megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb) +{ + uioc_t *kioc; + mbox64_t *mbox64; + uint8_t *raw_mbox; + unsigned long flags; + + kioc = (uioc_t *)scb->gp; + mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; + mbox64->mbox32.status = scb->status; + raw_mbox = (uint8_t *)&mbox64->mbox32; + + + // put scb in the free pool + scb->state = SCB_FREE; + scb->scp = NULL; + + spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags); + + list_add(&scb->list, &adapter->uscb_pool); + + spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags); + + // if a delete logical drive operation succeeded, restart the + // controller + if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) { + + adapter->quiescent--; + + megaraid_mbox_runpendq(adapter, NULL); + } + + kioc->done(kioc); + + return; +} + + +/** + * gather_hbainfo - HBA characteristics for the applications + * @adapter : HBA soft state + * @hinfo : pointer to the caller's host info strucuture + */ +static int +gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo) +{ + hinfo->pci_vendor_id = adapter->pdev->vendor; + hinfo->pci_device_id = adapter->pdev->device; + hinfo->subsys_vendor_id = adapter->pdev->subsystem_vendor; + hinfo->subsys_device_id = adapter->pdev->subsystem_device; + + hinfo->pci_bus = adapter->pdev->bus->number; + hinfo->pci_dev_fn = adapter->pdev->devfn; + hinfo->pci_slot = PCI_SLOT(adapter->pdev->devfn); + hinfo->irq = adapter->host->irq; + hinfo->baseport = ADAP2RAIDDEV(adapter)->baseport; + + hinfo->unique_id = (hinfo->pci_bus << 8) | adapter->pdev->devfn; + hinfo->host_no = adapter->host->host_no; + + return 0; +} + +/* + * END: Interface for the common management module + */ + + + +/** + * megaraid_sysfs_alloc_resources - allocate sysfs related resources + * @adapter : controller's soft state + * + * Allocate packets required to issue FW calls whenever the sysfs attributes + * are read. These attributes would require up-to-date information from the + * FW. Also set up resources for mutual exclusion to share these resources and + * the wait queue. + * + * Return 0 on success. + * Return -ERROR_CODE on failure. 
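+ *
+ * Three resources are set up here: a management packet (sysfs_uioc), a
+ * 64-bit mailbox (sysfs_mbox64) and one DMA-coherent page (sysfs_buffer)
+ * for the firmware reply; if any allocation fails, whatever was obtained
+ * is released again through megaraid_sysfs_free_resources().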
+ */ +static int +megaraid_sysfs_alloc_resources(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + int rval = 0; + + raid_dev->sysfs_uioc = kmalloc(sizeof(uioc_t), GFP_KERNEL); + + raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL); + + raid_dev->sysfs_buffer = dma_alloc_coherent(&adapter->pdev->dev, + PAGE_SIZE, &raid_dev->sysfs_buffer_dma, GFP_KERNEL); + + if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 || + !raid_dev->sysfs_buffer) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d\n", __func__, + __LINE__)); + + rval = -ENOMEM; + + megaraid_sysfs_free_resources(adapter); + } + + mutex_init(&raid_dev->sysfs_mtx); + + init_waitqueue_head(&raid_dev->sysfs_wait_q); + + return rval; +} + + +/** + * megaraid_sysfs_free_resources - free sysfs related resources + * @adapter : controller's soft state + * + * Free packets allocated for sysfs FW commands + */ +static void +megaraid_sysfs_free_resources(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + + kfree(raid_dev->sysfs_uioc); + kfree(raid_dev->sysfs_mbox64); + + if (raid_dev->sysfs_buffer) { + dma_free_coherent(&adapter->pdev->dev, PAGE_SIZE, + raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma); + } +} + + +/** + * megaraid_sysfs_get_ldmap_done - callback for get ldmap + * @uioc : completed packet + * + * Callback routine called in the ISR/tasklet context for get ldmap call + */ +static void +megaraid_sysfs_get_ldmap_done(uioc_t *uioc) +{ + adapter_t *adapter = (adapter_t *)uioc->buf_vaddr; + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + + uioc->status = 0; + + wake_up(&raid_dev->sysfs_wait_q); +} + +/** + * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap + * @t : timed out timer + * + * Timeout routine to recover and return to application, in case the adapter + * has stopped responding. A timeout of 60 seconds for this command seems like + * a good value. + */ +static void +megaraid_sysfs_get_ldmap_timeout(struct timer_list *t) +{ + struct uioc_timeout *timeout = from_timer(timeout, t, timer); + uioc_t *uioc = timeout->uioc; + adapter_t *adapter = (adapter_t *)uioc->buf_vaddr; + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + + uioc->status = -ETIME; + + wake_up(&raid_dev->sysfs_wait_q); +} + + +/** + * megaraid_sysfs_get_ldmap - get update logical drive map + * @adapter : controller's soft state + * + * This routine will be called whenever user reads the logical drive + * attributes, go get the current logical drive mapping table from the + * firmware. We use the management API's to issue commands to the controller. + * + * NOTE: The commands issuance functionality is not generalized and + * implemented in context of "get ld map" command only. If required, the + * command issuance logical can be trivially pulled out and implemented as a + * standalone library. For now, this should suffice since there is no other + * user of this interface. + * + * Return 0 on success. + * Return -1 on failure. 
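+ *
+ * The sysfs mutex serializes readers. A FC_DEL_LOGDRV/OP_GET_LDID_MAP
+ * mailbox pointing at the pre-allocated DMA buffer is issued through
+ * megaraid_mbox_mm_command() with a 60 second on-stack timer armed as a
+ * safety net; the caller sleeps on sysfs_wait_q until either the done
+ * callback or the timeout updates the packet status, and on success the
+ * returned map is copied into curr_ldmap.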
+ */ +static int +megaraid_sysfs_get_ldmap(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + uioc_t *uioc; + mbox64_t *mbox64; + mbox_t *mbox; + char *raw_mbox; + struct uioc_timeout timeout; + caddr_t ldmap; + int rval = 0; + + /* + * Allow only one read at a time to go through the sysfs attributes + */ + mutex_lock(&raid_dev->sysfs_mtx); + + uioc = raid_dev->sysfs_uioc; + mbox64 = raid_dev->sysfs_mbox64; + ldmap = raid_dev->sysfs_buffer; + + memset(uioc, 0, sizeof(uioc_t)); + memset(mbox64, 0, sizeof(mbox64_t)); + memset(ldmap, 0, sizeof(raid_dev->curr_ldmap)); + + mbox = &mbox64->mbox32; + raw_mbox = (char *)mbox; + uioc->cmdbuf = (uint64_t)(unsigned long)mbox64; + uioc->buf_vaddr = (caddr_t)adapter; + uioc->status = -ENODATA; + uioc->done = megaraid_sysfs_get_ldmap_done; + + /* + * Prepare the mailbox packet to get the current logical drive mapping + * table + */ + mbox->xferaddr = (uint32_t)raid_dev->sysfs_buffer_dma; + + raw_mbox[0] = FC_DEL_LOGDRV; + raw_mbox[2] = OP_GET_LDID_MAP; + + /* + * Setup a timer to recover from a non-responding controller + */ + timeout.uioc = uioc; + timer_setup_on_stack(&timeout.timer, + megaraid_sysfs_get_ldmap_timeout, 0); + + timeout.timer.expires = jiffies + 60 * HZ; + add_timer(&timeout.timer); + + /* + * Send the command to the firmware + */ + rval = megaraid_mbox_mm_command(adapter, uioc); + + if (rval == 0) { // command successfully issued + wait_event(raid_dev->sysfs_wait_q, (uioc->status != -ENODATA)); + + /* + * Check if the command timed out + */ + if (uioc->status == -ETIME) { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: sysfs get ld map timed out\n")); + + rval = -ETIME; + } + else { + rval = mbox->status; + } + + if (rval == 0) { + memcpy(raid_dev->curr_ldmap, ldmap, + sizeof(raid_dev->curr_ldmap)); + } + else { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: get ld map failed with %x\n", rval)); + } + } + else { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: could not issue ldmap command:%x\n", rval)); + } + + + del_timer_sync(&timeout.timer); + destroy_timer_on_stack(&timeout.timer); + + mutex_unlock(&raid_dev->sysfs_mtx); + + return rval; +} + + +/** + * megaraid_mbox_app_hndl_show - display application handle for this adapter + * @dev : class device object representation for the host + * @attr : device attribute (unused) + * @buf : buffer to send data to + * + * Display the handle used by the applications while executing management + * tasks on the adapter. We invoke a management module API to get the adapter + * handle, since we do not interface with applications directly. + */ +static ssize_t +megaraid_mbox_app_hndl_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(shost); + uint32_t app_hndl; + + app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id); + + return sysfs_emit(buf, "%u\n", app_hndl); +} + + +/** + * megaraid_mbox_ld_show - display the logical drive number for this device + * @dev : device object representation for the scsi device + * @attr : device attribute to show + * @buf : buffer to send data to + * + * Display the logical drive number for the device in question, if it a valid + * logical drive. For physical devices, "-1" is returned. 
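+ *
+ * The show routine emits four space-separated integers: the SCSI ID, the
+ * logical drive number, the LD ID taken from the firmware map, and the
+ * application handle obtained from the management module.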
+ * + * The logical drive number is displayed in following format: + * + * + * + * + */ +static ssize_t +megaraid_mbox_ld_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(sdev->host); + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + int scsi_id = -1; + int logical_drv = -1; + int ldid_map = -1; + uint32_t app_hndl = 0; + int mapped_sdev_id; + int rval; + int i; + + if (raid_dev->random_del_supported && + MRAID_IS_LOGICAL_SDEV(adapter, sdev)) { + + rval = megaraid_sysfs_get_ldmap(adapter); + if (rval == 0) { + + for (i = 0; i < MAX_LOGICAL_DRIVES_40LD; i++) { + + mapped_sdev_id = sdev->id; + + if (sdev->id > adapter->init_id) { + mapped_sdev_id -= 1; + } + + if (raid_dev->curr_ldmap[i] == mapped_sdev_id) { + + scsi_id = sdev->id; + + logical_drv = i; + + ldid_map = raid_dev->curr_ldmap[i]; + + app_hndl = mraid_mm_adapter_app_handle( + adapter->unique_id); + + break; + } + } + } + else { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: sysfs get ld map failed: %x\n", + rval)); + } + } + + return sysfs_emit(buf, "%d %d %d %d\n", scsi_id, logical_drv, + ldid_map, app_hndl); +} + + +/* + * END: Mailbox Low Level Driver + */ +module_init(megaraid_init); +module_exit(megaraid_exit); diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h new file mode 100644 index 000000000..d2fe7f69c --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_mbox.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * + * Linux MegaRAID device driver + * + * Copyright (c) 2003-2004 LSI Logic Corporation. + * + * FILE : megaraid_mbox.h + */ + +#ifndef _MEGARAID_H_ +#define _MEGARAID_H_ + + +#include "mega_common.h" +#include "mbox_defs.h" +#include "megaraid_ioctl.h" + + +#define MEGARAID_VERSION "2.20.5.1" +#define MEGARAID_EXT_VERSION "(Release Date: Thu Nov 16 15:32:35 EST 2006)" + + +/* + * Define some PCI values here until they are put in the kernel + */ +#define PCI_DEVICE_ID_PERC4_DI_DISCOVERY 0x000E +#define PCI_SUBSYS_ID_PERC4_DI_DISCOVERY 0x0123 + +#define PCI_DEVICE_ID_PERC4_SC 0x1960 +#define PCI_SUBSYS_ID_PERC4_SC 0x0520 + +#define PCI_DEVICE_ID_PERC4_DC 0x1960 +#define PCI_SUBSYS_ID_PERC4_DC 0x0518 + +#define PCI_DEVICE_ID_VERDE 0x0407 + +#define PCI_DEVICE_ID_PERC4_DI_EVERGLADES 0x000F +#define PCI_SUBSYS_ID_PERC4_DI_EVERGLADES 0x014A + +#define PCI_DEVICE_ID_PERC4E_SI_BIGBEND 0x0013 +#define PCI_SUBSYS_ID_PERC4E_SI_BIGBEND 0x016c + +#define PCI_DEVICE_ID_PERC4E_DI_KOBUK 0x0013 +#define PCI_SUBSYS_ID_PERC4E_DI_KOBUK 0x016d + +#define PCI_DEVICE_ID_PERC4E_DI_CORVETTE 0x0013 +#define PCI_SUBSYS_ID_PERC4E_DI_CORVETTE 0x016e + +#define PCI_DEVICE_ID_PERC4E_DI_EXPEDITION 0x0013 +#define PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION 0x016f + +#define PCI_DEVICE_ID_PERC4E_DI_GUADALUPE 0x0013 +#define PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE 0x0170 + +#define PCI_DEVICE_ID_DOBSON 0x0408 + +#define PCI_DEVICE_ID_MEGARAID_SCSI_320_0 0x1960 +#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_0 0xA520 + +#define PCI_DEVICE_ID_MEGARAID_SCSI_320_1 0x1960 +#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_1 0x0520 + +#define PCI_DEVICE_ID_MEGARAID_SCSI_320_2 0x1960 +#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_2 0x0518 + +#define PCI_DEVICE_ID_MEGARAID_I4_133_RAID 0x1960 +#define PCI_SUBSYS_ID_MEGARAID_I4_133_RAID 0x0522 + +#define PCI_DEVICE_ID_MEGARAID_SATA_150_4 0x1960 +#define PCI_SUBSYS_ID_MEGARAID_SATA_150_4 0x4523 + +#define PCI_DEVICE_ID_MEGARAID_SATA_150_6 0x1960 +#define 
PCI_SUBSYS_ID_MEGARAID_SATA_150_6 0x0523 + +#define PCI_DEVICE_ID_LINDSAY 0x0409 + +#define PCI_DEVICE_ID_INTEL_RAID_SRCS16 0x1960 +#define PCI_SUBSYS_ID_INTEL_RAID_SRCS16 0x0523 + +#define PCI_DEVICE_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK 0x1960 +#define PCI_SUBSYS_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK 0x0520 + +#define PCI_SUBSYS_ID_PERC3_QC 0x0471 +#define PCI_SUBSYS_ID_PERC3_DC 0x0493 +#define PCI_SUBSYS_ID_PERC3_SC 0x0475 +#define PCI_SUBSYS_ID_CERC_ATA100_4CH 0x0511 + + +#define MBOX_MAX_SCSI_CMDS 128 // number of cmds reserved for kernel +#define MBOX_MAX_USER_CMDS 32 // number of cmds for applications +#define MBOX_DEF_CMD_PER_LUN 64 // default commands per lun +#define MBOX_DEFAULT_SG_SIZE 26 // default sg size supported by all fw +#define MBOX_MAX_SG_SIZE 32 // maximum scatter-gather list size +#define MBOX_MAX_SECTORS 128 // maximum sectors per IO +#define MBOX_TIMEOUT 30 // timeout value for internal cmds +#define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox +#define MBOX_RESET_WAIT 180 // wait these many seconds in reset +#define MBOX_RESET_EXT_WAIT 120 // extended wait reset +#define MBOX_SYNC_WAIT_CNT 0xFFFF // wait loop index for synchronous mode + +#define MBOX_SYNC_DELAY_200 200 // 200 micro-seconds + +/* + * maximum transfer that can happen through the firmware commands issued + * internnaly from the driver. + */ +#define MBOX_IBUF_SIZE 4096 + + +/** + * mbox_ccb_t - command control block specific to mailbox based controllers + * @raw_mbox : raw mailbox pointer + * @mbox : mailbox + * @mbox64 : extended mailbox + * @mbox_dma_h : mailbox dma address + * @sgl64 : 64-bit scatter-gather list + * @sgl32 : 32-bit scatter-gather list + * @sgl_dma_h : dma handle for the scatter-gather list + * @pthru : passthru structure + * @pthru_dma_h : dma handle for the passthru structure + * @epthru : extended passthru structure + * @epthru_dma_h : dma handle for extended passthru structure + * @buf_dma_h : dma handle for buffers w/o sg list + * + * command control block specific to the mailbox based controllers + */ +typedef struct { + uint8_t *raw_mbox; + mbox_t *mbox; + mbox64_t *mbox64; + dma_addr_t mbox_dma_h; + mbox_sgl64 *sgl64; + mbox_sgl32 *sgl32; + dma_addr_t sgl_dma_h; + mraid_passthru_t *pthru; + dma_addr_t pthru_dma_h; + mraid_epassthru_t *epthru; + dma_addr_t epthru_dma_h; + dma_addr_t buf_dma_h; +} mbox_ccb_t; + + +/** + * mraid_device_t - adapter soft state structure for mailbox controllers + * @una_mbox64 : 64-bit mbox - unaligned + * @una_mbox64_dma : mbox dma addr - unaligned + * @mbox : 32-bit mbox - aligned + * @mbox64 : 64-bit mbox - aligned + * @mbox_dma : mbox dma addr - aligned + * @mailbox_lock : exclusion lock for the mailbox + * @baseport : base port of hba memory + * @baseaddr : mapped addr of hba memory + * @mbox_pool : pool of mailboxes + * @mbox_pool_handle : handle for the mailbox pool memory + * @epthru_pool : a pool for extended passthru commands + * @epthru_pool_handle : handle to the pool above + * @sg_pool : pool of scatter-gather lists for this driver + * @sg_pool_handle : handle to the pool above + * @ccb_list : list of our command control blocks + * @uccb_list : list of cmd control blocks for mgmt module + * @umbox64 : array of mailbox for user commands (cmm) + * @pdrv_state : array for state of each physical drive. 
+ * @last_disp : flag used to show device scanning + * @hw_error : set if FW not responding + * @fast_load : If set, skip physical device scanning + * @channel_class : channel class, RAID or SCSI + * @sysfs_mtx : mutex to serialize access to sysfs res. + * @sysfs_uioc : management packet to issue FW calls from sysfs + * @sysfs_mbox64 : mailbox packet to issue FW calls from sysfs + * @sysfs_buffer : data buffer for FW commands issued from sysfs + * @sysfs_buffer_dma : DMA buffer for FW commands issued from sysfs + * @sysfs_wait_q : wait queue for sysfs operations + * @random_del_supported : set if the random deletion is supported + * @curr_ldmap : current LDID map + * + * Initialization structure for mailbox controllers: memory based and IO based + * All the fields in this structure are LLD specific and may be discovered at + * init() or start() time. + * + * NOTE: The fields of this structures are placed to minimize cache misses + */ +#define MAX_LD_EXTENDED64 64 +typedef struct { + mbox64_t *una_mbox64; + dma_addr_t una_mbox64_dma; + mbox_t *mbox; + mbox64_t *mbox64; + dma_addr_t mbox_dma; + spinlock_t mailbox_lock; + unsigned long baseport; + void __iomem * baseaddr; + struct mraid_pci_blk mbox_pool[MBOX_MAX_SCSI_CMDS]; + struct dma_pool *mbox_pool_handle; + struct mraid_pci_blk epthru_pool[MBOX_MAX_SCSI_CMDS]; + struct dma_pool *epthru_pool_handle; + struct mraid_pci_blk sg_pool[MBOX_MAX_SCSI_CMDS]; + struct dma_pool *sg_pool_handle; + mbox_ccb_t ccb_list[MBOX_MAX_SCSI_CMDS]; + mbox_ccb_t uccb_list[MBOX_MAX_USER_CMDS]; + mbox64_t umbox64[MBOX_MAX_USER_CMDS]; + + uint8_t pdrv_state[MBOX_MAX_PHYSICAL_DRIVES]; + uint32_t last_disp; + int hw_error; + int fast_load; + uint8_t channel_class; + struct mutex sysfs_mtx; + uioc_t *sysfs_uioc; + mbox64_t *sysfs_mbox64; + caddr_t sysfs_buffer; + dma_addr_t sysfs_buffer_dma; + wait_queue_head_t sysfs_wait_q; + int random_del_supported; + uint16_t curr_ldmap[MAX_LD_EXTENDED64]; +} mraid_device_t; + +// route to raid device from adapter +#define ADAP2RAIDDEV(adp) ((mraid_device_t *)((adp)->raid_device)) + +#define MAILBOX_LOCK(rdev) (&(rdev)->mailbox_lock) + +// Find out if this channel is a RAID or SCSI +#define IS_RAID_CH(rdev, ch) (((rdev)->channel_class >> (ch)) & 0x01) + + +#define RDINDOOR(rdev) readl((rdev)->baseaddr + 0x20) +#define RDOUTDOOR(rdev) readl((rdev)->baseaddr + 0x2C) +#define WRINDOOR(rdev, value) writel(value, (rdev)->baseaddr + 0x20) +#define WROUTDOOR(rdev, value) writel(value, (rdev)->baseaddr + 0x2C) + +#endif // _MEGARAID_H_ diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c new file mode 100644 index 000000000..c509440bd --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -0,0 +1,1246 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Linux MegaRAID device driver + * + * Copyright (c) 2003-2004 LSI Logic Corporation. 
+ * + * FILE : megaraid_mm.c + * Version : v2.20.2.7 (Jul 16 2006) + * + * Common management module + */ +#include +#include +#include +#include "megaraid_mm.h" + + +// Entry points for char node driver +static DEFINE_MUTEX(mraid_mm_mutex); +static int mraid_mm_open(struct inode *, struct file *); +static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long); + + +// routines to convert to and from the old the format +static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *); +static int kioc_to_mimd(uioc_t *, mimd_t __user *); + + +// Helper functions +static int handle_drvrcmd(void __user *, uint8_t, int *); +static int lld_ioctl(mraid_mmadp_t *, uioc_t *); +static void ioctl_done(uioc_t *); +static void lld_timedout(struct timer_list *); +static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *); +static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *); +static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *); +static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *); +static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int); +static int mraid_mm_setup_dma_pools(mraid_mmadp_t *); +static void mraid_mm_free_adp_resources(mraid_mmadp_t *); +static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *); + +MODULE_AUTHOR("LSI Logic Corporation"); +MODULE_DESCRIPTION("LSI Logic Management Module"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(LSI_COMMON_MOD_VERSION); + +static int dbglevel = CL_ANN; +module_param_named(dlevel, dbglevel, int, 0); +MODULE_PARM_DESC(dlevel, "Debug level (default=0)"); + +EXPORT_SYMBOL(mraid_mm_register_adp); +EXPORT_SYMBOL(mraid_mm_unregister_adp); +EXPORT_SYMBOL(mraid_mm_adapter_app_handle); + +static uint32_t drvr_ver = 0x02200207; + +static int adapters_count_g; +static struct list_head adapters_list_g; + +static wait_queue_head_t wait_q; + +static const struct file_operations lsi_fops = { + .open = mraid_mm_open, + .unlocked_ioctl = mraid_mm_unlocked_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .owner = THIS_MODULE, + .llseek = noop_llseek, +}; + +static struct miscdevice megaraid_mm_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "megadev0", + .fops = &lsi_fops, +}; + +/** + * mraid_mm_open - open routine for char node interface + * @inode : unused + * @filep : unused + * + * Allow ioctl operations by apps only if they have superuser privilege. + */ +static int +mraid_mm_open(struct inode *inode, struct file *filep) +{ + /* + * Only allow superuser to access private ioctl interface + */ + if (!capable(CAP_SYS_ADMIN)) return (-EACCES); + + return 0; +} + +/** + * mraid_mm_ioctl - module entry-point for ioctls + * @filep : file operations pointer (ignored) + * @cmd : ioctl command + * @arg : user ioctl packet + */ +static int +mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) +{ + uioc_t *kioc; + char signature[EXT_IOCTL_SIGN_SZ] = {0}; + int rval; + mraid_mmadp_t *adp; + uint8_t old_ioctl; + int drvrcmd_rval; + void __user *argp = (void __user *)arg; + + /* + * Make sure only USCSICMD are issued through this interface. + * MIMD application would still fire different command. + */ + + if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) { + return (-EINVAL); + } + + /* + * Look for signature to see if this is the new or old ioctl format. 
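+ * A packet carrying EXT_IOCTL_SIGN in its first EXT_IOCTL_SIGN_SZ bytes
+ * is a new-format (uioc_t) packet; anything else is treated as the old
+ * mimd_t layout, which is the only format handled further below.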
+ */ + if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) { + con_log(CL_ANN, (KERN_WARNING + "megaraid cmm: copy from usr addr failed\n")); + return (-EFAULT); + } + + if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0) + old_ioctl = 0; + else + old_ioctl = 1; + + /* + * At present, we don't support the new ioctl packet + */ + if (!old_ioctl ) + return (-EINVAL); + + /* + * If it is a driver ioctl (as opposed to fw ioctls), then we can + * handle the command locally. rval > 0 means it is not a drvr cmd + */ + rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval); + + if (rval < 0) + return rval; + else if (rval == 0) + return drvrcmd_rval; + + rval = 0; + if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) { + return rval; + } + + /* + * Check if adapter can accept ioctl. We may have marked it offline + * if any previous kioc had timedout on this controller. + */ + if (!adp->quiescent) { + con_log(CL_ANN, (KERN_WARNING + "megaraid cmm: controller cannot accept cmds due to " + "earlier errors\n" )); + return -EFAULT; + } + + /* + * The following call will block till a kioc is available + * or return NULL if the list head is empty for the pointer + * of type mraid_mmapt passed to mraid_mm_alloc_kioc + */ + kioc = mraid_mm_alloc_kioc(adp); + if (!kioc) + return -ENXIO; + + /* + * User sent the old mimd_t ioctl packet. Convert it to uioc_t. + */ + if ((rval = mimd_to_kioc(argp, adp, kioc))) { + mraid_mm_dealloc_kioc(adp, kioc); + return rval; + } + + kioc->done = ioctl_done; + + /* + * Issue the IOCTL to the low level driver. After the IOCTL completes + * release the kioc if and only if it was _not_ timedout. If it was + * timedout, that means that resources are still with low level driver. + */ + if ((rval = lld_ioctl(adp, kioc))) { + + if (!kioc->timedout) + mraid_mm_dealloc_kioc(adp, kioc); + + return rval; + } + + /* + * Convert the kioc back to user space + */ + rval = kioc_to_mimd(kioc, argp); + + /* + * Return the kioc to free pool + */ + mraid_mm_dealloc_kioc(adp, kioc); + + return rval; +} + +static long +mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + int err; + + mutex_lock(&mraid_mm_mutex); + err = mraid_mm_ioctl(filep, cmd, arg); + mutex_unlock(&mraid_mm_mutex); + + return err; +} + +/** + * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet + * @umimd : User space mimd_t ioctl packet + * @rval : returned success/error status + * + * The function return value is a pointer to the located @adapter. + */ +static mraid_mmadp_t * +mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) +{ + mraid_mmadp_t *adapter; + mimd_t mimd; + uint32_t adapno; + int iterator; + bool is_found; + + if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) { + *rval = -EFAULT; + return NULL; + } + + adapno = GETADAP(mimd.ui.fcs.adapno); + + if (adapno >= adapters_count_g) { + *rval = -ENODEV; + return NULL; + } + + adapter = NULL; + iterator = 0; + is_found = false; + + list_for_each_entry(adapter, &adapters_list_g, list) { + if (iterator++ == adapno) { + is_found = true; + break; + } + } + + if (!is_found) { + *rval = -ENODEV; + return NULL; + } + + return adapter; +} + +/** + * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it. 
+ * @arg : packet sent by the user app + * @old_ioctl : mimd if 1; uioc otherwise + * @rval : pointer for command's returned value (not function status) + */ +static int +handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval) +{ + mimd_t __user *umimd; + mimd_t kmimd; + uint8_t opcode; + uint8_t subopcode; + + if (old_ioctl) + goto old_packet; + else + goto new_packet; + +new_packet: + return (-ENOTSUPP); + +old_packet: + *rval = 0; + umimd = arg; + + if (copy_from_user(&kmimd, umimd, sizeof(mimd_t))) + return (-EFAULT); + + opcode = kmimd.ui.fcs.opcode; + subopcode = kmimd.ui.fcs.subopcode; + + /* + * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or + * GET_NUMADP, then we can handle. Otherwise we should return 1 to + * indicate that we cannot handle this. + */ + if (opcode != 0x82) + return 1; + + switch (subopcode) { + + case MEGAIOC_QDRVRVER: + + if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t))) + return (-EFAULT); + + return 0; + + case MEGAIOC_QNADAP: + + *rval = adapters_count_g; + + if (copy_to_user(kmimd.data, &adapters_count_g, + sizeof(uint32_t))) + return (-EFAULT); + + return 0; + + default: + /* cannot handle */ + return 1; + } + + return 0; +} + + +/** + * mimd_to_kioc - Converter from old to new ioctl format + * @umimd : user space old MIMD IOCTL + * @adp : adapter softstate + * @kioc : kernel space new format IOCTL + * + * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The + * new packet is in kernel space so that driver can perform operations on it + * freely. + */ + +static int +mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc) +{ + mbox64_t *mbox64; + mbox_t *mbox; + mraid_passthru_t *pthru32; + uint32_t adapno; + uint8_t opcode; + uint8_t subopcode; + mimd_t mimd; + + if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) + return (-EFAULT); + + /* + * Applications are not allowed to send extd pthru + */ + if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) || + (mimd.mbox[0] == MBOXCMD_EXTPTHRU)) + return (-EINVAL); + + opcode = mimd.ui.fcs.opcode; + subopcode = mimd.ui.fcs.subopcode; + adapno = GETADAP(mimd.ui.fcs.adapno); + + if (adapno >= adapters_count_g) + return (-ENODEV); + + kioc->adapno = adapno; + kioc->mb_type = MBOX_LEGACY; + kioc->app_type = APPTYPE_MIMD; + + switch (opcode) { + + case 0x82: + + if (subopcode == MEGAIOC_QADAPINFO) { + + kioc->opcode = GET_ADAP_INFO; + kioc->data_dir = UIOC_RD; + kioc->xferlen = sizeof(mraid_hba_info_t); + + if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen)) + return (-ENOMEM); + } + else { + con_log(CL_ANN, (KERN_WARNING + "megaraid cmm: Invalid subop\n")); + return (-EINVAL); + } + + break; + + case 0x81: + + kioc->opcode = MBOX_CMD; + kioc->xferlen = mimd.ui.fcs.length; + kioc->user_data_len = kioc->xferlen; + kioc->user_data = mimd.ui.fcs.buffer; + + if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen)) + return (-ENOMEM); + + if (mimd.outlen) kioc->data_dir = UIOC_RD; + if (mimd.inlen) kioc->data_dir |= UIOC_WR; + + break; + + case 0x80: + + kioc->opcode = MBOX_CMD; + kioc->xferlen = (mimd.outlen > mimd.inlen) ? 
+ mimd.outlen : mimd.inlen; + kioc->user_data_len = kioc->xferlen; + kioc->user_data = mimd.data; + + if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen)) + return (-ENOMEM); + + if (mimd.outlen) kioc->data_dir = UIOC_RD; + if (mimd.inlen) kioc->data_dir |= UIOC_WR; + + break; + + default: + return (-EINVAL); + } + + /* + * If driver command, nothing else to do + */ + if (opcode == 0x82) + return 0; + + /* + * This is a mailbox cmd; copy the mailbox from mimd + */ + mbox64 = (mbox64_t *)((unsigned long)kioc->cmdbuf); + mbox = &mbox64->mbox32; + memcpy(mbox, mimd.mbox, 14); + + if (mbox->cmd != MBOXCMD_PASSTHRU) { // regular DCMD + + mbox->xferaddr = (uint32_t)kioc->buf_paddr; + + if (kioc->data_dir & UIOC_WR) { + if (copy_from_user(kioc->buf_vaddr, kioc->user_data, + kioc->xferlen)) { + return (-EFAULT); + } + } + + return 0; + } + + /* + * This is a regular 32-bit pthru cmd; mbox points to pthru struct. + * Just like in above case, the beginning for memblk is treated as + * a mailbox. The passthru will begin at next 1K boundary. And the + * data will start 1K after that. + */ + pthru32 = kioc->pthru32; + kioc->user_pthru = &umimd->pthru; + mbox->xferaddr = (uint32_t)kioc->pthru32_h; + + if (copy_from_user(pthru32, kioc->user_pthru, + sizeof(mraid_passthru_t))) { + return (-EFAULT); + } + + pthru32->dataxferaddr = kioc->buf_paddr; + if (kioc->data_dir & UIOC_WR) { + if (pthru32->dataxferlen > kioc->xferlen) + return -EINVAL; + if (copy_from_user(kioc->buf_vaddr, kioc->user_data, + pthru32->dataxferlen)) { + return (-EFAULT); + } + } + + return 0; +} + +/** + * mraid_mm_attach_buf - Attach a free dma buffer for required size + * @adp : Adapter softstate + * @kioc : kioc that the buffer needs to be attached to + * @xferlen : required length for buffer + * + * First we search for a pool with smallest buffer that is >= @xferlen. If + * that pool has no free buffer, we will try for the next bigger size. If none + * is available, we will try to allocate the smallest buffer that is >= + * @xferlen and attach it the pool. + */ +static int +mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen) +{ + mm_dmapool_t *pool; + int right_pool = -1; + unsigned long flags; + int i; + + kioc->pool_index = -1; + kioc->buf_vaddr = NULL; + kioc->buf_paddr = 0; + kioc->free_buf = 0; + + /* + * We need xferlen amount of memory. See if we can get it from our + * dma pools. If we don't get exact size, we will try bigger buffer + */ + + for (i = 0; i < MAX_DMA_POOLS; i++) { + + pool = &adp->dma_pool_list[i]; + + if (xferlen > pool->buf_size) + continue; + + if (right_pool == -1) + right_pool = i; + + spin_lock_irqsave(&pool->lock, flags); + + if (!pool->in_use) { + + pool->in_use = 1; + kioc->pool_index = i; + kioc->buf_vaddr = pool->vaddr; + kioc->buf_paddr = pool->paddr; + + spin_unlock_irqrestore(&pool->lock, flags); + return 0; + } + else { + spin_unlock_irqrestore(&pool->lock, flags); + continue; + } + } + + /* + * If xferlen doesn't match any of our pools, return error + */ + if (right_pool == -1) + return -EINVAL; + + /* + * We did not get any buffer from the preallocated pool. Let us try + * to allocate one new buffer. NOTE: This is a blocking call. 
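+ * (Despite the note above, the dma_pool_alloc() below is issued with
+ * GFP_ATOMIC while the pool lock is held, so it does not sleep. The
+ * kioc is flagged with free_buf so that mraid_mm_dealloc_kioc() returns
+ * this buffer to the dma pool instead of merely clearing in_use.)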
+ */ + pool = &adp->dma_pool_list[right_pool]; + + spin_lock_irqsave(&pool->lock, flags); + + kioc->pool_index = right_pool; + kioc->free_buf = 1; + kioc->buf_vaddr = dma_pool_alloc(pool->handle, GFP_ATOMIC, + &kioc->buf_paddr); + spin_unlock_irqrestore(&pool->lock, flags); + + if (!kioc->buf_vaddr) + return -ENOMEM; + + return 0; +} + +/** + * mraid_mm_alloc_kioc - Returns a uioc_t from free list + * @adp : Adapter softstate for this module + * + * The kioc_semaphore is initialized with number of kioc nodes in the + * free kioc pool. If the kioc pool is empty, this function blocks till + * a kioc becomes free. + */ +static uioc_t * +mraid_mm_alloc_kioc(mraid_mmadp_t *adp) +{ + uioc_t *kioc; + struct list_head* head; + unsigned long flags; + + down(&adp->kioc_semaphore); + + spin_lock_irqsave(&adp->kioc_pool_lock, flags); + + head = &adp->kioc_pool; + + if (list_empty(head)) { + up(&adp->kioc_semaphore); + spin_unlock_irqrestore(&adp->kioc_pool_lock, flags); + + con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n")); + return NULL; + } + + kioc = list_entry(head->next, uioc_t, list); + list_del_init(&kioc->list); + + spin_unlock_irqrestore(&adp->kioc_pool_lock, flags); + + memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t)); + memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t)); + + kioc->buf_vaddr = NULL; + kioc->buf_paddr = 0; + kioc->pool_index =-1; + kioc->free_buf = 0; + kioc->user_data = NULL; + kioc->user_data_len = 0; + kioc->user_pthru = NULL; + kioc->timedout = 0; + + return kioc; +} + +/** + * mraid_mm_dealloc_kioc - Return kioc to free pool + * @adp : Adapter softstate + * @kioc : uioc_t node to be returned to free pool + */ +static void +mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc) +{ + mm_dmapool_t *pool; + unsigned long flags; + + if (kioc->pool_index != -1) { + pool = &adp->dma_pool_list[kioc->pool_index]; + + /* This routine may be called in non-isr context also */ + spin_lock_irqsave(&pool->lock, flags); + + /* + * While attaching the dma buffer, if we didn't get the + * required buffer from the pool, we would have allocated + * it at the run time and set the free_buf flag. We must + * free that buffer. Otherwise, just mark that the buffer is + * not in use + */ + if (kioc->free_buf == 1) + dma_pool_free(pool->handle, kioc->buf_vaddr, + kioc->buf_paddr); + else + pool->in_use = 0; + + spin_unlock_irqrestore(&pool->lock, flags); + } + + /* Return the kioc to the free pool */ + spin_lock_irqsave(&adp->kioc_pool_lock, flags); + list_add(&kioc->list, &adp->kioc_pool); + spin_unlock_irqrestore(&adp->kioc_pool_lock, flags); + + /* increment the free kioc count */ + up(&adp->kioc_semaphore); + + return; +} + +/** + * lld_ioctl - Routine to issue ioctl to low level drvr + * @adp : The adapter handle + * @kioc : The ioctl packet with kernel addresses + */ +static int +lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc) +{ + int rval; + struct uioc_timeout timeout = { }; + + kioc->status = -ENODATA; + rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE); + + if (rval) return rval; + + /* + * Start the timer + */ + if (adp->timeout > 0) { + timeout.uioc = kioc; + timer_setup_on_stack(&timeout.timer, lld_timedout, 0); + + timeout.timer.expires = jiffies + adp->timeout * HZ; + + add_timer(&timeout.timer); + } + + /* + * Wait till the low level driver completes the ioctl. After this + * call, the ioctl either completed successfully or timedout. 
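+ * If the timer fires first, lld_timedout() sets kioc->status to -ETIME
+ * and marks kioc->timedout, which also satisfies the wait condition.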
+ */ + wait_event(wait_q, (kioc->status != -ENODATA)); + if (timeout.timer.function) { + del_timer_sync(&timeout.timer); + destroy_timer_on_stack(&timeout.timer); + } + + /* + * If the command had timedout, we mark the controller offline + * before returning + */ + if (kioc->timedout) { + adp->quiescent = 0; + } + + return kioc->status; +} + + +/** + * ioctl_done - callback from the low level driver + * @kioc : completed ioctl packet + */ +static void +ioctl_done(uioc_t *kioc) +{ + uint32_t adapno; + int iterator; + mraid_mmadp_t* adapter; + bool is_found; + + /* + * When the kioc returns from driver, make sure it still doesn't + * have ENODATA in status. Otherwise, driver will hang on wait_event + * forever + */ + if (kioc->status == -ENODATA) { + con_log(CL_ANN, (KERN_WARNING + "megaraid cmm: lld didn't change status!\n")); + + kioc->status = -EINVAL; + } + + /* + * Check if this kioc was timedout before. If so, nobody is waiting + * on this kioc. We don't have to wake up anybody. Instead, we just + * have to free the kioc + */ + if (kioc->timedout) { + iterator = 0; + adapter = NULL; + adapno = kioc->adapno; + is_found = false; + + con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed " + "ioctl that was timedout before\n")); + + list_for_each_entry(adapter, &adapters_list_g, list) { + if (iterator++ == adapno) { + is_found = true; + break; + } + } + + kioc->timedout = 0; + + if (is_found) + mraid_mm_dealloc_kioc( adapter, kioc ); + + } + else { + wake_up(&wait_q); + } +} + + +/** + * lld_timedout - callback from the expired timer + * @t : timer that timed out + */ +static void +lld_timedout(struct timer_list *t) +{ + struct uioc_timeout *timeout = from_timer(timeout, t, timer); + uioc_t *kioc = timeout->uioc; + + kioc->status = -ETIME; + kioc->timedout = 1; + + con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n")); + + wake_up(&wait_q); +} + + +/** + * kioc_to_mimd - Converter from new back to old format + * @kioc : Kernel space IOCTL packet (successfully issued) + * @mimd : User space MIMD packet + */ +static int +kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd) +{ + mimd_t kmimd; + uint8_t opcode; + uint8_t subopcode; + + mbox64_t *mbox64; + mraid_passthru_t __user *upthru32; + mraid_passthru_t *kpthru32; + mcontroller_t cinfo; + mraid_hba_info_t *hinfo; + + + if (copy_from_user(&kmimd, mimd, sizeof(mimd_t))) + return (-EFAULT); + + opcode = kmimd.ui.fcs.opcode; + subopcode = kmimd.ui.fcs.subopcode; + + if (opcode == 0x82) { + switch (subopcode) { + + case MEGAIOC_QADAPINFO: + + hinfo = (mraid_hba_info_t *)(unsigned long) + kioc->buf_vaddr; + + hinfo_to_cinfo(hinfo, &cinfo); + + if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo))) + return (-EFAULT); + + return 0; + + default: + return (-EINVAL); + } + + return 0; + } + + mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; + + if (kioc->user_pthru) { + + upthru32 = kioc->user_pthru; + kpthru32 = kioc->pthru32; + + if (copy_to_user(&upthru32->scsistatus, + &kpthru32->scsistatus, + sizeof(uint8_t))) { + return (-EFAULT); + } + } + + if (kioc->user_data) { + if (copy_to_user(kioc->user_data, kioc->buf_vaddr, + kioc->user_data_len)) { + return (-EFAULT); + } + } + + if (copy_to_user(&mimd->mbox[17], + &mbox64->mbox32.status, sizeof(uint8_t))) { + return (-EFAULT); + } + + return 0; +} + + +/** + * hinfo_to_cinfo - Convert new format hba info into old format + * @hinfo : New format, more comprehensive adapter info + * @cinfo : Old format adapter info to support mimd_t apps + */ +static void +hinfo_to_cinfo(mraid_hba_info_t 
*hinfo, mcontroller_t *cinfo) +{ + if (!hinfo || !cinfo) + return; + + cinfo->base = hinfo->baseport; + cinfo->irq = hinfo->irq; + cinfo->numldrv = hinfo->num_ldrv; + cinfo->pcibus = hinfo->pci_bus; + cinfo->pcidev = hinfo->pci_slot; + cinfo->pcifun = PCI_FUNC(hinfo->pci_dev_fn); + cinfo->pciid = hinfo->pci_device_id; + cinfo->pcivendor = hinfo->pci_vendor_id; + cinfo->pcislot = hinfo->pci_slot; + cinfo->uid = hinfo->unique_id; +} + + +/** + * mraid_mm_register_adp - Registration routine for low level drivers + * @lld_adp : Adapter object + */ +int +mraid_mm_register_adp(mraid_mmadp_t *lld_adp) +{ + mraid_mmadp_t *adapter; + mbox64_t *mbox_list; + uioc_t *kioc; + uint32_t rval; + int i; + + + if (lld_adp->drvr_type != DRVRTYPE_MBOX) + return (-EINVAL); + + adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL); + + if (!adapter) + return -ENOMEM; + + + adapter->unique_id = lld_adp->unique_id; + adapter->drvr_type = lld_adp->drvr_type; + adapter->drvr_data = lld_adp->drvr_data; + adapter->pdev = lld_adp->pdev; + adapter->issue_uioc = lld_adp->issue_uioc; + adapter->timeout = lld_adp->timeout; + adapter->max_kioc = lld_adp->max_kioc; + adapter->quiescent = 1; + + /* + * Allocate single blocks of memory for all required kiocs, + * mailboxes and passthru structures. + */ + adapter->kioc_list = kmalloc_array(lld_adp->max_kioc, + sizeof(uioc_t), + GFP_KERNEL); + adapter->mbox_list = kmalloc_array(lld_adp->max_kioc, + sizeof(mbox64_t), + GFP_KERNEL); + adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool", + &adapter->pdev->dev, + sizeof(mraid_passthru_t), + 16, 0); + + if (!adapter->kioc_list || !adapter->mbox_list || + !adapter->pthru_dma_pool) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid cmm: out of memory, %s %d\n", __func__, + __LINE__)); + + rval = (-ENOMEM); + + goto memalloc_error; + } + + /* + * Slice kioc_list and make a kioc_pool with the individiual kiocs + */ + INIT_LIST_HEAD(&adapter->kioc_pool); + spin_lock_init(&adapter->kioc_pool_lock); + sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc); + + mbox_list = (mbox64_t *)adapter->mbox_list; + + for (i = 0; i < lld_adp->max_kioc; i++) { + + kioc = adapter->kioc_list + i; + kioc->cmdbuf = (uint64_t)(unsigned long)(mbox_list + i); + kioc->pthru32 = dma_pool_alloc(adapter->pthru_dma_pool, + GFP_KERNEL, &kioc->pthru32_h); + + if (!kioc->pthru32) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid cmm: out of memory, %s %d\n", + __func__, __LINE__)); + + rval = (-ENOMEM); + + goto pthru_dma_pool_error; + } + + list_add_tail(&kioc->list, &adapter->kioc_pool); + } + + // Setup the dma pools for data buffers + if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) { + goto dma_pool_error; + } + + list_add_tail(&adapter->list, &adapters_list_g); + + adapters_count_g++; + + return 0; + +dma_pool_error: + /* Do nothing */ + +pthru_dma_pool_error: + + for (i = 0; i < lld_adp->max_kioc; i++) { + kioc = adapter->kioc_list + i; + if (kioc->pthru32) { + dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32, + kioc->pthru32_h); + } + } + +memalloc_error: + + kfree(adapter->kioc_list); + kfree(adapter->mbox_list); + + dma_pool_destroy(adapter->pthru_dma_pool); + + kfree(adapter); + + return rval; +} + + +/** + * mraid_mm_adapter_app_handle - return the application handle for this adapter + * @unique_id : adapter unique identifier + * + * For the given driver data, locate the adapter in our global list and + * return the corresponding handle, which is also used by applications to + * uniquely identify an adapter. 
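+ * The handle is MKADAP() applied to the adapter's position in the
+ * global adapters_list_g list.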
+ * + * Return adapter handle if found in the list. + * Return 0 if adapter could not be located, should never happen though. + */ +uint32_t +mraid_mm_adapter_app_handle(uint32_t unique_id) +{ + mraid_mmadp_t *adapter; + mraid_mmadp_t *tmp; + int index = 0; + + list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) { + + if (adapter->unique_id == unique_id) { + + return MKADAP(index); + } + + index++; + } + + return 0; +} + + +/** + * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter + * @adp : Adapter softstate + * + * We maintain a pool of dma buffers per each adapter. Each pool has one + * buffer. E.g, we may have 5 dma pools - one each for 4k, 8k ... 64k buffers. + * We have just one 4k buffer in 4k pool, one 8k buffer in 8k pool etc. We + * dont' want to waste too much memory by allocating more buffers per each + * pool. + */ +static int +mraid_mm_setup_dma_pools(mraid_mmadp_t *adp) +{ + mm_dmapool_t *pool; + int bufsize; + int i; + + /* + * Create MAX_DMA_POOLS number of pools + */ + bufsize = MRAID_MM_INIT_BUFF_SIZE; + + for (i = 0; i < MAX_DMA_POOLS; i++){ + + pool = &adp->dma_pool_list[i]; + + pool->buf_size = bufsize; + spin_lock_init(&pool->lock); + + pool->handle = dma_pool_create("megaraid mm data buffer", + &adp->pdev->dev, bufsize, + 16, 0); + + if (!pool->handle) { + goto dma_pool_setup_error; + } + + pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL, + &pool->paddr); + + if (!pool->vaddr) + goto dma_pool_setup_error; + + bufsize = bufsize * 2; + } + + return 0; + +dma_pool_setup_error: + + mraid_mm_teardown_dma_pools(adp); + return (-ENOMEM); +} + + +/** + * mraid_mm_unregister_adp - Unregister routine for low level drivers + * @unique_id : UID of the adpater + * + * Assumes no outstanding ioctls to llds. + */ +int +mraid_mm_unregister_adp(uint32_t unique_id) +{ + mraid_mmadp_t *adapter; + mraid_mmadp_t *tmp; + + list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) { + + + if (adapter->unique_id == unique_id) { + + adapters_count_g--; + + list_del_init(&adapter->list); + + mraid_mm_free_adp_resources(adapter); + + kfree(adapter); + + con_log(CL_ANN, ( + "megaraid cmm: Unregistered one adapter:%#x\n", + unique_id)); + + return 0; + } + } + + return (-ENODEV); +} + +/** + * mraid_mm_free_adp_resources - Free adapter softstate + * @adp : Adapter softstate + */ +static void +mraid_mm_free_adp_resources(mraid_mmadp_t *adp) +{ + uioc_t *kioc; + int i; + + mraid_mm_teardown_dma_pools(adp); + + for (i = 0; i < adp->max_kioc; i++) { + + kioc = adp->kioc_list + i; + + dma_pool_free(adp->pthru_dma_pool, kioc->pthru32, + kioc->pthru32_h); + } + + kfree(adp->kioc_list); + kfree(adp->mbox_list); + + dma_pool_destroy(adp->pthru_dma_pool); + + + return; +} + + +/** + * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers + * @adp : Adapter softstate + */ +static void +mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp) +{ + int i; + mm_dmapool_t *pool; + + for (i = 0; i < MAX_DMA_POOLS; i++) { + + pool = &adp->dma_pool_list[i]; + + if (pool->handle) { + + if (pool->vaddr) + dma_pool_free(pool->handle, pool->vaddr, + pool->paddr); + + dma_pool_destroy(pool->handle); + pool->handle = NULL; + } + } + + return; +} + +/** + * mraid_mm_init - Module entry point + */ +static int __init +mraid_mm_init(void) +{ + int err; + + // Announce the driver version + con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n", + LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION)); + + err = misc_register(&megaraid_mm_dev); + if (err < 0) { + con_log(CL_ANN, ("megaraid cmm: 
cannot register misc device\n")); + return err; + } + + init_waitqueue_head(&wait_q); + + INIT_LIST_HEAD(&adapters_list_g); + + return 0; +} + + +/** + * mraid_mm_exit - Module exit point + */ +static void __exit +mraid_mm_exit(void) +{ + con_log(CL_DLEVEL1 , ("exiting common mod\n")); + + misc_deregister(&megaraid_mm_dev); +} + +module_init(mraid_mm_init); +module_exit(mraid_mm_exit); + +/* vi: set ts=8 sw=8 tw=78: */ diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h new file mode 100644 index 000000000..bf4011590 --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_mm.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * + * Linux MegaRAID device driver + * + * Copyright (c) 2003-2004 LSI Logic Corporation. + * + * FILE : megaraid_mm.h + */ + +#ifndef MEGARAID_MM_H +#define MEGARAID_MM_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mbox_defs.h" +#include "megaraid_ioctl.h" + + +#define LSI_COMMON_MOD_VERSION "2.20.2.7" +#define LSI_COMMON_MOD_EXT_VERSION \ + "(Release Date: Sun Jul 16 00:01:03 EST 2006)" + + +#define LSI_DBGLVL dbglevel + +// The smallest dma pool +#define MRAID_MM_INIT_BUFF_SIZE 4096 + +/** + * mimd_t : Old style ioctl packet structure (deprecated) + * + * @inlen : + * @outlen : + * @fca : + * @opcode : + * @subopcode : + * @adapno : + * @buffer : + * @pad : + * @length : + * @mbox : + * @pthru : + * @data : + * @pad : + * + * Note : This structure is DEPRECATED. New applications must use + * : uioc_t structure instead. All new hba drivers use the new + * : format. If we get this mimd packet, we will convert it into + * : new uioc_t format and send it to the hba drivers. + */ + +typedef struct mimd { + + uint32_t inlen; + uint32_t outlen; + + union { + uint8_t fca[16]; + struct { + uint8_t opcode; + uint8_t subopcode; + uint16_t adapno; +#if BITS_PER_LONG == 32 + uint8_t __user *buffer; + uint8_t pad[4]; +#endif +#if BITS_PER_LONG == 64 + uint8_t __user *buffer; +#endif + uint32_t length; + } __attribute__ ((packed)) fcs; + } __attribute__ ((packed)) ui; + + uint8_t mbox[18]; /* 16 bytes + 2 status bytes */ + mraid_passthru_t pthru; + +#if BITS_PER_LONG == 32 + char __user *data; /* buffer <= 4096 for 0x80 commands */ + char pad[4]; +#endif +#if BITS_PER_LONG == 64 + char __user *data; +#endif + +} __attribute__ ((packed))mimd_t; + +#endif // MEGARAID_MM_H + +// vi: set ts=8 sw=8 tw=78: diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h new file mode 100644 index 000000000..94abba575 --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -0,0 +1,2764 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Linux MegaRAID driver for SAS based RAID controllers + * + * Copyright (c) 2003-2013 LSI Corporation + * Copyright (c) 2013-2016 Avago Technologies + * Copyright (c) 2016-2018 Broadcom Inc. + * + * FILE: megaraid_sas.h + * + * Authors: Broadcom Inc. 
+ * Kashyap Desai + * Sumit Saxena + * + * Send feedback to: megaraidlinux.pdl@broadcom.com + */ + +#ifndef LSI_MEGARAID_SAS_H +#define LSI_MEGARAID_SAS_H + +#include + +/* + * MegaRAID SAS Driver meta data + */ +#define MEGASAS_VERSION "07.725.01.00-rc1" +#define MEGASAS_RELDATE "Mar 2, 2023" + +#define MEGASAS_MSIX_NAME_LEN 32 + +/* + * Device IDs + */ +#define PCI_DEVICE_ID_LSI_SAS1078R 0x0060 +#define PCI_DEVICE_ID_LSI_SAS1078DE 0x007C +#define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413 +#define PCI_DEVICE_ID_LSI_SAS1078GEN2 0x0078 +#define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079 +#define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073 +#define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071 +#define PCI_DEVICE_ID_LSI_FUSION 0x005b +#define PCI_DEVICE_ID_LSI_PLASMA 0x002f +#define PCI_DEVICE_ID_LSI_INVADER 0x005d +#define PCI_DEVICE_ID_LSI_FURY 0x005f +#define PCI_DEVICE_ID_LSI_INTRUDER 0x00ce +#define PCI_DEVICE_ID_LSI_INTRUDER_24 0x00cf +#define PCI_DEVICE_ID_LSI_CUTLASS_52 0x0052 +#define PCI_DEVICE_ID_LSI_CUTLASS_53 0x0053 +#define PCI_DEVICE_ID_LSI_VENTURA 0x0014 +#define PCI_DEVICE_ID_LSI_CRUSADER 0x0015 +#define PCI_DEVICE_ID_LSI_HARPOON 0x0016 +#define PCI_DEVICE_ID_LSI_TOMCAT 0x0017 +#define PCI_DEVICE_ID_LSI_VENTURA_4PORT 0x001B +#define PCI_DEVICE_ID_LSI_CRUSADER_4PORT 0x001C +#define PCI_DEVICE_ID_LSI_AERO_10E1 0x10e1 +#define PCI_DEVICE_ID_LSI_AERO_10E2 0x10e2 +#define PCI_DEVICE_ID_LSI_AERO_10E5 0x10e5 +#define PCI_DEVICE_ID_LSI_AERO_10E6 0x10e6 +#define PCI_DEVICE_ID_LSI_AERO_10E0 0x10e0 +#define PCI_DEVICE_ID_LSI_AERO_10E3 0x10e3 +#define PCI_DEVICE_ID_LSI_AERO_10E4 0x10e4 +#define PCI_DEVICE_ID_LSI_AERO_10E7 0x10e7 + +/* + * Intel HBA SSDIDs + */ +#define MEGARAID_INTEL_RS3DC080_SSDID 0x9360 +#define MEGARAID_INTEL_RS3DC040_SSDID 0x9362 +#define MEGARAID_INTEL_RS3SC008_SSDID 0x9380 +#define MEGARAID_INTEL_RS3MC044_SSDID 0x9381 +#define MEGARAID_INTEL_RS3WC080_SSDID 0x9341 +#define MEGARAID_INTEL_RS3WC040_SSDID 0x9343 +#define MEGARAID_INTEL_RMS3BC160_SSDID 0x352B + +/* + * Intruder HBA SSDIDs + */ +#define MEGARAID_INTRUDER_SSDID1 0x9371 +#define MEGARAID_INTRUDER_SSDID2 0x9390 +#define MEGARAID_INTRUDER_SSDID3 0x9370 + +/* + * Intel HBA branding + */ +#define MEGARAID_INTEL_RS3DC080_BRANDING \ + "Intel(R) RAID Controller RS3DC080" +#define MEGARAID_INTEL_RS3DC040_BRANDING \ + "Intel(R) RAID Controller RS3DC040" +#define MEGARAID_INTEL_RS3SC008_BRANDING \ + "Intel(R) RAID Controller RS3SC008" +#define MEGARAID_INTEL_RS3MC044_BRANDING \ + "Intel(R) RAID Controller RS3MC044" +#define MEGARAID_INTEL_RS3WC080_BRANDING \ + "Intel(R) RAID Controller RS3WC080" +#define MEGARAID_INTEL_RS3WC040_BRANDING \ + "Intel(R) RAID Controller RS3WC040" +#define MEGARAID_INTEL_RMS3BC160_BRANDING \ + "Intel(R) Integrated RAID Module RMS3BC160" + +/* + * ===================================== + * MegaRAID SAS MFI firmware definitions + * ===================================== + */ + +/* + * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for + * protocol between the software and firmware. 
Commands are issued using + * "message frames" + */ + +/* + * FW posts its state in upper 4 bits of outbound_msg_0 register + */ +#define MFI_STATE_MASK 0xF0000000 +#define MFI_STATE_UNDEFINED 0x00000000 +#define MFI_STATE_BB_INIT 0x10000000 +#define MFI_STATE_FW_INIT 0x40000000 +#define MFI_STATE_WAIT_HANDSHAKE 0x60000000 +#define MFI_STATE_FW_INIT_2 0x70000000 +#define MFI_STATE_DEVICE_SCAN 0x80000000 +#define MFI_STATE_BOOT_MESSAGE_PENDING 0x90000000 +#define MFI_STATE_FLUSH_CACHE 0xA0000000 +#define MFI_STATE_READY 0xB0000000 +#define MFI_STATE_OPERATIONAL 0xC0000000 +#define MFI_STATE_FAULT 0xF0000000 +#define MFI_STATE_FORCE_OCR 0x00000080 +#define MFI_STATE_DMADONE 0x00000008 +#define MFI_STATE_CRASH_DUMP_DONE 0x00000004 +#define MFI_RESET_REQUIRED 0x00000001 +#define MFI_RESET_ADAPTER 0x00000002 +#define MEGAMFI_FRAME_SIZE 64 + +#define MFI_STATE_FAULT_CODE 0x0FFF0000 +#define MFI_STATE_FAULT_SUBCODE 0x0000FF00 +/* + * During FW init, clear pending cmds & reset state using inbound_msg_0 + * + * ABORT : Abort all pending cmds + * READY : Move from OPERATIONAL to READY state; discard queue info + * MFIMODE : Discard (possible) low MFA posted in 64-bit mode (??) + * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver + * HOTPLUG : Resume from Hotplug + * MFI_STOP_ADP : Send signal to FW to stop processing + * MFI_ADP_TRIGGER_SNAP_DUMP: Inform firmware to initiate snap dump + */ +#define WRITE_SEQUENCE_OFFSET (0x0000000FC) /* I20 */ +#define HOST_DIAGNOSTIC_OFFSET (0x000000F8) /* I20 */ +#define DIAG_WRITE_ENABLE (0x00000080) +#define DIAG_RESET_ADAPTER (0x00000004) + +#define MFI_ADP_RESET 0x00000040 +#define MFI_INIT_ABORT 0x00000001 +#define MFI_INIT_READY 0x00000002 +#define MFI_INIT_MFIMODE 0x00000004 +#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008 +#define MFI_INIT_HOTPLUG 0x00000010 +#define MFI_STOP_ADP 0x00000020 +#define MFI_RESET_FLAGS MFI_INIT_READY| \ + MFI_INIT_MFIMODE| \ + MFI_INIT_ABORT +#define MFI_ADP_TRIGGER_SNAP_DUMP 0x00000100 +#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01) + +/* + * MFI frame flags + */ +#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000 +#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001 +#define MFI_FRAME_SGL32 0x0000 +#define MFI_FRAME_SGL64 0x0002 +#define MFI_FRAME_SENSE32 0x0000 +#define MFI_FRAME_SENSE64 0x0004 +#define MFI_FRAME_DIR_NONE 0x0000 +#define MFI_FRAME_DIR_WRITE 0x0008 +#define MFI_FRAME_DIR_READ 0x0010 +#define MFI_FRAME_DIR_BOTH 0x0018 +#define MFI_FRAME_IEEE 0x0020 + +/* Driver internal */ +#define DRV_DCMD_POLLED_MODE 0x1 +#define DRV_DCMD_SKIP_REFIRE 0x2 + +/* + * Definition for cmd_status + */ +#define MFI_CMD_STATUS_POLL_MODE 0xFF + +/* + * MFI command opcodes + */ +enum MFI_CMD_OP { + MFI_CMD_INIT = 0x0, + MFI_CMD_LD_READ = 0x1, + MFI_CMD_LD_WRITE = 0x2, + MFI_CMD_LD_SCSI_IO = 0x3, + MFI_CMD_PD_SCSI_IO = 0x4, + MFI_CMD_DCMD = 0x5, + MFI_CMD_ABORT = 0x6, + MFI_CMD_SMP = 0x7, + MFI_CMD_STP = 0x8, + MFI_CMD_NVME = 0x9, + MFI_CMD_TOOLBOX = 0xa, + MFI_CMD_OP_COUNT, + MFI_CMD_INVALID = 0xff +}; + +#define MR_DCMD_CTRL_GET_INFO 0x01010000 +#define MR_DCMD_LD_GET_LIST 0x03010000 +#define MR_DCMD_LD_LIST_QUERY 0x03010100 + +#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000 +#define MR_FLUSH_CTRL_CACHE 0x01 +#define MR_FLUSH_DISK_CACHE 0x02 + +#define MR_DCMD_CTRL_SHUTDOWN 0x01050000 +#define MR_DCMD_HIBERNATE_SHUTDOWN 0x01060000 +#define MR_ENABLE_DRIVE_SPINDOWN 0x01 + +#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100 +#define MR_DCMD_CTRL_EVENT_GET 0x01040300 +#define MR_DCMD_CTRL_EVENT_WAIT 0x01040500 +#define 
MR_DCMD_LD_GET_PROPERTIES 0x03030000 + +#define MR_DCMD_CLUSTER 0x08000000 +#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100 +#define MR_DCMD_CLUSTER_RESET_LD 0x08010200 +#define MR_DCMD_PD_LIST_QUERY 0x02010100 + +#define MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS 0x01190100 +#define MR_DRIVER_SET_APP_CRASHDUMP_MODE (0xF0010000 | 0x0600) +#define MR_DCMD_PD_GET_INFO 0x02020000 + +/* + * Global functions + */ +extern u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id); + + +/* + * MFI command completion codes + */ +enum MFI_STAT { + MFI_STAT_OK = 0x00, + MFI_STAT_INVALID_CMD = 0x01, + MFI_STAT_INVALID_DCMD = 0x02, + MFI_STAT_INVALID_PARAMETER = 0x03, + MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04, + MFI_STAT_ABORT_NOT_POSSIBLE = 0x05, + MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06, + MFI_STAT_APP_IN_USE = 0x07, + MFI_STAT_APP_NOT_INITIALIZED = 0x08, + MFI_STAT_ARRAY_INDEX_INVALID = 0x09, + MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a, + MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b, + MFI_STAT_DEVICE_NOT_FOUND = 0x0c, + MFI_STAT_DRIVE_TOO_SMALL = 0x0d, + MFI_STAT_FLASH_ALLOC_FAIL = 0x0e, + MFI_STAT_FLASH_BUSY = 0x0f, + MFI_STAT_FLASH_ERROR = 0x10, + MFI_STAT_FLASH_IMAGE_BAD = 0x11, + MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12, + MFI_STAT_FLASH_NOT_OPEN = 0x13, + MFI_STAT_FLASH_NOT_STARTED = 0x14, + MFI_STAT_FLUSH_FAILED = 0x15, + MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16, + MFI_STAT_LD_CC_IN_PROGRESS = 0x17, + MFI_STAT_LD_INIT_IN_PROGRESS = 0x18, + MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19, + MFI_STAT_LD_MAX_CONFIGURED = 0x1a, + MFI_STAT_LD_NOT_OPTIMAL = 0x1b, + MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c, + MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d, + MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e, + MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f, + MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20, + MFI_STAT_MFC_HW_ERROR = 0x21, + MFI_STAT_NO_HW_PRESENT = 0x22, + MFI_STAT_NOT_FOUND = 0x23, + MFI_STAT_NOT_IN_ENCL = 0x24, + MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25, + MFI_STAT_PD_TYPE_WRONG = 0x26, + MFI_STAT_PR_DISABLED = 0x27, + MFI_STAT_ROW_INDEX_INVALID = 0x28, + MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29, + MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a, + MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b, + MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c, + MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d, + MFI_STAT_SCSI_IO_FAILED = 0x2e, + MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f, + MFI_STAT_SHUTDOWN_FAILED = 0x30, + MFI_STAT_TIME_NOT_SET = 0x31, + MFI_STAT_WRONG_STATE = 0x32, + MFI_STAT_LD_OFFLINE = 0x33, + MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34, + MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35, + MFI_STAT_RESERVATION_IN_PROGRESS = 0x36, + MFI_STAT_I2C_ERRORS_DETECTED = 0x37, + MFI_STAT_PCI_ERRORS_DETECTED = 0x38, + MFI_STAT_CONFIG_SEQ_MISMATCH = 0x67, + + MFI_STAT_INVALID_STATUS = 0xFF +}; + +enum mfi_evt_class { + MFI_EVT_CLASS_DEBUG = -2, + MFI_EVT_CLASS_PROGRESS = -1, + MFI_EVT_CLASS_INFO = 0, + MFI_EVT_CLASS_WARNING = 1, + MFI_EVT_CLASS_CRITICAL = 2, + MFI_EVT_CLASS_FATAL = 3, + MFI_EVT_CLASS_DEAD = 4 +}; + +/* + * Crash dump related defines + */ +#define MAX_CRASH_DUMP_SIZE 512 +#define CRASH_DMA_BUF_SIZE (1024 * 1024) + +enum MR_FW_CRASH_DUMP_STATE { + UNAVAILABLE = 0, + AVAILABLE = 1, + COPYING = 2, + COPIED = 3, + COPY_ERROR = 4, +}; + +enum _MR_CRASH_BUF_STATUS { + MR_CRASH_BUF_TURN_OFF = 0, + MR_CRASH_BUF_TURN_ON = 1, +}; + +/* + * Number of mailbox bytes in DCMD message frame + */ +#define MFI_MBOX_SIZE 12 + +enum MR_EVT_CLASS { + + MR_EVT_CLASS_DEBUG = -2, + MR_EVT_CLASS_PROGRESS = -1, + MR_EVT_CLASS_INFO = 0, + MR_EVT_CLASS_WARNING = 1, + MR_EVT_CLASS_CRITICAL = 2, + MR_EVT_CLASS_FATAL = 3, 
+ MR_EVT_CLASS_DEAD = 4, + +}; + +enum MR_EVT_LOCALE { + + MR_EVT_LOCALE_LD = 0x0001, + MR_EVT_LOCALE_PD = 0x0002, + MR_EVT_LOCALE_ENCL = 0x0004, + MR_EVT_LOCALE_BBU = 0x0008, + MR_EVT_LOCALE_SAS = 0x0010, + MR_EVT_LOCALE_CTRL = 0x0020, + MR_EVT_LOCALE_CONFIG = 0x0040, + MR_EVT_LOCALE_CLUSTER = 0x0080, + MR_EVT_LOCALE_ALL = 0xffff, + +}; + +enum MR_EVT_ARGS { + + MR_EVT_ARGS_NONE, + MR_EVT_ARGS_CDB_SENSE, + MR_EVT_ARGS_LD, + MR_EVT_ARGS_LD_COUNT, + MR_EVT_ARGS_LD_LBA, + MR_EVT_ARGS_LD_OWNER, + MR_EVT_ARGS_LD_LBA_PD_LBA, + MR_EVT_ARGS_LD_PROG, + MR_EVT_ARGS_LD_STATE, + MR_EVT_ARGS_LD_STRIP, + MR_EVT_ARGS_PD, + MR_EVT_ARGS_PD_ERR, + MR_EVT_ARGS_PD_LBA, + MR_EVT_ARGS_PD_LBA_LD, + MR_EVT_ARGS_PD_PROG, + MR_EVT_ARGS_PD_STATE, + MR_EVT_ARGS_PCI, + MR_EVT_ARGS_RATE, + MR_EVT_ARGS_STR, + MR_EVT_ARGS_TIME, + MR_EVT_ARGS_ECC, + MR_EVT_ARGS_LD_PROP, + MR_EVT_ARGS_PD_SPARE, + MR_EVT_ARGS_PD_INDEX, + MR_EVT_ARGS_DIAG_PASS, + MR_EVT_ARGS_DIAG_FAIL, + MR_EVT_ARGS_PD_LBA_LBA, + MR_EVT_ARGS_PORT_PHY, + MR_EVT_ARGS_PD_MISSING, + MR_EVT_ARGS_PD_ADDRESS, + MR_EVT_ARGS_BITMAP, + MR_EVT_ARGS_CONNECTOR, + MR_EVT_ARGS_PD_PD, + MR_EVT_ARGS_PD_FRU, + MR_EVT_ARGS_PD_PATHINFO, + MR_EVT_ARGS_PD_POWER_STATE, + MR_EVT_ARGS_GENERIC, +}; + + +#define SGE_BUFFER_SIZE 4096 +#define MEGASAS_CLUSTER_ID_SIZE 16 +/* + * define constants for device list query options + */ +enum MR_PD_QUERY_TYPE { + MR_PD_QUERY_TYPE_ALL = 0, + MR_PD_QUERY_TYPE_STATE = 1, + MR_PD_QUERY_TYPE_POWER_STATE = 2, + MR_PD_QUERY_TYPE_MEDIA_TYPE = 3, + MR_PD_QUERY_TYPE_SPEED = 4, + MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, +}; + +enum MR_LD_QUERY_TYPE { + MR_LD_QUERY_TYPE_ALL = 0, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1, + MR_LD_QUERY_TYPE_USED_TGT_IDS = 2, + MR_LD_QUERY_TYPE_CLUSTER_ACCESS = 3, + MR_LD_QUERY_TYPE_CLUSTER_LOCALE = 4, +}; + + +#define MR_EVT_CFG_CLEARED 0x0004 +#define MR_EVT_LD_STATE_CHANGE 0x0051 +#define MR_EVT_PD_INSERTED 0x005b +#define MR_EVT_PD_REMOVED 0x0070 +#define MR_EVT_LD_CREATED 0x008a +#define MR_EVT_LD_DELETED 0x008b +#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db +#define MR_EVT_LD_OFFLINE 0x00fc +#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152 +#define MR_EVT_CTRL_PROP_CHANGED 0x012f + +enum MR_PD_STATE { + MR_PD_STATE_UNCONFIGURED_GOOD = 0x00, + MR_PD_STATE_UNCONFIGURED_BAD = 0x01, + MR_PD_STATE_HOT_SPARE = 0x02, + MR_PD_STATE_OFFLINE = 0x10, + MR_PD_STATE_FAILED = 0x11, + MR_PD_STATE_REBUILD = 0x14, + MR_PD_STATE_ONLINE = 0x18, + MR_PD_STATE_COPYBACK = 0x20, + MR_PD_STATE_SYSTEM = 0x40, + }; + +union MR_PD_REF { + struct { + u16 deviceId; + u16 seqNum; + } mrPdRef; + u32 ref; +}; + +/* + * define the DDF Type bit structure + */ +union MR_PD_DDF_TYPE { + struct { + union { + struct { +#ifndef __BIG_ENDIAN_BITFIELD + u16 forcedPDGUID:1; + u16 inVD:1; + u16 isGlobalSpare:1; + u16 isSpare:1; + u16 isForeign:1; + u16 reserved:7; + u16 intf:4; +#else + u16 intf:4; + u16 reserved:7; + u16 isForeign:1; + u16 isSpare:1; + u16 isGlobalSpare:1; + u16 inVD:1; + u16 forcedPDGUID:1; +#endif + } pdType; + u16 type; + }; + u16 reserved; + } ddf; + struct { + u32 reserved; + } nonDisk; + u32 type; +} __packed; + +/* + * defines the progress structure + */ +union MR_PROGRESS { + struct { + u16 progress; + union { + u16 elapsedSecs; + u16 elapsedSecsForLastPercent; + }; + } mrProgress; + u32 w; +} __packed; + +/* + * defines the physical drive progress structure + */ +struct MR_PD_PROGRESS { + struct { +#ifndef __BIG_ENDIAN_BITFIELD + u32 rbld:1; + u32 patrol:1; + u32 clear:1; + u32 copyBack:1; + u32 erase:1; + u32 locate:1; + u32 reserved:26; 
+#else + u32 reserved:26; + u32 locate:1; + u32 erase:1; + u32 copyBack:1; + u32 clear:1; + u32 patrol:1; + u32 rbld:1; +#endif + } active; + union MR_PROGRESS rbld; + union MR_PROGRESS patrol; + union { + union MR_PROGRESS clear; + union MR_PROGRESS erase; + }; + + struct { +#ifndef __BIG_ENDIAN_BITFIELD + u32 rbld:1; + u32 patrol:1; + u32 clear:1; + u32 copyBack:1; + u32 erase:1; + u32 reserved:27; +#else + u32 reserved:27; + u32 erase:1; + u32 copyBack:1; + u32 clear:1; + u32 patrol:1; + u32 rbld:1; +#endif + } pause; + + union MR_PROGRESS reserved[3]; +} __packed; + +struct MR_PD_INFO { + union MR_PD_REF ref; + u8 inquiryData[96]; + u8 vpdPage83[64]; + u8 notSupported; + u8 scsiDevType; + + union { + u8 connectedPortBitmap; + u8 connectedPortNumbers; + }; + + u8 deviceSpeed; + u32 mediaErrCount; + u32 otherErrCount; + u32 predFailCount; + u32 lastPredFailEventSeqNum; + + u16 fwState; + u8 disabledForRemoval; + u8 linkSpeed; + union MR_PD_DDF_TYPE state; + + struct { + u8 count; +#ifndef __BIG_ENDIAN_BITFIELD + u8 isPathBroken:4; + u8 reserved3:3; + u8 widePortCapable:1; +#else + u8 widePortCapable:1; + u8 reserved3:3; + u8 isPathBroken:4; +#endif + + u8 connectorIndex[2]; + u8 reserved[4]; + u64 sasAddr[2]; + u8 reserved2[16]; + } pathInfo; + + u64 rawSize; + u64 nonCoercedSize; + u64 coercedSize; + u16 enclDeviceId; + u8 enclIndex; + + union { + u8 slotNumber; + u8 enclConnectorIndex; + }; + + struct MR_PD_PROGRESS progInfo; + u8 badBlockTableFull; + u8 unusableInCurrentConfig; + u8 vpdPage83Ext[64]; + u8 powerState; + u8 enclPosition; + u32 allowedOps; + u16 copyBackPartnerId; + u16 enclPartnerDeviceId; + struct { +#ifndef __BIG_ENDIAN_BITFIELD + u16 fdeCapable:1; + u16 fdeEnabled:1; + u16 secured:1; + u16 locked:1; + u16 foreign:1; + u16 needsEKM:1; + u16 reserved:10; +#else + u16 reserved:10; + u16 needsEKM:1; + u16 foreign:1; + u16 locked:1; + u16 secured:1; + u16 fdeEnabled:1; + u16 fdeCapable:1; +#endif + } security; + u8 mediaType; + u8 notCertified; + u8 bridgeVendor[8]; + u8 bridgeProductIdentification[16]; + u8 bridgeProductRevisionLevel[4]; + u8 satBridgeExists; + + u8 interfaceType; + u8 temperature; + u8 emulatedBlockSize; + u16 userDataBlockSize; + u16 reserved2; + + struct { +#ifndef __BIG_ENDIAN_BITFIELD + u32 piType:3; + u32 piFormatted:1; + u32 piEligible:1; + u32 NCQ:1; + u32 WCE:1; + u32 commissionedSpare:1; + u32 emergencySpare:1; + u32 ineligibleForSSCD:1; + u32 ineligibleForLd:1; + u32 useSSEraseType:1; + u32 wceUnchanged:1; + u32 supportScsiUnmap:1; + u32 reserved:18; +#else + u32 reserved:18; + u32 supportScsiUnmap:1; + u32 wceUnchanged:1; + u32 useSSEraseType:1; + u32 ineligibleForLd:1; + u32 ineligibleForSSCD:1; + u32 emergencySpare:1; + u32 commissionedSpare:1; + u32 WCE:1; + u32 NCQ:1; + u32 piEligible:1; + u32 piFormatted:1; + u32 piType:3; +#endif + } properties; + + u64 shieldDiagCompletionTime; + u8 shieldCounter; + + u8 linkSpeedOther; + u8 reserved4[2]; + + struct { +#ifndef __BIG_ENDIAN_BITFIELD + u32 bbmErrCountSupported:1; + u32 bbmErrCount:31; +#else + u32 bbmErrCount:31; + u32 bbmErrCountSupported:1; +#endif + } bbmErr; + + u8 reserved1[512-428]; +} __packed; + +/* + * Definition of structure used to expose attributes of VD or JBOD + * (this structure is to be filled by firmware when MR_DCMD_DRV_GET_TARGET_PROP + * is fired by driver) + */ +struct MR_TARGET_PROPERTIES { + u32 max_io_size_kb; + u32 device_qdepth; + u32 sector_size; + u8 reset_tmo; + u8 reserved[499]; +} __packed; + + /* + * defines the physical drive address structure + */ 
+struct MR_PD_ADDRESS { + __le16 deviceId; + u16 enclDeviceId; + + union { + struct { + u8 enclIndex; + u8 slotNumber; + } mrPdAddress; + struct { + u8 enclPosition; + u8 enclConnectorIndex; + } mrEnclAddress; + }; + u8 scsiDevType; + union { + u8 connectedPortBitmap; + u8 connectedPortNumbers; + }; + u64 sasAddr[2]; +} __packed; + +/* + * defines the physical drive list structure + */ +struct MR_PD_LIST { + __le32 size; + __le32 count; + struct MR_PD_ADDRESS addr[1]; +} __packed; + +struct megasas_pd_list { + u16 tid; + u8 driveType; + u8 driveState; +} __packed; + + /* + * defines the logical drive reference structure + */ +union MR_LD_REF { + struct { + u8 targetId; + u8 reserved; + __le16 seqNum; + }; + __le32 ref; +} __packed; + +/* + * defines the logical drive list structure + */ +struct MR_LD_LIST { + __le32 ldCount; + __le32 reserved; + struct { + union MR_LD_REF ref; + u8 state; + u8 reserved[3]; + __le64 size; + } ldList[MAX_LOGICAL_DRIVES_EXT]; +} __packed; + +struct MR_LD_TARGETID_LIST { + __le32 size; + __le32 count; + u8 pad[3]; + u8 targetId[MAX_LOGICAL_DRIVES_EXT]; +}; + +struct MR_HOST_DEVICE_LIST_ENTRY { + struct { + union { + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u8 reserved:7; + u8 is_sys_pd:1; +#else + u8 is_sys_pd:1; + u8 reserved:7; +#endif + } bits; + u8 byte; + } u; + } flags; + u8 scsi_type; + __le16 target_id; + u8 reserved[4]; + __le64 sas_addr[2]; +} __packed; + +struct MR_HOST_DEVICE_LIST { + __le32 size; + __le32 count; + __le32 reserved[2]; + struct MR_HOST_DEVICE_LIST_ENTRY host_device_list[1]; +} __packed; + +#define HOST_DEVICE_LIST_SZ (sizeof(struct MR_HOST_DEVICE_LIST) + \ + (sizeof(struct MR_HOST_DEVICE_LIST_ENTRY) * \ + (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT - 1))) + + +/* + * SAS controller properties + */ +struct megasas_ctrl_prop { + + u16 seq_num; + u16 pred_fail_poll_interval; + u16 intr_throttle_count; + u16 intr_throttle_timeouts; + u8 rebuild_rate; + u8 patrol_read_rate; + u8 bgi_rate; + u8 cc_rate; + u8 recon_rate; + u8 cache_flush_interval; + u8 spinup_drv_count; + u8 spinup_delay; + u8 cluster_enable; + u8 coercion_mode; + u8 alarm_enable; + u8 disable_auto_rebuild; + u8 disable_battery_warn; + u8 ecc_bucket_size; + u16 ecc_bucket_leak_rate; + u8 restore_hotspare_on_insertion; + u8 expose_encl_devices; + u8 maintainPdFailHistory; + u8 disallowHostRequestReordering; + u8 abortCCOnError; + u8 loadBalanceMode; + u8 disableAutoDetectBackplane; + + u8 snapVDSpace; + + /* + * Add properties that can be controlled by + * a bit in the following structure. 
+ */ + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved:18; + u32 enableJBOD:1; + u32 disableSpinDownHS:1; + u32 allowBootWithPinnedCache:1; + u32 disableOnlineCtrlReset:1; + u32 enableSecretKeyControl:1; + u32 autoEnhancedImport:1; + u32 enableSpinDownUnconfigured:1; + u32 SSDPatrolReadEnabled:1; + u32 SSDSMARTerEnabled:1; + u32 disableNCQ:1; + u32 useFdeOnly:1; + u32 prCorrectUnconfiguredAreas:1; + u32 SMARTerEnabled:1; + u32 copyBackDisabled:1; +#else + u32 copyBackDisabled:1; + u32 SMARTerEnabled:1; + u32 prCorrectUnconfiguredAreas:1; + u32 useFdeOnly:1; + u32 disableNCQ:1; + u32 SSDSMARTerEnabled:1; + u32 SSDPatrolReadEnabled:1; + u32 enableSpinDownUnconfigured:1; + u32 autoEnhancedImport:1; + u32 enableSecretKeyControl:1; + u32 disableOnlineCtrlReset:1; + u32 allowBootWithPinnedCache:1; + u32 disableSpinDownHS:1; + u32 enableJBOD:1; + u32 reserved:18; +#endif + } OnOffProperties; + + union { + u8 autoSnapVDSpace; + u8 viewSpace; + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u16 reserved3:9; + u16 enable_fw_dev_list:1; + u16 reserved2:1; + u16 enable_snap_dump:1; + u16 reserved1:4; +#else + u16 reserved1:4; + u16 enable_snap_dump:1; + u16 reserved2:1; + u16 enable_fw_dev_list:1; + u16 reserved3:9; +#endif + } on_off_properties2; + }; + __le16 spinDownTime; + u8 reserved[24]; +} __packed; + +/* + * SAS controller information + */ +struct megasas_ctrl_info { + + /* + * PCI device information + */ + struct { + + __le16 vendor_id; + __le16 device_id; + __le16 sub_vendor_id; + __le16 sub_device_id; + u8 reserved[24]; + + } __attribute__ ((packed)) pci; + + /* + * Host interface information + */ + struct { + + u8 PCIX:1; + u8 PCIE:1; + u8 iSCSI:1; + u8 SAS_3G:1; + u8 SRIOV:1; + u8 reserved_0:3; + u8 reserved_1[6]; + u8 port_count; + u64 port_addr[8]; + + } __attribute__ ((packed)) host_interface; + + /* + * Device (backend) interface information + */ + struct { + + u8 SPI:1; + u8 SAS_3G:1; + u8 SATA_1_5G:1; + u8 SATA_3G:1; + u8 reserved_0:4; + u8 reserved_1[6]; + u8 port_count; + u64 port_addr[8]; + + } __attribute__ ((packed)) device_interface; + + /* + * List of components residing in flash. All str are null terminated + */ + __le32 image_check_word; + __le32 image_component_count; + + struct { + + char name[8]; + char version[32]; + char build_date[16]; + char built_time[16]; + + } __attribute__ ((packed)) image_component[8]; + + /* + * List of flash components that have been flashed on the card, but + * are not in use, pending reset of the adapter. This list will be + * empty if a flash operation has not occurred. All stings are null + * terminated + */ + __le32 pending_image_component_count; + + struct { + + char name[8]; + char version[32]; + char build_date[16]; + char build_time[16]; + + } __attribute__ ((packed)) pending_image_component[8]; + + u8 max_arms; + u8 max_spans; + u8 max_arrays; + u8 max_lds; + + char product_name[80]; + char serial_no[32]; + + /* + * Other physical/controller/operation information. 
Indicates the + * presence of the hardware + */ + struct { + + u32 bbu:1; + u32 alarm:1; + u32 nvram:1; + u32 uart:1; + u32 reserved:28; + + } __attribute__ ((packed)) hw_present; + + __le32 current_fw_time; + + /* + * Maximum data transfer sizes + */ + __le16 max_concurrent_cmds; + __le16 max_sge_count; + __le32 max_request_size; + + /* + * Logical and physical device counts + */ + __le16 ld_present_count; + __le16 ld_degraded_count; + __le16 ld_offline_count; + + __le16 pd_present_count; + __le16 pd_disk_present_count; + __le16 pd_disk_pred_failure_count; + __le16 pd_disk_failed_count; + + /* + * Memory size information + */ + __le16 nvram_size; + __le16 memory_size; + __le16 flash_size; + + /* + * Error counters + */ + __le16 mem_correctable_error_count; + __le16 mem_uncorrectable_error_count; + + /* + * Cluster information + */ + u8 cluster_permitted; + u8 cluster_active; + + /* + * Additional max data transfer sizes + */ + __le16 max_strips_per_io; + + /* + * Controller capabilities structures + */ + struct { + + u32 raid_level_0:1; + u32 raid_level_1:1; + u32 raid_level_5:1; + u32 raid_level_1E:1; + u32 raid_level_6:1; + u32 reserved:27; + + } __attribute__ ((packed)) raid_levels; + + struct { + + u32 rbld_rate:1; + u32 cc_rate:1; + u32 bgi_rate:1; + u32 recon_rate:1; + u32 patrol_rate:1; + u32 alarm_control:1; + u32 cluster_supported:1; + u32 bbu:1; + u32 spanning_allowed:1; + u32 dedicated_hotspares:1; + u32 revertible_hotspares:1; + u32 foreign_config_import:1; + u32 self_diagnostic:1; + u32 mixed_redundancy_arr:1; + u32 global_hot_spares:1; + u32 reserved:17; + + } __attribute__ ((packed)) adapter_operations; + + struct { + + u32 read_policy:1; + u32 write_policy:1; + u32 io_policy:1; + u32 access_policy:1; + u32 disk_cache_policy:1; + u32 reserved:27; + + } __attribute__ ((packed)) ld_operations; + + struct { + + u8 min; + u8 max; + u8 reserved[2]; + + } __attribute__ ((packed)) stripe_sz_ops; + + struct { + + u32 force_online:1; + u32 force_offline:1; + u32 force_rebuild:1; + u32 reserved:29; + + } __attribute__ ((packed)) pd_operations; + + struct { + + u32 ctrl_supports_sas:1; + u32 ctrl_supports_sata:1; + u32 allow_mix_in_encl:1; + u32 allow_mix_in_ld:1; + u32 allow_sata_in_cluster:1; + u32 reserved:27; + + } __attribute__ ((packed)) pd_mix_support; + + /* + * Define ECC single-bit-error bucket information + */ + u8 ecc_bucket_count; + u8 reserved_2[11]; + + /* + * Include the controller properties (changeable items) + */ + struct megasas_ctrl_prop properties; + + /* + * Define FW pkg version (set in envt v'bles on OEM basis) + */ + char package_version[0x60]; + + + /* + * If adapterOperations.supportMoreThan8Phys is set, + * and deviceInterface.portCount is greater than 8, + * SAS Addrs for first 8 ports shall be populated in + * deviceInterface.portAddr, and the rest shall be + * populated in deviceInterfacePortAddr2. 
+ */ + __le64 deviceInterfacePortAddr2[8]; /*6a0h */ + u8 reserved3[128]; /*6e0h */ + + struct { /*760h */ + u16 minPdRaidLevel_0:4; + u16 maxPdRaidLevel_0:12; + + u16 minPdRaidLevel_1:4; + u16 maxPdRaidLevel_1:12; + + u16 minPdRaidLevel_5:4; + u16 maxPdRaidLevel_5:12; + + u16 minPdRaidLevel_1E:4; + u16 maxPdRaidLevel_1E:12; + + u16 minPdRaidLevel_6:4; + u16 maxPdRaidLevel_6:12; + + u16 minPdRaidLevel_10:4; + u16 maxPdRaidLevel_10:12; + + u16 minPdRaidLevel_50:4; + u16 maxPdRaidLevel_50:12; + + u16 minPdRaidLevel_60:4; + u16 maxPdRaidLevel_60:12; + + u16 minPdRaidLevel_1E_RLQ0:4; + u16 maxPdRaidLevel_1E_RLQ0:12; + + u16 minPdRaidLevel_1E0_RLQ0:4; + u16 maxPdRaidLevel_1E0_RLQ0:12; + + u16 reserved[6]; + } pdsForRaidLevels; + + __le16 maxPds; /*780h */ + __le16 maxDedHSPs; /*782h */ + __le16 maxGlobalHSP; /*784h */ + __le16 ddfSize; /*786h */ + u8 maxLdsPerArray; /*788h */ + u8 partitionsInDDF; /*789h */ + u8 lockKeyBinding; /*78ah */ + u8 maxPITsPerLd; /*78bh */ + u8 maxViewsPerLd; /*78ch */ + u8 maxTargetId; /*78dh */ + __le16 maxBvlVdSize; /*78eh */ + + __le16 maxConfigurableSSCSize; /*790h */ + __le16 currentSSCsize; /*792h */ + + char expanderFwVersion[12]; /*794h */ + + __le16 PFKTrialTimeRemaining; /*7A0h */ + + __le16 cacheMemorySize; /*7A2h */ + + struct { /*7A4h */ +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved:5; + u32 activePassive:2; + u32 supportConfigAutoBalance:1; + u32 mpio:1; + u32 supportDataLDonSSCArray:1; + u32 supportPointInTimeProgress:1; + u32 supportUnevenSpans:1; + u32 dedicatedHotSparesLimited:1; + u32 headlessMode:1; + u32 supportEmulatedDrives:1; + u32 supportResetNow:1; + u32 realTimeScheduler:1; + u32 supportSSDPatrolRead:1; + u32 supportPerfTuning:1; + u32 disableOnlinePFKChange:1; + u32 supportJBOD:1; + u32 supportBootTimePFKChange:1; + u32 supportSetLinkSpeed:1; + u32 supportEmergencySpares:1; + u32 supportSuspendResumeBGops:1; + u32 blockSSDWriteCacheChange:1; + u32 supportShieldState:1; + u32 supportLdBBMInfo:1; + u32 supportLdPIType3:1; + u32 supportLdPIType2:1; + u32 supportLdPIType1:1; + u32 supportPIcontroller:1; +#else + u32 supportPIcontroller:1; + u32 supportLdPIType1:1; + u32 supportLdPIType2:1; + u32 supportLdPIType3:1; + u32 supportLdBBMInfo:1; + u32 supportShieldState:1; + u32 blockSSDWriteCacheChange:1; + u32 supportSuspendResumeBGops:1; + u32 supportEmergencySpares:1; + u32 supportSetLinkSpeed:1; + u32 supportBootTimePFKChange:1; + u32 supportJBOD:1; + u32 disableOnlinePFKChange:1; + u32 supportPerfTuning:1; + u32 supportSSDPatrolRead:1; + u32 realTimeScheduler:1; + + u32 supportResetNow:1; + u32 supportEmulatedDrives:1; + u32 headlessMode:1; + u32 dedicatedHotSparesLimited:1; + + + u32 supportUnevenSpans:1; + u32 supportPointInTimeProgress:1; + u32 supportDataLDonSSCArray:1; + u32 mpio:1; + u32 supportConfigAutoBalance:1; + u32 activePassive:2; + u32 reserved:5; +#endif + } adapterOperations2; + + u8 driverVersion[32]; /*7A8h */ + u8 maxDAPdCountSpinup60; /*7C8h */ + u8 temperatureROC; /*7C9h */ + u8 temperatureCtrl; /*7CAh */ + u8 reserved4; /*7CBh */ + __le16 maxConfigurablePds; /*7CCh */ + + + u8 reserved5[2]; /*0x7CDh */ + + /* + * HA cluster information + */ + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved:25; + u32 passive:1; + u32 premiumFeatureMismatch:1; + u32 ctrlPropIncompatible:1; + u32 fwVersionMismatch:1; + u32 hwIncompatible:1; + u32 peerIsIncompatible:1; + u32 peerIsPresent:1; +#else + u32 peerIsPresent:1; + u32 peerIsIncompatible:1; + u32 hwIncompatible:1; + u32 fwVersionMismatch:1; + u32 
ctrlPropIncompatible:1; + u32 premiumFeatureMismatch:1; + u32 passive:1; + u32 reserved:25; +#endif + } cluster; + + char clusterId[MEGASAS_CLUSTER_ID_SIZE]; /*0x7D4 */ + struct { + u8 maxVFsSupported; /*0x7E4*/ + u8 numVFsEnabled; /*0x7E5*/ + u8 requestorId; /*0x7E6 0:PF, 1:VF1, 2:VF2*/ + u8 reserved; /*0x7E7*/ + } iov; + + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved:7; + u32 useSeqNumJbodFP:1; + u32 supportExtendedSSCSize:1; + u32 supportDiskCacheSettingForSysPDs:1; + u32 supportCPLDUpdate:1; + u32 supportTTYLogCompression:1; + u32 discardCacheDuringLDDelete:1; + u32 supportSecurityonJBOD:1; + u32 supportCacheBypassModes:1; + u32 supportDisableSESMonitoring:1; + u32 supportForceFlash:1; + u32 supportNVDRAM:1; + u32 supportDrvActivityLEDSetting:1; + u32 supportAllowedOpsforDrvRemoval:1; + u32 supportHOQRebuild:1; + u32 supportForceTo512e:1; + u32 supportNVCacheErase:1; + u32 supportDebugQueue:1; + u32 supportSwZone:1; + u32 supportCrashDump:1; + u32 supportMaxExtLDs:1; + u32 supportT10RebuildAssist:1; + u32 supportDisableImmediateIO:1; + u32 supportThermalPollInterval:1; + u32 supportPersonalityChange:2; +#else + u32 supportPersonalityChange:2; + u32 supportThermalPollInterval:1; + u32 supportDisableImmediateIO:1; + u32 supportT10RebuildAssist:1; + u32 supportMaxExtLDs:1; + u32 supportCrashDump:1; + u32 supportSwZone:1; + u32 supportDebugQueue:1; + u32 supportNVCacheErase:1; + u32 supportForceTo512e:1; + u32 supportHOQRebuild:1; + u32 supportAllowedOpsforDrvRemoval:1; + u32 supportDrvActivityLEDSetting:1; + u32 supportNVDRAM:1; + u32 supportForceFlash:1; + u32 supportDisableSESMonitoring:1; + u32 supportCacheBypassModes:1; + u32 supportSecurityonJBOD:1; + u32 discardCacheDuringLDDelete:1; + u32 supportTTYLogCompression:1; + u32 supportCPLDUpdate:1; + u32 supportDiskCacheSettingForSysPDs:1; + u32 supportExtendedSSCSize:1; + u32 useSeqNumJbodFP:1; + u32 reserved:7; +#endif + } adapterOperations3; + + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u8 reserved:7; + /* Indicates whether the CPLD image is part of + * the package and stored in flash + */ + u8 cpld_in_flash:1; +#else + u8 cpld_in_flash:1; + u8 reserved:7; +#endif + u8 reserved1[3]; + /* Null terminated string. 
Has the version + * information if cpld_in_flash = FALSE + */ + u8 userCodeDefinition[12]; + } cpld; /* Valid only if upgradableCPLD is TRUE */ + + struct { + #if defined(__BIG_ENDIAN_BITFIELD) + u16 reserved:2; + u16 support_nvme_passthru:1; + u16 support_pl_debug_info:1; + u16 support_flash_comp_info:1; + u16 support_host_info:1; + u16 support_dual_fw_update:1; + u16 support_ssc_rev3:1; + u16 fw_swaps_bbu_vpd_info:1; + u16 support_pd_map_target_id:1; + u16 support_ses_ctrl_in_multipathcfg:1; + u16 image_upload_supported:1; + u16 support_encrypted_mfc:1; + u16 supported_enc_algo:1; + u16 support_ibutton_less:1; + u16 ctrl_info_ext_supported:1; + #else + + u16 ctrl_info_ext_supported:1; + u16 support_ibutton_less:1; + u16 supported_enc_algo:1; + u16 support_encrypted_mfc:1; + u16 image_upload_supported:1; + /* FW supports LUN based association and target port based */ + u16 support_ses_ctrl_in_multipathcfg:1; + /* association for the SES device connected in multipath mode */ + /* FW defines Jbod target Id within MR_PD_CFG_SEQ */ + u16 support_pd_map_target_id:1; + /* FW swaps relevant fields in MR_BBU_VPD_INFO_FIXED to + * provide the data in little endian order + */ + u16 fw_swaps_bbu_vpd_info:1; + u16 support_ssc_rev3:1; + /* FW supports CacheCade 3.0, only one SSCD creation allowed */ + u16 support_dual_fw_update:1; + /* FW supports dual firmware update feature */ + u16 support_host_info:1; + /* FW supports MR_DCMD_CTRL_HOST_INFO_SET/GET */ + u16 support_flash_comp_info:1; + /* FW supports MR_DCMD_CTRL_FLASH_COMP_INFO_GET */ + u16 support_pl_debug_info:1; + /* FW supports retrieval of PL debug information through apps */ + u16 support_nvme_passthru:1; + /* FW supports NVMe passthru commands */ + u16 reserved:2; + #endif + } adapter_operations4; + u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */ + + u32 size; + u32 pad1; + + u8 reserved6[64]; + + struct { + #if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved:19; + u32 support_pci_lane_margining: 1; + u32 support_psoc_update:1; + u32 support_force_personality_change:1; + u32 support_fde_type_mix:1; + u32 support_snap_dump:1; + u32 support_nvme_tm:1; + u32 support_oce_only:1; + u32 support_ext_mfg_vpd:1; + u32 support_pcie:1; + u32 support_cvhealth_info:1; + u32 support_profile_change:2; + u32 mr_config_ext2_supported:1; + #else + u32 mr_config_ext2_supported:1; + u32 support_profile_change:2; + u32 support_cvhealth_info:1; + u32 support_pcie:1; + u32 support_ext_mfg_vpd:1; + u32 support_oce_only:1; + u32 support_nvme_tm:1; + u32 support_snap_dump:1; + u32 support_fde_type_mix:1; + u32 support_force_personality_change:1; + u32 support_psoc_update:1; + u32 support_pci_lane_margining: 1; + u32 reserved:19; + #endif + } adapter_operations5; + + u32 rsvdForAdptOp[63]; + + u8 reserved7[3]; + + u8 TaskAbortTO; /* Timeout value in seconds used by Abort Task TM */ + u8 MaxResetTO; /* Max Supported Reset timeout in seconds. 
*/ + u8 reserved8[3]; +} __packed; + +/* + * =============================== + * MegaRAID SAS driver definitions + * =============================== + */ +#define MEGASAS_MAX_PD_CHANNELS 2 +#define MEGASAS_MAX_LD_CHANNELS 2 +#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \ + MEGASAS_MAX_LD_CHANNELS) +#define MEGASAS_MAX_DEV_PER_CHANNEL 128 +#define MEGASAS_DEFAULT_INIT_ID -1 +#define MEGASAS_MAX_LUN 8 +#define MEGASAS_DEFAULT_CMD_PER_LUN 256 +#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \ + MEGASAS_MAX_DEV_PER_CHANNEL) +#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \ + MEGASAS_MAX_DEV_PER_CHANNEL) + +#define MEGASAS_MAX_SUPPORTED_LD_IDS 240 + +#define MEGASAS_MAX_SECTORS (2*1024) +#define MEGASAS_MAX_SECTORS_IEEE (2*128) +#define MEGASAS_DBG_LVL 1 + +#define MEGASAS_FW_BUSY 1 + +/* Driver's internal Logging levels*/ +#define OCR_DEBUG (1 << 0) +#define TM_DEBUG (1 << 1) +#define LD_PD_DEBUG (1 << 2) + +#define SCAN_PD_CHANNEL 0x1 +#define SCAN_VD_CHANNEL 0x2 + +#define MEGASAS_KDUMP_QUEUE_DEPTH 100 +#define MR_LARGE_IO_MIN_SIZE (32 * 1024) +#define MR_R1_LDIO_PIGGYBACK_DEFAULT 4 + +enum MR_SCSI_CMD_TYPE { + READ_WRITE_LDIO = 0, + NON_READ_WRITE_LDIO = 1, + READ_WRITE_SYSPDIO = 2, + NON_READ_WRITE_SYSPDIO = 3, +}; + +enum DCMD_TIMEOUT_ACTION { + INITIATE_OCR = 0, + KILL_ADAPTER = 1, + IGNORE_TIMEOUT = 2, +}; + +enum FW_BOOT_CONTEXT { + PROBE_CONTEXT = 0, + OCR_CONTEXT = 1, +}; + +/* Frame Type */ +#define IO_FRAME 0 +#define PTHRU_FRAME 1 + +/* + * When SCSI mid-layer calls driver's reset routine, driver waits for + * MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note + * that the driver cannot _actually_ abort or reset pending commands. While + * it is waiting for the commands to complete, it prints a diagnostic message + * every MEGASAS_RESET_NOTICE_INTERVAL seconds + */ +#define MEGASAS_RESET_WAIT_TIME 180 +#define MEGASAS_INTERNAL_CMD_WAIT_TIME 180 +#define MEGASAS_RESET_NOTICE_INTERVAL 5 +#define MEGASAS_IOCTL_CMD 0 +#define MEGASAS_DEFAULT_CMD_TIMEOUT 90 +#define MEGASAS_THROTTLE_QUEUE_DEPTH 16 +#define MEGASAS_DEFAULT_TM_TIMEOUT 50 +/* + * FW reports the maximum of number of commands that it can accept (maximum + * commands that can be outstanding) at any time. The driver must report a + * lower number to the mid layer because it can issue a few internal commands + * itself (E.g, AEN, abort cmd, IOCTLs etc). The number of commands it needs + * is shown below + */ +#define MEGASAS_INT_CMDS 32 +#define MEGASAS_SKINNY_INT_CMDS 5 +#define MEGASAS_FUSION_INTERNAL_CMDS 8 +#define MEGASAS_FUSION_IOCTL_CMDS 3 +#define MEGASAS_MFI_IOCTL_CMDS 27 + +#define MEGASAS_MAX_MSIX_QUEUES 128 +/* + * FW can accept both 32 and 64 bit SGLs. 
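/*
 * A minimal sketch (illustrative only, not the driver's actual probe code)
 * of how the firmware-reported limits in megasas_ctrl_info above are
 * typically consumed: little-endian counters are converted first, capability
 * bits are tested directly, and a few command slots are held back for the
 * driver's internal commands (e.g. MEGASAS_INT_CMDS above) before a queue
 * depth is reported to the SCSI mid layer.
 */
static u16 mr_example_host_can_queue(const struct megasas_ctrl_info *ci,
                                     u16 internal_cmds)
{
        u16 fw_max = le16_to_cpu(ci->max_concurrent_cmds);

        if (ci->adapterOperations2.supportJBOD)
                pr_debug("controller advertises JBOD support\n");

        /* keep headroom for AEN registration, aborts, IOCTLs etc. */
        return (fw_max > internal_cmds) ? fw_max - internal_cmds : fw_max;
}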
We want to allocate 32/64 bit + * SGLs based on the size of dma_addr_t + */ +#define IS_DMA64 (sizeof(dma_addr_t) == 8) + +#define MFI_XSCALE_OMR0_CHANGE_INTERRUPT 0x00000001 + +#define MFI_INTR_FLAG_REPLY_MESSAGE 0x00000001 +#define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE 0x00000002 +#define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT 0x00000004 + +#define MFI_OB_INTR_STATUS_MASK 0x00000002 +#define MFI_POLL_TIMEOUT_SECS 60 +#define MFI_IO_TIMEOUT_SECS 180 +#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF (5 * HZ) +#define MEGASAS_OCR_SETTLE_TIME_VF (1000 * 30) +#define MEGASAS_SRIOV_MAX_RESET_TRIES_VF 1 +#define MEGASAS_ROUTINE_WAIT_TIME_VF 300 +#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 +#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001 +#define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004) +#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000 +#define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001) + +#define MFI_1068_PCSR_OFFSET 0x84 +#define MFI_1068_FW_HANDSHAKE_OFFSET 0x64 +#define MFI_1068_FW_READY 0xDDDD0000 + +#define MR_MAX_REPLY_QUEUES_OFFSET 0X0000001F +#define MR_MAX_REPLY_QUEUES_EXT_OFFSET 0X003FC000 +#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14 +#define MR_MAX_MSIX_REG_ARRAY 16 +#define MR_RDPQ_MODE_OFFSET 0X00800000 + +#define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT 16 +#define MR_MAX_RAID_MAP_SIZE_MASK 0x1FF +#define MR_MIN_MAP_SIZE 0x10000 +/* 64k */ + +#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000 + +#define MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET (1 << 24) + +#define MR_CAN_HANDLE_64_BIT_DMA_OFFSET (1 << 25) +#define MR_INTR_COALESCING_SUPPORT_OFFSET (1 << 26) + +#define MEGASAS_WATCHDOG_THREAD_INTERVAL 1000 +#define MEGASAS_WAIT_FOR_NEXT_DMA_MSECS 20 +#define MEGASAS_WATCHDOG_WAIT_COUNT 50 + +enum MR_ADAPTER_TYPE { + MFI_SERIES = 1, + THUNDERBOLT_SERIES = 2, + INVADER_SERIES = 3, + VENTURA_SERIES = 4, + AERO_SERIES = 5, +}; + +/* +* register set for both 1068 and 1078 controllers +* structure extended for 1078 registers +*/ + +struct megasas_register_set { + u32 doorbell; /*0000h*/ + u32 fusion_seq_offset; /*0004h*/ + u32 fusion_host_diag; /*0008h*/ + u32 reserved_01; /*000Ch*/ + + u32 inbound_msg_0; /*0010h*/ + u32 inbound_msg_1; /*0014h*/ + u32 outbound_msg_0; /*0018h*/ + u32 outbound_msg_1; /*001Ch*/ + + u32 inbound_doorbell; /*0020h*/ + u32 inbound_intr_status; /*0024h*/ + u32 inbound_intr_mask; /*0028h*/ + + u32 outbound_doorbell; /*002Ch*/ + u32 outbound_intr_status; /*0030h*/ + u32 outbound_intr_mask; /*0034h*/ + + u32 reserved_1[2]; /*0038h*/ + + u32 inbound_queue_port; /*0040h*/ + u32 outbound_queue_port; /*0044h*/ + + u32 reserved_2[9]; /*0048h*/ + u32 reply_post_host_index; /*006Ch*/ + u32 reserved_2_2[12]; /*0070h*/ + + u32 outbound_doorbell_clear; /*00A0h*/ + + u32 reserved_3[3]; /*00A4h*/ + + u32 outbound_scratch_pad_0; /*00B0h*/ + u32 outbound_scratch_pad_1; /*00B4h*/ + u32 outbound_scratch_pad_2; /*00B8h*/ + u32 outbound_scratch_pad_3; /*00BCh*/ + + u32 inbound_low_queue_port ; /*00C0h*/ + + u32 inbound_high_queue_port ; /*00C4h*/ + + u32 inbound_single_queue_port; /*00C8h*/ + u32 res_6[11]; /*CCh*/ + u32 host_diag; + u32 seq_offset; + u32 index_registers[807]; /*00CCh*/ +} __attribute__ ((packed)); + +struct megasas_sge32 { + + __le32 phys_addr; + __le32 length; + +} __attribute__ ((packed)); + +struct megasas_sge64 { + + __le64 phys_addr; + __le32 length; + +} __attribute__ ((packed)); + +struct megasas_sge_skinny { + __le64 phys_addr; + __le32 length; + __le32 flag; +} __packed; + +union megasas_sgl { + DECLARE_FLEX_ARRAY(struct 
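/*
 * A minimal sketch of how the scratch-pad mask/shift constants above are
 * applied to a value read from the outbound scratch pad registers (via
 * megasas_readl(), defined later in this patch). The function name is
 * illustrative, and the real probe path applies further adjustments such as
 * clamping to the number of MSI-X vectors actually granted.
 */
static void mr_example_decode_scratch_pad(u32 scratch_pad)
{
        u32 legacy_queues = scratch_pad & MR_MAX_REPLY_QUEUES_OFFSET;
        u32 ext_queues = (scratch_pad & MR_MAX_REPLY_QUEUES_EXT_OFFSET) >>
                         MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT;
        bool rdpq_capable = !!(scratch_pad & MR_RDPQ_MODE_OFFSET);

        pr_debug("reply queues: legacy field %u, extended field %u, RDPQ %d\n",
                 legacy_queues, ext_queues, rdpq_capable);
}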
megasas_sge32, sge32); + DECLARE_FLEX_ARRAY(struct megasas_sge64, sge64); + DECLARE_FLEX_ARRAY(struct megasas_sge_skinny, sge_skinny); +} __attribute__ ((packed)); + +struct megasas_header { + + u8 cmd; /*00h */ + u8 sense_len; /*01h */ + u8 cmd_status; /*02h */ + u8 scsi_status; /*03h */ + + u8 target_id; /*04h */ + u8 lun; /*05h */ + u8 cdb_len; /*06h */ + u8 sge_count; /*07h */ + + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ + + __le16 flags; /*10h */ + __le16 timeout; /*12h */ + __le32 data_xferlen; /*14h */ + +} __attribute__ ((packed)); + +union megasas_sgl_frame { + + struct megasas_sge32 sge32[8]; + struct megasas_sge64 sge64[5]; + +} __attribute__ ((packed)); + +typedef union _MFI_CAPABILITIES { + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved:15; + u32 support_memdump:1; + u32 support_fw_exposed_dev_list:1; + u32 support_nvme_passthru:1; + u32 support_64bit_mode:1; + u32 support_pd_map_target_id:1; + u32 support_qd_throttling:1; + u32 support_fp_rlbypass:1; + u32 support_vfid_in_ioframe:1; + u32 support_ext_io_size:1; + u32 support_ext_queue_depth:1; + u32 security_protocol_cmds_fw:1; + u32 support_core_affinity:1; + u32 support_ndrive_r1_lb:1; + u32 support_max_255lds:1; + u32 support_fastpath_wb:1; + u32 support_additional_msix:1; + u32 support_fp_remote_lun:1; +#else + u32 support_fp_remote_lun:1; + u32 support_additional_msix:1; + u32 support_fastpath_wb:1; + u32 support_max_255lds:1; + u32 support_ndrive_r1_lb:1; + u32 support_core_affinity:1; + u32 security_protocol_cmds_fw:1; + u32 support_ext_queue_depth:1; + u32 support_ext_io_size:1; + u32 support_vfid_in_ioframe:1; + u32 support_fp_rlbypass:1; + u32 support_qd_throttling:1; + u32 support_pd_map_target_id:1; + u32 support_64bit_mode:1; + u32 support_nvme_passthru:1; + u32 support_fw_exposed_dev_list:1; + u32 support_memdump:1; + u32 reserved:15; +#endif + } mfi_capabilities; + __le32 reg; +} MFI_CAPABILITIES; + +struct megasas_init_frame { + + u8 cmd; /*00h */ + u8 reserved_0; /*01h */ + u8 cmd_status; /*02h */ + + u8 reserved_1; /*03h */ + MFI_CAPABILITIES driver_operations; /*04h*/ + + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ + + __le16 flags; /*10h */ + __le16 replyqueue_mask; /*12h */ + __le32 data_xfer_len; /*14h */ + + __le32 queue_info_new_phys_addr_lo; /*18h */ + __le32 queue_info_new_phys_addr_hi; /*1Ch */ + __le32 queue_info_old_phys_addr_lo; /*20h */ + __le32 queue_info_old_phys_addr_hi; /*24h */ + __le32 reserved_4[2]; /*28h */ + __le32 system_info_lo; /*30h */ + __le32 system_info_hi; /*34h */ + __le32 reserved_5[2]; /*38h */ + +} __attribute__ ((packed)); + +struct megasas_init_queue_info { + + __le32 init_flags; /*00h */ + __le32 reply_queue_entries; /*04h */ + + __le32 reply_queue_start_phys_addr_lo; /*08h */ + __le32 reply_queue_start_phys_addr_hi; /*0Ch */ + __le32 producer_index_phys_addr_lo; /*10h */ + __le32 producer_index_phys_addr_hi; /*14h */ + __le32 consumer_index_phys_addr_lo; /*18h */ + __le32 consumer_index_phys_addr_hi; /*1Ch */ + +} __attribute__ ((packed)); + +struct megasas_io_frame { + + u8 cmd; /*00h */ + u8 sense_len; /*01h */ + u8 cmd_status; /*02h */ + u8 scsi_status; /*03h */ + + u8 target_id; /*04h */ + u8 access_byte; /*05h */ + u8 reserved_0; /*06h */ + u8 sge_count; /*07h */ + + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ + + __le16 flags; /*10h */ + __le16 timeout; /*12h */ + __le32 lba_count; /*14h */ + + __le32 sense_buf_phys_addr_lo; /*18h */ + __le32 sense_buf_phys_addr_hi; /*1Ch */ + + __le32 start_lba_lo; /*20h */ + __le32 start_lba_hi; 
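/*
 * A minimal sketch (illustrative only) of how the MFI_CAPABILITIES bitfield
 * above can be advertised to firmware through the init frame's
 * driver_operations word; the particular bits set here are arbitrary, and
 * the in-place cpu_to_le32s() conversion assumes firmware consumes the
 * packed bitfield as a little-endian 32-bit word.
 */
static void mr_example_fill_init_caps(struct megasas_init_frame *init_frame)
{
        MFI_CAPABILITIES *drv_ops = &init_frame->driver_operations;

        drv_ops->mfi_capabilities.support_additional_msix = 1;
        drv_ops->mfi_capabilities.support_max_255lds = 1;
        drv_ops->mfi_capabilities.support_qd_throttling = 1;

        cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
}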
/*24h */ + + union megasas_sgl sgl; /*28h */ + +} __attribute__ ((packed)); + +struct megasas_pthru_frame { + + u8 cmd; /*00h */ + u8 sense_len; /*01h */ + u8 cmd_status; /*02h */ + u8 scsi_status; /*03h */ + + u8 target_id; /*04h */ + u8 lun; /*05h */ + u8 cdb_len; /*06h */ + u8 sge_count; /*07h */ + + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ + + __le16 flags; /*10h */ + __le16 timeout; /*12h */ + __le32 data_xfer_len; /*14h */ + + __le32 sense_buf_phys_addr_lo; /*18h */ + __le32 sense_buf_phys_addr_hi; /*1Ch */ + + u8 cdb[16]; /*20h */ + union megasas_sgl sgl; /*30h */ + +} __attribute__ ((packed)); + +struct megasas_dcmd_frame { + + u8 cmd; /*00h */ + u8 reserved_0; /*01h */ + u8 cmd_status; /*02h */ + u8 reserved_1[4]; /*03h */ + u8 sge_count; /*07h */ + + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ + + __le16 flags; /*10h */ + __le16 timeout; /*12h */ + + __le32 data_xfer_len; /*14h */ + __le32 opcode; /*18h */ + + union { /*1Ch */ + u8 b[12]; + __le16 s[6]; + __le32 w[3]; + } mbox; + + union megasas_sgl sgl; /*28h */ + +} __attribute__ ((packed)); + +struct megasas_abort_frame { + + u8 cmd; /*00h */ + u8 reserved_0; /*01h */ + u8 cmd_status; /*02h */ + + u8 reserved_1; /*03h */ + __le32 reserved_2; /*04h */ + + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ + + __le16 flags; /*10h */ + __le16 reserved_3; /*12h */ + __le32 reserved_4; /*14h */ + + __le32 abort_context; /*18h */ + __le32 pad_1; /*1Ch */ + + __le32 abort_mfi_phys_addr_lo; /*20h */ + __le32 abort_mfi_phys_addr_hi; /*24h */ + + __le32 reserved_5[6]; /*28h */ + +} __attribute__ ((packed)); + +struct megasas_smp_frame { + + u8 cmd; /*00h */ + u8 reserved_1; /*01h */ + u8 cmd_status; /*02h */ + u8 connection_status; /*03h */ + + u8 reserved_2[3]; /*04h */ + u8 sge_count; /*07h */ + + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ + + __le16 flags; /*10h */ + __le16 timeout; /*12h */ + + __le32 data_xfer_len; /*14h */ + __le64 sas_addr; /*18h */ + + union { + struct megasas_sge32 sge32[2]; /* [0]: resp [1]: req */ + struct megasas_sge64 sge64[2]; /* [0]: resp [1]: req */ + } sgl; + +} __attribute__ ((packed)); + +struct megasas_stp_frame { + + u8 cmd; /*00h */ + u8 reserved_1; /*01h */ + u8 cmd_status; /*02h */ + u8 reserved_2; /*03h */ + + u8 target_id; /*04h */ + u8 reserved_3[2]; /*05h */ + u8 sge_count; /*07h */ + + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ + + __le16 flags; /*10h */ + __le16 timeout; /*12h */ + + __le32 data_xfer_len; /*14h */ + + __le16 fis[10]; /*18h */ + __le32 stp_flags; + + union { + struct megasas_sge32 sge32[2]; /* [0]: resp [1]: data */ + struct megasas_sge64 sge64[2]; /* [0]: resp [1]: data */ + } sgl; + +} __attribute__ ((packed)); + +union megasas_frame { + + struct megasas_header hdr; + struct megasas_init_frame init; + struct megasas_io_frame io; + struct megasas_pthru_frame pthru; + struct megasas_dcmd_frame dcmd; + struct megasas_abort_frame abort; + struct megasas_smp_frame smp; + struct megasas_stp_frame stp; + + u8 raw_bytes[64]; +}; + +/** + * struct MR_PRIV_DEVICE - sdev private hostdata + * @is_tm_capable: firmware managed tm_capable flag + * @tm_busy: TM request is in progress + * @sdev_priv_busy: pending command per sdev + */ +struct MR_PRIV_DEVICE { + bool is_tm_capable; + bool tm_busy; + atomic_t sdev_priv_busy; + atomic_t r1_ldio_hint; + u8 interface_type; + u8 task_abort_tmo; + u8 target_reset_tmo; +}; +struct megasas_cmd; + +union megasas_evt_class_locale { + + struct { +#ifndef __BIG_ENDIAN_BITFIELD + u16 locale; + u8 reserved; + s8 class; +#else 
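/*
 * A minimal sketch (illustrative only, not a driver function) of how a
 * megasas_dcmd_frame is prepared before being fired at firmware: the MFI
 * command type and DCMD opcode are set, the transfer length recorded, and
 * megasas_set_dma_settings() (defined later in this patch) fills in the SGL
 * and finalizes the flags. MFI_CMD_DCMD and the MFI_FRAME_DIR_* direction
 * values are defined earlier in this header; the opcode and dir_flags
 * parameters are passed in here purely for illustration.
 */
static void mr_example_prepare_dcmd(struct megasas_instance *instance,
                                    struct megasas_cmd *cmd, u32 opcode,
                                    u16 dir_flags, dma_addr_t buf_h, u32 len)
{
        struct megasas_dcmd_frame *dcmd = &cmd->frame->dcmd;

        memset(dcmd->mbox.b, 0, sizeof(dcmd->mbox.b));
        dcmd->cmd = MFI_CMD_DCMD;
        dcmd->cmd_status = 0xFF;        /* firmware overwrites the status */
        dcmd->sge_count = 1;
        dcmd->flags = dir_flags;        /* converted to LE by the helper below */
        dcmd->timeout = 0;
        dcmd->data_xfer_len = cpu_to_le32(len);
        dcmd->opcode = cpu_to_le32(opcode);

        megasas_set_dma_settings(instance, dcmd, buf_h, len);
}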
+ s8 class; + u8 reserved; + u16 locale; +#endif + } __attribute__ ((packed)) members; + + u32 word; + +} __attribute__ ((packed)); + +struct megasas_evt_log_info { + __le32 newest_seq_num; + __le32 oldest_seq_num; + __le32 clear_seq_num; + __le32 shutdown_seq_num; + __le32 boot_seq_num; + +} __attribute__ ((packed)); + +struct megasas_progress { + + __le16 progress; + __le16 elapsed_seconds; + +} __attribute__ ((packed)); + +struct megasas_evtarg_ld { + + u16 target_id; + u8 ld_index; + u8 reserved; + +} __attribute__ ((packed)); + +struct megasas_evtarg_pd { + u16 device_id; + u8 encl_index; + u8 slot_number; + +} __attribute__ ((packed)); + +struct megasas_evt_detail { + + __le32 seq_num; + __le32 time_stamp; + __le32 code; + union megasas_evt_class_locale cl; + u8 arg_type; + u8 reserved1[15]; + + union { + struct { + struct megasas_evtarg_pd pd; + u8 cdb_length; + u8 sense_length; + u8 reserved[2]; + u8 cdb[16]; + u8 sense[64]; + } __attribute__ ((packed)) cdbSense; + + struct megasas_evtarg_ld ld; + + struct { + struct megasas_evtarg_ld ld; + __le64 count; + } __attribute__ ((packed)) ld_count; + + struct { + __le64 lba; + struct megasas_evtarg_ld ld; + } __attribute__ ((packed)) ld_lba; + + struct { + struct megasas_evtarg_ld ld; + __le32 prevOwner; + __le32 newOwner; + } __attribute__ ((packed)) ld_owner; + + struct { + u64 ld_lba; + u64 pd_lba; + struct megasas_evtarg_ld ld; + struct megasas_evtarg_pd pd; + } __attribute__ ((packed)) ld_lba_pd_lba; + + struct { + struct megasas_evtarg_ld ld; + struct megasas_progress prog; + } __attribute__ ((packed)) ld_prog; + + struct { + struct megasas_evtarg_ld ld; + u32 prev_state; + u32 new_state; + } __attribute__ ((packed)) ld_state; + + struct { + u64 strip; + struct megasas_evtarg_ld ld; + } __attribute__ ((packed)) ld_strip; + + struct megasas_evtarg_pd pd; + + struct { + struct megasas_evtarg_pd pd; + u32 err; + } __attribute__ ((packed)) pd_err; + + struct { + u64 lba; + struct megasas_evtarg_pd pd; + } __attribute__ ((packed)) pd_lba; + + struct { + u64 lba; + struct megasas_evtarg_pd pd; + struct megasas_evtarg_ld ld; + } __attribute__ ((packed)) pd_lba_ld; + + struct { + struct megasas_evtarg_pd pd; + struct megasas_progress prog; + } __attribute__ ((packed)) pd_prog; + + struct { + struct megasas_evtarg_pd pd; + u32 prevState; + u32 newState; + } __attribute__ ((packed)) pd_state; + + struct { + u16 vendorId; + __le16 deviceId; + u16 subVendorId; + u16 subDeviceId; + } __attribute__ ((packed)) pci; + + u32 rate; + char str[96]; + + struct { + u32 rtc; + u32 elapsedSeconds; + } __attribute__ ((packed)) time; + + struct { + u32 ecar; + u32 elog; + char str[64]; + } __attribute__ ((packed)) ecc; + + u8 b[96]; + __le16 s[48]; + __le32 w[24]; + __le64 d[12]; + } args; + + char description[128]; + +} __attribute__ ((packed)); + +struct megasas_aen_event { + struct delayed_work hotplug_work; + struct megasas_instance *instance; +}; + +struct megasas_irq_context { + char name[MEGASAS_MSIX_NAME_LEN]; + struct megasas_instance *instance; + u32 MSIxIndex; + u32 os_irq; + struct irq_poll irqpoll; + bool irq_poll_scheduled; + bool irq_line_enable; + atomic_t in_used; +}; + +struct MR_DRV_SYSTEM_INFO { + u8 infoVersion; + u8 systemIdLength; + u16 reserved0; + u8 systemId[64]; + u8 reserved[1980]; +}; + +enum MR_PD_TYPE { + UNKNOWN_DRIVE = 0, + PARALLEL_SCSI = 1, + SAS_PD = 2, + SATA_PD = 3, + FC_PD = 4, + NVME_PD = 5, +}; + +/* JBOD Queue depth definitions */ +#define MEGASAS_SATA_QD 32 +#define MEGASAS_SAS_QD 256 +#define 
MEGASAS_DEFAULT_PD_QD 64 +#define MEGASAS_NVME_QD 64 + +#define MR_DEFAULT_NVME_PAGE_SIZE 4096 +#define MR_DEFAULT_NVME_PAGE_SHIFT 12 +#define MR_DEFAULT_NVME_MDTS_KB 128 +#define MR_NVME_PAGE_SIZE_MASK 0x000000FF + +/*Aero performance parameters*/ +#define MR_HIGH_IOPS_QUEUE_COUNT 8 +#define MR_DEVICE_HIGH_IOPS_DEPTH 8 +#define MR_HIGH_IOPS_BATCH_COUNT 16 + +enum MR_PERF_MODE { + MR_BALANCED_PERF_MODE = 0, + MR_IOPS_PERF_MODE = 1, + MR_LATENCY_PERF_MODE = 2, +}; + +#define MEGASAS_PERF_MODE_2STR(mode) \ + ((mode) == MR_BALANCED_PERF_MODE ? "Balanced" : \ + (mode) == MR_IOPS_PERF_MODE ? "IOPS" : \ + (mode) == MR_LATENCY_PERF_MODE ? "Latency" : \ + "Unknown") + +enum MEGASAS_LD_TARGET_ID_STATUS { + LD_TARGET_ID_INITIAL, + LD_TARGET_ID_ACTIVE, + LD_TARGET_ID_DELETED, +}; + +#define MEGASAS_TARGET_ID(sdev) \ + (((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id) + +struct megasas_instance { + + unsigned int *reply_map; + __le32 *producer; + dma_addr_t producer_h; + __le32 *consumer; + dma_addr_t consumer_h; + struct MR_DRV_SYSTEM_INFO *system_info_buf; + dma_addr_t system_info_h; + struct MR_LD_VF_AFFILIATION *vf_affiliation; + dma_addr_t vf_affiliation_h; + struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111; + dma_addr_t vf_affiliation_111_h; + struct MR_CTRL_HB_HOST_MEM *hb_host_mem; + dma_addr_t hb_host_mem_h; + struct MR_PD_INFO *pd_info; + dma_addr_t pd_info_h; + struct MR_TARGET_PROPERTIES *tgt_prop; + dma_addr_t tgt_prop_h; + + __le32 *reply_queue; + dma_addr_t reply_queue_h; + + u32 *crash_dump_buf; + dma_addr_t crash_dump_h; + + struct MR_PD_LIST *pd_list_buf; + dma_addr_t pd_list_buf_h; + + struct megasas_ctrl_info *ctrl_info_buf; + dma_addr_t ctrl_info_buf_h; + + struct MR_LD_LIST *ld_list_buf; + dma_addr_t ld_list_buf_h; + + struct MR_LD_TARGETID_LIST *ld_targetid_list_buf; + dma_addr_t ld_targetid_list_buf_h; + + struct MR_HOST_DEVICE_LIST *host_device_list_buf; + dma_addr_t host_device_list_buf_h; + + struct MR_SNAPDUMP_PROPERTIES *snapdump_prop; + dma_addr_t snapdump_prop_h; + + void *crash_buf[MAX_CRASH_DUMP_SIZE]; + unsigned int fw_crash_buffer_size; + unsigned int fw_crash_state; + unsigned int fw_crash_buffer_offset; + u32 drv_buf_index; + u32 drv_buf_alloc; + u32 crash_dump_fw_support; + u32 crash_dump_drv_support; + u32 crash_dump_app_support; + u32 secure_jbod_support; + u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */ + bool use_seqnum_jbod_fp; /* Added for PD sequence */ + bool smp_affinity_enable; + struct mutex crashdump_lock; + + struct megasas_register_set __iomem *reg_set; + u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; + struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; + struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD]; + u8 ld_ids[MEGASAS_MAX_LD_IDS]; + u8 ld_tgtid_status[MEGASAS_MAX_LD_IDS]; + u8 ld_ids_prev[MEGASAS_MAX_LD_IDS]; + u8 ld_ids_from_raidmap[MEGASAS_MAX_LD_IDS]; + s8 init_id; + + u16 max_num_sge; + u16 max_fw_cmds; + u16 max_mpt_cmds; + u16 max_mfi_cmds; + u16 max_scsi_cmds; + u16 ldio_threshold; + u16 cur_can_queue; + u32 max_sectors_per_req; + bool msix_load_balance; + struct megasas_aen_event *ev; + + struct megasas_cmd **cmd_list; + struct list_head cmd_pool; + /* used to sync fire the cmd to fw */ + spinlock_t mfi_pool_lock; + /* used to sync fire the cmd to fw */ + spinlock_t hba_lock; + /* used to synch producer, consumer ptrs in dpc */ + spinlock_t stream_lock; + spinlock_t completion_lock; + struct dma_pool *frame_dma_pool; + struct dma_pool *sense_dma_pool; + + struct 
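/*
 * A minimal sketch (illustrative only) of how the per-protocol JBOD queue
 * depths above map onto a device's MR_PD_TYPE; the helper name is made up,
 * and the real driver applies further checks (firmware limits, the
 * enable_sdev_max_qd module parameter) when actually setting a depth.
 */
static int mr_example_jbod_queue_depth(enum MR_PD_TYPE drive_type)
{
        switch (drive_type) {
        case SAS_PD:
                return MEGASAS_SAS_QD;
        case SATA_PD:
                return MEGASAS_SATA_QD;
        case NVME_PD:
                return MEGASAS_NVME_QD;
        default:
                return MEGASAS_DEFAULT_PD_QD;
        }
}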
megasas_evt_detail *evt_detail; + dma_addr_t evt_detail_h; + struct megasas_cmd *aen_cmd; + struct semaphore ioctl_sem; + + struct Scsi_Host *host; + + wait_queue_head_t int_cmd_wait_q; + wait_queue_head_t abort_cmd_wait_q; + + struct pci_dev *pdev; + u32 unique_id; + u32 fw_support_ieee; + u32 threshold_reply_count; + + atomic_t fw_outstanding; + atomic_t ldio_outstanding; + atomic_t fw_reset_no_pci_access; + atomic64_t total_io_count; + atomic64_t high_iops_outstanding; + + struct megasas_instance_template *instancet; + struct tasklet_struct isr_tasklet; + struct work_struct work_init; + struct delayed_work fw_fault_work; + struct workqueue_struct *fw_fault_work_q; + char fault_handler_work_q_name[48]; + + u8 flag; + u8 unload; + u8 flag_ieee; + u8 issuepend_done; + u8 disableOnlineCtrlReset; + u8 UnevenSpanSupport; + + u8 supportmax256vd; + u8 pd_list_not_supported; + u16 fw_supported_vd_count; + u16 fw_supported_pd_count; + + u16 drv_supported_vd_count; + u16 drv_supported_pd_count; + + atomic_t adprecovery; + unsigned long last_time; + u32 mfiStatus; + u32 last_seq_num; + + struct list_head internal_reset_pending_q; + + /* Ptr to hba specific information */ + void *ctrl_context; + unsigned int msix_vectors; + struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES]; + u64 map_id; + u64 pd_seq_map_id; + struct megasas_cmd *map_update_cmd; + struct megasas_cmd *jbod_seq_cmd; + unsigned long bar; + long reset_flags; + struct mutex reset_mutex; + struct timer_list sriov_heartbeat_timer; + char skip_heartbeat_timer_del; + u8 requestorId; + char PlasmaFW111; + char clusterId[MEGASAS_CLUSTER_ID_SIZE]; + u8 peerIsPresent; + u8 passive; + u16 throttlequeuedepth; + u8 mask_interrupts; + u16 max_chain_frame_sz; + u8 is_imr; + u8 is_rdpq; + bool dev_handle; + bool fw_sync_cache_support; + u32 mfi_frame_size; + bool msix_combined; + u16 max_raid_mapsize; + /* preffered count to send as LDIO irrspective of FP capable.*/ + u8 r1_ldio_hint_default; + u32 nvme_page_size; + u8 adapter_type; + bool consistent_mask_64bit; + bool support_nvme_passthru; + bool enable_sdev_max_qd; + u8 task_abort_tmo; + u8 max_reset_tmo; + u8 snapdump_wait_time; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_root; + struct dentry *raidmap_dump; +#endif + u8 enable_fw_dev_list; + bool atomic_desc_support; + bool support_seqnum_jbod_fp; + bool support_pci_lane_margining; + u8 low_latency_index_start; + int perf_mode; + int iopoll_q_count; +}; + +struct MR_LD_VF_MAP { + u32 size; + union MR_LD_REF ref; + u8 ldVfCount; + u8 reserved[6]; + u8 policy[1]; +}; + +struct MR_LD_VF_AFFILIATION { + u32 size; + u8 ldCount; + u8 vfCount; + u8 thisVf; + u8 reserved[9]; + struct MR_LD_VF_MAP map[1]; +}; + +/* Plasma 1.11 FW backward compatibility structures */ +#define IOV_111_OFFSET 0x7CE +#define MAX_VIRTUAL_FUNCTIONS 8 +#define MR_LD_ACCESS_HIDDEN 15 + +struct IOV_111 { + u8 maxVFsSupported; + u8 numVFsEnabled; + u8 requestorId; + u8 reserved[5]; +}; + +struct MR_LD_VF_MAP_111 { + u8 targetId; + u8 reserved[3]; + u8 policy[MAX_VIRTUAL_FUNCTIONS]; +}; + +struct MR_LD_VF_AFFILIATION_111 { + u8 vdCount; + u8 vfCount; + u8 thisVf; + u8 reserved[5]; + struct MR_LD_VF_MAP_111 map[MAX_LOGICAL_DRIVES]; +}; + +struct MR_CTRL_HB_HOST_MEM { + struct { + u32 fwCounter; /* Firmware heart beat counter */ + struct { + u32 debugmode:1; /* 1=Firmware is in debug mode. + Heart beat will not be updated. */ + u32 reserved:31; + } debug; + u32 reserved_fw[6]; + u32 driverCounter; /* Driver heart beat counter. 
0x20 */ + u32 reserved_driver[7]; + } HB; + u8 pad[0x400-0x40]; +}; + +enum { + MEGASAS_HBA_OPERATIONAL = 0, + MEGASAS_ADPRESET_SM_INFAULT = 1, + MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS = 2, + MEGASAS_ADPRESET_SM_OPERATIONAL = 3, + MEGASAS_HW_CRITICAL_ERROR = 4, + MEGASAS_ADPRESET_SM_POLLING = 5, + MEGASAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD, +}; + +struct megasas_instance_template { + void (*fire_cmd)(struct megasas_instance *, dma_addr_t, \ + u32, struct megasas_register_set __iomem *); + + void (*enable_intr)(struct megasas_instance *); + void (*disable_intr)(struct megasas_instance *); + + int (*clear_intr)(struct megasas_instance *); + + u32 (*read_fw_status_reg)(struct megasas_instance *); + int (*adp_reset)(struct megasas_instance *, \ + struct megasas_register_set __iomem *); + int (*check_reset)(struct megasas_instance *, \ + struct megasas_register_set __iomem *); + irqreturn_t (*service_isr)(int irq, void *devp); + void (*tasklet)(unsigned long); + u32 (*init_adapter)(struct megasas_instance *); + u32 (*build_and_issue_cmd) (struct megasas_instance *, + struct scsi_cmnd *); + void (*issue_dcmd)(struct megasas_instance *instance, + struct megasas_cmd *cmd); +}; + +#define MEGASAS_IS_LOGICAL(sdev) \ + ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1) + +#define MEGASAS_IS_LUN_VALID(sdev) \ + (((sdev)->lun == 0) ? 1 : 0) + +#define MEGASAS_DEV_INDEX(scp) \ + (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ + scp->device->id) + +#define MEGASAS_PD_INDEX(scp) \ + ((scp->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + \ + scp->device->id) + +struct megasas_cmd { + + union megasas_frame *frame; + dma_addr_t frame_phys_addr; + u8 *sense; + dma_addr_t sense_phys_addr; + + u32 index; + u8 sync_cmd; + u8 cmd_status_drv; + u8 abort_aen; + u8 retry_for_fw_reset; + + + struct list_head list; + struct scsi_cmnd *scmd; + u8 flags; + + struct megasas_instance *instance; + union { + struct { + u16 smid; + u16 resvd; + } context; + u32 frame_count; + }; +}; + +struct megasas_cmd_priv { + void *cmd_priv; + u8 status; +}; + +static inline struct megasas_cmd_priv *megasas_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +#define MAX_MGMT_ADAPTERS 1024 +#define MAX_IOCTL_SGE 16 + +struct megasas_iocpacket { + + u16 host_no; + u16 __pad1; + u32 sgl_off; + u32 sge_count; + u32 sense_off; + u32 sense_len; + union { + u8 raw[128]; + struct megasas_header hdr; + } frame; + + struct iovec sgl[MAX_IOCTL_SGE]; + +} __attribute__ ((packed)); + +struct megasas_aen { + u16 host_no; + u16 __pad1; + u32 seq_num; + u32 class_locale_word; +} __attribute__ ((packed)); + +struct compat_megasas_iocpacket { + u16 host_no; + u16 __pad1; + u32 sgl_off; + u32 sge_count; + u32 sense_off; + u32 sense_len; + union { + u8 raw[128]; + struct megasas_header hdr; + } frame; + struct compat_iovec sgl[MAX_IOCTL_SGE]; +} __attribute__ ((packed)); + +#define MEGASAS_IOC_FIRMWARE32 _IOWR('M', 1, struct compat_megasas_iocpacket) + +#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket) +#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen) + +struct megasas_mgmt_info { + + u16 count; + struct megasas_instance *instance[MAX_MGMT_ADAPTERS]; + int max_index; +}; + +enum MEGASAS_OCR_CAUSE { + FW_FAULT_OCR = 0, + SCSIIO_TIMEOUT_OCR = 1, + MFI_IO_TIMEOUT_OCR = 2, +}; + +enum DCMD_RETURN_STATUS { + DCMD_SUCCESS = 0x00, + DCMD_TIMEOUT = 0x01, + DCMD_FAILED = 0x02, + DCMD_BUSY = 0x03, + DCMD_INIT = 0xff, +}; + +u8 +MR_BuildRaidContext(struct megasas_instance *instance, + struct IO_REQUEST_INFO 
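/*
 * Worked example for the channel/index macros above, following from
 * MEGASAS_MAX_DEV_PER_CHANNEL == 128 with two PD channels (0-1) and two
 * LD channels (2-3):
 *
 *   sdev on channel 1, id 5  ->  MEGASAS_IS_LOGICAL == 0 (system PD),
 *                                MEGASAS_PD_INDEX   == 1 * 128 + 5 == 133
 *   sdev on channel 3, id 5  ->  MEGASAS_IS_LOGICAL == 1 (logical drive),
 *                                MEGASAS_DEV_INDEX  == (3 % 2) * 128 + 5 == 133
 *
 * i.e. MEGASAS_PD_INDEX spans both physical channels (0..255), while
 * MEGASAS_DEV_INDEX (like MEGASAS_TARGET_ID above) folds the two virtual
 * channels back onto 0..255.
 */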
*io_info, + struct RAID_CONTEXT *pRAID_Context, + struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN); +u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map); +struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); +u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map); +u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map); +__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map); +u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); + +__le16 get_updated_dev_handle(struct megasas_instance *instance, + struct LD_LOAD_BALANCE_INFO *lbInfo, + struct IO_REQUEST_INFO *in_info, + struct MR_DRV_RAID_MAP_ALL *drv_map); +void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map, + struct LD_LOAD_BALANCE_INFO *lbInfo); +int megasas_get_ctrl_info(struct megasas_instance *instance); +/* PD sequence */ +int +megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend); +void megasas_set_dynamic_target_properties(struct scsi_device *sdev, + bool is_target_prop); +int megasas_get_target_prop(struct megasas_instance *instance, + struct scsi_device *sdev); +void megasas_get_snapdump_properties(struct megasas_instance *instance); + +int megasas_set_crash_dump_params(struct megasas_instance *instance, + u8 crash_buf_state); +void megasas_free_host_crash_buffer(struct megasas_instance *instance); + +void megasas_return_cmd_fusion(struct megasas_instance *instance, + struct megasas_cmd_fusion *cmd); +int megasas_issue_blocked_cmd(struct megasas_instance *instance, + struct megasas_cmd *cmd, int timeout); +void __megasas_return_cmd(struct megasas_instance *instance, + struct megasas_cmd *cmd); + +void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance, + struct megasas_cmd *cmd_mfi, struct megasas_cmd_fusion *cmd_fusion); +int megasas_cmd_type(struct scsi_cmnd *cmd); +void megasas_setup_jbod_map(struct megasas_instance *instance); + +void megasas_update_sdev_properties(struct scsi_device *sdev); +int megasas_reset_fusion(struct Scsi_Host *shost, int reason); +int megasas_task_abort_fusion(struct scsi_cmnd *scmd); +int megasas_reset_target_fusion(struct scsi_cmnd *scmd); +u32 mega_mod64(u64 dividend, u32 divisor); +int megasas_alloc_fusion_context(struct megasas_instance *instance); +void megasas_free_fusion_context(struct megasas_instance *instance); +int megasas_fusion_start_watchdog(struct megasas_instance *instance); +void megasas_fusion_stop_watchdog(struct megasas_instance *instance); + +void megasas_set_dma_settings(struct megasas_instance *instance, + struct megasas_dcmd_frame *dcmd, + dma_addr_t dma_addr, u32 dma_len); +int megasas_adp_reset_wait_for_ready(struct megasas_instance *instance, + bool do_adp_reset, + int ocr_context); +int megasas_irqpoll(struct irq_poll *irqpoll, int budget); +void megasas_dump_fusion_io(struct scsi_cmnd *scmd); +u32 megasas_readl(struct megasas_instance *instance, + const volatile void __iomem *addr); +struct megasas_cmd *megasas_get_cmd(struct megasas_instance *instance); +void megasas_return_cmd(struct megasas_instance *instance, + struct megasas_cmd *cmd); +int megasas_issue_polled(struct megasas_instance *instance, + struct megasas_cmd *cmd); +void megaraid_sas_kill_hba(struct megasas_instance *instance); +void megasas_check_and_restore_queue_depth(struct megasas_instance *instance); +void megasas_start_timer(struct megasas_instance *instance); +int megasas_sriov_start_heartbeat(struct megasas_instance *instance, + int initial); +int megasas_alloc_cmds(struct 
megasas_instance *instance); +void megasas_free_cmds(struct megasas_instance *instance); + +void megasas_init_debugfs(void); +void megasas_exit_debugfs(void); +void megasas_setup_debugfs(struct megasas_instance *instance); +void megasas_destroy_debugfs(struct megasas_instance *instance); +int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num); + +#endif /*LSI_MEGARAID_SAS_H */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c new file mode 100644 index 000000000..3d4f13da1 --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -0,0 +1,9129 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Linux MegaRAID driver for SAS based RAID controllers + * + * Copyright (c) 2003-2013 LSI Corporation + * Copyright (c) 2013-2016 Avago Technologies + * Copyright (c) 2016-2018 Broadcom Inc. + * + * Authors: Broadcom Inc. + * Sreenivas Bagalkote + * Sumant Patro + * Bo Yang + * Adam Radford + * Kashyap Desai + * Sumit Saxena + * + * Send feedback to: megaraidlinux.pdl@broadcom.com + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "megaraid_sas_fusion.h" +#include "megaraid_sas.h" + +/* + * Number of sectors per IO command + * Will be set in megasas_init_mfi if user does not provide + */ +static unsigned int max_sectors; +module_param_named(max_sectors, max_sectors, int, 0444); +MODULE_PARM_DESC(max_sectors, + "Maximum number of sectors per IO command"); + +static int msix_disable; +module_param(msix_disable, int, 0444); +MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0"); + +static unsigned int msix_vectors; +module_param(msix_vectors, int, 0444); +MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW"); + +static int allow_vf_ioctls; +module_param(allow_vf_ioctls, int, 0444); +MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0"); + +static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; +module_param(throttlequeuedepth, int, 0444); +MODULE_PARM_DESC(throttlequeuedepth, + "Adapter queue depth when throttled due to I/O timeout. Default: 16"); + +unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME; +module_param(resetwaittime, int, 0444); +MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s"); + +static int smp_affinity_enable = 1; +module_param(smp_affinity_enable, int, 0444); +MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)"); + +static int rdpq_enable = 1; +module_param(rdpq_enable, int, 0444); +MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)"); + +unsigned int dual_qdepth_disable; +module_param(dual_qdepth_disable, int, 0444); +MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0"); + +static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; +module_param(scmd_timeout, int, 0444); +MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. 
See megasas_reset_timer."); + +int perf_mode = -1; +module_param(perf_mode, int, 0444); +MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t" + "0 - balanced: High iops and low latency queues are allocated &\n\t\t" + "interrupt coalescing is enabled only on high iops queues\n\t\t" + "1 - iops: High iops queues are not allocated &\n\t\t" + "interrupt coalescing is enabled on all queues\n\t\t" + "2 - latency: High iops queues are not allocated &\n\t\t" + "interrupt coalescing is disabled on all queues\n\t\t" + "default mode is 'balanced'" + ); + +int event_log_level = MFI_EVT_CLASS_CRITICAL; +module_param(event_log_level, int, 0644); +MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)"); + +unsigned int enable_sdev_max_qd; +module_param(enable_sdev_max_qd, int, 0444); +MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0"); + +int poll_queues; +module_param(poll_queues, int, 0444); +MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode.\n\t\t" + "This parameter is effective only if host_tagset_enable=1 &\n\t\t" + "It is not applicable for MFI_SERIES. &\n\t\t" + "Driver will work in latency mode. &\n\t\t" + "High iops queues are not allocated &\n\t\t" + ); + +int host_tagset_enable = 1; +module_param(host_tagset_enable, int, 0444); +MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)"); + +MODULE_LICENSE("GPL"); +MODULE_VERSION(MEGASAS_VERSION); +MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com"); +MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver"); + +int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); +static int megasas_get_pd_list(struct megasas_instance *instance); +static int megasas_ld_list_query(struct megasas_instance *instance, + u8 query_type); +static int megasas_issue_init_mfi(struct megasas_instance *instance); +static int megasas_register_aen(struct megasas_instance *instance, + u32 seq_num, u32 class_locale_word); +static void megasas_get_pd_info(struct megasas_instance *instance, + struct scsi_device *sdev); +static void +megasas_set_ld_removed_by_fw(struct megasas_instance *instance); + +/* + * PCI ID table for all supported controllers + */ +static struct pci_device_id megasas_pci_table[] = { + + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)}, + /* xscale IOP */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)}, + /* ppc IOP */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, + /* ppc IOP */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)}, + /* gen2*/ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, + /* gen2*/ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)}, + /* skinny*/ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)}, + /* skinny*/ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, + /* xscale IOP, vega */ + {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, + /* xscale IOP */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)}, + /* Fusion */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)}, + /* Plasma */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)}, + /* Invader */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)}, + /* Fury */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, 
PCI_DEVICE_ID_LSI_INTRUDER)}, + /* Intruder */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)}, + /* Intruder 24 port*/ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)}, + /* VENTURA */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)}, + {} +}; + +MODULE_DEVICE_TABLE(pci, megasas_pci_table); + +static int megasas_mgmt_majorno; +struct megasas_mgmt_info megasas_mgmt_info; +static struct fasync_struct *megasas_async_queue; +static DEFINE_MUTEX(megasas_async_queue_mutex); + +static int megasas_poll_wait_aen; +static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); +static u32 support_poll_for_event; +u32 megasas_dbg_lvl; +static u32 support_device_change; +static bool support_nvme_encapsulation; +static bool support_pci_lane_margining; + +/* define lock for aen poll */ +static DEFINE_SPINLOCK(poll_aen_lock); + +extern struct dentry *megasas_debugfs_root; +extern int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num); + +void +megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, + u8 alt_status); +static u32 +megasas_read_fw_status_reg_gen2(struct megasas_instance *instance); +static int +megasas_adp_reset_gen2(struct megasas_instance *instance, + struct megasas_register_set __iomem *reg_set); +static irqreturn_t megasas_isr(int irq, void *devp); +static u32 +megasas_init_adapter_mfi(struct megasas_instance *instance); +u32 +megasas_build_and_issue_cmd(struct megasas_instance *instance, + struct scsi_cmnd *scmd); +static void megasas_complete_cmd_dpc(unsigned long instance_addr); +int +wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, + int seconds); +void megasas_fusion_ocr_wq(struct work_struct *work); +static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, + int initial); +static int +megasas_set_dma_mask(struct megasas_instance *instance); +static int +megasas_alloc_ctrl_mem(struct megasas_instance *instance); +static inline void +megasas_free_ctrl_mem(struct megasas_instance *instance); +static inline int +megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance); +static inline void +megasas_free_ctrl_dma_buffers(struct megasas_instance *instance); +static inline void +megasas_init_ctrl_params(struct megasas_instance *instance); + +u32 megasas_readl(struct megasas_instance *instance, + const volatile void __iomem *addr) +{ + u32 i = 0, ret_val; + /* + * Due to a HW errata in Aero controllers, reads to certain + * Fusion registers could intermittently return all zeroes. 
+ * This behavior is transient in nature and subsequent reads will + * return valid value. As a workaround in driver, retry readl for + * up to thirty times until a non-zero value is read. + */ + if (instance->adapter_type == AERO_SERIES) { + do { + ret_val = readl(addr); + i++; + } while (ret_val == 0 && i < 30); + return ret_val; + } else { + return readl(addr); + } +} + +/** + * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs + * @instance: Adapter soft state + * @dcmd: DCMD frame inside MFI command + * @dma_addr: DMA address of buffer to be passed to FW + * @dma_len: Length of DMA buffer to be passed to FW + * @return: void + */ +void megasas_set_dma_settings(struct megasas_instance *instance, + struct megasas_dcmd_frame *dcmd, + dma_addr_t dma_addr, u32 dma_len) +{ + if (instance->consistent_mask_64bit) { + dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr); + dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len); + dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64); + + } else { + dcmd->sgl.sge32[0].phys_addr = + cpu_to_le32(lower_32_bits(dma_addr)); + dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len); + dcmd->flags = cpu_to_le16(dcmd->flags); + } +} + +static void +megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) +{ + instance->instancet->fire_cmd(instance, + cmd->frame_phys_addr, 0, instance->reg_set); + return; +} + +/** + * megasas_get_cmd - Get a command from the free pool + * @instance: Adapter soft state + * + * Returns a free command from the pool + */ +struct megasas_cmd *megasas_get_cmd(struct megasas_instance + *instance) +{ + unsigned long flags; + struct megasas_cmd *cmd = NULL; + + spin_lock_irqsave(&instance->mfi_pool_lock, flags); + + if (!list_empty(&instance->cmd_pool)) { + cmd = list_entry((&instance->cmd_pool)->next, + struct megasas_cmd, list); + list_del_init(&cmd->list); + } else { + dev_err(&instance->pdev->dev, "Command pool empty!\n"); + } + + spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); + return cmd; +} + +/** + * megasas_return_cmd - Return a cmd to free command pool + * @instance: Adapter soft state + * @cmd: Command packet to be returned to free command pool + */ +void +megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) +{ + unsigned long flags; + u32 blk_tags; + struct megasas_cmd_fusion *cmd_fusion; + struct fusion_context *fusion = instance->ctrl_context; + + /* This flag is used only for fusion adapter. 
+ * Wait for Interrupt for Polled mode DCMD + */ + if (cmd->flags & DRV_DCMD_POLLED_MODE) + return; + + spin_lock_irqsave(&instance->mfi_pool_lock, flags); + + if (fusion) { + blk_tags = instance->max_scsi_cmds + cmd->index; + cmd_fusion = fusion->cmd_list[blk_tags]; + megasas_return_cmd_fusion(instance, cmd_fusion); + } + cmd->scmd = NULL; + cmd->frame_count = 0; + cmd->flags = 0; + memset(cmd->frame, 0, instance->mfi_frame_size); + cmd->frame->io.context = cpu_to_le32(cmd->index); + if (!fusion && reset_devices) + cmd->frame->hdr.cmd = MFI_CMD_INVALID; + list_add(&cmd->list, (&instance->cmd_pool)->next); + + spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); + +} + +static const char * +format_timestamp(uint32_t timestamp) +{ + static char buffer[32]; + + if ((timestamp & 0xff000000) == 0xff000000) + snprintf(buffer, sizeof(buffer), "boot + %us", timestamp & + 0x00ffffff); + else + snprintf(buffer, sizeof(buffer), "%us", timestamp); + return buffer; +} + +static const char * +format_class(int8_t class) +{ + static char buffer[6]; + + switch (class) { + case MFI_EVT_CLASS_DEBUG: + return "debug"; + case MFI_EVT_CLASS_PROGRESS: + return "progress"; + case MFI_EVT_CLASS_INFO: + return "info"; + case MFI_EVT_CLASS_WARNING: + return "WARN"; + case MFI_EVT_CLASS_CRITICAL: + return "CRIT"; + case MFI_EVT_CLASS_FATAL: + return "FATAL"; + case MFI_EVT_CLASS_DEAD: + return "DEAD"; + default: + snprintf(buffer, sizeof(buffer), "%d", class); + return buffer; + } +} + +/** + * megasas_decode_evt: Decode FW AEN event and print critical event + * for information. + * @instance: Adapter soft state + */ +static void +megasas_decode_evt(struct megasas_instance *instance) +{ + struct megasas_evt_detail *evt_detail = instance->evt_detail; + union megasas_evt_class_locale class_locale; + class_locale.word = le32_to_cpu(evt_detail->cl.word); + + if ((event_log_level < MFI_EVT_CLASS_DEBUG) || + (event_log_level > MFI_EVT_CLASS_DEAD)) { + printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); + event_log_level = MFI_EVT_CLASS_CRITICAL; + } + + if (class_locale.members.class >= event_log_level) + dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n", + le32_to_cpu(evt_detail->seq_num), + format_timestamp(le32_to_cpu(evt_detail->time_stamp)), + (class_locale.members.locale), + format_class(class_locale.members.class), + evt_detail->description); + + if (megasas_dbg_lvl & LD_PD_DEBUG) + dev_info(&instance->pdev->dev, + "evt_detail.args.ld.target_id/index %d/%d\n", + evt_detail->args.ld.target_id, evt_detail->args.ld.ld_index); + +} + +/* + * The following functions are defined for xscale + * (deviceid : 1064R, PERC5) controllers + */ + +/** + * megasas_enable_intr_xscale - Enables interrupts + * @instance: Adapter soft state + */ +static inline void +megasas_enable_intr_xscale(struct megasas_instance *instance) +{ + struct megasas_register_set __iomem *regs; + + regs = instance->reg_set; + writel(0, &(regs)->outbound_intr_mask); + + /* Dummy readl to force pci flush */ + readl(&regs->outbound_intr_mask); +} + +/** + * megasas_disable_intr_xscale -Disables interrupt + * @instance: Adapter soft state + */ +static inline void +megasas_disable_intr_xscale(struct megasas_instance *instance) +{ + struct megasas_register_set __iomem *regs; + u32 mask = 0x1f; + + regs = instance->reg_set; + writel(mask, &regs->outbound_intr_mask); + /* Dummy readl to force pci flush */ + readl(&regs->outbound_intr_mask); +} + +/** + *
megasas_read_fw_status_reg_xscale - returns the current FW status value + * @instance: Adapter soft state + */ +static u32 +megasas_read_fw_status_reg_xscale(struct megasas_instance *instance) +{ + return readl(&instance->reg_set->outbound_msg_0); +} +/** + * megasas_clear_intr_xscale - Check & clear interrupt + * @instance: Adapter soft state + */ +static int +megasas_clear_intr_xscale(struct megasas_instance *instance) +{ + u32 status; + u32 mfiStatus = 0; + struct megasas_register_set __iomem *regs; + regs = instance->reg_set; + + /* + * Check if it is our interrupt + */ + status = readl(®s->outbound_intr_status); + + if (status & MFI_OB_INTR_STATUS_MASK) + mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; + if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT) + mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; + + /* + * Clear the interrupt by writing back the same value + */ + if (mfiStatus) + writel(status, ®s->outbound_intr_status); + + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_status); + + return mfiStatus; +} + +/** + * megasas_fire_cmd_xscale - Sends command to the FW + * @instance: Adapter soft state + * @frame_phys_addr : Physical address of cmd + * @frame_count : Number of frames for the command + * @regs : MFI register set + */ +static inline void +megasas_fire_cmd_xscale(struct megasas_instance *instance, + dma_addr_t frame_phys_addr, + u32 frame_count, + struct megasas_register_set __iomem *regs) +{ + unsigned long flags; + + spin_lock_irqsave(&instance->hba_lock, flags); + writel((frame_phys_addr >> 3)|(frame_count), + &(regs)->inbound_queue_port); + spin_unlock_irqrestore(&instance->hba_lock, flags); +} + +/** + * megasas_adp_reset_xscale - For controller reset + * @instance: Adapter soft state + * @regs: MFI register set + */ +static int +megasas_adp_reset_xscale(struct megasas_instance *instance, + struct megasas_register_set __iomem *regs) +{ + u32 i; + u32 pcidata; + + writel(MFI_ADP_RESET, ®s->inbound_doorbell); + + for (i = 0; i < 3; i++) + msleep(1000); /* sleep for 3 secs */ + pcidata = 0; + pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata); + dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata); + if (pcidata & 0x2) { + dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata); + pcidata &= ~0x2; + pci_write_config_dword(instance->pdev, + MFI_1068_PCSR_OFFSET, pcidata); + + for (i = 0; i < 2; i++) + msleep(1000); /* need to wait 2 secs again */ + + pcidata = 0; + pci_read_config_dword(instance->pdev, + MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata); + dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata); + if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) { + dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata); + pcidata = 0; + pci_write_config_dword(instance->pdev, + MFI_1068_FW_HANDSHAKE_OFFSET, pcidata); + } + } + return 0; +} + +/** + * megasas_check_reset_xscale - For controller reset check + * @instance: Adapter soft state + * @regs: MFI register set + */ +static int +megasas_check_reset_xscale(struct megasas_instance *instance, + struct megasas_register_set __iomem *regs) +{ + if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) && + (le32_to_cpu(*instance->consumer) == + MEGASAS_ADPRESET_INPROG_SIGN)) + return 1; + return 0; +} + +static struct megasas_instance_template megasas_instance_template_xscale = { + + .fire_cmd = megasas_fire_cmd_xscale, + .enable_intr = megasas_enable_intr_xscale, + .disable_intr = megasas_disable_intr_xscale, + .clear_intr = 
megasas_clear_intr_xscale, + .read_fw_status_reg = megasas_read_fw_status_reg_xscale, + .adp_reset = megasas_adp_reset_xscale, + .check_reset = megasas_check_reset_xscale, + .service_isr = megasas_isr, + .tasklet = megasas_complete_cmd_dpc, + .init_adapter = megasas_init_adapter_mfi, + .build_and_issue_cmd = megasas_build_and_issue_cmd, + .issue_dcmd = megasas_issue_dcmd, +}; + +/* + * This is the end of set of functions & definitions specific + * to xscale (deviceid : 1064R, PERC5) controllers + */ + +/* + * The following functions are defined for ppc (deviceid : 0x60) + * controllers + */ + +/** + * megasas_enable_intr_ppc - Enables interrupts + * @instance: Adapter soft state + */ +static inline void +megasas_enable_intr_ppc(struct megasas_instance *instance) +{ + struct megasas_register_set __iomem *regs; + + regs = instance->reg_set; + writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); + + writel(~0x80000000, &(regs)->outbound_intr_mask); + + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_mask); +} + +/** + * megasas_disable_intr_ppc - Disable interrupt + * @instance: Adapter soft state + */ +static inline void +megasas_disable_intr_ppc(struct megasas_instance *instance) +{ + struct megasas_register_set __iomem *regs; + u32 mask = 0xFFFFFFFF; + + regs = instance->reg_set; + writel(mask, ®s->outbound_intr_mask); + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_mask); +} + +/** + * megasas_read_fw_status_reg_ppc - returns the current FW status value + * @instance: Adapter soft state + */ +static u32 +megasas_read_fw_status_reg_ppc(struct megasas_instance *instance) +{ + return readl(&instance->reg_set->outbound_scratch_pad_0); +} + +/** + * megasas_clear_intr_ppc - Check & clear interrupt + * @instance: Adapter soft state + */ +static int +megasas_clear_intr_ppc(struct megasas_instance *instance) +{ + u32 status, mfiStatus = 0; + struct megasas_register_set __iomem *regs; + regs = instance->reg_set; + + /* + * Check if it is our interrupt + */ + status = readl(®s->outbound_intr_status); + + if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT) + mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; + + if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) + mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; + + /* + * Clear the interrupt by writing back the same value + */ + writel(status, ®s->outbound_doorbell_clear); + + /* Dummy readl to force pci flush */ + readl(®s->outbound_doorbell_clear); + + return mfiStatus; +} + +/** + * megasas_fire_cmd_ppc - Sends command to the FW + * @instance: Adapter soft state + * @frame_phys_addr: Physical address of cmd + * @frame_count: Number of frames for the command + * @regs: MFI register set + */ +static inline void +megasas_fire_cmd_ppc(struct megasas_instance *instance, + dma_addr_t frame_phys_addr, + u32 frame_count, + struct megasas_register_set __iomem *regs) +{ + unsigned long flags; + + spin_lock_irqsave(&instance->hba_lock, flags); + writel((frame_phys_addr | (frame_count<<1))|1, + &(regs)->inbound_queue_port); + spin_unlock_irqrestore(&instance->hba_lock, flags); +} + +/** + * megasas_check_reset_ppc - For controller reset check + * @instance: Adapter soft state + * @regs: MFI register set + */ +static int +megasas_check_reset_ppc(struct megasas_instance *instance, + struct megasas_register_set __iomem *regs) +{ + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) + return 1; + + return 0; +} + +static struct megasas_instance_template megasas_instance_template_ppc = { + + .fire_cmd = 
megasas_fire_cmd_ppc, + .enable_intr = megasas_enable_intr_ppc, + .disable_intr = megasas_disable_intr_ppc, + .clear_intr = megasas_clear_intr_ppc, + .read_fw_status_reg = megasas_read_fw_status_reg_ppc, + .adp_reset = megasas_adp_reset_xscale, + .check_reset = megasas_check_reset_ppc, + .service_isr = megasas_isr, + .tasklet = megasas_complete_cmd_dpc, + .init_adapter = megasas_init_adapter_mfi, + .build_and_issue_cmd = megasas_build_and_issue_cmd, + .issue_dcmd = megasas_issue_dcmd, +}; + +/** + * megasas_enable_intr_skinny - Enables interrupts + * @instance: Adapter soft state + */ +static inline void +megasas_enable_intr_skinny(struct megasas_instance *instance) +{ + struct megasas_register_set __iomem *regs; + + regs = instance->reg_set; + writel(0xFFFFFFFF, &(regs)->outbound_intr_mask); + + writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); + + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_mask); +} + +/** + * megasas_disable_intr_skinny - Disables interrupt + * @instance: Adapter soft state + */ +static inline void +megasas_disable_intr_skinny(struct megasas_instance *instance) +{ + struct megasas_register_set __iomem *regs; + u32 mask = 0xFFFFFFFF; + + regs = instance->reg_set; + writel(mask, ®s->outbound_intr_mask); + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_mask); +} + +/** + * megasas_read_fw_status_reg_skinny - returns the current FW status value + * @instance: Adapter soft state + */ +static u32 +megasas_read_fw_status_reg_skinny(struct megasas_instance *instance) +{ + return readl(&instance->reg_set->outbound_scratch_pad_0); +} + +/** + * megasas_clear_intr_skinny - Check & clear interrupt + * @instance: Adapter soft state + */ +static int +megasas_clear_intr_skinny(struct megasas_instance *instance) +{ + u32 status; + u32 mfiStatus = 0; + struct megasas_register_set __iomem *regs; + regs = instance->reg_set; + + /* + * Check if it is our interrupt + */ + status = readl(®s->outbound_intr_status); + + if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) { + return 0; + } + + /* + * Check if it is our interrupt + */ + if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) == + MFI_STATE_FAULT) { + mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; + } else + mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; + + /* + * Clear the interrupt by writing back the same value + */ + writel(status, ®s->outbound_intr_status); + + /* + * dummy read to flush PCI + */ + readl(®s->outbound_intr_status); + + return mfiStatus; +} + +/** + * megasas_fire_cmd_skinny - Sends command to the FW + * @instance: Adapter soft state + * @frame_phys_addr: Physical address of cmd + * @frame_count: Number of frames for the command + * @regs: MFI register set + */ +static inline void +megasas_fire_cmd_skinny(struct megasas_instance *instance, + dma_addr_t frame_phys_addr, + u32 frame_count, + struct megasas_register_set __iomem *regs) +{ + unsigned long flags; + + spin_lock_irqsave(&instance->hba_lock, flags); + writel(upper_32_bits(frame_phys_addr), + &(regs)->inbound_high_queue_port); + writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1, + &(regs)->inbound_low_queue_port); + spin_unlock_irqrestore(&instance->hba_lock, flags); +} + +/** + * megasas_check_reset_skinny - For controller reset check + * @instance: Adapter soft state + * @regs: MFI register set + */ +static int +megasas_check_reset_skinny(struct megasas_instance *instance, + struct megasas_register_set __iomem *regs) +{ + if (atomic_read(&instance->adprecovery) != 
MEGASAS_HBA_OPERATIONAL) + return 1; + + return 0; +} + +static struct megasas_instance_template megasas_instance_template_skinny = { + + .fire_cmd = megasas_fire_cmd_skinny, + .enable_intr = megasas_enable_intr_skinny, + .disable_intr = megasas_disable_intr_skinny, + .clear_intr = megasas_clear_intr_skinny, + .read_fw_status_reg = megasas_read_fw_status_reg_skinny, + .adp_reset = megasas_adp_reset_gen2, + .check_reset = megasas_check_reset_skinny, + .service_isr = megasas_isr, + .tasklet = megasas_complete_cmd_dpc, + .init_adapter = megasas_init_adapter_mfi, + .build_and_issue_cmd = megasas_build_and_issue_cmd, + .issue_dcmd = megasas_issue_dcmd, +}; + + +/* + * The following functions are defined for gen2 (deviceid : 0x78 0x79) + * controllers + */ + +/** + * megasas_enable_intr_gen2 - Enables interrupts + * @instance: Adapter soft state + */ +static inline void +megasas_enable_intr_gen2(struct megasas_instance *instance) +{ + struct megasas_register_set __iomem *regs; + + regs = instance->reg_set; + writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); + + /* write ~0x00000005 (4 & 1) to the intr mask*/ + writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); + + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_mask); +} + +/** + * megasas_disable_intr_gen2 - Disables interrupt + * @instance: Adapter soft state + */ +static inline void +megasas_disable_intr_gen2(struct megasas_instance *instance) +{ + struct megasas_register_set __iomem *regs; + u32 mask = 0xFFFFFFFF; + + regs = instance->reg_set; + writel(mask, ®s->outbound_intr_mask); + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_mask); +} + +/** + * megasas_read_fw_status_reg_gen2 - returns the current FW status value + * @instance: Adapter soft state + */ +static u32 +megasas_read_fw_status_reg_gen2(struct megasas_instance *instance) +{ + return readl(&instance->reg_set->outbound_scratch_pad_0); +} + +/** + * megasas_clear_intr_gen2 - Check & clear interrupt + * @instance: Adapter soft state + */ +static int +megasas_clear_intr_gen2(struct megasas_instance *instance) +{ + u32 status; + u32 mfiStatus = 0; + struct megasas_register_set __iomem *regs; + regs = instance->reg_set; + + /* + * Check if it is our interrupt + */ + status = readl(®s->outbound_intr_status); + + if (status & MFI_INTR_FLAG_REPLY_MESSAGE) { + mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; + } + if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) { + mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; + } + + /* + * Clear the interrupt by writing back the same value + */ + if (mfiStatus) + writel(status, ®s->outbound_doorbell_clear); + + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_status); + + return mfiStatus; +} + +/** + * megasas_fire_cmd_gen2 - Sends command to the FW + * @instance: Adapter soft state + * @frame_phys_addr: Physical address of cmd + * @frame_count: Number of frames for the command + * @regs: MFI register set + */ +static inline void +megasas_fire_cmd_gen2(struct megasas_instance *instance, + dma_addr_t frame_phys_addr, + u32 frame_count, + struct megasas_register_set __iomem *regs) +{ + unsigned long flags; + + spin_lock_irqsave(&instance->hba_lock, flags); + writel((frame_phys_addr | (frame_count<<1))|1, + &(regs)->inbound_queue_port); + spin_unlock_irqrestore(&instance->hba_lock, flags); +} + +/** + * megasas_adp_reset_gen2 - For controller reset + * @instance: Adapter soft state + * @reg_set: MFI register set + */ +static int +megasas_adp_reset_gen2(struct megasas_instance 
*instance, + struct megasas_register_set __iomem *reg_set) +{ + u32 retry = 0 ; + u32 HostDiag; + u32 __iomem *seq_offset = ®_set->seq_offset; + u32 __iomem *hostdiag_offset = ®_set->host_diag; + + if (instance->instancet == &megasas_instance_template_skinny) { + seq_offset = ®_set->fusion_seq_offset; + hostdiag_offset = ®_set->fusion_host_diag; + } + + writel(0, seq_offset); + writel(4, seq_offset); + writel(0xb, seq_offset); + writel(2, seq_offset); + writel(7, seq_offset); + writel(0xd, seq_offset); + + msleep(1000); + + HostDiag = (u32)readl(hostdiag_offset); + + while (!(HostDiag & DIAG_WRITE_ENABLE)) { + msleep(100); + HostDiag = (u32)readl(hostdiag_offset); + dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n", + retry, HostDiag); + + if (retry++ >= 100) + return 1; + + } + + dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag); + + writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset); + + ssleep(10); + + HostDiag = (u32)readl(hostdiag_offset); + while (HostDiag & DIAG_RESET_ADAPTER) { + msleep(100); + HostDiag = (u32)readl(hostdiag_offset); + dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n", + retry, HostDiag); + + if (retry++ >= 1000) + return 1; + + } + return 0; +} + +/** + * megasas_check_reset_gen2 - For controller reset check + * @instance: Adapter soft state + * @regs: MFI register set + */ +static int +megasas_check_reset_gen2(struct megasas_instance *instance, + struct megasas_register_set __iomem *regs) +{ + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) + return 1; + + return 0; +} + +static struct megasas_instance_template megasas_instance_template_gen2 = { + + .fire_cmd = megasas_fire_cmd_gen2, + .enable_intr = megasas_enable_intr_gen2, + .disable_intr = megasas_disable_intr_gen2, + .clear_intr = megasas_clear_intr_gen2, + .read_fw_status_reg = megasas_read_fw_status_reg_gen2, + .adp_reset = megasas_adp_reset_gen2, + .check_reset = megasas_check_reset_gen2, + .service_isr = megasas_isr, + .tasklet = megasas_complete_cmd_dpc, + .init_adapter = megasas_init_adapter_mfi, + .build_and_issue_cmd = megasas_build_and_issue_cmd, + .issue_dcmd = megasas_issue_dcmd, +}; + +/* + * This is the end of set of functions & definitions + * specific to gen2 (deviceid : 0x78, 0x79) controllers + */ + +/* + * Template added for TB (Fusion) + */ +extern struct megasas_instance_template megasas_instance_template_fusion; + +/** + * megasas_issue_polled - Issues a polling command + * @instance: Adapter soft state + * @cmd: Command packet to be issued + * + * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting. + */ +int +megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) +{ + struct megasas_header *frame_hdr = &cmd->frame->hdr; + + frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS; + frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); + + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { + dev_err(&instance->pdev->dev, "Failed from %s %d\n", + __func__, __LINE__); + return DCMD_INIT; + } + + instance->instancet->issue_dcmd(instance, cmd); + + return wait_and_poll(instance, cmd, instance->requestorId ? 
+ MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS); +} + +/** + * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds + * @instance: Adapter soft state + * @cmd: Command to be issued + * @timeout: Timeout in seconds + * + * This function waits on an event for the command to be returned from ISR. + * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs + * Used to issue ioctl commands. + */ +int +megasas_issue_blocked_cmd(struct megasas_instance *instance, + struct megasas_cmd *cmd, int timeout) +{ + int ret = 0; + cmd->cmd_status_drv = DCMD_INIT; + + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { + dev_err(&instance->pdev->dev, "Failed from %s %d\n", + __func__, __LINE__); + return DCMD_INIT; + } + + instance->instancet->issue_dcmd(instance, cmd); + + if (timeout) { + ret = wait_event_timeout(instance->int_cmd_wait_q, + cmd->cmd_status_drv != DCMD_INIT, timeout * HZ); + if (!ret) { + dev_err(&instance->pdev->dev, + "DCMD(opcode: 0x%x) is timed out, func:%s\n", + cmd->frame->dcmd.opcode, __func__); + return DCMD_TIMEOUT; + } + } else + wait_event(instance->int_cmd_wait_q, + cmd->cmd_status_drv != DCMD_INIT); + + return cmd->cmd_status_drv; +} + +/** + * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd + * @instance: Adapter soft state + * @cmd_to_abort: Previously issued cmd to be aborted + * @timeout: Timeout in seconds + * + * MFI firmware can abort previously issued AEN comamnd (automatic event + * notification). The megasas_issue_blocked_abort_cmd() issues such abort + * cmd and waits for return status. + * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs + */ +static int +megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, + struct megasas_cmd *cmd_to_abort, int timeout) +{ + struct megasas_cmd *cmd; + struct megasas_abort_frame *abort_fr; + int ret = 0; + u32 opcode; + + cmd = megasas_get_cmd(instance); + + if (!cmd) + return -1; + + abort_fr = &cmd->frame->abort; + + /* + * Prepare and issue the abort frame + */ + abort_fr->cmd = MFI_CMD_ABORT; + abort_fr->cmd_status = MFI_STAT_INVALID_STATUS; + abort_fr->flags = cpu_to_le16(0); + abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index); + abort_fr->abort_mfi_phys_addr_lo = + cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr)); + abort_fr->abort_mfi_phys_addr_hi = + cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); + + cmd->sync_cmd = 1; + cmd->cmd_status_drv = DCMD_INIT; + + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { + dev_err(&instance->pdev->dev, "Failed from %s %d\n", + __func__, __LINE__); + return DCMD_INIT; + } + + instance->instancet->issue_dcmd(instance, cmd); + + if (timeout) { + ret = wait_event_timeout(instance->abort_cmd_wait_q, + cmd->cmd_status_drv != DCMD_INIT, timeout * HZ); + if (!ret) { + opcode = cmd_to_abort->frame->dcmd.opcode; + dev_err(&instance->pdev->dev, + "Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n", + opcode, __func__); + return DCMD_TIMEOUT; + } + } else + wait_event(instance->abort_cmd_wait_q, + cmd->cmd_status_drv != DCMD_INIT); + + cmd->sync_cmd = 0; + + megasas_return_cmd(instance, cmd); + return cmd->cmd_status_drv; +} + +/** + * megasas_make_sgl32 - Prepares 32-bit SGL + * @instance: Adapter soft state + * @scp: SCSI command from the mid-layer + * @mfi_sgl: SGL to be filled in + * + * If successful, this function returns the number of SG elements. Otherwise, + * it returnes -1. 
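The abort frame set up above carries the aborted command's frame address as two 32-bit halves (abort_mfi_phys_addr_lo/hi) produced by lower_32_bits()/upper_32_bits(). A small standalone sketch of that split follows, with userspace macro stand-ins for the kernel helpers and a made-up sample address.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's lower_32_bits()/upper_32_bits(). */
#define lower_32_bits(x) ((uint32_t)((x) & 0xffffffffu))
#define upper_32_bits(x) ((uint32_t)((uint64_t)(x) >> 32))

int main(void)
{
        uint64_t frame_phys_addr = 0x0000000123456780ULL;   /* sample address */

        printf("abort_mfi_phys_addr_lo = 0x%08x\n", lower_32_bits(frame_phys_addr));
        printf("abort_mfi_phys_addr_hi = 0x%08x\n", upper_32_bits(frame_phys_addr));
        return 0;
}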
+ */ +static int +megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp, + union megasas_sgl *mfi_sgl) +{ + int i; + int sge_count; + struct scatterlist *os_sgl; + + sge_count = scsi_dma_map(scp); + BUG_ON(sge_count < 0); + + if (sge_count) { + scsi_for_each_sg(scp, os_sgl, sge_count, i) { + mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl)); + mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl)); + } + } + return sge_count; +} + +/** + * megasas_make_sgl64 - Prepares 64-bit SGL + * @instance: Adapter soft state + * @scp: SCSI command from the mid-layer + * @mfi_sgl: SGL to be filled in + * + * If successful, this function returns the number of SG elements. Otherwise, + * it returnes -1. + */ +static int +megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, + union megasas_sgl *mfi_sgl) +{ + int i; + int sge_count; + struct scatterlist *os_sgl; + + sge_count = scsi_dma_map(scp); + BUG_ON(sge_count < 0); + + if (sge_count) { + scsi_for_each_sg(scp, os_sgl, sge_count, i) { + mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl)); + mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl)); + } + } + return sge_count; +} + +/** + * megasas_make_sgl_skinny - Prepares IEEE SGL + * @instance: Adapter soft state + * @scp: SCSI command from the mid-layer + * @mfi_sgl: SGL to be filled in + * + * If successful, this function returns the number of SG elements. Otherwise, + * it returnes -1. + */ +static int +megasas_make_sgl_skinny(struct megasas_instance *instance, + struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl) +{ + int i; + int sge_count; + struct scatterlist *os_sgl; + + sge_count = scsi_dma_map(scp); + + if (sge_count) { + scsi_for_each_sg(scp, os_sgl, sge_count, i) { + mfi_sgl->sge_skinny[i].length = + cpu_to_le32(sg_dma_len(os_sgl)); + mfi_sgl->sge_skinny[i].phys_addr = + cpu_to_le64(sg_dma_address(os_sgl)); + mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0); + } + } + return sge_count; +} + + /** + * megasas_get_frame_count - Computes the number of frames + * @frame_type : type of frame- io or pthru frame + * @sge_count : number of sg elements + * + * Returns the number of frames required for numnber of sge's (sge_count) + */ + +static u32 megasas_get_frame_count(struct megasas_instance *instance, + u8 sge_count, u8 frame_type) +{ + int num_cnt; + int sge_bytes; + u32 sge_sz; + u32 frame_count = 0; + + sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : + sizeof(struct megasas_sge32); + + if (instance->flag_ieee) { + sge_sz = sizeof(struct megasas_sge_skinny); + } + + /* + * Main frame can contain 2 SGEs for 64-bit SGLs and + * 3 SGEs for 32-bit SGLs for ldio & + * 1 SGEs for 64-bit SGLs and + * 2 SGEs for 32-bit SGLs for pthru frame + */ + if (unlikely(frame_type == PTHRU_FRAME)) { + if (instance->flag_ieee == 1) { + num_cnt = sge_count - 1; + } else if (IS_DMA64) + num_cnt = sge_count - 1; + else + num_cnt = sge_count - 2; + } else { + if (instance->flag_ieee == 1) { + num_cnt = sge_count - 1; + } else if (IS_DMA64) + num_cnt = sge_count - 2; + else + num_cnt = sge_count - 3; + } + + if (num_cnt > 0) { + sge_bytes = sge_sz * num_cnt; + + frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) + + ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 
1 : 0) ; + } + /* Main frame */ + frame_count += 1; + + if (frame_count > 7) + frame_count = 8; + return frame_count; +} + +/** + * megasas_build_dcdb - Prepares a direct cdb (DCDB) command + * @instance: Adapter soft state + * @scp: SCSI command + * @cmd: Command to be prepared in + * + * This function prepares CDB commands. These are typcially pass-through + * commands to the devices. + */ +static int +megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, + struct megasas_cmd *cmd) +{ + u32 is_logical; + u32 device_id; + u16 flags = 0; + struct megasas_pthru_frame *pthru; + + is_logical = MEGASAS_IS_LOGICAL(scp->device); + device_id = MEGASAS_DEV_INDEX(scp); + pthru = (struct megasas_pthru_frame *)cmd->frame; + + if (scp->sc_data_direction == DMA_TO_DEVICE) + flags = MFI_FRAME_DIR_WRITE; + else if (scp->sc_data_direction == DMA_FROM_DEVICE) + flags = MFI_FRAME_DIR_READ; + else if (scp->sc_data_direction == DMA_NONE) + flags = MFI_FRAME_DIR_NONE; + + if (instance->flag_ieee == 1) { + flags |= MFI_FRAME_IEEE; + } + + /* + * Prepare the DCDB frame + */ + pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO; + pthru->cmd_status = 0x0; + pthru->scsi_status = 0x0; + pthru->target_id = device_id; + pthru->lun = scp->device->lun; + pthru->cdb_len = scp->cmd_len; + pthru->timeout = 0; + pthru->pad_0 = 0; + pthru->flags = cpu_to_le16(flags); + pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp)); + + memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); + + /* + * If the command is for the tape device, set the + * pthru timeout to the os layer timeout value. + */ + if (scp->device->type == TYPE_TAPE) { + if (scsi_cmd_to_rq(scp)->timeout / HZ > 0xFFFF) + pthru->timeout = cpu_to_le16(0xFFFF); + else + pthru->timeout = cpu_to_le16(scsi_cmd_to_rq(scp)->timeout / HZ); + } + + /* + * Construct SGL + */ + if (instance->flag_ieee == 1) { + pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); + pthru->sge_count = megasas_make_sgl_skinny(instance, scp, + &pthru->sgl); + } else if (IS_DMA64) { + pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); + pthru->sge_count = megasas_make_sgl64(instance, scp, + &pthru->sgl); + } else + pthru->sge_count = megasas_make_sgl32(instance, scp, + &pthru->sgl); + + if (pthru->sge_count > instance->max_num_sge) { + dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n", + pthru->sge_count); + return 0; + } + + /* + * Sense info specific + */ + pthru->sense_len = SCSI_SENSE_BUFFERSIZE; + pthru->sense_buf_phys_addr_hi = + cpu_to_le32(upper_32_bits(cmd->sense_phys_addr)); + pthru->sense_buf_phys_addr_lo = + cpu_to_le32(lower_32_bits(cmd->sense_phys_addr)); + + /* + * Compute the total number of frames this command consumes. FW uses + * this number to pull sufficient number of frames from host memory. + */ + cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count, + PTHRU_FRAME); + + return cmd->frame_count; +} + +/** + * megasas_build_ldio - Prepares IOs to logical devices + * @instance: Adapter soft state + * @scp: SCSI command + * @cmd: Command to be prepared + * + * Frames (and accompanying SGLs) for regular SCSI IOs use this function. 
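Before the LDIO builder below, here is a standalone sketch of the frame-count arithmetic from megasas_get_frame_count() above for the 64-bit LDIO case: two SGEs ride in the main frame, the rest spill into extra frames, and the total is capped at 8. The 64-byte frame size and 16-byte SGE size are assumptions made for this example; the driver takes the real values from MEGAMFI_FRAME_SIZE and the SGE struct sizes.

#include <stdio.h>

#define FRAME_SIZE 64   /* assumed per-frame size (MEGAMFI_FRAME_SIZE in the driver) */
#define SGE64_SIZE 16   /* assumed size of a 64-bit/skinny SGE */

/* Mirror of the 64-bit LDIO branch of megasas_get_frame_count(): the main
 * frame holds the first two SGEs, extra SGEs spill into additional frames,
 * and the result is capped at 8 frames. */
static unsigned int ldio_frame_count(unsigned int sge_count)
{
        int extra_sges = (int)sge_count - 2;
        unsigned int frame_count = 1;           /* the main frame itself */

        if (extra_sges > 0) {
                unsigned int sge_bytes = SGE64_SIZE * extra_sges;

                frame_count += sge_bytes / FRAME_SIZE +
                               (sge_bytes % FRAME_SIZE ? 1 : 0);
        }
        if (frame_count > 7)
                frame_count = 8;
        return frame_count;
}

int main(void)
{
        unsigned int n;

        for (n = 1; n <= 16; n++)
                printf("sge_count=%2u -> frame_count=%u\n", n, ldio_frame_count(n));
        return 0;
}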
+ */ +static int +megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, + struct megasas_cmd *cmd) +{ + u32 device_id; + u8 sc = scp->cmnd[0]; + u16 flags = 0; + struct megasas_io_frame *ldio; + + device_id = MEGASAS_DEV_INDEX(scp); + ldio = (struct megasas_io_frame *)cmd->frame; + + if (scp->sc_data_direction == DMA_TO_DEVICE) + flags = MFI_FRAME_DIR_WRITE; + else if (scp->sc_data_direction == DMA_FROM_DEVICE) + flags = MFI_FRAME_DIR_READ; + + if (instance->flag_ieee == 1) { + flags |= MFI_FRAME_IEEE; + } + + /* + * Prepare the Logical IO frame: 2nd bit is zero for all read cmds + */ + ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ; + ldio->cmd_status = 0x0; + ldio->scsi_status = 0x0; + ldio->target_id = device_id; + ldio->timeout = 0; + ldio->reserved_0 = 0; + ldio->pad_0 = 0; + ldio->flags = cpu_to_le16(flags); + ldio->start_lba_hi = 0; + ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0; + + /* + * 6-byte READ(0x08) or WRITE(0x0A) cdb + */ + if (scp->cmd_len == 6) { + ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]); + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) | + ((u32) scp->cmnd[2] << 8) | + (u32) scp->cmnd[3]); + + ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF); + } + + /* + * 10-byte READ(0x28) or WRITE(0x2A) cdb + */ + else if (scp->cmd_len == 10) { + ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] | + ((u32) scp->cmnd[7] << 8)); + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | + (u32) scp->cmnd[5]); + } + + /* + * 12-byte READ(0xA8) or WRITE(0xAA) cdb + */ + else if (scp->cmd_len == 12) { + ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) | + ((u32) scp->cmnd[7] << 16) | + ((u32) scp->cmnd[8] << 8) | + (u32) scp->cmnd[9]); + + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | + (u32) scp->cmnd[5]); + } + + /* + * 16-byte READ(0x88) or WRITE(0x8A) cdb + */ + else if (scp->cmd_len == 16) { + ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) | + ((u32) scp->cmnd[11] << 16) | + ((u32) scp->cmnd[12] << 8) | + (u32) scp->cmnd[13]); + + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) | + ((u32) scp->cmnd[7] << 16) | + ((u32) scp->cmnd[8] << 8) | + (u32) scp->cmnd[9]); + + ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | + (u32) scp->cmnd[5]); + + } + + /* + * Construct SGL + */ + if (instance->flag_ieee) { + ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); + ldio->sge_count = megasas_make_sgl_skinny(instance, scp, + &ldio->sgl); + } else if (IS_DMA64) { + ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); + ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); + } else + ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); + + if (ldio->sge_count > instance->max_num_sge) { + dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n", + ldio->sge_count); + return 0; + } + + /* + * Sense info specific + */ + ldio->sense_len = SCSI_SENSE_BUFFERSIZE; + ldio->sense_buf_phys_addr_hi = 0; + ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr); + + /* + * Compute the total number of frames this command consumes. FW uses + * this number to pull sufficient number of frames from host memory. 
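The cmd_len == 10 branch above pulls the start LBA out of CDB bytes 2-5 and the block count out of bytes 7-8. A self-contained sketch of that decode, using a sample READ(10) CDB with made-up LBA and length values:

#include <stdint.h>
#include <stdio.h>

/* Decode the LBA and transfer length from a 10-byte READ(10)/WRITE(10) CDB,
 * the same byte shuffling done in the cmd_len == 10 branch above. */
static void decode_rw10(const uint8_t *cdb, uint32_t *lba, uint32_t *nblocks)
{
        *lba = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
               ((uint32_t)cdb[4] << 8)  |  (uint32_t)cdb[5];
        *nblocks = ((uint32_t)cdb[7] << 8) | (uint32_t)cdb[8];
}

int main(void)
{
        /* READ(10) for 8 blocks starting at LBA 0x12345678 (sample values). */
        uint8_t cdb[10] = { 0x28, 0, 0x12, 0x34, 0x56, 0x78, 0, 0x00, 0x08, 0 };
        uint32_t lba, nblocks;

        decode_rw10(cdb, &lba, &nblocks);
        printf("start_lba_lo=0x%08x lba_count=%u\n", lba, nblocks);
        return 0;
}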
+ */ + cmd->frame_count = megasas_get_frame_count(instance, + ldio->sge_count, IO_FRAME); + + return cmd->frame_count; +} + +/** + * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD + * and whether it's RW or non RW + * @cmd: SCSI command + * + */ +inline int megasas_cmd_type(struct scsi_cmnd *cmd) +{ + int ret; + + switch (cmd->cmnd[0]) { + case READ_10: + case WRITE_10: + case READ_12: + case WRITE_12: + case READ_6: + case WRITE_6: + case READ_16: + case WRITE_16: + ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? + READ_WRITE_LDIO : READ_WRITE_SYSPDIO; + break; + default: + ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? + NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO; + } + return ret; +} + + /** + * megasas_dump_pending_frames - Dumps the frame address of all pending cmds + * in FW + * @instance: Adapter soft state + */ +static inline void +megasas_dump_pending_frames(struct megasas_instance *instance) +{ + struct megasas_cmd *cmd; + int i,n; + union megasas_sgl *mfi_sgl; + struct megasas_io_frame *ldio; + struct megasas_pthru_frame *pthru; + u32 sgcount; + u16 max_cmd = instance->max_fw_cmds; + + dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no); + dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding)); + if (IS_DMA64) + dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no); + else + dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no); + + dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no); + for (i = 0; i < max_cmd; i++) { + cmd = instance->cmd_list[i]; + if (!cmd->scmd) + continue; + dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr); + if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) { + ldio = (struct megasas_io_frame *)cmd->frame; + mfi_sgl = &ldio->sgl; + sgcount = ldio->sge_count; + dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x," + " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", + instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id, + le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi), + le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount); + } else { + pthru = (struct megasas_pthru_frame *) cmd->frame; + mfi_sgl = &pthru->sgl; + sgcount = pthru->sge_count; + dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, " + "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", + instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id, + pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len), + le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount); + } + if (megasas_dbg_lvl & MEGASAS_DBG_LVL) { + for (n = 0; n < sgcount; n++) { + if (IS_DMA64) + dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n", + le32_to_cpu(mfi_sgl->sge64[n].length), + le64_to_cpu(mfi_sgl->sge64[n].phys_addr)); + else + dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n", + le32_to_cpu(mfi_sgl->sge32[n].length), + le32_to_cpu(mfi_sgl->sge32[n].phys_addr)); + } + } + } /*for max_cmd*/ + dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no); + for (i = 0; i < max_cmd; i++) { + + cmd = instance->cmd_list[i]; + + if (cmd->sync_cmd 
== 1) + dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr); + } + dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no); +} + +u32 +megasas_build_and_issue_cmd(struct megasas_instance *instance, + struct scsi_cmnd *scmd) +{ + struct megasas_cmd *cmd; + u32 frame_count; + + cmd = megasas_get_cmd(instance); + if (!cmd) + return SCSI_MLQUEUE_HOST_BUSY; + + /* + * Logical drive command + */ + if (megasas_cmd_type(scmd) == READ_WRITE_LDIO) + frame_count = megasas_build_ldio(instance, scmd, cmd); + else + frame_count = megasas_build_dcdb(instance, scmd, cmd); + + if (!frame_count) + goto out_return_cmd; + + cmd->scmd = scmd; + megasas_priv(scmd)->cmd_priv = cmd; + + /* + * Issue the command to the FW + */ + atomic_inc(&instance->fw_outstanding); + + instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, + cmd->frame_count-1, instance->reg_set); + + return 0; +out_return_cmd: + megasas_return_cmd(instance, cmd); + return SCSI_MLQUEUE_HOST_BUSY; +} + + +/** + * megasas_queue_command - Queue entry point + * @shost: adapter SCSI host + * @scmd: SCSI command to be queued + */ +static int +megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) +{ + struct megasas_instance *instance; + struct MR_PRIV_DEVICE *mr_device_priv_data; + u32 ld_tgt_id; + + instance = (struct megasas_instance *) + scmd->device->host->hostdata; + + if (instance->unload == 1) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + return 0; + } + + if (instance->issuepend_done == 0) + return SCSI_MLQUEUE_HOST_BUSY; + + + /* Check for an mpio path and adjust behavior */ + if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { + if (megasas_check_mpio_paths(instance, scmd) == + (DID_REQUEUE << 16)) { + return SCSI_MLQUEUE_HOST_BUSY; + } else { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + return 0; + } + } + + mr_device_priv_data = scmd->device->hostdata; + if (!mr_device_priv_data || + (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + return 0; + } + + if (MEGASAS_IS_LOGICAL(scmd->device)) { + ld_tgt_id = MEGASAS_TARGET_ID(scmd->device); + if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + return 0; + } + } + + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) + return SCSI_MLQUEUE_HOST_BUSY; + + if (mr_device_priv_data->tm_busy) + return SCSI_MLQUEUE_DEVICE_BUSY; + + + scmd->result = 0; + + if (MEGASAS_IS_LOGICAL(scmd->device) && + (scmd->device->id >= instance->fw_supported_vd_count || + scmd->device->lun)) { + scmd->result = DID_BAD_TARGET << 16; + goto out_done; + } + + if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && + MEGASAS_IS_LOGICAL(scmd->device) && + (!instance->fw_sync_cache_support)) { + scmd->result = DID_OK << 16; + goto out_done; + } + + return instance->instancet->build_and_issue_cmd(instance, scmd); + + out_done: + scsi_done(scmd); + return 0; +} + +static struct megasas_instance *megasas_lookup_instance(u16 host_no) +{ + int i; + + for (i = 0; i < megasas_mgmt_info.max_index; i++) { + + if ((megasas_mgmt_info.instance[i]) && + (megasas_mgmt_info.instance[i]->host->host_no == host_no)) + return megasas_mgmt_info.instance[i]; + } + + return NULL; +} + +/* +* megasas_set_dynamic_target_properties - +* Device property set by driver may not be static and it is required to be +* updated after OCR +* +* set tm_capable. 
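megasas_build_and_issue_cmd() above picks the LDIO or DCDB builder based on megasas_cmd_type(). A standalone sketch of that opcode classification follows; the enum names are local stand-ins for the driver's values, and the hex opcodes are the standard SCSI READ/WRITE codes listed in the switch above.

#include <stdbool.h>
#include <stdio.h>

/* Local stand-ins for the driver's classification values. */
enum cmd_type { READ_WRITE_LDIO, READ_WRITE_SYSPDIO,
                NON_READ_WRITE_LDIO, NON_READ_WRITE_SYSPDIO };

/* Classify a CDB the way megasas_cmd_type() above does: READ/WRITE opcodes
 * on a logical drive become LDIO, everything else is pass-through. */
static enum cmd_type classify(unsigned char opcode, bool is_logical)
{
        switch (opcode) {
        case 0x08: case 0x0a:           /* READ(6)/WRITE(6)   */
        case 0x28: case 0x2a:           /* READ(10)/WRITE(10) */
        case 0xa8: case 0xaa:           /* READ(12)/WRITE(12) */
        case 0x88: case 0x8a:           /* READ(16)/WRITE(16) */
                return is_logical ? READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
        default:
                return is_logical ? NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
        }
}

int main(void)
{
        printf("READ(10) on LD -> %s\n",
               classify(0x28, true) == READ_WRITE_LDIO ? "LDIO path" : "DCDB path");
        printf("INQUIRY on LD  -> %s\n",
               classify(0x12, true) == READ_WRITE_LDIO ? "LDIO path" : "DCDB path");
        return 0;
}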
+* set dma alignment (only for eedp protection enable vd). +* +* @sdev: OS provided scsi device +* +* Returns void +*/ +void megasas_set_dynamic_target_properties(struct scsi_device *sdev, + bool is_target_prop) +{ + u16 pd_index = 0, ld; + u32 device_id; + struct megasas_instance *instance; + struct fusion_context *fusion; + struct MR_PRIV_DEVICE *mr_device_priv_data; + struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; + struct MR_LD_RAID *raid; + struct MR_DRV_RAID_MAP_ALL *local_map_ptr; + + instance = megasas_lookup_instance(sdev->host->host_no); + fusion = instance->ctrl_context; + mr_device_priv_data = sdev->hostdata; + + if (!fusion || !mr_device_priv_data) + return; + + if (MEGASAS_IS_LOGICAL(sdev)) { + device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + + sdev->id; + local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; + ld = MR_TargetIdToLdGet(device_id, local_map_ptr); + if (ld >= instance->fw_supported_vd_count) + return; + raid = MR_LdRaidGet(ld, local_map_ptr); + + if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) + blk_queue_update_dma_alignment(sdev->request_queue, 0x7); + + mr_device_priv_data->is_tm_capable = + raid->capability.tmCapable; + + if (!raid->flags.isEPD) + sdev->no_write_same = 1; + + } else if (instance->use_seqnum_jbod_fp) { + pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + + sdev->id; + pd_sync = (void *)fusion->pd_seq_sync + [(instance->pd_seq_map_id - 1) & 1]; + mr_device_priv_data->is_tm_capable = + pd_sync->seq[pd_index].capability.tmCapable; + } + + if (is_target_prop && instance->tgt_prop->reset_tmo) { + /* + * If FW provides a target reset timeout value, driver will use + * it. If not set, fallback to default values. + */ + mr_device_priv_data->target_reset_tmo = + min_t(u8, instance->max_reset_tmo, + instance->tgt_prop->reset_tmo); + mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo; + } else { + mr_device_priv_data->target_reset_tmo = + MEGASAS_DEFAULT_TM_TIMEOUT; + mr_device_priv_data->task_abort_tmo = + MEGASAS_DEFAULT_TM_TIMEOUT; + } +} + +/* + * megasas_set_nvme_device_properties - + * set nomerges=2 + * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K). + * set maximum io transfer = MDTS of NVME device provided by MR firmware. + * + * MR firmware provides value in KB. Caller of this function converts + * kb into bytes. + * + * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size, + * MR firmware provides value 128 as (32 * 4K) = 128K. + * + * @sdev: scsi device + * @max_io_size: maximum io transfer size + * + */ +static inline void +megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) +{ + struct megasas_instance *instance; + u32 mr_nvme_pg_size; + + instance = (struct megasas_instance *)sdev->host->hostdata; + mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, + MR_DEFAULT_NVME_PAGE_SIZE); + + blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); + + blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); + blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); +} + +/* + * megasas_set_fw_assisted_qd - + * set device queue depth to can_queue + * set device queue depth to fw assisted qd + * + * @sdev: scsi device + * @is_target_prop true, if fw provided target properties. 
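For the NVMe sizing above: firmware reports the MDTS-derived limit in KB, megasas_set_static_target_properties() shifts it to bytes (<< 10), and megasas_set_nvme_device_properties() divides by 512 to get the limit handed to blk_queue_max_hw_sectors(). A quick arithmetic sketch with a sample 128 KB value (the value itself is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t max_io_size_kb = 128;                   /* sample FW value  */
        uint32_t max_io_bytes   = max_io_size_kb << 10;  /* KB -> bytes      */
        uint32_t max_hw_sectors = max_io_bytes / 512;    /* bytes -> sectors */

        printf("max_io_size_kb=%u -> max_hw_sectors=%u\n",
               max_io_size_kb, max_hw_sectors);
        return 0;
}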
+ */ +static void megasas_set_fw_assisted_qd(struct scsi_device *sdev, + bool is_target_prop) +{ + u8 interface_type; + u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; + u32 tgt_device_qd; + struct megasas_instance *instance; + struct MR_PRIV_DEVICE *mr_device_priv_data; + + instance = megasas_lookup_instance(sdev->host->host_no); + mr_device_priv_data = sdev->hostdata; + interface_type = mr_device_priv_data->interface_type; + + switch (interface_type) { + case SAS_PD: + device_qd = MEGASAS_SAS_QD; + break; + case SATA_PD: + device_qd = MEGASAS_SATA_QD; + break; + case NVME_PD: + device_qd = MEGASAS_NVME_QD; + break; + } + + if (is_target_prop) { + tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth); + if (tgt_device_qd) + device_qd = min(instance->host->can_queue, + (int)tgt_device_qd); + } + + if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE) + device_qd = instance->host->can_queue; + + scsi_change_queue_depth(sdev, device_qd); +} + +/* + * megasas_set_static_target_properties - + * Device property set by driver are static and it is not required to be + * updated after OCR. + * + * set io timeout + * set device queue depth + * set nvme device properties. see - megasas_set_nvme_device_properties + * + * @sdev: scsi device + * @is_target_prop true, if fw provided target properties. + */ +static void megasas_set_static_target_properties(struct scsi_device *sdev, + bool is_target_prop) +{ + u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; + struct megasas_instance *instance; + + instance = megasas_lookup_instance(sdev->host->host_no); + + /* + * The RAID firmware may require extended timeouts. + */ + blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); + + /* max_io_size_kb will be set to non zero for + * nvme based vd and syspd. + */ + if (is_target_prop) + max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); + + if (instance->nvme_page_size && max_io_size_kb) + megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); + + megasas_set_fw_assisted_qd(sdev, is_target_prop); +} + + +static int megasas_slave_configure(struct scsi_device *sdev) +{ + u16 pd_index = 0; + struct megasas_instance *instance; + int ret_target_prop = DCMD_FAILED; + bool is_target_prop = false; + + instance = megasas_lookup_instance(sdev->host->host_no); + if (instance->pd_list_not_supported) { + if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) { + pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + + sdev->id; + if (instance->pd_list[pd_index].driveState != + MR_PD_STATE_SYSTEM) + return -ENXIO; + } + } + + mutex_lock(&instance->reset_mutex); + /* Send DCMD to Firmware and cache the information */ + if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) + megasas_get_pd_info(instance, sdev); + + /* Some ventura firmware may not have instance->nvme_page_size set. + * Do not send MR_DCMD_DRV_GET_TARGET_PROP + */ + if ((instance->tgt_prop) && (instance->nvme_page_size)) + ret_target_prop = megasas_get_target_prop(instance, sdev); + + is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? 
true : false; + megasas_set_static_target_properties(sdev, is_target_prop); + + /* This sdev property may change post OCR */ + megasas_set_dynamic_target_properties(sdev, is_target_prop); + + mutex_unlock(&instance->reset_mutex); + + return 0; +} + +static int megasas_slave_alloc(struct scsi_device *sdev) +{ + u16 pd_index = 0, ld_tgt_id; + struct megasas_instance *instance ; + struct MR_PRIV_DEVICE *mr_device_priv_data; + + instance = megasas_lookup_instance(sdev->host->host_no); + if (!MEGASAS_IS_LOGICAL(sdev)) { + /* + * Open the OS scan to the SYSTEM PD + */ + pd_index = + (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + + sdev->id; + if ((instance->pd_list_not_supported || + instance->pd_list[pd_index].driveState == + MR_PD_STATE_SYSTEM)) { + goto scan_target; + } + return -ENXIO; + } else if (!MEGASAS_IS_LUN_VALID(sdev)) { + sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); + return -ENXIO; + } + +scan_target: + mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data), + GFP_KERNEL); + if (!mr_device_priv_data) + return -ENOMEM; + + if (MEGASAS_IS_LOGICAL(sdev)) { + ld_tgt_id = MEGASAS_TARGET_ID(sdev); + instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_ACTIVE; + if (megasas_dbg_lvl & LD_PD_DEBUG) + sdev_printk(KERN_INFO, sdev, "LD target ID %d created.\n", ld_tgt_id); + } + + sdev->hostdata = mr_device_priv_data; + + atomic_set(&mr_device_priv_data->r1_ldio_hint, + instance->r1_ldio_hint_default); + return 0; +} + +static void megasas_slave_destroy(struct scsi_device *sdev) +{ + u16 ld_tgt_id; + struct megasas_instance *instance; + + instance = megasas_lookup_instance(sdev->host->host_no); + + if (MEGASAS_IS_LOGICAL(sdev)) { + if (!MEGASAS_IS_LUN_VALID(sdev)) { + sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); + return; + } + ld_tgt_id = MEGASAS_TARGET_ID(sdev); + instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED; + if (megasas_dbg_lvl & LD_PD_DEBUG) + sdev_printk(KERN_INFO, sdev, + "LD target ID %d removed from OS stack\n", ld_tgt_id); + } + + kfree(sdev->hostdata); + sdev->hostdata = NULL; +} + +/* +* megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a +* kill adapter +* @instance: Adapter soft state +* +*/ +static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) +{ + int i; + struct megasas_cmd *cmd_mfi; + struct megasas_cmd_fusion *cmd_fusion; + struct fusion_context *fusion = instance->ctrl_context; + + /* Find all outstanding ioctls */ + if (fusion) { + for (i = 0; i < instance->max_fw_cmds; i++) { + cmd_fusion = fusion->cmd_list[i]; + if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { + cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; + if (cmd_mfi->sync_cmd && + (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) { + cmd_mfi->frame->hdr.cmd_status = + MFI_STAT_WRONG_STATE; + megasas_complete_cmd(instance, + cmd_mfi, DID_OK); + } + } + } + } else { + for (i = 0; i < instance->max_fw_cmds; i++) { + cmd_mfi = instance->cmd_list[i]; + if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != + MFI_CMD_ABORT) + megasas_complete_cmd(instance, cmd_mfi, DID_OK); + } + } +} + + +void megaraid_sas_kill_hba(struct megasas_instance *instance) +{ + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { + dev_warn(&instance->pdev->dev, + "Adapter already dead, skipping kill HBA\n"); + return; + } + + /* Set critical error to block I/O & ioctls in case caller didn't */ + atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); + /* Wait 1 second to ensure IO or ioctls in build have posted 
*/ + msleep(1000); + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || + (instance->adapter_type != MFI_SERIES)) { + if (!instance->requestorId) { + writel(MFI_STOP_ADP, &instance->reg_set->doorbell); + /* Flush */ + readl(&instance->reg_set->doorbell); + } + if (instance->requestorId && instance->peerIsPresent) + memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); + } else { + writel(MFI_STOP_ADP, + &instance->reg_set->inbound_doorbell); + } + /* Complete outstanding ioctls when adapter is killed */ + megasas_complete_outstanding_ioctls(instance); +} + + /** + * megasas_check_and_restore_queue_depth - Check if queue depth needs to be + * restored to max value + * @instance: Adapter soft state + * + */ +void +megasas_check_and_restore_queue_depth(struct megasas_instance *instance) +{ + unsigned long flags; + + if (instance->flag & MEGASAS_FW_BUSY + && time_after(jiffies, instance->last_time + 5 * HZ) + && atomic_read(&instance->fw_outstanding) < + instance->throttlequeuedepth + 1) { + + spin_lock_irqsave(instance->host->host_lock, flags); + instance->flag &= ~MEGASAS_FW_BUSY; + + instance->host->can_queue = instance->cur_can_queue; + spin_unlock_irqrestore(instance->host->host_lock, flags); + } +} + +/** + * megasas_complete_cmd_dpc - Returns FW's controller structure + * @instance_addr: Address of adapter soft state + * + * Tasklet to complete cmds + */ +static void megasas_complete_cmd_dpc(unsigned long instance_addr) +{ + u32 producer; + u32 consumer; + u32 context; + struct megasas_cmd *cmd; + struct megasas_instance *instance = + (struct megasas_instance *)instance_addr; + unsigned long flags; + + /* If we have already declared adapter dead, donot complete cmds */ + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) + return; + + spin_lock_irqsave(&instance->completion_lock, flags); + + producer = le32_to_cpu(*instance->producer); + consumer = le32_to_cpu(*instance->consumer); + + while (consumer != producer) { + context = le32_to_cpu(instance->reply_queue[consumer]); + if (context >= instance->max_fw_cmds) { + dev_err(&instance->pdev->dev, "Unexpected context value %x\n", + context); + BUG(); + } + + cmd = instance->cmd_list[context]; + + megasas_complete_cmd(instance, cmd, DID_OK); + + consumer++; + if (consumer == (instance->max_fw_cmds + 1)) { + consumer = 0; + } + } + + *instance->consumer = cpu_to_le32(producer); + + spin_unlock_irqrestore(&instance->completion_lock, flags); + + /* + * Check if we can restore can_queue + */ + megasas_check_and_restore_queue_depth(instance); +} + +static void megasas_sriov_heartbeat_handler(struct timer_list *t); + +/** + * megasas_start_timer - Initializes sriov heartbeat timer object + * @instance: Adapter soft state + * + */ +void megasas_start_timer(struct megasas_instance *instance) +{ + struct timer_list *timer = &instance->sriov_heartbeat_timer; + + timer_setup(timer, megasas_sriov_heartbeat_handler, 0); + timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF; + add_timer(timer); +} + +static void +megasas_internal_reset_defer_cmds(struct megasas_instance *instance); + +static void +process_fw_state_change_wq(struct work_struct *work); + +static void megasas_do_ocr(struct megasas_instance *instance) +{ + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || + (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { + *instance->consumer = 
cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); + } + instance->instancet->disable_intr(instance); + atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); + instance->issuepend_done = 0; + + atomic_set(&instance->fw_outstanding, 0); + megasas_internal_reset_defer_cmds(instance); + process_fw_state_change_wq(&instance->work_init); +} + +static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, + int initial) +{ + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; + dma_addr_t new_affiliation_111_h; + int ld, retval = 0; + u8 thisVf; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:" + "Failed to get cmd for scsi%d\n", + instance->host->host_no); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + if (!instance->vf_affiliation_111) { + dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " + "affiliation for scsi%d\n", instance->host->host_no); + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + + if (initial) + memset(instance->vf_affiliation_111, 0, + sizeof(struct MR_LD_VF_AFFILIATION_111)); + else { + new_affiliation_111 = + dma_alloc_coherent(&instance->pdev->dev, + sizeof(struct MR_LD_VF_AFFILIATION_111), + &new_affiliation_111_h, GFP_KERNEL); + if (!new_affiliation_111) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " + "memory for new affiliation for scsi%d\n", + instance->host->host_no); + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + } + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; + dcmd->sge_count = 1; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = + cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); + + if (initial) + dcmd->sgl.sge32[0].phys_addr = + cpu_to_le32(instance->vf_affiliation_111_h); + else + dcmd->sgl.sge32[0].phys_addr = + cpu_to_le32(new_affiliation_111_h); + + dcmd->sgl.sge32[0].length = cpu_to_le32( + sizeof(struct MR_LD_VF_AFFILIATION_111)); + + dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " + "scsi%d\n", instance->host->host_no); + + if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { + dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" + " failed with status 0x%x for scsi%d\n", + dcmd->cmd_status, instance->host->host_no); + retval = 1; /* Do a scan if we couldn't get affiliation */ + goto out; + } + + if (!initial) { + thisVf = new_affiliation_111->thisVf; + for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) + if (instance->vf_affiliation_111->map[ld].policy[thisVf] != + new_affiliation_111->map[ld].policy[thisVf]) { + dev_warn(&instance->pdev->dev, "SR-IOV: " + "Got new LD/VF affiliation for scsi%d\n", + instance->host->host_no); + memcpy(instance->vf_affiliation_111, + new_affiliation_111, + sizeof(struct MR_LD_VF_AFFILIATION_111)); + retval = 1; + goto out; + } + } +out: + if (new_affiliation_111) { + dma_free_coherent(&instance->pdev->dev, + sizeof(struct MR_LD_VF_AFFILIATION_111), + new_affiliation_111, + new_affiliation_111_h); + } + + megasas_return_cmd(instance, cmd); + + return retval; +} + +static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, + int initial) +{ + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + 
struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; + struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; + dma_addr_t new_affiliation_h; + int i, j, retval = 0, found = 0, doscan = 0; + u8 thisVf; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: " + "Failed to get cmd for scsi%d\n", + instance->host->host_no); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + if (!instance->vf_affiliation) { + dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " + "affiliation for scsi%d\n", instance->host->host_no); + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + + if (initial) + memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION)); + else { + new_affiliation = + dma_alloc_coherent(&instance->pdev->dev, + (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), + &new_affiliation_h, GFP_KERNEL); + if (!new_affiliation) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " + "memory for new affiliation for scsi%d\n", + instance->host->host_no); + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + } + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; + dcmd->sge_count = 1; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION)); + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); + + if (initial) + dcmd->sgl.sge32[0].phys_addr = + cpu_to_le32(instance->vf_affiliation_h); + else + dcmd->sgl.sge32[0].phys_addr = + cpu_to_le32(new_affiliation_h); + + dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION)); + + dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " + "scsi%d\n", instance->host->host_no); + + + if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { + dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" + " failed with status 0x%x for scsi%d\n", + dcmd->cmd_status, instance->host->host_no); + retval = 1; /* Do a scan if we couldn't get affiliation */ + goto out; + } + + if (!initial) { + if (!new_affiliation->ldCount) { + dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " + "affiliation for passive path for scsi%d\n", + instance->host->host_no); + retval = 1; + goto out; + } + newmap = new_affiliation->map; + savedmap = instance->vf_affiliation->map; + thisVf = new_affiliation->thisVf; + for (i = 0 ; i < new_affiliation->ldCount; i++) { + found = 0; + for (j = 0; j < instance->vf_affiliation->ldCount; + j++) { + if (newmap->ref.targetId == + savedmap->ref.targetId) { + found = 1; + if (newmap->policy[thisVf] != + savedmap->policy[thisVf]) { + doscan = 1; + goto out; + } + } + savedmap = (struct MR_LD_VF_MAP *) + ((unsigned char *)savedmap + + savedmap->size); + } + if (!found && newmap->policy[thisVf] != + MR_LD_ACCESS_HIDDEN) { + doscan = 1; + goto out; + } + newmap = (struct MR_LD_VF_MAP *) + ((unsigned char *)newmap + newmap->size); + } + + newmap = new_affiliation->map; + savedmap = instance->vf_affiliation->map; + + for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { + found = 0; + for (j = 0 ; j < new_affiliation->ldCount; j++) { + if (savedmap->ref.targetId == + newmap->ref.targetId) { + found = 1; + if (savedmap->policy[thisVf] != + newmap->policy[thisVf]) { + doscan = 1; + goto 
out; + } + } + newmap = (struct MR_LD_VF_MAP *) + ((unsigned char *)newmap + + newmap->size); + } + if (!found && savedmap->policy[thisVf] != + MR_LD_ACCESS_HIDDEN) { + doscan = 1; + goto out; + } + savedmap = (struct MR_LD_VF_MAP *) + ((unsigned char *)savedmap + + savedmap->size); + } + } +out: + if (doscan) { + dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " + "affiliation for scsi%d\n", instance->host->host_no); + memcpy(instance->vf_affiliation, new_affiliation, + new_affiliation->size); + retval = 1; + } + + if (new_affiliation) + dma_free_coherent(&instance->pdev->dev, + (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION), + new_affiliation, new_affiliation_h); + megasas_return_cmd(instance, cmd); + + return retval; +} + +/* This function will get the current SR-IOV LD/VF affiliation */ +static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, + int initial) +{ + int retval; + + if (instance->PlasmaFW111) + retval = megasas_get_ld_vf_affiliation_111(instance, initial); + else + retval = megasas_get_ld_vf_affiliation_12(instance, initial); + return retval; +} + +/* This function will tell FW to start the SR-IOV heartbeat */ +int megasas_sriov_start_heartbeat(struct megasas_instance *instance, + int initial) +{ + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + int retval = 0; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: " + "Failed to get cmd for scsi%d\n", + instance->host->host_no); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + if (initial) { + instance->hb_host_mem = + dma_alloc_coherent(&instance->pdev->dev, + sizeof(struct MR_CTRL_HB_HOST_MEM), + &instance->hb_host_mem_h, + GFP_KERNEL); + if (!instance->hb_host_mem) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" + " memory for heartbeat host memory for scsi%d\n", + instance->host->host_no); + retval = -ENOMEM; + goto out; + } + } + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; + dcmd->sge_count = 1; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); + + megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h, + sizeof(struct MR_CTRL_HB_HOST_MEM)); + + dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n", + instance->host->host_no); + + if ((instance->adapter_type != MFI_SERIES) && + !instance->mask_interrupts) + retval = megasas_issue_blocked_cmd(instance, cmd, + MEGASAS_ROUTINE_WAIT_TIME_VF); + else + retval = megasas_issue_polled(instance, cmd); + + if (retval) { + dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" + "_MEM_ALLOC DCMD %s for scsi%d\n", + (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? 
+ "timed out" : "failed", instance->host->host_no); + retval = 1; + } + +out: + megasas_return_cmd(instance, cmd); + + return retval; +} + +/* Handler for SR-IOV heartbeat */ +static void megasas_sriov_heartbeat_handler(struct timer_list *t) +{ + struct megasas_instance *instance = + from_timer(instance, t, sriov_heartbeat_timer); + + if (instance->hb_host_mem->HB.fwCounter != + instance->hb_host_mem->HB.driverCounter) { + instance->hb_host_mem->HB.driverCounter = + instance->hb_host_mem->HB.fwCounter; + mod_timer(&instance->sriov_heartbeat_timer, + jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); + } else { + dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never " + "completed for scsi%d\n", instance->host->host_no); + schedule_work(&instance->work_init); + } +} + +/** + * megasas_wait_for_outstanding - Wait for all outstanding cmds + * @instance: Adapter soft state + * + * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to + * complete all its outstanding commands. Returns error if one or more IOs + * are pending after this time period. It also marks the controller dead. + */ +static int megasas_wait_for_outstanding(struct megasas_instance *instance) +{ + int i, sl, outstanding; + u32 reset_index; + u32 wait_time = MEGASAS_RESET_WAIT_TIME; + unsigned long flags; + struct list_head clist_local; + struct megasas_cmd *reset_cmd; + u32 fw_state; + + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { + dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", + __func__, __LINE__); + return FAILED; + } + + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { + + INIT_LIST_HEAD(&clist_local); + spin_lock_irqsave(&instance->hba_lock, flags); + list_splice_init(&instance->internal_reset_pending_q, + &clist_local); + spin_unlock_irqrestore(&instance->hba_lock, flags); + + dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); + for (i = 0; i < wait_time; i++) { + msleep(1000); + if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) + break; + } + + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { + dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); + atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); + return FAILED; + } + + reset_index = 0; + while (!list_empty(&clist_local)) { + reset_cmd = list_entry((&clist_local)->next, + struct megasas_cmd, list); + list_del_init(&reset_cmd->list); + if (reset_cmd->scmd) { + reset_cmd->scmd->result = DID_REQUEUE << 16; + dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", + reset_index, reset_cmd, + reset_cmd->scmd->cmnd[0]); + + scsi_done(reset_cmd->scmd); + megasas_return_cmd(instance, reset_cmd); + } else if (reset_cmd->sync_cmd) { + dev_notice(&instance->pdev->dev, "%p synch cmds" + "reset queue\n", + reset_cmd); + + reset_cmd->cmd_status_drv = DCMD_INIT; + instance->instancet->fire_cmd(instance, + reset_cmd->frame_phys_addr, + 0, instance->reg_set); + } else { + dev_notice(&instance->pdev->dev, "%p unexpected" + "cmds lst\n", + reset_cmd); + } + reset_index++; + } + + return SUCCESS; + } + + for (i = 0; i < resetwaittime; i++) { + outstanding = atomic_read(&instance->fw_outstanding); + + if (!outstanding) + break; + + if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { + dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " + "commands to complete\n",i,outstanding); + /* + * Call cmd completion routine. Cmd to be + * be completed directly without depending on isr. 
+ */ + megasas_complete_cmd_dpc((unsigned long)instance); + } + + msleep(1000); + } + + i = 0; + outstanding = atomic_read(&instance->fw_outstanding); + fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; + + if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) + goto no_outstanding; + + if (instance->disableOnlineCtrlReset) + goto kill_hba_and_failed; + do { + if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) { + dev_info(&instance->pdev->dev, + "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n", + __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding)); + if (i == 3) + goto kill_hba_and_failed; + megasas_do_ocr(instance); + + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { + dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n", + __func__, __LINE__); + return FAILED; + } + dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n", + __func__, __LINE__); + + for (sl = 0; sl < 10; sl++) + msleep(500); + + outstanding = atomic_read(&instance->fw_outstanding); + + fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; + if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) + goto no_outstanding; + } + i++; + } while (i <= 3); + +no_outstanding: + + dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n", + __func__, __LINE__); + return SUCCESS; + +kill_hba_and_failed: + + /* Reset not supported, kill adapter */ + dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d" + " disableOnlineCtrlReset %d fw_outstanding %d \n", + __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset, + atomic_read(&instance->fw_outstanding)); + megasas_dump_pending_frames(instance); + megaraid_sas_kill_hba(instance); + + return FAILED; +} + +/** + * megasas_generic_reset - Generic reset routine + * @scmd: Mid-layer SCSI command + * + * This routine implements a generic reset handler for device, bus and host + * reset requests. Device, bus and host specific reset handlers can use this + * function after they do their specific tasks. + */ +static int megasas_generic_reset(struct scsi_cmnd *scmd) +{ + int ret_val; + struct megasas_instance *instance; + + instance = (struct megasas_instance *)scmd->device->host->hostdata; + + scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", + scmd->cmnd[0], scmd->retries); + + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { + dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n"); + return FAILED; + } + + ret_val = megasas_wait_for_outstanding(instance); + if (ret_val == SUCCESS) + dev_notice(&instance->pdev->dev, "reset successful\n"); + else + dev_err(&instance->pdev->dev, "failed to do reset\n"); + + return ret_val; +} + +/** + * megasas_reset_timer - quiesce the adapter if required + * @scmd: scsi cmnd + * + * Sets the FW busy flag and reduces the host->can_queue if the + * cmd has not been completed within the timeout period. 
+ */ +static enum scsi_timeout_action megasas_reset_timer(struct scsi_cmnd *scmd) +{ + struct megasas_instance *instance; + unsigned long flags; + + if (time_after(jiffies, scmd->jiffies_at_alloc + + (scmd_timeout * 2) * HZ)) { + return SCSI_EH_NOT_HANDLED; + } + + instance = (struct megasas_instance *)scmd->device->host->hostdata; + if (!(instance->flag & MEGASAS_FW_BUSY)) { + /* FW is busy, throttle IO */ + spin_lock_irqsave(instance->host->host_lock, flags); + + instance->host->can_queue = instance->throttlequeuedepth; + instance->last_time = jiffies; + instance->flag |= MEGASAS_FW_BUSY; + + spin_unlock_irqrestore(instance->host->host_lock, flags); + } + return SCSI_EH_RESET_TIMER; +} + +/** + * megasas_dump - This function will print hexdump of provided buffer. + * @buf: Buffer to be dumped + * @sz: Size in bytes + * @format: Different formats of dumping e.g. format=n will + * cause only 'n' 32 bit words to be dumped in a single + * line. + */ +inline void +megasas_dump(void *buf, int sz, int format) +{ + int i; + __le32 *buf_loc = (__le32 *)buf; + + for (i = 0; i < (sz / sizeof(__le32)); i++) { + if ((i % format) == 0) { + if (i != 0) + printk(KERN_CONT "\n"); + printk(KERN_CONT "%08x: ", (i * 4)); + } + printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i])); + } + printk(KERN_CONT "\n"); +} + +/** + * megasas_dump_reg_set - This function will print hexdump of register set + * @reg_set: Register set to be dumped + */ +inline void +megasas_dump_reg_set(void __iomem *reg_set) +{ + unsigned int i, sz = 256; + u32 __iomem *reg = (u32 __iomem *)reg_set; + + for (i = 0; i < (sz / sizeof(u32)); i++) + printk("%08x: %08x\n", (i * 4), readl(&reg[i])); +} + +/** + * megasas_dump_fusion_io - This function will print key details + * of SCSI IO + * @scmd: SCSI command pointer of SCSI IO + */ +void +megasas_dump_fusion_io(struct scsi_cmnd *scmd) +{ + struct megasas_cmd_fusion *cmd = megasas_priv(scmd)->cmd_priv; + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; + struct megasas_instance *instance; + + instance = (struct megasas_instance *)scmd->device->host->hostdata; + + scmd_printk(KERN_INFO, scmd, + "scmd: (0x%p) retries: 0x%x allowed: 0x%x\n", + scmd, scmd->retries, scmd->allowed); + scsi_print_command(scmd); + + if (cmd) { + req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc; + scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n"); + scmd_printk(KERN_INFO, scmd, + "RequestFlags:0x%x MSIxIndex:0x%x SMID:0x%x LMID:0x%x DevHandle:0x%x\n", + req_desc->SCSIIO.RequestFlags, + req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID, + req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle); + + printk(KERN_INFO "IO request frame:\n"); + megasas_dump(cmd->io_request, + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8); + printk(KERN_INFO "Chain frame:\n"); + megasas_dump(cmd->sg_frame, + instance->max_chain_frame_sz, 8); + } + +} + +/* + * megasas_dump_sys_regs - This function will dump system registers through + * sysfs. + * @reg_set: Pointer to System register set. + * @buf: Buffer to which output is to be written. + * @return: Number of bytes written to buffer. 
+ */ +static inline ssize_t +megasas_dump_sys_regs(void __iomem *reg_set, char *buf) +{ + unsigned int i, sz = 256; + int bytes_wrote = 0; + char *loc = (char *)buf; + u32 __iomem *reg = (u32 __iomem *)reg_set; + + for (i = 0; i < sz / sizeof(u32); i++) { + bytes_wrote += scnprintf(loc + bytes_wrote, + PAGE_SIZE - bytes_wrote, + "%08x: %08x\n", (i * 4), + readl(&reg[i])); + } + return bytes_wrote; +} + +/** + * megasas_reset_bus_host - Bus & host reset handler entry point + * @scmd: Mid-layer SCSI command + */ +static int megasas_reset_bus_host(struct scsi_cmnd *scmd) +{ + int ret; + struct megasas_instance *instance; + + instance = (struct megasas_instance *)scmd->device->host->hostdata; + + scmd_printk(KERN_INFO, scmd, + "OCR is requested due to IO timeout!!\n"); + + scmd_printk(KERN_INFO, scmd, + "SCSI host state: %d SCSI host busy: %d FW outstanding: %d\n", + scmd->device->host->shost_state, + scsi_host_busy(scmd->device->host), + atomic_read(&instance->fw_outstanding)); + /* + * First wait for all commands to complete + */ + if (instance->adapter_type == MFI_SERIES) { + ret = megasas_generic_reset(scmd); + } else { + megasas_dump_fusion_io(scmd); + ret = megasas_reset_fusion(scmd->device->host, + SCSIIO_TIMEOUT_OCR); + } + + return ret; +} + +/** + * megasas_task_abort - Issues task abort request to firmware + * (supported only for fusion adapters) + * @scmd: SCSI command pointer + */ +static int megasas_task_abort(struct scsi_cmnd *scmd) +{ + int ret; + struct megasas_instance *instance; + + instance = (struct megasas_instance *)scmd->device->host->hostdata; + + if (instance->adapter_type != MFI_SERIES) + ret = megasas_task_abort_fusion(scmd); + else { + sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n"); + ret = FAILED; + } + + return ret; +} + +/** + * megasas_reset_target: Issues target reset request to firmware + * (supported only for fusion adapters) + * @scmd: SCSI command pointer + */ +static int megasas_reset_target(struct scsi_cmnd *scmd) +{ + int ret; + struct megasas_instance *instance; + + instance = (struct megasas_instance *)scmd->device->host->hostdata; + + if (instance->adapter_type != MFI_SERIES) + ret = megasas_reset_target_fusion(scmd); + else { + sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n"); + ret = FAILED; + } + + return ret; +} + +/** + * megasas_bios_param - Returns disk geometry for a disk + * @sdev: device handle + * @bdev: block device + * @capacity: drive capacity + * @geom: geometry parameters + */ +static int +megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int geom[]) +{ + int heads; + int sectors; + sector_t cylinders; + unsigned long tmp; + + /* Default heads (64) & sectors (32) */ + heads = 64; + sectors = 32; + + tmp = heads * sectors; + cylinders = capacity; + + sector_div(cylinders, tmp); + + /* + * Handle extended translation size for logical drives > 1Gb + */ + + if (capacity >= 0x200000) { + heads = 255; + sectors = 63; + tmp = heads*sectors; + cylinders = capacity; + sector_div(cylinders, tmp); + } + + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + + return 0; +} + +static void megasas_map_queues(struct Scsi_Host *shost) +{ + struct megasas_instance *instance; + int qoff = 0, offset; + struct blk_mq_queue_map *map; + + instance = (struct megasas_instance *)shost->hostdata; + + if (shost->nr_hw_queues == 1) + return; + + offset = instance->low_latency_index_start; + + /* Setup Default hctx */ + map = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; + 
map->nr_queues = instance->msix_vectors - offset; + map->queue_offset = 0; + blk_mq_pci_map_queues(map, instance->pdev, offset); + qoff += map->nr_queues; + offset += map->nr_queues; + + /* we never use READ queue, so can't cheat blk-mq */ + shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0; + + /* Setup Poll hctx */ + map = &shost->tag_set.map[HCTX_TYPE_POLL]; + map->nr_queues = instance->iopoll_q_count; + if (map->nr_queues) { + /* + * The poll queue(s) doesn't have an IRQ (and hence IRQ + * affinity), so use the regular blk-mq cpu mapping + */ + map->queue_offset = qoff; + blk_mq_map_queues(map); + } +} + +static void megasas_aen_polling(struct work_struct *work); + +/** + * megasas_service_aen - Processes an event notification + * @instance: Adapter soft state + * @cmd: AEN command completed by the ISR + * + * For AEN, driver sends a command down to FW that is held by the FW till an + * event occurs. When an event of interest occurs, FW completes the command + * that it was previously holding. + * + * This routines sends SIGIO signal to processes that have registered with the + * driver for AEN. + */ +static void +megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) +{ + unsigned long flags; + + /* + * Don't signal app if it is just an aborted previously registered aen + */ + if ((!cmd->abort_aen) && (instance->unload == 0)) { + spin_lock_irqsave(&poll_aen_lock, flags); + megasas_poll_wait_aen = 1; + spin_unlock_irqrestore(&poll_aen_lock, flags); + wake_up(&megasas_poll_wait); + kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); + } + else + cmd->abort_aen = 0; + + instance->aen_cmd = NULL; + + megasas_return_cmd(instance, cmd); + + if ((instance->unload == 0) && + ((instance->issuepend_done == 1))) { + struct megasas_aen_event *ev; + + ev = kzalloc(sizeof(*ev), GFP_ATOMIC); + if (!ev) { + dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n"); + } else { + ev->instance = instance; + instance->ev = ev; + INIT_DELAYED_WORK(&ev->hotplug_work, + megasas_aen_polling); + schedule_delayed_work(&ev->hotplug_work, 0); + } + } +} + +static ssize_t +fw_crash_buffer_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = + (struct megasas_instance *) shost->hostdata; + int val = 0; + + if (kstrtoint(buf, 0, &val) != 0) + return -EINVAL; + + mutex_lock(&instance->crashdump_lock); + instance->fw_crash_buffer_offset = val; + mutex_unlock(&instance->crashdump_lock); + return strlen(buf); +} + +static ssize_t +fw_crash_buffer_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = + (struct megasas_instance *) shost->hostdata; + u32 size; + unsigned long dmachunk = CRASH_DMA_BUF_SIZE; + unsigned long chunk_left_bytes; + unsigned long src_addr; + u32 buff_offset; + + mutex_lock(&instance->crashdump_lock); + buff_offset = instance->fw_crash_buffer_offset; + if (!instance->crash_dump_buf || + !((instance->fw_crash_state == AVAILABLE) || + (instance->fw_crash_state == COPYING))) { + dev_err(&instance->pdev->dev, + "Firmware crash dump is not available\n"); + mutex_unlock(&instance->crashdump_lock); + return -EINVAL; + } + + if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { + dev_err(&instance->pdev->dev, + "Firmware crash dump offset is out of range\n"); + mutex_unlock(&instance->crashdump_lock); + return 0; + } + + size = 
(instance->fw_crash_buffer_size * dmachunk) - buff_offset; + chunk_left_bytes = dmachunk - (buff_offset % dmachunk); + size = (size > chunk_left_bytes) ? chunk_left_bytes : size; + size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; + + src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + + (buff_offset % dmachunk); + memcpy(buf, (void *)src_addr, size); + mutex_unlock(&instance->crashdump_lock); + + return size; +} + +static ssize_t +fw_crash_buffer_size_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = + (struct megasas_instance *) shost->hostdata; + + return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) + ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); +} + +static ssize_t +fw_crash_state_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = + (struct megasas_instance *) shost->hostdata; + int val = 0; + + if (kstrtoint(buf, 0, &val) != 0) + return -EINVAL; + + if ((val <= AVAILABLE || val > COPY_ERROR)) { + dev_err(&instance->pdev->dev, "application updates invalid " + "firmware crash state\n"); + return -EINVAL; + } + + instance->fw_crash_state = val; + + if ((val == COPIED) || (val == COPY_ERROR)) { + mutex_lock(&instance->crashdump_lock); + megasas_free_host_crash_buffer(instance); + mutex_unlock(&instance->crashdump_lock); + if (val == COPY_ERROR) + dev_info(&instance->pdev->dev, "application failed to " + "copy Firmware crash dump\n"); + else + dev_info(&instance->pdev->dev, "Firmware crash dump " + "copied successfully\n"); + } + return strlen(buf); +} + +static ssize_t +fw_crash_state_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = + (struct megasas_instance *) shost->hostdata; + + return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); +} + +static ssize_t +page_size_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); +} + +static ssize_t +ldio_outstanding_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; + + return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); +} + +static ssize_t +fw_cmds_outstanding_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; + + return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding)); +} + +static ssize_t +enable_sdev_max_qd_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; + + return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd); +} + +static ssize_t +enable_sdev_max_qd_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; + u32 val = 0; + bool is_target_prop; + int 
ret_target_prop = DCMD_FAILED; + struct scsi_device *sdev; + + if (kstrtou32(buf, 0, &val) != 0) { + pr_err("megasas: could not set enable_sdev_max_qd\n"); + return -EINVAL; + } + + mutex_lock(&instance->reset_mutex); + if (val) + instance->enable_sdev_max_qd = true; + else + instance->enable_sdev_max_qd = false; + + shost_for_each_device(sdev, shost) { + ret_target_prop = megasas_get_target_prop(instance, sdev); + is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; + megasas_set_fw_assisted_qd(sdev, is_target_prop); + } + mutex_unlock(&instance->reset_mutex); + + return strlen(buf); +} + +static ssize_t +dump_system_regs_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = + (struct megasas_instance *)shost->hostdata; + + return megasas_dump_sys_regs(instance->reg_set, buf); +} + +static ssize_t +raid_map_id_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = + (struct megasas_instance *)shost->hostdata; + + return snprintf(buf, PAGE_SIZE, "%ld\n", + (unsigned long)instance->map_id); +} + +static DEVICE_ATTR_RW(fw_crash_buffer); +static DEVICE_ATTR_RO(fw_crash_buffer_size); +static DEVICE_ATTR_RW(fw_crash_state); +static DEVICE_ATTR_RO(page_size); +static DEVICE_ATTR_RO(ldio_outstanding); +static DEVICE_ATTR_RO(fw_cmds_outstanding); +static DEVICE_ATTR_RW(enable_sdev_max_qd); +static DEVICE_ATTR_RO(dump_system_regs); +static DEVICE_ATTR_RO(raid_map_id); + +static struct attribute *megaraid_host_attrs[] = { + &dev_attr_fw_crash_buffer_size.attr, + &dev_attr_fw_crash_buffer.attr, + &dev_attr_fw_crash_state.attr, + &dev_attr_page_size.attr, + &dev_attr_ldio_outstanding.attr, + &dev_attr_fw_cmds_outstanding.attr, + &dev_attr_enable_sdev_max_qd.attr, + &dev_attr_dump_system_regs.attr, + &dev_attr_raid_map_id.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(megaraid_host); + +/* + * Scsi host template for megaraid_sas driver + */ +static const struct scsi_host_template megasas_template = { + + .module = THIS_MODULE, + .name = "Avago SAS based MegaRAID driver", + .proc_name = "megaraid_sas", + .slave_configure = megasas_slave_configure, + .slave_alloc = megasas_slave_alloc, + .slave_destroy = megasas_slave_destroy, + .queuecommand = megasas_queue_command, + .eh_target_reset_handler = megasas_reset_target, + .eh_abort_handler = megasas_task_abort, + .eh_host_reset_handler = megasas_reset_bus_host, + .eh_timed_out = megasas_reset_timer, + .shost_groups = megaraid_host_groups, + .bios_param = megasas_bios_param, + .map_queues = megasas_map_queues, + .mq_poll = megasas_blk_mq_poll, + .change_queue_depth = scsi_change_queue_depth, + .max_segment_size = 0xffffffff, + .cmd_size = sizeof(struct megasas_cmd_priv), +}; + +/** + * megasas_complete_int_cmd - Completes an internal command + * @instance: Adapter soft state + * @cmd: Command to be completed + * + * The megasas_issue_blocked_cmd() function waits for a command to complete + * after it issues a command. This function wakes up that waiting routine by + * calling wake_up() on the wait queue. + */ +static void +megasas_complete_int_cmd(struct megasas_instance *instance, + struct megasas_cmd *cmd) +{ + if (cmd->cmd_status_drv == DCMD_INIT) + cmd->cmd_status_drv = + (cmd->frame->io.cmd_status == MFI_STAT_OK) ? 
+ DCMD_SUCCESS : DCMD_FAILED; + + wake_up(&instance->int_cmd_wait_q); +} + +/** + * megasas_complete_abort - Completes aborting a command + * @instance: Adapter soft state + * @cmd: Cmd that was issued to abort another cmd + * + * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q + * after it issues an abort on a previously issued command. This function + * wakes up all functions waiting on the same wait queue. + */ +static void +megasas_complete_abort(struct megasas_instance *instance, + struct megasas_cmd *cmd) +{ + if (cmd->sync_cmd) { + cmd->sync_cmd = 0; + cmd->cmd_status_drv = DCMD_SUCCESS; + wake_up(&instance->abort_cmd_wait_q); + } +} + +static void +megasas_set_ld_removed_by_fw(struct megasas_instance *instance) +{ + uint i; + + for (i = 0; (i < MEGASAS_MAX_LD_IDS); i++) { + if (instance->ld_ids_prev[i] != 0xff && + instance->ld_ids_from_raidmap[i] == 0xff) { + if (megasas_dbg_lvl & LD_PD_DEBUG) + dev_info(&instance->pdev->dev, + "LD target ID %d removed from RAID map\n", i); + instance->ld_tgtid_status[i] = LD_TARGET_ID_DELETED; + } + } +} + +/** + * megasas_complete_cmd - Completes a command + * @instance: Adapter soft state + * @cmd: Command to be completed + * @alt_status: If non-zero, use this value as status to + * SCSI mid-layer instead of the value returned + * by the FW. This should be used if caller wants + * an alternate status (as in the case of aborted + * commands) + */ +void +megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, + u8 alt_status) +{ + int exception = 0; + struct megasas_header *hdr = &cmd->frame->hdr; + unsigned long flags; + struct fusion_context *fusion = instance->ctrl_context; + u32 opcode, status; + + /* flag for the retry reset */ + cmd->retry_for_fw_reset = 0; + + if (cmd->scmd) + megasas_priv(cmd->scmd)->cmd_priv = NULL; + + switch (hdr->cmd) { + case MFI_CMD_INVALID: + /* Some older 1068 controller FW may keep a pended + MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel + when booting the kdump kernel. Ignore this command to + prevent a kernel panic on shutdown of the kdump kernel. */ + dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command " + "completed\n"); + dev_warn(&instance->pdev->dev, "If you have a controller " + "other than PERC5, please upgrade your firmware\n"); + break; + case MFI_CMD_PD_SCSI_IO: + case MFI_CMD_LD_SCSI_IO: + + /* + * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been + * issued either through an IO path or an IOCTL path. If it + * was via IOCTL, we will send it to internal completion. 
+ */ + if (cmd->sync_cmd) { + cmd->sync_cmd = 0; + megasas_complete_int_cmd(instance, cmd); + break; + } + fallthrough; + + case MFI_CMD_LD_READ: + case MFI_CMD_LD_WRITE: + + if (alt_status) { + cmd->scmd->result = alt_status << 16; + exception = 1; + } + + if (exception) { + + atomic_dec(&instance->fw_outstanding); + + scsi_dma_unmap(cmd->scmd); + scsi_done(cmd->scmd); + megasas_return_cmd(instance, cmd); + + break; + } + + switch (hdr->cmd_status) { + + case MFI_STAT_OK: + cmd->scmd->result = DID_OK << 16; + break; + + case MFI_STAT_SCSI_IO_FAILED: + case MFI_STAT_LD_INIT_IN_PROGRESS: + cmd->scmd->result = + (DID_ERROR << 16) | hdr->scsi_status; + break; + + case MFI_STAT_SCSI_DONE_WITH_ERROR: + + cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; + + if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { + memset(cmd->scmd->sense_buffer, 0, + SCSI_SENSE_BUFFERSIZE); + memcpy(cmd->scmd->sense_buffer, cmd->sense, + hdr->sense_len); + } + + break; + + case MFI_STAT_LD_OFFLINE: + case MFI_STAT_DEVICE_NOT_FOUND: + cmd->scmd->result = DID_BAD_TARGET << 16; + break; + + default: + dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n", + hdr->cmd_status); + cmd->scmd->result = DID_ERROR << 16; + break; + } + + atomic_dec(&instance->fw_outstanding); + + scsi_dma_unmap(cmd->scmd); + scsi_done(cmd->scmd); + megasas_return_cmd(instance, cmd); + + break; + + case MFI_CMD_SMP: + case MFI_CMD_STP: + case MFI_CMD_NVME: + case MFI_CMD_TOOLBOX: + megasas_complete_int_cmd(instance, cmd); + break; + + case MFI_CMD_DCMD: + opcode = le32_to_cpu(cmd->frame->dcmd.opcode); + /* Check for LD map update */ + if ((opcode == MR_DCMD_LD_MAP_GET_INFO) + && (cmd->frame->dcmd.mbox.b[1] == 1)) { + fusion->fast_path_io = 0; + spin_lock_irqsave(instance->host->host_lock, flags); + status = cmd->frame->hdr.cmd_status; + instance->map_update_cmd = NULL; + if (status != MFI_STAT_OK) { + if (status != MFI_STAT_NOT_FOUND) + dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n", + cmd->frame->hdr.cmd_status); + else { + megasas_return_cmd(instance, cmd); + spin_unlock_irqrestore( + instance->host->host_lock, + flags); + break; + } + } + + megasas_return_cmd(instance, cmd); + + /* + * Set fast path IO to ZERO. + * Validate Map will set proper value. + * Meanwhile all IOs will go as LD IO. 
+ */ + if (status == MFI_STAT_OK && + (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) { + instance->map_id++; + fusion->fast_path_io = 1; + } else { + fusion->fast_path_io = 0; + } + + if (instance->adapter_type >= INVADER_SERIES) + megasas_set_ld_removed_by_fw(instance); + + megasas_sync_map_info(instance); + spin_unlock_irqrestore(instance->host->host_lock, + flags); + + break; + } + if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || + opcode == MR_DCMD_CTRL_EVENT_GET) { + spin_lock_irqsave(&poll_aen_lock, flags); + megasas_poll_wait_aen = 0; + spin_unlock_irqrestore(&poll_aen_lock, flags); + } + + /* FW has an updated PD sequence */ + if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && + (cmd->frame->dcmd.mbox.b[0] == 1)) { + + spin_lock_irqsave(instance->host->host_lock, flags); + status = cmd->frame->hdr.cmd_status; + instance->jbod_seq_cmd = NULL; + megasas_return_cmd(instance, cmd); + + if (status == MFI_STAT_OK) { + instance->pd_seq_map_id++; + /* Re-register a pd sync seq num cmd */ + if (megasas_sync_pd_seq_num(instance, true)) + instance->use_seqnum_jbod_fp = false; + } else + instance->use_seqnum_jbod_fp = false; + + spin_unlock_irqrestore(instance->host->host_lock, flags); + break; + } + + /* + * See if got an event notification + */ + if (opcode == MR_DCMD_CTRL_EVENT_WAIT) + megasas_service_aen(instance, cmd); + else + megasas_complete_int_cmd(instance, cmd); + + break; + + case MFI_CMD_ABORT: + /* + * Cmd issued to abort another cmd returned + */ + megasas_complete_abort(instance, cmd); + break; + + default: + dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", + hdr->cmd); + megasas_complete_int_cmd(instance, cmd); + break; + } +} + +/** + * megasas_issue_pending_cmds_again - issue all pending cmds + * in FW again because of the fw reset + * @instance: Adapter soft state + */ +static inline void +megasas_issue_pending_cmds_again(struct megasas_instance *instance) +{ + struct megasas_cmd *cmd; + struct list_head clist_local; + union megasas_evt_class_locale class_locale; + unsigned long flags; + u32 seq_num; + + INIT_LIST_HEAD(&clist_local); + spin_lock_irqsave(&instance->hba_lock, flags); + list_splice_init(&instance->internal_reset_pending_q, &clist_local); + spin_unlock_irqrestore(&instance->hba_lock, flags); + + while (!list_empty(&clist_local)) { + cmd = list_entry((&clist_local)->next, + struct megasas_cmd, list); + list_del_init(&cmd->list); + + if (cmd->sync_cmd || cmd->scmd) { + dev_notice(&instance->pdev->dev, "command %p, %p:%d" + "detected to be pending while HBA reset\n", + cmd, cmd->scmd, cmd->sync_cmd); + + cmd->retry_for_fw_reset++; + + if (cmd->retry_for_fw_reset == 3) { + dev_notice(&instance->pdev->dev, "cmd %p, %p:%d" + "was tried multiple times during reset." 
+ "Shutting down the HBA\n", + cmd, cmd->scmd, cmd->sync_cmd); + instance->instancet->disable_intr(instance); + atomic_set(&instance->fw_reset_no_pci_access, 1); + megaraid_sas_kill_hba(instance); + return; + } + } + + if (cmd->sync_cmd == 1) { + if (cmd->scmd) { + dev_notice(&instance->pdev->dev, "unexpected" + "cmd attached to internal command!\n"); + } + dev_notice(&instance->pdev->dev, "%p synchronous cmd" + "on the internal reset queue," + "issue it again.\n", cmd); + cmd->cmd_status_drv = DCMD_INIT; + instance->instancet->fire_cmd(instance, + cmd->frame_phys_addr, + 0, instance->reg_set); + } else if (cmd->scmd) { + dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]" + "detected on the internal queue, issue again.\n", + cmd, cmd->scmd->cmnd[0]); + + atomic_inc(&instance->fw_outstanding); + instance->instancet->fire_cmd(instance, + cmd->frame_phys_addr, + cmd->frame_count-1, instance->reg_set); + } else { + dev_notice(&instance->pdev->dev, "%p unexpected cmd on the" + "internal reset defer list while re-issue!!\n", + cmd); + } + } + + if (instance->aen_cmd) { + dev_notice(&instance->pdev->dev, "aen_cmd in def process\n"); + megasas_return_cmd(instance, instance->aen_cmd); + + instance->aen_cmd = NULL; + } + + /* + * Initiate AEN (Asynchronous Event Notification) + */ + seq_num = instance->last_seq_num; + class_locale.members.reserved = 0; + class_locale.members.locale = MR_EVT_LOCALE_ALL; + class_locale.members.class = MR_EVT_CLASS_DEBUG; + + megasas_register_aen(instance, seq_num, class_locale.word); +} + +/* + * Move the internal reset pending commands to a deferred queue. + * + * We move the commands pending at internal reset time to a + * pending queue. This queue would be flushed after successful + * completion of the internal reset sequence. if the internal reset + * did not complete in time, the kernel reset handler would flush + * these commands. 
+ */ +static void +megasas_internal_reset_defer_cmds(struct megasas_instance *instance) +{ + struct megasas_cmd *cmd; + int i; + u16 max_cmd = instance->max_fw_cmds; + u32 defer_index; + unsigned long flags; + + defer_index = 0; + spin_lock_irqsave(&instance->mfi_pool_lock, flags); + for (i = 0; i < max_cmd; i++) { + cmd = instance->cmd_list[i]; + if (cmd->sync_cmd == 1 || cmd->scmd) { + dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p" + "on the defer queue as internal\n", + defer_index, cmd, cmd->sync_cmd, cmd->scmd); + + if (!list_empty(&cmd->list)) { + dev_notice(&instance->pdev->dev, "ERROR while" + " moving this cmd:%p, %d %p, it was" + "discovered on some list?\n", + cmd, cmd->sync_cmd, cmd->scmd); + + list_del_init(&cmd->list); + } + defer_index++; + list_add_tail(&cmd->list, + &instance->internal_reset_pending_q); + } + } + spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); +} + + +static void +process_fw_state_change_wq(struct work_struct *work) +{ + struct megasas_instance *instance = + container_of(work, struct megasas_instance, work_init); + u32 wait; + unsigned long flags; + + if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { + dev_notice(&instance->pdev->dev, "error, recovery st %x\n", + atomic_read(&instance->adprecovery)); + return ; + } + + if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { + dev_notice(&instance->pdev->dev, "FW detected to be in fault" + "state, restarting it...\n"); + + instance->instancet->disable_intr(instance); + atomic_set(&instance->fw_outstanding, 0); + + atomic_set(&instance->fw_reset_no_pci_access, 1); + instance->instancet->adp_reset(instance, instance->reg_set); + atomic_set(&instance->fw_reset_no_pci_access, 0); + + dev_notice(&instance->pdev->dev, "FW restarted successfully," + "initiating next stage...\n"); + + dev_notice(&instance->pdev->dev, "HBA recovery state machine," + "state 2 starting...\n"); + + /* waiting for about 20 second before start the second init */ + for (wait = 0; wait < 30; wait++) { + msleep(1000); + } + + if (megasas_transition_to_ready(instance, 1)) { + dev_notice(&instance->pdev->dev, "adapter not ready\n"); + + atomic_set(&instance->fw_reset_no_pci_access, 1); + megaraid_sas_kill_hba(instance); + return ; + } + + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || + (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) + ) { + *instance->consumer = *instance->producer; + } else { + *instance->consumer = 0; + *instance->producer = 0; + } + + megasas_issue_init_mfi(instance); + + spin_lock_irqsave(&instance->hba_lock, flags); + atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); + spin_unlock_irqrestore(&instance->hba_lock, flags); + instance->instancet->enable_intr(instance); + + megasas_issue_pending_cmds_again(instance); + instance->issuepend_done = 1; + } +} + +/** + * megasas_deplete_reply_queue - Processes all completed commands + * @instance: Adapter soft state + * @alt_status: Alternate status to be returned to + * SCSI mid-layer instead of the status + * returned by the FW + * Note: this must be called with hba lock held + */ +static int +megasas_deplete_reply_queue(struct megasas_instance *instance, + u8 alt_status) +{ + u32 mfiStatus; + u32 fw_state; + + if (instance->instancet->check_reset(instance, instance->reg_set) == 1) + return IRQ_HANDLED; + + mfiStatus = instance->instancet->clear_intr(instance); + if (mfiStatus == 0) { + /* Hardware may not set 
outbound_intr_status in MSI-X mode */ + if (!instance->msix_vectors) + return IRQ_NONE; + } + + instance->mfiStatus = mfiStatus; + + if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { + fw_state = instance->instancet->read_fw_status_reg( + instance) & MFI_STATE_MASK; + + if (fw_state != MFI_STATE_FAULT) { + dev_notice(&instance->pdev->dev, "fw state:%x\n", + fw_state); + } + + if ((fw_state == MFI_STATE_FAULT) && + (instance->disableOnlineCtrlReset == 0)) { + dev_notice(&instance->pdev->dev, "wait adp restart\n"); + + if ((instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS1064R) || + (instance->pdev->device == + PCI_DEVICE_ID_DELL_PERC5) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_VERDE_ZCR)) { + + *instance->consumer = + cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); + } + + + instance->instancet->disable_intr(instance); + atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); + instance->issuepend_done = 0; + + atomic_set(&instance->fw_outstanding, 0); + megasas_internal_reset_defer_cmds(instance); + + dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", + fw_state, atomic_read(&instance->adprecovery)); + + schedule_work(&instance->work_init); + return IRQ_HANDLED; + + } else { + dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n", + fw_state, instance->disableOnlineCtrlReset); + } + } + + tasklet_schedule(&instance->isr_tasklet); + return IRQ_HANDLED; +} + +/** + * megasas_isr - isr entry point + * @irq: IRQ number + * @devp: IRQ context address + */ +static irqreturn_t megasas_isr(int irq, void *devp) +{ + struct megasas_irq_context *irq_context = devp; + struct megasas_instance *instance = irq_context->instance; + unsigned long flags; + irqreturn_t rc; + + if (atomic_read(&instance->fw_reset_no_pci_access)) + return IRQ_HANDLED; + + spin_lock_irqsave(&instance->hba_lock, flags); + rc = megasas_deplete_reply_queue(instance, DID_OK); + spin_unlock_irqrestore(&instance->hba_lock, flags); + + return rc; +} + +/** + * megasas_transition_to_ready - Move the FW to READY state + * @instance: Adapter soft state + * @ocr: Adapter reset state + * + * During the initialization, FW passes can potentially be in any one of + * several possible states. If the FW in operational, waiting-for-handshake + * states, driver must take steps to bring it to ready state. Otherwise, it + * has to wait for the ready state. 
+ */ +int +megasas_transition_to_ready(struct megasas_instance *instance, int ocr) +{ + int i; + u8 max_wait; + u32 fw_state; + u32 abs_state, curr_abs_state; + + abs_state = instance->instancet->read_fw_status_reg(instance); + fw_state = abs_state & MFI_STATE_MASK; + + if (fw_state != MFI_STATE_READY) + dev_info(&instance->pdev->dev, "Waiting for FW to come to ready" + " state\n"); + + while (fw_state != MFI_STATE_READY) { + + switch (fw_state) { + + case MFI_STATE_FAULT: + dev_printk(KERN_ERR, &instance->pdev->dev, + "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n", + abs_state & MFI_STATE_FAULT_CODE, + abs_state & MFI_STATE_FAULT_SUBCODE, __func__); + if (ocr) { + max_wait = MEGASAS_RESET_WAIT_TIME; + break; + } else { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); + megasas_dump_reg_set(instance->reg_set); + return -ENODEV; + } + + case MFI_STATE_WAIT_HANDSHAKE: + /* + * Set the CLR bit in inbound doorbell + */ + if ((instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0073SKINNY) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0071SKINNY) || + (instance->adapter_type != MFI_SERIES)) + writel( + MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, + &instance->reg_set->doorbell); + else + writel( + MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, + &instance->reg_set->inbound_doorbell); + + max_wait = MEGASAS_RESET_WAIT_TIME; + break; + + case MFI_STATE_BOOT_MESSAGE_PENDING: + if ((instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0073SKINNY) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0071SKINNY) || + (instance->adapter_type != MFI_SERIES)) + writel(MFI_INIT_HOTPLUG, + &instance->reg_set->doorbell); + else + writel(MFI_INIT_HOTPLUG, + &instance->reg_set->inbound_doorbell); + + max_wait = MEGASAS_RESET_WAIT_TIME; + break; + + case MFI_STATE_OPERATIONAL: + /* + * Bring it to READY state; assuming max wait 10 secs + */ + instance->instancet->disable_intr(instance); + if ((instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0073SKINNY) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0071SKINNY) || + (instance->adapter_type != MFI_SERIES)) { + writel(MFI_RESET_FLAGS, + &instance->reg_set->doorbell); + + if (instance->adapter_type != MFI_SERIES) { + for (i = 0; i < (10 * 1000); i += 20) { + if (megasas_readl( + instance, + &instance-> + reg_set-> + doorbell) & 1) + msleep(20); + else + break; + } + } + } else + writel(MFI_RESET_FLAGS, + &instance->reg_set->inbound_doorbell); + + max_wait = MEGASAS_RESET_WAIT_TIME; + break; + + case MFI_STATE_UNDEFINED: + /* + * This state should not last for more than 2 seconds + */ + max_wait = MEGASAS_RESET_WAIT_TIME; + break; + + case MFI_STATE_BB_INIT: + max_wait = MEGASAS_RESET_WAIT_TIME; + break; + + case MFI_STATE_FW_INIT: + max_wait = MEGASAS_RESET_WAIT_TIME; + break; + + case MFI_STATE_FW_INIT_2: + max_wait = MEGASAS_RESET_WAIT_TIME; + break; + + case MFI_STATE_DEVICE_SCAN: + max_wait = MEGASAS_RESET_WAIT_TIME; + break; + + case MFI_STATE_FLUSH_CACHE: + max_wait = MEGASAS_RESET_WAIT_TIME; + break; + + default: + dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n", + fw_state); + dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); + megasas_dump_reg_set(instance->reg_set); + return -ENODEV; + } + + /* + * The cur_state should not last for more than max_wait secs + */ + for (i = 0; i < max_wait * 50; i++) { + curr_abs_state = instance->instancet-> + read_fw_status_reg(instance); + + if (abs_state == curr_abs_state) { + msleep(20); + } else + break; + } + + /* + * Return error 
if fw_state hasn't changed after max_wait + */ + if (curr_abs_state == abs_state) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed " + "in %d secs\n", fw_state, max_wait); + dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); + megasas_dump_reg_set(instance->reg_set); + return -ENODEV; + } + + abs_state = curr_abs_state; + fw_state = curr_abs_state & MFI_STATE_MASK; + } + dev_info(&instance->pdev->dev, "FW now in Ready state\n"); + + return 0; +} + +/** + * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool + * @instance: Adapter soft state + */ +static void megasas_teardown_frame_pool(struct megasas_instance *instance) +{ + int i; + u16 max_cmd = instance->max_mfi_cmds; + struct megasas_cmd *cmd; + + if (!instance->frame_dma_pool) + return; + + /* + * Return all frames to pool + */ + for (i = 0; i < max_cmd; i++) { + + cmd = instance->cmd_list[i]; + + if (cmd->frame) + dma_pool_free(instance->frame_dma_pool, cmd->frame, + cmd->frame_phys_addr); + + if (cmd->sense) + dma_pool_free(instance->sense_dma_pool, cmd->sense, + cmd->sense_phys_addr); + } + + /* + * Now destroy the pool itself + */ + dma_pool_destroy(instance->frame_dma_pool); + dma_pool_destroy(instance->sense_dma_pool); + + instance->frame_dma_pool = NULL; + instance->sense_dma_pool = NULL; +} + +/** + * megasas_create_frame_pool - Creates DMA pool for cmd frames + * @instance: Adapter soft state + * + * Each command packet has an embedded DMA memory buffer that is used for + * filling MFI frame and the SG list that immediately follows the frame. This + * function creates those DMA memory buffers for each command packet by using + * PCI pool facility. + */ +static int megasas_create_frame_pool(struct megasas_instance *instance) +{ + int i; + u16 max_cmd; + u32 frame_count; + struct megasas_cmd *cmd; + + max_cmd = instance->max_mfi_cmds; + + /* + * For MFI controllers. + * max_num_sge = 60 + * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) + * Total 960 byte (15 MFI frame of 64 byte) + * + * Fusion adapter require only 3 extra frame. + * max_num_sge = 16 (defined as MAX_IOCTL_SGE) + * max_sge_sz = 12 byte (sizeof megasas_sge64) + * Total 192 byte (3 MFI frame of 64 byte) + */ + frame_count = (instance->adapter_type == MFI_SERIES) ? + (15 + 1) : (3 + 1); + instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; + /* + * Use DMA pool facility provided by PCI layer + */ + instance->frame_dma_pool = dma_pool_create("megasas frame pool", + &instance->pdev->dev, + instance->mfi_frame_size, 256, 0); + + if (!instance->frame_dma_pool) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); + return -ENOMEM; + } + + instance->sense_dma_pool = dma_pool_create("megasas sense pool", + &instance->pdev->dev, 128, + 4, 0); + + if (!instance->sense_dma_pool) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); + + dma_pool_destroy(instance->frame_dma_pool); + instance->frame_dma_pool = NULL; + + return -ENOMEM; + } + + /* + * Allocate and attach a frame to each of the commands in cmd_list. 
+ * By making cmd->index as the context instead of the &cmd, we can + * always use 32bit context regardless of the architecture + */ + for (i = 0; i < max_cmd; i++) { + + cmd = instance->cmd_list[i]; + + cmd->frame = dma_pool_zalloc(instance->frame_dma_pool, + GFP_KERNEL, &cmd->frame_phys_addr); + + cmd->sense = dma_pool_alloc(instance->sense_dma_pool, + GFP_KERNEL, &cmd->sense_phys_addr); + + /* + * megasas_teardown_frame_pool() takes care of freeing + * whatever has been allocated + */ + if (!cmd->frame || !cmd->sense) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n"); + megasas_teardown_frame_pool(instance); + return -ENOMEM; + } + + cmd->frame->io.context = cpu_to_le32(cmd->index); + cmd->frame->io.pad_0 = 0; + if ((instance->adapter_type == MFI_SERIES) && reset_devices) + cmd->frame->hdr.cmd = MFI_CMD_INVALID; + } + + return 0; +} + +/** + * megasas_free_cmds - Free all the cmds in the free cmd pool + * @instance: Adapter soft state + */ +void megasas_free_cmds(struct megasas_instance *instance) +{ + int i; + + /* First free the MFI frame pool */ + megasas_teardown_frame_pool(instance); + + /* Free all the commands in the cmd_list */ + for (i = 0; i < instance->max_mfi_cmds; i++) + + kfree(instance->cmd_list[i]); + + /* Free the cmd_list buffer itself */ + kfree(instance->cmd_list); + instance->cmd_list = NULL; + + INIT_LIST_HEAD(&instance->cmd_pool); +} + +/** + * megasas_alloc_cmds - Allocates the command packets + * @instance: Adapter soft state + * + * Each command that is issued to the FW, whether IO commands from the OS or + * internal commands like IOCTLs, are wrapped in local data structure called + * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to + * the FW. + * + * Each frame has a 32-bit field called context (tag). This context is used + * to get back the megasas_cmd from the frame when a frame gets completed in + * the ISR. Typically the address of the megasas_cmd itself would be used as + * the context. But we wanted to keep the differences between 32 and 64 bit + * systems to the mininum. We always use 32 bit integers for the context. In + * this driver, the 32 bit values are the indices into an array cmd_list. + * This array is used only to look up the megasas_cmd given the context. The + * free commands themselves are maintained in a linked list called cmd_pool. + */ +int megasas_alloc_cmds(struct megasas_instance *instance) +{ + int i; + int j; + u16 max_cmd; + struct megasas_cmd *cmd; + + max_cmd = instance->max_mfi_cmds; + + /* + * instance->cmd_list is an array of struct megasas_cmd pointers. + * Allocate the dynamic array first and then allocate individual + * commands. 
+ */ + instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL); + + if (!instance->cmd_list) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n"); + return -ENOMEM; + } + + for (i = 0; i < max_cmd; i++) { + instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), + GFP_KERNEL); + + if (!instance->cmd_list[i]) { + + for (j = 0; j < i; j++) + kfree(instance->cmd_list[j]); + + kfree(instance->cmd_list); + instance->cmd_list = NULL; + + return -ENOMEM; + } + } + + for (i = 0; i < max_cmd; i++) { + cmd = instance->cmd_list[i]; + memset(cmd, 0, sizeof(struct megasas_cmd)); + cmd->index = i; + cmd->scmd = NULL; + cmd->instance = instance; + + list_add_tail(&cmd->list, &instance->cmd_pool); + } + + /* + * Create a frame pool and assign one frame to each cmd + */ + if (megasas_create_frame_pool(instance)) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n"); + megasas_free_cmds(instance); + return -ENOMEM; + } + + return 0; +} + +/* + * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state. + * @instance: Adapter soft state + * + * Return 0 for only Fusion adapter, if driver load/unload is not in progress + * or FW is not under OCR. + */ +inline int +dcmd_timeout_ocr_possible(struct megasas_instance *instance) { + + if (instance->adapter_type == MFI_SERIES) + return KILL_ADAPTER; + else if (instance->unload || + test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, + &instance->reset_flags)) + return IGNORE_TIMEOUT; + else + return INITIATE_OCR; +} + +static void +megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev) +{ + int ret; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + + struct MR_PRIV_DEVICE *mr_device_priv_data; + u16 device_id = 0; + + device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); + return; + } + + dcmd = &cmd->frame->dcmd; + + memset(instance->pd_info, 0, sizeof(*instance->pd_info)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->mbox.s[0] = cpu_to_le16(device_id); + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO)); + dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO); + + megasas_set_dma_settings(instance, dcmd, instance->pd_info_h, + sizeof(struct MR_PD_INFO)); + + if ((instance->adapter_type != MFI_SERIES) && + !instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); + else + ret = megasas_issue_polled(instance, cmd); + + switch (ret) { + case DCMD_SUCCESS: + mr_device_priv_data = sdev->hostdata; + le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType); + mr_device_priv_data->interface_type = + instance->pd_info->state.ddf.pdType.intf; + break; + + case DCMD_TIMEOUT: + + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + mutex_unlock(&instance->reset_mutex); + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + mutex_lock(&instance->reset_mutex); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + + break; + } + + if (ret != DCMD_TIMEOUT) + megasas_return_cmd(instance, cmd); + + return; +} +/* + * 
megasas_get_pd_list_info - Returns FW's pd_list structure + * @instance: Adapter soft state + * @pd_list: pd_list structure + * + * Issues an internal command (DCMD) to get the FW's controller PD + * list structure. This information is mainly used to find out SYSTEM + * supported by the FW. + */ +static int +megasas_get_pd_list(struct megasas_instance *instance) +{ + int ret = 0, pd_index = 0; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct MR_PD_LIST *ci; + struct MR_PD_ADDRESS *pd_addr; + + if (instance->pd_list_not_supported) { + dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " + "not supported by firmware\n"); + return ret; + } + + ci = instance->pd_list_buf; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n"); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + memset(ci, 0, sizeof(*ci)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; + dcmd->mbox.b[1] = 0; + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); + dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); + + megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h, + (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST))); + + if ((instance->adapter_type != MFI_SERIES) && + !instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, cmd, + MFI_IO_TIMEOUT_SECS); + else + ret = megasas_issue_polled(instance, cmd); + + switch (ret) { + case DCMD_FAILED: + dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " + "failed/not supported by firmware\n"); + + if (instance->adapter_type != MFI_SERIES) + megaraid_sas_kill_hba(instance); + else + instance->pd_list_not_supported = 1; + break; + case DCMD_TIMEOUT: + + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + /* + * DCMD failed from AEN path. + * AEN path already hold reset_mutex to avoid PCI access + * while OCR is in progress. 
+ */ + mutex_unlock(&instance->reset_mutex); + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + mutex_lock(&instance->reset_mutex); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n", + __func__, __LINE__); + break; + } + + break; + + case DCMD_SUCCESS: + pd_addr = ci->addr; + if (megasas_dbg_lvl & LD_PD_DEBUG) + dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n", + __func__, le32_to_cpu(ci->count)); + + if ((le32_to_cpu(ci->count) > + (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) + break; + + memset(instance->local_pd_list, 0, + MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); + + for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { + instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = + le16_to_cpu(pd_addr->deviceId); + instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = + pd_addr->scsiDevType; + instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = + MR_PD_STATE_SYSTEM; + if (megasas_dbg_lvl & LD_PD_DEBUG) + dev_info(&instance->pdev->dev, + "PD%d: targetID: 0x%03x deviceType:0x%x\n", + pd_index, le16_to_cpu(pd_addr->deviceId), + pd_addr->scsiDevType); + pd_addr++; + } + + memcpy(instance->pd_list, instance->local_pd_list, + sizeof(instance->pd_list)); + break; + + } + + if (ret != DCMD_TIMEOUT) + megasas_return_cmd(instance, cmd); + + return ret; +} + +/* + * megasas_get_ld_list_info - Returns FW's ld_list structure + * @instance: Adapter soft state + * @ld_list: ld_list structure + * + * Issues an internal command (DCMD) to get the FW's controller PD + * list structure. This information is mainly used to find out SYSTEM + * supported by the FW. + */ +static int +megasas_get_ld_list(struct megasas_instance *instance) +{ + int ret = 0, ld_index = 0, ids = 0; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct MR_LD_LIST *ci; + dma_addr_t ci_h = 0; + u32 ld_count; + + ci = instance->ld_list_buf; + ci_h = instance->ld_list_buf_h; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n"); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + memset(ci, 0, sizeof(*ci)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + if (instance->supportmax256vd) + dcmd->mbox.b[0] = 1; + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); + dcmd->pad_0 = 0; + + megasas_set_dma_settings(instance, dcmd, ci_h, + sizeof(struct MR_LD_LIST)); + + if ((instance->adapter_type != MFI_SERIES) && + !instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, cmd, + MFI_IO_TIMEOUT_SECS); + else + ret = megasas_issue_polled(instance, cmd); + + ld_count = le32_to_cpu(ci->ldCount); + + switch (ret) { + case DCMD_FAILED: + megaraid_sas_kill_hba(instance); + break; + case DCMD_TIMEOUT: + + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + /* + * DCMD failed from AEN path. + * AEN path already hold reset_mutex to avoid PCI access + * while OCR is in progress. 
+ */ + mutex_unlock(&instance->reset_mutex); + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + mutex_lock(&instance->reset_mutex); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + + break; + + case DCMD_SUCCESS: + if (megasas_dbg_lvl & LD_PD_DEBUG) + dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", + __func__, ld_count); + + if (ld_count > instance->fw_supported_vd_count) + break; + + memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); + + for (ld_index = 0; ld_index < ld_count; ld_index++) { + if (ci->ldList[ld_index].state != 0) { + ids = ci->ldList[ld_index].ref.targetId; + instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; + if (megasas_dbg_lvl & LD_PD_DEBUG) + dev_info(&instance->pdev->dev, + "LD%d: targetID: 0x%03x\n", + ld_index, ids); + } + } + + break; + } + + if (ret != DCMD_TIMEOUT) + megasas_return_cmd(instance, cmd); + + return ret; +} + +/** + * megasas_ld_list_query - Returns FW's ld_list structure + * @instance: Adapter soft state + * @query_type: ld_list structure type + * + * Issues an internal command (DCMD) to get the FW's controller PD + * list structure. This information is mainly used to find out SYSTEM + * supported by the FW. + */ +static int +megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) +{ + int ret = 0, ld_index = 0, ids = 0; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct MR_LD_TARGETID_LIST *ci; + dma_addr_t ci_h = 0; + u32 tgtid_count; + + ci = instance->ld_targetid_list_buf; + ci_h = instance->ld_targetid_list_buf_h; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_warn(&instance->pdev->dev, + "megasas_ld_list_query: Failed to get cmd\n"); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + memset(ci, 0, sizeof(*ci)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->mbox.b[0] = query_type; + if (instance->supportmax256vd) + dcmd->mbox.b[2] = 1; + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); + dcmd->pad_0 = 0; + + megasas_set_dma_settings(instance, dcmd, ci_h, + sizeof(struct MR_LD_TARGETID_LIST)); + + if ((instance->adapter_type != MFI_SERIES) && + !instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); + else + ret = megasas_issue_polled(instance, cmd); + + switch (ret) { + case DCMD_FAILED: + dev_info(&instance->pdev->dev, + "DCMD not supported by firmware - %s %d\n", + __func__, __LINE__); + ret = megasas_get_ld_list(instance); + break; + case DCMD_TIMEOUT: + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + /* + * DCMD failed from AEN path. + * AEN path already hold reset_mutex to avoid PCI access + * while OCR is in progress. 
+ */ + mutex_unlock(&instance->reset_mutex); + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + mutex_lock(&instance->reset_mutex); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + + break; + case DCMD_SUCCESS: + tgtid_count = le32_to_cpu(ci->count); + + if (megasas_dbg_lvl & LD_PD_DEBUG) + dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", + __func__, tgtid_count); + + if ((tgtid_count > (instance->fw_supported_vd_count))) + break; + + memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); + for (ld_index = 0; ld_index < tgtid_count; ld_index++) { + ids = ci->targetId[ld_index]; + instance->ld_ids[ids] = ci->targetId[ld_index]; + if (megasas_dbg_lvl & LD_PD_DEBUG) + dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n", + ld_index, ci->targetId[ld_index]); + } + + break; + } + + if (ret != DCMD_TIMEOUT) + megasas_return_cmd(instance, cmd); + + return ret; +} + +/** + * megasas_host_device_list_query + * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET + * dcmd.mbox - reserved + * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure + * Desc: This DCMD will return the combined device list + * Status: MFI_STAT_OK - List returned successfully + * MFI_STAT_INVALID_CMD - Firmware support for the feature has been + * disabled + * @instance: Adapter soft state + * @is_probe: Driver probe check + * Return: 0 if DCMD succeeded + * non-zero if failed + */ +static int +megasas_host_device_list_query(struct megasas_instance *instance, + bool is_probe) +{ + int ret, i, target_id; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct MR_HOST_DEVICE_LIST *ci; + u32 count; + dma_addr_t ci_h; + + ci = instance->host_device_list_buf; + ci_h = instance->host_device_list_buf_h; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_warn(&instance->pdev->dev, + "%s: failed to get cmd\n", + __func__); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + memset(ci, 0, sizeof(*ci)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->mbox.b[0] = is_probe ? 
0 : 1; + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET); + + megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ); + + if (!instance->mask_interrupts) { + ret = megasas_issue_blocked_cmd(instance, cmd, + MFI_IO_TIMEOUT_SECS); + } else { + ret = megasas_issue_polled(instance, cmd); + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + } + + switch (ret) { + case DCMD_SUCCESS: + /* Fill the internal pd_list and ld_ids array based on + * targetIds returned by FW + */ + count = le32_to_cpu(ci->count); + + if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT)) + break; + + if (megasas_dbg_lvl & LD_PD_DEBUG) + dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n", + __func__, count); + + memset(instance->local_pd_list, 0, + MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); + memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); + for (i = 0; i < count; i++) { + target_id = le16_to_cpu(ci->host_device_list[i].target_id); + if (ci->host_device_list[i].flags.u.bits.is_sys_pd) { + instance->local_pd_list[target_id].tid = target_id; + instance->local_pd_list[target_id].driveType = + ci->host_device_list[i].scsi_type; + instance->local_pd_list[target_id].driveState = + MR_PD_STATE_SYSTEM; + if (megasas_dbg_lvl & LD_PD_DEBUG) + dev_info(&instance->pdev->dev, + "Device %d: PD targetID: 0x%03x deviceType:0x%x\n", + i, target_id, ci->host_device_list[i].scsi_type); + } else { + instance->ld_ids[target_id] = target_id; + if (megasas_dbg_lvl & LD_PD_DEBUG) + dev_info(&instance->pdev->dev, + "Device %d: LD targetID: 0x%03x\n", + i, target_id); + } + } + + memcpy(instance->pd_list, instance->local_pd_list, + sizeof(instance->pd_list)); + break; + + case DCMD_TIMEOUT: + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + mutex_unlock(&instance->reset_mutex); + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + mutex_lock(&instance->reset_mutex); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + break; + case DCMD_FAILED: + dev_err(&instance->pdev->dev, + "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n", + __func__); + break; + } + + if (ret != DCMD_TIMEOUT) + megasas_return_cmd(instance, cmd); + + return ret; +} + +/* + * megasas_update_ext_vd_details : Update details w.r.t Extended VD + * instance : Controller's instance +*/ +static void megasas_update_ext_vd_details(struct megasas_instance *instance) +{ + struct fusion_context *fusion; + u32 ventura_map_sz = 0; + + fusion = instance->ctrl_context; + /* For MFI based controllers return dummy success */ + if (!fusion) + return; + + instance->supportmax256vd = + instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs; + /* Below is additional check to address future FW enhancement */ + if (instance->ctrl_info_buf->max_lds > 64) + instance->supportmax256vd = 1; + + instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS + * MEGASAS_MAX_DEV_PER_CHANNEL; + instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS + * MEGASAS_MAX_DEV_PER_CHANNEL; + if (instance->supportmax256vd) { + instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; + instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; + } else 
{ + instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; + instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; + } + + dev_info(&instance->pdev->dev, + "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n", + instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0, + instance->ctrl_info_buf->max_lds); + + if (instance->max_raid_mapsize) { + ventura_map_sz = instance->max_raid_mapsize * + MR_MIN_MAP_SIZE; /* 64k */ + fusion->current_map_sz = ventura_map_sz; + fusion->max_map_sz = ventura_map_sz; + } else { + fusion->old_map_sz = + struct_size_t(struct MR_FW_RAID_MAP, ldSpanMap, + instance->fw_supported_vd_count); + fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); + + fusion->max_map_sz = + max(fusion->old_map_sz, fusion->new_map_sz); + + if (instance->supportmax256vd) + fusion->current_map_sz = fusion->new_map_sz; + else + fusion->current_map_sz = fusion->old_map_sz; + } + /* irrespective of FW raid maps, driver raid map is constant */ + fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); +} + +/* + * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES + * dcmd.hdr.length - number of bytes to read + * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES + * Desc: Fill in snapdump properties + * Status: MFI_STAT_OK- Command successful + */ +void megasas_get_snapdump_properties(struct megasas_instance *instance) +{ + int ret = 0; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct MR_SNAPDUMP_PROPERTIES *ci; + dma_addr_t ci_h = 0; + + ci = instance->snapdump_prop; + ci_h = instance->snapdump_prop_h; + + if (!ci) + return; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n"); + return; + } + + dcmd = &cmd->frame->dcmd; + + memset(ci, 0, sizeof(*ci)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES)); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES); + + megasas_set_dma_settings(instance, dcmd, ci_h, + sizeof(struct MR_SNAPDUMP_PROPERTIES)); + + if (!instance->mask_interrupts) { + ret = megasas_issue_blocked_cmd(instance, cmd, + MFI_IO_TIMEOUT_SECS); + } else { + ret = megasas_issue_polled(instance, cmd); + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + } + + switch (ret) { + case DCMD_SUCCESS: + instance->snapdump_wait_time = + min_t(u8, ci->trigger_min_num_sec_before_ocr, + MEGASAS_MAX_SNAP_DUMP_WAIT_TIME); + break; + + case DCMD_TIMEOUT: + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + mutex_unlock(&instance->reset_mutex); + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + mutex_lock(&instance->reset_mutex); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + } + + if (ret != DCMD_TIMEOUT) + megasas_return_cmd(instance, cmd); +} + +/** + * megasas_get_ctrl_info - Returns FW's controller structure + * @instance: Adapter soft state + * + * Issues an internal command (DCMD) to get the FW's controller structure. + * This information is mainly used to find out the maximum IO transfer per + * command supported by the FW. 
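/*
 * Editor's note: illustrative, standalone sketch (not part of this patch) of
 * how the old_map_sz/new_map_sz style "header plus N trailing entries" sizes
 * in megasas_update_ext_vd_details() above are computed. The struct names
 * here are invented; only the struct_size_t()-style arithmetic is the point.
 */
#include <stddef.h>
#include <stdio.h>

struct span_map { unsigned int data[4]; };	/* stand-in for a per-LD span map */

struct fw_raid_map {				/* stand-in for MR_FW_RAID_MAP */
	unsigned int total_size;
	struct span_map ld_span_map[];		/* one entry per supported LD */
};

static size_t map_size(size_t ld_count)
{
	/* same shape as struct_size_t(struct MR_FW_RAID_MAP, ldSpanMap, n) */
	return offsetof(struct fw_raid_map, ld_span_map) +
	       ld_count * sizeof(struct span_map);
}

int main(void)
{
	printf("map size for 64 LDs : %zu bytes\n", map_size(64));
	printf("map size for 256 LDs: %zu bytes\n", map_size(256));
	return 0;
}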
+ */ +int +megasas_get_ctrl_info(struct megasas_instance *instance) +{ + int ret = 0; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct megasas_ctrl_info *ci; + dma_addr_t ci_h = 0; + + ci = instance->ctrl_info_buf; + ci_h = instance->ctrl_info_buf_h; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n"); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + memset(ci, 0, sizeof(*ci)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); + dcmd->mbox.b[0] = 1; + + megasas_set_dma_settings(instance, dcmd, ci_h, + sizeof(struct megasas_ctrl_info)); + + if ((instance->adapter_type != MFI_SERIES) && + !instance->mask_interrupts) { + ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); + } else { + ret = megasas_issue_polled(instance, cmd); + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + } + + switch (ret) { + case DCMD_SUCCESS: + /* Save required controller information in + * CPU endianness format. + */ + le32_to_cpus((u32 *)&ci->properties.OnOffProperties); + le16_to_cpus((u16 *)&ci->properties.on_off_properties2); + le32_to_cpus((u32 *)&ci->adapterOperations2); + le32_to_cpus((u32 *)&ci->adapterOperations3); + le16_to_cpus((u16 *)&ci->adapter_operations4); + le32_to_cpus((u32 *)&ci->adapter_operations5); + + /* Update the latest Ext VD info. + * From Init path, store current firmware details. + * From OCR path, detect any firmware properties changes. + * in case of Firmware upgrade without system reboot. + */ + megasas_update_ext_vd_details(instance); + instance->support_seqnum_jbod_fp = + ci->adapterOperations3.useSeqNumJbodFP; + instance->support_morethan256jbod = + ci->adapter_operations4.support_pd_map_target_id; + instance->support_nvme_passthru = + ci->adapter_operations4.support_nvme_passthru; + instance->support_pci_lane_margining = + ci->adapter_operations5.support_pci_lane_margining; + instance->task_abort_tmo = ci->TaskAbortTO; + instance->max_reset_tmo = ci->MaxResetTO; + + /*Check whether controller is iMR or MR */ + instance->is_imr = (ci->memory_size ? 0 : 1); + + instance->snapdump_wait_time = + (ci->properties.on_off_properties2.enable_snap_dump ? + MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0); + + instance->enable_fw_dev_list = + ci->properties.on_off_properties2.enable_fw_dev_list; + + dev_info(&instance->pdev->dev, + "controller type\t: %s(%dMB)\n", + instance->is_imr ? "iMR" : "MR", + le16_to_cpu(ci->memory_size)); + + instance->disableOnlineCtrlReset = + ci->properties.OnOffProperties.disableOnlineCtrlReset; + instance->secure_jbod_support = + ci->adapterOperations3.supportSecurityonJBOD; + dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", + instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); + dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", + instance->secure_jbod_support ? "Yes" : "No"); + dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n", + instance->support_nvme_passthru ? 
"Yes" : "No"); + dev_info(&instance->pdev->dev, + "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n", + instance->task_abort_tmo, instance->max_reset_tmo); + dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n", + instance->support_seqnum_jbod_fp ? "Yes" : "No"); + dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n", + instance->support_pci_lane_margining ? "Yes" : "No"); + + break; + + case DCMD_TIMEOUT: + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + mutex_unlock(&instance->reset_mutex); + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + mutex_lock(&instance->reset_mutex); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + break; + case DCMD_FAILED: + megaraid_sas_kill_hba(instance); + break; + + } + + if (ret != DCMD_TIMEOUT) + megasas_return_cmd(instance, cmd); + + return ret; +} + +/* + * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer + * to firmware + * + * @instance: Adapter soft state + * @crash_buf_state - tell FW to turn ON/OFF crash dump feature + MR_CRASH_BUF_TURN_OFF = 0 + MR_CRASH_BUF_TURN_ON = 1 + * @return 0 on success non-zero on failure. + * Issues an internal command (DCMD) to set parameters for crash dump feature. + * Driver will send address of crash dump DMA buffer and set mbox to tell FW + * that driver supports crash dump feature. This DCMD will be sent only if + * crash dump feature is supported by the FW. + * + */ +int megasas_set_crash_dump_params(struct megasas_instance *instance, + u8 crash_buf_state) +{ + int ret = 0; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_err(&instance->pdev->dev, "Failed to get a free cmd\n"); + return -ENOMEM; + } + + + dcmd = &cmd->frame->dcmd; + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + dcmd->mbox.b[0] = crash_buf_state; + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_NONE; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); + + megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h, + CRASH_DMA_BUF_SIZE); + + if ((instance->adapter_type != MFI_SERIES) && + !instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); + else + ret = megasas_issue_polled(instance, cmd); + + if (ret == DCMD_TIMEOUT) { + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + } else + megasas_return_cmd(instance, cmd); + + return ret; +} + +/** + * megasas_issue_init_mfi - Initializes the FW + * @instance: Adapter soft state + * + * Issues the INIT MFI cmd + */ +static int +megasas_issue_init_mfi(struct megasas_instance *instance) +{ + __le32 context; + struct megasas_cmd *cmd; + struct megasas_init_frame *init_frame; + struct megasas_init_queue_info *initq_info; + dma_addr_t init_frame_h; + dma_addr_t initq_info_h; + + /* + * Prepare a init frame. 
Note the init frame points to queue info + * structure. Each frame has SGL allocated after first 64 bytes. For + * this frame - since we don't need any SGL - we use SGL's space as + * queue info structure + * + * We will not get a NULL command below. We just created the pool. + */ + cmd = megasas_get_cmd(instance); + + init_frame = (struct megasas_init_frame *)cmd->frame; + initq_info = (struct megasas_init_queue_info *) + ((unsigned long)init_frame + 64); + + init_frame_h = cmd->frame_phys_addr; + initq_info_h = init_frame_h + 64; + + context = init_frame->context; + memset(init_frame, 0, MEGAMFI_FRAME_SIZE); + memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); + init_frame->context = context; + + initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); + initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); + + initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); + initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); + + init_frame->cmd = MFI_CMD_INIT; + init_frame->cmd_status = MFI_STAT_INVALID_STATUS; + init_frame->queue_info_new_phys_addr_lo = + cpu_to_le32(lower_32_bits(initq_info_h)); + init_frame->queue_info_new_phys_addr_hi = + cpu_to_le32(upper_32_bits(initq_info_h)); + + init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); + + /* + * disable the intr before firing the init frame to FW + */ + instance->instancet->disable_intr(instance); + + /* + * Issue the init frame in polled mode + */ + + if (megasas_issue_polled(instance, cmd)) { + dev_err(&instance->pdev->dev, "Failed to init firmware\n"); + megasas_return_cmd(instance, cmd); + goto fail_fw_init; + } + + megasas_return_cmd(instance, cmd); + + return 0; + +fail_fw_init: + return -EINVAL; +} + +static u32 +megasas_init_adapter_mfi(struct megasas_instance *instance) +{ + u32 context_sz; + u32 reply_q_sz; + + /* + * Get various operational parameters from status register + */ + instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF; + /* + * Reduce the max supported cmds by 1. This is to ensure that the + * reply_q_sz (1 more than the max cmd that driver may send) + * does not exceed max cmds that the FW can support + */ + instance->max_fw_cmds = instance->max_fw_cmds-1; + instance->max_mfi_cmds = instance->max_fw_cmds; + instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >> + 0x10; + /* + * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands + * are reserved for IOCTL + driver's internal DCMDs. + */ + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { + instance->max_scsi_cmds = (instance->max_fw_cmds - + MEGASAS_SKINNY_INT_CMDS); + sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); + } else { + instance->max_scsi_cmds = (instance->max_fw_cmds - + MEGASAS_INT_CMDS); + sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); + } + + instance->cur_can_queue = instance->max_scsi_cmds; + /* + * Create a pool of commands + */ + if (megasas_alloc_cmds(instance)) + goto fail_alloc_cmds; + + /* + * Allocate memory for reply queue. Length of reply queue should + * be _one_ more than the maximum commands handled by the firmware. + * + * Note: When FW completes commands, it places corresponding contex + * values in this circular reply queue. This circular queue is a fairly + * typical producer-consumer queue. 
FW is the producer (of completed + * commands) and the driver is the consumer. + */ + context_sz = sizeof(u32); + reply_q_sz = context_sz * (instance->max_fw_cmds + 1); + + instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev, + reply_q_sz, &instance->reply_queue_h, GFP_KERNEL); + + if (!instance->reply_queue) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n"); + goto fail_reply_queue; + } + + if (megasas_issue_init_mfi(instance)) + goto fail_fw_init; + + if (megasas_get_ctrl_info(instance)) { + dev_err(&instance->pdev->dev, "(%d): Could get controller info " + "Fail from %s %d\n", instance->unique_id, + __func__, __LINE__); + goto fail_fw_init; + } + + instance->fw_support_ieee = 0; + instance->fw_support_ieee = + (instance->instancet->read_fw_status_reg(instance) & + 0x04000000); + + dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d", + instance->fw_support_ieee); + + if (instance->fw_support_ieee) + instance->flag_ieee = 1; + + return 0; + +fail_fw_init: + + dma_free_coherent(&instance->pdev->dev, reply_q_sz, + instance->reply_queue, instance->reply_queue_h); +fail_reply_queue: + megasas_free_cmds(instance); + +fail_alloc_cmds: + return 1; +} + +static +void megasas_setup_irq_poll(struct megasas_instance *instance) +{ + struct megasas_irq_context *irq_ctx; + u32 count, i; + + count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; + + /* Initialize IRQ poll */ + for (i = 0; i < count; i++) { + irq_ctx = &instance->irq_context[i]; + irq_ctx->os_irq = pci_irq_vector(instance->pdev, i); + irq_ctx->irq_poll_scheduled = false; + irq_poll_init(&irq_ctx->irqpoll, + instance->threshold_reply_count, + megasas_irqpoll); + } +} + +/* + * megasas_setup_irqs_ioapic - register legacy interrupts. + * @instance: Adapter soft state + * + * Do not enable interrupt, only setup ISRs. + * + * Return 0 on success. + */ +static int +megasas_setup_irqs_ioapic(struct megasas_instance *instance) +{ + struct pci_dev *pdev; + + pdev = instance->pdev; + instance->irq_context[0].instance = instance; + instance->irq_context[0].MSIxIndex = 0; + snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u", + "megasas", instance->host->host_no); + if (request_irq(pci_irq_vector(pdev, 0), + instance->instancet->service_isr, IRQF_SHARED, + instance->irq_context->name, &instance->irq_context[0])) { + dev_err(&instance->pdev->dev, + "Failed to register IRQ from %s %d\n", + __func__, __LINE__); + return -1; + } + instance->perf_mode = MR_LATENCY_PERF_MODE; + instance->low_latency_index_start = 0; + return 0; +} + +/** + * megasas_setup_irqs_msix - register MSI-x interrupts. + * @instance: Adapter soft state + * @is_probe: Driver probe check + * + * Do not enable interrupt, only setup ISRs. + * + * Return 0 on success. 
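/*
 * Editor's note: standalone sketch (not part of this patch) of the circular
 * reply queue described in megasas_init_adapter_mfi() above: one slot more
 * than the maximum outstanding commands, firmware as producer of completed
 * command contexts, driver as consumer. Sizes and contexts are made up.
 */
#include <stdio.h>

#define MAX_CMDS 4
static unsigned int reply_q[MAX_CMDS + 1];	/* reply_q_sz = max_fw_cmds + 1 */
static unsigned int producer, consumer;

static void fw_complete(unsigned int context)	/* producer side (firmware) */
{
	reply_q[producer] = context;
	producer = (producer + 1) % (MAX_CMDS + 1);
}

static int driver_poll(unsigned int *context)	/* consumer side (driver) */
{
	if (consumer == producer)
		return 0;			/* nothing completed */
	*context = reply_q[consumer];
	consumer = (consumer + 1) % (MAX_CMDS + 1);
	return 1;
}

int main(void)
{
	unsigned int ctx;

	fw_complete(7);
	fw_complete(2);
	while (driver_poll(&ctx))
		printf("completed context %u\n", ctx);
	return 0;
}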
+ */ +static int +megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) +{ + int i, j; + struct pci_dev *pdev; + + pdev = instance->pdev; + + /* Try MSI-x */ + for (i = 0; i < instance->msix_vectors; i++) { + instance->irq_context[i].instance = instance; + instance->irq_context[i].MSIxIndex = i; + snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u", + "megasas", instance->host->host_no, i); + if (request_irq(pci_irq_vector(pdev, i), + instance->instancet->service_isr, 0, instance->irq_context[i].name, + &instance->irq_context[i])) { + dev_err(&instance->pdev->dev, + "Failed to register IRQ for vector %d.\n", i); + for (j = 0; j < i; j++) { + if (j < instance->low_latency_index_start) + irq_update_affinity_hint( + pci_irq_vector(pdev, j), NULL); + free_irq(pci_irq_vector(pdev, j), + &instance->irq_context[j]); + } + /* Retry irq register for IO_APIC*/ + instance->msix_vectors = 0; + instance->msix_load_balance = false; + if (is_probe) { + pci_free_irq_vectors(instance->pdev); + return megasas_setup_irqs_ioapic(instance); + } else { + return -1; + } + } + } + + return 0; +} + +/* + * megasas_destroy_irqs- unregister interrupts. + * @instance: Adapter soft state + * return: void + */ +static void +megasas_destroy_irqs(struct megasas_instance *instance) { + + int i; + int count; + struct megasas_irq_context *irq_ctx; + + count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; + if (instance->adapter_type != MFI_SERIES) { + for (i = 0; i < count; i++) { + irq_ctx = &instance->irq_context[i]; + irq_poll_disable(&irq_ctx->irqpoll); + } + } + + if (instance->msix_vectors) + for (i = 0; i < instance->msix_vectors; i++) { + if (i < instance->low_latency_index_start) + irq_update_affinity_hint( + pci_irq_vector(instance->pdev, i), NULL); + free_irq(pci_irq_vector(instance->pdev, i), + &instance->irq_context[i]); + } + else + free_irq(pci_irq_vector(instance->pdev, 0), + &instance->irq_context[0]); +} + +/** + * megasas_setup_jbod_map - setup jbod map for FP seq_number. + * @instance: Adapter soft state + * + * Return 0 on success. 
+ */ +void +megasas_setup_jbod_map(struct megasas_instance *instance) +{ + int i; + struct fusion_context *fusion = instance->ctrl_context; + size_t pd_seq_map_sz; + + pd_seq_map_sz = struct_size_t(struct MR_PD_CFG_SEQ_NUM_SYNC, seq, + MAX_PHYSICAL_DEVICES); + + instance->use_seqnum_jbod_fp = + instance->support_seqnum_jbod_fp; + if (reset_devices || !fusion || + !instance->support_seqnum_jbod_fp) { + dev_info(&instance->pdev->dev, + "JBOD sequence map is disabled %s %d\n", + __func__, __LINE__); + instance->use_seqnum_jbod_fp = false; + return; + } + + if (fusion->pd_seq_sync[0]) + goto skip_alloc; + + for (i = 0; i < JBOD_MAPS_COUNT; i++) { + fusion->pd_seq_sync[i] = dma_alloc_coherent + (&instance->pdev->dev, pd_seq_map_sz, + &fusion->pd_seq_phys[i], GFP_KERNEL); + if (!fusion->pd_seq_sync[i]) { + dev_err(&instance->pdev->dev, + "Failed to allocate memory from %s %d\n", + __func__, __LINE__); + if (i == 1) { + dma_free_coherent(&instance->pdev->dev, + pd_seq_map_sz, fusion->pd_seq_sync[0], + fusion->pd_seq_phys[0]); + fusion->pd_seq_sync[0] = NULL; + } + instance->use_seqnum_jbod_fp = false; + return; + } + } + +skip_alloc: + if (!megasas_sync_pd_seq_num(instance, false) && + !megasas_sync_pd_seq_num(instance, true)) + instance->use_seqnum_jbod_fp = true; + else + instance->use_seqnum_jbod_fp = false; +} + +static void megasas_setup_reply_map(struct megasas_instance *instance) +{ + const struct cpumask *mask; + unsigned int queue, cpu, low_latency_index_start; + + low_latency_index_start = instance->low_latency_index_start; + + for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) { + mask = pci_irq_get_affinity(instance->pdev, queue); + if (!mask) + goto fallback; + + for_each_cpu(cpu, mask) + instance->reply_map[cpu] = queue; + } + return; + +fallback: + queue = low_latency_index_start; + for_each_possible_cpu(cpu) { + instance->reply_map[cpu] = queue; + if (queue == (instance->msix_vectors - 1)) + queue = low_latency_index_start; + else + queue++; + } +} + +/** + * megasas_get_device_list - Get the PD and LD device list from FW. + * @instance: Adapter soft state + * @return: Success or failure + * + * Issue DCMDs to Firmware to get the PD and LD list. + * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination + * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 
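/*
 * Editor's note: standalone sketch (not part of this patch) of the fallback
 * CPU-to-reply-queue assignment in megasas_setup_reply_map() above, used when
 * no IRQ affinity mask is available: queues are handed out round-robin
 * starting at low_latency_index_start. CPU and queue counts are made up.
 */
#include <stdio.h>

#define NR_CPUS		8
#define MSIX_VECTORS	3

int main(void)
{
	unsigned int reply_map[NR_CPUS];
	unsigned int cpu, low_latency_index_start = 0;
	unsigned int queue = low_latency_index_start;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		reply_map[cpu] = queue;
		if (queue == MSIX_VECTORS - 1)
			queue = low_latency_index_start;
		else
			queue++;
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u -> reply queue %u\n", cpu, reply_map[cpu]);
	return 0;
}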
+ */ +static +int megasas_get_device_list(struct megasas_instance *instance) +{ + if (instance->enable_fw_dev_list) { + if (megasas_host_device_list_query(instance, true)) + return FAILED; + } else { + if (megasas_get_pd_list(instance) < 0) { + dev_err(&instance->pdev->dev, "failed to get PD list\n"); + return FAILED; + } + + if (megasas_ld_list_query(instance, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) { + dev_err(&instance->pdev->dev, "failed to get LD list\n"); + return FAILED; + } + } + + return SUCCESS; +} + +/** + * megasas_set_high_iops_queue_affinity_and_hint - Set affinity and hint + * for high IOPS queues + * @instance: Adapter soft state + * return: void + */ +static inline void +megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance) +{ + int i; + unsigned int irq; + const struct cpumask *mask; + + if (instance->perf_mode == MR_BALANCED_PERF_MODE) { + mask = cpumask_of_node(dev_to_node(&instance->pdev->dev)); + + for (i = 0; i < instance->low_latency_index_start; i++) { + irq = pci_irq_vector(instance->pdev, i); + irq_set_affinity_and_hint(irq, mask); + } + } +} + +static int +__megasas_alloc_irq_vectors(struct megasas_instance *instance) +{ + int i, irq_flags; + struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start }; + struct irq_affinity *descp = &desc; + + irq_flags = PCI_IRQ_MSIX; + + if (instance->smp_affinity_enable) + irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; + else + descp = NULL; + + /* Do not allocate msix vectors for poll_queues. + * msix_vectors is always within a range of FW supported reply queue. + */ + i = pci_alloc_irq_vectors_affinity(instance->pdev, + instance->low_latency_index_start, + instance->msix_vectors - instance->iopoll_q_count, irq_flags, descp); + + return i; +} + +/** + * megasas_alloc_irq_vectors - Allocate IRQ vectors/enable MSI-x vectors + * @instance: Adapter soft state + * return: void + */ +static void +megasas_alloc_irq_vectors(struct megasas_instance *instance) +{ + int i; + unsigned int num_msix_req; + + instance->iopoll_q_count = 0; + if ((instance->adapter_type != MFI_SERIES) && + poll_queues) { + + instance->perf_mode = MR_LATENCY_PERF_MODE; + instance->low_latency_index_start = 1; + + /* reserve for default and non-mananged pre-vector. 
*/ + if (instance->msix_vectors > (poll_queues + 2)) + instance->iopoll_q_count = poll_queues; + else + instance->iopoll_q_count = 0; + + num_msix_req = num_online_cpus() + instance->low_latency_index_start; + instance->msix_vectors = min(num_msix_req, + instance->msix_vectors); + + } + + i = __megasas_alloc_irq_vectors(instance); + + if (((instance->perf_mode == MR_BALANCED_PERF_MODE) + || instance->iopoll_q_count) && + (i != (instance->msix_vectors - instance->iopoll_q_count))) { + if (instance->msix_vectors) + pci_free_irq_vectors(instance->pdev); + /* Disable Balanced IOPS mode and try realloc vectors */ + instance->perf_mode = MR_LATENCY_PERF_MODE; + instance->low_latency_index_start = 1; + num_msix_req = num_online_cpus() + instance->low_latency_index_start; + + instance->msix_vectors = min(num_msix_req, + instance->msix_vectors); + + instance->iopoll_q_count = 0; + i = __megasas_alloc_irq_vectors(instance); + + } + + dev_info(&instance->pdev->dev, + "requested/available msix %d/%d poll_queue %d\n", + instance->msix_vectors - instance->iopoll_q_count, + i, instance->iopoll_q_count); + + if (i > 0) + instance->msix_vectors = i; + else + instance->msix_vectors = 0; + + if (instance->smp_affinity_enable) + megasas_set_high_iops_queue_affinity_and_hint(instance); +} + +/** + * megasas_init_fw - Initializes the FW + * @instance: Adapter soft state + * + * This is the main function for initializing firmware + */ + +static int megasas_init_fw(struct megasas_instance *instance) +{ + u32 max_sectors_1; + u32 max_sectors_2, tmp_sectors, msix_enable; + u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg; + resource_size_t base_addr; + void *base_addr_phys; + struct megasas_ctrl_info *ctrl_info = NULL; + unsigned long bar_list; + int i, j, loop; + struct IOV_111 *iovPtr; + struct fusion_context *fusion; + bool intr_coalescing; + unsigned int num_msix_req; + u16 lnksta, speed; + + fusion = instance->ctrl_context; + + /* Find first memory bar */ + bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); + instance->bar = find_first_bit(&bar_list, BITS_PER_LONG); + if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, + "megasas: LSI")) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); + return -EBUSY; + } + + base_addr = pci_resource_start(instance->pdev, instance->bar); + instance->reg_set = ioremap(base_addr, 8192); + + if (!instance->reg_set) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); + goto fail_ioremap; + } + + base_addr_phys = &base_addr; + dev_printk(KERN_DEBUG, &instance->pdev->dev, + "BAR:0x%lx BAR's base_addr(phys):%pa mapped virt_addr:0x%p\n", + instance->bar, base_addr_phys, instance->reg_set); + + if (instance->adapter_type != MFI_SERIES) + instance->instancet = &megasas_instance_template_fusion; + else { + switch (instance->pdev->device) { + case PCI_DEVICE_ID_LSI_SAS1078R: + case PCI_DEVICE_ID_LSI_SAS1078DE: + instance->instancet = &megasas_instance_template_ppc; + break; + case PCI_DEVICE_ID_LSI_SAS1078GEN2: + case PCI_DEVICE_ID_LSI_SAS0079GEN2: + instance->instancet = &megasas_instance_template_gen2; + break; + case PCI_DEVICE_ID_LSI_SAS0073SKINNY: + case PCI_DEVICE_ID_LSI_SAS0071SKINNY: + instance->instancet = &megasas_instance_template_skinny; + break; + case PCI_DEVICE_ID_LSI_SAS1064R: + case PCI_DEVICE_ID_DELL_PERC5: + default: + instance->instancet = &megasas_instance_template_xscale; + instance->pd_list_not_supported = 1; + break; + } + } + + if (megasas_transition_to_ready(instance, 0)) { +
dev_info(&instance->pdev->dev, + "Failed to transition controller to ready from %s!\n", + __func__); + if (instance->adapter_type != MFI_SERIES) { + status_reg = instance->instancet->read_fw_status_reg( + instance); + if (status_reg & MFI_RESET_ADAPTER) { + if (megasas_adp_reset_wait_for_ready + (instance, true, 0) == FAILED) + goto fail_ready_state; + } else { + goto fail_ready_state; + } + } else { + atomic_set(&instance->fw_reset_no_pci_access, 1); + instance->instancet->adp_reset + (instance, instance->reg_set); + atomic_set(&instance->fw_reset_no_pci_access, 0); + + /*waiting for about 30 second before retry*/ + ssleep(30); + + if (megasas_transition_to_ready(instance, 0)) + goto fail_ready_state; + } + + dev_info(&instance->pdev->dev, + "FW restarted successfully from %s!\n", + __func__); + } + + megasas_init_ctrl_params(instance); + + if (megasas_set_dma_mask(instance)) + goto fail_ready_state; + + if (megasas_alloc_ctrl_mem(instance)) + goto fail_alloc_dma_buf; + + if (megasas_alloc_ctrl_dma_buffers(instance)) + goto fail_alloc_dma_buf; + + fusion = instance->ctrl_context; + + if (instance->adapter_type >= VENTURA_SERIES) { + scratch_pad_2 = + megasas_readl(instance, + &instance->reg_set->outbound_scratch_pad_2); + instance->max_raid_mapsize = ((scratch_pad_2 >> + MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & + MR_MAX_RAID_MAP_SIZE_MASK); + } + + instance->enable_sdev_max_qd = enable_sdev_max_qd; + + switch (instance->adapter_type) { + case VENTURA_SERIES: + fusion->pcie_bw_limitation = true; + break; + case AERO_SERIES: + fusion->r56_div_offload = true; + break; + default: + break; + } + + /* Check if MSI-X is supported while in ready state */ + msix_enable = (instance->instancet->read_fw_status_reg(instance) & + 0x4000000) >> 0x1a; + if (msix_enable && !msix_disable) { + + scratch_pad_1 = megasas_readl + (instance, &instance->reg_set->outbound_scratch_pad_1); + /* Check max MSI-X vectors */ + if (fusion) { + if (instance->adapter_type == THUNDERBOLT_SERIES) { + /* Thunderbolt Series*/ + instance->msix_vectors = (scratch_pad_1 + & MR_MAX_REPLY_QUEUES_OFFSET) + 1; + } else { + instance->msix_vectors = ((scratch_pad_1 + & MR_MAX_REPLY_QUEUES_EXT_OFFSET) + >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; + + /* + * For Invader series, > 8 MSI-x vectors + * supported by FW/HW implies combined + * reply queue mode is enabled. + * For Ventura series, > 16 MSI-x vectors + * supported by FW/HW implies combined + * reply queue mode is enabled. + */ + switch (instance->adapter_type) { + case INVADER_SERIES: + if (instance->msix_vectors > 8) + instance->msix_combined = true; + break; + case AERO_SERIES: + case VENTURA_SERIES: + if (instance->msix_vectors > 16) + instance->msix_combined = true; + break; + } + + if (rdpq_enable) + instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 
+ 1 : 0; + + if (instance->adapter_type >= INVADER_SERIES && + !instance->msix_combined) { + instance->msix_load_balance = true; + instance->smp_affinity_enable = false; + } + + /* Save 1-15 reply post index address to local memory + * Index 0 is already saved from reg offset + * MPI2_REPLY_POST_HOST_INDEX_OFFSET + */ + for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { + instance->reply_post_host_index_addr[loop] = + (u32 __iomem *) + ((u8 __iomem *)instance->reg_set + + MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + + (loop * 0x10)); + } + } + + dev_info(&instance->pdev->dev, + "firmware supports msix\t: (%d)", + instance->msix_vectors); + if (msix_vectors) + instance->msix_vectors = min(msix_vectors, + instance->msix_vectors); + } else /* MFI adapters */ + instance->msix_vectors = 1; + + + /* + * For Aero (if some conditions are met), driver will configure a + * few additional reply queues with interrupt coalescing enabled. + * These queues with interrupt coalescing enabled are called + * High IOPS queues and rest of reply queues (based on number of + * logical CPUs) are termed as Low latency queues. + * + * Total Number of reply queues = High IOPS queues + low latency queues + * + * For rest of fusion adapters, 1 additional reply queue will be + * reserved for management commands, rest of reply queues + * (based on number of logical CPUs) will be used for IOs and + * referenced as IO queues. + * Total Number of reply queues = 1 + IO queues + * + * MFI adapters supports single MSI-x so single reply queue + * will be used for IO and management commands. + */ + + intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ? + true : false; + if (intr_coalescing && + (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) && + (instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES)) + instance->perf_mode = MR_BALANCED_PERF_MODE; + else + instance->perf_mode = MR_LATENCY_PERF_MODE; + + + if (instance->adapter_type == AERO_SERIES) { + pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta); + speed = lnksta & PCI_EXP_LNKSTA_CLS; + + /* + * For Aero, if PCIe link speed is <16 GT/s, then driver should operate + * in latency perf mode and enable R1 PCI bandwidth algorithm + */ + if (speed < 0x4) { + instance->perf_mode = MR_LATENCY_PERF_MODE; + fusion->pcie_bw_limitation = true; + } + + /* + * Performance mode settings provided through module parameter-perf_mode will + * take affect only for: + * 1. Aero family of adapters. + * 2. When user sets module parameter- perf_mode in range of 0-2. + */ + if ((perf_mode >= MR_BALANCED_PERF_MODE) && + (perf_mode <= MR_LATENCY_PERF_MODE)) + instance->perf_mode = perf_mode; + /* + * If intr coalescing is not supported by controller FW, then IOPS + * and Balanced modes are not feasible. + */ + if (!intr_coalescing) + instance->perf_mode = MR_LATENCY_PERF_MODE; + + } + + if (instance->perf_mode == MR_BALANCED_PERF_MODE) + instance->low_latency_index_start = + MR_HIGH_IOPS_QUEUE_COUNT; + else + instance->low_latency_index_start = 1; + + num_msix_req = num_online_cpus() + instance->low_latency_index_start; + + instance->msix_vectors = min(num_msix_req, + instance->msix_vectors); + + megasas_alloc_irq_vectors(instance); + if (!instance->msix_vectors) + instance->msix_load_balance = false; + } + /* + * MSI-X host index 0 is common for all adapter. + * It is used for all MPT based Adapters. 
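/*
 * Editor's note: standalone sketch (not part of this patch) of the
 * performance-mode choice outlined above: balanced mode (high-IOPS queues
 * with interrupt coalescing) is only picked when the firmware supports
 * coalescing, enough CPUs are online and the full MSI-X complement was
 * granted; otherwise the driver stays in latency mode. The thresholds below
 * are stand-ins for the driver's constants.
 */
#include <stdbool.h>
#include <stdio.h>

enum perf_mode { BALANCED_PERF_MODE, LATENCY_PERF_MODE };

static enum perf_mode pick_perf_mode(bool intr_coalescing,
				     unsigned int online_cpus,
				     unsigned int msix_vectors,
				     unsigned int max_msix_queues)
{
	if (intr_coalescing && online_cpus >= 16 &&
	    msix_vectors == max_msix_queues)
		return BALANCED_PERF_MODE;
	return LATENCY_PERF_MODE;
}

int main(void)
{
	printf("mode = %d\n", pick_perf_mode(true, 32, 128, 128));	/* balanced */
	printf("mode = %d\n", pick_perf_mode(false, 32, 128, 128));	/* latency  */
	return 0;
}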
+ */ + if (instance->msix_combined) { + instance->reply_post_host_index_addr[0] = + (u32 *)((u8 *)instance->reg_set + + MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET); + } else { + instance->reply_post_host_index_addr[0] = + (u32 *)((u8 *)instance->reg_set + + MPI2_REPLY_POST_HOST_INDEX_OFFSET); + } + + if (!instance->msix_vectors) { + i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); + if (i < 0) + goto fail_init_adapter; + } + + megasas_setup_reply_map(instance); + + dev_info(&instance->pdev->dev, + "current msix/online cpus\t: (%d/%d)\n", + instance->msix_vectors, (unsigned int)num_online_cpus()); + dev_info(&instance->pdev->dev, + "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled"); + + tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, + (unsigned long)instance); + + /* + * Below are default value for legacy Firmware. + * non-fusion based controllers + */ + instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; + instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; + /* Get operational params, sge flags, send init cmd to controller */ + if (instance->instancet->init_adapter(instance)) + goto fail_init_adapter; + + if (instance->adapter_type >= VENTURA_SERIES) { + scratch_pad_3 = + megasas_readl(instance, + &instance->reg_set->outbound_scratch_pad_3); + if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >= + MR_DEFAULT_NVME_PAGE_SHIFT) + instance->nvme_page_size = + (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK)); + + dev_info(&instance->pdev->dev, + "NVME page size\t: (%d)\n", instance->nvme_page_size); + } + + if (instance->msix_vectors ? + megasas_setup_irqs_msix(instance, 1) : + megasas_setup_irqs_ioapic(instance)) + goto fail_init_adapter; + + if (instance->adapter_type != MFI_SERIES) + megasas_setup_irq_poll(instance); + + instance->instancet->enable_intr(instance); + + dev_info(&instance->pdev->dev, "INIT adapter done\n"); + + megasas_setup_jbod_map(instance); + + if (megasas_get_device_list(instance) != SUCCESS) { + dev_err(&instance->pdev->dev, + "%s: megasas_get_device_list failed\n", + __func__); + goto fail_get_ld_pd_list; + } + + /* stream detection initialization */ + if (instance->adapter_type >= VENTURA_SERIES) { + fusion->stream_detect_by_ld = + kcalloc(MAX_LOGICAL_DRIVES_EXT, + sizeof(struct LD_STREAM_DETECT *), + GFP_KERNEL); + if (!fusion->stream_detect_by_ld) { + dev_err(&instance->pdev->dev, + "unable to allocate stream detection for pool of LDs\n"); + goto fail_get_ld_pd_list; + } + for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { + fusion->stream_detect_by_ld[i] = + kzalloc(sizeof(struct LD_STREAM_DETECT), + GFP_KERNEL); + if (!fusion->stream_detect_by_ld[i]) { + dev_err(&instance->pdev->dev, + "unable to allocate stream detect by LD\n "); + for (j = 0; j < i; ++j) + kfree(fusion->stream_detect_by_ld[j]); + kfree(fusion->stream_detect_by_ld); + fusion->stream_detect_by_ld = NULL; + goto fail_get_ld_pd_list; + } + fusion->stream_detect_by_ld[i]->mru_bit_map + = MR_STREAM_BITMAP; + } + } + + /* + * Compute the max allowed sectors per IO: The controller info has two + * limits on max sectors. Driver should use the minimum of these two. + * + * 1 << stripe_sz_ops.min = max sectors per strip + * + * Note that older firmwares ( < FW ver 30) didn't report information + * to calculate max_sectors_1. So the number ended up as zero always. 
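/*
 * Editor's note: standalone sketch (not part of this patch) of the max-sectors
 * calculation described in the comment above: limit 1 is the strip size
 * (1 << stripe_sz_ops.min) times max strips per IO, limit 2 is
 * max_request_size, and the driver uses the smaller of the two. The numbers
 * below are invented.
 */
#include <stdio.h>

int main(void)
{
	unsigned int stripe_sz_min = 7;		/* 1 << 7 = 128 sectors per strip */
	unsigned int max_strips_per_io = 42;
	unsigned int max_request_size = 4096;	/* in sectors */

	unsigned int max_sectors_1 = (1u << stripe_sz_min) * max_strips_per_io;
	unsigned int max_sectors_2 = max_request_size;
	unsigned int tmp_sectors = max_sectors_1 < max_sectors_2 ?
				   max_sectors_1 : max_sectors_2;

	printf("max sectors per IO: %u\n", tmp_sectors);
	return 0;
}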
+ */ + tmp_sectors = 0; + ctrl_info = instance->ctrl_info_buf; + + max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * + le16_to_cpu(ctrl_info->max_strips_per_io); + max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); + + tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); + + instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; + instance->passive = ctrl_info->cluster.passive; + memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); + instance->UnevenSpanSupport = + ctrl_info->adapterOperations2.supportUnevenSpans; + if (instance->UnevenSpanSupport) { + struct fusion_context *fusion = instance->ctrl_context; + if (MR_ValidateMapInfo(instance, instance->map_id)) + fusion->fast_path_io = 1; + else + fusion->fast_path_io = 0; + + } + if (ctrl_info->host_interface.SRIOV) { + instance->requestorId = ctrl_info->iov.requestorId; + if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) { + if (!ctrl_info->adapterOperations2.activePassive) + instance->PlasmaFW111 = 1; + + dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n", + instance->PlasmaFW111 ? "1.11" : "new"); + + if (instance->PlasmaFW111) { + iovPtr = (struct IOV_111 *) + ((unsigned char *)ctrl_info + IOV_111_OFFSET); + instance->requestorId = iovPtr->requestorId; + } + } + dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n", + instance->requestorId); + } + + instance->crash_dump_fw_support = + ctrl_info->adapterOperations3.supportCrashDump; + instance->crash_dump_drv_support = + (instance->crash_dump_fw_support && + instance->crash_dump_buf); + if (instance->crash_dump_drv_support) + megasas_set_crash_dump_params(instance, + MR_CRASH_BUF_TURN_OFF); + + else { + if (instance->crash_dump_buf) + dma_free_coherent(&instance->pdev->dev, + CRASH_DMA_BUF_SIZE, + instance->crash_dump_buf, + instance->crash_dump_h); + instance->crash_dump_buf = NULL; + } + + if (instance->snapdump_wait_time) { + megasas_get_snapdump_properties(instance); + dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n", + instance->snapdump_wait_time); + } + + dev_info(&instance->pdev->dev, + "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", + le16_to_cpu(ctrl_info->pci.vendor_id), + le16_to_cpu(ctrl_info->pci.device_id), + le16_to_cpu(ctrl_info->pci.sub_vendor_id), + le16_to_cpu(ctrl_info->pci.sub_device_id)); + dev_info(&instance->pdev->dev, "unevenspan support : %s\n", + instance->UnevenSpanSupport ? "yes" : "no"); + dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", + instance->crash_dump_drv_support ? "yes" : "no"); + dev_info(&instance->pdev->dev, "JBOD sequence map : %s\n", + instance->use_seqnum_jbod_fp ?
"enabled" : "disabled"); + + instance->max_sectors_per_req = instance->max_num_sge * + SGE_BUFFER_SIZE / 512; + if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) + instance->max_sectors_per_req = tmp_sectors; + + /* Check for valid throttlequeuedepth module parameter */ + if (throttlequeuedepth && + throttlequeuedepth <= instance->max_scsi_cmds) + instance->throttlequeuedepth = throttlequeuedepth; + else + instance->throttlequeuedepth = + MEGASAS_THROTTLE_QUEUE_DEPTH; + + if ((resetwaittime < 1) || + (resetwaittime > MEGASAS_RESET_WAIT_TIME)) + resetwaittime = MEGASAS_RESET_WAIT_TIME; + + if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) + scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; + + /* Launch SR-IOV heartbeat timer */ + if (instance->requestorId) { + if (!megasas_sriov_start_heartbeat(instance, 1)) { + megasas_start_timer(instance); + } else { + instance->skip_heartbeat_timer_del = 1; + goto fail_get_ld_pd_list; + } + } + + /* + * Create and start watchdog thread which will monitor + * controller state every 1 sec and trigger OCR when + * it enters fault state + */ + if (instance->adapter_type != MFI_SERIES) + if (megasas_fusion_start_watchdog(instance) != SUCCESS) + goto fail_start_watchdog; + + return 0; + +fail_start_watchdog: + if (instance->requestorId && !instance->skip_heartbeat_timer_del) + del_timer_sync(&instance->sriov_heartbeat_timer); +fail_get_ld_pd_list: + instance->instancet->disable_intr(instance); + megasas_destroy_irqs(instance); +fail_init_adapter: + if (instance->msix_vectors) + pci_free_irq_vectors(instance->pdev); + instance->msix_vectors = 0; +fail_alloc_dma_buf: + megasas_free_ctrl_dma_buffers(instance); + megasas_free_ctrl_mem(instance); +fail_ready_state: + iounmap(instance->reg_set); + +fail_ioremap: + pci_release_selected_regions(instance->pdev, 1<<instance->bar); + + dev_err(&instance->pdev->dev, "Failed from %s %d\n", + __func__, __LINE__); + return -EINVAL; +} + +/** + * megasas_release_mfi - Reverses the FW initialization + * @instance: Adapter soft state + */ +static void megasas_release_mfi(struct megasas_instance *instance) +{ + u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); + + if (instance->reply_queue) + dma_free_coherent(&instance->pdev->dev, reply_q_sz, + instance->reply_queue, instance->reply_queue_h); + + megasas_free_cmds(instance); + + iounmap(instance->reg_set); + + pci_release_selected_regions(instance->pdev, 1<<instance->bar); +} + +/** + * megasas_get_seq_num - Gets latest event sequence numbers + * @instance: Adapter soft state + * @eli: FW event log sequence numbers information + * + * FW maintains a log of all events in a non-volatile area. Upper layers would + * usually find out the latest sequence number of the events, the seq number at + * the boot etc. They would "read" all the events below the latest seq number + * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq + * number), they would subscribe to AEN (asynchronous event notification) and + * wait for the events to happen. 
+ */ +static int +megasas_get_seq_num(struct megasas_instance *instance, + struct megasas_evt_log_info *eli) +{ + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct megasas_evt_log_info *el_info; + dma_addr_t el_info_h = 0; + int ret; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + el_info = dma_alloc_coherent(&instance->pdev->dev, + sizeof(struct megasas_evt_log_info), + &el_info_h, GFP_KERNEL); + if (!el_info) { + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0x0; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); + + megasas_set_dma_settings(instance, dcmd, el_info_h, + sizeof(struct megasas_evt_log_info)); + + ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); + if (ret != DCMD_SUCCESS) { + dev_err(&instance->pdev->dev, "Failed from %s %d\n", + __func__, __LINE__); + goto dcmd_failed; + } + + /* + * Copy the data back into callers buffer + */ + eli->newest_seq_num = el_info->newest_seq_num; + eli->oldest_seq_num = el_info->oldest_seq_num; + eli->clear_seq_num = el_info->clear_seq_num; + eli->shutdown_seq_num = el_info->shutdown_seq_num; + eli->boot_seq_num = el_info->boot_seq_num; + +dcmd_failed: + dma_free_coherent(&instance->pdev->dev, + sizeof(struct megasas_evt_log_info), + el_info, el_info_h); + + megasas_return_cmd(instance, cmd); + + return ret; +} + +/** + * megasas_register_aen - Registers for asynchronous event notification + * @instance: Adapter soft state + * @seq_num: The starting sequence number + * @class_locale_word: Class of the event + * + * This function subscribes for AEN for events beyond the @seq_num. It requests + * to be notified if and only if the event is of type @class_locale + */ +static int +megasas_register_aen(struct megasas_instance *instance, u32 seq_num, + u32 class_locale_word) +{ + int ret_val; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + union megasas_evt_class_locale curr_aen; + union megasas_evt_class_locale prev_aen; + + /* + * If there an AEN pending already (aen_cmd), check if the + * class_locale of that pending AEN is inclusive of the new + * AEN request we currently have. If it is, then we don't have + * to do anything. In other words, whichever events the current + * AEN request is subscribing to, have already been subscribed + * to. + * + * If the old_cmd is _not_ inclusive, then we have to abort + * that command, form a class_locale that is superset of both + * old and current and re-issue to the FW + */ + + curr_aen.word = class_locale_word; + + if (instance->aen_cmd) { + + prev_aen.word = + le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); + + if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) || + (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) { + dev_info(&instance->pdev->dev, + "%s %d out of range class %d send by application\n", + __func__, __LINE__, curr_aen.members.class); + return 0; + } + + /* + * A class whose enum value is smaller is inclusive of all + * higher values. If a PROGRESS (= -1) was previously + * registered, then a new registration requests for higher + * classes need not be sent to FW. They are automatically + * included. + * + * Locale numbers don't have such hierarchy. 
They are bitmap + * values + */ + if ((prev_aen.members.class <= curr_aen.members.class) && + !((prev_aen.members.locale & curr_aen.members.locale) ^ + curr_aen.members.locale)) { + /* + * Previously issued event registration includes + * current request. Nothing to do. + */ + return 0; + } else { + curr_aen.members.locale |= prev_aen.members.locale; + + if (prev_aen.members.class < curr_aen.members.class) + curr_aen.members.class = prev_aen.members.class; + + instance->aen_cmd->abort_aen = 1; + ret_val = megasas_issue_blocked_abort_cmd(instance, + instance-> + aen_cmd, 30); + + if (ret_val) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort " + "previous AEN command\n"); + return ret_val; + } + } + } + + cmd = megasas_get_cmd(instance); + + if (!cmd) + return -ENOMEM; + + dcmd = &cmd->frame->dcmd; + + memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); + + /* + * Prepare DCMD for aen registration + */ + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0x0; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); + dcmd->mbox.w[0] = cpu_to_le32(seq_num); + instance->last_seq_num = seq_num; + dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); + + megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h, + sizeof(struct megasas_evt_detail)); + + if (instance->aen_cmd != NULL) { + megasas_return_cmd(instance, cmd); + return 0; + } + + /* + * Store reference to the cmd used to register for AEN. When an + * application wants us to register for AEN, we have to abort this + * cmd and re-register with a new EVENT LOCALE supplied by that app + */ + instance->aen_cmd = cmd; + + /* + * Issue the aen registration frame + */ + instance->instancet->issue_dcmd(instance, cmd); + + return 0; +} + +/* megasas_get_target_prop - Send DCMD with below details to firmware. + * + * This DCMD will fetch few properties of LD/system PD defined + * in MR_TARGET_DEV_PROPERTIES. eg. Queue Depth, MDTS value. + * + * DCMD send by drivers whenever new target is added to the OS. + * + * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP + * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD. + * 0 = system PD, 1 = LD. + * dcmd.mbox.s[1] - TargetID for LD/system PD. + * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES. + * + * @instance: Adapter soft state + * @sdev: OS provided scsi device + * + * Returns 0 on success non-zero on failure. 
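+ *
+ * The target ID is derived as ((channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
+ * + id; for example, a system PD at channel 1, id 5 maps to target ID
+ * MEGASAS_MAX_DEV_PER_CHANNEL + 5. The DCMD is issued as a blocked command
+ * on fusion adapters with interrupts enabled, and polled otherwise.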
+ */ +int +megasas_get_target_prop(struct megasas_instance *instance, + struct scsi_device *sdev) +{ + int ret; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + + sdev->id; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_err(&instance->pdev->dev, + "Failed to get cmd %s\n", __func__); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev); + + dcmd->mbox.s[1] = cpu_to_le16(targetId); + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = + cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); + dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); + + megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h, + sizeof(struct MR_TARGET_PROPERTIES)); + + if ((instance->adapter_type != MFI_SERIES) && + !instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, + cmd, MFI_IO_TIMEOUT_SECS); + else + ret = megasas_issue_polled(instance, cmd); + + switch (ret) { + case DCMD_TIMEOUT: + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + mutex_unlock(&instance->reset_mutex); + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + mutex_lock(&instance->reset_mutex); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, + "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + break; + + default: + megasas_return_cmd(instance, cmd); + } + if (ret != DCMD_SUCCESS) + dev_err(&instance->pdev->dev, + "return from %s %d return value %d\n", + __func__, __LINE__, ret); + + return ret; +} + +/** + * megasas_start_aen - Subscribes to AEN during driver load time + * @instance: Adapter soft state + */ +static int megasas_start_aen(struct megasas_instance *instance) +{ + struct megasas_evt_log_info eli; + union megasas_evt_class_locale class_locale; + + /* + * Get the latest sequence number from FW + */ + memset(&eli, 0, sizeof(eli)); + + if (megasas_get_seq_num(instance, &eli)) + return -1; + + /* + * Register AEN with FW for latest sequence number plus 1 + */ + class_locale.members.reserved = 0; + class_locale.members.locale = MR_EVT_LOCALE_ALL; + class_locale.members.class = MR_EVT_CLASS_DEBUG; + + return megasas_register_aen(instance, + le32_to_cpu(eli.newest_seq_num) + 1, + class_locale.word); +} + +/** + * megasas_io_attach - Attaches this driver to SCSI mid-layer + * @instance: Adapter soft state + */ +static int megasas_io_attach(struct megasas_instance *instance) +{ + struct Scsi_Host *host = instance->host; + + /* + * Export parameters required by SCSI mid-layer + */ + host->unique_id = instance->unique_id; + host->can_queue = instance->max_scsi_cmds; + host->this_id = instance->init_id; + host->sg_tablesize = instance->max_num_sge; + + if (instance->fw_support_ieee) + instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE; + + /* + * Check if the module parameter value for max_sectors can be used + */ + if (max_sectors && max_sectors < instance->max_sectors_per_req) + instance->max_sectors_per_req = max_sectors; + else { + if (max_sectors) { + if (((instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS1078GEN2) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0079GEN2)) 
&& + (max_sectors <= MEGASAS_MAX_SECTORS)) { + instance->max_sectors_per_req = max_sectors; + } else { + dev_info(&instance->pdev->dev, "max_sectors should be > 0" + "and <= %d (or < 1MB for GEN2 controller)\n", + instance->max_sectors_per_req); + } + } + } + + host->max_sectors = instance->max_sectors_per_req; + host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN; + host->max_channel = MEGASAS_MAX_CHANNELS - 1; + host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; + host->max_lun = MEGASAS_MAX_LUN; + host->max_cmd_len = 16; + + /* Use shared host tagset only for fusion adaptors + * if there are managed interrupts (smp affinity enabled case). + * Single msix_vectors in kdump, so shared host tag is also disabled. + */ + + host->host_tagset = 0; + host->nr_hw_queues = 1; + + if ((instance->adapter_type != MFI_SERIES) && + (instance->msix_vectors > instance->low_latency_index_start) && + host_tagset_enable && + instance->smp_affinity_enable) { + host->host_tagset = 1; + host->nr_hw_queues = instance->msix_vectors - + instance->low_latency_index_start + instance->iopoll_q_count; + if (instance->iopoll_q_count) + host->nr_maps = 3; + } else { + instance->iopoll_q_count = 0; + } + + dev_info(&instance->pdev->dev, + "Max firmware commands: %d shared with default " + "hw_queues = %d poll_queues %d\n", instance->max_fw_cmds, + host->nr_hw_queues - instance->iopoll_q_count, + instance->iopoll_q_count); + /* + * Notify the mid-layer about the new controller + */ + if (scsi_add_host(host, &instance->pdev->dev)) { + dev_err(&instance->pdev->dev, + "Failed to add host from %s %d\n", + __func__, __LINE__); + return -ENODEV; + } + + return 0; +} + +/** + * megasas_set_dma_mask - Set DMA mask for supported controllers + * + * @instance: Adapter soft state + * Description: + * + * For Ventura, driver/FW will operate in 63bit DMA addresses. + * + * For invader- + * By default, driver/FW will operate in 32bit DMA addresses + * for consistent DMA mapping but if 32 bit consistent + * DMA mask fails, driver will try with 63 bit consistent + * mask provided FW is true 63bit DMA capable + * + * For older controllers(Thunderbolt and MFI based adapters)- + * driver/FW will operate in 32 bit consistent DMA addresses. + */ +static int +megasas_set_dma_mask(struct megasas_instance *instance) +{ + u64 consistent_mask; + struct pci_dev *pdev; + u32 scratch_pad_1; + + pdev = instance->pdev; + consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ? + DMA_BIT_MASK(63) : DMA_BIT_MASK(32); + + if (IS_DMA64) { + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) && + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) + goto fail_set_dma_mask; + + if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) && + (dma_set_coherent_mask(&pdev->dev, consistent_mask) && + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) { + /* + * If 32 bit DMA mask fails, then try for 64 bit mask + * for FW capable of handling 64 bit DMA. 
+ */ + scratch_pad_1 = megasas_readl + (instance, &instance->reg_set->outbound_scratch_pad_1); + + if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET)) + goto fail_set_dma_mask; + else if (dma_set_mask_and_coherent(&pdev->dev, + DMA_BIT_MASK(63))) + goto fail_set_dma_mask; + } + } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) + goto fail_set_dma_mask; + + if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32)) + instance->consistent_mask_64bit = false; + else + instance->consistent_mask_64bit = true; + + dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", + ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), + (instance->consistent_mask_64bit ? "63" : "32")); + + return 0; + +fail_set_dma_mask: + dev_err(&pdev->dev, "Failed to set DMA mask\n"); + return -1; + +} + +/* + * megasas_set_adapter_type - Set adapter type. + * Supported controllers can be divided in + * different categories- + * enum MR_ADAPTER_TYPE { + * MFI_SERIES = 1, + * THUNDERBOLT_SERIES = 2, + * INVADER_SERIES = 3, + * VENTURA_SERIES = 4, + * AERO_SERIES = 5, + * }; + * @instance: Adapter soft state + * return: void + */ +static inline void megasas_set_adapter_type(struct megasas_instance *instance) +{ + if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) && + (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) { + instance->adapter_type = MFI_SERIES; + } else { + switch (instance->pdev->device) { + case PCI_DEVICE_ID_LSI_AERO_10E1: + case PCI_DEVICE_ID_LSI_AERO_10E2: + case PCI_DEVICE_ID_LSI_AERO_10E5: + case PCI_DEVICE_ID_LSI_AERO_10E6: + instance->adapter_type = AERO_SERIES; + break; + case PCI_DEVICE_ID_LSI_VENTURA: + case PCI_DEVICE_ID_LSI_CRUSADER: + case PCI_DEVICE_ID_LSI_HARPOON: + case PCI_DEVICE_ID_LSI_TOMCAT: + case PCI_DEVICE_ID_LSI_VENTURA_4PORT: + case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: + instance->adapter_type = VENTURA_SERIES; + break; + case PCI_DEVICE_ID_LSI_FUSION: + case PCI_DEVICE_ID_LSI_PLASMA: + instance->adapter_type = THUNDERBOLT_SERIES; + break; + case PCI_DEVICE_ID_LSI_INVADER: + case PCI_DEVICE_ID_LSI_INTRUDER: + case PCI_DEVICE_ID_LSI_INTRUDER_24: + case PCI_DEVICE_ID_LSI_CUTLASS_52: + case PCI_DEVICE_ID_LSI_CUTLASS_53: + case PCI_DEVICE_ID_LSI_FURY: + instance->adapter_type = INVADER_SERIES; + break; + default: /* For all other supported controllers */ + instance->adapter_type = MFI_SERIES; + break; + } + } +} + +static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance) +{ + instance->producer = dma_alloc_coherent(&instance->pdev->dev, + sizeof(u32), &instance->producer_h, GFP_KERNEL); + instance->consumer = dma_alloc_coherent(&instance->pdev->dev, + sizeof(u32), &instance->consumer_h, GFP_KERNEL); + + if (!instance->producer || !instance->consumer) { + dev_err(&instance->pdev->dev, + "Failed to allocate memory for producer, consumer\n"); + return -1; + } + + *instance->producer = 0; + *instance->consumer = 0; + return 0; +} + +/** + * megasas_alloc_ctrl_mem - Allocate per controller memory for core data + * structures which are not common across MFI + * adapters and fusion adapters. + * For MFI based adapters, allocate producer and + * consumer buffers. For fusion adapters, allocate + * memory for fusion context. 
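+ * A per-CPU reply_map array is allocated for every
+ * adapter type before the series specific
+ * buffers are set up.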
+ * @instance: Adapter soft state + * return: 0 for SUCCESS + */ +static int megasas_alloc_ctrl_mem(struct megasas_instance *instance) +{ + instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int), + GFP_KERNEL); + if (!instance->reply_map) + return -ENOMEM; + + switch (instance->adapter_type) { + case MFI_SERIES: + if (megasas_alloc_mfi_ctrl_mem(instance)) + return -ENOMEM; + break; + case AERO_SERIES: + case VENTURA_SERIES: + case THUNDERBOLT_SERIES: + case INVADER_SERIES: + if (megasas_alloc_fusion_context(instance)) + return -ENOMEM; + break; + } + + return 0; +} + +/* + * megasas_free_ctrl_mem - Free fusion context for fusion adapters and + * producer, consumer buffers for MFI adapters + * + * @instance - Adapter soft instance + * + */ +static inline void megasas_free_ctrl_mem(struct megasas_instance *instance) +{ + kfree(instance->reply_map); + if (instance->adapter_type == MFI_SERIES) { + if (instance->producer) + dma_free_coherent(&instance->pdev->dev, sizeof(u32), + instance->producer, + instance->producer_h); + if (instance->consumer) + dma_free_coherent(&instance->pdev->dev, sizeof(u32), + instance->consumer, + instance->consumer_h); + } else { + megasas_free_fusion_context(instance); + } +} + +/** + * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during + * driver load time + * + * @instance: Adapter soft instance + * + * @return: O for SUCCESS + */ +static inline +int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance) +{ + struct pci_dev *pdev = instance->pdev; + struct fusion_context *fusion = instance->ctrl_context; + + instance->evt_detail = dma_alloc_coherent(&pdev->dev, + sizeof(struct megasas_evt_detail), + &instance->evt_detail_h, GFP_KERNEL); + + if (!instance->evt_detail) { + dev_err(&instance->pdev->dev, + "Failed to allocate event detail buffer\n"); + return -ENOMEM; + } + + if (fusion) { + fusion->ioc_init_request = + dma_alloc_coherent(&pdev->dev, + sizeof(struct MPI2_IOC_INIT_REQUEST), + &fusion->ioc_init_request_phys, + GFP_KERNEL); + + if (!fusion->ioc_init_request) { + dev_err(&pdev->dev, + "Failed to allocate ioc init request\n"); + return -ENOMEM; + } + + instance->snapdump_prop = dma_alloc_coherent(&pdev->dev, + sizeof(struct MR_SNAPDUMP_PROPERTIES), + &instance->snapdump_prop_h, GFP_KERNEL); + + if (!instance->snapdump_prop) + dev_err(&pdev->dev, + "Failed to allocate snapdump properties buffer\n"); + + instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev, + HOST_DEVICE_LIST_SZ, + &instance->host_device_list_buf_h, + GFP_KERNEL); + + if (!instance->host_device_list_buf) { + dev_err(&pdev->dev, + "Failed to allocate targetid list buffer\n"); + return -ENOMEM; + } + + } + + instance->pd_list_buf = + dma_alloc_coherent(&pdev->dev, + MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), + &instance->pd_list_buf_h, GFP_KERNEL); + + if (!instance->pd_list_buf) { + dev_err(&pdev->dev, "Failed to allocate PD list buffer\n"); + return -ENOMEM; + } + + instance->ctrl_info_buf = + dma_alloc_coherent(&pdev->dev, + sizeof(struct megasas_ctrl_info), + &instance->ctrl_info_buf_h, GFP_KERNEL); + + if (!instance->ctrl_info_buf) { + dev_err(&pdev->dev, + "Failed to allocate controller info buffer\n"); + return -ENOMEM; + } + + instance->ld_list_buf = + dma_alloc_coherent(&pdev->dev, + sizeof(struct MR_LD_LIST), + &instance->ld_list_buf_h, GFP_KERNEL); + + if (!instance->ld_list_buf) { + dev_err(&pdev->dev, "Failed to allocate LD list buffer\n"); + return -ENOMEM; + } + + instance->ld_targetid_list_buf = + 
dma_alloc_coherent(&pdev->dev, + sizeof(struct MR_LD_TARGETID_LIST), + &instance->ld_targetid_list_buf_h, GFP_KERNEL); + + if (!instance->ld_targetid_list_buf) { + dev_err(&pdev->dev, + "Failed to allocate LD targetid list buffer\n"); + return -ENOMEM; + } + + if (!reset_devices) { + instance->system_info_buf = + dma_alloc_coherent(&pdev->dev, + sizeof(struct MR_DRV_SYSTEM_INFO), + &instance->system_info_h, GFP_KERNEL); + instance->pd_info = + dma_alloc_coherent(&pdev->dev, + sizeof(struct MR_PD_INFO), + &instance->pd_info_h, GFP_KERNEL); + instance->tgt_prop = + dma_alloc_coherent(&pdev->dev, + sizeof(struct MR_TARGET_PROPERTIES), + &instance->tgt_prop_h, GFP_KERNEL); + instance->crash_dump_buf = + dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, + &instance->crash_dump_h, GFP_KERNEL); + + if (!instance->system_info_buf) + dev_err(&instance->pdev->dev, + "Failed to allocate system info buffer\n"); + + if (!instance->pd_info) + dev_err(&instance->pdev->dev, + "Failed to allocate pd_info buffer\n"); + + if (!instance->tgt_prop) + dev_err(&instance->pdev->dev, + "Failed to allocate tgt_prop buffer\n"); + + if (!instance->crash_dump_buf) + dev_err(&instance->pdev->dev, + "Failed to allocate crash dump buffer\n"); + } + + return 0; +} + +/* + * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated + * during driver load time + * + * @instance- Adapter soft instance + * + */ +static inline +void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance) +{ + struct pci_dev *pdev = instance->pdev; + struct fusion_context *fusion = instance->ctrl_context; + + if (instance->evt_detail) + dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail), + instance->evt_detail, + instance->evt_detail_h); + + if (fusion && fusion->ioc_init_request) + dma_free_coherent(&pdev->dev, + sizeof(struct MPI2_IOC_INIT_REQUEST), + fusion->ioc_init_request, + fusion->ioc_init_request_phys); + + if (instance->pd_list_buf) + dma_free_coherent(&pdev->dev, + MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), + instance->pd_list_buf, + instance->pd_list_buf_h); + + if (instance->ld_list_buf) + dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST), + instance->ld_list_buf, + instance->ld_list_buf_h); + + if (instance->ld_targetid_list_buf) + dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST), + instance->ld_targetid_list_buf, + instance->ld_targetid_list_buf_h); + + if (instance->ctrl_info_buf) + dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info), + instance->ctrl_info_buf, + instance->ctrl_info_buf_h); + + if (instance->system_info_buf) + dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO), + instance->system_info_buf, + instance->system_info_h); + + if (instance->pd_info) + dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO), + instance->pd_info, instance->pd_info_h); + + if (instance->tgt_prop) + dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES), + instance->tgt_prop, instance->tgt_prop_h); + + if (instance->crash_dump_buf) + dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, + instance->crash_dump_buf, + instance->crash_dump_h); + + if (instance->snapdump_prop) + dma_free_coherent(&pdev->dev, + sizeof(struct MR_SNAPDUMP_PROPERTIES), + instance->snapdump_prop, + instance->snapdump_prop_h); + + if (instance->host_device_list_buf) + dma_free_coherent(&pdev->dev, + HOST_DEVICE_LIST_SZ, + instance->host_device_list_buf, + instance->host_device_list_buf_h); + +} + +/* + * megasas_init_ctrl_params - Initialize controller's instance + * parameters 
before FW init + * @instance - Adapter soft instance + * @return - void + */ +static inline void megasas_init_ctrl_params(struct megasas_instance *instance) +{ + instance->fw_crash_state = UNAVAILABLE; + + megasas_poll_wait_aen = 0; + instance->issuepend_done = 1; + atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); + + /* + * Initialize locks and queues + */ + INIT_LIST_HEAD(&instance->cmd_pool); + INIT_LIST_HEAD(&instance->internal_reset_pending_q); + + atomic_set(&instance->fw_outstanding, 0); + atomic64_set(&instance->total_io_count, 0); + + init_waitqueue_head(&instance->int_cmd_wait_q); + init_waitqueue_head(&instance->abort_cmd_wait_q); + + mutex_init(&instance->crashdump_lock); + spin_lock_init(&instance->mfi_pool_lock); + spin_lock_init(&instance->hba_lock); + spin_lock_init(&instance->stream_lock); + spin_lock_init(&instance->completion_lock); + + mutex_init(&instance->reset_mutex); + + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) + instance->flag_ieee = 1; + + instance->flag = 0; + instance->unload = 1; + instance->last_time = 0; + instance->disableOnlineCtrlReset = 1; + instance->UnevenSpanSupport = 0; + instance->smp_affinity_enable = smp_affinity_enable ? true : false; + instance->msix_load_balance = false; + + if (instance->adapter_type != MFI_SERIES) + INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); + else + INIT_WORK(&instance->work_init, process_fw_state_change_wq); +} + +/** + * megasas_probe_one - PCI hotplug entry point + * @pdev: PCI device structure + * @id: PCI ids of supported hotplugged adapter + */ +static int megasas_probe_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int rval, pos; + struct Scsi_Host *host; + struct megasas_instance *instance; + u16 control = 0; + + switch (pdev->device) { + case PCI_DEVICE_ID_LSI_AERO_10E0: + case PCI_DEVICE_ID_LSI_AERO_10E3: + case PCI_DEVICE_ID_LSI_AERO_10E4: + case PCI_DEVICE_ID_LSI_AERO_10E7: + dev_err(&pdev->dev, "Adapter is in non secure mode\n"); + return 1; + case PCI_DEVICE_ID_LSI_AERO_10E1: + case PCI_DEVICE_ID_LSI_AERO_10E5: + dev_info(&pdev->dev, "Adapter is in configurable secure mode\n"); + break; + } + + /* Reset MSI-X in the kdump kernel */ + if (reset_devices) { + pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (pos) { + pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, + &control); + if (control & PCI_MSIX_FLAGS_ENABLE) { + dev_info(&pdev->dev, "resetting MSI-X\n"); + pci_write_config_word(pdev, + pos + PCI_MSIX_FLAGS, + control & + ~PCI_MSIX_FLAGS_ENABLE); + } + } + } + + /* + * PCI prepping: enable device set bus mastering and dma mask + */ + rval = pci_enable_device_mem(pdev); + + if (rval) { + return rval; + } + + pci_set_master(pdev); + + host = scsi_host_alloc(&megasas_template, + sizeof(struct megasas_instance)); + + if (!host) { + dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); + goto fail_alloc_instance; + } + + instance = (struct megasas_instance *)host->hostdata; + memset(instance, 0, sizeof(*instance)); + atomic_set(&instance->fw_reset_no_pci_access, 0); + + /* + * Initialize PCI related and misc parameters + */ + instance->pdev = pdev; + instance->host = host; + instance->unique_id = pci_dev_id(pdev); + instance->init_id = MEGASAS_DEFAULT_INIT_ID; + + megasas_set_adapter_type(instance); + + /* + * Initialize MFI Firmware + */ + if (megasas_init_fw(instance)) + goto fail_init_mfi; + + if (instance->requestorId) { + if (instance->PlasmaFW111) { + 
instance->vf_affiliation_111 = + dma_alloc_coherent(&pdev->dev, + sizeof(struct MR_LD_VF_AFFILIATION_111), + &instance->vf_affiliation_111_h, + GFP_KERNEL); + if (!instance->vf_affiliation_111) + dev_warn(&pdev->dev, "Can't allocate " + "memory for VF affiliation buffer\n"); + } else { + instance->vf_affiliation = + dma_alloc_coherent(&pdev->dev, + (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION), + &instance->vf_affiliation_h, + GFP_KERNEL); + if (!instance->vf_affiliation) + dev_warn(&pdev->dev, "Can't allocate " + "memory for VF affiliation buffer\n"); + } + } + + /* + * Store instance in PCI softstate + */ + pci_set_drvdata(pdev, instance); + + /* + * Add this controller to megasas_mgmt_info structure so that it + * can be exported to management applications + */ + megasas_mgmt_info.count++; + megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; + megasas_mgmt_info.max_index++; + + /* + * Register with SCSI mid-layer + */ + if (megasas_io_attach(instance)) + goto fail_io_attach; + + instance->unload = 0; + /* + * Trigger SCSI to scan our drives + */ + if (!instance->enable_fw_dev_list || + (instance->host_device_list_buf->count > 0)) + scsi_scan_host(host); + + /* + * Initiate AEN (Asynchronous Event Notification) + */ + if (megasas_start_aen(instance)) { + dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); + goto fail_start_aen; + } + + megasas_setup_debugfs(instance); + + /* Get current SR-IOV LD/VF affiliation */ + if (instance->requestorId) + megasas_get_ld_vf_affiliation(instance, 1); + + return 0; + +fail_start_aen: + instance->unload = 1; + scsi_remove_host(instance->host); +fail_io_attach: + megasas_mgmt_info.count--; + megasas_mgmt_info.max_index--; + megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; + + if (instance->requestorId && !instance->skip_heartbeat_timer_del) + del_timer_sync(&instance->sriov_heartbeat_timer); + + instance->instancet->disable_intr(instance); + megasas_destroy_irqs(instance); + + if (instance->adapter_type != MFI_SERIES) + megasas_release_fusion(instance); + else + megasas_release_mfi(instance); + + if (instance->msix_vectors) + pci_free_irq_vectors(instance->pdev); + instance->msix_vectors = 0; + + if (instance->fw_crash_state != UNAVAILABLE) + megasas_free_host_crash_buffer(instance); + + if (instance->adapter_type != MFI_SERIES) + megasas_fusion_stop_watchdog(instance); +fail_init_mfi: + scsi_host_put(host); +fail_alloc_instance: + pci_disable_device(pdev); + + return -ENODEV; +} + +/** + * megasas_flush_cache - Requests FW to flush all its caches + * @instance: Adapter soft state + */ +static void megasas_flush_cache(struct megasas_instance *instance) +{ + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) + return; + + cmd = megasas_get_cmd(instance); + + if (!cmd) + return; + + dcmd = &cmd->frame->dcmd; + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0x0; + dcmd->sge_count = 0; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = 0; + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); + dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; + + if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) + != DCMD_SUCCESS) { + dev_err(&instance->pdev->dev, + "return from %s %d\n", __func__, __LINE__); + return; + } + + megasas_return_cmd(instance, cmd); +} + +/** + * megasas_shutdown_controller 
- Instructs FW to shutdown the controller + * @instance: Adapter soft state + * @opcode: Shutdown/Hibernate + */ +static void megasas_shutdown_controller(struct megasas_instance *instance, + u32 opcode) +{ + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) + return; + + cmd = megasas_get_cmd(instance); + + if (!cmd) + return; + + if (instance->aen_cmd) + megasas_issue_blocked_abort_cmd(instance, + instance->aen_cmd, MFI_IO_TIMEOUT_SECS); + if (instance->map_update_cmd) + megasas_issue_blocked_abort_cmd(instance, + instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); + if (instance->jbod_seq_cmd) + megasas_issue_blocked_abort_cmd(instance, + instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); + + dcmd = &cmd->frame->dcmd; + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0x0; + dcmd->sge_count = 0; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = 0; + dcmd->opcode = cpu_to_le32(opcode); + + if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) + != DCMD_SUCCESS) { + dev_err(&instance->pdev->dev, + "return from %s %d\n", __func__, __LINE__); + return; + } + + megasas_return_cmd(instance, cmd); +} + +/** + * megasas_suspend - driver suspend entry point + * @dev: Device structure + */ +static int __maybe_unused +megasas_suspend(struct device *dev) +{ + struct megasas_instance *instance; + + instance = dev_get_drvdata(dev); + + if (!instance) + return 0; + + instance->unload = 1; + + dev_info(dev, "%s is called\n", __func__); + + /* Shutdown SR-IOV heartbeat timer */ + if (instance->requestorId && !instance->skip_heartbeat_timer_del) + del_timer_sync(&instance->sriov_heartbeat_timer); + + /* Stop the FW fault detection watchdog */ + if (instance->adapter_type != MFI_SERIES) + megasas_fusion_stop_watchdog(instance); + + megasas_flush_cache(instance); + megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); + + /* cancel the delayed work if this work still in queue */ + if (instance->ev != NULL) { + struct megasas_aen_event *ev = instance->ev; + cancel_delayed_work_sync(&ev->hotplug_work); + instance->ev = NULL; + } + + tasklet_kill(&instance->isr_tasklet); + + pci_set_drvdata(instance->pdev, instance); + instance->instancet->disable_intr(instance); + + megasas_destroy_irqs(instance); + + if (instance->msix_vectors) + pci_free_irq_vectors(instance->pdev); + + return 0; +} + +/** + * megasas_resume- driver resume entry point + * @dev: Device structure + */ +static int __maybe_unused +megasas_resume(struct device *dev) +{ + int rval; + struct Scsi_Host *host; + struct megasas_instance *instance; + u32 status_reg; + + instance = dev_get_drvdata(dev); + + if (!instance) + return 0; + + host = instance->host; + + dev_info(dev, "%s is called\n", __func__); + + /* + * We expect the FW state to be READY + */ + + if (megasas_transition_to_ready(instance, 0)) { + dev_info(&instance->pdev->dev, + "Failed to transition controller to ready from %s!\n", + __func__); + if (instance->adapter_type != MFI_SERIES) { + status_reg = + instance->instancet->read_fw_status_reg(instance); + if (!(status_reg & MFI_RESET_ADAPTER) || + ((megasas_adp_reset_wait_for_ready + (instance, true, 0)) == FAILED)) + goto fail_ready_state; + } else { + atomic_set(&instance->fw_reset_no_pci_access, 1); + instance->instancet->adp_reset + (instance, instance->reg_set); + atomic_set(&instance->fw_reset_no_pci_access, 0); + + /* waiting 
for about 30 seconds before retry */ + ssleep(30); + + if (megasas_transition_to_ready(instance, 0)) + goto fail_ready_state; + } + + dev_info(&instance->pdev->dev, + "FW restarted successfully from %s!\n", + __func__); + } + if (megasas_set_dma_mask(instance)) + goto fail_set_dma_mask; + + /* + * Initialize MFI Firmware + */ + + atomic_set(&instance->fw_outstanding, 0); + atomic_set(&instance->ldio_outstanding, 0); + + /* Now re-enable MSI-X */ + if (instance->msix_vectors) + megasas_alloc_irq_vectors(instance); + + if (!instance->msix_vectors) { + rval = pci_alloc_irq_vectors(instance->pdev, 1, 1, + PCI_IRQ_LEGACY); + if (rval < 0) + goto fail_reenable_msix; + } + + megasas_setup_reply_map(instance); + + if (instance->adapter_type != MFI_SERIES) { + megasas_reset_reply_desc(instance); + if (megasas_ioc_init_fusion(instance)) { + megasas_free_cmds(instance); + megasas_free_cmds_fusion(instance); + goto fail_init_mfi; + } + if (!megasas_get_map_info(instance)) + megasas_sync_map_info(instance); + } else { + *instance->producer = 0; + *instance->consumer = 0; + if (megasas_issue_init_mfi(instance)) + goto fail_init_mfi; + } + + if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) + goto fail_init_mfi; + + tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, + (unsigned long)instance); + + if (instance->msix_vectors ? + megasas_setup_irqs_msix(instance, 0) : + megasas_setup_irqs_ioapic(instance)) + goto fail_init_mfi; + + if (instance->adapter_type != MFI_SERIES) + megasas_setup_irq_poll(instance); + + /* Re-launch SR-IOV heartbeat timer */ + if (instance->requestorId) { + if (!megasas_sriov_start_heartbeat(instance, 0)) + megasas_start_timer(instance); + else { + instance->skip_heartbeat_timer_del = 1; + goto fail_init_mfi; + } + } + + instance->instancet->enable_intr(instance); + megasas_setup_jbod_map(instance); + instance->unload = 0; + + /* + * Initiate AEN (Asynchronous Event Notification) + */ + if (megasas_start_aen(instance)) + dev_err(&instance->pdev->dev, "Start AEN failed\n"); + + /* Re-launch FW fault watchdog */ + if (instance->adapter_type != MFI_SERIES) + if (megasas_fusion_start_watchdog(instance) != SUCCESS) + goto fail_start_watchdog; + + return 0; + +fail_start_watchdog: + if (instance->requestorId && !instance->skip_heartbeat_timer_del) + del_timer_sync(&instance->sriov_heartbeat_timer); +fail_init_mfi: + megasas_free_ctrl_dma_buffers(instance); + megasas_free_ctrl_mem(instance); + scsi_host_put(host); + +fail_reenable_msix: +fail_set_dma_mask: +fail_ready_state: + + return -ENODEV; +} + +static inline int +megasas_wait_for_adapter_operational(struct megasas_instance *instance) +{ + int wait_time = MEGASAS_RESET_WAIT_TIME * 2; + int i; + u8 adp_state; + + for (i = 0; i < wait_time; i++) { + adp_state = atomic_read(&instance->adprecovery); + if ((adp_state == MEGASAS_HBA_OPERATIONAL) || + (adp_state == MEGASAS_HW_CRITICAL_ERROR)) + break; + + if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) + dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n"); + + msleep(1000); + } + + if (adp_state != MEGASAS_HBA_OPERATIONAL) { + dev_info(&instance->pdev->dev, + "%s HBA failed to become operational, adp_state %d\n", + __func__, adp_state); + return 1; + } + + return 0; +} + +/** + * megasas_detach_one - PCI hot"un"plug entry point + * @pdev: PCI device structure + */ +static void megasas_detach_one(struct pci_dev *pdev) +{ + int i; + struct Scsi_Host *host; + struct megasas_instance *instance; + struct fusion_context *fusion; + size_t pd_seq_map_sz; + + 
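+ /*
+  * Tear down in roughly the reverse order of probe: stop the SR-IOV
+  * heartbeat and FW fault watchdog, quiesce the controller with flush
+  * and shutdown DCMDs, then release IRQs, fusion/MFI resources and
+  * DMA buffers before disabling the PCI device.
+  */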
instance = pci_get_drvdata(pdev); + + if (!instance) + return; + + host = instance->host; + fusion = instance->ctrl_context; + + /* Shutdown SR-IOV heartbeat timer */ + if (instance->requestorId && !instance->skip_heartbeat_timer_del) + del_timer_sync(&instance->sriov_heartbeat_timer); + + /* Stop the FW fault detection watchdog */ + if (instance->adapter_type != MFI_SERIES) + megasas_fusion_stop_watchdog(instance); + + if (instance->fw_crash_state != UNAVAILABLE) + megasas_free_host_crash_buffer(instance); + scsi_remove_host(instance->host); + instance->unload = 1; + + if (megasas_wait_for_adapter_operational(instance)) + goto skip_firing_dcmds; + + megasas_flush_cache(instance); + megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); + +skip_firing_dcmds: + /* cancel the delayed work if this work still in queue*/ + if (instance->ev != NULL) { + struct megasas_aen_event *ev = instance->ev; + cancel_delayed_work_sync(&ev->hotplug_work); + instance->ev = NULL; + } + + /* cancel all wait events */ + wake_up_all(&instance->int_cmd_wait_q); + + tasklet_kill(&instance->isr_tasklet); + + /* + * Take the instance off the instance array. Note that we will not + * decrement the max_index. We let this array be sparse array + */ + for (i = 0; i < megasas_mgmt_info.max_index; i++) { + if (megasas_mgmt_info.instance[i] == instance) { + megasas_mgmt_info.count--; + megasas_mgmt_info.instance[i] = NULL; + + break; + } + } + + instance->instancet->disable_intr(instance); + + megasas_destroy_irqs(instance); + + if (instance->msix_vectors) + pci_free_irq_vectors(instance->pdev); + + if (instance->adapter_type >= VENTURA_SERIES) { + for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) + kfree(fusion->stream_detect_by_ld[i]); + kfree(fusion->stream_detect_by_ld); + fusion->stream_detect_by_ld = NULL; + } + + + if (instance->adapter_type != MFI_SERIES) { + megasas_release_fusion(instance); + pd_seq_map_sz = + struct_size_t(struct MR_PD_CFG_SEQ_NUM_SYNC, + seq, MAX_PHYSICAL_DEVICES); + for (i = 0; i < 2 ; i++) { + if (fusion->ld_map[i]) + dma_free_coherent(&instance->pdev->dev, + fusion->max_map_sz, + fusion->ld_map[i], + fusion->ld_map_phys[i]); + if (fusion->ld_drv_map[i]) { + if (is_vmalloc_addr(fusion->ld_drv_map[i])) + vfree(fusion->ld_drv_map[i]); + else + free_pages((ulong)fusion->ld_drv_map[i], + fusion->drv_map_pages); + } + + if (fusion->pd_seq_sync[i]) + dma_free_coherent(&instance->pdev->dev, + pd_seq_map_sz, + fusion->pd_seq_sync[i], + fusion->pd_seq_phys[i]); + } + } else { + megasas_release_mfi(instance); + } + + if (instance->vf_affiliation) + dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION), + instance->vf_affiliation, + instance->vf_affiliation_h); + + if (instance->vf_affiliation_111) + dma_free_coherent(&pdev->dev, + sizeof(struct MR_LD_VF_AFFILIATION_111), + instance->vf_affiliation_111, + instance->vf_affiliation_111_h); + + if (instance->hb_host_mem) + dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM), + instance->hb_host_mem, + instance->hb_host_mem_h); + + megasas_free_ctrl_dma_buffers(instance); + + megasas_free_ctrl_mem(instance); + + megasas_destroy_debugfs(instance); + + scsi_host_put(host); + + pci_disable_device(pdev); +} + +/** + * megasas_shutdown - Shutdown entry point + * @pdev: PCI device structure + */ +static void megasas_shutdown(struct pci_dev *pdev) +{ + struct megasas_instance *instance = pci_get_drvdata(pdev); + + if (!instance) + return; + + instance->unload = 1; + + if 
(megasas_wait_for_adapter_operational(instance)) + goto skip_firing_dcmds; + + megasas_flush_cache(instance); + megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); + +skip_firing_dcmds: + instance->instancet->disable_intr(instance); + megasas_destroy_irqs(instance); + + if (instance->msix_vectors) + pci_free_irq_vectors(instance->pdev); +} + +/* + * megasas_mgmt_open - char node "open" entry point + * @inode: char node inode + * @filep: char node file + */ +static int megasas_mgmt_open(struct inode *inode, struct file *filep) +{ + /* + * Allow only those users with admin rights + */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + return 0; +} + +/* + * megasas_mgmt_fasync - Async notifier registration from applications + * @fd: char node file descriptor number + * @filep: char node file + * @mode: notifier on/off + * + * This function adds the calling process to a driver global queue. When an + * event occurs, SIGIO will be sent to all processes in this queue. + */ +static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) +{ + int rc; + + mutex_lock(&megasas_async_queue_mutex); + + rc = fasync_helper(fd, filep, mode, &megasas_async_queue); + + mutex_unlock(&megasas_async_queue_mutex); + + if (rc >= 0) { + /* For sanity check when we get ioctl */ + filep->private_data = filep; + return 0; + } + + printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); + + return rc; +} + +/* + * megasas_mgmt_poll - char node "poll" entry point + * @filep: char node file + * @wait: Events to poll for + */ +static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait) +{ + __poll_t mask; + unsigned long flags; + + poll_wait(file, &megasas_poll_wait, wait); + spin_lock_irqsave(&poll_aen_lock, flags); + if (megasas_poll_wait_aen) + mask = (EPOLLIN | EPOLLRDNORM); + else + mask = 0; + megasas_poll_wait_aen = 0; + spin_unlock_irqrestore(&poll_aen_lock, flags); + return mask; +} + +/* + * megasas_set_crash_dump_params_ioctl: + * Send CRASH_DUMP_MODE DCMD to all controllers + * @cmd: MFI command frame + */ + +static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd) +{ + struct megasas_instance *local_instance; + int i, error = 0; + int crash_support; + + crash_support = cmd->frame->dcmd.mbox.w[0]; + + for (i = 0; i < megasas_mgmt_info.max_index; i++) { + local_instance = megasas_mgmt_info.instance[i]; + if (local_instance && local_instance->crash_dump_drv_support) { + if ((atomic_read(&local_instance->adprecovery) == + MEGASAS_HBA_OPERATIONAL) && + !megasas_set_crash_dump_params(local_instance, + crash_support)) { + local_instance->crash_dump_app_support = + crash_support; + dev_info(&local_instance->pdev->dev, + "Application firmware crash " + "dump mode set success\n"); + error = 0; + } else { + dev_info(&local_instance->pdev->dev, + "Application firmware crash " + "dump mode set failed\n"); + error = -1; + } + } + } + return error; +} + +/** + * megasas_mgmt_fw_ioctl - Issues management ioctls to FW + * @instance: Adapter soft state + * @user_ioc: User's ioctl packet + * @ioc: ioctl packet + */ +static int +megasas_mgmt_fw_ioctl(struct megasas_instance *instance, + struct megasas_iocpacket __user * user_ioc, + struct megasas_iocpacket *ioc) +{ + struct megasas_sge64 *kern_sge64 = NULL; + struct megasas_sge32 *kern_sge32 = NULL; + struct megasas_cmd *cmd; + void *kbuff_arr[MAX_IOCTL_SGE]; + dma_addr_t buf_handle = 0; + int error = 0, i; + void *sense = NULL; + dma_addr_t sense_handle; + void *sense_ptr; + u32 opcode = 0; + int ret = DCMD_SUCCESS; + + 
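+ /*
+  * Copy the user's MFI frames into a driver command, mirror each user SGE
+  * into a DMA-coherent kernel buffer, issue the command synchronously and
+  * then copy data, sense and completion status back to user space.
+  */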
memset(kbuff_arr, 0, sizeof(kbuff_arr)); + + if (ioc->sge_count > MAX_IOCTL_SGE) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n", + ioc->sge_count, MAX_IOCTL_SGE); + return -EINVAL; + } + + if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) || + ((ioc->frame.hdr.cmd == MFI_CMD_NVME) && + !instance->support_nvme_passthru) || + ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) && + !instance->support_pci_lane_margining)) { + dev_err(&instance->pdev->dev, + "Received invalid ioctl command 0x%x\n", + ioc->frame.hdr.cmd); + return -ENOTSUPP; + } + + cmd = megasas_get_cmd(instance); + if (!cmd) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n"); + return -ENOMEM; + } + + /* + * User's IOCTL packet has 2 frames (maximum). Copy those two + * frames into our cmd's frames. cmd->frame's context will get + * overwritten when we copy from user's frames. So set that value + * alone separately + */ + memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); + cmd->frame->hdr.context = cpu_to_le32(cmd->index); + cmd->frame->hdr.pad_0 = 0; + + cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE); + + if (instance->consistent_mask_64bit) + cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 | + MFI_FRAME_SENSE64)); + else + cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 | + MFI_FRAME_SENSE64)); + + if (cmd->frame->hdr.cmd == MFI_CMD_DCMD) + opcode = le32_to_cpu(cmd->frame->dcmd.opcode); + + if (opcode == MR_DCMD_CTRL_SHUTDOWN) { + mutex_lock(&instance->reset_mutex); + if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { + megasas_return_cmd(instance, cmd); + mutex_unlock(&instance->reset_mutex); + return -1; + } + mutex_unlock(&instance->reset_mutex); + } + + if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { + error = megasas_set_crash_dump_params_ioctl(cmd); + megasas_return_cmd(instance, cmd); + return error; + } + + /* + * The management interface between applications and the fw uses + * MFI frames. E.g, RAID configuration changes, LD property changes + * etc are accomplishes through different kinds of MFI frames. The + * driver needs to care only about substituting user buffers with + * kernel buffers in SGLs. The location of SGL is embedded in the + * struct iocpacket itself. + */ + if (instance->consistent_mask_64bit) + kern_sge64 = (struct megasas_sge64 *) + ((unsigned long)cmd->frame + ioc->sgl_off); + else + kern_sge32 = (struct megasas_sge32 *) + ((unsigned long)cmd->frame + ioc->sgl_off); + + /* + * For each user buffer, create a mirror buffer and copy in + */ + for (i = 0; i < ioc->sge_count; i++) { + if (!ioc->sgl[i].iov_len) + continue; + + kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, + ioc->sgl[i].iov_len, + &buf_handle, GFP_KERNEL); + if (!kbuff_arr[i]) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc " + "kernel SGL buffer for IOCTL\n"); + error = -ENOMEM; + goto out; + } + + /* + * We don't change the dma_coherent_mask, so + * dma_alloc_coherent only returns 32bit addresses + */ + if (instance->consistent_mask_64bit) { + kern_sge64[i].phys_addr = cpu_to_le64(buf_handle); + kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len); + } else { + kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); + kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); + } + + /* + * We created a kernel buffer corresponding to the + * user buffer. 
Now copy in from the user buffer + */ + if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, + (u32) (ioc->sgl[i].iov_len))) { + error = -EFAULT; + goto out; + } + } + + if (ioc->sense_len) { + /* make sure the pointer is part of the frame */ + if (ioc->sense_off > + (sizeof(union megasas_frame) - sizeof(__le64))) { + error = -EINVAL; + goto out; + } + + sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, + &sense_handle, GFP_KERNEL); + if (!sense) { + error = -ENOMEM; + goto out; + } + + /* always store 64 bits regardless of addressing */ + sense_ptr = (void *)cmd->frame + ioc->sense_off; + put_unaligned_le64(sense_handle, sense_ptr); + } + + /* + * Set the sync_cmd flag so that the ISR knows not to complete this + * cmd to the SCSI mid-layer + */ + cmd->sync_cmd = 1; + + ret = megasas_issue_blocked_cmd(instance, cmd, 0); + switch (ret) { + case DCMD_INIT: + case DCMD_BUSY: + cmd->sync_cmd = 0; + dev_err(&instance->pdev->dev, + "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n", + __func__, __LINE__, cmd->frame->hdr.cmd, opcode, + cmd->cmd_status_drv); + error = -EBUSY; + goto out; + } + + cmd->sync_cmd = 0; + + if (instance->unload == 1) { + dev_info(&instance->pdev->dev, "Driver unload is in progress " + "don't submit data to application\n"); + goto out; + } + /* + * copy out the kernel buffers to user buffers + */ + for (i = 0; i < ioc->sge_count; i++) { + if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], + ioc->sgl[i].iov_len)) { + error = -EFAULT; + goto out; + } + } + + /* + * copy out the sense + */ + if (ioc->sense_len) { + void __user *uptr; + /* + * sense_ptr points to the location that has the user + * sense buffer address + */ + sense_ptr = (void *)ioc->frame.raw + ioc->sense_off; + if (in_compat_syscall()) + uptr = compat_ptr(get_unaligned((compat_uptr_t *) + sense_ptr)); + else + uptr = get_unaligned((void __user **)sense_ptr); + + if (copy_to_user(uptr, sense, ioc->sense_len)) { + dev_err(&instance->pdev->dev, "Failed to copy out to user " + "sense data\n"); + error = -EFAULT; + goto out; + } + } + + /* + * copy the status codes returned by the fw + */ + if (copy_to_user(&user_ioc->frame.hdr.cmd_status, + &cmd->frame->hdr.cmd_status, sizeof(u8))) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n"); + error = -EFAULT; + } + +out: + if (sense) { + dma_free_coherent(&instance->pdev->dev, ioc->sense_len, + sense, sense_handle); + } + + for (i = 0; i < ioc->sge_count; i++) { + if (kbuff_arr[i]) { + if (instance->consistent_mask_64bit) + dma_free_coherent(&instance->pdev->dev, + le32_to_cpu(kern_sge64[i].length), + kbuff_arr[i], + le64_to_cpu(kern_sge64[i].phys_addr)); + else + dma_free_coherent(&instance->pdev->dev, + le32_to_cpu(kern_sge32[i].length), + kbuff_arr[i], + le32_to_cpu(kern_sge32[i].phys_addr)); + kbuff_arr[i] = NULL; + } + } + + megasas_return_cmd(instance, cmd); + return error; +} + +static struct megasas_iocpacket * +megasas_compat_iocpacket_get_user(void __user *arg) +{ + struct megasas_iocpacket *ioc; + struct compat_megasas_iocpacket __user *cioc = arg; + size_t size; + int err = -EFAULT; + int i; + + ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); + if (!ioc) + return ERR_PTR(-ENOMEM); + size = offsetof(struct megasas_iocpacket, frame) + sizeof(ioc->frame); + if (copy_from_user(ioc, arg, size)) + goto out; + + for (i = 0; i < MAX_IOCTL_SGE; i++) { + compat_uptr_t iov_base; + + if (get_user(iov_base, &cioc->sgl[i].iov_base) || + get_user(ioc->sgl[i].iov_len, &cioc->sgl[i].iov_len)) + goto out; + 
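+ /* Widen the 32-bit compat user pointer to a native user pointer */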
+ ioc->sgl[i].iov_base = compat_ptr(iov_base); + } + + return ioc; +out: + kfree(ioc); + return ERR_PTR(err); +} + +static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) +{ + struct megasas_iocpacket __user *user_ioc = + (struct megasas_iocpacket __user *)arg; + struct megasas_iocpacket *ioc; + struct megasas_instance *instance; + int error; + + if (in_compat_syscall()) + ioc = megasas_compat_iocpacket_get_user(user_ioc); + else + ioc = memdup_user(user_ioc, sizeof(struct megasas_iocpacket)); + + if (IS_ERR(ioc)) + return PTR_ERR(ioc); + + instance = megasas_lookup_instance(ioc->host_no); + if (!instance) { + error = -ENODEV; + goto out_kfree_ioc; + } + + /* Block ioctls in VF mode */ + if (instance->requestorId && !allow_vf_ioctls) { + error = -ENODEV; + goto out_kfree_ioc; + } + + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { + dev_err(&instance->pdev->dev, "Controller in crit error\n"); + error = -ENODEV; + goto out_kfree_ioc; + } + + if (instance->unload == 1) { + error = -ENODEV; + goto out_kfree_ioc; + } + + if (down_interruptible(&instance->ioctl_sem)) { + error = -ERESTARTSYS; + goto out_kfree_ioc; + } + + if (megasas_wait_for_adapter_operational(instance)) { + error = -ENODEV; + goto out_up; + } + + error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); +out_up: + up(&instance->ioctl_sem); + +out_kfree_ioc: + kfree(ioc); + return error; +} + +static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) +{ + struct megasas_instance *instance; + struct megasas_aen aen; + int error; + + if (file->private_data != file) { + printk(KERN_DEBUG "megasas: fasync_helper was not " + "called first\n"); + return -EINVAL; + } + + if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) + return -EFAULT; + + instance = megasas_lookup_instance(aen.host_no); + + if (!instance) + return -ENODEV; + + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { + return -ENODEV; + } + + if (instance->unload == 1) { + return -ENODEV; + } + + if (megasas_wait_for_adapter_operational(instance)) + return -ENODEV; + + mutex_lock(&instance->reset_mutex); + error = megasas_register_aen(instance, aen.seq_num, + aen.class_locale_word); + mutex_unlock(&instance->reset_mutex); + return error; +} + +/** + * megasas_mgmt_ioctl - char node ioctl entry point + * @file: char device file pointer + * @cmd: ioctl command + * @arg: ioctl command arguments address + */ +static long +megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case MEGASAS_IOC_FIRMWARE: + return megasas_mgmt_ioctl_fw(file, arg); + + case MEGASAS_IOC_GET_AEN: + return megasas_mgmt_ioctl_aen(file, arg); + } + + return -ENOTTY; +} + +#ifdef CONFIG_COMPAT +static long +megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case MEGASAS_IOC_FIRMWARE32: + return megasas_mgmt_ioctl_fw(file, arg); + case MEGASAS_IOC_GET_AEN: + return megasas_mgmt_ioctl_aen(file, arg); + } + + return -ENOTTY; +} +#endif + +/* + * File operations structure for management interface + */ +static const struct file_operations megasas_mgmt_fops = { + .owner = THIS_MODULE, + .open = megasas_mgmt_open, + .fasync = megasas_mgmt_fasync, + .unlocked_ioctl = megasas_mgmt_ioctl, + .poll = megasas_mgmt_poll, +#ifdef CONFIG_COMPAT + .compat_ioctl = megasas_mgmt_compat_ioctl, +#endif + .llseek = noop_llseek, +}; + +static SIMPLE_DEV_PM_OPS(megasas_pm_ops, megasas_suspend, megasas_resume); + +/* + * PCI hotplug support 
registration structure + */ +static struct pci_driver megasas_pci_driver = { + + .name = "megaraid_sas", + .id_table = megasas_pci_table, + .probe = megasas_probe_one, + .remove = megasas_detach_one, + .driver.pm = &megasas_pm_ops, + .shutdown = megasas_shutdown, +}; + +/* + * Sysfs driver attributes + */ +static ssize_t version_show(struct device_driver *dd, char *buf) +{ + return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", + MEGASAS_VERSION); +} +static DRIVER_ATTR_RO(version); + +static ssize_t release_date_show(struct device_driver *dd, char *buf) +{ + return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", + MEGASAS_RELDATE); +} +static DRIVER_ATTR_RO(release_date); + +static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf) +{ + return sprintf(buf, "%u\n", support_poll_for_event); +} +static DRIVER_ATTR_RO(support_poll_for_event); + +static ssize_t support_device_change_show(struct device_driver *dd, char *buf) +{ + return sprintf(buf, "%u\n", support_device_change); +} +static DRIVER_ATTR_RO(support_device_change); + +static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf) +{ + return sprintf(buf, "%u\n", megasas_dbg_lvl); +} + +static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf, + size_t count) +{ + int retval = count; + + if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) { + printk(KERN_ERR "megasas: could not set dbg_lvl\n"); + retval = -EINVAL; + } + return retval; +} +static DRIVER_ATTR_RW(dbg_lvl); + +static ssize_t +support_nvme_encapsulation_show(struct device_driver *dd, char *buf) +{ + return sprintf(buf, "%u\n", support_nvme_encapsulation); +} + +static DRIVER_ATTR_RO(support_nvme_encapsulation); + +static ssize_t +support_pci_lane_margining_show(struct device_driver *dd, char *buf) +{ + return sprintf(buf, "%u\n", support_pci_lane_margining); +} + +static DRIVER_ATTR_RO(support_pci_lane_margining); + +static inline void megasas_remove_scsi_device(struct scsi_device *sdev) +{ + sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n"); + scsi_remove_device(sdev); + scsi_device_put(sdev); +} + +/** + * megasas_update_device_list - Update the PD and LD device list from FW + * after an AEN event notification + * @instance: Adapter soft state + * @event_type: Indicates type of event (PD or LD event) + * + * @return: Success or failure + * + * Issue DCMDs to Firmware to update the internal device list in driver. + * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination + * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 
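+ *
+ * When the firmware exposes HOST_DEVICE_LIST (enable_fw_dev_list is set),
+ * a single query refreshes both PDs and LDs; otherwise PD_LIST and/or
+ * LD_LIST_QUERY DCMDs are issued depending on @event_type.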
+ */ +static +int megasas_update_device_list(struct megasas_instance *instance, + int event_type) +{ + int dcmd_ret; + + if (instance->enable_fw_dev_list) { + return megasas_host_device_list_query(instance, false); + } else { + if (event_type & SCAN_PD_CHANNEL) { + dcmd_ret = megasas_get_pd_list(instance); + if (dcmd_ret != DCMD_SUCCESS) + return dcmd_ret; + } + + if (event_type & SCAN_VD_CHANNEL) { + if (!instance->requestorId || + megasas_get_ld_vf_affiliation(instance, 0)) { + return megasas_ld_list_query(instance, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); + } + } + } + return DCMD_SUCCESS; +} + +/** + * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer + * after an AEN event notification + * @instance: Adapter soft state + * @scan_type: Indicates type of devices (PD/LD) to add + * @return void + */ +static +void megasas_add_remove_devices(struct megasas_instance *instance, + int scan_type) +{ + int i, j; + u16 pd_index = 0; + u16 ld_index = 0; + u16 channel = 0, id = 0; + struct Scsi_Host *host; + struct scsi_device *sdev1; + struct MR_HOST_DEVICE_LIST *targetid_list = NULL; + struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL; + + host = instance->host; + + if (instance->enable_fw_dev_list) { + targetid_list = instance->host_device_list_buf; + for (i = 0; i < targetid_list->count; i++) { + targetid_entry = &targetid_list->host_device_list[i]; + if (targetid_entry->flags.u.bits.is_sys_pd) { + channel = le16_to_cpu(targetid_entry->target_id) / + MEGASAS_MAX_DEV_PER_CHANNEL; + id = le16_to_cpu(targetid_entry->target_id) % + MEGASAS_MAX_DEV_PER_CHANNEL; + } else { + channel = MEGASAS_MAX_PD_CHANNELS + + (le16_to_cpu(targetid_entry->target_id) / + MEGASAS_MAX_DEV_PER_CHANNEL); + id = le16_to_cpu(targetid_entry->target_id) % + MEGASAS_MAX_DEV_PER_CHANNEL; + } + sdev1 = scsi_device_lookup(host, channel, id, 0); + if (!sdev1) { + scsi_add_device(host, channel, id, 0); + } else { + scsi_device_put(sdev1); + } + } + } + + if (scan_type & SCAN_PD_CHANNEL) { + for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { + for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { + pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j; + sdev1 = scsi_device_lookup(host, i, j, 0); + if (instance->pd_list[pd_index].driveState == + MR_PD_STATE_SYSTEM) { + if (!sdev1) + scsi_add_device(host, i, j, 0); + else + scsi_device_put(sdev1); + } else { + if (sdev1) + megasas_remove_scsi_device(sdev1); + } + } + } + } + + if (scan_type & SCAN_VD_CHANNEL) { + for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { + for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { + ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; + sdev1 = scsi_device_lookup(host, + MEGASAS_MAX_PD_CHANNELS + i, j, 0); + if (instance->ld_ids[ld_index] != 0xff) { + if (!sdev1) + scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); + else + scsi_device_put(sdev1); + } else { + if (sdev1) + megasas_remove_scsi_device(sdev1); + } + } + } + } + +} + +static void +megasas_aen_polling(struct work_struct *work) +{ + struct megasas_aen_event *ev = + container_of(work, struct megasas_aen_event, hotplug_work.work); + struct megasas_instance *instance = ev->instance; + union megasas_evt_class_locale class_locale; + int event_type = 0; + u32 seq_num; + u16 ld_target_id; + int error; + u8 dcmd_ret = DCMD_SUCCESS; + struct scsi_device *sdev1; + + if (!instance) { + printk(KERN_ERR "invalid instance!\n"); + kfree(ev); + return; + } + + /* Don't run the event workqueue thread if OCR is running */ + mutex_lock(&instance->reset_mutex); + + instance->ev = NULL; + if 
(instance->evt_detail) { + megasas_decode_evt(instance); + + switch (le32_to_cpu(instance->evt_detail->code)) { + + case MR_EVT_PD_INSERTED: + case MR_EVT_PD_REMOVED: + event_type = SCAN_PD_CHANNEL; + break; + + case MR_EVT_LD_OFFLINE: + case MR_EVT_LD_DELETED: + ld_target_id = instance->evt_detail->args.ld.target_id; + sdev1 = scsi_device_lookup(instance->host, + MEGASAS_MAX_PD_CHANNELS + + (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL), + (ld_target_id % MEGASAS_MAX_DEV_PER_CHANNEL), + 0); + if (sdev1) + megasas_remove_scsi_device(sdev1); + + event_type = SCAN_VD_CHANNEL; + break; + case MR_EVT_LD_CREATED: + event_type = SCAN_VD_CHANNEL; + break; + + case MR_EVT_CFG_CLEARED: + case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: + case MR_EVT_FOREIGN_CFG_IMPORTED: + case MR_EVT_LD_STATE_CHANGE: + event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL; + dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", + instance->host->host_no); + break; + + case MR_EVT_CTRL_PROP_CHANGED: + dcmd_ret = megasas_get_ctrl_info(instance); + if (dcmd_ret == DCMD_SUCCESS && + instance->snapdump_wait_time) { + megasas_get_snapdump_properties(instance); + dev_info(&instance->pdev->dev, + "Snap dump wait time\t: %d\n", + instance->snapdump_wait_time); + } + break; + default: + event_type = 0; + break; + } + } else { + dev_err(&instance->pdev->dev, "invalid evt_detail!\n"); + mutex_unlock(&instance->reset_mutex); + kfree(ev); + return; + } + + if (event_type) + dcmd_ret = megasas_update_device_list(instance, event_type); + + mutex_unlock(&instance->reset_mutex); + + if (event_type && dcmd_ret == DCMD_SUCCESS) + megasas_add_remove_devices(instance, event_type); + + if (dcmd_ret == DCMD_SUCCESS) + seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; + else + seq_num = instance->last_seq_num; + + /* Register AEN with FW for latest sequence number plus 1 */ + class_locale.members.reserved = 0; + class_locale.members.locale = MR_EVT_LOCALE_ALL; + class_locale.members.class = MR_EVT_CLASS_DEBUG; + + if (instance->aen_cmd != NULL) { + kfree(ev); + return; + } + + mutex_lock(&instance->reset_mutex); + error = megasas_register_aen(instance, seq_num, + class_locale.word); + if (error) + dev_err(&instance->pdev->dev, + "register aen failed error %x\n", error); + + mutex_unlock(&instance->reset_mutex); + kfree(ev); +} + +/** + * megasas_init - Driver load entry point + */ +static int __init megasas_init(void) +{ + int rval; + + /* + * Booted in kdump kernel, minimize memory footprints by + * disabling few features + */ + if (reset_devices) { + msix_vectors = 1; + rdpq_enable = 0; + dual_qdepth_disable = 1; + poll_queues = 0; + } + + /* + * Announce driver version and other information + */ + pr_info("megasas: %s\n", MEGASAS_VERSION); + + megasas_dbg_lvl = 0; + support_poll_for_event = 2; + support_device_change = 1; + support_nvme_encapsulation = true; + support_pci_lane_margining = true; + + memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); + + /* + * Register character device node + */ + rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); + + if (rval < 0) { + printk(KERN_DEBUG "megasas: failed to open device node\n"); + return rval; + } + + megasas_mgmt_majorno = rval; + + megasas_init_debugfs(); + + /* + * Register ourselves as PCI hotplug module + */ + rval = pci_register_driver(&megasas_pci_driver); + + if (rval) { + printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n"); + goto err_pcidrv; + } + + if ((event_log_level < MFI_EVT_CLASS_DEBUG) || + (event_log_level > MFI_EVT_CLASS_DEAD)) { + 
pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); + event_log_level = MFI_EVT_CLASS_CRITICAL; + } + + rval = driver_create_file(&megasas_pci_driver.driver, + &driver_attr_version); + if (rval) + goto err_dcf_attr_ver; + + rval = driver_create_file(&megasas_pci_driver.driver, + &driver_attr_release_date); + if (rval) + goto err_dcf_rel_date; + + rval = driver_create_file(&megasas_pci_driver.driver, + &driver_attr_support_poll_for_event); + if (rval) + goto err_dcf_support_poll_for_event; + + rval = driver_create_file(&megasas_pci_driver.driver, + &driver_attr_dbg_lvl); + if (rval) + goto err_dcf_dbg_lvl; + rval = driver_create_file(&megasas_pci_driver.driver, + &driver_attr_support_device_change); + if (rval) + goto err_dcf_support_device_change; + + rval = driver_create_file(&megasas_pci_driver.driver, + &driver_attr_support_nvme_encapsulation); + if (rval) + goto err_dcf_support_nvme_encapsulation; + + rval = driver_create_file(&megasas_pci_driver.driver, + &driver_attr_support_pci_lane_margining); + if (rval) + goto err_dcf_support_pci_lane_margining; + + return rval; + +err_dcf_support_pci_lane_margining: + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_support_nvme_encapsulation); + +err_dcf_support_nvme_encapsulation: + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_support_device_change); + +err_dcf_support_device_change: + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_dbg_lvl); +err_dcf_dbg_lvl: + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_support_poll_for_event); +err_dcf_support_poll_for_event: + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_release_date); +err_dcf_rel_date: + driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); +err_dcf_attr_ver: + pci_unregister_driver(&megasas_pci_driver); +err_pcidrv: + megasas_exit_debugfs(); + unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); + return rval; +} + +/** + * megasas_exit - Driver unload entry point + */ +static void __exit megasas_exit(void) +{ + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_dbg_lvl); + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_support_poll_for_event); + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_support_device_change); + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_release_date); + driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_support_nvme_encapsulation); + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_support_pci_lane_margining); + + pci_unregister_driver(&megasas_pci_driver); + megasas_exit_debugfs(); + unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); +} + +module_init(megasas_init); +module_exit(megasas_exit); diff --git a/drivers/scsi/megaraid/megaraid_sas_debugfs.c b/drivers/scsi/megaraid/megaraid_sas_debugfs.c new file mode 100644 index 000000000..c69760775 --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_sas_debugfs.c @@ -0,0 +1,179 @@ +/* + * Linux MegaRAID driver for SAS based RAID controllers + * + * Copyright (c) 2003-2018 LSI Corporation. + * Copyright (c) 2003-2018 Avago Technologies. + * Copyright (c) 2003-2018 Broadcom Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * Authors: Broadcom Inc. + * Kashyap Desai + * Sumit Saxena + * Shivasharan S + * + * Send feedback to: megaraidlinux.pdl@broadcom.com + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "megaraid_sas_fusion.h" +#include "megaraid_sas.h" + +#ifdef CONFIG_DEBUG_FS +#include + +struct dentry *megasas_debugfs_root; + +static ssize_t +megasas_debugfs_read(struct file *filp, char __user *ubuf, size_t cnt, + loff_t *ppos) +{ + struct megasas_debugfs_buffer *debug = filp->private_data; + + if (!debug || !debug->buf) + return 0; + + return simple_read_from_buffer(ubuf, cnt, ppos, debug->buf, debug->len); +} + +static int +megasas_debugfs_raidmap_open(struct inode *inode, struct file *file) +{ + struct megasas_instance *instance = inode->i_private; + struct megasas_debugfs_buffer *debug; + struct fusion_context *fusion; + + fusion = instance->ctrl_context; + + debug = kzalloc(sizeof(struct megasas_debugfs_buffer), GFP_KERNEL); + if (!debug) + return -ENOMEM; + + debug->buf = (void *)fusion->ld_drv_map[(instance->map_id & 1)]; + debug->len = fusion->drv_map_sz; + file->private_data = debug; + + return 0; +} + +static int +megasas_debugfs_release(struct inode *inode, struct file *file) +{ + struct megasas_debug_buffer *debug = file->private_data; + + if (!debug) + return 0; + + file->private_data = NULL; + kfree(debug); + return 0; +} + +static const struct file_operations megasas_debugfs_raidmap_fops = { + .owner = THIS_MODULE, + .open = megasas_debugfs_raidmap_open, + .read = megasas_debugfs_read, + .release = megasas_debugfs_release, +}; + +/* + * megasas_init_debugfs : Create debugfs root for megaraid_sas driver + */ +void megasas_init_debugfs(void) +{ + megasas_debugfs_root = debugfs_create_dir("megaraid_sas", NULL); + if (!megasas_debugfs_root) + pr_info("Cannot create debugfs root\n"); +} + +/* + * megasas_exit_debugfs : Remove debugfs root for megaraid_sas driver + */ +void megasas_exit_debugfs(void) +{ + debugfs_remove_recursive(megasas_debugfs_root); +} + +/* + * megasas_setup_debugfs : Setup debugfs per Fusion adapter + * instance: Soft instance of adapter + */ +void +megasas_setup_debugfs(struct megasas_instance *instance) +{ + char name[64]; + struct fusion_context *fusion; + + fusion = instance->ctrl_context; + + if (fusion) { + snprintf(name, sizeof(name), + "scsi_host%d", instance->host->host_no); + if (!instance->debugfs_root) { + instance->debugfs_root = + debugfs_create_dir(name, megasas_debugfs_root); + if (!instance->debugfs_root) { + dev_err(&instance->pdev->dev, + "Cannot create per adapter debugfs directory\n"); + return; + } + } + + snprintf(name, sizeof(name), "raidmap_dump"); + instance->raidmap_dump = + debugfs_create_file(name, S_IRUGO, + instance->debugfs_root, instance, + &megasas_debugfs_raidmap_fops); + if (!instance->raidmap_dump) { + dev_err(&instance->pdev->dev, + "Cannot create 
raidmap debugfs file\n"); + debugfs_remove(instance->debugfs_root); + return; + } + } + +} + +/* + * megasas_destroy_debugfs : Destroy debugfs per Fusion adapter + * instance: Soft instance of adapter + */ +void megasas_destroy_debugfs(struct megasas_instance *instance) +{ + debugfs_remove_recursive(instance->debugfs_root); +} + +#else +void megasas_init_debugfs(void) +{ +} +void megasas_exit_debugfs(void) +{ +} +void megasas_setup_debugfs(struct megasas_instance *instance) +{ +} +void megasas_destroy_debugfs(struct megasas_instance *instance) +{ +} +#endif /*CONFIG_DEBUG_FS*/ diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c new file mode 100644 index 000000000..b8b388a4e --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -0,0 +1,1425 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Linux MegaRAID driver for SAS based RAID controllers + * + * Copyright (c) 2009-2013 LSI Corporation + * Copyright (c) 2013-2016 Avago Technologies + * Copyright (c) 2016-2018 Broadcom Inc. + * + * FILE: megaraid_sas_fp.c + * + * Authors: Broadcom Inc. + * Sumant Patro + * Varad Talamacki + * Manoj Jose + * Kashyap Desai + * Sumit Saxena + * + * Send feedback to: megaraidlinux.pdl@broadcom.com + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "megaraid_sas_fusion.h" +#include "megaraid_sas.h" +#include + +#define LB_PENDING_CMDS_DEFAULT 4 +static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT; +module_param(lb_pending_cmds, int, 0444); +MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding " + "threshold. Valid Values are 1-128. Default: 4"); + + +#define ABS_DIFF(a, b) (((a) > (b)) ? 
((a) - (b)) : ((b) - (a))) +#define MR_LD_STATE_OPTIMAL 3 + +#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize) +#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize) +#define SPAN_INVALID 0xff + +/* Prototypes */ +static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map, + PLD_SPAN_INFO ldSpanInfo); +static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, + u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info, + struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map); +static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld, + u64 strip, struct MR_DRV_RAID_MAP_ALL *map); + +u32 mega_mod64(u64 dividend, u32 divisor) +{ + u64 d; + u32 remainder; + + if (!divisor) + printk(KERN_ERR "megasas : DIVISOR is zero, in div fn\n"); + d = dividend; + remainder = do_div(d, divisor); + return remainder; +} + +/** + * mega_div64_32 - Do a 64-bit division + * @dividend: Dividend + * @divisor: Divisor + * + * @return quotient + **/ +static u64 mega_div64_32(uint64_t dividend, uint32_t divisor) +{ + u64 d = dividend; + + if (!divisor) + printk(KERN_ERR "megasas : DIVISOR is zero in mod fn\n"); + + do_div(d, divisor); + + return d; +} + +struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map) +{ + return &map->raidMap.ldSpanMap[ld].ldRaid; +} + +static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld, + struct MR_DRV_RAID_MAP_ALL + *map) +{ + return &map->raidMap.ldSpanMap[ld].spanBlock[0]; +} + +static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map) +{ + return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]; +} + +u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map) +{ + return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]); +} + +u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map) +{ + return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef); +} + +__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) +{ + return map->raidMap.devHndlInfo[pd].curDevHdl; +} + +static u8 MR_PdInterfaceTypeGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) +{ + return map->raidMap.devHndlInfo[pd].interfaceType; +} + +u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map) +{ + return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId); +} + +u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map) +{ + return map->raidMap.ldTgtIdToLd[ldTgtId]; +} + +static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, + struct MR_DRV_RAID_MAP_ALL *map) +{ + return &map->raidMap.ldSpanMap[ld].spanBlock[span].span; +} + +/* + * This function will Populate Driver Map using firmware raid map + */ +static int MR_PopulateDrvRaidMap(struct megasas_instance *instance, u64 map_id) +{ + struct fusion_context *fusion = instance->ctrl_context; + struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL; + struct MR_FW_RAID_MAP *pFwRaidMap = NULL; + int i, j; + u16 ld_count; + struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn; + struct MR_FW_RAID_MAP_EXT *fw_map_ext; + struct MR_RAID_MAP_DESC_TABLE *desc_table; + + + struct MR_DRV_RAID_MAP_ALL *drv_map = + fusion->ld_drv_map[(map_id & 1)]; + struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap; + void *raid_map_data = NULL; + + memset(drv_map, 0, fusion->drv_map_sz); + memset(pDrvRaidMap->ldTgtIdToLd, + 0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN)); + + if (instance->max_raid_mapsize) { + fw_map_dyn = fusion->ld_map[(map_id & 1)]; + desc_table = + (struct 
MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset)); + if (desc_table != fw_map_dyn->raid_map_desc_table) + dev_dbg(&instance->pdev->dev, "offsets of desc table are not matching desc %p original %p\n", + desc_table, fw_map_dyn->raid_map_desc_table); + + ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count); + pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); + pDrvRaidMap->fpPdIoTimeoutSec = + fw_map_dyn->fp_pd_io_timeout_sec; + pDrvRaidMap->totalSize = + cpu_to_le32(sizeof(struct MR_DRV_RAID_MAP_ALL)); + /* point to actual data starting point*/ + raid_map_data = (void *)fw_map_dyn + + le32_to_cpu(fw_map_dyn->desc_table_offset) + + le32_to_cpu(fw_map_dyn->desc_table_size); + + for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) { + switch (le32_to_cpu(desc_table->raid_map_desc_type)) { + case RAID_MAP_DESC_TYPE_DEVHDL_INFO: + fw_map_dyn->dev_hndl_info = + (struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); + memcpy(pDrvRaidMap->devHndlInfo, + fw_map_dyn->dev_hndl_info, + sizeof(struct MR_DEV_HANDLE_INFO) * + le32_to_cpu(desc_table->raid_map_desc_elements)); + break; + case RAID_MAP_DESC_TYPE_TGTID_INFO: + fw_map_dyn->ld_tgt_id_to_ld = + (u16 *)(raid_map_data + + le32_to_cpu(desc_table->raid_map_desc_offset)); + for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) { + pDrvRaidMap->ldTgtIdToLd[j] = + le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]); + } + break; + case RAID_MAP_DESC_TYPE_ARRAY_INFO: + fw_map_dyn->ar_map_info = + (struct MR_ARRAY_INFO *) + (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); + memcpy(pDrvRaidMap->arMapInfo, + fw_map_dyn->ar_map_info, + sizeof(struct MR_ARRAY_INFO) * + le32_to_cpu(desc_table->raid_map_desc_elements)); + break; + case RAID_MAP_DESC_TYPE_SPAN_INFO: + fw_map_dyn->ld_span_map = + (struct MR_LD_SPAN_MAP *) + (raid_map_data + + le32_to_cpu(desc_table->raid_map_desc_offset)); + memcpy(pDrvRaidMap->ldSpanMap, + fw_map_dyn->ld_span_map, + sizeof(struct MR_LD_SPAN_MAP) * + le32_to_cpu(desc_table->raid_map_desc_elements)); + break; + default: + dev_dbg(&instance->pdev->dev, "wrong number of desctableElements %d\n", + fw_map_dyn->desc_table_num_elements); + } + ++desc_table; + } + + } else if (instance->supportmax256vd) { + fw_map_ext = + (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(map_id & 1)]; + ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount); + if (ld_count > MAX_LOGICAL_DRIVES_EXT) { + dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n"); + return 1; + } + + pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); + pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec; + for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++) + pDrvRaidMap->ldTgtIdToLd[i] = + (u16)fw_map_ext->ldTgtIdToLd[i]; + memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap, + sizeof(struct MR_LD_SPAN_MAP) * ld_count); + memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo, + sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT); + memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo, + sizeof(struct MR_DEV_HANDLE_INFO) * + MAX_RAIDMAP_PHYSICAL_DEVICES); + + /* New Raid map will not set totalSize, so keep expected value + * for legacy code in ValidateMapInfo + */ + pDrvRaidMap->totalSize = + cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT)); + } else { + fw_map_old = (struct MR_FW_RAID_MAP_ALL *) + fusion->ld_map[(map_id & 1)]; + pFwRaidMap = &fw_map_old->raidMap; + ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount); + if 
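The dynamic-map branch above walks a descriptor table whose entries locate the device-handle, target-id, array and span regions inside the firmware buffer. The sketch below reproduces only that layout idea with simplified, hypothetical field names (no endian handling, no validation) so the offsets are concrete; it is not the driver's MR_FW_RAID_MAP_DYNAMIC definition.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct desc {			/* one descriptor-table entry                 */
	uint32_t type;		/* which region: dev handles, target ids, ... */
	uint32_t elements;	/* element count for that region              */
	uint32_t offset;	/* region offset, relative to the data area   */
};

struct dyn_map_hdr {		/* start of the firmware buffer               */
	uint32_t desc_table_offset;		/* from start of buffer       */
	uint32_t desc_table_size;		/* bytes used by the table    */
	uint32_t desc_table_num_elements;	/* number of descriptors      */
};

/* walk the table and report where each described region lives */
static void walk_dyn_map(const unsigned char *fw_buf)
{
	const struct dyn_map_hdr *hdr = (const void *)fw_buf;
	const struct desc *d = (const void *)(fw_buf + hdr->desc_table_offset);
	/* the data area begins immediately after the descriptor table */
	const unsigned char *data =
		fw_buf + hdr->desc_table_offset + hdr->desc_table_size;
	uint32_t i;

	for (i = 0; i < hdr->desc_table_num_elements; i++, d++)
		printf("desc %u: type %u, %u elements at buffer offset %zu\n",
		       i, d->type, d->elements,
		       (size_t)(data + d->offset - fw_buf));
}

int main(void)
{
	unsigned char buf[64] = { 0 };
	struct dyn_map_hdr hdr = { sizeof(hdr), sizeof(struct desc), 1 };
	struct desc d = { 2 /* say, target-id info */, 16, 0 };

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), &d, sizeof(d));
	walk_dyn_map(buf);
	return 0;
}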
(ld_count > MAX_LOGICAL_DRIVES) { + dev_dbg(&instance->pdev->dev, + "LD count exposed in RAID map in not valid\n"); + return 1; + } + + pDrvRaidMap->totalSize = pFwRaidMap->totalSize; + pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); + pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; + for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) + pDrvRaidMap->ldTgtIdToLd[i] = + (u8)pFwRaidMap->ldTgtIdToLd[i]; + for (i = 0; i < ld_count; i++) { + pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i]; + } + memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo, + sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS); + memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo, + sizeof(struct MR_DEV_HANDLE_INFO) * + MAX_RAIDMAP_PHYSICAL_DEVICES); + } + + return 0; +} + +/* + * This function will validate Map info data provided by FW + */ +u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id) +{ + struct fusion_context *fusion; + struct MR_DRV_RAID_MAP_ALL *drv_map; + struct MR_DRV_RAID_MAP *pDrvRaidMap; + struct LD_LOAD_BALANCE_INFO *lbInfo; + PLD_SPAN_INFO ldSpanInfo; + struct MR_LD_RAID *raid; + u16 num_lds, i; + u16 ld; + u32 expected_size; + + if (MR_PopulateDrvRaidMap(instance, map_id)) + return 0; + + fusion = instance->ctrl_context; + drv_map = fusion->ld_drv_map[(map_id & 1)]; + pDrvRaidMap = &drv_map->raidMap; + + lbInfo = fusion->load_balance_info; + ldSpanInfo = fusion->log_to_span; + + if (instance->max_raid_mapsize) + expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL); + else if (instance->supportmax256vd) + expected_size = sizeof(struct MR_FW_RAID_MAP_EXT); + else + expected_size = struct_size_t(struct MR_FW_RAID_MAP, + ldSpanMap, + le16_to_cpu(pDrvRaidMap->ldCount)); + + if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) { + dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x", + le32_to_cpu(pDrvRaidMap->totalSize)); + dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n", + (unsigned int)expected_size); + dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n", + (unsigned int)sizeof(struct MR_LD_SPAN_MAP), + le32_to_cpu(pDrvRaidMap->totalSize)); + return 0; + } + + if (instance->UnevenSpanSupport) + mr_update_span_set(drv_map, ldSpanInfo); + + if (lbInfo) + mr_update_load_balance_params(drv_map, lbInfo); + + num_lds = le16_to_cpu(drv_map->raidMap.ldCount); + + memcpy(instance->ld_ids_prev, + instance->ld_ids_from_raidmap, + sizeof(instance->ld_ids_from_raidmap)); + memset(instance->ld_ids_from_raidmap, 0xff, MEGASAS_MAX_LD_IDS); + /*Convert Raid capability values to CPU arch */ + for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) { + ld = MR_TargetIdToLdGet(i, drv_map); + + /* For non existing VDs, iterate to next VD*/ + if (ld >= MEGASAS_MAX_SUPPORTED_LD_IDS) + continue; + + raid = MR_LdRaidGet(ld, drv_map); + le32_to_cpus((u32 *)&raid->capability); + instance->ld_ids_from_raidmap[i] = i; + num_lds--; + } + + return 1; +} + +static u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk, + struct MR_DRV_RAID_MAP_ALL *map) +{ + struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map); + struct MR_QUAD_ELEMENT *quad; + struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); + u32 span, j; + + for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) { + + for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) { + quad = &pSpanBlock->block_span_info.quad[j]; + + if (le32_to_cpu(quad->diff) == 0) + return SPAN_INVALID; + if 
(le64_to_cpu(quad->logStart) <= row && row <= + le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart), + le32_to_cpu(quad->diff))) == 0) { + if (span_blk != NULL) { + u64 blk; + blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff)); + + blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift; + *span_blk = blk; + } + return span; + } + } + } + return SPAN_INVALID; +} + +/* +****************************************************************************** +* +* This routine calculates the Span block for given row using spanset. +* +* Inputs : +* instance - HBA instance +* ld - Logical drive number +* row - Row number +* map - LD map +* +* Outputs : +* +* span - Span number +* block - Absolute Block number in the physical disk +* div_error - Devide error code. +*/ + +static u32 mr_spanset_get_span_block(struct megasas_instance *instance, + u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map) +{ + struct fusion_context *fusion = instance->ctrl_context; + struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); + LD_SPAN_SET *span_set; + struct MR_QUAD_ELEMENT *quad; + u32 span, info; + PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; + + for (info = 0; info < MAX_QUAD_DEPTH; info++) { + span_set = &(ldSpanInfo[ld].span_set[info]); + + if (span_set->span_row_data_width == 0) + break; + + if (row > span_set->data_row_end) + continue; + + for (span = 0; span < raid->spanDepth; span++) + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) >= info+1) { + quad = &map->raidMap.ldSpanMap[ld]. + spanBlock[span]. + block_span_info.quad[info]; + if (le32_to_cpu(quad->diff) == 0) + return SPAN_INVALID; + if (le64_to_cpu(quad->logStart) <= row && + row <= le64_to_cpu(quad->logEnd) && + (mega_mod64(row - le64_to_cpu(quad->logStart), + le32_to_cpu(quad->diff))) == 0) { + if (span_blk != NULL) { + u64 blk; + blk = mega_div64_32 + ((row - le64_to_cpu(quad->logStart)), + le32_to_cpu(quad->diff)); + blk = (blk + le64_to_cpu(quad->offsetInSpan)) + << raid->stripeShift; + *span_blk = blk; + } + return span; + } + } + } + return SPAN_INVALID; +} + +/* +****************************************************************************** +* +* This routine calculates the row for given strip using spanset. +* +* Inputs : +* instance - HBA instance +* ld - Logical drive number +* Strip - Strip +* map - LD map +* +* Outputs : +* +* row - row associated with strip +*/ + +static u64 get_row_from_strip(struct megasas_instance *instance, + u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map) +{ + struct fusion_context *fusion = instance->ctrl_context; + struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); + LD_SPAN_SET *span_set; + PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; + u32 info, strip_offset, span, span_offset; + u64 span_set_Strip, span_set_Row, retval; + + for (info = 0; info < MAX_QUAD_DEPTH; info++) { + span_set = &(ldSpanInfo[ld].span_set[info]); + + if (span_set->span_row_data_width == 0) + break; + if (strip > span_set->data_strip_end) + continue; + + span_set_Strip = strip - span_set->data_strip_start; + strip_offset = mega_mod64(span_set_Strip, + span_set->span_row_data_width); + span_set_Row = mega_div64_32(span_set_Strip, + span_set->span_row_data_width) * span_set->diff; + for (span = 0, span_offset = 0; span < raid->spanDepth; span++) + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. 
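The quad test in MR_GetSpanBlock() and mr_spanset_get_span_block() above reduces to a range check plus a modulo test, followed by a shift from strips to blocks. The standalone sketch below redoes that arithmetic in plain integers (endian conversion dropped, quad values invented) so the row-to-physical-block mapping can be followed with concrete numbers.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct quad { uint64_t log_start, log_end, offset_in_span; uint32_t diff; };

/* returns true and fills *blk when 'row' belongs to this quad */
static bool quad_to_block(const struct quad *q, uint64_t row,
			  uint8_t stripe_shift, uint64_t *blk)
{
	if (!q->diff || row < q->log_start || row > q->log_end)
		return false;
	if ((row - q->log_start) % q->diff)
		return false;
	/* which repetition of the quad this row is, plus the span offset,
	 * scaled from strips to blocks by the stripe size */
	*blk = (((row - q->log_start) / q->diff) + q->offset_in_span)
		<< stripe_shift;
	return true;
}

int main(void)
{
	struct quad q = { .log_start = 0, .log_end = 1023,
			  .offset_in_span = 16, .diff = 1 };
	uint64_t blk;

	if (quad_to_block(&q, 10, 3, &blk))	/* stripe = 8 blocks */
		printf("row 10 -> block %llu\n",	/* (10 + 16) << 3 = 208 */
		       (unsigned long long)blk);
	return 0;
}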
+ block_span_info.noElements) >= info+1) { + if (strip_offset >= + span_set->strip_offset[span]) + span_offset++; + else + break; + } + + retval = (span_set->data_row_start + span_set_Row + + (span_offset - 1)); + return retval; + } + return -1LLU; +} + + +/* +****************************************************************************** +* +* This routine calculates the Start Strip for given row using spanset. +* +* Inputs : +* instance - HBA instance +* ld - Logical drive number +* row - Row number +* map - LD map +* +* Outputs : +* +* Strip - Start strip associated with row +*/ + +static u64 get_strip_from_row(struct megasas_instance *instance, + u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map) +{ + struct fusion_context *fusion = instance->ctrl_context; + struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); + LD_SPAN_SET *span_set; + struct MR_QUAD_ELEMENT *quad; + PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; + u32 span, info; + u64 strip; + + for (info = 0; info < MAX_QUAD_DEPTH; info++) { + span_set = &(ldSpanInfo[ld].span_set[info]); + + if (span_set->span_row_data_width == 0) + break; + if (row > span_set->data_row_end) + continue; + + for (span = 0; span < raid->spanDepth; span++) + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) >= info+1) { + quad = &map->raidMap.ldSpanMap[ld]. + spanBlock[span].block_span_info.quad[info]; + if (le64_to_cpu(quad->logStart) <= row && + row <= le64_to_cpu(quad->logEnd) && + mega_mod64((row - le64_to_cpu(quad->logStart)), + le32_to_cpu(quad->diff)) == 0) { + strip = mega_div64_32 + (((row - span_set->data_row_start) + - le64_to_cpu(quad->logStart)), + le32_to_cpu(quad->diff)); + strip *= span_set->span_row_data_width; + strip += span_set->data_strip_start; + strip += span_set->strip_offset[span]; + return strip; + } + } + } + dev_err(&instance->pdev->dev, "get_strip_from_row" + "returns invalid strip for ld=%x, row=%lx\n", + ld, (long unsigned int)row); + return -1; +} + +/* +****************************************************************************** +* +* This routine calculates the Physical Arm for given strip using spanset. +* +* Inputs : +* instance - HBA instance +* ld - Logical drive number +* strip - Strip +* map - LD map +* +* Outputs : +* +* Phys Arm - Phys Arm associated with strip +*/ + +static u32 get_arm_from_strip(struct megasas_instance *instance, + u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map) +{ + struct fusion_context *fusion = instance->ctrl_context; + struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); + LD_SPAN_SET *span_set; + PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; + u32 info, strip_offset, span, span_offset, retval; + + for (info = 0 ; info < MAX_QUAD_DEPTH; info++) { + span_set = &(ldSpanInfo[ld].span_set[info]); + + if (span_set->span_row_data_width == 0) + break; + if (strip > span_set->data_strip_end) + continue; + + strip_offset = (uint)mega_mod64 + ((strip - span_set->data_strip_start), + span_set->span_row_data_width); + + for (span = 0, span_offset = 0; span < raid->spanDepth; span++) + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. 
+ block_span_info.noElements) >= info+1) { + if (strip_offset >= + span_set->strip_offset[span]) + span_offset = + span_set->strip_offset[span]; + else + break; + } + + retval = (strip_offset - span_offset); + return retval; + } + + dev_err(&instance->pdev->dev, "get_arm_from_strip" + "returns invalid arm for ld=%x strip=%lx\n", + ld, (long unsigned int)strip); + + return -1; +} + +/* This Function will return Phys arm */ +static u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe, + struct MR_DRV_RAID_MAP_ALL *map) +{ + struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); + /* Need to check correct default value */ + u32 arm = 0; + + switch (raid->level) { + case 0: + case 5: + case 6: + arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span)); + break; + case 1: + /* start with logical arm */ + arm = get_arm_from_strip(instance, ld, stripe, map); + if (arm != -1U) + arm *= 2; + break; + } + + return arm; +} + + +/* +****************************************************************************** +* +* This routine calculates the arm, span and block for the specified stripe and +* reference in stripe using spanset +* +* Inputs : +* +* ld - Logical drive number +* stripRow - Stripe number +* stripRef - Reference in stripe +* +* Outputs : +* +* span - Span number +* block - Absolute Block number in the physical disk +*/ +static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, + u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info, + struct RAID_CONTEXT *pRAID_Context, + struct MR_DRV_RAID_MAP_ALL *map) +{ + struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); + u32 pd, arRef, r1_alt_pd; + u8 physArm, span; + u64 row; + u8 retval = true; + u64 *pdBlock = &io_info->pdBlock; + __le16 *pDevHandle = &io_info->devHandle; + u8 *pPdInterface = &io_info->pd_interface; + u32 logArm, rowMod, armQ, arm; + + *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID); + + /*Get row and span from io_info for Uneven Span IO.*/ + row = io_info->start_row; + span = io_info->start_span; + + + if (raid->level == 6) { + logArm = get_arm_from_strip(instance, ld, stripRow, map); + if (logArm == -1U) + return false; + rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span)); + armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod; + arm = armQ + 1 + logArm; + if (arm >= SPAN_ROW_SIZE(map, ld, span)) + arm -= SPAN_ROW_SIZE(map, ld, span); + physArm = (u8)arm; + } else + /* Calculate the arm */ + physArm = get_arm(instance, ld, span, stripRow, map); + if (physArm == 0xFF) + return false; + + arRef = MR_LdSpanArrayGet(ld, span, map); + pd = MR_ArPdGet(arRef, physArm, map); + + if (pd != MR_PD_INVALID) { + *pDevHandle = MR_PdDevHandleGet(pd, map); + *pPdInterface = MR_PdInterfaceTypeGet(pd, map); + /* get second pd also for raid 1/10 fast path writes*/ + if ((instance->adapter_type >= VENTURA_SERIES) && + (raid->level == 1) && + !io_info->isRead) { + r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); + if (r1_alt_pd != MR_PD_INVALID) + io_info->r1_alt_dev_handle = + MR_PdDevHandleGet(r1_alt_pd, map); + } + } else { + if ((raid->level >= 5) && + ((instance->adapter_type == THUNDERBOLT_SERIES) || + ((instance->adapter_type == INVADER_SERIES) && + (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) + pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE; + else if (raid->level == 1) { + physArm = physArm + 1; + pd = MR_ArPdGet(arRef, physArm, map); + if (pd != MR_PD_INVALID) { + *pDevHandle = MR_PdDevHandleGet(pd, map); + *pPdInterface = MR_PdInterfaceTypeGet(pd, map); + } + } + } + + *pdBlock += 
stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); + if (instance->adapter_type >= VENTURA_SERIES) { + ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + io_info->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + } else { + pRAID_Context->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + io_info->span_arm = pRAID_Context->span_arm; + } + io_info->pd_after_lb = pd; + return retval; +} + +/* +****************************************************************************** +* +* This routine calculates the arm, span and block for the specified stripe and +* reference in stripe. +* +* Inputs : +* +* ld - Logical drive number +* stripRow - Stripe number +* stripRef - Reference in stripe +* +* Outputs : +* +* span - Span number +* block - Absolute Block number in the physical disk +*/ +static u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, + u16 stripRef, struct IO_REQUEST_INFO *io_info, + struct RAID_CONTEXT *pRAID_Context, + struct MR_DRV_RAID_MAP_ALL *map) +{ + struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); + u32 pd, arRef, r1_alt_pd; + u8 physArm, span; + u64 row; + u8 retval = true; + u64 *pdBlock = &io_info->pdBlock; + __le16 *pDevHandle = &io_info->devHandle; + u8 *pPdInterface = &io_info->pd_interface; + + *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID); + + row = mega_div64_32(stripRow, raid->rowDataSize); + + if (raid->level == 6) { + /* logical arm within row */ + u32 logArm = mega_mod64(stripRow, raid->rowDataSize); + u32 rowMod, armQ, arm; + + if (raid->rowSize == 0) + return false; + /* get logical row mod */ + rowMod = mega_mod64(row, raid->rowSize); + armQ = raid->rowSize-1-rowMod; /* index of Q drive */ + arm = armQ+1+logArm; /* data always logically follows Q */ + if (arm >= raid->rowSize) /* handle wrap condition */ + arm -= raid->rowSize; + physArm = (u8)arm; + } else { + if (raid->modFactor == 0) + return false; + physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, + raid->modFactor), + map); + } + + if (raid->spanDepth == 1) { + span = 0; + *pdBlock = row << raid->stripeShift; + } else { + span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map); + if (span == SPAN_INVALID) + return false; + } + + /* Get the array on which this span is present */ + arRef = MR_LdSpanArrayGet(ld, span, map); + pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */ + + if (pd != MR_PD_INVALID) { + /* Get dev handle from Pd. */ + *pDevHandle = MR_PdDevHandleGet(pd, map); + *pPdInterface = MR_PdInterfaceTypeGet(pd, map); + /* get second pd also for raid 1/10 fast path writes*/ + if ((instance->adapter_type >= VENTURA_SERIES) && + (raid->level == 1) && + !io_info->isRead) { + r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); + if (r1_alt_pd != MR_PD_INVALID) + io_info->r1_alt_dev_handle = + MR_PdDevHandleGet(r1_alt_pd, map); + } + } else { + if ((raid->level >= 5) && + ((instance->adapter_type == THUNDERBOLT_SERIES) || + ((instance->adapter_type == INVADER_SERIES) && + (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) + pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE; + else if (raid->level == 1) { + /* Get alternate Pd. 
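For RAID-6, mr_spanset_get_phy_params() and MR_GetPhyParams() above rotate the Q-parity arm one position per row and place the data arms logically after it, wrapping at the row width. A small standalone rendering of that rotation, using an invented 4-arm row so the wrap-around can be checked by hand.

#include <stdio.h>
#include <stdint.h>

static uint8_t r6_phys_arm(uint64_t row, uint32_t row_size, uint32_t log_arm)
{
	uint32_t row_mod = (uint32_t)(row % row_size);
	uint32_t arm_q   = row_size - 1 - row_mod;	/* index of the Q drive */
	uint32_t arm     = arm_q + 1 + log_arm;		/* data follows Q       */

	if (arm >= row_size)				/* wrap within the row  */
		arm -= row_size;
	return (uint8_t)arm;
}

int main(void)
{
	/* 4 arms per row: row 0 puts Q on arm 3, row 1 on arm 2, ... */
	printf("%u %u %u\n",
	       r6_phys_arm(0, 4, 0),	/* row 0: Q=3, data arm 0 lands on 0 */
	       r6_phys_arm(1, 4, 0),	/* row 1: Q=2, data arm 0 lands on 3 */
	       r6_phys_arm(1, 4, 1));	/* row 1: Q=2, data arm 1 lands on 0 */
	return 0;
}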
*/ + physArm = physArm + 1; + pd = MR_ArPdGet(arRef, physArm, map); + if (pd != MR_PD_INVALID) { + /* Get dev handle from Pd */ + *pDevHandle = MR_PdDevHandleGet(pd, map); + *pPdInterface = MR_PdInterfaceTypeGet(pd, map); + } + } + } + + *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); + if (instance->adapter_type >= VENTURA_SERIES) { + ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + io_info->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + } else { + pRAID_Context->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + io_info->span_arm = pRAID_Context->span_arm; + } + io_info->pd_after_lb = pd; + return retval; +} + +/* + * mr_get_phy_params_r56_rmw - Calculate parameters for R56 CTIO write operation + * @instance: Adapter soft state + * @ld: LD index + * @stripNo: Strip Number + * @io_info: IO info structure pointer + * pRAID_Context: RAID context pointer + * map: RAID map pointer + * + * This routine calculates the logical arm, data Arm, row number and parity arm + * for R56 CTIO write operation. + */ +static void mr_get_phy_params_r56_rmw(struct megasas_instance *instance, + u32 ld, u64 stripNo, + struct IO_REQUEST_INFO *io_info, + struct RAID_CONTEXT_G35 *pRAID_Context, + struct MR_DRV_RAID_MAP_ALL *map) +{ + struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); + u8 span, dataArms, arms, dataArm, logArm; + s8 rightmostParityArm, PParityArm; + u64 rowNum; + u64 *pdBlock = &io_info->pdBlock; + + dataArms = raid->rowDataSize; + arms = raid->rowSize; + + rowNum = mega_div64_32(stripNo, dataArms); + /* parity disk arm, first arm is 0 */ + rightmostParityArm = (arms - 1) - mega_mod64(rowNum, arms); + + /* logical arm within row */ + logArm = mega_mod64(stripNo, dataArms); + /* physical arm for data */ + dataArm = mega_mod64((rightmostParityArm + 1 + logArm), arms); + + if (raid->spanDepth == 1) { + span = 0; + } else { + span = (u8)MR_GetSpanBlock(ld, rowNum, pdBlock, map); + if (span == SPAN_INVALID) + return; + } + + if (raid->level == 6) { + /* P Parity arm, note this can go negative adjust if negative */ + PParityArm = (arms - 2) - mega_mod64(rowNum, arms); + + if (PParityArm < 0) + PParityArm += arms; + + /* rightmostParityArm is P-Parity for RAID 5 and Q-Parity for RAID */ + pRAID_Context->flow_specific.r56_arm_map = rightmostParityArm; + pRAID_Context->flow_specific.r56_arm_map |= + (u16)(PParityArm << RAID_CTX_R56_P_ARM_SHIFT); + } else { + pRAID_Context->flow_specific.r56_arm_map |= + (u16)(rightmostParityArm << RAID_CTX_R56_P_ARM_SHIFT); + } + + pRAID_Context->reg_lock_row_lba = cpu_to_le64(rowNum); + pRAID_Context->flow_specific.r56_arm_map |= + (u16)(logArm << RAID_CTX_R56_LOG_ARM_SHIFT); + cpu_to_le16s(&pRAID_Context->flow_specific.r56_arm_map); + pRAID_Context->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | dataArm; + pRAID_Context->raid_flags = (MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD << + MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); + + return; +} + +/* +****************************************************************************** +* +* MR_BuildRaidContext function +* +* This function will initiate command processing. The start/end row and strip +* information is calculated then the lock is acquired. 
+* This function will return 0 if region lock was acquired OR return num strips +*/ +u8 +MR_BuildRaidContext(struct megasas_instance *instance, + struct IO_REQUEST_INFO *io_info, + struct RAID_CONTEXT *pRAID_Context, + struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN) +{ + struct fusion_context *fusion; + struct MR_LD_RAID *raid; + u32 stripSize, stripe_mask; + u64 endLba, endStrip, endRow, start_row, start_strip; + u64 regStart; + u32 regSize; + u8 num_strips, numRows; + u16 ref_in_start_stripe, ref_in_end_stripe; + u64 ldStartBlock; + u32 numBlocks, ldTgtId; + u8 isRead; + u8 retval = 0; + u8 startlba_span = SPAN_INVALID; + u64 *pdBlock = &io_info->pdBlock; + u16 ld; + + ldStartBlock = io_info->ldStartBlock; + numBlocks = io_info->numBlocks; + ldTgtId = io_info->ldTgtId; + isRead = io_info->isRead; + io_info->IoforUnevenSpan = 0; + io_info->start_span = SPAN_INVALID; + fusion = instance->ctrl_context; + + ld = MR_TargetIdToLdGet(ldTgtId, map); + raid = MR_LdRaidGet(ld, map); + /*check read ahead bit*/ + io_info->ra_capable = raid->capability.ra_capable; + + /* + * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero + * return FALSE + */ + if (raid->rowDataSize == 0) { + if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0) + return false; + else if (instance->UnevenSpanSupport) { + io_info->IoforUnevenSpan = 1; + } else { + dev_info(&instance->pdev->dev, + "raid->rowDataSize is 0, but has SPAN[0]" + "rowDataSize = 0x%0x," + "but there is _NO_ UnevenSpanSupport\n", + MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize); + return false; + } + } + + stripSize = 1 << raid->stripeShift; + stripe_mask = stripSize-1; + + io_info->data_arms = raid->rowDataSize; + + /* + * calculate starting row and stripe, and number of strips and rows + */ + start_strip = ldStartBlock >> raid->stripeShift; + ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask); + endLba = ldStartBlock + numBlocks - 1; + ref_in_end_stripe = (u16)(endLba & stripe_mask); + endStrip = endLba >> raid->stripeShift; + num_strips = (u8)(endStrip - start_strip + 1); /* End strip */ + + if (io_info->IoforUnevenSpan) { + start_row = get_row_from_strip(instance, ld, start_strip, map); + endRow = get_row_from_strip(instance, ld, endStrip, map); + if (start_row == -1ULL || endRow == -1ULL) { + dev_info(&instance->pdev->dev, "return from %s %d." + "Send IO w/o region lock.\n", + __func__, __LINE__); + return false; + } + + if (raid->spanDepth == 1) { + startlba_span = 0; + *pdBlock = start_row << raid->stripeShift; + } else + startlba_span = (u8)mr_spanset_get_span_block(instance, + ld, start_row, pdBlock, map); + if (startlba_span == SPAN_INVALID) { + dev_info(&instance->pdev->dev, "return from %s %d" + "for row 0x%llx,start strip %llx" + "endSrip %llx\n", __func__, __LINE__, + (unsigned long long)start_row, + (unsigned long long)start_strip, + (unsigned long long)endStrip); + return false; + } + io_info->start_span = startlba_span; + io_info->start_row = start_row; + } else { + start_row = mega_div64_32(start_strip, raid->rowDataSize); + endRow = mega_div64_32(endStrip, raid->rowDataSize); + } + numRows = (u8)(endRow - start_row + 1); + + /* + * calculate region info. 
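The strip and row bookkeeping computed in this function feeds the region-lock start and size derived further down. The standalone calculation below runs the same shifts and masks on an invented 128-block stripe so the intermediate values (start/end strip, references inside the first and last strip, strip count) can be verified with concrete numbers.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t  stripe_shift = 7;		/* 128-block strips, sample value */
	const uint64_t ld_start = 1000, num_blocks = 300;
	const uint32_t stripe_mask = (1u << stripe_shift) - 1;

	uint64_t start_strip  = ld_start >> stripe_shift;		/* 7    */
	uint16_t ref_in_start = (uint16_t)(ld_start & stripe_mask);	/* 104  */
	uint64_t end_lba      = ld_start + num_blocks - 1;		/* 1299 */
	uint16_t ref_in_end   = (uint16_t)(end_lba & stripe_mask);	/* 19   */
	uint64_t end_strip    = end_lba >> stripe_shift;		/* 10   */
	uint8_t  num_strips   = (uint8_t)(end_strip - start_strip + 1);	/* 4    */

	printf("strips %llu..%llu (%u), offsets %u/%u in first/last strip\n",
	       (unsigned long long)start_strip, (unsigned long long)end_strip,
	       num_strips, ref_in_start, ref_in_end);
	return 0;
}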
+ */ + + /* assume region is at the start of the first row */ + regStart = start_row << raid->stripeShift; + /* assume this IO needs the full row - we'll adjust if not true */ + regSize = stripSize; + + io_info->do_fp_rlbypass = raid->capability.fpBypassRegionLock; + + /* Check if we can send this I/O via FastPath */ + if (raid->capability.fpCapable) { + if (isRead) + io_info->fpOkForIo = (raid->capability.fpReadCapable && + ((num_strips == 1) || + raid->capability. + fpReadAcrossStripe)); + else + io_info->fpOkForIo = (raid->capability.fpWriteCapable && + ((num_strips == 1) || + raid->capability. + fpWriteAcrossStripe)); + } else + io_info->fpOkForIo = false; + + if (numRows == 1) { + /* single-strip IOs can always lock only the data needed */ + if (num_strips == 1) { + regStart += ref_in_start_stripe; + regSize = numBlocks; + } + /* multi-strip IOs always need to full stripe locked */ + } else if (io_info->IoforUnevenSpan == 0) { + /* + * For Even span region lock optimization. + * If the start strip is the last in the start row + */ + if (start_strip == (start_row + 1) * raid->rowDataSize - 1) { + regStart += ref_in_start_stripe; + /* initialize count to sectors from startref to end + of strip */ + regSize = stripSize - ref_in_start_stripe; + } + + /* add complete rows in the middle of the transfer */ + if (numRows > 2) + regSize += (numRows-2) << raid->stripeShift; + + /* if IO ends within first strip of last row*/ + if (endStrip == endRow*raid->rowDataSize) + regSize += ref_in_end_stripe+1; + else + regSize += stripSize; + } else { + /* + * For Uneven span region lock optimization. + * If the start strip is the last in the start row + */ + if (start_strip == (get_strip_from_row(instance, ld, start_row, map) + + SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) { + regStart += ref_in_start_stripe; + /* initialize count to sectors from + * startRef to end of strip + */ + regSize = stripSize - ref_in_start_stripe; + } + /* Add complete rows in the middle of the transfer*/ + + if (numRows > 2) + /* Add complete rows in the middle of the transfer*/ + regSize += (numRows-2) << raid->stripeShift; + + /* if IO ends within first strip of last row */ + if (endStrip == get_strip_from_row(instance, ld, endRow, map)) + regSize += ref_in_end_stripe + 1; + else + regSize += stripSize; + } + + pRAID_Context->timeout_value = + cpu_to_le16(raid->fpIoTimeoutForLd ? + raid->fpIoTimeoutForLd : + map->raidMap.fpPdIoTimeoutSec); + if (instance->adapter_type == INVADER_SERIES) + pRAID_Context->reg_lock_flags = (isRead) ? + raid->regTypeReqOnRead : raid->regTypeReqOnWrite; + else if (instance->adapter_type == THUNDERBOLT_SERIES) + pRAID_Context->reg_lock_flags = (isRead) ? + REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; + pRAID_Context->virtual_disk_tgt_id = raid->targetId; + pRAID_Context->reg_lock_row_lba = cpu_to_le64(regStart); + pRAID_Context->reg_lock_length = cpu_to_le32(regSize); + pRAID_Context->config_seq_num = raid->seqNum; + /* save pointer to raid->LUN array */ + *raidLUN = raid->LUN; + + /* Aero R5/6 Division Offload for WRITE */ + if (fusion->r56_div_offload && (raid->level >= 5) && !isRead) { + mr_get_phy_params_r56_rmw(instance, ld, start_strip, io_info, + (struct RAID_CONTEXT_G35 *)pRAID_Context, + map); + return true; + } + + /*Get Phy Params only if FP capable, or else leave it to MR firmware + to do the calculation.*/ + if (io_info->fpOkForIo) { + retval = io_info->IoforUnevenSpan ? 
+ mr_spanset_get_phy_params(instance, ld, + start_strip, ref_in_start_stripe, + io_info, pRAID_Context, map) : + MR_GetPhyParams(instance, ld, start_strip, + ref_in_start_stripe, io_info, + pRAID_Context, map); + /* If IO on an invalid Pd, then FP is not possible.*/ + if (io_info->devHandle == MR_DEVHANDLE_INVALID) + io_info->fpOkForIo = false; + return retval; + } else if (isRead) { + uint stripIdx; + for (stripIdx = 0; stripIdx < num_strips; stripIdx++) { + retval = io_info->IoforUnevenSpan ? + mr_spanset_get_phy_params(instance, ld, + start_strip + stripIdx, + ref_in_start_stripe, io_info, + pRAID_Context, map) : + MR_GetPhyParams(instance, ld, + start_strip + stripIdx, ref_in_start_stripe, + io_info, pRAID_Context, map); + if (!retval) + return true; + } + } + return true; +} + +/* +****************************************************************************** +* +* This routine pepare spanset info from Valid Raid map and store it into +* local copy of ldSpanInfo per instance data structure. +* +* Inputs : +* map - LD map +* ldSpanInfo - ldSpanInfo per HBA instance +* +*/ +void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map, + PLD_SPAN_INFO ldSpanInfo) +{ + u8 span, count; + u32 element, span_row_width; + u64 span_row; + struct MR_LD_RAID *raid; + LD_SPAN_SET *span_set, *span_set_prev; + struct MR_QUAD_ELEMENT *quad; + int ldCount; + u16 ld; + + + for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { + ld = MR_TargetIdToLdGet(ldCount, map); + if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1)) + continue; + raid = MR_LdRaidGet(ld, map); + for (element = 0; element < MAX_QUAD_DEPTH; element++) { + for (span = 0; span < raid->spanDepth; span++) { + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) < + element + 1) + continue; + span_set = &(ldSpanInfo[ld].span_set[element]); + quad = &map->raidMap.ldSpanMap[ld]. + spanBlock[span].block_span_info. + quad[element]; + + span_set->diff = le32_to_cpu(quad->diff); + + for (count = 0, span_row_width = 0; + count < raid->spanDepth; count++) { + if (le32_to_cpu(map->raidMap.ldSpanMap[ld]. + spanBlock[count]. + block_span_info. + noElements) >= element + 1) { + span_set->strip_offset[count] = + span_row_width; + span_row_width += + MR_LdSpanPtrGet + (ld, count, map)->spanRowDataSize; + } + } + + span_set->span_row_data_width = span_row_width; + span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) - + le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)), + le32_to_cpu(quad->diff)); + + if (element == 0) { + span_set->log_start_lba = 0; + span_set->log_end_lba = + ((span_row << raid->stripeShift) + * span_row_width) - 1; + + span_set->span_row_start = 0; + span_set->span_row_end = span_row - 1; + + span_set->data_strip_start = 0; + span_set->data_strip_end = + (span_row * span_row_width) - 1; + + span_set->data_row_start = 0; + span_set->data_row_end = + (span_row * le32_to_cpu(quad->diff)) - 1; + } else { + span_set_prev = &(ldSpanInfo[ld]. 
+ span_set[element - 1]); + span_set->log_start_lba = + span_set_prev->log_end_lba + 1; + span_set->log_end_lba = + span_set->log_start_lba + + ((span_row << raid->stripeShift) + * span_row_width) - 1; + + span_set->span_row_start = + span_set_prev->span_row_end + 1; + span_set->span_row_end = + span_set->span_row_start + span_row - 1; + + span_set->data_strip_start = + span_set_prev->data_strip_end + 1; + span_set->data_strip_end = + span_set->data_strip_start + + (span_row * span_row_width) - 1; + + span_set->data_row_start = + span_set_prev->data_row_end + 1; + span_set->data_row_end = + span_set->data_row_start + + (span_row * le32_to_cpu(quad->diff)) - 1; + } + break; + } + if (span == raid->spanDepth) + break; + } + } +} + +void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map, + struct LD_LOAD_BALANCE_INFO *lbInfo) +{ + int ldCount; + u16 ld; + struct MR_LD_RAID *raid; + + if (lb_pending_cmds > 128 || lb_pending_cmds < 1) + lb_pending_cmds = LB_PENDING_CMDS_DEFAULT; + + for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { + ld = MR_TargetIdToLdGet(ldCount, drv_map); + if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) { + lbInfo[ldCount].loadBalanceFlag = 0; + continue; + } + + raid = MR_LdRaidGet(ld, drv_map); + if ((raid->level != 1) || + (raid->ldState != MR_LD_STATE_OPTIMAL)) { + lbInfo[ldCount].loadBalanceFlag = 0; + continue; + } + lbInfo[ldCount].loadBalanceFlag = 1; + } +} + +static u8 megasas_get_best_arm_pd(struct megasas_instance *instance, + struct LD_LOAD_BALANCE_INFO *lbInfo, + struct IO_REQUEST_INFO *io_info, + struct MR_DRV_RAID_MAP_ALL *drv_map) +{ + struct MR_LD_RAID *raid; + u16 pd1_dev_handle; + u16 pend0, pend1, ld; + u64 diff0, diff1; + u8 bestArm, pd0, pd1, span, arm; + u32 arRef, span_row_size; + + u64 block = io_info->ldStartBlock; + u32 count = io_info->numBlocks; + + span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK) + >> RAID_CTX_SPANARM_SPAN_SHIFT); + arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK); + + ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map); + raid = MR_LdRaidGet(ld, drv_map); + span_row_size = instance->UnevenSpanSupport ? + SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize; + + arRef = MR_LdSpanArrayGet(ld, span, drv_map); + pd0 = MR_ArPdGet(arRef, arm, drv_map); + pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ? + (arm + 1 - span_row_size) : arm + 1, drv_map); + + /* Get PD1 Dev Handle */ + + pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map); + + if (pd1_dev_handle == MR_DEVHANDLE_INVALID) { + bestArm = arm; + } else { + /* get the pending cmds for the data and mirror arms */ + pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]); + pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]); + + /* Determine the disk whose head is nearer to the req. block */ + diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]); + diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]); + bestArm = (diff0 <= diff1 ? arm : arm ^ 1); + + /* Make balance count from 16 to 4 to + * keep driver in sync with Firmware + */ + if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) || + (bestArm != arm && pend1 > pend0 + lb_pending_cmds)) + bestArm ^= 1; + + /* Update the last accessed block on the correct pd */ + io_info->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm; + io_info->pd_after_lb = (bestArm == arm) ? 
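The RAID-1 arm selection in megasas_get_best_arm_pd() above weighs head position against outstanding commands: prefer the arm whose last accessed block is nearer the request, but fall back to the other arm when the preferred one is already busier by more than lb_pending_cmds. The sketch below reduces that decision to a two-arm choice over plain integers; all sample values are placeholders.

#include <stdio.h>
#include <stdint.h>

#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))

static unsigned int pick_arm(uint64_t req_block,
			     uint64_t last_blk0, uint64_t last_blk1,
			     unsigned int pend0, unsigned int pend1,
			     unsigned int lb_pending_cmds)
{
	/* prefer the arm whose head is nearer the requested block ... */
	unsigned int best = (ABS_DIFF(req_block, last_blk0) <=
			     ABS_DIFF(req_block, last_blk1)) ? 0 : 1;

	/* ... unless it is already far busier than the other arm */
	if ((best == 0 && pend0 > pend1 + lb_pending_cmds) ||
	    (best == 1 && pend1 > pend0 + lb_pending_cmds))
		best ^= 1;
	return best;
}

int main(void)
{
	/* arm 0 is nearer, but 9 > 2 + 4 outstanding, so arm 1 wins */
	printf("chosen arm: %u\n", pick_arm(1000, 990, 5000, 9, 2, 4));
	return 0;
}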
pd0 : pd1; + } + + lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1; + return io_info->pd_after_lb; +} + +__le16 get_updated_dev_handle(struct megasas_instance *instance, + struct LD_LOAD_BALANCE_INFO *lbInfo, + struct IO_REQUEST_INFO *io_info, + struct MR_DRV_RAID_MAP_ALL *drv_map) +{ + u8 arm_pd; + __le16 devHandle; + + /* get best new arm (PD ID) */ + arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info, drv_map); + devHandle = MR_PdDevHandleGet(arm_pd, drv_map); + io_info->pd_interface = MR_PdInterfaceTypeGet(arm_pd, drv_map); + atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]); + + return devHandle; +} diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c new file mode 100644 index 000000000..8a83f3fc2 --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -0,0 +1,5375 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Linux MegaRAID driver for SAS based RAID controllers + * + * Copyright (c) 2009-2013 LSI Corporation + * Copyright (c) 2013-2016 Avago Technologies + * Copyright (c) 2016-2018 Broadcom Inc. + * + * FILE: megaraid_sas_fusion.c + * + * Authors: Broadcom Inc. + * Sumant Patro + * Adam Radford + * Kashyap Desai + * Sumit Saxena + * + * Send feedback to: megaraidlinux.pdl@broadcom.com + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "megaraid_sas_fusion.h" +#include "megaraid_sas.h" + + +extern void +megasas_complete_cmd(struct megasas_instance *instance, + struct megasas_cmd *cmd, u8 alt_status); +int +wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, + int seconds); + +int +megasas_clear_intr_fusion(struct megasas_instance *instance); + +int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); + +extern u32 megasas_dbg_lvl; +int megasas_sriov_start_heartbeat(struct megasas_instance *instance, + int initial); +extern struct megasas_mgmt_info megasas_mgmt_info; +extern unsigned int resetwaittime; +extern unsigned int dual_qdepth_disable; +static void megasas_free_rdpq_fusion(struct megasas_instance *instance); +static void megasas_free_reply_fusion(struct megasas_instance *instance); +static inline +void megasas_configure_queue_sizes(struct megasas_instance *instance); +static void megasas_fusion_crash_dump(struct megasas_instance *instance); + +/** + * megasas_adp_reset_wait_for_ready - initiate chip reset and wait for + * controller to come to ready state + * @instance: adapter's soft state + * @do_adp_reset: If true, do a chip reset + * @ocr_context: If called from OCR context this will + * be set to 1, else 0 + * + * This function initiates a chip reset followed by a wait for controller to + * transition to ready state. 
+ * During this, driver will block all access to PCI config space from userspace + */ +int +megasas_adp_reset_wait_for_ready(struct megasas_instance *instance, + bool do_adp_reset, + int ocr_context) +{ + int ret = FAILED; + + /* + * Block access to PCI config space from userspace + * when diag reset is initiated from driver + */ + if (megasas_dbg_lvl & OCR_DEBUG) + dev_info(&instance->pdev->dev, + "Block access to PCI config space %s %d\n", + __func__, __LINE__); + + pci_cfg_access_lock(instance->pdev); + + if (do_adp_reset) { + if (instance->instancet->adp_reset + (instance, instance->reg_set)) + goto out; + } + + /* Wait for FW to become ready */ + if (megasas_transition_to_ready(instance, ocr_context)) { + dev_warn(&instance->pdev->dev, + "Failed to transition controller to ready for scsi%d.\n", + instance->host->host_no); + goto out; + } + + ret = SUCCESS; +out: + if (megasas_dbg_lvl & OCR_DEBUG) + dev_info(&instance->pdev->dev, + "Unlock access to PCI config space %s %d\n", + __func__, __LINE__); + + pci_cfg_access_unlock(instance->pdev); + + return ret; +} + +/** + * megasas_check_same_4gb_region - check if allocation + * crosses same 4GB boundary or not + * @instance: adapter's soft instance + * @start_addr: start address of DMA allocation + * @size: size of allocation in bytes + * @return: true : allocation does not cross same + * 4GB boundary + * false: allocation crosses same + * 4GB boundary + */ +static inline bool megasas_check_same_4gb_region + (struct megasas_instance *instance, dma_addr_t start_addr, size_t size) +{ + dma_addr_t end_addr; + + end_addr = start_addr + size; + + if (upper_32_bits(start_addr) != upper_32_bits(end_addr)) { + dev_err(&instance->pdev->dev, + "Failed to get same 4GB boundary: start_addr: 0x%llx end_addr: 0x%llx\n", + (unsigned long long)start_addr, + (unsigned long long)end_addr); + return false; + } + + return true; +} + +/** + * megasas_enable_intr_fusion - Enables interrupts + * @instance: adapter's soft instance + */ +static void +megasas_enable_intr_fusion(struct megasas_instance *instance) +{ + struct megasas_register_set __iomem *regs; + regs = instance->reg_set; + + instance->mask_interrupts = 0; + /* For Thunderbolt/Invader also clear intr on enable */ + writel(~0, ®s->outbound_intr_status); + readl(®s->outbound_intr_status); + + writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); + + /* Dummy readl to force pci flush */ + dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n", + __func__, readl(®s->outbound_intr_mask)); +} + +/** + * megasas_disable_intr_fusion - Disables interrupt + * @instance: adapter's soft instance + */ +static void +megasas_disable_intr_fusion(struct megasas_instance *instance) +{ + u32 mask = 0xFFFFFFFF; + struct megasas_register_set __iomem *regs; + regs = instance->reg_set; + instance->mask_interrupts = 1; + + writel(mask, ®s->outbound_intr_mask); + /* Dummy readl to force pci flush */ + dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n", + __func__, readl(®s->outbound_intr_mask)); +} + +int +megasas_clear_intr_fusion(struct megasas_instance *instance) +{ + u32 status; + struct megasas_register_set __iomem *regs; + regs = instance->reg_set; + /* + * Check if it is our interrupt + */ + status = megasas_readl(instance, + ®s->outbound_intr_status); + + if (status & 1) { + writel(status, ®s->outbound_intr_status); + readl(®s->outbound_intr_status); + return 1; + } + if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) + return 0; + + return 1; +} + +static 
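megasas_check_same_4gb_region() above boils down to comparing the upper 32 bits of the allocation's first and one-past-last byte: if they differ, the buffer straddles a 4 GB window. A standalone restatement of that test with made-up DMA addresses.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool same_4gb_region(uint64_t start, uint64_t size)
{
	uint64_t end = start + size;

	/* same window iff the high halves of start and end match */
	return (start >> 32) == (end >> 32);
}

int main(void)
{
	/* hypothetical DMA addresses: the second allocation crosses 4 GB */
	printf("%d\n", (int)same_4gb_region(0x0000000080000000ULL, 0x100000)); /* 1 */
	printf("%d\n", (int)same_4gb_region(0x00000000fff00000ULL, 0x200000)); /* 0 */
	return 0;
}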
inline void +megasas_sdev_busy_inc(struct megasas_instance *instance, + struct scsi_cmnd *scmd) +{ + if (instance->perf_mode == MR_BALANCED_PERF_MODE) { + struct MR_PRIV_DEVICE *mr_device_priv_data = + scmd->device->hostdata; + atomic_inc(&mr_device_priv_data->sdev_priv_busy); + } +} + +static inline void +megasas_sdev_busy_dec(struct megasas_instance *instance, + struct scsi_cmnd *scmd) +{ + if (instance->perf_mode == MR_BALANCED_PERF_MODE) { + struct MR_PRIV_DEVICE *mr_device_priv_data = + scmd->device->hostdata; + atomic_dec(&mr_device_priv_data->sdev_priv_busy); + } +} + +static inline int +megasas_sdev_busy_read(struct megasas_instance *instance, + struct scsi_cmnd *scmd) +{ + if (instance->perf_mode == MR_BALANCED_PERF_MODE) { + struct MR_PRIV_DEVICE *mr_device_priv_data = + scmd->device->hostdata; + return atomic_read(&mr_device_priv_data->sdev_priv_busy); + } + return 0; +} + +/** + * megasas_get_cmd_fusion - Get a command from the free pool + * @instance: Adapter soft state + * @blk_tag: Command tag + * + * Returns a blk_tag indexed mpt frame + */ +inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance + *instance, u32 blk_tag) +{ + struct fusion_context *fusion; + + fusion = instance->ctrl_context; + return fusion->cmd_list[blk_tag]; +} + +/** + * megasas_return_cmd_fusion - Return a cmd to free command pool + * @instance: Adapter soft state + * @cmd: Command packet to be returned to free command pool + */ +inline void megasas_return_cmd_fusion(struct megasas_instance *instance, + struct megasas_cmd_fusion *cmd) +{ + cmd->scmd = NULL; + memset(cmd->io_request, 0, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE); + cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; + cmd->cmd_completed = false; +} + +/** + * megasas_write_64bit_req_desc - PCI writes 64bit request descriptor + * @instance: Adapter soft state + * @req_desc: 64bit Request descriptor + */ +static void +megasas_write_64bit_req_desc(struct megasas_instance *instance, + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) +{ +#if defined(writeq) && defined(CONFIG_64BIT) + u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | + le32_to_cpu(req_desc->u.low)); + writeq(req_data, &instance->reg_set->inbound_low_queue_port); +#else + unsigned long flags; + spin_lock_irqsave(&instance->hba_lock, flags); + writel(le32_to_cpu(req_desc->u.low), + &instance->reg_set->inbound_low_queue_port); + writel(le32_to_cpu(req_desc->u.high), + &instance->reg_set->inbound_high_queue_port); + spin_unlock_irqrestore(&instance->hba_lock, flags); +#endif +} + +/** + * megasas_fire_cmd_fusion - Sends command to the FW + * @instance: Adapter soft state + * @req_desc: 32bit or 64bit Request descriptor + * + * Perform PCI Write. AERO SERIES supports 32 bit Descriptor. + * Prior to AERO_SERIES support 64 bit Descriptor. + */ +static void +megasas_fire_cmd_fusion(struct megasas_instance *instance, + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) +{ + if (instance->atomic_desc_support) + writel(le32_to_cpu(req_desc->u.low), + &instance->reg_set->inbound_single_queue_port); + else + megasas_write_64bit_req_desc(instance, req_desc); +} + +/** + * megasas_fusion_update_can_queue - Do all Adapter Queue depth related calculations here + * @instance: Adapter soft state + * @fw_boot_context: Whether this function called during probe or after OCR + * + * This function is only for fusion controllers. + * Update host can queue, if firmware downgrade max supported firmware commands. 
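+ * In that downgrade (OCR) case, for a firmware-reported depth of N the code
+ * below keeps N - 1 commands and reserves the driver's internal and IOCTL
+ * slots before updating host->can_queue, i.e. roughly
+ *
+ *	cur_can_queue = (N - 1) - (MEGASAS_FUSION_INTERNAL_CMDS +
+ *				   MEGASAS_FUSION_IOCTL_CMDS);
+ *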
+ * Firmware upgrade case will be skipped because underlying firmware has + * more resource than exposed to the OS. + * + */ +static void +megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context) +{ + u16 cur_max_fw_cmds = 0; + u16 ldio_threshold = 0; + + /* ventura FW does not fill outbound_scratch_pad_2 with queue depth */ + if (instance->adapter_type < VENTURA_SERIES) + cur_max_fw_cmds = + megasas_readl(instance, + &instance->reg_set->outbound_scratch_pad_2) & 0x00FFFF; + + if (dual_qdepth_disable || !cur_max_fw_cmds) + cur_max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF; + else + ldio_threshold = + (instance->instancet->read_fw_status_reg(instance) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS; + + dev_info(&instance->pdev->dev, + "Current firmware supports maximum commands: %d\t LDIO threshold: %d\n", + cur_max_fw_cmds, ldio_threshold); + + if (fw_boot_context == OCR_CONTEXT) { + cur_max_fw_cmds = cur_max_fw_cmds - 1; + if (cur_max_fw_cmds < instance->max_fw_cmds) { + instance->cur_can_queue = + cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS + + MEGASAS_FUSION_IOCTL_CMDS); + instance->host->can_queue = instance->cur_can_queue; + instance->ldio_threshold = ldio_threshold; + } + } else { + instance->max_fw_cmds = cur_max_fw_cmds; + instance->ldio_threshold = ldio_threshold; + + if (reset_devices) + instance->max_fw_cmds = min(instance->max_fw_cmds, + (u16)MEGASAS_KDUMP_QUEUE_DEPTH); + /* + * Reduce the max supported cmds by 1. This is to ensure that the + * reply_q_sz (1 more than the max cmd that driver may send) + * does not exceed max cmds that the FW can support + */ + instance->max_fw_cmds = instance->max_fw_cmds-1; + } +} + +static inline void +megasas_get_msix_index(struct megasas_instance *instance, + struct scsi_cmnd *scmd, + struct megasas_cmd_fusion *cmd, + u8 data_arms) +{ + if (instance->perf_mode == MR_BALANCED_PERF_MODE && + (megasas_sdev_busy_read(instance, scmd) > + (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))) { + cmd->request_desc->SCSIIO.MSIxIndex = + mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) / + MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start); + } else if (instance->msix_load_balance) { + cmd->request_desc->SCSIIO.MSIxIndex = + (mega_mod64(atomic64_add_return(1, &instance->total_io_count), + instance->msix_vectors)); + } else if (instance->host->nr_hw_queues > 1) { + u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + + cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) + + instance->low_latency_index_start; + } else { + cmd->request_desc->SCSIIO.MSIxIndex = + instance->reply_map[raw_smp_processor_id()]; + } +} + +/** + * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool + * @instance: Adapter soft state + */ +void +megasas_free_cmds_fusion(struct megasas_instance *instance) +{ + int i; + struct fusion_context *fusion = instance->ctrl_context; + struct megasas_cmd_fusion *cmd; + + if (fusion->sense) + dma_pool_free(fusion->sense_dma_pool, fusion->sense, + fusion->sense_phys_addr); + + /* SG */ + if (fusion->cmd_list) { + for (i = 0; i < instance->max_mpt_cmds; i++) { + cmd = fusion->cmd_list[i]; + if (cmd) { + if (cmd->sg_frame) + dma_pool_free(fusion->sg_dma_pool, + cmd->sg_frame, + cmd->sg_frame_phys_addr); + } + kfree(cmd); + } + kfree(fusion->cmd_list); + } + + if (fusion->sg_dma_pool) { + dma_pool_destroy(fusion->sg_dma_pool); + fusion->sg_dma_pool = NULL; + } + if (fusion->sense_dma_pool) { + dma_pool_destroy(fusion->sense_dma_pool); + 
fusion->sense_dma_pool = NULL; + } + + + /* Reply Frame, Desc*/ + if (instance->is_rdpq) + megasas_free_rdpq_fusion(instance); + else + megasas_free_reply_fusion(instance); + + /* Request Frame, Desc*/ + if (fusion->req_frames_desc) + dma_free_coherent(&instance->pdev->dev, + fusion->request_alloc_sz, fusion->req_frames_desc, + fusion->req_frames_desc_phys); + if (fusion->io_request_frames) + dma_pool_free(fusion->io_request_frames_pool, + fusion->io_request_frames, + fusion->io_request_frames_phys); + if (fusion->io_request_frames_pool) { + dma_pool_destroy(fusion->io_request_frames_pool); + fusion->io_request_frames_pool = NULL; + } +} + +/** + * megasas_create_sg_sense_fusion - Creates DMA pool for cmd frames + * @instance: Adapter soft state + * + */ +static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) +{ + int i; + u16 max_cmd; + struct fusion_context *fusion; + struct megasas_cmd_fusion *cmd; + int sense_sz; + u32 offset; + + fusion = instance->ctrl_context; + max_cmd = instance->max_fw_cmds; + sense_sz = instance->max_mpt_cmds * SCSI_SENSE_BUFFERSIZE; + + fusion->sg_dma_pool = + dma_pool_create("mr_sg", &instance->pdev->dev, + instance->max_chain_frame_sz, + MR_DEFAULT_NVME_PAGE_SIZE, 0); + /* SCSI_SENSE_BUFFERSIZE = 96 bytes */ + fusion->sense_dma_pool = + dma_pool_create("mr_sense", &instance->pdev->dev, + sense_sz, 64, 0); + + if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + + fusion->sense = dma_pool_alloc(fusion->sense_dma_pool, + GFP_KERNEL, &fusion->sense_phys_addr); + if (!fusion->sense) { + dev_err(&instance->pdev->dev, + "failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + + /* sense buffer, request frame and reply desc pool requires to be in + * same 4 gb region. Below function will check this. + * In case of failure, new pci pool will be created with updated + * alignment. + * Older allocation and pool will be destroyed. + * Alignment will be used such a way that next allocation if success, + * will always meet same 4gb region requirement. + * Actual requirement is not alignment, but we need start and end of + * DMA address must have same upper 32 bit address. 
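+ *
+ * The check itself is only a comparison of the upper 32 bits of the first
+ * and last DMA address of the buffer, i.e. (sketch of the helper above):
+ *
+ *	upper_32_bits(start_addr) == upper_32_bits(start_addr + size)
+ *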
+ */ + + if (!megasas_check_same_4gb_region(instance, fusion->sense_phys_addr, + sense_sz)) { + dma_pool_free(fusion->sense_dma_pool, fusion->sense, + fusion->sense_phys_addr); + fusion->sense = NULL; + dma_pool_destroy(fusion->sense_dma_pool); + + fusion->sense_dma_pool = + dma_pool_create("mr_sense_align", &instance->pdev->dev, + sense_sz, roundup_pow_of_two(sense_sz), + 0); + if (!fusion->sense_dma_pool) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + fusion->sense = dma_pool_alloc(fusion->sense_dma_pool, + GFP_KERNEL, + &fusion->sense_phys_addr); + if (!fusion->sense) { + dev_err(&instance->pdev->dev, + "failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + } + + /* + * Allocate and attach a frame to each of the commands in cmd_list + */ + for (i = 0; i < max_cmd; i++) { + cmd = fusion->cmd_list[i]; + cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool, + GFP_KERNEL, &cmd->sg_frame_phys_addr); + + offset = SCSI_SENSE_BUFFERSIZE * i; + cmd->sense = (u8 *)fusion->sense + offset; + cmd->sense_phys_addr = fusion->sense_phys_addr + offset; + + if (!cmd->sg_frame) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + } + + /* create sense buffer for the raid 1/10 fp */ + for (i = max_cmd; i < instance->max_mpt_cmds; i++) { + cmd = fusion->cmd_list[i]; + offset = SCSI_SENSE_BUFFERSIZE * i; + cmd->sense = (u8 *)fusion->sense + offset; + cmd->sense_phys_addr = fusion->sense_phys_addr + offset; + + } + + return 0; +} + +static int +megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) +{ + u32 max_mpt_cmd, i, j; + struct fusion_context *fusion; + + fusion = instance->ctrl_context; + + max_mpt_cmd = instance->max_mpt_cmds; + + /* + * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers. + * Allocate the dynamic array first and then allocate individual + * commands. 
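+ *
+ * On a partial allocation failure the loop below unwinds what was already
+ * allocated: entries 0..i-1 are kfree()d and then the pointer array itself
+ * is released, so the caller simply sees -ENOMEM.
+ *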
+ */ + fusion->cmd_list = + kcalloc(max_mpt_cmd, sizeof(struct megasas_cmd_fusion *), + GFP_KERNEL); + if (!fusion->cmd_list) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + + for (i = 0; i < max_mpt_cmd; i++) { + fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion), + GFP_KERNEL); + if (!fusion->cmd_list[i]) { + for (j = 0; j < i; j++) + kfree(fusion->cmd_list[j]); + kfree(fusion->cmd_list); + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + } + + return 0; +} + +static int +megasas_alloc_request_fusion(struct megasas_instance *instance) +{ + struct fusion_context *fusion; + + fusion = instance->ctrl_context; + +retry_alloc: + fusion->io_request_frames_pool = + dma_pool_create("mr_ioreq", &instance->pdev->dev, + fusion->io_frames_alloc_sz, 16, 0); + + if (!fusion->io_request_frames_pool) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + + fusion->io_request_frames = + dma_pool_alloc(fusion->io_request_frames_pool, + GFP_KERNEL | __GFP_NOWARN, + &fusion->io_request_frames_phys); + if (!fusion->io_request_frames) { + if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) { + instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT; + dma_pool_destroy(fusion->io_request_frames_pool); + megasas_configure_queue_sizes(instance); + goto retry_alloc; + } else { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + } + + if (!megasas_check_same_4gb_region(instance, + fusion->io_request_frames_phys, + fusion->io_frames_alloc_sz)) { + dma_pool_free(fusion->io_request_frames_pool, + fusion->io_request_frames, + fusion->io_request_frames_phys); + fusion->io_request_frames = NULL; + dma_pool_destroy(fusion->io_request_frames_pool); + + fusion->io_request_frames_pool = + dma_pool_create("mr_ioreq_align", + &instance->pdev->dev, + fusion->io_frames_alloc_sz, + roundup_pow_of_two(fusion->io_frames_alloc_sz), + 0); + + if (!fusion->io_request_frames_pool) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + + fusion->io_request_frames = + dma_pool_alloc(fusion->io_request_frames_pool, + GFP_KERNEL | __GFP_NOWARN, + &fusion->io_request_frames_phys); + + if (!fusion->io_request_frames) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + } + + fusion->req_frames_desc = + dma_alloc_coherent(&instance->pdev->dev, + fusion->request_alloc_sz, + &fusion->req_frames_desc_phys, GFP_KERNEL); + if (!fusion->req_frames_desc) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + + return 0; +} + +static int +megasas_alloc_reply_fusion(struct megasas_instance *instance) +{ + int i, count; + struct fusion_context *fusion; + union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; + fusion = instance->ctrl_context; + + count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1; + count += instance->iopoll_q_count; + + fusion->reply_frames_desc_pool = + dma_pool_create("mr_reply", &instance->pdev->dev, + fusion->reply_alloc_sz * count, 16, 0); + + if (!fusion->reply_frames_desc_pool) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + + fusion->reply_frames_desc[0] = + dma_pool_alloc(fusion->reply_frames_desc_pool, + GFP_KERNEL, &fusion->reply_frames_desc_phys[0]); + if (!fusion->reply_frames_desc[0]) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + + if (!megasas_check_same_4gb_region(instance, + fusion->reply_frames_desc_phys[0], + (fusion->reply_alloc_sz * count))) { + dma_pool_free(fusion->reply_frames_desc_pool, + fusion->reply_frames_desc[0], + fusion->reply_frames_desc_phys[0]); + fusion->reply_frames_desc[0] = NULL; + dma_pool_destroy(fusion->reply_frames_desc_pool); + + fusion->reply_frames_desc_pool = + dma_pool_create("mr_reply_align", + &instance->pdev->dev, + fusion->reply_alloc_sz * count, + roundup_pow_of_two(fusion->reply_alloc_sz * count), + 0); + + if (!fusion->reply_frames_desc_pool) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + + fusion->reply_frames_desc[0] = + dma_pool_alloc(fusion->reply_frames_desc_pool, + GFP_KERNEL, + &fusion->reply_frames_desc_phys[0]); + + if (!fusion->reply_frames_desc[0]) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + } + + reply_desc = fusion->reply_frames_desc[0]; + for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++) + reply_desc->Words = cpu_to_le64(ULLONG_MAX); + + /* This is not a rdpq mode, but driver still populate + * reply_frame_desc array to use same msix index in ISR path. + */ + for (i = 0; i < (count - 1); i++) + fusion->reply_frames_desc[i + 1] = + fusion->reply_frames_desc[i] + + (fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION); + + return 0; +} + +static int +megasas_alloc_rdpq_fusion(struct megasas_instance *instance) +{ + int i, j, k, msix_count; + struct fusion_context *fusion; + union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; + union MPI2_REPLY_DESCRIPTORS_UNION *rdpq_chunk_virt[RDPQ_MAX_CHUNK_COUNT]; + dma_addr_t rdpq_chunk_phys[RDPQ_MAX_CHUNK_COUNT]; + u8 dma_alloc_count, abs_index; + u32 chunk_size, array_size, offset; + + fusion = instance->ctrl_context; + chunk_size = fusion->reply_alloc_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK; + array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * + MAX_MSIX_QUEUES_FUSION; + + fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev, + array_size, &fusion->rdpq_phys, + GFP_KERNEL); + if (!fusion->rdpq_virt) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + + msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; + msix_count += instance->iopoll_q_count; + + fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq", + &instance->pdev->dev, + chunk_size, 16, 0); + fusion->reply_frames_desc_pool_align = + dma_pool_create("mr_rdpq_align", + &instance->pdev->dev, + chunk_size, + roundup_pow_of_two(chunk_size), + 0); + + if (!fusion->reply_frames_desc_pool || + !fusion->reply_frames_desc_pool_align) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + +/* + * For INVADER_SERIES each set of 8 reply queues(0-7, 8-15, ..) 
and + * VENTURA_SERIES each set of 16 reply queues(0-15, 16-31, ..) should be + * within 4GB boundary and also reply queues in a set must have same + * upper 32-bits in their memory address. so here driver is allocating the + * DMA'able memory for reply queues according. Driver uses limitation of + * VENTURA_SERIES to manage INVADER_SERIES as well. + */ + dma_alloc_count = DIV_ROUND_UP(msix_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK); + + for (i = 0; i < dma_alloc_count; i++) { + rdpq_chunk_virt[i] = + dma_pool_alloc(fusion->reply_frames_desc_pool, + GFP_KERNEL, &rdpq_chunk_phys[i]); + if (!rdpq_chunk_virt[i]) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + /* reply desc pool requires to be in same 4 gb region. + * Below function will check this. + * In case of failure, new pci pool will be created with updated + * alignment. + * For RDPQ buffers, driver always allocate two separate pci pool. + * Alignment will be used such a way that next allocation if + * success, will always meet same 4gb region requirement. + * rdpq_tracker keep track of each buffer's physical, + * virtual address and pci pool descriptor. It will help driver + * while freeing the resources. + * + */ + if (!megasas_check_same_4gb_region(instance, rdpq_chunk_phys[i], + chunk_size)) { + dma_pool_free(fusion->reply_frames_desc_pool, + rdpq_chunk_virt[i], + rdpq_chunk_phys[i]); + + rdpq_chunk_virt[i] = + dma_pool_alloc(fusion->reply_frames_desc_pool_align, + GFP_KERNEL, &rdpq_chunk_phys[i]); + if (!rdpq_chunk_virt[i]) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", + __func__, __LINE__); + return -ENOMEM; + } + fusion->rdpq_tracker[i].dma_pool_ptr = + fusion->reply_frames_desc_pool_align; + } else { + fusion->rdpq_tracker[i].dma_pool_ptr = + fusion->reply_frames_desc_pool; + } + + fusion->rdpq_tracker[i].pool_entry_phys = rdpq_chunk_phys[i]; + fusion->rdpq_tracker[i].pool_entry_virt = rdpq_chunk_virt[i]; + } + + for (k = 0; k < dma_alloc_count; k++) { + for (i = 0; i < RDPQ_MAX_INDEX_IN_ONE_CHUNK; i++) { + abs_index = (k * RDPQ_MAX_INDEX_IN_ONE_CHUNK) + i; + + if (abs_index == msix_count) + break; + offset = fusion->reply_alloc_sz * i; + fusion->rdpq_virt[abs_index].RDPQBaseAddress = + cpu_to_le64(rdpq_chunk_phys[k] + offset); + fusion->reply_frames_desc_phys[abs_index] = + rdpq_chunk_phys[k] + offset; + fusion->reply_frames_desc[abs_index] = + (union MPI2_REPLY_DESCRIPTORS_UNION *)((u8 *)rdpq_chunk_virt[k] + offset); + + reply_desc = fusion->reply_frames_desc[abs_index]; + for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++) + reply_desc->Words = ULLONG_MAX; + } + } + + return 0; +} + +static void +megasas_free_rdpq_fusion(struct megasas_instance *instance) { + + int i; + struct fusion_context *fusion; + + fusion = instance->ctrl_context; + + for (i = 0; i < RDPQ_MAX_CHUNK_COUNT; i++) { + if (fusion->rdpq_tracker[i].pool_entry_virt) + dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr, + fusion->rdpq_tracker[i].pool_entry_virt, + fusion->rdpq_tracker[i].pool_entry_phys); + + } + + dma_pool_destroy(fusion->reply_frames_desc_pool); + dma_pool_destroy(fusion->reply_frames_desc_pool_align); + + if (fusion->rdpq_virt) + dma_free_coherent(&instance->pdev->dev, + sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION, + fusion->rdpq_virt, fusion->rdpq_phys); +} + +static void +megasas_free_reply_fusion(struct megasas_instance *instance) { + + struct fusion_context *fusion; + + fusion = instance->ctrl_context; + + if (fusion->reply_frames_desc[0]) + 
dma_pool_free(fusion->reply_frames_desc_pool, + fusion->reply_frames_desc[0], + fusion->reply_frames_desc_phys[0]); + + dma_pool_destroy(fusion->reply_frames_desc_pool); + +} + + +/** + * megasas_alloc_cmds_fusion - Allocates the command packets + * @instance: Adapter soft state + * + * + * Each frame has a 32-bit field called context. This context is used to get + * back the megasas_cmd_fusion from the frame when a frame gets completed + * In this driver, the 32 bit values are the indices into an array cmd_list. + * This array is used only to look up the megasas_cmd_fusion given the context. + * The free commands themselves are maintained in a linked list called cmd_pool. + * + * cmds are formed in the io_request and sg_frame members of the + * megasas_cmd_fusion. The context field is used to get a request descriptor + * and is used as SMID of the cmd. + * SMID value range is from 1 to max_fw_cmds. + */ +static int +megasas_alloc_cmds_fusion(struct megasas_instance *instance) +{ + int i; + struct fusion_context *fusion; + struct megasas_cmd_fusion *cmd; + u32 offset; + dma_addr_t io_req_base_phys; + u8 *io_req_base; + + + fusion = instance->ctrl_context; + + if (megasas_alloc_request_fusion(instance)) + goto fail_exit; + + if (instance->is_rdpq) { + if (megasas_alloc_rdpq_fusion(instance)) + goto fail_exit; + } else + if (megasas_alloc_reply_fusion(instance)) + goto fail_exit; + + if (megasas_alloc_cmdlist_fusion(instance)) + goto fail_exit; + + /* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */ + io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; + io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; + + /* + * Add all the commands to command pool (fusion->cmd_pool) + */ + + /* SMID 0 is reserved. Set SMID/index from 1 */ + for (i = 0; i < instance->max_mpt_cmds; i++) { + cmd = fusion->cmd_list[i]; + offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; + memset(cmd, 0, sizeof(struct megasas_cmd_fusion)); + cmd->index = i + 1; + cmd->scmd = NULL; + cmd->sync_cmd_idx = + (i >= instance->max_scsi_cmds && i < instance->max_fw_cmds) ? + (i - instance->max_scsi_cmds) : + (u32)ULONG_MAX; /* Set to Invalid */ + cmd->instance = instance; + cmd->io_request = + (struct MPI2_RAID_SCSI_IO_REQUEST *) + (io_req_base + offset); + memset(cmd->io_request, 0, + sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); + cmd->io_request_phys_addr = io_req_base_phys + offset; + cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; + } + + if (megasas_create_sg_sense_fusion(instance)) + goto fail_exit; + + return 0; + +fail_exit: + megasas_free_cmds_fusion(instance); + return -ENOMEM; +} + +/** + * wait_and_poll - Issues a polling command + * @instance: Adapter soft state + * @cmd: Command packet to be issued + * @seconds: Maximum poll time + * + * For polling, MFI requires the cmd_status to be set to 0xFF before posting. 
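+ * The loop below sleeps 20 ms per iteration, so a @seconds value of S allows
+ * roughly S * 1000 / 20 polls, and the firmware status register is sampled
+ * about every 5 seconds so that a faulted controller is noticed early.
+ *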
+ */ +int +wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, + int seconds) +{ + int i; + struct megasas_header *frame_hdr = &cmd->frame->hdr; + u32 status_reg; + + u32 msecs = seconds * 1000; + + /* + * Wait for cmd_status to change + */ + for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) { + rmb(); + msleep(20); + if (!(i % 5000)) { + status_reg = instance->instancet->read_fw_status_reg(instance) + & MFI_STATE_MASK; + if (status_reg == MFI_STATE_FAULT) + break; + } + } + + if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS) + return DCMD_TIMEOUT; + else if (frame_hdr->cmd_status == MFI_STAT_OK) + return DCMD_SUCCESS; + else + return DCMD_FAILED; +} + +/** + * megasas_ioc_init_fusion - Initializes the FW + * @instance: Adapter soft state + * + * Issues the IOC Init cmd + */ +int +megasas_ioc_init_fusion(struct megasas_instance *instance) +{ + struct megasas_init_frame *init_frame; + struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL; + dma_addr_t ioc_init_handle; + struct megasas_cmd *cmd; + u8 ret, cur_rdpq_mode; + struct fusion_context *fusion; + union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc; + int i; + struct megasas_header *frame_hdr; + const char *sys_info; + MFI_CAPABILITIES *drv_ops; + u32 scratch_pad_1; + ktime_t time; + bool cur_fw_64bit_dma_capable; + bool cur_intr_coalescing; + + fusion = instance->ctrl_context; + + ioc_init_handle = fusion->ioc_init_request_phys; + IOCInitMessage = fusion->ioc_init_request; + + cmd = fusion->ioc_init_cmd; + + scratch_pad_1 = megasas_readl + (instance, &instance->reg_set->outbound_scratch_pad_1); + + cur_rdpq_mode = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 1 : 0; + + if (instance->adapter_type == INVADER_SERIES) { + cur_fw_64bit_dma_capable = + (scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false; + + if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) { + dev_err(&instance->pdev->dev, "Driver was operating on 64bit " + "DMA mask, but upcoming FW does not support 64bit DMA mask\n"); + megaraid_sas_kill_hba(instance); + ret = 1; + goto fail_fw_init; + } + } + + if (instance->is_rdpq && !cur_rdpq_mode) { + dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*" + " from RDPQ mode to non RDPQ mode\n"); + ret = 1; + goto fail_fw_init; + } + + cur_intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ? + true : false; + + if ((instance->low_latency_index_start == + MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing) + instance->perf_mode = MR_BALANCED_PERF_MODE; + + dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n", + MEGASAS_PERF_MODE_2STR(instance->perf_mode), + instance->low_latency_index_start); + + instance->fw_sync_cache_support = (scratch_pad_1 & + MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0; + dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n", + instance->fw_sync_cache_support ? "Yes" : "No"); + + memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST)); + + IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT; + IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER; + IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION); + IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); + IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4); + + IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth); + IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ? 
+ cpu_to_le64(fusion->rdpq_phys) : + cpu_to_le64(fusion->reply_frames_desc_phys[0]); + IOCInitMessage->MsgFlags = instance->is_rdpq ? + MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0; + IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys); + IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr)); + IOCInitMessage->HostMSIxVectors = instance->msix_vectors + instance->iopoll_q_count; + IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT; + + time = ktime_get_real(); + /* Convert to milliseconds as per FW requirement */ + IOCInitMessage->TimeStamp = cpu_to_le64(ktime_to_ms(time)); + + init_frame = (struct megasas_init_frame *)cmd->frame; + memset(init_frame, 0, IOC_INIT_FRAME_SIZE); + + frame_hdr = &cmd->frame->hdr; + frame_hdr->cmd_status = 0xFF; + frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); + + init_frame->cmd = MFI_CMD_INIT; + init_frame->cmd_status = 0xFF; + + drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations); + + /* driver support Extended MSIX */ + if (instance->adapter_type >= INVADER_SERIES) + drv_ops->mfi_capabilities.support_additional_msix = 1; + /* driver supports HA / Remote LUN over Fast Path interface */ + drv_ops->mfi_capabilities.support_fp_remote_lun = 1; + + drv_ops->mfi_capabilities.support_max_255lds = 1; + drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1; + drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1; + + if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN) + drv_ops->mfi_capabilities.support_ext_io_size = 1; + + drv_ops->mfi_capabilities.support_fp_rlbypass = 1; + if (!dual_qdepth_disable) + drv_ops->mfi_capabilities.support_ext_queue_depth = 1; + + drv_ops->mfi_capabilities.support_qd_throttling = 1; + drv_ops->mfi_capabilities.support_pd_map_target_id = 1; + drv_ops->mfi_capabilities.support_nvme_passthru = 1; + drv_ops->mfi_capabilities.support_fw_exposed_dev_list = 1; + + if (reset_devices) + drv_ops->mfi_capabilities.support_memdump = 1; + + if (instance->consistent_mask_64bit) + drv_ops->mfi_capabilities.support_64bit_mode = 1; + + /* Convert capability to LE32 */ + cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); + + sys_info = dmi_get_system_info(DMI_PRODUCT_UUID); + if (instance->system_info_buf && sys_info) { + memcpy(instance->system_info_buf->systemId, sys_info, + strlen(sys_info) > 64 ? 64 : strlen(sys_info)); + instance->system_info_buf->systemIdLength = + strlen(sys_info) > 64 ? 
64 : strlen(sys_info); + init_frame->system_info_lo = cpu_to_le32(lower_32_bits(instance->system_info_h)); + init_frame->system_info_hi = cpu_to_le32(upper_32_bits(instance->system_info_h)); + } + + init_frame->queue_info_new_phys_addr_hi = + cpu_to_le32(upper_32_bits(ioc_init_handle)); + init_frame->queue_info_new_phys_addr_lo = + cpu_to_le32(lower_32_bits(ioc_init_handle)); + init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST)); + + /* + * Each bit in replyqueue_mask represents one group of MSI-x vectors + * (each group has 8 vectors) + */ + switch (instance->perf_mode) { + case MR_BALANCED_PERF_MODE: + init_frame->replyqueue_mask = + cpu_to_le16(~(~0 << instance->low_latency_index_start/8)); + break; + case MR_IOPS_PERF_MODE: + init_frame->replyqueue_mask = + cpu_to_le16(~(~0 << instance->msix_vectors/8)); + break; + } + + + req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr)); + req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr)); + req_desc.MFAIo.RequestFlags = + (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + + /* + * disable the intr before firing the init frame + */ + instance->instancet->disable_intr(instance); + + for (i = 0; i < (10 * 1000); i += 20) { + if (megasas_readl(instance, &instance->reg_set->doorbell) & 1) + msleep(20); + else + break; + } + + /* For AERO also, IOC_INIT requires 64 bit descriptor write */ + megasas_write_64bit_req_desc(instance, &req_desc); + + wait_and_poll(instance, cmd, MFI_IO_TIMEOUT_SECS); + + frame_hdr = &cmd->frame->hdr; + if (frame_hdr->cmd_status != 0) { + ret = 1; + goto fail_fw_init; + } + + if (instance->adapter_type >= AERO_SERIES) { + scratch_pad_1 = megasas_readl + (instance, &instance->reg_set->outbound_scratch_pad_1); + + instance->atomic_desc_support = + (scratch_pad_1 & MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0; + + dev_info(&instance->pdev->dev, "FW supports atomic descriptor\t: %s\n", + instance->atomic_desc_support ? "Yes" : "No"); + } + + return 0; + +fail_fw_init: + dev_err(&instance->pdev->dev, + "Init cmd return status FAILED for SCSI host %d\n", + instance->host->host_no); + + return ret; +} + +/** + * megasas_sync_pd_seq_num - JBOD SEQ MAP + * @instance: Adapter soft state + * @pend: set to 1, if it is pended jbod map. + * + * Issue Jbod map to the firmware. If it is pended command, + * issue command and return. If it is first instance of jbod map + * issue and receive command. + */ +int +megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) { + int ret = 0; + size_t pd_seq_map_sz; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct fusion_context *fusion = instance->ctrl_context; + struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; + dma_addr_t pd_seq_h; + + pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)]; + pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)]; + pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES); + + cmd = megasas_get_cmd(instance); + if (!cmd) { + dev_err(&instance->pdev->dev, + "Could not get mfi cmd. 
Fail from %s %d\n", + __func__, __LINE__); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + memset(pd_sync, 0, pd_seq_map_sz); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + if (pend) { + dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG; + dcmd->flags = MFI_FRAME_DIR_WRITE; + instance->jbod_seq_cmd = cmd; + } else { + dcmd->flags = MFI_FRAME_DIR_READ; + } + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz); + dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO); + + megasas_set_dma_settings(instance, dcmd, pd_seq_h, pd_seq_map_sz); + + if (pend) { + instance->instancet->issue_dcmd(instance, cmd); + return 0; + } + + /* Below code is only for non pended DCMD */ + if (!instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, cmd, + MFI_IO_TIMEOUT_SECS); + else + ret = megasas_issue_polled(instance, cmd); + + if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) { + dev_warn(&instance->pdev->dev, + "driver supports max %d JBOD, but FW reports %d\n", + MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count)); + ret = -EINVAL; + } + + if (ret == DCMD_TIMEOUT) + dev_warn(&instance->pdev->dev, + "%s DCMD timed out, continue without JBOD sequence map\n", + __func__); + + if (ret == DCMD_SUCCESS) + instance->pd_seq_map_id++; + + megasas_return_cmd(instance, cmd); + return ret; +} + +/* + * megasas_get_ld_map_info - Returns FW's ld_map structure + * @instance: Adapter soft state + * @pend: Pend the command or not + * Issues an internal command (DCMD) to get the FW's controller PD + * list structure. This information is mainly used to find out SYSTEM + * supported by the FW. + * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO + * dcmd.mbox.b[0] - number of LDs being sync'd + * dcmd.mbox.b[1] - 0 - complete command immediately. 
+ * - 1 - pend till config change + * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP + * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and + * uses extended struct MR_FW_RAID_MAP_EXT + */ +static int +megasas_get_ld_map_info(struct megasas_instance *instance) +{ + int ret = 0; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + void *ci; + dma_addr_t ci_h = 0; + u32 size_map_info; + struct fusion_context *fusion; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n"); + return -ENOMEM; + } + + fusion = instance->ctrl_context; + + if (!fusion) { + megasas_return_cmd(instance, cmd); + return -ENXIO; + } + + dcmd = &cmd->frame->dcmd; + + size_map_info = fusion->current_map_sz; + + ci = (void *) fusion->ld_map[(instance->map_id & 1)]; + ci_h = fusion->ld_map_phys[(instance->map_id & 1)]; + + if (!ci) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n"); + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + + memset(ci, 0, fusion->max_map_sz); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(size_map_info); + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); + + megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info); + + if (!instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, cmd, + MFI_IO_TIMEOUT_SECS); + else + ret = megasas_issue_polled(instance, cmd); + + if (ret == DCMD_TIMEOUT) + dev_warn(&instance->pdev->dev, + "%s DCMD timed out, RAID map is disabled\n", + __func__); + + megasas_return_cmd(instance, cmd); + + return ret; +} + +u8 +megasas_get_map_info(struct megasas_instance *instance) +{ + struct fusion_context *fusion = instance->ctrl_context; + + fusion->fast_path_io = 0; + if (!megasas_get_ld_map_info(instance)) { + if (MR_ValidateMapInfo(instance, instance->map_id)) { + fusion->fast_path_io = 1; + return 0; + } + } + return 1; +} + +/* + * megasas_sync_map_info - Returns FW's ld_map structure + * @instance: Adapter soft state + * + * Issues an internal command (DCMD) to get the FW's controller PD + * list structure. This information is mainly used to find out SYSTEM + * supported by the FW. 
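+ * Note that, unlike megasas_get_ld_map_info() above, the DCMD built below is
+ * issued with MEGASAS_DCMD_MBOX_PEND_FLAG set, so the firmware completes it
+ * only on a configuration change; the driver parks it in map_update_cmd and
+ * does not wait for it here.
+ *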
+ */ +int +megasas_sync_map_info(struct megasas_instance *instance) +{ + int i; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + u16 num_lds; + struct fusion_context *fusion; + struct MR_LD_TARGET_SYNC *ci = NULL; + struct MR_DRV_RAID_MAP_ALL *map; + struct MR_LD_RAID *raid; + struct MR_LD_TARGET_SYNC *ld_sync; + dma_addr_t ci_h = 0; + u32 size_map_info; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n"); + return -ENOMEM; + } + + fusion = instance->ctrl_context; + + if (!fusion) { + megasas_return_cmd(instance, cmd); + return 1; + } + + map = fusion->ld_drv_map[instance->map_id & 1]; + + num_lds = le16_to_cpu(map->raidMap.ldCount); + + dcmd = &cmd->frame->dcmd; + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + ci = (struct MR_LD_TARGET_SYNC *) + fusion->ld_map[(instance->map_id - 1) & 1]; + memset(ci, 0, fusion->max_map_sz); + + ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1]; + + ld_sync = (struct MR_LD_TARGET_SYNC *)ci; + + for (i = 0; i < num_lds; i++, ld_sync++) { + raid = MR_LdRaidGet(i, map); + ld_sync->targetId = MR_GetLDTgtId(i, map); + ld_sync->seqNum = raid->seqNum; + } + + size_map_info = fusion->current_map_sz; + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_WRITE; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(size_map_info); + dcmd->mbox.b[0] = num_lds; + dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG; + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); + + megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info); + + instance->map_update_cmd = cmd; + + instance->instancet->issue_dcmd(instance, cmd); + + return 0; +} + +/* + * meagasas_display_intel_branding - Display branding string + * @instance: per adapter object + * + * Return nothing. 
+ */ +static void +megasas_display_intel_branding(struct megasas_instance *instance) +{ + if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL) + return; + + switch (instance->pdev->device) { + case PCI_DEVICE_ID_LSI_INVADER: + switch (instance->pdev->subsystem_device) { + case MEGARAID_INTEL_RS3DC080_SSDID: + dev_info(&instance->pdev->dev, "scsi host %d: %s\n", + instance->host->host_no, + MEGARAID_INTEL_RS3DC080_BRANDING); + break; + case MEGARAID_INTEL_RS3DC040_SSDID: + dev_info(&instance->pdev->dev, "scsi host %d: %s\n", + instance->host->host_no, + MEGARAID_INTEL_RS3DC040_BRANDING); + break; + case MEGARAID_INTEL_RS3SC008_SSDID: + dev_info(&instance->pdev->dev, "scsi host %d: %s\n", + instance->host->host_no, + MEGARAID_INTEL_RS3SC008_BRANDING); + break; + case MEGARAID_INTEL_RS3MC044_SSDID: + dev_info(&instance->pdev->dev, "scsi host %d: %s\n", + instance->host->host_no, + MEGARAID_INTEL_RS3MC044_BRANDING); + break; + default: + break; + } + break; + case PCI_DEVICE_ID_LSI_FURY: + switch (instance->pdev->subsystem_device) { + case MEGARAID_INTEL_RS3WC080_SSDID: + dev_info(&instance->pdev->dev, "scsi host %d: %s\n", + instance->host->host_no, + MEGARAID_INTEL_RS3WC080_BRANDING); + break; + case MEGARAID_INTEL_RS3WC040_SSDID: + dev_info(&instance->pdev->dev, "scsi host %d: %s\n", + instance->host->host_no, + MEGARAID_INTEL_RS3WC040_BRANDING); + break; + default: + break; + } + break; + case PCI_DEVICE_ID_LSI_CUTLASS_52: + case PCI_DEVICE_ID_LSI_CUTLASS_53: + switch (instance->pdev->subsystem_device) { + case MEGARAID_INTEL_RMS3BC160_SSDID: + dev_info(&instance->pdev->dev, "scsi host %d: %s\n", + instance->host->host_no, + MEGARAID_INTEL_RMS3BC160_BRANDING); + break; + default: + break; + } + break; + default: + break; + } +} + +/** + * megasas_allocate_raid_maps - Allocate memory for RAID maps + * @instance: Adapter soft state + * + * return: if success: return 0 + * failed: return -ENOMEM + */ +static inline int megasas_allocate_raid_maps(struct megasas_instance *instance) +{ + struct fusion_context *fusion; + int i = 0; + + fusion = instance->ctrl_context; + + fusion->drv_map_pages = get_order(fusion->drv_map_sz); + + for (i = 0; i < 2; i++) { + fusion->ld_map[i] = NULL; + + fusion->ld_drv_map[i] = (void *) + __get_free_pages(__GFP_ZERO | GFP_KERNEL, + fusion->drv_map_pages); + + if (!fusion->ld_drv_map[i]) { + fusion->ld_drv_map[i] = vzalloc(fusion->drv_map_sz); + + if (!fusion->ld_drv_map[i]) { + dev_err(&instance->pdev->dev, + "Could not allocate memory for local map" + " size requested: %d\n", + fusion->drv_map_sz); + goto ld_drv_map_alloc_fail; + } + } + } + + for (i = 0; i < 2; i++) { + fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev, + fusion->max_map_sz, + &fusion->ld_map_phys[i], + GFP_KERNEL); + if (!fusion->ld_map[i]) { + dev_err(&instance->pdev->dev, + "Could not allocate memory for map info %s:%d\n", + __func__, __LINE__); + goto ld_map_alloc_fail; + } + } + + return 0; + +ld_map_alloc_fail: + for (i = 0; i < 2; i++) { + if (fusion->ld_map[i]) + dma_free_coherent(&instance->pdev->dev, + fusion->max_map_sz, + fusion->ld_map[i], + fusion->ld_map_phys[i]); + } + +ld_drv_map_alloc_fail: + for (i = 0; i < 2; i++) { + if (fusion->ld_drv_map[i]) { + if (is_vmalloc_addr(fusion->ld_drv_map[i])) + vfree(fusion->ld_drv_map[i]); + else + free_pages((ulong)fusion->ld_drv_map[i], + fusion->drv_map_pages); + } + } + + return -ENOMEM; +} + +/** + * megasas_configure_queue_sizes - Calculate size of request desc queue, + * reply desc queue, + * IO request frame queue, 
set can_queue. + * @instance: Adapter soft state + * @return: void + */ +static inline +void megasas_configure_queue_sizes(struct megasas_instance *instance) +{ + struct fusion_context *fusion; + u16 max_cmd; + + fusion = instance->ctrl_context; + max_cmd = instance->max_fw_cmds; + + if (instance->adapter_type >= VENTURA_SERIES) + instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS; + else + instance->max_mpt_cmds = instance->max_fw_cmds; + + instance->max_scsi_cmds = instance->max_fw_cmds - instance->max_mfi_cmds; + instance->cur_can_queue = instance->max_scsi_cmds; + instance->host->can_queue = instance->cur_can_queue; + + fusion->reply_q_depth = 2 * ((max_cmd + 1 + 15) / 16) * 16; + + fusion->request_alloc_sz = sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * + instance->max_mpt_cmds; + fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) * + (fusion->reply_q_depth); + fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + + (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */ +} + +static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance) +{ + struct fusion_context *fusion; + struct megasas_cmd *cmd; + + fusion = instance->ctrl_context; + + cmd = kzalloc(sizeof(struct megasas_cmd), GFP_KERNEL); + + if (!cmd) { + dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n", + __func__, __LINE__); + return -ENOMEM; + } + + cmd->frame = dma_alloc_coherent(&instance->pdev->dev, + IOC_INIT_FRAME_SIZE, + &cmd->frame_phys_addr, GFP_KERNEL); + + if (!cmd->frame) { + dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n", + __func__, __LINE__); + kfree(cmd); + return -ENOMEM; + } + + fusion->ioc_init_cmd = cmd; + return 0; +} + +/** + * megasas_free_ioc_init_cmd - Free IOC INIT command frame + * @instance: Adapter soft state + */ +static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance) +{ + struct fusion_context *fusion; + + fusion = instance->ctrl_context; + + if (fusion->ioc_init_cmd && fusion->ioc_init_cmd->frame) + dma_free_coherent(&instance->pdev->dev, + IOC_INIT_FRAME_SIZE, + fusion->ioc_init_cmd->frame, + fusion->ioc_init_cmd->frame_phys_addr); + + kfree(fusion->ioc_init_cmd); +} + +/** + * megasas_init_adapter_fusion - Initializes the FW + * @instance: Adapter soft state + * + * This is the main function for initializing firmware. + */ +static u32 +megasas_init_adapter_fusion(struct megasas_instance *instance) +{ + struct fusion_context *fusion; + u32 scratch_pad_1; + int i = 0, count; + u32 status_reg; + + fusion = instance->ctrl_context; + + megasas_fusion_update_can_queue(instance, PROBE_CONTEXT); + + /* + * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames + */ + instance->max_mfi_cmds = + MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS; + + megasas_configure_queue_sizes(instance); + + scratch_pad_1 = megasas_readl(instance, + &instance->reg_set->outbound_scratch_pad_1); + /* If scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, + * Firmware support extended IO chain frame which is 4 times more than + * legacy Firmware. 
+ * Legacy Firmware - Frame size is (8 * 128) = 1K + * 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K + */ + if (scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK) + instance->max_chain_frame_sz = + ((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> + MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO; + else + instance->max_chain_frame_sz = + ((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> + MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO; + + if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) { + dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n", + instance->max_chain_frame_sz, + MEGASAS_CHAIN_FRAME_SZ_MIN); + instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN; + } + + fusion->max_sge_in_main_msg = + (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + - offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16; + + fusion->max_sge_in_chain = + instance->max_chain_frame_sz + / sizeof(union MPI2_SGE_IO_UNION); + + instance->max_num_sge = + rounddown_pow_of_two(fusion->max_sge_in_main_msg + + fusion->max_sge_in_chain - 2); + + /* Used for pass thru MFI frame (DCMD) */ + fusion->chain_offset_mfi_pthru = + offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16; + + fusion->chain_offset_io_request = + (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - + sizeof(union MPI2_SGE_IO_UNION))/16; + + count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; + count += instance->iopoll_q_count; + + for (i = 0 ; i < count; i++) + fusion->last_reply_idx[i] = 0; + + /* + * For fusion adapters, 3 commands for IOCTL and 8 commands + * for driver's internal DCMDs. + */ + instance->max_scsi_cmds = instance->max_fw_cmds - + (MEGASAS_FUSION_INTERNAL_CMDS + + MEGASAS_FUSION_IOCTL_CMDS); + sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS); + + for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) + atomic_set(&fusion->busy_mq_poll[i], 0); + + if (megasas_alloc_ioc_init_frame(instance)) + return 1; + + /* + * Allocate memory for descriptors + * Create a pool of commands + */ + if (megasas_alloc_cmds(instance)) + goto fail_alloc_mfi_cmds; + if (megasas_alloc_cmds_fusion(instance)) + goto fail_alloc_cmds; + + if (megasas_ioc_init_fusion(instance)) { + status_reg = instance->instancet->read_fw_status_reg(instance); + if (((status_reg & MFI_STATE_MASK) == MFI_STATE_FAULT) && + (status_reg & MFI_RESET_ADAPTER)) { + /* Do a chip reset and then retry IOC INIT once */ + if (megasas_adp_reset_wait_for_ready + (instance, true, 0) == FAILED) + goto fail_ioc_init; + + if (megasas_ioc_init_fusion(instance)) + goto fail_ioc_init; + } else { + goto fail_ioc_init; + } + } + + megasas_display_intel_branding(instance); + if (megasas_get_ctrl_info(instance)) { + dev_err(&instance->pdev->dev, + "Could not get controller info. Fail from %s %d\n", + __func__, __LINE__); + goto fail_ioc_init; + } + + instance->flag_ieee = 1; + instance->r1_ldio_hint_default = MR_R1_LDIO_PIGGYBACK_DEFAULT; + instance->threshold_reply_count = instance->max_fw_cmds / 4; + fusion->fast_path_io = 0; + + if (megasas_allocate_raid_maps(instance)) + goto fail_ioc_init; + + if (!megasas_get_map_info(instance)) + megasas_sync_map_info(instance); + + return 0; + +fail_ioc_init: + megasas_free_cmds_fusion(instance); +fail_alloc_cmds: + megasas_free_cmds(instance); +fail_alloc_mfi_cmds: + megasas_free_ioc_init_cmd(instance); + return 1; +} + +/** + * megasas_fault_detect_work - Worker function of + * FW fault handling workqueue. 
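+ * (re-armed every MEGASAS_WATCHDOG_THREAD_INTERVAL milliseconds via
+ * queue_delayed_work() unless an OCR attempt triggered from here fails)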
+ * @work: FW fault work struct + */ +static void +megasas_fault_detect_work(struct work_struct *work) +{ + struct megasas_instance *instance = + container_of(work, struct megasas_instance, + fw_fault_work.work); + u32 fw_state, dma_state, status; + + /* Check the fw state */ + fw_state = instance->instancet->read_fw_status_reg(instance) & + MFI_STATE_MASK; + + if (fw_state == MFI_STATE_FAULT) { + dma_state = instance->instancet->read_fw_status_reg(instance) & + MFI_STATE_DMADONE; + /* Start collecting crash, if DMA bit is done */ + if (instance->crash_dump_drv_support && + instance->crash_dump_app_support && dma_state) { + megasas_fusion_crash_dump(instance); + } else { + if (instance->unload == 0) { + status = megasas_reset_fusion(instance->host, 0); + if (status != SUCCESS) { + dev_err(&instance->pdev->dev, + "Failed from %s %d, do not re-arm timer\n", + __func__, __LINE__); + return; + } + } + } + } + + if (instance->fw_fault_work_q) + queue_delayed_work(instance->fw_fault_work_q, + &instance->fw_fault_work, + msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL)); +} + +int +megasas_fusion_start_watchdog(struct megasas_instance *instance) +{ + /* Check if the Fault WQ is already started */ + if (instance->fw_fault_work_q) + return SUCCESS; + + INIT_DELAYED_WORK(&instance->fw_fault_work, megasas_fault_detect_work); + + snprintf(instance->fault_handler_work_q_name, + sizeof(instance->fault_handler_work_q_name), + "poll_megasas%d_status", instance->host->host_no); + + instance->fw_fault_work_q = + create_singlethread_workqueue(instance->fault_handler_work_q_name); + if (!instance->fw_fault_work_q) { + dev_err(&instance->pdev->dev, "Failed from %s %d\n", + __func__, __LINE__); + return FAILED; + } + + queue_delayed_work(instance->fw_fault_work_q, + &instance->fw_fault_work, + msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL)); + + return SUCCESS; +} + +void +megasas_fusion_stop_watchdog(struct megasas_instance *instance) +{ + struct workqueue_struct *wq; + + if (instance->fw_fault_work_q) { + wq = instance->fw_fault_work_q; + instance->fw_fault_work_q = NULL; + if (!cancel_delayed_work_sync(&instance->fw_fault_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +/** + * map_cmd_status - Maps FW cmd status to OS cmd status + * @fusion: fusion context + * @scmd: Pointer to cmd + * @status: status of cmd returned by FW + * @ext_status: ext status of cmd returned by FW + * @data_length: command data length + * @sense: command sense data + */ +static void +map_cmd_status(struct fusion_context *fusion, + struct scsi_cmnd *scmd, u8 status, u8 ext_status, + u32 data_length, u8 *sense) +{ + u8 cmd_type; + int resid; + + cmd_type = megasas_cmd_type(scmd); + switch (status) { + + case MFI_STAT_OK: + scmd->result = DID_OK << 16; + break; + + case MFI_STAT_SCSI_IO_FAILED: + case MFI_STAT_LD_INIT_IN_PROGRESS: + scmd->result = (DID_ERROR << 16) | ext_status; + break; + + case MFI_STAT_SCSI_DONE_WITH_ERROR: + + scmd->result = (DID_OK << 16) | ext_status; + if (ext_status == SAM_STAT_CHECK_CONDITION) { + memcpy(scmd->sense_buffer, sense, + SCSI_SENSE_BUFFERSIZE); + } + + /* + * If the IO request is partially completed, then MR FW will + * update "io_request->DataLength" field with actual number of + * bytes transferred.Driver will set residual bytes count in + * SCSI command structure. 
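+ * For example, a 64 KB read for which the firmware reports 60 KB
+ * transferred ends up with scsi_set_resid(scmd, 4096).
+ *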
+ */ + resid = (scsi_bufflen(scmd) - data_length); + scsi_set_resid(scmd, resid); + + if (resid && + ((cmd_type == READ_WRITE_LDIO) || + (cmd_type == READ_WRITE_SYSPDIO))) + scmd_printk(KERN_INFO, scmd, "BRCM Debug mfi stat 0x%x, data len" + " requested/completed 0x%x/0x%x\n", + status, scsi_bufflen(scmd), data_length); + break; + + case MFI_STAT_LD_OFFLINE: + case MFI_STAT_DEVICE_NOT_FOUND: + scmd->result = DID_BAD_TARGET << 16; + break; + case MFI_STAT_CONFIG_SEQ_MISMATCH: + scmd->result = DID_IMM_RETRY << 16; + break; + default: + scmd->result = DID_ERROR << 16; + break; + } +} + +/** + * megasas_is_prp_possible - + * Checks if native NVMe PRPs can be built for the IO + * + * @instance: Adapter soft state + * @scmd: SCSI command from the mid-layer + * @sge_count: scatter gather element count. + * + * Returns: true: PRPs can be built + * false: IEEE SGLs needs to be built + */ +static bool +megasas_is_prp_possible(struct megasas_instance *instance, + struct scsi_cmnd *scmd, int sge_count) +{ + u32 data_length = 0; + struct scatterlist *sg_scmd; + bool build_prp = false; + u32 mr_nvme_pg_size; + + mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, + MR_DEFAULT_NVME_PAGE_SIZE); + data_length = scsi_bufflen(scmd); + sg_scmd = scsi_sglist(scmd); + + /* + * NVMe uses one PRP for each page (or part of a page) + * look at the data length - if 4 pages or less then IEEE is OK + * if > 5 pages then we need to build a native SGL + * if > 4 and <= 5 pages, then check physical address of 1st SG entry + * if this first size in the page is >= the residual beyond 4 pages + * then use IEEE, otherwise use native SGL + */ + + if (data_length > (mr_nvme_pg_size * 5)) { + build_prp = true; + } else if ((data_length > (mr_nvme_pg_size * 4)) && + (data_length <= (mr_nvme_pg_size * 5))) { + /* check if 1st SG entry size is < residual beyond 4 pages */ + if (sg_dma_len(sg_scmd) < (data_length - (mr_nvme_pg_size * 4))) + build_prp = true; + } + + return build_prp; +} + +/** + * megasas_make_prp_nvme - + * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only + * + * @instance: Adapter soft state + * @scmd: SCSI command from the mid-layer + * @sgl_ptr: SGL to be filled in + * @cmd: Fusion command frame + * @sge_count: scatter gather element count. + * + * Returns: true: PRPs are built + * false: IEEE SGLs needs to be built + */ +static bool +megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd, + struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, + struct megasas_cmd_fusion *cmd, int sge_count) +{ + int sge_len, offset, num_prp_in_chain = 0; + struct MPI25_IEEE_SGE_CHAIN64 *main_chain_element, *ptr_first_sgl; + u64 *ptr_sgl; + dma_addr_t ptr_sgl_phys; + u64 sge_addr; + u32 page_mask, page_mask_result; + struct scatterlist *sg_scmd; + u32 first_prp_len; + bool build_prp = false; + int data_len = scsi_bufflen(scmd); + u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, + MR_DEFAULT_NVME_PAGE_SIZE); + + build_prp = megasas_is_prp_possible(instance, scmd, sge_count); + + if (!build_prp) + return false; + + /* + * Nvme has a very convoluted prp format. One prp is required + * for each page or partial page. Driver need to split up OS sg_list + * entries if it is longer than one page or cross a page + * boundary. Driver also have to insert a PRP list pointer entry as + * the last entry in each physical page of the PRP list. + * + * NOTE: The first PRP "entry" is actually placed in the first + * SGL entry in the main message as IEEE 64 format. 
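+ * (For example, with a 4 KB NVMe page and a first SG entry that starts
+ * 0x200 bytes into a page, the code below sets first_prp_len to
+ * 4096 - 0x200 = 3584 for that leading IEEE entry; each subsequent PRP
+ * then covers one full page.)
+ *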
The 2nd + * entry in the main message is the chain element, and the rest + * of the PRP entries are built in the contiguous pcie buffer. + */ + page_mask = mr_nvme_pg_size - 1; + ptr_sgl = (u64 *)cmd->sg_frame; + ptr_sgl_phys = cmd->sg_frame_phys_addr; + memset(ptr_sgl, 0, instance->max_chain_frame_sz); + + /* Build chain frame element which holds all prps except first*/ + main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *) + ((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64)); + + main_chain_element->Address = cpu_to_le64(ptr_sgl_phys); + main_chain_element->NextChainOffset = 0; + main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | + IEEE_SGE_FLAGS_SYSTEM_ADDR | + MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; + + /* Build first prp, sge need not to be page aligned*/ + ptr_first_sgl = sgl_ptr; + sg_scmd = scsi_sglist(scmd); + sge_addr = sg_dma_address(sg_scmd); + sge_len = sg_dma_len(sg_scmd); + + offset = (u32)(sge_addr & page_mask); + first_prp_len = mr_nvme_pg_size - offset; + + ptr_first_sgl->Address = cpu_to_le64(sge_addr); + ptr_first_sgl->Length = cpu_to_le32(first_prp_len); + + data_len -= first_prp_len; + + if (sge_len > first_prp_len) { + sge_addr += first_prp_len; + sge_len -= first_prp_len; + } else if (sge_len == first_prp_len) { + sg_scmd = sg_next(sg_scmd); + sge_addr = sg_dma_address(sg_scmd); + sge_len = sg_dma_len(sg_scmd); + } + + for (;;) { + offset = (u32)(sge_addr & page_mask); + + /* Put PRP pointer due to page boundary*/ + page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask; + if (unlikely(!page_mask_result)) { + scmd_printk(KERN_NOTICE, + scmd, "page boundary ptr_sgl: 0x%p\n", + ptr_sgl); + ptr_sgl_phys += 8; + *ptr_sgl = cpu_to_le64(ptr_sgl_phys); + ptr_sgl++; + num_prp_in_chain++; + } + + *ptr_sgl = cpu_to_le64(sge_addr); + ptr_sgl++; + ptr_sgl_phys += 8; + num_prp_in_chain++; + + sge_addr += mr_nvme_pg_size; + sge_len -= mr_nvme_pg_size; + data_len -= mr_nvme_pg_size; + + if (data_len <= 0) + break; + + if (sge_len > 0) + continue; + + sg_scmd = sg_next(sg_scmd); + sge_addr = sg_dma_address(sg_scmd); + sge_len = sg_dma_len(sg_scmd); + } + + main_chain_element->Length = + cpu_to_le32(num_prp_in_chain * sizeof(u64)); + + return build_prp; +} + +/** + * megasas_make_sgl_fusion - Prepares 32-bit SGL + * @instance: Adapter soft state + * @scp: SCSI command from the mid-layer + * @sgl_ptr: SGL to be filled in + * @cmd: cmd we are working on + * @sge_count: sge count + * + */ +static void +megasas_make_sgl_fusion(struct megasas_instance *instance, + struct scsi_cmnd *scp, + struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, + struct megasas_cmd_fusion *cmd, int sge_count) +{ + int i, sg_processed; + struct scatterlist *os_sgl; + struct fusion_context *fusion; + + fusion = instance->ctrl_context; + + if (instance->adapter_type >= INVADER_SERIES) { + struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr; + sgl_ptr_end += fusion->max_sge_in_main_msg - 1; + sgl_ptr_end->Flags = 0; + } + + scsi_for_each_sg(scp, os_sgl, sge_count, i) { + sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl)); + sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl)); + sgl_ptr->Flags = 0; + if (instance->adapter_type >= INVADER_SERIES) + if (i == sge_count - 1) + sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST; + sgl_ptr++; + sg_processed = i + 1; + + if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) && + (sge_count > fusion->max_sge_in_main_msg)) { + + struct MPI25_IEEE_SGE_CHAIN64 *sg_chain; + if (instance->adapter_type >= INVADER_SERIES) { + if ((le16_to_cpu(cmd->io_request->IoFlags) & + 
MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) + cmd->io_request->ChainOffset = + fusion-> + chain_offset_io_request; + else + cmd->io_request->ChainOffset = 0; + } else + cmd->io_request->ChainOffset = + fusion->chain_offset_io_request; + + sg_chain = sgl_ptr; + /* Prepare chain element */ + sg_chain->NextChainOffset = 0; + if (instance->adapter_type >= INVADER_SERIES) + sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT; + else + sg_chain->Flags = + (IEEE_SGE_FLAGS_CHAIN_ELEMENT | + MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); + sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed))); + sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr); + + sgl_ptr = + (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; + memset(sgl_ptr, 0, instance->max_chain_frame_sz); + } + } +} + +/** + * megasas_make_sgl - Build Scatter Gather List(SGLs) + * @scp: SCSI command pointer + * @instance: Soft instance of controller + * @cmd: Fusion command pointer + * + * This function will build sgls based on device type. + * For nvme drives, there is different way of building sgls in nvme native + * format- PRPs(Physical Region Page). + * + * Returns the number of sg lists actually used, zero if the sg lists + * is NULL, or -ENOMEM if the mapping failed + */ +static +int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp, + struct megasas_cmd_fusion *cmd) +{ + int sge_count; + bool build_prp = false; + struct MPI25_IEEE_SGE_CHAIN64 *sgl_chain64; + + sge_count = scsi_dma_map(scp); + + if ((sge_count > instance->max_num_sge) || (sge_count <= 0)) + return sge_count; + + sgl_chain64 = (struct MPI25_IEEE_SGE_CHAIN64 *)&cmd->io_request->SGL; + if ((le16_to_cpu(cmd->io_request->IoFlags) & + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) && + (cmd->pd_interface == NVME_PD)) + build_prp = megasas_make_prp_nvme(instance, scp, sgl_chain64, + cmd, sge_count); + + if (!build_prp) + megasas_make_sgl_fusion(instance, scp, sgl_chain64, + cmd, sge_count); + + return sge_count; +} + +/** + * megasas_set_pd_lba - Sets PD LBA + * @io_request: IO request + * @cdb_len: cdb length + * @io_info: IO information + * @scp: SCSI command + * @local_map_ptr: Raid map + * @ref_tag: Primary reference tag + * + * Used to set the PD LBA in CDB for FP IOs + */ +static void +megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, + struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp, + struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) +{ + struct MR_LD_RAID *raid; + u16 ld; + u64 start_blk = io_info->pdBlock; + u8 *cdb = io_request->CDB.CDB32; + u32 num_blocks = io_info->numBlocks; + u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0; + + /* Check if T10 PI (DIF) is enabled for this LD */ + ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr); + raid = MR_LdRaidGet(ld, local_map_ptr); + if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) { + memset(cdb, 0, sizeof(io_request->CDB.CDB32)); + cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD; + cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN; + + if (scp->sc_data_direction == DMA_FROM_DEVICE) + cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32; + else + cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32; + cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL; + + /* LBA */ + cdb[12] = (u8)((start_blk >> 56) & 0xff); + cdb[13] = (u8)((start_blk >> 48) & 0xff); + cdb[14] = (u8)((start_blk >> 40) & 0xff); + cdb[15] = (u8)((start_blk >> 32) & 0xff); + cdb[16] = (u8)((start_blk >> 24) & 0xff); + cdb[17] = 
(u8)((start_blk >> 16) & 0xff); + cdb[18] = (u8)((start_blk >> 8) & 0xff); + cdb[19] = (u8)(start_blk & 0xff); + + /* Logical block reference tag */ + io_request->CDB.EEDP32.PrimaryReferenceTag = + cpu_to_be32(ref_tag); + io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff); + io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */ + + /* Transfer length */ + cdb[28] = (u8)((num_blocks >> 24) & 0xff); + cdb[29] = (u8)((num_blocks >> 16) & 0xff); + cdb[30] = (u8)((num_blocks >> 8) & 0xff); + cdb[31] = (u8)(num_blocks & 0xff); + + /* set SCSI IO EEDPFlags */ + if (scp->sc_data_direction == DMA_FROM_DEVICE) { + io_request->EEDPFlags = cpu_to_le16( + MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | + MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | + MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE | + MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); + } else { + io_request->EEDPFlags = cpu_to_le16( + MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_INSERT_OP); + } + io_request->Control |= cpu_to_le32((0x4 << 26)); + io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size); + } else { + /* Some drives don't support 16/12 byte CDB's, convert to 10 */ + if (((cdb_len == 12) || (cdb_len == 16)) && + (start_blk <= 0xffffffff)) { + if (cdb_len == 16) { + opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10; + flagvals = cdb[1]; + groupnum = cdb[14]; + control = cdb[15]; + } else { + opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10; + flagvals = cdb[1]; + groupnum = cdb[10]; + control = cdb[11]; + } + + memset(cdb, 0, sizeof(io_request->CDB.CDB32)); + + cdb[0] = opcode; + cdb[1] = flagvals; + cdb[6] = groupnum; + cdb[9] = control; + + /* Transfer length */ + cdb[8] = (u8)(num_blocks & 0xff); + cdb[7] = (u8)((num_blocks >> 8) & 0xff); + + io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */ + cdb_len = 10; + } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) { + /* Convert to 16 byte CDB for large LBA's */ + switch (cdb_len) { + case 6: + opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16; + control = cdb[5]; + break; + case 10: + opcode = + cdb[0] == READ_10 ? READ_16 : WRITE_16; + flagvals = cdb[1]; + groupnum = cdb[6]; + control = cdb[9]; + break; + case 12: + opcode = + cdb[0] == READ_12 ? 
READ_16 : WRITE_16; + flagvals = cdb[1]; + groupnum = cdb[10]; + control = cdb[11]; + break; + } + + memset(cdb, 0, sizeof(io_request->CDB.CDB32)); + + cdb[0] = opcode; + cdb[1] = flagvals; + cdb[14] = groupnum; + cdb[15] = control; + + /* Transfer length */ + cdb[13] = (u8)(num_blocks & 0xff); + cdb[12] = (u8)((num_blocks >> 8) & 0xff); + cdb[11] = (u8)((num_blocks >> 16) & 0xff); + cdb[10] = (u8)((num_blocks >> 24) & 0xff); + + io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */ + cdb_len = 16; + } + + /* Normal case, just load LBA here */ + switch (cdb_len) { + case 6: + { + u8 val = cdb[1] & 0xE0; + cdb[3] = (u8)(start_blk & 0xff); + cdb[2] = (u8)((start_blk >> 8) & 0xff); + cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f); + break; + } + case 10: + cdb[5] = (u8)(start_blk & 0xff); + cdb[4] = (u8)((start_blk >> 8) & 0xff); + cdb[3] = (u8)((start_blk >> 16) & 0xff); + cdb[2] = (u8)((start_blk >> 24) & 0xff); + break; + case 12: + cdb[5] = (u8)(start_blk & 0xff); + cdb[4] = (u8)((start_blk >> 8) & 0xff); + cdb[3] = (u8)((start_blk >> 16) & 0xff); + cdb[2] = (u8)((start_blk >> 24) & 0xff); + break; + case 16: + cdb[9] = (u8)(start_blk & 0xff); + cdb[8] = (u8)((start_blk >> 8) & 0xff); + cdb[7] = (u8)((start_blk >> 16) & 0xff); + cdb[6] = (u8)((start_blk >> 24) & 0xff); + cdb[5] = (u8)((start_blk >> 32) & 0xff); + cdb[4] = (u8)((start_blk >> 40) & 0xff); + cdb[3] = (u8)((start_blk >> 48) & 0xff); + cdb[2] = (u8)((start_blk >> 56) & 0xff); + break; + } + } +} + +/** + * megasas_stream_detect - stream detection on read and write IOs + * @instance: Adapter soft state + * @cmd: Command to be prepared + * @io_info: IO Request info + * + */ + +/** stream detection on read and write IOs */ +static void megasas_stream_detect(struct megasas_instance *instance, + struct megasas_cmd_fusion *cmd, + struct IO_REQUEST_INFO *io_info) +{ + struct fusion_context *fusion = instance->ctrl_context; + u32 device_id = io_info->ldTgtId; + struct LD_STREAM_DETECT *current_ld_sd + = fusion->stream_detect_by_ld[device_id]; + u32 *track_stream = &current_ld_sd->mru_bit_map, stream_num; + u32 shifted_values, unshifted_values; + u32 index_value_mask, shifted_values_mask; + int i; + bool is_read_ahead = false; + struct STREAM_DETECT *current_sd; + /* find possible stream */ + for (i = 0; i < MAX_STREAMS_TRACKED; ++i) { + stream_num = (*track_stream >> + (i * BITS_PER_INDEX_STREAM)) & + STREAM_MASK; + current_sd = &current_ld_sd->stream_track[stream_num]; + /* if we found a stream, update the raid + * context and also update the mruBitMap + */ + /* boundary condition */ + if ((current_sd->next_seq_lba) && + (io_info->ldStartBlock >= current_sd->next_seq_lba) && + (io_info->ldStartBlock <= (current_sd->next_seq_lba + 32)) && + (current_sd->is_read == io_info->isRead)) { + + if ((io_info->ldStartBlock != current_sd->next_seq_lba) && + ((!io_info->isRead) || (!is_read_ahead))) + /* + * Once the API is available we need to change this.
+ * At this point we are not allowing any gap + */ + continue; + + SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35); + current_sd->next_seq_lba = + io_info->ldStartBlock + io_info->numBlocks; + /* + * update the mruBitMap LRU + */ + shifted_values_mask = + (1 << i * BITS_PER_INDEX_STREAM) - 1; + shifted_values = ((*track_stream & shifted_values_mask) + << BITS_PER_INDEX_STREAM); + index_value_mask = + STREAM_MASK << i * BITS_PER_INDEX_STREAM; + unshifted_values = + *track_stream & ~(shifted_values_mask | + index_value_mask); + *track_stream = + unshifted_values | shifted_values | stream_num; + return; + } + } + /* + * if we did not find any stream, create a new one + * from the least recently used + */ + stream_num = (*track_stream >> + ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) & + STREAM_MASK; + current_sd = &current_ld_sd->stream_track[stream_num]; + current_sd->is_read = io_info->isRead; + current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks; + *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num); + return; +} + +/** + * megasas_set_raidflag_cpu_affinity - This function sets the cpu + * affinity (cpu of the controller) and raid_flags in the raid context + * based on IO type. + * + * @fusion: Fusion context + * @praid_context: IO RAID context + * @raid: LD raid map + * @fp_possible: Is fast path possible? + * @is_read: Is read IO? + * @scsi_buff_len: SCSI command buffer length + * + */ +static void +megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion, + union RAID_CONTEXT_UNION *praid_context, + struct MR_LD_RAID *raid, bool fp_possible, + u8 is_read, u32 scsi_buff_len) +{ + u8 cpu_sel = MR_RAID_CTX_CPUSEL_0; + struct RAID_CONTEXT_G35 *rctx_g35; + + rctx_g35 = &praid_context->raid_context_g35; + if (fp_possible) { + if (is_read) { + if ((raid->cpuAffinity.pdRead.cpu0) && + (raid->cpuAffinity.pdRead.cpu1)) + cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; + else if (raid->cpuAffinity.pdRead.cpu1) + cpu_sel = MR_RAID_CTX_CPUSEL_1; + } else { + if ((raid->cpuAffinity.pdWrite.cpu0) && + (raid->cpuAffinity.pdWrite.cpu1)) + cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; + else if (raid->cpuAffinity.pdWrite.cpu1) + cpu_sel = MR_RAID_CTX_CPUSEL_1; + /* Fast path cache by pass capable R0/R1 VD */ + if ((raid->level <= 1) && + (raid->capability.fp_cache_bypass_capable)) { + rctx_g35->routing_flags |= + (1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT); + rctx_g35->raid_flags = + (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS + << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); + } + } + } else { + if (is_read) { + if ((raid->cpuAffinity.ldRead.cpu0) && + (raid->cpuAffinity.ldRead.cpu1)) + cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; + else if (raid->cpuAffinity.ldRead.cpu1) + cpu_sel = MR_RAID_CTX_CPUSEL_1; + } else { + if ((raid->cpuAffinity.ldWrite.cpu0) && + (raid->cpuAffinity.ldWrite.cpu1)) + cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; + else if (raid->cpuAffinity.ldWrite.cpu1) + cpu_sel = MR_RAID_CTX_CPUSEL_1; + + if (is_stream_detected(rctx_g35) && + ((raid->level == 5) || (raid->level == 6)) && + (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) && + (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS)) + cpu_sel = MR_RAID_CTX_CPUSEL_0; + } + } + + rctx_g35->routing_flags |= + (cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT); + + /* Always give priority to MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT + * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS. + * IO Subtype is not bitmap.
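+ * A later LDIO_BW_LIMIT assignment below therefore simply overwrites any + * CACHE_BYPASS sub-type that was set above.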
+ */ + if ((fusion->pcie_bw_limitation) && (raid->level == 1) && (!is_read) && + (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)) { + praid_context->raid_context_g35.raid_flags = + (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT + << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); + } +} + +/** + * megasas_build_ldio_fusion - Prepares IOs to devices + * @instance: Adapter soft state + * @scp: SCSI command + * @cmd: Command to be prepared + * + * Prepares the io_request and chain elements (sg_frame) for IO + * The IO can be for PD (Fast Path) or LD + */ +static void +megasas_build_ldio_fusion(struct megasas_instance *instance, + struct scsi_cmnd *scp, + struct megasas_cmd_fusion *cmd) +{ + bool fp_possible; + u16 ld; + u32 start_lba_lo, start_lba_hi, device_id, datalength = 0; + u32 scsi_buff_len; + struct MPI2_RAID_SCSI_IO_REQUEST *io_request; + struct IO_REQUEST_INFO io_info; + struct fusion_context *fusion; + struct MR_DRV_RAID_MAP_ALL *local_map_ptr; + u8 *raidLUN; + unsigned long spinlock_flags; + struct MR_LD_RAID *raid = NULL; + struct MR_PRIV_DEVICE *mrdev_priv; + struct RAID_CONTEXT *rctx; + struct RAID_CONTEXT_G35 *rctx_g35; + + device_id = MEGASAS_DEV_INDEX(scp); + + fusion = instance->ctrl_context; + + io_request = cmd->io_request; + rctx = &io_request->RaidContext.raid_context; + rctx_g35 = &io_request->RaidContext.raid_context_g35; + + rctx->virtual_disk_tgt_id = cpu_to_le16(device_id); + rctx->status = 0; + rctx->ex_status = 0; + + start_lba_lo = 0; + start_lba_hi = 0; + fp_possible = false; + + /* + * 6-byte READ(0x08) or WRITE(0x0A) cdb + */ + if (scp->cmd_len == 6) { + datalength = (u32) scp->cmnd[4]; + start_lba_lo = ((u32) scp->cmnd[1] << 16) | + ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; + + start_lba_lo &= 0x1FFFFF; + } + + /* + * 10-byte READ(0x28) or WRITE(0x2A) cdb + */ + else if (scp->cmd_len == 10) { + datalength = (u32) scp->cmnd[8] | + ((u32) scp->cmnd[7] << 8); + start_lba_lo = ((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; + } + + /* + * 12-byte READ(0xA8) or WRITE(0xAA) cdb + */ + else if (scp->cmd_len == 12) { + datalength = ((u32) scp->cmnd[6] << 24) | + ((u32) scp->cmnd[7] << 16) | + ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; + start_lba_lo = ((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; + } + + /* + * 16-byte READ(0x88) or WRITE(0x8A) cdb + */ + else if (scp->cmd_len == 16) { + datalength = ((u32) scp->cmnd[10] << 24) | + ((u32) scp->cmnd[11] << 16) | + ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; + start_lba_lo = ((u32) scp->cmnd[6] << 24) | + ((u32) scp->cmnd[7] << 16) | + ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; + + start_lba_hi = ((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; + } + + memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO)); + io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; + io_info.numBlocks = datalength; + io_info.ldTgtId = device_id; + io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID; + scsi_buff_len = scsi_bufflen(scp); + io_request->DataLength = cpu_to_le32(scsi_buff_len); + io_info.data_arms = 1; + + if (scp->sc_data_direction == DMA_FROM_DEVICE) + io_info.isRead = 1; + + local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; + ld = MR_TargetIdToLdGet(device_id, local_map_ptr); + + if (ld < instance->fw_supported_vd_count) + raid = MR_LdRaidGet(ld, local_map_ptr); + + if (!raid || (!fusion->fast_path_io)) { + 
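/* No RAID map entry for this target or fast path I/O is disabled: clear the region lock flags and fall back to the firmware (LD) path. */ +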
rctx->reg_lock_flags = 0; + fp_possible = false; + } else { + if (MR_BuildRaidContext(instance, &io_info, rctx, + local_map_ptr, &raidLUN)) + fp_possible = (io_info.fpOkForIo > 0) ? true : false; + } + + megasas_get_msix_index(instance, scp, cmd, io_info.data_arms); + + if (instance->adapter_type >= VENTURA_SERIES) { + /* FP for Optimal raid level 1. + * All large RAID-1 writes (> 32 KiB, both WT and WB modes) + * are built by the driver as LD I/Os. + * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os + * (there is never a reason to process these as buffered writes) + * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os + * with the SLD bit asserted. + */ + if (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { + mrdev_priv = scp->device->hostdata; + + if (atomic_inc_return(&instance->fw_outstanding) > + (instance->host->can_queue)) { + fp_possible = false; + atomic_dec(&instance->fw_outstanding); + } else if (fusion->pcie_bw_limitation && + ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) || + (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0))) { + fp_possible = false; + atomic_dec(&instance->fw_outstanding); + if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE) + atomic_set(&mrdev_priv->r1_ldio_hint, + instance->r1_ldio_hint_default); + } + } + + if (!fp_possible || + (io_info.isRead && io_info.ra_capable)) { + spin_lock_irqsave(&instance->stream_lock, + spinlock_flags); + megasas_stream_detect(instance, cmd, &io_info); + spin_unlock_irqrestore(&instance->stream_lock, + spinlock_flags); + /* In ventura if stream detected for a read and it is + * read ahead capable make this IO as LDIO + */ + if (is_stream_detected(rctx_g35)) + fp_possible = false; + } + + /* If raid is NULL, set CPU affinity to default CPU0 */ + if (raid) + megasas_set_raidflag_cpu_affinity(fusion, &io_request->RaidContext, + raid, fp_possible, io_info.isRead, + scsi_buff_len); + else + rctx_g35->routing_flags |= + (MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT); + } + + if (fp_possible) { + megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, + local_map_ptr, start_lba_lo); + io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + cmd->request_desc->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_FP_IO + << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + if (instance->adapter_type == INVADER_SERIES) { + rctx->type = MPI2_TYPE_CUDA; + rctx->nseg = 0x1; + io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); + rctx->reg_lock_flags |= + (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | + MR_RL_FLAGS_SEQ_NUM_ENABLE); + } else if (instance->adapter_type >= VENTURA_SERIES) { + rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT); + rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); + rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); + io_request->IoFlags |= + cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); + } + if (fusion->load_balance_info && + (fusion->load_balance_info[device_id].loadBalanceFlag) && + (io_info.isRead)) { + io_info.devHandle = + get_updated_dev_handle(instance, + &fusion->load_balance_info[device_id], + &io_info, local_map_ptr); + megasas_priv(scp)->status |= MEGASAS_LOAD_BALANCE_FLAG; + cmd->pd_r1_lb = io_info.pd_after_lb; + if (instance->adapter_type >= VENTURA_SERIES) + rctx_g35->span_arm = io_info.span_arm; + else + rctx->span_arm = io_info.span_arm; + + } else + megasas_priv(scp)->status &= ~MEGASAS_LOAD_BALANCE_FLAG; + + if (instance->adapter_type >= VENTURA_SERIES) + cmd->r1_alt_dev_handle = 
io_info.r1_alt_dev_handle; + else + cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; + + if ((raidLUN[0] == 1) && + (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) { + instance->dev_handle = !(instance->dev_handle); + io_info.devHandle = + local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle]; + } + + cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; + io_request->DevHandle = io_info.devHandle; + cmd->pd_interface = io_info.pd_interface; + /* populate the LUN field */ + memcpy(io_request->LUN, raidLUN, 8); + } else { + rctx->timeout_value = + cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); + cmd->request_desc->SCSIIO.RequestFlags = + (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO + << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + if (instance->adapter_type == INVADER_SERIES) { + if (io_info.do_fp_rlbypass || + (rctx->reg_lock_flags == REGION_TYPE_UNUSED)) + cmd->request_desc->SCSIIO.RequestFlags = + (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + rctx->type = MPI2_TYPE_CUDA; + rctx->reg_lock_flags |= + (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | + MR_RL_FLAGS_SEQ_NUM_ENABLE); + rctx->nseg = 0x1; + } else if (instance->adapter_type >= VENTURA_SERIES) { + rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); + rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT); + rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); + } + io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; + io_request->DevHandle = cpu_to_le16(device_id); + + } /* Not FP */ +} + +/** + * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk + * @instance: Adapter soft state + * @scmd: SCSI command + * @cmd: Command to be prepared + * + * Prepares the io_request frame for non-rw io cmds for vd. 
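+ * Such commands use the fast path only when the LD is fpNonRWCapable + * (a single drive RAID0); otherwise they are sent to firmware.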
+ */ +static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, + struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd) +{ + u32 device_id; + struct MPI2_RAID_SCSI_IO_REQUEST *io_request; + u16 ld; + struct MR_DRV_RAID_MAP_ALL *local_map_ptr; + struct fusion_context *fusion = instance->ctrl_context; + u8 span, physArm; + __le16 devHandle; + u32 arRef, pd; + struct MR_LD_RAID *raid; + struct RAID_CONTEXT *pRAID_Context; + u8 fp_possible = 1; + + io_request = cmd->io_request; + device_id = MEGASAS_DEV_INDEX(scmd); + local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; + io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); + /* get RAID_Context pointer */ + pRAID_Context = &io_request->RaidContext.raid_context; + /* Check with FW team */ + pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); + pRAID_Context->reg_lock_row_lba = 0; + pRAID_Context->reg_lock_length = 0; + + if (fusion->fast_path_io && ( + device_id < instance->fw_supported_vd_count)) { + + ld = MR_TargetIdToLdGet(device_id, local_map_ptr); + if (ld >= instance->fw_supported_vd_count - 1) + fp_possible = 0; + else { + raid = MR_LdRaidGet(ld, local_map_ptr); + if (!(raid->capability.fpNonRWCapable)) + fp_possible = 0; + } + } else + fp_possible = 0; + + if (!fp_possible) { + io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; + io_request->DevHandle = cpu_to_le16(device_id); + io_request->LUN[1] = scmd->device->lun; + pRAID_Context->timeout_value = + cpu_to_le16(scsi_cmd_to_rq(scmd)->timeout / HZ); + cmd->request_desc->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + } else { + + /* set RAID context values */ + pRAID_Context->config_seq_num = raid->seqNum; + if (instance->adapter_type < VENTURA_SERIES) + pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ; + pRAID_Context->timeout_value = + cpu_to_le16(raid->fpIoTimeoutForLd); + + /* get the DevHandle for the PD (since this is + fpNonRWCapable, this is a single disk RAID0) */ + span = physArm = 0; + arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr); + pd = MR_ArPdGet(arRef, physArm, local_map_ptr); + devHandle = MR_PdDevHandleGet(pd, local_map_ptr); + + /* build request descriptor */ + cmd->request_desc->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + cmd->request_desc->SCSIIO.DevHandle = devHandle; + + /* populate the LUN field */ + memcpy(io_request->LUN, raid->LUN, 8); + + /* build the raidScsiIO structure */ + io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + io_request->DevHandle = devHandle; + } +} + +/** + * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd + * @instance: Adapter soft state + * @scmd: SCSI command + * @cmd: Command to be prepared + * @fp_possible: parameter to detect fast path or firmware path io. 
+ * + * Prepares the io_request frame for rw/non-rw io cmds for syspds + */ +static void +megasas_build_syspd_fusion(struct megasas_instance *instance, + struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, + bool fp_possible) +{ + u32 device_id; + struct MPI2_RAID_SCSI_IO_REQUEST *io_request; + u16 pd_index = 0; + u16 os_timeout_value; + u16 timeout_limit; + struct MR_DRV_RAID_MAP_ALL *local_map_ptr; + struct RAID_CONTEXT *pRAID_Context; + struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; + struct MR_PRIV_DEVICE *mr_device_priv_data; + struct fusion_context *fusion = instance->ctrl_context; + pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1]; + + device_id = MEGASAS_DEV_INDEX(scmd); + pd_index = MEGASAS_PD_INDEX(scmd); + os_timeout_value = scsi_cmd_to_rq(scmd)->timeout / HZ; + mr_device_priv_data = scmd->device->hostdata; + cmd->pd_interface = mr_device_priv_data->interface_type; + + io_request = cmd->io_request; + /* get RAID_Context pointer */ + pRAID_Context = &io_request->RaidContext.raid_context; + pRAID_Context->reg_lock_flags = 0; + pRAID_Context->reg_lock_row_lba = 0; + pRAID_Context->reg_lock_length = 0; + io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); + io_request->LUN[1] = scmd->device->lun; + pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD + << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; + + /* If FW supports PD sequence number */ + if (instance->support_seqnum_jbod_fp) { + if (instance->use_seqnum_jbod_fp && + instance->pd_list[pd_index].driveType == TYPE_DISK) { + + /* More than 256 PD/JBOD support for Ventura */ + if (instance->support_morethan256jbod) + pRAID_Context->virtual_disk_tgt_id = + pd_sync->seq[pd_index].pd_target_id; + else + pRAID_Context->virtual_disk_tgt_id = + cpu_to_le16(device_id + + (MAX_PHYSICAL_DEVICES - 1)); + pRAID_Context->config_seq_num = + pd_sync->seq[pd_index].seqNum; + io_request->DevHandle = + pd_sync->seq[pd_index].devHandle; + if (instance->adapter_type >= VENTURA_SERIES) { + io_request->RaidContext.raid_context_g35.routing_flags |= + (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); + io_request->RaidContext.raid_context_g35.nseg_type |= + (1 << RAID_CONTEXT_NSEG_SHIFT); + io_request->RaidContext.raid_context_g35.nseg_type |= + (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); + } else { + pRAID_Context->type = MPI2_TYPE_CUDA; + pRAID_Context->nseg = 0x1; + pRAID_Context->reg_lock_flags |= + (MR_RL_FLAGS_SEQ_NUM_ENABLE | + MR_RL_FLAGS_GRANT_DESTINATION_CUDA); + } + } else { + pRAID_Context->virtual_disk_tgt_id = + cpu_to_le16(device_id + + (MAX_PHYSICAL_DEVICES - 1)); + pRAID_Context->config_seq_num = 0; + io_request->DevHandle = cpu_to_le16(0xFFFF); + } + } else { + pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); + pRAID_Context->config_seq_num = 0; + + if (fusion->fast_path_io) { + local_map_ptr = + fusion->ld_drv_map[(instance->map_id & 1)]; + io_request->DevHandle = + local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; + } else { + io_request->DevHandle = cpu_to_le16(0xFFFF); + } + } + + cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; + + megasas_get_msix_index(instance, scmd, cmd, 1); + + if (!fp_possible) { + /* system pd firmware path */ + io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; + cmd->request_desc->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value); + pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); + } else { + if 
(os_timeout_value) + os_timeout_value++; + + /* system pd Fast Path */ + io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + timeout_limit = (scmd->device->type == TYPE_DISK) ? + 255 : 0xFFFF; + pRAID_Context->timeout_value = + cpu_to_le16((os_timeout_value > timeout_limit) ? + timeout_limit : os_timeout_value); + if (instance->adapter_type >= INVADER_SERIES) + io_request->IoFlags |= + cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); + + cmd->request_desc->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + } +} + +/** + * megasas_build_io_fusion - Prepares IOs to devices + * @instance: Adapter soft state + * @scp: SCSI command + * @cmd: Command to be prepared + * + * Invokes helper functions to prepare request frames + * and sets flags appropriate for IO/Non-IO cmd + */ +static int +megasas_build_io_fusion(struct megasas_instance *instance, + struct scsi_cmnd *scp, + struct megasas_cmd_fusion *cmd) +{ + int sge_count; + u16 pd_index = 0; + u8 drive_type = 0; + struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request; + struct MR_PRIV_DEVICE *mr_device_priv_data; + mr_device_priv_data = scp->device->hostdata; + + /* Zero out some fields so they don't get reused */ + memset(io_request->LUN, 0x0, 8); + io_request->CDB.EEDP32.PrimaryReferenceTag = 0; + io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0; + io_request->EEDPFlags = 0; + io_request->Control = 0; + io_request->EEDPBlockSize = 0; + io_request->ChainOffset = 0; + io_request->RaidContext.raid_context.raid_flags = 0; + io_request->RaidContext.raid_context.type = 0; + io_request->RaidContext.raid_context.nseg = 0; + + memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len); + /* + * Just the CDB length,rest of the Flags are zero + * This will be modified for FP in build_ldio_fusion + */ + io_request->IoFlags = cpu_to_le16(scp->cmd_len); + + switch (megasas_cmd_type(scp)) { + case READ_WRITE_LDIO: + megasas_build_ldio_fusion(instance, scp, cmd); + break; + case NON_READ_WRITE_LDIO: + megasas_build_ld_nonrw_fusion(instance, scp, cmd); + break; + case READ_WRITE_SYSPDIO: + megasas_build_syspd_fusion(instance, scp, cmd, true); + break; + case NON_READ_WRITE_SYSPDIO: + pd_index = MEGASAS_PD_INDEX(scp); + drive_type = instance->pd_list[pd_index].driveType; + if ((instance->secure_jbod_support || + mr_device_priv_data->is_tm_capable) || + (instance->adapter_type >= VENTURA_SERIES && + drive_type == TYPE_ENCLOSURE)) + megasas_build_syspd_fusion(instance, scp, cmd, false); + else + megasas_build_syspd_fusion(instance, scp, cmd, true); + break; + default: + break; + } + + /* + * Construct SGL + */ + + sge_count = megasas_make_sgl(instance, scp, cmd); + + if (sge_count > instance->max_num_sge || (sge_count < 0)) { + dev_err(&instance->pdev->dev, + "%s %d sge_count (%d) is out of range. Range is: 0-%d\n", + __func__, __LINE__, sge_count, instance->max_num_sge); + return 1; + } + + if (instance->adapter_type >= VENTURA_SERIES) { + set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count); + cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags); + cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type); + } else { + /* numSGE store lower 8 bit of sge_count. 
+ * numSGEExt store higher 8 bit of sge_count + */ + io_request->RaidContext.raid_context.num_sge = sge_count; + io_request->RaidContext.raid_context.num_sge_ext = + (u8)(sge_count >> 8); + } + + io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING); + + if (scp->sc_data_direction == DMA_TO_DEVICE) + io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE); + else if (scp->sc_data_direction == DMA_FROM_DEVICE) + io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ); + + io_request->SGLOffset0 = + offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4; + + io_request->SenseBufferLowAddress = + cpu_to_le32(lower_32_bits(cmd->sense_phys_addr)); + io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; + + cmd->scmd = scp; + megasas_priv(scp)->cmd_priv = cmd; + + return 0; +} + +static union MEGASAS_REQUEST_DESCRIPTOR_UNION * +megasas_get_request_descriptor(struct megasas_instance *instance, u16 index) +{ + u8 *p; + struct fusion_context *fusion; + + fusion = instance->ctrl_context; + p = fusion->req_frames_desc + + sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index; + + return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p; +} + + +/* megasas_prepate_secondRaid1_IO + * It prepares the raid 1 second IO + */ +static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance, + struct megasas_cmd_fusion *cmd, + struct megasas_cmd_fusion *r1_cmd) +{ + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL; + struct fusion_context *fusion; + fusion = instance->ctrl_context; + req_desc = cmd->request_desc; + /* copy the io request frame as well as 8 SGEs data for r1 command*/ + memcpy(r1_cmd->io_request, cmd->io_request, + (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST))); + memcpy(r1_cmd->io_request->SGLs, cmd->io_request->SGLs, + (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION))); + /*sense buffer is different for r1 command*/ + r1_cmd->io_request->SenseBufferLowAddress = + cpu_to_le32(lower_32_bits(r1_cmd->sense_phys_addr)); + r1_cmd->scmd = cmd->scmd; + req_desc2 = megasas_get_request_descriptor(instance, + (r1_cmd->index - 1)); + req_desc2->Words = 0; + r1_cmd->request_desc = req_desc2; + req_desc2->SCSIIO.SMID = cpu_to_le16(r1_cmd->index); + req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags; + r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle; + r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle; + r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle; + cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid = + cpu_to_le16(r1_cmd->index); + r1_cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid = + cpu_to_le16(cmd->index); + /*MSIxIndex of both commands request descriptors should be same*/ + r1_cmd->request_desc->SCSIIO.MSIxIndex = + cmd->request_desc->SCSIIO.MSIxIndex; + /*span arm is different for r1 cmd*/ + r1_cmd->io_request->RaidContext.raid_context_g35.span_arm = + cmd->io_request->RaidContext.raid_context_g35.span_arm + 1; +} + +/** + * megasas_build_and_issue_cmd_fusion -Main routine for building and + * issuing non IOCTL cmd + * @instance: Adapter soft state + * @scmd: pointer to scsi cmd from OS + */ +static u32 +megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, + struct scsi_cmnd *scmd) +{ + struct megasas_cmd_fusion *cmd, *r1_cmd = NULL; + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; + u32 index; + + if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) && + instance->ldio_threshold && + (atomic_inc_return(&instance->ldio_outstanding) > + 
instance->ldio_threshold)) { + atomic_dec(&instance->ldio_outstanding); + return SCSI_MLQUEUE_DEVICE_BUSY; + } + + if (atomic_inc_return(&instance->fw_outstanding) > + instance->host->can_queue) { + atomic_dec(&instance->fw_outstanding); + return SCSI_MLQUEUE_HOST_BUSY; + } + + cmd = megasas_get_cmd_fusion(instance, scsi_cmd_to_rq(scmd)->tag); + + if (!cmd) { + atomic_dec(&instance->fw_outstanding); + return SCSI_MLQUEUE_HOST_BUSY; + } + + index = cmd->index; + + req_desc = megasas_get_request_descriptor(instance, index-1); + + req_desc->Words = 0; + cmd->request_desc = req_desc; + + if (megasas_build_io_fusion(instance, scmd, cmd)) { + megasas_return_cmd_fusion(instance, cmd); + dev_err(&instance->pdev->dev, "Error building command\n"); + cmd->request_desc = NULL; + atomic_dec(&instance->fw_outstanding); + return SCSI_MLQUEUE_HOST_BUSY; + } + + req_desc = cmd->request_desc; + req_desc->SCSIIO.SMID = cpu_to_le16(index); + + if (cmd->io_request->ChainOffset != 0 && + cmd->io_request->ChainOffset != 0xF) + dev_err(&instance->pdev->dev, "The chain offset value is not " + "correct : %x\n", cmd->io_request->ChainOffset); + /* + * if it is raid 1/10 fp write capable. + * try to get second command from pool and construct it. + * From FW, it has confirmed that lba values of two PDs + * corresponds to single R1/10 LD are always same + * + */ + /* driver side count always should be less than max_fw_cmds + * to get new command + */ + if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { + r1_cmd = megasas_get_cmd_fusion(instance, + scsi_cmd_to_rq(scmd)->tag + instance->max_fw_cmds); + megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd); + } + + + /* + * Issue the command to the FW + */ + + megasas_sdev_busy_inc(instance, scmd); + megasas_fire_cmd_fusion(instance, req_desc); + + if (r1_cmd) + megasas_fire_cmd_fusion(instance, r1_cmd->request_desc); + + + return 0; +} + +/** + * megasas_complete_r1_command - + * completes R1 FP write commands which has valid peer smid + * @instance: Adapter soft state + * @cmd: MPT command frame + * + */ +static inline void +megasas_complete_r1_command(struct megasas_instance *instance, + struct megasas_cmd_fusion *cmd) +{ + u8 *sense, status, ex_status; + u32 data_length; + u16 peer_smid; + struct fusion_context *fusion; + struct megasas_cmd_fusion *r1_cmd = NULL; + struct scsi_cmnd *scmd_local = NULL; + struct RAID_CONTEXT_G35 *rctx_g35; + + rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35; + fusion = instance->ctrl_context; + peer_smid = le16_to_cpu(rctx_g35->flow_specific.peer_smid); + + r1_cmd = fusion->cmd_list[peer_smid - 1]; + scmd_local = cmd->scmd; + status = rctx_g35->status; + ex_status = rctx_g35->ex_status; + data_length = cmd->io_request->DataLength; + sense = cmd->sense; + + cmd->cmd_completed = true; + + /* Check if peer command is completed or not*/ + if (r1_cmd->cmd_completed) { + rctx_g35 = &r1_cmd->io_request->RaidContext.raid_context_g35; + if (rctx_g35->status != MFI_STAT_OK) { + status = rctx_g35->status; + ex_status = rctx_g35->ex_status; + data_length = r1_cmd->io_request->DataLength; + sense = r1_cmd->sense; + } + + megasas_return_cmd_fusion(instance, r1_cmd); + map_cmd_status(fusion, scmd_local, status, ex_status, + le32_to_cpu(data_length), sense); + if (instance->ldio_threshold && + megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) + atomic_dec(&instance->ldio_outstanding); + megasas_priv(scmd_local)->cmd_priv = NULL; + megasas_return_cmd_fusion(instance, cmd); + scsi_dma_unmap(scmd_local); + megasas_sdev_busy_dec(instance, 
scmd_local); + scsi_done(scmd_local); + } +} + +/** + * access_irq_context: Access to reply processing + * @irq_context: IRQ context + * + * Synchronize access to reply processing. + * + * Return: true on success, false on failure. + */ +static inline +bool access_irq_context(struct megasas_irq_context *irq_context) +{ + if (!irq_context) + return true; + + if (atomic_add_unless(&irq_context->in_used, 1, 1)) + return true; + + return false; +} + +/** + * release_irq_context: Release reply processing + * @irq_context: IRQ context + * + * Release access of reply processing. + * + * Return: Nothing. + */ +static inline +void release_irq_context(struct megasas_irq_context *irq_context) +{ + if (irq_context) + atomic_dec(&irq_context->in_used); +} + +/** + * complete_cmd_fusion - Completes command + * @instance: Adapter soft state + * @MSIxIndex: MSI number + * @irq_context: IRQ context + * + * Completes all commands that is in reply descriptor queue + */ +static int +complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex, + struct megasas_irq_context *irq_context) +{ + union MPI2_REPLY_DESCRIPTORS_UNION *desc; + struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc; + struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req; + struct fusion_context *fusion; + struct megasas_cmd *cmd_mfi; + struct megasas_cmd_fusion *cmd_fusion; + u16 smid, num_completed; + u8 reply_descript_type, *sense, status, extStatus; + u32 device_id, data_length; + union desc_value d_val; + struct LD_LOAD_BALANCE_INFO *lbinfo; + int threshold_reply_count = 0; + struct scsi_cmnd *scmd_local = NULL; + struct MR_TASK_MANAGE_REQUEST *mr_tm_req; + struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req; + + fusion = instance->ctrl_context; + + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) + return IRQ_HANDLED; + + if (!access_irq_context(irq_context)) + return 0; + + desc = fusion->reply_frames_desc[MSIxIndex] + + fusion->last_reply_idx[MSIxIndex]; + + reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; + + d_val.word = desc->Words; + + reply_descript_type = reply_desc->ReplyFlags & + MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; + + if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) { + release_irq_context(irq_context); + return IRQ_NONE; + } + + num_completed = 0; + + while (d_val.u.low != cpu_to_le32(UINT_MAX) && + d_val.u.high != cpu_to_le32(UINT_MAX)) { + + smid = le16_to_cpu(reply_desc->SMID); + cmd_fusion = fusion->cmd_list[smid - 1]; + scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) + cmd_fusion->io_request; + + scmd_local = cmd_fusion->scmd; + status = scsi_io_req->RaidContext.raid_context.status; + extStatus = scsi_io_req->RaidContext.raid_context.ex_status; + sense = cmd_fusion->sense; + data_length = scsi_io_req->DataLength; + + switch (scsi_io_req->Function) { + case MPI2_FUNCTION_SCSI_TASK_MGMT: + mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *) + cmd_fusion->io_request; + mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) + &mr_tm_req->TmRequest; + dev_dbg(&instance->pdev->dev, "TM completion:" + "type: 0x%x TaskMID: 0x%x\n", + mpi_tm_req->TaskType, mpi_tm_req->TaskMID); + complete(&cmd_fusion->done); + break; + case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/ + /* Update load balancing info */ + if (fusion->load_balance_info && + (megasas_priv(cmd_fusion->scmd)->status & + MEGASAS_LOAD_BALANCE_FLAG)) { + device_id = MEGASAS_DEV_INDEX(scmd_local); + lbinfo = &fusion->load_balance_info[device_id]; + atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]); + 
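/* This read was load balanced across the R1 arms: drop the per-arm pending count and clear the flag. */ +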
megasas_priv(cmd_fusion->scmd)->status &= + ~MEGASAS_LOAD_BALANCE_FLAG; + } + fallthrough; /* and complete IO */ + case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */ + atomic_dec(&instance->fw_outstanding); + if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) { + map_cmd_status(fusion, scmd_local, status, + extStatus, le32_to_cpu(data_length), + sense); + if (instance->ldio_threshold && + (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)) + atomic_dec(&instance->ldio_outstanding); + megasas_priv(scmd_local)->cmd_priv = NULL; + megasas_return_cmd_fusion(instance, cmd_fusion); + scsi_dma_unmap(scmd_local); + megasas_sdev_busy_dec(instance, scmd_local); + scsi_done(scmd_local); + } else /* Optimal VD - R1 FP command completion. */ + megasas_complete_r1_command(instance, cmd_fusion); + break; + case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */ + cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; + /* Poll mode. Dummy free. + * In case of Interrupt mode, caller has reverse check. + */ + if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) { + cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE; + megasas_return_cmd(instance, cmd_mfi); + } else + megasas_complete_cmd(instance, cmd_mfi, DID_OK); + break; + } + + fusion->last_reply_idx[MSIxIndex]++; + if (fusion->last_reply_idx[MSIxIndex] >= + fusion->reply_q_depth) + fusion->last_reply_idx[MSIxIndex] = 0; + + desc->Words = cpu_to_le64(ULLONG_MAX); + num_completed++; + threshold_reply_count++; + + /* Get the next reply descriptor */ + if (!fusion->last_reply_idx[MSIxIndex]) + desc = fusion->reply_frames_desc[MSIxIndex]; + else + desc++; + + reply_desc = + (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; + + d_val.word = desc->Words; + + reply_descript_type = reply_desc->ReplyFlags & + MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; + + if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) + break; + /* + * Write to reply post host index register after completing threshold + * number of reply counts and still there are more replies in reply queue + * pending to be completed + */ + if (threshold_reply_count >= instance->threshold_reply_count) { + if (instance->msix_combined) + writel(((MSIxIndex & 0x7) << 24) | + fusion->last_reply_idx[MSIxIndex], + instance->reply_post_host_index_addr[MSIxIndex/8]); + else + writel((MSIxIndex << 24) | + fusion->last_reply_idx[MSIxIndex], + instance->reply_post_host_index_addr[0]); + threshold_reply_count = 0; + if (irq_context) { + if (!irq_context->irq_poll_scheduled) { + irq_context->irq_poll_scheduled = true; + irq_context->irq_line_enable = true; + irq_poll_sched(&irq_context->irqpoll); + } + release_irq_context(irq_context); + return num_completed; + } + } + } + + if (num_completed) { + wmb(); + if (instance->msix_combined) + writel(((MSIxIndex & 0x7) << 24) | + fusion->last_reply_idx[MSIxIndex], + instance->reply_post_host_index_addr[MSIxIndex/8]); + else + writel((MSIxIndex << 24) | + fusion->last_reply_idx[MSIxIndex], + instance->reply_post_host_index_addr[0]); + megasas_check_and_restore_queue_depth(instance); + } + + release_irq_context(irq_context); + + return num_completed; +} + +int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) +{ + + struct megasas_instance *instance; + int num_entries = 0; + struct fusion_context *fusion; + + instance = (struct megasas_instance *)shost->hostdata; + + fusion = instance->ctrl_context; + + queue_num = queue_num + instance->low_latency_index_start; + + if (!atomic_add_unless(&fusion->busy_mq_poll[queue_num], 1, 1)) + return 0; + + 
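/* busy_mq_poll is a single-slot in-use flag: if another context is already draining this reply queue, report no completions instead of racing it. */ +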
num_entries = complete_cmd_fusion(instance, queue_num, NULL); + atomic_dec(&fusion->busy_mq_poll[queue_num]); + + return num_entries; +} + +/** + * megasas_enable_irq_poll() - enable irqpoll + * @instance: Adapter soft state + */ +static void megasas_enable_irq_poll(struct megasas_instance *instance) +{ + u32 count, i; + struct megasas_irq_context *irq_ctx; + + count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; + + for (i = 0; i < count; i++) { + irq_ctx = &instance->irq_context[i]; + irq_poll_enable(&irq_ctx->irqpoll); + } +} + +/** + * megasas_sync_irqs - Synchronizes all IRQs owned by adapter + * @instance_addr: Adapter soft state address + */ +static void megasas_sync_irqs(unsigned long instance_addr) +{ + u32 count, i; + struct megasas_instance *instance = + (struct megasas_instance *)instance_addr; + struct megasas_irq_context *irq_ctx; + + count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; + + for (i = 0; i < count; i++) { + synchronize_irq(pci_irq_vector(instance->pdev, i)); + irq_ctx = &instance->irq_context[i]; + irq_poll_disable(&irq_ctx->irqpoll); + if (irq_ctx->irq_poll_scheduled) { + irq_ctx->irq_poll_scheduled = false; + enable_irq(irq_ctx->os_irq); + complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx); + } + } +} + +/** + * megasas_irqpoll() - process a queue for completed reply descriptors + * @irqpoll: IRQ poll structure associated with queue to poll. + * @budget: Threshold of reply descriptors to process per poll. + * + * Return: The number of entries processed. + */ + +int megasas_irqpoll(struct irq_poll *irqpoll, int budget) +{ + struct megasas_irq_context *irq_ctx; + struct megasas_instance *instance; + int num_entries; + + irq_ctx = container_of(irqpoll, struct megasas_irq_context, irqpoll); + instance = irq_ctx->instance; + + if (irq_ctx->irq_line_enable) { + disable_irq_nosync(irq_ctx->os_irq); + irq_ctx->irq_line_enable = false; + } + + num_entries = complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx); + if (num_entries < budget) { + irq_poll_complete(irqpoll); + irq_ctx->irq_poll_scheduled = false; + enable_irq(irq_ctx->os_irq); + complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx); + } + + return num_entries; +} + +/** + * megasas_complete_cmd_dpc_fusion - Completes command + * @instance_addr: Adapter soft state address + * + * Tasklet to complete cmds + */ +static void +megasas_complete_cmd_dpc_fusion(unsigned long instance_addr) +{ + struct megasas_instance *instance = + (struct megasas_instance *)instance_addr; + struct megasas_irq_context *irq_ctx = NULL; + u32 count, MSIxIndex; + + count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1; + + /* If we have already declared adapter dead, do not complete cmds */ + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) + return; + + for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++) { + irq_ctx = &instance->irq_context[MSIxIndex]; + complete_cmd_fusion(instance, MSIxIndex, irq_ctx); + } +} + +/** + * megasas_isr_fusion - isr entry point + * @irq: IRQ number + * @devp: IRQ context + */ +static irqreturn_t megasas_isr_fusion(int irq, void *devp) +{ + struct megasas_irq_context *irq_context = devp; + struct megasas_instance *instance = irq_context->instance; + u32 mfiStatus; + + if (instance->mask_interrupts) + return IRQ_NONE; + + if (irq_context->irq_poll_scheduled) + return IRQ_HANDLED; + + if (!instance->msix_vectors) { + mfiStatus = instance->instancet->clear_intr(instance); + if (!mfiStatus) + return IRQ_NONE; + } + + /* If we are resetting, bail */ + if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) { + instance->instancet->clear_intr(instance); + return IRQ_HANDLED; + } + + return complete_cmd_fusion(instance, irq_context->MSIxIndex, irq_context) + ? IRQ_HANDLED : IRQ_NONE; +} + +/** + * build_mpt_mfi_pass_thru - builds a cmd for MFI Pass thru + * @instance: Adapter soft state + * @mfi_cmd: megasas_cmd pointer + * + */ +static void +build_mpt_mfi_pass_thru(struct megasas_instance *instance, + struct megasas_cmd *mfi_cmd) +{ + struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain; + struct MPI2_RAID_SCSI_IO_REQUEST *io_req; + struct megasas_cmd_fusion *cmd; + struct fusion_context *fusion; + struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr; + + fusion = instance->ctrl_context; + + cmd = megasas_get_cmd_fusion(instance, + instance->max_scsi_cmds + mfi_cmd->index); + + /* Save the smid. To be used for returning the cmd */ + mfi_cmd->context.smid = cmd->index; + + /* + * For cmds where the flag is set, store the flag and check + * on completion.
For cmds with this flag, don't call + * megasas_complete_cmd + */ + + if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)) + mfi_cmd->flags |= DRV_DCMD_POLLED_MODE; + + io_req = cmd->io_request; + + if (instance->adapter_type >= INVADER_SERIES) { + struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = + (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL; + sgl_ptr_end += fusion->max_sge_in_main_msg - 1; + sgl_ptr_end->Flags = 0; + } + + mpi25_ieee_chain = + (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain; + + io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; + io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, + SGL) / 4; + io_req->ChainOffset = fusion->chain_offset_mfi_pthru; + + mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr); + + mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | + MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; + + mpi25_ieee_chain->Length = cpu_to_le32(instance->mfi_frame_size); +} + +/** + * build_mpt_cmd - Calls helper function to build a cmd MFI Pass thru cmd + * @instance: Adapter soft state + * @cmd: mfi cmd to build + * + */ +static union MEGASAS_REQUEST_DESCRIPTOR_UNION * +build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) +{ + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL; + u16 index; + + build_mpt_mfi_pass_thru(instance, cmd); + index = cmd->context.smid; + + req_desc = megasas_get_request_descriptor(instance, index - 1); + + req_desc->Words = 0; + req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + + req_desc->SCSIIO.SMID = cpu_to_le16(index); + + return req_desc; +} + +/** + * megasas_issue_dcmd_fusion - Issues a MFI Pass thru cmd + * @instance: Adapter soft state + * @cmd: mfi cmd pointer + * + */ +static void +megasas_issue_dcmd_fusion(struct megasas_instance *instance, + struct megasas_cmd *cmd) +{ + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; + + req_desc = build_mpt_cmd(instance, cmd); + + megasas_fire_cmd_fusion(instance, req_desc); + return; +} + +/** + * megasas_release_fusion - Reverses the FW initialization + * @instance: Adapter soft state + */ +void +megasas_release_fusion(struct megasas_instance *instance) +{ + megasas_free_ioc_init_cmd(instance); + megasas_free_cmds(instance); + megasas_free_cmds_fusion(instance); + + iounmap(instance->reg_set); + + pci_release_selected_regions(instance->pdev, 1<<instance->bar); +} + +/** + * megasas_read_fw_status_reg_fusion - returns the current FW status value + * @instance: Adapter soft state + */ +static u32 +megasas_read_fw_status_reg_fusion(struct megasas_instance *instance) +{ + return megasas_readl(instance, &instance->reg_set->outbound_scratch_pad_0); +} + +/** + * megasas_alloc_host_crash_buffer - Allocate host buffers for crash dump collection from firmware + * @instance: Controller's soft instance + * The number of buffers actually allocated is recorded in instance->drv_buf_alloc. + */ +static void +megasas_alloc_host_crash_buffer(struct megasas_instance *instance) +{ + unsigned int i; + + for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) { + instance->crash_buf[i] = vzalloc(CRASH_DMA_BUF_SIZE); + if (!instance->crash_buf[i]) { + dev_info(&instance->pdev->dev, "Firmware crash dump " + "memory allocation failed at index %d\n", i); + break; + } + } + instance->drv_buf_alloc = i; +} + +/** + * megasas_free_host_crash_buffer - Free host buffers used for crash dump collection from firmware + * @instance: Controller's soft instance + */ +void +megasas_free_host_crash_buffer(struct megasas_instance *instance) +{ + unsigned int
i; + for (i = 0; i < instance->drv_buf_alloc; i++) { + vfree(instance->crash_buf[i]); + } + instance->drv_buf_index = 0; + instance->drv_buf_alloc = 0; + instance->fw_crash_state = UNAVAILABLE; + instance->fw_crash_buffer_size = 0; +} + +/** + * megasas_adp_reset_fusion - For controller reset + * @instance: Controller's soft instance + * @regs: MFI register set + */ +static int +megasas_adp_reset_fusion(struct megasas_instance *instance, + struct megasas_register_set __iomem *regs) +{ + u32 host_diag, abs_state, retry; + + /* Now try to reset the chip */ + writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); + writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset); + writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset); + writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset); + writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); + writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); + writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); + + /* Check that the diag write enable (DRWE) bit is on */ + host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag); + retry = 0; + while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { + msleep(100); + host_diag = megasas_readl(instance, + &instance->reg_set->fusion_host_diag); + if (retry++ == 100) { + dev_warn(&instance->pdev->dev, + "Host diag unlock failed from %s %d\n", + __func__, __LINE__); + break; + } + } + if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) + return -1; + + /* Send chip reset command */ + writel(host_diag | HOST_DIAG_RESET_ADAPTER, + &instance->reg_set->fusion_host_diag); + msleep(3000); + + /* Make sure reset adapter bit is cleared */ + host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag); + retry = 0; + while (host_diag & HOST_DIAG_RESET_ADAPTER) { + msleep(100); + host_diag = megasas_readl(instance, + &instance->reg_set->fusion_host_diag); + if (retry++ == 1000) { + dev_warn(&instance->pdev->dev, + "Diag reset adapter never cleared %s %d\n", + __func__, __LINE__); + break; + } + } + if (host_diag & HOST_DIAG_RESET_ADAPTER) + return -1; + + abs_state = instance->instancet->read_fw_status_reg(instance) + & MFI_STATE_MASK; + retry = 0; + + while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { + msleep(100); + abs_state = instance->instancet-> + read_fw_status_reg(instance) & MFI_STATE_MASK; + } + if (abs_state <= MFI_STATE_FW_INIT) { + dev_warn(&instance->pdev->dev, + "fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n", + abs_state, __func__, __LINE__); + return -1; + } + + return 0; +} + +/** + * megasas_check_reset_fusion - For controller reset check + * @instance: Controller's soft instance + * @regs: MFI register set + */ +static int +megasas_check_reset_fusion(struct megasas_instance *instance, + struct megasas_register_set __iomem *regs) +{ + return 0; +} + +/** + * megasas_trigger_snap_dump - Trigger snap dump in FW + * @instance: Soft instance of adapter + */ +static inline void megasas_trigger_snap_dump(struct megasas_instance *instance) +{ + int j; + u32 fw_state, abs_state; + + if (!instance->disableOnlineCtrlReset) { + dev_info(&instance->pdev->dev, "Trigger snap dump\n"); + writel(MFI_ADP_TRIGGER_SNAP_DUMP, + &instance->reg_set->doorbell); + readl(&instance->reg_set->doorbell); + } + + for (j = 0; j < instance->snapdump_wait_time; j++) { + abs_state = instance->instancet->read_fw_status_reg(instance); + fw_state = abs_state & MFI_STATE_MASK; + if 
(fw_state == MFI_STATE_FAULT) { + dev_printk(KERN_ERR, &instance->pdev->dev, + "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n", + abs_state & MFI_STATE_FAULT_CODE, + abs_state & MFI_STATE_FAULT_SUBCODE, __func__); + return; + } + msleep(1000); + } +} + +/* This function waits for outstanding commands on fusion to complete */ +static int +megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, + int reason, int *convert) +{ + int i, outstanding, retval = 0, hb_seconds_missed = 0; + u32 fw_state, abs_state; + u32 waittime_for_io_completion; + + waittime_for_io_completion = + min_t(u32, resetwaittime, + (resetwaittime - instance->snapdump_wait_time)); + + if (reason == MFI_IO_TIMEOUT_OCR) { + dev_info(&instance->pdev->dev, + "MFI command is timed out\n"); + megasas_complete_cmd_dpc_fusion((unsigned long)instance); + if (instance->snapdump_wait_time) + megasas_trigger_snap_dump(instance); + retval = 1; + goto out; + } + + for (i = 0; i < waittime_for_io_completion; i++) { + /* Check if firmware is in fault state */ + abs_state = instance->instancet->read_fw_status_reg(instance); + fw_state = abs_state & MFI_STATE_MASK; + if (fw_state == MFI_STATE_FAULT) { + dev_printk(KERN_ERR, &instance->pdev->dev, + "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n", + abs_state & MFI_STATE_FAULT_CODE, + abs_state & MFI_STATE_FAULT_SUBCODE, __func__); + megasas_complete_cmd_dpc_fusion((unsigned long)instance); + if (instance->requestorId && reason) { + dev_warn(&instance->pdev->dev, "SR-IOV Found FW in FAULT" + " state while polling during" + " I/O timeout handling for %d\n", + instance->host->host_no); + *convert = 1; + } + + retval = 1; + goto out; + } + + + /* If SR-IOV VF mode & heartbeat timeout, don't wait */ + if (instance->requestorId && !reason) { + retval = 1; + goto out; + } + + /* If SR-IOV VF mode & I/O timeout, check for HB timeout */ + if (instance->requestorId && (reason == SCSIIO_TIMEOUT_OCR)) { + if (instance->hb_host_mem->HB.fwCounter != + instance->hb_host_mem->HB.driverCounter) { + instance->hb_host_mem->HB.driverCounter = + instance->hb_host_mem->HB.fwCounter; + hb_seconds_missed = 0; + } else { + hb_seconds_missed++; + if (hb_seconds_missed == + (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) { + dev_warn(&instance->pdev->dev, "SR-IOV:" + " Heartbeat never completed " + " while polling during I/O " + " timeout handling for " + "scsi%d.\n", + instance->host->host_no); + *convert = 1; + retval = 1; + goto out; + } + } + } + + megasas_complete_cmd_dpc_fusion((unsigned long)instance); + outstanding = atomic_read(&instance->fw_outstanding); + if (!outstanding) + goto out; + + if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { + dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " + "commands to complete for scsi%d\n", i, + outstanding, instance->host->host_no); + } + msleep(1000); + } + + if (instance->snapdump_wait_time) { + megasas_trigger_snap_dump(instance); + retval = 1; + goto out; + } + + if (atomic_read(&instance->fw_outstanding)) { + dev_err(&instance->pdev->dev, "pending commands remain after waiting, " + "will reset adapter scsi%d.\n", + instance->host->host_no); + *convert = 1; + retval = 1; + } + +out: + return retval; +} + +void megasas_reset_reply_desc(struct megasas_instance *instance) +{ + int i, j, count; + struct fusion_context *fusion; + union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; + + fusion = instance->ctrl_context; + count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1; + count += instance->iopoll_q_count; + + for (i = 0 ; i < count ; i++) { + fusion->last_reply_idx[i] = 0; + reply_desc = fusion->reply_frames_desc[i]; + for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++) + reply_desc->Words = cpu_to_le64(ULLONG_MAX); + } +} + +/* + * megasas_refire_mgmt_cmd : Re-fire management commands + * @instance: Controller's soft instance +*/ +static void megasas_refire_mgmt_cmd(struct megasas_instance *instance, + bool return_ioctl) +{ + int j; + struct megasas_cmd_fusion *cmd_fusion; + struct fusion_context *fusion; + struct megasas_cmd *cmd_mfi; + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; + struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req; + u16 smid; + bool refire_cmd = false; + u8 result; + u32 opcode = 0; + + fusion = instance->ctrl_context; + + /* Re-fire management commands. + * Do not traverse complet MPT frame pool. Start from max_scsi_cmds. + */ + for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) { + cmd_fusion = fusion->cmd_list[j]; + cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; + smid = le16_to_cpu(cmd_mfi->context.smid); + result = REFIRE_CMD; + + if (!smid) + continue; + + req_desc = megasas_get_request_descriptor(instance, smid - 1); + + switch (cmd_mfi->frame->hdr.cmd) { + case MFI_CMD_DCMD: + opcode = le32_to_cpu(cmd_mfi->frame->dcmd.opcode); + /* Do not refire shutdown command */ + if (opcode == MR_DCMD_CTRL_SHUTDOWN) { + cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK; + result = COMPLETE_CMD; + break; + } + + refire_cmd = ((opcode != MR_DCMD_LD_MAP_GET_INFO)) && + (opcode != MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && + !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE); + + if (!refire_cmd) + result = RETURN_CMD; + + break; + case MFI_CMD_NVME: + if (!instance->support_nvme_passthru) { + cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD; + result = COMPLETE_CMD; + } + + break; + case MFI_CMD_TOOLBOX: + if (!instance->support_pci_lane_margining) { + cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD; + result = COMPLETE_CMD; + } + + break; + default: + break; + } + + if (return_ioctl && cmd_mfi->sync_cmd && + cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) { + dev_err(&instance->pdev->dev, + "return -EBUSY from %s %d cmd 0x%x opcode 0x%x\n", + __func__, __LINE__, cmd_mfi->frame->hdr.cmd, + le32_to_cpu(cmd_mfi->frame->dcmd.opcode)); + cmd_mfi->cmd_status_drv = DCMD_BUSY; + result = COMPLETE_CMD; + } + + scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) + cmd_fusion->io_request; + if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) + result = RETURN_CMD; + + switch (result) { + case REFIRE_CMD: + megasas_fire_cmd_fusion(instance, req_desc); + break; + case RETURN_CMD: + megasas_return_cmd(instance, cmd_mfi); + break; + case COMPLETE_CMD: + megasas_complete_cmd(instance, cmd_mfi, DID_OK); + break; + } + } +} + +/* + * megasas_return_polled_cmds: Return polled mode commands back to the pool + * before initiating an OCR. 
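+ * Only MFI commands flagged DRV_DCMD_POLLED_MODE are handed back here; the + * flag is cleared before the command is returned via megasas_return_cmd().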
+ * @instance: Controller's soft instance + */ +static void +megasas_return_polled_cmds(struct megasas_instance *instance) +{ + int i; + struct megasas_cmd_fusion *cmd_fusion; + struct fusion_context *fusion; + struct megasas_cmd *cmd_mfi; + + fusion = instance->ctrl_context; + + for (i = instance->max_scsi_cmds; i < instance->max_fw_cmds; i++) { + cmd_fusion = fusion->cmd_list[i]; + cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; + + if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) { + if (megasas_dbg_lvl & OCR_DEBUG) + dev_info(&instance->pdev->dev, + "%s %d return cmd 0x%x opcode 0x%x\n", + __func__, __LINE__, cmd_mfi->frame->hdr.cmd, + le32_to_cpu(cmd_mfi->frame->dcmd.opcode)); + cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE; + megasas_return_cmd(instance, cmd_mfi); + } + } +} + +/* + * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device + * @instance: per adapter struct + * @channel: the channel assigned by the OS + * @id: the id assigned by the OS + * + * Returns SUCCESS if no IOs pending to SCSI device, else return FAILED + */ + +static int megasas_track_scsiio(struct megasas_instance *instance, + int id, int channel) +{ + int i, found = 0; + struct megasas_cmd_fusion *cmd_fusion; + struct fusion_context *fusion; + fusion = instance->ctrl_context; + + for (i = 0 ; i < instance->max_scsi_cmds; i++) { + cmd_fusion = fusion->cmd_list[i]; + if (cmd_fusion->scmd && + (cmd_fusion->scmd->device->id == id && + cmd_fusion->scmd->device->channel == channel)) { + dev_info(&instance->pdev->dev, + "SCSI commands pending to target" + "channel %d id %d \tSMID: 0x%x\n", + channel, id, cmd_fusion->index); + scsi_print_command(cmd_fusion->scmd); + found = 1; + break; + } + } + + return found ? FAILED : SUCCESS; +} + +/** + * megasas_tm_response_code - translation of device response code + * @instance: Controller's soft instance + * @mpi_reply: MPI reply returned by firmware + * + * Return nothing. 
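+ * The decoded response string, termination count, handles and IOC status + * are reported through dev_dbg() only.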
+ */ +static void +megasas_tm_response_code(struct megasas_instance *instance, + struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply) +{ + char *desc; + + switch (mpi_reply->ResponseCode) { + case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: + desc = "task management request completed"; + break; + case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: + desc = "invalid frame"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: + desc = "task management request not supported"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_FAILED: + desc = "task management request failed"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: + desc = "task management request succeeded"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: + desc = "invalid lun"; + break; + case 0xA: + desc = "overlapped tag attempted"; + break; + case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: + desc = "task queued, however not sent to target"; + break; + default: + desc = "unknown"; + break; + } + dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n", + mpi_reply->ResponseCode, desc); + dev_dbg(&instance->pdev->dev, + "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo" + " 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n", + mpi_reply->TerminationCount, mpi_reply->DevHandle, + mpi_reply->Function, mpi_reply->TaskType, + mpi_reply->IOCStatus, mpi_reply->IOCLogInfo); +} + +/** + * megasas_issue_tm - main routine for sending tm requests + * @instance: per adapter struct + * @device_handle: device handle + * @channel: the channel assigned by the OS + * @id: the id assigned by the OS + * @smid_task: smid assigned to the task + * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in megaraid_sas_fusion.c) + * @mr_device_priv_data: private data + * Context: user + * + * MegaRaid use MPT interface for Task Magement request. + * A generic API for sending task management requests to firmware. + * + * Return SUCCESS or FAILED. + */ +static int +megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, + uint channel, uint id, u16 smid_task, u8 type, + struct MR_PRIV_DEVICE *mr_device_priv_data) +{ + struct MR_TASK_MANAGE_REQUEST *mr_request; + struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request; + unsigned long timeleft; + struct megasas_cmd_fusion *cmd_fusion; + struct megasas_cmd *cmd_mfi; + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; + struct fusion_context *fusion = NULL; + struct megasas_cmd_fusion *scsi_lookup; + int rc; + int timeout = MEGASAS_DEFAULT_TM_TIMEOUT; + struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply; + + fusion = instance->ctrl_context; + + cmd_mfi = megasas_get_cmd(instance); + + if (!cmd_mfi) { + dev_err(&instance->pdev->dev, "Failed from %s %d\n", + __func__, __LINE__); + return -ENOMEM; + } + + cmd_fusion = megasas_get_cmd_fusion(instance, + instance->max_scsi_cmds + cmd_mfi->index); + + /* Save the smid. 
To be used for returning the cmd */ + cmd_mfi->context.smid = cmd_fusion->index; + + req_desc = megasas_get_request_descriptor(instance, + (cmd_fusion->index - 1)); + + cmd_fusion->request_desc = req_desc; + req_desc->Words = 0; + + mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request; + memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST)); + mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest; + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(device_handle); + mpi_request->TaskType = type; + mpi_request->TaskMID = cpu_to_le16(smid_task); + mpi_request->LUN[1] = 0; + + + req_desc = cmd_fusion->request_desc; + req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index); + req_desc->HighPriority.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + req_desc->HighPriority.MSIxIndex = 0; + req_desc->HighPriority.LMID = 0; + req_desc->HighPriority.Reserved1 = 0; + + if (channel < MEGASAS_MAX_PD_CHANNELS) + mr_request->tmReqFlags.isTMForPD = 1; + else + mr_request->tmReqFlags.isTMForLD = 1; + + init_completion(&cmd_fusion->done); + megasas_fire_cmd_fusion(instance, req_desc); + + switch (type) { + case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: + timeout = mr_device_priv_data->task_abort_tmo; + break; + case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + timeout = mr_device_priv_data->target_reset_tmo; + break; + } + + timeleft = wait_for_completion_timeout(&cmd_fusion->done, timeout * HZ); + + if (!timeleft) { + dev_err(&instance->pdev->dev, + "task mgmt type 0x%x timed out\n", type); + mutex_unlock(&instance->reset_mutex); + rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); + mutex_lock(&instance->reset_mutex); + return rc; + } + + mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply; + megasas_tm_response_code(instance, mpi_reply); + + megasas_return_cmd(instance, cmd_mfi); + rc = SUCCESS; + switch (type) { + case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: + scsi_lookup = fusion->cmd_list[smid_task - 1]; + + if (scsi_lookup->scmd == NULL) + break; + else { + instance->instancet->disable_intr(instance); + megasas_sync_irqs((unsigned long)instance); + instance->instancet->enable_intr(instance); + megasas_enable_irq_poll(instance); + if (scsi_lookup->scmd == NULL) + break; + } + rc = FAILED; + break; + + case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF)) + break; + instance->instancet->disable_intr(instance); + megasas_sync_irqs((unsigned long)instance); + rc = megasas_track_scsiio(instance, id, channel); + instance->instancet->enable_intr(instance); + megasas_enable_irq_poll(instance); + + break; + case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK: + break; + default: + rc = FAILED; + break; + } + + return rc; + +} + +/* + * megasas_fusion_smid_lookup : Look for fusion command corresponding to SCSI + * @instance: per adapter struct + * + * Return Non Zero index, if SMID found in outstanding commands + */ +static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd) +{ + int i, ret = 0; + struct megasas_instance *instance; + struct megasas_cmd_fusion *cmd_fusion; + struct fusion_context *fusion; + + instance = (struct megasas_instance *)scmd->device->host->hostdata; + + fusion = instance->ctrl_context; + + for (i = 0; i < instance->max_scsi_cmds; i++) { + cmd_fusion = fusion->cmd_list[i]; + if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) { 
+ scmd_printk(KERN_NOTICE, scmd, "Abort request is for" + " SMID: %d\n", cmd_fusion->index); + ret = cmd_fusion->index; + break; + } + } + + return ret; +} + +/* +* megasas_get_tm_devhandle - Get devhandle for TM request +* @sdev- OS provided scsi device +* +* Returns- devhandle/targetID of SCSI device +*/ +static u16 megasas_get_tm_devhandle(struct scsi_device *sdev) +{ + u16 pd_index = 0; + u32 device_id; + struct megasas_instance *instance; + struct fusion_context *fusion; + struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; + u16 devhandle = (u16)ULONG_MAX; + + instance = (struct megasas_instance *)sdev->host->hostdata; + fusion = instance->ctrl_context; + + if (!MEGASAS_IS_LOGICAL(sdev)) { + if (instance->use_seqnum_jbod_fp) { + pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + + sdev->id; + pd_sync = (void *)fusion->pd_seq_sync + [(instance->pd_seq_map_id - 1) & 1]; + devhandle = pd_sync->seq[pd_index].devHandle; + } else + sdev_printk(KERN_ERR, sdev, "Firmware expose tmCapable" + " without JBOD MAP support from %s %d\n", __func__, __LINE__); + } else { + device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + + sdev->id; + devhandle = device_id; + } + + return devhandle; +} + +/* + * megasas_task_abort_fusion : SCSI task abort function for fusion adapters + * @scmd : pointer to scsi command object + * + * Return SUCCESS, if command aborted else FAILED + */ + +int megasas_task_abort_fusion(struct scsi_cmnd *scmd) +{ + struct megasas_instance *instance; + u16 smid, devhandle; + int ret; + struct MR_PRIV_DEVICE *mr_device_priv_data; + mr_device_priv_data = scmd->device->hostdata; + + instance = (struct megasas_instance *)scmd->device->host->hostdata; + + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { + dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL," + "SCSI host:%d\n", instance->host->host_no); + ret = FAILED; + return ret; + } + + if (!mr_device_priv_data) { + sdev_printk(KERN_INFO, scmd->device, "device been deleted! " + "scmd(%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + ret = SUCCESS; + goto out; + } + + if (!mr_device_priv_data->is_tm_capable) { + ret = FAILED; + goto out; + } + + mutex_lock(&instance->reset_mutex); + + smid = megasas_fusion_smid_lookup(scmd); + + if (!smid) { + ret = SUCCESS; + scmd_printk(KERN_NOTICE, scmd, "Command for which abort is" + " issued is not found in outstanding commands\n"); + mutex_unlock(&instance->reset_mutex); + goto out; + } + + devhandle = megasas_get_tm_devhandle(scmd->device); + + if (devhandle == (u16)ULONG_MAX) { + ret = FAILED; + sdev_printk(KERN_INFO, scmd->device, + "task abort issued for invalid devhandle\n"); + mutex_unlock(&instance->reset_mutex); + goto out; + } + sdev_printk(KERN_INFO, scmd->device, + "attempting task abort! scmd(0x%p) tm_dev_handle 0x%x\n", + scmd, devhandle); + + mr_device_priv_data->tm_busy = true; + ret = megasas_issue_tm(instance, devhandle, + scmd->device->channel, scmd->device->id, smid, + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, + mr_device_priv_data); + mr_device_priv_data->tm_busy = false; + + mutex_unlock(&instance->reset_mutex); + scmd_printk(KERN_INFO, scmd, "task abort %s!! scmd(0x%p)\n", + ((ret == SUCCESS) ? 
"SUCCESS" : "FAILED"), scmd); +out: + scsi_print_command(scmd); + if (megasas_dbg_lvl & TM_DEBUG) + megasas_dump_fusion_io(scmd); + + return ret; +} + +/* + * megasas_reset_target_fusion : target reset function for fusion adapters + * scmd: SCSI command pointer + * + * Returns SUCCESS if all commands associated with target aborted else FAILED + */ + +int megasas_reset_target_fusion(struct scsi_cmnd *scmd) +{ + + struct megasas_instance *instance; + int ret = FAILED; + u16 devhandle; + struct MR_PRIV_DEVICE *mr_device_priv_data; + mr_device_priv_data = scmd->device->hostdata; + + instance = (struct megasas_instance *)scmd->device->host->hostdata; + + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { + dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL," + "SCSI host:%d\n", instance->host->host_no); + ret = FAILED; + return ret; + } + + if (!mr_device_priv_data) { + sdev_printk(KERN_INFO, scmd->device, + "device been deleted! scmd: (0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + ret = SUCCESS; + goto out; + } + + if (!mr_device_priv_data->is_tm_capable) { + ret = FAILED; + goto out; + } + + mutex_lock(&instance->reset_mutex); + devhandle = megasas_get_tm_devhandle(scmd->device); + + if (devhandle == (u16)ULONG_MAX) { + ret = FAILED; + sdev_printk(KERN_INFO, scmd->device, + "target reset issued for invalid devhandle\n"); + mutex_unlock(&instance->reset_mutex); + goto out; + } + + sdev_printk(KERN_INFO, scmd->device, + "attempting target reset! scmd(0x%p) tm_dev_handle: 0x%x\n", + scmd, devhandle); + mr_device_priv_data->tm_busy = true; + ret = megasas_issue_tm(instance, devhandle, + scmd->device->channel, scmd->device->id, 0, + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + mr_device_priv_data); + mr_device_priv_data->tm_busy = false; + mutex_unlock(&instance->reset_mutex); + scmd_printk(KERN_NOTICE, scmd, "target reset %s!!\n", + (ret == SUCCESS) ? 
"SUCCESS" : "FAILED"); + +out: + return ret; +} + +/*SRIOV get other instance in cluster if any*/ +static struct +megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance) +{ + int i; + + for (i = 0; i < MAX_MGMT_ADAPTERS; i++) { + if (megasas_mgmt_info.instance[i] && + (megasas_mgmt_info.instance[i] != instance) && + megasas_mgmt_info.instance[i]->requestorId && + megasas_mgmt_info.instance[i]->peerIsPresent && + (memcmp((megasas_mgmt_info.instance[i]->clusterId), + instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0)) + return megasas_mgmt_info.instance[i]; + } + return NULL; +} + +/* Check for a second path that is currently UP */ +int megasas_check_mpio_paths(struct megasas_instance *instance, + struct scsi_cmnd *scmd) +{ + struct megasas_instance *peer_instance = NULL; + int retval = (DID_REQUEUE << 16); + + if (instance->peerIsPresent) { + peer_instance = megasas_get_peer_instance(instance); + if ((peer_instance) && + (atomic_read(&peer_instance->adprecovery) == + MEGASAS_HBA_OPERATIONAL)) + retval = (DID_NO_CONNECT << 16); + } + return retval; +} + +/* Core fusion reset function */ +int megasas_reset_fusion(struct Scsi_Host *shost, int reason) +{ + int retval = SUCCESS, i, j, convert = 0; + struct megasas_instance *instance; + struct megasas_cmd_fusion *cmd_fusion, *r1_cmd; + struct fusion_context *fusion; + u32 abs_state, status_reg, reset_adapter, fpio_count = 0; + u32 io_timeout_in_crash_mode = 0; + struct scsi_cmnd *scmd_local = NULL; + struct scsi_device *sdev; + int ret_target_prop = DCMD_FAILED; + bool is_target_prop = false; + bool do_adp_reset = true; + int max_reset_tries = MEGASAS_FUSION_MAX_RESET_TRIES; + + instance = (struct megasas_instance *)shost->hostdata; + fusion = instance->ctrl_context; + + mutex_lock(&instance->reset_mutex); + + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { + dev_warn(&instance->pdev->dev, "Hardware critical error, " + "returning FAILED for scsi%d.\n", + instance->host->host_no); + mutex_unlock(&instance->reset_mutex); + return FAILED; + } + status_reg = instance->instancet->read_fw_status_reg(instance); + abs_state = status_reg & MFI_STATE_MASK; + + /* IO timeout detected, forcibly put FW in FAULT state */ + if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf && + instance->crash_dump_app_support && reason) { + dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, " + "forcibly FAULT Firmware\n"); + atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); + status_reg = megasas_readl(instance, &instance->reg_set->doorbell); + writel(status_reg | MFI_STATE_FORCE_OCR, + &instance->reg_set->doorbell); + readl(&instance->reg_set->doorbell); + mutex_unlock(&instance->reset_mutex); + do { + ssleep(3); + io_timeout_in_crash_mode++; + dev_dbg(&instance->pdev->dev, "waiting for [%d] " + "seconds for crash dump collection and OCR " + "to be done\n", (io_timeout_in_crash_mode * 3)); + } while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) && + (io_timeout_in_crash_mode < 80)); + + if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) { + dev_info(&instance->pdev->dev, "OCR done for IO " + "timeout case\n"); + retval = SUCCESS; + } else { + dev_info(&instance->pdev->dev, "Controller is not " + "operational after 240 seconds wait for IO " + "timeout case in FW crash dump mode\n do " + "OCR/kill adapter\n"); + retval = megasas_reset_fusion(shost, 0); + } + return retval; + } + + if (instance->requestorId && !instance->skip_heartbeat_timer_del) + 
del_timer_sync(&instance->sriov_heartbeat_timer); + set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); + set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags); + atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING); + instance->instancet->disable_intr(instance); + megasas_sync_irqs((unsigned long)instance); + + /* First try waiting for commands to complete */ + if (megasas_wait_for_outstanding_fusion(instance, reason, + &convert)) { + atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); + dev_warn(&instance->pdev->dev, "resetting fusion " + "adapter scsi%d.\n", instance->host->host_no); + if (convert) + reason = 0; + + if (megasas_dbg_lvl & OCR_DEBUG) + dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n"); + + /* Now return commands back to the OS */ + for (i = 0 ; i < instance->max_scsi_cmds; i++) { + cmd_fusion = fusion->cmd_list[i]; + /*check for extra commands issued by driver*/ + if (instance->adapter_type >= VENTURA_SERIES) { + r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds]; + megasas_return_cmd_fusion(instance, r1_cmd); + } + scmd_local = cmd_fusion->scmd; + if (cmd_fusion->scmd) { + if (megasas_dbg_lvl & OCR_DEBUG) { + sdev_printk(KERN_INFO, + cmd_fusion->scmd->device, "SMID: 0x%x\n", + cmd_fusion->index); + megasas_dump_fusion_io(cmd_fusion->scmd); + } + + if (cmd_fusion->io_request->Function == + MPI2_FUNCTION_SCSI_IO_REQUEST) + fpio_count++; + + scmd_local->result = + megasas_check_mpio_paths(instance, + scmd_local); + if (instance->ldio_threshold && + megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) + atomic_dec(&instance->ldio_outstanding); + megasas_return_cmd_fusion(instance, cmd_fusion); + scsi_dma_unmap(scmd_local); + scsi_done(scmd_local); + } + } + + dev_info(&instance->pdev->dev, "Outstanding fastpath IOs: %d\n", + fpio_count); + + atomic_set(&instance->fw_outstanding, 0); + + status_reg = instance->instancet->read_fw_status_reg(instance); + abs_state = status_reg & MFI_STATE_MASK; + reset_adapter = status_reg & MFI_RESET_ADAPTER; + if (instance->disableOnlineCtrlReset || + (abs_state == MFI_STATE_FAULT && !reset_adapter)) { + /* Reset not supported, kill adapter */ + dev_warn(&instance->pdev->dev, "Reset not supported" + ", killing adapter scsi%d.\n", + instance->host->host_no); + goto kill_hba; + } + + /* Let SR-IOV VF & PF sync up if there was a HB failure */ + if (instance->requestorId && !reason) { + msleep(MEGASAS_OCR_SETTLE_TIME_VF); + do_adp_reset = false; + max_reset_tries = MEGASAS_SRIOV_MAX_RESET_TRIES_VF; + } + + /* Now try to reset the chip */ + for (i = 0; i < max_reset_tries; i++) { + /* + * Do adp reset and wait for + * controller to transition to ready + */ + if (megasas_adp_reset_wait_for_ready(instance, + do_adp_reset, 1) == FAILED) + continue; + + /* Wait for FW to become ready */ + if (megasas_transition_to_ready(instance, 1)) { + dev_warn(&instance->pdev->dev, + "Failed to transition controller to ready for " + "scsi%d.\n", instance->host->host_no); + continue; + } + megasas_reset_reply_desc(instance); + megasas_fusion_update_can_queue(instance, OCR_CONTEXT); + + if (megasas_ioc_init_fusion(instance)) { + continue; + } + + if (megasas_get_ctrl_info(instance)) { + dev_info(&instance->pdev->dev, + "Failed from %s %d\n", + __func__, __LINE__); + goto kill_hba; + } + + megasas_refire_mgmt_cmd(instance, + (i == (MEGASAS_FUSION_MAX_RESET_TRIES - 1) + ? 
1 : 0)); + + /* Reset load balance info */ + if (fusion->load_balance_info) + memset(fusion->load_balance_info, 0, + (sizeof(struct LD_LOAD_BALANCE_INFO) * + MAX_LOGICAL_DRIVES_EXT)); + + if (!megasas_get_map_info(instance)) { + megasas_sync_map_info(instance); + } else { + /* + * Return pending polled mode cmds before + * retrying OCR + */ + megasas_return_polled_cmds(instance); + continue; + } + + megasas_setup_jbod_map(instance); + + /* reset stream detection array */ + if (instance->adapter_type >= VENTURA_SERIES) { + for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { + memset(fusion->stream_detect_by_ld[j], + 0, sizeof(struct LD_STREAM_DETECT)); + fusion->stream_detect_by_ld[j]->mru_bit_map + = MR_STREAM_BITMAP; + } + } + + clear_bit(MEGASAS_FUSION_IN_RESET, + &instance->reset_flags); + instance->instancet->enable_intr(instance); + megasas_enable_irq_poll(instance); + shost_for_each_device(sdev, shost) { + if ((instance->tgt_prop) && + (instance->nvme_page_size)) + ret_target_prop = megasas_get_target_prop(instance, sdev); + + is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; + megasas_set_dynamic_target_properties(sdev, is_target_prop); + } + + status_reg = instance->instancet->read_fw_status_reg + (instance); + abs_state = status_reg & MFI_STATE_MASK; + if (abs_state != MFI_STATE_OPERATIONAL) { + dev_info(&instance->pdev->dev, + "Adapter is not OPERATIONAL, state 0x%x for scsi:%d\n", + abs_state, instance->host->host_no); + goto out; + } + atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); + + dev_info(&instance->pdev->dev, + "Adapter is OPERATIONAL for scsi:%d\n", + instance->host->host_no); + + /* Restart SR-IOV heartbeat */ + if (instance->requestorId) { + if (!megasas_sriov_start_heartbeat(instance, 0)) + megasas_start_timer(instance); + else + instance->skip_heartbeat_timer_del = 1; + } + + if (instance->crash_dump_drv_support && + instance->crash_dump_app_support) + megasas_set_crash_dump_params(instance, + MR_CRASH_BUF_TURN_ON); + else + megasas_set_crash_dump_params(instance, + MR_CRASH_BUF_TURN_OFF); + + if (instance->snapdump_wait_time) { + megasas_get_snapdump_properties(instance); + dev_info(&instance->pdev->dev, + "Snap dump wait time\t: %d\n", + instance->snapdump_wait_time); + } + + retval = SUCCESS; + + /* Adapter reset completed successfully */ + dev_warn(&instance->pdev->dev, + "Reset successful for scsi%d.\n", + instance->host->host_no); + + goto out; + } + /* Reset failed, kill the adapter */ + dev_warn(&instance->pdev->dev, "Reset failed, killing " + "adapter scsi%d.\n", instance->host->host_no); + goto kill_hba; + } else { + /* For VF: Restart HB timer if we didn't OCR */ + if (instance->requestorId) { + megasas_start_timer(instance); + } + clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); + instance->instancet->enable_intr(instance); + megasas_enable_irq_poll(instance); + atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); + goto out; + } +kill_hba: + megaraid_sas_kill_hba(instance); + megasas_enable_irq_poll(instance); + instance->skip_heartbeat_timer_del = 1; + retval = FAILED; +out: + clear_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags); + mutex_unlock(&instance->reset_mutex); + return retval; +} + +/* Fusion Crash dump collection */ +static void megasas_fusion_crash_dump(struct megasas_instance *instance) +{ + u32 status_reg; + u8 partial_copy = 0; + int wait = 0; + + + status_reg = instance->instancet->read_fw_status_reg(instance); + + /* + * Allocate host crash buffers to copy data from 1 MB DMA 
crash buffer + * to host crash buffers + */ + if (instance->drv_buf_index == 0) { + /* Buffer is already allocated for old Crash dump. + * Do OCR and do not wait for crash dump collection + */ + if (instance->drv_buf_alloc) { + dev_info(&instance->pdev->dev, "earlier crash dump is " + "not yet copied by application, ignoring this " + "crash dump and initiating OCR\n"); + status_reg |= MFI_STATE_CRASH_DUMP_DONE; + writel(status_reg, + &instance->reg_set->outbound_scratch_pad_0); + readl(&instance->reg_set->outbound_scratch_pad_0); + return; + } + megasas_alloc_host_crash_buffer(instance); + dev_info(&instance->pdev->dev, "Number of host crash buffers " + "allocated: %d\n", instance->drv_buf_alloc); + } + + while (!(status_reg & MFI_STATE_CRASH_DUMP_DONE) && + (wait < MEGASAS_WATCHDOG_WAIT_COUNT)) { + if (!(status_reg & MFI_STATE_DMADONE)) { + /* + * Next crash dump buffer is not yet DMA'd by FW + * Check after 10ms. Wait for 1 second for FW to + * post the next buffer. If not bail out. + */ + wait++; + msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS); + status_reg = instance->instancet->read_fw_status_reg( + instance); + continue; + } + + wait = 0; + if (instance->drv_buf_index >= instance->drv_buf_alloc) { + dev_info(&instance->pdev->dev, + "Driver is done copying the buffer: %d\n", + instance->drv_buf_alloc); + status_reg |= MFI_STATE_CRASH_DUMP_DONE; + partial_copy = 1; + break; + } else { + memcpy(instance->crash_buf[instance->drv_buf_index], + instance->crash_dump_buf, CRASH_DMA_BUF_SIZE); + instance->drv_buf_index++; + status_reg &= ~MFI_STATE_DMADONE; + } + + writel(status_reg, &instance->reg_set->outbound_scratch_pad_0); + readl(&instance->reg_set->outbound_scratch_pad_0); + + msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS); + status_reg = instance->instancet->read_fw_status_reg(instance); + } + + if (status_reg & MFI_STATE_CRASH_DUMP_DONE) { + dev_info(&instance->pdev->dev, "Crash Dump is available,number " + "of copied buffers: %d\n", instance->drv_buf_index); + instance->fw_crash_buffer_size = instance->drv_buf_index; + instance->fw_crash_state = AVAILABLE; + instance->drv_buf_index = 0; + writel(status_reg, &instance->reg_set->outbound_scratch_pad_0); + readl(&instance->reg_set->outbound_scratch_pad_0); + if (!partial_copy) + megasas_reset_fusion(instance->host, 0); + } +} + + +/* Fusion OCR work queue */ +void megasas_fusion_ocr_wq(struct work_struct *work) +{ + struct megasas_instance *instance = + container_of(work, struct megasas_instance, work_init); + + megasas_reset_fusion(instance->host, 0); +} + +/* Allocate fusion context */ +int +megasas_alloc_fusion_context(struct megasas_instance *instance) +{ + struct fusion_context *fusion; + + instance->ctrl_context = kzalloc(sizeof(struct fusion_context), + GFP_KERNEL); + if (!instance->ctrl_context) { + dev_err(&instance->pdev->dev, "Failed from %s %d\n", + __func__, __LINE__); + return -ENOMEM; + } + + fusion = instance->ctrl_context; + + fusion->log_to_span_pages = get_order(MAX_LOGICAL_DRIVES_EXT * + sizeof(LD_SPAN_INFO)); + fusion->log_to_span = + (PLD_SPAN_INFO)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + fusion->log_to_span_pages); + if (!fusion->log_to_span) { + fusion->log_to_span = + vzalloc(array_size(MAX_LOGICAL_DRIVES_EXT, + sizeof(LD_SPAN_INFO))); + if (!fusion->log_to_span) { + dev_err(&instance->pdev->dev, "Failed from %s %d\n", + __func__, __LINE__); + return -ENOMEM; + } + } + + fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT * + sizeof(struct LD_LOAD_BALANCE_INFO)); + fusion->load_balance_info = + (struct 
LD_LOAD_BALANCE_INFO *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + fusion->load_balance_info_pages); + if (!fusion->load_balance_info) { + fusion->load_balance_info = + vzalloc(array_size(MAX_LOGICAL_DRIVES_EXT, + sizeof(struct LD_LOAD_BALANCE_INFO))); + if (!fusion->load_balance_info) + dev_err(&instance->pdev->dev, "Failed to allocate load_balance_info, " + "continuing without Load Balance support\n"); + } + + return 0; +} + +void +megasas_free_fusion_context(struct megasas_instance *instance) +{ + struct fusion_context *fusion = instance->ctrl_context; + + if (fusion) { + if (fusion->load_balance_info) { + if (is_vmalloc_addr(fusion->load_balance_info)) + vfree(fusion->load_balance_info); + else + free_pages((ulong)fusion->load_balance_info, + fusion->load_balance_info_pages); + } + + if (fusion->log_to_span) { + if (is_vmalloc_addr(fusion->log_to_span)) + vfree(fusion->log_to_span); + else + free_pages((ulong)fusion->log_to_span, + fusion->log_to_span_pages); + } + + kfree(fusion); + } +} + +struct megasas_instance_template megasas_instance_template_fusion = { + .enable_intr = megasas_enable_intr_fusion, + .disable_intr = megasas_disable_intr_fusion, + .clear_intr = megasas_clear_intr_fusion, + .read_fw_status_reg = megasas_read_fw_status_reg_fusion, + .adp_reset = megasas_adp_reset_fusion, + .check_reset = megasas_check_reset_fusion, + .service_isr = megasas_isr_fusion, + .tasklet = megasas_complete_cmd_dpc_fusion, + .init_adapter = megasas_init_adapter_fusion, + .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion, + .issue_dcmd = megasas_issue_dcmd_fusion, +}; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h new file mode 100644 index 000000000..b677d80e5 --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -0,0 +1,1396 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Linux MegaRAID driver for SAS based RAID controllers + * + * Copyright (c) 2009-2013 LSI Corporation + * Copyright (c) 2013-2016 Avago Technologies + * Copyright (c) 2016-2018 Broadcom Inc. + * + * FILE: megaraid_sas_fusion.h + * + * Authors: Broadcom Inc. 
+ * Manoj Jose + * Sumant Patro + * Kashyap Desai + * Sumit Saxena + * + * Send feedback to: megaraidlinux.pdl@broadcom.com + */ + +#ifndef _MEGARAID_SAS_FUSION_H_ +#define _MEGARAID_SAS_FUSION_H_ + +/* Fusion defines */ +#define MEGASAS_CHAIN_FRAME_SZ_MIN 1024 +#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009) +#define MEGASAS_MAX_CHAIN_SHIFT 5 +#define MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK 0x400000 +#define MEGASAS_MAX_CHAIN_SIZE_MASK 0x3E0 +#define MEGASAS_256K_IO 128 +#define MEGASAS_1MB_IO (MEGASAS_256K_IO * 4) +#define MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256 +#define MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0 +#define MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1 +#define MEGASAS_LOAD_BALANCE_FLAG 0x1 +#define MEGASAS_DCMD_MBOX_PEND_FLAG 0x1 +#define HOST_DIAG_WRITE_ENABLE 0x80 +#define HOST_DIAG_RESET_ADAPTER 0x4 +#define MEGASAS_FUSION_MAX_RESET_TRIES 3 +#define MAX_MSIX_QUEUES_FUSION 128 +#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16 +#define RDPQ_MAX_CHUNK_COUNT (MAX_MSIX_QUEUES_FUSION / RDPQ_MAX_INDEX_IN_ONE_CHUNK) + +/* Invader defines */ +#define MPI2_TYPE_CUDA 0x2 +#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000 +#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00 +#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10 +#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80 +#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8 +#define MR_RL_WRITE_THROUGH_MODE 0x00 +#define MR_RL_WRITE_BACK_MODE 0x01 + +/* T10 PI defines */ +#define MR_PROT_INFO_TYPE_CONTROLLER 0x8 +#define MEGASAS_SCSI_VARIABLE_LENGTH_CMD 0x7f +#define MEGASAS_SCSI_SERVICE_ACTION_READ32 0x9 +#define MEGASAS_SCSI_SERVICE_ACTION_WRITE32 0xB +#define MEGASAS_SCSI_ADDL_CDB_LEN 0x18 +#define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20 +#define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60 + +#define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) +#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C) + +/* + * Raid context flags + */ + +#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4 +#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30 +enum MR_RAID_FLAGS_IO_SUB_TYPE { + MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0, + MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1, + MR_RAID_FLAGS_IO_SUB_TYPE_RMW_DATA = 2, + MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P = 3, + MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q = 4, + MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6, + MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7, + MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD = 8 +}; + +/* + * Request descriptor types + */ +#define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7 +#define MEGASAS_REQ_DESCRIPT_FLAGS_MFA 0x1 +#define MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2 +#define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1 + +#define MEGASAS_FP_CMD_LEN 16 +#define MEGASAS_FUSION_IN_RESET 0 +#define MEGASAS_FUSION_OCR_NOT_POSSIBLE 1 +#define RAID_1_PEER_CMDS 2 +#define JBOD_MAPS_COUNT 2 +#define MEGASAS_REDUCE_QD_COUNT 64 +#define IOC_INIT_FRAME_SIZE 4096 + +/* + * Raid Context structure which describes MegaRAID specific IO Parameters + * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames + */ + +struct RAID_CONTEXT { +#if defined(__BIG_ENDIAN_BITFIELD) + u8 nseg:4; + u8 type:4; +#else + u8 type:4; + u8 nseg:4; +#endif + u8 resvd0; + __le16 timeout_value; + u8 reg_lock_flags; + u8 resvd1; + __le16 virtual_disk_tgt_id; + __le64 reg_lock_row_lba; + __le32 reg_lock_length; + __le16 next_lmid; + u8 ex_status; + u8 status; + u8 raid_flags; + u8 num_sge; + __le16 config_seq_num; + u8 span_arm; + u8 priority; + u8 num_sge_ext; + u8 resvd2; +}; + +/* + * Raid Context structure which describes ventura MegaRAID specific + * IO 
Paramenters ,This resides at offset 0x60 where the SGL normally + * starts in MPT IO Frames + */ +struct RAID_CONTEXT_G35 { + #define RAID_CONTEXT_NSEG_MASK 0x00F0 + #define RAID_CONTEXT_NSEG_SHIFT 4 + #define RAID_CONTEXT_TYPE_MASK 0x000F + #define RAID_CONTEXT_TYPE_SHIFT 0 + u16 nseg_type; + u16 timeout_value; /* 0x02 -0x03 */ + u16 routing_flags; // 0x04 -0x05 routing flags + u16 virtual_disk_tgt_id; /* 0x06 -0x07 */ + __le64 reg_lock_row_lba; /* 0x08 - 0x0F */ + u32 reg_lock_length; /* 0x10 - 0x13 */ + union { // flow specific + u16 rmw_op_index; /* 0x14 - 0x15, R5/6 RMW: rmw operation index*/ + u16 peer_smid; /* 0x14 - 0x15, R1 Write: peer smid*/ + u16 r56_arm_map; /* 0x14 - 0x15, Unused [15], LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */ + + } flow_specific; + + u8 ex_status; /* 0x16 : OUT */ + u8 status; /* 0x17 status */ + u8 raid_flags; /* 0x18 resvd[7:6], ioSubType[5:4], + * resvd[3:1], preferredCpu[0] + */ + u8 span_arm; /* 0x1C span[7:5], arm[4:0] */ + u16 config_seq_num; /* 0x1A -0x1B */ + union { + /* + * Bit format: + * --------------------------------- + * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * --------------------------------- + * Byte0 | numSGE[7]- numSGE[0] | + * --------------------------------- + * Byte1 |SD | resvd | numSGE 8-11 | + * -------------------------------- + */ + #define NUM_SGE_MASK_LOWER 0xFF + #define NUM_SGE_MASK_UPPER 0x0F + #define NUM_SGE_SHIFT_UPPER 8 + #define STREAM_DETECT_SHIFT 7 + #define STREAM_DETECT_MASK 0x80 + struct { +#if defined(__BIG_ENDIAN_BITFIELD) /* 0x1C - 0x1D */ + u16 stream_detected:1; + u16 reserved:3; + u16 num_sge:12; +#else + u16 num_sge:12; + u16 reserved:3; + u16 stream_detected:1; +#endif + } bits; + u8 bytes[2]; + } u; + u8 resvd2[2]; /* 0x1E-0x1F */ +}; + +#define MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT 1 +#define MR_RAID_CTX_ROUTINGFLAGS_C2D_SHIFT 2 +#define MR_RAID_CTX_ROUTINGFLAGS_FWD_SHIFT 3 +#define MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT 4 +#define MR_RAID_CTX_ROUTINGFLAGS_SBS_SHIFT 5 +#define MR_RAID_CTX_ROUTINGFLAGS_RW_SHIFT 6 +#define MR_RAID_CTX_ROUTINGFLAGS_LOG_SHIFT 7 +#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT 8 +#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK 0x0F00 +#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_SHIFT 12 +#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_MASK 0xF000 + +static inline void set_num_sge(struct RAID_CONTEXT_G35 *rctx_g35, + u16 sge_count) +{ + rctx_g35->u.bytes[0] = (u8)(sge_count & NUM_SGE_MASK_LOWER); + rctx_g35->u.bytes[1] |= (u8)((sge_count >> NUM_SGE_SHIFT_UPPER) + & NUM_SGE_MASK_UPPER); +} + +static inline u16 get_num_sge(struct RAID_CONTEXT_G35 *rctx_g35) +{ + u16 sge_count; + + sge_count = (u16)(((rctx_g35->u.bytes[1] & NUM_SGE_MASK_UPPER) + << NUM_SGE_SHIFT_UPPER) | (rctx_g35->u.bytes[0])); + return sge_count; +} + +#define SET_STREAM_DETECTED(rctx_g35) \ + (rctx_g35.u.bytes[1] |= STREAM_DETECT_MASK) + +#define CLEAR_STREAM_DETECTED(rctx_g35) \ + (rctx_g35.u.bytes[1] &= ~(STREAM_DETECT_MASK)) + +static inline bool is_stream_detected(struct RAID_CONTEXT_G35 *rctx_g35) +{ + return ((rctx_g35->u.bytes[1] & STREAM_DETECT_MASK)); +} + +union RAID_CONTEXT_UNION { + struct RAID_CONTEXT raid_context; + struct RAID_CONTEXT_G35 raid_context_g35; +}; + +#define RAID_CTX_SPANARM_ARM_SHIFT (0) +#define RAID_CTX_SPANARM_ARM_MASK (0x1f) + +#define RAID_CTX_SPANARM_SPAN_SHIFT (5) +#define RAID_CTX_SPANARM_SPAN_MASK (0xE0) + +/* LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */ +#define RAID_CTX_R56_Q_ARM_MASK (0x1F) +#define RAID_CTX_R56_P_ARM_SHIFT (5) +#define RAID_CTX_R56_P_ARM_MASK (0x3E0) +#define 
RAID_CTX_R56_LOG_ARM_SHIFT (10) +#define RAID_CTX_R56_LOG_ARM_MASK (0x7C00) + +/* number of bits per index in U32 TrackStream */ +#define BITS_PER_INDEX_STREAM 4 +#define INVALID_STREAM_NUM 16 +#define MR_STREAM_BITMAP 0x76543210 +#define STREAM_MASK ((1 << BITS_PER_INDEX_STREAM) - 1) +#define ZERO_LAST_STREAM 0x0fffffff +#define MAX_STREAMS_TRACKED 8 + +/* + * define region lock types + */ +enum REGION_TYPE { + REGION_TYPE_UNUSED = 0, + REGION_TYPE_SHARED_READ = 1, + REGION_TYPE_SHARED_WRITE = 2, + REGION_TYPE_EXCLUSIVE = 3, +}; + +/* MPI2 defines */ +#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */ +#define MPI2_WHOINIT_HOST_DRIVER (0x04) +#define MPI2_VERSION_MAJOR (0x02) +#define MPI2_VERSION_MINOR (0x00) +#define MPI2_VERSION_MAJOR_MASK (0xFF00) +#define MPI2_VERSION_MAJOR_SHIFT (8) +#define MPI2_VERSION_MINOR_MASK (0x00FF) +#define MPI2_VERSION_MINOR_SHIFT (0) +#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI2_VERSION_MINOR) +#define MPI2_HEADER_VERSION_UNIT (0x10) +#define MPI2_HEADER_VERSION_DEV (0x00) +#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) +#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) +#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF) +#define MPI2_HEADER_VERSION_DEV_SHIFT (0) +#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \ + MPI2_HEADER_VERSION_DEV) +#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) +#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100) +#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004) +/* EEDP escape mode */ +#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040) +#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */ +#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) +#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x03) +#define MPI2_REQ_DESCRIPT_FLAGS_FP_IO (0x06) +#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) +#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02) +#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000) +#define MPI2_SCSIIO_CONTROL_READ (0x02000000) +#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E) +#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) +#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00) +#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F) +#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0) +#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004) +#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF) +#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4) +#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB) +#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2) +#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7) +#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD) + +struct MPI25_IEEE_SGE_CHAIN64 { + __le64 Address; + __le32 Length; + __le16 Reserved1; + u8 NextChainOffset; + u8 Flags; +}; + +struct MPI2_SGE_SIMPLE_UNION { + __le32 FlagsLength; + union { + __le32 Address32; + __le64 Address64; + } u; +}; + +struct MPI2_SCSI_IO_CDB_EEDP32 { + u8 CDB[20]; /* 0x00 */ + __be32 PrimaryReferenceTag; /* 0x14 */ + __be16 PrimaryApplicationTag; /* 0x18 */ + __be16 PrimaryApplicationTagMask; /* 0x1A */ + __le32 TransferLength; /* 0x1C */ +}; + +struct MPI2_SGE_CHAIN_UNION { + __le16 Length; + u8 NextChainOffset; + u8 Flags; + union { + __le32 Address32; + __le64 Address64; + } u; +}; + +struct MPI2_IEEE_SGE_SIMPLE32 { + __le32 Address; + __le32 FlagsLength; +}; + +struct MPI2_IEEE_SGE_CHAIN32 { + __le32 Address; + __le32 FlagsLength; +}; + +struct MPI2_IEEE_SGE_SIMPLE64 { + __le64 Address; 
+ __le32 Length; + __le16 Reserved1; + u8 Reserved2; + u8 Flags; +}; + +struct MPI2_IEEE_SGE_CHAIN64 { + __le64 Address; + __le32 Length; + __le16 Reserved1; + u8 Reserved2; + u8 Flags; +}; + +union MPI2_IEEE_SGE_SIMPLE_UNION { + struct MPI2_IEEE_SGE_SIMPLE32 Simple32; + struct MPI2_IEEE_SGE_SIMPLE64 Simple64; +}; + +union MPI2_IEEE_SGE_CHAIN_UNION { + struct MPI2_IEEE_SGE_CHAIN32 Chain32; + struct MPI2_IEEE_SGE_CHAIN64 Chain64; +}; + +union MPI2_SGE_IO_UNION { + struct MPI2_SGE_SIMPLE_UNION MpiSimple; + struct MPI2_SGE_CHAIN_UNION MpiChain; + union MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; + union MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; +}; + +union MPI2_SCSI_IO_CDB_UNION { + u8 CDB32[32]; + struct MPI2_SCSI_IO_CDB_EEDP32 EEDP32; + struct MPI2_SGE_SIMPLE_UNION SGE; +}; + +/**************************************************************************** +* SCSI Task Management messages +****************************************************************************/ + +/*SCSI Task Management Request Message */ +struct MPI2_SCSI_TASK_MANAGE_REQUEST { + u16 DevHandle; /*0x00 */ + u8 ChainOffset; /*0x02 */ + u8 Function; /*0x03 */ + u8 Reserved1; /*0x04 */ + u8 TaskType; /*0x05 */ + u8 Reserved2; /*0x06 */ + u8 MsgFlags; /*0x07 */ + u8 VP_ID; /*0x08 */ + u8 VF_ID; /*0x09 */ + u16 Reserved3; /*0x0A */ + u8 LUN[8]; /*0x0C */ + u32 Reserved4[7]; /*0x14 */ + u16 TaskMID; /*0x30 */ + u16 Reserved5; /*0x32 */ +}; + + +/*SCSI Task Management Reply Message */ +struct MPI2_SCSI_TASK_MANAGE_REPLY { + u16 DevHandle; /*0x00 */ + u8 MsgLength; /*0x02 */ + u8 Function; /*0x03 */ + u8 ResponseCode; /*0x04 */ + u8 TaskType; /*0x05 */ + u8 Reserved1; /*0x06 */ + u8 MsgFlags; /*0x07 */ + u8 VP_ID; /*0x08 */ + u8 VF_ID; /*0x09 */ + u16 Reserved2; /*0x0A */ + u16 Reserved3; /*0x0C */ + u16 IOCStatus; /*0x0E */ + u32 IOCLogInfo; /*0x10 */ + u32 TerminationCount; /*0x14 */ + u32 ResponseInfo; /*0x18 */ +}; + +struct MR_TM_REQUEST { + char request[128]; +}; + +struct MR_TM_REPLY { + char reply[128]; +}; + +/* SCSI Task Management Request Message */ +struct MR_TASK_MANAGE_REQUEST { + /*To be type casted to struct MPI2_SCSI_TASK_MANAGE_REQUEST */ + struct MR_TM_REQUEST TmRequest; + union { + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved1:30; + u32 isTMForPD:1; + u32 isTMForLD:1; +#else + u32 isTMForLD:1; + u32 isTMForPD:1; + u32 reserved1:30; +#endif + u32 reserved2; + } tmReqFlags; + struct MR_TM_REPLY TMReply; + }; +}; + +/* TaskType values */ + +#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01) +#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02) +#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03) +#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05) +#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06) +#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07) +#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08) +#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09) +#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A) + +/* ResponseCode values */ + +#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00) +#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02) +#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04) +#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05) +#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08) +#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09) +#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A) +#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80) + +/* + * RAID SCSI IO Request Message + * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST + */ +struct 
MPI2_RAID_SCSI_IO_REQUEST { + __le16 DevHandle; /* 0x00 */ + u8 ChainOffset; /* 0x02 */ + u8 Function; /* 0x03 */ + __le16 Reserved1; /* 0x04 */ + u8 Reserved2; /* 0x06 */ + u8 MsgFlags; /* 0x07 */ + u8 VP_ID; /* 0x08 */ + u8 VF_ID; /* 0x09 */ + __le16 Reserved3; /* 0x0A */ + __le32 SenseBufferLowAddress; /* 0x0C */ + __le16 SGLFlags; /* 0x10 */ + u8 SenseBufferLength; /* 0x12 */ + u8 Reserved4; /* 0x13 */ + u8 SGLOffset0; /* 0x14 */ + u8 SGLOffset1; /* 0x15 */ + u8 SGLOffset2; /* 0x16 */ + u8 SGLOffset3; /* 0x17 */ + __le32 SkipCount; /* 0x18 */ + __le32 DataLength; /* 0x1C */ + __le32 BidirectionalDataLength; /* 0x20 */ + __le16 IoFlags; /* 0x24 */ + __le16 EEDPFlags; /* 0x26 */ + __le32 EEDPBlockSize; /* 0x28 */ + __le32 SecondaryReferenceTag; /* 0x2C */ + __le16 SecondaryApplicationTag; /* 0x30 */ + __le16 ApplicationTagTranslationMask; /* 0x32 */ + u8 LUN[8]; /* 0x34 */ + __le32 Control; /* 0x3C */ + union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */ + union RAID_CONTEXT_UNION RaidContext; /* 0x60 */ + union { + union MPI2_SGE_IO_UNION SGL; /* 0x80 */ + DECLARE_FLEX_ARRAY(union MPI2_SGE_IO_UNION, SGLs); + }; +}; + +/* + * MPT RAID MFA IO Descriptor. + */ +struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR { + u32 RequestFlags:8; + u32 MessageAddress1:24; + u32 MessageAddress2; +}; + +/* Default Request Descriptor */ +struct MPI2_DEFAULT_REQUEST_DESCRIPTOR { + u8 RequestFlags; /* 0x00 */ + u8 MSIxIndex; /* 0x01 */ + __le16 SMID; /* 0x02 */ + __le16 LMID; /* 0x04 */ + __le16 DescriptorTypeDependent; /* 0x06 */ +}; + +/* High Priority Request Descriptor */ +struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR { + u8 RequestFlags; /* 0x00 */ + u8 MSIxIndex; /* 0x01 */ + __le16 SMID; /* 0x02 */ + __le16 LMID; /* 0x04 */ + __le16 Reserved1; /* 0x06 */ +}; + +/* SCSI IO Request Descriptor */ +struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR { + u8 RequestFlags; /* 0x00 */ + u8 MSIxIndex; /* 0x01 */ + __le16 SMID; /* 0x02 */ + __le16 LMID; /* 0x04 */ + __le16 DevHandle; /* 0x06 */ +}; + +/* SCSI Target Request Descriptor */ +struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR { + u8 RequestFlags; /* 0x00 */ + u8 MSIxIndex; /* 0x01 */ + __le16 SMID; /* 0x02 */ + __le16 LMID; /* 0x04 */ + __le16 IoIndex; /* 0x06 */ +}; + +/* RAID Accelerator Request Descriptor */ +struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR { + u8 RequestFlags; /* 0x00 */ + u8 MSIxIndex; /* 0x01 */ + __le16 SMID; /* 0x02 */ + __le16 LMID; /* 0x04 */ + __le16 Reserved; /* 0x06 */ +}; + +/* union of Request Descriptors */ +union MEGASAS_REQUEST_DESCRIPTOR_UNION { + struct MPI2_DEFAULT_REQUEST_DESCRIPTOR Default; + struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; + struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; + struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget; + struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator; + struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo; + union { + struct { + __le32 low; + __le32 high; + } u; + __le64 Words; + }; +}; + +/* Default Reply Descriptor */ +struct MPI2_DEFAULT_REPLY_DESCRIPTOR { + u8 ReplyFlags; /* 0x00 */ + u8 MSIxIndex; /* 0x01 */ + __le16 DescriptorTypeDependent1; /* 0x02 */ + __le32 DescriptorTypeDependent2; /* 0x04 */ +}; + +/* Address Reply Descriptor */ +struct MPI2_ADDRESS_REPLY_DESCRIPTOR { + u8 ReplyFlags; /* 0x00 */ + u8 MSIxIndex; /* 0x01 */ + __le16 SMID; /* 0x02 */ + __le32 ReplyFrameAddress; /* 0x04 */ +}; + +/* SCSI IO Success Reply Descriptor */ +struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR { + u8 ReplyFlags; /* 0x00 */ + u8 MSIxIndex; /* 0x01 */ + __le16 SMID; /* 0x02 */ + __le16 
TaskTag; /* 0x04 */ + __le16 Reserved1; /* 0x06 */ +}; + +/* TargetAssist Success Reply Descriptor */ +struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR { + u8 ReplyFlags; /* 0x00 */ + u8 MSIxIndex; /* 0x01 */ + __le16 SMID; /* 0x02 */ + u8 SequenceNumber; /* 0x04 */ + u8 Reserved1; /* 0x05 */ + __le16 IoIndex; /* 0x06 */ +}; + +/* Target Command Buffer Reply Descriptor */ +struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR { + u8 ReplyFlags; /* 0x00 */ + u8 MSIxIndex; /* 0x01 */ + u8 VP_ID; /* 0x02 */ + u8 Flags; /* 0x03 */ + __le16 InitiatorDevHandle; /* 0x04 */ + __le16 IoIndex; /* 0x06 */ +}; + +/* RAID Accelerator Success Reply Descriptor */ +struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR { + u8 ReplyFlags; /* 0x00 */ + u8 MSIxIndex; /* 0x01 */ + __le16 SMID; /* 0x02 */ + __le32 Reserved; /* 0x04 */ +}; + +/* union of Reply Descriptors */ +union MPI2_REPLY_DESCRIPTORS_UNION { + struct MPI2_DEFAULT_REPLY_DESCRIPTOR Default; + struct MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply; + struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; + struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess; + struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer; + struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR + RAIDAcceleratorSuccess; + __le64 Words; +}; + +/* IOCInit Request message */ +struct MPI2_IOC_INIT_REQUEST { + u8 WhoInit; /* 0x00 */ + u8 Reserved1; /* 0x01 */ + u8 ChainOffset; /* 0x02 */ + u8 Function; /* 0x03 */ + __le16 Reserved2; /* 0x04 */ + u8 Reserved3; /* 0x06 */ + u8 MsgFlags; /* 0x07 */ + u8 VP_ID; /* 0x08 */ + u8 VF_ID; /* 0x09 */ + __le16 Reserved4; /* 0x0A */ + __le16 MsgVersion; /* 0x0C */ + __le16 HeaderVersion; /* 0x0E */ + u32 Reserved5; /* 0x10 */ + __le16 Reserved6; /* 0x14 */ + u8 HostPageSize; /* 0x16 */ + u8 HostMSIxVectors; /* 0x17 */ + __le16 Reserved8; /* 0x18 */ + __le16 SystemRequestFrameSize; /* 0x1A */ + __le16 ReplyDescriptorPostQueueDepth; /* 0x1C */ + __le16 ReplyFreeQueueDepth; /* 0x1E */ + __le32 SenseBufferAddressHigh; /* 0x20 */ + __le32 SystemReplyAddressHigh; /* 0x24 */ + __le64 SystemRequestFrameBaseAddress; /* 0x28 */ + __le64 ReplyDescriptorPostQueueAddress;/* 0x30 */ + __le64 ReplyFreeQueueAddress; /* 0x38 */ + __le64 TimeStamp; /* 0x40 */ +}; + +/* mrpriv defines */ +#define MR_PD_INVALID 0xFFFF +#define MR_DEVHANDLE_INVALID 0xFFFF +#define MAX_SPAN_DEPTH 8 +#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH +#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH) +#define MAX_ROW_SIZE 32 +#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE) +#define MAX_LOGICAL_DRIVES 64 +#define MAX_LOGICAL_DRIVES_EXT 256 +#define MAX_LOGICAL_DRIVES_DYN 512 +#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES) +#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES) +#define MAX_ARRAYS 128 +#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS) +#define MAX_ARRAYS_EXT 256 +#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT) +#define MAX_API_ARRAYS_DYN 512 +#define MAX_PHYSICAL_DEVICES 256 +#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES) +#define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512 +#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 +#define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102 +#define MR_DCMD_DRV_GET_TARGET_PROP 0x0200e103 +#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/ +#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200 +#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200 +#define MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 0x01200100 +#define MR_DCMD_CTRL_DEVICE_LIST_GET 0x01190600 + +struct MR_DEV_HANDLE_INFO { + __le16 curDevHdl; + u8 
validHandles; + u8 interfaceType; + __le16 devHandle[2]; +}; + +struct MR_ARRAY_INFO { + __le16 pd[MAX_RAIDMAP_ROW_SIZE]; +}; + +struct MR_QUAD_ELEMENT { + __le64 logStart; + __le64 logEnd; + __le64 offsetInSpan; + __le32 diff; + __le32 reserved1; +}; + +struct MR_SPAN_INFO { + __le32 noElements; + __le32 reserved1; + struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH]; +}; + +struct MR_LD_SPAN { + __le64 startBlk; + __le64 numBlks; + __le16 arrayRef; + u8 spanRowSize; + u8 spanRowDataSize; + u8 reserved[4]; +}; + +struct MR_SPAN_BLOCK_INFO { + __le64 num_rows; + struct MR_LD_SPAN span; + struct MR_SPAN_INFO block_span_info; +}; + +#define MR_RAID_CTX_CPUSEL_0 0 +#define MR_RAID_CTX_CPUSEL_1 1 +#define MR_RAID_CTX_CPUSEL_2 2 +#define MR_RAID_CTX_CPUSEL_3 3 +#define MR_RAID_CTX_CPUSEL_FCFS 0xF + +struct MR_CPU_AFFINITY_MASK { + union { + struct { +#ifndef __BIG_ENDIAN_BITFIELD + u8 hw_path:1; + u8 cpu0:1; + u8 cpu1:1; + u8 cpu2:1; + u8 cpu3:1; + u8 reserved:3; +#else + u8 reserved:3; + u8 cpu3:1; + u8 cpu2:1; + u8 cpu1:1; + u8 cpu0:1; + u8 hw_path:1; +#endif + }; + u8 core_mask; + }; +}; + +struct MR_IO_AFFINITY { + union { + struct { + struct MR_CPU_AFFINITY_MASK pdRead; + struct MR_CPU_AFFINITY_MASK pdWrite; + struct MR_CPU_AFFINITY_MASK ldRead; + struct MR_CPU_AFFINITY_MASK ldWrite; + }; + u32 word; + }; + u8 maxCores; /* Total cores + HW Path in ROC */ + u8 reserved[3]; +}; + +struct MR_LD_RAID { + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved4:2; + u32 fp_cache_bypass_capable:1; + u32 fp_rmw_capable:1; + u32 disable_coalescing:1; + u32 fpBypassRegionLock:1; + u32 tmCapable:1; + u32 fpNonRWCapable:1; + u32 fpReadAcrossStripe:1; + u32 fpWriteAcrossStripe:1; + u32 fpReadCapable:1; + u32 fpWriteCapable:1; + u32 encryptionType:8; + u32 pdPiMode:4; + u32 ldPiMode:4; + u32 reserved5:2; + u32 ra_capable:1; + u32 fpCapable:1; +#else + u32 fpCapable:1; + u32 ra_capable:1; + u32 reserved5:2; + u32 ldPiMode:4; + u32 pdPiMode:4; + u32 encryptionType:8; + u32 fpWriteCapable:1; + u32 fpReadCapable:1; + u32 fpWriteAcrossStripe:1; + u32 fpReadAcrossStripe:1; + u32 fpNonRWCapable:1; + u32 tmCapable:1; + u32 fpBypassRegionLock:1; + u32 disable_coalescing:1; + u32 fp_rmw_capable:1; + u32 fp_cache_bypass_capable:1; + u32 reserved4:2; +#endif + } capability; + __le32 reserved6; + __le64 size; + u8 spanDepth; + u8 level; + u8 stripeShift; + u8 rowSize; + u8 rowDataSize; + u8 writeMode; + u8 PRL; + u8 SRL; + __le16 targetId; + u8 ldState; + u8 regTypeReqOnWrite; + u8 modFactor; + u8 regTypeReqOnRead; + __le16 seqNum; + +struct { +#ifndef __BIG_ENDIAN_BITFIELD + u32 ldSyncRequired:1; + u32 regTypeReqOnReadIsValid:1; + u32 isEPD:1; + u32 enableSLDOnAllRWIOs:1; + u32 reserved:28; +#else + u32 reserved:28; + u32 enableSLDOnAllRWIOs:1; + u32 isEPD:1; + u32 regTypeReqOnReadIsValid:1; + u32 ldSyncRequired:1; +#endif + } flags; + + u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */ + u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/ + /* Ox2D This LD accept priority boost of this type */ + u8 ld_accept_priority_type; + u8 reserved2[2]; /* 0x2E - 0x2F */ + /* 0x30 - 0x33, Logical block size for the LD */ + u32 logical_block_length; + struct { +#ifndef __BIG_ENDIAN_BITFIELD + /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */ + u32 ld_pi_exp:4; + /* 0x34, LOGICAL BLOCKS PER PHYSICAL + * BLOCK EXPONENT from READ CAPACITY 16 + */ + u32 ld_logical_block_exp:4; + u32 reserved1:24; /* 0x34 */ +#else + u32 reserved1:24; /* 0x34 */ + /* 0x34, LOGICAL BLOCKS PER PHYSICAL + * BLOCK EXPONENT 
from READ CAPACITY 16 + */ + u32 ld_logical_block_exp:4; + /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */ + u32 ld_pi_exp:4; +#endif + }; /* 0x34 - 0x37 */ + /* 0x38 - 0x3f, This will determine which + * core will process LD IO and PD IO. + */ + struct MR_IO_AFFINITY cpuAffinity; + /* Bit definiations are specified by MR_IO_AFFINITY */ + u8 reserved3[0x80 - 0x40]; /* 0x40 - 0x7f */ +}; + +struct MR_LD_SPAN_MAP { + struct MR_LD_RAID ldRaid; + u8 dataArmMap[MAX_RAIDMAP_ROW_SIZE]; + struct MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH]; +}; + +struct MR_FW_RAID_MAP { + __le32 totalSize; + union { + struct { + __le32 maxLd; + __le32 maxSpanDepth; + __le32 maxRowSize; + __le32 maxPdCount; + __le32 maxArrays; + } validationInfo; + __le32 version[5]; + }; + + __le32 ldCount; + __le32 Reserved1; + u8 ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+ + MAX_RAIDMAP_VIEWS]; + u8 fpPdIoTimeoutSec; + u8 reserved2[7]; + struct MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS]; + struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; + struct MR_LD_SPAN_MAP ldSpanMap[]; +}; + +struct IO_REQUEST_INFO { + u64 ldStartBlock; + u32 numBlocks; + u16 ldTgtId; + u8 isRead; + __le16 devHandle; + u8 pd_interface; + u64 pdBlock; + u8 fpOkForIo; + u8 IoforUnevenSpan; + u8 start_span; + u8 do_fp_rlbypass; + u64 start_row; + u8 span_arm; /* span[7:5], arm[4:0] */ + u8 pd_after_lb; + u16 r1_alt_dev_handle; /* raid 1/10 only */ + bool ra_capable; + u8 data_arms; +}; + +struct MR_LD_TARGET_SYNC { + u8 targetId; + u8 reserved; + __le16 seqNum; +}; + +/* + * RAID Map descriptor Types. + * Each element should uniquely idetify one data structure in the RAID map + */ +enum MR_RAID_MAP_DESC_TYPE { + /* MR_DEV_HANDLE_INFO data */ + RAID_MAP_DESC_TYPE_DEVHDL_INFO = 0x0, + /* target to Ld num Index map */ + RAID_MAP_DESC_TYPE_TGTID_INFO = 0x1, + /* MR_ARRAY_INFO data */ + RAID_MAP_DESC_TYPE_ARRAY_INFO = 0x2, + /* MR_LD_SPAN_MAP data */ + RAID_MAP_DESC_TYPE_SPAN_INFO = 0x3, + RAID_MAP_DESC_TYPE_COUNT, +}; + +/* + * This table defines the offset, size and num elements of each descriptor + * type in the RAID Map buffer + */ +struct MR_RAID_MAP_DESC_TABLE { + /* Raid map descriptor type */ + u32 raid_map_desc_type; + /* Offset into the RAID map buffer where + * descriptor data is saved + */ + u32 raid_map_desc_offset; + /* total size of the + * descriptor buffer + */ + u32 raid_map_desc_buffer_size; + /* Number of elements contained in the + * descriptor buffer + */ + u32 raid_map_desc_elements; +}; + +/* + * Dynamic Raid Map Structure. + */ +struct MR_FW_RAID_MAP_DYNAMIC { + u32 raid_map_size; /* total size of RAID Map structure */ + u32 desc_table_offset;/* Offset of desc table into RAID map*/ + u32 desc_table_size; /* Total Size of desc table */ + /* Total Number of elements in the desc table */ + u32 desc_table_num_elements; + u64 reserved1; + u32 reserved2[3]; /*future use */ + /* timeout value used by driver in FP IOs */ + u8 fp_pd_io_timeout_sec; + u8 reserved3[3]; + /* when this seqNum increments, driver needs to + * release RMW buffers asap + */ + u32 rmw_fp_seq_num; + u16 ld_count; /* count of lds. */ + u16 ar_count; /* count of arrays */ + u16 span_count; /* count of spans */ + u16 reserved4[3]; +/* + * The below structure of pointers is only to be used by the driver. 
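The dynamic RAID map introduced here replaces fixed-size arrays with a table of MR_RAID_MAP_DESC_TABLE entries, each giving the type, byte offset, buffer size and element count of one region inside the trailing raid_map_desc_data[] area, so a consumer can locate every region in a single pass over the table and simply skip types it does not recognize. The sketch below shows that walk with stand-in host-endian types; it is a simplified model of the layout described above, not code from the driver.

#include <stdint.h>
#include <stdio.h>

enum desc_type { DESC_DEVHDL, DESC_TGTID, DESC_ARRAY, DESC_SPAN, DESC_COUNT };

struct desc_entry {              /* simplified MR_RAID_MAP_DESC_TABLE stand-in */
	uint32_t type;
	uint32_t offset;         /* byte offset from the start of the map buffer */
	uint32_t size;           /* total bytes used by this region */
	uint32_t elements;       /* number of fixed-size elements in the region */
};

struct dyn_map_view {
	const void *region[DESC_COUNT];
	uint32_t    count[DESC_COUNT];
};

/* Record a pointer and element count for each region type we know about. */
static void parse_dynamic_map(const uint8_t *map, const struct desc_entry *tbl,
			      uint32_t tbl_entries, struct dyn_map_view *out)
{
	for (uint32_t i = 0; i < tbl_entries; i++) {
		if (tbl[i].type >= DESC_COUNT)
			continue;        /* unknown region type: ignore it */
		out->region[tbl[i].type] = map + tbl[i].offset;
		out->count[tbl[i].type]  = tbl[i].elements;
	}
}

int main(void)
{
	uint8_t buf[256] = { 0 };
	struct desc_entry tbl[2] = {
		{ .type = DESC_TGTID, .offset = 64,  .size = 32, .elements = 16 },
		{ .type = DESC_SPAN,  .offset = 128, .size = 96, .elements = 2  },
	};
	struct dyn_map_view view = { 0 };

	parse_dynamic_map(buf, tbl, 2, &view);
	printf("span region at +%td, %u elements\n",
	       (const uint8_t *)view.region[DESC_SPAN] - buf, view.count[DESC_SPAN]);
	return 0;
}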
+ * This is added in the ,API to reduce the amount of code changes + * needed in the driver to support dynamic RAID map Firmware should + * not update these pointers while preparing the raid map + */ + union { + struct { + struct MR_DEV_HANDLE_INFO *dev_hndl_info; + u16 *ld_tgt_id_to_ld; + struct MR_ARRAY_INFO *ar_map_info; + struct MR_LD_SPAN_MAP *ld_span_map; + }; + u64 ptr_structure_size[RAID_MAP_DESC_TYPE_COUNT]; + }; +/* + * RAID Map descriptor table defines the layout of data in the RAID Map. + * The size of the descriptor table itself could change. + */ + /* Variable Size descriptor Table. */ + struct MR_RAID_MAP_DESC_TABLE + raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT]; + /* Variable Size buffer containing all data */ + u32 raid_map_desc_data[]; +}; /* Dynamicaly sized RAID MAp structure */ + +#define IEEE_SGE_FLAGS_ADDR_MASK (0x03) +#define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) +#define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) +#define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02) +#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) +#define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) +#define IEEE_SGE_FLAGS_END_OF_LIST (0x40) + +#define MPI2_SGE_FLAGS_SHIFT (0x02) +#define IEEE_SGE_FLAGS_FORMAT_MASK (0xC0) +#define IEEE_SGE_FLAGS_FORMAT_IEEE (0x00) +#define IEEE_SGE_FLAGS_FORMAT_NVME (0x02) + +#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C) +#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00) +#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08) +#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10) + +#define MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME 15 +#define MEGASAS_MAX_SNAP_DUMP_WAIT_TIME 60 + +struct megasas_register_set; +struct megasas_instance; + +union desc_word { + u64 word; + struct { + u32 low; + u32 high; + } u; +}; + +struct megasas_cmd_fusion { + struct MPI2_RAID_SCSI_IO_REQUEST *io_request; + dma_addr_t io_request_phys_addr; + + union MPI2_SGE_IO_UNION *sg_frame; + dma_addr_t sg_frame_phys_addr; + + u8 *sense; + dma_addr_t sense_phys_addr; + + struct list_head list; + struct scsi_cmnd *scmd; + struct megasas_instance *instance; + + u8 retry_for_fw_reset; + union MEGASAS_REQUEST_DESCRIPTOR_UNION *request_desc; + + /* + * Context for a MFI frame. + * Used to get the mfi cmd from list when a MFI cmd is completed + */ + u32 sync_cmd_idx; + u32 index; + u8 pd_r1_lb; + struct completion done; + u8 pd_interface; + u16 r1_alt_dev_handle; /* raid 1/10 only*/ + bool cmd_completed; /* raid 1/10 fp writes status holder */ + +}; + +struct LD_LOAD_BALANCE_INFO { + u8 loadBalanceFlag; + u8 reserved1; + atomic_t scsi_pending_cmds[MAX_PHYSICAL_DEVICES]; + u64 last_accessed_block[MAX_PHYSICAL_DEVICES]; +}; + +/* SPAN_SET is info caclulated from span info from Raid map per LD */ +typedef struct _LD_SPAN_SET { + u64 log_start_lba; + u64 log_end_lba; + u64 span_row_start; + u64 span_row_end; + u64 data_strip_start; + u64 data_strip_end; + u64 data_row_start; + u64 data_row_end; + u8 strip_offset[MAX_SPAN_DEPTH]; + u32 span_row_data_width; + u32 diff; + u32 reserved[2]; +} LD_SPAN_SET, *PLD_SPAN_SET; + +typedef struct LOG_BLOCK_SPAN_INFO { + LD_SPAN_SET span_set[MAX_SPAN_DEPTH]; +} LD_SPAN_INFO, *PLD_SPAN_INFO; + +struct MR_FW_RAID_MAP_ALL { + struct MR_FW_RAID_MAP raidMap; + struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES]; +} __attribute__ ((packed)); + +struct MR_DRV_RAID_MAP { + /* total size of this structure, including this field. + * This feild will be manupulated by driver for ext raid map, + * else pick the value from firmware raid map. 
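LD_LOAD_BALANCE_INFO above keeps, per candidate arm, a count of outstanding commands and the last block that arm touched, which is the state a RAID-1/10 read balancer needs when choosing a device handle for each I/O. The sketch below shows one plausible policy built on exactly those two arrays (fewest pending commands, ties broken by the shortest distance from the last accessed block); it illustrates how such state can be used and is not the driver's actual selection routine.

#include <stdint.h>
#include <stdio.h>

struct arm_state {               /* stand-in for one slot of LD_LOAD_BALANCE_INFO */
	unsigned pending_cmds;   /* like scsi_pending_cmds[arm] */
	uint64_t last_block;     /* like last_accessed_block[arm] */
};

static uint64_t distance(uint64_t a, uint64_t b)
{
	return a > b ? a - b : b - a;
}

/* Pick the mirror arm to read 'lba' from: least loaded, then closest head. */
static int pick_arm(const struct arm_state *arm, int narms, uint64_t lba)
{
	int best = 0;

	for (int i = 1; i < narms; i++) {
		if (arm[i].pending_cmds < arm[best].pending_cmds ||
		    (arm[i].pending_cmds == arm[best].pending_cmds &&
		     distance(arm[i].last_block, lba) <
		     distance(arm[best].last_block, lba)))
			best = i;
	}
	return best;
}

int main(void)
{
	struct arm_state arms[2] = {
		{ .pending_cmds = 3, .last_block = 1000 },
		{ .pending_cmds = 1, .last_block = 900000 },
	};

	printf("read LBA 1010 from arm %d\n", pick_arm(arms, 2, 1010));
	return 0;
}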
+ */ + __le32 totalSize; + + union { + struct { + __le32 maxLd; + __le32 maxSpanDepth; + __le32 maxRowSize; + __le32 maxPdCount; + __le32 maxArrays; + } validationInfo; + __le32 version[5]; + }; + + /* timeout value used by driver in FP IOs*/ + u8 fpPdIoTimeoutSec; + u8 reserved2[7]; + + __le16 ldCount; + __le16 arCount; + __le16 spanCount; + __le16 reserve3; + + struct MR_DEV_HANDLE_INFO + devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN]; + u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN]; + struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN]; + struct MR_LD_SPAN_MAP ldSpanMap[]; + +}; + +/* Driver raid map size is same as raid map ext + * MR_DRV_RAID_MAP_ALL is created to sync with old raid. + * And it is mainly for code re-use purpose. + */ +struct MR_DRV_RAID_MAP_ALL { + + struct MR_DRV_RAID_MAP raidMap; + struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN]; +} __packed; + + + +struct MR_FW_RAID_MAP_EXT { + /* Not usred in new map */ + u32 reserved; + + union { + struct { + u32 maxLd; + u32 maxSpanDepth; + u32 maxRowSize; + u32 maxPdCount; + u32 maxArrays; + } validationInfo; + u32 version[5]; + }; + + u8 fpPdIoTimeoutSec; + u8 reserved2[7]; + + __le16 ldCount; + __le16 arCount; + __le16 spanCount; + __le16 reserve3; + + struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; + u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT]; + struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT]; + struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT]; +}; + +/* + * * define MR_PD_CFG_SEQ structure for system PDs + * */ +struct MR_PD_CFG_SEQ { + u16 seqNum; + u16 devHandle; + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u8 reserved:7; + u8 tmCapable:1; +#else + u8 tmCapable:1; + u8 reserved:7; +#endif + } capability; + u8 reserved; + u16 pd_target_id; +} __packed; + +struct MR_PD_CFG_SEQ_NUM_SYNC { + __le32 size; + __le32 count; + struct MR_PD_CFG_SEQ seq[]; +} __packed; + +/* stream detection */ +struct STREAM_DETECT { + u64 next_seq_lba; /* next LBA to match sequential access */ + struct megasas_cmd_fusion *first_cmd_fusion; /* first cmd in group */ + struct megasas_cmd_fusion *last_cmd_fusion; /* last cmd in group */ + u32 count_cmds_in_stream; /* count of host commands in this stream */ + u16 num_sges_in_group; /* total number of SGEs in grouped IOs */ + u8 is_read; /* SCSI OpCode for this stream */ + u8 group_depth; /* total number of host commands in group */ + /* TRUE if cannot add any more commands to this group */ + bool group_flush; + u8 reserved[7]; /* pad to 64-bit alignment */ +}; + +struct LD_STREAM_DETECT { + bool write_back; /* TRUE if WB, FALSE if WT */ + bool fp_write_enabled; + bool members_ssds; + bool fp_cache_bypass_capable; + u32 mru_bit_map; /* bitmap used to track MRU and LRU stream indicies */ + /* this is the array of stream detect structures (one per stream) */ + struct STREAM_DETECT stream_track[MAX_STREAMS_TRACKED]; +}; + +struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY { + u64 RDPQBaseAddress; + u32 Reserved1; + u32 Reserved2; +}; + +struct rdpq_alloc_detail { + struct dma_pool *dma_pool_ptr; + dma_addr_t pool_entry_phys; + union MPI2_REPLY_DESCRIPTORS_UNION *pool_entry_virt; +}; + +struct fusion_context { + struct megasas_cmd_fusion **cmd_list; + dma_addr_t req_frames_desc_phys; + u8 *req_frames_desc; + + struct dma_pool *io_request_frames_pool; + dma_addr_t io_request_frames_phys; + u8 *io_request_frames; + + struct dma_pool *sg_dma_pool; + struct dma_pool *sense_dma_pool; + + u8 *sense; + dma_addr_t sense_phys_addr; + + atomic_t busy_mq_poll[MAX_MSIX_QUEUES_FUSION]; + + dma_addr_t 
reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION]; + union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION]; + struct rdpq_alloc_detail rdpq_tracker[RDPQ_MAX_CHUNK_COUNT]; + struct dma_pool *reply_frames_desc_pool; + struct dma_pool *reply_frames_desc_pool_align; + + u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION]; + + u32 reply_q_depth; + u32 request_alloc_sz; + u32 reply_alloc_sz; + u32 io_frames_alloc_sz; + + struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY *rdpq_virt; + dma_addr_t rdpq_phys; + u16 max_sge_in_main_msg; + u16 max_sge_in_chain; + + u8 chain_offset_io_request; + u8 chain_offset_mfi_pthru; + + struct MR_FW_RAID_MAP_DYNAMIC *ld_map[2]; + dma_addr_t ld_map_phys[2]; + + /*Non dma-able memory. Driver local copy.*/ + struct MR_DRV_RAID_MAP_ALL *ld_drv_map[2]; + + u32 max_map_sz; + u32 current_map_sz; + u32 old_map_sz; + u32 new_map_sz; + u32 drv_map_sz; + u32 drv_map_pages; + struct MR_PD_CFG_SEQ_NUM_SYNC *pd_seq_sync[JBOD_MAPS_COUNT]; + dma_addr_t pd_seq_phys[JBOD_MAPS_COUNT]; + u8 fast_path_io; + struct LD_LOAD_BALANCE_INFO *load_balance_info; + u32 load_balance_info_pages; + LD_SPAN_INFO *log_to_span; + u32 log_to_span_pages; + struct LD_STREAM_DETECT **stream_detect_by_ld; + dma_addr_t ioc_init_request_phys; + struct MPI2_IOC_INIT_REQUEST *ioc_init_request; + struct megasas_cmd *ioc_init_cmd; + bool pcie_bw_limitation; + bool r56_div_offload; +}; + +union desc_value { + __le64 word; + struct { + __le32 low; + __le32 high; + } u; +}; + +enum CMD_RET_VALUES { + REFIRE_CMD = 1, + COMPLETE_CMD = 2, + RETURN_CMD = 3, +}; + +struct MR_SNAPDUMP_PROPERTIES { + u8 offload_num; + u8 max_num_supported; + u8 cur_num_supported; + u8 trigger_min_num_sec_before_ocr; + u8 reserved[12]; +}; + +struct megasas_debugfs_buffer { + void *buf; + u32 len; +}; + +void megasas_free_cmds_fusion(struct megasas_instance *instance); +int megasas_ioc_init_fusion(struct megasas_instance *instance); +u8 megasas_get_map_info(struct megasas_instance *instance); +int megasas_sync_map_info(struct megasas_instance *instance); +void megasas_release_fusion(struct megasas_instance *instance); +void megasas_reset_reply_desc(struct megasas_instance *instance); +int megasas_check_mpio_paths(struct megasas_instance *instance, + struct scsi_cmnd *scmd); +void megasas_fusion_ocr_wq(struct work_struct *work); + +#endif /* _MEGARAID_SAS_FUSION_H_ */ diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c new file mode 100644 index 000000000..e276583c5 --- /dev/null +++ b/drivers/scsi/mesh.c @@ -0,0 +1,2073 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SCSI low-level driver for the MESH (Macintosh Enhanced SCSI Hardware) + * bus adaptor found on Power Macintosh computers. + * We assume the MESH is connected to a DBDMA (descriptor-based DMA) + * controller. + * + * Paul Mackerras, August 1996. + * Copyright (C) 1996 Paul Mackerras. + * + * Apr. 21 2002 - BenH Rework bus reset code for new error handler + * Add delay after initial bus reset + * Add module parameters + * + * Sep. 
27 2003 - BenH Move to new driver model, fix some write posting + * issues + * To do: + * - handle aborts correctly + * - retry arbitration if lost (unless higher levels do this for us) + * - power down the chip when no device is detected + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mesh.h" + +#if 1 +#undef KERN_DEBUG +#define KERN_DEBUG KERN_WARNING +#endif + +MODULE_AUTHOR("Paul Mackerras (paulus@samba.org)"); +MODULE_DESCRIPTION("PowerMac MESH SCSI driver"); +MODULE_LICENSE("GPL"); + +static int sync_rate = CONFIG_SCSI_MESH_SYNC_RATE; +static int sync_targets = 0xff; +static int resel_targets = 0xff; +static int debug_targets = 0; /* print debug for these targets */ +static int init_reset_delay = CONFIG_SCSI_MESH_RESET_DELAY_MS; + +module_param(sync_rate, int, 0); +MODULE_PARM_DESC(sync_rate, "Synchronous rate (0..10, 0=async)"); +module_param(sync_targets, int, 0); +MODULE_PARM_DESC(sync_targets, "Bitmask of targets allowed to set synchronous"); +module_param(resel_targets, int, 0); +MODULE_PARM_DESC(resel_targets, "Bitmask of targets allowed to set disconnect"); +module_param(debug_targets, int, 0644); +MODULE_PARM_DESC(debug_targets, "Bitmask of debugged targets"); +module_param(init_reset_delay, int, 0); +MODULE_PARM_DESC(init_reset_delay, "Initial bus reset delay (0=no reset)"); + +static int mesh_sync_period = 100; +static int mesh_sync_offset = 0; +static unsigned char use_active_neg = 0; /* bit mask for SEQ_ACTIVE_NEG if used */ + +#define ALLOW_SYNC(tgt) ((sync_targets >> (tgt)) & 1) +#define ALLOW_RESEL(tgt) ((resel_targets >> (tgt)) & 1) +#define ALLOW_DEBUG(tgt) ((debug_targets >> (tgt)) & 1) +#define DEBUG_TARGET(cmd) ((cmd) && ALLOW_DEBUG((cmd)->device->id)) + +#undef MESH_DBG +#define N_DBG_LOG 50 +#define N_DBG_SLOG 20 +#define NUM_DBG_EVENTS 13 +#undef DBG_USE_TB /* bombs on 601 */ + +struct dbglog { + char *fmt; + u32 tb; + u8 phase; + u8 bs0; + u8 bs1; + u8 tgt; + int d; +}; + +enum mesh_phase { + idle, + arbitrating, + selecting, + commanding, + dataing, + statusing, + busfreeing, + disconnecting, + reselecting, + sleeping +}; + +enum msg_phase { + msg_none, + msg_out, + msg_out_xxx, + msg_out_last, + msg_in, + msg_in_bad, +}; + +enum sdtr_phase { + do_sdtr, + sdtr_sent, + sdtr_done +}; + +struct mesh_target { + enum sdtr_phase sdtr_state; + int sync_params; + int data_goes_out; /* guess as to data direction */ + struct scsi_cmnd *current_req; + u32 saved_ptr; +#ifdef MESH_DBG + int log_ix; + int n_log; + struct dbglog log[N_DBG_LOG]; +#endif +}; + +struct mesh_state { + volatile struct mesh_regs __iomem *mesh; + int meshintr; + volatile struct dbdma_regs __iomem *dma; + int dmaintr; + struct Scsi_Host *host; + struct mesh_state *next; + struct scsi_cmnd *request_q; + struct scsi_cmnd *request_qtail; + enum mesh_phase phase; /* what we're currently trying to do */ + enum msg_phase msgphase; + int conn_tgt; /* target we're connected to */ + struct scsi_cmnd *current_req; /* req we're currently working on */ + int data_ptr; + int dma_started; + int dma_count; + int stat; + int aborting; + int expect_reply; + int n_msgin; + u8 msgin[16]; + int n_msgout; + int last_n_msgout; + u8 msgout[16]; + struct dbdma_cmd *dma_cmds; /* space for dbdma commands, aligned */ + dma_addr_t dma_cmd_bus; + void *dma_cmd_space; + int dma_cmd_size; + int 
clk_freq; + struct mesh_target tgts[8]; + struct macio_dev *mdev; + struct pci_dev* pdev; +#ifdef MESH_DBG + int log_ix; + int n_log; + struct dbglog log[N_DBG_SLOG]; +#endif +}; + +/* + * Driver is too messy, we need a few prototypes... + */ +static void mesh_done(struct mesh_state *ms, int start_next); +static void mesh_interrupt(struct mesh_state *ms); +static void cmd_complete(struct mesh_state *ms); +static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd); +static void halt_dma(struct mesh_state *ms); +static void phase_mismatch(struct mesh_state *ms); + + +/* + * Some debugging & logging routines + */ + +#ifdef MESH_DBG + +static inline u32 readtb(void) +{ + u32 tb; + +#ifdef DBG_USE_TB + /* Beware: if you enable this, it will crash on 601s. */ + asm ("mftb %0" : "=r" (tb) : ); +#else + tb = 0; +#endif + return tb; +} + +static void dlog(struct mesh_state *ms, char *fmt, int a) +{ + struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; + struct dbglog *tlp, *slp; + + tlp = &tp->log[tp->log_ix]; + slp = &ms->log[ms->log_ix]; + tlp->fmt = fmt; + tlp->tb = readtb(); + tlp->phase = (ms->msgphase << 4) + ms->phase; + tlp->bs0 = ms->mesh->bus_status0; + tlp->bs1 = ms->mesh->bus_status1; + tlp->tgt = ms->conn_tgt; + tlp->d = a; + *slp = *tlp; + if (++tp->log_ix >= N_DBG_LOG) + tp->log_ix = 0; + if (tp->n_log < N_DBG_LOG) + ++tp->n_log; + if (++ms->log_ix >= N_DBG_SLOG) + ms->log_ix = 0; + if (ms->n_log < N_DBG_SLOG) + ++ms->n_log; +} + +static void dumplog(struct mesh_state *ms, int t) +{ + struct mesh_target *tp = &ms->tgts[t]; + struct dbglog *lp; + int i; + + if (tp->n_log == 0) + return; + i = tp->log_ix - tp->n_log; + if (i < 0) + i += N_DBG_LOG; + tp->n_log = 0; + do { + lp = &tp->log[i]; + printk(KERN_DEBUG "mesh log %d: bs=%.2x%.2x ph=%.2x ", + t, lp->bs1, lp->bs0, lp->phase); +#ifdef DBG_USE_TB + printk("tb=%10u ", lp->tb); +#endif + printk(lp->fmt, lp->d); + printk("\n"); + if (++i >= N_DBG_LOG) + i = 0; + } while (i != tp->log_ix); +} + +static void dumpslog(struct mesh_state *ms) +{ + struct dbglog *lp; + int i; + + if (ms->n_log == 0) + return; + i = ms->log_ix - ms->n_log; + if (i < 0) + i += N_DBG_SLOG; + ms->n_log = 0; + do { + lp = &ms->log[i]; + printk(KERN_DEBUG "mesh log: bs=%.2x%.2x ph=%.2x t%d ", + lp->bs1, lp->bs0, lp->phase, lp->tgt); +#ifdef DBG_USE_TB + printk("tb=%10u ", lp->tb); +#endif + printk(lp->fmt, lp->d); + printk("\n"); + if (++i >= N_DBG_SLOG) + i = 0; + } while (i != ms->log_ix); +} + +#else + +static inline void dlog(struct mesh_state *ms, char *fmt, int a) +{} +static inline void dumplog(struct mesh_state *ms, int tgt) +{} +static inline void dumpslog(struct mesh_state *ms) +{} + +#endif /* MESH_DBG */ + +#define MKWORD(a, b, c, d) (((a) << 24) + ((b) << 16) + ((c) << 8) + (d)) + +static void +mesh_dump_regs(struct mesh_state *ms) +{ + volatile struct mesh_regs __iomem *mr = ms->mesh; + volatile struct dbdma_regs __iomem *md = ms->dma; + int t; + struct mesh_target *tp; + + printk(KERN_DEBUG "mesh: state at %p, regs at %p, dma at %p\n", + ms, mr, md); + printk(KERN_DEBUG " ct=%4x seq=%2x bs=%4x fc=%2x " + "exc=%2x err=%2x im=%2x int=%2x sp=%2x\n", + (mr->count_hi << 8) + mr->count_lo, mr->sequence, + (mr->bus_status1 << 8) + mr->bus_status0, mr->fifo_count, + mr->exception, mr->error, mr->intr_mask, mr->interrupt, + mr->sync_params); + while(in_8(&mr->fifo_count)) + printk(KERN_DEBUG " fifo data=%.2x\n",in_8(&mr->fifo)); + printk(KERN_DEBUG " dma stat=%x cmdptr=%x\n", + in_le32(&md->status), in_le32(&md->cmdptr)); + printk(KERN_DEBUG " 
phase=%d msgphase=%d conn_tgt=%d data_ptr=%d\n", + ms->phase, ms->msgphase, ms->conn_tgt, ms->data_ptr); + printk(KERN_DEBUG " dma_st=%d dma_ct=%d n_msgout=%d\n", + ms->dma_started, ms->dma_count, ms->n_msgout); + for (t = 0; t < 8; ++t) { + tp = &ms->tgts[t]; + if (tp->current_req == NULL) + continue; + printk(KERN_DEBUG " target %d: req=%p goes_out=%d saved_ptr=%d\n", + t, tp->current_req, tp->data_goes_out, tp->saved_ptr); + } +} + + +/* + * Flush write buffers on the bus path to the mesh + */ +static inline void mesh_flush_io(volatile struct mesh_regs __iomem *mr) +{ + (void)in_8(&mr->mesh_id); +} + + +/* Called with meshinterrupt disabled, initialize the chipset + * and eventually do the initial bus reset. The lock must not be + * held since we can schedule. + */ +static void mesh_init(struct mesh_state *ms) +{ + volatile struct mesh_regs __iomem *mr = ms->mesh; + volatile struct dbdma_regs __iomem *md = ms->dma; + + mesh_flush_io(mr); + udelay(100); + + /* Reset controller */ + out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */ + out_8(&mr->exception, 0xff); /* clear all exception bits */ + out_8(&mr->error, 0xff); /* clear all error bits */ + out_8(&mr->sequence, SEQ_RESETMESH); + mesh_flush_io(mr); + udelay(10); + out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); + out_8(&mr->source_id, ms->host->this_id); + out_8(&mr->sel_timeout, 25); /* 250ms */ + out_8(&mr->sync_params, ASYNC_PARAMS); + + if (init_reset_delay) { + printk(KERN_INFO "mesh: performing initial bus reset...\n"); + + /* Reset bus */ + out_8(&mr->bus_status1, BS1_RST); /* assert RST */ + mesh_flush_io(mr); + udelay(30); /* leave it on for >= 25us */ + out_8(&mr->bus_status1, 0); /* negate RST */ + mesh_flush_io(mr); + + /* Wait for bus to come back */ + msleep(init_reset_delay); + } + + /* Reconfigure controller */ + out_8(&mr->interrupt, 0xff); /* clear all interrupt bits */ + out_8(&mr->sequence, SEQ_FLUSHFIFO); + mesh_flush_io(mr); + udelay(1); + out_8(&mr->sync_params, ASYNC_PARAMS); + out_8(&mr->sequence, SEQ_ENBRESEL); + + ms->phase = idle; + ms->msgphase = msg_none; +} + + +static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd) +{ + volatile struct mesh_regs __iomem *mr = ms->mesh; + int t, id; + + id = cmd->device->id; + ms->current_req = cmd; + ms->tgts[id].data_goes_out = cmd->sc_data_direction == DMA_TO_DEVICE; + ms->tgts[id].current_req = cmd; + +#if 1 + if (DEBUG_TARGET(cmd)) { + int i; + printk(KERN_DEBUG "mesh_start: %p tgt=%d cmd=", cmd, id); + for (i = 0; i < cmd->cmd_len; ++i) + printk(" %x", cmd->cmnd[i]); + printk(" use_sg=%d buffer=%p bufflen=%u\n", + scsi_sg_count(cmd), scsi_sglist(cmd), scsi_bufflen(cmd)); + } +#endif + if (ms->dma_started) + panic("mesh: double DMA start !\n"); + + ms->phase = arbitrating; + ms->msgphase = msg_none; + ms->data_ptr = 0; + ms->dma_started = 0; + ms->n_msgout = 0; + ms->last_n_msgout = 0; + ms->expect_reply = 0; + ms->conn_tgt = id; + ms->tgts[id].saved_ptr = 0; + ms->stat = DID_OK; + ms->aborting = 0; +#ifdef MESH_DBG + ms->tgts[id].n_log = 0; + dlog(ms, "start cmd=%x", (int) cmd); +#endif + + /* Off we go */ + dlog(ms, "about to arb, intr/exc/err/fc=%.8x", + MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); + out_8(&mr->interrupt, INT_CMDDONE); + out_8(&mr->sequence, SEQ_ENBRESEL); + mesh_flush_io(mr); + udelay(1); + + if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) { + /* + * Some other device has the bus or is arbitrating for it - + * probably a target which is about to reselect us. 
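mesh_flush_io() above is the usual cure for write posting between the CPU and the MESH cell: a harmless read from the same device forces previously posted writes out before the driver starts counting a udelay(). The fragment below restates that idiom in isolation, using a memory-backed fake register block and a stand-in SEQ value; it mirrors the read-back trick used throughout this file and adds nothing new to it.

#include <stdint.h>

struct fake_regs {               /* hypothetical, memory-backed "registers" */
	uint8_t sequence;
	uint8_t mesh_id;
};

static inline uint8_t read_reg(volatile uint8_t *r) { return *r; }
static inline void write_reg(volatile uint8_t *r, uint8_t v) { *r = v; }

/* Force posted writes out before timing-sensitive code by reading any
 * harmless register on the same device (the in_8(&mr->mesh_id) trick). */
static inline void flush_posted_writes(volatile struct fake_regs *mr)
{
	(void)read_reg(&mr->mesh_id);
}

static void reset_then_wait(volatile struct fake_regs *mr)
{
	write_reg(&mr->sequence, 0x40);  /* 0x40: stand-in for SEQ_RESETMESH */
	flush_posted_writes(mr);         /* the write reaches the chip first */
	/* udelay(10) would follow in the real, kernel-side code */
}

int main(void)
{
	struct fake_regs regs = { 0, 0 };

	reset_then_wait(&regs);
	return regs.sequence == 0x40 ? 0 : 1;
}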
+ */ + dlog(ms, "busy b4 arb, intr/exc/err/fc=%.8x", + MKWORD(mr->interrupt, mr->exception, + mr->error, mr->fifo_count)); + for (t = 100; t > 0; --t) { + if ((in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) == 0) + break; + if (in_8(&mr->interrupt) != 0) { + dlog(ms, "intr b4 arb, intr/exc/err/fc=%.8x", + MKWORD(mr->interrupt, mr->exception, + mr->error, mr->fifo_count)); + mesh_interrupt(ms); + if (ms->phase != arbitrating) + return; + } + udelay(1); + } + if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) { + /* XXX should try again in a little while */ + ms->stat = DID_BUS_BUSY; + ms->phase = idle; + mesh_done(ms, 0); + return; + } + } + + /* + * Apparently the mesh has a bug where it will assert both its + * own bit and the target's bit on the bus during arbitration. + */ + out_8(&mr->dest_id, mr->source_id); + + /* + * There appears to be a race with reselection sometimes, + * where a target reselects us just as we issue the + * arbitrate command. It seems that then the arbitrate + * command just hangs waiting for the bus to be free + * without giving us a reselection exception. + * The only way I have found to get it to respond correctly + * is this: disable reselection before issuing the arbitrate + * command, then after issuing it, if it looks like a target + * is trying to reselect us, reset the mesh and then enable + * reselection. + */ + out_8(&mr->sequence, SEQ_DISRESEL); + if (in_8(&mr->interrupt) != 0) { + dlog(ms, "intr after disresel, intr/exc/err/fc=%.8x", + MKWORD(mr->interrupt, mr->exception, + mr->error, mr->fifo_count)); + mesh_interrupt(ms); + if (ms->phase != arbitrating) + return; + dlog(ms, "after intr after disresel, intr/exc/err/fc=%.8x", + MKWORD(mr->interrupt, mr->exception, + mr->error, mr->fifo_count)); + } + + out_8(&mr->sequence, SEQ_ARBITRATE); + + for (t = 230; t > 0; --t) { + if (in_8(&mr->interrupt) != 0) + break; + udelay(1); + } + dlog(ms, "after arb, intr/exc/err/fc=%.8x", + MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); + if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL) + && (in_8(&mr->bus_status0) & BS0_IO)) { + /* looks like a reselection - try resetting the mesh */ + dlog(ms, "resel? after arb, intr/exc/err/fc=%.8x", + MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); + out_8(&mr->sequence, SEQ_RESETMESH); + mesh_flush_io(mr); + udelay(10); + out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); + out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); + out_8(&mr->sequence, SEQ_ENBRESEL); + mesh_flush_io(mr); + for (t = 10; t > 0 && in_8(&mr->interrupt) == 0; --t) + udelay(1); + dlog(ms, "tried reset after arb, intr/exc/err/fc=%.8x", + MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); +#ifndef MESH_MULTIPLE_HOSTS + if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL) + && (in_8(&mr->bus_status0) & BS0_IO)) { + printk(KERN_ERR "mesh: controller not responding" + " to reselection!\n"); + /* + * If this is a target reselecting us, and the + * mesh isn't responding, the higher levels of + * the scsi code will eventually time out and + * reset the bus. + */ + } +#endif + } +} + +/* + * Start the next command for a MESH. + * Should be called with interrupts disabled. 
+ */ +static void mesh_start(struct mesh_state *ms) +{ + struct scsi_cmnd *cmd, *prev, *next; + + if (ms->phase != idle || ms->current_req != NULL) { + printk(KERN_ERR "inappropriate mesh_start (phase=%d, ms=%p)", + ms->phase, ms); + return; + } + + while (ms->phase == idle) { + prev = NULL; + for (cmd = ms->request_q; ; cmd = (struct scsi_cmnd *) cmd->host_scribble) { + if (cmd == NULL) + return; + if (ms->tgts[cmd->device->id].current_req == NULL) + break; + prev = cmd; + } + next = (struct scsi_cmnd *) cmd->host_scribble; + if (prev == NULL) + ms->request_q = next; + else + prev->host_scribble = (void *) next; + if (next == NULL) + ms->request_qtail = prev; + + mesh_start_cmd(ms, cmd); + } +} + +static void mesh_done(struct mesh_state *ms, int start_next) +{ + struct scsi_cmnd *cmd; + struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; + + cmd = ms->current_req; + ms->current_req = NULL; + tp->current_req = NULL; + if (cmd) { + struct mesh_cmd_priv *mcmd = mesh_priv(cmd); + + set_host_byte(cmd, ms->stat); + set_status_byte(cmd, mcmd->status); + if (ms->stat == DID_OK) + scsi_msg_to_host_byte(cmd, mcmd->message); + if (DEBUG_TARGET(cmd)) { + printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n", + cmd->result, ms->data_ptr, scsi_bufflen(cmd)); +#if 0 + /* needs to use sg? */ + if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 3) + && cmd->request_buffer != 0) { + unsigned char *b = cmd->request_buffer; + printk(KERN_DEBUG "buffer = %x %x %x %x %x %x %x %x\n", + b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]); + } +#endif + } + mcmd->this_residual -= ms->data_ptr; + scsi_done(cmd); + } + if (start_next) { + out_8(&ms->mesh->sequence, SEQ_ENBRESEL); + mesh_flush_io(ms->mesh); + udelay(1); + ms->phase = idle; + mesh_start(ms); + } +} + +static inline void add_sdtr_msg(struct mesh_state *ms) +{ + int i = ms->n_msgout; + + ms->msgout[i] = EXTENDED_MESSAGE; + ms->msgout[i+1] = 3; + ms->msgout[i+2] = EXTENDED_SDTR; + ms->msgout[i+3] = mesh_sync_period/4; + ms->msgout[i+4] = (ALLOW_SYNC(ms->conn_tgt)? mesh_sync_offset: 0); + ms->n_msgout = i + 5; +} + +static void set_sdtr(struct mesh_state *ms, int period, int offset) +{ + struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; + volatile struct mesh_regs __iomem *mr = ms->mesh; + int v, tr; + + tp->sdtr_state = sdtr_done; + if (offset == 0) { + /* asynchronous */ + if (SYNC_OFF(tp->sync_params)) + printk(KERN_INFO "mesh: target %d now asynchronous\n", + ms->conn_tgt); + tp->sync_params = ASYNC_PARAMS; + out_8(&mr->sync_params, ASYNC_PARAMS); + return; + } + /* + * We need to compute ceil(clk_freq * period / 500e6) - 2 + * without incurring overflow. 
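The comment just above describes what set_sdtr() computes next: the agreed SDTR period factor (units of 4 ns) becomes the MESH sync_params value v, with the bus clocked at (v + 2) * 2 clock periods, so v = ceil(clk_freq * period / 500e6) - 2, and the two-step arithmetic keeps every intermediate value inside 32 bits while tr reports the rate in units of 100 kB/s. The self-contained sketch below re-runs that arithmetic for a 50 MHz clock so the numbers can be checked by eye; it is a worked example mirroring the code that follows, not a replacement for it.

#include <stdio.h>

/* Mirror of the arithmetic in set_sdtr(): 'period' is the SDTR factor in
 * 4 ns units, 'clk' the MESH clock in Hz. */
static void mesh_sync_math(int clk, int period, int *v_out, int *tr_out)
{
	int v = (clk / 5000) * period;   /* == clk * period / 5000, no overflow */
	int tr;                          /* resulting rate, units of 100 kB/s   */

	if (v <= 250000) {               /* period <= 5 clock periods: chip minimum */
		v = 0;
		tr = (clk + 250000) / 500000;
	} else {                         /* ceil(clk * period / 5e8) - 2 */
		v = (v + 99999) / 100000 - 2;
		if (v > 15)
			v = 15;
		tr = ((clk / (v + 2)) + 199999) / 200000;
	}
	*v_out = v;
	*tr_out = tr;
}

int main(void)
{
	int v, tr;

	/* 50 MHz clock, 200 ns period (SDTR factor 50): expect v = 3, 5.0 MB/s */
	mesh_sync_math(50000000, 50, &v, &tr);
	printf("v=%d rate=%d.%d MB/s\n", v, tr / 10, tr % 10);

	/* 50 MHz clock, 100 ns period (factor 25): hits the v = 0 fast path */
	mesh_sync_math(50000000, 25, &v, &tr);
	printf("v=%d rate=%d.%d MB/s\n", v, tr / 10, tr % 10);
	return 0;
}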
+ */ + v = (ms->clk_freq / 5000) * period; + if (v <= 250000) { + /* special case: sync_period == 5 * clk_period */ + v = 0; + /* units of tr are 100kB/s */ + tr = (ms->clk_freq + 250000) / 500000; + } else { + /* sync_period == (v + 2) * 2 * clk_period */ + v = (v + 99999) / 100000 - 2; + if (v > 15) + v = 15; /* oops */ + tr = ((ms->clk_freq / (v + 2)) + 199999) / 200000; + } + if (offset > 15) + offset = 15; /* can't happen */ + tp->sync_params = SYNC_PARAMS(offset, v); + out_8(&mr->sync_params, tp->sync_params); + printk(KERN_INFO "mesh: target %d synchronous at %d.%d MB/s\n", + ms->conn_tgt, tr/10, tr%10); +} + +static void start_phase(struct mesh_state *ms) +{ + int i, seq, nb; + volatile struct mesh_regs __iomem *mr = ms->mesh; + volatile struct dbdma_regs __iomem *md = ms->dma; + struct scsi_cmnd *cmd = ms->current_req; + struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; + + dlog(ms, "start_phase nmo/exc/fc/seq = %.8x", + MKWORD(ms->n_msgout, mr->exception, mr->fifo_count, mr->sequence)); + out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); + seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0); + switch (ms->msgphase) { + case msg_none: + break; + + case msg_in: + out_8(&mr->count_hi, 0); + out_8(&mr->count_lo, 1); + out_8(&mr->sequence, SEQ_MSGIN + seq); + ms->n_msgin = 0; + return; + + case msg_out: + /* + * To make sure ATN drops before we assert ACK for + * the last byte of the message, we have to do the + * last byte specially. + */ + if (ms->n_msgout <= 0) { + printk(KERN_ERR "mesh: msg_out but n_msgout=%d\n", + ms->n_msgout); + mesh_dump_regs(ms); + ms->msgphase = msg_none; + break; + } + if (ALLOW_DEBUG(ms->conn_tgt)) { + printk(KERN_DEBUG "mesh: sending %d msg bytes:", + ms->n_msgout); + for (i = 0; i < ms->n_msgout; ++i) + printk(" %x", ms->msgout[i]); + printk("\n"); + } + dlog(ms, "msgout msg=%.8x", MKWORD(ms->n_msgout, ms->msgout[0], + ms->msgout[1], ms->msgout[2])); + out_8(&mr->count_hi, 0); + out_8(&mr->sequence, SEQ_FLUSHFIFO); + mesh_flush_io(mr); + udelay(1); + /* + * If ATN is not already asserted, we assert it, then + * issue a SEQ_MSGOUT to get the mesh to drop ACK. + */ + if ((in_8(&mr->bus_status0) & BS0_ATN) == 0) { + dlog(ms, "bus0 was %.2x explicitly asserting ATN", mr->bus_status0); + out_8(&mr->bus_status0, BS0_ATN); /* explicit ATN */ + mesh_flush_io(mr); + udelay(1); + out_8(&mr->count_lo, 1); + out_8(&mr->sequence, SEQ_MSGOUT + seq); + out_8(&mr->bus_status0, 0); /* release explicit ATN */ + dlog(ms,"hace: after explicit ATN bus0=%.2x",mr->bus_status0); + } + if (ms->n_msgout == 1) { + /* + * We can't issue the SEQ_MSGOUT without ATN + * until the target has asserted REQ. The logic + * in cmd_complete handles both situations: + * REQ already asserted or not. 
+ */ + cmd_complete(ms); + } else { + out_8(&mr->count_lo, ms->n_msgout - 1); + out_8(&mr->sequence, SEQ_MSGOUT + seq); + for (i = 0; i < ms->n_msgout - 1; ++i) + out_8(&mr->fifo, ms->msgout[i]); + } + return; + + default: + printk(KERN_ERR "mesh bug: start_phase msgphase=%d\n", + ms->msgphase); + } + + switch (ms->phase) { + case selecting: + out_8(&mr->dest_id, ms->conn_tgt); + out_8(&mr->sequence, SEQ_SELECT + SEQ_ATN); + break; + case commanding: + out_8(&mr->sync_params, tp->sync_params); + out_8(&mr->count_hi, 0); + if (cmd) { + out_8(&mr->count_lo, cmd->cmd_len); + out_8(&mr->sequence, SEQ_COMMAND + seq); + for (i = 0; i < cmd->cmd_len; ++i) + out_8(&mr->fifo, cmd->cmnd[i]); + } else { + out_8(&mr->count_lo, 6); + out_8(&mr->sequence, SEQ_COMMAND + seq); + for (i = 0; i < 6; ++i) + out_8(&mr->fifo, 0); + } + break; + case dataing: + /* transfer data, if any */ + if (!ms->dma_started) { + set_dma_cmds(ms, cmd); + out_le32(&md->cmdptr, virt_to_phys(ms->dma_cmds)); + out_le32(&md->control, (RUN << 16) | RUN); + ms->dma_started = 1; + } + nb = ms->dma_count; + if (nb > 0xfff0) + nb = 0xfff0; + ms->dma_count -= nb; + ms->data_ptr += nb; + out_8(&mr->count_lo, nb); + out_8(&mr->count_hi, nb >> 8); + out_8(&mr->sequence, (tp->data_goes_out? + SEQ_DATAOUT: SEQ_DATAIN) + SEQ_DMA_MODE + seq); + break; + case statusing: + out_8(&mr->count_hi, 0); + out_8(&mr->count_lo, 1); + out_8(&mr->sequence, SEQ_STATUS + seq); + break; + case busfreeing: + case disconnecting: + out_8(&mr->sequence, SEQ_ENBRESEL); + mesh_flush_io(mr); + udelay(1); + dlog(ms, "enbresel intr/exc/err/fc=%.8x", + MKWORD(mr->interrupt, mr->exception, mr->error, + mr->fifo_count)); + out_8(&mr->sequence, SEQ_BUSFREE); + break; + default: + printk(KERN_ERR "mesh: start_phase called with phase=%d\n", + ms->phase); + dumpslog(ms); + } + +} + +static inline void get_msgin(struct mesh_state *ms) +{ + volatile struct mesh_regs __iomem *mr = ms->mesh; + int i, n; + + n = mr->fifo_count; + if (n != 0) { + i = ms->n_msgin; + ms->n_msgin = i + n; + for (; n > 0; --n) + ms->msgin[i++] = in_8(&mr->fifo); + } +} + +static inline int msgin_length(struct mesh_state *ms) +{ + int b, n; + + n = 1; + if (ms->n_msgin > 0) { + b = ms->msgin[0]; + if (b == 1) { + /* extended message */ + n = ms->n_msgin < 2? 2: ms->msgin[1] + 2; + } else if (0x20 <= b && b <= 0x2f) { + /* 2-byte message */ + n = 2; + } + } + return n; +} + +static void reselected(struct mesh_state *ms) +{ + volatile struct mesh_regs __iomem *mr = ms->mesh; + struct scsi_cmnd *cmd; + struct mesh_target *tp; + int b, t, prev; + + switch (ms->phase) { + case idle: + break; + case arbitrating: + if ((cmd = ms->current_req) != NULL) { + /* put the command back on the queue */ + cmd->host_scribble = (void *) ms->request_q; + if (ms->request_q == NULL) + ms->request_qtail = cmd; + ms->request_q = cmd; + tp = &ms->tgts[cmd->device->id]; + tp->current_req = NULL; + } + break; + case busfreeing: + ms->phase = reselecting; + mesh_done(ms, 0); + break; + case disconnecting: + break; + default: + printk(KERN_ERR "mesh: reselected in phase %d/%d tgt %d\n", + ms->msgphase, ms->phase, ms->conn_tgt); + dumplog(ms, ms->conn_tgt); + dumpslog(ms); + } + + if (ms->dma_started) { + printk(KERN_ERR "mesh: reselected with DMA started !\n"); + halt_dma(ms); + } + ms->current_req = NULL; + ms->phase = dataing; + ms->msgphase = msg_in; + ms->n_msgout = 0; + ms->last_n_msgout = 0; + prev = ms->conn_tgt; + + /* + * We seem to get abortive reselections sometimes. 
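msgin_length() above implements the standard SCSI message framing rules: a leading 0x01 marks an extended message whose total length is its second byte plus two, codes 0x20 through 0x2f are fixed two-byte messages, and everything else is a single byte. The routine below restates that rule on a plain byte buffer and checks it against an SDTR message shaped like the one add_sdtr_msg() builds; the framing constants are the ones visible in msgin_length(), and the 0x04 DISCONNECT code in the test is the standard SCSI value, quoted here only for illustration.

#include <stdio.h>

/* Same framing rule as msgin_length() above, given 'have' bytes so far:
 * returns the total number of bytes the current message occupies. */
static int scsi_msg_length(const unsigned char *msg, int have)
{
	if (have <= 0)
		return 1;                      /* need at least the first byte */
	if (msg[0] == 0x01)                    /* EXTENDED_MESSAGE */
		return have < 2 ? 2 : msg[1] + 2;
	if (msg[0] >= 0x20 && msg[0] <= 0x2f)  /* two-byte message group */
		return 2;
	return 1;                              /* everything else is one byte */
}

int main(void)
{
	/* An SDTR message as built by add_sdtr_msg(): 01 03 01 <period> <offset> */
	unsigned char sdtr[5] = { 0x01, 0x03, 0x01, 25, 15 };

	printf("after 1 byte we expect %d\n", scsi_msg_length(sdtr, 1));  /* 2 */
	printf("after 2 bytes we expect %d\n", scsi_msg_length(sdtr, 2)); /* 5 */
	printf("DISCONNECT (0x04) needs %d byte(s)\n",
	       scsi_msg_length((unsigned char[]){ 0x04 }, 1));            /* 1 */
	return 0;
}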
+ */ + while ((in_8(&mr->bus_status1) & BS1_BSY) == 0) { + static int mesh_aborted_resels; + mesh_aborted_resels++; + out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); + mesh_flush_io(mr); + udelay(1); + out_8(&mr->sequence, SEQ_ENBRESEL); + mesh_flush_io(mr); + udelay(5); + dlog(ms, "extra resel err/exc/fc = %.6x", + MKWORD(0, mr->error, mr->exception, mr->fifo_count)); + } + out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); + mesh_flush_io(mr); + udelay(1); + out_8(&mr->sequence, SEQ_ENBRESEL); + mesh_flush_io(mr); + udelay(1); + out_8(&mr->sync_params, ASYNC_PARAMS); + + /* + * Find out who reselected us. + */ + if (in_8(&mr->fifo_count) == 0) { + printk(KERN_ERR "mesh: reselection but nothing in fifo?\n"); + ms->conn_tgt = ms->host->this_id; + goto bogus; + } + /* get the last byte in the fifo */ + do { + b = in_8(&mr->fifo); + dlog(ms, "reseldata %x", b); + } while (in_8(&mr->fifo_count)); + for (t = 0; t < 8; ++t) + if ((b & (1 << t)) != 0 && t != ms->host->this_id) + break; + if (b != (1 << t) + (1 << ms->host->this_id)) { + printk(KERN_ERR "mesh: bad reselection data %x\n", b); + ms->conn_tgt = ms->host->this_id; + goto bogus; + } + + + /* + * Set up to continue with that target's transfer. + */ + ms->conn_tgt = t; + tp = &ms->tgts[t]; + out_8(&mr->sync_params, tp->sync_params); + if (ALLOW_DEBUG(t)) { + printk(KERN_DEBUG "mesh: reselected by target %d\n", t); + printk(KERN_DEBUG "mesh: saved_ptr=%x goes_out=%d cmd=%p\n", + tp->saved_ptr, tp->data_goes_out, tp->current_req); + } + ms->current_req = tp->current_req; + if (tp->current_req == NULL) { + printk(KERN_ERR "mesh: reselected by tgt %d but no cmd!\n", t); + goto bogus; + } + ms->data_ptr = tp->saved_ptr; + dlog(ms, "resel prev tgt=%d", prev); + dlog(ms, "resel err/exc=%.4x", MKWORD(0, 0, mr->error, mr->exception)); + start_phase(ms); + return; + +bogus: + dumplog(ms, ms->conn_tgt); + dumpslog(ms); + ms->data_ptr = 0; + ms->aborting = 1; + start_phase(ms); +} + +static void do_abort(struct mesh_state *ms) +{ + ms->msgout[0] = ABORT; + ms->n_msgout = 1; + ms->aborting = 1; + ms->stat = DID_ABORT; + dlog(ms, "abort", 0); +} + +static void handle_reset(struct mesh_state *ms) +{ + int tgt; + struct mesh_target *tp; + struct scsi_cmnd *cmd; + volatile struct mesh_regs __iomem *mr = ms->mesh; + + for (tgt = 0; tgt < 8; ++tgt) { + tp = &ms->tgts[tgt]; + if ((cmd = tp->current_req) != NULL) { + set_host_byte(cmd, DID_RESET); + tp->current_req = NULL; + scsi_done(cmd); + } + ms->tgts[tgt].sdtr_state = do_sdtr; + ms->tgts[tgt].sync_params = ASYNC_PARAMS; + } + ms->current_req = NULL; + while ((cmd = ms->request_q) != NULL) { + ms->request_q = (struct scsi_cmnd *) cmd->host_scribble; + set_host_byte(cmd, DID_RESET); + scsi_done(cmd); + } + ms->phase = idle; + ms->msgphase = msg_none; + out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); + out_8(&mr->sequence, SEQ_FLUSHFIFO); + mesh_flush_io(mr); + udelay(1); + out_8(&mr->sync_params, ASYNC_PARAMS); + out_8(&mr->sequence, SEQ_ENBRESEL); +} + +static irqreturn_t do_mesh_interrupt(int irq, void *dev_id) +{ + unsigned long flags; + struct mesh_state *ms = dev_id; + struct Scsi_Host *dev = ms->host; + + spin_lock_irqsave(dev->host_lock, flags); + mesh_interrupt(ms); + spin_unlock_irqrestore(dev->host_lock, flags); + return IRQ_HANDLED; +} + +static void handle_error(struct mesh_state *ms) +{ + int err, exc, count; + volatile struct mesh_regs __iomem *mr = ms->mesh; + + err = in_8(&mr->error); + exc = in_8(&mr->exception); + out_8(&mr->interrupt, 
INT_ERROR | INT_EXCEPTION | INT_CMDDONE); + dlog(ms, "error err/exc/fc/cl=%.8x", + MKWORD(err, exc, mr->fifo_count, mr->count_lo)); + if (err & ERR_SCSIRESET) { + /* SCSI bus was reset */ + printk(KERN_INFO "mesh: SCSI bus reset detected: " + "waiting for end..."); + while ((in_8(&mr->bus_status1) & BS1_RST) != 0) + udelay(1); + printk("done\n"); + if (ms->dma_started) + halt_dma(ms); + handle_reset(ms); + /* request_q is empty, no point in mesh_start() */ + return; + } + if (err & ERR_UNEXPDISC) { + /* Unexpected disconnect */ + if (exc & EXC_RESELECTED) { + reselected(ms); + return; + } + if (!ms->aborting) { + printk(KERN_WARNING "mesh: target %d aborted\n", + ms->conn_tgt); + dumplog(ms, ms->conn_tgt); + dumpslog(ms); + } + out_8(&mr->interrupt, INT_CMDDONE); + ms->stat = DID_ABORT; + mesh_done(ms, 1); + return; + } + if (err & ERR_PARITY) { + if (ms->msgphase == msg_in) { + printk(KERN_ERR "mesh: msg parity error, target %d\n", + ms->conn_tgt); + ms->msgout[0] = MSG_PARITY_ERROR; + ms->n_msgout = 1; + ms->msgphase = msg_in_bad; + cmd_complete(ms); + return; + } + if (ms->stat == DID_OK) { + printk(KERN_ERR "mesh: parity error, target %d\n", + ms->conn_tgt); + ms->stat = DID_PARITY; + } + count = (mr->count_hi << 8) + mr->count_lo; + if (count == 0) { + cmd_complete(ms); + } else { + /* reissue the data transfer command */ + out_8(&mr->sequence, mr->sequence); + } + return; + } + if (err & ERR_SEQERR) { + if (exc & EXC_RESELECTED) { + /* This can happen if we issue a command to + get the bus just after the target reselects us. */ + static int mesh_resel_seqerr; + mesh_resel_seqerr++; + reselected(ms); + return; + } + if (exc == EXC_PHASEMM) { + static int mesh_phasemm_seqerr; + mesh_phasemm_seqerr++; + phase_mismatch(ms); + return; + } + printk(KERN_ERR "mesh: sequence error (err=%x exc=%x)\n", + err, exc); + } else { + printk(KERN_ERR "mesh: unknown error %x (exc=%x)\n", err, exc); + } + mesh_dump_regs(ms); + dumplog(ms, ms->conn_tgt); + if (ms->phase > selecting && (in_8(&mr->bus_status1) & BS1_BSY)) { + /* try to do what the target wants */ + do_abort(ms); + phase_mismatch(ms); + return; + } + ms->stat = DID_ERROR; + mesh_done(ms, 1); +} + +static void handle_exception(struct mesh_state *ms) +{ + int exc; + volatile struct mesh_regs __iomem *mr = ms->mesh; + + exc = in_8(&mr->exception); + out_8(&mr->interrupt, INT_EXCEPTION | INT_CMDDONE); + if (exc & EXC_RESELECTED) { + static int mesh_resel_exc; + mesh_resel_exc++; + reselected(ms); + } else if (exc == EXC_ARBLOST) { + printk(KERN_DEBUG "mesh: lost arbitration\n"); + ms->stat = DID_BUS_BUSY; + mesh_done(ms, 1); + } else if (exc == EXC_SELTO) { + /* selection timed out */ + ms->stat = DID_BAD_TARGET; + mesh_done(ms, 1); + } else if (exc == EXC_PHASEMM) { + /* target wants to do something different: + find out what it wants and do it. 
*/ + phase_mismatch(ms); + } else { + printk(KERN_ERR "mesh: can't cope with exception %x\n", exc); + mesh_dump_regs(ms); + dumplog(ms, ms->conn_tgt); + do_abort(ms); + phase_mismatch(ms); + } +} + +static void handle_msgin(struct mesh_state *ms) +{ + int i, code; + struct scsi_cmnd *cmd = ms->current_req; + struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; + + if (ms->n_msgin == 0) + return; + code = ms->msgin[0]; + if (ALLOW_DEBUG(ms->conn_tgt)) { + printk(KERN_DEBUG "got %d message bytes:", ms->n_msgin); + for (i = 0; i < ms->n_msgin; ++i) + printk(" %x", ms->msgin[i]); + printk("\n"); + } + dlog(ms, "msgin msg=%.8x", + MKWORD(ms->n_msgin, code, ms->msgin[1], ms->msgin[2])); + + ms->expect_reply = 0; + ms->n_msgout = 0; + if (ms->n_msgin < msgin_length(ms)) + goto reject; + if (cmd) + mesh_priv(cmd)->message = code; + switch (code) { + case COMMAND_COMPLETE: + break; + case EXTENDED_MESSAGE: + switch (ms->msgin[2]) { + case EXTENDED_MODIFY_DATA_POINTER: + ms->data_ptr += (ms->msgin[3] << 24) + ms->msgin[6] + + (ms->msgin[4] << 16) + (ms->msgin[5] << 8); + break; + case EXTENDED_SDTR: + if (tp->sdtr_state != sdtr_sent) { + /* reply with an SDTR */ + add_sdtr_msg(ms); + /* limit period to at least his value, + offset to no more than his */ + if (ms->msgout[3] < ms->msgin[3]) + ms->msgout[3] = ms->msgin[3]; + if (ms->msgout[4] > ms->msgin[4]) + ms->msgout[4] = ms->msgin[4]; + set_sdtr(ms, ms->msgout[3], ms->msgout[4]); + ms->msgphase = msg_out; + } else { + set_sdtr(ms, ms->msgin[3], ms->msgin[4]); + } + break; + default: + goto reject; + } + break; + case SAVE_POINTERS: + tp->saved_ptr = ms->data_ptr; + break; + case RESTORE_POINTERS: + ms->data_ptr = tp->saved_ptr; + break; + case DISCONNECT: + ms->phase = disconnecting; + break; + case ABORT: + break; + case MESSAGE_REJECT: + if (tp->sdtr_state == sdtr_sent) + set_sdtr(ms, 0, 0); + break; + case NOP: + break; + default: + if (IDENTIFY_BASE <= code && code <= IDENTIFY_BASE + 7) { + if (cmd == NULL) { + do_abort(ms); + ms->msgphase = msg_out; + } else if (code != cmd->device->lun + IDENTIFY_BASE) { + printk(KERN_WARNING "mesh: lun mismatch " + "(%d != %llu) on reselection from " + "target %d\n", code - IDENTIFY_BASE, + cmd->device->lun, ms->conn_tgt); + } + break; + } + goto reject; + } + return; + + reject: + printk(KERN_WARNING "mesh: rejecting message from target %d:", + ms->conn_tgt); + for (i = 0; i < ms->n_msgin; ++i) + printk(" %x", ms->msgin[i]); + printk("\n"); + ms->msgout[0] = MESSAGE_REJECT; + ms->n_msgout = 1; + ms->msgphase = msg_out; +} + +/* + * Set up DMA commands for transferring data. + */ +static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd) +{ + int i, dma_cmd, total, off, dtot; + struct scatterlist *scl; + struct dbdma_cmd *dcmds; + + dma_cmd = ms->tgts[ms->conn_tgt].data_goes_out? 
+ OUTPUT_MORE: INPUT_MORE; + dcmds = ms->dma_cmds; + dtot = 0; + if (cmd) { + int nseg; + + mesh_priv(cmd)->this_residual = scsi_bufflen(cmd); + + nseg = scsi_dma_map(cmd); + BUG_ON(nseg < 0); + + if (nseg) { + total = 0; + off = ms->data_ptr; + + scsi_for_each_sg(cmd, scl, nseg, i) { + u32 dma_addr = sg_dma_address(scl); + u32 dma_len = sg_dma_len(scl); + + total += scl->length; + if (off >= dma_len) { + off -= dma_len; + continue; + } + if (dma_len > 0xffff) + panic("mesh: scatterlist element >= 64k"); + dcmds->req_count = cpu_to_le16(dma_len - off); + dcmds->command = cpu_to_le16(dma_cmd); + dcmds->phy_addr = cpu_to_le32(dma_addr + off); + dcmds->xfer_status = 0; + ++dcmds; + dtot += dma_len - off; + off = 0; + } + } + } + if (dtot == 0) { + /* Either the target has overrun our buffer, + or the caller didn't provide a buffer. */ + static char mesh_extra_buf[64]; + + dtot = sizeof(mesh_extra_buf); + dcmds->req_count = cpu_to_le16(dtot); + dcmds->phy_addr = cpu_to_le32(virt_to_phys(mesh_extra_buf)); + dcmds->xfer_status = 0; + ++dcmds; + } + dma_cmd += OUTPUT_LAST - OUTPUT_MORE; + dcmds[-1].command = cpu_to_le16(dma_cmd); + memset(dcmds, 0, sizeof(*dcmds)); + dcmds->command = cpu_to_le16(DBDMA_STOP); + ms->dma_count = dtot; +} + +static void halt_dma(struct mesh_state *ms) +{ + volatile struct dbdma_regs __iomem *md = ms->dma; + volatile struct mesh_regs __iomem *mr = ms->mesh; + struct scsi_cmnd *cmd = ms->current_req; + int t, nb; + + if (!ms->tgts[ms->conn_tgt].data_goes_out) { + /* wait a little while until the fifo drains */ + t = 50; + while (t > 0 && in_8(&mr->fifo_count) != 0 + && (in_le32(&md->status) & ACTIVE) != 0) { + --t; + udelay(1); + } + } + out_le32(&md->control, RUN << 16); /* turn off RUN bit */ + nb = (mr->count_hi << 8) + mr->count_lo; + dlog(ms, "halt_dma fc/count=%.6x", + MKWORD(0, mr->fifo_count, 0, nb)); + if (ms->tgts[ms->conn_tgt].data_goes_out) + nb += mr->fifo_count; + /* nb is the number of bytes not yet transferred + to/from the target. 
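set_dma_cmds() above has one subtlety worth isolating: after a disconnect and reselect the transfer resumes at ms->data_ptr, so leading scatterlist elements may need to be skipped entirely, and the first element actually used may start part-way in. The sketch below reproduces just that skipping logic over a plain array of segments, with stand-in types in place of struct scatterlist and struct dbdma_cmd; it is a user-space model of the loop above, not kernel code.

#include <stdint.h>
#include <stdio.h>

struct seg  { uint32_t addr; uint32_t len; };    /* stand-in for one sg entry */
struct desc { uint32_t addr; uint16_t count; };  /* stand-in for a dbdma_cmd  */

/* Emit descriptors for the data starting 'offset' bytes into the buffer,
 * skipping segments (or the leading part of one) already transferred. */
static int build_descs(const struct seg *sg, int nseg, uint32_t offset,
		       struct desc *out, uint32_t *total_out)
{
	int ndesc = 0;
	uint32_t total = 0;

	for (int i = 0; i < nseg; i++) {
		uint32_t len = sg[i].len;

		if (offset >= len) {             /* whole segment already done */
			offset -= len;
			continue;
		}
		out[ndesc].addr  = sg[i].addr + offset;
		out[ndesc].count = (uint16_t)(len - offset);
		total += len - offset;
		offset = 0;     /* only the first kept segment can be partial */
		ndesc++;
	}
	*total_out = total;
	return ndesc;
}

int main(void)
{
	struct seg sg[3] = { { 0x1000, 512 }, { 0x4000, 1024 }, { 0x8000, 256 } };
	struct desc d[3];
	uint32_t total;
	int n = build_descs(sg, 3, 700, d, &total);  /* resume 700 bytes in */

	printf("%d descs, %u bytes, first at 0x%x len %u\n",
	       n, total, d[0].addr, d[0].count);
	return 0;
}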
*/ + ms->data_ptr -= nb; + dlog(ms, "data_ptr %x", ms->data_ptr); + if (ms->data_ptr < 0) { + printk(KERN_ERR "mesh: halt_dma: data_ptr=%d (nb=%d, ms=%p)\n", + ms->data_ptr, nb, ms); + ms->data_ptr = 0; +#ifdef MESH_DBG + dumplog(ms, ms->conn_tgt); + dumpslog(ms); +#endif /* MESH_DBG */ + } else if (cmd && scsi_bufflen(cmd) && + ms->data_ptr > scsi_bufflen(cmd)) { + printk(KERN_DEBUG "mesh: target %d overrun, " + "data_ptr=%x total=%x goes_out=%d\n", + ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd), + ms->tgts[ms->conn_tgt].data_goes_out); + } + if (cmd) + scsi_dma_unmap(cmd); + ms->dma_started = 0; +} + +static void phase_mismatch(struct mesh_state *ms) +{ + volatile struct mesh_regs __iomem *mr = ms->mesh; + int phase; + + dlog(ms, "phasemm ch/cl/seq/fc=%.8x", + MKWORD(mr->count_hi, mr->count_lo, mr->sequence, mr->fifo_count)); + phase = in_8(&mr->bus_status0) & BS0_PHASE; + if (ms->msgphase == msg_out_xxx && phase == BP_MSGOUT) { + /* output the last byte of the message, without ATN */ + out_8(&mr->count_lo, 1); + out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg); + mesh_flush_io(mr); + udelay(1); + out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]); + ms->msgphase = msg_out_last; + return; + } + + if (ms->msgphase == msg_in) { + get_msgin(ms); + if (ms->n_msgin) + handle_msgin(ms); + } + + if (ms->dma_started) + halt_dma(ms); + if (mr->fifo_count) { + out_8(&mr->sequence, SEQ_FLUSHFIFO); + mesh_flush_io(mr); + udelay(1); + } + + ms->msgphase = msg_none; + switch (phase) { + case BP_DATAIN: + ms->tgts[ms->conn_tgt].data_goes_out = 0; + ms->phase = dataing; + break; + case BP_DATAOUT: + ms->tgts[ms->conn_tgt].data_goes_out = 1; + ms->phase = dataing; + break; + case BP_COMMAND: + ms->phase = commanding; + break; + case BP_STATUS: + ms->phase = statusing; + break; + case BP_MSGIN: + ms->msgphase = msg_in; + ms->n_msgin = 0; + break; + case BP_MSGOUT: + ms->msgphase = msg_out; + if (ms->n_msgout == 0) { + if (ms->aborting) { + do_abort(ms); + } else { + if (ms->last_n_msgout == 0) { + printk(KERN_DEBUG + "mesh: no msg to repeat\n"); + ms->msgout[0] = NOP; + ms->last_n_msgout = 1; + } + ms->n_msgout = ms->last_n_msgout; + } + } + break; + default: + printk(KERN_DEBUG "mesh: unknown scsi phase %x\n", phase); + ms->stat = DID_ERROR; + mesh_done(ms, 1); + return; + } + + start_phase(ms); +} + +static void cmd_complete(struct mesh_state *ms) +{ + volatile struct mesh_regs __iomem *mr = ms->mesh; + struct scsi_cmnd *cmd = ms->current_req; + struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; + int seq, n, t; + + dlog(ms, "cmd_complete fc=%x", mr->fifo_count); + seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0); + switch (ms->msgphase) { + case msg_out_xxx: + /* huh? we expected a phase mismatch */ + ms->n_msgin = 0; + ms->msgphase = msg_in; + fallthrough; + + case msg_in: + /* should have some message bytes in fifo */ + get_msgin(ms); + n = msgin_length(ms); + if (ms->n_msgin < n) { + out_8(&mr->count_lo, n - ms->n_msgin); + out_8(&mr->sequence, SEQ_MSGIN + seq); + } else { + ms->msgphase = msg_none; + handle_msgin(ms); + start_phase(ms); + } + break; + + case msg_in_bad: + out_8(&mr->sequence, SEQ_FLUSHFIFO); + mesh_flush_io(mr); + udelay(1); + out_8(&mr->count_lo, 1); + out_8(&mr->sequence, SEQ_MSGIN + SEQ_ATN + use_active_neg); + break; + + case msg_out: + /* + * To get the right timing on ATN wrt ACK, we have + * to get the MESH to drop ACK, wait until REQ gets + * asserted, then drop ATN. 
To do this we first + * issue a SEQ_MSGOUT with ATN and wait for REQ, + * then change the command to a SEQ_MSGOUT w/o ATN. + * If we don't see REQ in a reasonable time, we + * change the command to SEQ_MSGIN with ATN, + * wait for the phase mismatch interrupt, then + * issue the SEQ_MSGOUT without ATN. + */ + out_8(&mr->count_lo, 1); + out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg + SEQ_ATN); + t = 30; /* wait up to 30us */ + while ((in_8(&mr->bus_status0) & BS0_REQ) == 0 && --t >= 0) + udelay(1); + dlog(ms, "last_mbyte err/exc/fc/cl=%.8x", + MKWORD(mr->error, mr->exception, + mr->fifo_count, mr->count_lo)); + if (in_8(&mr->interrupt) & (INT_ERROR | INT_EXCEPTION)) { + /* whoops, target didn't do what we expected */ + ms->last_n_msgout = ms->n_msgout; + ms->n_msgout = 0; + if (in_8(&mr->interrupt) & INT_ERROR) { + printk(KERN_ERR "mesh: error %x in msg_out\n", + in_8(&mr->error)); + handle_error(ms); + return; + } + if (in_8(&mr->exception) != EXC_PHASEMM) + printk(KERN_ERR "mesh: exc %x in msg_out\n", + in_8(&mr->exception)); + else + printk(KERN_DEBUG "mesh: bs0=%x in msg_out\n", + in_8(&mr->bus_status0)); + handle_exception(ms); + return; + } + if (in_8(&mr->bus_status0) & BS0_REQ) { + out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg); + mesh_flush_io(mr); + udelay(1); + out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]); + ms->msgphase = msg_out_last; + } else { + out_8(&mr->sequence, SEQ_MSGIN + use_active_neg + SEQ_ATN); + ms->msgphase = msg_out_xxx; + } + break; + + case msg_out_last: + ms->last_n_msgout = ms->n_msgout; + ms->n_msgout = 0; + ms->msgphase = ms->expect_reply? msg_in: msg_none; + start_phase(ms); + break; + + case msg_none: + switch (ms->phase) { + case idle: + printk(KERN_ERR "mesh: interrupt in idle phase?\n"); + dumpslog(ms); + return; + case selecting: + dlog(ms, "Selecting phase at command completion",0); + ms->msgout[0] = IDENTIFY(ALLOW_RESEL(ms->conn_tgt), + (cmd? cmd->device->lun: 0)); + ms->n_msgout = 1; + ms->expect_reply = 0; + if (ms->aborting) { + ms->msgout[0] = ABORT; + ms->n_msgout++; + } else if (tp->sdtr_state == do_sdtr) { + /* add SDTR message */ + add_sdtr_msg(ms); + ms->expect_reply = 1; + tp->sdtr_state = sdtr_sent; + } + ms->msgphase = msg_out; + /* + * We need to wait for REQ before dropping ATN. + * We wait for at most 30us, then fall back to + * a scheme where we issue a SEQ_COMMAND with ATN, + * which will give us a phase mismatch interrupt + * when REQ does come, and then we send the message. + */ + t = 230; /* wait up to 230us */ + while ((in_8(&mr->bus_status0) & BS0_REQ) == 0) { + if (--t < 0) { + dlog(ms, "impatient for req", ms->n_msgout); + ms->msgphase = msg_none; + break; + } + udelay(1); + } + break; + case dataing: + if (ms->dma_count != 0) { + start_phase(ms); + return; + } + /* + * We can get a phase mismatch here if the target + * changes to the status phase, even though we have + * had a command complete interrupt. Then, if we + * issue the SEQ_STATUS command, we'll get a sequence + * error interrupt. Which isn't so bad except that + * occasionally the mesh actually executes the + * SEQ_STATUS *as well as* giving us the sequence + * error and phase mismatch exception. 
+ */ + out_8(&mr->sequence, 0); + out_8(&mr->interrupt, + INT_ERROR | INT_EXCEPTION | INT_CMDDONE); + halt_dma(ms); + break; + case statusing: + if (cmd) { + struct mesh_cmd_priv *mcmd = mesh_priv(cmd); + + mcmd->status = mr->fifo; + if (DEBUG_TARGET(cmd)) + printk(KERN_DEBUG "mesh: status is %x\n", + mcmd->status); + } + ms->msgphase = msg_in; + break; + case busfreeing: + mesh_done(ms, 1); + return; + case disconnecting: + ms->current_req = NULL; + ms->phase = idle; + mesh_start(ms); + return; + default: + break; + } + ++ms->phase; + start_phase(ms); + break; + } +} + + +/* + * Called by midlayer with host locked to queue a new + * request + */ +static int mesh_queue_lck(struct scsi_cmnd *cmd) +{ + struct mesh_state *ms; + + cmd->host_scribble = NULL; + + ms = (struct mesh_state *) cmd->device->host->hostdata; + + if (ms->request_q == NULL) + ms->request_q = cmd; + else + ms->request_qtail->host_scribble = (void *) cmd; + ms->request_qtail = cmd; + + if (ms->phase == idle) + mesh_start(ms); + + return 0; +} + +static DEF_SCSI_QCMD(mesh_queue) + +/* + * Called to handle interrupts, either call by the interrupt + * handler (do_mesh_interrupt) or by other functions in + * exceptional circumstances + */ +static void mesh_interrupt(struct mesh_state *ms) +{ + volatile struct mesh_regs __iomem *mr = ms->mesh; + int intr; + +#if 0 + if (ALLOW_DEBUG(ms->conn_tgt)) + printk(KERN_DEBUG "mesh_intr, bs0=%x int=%x exc=%x err=%x " + "phase=%d msgphase=%d\n", mr->bus_status0, + mr->interrupt, mr->exception, mr->error, + ms->phase, ms->msgphase); +#endif + while ((intr = in_8(&mr->interrupt)) != 0) { + dlog(ms, "interrupt intr/err/exc/seq=%.8x", + MKWORD(intr, mr->error, mr->exception, mr->sequence)); + if (intr & INT_ERROR) { + handle_error(ms); + } else if (intr & INT_EXCEPTION) { + handle_exception(ms); + } else if (intr & INT_CMDDONE) { + out_8(&mr->interrupt, INT_CMDDONE); + cmd_complete(ms); + } + } +} + +/* Todo: here we can at least try to remove the command from the + * queue if it isn't connected yet, and for pending command, assert + * ATN until the bus gets freed. + */ +static int mesh_abort(struct scsi_cmnd *cmd) +{ + struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata; + + printk(KERN_DEBUG "mesh_abort(%p)\n", cmd); + mesh_dump_regs(ms); + dumplog(ms, cmd->device->id); + dumpslog(ms); + return FAILED; +} + +/* + * Called by the midlayer with the lock held to reset the + * SCSI host and bus. 
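mesh_queue_lck() above threads waiting commands into a simple FIFO through scsi_cmnd->host_scribble, with request_q and request_qtail as head and tail, and mesh_start() earlier in this file unlinks the first queued command whose target is idle. The sketch below models that intrusive singly linked queue with a stand-in command type so the enqueue and unlink bookkeeping can be read on its own; it is a simplified model, not the driver's code.

#include <stddef.h>
#include <stdio.h>

struct cmd {
	int target;
	struct cmd *next;        /* plays the role of host_scribble */
};

struct queue { struct cmd *head, *tail; };

static void enqueue(struct queue *q, struct cmd *c)
{
	c->next = NULL;
	if (!q->head)
		q->head = c;     /* empty queue: c becomes head and tail */
	else
		q->tail->next = c;
	q->tail = c;
}

/* Unlink and return the first command whose target is not already busy. */
static struct cmd *dequeue_free_target(struct queue *q, const int *tgt_busy)
{
	struct cmd *prev = NULL, *c;

	for (c = q->head; c; prev = c, c = c->next)
		if (!tgt_busy[c->target])
			break;
	if (!c)
		return NULL;
	if (prev)
		prev->next = c->next;
	else
		q->head = c->next;
	if (!c->next)
		q->tail = prev;
	return c;
}

int main(void)
{
	struct queue q = { NULL, NULL };
	struct cmd a = { .target = 2 }, b = { .target = 5 };
	int busy[8] = { 0 };

	enqueue(&q, &a);
	enqueue(&q, &b);
	busy[2] = 1;             /* target 2 already has a command in flight */
	printf("picked target %d\n", dequeue_free_target(&q, busy)->target);
	return 0;
}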
+ * The midlayer will wait for devices to come back, we don't need + * to do that ourselves + */ +static int mesh_host_reset(struct scsi_cmnd *cmd) +{ + struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata; + volatile struct mesh_regs __iomem *mr = ms->mesh; + volatile struct dbdma_regs __iomem *md = ms->dma; + unsigned long flags; + + printk(KERN_DEBUG "mesh_host_reset\n"); + + spin_lock_irqsave(ms->host->host_lock, flags); + + if (ms->dma_started) + halt_dma(ms); + + /* Reset the controller & dbdma channel */ + out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */ + out_8(&mr->exception, 0xff); /* clear all exception bits */ + out_8(&mr->error, 0xff); /* clear all error bits */ + out_8(&mr->sequence, SEQ_RESETMESH); + mesh_flush_io(mr); + udelay(1); + out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); + out_8(&mr->source_id, ms->host->this_id); + out_8(&mr->sel_timeout, 25); /* 250ms */ + out_8(&mr->sync_params, ASYNC_PARAMS); + + /* Reset the bus */ + out_8(&mr->bus_status1, BS1_RST); /* assert RST */ + mesh_flush_io(mr); + udelay(30); /* leave it on for >= 25us */ + out_8(&mr->bus_status1, 0); /* negate RST */ + + /* Complete pending commands */ + handle_reset(ms); + + spin_unlock_irqrestore(ms->host->host_lock, flags); + return SUCCESS; +} + +static void set_mesh_power(struct mesh_state *ms, int state) +{ + if (!machine_is(powermac)) + return; + if (state) { + pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 1); + msleep(200); + } else { + pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 0); + msleep(10); + } +} + + +#ifdef CONFIG_PM +static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg) +{ + struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); + unsigned long flags; + + switch (mesg.event) { + case PM_EVENT_SUSPEND: + case PM_EVENT_HIBERNATE: + case PM_EVENT_FREEZE: + break; + default: + return 0; + } + if (ms->phase == sleeping) + return 0; + + scsi_block_requests(ms->host); + spin_lock_irqsave(ms->host->host_lock, flags); + while(ms->phase != idle) { + spin_unlock_irqrestore(ms->host->host_lock, flags); + msleep(10); + spin_lock_irqsave(ms->host->host_lock, flags); + } + ms->phase = sleeping; + spin_unlock_irqrestore(ms->host->host_lock, flags); + disable_irq(ms->meshintr); + set_mesh_power(ms, 0); + + return 0; +} + +static int mesh_resume(struct macio_dev *mdev) +{ + struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); + unsigned long flags; + + if (ms->phase != sleeping) + return 0; + + set_mesh_power(ms, 1); + mesh_init(ms); + spin_lock_irqsave(ms->host->host_lock, flags); + mesh_start(ms); + spin_unlock_irqrestore(ms->host->host_lock, flags); + enable_irq(ms->meshintr); + scsi_unblock_requests(ms->host); + + return 0; +} + +#endif /* CONFIG_PM */ + +/* + * If we leave drives set for synchronous transfers (especially + * CDROMs), and reboot to MacOS, it gets confused, poor thing. + * So, on reboot we reset the SCSI bus. 
+ */ +static int mesh_shutdown(struct macio_dev *mdev) +{ + struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); + volatile struct mesh_regs __iomem *mr; + unsigned long flags; + + printk(KERN_INFO "resetting MESH scsi bus(es)\n"); + spin_lock_irqsave(ms->host->host_lock, flags); + mr = ms->mesh; + out_8(&mr->intr_mask, 0); + out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); + out_8(&mr->bus_status1, BS1_RST); + mesh_flush_io(mr); + udelay(30); + out_8(&mr->bus_status1, 0); + spin_unlock_irqrestore(ms->host->host_lock, flags); + + return 0; +} + +static const struct scsi_host_template mesh_template = { + .proc_name = "mesh", + .name = "MESH", + .queuecommand = mesh_queue, + .eh_abort_handler = mesh_abort, + .eh_host_reset_handler = mesh_host_reset, + .can_queue = 20, + .this_id = 7, + .sg_tablesize = SG_ALL, + .cmd_per_lun = 2, + .max_segment_size = 65535, + .cmd_size = sizeof(struct mesh_cmd_priv), +}; + +static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match) +{ + struct device_node *mesh = macio_get_of_node(mdev); + struct pci_dev* pdev = macio_get_pci_dev(mdev); + int tgt, minper; + const int *cfp; + struct mesh_state *ms; + struct Scsi_Host *mesh_host; + void *dma_cmd_space; + dma_addr_t dma_cmd_bus; + + switch (mdev->bus->chip->type) { + case macio_heathrow: + case macio_gatwick: + case macio_paddington: + use_active_neg = 0; + break; + default: + use_active_neg = SEQ_ACTIVE_NEG; + } + + if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) { + printk(KERN_ERR "mesh: expected 2 addrs and 2 intrs" + " (got %d,%d)\n", macio_resource_count(mdev), + macio_irq_count(mdev)); + return -ENODEV; + } + + if (macio_request_resources(mdev, "mesh") != 0) { + printk(KERN_ERR "mesh: unable to request memory resources"); + return -EBUSY; + } + mesh_host = scsi_host_alloc(&mesh_template, sizeof(struct mesh_state)); + if (mesh_host == NULL) { + printk(KERN_ERR "mesh: couldn't register host"); + goto out_release; + } + + mesh_host->base = macio_resource_start(mdev, 0); + mesh_host->irq = macio_irq(mdev, 0); + ms = (struct mesh_state *) mesh_host->hostdata; + macio_set_drvdata(mdev, ms); + ms->host = mesh_host; + ms->mdev = mdev; + ms->pdev = pdev; + + ms->mesh = ioremap(macio_resource_start(mdev, 0), 0x1000); + if (ms->mesh == NULL) { + printk(KERN_ERR "mesh: can't map registers\n"); + goto out_free; + } + ms->dma = ioremap(macio_resource_start(mdev, 1), 0x1000); + if (ms->dma == NULL) { + printk(KERN_ERR "mesh: can't map registers\n"); + iounmap(ms->mesh); + goto out_free; + } + + ms->meshintr = macio_irq(mdev, 0); + ms->dmaintr = macio_irq(mdev, 1); + + /* Space for dma command list: +1 for stop command, + * +1 to allow for aligning. 
+ */ + ms->dma_cmd_size = (mesh_host->sg_tablesize + 2) * sizeof(struct dbdma_cmd); + + /* We use the PCI APIs for now until the generic one gets fixed + * enough or until we get some macio-specific versions + */ + dma_cmd_space = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev, + ms->dma_cmd_size, &dma_cmd_bus, + GFP_KERNEL); + if (dma_cmd_space == NULL) { + printk(KERN_ERR "mesh: can't allocate DMA table\n"); + goto out_unmap; + } + + ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space); + ms->dma_cmd_space = dma_cmd_space; + ms->dma_cmd_bus = dma_cmd_bus + ((unsigned long)ms->dma_cmds) + - (unsigned long)dma_cmd_space; + ms->current_req = NULL; + for (tgt = 0; tgt < 8; ++tgt) { + ms->tgts[tgt].sdtr_state = do_sdtr; + ms->tgts[tgt].sync_params = ASYNC_PARAMS; + ms->tgts[tgt].current_req = NULL; + } + + if ((cfp = of_get_property(mesh, "clock-frequency", NULL))) + ms->clk_freq = *cfp; + else { + printk(KERN_INFO "mesh: assuming 50MHz clock frequency\n"); + ms->clk_freq = 50000000; + } + + /* The maximum sync rate is clock / 5; increase + * mesh_sync_period if necessary. + */ + minper = 1000000000 / (ms->clk_freq / 5); /* ns */ + if (mesh_sync_period < minper) + mesh_sync_period = minper; + + /* Power up the chip */ + set_mesh_power(ms, 1); + + /* Set it up */ + mesh_init(ms); + + /* Request interrupt */ + if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) { + printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr); + goto out_shutdown; + } + + /* Add scsi host & scan */ + if (scsi_add_host(mesh_host, &mdev->ofdev.dev)) + goto out_release_irq; + scsi_scan_host(mesh_host); + + return 0; + + out_release_irq: + free_irq(ms->meshintr, ms); + out_shutdown: + /* shutdown & reset bus in case of error or macos can be confused + * at reboot if the bus was set to synchronous mode already + */ + mesh_shutdown(mdev); + set_mesh_power(ms, 0); + dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size, + ms->dma_cmd_space, ms->dma_cmd_bus); + out_unmap: + iounmap(ms->dma); + iounmap(ms->mesh); + out_free: + scsi_host_put(mesh_host); + out_release: + macio_release_resources(mdev); + + return -ENODEV; +} + +static int mesh_remove(struct macio_dev *mdev) +{ + struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); + struct Scsi_Host *mesh_host = ms->host; + + scsi_remove_host(mesh_host); + + free_irq(ms->meshintr, ms); + + /* Reset scsi bus */ + mesh_shutdown(mdev); + + /* Shut down chip & termination */ + set_mesh_power(ms, 0); + + /* Unmap registers & dma controller */ + iounmap(ms->mesh); + iounmap(ms->dma); + + /* Free DMA commands memory */ + dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size, + ms->dma_cmd_space, ms->dma_cmd_bus); + + /* Release memory resources */ + macio_release_resources(mdev); + + scsi_host_put(mesh_host); + + return 0; +} + + +static struct of_device_id mesh_match[] = +{ + { + .name = "mesh", + }, + { + .type = "scsi", + .compatible = "chrp,mesh0" + }, + {}, +}; +MODULE_DEVICE_TABLE (of, mesh_match); + +static struct macio_driver mesh_driver = +{ + .driver = { + .name = "mesh", + .owner = THIS_MODULE, + .of_match_table = mesh_match, + }, + .probe = mesh_probe, + .remove = mesh_remove, + .shutdown = mesh_shutdown, +#ifdef CONFIG_PM + .suspend = mesh_suspend, + .resume = mesh_resume, +#endif +}; + + +static int __init init_mesh(void) +{ + + /* Calculate sync rate from module parameters */ + if (sync_rate > 10) + sync_rate = 10; + if (sync_rate > 0) { + printk(KERN_INFO "mesh: configured for synchronous %d MB/s\n", 
sync_rate); + mesh_sync_period = 1000 / sync_rate; /* ns */ + mesh_sync_offset = 15; + } else + printk(KERN_INFO "mesh: configured for asynchronous\n"); + + return macio_register_driver(&mesh_driver); +} + +static void __exit exit_mesh(void) +{ + return macio_unregister_driver(&mesh_driver); +} + +module_init(init_mesh); +module_exit(exit_mesh); diff --git a/drivers/scsi/mesh.h b/drivers/scsi/mesh.h new file mode 100644 index 000000000..f70181acc --- /dev/null +++ b/drivers/scsi/mesh.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * mesh.h: definitions for the driver for the MESH SCSI bus adaptor + * (Macintosh Enhanced SCSI Hardware) found on Power Macintosh computers. + * + * Copyright (C) 1996 Paul Mackerras. + */ +#ifndef _MESH_H +#define _MESH_H + +struct mesh_cmd_priv { + int this_residual; + int message; + int status; +}; + +static inline struct mesh_cmd_priv *mesh_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +/* + * Registers in the MESH controller. + */ + +struct mesh_regs { + unsigned char count_lo; + char pad0[15]; + unsigned char count_hi; + char pad1[15]; + unsigned char fifo; + char pad2[15]; + unsigned char sequence; + char pad3[15]; + unsigned char bus_status0; + char pad4[15]; + unsigned char bus_status1; + char pad5[15]; + unsigned char fifo_count; + char pad6[15]; + unsigned char exception; + char pad7[15]; + unsigned char error; + char pad8[15]; + unsigned char intr_mask; + char pad9[15]; + unsigned char interrupt; + char pad10[15]; + unsigned char source_id; + char pad11[15]; + unsigned char dest_id; + char pad12[15]; + unsigned char sync_params; + char pad13[15]; + unsigned char mesh_id; + char pad14[15]; + unsigned char sel_timeout; + char pad15[15]; +}; + +/* Bits in the sequence register. */ +#define SEQ_DMA_MODE 0x80 /* use DMA for data transfer */ +#define SEQ_TARGET 0x40 /* put the controller into target mode */ +#define SEQ_ATN 0x20 /* assert ATN signal */ +#define SEQ_ACTIVE_NEG 0x10 /* use active negation on REQ/ACK */ +#define SEQ_CMD 0x0f /* command bits: */ +#define SEQ_ARBITRATE 1 /* get the bus */ +#define SEQ_SELECT 2 /* select a target */ +#define SEQ_COMMAND 3 /* send a command */ +#define SEQ_STATUS 4 /* receive status */ +#define SEQ_DATAOUT 5 /* send data */ +#define SEQ_DATAIN 6 /* receive data */ +#define SEQ_MSGOUT 7 /* send a message */ +#define SEQ_MSGIN 8 /* receive a message */ +#define SEQ_BUSFREE 9 /* look for bus free */ +#define SEQ_ENBPARITY 0x0a /* enable parity checking */ +#define SEQ_DISPARITY 0x0b /* disable parity checking */ +#define SEQ_ENBRESEL 0x0c /* enable reselection */ +#define SEQ_DISRESEL 0x0d /* disable reselection */ +#define SEQ_RESETMESH 0x0e /* reset the controller */ +#define SEQ_FLUSHFIFO 0x0f /* clear out the FIFO */ + +/* Bits in the bus_status0 and bus_status1 registers: + these correspond directly to the SCSI bus control signals. */ +#define BS0_REQ 0x20 +#define BS0_ACK 0x10 +#define BS0_ATN 0x08 +#define BS0_MSG 0x04 +#define BS0_CD 0x02 +#define BS0_IO 0x01 +#define BS1_RST 0x80 +#define BS1_BSY 0x40 +#define BS1_SEL 0x20 + +/* Bus phases defined by the bits in bus_status0 */ +#define BS0_PHASE (BS0_MSG+BS0_CD+BS0_IO) +#define BP_DATAOUT 0 +#define BP_DATAIN BS0_IO +#define BP_COMMAND BS0_CD +#define BP_STATUS (BS0_CD+BS0_IO) +#define BP_MSGOUT (BS0_MSG+BS0_CD) +#define BP_MSGIN (BS0_MSG+BS0_CD+BS0_IO) + +/* Bits in the exception register. 
*/ +#define EXC_SELWATN 0x20 /* (as target) we were selected with ATN */ +#define EXC_SELECTED 0x10 /* (as target) we were selected w/o ATN */ +#define EXC_RESELECTED 0x08 /* (as initiator) we were reselected */ +#define EXC_ARBLOST 0x04 /* we lost arbitration */ +#define EXC_PHASEMM 0x02 /* SCSI phase mismatch */ +#define EXC_SELTO 0x01 /* selection timeout */ + +/* Bits in the error register */ +#define ERR_UNEXPDISC 0x40 /* target unexpectedly disconnected */ +#define ERR_SCSIRESET 0x20 /* SCSI bus got reset on us */ +#define ERR_SEQERR 0x10 /* we did something the chip didn't like */ +#define ERR_PARITY 0x01 /* parity error was detected */ + +/* Bits in the interrupt and intr_mask registers */ +#define INT_ERROR 0x04 /* error interrupt */ +#define INT_EXCEPTION 0x02 /* exception interrupt */ +#define INT_CMDDONE 0x01 /* command done interrupt */ + +/* Fields in the sync_params register */ +#define SYNC_OFF(x) ((x) >> 4) /* offset field */ +#define SYNC_PER(x) ((x) & 0xf) /* period field */ +#define SYNC_PARAMS(o, p) (((o) << 4) | (p)) +#define ASYNC_PARAMS 2 /* sync_params value for async xfers */ + +/* + * Assuming a clock frequency of 50MHz: + * + * The transfer period with SYNC_PER(sync_params) == x + * is (x + 2) * 40ns, except that x == 0 gives 100ns. + * + * The units of the sel_timeout register are 10ms. + */ + + +#endif /* _MESH_H */ diff --git a/drivers/scsi/mpi3mr/Kconfig b/drivers/scsi/mpi3mr/Kconfig new file mode 100644 index 000000000..f48740cd5 --- /dev/null +++ b/drivers/scsi/mpi3mr/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +config SCSI_MPI3MR + tristate "Broadcom MPI3 Storage Controller Device Driver" + depends on PCI && SCSI + select BLK_DEV_BSGLIB + select SCSI_SAS_ATTRS + help + MPI3 based Storage & RAID Controllers Driver. diff --git a/drivers/scsi/mpi3mr/Makefile b/drivers/scsi/mpi3mr/Makefile new file mode 100644 index 000000000..3bf8cf34e --- /dev/null +++ b/drivers/scsi/mpi3mr/Makefile @@ -0,0 +1,6 @@ +# mpi3mr makefile +obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr.o +mpi3mr-y += mpi3mr_os.o \ + mpi3mr_fw.o \ + mpi3mr_app.o \ + mpi3mr_transport.o diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h new file mode 100644 index 000000000..35f81af40 --- /dev/null +++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h @@ -0,0 +1,2500 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2017-2023 Broadcom Inc. All rights reserved. 
+ */ +#ifndef MPI30_CNFG_H +#define MPI30_CNFG_H 1 +#define MPI3_CONFIG_PAGETYPE_IO_UNIT (0x00) +#define MPI3_CONFIG_PAGETYPE_MANUFACTURING (0x01) +#define MPI3_CONFIG_PAGETYPE_IOC (0x02) +#define MPI3_CONFIG_PAGETYPE_DRIVER (0x03) +#define MPI3_CONFIG_PAGETYPE_SECURITY (0x04) +#define MPI3_CONFIG_PAGETYPE_ENCLOSURE (0x11) +#define MPI3_CONFIG_PAGETYPE_DEVICE (0x12) +#define MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT (0x20) +#define MPI3_CONFIG_PAGETYPE_SAS_EXPANDER (0x21) +#define MPI3_CONFIG_PAGETYPE_SAS_PHY (0x23) +#define MPI3_CONFIG_PAGETYPE_SAS_PORT (0x24) +#define MPI3_CONFIG_PAGETYPE_PCIE_IO_UNIT (0x30) +#define MPI3_CONFIG_PAGETYPE_PCIE_SWITCH (0x31) +#define MPI3_CONFIG_PAGETYPE_PCIE_LINK (0x33) +#define MPI3_CONFIG_PAGEATTR_MASK (0xf0) +#define MPI3_CONFIG_PAGEATTR_READ_ONLY (0x00) +#define MPI3_CONFIG_PAGEATTR_CHANGEABLE (0x10) +#define MPI3_CONFIG_PAGEATTR_PERSISTENT (0x20) +#define MPI3_CONFIG_ACTION_PAGE_HEADER (0x00) +#define MPI3_CONFIG_ACTION_READ_DEFAULT (0x01) +#define MPI3_CONFIG_ACTION_READ_CURRENT (0x02) +#define MPI3_CONFIG_ACTION_WRITE_CURRENT (0x03) +#define MPI3_CONFIG_ACTION_READ_PERSISTENT (0x04) +#define MPI3_CONFIG_ACTION_WRITE_PERSISTENT (0x05) +#define MPI3_DEVICE_PGAD_FORM_MASK (0xf0000000) +#define MPI3_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI3_DEVICE_PGAD_FORM_HANDLE (0x20000000) +#define MPI3_DEVICE_PGAD_HANDLE_MASK (0x0000ffff) +#define MPI3_SAS_EXPAND_PGAD_FORM_MASK (0xf0000000) +#define MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM (0x10000000) +#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE (0x20000000) +#define MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00ff0000) +#define MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16) +#define MPI3_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000ffff) +#define MPI3_SAS_PHY_PGAD_FORM_MASK (0xf0000000) +#define MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000) +#define MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000ff) +#define MPI3_SASPORT_PGAD_FORM_MASK (0xf0000000) +#define MPI3_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000) +#define MPI3_SASPORT_PGAD_FORM_PORT_NUM (0x10000000) +#define MPI3_SASPORT_PGAD_PORT_NUMBER_MASK (0x000000ff) +#define MPI3_ENCLOS_PGAD_FORM_MASK (0xf0000000) +#define MPI3_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI3_ENCLOS_PGAD_FORM_HANDLE (0x10000000) +#define MPI3_ENCLOS_PGAD_HANDLE_MASK (0x0000ffff) +#define MPI3_PCIE_SWITCH_PGAD_FORM_MASK (0xf0000000) +#define MPI3_PCIE_SWITCH_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI3_PCIE_SWITCH_PGAD_FORM_HANDLE_PORT_NUM (0x10000000) +#define MPI3_PCIE_SWITCH_PGAD_FORM_HANDLE (0x20000000) +#define MPI3_PCIE_SWITCH_PGAD_PORTNUM_MASK (0x00ff0000) +#define MPI3_PCIE_SWITCH_PGAD_PORTNUM_SHIFT (16) +#define MPI3_PCIE_SWITCH_PGAD_HANDLE_MASK (0x0000ffff) +#define MPI3_PCIE_LINK_PGAD_FORM_MASK (0xf0000000) +#define MPI3_PCIE_LINK_PGAD_FORM_GET_NEXT_LINK (0x00000000) +#define MPI3_PCIE_LINK_PGAD_FORM_LINK_NUM (0x10000000) +#define MPI3_PCIE_LINK_PGAD_LINKNUM_MASK (0x000000ff) +#define MPI3_SECURITY_PGAD_FORM_MASK (0xf0000000) +#define MPI3_SECURITY_PGAD_FORM_GET_NEXT_SLOT (0x00000000) +#define MPI3_SECURITY_PGAD_FORM_SLOT_NUM (0x10000000) +#define MPI3_SECURITY_PGAD_SLOT_GROUP_MASK (0x0000ff00) +#define MPI3_SECURITY_PGAD_SLOT_GROUP_SHIFT (8) +#define MPI3_SECURITY_PGAD_SLOT_MASK (0x000000ff) +struct mpi3_config_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + __le16 reserved0a; + u8 page_version; + u8 
page_number; + u8 page_type; + u8 action; + __le32 page_address; + __le16 page_length; + __le16 reserved16; + __le32 reserved18[2]; + union mpi3_sge_union sgl; +}; + +struct mpi3_config_page_header { + u8 page_version; + u8 reserved01; + u8 page_number; + u8 page_attribute; + __le16 page_length; + u8 page_type; + u8 reserved07; +}; + +#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK (0xf0) +#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT (4) +#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_MASK (0x0f) +#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_SHIFT (0) +#define MPI3_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00) +#define MPI3_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01) +#define MPI3_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02) +#define MPI3_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03) +#define MPI3_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04) +#define MPI3_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05) +#define MPI3_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06) +#define MPI3_SAS_NEG_LINK_RATE_1_5 (0x08) +#define MPI3_SAS_NEG_LINK_RATE_3_0 (0x09) +#define MPI3_SAS_NEG_LINK_RATE_6_0 (0x0a) +#define MPI3_SAS_NEG_LINK_RATE_12_0 (0x0b) +#define MPI3_SAS_NEG_LINK_RATE_22_5 (0x0c) +#define MPI3_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040) +#define MPI3_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020) +#define MPI3_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010) +#define MPI3_SAS_APHYINFO_REASON_MASK (0x0000000f) +#define MPI3_SAS_APHYINFO_REASON_UNKNOWN (0x00000000) +#define MPI3_SAS_APHYINFO_REASON_POWER_ON (0x00000001) +#define MPI3_SAS_APHYINFO_REASON_HARD_RESET (0x00000002) +#define MPI3_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003) +#define MPI3_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004) +#define MPI3_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005) +#define MPI3_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006) +#define MPI3_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007) +#define MPI3_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008) +#define MPI3_SAS_APHYINFO_REASON_EXP_REDUCED_FUNC (0x00000009) +#define MPI3_SAS_PHYINFO_STATUS_MASK (0xc0000000) +#define MPI3_SAS_PHYINFO_STATUS_SHIFT (30) +#define MPI3_SAS_PHYINFO_STATUS_ACCESSIBLE (0x00000000) +#define MPI3_SAS_PHYINFO_STATUS_NOT_EXIST (0x40000000) +#define MPI3_SAS_PHYINFO_STATUS_VACANT (0x80000000) +#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000) +#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_ACTIVE (0x00000000) +#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_PARTIAL (0x08000000) +#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_SLUMBER (0x10000000) +#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_CHANGED_MASK (0x04000000) +#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_CHANGED_SHIFT (26) +#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT_MASK (0x02000000) +#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT_SHIFT (25) +#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_MASK (0x01000000) +#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_SHIFT (24) +#define MPI3_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000) +#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_WITHIN (0x00200000) +#define MPI3_SAS_PHYINFO_ZONING_ENABLED (0x00100000) +#define MPI3_SAS_PHYINFO_REASON_MASK (0x000f0000) +#define MPI3_SAS_PHYINFO_REASON_UNKNOWN (0x00000000) +#define MPI3_SAS_PHYINFO_REASON_POWER_ON (0x00010000) +#define MPI3_SAS_PHYINFO_REASON_HARD_RESET (0x00020000) +#define MPI3_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000) +#define MPI3_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000) +#define MPI3_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000) +#define MPI3_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER 
(0x00060000) +#define MPI3_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000) +#define MPI3_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000) +#define MPI3_SAS_PHYINFO_REASON_EXP_REDUCED_FUNC (0x00090000) +#define MPI3_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000) +#define MPI3_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000) +#define MPI3_SAS_PHYINFO_VIRTUAL_PHY (0x00001000) +#define MPI3_SAS_PHYINFO_PARTIAL_PATHWAY_TIME_MASK (0x00000f00) +#define MPI3_SAS_PHYINFO_PARTIAL_PATHWAY_TIME_SHIFT (8) +#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_MASK (0x000000f0) +#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_DIRECT (0x00000000) +#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_SUBTRACTIVE (0x00000010) +#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_TABLE (0x00000020) +#define MPI3_SAS_PRATE_MAX_RATE_MASK (0xf0) +#define MPI3_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00) +#define MPI3_SAS_PRATE_MAX_RATE_1_5 (0x80) +#define MPI3_SAS_PRATE_MAX_RATE_3_0 (0x90) +#define MPI3_SAS_PRATE_MAX_RATE_6_0 (0xa0) +#define MPI3_SAS_PRATE_MAX_RATE_12_0 (0xb0) +#define MPI3_SAS_PRATE_MAX_RATE_22_5 (0xc0) +#define MPI3_SAS_PRATE_MIN_RATE_MASK (0x0f) +#define MPI3_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00) +#define MPI3_SAS_PRATE_MIN_RATE_1_5 (0x08) +#define MPI3_SAS_PRATE_MIN_RATE_3_0 (0x09) +#define MPI3_SAS_PRATE_MIN_RATE_6_0 (0x0a) +#define MPI3_SAS_PRATE_MIN_RATE_12_0 (0x0b) +#define MPI3_SAS_PRATE_MIN_RATE_22_5 (0x0c) +#define MPI3_SAS_HWRATE_MAX_RATE_MASK (0xf0) +#define MPI3_SAS_HWRATE_MAX_RATE_1_5 (0x80) +#define MPI3_SAS_HWRATE_MAX_RATE_3_0 (0x90) +#define MPI3_SAS_HWRATE_MAX_RATE_6_0 (0xa0) +#define MPI3_SAS_HWRATE_MAX_RATE_12_0 (0xb0) +#define MPI3_SAS_HWRATE_MAX_RATE_22_5 (0xc0) +#define MPI3_SAS_HWRATE_MIN_RATE_MASK (0x0f) +#define MPI3_SAS_HWRATE_MIN_RATE_1_5 (0x08) +#define MPI3_SAS_HWRATE_MIN_RATE_3_0 (0x09) +#define MPI3_SAS_HWRATE_MIN_RATE_6_0 (0x0a) +#define MPI3_SAS_HWRATE_MIN_RATE_12_0 (0x0b) +#define MPI3_SAS_HWRATE_MIN_RATE_22_5 (0x0c) +#define MPI3_SLOT_INVALID (0xffff) +#define MPI3_SLOT_INDEX_INVALID (0xffff) +#define MPI3_LINK_CHANGE_COUNT_INVALID (0xffff) +#define MPI3_RATE_CHANGE_COUNT_INVALID (0xffff) +#define MPI3_TEMP_SENSOR_LOCATION_INTERNAL (0x0) +#define MPI3_TEMP_SENSOR_LOCATION_INLET (0x1) +#define MPI3_TEMP_SENSOR_LOCATION_OUTLET (0x2) +#define MPI3_TEMP_SENSOR_LOCATION_DRAM (0x3) +#define MPI3_MFGPAGE_VENDORID_BROADCOM (0x1000) +#define MPI3_MFGPAGE_DEVID_SAS4116 (0x00a5) +#define MPI3_MFGPAGE_DEVID_SAS5116_MPI (0x00b3) +#define MPI3_MFGPAGE_DEVID_SAS5116_NVME (0x00b4) +#define MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT (0x00b5) +#define MPI3_MFGPAGE_DEVID_SAS5116_NVME_MGMT (0x00b6) +#define MPI3_MFGPAGE_DEVID_SAS5116_PCIE_SWITCH (0x00b8) +struct mpi3_man_page0 { + struct mpi3_config_page_header header; + u8 chip_revision[8]; + u8 chip_name[32]; + u8 board_name[32]; + u8 board_assembly[32]; + u8 board_tracer_number[32]; + __le32 board_power; + __le32 reserved94; + __le32 reserved98; + u8 oem; + u8 profile_identifier; + __le16 flags; + u8 board_mfg_day; + u8 board_mfg_month; + __le16 board_mfg_year; + u8 board_rework_day; + u8 board_rework_month; + __le16 board_rework_year; + u8 board_revision[8]; + u8 e_pack_fru[16]; + u8 product_name[256]; +}; + +#define MPI3_MAN0_PAGEVERSION (0x00) +#define MPI3_MAN0_FLAGS_SWITCH_PRESENT (0x0002) +#define MPI3_MAN0_FLAGS_EXPANDER_PRESENT (0x0001) +#define MPI3_MAN1_VPD_SIZE (512) +struct mpi3_man_page1 { + struct mpi3_config_page_header header; + __le32 reserved08[2]; + u8 vpd[MPI3_MAN1_VPD_SIZE]; +}; + +#define MPI3_MAN1_PAGEVERSION (0x00) +struct mpi3_man_page2 { + 
struct mpi3_config_page_header header; + u8 flags; + u8 reserved09[3]; + __le32 reserved0c[3]; + u8 oem_board_tracer_number[32]; +}; +#define MPI3_MAN2_PAGEVERSION (0x00) +#define MPI3_MAN2_FLAGS_TRACER_PRESENT (0x01) +struct mpi3_man5_phy_entry { + __le64 ioc_wwid; + __le64 device_name; + __le64 sata_wwid; +}; + +#ifndef MPI3_MAN5_PHY_MAX +#define MPI3_MAN5_PHY_MAX (1) +#endif +struct mpi3_man_page5 { + struct mpi3_config_page_header header; + u8 num_phys; + u8 reserved09[3]; + __le32 reserved0c; + struct mpi3_man5_phy_entry phy[MPI3_MAN5_PHY_MAX]; +}; + +#define MPI3_MAN5_PAGEVERSION (0x00) +struct mpi3_man6_gpio_entry { + u8 function_code; + u8 function_flags; + __le16 flags; + u8 param1; + u8 param2; + __le16 reserved06; + __le32 param3; +}; + +#define MPI3_MAN6_GPIO_FUNCTION_GENERIC (0x00) +#define MPI3_MAN6_GPIO_FUNCTION_ALTERNATE (0x01) +#define MPI3_MAN6_GPIO_FUNCTION_EXT_INTERRUPT (0x02) +#define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_ACTIVITY (0x03) +#define MPI3_MAN6_GPIO_FUNCTION_OVER_TEMPERATURE (0x04) +#define MPI3_MAN6_GPIO_FUNCTION_PORT_STATUS_GREEN (0x05) +#define MPI3_MAN6_GPIO_FUNCTION_PORT_STATUS_YELLOW (0x06) +#define MPI3_MAN6_GPIO_FUNCTION_CABLE_MANAGEMENT (0x07) +#define MPI3_MAN6_GPIO_FUNCTION_BKPLANE_MGMT_TYPE (0x08) +#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_RESET (0x0a) +#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET (0x0b) +#define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_FAULT (0x0c) +#define MPI3_MAN6_GPIO_FUNCTION_PBLP_STATUS_CHANGE (0x0d) +#define MPI3_MAN6_GPIO_FUNCTION_EPACK_ONLINE (0x0e) +#define MPI3_MAN6_GPIO_FUNCTION_EPACK_FAULT (0x0f) +#define MPI3_MAN6_GPIO_FUNCTION_CTRL_TYPE (0x10) +#define MPI3_MAN6_GPIO_FUNCTION_LICENSE (0x11) +#define MPI3_MAN6_GPIO_FUNCTION_REFCLK_CONTROL (0x12) +#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET_CLAMP (0x13) +#define MPI3_MAN6_GPIO_FUNCTION_AUXILIARY_POWER (0x14) +#define MPI3_MAN6_GPIO_FUNCTION_RAID_DATA_CACHE_DIRTY (0x15) +#define MPI3_MAN6_GPIO_FUNCTION_BOARD_FAN_CONTROL (0x16) +#define MPI3_MAN6_GPIO_FUNCTION_BOARD_FAN_FAULT (0x17) +#define MPI3_MAN6_GPIO_FUNCTION_POWER_BRAKE (0x18) +#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_MASK (0x01) +#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_ISTWI (0x00) +#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_RECEPTACLEID (0x01) +#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_MASK (0xf0) +#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_GENERIC (0x00) +#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_CABLE_MGMT (0x10) +#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_ACTIVE_CABLE_OVERCURRENT (0x20) +#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_MASK (0x01) +#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_EDGE (0x00) +#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_LEVEL (0x01) +#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ALL_UP (0x00) +#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ONE_OR_MORE_UP (0x01) +#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_MODULE_PRESENT (0x00) +#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_ACTIVE_CABLE_ENABLE (0x01) +#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_CABLE_MGMT_ENABLE (0x02) +#define MPI3_MAN6_GPIO_LICENSE_PARAM1_TYPE_IBUTTON (0x00) +#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_MASK (0x0100) +#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_FAST_EDGE (0x0100) +#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_SLOW_EDGE (0x0000) +#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_MASK (0x00c0) +#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_100OHM (0x0000) +#define 
MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_66OHM (0x0040) +#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_50OHM (0x0080) +#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_33OHM (0x00c0) +#define MPI3_MAN6_GPIO_FLAGS_ALT_DATA_SEL_MASK (0x0030) +#define MPI3_MAN6_GPIO_FLAGS_ALT_DATA_SEL_SHIFT (4) +#define MPI3_MAN6_GPIO_FLAGS_ACTIVE_HIGH (0x0008) +#define MPI3_MAN6_GPIO_FLAGS_BI_DIR_ENABLED (0x0004) +#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_MASK (0x0003) +#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_INPUT (0x0000) +#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_OPEN_DRAIN_OUTPUT (0x0001) +#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_OPEN_SOURCE_OUTPUT (0x0002) +#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_PUSH_PULL_OUTPUT (0x0003) +#ifndef MPI3_MAN6_GPIO_MAX +#define MPI3_MAN6_GPIO_MAX (1) +#endif +struct mpi3_man_page6 { + struct mpi3_config_page_header header; + __le16 flags; + __le16 reserved0a; + u8 num_gpio; + u8 reserved0d[3]; + struct mpi3_man6_gpio_entry gpio[MPI3_MAN6_GPIO_MAX]; +}; + +#define MPI3_MAN6_PAGEVERSION (0x00) +#define MPI3_MAN6_FLAGS_HEARTBEAT_LED_DISABLED (0x0001) +struct mpi3_man7_receptacle_info { + __le32 name[4]; + u8 location; + u8 connector_type; + u8 ped_clk; + u8 connector_id; + __le32 reserved14; +}; + +#define MPI3_MAN7_LOCATION_UNKNOWN (0x00) +#define MPI3_MAN7_LOCATION_INTERNAL (0x01) +#define MPI3_MAN7_LOCATION_EXTERNAL (0x02) +#define MPI3_MAN7_LOCATION_VIRTUAL (0x03) +#define MPI3_MAN7_LOCATION_HOST (0x04) +#define MPI3_MAN7_CONNECTOR_TYPE_NO_INFO (0x00) +#define MPI3_MAN7_PEDCLK_ROUTING_MASK (0x10) +#define MPI3_MAN7_PEDCLK_ROUTING_DIRECT (0x00) +#define MPI3_MAN7_PEDCLK_ROUTING_CLOCK_BUFFER (0x10) +#define MPI3_MAN7_PEDCLK_ID_MASK (0x0f) +#ifndef MPI3_MAN7_RECEPTACLE_INFO_MAX +#define MPI3_MAN7_RECEPTACLE_INFO_MAX (1) +#endif +struct mpi3_man_page7 { + struct mpi3_config_page_header header; + __le32 flags; + u8 num_receptacles; + u8 reserved0d[3]; + __le32 enclosure_name[4]; + struct mpi3_man7_receptacle_info receptacle_info[MPI3_MAN7_RECEPTACLE_INFO_MAX]; +}; + +#define MPI3_MAN7_PAGEVERSION (0x00) +#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_MASK (0x01) +#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_0 (0x00) +#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_1 (0x01) +struct mpi3_man8_phy_info { + u8 receptacle_id; + u8 connector_lane; + __le16 reserved02; + __le16 slotx1; + __le16 slotx2; + __le16 slotx4; + __le16 reserved0a; + __le32 reserved0c; +}; + +#define MPI3_MAN8_PHY_INFO_RECEPTACLE_ID_NOT_ASSOCIATED (0xff) +#define MPI3_MAN8_PHY_INFO_CONNECTOR_LANE_NOT_ASSOCIATED (0xff) +#ifndef MPI3_MAN8_PHY_INFO_MAX +#define MPI3_MAN8_PHY_INFO_MAX (1) +#endif +struct mpi3_man_page8 { + struct mpi3_config_page_header header; + __le32 reserved08; + u8 num_phys; + u8 reserved0d[3]; + struct mpi3_man8_phy_info phy_info[MPI3_MAN8_PHY_INFO_MAX]; +}; + +#define MPI3_MAN8_PAGEVERSION (0x00) +struct mpi3_man9_rsrc_entry { + __le32 maximum; + __le32 decrement; + __le32 minimum; + __le32 actual; +}; + +enum mpi3_man9_resources { + MPI3_MAN9_RSRC_OUTSTANDING_REQS = 0, + MPI3_MAN9_RSRC_TARGET_CMDS = 1, + MPI3_MAN9_RSRC_RESERVED02 = 2, + MPI3_MAN9_RSRC_NVME = 3, + MPI3_MAN9_RSRC_INITIATORS = 4, + MPI3_MAN9_RSRC_VDS = 5, + MPI3_MAN9_RSRC_ENCLOSURES = 6, + MPI3_MAN9_RSRC_ENCLOSURE_PHYS = 7, + MPI3_MAN9_RSRC_EXPANDERS = 8, + MPI3_MAN9_RSRC_PCIE_SWITCHES = 9, + MPI3_MAN9_RSRC_RESERVED10 = 10, + MPI3_MAN9_RSRC_HOST_PD_DRIVES = 11, + MPI3_MAN9_RSRC_ADV_HOST_PD_DRIVES = 12, + MPI3_MAN9_RSRC_RAID_PD_DRIVES = 13, + MPI3_MAN9_RSRC_DRV_DIAG_BUF = 14, + MPI3_MAN9_RSRC_NAMESPACE_COUNT = 15, + MPI3_MAN9_RSRC_NUM_RESOURCES +}; + 
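The manufacturing page 9 resource table just defined pairs one struct mpi3_man9_rsrc_entry with each enum mpi3_man9_resources value; the entry's little-endian maximum, decrement, minimum and actual fields describe the bounds the controller advertises for that resource. The following is an editorial sketch, not part of this patch: a hypothetical helper (the name man9_rsrc_within_bounds and its warning text are illustrative only, and it assumes the definitions above plus the usual kernel headers are in scope) showing how a consumer of this page might sanity-check an entry's reported "actual" allocation against those bounds.

/* Editorial sketch only -- not part of the upstream driver. */
#include <linux/types.h>	/* u32, __le32 */
#include <linux/kernel.h>	/* pr_warn() */
#include <asm/byteorder.h>	/* le32_to_cpu() */

static bool man9_rsrc_within_bounds(const struct mpi3_man9_rsrc_entry *e,
				    enum mpi3_man9_resources which)
{
	u32 min = le32_to_cpu(e->minimum);
	u32 max = le32_to_cpu(e->maximum);
	u32 act = le32_to_cpu(e->actual);

	/* Flag entries whose reported allocation falls outside the bounds. */
	if (act < min || act > max) {
		pr_warn("man9 resource %d: actual %u outside [%u, %u]\n",
			(int)which, act, min, max);
		return false;
	}
	return true;
}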
+#define MPI3_MAN9_MIN_OUTSTANDING_REQS (1) +#define MPI3_MAN9_MAX_OUTSTANDING_REQS (65000) +#define MPI3_MAN9_MIN_TARGET_CMDS (0) +#define MPI3_MAN9_MAX_TARGET_CMDS (65535) +#define MPI3_MAN9_MIN_NVME_TARGETS (0) +#define MPI3_MAN9_MIN_INITIATORS (0) +#define MPI3_MAN9_MIN_VDS (0) +#define MPI3_MAN9_MIN_ENCLOSURES (1) +#define MPI3_MAN9_MAX_ENCLOSURES (65535) +#define MPI3_MAN9_MIN_ENCLOSURE_PHYS (0) +#define MPI3_MAN9_MIN_EXPANDERS (0) +#define MPI3_MAN9_MAX_EXPANDERS (65535) +#define MPI3_MAN9_MIN_PCIE_SWITCHES (0) +#define MPI3_MAN9_MIN_HOST_PD_DRIVES (0) +#define MPI3_MAN9_ADV_HOST_PD_DRIVES (0) +#define MPI3_MAN9_RAID_PD_DRIVES (0) +#define MPI3_MAN9_DRIVER_DIAG_BUFFER (0) +#define MPI3_MAN9_MIN_NAMESPACE_COUNT (1) +#define MPI3_MAN9_MIN_EXPANDERS (0) +#define MPI3_MAN9_MAX_EXPANDERS (65535) +struct mpi3_man_page9 { + struct mpi3_config_page_header header; + u8 num_resources; + u8 reserved09; + __le16 reserved0a; + __le32 reserved0c; + __le32 reserved10; + __le32 reserved14; + __le32 reserved18; + __le32 reserved1c; + struct mpi3_man9_rsrc_entry resource[MPI3_MAN9_RSRC_NUM_RESOURCES]; +}; + +#define MPI3_MAN9_PAGEVERSION (0x00) +struct mpi3_man10_istwi_ctrlr_entry { + __le16 target_address; + __le16 flags; + u8 scl_low_override; + u8 scl_high_override; + __le16 reserved06; +}; + +#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_MASK (0x000c) +#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_100K (0x0000) +#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_400K (0x0004) +#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_TARGET_ENABLED (0x0002) +#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_INITIATOR_ENABLED (0x0001) +#ifndef MPI3_MAN10_ISTWI_CTRLR_MAX +#define MPI3_MAN10_ISTWI_CTRLR_MAX (1) +#endif +struct mpi3_man_page10 { + struct mpi3_config_page_header header; + __le32 reserved08; + u8 num_istwi_ctrl; + u8 reserved0d[3]; + struct mpi3_man10_istwi_ctrlr_entry istwi_controller[MPI3_MAN10_ISTWI_CTRLR_MAX]; +}; + +#define MPI3_MAN10_PAGEVERSION (0x00) +struct mpi3_man11_mux_device_format { + u8 max_channel; + u8 reserved01[3]; + __le32 reserved04; +}; + +struct mpi3_man11_temp_sensor_device_format { + u8 type; + u8 reserved01[3]; + u8 temp_channel[4]; +}; + +#define MPI3_MAN11_TEMP_SENSOR_TYPE_MAX6654 (0x00) +#define MPI3_MAN11_TEMP_SENSOR_TYPE_EMC1442 (0x01) +#define MPI3_MAN11_TEMP_SENSOR_TYPE_ADT7476 (0x02) +#define MPI3_MAN11_TEMP_SENSOR_TYPE_SE97B (0x03) +#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_MASK (0xe0) +#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_SHIFT (5) +#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_ENABLED (0x01) +struct mpi3_man11_seeprom_device_format { + u8 size; + u8 page_write_size; + __le16 reserved02; + __le32 reserved04; +}; + +#define MPI3_MAN11_SEEPROM_SIZE_1KBITS (0x01) +#define MPI3_MAN11_SEEPROM_SIZE_2KBITS (0x02) +#define MPI3_MAN11_SEEPROM_SIZE_4KBITS (0x03) +#define MPI3_MAN11_SEEPROM_SIZE_8KBITS (0x04) +#define MPI3_MAN11_SEEPROM_SIZE_16KBITS (0x05) +#define MPI3_MAN11_SEEPROM_SIZE_32KBITS (0x06) +#define MPI3_MAN11_SEEPROM_SIZE_64KBITS (0x07) +#define MPI3_MAN11_SEEPROM_SIZE_128KBITS (0x08) +struct mpi3_man11_ddr_spd_device_format { + u8 channel; + u8 reserved01[3]; + __le32 reserved04; +}; + +struct mpi3_man11_cable_mgmt_device_format { + u8 type; + u8 receptacle_id; + __le16 reserved02; + __le32 reserved04; +}; + +#define MPI3_MAN11_CABLE_MGMT_TYPE_SFF_8636 (0x00) +struct mpi3_man11_bkplane_spec_ubm_format { + __le16 flags; + __le16 reserved02; +}; + +#define MPI3_MAN11_BKPLANE_UBM_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0200) +#define MPI3_MAN11_BKPLANE_UBM_FLAGS_FORCE_POLLING 
(0x0100) +#define MPI3_MAN11_BKPLANE_UBM_FLAGS_MAX_FRU_MASK (0x00f0) +#define MPI3_MAN11_BKPLANE_UBM_FLAGS_MAX_FRU_SHIFT (4) +#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f) +#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_SHIFT (0) +struct mpi3_man11_bkplane_spec_non_ubm_format { + __le16 flags; + u8 reserved02; + u8 type; +}; + +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_MASK (0xf000) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_SHIFT (12) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0200) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_MASK (0x00c0) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_4 (0x0000) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_2 (0x0040) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_1 (0x0080) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_MASK (0x0030) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_GPIO (0x0000) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_REG (0x0010) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f) +#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_SHIFT (0) +#define MPI3_MAN11_BKPLANE_NON_UBM_TYPE_VPP (0x00) +union mpi3_man11_bkplane_spec_format { + struct mpi3_man11_bkplane_spec_ubm_format ubm; + struct mpi3_man11_bkplane_spec_non_ubm_format non_ubm; +}; + +struct mpi3_man11_bkplane_mgmt_device_format { + u8 type; + u8 receptacle_id; + u8 reset_info; + u8 reserved03; + union mpi3_man11_bkplane_spec_format backplane_mgmt_specific; +}; + +#define MPI3_MAN11_BKPLANE_MGMT_TYPE_UBM (0x00) +#define MPI3_MAN11_BKPLANE_MGMT_TYPE_NON_UBM (0x01) +#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_MASK (0xf0) +#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_SHIFT (4) +#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_MASK (0x0f) +#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_SHIFT (0) +struct mpi3_man11_gas_gauge_device_format { + u8 type; + u8 reserved01[3]; + __le32 reserved04; +}; + +#define MPI3_MAN11_GAS_GAUGE_TYPE_STANDARD (0x00) +struct mpi3_man11_mgmt_ctrlr_device_format { + __le32 reserved00; + __le32 reserved04; +}; +struct mpi3_man11_board_fan_device_format { + u8 flags; + u8 reserved01; + u8 min_fan_speed; + u8 max_fan_speed; + __le32 reserved04; +}; +#define MPI3_MAN11_BOARD_FAN_FLAGS_FAN_CTRLR_TYPE_MASK (0x07) +#define MPI3_MAN11_BOARD_FAN_FLAGS_FAN_CTRLR_TYPE_AMC6821 (0x00) +union mpi3_man11_device_specific_format { + struct mpi3_man11_mux_device_format mux; + struct mpi3_man11_temp_sensor_device_format temp_sensor; + struct mpi3_man11_seeprom_device_format seeprom; + struct mpi3_man11_ddr_spd_device_format ddr_spd; + struct mpi3_man11_cable_mgmt_device_format cable_mgmt; + struct mpi3_man11_bkplane_mgmt_device_format bkplane_mgmt; + struct mpi3_man11_gas_gauge_device_format gas_gauge; + struct mpi3_man11_mgmt_ctrlr_device_format mgmt_controller; + struct mpi3_man11_board_fan_device_format board_fan; + __le32 words[2]; +}; +struct mpi3_man11_istwi_device_format { + u8 device_type; + u8 controller; + u8 reserved02; + u8 flags; + __le16 device_address; + u8 mux_channel; + u8 mux_index; + union mpi3_man11_device_specific_format device_specific; +}; + +#define MPI3_MAN11_ISTWI_DEVTYPE_MUX (0x00) +#define MPI3_MAN11_ISTWI_DEVTYPE_TEMP_SENSOR (0x01) +#define MPI3_MAN11_ISTWI_DEVTYPE_SEEPROM (0x02) +#define MPI3_MAN11_ISTWI_DEVTYPE_DDR_SPD (0x03) +#define MPI3_MAN11_ISTWI_DEVTYPE_CABLE_MGMT (0x04) +#define MPI3_MAN11_ISTWI_DEVTYPE_BACKPLANE_MGMT (0x05) +#define MPI3_MAN11_ISTWI_DEVTYPE_GAS_GAUGE 
(0x06) +#define MPI3_MAN11_ISTWI_DEVTYPE_MGMT_CONTROLLER (0x07) +#define MPI3_MAN11_ISTWI_DEVTYPE_BOARD_FAN (0x08) +#define MPI3_MAN11_ISTWI_FLAGS_MUX_PRESENT (0x01) +#ifndef MPI3_MAN11_ISTWI_DEVICE_MAX +#define MPI3_MAN11_ISTWI_DEVICE_MAX (1) +#endif +struct mpi3_man_page11 { + struct mpi3_config_page_header header; + __le32 reserved08; + u8 num_istwi_dev; + u8 reserved0d[3]; + struct mpi3_man11_istwi_device_format istwi_device[MPI3_MAN11_ISTWI_DEVICE_MAX]; +}; + +#define MPI3_MAN11_PAGEVERSION (0x00) +#ifndef MPI3_MAN12_NUM_SGPIO_MAX +#define MPI3_MAN12_NUM_SGPIO_MAX (1) +#endif +struct mpi3_man12_sgpio_info { + u8 slot_count; + u8 reserved01[3]; + __le32 reserved04; + u8 phy_order[32]; +}; + +struct mpi3_man_page12 { + struct mpi3_config_page_header header; + __le32 flags; + __le32 s_clock_freq; + __le32 activity_modulation; + u8 num_sgpio; + u8 reserved15[3]; + __le32 reserved18; + __le32 reserved1c; + __le32 pattern[8]; + struct mpi3_man12_sgpio_info sgpio_info[MPI3_MAN12_NUM_SGPIO_MAX]; +}; + +#define MPI3_MAN12_PAGEVERSION (0x00) +#define MPI3_MAN12_FLAGS_ERROR_PRESENCE_ENABLED (0x0400) +#define MPI3_MAN12_FLAGS_ACTIVITY_INVERT_ENABLED (0x0200) +#define MPI3_MAN12_FLAGS_GROUP_ID_DISABLED (0x0100) +#define MPI3_MAN12_FLAGS_SIO_CLK_FILTER_ENABLED (0x0004) +#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_MASK (0x0002) +#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_PUSH_PULL (0x0000) +#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_OPEN_DRAIN (0x0002) +#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_MASK (0x0001) +#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_PUSH_PULL (0x0000) +#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_OPEN_DRAIN (0x0001) +#define MPI3_MAN12_SIO_CLK_FREQ_MIN (32) +#define MPI3_MAN12_SIO_CLK_FREQ_MAX (100000) +#define MPI3_MAN12_ACTIVITY_MODULATION_FORCE_OFF_MASK (0x0000f000) +#define MPI3_MAN12_ACTIVITY_MODULATION_FORCE_OFF_SHIFT (12) +#define MPI3_MAN12_ACTIVITY_MODULATION_MAX_ON_MASK (0x00000f00) +#define MPI3_MAN12_ACTIVITY_MODULATION_MAX_ON_SHIFT (8) +#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_OFF_MASK (0x000000f0) +#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_OFF_SHIFT (4) +#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_ON_MASK (0x0000000f) +#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_ON_SHIFT (0) +#define MPI3_MAN12_PATTERN_RATE_MASK (0xe0000000) +#define MPI3_MAN12_PATTERN_RATE_2_HZ (0x00000000) +#define MPI3_MAN12_PATTERN_RATE_4_HZ (0x20000000) +#define MPI3_MAN12_PATTERN_RATE_8_HZ (0x40000000) +#define MPI3_MAN12_PATTERN_RATE_16_HZ (0x60000000) +#define MPI3_MAN12_PATTERN_RATE_10_HZ (0x80000000) +#define MPI3_MAN12_PATTERN_RATE_20_HZ (0xa0000000) +#define MPI3_MAN12_PATTERN_RATE_40_HZ (0xc0000000) +#define MPI3_MAN12_PATTERN_LENGTH_MASK (0x1f000000) +#define MPI3_MAN12_PATTERN_LENGTH_SHIFT (24) +#define MPI3_MAN12_PATTERN_BIT_PATTERN_MASK (0x00ffffff) +#define MPI3_MAN12_PATTERN_BIT_PATTERN_SHIFT (0) +#ifndef MPI3_MAN13_NUM_TRANSLATION_MAX +#define MPI3_MAN13_NUM_TRANSLATION_MAX (1) +#endif +struct mpi3_man13_translation_info { + __le32 slot_status; + __le32 mask; + u8 activity; + u8 locate; + u8 error; + u8 reserved0b; +}; + +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_FAULT (0x20000000) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DEVICE_OFF (0x10000000) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DEVICE_ACTIVITY (0x00800000) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DO_NOT_REMOVE (0x00400000) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DEVICE_MISSING (0x00100000) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_INSERT (0x00080000) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_REMOVAL 
(0x00040000) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_IDENTIFY (0x00020000) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_OK (0x00008000) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_RESERVED_DEVICE (0x00004000) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_HOT_SPARE (0x00002000) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_CONSISTENCY_CHECK (0x00001000) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000800) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_IN_FAILED_ARRAY (0x00000400) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_REBUILD_REMAP (0x00000200) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_REBUILD_REMAP_ABORT (0x00000100) +#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_PREDICTED_FAILURE (0x00000040) +#define MPI3_MAN13_BLINK_PATTERN_FORCE_OFF (0x00) +#define MPI3_MAN13_BLINK_PATTERN_FORCE_ON (0x01) +#define MPI3_MAN13_BLINK_PATTERN_PATTERN_0 (0x02) +#define MPI3_MAN13_BLINK_PATTERN_PATTERN_1 (0x03) +#define MPI3_MAN13_BLINK_PATTERN_PATTERN_2 (0x04) +#define MPI3_MAN13_BLINK_PATTERN_PATTERN_3 (0x05) +#define MPI3_MAN13_BLINK_PATTERN_PATTERN_4 (0x06) +#define MPI3_MAN13_BLINK_PATTERN_PATTERN_5 (0x07) +#define MPI3_MAN13_BLINK_PATTERN_PATTERN_6 (0x08) +#define MPI3_MAN13_BLINK_PATTERN_PATTERN_7 (0x09) +#define MPI3_MAN13_BLINK_PATTERN_ACTIVITY (0x0a) +#define MPI3_MAN13_BLINK_PATTERN_ACTIVITY_TRAIL (0x0b) +struct mpi3_man_page13 { + struct mpi3_config_page_header header; + u8 num_trans; + u8 reserved09[3]; + __le32 reserved0c; + struct mpi3_man13_translation_info translation[MPI3_MAN13_NUM_TRANSLATION_MAX]; +}; + +#define MPI3_MAN13_PAGEVERSION (0x00) +struct mpi3_man_page14 { + struct mpi3_config_page_header header; + __le32 reserved08; + u8 num_slot_groups; + u8 num_slots; + __le16 max_cert_chain_length; + __le32 sealed_slots; + __le32 populated_slots; + __le32 mgmt_pt_updatable_slots; +}; +#define MPI3_MAN14_PAGEVERSION (0x00) +#define MPI3_MAN14_NUMSLOTS_MAX (32) +#ifndef MPI3_MAN15_VERSION_RECORD_MAX +#define MPI3_MAN15_VERSION_RECORD_MAX 1 +#endif +struct mpi3_man15_version_record { + __le16 spdm_version; + __le16 reserved02; +}; + +struct mpi3_man_page15 { + struct mpi3_config_page_header header; + u8 num_version_records; + u8 reserved09[3]; + __le32 reserved0c; + struct mpi3_man15_version_record version_record[MPI3_MAN15_VERSION_RECORD_MAX]; +}; + +#define MPI3_MAN15_PAGEVERSION (0x00) +#ifndef MPI3_MAN16_CERT_ALGO_MAX +#define MPI3_MAN16_CERT_ALGO_MAX 1 +#endif +struct mpi3_man16_certificate_algorithm { + u8 slot_group; + u8 reserved01[3]; + __le32 base_asym_algo; + __le32 base_hash_algo; + __le32 reserved0c[3]; +}; + +struct mpi3_man_page16 { + struct mpi3_config_page_header header; + __le32 reserved08; + u8 num_cert_algos; + u8 reserved0d[3]; + struct mpi3_man16_certificate_algorithm certificate_algorithm[MPI3_MAN16_CERT_ALGO_MAX]; +}; + +#define MPI3_MAN16_PAGEVERSION (0x00) +#ifndef MPI3_MAN17_HASH_ALGORITHM_MAX +#define MPI3_MAN17_HASH_ALGORITHM_MAX 1 +#endif +struct mpi3_man17_hash_algorithm { + u8 meas_specification; + u8 reserved01[3]; + __le32 measurement_hash_algo; + __le32 reserved08[2]; +}; + +struct mpi3_man_page17 { + struct mpi3_config_page_header header; + __le32 reserved08; + u8 num_hash_algos; + u8 reserved0d[3]; + struct mpi3_man17_hash_algorithm hash_algorithm[MPI3_MAN17_HASH_ALGORITHM_MAX]; +}; + +#define MPI3_MAN17_PAGEVERSION (0x00) +struct mpi3_man_page20 { + struct mpi3_config_page_header header; + __le32 reserved08; + __le32 nonpremium_features; + u8 allowed_personalities; + u8 reserved11[3]; +}; + +#define MPI3_MAN20_PAGEVERSION (0x00) +#define 
MPI3_MAN20_ALLOWEDPERSON_RAID_MASK (0x02) +#define MPI3_MAN20_ALLOWEDPERSON_RAID_ALLOWED (0x02) +#define MPI3_MAN20_ALLOWEDPERSON_RAID_NOT_ALLOWED (0x00) +#define MPI3_MAN20_ALLOWEDPERSON_EHBA_MASK (0x01) +#define MPI3_MAN20_ALLOWEDPERSON_EHBA_ALLOWED (0x01) +#define MPI3_MAN20_ALLOWEDPERSON_EHBA_NOT_ALLOWED (0x00) +#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_MASK (0x01) +#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_ENABLED (0x00) +#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_DISABLED (0x01) +struct mpi3_man_page21 { + struct mpi3_config_page_header header; + __le32 reserved08; + __le32 flags; +}; + +#define MPI3_MAN21_PAGEVERSION (0x00) +#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_MASK (0x00000060) +#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_BLOCK (0x00000000) +#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_ALLOW (0x00000020) +#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_WARN (0x00000040) +#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_MASK (0x00000008) +#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_ALLOW (0x00000000) +#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_PREVENT (0x00000008) +#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_MASK (0x00000001) +#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_DEFAULT (0x00000000) +#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_OEM_SPECIFIC (0x00000001) +#ifndef MPI3_MAN_PROD_SPECIFIC_MAX +#define MPI3_MAN_PROD_SPECIFIC_MAX (1) +#endif +struct mpi3_man_page_product_specific { + struct mpi3_config_page_header header; + __le32 product_specific_info[MPI3_MAN_PROD_SPECIFIC_MAX]; +}; + +struct mpi3_io_unit_page0 { + struct mpi3_config_page_header header; + __le64 unique_value; + __le32 nvdata_version_default; + __le32 nvdata_version_persistent; +}; + +#define MPI3_IOUNIT0_PAGEVERSION (0x00) +struct mpi3_io_unit_page1 { + struct mpi3_config_page_header header; + __le32 flags; + u8 dmd_io_delay; + u8 dmd_report_pcie; + u8 dmd_report_sata; + u8 dmd_report_sas; +}; + +#define MPI3_IOUNIT1_PAGEVERSION (0x00) +#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_MASK (0x00000030) +#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_ENABLE (0x00000000) +#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_DISABLE (0x00000010) +#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_NO_MODIFY (0x00000020) +#define MPI3_IOUNIT1_FLAGS_ATA_SECURITY_FREEZE_LOCK (0x00000008) +#define MPI3_IOUNIT1_FLAGS_WRITE_SAME_BUFFER (0x00000004) +#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_MASK (0x00000003) +#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_ENABLE (0x00000000) +#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_DISABLE (0x00000001) +#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_UNCHANGED (0x00000002) +#define MPI3_IOUNIT1_DMD_REPORT_DELAY_TIME_MASK (0x7f) +#define MPI3_IOUNIT1_DMD_REPORT_UNIT_16_SEC (0x80) +#ifndef MPI3_IO_UNIT2_GPIO_VAL_MAX +#define MPI3_IO_UNIT2_GPIO_VAL_MAX (1) +#endif +struct mpi3_io_unit_page2 { + struct mpi3_config_page_header header; + u8 gpio_count; + u8 reserved09[3]; + __le16 gpio_val[MPI3_IO_UNIT2_GPIO_VAL_MAX]; +}; + +#define MPI3_IOUNIT2_PAGEVERSION (0x00) +#define MPI3_IOUNIT2_GPIO_FUNCTION_MASK (0xfffc) +#define MPI3_IOUNIT2_GPIO_FUNCTION_SHIFT (2) +#define MPI3_IOUNIT2_GPIO_SETTING_MASK (0x0001) +#define MPI3_IOUNIT2_GPIO_SETTING_OFF (0x0000) +#define MPI3_IOUNIT2_GPIO_SETTING_ON (0x0001) +struct mpi3_io_unit3_sensor { + __le16 flags; + u8 threshold_margin; + u8 reserved03; + __le16 threshold[3]; + __le16 reserved0a; + __le32 reserved0c; + __le32 reserved10; + __le32 reserved14; +}; + +#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_EVENT_ENABLED (0x0010) +#define 
MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_ACTION_ENABLED (0x0008) +#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_EVENT_ENABLED (0x0004) +#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_ACTION_ENABLED (0x0002) +#define MPI3_IOUNIT3_SENSOR_FLAGS_WARNING_EVENT_ENABLED (0x0001) +#ifndef MPI3_IO_UNIT3_SENSOR_MAX +#define MPI3_IO_UNIT3_SENSOR_MAX (1) +#endif +struct mpi3_io_unit_page3 { + struct mpi3_config_page_header header; + __le32 reserved08; + u8 num_sensors; + u8 nominal_poll_interval; + u8 warning_poll_interval; + u8 reserved0f; + struct mpi3_io_unit3_sensor sensor[MPI3_IO_UNIT3_SENSOR_MAX]; +}; + +#define MPI3_IOUNIT3_PAGEVERSION (0x00) +struct mpi3_io_unit4_sensor { + __le16 current_temperature; + __le16 reserved02; + u8 flags; + u8 reserved05[3]; + __le16 istwi_index; + u8 channel; + u8 reserved0b; + __le32 reserved0c; +}; + +#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_MASK (0xe0) +#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_SHIFT (5) +#define MPI3_IOUNIT4_SENSOR_FLAGS_TEMP_VALID (0x01) +#define MPI3_IOUNIT4_SENSOR_ISTWI_INDEX_INTERNAL (0xffff) +#define MPI3_IOUNIT4_SENSOR_CHANNEL_RESERVED (0xff) +#ifndef MPI3_IO_UNIT4_SENSOR_MAX +#define MPI3_IO_UNIT4_SENSOR_MAX (1) +#endif +struct mpi3_io_unit_page4 { + struct mpi3_config_page_header header; + __le32 reserved08; + u8 num_sensors; + u8 reserved0d[3]; + struct mpi3_io_unit4_sensor sensor[MPI3_IO_UNIT4_SENSOR_MAX]; +}; + +#define MPI3_IOUNIT4_PAGEVERSION (0x00) +struct mpi3_io_unit5_spinup_group { + u8 max_target_spinup; + u8 spinup_delay; + u8 spinup_flags; + u8 reserved03; +}; + +#define MPI3_IOUNIT5_SPINUP_FLAGS_DISABLE (0x01) +#ifndef MPI3_IO_UNIT5_PHY_MAX +#define MPI3_IO_UNIT5_PHY_MAX (4) +#endif +struct mpi3_io_unit_page5 { + struct mpi3_config_page_header header; + struct mpi3_io_unit5_spinup_group spinup_group_parameters[4]; + __le32 reserved18; + __le32 reserved1c; + __le16 device_shutdown; + __le16 reserved22; + u8 pcie_device_wait_time; + u8 sata_device_wait_time; + u8 spinup_encl_drive_count; + u8 spinup_encl_delay; + u8 num_phys; + u8 pe_initial_spinup_delay; + u8 topology_stable_time; + u8 flags; + u8 phy[MPI3_IO_UNIT5_PHY_MAX]; +}; + +#define MPI3_IOUNIT5_PAGEVERSION (0x00) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NO_ACTION (0x00) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_ATTACHED (0x01) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_EXPANDER_ATTACHED (0x02) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SWITCH_ATTACHED (0x02) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_EXPANDER (0x03) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_SWITCH (0x03) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_MASK (0x0300) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_SHIFT (8) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_MASK (0x00c0) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_SHIFT (6) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_MASK (0x0030) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_SHIFT (4) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_MASK (0x000c) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_SHIFT (2) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_MASK (0x0003) +#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_SHIFT (0) +#define MPI3_IOUNIT5_FLAGS_SATAPUIS_MASK (0x0c) +#define MPI3_IOUNIT5_FLAGS_SATAPUIS_NOT_SUPPORTED (0x00) +#define MPI3_IOUNIT5_FLAGS_SATAPUIS_OS_CONTROLLED (0x04) +#define MPI3_IOUNIT5_FLAGS_SATAPUIS_APP_CONTROLLED (0x08) +#define MPI3_IOUNIT5_FLAGS_SATAPUIS_BLOCKED (0x0c) +#define MPI3_IOUNIT5_FLAGS_POWER_CAPABLE_SPINUP (0x02) +#define MPI3_IOUNIT5_FLAGS_AUTO_PORT_ENABLE (0x01) +#define MPI3_IOUNIT5_PHY_SPINUP_GROUP_MASK (0x03) +struct 
mpi3_io_unit_page6 { + struct mpi3_config_page_header header; + __le32 board_power_requirement; + __le32 pci_slot_power_allocation; + u8 flags; + u8 reserved11[3]; +}; + +#define MPI3_IOUNIT6_PAGEVERSION (0x00) +#define MPI3_IOUNIT6_FLAGS_ACT_CABLE_PWR_EXC (0x01) +#ifndef MPI3_IOUNIT8_DIGEST_MAX +#define MPI3_IOUNIT8_DIGEST_MAX (1) +#endif +union mpi3_iounit8_digest { + __le32 dword[16]; + __le16 word[32]; + u8 byte[64]; +}; + +struct mpi3_io_unit_page8 { + struct mpi3_config_page_header header; + u8 sb_mode; + u8 sb_state; + __le16 reserved0a; + u8 num_slots; + u8 slots_available; + u8 current_key_encryption_algo; + u8 key_digest_hash_algo; + union mpi3_version_union current_svn; + __le32 reserved14; + __le32 current_key[128]; + union mpi3_iounit8_digest digest[MPI3_IOUNIT8_DIGEST_MAX]; +}; + +#define MPI3_IOUNIT8_PAGEVERSION (0x00) +#define MPI3_IOUNIT8_SBMODE_SECURE_DEBUG (0x04) +#define MPI3_IOUNIT8_SBMODE_HARD_SECURE (0x02) +#define MPI3_IOUNIT8_SBMODE_CONFIG_SECURE (0x01) +#define MPI3_IOUNIT8_SBSTATE_SVN_UPDATE_PENDING (0x04) +#define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02) +#define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01) +struct mpi3_io_unit_page9 { + struct mpi3_config_page_header header; + __le32 flags; + __le16 first_device; + __le16 reserved0e; +}; + +#define MPI3_IOUNIT9_PAGEVERSION (0x00) +#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_MASK (0x00000006) +#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_SHIFT (1) +#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_NONE (0x00000000) +#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_RECEPTACLE (0x00000002) +#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_BACKPLANE_TYPE (0x00000004) +#define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x00000001) +#define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff) +struct mpi3_io_unit_page10 { + struct mpi3_config_page_header header; + u8 flags; + u8 reserved09[3]; + __le32 silicon_id; + u8 fw_version_minor; + u8 fw_version_major; + u8 hw_version_minor; + u8 hw_version_major; + u8 part_number[16]; +}; +#define MPI3_IOUNIT10_PAGEVERSION (0x00) +#define MPI3_IOUNIT10_FLAGS_VALID (0x01) +#define MPI3_IOUNIT10_FLAGS_ACTIVEID_MASK (0x02) +#define MPI3_IOUNIT10_FLAGS_ACTIVEID_FIRST_REGION (0x00) +#define MPI3_IOUNIT10_FLAGS_ACTIVEID_SECOND_REGION (0x02) +#define MPI3_IOUNIT10_FLAGS_PBLP_EXPECTED (0x80) +#ifndef MPI3_IOUNIT11_PROFILE_MAX +#define MPI3_IOUNIT11_PROFILE_MAX (1) +#endif +struct mpi3_iounit11_profile { + u8 profile_identifier; + u8 reserved01[3]; + __le16 max_vds; + __le16 max_host_pds; + __le16 max_adv_host_pds; + __le16 max_raid_pds; + __le16 max_nvme; + __le16 max_outstanding_requests; + __le16 subsystem_id; + __le16 reserved12; + __le32 reserved14[2]; +}; +struct mpi3_io_unit_page11 { + struct mpi3_config_page_header header; + __le32 reserved08; + u8 num_profiles; + u8 current_profile_identifier; + __le16 reserved0e; + struct mpi3_iounit11_profile profile[MPI3_IOUNIT11_PROFILE_MAX]; +}; +#define MPI3_IOUNIT11_PAGEVERSION (0x00) +#ifndef MPI3_IOUNIT12_BUCKET_MAX +#define MPI3_IOUNIT12_BUCKET_MAX (1) +#endif +struct mpi3_iounit12_bucket { + u8 coalescing_depth; + u8 coalescing_timeout; + __le16 io_count_low_boundary; + __le32 reserved04; +}; +struct mpi3_io_unit_page12 { + struct mpi3_config_page_header header; + __le32 flags; + __le32 reserved0c[4]; + u8 num_buckets; + u8 reserved1d[3]; + struct mpi3_iounit12_bucket bucket[MPI3_IOUNIT12_BUCKET_MAX]; +}; +#define MPI3_IOUNIT12_PAGEVERSION (0x00) +#define MPI3_IOUNIT12_FLAGS_NUMPASSES_MASK (0x00000300) +#define 
MPI3_IOUNIT12_FLAGS_NUMPASSES_SHIFT (8) +#define MPI3_IOUNIT12_FLAGS_NUMPASSES_8 (0x00000000) +#define MPI3_IOUNIT12_FLAGS_NUMPASSES_16 (0x00000100) +#define MPI3_IOUNIT12_FLAGS_NUMPASSES_32 (0x00000200) +#define MPI3_IOUNIT12_FLAGS_NUMPASSES_64 (0x00000300) +#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_MASK (0x00000003) +#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_DISABLED (0x00000000) +#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_500US (0x00000001) +#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_1MS (0x00000002) +#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_2MS (0x00000003) +#ifndef MPI3_IOUNIT13_FUNC_MAX +#define MPI3_IOUNIT13_FUNC_MAX (1) +#endif +struct mpi3_iounit13_allowed_function { + __le16 sub_function; + u8 function_code; + u8 function_flags; +}; +#define MPI3_IOUNIT13_FUNCTION_FLAGS_ADMIN_BLOCKED (0x04) +#define MPI3_IOUNIT13_FUNCTION_FLAGS_OOB_BLOCKED (0x02) +#define MPI3_IOUNIT13_FUNCTION_FLAGS_CHECK_SUBFUNCTION_ENABLED (0x01) +struct mpi3_io_unit_page13 { + struct mpi3_config_page_header header; + __le16 flags; + __le16 reserved0a; + u8 num_allowed_functions; + u8 reserved0d[3]; + struct mpi3_iounit13_allowed_function allowed_function[MPI3_IOUNIT13_FUNC_MAX]; +}; +#define MPI3_IOUNIT13_PAGEVERSION (0x00) +#define MPI3_IOUNIT13_FLAGS_ADMIN_BLOCKED (0x0002) +#define MPI3_IOUNIT13_FLAGS_OOB_BLOCKED (0x0001) +#ifndef MPI3_IOUNIT14_MD_MAX +#define MPI3_IOUNIT14_MD_MAX (1) +#endif +struct mpi3_iounit14_pagemetadata { + u8 page_type; + u8 page_number; + u8 reserved02; + u8 page_flags; +}; +#define MPI3_IOUNIT14_PAGEMETADATA_PAGEFLAGS_OOBWRITE_ALLOWED (0x02) +#define MPI3_IOUNIT14_PAGEMETADATA_PAGEFLAGS_HOSTWRITE_ALLOWED (0x01) +struct mpi3_io_unit_page14 { + struct mpi3_config_page_header header; + u8 flags; + u8 reserved09[3]; + u8 num_pages; + u8 reserved0d[3]; + struct mpi3_iounit14_pagemetadata page_metadata[MPI3_IOUNIT14_MD_MAX]; +}; +#define MPI3_IOUNIT14_PAGEVERSION (0x00) +#define MPI3_IOUNIT14_FLAGS_READONLY (0x01) +#ifndef MPI3_IOUNIT15_PBD_MAX +#define MPI3_IOUNIT15_PBD_MAX (1) +#endif +struct mpi3_io_unit_page15 { + struct mpi3_config_page_header header; + u8 flags; + u8 reserved09[3]; + __le32 reserved0c; + u8 power_budgeting_capability; + u8 reserved11[3]; + u8 num_power_budget_data; + u8 reserved15[3]; + __le32 power_budget_data[MPI3_IOUNIT15_PBD_MAX]; +}; +#define MPI3_IOUNIT15_PAGEVERSION (0x00) +#define MPI3_IOUNIT15_FLAGS_EPRINIT_INITREQUIRED (0x04) +#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_MASK (0x03) +#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_NOT_SUPPORTED (0x00) +#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITHOUT_POWER_BRAKE_GPIO (0x01) +#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITH_POWER_BRAKE_GPIO (0x02) +#define MPI3_IOUNIT15_NUMPOWERBUDGETDATA_POWER_BUDGETING_DISABLED (0x00) +struct mpi3_ioc_page0 { + struct mpi3_config_page_header header; + __le32 reserved08; + __le16 vendor_id; + __le16 device_id; + u8 revision_id; + u8 reserved11[3]; + __le32 class_code; + __le16 subsystem_vendor_id; + __le16 subsystem_id; +}; + +#define MPI3_IOC0_PAGEVERSION (0x00) +struct mpi3_ioc_page1 { + struct mpi3_config_page_header header; + __le32 coalescing_timeout; + u8 coalescing_depth; + u8 obsolete; + __le16 reserved0e; +}; +#define MPI3_IOC1_PAGEVERSION (0x00) +#ifndef MPI3_IOC2_EVENTMASK_WORDS +#define MPI3_IOC2_EVENTMASK_WORDS (4) +#endif +struct mpi3_ioc_page2 { + struct mpi3_config_page_header header; + __le32 reserved08; + __le16 sas_broadcast_primitive_masks; + __le16 sas_notify_primitive_masks; + __le32 event_masks[MPI3_IOC2_EVENTMASK_WORDS]; +}; + +#define MPI3_IOC2_PAGEVERSION (0x00) +#define 
MPI3_DRIVER_FLAGS_ADMINRAIDPD_BLOCKED (0x0010) +#define MPI3_DRIVER_FLAGS_OOBRAIDPD_BLOCKED (0x0008) +#define MPI3_DRIVER_FLAGS_OOBRAIDVD_BLOCKED (0x0004) +#define MPI3_DRIVER_FLAGS_OOBADVHOSTPD_BLOCKED (0x0002) +#define MPI3_DRIVER_FLAGS_OOBHOSTPD_BLOCKED (0x0001) +struct mpi3_allowed_cmd_scsi { + __le16 service_action; + u8 operation_code; + u8 command_flags; +}; + +struct mpi3_allowed_cmd_ata { + u8 subcommand; + u8 reserved01; + u8 command; + u8 command_flags; +}; + +struct mpi3_allowed_cmd_nvme { + u8 reserved00; + u8 nvme_cmd_flags; + u8 op_code; + u8 command_flags; +}; + +#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_MASK (0x80) +#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_IO (0x00) +#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_ADMIN (0x80) +#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_MASK (0x3f) +#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_NVM (0x00) +union mpi3_allowed_cmd { + struct mpi3_allowed_cmd_scsi scsi; + struct mpi3_allowed_cmd_ata ata; + struct mpi3_allowed_cmd_nvme nvme; +}; + +#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_ADMINRAIDPD_BLOCKED (0x20) +#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDPD_BLOCKED (0x10) +#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDVD_BLOCKED (0x08) +#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBADVHOSTPD_BLOCKED (0x04) +#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBHOSTPD_BLOCKED (0x02) +#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_CHECKSUBCMD_ENABLED (0x01) +#ifndef MPI3_ALLOWED_CMDS_MAX +#define MPI3_ALLOWED_CMDS_MAX (1) +#endif +struct mpi3_driver_page0 { + struct mpi3_config_page_header header; + __le32 bsd_options; + u8 ssu_timeout; + u8 io_timeout; + u8 tur_retries; + u8 tur_interval; + u8 reserved10; + u8 security_key_timeout; + __le16 reserved12; + __le32 reserved14; + __le32 reserved18; +}; +#define MPI3_DRIVER0_PAGEVERSION (0x00) +#define MPI3_DRIVER0_BSDOPTS_HEADLESS_MODE_ENABLE (0x00000008) +#define MPI3_DRIVER0_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004) +#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK (0x00000003) +#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000) +#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001) +#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_INTERNAL_DEVS (0x00000002) +struct mpi3_driver_page1 { + struct mpi3_config_page_header header; + __le32 flags; + __le32 reserved0c; + __le16 host_diag_trace_max_size; + __le16 host_diag_trace_min_size; + __le16 host_diag_trace_decrement_size; + __le16 reserved16; + __le16 host_diag_fw_max_size; + __le16 host_diag_fw_min_size; + __le16 host_diag_fw_decrement_size; + __le16 reserved1e; + __le16 host_diag_driver_max_size; + __le16 host_diag_driver_min_size; + __le16 host_diag_driver_decrement_size; + __le16 reserved26; +}; + +#define MPI3_DRIVER1_PAGEVERSION (0x00) +#ifndef MPI3_DRIVER2_TRIGGER_MAX +#define MPI3_DRIVER2_TRIGGER_MAX (1) +#endif +struct mpi3_driver2_trigger_event { + u8 type; + u8 flags; + u8 reserved02; + u8 event; + __le32 reserved04[3]; +}; + +struct mpi3_driver2_trigger_scsi_sense { + u8 type; + u8 flags; + __le16 reserved02; + u8 ascq; + u8 asc; + u8 sense_key; + u8 reserved07; + __le32 reserved08[2]; +}; + +#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL (0xff) +#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL (0xff) +#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL (0xff) +struct mpi3_driver2_trigger_reply { + u8 type; + u8 flags; + __le16 ioc_status; + __le32 ioc_log_info; + __le32 ioc_log_info_mask; + __le32 reserved0c; +}; + +#define 
MPI3_DRIVER2_TRIGGER_REPLY_IOCSTATUS_MATCH_ALL (0xffff) +union mpi3_driver2_trigger_element { + struct mpi3_driver2_trigger_event event; + struct mpi3_driver2_trigger_scsi_sense scsi_sense; + struct mpi3_driver2_trigger_reply reply; +}; + +#define MPI3_DRIVER2_TRIGGER_TYPE_EVENT (0x00) +#define MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE (0x01) +#define MPI3_DRIVER2_TRIGGER_TYPE_REPLY (0x02) +#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_TRACE_RELEASE (0x02) +#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE (0x01) +struct mpi3_driver_page2 { + struct mpi3_config_page_header header; + __le64 global_trigger; + __le32 reserved10[3]; + u8 num_triggers; + u8 reserved1d[3]; + union mpi3_driver2_trigger_element trigger[MPI3_DRIVER2_TRIGGER_MAX]; +}; + +#define MPI3_DRIVER2_PAGEVERSION (0x00) +#define MPI3_DRIVER2_GLOBALTRIGGER_DIAG_TRACE_RELEASE (0x8000000000000000ULL) +#define MPI3_DRIVER2_GLOBALTRIGGER_DIAG_FW_RELEASE (0x4000000000000000ULL) +#define MPI3_DRIVER2_GLOBALTRIGGER_SNAPDUMP_ENABLED (0x2000000000000000ULL) +#define MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_TRACE_DISABLED (0x1000000000000000ULL) +#define MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_FW_DISABLED (0x0800000000000000ULL) +#define MPI3_DRIVER2_GLOBALTRIGGER_DEVICE_REMOVAL_ENABLED (0x0000000000000004ULL) +#define MPI3_DRIVER2_GLOBALTRIGGER_TASK_MANAGEMENT_ENABLED (0x0000000000000002ULL) +struct mpi3_driver_page10 { + struct mpi3_config_page_header header; + __le16 flags; + __le16 reserved0a; + u8 num_allowed_commands; + u8 reserved0d[3]; + union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX]; +}; + +#define MPI3_DRIVER10_PAGEVERSION (0x00) +struct mpi3_driver_page20 { + struct mpi3_config_page_header header; + __le16 flags; + __le16 reserved0a; + u8 num_allowed_commands; + u8 reserved0d[3]; + union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX]; +}; + +#define MPI3_DRIVER20_PAGEVERSION (0x00) +struct mpi3_driver_page30 { + struct mpi3_config_page_header header; + __le16 flags; + __le16 reserved0a; + u8 num_allowed_commands; + u8 reserved0d[3]; + union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX]; +}; + +#define MPI3_DRIVER30_PAGEVERSION (0x00) +union mpi3_security_mac { + __le32 dword[16]; + __le16 word[32]; + u8 byte[64]; +}; + +union mpi3_security_nonce { + __le32 dword[16]; + __le16 word[32]; + u8 byte[64]; +}; + +union mpi3_security_root_digest { + __le32 dword[16]; + __le16 word[32]; + u8 byte[64]; +}; + +union mpi3_security0_cert_chain { + __le32 dword[1024]; + __le16 word[2048]; + u8 byte[4096]; +}; + +struct mpi3_security_page0 { + struct mpi3_config_page_header header; + u8 slot_num_group; + u8 slot_num; + __le16 cert_chain_length; + u8 cert_chain_flags; + u8 reserved0d[3]; + __le32 base_asym_algo; + __le32 base_hash_algo; + __le32 reserved18[4]; + union mpi3_security_mac mac; + union mpi3_security_nonce nonce; + union mpi3_security0_cert_chain certificate_chain; +}; + +#define MPI3_SECURITY0_PAGEVERSION (0x00) +#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_MASK (0x0e) +#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_UNUSED (0x00) +#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_CERBERUS (0x02) +#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_SPDM (0x04) +#define MPI3_SECURITY0_CERTCHAIN_FLAGS_SEALED (0x01) +#ifndef MPI3_SECURITY1_KEY_RECORD_MAX +#define MPI3_SECURITY1_KEY_RECORD_MAX 1 +#endif +#ifndef MPI3_SECURITY1_PAD_MAX +#define MPI3_SECURITY1_PAD_MAX 4 +#endif +union mpi3_security1_key_data { + __le32 dword[128]; + __le16 word[256]; + u8 byte[512]; +}; + +struct mpi3_security1_key_record { + u8 flags; 
+ u8 consumer; + __le16 key_data_size; + __le32 additional_key_data; + __le32 reserved08[2]; + union mpi3_security1_key_data key_data; +}; + +#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_MASK (0x1f) +#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_NOT_VALID (0x00) +#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_HMAC (0x01) +#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_AES (0x02) +#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_ECDSA_PRIVATE (0x03) +#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_ECDSA_PUBLIC (0x04) +#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_NOT_VALID (0x00) +#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_SAFESTORE (0x01) +#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CERT_CHAIN (0x02) +#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_DEVICE_KEY (0x03) +#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CACHE_OFFLOAD (0x04) +struct mpi3_security_page1 { + struct mpi3_config_page_header header; + __le32 reserved08[2]; + union mpi3_security_mac mac; + union mpi3_security_nonce nonce; + u8 num_keys; + u8 reserved91[3]; + __le32 reserved94[3]; + struct mpi3_security1_key_record key_record[MPI3_SECURITY1_KEY_RECORD_MAX]; + u8 pad[MPI3_SECURITY1_PAD_MAX]; +}; + +#define MPI3_SECURITY1_PAGEVERSION (0x00) +#ifndef MPI3_SECURITY2_TRUSTED_ROOT_MAX +#define MPI3_SECURITY2_TRUSTED_ROOT_MAX 1 +#endif +struct mpi3_security2_trusted_root { + u8 level; + u8 hash_algorithm; + __le16 trusted_root_flags; + __le32 reserved04[3]; + union mpi3_security_root_digest root_digest; +}; +#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_HASHALGOSOURCE_MASK (0x0006) +#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_HASHALGOSOURCE_SHIFT (1) +#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_HASHALGOSOURCE_HA_FIELD (0x0000) +#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_HASHALGOSOURCE_AKI (0x0002) +#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_USERPROVISIONED_YES (0x0001) +struct mpi3_security_page2 { + struct mpi3_config_page_header header; + __le32 reserved08[2]; + union mpi3_security_mac mac; + union mpi3_security_nonce nonce; + __le32 reserved90[3]; + u8 num_roots; + u8 reserved9d[3]; + struct mpi3_security2_trusted_root trusted_root[MPI3_SECURITY2_TRUSTED_ROOT_MAX]; +}; +#define MPI3_SECURITY2_PAGEVERSION (0x00) +struct mpi3_sas_io_unit0_phy_data { + u8 io_unit_port; + u8 port_flags; + u8 phy_flags; + u8 negotiated_link_rate; + __le16 controller_phy_device_info; + __le16 reserved06; + __le16 attached_dev_handle; + __le16 controller_dev_handle; + __le32 discovery_status; + __le32 reserved10; +}; + +#ifndef MPI3_SAS_IO_UNIT0_PHY_MAX +#define MPI3_SAS_IO_UNIT0_PHY_MAX (1) +#endif +struct mpi3_sas_io_unit_page0 { + struct mpi3_config_page_header header; + __le32 reserved08; + u8 num_phys; + u8 init_status; + __le16 reserved0e; + struct mpi3_sas_io_unit0_phy_data phy_data[MPI3_SAS_IO_UNIT0_PHY_MAX]; +}; + +#define MPI3_SASIOUNIT0_PAGEVERSION (0x00) +#define MPI3_SASIOUNIT0_INITSTATUS_NO_ERRORS (0x00) +#define MPI3_SASIOUNIT0_INITSTATUS_NEEDS_INITIALIZATION (0x01) +#define MPI3_SASIOUNIT0_INITSTATUS_NO_TARGETS_ALLOCATED (0x02) +#define MPI3_SASIOUNIT0_INITSTATUS_BAD_NUM_PHYS (0x04) +#define MPI3_SASIOUNIT0_INITSTATUS_UNSUPPORTED_CONFIG (0x05) +#define MPI3_SASIOUNIT0_INITSTATUS_HOST_PHYS_ENABLED (0x06) +#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MIN (0xf0) +#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MAX (0xff) +#define MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS (0x08) +#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_MASK (0x03) +#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_IOUNIT1 (0x00) 
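The SAS IO Unit Page 0 layout defined above is read-only discovery state: init_status reports whether the IO unit came up cleanly, and each phy_data entry carries per-phy attachment and discovery status. A minimal sketch of how a consumer of this page might walk a fetched buffer follows; it is illustrative only, not code from the driver. The example_* name is hypothetical, pr_info/le16_to_cpu/le32_to_cpu are the usual kernel helpers assumed to be available, and the caller is assumed to have read a buffer large enough for num_phys entries (the phy_data[1] declaration is only a template for a variable-length page).

/* Illustrative sketch: report SAS IO Unit Page 0 state after a config read. */
static void example_dump_sas_iounit0(const struct mpi3_sas_io_unit_page0 *pg0)
{
	u8 i;

	if (pg0->init_status != MPI3_SASIOUNIT0_INITSTATUS_NO_ERRORS)
		pr_info("SAS IO unit init_status 0x%02x\n", pg0->init_status);

	for (i = 0; i < pg0->num_phys; i++) {
		const struct mpi3_sas_io_unit0_phy_data *phy = &pg0->phy_data[i];

		pr_info("phy %u: port %u, attached handle 0x%04x, discovery status 0x%08x\n",
			i, phy->io_unit_port,
			le16_to_cpu(phy->attached_dev_handle),
			le32_to_cpu(phy->discovery_status));

		if (phy->port_flags & MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS)
			pr_info("phy %u: discovery still in progress\n", i);
	}
}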
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_DYNAMIC (0x01) +#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_BACKPLANE (0x02) +#define MPI3_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40) +#define MPI3_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20) +#define MPI3_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) +#define MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY (0x02) +#define MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY (0x01) +struct mpi3_sas_io_unit1_phy_data { + u8 io_unit_port; + u8 port_flags; + u8 phy_flags; + u8 max_min_link_rate; + __le16 controller_phy_device_info; + __le16 max_target_port_connect_time; + __le32 reserved08; +}; + +#ifndef MPI3_SAS_IO_UNIT1_PHY_MAX +#define MPI3_SAS_IO_UNIT1_PHY_MAX (1) +#endif +struct mpi3_sas_io_unit_page1 { + struct mpi3_config_page_header header; + __le16 control_flags; + __le16 sas_narrow_max_queue_depth; + __le16 additional_control_flags; + __le16 sas_wide_max_queue_depth; + u8 num_phys; + u8 sata_max_q_depth; + __le16 reserved12; + struct mpi3_sas_io_unit1_phy_data phy_data[MPI3_SAS_IO_UNIT1_PHY_MAX]; +}; + +#define MPI3_SASIOUNIT1_PAGEVERSION (0x00) +#define MPI3_SASIOUNIT1_CONTROL_CONTROLLER_DEVICE_SELF_TEST (0x8000) +#define MPI3_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000) +#define MPI3_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080) +#define MPI3_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040) +#define MPI3_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020) +#define MPI3_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010) +#define MPI3_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008) +#define MPI3_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004) +#define MPI3_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002) +#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_MASK (0x0001) +#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_DEVICE_NAME (0x0000) +#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_SAS_ADDRESS (0x0001) +#define MPI3_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100) +#define MPI3_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080) +#define MPI3_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040) +#define MPI3_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020) +#define MPI3_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010) +#define MPI3_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008) +#define MPI3_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004) +#define MPI3_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002) +#define MPI3_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001) +#define MPI3_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01) +#define MPI3_SASIOUNIT1_PHYFLAGS_INIT_PERSIST_CONNECT (0x40) +#define MPI3_SASIOUNIT1_PHYFLAGS_TARG_PERSIST_CONNECT (0x20) +#define MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) +#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_MASK (0xf0) +#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_SHIFT (4) +#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_6_0 (0xa0) +#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_12_0 (0xb0) +#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_22_5 (0xc0) +#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_MASK (0x0f) +#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_6_0 (0x0a) +#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_12_0 (0x0b) +#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_22_5 (0x0c) +struct mpi3_sas_io_unit2_phy_pm_settings { + u8 control_flags; + u8 reserved01; + __le16 inactivity_timer_exponent; + u8 sata_partial_timeout; + u8 reserved05; + u8 sata_slumber_timeout; + u8 reserved07; + u8 sas_partial_timeout; + u8 reserved09; + u8 sas_slumber_timeout; + u8 reserved0b; +}; + +#ifndef MPI3_SAS_IO_UNIT2_PHY_MAX +#define 
MPI3_SAS_IO_UNIT2_PHY_MAX (1) +#endif +struct mpi3_sas_io_unit_page2 { + struct mpi3_config_page_header header; + u8 num_phys; + u8 reserved09[3]; + __le32 reserved0c; + struct mpi3_sas_io_unit2_phy_pm_settings sas_phy_power_management_settings[MPI3_SAS_IO_UNIT2_PHY_MAX]; +}; + +#define MPI3_SASIOUNIT2_PAGEVERSION (0x00) +#define MPI3_SASIOUNIT2_CONTROL_SAS_SLUMBER_ENABLE (0x08) +#define MPI3_SASIOUNIT2_CONTROL_SAS_PARTIAL_ENABLE (0x04) +#define MPI3_SASIOUNIT2_CONTROL_SATA_SLUMBER_ENABLE (0x02) +#define MPI3_SASIOUNIT2_CONTROL_SATA_PARTIAL_ENABLE (0x01) +#define MPI3_SASIOUNIT2_ITE_SAS_SLUMBER_MASK (0x7000) +#define MPI3_SASIOUNIT2_ITE_SAS_SLUMBER_SHIFT (12) +#define MPI3_SASIOUNIT2_ITE_SAS_PARTIAL_MASK (0x0700) +#define MPI3_SASIOUNIT2_ITE_SAS_PARTIAL_SHIFT (8) +#define MPI3_SASIOUNIT2_ITE_SATA_SLUMBER_MASK (0x0070) +#define MPI3_SASIOUNIT2_ITE_SATA_SLUMBER_SHIFT (4) +#define MPI3_SASIOUNIT2_ITE_SATA_PARTIAL_MASK (0x0007) +#define MPI3_SASIOUNIT2_ITE_SATA_PARTIAL_SHIFT (0) +#define MPI3_SASIOUNIT2_ITE_EXP_TEN_SECONDS (7) +#define MPI3_SASIOUNIT2_ITE_EXP_ONE_SECOND (6) +#define MPI3_SASIOUNIT2_ITE_EXP_HUNDRED_MILLISECONDS (5) +#define MPI3_SASIOUNIT2_ITE_EXP_TEN_MILLISECONDS (4) +#define MPI3_SASIOUNIT2_ITE_EXP_ONE_MILLISECOND (3) +#define MPI3_SASIOUNIT2_ITE_EXP_HUNDRED_MICROSECONDS (2) +#define MPI3_SASIOUNIT2_ITE_EXP_TEN_MICROSECONDS (1) +#define MPI3_SASIOUNIT2_ITE_EXP_ONE_MICROSECOND (0) +struct mpi3_sas_io_unit_page3 { + struct mpi3_config_page_header header; + __le32 reserved08; + __le32 power_management_capabilities; +}; + +#define MPI3_SASIOUNIT3_PAGEVERSION (0x00) +#define MPI3_SASIOUNIT3_PM_HOST_SAS_SLUMBER_MODE (0x00000800) +#define MPI3_SASIOUNIT3_PM_HOST_SAS_PARTIAL_MODE (0x00000400) +#define MPI3_SASIOUNIT3_PM_HOST_SATA_SLUMBER_MODE (0x00000200) +#define MPI3_SASIOUNIT3_PM_HOST_SATA_PARTIAL_MODE (0x00000100) +#define MPI3_SASIOUNIT3_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008) +#define MPI3_SASIOUNIT3_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004) +#define MPI3_SASIOUNIT3_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002) +#define MPI3_SASIOUNIT3_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001) +struct mpi3_sas_expander_page0 { + struct mpi3_config_page_header header; + u8 io_unit_port; + u8 report_gen_length; + __le16 enclosure_handle; + __le32 reserved0c; + __le64 sas_address; + __le32 discovery_status; + __le16 dev_handle; + __le16 parent_dev_handle; + __le16 expander_change_count; + __le16 expander_route_indexes; + u8 num_phys; + u8 sas_level; + __le16 flags; + __le16 stp_bus_inactivity_time_limit; + __le16 stp_max_connect_time_limit; + __le16 stp_smp_nexus_loss_time; + __le16 max_num_routed_sas_addresses; + __le64 active_zone_manager_sas_address; + __le16 zone_lock_inactivity_limit; + __le16 reserved3a; + u8 time_to_reduced_func; + u8 initial_time_to_reduced_func; + u8 max_reduced_func_time; + u8 exp_status; +}; + +#define MPI3_SASEXPANDER0_PAGEVERSION (0x00) +#define MPI3_SASEXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000) +#define MPI3_SASEXPANDER0_FLAGS_ZONE_LOCKED (0x1000) +#define MPI3_SASEXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800) +#define MPI3_SASEXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400) +#define MPI3_SASEXPANDER0_FLAGS_ZONING_SUPPORT (0x0200) +#define MPI3_SASEXPANDER0_FLAGS_ENABLED_ZONING (0x0100) +#define MPI3_SASEXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080) +#define MPI3_SASEXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010) +#define MPI3_SASEXPANDER0_FLAGS_OTHERS_CONFIG (0x0004) +#define MPI3_SASEXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002) +#define 
MPI3_SASEXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001) +#define MPI3_SASEXPANDER0_ES_NOT_RESPONDING (0x02) +#define MPI3_SASEXPANDER0_ES_RESPONDING (0x03) +#define MPI3_SASEXPANDER0_ES_DELAY_NOT_RESPONDING (0x04) +struct mpi3_sas_expander_page1 { + struct mpi3_config_page_header header; + u8 io_unit_port; + u8 reserved09[3]; + u8 num_phys; + u8 phy; + __le16 num_table_entries_programmed; + u8 programmed_link_rate; + u8 hw_link_rate; + __le16 attached_dev_handle; + __le32 phy_info; + __le16 attached_device_info; + __le16 reserved1a; + __le16 expander_dev_handle; + u8 change_count; + u8 negotiated_link_rate; + u8 phy_identifier; + u8 attached_phy_identifier; + u8 reserved22; + u8 discovery_info; + __le32 attached_phy_info; + u8 zone_group; + u8 self_config_status; + __le16 reserved2a; + __le16 slot; + __le16 slot_index; +}; + +#define MPI3_SASEXPANDER1_PAGEVERSION (0x00) +#define MPI3_SASEXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04) +#define MPI3_SASEXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02) +#define MPI3_SASEXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01) +#ifndef MPI3_SASEXPANDER2_MAX_NUM_PHYS +#define MPI3_SASEXPANDER2_MAX_NUM_PHYS (1) +#endif +struct mpi3_sasexpander2_phy_element { + u8 link_change_count; + u8 reserved01; + __le16 rate_change_count; + __le32 reserved04; +}; + +struct mpi3_sas_expander_page2 { + struct mpi3_config_page_header header; + u8 num_phys; + u8 reserved09; + __le16 dev_handle; + __le32 reserved0c; + struct mpi3_sasexpander2_phy_element phy[MPI3_SASEXPANDER2_MAX_NUM_PHYS]; +}; + +#define MPI3_SASEXPANDER2_PAGEVERSION (0x00) +struct mpi3_sas_port_page0 { + struct mpi3_config_page_header header; + u8 port_number; + u8 reserved09; + u8 port_width; + u8 reserved0b; + u8 zone_group; + u8 reserved0d[3]; + __le64 sas_address; + __le16 device_info; + __le16 reserved1a; + __le32 reserved1c; +}; + +#define MPI3_SASPORT0_PAGEVERSION (0x00) +struct mpi3_sas_phy_page0 { + struct mpi3_config_page_header header; + __le16 owner_dev_handle; + __le16 reserved0a; + __le16 attached_dev_handle; + u8 attached_phy_identifier; + u8 reserved0f; + __le32 attached_phy_info; + u8 programmed_link_rate; + u8 hw_link_rate; + u8 change_count; + u8 flags; + __le32 phy_info; + u8 negotiated_link_rate; + u8 reserved1d[3]; + __le16 slot; + __le16 slot_index; +}; + +#define MPI3_SASPHY0_PAGEVERSION (0x00) +#define MPI3_SASPHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01) +struct mpi3_sas_phy_page1 { + struct mpi3_config_page_header header; + __le32 reserved08; + __le32 invalid_dword_count; + __le32 running_disparity_error_count; + __le32 loss_dword_synch_count; + __le32 phy_reset_problem_count; +}; + +#define MPI3_SASPHY1_PAGEVERSION (0x00) +struct mpi3_sas_phy2_phy_event { + u8 phy_event_code; + u8 reserved01[3]; + __le32 phy_event_info; +}; + +#ifndef MPI3_SAS_PHY2_PHY_EVENT_MAX +#define MPI3_SAS_PHY2_PHY_EVENT_MAX (1) +#endif +struct mpi3_sas_phy_page2 { + struct mpi3_config_page_header header; + __le32 reserved08; + u8 num_phy_events; + u8 reserved0d[3]; + struct mpi3_sas_phy2_phy_event phy_event[MPI3_SAS_PHY2_PHY_EVENT_MAX]; +}; + +#define MPI3_SASPHY2_PAGEVERSION (0x00) +struct mpi3_sas_phy3_phy_event_config { + u8 phy_event_code; + u8 reserved01[3]; + u8 counter_type; + u8 threshold_window; + u8 time_units; + u8 reserved07; + __le32 event_threshold; + __le16 threshold_flags; + __le16 reserved0e; +}; + +#define MPI3_SASPHY3_EVENT_CODE_NO_EVENT (0x00) +#define MPI3_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01) +#define MPI3_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02) +#define 
MPI3_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03) +#define MPI3_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04) +#define MPI3_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05) +#define MPI3_SASPHY3_EVENT_CODE_RX_ERROR (0x06) +#define MPI3_SASPHY3_EVENT_CODE_INV_SPL_PACKETS (0x07) +#define MPI3_SASPHY3_EVENT_CODE_LOSS_SPL_PACKET_SYNC (0x08) +#define MPI3_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20) +#define MPI3_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21) +#define MPI3_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22) +#define MPI3_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23) +#define MPI3_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24) +#define MPI3_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25) +#define MPI3_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26) +#define MPI3_SASPHY3_EVENT_CODE_TX_BREAK (0x27) +#define MPI3_SASPHY3_EVENT_CODE_RX_BREAK (0x28) +#define MPI3_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29) +#define MPI3_SASPHY3_EVENT_CODE_CONNECTION (0x2a) +#define MPI3_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2b) +#define MPI3_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2c) +#define MPI3_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2d) +#define MPI3_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2e) +#define MPI3_SASPHY3_EVENT_CODE_PERSIST_CONN (0x2f) +#define MPI3_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40) +#define MPI3_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41) +#define MPI3_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42) +#define MPI3_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43) +#define MPI3_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44) +#define MPI3_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45) +#define MPI3_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50) +#define MPI3_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51) +#define MPI3_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52) +#define MPI3_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60) +#define MPI3_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61) +#define MPI3_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63) +#define MPI3_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xd0) +#define MPI3_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xd1) +#define MPI3_SASPHY3_EVENT_CODE_RX_AIP (0xd2) +#define MPI3_SASPHY3_EVENT_CODE_LCARB_WAIT_TIME (0xd3) +#define MPI3_SASPHY3_EVENT_CODE_RCVD_CONN_RESP_WAIT_TIME (0xd4) +#define MPI3_SASPHY3_EVENT_CODE_LCCONN_TIME (0xd5) +#define MPI3_SASPHY3_EVENT_CODE_SSP_TX_START_TRANSMIT (0xd6) +#define MPI3_SASPHY3_EVENT_CODE_SATA_TX_START (0xd7) +#define MPI3_SASPHY3_EVENT_CODE_SMP_TX_START_TRANSMT (0xd8) +#define MPI3_SASPHY3_EVENT_CODE_TX_SMP_BREAK_CONN (0xd9) +#define MPI3_SASPHY3_EVENT_CODE_SSP_RX_START_RECEIVE (0xda) +#define MPI3_SASPHY3_EVENT_CODE_SATA_RX_START_RECEIVE (0xdb) +#define MPI3_SASPHY3_EVENT_CODE_SMP_RX_START_RECEIVE (0xdc) +#define MPI3_SASPHY3_COUNTER_TYPE_WRAPPING (0x00) +#define MPI3_SASPHY3_COUNTER_TYPE_SATURATING (0x01) +#define MPI3_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02) +#define MPI3_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00) +#define MPI3_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01) +#define MPI3_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02) +#define MPI3_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03) +#define MPI3_SASPHY3_TFLAGS_PHY_RESET (0x0002) +#define MPI3_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001) +#ifndef MPI3_SAS_PHY3_PHY_EVENT_MAX +#define MPI3_SAS_PHY3_PHY_EVENT_MAX (1) +#endif +struct mpi3_sas_phy_page3 { + struct mpi3_config_page_header header; + __le32 reserved08; + u8 num_phy_events; + u8 reserved0d[3]; + struct mpi3_sas_phy3_phy_event_config phy_event_config[MPI3_SAS_PHY3_PHY_EVENT_MAX]; +}; + +#define MPI3_SASPHY3_PAGEVERSION (0x00) +struct mpi3_sas_phy_page4 { + 
struct mpi3_config_page_header header; + u8 reserved08[3]; + u8 flags; + u8 initial_frame[28]; +}; + +#define MPI3_SASPHY4_PAGEVERSION (0x00) +#define MPI3_SASPHY4_FLAGS_FRAME_VALID (0x02) +#define MPI3_SASPHY4_FLAGS_SATA_FRAME (0x01) +#define MPI3_PCIE_LINK_RETIMERS_MASK (0x30) +#define MPI3_PCIE_LINK_RETIMERS_SHIFT (4) +#define MPI3_PCIE_NEG_LINK_RATE_MASK (0x0f) +#define MPI3_PCIE_NEG_LINK_RATE_UNKNOWN (0x00) +#define MPI3_PCIE_NEG_LINK_RATE_PHY_DISABLED (0x01) +#define MPI3_PCIE_NEG_LINK_RATE_2_5 (0x02) +#define MPI3_PCIE_NEG_LINK_RATE_5_0 (0x03) +#define MPI3_PCIE_NEG_LINK_RATE_8_0 (0x04) +#define MPI3_PCIE_NEG_LINK_RATE_16_0 (0x05) +#define MPI3_PCIE_NEG_LINK_RATE_32_0 (0x06) +#define MPI3_PCIE_ASPM_ENABLE_NONE (0x0) +#define MPI3_PCIE_ASPM_ENABLE_L0S (0x1) +#define MPI3_PCIE_ASPM_ENABLE_L1 (0x2) +#define MPI3_PCIE_ASPM_ENABLE_L0S_L1 (0x3) +#define MPI3_PCIE_ASPM_SUPPORT_NONE (0x0) +#define MPI3_PCIE_ASPM_SUPPORT_L0S (0x1) +#define MPI3_PCIE_ASPM_SUPPORT_L1 (0x2) +#define MPI3_PCIE_ASPM_SUPPORT_L0S_L1 (0x3) +struct mpi3_pcie_io_unit0_phy_data { + u8 link; + u8 link_flags; + u8 phy_flags; + u8 negotiated_link_rate; + __le16 attached_dev_handle; + __le16 controller_dev_handle; + __le32 enumeration_status; + u8 io_unit_port; + u8 reserved0d[3]; +}; + +#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_MASK (0x10) +#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_IOUNIT1 (0x00) +#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_BKPLANE (0x10) +#define MPI3_PCIEIOUNIT0_LINKFLAGS_ENUM_IN_PROGRESS (0x08) +#define MPI3_PCIEIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) +#define MPI3_PCIEIOUNIT0_PHYFLAGS_HOST_PHY (0x01) +#define MPI3_PCIEIOUNIT0_ES_MAX_SWITCH_DEPTH_EXCEEDED (0x80000000) +#define MPI3_PCIEIOUNIT0_ES_MAX_SWITCHES_EXCEEDED (0x40000000) +#define MPI3_PCIEIOUNIT0_ES_MAX_ENDPOINTS_EXCEEDED (0x20000000) +#define MPI3_PCIEIOUNIT0_ES_INSUFFICIENT_RESOURCES (0x10000000) +#ifndef MPI3_PCIE_IO_UNIT0_PHY_MAX +#define MPI3_PCIE_IO_UNIT0_PHY_MAX (1) +#endif +struct mpi3_pcie_io_unit_page0 { + struct mpi3_config_page_header header; + __le32 reserved08; + u8 num_phys; + u8 init_status; + u8 aspm; + u8 reserved0f; + struct mpi3_pcie_io_unit0_phy_data phy_data[MPI3_PCIE_IO_UNIT0_PHY_MAX]; +}; + +#define MPI3_PCIEIOUNIT0_PAGEVERSION (0x00) +#define MPI3_PCIEIOUNIT0_INITSTATUS_NO_ERRORS (0x00) +#define MPI3_PCIEIOUNIT0_INITSTATUS_NEEDS_INITIALIZATION (0x01) +#define MPI3_PCIEIOUNIT0_INITSTATUS_NO_TARGETS_ALLOCATED (0x02) +#define MPI3_PCIEIOUNIT0_INITSTATUS_RESOURCE_ALLOC_FAILED (0x03) +#define MPI3_PCIEIOUNIT0_INITSTATUS_BAD_NUM_PHYS (0x04) +#define MPI3_PCIEIOUNIT0_INITSTATUS_UNSUPPORTED_CONFIG (0x05) +#define MPI3_PCIEIOUNIT0_INITSTATUS_HOST_PORT_MISMATCH (0x06) +#define MPI3_PCIEIOUNIT0_INITSTATUS_PHYS_NOT_CONSECUTIVE (0x07) +#define MPI3_PCIEIOUNIT0_INITSTATUS_BAD_CLOCKING_MODE (0x08) +#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_START (0xf0) +#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_END (0xff) +#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_MASK (0xc0) +#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_SHIFT (6) +#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_MASK (0x30) +#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_SHIFT (4) +#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_MASK (0x0c) +#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_SHIFT (2) +#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_MASK (0x03) +#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_SHIFT (0) +struct mpi3_pcie_io_unit1_phy_data { + u8 link; + u8 link_flags; + u8 phy_flags; + u8 max_min_link_rate; + __le32 reserved04; + __le32 reserved08; +}; + +#define 
MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_MASK (0x03) +#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_DIS_SEPARATE_REFCLK (0x00) +#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRIS (0x01) +#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRNS (0x02) +#define MPI3_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) +#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_MASK (0xf0) +#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_SHIFT (4) +#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_2_5 (0x20) +#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_5_0 (0x30) +#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_8_0 (0x40) +#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_16_0 (0x50) +#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_32_0 (0x60) +#ifndef MPI3_PCIE_IO_UNIT1_PHY_MAX +#define MPI3_PCIE_IO_UNIT1_PHY_MAX (1) +#endif +struct mpi3_pcie_io_unit_page1 { + struct mpi3_config_page_header header; + __le32 control_flags; + __le32 reserved0c; + u8 num_phys; + u8 reserved11; + u8 aspm; + u8 reserved13; + struct mpi3_pcie_io_unit1_phy_data phy_data[MPI3_PCIE_IO_UNIT1_PHY_MAX]; +}; + +#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_MASK (0xe0000000) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_NONE (0x00000000) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_DEASSERT (0x20000000) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_ASSERT (0x40000000) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_BACKPLANE_ERROR (0x60000000) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_MASK (0x1c000000) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_NONE (0x00000000) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_DEASSERT (0x04000000) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_ASSERT (0x08000000) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_BACKPLANE_ERROR (0x0c000000) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_OVERRIDE_DISABLE (0x00000080) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_DISABLE (0x00000040) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_MASK (0x00000030) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SHIFT (4) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_SRNS_DISABLED (0x00000000) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_ENABLED (0x00000010) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRNS_ENABLED (0x00000020) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MASK (0x0000000f) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_USE_BACKPLANE (0x00000000) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_2_5 (0x00000002) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_5_0 (0x00000003) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_8_0 (0x00000004) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_16_0 (0x00000005) +#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_32_0 (0x00000006) +#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_MASK (0x0c) +#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_SHIFT (2) +#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_MASK (0x03) +#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_SHIFT (0) +struct mpi3_pcie_io_unit_page2 { + struct mpi3_config_page_header header; + __le16 nvme_max_q_dx1; + __le16 nvme_max_q_dx2; + u8 nvme_abort_to; + u8 reserved0d; + __le16 nvme_max_q_dx4; +}; + +#define MPI3_PCIEIOUNIT2_PAGEVERSION (0x00) +#define MPI3_PCIEIOUNIT3_ERROR_RECEIVER_ERROR (0) +#define MPI3_PCIEIOUNIT3_ERROR_RECOVERY (1) +#define MPI3_PCIEIOUNIT3_ERROR_CORRECTABLE_ERROR_MSG (2) 
+#define MPI3_PCIEIOUNIT3_ERROR_BAD_DLLP (3) +#define MPI3_PCIEIOUNIT3_ERROR_BAD_TLP (4) +#define MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX (5) +struct mpi3_pcie_io_unit3_error { + __le16 threshold_count; + __le16 reserved02; +}; + +struct mpi3_pcie_io_unit_page3 { + struct mpi3_config_page_header header; + u8 threshold_window; + u8 threshold_action; + u8 escalation_count; + u8 escalation_action; + u8 num_errors; + u8 reserved0d[3]; + struct mpi3_pcie_io_unit3_error error[MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX]; +}; + +#define MPI3_PCIEIOUNIT3_PAGEVERSION (0x00) +#define MPI3_PCIEIOUNIT3_ACTION_NO_ACTION (0x00) +#define MPI3_PCIEIOUNIT3_ACTION_HOT_RESET (0x01) +#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_ONLY (0x02) +#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_NO_ACCESS (0x03) +struct mpi3_pcie_switch_page0 { + struct mpi3_config_page_header header; + u8 io_unit_port; + u8 switch_status; + u8 reserved0a[2]; + __le16 dev_handle; + __le16 parent_dev_handle; + u8 num_ports; + u8 pcie_level; + __le16 reserved12; + __le32 reserved14; + __le32 reserved18; + __le32 reserved1c; +}; + +#define MPI3_PCIESWITCH0_PAGEVERSION (0x00) +#define MPI3_PCIESWITCH0_SS_NOT_RESPONDING (0x02) +#define MPI3_PCIESWITCH0_SS_RESPONDING (0x03) +#define MPI3_PCIESWITCH0_SS_DELAY_NOT_RESPONDING (0x04) +struct mpi3_pcie_switch_page1 { + struct mpi3_config_page_header header; + u8 io_unit_port; + u8 flags; + __le16 reserved0a; + u8 num_ports; + u8 port_num; + __le16 attached_dev_handle; + __le16 switch_dev_handle; + u8 negotiated_port_width; + u8 negotiated_link_rate; + __le16 slot; + __le16 slot_index; + __le32 reserved18; +}; + +#define MPI3_PCIESWITCH1_PAGEVERSION (0x00) +#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_MASK (0x0c) +#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_SHIFT (2) +#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_MASK (0x03) +#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_SHIFT (0) +#ifndef MPI3_PCIESWITCH2_MAX_NUM_PORTS +#define MPI3_PCIESWITCH2_MAX_NUM_PORTS (1) +#endif +struct mpi3_pcieswitch2_port_element { + __le16 link_change_count; + __le16 rate_change_count; + __le32 reserved04; +}; + +struct mpi3_pcie_switch_page2 { + struct mpi3_config_page_header header; + u8 num_ports; + u8 reserved09; + __le16 dev_handle; + __le32 reserved0c; + struct mpi3_pcieswitch2_port_element port[MPI3_PCIESWITCH2_MAX_NUM_PORTS]; +}; + +#define MPI3_PCIESWITCH2_PAGEVERSION (0x00) +struct mpi3_pcie_link_page0 { + struct mpi3_config_page_header header; + u8 link; + u8 reserved09[3]; + __le32 reserved0c; + __le32 receiver_error_count; + __le32 recovery_count; + __le32 corr_error_msg_count; + __le32 non_fatal_error_msg_count; + __le32 fatal_error_msg_count; + __le32 non_fatal_error_count; + __le32 fatal_error_count; + __le32 bad_dllp_count; + __le32 bad_tlp_count; +}; + +#define MPI3_PCIELINK0_PAGEVERSION (0x00) +struct mpi3_enclosure_page0 { + struct mpi3_config_page_header header; + __le64 enclosure_logical_id; + __le16 flags; + __le16 enclosure_handle; + __le16 num_slots; + __le16 reserved16; + u8 io_unit_port; + u8 enclosure_level; + __le16 sep_dev_handle; + u8 chassis_slot; + u8 reserved1d[3]; +}; + +#define MPI3_ENCLOSURE0_PAGEVERSION (0x00) +#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_MASK (0xc000) +#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_VIRTUAL (0x0000) +#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_SAS (0x4000) +#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_PCIE (0x8000) +#define MPI3_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) +#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK (0x0010) +#define MPI3_ENCLS0_FLAGS_ENCL_DEV_NOT_FOUND (0x0000) +#define 
MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT (0x0010) +#define MPI3_ENCLS0_FLAGS_MNG_MASK (0x000f) +#define MPI3_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) +#define MPI3_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) +#define MPI3_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0002) +#define MPI3_DEVICE_DEVFORM_SAS_SATA (0x00) +#define MPI3_DEVICE_DEVFORM_PCIE (0x01) +#define MPI3_DEVICE_DEVFORM_VD (0x02) +struct mpi3_device0_sas_sata_format { + __le64 sas_address; + __le16 flags; + __le16 device_info; + u8 phy_num; + u8 attached_phy_identifier; + u8 max_port_connections; + u8 zone_group; +}; + +#define MPI3_DEVICE0_SASSATA_FLAGS_WRITE_SAME_UNMAP_NCQ (0x0400) +#define MPI3_DEVICE0_SASSATA_FLAGS_SLUMBER_CAP (0x0200) +#define MPI3_DEVICE0_SASSATA_FLAGS_PARTIAL_CAP (0x0100) +#define MPI3_DEVICE0_SASSATA_FLAGS_ASYNC_NOTIFY (0x0080) +#define MPI3_DEVICE0_SASSATA_FLAGS_SW_PRESERVE (0x0040) +#define MPI3_DEVICE0_SASSATA_FLAGS_UNSUPP_DEV (0x0020) +#define MPI3_DEVICE0_SASSATA_FLAGS_48BIT_LBA (0x0010) +#define MPI3_DEVICE0_SASSATA_FLAGS_SMART_SUPP (0x0008) +#define MPI3_DEVICE0_SASSATA_FLAGS_NCQ_SUPP (0x0004) +#define MPI3_DEVICE0_SASSATA_FLAGS_FUA_SUPP (0x0002) +#define MPI3_DEVICE0_SASSATA_FLAGS_PERSIST_CAP (0x0001) +struct mpi3_device0_pcie_format { + u8 supported_link_rates; + u8 max_port_width; + u8 negotiated_port_width; + u8 negotiated_link_rate; + u8 port_num; + u8 controller_reset_to; + __le16 device_info; + __le32 maximum_data_transfer_size; + __le32 capabilities; + __le16 noiob; + u8 nvme_abort_to; + u8 page_size; + __le16 shutdown_latency; + u8 recovery_info; + u8 reserved17; +}; + +#define MPI3_DEVICE0_PCIE_LINK_RATE_32_0_SUPP (0x10) +#define MPI3_DEVICE0_PCIE_LINK_RATE_16_0_SUPP (0x08) +#define MPI3_DEVICE0_PCIE_LINK_RATE_8_0_SUPP (0x04) +#define MPI3_DEVICE0_PCIE_LINK_RATE_5_0_SUPP (0x02) +#define MPI3_DEVICE0_PCIE_LINK_RATE_2_5_SUPP (0x01) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0007) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NO_DEVICE (0x0000) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE (0x0001) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SWITCH_DEVICE (0x0002) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE (0x0003) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_MASK (0x0030) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_SHIFT (4) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK (0x00c0) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_SHIFT (6) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0 (0x0000) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_1 (0x0040) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_2 (0x0080) +#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_3 (0x00c0) +#define MPI3_DEVICE0_PCIE_CAP_SGL_EXTRA_LENGTH_SUPPORTED (0x00000020) +#define MPI3_DEVICE0_PCIE_CAP_METADATA_SEPARATED (0x00000010) +#define MPI3_DEVICE0_PCIE_CAP_SGL_DWORD_ALIGN_REQUIRED (0x00000008) +#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_SGL (0x00000004) +#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_PRP (0x00000000) +#define MPI3_DEVICE0_PCIE_CAP_BIT_BUCKET_SGL_SUPP (0x00000002) +#define MPI3_DEVICE0_PCIE_CAP_SGL_SUPP (0x00000001) +#define MPI3_DEVICE0_PCIE_CAP_ASPM_MASK (0x000000c0) +#define MPI3_DEVICE0_PCIE_CAP_ASPM_SHIFT (6) +#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_MASK (0xe0) +#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_NS_MGMT (0x00) +#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_FORMAT (0x20) +#define MPI3_DEVICE0_PCIE_RECOVER_REASON_MASK (0x1f) +#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NS (0x00) +#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NSID_1 (0x01) +#define MPI3_DEVICE0_PCIE_RECOVER_REASON_TOO_MANY_NS (0x02) +#define 
MPI3_DEVICE0_PCIE_RECOVER_REASON_PROTECTION (0x03) +#define MPI3_DEVICE0_PCIE_RECOVER_REASON_METADATA_SZ (0x04) +#define MPI3_DEVICE0_PCIE_RECOVER_REASON_LBA_DATA_SZ (0x05) +struct mpi3_device0_vd_format { + u8 vd_state; + u8 raid_level; + __le16 device_info; + __le16 flags; + __le16 io_throttle_group; + __le16 io_throttle_group_low; + __le16 io_throttle_group_high; + __le32 reserved0c; +}; +#define MPI3_DEVICE0_VD_STATE_OFFLINE (0x00) +#define MPI3_DEVICE0_VD_STATE_PARTIALLY_DEGRADED (0x01) +#define MPI3_DEVICE0_VD_STATE_DEGRADED (0x02) +#define MPI3_DEVICE0_VD_STATE_OPTIMAL (0x03) +#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_0 (0) +#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_1 (1) +#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_5 (5) +#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_6 (6) +#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_10 (10) +#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_50 (50) +#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_60 (60) +#define MPI3_DEVICE0_VD_DEVICE_INFO_HDD (0x0010) +#define MPI3_DEVICE0_VD_DEVICE_INFO_SSD (0x0008) +#define MPI3_DEVICE0_VD_DEVICE_INFO_NVME (0x0004) +#define MPI3_DEVICE0_VD_DEVICE_INFO_SATA (0x0002) +#define MPI3_DEVICE0_VD_DEVICE_INFO_SAS (0x0001) +#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK (0xf000) +#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_SHIFT (12) +union mpi3_device0_dev_spec_format { + struct mpi3_device0_sas_sata_format sas_sata_format; + struct mpi3_device0_pcie_format pcie_format; + struct mpi3_device0_vd_format vd_format; +}; + +struct mpi3_device_page0 { + struct mpi3_config_page_header header; + __le16 dev_handle; + __le16 parent_dev_handle; + __le16 slot; + __le16 enclosure_handle; + __le64 wwid; + __le16 persistent_id; + u8 io_unit_port; + u8 access_status; + __le16 flags; + __le16 reserved1e; + __le16 slot_index; + __le16 queue_depth; + u8 reserved24[3]; + u8 device_form; + union mpi3_device0_dev_spec_format device_specific; +}; + +#define MPI3_DEVICE0_PAGEVERSION (0x00) +#define MPI3_DEVICE0_PARENT_INVALID (0xffff) +#define MPI3_DEVICE0_ENCLOSURE_HANDLE_NO_ENCLOSURE (0x0000) +#define MPI3_DEVICE0_WWID_INVALID (0xffffffffffffffff) +#define MPI3_DEVICE0_PERSISTENTID_INVALID (0xffff) +#define MPI3_DEVICE0_IOUNITPORT_INVALID (0xff) +#define MPI3_DEVICE0_ASTATUS_NO_ERRORS (0x00) +#define MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION (0x01) +#define MPI3_DEVICE0_ASTATUS_CAP_UNSUPPORTED (0x02) +#define MPI3_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x03) +#define MPI3_DEVICE0_ASTATUS_UNAUTHORIZED (0x04) +#define MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY (0x05) +#define MPI3_DEVICE0_ASTATUS_PREPARE (0x06) +#define MPI3_DEVICE0_ASTATUS_SAFE_MODE (0x07) +#define MPI3_DEVICE0_ASTATUS_GENERIC_MAX (0x0f) +#define MPI3_DEVICE0_ASTATUS_SAS_UNKNOWN (0x10) +#define MPI3_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x11) +#define MPI3_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x12) +#define MPI3_DEVICE0_ASTATUS_SAS_MAX (0x1f) +#define MPI3_DEVICE0_ASTATUS_SIF_UNKNOWN (0x20) +#define MPI3_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x21) +#define MPI3_DEVICE0_ASTATUS_SIF_DIAG (0x22) +#define MPI3_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x23) +#define MPI3_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x24) +#define MPI3_DEVICE0_ASTATUS_SIF_PIO_SN (0x25) +#define MPI3_DEVICE0_ASTATUS_SIF_MDMA_SN (0x26) +#define MPI3_DEVICE0_ASTATUS_SIF_UDMA_SN (0x27) +#define MPI3_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x28) +#define MPI3_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x29) +#define MPI3_DEVICE0_ASTATUS_SIF_MAX (0x2f) +#define MPI3_DEVICE0_ASTATUS_PCIE_UNKNOWN (0x30) +#define 
MPI3_DEVICE0_ASTATUS_PCIE_MEM_SPACE_ACCESS (0x31) +#define MPI3_DEVICE0_ASTATUS_PCIE_UNSUPPORTED (0x32) +#define MPI3_DEVICE0_ASTATUS_PCIE_MSIX_REQUIRED (0x33) +#define MPI3_DEVICE0_ASTATUS_PCIE_ECRC_REQUIRED (0x34) +#define MPI3_DEVICE0_ASTATUS_PCIE_MAX (0x3f) +#define MPI3_DEVICE0_ASTATUS_NVME_UNKNOWN (0x40) +#define MPI3_DEVICE0_ASTATUS_NVME_READY_TIMEOUT (0x41) +#define MPI3_DEVICE0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x42) +#define MPI3_DEVICE0_ASTATUS_NVME_IDENTIFY_FAILED (0x43) +#define MPI3_DEVICE0_ASTATUS_NVME_QCONFIG_FAILED (0x44) +#define MPI3_DEVICE0_ASTATUS_NVME_QCREATION_FAILED (0x45) +#define MPI3_DEVICE0_ASTATUS_NVME_EVENTCFG_FAILED (0x46) +#define MPI3_DEVICE0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x47) +#define MPI3_DEVICE0_ASTATUS_NVME_IDLE_TIMEOUT (0x48) +#define MPI3_DEVICE0_ASTATUS_NVME_CTRL_FAILURE_STATUS (0x49) +#define MPI3_DEVICE0_ASTATUS_NVME_INSUFFICIENT_POWER (0x4a) +#define MPI3_DEVICE0_ASTATUS_NVME_DOORBELL_STRIDE (0x4b) +#define MPI3_DEVICE0_ASTATUS_NVME_MEM_PAGE_MIN_SIZE (0x4c) +#define MPI3_DEVICE0_ASTATUS_NVME_MEMORY_ALLOCATION (0x4d) +#define MPI3_DEVICE0_ASTATUS_NVME_COMPLETION_TIME (0x4e) +#define MPI3_DEVICE0_ASTATUS_NVME_BAR (0x4f) +#define MPI3_DEVICE0_ASTATUS_NVME_NS_DESCRIPTOR (0x50) +#define MPI3_DEVICE0_ASTATUS_NVME_INCOMPATIBLE_SETTINGS (0x51) +#define MPI3_DEVICE0_ASTATUS_NVME_TOO_MANY_ERRORS (0x52) +#define MPI3_DEVICE0_ASTATUS_NVME_MAX (0x5f) +#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x80) +#define MPI3_DEVICE0_ASTATUS_VD_MAX (0x8f) +#define MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK (0xe000) +#define MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT (0x0000) +#define MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB (0x2000) +#define MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB (0x4000) +#define MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE (0x0080) +#define MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED (0x0010) +#define MPI3_DEVICE0_FLAGS_HIDDEN (0x0008) +#define MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL (0x0004) +#define MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED (0x0002) +#define MPI3_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) +#define MPI3_DEVICE0_QUEUE_DEPTH_NOT_APPLICABLE (0x0000) +struct mpi3_device1_sas_sata_format { + __le32 reserved00; +}; +struct mpi3_device1_pcie_format { + __le16 vendor_id; + __le16 device_id; + __le16 subsystem_vendor_id; + __le16 subsystem_id; + __le32 reserved08; + u8 revision_id; + u8 reserved0d; + __le16 pci_parameters; +}; + +#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_128B (0x0) +#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_256B (0x1) +#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_512B (0x2) +#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_1024B (0x3) +#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_2048B (0x4) +#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_4096B (0x5) +#define MPI3_DEVICE1_PCIE_PARAMS_MAX_READ_REQ_MASK (0x01c0) +#define MPI3_DEVICE1_PCIE_PARAMS_MAX_READ_REQ_SHIFT (6) +#define MPI3_DEVICE1_PCIE_PARAMS_CURR_MAX_PAYLOAD_MASK (0x0038) +#define MPI3_DEVICE1_PCIE_PARAMS_CURR_MAX_PAYLOAD_SHIFT (3) +#define MPI3_DEVICE1_PCIE_PARAMS_SUPP_MAX_PAYLOAD_MASK (0x0007) +#define MPI3_DEVICE1_PCIE_PARAMS_SUPP_MAX_PAYLOAD_SHIFT (0) +struct mpi3_device1_vd_format { + __le32 reserved00; +}; + +union mpi3_device1_dev_spec_format { + struct mpi3_device1_sas_sata_format sas_sata_format; + struct mpi3_device1_pcie_format pcie_format; + struct mpi3_device1_vd_format vd_format; +}; + +struct mpi3_device_page1 { + struct mpi3_config_page_header header; + __le16 dev_handle; + __le16 reserved0a; + __le16 link_change_count; + __le16 rate_change_count; + __le16 tm_count; + 
__le16 reserved12; + __le32 reserved14[10]; + u8 reserved3c[3]; + u8 device_form; + union mpi3_device1_dev_spec_format device_specific; +}; + +#define MPI3_DEVICE1_PAGEVERSION (0x00) +#define MPI3_DEVICE1_COUNTER_MAX (0xfffe) +#define MPI3_DEVICE1_COUNTER_INVALID (0xffff) +#endif diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h new file mode 100644 index 000000000..47035b811 --- /dev/null +++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h @@ -0,0 +1,275 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2018-2023 Broadcom Inc. All rights reserved. + */ +#ifndef MPI30_IMAGE_H +#define MPI30_IMAGE_H 1 +struct mpi3_comp_image_version { + __le16 build_num; + __le16 customer_id; + u8 phase_minor; + u8 phase_major; + u8 gen_minor; + u8 gen_major; +}; + +struct mpi3_hash_exclusion_format { + __le32 offset; + __le32 size; +}; + +#define MPI3_IMAGE_HASH_EXCUSION_NUM (4) +struct mpi3_component_image_header { + __le32 signature0; + __le32 load_address; + __le32 data_size; + __le32 start_offset; + __le32 signature1; + __le32 flash_offset; + __le32 image_size; + __le32 version_string_offset; + __le32 build_date_string_offset; + __le32 build_time_string_offset; + __le32 environment_variable_offset; + __le32 application_specific; + __le32 signature2; + __le32 header_size; + __le32 crc; + __le32 flags; + __le32 secondary_flash_offset; + __le32 etp_offset; + __le32 etp_size; + union mpi3_version_union rmc_interface_version; + union mpi3_version_union etp_interface_version; + struct mpi3_comp_image_version component_image_version; + struct mpi3_hash_exclusion_format hash_exclusion[MPI3_IMAGE_HASH_EXCUSION_NUM]; + __le32 next_image_header_offset; + union mpi3_version_union security_version; + __le32 reserved84[31]; +}; + +#define MPI3_IMAGE_HEADER_SIGNATURE0_MPI3 (0xeb00003e) +#define MPI3_IMAGE_HEADER_LOAD_ADDRESS_INVALID (0x00000000) +#define MPI3_IMAGE_HEADER_SIGNATURE1_APPLICATION (0x20505041) +#define MPI3_IMAGE_HEADER_SIGNATURE1_FIRST_MUTABLE (0x20434d46) +#define MPI3_IMAGE_HEADER_SIGNATURE1_BSP (0x20505342) +#define MPI3_IMAGE_HEADER_SIGNATURE1_ROM_BIOS (0x534f4942) +#define MPI3_IMAGE_HEADER_SIGNATURE1_HII_X64 (0x4d494948) +#define MPI3_IMAGE_HEADER_SIGNATURE1_HII_ARM (0x41494948) +#define MPI3_IMAGE_HEADER_SIGNATURE1_CPLD (0x444c5043) +#define MPI3_IMAGE_HEADER_SIGNATURE1_SPD (0x20445053) +#define MPI3_IMAGE_HEADER_SIGNATURE1_GAS_GAUGE (0x20534147) +#define MPI3_IMAGE_HEADER_SIGNATURE1_PBLP (0x504c4250) +#define MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST (0x464e414d) +#define MPI3_IMAGE_HEADER_SIGNATURE1_OEM (0x204d454f) +#define MPI3_IMAGE_HEADER_SIGNATURE1_RMC (0x20434d52) +#define MPI3_IMAGE_HEADER_SIGNATURE1_SMM (0x204d4d53) +#define MPI3_IMAGE_HEADER_SIGNATURE1_PSW (0x20575350) +#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546) +#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030) +#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000) +#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_DI (0x00000010) +#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_NVDATA (0x00000008) +#define MPI3_IMAGE_HEADER_FLAGS_REQUIRES_ACTIVATION (0x00000004) +#define MPI3_IMAGE_HEADER_FLAGS_COMPRESSED (0x00000002) +#define MPI3_IMAGE_HEADER_FLAGS_FLASH (0x00000001) +#define MPI3_IMAGE_HEADER_SIGNATURE0_OFFSET (0x00) +#define MPI3_IMAGE_HEADER_LOAD_ADDRESS_OFFSET (0x04) +#define MPI3_IMAGE_HEADER_DATA_SIZE_OFFSET (0x08) +#define MPI3_IMAGE_HEADER_START_OFFSET_OFFSET (0x0c) +#define MPI3_IMAGE_HEADER_SIGNATURE1_OFFSET (0x10) +#define 
MPI3_IMAGE_HEADER_FLASH_OFFSET_OFFSET (0x14) +#define MPI3_IMAGE_HEADER_FLASH_SIZE_OFFSET (0x18) +#define MPI3_IMAGE_HEADER_VERSION_STRING_OFFSET_OFFSET (0x1c) +#define MPI3_IMAGE_HEADER_BUILD_DATE_STRING_OFFSET_OFFSET (0x20) +#define MPI3_IMAGE_HEADER_BUILD_TIME_OFFSET_OFFSET (0x24) +#define MPI3_IMAGE_HEADER_ENVIROMENT_VAR_OFFSET_OFFSET (0x28) +#define MPI3_IMAGE_HEADER_APPLICATION_SPECIFIC_OFFSET (0x2c) +#define MPI3_IMAGE_HEADER_SIGNATURE2_OFFSET (0x30) +#define MPI3_IMAGE_HEADER_HEADER_SIZE_OFFSET (0x34) +#define MPI3_IMAGE_HEADER_CRC_OFFSET (0x38) +#define MPI3_IMAGE_HEADER_FLAGS_OFFSET (0x3c) +#define MPI3_IMAGE_HEADER_SECONDARY_FLASH_OFFSET_OFFSET (0x40) +#define MPI3_IMAGE_HEADER_ETP_OFFSET_OFFSET (0x44) +#define MPI3_IMAGE_HEADER_ETP_SIZE_OFFSET (0x48) +#define MPI3_IMAGE_HEADER_RMC_INTERFACE_VER_OFFSET (0x4c) +#define MPI3_IMAGE_HEADER_ETP_INTERFACE_VER_OFFSET (0x50) +#define MPI3_IMAGE_HEADER_COMPONENT_IMAGE_VER_OFFSET (0x54) +#define MPI3_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5c) +#define MPI3_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7c) +#define MPI3_IMAGE_HEADER_SIZE (0x100) +#ifndef MPI3_CI_MANIFEST_MPI_MAX +#define MPI3_CI_MANIFEST_MPI_MAX (1) +#endif +struct mpi3_ci_manifest_mpi_comp_image_ref { + __le32 signature1; + __le32 reserved04[3]; + struct mpi3_comp_image_version component_image_version; + __le32 component_image_version_string_offset; + __le32 crc; +}; + +struct mpi3_ci_manifest_mpi { + u8 manifest_type; + u8 reserved01[3]; + __le32 reserved04[3]; + u8 num_image_references; + u8 release_level; + __le16 reserved12; + __le16 reserved14; + __le16 flags; + __le32 reserved18[2]; + __le16 vendor_id; + __le16 device_id; + __le16 subsystem_vendor_id; + __le16 subsystem_id; + __le32 reserved28[2]; + union mpi3_version_union package_security_version; + __le32 reserved34; + struct mpi3_comp_image_version package_version; + __le32 package_version_string_offset; + __le32 package_build_date_string_offset; + __le32 package_build_time_string_offset; + __le32 reserved4c; + __le32 diag_authorization_identifier[16]; + struct mpi3_ci_manifest_mpi_comp_image_ref component_image_ref[MPI3_CI_MANIFEST_MPI_MAX]; +}; + +#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_DEV (0x00) +#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_PREALPHA (0x10) +#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_ALPHA (0x20) +#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_BETA (0x30) +#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_RC (0x40) +#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_GCA (0x50) +#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_POINT (0x60) +#define MPI3_CI_MANIFEST_MPI_FLAGS_DIAG_AUTHORIZATION (0x01) +#define MPI3_CI_MANIFEST_MPI_SUBSYSTEMID_IGNORED (0xffff) +#define MPI3_CI_MANIFEST_MPI_PKG_VER_STR_OFF_UNSPECIFIED (0x00000000) +#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_DATE_STR_OFF_UNSPECIFIED (0x00000000) +#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_TIME_STR_OFF_UNSPECIFIED (0x00000000) +union mpi3_ci_manifest { + struct mpi3_ci_manifest_mpi mpi; + __le32 dword[1]; +}; + +#define MPI3_CI_MANIFEST_TYPE_MPI (0x00) +struct mpi3_extended_image_header { + u8 image_type; + u8 reserved01[3]; + __le32 checksum; + __le32 image_size; + __le32 next_image_header_offset; + __le32 reserved10[4]; + __le32 identify_string[8]; +}; + +#define MPI3_EXT_IMAGE_IMAGETYPE_OFFSET (0x00) +#define MPI3_EXT_IMAGE_IMAGESIZE_OFFSET (0x08) +#define MPI3_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0c) +#define MPI3_EXT_IMAGE_HEADER_SIZE (0x40) +#define MPI3_EXT_IMAGE_TYPE_UNSPECIFIED (0x00) +#define MPI3_EXT_IMAGE_TYPE_NVDATA (0x03) +#define 
MPI3_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07) +#define MPI3_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09) +#define MPI3_EXT_IMAGE_TYPE_RDE (0x0a) +#define MPI3_EXT_IMAGE_TYPE_AUXILIARY_PROCESSOR (0x0b) +#define MPI3_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80) +#define MPI3_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xff) +struct mpi3_supported_device { + __le16 device_id; + __le16 vendor_id; + __le16 device_id_mask; + __le16 reserved06; + u8 low_pci_rev; + u8 high_pci_rev; + __le16 reserved0a; + __le32 reserved0c; +}; + +#ifndef MPI3_SUPPORTED_DEVICE_MAX +#define MPI3_SUPPORTED_DEVICE_MAX (1) +#endif +struct mpi3_supported_devices_data { + u8 image_version; + u8 reserved01; + u8 num_devices; + u8 reserved03; + __le32 reserved04; + struct mpi3_supported_device supported_device[MPI3_SUPPORTED_DEVICE_MAX]; +}; + +#ifndef MPI3_ENCRYPTED_HASH_MAX +#define MPI3_ENCRYPTED_HASH_MAX (1) +#endif +struct mpi3_encrypted_hash_entry { + u8 hash_image_type; + u8 hash_algorithm; + u8 encryption_algorithm; + u8 reserved03; + __le32 reserved04; + __le32 encrypted_hash[MPI3_ENCRYPTED_HASH_MAX]; +}; + +#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_SIGNATURE (0x03) +#define MPI3_HASH_ALGORITHM_VERSION_MASK (0xe0) +#define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00) +#define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20) +#define MPI3_HASH_ALGORITHM_VERSION_SHA2 (0x40) +#define MPI3_HASH_ALGORITHM_VERSION_SHA3 (0x60) +#define MPI3_HASH_ALGORITHM_SIZE_MASK (0x1f) +#define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00) +#define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01) +#define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02) +#define MPI3_HASH_ALGORITHM_SIZE_SHA384 (0x03) +#define MPI3_ENCRYPTION_ALGORITHM_UNUSED (0x00) +#define MPI3_ENCRYPTION_ALGORITHM_RSA256 (0x01) +#define MPI3_ENCRYPTION_ALGORITHM_RSA512 (0x02) +#define MPI3_ENCRYPTION_ALGORITHM_RSA1024 (0x03) +#define MPI3_ENCRYPTION_ALGORITHM_RSA2048 (0x04) +#define MPI3_ENCRYPTION_ALGORITHM_RSA4096 (0x05) +#define MPI3_ENCRYPTION_ALGORITHM_RSA3072 (0x06) +#ifndef MPI3_PUBLIC_KEY_MAX +#define MPI3_PUBLIC_KEY_MAX (1) +#endif +struct mpi3_encrypted_key_with_hash_entry { + u8 hash_image_type; + u8 hash_algorithm; + u8 encryption_algorithm; + u8 reserved03; + __le32 reserved04; + __le32 public_key[MPI3_PUBLIC_KEY_MAX]; +}; + +#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX +#define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1) +#endif +struct mpi3_encrypted_hash_data { + u8 image_version; + u8 num_hash; + __le16 reserved02; + __le32 reserved04; + struct mpi3_encrypted_hash_entry encrypted_hash_entry[MPI3_ENCRYPTED_HASH_ENTRY_MAX]; +}; + +#ifndef MPI3_AUX_PROC_DATA_MAX +#define MPI3_AUX_PROC_DATA_MAX (1) +#endif +struct mpi3_aux_processor_data { + u8 boot_method; + u8 num_load_addr; + u8 reserved02; + u8 type; + __le32 version; + __le32 load_address[8]; + __le32 reserved28[22]; + __le32 aux_processor_data[MPI3_AUX_PROC_DATA_MAX]; +}; + +#define MPI3_AUX_PROC_DATA_OFFSET (0x80) +#define MPI3_AUXPROCESSOR_BOOT_METHOD_MO_MSG (0x00) +#define MPI3_AUXPROCESSOR_BOOT_METHOD_MO_DOORBELL (0x01) +#define MPI3_AUXPROCESSOR_BOOT_METHOD_COMPONENT (0x02) +#define MPI3_AUXPROCESSOR_TYPE_ARM_A15 (0x00) +#define MPI3_AUXPROCESSOR_TYPE_ARM_M0 (0x01) +#define MPI3_AUXPROCESSOR_TYPE_ARM_R4 (0x02) +#endif diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h new file mode 100644 index 000000000..af86d12c8 --- /dev/null +++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2016-2023 Broadcom Inc. All rights reserved. 
+ */ +#ifndef MPI30_INIT_H +#define MPI30_INIT_H 1 +struct mpi3_scsi_io_cdb_eedp32 { + u8 cdb[20]; + __be32 primary_reference_tag; + __le16 primary_application_tag; + __le16 primary_application_tag_mask; + __le32 transfer_length; +}; + +union mpi3_scsi_io_cdb_union { + u8 cdb32[32]; + struct mpi3_scsi_io_cdb_eedp32 eedp32; + struct mpi3_sge_common sge; +}; + +struct mpi3_scsi_io_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + __le16 dev_handle; + __le32 flags; + __le32 skip_count; + __le32 data_length; + u8 lun[8]; + union mpi3_scsi_io_cdb_union cdb; + union mpi3_sge_union sgl[4]; +}; + +#define MPI3_SCSIIO_MSGFLAGS_METASGL_VALID (0x80) +#define MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE (0x40) +#define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000) +#define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000) +#define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000) +#define MPI3_SCSIIO_FLAGS_CDB_IN_SEPARATE_BUFFER (0x40000000) +#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_MASK (0x07000000) +#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ (0x00000000) +#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ (0x01000000) +#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ (0x02000000) +#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ (0x04000000) +#define MPI3_SCSIIO_FLAGS_CMDPRI_MASK (0x00f00000) +#define MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT (20) +#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000c0000) +#define MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER (0x00000000) +#define MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE (0x00040000) +#define MPI3_SCSIIO_FLAGS_DATADIRECTION_READ (0x00080000) +#define MPI3_SCSIIO_FLAGS_DMAOPERATION_MASK (0x00030000) +#define MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI (0x00010000) +#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_MASK (0x000000f0) +#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING (0x00000010) +#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE (0x00000020) +#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_PROD_SPECIFIC (0x00000080) +#define MPI3_SCSIIO_METASGL_INDEX (3) +struct mpi3_scsi_io_reply { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 ioc_use_only08; + __le16 ioc_status; + __le32 ioc_log_info; + u8 scsi_status; + u8 scsi_state; + __le16 dev_handle; + __le32 transfer_count; + __le32 sense_count; + __le32 response_data; + __le16 task_tag; + __le16 scsi_status_qualifier; + __le32 eedp_error_offset; + __le16 eedp_observed_app_tag; + __le16 eedp_observed_guard; + __le32 eedp_observed_ref_tag; + __le64 sense_data_buffer_address; +}; + +#define MPI3_SCSIIO_REPLY_MSGFLAGS_REFTAG_OBSERVED_VALID (0x01) +#define MPI3_SCSIIO_REPLY_MSGFLAGS_APPTAG_OBSERVED_VALID (0x02) +#define MPI3_SCSIIO_REPLY_MSGFLAGS_GUARD_OBSERVED_VALID (0x04) +#define MPI3_SCSI_STATUS_GOOD (0x00) +#define MPI3_SCSI_STATUS_CHECK_CONDITION (0x02) +#define MPI3_SCSI_STATUS_CONDITION_MET (0x04) +#define MPI3_SCSI_STATUS_BUSY (0x08) +#define MPI3_SCSI_STATUS_INTERMEDIATE (0x10) +#define MPI3_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14) +#define MPI3_SCSI_STATUS_RESERVATION_CONFLICT (0x18) +#define MPI3_SCSI_STATUS_COMMAND_TERMINATED (0x22) +#define MPI3_SCSI_STATUS_TASK_SET_FULL (0x28) +#define MPI3_SCSI_STATUS_ACA_ACTIVE (0x30) +#define MPI3_SCSI_STATUS_TASK_ABORTED (0x40) +#define MPI3_SCSI_STATE_SENSE_MASK (0x03) +#define MPI3_SCSI_STATE_SENSE_VALID (0x00) +#define MPI3_SCSI_STATE_SENSE_FAILED (0x01) +#define MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY 
(0x02) +#define MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE (0x03) +#define MPI3_SCSI_STATE_NO_SCSI_STATUS (0x04) +#define MPI3_SCSI_STATE_TERMINATED (0x08) +#define MPI3_SCSI_STATE_RESPONSE_DATA_VALID (0x10) +#define MPI3_SCSI_RSP_RESPONSECODE_MASK (0x000000ff) +#define MPI3_SCSI_RSP_RESPONSECODE_SHIFT (0) +#define MPI3_SCSI_RSP_ARI2_MASK (0x0000ff00) +#define MPI3_SCSI_RSP_ARI2_SHIFT (8) +#define MPI3_SCSI_RSP_ARI1_MASK (0x00ff0000) +#define MPI3_SCSI_RSP_ARI1_SHIFT (16) +#define MPI3_SCSI_RSP_ARI0_MASK (0xff000000) +#define MPI3_SCSI_RSP_ARI0_SHIFT (24) +#define MPI3_SCSI_TASKTAG_UNKNOWN (0xffff) +#define MPI3_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x08) +#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01) +#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK_SET (0x02) +#define MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03) +#define MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05) +#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06) +#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07) +#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_ACA (0x08) +#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK_SET (0x09) +#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_ASYNC_EVENT (0x0a) +#define MPI3_SCSITASKMGMT_TASKTYPE_I_T_NEXUS_RESET (0x0b) +#define MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE (0x00) +#define MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME (0x02) +#define MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED (0x04) +#define MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED (0x05) +#define MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED (0x08) +#define MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN (0x09) +#define MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG (0x0a) +#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80) +#define MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED (0x81) +#endif diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h new file mode 100644 index 000000000..1e4a60fc6 --- /dev/null +++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h @@ -0,0 +1,1064 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2016-2023 Broadcom Inc. All rights reserved. 
+ */ +#ifndef MPI30_IOC_H +#define MPI30_IOC_H 1 +struct mpi3_ioc_init_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + __le16 reserved0a; + union mpi3_version_union mpi_version; + __le64 time_stamp; + u8 reserved18; + u8 who_init; + __le16 reserved1a; + __le16 reply_free_queue_depth; + __le16 reserved1e; + __le64 reply_free_queue_address; + __le32 reserved28; + __le16 sense_buffer_free_queue_depth; + __le16 sense_buffer_length; + __le64 sense_buffer_free_queue_address; + __le64 driver_information_address; +}; + +#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03) +#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00) +#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01) +#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_INLINE (0x02) +#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_BOTH (0x03) +#define MPI3_WHOINIT_NOT_INITIALIZED (0x00) +#define MPI3_WHOINIT_ROM_BIOS (0x02) +#define MPI3_WHOINIT_HOST_DRIVER (0x03) +#define MPI3_WHOINIT_MANUFACTURER (0x04) + +struct mpi3_ioc_facts_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + __le16 reserved0a; + __le32 reserved0c; + union mpi3_sge_union sgl; +}; + +struct mpi3_ioc_facts_data { + __le16 ioc_facts_data_length; + __le16 reserved02; + union mpi3_version_union mpi_version; + struct mpi3_comp_image_version fw_version; + __le32 ioc_capabilities; + u8 ioc_number; + u8 who_init; + __le16 max_msix_vectors; + __le16 max_outstanding_requests; + __le16 product_id; + __le16 ioc_request_frame_size; + __le16 reply_frame_size; + __le16 ioc_exceptions; + __le16 max_persistent_id; + u8 sge_modifier_mask; + u8 sge_modifier_value; + u8 sge_modifier_shift; + u8 protocol_flags; + __le16 max_sas_initiators; + __le16 max_data_length; + __le16 max_sas_expanders; + __le16 max_enclosures; + __le16 min_dev_handle; + __le16 max_dev_handle; + __le16 max_pcie_switches; + __le16 max_nvme; + __le16 reserved38; + __le16 max_vds; + __le16 max_host_pds; + __le16 max_adv_host_pds; + __le16 max_raid_pds; + __le16 max_posted_cmd_buffers; + __le32 flags; + __le16 max_operational_request_queues; + __le16 max_operational_reply_queues; + __le16 shutdown_timeout; + __le16 reserved4e; + __le32 diag_trace_size; + __le32 diag_fw_size; + __le32 diag_driver_size; + u8 max_host_pd_ns_count; + u8 max_adv_host_pd_ns_count; + u8 max_raidpd_ns_count; + u8 max_devices_per_throttle_group; + __le16 io_throttle_data_length; + __le16 max_io_throttle_group; + __le16 io_throttle_low; + __le16 io_throttle_high; +}; +#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000) +#define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000) +#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x80000000) +#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_MASK (0x00000600) +#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_FIXED_THRESHOLD (0x00000000) +#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_OUTSTANDING_IO (0x00000200) +#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_CAPABLE (0x00000100) +#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_ENABLED (0x00000080) +#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_FW_ENABLED (0x00000040) +#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_DRIVER_ENABLED (0x00000020) +#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD_ENABLED (0x00000010) +#define MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE (0x00000008) +#define MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED (0x00000002) 
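[Editorial aside, not part of the patch: the ioc_capabilities and product_id words in struct mpi3_ioc_facts_data above are little-endian fields, so a consumer is expected to byte-swap them before applying the capability and PID masks defined around this point. A minimal illustrative sketch follows, assuming the usual kernel le32_to_cpu/le16_to_cpu helpers and linux/types.h; the helper names are hypothetical, not taken from the driver.]

/* Illustrative sketch only -- not part of the upstream patch. */
static inline bool mpi3_facts_is_multipath(const struct mpi3_ioc_facts_data *facts)
{
	u32 caps = le32_to_cpu(facts->ioc_capabilities);

	/* Test one of the capability bits defined above. */
	return caps & MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED;
}

static inline u16 mpi3_facts_product_family(const struct mpi3_ioc_facts_data *facts)
{
	u16 pid = le16_to_cpu(facts->product_id);

	/* Extract the family field using the PID mask/shift pair defined below. */
	return (pid & MPI3_IOCFACTS_PID_FAMILY_MASK) >> MPI3_IOCFACTS_PID_FAMILY_SHIFT;
}

[The same swap-then-mask pattern applies to the other __le16/__le32 fields and flag groups in these headers.]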
+#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_SUPPORTED (0x00000001) +#define MPI3_IOCFACTS_PID_TYPE_MASK (0xf000) +#define MPI3_IOCFACTS_PID_TYPE_SHIFT (12) +#define MPI3_IOCFACTS_PID_PRODUCT_MASK (0x0f00) +#define MPI3_IOCFACTS_PID_PRODUCT_SHIFT (8) +#define MPI3_IOCFACTS_PID_FAMILY_MASK (0x00ff) +#define MPI3_IOCFACTS_PID_FAMILY_SHIFT (0) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_REKEY (0x2000) +#define MPI3_IOCFACTS_EXCEPT_SAS_DISABLED (0x1000) +#define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_MGMT (0x0100) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_MGMT (0x0200) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_MGMT (0x0300) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_OOB (0x0400) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_OOB (0x0500) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_OOB (0x0600) +#define MPI3_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0080) +#define MPI3_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0040) +#define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020) +#define MPI3_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0010) +#define MPI3_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0008) +#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001) +#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000) +#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001) +#define MPI3_IOCFACTS_PROTOCOL_SAS (0x0010) +#define MPI3_IOCFACTS_PROTOCOL_SATA (0x0008) +#define MPI3_IOCFACTS_PROTOCOL_NVME (0x0004) +#define MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002) +#define MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001) +#define MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED (0x0000) +#define MPI3_IOCFACTS_FLAGS_SIGNED_NVDATA_REQUIRED (0x00010000) +#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000ff00) +#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8) +#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK (0x00000030) +#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_NOT_STARTED (0x00000000) +#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_IN_PROGRESS (0x00000010) +#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_COMPLETE (0x00000020) +#define MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK (0x0000000f) +#define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000) +#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002) +#define MPI3_IOCFACTS_IO_THROTTLE_DATA_LENGTH_NOT_REQUIRED (0x0000) +#define MPI3_IOCFACTS_MAX_IO_THROTTLE_GROUP_NOT_REQUIRED (0x0000) +struct mpi3_mgmt_passthrough_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + __le16 reserved0a; + __le32 reserved0c[5]; + union mpi3_sge_union command_sgl; + union mpi3_sge_union response_sgl; +}; + +struct mpi3_create_request_queue_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + u8 flags; + u8 burst; + __le16 size; + __le16 queue_id; + __le16 reply_queue_id; + __le16 reserved12; + __le32 reserved14; + __le64 base_address; +}; + +#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80) +#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80) +#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00) +#define MPI3_CREATE_REQUEST_QUEUE_SIZE_MINIMUM (2) +struct mpi3_delete_request_queue_request { + __le16 host_tag; + u8 ioc_use_only02; + 
u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + __le16 queue_id; +}; + +struct mpi3_create_reply_queue_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + u8 flags; + u8 reserved0b; + __le16 size; + __le16 queue_id; + __le16 msix_index; + __le16 reserved12; + __le32 reserved14; + __le64 base_address; +}; + +#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_MASK (0x80) +#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80) +#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00) +#define MPI3_CREATE_REPLY_QUEUE_FLAGS_COALESCE_DISABLE (0x02) +#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01) +#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00) +#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01) +#define MPI3_CREATE_REPLY_QUEUE_SIZE_MINIMUM (2) +struct mpi3_delete_reply_queue_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + __le16 queue_id; +}; + +struct mpi3_port_enable_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + __le16 reserved0a; +}; + +#define MPI3_EVENT_LOG_DATA (0x01) +#define MPI3_EVENT_CHANGE (0x02) +#define MPI3_EVENT_GPIO_INTERRUPT (0x04) +#define MPI3_EVENT_CABLE_MGMT (0x06) +#define MPI3_EVENT_DEVICE_ADDED (0x07) +#define MPI3_EVENT_DEVICE_INFO_CHANGED (0x08) +#define MPI3_EVENT_PREPARE_FOR_RESET (0x09) +#define MPI3_EVENT_COMP_IMAGE_ACT_START (0x0a) +#define MPI3_EVENT_ENCL_DEVICE_ADDED (0x0b) +#define MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE (0x0c) +#define MPI3_EVENT_DEVICE_STATUS_CHANGE (0x0d) +#define MPI3_EVENT_ENERGY_PACK_CHANGE (0x0e) +#define MPI3_EVENT_SAS_DISCOVERY (0x11) +#define MPI3_EVENT_SAS_BROADCAST_PRIMITIVE (0x12) +#define MPI3_EVENT_SAS_NOTIFY_PRIMITIVE (0x13) +#define MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x14) +#define MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW (0x15) +#define MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x16) +#define MPI3_EVENT_SAS_PHY_COUNTER (0x18) +#define MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x19) +#define MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x20) +#define MPI3_EVENT_PCIE_ENUMERATION (0x22) +#define MPI3_EVENT_PCIE_ERROR_THRESHOLD (0x23) +#define MPI3_EVENT_HARD_RESET_RECEIVED (0x40) +#define MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE (0x50) +#define MPI3_EVENT_MIN_PRODUCT_SPECIFIC (0x60) +#define MPI3_EVENT_MAX_PRODUCT_SPECIFIC (0x7f) +#define MPI3_EVENT_NOTIFY_EVENTMASK_WORDS (4) +struct mpi3_event_notification_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + __le16 reserved0a; + __le16 sas_broadcast_primitive_masks; + __le16 sas_notify_primitive_masks; + __le32 event_masks[MPI3_EVENT_NOTIFY_EVENTMASK_WORDS]; +}; + +struct mpi3_event_notification_reply { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 ioc_use_only08; + __le16 ioc_status; + __le32 ioc_log_info; + u8 event_data_length; + u8 event; + __le16 ioc_change_count; + __le32 event_context; + __le32 event_data[1]; +}; + +#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK (0x01) +#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED (0x01) +#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_NOT_REQUIRED (0x00) +#define 
MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_MASK (0x02) +#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_ORIGINAL (0x00) +#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_REPLAY (0x02) +struct mpi3_event_data_gpio_interrupt { + u8 gpio_num; + u8 reserved01[3]; +}; +struct mpi3_event_data_cable_management { + __le32 active_cable_power_requirement; + u8 status; + u8 receptacle_id; + __le16 reserved06; +}; + +#define MPI3_EVENT_CABLE_MGMT_ACT_CABLE_PWR_INVALID (0xffffffff) +#define MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER (0x00) +#define MPI3_EVENT_CABLE_MGMT_STATUS_PRESENT (0x01) +#define MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED (0x02) +struct mpi3_event_ack_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + __le16 reserved0a; + u8 event; + u8 reserved0d[3]; + __le32 event_context; +}; + +struct mpi3_event_data_prepare_for_reset { + u8 reason_code; + u8 reserved01; + __le16 reserved02; +}; + +#define MPI3_EVENT_PREPARE_RESET_RC_START (0x01) +#define MPI3_EVENT_PREPARE_RESET_RC_ABORT (0x02) +struct mpi3_event_data_comp_image_activation { + __le32 reserved00; +}; + +struct mpi3_event_data_device_status_change { + __le16 task_tag; + u8 reason_code; + u8 io_unit_port; + __le16 parent_dev_handle; + __le16 dev_handle; + __le64 wwid; + u8 lun[8]; +}; + +#define MPI3_EVENT_DEV_STAT_RC_MOVED (0x01) +#define MPI3_EVENT_DEV_STAT_RC_HIDDEN (0x02) +#define MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN (0x03) +#define MPI3_EVENT_DEV_STAT_RC_ASYNC_NOTIFICATION (0x04) +#define MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT (0x20) +#define MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP (0x21) +#define MPI3_EVENT_DEV_STAT_RC_INT_TASK_ABORT_STRT (0x22) +#define MPI3_EVENT_DEV_STAT_RC_INT_TASK_ABORT_CMP (0x23) +#define MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT (0x24) +#define MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP (0x25) +#define MPI3_EVENT_DEV_STAT_RC_PCIE_HOT_RESET_FAILED (0x30) +#define MPI3_EVENT_DEV_STAT_RC_EXPANDER_REDUCED_FUNC_STRT (0x40) +#define MPI3_EVENT_DEV_STAT_RC_EXPANDER_REDUCED_FUNC_CMP (0x41) +#define MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING (0x50) +struct mpi3_event_data_energy_pack_change { + __le32 reserved00; + __le16 shutdown_timeout; + __le16 reserved06; +}; + +struct mpi3_event_data_sas_discovery { + u8 flags; + u8 reason_code; + u8 io_unit_port; + u8 reserved03; + __le32 discovery_status; +}; + +#define MPI3_EVENT_SAS_DISC_FLAGS_DEVICE_CHANGE (0x02) +#define MPI3_EVENT_SAS_DISC_FLAGS_IN_PROGRESS (0x01) +#define MPI3_EVENT_SAS_DISC_RC_STARTED (0x01) +#define MPI3_EVENT_SAS_DISC_RC_COMPLETED (0x02) +#define MPI3_SAS_DISC_STATUS_MAX_ENCLOSURES_EXCEED (0x80000000) +#define MPI3_SAS_DISC_STATUS_MAX_EXPANDERS_EXCEED (0x40000000) +#define MPI3_SAS_DISC_STATUS_MAX_DEVICES_EXCEED (0x20000000) +#define MPI3_SAS_DISC_STATUS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI3_SAS_DISC_STATUS_INVALID_CEI (0x00010000) +#define MPI3_SAS_DISC_STATUS_FECEI_MISMATCH (0x00008000) +#define MPI3_SAS_DISC_STATUS_MULTIPLE_DEVICES_IN_SLOT (0x00004000) +#define MPI3_SAS_DISC_STATUS_NECEI_MISMATCH (0x00002000) +#define MPI3_SAS_DISC_STATUS_TOO_MANY_SLOTS (0x00001000) +#define MPI3_SAS_DISC_STATUS_EXP_MULTI_SUBTRACTIVE (0x00000800) +#define MPI3_SAS_DISC_STATUS_MULTI_PORT_DOMAIN (0x00000400) +#define MPI3_SAS_DISC_STATUS_TABLE_TO_SUBTRACTIVE_LINK (0x00000200) +#define MPI3_SAS_DISC_STATUS_UNSUPPORTED_DEVICE (0x00000100) +#define MPI3_SAS_DISC_STATUS_TABLE_LINK (0x00000080) +#define 
MPI3_SAS_DISC_STATUS_SUBTRACTIVE_LINK (0x00000040) +#define MPI3_SAS_DISC_STATUS_SMP_CRC_ERROR (0x00000020) +#define MPI3_SAS_DISC_STATUS_SMP_FUNCTION_FAILED (0x00000010) +#define MPI3_SAS_DISC_STATUS_SMP_TIMEOUT (0x00000008) +#define MPI3_SAS_DISC_STATUS_MULTIPLE_PORTS (0x00000004) +#define MPI3_SAS_DISC_STATUS_INVALID_SAS_ADDRESS (0x00000002) +#define MPI3_SAS_DISC_STATUS_LOOP_DETECTED (0x00000001) +struct mpi3_event_data_sas_broadcast_primitive { + u8 phy_num; + u8 io_unit_port; + u8 port_width; + u8 primitive; +}; + +#define MPI3_EVENT_BROADCAST_PRIMITIVE_CHANGE (0x01) +#define MPI3_EVENT_BROADCAST_PRIMITIVE_SES (0x02) +#define MPI3_EVENT_BROADCAST_PRIMITIVE_EXPANDER (0x03) +#define MPI3_EVENT_BROADCAST_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04) +#define MPI3_EVENT_BROADCAST_PRIMITIVE_RESERVED3 (0x05) +#define MPI3_EVENT_BROADCAST_PRIMITIVE_RESERVED4 (0x06) +#define MPI3_EVENT_BROADCAST_PRIMITIVE_CHANGE0_RESERVED (0x07) +#define MPI3_EVENT_BROADCAST_PRIMITIVE_CHANGE1_RESERVED (0x08) +struct mpi3_event_data_sas_notify_primitive { + u8 phy_num; + u8 io_unit_port; + u8 reserved02; + u8 primitive; +}; + +#define MPI3_EVENT_NOTIFY_PRIMITIVE_ENABLE_SPINUP (0x01) +#define MPI3_EVENT_NOTIFY_PRIMITIVE_POWER_LOSS_EXPECTED (0x02) +#define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED1 (0x03) +#define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED2 (0x04) +#ifndef MPI3_EVENT_SAS_TOPO_PHY_COUNT +#define MPI3_EVENT_SAS_TOPO_PHY_COUNT (1) +#endif +struct mpi3_event_sas_topo_phy_entry { + __le16 attached_dev_handle; + u8 link_rate; + u8 status; +}; + +#define MPI3_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xf0) +#define MPI3_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4) +#define MPI3_EVENT_SAS_TOPO_LR_PREV_MASK (0x0f) +#define MPI3_EVENT_SAS_TOPO_LR_PREV_SHIFT (0) +#define MPI3_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00) +#define MPI3_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01) +#define MPI3_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02) +#define MPI3_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03) +#define MPI3_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04) +#define MPI3_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05) +#define MPI3_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06) +#define MPI3_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0a) +#define MPI3_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0b) +#define MPI3_EVENT_SAS_TOPO_LR_RATE_22_5 (0x0c) +#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_MASK (0xc0) +#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_SHIFT (6) +#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_ACCESSIBLE (0x00) +#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_NO_EXIST (0x40) +#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_VACANT (0x80) +#define MPI3_EVENT_SAS_TOPO_PHY_RC_MASK (0x0f) +#define MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING (0x02) +#define MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED (0x03) +#define MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE (0x04) +#define MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING (0x05) +#define MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING (0x06) +struct mpi3_event_data_sas_topology_change_list { + __le16 enclosure_handle; + __le16 expander_dev_handle; + u8 num_phys; + u8 reserved05[3]; + u8 num_entries; + u8 start_phy_num; + u8 exp_status; + u8 io_unit_port; + struct mpi3_event_sas_topo_phy_entry phy_entry[MPI3_EVENT_SAS_TOPO_PHY_COUNT]; +}; + +#define MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00) +#define MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02) +#define MPI3_EVENT_SAS_TOPO_ES_RESPONDING (0x03) +#define MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04) +struct mpi3_event_data_sas_phy_counter { + __le64 time_stamp; + __le32 reserved08; + u8 phy_event_code; + u8 phy_num; + __le16 reserved0e; + __le32 
phy_event_info; + u8 counter_type; + u8 threshold_window; + u8 time_units; + u8 reserved17; + __le32 event_threshold; + __le16 threshold_flags; + __le16 reserved1e; +}; + +struct mpi3_event_data_sas_device_disc_err { + __le16 dev_handle; + u8 reason_code; + u8 io_unit_port; + __le32 reserved04; + __le64 sas_address; +}; + +#define MPI3_EVENT_SAS_DISC_ERR_RC_SMP_FAILED (0x01) +#define MPI3_EVENT_SAS_DISC_ERR_RC_SMP_TIMEOUT (0x02) +struct mpi3_event_data_pcie_enumeration { + u8 flags; + u8 reason_code; + u8 io_unit_port; + u8 reserved03; + __le32 enumeration_status; +}; + +#define MPI3_EVENT_PCIE_ENUM_FLAGS_DEVICE_CHANGE (0x02) +#define MPI3_EVENT_PCIE_ENUM_FLAGS_IN_PROGRESS (0x01) +#define MPI3_EVENT_PCIE_ENUM_RC_STARTED (0x01) +#define MPI3_EVENT_PCIE_ENUM_RC_COMPLETED (0x02) +#define MPI3_EVENT_PCIE_ENUM_ES_MAX_SWITCH_DEPTH_EXCEED (0x80000000) +#define MPI3_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000) +#define MPI3_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000) +#define MPI3_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000) +#ifndef MPI3_EVENT_PCIE_TOPO_PORT_COUNT +#define MPI3_EVENT_PCIE_TOPO_PORT_COUNT (1) +#endif +struct mpi3_event_pcie_topo_port_entry { + __le16 attached_dev_handle; + u8 port_status; + u8 reserved03; + u8 current_port_info; + u8 reserved05; + u8 previous_port_info; + u8 reserved07; +}; + +#define MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING (0x02) +#define MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED (0x03) +#define MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE (0x04) +#define MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05) +#define MPI3_EVENT_PCIE_TOPO_PS_RESPONDING (0x06) +#define MPI3_EVENT_PCIE_TOPO_PI_LANES_MASK (0xf0) +#define MPI3_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00) +#define MPI3_EVENT_PCIE_TOPO_PI_LANES_1 (0x10) +#define MPI3_EVENT_PCIE_TOPO_PI_LANES_2 (0x20) +#define MPI3_EVENT_PCIE_TOPO_PI_LANES_4 (0x30) +#define MPI3_EVENT_PCIE_TOPO_PI_LANES_8 (0x40) +#define MPI3_EVENT_PCIE_TOPO_PI_LANES_16 (0x50) +#define MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0f) +#define MPI3_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00) +#define MPI3_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01) +#define MPI3_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02) +#define MPI3_EVENT_PCIE_TOPO_PI_RATE_5_0 (0x03) +#define MPI3_EVENT_PCIE_TOPO_PI_RATE_8_0 (0x04) +#define MPI3_EVENT_PCIE_TOPO_PI_RATE_16_0 (0x05) +#define MPI3_EVENT_PCIE_TOPO_PI_RATE_32_0 (0x06) +struct mpi3_event_data_pcie_topology_change_list { + __le16 enclosure_handle; + __le16 switch_dev_handle; + u8 num_ports; + u8 reserved05[3]; + u8 num_entries; + u8 start_port_num; + u8 switch_status; + u8 io_unit_port; + __le32 reserved0c; + struct mpi3_event_pcie_topo_port_entry port_entry[MPI3_EVENT_PCIE_TOPO_PORT_COUNT]; +}; + +#define MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00) +#define MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02) +#define MPI3_EVENT_PCIE_TOPO_SS_RESPONDING (0x03) +#define MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04) +struct mpi3_event_data_pcie_error_threshold { + __le64 timestamp; + u8 reason_code; + u8 port; + __le16 switch_dev_handle; + u8 error; + u8 action; + __le16 threshold_count; + __le16 attached_dev_handle; + __le16 reserved12; + __le32 reserved14; +}; + +#define MPI3_EVENT_PCI_ERROR_RC_THRESHOLD_EXCEEDED (0x00) +#define MPI3_EVENT_PCI_ERROR_RC_ESCALATION (0x01) +struct mpi3_event_data_sas_init_dev_status_change { + u8 reason_code; + u8 io_unit_port; + __le16 dev_handle; + __le32 reserved04; + __le64 sas_address; +}; + +#define MPI3_EVENT_SAS_INIT_RC_ADDED (0x01) +#define MPI3_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02) +struct 
mpi3_event_data_sas_init_table_overflow { + __le16 max_init; + __le16 current_init; + __le32 reserved04; + __le64 sas_address; +}; + +struct mpi3_event_data_hard_reset_received { + u8 reserved00; + u8 io_unit_port; + __le16 reserved02; +}; + +struct mpi3_event_data_diag_buffer_status_change { + u8 type; + u8 reason_code; + __le16 reserved02; + __le32 reserved04; +}; + +#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED (0x01) +#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED (0x02) +#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED (0x03) +#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200) +#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100) +#define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080) +#define MPI3_PEL_LOCALE_FLAGS_CONFIGURATION (0x0040) +#define MPI3_PEL_LOCALE_FLAGS_CONTROLER (0x0020) +#define MPI3_PEL_LOCALE_FLAGS_SAS (0x0010) +#define MPI3_PEL_LOCALE_FLAGS_EPACK (0x0008) +#define MPI3_PEL_LOCALE_FLAGS_ENCLOSURE (0x0004) +#define MPI3_PEL_LOCALE_FLAGS_PD (0x0002) +#define MPI3_PEL_LOCALE_FLAGS_VD (0x0001) +#define MPI3_PEL_CLASS_DEBUG (0x00) +#define MPI3_PEL_CLASS_PROGRESS (0x01) +#define MPI3_PEL_CLASS_INFORMATIONAL (0x02) +#define MPI3_PEL_CLASS_WARNING (0x03) +#define MPI3_PEL_CLASS_CRITICAL (0x04) +#define MPI3_PEL_CLASS_FATAL (0x05) +#define MPI3_PEL_CLASS_FAULT (0x06) +#define MPI3_PEL_CLEARTYPE_CLEAR (0x00) +#define MPI3_PEL_WAITTIME_INFINITE_WAIT (0x00) +#define MPI3_PEL_ACTION_GET_SEQNUM (0x01) +#define MPI3_PEL_ACTION_MARK_CLEAR (0x02) +#define MPI3_PEL_ACTION_GET_LOG (0x03) +#define MPI3_PEL_ACTION_GET_COUNT (0x04) +#define MPI3_PEL_ACTION_WAIT (0x05) +#define MPI3_PEL_ACTION_ABORT (0x06) +#define MPI3_PEL_ACTION_GET_PRINT_STRINGS (0x07) +#define MPI3_PEL_ACTION_ACKNOWLEDGE (0x08) +#define MPI3_PEL_STATUS_SUCCESS (0x00) +#define MPI3_PEL_STATUS_NOT_FOUND (0x01) +#define MPI3_PEL_STATUS_ABORTED (0x02) +#define MPI3_PEL_STATUS_NOT_READY (0x03) +struct mpi3_pel_seq { + __le32 newest; + __le32 oldest; + __le32 clear; + __le32 shutdown; + __le32 boot; + __le32 last_acknowledged; +}; + +struct mpi3_pel_entry { + __le64 time_stamp; + __le32 sequence_number; + __le16 log_code; + __le16 arg_type; + __le16 locale; + u8 class; + u8 flags; + u8 ext_num; + u8 num_exts; + u8 arg_data_size; + u8 fixed_format_strings_size; + __le32 reserved18[2]; + __le32 pel_info[24]; +}; + +#define MPI3_PEL_FLAGS_COMPLETE_RESET_NEEDED (0x02) +#define MPI3_PEL_FLAGS_ACK_NEEDED (0x01) +struct mpi3_pel_list { + __le32 log_count; + __le32 reserved04; + struct mpi3_pel_entry entry[1]; +}; + +struct mpi3_pel_arg_map { + u8 arg_type; + u8 length; + __le16 start_location; +}; + +#define MPI3_PEL_ARG_MAP_ARG_TYPE_APPEND_STRING (0x00) +#define MPI3_PEL_ARG_MAP_ARG_TYPE_INTEGER (0x01) +#define MPI3_PEL_ARG_MAP_ARG_TYPE_STRING (0x02) +#define MPI3_PEL_ARG_MAP_ARG_TYPE_BIT_FIELD (0x03) +struct mpi3_pel_print_string { + __le16 log_code; + __le16 string_length; + u8 num_arg_map; + u8 reserved05[3]; + struct mpi3_pel_arg_map arg_map[1]; +}; + +struct mpi3_pel_print_string_list { + __le32 num_print_strings; + __le32 residual_bytes_remain; + __le32 reserved08[2]; + struct mpi3_pel_print_string print_string[1]; +}; + +#ifndef MPI3_PEL_ACTION_SPECIFIC_MAX +#define MPI3_PEL_ACTION_SPECIFIC_MAX (1) +#endif +struct mpi3_pel_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + u8 action; + u8 reserved0b; + __le32 action_specific[MPI3_PEL_ACTION_SPECIFIC_MAX]; +}; + +struct 
mpi3_pel_req_action_get_sequence_numbers { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + u8 action; + u8 reserved0b; + __le32 reserved0c[5]; + union mpi3_sge_union sgl; +}; + +struct mpi3_pel_req_action_clear_log_marker { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + u8 action; + u8 reserved0b; + u8 clear_type; + u8 reserved0d[3]; +}; + +struct mpi3_pel_req_action_get_log { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + u8 action; + u8 reserved0b; + __le32 starting_sequence_number; + __le16 locale; + u8 class; + u8 reserved13; + __le32 reserved14[3]; + union mpi3_sge_union sgl; +}; + +struct mpi3_pel_req_action_get_count { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + u8 action; + u8 reserved0b; + __le32 starting_sequence_number; + __le16 locale; + u8 class; + u8 reserved13; + __le32 reserved14[3]; + union mpi3_sge_union sgl; +}; + +struct mpi3_pel_req_action_wait { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + u8 action; + u8 reserved0b; + __le32 starting_sequence_number; + __le16 locale; + u8 class; + u8 reserved13; + __le16 wait_time; + __le16 reserved16; + __le32 reserved18[2]; +}; + +struct mpi3_pel_req_action_abort { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + u8 action; + u8 reserved0b; + __le32 reserved0c; + __le16 abort_host_tag; + __le16 reserved12; + __le32 reserved14; +}; + +struct mpi3_pel_req_action_get_print_strings { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + u8 action; + u8 reserved0b; + __le32 reserved0c; + __le16 start_log_code; + __le16 reserved12; + __le32 reserved14[3]; + union mpi3_sge_union sgl; +}; + +struct mpi3_pel_req_action_acknowledge { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + u8 action; + u8 reserved0b; + __le32 sequence_number; + __le32 reserved10; +}; + +#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_MASK (0x03) +#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_NO_GUIDANCE (0x00) +#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_CONTINUE_OP (0x01) +#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_TRANSITION_TO_FAULT (0x02) +struct mpi3_pel_reply { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 ioc_use_only08; + __le16 ioc_status; + __le32 ioc_log_info; + u8 action; + u8 reserved11; + __le16 reserved12; + __le16 pe_log_status; + __le16 reserved16; + __le32 transfer_length; +}; + +struct mpi3_ci_download_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + u8 action; + u8 reserved0b; + __le32 signature1; + __le32 total_image_size; + __le32 image_offset; + __le32 segment_size; + __le32 reserved1c; + union mpi3_sge_union sgl; +}; + +#define MPI3_CI_DOWNLOAD_MSGFLAGS_LAST_SEGMENT (0x80) +#define 
MPI3_CI_DOWNLOAD_MSGFLAGS_FORCE_FMC_ENABLE (0x40) +#define MPI3_CI_DOWNLOAD_MSGFLAGS_SIGNED_NVDATA (0x20) +#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MASK (0x03) +#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_FAST (0x00) +#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MEDIUM (0x01) +#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SLOW (0x02) +#define MPI3_CI_DOWNLOAD_ACTION_DOWNLOAD (0x01) +#define MPI3_CI_DOWNLOAD_ACTION_ONLINE_ACTIVATION (0x02) +#define MPI3_CI_DOWNLOAD_ACTION_OFFLINE_ACTIVATION (0x03) +#define MPI3_CI_DOWNLOAD_ACTION_GET_STATUS (0x04) +#define MPI3_CI_DOWNLOAD_ACTION_CANCEL_OFFLINE_ACTIVATION (0x05) +struct mpi3_ci_download_reply { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 ioc_use_only08; + __le16 ioc_status; + __le32 ioc_log_info; + u8 flags; + u8 cache_dirty; + u8 pending_count; + u8 reserved13; +}; + +#define MPI3_CI_DOWNLOAD_FLAGS_DOWNLOAD_IN_PROGRESS (0x80) +#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_FAILURE (0x40) +#define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20) +#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10) +#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e) +#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00) +#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_AWAITING (0x02) +#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_ONLINE_PENDING (0x04) +#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_OFFLINE_PENDING (0x06) +#define MPI3_CI_DOWNLOAD_FLAGS_COMPATIBLE (0x01) +struct mpi3_ci_upload_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + __le16 reserved0a; + __le32 signature1; + __le32 reserved10; + __le32 image_offset; + __le32 segment_size; + __le32 reserved1c; + union mpi3_sge_union sgl; +}; + +#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_MASK (0x01) +#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY (0x00) +#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SECONDARY (0x01) +#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_MASK (0x02) +#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_FLASH (0x00) +#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_EXECUTABLE (0x02) +#define MPI3_CTRL_OP_FORCE_FULL_DISCOVERY (0x01) +#define MPI3_CTRL_OP_LOOKUP_MAPPING (0x02) +#define MPI3_CTRL_OP_UPDATE_TIMESTAMP (0x04) +#define MPI3_CTRL_OP_GET_TIMESTAMP (0x05) +#define MPI3_CTRL_OP_GET_IOC_CHANGE_COUNT (0x06) +#define MPI3_CTRL_OP_CHANGE_PROFILE (0x07) +#define MPI3_CTRL_OP_REMOVE_DEVICE (0x10) +#define MPI3_CTRL_OP_CLOSE_PERSISTENT_CONNECTION (0x11) +#define MPI3_CTRL_OP_HIDDEN_ACK (0x12) +#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS (0x13) +#define MPI3_CTRL_OP_SEND_SAS_PRIMITIVE (0x20) +#define MPI3_CTRL_OP_SAS_PHY_CONTROL (0x21) +#define MPI3_CTRL_OP_READ_INTERNAL_BUS (0x23) +#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS (0x24) +#define MPI3_CTRL_OP_PCIE_LINK_CONTROL (0x30) +#define MPI3_CTRL_OP_LOOKUP_MAPPING_PARAM8_LOOKUP_METHOD_INDEX (0x00) +#define MPI3_CTRL_OP_UPDATE_TIMESTAMP_PARAM64_TIMESTAMP_INDEX (0x00) +#define MPI3_CTRL_OP_CHANGE_PROFILE_PARAM8_PROFILE_ID_INDEX (0x00) +#define MPI3_CTRL_OP_REMOVE_DEVICE_PARAM16_DEVHANDLE_INDEX (0x00) +#define MPI3_CTRL_OP_CLOSE_PERSIST_CONN_PARAM16_DEVHANDLE_INDEX (0x00) +#define MPI3_CTRL_OP_HIDDEN_ACK_PARAM16_DEVHANDLE_INDEX (0x00) +#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS_PARAM16_DEVHANDLE_INDEX (0x00) +#define MPI3_CTRL_OP_SEND_SAS_PRIM_PARAM8_PHY_INDEX (0x00) +#define MPI3_CTRL_OP_SEND_SAS_PRIM_PARAM8_PRIMSEQ_INDEX 
(0x01) +#define MPI3_CTRL_OP_SEND_SAS_PRIM_PARAM32_PRIMITIVE_INDEX (0x00) +#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_ACTION_INDEX (0x00) +#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_PHY_INDEX (0x01) +#define MPI3_CTRL_OP_READ_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00) +#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00) +#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM32_VALUE_INDEX (0x00) +#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_ACTION_INDEX (0x00) +#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_LINK_INDEX (0x01) +#define MPI3_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01) +#define MPI3_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02) +#define MPI3_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03) +#define MPI3_CTRL_LOOKUP_METHOD_PERSISTENT_ID (0x04) +#define MPI3_CTRL_LOOKUP_METHOD_WWIDADDR_PARAM16_DEVH_INDEX (0) +#define MPI3_CTRL_LOOKUP_METHOD_WWIDADDR_PARAM64_WWID_INDEX (0) +#define MPI3_CTRL_LOOKUP_METHOD_ENCLSLOT_PARAM16_SLOTNUM_INDEX (0) +#define MPI3_CTRL_LOOKUP_METHOD_ENCLSLOT_PARAM64_ENCLOSURELID_INDEX (0) +#define MPI3_CTRL_LOOKUP_METHOD_SASDEVNAME_PARAM16_DEVH_INDEX (0) +#define MPI3_CTRL_LOOKUP_METHOD_SASDEVNAME_PARAM64_DEVNAME_INDEX (0) +#define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_DEVH_INDEX (0) +#define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_PERSISTENT_ID_INDEX (1) +#define MPI3_CTRL_LOOKUP_METHOD_VALUE16_DEVH_INDEX (0) +#define MPI3_CTRL_GET_TIMESTAMP_VALUE64_TIMESTAMP_INDEX (0) +#define MPI3_CTRL_GET_IOC_CHANGE_COUNT_VALUE16_CHANGECOUNT_INDEX (0) +#define MPI3_CTRL_READ_INTERNAL_BUS_VALUE32_VALUE_INDEX (0) +#define MPI3_CTRL_PRIMFLAGS_SINGLE (0x01) +#define MPI3_CTRL_PRIMFLAGS_TRIPLE (0x03) +#define MPI3_CTRL_PRIMFLAGS_REDUNDANT (0x06) +#define MPI3_CTRL_ACTION_NOP (0x00) +#define MPI3_CTRL_ACTION_LINK_RESET (0x01) +#define MPI3_CTRL_ACTION_HARD_RESET (0x02) +#define MPI3_CTRL_ACTION_CLEAR_ERROR_LOG (0x05) +struct mpi3_iounit_control_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + u8 reserved0a; + u8 operation; + __le32 reserved0c; + __le64 param64[2]; + __le32 param32[4]; + __le16 param16[4]; + u8 param8[8]; +}; + +struct mpi3_iounit_control_reply { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 ioc_use_only08; + __le16 ioc_status; + __le32 ioc_log_info; + __le64 value64[2]; + __le32 value32[4]; + __le16 value16[4]; + u8 value8[8]; +}; +#endif diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h new file mode 100644 index 000000000..7c15e5851 --- /dev/null +++ b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2016-2023 Broadcom Inc. All rights reserved. 
+ * + */ +#ifndef MPI30_PCI_H +#define MPI30_PCI_H 1 +#ifndef MPI3_NVME_ENCAP_CMD_MAX +#define MPI3_NVME_ENCAP_CMD_MAX (1) +#endif +#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_MASK (0x0002) +#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY (0x0000) +#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_ALL (0x0002) +#define MPI3_NVME_FLAGS_SUBMISSIONQ_MASK (0x0001) +#define MPI3_NVME_FLAGS_SUBMISSIONQ_IO (0x0000) +#define MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0001) + +#endif diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h new file mode 100644 index 000000000..4a93c67d3 --- /dev/null +++ b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2016-2023 Broadcom Inc. All rights reserved. + */ +#ifndef MPI30_SAS_H +#define MPI30_SAS_H 1 +#define MPI3_SAS_DEVICE_INFO_SSP_TARGET (0x00000100) +#define MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET (0x00000080) +#define MPI3_SAS_DEVICE_INFO_SMP_TARGET (0x00000040) +#define MPI3_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000020) +#define MPI3_SAS_DEVICE_INFO_STP_INITIATOR (0x00000010) +#define MPI3_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000008) +#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK (0x00000007) +#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_NO_DEVICE (0x00000000) +#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE (0x00000001) +#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER (0x00000002) +struct mpi3_smp_passthrough_request { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + u8 reserved0a; + u8 io_unit_port; + __le32 reserved0c[3]; + __le64 sas_address; + struct mpi3_sge_common request_sge; + struct mpi3_sge_common response_sge; +}; + +struct mpi3_smp_passthrough_reply { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 ioc_use_only08; + __le16 ioc_status; + __le32 ioc_log_info; + __le16 response_data_length; + __le16 reserved12; +}; +#endif diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h new file mode 100644 index 000000000..1e0a3dcaf --- /dev/null +++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h @@ -0,0 +1,470 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2016-2023 Broadcom Inc. All rights reserved. 
+ */ +#ifndef MPI30_TRANSPORT_H +#define MPI30_TRANSPORT_H 1 +struct mpi3_version_struct { + u8 dev; + u8 unit; + u8 minor; + u8 major; +}; + +union mpi3_version_union { + struct mpi3_version_struct mpi3_version; + __le32 word; +}; + +#define MPI3_VERSION_MAJOR (3) +#define MPI3_VERSION_MINOR (0) +#define MPI3_VERSION_UNIT (28) +#define MPI3_VERSION_DEV (0) +#define MPI3_DEVHANDLE_INVALID (0xffff) +struct mpi3_sysif_oper_queue_indexes { + __le16 producer_index; + __le16 reserved02; + __le16 consumer_index; + __le16 reserved06; +}; + +struct mpi3_sysif_registers { + __le64 ioc_information; + union mpi3_version_union version; + __le32 reserved0c[2]; + __le32 ioc_configuration; + __le32 reserved18; + __le32 ioc_status; + __le32 reserved20; + __le32 admin_queue_num_entries; + __le64 admin_request_queue_address; + __le64 admin_reply_queue_address; + __le32 reserved38[2]; + __le32 coalesce_control; + __le32 reserved44[1007]; + __le16 admin_request_queue_pi; + __le16 reserved1002; + __le16 admin_reply_queue_ci; + __le16 reserved1006; + struct mpi3_sysif_oper_queue_indexes oper_queue_indexes[383]; + __le32 reserved1c00; + __le32 write_sequence; + __le32 host_diagnostic; + __le32 reserved1c0c; + __le32 fault; + __le32 fault_info[3]; + __le32 reserved1c20[4]; + __le64 hcb_address; + __le32 hcb_size; + __le32 reserved1c3c; + __le32 reply_free_host_index; + __le32 sense_buffer_free_host_index; + __le32 reserved1c48[2]; + __le64 diag_rw_data; + __le64 diag_rw_address; + __le16 diag_rw_control; + __le16 diag_rw_status; + __le32 reserved1c64[35]; + __le32 scratchpad[4]; + __le32 reserved1d00[192]; + __le32 device_assigned_registers[2048]; +}; + +#define MPI3_SYSIF_IOC_INFO_LOW_OFFSET (0x00000000) +#define MPI3_SYSIF_IOC_INFO_HIGH_OFFSET (0x00000004) +#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK (0xff000000) +#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT (24) +#define MPI3_SYSIF_IOC_INFO_LOW_HCB_DISABLED (0x00000001) +#define MPI3_SYSIF_IOC_CONFIG_OFFSET (0x00000014) +#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ (0x00f00000) +#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT (20) +#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ (0x000f0000) +#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT (16) +#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000c000) +#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000) +#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000) +#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ (0x00002000) +#define MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE (0x00000010) +#define MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC (0x00000001) +#define MPI3_SYSIF_IOC_STATUS_OFFSET (0x0000001c) +#define MPI3_SYSIF_IOC_STATUS_RESET_HISTORY (0x00000010) +#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK (0x0000000c) +#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_SHIFT (0x00000002) +#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_NONE (0x00000000) +#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS (0x00000004) +#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE (0x00000008) +#define MPI3_SYSIF_IOC_STATUS_FAULT (0x00000002) +#define MPI3_SYSIF_IOC_STATUS_READY (0x00000001) +#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET (0x00000024) +#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_MASK (0x0fff) +#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_OFFSET (0x00000026) +#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_MASK (0x0fff0000) +#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_SHIFT (16) +#define MPI3_SYSIF_ADMIN_REQ_Q_ADDR_LOW_OFFSET (0x00000028) +#define MPI3_SYSIF_ADMIN_REQ_Q_ADDR_HIGH_OFFSET (0x0000002c) +#define 
MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_LOW_OFFSET (0x00000030) +#define MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_HIGH_OFFSET (0x00000034) +#define MPI3_SYSIF_COALESCE_CONTROL_OFFSET (0x00000040) +#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_MASK (0xc0000000) +#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000) +#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000) +#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xc0000000) +#define MPI3_SYSIF_COALESCE_CONTROL_VALID (0x20000000) +#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_MASK (0x01ff0000) +#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_SHIFT (16) +#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_MASK (0x0000ff00) +#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_SHIFT (8) +#define MPI3_SYSIF_COALESCE_CONTROL_DEPTH_MASK (0x000000ff) +#define MPI3_SYSIF_COALESCE_CONTROL_DEPTH_SHIFT (0) +#define MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET (0x00001000) +#define MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET (0x00001004) +#define MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET (0x00001008) +#define MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(N) (MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET + (((N) - 1) * 8)) +#define MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET (0x0000100c) +#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(N) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((N) - 1) * 8)) +#define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001c04) +#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000f) +#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0) +#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST (0xf) +#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND (0x4) +#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD (0xb) +#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH (0x2) +#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH (0x7) +#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH (0xd) +#define MPI3_SYSIF_HOST_DIAG_OFFSET (0x00001c08) +#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700) +#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000) +#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100) +#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_HOST_CONTROL_BOOT_RESET (0x00000200) +#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_COMPLETE_RESET (0x00000300) +#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT (0x00000700) +#define MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS (0x00000080) +#define MPI3_SYSIF_HOST_DIAG_SECURE_BOOT (0x00000040) +#define MPI3_SYSIF_HOST_DIAG_CLEAR_INVALID_FW_IMAGE (0x00000020) +#define MPI3_SYSIF_HOST_DIAG_INVALID_FW_IMAGE (0x00000010) +#define MPI3_SYSIF_HOST_DIAG_HCBENABLE (0x00000008) +#define MPI3_SYSIF_HOST_DIAG_HCBMODE (0x00000004) +#define MPI3_SYSIF_HOST_DIAG_DIAG_RW_ENABLE (0x00000002) +#define MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE (0x00000001) +#define MPI3_SYSIF_FAULT_OFFSET (0x00001c10) +#define MPI3_SYSIF_FAULT_FUNC_AREA_MASK (0xff000000) +#define MPI3_SYSIF_FAULT_FUNC_AREA_SHIFT (24) +#define MPI3_SYSIF_FAULT_FUNC_AREA_MPI_DEFINED (0x00000000) +#define MPI3_SYSIF_FAULT_CODE_MASK (0x0000ffff) +#define MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET (0x0000f000) +#define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000f001) +#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000f002) +#define MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED (0x0000f003) +#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_NEEDED (0x0000f004) +#define MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED (0x0000f005) +#define MPI3_SYSIF_FAULT_CODE_TEMP_THRESHOLD_EXCEEDED (0x0000f006) +#define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14) +#define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18) +#define MPI3_SYSIF_FAULT_INFO2_OFFSET 
(0x00001c1c) +#define MPI3_SYSIF_HCB_ADDRESS_LOW_OFFSET (0x00001c30) +#define MPI3_SYSIF_HCB_ADDRESS_HIGH_OFFSET (0x00001c34) +#define MPI3_SYSIF_HCB_SIZE_OFFSET (0x00001c38) +#define MPI3_SYSIF_HCB_SIZE_SIZE_MASK (0xfffff000) +#define MPI3_SYSIF_HCB_SIZE_SIZE_SHIFT (12) +#define MPI3_SYSIF_HCB_SIZE_HCDW_ENABLE (0x00000001) +#define MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET (0x00001c40) +#define MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET (0x00001c44) +#define MPI3_SYSIF_DIAG_RW_DATA_LOW_OFFSET (0x00001c50) +#define MPI3_SYSIF_DIAG_RW_DATA_HIGH_OFFSET (0x00001c54) +#define MPI3_SYSIF_DIAG_RW_ADDRESS_LOW_OFFSET (0x00001c58) +#define MPI3_SYSIF_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00001c5c) +#define MPI3_SYSIF_DIAG_RW_CONTROL_OFFSET (0x00001c60) +#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_MASK (0x00000030) +#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_1BYTE (0x00000000) +#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_2BYTES (0x00000010) +#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_4BYTES (0x00000020) +#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_8BYTES (0x00000030) +#define MPI3_SYSIF_DIAG_RW_CONTROL_RESET (0x00000004) +#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_MASK (0x00000002) +#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_READ (0x00000000) +#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_WRITE (0x00000002) +#define MPI3_SYSIF_DIAG_RW_CONTROL_START (0x00000001) +#define MPI3_SYSIF_DIAG_RW_STATUS_OFFSET (0x00001c62) +#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_MASK (0x0000000e) +#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SUCCESS (0x00000000) +#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_INV_ADDR (0x00000002) +#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_ACC_ERR (0x00000004) +#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_PAR_ERR (0x00000006) +#define MPI3_SYSIF_DIAG_RW_STATUS_BUSY (0x00000001) +#define MPI3_SYSIF_SCRATCHPAD0_OFFSET (0x00001cf0) +#define MPI3_SYSIF_SCRATCHPAD1_OFFSET (0x00001cf4) +#define MPI3_SYSIF_SCRATCHPAD2_OFFSET (0x00001cf8) +#define MPI3_SYSIF_SCRATCHPAD3_OFFSET (0x00001cfc) +#define MPI3_SYSIF_DEVICE_ASSIGNED_REGS_OFFSET (0x00002000) +#define MPI3_SYSIF_DIAG_SAVE_TIMEOUT (60) +struct mpi3_default_reply_descriptor { + __le32 descriptor_type_dependent1[2]; + __le16 request_queue_ci; + __le16 request_queue_id; + __le16 descriptor_type_dependent2; + __le16 reply_flags; +}; + +#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK (0x0001) +#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK (0xf000) +#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY (0x0000) +#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS (0x1000) +#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_TARGET_COMMAND_BUFFER (0x2000) +#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS (0x3000) +#define MPI3_REPLY_DESCRIPT_REQUEST_QUEUE_ID_INVALID (0xffff) +struct mpi3_address_reply_descriptor { + __le64 reply_frame_address; + __le16 request_queue_ci; + __le16 request_queue_id; + __le16 reserved0c; + __le16 reply_flags; +}; + +struct mpi3_success_reply_descriptor { + __le32 reserved00[2]; + __le16 request_queue_ci; + __le16 request_queue_id; + __le16 host_tag; + __le16 reply_flags; +}; + +struct mpi3_target_command_buffer_reply_descriptor { + __le32 reserved00; + __le16 initiator_dev_handle; + u8 phy_num; + u8 reserved07; + __le16 request_queue_ci; + __le16 request_queue_id; + __le16 io_index; + __le16 reply_flags; +}; + +struct mpi3_status_reply_descriptor { + __le16 ioc_status; + __le16 reserved02; + __le32 ioc_log_info; + __le16 request_queue_ci; + __le16 request_queue_id; + __le16 host_tag; + __le16 reply_flags; +}; + +#define MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL (0x8000) +#define 
MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK (0x7fff) +#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_MASK (0xf0000000) +#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_NO_INFO (0x00000000) +#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_SAS (0x30000000) +#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_DATA_MASK (0x0fffffff) +union mpi3_reply_descriptors_union { + struct mpi3_default_reply_descriptor default_reply; + struct mpi3_address_reply_descriptor address_reply; + struct mpi3_success_reply_descriptor success; + struct mpi3_target_command_buffer_reply_descriptor target_command_buffer; + struct mpi3_status_reply_descriptor status; + __le32 words[4]; +}; + +struct mpi3_sge_common { + __le64 address; + __le32 length; + u8 reserved0c[3]; + u8 flags; +}; + +struct mpi3_sge_bit_bucket { + __le64 reserved00; + __le32 length; + u8 reserved0c[3]; + u8 flags; +}; + +struct mpi3_sge_extended_eedp { + u8 user_data_size; + u8 reserved01; + __le16 eedp_flags; + __le32 secondary_reference_tag; + __le16 secondary_application_tag; + __le16 application_tag_translation_mask; + __le16 reserved0c; + u8 extended_operation; + u8 flags; +}; + +union mpi3_sge_union { + struct mpi3_sge_common simple; + struct mpi3_sge_common chain; + struct mpi3_sge_common last_chain; + struct mpi3_sge_bit_bucket bit_bucket; + struct mpi3_sge_extended_eedp eedp; + __le32 words[4]; +}; + +#define MPI3_SGE_FLAGS_ELEMENT_TYPE_MASK (0xf0) +#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE (0x00) +#define MPI3_SGE_FLAGS_ELEMENT_TYPE_BIT_BUCKET (0x10) +#define MPI3_SGE_FLAGS_ELEMENT_TYPE_CHAIN (0x20) +#define MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN (0x30) +#define MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED (0xf0) +#define MPI3_SGE_FLAGS_END_OF_LIST (0x08) +#define MPI3_SGE_FLAGS_END_OF_BUFFER (0x04) +#define MPI3_SGE_FLAGS_DLAS_MASK (0x03) +#define MPI3_SGE_FLAGS_DLAS_SYSTEM (0x00) +#define MPI3_SGE_FLAGS_DLAS_IOC_UDP (0x01) +#define MPI3_SGE_FLAGS_DLAS_IOC_CTL (0x02) +#define MPI3_SGE_EXT_OPER_EEDP (0x00) +#define MPI3_EEDPFLAGS_INCR_PRI_REF_TAG (0x8000) +#define MPI3_EEDPFLAGS_INCR_SEC_REF_TAG (0x4000) +#define MPI3_EEDPFLAGS_INCR_PRI_APP_TAG (0x2000) +#define MPI3_EEDPFLAGS_INCR_SEC_APP_TAG (0x1000) +#define MPI3_EEDPFLAGS_ESC_PASSTHROUGH (0x0800) +#define MPI3_EEDPFLAGS_CHK_REF_TAG (0x0400) +#define MPI3_EEDPFLAGS_CHK_APP_TAG (0x0200) +#define MPI3_EEDPFLAGS_CHK_GUARD (0x0100) +#define MPI3_EEDPFLAGS_ESC_MODE_MASK (0x00c0) +#define MPI3_EEDPFLAGS_ESC_MODE_DO_NOT_DISABLE (0x0040) +#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE (0x0080) +#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_REFTAG_DISABLE (0x00c0) +#define MPI3_EEDPFLAGS_HOST_GUARD_MASK (0x0030) +#define MPI3_EEDPFLAGS_HOST_GUARD_T10_CRC (0x0000) +#define MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM (0x0010) +#define MPI3_EEDPFLAGS_HOST_GUARD_OEM_SPECIFIC (0x0020) +#define MPI3_EEDPFLAGS_PT_REF_TAG (0x0008) +#define MPI3_EEDPFLAGS_EEDP_OP_MASK (0x0007) +#define MPI3_EEDPFLAGS_EEDP_OP_CHECK (0x0001) +#define MPI3_EEDPFLAGS_EEDP_OP_STRIP (0x0002) +#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE (0x0003) +#define MPI3_EEDPFLAGS_EEDP_OP_INSERT (0x0004) +#define MPI3_EEDPFLAGS_EEDP_OP_REPLACE (0x0006) +#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN (0x0007) +#define MPI3_EEDP_UDS_512 (0x01) +#define MPI3_EEDP_UDS_520 (0x02) +#define MPI3_EEDP_UDS_4080 (0x03) +#define MPI3_EEDP_UDS_4088 (0x04) +#define MPI3_EEDP_UDS_4096 (0x05) +#define MPI3_EEDP_UDS_4104 (0x06) +#define MPI3_EEDP_UDS_4160 (0x07) +struct mpi3_request_header { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 
ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 change_count; + __le16 function_dependent; +}; + +struct mpi3_default_reply { + __le16 host_tag; + u8 ioc_use_only02; + u8 function; + __le16 ioc_use_only04; + u8 ioc_use_only06; + u8 msg_flags; + __le16 ioc_use_only08; + __le16 ioc_status; + __le32 ioc_log_info; +}; + +#define MPI3_HOST_TAG_INVALID (0xffff) +#define MPI3_FUNCTION_IOC_FACTS (0x01) +#define MPI3_FUNCTION_IOC_INIT (0x02) +#define MPI3_FUNCTION_PORT_ENABLE (0x03) +#define MPI3_FUNCTION_EVENT_NOTIFICATION (0x04) +#define MPI3_FUNCTION_EVENT_ACK (0x05) +#define MPI3_FUNCTION_CI_DOWNLOAD (0x06) +#define MPI3_FUNCTION_CI_UPLOAD (0x07) +#define MPI3_FUNCTION_IO_UNIT_CONTROL (0x08) +#define MPI3_FUNCTION_PERSISTENT_EVENT_LOG (0x09) +#define MPI3_FUNCTION_MGMT_PASSTHROUGH (0x0a) +#define MPI3_FUNCTION_CONFIG (0x10) +#define MPI3_FUNCTION_SCSI_IO (0x20) +#define MPI3_FUNCTION_SCSI_TASK_MGMT (0x21) +#define MPI3_FUNCTION_SMP_PASSTHROUGH (0x22) +#define MPI3_FUNCTION_NVME_ENCAPSULATED (0x24) +#define MPI3_FUNCTION_TARGET_ASSIST (0x30) +#define MPI3_FUNCTION_TARGET_STATUS_SEND (0x31) +#define MPI3_FUNCTION_TARGET_MODE_ABORT (0x32) +#define MPI3_FUNCTION_TARGET_CMD_BUF_POST_BASE (0x33) +#define MPI3_FUNCTION_TARGET_CMD_BUF_POST_LIST (0x34) +#define MPI3_FUNCTION_CREATE_REQUEST_QUEUE (0x70) +#define MPI3_FUNCTION_DELETE_REQUEST_QUEUE (0x71) +#define MPI3_FUNCTION_CREATE_REPLY_QUEUE (0x72) +#define MPI3_FUNCTION_DELETE_REPLY_QUEUE (0x73) +#define MPI3_FUNCTION_TOOLBOX (0x80) +#define MPI3_FUNCTION_DIAG_BUFFER_POST (0x81) +#define MPI3_FUNCTION_DIAG_BUFFER_MANAGE (0x82) +#define MPI3_FUNCTION_DIAG_BUFFER_UPLOAD (0x83) +#define MPI3_FUNCTION_MIN_IOC_USE_ONLY (0xc0) +#define MPI3_FUNCTION_MAX_IOC_USE_ONLY (0xef) +#define MPI3_FUNCTION_MIN_PRODUCT_SPECIFIC (0xf0) +#define MPI3_FUNCTION_MAX_PRODUCT_SPECIFIC (0xff) +#define MPI3_IOCSTATUS_LOG_INFO_AVAIL_MASK (0x8000) +#define MPI3_IOCSTATUS_LOG_INFO_AVAILABLE (0x8000) +#define MPI3_IOCSTATUS_STATUS_MASK (0x7fff) +#define MPI3_IOCSTATUS_SUCCESS (0x0000) +#define MPI3_IOCSTATUS_INVALID_FUNCTION (0x0001) +#define MPI3_IOCSTATUS_BUSY (0x0002) +#define MPI3_IOCSTATUS_INVALID_SGL (0x0003) +#define MPI3_IOCSTATUS_INTERNAL_ERROR (0x0004) +#define MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006) +#define MPI3_IOCSTATUS_INVALID_FIELD (0x0007) +#define MPI3_IOCSTATUS_INVALID_STATE (0x0008) +#define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a) +#define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b) +#define MPI3_IOCSTATUS_ALLOWED_CMD_BLOCK (0x000c) +#define MPI3_IOCSTATUS_SUPERVISOR_ONLY (0x000d) +#define MPI3_IOCSTATUS_FAILURE (0x001f) +#define MPI3_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020) +#define MPI3_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021) +#define MPI3_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022) +#define MPI3_IOCSTATUS_CONFIG_INVALID_DATA (0x0023) +#define MPI3_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024) +#define MPI3_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025) +#define MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040) +#define MPI3_IOCSTATUS_SCSI_TM_NOT_SUPPORTED (0x0041) +#define MPI3_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042) +#define MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043) +#define MPI3_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044) +#define MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045) +#define MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046) +#define MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047) +#define MPI3_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048) +#define MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049) +#define MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004a) +#define 
MPI3_IOCSTATUS_SCSI_IOC_TERMINATED (0x004b) +#define MPI3_IOCSTATUS_SCSI_EXT_TERMINATED (0x004c) +#define MPI3_IOCSTATUS_EEDP_GUARD_ERROR (0x004d) +#define MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004e) +#define MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004f) +#define MPI3_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062) +#define MPI3_IOCSTATUS_TARGET_ABORTED (0x0063) +#define MPI3_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064) +#define MPI3_IOCSTATUS_TARGET_NO_CONNECTION (0x0065) +#define MPI3_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006a) +#define MPI3_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006d) +#define MPI3_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006e) +#define MPI3_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006f) +#define MPI3_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070) +#define MPI3_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071) +#define MPI3_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090) +#define MPI3_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091) +#define MPI3_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00a0) +#define MPI3_IOCSTATUS_CI_UNSUPPORTED (0x00b0) +#define MPI3_IOCSTATUS_CI_UPDATE_SEQUENCE (0x00b1) +#define MPI3_IOCSTATUS_CI_VALIDATION_FAILED (0x00b2) +#define MPI3_IOCSTATUS_CI_KEY_UPDATE_PENDING (0x00b3) +#define MPI3_IOCSTATUS_CI_KEY_UPDATE_NOT_POSSIBLE (0x00b4) +#define MPI3_IOCSTATUS_SECURITY_KEY_REQUIRED (0x00c0) +#define MPI3_IOCSTATUS_SECURITY_VIOLATION (0x00c1) +#define MPI3_IOCSTATUS_INVALID_QUEUE_ID (0x0f00) +#define MPI3_IOCSTATUS_INVALID_QUEUE_SIZE (0x0f01) +#define MPI3_IOCSTATUS_INVALID_MSIX_VECTOR (0x0f02) +#define MPI3_IOCSTATUS_INVALID_REPLY_QUEUE_ID (0x0f03) +#define MPI3_IOCSTATUS_INVALID_QUEUE_DELETION (0x0f04) +#define MPI3_IOCLOGINFO_TYPE_MASK (0xf0000000) +#define MPI3_IOCLOGINFO_TYPE_SHIFT (28) +#define MPI3_IOCLOGINFO_TYPE_NONE (0x0) +#define MPI3_IOCLOGINFO_TYPE_SAS (0x3) +#define MPI3_IOCLOGINFO_LOG_DATA_MASK (0x0fffffff) +#endif diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h new file mode 100644 index 000000000..ae98d15c3 --- /dev/null +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -0,0 +1,1420 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Driver for Broadcom MPI3 Storage Controllers + * + * Copyright (C) 2017-2023 Broadcom Inc. + * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) + * + */ + +#ifndef MPI3MR_H_INCLUDED +#define MPI3MR_H_INCLUDED + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mpi/mpi30_transport.h" +#include "mpi/mpi30_cnfg.h" +#include "mpi/mpi30_image.h" +#include "mpi/mpi30_init.h" +#include "mpi/mpi30_ioc.h" +#include "mpi/mpi30_sas.h" +#include "mpi/mpi30_pci.h" +#include "mpi3mr_debug.h" + +/* Global list and lock for storing multiple adapters managed by the driver */ +extern spinlock_t mrioc_list_lock; +extern struct list_head mrioc_list; +extern int prot_mask; +extern atomic64_t event_counter; + +#define MPI3MR_DRIVER_VERSION "8.5.0.0.0" +#define MPI3MR_DRIVER_RELDATE "24-July-2023" + +#define MPI3MR_DRIVER_NAME "mpi3mr" +#define MPI3MR_DRIVER_LICENSE "GPL" +#define MPI3MR_DRIVER_AUTHOR "Broadcom Inc. 
" +#define MPI3MR_DRIVER_DESC "MPI3 Storage Controller Device Driver" + +#define MPI3MR_NAME_LENGTH 32 +#define IOCNAME "%s: " + +#define MPI3MR_DEFAULT_MAX_IO_SIZE (1 * 1024 * 1024) + +/* Definitions for internal SGL and Chain SGL buffers */ +#define MPI3MR_PAGE_SIZE_4K 4096 +#define MPI3MR_DEFAULT_SGL_ENTRIES 256 +#define MPI3MR_MAX_SGL_ENTRIES 2048 + +/* Definitions for MAX values for shost */ +#define MPI3MR_MAX_CMDS_LUN 128 +#define MPI3MR_MAX_CDB_LENGTH 32 + +/* Admin queue management definitions */ +#define MPI3MR_ADMIN_REQ_Q_SIZE (2 * MPI3MR_PAGE_SIZE_4K) +#define MPI3MR_ADMIN_REPLY_Q_SIZE (4 * MPI3MR_PAGE_SIZE_4K) +#define MPI3MR_ADMIN_REQ_FRAME_SZ 128 +#define MPI3MR_ADMIN_REPLY_FRAME_SZ 16 + +/* Operational queue management definitions */ +#define MPI3MR_OP_REQ_Q_QD 512 +#define MPI3MR_OP_REP_Q_QD 1024 +#define MPI3MR_OP_REP_Q_QD4K 4096 +#define MPI3MR_OP_REQ_Q_SEG_SIZE 4096 +#define MPI3MR_OP_REP_Q_SEG_SIZE 4096 +#define MPI3MR_MAX_SEG_LIST_SIZE 4096 + +/* Reserved Host Tag definitions */ +#define MPI3MR_HOSTTAG_INVALID 0xFFFF +#define MPI3MR_HOSTTAG_INITCMDS 1 +#define MPI3MR_HOSTTAG_BSG_CMDS 2 +#define MPI3MR_HOSTTAG_PEL_ABORT 3 +#define MPI3MR_HOSTTAG_PEL_WAIT 4 +#define MPI3MR_HOSTTAG_BLK_TMS 5 +#define MPI3MR_HOSTTAG_CFG_CMDS 6 +#define MPI3MR_HOSTTAG_TRANSPORT_CMDS 7 + +#define MPI3MR_NUM_DEVRMCMD 16 +#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_TRANSPORT_CMDS + 1) +#define MPI3MR_HOSTTAG_DEVRMCMD_MAX (MPI3MR_HOSTTAG_DEVRMCMD_MIN + \ + MPI3MR_NUM_DEVRMCMD - 1) + +#define MPI3MR_INTERNAL_CMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX +#define MPI3MR_NUM_EVTACKCMD 4 +#define MPI3MR_HOSTTAG_EVTACKCMD_MIN (MPI3MR_HOSTTAG_DEVRMCMD_MAX + 1) +#define MPI3MR_HOSTTAG_EVTACKCMD_MAX (MPI3MR_HOSTTAG_EVTACKCMD_MIN + \ + MPI3MR_NUM_EVTACKCMD - 1) + +/* Reduced resource count definition for crash kernel */ +#define MPI3MR_HOST_IOS_KDUMP 128 + +/* command/controller interaction timeout definitions in seconds */ +#define MPI3MR_INTADMCMD_TIMEOUT 60 +#define MPI3MR_PORTENABLE_TIMEOUT 300 +#define MPI3MR_PORTENABLE_POLL_INTERVAL 5 +#define MPI3MR_ABORTTM_TIMEOUT 60 +#define MPI3MR_RESETTM_TIMEOUT 60 +#define MPI3MR_RESET_HOST_IOWAIT_TIMEOUT 5 +#define MPI3MR_TSUPDATE_INTERVAL 900 +#define MPI3MR_DEFAULT_SHUTDOWN_TIME 120 +#define MPI3MR_RAID_ERRREC_RESET_TIMEOUT 180 +#define MPI3MR_PREPARE_FOR_RESET_TIMEOUT 180 +#define MPI3MR_RESET_ACK_TIMEOUT 30 +#define MPI3MR_MUR_TIMEOUT 120 + +#define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milli seconds */ + +#define MPI3MR_DEFAULT_CFG_PAGE_SZ 1024 /* in bytes */ + +#define MPI3MR_RESET_TOPOLOGY_SETTLE_TIME 10 + +#define MPI3MR_SCMD_TIMEOUT (60 * HZ) +#define MPI3MR_EH_SCMD_TIMEOUT (60 * HZ) + +/* Internal admin command state definitions*/ +#define MPI3MR_CMD_NOTUSED 0x8000 +#define MPI3MR_CMD_COMPLETE 0x0001 +#define MPI3MR_CMD_PENDING 0x0002 +#define MPI3MR_CMD_REPLY_VALID 0x0004 +#define MPI3MR_CMD_RESET 0x0008 + +/* Definitions for Event replies and sense buffer allocated per controller */ +#define MPI3MR_NUM_EVT_REPLIES 64 +#define MPI3MR_SENSE_BUF_SZ 256 +#define MPI3MR_SENSEBUF_FACTOR 3 +#define MPI3MR_CHAINBUF_FACTOR 3 +#define MPI3MR_CHAINBUFDIX_FACTOR 2 + +/* Invalid target device handle */ +#define MPI3MR_INVALID_DEV_HANDLE 0xFFFF + +/* Controller Reset related definitions */ +#define MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT 5 +#define MPI3MR_MAX_RESET_RETRY_COUNT 3 + +/* ResponseCode definitions */ +#define MPI3MR_RI_MASK_RESPCODE (0x000000FF) +#define MPI3MR_RSP_IO_QUEUED_ON_IOC \ + MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC + +#define 
MPI3MR_DEFAULT_MDTS (128 * 1024) +#define MPI3MR_DEFAULT_PGSZEXP (12) + +/* Command retry count definitions */ +#define MPI3MR_DEV_RMHS_RETRY_COUNT 3 +#define MPI3MR_PEL_RETRY_COUNT 3 + +/* Default target device queue depth */ +#define MPI3MR_DEFAULT_SDEV_QD 32 + +/* Definitions for Threaded IRQ poll*/ +#define MPI3MR_IRQ_POLL_SLEEP 2 +#define MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT 8 + +/* Definitions for the controller security status*/ +#define MPI3MR_CTLR_SECURITY_STATUS_MASK 0x0C +#define MPI3MR_CTLR_SECURE_DBG_STATUS_MASK 0x02 + +#define MPI3MR_INVALID_DEVICE 0x00 +#define MPI3MR_CONFIG_SECURE_DEVICE 0x04 +#define MPI3MR_HARD_SECURE_DEVICE 0x08 +#define MPI3MR_TAMPERED_DEVICE 0x0C + +/* SGE Flag definition */ +#define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \ + (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | MPI3_SGE_FLAGS_DLAS_SYSTEM | \ + MPI3_SGE_FLAGS_END_OF_LIST) + +/* MSI Index from Reply Queue Index */ +#define REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, offset) (qidx + offset) + +/* + * Maximum data transfer size definitions for management + * application commands + */ +#define MPI3MR_MAX_APP_XFER_SIZE (1 * 1024 * 1024) +#define MPI3MR_MAX_APP_XFER_SEGMENTS 512 +/* + * 2048 sectors are for data buffers and additional 512 sectors for + * other buffers + */ +#define MPI3MR_MAX_APP_XFER_SECTORS (2048 + 512) + +#define MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS 256 +#define MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS 2048 + +/** + * struct mpi3mr_nvme_pt_sge - Structure to store SGEs for NVMe + * Encapsulated commands. + * + * @base_addr: Physical address + * @length: SGE length + * @rsvd: Reserved + * @rsvd1: Reserved + * @sgl_type: sgl type + */ +struct mpi3mr_nvme_pt_sge { + u64 base_addr; + u32 length; + u16 rsvd; + u8 rsvd1; + u8 sgl_type; +}; + +/** + * struct mpi3mr_buf_map - local structure to + * track kernel and user buffers associated with an BSG + * structure. + * + * @bsg_buf: BSG buffer virtual address + * @bsg_buf_len: BSG buffer length + * @kern_buf: Kernel buffer virtual address + * @kern_buf_len: Kernel buffer length + * @kern_buf_dma: Kernel buffer DMA address + * @data_dir: Data direction. 
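+ *
+ * Usage sketch (illustrative): in the BSG pass-through handlers, the
+ * application data described by @bsg_buf/@bsg_buf_len is typically
+ * copied to or from the DMA-able @kern_buf (@kern_buf_len bytes at
+ * @kern_buf_dma) around the firmware request, as directed by
+ * @data_dir, which is expected to hold a dma_data_direction value.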
+ */ +struct mpi3mr_buf_map { + void *bsg_buf; + u32 bsg_buf_len; + void *kern_buf; + u32 kern_buf_len; + dma_addr_t kern_buf_dma; + u8 data_dir; +}; + +/* IOC State definitions */ +enum mpi3mr_iocstate { + MRIOC_STATE_READY = 1, + MRIOC_STATE_RESET, + MRIOC_STATE_FAULT, + MRIOC_STATE_BECOMING_READY, + MRIOC_STATE_RESET_REQUESTED, + MRIOC_STATE_UNRECOVERABLE, +}; + +/* Reset reason code definitions */ +enum mpi3mr_reset_reason { + MPI3MR_RESET_FROM_BRINGUP = 1, + MPI3MR_RESET_FROM_FAULT_WATCH = 2, + MPI3MR_RESET_FROM_APP = 3, + MPI3MR_RESET_FROM_EH_HOS = 4, + MPI3MR_RESET_FROM_TM_TIMEOUT = 5, + MPI3MR_RESET_FROM_APP_TIMEOUT = 6, + MPI3MR_RESET_FROM_MUR_FAILURE = 7, + MPI3MR_RESET_FROM_CTLR_CLEANUP = 8, + MPI3MR_RESET_FROM_CIACTIV_FAULT = 9, + MPI3MR_RESET_FROM_PE_TIMEOUT = 10, + MPI3MR_RESET_FROM_TSU_TIMEOUT = 11, + MPI3MR_RESET_FROM_DELREQQ_TIMEOUT = 12, + MPI3MR_RESET_FROM_DELREPQ_TIMEOUT = 13, + MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT = 14, + MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT = 15, + MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT = 16, + MPI3MR_RESET_FROM_IOCINIT_TIMEOUT = 17, + MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT = 18, + MPI3MR_RESET_FROM_EVTACK_TIMEOUT = 19, + MPI3MR_RESET_FROM_CIACTVRST_TIMER = 20, + MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT = 21, + MPI3MR_RESET_FROM_PELABORT_TIMEOUT = 22, + MPI3MR_RESET_FROM_SYSFS = 23, + MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24, + MPI3MR_RESET_FROM_FIRMWARE = 27, + MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT = 29, + MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT = 30, +}; + +/* Queue type definitions */ +enum queue_type { + MPI3MR_DEFAULT_QUEUE = 0, + MPI3MR_POLL_QUEUE, +}; + +/** + * struct mpi3mr_compimg_ver - replica of component image + * version defined in mpi30_image.h in host endianness + * + */ +struct mpi3mr_compimg_ver { + u16 build_num; + u16 cust_id; + u8 ph_minor; + u8 ph_major; + u8 gen_minor; + u8 gen_major; +}; + +/** + * struct mpi3mr_ioc_facts - replica of IOC facts data + * defined in mpi30_ioc.h in host endianness + * + */ +struct mpi3mr_ioc_facts { + u32 ioc_capabilities; + struct mpi3mr_compimg_ver fw_ver; + u32 mpi_version; + u16 max_reqs; + u16 product_id; + u16 op_req_sz; + u16 reply_sz; + u16 exceptions; + u16 max_perids; + u16 max_pds; + u16 max_sasexpanders; + u32 max_data_length; + u16 max_sasinitiators; + u16 max_enclosures; + u16 max_pcie_switches; + u16 max_nvme; + u16 max_vds; + u16 max_hpds; + u16 max_advhpds; + u16 max_raid_pds; + u16 min_devhandle; + u16 max_devhandle; + u16 max_op_req_q; + u16 max_op_reply_q; + u16 shutdown_timeout; + u8 ioc_num; + u8 who_init; + u16 max_msix_vectors; + u8 personality; + u8 dma_mask; + u8 protocol_flags; + u8 sge_mod_mask; + u8 sge_mod_value; + u8 sge_mod_shift; + u8 max_dev_per_tg; + u16 max_io_throttle_group; + u16 io_throttle_data_length; + u16 io_throttle_low; + u16 io_throttle_high; + +}; + +/** + * struct segments - memory descriptor structure to store + * virtual and dma addresses for operational queue segments.
+ * + * @segment: virtual address + * @segment_dma: dma address + */ +struct segments { + void *segment; + dma_addr_t segment_dma; +}; + +/** + * struct op_req_qinfo - Operational Request Queue Information + * + * @ci: consumer index + * @pi: producer index + * @num_request: Maximum number of entries in the queue + * @qid: Queue Id starting from 1 + * @reply_qid: Associated reply queue Id + * @num_segments: Number of discontiguous memory segments + * @segment_qd: Depth of each segments + * @q_lock: Concurrent queue access lock + * @q_segments: Segment descriptor pointer + * @q_segment_list: Segment list base virtual address + * @q_segment_list_dma: Segment list base DMA address + */ +struct op_req_qinfo { + u16 ci; + u16 pi; + u16 num_requests; + u16 qid; + u16 reply_qid; + u16 num_segments; + u16 segment_qd; + spinlock_t q_lock; + struct segments *q_segments; + void *q_segment_list; + dma_addr_t q_segment_list_dma; +}; + +/** + * struct op_reply_qinfo - Operational Reply Queue Information + * + * @ci: consumer index + * @qid: Queue Id starting from 1 + * @num_replies: Maximum number of entries in the queue + * @num_segments: Number of discontiguous memory segments + * @segment_qd: Depth of each segments + * @q_segments: Segment descriptor pointer + * @q_segment_list: Segment list base virtual address + * @q_segment_list_dma: Segment list base DMA address + * @ephase: Expected phased identifier for the reply queue + * @pend_ios: Number of IOs pending in HW for this queue + * @enable_irq_poll: Flag to indicate polling is enabled + * @in_use: Queue is handled by poll/ISR + * @qtype: Type of queue (types defined in enum queue_type) + */ +struct op_reply_qinfo { + u16 ci; + u16 qid; + u16 num_replies; + u16 num_segments; + u16 segment_qd; + struct segments *q_segments; + void *q_segment_list; + dma_addr_t q_segment_list_dma; + u8 ephase; + atomic_t pend_ios; + bool enable_irq_poll; + atomic_t in_use; + enum queue_type qtype; +}; + +/** + * struct mpi3mr_intr_info - Interrupt cookie information + * + * @mrioc: Adapter instance reference + * @os_irq: irq number + * @msix_index: MSIx index + * @op_reply_q: Associated operational reply queue + * @name: Dev name for the irq claiming device + */ +struct mpi3mr_intr_info { + struct mpi3mr_ioc *mrioc; + int os_irq; + u16 msix_index; + struct op_reply_qinfo *op_reply_q; + char name[MPI3MR_NAME_LENGTH]; +}; + +/** + * struct mpi3mr_throttle_group_info - Throttle group info + * + * @io_divert: Flag indicates io divert is on or off for the TG + * @need_qd_reduction: Flag to indicate QD reduction is needed + * @qd_reduction: Queue Depth reduction in units of 10% + * @fw_qd: QueueDepth value reported by the firmware + * @modified_qd: Modified QueueDepth value due to throttling + * @id: Throttle Group ID. 
+ * @high: High limit to turn on throttling in 512 byte blocks + * @low: Low limit to turn off throttling in 512 byte blocks + * @pend_large_data_sz: Counter to track pending large data + */ +struct mpi3mr_throttle_group_info { + u8 io_divert; + u8 need_qd_reduction; + u8 qd_reduction; + u16 fw_qd; + u16 modified_qd; + u16 id; + u32 high; + u32 low; + atomic_t pend_large_data_sz; +}; + +/* HBA port flags */ +#define MPI3MR_HBA_PORT_FLAG_DIRTY 0x01 + +/** + * struct mpi3mr_hba_port - HBA's port information + * @port_id: Port number + * @flags: HBA port flags + */ +struct mpi3mr_hba_port { + struct list_head list; + u8 port_id; + u8 flags; +}; + +/** + * struct mpi3mr_sas_port - Internal SAS port information + * @port_list: List of ports belonging to a SAS node + * @num_phys: Number of phys associated with port + * @marked_responding: used while refresing the sas ports + * @lowest_phy: lowest phy ID of current sas port + * @phy_mask: phy_mask of current sas port + * @hba_port: HBA port entry + * @remote_identify: Attached device identification + * @rphy: SAS transport layer rphy object + * @port: SAS transport layer port object + * @phy_list: mpi3mr_sas_phy objects belonging to this port + */ +struct mpi3mr_sas_port { + struct list_head port_list; + u8 num_phys; + u8 marked_responding; + int lowest_phy; + u32 phy_mask; + struct mpi3mr_hba_port *hba_port; + struct sas_identify remote_identify; + struct sas_rphy *rphy; + struct sas_port *port; + struct list_head phy_list; +}; + +/** + * struct mpi3mr_sas_phy - Internal SAS Phy information + * @port_siblings: List of phys belonging to a port + * @identify: Phy identification + * @remote_identify: Attached device identification + * @phy: SAS transport layer Phy object + * @phy_id: Unique phy id within a port + * @handle: Firmware device handle for this phy + * @attached_handle: Firmware device handle for attached device + * @phy_belongs_to_port: Flag to indicate phy belongs to port + @hba_port: HBA port entry + */ +struct mpi3mr_sas_phy { + struct list_head port_siblings; + struct sas_identify identify; + struct sas_identify remote_identify; + struct sas_phy *phy; + u8 phy_id; + u16 handle; + u16 attached_handle; + u8 phy_belongs_to_port; + struct mpi3mr_hba_port *hba_port; +}; + +/** + * struct mpi3mr_sas_node - SAS host/expander information + * @list: List of sas nodes in a controller + * @parent_dev: Parent device class + * @num_phys: Number phys belonging to sas_node + * @sas_address: SAS address of sas_node + * @handle: Firmware device handle for this sas_host/expander + * @sas_address_parent: SAS address of parent expander or host + * @enclosure_handle: Firmware handle of enclosure of this node + * @device_info: Capabilities of this sas_host/expander + * @non_responding: used to refresh the expander devices during reset + * @host_node: Flag to indicate this is a host_node + * @hba_port: HBA port entry + * @phy: A list of phys that make up this sas_host/expander + * @sas_port_list: List of internal ports of this node + * @rphy: sas_rphy object of this expander node + */ +struct mpi3mr_sas_node { + struct list_head list; + struct device *parent_dev; + u8 num_phys; + u64 sas_address; + u16 handle; + u64 sas_address_parent; + u16 enclosure_handle; + u64 enclosure_logical_id; + u8 non_responding; + u8 host_node; + struct mpi3mr_hba_port *hba_port; + struct mpi3mr_sas_phy *phy; + struct list_head sas_port_list; + struct sas_rphy *rphy; +}; + +/** + * struct mpi3mr_enclosure_node - enclosure information + * @list: List of enclosures + * @pg0: 
Enclosure page 0; + */ +struct mpi3mr_enclosure_node { + struct list_head list; + struct mpi3_enclosure_page0 pg0; +}; + +/** + * struct tgt_dev_sas_sata - SAS/SATA device specific + * information cached from firmware given data + * + * @sas_address: World wide unique SAS address + * @sas_address_parent: Sas address of parent expander or host + * @dev_info: Device information bits + * @phy_id: Phy identifier provided in device page 0 + * @attached_phy_id: Attached phy identifier provided in device page 0 + * @sas_transport_attached: Is this device exposed to transport + * @pend_sas_rphy_add: Flag to check device is in process of add + * @hba_port: HBA port entry + * @rphy: SAS transport layer rphy object + */ +struct tgt_dev_sas_sata { + u64 sas_address; + u64 sas_address_parent; + u16 dev_info; + u8 phy_id; + u8 attached_phy_id; + u8 sas_transport_attached; + u8 pend_sas_rphy_add; + struct mpi3mr_hba_port *hba_port; + struct sas_rphy *rphy; +}; + +/** + * struct tgt_dev_pcie - PCIe device specific information cached + * from firmware given data + * + * @mdts: Maximum data transfer size + * @capb: Device capabilities + * @pgsz: Device page size + * @abort_to: Timeout for abort TM + * @reset_to: Timeout for Target/LUN reset TM + * @dev_info: Device information bits + */ +struct tgt_dev_pcie { + u32 mdts; + u16 capb; + u8 pgsz; + u8 abort_to; + u8 reset_to; + u16 dev_info; +}; + +/** + * struct tgt_dev_vd - virtual device specific information + * cached from firmware given data + * + * @state: State of the VD + * @tg_qd_reduction: Queue Depth reduction in units of 10% + * @tg_id: VDs throttle group ID + * @high: High limit to turn on throttling in 512 byte blocks + * @low: Low limit to turn off throttling in 512 byte blocks + * @tg: Pointer to throttle group info + */ +struct tgt_dev_vd { + u8 state; + u8 tg_qd_reduction; + u16 tg_id; + u32 tg_high; + u32 tg_low; + struct mpi3mr_throttle_group_info *tg; +}; + + +/** + * union _form_spec_inf - union of device specific information + */ +union _form_spec_inf { + struct tgt_dev_sas_sata sas_sata_inf; + struct tgt_dev_pcie pcie_inf; + struct tgt_dev_vd vd_inf; +}; + +enum mpi3mr_dev_state { + MPI3MR_DEV_CREATED = 1, + MPI3MR_DEV_REMOVE_HS_STARTED = 2, + MPI3MR_DEV_DELETED = 3, +}; + +/** + * struct mpi3mr_tgt_dev - target device data structure + * + * @list: List pointer + * @starget: Scsi_target pointer + * @dev_handle: FW device handle + * @parent_handle: FW parent device handle + * @slot: Slot number + * @encl_handle: FW enclosure handle + * @perst_id: FW assigned Persistent ID + * @devpg0_flag: Device Page0 flag + * @dev_type: SAS/SATA/PCIE device type + * @is_hidden: Should be exposed to upper layers or not + * @host_exposed: Already exposed to host or not + * @io_unit_port: IO Unit port ID + * @non_stl: Is this device not to be attached with SAS TL + * @io_throttle_enabled: I/O throttling needed or not + * @wslen: Write same max length + * @q_depth: Device specific Queue Depth + * @wwid: World wide ID + * @enclosure_logical_id: Enclosure logical identifier + * @dev_spec: Device type specific information + * @ref_count: Reference count + * @state: device state + */ +struct mpi3mr_tgt_dev { + struct list_head list; + struct scsi_target *starget; + u16 dev_handle; + u16 parent_handle; + u16 slot; + u16 encl_handle; + u16 perst_id; + u16 devpg0_flag; + u8 dev_type; + u8 is_hidden; + u8 host_exposed; + u8 io_unit_port; + u8 non_stl; + u8 io_throttle_enabled; + u16 wslen; + u16 q_depth; + u64 wwid; + u64 enclosure_logical_id; + union 
_form_spec_inf dev_spec; + struct kref ref_count; + enum mpi3mr_dev_state state; +}; + +/** + * mpi3mr_tgtdev_get - k reference incrementor + * @s: Target device reference + * + * Increment target device reference count. + */ +static inline void mpi3mr_tgtdev_get(struct mpi3mr_tgt_dev *s) +{ + kref_get(&s->ref_count); +} + +/** + * mpi3mr_free_tgtdev - target device memory deallocator + * @r: k reference pointer of the target device + * + * Free target device memory when no references remain. + */ +static inline void mpi3mr_free_tgtdev(struct kref *r) +{ + kfree(container_of(r, struct mpi3mr_tgt_dev, ref_count)); +} + +/** + * mpi3mr_tgtdev_put - k reference decrementor + * @s: Target device reference + * + * Decrement target device reference count. + */ +static inline void mpi3mr_tgtdev_put(struct mpi3mr_tgt_dev *s) +{ + kref_put(&s->ref_count, mpi3mr_free_tgtdev); +} + + +/** + * struct mpi3mr_stgt_priv_data - SCSI target private structure + * + * @starget: Scsi_target pointer + * @dev_handle: FW device handle + * @perst_id: FW assigned Persistent ID + * @num_luns: Number of Logical Units + * @block_io: I/O blocked to the device or not + * @dev_removed: Device removed in the Firmware + * @dev_removedelay: Device is waiting to be removed in FW + * @dev_type: Device type + * @dev_nvme_dif: Device is NVMe DIF enabled + * @wslen: Write same max length + * @io_throttle_enabled: I/O throttling needed or not + * @io_divert: Flag indicates io divert is on or off for the dev + * @throttle_group: Pointer to throttle group info + * @tgt_dev: Internal target device pointer + * @pend_count: Counter to track pending I/Os during error + * handling + */ +struct mpi3mr_stgt_priv_data { + struct scsi_target *starget; + u16 dev_handle; + u16 perst_id; + u32 num_luns; + atomic_t block_io; + u8 dev_removed; + u8 dev_removedelay; + u8 dev_type; + u8 dev_nvme_dif; + u16 wslen; + u8 io_throttle_enabled; + u8 io_divert; + struct mpi3mr_throttle_group_info *throttle_group; + struct mpi3mr_tgt_dev *tgt_dev; + u32 pend_count; +}; + +/** + * struct mpi3mr_sdev_priv_data - SCSI device private structure + * + * @tgt_priv_data: Scsi_target private data pointer + * @lun_id: LUN ID of the device + * @ncq_prio_enable: NCQ priority enable for SATA device + * @pend_count: Counter to track pending I/Os during error + * handling + * @wslen: Write same max length + */ +struct mpi3mr_sdev_priv_data { + struct mpi3mr_stgt_priv_data *tgt_priv_data; + u32 lun_id; + u8 ncq_prio_enable; + u32 pend_count; + u16 wslen; +}; + +/** + * struct mpi3mr_drv_cmd - Internal command tracker + * + * @mutex: Command mutex + * @done: Completion for wakeup + * @reply: Firmware reply for internal commands + * @sensebuf: Sensebuf for SCSI IO commands + * @iou_rc: IO Unit control reason code + * @state: Command State + * @dev_handle: Firmware handle for device specific commands + * @ioc_status: IOC status from the firmware + * @ioc_loginfo: IOC log info from the firmware + * @is_waiting: Is the command issued in block mode + * @is_sense: Is Sense data present + * @retry_count: Retry count for retriable commands + * @host_tag: Host tag used by the command + * @callback: Callback for non blocking commands + */ +struct mpi3mr_drv_cmd { + struct mutex mutex; + struct completion done; + void *reply; + u8 *sensebuf; + u8 iou_rc; + u16 state; + u16 dev_handle; + u16 ioc_status; + u32 ioc_loginfo; + u8 is_waiting; + u8 is_sense; + u8 retry_count; + u16 host_tag; + + void (*callback)(struct mpi3mr_ioc *mrioc, + struct mpi3mr_drv_cmd *drv_cmd); +}; + +/** + * struct 
dma_memory_desc - memory descriptor structure to store + * virtual address, dma address and size for any generic dma + * memory allocations in the driver. + * + * @size: buffer size + * @addr: virtual address + * @dma_addr: dma address + */ +struct dma_memory_desc { + u32 size; + void *addr; + dma_addr_t dma_addr; +}; + + +/** + * struct chain_element - memory descriptor structure to store + * virtual and dma addresses for chain elements. + * + * @addr: virtual address + * @dma_addr: dma address + */ +struct chain_element { + void *addr; + dma_addr_t dma_addr; +}; + +/** + * struct scmd_priv - SCSI command private data + * + * @host_tag: Host tag specific to operational queue + * @in_lld_scope: Command in LLD scope or not + * @meta_sg_valid: DIX command with meta data SGL or not + * @scmd: SCSI Command pointer + * @req_q_idx: Operational request queue index + * @chain_idx: Chain frame index + * @meta_chain_idx: Chain frame index of meta data SGL + * @mpi3mr_scsiio_req: MPI SCSI IO request + */ +struct scmd_priv { + u16 host_tag; + u8 in_lld_scope; + u8 meta_sg_valid; + struct scsi_cmnd *scmd; + u16 req_q_idx; + int chain_idx; + int meta_chain_idx; + u8 mpi3mr_scsiio_req[MPI3MR_ADMIN_REQ_FRAME_SZ]; +}; + +/** + * struct mpi3mr_ioc - Adapter anchor structure stored in shost + * private data + * + * @list: List pointer + * @pdev: PCI device pointer + * @shost: Scsi_Host pointer + * @id: Controller ID + * @cpu_count: Number of online CPUs + * @irqpoll_sleep: usleep unit used in threaded isr irqpoll + * @name: Controller ASCII name + * @driver_name: Driver ASCII name + * @sysif_regs: System interface registers virtual address + * @sysif_regs_phys: System interface registers physical address + * @bars: PCI BARS + * @dma_mask: DMA mask + * @msix_count: Number of MSIX vectors used + * @intr_enabled: Is interrupts enabled + * @num_admin_req: Number of admin requests + * @admin_req_q_sz: Admin request queue size + * @admin_req_pi: Admin request queue producer index + * @admin_req_ci: Admin request queue consumer index + * @admin_req_base: Admin request queue base virtual address + * @admin_req_dma: Admin request queue base dma address + * @admin_req_lock: Admin queue access lock + * @num_admin_replies: Number of admin replies + * @admin_reply_q_sz: Admin reply queue size + * @admin_reply_ci: Admin reply queue consumer index + * @admin_reply_ephase:Admin reply queue expected phase + * @admin_reply_base: Admin reply queue base virtual address + * @admin_reply_dma: Admin reply queue base dma address + * @admin_reply_q_in_use: Queue is handled by poll/ISR + * @ready_timeout: Controller ready timeout + * @intr_info: Interrupt cookie pointer + * @intr_info_count: Number of interrupt cookies + * @is_intr_info_set: Flag to indicate intr info is setup + * @num_queues: Number of operational queues + * @num_op_req_q: Number of operational request queues + * @req_qinfo: Operational request queue info pointer + * @num_op_reply_q: Number of operational reply queues + * @op_reply_qinfo: Operational reply queue info pointer + * @init_cmds: Command tracker for initialization commands + * @cfg_cmds: Command tracker for configuration requests + * @facts: Cached IOC facts data + * @op_reply_desc_sz: Operational reply descriptor size + * @num_reply_bufs: Number of reply buffers allocated + * @reply_buf_pool: Reply buffer pool + * @reply_buf: Reply buffer base virtual address + * @reply_buf_dma: Reply buffer DMA address + * @reply_buf_dma_max_address: Reply DMA address max limit + * @reply_free_qsz: Reply free queue size 
+ * @reply_free_q_pool: Reply free queue pool + * @reply_free_q: Reply free queue base virtual address + * @reply_free_q_dma: Reply free queue base DMA address + * @reply_free_queue_lock: Reply free queue lock + * @reply_free_queue_host_index: Reply free queue host index + * @num_sense_bufs: Number of sense buffers + * @sense_buf_pool: Sense buffer pool + * @sense_buf: Sense buffer base virtual address + * @sense_buf_dma: Sense buffer base DMA address + * @sense_buf_q_sz: Sense buffer queue size + * @sense_buf_q_pool: Sense buffer queue pool + * @sense_buf_q: Sense buffer queue virtual address + * @sense_buf_q_dma: Sense buffer queue DMA address + * @sbq_lock: Sense buffer queue lock + * @sbq_host_index: Sense buffer queuehost index + * @event_masks: Event mask bitmap + * @fwevt_worker_name: Firmware event worker thread name + * @fwevt_worker_thread: Firmware event worker thread + * @fwevt_lock: Firmware event lock + * @fwevt_list: Firmware event list + * @watchdog_work_q_name: Fault watchdog worker thread name + * @watchdog_work_q: Fault watchdog worker thread + * @watchdog_work: Fault watchdog work + * @watchdog_lock: Fault watchdog lock + * @is_driver_loading: Is driver still loading + * @scan_started: Async scan started + * @scan_failed: Asycn scan failed + * @stop_drv_processing: Stop all command processing + * @device_refresh_on: Don't process the events until devices are refreshed + * @max_host_ios: Maximum host I/O count + * @max_sgl_entries: Max SGL entries per I/O + * @chain_buf_count: Chain buffer count + * @chain_buf_pool: Chain buffer pool + * @chain_sgl_list: Chain SGL list + * @chain_bitmap: Chain buffer allocator bitmap + * @chain_buf_lock: Chain buffer list lock + * @bsg_cmds: Command tracker for BSG command + * @host_tm_cmds: Command tracker for task management commands + * @dev_rmhs_cmds: Command tracker for device removal commands + * @evtack_cmds: Command tracker for event ack commands + * @devrem_bitmap: Device removal bitmap + * @dev_handle_bitmap_bits: Number of bits in device handle bitmap + * @removepend_bitmap: Remove pending bitmap + * @delayed_rmhs_list: Delayed device removal list + * @evtack_cmds_bitmap: Event Ack bitmap + * @delayed_evtack_cmds_list: Delayed event acknowledgment list + * @ts_update_counter: Timestamp update counter + * @reset_in_progress: Reset in progress flag + * @unrecoverable: Controller unrecoverable flag + * @prev_reset_result: Result of previous reset + * @reset_mutex: Controller reset mutex + * @reset_waitq: Controller reset wait queue + * @prepare_for_reset: Prepare for reset event received + * @prepare_for_reset_timeout_counter: Prepare for reset timeout + * @prp_list_virt: NVMe encapsulated PRP list virtual base + * @prp_list_dma: NVMe encapsulated PRP list DMA + * @prp_sz: NVME encapsulated PRP list size + * @diagsave_timeout: Diagnostic information save timeout + * @logging_level: Controller debug logging level + * @flush_io_count: I/O count to flush after reset + * @current_event: Firmware event currently in process + * @driver_info: Driver, Kernel, OS information to firmware + * @change_count: Topology change count + * @pel_enabled: Persistent Event Log(PEL) enabled or not + * @pel_abort_requested: PEL abort is requested or not + * @pel_class: PEL Class identifier + * @pel_locale: PEL Locale identifier + * @pel_cmds: Command tracker for PEL wait command + * @pel_abort_cmd: Command tracker for PEL abort command + * @pel_newest_seqnum: Newest PEL sequenece number + * @pel_seqnum_virt: PEL sequence number virtual address + * 
@pel_seqnum_dma: PEL sequence number DMA address + * @pel_seqnum_sz: PEL sequenece number size + * @op_reply_q_offset: Operational reply queue offset with MSIx + * @default_qcount: Total Default queues + * @active_poll_qcount: Currently active poll queue count + * @requested_poll_qcount: User requested poll queue count + * @bsg_dev: BSG device structure + * @bsg_queue: Request queue for BSG device + * @stop_bsgs: Stop BSG request flag + * @logdata_buf: Circular buffer to store log data entries + * @logdata_buf_idx: Index of entry in buffer to store + * @logdata_entry_sz: log data entry size + * @pend_large_data_sz: Counter to track pending large data + * @io_throttle_data_length: I/O size to track in 512b blocks + * @io_throttle_high: I/O size to start throttle in 512b blocks + * @io_throttle_low: I/O size to stop throttle in 512b blocks + * @num_io_throttle_group: Maximum number of throttle groups + * @throttle_groups: Pointer to throttle group info structures + * @cfg_page: Default memory for configuration pages + * @cfg_page_dma: Configuration page DMA address + * @cfg_page_sz: Default configuration page memory size + * @sas_transport_enabled: SAS transport enabled or not + * @scsi_device_channel: Channel ID for SCSI devices + * @transport_cmds: Command tracker for SAS transport commands + * @sas_hba: SAS node for the controller + * @sas_expander_list: SAS node list of expanders + * @sas_node_lock: Lock to protect SAS node list + * @hba_port_table_list: List of HBA Ports + * @enclosure_list: List of Enclosure objects + */ +struct mpi3mr_ioc { + struct list_head list; + struct pci_dev *pdev; + struct Scsi_Host *shost; + u8 id; + int cpu_count; + bool enable_segqueue; + u32 irqpoll_sleep; + + char name[MPI3MR_NAME_LENGTH]; + char driver_name[MPI3MR_NAME_LENGTH]; + + volatile struct mpi3_sysif_registers __iomem *sysif_regs; + resource_size_t sysif_regs_phys; + int bars; + u64 dma_mask; + + u16 msix_count; + u8 intr_enabled; + + u16 num_admin_req; + u32 admin_req_q_sz; + u16 admin_req_pi; + u16 admin_req_ci; + void *admin_req_base; + dma_addr_t admin_req_dma; + spinlock_t admin_req_lock; + + u16 num_admin_replies; + u32 admin_reply_q_sz; + u16 admin_reply_ci; + u8 admin_reply_ephase; + void *admin_reply_base; + dma_addr_t admin_reply_dma; + atomic_t admin_reply_q_in_use; + + u32 ready_timeout; + + struct mpi3mr_intr_info *intr_info; + u16 intr_info_count; + bool is_intr_info_set; + + u16 num_queues; + u16 num_op_req_q; + struct op_req_qinfo *req_qinfo; + + u16 num_op_reply_q; + struct op_reply_qinfo *op_reply_qinfo; + + struct mpi3mr_drv_cmd init_cmds; + struct mpi3mr_drv_cmd cfg_cmds; + struct mpi3mr_ioc_facts facts; + u16 op_reply_desc_sz; + + u32 num_reply_bufs; + struct dma_pool *reply_buf_pool; + u8 *reply_buf; + dma_addr_t reply_buf_dma; + dma_addr_t reply_buf_dma_max_address; + + u16 reply_free_qsz; + u16 reply_sz; + struct dma_pool *reply_free_q_pool; + __le64 *reply_free_q; + dma_addr_t reply_free_q_dma; + spinlock_t reply_free_queue_lock; + u32 reply_free_queue_host_index; + + u32 num_sense_bufs; + struct dma_pool *sense_buf_pool; + u8 *sense_buf; + dma_addr_t sense_buf_dma; + + u16 sense_buf_q_sz; + struct dma_pool *sense_buf_q_pool; + __le64 *sense_buf_q; + dma_addr_t sense_buf_q_dma; + spinlock_t sbq_lock; + u32 sbq_host_index; + u32 event_masks[MPI3_EVENT_NOTIFY_EVENTMASK_WORDS]; + + char fwevt_worker_name[MPI3MR_NAME_LENGTH]; + struct workqueue_struct *fwevt_worker_thread; + spinlock_t fwevt_lock; + struct list_head fwevt_list; + + char watchdog_work_q_name[20]; + struct 
workqueue_struct *watchdog_work_q; + struct delayed_work watchdog_work; + spinlock_t watchdog_lock; + + u8 is_driver_loading; + u8 scan_started; + u16 scan_failed; + u8 stop_drv_processing; + u8 device_refresh_on; + + u16 max_host_ios; + spinlock_t tgtdev_lock; + struct list_head tgtdev_list; + u16 max_sgl_entries; + + u32 chain_buf_count; + struct dma_pool *chain_buf_pool; + struct chain_element *chain_sgl_list; + unsigned long *chain_bitmap; + spinlock_t chain_buf_lock; + + struct mpi3mr_drv_cmd bsg_cmds; + struct mpi3mr_drv_cmd host_tm_cmds; + struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD]; + struct mpi3mr_drv_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD]; + unsigned long *devrem_bitmap; + u16 dev_handle_bitmap_bits; + unsigned long *removepend_bitmap; + struct list_head delayed_rmhs_list; + unsigned long *evtack_cmds_bitmap; + struct list_head delayed_evtack_cmds_list; + + u32 ts_update_counter; + u8 reset_in_progress; + u8 unrecoverable; + int prev_reset_result; + struct mutex reset_mutex; + wait_queue_head_t reset_waitq; + + u8 prepare_for_reset; + u16 prepare_for_reset_timeout_counter; + + void *prp_list_virt; + dma_addr_t prp_list_dma; + u32 prp_sz; + + u16 diagsave_timeout; + int logging_level; + u16 flush_io_count; + + struct mpi3mr_fwevt *current_event; + struct mpi3_driver_info_layout driver_info; + u16 change_count; + + u8 pel_enabled; + u8 pel_abort_requested; + u8 pel_class; + u16 pel_locale; + struct mpi3mr_drv_cmd pel_cmds; + struct mpi3mr_drv_cmd pel_abort_cmd; + + u32 pel_newest_seqnum; + void *pel_seqnum_virt; + dma_addr_t pel_seqnum_dma; + u32 pel_seqnum_sz; + + u16 op_reply_q_offset; + u16 default_qcount; + u16 active_poll_qcount; + u16 requested_poll_qcount; + + struct device bsg_dev; + struct request_queue *bsg_queue; + u8 stop_bsgs; + u8 *logdata_buf; + u16 logdata_buf_idx; + u16 logdata_entry_sz; + + atomic_t pend_large_data_sz; + u32 io_throttle_data_length; + u32 io_throttle_high; + u32 io_throttle_low; + u16 num_io_throttle_group; + struct mpi3mr_throttle_group_info *throttle_groups; + + void *cfg_page; + dma_addr_t cfg_page_dma; + u16 cfg_page_sz; + + u8 sas_transport_enabled; + u8 scsi_device_channel; + struct mpi3mr_drv_cmd transport_cmds; + struct mpi3mr_sas_node sas_hba; + struct list_head sas_expander_list; + spinlock_t sas_node_lock; + struct list_head hba_port_table_list; + struct list_head enclosure_list; +}; + +/** + * struct mpi3mr_fwevt - Firmware event structure. 
+ * + * @list: list head + * @work: Work structure + * @mrioc: Adapter instance reference + * @event_id: MPI3 firmware event ID + * @send_ack: Event acknowledgment required or not + * @process_evt: Bottomhalf processing required or not + * @evt_ctx: Event context to send in Ack + * @event_data_size: size of the event data in bytes + * @pending_at_sml: waiting for device add/remove API to complete + * @discard: discard this event + * @ref_count: kref count + * @event_data: Actual MPI3 event data + */ +struct mpi3mr_fwevt { + struct list_head list; + struct work_struct work; + struct mpi3mr_ioc *mrioc; + u16 event_id; + bool send_ack; + bool process_evt; + u32 evt_ctx; + u16 event_data_size; + bool pending_at_sml; + bool discard; + struct kref ref_count; + char event_data[] __aligned(4); +}; + + +/** + * struct delayed_dev_rmhs_node - Delayed device removal node + * + * @list: list head + * @handle: Device handle + * @iou_rc: IO Unit Control Reason Code + */ +struct delayed_dev_rmhs_node { + struct list_head list; + u16 handle; + u8 iou_rc; +}; + +/** + * struct delayed_evt_ack_node - Delayed event ack node + * @list: list head + * @event: MPI3 event ID + * @event_ctx: event context + */ +struct delayed_evt_ack_node { + struct list_head list; + u8 event; + u32 event_ctx; +}; + +int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc); +void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc); +int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc); +int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume); +void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc); +int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async); +int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req, +u16 admin_req_sz, u8 ignore_reset); +int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, + struct op_req_qinfo *opreqq, u8 *req); +void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length, + dma_addr_t dma_addr); +void mpi3mr_build_zero_len_sge(void *paddr); +void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc, + dma_addr_t phys_addr); +void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc, + dma_addr_t phys_addr); +void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc, + u64 sense_buf_dma); + +void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc); +void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc); +void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, + struct mpi3_event_notification_reply *event_reply); +void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, + struct mpi3_default_reply_descriptor *reply_desc, + u64 *reply_dma, u16 qidx); +void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc); +void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc); + +int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, + u32 reset_reason, u8 snapdump); +void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc); +void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc); + +enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc); +int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event, + u32 event_ctx); + +void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout); +void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc); +void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc); +void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc); +void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc); +void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc); +void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code); +void mpi3mr_print_fault_info(struct mpi3mr_ioc 
*mrioc); +void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code); +int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, + struct op_reply_qinfo *op_reply_q); +int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num); +void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc); +void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc); +int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, + u16 handle, uint lun, u16 htag, ulong timeout, + struct mpi3mr_drv_cmd *drv_cmd, + u8 *resp_code, struct scsi_cmnd *scmd); +struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle( + struct mpi3mr_ioc *mrioc, u16 handle); +void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc, + struct mpi3mr_drv_cmd *drv_cmd); +int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc, + struct mpi3mr_drv_cmd *drv_cmd); +void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data, + u16 event_data_size); +struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle( + struct mpi3mr_ioc *mrioc, u16 handle); +extern const struct attribute_group *mpi3mr_host_groups[]; +extern const struct attribute_group *mpi3mr_dev_groups[]; + +extern struct sas_function_template mpi3mr_transport_functions; +extern struct scsi_transport_template *mpi3mr_transport_template; + +int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, + struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec); +int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, + struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form, + u32 form_spec); +int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status, + struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form, + u32 form_spec); +int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, + struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form, + u32 form_spec); +int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status, + struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form, + u32 form_spec); +int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, + struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form, + u32 form_spec); +int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc, + struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz); +int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc, + struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz); +int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc, + struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz); +int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc, + struct mpi3_driver_page1 *driver_pg1, u16 pg_sz); + +u8 mpi3mr_is_expander_device(u16 device_info); +int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle); +void mpi3mr_expander_remove(struct mpi3mr_ioc *mrioc, u64 sas_address, + struct mpi3mr_hba_port *hba_port); +struct mpi3mr_sas_node *__mpi3mr_expander_find_by_handle(struct mpi3mr_ioc + *mrioc, u16 handle); +struct mpi3mr_hba_port *mpi3mr_get_hba_port_by_id(struct mpi3mr_ioc *mrioc, + u8 port_id); +void mpi3mr_sas_host_refresh(struct mpi3mr_ioc *mrioc); +void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc); +void mpi3mr_update_links(struct mpi3mr_ioc *mrioc, + u64 sas_address_parent, u16 handle, u8 phy_number, u8 link_rate, + struct mpi3mr_hba_port *hba_port); +void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc, + struct mpi3mr_tgt_dev *tgtdev); +int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc, + struct mpi3mr_tgt_dev 
*tgtdev); +void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc, + struct mpi3mr_tgt_dev *tgtdev); +struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr_and_rphy( + struct mpi3mr_ioc *mrioc, u64 sas_address, struct sas_rphy *rphy); +void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc, + bool device_add); +void mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc); +void mpi3mr_refresh_expanders(struct mpi3mr_ioc *mrioc); +void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc); +void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc); +void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc); +void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc); +int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc); +void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc, + struct mpi3mr_sas_node *sas_expander); +#endif /*MPI3MR_H_INCLUDED*/ diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c new file mode 100644 index 000000000..9dacbb857 --- /dev/null +++ b/drivers/scsi/mpi3mr/mpi3mr_app.c @@ -0,0 +1,1871 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for Broadcom MPI3 Storage Controllers + * + * Copyright (C) 2017-2023 Broadcom Inc. + * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) + * + */ + +#include "mpi3mr.h" +#include +#include + +/** + * mpi3mr_bsg_pel_abort - sends PEL abort request + * @mrioc: Adapter instance reference + * + * This function sends PEL abort request to the firmware through + * admin request queue. + * + * Return: 0 on success, -1 on failure + */ +static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc) +{ + struct mpi3_pel_req_action_abort pel_abort_req; + struct mpi3_pel_reply *pel_reply; + int retval = 0; + u16 pe_log_status; + + if (mrioc->reset_in_progress) { + dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); + return -1; + } + if (mrioc->stop_bsgs) { + dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__); + return -1; + } + + memset(&pel_abort_req, 0, sizeof(pel_abort_req)); + mutex_lock(&mrioc->pel_abort_cmd.mutex); + if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) { + dprint_bsg_err(mrioc, "%s: command is in use\n", __func__); + mutex_unlock(&mrioc->pel_abort_cmd.mutex); + return -1; + } + mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING; + mrioc->pel_abort_cmd.is_waiting = 1; + mrioc->pel_abort_cmd.callback = NULL; + pel_abort_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT); + pel_abort_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; + pel_abort_req.action = MPI3_PEL_ACTION_ABORT; + pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT); + + mrioc->pel_abort_requested = 1; + init_completion(&mrioc->pel_abort_cmd.done); + retval = mpi3mr_admin_request_post(mrioc, &pel_abort_req, + sizeof(pel_abort_req), 0); + if (retval) { + retval = -1; + dprint_bsg_err(mrioc, "%s: admin request post failed\n", + __func__); + mrioc->pel_abort_requested = 0; + goto out_unlock; + } + + wait_for_completion_timeout(&mrioc->pel_abort_cmd.done, + (MPI3MR_INTADMCMD_TIMEOUT * HZ)); + if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) { + mrioc->pel_abort_cmd.is_waiting = 0; + dprint_bsg_err(mrioc, "%s: command timedout\n", __func__); + if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_RESET)) + mpi3mr_soft_reset_handler(mrioc, + MPI3MR_RESET_FROM_PELABORT_TIMEOUT, 1); + retval = -1; + goto out_unlock; + } + if ((mrioc->pel_abort_cmd.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + != MPI3_IOCSTATUS_SUCCESS) { + dprint_bsg_err(mrioc, + "%s: command 
failed, ioc_status(0x%04x) log_info(0x%08x)\n", + __func__, (mrioc->pel_abort_cmd.ioc_status & + MPI3_IOCSTATUS_STATUS_MASK), + mrioc->pel_abort_cmd.ioc_loginfo); + retval = -1; + goto out_unlock; + } + if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_REPLY_VALID) { + pel_reply = (struct mpi3_pel_reply *)mrioc->pel_abort_cmd.reply; + pe_log_status = le16_to_cpu(pel_reply->pe_log_status); + if (pe_log_status != MPI3_PEL_STATUS_SUCCESS) { + dprint_bsg_err(mrioc, + "%s: command failed, pel_status(0x%04x)\n", + __func__, pe_log_status); + retval = -1; + } + } + +out_unlock: + mrioc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->pel_abort_cmd.mutex); + return retval; +} +/** + * mpi3mr_bsg_verify_adapter - verify adapter number is valid + * @ioc_number: Adapter number + * + * This function returns the adapter instance pointer of given + * adapter number. If adapter number does not match with the + * driver's adapter list, driver returns NULL. + * + * Return: adapter instance reference + */ +static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number) +{ + struct mpi3mr_ioc *mrioc = NULL; + + spin_lock(&mrioc_list_lock); + list_for_each_entry(mrioc, &mrioc_list, list) { + if (mrioc->id == ioc_number) { + spin_unlock(&mrioc_list_lock); + return mrioc; + } + } + spin_unlock(&mrioc_list_lock); + return NULL; +} + +/** + * mpi3mr_enable_logdata - Handler for log data enable + * @mrioc: Adapter instance reference + * @job: BSG job reference + * + * This function enables log data caching in the driver if not + * already enabled and return the maximum number of log data + * entries that can be cached in the driver. + * + * Return: 0 on success and proper error codes on failure + */ +static long mpi3mr_enable_logdata(struct mpi3mr_ioc *mrioc, + struct bsg_job *job) +{ + struct mpi3mr_logdata_enable logdata_enable; + + if (!mrioc->logdata_buf) { + mrioc->logdata_entry_sz = + (mrioc->reply_sz - (sizeof(struct mpi3_event_notification_reply) - 4)) + + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ; + mrioc->logdata_buf_idx = 0; + mrioc->logdata_buf = kcalloc(MPI3MR_BSG_LOGDATA_MAX_ENTRIES, + mrioc->logdata_entry_sz, GFP_KERNEL); + + if (!mrioc->logdata_buf) + return -ENOMEM; + } + + memset(&logdata_enable, 0, sizeof(logdata_enable)); + logdata_enable.max_entries = + MPI3MR_BSG_LOGDATA_MAX_ENTRIES; + if (job->request_payload.payload_len >= sizeof(logdata_enable)) { + sg_copy_from_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + &logdata_enable, sizeof(logdata_enable)); + return 0; + } + + return -EINVAL; +} +/** + * mpi3mr_get_logdata - Handler for get log data + * @mrioc: Adapter instance reference + * @job: BSG job pointer + * This function copies the log data entries to the user buffer + * when log caching is enabled in the driver. 
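+ *
+ * Sizing sketch (illustrative): the number of entries returned is the
+ * user payload length divided by the cached entry size, capped at
+ * MPI3MR_BSG_LOGDATA_MAX_ENTRIES; for example, a 16 KiB payload with a
+ * hypothetical 512-byte entry size yields at most 32 entries.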
+ * + * Return: 0 on success and proper error codes on failure + */ +static long mpi3mr_get_logdata(struct mpi3mr_ioc *mrioc, + struct bsg_job *job) +{ + u16 num_entries, sz, entry_sz = mrioc->logdata_entry_sz; + + if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz)) + return -EINVAL; + + num_entries = job->request_payload.payload_len / entry_sz; + if (num_entries > MPI3MR_BSG_LOGDATA_MAX_ENTRIES) + num_entries = MPI3MR_BSG_LOGDATA_MAX_ENTRIES; + sz = num_entries * entry_sz; + + if (job->request_payload.payload_len >= sz) { + sg_copy_from_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + mrioc->logdata_buf, sz); + return 0; + } + return -EINVAL; +} + +/** + * mpi3mr_bsg_pel_enable - Handler for PEL enable driver + * @mrioc: Adapter instance reference + * @job: BSG job pointer + * + * This function is the handler for PEL enable driver. + * Validates the application given class and locale and if + * requires aborts the existing PEL wait request and/or issues + * new PEL wait request to the firmware and returns. + * + * Return: 0 on success and proper error codes on failure. + */ +static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc, + struct bsg_job *job) +{ + long rval = -EINVAL; + struct mpi3mr_bsg_out_pel_enable pel_enable; + u8 issue_pel_wait; + u8 tmp_class; + u16 tmp_locale; + + if (job->request_payload.payload_len != sizeof(pel_enable)) { + dprint_bsg_err(mrioc, "%s: invalid size argument\n", + __func__); + return rval; + } + + if (mrioc->unrecoverable) { + dprint_bsg_err(mrioc, "%s: unrecoverable controller\n", + __func__); + return -EFAULT; + } + + if (mrioc->reset_in_progress) { + dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); + return -EAGAIN; + } + + if (mrioc->stop_bsgs) { + dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__); + return -EAGAIN; + } + + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + &pel_enable, sizeof(pel_enable)); + + if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) { + dprint_bsg_err(mrioc, "%s: out of range class %d sent\n", + __func__, pel_enable.pel_class); + rval = 0; + goto out; + } + if (!mrioc->pel_enabled) + issue_pel_wait = 1; + else { + if ((mrioc->pel_class <= pel_enable.pel_class) && + !((mrioc->pel_locale & pel_enable.pel_locale) ^ + pel_enable.pel_locale)) { + issue_pel_wait = 0; + rval = 0; + } else { + pel_enable.pel_locale |= mrioc->pel_locale; + + if (mrioc->pel_class < pel_enable.pel_class) + pel_enable.pel_class = mrioc->pel_class; + + rval = mpi3mr_bsg_pel_abort(mrioc); + if (rval) { + dprint_bsg_err(mrioc, + "%s: pel_abort failed, status(%ld)\n", + __func__, rval); + goto out; + } + issue_pel_wait = 1; + } + } + if (issue_pel_wait) { + tmp_class = mrioc->pel_class; + tmp_locale = mrioc->pel_locale; + mrioc->pel_class = pel_enable.pel_class; + mrioc->pel_locale = pel_enable.pel_locale; + mrioc->pel_enabled = 1; + rval = mpi3mr_pel_get_seqnum_post(mrioc, NULL); + if (rval) { + mrioc->pel_class = tmp_class; + mrioc->pel_locale = tmp_locale; + mrioc->pel_enabled = 0; + dprint_bsg_err(mrioc, + "%s: pel get sequence number failed, status(%ld)\n", + __func__, rval); + } + } + +out: + return rval; +} +/** + * mpi3mr_get_all_tgt_info - Get all target information + * @mrioc: Adapter instance reference + * @job: BSG job reference + * + * This function copies the driver managed target devices device + * handle, persistent ID, bus ID and taret ID to the user + * provided buffer for the specific controller. 
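+ * The layout copied out, as implemented below, is an 8-byte header
+ * carrying the device count followed by an array of
+ * struct mpi3mr_device_map_info entries, truncated to the smaller of
+ * the user buffer and the kernel device list.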
This function + * also provides the number of devices managed by the driver for + * the specific controller. + * + * Return: 0 on success and proper error codes on failure + */ +static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc, + struct bsg_job *job) +{ + u16 num_devices = 0, i = 0, size; + unsigned long flags; + struct mpi3mr_tgt_dev *tgtdev; + struct mpi3mr_device_map_info *devmap_info = NULL; + struct mpi3mr_all_tgt_info *alltgt_info = NULL; + uint32_t min_entrylen = 0, kern_entrylen = 0, usr_entrylen = 0; + + if (job->request_payload.payload_len < sizeof(u32)) { + dprint_bsg_err(mrioc, "%s: invalid size argument\n", + __func__); + return -EINVAL; + } + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) + num_devices++; + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + + if ((job->request_payload.payload_len <= sizeof(u64)) || + list_empty(&mrioc->tgtdev_list)) { + sg_copy_from_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + &num_devices, sizeof(num_devices)); + return 0; + } + + kern_entrylen = num_devices * sizeof(*devmap_info); + size = sizeof(u64) + kern_entrylen; + alltgt_info = kzalloc(size, GFP_KERNEL); + if (!alltgt_info) + return -ENOMEM; + + devmap_info = alltgt_info->dmi; + memset((u8 *)devmap_info, 0xFF, kern_entrylen); + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { + if (i < num_devices) { + devmap_info[i].handle = tgtdev->dev_handle; + devmap_info[i].perst_id = tgtdev->perst_id; + if (tgtdev->host_exposed && tgtdev->starget) { + devmap_info[i].target_id = tgtdev->starget->id; + devmap_info[i].bus_id = + tgtdev->starget->channel; + } + i++; + } + } + num_devices = i; + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + + alltgt_info->num_devices = num_devices; + + usr_entrylen = (job->request_payload.payload_len - sizeof(u64)) / + sizeof(*devmap_info); + usr_entrylen *= sizeof(*devmap_info); + min_entrylen = min(usr_entrylen, kern_entrylen); + + sg_copy_from_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + alltgt_info, (min_entrylen + sizeof(u64))); + kfree(alltgt_info); + return 0; +} +/** + * mpi3mr_get_change_count - Get topology change count + * @mrioc: Adapter instance reference + * @job: BSG job reference + * + * This function copies the toplogy change count provided by the + * driver in events and cached in the driver to the user + * provided buffer for the specific controller. 
+ * + * Return: 0 on success and proper error codes on failure + */ +static long mpi3mr_get_change_count(struct mpi3mr_ioc *mrioc, + struct bsg_job *job) +{ + struct mpi3mr_change_count chgcnt; + + memset(&chgcnt, 0, sizeof(chgcnt)); + chgcnt.change_count = mrioc->change_count; + if (job->request_payload.payload_len >= sizeof(chgcnt)) { + sg_copy_from_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + &chgcnt, sizeof(chgcnt)); + return 0; + } + return -EINVAL; +} + +/** + * mpi3mr_bsg_adp_reset - Issue controller reset + * @mrioc: Adapter instance reference + * @job: BSG job reference + * + * This function identifies the user provided reset type and + * issues approporiate reset to the controller and wait for that + * to complete and reinitialize the controller and then returns + * + * Return: 0 on success and proper error codes on failure + */ +static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc, + struct bsg_job *job) +{ + long rval = -EINVAL; + u8 save_snapdump; + struct mpi3mr_bsg_adp_reset adpreset; + + if (job->request_payload.payload_len != + sizeof(adpreset)) { + dprint_bsg_err(mrioc, "%s: invalid size argument\n", + __func__); + goto out; + } + + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + &adpreset, sizeof(adpreset)); + + switch (adpreset.reset_type) { + case MPI3MR_BSG_ADPRESET_SOFT: + save_snapdump = 0; + break; + case MPI3MR_BSG_ADPRESET_DIAG_FAULT: + save_snapdump = 1; + break; + default: + dprint_bsg_err(mrioc, "%s: unknown reset_type(%d)\n", + __func__, adpreset.reset_type); + goto out; + } + + rval = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_APP, + save_snapdump); + + if (rval) + dprint_bsg_err(mrioc, + "%s: reset handler returned error(%ld) for reset type %d\n", + __func__, rval, adpreset.reset_type); +out: + return rval; +} + +/** + * mpi3mr_bsg_populate_adpinfo - Get adapter info command handler + * @mrioc: Adapter instance reference + * @job: BSG job reference + * + * This function provides adapter information for the given + * controller + * + * Return: 0 on success and proper error codes on failure + */ +static long mpi3mr_bsg_populate_adpinfo(struct mpi3mr_ioc *mrioc, + struct bsg_job *job) +{ + enum mpi3mr_iocstate ioc_state; + struct mpi3mr_bsg_in_adpinfo adpinfo; + + memset(&adpinfo, 0, sizeof(adpinfo)); + adpinfo.adp_type = MPI3MR_BSG_ADPTYPE_AVGFAMILY; + adpinfo.pci_dev_id = mrioc->pdev->device; + adpinfo.pci_dev_hw_rev = mrioc->pdev->revision; + adpinfo.pci_subsys_dev_id = mrioc->pdev->subsystem_device; + adpinfo.pci_subsys_ven_id = mrioc->pdev->subsystem_vendor; + adpinfo.pci_bus = mrioc->pdev->bus->number; + adpinfo.pci_dev = PCI_SLOT(mrioc->pdev->devfn); + adpinfo.pci_func = PCI_FUNC(mrioc->pdev->devfn); + adpinfo.pci_seg_id = pci_domain_nr(mrioc->pdev->bus); + adpinfo.app_intfc_ver = MPI3MR_IOCTL_VERSION; + + ioc_state = mpi3mr_get_iocstate(mrioc); + if (ioc_state == MRIOC_STATE_UNRECOVERABLE) + adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE; + else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs)) + adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET; + else if (ioc_state == MRIOC_STATE_FAULT) + adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_FAULT; + else + adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL; + + memcpy((u8 *)&adpinfo.driver_info, (u8 *)&mrioc->driver_info, + sizeof(adpinfo.driver_info)); + + if (job->request_payload.payload_len >= sizeof(adpinfo)) { + sg_copy_from_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + &adpinfo, sizeof(adpinfo)); + 
return 0; + } + return -EINVAL; +} + +/** + * mpi3mr_bsg_process_drv_cmds - Driver Command handler + * @job: BSG job reference + * + * This function is the top level handler for driver commands, + * this does basic validation of the buffer and identifies the + * opcode and switches to correct sub handler. + * + * Return: 0 on success and proper error codes on failure + */ +static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job) +{ + long rval = -EINVAL; + struct mpi3mr_ioc *mrioc = NULL; + struct mpi3mr_bsg_packet *bsg_req = NULL; + struct mpi3mr_bsg_drv_cmd *drvrcmd = NULL; + + bsg_req = job->request; + drvrcmd = &bsg_req->cmd.drvrcmd; + + mrioc = mpi3mr_bsg_verify_adapter(drvrcmd->mrioc_id); + if (!mrioc) + return -ENODEV; + + if (drvrcmd->opcode == MPI3MR_DRVBSG_OPCODE_ADPINFO) { + rval = mpi3mr_bsg_populate_adpinfo(mrioc, job); + return rval; + } + + if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) + return -ERESTARTSYS; + + switch (drvrcmd->opcode) { + case MPI3MR_DRVBSG_OPCODE_ADPRESET: + rval = mpi3mr_bsg_adp_reset(mrioc, job); + break; + case MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO: + rval = mpi3mr_get_all_tgt_info(mrioc, job); + break; + case MPI3MR_DRVBSG_OPCODE_GETCHGCNT: + rval = mpi3mr_get_change_count(mrioc, job); + break; + case MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE: + rval = mpi3mr_enable_logdata(mrioc, job); + break; + case MPI3MR_DRVBSG_OPCODE_GETLOGDATA: + rval = mpi3mr_get_logdata(mrioc, job); + break; + case MPI3MR_DRVBSG_OPCODE_PELENABLE: + rval = mpi3mr_bsg_pel_enable(mrioc, job); + break; + case MPI3MR_DRVBSG_OPCODE_UNKNOWN: + default: + pr_err("%s: unsupported driver command opcode %d\n", + MPI3MR_DRIVER_NAME, drvrcmd->opcode); + break; + } + mutex_unlock(&mrioc->bsg_cmds.mutex); + return rval; +} + +/** + * mpi3mr_bsg_build_sgl - SGL construction for MPI commands + * @mpi_req: MPI request + * @sgl_offset: offset to start sgl in the MPI request + * @drv_bufs: DMA address of the buffers to be placed in sgl + * @bufcnt: Number of DMA buffers + * @is_rmc: Does the buffer list has management command buffer + * @is_rmr: Does the buffer list has management response buffer + * @num_datasges: Number of data buffers in the list + * + * This function places the DMA address of the given buffers in + * proper format as SGEs in the given MPI request. 
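For MPT passthrough requests that carry plain data buffers and no RAID management command buffer, mpi3mr_bsg_process_mpt_cmds() later in this file refuses the request unless the MPI message plus one simple SGE per data buffer still fits inside a single admin request frame; that is the room mpi3mr_bsg_build_sgl() below relies on when it appends SGEs at sgl_offset. A hedged sketch of that fit check, with admin_frame_sz and sge_sz standing in for MPI3MR_ADMIN_REQ_FRAME_SZ and sizeof(struct mpi3_sge_common):

#include <stdbool.h>
#include <stddef.h>

/* True when the admin request frame still has room for the data SGEs. */
static bool sgl_fits_in_admin_frame(size_t mpi_msg_size, size_t num_data_bufs,
                                    size_t sge_sz, size_t admin_frame_sz)
{
        return (mpi_msg_size + num_data_bufs * sge_sz) <= admin_frame_sz;
}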
+ * + * Return: Nothing + */ +static void mpi3mr_bsg_build_sgl(u8 *mpi_req, uint32_t sgl_offset, + struct mpi3mr_buf_map *drv_bufs, u8 bufcnt, u8 is_rmc, + u8 is_rmr, u8 num_datasges) +{ + u8 *sgl = (mpi_req + sgl_offset), count = 0; + struct mpi3_mgmt_passthrough_request *rmgmt_req = + (struct mpi3_mgmt_passthrough_request *)mpi_req; + struct mpi3mr_buf_map *drv_buf_iter = drv_bufs; + u8 sgl_flags, sgl_flags_last; + + sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | + MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_BUFFER; + sgl_flags_last = sgl_flags | MPI3_SGE_FLAGS_END_OF_LIST; + + if (is_rmc) { + mpi3mr_add_sg_single(&rmgmt_req->command_sgl, + sgl_flags_last, drv_buf_iter->kern_buf_len, + drv_buf_iter->kern_buf_dma); + sgl = (u8 *)drv_buf_iter->kern_buf + drv_buf_iter->bsg_buf_len; + drv_buf_iter++; + count++; + if (is_rmr) { + mpi3mr_add_sg_single(&rmgmt_req->response_sgl, + sgl_flags_last, drv_buf_iter->kern_buf_len, + drv_buf_iter->kern_buf_dma); + drv_buf_iter++; + count++; + } else + mpi3mr_build_zero_len_sge( + &rmgmt_req->response_sgl); + } + if (!num_datasges) { + mpi3mr_build_zero_len_sge(sgl); + return; + } + for (; count < bufcnt; count++, drv_buf_iter++) { + if (drv_buf_iter->data_dir == DMA_NONE) + continue; + if (num_datasges == 1 || !is_rmc) + mpi3mr_add_sg_single(sgl, sgl_flags_last, + drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma); + else + mpi3mr_add_sg_single(sgl, sgl_flags, + drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma); + sgl += sizeof(struct mpi3_sge_common); + num_datasges--; + } +} + +/** + * mpi3mr_get_nvme_data_fmt - returns the NVMe data format + * @nvme_encap_request: NVMe encapsulated MPI request + * + * This function returns the type of the data format specified + * in user provided NVMe command in NVMe encapsulated request. + * + * Return: Data format of the NVMe command (PRP/SGL etc) + */ +static unsigned int mpi3mr_get_nvme_data_fmt( + struct mpi3_nvme_encapsulated_request *nvme_encap_request) +{ + u8 format = 0; + + format = ((nvme_encap_request->command[0] & 0xc000) >> 14); + return format; + +} + +/** + * mpi3mr_build_nvme_sgl - SGL constructor for NVME + * encapsulated request + * @mrioc: Adapter instance reference + * @nvme_encap_request: NVMe encapsulated MPI request + * @drv_bufs: DMA address of the buffers to be placed in sgl + * @bufcnt: Number of DMA buffers + * + * This function places the DMA address of the given buffers in + * proper format as SGEs in the given NVMe encapsulated request. + * + * Return: 0 on success, -1 on failure + */ +static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc, + struct mpi3_nvme_encapsulated_request *nvme_encap_request, + struct mpi3mr_buf_map *drv_bufs, u8 bufcnt) +{ + struct mpi3mr_nvme_pt_sge *nvme_sgl; + u64 sgl_ptr; + u8 count; + size_t length = 0; + struct mpi3mr_buf_map *drv_buf_iter = drv_bufs; + u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) << + mrioc->facts.sge_mod_shift) << 32); + u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) << + mrioc->facts.sge_mod_shift) << 32; + + /* + * Not all commands require a data transfer. If no data, just return + * without constructing any sgl. 
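mpi3mr_get_nvme_data_fmt() above reads bits 15:14 of the first dword of the encapsulated NVMe command, which is the NVMe PSDT field selecting PRP (0) or one of the two SGL descriptor formats (1 or 2). A one-line user-space restatement, kept separate from the driver code and using an illustrative name:

#include <stdint.h>

/* PSDT: bits 15:14 of NVMe command dword 0; 0 = PRP, 1/2 = SGL formats. */
static unsigned int nvme_psdt(uint32_t cdw0)
{
        return (cdw0 & 0xc000u) >> 14;
}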
+ */ + for (count = 0; count < bufcnt; count++, drv_buf_iter++) { + if (drv_buf_iter->data_dir == DMA_NONE) + continue; + sgl_ptr = (u64)drv_buf_iter->kern_buf_dma; + length = drv_buf_iter->kern_buf_len; + break; + } + if (!length) + return 0; + + if (sgl_ptr & sgemod_mask) { + dprint_bsg_err(mrioc, + "%s: SGL address collides with SGE modifier\n", + __func__); + return -1; + } + + sgl_ptr &= ~sgemod_mask; + sgl_ptr |= sgemod_val; + nvme_sgl = (struct mpi3mr_nvme_pt_sge *) + ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET); + memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge)); + nvme_sgl->base_addr = sgl_ptr; + nvme_sgl->length = length; + return 0; +} + +/** + * mpi3mr_build_nvme_prp - PRP constructor for NVME + * encapsulated request + * @mrioc: Adapter instance reference + * @nvme_encap_request: NVMe encapsulated MPI request + * @drv_bufs: DMA address of the buffers to be placed in SGL + * @bufcnt: Number of DMA buffers + * + * This function places the DMA address of the given buffers in + * proper format as PRP entries in the given NVMe encapsulated + * request. + * + * Return: 0 on success, -1 on failure + */ +static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc, + struct mpi3_nvme_encapsulated_request *nvme_encap_request, + struct mpi3mr_buf_map *drv_bufs, u8 bufcnt) +{ + int prp_size = MPI3MR_NVME_PRP_SIZE; + __le64 *prp_entry, *prp1_entry, *prp2_entry; + __le64 *prp_page; + dma_addr_t prp_entry_dma, prp_page_dma, dma_addr; + u32 offset, entry_len, dev_pgsz; + u32 page_mask_result, page_mask; + size_t length = 0; + u8 count; + struct mpi3mr_buf_map *drv_buf_iter = drv_bufs; + u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) << + mrioc->facts.sge_mod_shift) << 32); + u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) << + mrioc->facts.sge_mod_shift) << 32; + u16 dev_handle = nvme_encap_request->dev_handle; + struct mpi3mr_tgt_dev *tgtdev; + + tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); + if (!tgtdev) { + dprint_bsg_err(mrioc, "%s: invalid device handle 0x%04x\n", + __func__, dev_handle); + return -1; + } + + if (tgtdev->dev_spec.pcie_inf.pgsz == 0) { + dprint_bsg_err(mrioc, + "%s: NVMe device page size is zero for handle 0x%04x\n", + __func__, dev_handle); + mpi3mr_tgtdev_put(tgtdev); + return -1; + } + + dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz); + mpi3mr_tgtdev_put(tgtdev); + + /* + * Not all commands require a data transfer. If no data, just return + * without constructing any PRP. + */ + for (count = 0; count < bufcnt; count++, drv_buf_iter++) { + if (drv_buf_iter->data_dir == DMA_NONE) + continue; + dma_addr = drv_buf_iter->kern_buf_dma; + length = drv_buf_iter->kern_buf_len; + break; + } + + if (!length) + return 0; + + mrioc->prp_sz = 0; + mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev, + dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL); + + if (!mrioc->prp_list_virt) + return -1; + mrioc->prp_sz = dev_pgsz; + + /* + * Set pointers to PRP1 and PRP2, which are in the NVMe command. + * PRP1 is located at a 24 byte offset from the start of the NVMe + * command. Then set the current PRP entry pointer to PRP1. + */ + prp1_entry = (__le64 *)((u8 *)(nvme_encap_request->command) + + MPI3MR_NVME_CMD_PRP1_OFFSET); + prp2_entry = (__le64 *)((u8 *)(nvme_encap_request->command) + + MPI3MR_NVME_CMD_PRP2_OFFSET); + prp_entry = prp1_entry; + /* + * For the PRP entries, use the specially allocated buffer of + * contiguous memory. 
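Both the NVMe SGL path above and the PRP path being set up here apply the same "SGE modifier" fix-up to every DMA address handed to the firmware: the controller reserves the sge_mod_mask bits in the upper dword, any buffer whose address already uses them is rejected, and sge_mod_value is then stamped into those bits. A minimal sketch of that fix-up, with the parameters standing in for the mrioc->facts derived values:

#include <stdint.h>

/* Returns 0 and rewrites *addr on success, -1 if the address collides. */
static int apply_sge_modifier(uint64_t *addr, uint64_t sgemod_mask,
                              uint64_t sgemod_val)
{
        if (*addr & sgemod_mask)
                return -1;      /* address already uses the reserved modifier bits */

        *addr = (*addr & ~sgemod_mask) | sgemod_val;
        return 0;
}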
+ */ + prp_page = (__le64 *)mrioc->prp_list_virt; + prp_page_dma = mrioc->prp_list_dma; + + /* + * Check if we are within 1 entry of a page boundary we don't + * want our first entry to be a PRP List entry. + */ + page_mask = dev_pgsz - 1; + page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask; + if (!page_mask_result) { + dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n", + __func__); + goto err_out; + } + + /* + * Set PRP physical pointer, which initially points to the current PRP + * DMA memory page. + */ + prp_entry_dma = prp_page_dma; + + + /* Loop while the length is not zero. */ + while (length) { + page_mask_result = (prp_entry_dma + prp_size) & page_mask; + if (!page_mask_result && (length > dev_pgsz)) { + dprint_bsg_err(mrioc, + "%s: single PRP page is not sufficient\n", + __func__); + goto err_out; + } + + /* Need to handle if entry will be part of a page. */ + offset = dma_addr & page_mask; + entry_len = dev_pgsz - offset; + + if (prp_entry == prp1_entry) { + /* + * Must fill in the first PRP pointer (PRP1) before + * moving on. + */ + *prp1_entry = cpu_to_le64(dma_addr); + if (*prp1_entry & sgemod_mask) { + dprint_bsg_err(mrioc, + "%s: PRP1 address collides with SGE modifier\n", + __func__); + goto err_out; + } + *prp1_entry &= ~sgemod_mask; + *prp1_entry |= sgemod_val; + + /* + * Now point to the second PRP entry within the + * command (PRP2). + */ + prp_entry = prp2_entry; + } else if (prp_entry == prp2_entry) { + /* + * Should the PRP2 entry be a PRP List pointer or just + * a regular PRP pointer? If there is more than one + * more page of data, must use a PRP List pointer. + */ + if (length > dev_pgsz) { + /* + * PRP2 will contain a PRP List pointer because + * more PRP's are needed with this command. The + * list will start at the beginning of the + * contiguous buffer. + */ + *prp2_entry = cpu_to_le64(prp_entry_dma); + if (*prp2_entry & sgemod_mask) { + dprint_bsg_err(mrioc, + "%s: PRP list address collides with SGE modifier\n", + __func__); + goto err_out; + } + *prp2_entry &= ~sgemod_mask; + *prp2_entry |= sgemod_val; + + /* + * The next PRP Entry will be the start of the + * first PRP List. + */ + prp_entry = prp_page; + continue; + } else { + /* + * After this, the PRP Entries are complete. + * This command uses 2 PRP's and no PRP list. + */ + *prp2_entry = cpu_to_le64(dma_addr); + if (*prp2_entry & sgemod_mask) { + dprint_bsg_err(mrioc, + "%s: PRP2 collides with SGE modifier\n", + __func__); + goto err_out; + } + *prp2_entry &= ~sgemod_mask; + *prp2_entry |= sgemod_val; + } + } else { + /* + * Put entry in list and bump the addresses. + * + * After PRP1 and PRP2 are filled in, this will fill in + * all remaining PRP entries in a PRP List, one per + * each time through the loop. + */ + *prp_entry = cpu_to_le64(dma_addr); + if (*prp_entry & sgemod_mask) { + dprint_bsg_err(mrioc, + "%s: PRP address collides with SGE modifier\n", + __func__); + goto err_out; + } + *prp_entry &= ~sgemod_mask; + *prp_entry |= sgemod_val; + prp_entry++; + prp_entry_dma += prp_size; + } + + /* + * Bump the phys address of the command's data buffer by the + * entry_len. + */ + dma_addr += entry_len; + + /* decrement length accounting for last partial page. 
*/ + if (entry_len > length) + length = 0; + else + length -= entry_len; + } + return 0; +err_out: + if (mrioc->prp_list_virt) { + dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz, + mrioc->prp_list_virt, mrioc->prp_list_dma); + mrioc->prp_list_virt = NULL; + } + return -1; +} +/** + * mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler + * @job: BSG job reference + * @reply_payload_rcv_len: length of payload recvd + * + * This function is the top level handler for MPI Pass through + * command, this does basic validation of the input data buffers, + * identifies the given buffer types and MPI command, allocates + * DMAable memory for user given buffers, construstcs SGL + * properly and passes the command to the firmware. + * + * Once the MPI command is completed the driver copies the data + * if any and reply, sense information to user provided buffers. + * If the command is timed out then issues controller reset + * prior to returning. + * + * Return: 0 on success and proper error codes on failure + */ + +static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply_payload_rcv_len) +{ + long rval = -EINVAL; + + struct mpi3mr_ioc *mrioc = NULL; + u8 *mpi_req = NULL, *sense_buff_k = NULL; + u8 mpi_msg_size = 0; + struct mpi3mr_bsg_packet *bsg_req = NULL; + struct mpi3mr_bsg_mptcmd *karg; + struct mpi3mr_buf_entry *buf_entries = NULL; + struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL; + u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0; + u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF, sg_entries = 0; + u8 block_io = 0, resp_code = 0, nvme_fmt = 0; + struct mpi3_request_header *mpi_header = NULL; + struct mpi3_status_reply_descriptor *status_desc; + struct mpi3_scsi_task_mgmt_request *tm_req; + u32 erbsz = MPI3MR_SENSE_BUF_SZ, tmplen; + u16 dev_handle; + struct mpi3mr_tgt_dev *tgtdev; + struct mpi3mr_stgt_priv_data *stgt_priv = NULL; + struct mpi3mr_bsg_in_reply_buf *bsg_reply_buf = NULL; + u32 din_size = 0, dout_size = 0; + u8 *din_buf = NULL, *dout_buf = NULL; + u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL; + + bsg_req = job->request; + karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd; + + mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id); + if (!mrioc) + return -ENODEV; + + if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT) + karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT; + + mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL); + if (!mpi_req) + return -ENOMEM; + mpi_header = (struct mpi3_request_header *)mpi_req; + + bufcnt = karg->buf_entry_list.num_of_entries; + drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL); + if (!drv_bufs) { + rval = -ENOMEM; + goto out; + } + + dout_buf = kzalloc(job->request_payload.payload_len, + GFP_KERNEL); + if (!dout_buf) { + rval = -ENOMEM; + goto out; + } + + din_buf = kzalloc(job->reply_payload.payload_len, + GFP_KERNEL); + if (!din_buf) { + rval = -ENOMEM; + goto out; + } + + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + dout_buf, job->request_payload.payload_len); + + buf_entries = karg->buf_entry_list.buf_entry; + sgl_din_iter = din_buf; + sgl_dout_iter = dout_buf; + drv_buf_iter = drv_bufs; + + for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) { + + if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) { + dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n", + __func__); + rval = -EINVAL; + goto out; + } + if (sgl_din_iter > (din_buf + 
job->reply_payload.payload_len)) { + dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n", + __func__); + rval = -EINVAL; + goto out; + } + + switch (buf_entries->buf_type) { + case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD: + sgl_iter = sgl_dout_iter; + sgl_dout_iter += buf_entries->buf_len; + drv_buf_iter->data_dir = DMA_TO_DEVICE; + is_rmcb = 1; + if (count != 0) + invalid_be = 1; + break; + case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP: + sgl_iter = sgl_din_iter; + sgl_din_iter += buf_entries->buf_len; + drv_buf_iter->data_dir = DMA_FROM_DEVICE; + is_rmrb = 1; + if (count != 1 || !is_rmcb) + invalid_be = 1; + break; + case MPI3MR_BSG_BUFTYPE_DATA_IN: + sgl_iter = sgl_din_iter; + sgl_din_iter += buf_entries->buf_len; + drv_buf_iter->data_dir = DMA_FROM_DEVICE; + din_cnt++; + din_size += drv_buf_iter->bsg_buf_len; + if ((din_cnt > 1) && !is_rmcb) + invalid_be = 1; + break; + case MPI3MR_BSG_BUFTYPE_DATA_OUT: + sgl_iter = sgl_dout_iter; + sgl_dout_iter += buf_entries->buf_len; + drv_buf_iter->data_dir = DMA_TO_DEVICE; + dout_cnt++; + dout_size += drv_buf_iter->bsg_buf_len; + if ((dout_cnt > 1) && !is_rmcb) + invalid_be = 1; + break; + case MPI3MR_BSG_BUFTYPE_MPI_REPLY: + sgl_iter = sgl_din_iter; + sgl_din_iter += buf_entries->buf_len; + drv_buf_iter->data_dir = DMA_NONE; + mpirep_offset = count; + break; + case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE: + sgl_iter = sgl_din_iter; + sgl_din_iter += buf_entries->buf_len; + drv_buf_iter->data_dir = DMA_NONE; + erb_offset = count; + break; + case MPI3MR_BSG_BUFTYPE_MPI_REQUEST: + sgl_iter = sgl_dout_iter; + sgl_dout_iter += buf_entries->buf_len; + drv_buf_iter->data_dir = DMA_NONE; + mpi_msg_size = buf_entries->buf_len; + if ((!mpi_msg_size || (mpi_msg_size % 4)) || + (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) { + dprint_bsg_err(mrioc, "%s: invalid MPI message size\n", + __func__); + rval = -EINVAL; + goto out; + } + memcpy(mpi_req, sgl_iter, buf_entries->buf_len); + break; + default: + invalid_be = 1; + break; + } + if (invalid_be) { + dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n", + __func__); + rval = -EINVAL; + goto out; + } + + drv_buf_iter->bsg_buf = sgl_iter; + drv_buf_iter->bsg_buf_len = buf_entries->buf_len; + + } + if (!is_rmcb && (dout_cnt || din_cnt)) { + sg_entries = dout_cnt + din_cnt; + if (((mpi_msg_size) + (sg_entries * + sizeof(struct mpi3_sge_common))) > MPI3MR_ADMIN_REQ_FRAME_SZ) { + dprint_bsg_err(mrioc, + "%s:%d: invalid message size passed\n", + __func__, __LINE__); + rval = -EINVAL; + goto out; + } + } + if (din_size > MPI3MR_MAX_APP_XFER_SIZE) { + dprint_bsg_err(mrioc, + "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n", + __func__, __LINE__, mpi_header->function, din_size); + rval = -EINVAL; + goto out; + } + if (dout_size > MPI3MR_MAX_APP_XFER_SIZE) { + dprint_bsg_err(mrioc, + "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n", + __func__, __LINE__, mpi_header->function, dout_size); + rval = -EINVAL; + goto out; + } + + drv_buf_iter = drv_bufs; + for (count = 0; count < bufcnt; count++, drv_buf_iter++) { + if (drv_buf_iter->data_dir == DMA_NONE) + continue; + + drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len; + if (is_rmcb && !count) + drv_buf_iter->kern_buf_len += ((dout_cnt + din_cnt) * + sizeof(struct mpi3_sge_common)); + + if (!drv_buf_iter->kern_buf_len) + continue; + + drv_buf_iter->kern_buf = dma_alloc_coherent(&mrioc->pdev->dev, + drv_buf_iter->kern_buf_len, &drv_buf_iter->kern_buf_dma, + GFP_KERNEL); + if (!drv_buf_iter->kern_buf) { + rval = -ENOMEM; + 
goto out; + } + if (drv_buf_iter->data_dir == DMA_TO_DEVICE) { + tmplen = min(drv_buf_iter->kern_buf_len, + drv_buf_iter->bsg_buf_len); + memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen); + } + } + + if (erb_offset != 0xFF) { + sense_buff_k = kzalloc(erbsz, GFP_KERNEL); + if (!sense_buff_k) { + rval = -ENOMEM; + goto out; + } + } + + if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) { + rval = -ERESTARTSYS; + goto out; + } + if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) { + rval = -EAGAIN; + dprint_bsg_err(mrioc, "%s: command is in use\n", __func__); + mutex_unlock(&mrioc->bsg_cmds.mutex); + goto out; + } + if (mrioc->unrecoverable) { + dprint_bsg_err(mrioc, "%s: unrecoverable controller\n", + __func__); + rval = -EFAULT; + mutex_unlock(&mrioc->bsg_cmds.mutex); + goto out; + } + if (mrioc->reset_in_progress) { + dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); + rval = -EAGAIN; + mutex_unlock(&mrioc->bsg_cmds.mutex); + goto out; + } + if (mrioc->stop_bsgs) { + dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__); + rval = -EAGAIN; + mutex_unlock(&mrioc->bsg_cmds.mutex); + goto out; + } + + if (mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) { + nvme_fmt = mpi3mr_get_nvme_data_fmt( + (struct mpi3_nvme_encapsulated_request *)mpi_req); + if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) { + if (mpi3mr_build_nvme_prp(mrioc, + (struct mpi3_nvme_encapsulated_request *)mpi_req, + drv_bufs, bufcnt)) { + rval = -ENOMEM; + mutex_unlock(&mrioc->bsg_cmds.mutex); + goto out; + } + } else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 || + nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) { + if (mpi3mr_build_nvme_sgl(mrioc, + (struct mpi3_nvme_encapsulated_request *)mpi_req, + drv_bufs, bufcnt)) { + rval = -EINVAL; + mutex_unlock(&mrioc->bsg_cmds.mutex); + goto out; + } + } else { + dprint_bsg_err(mrioc, + "%s:invalid NVMe command format\n", __func__); + rval = -EINVAL; + mutex_unlock(&mrioc->bsg_cmds.mutex); + goto out; + } + } else { + mpi3mr_bsg_build_sgl(mpi_req, (mpi_msg_size), + drv_bufs, bufcnt, is_rmcb, is_rmrb, + (dout_cnt + din_cnt)); + } + + if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) { + tm_req = (struct mpi3_scsi_task_mgmt_request *)mpi_req; + if (tm_req->task_type != + MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { + dev_handle = tm_req->dev_handle; + block_io = 1; + } + } + if (block_io) { + tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); + if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) { + stgt_priv = (struct mpi3mr_stgt_priv_data *) + tgtdev->starget->hostdata; + atomic_inc(&stgt_priv->block_io); + mpi3mr_tgtdev_put(tgtdev); + } + } + + mrioc->bsg_cmds.state = MPI3MR_CMD_PENDING; + mrioc->bsg_cmds.is_waiting = 1; + mrioc->bsg_cmds.callback = NULL; + mrioc->bsg_cmds.is_sense = 0; + mrioc->bsg_cmds.sensebuf = sense_buff_k; + memset(mrioc->bsg_cmds.reply, 0, mrioc->reply_sz); + mpi_header->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_BSG_CMDS); + if (mrioc->logging_level & MPI3_DEBUG_BSG_INFO) { + dprint_bsg_info(mrioc, + "%s: posting bsg request to the controller\n", __func__); + dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ, + "bsg_mpi3_req"); + if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) { + drv_buf_iter = &drv_bufs[0]; + dprint_dump(drv_buf_iter->kern_buf, + drv_buf_iter->kern_buf_len, "mpi3_mgmt_req"); + } + } + + init_completion(&mrioc->bsg_cmds.done); + rval = mpi3mr_admin_request_post(mrioc, mpi_req, + MPI3MR_ADMIN_REQ_FRAME_SZ, 0); + + + if (rval) { + mrioc->bsg_cmds.is_waiting = 0; + 
dprint_bsg_err(mrioc, + "%s: posting bsg request is failed\n", __func__); + rval = -EAGAIN; + goto out_unlock; + } + wait_for_completion_timeout(&mrioc->bsg_cmds.done, + (karg->timeout * HZ)); + if (block_io && stgt_priv) + atomic_dec(&stgt_priv->block_io); + if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE)) { + mrioc->bsg_cmds.is_waiting = 0; + rval = -EAGAIN; + if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET) + goto out_unlock; + dprint_bsg_err(mrioc, + "%s: bsg request timedout after %d seconds\n", __func__, + karg->timeout); + if (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR) { + dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ, + "bsg_mpi3_req"); + if (mpi_header->function == + MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) { + drv_buf_iter = &drv_bufs[0]; + dprint_dump(drv_buf_iter->kern_buf, + drv_buf_iter->kern_buf_len, "mpi3_mgmt_req"); + } + } + + if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) || + (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO)) + mpi3mr_issue_tm(mrioc, + MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + mpi_header->function_dependent, 0, + MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT, + &mrioc->host_tm_cmds, &resp_code, NULL); + if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) && + !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)) + mpi3mr_soft_reset_handler(mrioc, + MPI3MR_RESET_FROM_APP_TIMEOUT, 1); + goto out_unlock; + } + dprint_bsg_info(mrioc, "%s: bsg request is completed\n", __func__); + + if (mrioc->prp_list_virt) { + dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz, + mrioc->prp_list_virt, mrioc->prp_list_dma); + mrioc->prp_list_virt = NULL; + } + + if ((mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + != MPI3_IOCSTATUS_SUCCESS) { + dprint_bsg_info(mrioc, + "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n", + __func__, + (mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), + mrioc->bsg_cmds.ioc_loginfo); + } + + if ((mpirep_offset != 0xFF) && + drv_bufs[mpirep_offset].bsg_buf_len) { + drv_buf_iter = &drv_bufs[mpirep_offset]; + drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) - 1 + + mrioc->reply_sz); + bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL); + + if (!bsg_reply_buf) { + rval = -ENOMEM; + goto out_unlock; + } + if (mrioc->bsg_cmds.state & MPI3MR_CMD_REPLY_VALID) { + bsg_reply_buf->mpi_reply_type = + MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS; + memcpy(bsg_reply_buf->reply_buf, + mrioc->bsg_cmds.reply, mrioc->reply_sz); + } else { + bsg_reply_buf->mpi_reply_type = + MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS; + status_desc = (struct mpi3_status_reply_descriptor *) + bsg_reply_buf->reply_buf; + status_desc->ioc_status = mrioc->bsg_cmds.ioc_status; + status_desc->ioc_log_info = mrioc->bsg_cmds.ioc_loginfo; + } + tmplen = min(drv_buf_iter->kern_buf_len, + drv_buf_iter->bsg_buf_len); + memcpy(drv_buf_iter->bsg_buf, bsg_reply_buf, tmplen); + } + + if (erb_offset != 0xFF && mrioc->bsg_cmds.sensebuf && + mrioc->bsg_cmds.is_sense) { + drv_buf_iter = &drv_bufs[erb_offset]; + tmplen = min(erbsz, drv_buf_iter->bsg_buf_len); + memcpy(drv_buf_iter->bsg_buf, sense_buff_k, tmplen); + } + + drv_buf_iter = drv_bufs; + for (count = 0; count < bufcnt; count++, drv_buf_iter++) { + if (drv_buf_iter->data_dir == DMA_NONE) + continue; + if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) { + tmplen = min(drv_buf_iter->kern_buf_len, + drv_buf_iter->bsg_buf_len); + memcpy(drv_buf_iter->bsg_buf, + drv_buf_iter->kern_buf, tmplen); + } + } + +out_unlock: + if (din_buf) { + *reply_payload_rcv_len = + 
sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + din_buf, job->reply_payload.payload_len); + } + mrioc->bsg_cmds.is_sense = 0; + mrioc->bsg_cmds.sensebuf = NULL; + mrioc->bsg_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->bsg_cmds.mutex); +out: + kfree(sense_buff_k); + kfree(dout_buf); + kfree(din_buf); + kfree(mpi_req); + if (drv_bufs) { + drv_buf_iter = drv_bufs; + for (count = 0; count < bufcnt; count++, drv_buf_iter++) { + if (drv_buf_iter->kern_buf && drv_buf_iter->kern_buf_dma) + dma_free_coherent(&mrioc->pdev->dev, + drv_buf_iter->kern_buf_len, + drv_buf_iter->kern_buf, + drv_buf_iter->kern_buf_dma); + } + kfree(drv_bufs); + } + kfree(bsg_reply_buf); + return rval; +} + +/** + * mpi3mr_app_save_logdata - Save Log Data events + * @mrioc: Adapter instance reference + * @event_data: event data associated with log data event + * @event_data_size: event data size to copy + * + * If log data event caching is enabled by the applicatiobns, + * then this function saves the log data in the circular queue + * and Sends async signal SIGIO to indicate there is an async + * event from the firmware to the event monitoring applications. + * + * Return:Nothing + */ +void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data, + u16 event_data_size) +{ + u32 index = mrioc->logdata_buf_idx, sz; + struct mpi3mr_logdata_entry *entry; + + if (!(mrioc->logdata_buf)) + return; + + entry = (struct mpi3mr_logdata_entry *) + (mrioc->logdata_buf + (index * mrioc->logdata_entry_sz)); + entry->valid_entry = 1; + sz = min(mrioc->logdata_entry_sz, event_data_size); + memcpy(entry->data, event_data, sz); + mrioc->logdata_buf_idx = + ((++index) % MPI3MR_BSG_LOGDATA_MAX_ENTRIES); + atomic64_inc(&event_counter); +} + +/** + * mpi3mr_bsg_request - bsg request entry point + * @job: BSG job reference + * + * This is driver's entry point for bsg requests + * + * Return: 0 on success and proper error codes on failure + */ +static int mpi3mr_bsg_request(struct bsg_job *job) +{ + long rval = -EINVAL; + unsigned int reply_payload_rcv_len = 0; + + struct mpi3mr_bsg_packet *bsg_req = job->request; + + switch (bsg_req->cmd_type) { + case MPI3MR_DRV_CMD: + rval = mpi3mr_bsg_process_drv_cmds(job); + break; + case MPI3MR_MPT_CMD: + rval = mpi3mr_bsg_process_mpt_cmds(job, &reply_payload_rcv_len); + break; + default: + pr_err("%s: unsupported BSG command(0x%08x)\n", + MPI3MR_DRIVER_NAME, bsg_req->cmd_type); + break; + } + + bsg_job_done(job, rval, reply_payload_rcv_len); + + return 0; +} + +/** + * mpi3mr_bsg_exit - de-registration from bsg layer + * @mrioc: Adapter instance reference + * + * This will be called during driver unload and all + * bsg resources allocated during load will be freed. 
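mpi3mr_app_save_logdata() above treats logdata_buf as a ring of fixed-size entries: each entry starts with a valid_entry byte followed by the event data (up to logdata_entry_sz bytes), and the write index simply wraps so the oldest entry is overwritten. A small host-side model of the index handling; LOGDATA_MAX_ENTRIES is an illustrative stand-in, not the driver's MPI3MR_BSG_LOGDATA_MAX_ENTRIES value:

#include <stdint.h>

#define LOGDATA_MAX_ENTRIES 128         /* stand-in for MPI3MR_BSG_LOGDATA_MAX_ENTRIES */

/* Next slot to write: wrap around and overwrite the oldest entry. */
static uint32_t logdata_next_idx(uint32_t idx)
{
        return (idx + 1) % LOGDATA_MAX_ENTRIES;
}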
+ * + * Return:Nothing + */ +void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc) +{ + struct device *bsg_dev = &mrioc->bsg_dev; + if (!mrioc->bsg_queue) + return; + + bsg_remove_queue(mrioc->bsg_queue); + mrioc->bsg_queue = NULL; + + device_del(bsg_dev); + put_device(bsg_dev); +} + +/** + * mpi3mr_bsg_node_release -release bsg device node + * @dev: bsg device node + * + * decrements bsg dev parent reference count + * + * Return:Nothing + */ +static void mpi3mr_bsg_node_release(struct device *dev) +{ + put_device(dev->parent); +} + +/** + * mpi3mr_bsg_init - registration with bsg layer + * @mrioc: Adapter instance reference + * + * This will be called during driver load and it will + * register driver with bsg layer + * + * Return:Nothing + */ +void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc) +{ + struct device *bsg_dev = &mrioc->bsg_dev; + struct device *parent = &mrioc->shost->shost_gendev; + + device_initialize(bsg_dev); + + bsg_dev->parent = get_device(parent); + bsg_dev->release = mpi3mr_bsg_node_release; + + dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id); + + if (device_add(bsg_dev)) { + ioc_err(mrioc, "%s: bsg device add failed\n", + dev_name(bsg_dev)); + put_device(bsg_dev); + return; + } + + mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), + mpi3mr_bsg_request, NULL, 0); + if (IS_ERR(mrioc->bsg_queue)) { + ioc_err(mrioc, "%s: bsg registration failed\n", + dev_name(bsg_dev)); + device_del(bsg_dev); + put_device(bsg_dev); + return; + } + + blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS); + blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS); + + return; +} + +/** + * version_fw_show - SysFS callback for firmware version read + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Return: sysfs_emit() return after copying firmware version + */ +static ssize_t +version_fw_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver; + + return sysfs_emit(buf, "%d.%d.%d.%d.%05d-%05d\n", + fwver->gen_major, fwver->gen_minor, fwver->ph_major, + fwver->ph_minor, fwver->cust_id, fwver->build_num); +} +static DEVICE_ATTR_RO(version_fw); + +/** + * fw_queue_depth_show - SysFS callback for firmware max cmds + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Return: sysfs_emit() return after copying firmware max commands + */ +static ssize_t +fw_queue_depth_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + + return sysfs_emit(buf, "%d\n", mrioc->facts.max_reqs); +} +static DEVICE_ATTR_RO(fw_queue_depth); + +/** + * op_req_q_count_show - SysFS callback for request queue count + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Return: sysfs_emit() return after copying request queue count + */ +static ssize_t +op_req_q_count_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + + return sysfs_emit(buf, "%d\n", mrioc->num_op_req_q); +} +static DEVICE_ATTR_RO(op_req_q_count); + +/** + * reply_queue_count_show - SysFS callback for reply queue count + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Return: sysfs_emit() return 
after copying reply queue count + */ +static ssize_t +reply_queue_count_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + + return sysfs_emit(buf, "%d\n", mrioc->num_op_reply_q); +} + +static DEVICE_ATTR_RO(reply_queue_count); + +/** + * logging_level_show - Show controller debug level + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * A sysfs 'read/write' shost attribute, to show the current + * debug log level used by the driver for the specific + * controller. + * + * Return: sysfs_emit() return + */ +static ssize_t +logging_level_show(struct device *dev, + struct device_attribute *attr, char *buf) + +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + + return sysfs_emit(buf, "%08xh\n", mrioc->logging_level); +} + +/** + * logging_level_store- Change controller debug level + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * @count: size of the buffer + * + * A sysfs 'read/write' shost attribute, to change the current + * debug log level used by the driver for the specific + * controller. + * + * Return: strlen() return + */ +static ssize_t +logging_level_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + int val = 0; + + if (kstrtoint(buf, 0, &val) != 0) + return -EINVAL; + + mrioc->logging_level = val; + ioc_info(mrioc, "logging_level=%08xh\n", mrioc->logging_level); + return strlen(buf); +} +static DEVICE_ATTR_RW(logging_level); + +/** + * adp_state_show() - SysFS callback for adapter state show + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Return: sysfs_emit() return after copying adapter state + */ +static ssize_t +adp_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + enum mpi3mr_iocstate ioc_state; + uint8_t adp_state; + + ioc_state = mpi3mr_get_iocstate(mrioc); + if (ioc_state == MRIOC_STATE_UNRECOVERABLE) + adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE; + else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs)) + adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET; + else if (ioc_state == MRIOC_STATE_FAULT) + adp_state = MPI3MR_BSG_ADPSTATE_FAULT; + else + adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL; + + return sysfs_emit(buf, "%u\n", adp_state); +} + +static DEVICE_ATTR_RO(adp_state); + +static struct attribute *mpi3mr_host_attrs[] = { + &dev_attr_version_fw.attr, + &dev_attr_fw_queue_depth.attr, + &dev_attr_op_req_q_count.attr, + &dev_attr_reply_queue_count.attr, + &dev_attr_logging_level.attr, + &dev_attr_adp_state.attr, + NULL, +}; + +static const struct attribute_group mpi3mr_host_attr_group = { + .attrs = mpi3mr_host_attrs +}; + +const struct attribute_group *mpi3mr_host_groups[] = { + &mpi3mr_host_attr_group, + NULL, +}; + + +/* + * SCSI Device attributes under sysfs + */ + +/** + * sas_address_show - SysFS callback for dev SASaddress display + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Return: sysfs_emit() return after copying SAS address of the + * specific SAS/SATA end device. 
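The logging_level attribute above is a plain shost attribute, so it appears under the SCSI host's sysfs directory and accepts any value kstrtoint() can parse (decimal, 0x-prefixed hex or octal); the individual debug bits are the MPI3_DEBUG_* masks defined in mpi3mr_debug.h later in this patch. A small user-space sketch that turns on the two BSG debug bits, assuming a hypothetical host2 instance:

#include <stdio.h>

int main(void)
{
        /* 0x8000 | 0x10000 = MPI3_DEBUG_BSG_ERROR | MPI3_DEBUG_BSG_INFO */
        FILE *f = fopen("/sys/class/scsi_host/host2/logging_level", "w");

        if (!f)
                return 1;
        fprintf(f, "0x%x\n", 0x8000 | 0x10000);
        return fclose(f) ? 1 : 0;
}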
+ */ +static ssize_t +sas_address_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct mpi3mr_sdev_priv_data *sdev_priv_data; + struct mpi3mr_stgt_priv_data *tgt_priv_data; + struct mpi3mr_tgt_dev *tgtdev; + + sdev_priv_data = sdev->hostdata; + if (!sdev_priv_data) + return 0; + + tgt_priv_data = sdev_priv_data->tgt_priv_data; + if (!tgt_priv_data) + return 0; + tgtdev = tgt_priv_data->tgt_dev; + if (!tgtdev || tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA) + return 0; + return sysfs_emit(buf, "0x%016llx\n", + (unsigned long long)tgtdev->dev_spec.sas_sata_inf.sas_address); +} + +static DEVICE_ATTR_RO(sas_address); + +/** + * device_handle_show - SysFS callback for device handle display + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Return: sysfs_emit() return after copying firmware internal + * device handle of the specific device. + */ +static ssize_t +device_handle_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct mpi3mr_sdev_priv_data *sdev_priv_data; + struct mpi3mr_stgt_priv_data *tgt_priv_data; + struct mpi3mr_tgt_dev *tgtdev; + + sdev_priv_data = sdev->hostdata; + if (!sdev_priv_data) + return 0; + + tgt_priv_data = sdev_priv_data->tgt_priv_data; + if (!tgt_priv_data) + return 0; + tgtdev = tgt_priv_data->tgt_dev; + if (!tgtdev) + return 0; + return sysfs_emit(buf, "0x%04x\n", tgtdev->dev_handle); +} + +static DEVICE_ATTR_RO(device_handle); + +/** + * persistent_id_show - SysFS callback for persisten ID display + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Return: sysfs_emit() return after copying persistent ID of the + * of the specific device. + */ +static ssize_t +persistent_id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct mpi3mr_sdev_priv_data *sdev_priv_data; + struct mpi3mr_stgt_priv_data *tgt_priv_data; + struct mpi3mr_tgt_dev *tgtdev; + + sdev_priv_data = sdev->hostdata; + if (!sdev_priv_data) + return 0; + + tgt_priv_data = sdev_priv_data->tgt_priv_data; + if (!tgt_priv_data) + return 0; + tgtdev = tgt_priv_data->tgt_dev; + if (!tgtdev) + return 0; + return sysfs_emit(buf, "%d\n", tgtdev->perst_id); +} +static DEVICE_ATTR_RO(persistent_id); + +static struct attribute *mpi3mr_dev_attrs[] = { + &dev_attr_sas_address.attr, + &dev_attr_device_handle.attr, + &dev_attr_persistent_id.attr, + NULL, +}; + +static const struct attribute_group mpi3mr_dev_attr_group = { + .attrs = mpi3mr_dev_attrs +}; + +const struct attribute_group *mpi3mr_dev_groups[] = { + &mpi3mr_dev_attr_group, + NULL, +}; diff --git a/drivers/scsi/mpi3mr/mpi3mr_debug.h b/drivers/scsi/mpi3mr/mpi3mr_debug.h new file mode 100644 index 000000000..e94f7520d --- /dev/null +++ b/drivers/scsi/mpi3mr/mpi3mr_debug.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Driver for Broadcom MPI3 Storage Controllers + * + * Copyright (C) 2017-2023 Broadcom Inc. 
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) + * + */ + +#ifndef MPI3SAS_DEBUG_H_INCLUDED + +#define MPI3SAS_DEBUG_H_INCLUDED + +/* + * debug levels + */ + +#define MPI3_DEBUG_EVENT 0x00000001 +#define MPI3_DEBUG_EVENT_WORK_TASK 0x00000002 +#define MPI3_DEBUG_INIT 0x00000004 +#define MPI3_DEBUG_EXIT 0x00000008 +#define MPI3_DEBUG_TM 0x00000010 +#define MPI3_DEBUG_RESET 0x00000020 +#define MPI3_DEBUG_SCSI_ERROR 0x00000040 +#define MPI3_DEBUG_REPLY 0x00000080 +#define MPI3_DEBUG_CFG_ERROR 0x00000100 +#define MPI3_DEBUG_TRANSPORT_ERROR 0x00000200 +#define MPI3_DEBUG_BSG_ERROR 0x00008000 +#define MPI3_DEBUG_BSG_INFO 0x00010000 +#define MPI3_DEBUG_SCSI_INFO 0x00020000 +#define MPI3_DEBUG_CFG_INFO 0x00040000 +#define MPI3_DEBUG_TRANSPORT_INFO 0x00080000 +#define MPI3_DEBUG 0x01000000 +#define MPI3_DEBUG_SG 0x02000000 + + +/* + * debug macros + */ + +#define ioc_err(ioc, fmt, ...) \ + pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define ioc_notice(ioc, fmt, ...) \ + pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define ioc_warn(ioc, fmt, ...) \ + pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define ioc_info(ioc, fmt, ...) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__) + +#define dprint(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_event_th(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_EVENT) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_event_bh(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_init(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_INIT) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_exit(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_EXIT) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_tm(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_TM) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_reply(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_REPLY) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_reset(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_RESET) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_scsi_info(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_SCSI_INFO) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_scsi_err(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_SCSI_ERROR) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_scsi_command(ioc, SCMD, LOG_LEVEL) \ + do { \ + if (ioc->logging_level & LOG_LEVEL) \ + scsi_print_command(SCMD); \ + } while (0) + + +#define dprint_bsg_info(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_BSG_INFO) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_bsg_err(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_BSG_ERROR) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_cfg_info(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_CFG_INFO) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_cfg_err(ioc, fmt, ...) 
\ + do { \ + if (ioc->logging_level & MPI3_DEBUG_CFG_ERROR) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) +#define dprint_transport_info(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#define dprint_transport_err(ioc, fmt, ...) \ + do { \ + if (ioc->logging_level & MPI3_DEBUG_TRANSPORT_ERROR) \ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \ + } while (0) + +#endif /* MPT3SAS_DEBUG_H_INCLUDED */ + +/** + * dprint_dump - print contents of a memory buffer + * @req: Pointer to a memory buffer + * @sz: Memory buffer size + * @namestr: Name String to identify the buffer type + */ +static inline void +dprint_dump(void *req, int sz, const char *name_string) +{ + int i; + __le32 *mfp = (__le32 *)req; + + sz = sz/4; + if (name_string) + pr_info("%s:\n\t", name_string); + else + pr_info("request:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} + +/** + * dprint_dump_req - print message frame contents + * @req: pointer to message frame + * @sz: number of dwords + */ +static inline void +dprint_dump_req(void *req, int sz) +{ + int i; + __le32 *mfp = (__le32 *)req; + + pr_info("request:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c new file mode 100644 index 000000000..f039f1d98 --- /dev/null +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -0,0 +1,5832 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for Broadcom MPI3 Storage Controllers + * + * Copyright (C) 2017-2023 Broadcom Inc. + * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) + * + */ + +#include "mpi3mr.h" +#include + +static int +mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason); +static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc); +static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, + struct mpi3_ioc_facts_data *facts_data); +static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc, + struct mpi3mr_drv_cmd *drv_cmd); + +static int poll_queues; +module_param(poll_queues, int, 0444); +MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. 
(Range 1 - 126)"); + +#if defined(writeq) && defined(CONFIG_64BIT) +static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) +{ + writeq(b, addr); +} +#else +static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) +{ + __u64 data_out = b; + + writel((u32)(data_out), addr); + writel((u32)(data_out >> 32), (addr + 4)); +} +#endif + +static inline bool +mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q) +{ + u16 pi, ci, max_entries; + bool is_qfull = false; + + pi = op_req_q->pi; + ci = READ_ONCE(op_req_q->ci); + max_entries = op_req_q->num_requests; + + if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1)))) + is_qfull = true; + + return is_qfull; +} + +static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc) +{ + u16 i, max_vectors; + + max_vectors = mrioc->intr_info_count; + + for (i = 0; i < max_vectors; i++) + synchronize_irq(pci_irq_vector(mrioc->pdev, i)); +} + +void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc) +{ + mrioc->intr_enabled = 0; + mpi3mr_sync_irqs(mrioc); +} + +void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc) +{ + mrioc->intr_enabled = 1; +} + +static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc) +{ + u16 i; + + mpi3mr_ioc_disable_intr(mrioc); + + if (!mrioc->intr_info) + return; + + for (i = 0; i < mrioc->intr_info_count; i++) + free_irq(pci_irq_vector(mrioc->pdev, i), + (mrioc->intr_info + i)); + + kfree(mrioc->intr_info); + mrioc->intr_info = NULL; + mrioc->intr_info_count = 0; + mrioc->is_intr_info_set = false; + pci_free_irq_vectors(mrioc->pdev); +} + +void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length, + dma_addr_t dma_addr) +{ + struct mpi3_sge_common *sgel = paddr; + + sgel->flags = flags; + sgel->length = cpu_to_le32(length); + sgel->address = cpu_to_le64(dma_addr); +} + +void mpi3mr_build_zero_len_sge(void *paddr) +{ + u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; + + mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1); +} + +void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc, + dma_addr_t phys_addr) +{ + if (!phys_addr) + return NULL; + + if ((phys_addr < mrioc->reply_buf_dma) || + (phys_addr > mrioc->reply_buf_dma_max_address)) + return NULL; + + return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma); +} + +void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc, + dma_addr_t phys_addr) +{ + if (!phys_addr) + return NULL; + + return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma); +} + +static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc, + u64 reply_dma) +{ + u32 old_idx = 0; + unsigned long flags; + + spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags); + old_idx = mrioc->reply_free_queue_host_index; + mrioc->reply_free_queue_host_index = ( + (mrioc->reply_free_queue_host_index == + (mrioc->reply_free_qsz - 1)) ? 0 : + (mrioc->reply_free_queue_host_index + 1)); + mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma); + writel(mrioc->reply_free_queue_host_index, + &mrioc->sysif_regs->reply_free_host_index); + spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags); +} + +void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc, + u64 sense_buf_dma) +{ + u32 old_idx = 0; + unsigned long flags; + + spin_lock_irqsave(&mrioc->sbq_lock, flags); + old_idx = mrioc->sbq_host_index; + mrioc->sbq_host_index = ((mrioc->sbq_host_index == + (mrioc->sense_buf_q_sz - 1)) ? 
0 : + (mrioc->sbq_host_index + 1)); + mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma); + writel(mrioc->sbq_host_index, + &mrioc->sysif_regs->sense_buffer_free_host_index); + spin_unlock_irqrestore(&mrioc->sbq_lock, flags); +} + +static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc, + struct mpi3_event_notification_reply *event_reply) +{ + char *desc = NULL; + u16 event; + + event = event_reply->event; + + switch (event) { + case MPI3_EVENT_LOG_DATA: + desc = "Log Data"; + break; + case MPI3_EVENT_CHANGE: + desc = "Event Change"; + break; + case MPI3_EVENT_GPIO_INTERRUPT: + desc = "GPIO Interrupt"; + break; + case MPI3_EVENT_CABLE_MGMT: + desc = "Cable Management"; + break; + case MPI3_EVENT_ENERGY_PACK_CHANGE: + desc = "Energy Pack Change"; + break; + case MPI3_EVENT_DEVICE_ADDED: + { + struct mpi3_device_page0 *event_data = + (struct mpi3_device_page0 *)event_reply->event_data; + ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n", + event_data->dev_handle, event_data->device_form); + return; + } + case MPI3_EVENT_DEVICE_INFO_CHANGED: + { + struct mpi3_device_page0 *event_data = + (struct mpi3_device_page0 *)event_reply->event_data; + ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n", + event_data->dev_handle, event_data->device_form); + return; + } + case MPI3_EVENT_DEVICE_STATUS_CHANGE: + { + struct mpi3_event_data_device_status_change *event_data = + (struct mpi3_event_data_device_status_change *)event_reply->event_data; + ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n", + event_data->dev_handle, event_data->reason_code); + return; + } + case MPI3_EVENT_SAS_DISCOVERY: + { + struct mpi3_event_data_sas_discovery *event_data = + (struct mpi3_event_data_sas_discovery *)event_reply->event_data; + ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n", + (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ? + "start" : "stop", + le32_to_cpu(event_data->discovery_status)); + return; + } + case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: + desc = "SAS Broadcast Primitive"; + break; + case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE: + desc = "SAS Notify Primitive"; + break; + case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: + desc = "SAS Init Device Status Change"; + break; + case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW: + desc = "SAS Init Table Overflow"; + break; + case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + desc = "SAS Topology Change List"; + break; + case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: + desc = "Enclosure Device Status Change"; + break; + case MPI3_EVENT_ENCL_DEVICE_ADDED: + desc = "Enclosure Added"; + break; + case MPI3_EVENT_HARD_RESET_RECEIVED: + desc = "Hard Reset Received"; + break; + case MPI3_EVENT_SAS_PHY_COUNTER: + desc = "SAS PHY Counter"; + break; + case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: + desc = "SAS Device Discovery Error"; + break; + case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: + desc = "PCIE Topology Change List"; + break; + case MPI3_EVENT_PCIE_ENUMERATION: + { + struct mpi3_event_data_pcie_enumeration *event_data = + (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data; + ioc_info(mrioc, "PCIE Enumeration: (%s)", + (event_data->reason_code == + MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? 
"start" : "stop"); + if (event_data->enumeration_status) + ioc_info(mrioc, "enumeration_status(0x%08x)\n", + le32_to_cpu(event_data->enumeration_status)); + return; + } + case MPI3_EVENT_PREPARE_FOR_RESET: + desc = "Prepare For Reset"; + break; + } + + if (!desc) + return; + + ioc_info(mrioc, "%s\n", desc); +} + +static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc, + struct mpi3_default_reply *def_reply) +{ + struct mpi3_event_notification_reply *event_reply = + (struct mpi3_event_notification_reply *)def_reply; + + mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count); + mpi3mr_print_event_data(mrioc, event_reply); + mpi3mr_os_handle_events(mrioc, event_reply); +} + +static struct mpi3mr_drv_cmd * +mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag, + struct mpi3_default_reply *def_reply) +{ + u16 idx; + + switch (host_tag) { + case MPI3MR_HOSTTAG_INITCMDS: + return &mrioc->init_cmds; + case MPI3MR_HOSTTAG_CFG_CMDS: + return &mrioc->cfg_cmds; + case MPI3MR_HOSTTAG_BSG_CMDS: + return &mrioc->bsg_cmds; + case MPI3MR_HOSTTAG_BLK_TMS: + return &mrioc->host_tm_cmds; + case MPI3MR_HOSTTAG_PEL_ABORT: + return &mrioc->pel_abort_cmd; + case MPI3MR_HOSTTAG_PEL_WAIT: + return &mrioc->pel_cmds; + case MPI3MR_HOSTTAG_TRANSPORT_CMDS: + return &mrioc->transport_cmds; + case MPI3MR_HOSTTAG_INVALID: + if (def_reply && def_reply->function == + MPI3_FUNCTION_EVENT_NOTIFICATION) + mpi3mr_handle_events(mrioc, def_reply); + return NULL; + default: + break; + } + if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN && + host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) { + idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; + return &mrioc->dev_rmhs_cmds[idx]; + } + + if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN && + host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) { + idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; + return &mrioc->evtack_cmds[idx]; + } + + return NULL; +} + +static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, + struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma) +{ + u16 reply_desc_type, host_tag = 0; + u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; + u32 ioc_loginfo = 0; + struct mpi3_status_reply_descriptor *status_desc; + struct mpi3_address_reply_descriptor *addr_desc; + struct mpi3_success_reply_descriptor *success_desc; + struct mpi3_default_reply *def_reply = NULL; + struct mpi3mr_drv_cmd *cmdptr = NULL; + struct mpi3_scsi_io_reply *scsi_reply; + u8 *sense_buf = NULL; + + *reply_dma = 0; + reply_desc_type = le16_to_cpu(reply_desc->reply_flags) & + MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK; + switch (reply_desc_type) { + case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS: + status_desc = (struct mpi3_status_reply_descriptor *)reply_desc; + host_tag = le16_to_cpu(status_desc->host_tag); + ioc_status = le16_to_cpu(status_desc->ioc_status); + if (ioc_status & + MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) + ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); + ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; + break; + case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: + addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; + *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); + def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma); + if (!def_reply) + goto out; + host_tag = le16_to_cpu(def_reply->host_tag); + ioc_status = le16_to_cpu(def_reply->ioc_status); + if (ioc_status & + MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) + ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info); + ioc_status &= 
MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; + if (def_reply->function == MPI3_FUNCTION_SCSI_IO) { + scsi_reply = (struct mpi3_scsi_io_reply *)def_reply; + sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, + le64_to_cpu(scsi_reply->sense_data_buffer_address)); + } + break; + case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: + success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; + host_tag = le16_to_cpu(success_desc->host_tag); + break; + default: + break; + } + + cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply); + if (cmdptr) { + if (cmdptr->state & MPI3MR_CMD_PENDING) { + cmdptr->state |= MPI3MR_CMD_COMPLETE; + cmdptr->ioc_loginfo = ioc_loginfo; + cmdptr->ioc_status = ioc_status; + cmdptr->state &= ~MPI3MR_CMD_PENDING; + if (def_reply) { + cmdptr->state |= MPI3MR_CMD_REPLY_VALID; + memcpy((u8 *)cmdptr->reply, (u8 *)def_reply, + mrioc->reply_sz); + } + if (sense_buf && cmdptr->sensebuf) { + cmdptr->is_sense = 1; + memcpy(cmdptr->sensebuf, sense_buf, + MPI3MR_SENSE_BUF_SZ); + } + if (cmdptr->is_waiting) { + complete(&cmdptr->done); + cmdptr->is_waiting = 0; + } else if (cmdptr->callback) + cmdptr->callback(mrioc, cmdptr); + } + } +out: + if (sense_buf) + mpi3mr_repost_sense_buf(mrioc, + le64_to_cpu(scsi_reply->sense_data_buffer_address)); +} + +int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc) +{ + u32 exp_phase = mrioc->admin_reply_ephase; + u32 admin_reply_ci = mrioc->admin_reply_ci; + u32 num_admin_replies = 0; + u64 reply_dma = 0; + struct mpi3_default_reply_descriptor *reply_desc; + + if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) + return 0; + + reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base + + admin_reply_ci; + + if ((le16_to_cpu(reply_desc->reply_flags) & + MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) { + atomic_dec(&mrioc->admin_reply_q_in_use); + return 0; + } + + do { + if (mrioc->unrecoverable) + break; + + mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci); + mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma); + if (reply_dma) + mpi3mr_repost_reply_buf(mrioc, reply_dma); + num_admin_replies++; + if (++admin_reply_ci == mrioc->num_admin_replies) { + admin_reply_ci = 0; + exp_phase ^= 1; + } + reply_desc = + (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base + + admin_reply_ci; + if ((le16_to_cpu(reply_desc->reply_flags) & + MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) + break; + } while (1); + + writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci); + mrioc->admin_reply_ci = admin_reply_ci; + mrioc->admin_reply_ephase = exp_phase; + atomic_dec(&mrioc->admin_reply_q_in_use); + + return num_admin_replies; +} + +/** + * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to + * queue's consumer index from operational reply descriptor queue. 
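+ *
+ * Editor's note (illustrative, not part of the upstream file): the reply
+ * queue is carved into fixed-size DMA segments, so the consumer index is
+ * resolved in two steps, segment = reply_ci / segment_qd and offset =
+ * reply_ci % segment_qd. For a hypothetical segment_qd of 128, a reply_ci
+ * of 300 would land in segments[2] at descriptor offset 44.
+ *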
+ * @op_reply_q: op_reply_qinfo object + * @reply_ci: operational reply descriptor's queue consumer index + * + * Returns reply descriptor frame address + */ +static inline struct mpi3_default_reply_descriptor * +mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci) +{ + void *segment_base_addr; + struct segments *segments = op_reply_q->q_segments; + struct mpi3_default_reply_descriptor *reply_desc = NULL; + + segment_base_addr = + segments[reply_ci / op_reply_q->segment_qd].segment; + reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr + + (reply_ci % op_reply_q->segment_qd); + return reply_desc; +} + +/** + * mpi3mr_process_op_reply_q - Operational reply queue handler + * @mrioc: Adapter instance reference + * @op_reply_q: Operational reply queue info + * + * Checks the specific operational reply queue and drains the + * reply queue entries until the queue is empty and process the + * individual reply descriptors. + * + * Return: 0 if queue is already processed,or number of reply + * descriptors processed. + */ +int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, + struct op_reply_qinfo *op_reply_q) +{ + struct op_req_qinfo *op_req_q; + u32 exp_phase; + u32 reply_ci; + u32 num_op_reply = 0; + u64 reply_dma = 0; + struct mpi3_default_reply_descriptor *reply_desc; + u16 req_q_idx = 0, reply_qidx; + + reply_qidx = op_reply_q->qid - 1; + + if (!atomic_add_unless(&op_reply_q->in_use, 1, 1)) + return 0; + + exp_phase = op_reply_q->ephase; + reply_ci = op_reply_q->ci; + + reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci); + if ((le16_to_cpu(reply_desc->reply_flags) & + MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) { + atomic_dec(&op_reply_q->in_use); + return 0; + } + + do { + if (mrioc->unrecoverable) + break; + + req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1; + op_req_q = &mrioc->req_qinfo[req_q_idx]; + + WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci)); + mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma, + reply_qidx); + atomic_dec(&op_reply_q->pend_ios); + if (reply_dma) + mpi3mr_repost_reply_buf(mrioc, reply_dma); + num_op_reply++; + + if (++reply_ci == op_reply_q->num_replies) { + reply_ci = 0; + exp_phase ^= 1; + } + + reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci); + + if ((le16_to_cpu(reply_desc->reply_flags) & + MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) + break; +#ifndef CONFIG_PREEMPT_RT + /* + * Exit completion loop to avoid CPU lockup + * Ensure remaining completion happens from threaded ISR. + */ + if (num_op_reply > mrioc->max_host_ios) { + op_reply_q->enable_irq_poll = true; + break; + } +#endif + } while (1); + + writel(reply_ci, + &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index); + op_reply_q->ci = reply_ci; + op_reply_q->ephase = exp_phase; + + atomic_dec(&op_reply_q->in_use); + return num_op_reply; +} + +/** + * mpi3mr_blk_mq_poll - Operational reply queue handler + * @shost: SCSI Host reference + * @queue_num: Request queue number (w.r.t OS it is hardware context number) + * + * Checks the specific operational reply queue and drains the + * reply queue entries until the queue is empty and process the + * individual reply descriptors. + * + * Return: 0 if queue is already processed,or number of reply + * descriptors processed. 
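+ *
+ * Editor's note (assumption, not part of the upstream file): this routine
+ * is the poll-mode completion path and is normally wired to the SCSI
+ * host's mq_poll callback elsewhere in the driver; it drains the
+ * operational reply queue mapped to @queue_num without any interrupt and
+ * bails out early while a reset is in progress.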
+ */ +int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) +{ + int num_entries = 0; + struct mpi3mr_ioc *mrioc; + + mrioc = (struct mpi3mr_ioc *)shost->hostdata; + + if ((mrioc->reset_in_progress || mrioc->prepare_for_reset || + mrioc->unrecoverable)) + return 0; + + num_entries = mpi3mr_process_op_reply_q(mrioc, + &mrioc->op_reply_qinfo[queue_num]); + + return num_entries; +} + +static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata) +{ + struct mpi3mr_intr_info *intr_info = privdata; + struct mpi3mr_ioc *mrioc; + u16 midx; + u32 num_admin_replies = 0, num_op_reply = 0; + + if (!intr_info) + return IRQ_NONE; + + mrioc = intr_info->mrioc; + + if (!mrioc->intr_enabled) + return IRQ_NONE; + + midx = intr_info->msix_index; + + if (!midx) + num_admin_replies = mpi3mr_process_admin_reply_q(mrioc); + if (intr_info->op_reply_q) + num_op_reply = mpi3mr_process_op_reply_q(mrioc, + intr_info->op_reply_q); + + if (num_admin_replies || num_op_reply) + return IRQ_HANDLED; + else + return IRQ_NONE; +} + +#ifndef CONFIG_PREEMPT_RT + +static irqreturn_t mpi3mr_isr(int irq, void *privdata) +{ + struct mpi3mr_intr_info *intr_info = privdata; + int ret; + + if (!intr_info) + return IRQ_NONE; + + /* Call primary ISR routine */ + ret = mpi3mr_isr_primary(irq, privdata); + + /* + * If more IOs are expected, schedule IRQ polling thread. + * Otherwise exit from ISR. + */ + if (!intr_info->op_reply_q) + return ret; + + if (!intr_info->op_reply_q->enable_irq_poll || + !atomic_read(&intr_info->op_reply_q->pend_ios)) + return ret; + + disable_irq_nosync(intr_info->os_irq); + + return IRQ_WAKE_THREAD; +} + +/** + * mpi3mr_isr_poll - Reply queue polling routine + * @irq: IRQ + * @privdata: Interrupt info + * + * poll for pending I/O completions in a loop until pending I/Os + * present or controller queue depth I/Os are processed. + * + * Return: IRQ_NONE or IRQ_HANDLED + */ +static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata) +{ + struct mpi3mr_intr_info *intr_info = privdata; + struct mpi3mr_ioc *mrioc; + u16 midx; + u32 num_op_reply = 0; + + if (!intr_info || !intr_info->op_reply_q) + return IRQ_NONE; + + mrioc = intr_info->mrioc; + midx = intr_info->msix_index; + + /* Poll for pending IOs completions */ + do { + if (!mrioc->intr_enabled || mrioc->unrecoverable) + break; + + if (!midx) + mpi3mr_process_admin_reply_q(mrioc); + if (intr_info->op_reply_q) + num_op_reply += + mpi3mr_process_op_reply_q(mrioc, + intr_info->op_reply_q); + + usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP); + + } while (atomic_read(&intr_info->op_reply_q->pend_ios) && + (num_op_reply < mrioc->max_host_ios)); + + intr_info->op_reply_q->enable_irq_poll = false; + enable_irq(intr_info->os_irq); + + return IRQ_HANDLED; +} + +#endif + +/** + * mpi3mr_request_irq - Request IRQ and register ISR + * @mrioc: Adapter instance reference + * @index: IRQ vector index + * + * Request threaded ISR with primary ISR and secondary + * + * Return: 0 on success and non zero on failures. 
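+ *
+ * Editor's note (illustrative, not part of the upstream file): on
+ * non-PREEMPT_RT kernels the hard handler is mpi3mr_isr and the threaded
+ * handler is mpi3mr_isr_poll, so completions can be deferred to a thread
+ * when irq polling is enabled; on PREEMPT_RT only mpi3mr_isr_primary is
+ * registered and the RT core force-threads it as usual.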
+ */ +static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index) +{ + struct pci_dev *pdev = mrioc->pdev; + struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index; + int retval = 0; + + intr_info->mrioc = mrioc; + intr_info->msix_index = index; + intr_info->op_reply_q = NULL; + + snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d", + mrioc->driver_name, mrioc->id, index); + +#ifndef CONFIG_PREEMPT_RT + retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr, + mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info); +#else + retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary, + NULL, IRQF_SHARED, intr_info->name, intr_info); +#endif + if (retval) { + ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n", + intr_info->name, pci_irq_vector(pdev, index)); + return retval; + } + + intr_info->os_irq = pci_irq_vector(pdev, index); + return retval; +} + +static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors) +{ + if (!mrioc->requested_poll_qcount) + return; + + /* Reserved for Admin and Default Queue */ + if (max_vectors > 2 && + (mrioc->requested_poll_qcount < max_vectors - 2)) { + ioc_info(mrioc, + "enabled polled queues (%d) msix (%d)\n", + mrioc->requested_poll_qcount, max_vectors); + } else { + ioc_info(mrioc, + "disabled polled queues (%d) msix (%d) because of no resources for default queue\n", + mrioc->requested_poll_qcount, max_vectors); + mrioc->requested_poll_qcount = 0; + } +} + +/** + * mpi3mr_setup_isr - Setup ISR for the controller + * @mrioc: Adapter instance reference + * @setup_one: Request one IRQ or more + * + * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR + * + * Return: 0 on success and non zero on failures. + */ +static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one) +{ + unsigned int irq_flags = PCI_IRQ_MSIX; + int max_vectors, min_vec; + int retval; + int i; + struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 }; + + if (mrioc->is_intr_info_set) + return 0; + + mpi3mr_cleanup_isr(mrioc); + + if (setup_one || reset_devices) { + max_vectors = 1; + retval = pci_alloc_irq_vectors(mrioc->pdev, + 1, max_vectors, irq_flags); + if (retval < 0) { + ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n", + retval); + goto out_failed; + } + } else { + max_vectors = + min_t(int, mrioc->cpu_count + 1 + + mrioc->requested_poll_qcount, mrioc->msix_count); + + mpi3mr_calc_poll_queues(mrioc, max_vectors); + + ioc_info(mrioc, + "MSI-X vectors supported: %d, no of cores: %d,", + mrioc->msix_count, mrioc->cpu_count); + ioc_info(mrioc, + "MSI-x vectors requested: %d poll_queues %d\n", + max_vectors, mrioc->requested_poll_qcount); + + desc.post_vectors = mrioc->requested_poll_qcount; + min_vec = desc.pre_vectors + desc.post_vectors; + irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; + + retval = pci_alloc_irq_vectors_affinity(mrioc->pdev, + min_vec, max_vectors, irq_flags, &desc); + + if (retval < 0) { + ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n", + retval); + goto out_failed; + } + + + /* + * If only one MSI-x is allocated, then MSI-x 0 will be shared + * between Admin queue and operational queue + */ + if (retval == min_vec) + mrioc->op_reply_q_offset = 0; + else if (retval != (max_vectors)) { + ioc_info(mrioc, + "allocated vectors (%d) are less than configured (%d)\n", + retval, max_vectors); + } + + max_vectors = retval; + mrioc->op_reply_q_offset = (max_vectors > 1) ? 
1 : 0; + + mpi3mr_calc_poll_queues(mrioc, max_vectors); + + } + + mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors, + GFP_KERNEL); + if (!mrioc->intr_info) { + retval = -ENOMEM; + pci_free_irq_vectors(mrioc->pdev); + goto out_failed; + } + for (i = 0; i < max_vectors; i++) { + retval = mpi3mr_request_irq(mrioc, i); + if (retval) { + mrioc->intr_info_count = i; + goto out_failed; + } + } + if (reset_devices || !setup_one) + mrioc->is_intr_info_set = true; + mrioc->intr_info_count = max_vectors; + mpi3mr_ioc_enable_intr(mrioc); + return 0; + +out_failed: + mpi3mr_cleanup_isr(mrioc); + + return retval; +} + +static const struct { + enum mpi3mr_iocstate value; + char *name; +} mrioc_states[] = { + { MRIOC_STATE_READY, "ready" }, + { MRIOC_STATE_FAULT, "fault" }, + { MRIOC_STATE_RESET, "reset" }, + { MRIOC_STATE_BECOMING_READY, "becoming ready" }, + { MRIOC_STATE_RESET_REQUESTED, "reset requested" }, + { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" }, +}; + +static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state) +{ + int i; + char *name = NULL; + + for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) { + if (mrioc_states[i].value == mrioc_state) { + name = mrioc_states[i].name; + break; + } + } + return name; +} + +/* Reset reason to name mapper structure*/ +static const struct { + enum mpi3mr_reset_reason value; + char *name; +} mpi3mr_reset_reason_codes[] = { + { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" }, + { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" }, + { MPI3MR_RESET_FROM_APP, "application invocation" }, + { MPI3MR_RESET_FROM_EH_HOS, "error handling" }, + { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" }, + { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" }, + { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" }, + { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" }, + { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" }, + { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" }, + { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" }, + { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" }, + { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" }, + { + MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT, + "create request queue timeout" + }, + { + MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT, + "create reply queue timeout" + }, + { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" }, + { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" }, + { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" }, + { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" }, + { + MPI3MR_RESET_FROM_CIACTVRST_TIMER, + "component image activation timeout" + }, + { + MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT, + "get package version timeout" + }, + { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" }, + { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" }, + { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" }, + { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"}, + { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" }, +}; + +/** + * mpi3mr_reset_rc_name - get reset reason code name + * @reason_code: reset reason code value + * + * Map reset reason to an NULL terminated ASCII string + * + * Return: name corresponding to reset reason value or NULL. 
+ */ +static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code) +{ + int i; + char *name = NULL; + + for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) { + if (mpi3mr_reset_reason_codes[i].value == reason_code) { + name = mpi3mr_reset_reason_codes[i].name; + break; + } + } + return name; +} + +/* Reset type to name mapper structure*/ +static const struct { + u16 reset_type; + char *name; +} mpi3mr_reset_types[] = { + { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" }, + { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" }, +}; + +/** + * mpi3mr_reset_type_name - get reset type name + * @reset_type: reset type value + * + * Map reset type to an NULL terminated ASCII string + * + * Return: name corresponding to reset type value or NULL. + */ +static const char *mpi3mr_reset_type_name(u16 reset_type) +{ + int i; + char *name = NULL; + + for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) { + if (mpi3mr_reset_types[i].reset_type == reset_type) { + name = mpi3mr_reset_types[i].name; + break; + } + } + return name; +} + +/** + * mpi3mr_print_fault_info - Display fault information + * @mrioc: Adapter instance reference + * + * Display the controller fault information if there is a + * controller fault. + * + * Return: Nothing. + */ +void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc) +{ + u32 ioc_status, code, code1, code2, code3; + + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + + if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { + code = readl(&mrioc->sysif_regs->fault); + code1 = readl(&mrioc->sysif_regs->fault_info[0]); + code2 = readl(&mrioc->sysif_regs->fault_info[1]); + code3 = readl(&mrioc->sysif_regs->fault_info[2]); + + ioc_info(mrioc, + "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n", + code, code1, code2, code3); + } +} + +/** + * mpi3mr_get_iocstate - Get IOC State + * @mrioc: Adapter instance reference + * + * Return a proper IOC state enum based on the IOC status and + * IOC configuration and unrcoverable state of the controller. + * + * Return: Current IOC state. + */ +enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc) +{ + u32 ioc_status, ioc_config; + u8 ready, enabled; + + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); + + if (mrioc->unrecoverable) + return MRIOC_STATE_UNRECOVERABLE; + if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) + return MRIOC_STATE_FAULT; + + ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY); + enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC); + + if (ready && enabled) + return MRIOC_STATE_READY; + if ((!ready) && (!enabled)) + return MRIOC_STATE_RESET; + if ((!ready) && (enabled)) + return MRIOC_STATE_BECOMING_READY; + + return MRIOC_STATE_RESET_REQUESTED; +} + +/** + * mpi3mr_clear_reset_history - clear reset history + * @mrioc: Adapter instance reference + * + * Write the reset history bit in IOC status to clear the bit, + * if it is already set. + * + * Return: Nothing. + */ +static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc) +{ + u32 ioc_status; + + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) + writel(ioc_status, &mrioc->sysif_regs->ioc_status); +} + +/** + * mpi3mr_issue_and_process_mur - Message unit Reset handler + * @mrioc: Adapter instance reference + * @reset_reason: Reset reason code + * + * Issue Message unit Reset to the controller and wait for it to + * be complete. 
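+ *
+ * Editor's note (illustrative, not part of the upstream file): as the body
+ * below shows, a MUR clears MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC in the IOC
+ * configuration register, records the reset reason in scratchpad[0], and
+ * then polls the IOC status register in 100 ms steps (MPI3MR_MUR_TIMEOUT
+ * * 10 iterations) for either reset-history or a fault before deciding
+ * success.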
+ * + * Return: 0 on success, -1 on failure. + */ +static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc, + u32 reset_reason) +{ + u32 ioc_config, timeout, ioc_status; + int retval = -1; + + ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n"); + if (mrioc->unrecoverable) { + ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n"); + return retval; + } + mpi3mr_clear_reset_history(mrioc); + writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); + ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); + ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; + writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); + + timeout = MPI3MR_MUR_TIMEOUT * 10; + do { + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) { + mpi3mr_clear_reset_history(mrioc); + break; + } + if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { + mpi3mr_print_fault_info(mrioc); + break; + } + msleep(100); + } while (--timeout); + + ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); + if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || + (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) || + (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) + retval = 0; + + ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n", + (!retval) ? "successful" : "failed", ioc_status, ioc_config); + return retval; +} + +/** + * mpi3mr_revalidate_factsdata - validate IOCFacts parameters + * during reset/resume + * @mrioc: Adapter instance reference + * + * Return zero if the new IOCFacts parameters value is compatible with + * older values else return -EPERM + */ +static int +mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc) +{ + unsigned long *removepend_bitmap; + + if (mrioc->facts.reply_sz > mrioc->reply_sz) { + ioc_err(mrioc, + "cannot increase reply size from %d to %d\n", + mrioc->reply_sz, mrioc->facts.reply_sz); + return -EPERM; + } + + if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) { + ioc_err(mrioc, + "cannot reduce number of operational reply queues from %d to %d\n", + mrioc->num_op_reply_q, + mrioc->facts.max_op_reply_q); + return -EPERM; + } + + if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) { + ioc_err(mrioc, + "cannot reduce number of operational request queues from %d to %d\n", + mrioc->num_op_req_q, mrioc->facts.max_op_req_q); + return -EPERM; + } + + if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512)) + ioc_err(mrioc, "Warning: The maximum data transfer length\n" + "\tchanged after reset: previous(%d), new(%d),\n" + "the driver cannot change this at run time\n", + mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length); + + if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities & + MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED)) + ioc_err(mrioc, + "critical error: multipath capability is enabled at the\n" + "\tcontroller while sas transport support is enabled at the\n" + "\tdriver, please reboot the system or reload the driver\n"); + + if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) { + removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle, + GFP_KERNEL); + if (!removepend_bitmap) { + ioc_err(mrioc, + "failed to increase removepend_bitmap bits from %d to %d\n", + mrioc->dev_handle_bitmap_bits, + mrioc->facts.max_devhandle); + return -EPERM; + } + bitmap_free(mrioc->removepend_bitmap); + mrioc->removepend_bitmap = removepend_bitmap; + ioc_info(mrioc, + "increased bits of dev_handle_bitmap from %d to %d\n", + mrioc->dev_handle_bitmap_bits, + 
mrioc->facts.max_devhandle); + mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle; + } + + return 0; +} + +/** + * mpi3mr_bring_ioc_ready - Bring controller to ready state + * @mrioc: Adapter instance reference + * + * Set Enable IOC bit in IOC configuration register and wait for + * the controller to become ready. + * + * Return: 0 on success, appropriate error on failure. + */ +static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) +{ + u32 ioc_config, ioc_status, timeout, host_diagnostic; + int retval = 0; + enum mpi3mr_iocstate ioc_state; + u64 base_info; + + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); + base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information); + ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n", + ioc_status, ioc_config, base_info); + + /*The timeout value is in 2sec unit, changing it to seconds*/ + mrioc->ready_timeout = + ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >> + MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2; + + ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout); + + ioc_state = mpi3mr_get_iocstate(mrioc); + ioc_info(mrioc, "controller is in %s state during detection\n", + mpi3mr_iocstate_name(ioc_state)); + + if (ioc_state == MRIOC_STATE_BECOMING_READY || + ioc_state == MRIOC_STATE_RESET_REQUESTED) { + timeout = mrioc->ready_timeout * 10; + do { + msleep(100); + } while (--timeout); + + if (!pci_device_is_present(mrioc->pdev)) { + mrioc->unrecoverable = 1; + ioc_err(mrioc, + "controller is not present while waiting to reset\n"); + retval = -1; + goto out_device_not_present; + } + + ioc_state = mpi3mr_get_iocstate(mrioc); + ioc_info(mrioc, + "controller is in %s state after waiting to reset\n", + mpi3mr_iocstate_name(ioc_state)); + } + + if (ioc_state == MRIOC_STATE_READY) { + ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n"); + retval = mpi3mr_issue_and_process_mur(mrioc, + MPI3MR_RESET_FROM_BRINGUP); + ioc_state = mpi3mr_get_iocstate(mrioc); + if (retval) + ioc_err(mrioc, + "message unit reset failed with error %d current state %s\n", + retval, mpi3mr_iocstate_name(ioc_state)); + } + if (ioc_state != MRIOC_STATE_RESET) { + if (ioc_state == MRIOC_STATE_FAULT) { + timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; + mpi3mr_print_fault_info(mrioc); + do { + host_diagnostic = + readl(&mrioc->sysif_regs->host_diagnostic); + if (!(host_diagnostic & + MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS)) + break; + if (!pci_device_is_present(mrioc->pdev)) { + mrioc->unrecoverable = 1; + ioc_err(mrioc, "controller is not present at the bringup\n"); + goto out_device_not_present; + } + msleep(100); + } while (--timeout); + } + mpi3mr_print_fault_info(mrioc); + ioc_info(mrioc, "issuing soft reset to bring to reset state\n"); + retval = mpi3mr_issue_reset(mrioc, + MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, + MPI3MR_RESET_FROM_BRINGUP); + if (retval) { + ioc_err(mrioc, + "soft reset failed with error %d\n", retval); + goto out_failed; + } + } + ioc_state = mpi3mr_get_iocstate(mrioc); + if (ioc_state != MRIOC_STATE_RESET) { + ioc_err(mrioc, + "cannot bring controller to reset state, current state: %s\n", + mpi3mr_iocstate_name(ioc_state)); + goto out_failed; + } + mpi3mr_clear_reset_history(mrioc); + retval = mpi3mr_setup_admin_qpair(mrioc); + if (retval) { + ioc_err(mrioc, "failed to setup admin queues: error %d\n", + retval); + goto out_failed; + } + + ioc_info(mrioc, "bringing controller to ready 
state\n"); + ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); + ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; + writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); + + timeout = mrioc->ready_timeout * 10; + do { + ioc_state = mpi3mr_get_iocstate(mrioc); + if (ioc_state == MRIOC_STATE_READY) { + ioc_info(mrioc, + "successfully transitioned to %s state\n", + mpi3mr_iocstate_name(ioc_state)); + return 0; + } + if (!pci_device_is_present(mrioc->pdev)) { + mrioc->unrecoverable = 1; + ioc_err(mrioc, + "controller is not present at the bringup\n"); + retval = -1; + goto out_device_not_present; + } + msleep(100); + } while (--timeout); + +out_failed: + ioc_state = mpi3mr_get_iocstate(mrioc); + ioc_err(mrioc, + "failed to bring to ready state, current state: %s\n", + mpi3mr_iocstate_name(ioc_state)); +out_device_not_present: + return retval; +} + +/** + * mpi3mr_soft_reset_success - Check softreset is success or not + * @ioc_status: IOC status register value + * @ioc_config: IOC config register value + * + * Check whether the soft reset is successful or not based on + * IOC status and IOC config register values. + * + * Return: True when the soft reset is success, false otherwise. + */ +static inline bool +mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config) +{ + if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || + (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) + return true; + return false; +} + +/** + * mpi3mr_diagfault_success - Check diag fault is success or not + * @mrioc: Adapter reference + * @ioc_status: IOC status register value + * + * Check whether the controller hit diag reset fault code. + * + * Return: True when there is diag fault, false otherwise. + */ +static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc, + u32 ioc_status) +{ + u32 fault; + + if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) + return false; + fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; + if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) { + mpi3mr_print_fault_info(mrioc); + return true; + } + return false; +} + +/** + * mpi3mr_set_diagsave - Set diag save bit for snapdump + * @mrioc: Adapter reference + * + * Set diag save bit in IOC configuration register to enable + * snapdump. + * + * Return: Nothing. + */ +static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc) +{ + u32 ioc_config; + + ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); + ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE; + writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); +} + +/** + * mpi3mr_issue_reset - Issue reset to the controller + * @mrioc: Adapter reference + * @reset_type: Reset type + * @reset_reason: Reset reason code + * + * Unlock the host diagnostic registers and write the specific + * reset type to that, wait for reset acknowledgment from the + * controller, if the reset is not successful retry for the + * predefined number of times. + * + * Return: 0 on success, non-zero on failure. 
+ */ +static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, + u32 reset_reason) +{ + int retval = -1; + u8 unlock_retry_count = 0; + u32 host_diagnostic, ioc_status, ioc_config; + u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10; + + if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) && + (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)) + return retval; + if (mrioc->unrecoverable) + return retval; + if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) { + retval = 0; + return retval; + } + + ioc_info(mrioc, "%s reset due to %s(0x%x)\n", + mpi3mr_reset_type_name(reset_type), + mpi3mr_reset_rc_name(reset_reason), reset_reason); + + mpi3mr_clear_reset_history(mrioc); + do { + ioc_info(mrioc, + "Write magic sequence to unlock host diag register (retry=%d)\n", + ++unlock_retry_count); + if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) { + ioc_err(mrioc, + "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n", + mpi3mr_reset_type_name(reset_type), + host_diagnostic); + mrioc->unrecoverable = 1; + return retval; + } + + writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH, + &mrioc->sysif_regs->write_sequence); + writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST, + &mrioc->sysif_regs->write_sequence); + writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, + &mrioc->sysif_regs->write_sequence); + writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD, + &mrioc->sysif_regs->write_sequence); + writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH, + &mrioc->sysif_regs->write_sequence); + writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH, + &mrioc->sysif_regs->write_sequence); + writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH, + &mrioc->sysif_regs->write_sequence); + usleep_range(1000, 1100); + host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); + ioc_info(mrioc, + "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n", + unlock_retry_count, host_diagnostic); + } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE)); + + writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); + writel(host_diagnostic | reset_type, + &mrioc->sysif_regs->host_diagnostic); + switch (reset_type) { + case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET: + do { + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + ioc_config = + readl(&mrioc->sysif_regs->ioc_configuration); + if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) + && mpi3mr_soft_reset_success(ioc_status, ioc_config) + ) { + mpi3mr_clear_reset_history(mrioc); + retval = 0; + break; + } + msleep(100); + } while (--timeout); + mpi3mr_print_fault_info(mrioc); + break; + case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT: + do { + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + if (mpi3mr_diagfault_success(mrioc, ioc_status)) { + retval = 0; + break; + } + msleep(100); + } while (--timeout); + break; + default: + break; + } + + writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, + &mrioc->sysif_regs->write_sequence); + + ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + ioc_info(mrioc, + "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n", + (!retval)?"successful":"failed", ioc_status, + ioc_config); + if (retval) + mrioc->unrecoverable = 1; + return retval; +} + +/** + * mpi3mr_admin_request_post - Post request to admin queue + * @mrioc: Adapter reference + * @admin_req: MPI3 request + * @admin_req_sz: Request size + * @ignore_reset: Ignore reset in process + * + * Post the MPI3 request into admin request queue and + * 
inform the controller, if the queue is full return + * appropriate error. + * + * Return: 0 on success, non-zero on failure. + */ +int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req, + u16 admin_req_sz, u8 ignore_reset) +{ + u16 areq_pi = 0, areq_ci = 0, max_entries = 0; + int retval = 0; + unsigned long flags; + u8 *areq_entry; + + if (mrioc->unrecoverable) { + ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__); + return -EFAULT; + } + + spin_lock_irqsave(&mrioc->admin_req_lock, flags); + areq_pi = mrioc->admin_req_pi; + areq_ci = mrioc->admin_req_ci; + max_entries = mrioc->num_admin_req; + if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) && + (areq_pi == (max_entries - 1)))) { + ioc_err(mrioc, "AdminReqQ full condition detected\n"); + retval = -EAGAIN; + goto out; + } + if (!ignore_reset && mrioc->reset_in_progress) { + ioc_err(mrioc, "AdminReqQ submit reset in progress\n"); + retval = -EAGAIN; + goto out; + } + areq_entry = (u8 *)mrioc->admin_req_base + + (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ); + memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); + memcpy(areq_entry, (u8 *)admin_req, admin_req_sz); + + if (++areq_pi == max_entries) + areq_pi = 0; + mrioc->admin_req_pi = areq_pi; + + writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); + +out: + spin_unlock_irqrestore(&mrioc->admin_req_lock, flags); + + return retval; +} + +/** + * mpi3mr_free_op_req_q_segments - free request memory segments + * @mrioc: Adapter instance reference + * @q_idx: operational request queue index + * + * Free memory segments allocated for operational request queue + * + * Return: Nothing. + */ +static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) +{ + u16 j; + int size; + struct segments *segments; + + segments = mrioc->req_qinfo[q_idx].q_segments; + if (!segments) + return; + + if (mrioc->enable_segqueue) { + size = MPI3MR_OP_REQ_Q_SEG_SIZE; + if (mrioc->req_qinfo[q_idx].q_segment_list) { + dma_free_coherent(&mrioc->pdev->dev, + MPI3MR_MAX_SEG_LIST_SIZE, + mrioc->req_qinfo[q_idx].q_segment_list, + mrioc->req_qinfo[q_idx].q_segment_list_dma); + mrioc->req_qinfo[q_idx].q_segment_list = NULL; + } + } else + size = mrioc->req_qinfo[q_idx].segment_qd * + mrioc->facts.op_req_sz; + + for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) { + if (!segments[j].segment) + continue; + dma_free_coherent(&mrioc->pdev->dev, + size, segments[j].segment, segments[j].segment_dma); + segments[j].segment = NULL; + } + kfree(mrioc->req_qinfo[q_idx].q_segments); + mrioc->req_qinfo[q_idx].q_segments = NULL; + mrioc->req_qinfo[q_idx].qid = 0; +} + +/** + * mpi3mr_free_op_reply_q_segments - free reply memory segments + * @mrioc: Adapter instance reference + * @q_idx: operational reply queue index + * + * Free memory segments allocated for operational reply queue + * + * Return: Nothing. 
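+ *
+ * Editor's note (illustrative, not part of the upstream file): the free
+ * path mirrors the allocation scheme - when segmented queues are enabled
+ * each segment is MPI3MR_OP_REP_Q_SEG_SIZE bytes and the segment list
+ * itself (MPI3MR_MAX_SEG_LIST_SIZE) is released as well, otherwise the
+ * single buffer of segment_qd * op_reply_desc_sz bytes is freed.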
+ */ +static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) +{ + u16 j; + int size; + struct segments *segments; + + segments = mrioc->op_reply_qinfo[q_idx].q_segments; + if (!segments) + return; + + if (mrioc->enable_segqueue) { + size = MPI3MR_OP_REP_Q_SEG_SIZE; + if (mrioc->op_reply_qinfo[q_idx].q_segment_list) { + dma_free_coherent(&mrioc->pdev->dev, + MPI3MR_MAX_SEG_LIST_SIZE, + mrioc->op_reply_qinfo[q_idx].q_segment_list, + mrioc->op_reply_qinfo[q_idx].q_segment_list_dma); + mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; + } + } else + size = mrioc->op_reply_qinfo[q_idx].segment_qd * + mrioc->op_reply_desc_sz; + + for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) { + if (!segments[j].segment) + continue; + dma_free_coherent(&mrioc->pdev->dev, + size, segments[j].segment, segments[j].segment_dma); + segments[j].segment = NULL; + } + + kfree(mrioc->op_reply_qinfo[q_idx].q_segments); + mrioc->op_reply_qinfo[q_idx].q_segments = NULL; + mrioc->op_reply_qinfo[q_idx].qid = 0; +} + +/** + * mpi3mr_delete_op_reply_q - delete operational reply queue + * @mrioc: Adapter instance reference + * @qidx: operational reply queue index + * + * Delete operatinal reply queue by issuing MPI request + * through admin queue. + * + * Return: 0 on success, non-zero on failure. + */ +static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) +{ + struct mpi3_delete_reply_queue_request delq_req; + struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; + int retval = 0; + u16 reply_qid = 0, midx; + + reply_qid = op_reply_q->qid; + + midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); + + if (!reply_qid) { + retval = -1; + ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n"); + goto out; + } + + (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? 
mrioc->default_qcount-- : + mrioc->active_poll_qcount--; + + memset(&delq_req, 0, sizeof(delq_req)); + mutex_lock(&mrioc->init_cmds.mutex); + if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { + retval = -1; + ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n"); + mutex_unlock(&mrioc->init_cmds.mutex); + goto out; + } + mrioc->init_cmds.state = MPI3MR_CMD_PENDING; + mrioc->init_cmds.is_waiting = 1; + mrioc->init_cmds.callback = NULL; + delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); + delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE; + delq_req.queue_id = cpu_to_le16(reply_qid); + + init_completion(&mrioc->init_cmds.done); + retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req), + 1); + if (retval) { + ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n"); + goto out_unlock; + } + wait_for_completion_timeout(&mrioc->init_cmds.done, + (MPI3MR_INTADMCMD_TIMEOUT * HZ)); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { + ioc_err(mrioc, "delete reply queue timed out\n"); + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_DELREPQ_TIMEOUT); + retval = -1; + goto out_unlock; + } + if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, + "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", + (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), + mrioc->init_cmds.ioc_loginfo); + retval = -1; + goto out_unlock; + } + mrioc->intr_info[midx].op_reply_q = NULL; + + mpi3mr_free_op_reply_q_segments(mrioc, qidx); +out_unlock: + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->init_cmds.mutex); +out: + + return retval; +} + +/** + * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool + * @mrioc: Adapter instance reference + * @qidx: request queue index + * + * Allocate segmented memory pools for operational reply + * queue. + * + * Return: 0 on success, non-zero on failure. + */ +static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) +{ + struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; + int i, size; + u64 *q_segment_list_entry = NULL; + struct segments *segments; + + if (mrioc->enable_segqueue) { + op_reply_q->segment_qd = + MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz; + + size = MPI3MR_OP_REP_Q_SEG_SIZE; + + op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, + MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma, + GFP_KERNEL); + if (!op_reply_q->q_segment_list) + return -ENOMEM; + q_segment_list_entry = (u64 *)op_reply_q->q_segment_list; + } else { + op_reply_q->segment_qd = op_reply_q->num_replies; + size = op_reply_q->num_replies * mrioc->op_reply_desc_sz; + } + + op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies, + op_reply_q->segment_qd); + + op_reply_q->q_segments = kcalloc(op_reply_q->num_segments, + sizeof(struct segments), GFP_KERNEL); + if (!op_reply_q->q_segments) + return -ENOMEM; + + segments = op_reply_q->q_segments; + for (i = 0; i < op_reply_q->num_segments; i++) { + segments[i].segment = + dma_alloc_coherent(&mrioc->pdev->dev, + size, &segments[i].segment_dma, GFP_KERNEL); + if (!segments[i].segment) + return -ENOMEM; + if (mrioc->enable_segqueue) + q_segment_list_entry[i] = + (unsigned long)segments[i].segment_dma; + } + + return 0; +} + +/** + * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool. + * @mrioc: Adapter instance reference + * @qidx: request queue index + * + * Allocate segmented memory pools for operational request + * queue. 
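+ *
+ * Editor's note (illustrative numbers, not part of the upstream file):
+ * with segmented queues each segment holds segment_qd =
+ * MPI3MR_OP_REQ_Q_SEG_SIZE / op_req_sz request frames and the queue needs
+ * DIV_ROUND_UP(num_requests, segment_qd) segments; e.g. a hypothetical
+ * 16 KiB segment with 128-byte frames gives segment_qd = 128, so a
+ * 512-entry queue would use 4 segments.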
+ * + * Return: 0 on success, non-zero on failure. + */ +static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) +{ + struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; + int i, size; + u64 *q_segment_list_entry = NULL; + struct segments *segments; + + if (mrioc->enable_segqueue) { + op_req_q->segment_qd = + MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz; + + size = MPI3MR_OP_REQ_Q_SEG_SIZE; + + op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, + MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma, + GFP_KERNEL); + if (!op_req_q->q_segment_list) + return -ENOMEM; + q_segment_list_entry = (u64 *)op_req_q->q_segment_list; + + } else { + op_req_q->segment_qd = op_req_q->num_requests; + size = op_req_q->num_requests * mrioc->facts.op_req_sz; + } + + op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests, + op_req_q->segment_qd); + + op_req_q->q_segments = kcalloc(op_req_q->num_segments, + sizeof(struct segments), GFP_KERNEL); + if (!op_req_q->q_segments) + return -ENOMEM; + + segments = op_req_q->q_segments; + for (i = 0; i < op_req_q->num_segments; i++) { + segments[i].segment = + dma_alloc_coherent(&mrioc->pdev->dev, + size, &segments[i].segment_dma, GFP_KERNEL); + if (!segments[i].segment) + return -ENOMEM; + if (mrioc->enable_segqueue) + q_segment_list_entry[i] = + (unsigned long)segments[i].segment_dma; + } + + return 0; +} + +/** + * mpi3mr_create_op_reply_q - create operational reply queue + * @mrioc: Adapter instance reference + * @qidx: operational reply queue index + * + * Create operatinal reply queue by issuing MPI request + * through admin queue. + * + * Return: 0 on success, non-zero on failure. + */ +static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) +{ + struct mpi3_create_reply_queue_request create_req; + struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; + int retval = 0; + u16 reply_qid = 0, midx; + + reply_qid = op_reply_q->qid; + + midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); + + if (reply_qid) { + retval = -1; + ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n", + reply_qid); + + return retval; + } + + reply_qid = qidx + 1; + op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD; + if (!mrioc->pdev->revision) + op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K; + op_reply_q->ci = 0; + op_reply_q->ephase = 1; + atomic_set(&op_reply_q->pend_ios, 0); + atomic_set(&op_reply_q->in_use, 0); + op_reply_q->enable_irq_poll = false; + + if (!op_reply_q->q_segments) { + retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx); + if (retval) { + mpi3mr_free_op_reply_q_segments(mrioc, qidx); + goto out; + } + } + + memset(&create_req, 0, sizeof(create_req)); + mutex_lock(&mrioc->init_cmds.mutex); + if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { + retval = -1; + ioc_err(mrioc, "CreateRepQ: Init command is in use\n"); + goto out_unlock; + } + mrioc->init_cmds.state = MPI3MR_CMD_PENDING; + mrioc->init_cmds.is_waiting = 1; + mrioc->init_cmds.callback = NULL; + create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); + create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE; + create_req.queue_id = cpu_to_le16(reply_qid); + + if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount)) + op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE; + else + op_reply_q->qtype = MPI3MR_POLL_QUEUE; + + if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) { + create_req.flags = + MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE; + create_req.msix_index = + 
cpu_to_le16(mrioc->intr_info[midx].msix_index); + } else { + create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1); + ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n", + reply_qid, midx); + if (!mrioc->active_poll_qcount) + disable_irq_nosync(pci_irq_vector(mrioc->pdev, + mrioc->intr_info_count - 1)); + } + + if (mrioc->enable_segqueue) { + create_req.flags |= + MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; + create_req.base_address = cpu_to_le64( + op_reply_q->q_segment_list_dma); + } else + create_req.base_address = cpu_to_le64( + op_reply_q->q_segments[0].segment_dma); + + create_req.size = cpu_to_le16(op_reply_q->num_replies); + + init_completion(&mrioc->init_cmds.done); + retval = mpi3mr_admin_request_post(mrioc, &create_req, + sizeof(create_req), 1); + if (retval) { + ioc_err(mrioc, "CreateRepQ: Admin Post failed\n"); + goto out_unlock; + } + wait_for_completion_timeout(&mrioc->init_cmds.done, + (MPI3MR_INTADMCMD_TIMEOUT * HZ)); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { + ioc_err(mrioc, "create reply queue timed out\n"); + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT); + retval = -1; + goto out_unlock; + } + if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, + "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", + (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), + mrioc->init_cmds.ioc_loginfo); + retval = -1; + goto out_unlock; + } + op_reply_q->qid = reply_qid; + if (midx < mrioc->intr_info_count) + mrioc->intr_info[midx].op_reply_q = op_reply_q; + + (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ : + mrioc->active_poll_qcount++; + +out_unlock: + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->init_cmds.mutex); +out: + + return retval; +} + +/** + * mpi3mr_create_op_req_q - create operational request queue + * @mrioc: Adapter instance reference + * @idx: operational request queue index + * @reply_qid: Reply queue ID + * + * Create operatinal request queue by issuing MPI request + * through admin queue. + * + * Return: 0 on success, non-zero on failure. 
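+ *
+ * Editor's note (illustrative, not part of the upstream file): the request
+ * queue is created through the admin queue just like its reply queue and
+ * is bound to that reply queue via the reply_queue_id field; the base
+ * address passed to the firmware is either the segment list or the first
+ * contiguous segment, depending on whether segmented queues are enabled.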
+ */ +static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx, + u16 reply_qid) +{ + struct mpi3_create_request_queue_request create_req; + struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx; + int retval = 0; + u16 req_qid = 0; + + req_qid = op_req_q->qid; + + if (req_qid) { + retval = -1; + ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n", + req_qid); + + return retval; + } + req_qid = idx + 1; + + op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD; + op_req_q->ci = 0; + op_req_q->pi = 0; + op_req_q->reply_qid = reply_qid; + spin_lock_init(&op_req_q->q_lock); + + if (!op_req_q->q_segments) { + retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx); + if (retval) { + mpi3mr_free_op_req_q_segments(mrioc, idx); + goto out; + } + } + + memset(&create_req, 0, sizeof(create_req)); + mutex_lock(&mrioc->init_cmds.mutex); + if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { + retval = -1; + ioc_err(mrioc, "CreateReqQ: Init command is in use\n"); + goto out_unlock; + } + mrioc->init_cmds.state = MPI3MR_CMD_PENDING; + mrioc->init_cmds.is_waiting = 1; + mrioc->init_cmds.callback = NULL; + create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); + create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE; + create_req.queue_id = cpu_to_le16(req_qid); + if (mrioc->enable_segqueue) { + create_req.flags = + MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; + create_req.base_address = cpu_to_le64( + op_req_q->q_segment_list_dma); + } else + create_req.base_address = cpu_to_le64( + op_req_q->q_segments[0].segment_dma); + create_req.reply_queue_id = cpu_to_le16(reply_qid); + create_req.size = cpu_to_le16(op_req_q->num_requests); + + init_completion(&mrioc->init_cmds.done); + retval = mpi3mr_admin_request_post(mrioc, &create_req, + sizeof(create_req), 1); + if (retval) { + ioc_err(mrioc, "CreateReqQ: Admin Post failed\n"); + goto out_unlock; + } + wait_for_completion_timeout(&mrioc->init_cmds.done, + (MPI3MR_INTADMCMD_TIMEOUT * HZ)); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { + ioc_err(mrioc, "create request queue timed out\n"); + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT); + retval = -1; + goto out_unlock; + } + if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, + "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", + (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), + mrioc->init_cmds.ioc_loginfo); + retval = -1; + goto out_unlock; + } + op_req_q->qid = req_qid; + +out_unlock: + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->init_cmds.mutex); +out: + + return retval; +} + +/** + * mpi3mr_create_op_queues - create operational queue pairs + * @mrioc: Adapter instance reference + * + * Allocate memory for operational queue meta data and call + * create request and reply queue functions. + * + * Return: 0 on success, non-zero on failures. + */ +static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) +{ + int retval = 0; + u16 num_queues = 0, i = 0, msix_count_op_q = 1; + + num_queues = min_t(int, mrioc->facts.max_op_reply_q, + mrioc->facts.max_op_req_q); + + msix_count_op_q = + mrioc->intr_info_count - mrioc->op_reply_q_offset; + if (!mrioc->num_queues) + mrioc->num_queues = min_t(int, num_queues, msix_count_op_q); + /* + * During reset set the num_queues to the number of queues + * that was set before the reset. + */ + num_queues = mrioc->num_op_reply_q ? 
+ mrioc->num_op_reply_q : mrioc->num_queues; + ioc_info(mrioc, "trying to create %d operational queue pairs\n", + num_queues); + + if (!mrioc->req_qinfo) { + mrioc->req_qinfo = kcalloc(num_queues, + sizeof(struct op_req_qinfo), GFP_KERNEL); + if (!mrioc->req_qinfo) { + retval = -1; + goto out_failed; + } + + mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) * + num_queues, GFP_KERNEL); + if (!mrioc->op_reply_qinfo) { + retval = -1; + goto out_failed; + } + } + + if (mrioc->enable_segqueue) + ioc_info(mrioc, + "allocating operational queues through segmented queues\n"); + + for (i = 0; i < num_queues; i++) { + if (mpi3mr_create_op_reply_q(mrioc, i)) { + ioc_err(mrioc, "Cannot create OP RepQ %d\n", i); + break; + } + if (mpi3mr_create_op_req_q(mrioc, i, + mrioc->op_reply_qinfo[i].qid)) { + ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i); + mpi3mr_delete_op_reply_q(mrioc, i); + break; + } + } + + if (i == 0) { + /* Not even one queue is created successfully*/ + retval = -1; + goto out_failed; + } + mrioc->num_op_reply_q = mrioc->num_op_req_q = i; + ioc_info(mrioc, + "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n", + mrioc->num_op_reply_q, mrioc->default_qcount, + mrioc->active_poll_qcount); + + return retval; +out_failed: + kfree(mrioc->req_qinfo); + mrioc->req_qinfo = NULL; + + kfree(mrioc->op_reply_qinfo); + mrioc->op_reply_qinfo = NULL; + + return retval; +} + +/** + * mpi3mr_op_request_post - Post request to operational queue + * @mrioc: Adapter reference + * @op_req_q: Operational request queue info + * @req: MPI3 request + * + * Post the MPI3 request into operational request queue and + * inform the controller, if the queue is full return + * appropriate error. + * + * Return: 0 on success, non-zero on failure. 
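+ *
+ * Editor's note (illustrative, not part of the upstream file): the
+ * producer index wraps at num_requests and the new value is written to
+ * the per-queue producer_index register; e.g. with a hypothetical
+ * 256-entry queue a post at pi 255 advances pi to 0. If the queue looks
+ * full, the paired reply queue is drained once before giving up with
+ * -EAGAIN.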
+ */ +int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, + struct op_req_qinfo *op_req_q, u8 *req) +{ + u16 pi = 0, max_entries, reply_qidx = 0, midx; + int retval = 0; + unsigned long flags; + u8 *req_entry; + void *segment_base_addr; + u16 req_sz = mrioc->facts.op_req_sz; + struct segments *segments = op_req_q->q_segments; + + reply_qidx = op_req_q->reply_qid - 1; + + if (mrioc->unrecoverable) + return -EFAULT; + + spin_lock_irqsave(&op_req_q->q_lock, flags); + pi = op_req_q->pi; + max_entries = op_req_q->num_requests; + + if (mpi3mr_check_req_qfull(op_req_q)) { + midx = REPLY_QUEUE_IDX_TO_MSIX_IDX( + reply_qidx, mrioc->op_reply_q_offset); + mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q); + + if (mpi3mr_check_req_qfull(op_req_q)) { + retval = -EAGAIN; + goto out; + } + } + + if (mrioc->reset_in_progress) { + ioc_err(mrioc, "OpReqQ submit reset in progress\n"); + retval = -EAGAIN; + goto out; + } + + segment_base_addr = segments[pi / op_req_q->segment_qd].segment; + req_entry = (u8 *)segment_base_addr + + ((pi % op_req_q->segment_qd) * req_sz); + + memset(req_entry, 0, req_sz); + memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ); + + if (++pi == max_entries) + pi = 0; + op_req_q->pi = pi; + +#ifndef CONFIG_PREEMPT_RT + if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios) + > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT) + mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true; +#else + atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios); +#endif + + writel(op_req_q->pi, + &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index); + +out: + spin_unlock_irqrestore(&op_req_q->q_lock, flags); + return retval; +} + +/** + * mpi3mr_check_rh_fault_ioc - check reset history and fault + * controller + * @mrioc: Adapter instance reference + * @reason_code: reason code for the fault. + * + * This routine will save snapdump and fault the controller with + * the given reason code if it is not already in the fault or + * not asynchronosuly reset. This will be used to handle + * initilaization time faults/resets/timeout as in those cases + * immediate soft reset invocation is not required. + * + * Return: None. + */ +void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code) +{ + u32 ioc_status, host_diagnostic, timeout; + + if (mrioc->unrecoverable) { + ioc_err(mrioc, "controller is unrecoverable\n"); + return; + } + + if (!pci_device_is_present(mrioc->pdev)) { + mrioc->unrecoverable = 1; + ioc_err(mrioc, "controller is not present\n"); + return; + } + + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || + (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { + mpi3mr_print_fault_info(mrioc); + return; + } + mpi3mr_set_diagsave(mrioc); + mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, + reason_code); + timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; + do { + host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); + if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS)) + break; + msleep(100); + } while (--timeout); +} + +/** + * mpi3mr_sync_timestamp - Issue time stamp sync request + * @mrioc: Adapter reference + * + * Issue IO unit control MPI request to synchornize firmware + * timestamp with host time. + * + * Return: 0 on success, non-zero on failure. 
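+ *
+ * Editor's note (illustrative, not part of the upstream file): the request
+ * is an IO unit control with operation MPI3_CTRL_OP_UPDATE_TIMESTAMP and
+ * param64[0] set to the host wall-clock time in milliseconds, i.e.
+ * cpu_to_le64(ktime_to_ms(ktime_get_real())).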
+ */ +static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc) +{ + ktime_t current_time; + struct mpi3_iounit_control_request iou_ctrl; + int retval = 0; + + memset(&iou_ctrl, 0, sizeof(iou_ctrl)); + mutex_lock(&mrioc->init_cmds.mutex); + if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { + retval = -1; + ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n"); + mutex_unlock(&mrioc->init_cmds.mutex); + goto out; + } + mrioc->init_cmds.state = MPI3MR_CMD_PENDING; + mrioc->init_cmds.is_waiting = 1; + mrioc->init_cmds.callback = NULL; + iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); + iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL; + iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP; + current_time = ktime_get_real(); + iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time)); + + init_completion(&mrioc->init_cmds.done); + retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, + sizeof(iou_ctrl), 0); + if (retval) { + ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n"); + goto out_unlock; + } + + wait_for_completion_timeout(&mrioc->init_cmds.done, + (MPI3MR_INTADMCMD_TIMEOUT * HZ)); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { + ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n"); + mrioc->init_cmds.is_waiting = 0; + if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET)) + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_TSU_TIMEOUT); + retval = -1; + goto out_unlock; + } + if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, + "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", + (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), + mrioc->init_cmds.ioc_loginfo); + retval = -1; + goto out_unlock; + } + +out_unlock: + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->init_cmds.mutex); + +out: + return retval; +} + +/** + * mpi3mr_print_pkg_ver - display controller fw package version + * @mrioc: Adapter reference + * + * Retrieve firmware package version from the component image + * header of the controller flash and display it. + * + * Return: 0 on success and non-zero on failure. 
+ */ +static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc) +{ + struct mpi3_ci_upload_request ci_upload; + int retval = -1; + void *data = NULL; + dma_addr_t data_dma; + struct mpi3_ci_manifest_mpi *manifest; + u32 data_len = sizeof(struct mpi3_ci_manifest_mpi); + u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; + + data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma, + GFP_KERNEL); + if (!data) + return -ENOMEM; + + memset(&ci_upload, 0, sizeof(ci_upload)); + mutex_lock(&mrioc->init_cmds.mutex); + if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { + ioc_err(mrioc, "sending get package version failed due to command in use\n"); + mutex_unlock(&mrioc->init_cmds.mutex); + goto out; + } + mrioc->init_cmds.state = MPI3MR_CMD_PENDING; + mrioc->init_cmds.is_waiting = 1; + mrioc->init_cmds.callback = NULL; + ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); + ci_upload.function = MPI3_FUNCTION_CI_UPLOAD; + ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY; + ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST); + ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE); + ci_upload.segment_size = cpu_to_le32(data_len); + + mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len, + data_dma); + init_completion(&mrioc->init_cmds.done); + retval = mpi3mr_admin_request_post(mrioc, &ci_upload, + sizeof(ci_upload), 1); + if (retval) { + ioc_err(mrioc, "posting get package version failed\n"); + goto out_unlock; + } + wait_for_completion_timeout(&mrioc->init_cmds.done, + (MPI3MR_INTADMCMD_TIMEOUT * HZ)); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { + ioc_err(mrioc, "get package version timed out\n"); + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT); + retval = -1; + goto out_unlock; + } + if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + == MPI3_IOCSTATUS_SUCCESS) { + manifest = (struct mpi3_ci_manifest_mpi *) data; + if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) { + ioc_info(mrioc, + "firmware package version(%d.%d.%d.%d.%05d-%05d)\n", + manifest->package_version.gen_major, + manifest->package_version.gen_minor, + manifest->package_version.phase_major, + manifest->package_version.phase_minor, + manifest->package_version.customer_id, + manifest->package_version.build_num); + } + } + retval = 0; +out_unlock: + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->init_cmds.mutex); + +out: + if (data) + dma_free_coherent(&mrioc->pdev->dev, data_len, data, + data_dma); + return retval; +} + +/** + * mpi3mr_watchdog_work - watchdog thread to monitor faults + * @work: work struct + * + * Watch dog work periodically executed (1 second interval) to + * monitor firmware fault and to issue periodic timer sync to + * the firmware. + * + * Return: Nothing. 
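+ *
+ * When no fault handling is needed the work simply re-arms itself, as in
+ * the schedule_work label below:
+ *
+ *      spin_lock_irqsave(&mrioc->watchdog_lock, flags);
+ *      if (mrioc->watchdog_work_q)
+ *              queue_delayed_work(mrioc->watchdog_work_q,
+ *                  &mrioc->watchdog_work,
+ *                  msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
+ *      spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);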
+ */ +static void mpi3mr_watchdog_work(struct work_struct *work) +{ + struct mpi3mr_ioc *mrioc = + container_of(work, struct mpi3mr_ioc, watchdog_work.work); + unsigned long flags; + enum mpi3mr_iocstate ioc_state; + u32 fault, host_diagnostic, ioc_status; + u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH; + + if (mrioc->reset_in_progress) + return; + + if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) { + ioc_err(mrioc, "watchdog could not detect the controller\n"); + mrioc->unrecoverable = 1; + } + + if (mrioc->unrecoverable) { + ioc_err(mrioc, + "flush pending commands for unrecoverable controller\n"); + mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); + return; + } + + if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) { + mrioc->ts_update_counter = 0; + mpi3mr_sync_timestamp(mrioc); + } + + if ((mrioc->prepare_for_reset) && + ((mrioc->prepare_for_reset_timeout_counter++) >= + MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) { + mpi3mr_soft_reset_handler(mrioc, + MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1); + return; + } + + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) { + mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0); + return; + } + + /*Check for fault state every one second and issue Soft reset*/ + ioc_state = mpi3mr_get_iocstate(mrioc); + if (ioc_state != MRIOC_STATE_FAULT) + goto schedule_work; + + fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; + host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); + if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) { + if (!mrioc->diagsave_timeout) { + mpi3mr_print_fault_info(mrioc); + ioc_warn(mrioc, "diag save in progress\n"); + } + if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT) + goto schedule_work; + } + + mpi3mr_print_fault_info(mrioc); + mrioc->diagsave_timeout = 0; + + switch (fault) { + case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED: + case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED: + ioc_warn(mrioc, + "controller requires system power cycle, marking controller as unrecoverable\n"); + mrioc->unrecoverable = 1; + goto schedule_work; + case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS: + goto schedule_work; + case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET: + reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT; + break; + default: + break; + } + mpi3mr_soft_reset_handler(mrioc, reset_reason, 0); + return; + +schedule_work: + spin_lock_irqsave(&mrioc->watchdog_lock, flags); + if (mrioc->watchdog_work_q) + queue_delayed_work(mrioc->watchdog_work_q, + &mrioc->watchdog_work, + msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); + spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); + return; +} + +/** + * mpi3mr_start_watchdog - Start watchdog + * @mrioc: Adapter instance reference + * + * Create and start the watchdog thread to monitor controller + * faults. + * + * Return: Nothing. 
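+ *
+ * A plausible pairing with mpi3mr_stop_watchdog() (illustrative only;
+ * the real probe/remove call sites live elsewhere in the driver):
+ *
+ *      mpi3mr_start_watchdog(mrioc);
+ *      ...
+ *      mpi3mr_stop_watchdog(mrioc);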
+ */ +void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc) +{ + if (mrioc->watchdog_work_q) + return; + + INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work); + snprintf(mrioc->watchdog_work_q_name, + sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name, + mrioc->id); + mrioc->watchdog_work_q = + create_singlethread_workqueue(mrioc->watchdog_work_q_name); + if (!mrioc->watchdog_work_q) { + ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__); + return; + } + + if (mrioc->watchdog_work_q) + queue_delayed_work(mrioc->watchdog_work_q, + &mrioc->watchdog_work, + msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); +} + +/** + * mpi3mr_stop_watchdog - Stop watchdog + * @mrioc: Adapter instance reference + * + * Stop the watchdog thread created to monitor controller + * faults. + * + * Return: Nothing. + */ +void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&mrioc->watchdog_lock, flags); + wq = mrioc->watchdog_work_q; + mrioc->watchdog_work_q = NULL; + spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); + if (wq) { + if (!cancel_delayed_work_sync(&mrioc->watchdog_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +/** + * mpi3mr_setup_admin_qpair - Setup admin queue pair + * @mrioc: Adapter instance reference + * + * Allocate memory for admin queue pair if required and register + * the admin queue with the controller. + * + * Return: 0 on success, non-zero on failures. + */ +static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc) +{ + int retval = 0; + u32 num_admin_entries = 0; + + mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE; + mrioc->num_admin_req = mrioc->admin_req_q_sz / + MPI3MR_ADMIN_REQ_FRAME_SZ; + mrioc->admin_req_ci = mrioc->admin_req_pi = 0; + + mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE; + mrioc->num_admin_replies = mrioc->admin_reply_q_sz / + MPI3MR_ADMIN_REPLY_FRAME_SZ; + mrioc->admin_reply_ci = 0; + mrioc->admin_reply_ephase = 1; + atomic_set(&mrioc->admin_reply_q_in_use, 0); + + if (!mrioc->admin_req_base) { + mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev, + mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL); + + if (!mrioc->admin_req_base) { + retval = -1; + goto out_failed; + } + + mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev, + mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma, + GFP_KERNEL); + + if (!mrioc->admin_reply_base) { + retval = -1; + goto out_failed; + } + } + + num_admin_entries = (mrioc->num_admin_replies << 16) | + (mrioc->num_admin_req); + writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries); + mpi3mr_writeq(mrioc->admin_req_dma, + &mrioc->sysif_regs->admin_request_queue_address); + mpi3mr_writeq(mrioc->admin_reply_dma, + &mrioc->sysif_regs->admin_reply_queue_address); + writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); + writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci); + return retval; + +out_failed: + + if (mrioc->admin_reply_base) { + dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz, + mrioc->admin_reply_base, mrioc->admin_reply_dma); + mrioc->admin_reply_base = NULL; + } + if (mrioc->admin_req_base) { + dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz, + mrioc->admin_req_base, mrioc->admin_req_dma); + mrioc->admin_req_base = NULL; + } + return retval; +} + +/** + * mpi3mr_issue_iocfacts - Send IOC Facts + * @mrioc: Adapter instance reference + * @facts_data: Cached IOC facts data + * + * Issue IOC 
Facts MPI request through admin queue and wait for + * the completion of it or time out. + * + * Return: 0 on success, non-zero on failures. + */ +static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc, + struct mpi3_ioc_facts_data *facts_data) +{ + struct mpi3_ioc_facts_request iocfacts_req; + void *data = NULL; + dma_addr_t data_dma; + u32 data_len = sizeof(*facts_data); + int retval = 0; + u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; + + data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma, + GFP_KERNEL); + + if (!data) { + retval = -1; + goto out; + } + + memset(&iocfacts_req, 0, sizeof(iocfacts_req)); + mutex_lock(&mrioc->init_cmds.mutex); + if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { + retval = -1; + ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n"); + mutex_unlock(&mrioc->init_cmds.mutex); + goto out; + } + mrioc->init_cmds.state = MPI3MR_CMD_PENDING; + mrioc->init_cmds.is_waiting = 1; + mrioc->init_cmds.callback = NULL; + iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); + iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS; + + mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len, + data_dma); + + init_completion(&mrioc->init_cmds.done); + retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req, + sizeof(iocfacts_req), 1); + if (retval) { + ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n"); + goto out_unlock; + } + wait_for_completion_timeout(&mrioc->init_cmds.done, + (MPI3MR_INTADMCMD_TIMEOUT * HZ)); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { + ioc_err(mrioc, "ioc_facts timed out\n"); + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT); + retval = -1; + goto out_unlock; + } + if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, + "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", + (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), + mrioc->init_cmds.ioc_loginfo); + retval = -1; + goto out_unlock; + } + memcpy(facts_data, (u8 *)data, data_len); + mpi3mr_process_factsdata(mrioc, facts_data); +out_unlock: + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->init_cmds.mutex); + +out: + if (data) + dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma); + + return retval; +} + +/** + * mpi3mr_check_reset_dma_mask - Process IOC facts data + * @mrioc: Adapter instance reference + * + * Check whether the new DMA mask requested through IOCFacts by + * firmware needs to be set, if so set it . + * + * Return: 0 on success, non-zero on failure. + */ +static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc) +{ + struct pci_dev *pdev = mrioc->pdev; + int r; + u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask); + + if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask)) + return 0; + + ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n", + mrioc->dma_mask, facts_dma_mask); + + r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask); + if (r) { + ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n", + facts_dma_mask, r); + return r; + } + mrioc->dma_mask = facts_dma_mask; + return r; +} + +/** + * mpi3mr_process_factsdata - Process IOC facts data + * @mrioc: Adapter instance reference + * @facts_data: Cached IOC facts data + * + * Convert IOC facts data into cpu endianness and cache it in + * the driver . + * + * Return: Nothing. 
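+ *
+ * The conversion is per-field le16_to_cpu()/le32_to_cpu() plus
+ * mask-and-shift extraction of packed flag fields, e.g. (taken from the
+ * body below):
+ *
+ *      facts_flags = le32_to_cpu(facts_data->flags);
+ *      mrioc->facts.dma_mask = (facts_flags &
+ *          MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
+ *          MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;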
+ */ +static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, + struct mpi3_ioc_facts_data *facts_data) +{ + u32 ioc_config, req_sz, facts_flags; + + if ((le16_to_cpu(facts_data->ioc_facts_data_length)) != + (sizeof(*facts_data) / 4)) { + ioc_warn(mrioc, + "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n", + sizeof(*facts_data), + le16_to_cpu(facts_data->ioc_facts_data_length) * 4); + } + + ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); + req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >> + MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT); + if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) { + ioc_err(mrioc, + "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n", + req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size)); + } + + memset(&mrioc->facts, 0, sizeof(mrioc->facts)); + + facts_flags = le32_to_cpu(facts_data->flags); + mrioc->facts.op_req_sz = req_sz; + mrioc->op_reply_desc_sz = 1 << ((ioc_config & + MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >> + MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT); + + mrioc->facts.ioc_num = facts_data->ioc_number; + mrioc->facts.who_init = facts_data->who_init; + mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors); + mrioc->facts.personality = (facts_flags & + MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK); + mrioc->facts.dma_mask = (facts_flags & + MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >> + MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT; + mrioc->facts.protocol_flags = facts_data->protocol_flags; + mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word); + mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests); + mrioc->facts.product_id = le16_to_cpu(facts_data->product_id); + mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4; + mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions); + mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id); + mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds); + mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds); + mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds); + mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds); + mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme); + mrioc->facts.max_pcie_switches = + le16_to_cpu(facts_data->max_pcie_switches); + mrioc->facts.max_sasexpanders = + le16_to_cpu(facts_data->max_sas_expanders); + mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length); + mrioc->facts.max_sasinitiators = + le16_to_cpu(facts_data->max_sas_initiators); + mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures); + mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle); + mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle); + mrioc->facts.max_op_req_q = + le16_to_cpu(facts_data->max_operational_request_queues); + mrioc->facts.max_op_reply_q = + le16_to_cpu(facts_data->max_operational_reply_queues); + mrioc->facts.ioc_capabilities = + le32_to_cpu(facts_data->ioc_capabilities); + mrioc->facts.fw_ver.build_num = + le16_to_cpu(facts_data->fw_version.build_num); + mrioc->facts.fw_ver.cust_id = + le16_to_cpu(facts_data->fw_version.customer_id); + mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor; + mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major; + mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor; + mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major; + 
mrioc->msix_count = min_t(int, mrioc->msix_count, + mrioc->facts.max_msix_vectors); + mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask; + mrioc->facts.sge_mod_value = facts_data->sge_modifier_value; + mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift; + mrioc->facts.shutdown_timeout = + le16_to_cpu(facts_data->shutdown_timeout); + + mrioc->facts.max_dev_per_tg = + facts_data->max_devices_per_throttle_group; + mrioc->facts.io_throttle_data_length = + le16_to_cpu(facts_data->io_throttle_data_length); + mrioc->facts.max_io_throttle_group = + le16_to_cpu(facts_data->max_io_throttle_group); + mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low); + mrioc->facts.io_throttle_high = + le16_to_cpu(facts_data->io_throttle_high); + + if (mrioc->facts.max_data_length == + MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED) + mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE; + else + mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K; + /* Store in 512b block count */ + if (mrioc->facts.io_throttle_data_length) + mrioc->io_throttle_data_length = + (mrioc->facts.io_throttle_data_length * 2 * 4); + else + /* set the length to 1MB + 1K to disable throttle */ + mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2; + + mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024); + mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024); + + ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),", + mrioc->facts.ioc_num, mrioc->facts.max_op_req_q, + mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle); + ioc_info(mrioc, + "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n", + mrioc->facts.max_reqs, mrioc->facts.min_devhandle, + mrioc->facts.max_msix_vectors, mrioc->facts.max_perids); + ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ", + mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value, + mrioc->facts.sge_mod_shift); + ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n", + mrioc->facts.dma_mask, (facts_flags & + MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length); + ioc_info(mrioc, + "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n", + mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group); + ioc_info(mrioc, + "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n", + mrioc->facts.io_throttle_data_length * 4, + mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low); +} + +/** + * mpi3mr_alloc_reply_sense_bufs - Send IOC Init + * @mrioc: Adapter instance reference + * + * Allocate and initialize the reply free buffers, sense + * buffers, reply free queue and sense buffer queue. + * + * Return: 0 on success, non-zero on failures. 
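+ *
+ * Sizing used below, condensed: each free queue is one entry deeper than
+ * the buffer count it tracks (presumably so a full queue can be told
+ * apart from an empty one):
+ *
+ *      mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
+ *      mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
+ *      mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
+ *      mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;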
+ */ +static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc) +{ + int retval = 0; + u32 sz, i; + + if (mrioc->init_cmds.reply) + return retval; + + mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); + if (!mrioc->init_cmds.reply) + goto out_failed; + + mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); + if (!mrioc->bsg_cmds.reply) + goto out_failed; + + mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); + if (!mrioc->transport_cmds.reply) + goto out_failed; + + for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) { + mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz, + GFP_KERNEL); + if (!mrioc->dev_rmhs_cmds[i].reply) + goto out_failed; + } + + for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) { + mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz, + GFP_KERNEL); + if (!mrioc->evtack_cmds[i].reply) + goto out_failed; + } + + mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); + if (!mrioc->host_tm_cmds.reply) + goto out_failed; + + mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); + if (!mrioc->pel_cmds.reply) + goto out_failed; + + mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); + if (!mrioc->pel_abort_cmd.reply) + goto out_failed; + + mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle; + mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits, + GFP_KERNEL); + if (!mrioc->removepend_bitmap) + goto out_failed; + + mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL); + if (!mrioc->devrem_bitmap) + goto out_failed; + + mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD, + GFP_KERNEL); + if (!mrioc->evtack_cmds_bitmap) + goto out_failed; + + mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES; + mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1; + mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR; + mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1; + + /* reply buffer pool, 16 byte align */ + sz = mrioc->num_reply_bufs * mrioc->reply_sz; + mrioc->reply_buf_pool = dma_pool_create("reply_buf pool", + &mrioc->pdev->dev, sz, 16, 0); + if (!mrioc->reply_buf_pool) { + ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n"); + goto out_failed; + } + + mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL, + &mrioc->reply_buf_dma); + if (!mrioc->reply_buf) + goto out_failed; + + mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz; + + /* reply free queue, 8 byte align */ + sz = mrioc->reply_free_qsz * 8; + mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool", + &mrioc->pdev->dev, sz, 8, 0); + if (!mrioc->reply_free_q_pool) { + ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n"); + goto out_failed; + } + mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool, + GFP_KERNEL, &mrioc->reply_free_q_dma); + if (!mrioc->reply_free_q) + goto out_failed; + + /* sense buffer pool, 4 byte align */ + sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ; + mrioc->sense_buf_pool = dma_pool_create("sense_buf pool", + &mrioc->pdev->dev, sz, 4, 0); + if (!mrioc->sense_buf_pool) { + ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n"); + goto out_failed; + } + mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL, + &mrioc->sense_buf_dma); + if (!mrioc->sense_buf) + goto out_failed; + + /* sense buffer queue, 8 byte align */ + sz = mrioc->sense_buf_q_sz * 8; + mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool", + &mrioc->pdev->dev, sz, 8, 0); + if 
(!mrioc->sense_buf_q_pool) { + ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n"); + goto out_failed; + } + mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool, + GFP_KERNEL, &mrioc->sense_buf_q_dma); + if (!mrioc->sense_buf_q) + goto out_failed; + + return retval; + +out_failed: + retval = -1; + return retval; +} + +/** + * mpimr_initialize_reply_sbuf_queues - initialize reply sense + * buffers + * @mrioc: Adapter instance reference + * + * Helper function to initialize reply and sense buffers along + * with some debug prints. + * + * Return: None. + */ +static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc) +{ + u32 sz, i; + dma_addr_t phy_addr; + + sz = mrioc->num_reply_bufs * mrioc->reply_sz; + ioc_info(mrioc, + "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n", + mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz, + (sz / 1024), (unsigned long long)mrioc->reply_buf_dma); + sz = mrioc->reply_free_qsz * 8; + ioc_info(mrioc, + "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n", + mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024), + (unsigned long long)mrioc->reply_free_q_dma); + sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ; + ioc_info(mrioc, + "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n", + mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ, + (sz / 1024), (unsigned long long)mrioc->sense_buf_dma); + sz = mrioc->sense_buf_q_sz * 8; + ioc_info(mrioc, + "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n", + mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024), + (unsigned long long)mrioc->sense_buf_q_dma); + + /* initialize Reply buffer Queue */ + for (i = 0, phy_addr = mrioc->reply_buf_dma; + i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz) + mrioc->reply_free_q[i] = cpu_to_le64(phy_addr); + mrioc->reply_free_q[i] = cpu_to_le64(0); + + /* initialize Sense Buffer Queue */ + for (i = 0, phy_addr = mrioc->sense_buf_dma; + i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ) + mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr); + mrioc->sense_buf_q[i] = cpu_to_le64(0); +} + +/** + * mpi3mr_issue_iocinit - Send IOC Init + * @mrioc: Adapter instance reference + * + * Issue IOC Init MPI request through admin queue and wait for + * the completion of it or time out. + * + * Return: 0 on success, non-zero on failures. 
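+ *
+ * The request hands the firmware the reply-free and sense-buffer queue
+ * geometry set up earlier; the key fields, condensed from the body below:
+ *
+ *      iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
+ *      iocinit_req.reply_free_queue_address =
+ *          cpu_to_le64(mrioc->reply_free_q_dma);
+ *      iocinit_req.sense_buffer_free_queue_depth =
+ *          cpu_to_le16(mrioc->sense_buf_q_sz);
+ *      iocinit_req.sense_buffer_free_queue_address =
+ *          cpu_to_le64(mrioc->sense_buf_q_dma);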
+ */ +static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc) +{ + struct mpi3_ioc_init_request iocinit_req; + struct mpi3_driver_info_layout *drv_info; + dma_addr_t data_dma; + u32 data_len = sizeof(*drv_info); + int retval = 0; + ktime_t current_time; + + drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma, + GFP_KERNEL); + if (!drv_info) { + retval = -1; + goto out; + } + mpimr_initialize_reply_sbuf_queues(mrioc); + + drv_info->information_length = cpu_to_le32(data_len); + strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature)); + strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name)); + strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version)); + strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name)); + strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version)); + strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE, + sizeof(drv_info->driver_release_date)); + drv_info->driver_capabilities = 0; + memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info, + sizeof(mrioc->driver_info)); + + memset(&iocinit_req, 0, sizeof(iocinit_req)); + mutex_lock(&mrioc->init_cmds.mutex); + if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { + retval = -1; + ioc_err(mrioc, "Issue IOCInit: Init command is in use\n"); + mutex_unlock(&mrioc->init_cmds.mutex); + goto out; + } + mrioc->init_cmds.state = MPI3MR_CMD_PENDING; + mrioc->init_cmds.is_waiting = 1; + mrioc->init_cmds.callback = NULL; + iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); + iocinit_req.function = MPI3_FUNCTION_IOC_INIT; + iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV; + iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT; + iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR; + iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR; + iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER; + iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz); + iocinit_req.reply_free_queue_address = + cpu_to_le64(mrioc->reply_free_q_dma); + iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ); + iocinit_req.sense_buffer_free_queue_depth = + cpu_to_le16(mrioc->sense_buf_q_sz); + iocinit_req.sense_buffer_free_queue_address = + cpu_to_le64(mrioc->sense_buf_q_dma); + iocinit_req.driver_information_address = cpu_to_le64(data_dma); + + current_time = ktime_get_real(); + iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time)); + + init_completion(&mrioc->init_cmds.done); + retval = mpi3mr_admin_request_post(mrioc, &iocinit_req, + sizeof(iocinit_req), 1); + if (retval) { + ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n"); + goto out_unlock; + } + wait_for_completion_timeout(&mrioc->init_cmds.done, + (MPI3MR_INTADMCMD_TIMEOUT * HZ)); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_IOCINIT_TIMEOUT); + ioc_err(mrioc, "ioc_init timed out\n"); + retval = -1; + goto out_unlock; + } + if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, + "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", + (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), + mrioc->init_cmds.ioc_loginfo); + retval = -1; + goto out_unlock; + } + + mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs; + writel(mrioc->reply_free_queue_host_index, + &mrioc->sysif_regs->reply_free_host_index); + + mrioc->sbq_host_index = 
mrioc->num_sense_bufs; + writel(mrioc->sbq_host_index, + &mrioc->sysif_regs->sense_buffer_free_host_index); +out_unlock: + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->init_cmds.mutex); + +out: + if (drv_info) + dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info, + data_dma); + + return retval; +} + +/** + * mpi3mr_unmask_events - Unmask events in event mask bitmap + * @mrioc: Adapter instance reference + * @event: MPI event ID + * + * Un mask the specific event by resetting the event_mask + * bitmap. + * + * Return: 0 on success, non-zero on failures. + */ +static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event) +{ + u32 desired_event; + u8 word; + + if (event >= 128) + return; + + desired_event = (1 << (event % 32)); + word = event / 32; + + mrioc->event_masks[word] &= ~desired_event; +} + +/** + * mpi3mr_issue_event_notification - Send event notification + * @mrioc: Adapter instance reference + * + * Issue event notification MPI request through admin queue and + * wait for the completion of it or time out. + * + * Return: 0 on success, non-zero on failures. + */ +static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc) +{ + struct mpi3_event_notification_request evtnotify_req; + int retval = 0; + u8 i; + + memset(&evtnotify_req, 0, sizeof(evtnotify_req)); + mutex_lock(&mrioc->init_cmds.mutex); + if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { + retval = -1; + ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n"); + mutex_unlock(&mrioc->init_cmds.mutex); + goto out; + } + mrioc->init_cmds.state = MPI3MR_CMD_PENDING; + mrioc->init_cmds.is_waiting = 1; + mrioc->init_cmds.callback = NULL; + evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); + evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION; + for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) + evtnotify_req.event_masks[i] = + cpu_to_le32(mrioc->event_masks[i]); + init_completion(&mrioc->init_cmds.done); + retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req, + sizeof(evtnotify_req), 1); + if (retval) { + ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n"); + goto out_unlock; + } + wait_for_completion_timeout(&mrioc->init_cmds.done, + (MPI3MR_INTADMCMD_TIMEOUT * HZ)); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { + ioc_err(mrioc, "event notification timed out\n"); + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT); + retval = -1; + goto out_unlock; + } + if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, + "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", + (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), + mrioc->init_cmds.ioc_loginfo); + retval = -1; + goto out_unlock; + } + +out_unlock: + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->init_cmds.mutex); +out: + return retval; +} + +/** + * mpi3mr_process_event_ack - Process event acknowledgment + * @mrioc: Adapter instance reference + * @event: MPI3 event ID + * @event_ctx: event context + * + * Send event acknowledgment through admin queue and wait for + * it to complete. + * + * Return: 0 on success, non-zero on failures. 
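+ *
+ * A hypothetical caller (illustrative; assumes an event reply structure
+ * exposing event and event_context fields, which is not shown here):
+ *
+ *      retval = mpi3mr_process_event_ack(mrioc, event_reply->event,
+ *          le32_to_cpu(event_reply->event_context));
+ *      if (retval)
+ *              ioc_err(mrioc, "event ack failed %d\n", retval);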
+ */ +int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event, + u32 event_ctx) +{ + struct mpi3_event_ack_request evtack_req; + int retval = 0; + + memset(&evtack_req, 0, sizeof(evtack_req)); + mutex_lock(&mrioc->init_cmds.mutex); + if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { + retval = -1; + ioc_err(mrioc, "Send EvtAck: Init command is in use\n"); + mutex_unlock(&mrioc->init_cmds.mutex); + goto out; + } + mrioc->init_cmds.state = MPI3MR_CMD_PENDING; + mrioc->init_cmds.is_waiting = 1; + mrioc->init_cmds.callback = NULL; + evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); + evtack_req.function = MPI3_FUNCTION_EVENT_ACK; + evtack_req.event = event; + evtack_req.event_context = cpu_to_le32(event_ctx); + + init_completion(&mrioc->init_cmds.done); + retval = mpi3mr_admin_request_post(mrioc, &evtack_req, + sizeof(evtack_req), 1); + if (retval) { + ioc_err(mrioc, "Send EvtAck: Admin Post failed\n"); + goto out_unlock; + } + wait_for_completion_timeout(&mrioc->init_cmds.done, + (MPI3MR_INTADMCMD_TIMEOUT * HZ)); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { + ioc_err(mrioc, "Issue EvtNotify: command timed out\n"); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET)) + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_EVTACK_TIMEOUT); + retval = -1; + goto out_unlock; + } + if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) + != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, + "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", + (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), + mrioc->init_cmds.ioc_loginfo); + retval = -1; + goto out_unlock; + } + +out_unlock: + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->init_cmds.mutex); +out: + return retval; +} + +/** + * mpi3mr_alloc_chain_bufs - Allocate chain buffers + * @mrioc: Adapter instance reference + * + * Allocate chain buffers and set a bitmap to indicate free + * chain buffers. Chain buffers are used to pass the SGE + * information along with MPI3 SCSI IO requests for host I/O. 
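+ *
+ * Chain count sizing used below, condensed: one chain buffer per
+ * MPI3MR_CHAINBUF_FACTOR host I/Os, padded when any DIX protection type
+ * is enabled:
+ *
+ *      num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
+ *      if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
+ *          | SHOST_DIX_TYPE1_PROTECTION
+ *          | SHOST_DIX_TYPE2_PROTECTION
+ *          | SHOST_DIX_TYPE3_PROTECTION))
+ *              num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);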
+ * + * Return: 0 on success, non-zero on failure + */ +static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc) +{ + int retval = 0; + u32 sz, i; + u16 num_chains; + + if (mrioc->chain_sgl_list) + return retval; + + num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR; + + if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION + | SHOST_DIX_TYPE1_PROTECTION + | SHOST_DIX_TYPE2_PROTECTION + | SHOST_DIX_TYPE3_PROTECTION)) + num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR); + + mrioc->chain_buf_count = num_chains; + sz = sizeof(struct chain_element) * num_chains; + mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL); + if (!mrioc->chain_sgl_list) + goto out_failed; + + if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length / + MPI3MR_PAGE_SIZE_4K)) + mrioc->max_sgl_entries = mrioc->facts.max_data_length / + MPI3MR_PAGE_SIZE_4K; + sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common); + ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n", + mrioc->max_sgl_entries, sz/1024); + + mrioc->chain_buf_pool = dma_pool_create("chain_buf pool", + &mrioc->pdev->dev, sz, 16, 0); + if (!mrioc->chain_buf_pool) { + ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n"); + goto out_failed; + } + + for (i = 0; i < num_chains; i++) { + mrioc->chain_sgl_list[i].addr = + dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL, + &mrioc->chain_sgl_list[i].dma_addr); + + if (!mrioc->chain_sgl_list[i].addr) + goto out_failed; + } + mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL); + if (!mrioc->chain_bitmap) + goto out_failed; + return retval; +out_failed: + retval = -1; + return retval; +} + +/** + * mpi3mr_port_enable_complete - Mark port enable complete + * @mrioc: Adapter instance reference + * @drv_cmd: Internal command tracker + * + * Call back for asynchronous port enable request sets the + * driver command to indicate port enable request is complete. + * + * Return: Nothing + */ +static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc, + struct mpi3mr_drv_cmd *drv_cmd) +{ + drv_cmd->callback = NULL; + mrioc->scan_started = 0; + if (drv_cmd->state & MPI3MR_CMD_RESET) + mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; + else + mrioc->scan_failed = drv_cmd->ioc_status; + drv_cmd->state = MPI3MR_CMD_NOTUSED; +} + +/** + * mpi3mr_issue_port_enable - Issue Port Enable + * @mrioc: Adapter instance reference + * @async: Flag to wait for completion or not + * + * Issue Port Enable MPI request through admin queue and if the + * async flag is not set wait for the completion of the port + * enable or time out. + * + * Return: 0 on success, non-zero on failures. 
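+ *
+ * Asynchronous use, condensed from the re-initialization path further
+ * below (the caller then polls the command tracker state for completion):
+ *
+ *      retval = mpi3mr_issue_port_enable(mrioc, 1);
+ *      ...
+ *      do {
+ *              ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
+ *              if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
+ *                      break;
+ *      } while (--pe_timeout);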
+ */ +int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async) +{ + struct mpi3_port_enable_request pe_req; + int retval = 0; + u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; + + memset(&pe_req, 0, sizeof(pe_req)); + mutex_lock(&mrioc->init_cmds.mutex); + if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { + retval = -1; + ioc_err(mrioc, "Issue PortEnable: Init command is in use\n"); + mutex_unlock(&mrioc->init_cmds.mutex); + goto out; + } + mrioc->init_cmds.state = MPI3MR_CMD_PENDING; + if (async) { + mrioc->init_cmds.is_waiting = 0; + mrioc->init_cmds.callback = mpi3mr_port_enable_complete; + } else { + mrioc->init_cmds.is_waiting = 1; + mrioc->init_cmds.callback = NULL; + init_completion(&mrioc->init_cmds.done); + } + pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); + pe_req.function = MPI3_FUNCTION_PORT_ENABLE; + + retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1); + if (retval) { + ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n"); + goto out_unlock; + } + if (async) { + mutex_unlock(&mrioc->init_cmds.mutex); + goto out; + } + + wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ)); + if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { + ioc_err(mrioc, "port enable timed out\n"); + retval = -1; + mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT); + goto out_unlock; + } + mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds); + +out_unlock: + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->init_cmds.mutex); +out: + return retval; +} + +/* Protocol type to name mapper structure */ +static const struct { + u8 protocol; + char *name; +} mpi3mr_protocols[] = { + { MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" }, + { MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" }, + { MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" }, +}; + +/* Capability to name mapper structure*/ +static const struct { + u32 capability; + char *name; +} mpi3mr_capabilities[] = { + { MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" }, + { MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED, "MultiPath" }, +}; + +/** + * mpi3mr_print_ioc_info - Display controller information + * @mrioc: Adapter instance reference + * + * Display controller personalit, capability, supported + * protocols etc. + * + * Return: Nothing + */ +static void +mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc) +{ + int i = 0, bytes_written = 0; + char personality[16]; + char protocol[50] = {0}; + char capabilities[100] = {0}; + struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver; + + switch (mrioc->facts.personality) { + case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA: + strncpy(personality, "Enhanced HBA", sizeof(personality)); + break; + case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR: + strncpy(personality, "RAID", sizeof(personality)); + break; + default: + strncpy(personality, "Unknown", sizeof(personality)); + break; + } + + ioc_info(mrioc, "Running in %s Personality", personality); + + ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n", + fwver->gen_major, fwver->gen_minor, fwver->ph_major, + fwver->ph_minor, fwver->cust_id, fwver->build_num); + + for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) { + if (mrioc->facts.protocol_flags & + mpi3mr_protocols[i].protocol) { + bytes_written += scnprintf(protocol + bytes_written, + sizeof(protocol) - bytes_written, "%s%s", + bytes_written ? 
"," : "", + mpi3mr_protocols[i].name); + } + } + + bytes_written = 0; + for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) { + if (mrioc->facts.protocol_flags & + mpi3mr_capabilities[i].capability) { + bytes_written += scnprintf(capabilities + bytes_written, + sizeof(capabilities) - bytes_written, "%s%s", + bytes_written ? "," : "", + mpi3mr_capabilities[i].name); + } + } + + ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n", + protocol, capabilities); +} + +/** + * mpi3mr_cleanup_resources - Free PCI resources + * @mrioc: Adapter instance reference + * + * Unmap PCI device memory and disable PCI device. + * + * Return: 0 on success and non-zero on failure. + */ +void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc) +{ + struct pci_dev *pdev = mrioc->pdev; + + mpi3mr_cleanup_isr(mrioc); + + if (mrioc->sysif_regs) { + iounmap((void __iomem *)mrioc->sysif_regs); + mrioc->sysif_regs = NULL; + } + + if (pci_is_enabled(pdev)) { + if (mrioc->bars) + pci_release_selected_regions(pdev, mrioc->bars); + pci_disable_device(pdev); + } +} + +/** + * mpi3mr_setup_resources - Enable PCI resources + * @mrioc: Adapter instance reference + * + * Enable PCI device memory, MSI-x registers and set DMA mask. + * + * Return: 0 on success and non-zero on failure. + */ +int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc) +{ + struct pci_dev *pdev = mrioc->pdev; + u32 memap_sz = 0; + int i, retval = 0, capb = 0; + u16 message_control; + u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask : + ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32)); + + if (pci_enable_device_mem(pdev)) { + ioc_err(mrioc, "pci_enable_device_mem: failed\n"); + retval = -ENODEV; + goto out_failed; + } + + capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (!capb) { + ioc_err(mrioc, "Unable to find MSI-X Capabilities\n"); + retval = -ENODEV; + goto out_failed; + } + mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); + + if (pci_request_selected_regions(pdev, mrioc->bars, + mrioc->driver_name)) { + ioc_err(mrioc, "pci_request_selected_regions: failed\n"); + retval = -ENODEV; + goto out_failed; + } + + for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) { + if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { + mrioc->sysif_regs_phys = pci_resource_start(pdev, i); + memap_sz = pci_resource_len(pdev, i); + mrioc->sysif_regs = + ioremap(mrioc->sysif_regs_phys, memap_sz); + break; + } + } + + pci_set_master(pdev); + + retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask); + if (retval) { + if (dma_mask != DMA_BIT_MASK(32)) { + ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n"); + dma_mask = DMA_BIT_MASK(32); + retval = dma_set_mask_and_coherent(&pdev->dev, + dma_mask); + } + if (retval) { + mrioc->dma_mask = 0; + ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n"); + goto out_failed; + } + } + mrioc->dma_mask = dma_mask; + + if (!mrioc->sysif_regs) { + ioc_err(mrioc, + "Unable to map adapter memory or resource not found\n"); + retval = -EINVAL; + goto out_failed; + } + + pci_read_config_word(pdev, capb + 2, &message_control); + mrioc->msix_count = (message_control & 0x3FF) + 1; + + pci_save_state(pdev); + + pci_set_drvdata(pdev, mrioc->shost); + + mpi3mr_ioc_disable_intr(mrioc); + + ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n", + (unsigned long long)mrioc->sysif_regs_phys, + mrioc->sysif_regs, memap_sz); + ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n", + mrioc->msix_count); + + if (!reset_devices && poll_queues > 0) + mrioc->requested_poll_qcount = min_t(int, poll_queues, + 
mrioc->msix_count - 2);
+ return retval;
+
+out_failed:
+ mpi3mr_cleanup_resources(mrioc);
+ return retval;
+}
+
+/**
+ * mpi3mr_enable_events - Enable required events
+ * @mrioc: Adapter instance reference
+ *
+ * This routine unmasks the events required by the driver by
+ * sending the appropriate event mask bitmap through an event
+ * notification request.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 i;
+
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mrioc->event_masks[i] = -1;
+
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
+
+ retval = mpi3mr_issue_event_notification(mrioc);
+ if (retval)
+ ioc_err(mrioc, "failed to issue event notification %d\n",
+ retval);
+ return retval;
+}
+
+/**
+ * mpi3mr_init_ioc - Initialize the controller
+ * @mrioc: Adapter instance reference
+ *
+ * This is the controller initialization routine, executed either
+ * after soft reset or from pci probe callback.
+ * Set up the required resources, memory map the controller
+ * registers, create admin and operational reply queue pairs,
+ * allocate required memory for reply pool, sense buffer pool,
+ * issue IOC init request to the firmware, unmask the events and
+ * issue port enable to discover SAS/SATA/NVMe devices and RAID
+ * volumes.
+ *
+ * Return: 0 on success and non-zero on failure.
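+ *
+ * Condensed ordering of the major steps performed below (error handling
+ * and the retry loop omitted):
+ *
+ *      mpi3mr_bring_ioc_ready(mrioc);
+ *      mpi3mr_setup_isr(mrioc, 1);
+ *      mpi3mr_issue_iocfacts(mrioc, &facts_data);
+ *      mpi3mr_alloc_reply_sense_bufs(mrioc);
+ *      mpi3mr_issue_iocinit(mrioc);
+ *      mpi3mr_setup_isr(mrioc, 0);
+ *      mpi3mr_create_op_queues(mrioc);
+ *      mpi3mr_enable_events(mrioc);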
+ */ +int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc) +{ + int retval = 0; + u8 retry = 0; + struct mpi3_ioc_facts_data facts_data; + u32 sz; + +retry_init: + retval = mpi3mr_bring_ioc_ready(mrioc); + if (retval) { + ioc_err(mrioc, "Failed to bring ioc ready: error %d\n", + retval); + goto out_failed_noretry; + } + + retval = mpi3mr_setup_isr(mrioc, 1); + if (retval) { + ioc_err(mrioc, "Failed to setup ISR error %d\n", + retval); + goto out_failed_noretry; + } + + retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); + if (retval) { + ioc_err(mrioc, "Failed to Issue IOC Facts %d\n", + retval); + goto out_failed; + } + + mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD; + mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512; + mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group; + atomic_set(&mrioc->pend_large_data_sz, 0); + + if (reset_devices) + mrioc->max_host_ios = min_t(int, mrioc->max_host_ios, + MPI3MR_HOST_IOS_KDUMP); + + if (!(mrioc->facts.ioc_capabilities & + MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED)) { + mrioc->sas_transport_enabled = 1; + mrioc->scsi_device_channel = 1; + mrioc->shost->max_channel = 1; + mrioc->shost->transportt = mpi3mr_transport_template; + } + + mrioc->reply_sz = mrioc->facts.reply_sz; + + retval = mpi3mr_check_reset_dma_mask(mrioc); + if (retval) { + ioc_err(mrioc, "Resetting dma mask failed %d\n", + retval); + goto out_failed_noretry; + } + + mpi3mr_print_ioc_info(mrioc); + + if (!mrioc->cfg_page) { + dprint_init(mrioc, "allocating config page buffers\n"); + mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ; + mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev, + mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL); + if (!mrioc->cfg_page) { + retval = -1; + goto out_failed_noretry; + } + } + + if (!mrioc->init_cmds.reply) { + retval = mpi3mr_alloc_reply_sense_bufs(mrioc); + if (retval) { + ioc_err(mrioc, + "%s :Failed to allocated reply sense buffers %d\n", + __func__, retval); + goto out_failed_noretry; + } + } + + if (!mrioc->chain_sgl_list) { + retval = mpi3mr_alloc_chain_bufs(mrioc); + if (retval) { + ioc_err(mrioc, "Failed to allocated chain buffers %d\n", + retval); + goto out_failed_noretry; + } + } + + retval = mpi3mr_issue_iocinit(mrioc); + if (retval) { + ioc_err(mrioc, "Failed to Issue IOC Init %d\n", + retval); + goto out_failed; + } + + retval = mpi3mr_print_pkg_ver(mrioc); + if (retval) { + ioc_err(mrioc, "failed to get package version\n"); + goto out_failed; + } + + retval = mpi3mr_setup_isr(mrioc, 0); + if (retval) { + ioc_err(mrioc, "Failed to re-setup ISR, error %d\n", + retval); + goto out_failed_noretry; + } + + retval = mpi3mr_create_op_queues(mrioc); + if (retval) { + ioc_err(mrioc, "Failed to create OpQueues error %d\n", + retval); + goto out_failed; + } + + if (!mrioc->pel_seqnum_virt) { + dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n"); + mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq); + mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev, + mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma, + GFP_KERNEL); + if (!mrioc->pel_seqnum_virt) { + retval = -ENOMEM; + goto out_failed_noretry; + } + } + + if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) { + dprint_init(mrioc, "allocating memory for throttle groups\n"); + sz = sizeof(struct mpi3mr_throttle_group_info); + mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL); + if (!mrioc->throttle_groups) { + retval = -1; + goto out_failed_noretry; + } + } + + retval = 
mpi3mr_enable_events(mrioc); + if (retval) { + ioc_err(mrioc, "failed to enable events %d\n", + retval); + goto out_failed; + } + + ioc_info(mrioc, "controller initialization completed successfully\n"); + return retval; +out_failed: + if (retry < 2) { + retry++; + ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n", + retry); + mpi3mr_memset_buffers(mrioc); + goto retry_init; + } + retval = -1; +out_failed_noretry: + ioc_err(mrioc, "controller initialization failed\n"); + mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, + MPI3MR_RESET_FROM_CTLR_CLEANUP); + mrioc->unrecoverable = 1; + return retval; +} + +/** + * mpi3mr_reinit_ioc - Re-Initialize the controller + * @mrioc: Adapter instance reference + * @is_resume: Called from resume or reset path + * + * This the controller re-initialization routine, executed from + * the soft reset handler or resume callback. Creates + * operational reply queue pairs, allocate required memory for + * reply pool, sense buffer pool, issue IOC init request to the + * firmware, unmask the events and issue port enable to discover + * SAS/SATA/NVMe devices and RAID volumes. + * + * Return: 0 on success and non-zero on failure. + */ +int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume) +{ + int retval = 0; + u8 retry = 0; + struct mpi3_ioc_facts_data facts_data; + u32 pe_timeout, ioc_status; + +retry_init: + pe_timeout = + (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL); + + dprint_reset(mrioc, "bringing up the controller to ready state\n"); + retval = mpi3mr_bring_ioc_ready(mrioc); + if (retval) { + ioc_err(mrioc, "failed to bring to ready state\n"); + goto out_failed_noretry; + } + + if (is_resume) { + dprint_reset(mrioc, "setting up single ISR\n"); + retval = mpi3mr_setup_isr(mrioc, 1); + if (retval) { + ioc_err(mrioc, "failed to setup ISR\n"); + goto out_failed_noretry; + } + } else + mpi3mr_ioc_enable_intr(mrioc); + + dprint_reset(mrioc, "getting ioc_facts\n"); + retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); + if (retval) { + ioc_err(mrioc, "failed to get ioc_facts\n"); + goto out_failed; + } + + dprint_reset(mrioc, "validating ioc_facts\n"); + retval = mpi3mr_revalidate_factsdata(mrioc); + if (retval) { + ioc_err(mrioc, "failed to revalidate ioc_facts data\n"); + goto out_failed_noretry; + } + + mpi3mr_print_ioc_info(mrioc); + + dprint_reset(mrioc, "sending ioc_init\n"); + retval = mpi3mr_issue_iocinit(mrioc); + if (retval) { + ioc_err(mrioc, "failed to send ioc_init\n"); + goto out_failed; + } + + dprint_reset(mrioc, "getting package version\n"); + retval = mpi3mr_print_pkg_ver(mrioc); + if (retval) { + ioc_err(mrioc, "failed to get package version\n"); + goto out_failed; + } + + if (is_resume) { + dprint_reset(mrioc, "setting up multiple ISR\n"); + retval = mpi3mr_setup_isr(mrioc, 0); + if (retval) { + ioc_err(mrioc, "failed to re-setup ISR\n"); + goto out_failed_noretry; + } + } + + dprint_reset(mrioc, "creating operational queue pairs\n"); + retval = mpi3mr_create_op_queues(mrioc); + if (retval) { + ioc_err(mrioc, "failed to create operational queue pairs\n"); + goto out_failed; + } + + if (!mrioc->pel_seqnum_virt) { + dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n"); + mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq); + mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev, + mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma, + GFP_KERNEL); + if (!mrioc->pel_seqnum_virt) { + retval = -ENOMEM; + goto out_failed_noretry; + } + } + + if (mrioc->shost->nr_hw_queues > 
mrioc->num_op_reply_q) { + ioc_err(mrioc, + "cannot create minimum number of operational queues expected:%d created:%d\n", + mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q); + retval = -1; + goto out_failed_noretry; + } + + dprint_reset(mrioc, "enabling events\n"); + retval = mpi3mr_enable_events(mrioc); + if (retval) { + ioc_err(mrioc, "failed to enable events\n"); + goto out_failed; + } + + mrioc->device_refresh_on = 1; + mpi3mr_add_event_wait_for_device_refresh(mrioc); + + ioc_info(mrioc, "sending port enable\n"); + retval = mpi3mr_issue_port_enable(mrioc, 1); + if (retval) { + ioc_err(mrioc, "failed to issue port enable\n"); + goto out_failed; + } + do { + ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL); + if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED) + break; + if (!pci_device_is_present(mrioc->pdev)) + mrioc->unrecoverable = 1; + if (mrioc->unrecoverable) { + retval = -1; + goto out_failed_noretry; + } + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || + (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { + mpi3mr_print_fault_info(mrioc); + mrioc->init_cmds.is_waiting = 0; + mrioc->init_cmds.callback = NULL; + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + goto out_failed; + } + } while (--pe_timeout); + + if (!pe_timeout) { + ioc_err(mrioc, "port enable timed out\n"); + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_PE_TIMEOUT); + mrioc->init_cmds.is_waiting = 0; + mrioc->init_cmds.callback = NULL; + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + goto out_failed; + } else if (mrioc->scan_failed) { + ioc_err(mrioc, + "port enable failed with status=0x%04x\n", + mrioc->scan_failed); + } else + ioc_info(mrioc, "port enable completed successfully\n"); + + ioc_info(mrioc, "controller %s completed successfully\n", + (is_resume)?"resume":"re-initialization"); + return retval; +out_failed: + if (retry < 2) { + retry++; + ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n", + (is_resume)?"resume":"re-initialization", retry); + mpi3mr_memset_buffers(mrioc); + goto retry_init; + } + retval = -1; +out_failed_noretry: + ioc_err(mrioc, "controller %s is failed\n", + (is_resume)?"resume":"re-initialization"); + mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, + MPI3MR_RESET_FROM_CTLR_CLEANUP); + mrioc->unrecoverable = 1; + return retval; +} + +/** + * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's + * segments + * @mrioc: Adapter instance reference + * @qidx: Operational reply queue index + * + * Return: Nothing. + */ +static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) +{ + struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; + struct segments *segments; + int i, size; + + if (!op_reply_q->q_segments) + return; + + size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz; + segments = op_reply_q->q_segments; + for (i = 0; i < op_reply_q->num_segments; i++) + memset(segments[i].segment, 0, size); +} + +/** + * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's + * segments + * @mrioc: Adapter instance reference + * @qidx: Operational request queue index + * + * Return: Nothing. 
+ */ +static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) +{ + struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; + struct segments *segments; + int i, size; + + if (!op_req_q->q_segments) + return; + + size = op_req_q->segment_qd * mrioc->facts.op_req_sz; + segments = op_req_q->q_segments; + for (i = 0; i < op_req_q->num_segments; i++) + memset(segments[i].segment, 0, size); +} + +/** + * mpi3mr_memset_buffers - memset memory for a controller + * @mrioc: Adapter instance reference + * + * clear all the memory allocated for a controller, typically + * called post reset to reuse the memory allocated during the + * controller init. + * + * Return: Nothing. + */ +void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc) +{ + u16 i; + struct mpi3mr_throttle_group_info *tg; + + mrioc->change_count = 0; + mrioc->active_poll_qcount = 0; + mrioc->default_qcount = 0; + if (mrioc->admin_req_base) + memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz); + if (mrioc->admin_reply_base) + memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz); + atomic_set(&mrioc->admin_reply_q_in_use, 0); + + if (mrioc->init_cmds.reply) { + memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply)); + memset(mrioc->bsg_cmds.reply, 0, + sizeof(*mrioc->bsg_cmds.reply)); + memset(mrioc->host_tm_cmds.reply, 0, + sizeof(*mrioc->host_tm_cmds.reply)); + memset(mrioc->pel_cmds.reply, 0, + sizeof(*mrioc->pel_cmds.reply)); + memset(mrioc->pel_abort_cmd.reply, 0, + sizeof(*mrioc->pel_abort_cmd.reply)); + memset(mrioc->transport_cmds.reply, 0, + sizeof(*mrioc->transport_cmds.reply)); + for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) + memset(mrioc->dev_rmhs_cmds[i].reply, 0, + sizeof(*mrioc->dev_rmhs_cmds[i].reply)); + for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) + memset(mrioc->evtack_cmds[i].reply, 0, + sizeof(*mrioc->evtack_cmds[i].reply)); + bitmap_clear(mrioc->removepend_bitmap, 0, + mrioc->dev_handle_bitmap_bits); + bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD); + bitmap_clear(mrioc->evtack_cmds_bitmap, 0, + MPI3MR_NUM_EVTACKCMD); + } + + for (i = 0; i < mrioc->num_queues; i++) { + mrioc->op_reply_qinfo[i].qid = 0; + mrioc->op_reply_qinfo[i].ci = 0; + mrioc->op_reply_qinfo[i].num_replies = 0; + mrioc->op_reply_qinfo[i].ephase = 0; + atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0); + atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0); + mpi3mr_memset_op_reply_q_buffers(mrioc, i); + + mrioc->req_qinfo[i].ci = 0; + mrioc->req_qinfo[i].pi = 0; + mrioc->req_qinfo[i].num_requests = 0; + mrioc->req_qinfo[i].qid = 0; + mrioc->req_qinfo[i].reply_qid = 0; + spin_lock_init(&mrioc->req_qinfo[i].q_lock); + mpi3mr_memset_op_req_q_buffers(mrioc, i); + } + + atomic_set(&mrioc->pend_large_data_sz, 0); + if (mrioc->throttle_groups) { + tg = mrioc->throttle_groups; + for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) { + tg->id = 0; + tg->fw_qd = 0; + tg->modified_qd = 0; + tg->io_divert = 0; + tg->need_qd_reduction = 0; + tg->high = 0; + tg->low = 0; + tg->qd_reduction = 0; + atomic_set(&tg->pend_large_data_sz, 0); + } + } +} + +/** + * mpi3mr_free_mem - Free memory allocated for a controller + * @mrioc: Adapter instance reference + * + * Free all the memory allocated for a controller. + * + * Return: Nothing. 
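+ *
+ * Each DMA pool is torn down in the same shape, condensed from the body
+ * below: free the carved-out buffer, destroy the pool, then clear the
+ * pointers so a repeat call is harmless:
+ *
+ *      if (mrioc->sense_buf_pool) {
+ *              if (mrioc->sense_buf)
+ *                      dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
+ *                          mrioc->sense_buf_dma);
+ *              dma_pool_destroy(mrioc->sense_buf_pool);
+ *              mrioc->sense_buf = NULL;
+ *              mrioc->sense_buf_pool = NULL;
+ *      }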
+ */ +void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc) +{ + u16 i; + struct mpi3mr_intr_info *intr_info; + + mpi3mr_free_enclosure_list(mrioc); + + if (mrioc->sense_buf_pool) { + if (mrioc->sense_buf) + dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf, + mrioc->sense_buf_dma); + dma_pool_destroy(mrioc->sense_buf_pool); + mrioc->sense_buf = NULL; + mrioc->sense_buf_pool = NULL; + } + if (mrioc->sense_buf_q_pool) { + if (mrioc->sense_buf_q) + dma_pool_free(mrioc->sense_buf_q_pool, + mrioc->sense_buf_q, mrioc->sense_buf_q_dma); + dma_pool_destroy(mrioc->sense_buf_q_pool); + mrioc->sense_buf_q = NULL; + mrioc->sense_buf_q_pool = NULL; + } + + if (mrioc->reply_buf_pool) { + if (mrioc->reply_buf) + dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf, + mrioc->reply_buf_dma); + dma_pool_destroy(mrioc->reply_buf_pool); + mrioc->reply_buf = NULL; + mrioc->reply_buf_pool = NULL; + } + if (mrioc->reply_free_q_pool) { + if (mrioc->reply_free_q) + dma_pool_free(mrioc->reply_free_q_pool, + mrioc->reply_free_q, mrioc->reply_free_q_dma); + dma_pool_destroy(mrioc->reply_free_q_pool); + mrioc->reply_free_q = NULL; + mrioc->reply_free_q_pool = NULL; + } + + for (i = 0; i < mrioc->num_op_req_q; i++) + mpi3mr_free_op_req_q_segments(mrioc, i); + + for (i = 0; i < mrioc->num_op_reply_q; i++) + mpi3mr_free_op_reply_q_segments(mrioc, i); + + for (i = 0; i < mrioc->intr_info_count; i++) { + intr_info = mrioc->intr_info + i; + intr_info->op_reply_q = NULL; + } + + kfree(mrioc->req_qinfo); + mrioc->req_qinfo = NULL; + mrioc->num_op_req_q = 0; + + kfree(mrioc->op_reply_qinfo); + mrioc->op_reply_qinfo = NULL; + mrioc->num_op_reply_q = 0; + + kfree(mrioc->init_cmds.reply); + mrioc->init_cmds.reply = NULL; + + kfree(mrioc->bsg_cmds.reply); + mrioc->bsg_cmds.reply = NULL; + + kfree(mrioc->host_tm_cmds.reply); + mrioc->host_tm_cmds.reply = NULL; + + kfree(mrioc->pel_cmds.reply); + mrioc->pel_cmds.reply = NULL; + + kfree(mrioc->pel_abort_cmd.reply); + mrioc->pel_abort_cmd.reply = NULL; + + for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) { + kfree(mrioc->evtack_cmds[i].reply); + mrioc->evtack_cmds[i].reply = NULL; + } + + bitmap_free(mrioc->removepend_bitmap); + mrioc->removepend_bitmap = NULL; + + bitmap_free(mrioc->devrem_bitmap); + mrioc->devrem_bitmap = NULL; + + bitmap_free(mrioc->evtack_cmds_bitmap); + mrioc->evtack_cmds_bitmap = NULL; + + bitmap_free(mrioc->chain_bitmap); + mrioc->chain_bitmap = NULL; + + kfree(mrioc->transport_cmds.reply); + mrioc->transport_cmds.reply = NULL; + + for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) { + kfree(mrioc->dev_rmhs_cmds[i].reply); + mrioc->dev_rmhs_cmds[i].reply = NULL; + } + + if (mrioc->chain_buf_pool) { + for (i = 0; i < mrioc->chain_buf_count; i++) { + if (mrioc->chain_sgl_list[i].addr) { + dma_pool_free(mrioc->chain_buf_pool, + mrioc->chain_sgl_list[i].addr, + mrioc->chain_sgl_list[i].dma_addr); + mrioc->chain_sgl_list[i].addr = NULL; + } + } + dma_pool_destroy(mrioc->chain_buf_pool); + mrioc->chain_buf_pool = NULL; + } + + kfree(mrioc->chain_sgl_list); + mrioc->chain_sgl_list = NULL; + + if (mrioc->admin_reply_base) { + dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz, + mrioc->admin_reply_base, mrioc->admin_reply_dma); + mrioc->admin_reply_base = NULL; + } + if (mrioc->admin_req_base) { + dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz, + mrioc->admin_req_base, mrioc->admin_req_dma); + mrioc->admin_req_base = NULL; + } + if (mrioc->cfg_page) { + dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz, + mrioc->cfg_page, mrioc->cfg_page_dma); + 
mrioc->cfg_page = NULL; + } + if (mrioc->pel_seqnum_virt) { + dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz, + mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma); + mrioc->pel_seqnum_virt = NULL; + } + + kfree(mrioc->throttle_groups); + mrioc->throttle_groups = NULL; + + kfree(mrioc->logdata_buf); + mrioc->logdata_buf = NULL; + +} + +/** + * mpi3mr_issue_ioc_shutdown - shutdown controller + * @mrioc: Adapter instance reference + * + * Send shutodwn notification to the controller and wait for the + * shutdown_timeout for it to be completed. + * + * Return: Nothing. + */ +static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc) +{ + u32 ioc_config, ioc_status; + u8 retval = 1; + u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10; + + ioc_info(mrioc, "Issuing shutdown Notification\n"); + if (mrioc->unrecoverable) { + ioc_warn(mrioc, + "IOC is unrecoverable shutdown is not issued\n"); + return; + } + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK) + == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) { + ioc_info(mrioc, "shutdown already in progress\n"); + return; + } + + ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); + ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL; + ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ; + + writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); + + if (mrioc->facts.shutdown_timeout) + timeout = mrioc->facts.shutdown_timeout * 10; + + do { + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK) + == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) { + retval = 0; + break; + } + msleep(100); + } while (--timeout); + + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); + + if (retval) { + if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK) + == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) + ioc_warn(mrioc, + "shutdown still in progress after timeout\n"); + } + + ioc_info(mrioc, + "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n", + (!retval) ? "successful" : "failed", ioc_status, + ioc_config); +} + +/** + * mpi3mr_cleanup_ioc - Cleanup controller + * @mrioc: Adapter instance reference + * + * controller cleanup handler, Message unit reset or soft reset + * and shutdown notification is issued to the controller. + * + * Return: Nothing. + */ +void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc) +{ + enum mpi3mr_iocstate ioc_state; + + dprint_exit(mrioc, "cleaning up the controller\n"); + mpi3mr_ioc_disable_intr(mrioc); + + ioc_state = mpi3mr_get_iocstate(mrioc); + + if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) && + (ioc_state == MRIOC_STATE_READY)) { + if (mpi3mr_issue_and_process_mur(mrioc, + MPI3MR_RESET_FROM_CTLR_CLEANUP)) + mpi3mr_issue_reset(mrioc, + MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, + MPI3MR_RESET_FROM_MUR_FAILURE); + mpi3mr_issue_ioc_shutdown(mrioc); + } + dprint_exit(mrioc, "controller cleanup completed\n"); +} + +/** + * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command + * @mrioc: Adapter instance reference + * @cmdptr: Internal command tracker + * + * Complete an internal driver commands with state indicating it + * is completed due to reset. + * + * Return: Nothing. 
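+ *
+ * Note: issuers of these internal commands normally block on the
+ * command's completion and then inspect the state flags; the sketch
+ * below is illustrative only (not lifted verbatim from the driver,
+ * and cmd and timeout are placeholders) and shows how a waiter tells
+ * a reset-flushed command apart from a normal completion:
+ *
+ *   wait_for_completion_timeout(&cmd->done, (timeout * HZ));
+ *   if (cmd->state & MPI3MR_CMD_RESET)
+ *           return -1;   // flushed by mpi3mr_drv_cmd_comp_reset()
+ *   if (!(cmd->state & MPI3MR_CMD_COMPLETE))
+ *           return -1;   // genuine timeout, no reply arrived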
+ */ +static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc, + struct mpi3mr_drv_cmd *cmdptr) +{ + if (cmdptr->state & MPI3MR_CMD_PENDING) { + cmdptr->state |= MPI3MR_CMD_RESET; + cmdptr->state &= ~MPI3MR_CMD_PENDING; + if (cmdptr->is_waiting) { + complete(&cmdptr->done); + cmdptr->is_waiting = 0; + } else if (cmdptr->callback) + cmdptr->callback(mrioc, cmdptr); + } +} + +/** + * mpi3mr_flush_drv_cmds - Flush internaldriver commands + * @mrioc: Adapter instance reference + * + * Flush all internal driver commands post reset + * + * Return: Nothing. + */ +void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc) +{ + struct mpi3mr_drv_cmd *cmdptr; + u8 i; + + cmdptr = &mrioc->init_cmds; + mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); + + cmdptr = &mrioc->cfg_cmds; + mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); + + cmdptr = &mrioc->bsg_cmds; + mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); + cmdptr = &mrioc->host_tm_cmds; + mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); + + for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) { + cmdptr = &mrioc->dev_rmhs_cmds[i]; + mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); + } + + for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) { + cmdptr = &mrioc->evtack_cmds[i]; + mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); + } + + cmdptr = &mrioc->pel_cmds; + mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); + + cmdptr = &mrioc->pel_abort_cmd; + mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); + + cmdptr = &mrioc->transport_cmds; + mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); +} + +/** + * mpi3mr_pel_wait_post - Issue PEL Wait + * @mrioc: Adapter instance reference + * @drv_cmd: Internal command tracker + * + * Issue PEL Wait MPI request through admin queue and return. + * + * Return: Nothing. + */ +static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc, + struct mpi3mr_drv_cmd *drv_cmd) +{ + struct mpi3_pel_req_action_wait pel_wait; + + mrioc->pel_abort_requested = false; + + memset(&pel_wait, 0, sizeof(pel_wait)); + drv_cmd->state = MPI3MR_CMD_PENDING; + drv_cmd->is_waiting = 0; + drv_cmd->callback = mpi3mr_pel_wait_complete; + drv_cmd->ioc_status = 0; + drv_cmd->ioc_loginfo = 0; + pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT); + pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; + pel_wait.action = MPI3_PEL_ACTION_WAIT; + pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum); + pel_wait.locale = cpu_to_le16(mrioc->pel_locale); + pel_wait.class = cpu_to_le16(mrioc->pel_class); + pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT; + dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n", + mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale); + + if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) { + dprint_bsg_err(mrioc, + "Issuing PELWait: Admin post failed\n"); + drv_cmd->state = MPI3MR_CMD_NOTUSED; + drv_cmd->callback = NULL; + drv_cmd->retry_count = 0; + mrioc->pel_enabled = false; + } +} + +/** + * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number + * @mrioc: Adapter instance reference + * @drv_cmd: Internal command tracker + * + * Issue PEL get sequence number MPI request through admin queue + * and return. + * + * Return: 0 on success, non-zero on failure. 
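+ *
+ * For context, the PEL machinery ping-pongs between this request and
+ * the PEL wait request; the assumed overall flow (the initial trigger
+ * lives in the bsg/application interface, outside this file) is:
+ *
+ *   mrioc->pel_cmds.retry_count = 0;
+ *   mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
+ *   // mpi3mr_pel_get_seqnum_complete() records newest + 1 and posts
+ *   // mpi3mr_pel_wait_post(); mpi3mr_pel_wait_complete() then fires
+ *   // on a new log entry, notifies the application and requests the
+ *   // sequence number again, restarting the cycle.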
+ */ +int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc, + struct mpi3mr_drv_cmd *drv_cmd) +{ + struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req; + u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; + int retval = 0; + + memset(&pel_getseq_req, 0, sizeof(pel_getseq_req)); + mrioc->pel_cmds.state = MPI3MR_CMD_PENDING; + mrioc->pel_cmds.is_waiting = 0; + mrioc->pel_cmds.ioc_status = 0; + mrioc->pel_cmds.ioc_loginfo = 0; + mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete; + pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT); + pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; + pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM; + mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags, + mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma); + + retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req, + sizeof(pel_getseq_req), 0); + if (retval) { + if (drv_cmd) { + drv_cmd->state = MPI3MR_CMD_NOTUSED; + drv_cmd->callback = NULL; + drv_cmd->retry_count = 0; + } + mrioc->pel_enabled = false; + } + + return retval; +} + +/** + * mpi3mr_pel_wait_complete - PELWait Completion callback + * @mrioc: Adapter instance reference + * @drv_cmd: Internal command tracker + * + * This is a callback handler for the PELWait request and + * firmware completes a PELWait request when it is aborted or a + * new PEL entry is available. This sends AEN to the application + * and if the PELwait completion is not due to PELAbort then + * this will send a request for new PEL Sequence number + * + * Return: Nothing. + */ +static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc, + struct mpi3mr_drv_cmd *drv_cmd) +{ + struct mpi3_pel_reply *pel_reply = NULL; + u16 ioc_status, pe_log_status; + bool do_retry = false; + + if (drv_cmd->state & MPI3MR_CMD_RESET) + goto cleanup_drv_cmd; + + ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK; + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", + __func__, ioc_status, drv_cmd->ioc_loginfo); + dprint_bsg_err(mrioc, + "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n", + ioc_status, drv_cmd->ioc_loginfo); + do_retry = true; + } + + if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID) + pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply; + + if (!pel_reply) { + dprint_bsg_err(mrioc, + "pel_wait: failed due to no reply\n"); + goto out_failed; + } + + pe_log_status = le16_to_cpu(pel_reply->pe_log_status); + if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) && + (pe_log_status != MPI3_PEL_STATUS_ABORTED)) { + ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n", + __func__, pe_log_status); + dprint_bsg_err(mrioc, + "pel_wait: failed due to pel_log_status(0x%04x)\n", + pe_log_status); + do_retry = true; + } + + if (do_retry) { + if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) { + drv_cmd->retry_count++; + dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n", + drv_cmd->retry_count); + mpi3mr_pel_wait_post(mrioc, drv_cmd); + return; + } + dprint_bsg_err(mrioc, + "pel_wait: failed after all retries(%d)\n", + drv_cmd->retry_count); + goto out_failed; + } + atomic64_inc(&event_counter); + if (!mrioc->pel_abort_requested) { + mrioc->pel_cmds.retry_count = 0; + mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds); + } + + return; +out_failed: + mrioc->pel_enabled = false; +cleanup_drv_cmd: + drv_cmd->state = MPI3MR_CMD_NOTUSED; + drv_cmd->callback = NULL; + drv_cmd->retry_count = 0; +} + +/** + * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback + * 
@mrioc: Adapter instance reference + * @drv_cmd: Internal command tracker + * + * This is a callback handler for the PEL get sequence number + * request and a new PEL wait request will be issued to the + * firmware from this + * + * Return: Nothing. + */ +void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc, + struct mpi3mr_drv_cmd *drv_cmd) +{ + struct mpi3_pel_reply *pel_reply = NULL; + struct mpi3_pel_seq *pel_seqnum_virt; + u16 ioc_status; + bool do_retry = false; + + pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt; + + if (drv_cmd->state & MPI3MR_CMD_RESET) + goto cleanup_drv_cmd; + + ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK; + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + dprint_bsg_err(mrioc, + "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n", + ioc_status, drv_cmd->ioc_loginfo); + do_retry = true; + } + + if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID) + pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply; + if (!pel_reply) { + dprint_bsg_err(mrioc, + "pel_get_seqnum: failed due to no reply\n"); + goto out_failed; + } + + if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) { + dprint_bsg_err(mrioc, + "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n", + le16_to_cpu(pel_reply->pe_log_status)); + do_retry = true; + } + + if (do_retry) { + if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) { + drv_cmd->retry_count++; + dprint_bsg_err(mrioc, + "pel_get_seqnum: retrying(%d)\n", + drv_cmd->retry_count); + mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd); + return; + } + + dprint_bsg_err(mrioc, + "pel_get_seqnum: failed after all retries(%d)\n", + drv_cmd->retry_count); + goto out_failed; + } + mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1; + drv_cmd->retry_count = 0; + mpi3mr_pel_wait_post(mrioc, drv_cmd); + + return; +out_failed: + mrioc->pel_enabled = false; +cleanup_drv_cmd: + drv_cmd->state = MPI3MR_CMD_NOTUSED; + drv_cmd->callback = NULL; + drv_cmd->retry_count = 0; +} + +/** + * mpi3mr_soft_reset_handler - Reset the controller + * @mrioc: Adapter instance reference + * @reset_reason: Reset reason code + * @snapdump: Flag to generate snapdump in firmware or not + * + * This is an handler for recovering controller by issuing soft + * reset are diag fault reset. This is a blocking function and + * when one reset is executed if any other resets they will be + * blocked. All BSG requests will be blocked during the reset. If + * controller reset is successful then the controller will be + * reinitalized, otherwise the controller will be marked as not + * recoverable + * + * In snapdump bit is set, the controller is issued with diag + * fault reset so that the firmware can create a snap dump and + * post that the firmware will result in F000 fault and the + * driver will issue soft reset to recover from that. + * + * Return: 0 on success, non-zero on failure. 
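+ *
+ * A typical invocation (illustrative only; real callers pass the
+ * reset reason raised by the fault watchdog, command timeouts or the
+ * bsg interface) is:
+ *
+ *   // request a firmware snapdump before recovering
+ *   if (mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FAULT_WATCH, 1))
+ *           ioc_err(mrioc, "controller is marked unrecoverable\n");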
+ */ +int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, + u32 reset_reason, u8 snapdump) +{ + int retval = 0, i; + unsigned long flags; + u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; + + /* Block the reset handler until diag save in progress*/ + dprint_reset(mrioc, + "soft_reset_handler: check and block on diagsave_timeout(%d)\n", + mrioc->diagsave_timeout); + while (mrioc->diagsave_timeout) + ssleep(1); + /* + * Block new resets until the currently executing one is finished and + * return the status of the existing reset for all blocked resets + */ + dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n"); + if (!mutex_trylock(&mrioc->reset_mutex)) { + ioc_info(mrioc, + "controller reset triggered by %s is blocked due to another reset in progress\n", + mpi3mr_reset_rc_name(reset_reason)); + do { + ssleep(1); + } while (mrioc->reset_in_progress == 1); + ioc_info(mrioc, + "returning previous reset result(%d) for the reset triggered by %s\n", + mrioc->prev_reset_result, + mpi3mr_reset_rc_name(reset_reason)); + return mrioc->prev_reset_result; + } + ioc_info(mrioc, "controller reset is triggered by %s\n", + mpi3mr_reset_rc_name(reset_reason)); + + mrioc->device_refresh_on = 0; + mrioc->reset_in_progress = 1; + mrioc->stop_bsgs = 1; + mrioc->prev_reset_result = -1; + + if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) && + (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) && + (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) { + for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) + mrioc->event_masks[i] = -1; + + dprint_reset(mrioc, "soft_reset_handler: masking events\n"); + mpi3mr_issue_event_notification(mrioc); + } + + mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT); + + mpi3mr_ioc_disable_intr(mrioc); + + if (snapdump) { + mpi3mr_set_diagsave(mrioc); + retval = mpi3mr_issue_reset(mrioc, + MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason); + if (!retval) { + do { + host_diagnostic = + readl(&mrioc->sysif_regs->host_diagnostic); + if (!(host_diagnostic & + MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS)) + break; + msleep(100); + } while (--timeout); + } + } + + retval = mpi3mr_issue_reset(mrioc, + MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason); + if (retval) { + ioc_err(mrioc, "Failed to issue soft reset to the ioc\n"); + goto out; + } + if (mrioc->num_io_throttle_group != + mrioc->facts.max_io_throttle_group) { + ioc_err(mrioc, + "max io throttle group doesn't match old(%d), new(%d)\n", + mrioc->num_io_throttle_group, + mrioc->facts.max_io_throttle_group); + retval = -EPERM; + goto out; + } + + mpi3mr_flush_delayed_cmd_lists(mrioc); + mpi3mr_flush_drv_cmds(mrioc); + bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD); + bitmap_clear(mrioc->removepend_bitmap, 0, + mrioc->dev_handle_bitmap_bits); + bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD); + mpi3mr_flush_host_io(mrioc); + mpi3mr_cleanup_fwevt_list(mrioc); + mpi3mr_invalidate_devhandles(mrioc); + mpi3mr_free_enclosure_list(mrioc); + + if (mrioc->prepare_for_reset) { + mrioc->prepare_for_reset = 0; + mrioc->prepare_for_reset_timeout_counter = 0; + } + mpi3mr_memset_buffers(mrioc); + retval = mpi3mr_reinit_ioc(mrioc, 0); + if (retval) { + pr_err(IOCNAME "reinit after soft reset failed: reason %d\n", + mrioc->name, reset_reason); + goto out; + } + ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME); + +out: + if (!retval) { + mrioc->diagsave_timeout = 0; + mrioc->reset_in_progress = 0; + mrioc->pel_abort_requested = 0; + if 
(mrioc->pel_enabled) { + mrioc->pel_cmds.retry_count = 0; + mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds); + } + + mrioc->device_refresh_on = 0; + + mrioc->ts_update_counter = 0; + spin_lock_irqsave(&mrioc->watchdog_lock, flags); + if (mrioc->watchdog_work_q) + queue_delayed_work(mrioc->watchdog_work_q, + &mrioc->watchdog_work, + msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); + spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); + mrioc->stop_bsgs = 0; + if (mrioc->pel_enabled) + atomic64_inc(&event_counter); + } else { + mpi3mr_issue_reset(mrioc, + MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason); + mrioc->device_refresh_on = 0; + mrioc->unrecoverable = 1; + mrioc->reset_in_progress = 0; + retval = -1; + mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); + } + mrioc->prev_reset_result = retval; + mutex_unlock(&mrioc->reset_mutex); + ioc_info(mrioc, "controller reset is %s\n", + ((retval == 0) ? "successful" : "failed")); + return retval; +} + + +/** + * mpi3mr_free_config_dma_memory - free memory for config page + * @mrioc: Adapter instance reference + * @mem_desc: memory descriptor structure + * + * Check whether the size of the buffer specified by the memory + * descriptor is greater than the default page size if so then + * free the memory pointed by the descriptor. + * + * Return: Nothing. + */ +static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc, + struct dma_memory_desc *mem_desc) +{ + if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) { + dma_free_coherent(&mrioc->pdev->dev, mem_desc->size, + mem_desc->addr, mem_desc->dma_addr); + mem_desc->addr = NULL; + } +} + +/** + * mpi3mr_alloc_config_dma_memory - Alloc memory for config page + * @mrioc: Adapter instance reference + * @mem_desc: Memory descriptor to hold dma memory info + * + * This function allocates new dmaable memory or provides the + * default config page dmaable memory based on the memory size + * described by the descriptor. + * + * Return: 0 on success, non-zero on failure. + */ +static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc, + struct dma_memory_desc *mem_desc) +{ + if (mem_desc->size > mrioc->cfg_page_sz) { + mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev, + mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL); + if (!mem_desc->addr) + return -ENOMEM; + } else { + mem_desc->addr = mrioc->cfg_page; + mem_desc->dma_addr = mrioc->cfg_page_dma; + memset(mem_desc->addr, 0, mrioc->cfg_page_sz); + } + return 0; +} + +/** + * mpi3mr_post_cfg_req - Issue config requests and wait + * @mrioc: Adapter instance reference + * @cfg_req: Configuration request + * @timeout: Timeout in seconds + * @ioc_status: Pointer to return ioc status + * + * A generic function for posting MPI3 configuration request to + * the firmware. This blocks for the completion of request for + * timeout seconds and if the request times out this function + * faults the controller with proper reason code. + * + * On successful completion of the request this function returns + * appropriate ioc status from the firmware back to the caller. + * + * Return: 0 on success, non-zero on failure. 
+ */ +static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc, + struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status) +{ + int retval = 0; + + mutex_lock(&mrioc->cfg_cmds.mutex); + if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) { + retval = -1; + ioc_err(mrioc, "sending config request failed due to command in use\n"); + mutex_unlock(&mrioc->cfg_cmds.mutex); + goto out; + } + mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING; + mrioc->cfg_cmds.is_waiting = 1; + mrioc->cfg_cmds.callback = NULL; + mrioc->cfg_cmds.ioc_status = 0; + mrioc->cfg_cmds.ioc_loginfo = 0; + + cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS); + cfg_req->function = MPI3_FUNCTION_CONFIG; + + init_completion(&mrioc->cfg_cmds.done); + dprint_cfg_info(mrioc, "posting config request\n"); + if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO) + dprint_dump(cfg_req, sizeof(struct mpi3_config_request), + "mpi3_cfg_req"); + retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1); + if (retval) { + ioc_err(mrioc, "posting config request failed\n"); + goto out_unlock; + } + wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ)); + if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) { + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT); + ioc_err(mrioc, "config request timed out\n"); + retval = -1; + goto out_unlock; + } + *ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK; + if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS) + dprint_cfg_err(mrioc, + "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n", + *ioc_status, mrioc->cfg_cmds.ioc_loginfo); + +out_unlock: + mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->cfg_cmds.mutex); + +out: + return retval; +} + +/** + * mpi3mr_process_cfg_req - config page request processor + * @mrioc: Adapter instance reference + * @cfg_req: Configuration request + * @cfg_hdr: Configuration page header + * @timeout: Timeout in seconds + * @ioc_status: Pointer to return ioc status + * @cfg_buf: Memory pointer to copy config page or header + * @cfg_buf_sz: Size of the memory to get config page or header + * + * This is handler for config page read, write and config page + * header read operations. + * + * This function expects the cfg_req to be populated with page + * type, page number, action for the header read and with page + * address for all other operations. + * + * The cfg_hdr can be passed as null for reading required header + * details for read/write pages the cfg_hdr should point valid + * configuration page header. + * + * This allocates dmaable memory based on the size of the config + * buffer and set the SGE of the cfg_req. + * + * For write actions, the config page data has to be passed in + * the cfg_buf and size of the data has to be mentioned in the + * cfg_buf_sz. + * + * For read/header actions, on successful completion of the + * request with successful ioc_status the data will be copied + * into the cfg_buf limited to a minimum of actual page size and + * cfg_buf_sz + * + * + * Return: 0 on success, non-zero on failure. 
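+ *
+ * The page readers below all follow the same two step pattern; a
+ * condensed, illustrative version (error handling omitted, and
+ * page_address, page_buf and page_buf_sz are placeholders) looks
+ * like:
+ *
+ *   cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ *   mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ *       MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr));
+ *   cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ *   cfg_req.page_address = cpu_to_le32(page_address);
+ *   mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ *       MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, page_buf, page_buf_sz);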
+ */ +static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc, + struct mpi3_config_request *cfg_req, + struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status, + void *cfg_buf, u32 cfg_buf_sz) +{ + struct dma_memory_desc mem_desc; + int retval = -1; + u8 invalid_action = 0; + u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; + + memset(&mem_desc, 0, sizeof(struct dma_memory_desc)); + + if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER) + mem_desc.size = sizeof(struct mpi3_config_page_header); + else { + if (!cfg_hdr) { + ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n", + cfg_req->action, cfg_req->page_type, + cfg_req->page_number); + goto out; + } + switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) { + case MPI3_CONFIG_PAGEATTR_READ_ONLY: + if (cfg_req->action + != MPI3_CONFIG_ACTION_READ_CURRENT) + invalid_action = 1; + break; + case MPI3_CONFIG_PAGEATTR_CHANGEABLE: + if ((cfg_req->action == + MPI3_CONFIG_ACTION_READ_PERSISTENT) || + (cfg_req->action == + MPI3_CONFIG_ACTION_WRITE_PERSISTENT)) + invalid_action = 1; + break; + case MPI3_CONFIG_PAGEATTR_PERSISTENT: + default: + break; + } + if (invalid_action) { + ioc_err(mrioc, + "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n", + cfg_req->action, cfg_req->page_type, + cfg_req->page_number, cfg_hdr->page_attribute); + goto out; + } + mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4; + cfg_req->page_length = cfg_hdr->page_length; + cfg_req->page_version = cfg_hdr->page_version; + } + if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc)) + goto out; + + mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size, + mem_desc.dma_addr); + + if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) || + (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) { + memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size, + cfg_buf_sz)); + dprint_cfg_info(mrioc, "config buffer to be written\n"); + if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO) + dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf"); + } + + if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status)) + goto out; + + retval = 0; + if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) && + (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) && + (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) { + memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size, + cfg_buf_sz)); + dprint_cfg_info(mrioc, "config buffer read\n"); + if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO) + dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf"); + } + +out: + mpi3mr_free_config_dma_memory(mrioc, &mem_desc); + return retval; +} + +/** + * mpi3mr_cfg_get_dev_pg0 - Read current device page0 + * @mrioc: Adapter instance reference + * @ioc_status: Pointer to return ioc status + * @dev_pg0: Pointer to return device page 0 + * @pg_sz: Size of the memory allocated to the page pointer + * @form: The form to be used for addressing the page + * @form_spec: Form specific information like device handle + * + * This is handler for config page read for a specific device + * page0. The ioc_status has the controller returned ioc_status. + * This routine doesn't check ioc_status to decide whether the + * page read is success or not and it is the callers + * responsibility. + * + * Return: 0 on success, non-zero on failure. 
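+ *
+ * An assumed caller sketch (MPI3_DEVICE_PGAD_FORM_HANDLE is taken to
+ * be the handle-addressing form defined in the MPI3 headers) that
+ * also performs the required ioc_status check:
+ *
+ *   if (!mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ *       sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, dev_handle) &&
+ *       (ioc_status == MPI3_IOCSTATUS_SUCCESS))
+ *           // dev_pg0 now describes the device with dev_handle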
+ */ +int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, + struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec) +{ + struct mpi3_config_page_header cfg_hdr; + struct mpi3_config_request cfg_req; + u32 page_address; + + memset(dev_pg0, 0, pg_sz); + memset(&cfg_hdr, 0, sizeof(cfg_hdr)); + memset(&cfg_req, 0, sizeof(cfg_req)); + + cfg_req.function = MPI3_FUNCTION_CONFIG; + cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; + cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE; + cfg_req.page_number = 0; + cfg_req.page_address = 0; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, + MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { + ioc_err(mrioc, "device page0 header read failed\n"); + goto out_failed; + } + if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n", + *ioc_status); + goto out_failed; + } + cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; + page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) | + (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK)); + cfg_req.page_address = cpu_to_le32(page_address); + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, + MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) { + ioc_err(mrioc, "device page0 read failed\n"); + goto out_failed; + } + return 0; +out_failed: + return -1; +} + + +/** + * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0 + * @mrioc: Adapter instance reference + * @ioc_status: Pointer to return ioc status + * @phy_pg0: Pointer to return SAS Phy page 0 + * @pg_sz: Size of the memory allocated to the page pointer + * @form: The form to be used for addressing the page + * @form_spec: Form specific information like phy number + * + * This is handler for config page read for a specific SAS Phy + * page0. The ioc_status has the controller returned ioc_status. + * This routine doesn't check ioc_status to decide whether the + * page read is success or not and it is the callers + * responsibility. + * + * Return: 0 on success, non-zero on failure. 
+ */ +int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, + struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form, + u32 form_spec) +{ + struct mpi3_config_page_header cfg_hdr; + struct mpi3_config_request cfg_req; + u32 page_address; + + memset(phy_pg0, 0, pg_sz); + memset(&cfg_hdr, 0, sizeof(cfg_hdr)); + memset(&cfg_req, 0, sizeof(cfg_req)); + + cfg_req.function = MPI3_FUNCTION_CONFIG; + cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; + cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY; + cfg_req.page_number = 0; + cfg_req.page_address = 0; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, + MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { + ioc_err(mrioc, "sas phy page0 header read failed\n"); + goto out_failed; + } + if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n", + *ioc_status); + goto out_failed; + } + cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; + page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) | + (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK)); + cfg_req.page_address = cpu_to_le32(page_address); + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, + MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) { + ioc_err(mrioc, "sas phy page0 read failed\n"); + goto out_failed; + } + return 0; +out_failed: + return -1; +} + +/** + * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1 + * @mrioc: Adapter instance reference + * @ioc_status: Pointer to return ioc status + * @phy_pg1: Pointer to return SAS Phy page 1 + * @pg_sz: Size of the memory allocated to the page pointer + * @form: The form to be used for addressing the page + * @form_spec: Form specific information like phy number + * + * This is handler for config page read for a specific SAS Phy + * page1. The ioc_status has the controller returned ioc_status. + * This routine doesn't check ioc_status to decide whether the + * page read is success or not and it is the callers + * responsibility. + * + * Return: 0 on success, non-zero on failure. 
+ */ +int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status, + struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form, + u32 form_spec) +{ + struct mpi3_config_page_header cfg_hdr; + struct mpi3_config_request cfg_req; + u32 page_address; + + memset(phy_pg1, 0, pg_sz); + memset(&cfg_hdr, 0, sizeof(cfg_hdr)); + memset(&cfg_req, 0, sizeof(cfg_req)); + + cfg_req.function = MPI3_FUNCTION_CONFIG; + cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; + cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY; + cfg_req.page_number = 1; + cfg_req.page_address = 0; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, + MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { + ioc_err(mrioc, "sas phy page1 header read failed\n"); + goto out_failed; + } + if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n", + *ioc_status); + goto out_failed; + } + cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; + page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) | + (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK)); + cfg_req.page_address = cpu_to_le32(page_address); + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, + MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) { + ioc_err(mrioc, "sas phy page1 read failed\n"); + goto out_failed; + } + return 0; +out_failed: + return -1; +} + + +/** + * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0 + * @mrioc: Adapter instance reference + * @ioc_status: Pointer to return ioc status + * @exp_pg0: Pointer to return SAS Expander page 0 + * @pg_sz: Size of the memory allocated to the page pointer + * @form: The form to be used for addressing the page + * @form_spec: Form specific information like device handle + * + * This is handler for config page read for a specific SAS + * Expander page0. The ioc_status has the controller returned + * ioc_status. This routine doesn't check ioc_status to decide + * whether the page read is success or not and it is the callers + * responsibility. + * + * Return: 0 on success, non-zero on failure. 
+ */ +int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, + struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form, + u32 form_spec) +{ + struct mpi3_config_page_header cfg_hdr; + struct mpi3_config_request cfg_req; + u32 page_address; + + memset(exp_pg0, 0, pg_sz); + memset(&cfg_hdr, 0, sizeof(cfg_hdr)); + memset(&cfg_req, 0, sizeof(cfg_req)); + + cfg_req.function = MPI3_FUNCTION_CONFIG; + cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; + cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER; + cfg_req.page_number = 0; + cfg_req.page_address = 0; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, + MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { + ioc_err(mrioc, "expander page0 header read failed\n"); + goto out_failed; + } + if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n", + *ioc_status); + goto out_failed; + } + cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; + page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) | + (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK | + MPI3_SAS_EXPAND_PGAD_HANDLE_MASK))); + cfg_req.page_address = cpu_to_le32(page_address); + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, + MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) { + ioc_err(mrioc, "expander page0 read failed\n"); + goto out_failed; + } + return 0; +out_failed: + return -1; +} + +/** + * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1 + * @mrioc: Adapter instance reference + * @ioc_status: Pointer to return ioc status + * @exp_pg1: Pointer to return SAS Expander page 1 + * @pg_sz: Size of the memory allocated to the page pointer + * @form: The form to be used for addressing the page + * @form_spec: Form specific information like phy number + * + * This is handler for config page read for a specific SAS + * Expander page1. The ioc_status has the controller returned + * ioc_status. This routine doesn't check ioc_status to decide + * whether the page read is success or not and it is the callers + * responsibility. + * + * Return: 0 on success, non-zero on failure. 
+ */ +int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status, + struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form, + u32 form_spec) +{ + struct mpi3_config_page_header cfg_hdr; + struct mpi3_config_request cfg_req; + u32 page_address; + + memset(exp_pg1, 0, pg_sz); + memset(&cfg_hdr, 0, sizeof(cfg_hdr)); + memset(&cfg_req, 0, sizeof(cfg_req)); + + cfg_req.function = MPI3_FUNCTION_CONFIG; + cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; + cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER; + cfg_req.page_number = 1; + cfg_req.page_address = 0; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, + MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { + ioc_err(mrioc, "expander page1 header read failed\n"); + goto out_failed; + } + if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n", + *ioc_status); + goto out_failed; + } + cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; + page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) | + (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK | + MPI3_SAS_EXPAND_PGAD_HANDLE_MASK))); + cfg_req.page_address = cpu_to_le32(page_address); + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, + MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) { + ioc_err(mrioc, "expander page1 read failed\n"); + goto out_failed; + } + return 0; +out_failed: + return -1; +} + +/** + * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0 + * @mrioc: Adapter instance reference + * @ioc_status: Pointer to return ioc status + * @encl_pg0: Pointer to return Enclosure page 0 + * @pg_sz: Size of the memory allocated to the page pointer + * @form: The form to be used for addressing the page + * @form_spec: Form specific information like device handle + * + * This is handler for config page read for a specific Enclosure + * page0. The ioc_status has the controller returned ioc_status. + * This routine doesn't check ioc_status to decide whether the + * page read is success or not and it is the callers + * responsibility. + * + * Return: 0 on success, non-zero on failure. 
+ */ +int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, + struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form, + u32 form_spec) +{ + struct mpi3_config_page_header cfg_hdr; + struct mpi3_config_request cfg_req; + u32 page_address; + + memset(encl_pg0, 0, pg_sz); + memset(&cfg_hdr, 0, sizeof(cfg_hdr)); + memset(&cfg_req, 0, sizeof(cfg_req)); + + cfg_req.function = MPI3_FUNCTION_CONFIG; + cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; + cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE; + cfg_req.page_number = 0; + cfg_req.page_address = 0; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, + MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { + ioc_err(mrioc, "enclosure page0 header read failed\n"); + goto out_failed; + } + if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n", + *ioc_status); + goto out_failed; + } + cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; + page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) | + (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK)); + cfg_req.page_address = cpu_to_le32(page_address); + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, + MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) { + ioc_err(mrioc, "enclosure page0 read failed\n"); + goto out_failed; + } + return 0; +out_failed: + return -1; +} + + +/** + * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0 + * @mrioc: Adapter instance reference + * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0 + * @pg_sz: Size of the memory allocated to the page pointer + * + * This is handler for config page read for the SAS IO Unit + * page0. This routine checks ioc_status to decide whether the + * page read is success or not. + * + * Return: 0 on success, non-zero on failure. + */ +int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc, + struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz) +{ + struct mpi3_config_page_header cfg_hdr; + struct mpi3_config_request cfg_req; + u16 ioc_status = 0; + + memset(sas_io_unit_pg0, 0, pg_sz); + memset(&cfg_hdr, 0, sizeof(cfg_hdr)); + memset(&cfg_req, 0, sizeof(cfg_req)); + + cfg_req.function = MPI3_FUNCTION_CONFIG; + cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; + cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; + cfg_req.page_number = 0; + cfg_req.page_address = 0; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, + MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { + ioc_err(mrioc, "sas io unit page0 header read failed\n"); + goto out_failed; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n", + ioc_status); + goto out_failed; + } + cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, + MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) { + ioc_err(mrioc, "sas io unit page0 read failed\n"); + goto out_failed; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n", + ioc_status); + goto out_failed; + } + return 0; +out_failed: + return -1; +} + +/** + * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1 + * @mrioc: Adapter instance reference + * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1 + * @pg_sz: Size of the memory allocated to the page pointer + * + * This is handler for config page read for the SAS IO Unit + * page1. 
This routine checks ioc_status to decide whether the + * page read is success or not. + * + * Return: 0 on success, non-zero on failure. + */ +int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc, + struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz) +{ + struct mpi3_config_page_header cfg_hdr; + struct mpi3_config_request cfg_req; + u16 ioc_status = 0; + + memset(sas_io_unit_pg1, 0, pg_sz); + memset(&cfg_hdr, 0, sizeof(cfg_hdr)); + memset(&cfg_req, 0, sizeof(cfg_req)); + + cfg_req.function = MPI3_FUNCTION_CONFIG; + cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; + cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; + cfg_req.page_number = 1; + cfg_req.page_address = 0; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, + MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { + ioc_err(mrioc, "sas io unit page1 header read failed\n"); + goto out_failed; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n", + ioc_status); + goto out_failed; + } + cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, + MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { + ioc_err(mrioc, "sas io unit page1 read failed\n"); + goto out_failed; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n", + ioc_status); + goto out_failed; + } + return 0; +out_failed: + return -1; +} + +/** + * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1 + * @mrioc: Adapter instance reference + * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write + * @pg_sz: Size of the memory allocated to the page pointer + * + * This is handler for config page write for the SAS IO Unit + * page1. This routine checks ioc_status to decide whether the + * page read is success or not. This will modify both current + * and persistent page. + * + * Return: 0 on success, non-zero on failure. 
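+ *
+ * The expected usage is read-modify-write; an assumed sketch (the
+ * field being modified is left as a placeholder comment):
+ *
+ *   if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, pg_sz))
+ *           return -1;
+ *   // adjust the desired phy settings in *sas_io_unit_pg1 here
+ *   if (mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, pg_sz))
+ *           return -1;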
+ */ +int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc, + struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz) +{ + struct mpi3_config_page_header cfg_hdr; + struct mpi3_config_request cfg_req; + u16 ioc_status = 0; + + memset(&cfg_hdr, 0, sizeof(cfg_hdr)); + memset(&cfg_req, 0, sizeof(cfg_req)); + + cfg_req.function = MPI3_FUNCTION_CONFIG; + cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; + cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; + cfg_req.page_number = 1; + cfg_req.page_address = 0; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, + MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { + ioc_err(mrioc, "sas io unit page1 header read failed\n"); + goto out_failed; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n", + ioc_status); + goto out_failed; + } + cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, + MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { + ioc_err(mrioc, "sas io unit page1 write current failed\n"); + goto out_failed; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n", + ioc_status); + goto out_failed; + } + + cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, + MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { + ioc_err(mrioc, "sas io unit page1 write persistent failed\n"); + goto out_failed; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n", + ioc_status); + goto out_failed; + } + return 0; +out_failed: + return -1; +} + +/** + * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1 + * @mrioc: Adapter instance reference + * @driver_pg1: Pointer to return Driver page 1 + * @pg_sz: Size of the memory allocated to the page pointer + * + * This is handler for config page read for the Driver page1. + * This routine checks ioc_status to decide whether the page + * read is success or not. + * + * Return: 0 on success, non-zero on failure. 
+ */ +int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc, + struct mpi3_driver_page1 *driver_pg1, u16 pg_sz) +{ + struct mpi3_config_page_header cfg_hdr; + struct mpi3_config_request cfg_req; + u16 ioc_status = 0; + + memset(driver_pg1, 0, pg_sz); + memset(&cfg_hdr, 0, sizeof(cfg_hdr)); + memset(&cfg_req, 0, sizeof(cfg_req)); + + cfg_req.function = MPI3_FUNCTION_CONFIG; + cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; + cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER; + cfg_req.page_number = 1; + cfg_req.page_address = 0; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, + MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { + ioc_err(mrioc, "driver page1 header read failed\n"); + goto out_failed; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n", + ioc_status); + goto out_failed; + } + cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; + + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, + MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) { + ioc_err(mrioc, "driver page1 read failed\n"); + goto out_failed; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n", + ioc_status); + goto out_failed; + } + return 0; +out_failed: + return -1; +} diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c new file mode 100644 index 000000000..c7c752574 --- /dev/null +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -0,0 +1,5502 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for Broadcom MPI3 Storage Controllers + * + * Copyright (C) 2017-2023 Broadcom Inc. + * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) + * + */ + +#include "mpi3mr.h" + +/* global driver scop variables */ +LIST_HEAD(mrioc_list); +DEFINE_SPINLOCK(mrioc_list_lock); +static int mrioc_ids; +static int warn_non_secure_ctlr; +atomic64_t event_counter; + +MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR); +MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC); +MODULE_LICENSE(MPI3MR_DRIVER_LICENSE); +MODULE_VERSION(MPI3MR_DRIVER_VERSION); + +/* Module parameters*/ +int prot_mask = -1; +module_param(prot_mask, int, 0); +MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07"); + +static int prot_guard_mask = 3; +module_param(prot_guard_mask, int, 0); +MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3"); +static int logging_level; +module_param(logging_level, int, 0); +MODULE_PARM_DESC(logging_level, + " bits for enabling additional logging info (default=0)"); +static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES; +module_param(max_sgl_entries, int, 0444); +MODULE_PARM_DESC(max_sgl_entries, + "Preferred max number of SG entries to be used for a single I/O\n" + "The actual value will be determined by the driver\n" + "(Minimum=256, Maximum=2048, default=256)"); + +/* Forward declarations*/ +static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, + struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx); + +#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION (0xFFFF) + +#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH (0xFFFE) + +/** + * mpi3mr_host_tag_for_scmd - Get host tag for a scmd + * @mrioc: Adapter instance reference + * @scmd: SCSI command reference + * + * Calculate the host tag based on block tag for a given scmd. + * + * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID. 
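+ *
+ * The mapping is reversible so that reply processing can get back to
+ * the scmd; condensed from the helpers used in this file, the round
+ * trip is:
+ *
+ *   unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+ *   host_tag   = blk_mq_unique_tag_to_tag(unique_tag) + 1;  // 0 is invalid
+ *   // and later, in mpi3mr_scmd_from_host_tag():
+ *   unique_tag = (host_tag - 1) | (qidx << BLK_MQ_UNIQUE_TAG_BITS);
+ *   scmd = scsi_host_find_tag(mrioc->shost, unique_tag);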
+ */ +static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc, + struct scsi_cmnd *scmd) +{ + struct scmd_priv *priv = NULL; + u32 unique_tag; + u16 host_tag, hw_queue; + + unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + + hw_queue = blk_mq_unique_tag_to_hwq(unique_tag); + if (hw_queue >= mrioc->num_op_reply_q) + return MPI3MR_HOSTTAG_INVALID; + host_tag = blk_mq_unique_tag_to_tag(unique_tag); + + if (WARN_ON(host_tag >= mrioc->max_host_ios)) + return MPI3MR_HOSTTAG_INVALID; + + priv = scsi_cmd_priv(scmd); + /*host_tag 0 is invalid hence incrementing by 1*/ + priv->host_tag = host_tag + 1; + priv->scmd = scmd; + priv->in_lld_scope = 1; + priv->req_q_idx = hw_queue; + priv->meta_chain_idx = -1; + priv->chain_idx = -1; + priv->meta_sg_valid = 0; + return priv->host_tag; +} + +/** + * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag + * @mrioc: Adapter instance reference + * @host_tag: Host tag + * @qidx: Operational queue index + * + * Identify the block tag from the host tag and queue index and + * retrieve associated scsi command using scsi_host_find_tag(). + * + * Return: SCSI command reference or NULL. + */ +static struct scsi_cmnd *mpi3mr_scmd_from_host_tag( + struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx) +{ + struct scsi_cmnd *scmd = NULL; + struct scmd_priv *priv = NULL; + u32 unique_tag = host_tag - 1; + + if (WARN_ON(host_tag > mrioc->max_host_ios)) + goto out; + + unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS); + + scmd = scsi_host_find_tag(mrioc->shost, unique_tag); + if (scmd) { + priv = scsi_cmd_priv(scmd); + if (!priv->in_lld_scope) + scmd = NULL; + } +out: + return scmd; +} + +/** + * mpi3mr_clear_scmd_priv - Cleanup SCSI command private date + * @mrioc: Adapter instance reference + * @scmd: SCSI command reference + * + * Invalidate the SCSI command private data to mark the command + * is not in LLD scope anymore. + * + * Return: Nothing. + */ +static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc, + struct scsi_cmnd *scmd) +{ + struct scmd_priv *priv = NULL; + + priv = scsi_cmd_priv(scmd); + + if (WARN_ON(priv->in_lld_scope == 0)) + return; + priv->host_tag = MPI3MR_HOSTTAG_INVALID; + priv->req_q_idx = 0xFFFF; + priv->scmd = NULL; + priv->in_lld_scope = 0; + priv->meta_sg_valid = 0; + if (priv->chain_idx >= 0) { + clear_bit(priv->chain_idx, mrioc->chain_bitmap); + priv->chain_idx = -1; + } + if (priv->meta_chain_idx >= 0) { + clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap); + priv->meta_chain_idx = -1; + } +} + +static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle, + struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc); +static void mpi3mr_fwevt_worker(struct work_struct *work); + +/** + * mpi3mr_fwevt_free - firmware event memory dealloctor + * @r: k reference pointer of the firmware event + * + * Free firmware event memory when no reference. + */ +static void mpi3mr_fwevt_free(struct kref *r) +{ + kfree(container_of(r, struct mpi3mr_fwevt, ref_count)); +} + +/** + * mpi3mr_fwevt_get - k reference incrementor + * @fwevt: Firmware event reference + * + * Increment firmware event reference count. + */ +static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt) +{ + kref_get(&fwevt->ref_count); +} + +/** + * mpi3mr_fwevt_put - k reference decrementor + * @fwevt: Firmware event reference + * + * decrement firmware event reference count. 
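+ *
+ * Reference accounting outline (summarising the helpers in this
+ * file, shown only as an illustration):
+ *
+ *   fwevt = mpi3mr_alloc_fwevt(len);         // kref_init, count = 1
+ *   mpi3mr_fwevt_add_to_list(mrioc, fwevt);  // +1 for the list and
+ *                                            // +1 for the work queue
+ *   // the worker later drops the list and work queue references and
+ *   // finally the allocation reference once the event is processed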
+ */ +static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt) +{ + kref_put(&fwevt->ref_count, mpi3mr_fwevt_free); +} + +/** + * mpi3mr_alloc_fwevt - Allocate firmware event + * @len: length of firmware event data to allocate + * + * Allocate firmware event with required length and initialize + * the reference counter. + * + * Return: firmware event reference. + */ +static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len) +{ + struct mpi3mr_fwevt *fwevt; + + fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC); + if (!fwevt) + return NULL; + + kref_init(&fwevt->ref_count); + return fwevt; +} + +/** + * mpi3mr_fwevt_add_to_list - Add firmware event to the list + * @mrioc: Adapter instance reference + * @fwevt: Firmware event reference + * + * Add the given firmware event to the firmware event list. + * + * Return: Nothing. + */ +static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc, + struct mpi3mr_fwevt *fwevt) +{ + unsigned long flags; + + if (!mrioc->fwevt_worker_thread) + return; + + spin_lock_irqsave(&mrioc->fwevt_lock, flags); + /* get fwevt reference count while adding it to fwevt_list */ + mpi3mr_fwevt_get(fwevt); + INIT_LIST_HEAD(&fwevt->list); + list_add_tail(&fwevt->list, &mrioc->fwevt_list); + INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker); + /* get fwevt reference count while enqueueing it to worker queue */ + mpi3mr_fwevt_get(fwevt); + queue_work(mrioc->fwevt_worker_thread, &fwevt->work); + spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); +} + +/** + * mpi3mr_fwevt_del_from_list - Delete firmware event from list + * @mrioc: Adapter instance reference + * @fwevt: Firmware event reference + * + * Delete the given firmware event from the firmware event list. + * + * Return: Nothing. + */ +static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc, + struct mpi3mr_fwevt *fwevt) +{ + unsigned long flags; + + spin_lock_irqsave(&mrioc->fwevt_lock, flags); + if (!list_empty(&fwevt->list)) { + list_del_init(&fwevt->list); + /* + * Put fwevt reference count after + * removing it from fwevt_list + */ + mpi3mr_fwevt_put(fwevt); + } + spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); +} + +/** + * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list + * @mrioc: Adapter instance reference + * + * Dequeue a firmware event from the firmware event list. + * + * Return: firmware event. + */ +static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt( + struct mpi3mr_ioc *mrioc) +{ + unsigned long flags; + struct mpi3mr_fwevt *fwevt = NULL; + + spin_lock_irqsave(&mrioc->fwevt_lock, flags); + if (!list_empty(&mrioc->fwevt_list)) { + fwevt = list_first_entry(&mrioc->fwevt_list, + struct mpi3mr_fwevt, list); + list_del_init(&fwevt->list); + /* + * Put fwevt reference count after + * removing it from fwevt_list + */ + mpi3mr_fwevt_put(fwevt); + } + spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); + + return fwevt; +} + +/** + * mpi3mr_cancel_work - cancel firmware event + * @fwevt: fwevt object which needs to be canceled + * + * Return: Nothing. + */ +static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt) +{ + /* + * Wait on the fwevt to complete. If this returns 1, then + * the event was never executed. 
+ * + * If it did execute, we wait for it to finish, and the put will + * happen from mpi3mr_process_fwevt() + */ + if (cancel_work_sync(&fwevt->work)) { + /* + * Put fwevt reference count after + * dequeuing it from worker queue + */ + mpi3mr_fwevt_put(fwevt); + /* + * Put fwevt reference count to neutralize + * kref_init increment + */ + mpi3mr_fwevt_put(fwevt); + } +} + +/** + * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list + * @mrioc: Adapter instance reference + * + * Flush all pending firmware events from the firmware event + * list. + * + * Return: Nothing. + */ +void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc) +{ + struct mpi3mr_fwevt *fwevt = NULL; + + if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) || + !mrioc->fwevt_worker_thread) + return; + + while ((fwevt = mpi3mr_dequeue_fwevt(mrioc))) + mpi3mr_cancel_work(fwevt); + + if (mrioc->current_event) { + fwevt = mrioc->current_event; + /* + * Don't call cancel_work_sync() API for the + * fwevt work if the controller reset is + * get called as part of processing the + * same fwevt work (or) when worker thread is + * waiting for device add/remove APIs to complete. + * Otherwise we will see deadlock. + */ + if (current_work() == &fwevt->work || fwevt->pending_at_sml) { + fwevt->discard = 1; + return; + } + + mpi3mr_cancel_work(fwevt); + } +} + +/** + * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event + * @mrioc: Adapter instance reference + * @tg: Throttle group information pointer + * + * Accessor to queue on synthetically generated driver event to + * the event worker thread, the driver event will be used to + * reduce the QD of all VDs in the TG from the worker thread. + * + * Return: None. + */ +static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc, + struct mpi3mr_throttle_group_info *tg) +{ + struct mpi3mr_fwevt *fwevt; + u16 sz = sizeof(struct mpi3mr_throttle_group_info *); + + /* + * If the QD reduction event is already queued due to throttle and if + * the QD is not restored through device info change event + * then dont queue further reduction events + */ + if (tg->fw_qd != tg->modified_qd) + return; + + fwevt = mpi3mr_alloc_fwevt(sz); + if (!fwevt) { + ioc_warn(mrioc, "failed to queue TG QD reduction event\n"); + return; + } + *(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg; + fwevt->mrioc = mrioc; + fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION; + fwevt->send_ack = 0; + fwevt->process_evt = 1; + fwevt->evt_ctx = 0; + fwevt->event_data_size = sz; + tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8); + + dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n", + tg->id); + mpi3mr_fwevt_add_to_list(mrioc, fwevt); +} + +/** + * mpi3mr_invalidate_devhandles -Invalidate device handles + * @mrioc: Adapter instance reference + * + * Invalidate the device handles in the target device structures + * . Called post reset prior to reinitializing the controller. + * + * Return: Nothing. 
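+ *
+ * Note: for devices that are already exposed to the SCSI midlayer,
+ * block_io is also set here, the intent presumably being to hold back
+ * new I/O while the handles are re-established after the reset.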
+ */ +void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc) +{ + struct mpi3mr_tgt_dev *tgtdev; + struct mpi3mr_stgt_priv_data *tgt_priv; + + list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { + tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE; + if (tgtdev->starget && tgtdev->starget->hostdata) { + tgt_priv = tgtdev->starget->hostdata; + tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE; + tgt_priv->io_throttle_enabled = 0; + tgt_priv->io_divert = 0; + tgt_priv->throttle_group = NULL; + tgt_priv->wslen = 0; + if (tgtdev->host_exposed) + atomic_set(&tgt_priv->block_io, 1); + } + } +} + +/** + * mpi3mr_print_scmd - print individual SCSI command + * @rq: Block request + * @data: Adapter instance reference + * + * Print the SCSI command details if it is in LLD scope. + * + * Return: true always. + */ +static bool mpi3mr_print_scmd(struct request *rq, void *data) +{ + struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data; + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); + struct scmd_priv *priv = NULL; + + if (scmd) { + priv = scsi_cmd_priv(scmd); + if (!priv->in_lld_scope) + goto out; + + ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n", + __func__, priv->host_tag, priv->req_q_idx + 1); + scsi_print_command(scmd); + } + +out: + return(true); +} + +/** + * mpi3mr_flush_scmd - Flush individual SCSI command + * @rq: Block request + * @data: Adapter instance reference + * + * Return the SCSI command to the upper layers if it is in LLD + * scope. + * + * Return: true always. + */ + +static bool mpi3mr_flush_scmd(struct request *rq, void *data) +{ + struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data; + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); + struct scmd_priv *priv = NULL; + + if (scmd) { + priv = scsi_cmd_priv(scmd); + if (!priv->in_lld_scope) + goto out; + + if (priv->meta_sg_valid) + dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd), + scsi_prot_sg_count(scmd), scmd->sc_data_direction); + mpi3mr_clear_scmd_priv(mrioc, scmd); + scsi_dma_unmap(scmd); + scmd->result = DID_RESET << 16; + scsi_print_command(scmd); + scsi_done(scmd); + mrioc->flush_io_count++; + } + +out: + return(true); +} + +/** + * mpi3mr_count_dev_pending - Count commands pending for a lun + * @rq: Block request + * @data: SCSI device reference + * + * This is an iterator function called for each SCSI command in + * a host and if the command is pending in the LLD for the + * specific device(lun) then device specific pending I/O counter + * is updated in the device structure. + * + * Return: true always. + */ + +static bool mpi3mr_count_dev_pending(struct request *rq, void *data) +{ + struct scsi_device *sdev = (struct scsi_device *)data; + struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); + struct scmd_priv *priv; + + if (scmd) { + priv = scsi_cmd_priv(scmd); + if (!priv->in_lld_scope) + goto out; + if (scmd->device == sdev) + sdev_priv_data->pend_count++; + } + +out: + return true; +} + +/** + * mpi3mr_count_tgt_pending - Count commands pending for target + * @rq: Block request + * @data: SCSI target reference + * + * This is an iterator function called for each SCSI command in + * a host and if the command is pending in the LLD for the + * specific target then target specific pending I/O counter is + * updated in the target structure. + * + * Return: true always. 
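+ *
+ * Note: like mpi3mr_count_dev_pending() above, this is presumably
+ * passed to blk_mq_tagset_busy_iter() with the scsi_target as the data
+ * cookie (the same pattern mpi3mr_flush_host_io() uses), so callers
+ * would normally reset pend_count before starting the walk.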
+ */ + +static bool mpi3mr_count_tgt_pending(struct request *rq, void *data) +{ + struct scsi_target *starget = (struct scsi_target *)data; + struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata; + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); + struct scmd_priv *priv; + + if (scmd) { + priv = scsi_cmd_priv(scmd); + if (!priv->in_lld_scope) + goto out; + if (scmd->device && (scsi_target(scmd->device) == starget)) + stgt_priv_data->pend_count++; + } + +out: + return true; +} + +/** + * mpi3mr_flush_host_io - Flush host I/Os + * @mrioc: Adapter instance reference + * + * Flush all of the pending I/Os by calling + * blk_mq_tagset_busy_iter() for each possible tag. This is + * executed post controller reset + * + * Return: Nothing. + */ +void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc) +{ + struct Scsi_Host *shost = mrioc->shost; + + mrioc->flush_io_count = 0; + ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__); + blk_mq_tagset_busy_iter(&shost->tag_set, + mpi3mr_flush_scmd, (void *)mrioc); + ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__, + mrioc->flush_io_count); +} + +/** + * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds + * @mrioc: Adapter instance reference + * + * This function waits for currently running IO poll threads to + * exit and then flushes all host I/Os and any internal pending + * cmds. This is executed after controller is marked as + * unrecoverable. + * + * Return: Nothing. + */ +void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc) +{ + struct Scsi_Host *shost = mrioc->shost; + int i; + + if (!mrioc->unrecoverable) + return; + + if (mrioc->op_reply_qinfo) { + for (i = 0; i < mrioc->num_queues; i++) { + while (atomic_read(&mrioc->op_reply_qinfo[i].in_use)) + udelay(500); + atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0); + } + } + mrioc->flush_io_count = 0; + blk_mq_tagset_busy_iter(&shost->tag_set, + mpi3mr_flush_scmd, (void *)mrioc); + mpi3mr_flush_delayed_cmd_lists(mrioc); + mpi3mr_flush_drv_cmds(mrioc); +} + +/** + * mpi3mr_alloc_tgtdev - target device allocator + * + * Allocate target device instance and initialize the reference + * count + * + * Return: target device instance. + */ +static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void) +{ + struct mpi3mr_tgt_dev *tgtdev; + + tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC); + if (!tgtdev) + return NULL; + kref_init(&tgtdev->ref_count); + return tgtdev; +} + +/** + * mpi3mr_tgtdev_add_to_list -Add tgtdevice to the list + * @mrioc: Adapter instance reference + * @tgtdev: Target device + * + * Add the target device to the target device list + * + * Return: Nothing. + */ +static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc, + struct mpi3mr_tgt_dev *tgtdev) +{ + unsigned long flags; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + mpi3mr_tgtdev_get(tgtdev); + INIT_LIST_HEAD(&tgtdev->list); + list_add_tail(&tgtdev->list, &mrioc->tgtdev_list); + tgtdev->state = MPI3MR_DEV_CREATED; + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); +} + +/** + * mpi3mr_tgtdev_del_from_list -Delete tgtdevice from the list + * @mrioc: Adapter instance reference + * @tgtdev: Target device + * @must_delete: Must delete the target device from the list irrespective + * of the device state. + * + * Remove the target device from the target device list + * + * Return: Nothing. 
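+ *
+ * Note: unless @must_delete is set, only devices already in the
+ * MPI3MR_DEV_REMOVE_HS_STARTED state are unlinked; the removal drops
+ * the list reference taken in mpi3mr_tgtdev_add_to_list() and moves
+ * the device to the MPI3MR_DEV_DELETED state.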
+ */ +static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc, + struct mpi3mr_tgt_dev *tgtdev, bool must_delete) +{ + unsigned long flags; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) { + if (!list_empty(&tgtdev->list)) { + list_del_init(&tgtdev->list); + tgtdev->state = MPI3MR_DEV_DELETED; + mpi3mr_tgtdev_put(tgtdev); + } + } + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); +} + +/** + * __mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle + * @mrioc: Adapter instance reference + * @handle: Device handle + * + * Accessor to retrieve target device from the device handle. + * Non Lock version + * + * Return: Target device reference. + */ +static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle( + struct mpi3mr_ioc *mrioc, u16 handle) +{ + struct mpi3mr_tgt_dev *tgtdev; + + assert_spin_locked(&mrioc->tgtdev_lock); + list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) + if (tgtdev->dev_handle == handle) + goto found_tgtdev; + return NULL; + +found_tgtdev: + mpi3mr_tgtdev_get(tgtdev); + return tgtdev; +} + +/** + * mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle + * @mrioc: Adapter instance reference + * @handle: Device handle + * + * Accessor to retrieve target device from the device handle. + * Lock version + * + * Return: Target device reference. + */ +struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle( + struct mpi3mr_ioc *mrioc, u16 handle) +{ + struct mpi3mr_tgt_dev *tgtdev; + unsigned long flags; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle); + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + return tgtdev; +} + +/** + * __mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persist ID + * @mrioc: Adapter instance reference + * @persist_id: Persistent ID + * + * Accessor to retrieve target device from the Persistent ID. + * Non Lock version + * + * Return: Target device reference. + */ +static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id( + struct mpi3mr_ioc *mrioc, u16 persist_id) +{ + struct mpi3mr_tgt_dev *tgtdev; + + assert_spin_locked(&mrioc->tgtdev_lock); + list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) + if (tgtdev->perst_id == persist_id) + goto found_tgtdev; + return NULL; + +found_tgtdev: + mpi3mr_tgtdev_get(tgtdev); + return tgtdev; +} + +/** + * mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persistent ID + * @mrioc: Adapter instance reference + * @persist_id: Persistent ID + * + * Accessor to retrieve target device from the Persistent ID. + * Lock version + * + * Return: Target device reference. + */ +static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id( + struct mpi3mr_ioc *mrioc, u16 persist_id) +{ + struct mpi3mr_tgt_dev *tgtdev; + unsigned long flags; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id); + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + return tgtdev; +} + +/** + * __mpi3mr_get_tgtdev_from_tgtpriv -Get tgtdev from tgt private + * @mrioc: Adapter instance reference + * @tgt_priv: Target private data + * + * Accessor to return target device from the target private + * data. Non Lock version + * + * Return: Target device reference. 
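+ *
+ * Note: the returned device has its reference count incremented, so
+ * the caller is expected to release it with mpi3mr_tgtdev_put() when
+ * it is done with the device.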
+ */ +static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv( + struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv) +{ + struct mpi3mr_tgt_dev *tgtdev; + + assert_spin_locked(&mrioc->tgtdev_lock); + tgtdev = tgt_priv->tgt_dev; + if (tgtdev) + mpi3mr_tgtdev_get(tgtdev); + return tgtdev; +} + +/** + * mpi3mr_set_io_divert_for_all_vd_in_tg -set divert for TG VDs + * @mrioc: Adapter instance reference + * @tg: Throttle group information pointer + * @divert_value: 1 or 0 + * + * Accessor to set io_divert flag for each device associated + * with the given throttle group with the given value. + * + * Return: None. + */ +static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc, + struct mpi3mr_throttle_group_info *tg, u8 divert_value) +{ + unsigned long flags; + struct mpi3mr_tgt_dev *tgtdev; + struct mpi3mr_stgt_priv_data *tgt_priv; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { + if (tgtdev->starget && tgtdev->starget->hostdata) { + tgt_priv = tgtdev->starget->hostdata; + if (tgt_priv->throttle_group == tg) + tgt_priv->io_divert = divert_value; + } + } + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); +} + +/** + * mpi3mr_print_device_event_notice - print notice related to post processing of + * device event after controller reset. + * + * @mrioc: Adapter instance reference + * @device_add: true for device add event and false for device removal event + * + * Return: None. + */ +void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc, + bool device_add) +{ + ioc_notice(mrioc, "Device %s was in progress before the reset and\n", + (device_add ? "addition" : "removal")); + ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n"); + ioc_notice(mrioc, "are matched with attached devices for correctness\n"); +} + +/** + * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers + * @mrioc: Adapter instance reference + * @tgtdev: Target device structure + * + * Checks whether the device is exposed to upper layers and if it + * is then remove the device from upper layers by calling + * scsi_remove_target(). + * + * Return: 0 on success, non zero on failure. 
+ */ +void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc, + struct mpi3mr_tgt_dev *tgtdev) +{ + struct mpi3mr_stgt_priv_data *tgt_priv; + + ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n", + __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); + if (tgtdev->starget && tgtdev->starget->hostdata) { + tgt_priv = tgtdev->starget->hostdata; + atomic_set(&tgt_priv->block_io, 0); + tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE; + } + + if (!mrioc->sas_transport_enabled || (tgtdev->dev_type != + MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) { + if (tgtdev->starget) { + if (mrioc->current_event) + mrioc->current_event->pending_at_sml = 1; + scsi_remove_target(&tgtdev->starget->dev); + tgtdev->host_exposed = 0; + if (mrioc->current_event) { + mrioc->current_event->pending_at_sml = 0; + if (mrioc->current_event->discard) { + mpi3mr_print_device_event_notice(mrioc, + false); + return; + } + } + } + } else + mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev); + + ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n", + __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); +} + +/** + * mpi3mr_report_tgtdev_to_host - Expose device to upper layers + * @mrioc: Adapter instance reference + * @perst_id: Persistent ID of the device + * + * Checks whether the device can be exposed to upper layers and + * if it is not then expose the device to upper layers by + * calling scsi_scan_target(). + * + * Return: 0 on success, non zero on failure. + */ +static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc, + u16 perst_id) +{ + int retval = 0; + struct mpi3mr_tgt_dev *tgtdev; + + if (mrioc->reset_in_progress) + return -1; + + tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); + if (!tgtdev) { + retval = -1; + goto out; + } + if (tgtdev->is_hidden || tgtdev->host_exposed) { + retval = -1; + goto out; + } + if (!mrioc->sas_transport_enabled || (tgtdev->dev_type != + MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl){ + tgtdev->host_exposed = 1; + if (mrioc->current_event) + mrioc->current_event->pending_at_sml = 1; + scsi_scan_target(&mrioc->shost->shost_gendev, + mrioc->scsi_device_channel, tgtdev->perst_id, + SCAN_WILD_CARD, SCSI_SCAN_INITIAL); + if (!tgtdev->starget) + tgtdev->host_exposed = 0; + if (mrioc->current_event) { + mrioc->current_event->pending_at_sml = 0; + if (mrioc->current_event->discard) { + mpi3mr_print_device_event_notice(mrioc, true); + goto out; + } + } + } else + mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev); +out: + if (tgtdev) + mpi3mr_tgtdev_put(tgtdev); + + return retval; +} + +/** + * mpi3mr_change_queue_depth- Change QD callback handler + * @sdev: SCSI device reference + * @q_depth: Queue depth + * + * Validate and limit QD and call scsi_change_queue_depth. 
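+ * Untagged devices are limited to a depth of one, values larger than
+ * shost->can_queue are clamped to can_queue and a requested depth of
+ * zero falls back to MPI3MR_DEFAULT_SDEV_QD.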
+ * + * Return: return value of scsi_change_queue_depth + */ +static int mpi3mr_change_queue_depth(struct scsi_device *sdev, + int q_depth) +{ + struct scsi_target *starget = scsi_target(sdev); + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + int retval = 0; + + if (!sdev->tagged_supported) + q_depth = 1; + if (q_depth > shost->can_queue) + q_depth = shost->can_queue; + else if (!q_depth) + q_depth = MPI3MR_DEFAULT_SDEV_QD; + retval = scsi_change_queue_depth(sdev, q_depth); + sdev->max_queue_depth = sdev->queue_depth; + + return retval; +} + +/** + * mpi3mr_update_sdev - Update SCSI device information + * @sdev: SCSI device reference + * @data: target device reference + * + * This is an iterator function called for each SCSI device in a + * target to update the target specific information into each + * SCSI device. + * + * Return: Nothing. + */ +static void +mpi3mr_update_sdev(struct scsi_device *sdev, void *data) +{ + struct mpi3mr_tgt_dev *tgtdev; + + tgtdev = (struct mpi3mr_tgt_dev *)data; + if (!tgtdev) + return; + + mpi3mr_change_queue_depth(sdev, tgtdev->q_depth); + switch (tgtdev->dev_type) { + case MPI3_DEVICE_DEVFORM_PCIE: + /*The block layer hw sector size = 512*/ + if ((tgtdev->dev_spec.pcie_inf.dev_info & + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { + blk_queue_max_hw_sectors(sdev->request_queue, + tgtdev->dev_spec.pcie_inf.mdts / 512); + if (tgtdev->dev_spec.pcie_inf.pgsz == 0) + blk_queue_virt_boundary(sdev->request_queue, + ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); + else + blk_queue_virt_boundary(sdev->request_queue, + ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1)); + } + break; + default: + break; + } +} + +/** + * mpi3mr_rfresh_tgtdevs - Refresh target device exposure + * @mrioc: Adapter instance reference + * + * This is executed post controller reset to identify any + * missing devices during reset and remove from the upper layers + * or expose any newly detected device to the upper layers. + * + * Return: Nothing. 
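+ *
+ * Note: the refresh walks tgtdev_list in passes: stale entries (those
+ * whose handles were not re-established after the reset) are first
+ * marked as removed, then removed from or hidden from the upper
+ * layers, and finally the remaining visible devices are either exposed
+ * or have their SCSI devices updated with the refreshed parameters.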
+ */ + +void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc) +{ + struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; + struct mpi3mr_stgt_priv_data *tgt_priv; + + dprint_reset(mrioc, "refresh target devices: check for removals\n"); + list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, + list) { + if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) && + tgtdev->is_hidden && + tgtdev->host_exposed && tgtdev->starget && + tgtdev->starget->hostdata) { + tgt_priv = tgtdev->starget->hostdata; + tgt_priv->dev_removed = 1; + atomic_set(&tgt_priv->block_io, 0); + } + } + + list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, + list) { + if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) { + dprint_reset(mrioc, "removing target device with perst_id(%d)\n", + tgtdev->perst_id); + if (tgtdev->host_exposed) + mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); + mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true); + mpi3mr_tgtdev_put(tgtdev); + } else if (tgtdev->is_hidden & tgtdev->host_exposed) { + dprint_reset(mrioc, "hiding target device with perst_id(%d)\n", + tgtdev->perst_id); + mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); + } + } + + tgtdev = NULL; + list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { + if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) && + !tgtdev->is_hidden) { + if (!tgtdev->host_exposed) + mpi3mr_report_tgtdev_to_host(mrioc, + tgtdev->perst_id); + else if (tgtdev->starget) + starget_for_each_device(tgtdev->starget, + (void *)tgtdev, mpi3mr_update_sdev); + } + } +} + +/** + * mpi3mr_update_tgtdev - DevStatusChange evt bottomhalf + * @mrioc: Adapter instance reference + * @tgtdev: Target device internal structure + * @dev_pg0: New device page0 + * @is_added: Flag to indicate the device is just added + * + * Update the information from the device page0 into the driver + * cached target device structure. + * + * Return: Nothing. + */ +static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc, + struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0, + bool is_added) +{ + u16 flags = 0; + struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; + struct mpi3mr_enclosure_node *enclosure_dev = NULL; + u8 prot_mask = 0; + + tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id); + tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle); + tgtdev->dev_type = dev_pg0->device_form; + tgtdev->io_unit_port = dev_pg0->io_unit_port; + tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle); + tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle); + tgtdev->slot = le16_to_cpu(dev_pg0->slot); + tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth); + tgtdev->wwid = le64_to_cpu(dev_pg0->wwid); + tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags); + + if (tgtdev->encl_handle) + enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc, + tgtdev->encl_handle); + if (enclosure_dev) + tgtdev->enclosure_logical_id = le64_to_cpu( + enclosure_dev->pg0.enclosure_logical_id); + + flags = tgtdev->devpg0_flag; + + tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN); + + if (is_added == true) + tgtdev->io_throttle_enabled = + (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 
1 : 0; + + switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) { + case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB: + tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS; + break; + case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB: + tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS; + break; + case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT: + default: + tgtdev->wslen = 0; + break; + } + + if (tgtdev->starget && tgtdev->starget->hostdata) { + scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) + tgtdev->starget->hostdata; + scsi_tgt_priv_data->perst_id = tgtdev->perst_id; + scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle; + scsi_tgt_priv_data->dev_type = tgtdev->dev_type; + scsi_tgt_priv_data->io_throttle_enabled = + tgtdev->io_throttle_enabled; + if (is_added == true) + atomic_set(&scsi_tgt_priv_data->block_io, 0); + scsi_tgt_priv_data->wslen = tgtdev->wslen; + } + + switch (dev_pg0->access_status) { + case MPI3_DEVICE0_ASTATUS_NO_ERRORS: + case MPI3_DEVICE0_ASTATUS_PREPARE: + case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION: + case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY: + break; + default: + tgtdev->is_hidden = 1; + break; + } + + switch (tgtdev->dev_type) { + case MPI3_DEVICE_DEVFORM_SAS_SATA: + { + struct mpi3_device0_sas_sata_format *sasinf = + &dev_pg0->device_specific.sas_sata_format; + u16 dev_info = le16_to_cpu(sasinf->device_info); + + tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info; + tgtdev->dev_spec.sas_sata_inf.sas_address = + le64_to_cpu(sasinf->sas_address); + tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num; + tgtdev->dev_spec.sas_sata_inf.attached_phy_id = + sasinf->attached_phy_identifier; + if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) != + MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE) + tgtdev->is_hidden = 1; + else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET | + MPI3_SAS_DEVICE_INFO_SSP_TARGET))) + tgtdev->is_hidden = 1; + + if (((tgtdev->devpg0_flag & + MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED) + && (tgtdev->devpg0_flag & + MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) || + (tgtdev->parent_handle == 0xFFFF)) + tgtdev->non_stl = 1; + if (tgtdev->dev_spec.sas_sata_inf.hba_port) + tgtdev->dev_spec.sas_sata_inf.hba_port->port_id = + dev_pg0->io_unit_port; + break; + } + case MPI3_DEVICE_DEVFORM_PCIE: + { + struct mpi3_device0_pcie_format *pcieinf = + &dev_pg0->device_specific.pcie_format; + u16 dev_info = le16_to_cpu(pcieinf->device_info); + + tgtdev->dev_spec.pcie_inf.dev_info = dev_info; + tgtdev->dev_spec.pcie_inf.capb = + le32_to_cpu(pcieinf->capabilities); + tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS; + /* 2^12 = 4096 */ + tgtdev->dev_spec.pcie_inf.pgsz = 12; + if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) { + tgtdev->dev_spec.pcie_inf.mdts = + le32_to_cpu(pcieinf->maximum_data_transfer_size); + tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size; + tgtdev->dev_spec.pcie_inf.reset_to = + max_t(u8, pcieinf->controller_reset_to, + MPI3MR_INTADMCMD_TIMEOUT); + tgtdev->dev_spec.pcie_inf.abort_to = + max_t(u8, pcieinf->nvme_abort_to, + MPI3MR_INTADMCMD_TIMEOUT); + } + if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024)) + tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024); + if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) != + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) && + ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) != + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE)) + tgtdev->is_hidden = 1; + tgtdev->non_stl = 1; + if (!mrioc->shost) + break; + prot_mask = scsi_host_get_prot(mrioc->shost); 
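+			/*
+			 * The 0x77 mask below keeps the DIF and the remaining
+			 * DIX capability bits but clears
+			 * SHOST_DIX_TYPE0_PROTECTION, matching the
+			 * "Disabling DIX0 prot capability" message logged
+			 * below.
+			 */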
+ if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) { + scsi_host_set_prot(mrioc->shost, prot_mask & 0x77); + ioc_info(mrioc, + "%s : Disabling DIX0 prot capability\n", __func__); + ioc_info(mrioc, + "because HBA does not support DIX0 operation on NVME drives\n"); + } + break; + } + case MPI3_DEVICE_DEVFORM_VD: + { + struct mpi3_device0_vd_format *vdinf = + &dev_pg0->device_specific.vd_format; + struct mpi3mr_throttle_group_info *tg = NULL; + u16 vdinf_io_throttle_group = + le16_to_cpu(vdinf->io_throttle_group); + + tgtdev->dev_spec.vd_inf.state = vdinf->vd_state; + if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE) + tgtdev->is_hidden = 1; + tgtdev->non_stl = 1; + tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group; + tgtdev->dev_spec.vd_inf.tg_high = + le16_to_cpu(vdinf->io_throttle_group_high) * 2048; + tgtdev->dev_spec.vd_inf.tg_low = + le16_to_cpu(vdinf->io_throttle_group_low) * 2048; + if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) { + tg = mrioc->throttle_groups + vdinf_io_throttle_group; + tg->id = vdinf_io_throttle_group; + tg->high = tgtdev->dev_spec.vd_inf.tg_high; + tg->low = tgtdev->dev_spec.vd_inf.tg_low; + tg->qd_reduction = + tgtdev->dev_spec.vd_inf.tg_qd_reduction; + if (is_added == true) + tg->fw_qd = tgtdev->q_depth; + tg->modified_qd = tgtdev->q_depth; + } + tgtdev->dev_spec.vd_inf.tg = tg; + if (scsi_tgt_priv_data) + scsi_tgt_priv_data->throttle_group = tg; + break; + } + default: + break; + } +} + +/** + * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf + * @mrioc: Adapter instance reference + * @fwevt: Firmware event information. + * + * Process Device status Change event and based on device's new + * information, either expose the device to the upper layers, or + * remove the device from upper layers. + * + * Return: Nothing. + */ +static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc, + struct mpi3mr_fwevt *fwevt) +{ + u16 dev_handle = 0; + u8 uhide = 0, delete = 0, cleanup = 0; + struct mpi3mr_tgt_dev *tgtdev = NULL; + struct mpi3_event_data_device_status_change *evtdata = + (struct mpi3_event_data_device_status_change *)fwevt->event_data; + + dev_handle = le16_to_cpu(evtdata->dev_handle); + ioc_info(mrioc, + "%s :device status change: handle(0x%04x): reason code(0x%x)\n", + __func__, dev_handle, evtdata->reason_code); + switch (evtdata->reason_code) { + case MPI3_EVENT_DEV_STAT_RC_HIDDEN: + delete = 1; + break; + case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN: + uhide = 1; + break; + case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING: + delete = 1; + cleanup = 1; + break; + default: + ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__, + evtdata->reason_code); + break; + } + + tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); + if (!tgtdev) + goto out; + if (uhide) { + tgtdev->is_hidden = 0; + if (!tgtdev->host_exposed) + mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id); + } + + if (delete) + mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); + + if (cleanup) { + mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false); + mpi3mr_tgtdev_put(tgtdev); + } + +out: + if (tgtdev) + mpi3mr_tgtdev_put(tgtdev); +} + +/** + * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf + * @mrioc: Adapter instance reference + * @dev_pg0: New device page0 + * + * Process Device Info Change event and based on device's new + * information, either expose the device to the upper layers, or + * remove the device from upper layers or update the details of + * the device. + * + * Return: Nothing. 
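+ *
+ * Note: depending on the updated is_hidden/host_exposed combination the
+ * device is either scanned into the midlayer, removed from it, or its
+ * SCSI devices are refreshed via mpi3mr_update_sdev().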
+ */ +static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc, + struct mpi3_device_page0 *dev_pg0) +{ + struct mpi3mr_tgt_dev *tgtdev = NULL; + u16 dev_handle = 0, perst_id = 0; + + perst_id = le16_to_cpu(dev_pg0->persistent_id); + dev_handle = le16_to_cpu(dev_pg0->dev_handle); + ioc_info(mrioc, + "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n", + __func__, dev_handle, perst_id); + tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); + if (!tgtdev) + goto out; + mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false); + if (!tgtdev->is_hidden && !tgtdev->host_exposed) + mpi3mr_report_tgtdev_to_host(mrioc, perst_id); + if (tgtdev->is_hidden && tgtdev->host_exposed) + mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); + if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget) + starget_for_each_device(tgtdev->starget, (void *)tgtdev, + mpi3mr_update_sdev); +out: + if (tgtdev) + mpi3mr_tgtdev_put(tgtdev); +} + +/** + * mpi3mr_free_enclosure_list - release enclosures + * @mrioc: Adapter instance reference + * + * Free memory allocated during encloure add. + * + * Return nothing. + */ +void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc) +{ + struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next; + + list_for_each_entry_safe(enclosure_dev, + enclosure_dev_next, &mrioc->enclosure_list, list) { + list_del(&enclosure_dev->list); + kfree(enclosure_dev); + } +} + +/** + * mpi3mr_enclosure_find_by_handle - enclosure search by handle + * @mrioc: Adapter instance reference + * @handle: Firmware device handle of the enclosure + * + * This searches for enclosure device based on handle, then returns the + * enclosure object. + * + * Return: Enclosure object reference or NULL + */ +struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle( + struct mpi3mr_ioc *mrioc, u16 handle) +{ + struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL; + + list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) { + if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle) + continue; + r = enclosure_dev; + goto out; + } +out: + return r; +} + +/** + * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event + * @mrioc: Adapter instance reference + * @encl_pg0: Enclosure page 0. + * @is_added: Added event or not + * + * Return nothing. + */ +static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc, + struct mpi3_enclosure_page0 *encl_pg0, u8 is_added) +{ + char *reason_str = NULL; + + if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK)) + return; + + if (is_added) + reason_str = "enclosure added"; + else + reason_str = "enclosure dev status changed"; + + ioc_info(mrioc, + "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n", + reason_str, le16_to_cpu(encl_pg0->enclosure_handle), + (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id)); + ioc_info(mrioc, + "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n", + le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port, + le16_to_cpu(encl_pg0->flags), + ((le16_to_cpu(encl_pg0->flags) & + MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4)); +} + +/** + * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf + * @mrioc: Adapter instance reference + * @fwevt: Firmware event reference + * + * Prints information about the Enclosure device status or + * Enclosure add events if logging is enabled and add or remove + * the enclosure from the controller's internal list of + * enclosures. + * + * Return: Nothing. 
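+ *
+ * Note: an enclosure node is allocated the first time a present
+ * enclosure is reported and freed when it is reported as no longer
+ * present; otherwise only the cached enclosure page 0 is refreshed.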
+ */ +static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc, + struct mpi3mr_fwevt *fwevt) +{ + struct mpi3mr_enclosure_node *enclosure_dev = NULL; + struct mpi3_enclosure_page0 *encl_pg0; + u16 encl_handle; + u8 added, present; + + encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data; + added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0; + mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added); + + + encl_handle = le16_to_cpu(encl_pg0->enclosure_handle); + present = ((le16_to_cpu(encl_pg0->flags) & + MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4); + + if (encl_handle) + enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc, + encl_handle); + if (!enclosure_dev && present) { + enclosure_dev = + kzalloc(sizeof(struct mpi3mr_enclosure_node), + GFP_KERNEL); + if (!enclosure_dev) + return; + list_add_tail(&enclosure_dev->list, + &mrioc->enclosure_list); + } + if (enclosure_dev) { + if (!present) { + list_del(&enclosure_dev->list); + kfree(enclosure_dev); + } else + memcpy(&enclosure_dev->pg0, encl_pg0, + sizeof(enclosure_dev->pg0)); + + } +} + +/** + * mpi3mr_sastopochg_evt_debug - SASTopoChange details + * @mrioc: Adapter instance reference + * @event_data: SAS topology change list event data + * + * Prints information about the SAS topology change event. + * + * Return: Nothing. + */ +static void +mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc, + struct mpi3_event_data_sas_topology_change_list *event_data) +{ + int i; + u16 handle; + u8 reason_code, phy_number; + char *status_str = NULL; + u8 link_rate, prev_link_rate; + + switch (event_data->exp_status) { + case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING: + status_str = "remove"; + break; + case MPI3_EVENT_SAS_TOPO_ES_RESPONDING: + status_str = "responding"; + break; + case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: + status_str = "remove delay"; + break; + case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER: + status_str = "direct attached"; + break; + default: + status_str = "unknown status"; + break; + } + ioc_info(mrioc, "%s :sas topology change: (%s)\n", + __func__, status_str); + ioc_info(mrioc, + "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n", + __func__, le16_to_cpu(event_data->expander_dev_handle), + event_data->io_unit_port, + le16_to_cpu(event_data->enclosure_handle), + event_data->start_phy_num, event_data->num_entries); + for (i = 0; i < event_data->num_entries; i++) { + handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle); + if (!handle) + continue; + phy_number = event_data->start_phy_num + i; + reason_code = event_data->phy_entry[i].status & + MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; + switch (reason_code) { + case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: + status_str = "target remove"; + break; + case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: + status_str = "delay target remove"; + break; + case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: + status_str = "link status change"; + break; + case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE: + status_str = "link status no change"; + break; + case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: + status_str = "target responding"; + break; + default: + status_str = "unknown"; + break; + } + link_rate = event_data->phy_entry[i].link_rate >> 4; + prev_link_rate = event_data->phy_entry[i].link_rate & 0xF; + ioc_info(mrioc, + "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n", + __func__, phy_number, handle, status_str, link_rate, + prev_link_rate); + } +} + +/** + * 
mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf + * @mrioc: Adapter instance reference + * @fwevt: Firmware event reference + * + * Prints information about the SAS topology change event and + * for "not responding" event code, removes the device from the + * upper layers. + * + * Return: Nothing. + */ +static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc, + struct mpi3mr_fwevt *fwevt) +{ + struct mpi3_event_data_sas_topology_change_list *event_data = + (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data; + int i; + u16 handle; + u8 reason_code; + u64 exp_sas_address = 0, parent_sas_address = 0; + struct mpi3mr_hba_port *hba_port = NULL; + struct mpi3mr_tgt_dev *tgtdev = NULL; + struct mpi3mr_sas_node *sas_expander = NULL; + unsigned long flags; + u8 link_rate, prev_link_rate, parent_phy_number; + + mpi3mr_sastopochg_evt_debug(mrioc, event_data); + if (mrioc->sas_transport_enabled) { + hba_port = mpi3mr_get_hba_port_by_id(mrioc, + event_data->io_unit_port); + if (le16_to_cpu(event_data->expander_dev_handle)) { + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + sas_expander = __mpi3mr_expander_find_by_handle(mrioc, + le16_to_cpu(event_data->expander_dev_handle)); + if (sas_expander) { + exp_sas_address = sas_expander->sas_address; + hba_port = sas_expander->hba_port; + } + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + parent_sas_address = exp_sas_address; + } else + parent_sas_address = mrioc->sas_hba.sas_address; + } + + for (i = 0; i < event_data->num_entries; i++) { + if (fwevt->discard) + return; + handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle); + if (!handle) + continue; + tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); + if (!tgtdev) + continue; + + reason_code = event_data->phy_entry[i].status & + MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; + + switch (reason_code) { + case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: + if (tgtdev->host_exposed) + mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); + mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false); + mpi3mr_tgtdev_put(tgtdev); + break; + case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: + case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: + case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE: + { + if (!mrioc->sas_transport_enabled || tgtdev->non_stl + || tgtdev->is_hidden) + break; + link_rate = event_data->phy_entry[i].link_rate >> 4; + prev_link_rate = event_data->phy_entry[i].link_rate & 0xF; + if (link_rate == prev_link_rate) + break; + if (!parent_sas_address) + break; + parent_phy_number = event_data->start_phy_num + i; + mpi3mr_update_links(mrioc, parent_sas_address, handle, + parent_phy_number, link_rate, hba_port); + break; + } + default: + break; + } + if (tgtdev) + mpi3mr_tgtdev_put(tgtdev); + } + + if (mrioc->sas_transport_enabled && (event_data->exp_status == + MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) { + if (sas_expander) + mpi3mr_expander_remove(mrioc, exp_sas_address, + hba_port); + } +} + +/** + * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details + * @mrioc: Adapter instance reference + * @event_data: PCIe topology change list event data + * + * Prints information about the PCIe topology change event. + * + * Return: Nothing. 
+ */ +static void +mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc, + struct mpi3_event_data_pcie_topology_change_list *event_data) +{ + int i; + u16 handle; + u16 reason_code; + u8 port_number; + char *status_str = NULL; + u8 link_rate, prev_link_rate; + + switch (event_data->switch_status) { + case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING: + status_str = "remove"; + break; + case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING: + status_str = "responding"; + break; + case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING: + status_str = "remove delay"; + break; + case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH: + status_str = "direct attached"; + break; + default: + status_str = "unknown status"; + break; + } + ioc_info(mrioc, "%s :pcie topology change: (%s)\n", + __func__, status_str); + ioc_info(mrioc, + "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n", + __func__, le16_to_cpu(event_data->switch_dev_handle), + le16_to_cpu(event_data->enclosure_handle), + event_data->start_port_num, event_data->num_entries); + for (i = 0; i < event_data->num_entries; i++) { + handle = + le16_to_cpu(event_data->port_entry[i].attached_dev_handle); + if (!handle) + continue; + port_number = event_data->start_port_num + i; + reason_code = event_data->port_entry[i].port_status; + switch (reason_code) { + case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: + status_str = "target remove"; + break; + case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: + status_str = "delay target remove"; + break; + case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: + status_str = "link status change"; + break; + case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE: + status_str = "link status no change"; + break; + case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: + status_str = "target responding"; + break; + default: + status_str = "unknown"; + break; + } + link_rate = event_data->port_entry[i].current_port_info & + MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; + prev_link_rate = event_data->port_entry[i].previous_port_info & + MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; + ioc_info(mrioc, + "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n", + __func__, port_number, handle, status_str, link_rate, + prev_link_rate); + } +} + +/** + * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf + * @mrioc: Adapter instance reference + * @fwevt: Firmware event reference + * + * Prints information about the PCIe topology change event and + * for "not responding" event code, removes the device from the + * upper layers. + * + * Return: Nothing. 
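+ *
+ * Note: unlike the SAS variant, only the "not responding" port status
+ * triggers any action here; link state changes are logged by the debug
+ * helper but not otherwise acted upon.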
+ */ +static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc, + struct mpi3mr_fwevt *fwevt) +{ + struct mpi3_event_data_pcie_topology_change_list *event_data = + (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data; + int i; + u16 handle; + u8 reason_code; + struct mpi3mr_tgt_dev *tgtdev = NULL; + + mpi3mr_pcietopochg_evt_debug(mrioc, event_data); + + for (i = 0; i < event_data->num_entries; i++) { + if (fwevt->discard) + return; + handle = + le16_to_cpu(event_data->port_entry[i].attached_dev_handle); + if (!handle) + continue; + tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); + if (!tgtdev) + continue; + + reason_code = event_data->port_entry[i].port_status; + + switch (reason_code) { + case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: + if (tgtdev->host_exposed) + mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); + mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false); + mpi3mr_tgtdev_put(tgtdev); + break; + default: + break; + } + if (tgtdev) + mpi3mr_tgtdev_put(tgtdev); + } +} + +/** + * mpi3mr_logdata_evt_bh - Log data event bottomhalf + * @mrioc: Adapter instance reference + * @fwevt: Firmware event reference + * + * Extracts the event data and calls application interfacing + * function to process the event further. + * + * Return: Nothing. + */ +static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc, + struct mpi3mr_fwevt *fwevt) +{ + mpi3mr_app_save_logdata(mrioc, fwevt->event_data, + fwevt->event_data_size); +} + +/** + * mpi3mr_update_sdev_qd - Update SCSI device queue depath + * @sdev: SCSI device reference + * @data: Queue depth reference + * + * This is an iterator function called for each SCSI device in a + * target to update the QD of each SCSI device. + * + * Return: Nothing. + */ +static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data) +{ + u16 *q_depth = (u16 *)data; + + scsi_change_queue_depth(sdev, (int)*q_depth); + sdev->max_queue_depth = sdev->queue_depth; +} + +/** + * mpi3mr_set_qd_for_all_vd_in_tg -set QD for TG VDs + * @mrioc: Adapter instance reference + * @tg: Throttle group information pointer + * + * Accessor to reduce QD for each device associated with the + * given throttle group. + * + * Return: None. + */ +static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc, + struct mpi3mr_throttle_group_info *tg) +{ + unsigned long flags; + struct mpi3mr_tgt_dev *tgtdev; + struct mpi3mr_stgt_priv_data *tgt_priv; + + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { + if (tgtdev->starget && tgtdev->starget->hostdata) { + tgt_priv = tgtdev->starget->hostdata; + if (tgt_priv->throttle_group == tg) { + dprint_event_bh(mrioc, + "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n", + tgt_priv->perst_id, tgtdev->q_depth, + tg->modified_qd); + starget_for_each_device(tgtdev->starget, + (void *)&tg->modified_qd, + mpi3mr_update_sdev_qd); + } + } + } + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); +} + +/** + * mpi3mr_fwevt_bh - Firmware event bottomhalf handler + * @mrioc: Adapter instance reference + * @fwevt: Firmware event reference + * + * Identifies the firmware event and calls corresponding bottomg + * half handler and sends event acknowledgment if required. + * + * Return: Nothing. 
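+ *
+ * Note: events are discarded (without acknowledgment) while driver
+ * processing is stopped or the controller is unrecoverable; events
+ * that need no bottom half work are still acknowledged when send_ack
+ * is set, and the kref_init() reference is dropped on exit.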
+ */ +static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, + struct mpi3mr_fwevt *fwevt) +{ + struct mpi3_device_page0 *dev_pg0 = NULL; + u16 perst_id, handle, dev_info; + struct mpi3_device0_sas_sata_format *sasinf = NULL; + + mpi3mr_fwevt_del_from_list(mrioc, fwevt); + mrioc->current_event = fwevt; + + if (mrioc->stop_drv_processing) + goto out; + + if (mrioc->unrecoverable) { + dprint_event_bh(mrioc, + "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n", + fwevt->event_id); + goto out; + } + + if (!fwevt->process_evt) + goto evt_ack; + + switch (fwevt->event_id) { + case MPI3_EVENT_DEVICE_ADDED: + { + dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data; + perst_id = le16_to_cpu(dev_pg0->persistent_id); + handle = le16_to_cpu(dev_pg0->dev_handle); + if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID) + mpi3mr_report_tgtdev_to_host(mrioc, perst_id); + else if (mrioc->sas_transport_enabled && + (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) { + sasinf = &dev_pg0->device_specific.sas_sata_format; + dev_info = le16_to_cpu(sasinf->device_info); + if (!mrioc->sas_hba.num_phys) + mpi3mr_sas_host_add(mrioc); + else + mpi3mr_sas_host_refresh(mrioc); + + if (mpi3mr_is_expander_device(dev_info)) + mpi3mr_expander_add(mrioc, handle); + } + break; + } + case MPI3_EVENT_DEVICE_INFO_CHANGED: + { + dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data; + perst_id = le16_to_cpu(dev_pg0->persistent_id); + if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID) + mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0); + break; + } + case MPI3_EVENT_DEVICE_STATUS_CHANGE: + { + mpi3mr_devstatuschg_evt_bh(mrioc, fwevt); + break; + } + case MPI3_EVENT_ENCL_DEVICE_ADDED: + case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: + { + mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt); + break; + } + + case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + { + mpi3mr_sastopochg_evt_bh(mrioc, fwevt); + break; + } + case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: + { + mpi3mr_pcietopochg_evt_bh(mrioc, fwevt); + break; + } + case MPI3_EVENT_LOG_DATA: + { + mpi3mr_logdata_evt_bh(mrioc, fwevt); + break; + } + case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION: + { + struct mpi3mr_throttle_group_info *tg; + + tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data; + dprint_event_bh(mrioc, + "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n", + tg->id, tg->need_qd_reduction); + if (tg->need_qd_reduction) { + mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg); + tg->need_qd_reduction = 0; + } + break; + } + case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH: + { + while (mrioc->device_refresh_on) + msleep(500); + + dprint_event_bh(mrioc, + "scan for non responding and newly added devices after soft reset started\n"); + if (mrioc->sas_transport_enabled) { + mpi3mr_refresh_sas_ports(mrioc); + mpi3mr_refresh_expanders(mrioc); + } + mpi3mr_rfresh_tgtdevs(mrioc); + ioc_info(mrioc, + "scan for non responding and newly added devices after soft reset completed\n"); + break; + } + default: + break; + } + +evt_ack: + if (fwevt->send_ack) + mpi3mr_process_event_ack(mrioc, fwevt->event_id, + fwevt->evt_ctx); +out: + /* Put fwevt reference count to neutralize kref_init increment */ + mpi3mr_fwevt_put(fwevt); + mrioc->current_event = NULL; +} + +/** + * mpi3mr_fwevt_worker - Firmware event worker + * @work: Work struct containing firmware event + * + * Extracts the firmware event and calls mpi3mr_fwevt_bh. + * + * Return: Nothing. 
+ */ +static void mpi3mr_fwevt_worker(struct work_struct *work) +{ + struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt, + work); + mpi3mr_fwevt_bh(fwevt->mrioc, fwevt); + /* + * Put fwevt reference count after + * dequeuing it from worker queue + */ + mpi3mr_fwevt_put(fwevt); +} + +/** + * mpi3mr_create_tgtdev - Create and add a target device + * @mrioc: Adapter instance reference + * @dev_pg0: Device Page 0 data + * + * If the device specified by the device page 0 data is not + * present in the driver's internal list, allocate the memory + * for the device, populate the data and add to the list, else + * update the device data. The key is persistent ID. + * + * Return: 0 on success, -ENOMEM on memory allocation failure + */ +static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc, + struct mpi3_device_page0 *dev_pg0) +{ + int retval = 0; + struct mpi3mr_tgt_dev *tgtdev = NULL; + u16 perst_id = 0; + unsigned long flags; + + perst_id = le16_to_cpu(dev_pg0->persistent_id); + if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID) + return retval; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); + if (tgtdev) + tgtdev->state = MPI3MR_DEV_CREATED; + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + + if (tgtdev) { + mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); + mpi3mr_tgtdev_put(tgtdev); + } else { + tgtdev = mpi3mr_alloc_tgtdev(); + if (!tgtdev) + return -ENOMEM; + mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); + mpi3mr_tgtdev_add_to_list(mrioc, tgtdev); + } + + return retval; +} + +/** + * mpi3mr_flush_delayed_cmd_lists - Flush pending commands + * @mrioc: Adapter instance reference + * + * Flush pending commands in the delayed lists due to a + * controller reset or driver removal as a cleanup. + * + * Return: Nothing + */ +void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc) +{ + struct delayed_dev_rmhs_node *_rmhs_node; + struct delayed_evt_ack_node *_evtack_node; + + dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n"); + while (!list_empty(&mrioc->delayed_rmhs_list)) { + _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next, + struct delayed_dev_rmhs_node, list); + list_del(&_rmhs_node->list); + kfree(_rmhs_node); + } + dprint_reset(mrioc, "flushing delayed event ack commands\n"); + while (!list_empty(&mrioc->delayed_evtack_cmds_list)) { + _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next, + struct delayed_evt_ack_node, list); + list_del(&_evtack_node->list); + kfree(_evtack_node); + } +} + +/** + * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion + * @mrioc: Adapter instance reference + * @drv_cmd: Internal command tracker + * + * Issues a target reset TM to the firmware from the device + * removal TM pend list or retry the removal handshake sequence + * based on the IOU control request IOC status. 
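+ * A failed IO unit control request restarts the handshake from the
+ * target reset TM, up to MPI3MR_DEV_RMHS_RETRY_COUNT times; on success
+ * the handle is cleared from removepend_bitmap. In either case, if a
+ * removal is parked on delayed_rmhs_list it is issued next with the
+ * same command tracker, otherwise the tracker is released back to
+ * devrem_bitmap.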
+ * + * Return: Nothing + */ +static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc, + struct mpi3mr_drv_cmd *drv_cmd) +{ + u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; + struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; + + if (drv_cmd->state & MPI3MR_CMD_RESET) + goto clear_drv_cmd; + + ioc_info(mrioc, + "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n", + __func__, drv_cmd->dev_handle, drv_cmd->ioc_status, + drv_cmd->ioc_loginfo); + if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { + if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) { + drv_cmd->retry_count++; + ioc_info(mrioc, + "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n", + __func__, drv_cmd->dev_handle, + drv_cmd->retry_count); + mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, + drv_cmd, drv_cmd->iou_rc); + return; + } + ioc_err(mrioc, + "%s :dev removal handshake failed after all retries: handle(0x%04x)\n", + __func__, drv_cmd->dev_handle); + } else { + ioc_info(mrioc, + "%s :dev removal handshake completed successfully: handle(0x%04x)\n", + __func__, drv_cmd->dev_handle); + clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap); + } + + if (!list_empty(&mrioc->delayed_rmhs_list)) { + delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next, + struct delayed_dev_rmhs_node, list); + drv_cmd->dev_handle = delayed_dev_rmhs->handle; + drv_cmd->retry_count = 0; + drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc; + ioc_info(mrioc, + "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n", + __func__, drv_cmd->dev_handle); + mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd, + drv_cmd->iou_rc); + list_del(&delayed_dev_rmhs->list); + kfree(delayed_dev_rmhs); + return; + } + +clear_drv_cmd: + drv_cmd->state = MPI3MR_CMD_NOTUSED; + drv_cmd->callback = NULL; + drv_cmd->retry_count = 0; + drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; + clear_bit(cmd_idx, mrioc->devrem_bitmap); +} + +/** + * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion + * @mrioc: Adapter instance reference + * @drv_cmd: Internal command tracker + * + * Issues a target reset TM to the firmware from the device + * removal TM pend list or issue IO unit control request as + * part of device removal or hidden acknowledgment handshake. 
+ * + * Return: Nothing + */ +static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc, + struct mpi3mr_drv_cmd *drv_cmd) +{ + struct mpi3_iounit_control_request iou_ctrl; + u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; + struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; + int retval; + + if (drv_cmd->state & MPI3MR_CMD_RESET) + goto clear_drv_cmd; + + if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID) + tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; + + if (tm_reply) + pr_info(IOCNAME + "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n", + mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status, + drv_cmd->ioc_loginfo, + le32_to_cpu(tm_reply->termination_count)); + + pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n", + mrioc->name, drv_cmd->dev_handle, cmd_idx); + + memset(&iou_ctrl, 0, sizeof(iou_ctrl)); + + drv_cmd->state = MPI3MR_CMD_PENDING; + drv_cmd->is_waiting = 0; + drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou; + iou_ctrl.operation = drv_cmd->iou_rc; + iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle); + iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag); + iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL; + + retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl), + 1); + if (retval) { + pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n", + mrioc->name); + goto clear_drv_cmd; + } + + return; +clear_drv_cmd: + drv_cmd->state = MPI3MR_CMD_NOTUSED; + drv_cmd->callback = NULL; + drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; + drv_cmd->retry_count = 0; + clear_bit(cmd_idx, mrioc->devrem_bitmap); +} + +/** + * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal + * @mrioc: Adapter instance reference + * @handle: Device handle + * @cmdparam: Internal command tracker + * @iou_rc: IO unit reason code + * + * Issues a target reset TM to the firmware or add it to a pend + * list as part of device removal or hidden acknowledgment + * handshake. 
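+ * If all MPI3MR_NUM_DEVRMCMD command trackers are in use, the request
+ * is parked on delayed_rmhs_list and issued later from the completion
+ * path of an earlier handshake.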
+ * + * Return: Nothing + */ +static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle, + struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc) +{ + struct mpi3_scsi_task_mgmt_request tm_req; + int retval = 0; + u16 cmd_idx = MPI3MR_NUM_DEVRMCMD; + u8 retrycount = 5; + struct mpi3mr_drv_cmd *drv_cmd = cmdparam; + struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; + struct mpi3mr_tgt_dev *tgtdev = NULL; + unsigned long flags; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle); + if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE)) + tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED; + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + + if (drv_cmd) + goto issue_cmd; + do { + cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap, + MPI3MR_NUM_DEVRMCMD); + if (cmd_idx < MPI3MR_NUM_DEVRMCMD) { + if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap)) + break; + cmd_idx = MPI3MR_NUM_DEVRMCMD; + } + } while (retrycount--); + + if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) { + delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs), + GFP_ATOMIC); + if (!delayed_dev_rmhs) + return; + INIT_LIST_HEAD(&delayed_dev_rmhs->list); + delayed_dev_rmhs->handle = handle; + delayed_dev_rmhs->iou_rc = iou_rc; + list_add_tail(&delayed_dev_rmhs->list, + &mrioc->delayed_rmhs_list); + ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n", + __func__, handle); + return; + } + drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx]; + +issue_cmd: + cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; + ioc_info(mrioc, + "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n", + __func__, handle, cmd_idx); + + memset(&tm_req, 0, sizeof(tm_req)); + if (drv_cmd->state & MPI3MR_CMD_PENDING) { + ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); + goto out; + } + drv_cmd->state = MPI3MR_CMD_PENDING; + drv_cmd->is_waiting = 0; + drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm; + drv_cmd->dev_handle = handle; + drv_cmd->iou_rc = iou_rc; + tm_req.dev_handle = cpu_to_le16(handle); + tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag); + tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID); + tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; + + set_bit(handle, mrioc->removepend_bitmap); + retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); + if (retval) { + ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n", + __func__); + goto out_failed; + } +out: + return; +out_failed: + drv_cmd->state = MPI3MR_CMD_NOTUSED; + drv_cmd->callback = NULL; + drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; + drv_cmd->retry_count = 0; + clear_bit(cmd_idx, mrioc->devrem_bitmap); +} + +/** + * mpi3mr_complete_evt_ack - event ack request completion + * @mrioc: Adapter instance reference + * @drv_cmd: Internal command tracker + * + * This is the completion handler for non blocking event + * acknowledgment sent to the firmware and this will issue any + * pending event acknowledgment request. 
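+ *
+ * A failed acknowledgment is only logged. If delayed_evtack_cmds_list
+ * is not empty, the next pending acknowledgment is issued by reusing
+ * this command tracker; otherwise the tracker is released back to
+ * evtack_cmds_bitmap.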
+ * + * Return: Nothing + */ +static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc, + struct mpi3mr_drv_cmd *drv_cmd) +{ + u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; + struct delayed_evt_ack_node *delayed_evtack = NULL; + + if (drv_cmd->state & MPI3MR_CMD_RESET) + goto clear_drv_cmd; + + if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { + dprint_event_th(mrioc, + "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n", + (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK), + drv_cmd->ioc_loginfo); + } + + if (!list_empty(&mrioc->delayed_evtack_cmds_list)) { + delayed_evtack = + list_entry(mrioc->delayed_evtack_cmds_list.next, + struct delayed_evt_ack_node, list); + mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd, + delayed_evtack->event_ctx); + list_del(&delayed_evtack->list); + kfree(delayed_evtack); + return; + } +clear_drv_cmd: + drv_cmd->state = MPI3MR_CMD_NOTUSED; + drv_cmd->callback = NULL; + clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); +} + +/** + * mpi3mr_send_event_ack - Issue event acknwoledgment request + * @mrioc: Adapter instance reference + * @event: MPI3 event id + * @cmdparam: Internal command tracker + * @event_ctx: event context + * + * Issues event acknowledgment request to the firmware if there + * is a free command to send the event ack else it to a pend + * list so that it will be processed on a completion of a prior + * event acknowledgment . + * + * Return: Nothing + */ +static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, + struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx) +{ + struct mpi3_event_ack_request evtack_req; + int retval = 0; + u8 retrycount = 5; + u16 cmd_idx = MPI3MR_NUM_EVTACKCMD; + struct mpi3mr_drv_cmd *drv_cmd = cmdparam; + struct delayed_evt_ack_node *delayed_evtack = NULL; + + if (drv_cmd) { + dprint_event_th(mrioc, + "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", + event, event_ctx); + goto issue_cmd; + } + dprint_event_th(mrioc, + "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", + event, event_ctx); + do { + cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap, + MPI3MR_NUM_EVTACKCMD); + if (cmd_idx < MPI3MR_NUM_EVTACKCMD) { + if (!test_and_set_bit(cmd_idx, + mrioc->evtack_cmds_bitmap)) + break; + cmd_idx = MPI3MR_NUM_EVTACKCMD; + } + } while (retrycount--); + + if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) { + delayed_evtack = kzalloc(sizeof(*delayed_evtack), + GFP_ATOMIC); + if (!delayed_evtack) + return; + INIT_LIST_HEAD(&delayed_evtack->list); + delayed_evtack->event = event; + delayed_evtack->event_ctx = event_ctx; + list_add_tail(&delayed_evtack->list, + &mrioc->delayed_evtack_cmds_list); + dprint_event_th(mrioc, + "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n", + event, event_ctx); + return; + } + drv_cmd = &mrioc->evtack_cmds[cmd_idx]; + +issue_cmd: + cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; + + memset(&evtack_req, 0, sizeof(evtack_req)); + if (drv_cmd->state & MPI3MR_CMD_PENDING) { + dprint_event_th(mrioc, + "sending event ack failed due to command in use\n"); + goto out; + } + drv_cmd->state = MPI3MR_CMD_PENDING; + drv_cmd->is_waiting = 0; + drv_cmd->callback = mpi3mr_complete_evt_ack; + evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag); + evtack_req.function = MPI3_FUNCTION_EVENT_ACK; + evtack_req.event = event; + evtack_req.event_context = cpu_to_le32(event_ctx); + retval = mpi3mr_admin_request_post(mrioc, &evtack_req, + sizeof(evtack_req), 1); + 
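+ /*
+  * The acknowledgment is not waited for here; completion is handled by
+  * mpi3mr_complete_evt_ack(), which also drains any acknowledgments
+  * queued on delayed_evtack_cmds_list.
+  */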
if (retval) { + dprint_event_th(mrioc, + "posting event ack request is failed\n"); + goto out_failed; + } + + dprint_event_th(mrioc, + "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n", + event, event_ctx); +out: + return; +out_failed: + drv_cmd->state = MPI3MR_CMD_NOTUSED; + drv_cmd->callback = NULL; + clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); +} + +/** + * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf + * @mrioc: Adapter instance reference + * @event_reply: event data + * + * Checks for the reason code and based on that either block I/O + * to device, or unblock I/O to the device, or start the device + * removal handshake with reason as remove with the firmware for + * PCIe devices. + * + * Return: Nothing + */ +static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc, + struct mpi3_event_notification_reply *event_reply) +{ + struct mpi3_event_data_pcie_topology_change_list *topo_evt = + (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data; + int i; + u16 handle; + u8 reason_code; + struct mpi3mr_tgt_dev *tgtdev = NULL; + struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; + + for (i = 0; i < topo_evt->num_entries; i++) { + handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle); + if (!handle) + continue; + reason_code = topo_evt->port_entry[i].port_status; + scsi_tgt_priv_data = NULL; + tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); + if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) + scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) + tgtdev->starget->hostdata; + switch (reason_code) { + case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: + if (scsi_tgt_priv_data) { + scsi_tgt_priv_data->dev_removed = 1; + scsi_tgt_priv_data->dev_removedelay = 0; + atomic_set(&scsi_tgt_priv_data->block_io, 0); + } + mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, + MPI3_CTRL_OP_REMOVE_DEVICE); + break; + case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: + if (scsi_tgt_priv_data) { + scsi_tgt_priv_data->dev_removedelay = 1; + atomic_inc(&scsi_tgt_priv_data->block_io); + } + break; + case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: + if (scsi_tgt_priv_data && + scsi_tgt_priv_data->dev_removedelay) { + scsi_tgt_priv_data->dev_removedelay = 0; + atomic_dec_if_positive + (&scsi_tgt_priv_data->block_io); + } + break; + case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: + default: + break; + } + if (tgtdev) + mpi3mr_tgtdev_put(tgtdev); + } +} + +/** + * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf + * @mrioc: Adapter instance reference + * @event_reply: event data + * + * Checks for the reason code and based on that either block I/O + * to device, or unblock I/O to the device, or start the device + * removal handshake with reason as remove with the firmware for + * SAS/SATA devices. 
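+ *
+ * The per-phy reason code is taken from phy_entry[i].status masked
+ * with MPI3_EVENT_SAS_TOPO_PHY_RC_MASK: NOT_RESPONDING starts the
+ * removal handshake with MPI3_CTRL_OP_REMOVE_DEVICE,
+ * DELAY_NOT_RESPONDING blocks I/O and RESPONDING unblocks it.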
+ * + * Return: Nothing + */ +static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc, + struct mpi3_event_notification_reply *event_reply) +{ + struct mpi3_event_data_sas_topology_change_list *topo_evt = + (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data; + int i; + u16 handle; + u8 reason_code; + struct mpi3mr_tgt_dev *tgtdev = NULL; + struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; + + for (i = 0; i < topo_evt->num_entries; i++) { + handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle); + if (!handle) + continue; + reason_code = topo_evt->phy_entry[i].status & + MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; + scsi_tgt_priv_data = NULL; + tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); + if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) + scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) + tgtdev->starget->hostdata; + switch (reason_code) { + case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: + if (scsi_tgt_priv_data) { + scsi_tgt_priv_data->dev_removed = 1; + scsi_tgt_priv_data->dev_removedelay = 0; + atomic_set(&scsi_tgt_priv_data->block_io, 0); + } + mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, + MPI3_CTRL_OP_REMOVE_DEVICE); + break; + case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: + if (scsi_tgt_priv_data) { + scsi_tgt_priv_data->dev_removedelay = 1; + atomic_inc(&scsi_tgt_priv_data->block_io); + } + break; + case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: + if (scsi_tgt_priv_data && + scsi_tgt_priv_data->dev_removedelay) { + scsi_tgt_priv_data->dev_removedelay = 0; + atomic_dec_if_positive + (&scsi_tgt_priv_data->block_io); + } + break; + case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: + default: + break; + } + if (tgtdev) + mpi3mr_tgtdev_put(tgtdev); + } +} + +/** + * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf + * @mrioc: Adapter instance reference + * @event_reply: event data + * + * Checks for the reason code and based on that either block I/O + * to device, or unblock I/O to the device, or start the device + * removal handshake with reason as remove/hide acknowledgment + * with the firmware. 
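+ *
+ * Internal device/IT nexus reset start blocks I/O and the matching
+ * reset completion unblocks it. MPI3_EVENT_DEV_STAT_RC_HIDDEN marks
+ * the device hidden and starts the handshake with
+ * MPI3_CTRL_OP_HIDDEN_ACK, while
+ * MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING starts it with
+ * MPI3_CTRL_OP_REMOVE_DEVICE.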
+ * + * Return: Nothing + */ +static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, + struct mpi3_event_notification_reply *event_reply) +{ + u16 dev_handle = 0; + u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0; + struct mpi3mr_tgt_dev *tgtdev = NULL; + struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; + struct mpi3_event_data_device_status_change *evtdata = + (struct mpi3_event_data_device_status_change *)event_reply->event_data; + + if (mrioc->stop_drv_processing) + goto out; + + dev_handle = le16_to_cpu(evtdata->dev_handle); + + switch (evtdata->reason_code) { + case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT: + case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT: + block = 1; + break; + case MPI3_EVENT_DEV_STAT_RC_HIDDEN: + delete = 1; + hide = 1; + break; + case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING: + delete = 1; + remove = 1; + break; + case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP: + case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP: + ublock = 1; + break; + default: + break; + } + + tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); + if (!tgtdev) + goto out; + if (hide) + tgtdev->is_hidden = hide; + if (tgtdev->starget && tgtdev->starget->hostdata) { + scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) + tgtdev->starget->hostdata; + if (block) + atomic_inc(&scsi_tgt_priv_data->block_io); + if (delete) + scsi_tgt_priv_data->dev_removed = 1; + if (ublock) + atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); + } + if (remove) + mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, + MPI3_CTRL_OP_REMOVE_DEVICE); + if (hide) + mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, + MPI3_CTRL_OP_HIDDEN_ACK); + +out: + if (tgtdev) + mpi3mr_tgtdev_put(tgtdev); +} + +/** + * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf + * @mrioc: Adapter instance reference + * @event_reply: event data + * + * Blocks and unblocks host level I/O based on the reason code + * + * Return: Nothing + */ +static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc, + struct mpi3_event_notification_reply *event_reply) +{ + struct mpi3_event_data_prepare_for_reset *evtdata = + (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data; + + if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) { + dprint_event_th(mrioc, + "prepare for reset event top half with rc=start\n"); + if (mrioc->prepare_for_reset) + return; + mrioc->prepare_for_reset = 1; + mrioc->prepare_for_reset_timeout_counter = 0; + } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) { + dprint_event_th(mrioc, + "prepare for reset top half with rc=abort\n"); + mrioc->prepare_for_reset = 0; + mrioc->prepare_for_reset_timeout_counter = 0; + } + if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) + == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) + mpi3mr_send_event_ack(mrioc, event_reply->event, NULL, + le32_to_cpu(event_reply->event_context)); +} + +/** + * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf + * @mrioc: Adapter instance reference + * @event_reply: event data + * + * Identifies the new shutdown timeout value and update. 
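+ *
+ * A zero shutdown timeout in the event data is rejected with a
+ * warning; otherwise mrioc->facts.shutdown_timeout is updated to the
+ * newly reported value.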
+ * + * Return: Nothing + */ +static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc, + struct mpi3_event_notification_reply *event_reply) +{ + struct mpi3_event_data_energy_pack_change *evtdata = + (struct mpi3_event_data_energy_pack_change *)event_reply->event_data; + u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout); + + if (shutdown_timeout <= 0) { + ioc_warn(mrioc, + "%s :Invalid Shutdown Timeout received = %d\n", + __func__, shutdown_timeout); + return; + } + + ioc_info(mrioc, + "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n", + __func__, mrioc->facts.shutdown_timeout, shutdown_timeout); + mrioc->facts.shutdown_timeout = shutdown_timeout; +} + +/** + * mpi3mr_cablemgmt_evt_th - Cable management event tophalf + * @mrioc: Adapter instance reference + * @event_reply: event data + * + * Displays Cable manegemt event details. + * + * Return: Nothing + */ +static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc, + struct mpi3_event_notification_reply *event_reply) +{ + struct mpi3_event_data_cable_management *evtdata = + (struct mpi3_event_data_cable_management *)event_reply->event_data; + + switch (evtdata->status) { + case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER: + { + ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n" + "Devices connected to this cable are not detected.\n" + "This cable requires %d mW of power.\n", + evtdata->receptacle_id, + le32_to_cpu(evtdata->active_cable_power_requirement)); + break; + } + case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED: + { + ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n", + evtdata->receptacle_id); + break; + } + default: + break; + } +} + +/** + * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event + * @mrioc: Adapter instance reference + * + * Add driver specific event to make sure that the driver won't process the + * events until all the devices are refreshed during soft reset. + * + * Return: Nothing + */ +void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc) +{ + struct mpi3mr_fwevt *fwevt = NULL; + + fwevt = mpi3mr_alloc_fwevt(0); + if (!fwevt) { + dprint_event_th(mrioc, + "failed to schedule bottom half handler for event(0x%02x)\n", + MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH); + return; + } + fwevt->mrioc = mrioc; + fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH; + fwevt->send_ack = 0; + fwevt->process_evt = 1; + fwevt->evt_ctx = 0; + fwevt->event_data_size = 0; + mpi3mr_fwevt_add_to_list(mrioc, fwevt); +} + +/** + * mpi3mr_os_handle_events - Firmware event handler + * @mrioc: Adapter instance reference + * @event_reply: event data + * + * Identify whteher the event has to handled and acknowledged + * and either process the event in the tophalf and/or schedule a + * bottom half through mpi3mr_fwevt_worker. 
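+ *
+ * Device add, device status change and SAS/PCIe topology change
+ * events are pre-processed in the top half and also queued for the
+ * bottom half. MPI3_EVENT_PREPARE_FOR_RESET is handled entirely in
+ * the top half, which sends its own acknowledgment when one is
+ * required. When bottom half processing or an acknowledgment is
+ * needed, the event data is copied into a freshly allocated fwevt and
+ * added to the firmware event list.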
+ * + * Return: Nothing + */ +void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, + struct mpi3_event_notification_reply *event_reply) +{ + u16 evt_type, sz; + struct mpi3mr_fwevt *fwevt = NULL; + bool ack_req = 0, process_evt_bh = 0; + + if (mrioc->stop_drv_processing) + return; + + if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) + == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) + ack_req = 1; + + evt_type = event_reply->event; + + switch (evt_type) { + case MPI3_EVENT_DEVICE_ADDED: + { + struct mpi3_device_page0 *dev_pg0 = + (struct mpi3_device_page0 *)event_reply->event_data; + if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) + ioc_err(mrioc, + "%s :Failed to add device in the device add event\n", + __func__); + else + process_evt_bh = 1; + break; + } + case MPI3_EVENT_DEVICE_STATUS_CHANGE: + { + process_evt_bh = 1; + mpi3mr_devstatuschg_evt_th(mrioc, event_reply); + break; + } + case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + { + process_evt_bh = 1; + mpi3mr_sastopochg_evt_th(mrioc, event_reply); + break; + } + case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: + { + process_evt_bh = 1; + mpi3mr_pcietopochg_evt_th(mrioc, event_reply); + break; + } + case MPI3_EVENT_PREPARE_FOR_RESET: + { + mpi3mr_preparereset_evt_th(mrioc, event_reply); + ack_req = 0; + break; + } + case MPI3_EVENT_DEVICE_INFO_CHANGED: + case MPI3_EVENT_LOG_DATA: + case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: + case MPI3_EVENT_ENCL_DEVICE_ADDED: + { + process_evt_bh = 1; + break; + } + case MPI3_EVENT_ENERGY_PACK_CHANGE: + { + mpi3mr_energypackchg_evt_th(mrioc, event_reply); + break; + } + case MPI3_EVENT_CABLE_MGMT: + { + mpi3mr_cablemgmt_evt_th(mrioc, event_reply); + break; + } + case MPI3_EVENT_SAS_DISCOVERY: + case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: + case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: + case MPI3_EVENT_PCIE_ENUMERATION: + break; + default: + ioc_info(mrioc, "%s :event 0x%02x is not handled\n", + __func__, evt_type); + break; + } + if (process_evt_bh || ack_req) { + sz = event_reply->event_data_length * 4; + fwevt = mpi3mr_alloc_fwevt(sz); + if (!fwevt) { + ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n", + __func__, __FILE__, __LINE__, __func__); + return; + } + + memcpy(fwevt->event_data, event_reply->event_data, sz); + fwevt->mrioc = mrioc; + fwevt->event_id = evt_type; + fwevt->send_ack = ack_req; + fwevt->process_evt = process_evt_bh; + fwevt->evt_ctx = le32_to_cpu(event_reply->event_context); + mpi3mr_fwevt_add_to_list(mrioc, fwevt); + } +} + +/** + * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO + * @mrioc: Adapter instance reference + * @scmd: SCSI command reference + * @scsiio_req: MPI3 SCSI IO request + * + * Identifies the protection information flags from the SCSI + * command and set appropriate flags in the MPI3 SCSI IO + * request. 
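+ *
+ * scsi_get_prot_op() selects the EEDP operation (check/remove,
+ * insert, check or check/regenerate), scmd->prot_flags add the guard,
+ * reference tag and application tag checking flags, and
+ * scsi_prot_interval() selects the MPI3_EEDP_UDS_* user data size.
+ * The resulting flags are written into the first (EEDP) SGE of the
+ * request.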
+ * + * Return: Nothing + */ +static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc, + struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) +{ + u16 eedp_flags = 0; + unsigned char prot_op = scsi_get_prot_op(scmd); + + switch (prot_op) { + case SCSI_PROT_NORMAL: + return; + case SCSI_PROT_READ_STRIP: + eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; + break; + case SCSI_PROT_WRITE_INSERT: + eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; + break; + case SCSI_PROT_READ_INSERT: + eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; + scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; + break; + case SCSI_PROT_WRITE_STRIP: + eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; + scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; + break; + case SCSI_PROT_READ_PASS: + eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; + scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; + break; + case SCSI_PROT_WRITE_PASS: + if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) { + eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN; + scsiio_req->sgl[0].eedp.application_tag_translation_mask = + 0xffff; + } else + eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; + + scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; + break; + default: + return; + } + + if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) + eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD; + + if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) + eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM; + + if (scmd->prot_flags & SCSI_PROT_REF_CHECK) { + eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG | + MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; + scsiio_req->cdb.eedp32.primary_reference_tag = + cpu_to_be32(scsi_prot_ref_tag(scmd)); + } + + if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) + eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; + + eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE; + + switch (scsi_prot_interval(scmd)) { + case 512: + scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512; + break; + case 520: + scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520; + break; + case 4080: + scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080; + break; + case 4088: + scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088; + break; + case 4096: + scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096; + break; + case 4104: + scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104; + break; + case 4160: + scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160; + break; + default: + break; + } + + scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags); + scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED; +} + +/** + * mpi3mr_build_sense_buffer - Map sense information + * @desc: Sense type + * @buf: Sense buffer to populate + * @key: Sense key + * @asc: Additional sense code + * @ascq: Additional sense code qualifier + * + * Maps the given sense information into either descriptor or + * fixed format sense data. + * + * Return: Nothing + */ +static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key, + u8 asc, u8 ascq) +{ + if (desc) { + buf[0] = 0x72; /* descriptor, current */ + buf[1] = key; + buf[2] = asc; + buf[3] = ascq; + buf[7] = 0; + } else { + buf[0] = 0x70; /* fixed, current */ + buf[2] = key; + buf[7] = 0xa; + buf[12] = asc; + buf[13] = ascq; + } +} + +/** + * mpi3mr_map_eedp_error - Map EEDP errors from IOC status + * @scmd: SCSI command reference + * @ioc_status: status of MPI3 request + * + * Maps the EEDP error status of the SCSI IO request to sense + * data. 
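+ *
+ * Guard, application tag and reference tag errors are reported as
+ * ILLEGAL_REQUEST sense data with ASC 0x10 and ASCQ 0x01, 0x02 and
+ * 0x03 respectively, and the command is completed with DID_ABORT and
+ * SAM_STAT_CHECK_CONDITION.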
+ * + * Return: Nothing + */ +static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd, + u16 ioc_status) +{ + u8 ascq = 0; + + switch (ioc_status) { + case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: + ascq = 0x01; + break; + case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: + ascq = 0x02; + break; + case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: + ascq = 0x03; + break; + default: + ascq = 0x00; + break; + } + + mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, ascq); + scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; +} + +/** + * mpi3mr_process_op_reply_desc - reply descriptor handler + * @mrioc: Adapter instance reference + * @reply_desc: Operational reply descriptor + * @reply_dma: place holder for reply DMA address + * @qidx: Operational queue index + * + * Process the operational reply descriptor and identifies the + * descriptor type. Based on the descriptor map the MPI3 request + * status to a SCSI command status and calls scsi_done call + * back. + * + * Return: Nothing + */ +void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, + struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx) +{ + u16 reply_desc_type, host_tag = 0; + u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; + u32 ioc_loginfo = 0; + struct mpi3_status_reply_descriptor *status_desc = NULL; + struct mpi3_address_reply_descriptor *addr_desc = NULL; + struct mpi3_success_reply_descriptor *success_desc = NULL; + struct mpi3_scsi_io_reply *scsi_reply = NULL; + struct scsi_cmnd *scmd = NULL; + struct scmd_priv *priv = NULL; + u8 *sense_buf = NULL; + u8 scsi_state = 0, scsi_status = 0, sense_state = 0; + u32 xfer_count = 0, sense_count = 0, resp_data = 0; + u16 dev_handle = 0xFFFF; + struct scsi_sense_hdr sshdr; + struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL; + struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; + u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0; + struct mpi3mr_throttle_group_info *tg = NULL; + u8 throttle_enabled_dev = 0; + + *reply_dma = 0; + reply_desc_type = le16_to_cpu(reply_desc->reply_flags) & + MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK; + switch (reply_desc_type) { + case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS: + status_desc = (struct mpi3_status_reply_descriptor *)reply_desc; + host_tag = le16_to_cpu(status_desc->host_tag); + ioc_status = le16_to_cpu(status_desc->ioc_status); + if (ioc_status & + MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) + ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); + ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; + break; + case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: + addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; + *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); + scsi_reply = mpi3mr_get_reply_virt_addr(mrioc, + *reply_dma); + if (!scsi_reply) { + panic("%s: scsi_reply is NULL, this shouldn't happen\n", + mrioc->name); + goto out; + } + host_tag = le16_to_cpu(scsi_reply->host_tag); + ioc_status = le16_to_cpu(scsi_reply->ioc_status); + scsi_status = scsi_reply->scsi_status; + scsi_state = scsi_reply->scsi_state; + dev_handle = le16_to_cpu(scsi_reply->dev_handle); + sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK); + xfer_count = le32_to_cpu(scsi_reply->transfer_count); + sense_count = le32_to_cpu(scsi_reply->sense_count); + resp_data = le32_to_cpu(scsi_reply->response_data); + sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, + le64_to_cpu(scsi_reply->sense_data_buffer_address)); + if (ioc_status & + MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) + 
ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info); + ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; + if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY) + panic("%s: Ran out of sense buffers\n", mrioc->name); + break; + case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: + success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; + host_tag = le16_to_cpu(success_desc->host_tag); + break; + default: + break; + } + scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx); + if (!scmd) { + panic("%s: Cannot Identify scmd for host_tag 0x%x\n", + mrioc->name, host_tag); + goto out; + } + priv = scsi_cmd_priv(scmd); + + data_len_blks = scsi_bufflen(scmd) >> 9; + sdev_priv_data = scmd->device->hostdata; + if (sdev_priv_data) { + stgt_priv_data = sdev_priv_data->tgt_priv_data; + if (stgt_priv_data) { + tg = stgt_priv_data->throttle_group; + throttle_enabled_dev = + stgt_priv_data->io_throttle_enabled; + } + } + if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) && + throttle_enabled_dev)) { + ioc_pend_data_len = atomic_sub_return(data_len_blks, + &mrioc->pend_large_data_sz); + if (tg) { + tg_pend_data_len = atomic_sub_return(data_len_blks, + &tg->pend_large_data_sz); + if (tg->io_divert && ((ioc_pend_data_len <= + mrioc->io_throttle_low) && + (tg_pend_data_len <= tg->low))) { + tg->io_divert = 0; + mpi3mr_set_io_divert_for_all_vd_in_tg( + mrioc, tg, 0); + } + } else { + if (ioc_pend_data_len <= mrioc->io_throttle_low) + stgt_priv_data->io_divert = 0; + } + } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) { + ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz); + if (!tg) { + if (ioc_pend_data_len <= mrioc->io_throttle_low) + stgt_priv_data->io_divert = 0; + + } else if (ioc_pend_data_len <= mrioc->io_throttle_low) { + tg_pend_data_len = atomic_read(&tg->pend_large_data_sz); + if (tg->io_divert && (tg_pend_data_len <= tg->low)) { + tg->io_divert = 0; + mpi3mr_set_io_divert_for_all_vd_in_tg( + mrioc, tg, 0); + } + } + } + + if (success_desc) { + scmd->result = DID_OK << 16; + goto out_success; + } + + scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count); + if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN && + xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY || + scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT || + scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL)) + ioc_status = MPI3_IOCSTATUS_SUCCESS; + + if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count && + sense_buf) { + u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count); + + memcpy(scmd->sense_buffer, sense_buf, sz); + } + + switch (ioc_status) { + case MPI3_IOCSTATUS_BUSY: + case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES: + scmd->result = SAM_STAT_BUSY; + break; + case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + scmd->result = DID_NO_CONNECT << 16; + break; + case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: + scmd->result = DID_SOFT_ERROR << 16; + break; + case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED: + case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED: + scmd->result = DID_RESET << 16; + break; + case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + if ((xfer_count == 0) || (scmd->underflow > xfer_count)) + scmd->result = DID_SOFT_ERROR << 16; + else + scmd->result = (DID_OK << 16) | scsi_status; + break; + case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN: + scmd->result = (DID_OK << 16) | scsi_status; + if (sense_state == MPI3_SCSI_STATE_SENSE_VALID) + break; + if (xfer_count < scmd->underflow) { + if (scsi_status == SAM_STAT_BUSY) + scmd->result = SAM_STAT_BUSY; + else + scmd->result = 
DID_SOFT_ERROR << 16; + } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || + (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE)) + scmd->result = DID_SOFT_ERROR << 16; + else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) + scmd->result = DID_RESET << 16; + break; + case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN: + scsi_set_resid(scmd, 0); + fallthrough; + case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR: + case MPI3_IOCSTATUS_SUCCESS: + scmd->result = (DID_OK << 16) | scsi_status; + if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || + (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) || + (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)) + scmd->result = DID_SOFT_ERROR << 16; + else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) + scmd->result = DID_RESET << 16; + break; + case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: + case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: + case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: + mpi3mr_map_eedp_error(scmd, ioc_status); + break; + case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR: + case MPI3_IOCSTATUS_INVALID_FUNCTION: + case MPI3_IOCSTATUS_INVALID_SGL: + case MPI3_IOCSTATUS_INTERNAL_ERROR: + case MPI3_IOCSTATUS_INVALID_FIELD: + case MPI3_IOCSTATUS_INVALID_STATE: + case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR: + case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + case MPI3_IOCSTATUS_INSUFFICIENT_POWER: + default: + scmd->result = DID_SOFT_ERROR << 16; + break; + } + + if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) && + (scmd->cmnd[0] != ATA_16) && + mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) { + ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__, + scmd->result); + scsi_print_command(scmd); + ioc_info(mrioc, + "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n", + __func__, dev_handle, ioc_status, ioc_loginfo, + priv->req_q_idx + 1); + ioc_info(mrioc, + " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n", + host_tag, scsi_state, scsi_status, xfer_count, resp_data); + if (sense_buf) { + scsi_normalize_sense(sense_buf, sense_count, &sshdr); + ioc_info(mrioc, + "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n", + __func__, sense_count, sshdr.sense_key, + sshdr.asc, sshdr.ascq); + } + } +out_success: + if (priv->meta_sg_valid) { + dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd), + scsi_prot_sg_count(scmd), scmd->sc_data_direction); + } + mpi3mr_clear_scmd_priv(mrioc, scmd); + scsi_dma_unmap(scmd); + scsi_done(scmd); +out: + if (sense_buf) + mpi3mr_repost_sense_buf(mrioc, + le64_to_cpu(scsi_reply->sense_data_buffer_address)); +} + +/** + * mpi3mr_get_chain_idx - get free chain buffer index + * @mrioc: Adapter instance reference + * + * Try to get a free chain buffer index from the free pool. + * + * Return: -1 on failure or the free chain buffer index + */ +static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc) +{ + u8 retry_count = 5; + int cmd_idx = -1; + unsigned long flags; + + spin_lock_irqsave(&mrioc->chain_buf_lock, flags); + do { + cmd_idx = find_first_zero_bit(mrioc->chain_bitmap, + mrioc->chain_buf_count); + if (cmd_idx < mrioc->chain_buf_count) { + set_bit(cmd_idx, mrioc->chain_bitmap); + break; + } + cmd_idx = -1; + } while (retry_count--); + spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags); + return cmd_idx; +} + +/** + * mpi3mr_prepare_sg_scmd - build scatter gather list + * @mrioc: Adapter instance reference + * @scmd: SCSI command reference + * @scsiio_req: MPI3 SCSI IO request + * + * This function maps SCSI command's data and protection SGEs to + * MPI request SGEs. 
If required additional 4K chain buffer is + * used to send the SGEs. + * + * Return: 0 on success, -ENOMEM on dma_map_sg failure + */ +static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, + struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) +{ + dma_addr_t chain_dma; + struct scatterlist *sg_scmd; + void *sg_local, *chain; + u32 chain_length; + int sges_left, chain_idx; + u32 sges_in_segment; + u8 simple_sgl_flags; + u8 simple_sgl_flags_last; + u8 last_chain_sgl_flags; + struct chain_element *chain_req; + struct scmd_priv *priv = NULL; + u32 meta_sg = le32_to_cpu(scsiio_req->flags) & + MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI; + + priv = scsi_cmd_priv(scmd); + + simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | + MPI3_SGE_FLAGS_DLAS_SYSTEM; + simple_sgl_flags_last = simple_sgl_flags | + MPI3_SGE_FLAGS_END_OF_LIST; + last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN | + MPI3_SGE_FLAGS_DLAS_SYSTEM; + + if (meta_sg) + sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX]; + else + sg_local = &scsiio_req->sgl; + + if (!scsiio_req->data_length && !meta_sg) { + mpi3mr_build_zero_len_sge(sg_local); + return 0; + } + + if (meta_sg) { + sg_scmd = scsi_prot_sglist(scmd); + sges_left = dma_map_sg(&mrioc->pdev->dev, + scsi_prot_sglist(scmd), + scsi_prot_sg_count(scmd), + scmd->sc_data_direction); + priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ + } else { + sg_scmd = scsi_sglist(scmd); + sges_left = scsi_dma_map(scmd); + } + + if (sges_left < 0) { + sdev_printk(KERN_ERR, scmd->device, + "scsi_dma_map failed: request for %d bytes!\n", + scsi_bufflen(scmd)); + return -ENOMEM; + } + if (sges_left > mrioc->max_sgl_entries) { + sdev_printk(KERN_ERR, scmd->device, + "scsi_dma_map returned unsupported sge count %d!\n", + sges_left); + return -ENOMEM; + } + + sges_in_segment = (mrioc->facts.op_req_sz - + offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common); + + if (scsiio_req->sgl[0].eedp.flags == + MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) { + sg_local += sizeof(struct mpi3_sge_common); + sges_in_segment--; + /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */ + } + + if (scsiio_req->msg_flags == + MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) { + sges_in_segment--; + /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */ + } + + if (meta_sg) + sges_in_segment = 1; + + if (sges_left <= sges_in_segment) + goto fill_in_last_segment; + + /* fill in main message segment when there is a chain following */ + while (sges_in_segment > 1) { + mpi3mr_add_sg_single(sg_local, simple_sgl_flags, + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += sizeof(struct mpi3_sge_common); + sges_left--; + sges_in_segment--; + } + + chain_idx = mpi3mr_get_chain_idx(mrioc); + if (chain_idx < 0) + return -1; + chain_req = &mrioc->chain_sgl_list[chain_idx]; + if (meta_sg) + priv->meta_chain_idx = chain_idx; + else + priv->chain_idx = chain_idx; + + chain = chain_req->addr; + chain_dma = chain_req->dma_addr; + sges_in_segment = sges_left; + chain_length = sges_in_segment * sizeof(struct mpi3_sge_common); + + mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags, + chain_length, chain_dma); + + sg_local = chain; + +fill_in_last_segment: + while (sges_left > 0) { + if (sges_left == 1) + mpi3mr_add_sg_single(sg_local, + simple_sgl_flags_last, sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + else + mpi3mr_add_sg_single(sg_local, simple_sgl_flags, + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = 
sg_next(sg_scmd); + sg_local += sizeof(struct mpi3_sge_common); + sges_left--; + } + + return 0; +} + +/** + * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO + * @mrioc: Adapter instance reference + * @scmd: SCSI command reference + * @scsiio_req: MPI3 SCSI IO request + * + * This function calls mpi3mr_prepare_sg_scmd for constructing + * both data SGEs and protection information SGEs in the MPI + * format from the SCSI Command as appropriate . + * + * Return: return value of mpi3mr_prepare_sg_scmd. + */ +static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc, + struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) +{ + int ret; + + ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); + if (ret) + return ret; + + if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) { + /* There is a valid meta sg */ + scsiio_req->flags |= + cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI); + ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); + } + + return ret; +} + +/** + * mpi3mr_tm_response_name - get TM response as a string + * @resp_code: TM response code + * + * Convert known task management response code as a readable + * string. + * + * Return: response code string. + */ +static const char *mpi3mr_tm_response_name(u8 resp_code) +{ + char *desc; + + switch (resp_code) { + case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: + desc = "task management request completed"; + break; + case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME: + desc = "invalid frame"; + break; + case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED: + desc = "task management request not supported"; + break; + case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED: + desc = "task management request failed"; + break; + case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: + desc = "task management request succeeded"; + break; + case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN: + desc = "invalid LUN"; + break; + case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG: + desc = "overlapped tag attempted"; + break; + case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: + desc = "task queued, however not sent to target"; + break; + case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED: + desc = "task management request denied by NVMe device"; + break; + default: + desc = "unknown"; + break; + } + + return desc; +} + +inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc) +{ + int i; + int num_of_reply_queues = + mrioc->num_op_reply_q + mrioc->op_reply_q_offset; + + for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++) + mpi3mr_process_op_reply_q(mrioc, + mrioc->intr_info[i].op_reply_q); +} + +/** + * mpi3mr_issue_tm - Issue Task Management request + * @mrioc: Adapter instance reference + * @tm_type: Task Management type + * @handle: Device handle + * @lun: lun ID + * @htag: Host tag of the TM request + * @timeout: TM timeout value + * @drv_cmd: Internal command tracker + * @resp_code: Response code place holder + * @scmd: SCSI command + * + * Issues a Task Management Request to the controller for a + * specified target, lun and command and wait for its completion + * and check TM response. Recover the TM if it timed out by + * issuing controller reset. 
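+ *
+ * The request is serialized through drv_cmd->mutex and waited for on
+ * drv_cmd->done for up to the given timeout (PCIe devices may supply
+ * their own abort/reset timeouts). On success the operational and
+ * admin reply queues are polled before re-counting the pending
+ * commands for the target or LUN that was reset.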
+ * + * Return: 0 on success, non-zero on errors + */ +int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, + u16 handle, uint lun, u16 htag, ulong timeout, + struct mpi3mr_drv_cmd *drv_cmd, + u8 *resp_code, struct scsi_cmnd *scmd) +{ + struct mpi3_scsi_task_mgmt_request tm_req; + struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; + int retval = 0; + struct mpi3mr_tgt_dev *tgtdev = NULL; + struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; + struct scmd_priv *cmd_priv = NULL; + struct scsi_device *sdev = NULL; + struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; + + ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n", + __func__, tm_type, handle); + if (mrioc->unrecoverable) { + retval = -1; + ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n", + __func__); + goto out; + } + + memset(&tm_req, 0, sizeof(tm_req)); + mutex_lock(&drv_cmd->mutex); + if (drv_cmd->state & MPI3MR_CMD_PENDING) { + retval = -1; + ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); + mutex_unlock(&drv_cmd->mutex); + goto out; + } + if (mrioc->reset_in_progress) { + retval = -1; + ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__); + mutex_unlock(&drv_cmd->mutex); + goto out; + } + + drv_cmd->state = MPI3MR_CMD_PENDING; + drv_cmd->is_waiting = 1; + drv_cmd->callback = NULL; + tm_req.dev_handle = cpu_to_le16(handle); + tm_req.task_type = tm_type; + tm_req.host_tag = cpu_to_le16(htag); + + int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun); + tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; + + tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); + + if (scmd) { + sdev = scmd->device; + sdev_priv_data = sdev->hostdata; + scsi_tgt_priv_data = ((sdev_priv_data) ? + sdev_priv_data->tgt_priv_data : NULL); + } else { + if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) + scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) + tgtdev->starget->hostdata; + } + + if (scsi_tgt_priv_data) + atomic_inc(&scsi_tgt_priv_data->block_io); + + if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) { + if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to) + timeout = tgtdev->dev_spec.pcie_inf.abort_to; + else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to) + timeout = tgtdev->dev_spec.pcie_inf.reset_to; + } + + init_completion(&drv_cmd->done); + retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); + if (retval) { + ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__); + goto out_unlock; + } + wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ)); + + if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) { + drv_cmd->is_waiting = 0; + retval = -1; + if (!(drv_cmd->state & MPI3MR_CMD_RESET)) { + dprint_tm(mrioc, + "task management request timed out after %ld seconds\n", + timeout); + if (mrioc->logging_level & MPI3_DEBUG_TM) + dprint_dump_req(&tm_req, sizeof(tm_req)/4); + mpi3mr_soft_reset_handler(mrioc, + MPI3MR_RESET_FROM_TM_TIMEOUT, 1); + } + goto out_unlock; + } + + if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) { + dprint_tm(mrioc, "invalid task management reply message\n"); + retval = -1; + goto out_unlock; + } + + tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; + + switch (drv_cmd->ioc_status) { + case MPI3_IOCSTATUS_SUCCESS: + *resp_code = le32_to_cpu(tm_reply->response_data) & + MPI3MR_RI_MASK_RESPCODE; + break; + case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: + *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE; + break; + default: + dprint_tm(mrioc, + "task management request to handle(0x%04x) is failed with 
ioc_status(0x%04x) log_info(0x%08x)\n", + handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo); + retval = -1; + goto out_unlock; + } + + switch (*resp_code) { + case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: + case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: + break; + case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: + if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK) + retval = -1; + break; + default: + retval = -1; + break; + } + + dprint_tm(mrioc, + "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n", + tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo, + le32_to_cpu(tm_reply->termination_count), + mpi3mr_tm_response_name(*resp_code), *resp_code); + + if (!retval) { + mpi3mr_ioc_disable_intr(mrioc); + mpi3mr_poll_pend_io_completions(mrioc); + mpi3mr_ioc_enable_intr(mrioc); + mpi3mr_poll_pend_io_completions(mrioc); + mpi3mr_process_admin_reply_q(mrioc); + } + switch (tm_type) { + case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + if (!scsi_tgt_priv_data) + break; + scsi_tgt_priv_data->pend_count = 0; + blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, + mpi3mr_count_tgt_pending, + (void *)scsi_tgt_priv_data->starget); + break; + case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + if (!sdev_priv_data) + break; + sdev_priv_data->pend_count = 0; + blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, + mpi3mr_count_dev_pending, (void *)sdev); + break; + default: + break; + } + +out_unlock: + drv_cmd->state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&drv_cmd->mutex); + if (scsi_tgt_priv_data) + atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); + if (tgtdev) + mpi3mr_tgtdev_put(tgtdev); +out: + return retval; +} + +/** + * mpi3mr_bios_param - BIOS param callback + * @sdev: SCSI device reference + * @bdev: Block device reference + * @capacity: Capacity in logical sectors + * @params: Parameter array + * + * Just the parameters with heads/secots/cylinders. + * + * Return: 0 always + */ +static int mpi3mr_bios_param(struct scsi_device *sdev, + struct block_device *bdev, sector_t capacity, int params[]) +{ + int heads; + int sectors; + sector_t cylinders; + ulong dummy; + + heads = 64; + sectors = 32; + + dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + + if ((ulong)capacity >= 0x200000) { + heads = 255; + sectors = 63; + dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + } + + params[0] = heads; + params[1] = sectors; + params[2] = cylinders; + return 0; +} + +/** + * mpi3mr_map_queues - Map queues callback handler + * @shost: SCSI host reference + * + * Maps default and poll queues. + * + * Return: return zero. 
+ */ +static void mpi3mr_map_queues(struct Scsi_Host *shost) +{ + struct mpi3mr_ioc *mrioc = shost_priv(shost); + int i, qoff, offset; + struct blk_mq_queue_map *map = NULL; + + offset = mrioc->op_reply_q_offset; + + for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) { + map = &shost->tag_set.map[i]; + + map->nr_queues = 0; + + if (i == HCTX_TYPE_DEFAULT) + map->nr_queues = mrioc->default_qcount; + else if (i == HCTX_TYPE_POLL) + map->nr_queues = mrioc->active_poll_qcount; + + if (!map->nr_queues) { + BUG_ON(i == HCTX_TYPE_DEFAULT); + continue; + } + + /* + * The poll queue(s) doesn't have an IRQ (and hence IRQ + * affinity), so use the regular blk-mq cpu mapping + */ + map->queue_offset = qoff; + if (i != HCTX_TYPE_POLL) + blk_mq_pci_map_queues(map, mrioc->pdev, offset); + else + blk_mq_map_queues(map); + + qoff += map->nr_queues; + offset += map->nr_queues; + } +} + +/** + * mpi3mr_get_fw_pending_ios - Calculate pending I/O count + * @mrioc: Adapter instance reference + * + * Calculate the pending I/Os for the controller and return. + * + * Return: Number of pending I/Os + */ +static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc) +{ + u16 i; + uint pend_ios = 0; + + for (i = 0; i < mrioc->num_op_reply_q; i++) + pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios); + return pend_ios; +} + +/** + * mpi3mr_print_pending_host_io - print pending I/Os + * @mrioc: Adapter instance reference + * + * Print number of pending I/Os and each I/O details prior to + * reset for debug purpose. + * + * Return: Nothing + */ +static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc) +{ + struct Scsi_Host *shost = mrioc->shost; + + ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n", + __func__, mpi3mr_get_fw_pending_ios(mrioc)); + blk_mq_tagset_busy_iter(&shost->tag_set, + mpi3mr_print_scmd, (void *)mrioc); +} + +/** + * mpi3mr_wait_for_host_io - block for I/Os to complete + * @mrioc: Adapter instance reference + * @timeout: time out in seconds + * Waits for pending I/Os for the given adapter to complete or + * to hit the timeout. + * + * Return: Nothing + */ +void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout) +{ + enum mpi3mr_iocstate iocstate; + int i = 0; + + iocstate = mpi3mr_get_iocstate(mrioc); + if (iocstate != MRIOC_STATE_READY) + return; + + if (!mpi3mr_get_fw_pending_ios(mrioc)) + return; + ioc_info(mrioc, + "%s :Waiting for %d seconds prior to reset for %d I/O\n", + __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc)); + + for (i = 0; i < timeout; i++) { + if (!mpi3mr_get_fw_pending_ios(mrioc)) + break; + iocstate = mpi3mr_get_iocstate(mrioc); + if (iocstate != MRIOC_STATE_READY) + break; + msleep(1000); + } + + ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__, + mpi3mr_get_fw_pending_ios(mrioc)); +} + +/** + * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same + * @mrioc: Adapter instance reference + * @scmd: SCSI command reference + * @scsiio_req: MPI3 SCSI IO request + * @scsiio_flags: Pointer to MPI3 SCSI IO Flags + * @wslen: write same max length + * + * Gets values of unmap, ndob and number of blocks from write + * same scsi io and based on these values it sets divert IO flag + * and reason for diverting IO to firmware. 
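+ *
+ * For WRITE SAME(16) the UNMAP and NDOB bits are read from byte 1 of
+ * the CDB and the block count from bytes 10-13; for WRITE SAME(32)
+ * they come from byte 10 and bytes 28-31. When both UNMAP and NDOB
+ * are set and the block count exceeds wslen, the I/O is flagged with
+ * MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE and the
+ * MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE reason.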
+ * + * Return: Nothing + */ +static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc, + struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req, + u32 *scsiio_flags, u16 wslen) +{ + u8 unmap = 0, ndob = 0; + u8 opcode = scmd->cmnd[0]; + u32 num_blocks = 0; + u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]); + + if (opcode == WRITE_SAME_16) { + unmap = scmd->cmnd[1] & 0x08; + ndob = scmd->cmnd[1] & 0x01; + num_blocks = get_unaligned_be32(scmd->cmnd + 10); + } else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) { + unmap = scmd->cmnd[10] & 0x08; + ndob = scmd->cmnd[10] & 0x01; + num_blocks = get_unaligned_be32(scmd->cmnd + 28); + } else + return; + + if ((unmap) && (ndob) && (num_blocks > wslen)) { + scsiio_req->msg_flags |= + MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; + *scsiio_flags |= + MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE; + } +} + +/** + * mpi3mr_eh_host_reset - Host reset error handling callback + * @scmd: SCSI command reference + * + * Issue controller reset if the scmd is for a Physical Device, + * if the scmd is for RAID volume, then wait for + * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and checke whether any + * pending I/Os prior to issuing reset to the controller. + * + * Return: SUCCESS of successful reset else FAILED + */ +static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd) +{ + struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); + struct mpi3mr_stgt_priv_data *stgt_priv_data; + struct mpi3mr_sdev_priv_data *sdev_priv_data; + u8 dev_type = MPI3_DEVICE_DEVFORM_VD; + int retval = FAILED, ret; + + sdev_priv_data = scmd->device->hostdata; + if (sdev_priv_data && sdev_priv_data->tgt_priv_data) { + stgt_priv_data = sdev_priv_data->tgt_priv_data; + dev_type = stgt_priv_data->dev_type; + } + + if (dev_type == MPI3_DEVICE_DEVFORM_VD) { + mpi3mr_wait_for_host_io(mrioc, + MPI3MR_RAID_ERRREC_RESET_TIMEOUT); + if (!mpi3mr_get_fw_pending_ios(mrioc)) { + retval = SUCCESS; + goto out; + } + } + + mpi3mr_print_pending_host_io(mrioc); + ret = mpi3mr_soft_reset_handler(mrioc, + MPI3MR_RESET_FROM_EH_HOS, 1); + if (ret) + goto out; + + retval = SUCCESS; +out: + sdev_printk(KERN_INFO, scmd->device, + "Host reset is %s for scmd(%p)\n", + ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + + return retval; +} + +/** + * mpi3mr_eh_target_reset - Target reset error handling callback + * @scmd: SCSI command reference + * + * Issue Target reset Task Management and verify the scmd is + * terminated successfully and return status accordingly. + * + * Return: SUCCESS of successful termination of the scmd else + * FAILED + */ +static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd) +{ + struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); + struct mpi3mr_stgt_priv_data *stgt_priv_data; + struct mpi3mr_sdev_priv_data *sdev_priv_data; + u16 dev_handle; + u8 resp_code = 0; + int retval = FAILED, ret = 0; + + sdev_printk(KERN_INFO, scmd->device, + "Attempting Target Reset! 
scmd(%p)\n", scmd); + scsi_print_command(scmd); + + sdev_priv_data = scmd->device->hostdata; + if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { + sdev_printk(KERN_INFO, scmd->device, + "SCSI device is not available\n"); + retval = SUCCESS; + goto out; + } + + stgt_priv_data = sdev_priv_data->tgt_priv_data; + dev_handle = stgt_priv_data->dev_handle; + if (stgt_priv_data->dev_removed) { + struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd); + sdev_printk(KERN_INFO, scmd->device, + "%s:target(handle = 0x%04x) is removed, target reset is not issued\n", + mrioc->name, dev_handle); + if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) + retval = SUCCESS; + else + retval = FAILED; + goto out; + } + sdev_printk(KERN_INFO, scmd->device, + "Target Reset is issued to handle(0x%04x)\n", + dev_handle); + + ret = mpi3mr_issue_tm(mrioc, + MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle, + sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, + MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); + + if (ret) + goto out; + + if (stgt_priv_data->pend_count) { + sdev_printk(KERN_INFO, scmd->device, + "%s: target has %d pending commands, target reset is failed\n", + mrioc->name, stgt_priv_data->pend_count); + goto out; + } + + retval = SUCCESS; +out: + sdev_printk(KERN_INFO, scmd->device, + "%s: target reset is %s for scmd(%p)\n", mrioc->name, + ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + + return retval; +} + +/** + * mpi3mr_eh_dev_reset- Device reset error handling callback + * @scmd: SCSI command reference + * + * Issue lun reset Task Management and verify the scmd is + * terminated successfully and return status accordingly. + * + * Return: SUCCESS of successful termination of the scmd else + * FAILED + */ +static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd) +{ + struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); + struct mpi3mr_stgt_priv_data *stgt_priv_data; + struct mpi3mr_sdev_priv_data *sdev_priv_data; + u16 dev_handle; + u8 resp_code = 0; + int retval = FAILED, ret = 0; + + sdev_printk(KERN_INFO, scmd->device, + "Attempting Device(lun) Reset! 
scmd(%p)\n", scmd); + scsi_print_command(scmd); + + sdev_priv_data = scmd->device->hostdata; + if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { + sdev_printk(KERN_INFO, scmd->device, + "SCSI device is not available\n"); + retval = SUCCESS; + goto out; + } + + stgt_priv_data = sdev_priv_data->tgt_priv_data; + dev_handle = stgt_priv_data->dev_handle; + if (stgt_priv_data->dev_removed) { + struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd); + sdev_printk(KERN_INFO, scmd->device, + "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n", + mrioc->name, dev_handle); + if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) + retval = SUCCESS; + else + retval = FAILED; + goto out; + } + sdev_printk(KERN_INFO, scmd->device, + "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle); + + ret = mpi3mr_issue_tm(mrioc, + MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle, + sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, + MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); + + if (ret) + goto out; + + if (sdev_priv_data->pend_count) { + sdev_printk(KERN_INFO, scmd->device, + "%s: device has %d pending commands, device(LUN) reset is failed\n", + mrioc->name, sdev_priv_data->pend_count); + goto out; + } + retval = SUCCESS; +out: + sdev_printk(KERN_INFO, scmd->device, + "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name, + ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + + return retval; +} + +/** + * mpi3mr_scan_start - Scan start callback handler + * @shost: SCSI host reference + * + * Issue port enable request asynchronously. + * + * Return: Nothing + */ +static void mpi3mr_scan_start(struct Scsi_Host *shost) +{ + struct mpi3mr_ioc *mrioc = shost_priv(shost); + + mrioc->scan_started = 1; + ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__); + if (mpi3mr_issue_port_enable(mrioc, 1)) { + ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__); + mrioc->scan_started = 0; + mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; + } +} + +/** + * mpi3mr_scan_finished - Scan finished callback handler + * @shost: SCSI host reference + * @time: Jiffies from the scan start + * + * Checks whether the port enable is completed or timedout or + * failed and set the scan status accordingly after taking any + * recovery if required. 
+ * + * Return: 1 on scan finished or timed out, 0 for in progress + */ +static int mpi3mr_scan_finished(struct Scsi_Host *shost, + unsigned long time) +{ + struct mpi3mr_ioc *mrioc = shost_priv(shost); + u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; + u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status); + + if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || + (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { + ioc_err(mrioc, "port enable failed due to fault or reset\n"); + mpi3mr_print_fault_info(mrioc); + mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; + mrioc->scan_started = 0; + mrioc->init_cmds.is_waiting = 0; + mrioc->init_cmds.callback = NULL; + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + } + + if (time >= (pe_timeout * HZ)) { + ioc_err(mrioc, "port enable failed due to time out\n"); + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_PE_TIMEOUT); + mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; + mrioc->scan_started = 0; + mrioc->init_cmds.is_waiting = 0; + mrioc->init_cmds.callback = NULL; + mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; + } + + if (mrioc->scan_started) + return 0; + + if (mrioc->scan_failed) { + ioc_err(mrioc, + "port enable failed with status=0x%04x\n", + mrioc->scan_failed); + } else + ioc_info(mrioc, "port enable is successfully completed\n"); + + mpi3mr_start_watchdog(mrioc); + mrioc->is_driver_loading = 0; + mrioc->stop_bsgs = 0; + return 1; +} + +/** + * mpi3mr_slave_destroy - Slave destroy callback handler + * @sdev: SCSI device reference + * + * Cleanup and free per device(lun) private data. + * + * Return: Nothing. + */ +static void mpi3mr_slave_destroy(struct scsi_device *sdev) +{ + struct Scsi_Host *shost; + struct mpi3mr_ioc *mrioc; + struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; + struct mpi3mr_tgt_dev *tgt_dev = NULL; + unsigned long flags; + struct scsi_target *starget; + struct sas_rphy *rphy = NULL; + + if (!sdev->hostdata) + return; + + starget = scsi_target(sdev); + shost = dev_to_shost(&starget->dev); + mrioc = shost_priv(shost); + scsi_tgt_priv_data = starget->hostdata; + + scsi_tgt_priv_data->num_luns--; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + if (starget->channel == mrioc->scsi_device_channel) + tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); + else if (mrioc->sas_transport_enabled && !starget->channel) { + rphy = dev_to_rphy(starget->dev.parent); + tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, + rphy->identify.sas_address, rphy); + } + + if (tgt_dev && (!scsi_tgt_priv_data->num_luns)) + tgt_dev->starget = NULL; + if (tgt_dev) + mpi3mr_tgtdev_put(tgt_dev); + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + + kfree(sdev->hostdata); + sdev->hostdata = NULL; +} + +/** + * mpi3mr_target_destroy - Target destroy callback handler + * @starget: SCSI target reference + * + * Cleanup and free per target private data. + * + * Return: Nothing. 
+ */ +static void mpi3mr_target_destroy(struct scsi_target *starget) +{ + struct Scsi_Host *shost; + struct mpi3mr_ioc *mrioc; + struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; + struct mpi3mr_tgt_dev *tgt_dev; + unsigned long flags; + + if (!starget->hostdata) + return; + + shost = dev_to_shost(&starget->dev); + mrioc = shost_priv(shost); + scsi_tgt_priv_data = starget->hostdata; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data); + if (tgt_dev && (tgt_dev->starget == starget) && + (tgt_dev->perst_id == starget->id)) + tgt_dev->starget = NULL; + if (tgt_dev) { + scsi_tgt_priv_data->tgt_dev = NULL; + scsi_tgt_priv_data->perst_id = 0; + mpi3mr_tgtdev_put(tgt_dev); + mpi3mr_tgtdev_put(tgt_dev); + } + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + + kfree(starget->hostdata); + starget->hostdata = NULL; +} + +/** + * mpi3mr_slave_configure - Slave configure callback handler + * @sdev: SCSI device reference + * + * Configure queue depth, max hardware sectors and virt boundary + * as required + * + * Return: 0 always. + */ +static int mpi3mr_slave_configure(struct scsi_device *sdev) +{ + struct scsi_target *starget; + struct Scsi_Host *shost; + struct mpi3mr_ioc *mrioc; + struct mpi3mr_tgt_dev *tgt_dev = NULL; + unsigned long flags; + int retval = 0; + struct sas_rphy *rphy = NULL; + + starget = scsi_target(sdev); + shost = dev_to_shost(&starget->dev); + mrioc = shost_priv(shost); + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + if (starget->channel == mrioc->scsi_device_channel) + tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); + else if (mrioc->sas_transport_enabled && !starget->channel) { + rphy = dev_to_rphy(starget->dev.parent); + tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, + rphy->identify.sas_address, rphy); + } + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + if (!tgt_dev) + return -ENXIO; + + mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth); + + sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT; + blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT); + + switch (tgt_dev->dev_type) { + case MPI3_DEVICE_DEVFORM_PCIE: + /*The block layer hw sector size = 512*/ + if ((tgt_dev->dev_spec.pcie_inf.dev_info & + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { + blk_queue_max_hw_sectors(sdev->request_queue, + tgt_dev->dev_spec.pcie_inf.mdts / 512); + if (tgt_dev->dev_spec.pcie_inf.pgsz == 0) + blk_queue_virt_boundary(sdev->request_queue, + ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); + else + blk_queue_virt_boundary(sdev->request_queue, + ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1)); + } + break; + default: + break; + } + + mpi3mr_tgtdev_put(tgt_dev); + + return retval; +} + +/** + * mpi3mr_slave_alloc -Slave alloc callback handler + * @sdev: SCSI device reference + * + * Allocate per device(lun) private data and initialize it. + * + * Return: 0 on success -ENOMEM on memory allocation failure. 
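+ *
+ * Note on the NVMe limits applied in mpi3mr_slave_configure() above,
+ * as illustrative arithmetic (assuming mdts is held in bytes, which
+ * the division by 512 suggests): an mdts of 1 MiB gives
+ * max_hw_sectors = 1048576 / 512 = 2048 (512-byte units), and a pgsz
+ * exponent of 12 gives a virt boundary mask of (1 << 12) - 1 = 0xfff,
+ * matching a 4 KiB device page size.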
+ */ +static int mpi3mr_slave_alloc(struct scsi_device *sdev) +{ + struct Scsi_Host *shost; + struct mpi3mr_ioc *mrioc; + struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; + struct mpi3mr_tgt_dev *tgt_dev = NULL; + struct mpi3mr_sdev_priv_data *scsi_dev_priv_data; + unsigned long flags; + struct scsi_target *starget; + int retval = 0; + struct sas_rphy *rphy = NULL; + + starget = scsi_target(sdev); + shost = dev_to_shost(&starget->dev); + mrioc = shost_priv(shost); + scsi_tgt_priv_data = starget->hostdata; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + + if (starget->channel == mrioc->scsi_device_channel) + tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); + else if (mrioc->sas_transport_enabled && !starget->channel) { + rphy = dev_to_rphy(starget->dev.parent); + tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, + rphy->identify.sas_address, rphy); + } + + if (tgt_dev) { + if (tgt_dev->starget == NULL) + tgt_dev->starget = starget; + mpi3mr_tgtdev_put(tgt_dev); + retval = 0; + } else { + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + return -ENXIO; + } + + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + + scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL); + if (!scsi_dev_priv_data) + return -ENOMEM; + + scsi_dev_priv_data->lun_id = sdev->lun; + scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data; + sdev->hostdata = scsi_dev_priv_data; + + scsi_tgt_priv_data->num_luns++; + + return retval; +} + +/** + * mpi3mr_target_alloc - Target alloc callback handler + * @starget: SCSI target reference + * + * Allocate per target private data and initialize it. + * + * Return: 0 on success -ENOMEM on memory allocation failure. + */ +static int mpi3mr_target_alloc(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; + struct mpi3mr_tgt_dev *tgt_dev; + unsigned long flags; + int retval = 0; + struct sas_rphy *rphy = NULL; + + scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL); + if (!scsi_tgt_priv_data) + return -ENOMEM; + + starget->hostdata = scsi_tgt_priv_data; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + if (starget->channel == mrioc->scsi_device_channel) { + tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); + if (tgt_dev && !tgt_dev->is_hidden) { + scsi_tgt_priv_data->starget = starget; + scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; + scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; + scsi_tgt_priv_data->dev_type = tgt_dev->dev_type; + scsi_tgt_priv_data->tgt_dev = tgt_dev; + tgt_dev->starget = starget; + atomic_set(&scsi_tgt_priv_data->block_io, 0); + retval = 0; + if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) && + ((tgt_dev->dev_spec.pcie_inf.dev_info & + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) && + ((tgt_dev->dev_spec.pcie_inf.dev_info & + MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) != + MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0)) + scsi_tgt_priv_data->dev_nvme_dif = 1; + scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled; + scsi_tgt_priv_data->wslen = tgt_dev->wslen; + if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD) + scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg; + } else + retval = -ENXIO; + } else if (mrioc->sas_transport_enabled && !starget->channel) { + rphy = dev_to_rphy(starget->dev.parent); + tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, + 
rphy->identify.sas_address, rphy); + if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl && + (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) { + scsi_tgt_priv_data->starget = starget; + scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; + scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; + scsi_tgt_priv_data->dev_type = tgt_dev->dev_type; + scsi_tgt_priv_data->tgt_dev = tgt_dev; + scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled; + scsi_tgt_priv_data->wslen = tgt_dev->wslen; + tgt_dev->starget = starget; + atomic_set(&scsi_tgt_priv_data->block_io, 0); + retval = 0; + } else + retval = -ENXIO; + } + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + + return retval; +} + +/** + * mpi3mr_check_return_unmap - Whether an unmap is allowed + * @mrioc: Adapter instance reference + * @scmd: SCSI Command reference + * + * The controller hardware cannot handle certain unmap commands + * for NVMe drives, this routine checks those and return true + * and completes the SCSI command with proper status and sense + * data. + * + * Return: TRUE for not allowed unmap, FALSE otherwise. + */ +static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc, + struct scsi_cmnd *scmd) +{ + unsigned char *buf; + u16 param_len, desc_len, trunc_param_len; + + trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7); + + if (mrioc->pdev->revision) { + if ((param_len > 24) && ((param_len - 8) & 0xF)) { + trunc_param_len -= (param_len - 8) & 0xF; + dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); + dprint_scsi_err(mrioc, + "truncating param_len from (%d) to (%d)\n", + param_len, trunc_param_len); + put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); + dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); + } + return false; + } + + if (!param_len) { + ioc_warn(mrioc, + "%s: cdb received with zero parameter length\n", + __func__); + scsi_print_command(scmd); + scmd->result = DID_OK << 16; + scsi_done(scmd); + return true; + } + + if (param_len < 24) { + ioc_warn(mrioc, + "%s: cdb received with invalid param_len: %d\n", + __func__, param_len); + scsi_print_command(scmd); + scmd->result = SAM_STAT_CHECK_CONDITION; + scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, + 0x1A, 0); + scsi_done(scmd); + return true; + } + if (param_len != scsi_bufflen(scmd)) { + ioc_warn(mrioc, + "%s: cdb received with param_len: %d bufflen: %d\n", + __func__, param_len, scsi_bufflen(scmd)); + scsi_print_command(scmd); + scmd->result = SAM_STAT_CHECK_CONDITION; + scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, + 0x1A, 0); + scsi_done(scmd); + return true; + } + buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC); + if (!buf) { + scsi_print_command(scmd); + scmd->result = SAM_STAT_CHECK_CONDITION; + scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, + 0x55, 0x03); + scsi_done(scmd); + return true; + } + scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); + desc_len = get_unaligned_be16(&buf[2]); + + if (desc_len < 16) { + ioc_warn(mrioc, + "%s: Invalid descriptor length in param list: %d\n", + __func__, desc_len); + scsi_print_command(scmd); + scmd->result = SAM_STAT_CHECK_CONDITION; + scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, + 0x26, 0); + scsi_done(scmd); + kfree(buf); + return true; + } + + if (param_len > (desc_len + 8)) { + trunc_param_len = desc_len + 8; + scsi_print_command(scmd); + dprint_scsi_err(mrioc, + "truncating param_len(%d) to desc_len+8(%d)\n", + param_len, trunc_param_len); + 
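+ /*
+  * Per the SBC UNMAP parameter-list layout: an 8-byte header is
+  * followed by 16-byte block descriptors, and buf[2..3] holds the
+  * block descriptor data length read above. Shrinking the CDB
+  * parameter list length (CDB bytes 7..8) to header + descriptor
+  * data drops trailing bytes the controller cannot consume, e.g.
+  * desc_len = 16 (one descriptor) yields a truncated length of 24.
+  */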
put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); + scsi_print_command(scmd); + } + + kfree(buf); + return false; +} + +/** + * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown + * @scmd: SCSI Command reference + * + * Checks whether a cdb is allowed during shutdown or not. + * + * Return: TRUE for allowed commands, FALSE otherwise. + */ + +inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd) +{ + switch (scmd->cmnd[0]) { + case SYNCHRONIZE_CACHE: + case START_STOP: + return true; + default: + return false; + } +} + +/** + * mpi3mr_qcmd - I/O request despatcher + * @shost: SCSI Host reference + * @scmd: SCSI Command reference + * + * Issues the SCSI Command as an MPI3 request. + * + * Return: 0 on successful queueing of the request or if the + * request is completed with failure. + * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy. + * SCSI_MLQUEUE_HOST_BUSY when the host queue is full. + */ +static int mpi3mr_qcmd(struct Scsi_Host *shost, + struct scsi_cmnd *scmd) +{ + struct mpi3mr_ioc *mrioc = shost_priv(shost); + struct mpi3mr_stgt_priv_data *stgt_priv_data; + struct mpi3mr_sdev_priv_data *sdev_priv_data; + struct scmd_priv *scmd_priv_data = NULL; + struct mpi3_scsi_io_request *scsiio_req = NULL; + struct op_req_qinfo *op_req_q = NULL; + int retval = 0; + u16 dev_handle; + u16 host_tag; + u32 scsiio_flags = 0, data_len_blks = 0; + struct request *rq = scsi_cmd_to_rq(scmd); + int iprio_class; + u8 is_pcie_dev = 0; + u32 tracked_io_sz = 0; + u32 ioc_pend_data_len = 0, tg_pend_data_len = 0; + struct mpi3mr_throttle_group_info *tg = NULL; + + if (mrioc->unrecoverable) { + scmd->result = DID_ERROR << 16; + scsi_done(scmd); + goto out; + } + + sdev_priv_data = scmd->device->hostdata; + if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + + if (mrioc->stop_drv_processing && + !(mpi3mr_allow_scmd_to_fw(scmd))) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + + stgt_priv_data = sdev_priv_data->tgt_priv_data; + dev_handle = stgt_priv_data->dev_handle; + + /* Avoid error handling escalation when device is removed or blocked */ + + if (scmd->device->host->shost_state == SHOST_RECOVERY && + scmd->cmnd[0] == TEST_UNIT_READY && + (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) { + scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); + scsi_done(scmd); + goto out; + } + + if (mrioc->reset_in_progress) { + retval = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + + if (atomic_read(&stgt_priv_data->block_io)) { + if (mrioc->stop_drv_processing) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + retval = SCSI_MLQUEUE_DEVICE_BUSY; + goto out; + } + + if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + if (stgt_priv_data->dev_removed) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + + if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) + is_pcie_dev = 1; + if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev && + (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && + mpi3mr_check_return_unmap(mrioc, scmd)) + goto out; + + host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd); + if (host_tag == MPI3MR_HOSTTAG_INVALID) { + scmd->result = DID_ERROR << 16; + scsi_done(scmd); + goto out; + } + + if (scmd->sc_data_direction == DMA_FROM_DEVICE) + scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ; + else if (scmd->sc_data_direction == 
DMA_TO_DEVICE) + scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE; + else + scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER; + + scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ; + + if (sdev_priv_data->ncq_prio_enable) { + iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); + if (iprio_class == IOPRIO_CLASS_RT) + scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT; + } + + if (scmd->cmd_len > 16) + scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16; + + scmd_priv_data = scsi_cmd_priv(scmd); + memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); + scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req; + scsiio_req->function = MPI3_FUNCTION_SCSI_IO; + scsiio_req->host_tag = cpu_to_le16(host_tag); + + mpi3mr_setup_eedp(mrioc, scmd, scsiio_req); + + if (stgt_priv_data->wslen) + mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags, + stgt_priv_data->wslen); + + memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len); + scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd)); + scsiio_req->dev_handle = cpu_to_le16(dev_handle); + scsiio_req->flags = cpu_to_le32(scsiio_flags); + int_to_scsilun(sdev_priv_data->lun_id, + (struct scsi_lun *)scsiio_req->lun); + + if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) { + mpi3mr_clear_scmd_priv(mrioc, scmd); + retval = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx]; + data_len_blks = scsi_bufflen(scmd) >> 9; + if ((data_len_blks >= mrioc->io_throttle_data_length) && + stgt_priv_data->io_throttle_enabled) { + tracked_io_sz = data_len_blks; + tg = stgt_priv_data->throttle_group; + if (tg) { + ioc_pend_data_len = atomic_add_return(data_len_blks, + &mrioc->pend_large_data_sz); + tg_pend_data_len = atomic_add_return(data_len_blks, + &tg->pend_large_data_sz); + if (!tg->io_divert && ((ioc_pend_data_len >= + mrioc->io_throttle_high) || + (tg_pend_data_len >= tg->high))) { + tg->io_divert = 1; + tg->need_qd_reduction = 1; + mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc, + tg, 1); + mpi3mr_queue_qd_reduction_event(mrioc, tg); + } + } else { + ioc_pend_data_len = atomic_add_return(data_len_blks, + &mrioc->pend_large_data_sz); + if (ioc_pend_data_len >= mrioc->io_throttle_high) + stgt_priv_data->io_divert = 1; + } + } + + if (stgt_priv_data->io_divert) { + scsiio_req->msg_flags |= + MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; + scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING; + } + scsiio_req->flags = cpu_to_le32(scsiio_flags); + + if (mpi3mr_op_request_post(mrioc, op_req_q, + scmd_priv_data->mpi3mr_scsiio_req)) { + mpi3mr_clear_scmd_priv(mrioc, scmd); + retval = SCSI_MLQUEUE_HOST_BUSY; + if (tracked_io_sz) { + atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz); + if (tg) + atomic_sub(tracked_io_sz, + &tg->pend_large_data_sz); + } + goto out; + } + +out: + return retval; +} + +static const struct scsi_host_template mpi3mr_driver_template = { + .module = THIS_MODULE, + .name = "MPI3 Storage Controller", + .proc_name = MPI3MR_DRIVER_NAME, + .queuecommand = mpi3mr_qcmd, + .target_alloc = mpi3mr_target_alloc, + .slave_alloc = mpi3mr_slave_alloc, + .slave_configure = mpi3mr_slave_configure, + .target_destroy = mpi3mr_target_destroy, + .slave_destroy = mpi3mr_slave_destroy, + .scan_finished = mpi3mr_scan_finished, + .scan_start = mpi3mr_scan_start, + .change_queue_depth = mpi3mr_change_queue_depth, + .eh_device_reset_handler = mpi3mr_eh_dev_reset, + .eh_target_reset_handler = mpi3mr_eh_target_reset, + .eh_host_reset_handler 
= mpi3mr_eh_host_reset, + .bios_param = mpi3mr_bios_param, + .map_queues = mpi3mr_map_queues, + .mq_poll = mpi3mr_blk_mq_poll, + .no_write_same = 1, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES, + /* max xfer supported is 1M (2K in 512 byte sized sectors) + */ + .max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512), + .cmd_per_lun = MPI3MR_MAX_CMDS_LUN, + .max_segment_size = 0xffffffff, + .track_queue_depth = 1, + .cmd_size = sizeof(struct scmd_priv), + .shost_groups = mpi3mr_host_groups, + .sdev_groups = mpi3mr_dev_groups, +}; + +/** + * mpi3mr_init_drv_cmd - Initialize internal command tracker + * @cmdptr: Internal command tracker + * @host_tag: Host tag used for the specific command + * + * Initialize the internal command tracker structure with + * specified host tag. + * + * Return: Nothing. + */ +static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr, + u16 host_tag) +{ + mutex_init(&cmdptr->mutex); + cmdptr->reply = NULL; + cmdptr->state = MPI3MR_CMD_NOTUSED; + cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE; + cmdptr->host_tag = host_tag; +} + +/** + * osintfc_mrioc_security_status -Check controller secure status + * @pdev: PCI device instance + * + * Read the Device Serial Number capability from PCI config + * space and decide whether the controller is secure or not. + * + * Return: 0 on success, non-zero on failure. + */ +static int +osintfc_mrioc_security_status(struct pci_dev *pdev) +{ + u32 cap_data; + int base; + u32 ctlr_status; + u32 debug_status; + int retval = 0; + + base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); + if (!base) { + dev_err(&pdev->dev, + "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__); + return -1; + } + + pci_read_config_dword(pdev, base + 4, &cap_data); + + debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK; + ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK; + + switch (ctlr_status) { + case MPI3MR_INVALID_DEVICE: + dev_err(&pdev->dev, + "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", + __func__, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); + retval = -1; + break; + case MPI3MR_CONFIG_SECURE_DEVICE: + if (!debug_status) + dev_info(&pdev->dev, + "%s: Config secure ctlr is detected\n", + __func__); + break; + case MPI3MR_HARD_SECURE_DEVICE: + break; + case MPI3MR_TAMPERED_DEVICE: + dev_err(&pdev->dev, + "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", + __func__, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); + retval = -1; + break; + default: + retval = -1; + break; + } + + if (!retval && debug_status) { + dev_err(&pdev->dev, + "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", + __func__, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); + retval = -1; + } + + return retval; +} + +/** + * mpi3mr_probe - PCI probe callback + * @pdev: PCI device instance + * @id: PCI device ID details + * + * controller initialization routine. Checks the security status + * of the controller and if it is invalid or tampered return the + * probe without initializing the controller. Otherwise, + * allocate per adapter instance through shost_priv and + * initialize controller specific data structures, initializae + * the controller hardware, add shost to the SCSI subsystem. + * + * Return: 0 on success, non-zero on failure. 
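+ *
+ * Note: the max_sgl_entries module parameter is sanitized in this
+ * function by clamping it to the supported maximum, raising it to the
+ * default when smaller, or rounding it down to a multiple of the
+ * default. A hypothetical example, assuming a default of 256 and a
+ * maximum of 2048:
+ *
+ *   max_sgl_entries = 700;
+ *   entries = (700 / 256) * 256;	// integer division, result 512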
+ */ + +static int +mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct mpi3mr_ioc *mrioc = NULL; + struct Scsi_Host *shost = NULL; + int retval = 0, i; + + if (osintfc_mrioc_security_status(pdev)) { + warn_non_secure_ctlr = 1; + return 1; /* For Invalid and Tampered device */ + } + + shost = scsi_host_alloc(&mpi3mr_driver_template, + sizeof(struct mpi3mr_ioc)); + if (!shost) { + retval = -ENODEV; + goto shost_failed; + } + + mrioc = shost_priv(shost); + mrioc->id = mrioc_ids++; + sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME); + sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id); + INIT_LIST_HEAD(&mrioc->list); + spin_lock(&mrioc_list_lock); + list_add_tail(&mrioc->list, &mrioc_list); + spin_unlock(&mrioc_list_lock); + + spin_lock_init(&mrioc->admin_req_lock); + spin_lock_init(&mrioc->reply_free_queue_lock); + spin_lock_init(&mrioc->sbq_lock); + spin_lock_init(&mrioc->fwevt_lock); + spin_lock_init(&mrioc->tgtdev_lock); + spin_lock_init(&mrioc->watchdog_lock); + spin_lock_init(&mrioc->chain_buf_lock); + spin_lock_init(&mrioc->sas_node_lock); + + INIT_LIST_HEAD(&mrioc->fwevt_list); + INIT_LIST_HEAD(&mrioc->tgtdev_list); + INIT_LIST_HEAD(&mrioc->delayed_rmhs_list); + INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list); + INIT_LIST_HEAD(&mrioc->sas_expander_list); + INIT_LIST_HEAD(&mrioc->hba_port_table_list); + INIT_LIST_HEAD(&mrioc->enclosure_list); + + mutex_init(&mrioc->reset_mutex); + mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS); + mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS); + mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS); + mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS); + mpi3mr_init_drv_cmd(&mrioc->transport_cmds, + MPI3MR_HOSTTAG_TRANSPORT_CMDS); + + for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) + mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i], + MPI3MR_HOSTTAG_DEVRMCMD_MIN + i); + + for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) + mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i], + MPI3MR_HOSTTAG_EVTACKCMD_MIN + i); + + if (pdev->revision) + mrioc->enable_segqueue = true; + + init_waitqueue_head(&mrioc->reset_waitq); + mrioc->logging_level = logging_level; + mrioc->shost = shost; + mrioc->pdev = pdev; + mrioc->stop_bsgs = 1; + + mrioc->max_sgl_entries = max_sgl_entries; + if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES) + mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES; + else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES) + mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES; + else { + mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES; + mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES; + } + + /* init shost parameters */ + shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH; + shost->max_lun = -1; + shost->unique_id = mrioc->id; + + shost->max_channel = 0; + shost->max_id = 0xFFFFFFFF; + + shost->host_tagset = 1; + + if (prot_mask >= 0) + scsi_host_set_prot(shost, prot_mask); + else { + prot_mask = SHOST_DIF_TYPE1_PROTECTION + | SHOST_DIF_TYPE2_PROTECTION + | SHOST_DIF_TYPE3_PROTECTION; + scsi_host_set_prot(shost, prot_mask); + } + + ioc_info(mrioc, + "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n", + __func__, + (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", + (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", + (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", + (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "", + (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "", + (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? 
" DIX2" : "", + (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : ""); + + if (prot_guard_mask) + scsi_host_set_guard(shost, (prot_guard_mask & 3)); + else + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + + snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name), + "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id); + mrioc->fwevt_worker_thread = alloc_ordered_workqueue( + mrioc->fwevt_worker_name, 0); + if (!mrioc->fwevt_worker_thread) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + retval = -ENODEV; + goto fwevtthread_failed; + } + + mrioc->is_driver_loading = 1; + mrioc->cpu_count = num_online_cpus(); + if (mpi3mr_setup_resources(mrioc)) { + ioc_err(mrioc, "setup resources failed\n"); + retval = -ENODEV; + goto resource_alloc_failed; + } + if (mpi3mr_init_ioc(mrioc)) { + ioc_err(mrioc, "initializing IOC failed\n"); + retval = -ENODEV; + goto init_ioc_failed; + } + + shost->nr_hw_queues = mrioc->num_op_reply_q; + if (mrioc->active_poll_qcount) + shost->nr_maps = 3; + + shost->can_queue = mrioc->max_host_ios; + shost->sg_tablesize = mrioc->max_sgl_entries; + shost->max_id = mrioc->facts.max_perids + 1; + + retval = scsi_add_host(shost, &pdev->dev); + if (retval) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto addhost_failed; + } + + scsi_scan_host(shost); + mpi3mr_bsg_init(mrioc); + return retval; + +addhost_failed: + mpi3mr_stop_watchdog(mrioc); + mpi3mr_cleanup_ioc(mrioc); +init_ioc_failed: + mpi3mr_free_mem(mrioc); + mpi3mr_cleanup_resources(mrioc); +resource_alloc_failed: + destroy_workqueue(mrioc->fwevt_worker_thread); +fwevtthread_failed: + spin_lock(&mrioc_list_lock); + list_del(&mrioc->list); + spin_unlock(&mrioc_list_lock); + scsi_host_put(shost); +shost_failed: + return retval; +} + +/** + * mpi3mr_remove - PCI remove callback + * @pdev: PCI device instance + * + * Cleanup the IOC by issuing MUR and shutdown notification. + * Free up all memory and resources associated with the + * controllerand target devices, unregister the shost. + * + * Return: Nothing. 
+ */ +static void mpi3mr_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct mpi3mr_ioc *mrioc; + struct workqueue_struct *wq; + unsigned long flags; + struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; + struct mpi3mr_hba_port *port, *hba_port_next; + struct mpi3mr_sas_node *sas_expander, *sas_expander_next; + + if (!shost) + return; + + mrioc = shost_priv(shost); + while (mrioc->reset_in_progress || mrioc->is_driver_loading) + ssleep(1); + + if (!pci_device_is_present(mrioc->pdev)) { + mrioc->unrecoverable = 1; + mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); + } + + mpi3mr_bsg_exit(mrioc); + mrioc->stop_drv_processing = 1; + mpi3mr_cleanup_fwevt_list(mrioc); + spin_lock_irqsave(&mrioc->fwevt_lock, flags); + wq = mrioc->fwevt_worker_thread; + mrioc->fwevt_worker_thread = NULL; + spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); + if (wq) + destroy_workqueue(wq); + + if (mrioc->sas_transport_enabled) + sas_remove_host(shost); + else + scsi_remove_host(shost); + + list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, + list) { + mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); + mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true); + mpi3mr_tgtdev_put(tgtdev); + } + mpi3mr_stop_watchdog(mrioc); + mpi3mr_cleanup_ioc(mrioc); + mpi3mr_free_mem(mrioc); + mpi3mr_cleanup_resources(mrioc); + + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + list_for_each_entry_safe_reverse(sas_expander, sas_expander_next, + &mrioc->sas_expander_list, list) { + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + mpi3mr_expander_node_remove(mrioc, sas_expander); + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + } + list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) { + ioc_info(mrioc, + "removing hba_port entry: %p port: %d from hba_port list\n", + port, port->port_id); + list_del(&port->list); + kfree(port); + } + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + + if (mrioc->sas_hba.num_phys) { + kfree(mrioc->sas_hba.phy); + mrioc->sas_hba.phy = NULL; + mrioc->sas_hba.num_phys = 0; + } + + spin_lock(&mrioc_list_lock); + list_del(&mrioc->list); + spin_unlock(&mrioc_list_lock); + + scsi_host_put(shost); +} + +/** + * mpi3mr_shutdown - PCI shutdown callback + * @pdev: PCI device instance + * + * Free up all memory and resources associated with the + * controller + * + * Return: Nothing. + */ +static void mpi3mr_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct mpi3mr_ioc *mrioc; + struct workqueue_struct *wq; + unsigned long flags; + + if (!shost) + return; + + mrioc = shost_priv(shost); + while (mrioc->reset_in_progress || mrioc->is_driver_loading) + ssleep(1); + + mrioc->stop_drv_processing = 1; + mpi3mr_cleanup_fwevt_list(mrioc); + spin_lock_irqsave(&mrioc->fwevt_lock, flags); + wq = mrioc->fwevt_worker_thread; + mrioc->fwevt_worker_thread = NULL; + spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); + if (wq) + destroy_workqueue(wq); + + mpi3mr_stop_watchdog(mrioc); + mpi3mr_cleanup_ioc(mrioc); + mpi3mr_cleanup_resources(mrioc); +} + +/** + * mpi3mr_suspend - PCI power management suspend callback + * @dev: Device struct + * + * Change the power state to the given value and cleanup the IOC + * by issuing MUR and shutdown notification + * + * Return: 0 always. 
+ */ +static int __maybe_unused +mpi3mr_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct mpi3mr_ioc *mrioc; + + if (!shost) + return 0; + + mrioc = shost_priv(shost); + while (mrioc->reset_in_progress || mrioc->is_driver_loading) + ssleep(1); + mrioc->stop_drv_processing = 1; + mpi3mr_cleanup_fwevt_list(mrioc); + scsi_block_requests(shost); + mpi3mr_stop_watchdog(mrioc); + mpi3mr_cleanup_ioc(mrioc); + + ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n", + pdev, pci_name(pdev)); + mpi3mr_cleanup_resources(mrioc); + + return 0; +} + +/** + * mpi3mr_resume - PCI power management resume callback + * @dev: Device struct + * + * Restore the power state to D0 and reinitialize the controller + * and resume I/O operations to the target devices + * + * Return: 0 on success, non-zero on failure + */ +static int __maybe_unused +mpi3mr_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct mpi3mr_ioc *mrioc; + pci_power_t device_state = pdev->current_state; + int r; + + if (!shost) + return 0; + + mrioc = shost_priv(shost); + + ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", + pdev, pci_name(pdev), device_state); + mrioc->pdev = pdev; + mrioc->cpu_count = num_online_cpus(); + r = mpi3mr_setup_resources(mrioc); + if (r) { + ioc_info(mrioc, "%s: Setup resources failed[%d]\n", + __func__, r); + return r; + } + + mrioc->stop_drv_processing = 0; + mpi3mr_invalidate_devhandles(mrioc); + mpi3mr_free_enclosure_list(mrioc); + mpi3mr_memset_buffers(mrioc); + r = mpi3mr_reinit_ioc(mrioc, 1); + if (r) { + ioc_err(mrioc, "resuming controller failed[%d]\n", r); + return r; + } + ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME); + scsi_unblock_requests(shost); + mrioc->device_refresh_on = 0; + mpi3mr_start_watchdog(mrioc); + + return 0; +} + +static const struct pci_device_id mpi3mr_pci_id_table[] = { + { + PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, + MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID) + }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table); + +static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume); + +static struct pci_driver mpi3mr_pci_driver = { + .name = MPI3MR_DRIVER_NAME, + .id_table = mpi3mr_pci_id_table, + .probe = mpi3mr_probe, + .remove = mpi3mr_remove, + .shutdown = mpi3mr_shutdown, + .driver.pm = &mpi3mr_pm_ops, +}; + +static ssize_t event_counter_show(struct device_driver *dd, char *buf) +{ + return sprintf(buf, "%llu\n", atomic64_read(&event_counter)); +} +static DRIVER_ATTR_RO(event_counter); + +static int __init mpi3mr_init(void) +{ + int ret_val; + + pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME, + MPI3MR_DRIVER_VERSION); + + mpi3mr_transport_template = + sas_attach_transport(&mpi3mr_transport_functions); + if (!mpi3mr_transport_template) { + pr_err("%s failed to load due to sas transport attach failure\n", + MPI3MR_DRIVER_NAME); + return -ENODEV; + } + + ret_val = pci_register_driver(&mpi3mr_pci_driver); + if (ret_val) { + pr_err("%s failed to load due to pci register driver failure\n", + MPI3MR_DRIVER_NAME); + goto err_pci_reg_fail; + } + + ret_val = driver_create_file(&mpi3mr_pci_driver.driver, + &driver_attr_event_counter); + if (ret_val) + goto err_event_counter; + + return ret_val; + +err_event_counter: + pci_unregister_driver(&mpi3mr_pci_driver); + +err_pci_reg_fail: + sas_release_transport(mpi3mr_transport_template); + return ret_val; +} + +static 
void __exit mpi3mr_exit(void) +{ + if (warn_non_secure_ctlr) + pr_warn( + "Unloading %s version %s while managing a non secure controller\n", + MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION); + else + pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME, + MPI3MR_DRIVER_VERSION); + + driver_remove_file(&mpi3mr_pci_driver.driver, + &driver_attr_event_counter); + pci_unregister_driver(&mpi3mr_pci_driver); + sas_release_transport(mpi3mr_transport_template); +} + +module_init(mpi3mr_init); +module_exit(mpi3mr_exit); diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c new file mode 100644 index 000000000..82b55e955 --- /dev/null +++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c @@ -0,0 +1,3291 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for Broadcom MPI3 Storage Controllers + * + * Copyright (C) 2017-2023 Broadcom Inc. + * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) + * + */ + +#include "mpi3mr.h" + +/** + * mpi3mr_post_transport_req - Issue transport requests and wait + * @mrioc: Adapter instance reference + * @request: Properly populated MPI3 request + * @request_sz: Size of the MPI3 request + * @reply: Pointer to return MPI3 reply + * @reply_sz: Size of the MPI3 reply buffer + * @timeout: Timeout in seconds + * @ioc_status: Pointer to return ioc status + * + * A generic function for posting MPI3 requests from the SAS + * transport layer that uses transport command infrastructure. + * This blocks for the completion of request for timeout seconds + * and if the request times out this function faults the + * controller with proper reason code. + * + * On successful completion of the request this function returns + * appropriate ioc status from the firmware back to the caller. + * + * Return: 0 on success, non-zero on failure. 
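+ *
+ * A minimal caller sketch, modelled on mpi3mr_report_manufacture()
+ * below (locals and error handling elided):
+ *
+ *   u16 ioc_status;
+ *
+ *   rc = mpi3mr_post_transport_req(mrioc, &mpi_request,
+ *       sizeof(mpi_request), &mpi_reply, sizeof(mpi_reply),
+ *       MPI3MR_INTADMCMD_TIMEOUT, &ioc_status);
+ *   if (rc || (ioc_status != MPI3_IOCSTATUS_SUCCESS))
+ *       the request failed or the firmware reported an error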
+ */ +static int mpi3mr_post_transport_req(struct mpi3mr_ioc *mrioc, void *request, + u16 request_sz, void *reply, u16 reply_sz, int timeout, + u16 *ioc_status) +{ + int retval = 0; + + mutex_lock(&mrioc->transport_cmds.mutex); + if (mrioc->transport_cmds.state & MPI3MR_CMD_PENDING) { + retval = -1; + ioc_err(mrioc, "sending transport request failed due to command in use\n"); + mutex_unlock(&mrioc->transport_cmds.mutex); + goto out; + } + mrioc->transport_cmds.state = MPI3MR_CMD_PENDING; + mrioc->transport_cmds.is_waiting = 1; + mrioc->transport_cmds.callback = NULL; + mrioc->transport_cmds.ioc_status = 0; + mrioc->transport_cmds.ioc_loginfo = 0; + + init_completion(&mrioc->transport_cmds.done); + dprint_cfg_info(mrioc, "posting transport request\n"); + if (mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO) + dprint_dump(request, request_sz, "transport_req"); + retval = mpi3mr_admin_request_post(mrioc, request, request_sz, 1); + if (retval) { + ioc_err(mrioc, "posting transport request failed\n"); + goto out_unlock; + } + wait_for_completion_timeout(&mrioc->transport_cmds.done, + (timeout * HZ)); + if (!(mrioc->transport_cmds.state & MPI3MR_CMD_COMPLETE)) { + mpi3mr_check_rh_fault_ioc(mrioc, + MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT); + ioc_err(mrioc, "transport request timed out\n"); + retval = -1; + goto out_unlock; + } + *ioc_status = mrioc->transport_cmds.ioc_status & + MPI3_IOCSTATUS_STATUS_MASK; + if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS) + dprint_transport_err(mrioc, + "transport request returned with ioc_status(0x%04x), log_info(0x%08x)\n", + *ioc_status, mrioc->transport_cmds.ioc_loginfo); + + if ((reply) && (mrioc->transport_cmds.state & MPI3MR_CMD_REPLY_VALID)) + memcpy((u8 *)reply, mrioc->transport_cmds.reply, reply_sz); + +out_unlock: + mrioc->transport_cmds.state = MPI3MR_CMD_NOTUSED; + mutex_unlock(&mrioc->transport_cmds.mutex); + +out: + return retval; +} + +/* report manufacture request structure */ +struct rep_manu_request { + u8 smp_frame_type; + u8 function; + u8 reserved; + u8 request_length; +}; + +/* report manufacture reply structure */ +struct rep_manu_reply { + u8 smp_frame_type; /* 0x41 */ + u8 function; /* 0x01 */ + u8 function_result; + u8 response_length; + u16 expander_change_count; + u8 reserved0[2]; + u8 sas_format; + u8 reserved2[3]; + u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; + u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; + u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN]; + u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN]; + u16 component_id; + u8 component_revision_id; + u8 reserved3; + u8 vendor_specific[8]; +}; + +/** + * mpi3mr_report_manufacture - obtain SMP report_manufacture + * @mrioc: Adapter instance reference + * @sas_address: SAS address of the expander device + * @edev: SAS transport layer sas_expander_device object + * @port_id: ID of the HBA port + * + * Fills in the sas_expander_device with manufacturing info. + * + * Return: 0 for success, non-zero for failure. 
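+ *
+ * Note: the SMP request and response share a single coherent DMA
+ * allocation; the response region simply follows the request region,
+ * restating the setup done in the body below:
+ *
+ *   data_in_dma = data_out_dma + data_out_sz;
+ *   manufacture_reply = data_out + data_out_sz;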
+ */ +static int mpi3mr_report_manufacture(struct mpi3mr_ioc *mrioc, + u64 sas_address, struct sas_expander_device *edev, u8 port_id) +{ + struct mpi3_smp_passthrough_request mpi_request; + struct mpi3_smp_passthrough_reply mpi_reply; + struct rep_manu_reply *manufacture_reply; + struct rep_manu_request *manufacture_request; + int rc = 0; + void *psge; + void *data_out = NULL; + dma_addr_t data_out_dma; + dma_addr_t data_in_dma; + size_t data_in_sz; + size_t data_out_sz; + u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; + u16 request_sz = sizeof(struct mpi3_smp_passthrough_request); + u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply); + u16 ioc_status; + u8 *tmp; + + if (mrioc->reset_in_progress) { + ioc_err(mrioc, "%s: host reset in progress!\n", __func__); + return -EFAULT; + } + + data_out_sz = sizeof(struct rep_manu_request); + data_in_sz = sizeof(struct rep_manu_reply); + data_out = dma_alloc_coherent(&mrioc->pdev->dev, + data_out_sz + data_in_sz, &data_out_dma, GFP_KERNEL); + if (!data_out) { + rc = -ENOMEM; + goto out; + } + + data_in_dma = data_out_dma + data_out_sz; + manufacture_reply = data_out + data_out_sz; + + manufacture_request = data_out; + manufacture_request->smp_frame_type = 0x40; + manufacture_request->function = 1; + manufacture_request->reserved = 0; + manufacture_request->request_length = 0; + + memset(&mpi_request, 0, request_sz); + memset(&mpi_reply, 0, reply_sz); + mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS); + mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH; + mpi_request.io_unit_port = (u8) port_id; + mpi_request.sas_address = cpu_to_le64(sas_address); + + psge = &mpi_request.request_sge; + mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma); + + psge = &mpi_request.response_sge; + mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma); + + dprint_transport_info(mrioc, + "sending report manufacturer SMP request to sas_address(0x%016llx), port(%d)\n", + (unsigned long long)sas_address, port_id); + + rc = mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz, + &mpi_reply, reply_sz, + MPI3MR_INTADMCMD_TIMEOUT, &ioc_status); + if (rc) + goto out; + + dprint_transport_info(mrioc, + "report manufacturer SMP request completed with ioc_status(0x%04x)\n", + ioc_status); + + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + rc = -EINVAL; + goto out; + } + + dprint_transport_info(mrioc, + "report manufacturer - reply data transfer size(%d)\n", + le16_to_cpu(mpi_reply.response_data_length)); + + if (le16_to_cpu(mpi_reply.response_data_length) != + sizeof(struct rep_manu_reply)) { + rc = -EINVAL; + goto out; + } + + strscpy(edev->vendor_id, manufacture_reply->vendor_id, + SAS_EXPANDER_VENDOR_ID_LEN); + strscpy(edev->product_id, manufacture_reply->product_id, + SAS_EXPANDER_PRODUCT_ID_LEN); + strscpy(edev->product_rev, manufacture_reply->product_rev, + SAS_EXPANDER_PRODUCT_REV_LEN); + edev->level = manufacture_reply->sas_format & 1; + if (edev->level) { + strscpy(edev->component_vendor_id, + manufacture_reply->component_vendor_id, + SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); + tmp = (u8 *)&manufacture_reply->component_id; + edev->component_id = tmp[0] << 8 | tmp[1]; + edev->component_revision_id = + manufacture_reply->component_revision_id; + } + +out: + if (data_out) + dma_free_coherent(&mrioc->pdev->dev, data_out_sz + data_in_sz, + data_out, data_out_dma); + + return rc; +} + +/** + * __mpi3mr_expander_find_by_handle - expander search by handle + * @mrioc: Adapter instance reference + * @handle: Firmware device 
handle of the expander + * + * Context: The caller should acquire sas_node_lock + * + * This searches for expander device based on handle, then + * returns the sas_node object. + * + * Return: Expander sas_node object reference or NULL + */ +struct mpi3mr_sas_node *__mpi3mr_expander_find_by_handle(struct mpi3mr_ioc + *mrioc, u16 handle) +{ + struct mpi3mr_sas_node *sas_expander, *r; + + r = NULL; + list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) { + if (sas_expander->handle != handle) + continue; + r = sas_expander; + goto out; + } + out: + return r; +} + +/** + * mpi3mr_is_expander_device - if device is an expander + * @device_info: Bitfield providing information about the device + * + * Return: 1 if the device is expander device, else 0. + */ +u8 mpi3mr_is_expander_device(u16 device_info) +{ + if ((device_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) == + MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER) + return 1; + else + return 0; +} + +/** + * mpi3mr_get_sas_address - retrieve sas_address for handle + * @mrioc: Adapter instance reference + * @handle: Firmware device handle + * @sas_address: Address to hold sas address + * + * This function issues device page0 read for a given device + * handle and gets the SAS address and return it back + * + * Return: 0 for success, non-zero for failure + */ +static int mpi3mr_get_sas_address(struct mpi3mr_ioc *mrioc, u16 handle, + u64 *sas_address) +{ + struct mpi3_device_page0 dev_pg0; + u16 ioc_status; + struct mpi3_device0_sas_sata_format *sasinf; + + *sas_address = 0; + + if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0, + sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, + handle))) { + ioc_err(mrioc, "%s: device page0 read failed\n", __func__); + return -ENXIO; + } + + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n", + handle, ioc_status, __FILE__, __LINE__, __func__); + return -ENXIO; + } + + if (le16_to_cpu(dev_pg0.flags) & + MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE) + *sas_address = mrioc->sas_hba.sas_address; + else if (dev_pg0.device_form == MPI3_DEVICE_DEVFORM_SAS_SATA) { + sasinf = &dev_pg0.device_specific.sas_sata_format; + *sas_address = le64_to_cpu(sasinf->sas_address); + } else { + ioc_err(mrioc, "%s: device_form(%d) is not SAS_SATA\n", + __func__, dev_pg0.device_form); + return -ENXIO; + } + return 0; +} + +/** + * __mpi3mr_get_tgtdev_by_addr - target device search + * @mrioc: Adapter instance reference + * @sas_address: SAS address of the device + * @hba_port: HBA port entry + * + * This searches for target device from sas address and hba port + * pointer then return mpi3mr_tgt_dev object. 
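+ *
+ * Callers must hold mrioc->tgtdev_lock (the body asserts this) and
+ * receive the device with an elevated reference count. A usage
+ * sketch, mirroring the locked wrapper below:
+ *
+ *   spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ *   tgtdev = __mpi3mr_get_tgtdev_by_addr(mrioc, sas_address, hba_port);
+ *   spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ *   if (tgtdev)
+ *           mpi3mr_tgtdev_put(tgtdev);	// once the caller is done with it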
+ * + * Return: Valid tget_dev or NULL + */ +static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr(struct mpi3mr_ioc *mrioc, + u64 sas_address, struct mpi3mr_hba_port *hba_port) +{ + struct mpi3mr_tgt_dev *tgtdev; + + assert_spin_locked(&mrioc->tgtdev_lock); + + list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) + if ((tgtdev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA) && + (tgtdev->dev_spec.sas_sata_inf.sas_address == sas_address) + && (tgtdev->dev_spec.sas_sata_inf.hba_port == hba_port)) + goto found_device; + return NULL; +found_device: + mpi3mr_tgtdev_get(tgtdev); + return tgtdev; +} + +/** + * mpi3mr_get_tgtdev_by_addr - target device search + * @mrioc: Adapter instance reference + * @sas_address: SAS address of the device + * @hba_port: HBA port entry + * + * This searches for target device from sas address and hba port + * pointer then return mpi3mr_tgt_dev object. + * + * Context: This function will acquire tgtdev_lock and will + * release before returning the mpi3mr_tgt_dev object. + * + * Return: Valid tget_dev or NULL + */ +static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_addr(struct mpi3mr_ioc *mrioc, + u64 sas_address, struct mpi3mr_hba_port *hba_port) +{ + struct mpi3mr_tgt_dev *tgtdev = NULL; + unsigned long flags; + + if (!hba_port) + goto out; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + tgtdev = __mpi3mr_get_tgtdev_by_addr(mrioc, sas_address, hba_port); + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + +out: + return tgtdev; +} + +/** + * mpi3mr_remove_device_by_sas_address - remove the device + * @mrioc: Adapter instance reference + * @sas_address: SAS address of the device + * @hba_port: HBA port entry + * + * This searches for target device using sas address and hba + * port pointer then removes it from the OS. + * + * Return: None + */ +static void mpi3mr_remove_device_by_sas_address(struct mpi3mr_ioc *mrioc, + u64 sas_address, struct mpi3mr_hba_port *hba_port) +{ + struct mpi3mr_tgt_dev *tgtdev = NULL; + unsigned long flags; + u8 was_on_tgtdev_list = 0; + + if (!hba_port) + return; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + tgtdev = __mpi3mr_get_tgtdev_by_addr(mrioc, + sas_address, hba_port); + if (tgtdev) { + if (!list_empty(&tgtdev->list)) { + list_del_init(&tgtdev->list); + was_on_tgtdev_list = 1; + mpi3mr_tgtdev_put(tgtdev); + } + } + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + if (was_on_tgtdev_list) { + if (tgtdev->host_exposed) + mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); + mpi3mr_tgtdev_put(tgtdev); + } +} + +/** + * __mpi3mr_get_tgtdev_by_addr_and_rphy - target device search + * @mrioc: Adapter instance reference + * @sas_address: SAS address of the device + * @rphy: SAS transport layer rphy object + * + * This searches for target device from sas address and rphy + * pointer then return mpi3mr_tgt_dev object. 
+ * + * Return: Valid tget_dev or NULL + */ +struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr_and_rphy( + struct mpi3mr_ioc *mrioc, u64 sas_address, struct sas_rphy *rphy) +{ + struct mpi3mr_tgt_dev *tgtdev; + + assert_spin_locked(&mrioc->tgtdev_lock); + + list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) + if ((tgtdev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA) && + (tgtdev->dev_spec.sas_sata_inf.sas_address == sas_address) + && (tgtdev->dev_spec.sas_sata_inf.rphy == rphy)) + goto found_device; + return NULL; +found_device: + mpi3mr_tgtdev_get(tgtdev); + return tgtdev; +} + +/** + * mpi3mr_expander_find_by_sas_address - sas expander search + * @mrioc: Adapter instance reference + * @sas_address: SAS address of expander + * @hba_port: HBA port entry + * + * Return: A valid SAS expander node or NULL. + * + */ +static struct mpi3mr_sas_node *mpi3mr_expander_find_by_sas_address( + struct mpi3mr_ioc *mrioc, u64 sas_address, + struct mpi3mr_hba_port *hba_port) +{ + struct mpi3mr_sas_node *sas_expander, *r = NULL; + + if (!hba_port) + goto out; + + list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) { + if ((sas_expander->sas_address != sas_address) || + (sas_expander->hba_port != hba_port)) + continue; + r = sas_expander; + goto out; + } +out: + return r; +} + +/** + * __mpi3mr_sas_node_find_by_sas_address - sas node search + * @mrioc: Adapter instance reference + * @sas_address: SAS address of expander or sas host + * @hba_port: HBA port entry + * Context: Caller should acquire mrioc->sas_node_lock. + * + * If the SAS address indicates the device is direct attached to + * the controller (controller's SAS address) then the SAS node + * associated with the controller is returned back else the SAS + * address and hba port are used to identify the exact expander + * and the associated sas_node object is returned. If there is + * no match NULL is returned. + * + * Return: A valid SAS node or NULL. + * + */ +static struct mpi3mr_sas_node *__mpi3mr_sas_node_find_by_sas_address( + struct mpi3mr_ioc *mrioc, u64 sas_address, + struct mpi3mr_hba_port *hba_port) +{ + + if (mrioc->sas_hba.sas_address == sas_address) + return &mrioc->sas_hba; + return mpi3mr_expander_find_by_sas_address(mrioc, sas_address, + hba_port); +} + +/** + * mpi3mr_parent_present - Is parent present for a phy + * @mrioc: Adapter instance reference + * @phy: SAS transport layer phy object + * + * Return: 0 if parent is present else non-zero + */ +static int mpi3mr_parent_present(struct mpi3mr_ioc *mrioc, struct sas_phy *phy) +{ + unsigned long flags; + struct mpi3mr_hba_port *hba_port = phy->hostdata; + + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + if (__mpi3mr_sas_node_find_by_sas_address(mrioc, + phy->identify.sas_address, + hba_port) == NULL) { + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + return -1; + } + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + return 0; +} + +/** + * mpi3mr_convert_phy_link_rate - + * @link_rate: link rate as defined in the MPI header + * + * Convert link_rate from mpi format into sas_transport layer + * form. 
+ * + * Return: A valid SAS transport layer defined link rate + */ +static enum sas_linkrate mpi3mr_convert_phy_link_rate(u8 link_rate) +{ + enum sas_linkrate rc; + + switch (link_rate) { + case MPI3_SAS_NEG_LINK_RATE_1_5: + rc = SAS_LINK_RATE_1_5_GBPS; + break; + case MPI3_SAS_NEG_LINK_RATE_3_0: + rc = SAS_LINK_RATE_3_0_GBPS; + break; + case MPI3_SAS_NEG_LINK_RATE_6_0: + rc = SAS_LINK_RATE_6_0_GBPS; + break; + case MPI3_SAS_NEG_LINK_RATE_12_0: + rc = SAS_LINK_RATE_12_0_GBPS; + break; + case MPI3_SAS_NEG_LINK_RATE_22_5: + rc = SAS_LINK_RATE_22_5_GBPS; + break; + case MPI3_SAS_NEG_LINK_RATE_PHY_DISABLED: + rc = SAS_PHY_DISABLED; + break; + case MPI3_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED: + rc = SAS_LINK_RATE_FAILED; + break; + case MPI3_SAS_NEG_LINK_RATE_PORT_SELECTOR: + rc = SAS_SATA_PORT_SELECTOR; + break; + case MPI3_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS: + rc = SAS_PHY_RESET_IN_PROGRESS; + break; + case MPI3_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE: + case MPI3_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE: + default: + rc = SAS_LINK_RATE_UNKNOWN; + break; + } + return rc; +} + +/** + * mpi3mr_delete_sas_phy - Remove a single phy from port + * @mrioc: Adapter instance reference + * @mr_sas_port: Internal Port object + * @mr_sas_phy: Internal Phy object + * + * Return: None. + */ +static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc, + struct mpi3mr_sas_port *mr_sas_port, + struct mpi3mr_sas_phy *mr_sas_phy) +{ + u64 sas_address = mr_sas_port->remote_identify.sas_address; + + dev_info(&mr_sas_phy->phy->dev, + "remove: sas_address(0x%016llx), phy(%d)\n", + (unsigned long long) sas_address, mr_sas_phy->phy_id); + + list_del(&mr_sas_phy->port_siblings); + mr_sas_port->num_phys--; + mr_sas_port->phy_mask &= ~(1 << mr_sas_phy->phy_id); + if (mr_sas_port->lowest_phy == mr_sas_phy->phy_id) + mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + sas_port_delete_phy(mr_sas_port->port, mr_sas_phy->phy); + mr_sas_phy->phy_belongs_to_port = 0; +} + +/** + * mpi3mr_add_sas_phy - Adding a single phy to a port + * @mrioc: Adapter instance reference + * @mr_sas_port: Internal Port object + * @mr_sas_phy: Internal Phy object + * + * Return: None. + */ +static void mpi3mr_add_sas_phy(struct mpi3mr_ioc *mrioc, + struct mpi3mr_sas_port *mr_sas_port, + struct mpi3mr_sas_phy *mr_sas_phy) +{ + u64 sas_address = mr_sas_port->remote_identify.sas_address; + + dev_info(&mr_sas_phy->phy->dev, + "add: sas_address(0x%016llx), phy(%d)\n", (unsigned long long) + sas_address, mr_sas_phy->phy_id); + + list_add_tail(&mr_sas_phy->port_siblings, &mr_sas_port->phy_list); + mr_sas_port->num_phys++; + mr_sas_port->phy_mask |= (1 << mr_sas_phy->phy_id); + if (mr_sas_phy->phy_id < mr_sas_port->lowest_phy) + mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + sas_port_add_phy(mr_sas_port->port, mr_sas_phy->phy); + mr_sas_phy->phy_belongs_to_port = 1; +} + +/** + * mpi3mr_add_phy_to_an_existing_port - add phy to existing port + * @mrioc: Adapter instance reference + * @mr_sas_node: Internal sas node object (expander or host) + * @mr_sas_phy: Internal Phy object * + * @sas_address: SAS address of device/expander were phy needs + * to be added to + * @hba_port: HBA port entry + * + * Return: None. 
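+ *
+ * Note on the per-port phy bookkeeping used by the add/delete helpers
+ * above: member phys are tracked as bits in phy_mask and lowest_phy is
+ * recomputed with ffs(), which is 1-based. For example:
+ *
+ *   phy_mask = 0x0c;			// phys 2 and 3 belong to the port
+ *   lowest_phy = ffs(phy_mask) - 1;	// ffs(0x0c) == 3, so lowest_phy == 2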
+ */ +static void mpi3mr_add_phy_to_an_existing_port(struct mpi3mr_ioc *mrioc, + struct mpi3mr_sas_node *mr_sas_node, struct mpi3mr_sas_phy *mr_sas_phy, + u64 sas_address, struct mpi3mr_hba_port *hba_port) +{ + struct mpi3mr_sas_port *mr_sas_port; + struct mpi3mr_sas_phy *srch_phy; + + if (mr_sas_phy->phy_belongs_to_port == 1) + return; + + if (!hba_port) + return; + + list_for_each_entry(mr_sas_port, &mr_sas_node->sas_port_list, + port_list) { + if (mr_sas_port->remote_identify.sas_address != + sas_address) + continue; + if (mr_sas_port->hba_port != hba_port) + continue; + list_for_each_entry(srch_phy, &mr_sas_port->phy_list, + port_siblings) { + if (srch_phy == mr_sas_phy) + return; + } + mpi3mr_add_sas_phy(mrioc, mr_sas_port, mr_sas_phy); + return; + } +} + +/** + * mpi3mr_delete_sas_port - helper function to removing a port + * @mrioc: Adapter instance reference + * @mr_sas_port: Internal Port object + * + * Return: None. + */ +static void mpi3mr_delete_sas_port(struct mpi3mr_ioc *mrioc, + struct mpi3mr_sas_port *mr_sas_port) +{ + u64 sas_address = mr_sas_port->remote_identify.sas_address; + struct mpi3mr_hba_port *hba_port = mr_sas_port->hba_port; + enum sas_device_type device_type = + mr_sas_port->remote_identify.device_type; + + dev_info(&mr_sas_port->port->dev, + "remove: sas_address(0x%016llx)\n", + (unsigned long long) sas_address); + + if (device_type == SAS_END_DEVICE) + mpi3mr_remove_device_by_sas_address(mrioc, sas_address, + hba_port); + + else if (device_type == SAS_EDGE_EXPANDER_DEVICE || + device_type == SAS_FANOUT_EXPANDER_DEVICE) + mpi3mr_expander_remove(mrioc, sas_address, hba_port); +} + +/** + * mpi3mr_del_phy_from_an_existing_port - del phy from a port + * @mrioc: Adapter instance reference + * @mr_sas_node: Internal sas node object (expander or host) + * @mr_sas_phy: Internal Phy object + * + * Return: None. + */ +static void mpi3mr_del_phy_from_an_existing_port(struct mpi3mr_ioc *mrioc, + struct mpi3mr_sas_node *mr_sas_node, struct mpi3mr_sas_phy *mr_sas_phy) +{ + struct mpi3mr_sas_port *mr_sas_port, *next; + struct mpi3mr_sas_phy *srch_phy; + + if (mr_sas_phy->phy_belongs_to_port == 0) + return; + + list_for_each_entry_safe(mr_sas_port, next, &mr_sas_node->sas_port_list, + port_list) { + list_for_each_entry(srch_phy, &mr_sas_port->phy_list, + port_siblings) { + if (srch_phy != mr_sas_phy) + continue; + if ((mr_sas_port->num_phys == 1) && + !mrioc->reset_in_progress) + mpi3mr_delete_sas_port(mrioc, mr_sas_port); + else + mpi3mr_delete_sas_phy(mrioc, mr_sas_port, + mr_sas_phy); + return; + } + } +} + +/** + * mpi3mr_sas_port_sanity_check - sanity check while adding port + * @mrioc: Adapter instance reference + * @mr_sas_node: Internal sas node object (expander or host) + * @sas_address: SAS address of device/expander + * @hba_port: HBA port entry + * + * Verifies whether the Phys attached to a device with the given + * SAS address already belongs to an existing sas port if so + * will remove those phys from the sas port + * + * Return: None. 
+ */ +static void mpi3mr_sas_port_sanity_check(struct mpi3mr_ioc *mrioc, + struct mpi3mr_sas_node *mr_sas_node, u64 sas_address, + struct mpi3mr_hba_port *hba_port) +{ + int i; + + for (i = 0; i < mr_sas_node->num_phys; i++) { + if ((mr_sas_node->phy[i].remote_identify.sas_address != + sas_address) || (mr_sas_node->phy[i].hba_port != hba_port)) + continue; + if (mr_sas_node->phy[i].phy_belongs_to_port == 1) + mpi3mr_del_phy_from_an_existing_port(mrioc, + mr_sas_node, &mr_sas_node->phy[i]); + } +} + +/** + * mpi3mr_set_identify - set identify for phys and end devices + * @mrioc: Adapter instance reference + * @handle: Firmware device handle + * @identify: SAS transport layer's identify info + * + * Populates sas identify info for a specific device. + * + * Return: 0 for success, non-zero for failure. + */ +static int mpi3mr_set_identify(struct mpi3mr_ioc *mrioc, u16 handle, + struct sas_identify *identify) +{ + + struct mpi3_device_page0 device_pg0; + struct mpi3_device0_sas_sata_format *sasinf; + u16 device_info; + u16 ioc_status; + + if (mrioc->reset_in_progress) { + ioc_err(mrioc, "%s: host reset in progress!\n", __func__); + return -EFAULT; + } + + if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &device_pg0, + sizeof(device_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, handle))) { + ioc_err(mrioc, "%s: device page0 read failed\n", __func__); + return -ENXIO; + } + + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n", + handle, ioc_status, __FILE__, __LINE__, __func__); + return -EIO; + } + + memset(identify, 0, sizeof(struct sas_identify)); + sasinf = &device_pg0.device_specific.sas_sata_format; + device_info = le16_to_cpu(sasinf->device_info); + + /* sas_address */ + identify->sas_address = le64_to_cpu(sasinf->sas_address); + + /* phy number of the parent device this device is linked to */ + identify->phy_identifier = sasinf->phy_num; + + /* device_type */ + switch (device_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) { + case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_NO_DEVICE: + identify->device_type = SAS_PHY_UNUSED; + break; + case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE: + identify->device_type = SAS_END_DEVICE; + break; + case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER: + identify->device_type = SAS_EDGE_EXPANDER_DEVICE; + break; + } + + /* initiator_port_protocols */ + if (device_info & MPI3_SAS_DEVICE_INFO_SSP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SSP; + /* MPI3.0 doesn't have define for SATA INIT so setting both here*/ + if (device_info & MPI3_SAS_DEVICE_INFO_STP_INITIATOR) + identify->initiator_port_protocols |= (SAS_PROTOCOL_STP | + SAS_PROTOCOL_SATA); + if (device_info & MPI3_SAS_DEVICE_INFO_SMP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SMP; + + /* target_port_protocols */ + if (device_info & MPI3_SAS_DEVICE_INFO_SSP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SSP; + /* MPI3.0 doesn't have define for STP Target so setting both here*/ + if (device_info & MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET) + identify->target_port_protocols |= (SAS_PROTOCOL_STP | + SAS_PROTOCOL_SATA); + if (device_info & MPI3_SAS_DEVICE_INFO_SMP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SMP; + return 0; +} + +/** + * mpi3mr_add_host_phy - report sas_host phy to SAS transport + * @mrioc: Adapter instance reference + * @mr_sas_phy: Internal Phy object + * @phy_pg0: SAS phy page 0 + * @parent_dev: Prent device class object + * + * Return: 0 for 
success, non-zero for failure. + */ +static int mpi3mr_add_host_phy(struct mpi3mr_ioc *mrioc, + struct mpi3mr_sas_phy *mr_sas_phy, struct mpi3_sas_phy_page0 phy_pg0, + struct device *parent_dev) +{ + struct sas_phy *phy; + int phy_index = mr_sas_phy->phy_id; + + + INIT_LIST_HEAD(&mr_sas_phy->port_siblings); + phy = sas_phy_alloc(parent_dev, phy_index); + if (!phy) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + if ((mpi3mr_set_identify(mrioc, mr_sas_phy->handle, + &mr_sas_phy->identify))) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + phy->identify = mr_sas_phy->identify; + mr_sas_phy->attached_handle = le16_to_cpu(phy_pg0.attached_dev_handle); + if (mr_sas_phy->attached_handle) + mpi3mr_set_identify(mrioc, mr_sas_phy->attached_handle, + &mr_sas_phy->remote_identify); + phy->identify.phy_identifier = mr_sas_phy->phy_id; + phy->negotiated_linkrate = mpi3mr_convert_phy_link_rate( + (phy_pg0.negotiated_link_rate & + MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >> + MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT); + phy->minimum_linkrate_hw = mpi3mr_convert_phy_link_rate( + phy_pg0.hw_link_rate & MPI3_SAS_HWRATE_MIN_RATE_MASK); + phy->maximum_linkrate_hw = mpi3mr_convert_phy_link_rate( + phy_pg0.hw_link_rate >> 4); + phy->minimum_linkrate = mpi3mr_convert_phy_link_rate( + phy_pg0.programmed_link_rate & MPI3_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = mpi3mr_convert_phy_link_rate( + phy_pg0.programmed_link_rate >> 4); + phy->hostdata = mr_sas_phy->hba_port; + + if ((sas_phy_add(phy))) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO)) + dev_info(&phy->dev, + "add: handle(0x%04x), sas_address(0x%016llx)\n" + "\tattached_handle(0x%04x), sas_address(0x%016llx)\n", + mr_sas_phy->handle, (unsigned long long) + mr_sas_phy->identify.sas_address, + mr_sas_phy->attached_handle, + (unsigned long long) + mr_sas_phy->remote_identify.sas_address); + mr_sas_phy->phy = phy; + return 0; +} + +/** + * mpi3mr_add_expander_phy - report expander phy to transport + * @mrioc: Adapter instance reference + * @mr_sas_phy: Internal Phy object + * @expander_pg1: SAS Expander page 1 + * @parent_dev: Parent device class object + * + * Return: 0 for success, non-zero for failure. 
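/*
 * Editor's sketch, not part of this patch: the hw_link_rate and
 * programmed_link_rate bytes decoded by mpi3mr_add_host_phy() above (and by
 * mpi3mr_add_expander_phy() below) pack two 4-bit rate codes into one byte,
 * low nibble = minimum rate, high nibble = maximum rate -- hence the
 * "& *_MIN_RATE_MASK" for the minimum and the ">> 4" for the maximum.
 * Stand-alone illustration:
 */
#include <stdint.h>

static void split_link_rate(uint8_t packed, uint8_t *min_code, uint8_t *max_code)
{
	*min_code = packed & 0x0f;	/* the low nibble, what the *_MIN_RATE_MASK macros select */
	*max_code = packed >> 4;	/* the high nibble */
}
/* Example: packed = 0xb9 -> min_code 0x9, max_code 0xb. */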
+ */ +static int mpi3mr_add_expander_phy(struct mpi3mr_ioc *mrioc, + struct mpi3mr_sas_phy *mr_sas_phy, + struct mpi3_sas_expander_page1 expander_pg1, + struct device *parent_dev) +{ + struct sas_phy *phy; + int phy_index = mr_sas_phy->phy_id; + + INIT_LIST_HEAD(&mr_sas_phy->port_siblings); + phy = sas_phy_alloc(parent_dev, phy_index); + if (!phy) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + if ((mpi3mr_set_identify(mrioc, mr_sas_phy->handle, + &mr_sas_phy->identify))) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + phy->identify = mr_sas_phy->identify; + mr_sas_phy->attached_handle = + le16_to_cpu(expander_pg1.attached_dev_handle); + if (mr_sas_phy->attached_handle) + mpi3mr_set_identify(mrioc, mr_sas_phy->attached_handle, + &mr_sas_phy->remote_identify); + phy->identify.phy_identifier = mr_sas_phy->phy_id; + phy->negotiated_linkrate = mpi3mr_convert_phy_link_rate( + (expander_pg1.negotiated_link_rate & + MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >> + MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT); + phy->minimum_linkrate_hw = mpi3mr_convert_phy_link_rate( + expander_pg1.hw_link_rate & MPI3_SAS_HWRATE_MIN_RATE_MASK); + phy->maximum_linkrate_hw = mpi3mr_convert_phy_link_rate( + expander_pg1.hw_link_rate >> 4); + phy->minimum_linkrate = mpi3mr_convert_phy_link_rate( + expander_pg1.programmed_link_rate & MPI3_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = mpi3mr_convert_phy_link_rate( + expander_pg1.programmed_link_rate >> 4); + phy->hostdata = mr_sas_phy->hba_port; + + if ((sas_phy_add(phy))) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO)) + dev_info(&phy->dev, + "add: handle(0x%04x), sas_address(0x%016llx)\n" + "\tattached_handle(0x%04x), sas_address(0x%016llx)\n", + mr_sas_phy->handle, (unsigned long long) + mr_sas_phy->identify.sas_address, + mr_sas_phy->attached_handle, + (unsigned long long) + mr_sas_phy->remote_identify.sas_address); + mr_sas_phy->phy = phy; + return 0; +} + +/** + * mpi3mr_alloc_hba_port - alloc hba port object + * @mrioc: Adapter instance reference + * @port_id: Port number + * + * Alloc memory for hba port object. 
+ */ +static struct mpi3mr_hba_port * +mpi3mr_alloc_hba_port(struct mpi3mr_ioc *mrioc, u16 port_id) +{ + struct mpi3mr_hba_port *hba_port; + + hba_port = kzalloc(sizeof(struct mpi3mr_hba_port), + GFP_KERNEL); + if (!hba_port) + return NULL; + hba_port->port_id = port_id; + ioc_info(mrioc, "hba_port entry: %p, port: %d is added to hba_port list\n", + hba_port, hba_port->port_id); + list_add_tail(&hba_port->list, &mrioc->hba_port_table_list); + return hba_port; +} + +/** + * mpi3mr_get_hba_port_by_id - find hba port by id + * @mrioc: Adapter instance reference + * @port_id - Port ID to search + * + * Return: mpi3mr_hba_port reference for the matched port + */ + +struct mpi3mr_hba_port *mpi3mr_get_hba_port_by_id(struct mpi3mr_ioc *mrioc, + u8 port_id) +{ + struct mpi3mr_hba_port *port, *port_next; + + list_for_each_entry_safe(port, port_next, + &mrioc->hba_port_table_list, list) { + if (port->port_id != port_id) + continue; + if (port->flags & MPI3MR_HBA_PORT_FLAG_DIRTY) + continue; + return port; + } + + return NULL; +} + +/** + * mpi3mr_update_links - refreshing SAS phy link changes + * @mrioc: Adapter instance reference + * @sas_address_parent: SAS address of parent expander or host + * @handle: Firmware device handle of attached device + * @phy_number: Phy number + * @link_rate: New link rate + * @hba_port: HBA port entry + * + * Return: None. + */ +void mpi3mr_update_links(struct mpi3mr_ioc *mrioc, + u64 sas_address_parent, u16 handle, u8 phy_number, u8 link_rate, + struct mpi3mr_hba_port *hba_port) +{ + unsigned long flags; + struct mpi3mr_sas_node *mr_sas_node; + struct mpi3mr_sas_phy *mr_sas_phy; + + if (mrioc->reset_in_progress) + return; + + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc, + sas_address_parent, hba_port); + if (!mr_sas_node) { + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + return; + } + + mr_sas_phy = &mr_sas_node->phy[phy_number]; + mr_sas_phy->attached_handle = handle; + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + if (handle && (link_rate >= MPI3_SAS_NEG_LINK_RATE_1_5)) { + mpi3mr_set_identify(mrioc, handle, + &mr_sas_phy->remote_identify); + mpi3mr_add_phy_to_an_existing_port(mrioc, mr_sas_node, + mr_sas_phy, mr_sas_phy->remote_identify.sas_address, + hba_port); + } else + memset(&mr_sas_phy->remote_identify, 0, sizeof(struct + sas_identify)); + + if (mr_sas_phy->phy) + mr_sas_phy->phy->negotiated_linkrate = + mpi3mr_convert_phy_link_rate(link_rate); + + if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO)) + dev_info(&mr_sas_phy->phy->dev, + "refresh: parent sas_address(0x%016llx),\n" + "\tlink_rate(0x%02x), phy(%d)\n" + "\tattached_handle(0x%04x), sas_address(0x%016llx)\n", + (unsigned long long)sas_address_parent, + link_rate, phy_number, handle, (unsigned long long) + mr_sas_phy->remote_identify.sas_address); +} + +/** + * mpi3mr_sas_host_refresh - refreshing sas host object contents + * @mrioc: Adapter instance reference + * + * This function refreshes the controllers phy information and + * updates the SAS transport layer with updated information, + * this is executed for each device addition or device info + * change events + * + * Return: None. 
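/*
 * Editor's sketch, not part of this patch: mpi3mr_get_hba_port_by_id() above
 * deliberately skips entries flagged dirty, so that after a controller reset
 * only re-validated port entries are handed back to callers (which then
 * allocate a fresh entry via mpi3mr_alloc_hba_port() when nothing matches).
 * Minimal stand-alone model of that lookup; the types and the flag value are
 * stand-ins, not the driver's:
 */
#include <stddef.h>
#include <stdint.h>

#define PORT_MODEL_FLAG_DIRTY 0x01u	/* stand-in for MPI3MR_HBA_PORT_FLAG_DIRTY */

struct hba_port_model {
	struct hba_port_model *next;
	uint8_t port_id;
	uint8_t flags;
};

static struct hba_port_model *
find_port_by_id(struct hba_port_model *head, uint8_t port_id)
{
	struct hba_port_model *p;

	for (p = head; p; p = p->next) {
		if (p->port_id != port_id)
			continue;
		if (p->flags & PORT_MODEL_FLAG_DIRTY)	/* stale pre-reset entry */
			continue;
		return p;
	}
	return NULL;	/* caller may then allocate a new port entry */
}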
+ */ +void mpi3mr_sas_host_refresh(struct mpi3mr_ioc *mrioc) +{ + int i; + u8 link_rate; + u16 sz, port_id, attached_handle; + struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL; + + dprint_transport_info(mrioc, + "updating handles for sas_host(0x%016llx)\n", + (unsigned long long)mrioc->sas_hba.sas_address); + + sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) + + (mrioc->sas_hba.num_phys * + sizeof(struct mpi3_sas_io_unit0_phy_data)); + sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_io_unit_pg0) + return; + if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + mrioc->sas_hba.handle = 0; + for (i = 0; i < mrioc->sas_hba.num_phys; i++) { + if (sas_io_unit_pg0->phy_data[i].phy_flags & + (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY | + MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY)) + continue; + link_rate = + sas_io_unit_pg0->phy_data[i].negotiated_link_rate >> 4; + if (!mrioc->sas_hba.handle) + mrioc->sas_hba.handle = le16_to_cpu( + sas_io_unit_pg0->phy_data[i].controller_dev_handle); + port_id = sas_io_unit_pg0->phy_data[i].io_unit_port; + if (!(mpi3mr_get_hba_port_by_id(mrioc, port_id))) + if (!mpi3mr_alloc_hba_port(mrioc, port_id)) + goto out; + + mrioc->sas_hba.phy[i].handle = mrioc->sas_hba.handle; + attached_handle = le16_to_cpu( + sas_io_unit_pg0->phy_data[i].attached_dev_handle); + if (attached_handle && link_rate < MPI3_SAS_NEG_LINK_RATE_1_5) + link_rate = MPI3_SAS_NEG_LINK_RATE_1_5; + mrioc->sas_hba.phy[i].hba_port = + mpi3mr_get_hba_port_by_id(mrioc, port_id); + mpi3mr_update_links(mrioc, mrioc->sas_hba.sas_address, + attached_handle, i, link_rate, + mrioc->sas_hba.phy[i].hba_port); + } + out: + kfree(sas_io_unit_pg0); +} + +/** + * mpi3mr_sas_host_add - create sas host object + * @mrioc: Adapter instance reference + * + * This function creates the controllers phy information and + * updates the SAS transport layer with updated information, + * this is executed for first device addition or device info + * change event. + * + * Return: None. 
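/*
 * Editor's sketch, not part of this patch: SAS IO unit page 0 is a
 * variable-length configuration page -- a fixed header followed by one
 * phy_data entry per host phy -- so mpi3mr_sas_host_refresh() above and
 * mpi3mr_sas_host_add() below size the read buffer as
 * offsetof(page, phy_data) + num_phys * sizeof(entry).  (sas_host_add first
 * reads the page sized for a single phy only to learn num_phys, then
 * re-reads it at full size.)  Self-contained illustration of the sizing
 * rule, with a simplified stand-in layout:
 */
#include <stddef.h>
#include <stdint.h>

struct phy_data_model {
	uint16_t attached_dev_handle;
	uint8_t io_unit_port;
	uint8_t flags;
};

struct io_unit_pg0_model {
	uint8_t num_phys;
	uint8_t reserved[7];
	struct phy_data_model phy_data[];	/* flexible array member */
};

static size_t io_unit_pg0_size(unsigned int num_phys)
{
	return offsetof(struct io_unit_pg0_model, phy_data) +
	       num_phys * sizeof(struct phy_data_model);
}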
+ */ +void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc) +{ + int i; + u16 sz, num_phys = 1, port_id, ioc_status; + struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL; + struct mpi3_sas_phy_page0 phy_pg0; + struct mpi3_device_page0 dev_pg0; + struct mpi3_enclosure_page0 encl_pg0; + struct mpi3_device0_sas_sata_format *sasinf; + + sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) + + (num_phys * sizeof(struct mpi3_sas_io_unit0_phy_data)); + sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_io_unit_pg0) + return; + + if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + num_phys = sas_io_unit_pg0->num_phys; + kfree(sas_io_unit_pg0); + + mrioc->sas_hba.host_node = 1; + INIT_LIST_HEAD(&mrioc->sas_hba.sas_port_list); + mrioc->sas_hba.parent_dev = &mrioc->shost->shost_gendev; + mrioc->sas_hba.phy = kcalloc(num_phys, + sizeof(struct mpi3mr_sas_phy), GFP_KERNEL); + if (!mrioc->sas_hba.phy) + return; + + mrioc->sas_hba.num_phys = num_phys; + + sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) + + (num_phys * sizeof(struct mpi3_sas_io_unit0_phy_data)); + sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_io_unit_pg0) + return; + + if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + mrioc->sas_hba.handle = 0; + for (i = 0; i < mrioc->sas_hba.num_phys; i++) { + if (sas_io_unit_pg0->phy_data[i].phy_flags & + (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY | + MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY)) + continue; + if (mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0, + sizeof(struct mpi3_sas_phy_page0), + MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, i)) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + if (!mrioc->sas_hba.handle) + mrioc->sas_hba.handle = le16_to_cpu( + sas_io_unit_pg0->phy_data[i].controller_dev_handle); + port_id = sas_io_unit_pg0->phy_data[i].io_unit_port; + + if (!(mpi3mr_get_hba_port_by_id(mrioc, port_id))) + if (!mpi3mr_alloc_hba_port(mrioc, port_id)) + goto out; + + mrioc->sas_hba.phy[i].handle = mrioc->sas_hba.handle; + mrioc->sas_hba.phy[i].phy_id = i; + mrioc->sas_hba.phy[i].hba_port = + mpi3mr_get_hba_port_by_id(mrioc, port_id); + mpi3mr_add_host_phy(mrioc, &mrioc->sas_hba.phy[i], + phy_pg0, mrioc->sas_hba.parent_dev); + } + if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0, + sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, + mrioc->sas_hba.handle))) { + ioc_err(mrioc, "%s: device page0 read failed\n", __func__); + goto out; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n", + mrioc->sas_hba.handle, ioc_status, __FILE__, __LINE__, + __func__); + goto out; + } + mrioc->sas_hba.enclosure_handle = + le16_to_cpu(dev_pg0.enclosure_handle); + sasinf = &dev_pg0.device_specific.sas_sata_format; + mrioc->sas_hba.sas_address = + le64_to_cpu(sasinf->sas_address); + ioc_info(mrioc, + "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n", + mrioc->sas_hba.handle, + (unsigned long long) mrioc->sas_hba.sas_address, + mrioc->sas_hba.num_phys); + + if (mrioc->sas_hba.enclosure_handle) { + if (!(mpi3mr_cfg_get_enclosure_pg0(mrioc, &ioc_status, + &encl_pg0, 
sizeof(encl_pg0), + MPI3_ENCLOS_PGAD_FORM_HANDLE, + mrioc->sas_hba.enclosure_handle)) && + (ioc_status == MPI3_IOCSTATUS_SUCCESS)) + mrioc->sas_hba.enclosure_logical_id = + le64_to_cpu(encl_pg0.enclosure_logical_id); + } + +out: + kfree(sas_io_unit_pg0); +} + +/** + * mpi3mr_sas_port_add - Expose the SAS device to the SAS TL + * @mrioc: Adapter instance reference + * @handle: Firmware device handle of the attached device + * @sas_address_parent: sas address of parent expander or host + * @hba_port: HBA port entry + * + * This function creates a new sas port object for the given end + * device matching sas address and hba_port and adds it to the + * sas_node's sas_port_list and expose the attached sas device + * to the SAS transport layer through sas_rphy_add. + * + * Returns a valid mpi3mr_sas_port reference or NULL. + */ +static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc, + u16 handle, u64 sas_address_parent, struct mpi3mr_hba_port *hba_port) +{ + struct mpi3mr_sas_phy *mr_sas_phy, *next; + struct mpi3mr_sas_port *mr_sas_port; + unsigned long flags; + struct mpi3mr_sas_node *mr_sas_node; + struct sas_rphy *rphy; + struct mpi3mr_tgt_dev *tgtdev = NULL; + int i; + struct sas_port *port; + + if (!hba_port) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return NULL; + } + + mr_sas_port = kzalloc(sizeof(struct mpi3mr_sas_port), GFP_KERNEL); + if (!mr_sas_port) + return NULL; + + INIT_LIST_HEAD(&mr_sas_port->port_list); + INIT_LIST_HEAD(&mr_sas_port->phy_list); + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc, + sas_address_parent, hba_port); + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + + if (!mr_sas_node) { + ioc_err(mrioc, "%s:could not find parent sas_address(0x%016llx)!\n", + __func__, (unsigned long long)sas_address_parent); + goto out_fail; + } + + if ((mpi3mr_set_identify(mrioc, handle, + &mr_sas_port->remote_identify))) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + + if (mr_sas_port->remote_identify.device_type == SAS_PHY_UNUSED) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + + mr_sas_port->hba_port = hba_port; + mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node, + mr_sas_port->remote_identify.sas_address, hba_port); + + for (i = 0; i < mr_sas_node->num_phys; i++) { + if ((mr_sas_node->phy[i].remote_identify.sas_address != + mr_sas_port->remote_identify.sas_address) || + (mr_sas_node->phy[i].hba_port != hba_port)) + continue; + list_add_tail(&mr_sas_node->phy[i].port_siblings, + &mr_sas_port->phy_list); + mr_sas_port->num_phys++; + mr_sas_port->phy_mask |= (1 << i); + } + + if (!mr_sas_port->num_phys) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + + mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + + if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) { + tgtdev = mpi3mr_get_tgtdev_by_addr(mrioc, + mr_sas_port->remote_identify.sas_address, + mr_sas_port->hba_port); + + if (!tgtdev) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + tgtdev->dev_spec.sas_sata_inf.pend_sas_rphy_add = 1; + } + + if (!mr_sas_node->parent_dev) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + + port = sas_port_alloc_num(mr_sas_node->parent_dev); + if ((sas_port_add(port))) { + 
ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + + list_for_each_entry(mr_sas_phy, &mr_sas_port->phy_list, + port_siblings) { + if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO)) + dev_info(&port->dev, + "add: handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", + handle, (unsigned long long) + mr_sas_port->remote_identify.sas_address, + mr_sas_phy->phy_id); + sas_port_add_phy(port, mr_sas_phy->phy); + mr_sas_phy->phy_belongs_to_port = 1; + mr_sas_phy->hba_port = hba_port; + } + + mr_sas_port->port = port; + if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) { + rphy = sas_end_device_alloc(port); + tgtdev->dev_spec.sas_sata_inf.rphy = rphy; + } else { + rphy = sas_expander_alloc(port, + mr_sas_port->remote_identify.device_type); + } + rphy->identify = mr_sas_port->remote_identify; + + if (mrioc->current_event) + mrioc->current_event->pending_at_sml = 1; + + if ((sas_rphy_add(rphy))) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + } + if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) { + tgtdev->dev_spec.sas_sata_inf.pend_sas_rphy_add = 0; + tgtdev->dev_spec.sas_sata_inf.sas_transport_attached = 1; + mpi3mr_tgtdev_put(tgtdev); + } + + dev_info(&rphy->dev, + "%s: added: handle(0x%04x), sas_address(0x%016llx)\n", + __func__, handle, (unsigned long long) + mr_sas_port->remote_identify.sas_address); + + mr_sas_port->rphy = rphy; + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + list_add_tail(&mr_sas_port->port_list, &mr_sas_node->sas_port_list); + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + + if (mrioc->current_event) { + mrioc->current_event->pending_at_sml = 0; + if (mrioc->current_event->discard) + mpi3mr_print_device_event_notice(mrioc, true); + } + + /* fill in report manufacture */ + if (mr_sas_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE || + mr_sas_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) + mpi3mr_report_manufacture(mrioc, + mr_sas_port->remote_identify.sas_address, + rphy_to_expander_device(rphy), hba_port->port_id); + + return mr_sas_port; + + out_fail: + list_for_each_entry_safe(mr_sas_phy, next, &mr_sas_port->phy_list, + port_siblings) + list_del(&mr_sas_phy->port_siblings); + kfree(mr_sas_port); + return NULL; +} + +/** + * mpi3mr_sas_port_remove - remove port from the list + * @mrioc: Adapter instance reference + * @sas_address: SAS address of attached device + * @sas_address_parent: SAS address of parent expander or host + * @hba_port: HBA port entry + * + * Removing object and freeing associated memory from the + * sas_port_list. 
+ * + * Return: None + */ +static void mpi3mr_sas_port_remove(struct mpi3mr_ioc *mrioc, u64 sas_address, + u64 sas_address_parent, struct mpi3mr_hba_port *hba_port) +{ + int i; + unsigned long flags; + struct mpi3mr_sas_port *mr_sas_port, *next; + struct mpi3mr_sas_node *mr_sas_node; + u8 found = 0; + struct mpi3mr_sas_phy *mr_sas_phy, *next_phy; + struct mpi3mr_hba_port *srch_port, *hba_port_next = NULL; + + if (!hba_port) + return; + + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc, + sas_address_parent, hba_port); + if (!mr_sas_node) { + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + return; + } + list_for_each_entry_safe(mr_sas_port, next, &mr_sas_node->sas_port_list, + port_list) { + if (mr_sas_port->remote_identify.sas_address != sas_address) + continue; + if (mr_sas_port->hba_port != hba_port) + continue; + found = 1; + list_del(&mr_sas_port->port_list); + goto out; + } + + out: + if (!found) { + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + return; + } + + if (mr_sas_node->host_node) { + list_for_each_entry_safe(srch_port, hba_port_next, + &mrioc->hba_port_table_list, list) { + if (srch_port != hba_port) + continue; + ioc_info(mrioc, + "removing hba_port entry: %p port: %d from hba_port list\n", + srch_port, srch_port->port_id); + list_del(&hba_port->list); + kfree(hba_port); + break; + } + } + + for (i = 0; i < mr_sas_node->num_phys; i++) { + if (mr_sas_node->phy[i].remote_identify.sas_address == + sas_address) + memset(&mr_sas_node->phy[i].remote_identify, 0, + sizeof(struct sas_identify)); + } + + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + + if (mrioc->current_event) + mrioc->current_event->pending_at_sml = 1; + + list_for_each_entry_safe(mr_sas_phy, next_phy, + &mr_sas_port->phy_list, port_siblings) { + if ((!mrioc->stop_drv_processing) && + (mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO)) + dev_info(&mr_sas_port->port->dev, + "remove: sas_address(0x%016llx), phy(%d)\n", + (unsigned long long) + mr_sas_port->remote_identify.sas_address, + mr_sas_phy->phy_id); + mr_sas_phy->phy_belongs_to_port = 0; + if (!mrioc->stop_drv_processing) + sas_port_delete_phy(mr_sas_port->port, + mr_sas_phy->phy); + list_del(&mr_sas_phy->port_siblings); + } + if (!mrioc->stop_drv_processing) + sas_port_delete(mr_sas_port->port); + ioc_info(mrioc, "%s: removed sas_address(0x%016llx)\n", + __func__, (unsigned long long)sas_address); + + if (mrioc->current_event) { + mrioc->current_event->pending_at_sml = 0; + if (mrioc->current_event->discard) + mpi3mr_print_device_event_notice(mrioc, false); + } + + kfree(mr_sas_port); +} + +/** + * struct host_port - host port details + * @sas_address: SAS Address of the attached device + * @phy_mask: phy mask of host port + * @handle: Device Handle of attached device + * @iounit_port_id: port ID + * @used: host port is already matched with sas port from sas_port_list + * @lowest_phy: lowest phy ID of host port + */ +struct host_port { + u64 sas_address; + u32 phy_mask; + u16 handle; + u8 iounit_port_id; + u8 used; + u8 lowest_phy; +}; + +/** + * mpi3mr_update_mr_sas_port - update sas port objects during reset + * @mrioc: Adapter instance reference + * @h_port: host_port object + * @mr_sas_port: sas_port objects which needs to be updated + * + * Update the port ID of sas port object. Also add the phys if new phys got + * added to current sas port and remove the phys if some phys are moved + * out of the current sas port. + * + * Return: Nothing. 
+ */ +static void +mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port, + struct mpi3mr_sas_port *mr_sas_port) +{ + struct mpi3mr_sas_phy *mr_sas_phy; + u32 phy_mask_xor; + u64 phys_to_be_added, phys_to_be_removed; + int i; + + h_port->used = 1; + mr_sas_port->marked_responding = 1; + + dev_info(&mr_sas_port->port->dev, + "sas_address(0x%016llx), old: port_id %d phy_mask 0x%x, new: port_id %d phy_mask:0x%x\n", + mr_sas_port->remote_identify.sas_address, + mr_sas_port->hba_port->port_id, mr_sas_port->phy_mask, + h_port->iounit_port_id, h_port->phy_mask); + + mr_sas_port->hba_port->port_id = h_port->iounit_port_id; + mr_sas_port->hba_port->flags &= ~MPI3MR_HBA_PORT_FLAG_DIRTY; + + /* Get the newly added phys bit map & removed phys bit map */ + phy_mask_xor = mr_sas_port->phy_mask ^ h_port->phy_mask; + phys_to_be_added = h_port->phy_mask & phy_mask_xor; + phys_to_be_removed = mr_sas_port->phy_mask & phy_mask_xor; + + /* + * Register these new phys to current mr_sas_port's port. + * if these phys are previously registered with another port + * then delete these phys from that port first. + */ + for_each_set_bit(i, (ulong *) &phys_to_be_added, BITS_PER_TYPE(u32)) { + mr_sas_phy = &mrioc->sas_hba.phy[i]; + if (mr_sas_phy->phy_belongs_to_port) + mpi3mr_del_phy_from_an_existing_port(mrioc, + &mrioc->sas_hba, mr_sas_phy); + mpi3mr_add_phy_to_an_existing_port(mrioc, + &mrioc->sas_hba, mr_sas_phy, + mr_sas_port->remote_identify.sas_address, + mr_sas_port->hba_port); + } + + /* Delete the phys which are not part of current mr_sas_port's port. */ + for_each_set_bit(i, (ulong *) &phys_to_be_removed, BITS_PER_TYPE(u32)) { + mr_sas_phy = &mrioc->sas_hba.phy[i]; + if (mr_sas_phy->phy_belongs_to_port) + mpi3mr_del_phy_from_an_existing_port(mrioc, + &mrioc->sas_hba, mr_sas_phy); + } +} + +/** + * mpi3mr_refresh_sas_ports - update host's sas ports during reset + * @mrioc: Adapter instance reference + * + * Update the host's sas ports during reset by checking whether + * sas ports are still intact or not. Add/remove phys if any hba + * phys are (moved in)/(moved out) of sas port. Also update + * io_unit_port if it got changed during reset. + * + * Return: Nothing. 
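/*
 * Editor's sketch, not part of this patch: mpi3mr_update_mr_sas_port() above
 * derives "phys that joined the port" and "phys that left the port" from the
 * old and new phy masks with a single XOR: changed = old ^ new,
 * added = new & changed, removed = old & changed.  Worked stand-alone
 * example:
 */
#include <assert.h>
#include <stdint.h>

static void diff_phy_masks(uint32_t old_mask, uint32_t new_mask,
		uint32_t *added, uint32_t *removed)
{
	uint32_t changed = old_mask ^ new_mask;

	*added = new_mask & changed;	/* bits set only in the new mask */
	*removed = old_mask & changed;	/* bits set only in the old mask */
}

static void diff_phy_masks_example(void)
{
	uint32_t added, removed;

	/* old mask 0b0110 (phys 1,2), new mask 0b0011 (phys 0,1) */
	diff_phy_masks(0x6, 0x3, &added, &removed);
	assert(added == 0x1);	/* phy 0 joined */
	assert(removed == 0x4);	/* phy 2 left */
}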
+ */ +void +mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc) +{ + struct host_port h_port[32]; + int i, j, found, host_port_count = 0, port_idx; + u16 sz, attached_handle, ioc_status; + struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL; + struct mpi3_device_page0 dev_pg0; + struct mpi3_device0_sas_sata_format *sasinf; + struct mpi3mr_sas_port *mr_sas_port; + + sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) + + (mrioc->sas_hba.num_phys * + sizeof(struct mpi3_sas_io_unit0_phy_data)); + sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_io_unit_pg0) + return; + if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + /* Create a new expander port table */ + for (i = 0; i < mrioc->sas_hba.num_phys; i++) { + attached_handle = le16_to_cpu( + sas_io_unit_pg0->phy_data[i].attached_dev_handle); + if (!attached_handle) + continue; + found = 0; + for (j = 0; j < host_port_count; j++) { + if (h_port[j].handle == attached_handle) { + h_port[j].phy_mask |= (1 << i); + found = 1; + break; + } + } + if (found) + continue; + if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0, + sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, + attached_handle))) { + dprint_reset(mrioc, + "failed to read dev_pg0 for handle(0x%04x) at %s:%d/%s()!\n", + attached_handle, __FILE__, __LINE__, __func__); + continue; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + dprint_reset(mrioc, + "ioc_status(0x%x) while reading dev_pg0 for handle(0x%04x) at %s:%d/%s()!\n", + ioc_status, attached_handle, + __FILE__, __LINE__, __func__); + continue; + } + sasinf = &dev_pg0.device_specific.sas_sata_format; + + port_idx = host_port_count; + h_port[port_idx].sas_address = le64_to_cpu(sasinf->sas_address); + h_port[port_idx].handle = attached_handle; + h_port[port_idx].phy_mask = (1 << i); + h_port[port_idx].iounit_port_id = sas_io_unit_pg0->phy_data[i].io_unit_port; + h_port[port_idx].lowest_phy = sasinf->phy_num; + h_port[port_idx].used = 0; + host_port_count++; + } + + if (!host_port_count) + goto out; + + if (mrioc->logging_level & MPI3_DEBUG_RESET) { + ioc_info(mrioc, "Host port details before reset\n"); + list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list, + port_list) { + ioc_info(mrioc, + "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%x), lowest phy id:%d\n", + mr_sas_port->hba_port->port_id, + mr_sas_port->remote_identify.sas_address, + mr_sas_port->phy_mask, mr_sas_port->lowest_phy); + } + mr_sas_port = NULL; + ioc_info(mrioc, "Host port details after reset\n"); + for (i = 0; i < host_port_count; i++) { + ioc_info(mrioc, + "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%x), lowest phy id:%d\n", + h_port[i].iounit_port_id, h_port[i].sas_address, + h_port[i].phy_mask, h_port[i].lowest_phy); + } + } + + /* mark all host sas port entries as dirty */ + list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list, + port_list) { + mr_sas_port->marked_responding = 0; + mr_sas_port->hba_port->flags |= MPI3MR_HBA_PORT_FLAG_DIRTY; + } + + /* First check for matching lowest phy */ + for (i = 0; i < host_port_count; i++) { + mr_sas_port = NULL; + list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list, + port_list) { + if (mr_sas_port->marked_responding) + continue; + if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address) + continue; + if (h_port[i].lowest_phy == mr_sas_port->lowest_phy) { + mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port); + break; + } + } + } + + 
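	/*
	 * Editor's note: the port matching in this function is done in three
	 * passes of decreasing strictness -- (1) same remote SAS address and
	 * same lowest phy, (2) same SAS address with any overlapping phy in
	 * the mask, (3) same SAS address alone -- so an existing sas port
	 * object is re-associated with its post-reset counterpart even when
	 * its phy membership or io_unit port id changed across the reset.
	 */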
/* In case if lowest phy is got enabled or disabled during reset */ + for (i = 0; i < host_port_count; i++) { + if (h_port[i].used) + continue; + mr_sas_port = NULL; + list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list, + port_list) { + if (mr_sas_port->marked_responding) + continue; + if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address) + continue; + if (h_port[i].phy_mask & mr_sas_port->phy_mask) { + mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port); + break; + } + } + } + + /* In case if expander cable is removed & connected to another HBA port during reset */ + for (i = 0; i < host_port_count; i++) { + if (h_port[i].used) + continue; + mr_sas_port = NULL; + list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list, + port_list) { + if (mr_sas_port->marked_responding) + continue; + if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address) + continue; + mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port); + break; + } + } +out: + kfree(sas_io_unit_pg0); +} + +/** + * mpi3mr_refresh_expanders - Refresh expander device exposure + * @mrioc: Adapter instance reference + * + * This is executed post controller reset to identify any + * missing expander devices during reset and remove from the upper layers + * or expose any newly detected expander device to the upper layers. + * + * Return: Nothing. + */ +void +mpi3mr_refresh_expanders(struct mpi3mr_ioc *mrioc) +{ + struct mpi3mr_sas_node *sas_expander, *sas_expander_next; + struct mpi3_sas_expander_page0 expander_pg0; + u16 ioc_status, handle; + u64 sas_address; + int i; + unsigned long flags; + struct mpi3mr_hba_port *hba_port; + + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) { + sas_expander->non_responding = 1; + } + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + + sas_expander = NULL; + + handle = 0xffff; + + /* Search for responding expander devices and add them if they are newly got added */ + while (true) { + if ((mpi3mr_cfg_get_sas_exp_pg0(mrioc, &ioc_status, &expander_pg0, + sizeof(struct mpi3_sas_expander_page0), + MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + dprint_reset(mrioc, + "failed to read exp pg0 for handle(0x%04x) at %s:%d/%s()!\n", + handle, __FILE__, __LINE__, __func__); + break; + } + + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + dprint_reset(mrioc, + "ioc_status(0x%x) while reading exp pg0 for handle:(0x%04x), %s:%d/%s()!\n", + ioc_status, handle, __FILE__, __LINE__, __func__); + break; + } + + handle = le16_to_cpu(expander_pg0.dev_handle); + sas_address = le64_to_cpu(expander_pg0.sas_address); + hba_port = mpi3mr_get_hba_port_by_id(mrioc, expander_pg0.io_unit_port); + + if (!hba_port) { + mpi3mr_sas_host_refresh(mrioc); + mpi3mr_expander_add(mrioc, handle); + continue; + } + + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + sas_expander = + mpi3mr_expander_find_by_sas_address(mrioc, + sas_address, hba_port); + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + + if (!sas_expander) { + mpi3mr_sas_host_refresh(mrioc); + mpi3mr_expander_add(mrioc, handle); + continue; + } + + sas_expander->non_responding = 0; + if (sas_expander->handle == handle) + continue; + + sas_expander->handle = handle; + for (i = 0 ; i < sas_expander->num_phys ; i++) + sas_expander->phy[i].handle = handle; + } + + /* + * Delete non responding expander devices and the corresponding + * hba_port if the non responding expander device's parent device + * is a host node. 
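/*
 * Editor's sketch, not part of this patch: the discovery loop above walks all
 * expanders known to the firmware by starting from handle 0xffff and
 * repeatedly asking for "the next handle"
 * (MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE) until a config read fails.
 * Generic shape of that iteration; the getter below is a hypothetical
 * stand-in for the config-page read:
 */
#include <stdbool.h>
#include <stdint.h>

/* hypothetical: updates *handle to the next device, returns false when done */
bool get_next_device_handle(uint16_t *handle);

static void for_each_device_handle(void (*fn)(uint16_t handle))
{
	uint16_t handle = 0xffff;	/* "start of list" sentinel */

	while (get_next_device_handle(&handle))
		fn(handle);
}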
+ */ + sas_expander = NULL; + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + list_for_each_entry_safe_reverse(sas_expander, sas_expander_next, + &mrioc->sas_expander_list, list) { + if (sas_expander->non_responding) { + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + mpi3mr_expander_node_remove(mrioc, sas_expander); + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + } + } + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); +} + +/** + * mpi3mr_expander_node_add - insert an expander to the list. + * @mrioc: Adapter instance reference + * @sas_expander: Expander sas node + * Context: This function will acquire sas_node_lock. + * + * Adding new object to the ioc->sas_expander_list. + * + * Return: None. + */ +static void mpi3mr_expander_node_add(struct mpi3mr_ioc *mrioc, + struct mpi3mr_sas_node *sas_expander) +{ + unsigned long flags; + + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + list_add_tail(&sas_expander->list, &mrioc->sas_expander_list); + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); +} + +/** + * mpi3mr_expander_add - Create expander object + * @mrioc: Adapter instance reference + * @handle: Expander firmware device handle + * + * This function creating expander object, stored in + * sas_expander_list and expose it to the SAS transport + * layer. + * + * Return: 0 for success, non-zero for failure. + */ +int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle) +{ + struct mpi3mr_sas_node *sas_expander; + struct mpi3mr_enclosure_node *enclosure_dev; + struct mpi3_sas_expander_page0 expander_pg0; + struct mpi3_sas_expander_page1 expander_pg1; + u16 ioc_status, parent_handle, temp_handle; + u64 sas_address, sas_address_parent = 0; + int i; + unsigned long flags; + u8 port_id, link_rate; + struct mpi3mr_sas_port *mr_sas_port = NULL; + struct mpi3mr_hba_port *hba_port; + u32 phynum_handle; + int rc = 0; + + if (!handle) + return -1; + + if (mrioc->reset_in_progress) + return -1; + + if ((mpi3mr_cfg_get_sas_exp_pg0(mrioc, &ioc_status, &expander_pg0, + sizeof(expander_pg0), MPI3_SAS_EXPAND_PGAD_FORM_HANDLE, handle))) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + + parent_handle = le16_to_cpu(expander_pg0.parent_dev_handle); + if (mpi3mr_get_sas_address(mrioc, parent_handle, &sas_address_parent) + != 0) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + + port_id = expander_pg0.io_unit_port; + hba_port = mpi3mr_get_hba_port_by_id(mrioc, port_id); + if (!hba_port) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + + if (sas_address_parent != mrioc->sas_hba.sas_address) { + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + sas_expander = + mpi3mr_expander_find_by_sas_address(mrioc, + sas_address_parent, hba_port); + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + if (!sas_expander) { + rc = mpi3mr_expander_add(mrioc, parent_handle); + if (rc != 0) + return rc; + } else { + /* + * When there is a parent expander present, update it's + * phys where child expander is connected with the link + * speed, attached dev handle and sas address. 
+ */ + for (i = 0 ; i < sas_expander->num_phys ; i++) { + phynum_handle = + (i << MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | + parent_handle; + if (mpi3mr_cfg_get_sas_exp_pg1(mrioc, + &ioc_status, &expander_pg1, + sizeof(expander_pg1), + MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM, + phynum_handle)) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + return rc; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + return rc; + } + temp_handle = le16_to_cpu( + expander_pg1.attached_dev_handle); + if (temp_handle != handle) + continue; + link_rate = (expander_pg1.negotiated_link_rate & + MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >> + MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT; + mpi3mr_update_links(mrioc, sas_address_parent, + handle, i, link_rate, hba_port); + } + } + } + + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + sas_address = le64_to_cpu(expander_pg0.sas_address); + sas_expander = mpi3mr_expander_find_by_sas_address(mrioc, + sas_address, hba_port); + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + + if (sas_expander) + return 0; + + sas_expander = kzalloc(sizeof(struct mpi3mr_sas_node), + GFP_KERNEL); + if (!sas_expander) + return -ENOMEM; + + sas_expander->handle = handle; + sas_expander->num_phys = expander_pg0.num_phys; + sas_expander->sas_address_parent = sas_address_parent; + sas_expander->sas_address = sas_address; + sas_expander->hba_port = hba_port; + + ioc_info(mrioc, + "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", + handle, parent_handle, (unsigned long long) + sas_expander->sas_address, sas_expander->num_phys); + + if (!sas_expander->num_phys) { + rc = -1; + goto out_fail; + } + sas_expander->phy = kcalloc(sas_expander->num_phys, + sizeof(struct mpi3mr_sas_phy), GFP_KERNEL); + if (!sas_expander->phy) { + rc = -1; + goto out_fail; + } + + INIT_LIST_HEAD(&sas_expander->sas_port_list); + mr_sas_port = mpi3mr_sas_port_add(mrioc, handle, sas_address_parent, + sas_expander->hba_port); + if (!mr_sas_port) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + sas_expander->parent_dev = &mr_sas_port->rphy->dev; + sas_expander->rphy = mr_sas_port->rphy; + + for (i = 0 ; i < sas_expander->num_phys ; i++) { + phynum_handle = (i << MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | + handle; + if (mpi3mr_cfg_get_sas_exp_pg1(mrioc, &ioc_status, + &expander_pg1, sizeof(expander_pg1), + MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM, + phynum_handle)) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + + sas_expander->phy[i].handle = handle; + sas_expander->phy[i].phy_id = i; + sas_expander->phy[i].hba_port = hba_port; + + if ((mpi3mr_add_expander_phy(mrioc, &sas_expander->phy[i], + expander_pg1, sas_expander->parent_dev))) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + } + + if (sas_expander->enclosure_handle) { + enclosure_dev = + mpi3mr_enclosure_find_by_handle(mrioc, + sas_expander->enclosure_handle); + if (enclosure_dev) + sas_expander->enclosure_logical_id = le64_to_cpu( + enclosure_dev->pg0.enclosure_logical_id); + } + + mpi3mr_expander_node_add(mrioc, sas_expander); + return 0; + +out_fail: + + if 
(mr_sas_port) + mpi3mr_sas_port_remove(mrioc, + sas_expander->sas_address, + sas_address_parent, sas_expander->hba_port); + kfree(sas_expander->phy); + kfree(sas_expander); + return rc; +} + +/** + * mpi3mr_expander_node_remove - recursive removal of expander. + * @mrioc: Adapter instance reference + * @sas_expander: Expander device object + * + * Removes expander object and freeing associated memory from + * the sas_expander_list and removes the same from SAS TL, if + * one of the attached device is an expander then it recursively + * removes the expander device too. + * + * Return nothing. + */ +void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc, + struct mpi3mr_sas_node *sas_expander) +{ + struct mpi3mr_sas_port *mr_sas_port, *next; + unsigned long flags; + u8 port_id; + + /* remove sibling ports attached to this expander */ + list_for_each_entry_safe(mr_sas_port, next, + &sas_expander->sas_port_list, port_list) { + if (mrioc->reset_in_progress) + return; + if (mr_sas_port->remote_identify.device_type == + SAS_END_DEVICE) + mpi3mr_remove_device_by_sas_address(mrioc, + mr_sas_port->remote_identify.sas_address, + mr_sas_port->hba_port); + else if (mr_sas_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE || + mr_sas_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) + mpi3mr_expander_remove(mrioc, + mr_sas_port->remote_identify.sas_address, + mr_sas_port->hba_port); + } + + port_id = sas_expander->hba_port->port_id; + mpi3mr_sas_port_remove(mrioc, sas_expander->sas_address, + sas_expander->sas_address_parent, sas_expander->hba_port); + + ioc_info(mrioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n", + sas_expander->handle, (unsigned long long) + sas_expander->sas_address, port_id); + + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + list_del(&sas_expander->list); + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + + kfree(sas_expander->phy); + kfree(sas_expander); +} + +/** + * mpi3mr_expander_remove - Remove expander object + * @mrioc: Adapter instance reference + * @sas_address: Remove expander sas_address + * @hba_port: HBA port reference + * + * This function remove expander object, stored in + * mrioc->sas_expander_list and removes it from the SAS TL by + * calling mpi3mr_expander_node_remove(). + * + * Return: None + */ +void mpi3mr_expander_remove(struct mpi3mr_ioc *mrioc, u64 sas_address, + struct mpi3mr_hba_port *hba_port) +{ + struct mpi3mr_sas_node *sas_expander; + unsigned long flags; + + if (mrioc->reset_in_progress) + return; + + if (!hba_port) + return; + + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + sas_expander = mpi3mr_expander_find_by_sas_address(mrioc, sas_address, + hba_port); + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + if (sas_expander) + mpi3mr_expander_node_remove(mrioc, sas_expander); + +} + +/** + * mpi3mr_get_sas_negotiated_logical_linkrate - get linkrate + * @mrioc: Adapter instance reference + * @tgtdev: Target device + * + * This function identifies whether the target device is + * attached directly or through expander and issues sas phy + * page0 or expander phy page1 and gets the link rate, if there + * is any failure in reading the pages then this returns link + * rate of 1.5. + * + * Return: logical link rate. 
+ */ +static u8 mpi3mr_get_sas_negotiated_logical_linkrate(struct mpi3mr_ioc *mrioc, + struct mpi3mr_tgt_dev *tgtdev) +{ + u8 link_rate = MPI3_SAS_NEG_LINK_RATE_1_5, phy_number; + struct mpi3_sas_expander_page1 expander_pg1; + struct mpi3_sas_phy_page0 phy_pg0; + u32 phynum_handle; + u16 ioc_status; + + phy_number = tgtdev->dev_spec.sas_sata_inf.phy_id; + if (!(tgtdev->devpg0_flag & MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)) { + phynum_handle = ((phy_number << MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | tgtdev->parent_handle); + if (mpi3mr_cfg_get_sas_exp_pg1(mrioc, &ioc_status, + &expander_pg1, sizeof(expander_pg1), + MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM, + phynum_handle)) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + link_rate = (expander_pg1.negotiated_link_rate & + MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >> + MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT; + goto out; + } + if (mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0, + sizeof(struct mpi3_sas_phy_page0), + MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy_number)) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + link_rate = (phy_pg0.negotiated_link_rate & + MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >> + MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT; +out: + return link_rate; +} + +/** + * mpi3mr_report_tgtdev_to_sas_transport - expose dev to SAS TL + * @mrioc: Adapter instance reference + * @tgtdev: Target device + * + * This function exposes the target device after + * preparing host_phy, setting up link rate etc. + * + * Return: 0 on success, non-zero for failure.
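/*
 * Editor's sketch, not part of this patch: expander phy page 1 is addressed
 * with a composite page address that packs the phy number above the 16-bit
 * device handle, which is the
 * ((phy_number << MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle) expression
 * used in mpi3mr_expander_add() and in the link-rate helper above.
 * Stand-alone illustration; the shift value of 16 is an assumption for the
 * example, the driver uses the MPI3 macro:
 */
#include <stdint.h>

#define EXAMPLE_PHYNUM_SHIFT 16u	/* stand-in for MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT */

static uint32_t expander_phy_page_address(uint8_t phy_number, uint16_t dev_handle)
{
	return ((uint32_t)phy_number << EXAMPLE_PHYNUM_SHIFT) | dev_handle;
}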
+ */ +int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc, + struct mpi3mr_tgt_dev *tgtdev) +{ + int retval = 0; + u8 link_rate, parent_phy_number; + u64 sas_address_parent, sas_address; + struct mpi3mr_hba_port *hba_port; + u8 port_id; + + if ((tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA) || + !mrioc->sas_transport_enabled) + return -1; + + sas_address = tgtdev->dev_spec.sas_sata_inf.sas_address; + if (!mrioc->sas_hba.num_phys) + mpi3mr_sas_host_add(mrioc); + else + mpi3mr_sas_host_refresh(mrioc); + + if (mpi3mr_get_sas_address(mrioc, tgtdev->parent_handle, + &sas_address_parent) != 0) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + tgtdev->dev_spec.sas_sata_inf.sas_address_parent = sas_address_parent; + + parent_phy_number = tgtdev->dev_spec.sas_sata_inf.phy_id; + port_id = tgtdev->io_unit_port; + + hba_port = mpi3mr_get_hba_port_by_id(mrioc, port_id); + if (!hba_port) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + tgtdev->dev_spec.sas_sata_inf.hba_port = hba_port; + + link_rate = mpi3mr_get_sas_negotiated_logical_linkrate(mrioc, tgtdev); + + mpi3mr_update_links(mrioc, sas_address_parent, tgtdev->dev_handle, + parent_phy_number, link_rate, hba_port); + + tgtdev->host_exposed = 1; + if (!mpi3mr_sas_port_add(mrioc, tgtdev->dev_handle, + sas_address_parent, hba_port)) { + retval = -1; + } else if ((!tgtdev->starget) && (!mrioc->is_driver_loading)) { + mpi3mr_sas_port_remove(mrioc, sas_address, + sas_address_parent, hba_port); + retval = -1; + } + if (retval) { + tgtdev->dev_spec.sas_sata_inf.hba_port = NULL; + tgtdev->host_exposed = 0; + } + return retval; +} + +/** + * mpi3mr_remove_tgtdev_from_sas_transport - remove from SAS TL + * @mrioc: Adapter instance reference + * @tgtdev: Target device + * + * This function removes the target device + * + * Return: None. + */ +void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc, + struct mpi3mr_tgt_dev *tgtdev) +{ + u64 sas_address_parent, sas_address; + struct mpi3mr_hba_port *hba_port; + + if ((tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA) || + !mrioc->sas_transport_enabled) + return; + + hba_port = tgtdev->dev_spec.sas_sata_inf.hba_port; + sas_address = tgtdev->dev_spec.sas_sata_inf.sas_address; + sas_address_parent = tgtdev->dev_spec.sas_sata_inf.sas_address_parent; + mpi3mr_sas_port_remove(mrioc, sas_address, sas_address_parent, + hba_port); + tgtdev->host_exposed = 0; + tgtdev->dev_spec.sas_sata_inf.hba_port = NULL; +} + +/** + * mpi3mr_get_port_id_by_sas_phy - Get port ID of the given phy + * @phy: SAS transport layer phy object + * + * Return: Port number for valid ID else 0xFFFF + */ +static inline u8 mpi3mr_get_port_id_by_sas_phy(struct sas_phy *phy) +{ + u8 port_id = 0xFF; + struct mpi3mr_hba_port *hba_port = phy->hostdata; + + if (hba_port) + port_id = hba_port->port_id; + + return port_id; +} + +/** + * mpi3mr_get_port_id_by_rphy - Get Port number from SAS rphy + * + * @mrioc: Adapter instance reference + * @rphy: SAS transport layer remote phy object + * + * Retrieves HBA port number in which the device pointed by the + * rphy object is attached with. + * + * Return: Valid port number on success else OxFFFF. 
+ */ +static u8 mpi3mr_get_port_id_by_rphy(struct mpi3mr_ioc *mrioc, struct sas_rphy *rphy) +{ + struct mpi3mr_sas_node *sas_expander; + struct mpi3mr_tgt_dev *tgtdev; + unsigned long flags; + u8 port_id = 0xFF; + + if (!rphy) + return port_id; + + if (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE || + rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) { + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + list_for_each_entry(sas_expander, &mrioc->sas_expander_list, + list) { + if (sas_expander->rphy == rphy) { + port_id = sas_expander->hba_port->port_id; + break; + } + } + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + } else if (rphy->identify.device_type == SAS_END_DEVICE) { + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + + tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, + rphy->identify.sas_address, rphy); + if (tgtdev && tgtdev->dev_spec.sas_sata_inf.hba_port) { + port_id = + tgtdev->dev_spec.sas_sata_inf.hba_port->port_id; + mpi3mr_tgtdev_put(tgtdev); + } + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + } + return port_id; +} + +static inline struct mpi3mr_ioc *phy_to_mrioc(struct sas_phy *phy) +{ + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + + return shost_priv(shost); +} + +static inline struct mpi3mr_ioc *rphy_to_mrioc(struct sas_rphy *rphy) +{ + struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); + + return shost_priv(shost); +} + +/* report phy error log structure */ +struct phy_error_log_request { + u8 smp_frame_type; /* 0x40 */ + u8 function; /* 0x11 */ + u8 allocated_response_length; + u8 request_length; /* 02 */ + u8 reserved_1[5]; + u8 phy_identifier; + u8 reserved_2[2]; +}; + +/* report phy error log reply structure */ +struct phy_error_log_reply { + u8 smp_frame_type; /* 0x41 */ + u8 function; /* 0x11 */ + u8 function_result; + u8 response_length; + __be16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 reserved_2[2]; + __be32 invalid_dword; + __be32 running_disparity_error; + __be32 loss_of_dword_sync; + __be32 phy_reset_problem; +}; + + +/** + * mpi3mr_get_expander_phy_error_log - return expander counters: + * @mrioc: Adapter instance reference + * @phy: The SAS transport layer phy object + * + * Return: 0 for success, non-zero for failure. 
+ * + */ +static int mpi3mr_get_expander_phy_error_log(struct mpi3mr_ioc *mrioc, + struct sas_phy *phy) +{ + struct mpi3_smp_passthrough_request mpi_request; + struct mpi3_smp_passthrough_reply mpi_reply; + struct phy_error_log_request *phy_error_log_request; + struct phy_error_log_reply *phy_error_log_reply; + int rc; + void *psge; + void *data_out = NULL; + dma_addr_t data_out_dma, data_in_dma; + u32 data_out_sz, data_in_sz, sz; + u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; + u16 request_sz = sizeof(struct mpi3_smp_passthrough_request); + u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply); + u16 ioc_status; + + if (mrioc->reset_in_progress) { + ioc_err(mrioc, "%s: host reset in progress!\n", __func__); + return -EFAULT; + } + + data_out_sz = sizeof(struct phy_error_log_request); + data_in_sz = sizeof(struct phy_error_log_reply); + sz = data_out_sz + data_in_sz; + data_out = dma_alloc_coherent(&mrioc->pdev->dev, sz, &data_out_dma, + GFP_KERNEL); + if (!data_out) { + rc = -ENOMEM; + goto out; + } + + data_in_dma = data_out_dma + data_out_sz; + phy_error_log_reply = data_out + data_out_sz; + + rc = -EINVAL; + memset(data_out, 0, sz); + phy_error_log_request = data_out; + phy_error_log_request->smp_frame_type = 0x40; + phy_error_log_request->function = 0x11; + phy_error_log_request->request_length = 2; + phy_error_log_request->allocated_response_length = 0; + phy_error_log_request->phy_identifier = phy->number; + + memset(&mpi_request, 0, request_sz); + memset(&mpi_reply, 0, reply_sz); + mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS); + mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH; + mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_sas_phy(phy); + mpi_request.sas_address = cpu_to_le64(phy->identify.sas_address); + + psge = &mpi_request.request_sge; + mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma); + + psge = &mpi_request.response_sge; + mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma); + + dprint_transport_info(mrioc, + "sending phy error log SMP request to sas_address(0x%016llx), phy_id(%d)\n", + (unsigned long long)phy->identify.sas_address, phy->number); + + if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz, + &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status)) + goto out; + + dprint_transport_info(mrioc, + "phy error log SMP request completed with ioc_status(0x%04x)\n", + ioc_status); + + if (ioc_status == MPI3_IOCSTATUS_SUCCESS) { + dprint_transport_info(mrioc, + "phy error log - reply data transfer size(%d)\n", + le16_to_cpu(mpi_reply.response_data_length)); + + if (le16_to_cpu(mpi_reply.response_data_length) != + sizeof(struct phy_error_log_reply)) + goto out; + + dprint_transport_info(mrioc, + "phy error log - function_result(%d)\n", + phy_error_log_reply->function_result); + + phy->invalid_dword_count = + be32_to_cpu(phy_error_log_reply->invalid_dword); + phy->running_disparity_error_count = + be32_to_cpu(phy_error_log_reply->running_disparity_error); + phy->loss_of_dword_sync_count = + be32_to_cpu(phy_error_log_reply->loss_of_dword_sync); + phy->phy_reset_problem_count = + be32_to_cpu(phy_error_log_reply->phy_reset_problem); + rc = 0; + } + +out: + if (data_out) + dma_free_coherent(&mrioc->pdev->dev, sz, data_out, + data_out_dma); + + return rc; +} + +/** + * mpi3mr_transport_get_linkerrors - return phy error counters + * @phy: The SAS transport layer phy object + * + * This function retrieves the phy error log information of the + * HBA or expander for which the phy belongs to 
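/*
 * Editor's sketch, not part of this patch: the SMP passthrough helper above
 * (and the phy-control helper later in this file) makes one coherent DMA
 * allocation and carves it into a request half followed by a response half,
 * so a single allocate/free pair covers both directions and data_in_dma is
 * simply data_out_dma + data_out_sz.  The layout arithmetic, modelled with
 * plain pointers and a hypothetical layout struct:
 */
#include <stddef.h>
#include <stdint.h>

struct smp_buf_layout {
	void *request;		/* start of the allocation (data_out) */
	void *response;		/* request + request_sz (data_in) */
	uint64_t request_dma;	/* bus address of the request half */
	uint64_t response_dma;	/* request_dma + request_sz */
};

static struct smp_buf_layout split_smp_buffer(void *cpu_addr, uint64_t dma_addr,
		size_t request_sz)
{
	struct smp_buf_layout l = {
		.request = cpu_addr,
		.response = (uint8_t *)cpu_addr + request_sz,
		.request_dma = dma_addr,
		.response_dma = dma_addr + request_sz,
	};
	return l;
}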
+ * + * Return: 0 for success, non-zero for failure. + */ +static int mpi3mr_transport_get_linkerrors(struct sas_phy *phy) +{ + struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy); + struct mpi3_sas_phy_page1 phy_pg1; + int rc = 0; + u16 ioc_status; + + rc = mpi3mr_parent_present(mrioc, phy); + if (rc) + return rc; + + if (phy->identify.sas_address != mrioc->sas_hba.sas_address) + return mpi3mr_get_expander_phy_error_log(mrioc, phy); + + memset(&phy_pg1, 0, sizeof(struct mpi3_sas_phy_page1)); + /* get hba phy error logs */ + if ((mpi3mr_cfg_get_sas_phy_pg1(mrioc, &ioc_status, &phy_pg1, + sizeof(struct mpi3_sas_phy_page1), + MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy->number))) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENXIO; + } + + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENXIO; + } + phy->invalid_dword_count = le32_to_cpu(phy_pg1.invalid_dword_count); + phy->running_disparity_error_count = + le32_to_cpu(phy_pg1.running_disparity_error_count); + phy->loss_of_dword_sync_count = + le32_to_cpu(phy_pg1.loss_dword_synch_count); + phy->phy_reset_problem_count = + le32_to_cpu(phy_pg1.phy_reset_problem_count); + return 0; +} + +/** + * mpi3mr_transport_get_enclosure_identifier - Get Enclosure ID + * @rphy: The SAS transport layer remote phy object + * @identifier: Enclosure identifier to be returned + * + * Returns the enclosure id for the device pointed by the remote + * phy object. + * + * Return: 0 on success or -ENXIO + */ +static int +mpi3mr_transport_get_enclosure_identifier(struct sas_rphy *rphy, + u64 *identifier) +{ + struct mpi3mr_ioc *mrioc = rphy_to_mrioc(rphy); + struct mpi3mr_tgt_dev *tgtdev = NULL; + unsigned long flags; + int rc; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, + rphy->identify.sas_address, rphy); + if (tgtdev) { + *identifier = + tgtdev->enclosure_logical_id; + rc = 0; + mpi3mr_tgtdev_put(tgtdev); + } else { + *identifier = 0; + rc = -ENXIO; + } + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + + return rc; +} + +/** + * mpi3mr_transport_get_bay_identifier - Get bay ID + * @rphy: The SAS transport layer remote phy object + * + * Returns the slot id for the device pointed by the remote phy + * object. 
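For reference, the counters that mpi3mr_transport_get_linkerrors() above populates surface through the SAS transport class as per-phy sysfs attributes (invalid_dword_count, running_disparity_error_count, loss_of_dword_sync_count, phy_reset_problem_count); reading one of them is what ends up invoking the callback. A small user-space sketch, where the phy name "phy-0:0" is only an example:

#include <stdio.h>

int main(void)
{
	unsigned long count;
	/* Opening the attribute makes the SAS transport class call the
	 * driver's .get_linkerrors before returning the value. */
	FILE *f = fopen("/sys/class/sas_phy/phy-0:0/invalid_dword_count", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu", &count) == 1)
		printf("invalid dwords: %lu\n", count);
	fclose(f);
	return 0;
}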
+ * + * Return: Valid slot ID on success or -ENXIO + */ +static int +mpi3mr_transport_get_bay_identifier(struct sas_rphy *rphy) +{ + struct mpi3mr_ioc *mrioc = rphy_to_mrioc(rphy); + struct mpi3mr_tgt_dev *tgtdev = NULL; + unsigned long flags; + int rc; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, + rphy->identify.sas_address, rphy); + if (tgtdev) { + rc = tgtdev->slot; + mpi3mr_tgtdev_put(tgtdev); + } else + rc = -ENXIO; + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + + return rc; +} + +/* phy control request structure */ +struct phy_control_request { + u8 smp_frame_type; /* 0x40 */ + u8 function; /* 0x91 */ + u8 allocated_response_length; + u8 request_length; /* 0x09 */ + u16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 phy_operation; + u8 reserved_2[13]; + u64 attached_device_name; + u8 programmed_min_physical_link_rate; + u8 programmed_max_physical_link_rate; + u8 reserved_3[6]; +}; + +/* phy control reply structure */ +struct phy_control_reply { + u8 smp_frame_type; /* 0x41 */ + u8 function; /* 0x11 */ + u8 function_result; + u8 response_length; +}; + +#define SMP_PHY_CONTROL_LINK_RESET (0x01) +#define SMP_PHY_CONTROL_HARD_RESET (0x02) +#define SMP_PHY_CONTROL_DISABLE (0x03) + +/** + * mpi3mr_expander_phy_control - expander phy control + * @mrioc: Adapter instance reference + * @phy: The SAS transport layer phy object + * @phy_operation: The phy operation to be executed + * + * Issues SMP passthru phy control request to execute a specific + * phy operation for a given expander device. + * + * Return: 0 for success, non-zero for failure. + */ +static int +mpi3mr_expander_phy_control(struct mpi3mr_ioc *mrioc, + struct sas_phy *phy, u8 phy_operation) +{ + struct mpi3_smp_passthrough_request mpi_request; + struct mpi3_smp_passthrough_reply mpi_reply; + struct phy_control_request *phy_control_request; + struct phy_control_reply *phy_control_reply; + int rc; + void *psge; + void *data_out = NULL; + dma_addr_t data_out_dma; + dma_addr_t data_in_dma; + size_t data_in_sz; + size_t data_out_sz; + u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; + u16 request_sz = sizeof(struct mpi3_smp_passthrough_request); + u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply); + u16 ioc_status; + u16 sz; + + if (mrioc->reset_in_progress) { + ioc_err(mrioc, "%s: host reset in progress!\n", __func__); + return -EFAULT; + } + + data_out_sz = sizeof(struct phy_control_request); + data_in_sz = sizeof(struct phy_control_reply); + sz = data_out_sz + data_in_sz; + data_out = dma_alloc_coherent(&mrioc->pdev->dev, sz, &data_out_dma, + GFP_KERNEL); + if (!data_out) { + rc = -ENOMEM; + goto out; + } + + data_in_dma = data_out_dma + data_out_sz; + phy_control_reply = data_out + data_out_sz; + + rc = -EINVAL; + memset(data_out, 0, sz); + + phy_control_request = data_out; + phy_control_request->smp_frame_type = 0x40; + phy_control_request->function = 0x91; + phy_control_request->request_length = 9; + phy_control_request->allocated_response_length = 0; + phy_control_request->phy_identifier = phy->number; + phy_control_request->phy_operation = phy_operation; + phy_control_request->programmed_min_physical_link_rate = + phy->minimum_linkrate << 4; + phy_control_request->programmed_max_physical_link_rate = + phy->maximum_linkrate << 4; + + memset(&mpi_request, 0, request_sz); + memset(&mpi_reply, 0, reply_sz); + mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS); + mpi_request.function = 
MPI3_FUNCTION_SMP_PASSTHROUGH; + mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_sas_phy(phy); + mpi_request.sas_address = cpu_to_le64(phy->identify.sas_address); + + psge = &mpi_request.request_sge; + mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma); + + psge = &mpi_request.response_sge; + mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma); + + dprint_transport_info(mrioc, + "sending phy control SMP request to sas_address(0x%016llx), phy_id(%d) opcode(%d)\n", + (unsigned long long)phy->identify.sas_address, phy->number, + phy_operation); + + if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz, + &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status)) + goto out; + + dprint_transport_info(mrioc, + "phy control SMP request completed with ioc_status(0x%04x)\n", + ioc_status); + + if (ioc_status == MPI3_IOCSTATUS_SUCCESS) { + dprint_transport_info(mrioc, + "phy control - reply data transfer size(%d)\n", + le16_to_cpu(mpi_reply.response_data_length)); + + if (le16_to_cpu(mpi_reply.response_data_length) != + sizeof(struct phy_control_reply)) + goto out; + dprint_transport_info(mrioc, + "phy control - function_result(%d)\n", + phy_control_reply->function_result); + rc = 0; + } + out: + if (data_out) + dma_free_coherent(&mrioc->pdev->dev, sz, data_out, + data_out_dma); + + return rc; +} + +/** + * mpi3mr_transport_phy_reset - Reset a given phy + * @phy: The SAS transport layer phy object + * @hard_reset: Flag to indicate the type of reset + * + * Return: 0 for success, non-zero for failure. + */ +static int +mpi3mr_transport_phy_reset(struct sas_phy *phy, int hard_reset) +{ + struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy); + struct mpi3_iounit_control_request mpi_request; + struct mpi3_iounit_control_reply mpi_reply; + u16 request_sz = sizeof(struct mpi3_iounit_control_request); + u16 reply_sz = sizeof(struct mpi3_iounit_control_reply); + int rc = 0; + u16 ioc_status; + + rc = mpi3mr_parent_present(mrioc, phy); + if (rc) + return rc; + + /* handle expander phys */ + if (phy->identify.sas_address != mrioc->sas_hba.sas_address) + return mpi3mr_expander_phy_control(mrioc, phy, + (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET : + SMP_PHY_CONTROL_LINK_RESET); + + /* handle hba phys */ + memset(&mpi_request, 0, request_sz); + mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS); + mpi_request.function = MPI3_FUNCTION_IO_UNIT_CONTROL; + mpi_request.operation = MPI3_CTRL_OP_SAS_PHY_CONTROL; + mpi_request.param8[MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_ACTION_INDEX] = + (hard_reset ? MPI3_CTRL_ACTION_HARD_RESET : + MPI3_CTRL_ACTION_LINK_RESET); + mpi_request.param8[MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_PHY_INDEX] = + phy->number; + + dprint_transport_info(mrioc, + "sending phy reset request to sas_address(0x%016llx), phy_id(%d) hard_reset(%d)\n", + (unsigned long long)phy->identify.sas_address, phy->number, + hard_reset); + + if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz, + &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status)) { + rc = -EAGAIN; + goto out; + } + + dprint_transport_info(mrioc, + "phy reset request completed with ioc_status(0x%04x)\n", + ioc_status); +out: + return rc; +} + +/** + * mpi3mr_transport_phy_enable - enable/disable phys + * @phy: The SAS transport layer phy object + * @enable: flag to enable/disable, enable phy when true + * + * This function enables/disables a given by executing required + * configuration page changes or expander phy control command + * + * Return: 0 for success, non-zero for failure. 
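One detail worth calling out in mpi3mr_expander_phy_control() above: the SMP PHY CONTROL frame carries the programmed minimum/maximum physical link rate in the upper nibble of the respective byte, which is what the "<< 4" on phy->minimum_linkrate / phy->maximum_linkrate implements (the sas_phy rate fields already hold the SAS rate codes, e.g. 9 for 3 Gb/s, 10 for 6 Gb/s, 11 for 12 Gb/s). A standalone sketch of the same encoding:

#include <stdint.h>
#include <stdio.h>

/* SMP PHY CONTROL expects the SAS link-rate code in bits 7:4 of the
 * programmed-rate bytes; bits 3:0 are not used for the rate. */
static uint8_t smp_programmed_rate(uint8_t sas_linkrate_code)
{
	return (uint8_t)(sas_linkrate_code << 4);
}

int main(void)
{
	printf("6 Gb/s  -> 0x%02x\n", smp_programmed_rate(10));	/* 0xa0 */
	printf("12 Gb/s -> 0x%02x\n", smp_programmed_rate(11));	/* 0xb0 */
	return 0;
}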
+ */ +static int +mpi3mr_transport_phy_enable(struct sas_phy *phy, int enable) +{ + struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy); + struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL; + struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1 = NULL; + u16 sz; + int rc = 0; + int i, discovery_active; + + rc = mpi3mr_parent_present(mrioc, phy); + if (rc) + return rc; + + /* handle expander phys */ + if (phy->identify.sas_address != mrioc->sas_hba.sas_address) + return mpi3mr_expander_phy_control(mrioc, phy, + (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET : + SMP_PHY_CONTROL_DISABLE); + + /* handle hba phys */ + sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) + + (mrioc->sas_hba.num_phys * + sizeof(struct mpi3_sas_io_unit0_phy_data)); + sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_io_unit_pg0) { + rc = -ENOMEM; + goto out; + } + if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + + /* unable to enable/disable phys when discovery is active */ + for (i = 0, discovery_active = 0; i < mrioc->sas_hba.num_phys ; i++) { + if (sas_io_unit_pg0->phy_data[i].port_flags & + MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS) { + ioc_err(mrioc, + "discovery is active on port = %d, phy = %d\n" + "\tunable to enable/disable phys, try again later!\n", + sas_io_unit_pg0->phy_data[i].io_unit_port, i); + discovery_active = 1; + } + } + + if (discovery_active) { + rc = -EAGAIN; + goto out; + } + + if ((sas_io_unit_pg0->phy_data[phy->number].phy_flags & + (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY | + MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + + /* read sas_iounit page 1 */ + sz = offsetof(struct mpi3_sas_io_unit_page1, phy_data) + + (mrioc->sas_hba.num_phys * + sizeof(struct mpi3_sas_io_unit1_phy_data)); + sas_io_unit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_io_unit_pg1) { + rc = -ENOMEM; + goto out; + } + + if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + + if (enable) + sas_io_unit_pg1->phy_data[phy->number].phy_flags + &= ~MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + else + sas_io_unit_pg1->phy_data[phy->number].phy_flags + |= MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + + mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz); + + /* link reset */ + if (enable) + mpi3mr_transport_phy_reset(phy, 0); + + out: + kfree(sas_io_unit_pg1); + kfree(sas_io_unit_pg0); + return rc; +} + +/** + * mpi3mr_transport_phy_speed - set phy min/max speed + * @phy: The SAS transport later phy object + * @rates: Rates defined as in sas_phy_linkrates + * + * This function sets the link rates given in the rates + * argument to the given phy by executing required configuration + * page changes or expander phy control command + * + * Return: 0 for success, non-zero for failure. 
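mpi3mr_transport_phy_speed() below folds the requested minimum and maximum link rate into the single max_min_link_rate byte of SAS IO unit page 1 (minimum in the low nibble, maximum in the high nibble). A small self-contained sketch of that packing and its inverse:

#include <stdint.h>
#include <stdio.h>

/* Same nibble layout as sas_io_unit_pg1->phy_data[].max_min_link_rate:
 * bits 3:0 = minimum rate code, bits 7:4 = maximum rate code. */
static uint8_t pack_max_min_link_rate(uint8_t min_rate, uint8_t max_rate)
{
	return (uint8_t)(min_rate | (max_rate << 4));
}

static void unpack_max_min_link_rate(uint8_t packed, uint8_t *min_rate,
				     uint8_t *max_rate)
{
	*min_rate = packed & 0x0f;
	*max_rate = packed >> 4;
}

int main(void)
{
	uint8_t min, max;
	uint8_t packed = pack_max_min_link_rate(9, 11);	/* 3 Gb/s .. 12 Gb/s */

	unpack_max_min_link_rate(packed, &min, &max);
	printf("packed=0x%02x min=%u max=%u\n", packed, min, max);
	return 0;
}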
+ */ +static int +mpi3mr_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) +{ + struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy); + struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1 = NULL; + struct mpi3_sas_phy_page0 phy_pg0; + u16 sz, ioc_status; + int rc = 0; + + rc = mpi3mr_parent_present(mrioc, phy); + if (rc) + return rc; + + if (!rates->minimum_linkrate) + rates->minimum_linkrate = phy->minimum_linkrate; + else if (rates->minimum_linkrate < phy->minimum_linkrate_hw) + rates->minimum_linkrate = phy->minimum_linkrate_hw; + + if (!rates->maximum_linkrate) + rates->maximum_linkrate = phy->maximum_linkrate; + else if (rates->maximum_linkrate > phy->maximum_linkrate_hw) + rates->maximum_linkrate = phy->maximum_linkrate_hw; + + /* handle expander phys */ + if (phy->identify.sas_address != mrioc->sas_hba.sas_address) { + phy->minimum_linkrate = rates->minimum_linkrate; + phy->maximum_linkrate = rates->maximum_linkrate; + return mpi3mr_expander_phy_control(mrioc, phy, + SMP_PHY_CONTROL_LINK_RESET); + } + + /* handle hba phys */ + sz = offsetof(struct mpi3_sas_io_unit_page1, phy_data) + + (mrioc->sas_hba.num_phys * + sizeof(struct mpi3_sas_io_unit1_phy_data)); + sas_io_unit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_io_unit_pg1) { + rc = -ENOMEM; + goto out; + } + + if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + + sas_io_unit_pg1->phy_data[phy->number].max_min_link_rate = + (rates->minimum_linkrate + (rates->maximum_linkrate << 4)); + + if (mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) { + ioc_err(mrioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + + /* link reset */ + mpi3mr_transport_phy_reset(phy, 0); + + /* read phy page 0, then update the rates in the sas transport phy */ + if (!mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0, + sizeof(struct mpi3_sas_phy_page0), + MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy->number) && + (ioc_status == MPI3_IOCSTATUS_SUCCESS)) { + phy->minimum_linkrate = mpi3mr_convert_phy_link_rate( + phy_pg0.programmed_link_rate & + MPI3_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = mpi3mr_convert_phy_link_rate( + phy_pg0.programmed_link_rate >> 4); + phy->negotiated_linkrate = + mpi3mr_convert_phy_link_rate( + (phy_pg0.negotiated_link_rate & + MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) + >> MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT); + } + +out: + kfree(sas_io_unit_pg1); + return rc; +} + +/** + * mpi3mr_map_smp_buffer - map BSG dma buffer + * @dev: Generic device reference + * @buf: BSG buffer pointer + * @dma_addr: Physical address holder + * @dma_len: Mapped DMA buffer length. 
+ * @p: Virtual address holder + * + * This function maps the DMAable buffer + * + * Return: 0 on success, non-zero on failure + */ +static int +mpi3mr_map_smp_buffer(struct device *dev, struct bsg_buffer *buf, + dma_addr_t *dma_addr, size_t *dma_len, void **p) +{ + /* Check if the request is split across multiple segments */ + if (buf->sg_cnt > 1) { + *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr, + GFP_KERNEL); + if (!*p) + return -ENOMEM; + *dma_len = buf->payload_len; + } else { + if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL)) + return -ENOMEM; + *dma_addr = sg_dma_address(buf->sg_list); + *dma_len = sg_dma_len(buf->sg_list); + *p = NULL; + } + + return 0; +} + +/** + * mpi3mr_unmap_smp_buffer - unmap BSG dma buffer + * @dev: Generic device reference + * @buf: BSG buffer pointer + * @dma_addr: Physical address to be unmapped + * @p: Virtual address + * + * This function unmaps the DMAable buffer + */ +static void +mpi3mr_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf, + dma_addr_t dma_addr, void *p) +{ + if (p) + dma_free_coherent(dev, buf->payload_len, p, dma_addr); + else + dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL); +} + +/** + * mpi3mr_transport_smp_handler - handler for smp passthru + * @job: BSG job reference + * @shost: SCSI host object reference + * @rphy: SAS transport rphy object pointing the expander + * + * This is used primarily by smp utils for sending the SMP + * commands to the expanders attached to the controller + */ +static void +mpi3mr_transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, + struct sas_rphy *rphy) +{ + struct mpi3mr_ioc *mrioc = shost_priv(shost); + struct mpi3_smp_passthrough_request mpi_request; + struct mpi3_smp_passthrough_reply mpi_reply; + int rc; + void *psge; + dma_addr_t dma_addr_in; + dma_addr_t dma_addr_out; + void *addr_in = NULL; + void *addr_out = NULL; + size_t dma_len_in; + size_t dma_len_out; + unsigned int reslen = 0; + u16 request_sz = sizeof(struct mpi3_smp_passthrough_request); + u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply); + u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; + u16 ioc_status; + + if (mrioc->reset_in_progress) { + ioc_err(mrioc, "%s: host reset in progress!\n", __func__); + rc = -EFAULT; + goto out; + } + + rc = mpi3mr_map_smp_buffer(&mrioc->pdev->dev, &job->request_payload, + &dma_addr_out, &dma_len_out, &addr_out); + if (rc) + goto out; + + if (addr_out) + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, addr_out, + job->request_payload.payload_len); + + rc = mpi3mr_map_smp_buffer(&mrioc->pdev->dev, &job->reply_payload, + &dma_addr_in, &dma_len_in, &addr_in); + if (rc) + goto unmap_out; + + memset(&mpi_request, 0, request_sz); + memset(&mpi_reply, 0, reply_sz); + mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS); + mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH; + mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_rphy(mrioc, rphy); + mpi_request.sas_address = ((rphy) ? 
+ cpu_to_le64(rphy->identify.sas_address) : + cpu_to_le64(mrioc->sas_hba.sas_address)); + psge = &mpi_request.request_sge; + mpi3mr_add_sg_single(psge, sgl_flags, dma_len_out - 4, dma_addr_out); + + psge = &mpi_request.response_sge; + mpi3mr_add_sg_single(psge, sgl_flags, dma_len_in - 4, dma_addr_in); + + dprint_transport_info(mrioc, "sending SMP request\n"); + + rc = mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz, + &mpi_reply, reply_sz, + MPI3MR_INTADMCMD_TIMEOUT, &ioc_status); + if (rc) + goto unmap_in; + + dprint_transport_info(mrioc, + "SMP request completed with ioc_status(0x%04x)\n", ioc_status); + + dprint_transport_info(mrioc, + "SMP request - reply data transfer size(%d)\n", + le16_to_cpu(mpi_reply.response_data_length)); + + memcpy(job->reply, &mpi_reply, reply_sz); + job->reply_len = reply_sz; + reslen = le16_to_cpu(mpi_reply.response_data_length); + + if (addr_in) + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, addr_in, + job->reply_payload.payload_len); + + rc = 0; +unmap_in: + mpi3mr_unmap_smp_buffer(&mrioc->pdev->dev, &job->reply_payload, + dma_addr_in, addr_in); +unmap_out: + mpi3mr_unmap_smp_buffer(&mrioc->pdev->dev, &job->request_payload, + dma_addr_out, addr_out); +out: + bsg_job_done(job, rc, reslen); +} + +struct sas_function_template mpi3mr_transport_functions = { + .get_linkerrors = mpi3mr_transport_get_linkerrors, + .get_enclosure_identifier = mpi3mr_transport_get_enclosure_identifier, + .get_bay_identifier = mpi3mr_transport_get_bay_identifier, + .phy_reset = mpi3mr_transport_phy_reset, + .phy_enable = mpi3mr_transport_phy_enable, + .set_phy_speed = mpi3mr_transport_phy_speed, + .smp_handler = mpi3mr_transport_smp_handler, +}; + +struct scsi_transport_template *mpi3mr_transport_template; diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig new file mode 100644 index 000000000..c299f7e07 --- /dev/null +++ b/drivers/scsi/mpt3sas/Kconfig @@ -0,0 +1,83 @@ +# +# Kernel configuration file for the MPT3SAS +# +# This code is based on drivers/scsi/mpt3sas/Kconfig +# Copyright (C) 2012-2014 LSI Corporation +# (mailto:DL-MPTFusionLinux@lsi.com) + +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# NO WARRANTY +# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT +# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, +# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is +# solely responsible for determining the appropriateness of using and +# distributing the Program and assumes all risks associated with its +# exercise of rights under this Agreement, including but not limited to +# the risks and costs of program errors, damage to or loss of data, +# programs or equipment, and unavailability or interruption of operations. 
+ +# DISCLAIMER OF LIABILITY +# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +# USA. + +config SCSI_MPT3SAS + tristate "LSI MPT Fusion SAS 3.0 & SAS 2.0 Device Driver" + depends on PCI && SCSI + select SCSI_SAS_ATTRS + select RAID_ATTRS + select IRQ_POLL + help + This driver supports PCI-Express SAS 12Gb/s Host Adapters. + +config SCSI_MPT2SAS_MAX_SGE + int "LSI MPT Fusion SAS 2.0 Max number of SG Entries (16 - 256)" + depends on PCI && SCSI && SCSI_MPT3SAS + default "128" + range 16 256 + help + This option allows you to specify the maximum number of scatter- + gather entries per I/O. The driver default is 128, which matches + MAX_PHYS_SEGMENTS in most kernels. However in SuSE kernels this + can be 256. However, it may decreased down to 16. Decreasing this + parameter will reduce memory requirements on a per controller instance. + +config SCSI_MPT3SAS_MAX_SGE + int "LSI MPT Fusion SAS 3.0 Max number of SG Entries (16 - 256)" + depends on PCI && SCSI && SCSI_MPT3SAS + default "128" + range 16 256 + help + This option allows you to specify the maximum number of scatter- + gather entries per I/O. The driver default is 128, which matches + MAX_PHYS_SEGMENTS in most kernels. However in SuSE kernels this + can be 256. However, it may decreased down to 16. Decreasing this + parameter will reduce memory requirements on a per controller instance. + +config SCSI_MPT2SAS + tristate "Legacy MPT2SAS config option" + default n + select SCSI_MPT3SAS + depends on PCI && SCSI + help + Dummy config option for backwards compatibility: configure the MPT3SAS + driver instead. diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile new file mode 100644 index 000000000..e76d994db --- /dev/null +++ b/drivers/scsi/mpt3sas/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +# mpt3sas makefile +obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o +mpt3sas-y += mpt3sas_base.o \ + mpt3sas_config.o \ + mpt3sas_scsih.o \ + mpt3sas_transport.o \ + mpt3sas_ctl.o \ + mpt3sas_trigger_diag.o \ + mpt3sas_warpdrive.o \ + mpt3sas_debugfs.o \ diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h new file mode 100644 index 000000000..6de35b322 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2.h @@ -0,0 +1,1300 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2.h + * Title: MPI Message independent structures and definitions + * including System Interface Register Set and + * scatter/gather formats. + * Creation Date: June 21, 2006 + * + * mpi2.h Version: 02.00.54 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. 
+ * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT. + * Moved ReplyPostHostIndex register to offset 0x6C of the + * MPI2_SYSTEM_INTERFACE_REGS and modified the define for + * MPI2_REPLY_POST_HOST_INDEX_OFFSET. + * Added union of request descriptors. + * Added union of reply descriptors. + * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT. + * Added define for MPI2_VERSION_02_00. + * Fixed the size of the FunctionDependent5 field in the + * MPI2_DEFAULT_REPLY structure. + * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT. + * Removed the MPI-defined Fault Codes and extended the + * product specific codes up to 0xEFFF. + * Added a sixth key value for the WriteSequence register + * and changed the flush value to 0x0. + * Added message function codes for Diagnostic Buffer Post + * and Diagnsotic Release. + * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED + * Moved MPI2_VERSION_UNION from mpi2_ioc.h. + * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT. + * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT. + * Added #defines for marking a reply descriptor as unused. + * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT. + * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT. + * Moved LUN field defines from mpi2_init.h. + * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT. + * In all request and reply descriptors, replaced VF_ID + * field with MSIxIndex field. + * Removed DevHandle field from + * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those + * bytes reserved. + * Added RAID Accelerator functionality. + * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. + * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MSI-x index mask and shift for Reply Post Host + * Index register. + * Added function code for Host Based Discovery Action. + * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT. + * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL. + * Added defines for product-specific range of message + * function codes, 0xF0 to 0xFF. + * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT. + * Added alternative defines for the SGE Direction bit. + * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define. + * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI2_FUNCTION_SEND_HOST_MESSAGE. + * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT. + * Incorporating additions for MPI v2.5. + * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT. + * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT. + * Added Hard Reset delay timings. + * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET. + * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. 
+ * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT. + * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT + * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-18-14 02.00.36 Updated copyright information. + * Bumped MPI2_HEADER_VERSION_UNIT. + * 03-16-15 02.00.37 Bumped MPI2_HEADER_VERSION_UNIT. + * Added Scratchpad registers to + * MPI2_SYSTEM_INTERFACE_REGS. + * Added MPI2_DIAG_SBR_RELOAD. + * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-15-15 02.00.41 Bumped MPI_HEADER_VERSION_UNIT + * 01-01-16 02.00.42 Bumped MPI_HEADER_VERSION_UNIT + * 04-05-16 02.00.43 Modified MPI26_DIAG_BOOT_DEVICE_SELECT defines + * to be unique within first 32 characters. + * Removed AHCI support. + * Removed SOP support. + * Bumped MPI2_HEADER_VERSION_UNIT. + * 04-10-16 02.00.44 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-06-16 02.00.45 Bumped MPI2_HEADER_VERSION_UNIT. + * 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT. + * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-13-17 02.00.49 Bumped MPI2_HEADER_VERSION_UNIT. + * 09-29-17 02.00.50 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-22-18 02.00.51 Added SECURE_BOOT define. + * Bumped MPI2_HEADER_VERSION_UNIT + * 08-15-18 02.00.52 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-28-18 02.00.53 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI2_IOCSTATUS_FAILURE + * 12-17-18 02.00.54 Bumped MPI2_HEADER_VERSION_UNIT + * 06-24-19 02.00.55 Bumped MPI2_HEADER_VERSION_UNIT + * 08-01-19 02.00.56 Bumped MPI2_HEADER_VERSION_UNIT + * 10-02-19 02.00.57 Bumped MPI2_HEADER_VERSION_UNIT + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_H +#define MPI2_H + +/***************************************************************************** +* +* MPI Version Definitions +* +*****************************************************************************/ + +#define MPI2_VERSION_MAJOR_MASK (0xFF00) +#define MPI2_VERSION_MAJOR_SHIFT (8) +#define MPI2_VERSION_MINOR_MASK (0x00FF) +#define MPI2_VERSION_MINOR_SHIFT (0) + +/*major version for all MPI v2.x */ +#define MPI2_VERSION_MAJOR (0x02) + +/*minor version for MPI v2.0 compatible products */ +#define MPI2_VERSION_MINOR (0x00) +#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI2_VERSION_MINOR) +#define MPI2_VERSION_02_00 (0x0200) + +/*minor version for MPI v2.5 compatible products */ +#define MPI25_VERSION_MINOR (0x05) +#define MPI25_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI25_VERSION_MINOR) +#define MPI2_VERSION_02_05 (0x0205) + +/*minor version for MPI v2.6 compatible products */ +#define MPI26_VERSION_MINOR (0x06) +#define MPI26_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI26_VERSION_MINOR) +#define MPI2_VERSION_02_06 (0x0206) + + +/* Unit and Dev versioning for this MPI header set */ +#define MPI2_HEADER_VERSION_UNIT (0x39) +#define MPI2_HEADER_VERSION_DEV (0x00) +#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) +#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) +#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF) +#define MPI2_HEADER_VERSION_DEV_SHIFT (0) +#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \ + MPI2_HEADER_VERSION_DEV) + +/***************************************************************************** +* +* IOC 
State Definitions +* +*****************************************************************************/ + +#define MPI2_IOC_STATE_RESET (0x00000000) +#define MPI2_IOC_STATE_READY (0x10000000) +#define MPI2_IOC_STATE_OPERATIONAL (0x20000000) +#define MPI2_IOC_STATE_FAULT (0x40000000) +#define MPI2_IOC_STATE_COREDUMP (0x50000000) + +#define MPI2_IOC_STATE_MASK (0xF0000000) +#define MPI2_IOC_STATE_SHIFT (28) + +/*Fault state range for prodcut specific codes */ +#define MPI2_FAULT_PRODUCT_SPECIFIC_MIN (0x0000) +#define MPI2_FAULT_PRODUCT_SPECIFIC_MAX (0xEFFF) + +/***************************************************************************** +* +* System Interface Register Definitions +* +*****************************************************************************/ + +typedef struct _MPI2_SYSTEM_INTERFACE_REGS { + U32 Doorbell; /*0x00 */ + U32 WriteSequence; /*0x04 */ + U32 HostDiagnostic; /*0x08 */ + U32 Reserved1; /*0x0C */ + U32 DiagRWData; /*0x10 */ + U32 DiagRWAddressLow; /*0x14 */ + U32 DiagRWAddressHigh; /*0x18 */ + U32 Reserved2[5]; /*0x1C */ + U32 HostInterruptStatus; /*0x30 */ + U32 HostInterruptMask; /*0x34 */ + U32 DCRData; /*0x38 */ + U32 DCRAddress; /*0x3C */ + U32 Reserved3[2]; /*0x40 */ + U32 ReplyFreeHostIndex; /*0x48 */ + U32 Reserved4[8]; /*0x4C */ + U32 ReplyPostHostIndex; /*0x6C */ + U32 Reserved5; /*0x70 */ + U32 HCBSize; /*0x74 */ + U32 HCBAddressLow; /*0x78 */ + U32 HCBAddressHigh; /*0x7C */ + U32 Reserved6[12]; /*0x80 */ + U32 Scratchpad[4]; /*0xB0 */ + U32 RequestDescriptorPostLow; /*0xC0 */ + U32 RequestDescriptorPostHigh; /*0xC4 */ + U32 AtomicRequestDescriptorPost;/*0xC8 */ + U32 Reserved7[13]; /*0xCC */ +} MPI2_SYSTEM_INTERFACE_REGS, + *PTR_MPI2_SYSTEM_INTERFACE_REGS, + Mpi2SystemInterfaceRegs_t, + *pMpi2SystemInterfaceRegs_t; + +/* + *Defines for working with the Doorbell register. 
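The IOC state encoding above is laid out so that a single 32-bit read of the Doorbell register tells the host both the current state (top nibble) and, in the FAULT state, the fault code (low 16 bits, see MPI2_DOORBELL_FAULT_CODE_MASK just below). A self-contained sketch of that decoding, with the constants mirrored locally rather than pulled from this header:

#include <stdint.h>
#include <stdio.h>

/* Local mirrors of the definitions above/below; a real consumer would
 * simply include mpi2.h. */
#define MPI2_IOC_STATE_MASK            (0xF0000000)
#define MPI2_IOC_STATE_FAULT           (0x40000000)
#define MPI2_DOORBELL_FAULT_CODE_MASK  (0x0000FFFF)

/* Classify a raw Doorbell register value: the state lives in the top
 * nibble, and in the FAULT state the low 16 bits carry the fault code. */
static void decode_doorbell(uint32_t doorbell)
{
	uint32_t state = doorbell & MPI2_IOC_STATE_MASK;

	if (state == MPI2_IOC_STATE_FAULT)
		printf("IOC fault, code 0x%04x\n",
		       doorbell & MPI2_DOORBELL_FAULT_CODE_MASK);
	else
		printf("IOC state 0x%08x\n", state);
}

int main(void)
{
	decode_doorbell(0x40001234);	/* fault, code 0x1234 */
	decode_doorbell(0x20000000);	/* operational */
	return 0;
}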
+ */ +#define MPI2_DOORBELL_OFFSET (0x00000000) + +/*IOC --> System values */ +#define MPI2_DOORBELL_USED (0x08000000) +#define MPI2_DOORBELL_WHO_INIT_MASK (0x07000000) +#define MPI2_DOORBELL_WHO_INIT_SHIFT (24) +#define MPI2_DOORBELL_FAULT_CODE_MASK (0x0000FFFF) +#define MPI2_DOORBELL_DATA_MASK (0x0000FFFF) + +/*System --> IOC values */ +#define MPI2_DOORBELL_FUNCTION_MASK (0xFF000000) +#define MPI2_DOORBELL_FUNCTION_SHIFT (24) +#define MPI2_DOORBELL_ADD_DWORDS_MASK (0x00FF0000) +#define MPI2_DOORBELL_ADD_DWORDS_SHIFT (16) + +/* + *Defines for the WriteSequence register + */ +#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004) +#define MPI2_WRSEQ_KEY_VALUE_MASK (0x0000000F) +#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0) +#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF) +#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4) +#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB) +#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2) +#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7) +#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD) + +/* + *Defines for the HostDiagnostic register + */ +#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008) + +#define MPI26_DIAG_SECURE_BOOT (0x80000000) + +#define MPI2_DIAG_SBR_RELOAD (0x00002000) + +#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800) +#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000) +#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800) + +/* Defines for V7A/V7R HostDiagnostic Register */ +#define MPI26_DIAG_BOOT_DEVICE_SEL_64FLASH (0x00000000) +#define MPI26_DIAG_BOOT_DEVICE_SEL_64HCDW (0x00000800) +#define MPI26_DIAG_BOOT_DEVICE_SEL_32FLASH (0x00001000) +#define MPI26_DIAG_BOOT_DEVICE_SEL_32HCDW (0x00001800) + +#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400) +#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200) +#define MPI2_DIAG_HCB_MODE (0x00000100) +#define MPI2_DIAG_DIAG_WRITE_ENABLE (0x00000080) +#define MPI2_DIAG_FLASH_BAD_SIG (0x00000040) +#define MPI2_DIAG_RESET_HISTORY (0x00000020) +#define MPI2_DIAG_DIAG_RW_ENABLE (0x00000010) +#define MPI2_DIAG_RESET_ADAPTER (0x00000004) +#define MPI2_DIAG_HOLD_IOC_RESET (0x00000002) + +/* + *Offsets for DiagRWData and address + */ +#define MPI2_DIAG_RW_DATA_OFFSET (0x00000010) +#define MPI2_DIAG_RW_ADDRESS_LOW_OFFSET (0x00000014) +#define MPI2_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00000018) + +/* + *Defines for the HostInterruptStatus register + */ +#define MPI2_HOST_INTERRUPT_STATUS_OFFSET (0x00000030) +#define MPI2_HIS_SYS2IOC_DB_STATUS (0x80000000) +#define MPI2_HIS_IOP_DOORBELL_STATUS MPI2_HIS_SYS2IOC_DB_STATUS +#define MPI2_HIS_RESET_IRQ_STATUS (0x40000000) +#define MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT (0x00000008) +#define MPI2_HIS_IOC2SYS_DB_STATUS (0x00000001) +#define MPI2_HIS_DOORBELL_INTERRUPT MPI2_HIS_IOC2SYS_DB_STATUS + +/* + *Defines for the HostInterruptMask register + */ +#define MPI2_HOST_INTERRUPT_MASK_OFFSET (0x00000034) +#define MPI2_HIM_RESET_IRQ_MASK (0x40000000) +#define MPI2_HIM_REPLY_INT_MASK (0x00000008) +#define MPI2_HIM_RIM MPI2_HIM_REPLY_INT_MASK +#define MPI2_HIM_IOC2SYS_DB_MASK (0x00000001) +#define MPI2_HIM_DIM MPI2_HIM_IOC2SYS_DB_MASK + +/* + *Offsets for DCRData and address + */ +#define MPI2_DCR_DATA_OFFSET (0x00000038) +#define MPI2_DCR_ADDRESS_OFFSET (0x0000003C) + +/* + *Offset for the Reply Free Queue + */ +#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048) + +/* + *Defines for the Reply Descriptor Post Queue + */ +#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C) +#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF) +#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000) +#define MPI2_RPHI_MSIX_INDEX_SHIFT (24) +#define 
MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) /*MPI v2.5 only*/ + + +/* + *Defines for the HCBSize and address + */ +#define MPI2_HCB_SIZE_OFFSET (0x00000074) +#define MPI2_HCB_SIZE_SIZE_MASK (0xFFFFF000) +#define MPI2_HCB_SIZE_HCB_ENABLE (0x00000001) + +#define MPI2_HCB_ADDRESS_LOW_OFFSET (0x00000078) +#define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C) + +/* + *Offsets for the Scratchpad registers + */ +#define MPI26_SCRATCHPAD0_OFFSET (0x000000B0) +#define MPI26_SCRATCHPAD1_OFFSET (0x000000B4) +#define MPI26_SCRATCHPAD2_OFFSET (0x000000B8) +#define MPI26_SCRATCHPAD3_OFFSET (0x000000BC) + +/* + *Offsets for the Request Descriptor Post Queue + */ +#define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0) +#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4) +#define MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET (0x000000C8) + +/*Hard Reset delay timings */ +#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000) +#define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC (255000) +#define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC (256000) + +/***************************************************************************** +* +* Message Descriptors +* +*****************************************************************************/ + +/*Request Descriptors */ + +/*Default Request Descriptor */ +typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 DescriptorTypeDependent; /*0x06 */ +} MPI2_DEFAULT_REQUEST_DESCRIPTOR, + *PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR, + Mpi2DefaultRequestDescriptor_t, + *pMpi2DefaultRequestDescriptor_t; + +/*defines for the RequestFlags field */ +#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x1E) +#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_RSHIFT (1) +#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) +#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02) +#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06) +#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08) +#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A) +#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C) +#define MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED (0x10) + +#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01) + +/*High Priority Request Descriptor */ +typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 Reserved1; /*0x06 */ +} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, + *PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, + Mpi2HighPriorityRequestDescriptor_t, + *pMpi2HighPriorityRequestDescriptor_t; + +/*SCSI IO Request Descriptor */ +typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 DevHandle; /*0x06 */ +} MPI2_SCSI_IO_REQUEST_DESCRIPTOR, + *PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR, + Mpi2SCSIIORequestDescriptor_t, + *pMpi2SCSIIORequestDescriptor_t; + +/*SCSI Target Request Descriptor */ +typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 IoIndex; /*0x06 */ +} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, + *PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, + Mpi2SCSITargetRequestDescriptor_t, + *pMpi2SCSITargetRequestDescriptor_t; + +/*RAID Accelerator Request Descriptor */ +typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 
*/ + U16 LMID; /*0x04 */ + U16 Reserved; /*0x06 */ +} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, + *PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, + Mpi2RAIDAcceleratorRequestDescriptor_t, + *pMpi2RAIDAcceleratorRequestDescriptor_t; + +/*Fast Path SCSI IO Request Descriptor */ +typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR + MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, + *PTR_MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, + Mpi25FastPathSCSIIORequestDescriptor_t, + *pMpi25FastPathSCSIIORequestDescriptor_t; + +/*PCIe Encapsulated Request Descriptor */ +typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR + MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR, + *PTR_MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR, + Mpi26PCIeEncapsulatedRequestDescriptor_t, + *pMpi26PCIeEncapsulatedRequestDescriptor_t; + +/*union of Request Descriptors */ +typedef union _MPI2_REQUEST_DESCRIPTOR_UNION { + MPI2_DEFAULT_REQUEST_DESCRIPTOR Default; + MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; + MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; + MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget; + MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator; + MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO; + MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR PCIeEncapsulated; + U64 Words; +} MPI2_REQUEST_DESCRIPTOR_UNION, + *PTR_MPI2_REQUEST_DESCRIPTOR_UNION, + Mpi2RequestDescriptorUnion_t, + *pMpi2RequestDescriptorUnion_t; + +/*Atomic Request Descriptors */ + +/* + * All Atomic Request Descriptors have the same format, so the following + * structure is used for all Atomic Request Descriptors: + * Atomic Default Request Descriptor + * Atomic High Priority Request Descriptor + * Atomic SCSI IO Request Descriptor + * Atomic SCSI Target Request Descriptor + * Atomic RAID Accelerator Request Descriptor + * Atomic Fast Path SCSI IO Request Descriptor + * Atomic PCIe Encapsulated Request Descriptor + */ + +/*Atomic Request Descriptor */ +typedef struct _MPI26_ATOMIC_REQUEST_DESCRIPTOR { + U8 RequestFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ +} MPI26_ATOMIC_REQUEST_DESCRIPTOR, + *PTR_MPI26_ATOMIC_REQUEST_DESCRIPTOR, + Mpi26AtomicRequestDescriptor_t, + *pMpi26AtomicRequestDescriptor_t; + +/*for the RequestFlags field, use the same + *defines as MPI2_DEFAULT_REQUEST_DESCRIPTOR + */ + +/*Reply Descriptors */ + +/*Default Reply Descriptor */ +typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 DescriptorTypeDependent1; /*0x02 */ + U32 DescriptorTypeDependent2; /*0x04 */ +} MPI2_DEFAULT_REPLY_DESCRIPTOR, + *PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR, + Mpi2DefaultReplyDescriptor_t, + *pMpi2DefaultReplyDescriptor_t; + +/*defines for the ReplyFlags field */ +#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F) +#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00) +#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01) +#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02) +#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03) +#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05) +#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06) +#define MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS (0x08) +#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) + +/*values for marking a reply descriptor as unused */ +#define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF) +#define MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK (0xFFFFFFFF) + +/*Address Reply Descriptor */ +typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U32 ReplyFrameAddress; /*0x04 */ +} 
MPI2_ADDRESS_REPLY_DESCRIPTOR, + *PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR, + Mpi2AddressReplyDescriptor_t, + *pMpi2AddressReplyDescriptor_t; + +#define MPI2_ADDRESS_REPLY_SMID_INVALID (0x00) + +/*SCSI IO Success Reply Descriptor */ +typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 TaskTag; /*0x04 */ + U16 Reserved1; /*0x06 */ +} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + Mpi2SCSIIOSuccessReplyDescriptor_t, + *pMpi2SCSIIOSuccessReplyDescriptor_t; + +/*TargetAssist Success Reply Descriptor */ +typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U8 SequenceNumber; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 IoIndex; /*0x06 */ +} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, + Mpi2TargetAssistSuccessReplyDescriptor_t, + *pMpi2TargetAssistSuccessReplyDescriptor_t; + +/*Target Command Buffer Reply Descriptor */ +typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U8 VP_ID; /*0x02 */ + U8 Flags; /*0x03 */ + U16 InitiatorDevHandle; /*0x04 */ + U16 IoIndex; /*0x06 */ +} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, + *PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, + Mpi2TargetCommandBufferReplyDescriptor_t, + *pMpi2TargetCommandBufferReplyDescriptor_t; + +/*defines for Flags field */ +#define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F) + +/*RAID Accelerator Success Reply Descriptor */ +typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U32 Reserved; /*0x04 */ +} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, + Mpi2RAIDAcceleratorSuccessReplyDescriptor_t, + *pMpi2RAIDAcceleratorSuccessReplyDescriptor_t; + +/*Fast Path SCSI IO Success Reply Descriptor */ +typedef MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR + MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + Mpi25FastPathSCSIIOSuccessReplyDescriptor_t, + *pMpi25FastPathSCSIIOSuccessReplyDescriptor_t; + +/*PCIe Encapsulated Success Reply Descriptor */ +typedef MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR + MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR, + Mpi26PCIeEncapsulatedSuccessReplyDescriptor_t, + *pMpi26PCIeEncapsulatedSuccessReplyDescriptor_t; + +/*union of Reply Descriptors */ +typedef union _MPI2_REPLY_DESCRIPTORS_UNION { + MPI2_DEFAULT_REPLY_DESCRIPTOR Default; + MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply; + MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; + MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess; + MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer; + MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess; + MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess; + MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR + PCIeEncapsulatedSuccess; + U64 Words; +} MPI2_REPLY_DESCRIPTORS_UNION, + *PTR_MPI2_REPLY_DESCRIPTORS_UNION, + Mpi2ReplyDescriptorsUnion_t, + *pMpi2ReplyDescriptorsUnion_t; + +/***************************************************************************** +* +* Message Functions +* +*****************************************************************************/ + +#define MPI2_FUNCTION_SCSI_IO_REQUEST 
(0x00) +#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) +#define MPI2_FUNCTION_IOC_INIT (0x02) +#define MPI2_FUNCTION_IOC_FACTS (0x03) +#define MPI2_FUNCTION_CONFIG (0x04) +#define MPI2_FUNCTION_PORT_FACTS (0x05) +#define MPI2_FUNCTION_PORT_ENABLE (0x06) +#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07) +#define MPI2_FUNCTION_EVENT_ACK (0x08) +#define MPI2_FUNCTION_FW_DOWNLOAD (0x09) +#define MPI2_FUNCTION_TARGET_ASSIST (0x0B) +#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C) +#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D) +#define MPI2_FUNCTION_FW_UPLOAD (0x12) +#define MPI2_FUNCTION_RAID_ACTION (0x15) +#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16) +#define MPI2_FUNCTION_TOOLBOX (0x17) +#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18) +#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A) +#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B) +#define MPI2_FUNCTION_IO_UNIT_CONTROL (0x1B) +#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C) +#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D) +#define MPI2_FUNCTION_DIAG_RELEASE (0x1E) +#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) +#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) +#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) +#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) +#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30) +#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31) +#define MPI2_FUNCTION_NVME_ENCAPSULATED (0x33) +#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0) +#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF) + +/*Doorbell functions */ +#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40) +#define MPI2_FUNCTION_HANDSHAKE (0x42) + +/***************************************************************************** +* +* IOC Status Values +* +*****************************************************************************/ + +/*mask for IOCStatus status value */ +#define MPI2_IOCSTATUS_MASK (0x7FFF) + +/**************************************************************************** +* Common IOCStatus values for all replies +****************************************************************************/ + +#define MPI2_IOCSTATUS_SUCCESS (0x0000) +#define MPI2_IOCSTATUS_INVALID_FUNCTION (0x0001) +#define MPI2_IOCSTATUS_BUSY (0x0002) +#define MPI2_IOCSTATUS_INVALID_SGL (0x0003) +#define MPI2_IOCSTATUS_INTERNAL_ERROR (0x0004) +#define MPI2_IOCSTATUS_INVALID_VPID (0x0005) +#define MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006) +#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007) +#define MPI2_IOCSTATUS_INVALID_STATE (0x0008) +#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009) +/*MPI v2.6 and later */ +#define MPI2_IOCSTATUS_INSUFFICIENT_POWER (0x000A) +#define MPI2_IOCSTATUS_FAILURE (0x000F) + +/**************************************************************************** +* Config IOCStatus values +****************************************************************************/ + +#define MPI2_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020) +#define MPI2_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021) +#define MPI2_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022) +#define MPI2_IOCSTATUS_CONFIG_INVALID_DATA (0x0023) +#define MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024) +#define MPI2_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025) + +/**************************************************************************** +* SCSI IO Reply +****************************************************************************/ + +#define MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040) +#define MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042) +#define MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043) +#define 
MPI2_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044) +#define MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045) +#define MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046) +#define MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047) +#define MPI2_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048) +#define MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049) +#define MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A) +#define MPI2_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B) +#define MPI2_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C) + +/**************************************************************************** +* For use by SCSI Initiator and SCSI Target end-to-end data protection +****************************************************************************/ + +#define MPI2_IOCSTATUS_EEDP_GUARD_ERROR (0x004D) +#define MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E) +#define MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F) + +/**************************************************************************** +* SCSI Target values +****************************************************************************/ + +#define MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062) +#define MPI2_IOCSTATUS_TARGET_ABORTED (0x0063) +#define MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064) +#define MPI2_IOCSTATUS_TARGET_NO_CONNECTION (0x0065) +#define MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A) +#define MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D) +#define MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E) +#define MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F) +#define MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070) +#define MPI2_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071) + +/**************************************************************************** +* Serial Attached SCSI values +****************************************************************************/ + +#define MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090) +#define MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091) + +/**************************************************************************** +* Diagnostic Buffer Post / Diagnostic Release values +****************************************************************************/ + +#define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0) + +/**************************************************************************** +* RAID Accelerator values +****************************************************************************/ + +#define MPI2_IOCSTATUS_RAID_ACCEL_ERROR (0x00B0) + +/**************************************************************************** +* IOCStatus flag to indicate that log info is available +****************************************************************************/ + +#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000) + +/**************************************************************************** +* IOCLogInfo Types +****************************************************************************/ + +#define MPI2_IOCLOGINFO_TYPE_MASK (0xF0000000) +#define MPI2_IOCLOGINFO_TYPE_SHIFT (28) +#define MPI2_IOCLOGINFO_TYPE_NONE (0x0) +#define MPI2_IOCLOGINFO_TYPE_SCSI (0x1) +#define MPI2_IOCLOGINFO_TYPE_FC (0x2) +#define MPI2_IOCLOGINFO_TYPE_SAS (0x3) +#define MPI2_IOCLOGINFO_TYPE_ISCSI (0x4) +#define MPI2_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF) + +/***************************************************************************** +* +* Standard Message Structures +* +*****************************************************************************/ + +/**************************************************************************** +*Request Message Header for all request messages 
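Tying the IOCStatus values above together: the 16-bit IOCStatus word carried in a reply (see MPI2_DEFAULT_REPLY below) uses bit 15 only as a "log info available" flag, so consumers mask with MPI2_IOCSTATUS_MASK before comparing against the codes listed above, and decode IOCLogInfo with the TYPE/LOG_DATA masks when the flag is set. A self-contained sketch with the constants mirrored locally:

#include <stdint.h>
#include <stdio.h>

/* Local mirrors of the definitions above; a real consumer includes mpi2.h. */
#define MPI2_IOCSTATUS_MASK                    (0x7FFF)
#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000)
#define MPI2_IOCLOGINFO_TYPE_MASK              (0xF0000000)
#define MPI2_IOCLOGINFO_TYPE_SHIFT             (28)
#define MPI2_IOCLOGINFO_LOG_DATA_MASK          (0x0FFFFFFF)

/* Split an already CPU-endian IOCStatus/IOCLogInfo pair into its pieces:
 * bit 15 only says whether IOCLogInfo is valid, the low 15 bits are the
 * actual status code. */
static void decode_ioc_status(uint16_t ioc_status, uint32_t loginfo)
{
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
		printf("loginfo type %u, data 0x%07x\n",
		       (loginfo & MPI2_IOCLOGINFO_TYPE_MASK) >>
		       MPI2_IOCLOGINFO_TYPE_SHIFT,
		       loginfo & MPI2_IOCLOGINFO_LOG_DATA_MASK);

	printf("ioc_status 0x%04x\n", ioc_status & MPI2_IOCSTATUS_MASK);
}

int main(void)
{
	decode_ioc_status(0x8004, 0x30000123);	/* INTERNAL_ERROR + SAS loginfo */
	return 0;
}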
+****************************************************************************/ + +typedef struct _MPI2_REQUEST_HEADER { + U16 FunctionDependent1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 FunctionDependent2; /*0x04 */ + U8 FunctionDependent3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ +} MPI2_REQUEST_HEADER, *PTR_MPI2_REQUEST_HEADER, + MPI2RequestHeader_t, *pMPI2RequestHeader_t; + +/**************************************************************************** +* Default Reply +****************************************************************************/ + +typedef struct _MPI2_DEFAULT_REPLY { + U16 FunctionDependent1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 FunctionDependent2; /*0x04 */ + U8 FunctionDependent3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U16 FunctionDependent5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_DEFAULT_REPLY, *PTR_MPI2_DEFAULT_REPLY, + MPI2DefaultReply_t, *pMPI2DefaultReply_t; + +/*common version structure/union used in messages and configuration pages */ + +typedef struct _MPI2_VERSION_STRUCT { + U8 Dev; /*0x00 */ + U8 Unit; /*0x01 */ + U8 Minor; /*0x02 */ + U8 Major; /*0x03 */ +} MPI2_VERSION_STRUCT; + +typedef union _MPI2_VERSION_UNION { + MPI2_VERSION_STRUCT Struct; + U32 Word; +} MPI2_VERSION_UNION; + +/*LUN field defines, common to many structures */ +#define MPI2_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF) +#define MPI2_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000) +#define MPI2_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF) +#define MPI2_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000) +#define MPI2_LUN_LEVEL_1_WORD (0xFF00) +#define MPI2_LUN_LEVEL_1_DWORD (0x0000FF00) + +/***************************************************************************** +* +* Fusion-MPT MPI Scatter Gather Elements +* +*****************************************************************************/ + +/**************************************************************************** +* MPI Simple Element structures +****************************************************************************/ + +typedef struct _MPI2_SGE_SIMPLE32 { + U32 FlagsLength; + U32 Address; +} MPI2_SGE_SIMPLE32, *PTR_MPI2_SGE_SIMPLE32, + Mpi2SGESimple32_t, *pMpi2SGESimple32_t; + +typedef struct _MPI2_SGE_SIMPLE64 { + U32 FlagsLength; + U64 Address; +} MPI2_SGE_SIMPLE64, *PTR_MPI2_SGE_SIMPLE64, + Mpi2SGESimple64_t, *pMpi2SGESimple64_t; + +typedef struct _MPI2_SGE_SIMPLE_UNION { + U32 FlagsLength; + union { + U32 Address32; + U64 Address64; + } u; +} MPI2_SGE_SIMPLE_UNION, + *PTR_MPI2_SGE_SIMPLE_UNION, + Mpi2SGESimpleUnion_t, + *pMpi2SGESimpleUnion_t; + +/**************************************************************************** +* MPI Chain Element structures - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_SGE_CHAIN32 { + U16 Length; + U8 NextChainOffset; + U8 Flags; + U32 Address; +} MPI2_SGE_CHAIN32, *PTR_MPI2_SGE_CHAIN32, + Mpi2SGEChain32_t, *pMpi2SGEChain32_t; + +typedef struct _MPI2_SGE_CHAIN64 { + U16 Length; + U8 NextChainOffset; + U8 Flags; + U64 Address; +} MPI2_SGE_CHAIN64, *PTR_MPI2_SGE_CHAIN64, + Mpi2SGEChain64_t, *pMpi2SGEChain64_t; + +typedef struct _MPI2_SGE_CHAIN_UNION { + U16 Length; + U8 NextChainOffset; + U8 Flags; + union { + U32 Address32; + U64 Address64; + } u; +} MPI2_SGE_CHAIN_UNION, + *PTR_MPI2_SGE_CHAIN_UNION, + 
Mpi2SGEChainUnion_t, + *pMpi2SGEChainUnion_t; + +/**************************************************************************** +* MPI Transaction Context Element structures - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_SGE_TRANSACTION32 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[1]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION32, + *PTR_MPI2_SGE_TRANSACTION32, + Mpi2SGETransaction32_t, + *pMpi2SGETransaction32_t; + +typedef struct _MPI2_SGE_TRANSACTION64 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[2]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION64, + *PTR_MPI2_SGE_TRANSACTION64, + Mpi2SGETransaction64_t, + *pMpi2SGETransaction64_t; + +typedef struct _MPI2_SGE_TRANSACTION96 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[3]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION96, *PTR_MPI2_SGE_TRANSACTION96, + Mpi2SGETransaction96_t, *pMpi2SGETransaction96_t; + +typedef struct _MPI2_SGE_TRANSACTION128 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[4]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION128, *PTR_MPI2_SGE_TRANSACTION128, + Mpi2SGETransaction_t128, *pMpi2SGETransaction_t128; + +typedef struct _MPI2_SGE_TRANSACTION_UNION { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + union { + U32 TransactionContext32[1]; + U32 TransactionContext64[2]; + U32 TransactionContext96[3]; + U32 TransactionContext128[4]; + } u; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION_UNION, + *PTR_MPI2_SGE_TRANSACTION_UNION, + Mpi2SGETransactionUnion_t, + *pMpi2SGETransactionUnion_t; + +/**************************************************************************** +* MPI SGE union for IO SGL's - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_MPI_SGE_IO_UNION { + union { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_CHAIN_UNION Chain; + } u; +} MPI2_MPI_SGE_IO_UNION, *PTR_MPI2_MPI_SGE_IO_UNION, + Mpi2MpiSGEIOUnion_t, *pMpi2MpiSGEIOUnion_t; + +/**************************************************************************** +* MPI SGE union for SGL's with Simple and Transaction elements - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_SGE_TRANS_SIMPLE_UNION { + union { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_TRANSACTION_UNION Transaction; + } u; +} MPI2_SGE_TRANS_SIMPLE_UNION, + *PTR_MPI2_SGE_TRANS_SIMPLE_UNION, + Mpi2SGETransSimpleUnion_t, + *pMpi2SGETransSimpleUnion_t; + +/**************************************************************************** +* All MPI SGE types union +****************************************************************************/ + +typedef struct _MPI2_MPI_SGE_UNION { + union { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_CHAIN_UNION Chain; + MPI2_SGE_TRANSACTION_UNION Transaction; + } u; +} MPI2_MPI_SGE_UNION, *PTR_MPI2_MPI_SGE_UNION, + Mpi2MpiSgeUnion_t, *pMpi2MpiSgeUnion_t; + +/**************************************************************************** +* MPI SGE field definition and masks +****************************************************************************/ + +/*Flags field bit definitions */ + +#define MPI2_SGE_FLAGS_LAST_ELEMENT (0x80) +#define MPI2_SGE_FLAGS_END_OF_BUFFER (0x40) +#define 
MPI2_SGE_FLAGS_ELEMENT_TYPE_MASK (0x30) +#define MPI2_SGE_FLAGS_LOCAL_ADDRESS (0x08) +#define MPI2_SGE_FLAGS_DIRECTION (0x04) +#define MPI2_SGE_FLAGS_ADDRESS_SIZE (0x02) +#define MPI2_SGE_FLAGS_END_OF_LIST (0x01) + +#define MPI2_SGE_FLAGS_SHIFT (24) + +#define MPI2_SGE_LENGTH_MASK (0x00FFFFFF) +#define MPI2_SGE_CHAIN_LENGTH_MASK (0x0000FFFF) + +/*Element Type */ + +#define MPI2_SGE_FLAGS_TRANSACTION_ELEMENT (0x00) +#define MPI2_SGE_FLAGS_SIMPLE_ELEMENT (0x10) +#define MPI2_SGE_FLAGS_CHAIN_ELEMENT (0x30) +#define MPI2_SGE_FLAGS_ELEMENT_MASK (0x30) + +/*Address location */ + +#define MPI2_SGE_FLAGS_SYSTEM_ADDRESS (0x00) + +/*Direction */ + +#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00) +#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04) + +#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST) +#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC) + +/*Address Size */ + +#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00) +#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02) + +/*Context Size */ + +#define MPI2_SGE_FLAGS_32_BIT_CONTEXT (0x00) +#define MPI2_SGE_FLAGS_64_BIT_CONTEXT (0x02) +#define MPI2_SGE_FLAGS_96_BIT_CONTEXT (0x04) +#define MPI2_SGE_FLAGS_128_BIT_CONTEXT (0x06) + +#define MPI2_SGE_CHAIN_OFFSET_MASK (0x00FF0000) +#define MPI2_SGE_CHAIN_OFFSET_SHIFT (16) + +/**************************************************************************** +* MPI SGE operation Macros +****************************************************************************/ + +/*SIMPLE FlagsLength manipulations... */ +#define MPI2_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_SGE_FLAGS_SHIFT) +#define MPI2_SGE_GET_FLAGS(f) (((f) & ~MPI2_SGE_LENGTH_MASK) >> \ + MPI2_SGE_FLAGS_SHIFT) +#define MPI2_SGE_LENGTH(f) ((f) & MPI2_SGE_LENGTH_MASK) +#define MPI2_SGE_CHAIN_LENGTH(f) ((f) & MPI2_SGE_CHAIN_LENGTH_MASK) + +#define MPI2_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_SGE_SET_FLAGS(f) | \ + MPI2_SGE_LENGTH(l)) + +#define MPI2_pSGE_GET_FLAGS(psg) MPI2_SGE_GET_FLAGS((psg)->FlagsLength) +#define MPI2_pSGE_GET_LENGTH(psg) MPI2_SGE_LENGTH((psg)->FlagsLength) +#define MPI2_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \ + MPI2_SGE_SET_FLAGS_LENGTH(f, l)) + +/*CAUTION - The following are READ-MODIFY-WRITE! 
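As a worked example of the FlagsLength macros above, the sketch below composes a single host-to-IOC 64-bit simple element that also terminates the buffer and the list. It is illustrative only, not upstream driver code: the example_* name is hypothetical, and the endian conversion a real driver performs before posting the SGE is left out for brevity.

/* Illustrative only: one MPI-format simple SGE covering a whole buffer. */
static inline void example_build_mpi_sge64(MPI2_SGE_SIMPLE64 *sge,
					   U64 dma_addr, U32 byte_count)
{
	U32 flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
		    MPI2_SGE_FLAGS_HOST_TO_IOC |
		    MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST;

	/* Flags occupy bits 31:24 of FlagsLength, the length bits 23:0. */
	sge->FlagsLength = MPI2_SGE_SET_FLAGS_LENGTH(flags, byte_count);
	sge->Address = dma_addr;
}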
*/ +#define MPI2_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \ + MPI2_SGE_SET_FLAGS(f)) +#define MPI2_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \ + MPI2_SGE_LENGTH(l)) + +#define MPI2_GET_CHAIN_OFFSET(x) ((x & MPI2_SGE_CHAIN_OFFSET_MASK) >> \ + MPI2_SGE_CHAIN_OFFSET_SHIFT) + +/***************************************************************************** +* +* Fusion-MPT IEEE Scatter Gather Elements +* +*****************************************************************************/ + +/**************************************************************************** +* IEEE Simple Element structures +****************************************************************************/ + +/*MPI2_IEEE_SGE_SIMPLE32 is for MPI v2.0 products only */ +typedef struct _MPI2_IEEE_SGE_SIMPLE32 { + U32 Address; + U32 FlagsLength; +} MPI2_IEEE_SGE_SIMPLE32, *PTR_MPI2_IEEE_SGE_SIMPLE32, + Mpi2IeeeSgeSimple32_t, *pMpi2IeeeSgeSimple32_t; + +typedef struct _MPI2_IEEE_SGE_SIMPLE64 { + U64 Address; + U32 Length; + U16 Reserved1; + U8 Reserved2; + U8 Flags; +} MPI2_IEEE_SGE_SIMPLE64, *PTR_MPI2_IEEE_SGE_SIMPLE64, + Mpi2IeeeSgeSimple64_t, *pMpi2IeeeSgeSimple64_t; + +typedef union _MPI2_IEEE_SGE_SIMPLE_UNION { + MPI2_IEEE_SGE_SIMPLE32 Simple32; + MPI2_IEEE_SGE_SIMPLE64 Simple64; +} MPI2_IEEE_SGE_SIMPLE_UNION, + *PTR_MPI2_IEEE_SGE_SIMPLE_UNION, + Mpi2IeeeSgeSimpleUnion_t, + *pMpi2IeeeSgeSimpleUnion_t; + +/**************************************************************************** +* IEEE Chain Element structures +****************************************************************************/ + +/*MPI2_IEEE_SGE_CHAIN32 is for MPI v2.0 products only */ +typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32; + +/*MPI2_IEEE_SGE_CHAIN64 is for MPI v2.0 products only */ +typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64; + +typedef union _MPI2_IEEE_SGE_CHAIN_UNION { + MPI2_IEEE_SGE_CHAIN32 Chain32; + MPI2_IEEE_SGE_CHAIN64 Chain64; +} MPI2_IEEE_SGE_CHAIN_UNION, + *PTR_MPI2_IEEE_SGE_CHAIN_UNION, + Mpi2IeeeSgeChainUnion_t, + *pMpi2IeeeSgeChainUnion_t; + +/*MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 and later */ +typedef struct _MPI25_IEEE_SGE_CHAIN64 { + U64 Address; + U32 Length; + U16 Reserved1; + U8 NextChainOffset; + U8 Flags; +} MPI25_IEEE_SGE_CHAIN64, + *PTR_MPI25_IEEE_SGE_CHAIN64, + Mpi25IeeeSgeChain64_t, + *pMpi25IeeeSgeChain64_t; + +/**************************************************************************** +* All IEEE SGE types union +****************************************************************************/ + +/*MPI2_IEEE_SGE_UNION is for MPI v2.0 products only */ +typedef struct _MPI2_IEEE_SGE_UNION { + union { + MPI2_IEEE_SGE_SIMPLE_UNION Simple; + MPI2_IEEE_SGE_CHAIN_UNION Chain; + } u; +} MPI2_IEEE_SGE_UNION, *PTR_MPI2_IEEE_SGE_UNION, + Mpi2IeeeSgeUnion_t, *pMpi2IeeeSgeUnion_t; + +/**************************************************************************** +* IEEE SGE union for IO SGL's +****************************************************************************/ + +typedef union _MPI25_SGE_IO_UNION { + MPI2_IEEE_SGE_SIMPLE64 IeeeSimple; + MPI25_IEEE_SGE_CHAIN64 IeeeChain; +} MPI25_SGE_IO_UNION, *PTR_MPI25_SGE_IO_UNION, + Mpi25SGEIOUnion_t, *pMpi25SGEIOUnion_t; + +/**************************************************************************** +* IEEE SGE field definitions and masks +****************************************************************************/ + +/*Flags field bit definitions */ + +#define MPI2_IEEE_SGE_FLAGS_ELEMENT_TYPE_MASK (0x80) +#define MPI25_IEEE_SGE_FLAGS_END_OF_LIST (0x40) + +#define 
MPI2_IEEE32_SGE_FLAGS_SHIFT (24) + +#define MPI2_IEEE32_SGE_LENGTH_MASK (0x00FFFFFF) + +/*Element Type */ + +#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00) +#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) + +/*Next Segment Format */ + +#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C) +#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00) +#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08) +#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10) + +/*Data Location Address Space */ + +#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03) +#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) +#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) +#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02) +#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) +#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03) +#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \ + (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) +#define MPI26_IEEE_SGE_FLAGS_IOCCTL_ADDR (0x02) + +/**************************************************************************** +* IEEE SGE operation Macros +****************************************************************************/ + +/*SIMPLE FlagsLength manipulations... */ +#define MPI2_IEEE32_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_IEEE32_SGE_FLAGS_SHIFT) +#define MPI2_IEEE32_SGE_GET_FLAGS(f) (((f) & ~MPI2_IEEE32_SGE_LENGTH_MASK) \ + >> MPI2_IEEE32_SGE_FLAGS_SHIFT) +#define MPI2_IEEE32_SGE_LENGTH(f) ((f) & MPI2_IEEE32_SGE_LENGTH_MASK) + +#define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) |\ + MPI2_IEEE32_SGE_LENGTH(l)) + +#define MPI2_IEEE32_pSGE_GET_FLAGS(psg) \ + MPI2_IEEE32_SGE_GET_FLAGS((psg)->FlagsLength) +#define MPI2_IEEE32_pSGE_GET_LENGTH(psg) \ + MPI2_IEEE32_SGE_LENGTH((psg)->FlagsLength) +#define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \ + MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l)) + +/*CAUTION - The following are READ-MODIFY-WRITE! 
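For comparison with the MPI-format macros, the sketch below fills one IEEE-format 64-bit simple element, the layout MPI v2.5 and later products use for I/O SGLs. It is illustrative only and not upstream code; the example_* name is hypothetical and endian conversion is again omitted.

/* Illustrative only: one IEEE simple SGE that ends the list. */
static inline void example_build_ieee_sge64(MPI2_IEEE_SGE_SIMPLE64 *sge,
					    U64 dma_addr, U32 byte_count)
{
	sge->Address = dma_addr;
	sge->Length = byte_count;
	sge->Reserved1 = 0;
	sge->Reserved2 = 0;
	/* Simple element, data in system memory, last element of the SGL. */
	sge->Flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
		     MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
}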
*/ +#define MPI2_IEEE32_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \ + MPI2_IEEE32_SGE_SET_FLAGS(f)) +#define MPI2_IEEE32_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \ + MPI2_IEEE32_SGE_LENGTH(l)) + +/***************************************************************************** +* +* Fusion-MPT MPI/IEEE Scatter Gather Unions +* +*****************************************************************************/ + +typedef union _MPI2_SIMPLE_SGE_UNION { + MPI2_SGE_SIMPLE_UNION MpiSimple; + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; +} MPI2_SIMPLE_SGE_UNION, *PTR_MPI2_SIMPLE_SGE_UNION, + Mpi2SimpleSgeUntion_t, *pMpi2SimpleSgeUntion_t; + +typedef union _MPI2_SGE_IO_UNION { + MPI2_SGE_SIMPLE_UNION MpiSimple; + MPI2_SGE_CHAIN_UNION MpiChain; + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; + MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; +} MPI2_SGE_IO_UNION, *PTR_MPI2_SGE_IO_UNION, + Mpi2SGEIOUnion_t, *pMpi2SGEIOUnion_t; + +/**************************************************************************** +* +* Values for SGLFlags field, used in many request messages with an SGL +* +****************************************************************************/ + +/*values for MPI SGL Data Location Address Space subfield */ +#define MPI2_SGLFLAGS_ADDRESS_SPACE_MASK (0x0C) +#define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00) +#define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04) +#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) +#define MPI26_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) +#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C) +/*values for SGL Type subfield */ +#define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03) +#define MPI2_SGLFLAGS_SGL_TYPE_MPI (0x00) +#define MPI2_SGLFLAGS_SGL_TYPE_IEEE32 (0x01) +#define MPI2_SGLFLAGS_SGL_TYPE_IEEE64 (0x02) + +#endif diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h new file mode 100644 index 000000000..4d0be5ab9 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -0,0 +1,4093 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_cnfg.h + * Title: MPI Configuration messages and pages + * Creation Date: November 10, 2006 + * + * mpi2_cnfg.h Version: 02.00.47 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags. + * Added Manufacturing Page 11. + * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE + * define. + * 06-26-07 02.00.02 Adding generic structure for product-specific + * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS. + * Rework of BIOS Page 2 configuration page. + * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the + * forms. + * Added configuration pages IOC Page 8 and Driver + * Persistent Mapping Page 0. + * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated + * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1, + * RAID Physical Disk Pages 0 and 1, RAID Configuration + * Page 0). + * Added new value for AccessStatus field of SAS Device + * Page 0 (_SATA_NEEDS_INITIALIZATION). 
+ * 10-31-07 02.00.04 Added missing SEPDevHandle field to + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for + * NVDATA. + * Modified IOC Page 7 to use masks and added field for + * SASBroadcastPrimitiveMasks. + * Added MPI2_CONFIG_PAGE_BIOS_4. + * Added MPI2_CONFIG_PAGE_LOG_0. + * 02-29-08 02.00.06 Modified various names to make them 32-character unique. + * Added SAS Device IDs. + * Updated Integrated RAID configuration pages including + * Manufacturing Page 4, IOC Page 6, and RAID Configuration + * Page 0. + * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA. + * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION. + * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING. + * Added missing MaxNumRoutedSasAddresses field to + * MPI2_CONFIG_PAGE_EXPANDER_0. + * Added SAS Port Page 0. + * Modified structure layout for + * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0. + * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use + * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array. + * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF + * to 0x000000FF. + * Added two new values for the Physical Disk Coercion Size + * bits in the Flags field of Manufacturing Page 4. + * Added product-specific Manufacturing pages 16 to 31. + * Modified Flags bits for controlling write cache on SATA + * drives in IO Unit Page 1. + * Added new bit to AdditionalControlFlags of SAS IO Unit + * Page 1 to control Invalid Topology Correction. + * Added additional defines for RAID Volume Page 0 + * VolumeStatusFlags field. + * Modified meaning of RAID Volume Page 0 VolumeSettings + * define for auto-configure of hot-swap drives. + * Added SupportedPhysDisks field to RAID Volume Page 1 and + * added related defines. + * Added PhysDiskAttributes field (and related defines) to + * RAID Physical Disk Page 0. + * Added MPI2_SAS_PHYINFO_PHY_VACANT define. + * Added three new DiscoveryStatus bits for SAS IO Unit + * Page 0 and SAS Expander Page 0. + * Removed multiplexing information from SAS IO Unit pages. + * Added BootDeviceWaitTime field to SAS IO Unit Page 4. + * Removed Zone Address Resolved bit from PhyInfo and from + * Expander Page 0 Flags field. + * Added two new AccessStatus values to SAS Device Page 0 + * for indicating routing problems. Added 3 reserved words + * to this page. + * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3. + * Inserted missing reserved field into structure for IOC + * Page 6. + * Added more pending task bits to RAID Volume Page 0 + * VolumeStatusFlags defines. + * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define. + * Added a new DiscoveryStatus bit for SAS IO Unit Page 0 + * and SAS Expander Page 0 to flag a downstream initiator + * when in simplified routing mode. + * Removed SATA Init Failure defines for DiscoveryStatus + * fields of SAS IO Unit Page 0 and SAS Expander Page 0. + * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define. + * Added PortGroups, DmaGroup, and ControlGroup fields to + * SAS Device Page 0. + * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO + * Unit Page 6. + * Added expander reduced functionality data to SAS + * Expander Page 0. + * Added SAS PHY Page 2 and SAS PHY Page 3. + * 07-30-09 02.00.12 Added IO Unit Page 7. + * Added new device ids. + * Added SAS IO Unit Page 5. + * Added partial and slumber power management capable flags + * to SAS Device Page 0 Flags field. + * Added PhyInfo defines for power condition. 
+ * Added Ethernet configuration pages. + * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. + * Added SAS PHY Page 4 structure and defines. + * 02-10-10 02.00.14 Modified the comments for the configuration page + * structures that contain an array of data. The host + * should use the "count" field in the page data (e.g. the + * NumPhys field) to determine the number of valid elements + * in the array. + * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines. + * Added PowerManagementCapabilities to IO Unit Page 7. + * Added PortWidthModGroup field to + * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines. + * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT + * define. + * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define. + * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. + * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing) + * defines. + * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to + * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for + * the Pinout field. + * Added BoardTemperature and BoardTemperatureUnits fields + * to MPI2_CONFIG_PAGE_IO_UNIT_7. + * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define + * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure. + * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST. + * Added IO Unit Page 8, IO Unit Page 9, + * and IO Unit Page 10. + * Added SASNotifyPrimitiveMasks field to + * MPI2_CONFIG_PAGE_IOC_7. + * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec). + * 05-25-11 02.00.20 Cleaned up a few comments. + * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities + * for PCIe link as obsolete. + * Added SpinupFlags field containing a Disable Spin-up bit + * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO + * Unit Page 4. + * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT. + * Added UEFIVersion field to BIOS Page 1 and defined new + * BiosOptions bits. + * Incorporating additions for MPI v2.5. + * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER. + * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID. + * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as + * obsolete for MPI v2.5 and later. + * Added some defines for 12G SAS speeds. + * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK. + * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to + * match the specification. + * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for + * future use. + * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for + * MPI2_CONFIG_PAGE_MAN_7. + * Added EnclosureLevel and ConnectorName fields to + * MPI2_CONFIG_PAGE_SAS_DEV_0. + * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for + * MPI2_CONFIG_PAGE_SAS_DEV_0. + * Added EnclosureLevel field to + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * 01-08-14 02.00.28 Added more defines for the BiosOptions field of + * MPI2_CONFIG_PAGE_BIOS_1. + * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and + * more defines for the BiosOptions field. + * 11-18-14 02.00.30 Updated copyright information. + * Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG. + * Added AdapterOrderAux fields to BIOS Page 3. 
+ * 03-16-15 02.00.31 Updated for MPI v2.6. + * Added Flags field to IO Unit Page 7. + * Added new SAS Phy Event codes + * 05-25-15 02.00.33 Added more defines for the BiosOptions field of + * MPI2_CONFIG_PAGE_BIOS_1. + * 08-25-15 02.00.34 Bumped Header Version. + * 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4. + * 01-21-16 02.00.36 Added/modified MPI2_MFGPAGE_DEVID_SAS defines. + * Added Link field to PCIe Link Pages + * Added EnclosureLevel and ConnectorName to PCIe + * Device Page 0. + * Added define for PCIE IoUnit page 1 max rate shift. + * Added comment for reserved ExtPageTypes. + * Added SAS 4 22.5 gbs speed support. + * Added PCIe 4 16.0 GT/sec speec support. + * Removed AHCI support. + * Removed SOP support. + * Added NegotiatedLinkRate and NegotiatedPortWidth to + * PCIe device page 0. + * 04-10-16 02.00.37 Fixed MPI2_MFGPAGE_DEVID_SAS3616/3708 defines + * 07-01-16 02.00.38 Added Manufacturing page 7 Connector types. + * Changed declaration of ConnectorName in PCIe DevicePage0 + * to match SAS DevicePage 0. + * Added SATADeviceWaitTime to IO Unit Page 11. + * Added MPI26_MFGPAGE_DEVID_SAS4008 + * Added x16 PCIe width to IO Unit Page 7 + * Added LINKFLAGS to control SRIS in PCIe IO Unit page 1 + * phy data. + * Added InitStatus to PCIe IO Unit Page 1 header. + * 09-01-16 02.00.39 Added MPI26_CONFIG_PAGE_ENCLOSURE_0 and related defines. + * Added MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE and + * MPI26_ENCLOS_PGAD_FORM_HANDLE page address formats. + * 02-02-17 02.00.40 Added MPI2_MANPAGE7_SLOT_UNKNOWN. + * Added ChassisSlot field to SAS Enclosure Page 0. + * Added ChassisSlot Valid bit (bit 5) to the Flags field + * in SAS Enclosure Page 0. + * 06-13-17 02.00.41 Added MPI26_MFGPAGE_DEVID_SAS3816 and + * MPI26_MFGPAGE_DEVID_SAS3916 defines. + * Removed MPI26_MFGPAGE_DEVID_SAS4008 define. + * Added MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN define. + * Renamed PI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS to + * PI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN. + * Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS to + * MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK. + * 09-29-17 02.00.42 Added ControllerResetTO field to PCIe Device Page 2. + * Added NOIOB field to PCIe Device Page 2. + * Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to + * the Capabilities field of PCIe Device Page 2. + * 07-22-18 02.00.43 Added defines for SAS3916 and SAS3816. + * Added WRiteCache defines to IO Unit Page 1. + * Added MaxEnclosureLevel to BIOS Page 1. + * Added OEMRD to SAS Enclosure Page 1. + * Added DMDReportPCIe to PCIe IO Unit Page 1. + * Added Flags field and flags for Retimers to + * PCIe Switch Page 1. + * 08-02-18 02.00.44 Added Slotx2, Slotx4 to ManPage 7. + * 08-15-18 02.00.45 Added ProductSpecific field at end of IOC Page 1 + * 08-28-18 02.00.46 Added NVMs Write Cache flag to IOUnitPage1 + * Added DMDReport Delay Time defines to + * PCIeIOUnitPage1 + * -------------------------------------------------------------------------- + * 08-02-18 02.00.44 Added Slotx2, Slotx4 to ManPage 7. + * 08-15-18 02.00.45 Added ProductSpecific field at end of IOC Page 1 + * 08-28-18 02.00.46 Added NVMs Write Cache flag to IOUnitPage1 + * Added DMDReport Delay Time defines to PCIeIOUnitPage1 + * 12-17-18 02.00.47 Swap locations of Slotx2 and Slotx4 in ManPage 7. 
+ * 08-01-19 02.00.49 Add MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID + * Add MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT + */ + +#ifndef MPI2_CNFG_H +#define MPI2_CNFG_H + +/***************************************************************************** +* Configuration Page Header and defines +*****************************************************************************/ + +/*Config Page Header */ +typedef struct _MPI2_CONFIG_PAGE_HEADER { + U8 PageVersion; /*0x00 */ + U8 PageLength; /*0x01 */ + U8 PageNumber; /*0x02 */ + U8 PageType; /*0x03 */ +} MPI2_CONFIG_PAGE_HEADER, *PTR_MPI2_CONFIG_PAGE_HEADER, + Mpi2ConfigPageHeader_t, *pMpi2ConfigPageHeader_t; + +typedef union _MPI2_CONFIG_PAGE_HEADER_UNION { + MPI2_CONFIG_PAGE_HEADER Struct; + U8 Bytes[4]; + U16 Word16[2]; + U32 Word32; +} MPI2_CONFIG_PAGE_HEADER_UNION, *PTR_MPI2_CONFIG_PAGE_HEADER_UNION, + Mpi2ConfigPageHeaderUnion, *pMpi2ConfigPageHeaderUnion; + +/*Extended Config Page Header */ +typedef struct _MPI2_CONFIG_EXTENDED_PAGE_HEADER { + U8 PageVersion; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 PageNumber; /*0x02 */ + U8 PageType; /*0x03 */ + U16 ExtPageLength; /*0x04 */ + U8 ExtPageType; /*0x06 */ + U8 Reserved2; /*0x07 */ +} MPI2_CONFIG_EXTENDED_PAGE_HEADER, + *PTR_MPI2_CONFIG_EXTENDED_PAGE_HEADER, + Mpi2ConfigExtendedPageHeader_t, + *pMpi2ConfigExtendedPageHeader_t; + +typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION { + MPI2_CONFIG_PAGE_HEADER Struct; + MPI2_CONFIG_EXTENDED_PAGE_HEADER Ext; + U8 Bytes[8]; + U16 Word16[4]; + U32 Word32[2]; +} MPI2_CONFIG_EXT_PAGE_HEADER_UNION, + *PTR_MPI2_CONFIG_EXT_PAGE_HEADER_UNION, + Mpi2ConfigPageExtendedHeaderUnion, + *pMpi2ConfigPageExtendedHeaderUnion; + + +/*PageType field values */ +#define MPI2_CONFIG_PAGEATTR_READ_ONLY (0x00) +#define MPI2_CONFIG_PAGEATTR_CHANGEABLE (0x10) +#define MPI2_CONFIG_PAGEATTR_PERSISTENT (0x20) +#define MPI2_CONFIG_PAGEATTR_MASK (0xF0) + +#define MPI2_CONFIG_PAGETYPE_IO_UNIT (0x00) +#define MPI2_CONFIG_PAGETYPE_IOC (0x01) +#define MPI2_CONFIG_PAGETYPE_BIOS (0x02) +#define MPI2_CONFIG_PAGETYPE_RAID_VOLUME (0x08) +#define MPI2_CONFIG_PAGETYPE_MANUFACTURING (0x09) +#define MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A) +#define MPI2_CONFIG_PAGETYPE_EXTENDED (0x0F) +#define MPI2_CONFIG_PAGETYPE_MASK (0x0F) + +#define MPI2_CONFIG_TYPENUM_MASK (0x0FFF) + + +/*ExtPageType field values */ +#define MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_PHY (0x13) +#define MPI2_CONFIG_EXTPAGETYPE_LOG (0x14) +#define MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15) +#define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16) +#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) +#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19) +#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A) +#define MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT (0x1B) +#define MPI2_CONFIG_EXTPAGETYPE_PCIE_SWITCH (0x1C) +#define MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE (0x1D) +#define MPI2_CONFIG_EXTPAGETYPE_PCIE_LINK (0x1E) + + +/***************************************************************************** +* PageAddress defines +*****************************************************************************/ + +/*RAID Volume PageAddress format */ +#define MPI2_RAID_VOLUME_PGAD_FORM_MASK (0xF0000000) +#define MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI2_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000) + +#define MPI2_RAID_VOLUME_PGAD_HANDLE_MASK 
(0x0000FFFF) + + +/*RAID Physical Disk PageAddress format */ +#define MPI2_PHYSDISK_PGAD_FORM_MASK (0xF0000000) +#define MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000) +#define MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000) +#define MPI2_PHYSDISK_PGAD_FORM_DEVHANDLE (0x20000000) + +#define MPI2_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF) +#define MPI2_PHYSDISK_PGAD_DEVHANDLE_MASK (0x0000FFFF) + + +/*SAS Expander PageAddress format */ +#define MPI2_SAS_EXPAND_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL (0x00000000) +#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000) +#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000) + +#define MPI2_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000FFFF) +#define MPI2_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00FF0000) +#define MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16) + + +/*SAS Device PageAddress format */ +#define MPI2_SAS_DEVICE_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI2_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000) + +#define MPI2_SAS_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF) + + +/*SAS PHY PageAddress format */ +#define MPI2_SAS_PHY_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000) +#define MPI2_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX (0x10000000) + +#define MPI2_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000FF) +#define MPI2_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK (0x0000FFFF) + + +/*SAS Port PageAddress format */ +#define MPI2_SASPORT_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000) +#define MPI2_SASPORT_PGAD_FORM_PORT_NUM (0x10000000) + +#define MPI2_SASPORT_PGAD_PORTNUMBER_MASK (0x00000FFF) + + +/*SAS Enclosure PageAddress format */ +#define MPI2_SAS_ENCLOS_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000) + +#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF) + +/*Enclosure PageAddress format */ +#define MPI26_ENCLOS_PGAD_FORM_MASK (0xF0000000) +#define MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI26_ENCLOS_PGAD_FORM_HANDLE (0x10000000) + +#define MPI26_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF) + +/*RAID Configuration PageAddress format */ +#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000) +#define MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000) +#define MPI2_RAID_PGAD_FORM_CONFIGNUM (0x10000000) +#define MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG (0x20000000) + +#define MPI2_RAID_PGAD_CONFIGNUM_MASK (0x000000FF) + + +/*Driver Persistent Mapping PageAddress format */ +#define MPI2_DPM_PGAD_FORM_MASK (0xF0000000) +#define MPI2_DPM_PGAD_FORM_ENTRY_RANGE (0x00000000) + +#define MPI2_DPM_PGAD_ENTRY_COUNT_MASK (0x0FFF0000) +#define MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT (16) +#define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF) + + +/*Ethernet PageAddress format */ +#define MPI2_ETHERNET_PGAD_FORM_MASK (0xF0000000) +#define MPI2_ETHERNET_PGAD_FORM_IF_NUM (0x00000000) + +#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF) + + +/*PCIe Switch PageAddress format */ +#define MPI26_PCIE_SWITCH_PGAD_FORM_MASK (0xF0000000) +#define MPI26_PCIE_SWITCH_PGAD_FORM_GET_NEXT_HNDL (0x00000000) +#define MPI26_PCIE_SWITCH_PGAD_FORM_HNDL_PORTNUM (0x10000000) +#define MPI26_PCIE_SWITCH_EXPAND_PGAD_FORM_HNDL (0x20000000) + +#define MPI26_PCIE_SWITCH_PGAD_HANDLE_MASK (0x0000FFFF) +#define MPI26_PCIE_SWITCH_PGAD_PORTNUM_MASK (0x00FF0000) +#define MPI26_PCIE_SWITCH_PGAD_PORTNUM_SHIFT (16) + + +/*PCIe Device PageAddress format */ +#define 
MPI26_PCIE_DEVICE_PGAD_FORM_MASK (0xF0000000) +#define MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE (0x20000000) + +#define MPI26_PCIE_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF) + +/*PCIe Link PageAddress format */ +#define MPI26_PCIE_LINK_PGAD_FORM_MASK (0xF0000000) +#define MPI26_PCIE_LINK_PGAD_FORM_GET_NEXT_LINK (0x00000000) +#define MPI26_PCIE_LINK_PGAD_FORM_LINK_NUM (0x10000000) + +#define MPI26_PCIE_DEVICE_PGAD_LINKNUM_MASK (0x000000FF) + + + +/**************************************************************************** +* Configuration messages +****************************************************************************/ + +/*Configuration Request Message */ +typedef struct _MPI2_CONFIG_REQUEST { + U8 Action; /*0x00 */ + U8 SGLFlags; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 ExtPageLength; /*0x04 */ + U8 ExtPageType; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U8 Reserved2; /*0x0C */ + U8 ProxyVF_ID; /*0x0D */ + U16 Reserved4; /*0x0E */ + U32 Reserved3; /*0x10 */ + MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */ + U32 PageAddress; /*0x18 */ + MPI2_SGE_IO_UNION PageBufferSGE; /*0x1C */ +} MPI2_CONFIG_REQUEST, *PTR_MPI2_CONFIG_REQUEST, + Mpi2ConfigRequest_t, *pMpi2ConfigRequest_t; + +/*values for the Action field */ +#define MPI2_CONFIG_ACTION_PAGE_HEADER (0x00) +#define MPI2_CONFIG_ACTION_PAGE_READ_CURRENT (0x01) +#define MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02) +#define MPI2_CONFIG_ACTION_PAGE_DEFAULT (0x03) +#define MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04) +#define MPI2_CONFIG_ACTION_PAGE_READ_DEFAULT (0x05) +#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06) +#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07) + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + + +/*Config Reply Message */ +typedef struct _MPI2_CONFIG_REPLY { + U8 Action; /*0x00 */ + U8 SGLFlags; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 ExtPageLength; /*0x04 */ + U8 ExtPageType; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U16 Reserved2; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */ +} MPI2_CONFIG_REPLY, *PTR_MPI2_CONFIG_REPLY, + Mpi2ConfigReply_t, *pMpi2ConfigReply_t; + + + +/***************************************************************************** +* +* C o n f i g u r a t i o n P a g e s +* +*****************************************************************************/ + +/**************************************************************************** +* Manufacturing Config pages +****************************************************************************/ + +#define MPI2_MFGPAGE_VENDORID_LSI (0x1000) +#define MPI2_MFGPAGE_VENDORID_ATTO (0x117C) + +/*MPI v2.0 SAS products */ +#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070) +#define MPI2_MFGPAGE_DEVID_SAS2008 (0x0072) +#define MPI2_MFGPAGE_DEVID_SAS2108_1 (0x0074) +#define MPI2_MFGPAGE_DEVID_SAS2108_2 (0x0076) +#define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077) +#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) +#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) + +#define MPI2_MFGPAGE_DEVID_SSS6200 (0x007E) + +#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080) +#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081) +#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082) +#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083) +#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084) +#define 
MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085) +#define MPI2_MFGPAGE_DEVID_SAS2308_1 (0x0086) +#define MPI2_MFGPAGE_DEVID_SAS2308_2 (0x0087) +#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E) +#define MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP (0x02B0) +#define MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1 (0x02B1) + +/*MPI v2.5 SAS products */ +#define MPI25_MFGPAGE_DEVID_SAS3004 (0x0096) +#define MPI25_MFGPAGE_DEVID_SAS3008 (0x0097) +#define MPI25_MFGPAGE_DEVID_SAS3108_1 (0x0090) +#define MPI25_MFGPAGE_DEVID_SAS3108_2 (0x0091) +#define MPI25_MFGPAGE_DEVID_SAS3108_5 (0x0094) +#define MPI25_MFGPAGE_DEVID_SAS3108_6 (0x0095) + +/* MPI v2.6 SAS Products */ +#define MPI26_MFGPAGE_DEVID_SAS3216 (0x00C9) +#define MPI26_MFGPAGE_DEVID_SAS3224 (0x00C4) +#define MPI26_MFGPAGE_DEVID_SAS3316_1 (0x00C5) +#define MPI26_MFGPAGE_DEVID_SAS3316_2 (0x00C6) +#define MPI26_MFGPAGE_DEVID_SAS3316_3 (0x00C7) +#define MPI26_MFGPAGE_DEVID_SAS3316_4 (0x00C8) +#define MPI26_MFGPAGE_DEVID_SAS3324_1 (0x00C0) +#define MPI26_MFGPAGE_DEVID_SAS3324_2 (0x00C1) +#define MPI26_MFGPAGE_DEVID_SAS3324_3 (0x00C2) +#define MPI26_MFGPAGE_DEVID_SAS3324_4 (0x00C3) + +#define MPI26_MFGPAGE_DEVID_SAS3516 (0x00AA) +#define MPI26_MFGPAGE_DEVID_SAS3516_1 (0x00AB) +#define MPI26_MFGPAGE_DEVID_SAS3416 (0x00AC) +#define MPI26_MFGPAGE_DEVID_SAS3508 (0x00AD) +#define MPI26_MFGPAGE_DEVID_SAS3508_1 (0x00AE) +#define MPI26_MFGPAGE_DEVID_SAS3408 (0x00AF) +#define MPI26_MFGPAGE_DEVID_SAS3716 (0x00D0) +#define MPI26_MFGPAGE_DEVID_SAS3616 (0x00D1) +#define MPI26_MFGPAGE_DEVID_SAS3708 (0x00D2) + +#define MPI26_MFGPAGE_DEVID_SEC_MASK_3916 (0x0003) +#define MPI26_MFGPAGE_DEVID_INVALID0_3916 (0x00E0) +#define MPI26_MFGPAGE_DEVID_CFG_SEC_3916 (0x00E1) +#define MPI26_MFGPAGE_DEVID_HARD_SEC_3916 (0x00E2) +#define MPI26_MFGPAGE_DEVID_INVALID1_3916 (0x00E3) + +#define MPI26_MFGPAGE_DEVID_SEC_MASK_3816 (0x0003) +#define MPI26_MFGPAGE_DEVID_INVALID0_3816 (0x00E4) +#define MPI26_MFGPAGE_DEVID_CFG_SEC_3816 (0x00E5) +#define MPI26_MFGPAGE_DEVID_HARD_SEC_3816 (0x00E6) +#define MPI26_MFGPAGE_DEVID_INVALID1_3816 (0x00E7) + + +/*Manufacturing Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 ChipName[16]; /*0x04 */ + U8 ChipRevision[8]; /*0x14 */ + U8 BoardName[16]; /*0x1C */ + U8 BoardAssembly[16]; /*0x2C */ + U8 BoardTracerNumber[16]; /*0x3C */ +} MPI2_CONFIG_PAGE_MAN_0, + *PTR_MPI2_CONFIG_PAGE_MAN_0, + Mpi2ManufacturingPage0_t, + *pMpi2ManufacturingPage0_t; + +#define MPI2_MANUFACTURING0_PAGEVERSION (0x00) + + +/*Manufacturing Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 VPD[256]; /*0x04 */ +} MPI2_CONFIG_PAGE_MAN_1, + *PTR_MPI2_CONFIG_PAGE_MAN_1, + Mpi2ManufacturingPage1_t, + *pMpi2ManufacturingPage1_t; + +#define MPI2_MANUFACTURING1_PAGEVERSION (0x00) + + +typedef struct _MPI2_CHIP_REVISION_ID { + U16 DeviceID; /*0x00 */ + U8 PCIRevisionID; /*0x02 */ + U8 Reserved; /*0x03 */ +} MPI2_CHIP_REVISION_ID, *PTR_MPI2_CHIP_REVISION_ID, + Mpi2ChipRevisionId_t, *pMpi2ChipRevisionId_t; + + +/*Manufacturing Page 2 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check Header.PageLength at runtime. 
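The SAS3916/SAS3816 device IDs above appear to reserve the low two bits as a security-level code (MPI26_MFGPAGE_DEVID_SEC_MASK_3916/_3816), with only the CFG_SEC and HARD_SEC encodings naming shippable parts. The check below is an illustrative sketch of that reading, not upstream driver code; the example_* name is hypothetical.

/* Illustrative only: classify a PCI device ID from the SAS3916 family. */
static inline int example_sas3916_is_secure(U16 device_id)
{
	/* IDs 0x00E0..0x00E3 differ only in the low two security bits. */
	if ((device_id & ~MPI26_MFGPAGE_DEVID_SEC_MASK_3916) !=
	    MPI26_MFGPAGE_DEVID_INVALID0_3916)
		return 0;	/* not a 3916-family device ID */

	return device_id == MPI26_MFGPAGE_DEVID_CFG_SEC_3916 ||
	       device_id == MPI26_MFGPAGE_DEVID_HARD_SEC_3916;
}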
+ */ +#ifndef MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS +#define MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_MAN_2 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + MPI2_CHIP_REVISION_ID ChipId; /*0x04 */ + U32 + HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/*0x08 */ +} MPI2_CONFIG_PAGE_MAN_2, + *PTR_MPI2_CONFIG_PAGE_MAN_2, + Mpi2ManufacturingPage2_t, + *pMpi2ManufacturingPage2_t; + +#define MPI2_MANUFACTURING2_PAGEVERSION (0x00) + + +/*Manufacturing Page 3 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check Header.PageLength at runtime. + */ +#ifndef MPI2_MAN_PAGE_3_INFO_WORDS +#define MPI2_MAN_PAGE_3_INFO_WORDS (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_MAN_3 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + MPI2_CHIP_REVISION_ID ChipId; /*0x04 */ + U32 + Info[MPI2_MAN_PAGE_3_INFO_WORDS];/*0x08 */ +} MPI2_CONFIG_PAGE_MAN_3, + *PTR_MPI2_CONFIG_PAGE_MAN_3, + Mpi2ManufacturingPage3_t, + *pMpi2ManufacturingPage3_t; + +#define MPI2_MANUFACTURING3_PAGEVERSION (0x00) + + +/*Manufacturing Page 4 */ + +typedef struct _MPI2_MANPAGE4_PWR_SAVE_SETTINGS { + U8 PowerSaveFlags; /*0x00 */ + U8 InternalOperationsSleepTime; /*0x01 */ + U8 InternalOperationsRunTime; /*0x02 */ + U8 HostIdleTime; /*0x03 */ +} MPI2_MANPAGE4_PWR_SAVE_SETTINGS, + *PTR_MPI2_MANPAGE4_PWR_SAVE_SETTINGS, + Mpi2ManPage4PwrSaveSettings_t, + *pMpi2ManPage4PwrSaveSettings_t; + +/*defines for the PowerSaveFlags field */ +#define MPI2_MANPAGE4_MASK_POWERSAVE_MODE (0x03) +#define MPI2_MANPAGE4_POWERSAVE_MODE_DISABLED (0x00) +#define MPI2_MANPAGE4_CUSTOM_POWERSAVE_MODE (0x01) +#define MPI2_MANPAGE4_FULL_POWERSAVE_MODE (0x02) + +typedef struct _MPI2_CONFIG_PAGE_MAN_4 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Flags; /*0x08 */ + U8 InquirySize; /*0x0C */ + U8 Reserved2; /*0x0D */ + U16 Reserved3; /*0x0E */ + U8 InquiryData[56]; /*0x10 */ + U32 RAID0VolumeSettings; /*0x48 */ + U32 RAID1EVolumeSettings; /*0x4C */ + U32 RAID1VolumeSettings; /*0x50 */ + U32 RAID10VolumeSettings; /*0x54 */ + U32 Reserved4; /*0x58 */ + U32 Reserved5; /*0x5C */ + MPI2_MANPAGE4_PWR_SAVE_SETTINGS PowerSaveSettings; /*0x60 */ + U8 MaxOCEDisks; /*0x64 */ + U8 ResyncRate; /*0x65 */ + U16 DataScrubDuration; /*0x66 */ + U8 MaxHotSpares; /*0x68 */ + U8 MaxPhysDisksPerVol; /*0x69 */ + U8 MaxPhysDisks; /*0x6A */ + U8 MaxVolumes; /*0x6B */ +} MPI2_CONFIG_PAGE_MAN_4, + *PTR_MPI2_CONFIG_PAGE_MAN_4, + Mpi2ManufacturingPage4_t, + *pMpi2ManufacturingPage4_t; + +#define MPI2_MANUFACTURING4_PAGEVERSION (0x0A) + +/*Manufacturing Page 4 Flags field */ +#define MPI2_MANPAGE4_METADATA_SIZE_MASK (0x00030000) +#define MPI2_MANPAGE4_METADATA_512MB (0x00000000) + +#define MPI2_MANPAGE4_MIX_SSD_SAS_SATA (0x00008000) +#define MPI2_MANPAGE4_MIX_SSD_AND_NON_SSD (0x00004000) +#define MPI2_MANPAGE4_HIDE_PHYSDISK_NON_IR (0x00002000) + +#define MPI2_MANPAGE4_MASK_PHYSDISK_COERCION (0x00001C00) +#define MPI2_MANPAGE4_PHYSDISK_COERCION_1GB (0x00000000) +#define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION (0x00000400) +#define MPI2_MANPAGE4_PHYSDISK_ADAPTIVE_COERCION (0x00000800) +#define MPI2_MANPAGE4_PHYSDISK_ZERO_COERCION (0x00000C00) + +#define MPI2_MANPAGE4_MASK_BAD_BLOCK_MARKING (0x00000300) +#define MPI2_MANPAGE4_DEFAULT_BAD_BLOCK_MARKING (0x00000000) +#define MPI2_MANPAGE4_TABLE_BAD_BLOCK_MARKING (0x00000100) +#define MPI2_MANPAGE4_WRITE_LONG_BAD_BLOCK_MARKING (0x00000200) + +#define MPI2_MANPAGE4_FORCE_OFFLINE_FAILOVER (0x00000080) +#define MPI2_MANPAGE4_RAID10_DISABLE 
(0x00000040) +#define MPI2_MANPAGE4_RAID1E_DISABLE (0x00000020) +#define MPI2_MANPAGE4_RAID1_DISABLE (0x00000010) +#define MPI2_MANPAGE4_RAID0_DISABLE (0x00000008) +#define MPI2_MANPAGE4_IR_MODEPAGE8_DISABLE (0x00000004) +#define MPI2_MANPAGE4_IM_RESYNC_CACHE_ENABLE (0x00000002) +#define MPI2_MANPAGE4_IR_NO_MIX_SAS_SATA (0x00000001) + + +/*Manufacturing Page 5 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES +#define MPI2_MAN_PAGE_5_PHY_ENTRIES (1) +#endif + +typedef struct _MPI2_MANUFACTURING5_ENTRY { + U64 WWID; /*0x00 */ + U64 DeviceName; /*0x08 */ +} MPI2_MANUFACTURING5_ENTRY, + *PTR_MPI2_MANUFACTURING5_ENTRY, + Mpi2Manufacturing5Entry_t, + *pMpi2Manufacturing5Entry_t; + +typedef struct _MPI2_CONFIG_PAGE_MAN_5 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumPhys; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U32 Reserved3; /*0x08 */ + U32 Reserved4; /*0x0C */ + MPI2_MANUFACTURING5_ENTRY + Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/*0x08 */ +} MPI2_CONFIG_PAGE_MAN_5, + *PTR_MPI2_CONFIG_PAGE_MAN_5, + Mpi2ManufacturingPage5_t, + *pMpi2ManufacturingPage5_t; + +#define MPI2_MANUFACTURING5_PAGEVERSION (0x03) + + +/*Manufacturing Page 6 */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_6 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 ProductSpecificInfo;/*0x04 */ +} MPI2_CONFIG_PAGE_MAN_6, + *PTR_MPI2_CONFIG_PAGE_MAN_6, + Mpi2ManufacturingPage6_t, + *pMpi2ManufacturingPage6_t; + +#define MPI2_MANUFACTURING6_PAGEVERSION (0x00) + + +/*Manufacturing Page 7 */ + +typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO { + U32 Pinout; /*0x00 */ + U8 Connector[16]; /*0x04 */ + U8 Location; /*0x14 */ + U8 ReceptacleID; /*0x15 */ + U16 Slot; /*0x16 */ + U16 Slotx2; /*0x18 */ + U16 Slotx4; /*0x1A */ +} MPI2_MANPAGE7_CONNECTOR_INFO, + *PTR_MPI2_MANPAGE7_CONNECTOR_INFO, + Mpi2ManPage7ConnectorInfo_t, + *pMpi2ManPage7ConnectorInfo_t; + +/*defines for the Pinout field */ +#define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00) +#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8) + +#define MPI2_MANPAGE7_PINOUT_TYPE_MASK (0x000000FF) +#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN (0x00) +#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE (0x01) +#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x02) +#define MPI2_MANPAGE7_PINOUT_SFF_8486 (0x03) +#define MPI2_MANPAGE7_PINOUT_SFF_8484 (0x04) +#define MPI2_MANPAGE7_PINOUT_SFF_8087 (0x05) +#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I (0x06) +#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I (0x07) +#define MPI2_MANPAGE7_PINOUT_SFF_8470 (0x08) +#define MPI2_MANPAGE7_PINOUT_SFF_8088 (0x09) +#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X (0x0A) +#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B) +#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C) +#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D) +#define MPI2_MANPAGE7_PINOUT_SFF_8088_A (0x0E) +#define MPI2_MANPAGE7_PINOUT_SFF_8643_16i (0x0F) +#define MPI2_MANPAGE7_PINOUT_SFF_8654_4i (0x10) +#define MPI2_MANPAGE7_PINOUT_SFF_8654_8i (0x11) +#define MPI2_MANPAGE7_PINOUT_SFF_8611_4i (0x12) +#define MPI2_MANPAGE7_PINOUT_SFF_8611_8i (0x13) + +/*defines for the Location field */ +#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01) +#define MPI2_MANPAGE7_LOCATION_INTERNAL (0x02) +#define MPI2_MANPAGE7_LOCATION_EXTERNAL (0x04) +#define MPI2_MANPAGE7_LOCATION_SWITCHABLE (0x08) +#define MPI2_MANPAGE7_LOCATION_AUTO (0x10) +#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20) +#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80) + +/*defines for the Slot 
field */ +#define MPI2_MANPAGE7_SLOT_UNKNOWN (0xFFFF) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX +#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_MAN_7 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U32 Flags; /*0x0C */ + U8 EnclosureName[16]; /*0x10 */ + U8 NumPhys; /*0x20 */ + U8 Reserved3; /*0x21 */ + U16 Reserved4; /*0x22 */ + MPI2_MANPAGE7_CONNECTOR_INFO + ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /*0x24 */ +} MPI2_CONFIG_PAGE_MAN_7, + *PTR_MPI2_CONFIG_PAGE_MAN_7, + Mpi2ManufacturingPage7_t, + *pMpi2ManufacturingPage7_t; + +#define MPI2_MANUFACTURING7_PAGEVERSION (0x01) + +/*defines for the Flags field */ +#define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL (0x00000008) +#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) +#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) + +#define MPI26_MANPAGE7_FLAG_CONN_LANE_USE_PINOUT (0x00000020) +#define MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID (0x00000010) + +/* + *Generic structure to use for product-specific manufacturing pages + *(currently Manufacturing Page 8 through Manufacturing Page 31). + */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_PS { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 ProductSpecificInfo;/*0x04 */ +} MPI2_CONFIG_PAGE_MAN_PS, + *PTR_MPI2_CONFIG_PAGE_MAN_PS, + Mpi2ManufacturingPagePS_t, + *pMpi2ManufacturingPagePS_t; + +#define MPI2_MANUFACTURING8_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING9_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING10_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING11_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING12_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING13_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING14_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING15_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING16_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING17_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING18_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING19_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING20_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING21_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING22_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING23_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING24_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING25_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING26_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING27_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING28_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING29_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING30_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING31_PAGEVERSION (0x00) + + +/**************************************************************************** +* IO Unit Config Pages +****************************************************************************/ + +/*IO Unit Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U64 UniqueValue; /*0x04 */ + MPI2_VERSION_UNION NvdataVersionDefault; /*0x08 */ + MPI2_VERSION_UNION NvdataVersionPersistent; /*0x0A */ +} MPI2_CONFIG_PAGE_IO_UNIT_0, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_0, + Mpi2IOUnitPage0_t, *pMpi2IOUnitPage0_t; + +#define MPI2_IOUNITPAGE0_PAGEVERSION (0x02) + + +/*IO Unit Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Flags; /*0x04 */ +} MPI2_CONFIG_PAGE_IO_UNIT_1, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_1, + 
Mpi2IOUnitPage1_t, *pMpi2IOUnitPage1_t; + +#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04) + +/* IO Unit Page 1 Flags defines */ +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_MASK (0x00030000) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT (16) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00000000) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00010000) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00020000) +#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000) +#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000) +#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000) +#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800) +#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600) +#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9) +#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000) +#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200) +#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400) +#define MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100) +#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040) +#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020) +#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004) + + +/*IO Unit Page 3 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for GPIOCount at runtime. + */ +#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX +#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (36) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 GPIOCount; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U16 + GPIOVal[MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX];/*0x08 */ +} MPI2_CONFIG_PAGE_IO_UNIT_3, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_3, + Mpi2IOUnitPage3_t, *pMpi2IOUnitPage3_t; + +#define MPI2_IOUNITPAGE3_PAGEVERSION (0x01) + +/*defines for IO Unit Page 3 GPIOVal field */ +#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFFFC) +#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2) +#define MPI2_IOUNITPAGE3_GPIO_SETTING_OFF (0x0000) +#define MPI2_IOUNITPAGE3_GPIO_SETTING_ON (0x0001) + + +/*IO Unit Page 5 */ + +/* + *Upper layer code (drivers, utilities, etc.) should leave this define set to + *one and check the value returned for NumDmaEngines at runtime. 
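IO Unit Page 3 above illustrates the count-field pattern these comments keep pointing out: GPIOCount tells the host how many GPIOVal entries the IOC actually returned, and each entry packs a function code in bits 15:2 with the on/off setting in bits 1:0. The loop below is an illustrative sketch, not upstream code; the example_* name is hypothetical and the page buffer is assumed to have been allocated large enough (from the returned PageLength) to hold GPIOCount entries.

/* Illustrative only: walk the GPIO table reported in IO Unit Page 3. */
static inline void example_walk_gpio_vals(const MPI2_CONFIG_PAGE_IO_UNIT_3 *pg3)
{
	U8 i;

	for (i = 0; i < pg3->GPIOCount; i++) {
		U16 func = (pg3->GPIOVal[i] &
			    MPI2_IOUNITPAGE3_GPIO_FUNCTION_MASK) >>
			   MPI2_IOUNITPAGE3_GPIO_FUNCTION_SHIFT;
		U16 on = pg3->GPIOVal[i] & MPI2_IOUNITPAGE3_GPIO_SETTING_ON;

		(void)func;	/* a real consumer would act on these */
		(void)on;
	}
}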
+ */ +#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES +#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U64 + RaidAcceleratorBufferBaseAddress; /*0x04 */ + U64 + RaidAcceleratorBufferSize; /*0x0C */ + U64 + RaidAcceleratorControlBaseAddress; /*0x14 */ + U8 RAControlSize; /*0x1C */ + U8 NumDmaEngines; /*0x1D */ + U8 RAMinControlSize; /*0x1E */ + U8 RAMaxControlSize; /*0x1F */ + U32 Reserved1; /*0x20 */ + U32 Reserved2; /*0x24 */ + U32 Reserved3; /*0x28 */ + U32 + DmaEngineCapabilities[MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /*0x2C */ +} MPI2_CONFIG_PAGE_IO_UNIT_5, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_5, + Mpi2IOUnitPage5_t, *pMpi2IOUnitPage5_t; + +#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00) + +/*defines for IO Unit Page 5 DmaEngineCapabilities field */ +#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFFFF0000) +#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16) + +#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008) +#define MPI2_IOUNITPAGE5_DMA_CAP_PARITY_GENERATION (0x0004) +#define MPI2_IOUNITPAGE5_DMA_CAP_HASHING (0x0002) +#define MPI2_IOUNITPAGE5_DMA_CAP_ENCRYPTION (0x0001) + + +/*IO Unit Page 6 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 Flags; /*0x04 */ + U8 RAHostControlSize; /*0x06 */ + U8 Reserved0; /*0x07 */ + U64 + RaidAcceleratorHostControlBaseAddress; /*0x08 */ + U32 Reserved1; /*0x10 */ + U32 Reserved2; /*0x14 */ + U32 Reserved3; /*0x18 */ +} MPI2_CONFIG_PAGE_IO_UNIT_6, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_6, + Mpi2IOUnitPage6_t, *pMpi2IOUnitPage6_t; + +#define MPI2_IOUNITPAGE6_PAGEVERSION (0x00) + +/*defines for IO Unit Page 6 Flags field */ +#define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001) + + +/*IO Unit Page 7 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 CurrentPowerMode; /*0x04 */ + U8 PreviousPowerMode; /*0x05 */ + U8 PCIeWidth; /*0x06 */ + U8 PCIeSpeed; /*0x07 */ + U32 ProcessorState; /*0x08 */ + U32 + PowerManagementCapabilities; /*0x0C */ + U16 IOCTemperature; /*0x10 */ + U8 + IOCTemperatureUnits; /*0x12 */ + U8 IOCSpeed; /*0x13 */ + U16 BoardTemperature; /*0x14 */ + U8 + BoardTemperatureUnits; /*0x16 */ + U8 Reserved3; /*0x17 */ + U32 BoardPowerRequirement; /*0x18 */ + U32 PCISlotPowerAllocation; /*0x1C */ +/* reserved prior to MPI v2.6 */ + U8 Flags; /* 0x20 */ + U8 Reserved6; /* 0x21 */ + U16 Reserved7; /* 0x22 */ + U32 Reserved8; /* 0x24 */ +} MPI2_CONFIG_PAGE_IO_UNIT_7, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, + Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t; + +#define MPI2_IOUNITPAGE7_PAGEVERSION (0x05) + +/*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */ +#define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0) +#define MPI25_IOUNITPAGE7_PM_INIT_UNAVAILABLE (0x00) +#define MPI25_IOUNITPAGE7_PM_INIT_HOST (0x40) +#define MPI25_IOUNITPAGE7_PM_INIT_IO_UNIT (0x80) +#define MPI25_IOUNITPAGE7_PM_INIT_PCIE_DPA (0xC0) + +#define MPI25_IOUNITPAGE7_PM_MODE_MASK (0x07) +#define MPI25_IOUNITPAGE7_PM_MODE_UNAVAILABLE (0x00) +#define MPI25_IOUNITPAGE7_PM_MODE_UNKNOWN (0x01) +#define MPI25_IOUNITPAGE7_PM_MODE_FULL_POWER (0x04) +#define MPI25_IOUNITPAGE7_PM_MODE_REDUCED_POWER (0x05) +#define MPI25_IOUNITPAGE7_PM_MODE_STANDBY (0x06) + + +/*defines for IO Unit Page 7 PCIeWidth field */ +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 
(0x08) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X16 (0x10) + +/*defines for IO Unit Page 7 PCIeSpeed field */ +#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_16_0_GBPS (0x03) + +/*defines for IO Unit Page 7 ProcessorState field */ +#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F) +#define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND (0) + +#define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01) +#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02) + +/*defines for IO Unit Page 7 PowerManagementCapabilities field */ +#define MPI25_IOUNITPAGE7_PMCAP_DPA_FULL_PWR_MODE (0x00400000) +#define MPI25_IOUNITPAGE7_PMCAP_DPA_REDUCED_PWR_MODE (0x00200000) +#define MPI25_IOUNITPAGE7_PMCAP_DPA_STANDBY_MODE (0x00100000) +#define MPI25_IOUNITPAGE7_PMCAP_HOST_FULL_PWR_MODE (0x00040000) +#define MPI25_IOUNITPAGE7_PMCAP_HOST_REDUCED_PWR_MODE (0x00020000) +#define MPI25_IOUNITPAGE7_PMCAP_HOST_STANDBY_MODE (0x00010000) +#define MPI25_IOUNITPAGE7_PMCAP_IO_FULL_PWR_MODE (0x00004000) +#define MPI25_IOUNITPAGE7_PMCAP_IO_REDUCED_PWR_MODE (0x00002000) +#define MPI25_IOUNITPAGE7_PMCAP_IO_STANDBY_MODE (0x00001000) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_12_5_PCT_IOCSPEED (0x00000400) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_25_0_PCT_IOCSPEED (0x00000200) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_50_0_PCT_IOCSPEED (0x00000100) +#define MPI25_IOUNITPAGE7_PMCAP_IO_12_5_PCT_IOCSPEED (0x00000040) +#define MPI25_IOUNITPAGE7_PMCAP_IO_25_0_PCT_IOCSPEED (0x00000020) +#define MPI25_IOUNITPAGE7_PMCAP_IO_50_0_PCT_IOCSPEED (0x00000010) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_WIDTH_CHANGE_PCIE (0x00000008) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_SPEED_CHANGE_PCIE (0x00000004) +#define MPI25_IOUNITPAGE7_PMCAP_IO_WIDTH_CHANGE_PCIE (0x00000002) +#define MPI25_IOUNITPAGE7_PMCAP_IO_SPEED_CHANGE_PCIE (0x00000001) + +/*obsolete names for the PowerManagementCapabilities bits (above) */ +#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400) +#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200) +#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100) +#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /*obsolete */ +#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /*obsolete */ + + +/*defines for IO Unit Page 7 IOCTemperatureUnits field */ +#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01) +#define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS (0x02) + +/*defines for IO Unit Page 7 IOCSpeed field */ +#define MPI2_IOUNITPAGE7_IOC_SPEED_FULL (0x01) +#define MPI2_IOUNITPAGE7_IOC_SPEED_HALF (0x02) +#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04) +#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08) + +/*defines for IO Unit Page 7 BoardTemperatureUnits field */ +#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01) +#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02) + +/* defines for IO Unit Page 7 Flags field */ +#define MPI2_IOUNITPAGE7_FLAG_CABLE_POWER_EXC (0x01) + +/*IO Unit Page 8 */ + +#define MPI2_IOUNIT8_NUM_THRESHOLDS (4) + +typedef struct _MPI2_IOUNIT8_SENSOR { + U16 Flags; /*0x00 */ + U16 Reserved1; /*0x02 */ + U16 + Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /*0x04 */ + U32 Reserved2; /*0x0C */ + U32 Reserved3; /*0x10 */ + U32 Reserved4; /*0x14 */ +} MPI2_IOUNIT8_SENSOR, *PTR_MPI2_IOUNIT8_SENSOR, + Mpi2IOUnit8Sensor_t, 
*pMpi2IOUnit8Sensor_t; + +/*defines for IO Unit Page 8 Sensor Flags field */ +#define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE (0x0008) +#define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE (0x0004) +#define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE (0x0002) +#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE (0x0001) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumSensors at runtime. + */ +#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES +#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U8 NumSensors; /*0x0C */ + U8 PollingInterval; /*0x0D */ + U16 Reserved3; /*0x0E */ + MPI2_IOUNIT8_SENSOR + Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/*0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_8, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_8, + Mpi2IOUnitPage8_t, *pMpi2IOUnitPage8_t; + +#define MPI2_IOUNITPAGE8_PAGEVERSION (0x00) + + +/*IO Unit Page 9 */ + +typedef struct _MPI2_IOUNIT9_SENSOR { + U16 CurrentTemperature; /*0x00 */ + U16 Reserved1; /*0x02 */ + U8 Flags; /*0x04 */ + U8 Reserved2; /*0x05 */ + U16 Reserved3; /*0x06 */ + U32 Reserved4; /*0x08 */ + U32 Reserved5; /*0x0C */ +} MPI2_IOUNIT9_SENSOR, *PTR_MPI2_IOUNIT9_SENSOR, + Mpi2IOUnit9Sensor_t, *pMpi2IOUnit9Sensor_t; + +/*defines for IO Unit Page 9 Sensor Flags field */ +#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID (0x01) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumSensors at runtime. + */ +#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES +#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U8 NumSensors; /*0x0C */ + U8 Reserved4; /*0x0D */ + U16 Reserved3; /*0x0E */ + MPI2_IOUNIT9_SENSOR + Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/*0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_9, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_9, + Mpi2IOUnitPage9_t, *pMpi2IOUnitPage9_t; + +#define MPI2_IOUNITPAGE9_PAGEVERSION (0x00) + + +/*IO Unit Page 10 */ + +typedef struct _MPI2_IOUNIT10_FUNCTION { + U8 CreditPercent; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ +} MPI2_IOUNIT10_FUNCTION, + *PTR_MPI2_IOUNIT10_FUNCTION, + Mpi2IOUnit10Function_t, + *pMpi2IOUnit10Function_t; + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumFunctions at runtime. 
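+ *
+ *Illustrative note (editorial, not part of the MPI specification): because
+ *the Function[] array is declared with this one-entry placeholder, host
+ *code can size a buffer for the full page from the NumFunctions value the
+ *IOC reports, for example
+ *
+ *    sz = offsetof(Mpi2IOUnitPage10_t, Function) +
+ *         num_functions * sizeof(Mpi2IOUnit10Function_t);
+ *
+ *where num_functions is a hypothetical variable holding the NumFunctions
+ *value read back from the IOC.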
+ */ +#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES +#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumFunctions; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U32 Reserved3; /*0x08 */ + U32 Reserved4; /*0x0C */ + MPI2_IOUNIT10_FUNCTION + Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/*0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_10, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_10, + Mpi2IOUnitPage10_t, *pMpi2IOUnitPage10_t; + +#define MPI2_IOUNITPAGE10_PAGEVERSION (0x01) + + +/* IO Unit Page 11 (for MPI v2.6 and later) */ + +typedef struct _MPI26_IOUNIT11_SPINUP_GROUP { + U8 MaxTargetSpinup; /* 0x00 */ + U8 SpinupDelay; /* 0x01 */ + U8 SpinupFlags; /* 0x02 */ + U8 Reserved1; /* 0x03 */ +} MPI26_IOUNIT11_SPINUP_GROUP, + *PTR_MPI26_IOUNIT11_SPINUP_GROUP, + Mpi26IOUnit11SpinupGroup_t, + *pMpi26IOUnit11SpinupGroup_t; + +/* defines for IO Unit Page 11 SpinupFlags */ +#define MPI26_IOUNITPAGE11_SPINUP_DISABLE_FLAG (0x01) + + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * four and check the value returned for NumPhys at runtime. + */ +#ifndef MPI26_IOUNITPAGE11_PHY_MAX +#define MPI26_IOUNITPAGE11_PHY_MAX (4) +#endif + +typedef struct _MPI26_CONFIG_PAGE_IO_UNIT_11 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + MPI26_IOUNIT11_SPINUP_GROUP SpinupGroupParameters[4]; /*0x08 */ + U32 Reserved2; /*0x18 */ + U32 Reserved3; /*0x1C */ + U32 Reserved4; /*0x20 */ + U8 BootDeviceWaitTime; /*0x24 */ + U8 Reserved5; /*0x25 */ + U16 Reserved6; /*0x26 */ + U8 NumPhys; /*0x28 */ + U8 PEInitialSpinupDelay; /*0x29 */ + U8 PEReplyDelay; /*0x2A */ + U8 Flags; /*0x2B */ + U8 PHY[MPI26_IOUNITPAGE11_PHY_MAX];/*0x2C */ +} MPI26_CONFIG_PAGE_IO_UNIT_11, + *PTR_MPI26_CONFIG_PAGE_IO_UNIT_11, + Mpi26IOUnitPage11_t, + *pMpi26IOUnitPage11_t; + +#define MPI26_IOUNITPAGE11_PAGEVERSION (0x00) + +/* defines for Flags field */ +#define MPI26_IOUNITPAGE11_FLAGS_AUTO_PORTENABLE (0x01) + +/* defines for PHY field */ +#define MPI26_IOUNITPAGE11_PHY_SPINUP_GROUP_MASK (0x03) + + + + + + +/**************************************************************************** +* IOC Config Pages +****************************************************************************/ + +/*IOC Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U16 VendorID; /*0x0C */ + U16 DeviceID; /*0x0E */ + U8 RevisionID; /*0x10 */ + U8 Reserved3; /*0x11 */ + U16 Reserved4; /*0x12 */ + U32 ClassCode; /*0x14 */ + U16 SubsystemVendorID; /*0x18 */ + U16 SubsystemID; /*0x1A */ +} MPI2_CONFIG_PAGE_IOC_0, + *PTR_MPI2_CONFIG_PAGE_IOC_0, + Mpi2IOCPage0_t, *pMpi2IOCPage0_t; + +#define MPI2_IOCPAGE0_PAGEVERSION (0x02) + + +/*IOC Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Flags; /*0x04 */ + U32 CoalescingTimeout; /*0x08 */ + U8 CoalescingDepth; /*0x0C */ + U8 PCISlotNum; /*0x0D */ + U8 PCIBusNum; /*0x0E */ + U8 PCIDomainSegment; /*0x0F */ + U32 Reserved1; /*0x10 */ + U32 ProductSpecific; /* 0x14 */ +} MPI2_CONFIG_PAGE_IOC_1, + *PTR_MPI2_CONFIG_PAGE_IOC_1, + Mpi2IOCPage1_t, *pMpi2IOCPage1_t; + +#define MPI2_IOCPAGE1_PAGEVERSION (0x05) + +/*defines for IOC Page 1 Flags field */ +#define MPI2_IOCPAGE1_REPLY_COALESCING (0x00000001) + +#define MPI2_IOCPAGE1_PCISLOTNUM_UNKNOWN (0xFF) +#define MPI2_IOCPAGE1_PCIBUSNUM_UNKNOWN (0xFF) +#define 
MPI2_IOCPAGE1_PCIDOMAIN_UNKNOWN (0xFF) + +/*IOC Page 6 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_6 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 + CapabilitiesFlags; /*0x04 */ + U8 MaxDrivesRAID0; /*0x08 */ + U8 MaxDrivesRAID1; /*0x09 */ + U8 + MaxDrivesRAID1E; /*0x0A */ + U8 + MaxDrivesRAID10; /*0x0B */ + U8 MinDrivesRAID0; /*0x0C */ + U8 MinDrivesRAID1; /*0x0D */ + U8 + MinDrivesRAID1E; /*0x0E */ + U8 + MinDrivesRAID10; /*0x0F */ + U32 Reserved1; /*0x10 */ + U8 + MaxGlobalHotSpares; /*0x14 */ + U8 MaxPhysDisks; /*0x15 */ + U8 MaxVolumes; /*0x16 */ + U8 MaxConfigs; /*0x17 */ + U8 MaxOCEDisks; /*0x18 */ + U8 Reserved2; /*0x19 */ + U16 Reserved3; /*0x1A */ + U32 + SupportedStripeSizeMapRAID0; /*0x1C */ + U32 + SupportedStripeSizeMapRAID1E; /*0x20 */ + U32 + SupportedStripeSizeMapRAID10; /*0x24 */ + U32 Reserved4; /*0x28 */ + U32 Reserved5; /*0x2C */ + U16 + DefaultMetadataSize; /*0x30 */ + U16 Reserved6; /*0x32 */ + U16 + MaxBadBlockTableEntries; /*0x34 */ + U16 Reserved7; /*0x36 */ + U32 + IRNvsramVersion; /*0x38 */ +} MPI2_CONFIG_PAGE_IOC_6, + *PTR_MPI2_CONFIG_PAGE_IOC_6, + Mpi2IOCPage6_t, *pMpi2IOCPage6_t; + +#define MPI2_IOCPAGE6_PAGEVERSION (0x05) + +/*defines for IOC Page 6 CapabilitiesFlags */ +#define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT (0x00000020) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT (0x00000004) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID0_SUPPORT (0x00000002) +#define MPI2_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE (0x00000001) + + +/*IOC Page 7 */ + +#define MPI2_IOCPAGE7_EVENTMASK_WORDS (4) + +typedef struct _MPI2_CONFIG_PAGE_IOC_7 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 + EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/*0x08 */ + U16 SASBroadcastPrimitiveMasks; /*0x18 */ + U16 SASNotifyPrimitiveMasks; /*0x1A */ + U32 Reserved3; /*0x1C */ +} MPI2_CONFIG_PAGE_IOC_7, + *PTR_MPI2_CONFIG_PAGE_IOC_7, + Mpi2IOCPage7_t, *pMpi2IOCPage7_t; + +#define MPI2_IOCPAGE7_PAGEVERSION (0x02) + + +/*IOC Page 8 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_8 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumDevsPerEnclosure; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U16 MaxPersistentEntries; /*0x08 */ + U16 MaxNumPhysicalMappedIDs; /*0x0A */ + U16 Flags; /*0x0C */ + U16 Reserved3; /*0x0E */ + U16 IRVolumeMappingFlags; /*0x10 */ + U16 Reserved4; /*0x12 */ + U32 Reserved5; /*0x14 */ +} MPI2_CONFIG_PAGE_IOC_8, + *PTR_MPI2_CONFIG_PAGE_IOC_8, + Mpi2IOCPage8_t, *pMpi2IOCPage8_t; + +#define MPI2_IOCPAGE8_PAGEVERSION (0x00) + +/*defines for IOC Page 8 Flags field */ +#define MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1 (0x00000020) +#define MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0 (0x00000010) + +#define MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE (0x0000000E) +#define MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING (0x00000000) +#define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING (0x00000002) + +#define MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING (0x00000001) +#define MPI2_IOCPAGE8_FLAGS_ENABLE_PERSISTENT_MAPPING (0x00000000) + +/*defines for IOC Page 8 IRVolumeMappingFlags */ +#define MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003) +#define MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000) +#define MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING (0x00000001) + + +/**************************************************************************** +* BIOS Config Pages 
+****************************************************************************/ + +/*BIOS Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_BIOS_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 BiosOptions; /*0x04 */ + U32 IOCSettings; /*0x08 */ + U8 SSUTimeout; /*0x0C */ + U8 MaxEnclosureLevel; /*0x0D */ + U16 Reserved2; /*0x0E */ + U32 DeviceSettings; /*0x10 */ + U16 NumberOfDevices; /*0x14 */ + U16 UEFIVersion; /*0x16 */ + U16 IOTimeoutBlockDevicesNonRM; /*0x18 */ + U16 IOTimeoutSequential; /*0x1A */ + U16 IOTimeoutOther; /*0x1C */ + U16 IOTimeoutBlockDevicesRM; /*0x1E */ +} MPI2_CONFIG_PAGE_BIOS_1, + *PTR_MPI2_CONFIG_PAGE_BIOS_1, + Mpi2BiosPage1_t, *pMpi2BiosPage1_t; + +#define MPI2_BIOSPAGE1_PAGEVERSION (0x07) + +/*values for BIOS Page 1 BiosOptions field */ +#define MPI2_BIOSPAGE1_OPTIONS_BOOT_LIST_ADD_ALT_BOOT_DEVICE (0x00008000) +#define MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG (0x00004000) + +#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS (0x00001800) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY (0x00002000) + +#define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS (0x00000400) + +#define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD (0x00000300) +#define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD (0x00000100) +#define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD (0x00000200) +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD (0x00000300) + +#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) +#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) + +#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006) +#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002) +#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004) + +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001) + +/*values for BIOS Page 1 IOCSettings field */ +#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000) +#define MPI2_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000) +#define MPI2_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000) + +#define MPI2_BIOSPAGE1_IOCSET_MASK_RM_SETTING (0x000000C0) +#define MPI2_BIOSPAGE1_IOCSET_NONE_RM_SETTING (0x00000000) +#define MPI2_BIOSPAGE1_IOCSET_BOOT_RM_SETTING (0x00000040) +#define MPI2_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING (0x00000080) + +#define MPI2_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT (0x00000030) +#define MPI2_BIOSPAGE1_IOCSET_NO_SUPPORT (0x00000000) +#define MPI2_BIOSPAGE1_IOCSET_BIOS_SUPPORT (0x00000010) +#define MPI2_BIOSPAGE1_IOCSET_OS_SUPPORT (0x00000020) +#define MPI2_BIOSPAGE1_IOCSET_ALL_SUPPORT (0x00000030) + +#define MPI2_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008) + +/*values for BIOS Page 1 DeviceSettings field */ +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001) + +/*defines for BIOS Page 1 UEFIVersion field */ +#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK (0xFF00) +#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8) +#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK (0x00FF) +#define 
MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT (0) + + + +/*BIOS Page 2 */ + +typedef struct _MPI2_BOOT_DEVICE_ADAPTER_ORDER { + U32 Reserved1; /*0x00 */ + U32 Reserved2; /*0x04 */ + U32 Reserved3; /*0x08 */ + U32 Reserved4; /*0x0C */ + U32 Reserved5; /*0x10 */ + U32 Reserved6; /*0x14 */ +} MPI2_BOOT_DEVICE_ADAPTER_ORDER, + *PTR_MPI2_BOOT_DEVICE_ADAPTER_ORDER, + Mpi2BootDeviceAdapterOrder_t, + *pMpi2BootDeviceAdapterOrder_t; + +typedef struct _MPI2_BOOT_DEVICE_SAS_WWID { + U64 SASAddress; /*0x00 */ + U8 LUN[8]; /*0x08 */ + U32 Reserved1; /*0x10 */ + U32 Reserved2; /*0x14 */ +} MPI2_BOOT_DEVICE_SAS_WWID, + *PTR_MPI2_BOOT_DEVICE_SAS_WWID, + Mpi2BootDeviceSasWwid_t, + *pMpi2BootDeviceSasWwid_t; + +typedef struct _MPI2_BOOT_DEVICE_ENCLOSURE_SLOT { + U64 EnclosureLogicalID; /*0x00 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ + U16 SlotNumber; /*0x10 */ + U16 Reserved3; /*0x12 */ + U32 Reserved4; /*0x14 */ +} MPI2_BOOT_DEVICE_ENCLOSURE_SLOT, + *PTR_MPI2_BOOT_DEVICE_ENCLOSURE_SLOT, + Mpi2BootDeviceEnclosureSlot_t, + *pMpi2BootDeviceEnclosureSlot_t; + +typedef struct _MPI2_BOOT_DEVICE_DEVICE_NAME { + U64 DeviceName; /*0x00 */ + U8 LUN[8]; /*0x08 */ + U32 Reserved1; /*0x10 */ + U32 Reserved2; /*0x14 */ +} MPI2_BOOT_DEVICE_DEVICE_NAME, + *PTR_MPI2_BOOT_DEVICE_DEVICE_NAME, + Mpi2BootDeviceDeviceName_t, + *pMpi2BootDeviceDeviceName_t; + +typedef union _MPI2_MPI2_BIOSPAGE2_BOOT_DEVICE { + MPI2_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder; + MPI2_BOOT_DEVICE_SAS_WWID SasWwid; + MPI2_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot; + MPI2_BOOT_DEVICE_DEVICE_NAME DeviceName; +} MPI2_BIOSPAGE2_BOOT_DEVICE, + *PTR_MPI2_BIOSPAGE2_BOOT_DEVICE, + Mpi2BiosPage2BootDevice_t, + *pMpi2BiosPage2BootDevice_t; + +typedef struct _MPI2_CONFIG_PAGE_BIOS_2 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U32 Reserved3; /*0x0C */ + U32 Reserved4; /*0x10 */ + U32 Reserved5; /*0x14 */ + U32 Reserved6; /*0x18 */ + U8 ReqBootDeviceForm; /*0x1C */ + U8 Reserved7; /*0x1D */ + U16 Reserved8; /*0x1E */ + MPI2_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; /*0x20 */ + U8 ReqAltBootDeviceForm; /*0x38 */ + U8 Reserved9; /*0x39 */ + U16 Reserved10; /*0x3A */ + MPI2_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; /*0x3C */ + U8 CurrentBootDeviceForm; /*0x58 */ + U8 Reserved11; /*0x59 */ + U16 Reserved12; /*0x5A */ + MPI2_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; /*0x58 */ +} MPI2_CONFIG_PAGE_BIOS_2, *PTR_MPI2_CONFIG_PAGE_BIOS_2, + Mpi2BiosPage2_t, *pMpi2BiosPage2_t; + +#define MPI2_BIOSPAGE2_PAGEVERSION (0x04) + +/*values for BIOS Page 2 BootDeviceForm fields */ +#define MPI2_BIOSPAGE2_FORM_MASK (0x0F) +#define MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00) +#define MPI2_BIOSPAGE2_FORM_SAS_WWID (0x05) +#define MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06) +#define MPI2_BIOSPAGE2_FORM_DEVICE_NAME (0x07) + + +/*BIOS Page 3 */ + +#define MPI2_BIOSPAGE3_NUM_ADAPTER (4) + +typedef struct _MPI2_ADAPTER_INFO { + U8 PciBusNumber; /*0x00 */ + U8 PciDeviceAndFunctionNumber; /*0x01 */ + U16 AdapterFlags; /*0x02 */ +} MPI2_ADAPTER_INFO, *PTR_MPI2_ADAPTER_INFO, + Mpi2AdapterInfo_t, *pMpi2AdapterInfo_t; + +#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001) +#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002) + +typedef struct _MPI2_ADAPTER_ORDER_AUX { + U64 WWID; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ +} MPI2_ADAPTER_ORDER_AUX, *PTR_MPI2_ADAPTER_ORDER_AUX, + Mpi2AdapterOrderAux_t, *pMpi2AdapterOrderAux_t; + + +typedef struct _MPI2_CONFIG_PAGE_BIOS_3 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 
*/ + U32 GlobalFlags; /*0x04 */ + U32 BiosVersion; /*0x08 */ + MPI2_ADAPTER_INFO AdapterOrder[MPI2_BIOSPAGE3_NUM_ADAPTER]; + U32 Reserved1; /*0x1C */ + MPI2_ADAPTER_ORDER_AUX AdapterOrderAux[MPI2_BIOSPAGE3_NUM_ADAPTER]; +} MPI2_CONFIG_PAGE_BIOS_3, + *PTR_MPI2_CONFIG_PAGE_BIOS_3, + Mpi2BiosPage3_t, *pMpi2BiosPage3_t; + +#define MPI2_BIOSPAGE3_PAGEVERSION (0x01) + +/*values for BIOS Page 3 GlobalFlags */ +#define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002) +#define MPI2_BIOSPAGE3_FLAGS_VERBOSE_ENABLE (0x00000004) +#define MPI2_BIOSPAGE3_FLAGS_HOOK_INT_40_DISABLE (0x00000010) + +#define MPI2_BIOSPAGE3_FLAGS_DEV_LIST_DISPLAY_MASK (0x000000E0) +#define MPI2_BIOSPAGE3_FLAGS_INSTALLED_DEV_DISPLAY (0x00000000) +#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DISPLAY (0x00000020) +#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DEV_DISPLAY (0x00000040) + + +/*BIOS Page 4 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES +#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1) +#endif + +typedef struct _MPI2_BIOS4_ENTRY { + U64 ReassignmentWWID; /*0x00 */ + U64 ReassignmentDeviceName; /*0x08 */ +} MPI2_BIOS4_ENTRY, *PTR_MPI2_BIOS4_ENTRY, + Mpi2MBios4Entry_t, *pMpi2Bios4Entry_t; + +typedef struct _MPI2_CONFIG_PAGE_BIOS_4 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumPhys; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + MPI2_BIOS4_ENTRY + Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /*0x08 */ +} MPI2_CONFIG_PAGE_BIOS_4, *PTR_MPI2_CONFIG_PAGE_BIOS_4, + Mpi2BiosPage4_t, *pMpi2BiosPage4_t; + +#define MPI2_BIOSPAGE4_PAGEVERSION (0x01) + + +/**************************************************************************** +* RAID Volume Config Pages +****************************************************************************/ + +/*RAID Volume Page 0 */ + +typedef struct _MPI2_RAIDVOL0_PHYS_DISK { + U8 RAIDSetNum; /*0x00 */ + U8 PhysDiskMap; /*0x01 */ + U8 PhysDiskNum; /*0x02 */ + U8 Reserved; /*0x03 */ +} MPI2_RAIDVOL0_PHYS_DISK, *PTR_MPI2_RAIDVOL0_PHYS_DISK, + Mpi2RaidVol0PhysDisk_t, *pMpi2RaidVol0PhysDisk_t; + +/*defines for the PhysDiskMap field */ +#define MPI2_RAIDVOL0_PHYSDISK_PRIMARY (0x01) +#define MPI2_RAIDVOL0_PHYSDISK_SECONDARY (0x02) + +typedef struct _MPI2_RAIDVOL0_SETTINGS { + U16 Settings; /*0x00 */ + U8 HotSparePool; /*0x01 */ + U8 Reserved; /*0x02 */ +} MPI2_RAIDVOL0_SETTINGS, *PTR_MPI2_RAIDVOL0_SETTINGS, + Mpi2RaidVol0Settings_t, + *pMpi2RaidVol0Settings_t; + +/*RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */ +#define MPI2_RAID_HOT_SPARE_POOL_0 (0x01) +#define MPI2_RAID_HOT_SPARE_POOL_1 (0x02) +#define MPI2_RAID_HOT_SPARE_POOL_2 (0x04) +#define MPI2_RAID_HOT_SPARE_POOL_3 (0x08) +#define MPI2_RAID_HOT_SPARE_POOL_4 (0x10) +#define MPI2_RAID_HOT_SPARE_POOL_5 (0x20) +#define MPI2_RAID_HOT_SPARE_POOL_6 (0x40) +#define MPI2_RAID_HOT_SPARE_POOL_7 (0x80) + +/*RAID Volume Page 0 VolumeSettings defines */ +#define MPI2_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0008) +#define MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE (0x0004) + +#define MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING (0x0003) +#define MPI2_RAIDVOL0_SETTING_UNCHANGED (0x0000) +#define MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING (0x0001) +#define MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING (0x0002) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhysDisks at runtime. 
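+ *
+ *Illustrative note (editorial, sketch only, not mandated by these headers):
+ *a common pattern is to read the page into a fixed-size Mpi2RaidVolPage0_t
+ *first, take NumPhysDisks from that copy, and then issue a second read into
+ *a buffer of
+ *
+ *    offsetof(Mpi2RaidVolPage0_t, PhysDisk) +
+ *    NumPhysDisks * sizeof(Mpi2RaidVol0PhysDisk_t)
+ *
+ *bytes so that every PhysDisk[] entry is captured.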
+ */ +#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX +#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 DevHandle; /*0x04 */ + U8 VolumeState; /*0x06 */ + U8 VolumeType; /*0x07 */ + U32 VolumeStatusFlags; /*0x08 */ + MPI2_RAIDVOL0_SETTINGS VolumeSettings; /*0x0C */ + U64 MaxLBA; /*0x10 */ + U32 StripeSize; /*0x18 */ + U16 BlockSize; /*0x1C */ + U16 Reserved1; /*0x1E */ + U8 SupportedPhysDisks;/*0x20 */ + U8 ResyncRate; /*0x21 */ + U16 DataScrubDuration; /*0x22 */ + U8 NumPhysDisks; /*0x24 */ + U8 Reserved2; /*0x25 */ + U8 Reserved3; /*0x26 */ + U8 InactiveStatus; /*0x27 */ + MPI2_RAIDVOL0_PHYS_DISK + PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /*0x28 */ +} MPI2_CONFIG_PAGE_RAID_VOL_0, + *PTR_MPI2_CONFIG_PAGE_RAID_VOL_0, + Mpi2RaidVolPage0_t, *pMpi2RaidVolPage0_t; + +#define MPI2_RAIDVOLPAGE0_PAGEVERSION (0x0A) + +/*values for RAID VolumeState */ +#define MPI2_RAID_VOL_STATE_MISSING (0x00) +#define MPI2_RAID_VOL_STATE_FAILED (0x01) +#define MPI2_RAID_VOL_STATE_INITIALIZING (0x02) +#define MPI2_RAID_VOL_STATE_ONLINE (0x03) +#define MPI2_RAID_VOL_STATE_DEGRADED (0x04) +#define MPI2_RAID_VOL_STATE_OPTIMAL (0x05) + +/*values for RAID VolumeType */ +#define MPI2_RAID_VOL_TYPE_RAID0 (0x00) +#define MPI2_RAID_VOL_TYPE_RAID1E (0x01) +#define MPI2_RAID_VOL_TYPE_RAID1 (0x02) +#define MPI2_RAID_VOL_TYPE_RAID10 (0x05) +#define MPI2_RAID_VOL_TYPE_UNKNOWN (0xFF) + +/*values for RAID Volume Page 0 VolumeStatusFlags field */ +#define MPI2_RAIDVOL0_STATUS_FLAG_PENDING_RESYNC (0x02000000) +#define MPI2_RAIDVOL0_STATUS_FLAG_BACKG_INIT_PENDING (0x01000000) +#define MPI2_RAIDVOL0_STATUS_FLAG_MDC_PENDING (0x00800000) +#define MPI2_RAIDVOL0_STATUS_FLAG_USER_CONSIST_PENDING (0x00400000) +#define MPI2_RAIDVOL0_STATUS_FLAG_MAKE_DATA_CONSISTENT (0x00200000) +#define MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB (0x00100000) +#define MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK (0x00080000) +#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000) +#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000) +#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000) +#define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT (0x00000080) +#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040) +#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020) +#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000) +#define MPI2_RAIDVOL0_STATUS_FLAG_1E_ADJACENT_MIRROR (0x00000010) +#define MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x00000008) +#define MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x00000004) +#define MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED (0x00000002) +#define MPI2_RAIDVOL0_STATUS_FLAG_ENABLED (0x00000001) + +/*values for RAID Volume Page 0 SupportedPhysDisks field */ +#define MPI2_RAIDVOL0_SUPPORT_SOLID_STATE_DISKS (0x08) +#define MPI2_RAIDVOL0_SUPPORT_HARD_DISKS (0x04) +#define MPI2_RAIDVOL0_SUPPORT_SAS_PROTOCOL (0x02) +#define MPI2_RAIDVOL0_SUPPORT_SATA_PROTOCOL (0x01) + +/*values for RAID Volume Page 0 InactiveStatus field */ +#define MPI2_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00) +#define MPI2_RAIDVOLPAGE0_STALE_METADATA_INACTIVE (0x01) +#define MPI2_RAIDVOLPAGE0_FOREIGN_VOLUME_INACTIVE (0x02) +#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_RESOURCE_INACTIVE (0x03) +#define MPI2_RAIDVOLPAGE0_CLONE_VOLUME_INACTIVE (0x04) +#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_METADATA_INACTIVE (0x05) +#define MPI2_RAIDVOLPAGE0_PREVIOUSLY_DELETED (0x06) + + +/*RAID Volume Page 1 */ + +typedef struct 
_MPI2_CONFIG_PAGE_RAID_VOL_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 DevHandle; /*0x04 */ + U16 Reserved0; /*0x06 */ + U8 GUID[24]; /*0x08 */ + U8 Name[16]; /*0x20 */ + U64 WWID; /*0x30 */ + U32 Reserved1; /*0x38 */ + U32 Reserved2; /*0x3C */ +} MPI2_CONFIG_PAGE_RAID_VOL_1, + *PTR_MPI2_CONFIG_PAGE_RAID_VOL_1, + Mpi2RaidVolPage1_t, *pMpi2RaidVolPage1_t; + +#define MPI2_RAIDVOLPAGE1_PAGEVERSION (0x03) + + +/**************************************************************************** +* RAID Physical Disk Config Pages +****************************************************************************/ + +/*RAID Physical Disk Page 0 */ + +typedef struct _MPI2_RAIDPHYSDISK0_SETTINGS { + U16 Reserved1; /*0x00 */ + U8 HotSparePool; /*0x02 */ + U8 Reserved2; /*0x03 */ +} MPI2_RAIDPHYSDISK0_SETTINGS, + *PTR_MPI2_RAIDPHYSDISK0_SETTINGS, + Mpi2RaidPhysDisk0Settings_t, + *pMpi2RaidPhysDisk0Settings_t; + +/*use MPI2_RAID_HOT_SPARE_POOL_ defines for the HotSparePool field */ + +typedef struct _MPI2_RAIDPHYSDISK0_INQUIRY_DATA { + U8 VendorID[8]; /*0x00 */ + U8 ProductID[16]; /*0x08 */ + U8 ProductRevLevel[4]; /*0x18 */ + U8 SerialNum[32]; /*0x1C */ +} MPI2_RAIDPHYSDISK0_INQUIRY_DATA, + *PTR_MPI2_RAIDPHYSDISK0_INQUIRY_DATA, + Mpi2RaidPhysDisk0InquiryData_t, + *pMpi2RaidPhysDisk0InquiryData_t; + +typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 DevHandle; /*0x04 */ + U8 Reserved1; /*0x06 */ + U8 PhysDiskNum; /*0x07 */ + MPI2_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; /*0x08 */ + U32 Reserved2; /*0x0C */ + MPI2_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; /*0x10 */ + U32 Reserved3; /*0x4C */ + U8 PhysDiskState; /*0x50 */ + U8 OfflineReason; /*0x51 */ + U8 IncompatibleReason; /*0x52 */ + U8 PhysDiskAttributes; /*0x53 */ + U32 PhysDiskStatusFlags;/*0x54 */ + U64 DeviceMaxLBA; /*0x58 */ + U64 HostMaxLBA; /*0x60 */ + U64 CoercedMaxLBA; /*0x68 */ + U16 BlockSize; /*0x70 */ + U16 Reserved5; /*0x72 */ + U32 Reserved6; /*0x74 */ +} MPI2_CONFIG_PAGE_RD_PDISK_0, + *PTR_MPI2_CONFIG_PAGE_RD_PDISK_0, + Mpi2RaidPhysDiskPage0_t, + *pMpi2RaidPhysDiskPage0_t; + +#define MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION (0x05) + +/*PhysDiskState defines */ +#define MPI2_RAID_PD_STATE_NOT_CONFIGURED (0x00) +#define MPI2_RAID_PD_STATE_NOT_COMPATIBLE (0x01) +#define MPI2_RAID_PD_STATE_OFFLINE (0x02) +#define MPI2_RAID_PD_STATE_ONLINE (0x03) +#define MPI2_RAID_PD_STATE_HOT_SPARE (0x04) +#define MPI2_RAID_PD_STATE_DEGRADED (0x05) +#define MPI2_RAID_PD_STATE_REBUILDING (0x06) +#define MPI2_RAID_PD_STATE_OPTIMAL (0x07) + +/*OfflineReason defines */ +#define MPI2_PHYSDISK0_ONLINE (0x00) +#define MPI2_PHYSDISK0_OFFLINE_MISSING (0x01) +#define MPI2_PHYSDISK0_OFFLINE_FAILED (0x03) +#define MPI2_PHYSDISK0_OFFLINE_INITIALIZING (0x04) +#define MPI2_PHYSDISK0_OFFLINE_REQUESTED (0x05) +#define MPI2_PHYSDISK0_OFFLINE_FAILED_REQUESTED (0x06) +#define MPI2_PHYSDISK0_OFFLINE_OTHER (0xFF) + +/*IncompatibleReason defines */ +#define MPI2_PHYSDISK0_COMPATIBLE (0x00) +#define MPI2_PHYSDISK0_INCOMPATIBLE_PROTOCOL (0x01) +#define MPI2_PHYSDISK0_INCOMPATIBLE_BLOCKSIZE (0x02) +#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03) +#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04) +#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05) +#define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06) +#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF) + +/*PhysDiskAttributes defines */ +#define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C) +#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08) +#define 
MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04) + +#define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK (0x03) +#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02) +#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01) + +/*PhysDiskStatusFlags defines */ +#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED (0x00000040) +#define MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET (0x00000020) +#define MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED (0x00000010) +#define MPI2_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00000000) +#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x00000008) +#define MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x00000004) +#define MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED (0x00000002) +#define MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x00000001) + + +/*RAID Physical Disk Page 1 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhysDiskPaths at runtime. + */ +#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX +#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1) +#endif + +typedef struct _MPI2_RAIDPHYSDISK1_PATH { + U16 DevHandle; /*0x00 */ + U16 Reserved1; /*0x02 */ + U64 WWID; /*0x04 */ + U64 OwnerWWID; /*0x0C */ + U8 OwnerIdentifier; /*0x14 */ + U8 Reserved2; /*0x15 */ + U16 Flags; /*0x16 */ +} MPI2_RAIDPHYSDISK1_PATH, *PTR_MPI2_RAIDPHYSDISK1_PATH, + Mpi2RaidPhysDisk1Path_t, + *pMpi2RaidPhysDisk1Path_t; + +/*RAID Physical Disk Page 1 Physical Disk Path Flags field defines */ +#define MPI2_RAID_PHYSDISK1_FLAG_PRIMARY (0x0004) +#define MPI2_RAID_PHYSDISK1_FLAG_BROKEN (0x0002) +#define MPI2_RAID_PHYSDISK1_FLAG_INVALID (0x0001) + +typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumPhysDiskPaths; /*0x04 */ + U8 PhysDiskNum; /*0x05 */ + U16 Reserved1; /*0x06 */ + U32 Reserved2; /*0x08 */ + MPI2_RAIDPHYSDISK1_PATH + PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/*0x0C */ +} MPI2_CONFIG_PAGE_RD_PDISK_1, + *PTR_MPI2_CONFIG_PAGE_RD_PDISK_1, + Mpi2RaidPhysDiskPage1_t, + *pMpi2RaidPhysDiskPage1_t; + +#define MPI2_RAIDPHYSDISKPAGE1_PAGEVERSION (0x02) + + +/**************************************************************************** +* values for fields used by several types of SAS Config Pages +****************************************************************************/ + +/*values for NegotiatedLinkRates fields */ +#define MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL (0xF0) +#define MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL (4) +#define MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F) +/*link rates used for Negotiated Physical and Logical Link Rate */ +#define MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00) +#define MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01) +#define MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02) +#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03) +#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04) +#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05) +#define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06) +#define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08) +#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09) +#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A) +#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B) +#define MPI26_SAS_NEG_LINK_RATE_22_5 (0x0C) + + +/*values for AttachedPhyInfo fields */ +#define MPI2_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040) +#define MPI2_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020) +#define MPI2_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010) + +#define MPI2_SAS_APHYINFO_REASON_MASK (0x0000000F) +#define MPI2_SAS_APHYINFO_REASON_UNKNOWN (0x00000000) +#define 
MPI2_SAS_APHYINFO_REASON_POWER_ON (0x00000001) +#define MPI2_SAS_APHYINFO_REASON_HARD_RESET (0x00000002) +#define MPI2_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003) +#define MPI2_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004) +#define MPI2_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005) +#define MPI2_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006) +#define MPI2_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007) +#define MPI2_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008) + + +/*values for PhyInfo fields */ +#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000) + +#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000) +#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION (27) +#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000) +#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000) +#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000) + +#define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000) +#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000) +#define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000) +#define MPI2_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000) +#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS (0x00200000) +#define MPI2_SAS_PHYINFO_ZONING_ENABLED (0x00100000) + +#define MPI2_SAS_PHYINFO_REASON_MASK (0x000F0000) +#define MPI2_SAS_PHYINFO_REASON_UNKNOWN (0x00000000) +#define MPI2_SAS_PHYINFO_REASON_POWER_ON (0x00010000) +#define MPI2_SAS_PHYINFO_REASON_HARD_RESET (0x00020000) +#define MPI2_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000) +#define MPI2_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000) +#define MPI2_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000) +#define MPI2_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000) +#define MPI2_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000) +#define MPI2_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000) + +#define MPI2_SAS_PHYINFO_MULTIPLEXING_SUPPORTED (0x00008000) +#define MPI2_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000) +#define MPI2_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000) +#define MPI2_SAS_PHYINFO_VIRTUAL_PHY (0x00001000) + +#define MPI2_SAS_PHYINFO_MASK_PARTIAL_PATHWAY_TIME (0x00000F00) +#define MPI2_SAS_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME (8) + +#define MPI2_SAS_PHYINFO_MASK_ROUTING_ATTRIBUTE (0x000000F0) +#define MPI2_SAS_PHYINFO_DIRECT_ROUTING (0x00000000) +#define MPI2_SAS_PHYINFO_SUBTRACTIVE_ROUTING (0x00000010) +#define MPI2_SAS_PHYINFO_TABLE_ROUTING (0x00000020) + + +/*values for SAS ProgrammedLinkRate fields */ +#define MPI2_SAS_PRATE_MAX_RATE_MASK (0xF0) +#define MPI2_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00) +#define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80) +#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90) +#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0) +#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0) +#define MPI26_SAS_PRATE_MAX_RATE_22_5 (0xC0) +#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F) +#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00) +#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08) +#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09) +#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A) +#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B) +#define MPI26_SAS_PRATE_MIN_RATE_22_5 (0x0C) + + +/*values for SAS HwLinkRate fields */ +#define MPI2_SAS_HWRATE_MAX_RATE_MASK (0xF0) +#define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80) +#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90) +#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0) +#define MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0) +#define MPI26_SAS_HWRATE_MAX_RATE_22_5 (0xC0) +#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F) +#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08) +#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09) 
+#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A) +#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B) +#define MPI26_SAS_HWRATE_MIN_RATE_22_5 (0x0C) + + + +/**************************************************************************** +* SAS IO Unit Config Pages +****************************************************************************/ + +/*SAS IO Unit Page 0 */ + +typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA { + U8 Port; /*0x00 */ + U8 PortFlags; /*0x01 */ + U8 PhyFlags; /*0x02 */ + U8 NegotiatedLinkRate; /*0x03 */ + U32 ControllerPhyDeviceInfo;/*0x04 */ + U16 AttachedDevHandle; /*0x08 */ + U16 ControllerDevHandle; /*0x0A */ + U32 DiscoveryStatus; /*0x0C */ + U32 Reserved; /*0x10 */ +} MPI2_SAS_IO_UNIT0_PHY_DATA, + *PTR_MPI2_SAS_IO_UNIT0_PHY_DATA, + Mpi2SasIOUnit0PhyData_t, + *pMpi2SasIOUnit0PhyData_t; + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_SAS_IOUNIT0_PHY_MAX +#define MPI2_SAS_IOUNIT0_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1;/*0x08 */ + U8 NumPhys; /*0x0C */ + U8 Reserved2;/*0x0D */ + U16 Reserved3;/*0x0E */ + MPI2_SAS_IO_UNIT0_PHY_DATA + PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /*0x10 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_0, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0, + Mpi2SasIOUnitPage0_t, *pMpi2SasIOUnitPage0_t; + +#define MPI2_SASIOUNITPAGE0_PAGEVERSION (0x05) + +/*values for SAS IO Unit Page 0 PortFlags */ +#define MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08) +#define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01) + +/*values for SAS IO Unit Page 0 PhyFlags */ +#define MPI2_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40) +#define MPI2_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20) +#define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10) +#define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) + +/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + +/*see mpi2_sas.h for values for + *SAS IO Unit Page 0 ControllerPhyDeviceInfo values */ + +/*values for SAS IO Unit Page 0 DiscoveryStatus */ +#define MPI2_SASIOUNIT0_DS_MAX_ENCLOSURES_EXCEED (0x80000000) +#define MPI2_SASIOUNIT0_DS_MAX_EXPANDERS_EXCEED (0x40000000) +#define MPI2_SASIOUNIT0_DS_MAX_DEVICES_EXCEED (0x20000000) +#define MPI2_SASIOUNIT0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI2_SASIOUNIT0_DS_DOWNSTREAM_INITIATOR (0x08000000) +#define MPI2_SASIOUNIT0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) +#define MPI2_SASIOUNIT0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) +#define MPI2_SASIOUNIT0_DS_MULTI_PORT_DOMAIN (0x00002000) +#define MPI2_SASIOUNIT0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) +#define MPI2_SASIOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800) +#define MPI2_SASIOUNIT0_DS_TABLE_LINK (0x00000400) +#define MPI2_SASIOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200) +#define MPI2_SASIOUNIT0_DS_SMP_CRC_ERROR (0x00000100) +#define MPI2_SASIOUNIT0_DS_SMP_FUNCTION_FAILED (0x00000080) +#define MPI2_SASIOUNIT0_DS_INDEX_NOT_EXIST (0x00000040) +#define MPI2_SASIOUNIT0_DS_OUT_ROUTE_ENTRIES (0x00000020) +#define MPI2_SASIOUNIT0_DS_SMP_TIMEOUT (0x00000010) +#define MPI2_SASIOUNIT0_DS_MULTIPLE_PORTS (0x00000004) +#define MPI2_SASIOUNIT0_DS_UNADDRESSABLE_DEVICE (0x00000002) +#define MPI2_SASIOUNIT0_DS_LOOP_DETECTED (0x00000001) + + +/*SAS IO Unit Page 1 */ + +typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA { + U8 Port; /*0x00 */ + U8 PortFlags; /*0x01 */ + U8 PhyFlags; /*0x02 */ + U8 MaxMinLinkRate; /*0x03 */ + U32 
ControllerPhyDeviceInfo; /*0x04 */ + U16 MaxTargetPortConnectTime; /*0x08 */ + U16 Reserved1; /*0x0A */ +} MPI2_SAS_IO_UNIT1_PHY_DATA, + *PTR_MPI2_SAS_IO_UNIT1_PHY_DATA, + Mpi2SasIOUnit1PhyData_t, + *pMpi2SasIOUnit1PhyData_t; + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_SAS_IOUNIT1_PHY_MAX +#define MPI2_SAS_IOUNIT1_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U16 + ControlFlags; /*0x08 */ + U16 + SASNarrowMaxQueueDepth; /*0x0A */ + U16 + AdditionalControlFlags; /*0x0C */ + U16 + SASWideMaxQueueDepth; /*0x0E */ + U8 + NumPhys; /*0x10 */ + U8 + SATAMaxQDepth; /*0x11 */ + U8 + ReportDeviceMissingDelay; /*0x12 */ + U8 + IODeviceMissingDelay; /*0x13 */ + MPI2_SAS_IO_UNIT1_PHY_DATA + PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /*0x14 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_1, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1, + Mpi2SasIOUnitPage1_t, *pMpi2SasIOUnitPage1_t; + +#define MPI2_SASIOUNITPAGE1_PAGEVERSION (0x09) + +/*values for SAS IO Unit Page 1 ControlFlags */ +#define MPI2_SASIOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000) +#define MPI2_SASIOUNIT1_CONTROL_SATA_3_0_MAX (0x4000) +#define MPI2_SASIOUNIT1_CONTROL_SATA_1_5_MAX (0x2000) +#define MPI2_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000) + +#define MPI2_SASIOUNIT1_CONTROL_MASK_DEV_SUPPORT (0x0600) +#define MPI2_SASIOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9) +#define MPI2_SASIOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x0) +#define MPI2_SASIOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x1) +#define MPI2_SASIOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x2) + +#define MPI2_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080) +#define MPI2_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040) +#define MPI2_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020) +#define MPI2_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010) +#define MPI2_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008) +#define MPI2_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004) +#define MPI2_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002) +#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001) + +/*values for SAS IO Unit Page 1 AdditionalControlFlags */ +#define MPI2_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100) +#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080) +#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040) +#define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020) +#define MPI2_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010) +#define MPI2_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008) +#define MPI2_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004) +#define MPI2_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002) +#define MPI2_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001) + +/*defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */ +#define MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F) +#define MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80) + +/*values for SAS IO Unit Page 1 PortFlags */ +#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01) + +/*values for SAS IO Unit Page 1 PhyFlags */ +#define MPI2_SASIOUNIT1_PHYFLAGS_INIT_PERSIST_CONNECT (0x40) +#define MPI2_SASIOUNIT1_PHYFLAGS_TARG_PERSIST_CONNECT (0x20) +#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10) +#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) + +/*values for SAS IO Unit Page 1 MaxMinLinkRate */ +#define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0) +#define 
MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80) +#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90) +#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0) +#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0) +#define MPI26_SASIOUNIT1_MAX_RATE_22_5 (0xC0) +#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F) +#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08) +#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09) +#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A) +#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B) +#define MPI26_SASIOUNIT1_MIN_RATE_22_5 (0x0C) + +/*see mpi2_sas.h for values for + *SAS IO Unit Page 1 ControllerPhyDeviceInfo values */ + + +/*SAS IO Unit Page 4 (for MPI v2.5 and earlier) */ + +typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP { + U8 MaxTargetSpinup; /*0x00 */ + U8 SpinupDelay; /*0x01 */ + U8 SpinupFlags; /*0x02 */ + U8 Reserved1; /*0x03 */ +} MPI2_SAS_IOUNIT4_SPINUP_GROUP, + *PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP, + Mpi2SasIOUnit4SpinupGroup_t, + *pMpi2SasIOUnit4SpinupGroup_t; +/*defines for SAS IO Unit Page 4 SpinupFlags */ +#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01) + + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_SAS_IOUNIT4_PHY_MAX +#define MPI2_SAS_IOUNIT4_PHY_MAX (4) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header;/*0x00 */ + MPI2_SAS_IOUNIT4_SPINUP_GROUP + SpinupGroupParameters[4]; /*0x08 */ + U32 + Reserved1; /*0x18 */ + U32 + Reserved2; /*0x1C */ + U32 + Reserved3; /*0x20 */ + U8 + BootDeviceWaitTime; /*0x24 */ + U8 + SATADeviceWaitTime; /*0x25 */ + U16 + Reserved5; /*0x26 */ + U8 + NumPhys; /*0x28 */ + U8 + PEInitialSpinupDelay; /*0x29 */ + U8 + PEReplyDelay; /*0x2A */ + U8 + Flags; /*0x2B */ + U8 + PHY[MPI2_SAS_IOUNIT4_PHY_MAX]; /*0x2C */ +} MPI2_CONFIG_PAGE_SASIOUNIT_4, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_4, + Mpi2SasIOUnitPage4_t, *pMpi2SasIOUnitPage4_t; + +#define MPI2_SASIOUNITPAGE4_PAGEVERSION (0x02) + +/*defines for Flags field */ +#define MPI2_SASIOUNIT4_FLAGS_AUTO_PORTENABLE (0x01) + +/*defines for PHY field */ +#define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03) + + +/*SAS IO Unit Page 5 */ + +typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS { + U8 ControlFlags; /*0x00 */ + U8 PortWidthModGroup; /*0x01 */ + U16 InactivityTimerExponent; /*0x02 */ + U8 SATAPartialTimeout; /*0x04 */ + U8 Reserved2; /*0x05 */ + U8 SATASlumberTimeout; /*0x06 */ + U8 Reserved3; /*0x07 */ + U8 SASPartialTimeout; /*0x08 */ + U8 Reserved4; /*0x09 */ + U8 SASSlumberTimeout; /*0x0A */ + U8 Reserved5; /*0x0B */ +} MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, + *PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, + Mpi2SasIOUnit5PhyPmSettings_t, + *pMpi2SasIOUnit5PhyPmSettings_t; + +/*defines for ControlFlags field */ +#define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08) +#define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04) +#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02) +#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01) + +/*defines for PortWidthModeGroup field */ +#define MPI2_SASIOUNIT5_PWMG_DISABLE (0xFF) + +/*defines for InactivityTimerExponent field */ +#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12) +#define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL (8) +#define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER (0x0070) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER (4) +#define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL (0x0007) +#define 
MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL (0) + +#define MPI2_SASIOUNIT5_ITE_TEN_SECONDS (7) +#define MPI2_SASIOUNIT5_ITE_ONE_SECOND (6) +#define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS (5) +#define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS (4) +#define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND (3) +#define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS (2) +#define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS (1) +#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_SAS_IOUNIT5_PHY_MAX +#define MPI2_SAS_IOUNIT5_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 NumPhys; /*0x08 */ + U8 Reserved1;/*0x09 */ + U16 Reserved2;/*0x0A */ + U32 Reserved3;/*0x0C */ + MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS + SASPhyPowerManagementSettings[MPI2_SAS_IOUNIT5_PHY_MAX];/*0x10 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_5, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5, + Mpi2SasIOUnitPage5_t, *pMpi2SasIOUnitPage5_t; + +#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x01) + + +/*SAS IO Unit Page 6 */ + +typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS { + U8 CurrentStatus; /*0x00 */ + U8 CurrentModulation; /*0x01 */ + U8 CurrentUtilization; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 Reserved2; /*0x04 */ +} MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS, + *PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS, + Mpi2SasIOUnit6PortWidthModGroupStatus_t, + *pMpi2SasIOUnit6PortWidthModGroupStatus_t; + +/*defines for CurrentStatus field */ +#define MPI2_SASIOUNIT6_STATUS_UNAVAILABLE (0x00) +#define MPI2_SASIOUNIT6_STATUS_UNCONFIGURED (0x01) +#define MPI2_SASIOUNIT6_STATUS_INVALID_CONFIG (0x02) +#define MPI2_SASIOUNIT6_STATUS_LINK_DOWN (0x03) +#define MPI2_SASIOUNIT6_STATUS_OBSERVATION_ONLY (0x04) +#define MPI2_SASIOUNIT6_STATUS_INACTIVE (0x05) +#define MPI2_SASIOUNIT6_STATUS_ACTIVE_IOUNIT (0x06) +#define MPI2_SASIOUNIT6_STATUS_ACTIVE_HOST (0x07) + +/*defines for CurrentModulation field */ +#define MPI2_SASIOUNIT6_MODULATION_25_PERCENT (0x00) +#define MPI2_SASIOUNIT6_MODULATION_50_PERCENT (0x01) +#define MPI2_SASIOUNIT6_MODULATION_75_PERCENT (0x02) +#define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumGroups at runtime. 
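+ *
+ *Illustrative note (editorial): overriding this define only widens the
+ *declared placeholder array; the page still describes NumGroups entries,
+ *so consumers should iterate on the reported count, e.g. (sketch)
+ *
+ *    for (i = 0; i < page->NumGroups; i++)
+ *            use(page->PortWidthModulationGroupStatus[i].CurrentStatus);
+ *
+ *where page points to a buffer large enough for NumGroups entries and
+ *use() is a placeholder for whatever the caller does with each group.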
+ */ +#ifndef MPI2_SAS_IOUNIT6_GROUP_MAX +#define MPI2_SAS_IOUNIT6_GROUP_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ + U8 NumGroups; /*0x10 */ + U8 Reserved3; /*0x11 */ + U16 Reserved4; /*0x12 */ + MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS + PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /*0x14 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_6, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6, + Mpi2SasIOUnitPage6_t, *pMpi2SasIOUnitPage6_t; + +#define MPI2_SASIOUNITPAGE6_PAGEVERSION (0x00) + + +/*SAS IO Unit Page 7 */ + +typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS { + U8 Flags; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U8 Threshold75Pct; /*0x04 */ + U8 Threshold50Pct; /*0x05 */ + U8 Threshold25Pct; /*0x06 */ + U8 Reserved3; /*0x07 */ +} MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS, + *PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS, + Mpi2SasIOUnit7PortWidthModGroupSettings_t, + *pMpi2SasIOUnit7PortWidthModGroupSettings_t; + +/*defines for Flags field */ +#define MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION (0x01) + + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumGroups at runtime. + */ +#ifndef MPI2_SAS_IOUNIT7_GROUP_MAX +#define MPI2_SAS_IOUNIT7_GROUP_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 SamplingInterval; /*0x08 */ + U8 WindowLength; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Reserved2; /*0x0C */ + U32 Reserved3; /*0x10 */ + U8 NumGroups; /*0x14 */ + U8 Reserved4; /*0x15 */ + U16 Reserved5; /*0x16 */ + MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS + PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX];/*0x18 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_7, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7, + Mpi2SasIOUnitPage7_t, *pMpi2SasIOUnitPage7_t; + +#define MPI2_SASIOUNITPAGE7_PAGEVERSION (0x00) + + +/*SAS IO Unit Page 8 */ + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U32 + PowerManagementCapabilities; /*0x0C */ + U8 + TxRxSleepStatus; /*0x10 */ + U8 + Reserved2; /*0x11 */ + U16 + Reserved3; /*0x12 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_8, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8, + Mpi2SasIOUnitPage8_t, *pMpi2SasIOUnitPage8_t; + +#define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00) + +/*defines for PowerManagementCapabilities field */ +#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x00001000) +#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x00000800) +#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x00000400) +#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x00000200) +#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x00000100) +#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x00000010) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001) + +/*defines for TxRxSleepStatus field */ +#define MPI25_SASIOUNIT8_TXRXSLEEP_UNSUPPORTED (0x00) +#define MPI25_SASIOUNIT8_TXRXSLEEP_DISENGAGED (0x01) +#define MPI25_SASIOUNIT8_TXRXSLEEP_ACTIVE (0x02) +#define MPI25_SASIOUNIT8_TXRXSLEEP_SHUTDOWN (0x03) + + + +/*SAS IO Unit Page 16 */ + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16 { 
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U64 + TimeStamp; /*0x08 */ + U32 + Reserved1; /*0x10 */ + U32 + Reserved2; /*0x14 */ + U32 + FastPathPendedRequests; /*0x18 */ + U32 + FastPathUnPendedRequests; /*0x1C */ + U32 + FastPathHostRequestStarts; /*0x20 */ + U32 + FastPathFirmwareRequestStarts; /*0x24 */ + U32 + FastPathHostCompletions; /*0x28 */ + U32 + FastPathFirmwareCompletions; /*0x2C */ + U32 + NonFastPathRequestStarts; /*0x30 */ + U32 + NonFastPathHostCompletions; /*0x30 */ +} MPI2_CONFIG_PAGE_SASIOUNIT16, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT16, + Mpi2SasIOUnitPage16_t, *pMpi2SasIOUnitPage16_t; + +#define MPI2_SASIOUNITPAGE16_PAGEVERSION (0x00) + + +/**************************************************************************** +* SAS Expander Config Pages +****************************************************************************/ + +/*SAS Expander Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U8 + PhysicalPort; /*0x08 */ + U8 + ReportGenLength; /*0x09 */ + U16 + EnclosureHandle; /*0x0A */ + U64 + SASAddress; /*0x0C */ + U32 + DiscoveryStatus; /*0x14 */ + U16 + DevHandle; /*0x18 */ + U16 + ParentDevHandle; /*0x1A */ + U16 + ExpanderChangeCount; /*0x1C */ + U16 + ExpanderRouteIndexes; /*0x1E */ + U8 + NumPhys; /*0x20 */ + U8 + SASLevel; /*0x21 */ + U16 + Flags; /*0x22 */ + U16 + STPBusInactivityTimeLimit; /*0x24 */ + U16 + STPMaxConnectTimeLimit; /*0x26 */ + U16 + STP_SMP_NexusLossTime; /*0x28 */ + U16 + MaxNumRoutedSasAddresses; /*0x2A */ + U64 + ActiveZoneManagerSASAddress;/*0x2C */ + U16 + ZoneLockInactivityLimit; /*0x34 */ + U16 + Reserved1; /*0x36 */ + U8 + TimeToReducedFunc; /*0x38 */ + U8 + InitialTimeToReducedFunc; /*0x39 */ + U8 + MaxReducedFuncTime; /*0x3A */ + U8 + Reserved2; /*0x3B */ +} MPI2_CONFIG_PAGE_EXPANDER_0, + *PTR_MPI2_CONFIG_PAGE_EXPANDER_0, + Mpi2ExpanderPage0_t, *pMpi2ExpanderPage0_t; + +#define MPI2_SASEXPANDER0_PAGEVERSION (0x06) + +/*values for SAS Expander Page 0 DiscoveryStatus field */ +#define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED (0x80000000) +#define MPI2_SAS_EXPANDER0_DS_MAX_EXPANDERS_EXCEED (0x40000000) +#define MPI2_SAS_EXPANDER0_DS_MAX_DEVICES_EXCEED (0x20000000) +#define MPI2_SAS_EXPANDER0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI2_SAS_EXPANDER0_DS_DOWNSTREAM_INITIATOR (0x08000000) +#define MPI2_SAS_EXPANDER0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) +#define MPI2_SAS_EXPANDER0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) +#define MPI2_SAS_EXPANDER0_DS_MULTI_PORT_DOMAIN (0x00002000) +#define MPI2_SAS_EXPANDER0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) +#define MPI2_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE (0x00000800) +#define MPI2_SAS_EXPANDER0_DS_TABLE_LINK (0x00000400) +#define MPI2_SAS_EXPANDER0_DS_SUBTRACTIVE_LINK (0x00000200) +#define MPI2_SAS_EXPANDER0_DS_SMP_CRC_ERROR (0x00000100) +#define MPI2_SAS_EXPANDER0_DS_SMP_FUNCTION_FAILED (0x00000080) +#define MPI2_SAS_EXPANDER0_DS_INDEX_NOT_EXIST (0x00000040) +#define MPI2_SAS_EXPANDER0_DS_OUT_ROUTE_ENTRIES (0x00000020) +#define MPI2_SAS_EXPANDER0_DS_SMP_TIMEOUT (0x00000010) +#define MPI2_SAS_EXPANDER0_DS_MULTIPLE_PORTS (0x00000004) +#define MPI2_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE (0x00000002) +#define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001) + +/*values for SAS Expander Page 0 Flags field */ +#define MPI2_SAS_EXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000) +#define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED (0x1000) +#define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800) +#define 
MPI2_SAS_EXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400) +#define MPI2_SAS_EXPANDER0_FLAGS_ZONING_SUPPORT (0x0200) +#define MPI2_SAS_EXPANDER0_FLAGS_ENABLED_ZONING (0x0100) +#define MPI2_SAS_EXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080) +#define MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010) +#define MPI2_SAS_EXPANDER0_FLAGS_OTHERS_CONFIG (0x0004) +#define MPI2_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002) +#define MPI2_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001) + + +/*SAS Expander Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U8 + PhysicalPort; /*0x08 */ + U8 + Reserved1; /*0x09 */ + U16 + Reserved2; /*0x0A */ + U8 + NumPhys; /*0x0C */ + U8 + Phy; /*0x0D */ + U16 + NumTableEntriesProgrammed; /*0x0E */ + U8 + ProgrammedLinkRate; /*0x10 */ + U8 + HwLinkRate; /*0x11 */ + U16 + AttachedDevHandle; /*0x12 */ + U32 + PhyInfo; /*0x14 */ + U32 + AttachedDeviceInfo; /*0x18 */ + U16 + ExpanderDevHandle; /*0x1C */ + U8 + ChangeCount; /*0x1E */ + U8 + NegotiatedLinkRate; /*0x1F */ + U8 + PhyIdentifier; /*0x20 */ + U8 + AttachedPhyIdentifier; /*0x21 */ + U8 + Reserved3; /*0x22 */ + U8 + DiscoveryInfo; /*0x23 */ + U32 + AttachedPhyInfo; /*0x24 */ + U8 + ZoneGroup; /*0x28 */ + U8 + SelfConfigStatus; /*0x29 */ + U16 + Reserved4; /*0x2A */ +} MPI2_CONFIG_PAGE_EXPANDER_1, + *PTR_MPI2_CONFIG_PAGE_EXPANDER_1, + Mpi2ExpanderPage1_t, *pMpi2ExpanderPage1_t; + +#define MPI2_SASEXPANDER1_PAGEVERSION (0x02) + +/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ + +/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ + +/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */ + +/*see mpi2_sas.h for the MPI2_SAS_DEVICE_INFO_ defines + *used for the AttachedDeviceInfo field */ + +/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + +/*values for SAS Expander Page 1 DiscoveryInfo field */ +#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04) +#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02) +#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01) + +/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */ + + +/**************************************************************************** +* SAS Device Config Pages +****************************************************************************/ + +/*SAS Device Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U16 + Slot; /*0x08 */ + U16 + EnclosureHandle; /*0x0A */ + U64 + SASAddress; /*0x0C */ + U16 + ParentDevHandle; /*0x14 */ + U8 + PhyNum; /*0x16 */ + U8 + AccessStatus; /*0x17 */ + U16 + DevHandle; /*0x18 */ + U8 + AttachedPhyIdentifier; /*0x1A */ + U8 + ZoneGroup; /*0x1B */ + U32 + DeviceInfo; /*0x1C */ + U16 + Flags; /*0x20 */ + U8 + PhysicalPort; /*0x22 */ + U8 + MaxPortConnections; /*0x23 */ + U64 + DeviceName; /*0x24 */ + U8 + PortGroups; /*0x2C */ + U8 + DmaGroup; /*0x2D */ + U8 + ControlGroup; /*0x2E */ + U8 + EnclosureLevel; /*0x2F */ + U32 + ConnectorName[4]; /*0x30 */ + U32 + Reserved3; /*0x34 */ +} MPI2_CONFIG_PAGE_SAS_DEV_0, + *PTR_MPI2_CONFIG_PAGE_SAS_DEV_0, + Mpi2SasDevicePage0_t, + *pMpi2SasDevicePage0_t; + +#define MPI2_SASDEVICE0_PAGEVERSION (0x09) + +/*values for SAS Device Page 0 AccessStatus field */ +#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT 
(0x03) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04) +#define MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05) +#define MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06) +#define MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07) +/*specific values for SATA Init failures */ +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F) + +/*see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */ + +/*values for SAS Device Page 0 Flags field */ +#define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE (0x8000) +#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH (0x4000) +#define MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000) +#define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000) +#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200) +#define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED (0x0080) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010) +#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008) +#define MPI2_SAS_DEVICE0_FLAGS_PERSIST_CAPABLE (0x0004) +#define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002) +#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) + + +/*SAS Device Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U64 + SASAddress; /*0x0C */ + U32 + Reserved2; /*0x14 */ + U16 + DevHandle; /*0x18 */ + U16 + Reserved3; /*0x1A */ + U8 + InitialRegDeviceFIS[20];/*0x1C */ +} MPI2_CONFIG_PAGE_SAS_DEV_1, + *PTR_MPI2_CONFIG_PAGE_SAS_DEV_1, + Mpi2SasDevicePage1_t, + *pMpi2SasDevicePage1_t; + +#define MPI2_SASDEVICE1_PAGEVERSION (0x01) + + +/**************************************************************************** +* SAS PHY Config Pages +****************************************************************************/ + +/*SAS PHY Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U16 + OwnerDevHandle; /*0x08 */ + U16 + Reserved1; /*0x0A */ + U16 + AttachedDevHandle; /*0x0C */ + U8 + AttachedPhyIdentifier; /*0x0E */ + U8 + Reserved2; /*0x0F */ + U32 + AttachedPhyInfo; /*0x10 */ + U8 + ProgrammedLinkRate; /*0x14 */ + U8 + HwLinkRate; /*0x15 */ + U8 + ChangeCount; /*0x16 */ + U8 + Flags; /*0x17 */ + U32 + PhyInfo; /*0x18 */ + U8 + NegotiatedLinkRate; /*0x1C */ + U8 + Reserved3; /*0x1D */ + U16 + Reserved4; /*0x1E */ +} MPI2_CONFIG_PAGE_SAS_PHY_0, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_0, + Mpi2SasPhyPage0_t, *pMpi2SasPhyPage0_t; + +#define MPI2_SASPHY0_PAGEVERSION (0x03) + +/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */ + +/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ + +/*use 
MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ + +/*values for SAS PHY Page 0 Flags field */ +#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01) + +/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */ + +/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + + +/*SAS PHY Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U32 + InvalidDwordCount; /*0x0C */ + U32 + RunningDisparityErrorCount; /*0x10 */ + U32 + LossDwordSynchCount; /*0x14 */ + U32 + PhyResetProblemCount; /*0x18 */ +} MPI2_CONFIG_PAGE_SAS_PHY_1, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_1, + Mpi2SasPhyPage1_t, *pMpi2SasPhyPage1_t; + +#define MPI2_SASPHY1_PAGEVERSION (0x01) + + +/*SAS PHY Page 2 */ + +typedef struct _MPI2_SASPHY2_PHY_EVENT { + U8 PhyEventCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 PhyEventInfo; /*0x04 */ +} MPI2_SASPHY2_PHY_EVENT, *PTR_MPI2_SASPHY2_PHY_EVENT, + Mpi2SasPhy2PhyEvent_t, *pMpi2SasPhy2PhyEvent_t; + +/*use MPI2_SASPHY3_EVENT_CODE_ for the PhyEventCode field */ + + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhyEvents at runtime. + */ +#ifndef MPI2_SASPHY2_PHY_EVENT_MAX +#define MPI2_SASPHY2_PHY_EVENT_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U8 + NumPhyEvents; /*0x0C */ + U8 + Reserved2; /*0x0D */ + U16 + Reserved3; /*0x0E */ + MPI2_SASPHY2_PHY_EVENT + PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX]; /*0x10 */ +} MPI2_CONFIG_PAGE_SAS_PHY_2, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_2, + Mpi2SasPhyPage2_t, + *pMpi2SasPhyPage2_t; + +#define MPI2_SASPHY2_PAGEVERSION (0x00) + + +/*SAS PHY Page 3 */ + +typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG { + U8 PhyEventCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U8 CounterType; /*0x04 */ + U8 ThresholdWindow; /*0x05 */ + U8 TimeUnits; /*0x06 */ + U8 Reserved3; /*0x07 */ + U32 EventThreshold; /*0x08 */ + U16 ThresholdFlags; /*0x0C */ + U16 Reserved4; /*0x0E */ +} MPI2_SASPHY3_PHY_EVENT_CONFIG, + *PTR_MPI2_SASPHY3_PHY_EVENT_CONFIG, + Mpi2SasPhy3PhyEventConfig_t, + *pMpi2SasPhy3PhyEventConfig_t; + +/*values for PhyEventCode field */ +#define MPI2_SASPHY3_EVENT_CODE_NO_EVENT (0x00) +#define MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01) +#define MPI2_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02) +#define MPI2_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03) +#define MPI2_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04) +#define MPI2_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05) +#define MPI2_SASPHY3_EVENT_CODE_RX_ERROR (0x06) +#define MPI2_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20) +#define MPI2_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21) +#define MPI2_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22) +#define MPI2_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23) +#define MPI2_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24) +#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25) +#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26) +#define MPI2_SASPHY3_EVENT_CODE_TX_BREAK (0x27) +#define MPI2_SASPHY3_EVENT_CODE_RX_BREAK (0x28) +#define MPI2_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29) +#define MPI2_SASPHY3_EVENT_CODE_CONNECTION (0x2A) +#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2B) +#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2C) +#define MPI2_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2D) +#define 
MPI2_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2E) +#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40) +#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41) +#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42) +#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43) +#define MPI2_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44) +#define MPI2_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45) +#define MPI2_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50) +#define MPI2_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51) +#define MPI2_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52) +#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60) +#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61) +#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63) +#define MPI2_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xD0) +#define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1) +#define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2) + +/*Following codes are product specific and in MPI v2.6 and later */ +#define MPI2_SASPHY3_EVENT_CODE_LCARB_WAIT_TIME (0xD3) +#define MPI2_SASPHY3_EVENT_CODE_RCVD_CONN_RESP_WAIT_TIME (0xD4) +#define MPI2_SASPHY3_EVENT_CODE_LCCONN_TIME (0xD5) +#define MPI2_SASPHY3_EVENT_CODE_SSP_TX_START_TRANSMIT (0xD6) +#define MPI2_SASPHY3_EVENT_CODE_SATA_TX_START (0xD7) +#define MPI2_SASPHY3_EVENT_CODE_SMP_TX_START_TRANSMT (0xD8) +#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_BREAK_CONN (0xD9) +#define MPI2_SASPHY3_EVENT_CODE_SSP_RX_START_RECEIVE (0xDA) +#define MPI2_SASPHY3_EVENT_CODE_SATA_RX_START_RECEIVE (0xDB) +#define MPI2_SASPHY3_EVENT_CODE_SMP_RX_START_RECEIVE (0xDC) + + +/*values for the CounterType field */ +#define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00) +#define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01) +#define MPI2_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02) + +/*values for the TimeUnits field */ +#define MPI2_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00) +#define MPI2_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01) +#define MPI2_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02) +#define MPI2_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03) + +/*values for the ThresholdFlags field */ +#define MPI2_SASPHY3_TFLAGS_PHY_RESET (0x0002) +#define MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhyEvents at runtime. 
+ */ +#ifndef MPI2_SASPHY3_PHY_EVENT_MAX +#define MPI2_SASPHY3_PHY_EVENT_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U8 + NumPhyEvents; /*0x0C */ + U8 + Reserved2; /*0x0D */ + U16 + Reserved3; /*0x0E */ + MPI2_SASPHY3_PHY_EVENT_CONFIG + PhyEventConfig[MPI2_SASPHY3_PHY_EVENT_MAX]; /*0x10 */ +} MPI2_CONFIG_PAGE_SAS_PHY_3, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_3, + Mpi2SasPhyPage3_t, *pMpi2SasPhyPage3_t; + +#define MPI2_SASPHY3_PAGEVERSION (0x00) + + +/*SAS PHY Page 4 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U16 + Reserved1; /*0x08 */ + U8 + Reserved2; /*0x0A */ + U8 + Flags; /*0x0B */ + U8 + InitialFrame[28]; /*0x0C */ +} MPI2_CONFIG_PAGE_SAS_PHY_4, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_4, + Mpi2SasPhyPage4_t, *pMpi2SasPhyPage4_t; + +#define MPI2_SASPHY4_PAGEVERSION (0x00) + +/*values for the Flags field */ +#define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02) +#define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01) + + + + +/**************************************************************************** +* SAS Port Config Pages +****************************************************************************/ + +/*SAS Port Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U8 + PortNumber; /*0x08 */ + U8 + PhysicalPort; /*0x09 */ + U8 + PortWidth; /*0x0A */ + U8 + PhysicalPortWidth; /*0x0B */ + U8 + ZoneGroup; /*0x0C */ + U8 + Reserved1; /*0x0D */ + U16 + Reserved2; /*0x0E */ + U64 + SASAddress; /*0x10 */ + U32 + DeviceInfo; /*0x18 */ + U32 + Reserved3; /*0x1C */ + U32 + Reserved4; /*0x20 */ +} MPI2_CONFIG_PAGE_SAS_PORT_0, + *PTR_MPI2_CONFIG_PAGE_SAS_PORT_0, + Mpi2SasPortPage0_t, *pMpi2SasPortPage0_t; + +#define MPI2_SASPORT0_PAGEVERSION (0x00) + +/*see mpi2_sas.h for values for SAS Port Page 0 DeviceInfo values */ + + +/**************************************************************************** +* SAS Enclosure Config Pages +****************************************************************************/ + +/*SAS Enclosure Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x08 */ + U64 EnclosureLogicalID; /*0x0C */ + U16 Flags; /*0x14 */ + U16 EnclosureHandle; /*0x16 */ + U16 NumSlots; /*0x18 */ + U16 StartSlot; /*0x1A */ + U8 ChassisSlot; /*0x1C */ + U8 EnclosureLevel; /*0x1D */ + U16 SEPDevHandle; /*0x1E */ + U8 OEMRD; /*0x20 */ + U8 Reserved1a; /*0x21 */ + U16 Reserved2; /*0x22 */ + U32 Reserved3; /*0x24 */ +} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, + *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, + Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t, + MPI26_CONFIG_PAGE_ENCLOSURE_0, + *PTR_MPI26_CONFIG_PAGE_ENCLOSURE_0, + Mpi26EnclosurePage0_t, *pMpi26EnclosurePage0_t; + +#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04) + +/*values for SAS Enclosure Page 0 Flags field */ +#define MPI26_SAS_ENCLS0_FLAGS_OEMRD_VALID (0x0080) +#define MPI26_SAS_ENCLS0_FLAGS_OEMRD_COLLECTING (0x0040) +#define MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) +#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004) +#define 
MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005) + +#define MPI26_ENCLOSURE0_PAGEVERSION (0x04) + +/*Values for Enclosure Page 0 Flags field */ +#define MPI26_ENCLS0_FLAGS_OEMRD_VALID (0x0080) +#define MPI26_ENCLS0_FLAGS_OEMRD_COLLECTING (0x0040) +#define MPI26_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) +#define MPI26_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010) +#define MPI26_ENCLS0_FLAGS_MNG_MASK (0x000F) +#define MPI26_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) +#define MPI26_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) +#define MPI26_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002) +#define MPI26_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003) +#define MPI26_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004) +#define MPI26_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005) + +/**************************************************************************** +* Log Config Page +****************************************************************************/ + +/*Log Page 0 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumLogEntries at runtime. + */ +#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES +#define MPI2_LOG_0_NUM_LOG_ENTRIES (1) +#endif + +#define MPI2_LOG_0_LOG_DATA_LENGTH (0x1C) + +typedef struct _MPI2_LOG_0_ENTRY { + U64 TimeStamp; /*0x00 */ + U32 Reserved1; /*0x08 */ + U16 LogSequence; /*0x0C */ + U16 LogEntryQualifier; /*0x0E */ + U8 VP_ID; /*0x10 */ + U8 VF_ID; /*0x11 */ + U16 Reserved2; /*0x12 */ + U8 + LogData[MPI2_LOG_0_LOG_DATA_LENGTH];/*0x14 */ +} MPI2_LOG_0_ENTRY, *PTR_MPI2_LOG_0_ENTRY, + Mpi2Log0Entry_t, *pMpi2Log0Entry_t; + +/*values for Log Page 0 LogEntry LogEntryQualifier field */ +#define MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED (0x0000) +#define MPI2_LOG_0_ENTRY_QUAL_POWER_ON_RESET (0x0001) +#define MPI2_LOG_0_ENTRY_QUAL_TIMESTAMP_UPDATE (0x0002) +#define MPI2_LOG_0_ENTRY_QUAL_MIN_IMPLEMENT_SPEC (0x8000) +#define MPI2_LOG_0_ENTRY_QUAL_MAX_IMPLEMENT_SPEC (0xFFFF) + +typedef struct _MPI2_CONFIG_PAGE_LOG_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ + U16 NumLogEntries;/*0x10 */ + U16 Reserved3; /*0x12 */ + MPI2_LOG_0_ENTRY + LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /*0x14 */ +} MPI2_CONFIG_PAGE_LOG_0, *PTR_MPI2_CONFIG_PAGE_LOG_0, + Mpi2LogPage0_t, *pMpi2LogPage0_t; + +#define MPI2_LOG_0_PAGEVERSION (0x02) + + +/**************************************************************************** +* RAID Config Page +****************************************************************************/ + +/*RAID Page 0 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumElements at runtime. 
+ */ +#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS +#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1) +#endif + +typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT { + U16 ElementFlags; /*0x00 */ + U16 VolDevHandle; /*0x02 */ + U8 HotSparePool; /*0x04 */ + U8 PhysDiskNum; /*0x05 */ + U16 PhysDiskDevHandle; /*0x06 */ +} MPI2_RAIDCONFIG0_CONFIG_ELEMENT, + *PTR_MPI2_RAIDCONFIG0_CONFIG_ELEMENT, + Mpi2RaidConfig0ConfigElement_t, + *pMpi2RaidConfig0ConfigElement_t; + +/*values for the ElementFlags field */ +#define MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F) +#define MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT (0x0000) +#define MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001) +#define MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002) +#define MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003) + + +typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 NumHotSpares; /*0x08 */ + U8 NumPhysDisks; /*0x09 */ + U8 NumVolumes; /*0x0A */ + U8 ConfigNum; /*0x0B */ + U32 Flags; /*0x0C */ + U8 ConfigGUID[24]; /*0x10 */ + U32 Reserved1; /*0x28 */ + U8 NumElements; /*0x2C */ + U8 Reserved2; /*0x2D */ + U16 Reserved3; /*0x2E */ + MPI2_RAIDCONFIG0_CONFIG_ELEMENT + ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /*0x30 */ +} MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0, + *PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0, + Mpi2RaidConfigurationPage0_t, + *pMpi2RaidConfigurationPage0_t; + +#define MPI2_RAIDCONFIG0_PAGEVERSION (0x00) + +/*values for RAID Configuration Page 0 Flags field */ +#define MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG (0x00000001) + + +/**************************************************************************** +* Driver Persistent Mapping Config Pages +****************************************************************************/ + +/*Driver Persistent Mapping Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY { + U64 PhysicalIdentifier; /*0x00 */ + U16 MappingInformation; /*0x08 */ + U16 DeviceIndex; /*0x0A */ + U32 PhysicalBitsMapping; /*0x0C */ + U32 Reserved1; /*0x10 */ +} MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY, + *PTR_MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY, + Mpi2DriverMap0Entry_t, *pMpi2DriverMap0Entry_t; + +typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY Entry; /*0x08 */ +} MPI2_CONFIG_PAGE_DRIVER_MAPPING_0, + *PTR_MPI2_CONFIG_PAGE_DRIVER_MAPPING_0, + Mpi2DriverMappingPage0_t, *pMpi2DriverMappingPage0_t; + +#define MPI2_DRIVERMAPPING0_PAGEVERSION (0x00) + +/*values for Driver Persistent Mapping Page 0 MappingInformation field */ +#define MPI2_DRVMAP0_MAPINFO_SLOT_MASK (0x07F0) +#define MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT (4) +#define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F) + + +/**************************************************************************** +* Ethernet Config Pages +****************************************************************************/ + +/*Ethernet Page 0 */ + +/*IP address (union of IPv4 and IPv6) */ +typedef union _MPI2_ETHERNET_IP_ADDR { + U32 IPv4Addr; + U32 IPv6Addr[4]; +} MPI2_ETHERNET_IP_ADDR, *PTR_MPI2_ETHERNET_IP_ADDR, + Mpi2EthernetIpAddr_t, *pMpi2EthernetIpAddr_t; + +#define MPI2_ETHERNET_HOST_NAME_LENGTH (32) + +typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 NumInterfaces; /*0x08 */ + U8 Reserved0; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Status; /*0x0C */ + U8 MediaState; /*0x10 */ + U8 Reserved2; /*0x11 */ + U16 Reserved3; /*0x12 */ + U8 MacAddress[6]; /*0x14 */ + U8 
Reserved4; /*0x1A */ + U8 Reserved5; /*0x1B */ + MPI2_ETHERNET_IP_ADDR IpAddress; /*0x1C */ + MPI2_ETHERNET_IP_ADDR SubnetMask; /*0x2C */ + MPI2_ETHERNET_IP_ADDR GatewayIpAddress;/*0x3C */ + MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /*0x4C */ + MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /*0x5C */ + MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /*0x6C */ + U8 + HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */ +} MPI2_CONFIG_PAGE_ETHERNET_0, + *PTR_MPI2_CONFIG_PAGE_ETHERNET_0, + Mpi2EthernetPage0_t, *pMpi2EthernetPage0_t; + +#define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00) + +/*values for Ethernet Page 0 Status field */ +#define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000) +#define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000) +#define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000) +#define MPI2_ETHPG0_STATUS_DEFAULT_IF (0x00000100) +#define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED (0x00000080) +#define MPI2_ETHPG0_STATUS_TELNET_ENABLED (0x00000040) +#define MPI2_ETHPG0_STATUS_SSH2_ENABLED (0x00000020) +#define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED (0x00000010) +#define MPI2_ETHPG0_STATUS_IPV6_ENABLED (0x00000008) +#define MPI2_ETHPG0_STATUS_IPV4_ENABLED (0x00000004) +#define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002) +#define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001) + +/*values for Ethernet Page 0 MediaState field */ +#define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80) +#define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00) +#define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80) + +#define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK (0x07) +#define MPI2_ETHPG0_MS_NOT_CONNECTED (0x00) +#define MPI2_ETHPG0_MS_10MBIT (0x01) +#define MPI2_ETHPG0_MS_100MBIT (0x02) +#define MPI2_ETHPG0_MS_1GBIT (0x03) + + +/*Ethernet Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved0; /*0x08 */ + U32 + Flags; /*0x0C */ + U8 + MediaState; /*0x10 */ + U8 + Reserved1; /*0x11 */ + U16 + Reserved2; /*0x12 */ + U8 + MacAddress[6]; /*0x14 */ + U8 + Reserved3; /*0x1A */ + U8 + Reserved4; /*0x1B */ + MPI2_ETHERNET_IP_ADDR + StaticIpAddress; /*0x1C */ + MPI2_ETHERNET_IP_ADDR + StaticSubnetMask; /*0x2C */ + MPI2_ETHERNET_IP_ADDR + StaticGatewayIpAddress; /*0x3C */ + MPI2_ETHERNET_IP_ADDR + StaticDNS1IpAddress; /*0x4C */ + MPI2_ETHERNET_IP_ADDR + StaticDNS2IpAddress; /*0x5C */ + U32 + Reserved5; /*0x6C */ + U32 + Reserved6; /*0x70 */ + U32 + Reserved7; /*0x74 */ + U32 + Reserved8; /*0x78 */ + U8 + HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */ +} MPI2_CONFIG_PAGE_ETHERNET_1, + *PTR_MPI2_CONFIG_PAGE_ETHERNET_1, + Mpi2EthernetPage1_t, *pMpi2EthernetPage1_t; + +#define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00) + +/*values for Ethernet Page 1 Flags field */ +#define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100) +#define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080) +#define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040) +#define MPI2_ETHPG1_FLAG_ENABLE_SSH2 (0x00000020) +#define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT (0x00000010) +#define MPI2_ETHPG1_FLAG_ENABLE_IPV6 (0x00000008) +#define MPI2_ETHPG1_FLAG_ENABLE_IPV4 (0x00000004) +#define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002) +#define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001) + +/*values for Ethernet Page 1 MediaState field */ +#define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80) +#define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00) +#define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80) + +#define MPI2_ETHPG1_MS_DATA_RATE_MASK (0x07) +#define MPI2_ETHPG1_MS_DATA_RATE_AUTO (0x00) +#define MPI2_ETHPG1_MS_DATA_RATE_10MBIT (0x01) +#define MPI2_ETHPG1_MS_DATA_RATE_100MBIT (0x02) 
+#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03) + + +/**************************************************************************** +* Extended Manufacturing Config Pages +****************************************************************************/ + +/* + *Generic structure to use for product-specific extended manufacturing pages + *(currently Extended Manufacturing Page 40 through Extended Manufacturing + *Page 60). + */ + +typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + ProductSpecificInfo; /*0x08 */ +} MPI2_CONFIG_PAGE_EXT_MAN_PS, + *PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS, + Mpi2ExtManufacturingPagePS_t, + *pMpi2ExtManufacturingPagePS_t; + +/*PageVersion should be provided by product-specific code */ + + + +/**************************************************************************** +* values for fields used by several types of PCIe Config Pages +****************************************************************************/ + +/*values for NegotiatedLinkRates fields */ +#define MPI26_PCIE_NEG_LINK_RATE_MASK_PHYSICAL (0x0F) +/*link rates used for Negotiated Physical Link Rate */ +#define MPI26_PCIE_NEG_LINK_RATE_UNKNOWN (0x00) +#define MPI26_PCIE_NEG_LINK_RATE_PHY_DISABLED (0x01) +#define MPI26_PCIE_NEG_LINK_RATE_2_5 (0x02) +#define MPI26_PCIE_NEG_LINK_RATE_5_0 (0x03) +#define MPI26_PCIE_NEG_LINK_RATE_8_0 (0x04) +#define MPI26_PCIE_NEG_LINK_RATE_16_0 (0x05) + + +/**************************************************************************** +* PCIe IO Unit Config Pages (MPI v2.6 and later) +****************************************************************************/ + +/*PCIe IO Unit Page 0 */ + +typedef struct _MPI26_PCIE_IO_UNIT0_PHY_DATA { + U8 Link; /*0x00 */ + U8 LinkFlags; /*0x01 */ + U8 PhyFlags; /*0x02 */ + U8 NegotiatedLinkRate; /*0x03 */ + U32 ControllerPhyDeviceInfo;/*0x04 */ + U16 AttachedDevHandle; /*0x08 */ + U16 ControllerDevHandle; /*0x0A */ + U32 EnumerationStatus; /*0x0C */ + U32 Reserved1; /*0x10 */ +} MPI26_PCIE_IO_UNIT0_PHY_DATA, + *PTR_MPI26_PCIE_IO_UNIT0_PHY_DATA, + Mpi26PCIeIOUnit0PhyData_t, *pMpi26PCIeIOUnit0PhyData_t; + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. 
+ */ +#ifndef MPI26_PCIE_IOUNIT0_PHY_MAX +#define MPI26_PCIE_IOUNIT0_PHY_MAX (1) +#endif + +typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x08 */ + U8 NumPhys; /*0x0C */ + U8 InitStatus; /*0x0D */ + U16 Reserved3; /*0x0E */ + MPI26_PCIE_IO_UNIT0_PHY_DATA + PhyData[MPI26_PCIE_IOUNIT0_PHY_MAX]; /*0x10 */ +} MPI26_CONFIG_PAGE_PIOUNIT_0, + *PTR_MPI26_CONFIG_PAGE_PIOUNIT_0, + Mpi26PCIeIOUnitPage0_t, *pMpi26PCIeIOUnitPage0_t; + +#define MPI26_PCIEIOUNITPAGE0_PAGEVERSION (0x00) + +/*values for PCIe IO Unit Page 0 LinkFlags */ +#define MPI26_PCIEIOUNIT0_LINKFLAGS_ENUMERATION_IN_PROGRESS (0x08) + +/*values for PCIe IO Unit Page 0 PhyFlags */ +#define MPI26_PCIEIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) + +/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + +/*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo + *values + */ + +/*values for PCIe IO Unit Page 0 EnumerationStatus */ +#define MPI26_PCIEIOUNIT0_ES_MAX_SWITCHES_EXCEEDED (0x40000000) +#define MPI26_PCIEIOUNIT0_ES_MAX_DEVICES_EXCEEDED (0x20000000) + + +/*PCIe IO Unit Page 1 */ + +typedef struct _MPI26_PCIE_IO_UNIT1_PHY_DATA { + U8 Link; /*0x00 */ + U8 LinkFlags; /*0x01 */ + U8 PhyFlags; /*0x02 */ + U8 MaxMinLinkRate; /*0x03 */ + U32 ControllerPhyDeviceInfo; /*0x04 */ + U32 Reserved1; /*0x08 */ +} MPI26_PCIE_IO_UNIT1_PHY_DATA, + *PTR_MPI26_PCIE_IO_UNIT1_PHY_DATA, + Mpi26PCIeIOUnit1PhyData_t, *pMpi26PCIeIOUnit1PhyData_t; + +/*values for LinkFlags */ +#define MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK (0x00) +#define MPI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN (0x01) +#define MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN (0x02) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. 
+ */ +#ifndef MPI26_PCIE_IOUNIT1_PHY_MAX +#define MPI26_PCIE_IOUNIT1_PHY_MAX (1) +#endif + +typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U16 ControlFlags; /*0x08 */ + U16 Reserved; /*0x0A */ + U16 AdditionalControlFlags; /*0x0C */ + U16 NVMeMaxQueueDepth; /*0x0E */ + U8 NumPhys; /*0x10 */ + U8 DMDReportPCIe; /*0x11 */ + U16 Reserved2; /*0x12 */ + MPI26_PCIE_IO_UNIT1_PHY_DATA + PhyData[MPI26_PCIE_IOUNIT1_PHY_MAX];/*0x14 */ +} MPI26_CONFIG_PAGE_PIOUNIT_1, + *PTR_MPI26_CONFIG_PAGE_PIOUNIT_1, + Mpi26PCIeIOUnitPage1_t, *pMpi26PCIeIOUnitPage1_t; + +#define MPI26_PCIEIOUNITPAGE1_PAGEVERSION (0x00) + +/*values for PCIe IO Unit Page 1 PhyFlags */ +#define MPI26_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) +#define MPI26_PCIEIOUNIT1_PHYFLAGS_ENDPOINT_ONLY (0x01) + +/*values for PCIe IO Unit Page 1 MaxMinLinkRate */ +#define MPI26_PCIEIOUNIT1_MAX_RATE_MASK (0xF0) +#define MPI26_PCIEIOUNIT1_MAX_RATE_SHIFT (4) +#define MPI26_PCIEIOUNIT1_MAX_RATE_2_5 (0x20) +#define MPI26_PCIEIOUNIT1_MAX_RATE_5_0 (0x30) +#define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40) +#define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50) + +/*values for PCIe IO Unit Page 1 DMDReportPCIe */ +#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK (0x80) +#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_1_SEC (0x00) +#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_16_SEC (0x80) +#define MPI26_PCIEIOUNIT1_DMDRPT_DELAY_TIME_MASK (0x7F) + +/*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo + *values + */ + + +/**************************************************************************** +* PCIe Switch Config Pages (MPI v2.6 and later) +****************************************************************************/ + +/*PCIe Switch Page 0 */ + +typedef struct _MPI26_CONFIG_PAGE_PSWITCH_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 PhysicalPort; /*0x08 */ + U8 Reserved1; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 DevHandle; /*0x0C */ + U16 ParentDevHandle; /*0x0E */ + U8 NumPorts; /*0x10 */ + U8 PCIeLevel; /*0x11 */ + U16 Reserved3; /*0x12 */ + U32 Reserved4; /*0x14 */ + U32 Reserved5; /*0x18 */ + U32 Reserved6; /*0x1C */ +} MPI26_CONFIG_PAGE_PSWITCH_0, *PTR_MPI26_CONFIG_PAGE_PSWITCH_0, + Mpi26PCIeSwitchPage0_t, *pMpi26PCIeSwitchPage0_t; + +#define MPI26_PCIESWITCH0_PAGEVERSION (0x00) + + +/*PCIe Switch Page 1 */ + +typedef struct _MPI26_CONFIG_PAGE_PSWITCH_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 PhysicalPort; /*0x08 */ + U8 Reserved1; /*0x09 */ + U16 Reserved2; /*0x0A */ + U8 NumPorts; /*0x0C */ + U8 PortNum; /*0x0D */ + U16 AttachedDevHandle; /*0x0E */ + U16 SwitchDevHandle; /*0x10 */ + U8 NegotiatedPortWidth; /*0x12 */ + U8 NegotiatedLinkRate; /*0x13 */ + U32 Reserved4; /*0x14 */ + U32 Reserved5; /*0x18 */ +} MPI26_CONFIG_PAGE_PSWITCH_1, *PTR_MPI26_CONFIG_PAGE_PSWITCH_1, + Mpi26PCIeSwitchPage1_t, *pMpi26PCIeSwitchPage1_t; + +#define MPI26_PCIESWITCH1_PAGEVERSION (0x00) + +/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + +/* defines for the Flags field */ +#define MPI26_PCIESWITCH1_2_RETIMER_PRESENCE (0x0002) +#define MPI26_PCIESWITCH1_RETIMER_PRESENCE (0x0001) + +/**************************************************************************** +* PCIe Device Config Pages (MPI v2.6 and later) +****************************************************************************/ + +/*PCIe Device Page 0 */ + +typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U16 Slot; /*0x08 */ + U16 EnclosureHandle; /*0x0A 
*/ + U64 WWID; /*0x0C */ + U16 ParentDevHandle; /*0x14 */ + U8 PortNum; /*0x16 */ + U8 AccessStatus; /*0x17 */ + U16 DevHandle; /*0x18 */ + U8 PhysicalPort; /*0x1A */ + U8 Reserved1; /*0x1B */ + U32 DeviceInfo; /*0x1C */ + U32 Flags; /*0x20 */ + U8 SupportedLinkRates; /*0x24 */ + U8 MaxPortWidth; /*0x25 */ + U8 NegotiatedPortWidth; /*0x26 */ + U8 NegotiatedLinkRate; /*0x27 */ + U8 EnclosureLevel; /*0x28 */ + U8 Reserved2; /*0x29 */ + U16 Reserved3; /*0x2A */ + U8 ConnectorName[4]; /*0x2C */ + U32 Reserved4; /*0x30 */ + U32 Reserved5; /*0x34 */ +} MPI26_CONFIG_PAGE_PCIEDEV_0, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_0, + Mpi26PCIeDevicePage0_t, *pMpi26PCIeDevicePage0_t; + +#define MPI26_PCIEDEVICE0_PAGEVERSION (0x01) + +/*values for PCIe Device Page 0 AccessStatus field */ +#define MPI26_PCIEDEV0_ASTATUS_NO_ERRORS (0x00) +#define MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION (0x04) +#define MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED (0x02) +#define MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED (0x07) +#define MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED (0x08) +#define MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE (0x09) +#define MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED (0x0A) +#define MPI26_PCIEDEV0_ASTATUS_UNKNOWN (0x10) + +#define MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT (0x30) +#define MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x31) +#define MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED (0x32) +#define MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED (0x33) +#define MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED (0x34) +#define MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED (0x35) +#define MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x36) +#define MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT (0x37) +#define MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS (0x38) + +#define MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX (0x3F) + +/*see mpi2_pci.h for the MPI26_PCIE_DEVINFO_ defines used for the DeviceInfo + *field + */ + +/*values for PCIe Device Page 0 Flags field*/ +#define MPI26_PCIEDEV0_FLAGS_2_RETIMER_PRESENCE (0x00020000) +#define MPI26_PCIEDEV0_FLAGS_RETIMER_PRESENCE (0x00010000) +#define MPI26_PCIEDEV0_FLAGS_UNAUTHORIZED_DEVICE (0x00008000) +#define MPI26_PCIEDEV0_FLAGS_ENABLED_FAST_PATH (0x00004000) +#define MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE (0x00002000) +#define MPI26_PCIEDEV0_FLAGS_ASYNCHRONOUS_NOTIFICATION (0x00000400) +#define MPI26_PCIEDEV0_FLAGS_ATA_SW_PRESERVATION (0x00000200) +#define MPI26_PCIEDEV0_FLAGS_UNSUPPORTED_DEVICE (0x00000100) +#define MPI26_PCIEDEV0_FLAGS_ATA_48BIT_LBA_SUPPORTED (0x00000080) +#define MPI26_PCIEDEV0_FLAGS_ATA_SMART_SUPPORTED (0x00000040) +#define MPI26_PCIEDEV0_FLAGS_ATA_NCQ_SUPPORTED (0x00000020) +#define MPI26_PCIEDEV0_FLAGS_ATA_FUA_SUPPORTED (0x00000010) +#define MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID (0x00000002) +#define MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT (0x00000001) + +/* values for PCIe Device Page 0 SupportedLinkRates field */ +#define MPI26_PCIEDEV0_LINK_RATE_16_0_SUPPORTED (0x08) +#define MPI26_PCIEDEV0_LINK_RATE_8_0_SUPPORTED (0x04) +#define MPI26_PCIEDEV0_LINK_RATE_5_0_SUPPORTED (0x02) +#define MPI26_PCIEDEV0_LINK_RATE_2_5_SUPPORTED (0x01) + +/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + + +/*PCIe Device Page 2 */ + +typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U16 DevHandle; /*0x08 */ + U8 ControllerResetTO; /* 0x0A */ + U8 Reserved1; /* 0x0B */ + U32 MaximumDataTransferSize; /*0x0C */ + U32 Capabilities; /*0x10 */ + U16 NOIOB; /* 0x14 */ + U16 ShutdownLatency; /* 0x16 */ + U16 
VendorID; /* 0x18 */ + U16 DeviceID; /* 0x1A */ + U16 SubsystemVendorID; /* 0x1C */ + U16 SubsystemID; /* 0x1E */ + U8 RevisionID; /* 0x20 */ + U8 Reserved21[3]; /* 0x21 */ +} MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2, + Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t; + +#define MPI26_PCIEDEVICE2_PAGEVERSION (0x01) + +/*defines for PCIe Device Page 2 Capabilities field */ +#define MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN (0x00000008) +#define MPI26_PCIEDEV2_CAP_SGL_FORMAT (0x00000004) +#define MPI26_PCIEDEV2_CAP_BIT_BUCKET_SUPPORT (0x00000002) +#define MPI26_PCIEDEV2_CAP_SGL_SUPPORT (0x00000001) + +/* Defines for the NOIOB field */ +#define MPI26_PCIEDEV2_NOIOB_UNSUPPORTED (0x0000) + +/**************************************************************************** +* PCIe Link Config Pages (MPI v2.6 and later) +****************************************************************************/ + +/*PCIe Link Page 1 */ + +typedef struct _MPI26_CONFIG_PAGE_PCIELINK_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 Link; /*0x08 */ + U8 Reserved1; /*0x09 */ + U16 Reserved2; /*0x0A */ + U32 CorrectableErrorCount; /*0x0C */ + U16 NonFatalErrorCount; /*0x10 */ + U16 Reserved3; /*0x12 */ + U16 FatalErrorCount; /*0x14 */ + U16 Reserved4; /*0x16 */ +} MPI26_CONFIG_PAGE_PCIELINK_1, *PTR_MPI26_CONFIG_PAGE_PCIELINK_1, + Mpi26PcieLinkPage1_t, *pMpi26PcieLinkPage1_t; + +#define MPI26_PCIELINK1_PAGEVERSION (0x00) + +/*PCIe Link Page 2 */ + +typedef struct _MPI26_PCIELINK2_LINK_EVENT { + U8 LinkEventCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 LinkEventInfo; /*0x04 */ +} MPI26_PCIELINK2_LINK_EVENT, *PTR_MPI26_PCIELINK2_LINK_EVENT, + Mpi26PcieLink2LinkEvent_t, *pMpi26PcieLink2LinkEvent_t; + +/*use MPI26_PCIELINK3_EVTCODE_ for the LinkEventCode field */ + + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumLinkEvents at runtime. 
+ */ +#ifndef MPI26_PCIELINK2_LINK_EVENT_MAX +#define MPI26_PCIELINK2_LINK_EVENT_MAX (1) +#endif + +typedef struct _MPI26_CONFIG_PAGE_PCIELINK_2 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 Link; /*0x08 */ + U8 Reserved1; /*0x09 */ + U16 Reserved2; /*0x0A */ + U8 NumLinkEvents; /*0x0C */ + U8 Reserved3; /*0x0D */ + U16 Reserved4; /*0x0E */ + MPI26_PCIELINK2_LINK_EVENT + LinkEvent[MPI26_PCIELINK2_LINK_EVENT_MAX]; /*0x10 */ +} MPI26_CONFIG_PAGE_PCIELINK_2, *PTR_MPI26_CONFIG_PAGE_PCIELINK_2, + Mpi26PcieLinkPage2_t, *pMpi26PcieLinkPage2_t; + +#define MPI26_PCIELINK2_PAGEVERSION (0x00) + +/*PCIe Link Page 3 */ + +typedef struct _MPI26_PCIELINK3_LINK_EVENT_CONFIG { + U8 LinkEventCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U8 CounterType; /*0x04 */ + U8 ThresholdWindow; /*0x05 */ + U8 TimeUnits; /*0x06 */ + U8 Reserved3; /*0x07 */ + U32 EventThreshold; /*0x08 */ + U16 ThresholdFlags; /*0x0C */ + U16 Reserved4; /*0x0E */ +} MPI26_PCIELINK3_LINK_EVENT_CONFIG, *PTR_MPI26_PCIELINK3_LINK_EVENT_CONFIG, + Mpi26PcieLink3LinkEventConfig_t, *pMpi26PcieLink3LinkEventConfig_t; + +/*values for LinkEventCode field */ +#define MPI26_PCIELINK3_EVTCODE_NO_EVENT (0x00) +#define MPI26_PCIELINK3_EVTCODE_CORRECTABLE_ERROR_RECEIVED (0x01) +#define MPI26_PCIELINK3_EVTCODE_NON_FATAL_ERROR_RECEIVED (0x02) +#define MPI26_PCIELINK3_EVTCODE_FATAL_ERROR_RECEIVED (0x03) +#define MPI26_PCIELINK3_EVTCODE_DATA_LINK_ERROR_DETECTED (0x04) +#define MPI26_PCIELINK3_EVTCODE_TRANSACTION_LAYER_ERROR_DETECTED (0x05) +#define MPI26_PCIELINK3_EVTCODE_TLP_ECRC_ERROR_DETECTED (0x06) +#define MPI26_PCIELINK3_EVTCODE_POISONED_TLP (0x07) +#define MPI26_PCIELINK3_EVTCODE_RECEIVED_NAK_DLLP (0x08) +#define MPI26_PCIELINK3_EVTCODE_SENT_NAK_DLLP (0x09) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_RECOVERY_STATE (0x0A) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_RXL0S_STATE (0x0B) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_TXL0S_STATE (0x0C) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_L1_STATE (0x0D) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_DISABLED_STATE (0x0E) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_HOT_RESET_STATE (0x0F) +#define MPI26_PCIELINK3_EVTCODE_SYSTEM_ERROR (0x10) +#define MPI26_PCIELINK3_EVTCODE_DECODE_ERROR (0x11) +#define MPI26_PCIELINK3_EVTCODE_DISPARITY_ERROR (0x12) + +/*values for the CounterType field */ +#define MPI26_PCIELINK3_COUNTER_TYPE_WRAPPING (0x00) +#define MPI26_PCIELINK3_COUNTER_TYPE_SATURATING (0x01) +#define MPI26_PCIELINK3_COUNTER_TYPE_PEAK_VALUE (0x02) + +/*values for the TimeUnits field */ +#define MPI26_PCIELINK3_TM_UNITS_10_MICROSECONDS (0x00) +#define MPI26_PCIELINK3_TM_UNITS_100_MICROSECONDS (0x01) +#define MPI26_PCIELINK3_TM_UNITS_1_MILLISECOND (0x02) +#define MPI26_PCIELINK3_TM_UNITS_10_MILLISECONDS (0x03) + +/*values for the ThresholdFlags field */ +#define MPI26_PCIELINK3_TFLAGS_EVENT_NOTIFY (0x0001) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumLinkEvents at runtime. 
+ */ +#ifndef MPI26_PCIELINK3_LINK_EVENT_MAX +#define MPI26_PCIELINK3_LINK_EVENT_MAX (1) +#endif + +typedef struct _MPI26_CONFIG_PAGE_PCIELINK_3 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 Link; /*0x08 */ + U8 Reserved1; /*0x09 */ + U16 Reserved2; /*0x0A */ + U8 NumLinkEvents; /*0x0C */ + U8 Reserved3; /*0x0D */ + U16 Reserved4; /*0x0E */ + MPI26_PCIELINK3_LINK_EVENT_CONFIG + LinkEventConfig[MPI26_PCIELINK3_LINK_EVENT_MAX]; /*0x10 */ +} MPI26_CONFIG_PAGE_PCIELINK_3, *PTR_MPI26_CONFIG_PAGE_PCIELINK_3, + Mpi26PcieLinkPage3_t, *pMpi26PcieLinkPage3_t; + +#define MPI26_PCIELINK3_PAGEVERSION (0x00) + + +#endif diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h new file mode 100644 index 000000000..33b9c3a6f --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h @@ -0,0 +1,516 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2016-2020 Broadcom Limited. All rights reserved. + * + * Name: mpi2_image.h + * Description: Contains definitions for firmware and other component images + * Creation Date: 04/02/2018 + * Version: 02.06.04 + * + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 08-01-18 02.06.00 Initial version for MPI 2.6.5. + * 08-14-18 02.06.01 Corrected define for MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 + * 08-28-18 02.06.02 Added MPI2_EXT_IMAGE_TYPE_RDE + * 09-07-18 02.06.03 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES + * 12-17-18 02.06.04 Addd MPI2_EXT_IMAGE_TYPE_PBLP + * Shorten some defines to be compatible with DOS + * 06-24-19 02.06.05 Whitespace adjustments to help with identifier + * checking tool. + * 10-02-19 02.06.06 Added MPI26_IMAGE_HEADER_SIG1_COREDUMP + * Added MPI2_FLASH_REGION_COREDUMP + */ +#ifndef MPI2_IMAGE_H +#define MPI2_IMAGE_H + + +/*FW Image Header */ +typedef struct _MPI2_FW_IMAGE_HEADER { + U32 Signature; /*0x00 */ + U32 Signature0; /*0x04 */ + U32 Signature1; /*0x08 */ + U32 Signature2; /*0x0C */ + MPI2_VERSION_UNION MPIVersion; /*0x10 */ + MPI2_VERSION_UNION FWVersion; /*0x14 */ + MPI2_VERSION_UNION NVDATAVersion; /*0x18 */ + MPI2_VERSION_UNION PackageVersion; /*0x1C */ + U16 VendorID; /*0x20 */ + U16 ProductID; /*0x22 */ + U16 ProtocolFlags; /*0x24 */ + U16 Reserved26; /*0x26 */ + U32 IOCCapabilities; /*0x28 */ + U32 ImageSize; /*0x2C */ + U32 NextImageHeaderOffset; /*0x30 */ + U32 Checksum; /*0x34 */ + U32 Reserved38; /*0x38 */ + U32 Reserved3C; /*0x3C */ + U32 Reserved40; /*0x40 */ + U32 Reserved44; /*0x44 */ + U32 Reserved48; /*0x48 */ + U32 Reserved4C; /*0x4C */ + U32 Reserved50; /*0x50 */ + U32 Reserved54; /*0x54 */ + U32 Reserved58; /*0x58 */ + U32 Reserved5C; /*0x5C */ + U32 BootFlags; /*0x60 */ + U32 FirmwareVersionNameWhat; /*0x64 */ + U8 FirmwareVersionName[32]; /*0x68 */ + U32 VendorNameWhat; /*0x88 */ + U8 VendorName[32]; /*0x8C */ + U32 PackageNameWhat; /*0x88 */ + U8 PackageName[32]; /*0x8C */ + U32 ReservedD0; /*0xD0 */ + U32 ReservedD4; /*0xD4 */ + U32 ReservedD8; /*0xD8 */ + U32 ReservedDC; /*0xDC */ + U32 ReservedE0; /*0xE0 */ + U32 ReservedE4; /*0xE4 */ + U32 ReservedE8; /*0xE8 */ + U32 ReservedEC; /*0xEC */ + U32 ReservedF0; /*0xF0 */ + U32 ReservedF4; /*0xF4 */ + U32 ReservedF8; /*0xF8 */ + U32 ReservedFC; /*0xFC */ +} MPI2_FW_IMAGE_HEADER, *PTR_MPI2_FW_IMAGE_HEADER, + Mpi2FWImageHeader_t, *pMpi2FWImageHeader_t; + +/*Signature field */ +#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00) +#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000) +#define MPI2_FW_HEADER_SIGNATURE 
(0xEA000000) +#define MPI26_FW_HEADER_SIGNATURE (0xEB000000) + +/*Signature0 field */ +#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04) +#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A) +/*Last byte is defined by architecture */ +#define MPI26_FW_HEADER_SIGNATURE0_BASE (0x5AEAA500) +#define MPI26_FW_HEADER_SIGNATURE0_ARC_0 (0x5A) +#define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00) +#define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01) +/*legacy (0x5AEAA55A) */ +#define MPI26_FW_HEADER_SIGNATURE0_ARC_3 (0x02) +#define MPI26_FW_HEADER_SIGNATURE0 \ + (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0) +#define MPI26_FW_HEADER_SIGNATURE0_3516 \ + (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1) +#define MPI26_FW_HEADER_SIGNATURE0_4008 \ + (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_3) + +/*Signature1 field */ +#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08) +#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5) +#define MPI26_FW_HEADER_SIGNATURE1 (0xA55AEAA5) + +/*Signature2 field */ +#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C) +#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA) +#define MPI26_FW_HEADER_SIGNATURE2 (0x5AA55AEA) + +/*defines for using the ProductID field */ +#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000) +#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000) + +#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00) +#define MPI2_FW_HEADER_PID_PROD_A (0x0000) +#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200) +#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700) + +#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF) +/*SAS ProductID Family bits */ +#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013) +#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014) +#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021) +#define MPI26_FW_HEADER_PID_FAMILY_3324_SAS (0x0028) +#define MPI26_FW_HEADER_PID_FAMILY_3516_SAS (0x0031) + +/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */ + +/*use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */ + +#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C) +#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30) + +#define MPI26_FW_HEADER_BOOTFLAGS_OFFSET (0x60) +#define MPI2_FW_HEADER_BOOTFLAGS_ISSI32M_FLAG (0x00000001) +#define MPI2_FW_HEADER_BOOTFLAGS_W25Q256JW_FLAG (0x00000002) +/*This image has a auto-discovery version of SPI */ +#define MPI2_FW_HEADER_BOOTFLAGS_AUTO_SPI_FLAG (0x00000004) + + +#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64) + +#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840) + +#define MPI2_FW_HEADER_SIZE (0x100) + + +/**************************************************************************** + * Component Image Format and related defines * + ****************************************************************************/ + +/*Maximum number of Hash Exclusion entries in a Component Image Header */ +#define MPI26_COMP_IMG_HDR_NUM_HASH_EXCL (4) + +/*Hash Exclusion Format */ +typedef struct _MPI26_HASH_EXCLUSION_FORMAT { + U32 Offset; /*0x00 */ + U32 Size; /*0x04 */ +} MPI26_HASH_EXCLUSION_FORMAT, + *PTR_MPI26_HASH_EXCLUSION_FORMAT, + Mpi26HashSxclusionFormat_t, + *pMpi26HashExclusionFormat_t; + +/*FW Image Header */ +typedef struct _MPI26_COMPONENT_IMAGE_HEADER { + U32 Signature0; /*0x00 */ + U32 LoadAddress; /*0x04 */ + U32 DataSize; /*0x08 */ + U32 StartAddress; /*0x0C */ + U32 Signature1; /*0x10 */ + U32 FlashOffset; /*0x14 */ + U32 FlashSize; /*0x18 */ + U32 VersionStringOffset; /*0x1C */ + U32 BuildDateStringOffset; /*0x20 */ + U32 BuildTimeStringOffset; /*0x24 */ + U32 EnvironmentVariableOffset; /*0x28 
*/ + U32 ApplicationSpecific; /*0x2C */ + U32 Signature2; /*0x30 */ + U32 HeaderSize; /*0x34 */ + U32 Crc; /*0x38 */ + U8 NotFlashImage; /*0x3C */ + U8 Compressed; /*0x3D */ + U16 Reserved3E; /*0x3E */ + U32 SecondaryFlashOffset; /*0x40 */ + U32 Reserved44; /*0x44 */ + U32 Reserved48; /*0x48 */ + MPI2_VERSION_UNION RMCInterfaceVersion; /*0x4C */ + MPI2_VERSION_UNION Reserved50; /*0x50 */ + MPI2_VERSION_UNION FWVersion; /*0x54 */ + MPI2_VERSION_UNION NvdataVersion; /*0x58 */ + MPI26_HASH_EXCLUSION_FORMAT + HashExclusion[MPI26_COMP_IMG_HDR_NUM_HASH_EXCL];/*0x5C */ + U32 NextImageHeaderOffset; /*0x7C */ + U32 Reserved80[32]; /*0x80 -- 0xFC */ +} MPI26_COMPONENT_IMAGE_HEADER, + *PTR_MPI26_COMPONENT_IMAGE_HEADER, + Mpi26ComponentImageHeader_t, + *pMpi26ComponentImageHeader_t; + + +/**** Definitions for Signature0 field ****/ +#define MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 (0xEB000042) + +/**** Definitions for Signature1 field ****/ +#define MPI26_IMAGE_HEADER_SIG1_APPLICATION (0x20505041) +#define MPI26_IMAGE_HEADER_SIG1_CBB (0x20424243) +#define MPI26_IMAGE_HEADER_SIG1_MFG (0x2047464D) +#define MPI26_IMAGE_HEADER_SIG1_BIOS (0x534F4942) +#define MPI26_IMAGE_HEADER_SIG1_HIIM (0x4D494948) +#define MPI26_IMAGE_HEADER_SIG1_HIIA (0x41494948) +#define MPI26_IMAGE_HEADER_SIG1_CPLD (0x444C5043) +#define MPI26_IMAGE_HEADER_SIG1_SPD (0x20445053) +#define MPI26_IMAGE_HEADER_SIG1_NVDATA (0x5444564E) +#define MPI26_IMAGE_HEADER_SIG1_GAS_GAUGE (0x20534147) +#define MPI26_IMAGE_HEADER_SIG1_PBLP (0x504C4250) +/* little-endian "DUMP" */ +#define MPI26_IMAGE_HEADER_SIG1_COREDUMP (0x504D5544) + +/**** Definitions for Signature2 field ****/ +#define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546) + +/**** Offsets for Image Header Fields ****/ +#define MPI26_IMAGE_HEADER_SIGNATURE0_OFFSET (0x00) +#define MPI26_IMAGE_HEADER_LOAD_ADDRESS_OFFSET (0x04) +#define MPI26_IMAGE_HEADER_DATA_SIZE_OFFSET (0x08) +#define MPI26_IMAGE_HEADER_START_ADDRESS_OFFSET (0x0C) +#define MPI26_IMAGE_HEADER_SIGNATURE1_OFFSET (0x10) +#define MPI26_IMAGE_HEADER_FLASH_OFFSET_OFFSET (0x14) +#define MPI26_IMAGE_HEADER_FLASH_SIZE_OFFSET (0x18) +#define MPI26_IMAGE_HEADER_VERSION_STRING_OFFSET_OFFSET (0x1C) +#define MPI26_IMAGE_HEADER_BUILD_DATE_STRING_OFFSET_OFFSET (0x20) +#define MPI26_IMAGE_HEADER_BUILD_TIME_OFFSET_OFFSET (0x24) +#define MPI26_IMAGE_HEADER_ENVIROMENT_VAR_OFFSET_OFFSET (0x28) +#define MPI26_IMAGE_HEADER_APPLICATION_SPECIFIC_OFFSET (0x2C) +#define MPI26_IMAGE_HEADER_SIGNATURE2_OFFSET (0x30) +#define MPI26_IMAGE_HEADER_HEADER_SIZE_OFFSET (0x34) +#define MPI26_IMAGE_HEADER_CRC_OFFSET (0x38) +#define MPI26_IMAGE_HEADER_NOT_FLASH_IMAGE_OFFSET (0x3C) +#define MPI26_IMAGE_HEADER_COMPRESSED_OFFSET (0x3D) +#define MPI26_IMAGE_HEADER_SECONDARY_FLASH_OFFSET_OFFSET (0x40) +#define MPI26_IMAGE_HEADER_RMC_INTERFACE_VER_OFFSET (0x4C) +#define MPI26_IMAGE_HEADER_COMPONENT_IMAGE_VER_OFFSET (0x54) +#define MPI26_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5C) +#define MPI26_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7C) + + +#define MPI26_IMAGE_HEADER_SIZE (0x100) + + +/*Extended Image Header */ +typedef struct _MPI2_EXT_IMAGE_HEADER { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 Checksum; /*0x04 */ + U32 ImageSize; /*0x08 */ + U32 NextImageHeaderOffset; /*0x0C */ + U32 PackageVersion; /*0x10 */ + U32 Reserved3; /*0x14 */ + U32 Reserved4; /*0x18 */ + U32 Reserved5; /*0x1C */ + U8 IdentifyString[32]; /*0x20 */ +} MPI2_EXT_IMAGE_HEADER, *PTR_MPI2_EXT_IMAGE_HEADER, + Mpi2ExtImageHeader_t, *pMpi2ExtImageHeader_t; 
+ +/*useful offsets */ +#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00) +#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08) +#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C) +#define MPI2_EXT_IMAGE_PACKAGEVERSION_OFFSET (0x10) + +#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40) + +/*defines for the ImageType field */ +#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00) +#define MPI2_EXT_IMAGE_TYPE_FW (0x01) +#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03) +#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04) +#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05) +#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06) +#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07) +#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08) +#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09) +#define MPI2_EXT_IMAGE_TYPE_RDE (0x0A) +#define MPI2_EXT_IMAGE_TYPE_PBLP (0x0B) +#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80) +#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF) + +#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC) + +/*FLASH Layout Extended Image Data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check RegionsPerLayout at runtime. + */ +#ifndef MPI2_FLASH_NUMBER_OF_REGIONS +#define MPI2_FLASH_NUMBER_OF_REGIONS (1) +#endif + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumberOfLayouts at runtime. + */ +#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS +#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1) +#endif + +typedef struct _MPI2_FLASH_REGION { + U8 RegionType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 RegionOffset; /*0x04 */ + U32 RegionSize; /*0x08 */ + U32 Reserved3; /*0x0C */ +} MPI2_FLASH_REGION, *PTR_MPI2_FLASH_REGION, + Mpi2FlashRegion_t, *pMpi2FlashRegion_t; + +typedef struct _MPI2_FLASH_LAYOUT { + U32 FlashSize; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U32 Reserved3; /*0x0C */ + MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS]; /*0x10 */ +} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT, + Mpi2FlashLayout_t, *pMpi2FlashLayout_t; + +typedef struct _MPI2_FLASH_LAYOUT_DATA { + U8 ImageRevision; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 SizeOfRegion; /*0x02 */ + U8 Reserved2; /*0x03 */ + U16 NumberOfLayouts; /*0x04 */ + U16 RegionsPerLayout; /*0x06 */ + U16 MinimumSectorAlignment; /*0x08 */ + U16 Reserved3; /*0x0A */ + U32 Reserved4; /*0x0C */ + MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS]; /*0x10 */ +} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA, + Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t; + +/*defines for the RegionType field */ +#define MPI2_FLASH_REGION_UNUSED (0x00) +#define MPI2_FLASH_REGION_FIRMWARE (0x01) +#define MPI2_FLASH_REGION_BIOS (0x02) +#define MPI2_FLASH_REGION_NVDATA (0x03) +#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05) +#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06) +#define MPI2_FLASH_REGION_CONFIG_1 (0x07) +#define MPI2_FLASH_REGION_CONFIG_2 (0x08) +#define MPI2_FLASH_REGION_MEGARAID (0x09) +#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A) +#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK) +#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D) +#define MPI2_FLASH_REGION_SBR (0x0E) +#define MPI2_FLASH_REGION_SBR_BACKUP (0x0F) +#define MPI2_FLASH_REGION_HIIM (0x10) +#define MPI2_FLASH_REGION_HIIA (0x11) +#define MPI2_FLASH_REGION_CTLR (0x12) +#define MPI2_FLASH_REGION_IMR_FIRMWARE (0x13) +#define MPI2_FLASH_REGION_MR_NVDATA (0x14) +#define MPI2_FLASH_REGION_CPLD (0x15) +#define MPI2_FLASH_REGION_PSOC (0x16) +#define 
MPI2_FLASH_REGION_COREDUMP (0x17) + +/*ImageRevision */ +#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00) + +/*Supported Devices Extended Image Data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumberOfDevices at runtime. + */ +#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES +#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1) +#endif + +typedef struct _MPI2_SUPPORTED_DEVICE { + U16 DeviceID; /*0x00 */ + U16 VendorID; /*0x02 */ + U16 DeviceIDMask; /*0x04 */ + U16 Reserved1; /*0x06 */ + U8 LowPCIRev; /*0x08 */ + U8 HighPCIRev; /*0x09 */ + U16 Reserved2; /*0x0A */ + U32 Reserved3; /*0x0C */ +} MPI2_SUPPORTED_DEVICE, *PTR_MPI2_SUPPORTED_DEVICE, + Mpi2SupportedDevice_t, *pMpi2SupportedDevice_t; + +typedef struct _MPI2_SUPPORTED_DEVICES_DATA { + U8 ImageRevision; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 NumberOfDevices; /*0x02 */ + U8 Reserved2; /*0x03 */ + U32 Reserved3; /*0x04 */ + MPI2_SUPPORTED_DEVICE + SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */ +} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA, + Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t; + +/*ImageRevision */ +#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00) + +/*Init Extended Image Data */ + +typedef struct _MPI2_INIT_IMAGE_FOOTER { + U32 BootFlags; /*0x00 */ + U32 ImageSize; /*0x04 */ + U32 Signature0; /*0x08 */ + U32 Signature1; /*0x0C */ + U32 Signature2; /*0x10 */ + U32 ResetVector; /*0x14 */ +} MPI2_INIT_IMAGE_FOOTER, *PTR_MPI2_INIT_IMAGE_FOOTER, + Mpi2InitImageFooter_t, *pMpi2InitImageFooter_t; + +/*defines for the BootFlags field */ +#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00) + +/*defines for the ImageSize field */ +#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04) + +/*defines for the Signature0 field */ +#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08) +#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA) + +/*defines for the Signature1 field */ +#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C) +#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5) + +/*defines for the Signature2 field */ +#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10) +#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A) + +/*Signature fields as individual bytes */ +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A) + +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5) + +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A) + +/*defines for the ResetVector field */ +#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14) + + +/* Encrypted Hash Extended Image Data */ + +typedef struct _MPI25_ENCRYPTED_HASH_ENTRY { + U8 HashImageType; /*0x00 */ + U8 HashAlgorithm; /*0x01 */ + U8 EncryptionAlgorithm; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 Reserved2; /*0x04 */ + U32 EncryptedHash[1]; /*0x08 */ /* variable length */ +} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY, +Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t; + +/* values for HashImageType */ +#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00) +#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01) +#define MPI25_HASH_IMAGE_TYPE_BIOS (0x02) + +#define MPI26_HASH_IMAGE_TYPE_UNUSED 
(0x00) +#define MPI26_HASH_IMAGE_TYPE_FIRMWARE (0x01) +#define MPI26_HASH_IMAGE_TYPE_BIOS (0x02) +#define MPI26_HASH_IMAGE_TYPE_KEY_HASH (0x03) + +/* values for HashAlgorithm */ +#define MPI25_HASH_ALGORITHM_UNUSED (0x00) +#define MPI25_HASH_ALGORITHM_SHA256 (0x01) + +#define MPI26_HASH_ALGORITHM_VER_MASK (0xE0) +#define MPI26_HASH_ALGORITHM_VER_NONE (0x00) +#define MPI26_HASH_ALGORITHM_VER_SHA1 (0x20) +#define MPI26_HASH_ALGORITHM_VER_SHA2 (0x40) +#define MPI26_HASH_ALGORITHM_VER_SHA3 (0x60) +#define MPI26_HASH_ALGORITHM_SIZE_MASK (0x1F) +#define MPI26_HASH_ALGORITHM_SIZE_256 (0x01) +#define MPI26_HASH_ALGORITHM_SIZE_512 (0x02) + + +/* values for EncryptionAlgorithm */ +#define MPI25_ENCRYPTION_ALG_UNUSED (0x00) +#define MPI25_ENCRYPTION_ALG_RSA256 (0x01) + +#define MPI26_ENCRYPTION_ALG_UNUSED (0x00) +#define MPI26_ENCRYPTION_ALG_RSA256 (0x01) +#define MPI26_ENCRYPTION_ALG_RSA512 (0x02) +#define MPI26_ENCRYPTION_ALG_RSA1024 (0x03) +#define MPI26_ENCRYPTION_ALG_RSA2048 (0x04) +#define MPI26_ENCRYPTION_ALG_RSA4096 (0x05) + +typedef struct _MPI25_ENCRYPTED_HASH_DATA { + U8 ImageVersion; /*0x00 */ + U8 NumHash; /*0x01 */ + U16 Reserved1; /*0x02 */ + U32 Reserved2; /*0x04 */ + MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /*0x08 */ +} MPI25_ENCRYPTED_HASH_DATA, *PTR_MPI25_ENCRYPTED_HASH_DATA, +Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t; + + +#endif /* MPI2_IMAGE_H */ diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h new file mode 100644 index 000000000..8f1b903fe --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h @@ -0,0 +1,591 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_init.h + * Title: MPI SCSI initiator mode messages and structures + * Creation Date: June 23, 2006 + * + * mpi2_init.h Version: 02.00.21 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t. + * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines. + * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention. + * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY. + * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t. + * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO + * Control field Task Attribute flags. + * Moved LUN field defines to mpi2.h becasue they are + * common to many structures. + * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to + * Query Asynchronous Event. + * Defined two new bits in the SlotStatus field of the SCSI + * Enclosure Processor Request and Reply. + * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for + * both SCSI IO Error Reply and SCSI Task Management Reply. + * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY. + * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. + * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it. 
+ * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request. + * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define. + * 11-18-11 02.00.12 Incorporating additions for MPI v2.5. + * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command + * Priority to match SAM-4. + * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY. + * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION. + * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY, + * replacing the Reserved4 field. + * 11-18-14 02.00.16 Updated copyright information. + * 03-16-15 02.00.17 Updated for MPI v2.6. + * Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH. + * Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and + * MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF. + * 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset. + * 12-18-15 02.00.19 Added EEDPObservedValue added to SCSI IO Reply message. + * 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message. + * 01-21-16 02.00.21 Modified MPI26_SCSITASKMGMT_MSGFLAGS_PCIE* defines to + * be unique within first 32 characters. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_INIT_H +#define MPI2_INIT_H + +/***************************************************************************** +* +* SCSI Initiator Messages +* +*****************************************************************************/ + +/**************************************************************************** +* SCSI IO messages and associated structures +****************************************************************************/ + +typedef struct _MPI2_SCSI_IO_CDB_EEDP32 { + U8 CDB[20]; /*0x00 */ + __be32 PrimaryReferenceTag; /*0x14 */ + U16 PrimaryApplicationTag; /*0x18 */ + U16 PrimaryApplicationTagMask; /*0x1A */ + U32 TransferLength; /*0x1C */ +} MPI2_SCSI_IO_CDB_EEDP32, *PTR_MPI2_SCSI_IO_CDB_EEDP32, + Mpi2ScsiIoCdbEedp32_t, *pMpi2ScsiIoCdbEedp32_t; + +/*MPI v2.0 CDB field */ +typedef union _MPI2_SCSI_IO_CDB_UNION { + U8 CDB32[32]; + MPI2_SCSI_IO_CDB_EEDP32 EEDP32; + MPI2_SGE_SIMPLE_UNION SGE; +} MPI2_SCSI_IO_CDB_UNION, *PTR_MPI2_SCSI_IO_CDB_UNION, + Mpi2ScsiIoCdb_t, *pMpi2ScsiIoCdb_t; + +/*MPI v2.0 SCSI IO Request Message */ +typedef struct _MPI2_SCSI_IO_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U32 SenseBufferLowAddress; /*0x0C */ + U16 SGLFlags; /*0x10 */ + U8 SenseBufferLength; /*0x12 */ + U8 Reserved4; /*0x13 */ + U8 SGLOffset0; /*0x14 */ + U8 SGLOffset1; /*0x15 */ + U8 SGLOffset2; /*0x16 */ + U8 SGLOffset3; /*0x17 */ + U32 SkipCount; /*0x18 */ + U32 DataLength; /*0x1C */ + U32 BidirectionalDataLength; /*0x20 */ + U16 IoFlags; /*0x24 */ + U16 EEDPFlags; /*0x26 */ + U32 EEDPBlockSize; /*0x28 */ + U32 SecondaryReferenceTag; /*0x2C */ + U16 SecondaryApplicationTag; /*0x30 */ + U16 ApplicationTagTranslationMask; /*0x32 */ + U8 LUN[8]; /*0x34 */ + U32 Control; /*0x3C */ + MPI2_SCSI_IO_CDB_UNION CDB; /*0x40 */ + +#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */ + MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion; +#endif + + MPI2_SGE_IO_UNION SGL; /*0x60 */ + +} MPI2_SCSI_IO_REQUEST, *PTR_MPI2_SCSI_IO_REQUEST, + Mpi2SCSIIORequest_t, *pMpi2SCSIIORequest_t; + +/*SCSI IO MsgFlags bits */ + +/*MsgFlags for SenseBufferAddressSpace */ +#define MPI2_SCSIIO_MSGFLAGS_MASK_SENSE_ADDR (0x0C) +#define 
MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00) +#define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04) +#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08) +#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C) +#define MPI26_SCSIIO_MSGFLAGS_IOCCTL_SENSE_ADDR (0x08) + +/*SCSI IO SGLFlags bits */ + +/*base values for Data Location Address Space */ +#define MPI2_SCSIIO_SGLFLAGS_ADDR_MASK (0x0C) +#define MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR (0x00) +#define MPI2_SCSIIO_SGLFLAGS_IOCDDR_ADDR (0x04) +#define MPI2_SCSIIO_SGLFLAGS_IOCPLB_ADDR (0x08) +#define MPI2_SCSIIO_SGLFLAGS_IOCPLBNTA_ADDR (0x0C) + +/*base values for Type */ +#define MPI2_SCSIIO_SGLFLAGS_TYPE_MASK (0x03) +#define MPI2_SCSIIO_SGLFLAGS_TYPE_MPI (0x00) +#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE32 (0x01) +#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE64 (0x02) + +/*shift values for each sub-field */ +#define MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT (12) +#define MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT (8) +#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4) +#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0) + +/*number of SGLOffset fields */ +#define MPI2_SCSIIO_NUM_SGLOFFSETS (4) + +/*SCSI IO IoFlags bits */ + +/*Large CDB Address Space */ +#define MPI2_SCSIIO_CDB_ADDR_MASK (0x6000) +#define MPI2_SCSIIO_CDB_ADDR_SYSTEM (0x0000) +#define MPI2_SCSIIO_CDB_ADDR_IOCDDR (0x2000) +#define MPI2_SCSIIO_CDB_ADDR_IOCPLB (0x4000) +#define MPI2_SCSIIO_CDB_ADDR_IOCPLBNTA (0x6000) + +#define MPI2_SCSIIO_IOFLAGS_LARGE_CDB (0x1000) +#define MPI2_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800) +#define MPI2_SCSIIO_IOFLAGS_MULTICAST (0x0400) +#define MPI2_SCSIIO_IOFLAGS_CMD_DETERMINES_DATA_DIR (0x0200) +#define MPI2_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF) + +/*SCSI IO EEDPFlags bits */ + +#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000) +#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_REFTAG (0x4000) +#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG (0x2000) +#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_APPTAG (0x1000) + +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100) + +#define MPI2_SCSIIO_EEDPFLAGS_PASSTHRU_REFTAG (0x0008) + +#define MPI2_SCSIIO_EEDPFLAGS_MASK_OP (0x0007) +#define MPI2_SCSIIO_EEDPFLAGS_NOOP_OP (0x0000) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_OP (0x0001) +#define MPI2_SCSIIO_EEDPFLAGS_STRIP_OP (0x0002) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) +#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004) +#define MPI2_SCSIIO_EEDPFLAGS_REPLACE_OP (0x0006) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REGEN_OP (0x0007) + +/*SCSI IO LUN fields: use MPI2_LUN_ from mpi2.h */ + +/*SCSI IO Control bits */ +#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_MASK (0xFC000000) +#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26) + +#define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000) +#define MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION (24) +#define MPI2_SCSIIO_CONTROL_NODATATRANSFER (0x00000000) +#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000) +#define MPI2_SCSIIO_CONTROL_READ (0x02000000) +#define MPI2_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000) + +#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800) +#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11) +/*alternate name for the previous field; called Command Priority in SAM-4 */ +#define MPI2_SCSIIO_CONTROL_CMDPRI_MASK (0x00007800) +#define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT (11) + +#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700) +#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000) +#define MPI2_SCSIIO_CONTROL_HEADOFQ (0x00000100) +#define MPI2_SCSIIO_CONTROL_ORDEREDQ 
(0x00000200) +#define MPI2_SCSIIO_CONTROL_ACAQ (0x00000400) + +#define MPI2_SCSIIO_CONTROL_TLR_MASK (0x000000C0) +#define MPI2_SCSIIO_CONTROL_NO_TLR (0x00000000) +#define MPI2_SCSIIO_CONTROL_TLR_ON (0x00000040) +#define MPI2_SCSIIO_CONTROL_TLR_OFF (0x00000080) + +/*MPI v2.5 CDB field */ +typedef union _MPI25_SCSI_IO_CDB_UNION { + U8 CDB32[32]; + MPI2_SCSI_IO_CDB_EEDP32 EEDP32; + MPI2_IEEE_SGE_SIMPLE64 SGE; +} MPI25_SCSI_IO_CDB_UNION, *PTR_MPI25_SCSI_IO_CDB_UNION, + Mpi25ScsiIoCdb_t, *pMpi25ScsiIoCdb_t; + +/*MPI v2.5/2.6 SCSI IO Request Message */ +typedef struct _MPI25_SCSI_IO_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U32 SenseBufferLowAddress; /*0x0C */ + U8 DMAFlags; /*0x10 */ + U8 Reserved5; /*0x11 */ + U8 SenseBufferLength; /*0x12 */ + U8 Reserved4; /*0x13 */ + U8 SGLOffset0; /*0x14 */ + U8 SGLOffset1; /*0x15 */ + U8 SGLOffset2; /*0x16 */ + U8 SGLOffset3; /*0x17 */ + U32 SkipCount; /*0x18 */ + U32 DataLength; /*0x1C */ + U32 BidirectionalDataLength; /*0x20 */ + U16 IoFlags; /*0x24 */ + U16 EEDPFlags; /*0x26 */ + U16 EEDPBlockSize; /*0x28 */ + U16 Reserved6; /*0x2A */ + U32 SecondaryReferenceTag; /*0x2C */ + U16 SecondaryApplicationTag; /*0x30 */ + U16 ApplicationTagTranslationMask; /*0x32 */ + U8 LUN[8]; /*0x34 */ + U32 Control; /*0x3C */ + MPI25_SCSI_IO_CDB_UNION CDB; /*0x40 */ + +#ifdef MPI25_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */ + MPI25_SCSI_IO_VENDOR_UNIQUE VendorRegion; +#endif + + MPI25_SGE_IO_UNION SGL; /*0x60 */ + +} MPI25_SCSI_IO_REQUEST, *PTR_MPI25_SCSI_IO_REQUEST, + Mpi25SCSIIORequest_t, *pMpi25SCSIIORequest_t; + +/*use MPI2_SCSIIO_MSGFLAGS_ defines for the MsgFlags field */ + +/*Defines for the DMAFlags field + * Each setting affects 4 SGLS, from SGL0 to SGL3. 
+ * D = Data + * C = Cache DIF + * I = Interleaved + * H = Host DIF + */ +#define MPI25_SCSIIO_DMAFLAGS_OP_MASK (0x0F) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_D (0x00) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_C (0x01) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_I (0x02) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_C (0x03) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_I (0x04) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_I_I (0x05) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_C (0x06) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_I (0x07) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_I_I (0x08) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_I_I_I (0x09) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_D (0x0A) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_C (0x0B) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_I (0x0C) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_C (0x0D) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_I (0x0E) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_I_I (0x0F) + +/*number of SGLOffset fields */ +#define MPI25_SCSIIO_NUM_SGLOFFSETS (4) + +/*defines for the IoFlags field */ +#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000) +#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000) +#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000) + +#define MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH (0x2000) +#define MPI25_SCSIIO_IOFLAGS_LARGE_CDB (0x1000) +#define MPI25_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800) +#define MPI26_SCSIIO_IOFLAGS_PORT_REQUEST (0x0400) +#define MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF) + +/*MPI v2.5 defines for the EEDPFlags bits */ +/*use MPI2_SCSIIO_EEDPFLAGS_ defines for the other EEDPFlags bits */ +#define MPI25_SCSIIO_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0) +#define MPI25_SCSIIO_EEDPFLAGS_COMPATIBLE_MODE (0x0000) +#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040) +#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE (0x0080) +#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_REFTAG_DISABLE_MODE (0x00C0) + +#define MPI25_SCSIIO_EEDPFLAGS_HOST_GUARD_METHOD_MASK (0x0030) +#define MPI25_SCSIIO_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000) +#define MPI25_SCSIIO_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010) + +/*use MPI2_LUN_ defines from mpi2.h for the LUN field */ + +/*use MPI2_SCSIIO_CONTROL_ defines for the Control field */ + +/*NOTE: The SCSI IO Reply is nearly the same for MPI 2.0 and MPI 2.5, so + * MPI2_SCSI_IO_REPLY is used for both. 
+ */ + +/*SCSI IO Error Reply Message */ +typedef struct _MPI2_SCSI_IO_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U8 SCSIStatus; /*0x0C */ + U8 SCSIState; /*0x0D */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 TransferCount; /*0x14 */ + U32 SenseCount; /*0x18 */ + U32 ResponseInfo; /*0x1C */ + U16 TaskTag; /*0x20 */ + U16 SCSIStatusQualifier; /* 0x22 */ + U32 BidirectionalTransferCount; /*0x24 */ + /* MPI 2.5+ only; Reserved in MPI 2.0 */ + U32 EEDPErrorOffset; /* 0x28 */ + /* MPI 2.5+ only; Reserved in MPI 2.0 */ + U16 EEDPObservedAppTag; /* 0x2C */ + /* MPI 2.5+ only; Reserved in MPI 2.0 */ + U16 EEDPObservedGuard; /* 0x2E */ + /* MPI 2.5+ only; Reserved in MPI 2.0 */ + U32 EEDPObservedRefTag; /* 0x30 */ +} MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY, + Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t; + +/*SCSI IO Reply MsgFlags bits */ +#define MPI26_SCSIIO_REPLY_MSGFLAGS_REFTAG_OBSERVED_VALID (0x01) +#define MPI26_SCSIIO_REPLY_MSGFLAGS_GUARD_OBSERVED_VALID (0x02) +#define MPI26_SCSIIO_REPLY_MSGFLAGS_APPTAG_OBSERVED_VALID (0x04) + +/*SCSI IO Reply SCSIStatus values (SAM-4 status codes) */ + +#define MPI2_SCSI_STATUS_GOOD (0x00) +#define MPI2_SCSI_STATUS_CHECK_CONDITION (0x02) +#define MPI2_SCSI_STATUS_CONDITION_MET (0x04) +#define MPI2_SCSI_STATUS_BUSY (0x08) +#define MPI2_SCSI_STATUS_INTERMEDIATE (0x10) +#define MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14) +#define MPI2_SCSI_STATUS_RESERVATION_CONFLICT (0x18) +#define MPI2_SCSI_STATUS_COMMAND_TERMINATED (0x22) /*obsolete */ +#define MPI2_SCSI_STATUS_TASK_SET_FULL (0x28) +#define MPI2_SCSI_STATUS_ACA_ACTIVE (0x30) +#define MPI2_SCSI_STATUS_TASK_ABORTED (0x40) + +/*SCSI IO Reply SCSIState flags */ + +#define MPI2_SCSI_STATE_RESPONSE_INFO_VALID (0x10) +#define MPI2_SCSI_STATE_TERMINATED (0x08) +#define MPI2_SCSI_STATE_NO_SCSI_STATUS (0x04) +#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02) +#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01) + +/*masks and shifts for the ResponseInfo field */ + +#define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF) +#define MPI2_SCSI_RI_SHIFT_REASONCODE (0) + +#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF) + +/**************************************************************************** +* SCSI Task Management messages +****************************************************************************/ + +/*SCSI Task Management Request Message */ +typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U8 Reserved1; /*0x04 */ + U8 TaskType; /*0x05 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U8 LUN[8]; /*0x0C */ + U32 Reserved4[7]; /*0x14 */ + U16 TaskMID; /*0x30 */ + U16 Reserved5; /*0x32 */ +} MPI2_SCSI_TASK_MANAGE_REQUEST, + *PTR_MPI2_SCSI_TASK_MANAGE_REQUEST, + Mpi2SCSITaskManagementRequest_t, + *pMpi2SCSITaskManagementRequest_t; + +/*TaskType values */ + +#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01) +#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02) +#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03) +#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05) +#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06) +#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07) +#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08) +#define 
MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09) +#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A) + +/*obsolete TaskType name */ +#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION \ + (MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT) + +/*MsgFlags bits */ + +#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18) +#define MPI26_SCSITASKMGMT_MSGFLAGS_HOT_RESET_PCIE (0x00) +#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00) +#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08) +#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10) + +#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01) +#define MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE (0x18) + +/*SCSI Task Management Reply Message */ +typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U8 ResponseCode; /*0x04 */ + U8 TaskType; /*0x05 */ + U8 Reserved1; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 TerminationCount; /*0x14 */ + U32 ResponseInfo; /*0x18 */ +} MPI2_SCSI_TASK_MANAGE_REPLY, + *PTR_MPI2_SCSI_TASK_MANAGE_REPLY, + Mpi2SCSITaskManagementReply_t, *pMpi2SCSIManagementReply_t; + +/*ResponseCode values */ + +#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00) +#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02) +#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04) +#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05) +#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08) +#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09) +#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A) +#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80) + +/*masks and shifts for the ResponseInfo field */ + +#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF) +#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0) +#define MPI2_SCSITASKMGMT_RI_MASK_ARI2 (0x0000FF00) +#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2 (8) +#define MPI2_SCSITASKMGMT_RI_MASK_ARI1 (0x00FF0000) +#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1 (16) +#define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000) +#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24) + +/**************************************************************************** +* SCSI Enclosure Processor messages +****************************************************************************/ + +/*SCSI Enclosure Processor Request Message */ +typedef struct _MPI2_SEP_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U8 Action; /*0x04 */ + U8 Flags; /*0x05 */ + U8 Reserved1; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U32 SlotStatus; /*0x0C */ + U32 Reserved3; /*0x10 */ + U32 Reserved4; /*0x14 */ + U32 Reserved5; /*0x18 */ + U16 Slot; /*0x1C */ + U16 EnclosureHandle; /*0x1E */ +} MPI2_SEP_REQUEST, *PTR_MPI2_SEP_REQUEST, + Mpi2SepRequest_t, *pMpi2SepRequest_t; + +/*Action defines */ +#define MPI2_SEP_REQ_ACTION_WRITE_STATUS (0x00) +#define MPI2_SEP_REQ_ACTION_READ_STATUS (0x01) + +/*Flags defines */ +#define MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00) +#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01) + +/*SlotStatus defines */ +#define MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF (0x00080000) +#define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000) +#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) +#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200) +#define 
MPI2_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100) +#define MPI2_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080) +#define MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040) +#define MPI2_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010) +#define MPI2_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008) +#define MPI2_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004) +#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002) +#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001) + +/*SCSI Enclosure Processor Reply Message */ +typedef struct _MPI2_SEP_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U8 Action; /*0x04 */ + U8 Flags; /*0x05 */ + U8 Reserved1; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 SlotStatus; /*0x14 */ + U32 Reserved4; /*0x18 */ + U16 Slot; /*0x1C */ + U16 EnclosureHandle; /*0x1E */ +} MPI2_SEP_REPLY, *PTR_MPI2_SEP_REPLY, + Mpi2SepReply_t, *pMpi2SepReply_t; + +/*SlotStatus defines */ +#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF (0x00080000) +#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000) +#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) +#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200) +#define MPI2_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100) +#define MPI2_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080) +#define MPI2_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040) +#define MPI2_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010) +#define MPI2_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008) +#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004) +#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002) +#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001) + +#endif diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h new file mode 100644 index 000000000..2c5711517 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h @@ -0,0 +1,1811 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_ioc.h + * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages + * Creation Date: October 11, 2006 + * + * mpi2_ioc.h Version: 02.00.37 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to + * MaxTargets. + * Added TotalImageSize field to FWDownload Request. + * Added reserved words to FWUpload Request. + * 06-26-07 02.00.02 Added IR Configuration Change List Event. + * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit + * request and replaced it with + * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth. + * Replaced the MinReplyQueueDepth field of the IOCFacts + * reply with MaxReplyDescriptorPostQueueDepth. + * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum + * depth for the Reply Descriptor Post Queue. + * Added SASAddress field to Initiator Device Table + * Overflow Event data. 
+ * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
+ * for SAS Initiator Device Status Change Event data.
+ * Modified Reason Code defines for SAS Topology Change
+ * List Event data, including adding a bit for PHY Vacant
+ * status, and adding a mask for the Reason Code.
+ * Added define for
+ * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
+ * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
+ * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
+ * the IOCFacts Reply.
+ * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Moved MPI2_VERSION_UNION to mpi2.h.
+ * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
+ * instead of enables, and added SASBroadcastPrimitiveMasks
+ * field.
+ * Added Log Entry Added Event and related structure.
+ * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
+ * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
+ * Added MaxVolumes and MaxPersistentEntries fields to
+ * IOCFacts reply.
+ * Added ProtocalFlags and IOCCapabilities fields to
+ * MPI2_FW_IMAGE_HEADER.
+ * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
+ * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
+ * a U16 (from a U32).
+ * Removed extra 's' from EventMasks name.
+ * 06-27-08 02.00.08 Fixed an offset in a comment.
+ * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
+ * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
+ * renamed MinReplyFrameSize to ReplyFrameSize.
+ * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
+ * Added two new RAIDOperation values for Integrated RAID
+ * Operations Status Event data.
+ * Added four new IR Configuration Change List Event data
+ * ReasonCode values.
+ * Added two new ReasonCode defines for SAS Device Status
+ * Change Event data.
+ * Added three new DiscoveryStatus bits for the SAS
+ * Discovery event data.
+ * Added Multiplexing Status Change bit to the PhyStatus
+ * field of the SAS Topology Change List event data.
+ * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
+ * BootFlags are now product-specific.
+ * Added defines for the individual signature bytes
+ * for MPI2_INIT_IMAGE_FOOTER.
+ * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
+ * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
+ * define.
+ * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
+ * define.
+ * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
+ * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
+ * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
+ * Added two new reason codes for SAS Device Status Change
+ * Event.
+ * Added new event: SAS PHY Counter.
+ * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
+ * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Added new product id family for 2208.
+ * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ * Added Host Based Discovery Phy Event data.
+ * Added defines for ProductID Product field
+ * (MPI2_FW_HEADER_PID_).
+ * Modified values for SAS ProductID Family
+ * (MPI2_FW_HEADER_PID_FAMILY_).
+ * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines.
+ * Added PowerManagementControl Request structures and
+ * defines.
+ * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
+ * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
+ * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
+ * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added
+ * SASNotifyPrimitiveMasks field to
+ * MPI2_EVENT_NOTIFICATION_REQUEST.
+ * Added Temperature Threshold Event.
+ * Added Host Message Event.
+ * Added Send Host Message request and reply.
+ * 05-25-11 02.00.18 For Extended Image Header, added
+ * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
+ * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
+ * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
+ * 08-24-11 02.00.19 Added PhysicalPort field to
+ * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
+ * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
+ * 11-18-11 02.00.20 Incorporating additions for MPI v2.5.
+ * 03-29-12 02.00.21 Added a product specific range to event values.
+ * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE.
+ * Added ElapsedSeconds field to
+ * MPI2_EVENT_DATA_IR_OPERATION_STATUS.
+ * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
+ * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY.
+ * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
+ * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
+ * Added Encrypted Hash Extended Image.
+ * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS.
+ * 11-18-14 02.00.25 Updated copyright information.
+ * 03-16-15 02.00.26 Updated for MPI v2.6.
+ * Added MPI2_EVENT_ACTIVE_CABLE_EXCEPTION and
+ * MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT.
+ * Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and
+ * MPI26_FW_HEADER_PID_FAMILY_3516_SAS.
+ * Added MPI26_CTRL_OP_SHUTDOWN.
+ * 08-25-15 02.00.27 Added IC ARCH Class based signature defines.
+ * Added MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED event.
+ * Added ConfigurationFlags field to IOCInit message to
+ * support NVMe SGL format control.
+ * Added PCIe SRIOV support.
+ * 02-17-16 02.00.28 Added SAS 4 22.5 gbs speed support.
+ * Added PCIe 4 16.0 GT/sec speed support.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * 07-01-16 02.00.29 Added Archclass for 4008 product.
+ * Added IOCException MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED
+ * 08-23-16 02.00.30 Added new defines for the ImageType field of FWDownload
+ * Request Message.
+ * Added new defines for the ImageType field of FWUpload
+ * Request Message.
+ * Added new values for the RegionType field in the Layout
+ * Data sections of the FLASH Layout Extended Image Data.
+ * Added new defines for the ReasonCode field of
+ * Active Cable Exception Event.
+ * Added MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE and
+ * MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE.
+ * 11-23-16 02.00.31 Added MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR and
+ * MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR.
+ * 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP.
+ * Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related
+ * defines for the ReasonCode field.
+ * 06-13-17 02.00.33 Added MPI2_FW_DOWNLOAD_ITYPE_CPLD.
+ * 09-29-17 02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED
+ * to the ReasonCode field in PCIe Device Status Change
+ * Event Data.
+ * 07-22-18 02.00.35 Added FW_DOWNLOAD_ITYPE_CPLD and _PSOC.
+ * Moved FW image definitions ionto new mpi2_image,h + * 08-14-18 02.00.36 Fixed definition of MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16) + * 09-07-18 02.00.37 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES + * 10-02-19 02.00.38 Added MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE + * Added MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED + * Added MPI2_FW_DOWNLOAD_ITYPE_COREDUMP + * Added MPI2_FW_UPLOAD_ITYPE_COREDUMP + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_IOC_H +#define MPI2_IOC_H + +/***************************************************************************** +* +* IOC Messages +* +*****************************************************************************/ + +/**************************************************************************** +* IOCInit message +****************************************************************************/ + +/*IOCInit Request message */ +typedef struct _MPI2_IOC_INIT_REQUEST { + U8 WhoInit; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 MsgVersion; /*0x0C */ + U16 HeaderVersion; /*0x0E */ + U32 Reserved5; /*0x10 */ + U16 ConfigurationFlags; /* 0x14 */ + U8 HostPageSize; /*0x16 */ + U8 HostMSIxVectors; /*0x17 */ + U16 Reserved8; /*0x18 */ + U16 SystemRequestFrameSize; /*0x1A */ + U16 ReplyDescriptorPostQueueDepth; /*0x1C */ + U16 ReplyFreeQueueDepth; /*0x1E */ + U32 SenseBufferAddressHigh; /*0x20 */ + U32 SystemReplyAddressHigh; /*0x24 */ + U64 SystemRequestFrameBaseAddress; /*0x28 */ + U64 ReplyDescriptorPostQueueAddress; /*0x30 */ + U64 ReplyFreeQueueAddress; /*0x38 */ + U64 TimeStamp; /*0x40 */ +} MPI2_IOC_INIT_REQUEST, *PTR_MPI2_IOC_INIT_REQUEST, + Mpi2IOCInitRequest_t, *pMpi2IOCInitRequest_t; + +/*WhoInit values */ +#define MPI2_WHOINIT_NOT_INITIALIZED (0x00) +#define MPI2_WHOINIT_SYSTEM_BIOS (0x01) +#define MPI2_WHOINIT_ROM_BIOS (0x02) +#define MPI2_WHOINIT_PCI_PEER (0x03) +#define MPI2_WHOINIT_HOST_DRIVER (0x04) +#define MPI2_WHOINIT_MANUFACTURER (0x05) + +/* MsgFlags */ +#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01) + + +/*MsgVersion */ +#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00) +#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8) +#define MPI2_IOCINIT_MSGVERSION_MINOR_MASK (0x00FF) +#define MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT (0) + +/*HeaderVersion */ +#define MPI2_IOCINIT_HDRVERSION_UNIT_MASK (0xFF00) +#define MPI2_IOCINIT_HDRVERSION_UNIT_SHIFT (8) +#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF) +#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0) + +/*ConfigurationFlags */ +#define MPI26_IOCINIT_CFGFLAGS_NVME_SGL_FORMAT (0x0001) +#define MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE (0x0002) + +/*minimum depth for a Reply Descriptor Post Queue */ +#define MPI2_RDPQ_DEPTH_MIN (16) + +/* Reply Descriptor Post Queue Array Entry */ +typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY { + U64 RDPQBaseAddress; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ +} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, +*PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, +Mpi2IOCInitRDPQArrayEntry, *pMpi2IOCInitRDPQArrayEntry; + + +/*IOCInit Reply message */ +typedef struct _MPI2_IOC_INIT_REPLY { + U8 WhoInit; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 
Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_IOC_INIT_REPLY, *PTR_MPI2_IOC_INIT_REPLY, + Mpi2IOCInitReply_t, *pMpi2IOCInitReply_t; + +/**************************************************************************** +* IOCFacts message +****************************************************************************/ + +/*IOCFacts Request message */ +typedef struct _MPI2_IOC_FACTS_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ +} MPI2_IOC_FACTS_REQUEST, *PTR_MPI2_IOC_FACTS_REQUEST, + Mpi2IOCFactsRequest_t, *pMpi2IOCFactsRequest_t; + +/*IOCFacts Reply message */ +typedef struct _MPI2_IOC_FACTS_REPLY { + U16 MsgVersion; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 HeaderVersion; /*0x04 */ + U8 IOCNumber; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U16 IOCExceptions; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 MaxChainDepth; /*0x14 */ + U8 WhoInit; /*0x15 */ + U8 NumberOfPorts; /*0x16 */ + U8 MaxMSIxVectors; /*0x17 */ + U16 RequestCredit; /*0x18 */ + U16 ProductID; /*0x1A */ + U32 IOCCapabilities; /*0x1C */ + MPI2_VERSION_UNION FWVersion; /*0x20 */ + U16 IOCRequestFrameSize; /*0x24 */ + U16 IOCMaxChainSegmentSize; /*0x26 */ + U16 MaxInitiators; /*0x28 */ + U16 MaxTargets; /*0x2A */ + U16 MaxSasExpanders; /*0x2C */ + U16 MaxEnclosures; /*0x2E */ + U16 ProtocolFlags; /*0x30 */ + U16 HighPriorityCredit; /*0x32 */ + U16 MaxReplyDescriptorPostQueueDepth; /*0x34 */ + U8 ReplyFrameSize; /*0x36 */ + U8 MaxVolumes; /*0x37 */ + U16 MaxDevHandle; /*0x38 */ + U16 MaxPersistentEntries; /*0x3A */ + U16 MinDevHandle; /*0x3C */ + U8 CurrentHostPageSize; /* 0x3E */ + U8 Reserved4; /* 0x3F */ + U8 SGEModifierMask; /*0x40 */ + U8 SGEModifierValue; /*0x41 */ + U8 SGEModifierShift; /*0x42 */ + U8 Reserved5; /*0x43 */ +} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY, + Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t; + +/*MsgVersion */ +#define MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00) +#define MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT (8) +#define MPI2_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF) +#define MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT (0) + +/*HeaderVersion */ +#define MPI2_IOCFACTS_HDRVERSION_UNIT_MASK (0xFF00) +#define MPI2_IOCFACTS_HDRVERSION_UNIT_SHIFT (8) +#define MPI2_IOCFACTS_HDRVERSION_DEV_MASK (0x00FF) +#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0) + +/*IOCExceptions */ +#define MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0400) +#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0200) +#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100) + +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x00E0) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_GOOD (0x0000) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_BACKUP (0x0020) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_RESTORED (0x0040) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_CORRUPT_BACKUP (0x0060) + +#define MPI2_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED (0x0010) +#define MPI2_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0008) +#define MPI2_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0004) +#define MPI2_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID (0x0002) +#define MPI2_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001) + +/*defines for WhoInit field are after the IOCInit Request */ + +/*ProductID field uses MPI2_FW_HEADER_PID_ */ + +/*IOCCapabilities */ +#define 
MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED (0x00200000) +#define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000) +#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000) +#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000) +#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000) +#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000) +#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000) +#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000) +#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000) +#define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000) +#define MPI2_IOCFACTS_CAPABILITY_TLR (0x00000800) +#define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100) +#define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080) +#define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040) +#define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020) +#define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010) +#define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008) +#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004) + +/*ProtocolFlags */ +#define MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES (0x0008) +#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002) +#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001) + +/**************************************************************************** +* PortFacts message +****************************************************************************/ + +/*PortFacts Request message */ +typedef struct _MPI2_PORT_FACTS_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 PortNumber; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ +} MPI2_PORT_FACTS_REQUEST, *PTR_MPI2_PORT_FACTS_REQUEST, + Mpi2PortFactsRequest_t, *pMpi2PortFactsRequest_t; + +/*PortFacts Reply message */ +typedef struct _MPI2_PORT_FACTS_REPLY { + U16 Reserved1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 PortNumber; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 Reserved5; /*0x14 */ + U8 PortType; /*0x15 */ + U16 Reserved6; /*0x16 */ + U16 MaxPostedCmdBuffers; /*0x18 */ + U16 Reserved7; /*0x1A */ +} MPI2_PORT_FACTS_REPLY, *PTR_MPI2_PORT_FACTS_REPLY, + Mpi2PortFactsReply_t, *pMpi2PortFactsReply_t; + +/*PortType values */ +#define MPI2_PORTFACTS_PORTTYPE_INACTIVE (0x00) +#define MPI2_PORTFACTS_PORTTYPE_FC (0x10) +#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20) +#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30) +#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31) +#define MPI2_PORTFACTS_PORTTYPE_TRI_MODE (0x40) + + +/**************************************************************************** +* PortEnable message +****************************************************************************/ + +/*PortEnable Request message */ +typedef struct _MPI2_PORT_ENABLE_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U8 Reserved2; /*0x04 */ + U8 PortFlags; /*0x05 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ +} MPI2_PORT_ENABLE_REQUEST, *PTR_MPI2_PORT_ENABLE_REQUEST, + Mpi2PortEnableRequest_t, *pMpi2PortEnableRequest_t; + +/*PortEnable Reply message */ +typedef struct _MPI2_PORT_ENABLE_REPLY { + U16 
Reserved1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U8 Reserved2; /*0x04 */ + U8 PortFlags; /*0x05 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_PORT_ENABLE_REPLY, *PTR_MPI2_PORT_ENABLE_REPLY, + Mpi2PortEnableReply_t, *pMpi2PortEnableReply_t; + +/**************************************************************************** +* EventNotification message +****************************************************************************/ + +/*EventNotification Request message */ +#define MPI2_EVENT_NOTIFY_EVENTMASK_WORDS (4) + +typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 Reserved6; /*0x10 */ + U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; /*0x14 */ + U16 SASBroadcastPrimitiveMasks; /*0x24 */ + U16 SASNotifyPrimitiveMasks; /*0x26 */ + U32 Reserved8; /*0x28 */ +} MPI2_EVENT_NOTIFICATION_REQUEST, + *PTR_MPI2_EVENT_NOTIFICATION_REQUEST, + Mpi2EventNotificationRequest_t, + *pMpi2EventNotificationRequest_t; + +/*EventNotification Reply message */ +typedef struct _MPI2_EVENT_NOTIFICATION_REPLY { + U16 EventDataLength; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 AckRequired; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U16 Event; /*0x14 */ + U16 Reserved4; /*0x16 */ + U32 EventContext; /*0x18 */ + U32 EventData[]; /*0x1C */ +} MPI2_EVENT_NOTIFICATION_REPLY, *PTR_MPI2_EVENT_NOTIFICATION_REPLY, + Mpi2EventNotificationReply_t, + *pMpi2EventNotificationReply_t; + +/*AckRequired */ +#define MPI2_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00) +#define MPI2_EVENT_NOTIFICATION_ACK_REQUIRED (0x01) + +/*Event */ +#define MPI2_EVENT_LOG_DATA (0x0001) +#define MPI2_EVENT_STATE_CHANGE (0x0002) +#define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005) +#define MPI2_EVENT_EVENT_CHANGE (0x000A) +#define MPI2_EVENT_TASK_SET_FULL (0x000E) /*obsolete */ +#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F) +#define MPI2_EVENT_IR_OPERATION_STATUS (0x0014) +#define MPI2_EVENT_SAS_DISCOVERY (0x0016) +#define MPI2_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017) +#define MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018) +#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019) +#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C) +#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D) +#define MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE (0x001D) +#define MPI2_EVENT_IR_VOLUME (0x001E) +#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F) +#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020) +#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021) +#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022) +#define MPI2_EVENT_GPIO_INTERRUPT (0x0023) +#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024) +#define MPI2_EVENT_SAS_QUIESCE (0x0025) +#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026) +#define MPI2_EVENT_TEMP_THRESHOLD (0x0027) +#define MPI2_EVENT_HOST_MESSAGE (0x0028) +#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029) +#define MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE (0x0030) +#define MPI2_EVENT_PCIE_ENUMERATION (0x0031) +#define 
MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x0032) +#define MPI2_EVENT_PCIE_LINK_COUNTER (0x0033) +#define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION (0x0034) +#define MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x0035) +#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E) +#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F) + +/*Log Entry Added Event data */ + +/*the following structure matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h */ +#define MPI2_EVENT_DATA_LOG_DATA_LENGTH (0x1C) + +typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED { + U64 TimeStamp; /*0x00 */ + U32 Reserved1; /*0x08 */ + U16 LogSequence; /*0x0C */ + U16 LogEntryQualifier; /*0x0E */ + U8 VP_ID; /*0x10 */ + U8 VF_ID; /*0x11 */ + U16 Reserved2; /*0x12 */ + U8 LogData[MPI2_EVENT_DATA_LOG_DATA_LENGTH]; /*0x14 */ +} MPI2_EVENT_DATA_LOG_ENTRY_ADDED, + *PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED, + Mpi2EventDataLogEntryAdded_t, + *pMpi2EventDataLogEntryAdded_t; + +/*GPIO Interrupt Event data */ + +typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT { + U8 GPIONum; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ +} MPI2_EVENT_DATA_GPIO_INTERRUPT, + *PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT, + Mpi2EventDataGpioInterrupt_t, + *pMpi2EventDataGpioInterrupt_t; + +/*Temperature Threshold Event data */ + +typedef struct _MPI2_EVENT_DATA_TEMPERATURE { + U16 Status; /*0x00 */ + U8 SensorNum; /*0x02 */ + U8 Reserved1; /*0x03 */ + U16 CurrentTemperature; /*0x04 */ + U16 Reserved2; /*0x06 */ + U32 Reserved3; /*0x08 */ + U32 Reserved4; /*0x0C */ +} MPI2_EVENT_DATA_TEMPERATURE, + *PTR_MPI2_EVENT_DATA_TEMPERATURE, + Mpi2EventDataTemperature_t, *pMpi2EventDataTemperature_t; + +/*Temperature Threshold Event data Status bits */ +#define MPI2_EVENT_TEMPERATURE3_EXCEEDED (0x0008) +#define MPI2_EVENT_TEMPERATURE2_EXCEEDED (0x0004) +#define MPI2_EVENT_TEMPERATURE1_EXCEEDED (0x0002) +#define MPI2_EVENT_TEMPERATURE0_EXCEEDED (0x0001) + +/*Host Message Event data */ + +typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE { + U8 SourceVF_ID; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 Reserved3; /*0x04 */ + U32 HostData[]; /*0x08 */ +} MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE, + Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t; + +/*Power Performance Change Event data */ + +typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE { + U8 CurrentPowerMode; /*0x00 */ + U8 PreviousPowerMode; /*0x01 */ + U16 Reserved1; /*0x02 */ +} MPI2_EVENT_DATA_POWER_PERF_CHANGE, + *PTR_MPI2_EVENT_DATA_POWER_PERF_CHANGE, + Mpi2EventDataPowerPerfChange_t, + *pMpi2EventDataPowerPerfChange_t; + +/*defines for CurrentPowerMode and PreviousPowerMode fields */ +#define MPI2_EVENT_PM_INIT_MASK (0xC0) +#define MPI2_EVENT_PM_INIT_UNAVAILABLE (0x00) +#define MPI2_EVENT_PM_INIT_HOST (0x40) +#define MPI2_EVENT_PM_INIT_IO_UNIT (0x80) +#define MPI2_EVENT_PM_INIT_PCIE_DPA (0xC0) + +#define MPI2_EVENT_PM_MODE_MASK (0x07) +#define MPI2_EVENT_PM_MODE_UNAVAILABLE (0x00) +#define MPI2_EVENT_PM_MODE_UNKNOWN (0x01) +#define MPI2_EVENT_PM_MODE_FULL_POWER (0x04) +#define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05) +#define MPI2_EVENT_PM_MODE_STANDBY (0x06) + +/* Active Cable Exception Event data */ + +typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT { + U32 ActiveCablePowerRequirement; /* 0x00 */ + U8 ReasonCode; /* 0x04 */ + U8 ReceptacleID; /* 0x05 */ + U16 Reserved1; /* 0x06 */ +} MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + *PTR_MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + Mpi25EventDataActiveCableExcept_t, + *pMpi25EventDataActiveCableExcept_t, + MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + 
*PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + Mpi26EventDataActiveCableExcept_t, + *pMpi26EventDataActiveCableExcept_t; + +/*MPI2.5 defines for the ReasonCode field */ +#define MPI25_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) +#define MPI25_EVENT_ACTIVE_CABLE_PRESENT (0x01) +#define MPI25_EVENT_ACTIVE_CABLE_DEGRADED (0x02) + +/* defines for ReasonCode field */ +#define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) +#define MPI26_EVENT_ACTIVE_CABLE_PRESENT (0x01) +#define MPI26_EVENT_ACTIVE_CABLE_DEGRADED (0x02) + +/*Hard Reset Received Event data */ + +typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED { + U8 Reserved1; /*0x00 */ + U8 Port; /*0x01 */ + U16 Reserved2; /*0x02 */ +} MPI2_EVENT_DATA_HARD_RESET_RECEIVED, + *PTR_MPI2_EVENT_DATA_HARD_RESET_RECEIVED, + Mpi2EventDataHardResetReceived_t, + *pMpi2EventDataHardResetReceived_t; + +/*Task Set Full Event data */ +/* this event is obsolete */ + +typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL { + U16 DevHandle; /*0x00 */ + U16 CurrentDepth; /*0x02 */ +} MPI2_EVENT_DATA_TASK_SET_FULL, *PTR_MPI2_EVENT_DATA_TASK_SET_FULL, + Mpi2EventDataTaskSetFull_t, *pMpi2EventDataTaskSetFull_t; + +/*SAS Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE { + U16 TaskTag; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U8 ASC; /*0x04 */ + U8 ASCQ; /*0x05 */ + U16 DevHandle; /*0x06 */ + U32 Reserved2; /*0x08 */ + U64 SASAddress; /*0x0C */ + U8 LUN[8]; /*0x14 */ +} MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, + *PTR_MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, + Mpi2EventDataSasDeviceStatusChange_t, + *pMpi2EventDataSasDeviceStatusChange_t; + +/*SAS Device Status Change Event data ReasonCode values */ +#define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05) +#define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07) +#define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) +#define MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09) +#define MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B) +#define MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C) +#define MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F) +#define MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10) +#define MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12) + +/*Integrated RAID Operation Status Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS { + U16 VolDevHandle; /*0x00 */ + U16 Reserved1; /*0x02 */ + U8 RAIDOperation; /*0x04 */ + U8 PercentComplete; /*0x05 */ + U16 Reserved2; /*0x06 */ + U32 ElapsedSeconds; /*0x08 */ +} MPI2_EVENT_DATA_IR_OPERATION_STATUS, + *PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS, + Mpi2EventDataIrOperationStatus_t, + *pMpi2EventDataIrOperationStatus_t; + +/*Integrated RAID Operation Status Event data RAIDOperation values */ +#define MPI2_EVENT_IR_RAIDOP_RESYNC (0x00) +#define MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01) +#define MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02) +#define MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03) +#define MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04) + +/*Integrated RAID Volume Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_VOLUME { + U16 VolDevHandle; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 NewValue; /*0x04 */ + U32 
PreviousValue; /*0x08 */ +} MPI2_EVENT_DATA_IR_VOLUME, *PTR_MPI2_EVENT_DATA_IR_VOLUME, + Mpi2EventDataIrVolume_t, *pMpi2EventDataIrVolume_t; + +/*Integrated RAID Volume Event data ReasonCode values */ +#define MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED (0x01) +#define MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED (0x02) +#define MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03) + +/*Integrated RAID Physical Disk Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK { + U16 Reserved1; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysDiskNum; /*0x03 */ + U16 PhysDiskDevHandle; /*0x04 */ + U16 Reserved2; /*0x06 */ + U16 Slot; /*0x08 */ + U16 EnclosureHandle; /*0x0A */ + U32 NewValue; /*0x0C */ + U32 PreviousValue; /*0x10 */ +} MPI2_EVENT_DATA_IR_PHYSICAL_DISK, + *PTR_MPI2_EVENT_DATA_IR_PHYSICAL_DISK, + Mpi2EventDataIrPhysicalDisk_t, + *pMpi2EventDataIrPhysicalDisk_t; + +/*Integrated RAID Physical Disk Event data ReasonCode values */ +#define MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED (0x01) +#define MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED (0x02) +#define MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03) + +/*Integrated RAID Configuration Change List Event data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumElements at runtime. + */ +#ifndef MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT +#define MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT (1) +#endif + +typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT { + U16 ElementFlags; /*0x00 */ + U16 VolDevHandle; /*0x02 */ + U8 ReasonCode; /*0x04 */ + U8 PhysDiskNum; /*0x05 */ + U16 PhysDiskDevHandle; /*0x06 */ +} MPI2_EVENT_IR_CONFIG_ELEMENT, *PTR_MPI2_EVENT_IR_CONFIG_ELEMENT, + Mpi2EventIrConfigElement_t, *pMpi2EventIrConfigElement_t; + +/*IR Configuration Change List Event data ElementFlags values */ +#define MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F) +#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000) +#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001) +#define MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002) + +/*IR Configuration Change List Event data ReasonCode values */ +#define MPI2_EVENT_IR_CHANGE_RC_ADDED (0x01) +#define MPI2_EVENT_IR_CHANGE_RC_REMOVED (0x02) +#define MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03) +#define MPI2_EVENT_IR_CHANGE_RC_HIDE (0x04) +#define MPI2_EVENT_IR_CHANGE_RC_UNHIDE (0x05) +#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06) +#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07) +#define MPI2_EVENT_IR_CHANGE_RC_PD_CREATED (0x08) +#define MPI2_EVENT_IR_CHANGE_RC_PD_DELETED (0x09) + +typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST { + U8 NumElements; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 Reserved2; /*0x02 */ + U8 ConfigNum; /*0x03 */ + U32 Flags; /*0x04 */ + MPI2_EVENT_IR_CONFIG_ELEMENT + ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT];/*0x08 */ +} MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST, + *PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST, + Mpi2EventDataIrConfigChangeList_t, + *pMpi2EventDataIrConfigChangeList_t; + +/*IR Configuration Change List Event data Flags values */ +#define MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001) + +/*SAS Discovery Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY { + U8 Flags; /*0x00 */ + U8 ReasonCode; /*0x01 */ + U8 PhysicalPort; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 DiscoveryStatus; /*0x04 */ +} MPI2_EVENT_DATA_SAS_DISCOVERY, + *PTR_MPI2_EVENT_DATA_SAS_DISCOVERY, + Mpi2EventDataSasDiscovery_t, *pMpi2EventDataSasDiscovery_t; + +/*SAS Discovery Event data Flags values */ 
+#define MPI2_EVENT_SAS_DISC_DEVICE_CHANGE (0x02) +#define MPI2_EVENT_SAS_DISC_IN_PROGRESS (0x01) + +/*SAS Discovery Event data ReasonCode values */ +#define MPI2_EVENT_SAS_DISC_RC_STARTED (0x01) +#define MPI2_EVENT_SAS_DISC_RC_COMPLETED (0x02) + +/*SAS Discovery Event data DiscoveryStatus values */ +#define MPI2_EVENT_SAS_DISC_DS_MAX_ENCLOSURES_EXCEED (0x80000000) +#define MPI2_EVENT_SAS_DISC_DS_MAX_EXPANDERS_EXCEED (0x40000000) +#define MPI2_EVENT_SAS_DISC_DS_MAX_DEVICES_EXCEED (0x20000000) +#define MPI2_EVENT_SAS_DISC_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR (0x08000000) +#define MPI2_EVENT_SAS_DISC_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) +#define MPI2_EVENT_SAS_DISC_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) +#define MPI2_EVENT_SAS_DISC_DS_MULTI_PORT_DOMAIN (0x00002000) +#define MPI2_EVENT_SAS_DISC_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) +#define MPI2_EVENT_SAS_DISC_DS_UNSUPPORTED_DEVICE (0x00000800) +#define MPI2_EVENT_SAS_DISC_DS_TABLE_LINK (0x00000400) +#define MPI2_EVENT_SAS_DISC_DS_SUBTRACTIVE_LINK (0x00000200) +#define MPI2_EVENT_SAS_DISC_DS_SMP_CRC_ERROR (0x00000100) +#define MPI2_EVENT_SAS_DISC_DS_SMP_FUNCTION_FAILED (0x00000080) +#define MPI2_EVENT_SAS_DISC_DS_INDEX_NOT_EXIST (0x00000040) +#define MPI2_EVENT_SAS_DISC_DS_OUT_ROUTE_ENTRIES (0x00000020) +#define MPI2_EVENT_SAS_DISC_DS_SMP_TIMEOUT (0x00000010) +#define MPI2_EVENT_SAS_DISC_DS_MULTIPLE_PORTS (0x00000004) +#define MPI2_EVENT_SAS_DISC_DS_UNADDRESSABLE_DEVICE (0x00000002) +#define MPI2_EVENT_SAS_DISC_DS_LOOP_DETECTED (0x00000001) + +/*SAS Broadcast Primitive Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE { + U8 PhyNum; /*0x00 */ + U8 Port; /*0x01 */ + U8 PortWidth; /*0x02 */ + U8 Primitive; /*0x03 */ +} MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE, + *PTR_MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE, + Mpi2EventDataSasBroadcastPrimitive_t, + *pMpi2EventDataSasBroadcastPrimitive_t; + +/*defines for the Primitive field */ +#define MPI2_EVENT_PRIMITIVE_CHANGE (0x01) +#define MPI2_EVENT_PRIMITIVE_SES (0x02) +#define MPI2_EVENT_PRIMITIVE_EXPANDER (0x03) +#define MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04) +#define MPI2_EVENT_PRIMITIVE_RESERVED3 (0x05) +#define MPI2_EVENT_PRIMITIVE_RESERVED4 (0x06) +#define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07) +#define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08) + +/*SAS Notify Primitive Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE { + U8 PhyNum; /*0x00 */ + U8 Port; /*0x01 */ + U8 Reserved1; /*0x02 */ + U8 Primitive; /*0x03 */ +} MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, + *PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, + Mpi2EventDataSasNotifyPrimitive_t, + *pMpi2EventDataSasNotifyPrimitive_t; + +/*defines for the Primitive field */ +#define MPI2_EVENT_NOTIFY_ENABLE_SPINUP (0x01) +#define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED (0x02) +#define MPI2_EVENT_NOTIFY_RESERVED1 (0x03) +#define MPI2_EVENT_NOTIFY_RESERVED2 (0x04) + +/*SAS Initiator Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE { + U8 ReasonCode; /*0x00 */ + U8 PhysicalPort; /*0x01 */ + U16 DevHandle; /*0x02 */ + U64 SASAddress; /*0x04 */ +} MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE, + *PTR_MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE, + Mpi2EventDataSasInitDevStatusChange_t, + *pMpi2EventDataSasInitDevStatusChange_t; + +/*SAS Initiator Device Status Change event ReasonCode values */ +#define MPI2_EVENT_SAS_INIT_RC_ADDED (0x01) +#define MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02) + +/*SAS 
Initiator Device Table Overflow Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW { + U16 MaxInit; /*0x00 */ + U16 CurrentInit; /*0x02 */ + U64 SASAddress; /*0x04 */ +} MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, + *PTR_MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, + Mpi2EventDataSasInitTableOverflow_t, + *pMpi2EventDataSasInitTableOverflow_t; + +/*SAS Topology Change List Event data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumEntries at runtime. + */ +#ifndef MPI2_EVENT_SAS_TOPO_PHY_COUNT +#define MPI2_EVENT_SAS_TOPO_PHY_COUNT (1) +#endif + +typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY { + U16 AttachedDevHandle; /*0x00 */ + U8 LinkRate; /*0x02 */ + U8 PhyStatus; /*0x03 */ +} MPI2_EVENT_SAS_TOPO_PHY_ENTRY, *PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY, + Mpi2EventSasTopoPhyEntry_t, *pMpi2EventSasTopoPhyEntry_t; + +typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST { + U16 EnclosureHandle; /*0x00 */ + U16 ExpanderDevHandle; /*0x02 */ + U8 NumPhys; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U8 NumEntries; /*0x08 */ + U8 StartPhyNum; /*0x09 */ + U8 ExpStatus; /*0x0A */ + U8 PhysicalPort; /*0x0B */ + MPI2_EVENT_SAS_TOPO_PHY_ENTRY + PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /*0x0C */ +} MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST, + *PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST, + Mpi2EventDataSasTopologyChangeList_t, + *pMpi2EventDataSasTopologyChangeList_t; + +/*values for the ExpStatus field */ +#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00) +#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01) +#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02) +#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03) +#define MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04) + +/*defines for the LinkRate field */ +#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xF0) +#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4) +#define MPI2_EVENT_SAS_TOPO_LR_PREV_MASK (0x0F) +#define MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT (0) + +#define MPI2_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00) +#define MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01) +#define MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02) +#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03) +#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04) +#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05) +#define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06) +#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08) +#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09) +#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A) +#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B) +#define MPI26_EVENT_SAS_TOPO_LR_RATE_22_5 (0x0C) + +/*values for the PhyStatus field */ +#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80) +#define MPI2_EVENT_SAS_TOPO_PS_MULTIPLEX_CHANGE (0x10) +/*values for the PhyStatus ReasonCode sub-field */ +#define MPI2_EVENT_SAS_TOPO_RC_MASK (0x0F) +#define MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01) +#define MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02) +#define MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03) +#define MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04) +#define MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05) + +/*SAS Enclosure Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE { + U16 EnclosureHandle; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U64 EnclosureLogicalID; /*0x04 */ + U16 NumSlots; /*0x0C */ + U16 StartSlot; /*0x0E */ + U32 PhyBits; /*0x10 */ +} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, + 
*PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, + Mpi2EventDataSasEnclDevStatusChange_t, + *pMpi2EventDataSasEnclDevStatusChange_t, + MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE, + *PTR_MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE, + Mpi26EventDataEnclDevStatusChange_t, + *pMpi26EventDataEnclDevStatusChange_t; + +/*SAS Enclosure Device Status Change event ReasonCode values */ +#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01) +#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02) + +/*Enclosure Device Status Change event ReasonCode values */ +#define MPI26_EVENT_ENCL_RC_ADDED (0x01) +#define MPI26_EVENT_ENCL_RC_NOT_RESPONDING (0x02) + + +typedef struct _MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR { + U16 DevHandle; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U32 Reserved1[2]; /*0x04 */ + U64 SASAddress; /*0x0C */ + U32 Reserved2[2]; /*0x14 */ +} MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR, + *PTR_MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR, + Mpi25EventDataSasDeviceDiscoveryError_t, + *pMpi25EventDataSasDeviceDiscoveryError_t; + +/*SAS Device Discovery Error Event data ReasonCode values */ +#define MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED (0x01) +#define MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT (0x02) + +/*SAS PHY Counter Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER { + U64 TimeStamp; /*0x00 */ + U32 Reserved1; /*0x08 */ + U8 PhyEventCode; /*0x0C */ + U8 PhyNum; /*0x0D */ + U16 Reserved2; /*0x0E */ + U32 PhyEventInfo; /*0x10 */ + U8 CounterType; /*0x14 */ + U8 ThresholdWindow; /*0x15 */ + U8 TimeUnits; /*0x16 */ + U8 Reserved3; /*0x17 */ + U32 EventThreshold; /*0x18 */ + U16 ThresholdFlags; /*0x1C */ + U16 Reserved4; /*0x1E */ +} MPI2_EVENT_DATA_SAS_PHY_COUNTER, + *PTR_MPI2_EVENT_DATA_SAS_PHY_COUNTER, + Mpi2EventDataSasPhyCounter_t, + *pMpi2EventDataSasPhyCounter_t; + +/*use MPI2_SASPHY3_EVENT_CODE_ values from mpi2_cnfg.h + *for the PhyEventCode field */ + +/*use MPI2_SASPHY3_COUNTER_TYPE_ values from mpi2_cnfg.h + *for the CounterType field */ + +/*use MPI2_SASPHY3_TIME_UNITS_ values from mpi2_cnfg.h + *for the TimeUnits field */ + +/*use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h + *for the ThresholdFlags field */ + +/*SAS Quiesce Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE { + U8 ReasonCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 Reserved3; /*0x04 */ +} MPI2_EVENT_DATA_SAS_QUIESCE, + *PTR_MPI2_EVENT_DATA_SAS_QUIESCE, + Mpi2EventDataSasQuiesce_t, *pMpi2EventDataSasQuiesce_t; + +/*SAS Quiesce Event data ReasonCode values */ +#define MPI2_EVENT_SAS_QUIESCE_RC_STARTED (0x01) +#define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED (0x02) + +/*Host Based Discovery Phy Event data */ + +typedef struct _MPI2_EVENT_HBD_PHY_SAS { + U8 Flags; /*0x00 */ + U8 NegotiatedLinkRate; /*0x01 */ + U8 PhyNum; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U32 Reserved1; /*0x04 */ + U8 InitialFrame[28]; /*0x08 */ +} MPI2_EVENT_HBD_PHY_SAS, *PTR_MPI2_EVENT_HBD_PHY_SAS, + Mpi2EventHbdPhySas_t, *pMpi2EventHbdPhySas_t; + +/*values for the Flags field */ +#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02) +#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01) + +/*use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h + *for the NegotiatedLinkRate field */ + +typedef union _MPI2_EVENT_HBD_DESCRIPTOR { + MPI2_EVENT_HBD_PHY_SAS Sas; +} MPI2_EVENT_HBD_DESCRIPTOR, *PTR_MPI2_EVENT_HBD_DESCRIPTOR, + Mpi2EventHbdDescriptor_t, *pMpi2EventHbdDescriptor_t; + +typedef struct _MPI2_EVENT_DATA_HBD_PHY { + U8 DescriptorType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; 
/*0x02 */ + U32 Reserved3; /*0x04 */ + MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /*0x08 */ +} MPI2_EVENT_DATA_HBD_PHY, *PTR_MPI2_EVENT_DATA_HBD_PHY, + Mpi2EventDataHbdPhy_t, + *pMpi2EventDataMpi2EventDataHbdPhy_t; + +/*values for the DescriptorType field */ +#define MPI2_EVENT_HBD_DT_SAS (0x01) + + +/*PCIe Device Status Change Event data (MPI v2.6 and later) */ + +typedef struct _MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE { + U16 TaskTag; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U8 ASC; /*0x04 */ + U8 ASCQ; /*0x05 */ + U16 DevHandle; /*0x06 */ + U32 Reserved2; /*0x08 */ + U64 WWID; /*0x0C */ + U8 LUN[8]; /*0x14 */ +} MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE, + *PTR_MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE, + Mpi26EventDataPCIeDeviceStatusChange_t, + *pMpi26EventDataPCIeDeviceStatusChange_t; + +/*PCIe Device Status Change Event data ReasonCode values */ +#define MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA (0x05) +#define MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED (0x07) +#define MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) +#define MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL (0x09) +#define MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A) +#define MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B) +#define MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C) +#define MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION (0x0D) +#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E) +#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F) +#define MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE (0x10) +#define MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED (0x11) + + +/*PCIe Enumeration Event data (MPI v2.6 and later) */ + +typedef struct _MPI26_EVENT_DATA_PCIE_ENUMERATION { + U8 Flags; /*0x00 */ + U8 ReasonCode; /*0x01 */ + U8 PhysicalPort; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 EnumerationStatus; /*0x04 */ +} MPI26_EVENT_DATA_PCIE_ENUMERATION, + *PTR_MPI26_EVENT_DATA_PCIE_ENUMERATION, + Mpi26EventDataPCIeEnumeration_t, + *pMpi26EventDataPCIeEnumeration_t; + +/*PCIe Enumeration Event data Flags values */ +#define MPI26_EVENT_PCIE_ENUM_DEVICE_CHANGE (0x02) +#define MPI26_EVENT_PCIE_ENUM_IN_PROGRESS (0x01) + +/*PCIe Enumeration Event data ReasonCode values */ +#define MPI26_EVENT_PCIE_ENUM_RC_STARTED (0x01) +#define MPI26_EVENT_PCIE_ENUM_RC_COMPLETED (0x02) + +/*PCIe Enumeration Event data EnumerationStatus values */ +#define MPI26_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000) +#define MPI26_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000) +#define MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000) + + +/*PCIe Topology Change List Event data (MPI v2.6 and later) */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumEntries at runtime. 
+ */ +#ifndef MPI26_EVENT_PCIE_TOPO_PORT_COUNT +#define MPI26_EVENT_PCIE_TOPO_PORT_COUNT (1) +#endif + +typedef struct _MPI26_EVENT_PCIE_TOPO_PORT_ENTRY { + U16 AttachedDevHandle; /*0x00 */ + U8 PortStatus; /*0x02 */ + U8 Reserved1; /*0x03 */ + U8 CurrentPortInfo; /*0x04 */ + U8 Reserved2; /*0x05 */ + U8 PreviousPortInfo; /*0x06 */ + U8 Reserved3; /*0x07 */ +} MPI26_EVENT_PCIE_TOPO_PORT_ENTRY, + *PTR_MPI26_EVENT_PCIE_TOPO_PORT_ENTRY, + Mpi26EventPCIeTopoPortEntry_t, + *pMpi26EventPCIeTopoPortEntry_t; + +/*PCIe Topology Change List Event data PortStatus values */ +#define MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED (0x01) +#define MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING (0x02) +#define MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED (0x03) +#define MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE (0x04) +#define MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05) + +/*PCIe Topology Change List Event data defines for CurrentPortInfo and + *PreviousPortInfo + */ +#define MPI26_EVENT_PCIE_TOPO_PI_LANE_MASK (0xF0) +#define MPI26_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00) +#define MPI26_EVENT_PCIE_TOPO_PI_1_LANE (0x10) +#define MPI26_EVENT_PCIE_TOPO_PI_2_LANES (0x20) +#define MPI26_EVENT_PCIE_TOPO_PI_4_LANES (0x30) +#define MPI26_EVENT_PCIE_TOPO_PI_8_LANES (0x40) +#define MPI26_EVENT_PCIE_TOPO_PI_16_LANES (0x50) + +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0F) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_5_0 (0x03) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_8_0 (0x04) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_16_0 (0x05) + +typedef struct _MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST { + U16 EnclosureHandle; /*0x00 */ + U16 SwitchDevHandle; /*0x02 */ + U8 NumPorts; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U8 NumEntries; /*0x08 */ + U8 StartPortNum; /*0x09 */ + U8 SwitchStatus; /*0x0A */ + U8 PhysicalPort; /*0x0B */ + MPI26_EVENT_PCIE_TOPO_PORT_ENTRY + PortEntry[MPI26_EVENT_PCIE_TOPO_PORT_COUNT]; /*0x0C */ +} MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST, + *PTR_MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST, + Mpi26EventDataPCIeTopologyChangeList_t, + *pMpi26EventDataPCIeTopologyChangeList_t; + +/*PCIe Topology Change List Event data SwitchStatus values */ +#define MPI26_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00) +#define MPI26_EVENT_PCIE_TOPO_SS_ADDED (0x01) +#define MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02) +#define MPI26_EVENT_PCIE_TOPO_SS_RESPONDING (0x03) +#define MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04) + +/*PCIe Link Counter Event data (MPI v2.6 and later) */ + +typedef struct _MPI26_EVENT_DATA_PCIE_LINK_COUNTER { + U64 TimeStamp; /*0x00 */ + U32 Reserved1; /*0x08 */ + U8 LinkEventCode; /*0x0C */ + U8 LinkNum; /*0x0D */ + U16 Reserved2; /*0x0E */ + U32 LinkEventInfo; /*0x10 */ + U8 CounterType; /*0x14 */ + U8 ThresholdWindow; /*0x15 */ + U8 TimeUnits; /*0x16 */ + U8 Reserved3; /*0x17 */ + U32 EventThreshold; /*0x18 */ + U16 ThresholdFlags; /*0x1C */ + U16 Reserved4; /*0x1E */ +} MPI26_EVENT_DATA_PCIE_LINK_COUNTER, + *PTR_MPI26_EVENT_DATA_PCIE_LINK_COUNTER, + Mpi26EventDataPcieLinkCounter_t, *pMpi26EventDataPcieLinkCounter_t; + + +/*use MPI26_PCIELINK3_EVTCODE_ values from mpi2_cnfg.h for the LinkEventCode + *field + */ + +/*use MPI26_PCIELINK3_COUNTER_TYPE_ values from mpi2_cnfg.h for the CounterType + *field + */ + +/*use MPI26_PCIELINK3_TIME_UNITS_ values from mpi2_cnfg.h for the TimeUnits + *field + */ + +/*use MPI26_PCIELINK3_TFLAGS_ values 
from mpi2_cnfg.h for the ThresholdFlags + *field + */ + +/**************************************************************************** +* EventAck message +****************************************************************************/ + +/*EventAck Request message */ +typedef struct _MPI2_EVENT_ACK_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Event; /*0x0C */ + U16 Reserved5; /*0x0E */ + U32 EventContext; /*0x10 */ +} MPI2_EVENT_ACK_REQUEST, *PTR_MPI2_EVENT_ACK_REQUEST, + Mpi2EventAckRequest_t, *pMpi2EventAckRequest_t; + +/*EventAck Reply message */ +typedef struct _MPI2_EVENT_ACK_REPLY { + U16 Reserved1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_EVENT_ACK_REPLY, *PTR_MPI2_EVENT_ACK_REPLY, + Mpi2EventAckReply_t, *pMpi2EventAckReply_t; + +/**************************************************************************** +* SendHostMessage message +****************************************************************************/ + +/*SendHostMessage Request message */ +typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST { + U16 HostDataLength; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U8 Reserved4; /*0x0C */ + U8 DestVF_ID; /*0x0D */ + U16 Reserved5; /*0x0E */ + U32 Reserved6; /*0x10 */ + U32 Reserved7; /*0x14 */ + U32 Reserved8; /*0x18 */ + U32 Reserved9; /*0x1C */ + U32 Reserved10; /*0x20 */ + U32 HostData[]; /*0x24 */ +} MPI2_SEND_HOST_MESSAGE_REQUEST, + *PTR_MPI2_SEND_HOST_MESSAGE_REQUEST, + Mpi2SendHostMessageRequest_t, + *pMpi2SendHostMessageRequest_t; + +/*SendHostMessage Reply message */ +typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY { + U16 HostDataLength; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_SEND_HOST_MESSAGE_REPLY, *PTR_MPI2_SEND_HOST_MESSAGE_REPLY, + Mpi2SendHostMessageReply_t, *pMpi2SendHostMessageReply_t; + +/**************************************************************************** +* FWDownload message +****************************************************************************/ + +/*MPI v2.0 FWDownload Request message */ +typedef struct _MPI2_FW_DOWNLOAD_REQUEST { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 TotalImageSize; /*0x0C */ + U32 Reserved5; /*0x10 */ + MPI2_MPI_SGE_UNION SGL; /*0x14 */ +} MPI2_FW_DOWNLOAD_REQUEST, *PTR_MPI2_FW_DOWNLOAD_REQUEST, + Mpi2FWDownloadRequest, *pMpi2FWDownloadRequest; + +#define MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT (0x01) + +#define MPI2_FW_DOWNLOAD_ITYPE_FW (0x01) +#define MPI2_FW_DOWNLOAD_ITYPE_BIOS (0x02) +#define MPI2_FW_DOWNLOAD_ITYPE_MANUFACTURING (0x06) 
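/*
 * Illustrative sketch (not part of the upstream MPI headers): an event
 * notification flagged as requiring acknowledgment is acknowledged by
 * echoing the Event and EventContext values from the notification reply
 * into an EventAck request.  The function name is hypothetical; it
 * assumes the Mpi2EventNotificationReply_t defined earlier in this file,
 * MPI2_FUNCTION_EVENT_ACK from mpi2.h, and memset() from the C library.
 * Allocating and posting the request frame is driver-specific and not
 * shown.
 */
static void example_build_event_ack(Mpi2EventAckRequest_t *ack,
	const Mpi2EventNotificationReply_t *event_reply)
{
	memset(ack, 0, sizeof(*ack));
	ack->Function = MPI2_FUNCTION_EVENT_ACK;
	/* both fields are already in the IOC's little-endian byte order */
	ack->Event = event_reply->Event;
	ack->EventContext = event_reply->EventContext;
}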
+#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07) +#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08) +#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09) +#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A) +#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) +#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C) +#define MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP (0x0D) +#define MPI2_FW_DOWNLOAD_ITYPE_SBR (0x0E) +#define MPI2_FW_DOWNLOAD_ITYPE_SBR_BACKUP (0x0F) +#define MPI2_FW_DOWNLOAD_ITYPE_HIIM (0x10) +#define MPI2_FW_DOWNLOAD_ITYPE_HIIA (0x11) +#define MPI2_FW_DOWNLOAD_ITYPE_CTLR (0x12) +#define MPI2_FW_DOWNLOAD_ITYPE_IMR_FIRMWARE (0x13) +#define MPI2_FW_DOWNLOAD_ITYPE_MR_NVDATA (0x14) +/*MPI v2.6 and newer */ +#define MPI2_FW_DOWNLOAD_ITYPE_CPLD (0x15) +#define MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16) +#define MPI2_FW_DOWNLOAD_ITYPE_COREDUMP (0x17) +#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0) + +/*MPI v2.0 FWDownload TransactionContext Element */ +typedef struct _MPI2_FW_DOWNLOAD_TCSGE { + U8 Reserved1; /*0x00 */ + U8 ContextSize; /*0x01 */ + U8 DetailsLength; /*0x02 */ + U8 Flags; /*0x03 */ + U32 Reserved2; /*0x04 */ + U32 ImageOffset; /*0x08 */ + U32 ImageSize; /*0x0C */ +} MPI2_FW_DOWNLOAD_TCSGE, *PTR_MPI2_FW_DOWNLOAD_TCSGE, + Mpi2FWDownloadTCSGE_t, *pMpi2FWDownloadTCSGE_t; + +/*MPI v2.5 FWDownload Request message */ +typedef struct _MPI25_FW_DOWNLOAD_REQUEST { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 TotalImageSize; /*0x0C */ + U32 Reserved5; /*0x10 */ + U32 Reserved6; /*0x14 */ + U32 ImageOffset; /*0x18 */ + U32 ImageSize; /*0x1C */ + MPI25_SGE_IO_UNION SGL; /*0x20 */ +} MPI25_FW_DOWNLOAD_REQUEST, *PTR_MPI25_FW_DOWNLOAD_REQUEST, + Mpi25FWDownloadRequest, *pMpi25FWDownloadRequest; + +/*FWDownload Reply message */ +typedef struct _MPI2_FW_DOWNLOAD_REPLY { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_FW_DOWNLOAD_REPLY, *PTR_MPI2_FW_DOWNLOAD_REPLY, + Mpi2FWDownloadReply_t, *pMpi2FWDownloadReply_t; + +/**************************************************************************** +* FWUpload message +****************************************************************************/ + +/*MPI v2.0 FWUpload Request message */ +typedef struct _MPI2_FW_UPLOAD_REQUEST { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 Reserved6; /*0x10 */ + MPI2_MPI_SGE_UNION SGL; /*0x14 */ +} MPI2_FW_UPLOAD_REQUEST, *PTR_MPI2_FW_UPLOAD_REQUEST, + Mpi2FWUploadRequest_t, *pMpi2FWUploadRequest_t; + +#define MPI2_FW_UPLOAD_ITYPE_FW_CURRENT (0x00) +#define MPI2_FW_UPLOAD_ITYPE_FW_FLASH (0x01) +#define MPI2_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02) +#define MPI2_FW_UPLOAD_ITYPE_FW_BACKUP (0x05) +#define MPI2_FW_UPLOAD_ITYPE_MANUFACTURING (0x06) +#define MPI2_FW_UPLOAD_ITYPE_CONFIG_1 (0x07) +#define MPI2_FW_UPLOAD_ITYPE_CONFIG_2 (0x08) +#define MPI2_FW_UPLOAD_ITYPE_MEGARAID (0x09) +#define 
MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A) +#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) +#define MPI2_FW_UPLOAD_ITYPE_CBB_BACKUP (0x0D) +#define MPI2_FW_UPLOAD_ITYPE_SBR (0x0E) +#define MPI2_FW_UPLOAD_ITYPE_SBR_BACKUP (0x0F) +#define MPI2_FW_UPLOAD_ITYPE_HIIM (0x10) +#define MPI2_FW_UPLOAD_ITYPE_HIIA (0x11) +#define MPI2_FW_UPLOAD_ITYPE_CTLR (0x12) +#define MPI2_FW_UPLOAD_ITYPE_IMR_FIRMWARE (0x13) +#define MPI2_FW_UPLOAD_ITYPE_MR_NVDATA (0x14) + + +/*MPI v2.0 FWUpload TransactionContext Element */ +typedef struct _MPI2_FW_UPLOAD_TCSGE { + U8 Reserved1; /*0x00 */ + U8 ContextSize; /*0x01 */ + U8 DetailsLength; /*0x02 */ + U8 Flags; /*0x03 */ + U32 Reserved2; /*0x04 */ + U32 ImageOffset; /*0x08 */ + U32 ImageSize; /*0x0C */ +} MPI2_FW_UPLOAD_TCSGE, *PTR_MPI2_FW_UPLOAD_TCSGE, + Mpi2FWUploadTCSGE_t, *pMpi2FWUploadTCSGE_t; + +/*MPI v2.5 FWUpload Request message */ +typedef struct _MPI25_FW_UPLOAD_REQUEST { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 Reserved6; /*0x10 */ + U32 Reserved7; /*0x14 */ + U32 ImageOffset; /*0x18 */ + U32 ImageSize; /*0x1C */ + MPI25_SGE_IO_UNION SGL; /*0x20 */ +} MPI25_FW_UPLOAD_REQUEST, *PTR_MPI25_FW_UPLOAD_REQUEST, + Mpi25FWUploadRequest_t, *pMpi25FWUploadRequest_t; + +/*FWUpload Reply message */ +typedef struct _MPI2_FW_UPLOAD_REPLY { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 ActualImageSize; /*0x14 */ +} MPI2_FW_UPLOAD_REPLY, *PTR_MPI2_FW_UPLOAD_REPLY, + Mpi2FWUploadReply_t, *pMPi2FWUploadReply_t; + + +/**************************************************************************** +* PowerManagementControl message +****************************************************************************/ + +/*PowerManagementControl Request message */ +typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST { + U8 Feature; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 Parameter1; /*0x0C */ + U8 Parameter2; /*0x0D */ + U8 Parameter3; /*0x0E */ + U8 Parameter4; /*0x0F */ + U32 Reserved5; /*0x10 */ + U32 Reserved6; /*0x14 */ +} MPI2_PWR_MGMT_CONTROL_REQUEST, *PTR_MPI2_PWR_MGMT_CONTROL_REQUEST, + Mpi2PwrMgmtControlRequest_t, *pMpi2PwrMgmtControlRequest_t; + +/*defines for the Feature field */ +#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01) +#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02) +#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /*obsolete */ +#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04) +#define MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE (0x05) +#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80) +#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF) + +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */ +/*Parameter1 contains a PHY number */ +/*Parameter2 indicates power condition action using these defines */ +#define MPI2_PM_CONTROL_PARAM2_PARTIAL (0x01) +#define 
MPI2_PM_CONTROL_PARAM2_SLUMBER (0x02) +#define MPI2_PM_CONTROL_PARAM2_EXIT_PWR_MGMT (0x03) +/*Parameter3 and Parameter4 are reserved */ + +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION + * Feature */ +/*Parameter1 contains SAS port width modulation group number */ +/*Parameter2 indicates IOC action using these defines */ +#define MPI2_PM_CONTROL_PARAM2_REQUEST_OWNERSHIP (0x01) +#define MPI2_PM_CONTROL_PARAM2_CHANGE_MODULATION (0x02) +#define MPI2_PM_CONTROL_PARAM2_RELINQUISH_OWNERSHIP (0x03) +/*Parameter3 indicates desired modulation level using these defines */ +#define MPI2_PM_CONTROL_PARAM3_25_PERCENT (0x00) +#define MPI2_PM_CONTROL_PARAM3_50_PERCENT (0x01) +#define MPI2_PM_CONTROL_PARAM3_75_PERCENT (0x02) +#define MPI2_PM_CONTROL_PARAM3_100_PERCENT (0x03) +/*Parameter4 is reserved */ + +/*this next set (_PCIE_LINK) is obsolete */ +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */ +/*Parameter1 indicates desired PCIe link speed using these defines */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /*obsolete */ +/*Parameter2 indicates desired PCIe link width using these defines */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /*obsolete */ +/*Parameter3 and Parameter4 are reserved */ + +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */ +/*Parameter1 indicates desired IOC hardware clock speed using these defines */ +#define MPI2_PM_CONTROL_PARAM1_FULL_IOC_SPEED (0x01) +#define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02) +#define MPI2_PM_CONTROL_PARAM1_QUARTER_IOC_SPEED (0x04) +#define MPI2_PM_CONTROL_PARAM1_EIGHTH_IOC_SPEED (0x08) +/*Parameter2, Parameter3, and Parameter4 are reserved */ + +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE Feature*/ +/*Parameter1 indicates host action regarding global power management mode */ +#define MPI2_PM_CONTROL_PARAM1_TAKE_CONTROL (0x01) +#define MPI2_PM_CONTROL_PARAM1_CHANGE_GLOBAL_MODE (0x02) +#define MPI2_PM_CONTROL_PARAM1_RELEASE_CONTROL (0x03) +/*Parameter2 indicates the requested global power management mode */ +#define MPI2_PM_CONTROL_PARAM2_FULL_PWR_PERF (0x01) +#define MPI2_PM_CONTROL_PARAM2_REDUCED_PWR_PERF (0x08) +#define MPI2_PM_CONTROL_PARAM2_STANDBY (0x40) +/*Parameter3 and Parameter4 are reserved */ + +/*PowerManagementControl Reply message */ +typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY { + U8 Feature; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_PWR_MGMT_CONTROL_REPLY, *PTR_MPI2_PWR_MGMT_CONTROL_REPLY, + Mpi2PwrMgmtControlReply_t, *pMpi2PwrMgmtControlReply_t; + +/**************************************************************************** +* IO Unit Control messages (MPI v2.6 and later only.) 
+****************************************************************************/ + +/* IO Unit Control Request Message */ +typedef struct _MPI26_IOUNIT_CONTROL_REQUEST { + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 IOCParameter; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U8 PhyNum; /* 0x0E */ + U8 PrimFlags; /* 0x0F */ + U32 Primitive; /* 0x10 */ + U8 LookupMethod; /* 0x14 */ + U8 Reserved5; /* 0x15 */ + U16 SlotNumber; /* 0x16 */ + U64 LookupAddress; /* 0x18 */ + U32 IOCParameterValue; /* 0x20 */ + U32 Reserved7; /* 0x24 */ + U32 Reserved8; /* 0x28 */ +} MPI26_IOUNIT_CONTROL_REQUEST, + *PTR_MPI26_IOUNIT_CONTROL_REQUEST, + Mpi26IoUnitControlRequest_t, + *pMpi26IoUnitControlRequest_t; + +/* values for the Operation field */ +#define MPI26_CTRL_OP_CLEAR_ALL_PERSISTENT (0x02) +#define MPI26_CTRL_OP_SAS_PHY_LINK_RESET (0x06) +#define MPI26_CTRL_OP_SAS_PHY_HARD_RESET (0x07) +#define MPI26_CTRL_OP_PHY_CLEAR_ERROR_LOG (0x08) +#define MPI26_CTRL_OP_LINK_CLEAR_ERROR_LOG (0x09) +#define MPI26_CTRL_OP_SAS_SEND_PRIMITIVE (0x0A) +#define MPI26_CTRL_OP_FORCE_FULL_DISCOVERY (0x0B) +#define MPI26_CTRL_OP_REMOVE_DEVICE (0x0D) +#define MPI26_CTRL_OP_LOOKUP_MAPPING (0x0E) +#define MPI26_CTRL_OP_SET_IOC_PARAMETER (0x0F) +#define MPI26_CTRL_OP_ENABLE_FP_DEVICE (0x10) +#define MPI26_CTRL_OP_DISABLE_FP_DEVICE (0x11) +#define MPI26_CTRL_OP_ENABLE_FP_ALL (0x12) +#define MPI26_CTRL_OP_DISABLE_FP_ALL (0x13) +#define MPI26_CTRL_OP_DEV_ENABLE_NCQ (0x14) +#define MPI26_CTRL_OP_DEV_DISABLE_NCQ (0x15) +#define MPI26_CTRL_OP_SHUTDOWN (0x16) +#define MPI26_CTRL_OP_DEV_ENABLE_PERSIST_CONNECTION (0x17) +#define MPI26_CTRL_OP_DEV_DISABLE_PERSIST_CONNECTION (0x18) +#define MPI26_CTRL_OP_DEV_CLOSE_PERSIST_CONNECTION (0x19) +#define MPI26_CTRL_OP_ENABLE_NVME_SGL_FORMAT (0x1A) +#define MPI26_CTRL_OP_DISABLE_NVME_SGL_FORMAT (0x1B) +#define MPI26_CTRL_OP_PRODUCT_SPECIFIC_MIN (0x80) + +/* values for the PrimFlags field */ +#define MPI26_CTRL_PRIMFLAGS_SINGLE (0x08) +#define MPI26_CTRL_PRIMFLAGS_TRIPLE (0x02) +#define MPI26_CTRL_PRIMFLAGS_REDUNDANT (0x01) + +/* values for the LookupMethod field */ +#define MPI26_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01) +#define MPI26_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02) +#define MPI26_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03) + + +/* IO Unit Control Reply Message */ +typedef struct _MPI26_IOUNIT_CONTROL_REPLY { + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 IOCParameter; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI26_IOUNIT_CONTROL_REPLY, + *PTR_MPI26_IOUNIT_CONTROL_REPLY, + Mpi26IoUnitControlReply_t, + *pMpi26IoUnitControlReply_t; + + +#endif diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h new file mode 100644 index 000000000..bb7b79cfa --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h @@ -0,0 +1,113 @@ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_pci.h + * Title: MPI PCIe Attached Devices structures and definitions. + * Creation Date: October 9, 2012 + * + * mpi2_pci.h Version: 02.00.04 + * + * NOTE: Names (typedefs, defines, etc.) 
beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 03-16-15 02.00.00 Initial version. + * 02-17-16 02.00.01 Removed AHCI support. + * Removed SOP support. + * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to + * NVME Encapsulated Request. + * 07-22-18 02.00.03 Updted flags field for NVME Encapsulated req + * 12-17-18 02.00.04 Added MPI26_PCIE_DEVINFO_SCSI + * Shortten some defines to be compatible with DOS + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_PCI_H +#define MPI2_PCI_H + + +/* + *Values for the PCIe DeviceInfo field used in PCIe Device Status Change Event + *data and PCIe Configuration pages. + */ +#define MPI26_PCIE_DEVINFO_DIRECT_ATTACH (0x00000010) + +#define MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE (0x0000000F) +#define MPI26_PCIE_DEVINFO_NO_DEVICE (0x00000000) +#define MPI26_PCIE_DEVINFO_PCI_SWITCH (0x00000001) +#define MPI26_PCIE_DEVINFO_NVME (0x00000003) +#define MPI26_PCIE_DEVINFO_SCSI (0x00000004) + +/**************************************************************************** +* NVMe Encapsulated message +****************************************************************************/ + +/*NVME Encapsulated Request Message */ +typedef struct _MPI26_NVME_ENCAPSULATED_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 EncapsulatedCommandLength; /*0x04 */ + U8 Reserved1; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U32 Reserved3; /*0x0C */ + U64 ErrorResponseBaseAddress; /*0x10 */ + U16 ErrorResponseAllocationLength; /*0x18 */ + U16 Flags; /*0x1A */ + U32 DataLength; /*0x1C */ + U8 NVMe_Command[4]; /*0x20 */ + +} MPI26_NVME_ENCAPSULATED_REQUEST, *PTR_MPI26_NVME_ENCAPSULATED_REQUEST, + Mpi26NVMeEncapsulatedRequest_t, *pMpi26NVMeEncapsulatedRequest_t; + +/*defines for the Flags field */ +#define MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP (0x0020) +/*Submission Queue Type*/ +#define MPI26_NVME_FLAGS_SUBMISSIONQ_MASK (0x0010) +#define MPI26_NVME_FLAGS_SUBMISSIONQ_IO (0x0000) +#define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0010) +/*Error Response Address Space */ +#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_MASK (0x000C) +#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_SYSTEM (0x0000) +#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_IOCTL (0x0008) +/* Data Direction*/ +#define MPI26_NVME_FLAGS_DATADIRECTION_MASK (0x0003) +#define MPI26_NVME_FLAGS_NODATATRANSFER (0x0000) +#define MPI26_NVME_FLAGS_WRITE (0x0001) +#define MPI26_NVME_FLAGS_READ (0x0002) +#define MPI26_NVME_FLAGS_BIDIRECTIONAL (0x0003) + + +/*NVMe Encapuslated Reply Message */ +typedef struct _MPI26_NVME_ENCAPSULATED_ERROR_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 EncapsulatedCommandLength; /*0x04 */ + U8 Reserved1; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U16 ErrorResponseCount; /*0x14 */ + U16 Reserved4; /*0x16 */ +} MPI26_NVME_ENCAPSULATED_ERROR_REPLY, + *PTR_MPI26_NVME_ENCAPSULATED_ERROR_REPLY, + Mpi26NVMeEncapsulatedErrorReply_t, + 
*pMpi26NVMeEncapsulatedErrorReply_t; + + +#endif diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h new file mode 100644 index 000000000..b770eb516 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h @@ -0,0 +1,356 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_raid.h + * Title: MPI Integrated RAID messages and structures + * Creation Date: April 26, 2007 + * + * mpi2_raid.h Version: 02.00.11 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 08-31-07 02.00.01 Modifications to RAID Action request and reply, + * including the Actions and ActionData. + * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD. + * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that + * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT + * can be sized by the build environment. + * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of + * VolumeCreationFlags and marked the old one as obsolete. + * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define. + * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with + * related structures and defines. + * Added product-specific range to RAID Action values. + * 11-18-11 02.00.07 Incorporating additions for MPI v2.5. + * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN. + * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR. + * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define. + * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI. + * 11-18-14 02.00.11 Updated copyright information. 
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_RAID_H +#define MPI2_RAID_H + +/***************************************************************************** +* +* Integrated RAID Messages +* +*****************************************************************************/ + +/**************************************************************************** +* RAID Action messages +****************************************************************************/ + +/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */ +#define MPI25_RAID_ACTION_ADATA_ALLOW_PI (0x80000000) + +/*ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */ +#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000) +#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001) + +/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for + *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ + +/*ActionDataWord defines for use with + *MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES action */ +#define MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD (0x00000001) + +/*ActionDataWord for MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE Action */ +typedef struct _MPI2_RAID_ACTION_RATE_DATA { + U8 RateToChange; /*0x00 */ + U8 RateOrMode; /*0x01 */ + U16 DataScrubDuration; /*0x02 */ +} MPI2_RAID_ACTION_RATE_DATA, *PTR_MPI2_RAID_ACTION_RATE_DATA, + Mpi2RaidActionRateData_t, *pMpi2RaidActionRateData_t; + +#define MPI2_RAID_ACTION_SET_RATE_RESYNC (0x00) +#define MPI2_RAID_ACTION_SET_RATE_DATA_SCRUB (0x01) +#define MPI2_RAID_ACTION_SET_RATE_POWERSAVE_MODE (0x02) + +/*ActionDataWord for MPI2_RAID_ACTION_START_RAID_FUNCTION Action */ +typedef struct _MPI2_RAID_ACTION_START_RAID_FUNCTION { + U8 RAIDFunction; /*0x00 */ + U8 Flags; /*0x01 */ + U16 Reserved1; /*0x02 */ +} MPI2_RAID_ACTION_START_RAID_FUNCTION, + *PTR_MPI2_RAID_ACTION_START_RAID_FUNCTION, + Mpi2RaidActionStartRaidFunction_t, + *pMpi2RaidActionStartRaidFunction_t; + +/*defines for the RAIDFunction field */ +#define MPI2_RAID_ACTION_START_BACKGROUND_INIT (0x00) +#define MPI2_RAID_ACTION_START_ONLINE_CAP_EXPANSION (0x01) +#define MPI2_RAID_ACTION_START_CONSISTENCY_CHECK (0x02) + +/*defines for the Flags field */ +#define MPI2_RAID_ACTION_START_NEW (0x00) +#define MPI2_RAID_ACTION_START_RESUME (0x01) + +/*ActionDataWord for MPI2_RAID_ACTION_STOP_RAID_FUNCTION Action */ +typedef struct _MPI2_RAID_ACTION_STOP_RAID_FUNCTION { + U8 RAIDFunction; /*0x00 */ + U8 Flags; /*0x01 */ + U16 Reserved1; /*0x02 */ +} MPI2_RAID_ACTION_STOP_RAID_FUNCTION, + *PTR_MPI2_RAID_ACTION_STOP_RAID_FUNCTION, + Mpi2RaidActionStopRaidFunction_t, + *pMpi2RaidActionStopRaidFunction_t; + +/*defines for the RAIDFunction field */ +#define MPI2_RAID_ACTION_STOP_BACKGROUND_INIT (0x00) +#define MPI2_RAID_ACTION_STOP_ONLINE_CAP_EXPANSION (0x01) +#define MPI2_RAID_ACTION_STOP_CONSISTENCY_CHECK (0x02) + +/*defines for the Flags field */ +#define MPI2_RAID_ACTION_STOP_ABORT (0x00) +#define MPI2_RAID_ACTION_STOP_PAUSE (0x01) + +/*ActionDataWord for MPI2_RAID_ACTION_CREATE_HOT_SPARE Action */ +typedef struct _MPI2_RAID_ACTION_HOT_SPARE { + U8 HotSparePool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 DevHandle; /*0x02 */ +} MPI2_RAID_ACTION_HOT_SPARE, *PTR_MPI2_RAID_ACTION_HOT_SPARE, + Mpi2RaidActionHotSpare_t, *pMpi2RaidActionHotSpare_t; + +/*ActionDataWord for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE Action */ +typedef struct _MPI2_RAID_ACTION_FW_UPDATE_MODE { + U8 Flags; /*0x00 */ + U8 DeviceFirmwareUpdateModeTimeout; /*0x01 */ + U16 Reserved1; /*0x02 */ +} 
MPI2_RAID_ACTION_FW_UPDATE_MODE, + *PTR_MPI2_RAID_ACTION_FW_UPDATE_MODE, + Mpi2RaidActionFwUpdateMode_t, + *pMpi2RaidActionFwUpdateMode_t; + +/*ActionDataWord defines for use with + *MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */ +#define MPI2_RAID_ACTION_ADATA_DISABLE_FW_UPDATE (0x00) +#define MPI2_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x01) + +typedef union _MPI2_RAID_ACTION_DATA { + U32 Word; + MPI2_RAID_ACTION_RATE_DATA Rates; + MPI2_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction; + MPI2_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction; + MPI2_RAID_ACTION_HOT_SPARE HotSpare; + MPI2_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode; +} MPI2_RAID_ACTION_DATA, *PTR_MPI2_RAID_ACTION_DATA, + Mpi2RaidActionData_t, *pMpi2RaidActionData_t; + +/*RAID Action Request Message */ +typedef struct _MPI2_RAID_ACTION_REQUEST { + U8 Action; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 VolDevHandle; /*0x04 */ + U8 PhysDiskNum; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U32 Reserved3; /*0x0C */ + MPI2_RAID_ACTION_DATA ActionDataWord; /*0x10 */ + MPI2_SGE_SIMPLE_UNION ActionDataSGE; /*0x14 */ +} MPI2_RAID_ACTION_REQUEST, *PTR_MPI2_RAID_ACTION_REQUEST, + Mpi2RaidActionRequest_t, *pMpi2RaidActionRequest_t; + +/*RAID Action request Action values */ + +#define MPI2_RAID_ACTION_INDICATOR_STRUCT (0x01) +#define MPI2_RAID_ACTION_CREATE_VOLUME (0x02) +#define MPI2_RAID_ACTION_DELETE_VOLUME (0x03) +#define MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES (0x04) +#define MPI2_RAID_ACTION_ENABLE_ALL_VOLUMES (0x05) +#define MPI2_RAID_ACTION_PHYSDISK_OFFLINE (0x0A) +#define MPI2_RAID_ACTION_PHYSDISK_ONLINE (0x0B) +#define MPI2_RAID_ACTION_FAIL_PHYSDISK (0x0F) +#define MPI2_RAID_ACTION_ACTIVATE_VOLUME (0x11) +#define MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15) +#define MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE (0x17) +#define MPI2_RAID_ACTION_SET_VOLUME_NAME (0x18) +#define MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE (0x19) +#define MPI2_RAID_ACTION_ENABLE_FAILED_VOLUME (0x1C) +#define MPI2_RAID_ACTION_CREATE_HOT_SPARE (0x1D) +#define MPI2_RAID_ACTION_DELETE_HOT_SPARE (0x1E) +#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20) +#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21) +#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22) +#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23) +#define MPI2_RAID_ACTION_PHYSDISK_HIDDEN (0x24) +#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80) +#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF) + +/*RAID Volume Creation Structure */ + +/* + *The following define can be customized for the targeted product. 
+ */ +#ifndef MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS +#define MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS (1) +#endif + +typedef struct _MPI2_RAID_VOLUME_PHYSDISK { + U8 RAIDSetNum; /*0x00 */ + U8 PhysDiskMap; /*0x01 */ + U16 PhysDiskDevHandle; /*0x02 */ +} MPI2_RAID_VOLUME_PHYSDISK, *PTR_MPI2_RAID_VOLUME_PHYSDISK, + Mpi2RaidVolumePhysDisk_t, *pMpi2RaidVolumePhysDisk_t; + +/*defines for the PhysDiskMap field */ +#define MPI2_RAIDACTION_PHYSDISK_PRIMARY (0x01) +#define MPI2_RAIDACTION_PHYSDISK_SECONDARY (0x02) + +typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT { + U8 NumPhysDisks; /*0x00 */ + U8 VolumeType; /*0x01 */ + U16 Reserved1; /*0x02 */ + U32 VolumeCreationFlags; /*0x04 */ + U32 VolumeSettings; /*0x08 */ + U8 Reserved2; /*0x0C */ + U8 ResyncRate; /*0x0D */ + U16 DataScrubDuration; /*0x0E */ + U64 VolumeMaxLBA; /*0x10 */ + U32 StripeSize; /*0x18 */ + U8 Name[16]; /*0x1C */ + MPI2_RAID_VOLUME_PHYSDISK + PhysDisk[MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS]; /*0x2C */ +} MPI2_RAID_VOLUME_CREATION_STRUCT, + *PTR_MPI2_RAID_VOLUME_CREATION_STRUCT, + Mpi2RaidVolumeCreationStruct_t, + *pMpi2RaidVolumeCreationStruct_t; + +/*use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */ + +/*defines for the VolumeCreationFlags field */ +#define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000) +#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004) +#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002) +#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001) +/*The following is an obsolete define. + *It must be shifted left 24 bits in order to set the proper bit. + */ +#define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80) + +/*RAID Online Capacity Expansion Structure */ + +typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION { + U32 Flags; /*0x00 */ + U16 DevHandle0; /*0x04 */ + U16 Reserved1; /*0x06 */ + U16 DevHandle1; /*0x08 */ + U16 Reserved2; /*0x0A */ +} MPI2_RAID_ONLINE_CAPACITY_EXPANSION, + *PTR_MPI2_RAID_ONLINE_CAPACITY_EXPANSION, + Mpi2RaidOnlineCapacityExpansion_t, + *pMpi2RaidOnlineCapacityExpansion_t; + +/*RAID Compatibility Input Structure */ + +typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT { + U16 SourceDevHandle; /*0x00 */ + U16 CandidateDevHandle; /*0x02 */ + U32 Flags; /*0x04 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ +} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT, + *PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT, + Mpi2RaidCompatibilityInputStruct_t, + *pMpi2RaidCompatibilityInputStruct_t; + +/*defines for RAID Compatibility Structure Flags field */ +#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002) +#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001) + +/*RAID Volume Indicator Structure */ + +typedef struct _MPI2_RAID_VOL_INDICATOR { + U64 TotalBlocks; /*0x00 */ + U64 BlocksRemaining; /*0x08 */ + U32 Flags; /*0x10 */ + U32 ElapsedSeconds; /* 0x14 */ +} MPI2_RAID_VOL_INDICATOR, *PTR_MPI2_RAID_VOL_INDICATOR, + Mpi2RaidVolIndicator_t, *pMpi2RaidVolIndicator_t; + +/*defines for RAID Volume Indicator Flags field */ +#define MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID (0x80000000) +#define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F) +#define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000) +#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001) +#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002) +#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003) +#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004) + +/*RAID Compatibility Result Structure */ + +typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT { + U8 State; /*0x00 */ + U8 
Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 GenericAttributes; /*0x04 */ + U32 OEMSpecificAttributes; /*0x08 */ + U32 Reserved3; /*0x0C */ + U32 Reserved4; /*0x10 */ +} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT, + *PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT, + Mpi2RaidCompatibilityResultStruct_t, + *pMpi2RaidCompatibilityResultStruct_t; + +/*defines for RAID Compatibility Result Structure State field */ +#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00) +#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01) + +/*defines for RAID Compatibility Result Structure GenericAttributes field */ +#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010) + +#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C) +#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008) +#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004) + +#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003) +#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002) +#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001) + +/*RAID Action Reply ActionData union */ +typedef union _MPI2_RAID_ACTION_REPLY_DATA { + U32 Word[6]; + MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator; + U16 VolDevHandle; + U8 VolumeState; + U8 PhysDiskNum; + MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult; +} MPI2_RAID_ACTION_REPLY_DATA, *PTR_MPI2_RAID_ACTION_REPLY_DATA, + Mpi2RaidActionReplyData_t, *pMpi2RaidActionReplyData_t; + +/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for + *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ + +/*RAID Action Reply Message */ +typedef struct _MPI2_RAID_ACTION_REPLY { + U8 Action; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 VolDevHandle; /*0x04 */ + U8 PhysDiskNum; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + MPI2_RAID_ACTION_REPLY_DATA ActionData; /*0x14 */ +} MPI2_RAID_ACTION_REPLY, *PTR_MPI2_RAID_ACTION_REPLY, + Mpi2RaidActionReply_t, *pMpi2RaidActionReply_t; + +#endif diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h new file mode 100644 index 000000000..16c922a8a --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h @@ -0,0 +1,304 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_sas.h + * Title: MPI Serial Attached SCSI structures and definitions + * Creation Date: February 9, 2007 + * + * mpi2_sas.h Version: 02.00.10 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit + * Control Request. + * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control + * Request. + * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST + * to MPI2_SGE_IO_UNION since it supports chained SGLs. + * 05-12-10 02.00.04 Modified some comments. 
+ * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control. + * 11-18-11 02.00.06 Incorporating additions for MPI v2.5. + * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA + * Passthrough Request message. + * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete + * for anything newer than MPI v2.0. + * 11-18-14 02.00.09 Updated copyright information. + * 03-16-15 02.00.10 Updated for MPI v2.6. + * Added MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_SAS_H +#define MPI2_SAS_H + +/* + *Values for SASStatus. + */ +#define MPI2_SASSTATUS_SUCCESS (0x00) +#define MPI2_SASSTATUS_UNKNOWN_ERROR (0x01) +#define MPI2_SASSTATUS_INVALID_FRAME (0x02) +#define MPI2_SASSTATUS_UTC_BAD_DEST (0x03) +#define MPI2_SASSTATUS_UTC_BREAK_RECEIVED (0x04) +#define MPI2_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05) +#define MPI2_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06) +#define MPI2_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07) +#define MPI2_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08) +#define MPI2_SASSTATUS_UTC_WRONG_DESTINATION (0x09) +#define MPI2_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A) +#define MPI2_SASSTATUS_LONG_INFORMATION_UNIT (0x0B) +#define MPI2_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C) +#define MPI2_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D) +#define MPI2_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E) +#define MPI2_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F) +#define MPI2_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10) +#define MPI2_SASSTATUS_DATA_OFFSET_ERROR (0x11) +#define MPI2_SASSTATUS_SDSF_NAK_RECEIVED (0x12) +#define MPI2_SASSTATUS_SDSF_CONNECTION_FAILED (0x13) +#define MPI2_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14) + +/* + *Values for the SAS DeviceInfo field used in SAS Device Status Change Event + *data and SAS Configuration pages. + */ +#define MPI2_SAS_DEVICE_INFO_SEP (0x00004000) +#define MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000) +#define MPI2_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000) +#define MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800) +#define MPI2_SAS_DEVICE_INFO_SSP_TARGET (0x00000400) +#define MPI2_SAS_DEVICE_INFO_STP_TARGET (0x00000200) +#define MPI2_SAS_DEVICE_INFO_SMP_TARGET (0x00000100) +#define MPI2_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080) +#define MPI2_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040) +#define MPI2_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020) +#define MPI2_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010) +#define MPI2_SAS_DEVICE_INFO_SATA_HOST (0x00000008) + +#define MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007) +#define MPI2_SAS_DEVICE_INFO_NO_DEVICE (0x00000000) +#define MPI2_SAS_DEVICE_INFO_END_DEVICE (0x00000001) +#define MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002) +#define MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003) + +/***************************************************************************** +* +* SAS Messages +* +*****************************************************************************/ + +/**************************************************************************** +* SMP Passthrough messages +****************************************************************************/ + +/*SMP Passthrough Request Message */ +typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST { + U8 PassthroughFlags; /*0x00 */ + U8 PhysicalPort; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 RequestDataLength; /*0x04 */ + U8 SGLFlags; /*0x06*//*MPI v2.0 only. 
Reserved on MPI v2.5*/ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Reserved2; /*0x0C */ + U64 SASAddress; /*0x10 */ + U32 Reserved3; /*0x18 */ + U32 Reserved4; /*0x1C */ + MPI2_SIMPLE_SGE_UNION SGL;/*0x20 */ +} MPI2_SMP_PASSTHROUGH_REQUEST, *PTR_MPI2_SMP_PASSTHROUGH_REQUEST, + Mpi2SmpPassthroughRequest_t, *pMpi2SmpPassthroughRequest_t; + +/*values for PassthroughFlags field */ +#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80) + +/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*SMP Passthrough Reply Message */ +typedef struct _MPI2_SMP_PASSTHROUGH_REPLY { + U8 PassthroughFlags; /*0x00 */ + U8 PhysicalPort; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 ResponseDataLength; /*0x04 */ + U8 SGLFlags; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U8 Reserved2; /*0x0C */ + U8 SASStatus; /*0x0D */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 Reserved3; /*0x14 */ + U8 ResponseData[4]; /*0x18 */ +} MPI2_SMP_PASSTHROUGH_REPLY, *PTR_MPI2_SMP_PASSTHROUGH_REPLY, + Mpi2SmpPassthroughReply_t, *pMpi2SmpPassthroughReply_t; + +/*values for PassthroughFlags field */ +#define MPI2_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80) + +/*values for SASStatus field are at the top of this file */ + +/**************************************************************************** +* SATA Passthrough messages +****************************************************************************/ + +typedef union _MPI2_SATA_PT_SGE_UNION { + MPI2_SGE_SIMPLE_UNION MpiSimple; /*MPI v2.0 only */ + MPI2_SGE_CHAIN_UNION MpiChain; /*MPI v2.0 only */ + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; + MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; /*MPI v2.0 only */ + MPI25_IEEE_SGE_CHAIN64 IeeeChain64; /*MPI v2.5 only */ +} MPI2_SATA_PT_SGE_UNION, *PTR_MPI2_SATA_PT_SGE_UNION, + Mpi2SataPTSGEUnion_t, *pMpi2SataPTSGEUnion_t; + +/*SATA Passthrough Request Message */ +typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 PassthroughFlags; /*0x04 */ + U8 SGLFlags; /*0x06*//*MPI v2.0 only. 
Reserved on MPI v2.5*/ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Reserved2; /*0x0C */ + U32 Reserved3; /*0x10 */ + U32 Reserved4; /*0x14 */ + U32 DataLength; /*0x18 */ + U8 CommandFIS[20]; /*0x1C */ + MPI2_SATA_PT_SGE_UNION SGL;/*0x30*//*MPI v2.5: IEEE 64 elements only*/ +} MPI2_SATA_PASSTHROUGH_REQUEST, *PTR_MPI2_SATA_PASSTHROUGH_REQUEST, + Mpi2SataPassthroughRequest_t, + *pMpi2SataPassthroughRequest_t; + +/*values for PassthroughFlags field */ +#define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100) +#define MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA (0x0040) +#define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020) +#define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010) +#define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004) +#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002) +#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001) + +/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*SATA Passthrough Reply Message */ +typedef struct _MPI2_SATA_PASSTHROUGH_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 PassthroughFlags; /*0x04 */ + U8 SGLFlags; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U8 Reserved2; /*0x0C */ + U8 SASStatus; /*0x0D */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 StatusFIS[20]; /*0x14 */ + U32 StatusControlRegisters; /*0x28 */ + U32 TransferCount; /*0x2C */ +} MPI2_SATA_PASSTHROUGH_REPLY, *PTR_MPI2_SATA_PASSTHROUGH_REPLY, + Mpi2SataPassthroughReply_t, *pMpi2SataPassthroughReply_t; + +/*values for SASStatus field are at the top of this file */ + +/**************************************************************************** +* SAS IO Unit Control messages +* (MPI v2.5 and earlier only. +* Replaced by IO Unit Control messages in MPI v2.6 and later.) 
+****************************************************************************/ + +/*SAS IO Unit Control Request Message */ +typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST { + U8 Operation; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 DevHandle; /*0x04 */ + U8 IOCParameter; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U8 PhyNum; /*0x0E */ + U8 PrimFlags; /*0x0F */ + U32 Primitive; /*0x10 */ + U8 LookupMethod; /*0x14 */ + U8 Reserved5; /*0x15 */ + U16 SlotNumber; /*0x16 */ + U64 LookupAddress; /*0x18 */ + U32 IOCParameterValue; /*0x20 */ + U32 Reserved7; /*0x24 */ + U32 Reserved8; /*0x28 */ +} MPI2_SAS_IOUNIT_CONTROL_REQUEST, + *PTR_MPI2_SAS_IOUNIT_CONTROL_REQUEST, + Mpi2SasIoUnitControlRequest_t, + *pMpi2SasIoUnitControlRequest_t; + +/*values for the Operation field */ +#define MPI2_SAS_OP_CLEAR_ALL_PERSISTENT (0x02) +#define MPI2_SAS_OP_PHY_LINK_RESET (0x06) +#define MPI2_SAS_OP_PHY_HARD_RESET (0x07) +#define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08) +#define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A) +#define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B) +#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) /* MPI v2.0 only */ +#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D) +#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E) +#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F) +#define MPI25_SAS_OP_ENABLE_FP_DEVICE (0x10) +#define MPI25_SAS_OP_DISABLE_FP_DEVICE (0x11) +#define MPI25_SAS_OP_ENABLE_FP_ALL (0x12) +#define MPI25_SAS_OP_DISABLE_FP_ALL (0x13) +#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14) +#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15) +#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80) + +/*values for the PrimFlags field */ +#define MPI2_SAS_PRIMFLAGS_SINGLE (0x08) +#define MPI2_SAS_PRIMFLAGS_TRIPLE (0x02) +#define MPI2_SAS_PRIMFLAGS_REDUNDANT (0x01) + +/*values for the LookupMethod field */ +#define MPI2_SAS_LOOKUP_METHOD_SAS_ADDRESS (0x01) +#define MPI2_SAS_LOOKUP_METHOD_SAS_ENCLOSURE_SLOT (0x02) +#define MPI2_SAS_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03) + +/*SAS IO Unit Control Reply Message */ +typedef struct _MPI2_SAS_IOUNIT_CONTROL_REPLY { + U8 Operation; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 DevHandle; /*0x04 */ + U8 IOCParameter; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_SAS_IOUNIT_CONTROL_REPLY, + *PTR_MPI2_SAS_IOUNIT_CONTROL_REPLY, + Mpi2SasIoUnitControlReply_t, *pMpi2SasIoUnitControlReply_t; + +#endif diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h new file mode 100644 index 000000000..17ef7f63b --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h @@ -0,0 +1,565 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_tool.h + * Title: MPI diagnostic tool structures and definitions + * Creation Date: March 26, 2007 + * + * mpi2_tool.h Version: 02.00.16 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release + * structures and defines. + * 02-29-08 02.00.02 Modified various names to make them 32-character unique. 
+ * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. + * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request + * and reply messages. + * Added MPI2_DIAG_BUF_TYPE_EXTENDED. + * Incremented MPI2_DIAG_BUF_TYPE_COUNT. + * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. + * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer + * Post Request. + * 05-25-11 02.00.07 Added Flags field and related defines to + * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. + * 11-18-11 02.00.08 Incorporating additions for MPI v2.5. + * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request + * message. + * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that + * it uses MPI Chain SGE as well as MPI Simple SGE. + * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. + * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC. + * 11-18-14 02.00.13 Updated copyright information. + * 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean + * Tool Request Message. + * 07-22-18 02.00.15 Added defines for new TOOLBOX_PCIE_LANE_MARGINING tool. + * Added option for DeviceInfo field in ISTWI tool. + * 12-17-18 02.00.16 Shorten some defines to be compatible with DOS. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_TOOL_H +#define MPI2_TOOL_H + +/***************************************************************************** +* +* Toolbox Messages +* +*****************************************************************************/ + +/*defines for the Tools */ +#define MPI2_TOOLBOX_CLEAN_TOOL (0x00) +#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01) +#define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02) +#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03) +#define MPI2_TOOLBOX_BEACON_TOOL (0x05) +#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06) +#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07) +#define MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN (0x08) + +/**************************************************************************** +* Toolbox reply +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_REPLY { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_TOOLBOX_REPLY, *PTR_MPI2_TOOLBOX_REPLY, + Mpi2ToolboxReply_t, *pMpi2ToolboxReply_t; + +/**************************************************************************** +* Toolbox Clean Tool request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Flags; /*0x0C */ +} MPI2_TOOLBOX_CLEAN_REQUEST, *PTR_MPI2_TOOLBOX_CLEAN_REQUEST, + Mpi2ToolboxCleanRequest_t, *pMpi2ToolboxCleanRequest_t; + +/*values for the Flags field */ +#define MPI2_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000) +#define MPI2_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000) +#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000) +#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000) 
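+/*
+ * Editor's note: the following block is an illustrative sketch only and is
+ * not part of the upstream mpi2_tool.h (hence the #if 0 guard). It shows how
+ * a caller might fill out the Clean Tool request defined above, OR-ing the
+ * region bits into the little-endian Flags word. It assumes kernel context,
+ * with mpi2.h providing MPI2_FUNCTION_TOOLBOX and the usual kernel headers
+ * providing memset() and cpu_to_le32().
+ */
+#if 0	/* illustrative example, never compiled */
+static void example_fill_clean_request(MPI2_TOOLBOX_CLEAN_REQUEST *req)
+{
+	memset(req, 0, sizeof(*req));
+	req->Tool = MPI2_TOOLBOX_CLEAN_TOOL;
+	req->Function = MPI2_FUNCTION_TOOLBOX;
+	/* e.g. clear the persistent manufacturing and other persistent pages */
+	req->Flags = cpu_to_le32(MPI2_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES |
+				 MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES);
+}
+#endif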
+#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000) +#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000) +#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000) +#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000) +#define MPI2_TOOLBOX_CLEAN_SBR (0x00800000) +#define MPI2_TOOLBOX_CLEAN_SBR_BACKUP (0x00400000) +#define MPI2_TOOLBOX_CLEAN_HIIM (0x00200000) +#define MPI2_TOOLBOX_CLEAN_HIIA (0x00100000) +#define MPI2_TOOLBOX_CLEAN_CTLR (0x00080000) +#define MPI2_TOOLBOX_CLEAN_IMR_FIRMWARE (0x00040000) +#define MPI2_TOOLBOX_CLEAN_MR_NVDATA (0x00020000) +#define MPI2_TOOLBOX_CLEAN_RESERVED_5_16 (0x0001FFE0) +#define MPI2_TOOLBOX_CLEAN_ALL_BUT_MPB (0x00000010) +#define MPI2_TOOLBOX_CLEAN_ENTIRE_FLASH (0x00000008) +#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004) +#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002) +#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001) + +/**************************************************************************** +* Toolbox Memory Move request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + MPI2_SGE_SIMPLE_UNION SGL; /*0x0C */ +} MPI2_TOOLBOX_MEM_MOVE_REQUEST, *PTR_MPI2_TOOLBOX_MEM_MOVE_REQUEST, + Mpi2ToolboxMemMoveRequest_t, *pMpi2ToolboxMemMoveRequest_t; + +/**************************************************************************** +* Toolbox Diagnostic Data Upload request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 SGLFlags; /*0x0C */ + U8 Reserved5; /*0x0D */ + U16 Reserved6; /*0x0E */ + U32 Flags; /*0x10 */ + U32 DataLength; /*0x14 */ + MPI2_SGE_SIMPLE_UNION SGL; /*0x18 */ +} MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, + *PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, + Mpi2ToolboxDiagDataUploadRequest_t, + *pMpi2ToolboxDiagDataUploadRequest_t; + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER { + U32 DiagDataLength; /*00h */ + U8 FormatCode; /*04h */ + U8 Reserved1; /*05h */ + U16 Reserved2; /*06h */ +} MPI2_DIAG_DATA_UPLOAD_HEADER, *PTR_MPI2_DIAG_DATA_UPLOAD_HEADER, + Mpi2DiagDataUploadHeader_t, *pMpi2DiagDataUploadHeader_t; + +/**************************************************************************** +* Toolbox ISTWI Read Write Tool +****************************************************************************/ + +/*Toolbox ISTWI Read Write Tool request message */ +typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 Reserved6; /*0x10 */ + U8 DevIndex; /*0x14 */ + U8 Action; /*0x15 */ + U8 SGLFlags; /*0x16 */ + U8 Flags; /*0x17 */ + U16 TxDataLength; /*0x18 */ + U16 RxDataLength; /*0x1A */ + U32 Reserved8; /*0x1C */ + U32 Reserved9; /*0x20 */ + U32 
Reserved10; /*0x24 */ + U32 Reserved11; /*0x28 */ + U32 Reserved12; /*0x2C */ + MPI2_SGE_SIMPLE_UNION SGL; /*0x30 */ +} MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST, + *PTR_MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST, + Mpi2ToolboxIstwiReadWriteRequest_t, + *pMpi2ToolboxIstwiReadWriteRequest_t; + +/*values for the Action field */ +#define MPI2_TOOL_ISTWI_ACTION_READ_DATA (0x01) +#define MPI2_TOOL_ISTWI_ACTION_WRITE_DATA (0x02) +#define MPI2_TOOL_ISTWI_ACTION_SEQUENCE (0x03) +#define MPI2_TOOL_ISTWI_ACTION_RESERVE_BUS (0x10) +#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11) +#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12) + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*values for the Flags field */ +#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80) +#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07) + +/*MPI26 TOOLBOX Request MsgFlags defines */ +#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_MASK (0x01) +/*Request uses Man Page 43 device index addressing */ +#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INDEX (0x00) +/*Request uses Man Page 43 device info struct addressing */ +#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INFO (0x01) + +/*Toolbox ISTWI Read Write Tool reply message */ +typedef struct _MPI2_TOOLBOX_ISTWI_REPLY { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 DevIndex; /*0x14 */ + U8 Action; /*0x15 */ + U8 IstwiStatus; /*0x16 */ + U8 Reserved6; /*0x17 */ + U16 TxDataCount; /*0x18 */ + U16 RxDataCount; /*0x1A */ +} MPI2_TOOLBOX_ISTWI_REPLY, *PTR_MPI2_TOOLBOX_ISTWI_REPLY, + Mpi2ToolboxIstwiReply_t, *pMpi2ToolboxIstwiReply_t; + +/**************************************************************************** +* Toolbox Beacon Tool request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_BEACON_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 Reserved5; /*0x0C */ + U8 PhysicalPort; /*0x0D */ + U8 Reserved6; /*0x0E */ + U8 Flags; /*0x0F */ +} MPI2_TOOLBOX_BEACON_REQUEST, *PTR_MPI2_TOOLBOX_BEACON_REQUEST, + Mpi2ToolboxBeaconRequest_t, *pMpi2ToolboxBeaconRequest_t; + +/*values for the Flags field */ +#define MPI2_TOOLBOX_FLAGS_BEACONMODE_OFF (0x00) +#define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01) + +/**************************************************************************** +* Toolbox Diagnostic CLI Tool +****************************************************************************/ + +#define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH (0x5C) + +/*MPI v2.0 Toolbox Diagnostic CLI Tool request message */ +typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 SGLFlags; /*0x0C */ + U8 Reserved5; /*0x0D */ + U16 Reserved6; /*0x0E */ + U32 DataLength; /*0x10 */ + U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */ + MPI2_MPI_SGE_IO_UNION SGL; /*0x70 */ +} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, 
+ *PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + Mpi2ToolboxDiagnosticCliRequest_t, + *pMpi2ToolboxDiagnosticCliRequest_t; + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*MPI v2.5 Toolbox Diagnostic CLI Tool request message */ +typedef struct _MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 DataLength; /*0x10 */ + U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */ + MPI25_SGE_IO_UNION SGL; /* 0x70 */ +} MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + *PTR_MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + Mpi25ToolboxDiagnosticCliRequest_t, + *pMpi25ToolboxDiagnosticCliRequest_t; + +/*Toolbox Diagnostic CLI Tool reply message */ +typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 ReturnedDataLength; /*0x14 */ +} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY, + *PTR_MPI2_TOOLBOX_DIAG_CLI_REPLY, + Mpi2ToolboxDiagnosticCliReply_t, + *pMpi2ToolboxDiagnosticCliReply_t; + + +/**************************************************************************** +* Toolbox Console Text Display Tool +****************************************************************************/ + +/* Toolbox Console Text Display Tool request message */ +typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST { + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 Console; /* 0x0C */ + U8 Flags; /* 0x0D */ + U16 Reserved6; /* 0x0E */ + U8 TextToDisplay[4]; /* 0x10 */ +} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, +*PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, +Mpi2ToolboxTextDisplayRequest_t, +*pMpi2ToolboxTextDisplayRequest_t; + +/* defines for the Console field */ +#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK (0xF0) +#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT (0x00) +#define MPI2_TOOLBOX_CONSOLE_TYPE_UART (0x10) +#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET (0x20) + +#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK (0x0F) + +/* defines for the Flags field */ +#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01) + + +/*************************************************************************** + * Toolbox Backend Lane Margining Tool + *************************************************************************** + */ + +/*Toolbox Backend Lane Margining Tool request message */ +typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 Command; /*0x0C */ + U8 SwitchPort; /*0x0D */ + U16 DevHandle; /*0x0E */ + U8 RegisterOffset; /*0x10 */ + U8 Reserved5; /*0x11 */ + U16 DataLength; /*0x12 */ + MPI25_SGE_IO_UNION SGL; /*0x14 */ +} MPI26_TOOLBOX_LANE_MARGINING_REQUEST, + *PTR_MPI2_TOOLBOX_LANE_MARGINING_REQUEST, + 
Mpi26ToolboxLaneMarginingRequest_t, + *pMpi2ToolboxLaneMarginingRequest_t; + +/* defines for the Command field */ +#define MPI26_TOOL_MARGIN_COMMAND_ENTER_MARGIN_MODE (0x01) +#define MPI26_TOOL_MARGIN_COMMAND_READ_REGISTER_DATA (0x02) +#define MPI26_TOOL_MARGIN_COMMAND_WRITE_REGISTER_DATA (0x03) +#define MPI26_TOOL_MARGIN_COMMAND_EXIT_MARGIN_MODE (0x04) + + +/*Toolbox Backend Lane Margining Tool reply message */ +typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REPLY { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U16 ReturnedDataLength; /*0x14 */ + U16 Reserved6; /*0x16 */ +} MPI26_TOOLBOX_LANE_MARGINING_REPLY, + *PTR_MPI26_TOOLBOX_LANE_MARGINING_REPLY, + Mpi26ToolboxLaneMarginingReply_t, + *pMpi26ToolboxLaneMarginingReply_t; + + +/***************************************************************************** +* +* Diagnostic Buffer Messages +* +*****************************************************************************/ + +/**************************************************************************** +* Diagnostic Buffer Post request +****************************************************************************/ + +typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST { + U8 ExtendedType; /*0x00 */ + U8 BufferType; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U64 BufferAddress; /*0x0C */ + U32 BufferLength; /*0x14 */ + U32 Reserved5; /*0x18 */ + U32 Reserved6; /*0x1C */ + U32 Flags; /*0x20 */ + U32 ProductSpecific[23]; /*0x24 */ +} MPI2_DIAG_BUFFER_POST_REQUEST, *PTR_MPI2_DIAG_BUFFER_POST_REQUEST, + Mpi2DiagBufferPostRequest_t, *pMpi2DiagBufferPostRequest_t; + +/*values for the ExtendedType field */ +#define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02) + +/*values for the BufferType field */ +#define MPI2_DIAG_BUF_TYPE_TRACE (0x00) +#define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01) +#define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02) +/*count of the number of buffer types */ +#define MPI2_DIAG_BUF_TYPE_COUNT (0x03) + +/*values for the Flags field */ +#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002) +#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001) + +/**************************************************************************** +* Diagnostic Buffer Post reply +****************************************************************************/ + +typedef struct _MPI2_DIAG_BUFFER_POST_REPLY { + U8 ExtendedType; /*0x00 */ + U8 BufferType; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 TransferLength; /*0x14 */ +} MPI2_DIAG_BUFFER_POST_REPLY, *PTR_MPI2_DIAG_BUFFER_POST_REPLY, + Mpi2DiagBufferPostReply_t, *pMpi2DiagBufferPostReply_t; + +/**************************************************************************** +* Diagnostic Release request +****************************************************************************/ + +typedef struct _MPI2_DIAG_RELEASE_REQUEST { + U8 Reserved1; /*0x00 */ + U8 BufferType; /*0x01 */ + U8 
ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ +} MPI2_DIAG_RELEASE_REQUEST, *PTR_MPI2_DIAG_RELEASE_REQUEST, + Mpi2DiagReleaseRequest_t, *pMpi2DiagReleaseRequest_t; + +/**************************************************************************** +* Diagnostic Buffer Post reply +****************************************************************************/ + +typedef struct _MPI2_DIAG_RELEASE_REPLY { + U8 Reserved1; /*0x00 */ + U8 BufferType; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_DIAG_RELEASE_REPLY, *PTR_MPI2_DIAG_RELEASE_REPLY, + Mpi2DiagReleaseReply_t, *pMpi2DiagReleaseReply_t; + +#endif diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h new file mode 100644 index 000000000..36494439a --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2000-2014 Avago Technologies. All rights reserved. + * + * + * Name: mpi2_type.h + * Title: MPI basic type definitions + * Creation Date: August 16, 2006 + * + * mpi2_type.h Version: 02.00.01 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 11-18-14 02.00.01 Updated copyright information. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_TYPE_H +#define MPI2_TYPE_H + +/******************************************************************************* + * Define * if it hasn't already been defined. By default + * * is defined to be a near pointer. MPI2_POINTER can be defined as + * a far pointer by defining * as "far *" before this header file is + * included. + */ + +/* the basic types may have already been included by mpi_type.h */ +#ifndef MPI_TYPE_H +/***************************************************************************** +* +* Basic Types +* +*****************************************************************************/ + +typedef u8 U8; +typedef __le16 U16; +typedef __le32 U32; +typedef __le64 U64 __attribute__ ((aligned(4))); + +/***************************************************************************** +* +* Pointer Types +* +*****************************************************************************/ + +typedef U8 *PU8; +typedef U16 *PU16; +typedef U32 *PU32; +typedef U64 *PU64; + +#endif + +#endif diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c new file mode 100644 index 000000000..a75f670bf --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -0,0 +1,8959 @@ +/* + * This is the Fusion MPT base driver providing common API layer interface + * for access to MPT (Message Passing Technology) firmware. 
+ * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* To get host page size per arch */ + + +#include "mpt3sas_base.h" + +static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS]; + + +#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */ + + /* maximum controller queue depth */ +#define MAX_HBA_QUEUE_DEPTH 30000 +#define MAX_CHAIN_DEPTH 100000 +static int max_queue_depth = -1; +module_param(max_queue_depth, int, 0444); +MODULE_PARM_DESC(max_queue_depth, " max controller queue depth "); + +static int max_sgl_entries = -1; +module_param(max_sgl_entries, int, 0444); +MODULE_PARM_DESC(max_sgl_entries, " max sg entries "); + +static int msix_disable = -1; +module_param(msix_disable, int, 0444); +MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); + +static int smp_affinity_enable = 1; +module_param(smp_affinity_enable, int, 0444); +MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)"); + +static int max_msix_vectors = -1; +module_param(max_msix_vectors, int, 0444); +MODULE_PARM_DESC(max_msix_vectors, + " max msix vectors"); + +static int irqpoll_weight = -1; +module_param(irqpoll_weight, int, 0444); +MODULE_PARM_DESC(irqpoll_weight, + "irq poll weight (default= one fourth of HBA queue depth)"); + +static int mpt3sas_fwfault_debug; +MODULE_PARM_DESC(mpt3sas_fwfault_debug, + " enable detection of firmware fault and halt firmware - (default=0)"); + +static int perf_mode = -1; +module_param(perf_mode, int, 0444); +MODULE_PARM_DESC(perf_mode, + "Performance mode (only for Aero/Sea Generation), options:\n\t\t" + "0 - balanced: high iops mode is enabled &\n\t\t" + "interrupt coalescing is enabled only on high iops queues,\n\t\t" + "1 - iops: high iops mode is disabled &\n\t\t" + "interrupt coalescing is enabled on all queues,\n\t\t" + "2 - latency: high iops mode is disabled &\n\t\t" + "interrupt coalescing is enabled on all queues with timeout value 0xA,\n" + "\t\tdefault - default perf_mode is 'balanced'" + ); + +static int poll_queues; +module_param(poll_queues, int, 0444); +MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode.\n\t\t" + "This parameter is effective only if host_tagset_enable=1. &\n\t\t" + "when poll_queues are enabled then &\n\t\t" + "perf_mode is set to latency mode. &\n\t\t" + ); + +enum mpt3sas_perf_mode { + MPT_PERF_MODE_DEFAULT = -1, + MPT_PERF_MODE_BALANCED = 0, + MPT_PERF_MODE_IOPS = 1, + MPT_PERF_MODE_LATENCY = 2, +}; + +static int +_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, + u32 ioc_state, int timeout); +static int +_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc); +static void +_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc); + +static u32 +_base_readl_ext_retry(const void __iomem *addr); + +/** + * mpt3sas_base_check_cmd_timeout - Function + * to check timeout and command termination due + * to Host reset. + * + * @ioc: per adapter object. + * @status: Status of issued command. + * @mpi_request:mf request pointer. + * @sz: size of buffer. + * + * Return: 1/0 Reset to be done or Not + */ +u8 +mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc, + u8 status, void *mpi_request, int sz) +{ + u8 issue_reset = 0; + + if (!(status & MPT3_CMD_RESET)) + issue_reset = 1; + + ioc_err(ioc, "Command %s\n", + issue_reset == 0 ? 
"terminated due to Host Reset" : "Timeout"); + _debug_dump_mf(mpi_request, sz); + + return issue_reset; +} + +/** + * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. + * @val: ? + * @kp: ? + * + * Return: ? + */ +static int +_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + struct MPT3SAS_ADAPTER *ioc; + + if (ret) + return ret; + + /* global ioc spinlock to protect controller list on list operations */ + pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug); + spin_lock(&gioc_lock); + list_for_each_entry(ioc, &mpt3sas_ioc_list, list) + ioc->fwfault_debug = mpt3sas_fwfault_debug; + spin_unlock(&gioc_lock); + return 0; +} +module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug, + param_get_int, &mpt3sas_fwfault_debug, 0644); + +/** + * _base_readl_aero - retry readl for max three times. + * @addr: MPT Fusion system interface register address + * + * Retry the readl() for max three times if it gets zero value + * while reading the system interface register. + */ +static inline u32 +_base_readl_aero(const void __iomem *addr) +{ + u32 i = 0, ret_val; + + do { + ret_val = readl(addr); + i++; + } while (ret_val == 0 && i < 3); + + return ret_val; +} + +static u32 +_base_readl_ext_retry(const void __iomem *addr) +{ + u32 i, ret_val; + + for (i = 0 ; i < 30 ; i++) { + ret_val = readl(addr); + if (ret_val != 0) + break; + } + + return ret_val; +} + +static inline u32 +_base_readl(const void __iomem *addr) +{ + return readl(addr); +} + +/** + * _base_clone_reply_to_sys_mem - copies reply to reply free iomem + * in BAR0 space. + * + * @ioc: per adapter object + * @reply: reply message frame(lower 32bit addr) + * @index: System request message index. + */ +static void +_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply, + u32 index) +{ + /* + * 256 is offset within sys register. + * 256 offset MPI frame starts. Max MPI frame supported is 32. + * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts + */ + u16 cmd_credit = ioc->facts.RequestCredit + 1; + void __iomem *reply_free_iomem = (void __iomem *)ioc->chip + + MPI_FRAME_START_OFFSET + + (cmd_credit * ioc->request_sz) + (index * sizeof(u32)); + + writel(reply, reply_free_iomem); +} + +/** + * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames + * to system/BAR0 region. + * + * @dst_iomem: Pointer to the destination location in BAR0 space. + * @src: Pointer to the Source data. + * @size: Size of data to be copied. + */ +static void +_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size) +{ + int i; + u32 *src_virt_mem = (u32 *)src; + + for (i = 0; i < size/4; i++) + writel((u32)src_virt_mem[i], + (void __iomem *)dst_iomem + (i * 4)); +} + +/** + * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region + * + * @dst_iomem: Pointer to the destination location in BAR0 space. + * @src: Pointer to the Source data. + * @size: Size of data to be copied. + */ +static void +_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size) +{ + int i; + u32 *src_virt_mem = (u32 *)(src); + + for (i = 0; i < size/4; i++) + writel((u32)src_virt_mem[i], + (void __iomem *)dst_iomem + (i * 4)); +} + +/** + * _base_get_chain - Calculates and Returns virtual chain address + * for the provided smid in BAR0 space. + * + * @ioc: per adapter object + * @smid: system request message index + * @sge_chain_count: Scatter gather chain count. + * + * Return: the chain address. 
+ */ +static inline void __iomem* +_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u8 sge_chain_count) +{ + void __iomem *base_chain, *chain_virt; + u16 cmd_credit = ioc->facts.RequestCredit + 1; + + base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET + + (cmd_credit * ioc->request_sz) + + REPLY_FREE_POOL_SIZE; + chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth * + ioc->request_sz) + (sge_chain_count * ioc->request_sz); + return chain_virt; +} + +/** + * _base_get_chain_phys - Calculates and Returns physical address + * in BAR0 for scatter gather chains, for + * the provided smid. + * + * @ioc: per adapter object + * @smid: system request message index + * @sge_chain_count: Scatter gather chain count. + * + * Return: Physical chain address. + */ +static inline phys_addr_t +_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u8 sge_chain_count) +{ + phys_addr_t base_chain_phys, chain_phys; + u16 cmd_credit = ioc->facts.RequestCredit + 1; + + base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET + + (cmd_credit * ioc->request_sz) + + REPLY_FREE_POOL_SIZE; + chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth * + ioc->request_sz) + (sge_chain_count * ioc->request_sz); + return chain_phys; +} + +/** + * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host + * buffer address for the provided smid. + * (Each smid can have 64K starts from 17024) + * + * @ioc: per adapter object + * @smid: system request message index + * + * Return: Pointer to buffer location in BAR0. + */ + +static void __iomem * +_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + u16 cmd_credit = ioc->facts.RequestCredit + 1; + // Added extra 1 to reach end of chain. + void __iomem *chain_end = _base_get_chain(ioc, + cmd_credit + 1, + ioc->facts.MaxChainDepth); + return chain_end + (smid * 64 * 1024); +} + +/** + * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped + * Host buffer Physical address for the provided smid. + * (Each smid can have 64K starts from 17024) + * + * @ioc: per adapter object + * @smid: system request message index + * + * Return: Pointer to buffer location in BAR0. + */ +static phys_addr_t +_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + u16 cmd_credit = ioc->facts.RequestCredit + 1; + phys_addr_t chain_end_phys = _base_get_chain_phys(ioc, + cmd_credit + 1, + ioc->facts.MaxChainDepth); + return chain_end_phys + (smid * 64 * 1024); +} + +/** + * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain + * lookup list and Provides chain_buffer + * address for the matching dma address. + * (Each smid can have 64K starts from 17024) + * + * @ioc: per adapter object + * @chain_buffer_dma: Chain buffer dma address. + * + * Return: Pointer to chain buffer. Or Null on Failure. + */ +static void * +_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc, + dma_addr_t chain_buffer_dma) +{ + u16 index, j; + struct chain_tracker *ct; + + for (index = 0; index < ioc->scsiio_depth; index++) { + for (j = 0; j < ioc->chains_needed_per_io; j++) { + ct = &ioc->chain_lookup[index].chains_per_smid[j]; + if (ct && ct->chain_buffer_dma == chain_buffer_dma) + return ct->chain_buffer; + } + } + ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n"); + return NULL; +} + +/** + * _clone_sg_entries - MPI EP's scsiio and config requests + * are handled here. Base function for + * double buffering, before submitting + * the requests. + * + * @ioc: per adapter object. 
+ * @mpi_request: mf request pointer. + * @smid: system request message index. + */ +static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc, + void *mpi_request, u16 smid) +{ + Mpi2SGESimple32_t *sgel, *sgel_next; + u32 sgl_flags, sge_chain_count = 0; + bool is_write = false; + u16 i = 0; + void __iomem *buffer_iomem; + phys_addr_t buffer_iomem_phys; + void __iomem *buff_ptr; + phys_addr_t buff_ptr_phys; + void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO]; + void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO]; + phys_addr_t dst_addr_phys; + MPI2RequestHeader_t *request_hdr; + struct scsi_cmnd *scmd; + struct scatterlist *sg_scmd = NULL; + int is_scsiio_req = 0; + + request_hdr = (MPI2RequestHeader_t *) mpi_request; + + if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) { + Mpi25SCSIIORequest_t *scsiio_request = + (Mpi25SCSIIORequest_t *)mpi_request; + sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL; + is_scsiio_req = 1; + } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) { + Mpi2ConfigRequest_t *config_req = + (Mpi2ConfigRequest_t *)mpi_request; + sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE; + } else + return; + + /* From smid we can get scsi_cmd, once we have sg_scmd, + * we just need to get sg_virt and sg_next to get virtual + * address associated with sgel->Address. + */ + + if (is_scsiio_req) { + /* Get scsi_cmd using smid */ + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (scmd == NULL) { + ioc_err(ioc, "scmd is NULL\n"); + return; + } + + /* Get sg_scmd from scmd provided */ + sg_scmd = scsi_sglist(scmd); + } + + /* + * 0 - 255 System register + * 256 - 4352 MPI Frame. (This is based on maxCredit 32) + * 4352 - 4864 Reply_free pool (512 byte is reserved + * considering maxCredit 32. Reply need extra + * room, for mCPU case kept four times of + * maxCredit). + * 4864 - 17152 SGE chain element. (32cmd * 3 chain of + * 128 byte size = 12288) + * 17152 - x Host buffer mapped with smid. + * (Each smid can have 64K Max IO.) + * BAR0+Last 1K MSIX Addr and Data + * Total size in use 2113664 bytes of 4MB BAR0 + */ + + buffer_iomem = _base_get_buffer_bar0(ioc, smid); + buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid); + + buff_ptr = buffer_iomem; + buff_ptr_phys = buffer_iomem_phys; + WARN_ON(buff_ptr_phys > U32_MAX); + + if (le32_to_cpu(sgel->FlagsLength) & + (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT)) + is_write = true; + + for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) { + + sgl_flags = + (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT); + + switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) { + case MPI2_SGE_FLAGS_CHAIN_ELEMENT: + /* + * Helper function which on passing + * chain_buffer_dma returns chain_buffer. 
Get
+			 * the virtual address for sgel->Address
+			 */
+			sgel_next =
+				_base_get_chain_buffer_dma_to_chain_buffer(ioc,
+					le32_to_cpu(sgel->Address));
+			if (sgel_next == NULL)
+				return;
+			/*
+			 * This is copying a 128 byte chain
+			 * frame (not a host buffer)
+			 */
+			dst_chain_addr[sge_chain_count] =
+				_base_get_chain(ioc,
+					smid, sge_chain_count);
+			src_chain_addr[sge_chain_count] =
+				(void *) sgel_next;
+			dst_addr_phys = _base_get_chain_phys(ioc,
+						smid, sge_chain_count);
+			WARN_ON(dst_addr_phys > U32_MAX);
+			sgel->Address =
+				cpu_to_le32(lower_32_bits(dst_addr_phys));
+			sgel = sgel_next;
+			sge_chain_count++;
+			break;
+		case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
+			if (is_write) {
+				if (is_scsiio_req) {
+					_base_clone_to_sys_mem(buff_ptr,
+					    sg_virt(sg_scmd),
+					    (le32_to_cpu(sgel->FlagsLength) &
+					    0x00ffffff));
+					/*
+					 * FIXME: this relies on a zero
+					 * PCI mem_offset.
+					 */
+					sgel->Address =
+					    cpu_to_le32((u32)buff_ptr_phys);
+				} else {
+					_base_clone_to_sys_mem(buff_ptr,
+					    ioc->config_vaddr,
+					    (le32_to_cpu(sgel->FlagsLength) &
+					    0x00ffffff));
+					sgel->Address =
+					    cpu_to_le32((u32)buff_ptr_phys);
+				}
+			}
+			buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
+			    0x00ffffff);
+			buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
+			    0x00ffffff);
+			if ((le32_to_cpu(sgel->FlagsLength) &
+			    (MPI2_SGE_FLAGS_END_OF_BUFFER
+			    << MPI2_SGE_FLAGS_SHIFT)))
+				goto eob_clone_chain;
+			else {
+				/*
+				 * Every single element in MPT will have an
+				 * associated sg_next. Better to sanity-check
+				 * that sg_next is not NULL, but it will be a
+				 * bug if it is null.
+				 */
+				if (is_scsiio_req) {
+					sg_scmd = sg_next(sg_scmd);
+					if (sg_scmd)
+						sgel++;
+					else
+						goto eob_clone_chain;
+				}
+			}
+			break;
+		}
+	}
+
+eob_clone_chain:
+	for (i = 0; i < sge_chain_count; i++) {
+		if (is_scsiio_req)
+			_base_clone_to_sys_mem(dst_chain_addr[i],
+				src_chain_addr[i], ioc->request_sz);
+	}
+}
+
+/**
+ * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
+ * @arg: input argument, used to derive ioc
+ *
+ * Return:
+ * 0 if controller is removed from pci subsystem.
+ * -1 for all other cases.
+ */
+static int mpt3sas_remove_dead_ioc_func(void *arg)
+{
+	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
+	struct pci_dev *pdev;
+
+	if (!ioc)
+		return -1;
+
+	pdev = ioc->pdev;
+	if (!pdev)
+		return -1;
+	pci_stop_and_remove_bus_device_locked(pdev);
+	return 0;
+}
+
+/**
+ * _base_sync_drv_fw_timestamp - Sync Driver-FW TimeStamp.
+ * @ioc: Per Adapter Object
+ *
+ * Return: nothing.
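+ *
+ * Editor's note - worked example (illustrative only): the millisecond
+ * timestamp is split across two little-endian 32-bit fields of the request
+ * built below. For instance, 1,700,000,000,000 ms (0x18BCFE56800) is sent
+ * as Reserved7 = cpu_to_le32(0x0000018B) and
+ * IOCParameterValue = cpu_to_le32(0xCFE56800).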
+ */ +static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26IoUnitControlRequest_t *mpi_request; + Mpi26IoUnitControlReply_t *mpi_reply; + u16 smid; + ktime_t current_time; + u64 TimeStamp = 0; + u8 issue_reset = 0; + + mutex_lock(&ioc->scsih_cmds.mutex); + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "scsih_cmd in use %s\n", __func__); + goto out; + } + ioc->scsih_cmds.status = MPT3_CMD_PENDING; + smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + ioc_err(ioc, "Failed obtaining a smid %s\n", __func__); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + goto out; + } + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t)); + mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL; + mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER; + mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP; + current_time = ktime_get_real(); + TimeStamp = ktime_to_ms(current_time); + mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32); + mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF); + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + dinitprintk(ioc, ioc_info(ioc, + "Io Unit Control Sync TimeStamp (sending), @time %lld ms\n", + TimeStamp)); + wait_for_completion_timeout(&ioc->scsih_cmds.done, + MPT3SAS_TIMESYNC_TIMEOUT_SECONDS*HZ); + if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, mpi_request, + sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset); + goto issue_host_reset; + } + if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + dinitprintk(ioc, ioc_info(ioc, + "Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo))); + } +issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; +out: + mutex_unlock(&ioc->scsih_cmds.mutex); +} + +/** + * _base_fault_reset_work - workq handling ioc fault conditions + * @work: input argument, used to derive ioc + * + * Context: sleep. + */ +static void +_base_fault_reset_work(struct work_struct *work) +{ + struct MPT3SAS_ADAPTER *ioc = + container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work); + unsigned long flags; + u32 doorbell; + int rc; + struct task_struct *p; + + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) || + ioc->pci_error_recovery) + goto rearm_timer; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) { + ioc_err(ioc, "SAS host is non-operational !!!!\n"); + + /* It may be possible that EEH recovery can resolve some of + * pci bus failure issues rather removing the dead ioc function + * by considering controller is in a non-operational state. So + * here priority is given to the EEH recovery. If it doesn't + * not resolve this issue, mpt3sas driver will consider this + * controller to non-operational state and remove the dead ioc + * function. 
+ */ + if (ioc->non_operational_loop++ < 5) { + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, + flags); + goto rearm_timer; + } + + /* + * Call _scsih_flush_pending_cmds callback so that we flush all + * pending commands back to OS. This call is required to avoid + * deadlock at block layer. Dead IOC will fail to do diag reset, + * and this call is safe since dead ioc will never return any + * command back from HW. + */ + mpt3sas_base_pause_mq_polling(ioc); + ioc->schedule_dead_ioc_flush_running_cmds(ioc); + /* + * Set remove_host flag early since kernel thread will + * take some time to execute. + */ + ioc->remove_host = 1; + /*Remove the Dead Host */ + p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc, + "%s_dead_ioc_%d", ioc->driver_name, ioc->id); + if (IS_ERR(p)) + ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n", + __func__); + else + ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n", + __func__); + return; /* don't rearm timer */ + } + + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { + u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ? + ioc->manu_pg11.CoreDumpTOSec : + MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS; + + timeout /= (FAULT_POLLING_INTERVAL/1000); + + if (ioc->ioc_coredump_loop == 0) { + mpt3sas_print_coredump_info(ioc, + doorbell & MPI2_DOORBELL_DATA_MASK); + /* do not accept any IOs and disable the interrupts */ + spin_lock_irqsave( + &ioc->ioc_reset_in_progress_lock, flags); + ioc->shost_recovery = 1; + spin_unlock_irqrestore( + &ioc->ioc_reset_in_progress_lock, flags); + mpt3sas_base_mask_interrupts(ioc); + mpt3sas_base_pause_mq_polling(ioc); + _base_clear_outstanding_commands(ioc); + } + + ioc_info(ioc, "%s: CoreDump loop %d.", + __func__, ioc->ioc_coredump_loop); + + /* Wait until CoreDump completes or times out */ + if (ioc->ioc_coredump_loop++ < timeout) { + spin_lock_irqsave( + &ioc->ioc_reset_in_progress_lock, flags); + goto rearm_timer; + } + } + + if (ioc->ioc_coredump_loop) { + if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP) + ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d", + __func__, ioc->ioc_coredump_loop); + else + ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d", + __func__, ioc->ioc_coredump_loop); + ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE; + } + ioc->non_operational_loop = 0; + if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) { + rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc_warn(ioc, "%s: hard reset: %s\n", + __func__, rc == 0 ? 
"success" : "failed"); + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + } else if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) + mpt3sas_print_coredump_info(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + if (rc && (doorbell & MPI2_IOC_STATE_MASK) != + MPI2_IOC_STATE_OPERATIONAL) + return; /* don't rearm timer */ + } + ioc->ioc_coredump_loop = 0; + if (ioc->time_sync_interval && + ++ioc->timestamp_update_count >= ioc->time_sync_interval) { + ioc->timestamp_update_count = 0; + _base_sync_drv_fw_timestamp(ioc); + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + rearm_timer: + if (ioc->fault_reset_work_q) + queue_delayed_work(ioc->fault_reset_work_q, + &ioc->fault_reset_work, + msecs_to_jiffies(FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +/** + * mpt3sas_base_start_watchdog - start the fault_reset_work_q + * @ioc: per adapter object + * + * Context: sleep. + */ +void +mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned long flags; + + if (ioc->fault_reset_work_q) + return; + + ioc->timestamp_update_count = 0; + /* initialize fault polling */ + + INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); + snprintf(ioc->fault_reset_work_q_name, + sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status", + ioc->driver_name, ioc->id); + ioc->fault_reset_work_q = + create_singlethread_workqueue(ioc->fault_reset_work_q_name); + if (!ioc->fault_reset_work_q) { + ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__); + return; + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->fault_reset_work_q) + queue_delayed_work(ioc->fault_reset_work_q, + &ioc->fault_reset_work, + msecs_to_jiffies(FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +/** + * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q + * @ioc: per adapter object + * + * Context: sleep. + */ +void +mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + wq = ioc->fault_reset_work_q; + ioc->fault_reset_work_q = NULL; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (wq) { + if (!cancel_delayed_work_sync(&ioc->fault_reset_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +/** + * mpt3sas_base_fault_info - verbose translation of firmware FAULT code + * @ioc: per adapter object + * @fault_code: fault code + */ +void +mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code) +{ + ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code); +} + +/** + * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state + * @ioc: per adapter object + * @fault_code: fault code + * + * Return: nothing. + */ +void +mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code) +{ + ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code); +} + +/** + * mpt3sas_base_wait_for_coredump_completion - Wait until coredump + * completes or times out + * @ioc: per adapter object + * @caller: caller function name + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc, + const char *caller) +{ + u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ? 
+		ioc->manu_pg11.CoreDumpTOSec :
+		MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
+
+	int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
+					timeout);
+
+	if (ioc_state)
+		ioc_err(ioc,
+		    "%s: CoreDump timed out. (ioc_state=0x%x)\n",
+		    caller, ioc_state);
+	else
+		ioc_info(ioc,
+		    "%s: CoreDump completed. (ioc_state=0x%x)\n",
+		    caller, ioc_state);
+
+	return ioc_state;
+}
+
+/**
+ * mpt3sas_halt_firmware - halts the MPT controller firmware
+ * @ioc: per adapter object
+ *
+ * For debugging timeout related issues. Writing 0xC0FFEE00
+ * to the doorbell register will halt controller firmware. The
+ * intent is to stop both the driver and the firmware so that the
+ * end user can obtain a ring buffer from the controller UART.
+ */
+void
+mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
+{
+	u32 doorbell;
+
+	if (!ioc->fwfault_debug)
+		return;
+
+	dump_stack();
+
+	doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
+	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+		mpt3sas_print_fault_code(ioc, doorbell &
+		    MPI2_DOORBELL_DATA_MASK);
+	} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+	    MPI2_IOC_STATE_COREDUMP) {
+		mpt3sas_print_coredump_info(ioc, doorbell &
+		    MPI2_DOORBELL_DATA_MASK);
+	} else {
+		writel(0xC0FFEE00, &ioc->chip->Doorbell);
+		ioc_err(ioc, "Firmware is halted due to command timeout\n");
+	}
+
+	if (ioc->fwfault_debug == 2)
+		for (;;)
+			;
+	else
+		panic("panic in %s\n", __func__);
+}
+
+/**
+ * _base_sas_ioc_info - verbose translation of the ioc status
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @request_hdr: request mf
+ */
+static void
+_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
+	MPI2RequestHeader_t *request_hdr)
+{
+	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	char *desc = NULL;
+	u16 frame_sz;
+	char *func_str = NULL;
+
+	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
+	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
+		return;
+
+	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+		return;
+	/*
+	 * Older firmware versions don't support driver trigger pages,
+	 * so skip displaying the 'config invalid type' class
+	 * of error message.
+ */ + if (request_hdr->Function == MPI2_FUNCTION_CONFIG) { + Mpi2ConfigRequest_t *rqst = (Mpi2ConfigRequest_t *)request_hdr; + + if ((rqst->ExtPageType == + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER) && + !(ioc->logging_level & MPT_DEBUG_CONFIG)) { + return; + } + } + + switch (ioc_status) { + +/**************************************************************************** +* Common IOCStatus values for all replies +****************************************************************************/ + + case MPI2_IOCSTATUS_INVALID_FUNCTION: + desc = "invalid function"; + break; + case MPI2_IOCSTATUS_BUSY: + desc = "busy"; + break; + case MPI2_IOCSTATUS_INVALID_SGL: + desc = "invalid sgl"; + break; + case MPI2_IOCSTATUS_INTERNAL_ERROR: + desc = "internal error"; + break; + case MPI2_IOCSTATUS_INVALID_VPID: + desc = "invalid vpid"; + break; + case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: + desc = "insufficient resources"; + break; + case MPI2_IOCSTATUS_INSUFFICIENT_POWER: + desc = "insufficient power"; + break; + case MPI2_IOCSTATUS_INVALID_FIELD: + desc = "invalid field"; + break; + case MPI2_IOCSTATUS_INVALID_STATE: + desc = "invalid state"; + break; + case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED: + desc = "op state not supported"; + break; + +/**************************************************************************** +* Config IOCStatus values +****************************************************************************/ + + case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION: + desc = "config invalid action"; + break; + case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE: + desc = "config invalid type"; + break; + case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE: + desc = "config invalid page"; + break; + case MPI2_IOCSTATUS_CONFIG_INVALID_DATA: + desc = "config invalid data"; + break; + case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS: + desc = "config no defaults"; + break; + case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT: + desc = "config can't commit"; + break; + +/**************************************************************************** +* SCSI IO Reply +****************************************************************************/ + + case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: + case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: + case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: + case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: + case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: + case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: + case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: + case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: + case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: + break; + +/**************************************************************************** +* For use by SCSI Initiator and SCSI Target end-to-end data protection +****************************************************************************/ + + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + desc = "eedp guard error"; + break; + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + desc = "eedp ref tag error"; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + desc = "eedp app tag error"; + break; + +/**************************************************************************** +* SCSI Target values +****************************************************************************/ + + case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX: + desc = "target invalid io index"; + break; + case MPI2_IOCSTATUS_TARGET_ABORTED: + desc = "target aborted"; + break; + case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE: + desc = "target 
no conn retryable"; + break; + case MPI2_IOCSTATUS_TARGET_NO_CONNECTION: + desc = "target no connection"; + break; + case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH: + desc = "target xfer count mismatch"; + break; + case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR: + desc = "target data offset error"; + break; + case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA: + desc = "target too much write data"; + break; + case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT: + desc = "target iu too short"; + break; + case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT: + desc = "target ack nak timeout"; + break; + case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED: + desc = "target nak received"; + break; + +/**************************************************************************** +* Serial Attached SCSI values +****************************************************************************/ + + case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED: + desc = "smp request failed"; + break; + case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN: + desc = "smp data overrun"; + break; + +/**************************************************************************** +* Diagnostic Buffer Post / Diagnostic Release values +****************************************************************************/ + + case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED: + desc = "diagnostic released"; + break; + default: + break; + } + + if (!desc) + return; + + switch (request_hdr->Function) { + case MPI2_FUNCTION_CONFIG: + frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size; + func_str = "config_page"; + break; + case MPI2_FUNCTION_SCSI_TASK_MGMT: + frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t); + func_str = "task_mgmt"; + break; + case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: + frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t); + func_str = "sas_iounit_ctl"; + break; + case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR: + frame_sz = sizeof(Mpi2SepRequest_t); + func_str = "enclosure"; + break; + case MPI2_FUNCTION_IOC_INIT: + frame_sz = sizeof(Mpi2IOCInitRequest_t); + func_str = "ioc_init"; + break; + case MPI2_FUNCTION_PORT_ENABLE: + frame_sz = sizeof(Mpi2PortEnableRequest_t); + func_str = "port_enable"; + break; + case MPI2_FUNCTION_SMP_PASSTHROUGH: + frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size; + func_str = "smp_passthru"; + break; + case MPI2_FUNCTION_NVME_ENCAPSULATED: + frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) + + ioc->sge_size; + func_str = "nvme_encapsulated"; + break; + default: + frame_sz = 32; + func_str = "unknown"; + break; + } + + ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n", + desc, ioc_status, request_hdr, func_str); + + _debug_dump_mf(request_hdr, frame_sz/4); +} + +/** + * _base_display_event_data - verbose translation of firmware asyn events + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + */ +static void +_base_display_event_data(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventNotificationReply_t *mpi_reply) +{ + char *desc = NULL; + u16 event; + + if (!(ioc->logging_level & MPT_DEBUG_EVENTS)) + return; + + event = le16_to_cpu(mpi_reply->Event); + + switch (event) { + case MPI2_EVENT_LOG_DATA: + desc = "Log Data"; + break; + case MPI2_EVENT_STATE_CHANGE: + desc = "Status Change"; + break; + case MPI2_EVENT_HARD_RESET_RECEIVED: + desc = "Hard Reset Received"; + break; + case MPI2_EVENT_EVENT_CHANGE: + desc = "Event Change"; + break; + case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: + desc = "Device Status Change"; + break; + case MPI2_EVENT_IR_OPERATION_STATUS: + if (!ioc->hide_ir_msg) + desc = "IR Operation 
Status"; + break; + case MPI2_EVENT_SAS_DISCOVERY: + { + Mpi2EventDataSasDiscovery_t *event_data = + (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData; + ioc_info(ioc, "Discovery: (%s)", + event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ? + "start" : "stop"); + if (event_data->DiscoveryStatus) + pr_cont(" discovery_status(0x%08x)", + le32_to_cpu(event_data->DiscoveryStatus)); + pr_cont("\n"); + return; + } + case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: + desc = "SAS Broadcast Primitive"; + break; + case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: + desc = "SAS Init Device Status Change"; + break; + case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW: + desc = "SAS Init Table Overflow"; + break; + case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + desc = "SAS Topology Change List"; + break; + case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + desc = "SAS Enclosure Device Status Change"; + break; + case MPI2_EVENT_IR_VOLUME: + if (!ioc->hide_ir_msg) + desc = "IR Volume"; + break; + case MPI2_EVENT_IR_PHYSICAL_DISK: + if (!ioc->hide_ir_msg) + desc = "IR Physical Disk"; + break; + case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: + if (!ioc->hide_ir_msg) + desc = "IR Configuration Change List"; + break; + case MPI2_EVENT_LOG_ENTRY_ADDED: + if (!ioc->hide_ir_msg) + desc = "Log Entry Added"; + break; + case MPI2_EVENT_TEMP_THRESHOLD: + desc = "Temperature Threshold"; + break; + case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: + desc = "Cable Event"; + break; + case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR: + desc = "SAS Device Discovery Error"; + break; + case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE: + desc = "PCIE Device Status Change"; + break; + case MPI2_EVENT_PCIE_ENUMERATION: + { + Mpi26EventDataPCIeEnumeration_t *event_data = + (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData; + ioc_info(ioc, "PCIE Enumeration: (%s)", + event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ? 
+ "start" : "stop"); + if (event_data->EnumerationStatus) + pr_cont("enumeration_status(0x%08x)", + le32_to_cpu(event_data->EnumerationStatus)); + pr_cont("\n"); + return; + } + case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: + desc = "PCIE Topology Change List"; + break; + } + + if (!desc) + return; + + ioc_info(ioc, "%s\n", desc); +} + +/** + * _base_sas_log_info - verbose translation of firmware log info + * @ioc: per adapter object + * @log_info: log info + */ +static void +_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info) +{ + union loginfo_type { + u32 loginfo; + struct { + u32 subcode:16; + u32 code:8; + u32 originator:4; + u32 bus_type:4; + } dw; + }; + union loginfo_type sas_loginfo; + char *originator_str = NULL; + + sas_loginfo.loginfo = log_info; + if (sas_loginfo.dw.bus_type != 3 /*SAS*/) + return; + + /* each nexus loss loginfo */ + if (log_info == 0x31170000) + return; + + /* eat the loginfos associated with task aborts */ + if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info == + 0x31140000 || log_info == 0x31130000)) + return; + + switch (sas_loginfo.dw.originator) { + case 0: + originator_str = "IOP"; + break; + case 1: + originator_str = "PL"; + break; + case 2: + if (!ioc->hide_ir_msg) + originator_str = "IR"; + else + originator_str = "WarpDrive"; + break; + } + + ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n", + log_info, + originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode); +} + +/** + * _base_display_reply_info - handle reply descriptors depending on IOC Status + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame (lower 32bit addr) + */ +static void +_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + u16 ioc_status; + u32 loginfo = 0; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (unlikely(!mpi_reply)) { + ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + + if ((ioc_status & MPI2_IOCSTATUS_MASK) && + (ioc->logging_level & MPT_DEBUG_REPLY)) { + _base_sas_ioc_info(ioc, mpi_reply, + mpt3sas_base_get_msg_frame(ioc, smid)); + } + + if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { + loginfo = le32_to_cpu(mpi_reply->IOCLogInfo); + _base_sas_log_info(ioc, loginfo); + } + + if (ioc_status || loginfo) { + ioc_status &= MPI2_IOCSTATUS_MASK; + mpt3sas_trigger_mpi(ioc, ioc_status, loginfo); + } +} + +/** + * mpt3sas_base_done - base internal command completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Return: + * 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
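The log_info decoding in _base_sas_log_info() above relies on overlaying a bit-field struct on a single u32 so that sub_code, code, originator and bus_type can be pulled out without shifting and masking by hand. The following stand-alone sketch (illustrative only, not driver code) shows the same technique; the field order assumes the low-to-high bit-field allocation the driver itself depends on.

/* Decode a SAS log_info word the way _base_sas_log_info() does, by
 * overlaying bit-fields on a 32-bit value.  Bit-field layout is
 * compiler/ABI dependent; this mirrors the driver's assumption that the
 * low 16 bits hold the subcode, followed by code, originator, bus_type. */
#include <stdio.h>
#include <stdint.h>

union loginfo_type {
    uint32_t loginfo;
    struct {
        uint32_t subcode:16;
        uint32_t code:8;
        uint32_t originator:4;
        uint32_t bus_type:4;
    } dw;
};

int main(void)
{
    union loginfo_type li;

    li.loginfo = 0x31170000;    /* example value; bus_type 3 means SAS */
    if (li.dw.bus_type != 3)
        return 0;
    printf("originator=%u code=0x%02x sub_code=0x%04x\n",
           (unsigned int)li.dw.originator,
           (unsigned int)li.dw.code,
           (unsigned int)li.dw.subcode);
    return 0;
}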
+ */ +u8 +mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK) + return mpt3sas_check_for_pending_internal_cmds(ioc, smid); + + if (ioc->base_cmds.status == MPT3_CMD_NOT_USED) + return 1; + + ioc->base_cmds.status |= MPT3_CMD_COMPLETE; + if (mpi_reply) { + ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID; + memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); + } + ioc->base_cmds.status &= ~MPT3_CMD_PENDING; + + complete(&ioc->base_cmds.done); + return 1; +} + +/** + * _base_async_event - main callback handler for firmware asyn events + * @ioc: per adapter object + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Return: + * 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +static u8 +_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) +{ + Mpi2EventNotificationReply_t *mpi_reply; + Mpi2EventAckRequest_t *ack_request; + u16 smid; + struct _event_ack_list *delayed_event_ack; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (!mpi_reply) + return 1; + if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION) + return 1; + + _base_display_event_data(ioc, mpi_reply); + + if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED)) + goto out; + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + delayed_event_ack = kzalloc(sizeof(*delayed_event_ack), + GFP_ATOMIC); + if (!delayed_event_ack) + goto out; + INIT_LIST_HEAD(&delayed_event_ack->list); + delayed_event_ack->Event = mpi_reply->Event; + delayed_event_ack->EventContext = mpi_reply->EventContext; + list_add_tail(&delayed_event_ack->list, + &ioc->delayed_event_ack_list); + dewtprintk(ioc, + ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n", + le16_to_cpu(mpi_reply->Event))); + goto out; + } + + ack_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t)); + ack_request->Function = MPI2_FUNCTION_EVENT_ACK; + ack_request->Event = mpi_reply->Event; + ack_request->EventContext = mpi_reply->EventContext; + ack_request->VF_ID = 0; /* TODO */ + ack_request->VP_ID = 0; + ioc->put_smid_default(ioc, smid); + + out: + + /* scsih callback handler */ + mpt3sas_scsih_event_callback(ioc, msix_index, reply); + + /* ctl callback handler */ + mpt3sas_ctl_event_callback(ioc, msix_index, reply); + + return 1; +} + +static struct scsiio_tracker * +_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + struct scsi_cmnd *cmd; + + if (WARN_ON(!smid) || + WARN_ON(smid >= ioc->hi_priority_smid)) + return NULL; + + cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (cmd) + return scsi_cmd_priv(cmd); + + return NULL; +} + +/** + * _base_get_cb_idx - obtain the callback index + * @ioc: per adapter object + * @smid: system request message index + * + * Return: callback index. 
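_base_async_event() above cannot always send the EventAck right away: when no free request slot (smid) is available, it queues the event on delayed_event_ack_list and replays it once a slot frees up. Below is a generic sketch of that defer-and-replay pattern; the names pending_ack, queue_pending_ack and drain_pending_acks are hypothetical and only illustrate the idea.

/* Illustrative model only: defer work items when no slot is free, then
 * drain them later.  The driver appends at the tail (FIFO); head
 * insertion keeps this sketch short. */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>

struct pending_ack {
    uint16_t event;             /* arbitrary example fields */
    uint32_t event_context;
    struct pending_ack *next;
};

static struct pending_ack *pending_head;

/* Called when no free request slot exists right now. */
static bool queue_pending_ack(uint16_t event, uint32_t ctx)
{
    struct pending_ack *ack = calloc(1, sizeof(*ack));

    if (!ack)
        return false;
    ack->event = event;
    ack->event_context = ctx;
    ack->next = pending_head;
    pending_head = ack;
    return true;
}

/* Called later, when a slot frees up, to replay the deferred acks. */
static void drain_pending_acks(void)
{
    while (pending_head) {
        struct pending_ack *ack = pending_head;

        pending_head = ack->next;
        printf("sending deferred ack: event 0x%04x ctx 0x%08x\n",
               ack->event, ack->event_context);
        free(ack);
    }
}

int main(void)
{
    queue_pending_ack(0x0010, 0x1);   /* arbitrary example values */
    queue_pending_ack(0x0020, 0x2);
    drain_pending_acks();
    return 0;
}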
+ */ +static u8 +_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + int i; + u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1; + u8 cb_idx = 0xFF; + + if (smid < ioc->hi_priority_smid) { + struct scsiio_tracker *st; + + if (smid < ctl_smid) { + st = _get_st_from_smid(ioc, smid); + if (st) + cb_idx = st->cb_idx; + } else if (smid == ctl_smid) + cb_idx = ioc->ctl_cb_idx; + } else if (smid < ioc->internal_smid) { + i = smid - ioc->hi_priority_smid; + cb_idx = ioc->hpr_lookup[i].cb_idx; + } else if (smid <= ioc->hba_queue_depth) { + i = smid - ioc->internal_smid; + cb_idx = ioc->internal_lookup[i].cb_idx; + } + return cb_idx; +} + +/** + * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues + * when driver is flushing out the IOs. + * @ioc: per adapter object + * + * Pause polling on the mq poll (io uring) queues when driver is flushing + * out the IOs. Otherwise we may see the race condition of completing the same + * IO from two paths. + * + * Returns nothing. + */ +void +mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc) +{ + int iopoll_q_count = + ioc->reply_queue_count - ioc->iopoll_q_start_index; + int qid; + + for (qid = 0; qid < iopoll_q_count; qid++) + atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1); + + /* + * wait for current poll to complete. + */ + for (qid = 0; qid < iopoll_q_count; qid++) { + while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) { + cpu_relax(); + udelay(500); + } + } +} + +/** + * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues. + * @ioc: per adapter object + * + * Returns nothing. + */ +void +mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc) +{ + int iopoll_q_count = + ioc->reply_queue_count - ioc->iopoll_q_start_index; + int qid; + + for (qid = 0; qid < iopoll_q_count; qid++) + atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0); +} + +/** + * mpt3sas_base_mask_interrupts - disable interrupts + * @ioc: per adapter object + * + * Disabling ResetIRQ, Reply and Doorbell Interrupts + */ +void +mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc) +{ + u32 him_register; + + ioc->mask_interrupts = 1; + him_register = ioc->base_readl(&ioc->chip->HostInterruptMask); + him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK; + writel(him_register, &ioc->chip->HostInterruptMask); + ioc->base_readl(&ioc->chip->HostInterruptMask); +} + +/** + * mpt3sas_base_unmask_interrupts - enable interrupts + * @ioc: per adapter object + * + * Enabling only Reply Interrupts + */ +void +mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc) +{ + u32 him_register; + + him_register = ioc->base_readl(&ioc->chip->HostInterruptMask); + him_register &= ~MPI2_HIM_RIM; + writel(him_register, &ioc->chip->HostInterruptMask); + ioc->mask_interrupts = 0; +} + +union reply_descriptor { + u64 word; + struct { + u32 low; + u32 high; + } u; +}; + +static u32 base_mod64(u64 dividend, u32 divisor) +{ + u32 remainder; + + if (!divisor) + pr_err("mpt3sas: DIVISOR is zero, in div fn\n"); + remainder = do_div(dividend, divisor); + return remainder; +} + +/** + * _base_process_reply_queue - Process reply descriptors from reply + * descriptor post queue. + * @reply_q: per IRQ's reply queue object. + * + * Return: number of reply descriptors processed from reply + * descriptor queue. 
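The reply-descriptor loop that follows walks a circular ring: a slot whose descriptor is all ones is unused, a consumed slot is reset to all ones, and the host index wraps at the queue depth before being handed back to the IOC. The stand-alone model below captures only that ring discipline (the real code also dispatches callbacks, handles address replies and writes the index to a chip register); names and sizes are illustrative.

/* Minimal model of a reply-post style ring: the producer fills slots,
 * the consumer processes them, resets each slot to the "unused" pattern
 * and wraps its index at the queue depth. */
#include <stdio.h>
#include <stdint.h>

#define QUEUE_DEPTH 8
#define SLOT_UNUSED UINT64_MAX          /* all ones == nothing posted */

static uint64_t ring[QUEUE_DEPTH];
static unsigned int host_index;         /* next slot to consume */

static unsigned int consume_ring(void)
{
    unsigned int completed = 0;

    while (ring[host_index] != SLOT_UNUSED) {
        printf("slot %u: descriptor 0x%llx\n", host_index,
               (unsigned long long)ring[host_index]);
        ring[host_index] = SLOT_UNUSED;                 /* give slot back */
        host_index = (host_index == QUEUE_DEPTH - 1) ?  /* wrap at depth */
                     0 : host_index + 1;
        completed++;
    }
    /* here the driver would write host_index to the IOC's
     * ReplyPostHostIndex register so firmware can reuse the slots */
    return completed;
}

int main(void)
{
    unsigned int i;

    for (i = 0; i < QUEUE_DEPTH; i++)
        ring[i] = SLOT_UNUSED;
    ring[0] = 0x1111;           /* pretend firmware posted two replies */
    ring[1] = 0x2222;
    printf("processed %u descriptors\n", consume_ring());
    return 0;
}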
+ */ +static int +_base_process_reply_queue(struct adapter_reply_queue *reply_q) +{ + union reply_descriptor rd; + u64 completed_cmds; + u8 request_descript_type; + u16 smid; + u8 cb_idx; + u32 reply; + u8 msix_index = reply_q->msix_index; + struct MPT3SAS_ADAPTER *ioc = reply_q->ioc; + Mpi2ReplyDescriptorsUnion_t *rpf; + u8 rc; + + completed_cmds = 0; + if (!atomic_add_unless(&reply_q->busy, 1, 1)) + return completed_cmds; + + rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index]; + request_descript_type = rpf->Default.ReplyFlags + & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; + if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) { + atomic_dec(&reply_q->busy); + return completed_cmds; + } + + cb_idx = 0xFF; + do { + rd.word = le64_to_cpu(rpf->Words); + if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) + goto out; + reply = 0; + smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1); + if (request_descript_type == + MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS || + request_descript_type == + MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS || + request_descript_type == + MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) { + cb_idx = _base_get_cb_idx(ioc, smid); + if ((likely(cb_idx < MPT_MAX_CALLBACKS)) && + (likely(mpt_callbacks[cb_idx] != NULL))) { + rc = mpt_callbacks[cb_idx](ioc, smid, + msix_index, 0); + if (rc) + mpt3sas_base_free_smid(ioc, smid); + } + } else if (request_descript_type == + MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { + reply = le32_to_cpu( + rpf->AddressReply.ReplyFrameAddress); + if (reply > ioc->reply_dma_max_address || + reply < ioc->reply_dma_min_address) + reply = 0; + if (smid) { + cb_idx = _base_get_cb_idx(ioc, smid); + if ((likely(cb_idx < MPT_MAX_CALLBACKS)) && + (likely(mpt_callbacks[cb_idx] != NULL))) { + rc = mpt_callbacks[cb_idx](ioc, smid, + msix_index, reply); + if (reply) + _base_display_reply_info(ioc, + smid, msix_index, reply); + if (rc) + mpt3sas_base_free_smid(ioc, + smid); + } + } else { + _base_async_event(ioc, msix_index, reply); + } + + /* reply free queue handling */ + if (reply) { + ioc->reply_free_host_index = + (ioc->reply_free_host_index == + (ioc->reply_free_queue_depth - 1)) ? + 0 : ioc->reply_free_host_index + 1; + ioc->reply_free[ioc->reply_free_host_index] = + cpu_to_le32(reply); + if (ioc->is_mcpu_endpoint) + _base_clone_reply_to_sys_mem(ioc, + reply, + ioc->reply_free_host_index); + writel(ioc->reply_free_host_index, + &ioc->chip->ReplyFreeHostIndex); + } + } + + rpf->Words = cpu_to_le64(ULLONG_MAX); + reply_q->reply_post_host_index = + (reply_q->reply_post_host_index == + (ioc->reply_post_queue_depth - 1)) ? 0 : + reply_q->reply_post_host_index + 1; + request_descript_type = + reply_q->reply_post_free[reply_q->reply_post_host_index]. + Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; + completed_cmds++; + /* Update the reply post host index after continuously + * processing the threshold number of Reply Descriptors. + * So that FW can find enough entries to post the Reply + * Descriptors in the reply descriptor post queue. 
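With the combined reply queue feature, the host-index write-back just below does two things at once: it selects the (msix_index / 8)th supplemental register, because each register serves eight MSI-X vectors, and it folds the vector number modulo 8 into the upper bits of the written value. The sketch below shows only that packing; the 24-bit shift stands in for MPI2_RPHI_MSIX_INDEX_SHIFT and should be treated as an assumption of this example.

/* Illustrative packing of a ReplyPostHostIndex write for the combined
 * reply queue case.  Bank selection is msix_index / 8 because each
 * supplemental register serves 8 MSI-X vectors. */
#include <stdio.h>
#include <stdint.h>

#define RPHI_MSIX_INDEX_SHIFT 24  /* assumed stand-in for MPI2_RPHI_MSIX_INDEX_SHIFT */

struct rphi_write {
    unsigned int bank;            /* index into replyPostRegisterIndex[] */
    uint32_t value;               /* value written to that register */
};

static struct rphi_write pack_rphi(uint32_t reply_post_host_index,
                                   unsigned int msix_index)
{
    struct rphi_write w;

    w.bank = msix_index / 8;
    w.value = reply_post_host_index |
              ((uint32_t)(msix_index & 7) << RPHI_MSIX_INDEX_SHIFT);
    return w;
}

int main(void)
{
    struct rphi_write w = pack_rphi(0x35, 11);  /* host index 0x35, vector 11 */

    printf("bank %u, value 0x%08x\n", w.bank, w.value);  /* bank 1, 0x03000035 */
    return 0;
}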
+ */ + if (completed_cmds >= ioc->thresh_hold) { + if (ioc->combined_reply_queue) { + writel(reply_q->reply_post_host_index | + ((msix_index & 7) << + MPI2_RPHI_MSIX_INDEX_SHIFT), + ioc->replyPostRegisterIndex[msix_index/8]); + } else { + writel(reply_q->reply_post_host_index | + (msix_index << + MPI2_RPHI_MSIX_INDEX_SHIFT), + &ioc->chip->ReplyPostHostIndex); + } + if (!reply_q->is_iouring_poll_q && + !reply_q->irq_poll_scheduled) { + reply_q->irq_poll_scheduled = true; + irq_poll_sched(&reply_q->irqpoll); + } + atomic_dec(&reply_q->busy); + return completed_cmds; + } + if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) + goto out; + if (!reply_q->reply_post_host_index) + rpf = reply_q->reply_post_free; + else + rpf++; + } while (1); + + out: + + if (!completed_cmds) { + atomic_dec(&reply_q->busy); + return completed_cmds; + } + + if (ioc->is_warpdrive) { + writel(reply_q->reply_post_host_index, + ioc->reply_post_host_index[msix_index]); + atomic_dec(&reply_q->busy); + return completed_cmds; + } + + /* Update Reply Post Host Index. + * For those HBA's which support combined reply queue feature + * 1. Get the correct Supplemental Reply Post Host Index Register. + * i.e. (msix_index / 8)th entry from Supplemental Reply Post Host + * Index Register address bank i.e replyPostRegisterIndex[], + * 2. Then update this register with new reply host index value + * in ReplyPostIndex field and the MSIxIndex field with + * msix_index value reduced to a value between 0 and 7, + * using a modulo 8 operation. Since each Supplemental Reply Post + * Host Index Register supports 8 MSI-X vectors. + * + * For other HBA's just update the Reply Post Host Index register with + * new reply host index value in ReplyPostIndex Field and msix_index + * value in MSIxIndex field. + */ + if (ioc->combined_reply_queue) + writel(reply_q->reply_post_host_index | ((msix_index & 7) << + MPI2_RPHI_MSIX_INDEX_SHIFT), + ioc->replyPostRegisterIndex[msix_index/8]); + else + writel(reply_q->reply_post_host_index | (msix_index << + MPI2_RPHI_MSIX_INDEX_SHIFT), + &ioc->chip->ReplyPostHostIndex); + atomic_dec(&reply_q->busy); + return completed_cmds; +} + +/** + * mpt3sas_blk_mq_poll - poll the blk mq poll queue + * @shost: Scsi_Host object + * @queue_num: hw ctx queue number + * + * Return number of entries that has been processed from poll queue. + */ +int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) +{ + struct MPT3SAS_ADAPTER *ioc = + (struct MPT3SAS_ADAPTER *)shost->hostdata; + struct adapter_reply_queue *reply_q; + int num_entries = 0; + int qid = queue_num - ioc->iopoll_q_start_index; + + if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) || + !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1)) + return 0; + + reply_q = ioc->io_uring_poll_queues[qid].reply_q; + + num_entries = _base_process_reply_queue(reply_q); + atomic_dec(&ioc->io_uring_poll_queues[qid].busy); + + return num_entries; +} + +/** + * _base_interrupt - MPT adapter (IOC) specific interrupt handler. + * @irq: irq number (not used) + * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure + * + * Return: IRQ_HANDLED if processed, else IRQ_NONE. + */ +static irqreturn_t +_base_interrupt(int irq, void *bus_id) +{ + struct adapter_reply_queue *reply_q = bus_id; + struct MPT3SAS_ADAPTER *ioc = reply_q->ioc; + + if (ioc->mask_interrupts) + return IRQ_NONE; + if (reply_q->irq_poll_scheduled) + return IRQ_HANDLED; + return ((_base_process_reply_queue(reply_q) > 0) ? 
+ IRQ_HANDLED : IRQ_NONE); +} + +/** + * _base_irqpoll - IRQ poll callback handler + * @irqpoll: irq_poll object + * @budget: irq poll weight + * + * Return: number of reply descriptors processed + */ +static int +_base_irqpoll(struct irq_poll *irqpoll, int budget) +{ + struct adapter_reply_queue *reply_q; + int num_entries = 0; + + reply_q = container_of(irqpoll, struct adapter_reply_queue, + irqpoll); + if (reply_q->irq_line_enable) { + disable_irq_nosync(reply_q->os_irq); + reply_q->irq_line_enable = false; + } + num_entries = _base_process_reply_queue(reply_q); + if (num_entries < budget) { + irq_poll_complete(irqpoll); + reply_q->irq_poll_scheduled = false; + reply_q->irq_line_enable = true; + enable_irq(reply_q->os_irq); + /* + * Go for one more round of processing the + * reply descriptor post queue in case the HBA + * Firmware has posted some reply descriptors + * while reenabling the IRQ. + */ + _base_process_reply_queue(reply_q); + } + + return num_entries; +} + +/** + * _base_init_irqpolls - initliaze IRQ polls + * @ioc: per adapter object + * + * Return: nothing + */ +static void +_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc) +{ + struct adapter_reply_queue *reply_q, *next; + + if (list_empty(&ioc->reply_queue_list)) + return; + + list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { + if (reply_q->is_iouring_poll_q) + continue; + irq_poll_init(&reply_q->irqpoll, + ioc->hba_queue_depth/4, _base_irqpoll); + reply_q->irq_poll_scheduled = false; + reply_q->irq_line_enable = true; + reply_q->os_irq = pci_irq_vector(ioc->pdev, + reply_q->msix_index); + } +} + +/** + * _base_is_controller_msix_enabled - is controller support muli-reply queues + * @ioc: per adapter object + * + * Return: Whether or not MSI/X is enabled. + */ +static inline int +_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc) +{ + return (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable; +} + +/** + * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts + * @ioc: per adapter object + * @poll: poll over reply descriptor pools incase interrupt for + * timed-out SCSI command got delayed + * Context: non-ISR context + * + * Called when a Task Management request has completed. + */ +void +mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll) +{ + struct adapter_reply_queue *reply_q; + + /* If MSIX capability is turned off + * then multi-queues are not enabled + */ + if (!_base_is_controller_msix_enabled(ioc)) + return; + + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (ioc->shost_recovery || ioc->remove_host || + ioc->pci_error_recovery) + return; + /* TMs are on msix_index == 0 */ + if (reply_q->msix_index == 0) + continue; + + if (reply_q->is_iouring_poll_q) { + _base_process_reply_queue(reply_q); + continue; + } + + synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index)); + if (reply_q->irq_poll_scheduled) { + /* Calling irq_poll_disable will wait for any pending + * callbacks to have completed. 
+ */ + irq_poll_disable(&reply_q->irqpoll); + irq_poll_enable(&reply_q->irqpoll); + /* check how the scheduled poll has ended, + * clean up only if necessary + */ + if (reply_q->irq_poll_scheduled) { + reply_q->irq_poll_scheduled = false; + reply_q->irq_line_enable = true; + enable_irq(reply_q->os_irq); + } + } + + if (poll) + _base_process_reply_queue(reply_q); + } +} + +/** + * mpt3sas_base_release_callback_handler - clear interrupt callback handler + * @cb_idx: callback index + */ +void +mpt3sas_base_release_callback_handler(u8 cb_idx) +{ + mpt_callbacks[cb_idx] = NULL; +} + +/** + * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler + * @cb_func: callback function + * + * Return: Index of @cb_func. + */ +u8 +mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func) +{ + u8 cb_idx; + + for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--) + if (mpt_callbacks[cb_idx] == NULL) + break; + + mpt_callbacks[cb_idx] = cb_func; + return cb_idx; +} + +/** + * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler + */ +void +mpt3sas_base_initialize_callback_handler(void) +{ + u8 cb_idx; + + for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++) + mpt3sas_base_release_callback_handler(cb_idx); +} + + +/** + * _base_build_zero_len_sge - build zero length sg entry + * @ioc: per adapter object + * @paddr: virtual address for SGE + * + * Create a zero length scatter gather entry to insure the IOCs hardware has + * something to use if the target device goes brain dead and tries + * to send data even when none is asked for. + */ +static void +_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr) +{ + u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT | + MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST | + MPI2_SGE_FLAGS_SIMPLE_ELEMENT) << + MPI2_SGE_FLAGS_SHIFT); + ioc->base_add_sg_single(paddr, flags_length, -1); +} + +/** + * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr. + * @paddr: virtual address for SGE + * @flags_length: SGE flags and data transfer length + * @dma_addr: Physical address + */ +static void +_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr) +{ + Mpi2SGESimple32_t *sgel = paddr; + + flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING | + MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT; + sgel->FlagsLength = cpu_to_le32(flags_length); + sgel->Address = cpu_to_le32(dma_addr); +} + + +/** + * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr. + * @paddr: virtual address for SGE + * @flags_length: SGE flags and data transfer length + * @dma_addr: Physical address + */ +static void +_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr) +{ + Mpi2SGESimple64_t *sgel = paddr; + + flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING | + MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT; + sgel->FlagsLength = cpu_to_le32(flags_length); + sgel->Address = cpu_to_le64(dma_addr); +} + +/** + * _base_get_chain_buffer_tracker - obtain chain tracker + * @ioc: per adapter object + * @scmd: SCSI commands of the IO request + * + * Return: chain tracker from chain_lookup table using key as + * smid and smid's chain_offset. 
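The MPI2 SGE helpers above (_base_add_sg_single_32 and _base_add_sg_single_64) pack the element flags and the transfer length into a single 32-bit FlagsLength word, with the flags shifted into the top byte and the byte count in the lower bits. A host-endian sketch of that packing follows; the flag bit values are placeholders rather than the real MPI2 definitions, and only the shift-and-OR layout is the point.

/* Sketch of MPI2-style simple-SGE packing: flags occupy the top byte of
 * FlagsLength, the byte count the lower bits.  Flag values below are
 * placeholders; fields are host-endian here, whereas the driver converts
 * with cpu_to_le32()/cpu_to_le64(). */
#include <stdio.h>
#include <stdint.h>

#define SGE_FLAGS_SHIFT       24    /* flags live in the most significant byte */
#define SGE_FLAG_SIMPLE       0x10  /* placeholder bit values for illustration */
#define SGE_FLAG_LAST_ELEMENT 0x80
#define SGE_FLAG_END_OF_LIST  0x01

struct simple_sge64 {
    uint32_t flags_length;
    uint64_t address;
};

static void add_sg_single_64(struct simple_sge64 *sge, uint32_t flags_length,
                             uint64_t dma_addr)
{
    /* the caller has already OR'd the length into flags_length, exactly
     * how ioc->base_add_sg_single() is invoked in the driver */
    sge->flags_length = flags_length;
    sge->address = dma_addr;
}

int main(void)
{
    struct simple_sge64 sge;
    uint32_t flags = (SGE_FLAG_SIMPLE | SGE_FLAG_LAST_ELEMENT |
                      SGE_FLAG_END_OF_LIST) << SGE_FLAGS_SHIFT;

    add_sg_single_64(&sge, flags | 4096, 0x12345000ULL);  /* 4 KiB transfer */
    printf("FlagsLength 0x%08x Address 0x%llx\n",
           (unsigned int)sge.flags_length,
           (unsigned long long)sge.address);
    return 0;
}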
+ */ +static struct chain_tracker * +_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + struct chain_tracker *chain_req; + struct scsiio_tracker *st = scsi_cmd_priv(scmd); + u16 smid = st->smid; + u8 chain_offset = + atomic_read(&ioc->chain_lookup[smid - 1].chain_offset); + + if (chain_offset == ioc->chains_needed_per_io) + return NULL; + + chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset]; + atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset); + return chain_req; +} + + +/** + * _base_build_sg - build generic sg + * @ioc: per adapter object + * @psge: virtual address for SGE + * @data_out_dma: physical address for WRITES + * @data_out_sz: data xfer size for WRITES + * @data_in_dma: physical address for READS + * @data_in_sz: data xfer size for READS + */ +static void +_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge, + dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, + size_t data_in_sz) +{ + u32 sgl_flags; + + if (!data_out_sz && !data_in_sz) { + _base_build_zero_len_sge(ioc, psge); + return; + } + + if (data_out_sz && data_in_sz) { + /* WRITE sgel first */ + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_out_sz, data_out_dma); + + /* incr sgel */ + psge += ioc->sge_size; + + /* READ sgel last */ + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | + MPI2_SGE_FLAGS_END_OF_LIST); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_in_sz, data_in_dma); + } else if (data_out_sz) /* WRITE */ { + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | + MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_out_sz, data_out_dma); + } else if (data_in_sz) /* READ */ { + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | + MPI2_SGE_FLAGS_END_OF_LIST); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_in_sz, data_in_dma); + } +} + +/* IEEE format sgls */ + +/** + * _base_build_nvme_prp - This function is called for NVMe end devices to build + * a native SGL (NVMe PRP). + * @ioc: per adapter object + * @smid: system request message index for getting asscociated SGL + * @nvme_encap_request: the NVMe request msg frame pointer + * @data_out_dma: physical address for WRITES + * @data_out_sz: data xfer size for WRITES + * @data_in_dma: physical address for READS + * @data_in_sz: data xfer size for READS + * + * The native SGL is built starting in the first PRP + * entry of the NVMe message (PRP1). If the data buffer is small enough to be + * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is + * used to describe a larger data buffer. If the data buffer is too large to + * describe using the two PRP entriess inside the NVMe message, then PRP1 + * describes the first data memory segment, and PRP2 contains a pointer to a PRP + * list located elsewhere in memory to describe the remaining data memory + * segments. The PRP list will be contiguous. + * + * The native SGL for NVMe devices is a Physical Region Page (PRP). 
A PRP + * consists of a list of PRP entries to describe a number of noncontigous + * physical memory segments as a single memory buffer, just as a SGL does. Note + * however, that this function is only used by the IOCTL call, so the memory + * given will be guaranteed to be contiguous. There is no need to translate + * non-contiguous SGL into a PRP in this case. All PRPs will describe + * contiguous space that is one page size each. + * + * Each NVMe message contains two PRP entries. The first (PRP1) either contains + * a PRP list pointer or a PRP element, depending upon the command. PRP2 + * contains the second PRP element if the memory being described fits within 2 + * PRP entries, or a PRP list pointer if the PRP spans more than two entries. + * + * A PRP list pointer contains the address of a PRP list, structured as a linear + * array of PRP entries. Each PRP entry in this list describes a segment of + * physical memory. + * + * Each 64-bit PRP entry comprises an address and an offset field. The address + * always points at the beginning of a 4KB physical memory page, and the offset + * describes where within that 4KB page the memory segment begins. Only the + * first element in a PRP list may contain a non-zero offset, implying that all + * memory segments following the first begin at the start of a 4KB page. + * + * Each PRP element normally describes 4KB of physical memory, with exceptions + * for the first and last elements in the list. If the memory being described + * by the list begins at a non-zero offset within the first 4KB page, then the + * first PRP element will contain a non-zero offset indicating where the region + * begins within the 4KB page. The last memory segment may end before the end + * of the 4KB segment, depending upon the overall size of the memory being + * described by the PRP list. + * + * Since PRP entries lack any indication of size, the overall data buffer length + * is used to determine where the end of the data memory buffer is located, and + * how many PRP entries are required to describe it. + */ +static void +_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid, + Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, + dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, + size_t data_in_sz) +{ + int prp_size = NVME_PRP_SIZE; + __le64 *prp_entry, *prp1_entry, *prp2_entry; + __le64 *prp_page; + dma_addr_t prp_entry_dma, prp_page_dma, dma_addr; + u32 offset, entry_len; + u32 page_mask_result, page_mask; + size_t length; + struct mpt3sas_nvme_cmd *nvme_cmd = + (void *)nvme_encap_request->NVMe_Command; + + /* + * Not all commands require a data transfer. If no data, just return + * without constructing any PRP. + */ + if (!data_in_sz && !data_out_sz) + return; + prp1_entry = &nvme_cmd->prp1; + prp2_entry = &nvme_cmd->prp2; + prp_entry = prp1_entry; + /* + * For the PRP entries, use the specially allocated buffer of + * contiguous memory. + */ + prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid); + prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid); + + /* + * Check if we are within 1 entry of a page boundary we don't + * want our first entry to be a PRP List entry. + */ + page_mask = ioc->page_size - 1; + page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask; + if (!page_mask_result) { + /* Bump up to next page boundary. 
*/ + prp_page = (__le64 *)((u8 *)prp_page + prp_size); + prp_page_dma = prp_page_dma + prp_size; + } + + /* + * Set PRP physical pointer, which initially points to the current PRP + * DMA memory page. + */ + prp_entry_dma = prp_page_dma; + + /* Get physical address and length of the data buffer. */ + if (data_in_sz) { + dma_addr = data_in_dma; + length = data_in_sz; + } else { + dma_addr = data_out_dma; + length = data_out_sz; + } + + /* Loop while the length is not zero. */ + while (length) { + /* + * Check if we need to put a list pointer here if we are at + * page boundary - prp_size (8 bytes). + */ + page_mask_result = (prp_entry_dma + prp_size) & page_mask; + if (!page_mask_result) { + /* + * This is the last entry in a PRP List, so we need to + * put a PRP list pointer here. What this does is: + * - bump the current memory pointer to the next + * address, which will be the next full page. + * - set the PRP Entry to point to that page. This + * is now the PRP List pointer. + * - bump the PRP Entry pointer the start of the + * next page. Since all of this PRP memory is + * contiguous, no need to get a new page - it's + * just the next address. + */ + prp_entry_dma++; + *prp_entry = cpu_to_le64(prp_entry_dma); + prp_entry++; + } + + /* Need to handle if entry will be part of a page. */ + offset = dma_addr & page_mask; + entry_len = ioc->page_size - offset; + + if (prp_entry == prp1_entry) { + /* + * Must fill in the first PRP pointer (PRP1) before + * moving on. + */ + *prp1_entry = cpu_to_le64(dma_addr); + + /* + * Now point to the second PRP entry within the + * command (PRP2). + */ + prp_entry = prp2_entry; + } else if (prp_entry == prp2_entry) { + /* + * Should the PRP2 entry be a PRP List pointer or just + * a regular PRP pointer? If there is more than one + * more page of data, must use a PRP List pointer. + */ + if (length > ioc->page_size) { + /* + * PRP2 will contain a PRP List pointer because + * more PRP's are needed with this command. The + * list will start at the beginning of the + * contiguous buffer. + */ + *prp2_entry = cpu_to_le64(prp_entry_dma); + + /* + * The next PRP Entry will be the start of the + * first PRP List. + */ + prp_entry = prp_page; + } else { + /* + * After this, the PRP Entries are complete. + * This command uses 2 PRP's and no PRP list. + */ + *prp2_entry = cpu_to_le64(dma_addr); + } + } else { + /* + * Put entry in list and bump the addresses. + * + * After PRP1 and PRP2 are filled in, this will fill in + * all remaining PRP entries in a PRP List, one per + * each time through the loop. + */ + *prp_entry = cpu_to_le64(dma_addr); + prp_entry++; + prp_entry_dma++; + } + + /* + * Bump the phys address of the command's data buffer by the + * entry_len. + */ + dma_addr += entry_len; + + /* Decrement length accounting for last partial page. */ + if (entry_len > length) + length = 0; + else + length -= entry_len; + } +} + +/** + * base_make_prp_nvme - Prepare PRPs (Physical Region Page) - + * SGLs specific to NVMe drives only + * + * @ioc: per adapter object + * @scmd: SCSI command from the mid-layer + * @mpi_request: mpi request + * @smid: msg Index + * @sge_count: scatter gather element count. 
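The PRP rules described above reduce to simple page arithmetic: the first entry may begin at an offset inside a page, every later entry begins on a page boundary, and the total length decides how many entries are needed. The helper below is hypothetical (not part of the driver) and assumes a fixed 4 KiB PRP page; it only checks that arithmetic.

/* Hypothetical helper: given a buffer's bus address and length, count how
 * many PRP entries are needed under the rules described above (first
 * entry may carry an offset, every later entry is page aligned). */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PRP_PAGE_SIZE 4096u   /* NVMe PRP granularity assumed here */

static unsigned int prp_entries_needed(uint64_t dma_addr, size_t length)
{
    unsigned int count;
    size_t first = PRP_PAGE_SIZE - (dma_addr & (PRP_PAGE_SIZE - 1));

    if (!length)
        return 0;

    /* first entry covers up to the next page boundary */
    count = 1;
    if (length <= first)
        return count;
    length -= first;

    /* every following entry covers one full (or final partial) page */
    count += (length + PRP_PAGE_SIZE - 1) / PRP_PAGE_SIZE;
    return count;
}

int main(void)
{
    /* 16 KiB buffer starting 512 bytes into a page -> 5 PRP entries */
    printf("%u\n", prp_entries_needed(0x1000200, 16384));
    return 0;
}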
+ *
+ * Return: nothing; base_is_prp_possible() decides whether a native PRP or
+ * an IEEE SGL is built.
+ */
+static void
+base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
+	struct scsi_cmnd *scmd,
+	Mpi25SCSIIORequest_t *mpi_request,
+	u16 smid, int sge_count)
+{
+	int sge_len, num_prp_in_chain = 0;
+	Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
+	__le64 *curr_buff;
+	dma_addr_t msg_dma, sge_addr, offset;
+	u32 page_mask, page_mask_result;
+	struct scatterlist *sg_scmd;
+	u32 first_prp_len;
+	int data_len = scsi_bufflen(scmd);
+	u32 nvme_pg_size;
+
+	nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
+	/*
+	 * NVMe has a very convoluted PRP format.  One PRP entry is required
+	 * for each page or partial page.  The driver needs to split up OS
+	 * sg_list entries if they are longer than one page or cross a page
+	 * boundary.  The driver also has to insert a PRP list pointer entry
+	 * as the last entry in each physical page of the PRP list.
+	 *
+	 * NOTE: The first PRP "entry" is actually placed in the first
+	 * SGL entry in the main message as IEEE 64 format.  The 2nd
+	 * entry in the main message is the chain element, and the rest
+	 * of the PRP entries are built in the contiguous PCIe buffer.
+	 */
+	page_mask = nvme_pg_size - 1;
+
+	/*
+	 * A native SGL is needed.
+	 * Put a chain element in the main message frame that points to the
+	 * first chain buffer.
+	 *
+	 * NOTE:  The ChainOffset field must be 0 when using a chain pointer to
+	 * a native SGL.
+	 */
+
+	/* Set main message chain element pointer */
+	main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
+	/*
+	 * For NVMe the chain element needs to be the 2nd SG entry in the main
+	 * message.
+	 */
+	main_chain_element = (Mpi25IeeeSgeChain64_t *)
+		((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
+
+	/*
+	 * For the PRP entries, use the specially allocated buffer of
+	 * contiguous memory.  Normal chain buffers can't be used
+	 * because each chain buffer would need to be the size of an OS
+	 * page (4k).
+ */ + curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid); + msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid); + + main_chain_element->Address = cpu_to_le64(msg_dma); + main_chain_element->NextChainOffset = 0; + main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | + MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; + + /* Build first prp, sge need not to be page aligned*/ + ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL; + sg_scmd = scsi_sglist(scmd); + sge_addr = sg_dma_address(sg_scmd); + sge_len = sg_dma_len(sg_scmd); + + offset = sge_addr & page_mask; + first_prp_len = nvme_pg_size - offset; + + ptr_first_sgl->Address = cpu_to_le64(sge_addr); + ptr_first_sgl->Length = cpu_to_le32(first_prp_len); + + data_len -= first_prp_len; + + if (sge_len > first_prp_len) { + sge_addr += first_prp_len; + sge_len -= first_prp_len; + } else if (data_len && (sge_len == first_prp_len)) { + sg_scmd = sg_next(sg_scmd); + sge_addr = sg_dma_address(sg_scmd); + sge_len = sg_dma_len(sg_scmd); + } + + for (;;) { + offset = sge_addr & page_mask; + + /* Put PRP pointer due to page boundary*/ + page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask; + if (unlikely(!page_mask_result)) { + scmd_printk(KERN_NOTICE, + scmd, "page boundary curr_buff: 0x%p\n", + curr_buff); + msg_dma += 8; + *curr_buff = cpu_to_le64(msg_dma); + curr_buff++; + num_prp_in_chain++; + } + + *curr_buff = cpu_to_le64(sge_addr); + curr_buff++; + msg_dma += 8; + num_prp_in_chain++; + + sge_addr += nvme_pg_size; + sge_len -= nvme_pg_size; + data_len -= nvme_pg_size; + + if (data_len <= 0) + break; + + if (sge_len > 0) + continue; + + sg_scmd = sg_next(sg_scmd); + sge_addr = sg_dma_address(sg_scmd); + sge_len = sg_dma_len(sg_scmd); + } + + main_chain_element->Length = + cpu_to_le32(num_prp_in_chain * sizeof(u64)); + return; +} + +static bool +base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc, + struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count) +{ + u32 data_length = 0; + bool build_prp = true; + + data_length = scsi_bufflen(scmd); + if (pcie_device && + (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) { + build_prp = false; + return build_prp; + } + + /* If Datalenth is <= 16K and number of SGE’s entries are <= 2 + * we built IEEE SGL + */ + if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2)) + build_prp = false; + + return build_prp; +} + +/** + * _base_check_pcie_native_sgl - This function is called for PCIe end devices to + * determine if the driver needs to build a native SGL. If so, that native + * SGL is built in the special contiguous buffers allocated especially for + * PCIe SGL creation. If the driver will not build a native SGL, return + * TRUE and a normal IEEE SGL will be built. Currently this routine + * supports NVMe. + * @ioc: per adapter object + * @mpi_request: mf request pointer + * @smid: system request message index + * @scmd: scsi command + * @pcie_device: points to the PCIe device's info + * + * Return: 0 if native SGL was built, 1 if no SGL was built + */ +static int +_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc, + Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd, + struct _pcie_device *pcie_device) +{ + int sges_left; + + /* Get the SG list pointer and info. */ + sges_left = scsi_dma_map(scmd); + if (sges_left < 0) + return 1; + + /* Check if we need to build a native SG list. */ + if (!base_is_prp_possible(ioc, pcie_device, + scmd, sges_left)) { + /* We built a native SG list, just return. 
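base_is_prp_possible() above boils down to a small predicate: never build a PRP for a PCIe device that speaks SCSI rather than NVMe, and fall back to an IEEE SGL when the transfer fits in four PRP pages (16K with 4K pages) and needs at most two scatter elements. The same rule restated as a stand-alone function, with NVME_PRP_PAGE_SIZE assumed to be 4096 and the device check reduced to a boolean:

/* Stand-alone restatement of the PRP-vs-IEEE-SGL decision; the constant
 * and the is_pcie_scsi_device flag are simplified stand-ins for the
 * driver's checks. */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define NVME_PRP_PAGE_SIZE 4096u   /* assumption for this sketch */

static bool is_prp_possible(size_t data_length, int sge_count,
                            bool is_pcie_scsi_device)
{
    if (is_pcie_scsi_device)
        return false;   /* PCIe device using SCSI, not NVMe: IEEE SGL */
    if (data_length <= NVME_PRP_PAGE_SIZE * 4 && sge_count <= 2)
        return false;   /* small I/O: IEEE SGL is cheaper */
    return true;        /* otherwise build a native NVMe PRP */
}

int main(void)
{
    printf("%d\n", is_prp_possible(8192, 2, false));   /* 0: IEEE SGL */
    printf("%d\n", is_prp_possible(65536, 9, false));  /* 1: PRP */
    return 0;
}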
*/ + goto out; + } + + /* + * Build native NVMe PRP. + */ + base_make_prp_nvme(ioc, scmd, mpi_request, + smid, sges_left); + + return 0; +out: + scsi_dma_unmap(scmd); + return 1; +} + +/** + * _base_add_sg_single_ieee - add sg element for IEEE format + * @paddr: virtual address for SGE + * @flags: SGE flags + * @chain_offset: number of 128 byte elements from start of segment + * @length: data transfer length + * @dma_addr: Physical address + */ +static void +_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length, + dma_addr_t dma_addr) +{ + Mpi25IeeeSgeChain64_t *sgel = paddr; + + sgel->Flags = flags; + sgel->NextChainOffset = chain_offset; + sgel->Length = cpu_to_le32(length); + sgel->Address = cpu_to_le64(dma_addr); +} + +/** + * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format + * @ioc: per adapter object + * @paddr: virtual address for SGE + * + * Create a zero length scatter gather entry to insure the IOCs hardware has + * something to use if the target device goes brain dead and tries + * to send data even when none is asked for. + */ +static void +_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) +{ + u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST); + + _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); +} + +/** + * _base_build_sg_scmd - main sg creation routine + * pcie_device is unused here! + * @ioc: per adapter object + * @scmd: scsi command + * @smid: system request message index + * @unused: unused pcie_device pointer + * Context: none. + * + * The main routine that builds scatter gather table from a given + * scsi request sent via the .queuecommand main handler. + * + * Return: 0 success, anything else error + */ +static int +_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused) +{ + Mpi2SCSIIORequest_t *mpi_request; + dma_addr_t chain_dma; + struct scatterlist *sg_scmd; + void *sg_local, *chain; + u32 chain_offset; + u32 chain_length; + u32 chain_flags; + int sges_left; + u32 sges_in_segment; + u32 sgl_flags; + u32 sgl_flags_last_element; + u32 sgl_flags_end_buffer; + struct chain_tracker *chain_req; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + /* init scatter gather flags */ + sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT; + if (scmd->sc_data_direction == DMA_TO_DEVICE) + sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC; + sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT) + << MPI2_SGE_FLAGS_SHIFT; + sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT | + MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST) + << MPI2_SGE_FLAGS_SHIFT; + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + + sg_scmd = scsi_sglist(scmd); + sges_left = scsi_dma_map(scmd); + if (sges_left < 0) + return -ENOMEM; + + sg_local = &mpi_request->SGL; + sges_in_segment = ioc->max_sges_in_main_message; + if (sges_left <= sges_in_segment) + goto fill_in_last_segment; + + mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) + + (sges_in_segment * ioc->sge_size))/4; + + /* fill in main message segment when there is a chain following */ + while (sges_in_segment) { + if (sges_in_segment == 1) + ioc->base_add_sg_single(sg_local, + sgl_flags_last_element | sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + else + ioc->base_add_sg_single(sg_local, sgl_flags | + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += 
ioc->sge_size; + sges_left--; + sges_in_segment--; + } + + /* initializing the chain flags and pointers */ + chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT; + chain_req = _base_get_chain_buffer_tracker(ioc, scmd); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + do { + sges_in_segment = (sges_left <= + ioc->max_sges_in_chain_message) ? sges_left : + ioc->max_sges_in_chain_message; + chain_offset = (sges_left == sges_in_segment) ? + 0 : (sges_in_segment * ioc->sge_size)/4; + chain_length = sges_in_segment * ioc->sge_size; + if (chain_offset) { + chain_offset = chain_offset << + MPI2_SGE_CHAIN_OFFSET_SHIFT; + chain_length += ioc->sge_size; + } + ioc->base_add_sg_single(sg_local, chain_flags | chain_offset | + chain_length, chain_dma); + sg_local = chain; + if (!chain_offset) + goto fill_in_last_segment; + + /* fill in chain segments */ + while (sges_in_segment) { + if (sges_in_segment == 1) + ioc->base_add_sg_single(sg_local, + sgl_flags_last_element | + sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + else + ioc->base_add_sg_single(sg_local, sgl_flags | + sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size; + sges_left--; + sges_in_segment--; + } + + chain_req = _base_get_chain_buffer_tracker(ioc, scmd); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + } while (1); + + + fill_in_last_segment: + + /* fill the last segment */ + while (sges_left) { + if (sges_left == 1) + ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer | + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + else + ioc->base_add_sg_single(sg_local, sgl_flags | + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size; + sges_left--; + } + + return 0; +} + +/** + * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format + * @ioc: per adapter object + * @scmd: scsi command + * @smid: system request message index + * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be + * constructed on need. + * Context: none. + * + * The main routine that builds scatter gather table from a given + * scsi request sent via the .queuecommand main handler. + * + * Return: 0 success, anything else error + */ +static int +_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device) +{ + Mpi25SCSIIORequest_t *mpi_request; + dma_addr_t chain_dma; + struct scatterlist *sg_scmd; + void *sg_local, *chain; + u32 chain_offset; + u32 chain_length; + int sges_left; + u32 sges_in_segment; + u8 simple_sgl_flags; + u8 simple_sgl_flags_last; + u8 chain_sgl_flags; + struct chain_tracker *chain_req; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + /* init scatter gather flags */ + simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + simple_sgl_flags_last = simple_sgl_flags | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST; + chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + + /* Check if we need to build a native SG list. */ + if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request, + smid, scmd, pcie_device) == 0)) { + /* We built a native SG list, just return. 
*/ + return 0; + } + + sg_scmd = scsi_sglist(scmd); + sges_left = scsi_dma_map(scmd); + if (sges_left < 0) + return -ENOMEM; + + sg_local = &mpi_request->SGL; + sges_in_segment = (ioc->request_sz - + offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee; + if (sges_left <= sges_in_segment) + goto fill_in_last_segment; + + mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) + + (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee); + + /* fill in main message segment when there is a chain following */ + while (sges_in_segment > 1) { + _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + sges_in_segment--; + } + + /* initializing the pointers */ + chain_req = _base_get_chain_buffer_tracker(ioc, scmd); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + do { + sges_in_segment = (sges_left <= + ioc->max_sges_in_chain_message) ? sges_left : + ioc->max_sges_in_chain_message; + chain_offset = (sges_left == sges_in_segment) ? + 0 : sges_in_segment; + chain_length = sges_in_segment * ioc->sge_size_ieee; + if (chain_offset) + chain_length += ioc->sge_size_ieee; + _base_add_sg_single_ieee(sg_local, chain_sgl_flags, + chain_offset, chain_length, chain_dma); + + sg_local = chain; + if (!chain_offset) + goto fill_in_last_segment; + + /* fill in chain segments */ + while (sges_in_segment) { + _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + sges_in_segment--; + } + + chain_req = _base_get_chain_buffer_tracker(ioc, scmd); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + } while (1); + + + fill_in_last_segment: + + /* fill the last segment */ + while (sges_left > 0) { + if (sges_left == 1) + _base_add_sg_single_ieee(sg_local, + simple_sgl_flags_last, 0, sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + else + _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + } + + return 0; +} + +/** + * _base_build_sg_ieee - build generic sg for IEEE format + * @ioc: per adapter object + * @psge: virtual address for SGE + * @data_out_dma: physical address for WRITES + * @data_out_sz: data xfer size for WRITES + * @data_in_dma: physical address for READS + * @data_in_sz: data xfer size for READS + */ +static void +_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge, + dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, + size_t data_in_sz) +{ + u8 sgl_flags; + + if (!data_out_sz && !data_in_sz) { + _base_build_zero_len_sge_ieee(ioc, psge); + return; + } + + if (data_out_sz && data_in_sz) { + /* WRITE sgel first */ + sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz, + data_out_dma); + + /* incr sgel */ + psge += ioc->sge_size_ieee; + + /* READ sgel last */ + sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST; + _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz, + data_in_dma); + } else if (data_out_sz) /* WRITE */ { + sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + 
_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz, + data_out_dma); + } else if (data_in_sz) /* READ */ { + sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz, + data_in_dma); + } +} + +#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10)) + +/** + * _base_config_dma_addressing - set dma addressing + * @ioc: per adapter object + * @pdev: PCI device struct + * + * Return: 0 for success, non-zero for failure. + */ +static int +_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) +{ + struct sysinfo s; + u64 coherent_dma_mask, dma_mask; + + if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4) { + ioc->dma_mask = 32; + coherent_dma_mask = dma_mask = DMA_BIT_MASK(32); + /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ + } else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) { + ioc->dma_mask = 63; + coherent_dma_mask = dma_mask = DMA_BIT_MASK(63); + } else { + ioc->dma_mask = 64; + coherent_dma_mask = dma_mask = DMA_BIT_MASK(64); + } + + if (ioc->use_32bit_dma) + coherent_dma_mask = DMA_BIT_MASK(32); + + if (dma_set_mask(&pdev->dev, dma_mask) || + dma_set_coherent_mask(&pdev->dev, coherent_dma_mask)) + return -ENODEV; + + if (ioc->dma_mask > 32) { + ioc->base_add_sg_single = &_base_add_sg_single_64; + ioc->sge_size = sizeof(Mpi2SGESimple64_t); + } else { + ioc->base_add_sg_single = &_base_add_sg_single_32; + ioc->sge_size = sizeof(Mpi2SGESimple32_t); + } + + si_meminfo(&s); + ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", + ioc->dma_mask, convert_to_kb(s.totalram)); + + return 0; +} + +/** + * _base_check_enable_msix - checks MSIX capabable. + * @ioc: per adapter object + * + * Check to see if card is capable of MSIX, and set number + * of available msix vectors + */ +static int +_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc) +{ + int base; + u16 message_control; + + /* Check whether controller SAS2008 B0 controller, + * if it is SAS2008 B0 controller use IO-APIC instead of MSIX + */ + if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 && + ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) { + return -EINVAL; + } + + base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); + if (!base) { + dfailprintk(ioc, ioc_info(ioc, "msix not supported\n")); + return -EINVAL; + } + + /* get msix vector count */ + /* NUMA_IO not supported for older controllers */ + if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2) + ioc->msix_vector_count = 1; + else { + pci_read_config_word(ioc->pdev, base + 2, &message_control); + ioc->msix_vector_count = (message_control & 0x3FF) + 1; + } + dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n", + ioc->msix_vector_count)); + return 0; +} + +/** + * mpt3sas_base_free_irq - free irq + * @ioc: per adapter object + * + * Freeing respective reply_queue from the list. 
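_base_config_dma_addressing() above selects a 32-, 63- or 64-bit DMA mask depending on the controller generation and the use_32bit_dma override, and builds the mask with DMA_BIT_MASK(n). The quick check below reproduces the kernel's DMA_BIT_MASK definition (the 64-bit case is special-cased to avoid an undefined shift) and prints the three masks the driver can end up with:

/* DMA_BIT_MASK(n) as defined in the kernel's dma-mapping.h: a mask of
 * the low n bits, with 64 special-cased to avoid a 64-bit shift. */
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
    printf("32-bit mask: 0x%016llx\n", DMA_BIT_MASK(32)); /* mcpu endpoint or 32-bit dma_addr_t */
    printf("63-bit mask: 0x%016llx\n", DMA_BIT_MASK(63)); /* SAS3 and later controllers */
    printf("64-bit mask: 0x%016llx\n", DMA_BIT_MASK(64)); /* older SAS2 parts */
    return 0;
}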
+ */ +void +mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned int irq; + struct adapter_reply_queue *reply_q, *next; + + if (list_empty(&ioc->reply_queue_list)) + return; + + list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { + list_del(&reply_q->list); + if (reply_q->is_iouring_poll_q) { + kfree(reply_q); + continue; + } + + if (ioc->smp_affinity_enable) { + irq = pci_irq_vector(ioc->pdev, reply_q->msix_index); + irq_update_affinity_hint(irq, NULL); + } + free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index), + reply_q); + kfree(reply_q); + } +} + +/** + * _base_request_irq - request irq + * @ioc: per adapter object + * @index: msix index into vector table + * + * Inserting respective reply_queue into the list. + */ +static int +_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index) +{ + struct pci_dev *pdev = ioc->pdev; + struct adapter_reply_queue *reply_q; + int r, qid; + + reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL); + if (!reply_q) { + ioc_err(ioc, "unable to allocate memory %zu!\n", + sizeof(struct adapter_reply_queue)); + return -ENOMEM; + } + reply_q->ioc = ioc; + reply_q->msix_index = index; + + atomic_set(&reply_q->busy, 0); + + if (index >= ioc->iopoll_q_start_index) { + qid = index - ioc->iopoll_q_start_index; + snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d", + ioc->driver_name, ioc->id, qid); + reply_q->is_iouring_poll_q = 1; + ioc->io_uring_poll_queues[qid].reply_q = reply_q; + goto out; + } + + + if (ioc->msix_enable) + snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", + ioc->driver_name, ioc->id, index); + else + snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d", + ioc->driver_name, ioc->id); + r = request_irq(pci_irq_vector(pdev, index), _base_interrupt, + IRQF_SHARED, reply_q->name, reply_q); + if (r) { + pr_err("%s: unable to allocate interrupt %d!\n", + reply_q->name, pci_irq_vector(pdev, index)); + kfree(reply_q); + return -EBUSY; + } +out: + INIT_LIST_HEAD(&reply_q->list); + list_add_tail(&reply_q->list, &ioc->reply_queue_list); + return 0; +} + +/** + * _base_assign_reply_queues - assigning msix index for each cpu + * @ioc: per adapter object + * + * The enduser would need to set the affinity via /proc/irq/#/smp_affinity + */ +static void +_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned int cpu, nr_cpus, nr_msix, index = 0, irq; + struct adapter_reply_queue *reply_q; + int iopoll_q_count = ioc->reply_queue_count - + ioc->iopoll_q_start_index; + const struct cpumask *mask; + + if (!_base_is_controller_msix_enabled(ioc)) + return; + + if (ioc->msix_load_balance) + return; + + memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); + + nr_cpus = num_online_cpus(); + nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count, + ioc->facts.MaxMSIxVectors); + if (!nr_msix) + return; + + if (ioc->smp_affinity_enable) { + + /* + * set irq affinity to local numa node for those irqs + * corresponding to high iops queues. 
+ */ + if (ioc->high_iops_queues) { + mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev)); + for (index = 0; index < ioc->high_iops_queues; + index++) { + irq = pci_irq_vector(ioc->pdev, index); + irq_set_affinity_and_hint(irq, mask); + } + } + + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + const cpumask_t *mask; + + if (reply_q->msix_index < ioc->high_iops_queues || + reply_q->msix_index >= ioc->iopoll_q_start_index) + continue; + + mask = pci_irq_get_affinity(ioc->pdev, + reply_q->msix_index); + if (!mask) { + ioc_warn(ioc, "no affinity for msi %x\n", + reply_q->msix_index); + goto fall_back; + } + + for_each_cpu_and(cpu, mask, cpu_online_mask) { + if (cpu >= ioc->cpu_msix_table_sz) + break; + ioc->cpu_msix_table[cpu] = reply_q->msix_index; + } + } + return; + } + +fall_back: + cpu = cpumask_first(cpu_online_mask); + nr_msix -= (ioc->high_iops_queues - iopoll_q_count); + index = 0; + + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + unsigned int i, group = nr_cpus / nr_msix; + + if (reply_q->msix_index < ioc->high_iops_queues || + reply_q->msix_index >= ioc->iopoll_q_start_index) + continue; + + if (cpu >= nr_cpus) + break; + + if (index < nr_cpus % nr_msix) + group++; + + for (i = 0 ; i < group ; i++) { + ioc->cpu_msix_table[cpu] = reply_q->msix_index; + cpu = cpumask_next(cpu, cpu_online_mask); + } + index++; + } +} + +/** + * _base_check_and_enable_high_iops_queues - enable high iops mode + * @ioc: per adapter object + * @hba_msix_vector_count: msix vectors supported by HBA + * + * Enable high iops queues only if + * - HBA is a SEA/AERO controller and + * - MSI-Xs vector supported by the HBA is 128 and + * - total CPU count in the system >=16 and + * - loaded driver with default max_msix_vectors module parameter and + * - system booted in non kdump mode + * + * Return: nothing. + */ +static void +_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc, + int hba_msix_vector_count) +{ + u16 lnksta, speed; + + /* + * Disable high iops queues if io uring poll queues are enabled. + */ + if (perf_mode == MPT_PERF_MODE_IOPS || + perf_mode == MPT_PERF_MODE_LATENCY || + ioc->io_uring_poll_queues) { + ioc->high_iops_queues = 0; + return; + } + + if (perf_mode == MPT_PERF_MODE_DEFAULT) { + + pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta); + speed = lnksta & PCI_EXP_LNKSTA_CLS; + + if (speed < 0x4) { + ioc->high_iops_queues = 0; + return; + } + } + + if (!reset_devices && ioc->is_aero_ioc && + hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES && + num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES && + max_msix_vectors == -1) + ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES; + else + ioc->high_iops_queues = 0; +} + +/** + * mpt3sas_base_disable_msix - disables msix + * @ioc: per adapter object + * + */ +void +mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc) +{ + if (!ioc->msix_enable) + return; + pci_free_irq_vectors(ioc->pdev); + ioc->msix_enable = 0; + kfree(ioc->io_uring_poll_queues); +} + +/** + * _base_alloc_irq_vectors - allocate msix vectors + * @ioc: per adapter object + * + */ +static int +_base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc) +{ + int i, irq_flags = PCI_IRQ_MSIX; + struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues }; + struct irq_affinity *descp = &desc; + /* + * Don't allocate msix vectors for poll_queues. + * msix_vectors is always within a range of FW supported reply queue. 
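A small stand-alone illustration of the fall-back spreading in _base_assign_reply_queues above, assuming ten online CPUs, four usable reply queues, and ignoring the high-IOPS and poll-queue exclusions.

#include <stdio.h>

int main(void)
{
	unsigned int nr_cpus = 10, nr_msix = 4, cpu = 0;
	unsigned int cpu_msix_table[10];

	for (unsigned int index = 0; index < nr_msix; index++) {
		unsigned int group = nr_cpus / nr_msix;

		if (index < nr_cpus % nr_msix)
			group++;	/* the first queues absorb the remainder */
		for (unsigned int i = 0; i < group && cpu < nr_cpus; i++)
			cpu_msix_table[cpu++] = index;
	}
	for (unsigned int c = 0; c < nr_cpus; c++)
		printf("cpu %u -> reply queue %u\n", c, cpu_msix_table[c]);
	return 0;
}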
+ */ + int nr_msix_vectors = ioc->iopoll_q_start_index; + + + if (ioc->smp_affinity_enable) + irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; + else + descp = NULL; + + ioc_info(ioc, " %d %d %d\n", ioc->high_iops_queues, + ioc->reply_queue_count, nr_msix_vectors); + + i = pci_alloc_irq_vectors_affinity(ioc->pdev, + ioc->high_iops_queues, + nr_msix_vectors, irq_flags, descp); + + return i; +} + +/** + * _base_enable_msix - enables msix, failback to io_apic + * @ioc: per adapter object + * + */ +static int +_base_enable_msix(struct MPT3SAS_ADAPTER *ioc) +{ + int r; + int i, local_max_msix_vectors; + u8 try_msix = 0; + int iopoll_q_count = 0; + + ioc->msix_load_balance = false; + + if (msix_disable == -1 || msix_disable == 0) + try_msix = 1; + + if (!try_msix) + goto try_ioapic; + + if (_base_check_enable_msix(ioc) != 0) + goto try_ioapic; + + ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count); + pr_info("\t no of cores: %d, max_msix_vectors: %d\n", + ioc->cpu_count, max_msix_vectors); + + ioc->reply_queue_count = + min_t(int, ioc->cpu_count, ioc->msix_vector_count); + + if (!ioc->rdpq_array_enable && max_msix_vectors == -1) + local_max_msix_vectors = (reset_devices) ? 1 : 8; + else + local_max_msix_vectors = max_msix_vectors; + + if (local_max_msix_vectors == 0) + goto try_ioapic; + + /* + * Enable msix_load_balance only if combined reply queue mode is + * disabled on SAS3 & above generation HBA devices. + */ + if (!ioc->combined_reply_queue && + ioc->hba_mpi_version_belonged != MPI2_VERSION) { + ioc_info(ioc, + "combined ReplyQueue is off, Enabling msix load balance\n"); + ioc->msix_load_balance = true; + } + + /* + * smp affinity setting is not need when msix load balance + * is enabled. + */ + if (ioc->msix_load_balance) + ioc->smp_affinity_enable = 0; + + if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1) + ioc->shost->host_tagset = 0; + + /* + * Enable io uring poll queues only if host_tagset is enabled. + */ + if (ioc->shost->host_tagset) + iopoll_q_count = poll_queues; + + if (iopoll_q_count) { + ioc->io_uring_poll_queues = kcalloc(iopoll_q_count, + sizeof(struct io_uring_poll_queue), GFP_KERNEL); + if (!ioc->io_uring_poll_queues) + iopoll_q_count = 0; + } + + if (ioc->is_aero_ioc) + _base_check_and_enable_high_iops_queues(ioc, + ioc->msix_vector_count); + + /* + * Add high iops queues count to reply queue count if high iops queues + * are enabled. + */ + ioc->reply_queue_count = min_t(int, + ioc->reply_queue_count + ioc->high_iops_queues, + ioc->msix_vector_count); + + /* + * Adjust the reply queue count incase reply queue count + * exceeds the user provided MSIx vectors count. + */ + if (local_max_msix_vectors > 0) + ioc->reply_queue_count = min_t(int, local_max_msix_vectors, + ioc->reply_queue_count); + /* + * Add io uring poll queues count to reply queues count + * if io uring is enabled in driver. + */ + if (iopoll_q_count) { + if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS)) + iopoll_q_count = 0; + ioc->reply_queue_count = min_t(int, + ioc->reply_queue_count + iopoll_q_count, + ioc->msix_vector_count); + } + + /* + * Starting index of io uring poll queues in reply queue list. + */ + ioc->iopoll_q_start_index = + ioc->reply_queue_count - iopoll_q_count; + + r = _base_alloc_irq_vectors(ioc); + if (r < 0) { + ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r); + goto try_ioapic; + } + + /* + * Adjust the reply queue count if the allocated + * MSIx vectors is less then the requested number + * of MSIx vectors. 
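The reply-queue accounting in _base_enable_msix condenses to the arithmetic below. The input numbers are made up, and MPT3_MIN_IRQS is assumed to be 4 here.

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int cpu_count = 32, msix_vectors = 96;
	unsigned int high_iops = 8, poll_queues = 2, min_irqs = 4;
	int max_user = -1;	/* max_msix_vectors module parameter at default */
	unsigned int reply_q = min_u(cpu_count, msix_vectors);

	reply_q = min_u(reply_q + high_iops, msix_vectors);
	if (max_user > 0)
		reply_q = min_u((unsigned int)max_user, reply_q);
	if (reply_q < poll_queues + min_irqs)
		poll_queues = 0;	/* too few vectors, drop poll queues */
	reply_q = min_u(reply_q + poll_queues, msix_vectors);

	printf("reply queues: %u, poll queues start at index %u\n",
	       reply_q, reply_q - poll_queues);
	return 0;
}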
+ */ + if (r < ioc->iopoll_q_start_index) { + ioc->reply_queue_count = r + iopoll_q_count; + ioc->iopoll_q_start_index = + ioc->reply_queue_count - iopoll_q_count; + } + + ioc->msix_enable = 1; + for (i = 0; i < ioc->reply_queue_count; i++) { + r = _base_request_irq(ioc, i); + if (r) { + mpt3sas_base_free_irq(ioc); + mpt3sas_base_disable_msix(ioc); + goto try_ioapic; + } + } + + ioc_info(ioc, "High IOPs queues : %s\n", + ioc->high_iops_queues ? "enabled" : "disabled"); + + return 0; + +/* failback to io_apic interrupt routing */ + try_ioapic: + ioc->high_iops_queues = 0; + ioc_info(ioc, "High IOPs queues : disabled\n"); + ioc->reply_queue_count = 1; + ioc->iopoll_q_start_index = ioc->reply_queue_count - 0; + r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY); + if (r < 0) { + dfailprintk(ioc, + ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n", + r)); + } else + r = _base_request_irq(ioc, 0); + + return r; +} + +/** + * mpt3sas_base_unmap_resources - free controller resources + * @ioc: per adapter object + */ +static void +mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + + dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + mpt3sas_base_free_irq(ioc); + mpt3sas_base_disable_msix(ioc); + + kfree(ioc->replyPostRegisterIndex); + ioc->replyPostRegisterIndex = NULL; + + + if (ioc->chip_phys) { + iounmap(ioc->chip); + ioc->chip_phys = 0; + } + + if (pci_is_enabled(pdev)) { + pci_release_selected_regions(ioc->pdev, ioc->bars); + pci_disable_device(pdev); + } +} + +static int +_base_diag_reset(struct MPT3SAS_ADAPTER *ioc); + +/** + * mpt3sas_base_check_for_fault_and_issue_reset - check if IOC is in fault state + * and if it is in fault state then issue diag reset. + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc) +{ + u32 ioc_state; + int rc = -EFAULT; + + dinitprintk(ioc, pr_info("%s\n", __func__)); + if (ioc->pci_error_recovery) + return 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state)); + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + mpt3sas_base_mask_interrupts(ioc); + rc = _base_diag_reset(ioc); + } else if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + mpt3sas_base_wait_for_coredump_completion(ioc, __func__); + mpt3sas_base_mask_interrupts(ioc); + rc = _base_diag_reset(ioc); + } + + return rc; +} + +/** + * mpt3sas_base_map_resources - map in controller resources (io/irq/memap) + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + u32 memap_sz; + u32 pio_sz; + int i, r = 0, rc; + u64 pio_chip = 0; + phys_addr_t chip_phys = 0; + struct adapter_reply_queue *reply_q; + int iopoll_q_count = 0; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); + if (pci_enable_device_mem(pdev)) { + ioc_warn(ioc, "pci_enable_device_mem: failed\n"); + ioc->bars = 0; + return -ENODEV; + } + + + if (pci_request_selected_regions(pdev, ioc->bars, + ioc->driver_name)) { + ioc_warn(ioc, "pci_request_selected_regions: failed\n"); + ioc->bars = 0; + r = -ENODEV; + goto out_fail; + } + + pci_set_master(pdev); + + + if (_base_config_dma_addressing(ioc, pdev) != 0) { + ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev)); + r = -ENODEV; + goto out_fail; + } + + for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) && + (!memap_sz || !pio_sz); i++) { + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + if (pio_sz) + continue; + pio_chip = (u64)pci_resource_start(pdev, i); + pio_sz = pci_resource_len(pdev, i); + } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { + if (memap_sz) + continue; + ioc->chip_phys = pci_resource_start(pdev, i); + chip_phys = ioc->chip_phys; + memap_sz = pci_resource_len(pdev, i); + ioc->chip = ioremap(ioc->chip_phys, memap_sz); + } + } + + if (ioc->chip == NULL) { + ioc_err(ioc, + "unable to map adapter memory! or resource not found\n"); + r = -EINVAL; + goto out_fail; + } + + mpt3sas_base_mask_interrupts(ioc); + + r = _base_get_ioc_facts(ioc); + if (r) { + rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc); + if (rc || (_base_get_ioc_facts(ioc))) + goto out_fail; + } + + if (!ioc->rdpq_array_enable_assigned) { + ioc->rdpq_array_enable = ioc->rdpq_array_capable; + ioc->rdpq_array_enable_assigned = 1; + } + + r = _base_enable_msix(ioc); + if (r) + goto out_fail; + + iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; + for (i = 0; i < iopoll_q_count; i++) { + atomic_set(&ioc->io_uring_poll_queues[i].busy, 0); + atomic_set(&ioc->io_uring_poll_queues[i].pause, 0); + } + + if (!ioc->is_driver_loading) + _base_init_irqpolls(ioc); + /* Use the Combined reply queue feature only for SAS3 C0 & higher + * revision HBAs and also only when reply queue count is greater than 8 + */ + if (ioc->combined_reply_queue) { + /* Determine the Supplemental Reply Post Host Index Registers + * Addresse. Supplemental Reply Post Host Index Registers + * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and + * each register is at offset bytes of + * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one. 
+ */ + ioc->replyPostRegisterIndex = kcalloc( + ioc->combined_reply_index_count, + sizeof(resource_size_t *), GFP_KERNEL); + if (!ioc->replyPostRegisterIndex) { + ioc_err(ioc, + "allocation for replyPostRegisterIndex failed!\n"); + r = -ENOMEM; + goto out_fail; + } + + for (i = 0; i < ioc->combined_reply_index_count; i++) { + ioc->replyPostRegisterIndex[i] = + (resource_size_t __iomem *) + ((u8 __force *)&ioc->chip->Doorbell + + MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + + (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET)); + } + } + + if (ioc->is_warpdrive) { + ioc->reply_post_host_index[0] = (resource_size_t __iomem *) + &ioc->chip->ReplyPostHostIndex; + + for (i = 1; i < ioc->cpu_msix_table_sz; i++) + ioc->reply_post_host_index[i] = + (resource_size_t __iomem *) + ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) + * 4))); + } + + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (reply_q->msix_index >= ioc->iopoll_q_start_index) { + pr_info("%s: enabled: index: %d\n", + reply_q->name, reply_q->msix_index); + continue; + } + + pr_info("%s: %s enabled: IRQ %d\n", + reply_q->name, + ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC", + pci_irq_vector(ioc->pdev, reply_q->msix_index)); + } + + ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n", + &chip_phys, ioc->chip, memap_sz); + ioc_info(ioc, "ioport(0x%016llx), size(%d)\n", + (unsigned long long)pio_chip, pio_sz); + + /* Save PCI configuration state for recovery from PCI AER/EEH errors */ + pci_save_state(pdev); + return 0; + + out_fail: + mpt3sas_base_unmap_resources(ioc); + return r; +} + +/** + * mpt3sas_base_get_msg_frame - obtain request mf pointer + * @ioc: per adapter object + * @smid: system request message index(smid zero is invalid) + * + * Return: virt pointer to message frame. + */ +void * +mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return (void *)(ioc->request + (smid * ioc->request_sz)); +} + +/** + * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr + * @ioc: per adapter object + * @smid: system request message index + * + * Return: virt pointer to sense buffer. + */ +void * +mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); +} + +/** + * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr + * @ioc: per adapter object + * @smid: system request message index + * + * Return: phys pointer to the low 32bit address of the sense buffer. + */ +__le32 +mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return cpu_to_le32(ioc->sense_dma + ((smid - 1) * + SCSI_SENSE_BUFFERSIZE)); +} + +/** + * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr + * @ioc: per adapter object + * @smid: system request message index + * + * Return: virt pointer to a PCIe SGL. + */ +void * +mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl); +} + +/** + * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr + * @ioc: per adapter object + * @smid: system request message index + * + * Return: phys pointer to the address of the PCIe buffer. 
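A sketch of how the per-group supplemental reply-post host-index registers are located by mpt3sas_base_map_resources above. The base offset and stride below are placeholders, not the real MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET values.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t doorbell = 0xfebf0020;	/* hypothetical mapped address */
	unsigned int sup_base_off = 0x300;	/* placeholder offset */
	unsigned int reg_stride = 0x10;		/* placeholder stride */

	for (unsigned int i = 0; i < 4; i++)
		printf("supplemental host index register %u at %#lx\n", i,
		       (unsigned long)(doorbell + sup_base_off + i * reg_stride));
	return 0;
}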
+ */ +dma_addr_t +mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma; +} + +/** + * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address + * @ioc: per adapter object + * @phys_addr: lower 32 physical addr of the reply + * + * Converts 32bit lower physical addr into a virt address. + */ +void * +mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr) +{ + if (!phys_addr) + return NULL; + return ioc->reply + (phys_addr - (u32)ioc->reply_dma); +} + +/** + * _base_get_msix_index - get the msix index + * @ioc: per adapter object + * @scmd: scsi_cmnd object + * + * Return: msix index of general reply queues, + * i.e. reply queue on which IO request's reply + * should be posted by the HBA firmware. + */ +static inline u8 +_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + /* Enables reply_queue load balancing */ + if (ioc->msix_load_balance) + return ioc->reply_queue_count ? + base_mod64(atomic64_add_return(1, + &ioc->total_io_cnt), ioc->reply_queue_count) : 0; + + if (scmd && ioc->shost->nr_hw_queues > 1) { + u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + + return blk_mq_unique_tag_to_hwq(tag) + + ioc->high_iops_queues; + } + + return ioc->cpu_msix_table[raw_smp_processor_id()]; +} + +/** + * _base_get_high_iops_msix_index - get the msix index of + * high iops queues + * @ioc: per adapter object + * @scmd: scsi_cmnd object + * + * Return: msix index of high iops reply queues. + * i.e. high iops reply queue on which IO request's + * reply should be posted by the HBA firmware. + */ +static inline u8 +_base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + /** + * Round robin the IO interrupts among the high iops + * reply queues in terms of batch count 16 when outstanding + * IOs on the target device is >=8. 
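The round-robin described above reduces to a divide and a modulo. The batch size of 16 follows the comment; the eight high-IOPS reply queues are an assumption standing in for MPT3SAS_HIGH_IOPS_REPLY_QUEUES.

#include <stdint.h>
#include <stdio.h>

#define BATCH_COUNT		16
#define HIGH_IOPS_QUEUES	8

int main(void)
{
	/* every BATCH_COUNT submissions move on to the next high-IOPS queue */
	for (uint64_t n = 0; n < 64; n += 8)
		printf("submission %2llu -> high-IOPS queue %llu\n",
		       (unsigned long long)n,
		       (unsigned long long)((n / BATCH_COUNT) % HIGH_IOPS_QUEUES));
	return 0;
}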
+ */ + + if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH) + return base_mod64(( + atomic64_add_return(1, &ioc->high_iops_outstanding) / + MPT3SAS_HIGH_IOPS_BATCH_COUNT), + MPT3SAS_HIGH_IOPS_REPLY_QUEUES); + + return _base_get_msix_index(ioc, scmd); +} + +/** + * mpt3sas_base_get_smid - obtain a free smid from internal queue + * @ioc: per adapter object + * @cb_idx: callback index + * + * Return: smid (zero is invalid) + */ +u16 +mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) +{ + unsigned long flags; + struct request_tracker *request; + u16 smid; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->internal_free_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + ioc_err(ioc, "%s: smid not available\n", __func__); + return 0; + } + + request = list_entry(ioc->internal_free_list.next, + struct request_tracker, tracker_list); + request->cb_idx = cb_idx; + smid = request->smid; + list_del(&request->tracker_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return smid; +} + +/** + * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue + * @ioc: per adapter object + * @cb_idx: callback index + * @scmd: pointer to scsi command object + * + * Return: smid (zero is invalid) + */ +u16 +mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, + struct scsi_cmnd *scmd) +{ + struct scsiio_tracker *request = scsi_cmd_priv(scmd); + u16 smid; + u32 tag, unique_tag; + + unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + tag = blk_mq_unique_tag_to_tag(unique_tag); + + /* + * Store hw queue number corresponding to the tag. + * This hw queue number is used later to determine + * the unique_tag using the logic below. This unique_tag + * is used to retrieve the scmd pointer corresponding + * to tag using scsi_host_find_tag() API. + * + * tag = smid - 1; + * unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag; + */ + ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag); + + smid = tag + 1; + request->cb_idx = cb_idx; + request->smid = smid; + request->scmd = scmd; + INIT_LIST_HEAD(&request->chain_list); + return smid; +} + +/** + * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue + * @ioc: per adapter object + * @cb_idx: callback index + * + * Return: smid (zero is invalid) + */ +u16 +mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) +{ + unsigned long flags; + struct request_tracker *request; + u16 smid; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->hpr_free_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return 0; + } + + request = list_entry(ioc->hpr_free_list.next, + struct request_tracker, tracker_list); + request->cb_idx = cb_idx; + smid = request->smid; + list_del(&request->tracker_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return smid; +} + +static void +_base_recovery_check(struct MPT3SAS_ADAPTER *ioc) +{ + /* + * See _wait_for_commands_to_complete() call with regards to this code. 
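The smid/unique_tag bookkeeping spelled out in mpt3sas_base_get_smid_scsiio above boils down to the bit packing below; the 16-bit shift mirrors BLK_MQ_UNIQUE_TAG_BITS.

#include <stdio.h>

#define UNIQUE_TAG_BITS 16	/* mirrors BLK_MQ_UNIQUE_TAG_BITS */

int main(void)
{
	unsigned int hwq = 3, tag = 41;		/* example values */
	unsigned int smid = tag + 1;		/* smid 0 stays invalid */
	unsigned int unique_tag = (hwq << UNIQUE_TAG_BITS) | tag;

	printf("smid=%u hwq=%u unique_tag=%#x\n", smid, hwq, unique_tag);
	printf("recovered: tag=%u hwq=%u\n",
	       unique_tag & ((1u << UNIQUE_TAG_BITS) - 1),
	       unique_tag >> UNIQUE_TAG_BITS);
	return 0;
}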
+ */ + if (ioc->shost_recovery && ioc->pending_io_count) { + ioc->pending_io_count = scsi_host_busy(ioc->shost); + if (ioc->pending_io_count == 0) + wake_up(&ioc->reset_wq); + } +} + +void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc, + struct scsiio_tracker *st) +{ + if (WARN_ON(st->smid == 0)) + return; + st->cb_idx = 0xFF; + st->direct_io = 0; + st->scmd = NULL; + atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0); + st->smid = 0; +} + +/** + * mpt3sas_base_free_smid - put smid back on free_list + * @ioc: per adapter object + * @smid: system request message index + */ +void +mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + unsigned long flags; + int i; + + if (smid < ioc->hi_priority_smid) { + struct scsiio_tracker *st; + void *request; + + st = _get_st_from_smid(ioc, smid); + if (!st) { + _base_recovery_check(ioc); + return; + } + + /* Clear MPI request frame */ + request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(request, 0, ioc->request_sz); + + mpt3sas_base_clear_st(ioc, st); + _base_recovery_check(ioc); + ioc->io_queue_num[smid - 1] = 0; + return; + } + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (smid < ioc->internal_smid) { + /* hi-priority */ + i = smid - ioc->hi_priority_smid; + ioc->hpr_lookup[i].cb_idx = 0xFF; + list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list); + } else if (smid <= ioc->hba_queue_depth) { + /* internal queue */ + i = smid - ioc->internal_smid; + ioc->internal_lookup[i].cb_idx = 0xFF; + list_add(&ioc->internal_lookup[i].tracker_list, + &ioc->internal_free_list); + } + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); +} + +/** + * _base_mpi_ep_writeq - 32 bit write to MMIO + * @b: data payload + * @addr: address in MMIO space + * @writeq_lock: spin lock + * + * This special handling for MPI EP to take care of 32 bit + * environment where its not quarenteed to send the entire word + * in one transfer. + */ +static inline void +_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr, + spinlock_t *writeq_lock) +{ + unsigned long flags; + + spin_lock_irqsave(writeq_lock, flags); + __raw_writel((u32)(b), addr); + __raw_writel((u32)(b >> 32), (addr + 4)); + spin_unlock_irqrestore(writeq_lock, flags); +} + +/** + * _base_writeq - 64 bit write to MMIO + * @b: data payload + * @addr: address in MMIO space + * @writeq_lock: spin lock + * + * Glue for handling an atomic 64 bit word to MMIO. This special handling takes + * care of 32 bit environment where its not quarenteed to send the entire word + * in one transfer. + */ +#if defined(writeq) && defined(CONFIG_64BIT) +static inline void +_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) +{ + wmb(); + __raw_writeq(b, addr); + barrier(); +} +#else +static inline void +_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) +{ + _base_mpi_ep_writeq(b, addr, writeq_lock); +} +#endif + +/** + * _base_set_and_get_msix_index - get the msix index and assign to msix_io + * variable of scsi tracker + * @ioc: per adapter object + * @smid: system request message index + * + * Return: msix index. 
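When writeq is not available, _base_mpi_ep_writeq above posts the 64-bit request descriptor as two 32-bit writes while holding a spinlock, so the halves of concurrent posts cannot interleave. A sketch of the split, with a plain array standing in for the MMIO register:

#include <stdint.h>
#include <stdio.h>

static void post_descriptor_32(uint64_t desc, uint32_t out[2])
{
	out[0] = (uint32_t)desc;		/* low dword first */
	out[1] = (uint32_t)(desc >> 32);	/* then the high dword */
}

int main(void)
{
	uint32_t mmio[2];

	post_descriptor_32(0x1122334455667788ULL, mmio);
	printf("low=%#x high=%#x\n", mmio[0], mmio[1]);
	return 0;
}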
+ */ +static u8 +_base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + struct scsiio_tracker *st = NULL; + + if (smid < ioc->hi_priority_smid) + st = _get_st_from_smid(ioc, smid); + + if (st == NULL) + return _base_get_msix_index(ioc, NULL); + + st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd); + return st->msix_io; +} + +/** + * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle + */ +static void +_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, + u16 smid, u16 handle) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + void *mpi_req_iomem; + __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); + + _clone_sg_entries(ioc, (void *) mfp, smid); + mpi_req_iomem = (void __force *)ioc->chip + + MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); + _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, + ioc->request_sz); + descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; + descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.SCSIIO.SMID = cpu_to_le16(smid); + descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); + descriptor.SCSIIO.LMID = 0; + _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * _base_put_smid_scsi_io - send SCSI_IO request to firmware + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle + */ +static void +_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + + + descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; + descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.SCSIIO.SMID = cpu_to_le16(smid); + descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); + descriptor.SCSIIO.LMID = 0; + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * _base_put_smid_fast_path - send fast path request to firmware + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle + */ +static void +_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 handle) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + + descriptor.SCSIIO.RequestFlags = + MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; + descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.SCSIIO.SMID = cpu_to_le16(smid); + descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); + descriptor.SCSIIO.LMID = 0; + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * _base_put_smid_hi_priority - send Task Management request to firmware + * @ioc: per adapter object + * @smid: system request message index + * @msix_task: msix_task will be same as msix of IO in case of task abort else 0 + */ +static void +_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 msix_task) +{ + Mpi2RequestDescriptorUnion_t descriptor; + void *mpi_req_iomem; + u64 *request; + + if (ioc->is_mcpu_endpoint) { + __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); + + /* TBD 256 is offset within sys register. 
*/ + mpi_req_iomem = (void __force *)ioc->chip + + MPI_FRAME_START_OFFSET + + (smid * ioc->request_sz); + _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, + ioc->request_sz); + } + + request = (u64 *)&descriptor; + + descriptor.HighPriority.RequestFlags = + MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; + descriptor.HighPriority.MSIxIndex = msix_task; + descriptor.HighPriority.SMID = cpu_to_le16(smid); + descriptor.HighPriority.LMID = 0; + descriptor.HighPriority.Reserved1 = 0; + if (ioc->is_mcpu_endpoint) + _base_mpi_ep_writeq(*request, + &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); + else + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to + * firmware + * @ioc: per adapter object + * @smid: system request message index + */ +void +mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + + descriptor.Default.RequestFlags = + MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED; + descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.Default.SMID = cpu_to_le16(smid); + descriptor.Default.LMID = 0; + descriptor.Default.DescriptorTypeDependent = 0; + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * _base_put_smid_default - Default, primarily used for config pages + * @ioc: per adapter object + * @smid: system request message index + */ +static void +_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + Mpi2RequestDescriptorUnion_t descriptor; + void *mpi_req_iomem; + u64 *request; + + if (ioc->is_mcpu_endpoint) { + __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); + + _clone_sg_entries(ioc, (void *) mfp, smid); + /* TBD 256 is offset within sys register */ + mpi_req_iomem = (void __force *)ioc->chip + + MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); + _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, + ioc->request_sz); + } + request = (u64 *)&descriptor; + descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; + descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.Default.SMID = cpu_to_le16(smid); + descriptor.Default.LMID = 0; + descriptor.Default.DescriptorTypeDependent = 0; + if (ioc->is_mcpu_endpoint) + _base_mpi_ep_writeq(*request, + &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); + else + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using + * Atomic Request Descriptor + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle, unused in this function, for function type match + * + * Return: nothing. 
+ */ +static void +_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 handle) +{ + Mpi26AtomicRequestDescriptor_t descriptor; + u32 *request = (u32 *)&descriptor; + + descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; + descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.SMID = cpu_to_le16(smid); + + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +/** + * _base_put_smid_fast_path_atomic - send fast path request to firmware + * using Atomic Request Descriptor + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle, unused in this function, for function type match + * Return: nothing + */ +static void +_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 handle) +{ + Mpi26AtomicRequestDescriptor_t descriptor; + u32 *request = (u32 *)&descriptor; + + descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; + descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.SMID = cpu_to_le16(smid); + + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +/** + * _base_put_smid_hi_priority_atomic - send Task Management request to + * firmware using Atomic Request Descriptor + * @ioc: per adapter object + * @smid: system request message index + * @msix_task: msix_task will be same as msix of IO in case of task abort else 0 + * + * Return: nothing. + */ +static void +_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 msix_task) +{ + Mpi26AtomicRequestDescriptor_t descriptor; + u32 *request = (u32 *)&descriptor; + + descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; + descriptor.MSIxIndex = msix_task; + descriptor.SMID = cpu_to_le16(smid); + + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +/** + * _base_put_smid_default_atomic - Default, primarily used for config pages + * use Atomic Request Descriptor + * @ioc: per adapter object + * @smid: system request message index + * + * Return: nothing. 
+ */ +static void +_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + Mpi26AtomicRequestDescriptor_t descriptor; + u32 *request = (u32 *)&descriptor; + + descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; + descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.SMID = cpu_to_le16(smid); + + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +/** + * _base_display_OEMs_branding - Display branding string + * @ioc: per adapter object + */ +static void +_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc) +{ + if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL) + return; + + switch (ioc->pdev->subsystem_vendor) { + case PCI_VENDOR_ID_INTEL: + switch (ioc->pdev->device) { + case MPI2_MFGPAGE_DEVID_SAS2008: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_INTEL_RMS2LL080_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_INTEL_RMS2LL080_BRANDING); + break; + case MPT2SAS_INTEL_RMS2LL040_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_INTEL_RMS2LL040_BRANDING); + break; + case MPT2SAS_INTEL_SSD910_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_INTEL_SSD910_BRANDING); + break; + default: + ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + case MPI2_MFGPAGE_DEVID_SAS2308_2: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_INTEL_RS25GB008_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_INTEL_RS25GB008_BRANDING); + break; + case MPT2SAS_INTEL_RMS25JB080_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_INTEL_RMS25JB080_BRANDING); + break; + case MPT2SAS_INTEL_RMS25JB040_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_INTEL_RMS25JB040_BRANDING); + break; + case MPT2SAS_INTEL_RMS25KB080_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_INTEL_RMS25KB080_BRANDING); + break; + case MPT2SAS_INTEL_RMS25KB040_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_INTEL_RMS25KB040_BRANDING); + break; + case MPT2SAS_INTEL_RMS25LB040_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_INTEL_RMS25LB040_BRANDING); + break; + case MPT2SAS_INTEL_RMS25LB080_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_INTEL_RMS25LB080_BRANDING); + break; + default: + ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + case MPI25_MFGPAGE_DEVID_SAS3008: + switch (ioc->pdev->subsystem_device) { + case MPT3SAS_INTEL_RMS3JC080_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_INTEL_RMS3JC080_BRANDING); + break; + + case MPT3SAS_INTEL_RS3GC008_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_INTEL_RS3GC008_BRANDING); + break; + case MPT3SAS_INTEL_RS3FC044_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_INTEL_RS3FC044_BRANDING); + break; + case MPT3SAS_INTEL_RS3UC080_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_INTEL_RS3UC080_BRANDING); + break; + default: + ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + default: + ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + case PCI_VENDOR_ID_DELL: + switch (ioc->pdev->device) { + case MPI2_MFGPAGE_DEVID_SAS2008: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING); + break; + case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING); + break; + case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING); + break; + case 
MPT2SAS_DELL_PERC_H200_MODULAR_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING); + break; + case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING); + break; + case MPT2SAS_DELL_PERC_H200_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_DELL_PERC_H200_BRANDING); + break; + case MPT2SAS_DELL_6GBPS_SAS_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_DELL_6GBPS_SAS_BRANDING); + break; + default: + ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + case MPI25_MFGPAGE_DEVID_SAS3008: + switch (ioc->pdev->subsystem_device) { + case MPT3SAS_DELL_12G_HBA_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_DELL_12G_HBA_BRANDING); + break; + default: + ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + default: + ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + case PCI_VENDOR_ID_CISCO: + switch (ioc->pdev->device) { + case MPI25_MFGPAGE_DEVID_SAS3008: + switch (ioc->pdev->subsystem_device) { + case MPT3SAS_CISCO_12G_8E_HBA_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_CISCO_12G_8E_HBA_BRANDING); + break; + case MPT3SAS_CISCO_12G_8I_HBA_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_CISCO_12G_8I_HBA_BRANDING); + break; + case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING); + break; + default: + ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + case MPI25_MFGPAGE_DEVID_SAS3108_1: + switch (ioc->pdev->subsystem_device) { + case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING); + break; + case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING); + break; + default: + ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + default: + ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + case MPT2SAS_HP_3PAR_SSVID: + switch (ioc->pdev->device) { + case MPI2_MFGPAGE_DEVID_SAS2004: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING); + break; + default: + ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + case MPI2_MFGPAGE_DEVID_SAS2308_2: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_HP_2_4_INTERNAL_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_HP_2_4_INTERNAL_BRANDING); + break; + case MPT2SAS_HP_2_4_EXTERNAL_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_HP_2_4_EXTERNAL_BRANDING); + break; + case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING); + break; + case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING); + break; + default: + ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + default: + ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + default: + break; + } +} + +/** + * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg + * version from FW Image Header. 
+ * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. + */ + static int +_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2FWImageHeader_t *fw_img_hdr; + Mpi26ComponentImageHeader_t *cmp_img_hdr; + Mpi25FWUploadRequest_t *mpi_request; + Mpi2FWUploadReply_t mpi_reply; + int r = 0, issue_diag_reset = 0; + u32 package_version = 0; + void *fwpkg_data = NULL; + dma_addr_t fwpkg_data_dma; + u16 smid, ioc_status; + size_t data_length; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + if (ioc->base_cmds.status & MPT3_CMD_PENDING) { + ioc_err(ioc, "%s: internal command already in use\n", __func__); + return -EAGAIN; + } + + data_length = sizeof(Mpi2FWImageHeader_t); + fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, + &fwpkg_data_dma, GFP_KERNEL); + if (!fwpkg_data) { + ioc_err(ioc, + "Memory allocation for fwpkg data failed at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENOMEM; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + r = -EAGAIN; + goto out; + } + + ioc->base_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t)); + mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD; + mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH; + mpi_request->ImageSize = cpu_to_le32(data_length); + ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma, + data_length); + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + /* Wait for 15 seconds */ + wait_for_completion_timeout(&ioc->base_cmds.done, + FW_IMG_HDR_READ_TIMEOUT*HZ); + ioc_info(ioc, "%s: complete\n", __func__); + if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi25FWUploadRequest_t)/4); + issue_diag_reset = 1; + } else { + memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t)); + if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) { + memcpy(&mpi_reply, ioc->base_cmds.reply, + sizeof(Mpi2FWUploadReply_t)); + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data; + if (le32_to_cpu(fw_img_hdr->Signature) == + MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) { + cmp_img_hdr = + (Mpi26ComponentImageHeader_t *) + (fwpkg_data); + package_version = + le32_to_cpu( + cmp_img_hdr->ApplicationSpecific); + } else + package_version = + le32_to_cpu( + fw_img_hdr->PackageVersion.Word); + if (package_version) + ioc_info(ioc, + "FW Package Ver(%02d.%02d.%02d.%02d)\n", + ((package_version) & 0xFF000000) >> 24, + ((package_version) & 0x00FF0000) >> 16, + ((package_version) & 0x0000FF00) >> 8, + (package_version) & 0x000000FF); + } else { + _debug_dump_mf(&mpi_reply, + sizeof(Mpi2FWUploadReply_t)/4); + } + } + } + ioc->base_cmds.status = MPT3_CMD_NOT_USED; +out: + if (fwpkg_data) + dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data, + fwpkg_data_dma); + if (issue_diag_reset) { + if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED) + return -EFAULT; + if (mpt3sas_base_check_for_fault_and_issue_reset(ioc)) + return -EFAULT; + r = -EAGAIN; + } + return r; +} + +/** + * _base_display_ioc_capabilities - Display IOC's capabilities. 
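The FW package version printed above packs four byte-sized fields into one 32-bit word, and the FWVersion decode in the next function follows the same layout. A quick sketch of the xx.xx.xx.xx split:

#include <stdio.h>

int main(void)
{
	unsigned int word = 0x10000a00;		/* example version word */

	printf("%02u.%02u.%02u.%02u\n",
	       (word & 0xFF000000) >> 24, (word & 0x00FF0000) >> 16,
	       (word & 0x0000FF00) >> 8, word & 0x000000FF);
	return 0;
}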
+ * @ioc: per adapter object + */ +static void +_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) +{ + int i = 0; + char desc[17] = {0}; + u32 iounit_pg1_flags; + + strncpy(desc, ioc->manu_pg0.ChipName, 16); + ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n", + desc, + (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, + (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, + (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, + ioc->facts.FWVersion.Word & 0x000000FF, + ioc->pdev->revision); + + _base_display_OEMs_branding(ioc); + + if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { + pr_info("%sNVMe", i ? "," : ""); + i++; + } + + ioc_info(ioc, "Protocol=("); + + if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { + pr_cont("Initiator"); + i++; + } + + if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) { + pr_cont("%sTarget", i ? "," : ""); + i++; + } + + i = 0; + pr_cont("), Capabilities=("); + + if (!ioc->hide_ir_msg) { + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) { + pr_cont("Raid"); + i++; + } + } + + if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) { + pr_cont("%sTLR", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) { + pr_cont("%sMulticast", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) { + pr_cont("%sBIDI Target", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) { + pr_cont("%sEEDP", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) { + pr_cont("%sSnapshot Buffer", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) { + pr_cont("%sDiag Trace Buffer", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) { + pr_cont("%sDiag Extended Buffer", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { + pr_cont("%sTask Set Full", i ? "," : ""); + i++; + } + + iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); + if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) { + pr_cont("%sNCQ", i ? "," : ""); + i++; + } + + pr_cont(")\n"); +} + +/** + * mpt3sas_base_update_missing_delay - change the missing delay timers + * @ioc: per adapter object + * @device_missing_delay: amount of time till device is reported missing + * @io_missing_delay: interval IO is returned when there is a missing device + * + * Passed on the command line, this function will modify the device missing + * delay, as well as the io missing delay. This should be called at driver + * load time. 
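The implementation that follows stores the delay in a 7-bit field and switches to units of 16 seconds once the value no longer fits. Below is a sketch of that encode/decode pair; the flag and mask values are assumptions mirroring MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 and the report-missing timeout mask.

#include <stdint.h>
#include <stdio.h>

#define UNIT_16_FLAG	0x80	/* assumed value of the UNIT_16 bit */
#define TIMEOUT_MASK	0x7F

static uint8_t encode_dmd(uint16_t seconds)
{
	if (seconds > 0x7F) {
		if (seconds > 0x7F0)
			seconds = 0x7F0;	/* cap at 127 * 16 seconds */
		return (seconds / 16) | UNIT_16_FLAG;
	}
	return seconds;
}

static unsigned int decode_dmd(uint8_t dmd)
{
	unsigned int v = dmd & TIMEOUT_MASK;

	return (dmd & UNIT_16_FLAG) ? v * 16 : v;
}

int main(void)
{
	unsigned int enc = encode_dmd(300);

	printf("300s encodes to 0x%02x, reads back as %us\n",
	       enc, decode_dmd(enc));
	return 0;
}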
+ */ +void +mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, + u16 device_missing_delay, u8 io_missing_delay) +{ + u16 dmd, dmd_new, dmd_orignal; + u8 io_missing_delay_original; + u16 sz; + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi2ConfigReply_t mpi_reply; + u8 num_phys = 0; + u16 ioc_status; + + mpt3sas_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) + return; + + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys * + sizeof(Mpi2SasIOUnit1PhyData_t)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + /* device missing delay */ + dmd = sas_iounit_pg1->ReportDeviceMissingDelay; + if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) + dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; + else + dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; + dmd_orignal = dmd; + if (device_missing_delay > 0x7F) { + dmd = (device_missing_delay > 0x7F0) ? 0x7F0 : + device_missing_delay; + dmd = dmd / 16; + dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16; + } else + dmd = device_missing_delay; + sas_iounit_pg1->ReportDeviceMissingDelay = dmd; + + /* io missing delay */ + io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay; + sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay; + + if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, + sz)) { + if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) + dmd_new = (dmd & + MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; + else + dmd_new = + dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; + ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n", + dmd_orignal, dmd_new); + ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n", + io_missing_delay_original, + io_missing_delay); + ioc->device_missing_delay = dmd_new; + ioc->io_missing_delay = io_missing_delay; + } + +out: + kfree(sas_iounit_pg1); +} + +/** + * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields + * according to performance mode. + * @ioc : per adapter object + * + * Return: zero on success; otherwise return EAGAIN error code asking the + * caller to retry. + */ +static int +_base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2IOCPage1_t ioc_pg1; + Mpi2ConfigReply_t mpi_reply; + int rc; + + rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy); + if (rc) + return rc; + memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t)); + + switch (perf_mode) { + case MPT_PERF_MODE_DEFAULT: + case MPT_PERF_MODE_BALANCED: + if (ioc->high_iops_queues) { + ioc_info(ioc, + "Enable interrupt coalescing only for first\t" + "%d reply queues\n", + MPT3SAS_HIGH_IOPS_REPLY_QUEUES); + /* + * If 31st bit is zero then interrupt coalescing is + * enabled for all reply descriptor post queues. + * If 31st bit is set to one then user can + * enable/disable interrupt coalescing on per reply + * descriptor post queue group(8) basis. So to enable + * interrupt coalescing only on first reply descriptor + * post queue group 31st bit and zero th bit is enabled. 
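Putting the comment above into numbers, assuming eight high-IOPS queues: bit 31 selects per-group control and the low bits pick which groups of eight reply descriptor post queues keep interrupt coalescing, so only the first group stays coalesced.

#include <stdio.h>

int main(void)
{
	unsigned int high_iops_queues = 8;	/* assumed queue count */
	unsigned int product_specific =
		0x80000000u | ((1u << (high_iops_queues / 8)) - 1);

	printf("IOC Page 1 ProductSpecific = %#x\n", product_specific);
	return 0;
}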
+ */ + ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 | + ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1)); + rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1); + if (rc) + return rc; + ioc_info(ioc, "performance mode: balanced\n"); + return 0; + } + fallthrough; + case MPT_PERF_MODE_LATENCY: + /* + * Enable interrupt coalescing on all reply queues + * with timeout value 0xA + */ + ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa); + ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING); + ioc_pg1.ProductSpecific = 0; + rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1); + if (rc) + return rc; + ioc_info(ioc, "performance mode: latency\n"); + break; + case MPT_PERF_MODE_IOPS: + /* + * Enable interrupt coalescing on all reply queues. + */ + ioc_info(ioc, + "performance mode: iops with coalescing timeout: 0x%x\n", + le32_to_cpu(ioc_pg1.CoalescingTimeout)); + ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING); + ioc_pg1.ProductSpecific = 0; + rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1); + if (rc) + return rc; + break; + } + return 0; +} + +/** + * _base_get_event_diag_triggers - get event diag trigger values from + * persistent pages + * @ioc : per adapter object + * + * Return: nothing. + */ +static int +_base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26DriverTriggerPage2_t trigger_pg2; + struct SL_WH_EVENT_TRIGGER_T *event_tg; + MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY *mpi_event_tg; + Mpi2ConfigReply_t mpi_reply; + int r = 0, i = 0; + u16 count = 0; + u16 ioc_status; + + r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply, + &trigger_pg2); + if (r) + return r; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dinitprintk(ioc, + ioc_err(ioc, + "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n", + __func__, ioc_status)); + return 0; + } + + if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) { + count = le16_to_cpu(trigger_pg2.NumMPIEventTrigger); + count = min_t(u16, NUM_VALID_ENTRIES, count); + ioc->diag_trigger_event.ValidEntries = count; + + event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0]; + mpi_event_tg = &trigger_pg2.MPIEventTriggers[0]; + for (i = 0; i < count; i++) { + event_tg->EventValue = le16_to_cpu( + mpi_event_tg->MPIEventCode); + event_tg->LogEntryQualifier = le16_to_cpu( + mpi_event_tg->MPIEventCodeSpecific); + event_tg++; + mpi_event_tg++; + } + } + return 0; +} + +/** + * _base_get_scsi_diag_triggers - get scsi diag trigger values from + * persistent pages + * @ioc : per adapter object + * + * Return: 0 on success; otherwise return failure status. 
+ */ +static int +_base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26DriverTriggerPage3_t trigger_pg3; + struct SL_WH_SCSI_TRIGGER_T *scsi_tg; + MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY *mpi_scsi_tg; + Mpi2ConfigReply_t mpi_reply; + int r = 0, i = 0; + u16 count = 0; + u16 ioc_status; + + r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply, + &trigger_pg3); + if (r) + return r; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dinitprintk(ioc, + ioc_err(ioc, + "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n", + __func__, ioc_status)); + return 0; + } + + if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) { + count = le16_to_cpu(trigger_pg3.NumSCSISenseTrigger); + count = min_t(u16, NUM_VALID_ENTRIES, count); + ioc->diag_trigger_scsi.ValidEntries = count; + + scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0]; + mpi_scsi_tg = &trigger_pg3.SCSISenseTriggers[0]; + for (i = 0; i < count; i++) { + scsi_tg->ASCQ = mpi_scsi_tg->ASCQ; + scsi_tg->ASC = mpi_scsi_tg->ASC; + scsi_tg->SenseKey = mpi_scsi_tg->SenseKey; + + scsi_tg++; + mpi_scsi_tg++; + } + } + return 0; +} + +/** + * _base_get_mpi_diag_triggers - get mpi diag trigger values from + * persistent pages + * @ioc : per adapter object + * + * Return: 0 on success; otherwise return failure status. + */ +static int +_base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26DriverTriggerPage4_t trigger_pg4; + struct SL_WH_MPI_TRIGGER_T *status_tg; + MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY *mpi_status_tg; + Mpi2ConfigReply_t mpi_reply; + int r = 0, i = 0; + u16 count = 0; + u16 ioc_status; + + r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply, + &trigger_pg4); + if (r) + return r; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dinitprintk(ioc, + ioc_err(ioc, + "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n", + __func__, ioc_status)); + return 0; + } + + if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) { + count = le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger); + count = min_t(u16, NUM_VALID_ENTRIES, count); + ioc->diag_trigger_mpi.ValidEntries = count; + + status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0]; + mpi_status_tg = &trigger_pg4.IOCStatusLoginfoTriggers[0]; + + for (i = 0; i < count; i++) { + status_tg->IOCStatus = le16_to_cpu( + mpi_status_tg->IOCStatus); + status_tg->IocLogInfo = le32_to_cpu( + mpi_status_tg->LogInfo); + + status_tg++; + mpi_status_tg++; + } + } + return 0; +} + +/** + * _base_get_master_diag_triggers - get master diag trigger values from + * persistent pages + * @ioc : per adapter object + * + * Return: nothing. 
+ */ +static int +_base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26DriverTriggerPage1_t trigger_pg1; + Mpi2ConfigReply_t mpi_reply; + int r; + u16 ioc_status; + + r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply, + &trigger_pg1); + if (r) + return r; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dinitprintk(ioc, + ioc_err(ioc, + "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n", + __func__, ioc_status)); + return 0; + } + + if (le16_to_cpu(trigger_pg1.NumMasterTrigger)) + ioc->diag_trigger_master.MasterData |= + le32_to_cpu( + trigger_pg1.MasterTriggers[0].MasterTriggerFlags); + return 0; +} + +/** + * _base_check_for_trigger_pages_support - checks whether HBA FW supports + * driver trigger pages or not + * @ioc : per adapter object + * @trigger_flags : address where trigger page0's TriggerFlags value is copied + * + * Return: trigger flags mask if HBA FW supports driver trigger pages; + * otherwise returns %-EFAULT if driver trigger pages are not supported by FW or + * return EAGAIN if diag reset occurred due to FW fault and asking the + * caller to retry the command. + * + */ +static int +_base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc, u32 *trigger_flags) +{ + Mpi26DriverTriggerPage0_t trigger_pg0; + int r = 0; + Mpi2ConfigReply_t mpi_reply; + u16 ioc_status; + + r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply, + &trigger_pg0); + if (r) + return r; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + return -EFAULT; + + *trigger_flags = le16_to_cpu(trigger_pg0.TriggerFlags); + return 0; +} + +/** + * _base_get_diag_triggers - Retrieve diag trigger values from + * persistent pages. + * @ioc : per adapter object + * + * Return: zero on success; otherwise return EAGAIN error codes + * asking the caller to retry. + */ +static int +_base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc) +{ + int trigger_flags; + int r; + + /* + * Default setting of master trigger. + */ + ioc->diag_trigger_master.MasterData = + (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); + + r = _base_check_for_trigger_pages_support(ioc, &trigger_flags); + if (r) { + if (r == -EAGAIN) + return r; + /* + * Don't go for error handling when FW doesn't support + * driver trigger pages. + */ + return 0; + } + + ioc->supports_trigger_pages = 1; + + /* + * Retrieve master diag trigger values from driver trigger pg1 + * if master trigger bit enabled in TriggerFlags. + */ + if ((u16)trigger_flags & + MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID) { + r = _base_get_master_diag_triggers(ioc); + if (r) + return r; + } + + /* + * Retrieve event diag trigger values from driver trigger pg2 + * if event trigger bit enabled in TriggerFlags. + */ + if ((u16)trigger_flags & + MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID) { + r = _base_get_event_diag_triggers(ioc); + if (r) + return r; + } + + /* + * Retrieve scsi diag trigger values from driver trigger pg3 + * if scsi trigger bit enabled in TriggerFlags. + */ + if ((u16)trigger_flags & + MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID) { + r = _base_get_scsi_diag_triggers(ioc); + if (r) + return r; + } + /* + * Retrieve mpi error diag trigger values from driver trigger pg4 + * if loginfo trigger bit enabled in TriggerFlags. 
+ */ + if ((u16)trigger_flags & + MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID) { + r = _base_get_mpi_diag_triggers(ioc); + if (r) + return r; + } + return 0; +} + +/** + * _base_update_diag_trigger_pages - Update the driver trigger pages after + * online FW update, in case updated FW supports driver + * trigger pages. + * @ioc : per adapter object + * + * Return: nothing. + */ +static void +_base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc) +{ + + if (ioc->diag_trigger_master.MasterData) + mpt3sas_config_update_driver_trigger_pg1(ioc, + &ioc->diag_trigger_master, 1); + + if (ioc->diag_trigger_event.ValidEntries) + mpt3sas_config_update_driver_trigger_pg2(ioc, + &ioc->diag_trigger_event, 1); + + if (ioc->diag_trigger_scsi.ValidEntries) + mpt3sas_config_update_driver_trigger_pg3(ioc, + &ioc->diag_trigger_scsi, 1); + + if (ioc->diag_trigger_mpi.ValidEntries) + mpt3sas_config_update_driver_trigger_pg4(ioc, + &ioc->diag_trigger_mpi, 1); +} + +/** + * _base_assign_fw_reported_qd - Get FW reported QD for SAS/SATA devices. + * - On failure set default QD values. + * @ioc : per adapter object + * + * Returns 0 for success, non-zero for failure. + * + */ +static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1; + u16 depth; + int sz; + int rc = 0; + + ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH; + ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH; + ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH; + ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH; + if (!ioc->is_gen35_ioc) + goto out; + /* sas iounit page 1 */ + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData); + sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err("%s: failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return rc; + } + rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz); + if (rc) { + pr_err("%s: failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + + depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth); + ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH); + + depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth); + ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH); + + depth = sas_iounit_pg1->SATAMaxQDepth; + ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH); + + /* pcie iounit page 1 */ + rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply, + &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t)); + if (rc) { + pr_err("%s: failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ? + (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) : + MPT3SAS_NVME_QUEUE_DEPTH; +out: + dinitprintk(ioc, pr_err( + "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n", + ioc->max_wideport_qd, ioc->max_narrowport_qd, + ioc->max_sata_qd, ioc->max_nvme_qd)); + kfree(sas_iounit_pg1); + return rc; +} + +/** + * mpt3sas_atto_validate_nvram - validate the ATTO nvram read from mfg pg1 + * + * @ioc : per adapter object + * @n : ptr to the ATTO nvram structure + * Return: 0 for success, non-zero for failure. 
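+ *
+ * Editor's note, an illustrative sketch (not part of the upstream source):
+ * the checksum loop below passes only when the seed plus the byte-sum of the
+ * whole structure wraps to zero modulo 256, i.e. a (hypothetical) tool that
+ * writes the NVRAM image must choose its checksum byte so that
+ *
+ *	(ATTO_SASNVR_CKSUM_SEED + sum of all bytes of *n) % 256 == 0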
+ */ +static int +mpt3sas_atto_validate_nvram(struct MPT3SAS_ADAPTER *ioc, + struct ATTO_SAS_NVRAM *n) +{ + int r = -EINVAL; + union ATTO_SAS_ADDRESS *s1; + u32 len; + u8 *pb; + u8 ckSum; + + /* validate nvram checksum */ + pb = (u8 *) n; + ckSum = ATTO_SASNVR_CKSUM_SEED; + len = sizeof(struct ATTO_SAS_NVRAM); + + while (len--) + ckSum = ckSum + pb[len]; + + if (ckSum) { + ioc_err(ioc, "Invalid ATTO NVRAM checksum\n"); + return r; + } + + s1 = (union ATTO_SAS_ADDRESS *) n->SasAddr; + + if (n->Signature[0] != 'E' + || n->Signature[1] != 'S' + || n->Signature[2] != 'A' + || n->Signature[3] != 'S') + ioc_err(ioc, "Invalid ATTO NVRAM signature\n"); + else if (n->Version > ATTO_SASNVR_VERSION) + ioc_info(ioc, "Invalid ATTO NVRAM version"); + else if ((n->SasAddr[7] & (ATTO_SAS_ADDR_ALIGN - 1)) + || s1->b[0] != 0x50 + || s1->b[1] != 0x01 + || s1->b[2] != 0x08 + || (s1->b[3] & 0xF0) != 0x60 + || ((s1->b[3] & 0x0F) | le32_to_cpu(s1->d[1])) == 0) { + ioc_err(ioc, "Invalid ATTO SAS address\n"); + } else + r = 0; + return r; +} + +/** + * mpt3sas_atto_get_sas_addr - get the ATTO SAS address from mfg page 1 + * + * @ioc : per adapter object + * @sas_addr : return sas address + * Return: 0 for success, non-zero for failure. + */ +static int +mpt3sas_atto_get_sas_addr(struct MPT3SAS_ADAPTER *ioc, union ATTO_SAS_ADDRESS *sas_addr) +{ + Mpi2ManufacturingPage1_t mfg_pg1; + Mpi2ConfigReply_t mpi_reply; + struct ATTO_SAS_NVRAM *nvram; + int r; + __be64 addr; + + r = mpt3sas_config_get_manufacturing_pg1(ioc, &mpi_reply, &mfg_pg1); + if (r) { + ioc_err(ioc, "Failed to read manufacturing page 1\n"); + return r; + } + + /* validate nvram */ + nvram = (struct ATTO_SAS_NVRAM *) mfg_pg1.VPD; + r = mpt3sas_atto_validate_nvram(ioc, nvram); + if (r) + return r; + + addr = *((__be64 *) nvram->SasAddr); + sas_addr->q = cpu_to_le64(be64_to_cpu(addr)); + return r; +} + +/** + * mpt3sas_atto_init - perform initialization for ATTO branded + * adapter. + * @ioc : per adapter object + * + * Return: 0 for success, non-zero for failure.
+ */ +static int +mpt3sas_atto_init(struct MPT3SAS_ADAPTER *ioc) +{ + int sz = 0; + Mpi2BiosPage4_t *bios_pg4 = NULL; + Mpi2ConfigReply_t mpi_reply; + int r; + int ix; + union ATTO_SAS_ADDRESS sas_addr; + union ATTO_SAS_ADDRESS temp; + union ATTO_SAS_ADDRESS bias; + + r = mpt3sas_atto_get_sas_addr(ioc, &sas_addr); + if (r) + return r; + + /* get header first to get size */ + r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, NULL, 0); + if (r) { + ioc_err(ioc, "Failed to read ATTO bios page 4 header.\n"); + return r; + } + + sz = mpi_reply.Header.PageLength * sizeof(u32); + bios_pg4 = kzalloc(sz, GFP_KERNEL); + if (!bios_pg4) { + ioc_err(ioc, "Failed to allocate memory for ATTO bios page.\n"); + return -ENOMEM; + } + + /* read bios page 4 */ + r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, bios_pg4, sz); + if (r) { + ioc_err(ioc, "Failed to read ATTO bios page 4\n"); + goto out; + } + + /* Update bios page 4 with the ATTO WWID */ + bias.q = sas_addr.q; + bias.b[7] += ATTO_SAS_ADDR_DEVNAME_BIAS; + + for (ix = 0; ix < bios_pg4->NumPhys; ix++) { + temp.q = sas_addr.q; + temp.b[7] += ix; + bios_pg4->Phy[ix].ReassignmentWWID = temp.q; + bios_pg4->Phy[ix].ReassignmentDeviceName = bias.q; + } + r = mpt3sas_config_set_bios_pg4(ioc, &mpi_reply, bios_pg4, sz); + +out: + kfree(bios_pg4); + return r; +} + +/** + * _base_static_config_pages - static start of day config pages + * @ioc: per adapter object + */ +static int +_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2ConfigReply_t mpi_reply; + u32 iounit_pg1_flags; + int tg_flags = 0; + int rc; + ioc->nvme_abort_timeout = 30; + + rc = mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, + &ioc->manu_pg0); + if (rc) + return rc; + if (ioc->ir_firmware) { + rc = mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply, + &ioc->manu_pg10); + if (rc) + return rc; + } + + if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) { + rc = mpt3sas_atto_init(ioc); + if (rc) + return rc; + } + + /* + * Ensure correct T10 PI operation if vendor left EEDPTagMode + * flag unset in NVDATA. + */ + rc = mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, + &ioc->manu_pg11); + if (rc) + return rc; + if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) { + pr_err("%s: overriding NVDATA EEDPTagMode setting\n", + ioc->name); + ioc->manu_pg11.EEDPTagMode &= ~0x3; + ioc->manu_pg11.EEDPTagMode |= 0x1; + mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply, + &ioc->manu_pg11); + } + if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK) + ioc->tm_custom_handling = 1; + else { + ioc->tm_custom_handling = 0; + if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT) + ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT; + else if (ioc->manu_pg11.NVMeAbortTO > + NVME_TASK_ABORT_MAX_TIMEOUT) + ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT; + else + ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO; + } + ioc->time_sync_interval = + ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK; + if (ioc->time_sync_interval) { + if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK) + ioc->time_sync_interval = + ioc->time_sync_interval * SECONDS_PER_HOUR; + else + ioc->time_sync_interval = + ioc->time_sync_interval * SECONDS_PER_MIN; + dinitprintk(ioc, ioc_info(ioc, + "Driver-FW TimeSync interval is %d seconds. ManuPg11 TimeSync Unit is in %s\n", + ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval & + MPT3SAS_TIMESYNC_UNIT_MASK) ? 
"Hour" : "Minute")); + } else { + if (ioc->is_gen35_ioc) + ioc_warn(ioc, + "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n"); + } + rc = _base_assign_fw_reported_qd(ioc); + if (rc) + return rc; + + /* + * ATTO doesn't use bios page 2 and 3 for bios settings. + */ + if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) + ioc->bios_pg3.BiosVersion = 0; + else { + rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2); + if (rc) + return rc; + rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3); + if (rc) + return rc; + } + + rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); + if (rc) + return rc; + rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); + if (rc) + return rc; + rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); + if (rc) + return rc; + rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8); + if (rc) + return rc; + _base_display_ioc_capabilities(ioc); + + /* + * Enable task_set_full handling in iounit_pg1 when the + * facts capabilities indicate that its supported. + */ + iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); + if ((ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING)) + iounit_pg1_flags &= + ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; + else + iounit_pg1_flags |= + MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; + ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); + rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); + if (rc) + return rc; + + if (ioc->iounit_pg8.NumSensors) + ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors; + if (ioc->is_aero_ioc) { + rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc); + if (rc) + return rc; + } + if (ioc->is_gen35_ioc) { + if (ioc->is_driver_loading) { + rc = _base_get_diag_triggers(ioc); + if (rc) + return rc; + } else { + /* + * In case of online HBA FW update operation, + * check whether updated FW supports the driver trigger + * pages or not. + * - If previous FW has not supported driver trigger + * pages and newer FW supports them then update these + * pages with current diag trigger values. + * - If previous FW has supported driver trigger pages + * and new FW doesn't support them then disable + * support_trigger_pages flag. + */ + _base_check_for_trigger_pages_support(ioc, &tg_flags); + if (!ioc->supports_trigger_pages && tg_flags != -EFAULT) + _base_update_diag_trigger_pages(ioc); + else if (ioc->supports_trigger_pages && + tg_flags == -EFAULT) + ioc->supports_trigger_pages = 0; + } + } + return 0; +} + +/** + * mpt3sas_free_enclosure_list - release memory + * @ioc: per adapter object + * + * Free memory allocated during enclosure add. + */ +void +mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc) +{ + struct _enclosure_node *enclosure_dev, *enclosure_dev_next; + + /* Free enclosure list */ + list_for_each_entry_safe(enclosure_dev, + enclosure_dev_next, &ioc->enclosure_list, list) { + list_del(&enclosure_dev->list); + kfree(enclosure_dev); + } +} + +/** + * _base_release_memory_pools - release memory + * @ioc: per adapter object + * + * Free memory allocated from _base_allocate_memory_pools. + */ +static void +_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) +{ + int i = 0; + int j = 0; + int dma_alloc_count = 0; + struct chain_tracker *ct; + int count = ioc->rdpq_array_enable ? 
ioc->reply_queue_count : 1; + + dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + if (ioc->request) { + dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz, + ioc->request, ioc->request_dma); + dexitprintk(ioc, + ioc_info(ioc, "request_pool(0x%p): free\n", + ioc->request)); + ioc->request = NULL; + } + + if (ioc->sense) { + dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); + dma_pool_destroy(ioc->sense_dma_pool); + dexitprintk(ioc, + ioc_info(ioc, "sense_pool(0x%p): free\n", + ioc->sense)); + ioc->sense = NULL; + } + + if (ioc->reply) { + dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma); + dma_pool_destroy(ioc->reply_dma_pool); + dexitprintk(ioc, + ioc_info(ioc, "reply_pool(0x%p): free\n", + ioc->reply)); + ioc->reply = NULL; + } + + if (ioc->reply_free) { + dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free, + ioc->reply_free_dma); + dma_pool_destroy(ioc->reply_free_dma_pool); + dexitprintk(ioc, + ioc_info(ioc, "reply_free_pool(0x%p): free\n", + ioc->reply_free)); + ioc->reply_free = NULL; + } + + if (ioc->reply_post) { + dma_alloc_count = DIV_ROUND_UP(count, + RDPQ_MAX_INDEX_IN_ONE_CHUNK); + for (i = 0; i < count; i++) { + if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0 + && dma_alloc_count) { + if (ioc->reply_post[i].reply_post_free) { + dma_pool_free( + ioc->reply_post_free_dma_pool, + ioc->reply_post[i].reply_post_free, + ioc->reply_post[i].reply_post_free_dma); + dexitprintk(ioc, ioc_info(ioc, + "reply_post_free_pool(0x%p): free\n", + ioc->reply_post[i].reply_post_free)); + ioc->reply_post[i].reply_post_free = + NULL; + } + --dma_alloc_count; + } + } + dma_pool_destroy(ioc->reply_post_free_dma_pool); + if (ioc->reply_post_free_array && + ioc->rdpq_array_enable) { + dma_pool_free(ioc->reply_post_free_array_dma_pool, + ioc->reply_post_free_array, + ioc->reply_post_free_array_dma); + ioc->reply_post_free_array = NULL; + } + dma_pool_destroy(ioc->reply_post_free_array_dma_pool); + kfree(ioc->reply_post); + } + + if (ioc->pcie_sgl_dma_pool) { + for (i = 0; i < ioc->scsiio_depth; i++) { + dma_pool_free(ioc->pcie_sgl_dma_pool, + ioc->pcie_sg_lookup[i].pcie_sgl, + ioc->pcie_sg_lookup[i].pcie_sgl_dma); + ioc->pcie_sg_lookup[i].pcie_sgl = NULL; + } + dma_pool_destroy(ioc->pcie_sgl_dma_pool); + } + kfree(ioc->pcie_sg_lookup); + ioc->pcie_sg_lookup = NULL; + + if (ioc->config_page) { + dexitprintk(ioc, + ioc_info(ioc, "config_page(0x%p): free\n", + ioc->config_page)); + dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz, + ioc->config_page, ioc->config_page_dma); + } + + kfree(ioc->hpr_lookup); + ioc->hpr_lookup = NULL; + kfree(ioc->internal_lookup); + ioc->internal_lookup = NULL; + if (ioc->chain_lookup) { + for (i = 0; i < ioc->scsiio_depth; i++) { + for (j = ioc->chains_per_prp_buffer; + j < ioc->chains_needed_per_io; j++) { + ct = &ioc->chain_lookup[i].chains_per_smid[j]; + if (ct && ct->chain_buffer) + dma_pool_free(ioc->chain_dma_pool, + ct->chain_buffer, + ct->chain_buffer_dma); + } + kfree(ioc->chain_lookup[i].chains_per_smid); + } + dma_pool_destroy(ioc->chain_dma_pool); + kfree(ioc->chain_lookup); + ioc->chain_lookup = NULL; + } + + kfree(ioc->io_queue_num); + ioc->io_queue_num = NULL; +} + +/** + * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are + * having same upper 32bits in their base memory address. 
+ * @start_address: Base address of a reply queue set + * @pool_sz: Size of single Reply Descriptor Post Queues pool size + * + * Return: 1 if reply queues in a set have a same upper 32bits in their base + * memory address, else 0. + */ +static int +mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz) +{ + dma_addr_t end_address; + + end_address = start_address + pool_sz - 1; + + if (upper_32_bits(start_address) == upper_32_bits(end_address)) + return 1; + else + return 0; +} + +/** + * _base_reduce_hba_queue_depth- Retry with reduced queue depth + * @ioc: Adapter object + * + * Return: 0 for success, non-zero for failure. + **/ +static inline int +_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc) +{ + int reduce_sz = 64; + + if ((ioc->hba_queue_depth - reduce_sz) > + (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) { + ioc->hba_queue_depth -= reduce_sz; + return 0; + } else + return -ENOMEM; +} + +/** + * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory + * for pcie sgl pools. + * @ioc: Adapter object + * @sz: DMA Pool size + * + * Return: 0 for success, non-zero for failure. + */ + +static int +_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + int i = 0, j = 0; + struct chain_tracker *ct; + + ioc->pcie_sgl_dma_pool = + dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, + ioc->page_size, 0); + if (!ioc->pcie_sgl_dma_pool) { + ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n"); + return -ENOMEM; + } + + ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz; + ioc->chains_per_prp_buffer = + min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io); + for (i = 0; i < ioc->scsiio_depth; i++) { + ioc->pcie_sg_lookup[i].pcie_sgl = + dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL, + &ioc->pcie_sg_lookup[i].pcie_sgl_dma); + if (!ioc->pcie_sg_lookup[i].pcie_sgl) { + ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n"); + return -EAGAIN; + } + + if (!mpt3sas_check_same_4gb_region( + ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) { + ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n", + ioc->pcie_sg_lookup[i].pcie_sgl, + (unsigned long long) + ioc->pcie_sg_lookup[i].pcie_sgl_dma); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + + for (j = 0; j < ioc->chains_per_prp_buffer; j++) { + ct = &ioc->chain_lookup[i].chains_per_smid[j]; + ct->chain_buffer = + ioc->pcie_sg_lookup[i].pcie_sgl + + (j * ioc->chain_segment_sz); + ct->chain_buffer_dma = + ioc->pcie_sg_lookup[i].pcie_sgl_dma + + (j * ioc->chain_segment_sz); + } + } + dinitprintk(ioc, ioc_info(ioc, + "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n", + ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024)); + dinitprintk(ioc, ioc_info(ioc, + "Number of chains can fit in a PRP page(%d)\n", + ioc->chains_per_prp_buffer)); + return 0; +} + +/** + * _base_allocate_chain_dma_pool - Allocating DMA'able memory + * for chain dma pool. + * @ioc: Adapter object + * @sz: DMA Pool size + * + * Return: 0 for success, non-zero for failure. 
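+ *
+ * Editor's note, a worked example (not part of the upstream kernel-doc) of the
+ * mpt3sas_check_same_4gb_region() test that this pool and the other DMA pools
+ * rely on: for a hypothetical buffer at DMA address 0x00000000fffff000 with
+ * sz = 0x2000, the last byte sits at 0x0000000100000fff, upper_32_bits()
+ * differs (0x0 vs 0x1), so the buffer straddles a 4GB boundary, the check
+ * returns 0 and the caller falls back to a 32-bit DMA mask and reallocates.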
+ */ +static int +_base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + int i = 0, j = 0; + struct chain_tracker *ctr; + + ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev, + ioc->chain_segment_sz, 16, 0); + if (!ioc->chain_dma_pool) + return -ENOMEM; + + for (i = 0; i < ioc->scsiio_depth; i++) { + for (j = ioc->chains_per_prp_buffer; + j < ioc->chains_needed_per_io; j++) { + ctr = &ioc->chain_lookup[i].chains_per_smid[j]; + ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool, + GFP_KERNEL, &ctr->chain_buffer_dma); + if (!ctr->chain_buffer) + return -EAGAIN; + if (!mpt3sas_check_same_4gb_region( + ctr->chain_buffer_dma, ioc->chain_segment_sz)) { + ioc_err(ioc, + "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n", + ctr->chain_buffer, + (unsigned long long)ctr->chain_buffer_dma); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + } + } + dinitprintk(ioc, ioc_info(ioc, + "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n", + ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth * + (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) * + ioc->chain_segment_sz))/1024)); + return 0; +} + +/** + * _base_allocate_sense_dma_pool - Allocating DMA'able memory + * for sense dma pool. + * @ioc: Adapter object + * @sz: DMA Pool size + * Return: 0 for success, non-zero for failure. + */ +static int +_base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + ioc->sense_dma_pool = + dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0); + if (!ioc->sense_dma_pool) + return -ENOMEM; + ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, + GFP_KERNEL, &ioc->sense_dma); + if (!ioc->sense) + return -EAGAIN; + if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) { + dinitprintk(ioc, pr_err( + "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n", + ioc->sense, (unsigned long long) ioc->sense_dma)); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + ioc_info(ioc, + "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n", + ioc->sense, (unsigned long long)ioc->sense_dma, + ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024); + return 0; +} + +/** + * _base_allocate_reply_pool - Allocating DMA'able memory + * for reply pool. + * @ioc: Adapter object + * @sz: DMA Pool size + * Return: 0 for success, non-zero for failure. + */ +static int +_base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + /* reply pool, 4 byte align */ + ioc->reply_dma_pool = dma_pool_create("reply pool", + &ioc->pdev->dev, sz, 4, 0); + if (!ioc->reply_dma_pool) + return -ENOMEM; + ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL, + &ioc->reply_dma); + if (!ioc->reply) + return -EAGAIN; + if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) { + dinitprintk(ioc, pr_err( + "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n", + ioc->reply, (unsigned long long) ioc->reply_dma)); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + ioc->reply_dma_min_address = (u32)(ioc->reply_dma); + ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz; + ioc_info(ioc, + "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->reply, (unsigned long long)ioc->reply_dma, + ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024); + return 0; +} + +/** + * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory + * for reply free dma pool. + * @ioc: Adapter object + * @sz: DMA Pool size + * Return: 0 for success, non-zero for failure. 
+ */ +static int +_base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + /* reply free queue, 16 byte align */ + ioc->reply_free_dma_pool = dma_pool_create( + "reply_free pool", &ioc->pdev->dev, sz, 16, 0); + if (!ioc->reply_free_dma_pool) + return -ENOMEM; + ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, + GFP_KERNEL, &ioc->reply_free_dma); + if (!ioc->reply_free) + return -EAGAIN; + if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) { + dinitprintk(ioc, + pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n", + ioc->reply_free, (unsigned long long) ioc->reply_free_dma)); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + memset(ioc->reply_free, 0, sz); + dinitprintk(ioc, ioc_info(ioc, + "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n", + ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024)); + dinitprintk(ioc, ioc_info(ioc, + "reply_free_dma (0x%llx)\n", + (unsigned long long)ioc->reply_free_dma)); + return 0; +} + +/** + * _base_allocate_reply_post_free_array - Allocating DMA'able memory + * for reply post free array. + * @ioc: Adapter object + * @reply_post_free_array_sz: DMA Pool size + * Return: 0 for success, non-zero for failure. + */ + +static int +_base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc, + u32 reply_post_free_array_sz) +{ + ioc->reply_post_free_array_dma_pool = + dma_pool_create("reply_post_free_array pool", + &ioc->pdev->dev, reply_post_free_array_sz, 16, 0); + if (!ioc->reply_post_free_array_dma_pool) + return -ENOMEM; + ioc->reply_post_free_array = + dma_pool_alloc(ioc->reply_post_free_array_dma_pool, + GFP_KERNEL, &ioc->reply_post_free_array_dma); + if (!ioc->reply_post_free_array) + return -EAGAIN; + if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma, + reply_post_free_array_sz)) { + dinitprintk(ioc, pr_err( + "Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n", + ioc->reply_free, + (unsigned long long) ioc->reply_free_dma)); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + return 0; +} +/** + * base_alloc_rdpq_dma_pool - Allocating DMA'able memory + * for reply queues. + * @ioc: per adapter object + * @sz: DMA Pool size + * Return: 0 for success, non-zero for failure. + */ +static int +base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz) +{ + int i = 0; + u32 dma_alloc_count = 0; + int reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(Mpi2DefaultReplyDescriptor_t); + int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1; + + ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct), + GFP_KERNEL); + if (!ioc->reply_post) + return -ENOMEM; + /* + * For INVADER_SERIES each set of 8 reply queues(0-7, 8-15, ..) and + * VENTURA_SERIES each set of 16 reply queues(0-15, 16-31, ..) should + * be within 4GB boundary i.e reply queues in a set must have same + * upper 32-bits in their memory address. so here driver is allocating + * the DMA'able memory for reply queues according. + * Driver uses limitation of + * VENTURA_SERIES to manage INVADER_SERIES as well. 
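+ *
+ * Editor's note, a worked example (not part of the upstream comment), assuming
+ * RDPQ_MAX_INDEX_IN_ONE_CHUNK is 16: with rdpq_array_enable set and
+ * reply_queue_count = 24, dma_alloc_count = DIV_ROUND_UP(24, 16) = 2, so the
+ * loop below calls dma_pool_zalloc() only for i = 0 and i = 16; queues 1-15
+ * and 17-23 are carved out of those two chunks at reply_post_free_sz byte
+ * offsets, keeping each set of 16 queues inside one allocation that
+ * mpt3sas_check_same_4gb_region() has verified to sit in a single 4GB region.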
+ */ + dma_alloc_count = DIV_ROUND_UP(count, + RDPQ_MAX_INDEX_IN_ONE_CHUNK); + ioc->reply_post_free_dma_pool = + dma_pool_create("reply_post_free pool", + &ioc->pdev->dev, sz, 16, 0); + if (!ioc->reply_post_free_dma_pool) + return -ENOMEM; + for (i = 0; i < count; i++) { + if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) { + ioc->reply_post[i].reply_post_free = + dma_pool_zalloc(ioc->reply_post_free_dma_pool, + GFP_KERNEL, + &ioc->reply_post[i].reply_post_free_dma); + if (!ioc->reply_post[i].reply_post_free) + return -ENOMEM; + /* + * Each set of RDPQ pool must satisfy 4gb boundary + * restriction. + * 1) Check if allocated resources for RDPQ pool are in + * the same 4GB range. + * 2) If #1 is true, continue with 64 bit DMA. + * 3) If #1 is false, return 1. which means free all the + * resources and set DMA mask to 32 and allocate. + */ + if (!mpt3sas_check_same_4gb_region( + ioc->reply_post[i].reply_post_free_dma, sz)) { + dinitprintk(ioc, + ioc_err(ioc, "bad Replypost free pool(0x%p)" + "reply_post_free_dma = (0x%llx)\n", + ioc->reply_post[i].reply_post_free, + (unsigned long long) + ioc->reply_post[i].reply_post_free_dma)); + return -EAGAIN; + } + dma_alloc_count--; + + } else { + ioc->reply_post[i].reply_post_free = + (Mpi2ReplyDescriptorsUnion_t *) + ((long)ioc->reply_post[i-1].reply_post_free + + reply_post_free_sz); + ioc->reply_post[i].reply_post_free_dma = + (dma_addr_t) + (ioc->reply_post[i-1].reply_post_free_dma + + reply_post_free_sz); + } + } + return 0; +} + +/** + * _base_allocate_memory_pools - allocate start of day memory pools + * @ioc: per adapter object + * + * Return: 0 success, anything else error. + */ +static int +_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) +{ + struct mpt3sas_facts *facts; + u16 max_sge_elements; + u16 chains_needed_per_io; + u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz; + u32 retry_sz; + u32 rdpq_sz = 0, sense_sz = 0; + u16 max_request_credit, nvme_blocks_needed; + unsigned short sg_tablesize; + u16 sge_size; + int i; + int ret = 0, rc = 0; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + + retry_sz = 0; + facts = &ioc->facts; + + /* command line tunables for max sgl entries */ + if (max_sgl_entries != -1) + sg_tablesize = max_sgl_entries; + else { + if (ioc->hba_mpi_version_belonged == MPI2_VERSION) + sg_tablesize = MPT2SAS_SG_DEPTH; + else + sg_tablesize = MPT3SAS_SG_DEPTH; + } + + /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */ + if (reset_devices) + sg_tablesize = min_t(unsigned short, sg_tablesize, + MPT_KDUMP_MIN_PHYS_SEGMENTS); + + if (ioc->is_mcpu_endpoint) + ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS; + else { + if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS) + sg_tablesize = MPT_MIN_PHYS_SEGMENTS; + else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) { + sg_tablesize = min_t(unsigned short, sg_tablesize, + SG_MAX_SEGMENTS); + ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n", + sg_tablesize, MPT_MAX_PHYS_SEGMENTS); + } + ioc->shost->sg_tablesize = sg_tablesize; + } + + ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)), + (facts->RequestCredit / 4)); + if (ioc->internal_depth < INTERNAL_CMDS_COUNT) { + if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT + + INTERNAL_SCSIIO_CMDS_COUNT)) { + ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n", + facts->RequestCredit); + return -ENOMEM; + } + ioc->internal_depth = 10; + } + + ioc->hi_priority_depth = ioc->internal_depth - (5); + /* command 
line tunables for max controller queue depth */ + if (max_queue_depth != -1 && max_queue_depth != 0) { + max_request_credit = min_t(u16, max_queue_depth + + ioc->internal_depth, facts->RequestCredit); + if (max_request_credit > MAX_HBA_QUEUE_DEPTH) + max_request_credit = MAX_HBA_QUEUE_DEPTH; + } else if (reset_devices) + max_request_credit = min_t(u16, facts->RequestCredit, + (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth)); + else + max_request_credit = min_t(u16, facts->RequestCredit, + MAX_HBA_QUEUE_DEPTH); + + /* Firmware maintains additional facts->HighPriorityCredit number of + * credits for HiPriprity Request messages, so hba queue depth will be + * sum of max_request_credit and high priority queue depth. + */ + ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth; + + /* request frame size */ + ioc->request_sz = facts->IOCRequestFrameSize * 4; + + /* reply frame size */ + ioc->reply_sz = facts->ReplyFrameSize * 4; + + /* chain segment size */ + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + if (facts->IOCMaxChainSegmentSize) + ioc->chain_segment_sz = + facts->IOCMaxChainSegmentSize * + MAX_CHAIN_ELEMT_SZ; + else + /* set to 128 bytes size if IOCMaxChainSegmentSize is zero */ + ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS * + MAX_CHAIN_ELEMT_SZ; + } else + ioc->chain_segment_sz = ioc->request_sz; + + /* calculate the max scatter element size */ + sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee); + + retry_allocation: + total_sz = 0; + /* calculate number of sg elements left over in the 1st frame */ + max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) - + sizeof(Mpi2SGEIOUnion_t)) + sge_size); + ioc->max_sges_in_main_message = max_sge_elements/sge_size; + + /* now do the same for a chain buffer */ + max_sge_elements = ioc->chain_segment_sz - sge_size; + ioc->max_sges_in_chain_message = max_sge_elements/sge_size; + + /* + * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE + */ + chains_needed_per_io = ((ioc->shost->sg_tablesize - + ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message) + + 1; + if (chains_needed_per_io > facts->MaxChainDepth) { + chains_needed_per_io = facts->MaxChainDepth; + ioc->shost->sg_tablesize = min_t(u16, + ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message + * chains_needed_per_io), ioc->shost->sg_tablesize); + } + ioc->chains_needed_per_io = chains_needed_per_io; + + /* reply free queue sizing - taking into account for 64 FW events */ + ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; + + /* mCPU manage single counters for simplicity */ + if (ioc->is_mcpu_endpoint) + ioc->reply_post_queue_depth = ioc->reply_free_queue_depth; + else { + /* calculate reply descriptor post queue depth */ + ioc->reply_post_queue_depth = ioc->hba_queue_depth + + ioc->reply_free_queue_depth + 1; + /* align the reply post queue on the next 16 count boundary */ + if (ioc->reply_post_queue_depth % 16) + ioc->reply_post_queue_depth += 16 - + (ioc->reply_post_queue_depth % 16); + } + + if (ioc->reply_post_queue_depth > + facts->MaxReplyDescriptorPostQueueDepth) { + ioc->reply_post_queue_depth = + facts->MaxReplyDescriptorPostQueueDepth - + (facts->MaxReplyDescriptorPostQueueDepth % 16); + ioc->hba_queue_depth = + ((ioc->reply_post_queue_depth - 64) / 2) - 1; + ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; + } + + ioc_info(ioc, + "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), " + "sge_per_io(%d), chains_per_io(%d)\n", + ioc->max_sges_in_main_message, + ioc->max_sges_in_chain_message, + 
ioc->shost->sg_tablesize, + ioc->chains_needed_per_io); + + /* reply post queue, 16 byte align */ + reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(Mpi2DefaultReplyDescriptor_t); + rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK; + if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable) + || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK)) + rdpq_sz = reply_post_free_sz * ioc->reply_queue_count; + ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz); + if (ret == -EAGAIN) { + /* + * Free allocated bad RDPQ memory pools. + * Change dma coherent mask to 32 bit and reallocate RDPQ + */ + _base_release_memory_pools(ioc); + ioc->use_32bit_dma = true; + if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) { + ioc_err(ioc, + "32 DMA mask failed %s\n", pci_name(ioc->pdev)); + return -ENODEV; + } + if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz)) + return -ENOMEM; + } else if (ret == -ENOMEM) + return -ENOMEM; + total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 : + DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK)); + ioc->scsiio_depth = ioc->hba_queue_depth - + ioc->hi_priority_depth - ioc->internal_depth; + + /* set the scsi host can_queue depth + * with some internal commands that could be outstanding + */ + ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT; + dinitprintk(ioc, + ioc_info(ioc, "scsi host: can_queue depth (%d)\n", + ioc->shost->can_queue)); + + /* contiguous pool for request and chains, 16 byte align, one extra " + * "frame for smid=0 + */ + ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth; + sz = ((ioc->scsiio_depth + 1) * ioc->request_sz); + + /* hi-priority queue */ + sz += (ioc->hi_priority_depth * ioc->request_sz); + + /* internal queue */ + sz += (ioc->internal_depth * ioc->request_sz); + + ioc->request_dma_sz = sz; + ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz, + &ioc->request_dma, GFP_KERNEL); + if (!ioc->request) { + ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n", + ioc->hba_queue_depth, ioc->chains_needed_per_io, + ioc->request_sz, sz / 1024); + if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH) + goto out; + retry_sz = 64; + ioc->hba_queue_depth -= retry_sz; + _base_release_memory_pools(ioc); + goto retry_allocation; + } + + if (retry_sz) + ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n", + ioc->hba_queue_depth, ioc->chains_needed_per_io, + ioc->request_sz, sz / 1024); + + /* hi-priority queue */ + ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) * + ioc->request_sz); + ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) * + ioc->request_sz); + + /* internal queue */ + ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth * + ioc->request_sz); + ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * + ioc->request_sz); + + ioc_info(ioc, + "request pool(0x%p) - dma(0x%llx): " + "depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->request, (unsigned long long) ioc->request_dma, + ioc->hba_queue_depth, ioc->request_sz, + (ioc->hba_queue_depth * ioc->request_sz) / 1024); + + total_sz += sz; + + dinitprintk(ioc, + ioc_info(ioc, "scsiio(0x%p): depth(%d)\n", + ioc->request, ioc->scsiio_depth)); + + ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH); + sz = ioc->scsiio_depth * sizeof(struct chain_lookup); + ioc->chain_lookup = kzalloc(sz, GFP_KERNEL); + if (!ioc->chain_lookup) { + 
ioc_err(ioc, "chain_lookup: __get_free_pages failed\n"); + goto out; + } + + sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker); + for (i = 0; i < ioc->scsiio_depth; i++) { + ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL); + if (!ioc->chain_lookup[i].chains_per_smid) { + ioc_err(ioc, "chain_lookup: kzalloc failed\n"); + goto out; + } + } + + /* initialize hi-priority queue smid's */ + ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, + sizeof(struct request_tracker), GFP_KERNEL); + if (!ioc->hpr_lookup) { + ioc_err(ioc, "hpr_lookup: kcalloc failed\n"); + goto out; + } + ioc->hi_priority_smid = ioc->scsiio_depth + 1; + dinitprintk(ioc, + ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n", + ioc->hi_priority, + ioc->hi_priority_depth, ioc->hi_priority_smid)); + + /* initialize internal queue smid's */ + ioc->internal_lookup = kcalloc(ioc->internal_depth, + sizeof(struct request_tracker), GFP_KERNEL); + if (!ioc->internal_lookup) { + ioc_err(ioc, "internal_lookup: kcalloc failed\n"); + goto out; + } + ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth; + dinitprintk(ioc, + ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n", + ioc->internal, + ioc->internal_depth, ioc->internal_smid)); + + ioc->io_queue_num = kcalloc(ioc->scsiio_depth, + sizeof(u16), GFP_KERNEL); + if (!ioc->io_queue_num) + goto out; + /* + * The number of NVMe page sized blocks needed is: + * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1 + * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry + * that is placed in the main message frame. 8 is the size of each PRP + * entry or PRP list pointer entry. 8 is subtracted from page_size + * because of the PRP list pointer entry at the end of a page, so this + * is not counted as a PRP entry. The 1 added page is a round up. + * + * To avoid allocation failures due to the amount of memory that could + * be required for NVMe PRP's, only each set of NVMe blocks will be + * contiguous, so a new set is allocated for each possible I/O. 
+ */ + + ioc->chains_per_prp_buffer = 0; + if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { + nvme_blocks_needed = + (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1; + nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE); + nvme_blocks_needed++; + + sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth; + ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL); + if (!ioc->pcie_sg_lookup) { + ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n"); + goto out; + } + sz = nvme_blocks_needed * ioc->page_size; + rc = _base_allocate_pcie_sgl_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += sz * ioc->scsiio_depth; + } + + rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io - + ioc->chains_per_prp_buffer) * ioc->scsiio_depth); + dinitprintk(ioc, + ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->chain_depth, ioc->chain_segment_sz, + (ioc->chain_depth * ioc->chain_segment_sz) / 1024)); + /* sense buffers, 4 byte align */ + sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; + rc = _base_allocate_sense_dma_pool(ioc, sense_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += sense_sz; + /* reply pool, 4 byte align */ + sz = ioc->reply_free_queue_depth * ioc->reply_sz; + rc = _base_allocate_reply_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += sz; + + /* reply free queue, 16 byte align */ + sz = ioc->reply_free_queue_depth * 4; + rc = _base_allocate_reply_free_dma_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + dinitprintk(ioc, + ioc_info(ioc, "reply_free_dma (0x%llx)\n", + (unsigned long long)ioc->reply_free_dma)); + total_sz += sz; + if (ioc->rdpq_array_enable) { + reply_post_free_array_sz = ioc->reply_queue_count * + sizeof(Mpi2IOCInitRDPQArrayEntry); + rc = _base_allocate_reply_post_free_array(ioc, + reply_post_free_array_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + } + ioc->config_page_sz = 512; + ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev, + ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL); + if (!ioc->config_page) { + ioc_err(ioc, "config page: dma_pool_alloc failed\n"); + goto out; + } + + ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n", + ioc->config_page, (unsigned long long)ioc->config_page_dma, + ioc->config_page_sz); + total_sz += ioc->config_page_sz; + + ioc_info(ioc, "Allocated physical memory: size(%d kB)\n", + total_sz / 1024); + ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n", + ioc->shost->can_queue, facts->RequestCredit); + ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n", + ioc->shost->sg_tablesize); + return 0; + +try_32bit_dma: + _base_release_memory_pools(ioc); + if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) { + /* Change dma coherent mask to 32 bit and reallocate */ + if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) { + pr_err("Setting 32 bit coherent DMA mask Failed %s\n", + pci_name(ioc->pdev)); + return -ENODEV; + } + } else if (_base_reduce_hba_queue_depth(ioc) != 0) + return -ENOMEM; + goto retry_allocation; + + out: + return -ENOMEM; +} + +/** + * mpt3sas_base_get_iocstate - Get the current state of a MPT 
adapter. + * @ioc: Pointer to MPT_ADAPTER structure + * @cooked: Request raw or cooked IOC state + * + * Return: all IOC Doorbell register bits if cooked==0, else just the + * Doorbell bits in MPI2_IOC_STATE_MASK. + */ +u32 +mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked) +{ + u32 s, sc; + + s = ioc->base_readl_ext_retry(&ioc->chip->Doorbell); + sc = s & MPI2_IOC_STATE_MASK; + return cooked ? sc : s; +} + +/** + * _base_wait_on_iocstate - waiting on a particular ioc state + * @ioc: per adapter object + * @ioc_state: controller state { READY, OPERATIONAL, or RESET } + * @timeout: timeout in second + * + * Return: 0 for success, non-zero for failure. + */ +static int +_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout) +{ + u32 count, cntdn; + u32 current_state; + + count = 0; + cntdn = 1000 * timeout; + do { + current_state = mpt3sas_base_get_iocstate(ioc, 1); + if (current_state == ioc_state) + return 0; + if (count && current_state == MPI2_IOC_STATE_FAULT) + break; + if (count && current_state == MPI2_IOC_STATE_COREDUMP) + break; + + usleep_range(1000, 1500); + count++; + } while (--cntdn); + + return current_state; +} + +/** + * _base_dump_reg_set - This function will print hexdump of register set. + * @ioc: per adapter object + * + * Return: nothing. + */ +static inline void +_base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned int i, sz = 256; + u32 __iomem *reg = (u32 __iomem *)ioc->chip; + + ioc_info(ioc, "System Register set:\n"); + for (i = 0; i < (sz / sizeof(u32)); i++) + pr_info("%08x: %08x\n", (i * 4), readl(&reg[i])); +} + +/** + * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by + * a write to the doorbell) + * @ioc: per adapter object + * @timeout: timeout in seconds + * + * Return: 0 for success, non-zero for failure. + * + * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. + */ + +static int +_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout) +{ + u32 cntdn, count; + u32 int_status; + + count = 0; + cntdn = 1000 * timeout; + do { + int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); + if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { + dhsprintk(ioc, + ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", + __func__, count, timeout)); + return 0; + } + + usleep_range(1000, 1500); + count++; + } while (--cntdn); + + ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n", + __func__, count, int_status); + return -EFAULT; +} + +static int +_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout) +{ + u32 cntdn, count; + u32 int_status; + + count = 0; + cntdn = 2000 * timeout; + do { + int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); + if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { + dhsprintk(ioc, + ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", + __func__, count, timeout)); + return 0; + } + + udelay(500); + count++; + } while (--cntdn); + + ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n", + __func__, count, int_status); + return -EFAULT; + +} + +/** + * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell. + * @ioc: per adapter object + * @timeout: timeout in second + * + * Return: 0 for success, non-zero for failure. + * + * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to + * doorbell.
+ */ +static int +_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout) +{ + u32 cntdn, count; + u32 int_status; + u32 doorbell; + + count = 0; + cntdn = 1000 * timeout; + do { + int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); + if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { + dhsprintk(ioc, + ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", + __func__, count, timeout)); + return 0; + } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { + doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell); + if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, doorbell); + return -EFAULT; + } + if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, doorbell); + return -EFAULT; + } + } else if (int_status == 0xFFFFFFFF) + goto out; + + usleep_range(1000, 1500); + count++; + } while (--cntdn); + + out: + ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n", + __func__, count, int_status); + return -EFAULT; +} + +/** + * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use + * @ioc: per adapter object + * @timeout: timeout in second + * + * Return: 0 for success, non-zero for failure. + */ +static int +_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout) +{ + u32 cntdn, count; + u32 doorbell_reg; + + count = 0; + cntdn = 1000 * timeout; + do { + doorbell_reg = ioc->base_readl_ext_retry(&ioc->chip->Doorbell); + if (!(doorbell_reg & MPI2_DOORBELL_USED)) { + dhsprintk(ioc, + ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", + __func__, count, timeout)); + return 0; + } + + usleep_range(1000, 1500); + count++; + } while (--cntdn); + + ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n", + __func__, count, doorbell_reg); + return -EFAULT; +} + +/** + * _base_send_ioc_reset - send doorbell reset + * @ioc: per adapter object + * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET + * @timeout: timeout in second + * + * Return: 0 for success, non-zero for failure. + */ +static int +_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) +{ + u32 ioc_state; + int r = 0; + unsigned long flags; + + if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) { + ioc_err(ioc, "%s: unknown reset_type\n", __func__); + return -EFAULT; + } + + if (!(ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY)) + return -EFAULT; + + ioc_info(ioc, "sending message unit reset !!\n"); + + writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT, + &ioc->chip->Doorbell); + if ((_base_wait_for_doorbell_ack(ioc, 15))) { + r = -EFAULT; + goto out; + } + + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); + if (ioc_state) { + ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", + __func__, ioc_state); + r = -EFAULT; + goto out; + } + out: + if (r != 0) { + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + /* + * Wait for IOC state CoreDump to clear only during + * HBA initialization & release time. 
+ */ + if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 || + ioc->fault_reset_work_q == NULL)) { + spin_unlock_irqrestore( + &ioc->ioc_reset_in_progress_lock, flags); + mpt3sas_print_coredump_info(ioc, ioc_state); + mpt3sas_base_wait_for_coredump_completion(ioc, + __func__); + spin_lock_irqsave( + &ioc->ioc_reset_in_progress_lock, flags); + } + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + } + ioc_info(ioc, "message unit reset: %s\n", + r == 0 ? "SUCCESS" : "FAILED"); + return r; +} + +/** + * mpt3sas_wait_for_ioc - IOC's operational state is checked here. + * @ioc: per adapter object + * @timeout: timeout in seconds + * + * Return: Waits up to timeout seconds for the IOC to + * become operational. Returns 0 if IOC is present + * and operational; otherwise returns %-EFAULT. + */ + +int +mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout) +{ + int wait_state_count = 0; + u32 ioc_state; + + do { + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state == MPI2_IOC_STATE_OPERATIONAL) + break; + + /* + * Watchdog thread will be started after IOC Initialization, so + * no need to wait here for IOC state to become operational + * when IOC Initialization is on. Instead the driver will + * return ETIME status, so that calling function can issue + * diag reset operation and retry the command. + */ + if (ioc->is_driver_loading) + return -ETIME; + + ssleep(1); + ioc_info(ioc, "%s: waiting for operational state(count=%d)\n", + __func__, ++wait_state_count); + } while (--timeout); + if (!timeout) { + ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__); + return -EFAULT; + } + if (wait_state_count) + ioc_info(ioc, "ioc is operational\n"); + return 0; +} + +/** + * _base_handshake_req_reply_wait - send request thru doorbell interface + * @ioc: per adapter object + * @request_bytes: request length + * @request: pointer having request payload + * @reply_bytes: reply length + * @reply: pointer to reply payload + * @timeout: timeout in second + * + * Return: 0 for success, non-zero for failure. 
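+ *
+ * Editor's note, an illustrative sketch (not part of the upstream kernel-doc)
+ * of how callers in this file drive the handshake path, modelled on
+ * _base_get_port_facts() further below:
+ *
+ *	Mpi2PortFactsRequest_t mpi_request;
+ *	Mpi2PortFactsReply_t mpi_reply;
+ *
+ *	memset(&mpi_request, 0, sizeof(mpi_request));
+ *	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
+ *	mpi_request.PortNumber = port;
+ *	r = _base_handshake_req_reply_wait(ioc, sizeof(mpi_request),
+ *	    (u32 *)&mpi_request, sizeof(mpi_reply), (u16 *)&mpi_reply, 5);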
+ */ +static int +_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, + u32 *request, int reply_bytes, u16 *reply, int timeout) +{ + MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply; + int i; + u8 failed; + __le32 *mfp; + + /* make sure doorbell is not in use */ + if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { + ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__); + return -EFAULT; + } + + /* clear pending doorbell interrupts from previous state changes */ + if (ioc->base_readl(&ioc->chip->HostInterruptStatus) & + MPI2_HIS_IOC2SYS_DB_STATUS) + writel(0, &ioc->chip->HostInterruptStatus); + + /* send message to ioc */ + writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) | + ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)), + &ioc->chip->Doorbell); + + if ((_base_spin_on_doorbell_int(ioc, 5))) { + ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", + __LINE__); + return -EFAULT; + } + writel(0, &ioc->chip->HostInterruptStatus); + + if ((_base_wait_for_doorbell_ack(ioc, 5))) { + ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n", + __LINE__); + return -EFAULT; + } + + /* send message 32-bits at a time */ + for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { + writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell); + if ((_base_wait_for_doorbell_ack(ioc, 5))) + failed = 1; + } + + if (failed) { + ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n", + __LINE__); + return -EFAULT; + } + + /* now wait for the reply */ + if ((_base_wait_for_doorbell_int(ioc, timeout))) { + ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", + __LINE__); + return -EFAULT; + } + + /* read the first two 16-bits, it gives the total length of the reply */ + reply[0] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell) + & MPI2_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + if ((_base_wait_for_doorbell_int(ioc, 5))) { + ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", + __LINE__); + return -EFAULT; + } + reply[1] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell) + & MPI2_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + + for (i = 2; i < default_reply->MsgLength * 2; i++) { + if ((_base_wait_for_doorbell_int(ioc, 5))) { + ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", + __LINE__); + return -EFAULT; + } + if (i >= reply_bytes/2) /* overflow case */ + ioc->base_readl_ext_retry(&ioc->chip->Doorbell); + else + reply[i] = le16_to_cpu( + ioc->base_readl_ext_retry(&ioc->chip->Doorbell) + & MPI2_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + } + + _base_wait_for_doorbell_int(ioc, 5); + if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) { + dhsprintk(ioc, + ioc_info(ioc, "doorbell is in use (line=%d)\n", + __LINE__)); + } + writel(0, &ioc->chip->HostInterruptStatus); + + if (ioc->logging_level & MPT_DEBUG_INIT) { + mfp = (__le32 *)reply; + pr_info("\toffset:data\n"); + for (i = 0; i < reply_bytes/4; i++) + ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4, + le32_to_cpu(mfp[i])); + } + return 0; +} + +/** + * mpt3sas_base_sas_iounit_control - send sas iounit control to FW + * @ioc: per adapter object + * @mpi_reply: the reply payload from FW + * @mpi_request: the request payload sent to FW + * + * The SAS IO Unit Control Request message allows the host to perform low-level + * operations, such as resets on the PHYs of the IO Unit, also allows the host + * to obtain the IOC assigned device handles for a device if it has other + * identifying information about the device, in addition allows the
host to + * remove IOC resources associated with the device. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, + Mpi2SasIoUnitControlReply_t *mpi_reply, + Mpi2SasIoUnitControlRequest_t *mpi_request) +{ + u16 smid; + u8 issue_reset = 0; + int rc; + void *request; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + mutex_lock(&ioc->base_cmds.mutex); + + if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: base_cmd in use\n", __func__); + rc = -EAGAIN; + goto out; + } + + rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + if (rc) + goto out; + + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + ioc->base_cmds.status = MPT3_CMD_PENDING; + request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)); + if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || + mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) + ioc->ioc_link_reset_in_progress = 1; + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, + msecs_to_jiffies(10000)); + if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || + mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) && + ioc->ioc_link_reset_in_progress) + ioc->ioc_link_reset_in_progress = 0; + if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status, + mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4, + issue_reset); + goto issue_host_reset; + } + if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) + memcpy(mpi_reply, ioc->base_cmds.reply, + sizeof(Mpi2SasIoUnitControlReply_t)); + else + memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t)); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + goto out; + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + rc = -EFAULT; + out: + mutex_unlock(&ioc->base_cmds.mutex); + return rc; +} + +/** + * mpt3sas_base_scsi_enclosure_processor - sending request to sep device + * @ioc: per adapter object + * @mpi_reply: the reply payload from FW + * @mpi_request: the request payload sent to FW + * + * The SCSI Enclosure Processor request message causes the IOC to + * communicate with SES devices to control LED status signals. + * + * Return: 0 for success, non-zero for failure. 
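Illustrative note (not part of the patch): mpt3sas_base_sas_iounit_control above follows the driver's internal-command pattern: mark the command PENDING, post it, wait with a timeout, then act on the status bits. A small stand-alone sketch of that flag lifecycle follows; the CMD_* values are made-up stand-ins, not the driver's MPT3_CMD_* definitions.

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins for the MPT3_CMD_* status bits */
#define CMD_PENDING     0x0002u
#define CMD_COMPLETE    0x0001u
#define CMD_REPLY_VALID 0x0004u

/* returns 0 on success, -1 on timeout (caller would escalate to host reset) */
static int finish_internal_cmd(uint16_t status)
{
	if (!(status & CMD_COMPLETE))
		return -1;              /* no completion seen before the timeout */
	if (status & CMD_REPLY_VALID)
		puts("copy reply frame back to the caller");
	else
		puts("no reply frame; caller gets a zeroed reply");
	return 0;
}

int main(void)
{
	uint16_t status = CMD_PENDING;

	/* completion path: normally set by the interrupt callback */
	status |= CMD_COMPLETE | CMD_REPLY_VALID;
	printf("rc=%d\n", finish_internal_cmd(status));
	/* caller would then mark the slot NOT_USED for the next user */
	return 0;
}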
+ */ +int +mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, + Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request) +{ + u16 smid; + u8 issue_reset = 0; + int rc; + void *request; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + mutex_lock(&ioc->base_cmds.mutex); + + if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: base_cmd in use\n", __func__); + rc = -EAGAIN; + goto out; + } + + rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + if (rc) + goto out; + + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + ioc->base_cmds.status = MPT3_CMD_PENDING; + request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memset(request, 0, ioc->request_sz); + memcpy(request, mpi_request, sizeof(Mpi2SepReply_t)); + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, + msecs_to_jiffies(10000)); + if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->base_cmds.status, mpi_request, + sizeof(Mpi2SepRequest_t)/4, issue_reset); + goto issue_host_reset; + } + if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) + memcpy(mpi_reply, ioc->base_cmds.reply, + sizeof(Mpi2SepReply_t)); + else + memset(mpi_reply, 0, sizeof(Mpi2SepReply_t)); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + goto out; + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + rc = -EFAULT; + out: + mutex_unlock(&ioc->base_cmds.mutex); + return rc; +} + +/** + * _base_get_port_facts - obtain port facts reply and save in ioc + * @ioc: per adapter object + * @port: ? + * + * Return: 0 for success, non-zero for failure. + */ +static int +_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port) +{ + Mpi2PortFactsRequest_t mpi_request; + Mpi2PortFactsReply_t mpi_reply; + struct mpt3sas_port_facts *pfacts; + int mpi_reply_sz, mpi_request_sz, r; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + mpi_reply_sz = sizeof(Mpi2PortFactsReply_t); + mpi_request_sz = sizeof(Mpi2PortFactsRequest_t); + memset(&mpi_request, 0, mpi_request_sz); + mpi_request.Function = MPI2_FUNCTION_PORT_FACTS; + mpi_request.PortNumber = port; + r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, + (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5); + + if (r != 0) { + ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); + return r; + } + + pfacts = &ioc->pfacts[port]; + memset(pfacts, 0, sizeof(struct mpt3sas_port_facts)); + pfacts->PortNumber = mpi_reply.PortNumber; + pfacts->VP_ID = mpi_reply.VP_ID; + pfacts->VF_ID = mpi_reply.VF_ID; + pfacts->MaxPostedCmdBuffers = + le16_to_cpu(mpi_reply.MaxPostedCmdBuffers); + + return 0; +} + +/** + * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL + * @ioc: per adapter object + * @timeout: + * + * Return: 0 for success, non-zero for failure. 
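Illustrative note (not part of the patch): _base_get_port_facts above converts the little-endian reply fields with le16_to_cpu() before caching them. A user-space sketch of the same conversion done by hand, with an example value:

#include <stdint.h>
#include <stdio.h>

/* decode a 16-bit little-endian field from a raw reply buffer */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* example only: a field holding 0x0140 on the wire */
	const uint8_t raw[2] = { 0x40, 0x01 };

	printf("decoded = %u\n", get_le16(raw));   /* prints 320 */
	return 0;
}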
+ */ +static int +_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout) +{ + u32 ioc_state; + int rc; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + if (ioc->pci_error_recovery) { + dfailprintk(ioc, + ioc_info(ioc, "%s: host in pci error recovery\n", + __func__)); + return -EFAULT; + } + + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + dhsprintk(ioc, + ioc_info(ioc, "%s: ioc_state(0x%08x)\n", + __func__, ioc_state)); + + if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) || + (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) + return 0; + + if (ioc_state & MPI2_DOORBELL_USED) { + dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n")); + goto issue_diag_reset; + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + goto issue_diag_reset; + } else if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + ioc_info(ioc, + "%s: Skipping the diag reset here. (ioc_state=0x%x)\n", + __func__, ioc_state); + return -EFAULT; + } + + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); + if (ioc_state) { + dfailprintk(ioc, + ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", + __func__, ioc_state)); + return -EFAULT; + } + + issue_diag_reset: + rc = _base_diag_reset(ioc); + return rc; +} + +/** + * _base_get_ioc_facts - obtain ioc facts reply and save in ioc + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. + */ +static int +_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2IOCFactsRequest_t mpi_request; + Mpi2IOCFactsReply_t mpi_reply; + struct mpt3sas_facts *facts; + int mpi_reply_sz, mpi_request_sz, r; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + r = _base_wait_for_iocstate(ioc, 10); + if (r) { + dfailprintk(ioc, + ioc_info(ioc, "%s: failed getting to correct state\n", + __func__)); + return r; + } + mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); + mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); + memset(&mpi_request, 0, mpi_request_sz); + mpi_request.Function = MPI2_FUNCTION_IOC_FACTS; + r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, + (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5); + + if (r != 0) { + ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); + return r; + } + + facts = &ioc->facts; + memset(facts, 0, sizeof(struct mpt3sas_facts)); + facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); + facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); + facts->VP_ID = mpi_reply.VP_ID; + facts->VF_ID = mpi_reply.VF_ID; + facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions); + facts->MaxChainDepth = mpi_reply.MaxChainDepth; + facts->WhoInit = mpi_reply.WhoInit; + facts->NumberOfPorts = mpi_reply.NumberOfPorts; + facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; + if (ioc->msix_enable && (facts->MaxMSIxVectors <= + MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc))) + ioc->combined_reply_queue = 0; + facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); + facts->MaxReplyDescriptorPostQueueDepth = + le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); + facts->ProductID = le16_to_cpu(mpi_reply.ProductID); + facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); + if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) + ioc->ir_firmware = 1; + if ((facts->IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices)) + ioc->rdpq_array_capable = 1; + if 
((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) + && ioc->is_aero_ioc) + ioc->atomic_desc_capable = 1; + facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); + facts->IOCRequestFrameSize = + le16_to_cpu(mpi_reply.IOCRequestFrameSize); + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + facts->IOCMaxChainSegmentSize = + le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize); + } + facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators); + facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); + ioc->shost->max_id = -1; + facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders); + facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures); + facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags); + facts->HighPriorityCredit = + le16_to_cpu(mpi_reply.HighPriorityCredit); + facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; + facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); + facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize; + + /* + * Get the Page Size from IOC Facts. If it's 0, default to 4k. + */ + ioc->page_size = 1 << facts->CurrentHostPageSize; + if (ioc->page_size == 1) { + ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n"); + ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K; + } + dinitprintk(ioc, + ioc_info(ioc, "CurrentHostPageSize(%d)\n", + facts->CurrentHostPageSize)); + + dinitprintk(ioc, + ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n", + facts->RequestCredit, facts->MaxChainDepth)); + dinitprintk(ioc, + ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n", + facts->IOCRequestFrameSize * 4, + facts->ReplyFrameSize * 4)); + return 0; +} + +/** + * _base_send_ioc_init - send ioc_init to firmware + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. 
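Illustrative note (not part of the patch): _base_get_ioc_facts above derives the host page size as 1 << CurrentHostPageSize and falls back to 4 KiB when the IOC reports 0. A worked example of that computation:

#include <stdio.h>

#define HOST_PAGE_SIZE_4K 12   /* mirrors MPT3SAS_HOST_PAGE_SIZE_4K */

static unsigned int host_page_size(unsigned char current_host_page_size)
{
	unsigned int page_size = 1u << current_host_page_size;

	if (page_size == 1)            /* IOC reported 0: use the 4k default */
		page_size = 1u << HOST_PAGE_SIZE_4K;
	return page_size;
}

int main(void)
{
	/* both print 4096: 1 << 12, and the default when the field is 0 */
	printf("%u %u\n", host_page_size(12), host_page_size(0));
	return 0;
}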
+ */ +static int +_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2IOCInitRequest_t mpi_request; + Mpi2IOCInitReply_t mpi_reply; + int i, r = 0; + ktime_t current_time; + u16 ioc_status; + u32 reply_post_free_array_sz = 0; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t)); + mpi_request.Function = MPI2_FUNCTION_IOC_INIT; + mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER; + mpi_request.VF_ID = 0; /* TODO */ + mpi_request.VP_ID = 0; + mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged); + mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); + mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K; + + if (_base_is_controller_msix_enabled(ioc)) + mpi_request.HostMSIxVectors = ioc->reply_queue_count; + mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); + mpi_request.ReplyDescriptorPostQueueDepth = + cpu_to_le16(ioc->reply_post_queue_depth); + mpi_request.ReplyFreeQueueDepth = + cpu_to_le16(ioc->reply_free_queue_depth); + + mpi_request.SenseBufferAddressHigh = + cpu_to_le32((u64)ioc->sense_dma >> 32); + mpi_request.SystemReplyAddressHigh = + cpu_to_le32((u64)ioc->reply_dma >> 32); + mpi_request.SystemRequestFrameBaseAddress = + cpu_to_le64((u64)ioc->request_dma); + mpi_request.ReplyFreeQueueAddress = + cpu_to_le64((u64)ioc->reply_free_dma); + + if (ioc->rdpq_array_enable) { + reply_post_free_array_sz = ioc->reply_queue_count * + sizeof(Mpi2IOCInitRDPQArrayEntry); + memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz); + for (i = 0; i < ioc->reply_queue_count; i++) + ioc->reply_post_free_array[i].RDPQBaseAddress = + cpu_to_le64( + (u64)ioc->reply_post[i].reply_post_free_dma); + mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE; + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64)ioc->reply_post_free_array_dma); + } else { + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma); + } + + /* + * Set the flag to enable CoreDump state feature in IOC firmware. + */ + mpi_request.ConfigurationFlags |= + cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE); + + /* This time stamp specifies number of milliseconds + * since epoch ~ midnight January 1, 1970. + */ + current_time = ktime_get_real(); + mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time)); + + if (ioc->logging_level & MPT_DEBUG_INIT) { + __le32 *mfp; + int i; + + mfp = (__le32 *)&mpi_request; + ioc_info(ioc, "\toffset:data\n"); + for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) + ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4, + le32_to_cpu(mfp[i])); + } + + r = _base_handshake_req_reply_wait(ioc, + sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request, + sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30); + + if (r != 0) { + ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); + return r; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS || + mpi_reply.IOCLogInfo) { + ioc_err(ioc, "%s: failed\n", __func__); + r = -EIO; + } + + /* Reset TimeSync Counter*/ + ioc->timestamp_update_count = 0; + return r; +} + +/** + * mpt3sas_port_enable_done - command completion routine for port enable + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
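Illustrative note (not part of the patch): the IOCInit TimeStamp above is the wall-clock time in milliseconds since the Unix epoch (ktime_get_real() converted with ktime_to_ms()). A user-space analogue of that value, assuming POSIX clock_gettime():

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t ms_since_epoch(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return (uint64_t)ts.tv_sec * 1000u + (uint64_t)(ts.tv_nsec / 1000000L);
}

int main(void)
{
	printf("TimeStamp = %llu ms\n", (unsigned long long)ms_since_epoch());
	return 0;
}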
+ */ +u8 +mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + u16 ioc_status; + + if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED) + return 1; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (!mpi_reply) + return 1; + + if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE) + return 1; + + ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING; + ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE; + ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID; + memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + ioc->port_enable_failed = 1; + + if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) { + ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC; + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + mpt3sas_port_enable_complete(ioc); + return 1; + } else { + ioc->start_scan_failed = ioc_status; + ioc->start_scan = 0; + return 1; + } + } + complete(&ioc->port_enable_cmds.done); + return 1; +} + +/** + * _base_send_port_enable - send port_enable(discovery stuff) to firmware + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. + */ +static int +_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2PortEnableRequest_t *mpi_request; + Mpi2PortEnableReply_t *mpi_reply; + int r = 0; + u16 smid; + u16 ioc_status; + + ioc_info(ioc, "sending port enable !!\n"); + + if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { + ioc_err(ioc, "%s: internal command already in use\n", __func__); + return -EAGAIN; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + return -EAGAIN; + } + + ioc->port_enable_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->port_enable_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); + mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; + + init_completion(&ioc->port_enable_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ); + if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2PortEnableRequest_t)/4); + if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) + r = -EFAULT; + else + r = -ETIME; + goto out; + } + + mpi_reply = ioc->port_enable_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n", + __func__, ioc_status); + r = -EFAULT; + goto out; + } + + out: + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED"); + return r; +} + +/** + * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply) + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. 
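Illustrative note (not part of the patch): mpt3sas_port_enable_done above handles two completion modes: the asynchronous scan path (MPT3_CMD_COMPLETE_ASYNC) finishes or fails the scan directly, while the synchronous path only wakes the waiter. A compact sketch of that dispatch, with made-up flag and status values:

#include <stdint.h>
#include <stdio.h>

#define CMD_COMPLETE_ASYNC 0x0010u   /* stand-in for MPT3_CMD_COMPLETE_ASYNC */
#define IOCSTATUS_SUCCESS  0x0000u

static void port_enable_done(uint16_t cmd_status, uint16_t ioc_status)
{
	if (cmd_status & CMD_COMPLETE_ASYNC) {
		if (ioc_status == IOCSTATUS_SUCCESS)
			puts("async: port enable complete, continue the scan");
		else
			puts("async: record start_scan_failed, stop the scan");
		return;
	}
	puts("sync: complete() wakes the waiter in _base_send_port_enable");
}

int main(void)
{
	port_enable_done(CMD_COMPLETE_ASYNC, IOCSTATUS_SUCCESS);
	port_enable_done(0, IOCSTATUS_SUCCESS);
	return 0;
}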
+ */ +int +mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2PortEnableRequest_t *mpi_request; + u16 smid; + + ioc_info(ioc, "sending port enable !!\n"); + + if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { + ioc_err(ioc, "%s: internal command already in use\n", __func__); + return -EAGAIN; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + return -EAGAIN; + } + ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED; + ioc->port_enable_cmds.status = MPT3_CMD_PENDING; + ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->port_enable_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); + mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; + + ioc->put_smid_default(ioc, smid); + return 0; +} + +/** + * _base_determine_wait_on_discovery - desposition + * @ioc: per adapter object + * + * Decide whether to wait on discovery to complete. Used to either + * locate boot device, or report volumes ahead of physical devices. + * + * Return: 1 for wait, 0 for don't wait. + */ +static int +_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc) +{ + /* We wait for discovery to complete if IR firmware is loaded. + * The sas topology events arrive before PD events, so we need time to + * turn on the bit in ioc->pd_handles to indicate PD + * Also, it maybe required to report Volumes ahead of physical + * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set. + */ + if (ioc->ir_firmware) + return 1; + + /* if no Bios, then we don't need to wait */ + if (!ioc->bios_pg3.BiosVersion) + return 0; + + /* Bios is present, then we drop down here. + * + * If there any entries in the Bios Page 2, then we wait + * for discovery to complete. + */ + + /* Current Boot Device */ + if ((ioc->bios_pg2.CurrentBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK) == + MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && + /* Request Boot Device */ + (ioc->bios_pg2.ReqBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK) == + MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && + /* Alternate Request Boot Device */ + (ioc->bios_pg2.ReqAltBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK) == + MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED) + return 0; + + return 1; +} + +/** + * _base_unmask_events - turn on notification for this event + * @ioc: per adapter object + * @event: firmware event + * + * The mask is stored in ioc->event_masks. + */ +static void +_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event) +{ + u32 desired_event; + + if (event >= 128) + return; + + desired_event = (1 << (event % 32)); + + if (event < 32) + ioc->event_masks[0] &= ~desired_event; + else if (event < 64) + ioc->event_masks[1] &= ~desired_event; + else if (event < 96) + ioc->event_masks[2] &= ~desired_event; + else if (event < 128) + ioc->event_masks[3] &= ~desired_event; +} + +/** + * _base_event_notification - send event notification + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. 
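Illustrative note (not part of the patch): _base_unmask_events above clears one bit in a four-word (128-bit) mask, using word = event / 32 and bit = event % 32. A stand-alone sketch of that indexing:

#include <stdint.h>
#include <stdio.h>

/* clear the bit for 'event' so the firmware will report it */
static void unmask_event(uint32_t masks[4], unsigned int event)
{
	if (event >= 128)
		return;
	masks[event / 32] &= ~(1u << (event % 32));
}

int main(void)
{
	uint32_t masks[4] = { ~0u, ~0u, ~0u, ~0u };

	unmask_event(masks, 0x16);   /* example event code */
	printf("%08x %08x %08x %08x\n",
	       masks[0], masks[1], masks[2], masks[3]);
	return 0;
}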
+ */ +static int +_base_event_notification(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2EventNotificationRequest_t *mpi_request; + u16 smid; + int r = 0; + int i, issue_diag_reset = 0; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + if (ioc->base_cmds.status & MPT3_CMD_PENDING) { + ioc_err(ioc, "%s: internal command already in use\n", __func__); + return -EAGAIN; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + return -EAGAIN; + } + ioc->base_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t)); + mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) + mpi_request->EventMasks[i] = + cpu_to_le32(ioc->event_masks[i]); + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ); + if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2EventNotificationRequest_t)/4); + if (ioc->base_cmds.status & MPT3_CMD_RESET) + r = -EFAULT; + else + issue_diag_reset = 1; + + } else + dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__)); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + + if (issue_diag_reset) { + if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED) + return -EFAULT; + if (mpt3sas_base_check_for_fault_and_issue_reset(ioc)) + return -EFAULT; + r = -EAGAIN; + } + return r; +} + +/** + * mpt3sas_base_validate_event_type - validating event types + * @ioc: per adapter object + * @event_type: firmware event + * + * This will turn on firmware event notification when application + * ask for that event. We don't mask events that are already enabled. + */ +void +mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type) +{ + int i, j; + u32 event_mask, desired_event; + u8 send_update_to_fw; + + for (i = 0, send_update_to_fw = 0; i < + MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) { + event_mask = ~event_type[i]; + desired_event = 1; + for (j = 0; j < 32; j++) { + if (!(event_mask & desired_event) && + (ioc->event_masks[i] & desired_event)) { + ioc->event_masks[i] &= ~desired_event; + send_update_to_fw = 1; + } + desired_event = (desired_event << 1); + } + } + + if (!send_update_to_fw) + return; + + mutex_lock(&ioc->base_cmds.mutex); + _base_event_notification(ioc); + mutex_unlock(&ioc->base_cmds.mutex); +} + +/** + * _base_diag_reset - the "big hammer" start of day reset + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. 
+ */ +static int +_base_diag_reset(struct MPT3SAS_ADAPTER *ioc) +{ + u32 host_diagnostic; + u32 ioc_state; + u32 count; + u32 hcb_size; + + ioc_info(ioc, "sending diag reset !!\n"); + + pci_cfg_access_lock(ioc->pdev); + + drsprintk(ioc, ioc_info(ioc, "clear interrupts\n")); + + count = 0; + do { + /* Write magic sequence to WriteSequence register + * Loop until in diagnostic mode + */ + drsprintk(ioc, ioc_info(ioc, "write magic sequence\n")); + writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence); + + /* wait 100 msec */ + msleep(100); + + if (count++ > 20) { + ioc_info(ioc, + "Stop writing magic sequence after 20 retries\n"); + _base_dump_reg_set(ioc); + goto out; + } + + host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic); + drsprintk(ioc, + ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n", + count, host_diagnostic)); + + } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0); + + hcb_size = ioc->base_readl(&ioc->chip->HCBSize); + + drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n")); + writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER, + &ioc->chip->HostDiagnostic); + + /*This delay allows the chip PCIe hardware time to finish reset tasks*/ + msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000); + + /* Approximately 300 second max wait */ + for (count = 0; count < (300000000 / + MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) { + + host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic); + + if (host_diagnostic == 0xFFFFFFFF) { + ioc_info(ioc, + "Invalid host diagnostic register value\n"); + _base_dump_reg_set(ioc); + goto out; + } + if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) + break; + + msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000); + } + + if (host_diagnostic & MPI2_DIAG_HCB_MODE) { + + drsprintk(ioc, + ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n")); + host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK; + host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW; + writel(host_diagnostic, &ioc->chip->HostDiagnostic); + + drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n")); + writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE, + &ioc->chip->HCBSize); + } + + drsprintk(ioc, ioc_info(ioc, "restart the adapter\n")); + writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET, + &ioc->chip->HostDiagnostic); + + drsprintk(ioc, + ioc_info(ioc, "disable writes to the diagnostic register\n")); + writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); + + drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n")); + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20); + if (ioc_state) { + ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", + __func__, ioc_state); + _base_dump_reg_set(ioc); + goto out; + } + + pci_cfg_access_unlock(ioc->pdev); + ioc_info(ioc, "diag reset: SUCCESS\n"); + return 0; + + out: + pci_cfg_access_unlock(ioc->pdev); + ioc_err(ioc, "diag reset: FAILED\n"); + return -EFAULT; +} + +/** + * mpt3sas_base_make_ioc_ready - put controller in READY state + * @ioc: per adapter object + * @type: FORCE_BIG_HAMMER or 
SOFT_RESET + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) +{ + u32 ioc_state; + int rc; + int count; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + if (ioc->pci_error_recovery) + return 0; + + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + dhsprintk(ioc, + ioc_info(ioc, "%s: ioc_state(0x%08x)\n", + __func__, ioc_state)); + + /* if in RESET state, it should move to READY state shortly */ + count = 0; + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) { + while ((ioc_state & MPI2_IOC_STATE_MASK) != + MPI2_IOC_STATE_READY) { + if (count++ == 10) { + ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", + __func__, ioc_state); + return -EFAULT; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + } + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) + return 0; + + if (ioc_state & MPI2_DOORBELL_USED) { + ioc_info(ioc, "unexpected doorbell active!\n"); + goto issue_diag_reset; + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + goto issue_diag_reset; + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { + /* + * if host reset is invoked while watch dog thread is waiting + * for IOC state to be changed to Fault state then driver has + * to wait here for CoreDump state to clear otherwise reset + * will be issued to the FW and FW move the IOC state to + * reset state without copying the FW logs to coredump region. + */ + if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) { + mpt3sas_print_coredump_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + mpt3sas_base_wait_for_coredump_completion(ioc, + __func__); + } + goto issue_diag_reset; + } + + if (type == FORCE_BIG_HAMMER) + goto issue_diag_reset; + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) + if (!(_base_send_ioc_reset(ioc, + MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) { + return 0; + } + + issue_diag_reset: + rc = _base_diag_reset(ioc); + return rc; +} + +/** + * _base_make_ioc_operational - put controller in OPERATIONAL state + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. 
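Illustrative note (not part of the patch): mpt3sas_base_make_ioc_ready above is essentially a dispatch on the current IOC state: READY is a no-op, RESET is polled until READY, FAULT and COREDUMP fall through to a diag reset (after waiting out the coredump), and OPERATIONAL gets a message unit reset unless FORCE_BIG_HAMMER was requested. A compact sketch of that decision table, with illustrative state codes:

#include <stdio.h>

enum ioc_state { ST_RESET, ST_READY, ST_OPERATIONAL, ST_FAULT, ST_COREDUMP };
enum reset_kind { SOFT, BIG_HAMMER };

static const char *make_ready_action(enum ioc_state s, enum reset_kind type)
{
	switch (s) {
	case ST_READY:
		return "already ready: nothing to do";
	case ST_RESET:
		return "poll until the IOC reaches READY";
	case ST_FAULT:
		return "print fault code, issue diag reset";
	case ST_COREDUMP:
		return "wait for coredump completion, then diag reset";
	case ST_OPERATIONAL:
		return (type == BIG_HAMMER) ?
			"forced: issue diag reset" :
			"send message unit reset; diag reset only on failure";
	}
	return "unknown state";
}

int main(void)
{
	printf("%s\n", make_ready_action(ST_OPERATIONAL, SOFT));
	printf("%s\n", make_ready_action(ST_COREDUMP, SOFT));
	return 0;
}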
+ */ +static int +_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) +{ + int r, i, index, rc; + unsigned long flags; + u32 reply_address; + u16 smid; + struct _tr_list *delayed_tr, *delayed_tr_next; + struct _sc_list *delayed_sc, *delayed_sc_next; + struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next; + u8 hide_flag; + struct adapter_reply_queue *reply_q; + Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + /* clean the delayed target reset list */ + list_for_each_entry_safe(delayed_tr, delayed_tr_next, + &ioc->delayed_tr_list, list) { + list_del(&delayed_tr->list); + kfree(delayed_tr); + } + + + list_for_each_entry_safe(delayed_tr, delayed_tr_next, + &ioc->delayed_tr_volume_list, list) { + list_del(&delayed_tr->list); + kfree(delayed_tr); + } + + list_for_each_entry_safe(delayed_sc, delayed_sc_next, + &ioc->delayed_sc_list, list) { + list_del(&delayed_sc->list); + kfree(delayed_sc); + } + + list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next, + &ioc->delayed_event_ack_list, list) { + list_del(&delayed_event_ack->list); + kfree(delayed_event_ack); + } + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + + /* hi-priority queue */ + INIT_LIST_HEAD(&ioc->hpr_free_list); + smid = ioc->hi_priority_smid; + for (i = 0; i < ioc->hi_priority_depth; i++, smid++) { + ioc->hpr_lookup[i].cb_idx = 0xFF; + ioc->hpr_lookup[i].smid = smid; + list_add_tail(&ioc->hpr_lookup[i].tracker_list, + &ioc->hpr_free_list); + } + + /* internal queue */ + INIT_LIST_HEAD(&ioc->internal_free_list); + smid = ioc->internal_smid; + for (i = 0; i < ioc->internal_depth; i++, smid++) { + ioc->internal_lookup[i].cb_idx = 0xFF; + ioc->internal_lookup[i].smid = smid; + list_add_tail(&ioc->internal_lookup[i].tracker_list, + &ioc->internal_free_list); + } + + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + /* initialize Reply Free Queue */ + for (i = 0, reply_address = (u32)ioc->reply_dma ; + i < ioc->reply_free_queue_depth ; i++, reply_address += + ioc->reply_sz) { + ioc->reply_free[i] = cpu_to_le32(reply_address); + if (ioc->is_mcpu_endpoint) + _base_clone_reply_to_sys_mem(ioc, + reply_address, i); + } + + /* initialize reply queues */ + if (ioc->is_driver_loading) + _base_assign_reply_queues(ioc); + + /* initialize Reply Post Free Queue */ + index = 0; + reply_post_free_contig = ioc->reply_post[0].reply_post_free; + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + /* + * If RDPQ is enabled, switch to the next allocation. + * Otherwise advance within the contiguous region. + */ + if (ioc->rdpq_array_enable) { + reply_q->reply_post_free = + ioc->reply_post[index++].reply_post_free; + } else { + reply_q->reply_post_free = reply_post_free_contig; + reply_post_free_contig += ioc->reply_post_queue_depth; + } + + reply_q->reply_post_host_index = 0; + for (i = 0; i < ioc->reply_post_queue_depth; i++) + reply_q->reply_post_free[i].Words = + cpu_to_le64(ULLONG_MAX); + if (!_base_is_controller_msix_enabled(ioc)) + goto skip_init_reply_post_free_queue; + } + skip_init_reply_post_free_queue: + + r = _base_send_ioc_init(ioc); + if (r) { + /* + * No need to check IOC state for fault state & issue + * diag reset during host reset. This check is need + * only during driver load time. 
+ */ + if (!ioc->is_driver_loading) + return r; + + rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc); + if (rc || (_base_send_ioc_init(ioc))) + return r; + } + + /* initialize reply free host index */ + ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1; + writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); + + /* initialize reply post host index */ + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (ioc->combined_reply_queue) + writel((reply_q->msix_index & 7)<< + MPI2_RPHI_MSIX_INDEX_SHIFT, + ioc->replyPostRegisterIndex[reply_q->msix_index/8]); + else + writel(reply_q->msix_index << + MPI2_RPHI_MSIX_INDEX_SHIFT, + &ioc->chip->ReplyPostHostIndex); + + if (!_base_is_controller_msix_enabled(ioc)) + goto skip_init_reply_post_host_index; + } + + skip_init_reply_post_host_index: + + mpt3sas_base_unmask_interrupts(ioc); + + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + r = _base_display_fwpkg_version(ioc); + if (r) + return r; + } + + r = _base_static_config_pages(ioc); + if (r) + return r; + + r = _base_event_notification(ioc); + if (r) + return r; + + if (!ioc->shost_recovery) { + + if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier + == 0x80) { + hide_flag = (u8) ( + le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) & + MFG_PAGE10_HIDE_SSDS_MASK); + if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK) + ioc->mfg_pg10_hide_flag = hide_flag; + } + + ioc->wait_for_discovery_to_complete = + _base_determine_wait_on_discovery(ioc); + + return r; /* scan_start and scan_finished support */ + } + + r = _base_send_port_enable(ioc); + if (r) + return r; + + return r; +} + +/** + * mpt3sas_base_free_resources - free resources controller resources + * @ioc: per adapter object + */ +void +mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) +{ + dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + /* synchronizing freeing resource with pci_access_mutex lock */ + mutex_lock(&ioc->pci_access_mutex); + if (ioc->chip_phys && ioc->chip) { + mpt3sas_base_mask_interrupts(ioc); + ioc->shost_recovery = 1; + mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); + ioc->shost_recovery = 0; + } + + mpt3sas_base_unmap_resources(ioc); + mutex_unlock(&ioc->pci_access_mutex); + return; +} + +/** + * mpt3sas_base_attach - attach controller instance + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. 
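Illustrative note (not part of the patch): the Reply Free Queue initialization above writes a ladder of DMA addresses, base + i * reply_sz, one per reply frame. A tiny sketch of that address layout, with example sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t reply_dma = 0x10000000u;   /* example DMA base */
	const int reply_sz = 128;                 /* example reply frame size */
	const int depth = 4;                      /* example queue depth */
	uint32_t reply_free[4];
	uint32_t addr;
	int i;

	for (i = 0, addr = reply_dma; i < depth; i++, addr += reply_sz)
		reply_free[i] = addr;             /* driver stores cpu_to_le32(addr) */

	for (i = 0; i < depth; i++)
		printf("reply_free[%d] = 0x%08x\n", i, reply_free[i]);
	return 0;
}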
+ */ +int +mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) +{ + int r, i, rc; + int cpu_id, last_cpu_id = 0; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + /* setup cpu_msix_table */ + ioc->cpu_count = num_online_cpus(); + for_each_online_cpu(cpu_id) + last_cpu_id = cpu_id; + ioc->cpu_msix_table_sz = last_cpu_id + 1; + ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL); + ioc->reply_queue_count = 1; + if (!ioc->cpu_msix_table) { + ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n"); + r = -ENOMEM; + goto out_free_resources; + } + + if (ioc->is_warpdrive) { + ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz, + sizeof(resource_size_t *), GFP_KERNEL); + if (!ioc->reply_post_host_index) { + ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n"); + r = -ENOMEM; + goto out_free_resources; + } + } + + ioc->smp_affinity_enable = smp_affinity_enable; + + ioc->rdpq_array_enable_assigned = 0; + ioc->use_32bit_dma = false; + ioc->dma_mask = 64; + if (ioc->is_aero_ioc) { + ioc->base_readl = &_base_readl_aero; + ioc->base_readl_ext_retry = &_base_readl_ext_retry; + } else { + ioc->base_readl = &_base_readl; + ioc->base_readl_ext_retry = &_base_readl; + } + r = mpt3sas_base_map_resources(ioc); + if (r) + goto out_free_resources; + + pci_set_drvdata(ioc->pdev, ioc->shost); + r = _base_get_ioc_facts(ioc); + if (r) { + rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc); + if (rc || (_base_get_ioc_facts(ioc))) + goto out_free_resources; + } + + switch (ioc->hba_mpi_version_belonged) { + case MPI2_VERSION: + ioc->build_sg_scmd = &_base_build_sg_scmd; + ioc->build_sg = &_base_build_sg; + ioc->build_zero_len_sge = &_base_build_zero_len_sge; + ioc->get_msix_index_for_smlio = &_base_get_msix_index; + break; + case MPI25_VERSION: + case MPI26_VERSION: + /* + * In SAS3.0, + * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and + * Target Status - all require the IEEE formatted scatter gather + * elements. + */ + ioc->build_sg_scmd = &_base_build_sg_scmd_ieee; + ioc->build_sg = &_base_build_sg_ieee; + ioc->build_nvme_prp = &_base_build_nvme_prp; + ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee; + ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t); + if (ioc->high_iops_queues) + ioc->get_msix_index_for_smlio = + &_base_get_high_iops_msix_index; + else + ioc->get_msix_index_for_smlio = &_base_get_msix_index; + break; + } + if (ioc->atomic_desc_capable) { + ioc->put_smid_default = &_base_put_smid_default_atomic; + ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic; + ioc->put_smid_fast_path = + &_base_put_smid_fast_path_atomic; + ioc->put_smid_hi_priority = + &_base_put_smid_hi_priority_atomic; + } else { + ioc->put_smid_default = &_base_put_smid_default; + ioc->put_smid_fast_path = &_base_put_smid_fast_path; + ioc->put_smid_hi_priority = &_base_put_smid_hi_priority; + if (ioc->is_mcpu_endpoint) + ioc->put_smid_scsi_io = + &_base_put_smid_mpi_ep_scsi_io; + else + ioc->put_smid_scsi_io = &_base_put_smid_scsi_io; + } + /* + * These function pointers for other requests that don't + * the require IEEE scatter gather elements. + * + * For example Configuration Pages and SAS IOUNIT Control don't. 
+ */ + ioc->build_sg_mpi = &_base_build_sg; + ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge; + + r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); + if (r) + goto out_free_resources; + + ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts, + sizeof(struct mpt3sas_port_facts), GFP_KERNEL); + if (!ioc->pfacts) { + r = -ENOMEM; + goto out_free_resources; + } + + for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) { + r = _base_get_port_facts(ioc, i); + if (r) { + rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc); + if (rc || (_base_get_port_facts(ioc, i))) + goto out_free_resources; + } + } + + r = _base_allocate_memory_pools(ioc); + if (r) + goto out_free_resources; + + if (irqpoll_weight > 0) + ioc->thresh_hold = irqpoll_weight; + else + ioc->thresh_hold = ioc->hba_queue_depth/4; + + _base_init_irqpolls(ioc); + init_waitqueue_head(&ioc->reset_wq); + + /* allocate memory pd handle bitmask list */ + ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + ioc->pd_handles_sz++; + ioc->pd_handles = kzalloc(ioc->pd_handles_sz, + GFP_KERNEL); + if (!ioc->pd_handles) { + r = -ENOMEM; + goto out_free_resources; + } + ioc->blocking_handles = kzalloc(ioc->pd_handles_sz, + GFP_KERNEL); + if (!ioc->blocking_handles) { + r = -ENOMEM; + goto out_free_resources; + } + + /* allocate memory for pending OS device add list */ + ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + ioc->pend_os_device_add_sz++; + ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, + GFP_KERNEL); + if (!ioc->pend_os_device_add) { + r = -ENOMEM; + goto out_free_resources; + } + + ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz; + ioc->device_remove_in_progress = + kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL); + if (!ioc->device_remove_in_progress) { + r = -ENOMEM; + goto out_free_resources; + } + + ioc->fwfault_debug = mpt3sas_fwfault_debug; + + /* base internal command bits */ + mutex_init(&ioc->base_cmds.mutex); + ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + + /* port_enable command bits */ + ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + + /* transport internal command bits */ + ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->transport_cmds.mutex); + + /* scsih internal command bits */ + ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->scsih_cmds.mutex); + + /* task management internal command bits */ + ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->tm_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->tm_cmds.mutex); + + /* config page internal command bits */ + ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->config_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->config_cmds.mutex); + + /* ctl module internal command bits */ + ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->ctl_cmds.mutex); + + if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply || + !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply || + !ioc->tm_cmds.reply || !ioc->config_cmds.reply || + !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) { + r = -ENOMEM; + goto 
out_free_resources; + } + + for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) + ioc->event_masks[i] = -1; + + /* here we enable the events we care about */ + _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY); + _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE); + _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); + _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); + _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); + _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST); + _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME); + _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); + _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); + _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); + _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD); + _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION); + _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR); + if (ioc->hba_mpi_version_belonged == MPI26_VERSION) { + if (ioc->is_gen35_ioc) { + _base_unmask_events(ioc, + MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE); + _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION); + _base_unmask_events(ioc, + MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST); + } + } + r = _base_make_ioc_operational(ioc); + if (r == -EAGAIN) { + r = _base_make_ioc_operational(ioc); + if (r) + goto out_free_resources; + } + + /* + * Copy current copy of IOCFacts in prev_fw_facts + * and it will be used during online firmware upgrade. + */ + memcpy(&ioc->prev_fw_facts, &ioc->facts, + sizeof(struct mpt3sas_facts)); + + ioc->non_operational_loop = 0; + ioc->ioc_coredump_loop = 0; + ioc->got_task_abort_from_ioctl = 0; + return 0; + + out_free_resources: + + ioc->remove_host = 1; + + mpt3sas_base_free_resources(ioc); + _base_release_memory_pools(ioc); + pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + if (ioc->is_warpdrive) + kfree(ioc->reply_post_host_index); + kfree(ioc->pd_handles); + kfree(ioc->blocking_handles); + kfree(ioc->device_remove_in_progress); + kfree(ioc->pend_os_device_add); + kfree(ioc->tm_cmds.reply); + kfree(ioc->transport_cmds.reply); + kfree(ioc->scsih_cmds.reply); + kfree(ioc->config_cmds.reply); + kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); + kfree(ioc->ctl_cmds.reply); + kfree(ioc->ctl_cmds.sense); + kfree(ioc->pfacts); + ioc->ctl_cmds.reply = NULL; + ioc->base_cmds.reply = NULL; + ioc->tm_cmds.reply = NULL; + ioc->scsih_cmds.reply = NULL; + ioc->transport_cmds.reply = NULL; + ioc->config_cmds.reply = NULL; + ioc->pfacts = NULL; + return r; +} + + +/** + * mpt3sas_base_detach - remove controller instance + * @ioc: per adapter object + */ +void +mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc) +{ + dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + mpt3sas_base_stop_watchdog(ioc); + mpt3sas_base_free_resources(ioc); + _base_release_memory_pools(ioc); + mpt3sas_free_enclosure_list(ioc); + pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + if (ioc->is_warpdrive) + kfree(ioc->reply_post_host_index); + kfree(ioc->pd_handles); + kfree(ioc->blocking_handles); + kfree(ioc->device_remove_in_progress); + kfree(ioc->pend_os_device_add); + kfree(ioc->pfacts); + kfree(ioc->ctl_cmds.reply); + kfree(ioc->ctl_cmds.sense); + kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); + kfree(ioc->tm_cmds.reply); + kfree(ioc->transport_cmds.reply); + kfree(ioc->scsih_cmds.reply); + kfree(ioc->config_cmds.reply); +} + +/** + * _base_pre_reset_handler - pre reset handler + * @ioc: per 
adapter object + */ +static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) +{ + mpt3sas_scsih_pre_reset_handler(ioc); + mpt3sas_ctl_pre_reset_handler(ioc); + dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__)); +} + +/** + * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands + * @ioc: per adapter object + */ +static void +_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc) +{ + dtmprintk(ioc, + ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__)); + if (ioc->transport_cmds.status & MPT3_CMD_PENDING) { + ioc->transport_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid); + complete(&ioc->transport_cmds.done); + } + if (ioc->base_cmds.status & MPT3_CMD_PENDING) { + ioc->base_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid); + complete(&ioc->base_cmds.done); + } + if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { + ioc->port_enable_failed = 1; + ioc->port_enable_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid); + if (ioc->is_driver_loading) { + ioc->start_scan_failed = + MPI2_IOCSTATUS_INTERNAL_ERROR; + ioc->start_scan = 0; + } else { + complete(&ioc->port_enable_cmds.done); + } + } + if (ioc->config_cmds.status & MPT3_CMD_PENDING) { + ioc->config_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid); + ioc->config_cmds.smid = USHRT_MAX; + complete(&ioc->config_cmds.done); + } +} + +/** + * _base_clear_outstanding_commands - clear all outstanding commands + * @ioc: per adapter object + */ +static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc) +{ + mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc); + mpt3sas_ctl_clear_outstanding_ioctls(ioc); + _base_clear_outstanding_mpt_commands(ioc); +} + +/** + * _base_reset_done_handler - reset done handler + * @ioc: per adapter object + */ +static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) +{ + mpt3sas_scsih_reset_done_handler(ioc); + mpt3sas_ctl_reset_done_handler(ioc); + dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__)); +} + +/** + * mpt3sas_wait_for_commands_to_complete - reset controller + * @ioc: Pointer to MPT_ADAPTER structure + * + * This function is waiting 10s for all pending commands to complete + * prior to putting controller in reset. + */ +void +mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) +{ + u32 ioc_state; + + ioc->pending_io_count = 0; + + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) + return; + + /* pending command count */ + ioc->pending_io_count = scsi_host_busy(ioc->shost); + + if (!ioc->pending_io_count) + return; + + /* wait for pending commands to complete */ + wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ); +} + +/** + * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts + * attributes during online firmware upgrade and update the corresponding + * IOC variables accordingly. 
+ * + * @ioc: Pointer to MPT_ADAPTER structure + */ +static int +_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) +{ + u16 pd_handles_sz; + void *pd_handles = NULL, *blocking_handles = NULL; + void *pend_os_device_add = NULL, *device_remove_in_progress = NULL; + struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts; + + if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) { + pd_handles_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + pd_handles_sz++; + + pd_handles = krealloc(ioc->pd_handles, pd_handles_sz, + GFP_KERNEL); + if (!pd_handles) { + ioc_info(ioc, + "Unable to allocate the memory for pd_handles of sz: %d\n", + pd_handles_sz); + return -ENOMEM; + } + memset(pd_handles + ioc->pd_handles_sz, 0, + (pd_handles_sz - ioc->pd_handles_sz)); + ioc->pd_handles = pd_handles; + + blocking_handles = krealloc(ioc->blocking_handles, + pd_handles_sz, GFP_KERNEL); + if (!blocking_handles) { + ioc_info(ioc, + "Unable to allocate the memory for " + "blocking_handles of sz: %d\n", + pd_handles_sz); + return -ENOMEM; + } + memset(blocking_handles + ioc->pd_handles_sz, 0, + (pd_handles_sz - ioc->pd_handles_sz)); + ioc->blocking_handles = blocking_handles; + ioc->pd_handles_sz = pd_handles_sz; + + pend_os_device_add = krealloc(ioc->pend_os_device_add, + pd_handles_sz, GFP_KERNEL); + if (!pend_os_device_add) { + ioc_info(ioc, + "Unable to allocate the memory for pend_os_device_add of sz: %d\n", + pd_handles_sz); + return -ENOMEM; + } + memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0, + (pd_handles_sz - ioc->pend_os_device_add_sz)); + ioc->pend_os_device_add = pend_os_device_add; + ioc->pend_os_device_add_sz = pd_handles_sz; + + device_remove_in_progress = krealloc( + ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL); + if (!device_remove_in_progress) { + ioc_info(ioc, + "Unable to allocate the memory for " + "device_remove_in_progress of sz: %d\n " + , pd_handles_sz); + return -ENOMEM; + } + memset(device_remove_in_progress + + ioc->device_remove_in_progress_sz, 0, + (pd_handles_sz - ioc->device_remove_in_progress_sz)); + ioc->device_remove_in_progress = device_remove_in_progress; + ioc->device_remove_in_progress_sz = pd_handles_sz; + } + + memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts)); + return 0; +} + +/** + * mpt3sas_base_hard_reset_handler - reset controller + * @ioc: Pointer to MPT_ADAPTER structure + * @type: FORCE_BIG_HAMMER or SOFT_RESET + * + * Return: 0 for success, non-zero for failure. 
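Illustrative note (not part of the patch): _base_check_ioc_facts_changes above regrows each device-handle bitmap when MaxDevHandle increases: the new size is MaxDevHandle/8 rounded up, and only the newly added tail is zeroed so existing state survives the upgrade. A user-space sketch of that pattern, using realloc() in place of krealloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned char *grow_handle_bitmap(unsigned char *map, size_t old_sz,
					 unsigned int max_dev_handle,
					 size_t *new_sz)
{
	size_t sz = max_dev_handle / 8 + (max_dev_handle % 8 ? 1 : 0);
	unsigned char *p;

	if (sz <= old_sz) {                   /* no growth needed */
		*new_sz = old_sz;
		return map;
	}
	p = realloc(map, sz);
	if (!p)
		return NULL;
	memset(p + old_sz, 0, sz - old_sz);   /* zero only the new tail */
	*new_sz = sz;
	return p;
}

int main(void)
{
	size_t sz = 0;
	unsigned char *map = grow_handle_bitmap(NULL, 0, 1000, &sz);

	printf("bitmap bytes for 1000 handles: %zu\n", sz);   /* 125 */
	free(map);
	return 0;
}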
+ */ +int +mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, + enum reset_type type) +{ + int r; + unsigned long flags; + u32 ioc_state; + u8 is_fault = 0, is_trigger = 0; + + dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__)); + + if (ioc->pci_error_recovery) { + ioc_err(ioc, "%s: pci error recovery reset\n", __func__); + r = 0; + goto out_unlocked; + } + + if (mpt3sas_fwfault_debug) + mpt3sas_halt_firmware(ioc); + + /* wait for an active reset in progress to complete */ + mutex_lock(&ioc->reset_in_progress_mutex); + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + ioc->shost_recovery = 1; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) && + (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED))) { + is_trigger = 1; + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT || + (ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + is_fault = 1; + ioc->htb_rel.trigger_info_dwords[1] = + (ioc_state & MPI2_DOORBELL_DATA_MASK); + } + } + _base_pre_reset_handler(ioc); + mpt3sas_wait_for_commands_to_complete(ioc); + mpt3sas_base_mask_interrupts(ioc); + mpt3sas_base_pause_mq_polling(ioc); + r = mpt3sas_base_make_ioc_ready(ioc, type); + if (r) + goto out; + _base_clear_outstanding_commands(ioc); + + /* If this hard reset is called while port enable is active, then + * there is no reason to call make_ioc_operational + */ + if (ioc->is_driver_loading && ioc->port_enable_failed) { + ioc->remove_host = 1; + r = -EFAULT; + goto out; + } + r = _base_get_ioc_facts(ioc); + if (r) + goto out; + + r = _base_check_ioc_facts_changes(ioc); + if (r) { + ioc_info(ioc, + "Some of the parameters got changed in this new firmware" + " image and it requires system reboot\n"); + goto out; + } + if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable) + panic("%s: Issue occurred with flashing controller firmware." + "Please reboot the system and ensure that the correct" + " firmware version is running\n", ioc->name); + + r = _base_make_ioc_operational(ioc); + if (!r) + _base_reset_done_handler(ioc); + + out: + ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED"); + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + ioc->shost_recovery = 0; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + ioc->ioc_reset_count++; + mutex_unlock(&ioc->reset_in_progress_mutex); + mpt3sas_base_resume_mq_polling(ioc); + + out_unlocked: + if ((r == 0) && is_trigger) { + if (is_fault) + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT); + else + mpt3sas_trigger_master(ioc, + MASTER_TRIGGER_ADAPTER_RESET); + } + dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__)); + return r; +} diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h new file mode 100644 index 000000000..1be0850ca --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -0,0 +1,2072 @@ +/* + * This is the Fusion MPT base driver providing common API layer interface + * for access to MPT (Message Passing Technology) firmware. 
+ * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#ifndef MPT3SAS_BASE_H_INCLUDED +#define MPT3SAS_BASE_H_INCLUDED + +#include "mpi/mpi2_type.h" +#include "mpi/mpi2.h" +#include "mpi/mpi2_ioc.h" +#include "mpi/mpi2_cnfg.h" +#include "mpi/mpi2_init.h" +#include "mpi/mpi2_raid.h" +#include "mpi/mpi2_tool.h" +#include "mpi/mpi2_sas.h" +#include "mpi/mpi2_pci.h" +#include "mpi/mpi2_image.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mpt3sas_debug.h" +#include "mpt3sas_trigger_diag.h" +#include "mpt3sas_trigger_pages.h" + +/* driver versioning info */ +#define MPT3SAS_DRIVER_NAME "mpt3sas" +#define MPT3SAS_AUTHOR "Avago Technologies " +#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" +#define MPT3SAS_DRIVER_VERSION "43.100.00.00" +#define MPT3SAS_MAJOR_VERSION 43 +#define MPT3SAS_MINOR_VERSION 100 +#define MPT3SAS_BUILD_VERSION 0 +#define MPT3SAS_RELEASE_VERSION 00 + +#define MPT2SAS_DRIVER_NAME "mpt2sas" +#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" +#define MPT2SAS_DRIVER_VERSION "20.102.00.00" +#define MPT2SAS_MAJOR_VERSION 20 +#define MPT2SAS_MINOR_VERSION 102 +#define MPT2SAS_BUILD_VERSION 0 +#define MPT2SAS_RELEASE_VERSION 00 + +/* CoreDump: Default timeout */ +#define MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS (15) /*15 seconds*/ +#define MPT3SAS_COREDUMP_LOOP_DONE (0xFF) +#define MPT3SAS_TIMESYNC_TIMEOUT_SECONDS (10) /* 10 seconds */ +#define MPT3SAS_TIMESYNC_UPDATE_INTERVAL (900) /* 15 minutes */ +#define MPT3SAS_TIMESYNC_UNIT_MASK (0x80) /* bit 7 */ +#define MPT3SAS_TIMESYNC_MASK (0x7F) /* 0 - 6 bits */ +#define SECONDS_PER_MIN (60) +#define SECONDS_PER_HOUR (3600) +#define MPT3SAS_COREDUMP_LOOP_DONE (0xFF) +#define MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP (0x81) + +/* + * Set MPT3SAS_SG_DEPTH value based on user input. 
+ */ +#define MPT_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE +#define MPT_MIN_PHYS_SEGMENTS 16 +#define MPT_KDUMP_MIN_PHYS_SEGMENTS 32 + +#define MCPU_MAX_CHAINS_PER_IO 3 + +#ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE +#define MPT3SAS_SG_DEPTH CONFIG_SCSI_MPT3SAS_MAX_SGE +#else +#define MPT3SAS_SG_DEPTH MPT_MAX_PHYS_SEGMENTS +#endif + +#ifdef CONFIG_SCSI_MPT2SAS_MAX_SGE +#define MPT2SAS_SG_DEPTH CONFIG_SCSI_MPT2SAS_MAX_SGE +#else +#define MPT2SAS_SG_DEPTH MPT_MAX_PHYS_SEGMENTS +#endif + +/* + * Generic Defines + */ +#define MPT3SAS_SATA_QUEUE_DEPTH 32 +#define MPT3SAS_SAS_QUEUE_DEPTH 254 +#define MPT3SAS_RAID_QUEUE_DEPTH 128 +#define MPT3SAS_KDUMP_SCSI_IO_DEPTH 200 + +#define MPT3SAS_RAID_MAX_SECTORS 8192 +#define MPT3SAS_HOST_PAGE_SIZE_4K 12 +#define MPT3SAS_NVME_QUEUE_DEPTH 128 +#define MPT_NAME_LENGTH 32 /* generic length of strings */ +#define MPT_STRING_LENGTH 64 +#define MPI_FRAME_START_OFFSET 256 +#define REPLY_FREE_POOL_SIZE 512 /*(32 maxcredix *4)*(4 times)*/ + +#define MPT_MAX_CALLBACKS 32 + +#define MPT_MAX_HBA_NUM_PHYS 32 + +#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */ +/* reserved for issuing internally framed scsi io cmds */ +#define INTERNAL_SCSIIO_CMDS_COUNT 3 + +#define MPI3_HIM_MASK 0xFFFFFFFF /* mask every bit*/ + +#define MPT3SAS_INVALID_DEVICE_HANDLE 0xFFFF + +#define MAX_CHAIN_ELEMT_SZ 16 +#define DEFAULT_NUM_FWCHAIN_ELEMTS 8 + +#define IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT 6 +#define FW_IMG_HDR_READ_TIMEOUT 15 + +#define IOC_OPERATIONAL_WAIT_COUNT 10 + +/* + * NVMe defines + */ +#define NVME_PRP_SIZE 8 /* PRP size */ +#define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */ +#define NVME_TASK_ABORT_MIN_TIMEOUT 6 +#define NVME_TASK_ABORT_MAX_TIMEOUT 60 +#define NVME_TASK_MNGT_CUSTOM_MASK (0x0010) +#define NVME_PRP_PAGE_SIZE 4096 /* Page size */ + +struct mpt3sas_nvme_cmd { + u8 rsvd[24]; + __le64 prp1; + __le64 prp2; +}; + +/* + * logging format + */ +#define ioc_err(ioc, fmt, ...) \ + pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define ioc_notice(ioc, fmt, ...) \ + pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define ioc_warn(ioc, fmt, ...) \ + pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define ioc_info(ioc, fmt, ...) 
\ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__) + +/* + * WarpDrive Specific Log codes + */ + +#define MPT2_WARPDRIVE_LOGENTRY (0x8002) +#define MPT2_WARPDRIVE_LC_SSDT (0x41) +#define MPT2_WARPDRIVE_LC_SSDLW (0x43) +#define MPT2_WARPDRIVE_LC_SSDLF (0x44) +#define MPT2_WARPDRIVE_LC_BRMF (0x4D) + +/* + * per target private data + */ +#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01 +#define MPT_TARGET_FLAGS_VOLUME 0x02 +#define MPT_TARGET_FLAGS_DELETED 0x04 +#define MPT_TARGET_FASTPATH_IO 0x08 +#define MPT_TARGET_FLAGS_PCIE_DEVICE 0x10 + +#define SAS2_PCI_DEVICE_B0_REVISION (0x01) +#define SAS3_PCI_DEVICE_C0_REVISION (0x02) + +/* Atlas PCIe Switch Management Port */ +#define MPI26_ATLAS_PCIe_SWITCH_DEVID (0x00B2) + +/* + * Intel HBA branding + */ +#define MPT2SAS_INTEL_RMS25JB080_BRANDING \ + "Intel(R) Integrated RAID Module RMS25JB080" +#define MPT2SAS_INTEL_RMS25JB040_BRANDING \ + "Intel(R) Integrated RAID Module RMS25JB040" +#define MPT2SAS_INTEL_RMS25KB080_BRANDING \ + "Intel(R) Integrated RAID Module RMS25KB080" +#define MPT2SAS_INTEL_RMS25KB040_BRANDING \ + "Intel(R) Integrated RAID Module RMS25KB040" +#define MPT2SAS_INTEL_RMS25LB040_BRANDING \ + "Intel(R) Integrated RAID Module RMS25LB040" +#define MPT2SAS_INTEL_RMS25LB080_BRANDING \ + "Intel(R) Integrated RAID Module RMS25LB080" +#define MPT2SAS_INTEL_RMS2LL080_BRANDING \ + "Intel Integrated RAID Module RMS2LL080" +#define MPT2SAS_INTEL_RMS2LL040_BRANDING \ + "Intel Integrated RAID Module RMS2LL040" +#define MPT2SAS_INTEL_RS25GB008_BRANDING \ + "Intel(R) RAID Controller RS25GB008" +#define MPT2SAS_INTEL_SSD910_BRANDING \ + "Intel(R) SSD 910 Series" + +#define MPT3SAS_INTEL_RMS3JC080_BRANDING \ + "Intel(R) Integrated RAID Module RMS3JC080" +#define MPT3SAS_INTEL_RS3GC008_BRANDING \ + "Intel(R) RAID Controller RS3GC008" +#define MPT3SAS_INTEL_RS3FC044_BRANDING \ + "Intel(R) RAID Controller RS3FC044" +#define MPT3SAS_INTEL_RS3UC080_BRANDING \ + "Intel(R) RAID Controller RS3UC080" + +/* + * Intel HBA SSDIDs + */ +#define MPT2SAS_INTEL_RMS25JB080_SSDID 0x3516 +#define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517 +#define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518 +#define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519 +#define MPT2SAS_INTEL_RMS25LB040_SSDID 0x351A +#define MPT2SAS_INTEL_RMS25LB080_SSDID 0x351B +#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E +#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F +#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000 +#define MPT2SAS_INTEL_SSD910_SSDID 0x3700 + +#define MPT3SAS_INTEL_RMS3JC080_SSDID 0x3521 +#define MPT3SAS_INTEL_RS3GC008_SSDID 0x3522 +#define MPT3SAS_INTEL_RS3FC044_SSDID 0x3523 +#define MPT3SAS_INTEL_RS3UC080_SSDID 0x3524 + +/* + * Dell HBA branding + */ +#define MPT2SAS_DELL_BRANDING_SIZE 32 + +#define MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING "Dell 6Gbps SAS HBA" +#define MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING "Dell PERC H200 Adapter" +#define MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING "Dell PERC H200 Integrated" +#define MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING "Dell PERC H200 Modular" +#define MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING "Dell PERC H200 Embedded" +#define MPT2SAS_DELL_PERC_H200_BRANDING "Dell PERC H200" +#define MPT2SAS_DELL_6GBPS_SAS_BRANDING "Dell 6Gbps SAS" + +#define MPT3SAS_DELL_12G_HBA_BRANDING \ + "Dell 12Gbps HBA" + +/* + * Dell HBA SSDIDs + */ +#define MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID 0x1F1C +#define MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID 0x1F1D +#define MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID 0x1F1E +#define MPT2SAS_DELL_PERC_H200_MODULAR_SSDID 0x1F1F +#define 
MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID 0x1F20 +#define MPT2SAS_DELL_PERC_H200_SSDID 0x1F21 +#define MPT2SAS_DELL_6GBPS_SAS_SSDID 0x1F22 + +#define MPT3SAS_DELL_12G_HBA_SSDID 0x1F46 + +/* + * Cisco HBA branding + */ +#define MPT3SAS_CISCO_12G_8E_HBA_BRANDING \ + "Cisco 9300-8E 12G SAS HBA" +#define MPT3SAS_CISCO_12G_8I_HBA_BRANDING \ + "Cisco 9300-8i 12G SAS HBA" +#define MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING \ + "Cisco 12G Modular SAS Pass through Controller" +#define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING \ + "UCS C3X60 12G SAS Pass through Controller" +/* + * Cisco HBA SSSDIDs + */ +#define MPT3SAS_CISCO_12G_8E_HBA_SSDID 0x14C +#define MPT3SAS_CISCO_12G_8I_HBA_SSDID 0x154 +#define MPT3SAS_CISCO_12G_AVILA_HBA_SSDID 0x155 +#define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID 0x156 + +/* + * status bits for ioc->diag_buffer_status + */ +#define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01) +#define MPT3_DIAG_BUFFER_IS_RELEASED (0x02) +#define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04) +#define MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED (0x08) +#define MPT3_DIAG_BUFFER_IS_APP_OWNED (0x10) + +/* + * HP HBA branding + */ +#define MPT2SAS_HP_3PAR_SSVID 0x1590 + +#define MPT2SAS_HP_2_4_INTERNAL_BRANDING \ + "HP H220 Host Bus Adapter" +#define MPT2SAS_HP_2_4_EXTERNAL_BRANDING \ + "HP H221 Host Bus Adapter" +#define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING \ + "HP H222 Host Bus Adapter" +#define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING \ + "HP H220i Host Bus Adapter" +#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING \ + "HP H210i Host Bus Adapter" + +/* + * HO HBA SSDIDs + */ +#define MPT2SAS_HP_2_4_INTERNAL_SSDID 0x0041 +#define MPT2SAS_HP_2_4_EXTERNAL_SSDID 0x0042 +#define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID 0x0043 +#define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID 0x0044 +#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046 + +/* + * Combined Reply Queue constants, + * There are twelve Supplemental Reply Post Host Index Registers + * and each register is at offset 0x10 bytes from the previous one. + */ +#define MAX_COMBINED_MSIX_VECTORS(gen35) ((gen35 == 1) ? 
16 : 8) +#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12 +#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16 +#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10) +#define MPT3_MIN_IRQS 1 + +/* OEM Identifiers */ +#define MFG10_OEM_ID_INVALID (0x00000000) +#define MFG10_OEM_ID_DELL (0x00000001) +#define MFG10_OEM_ID_FSC (0x00000002) +#define MFG10_OEM_ID_SUN (0x00000003) +#define MFG10_OEM_ID_IBM (0x00000004) + +/* GENERIC Flags 0*/ +#define MFG10_GF0_OCE_DISABLED (0x00000001) +#define MFG10_GF0_R1E_DRIVE_COUNT (0x00000002) +#define MFG10_GF0_R10_DISPLAY (0x00000004) +#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008) +#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010) + +#define VIRTUAL_IO_FAILED_RETRY (0x32010081) + +/* High IOPs definitions */ +#define MPT3SAS_DEVICE_HIGH_IOPS_DEPTH 8 +#define MPT3SAS_HIGH_IOPS_REPLY_QUEUES 8 +#define MPT3SAS_HIGH_IOPS_BATCH_COUNT 16 +#define MPT3SAS_GEN35_MAX_MSIX_QUEUES 128 +#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16 + +/* OEM Specific Flags will come from OEM specific header files */ +struct Mpi2ManufacturingPage10_t { + MPI2_CONFIG_PAGE_HEADER Header; /* 00h */ + U8 OEMIdentifier; /* 04h */ + U8 Reserved1; /* 05h */ + U16 Reserved2; /* 08h */ + U32 Reserved3; /* 0Ch */ + U32 GenericFlags0; /* 10h */ + U32 GenericFlags1; /* 14h */ + U32 Reserved4; /* 18h */ + U32 OEMSpecificFlags0; /* 1Ch */ + U32 OEMSpecificFlags1; /* 20h */ + U32 Reserved5[18]; /* 24h - 60h*/ +}; + + +/* Miscellaneous options */ +struct Mpi2ManufacturingPage11_t { + MPI2_CONFIG_PAGE_HEADER Header; /* 00h */ + __le32 Reserved1; /* 04h */ + u8 Reserved2; /* 08h */ + u8 EEDPTagMode; /* 09h */ + u8 Reserved3; /* 0Ah */ + u8 Reserved4; /* 0Bh */ + __le32 Reserved5[8]; /* 0Ch-2Ch */ + u16 AddlFlags2; /* 2Ch */ + u8 AddlFlags3; /* 2Eh */ + u8 Reserved6; /* 2Fh */ + __le32 Reserved7[7]; /* 30h - 4Bh */ + u8 NVMeAbortTO; /* 4Ch */ + u8 NumPerDevEvents; /* 4Dh */ + u8 HostTraceBufferDecrementSizeKB; /* 4Eh */ + u8 HostTraceBufferFlags; /* 4Fh */ + u16 HostTraceBufferMaxSizeKB; /* 50h */ + u16 HostTraceBufferMinSizeKB; /* 52h */ + u8 CoreDumpTOSec; /* 54h */ + u8 TimeSyncInterval; /* 55h */ + u16 Reserved9; /* 56h */ + __le32 Reserved10; /* 58h */ +}; + +/** + * struct MPT3SAS_TARGET - starget private hostdata + * @starget: starget object + * @sas_address: target sas address + * @raid_device: raid_device pointer to access volume data + * @handle: device handle + * @num_luns: number luns + * @flags: MPT_TARGET_FLAGS_XXX flags + * @deleted: target flaged for deletion + * @tm_busy: target is busy with TM request. 
+ * @port: hba port entry containing target's port number info + * @sas_dev: The sas_device associated with this target + * @pcie_dev: The pcie device associated with this target + */ +struct MPT3SAS_TARGET { + struct scsi_target *starget; + u64 sas_address; + struct _raid_device *raid_device; + u16 handle; + int num_luns; + u32 flags; + u8 deleted; + u8 tm_busy; + struct hba_port *port; + struct _sas_device *sas_dev; + struct _pcie_device *pcie_dev; +}; + + +/* + * per device private data + */ +#define MPT_DEVICE_FLAGS_INIT 0x01 + +#define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003) +#define MFG_PAGE10_HIDE_ALL_DISKS (0x00) +#define MFG_PAGE10_EXPOSE_ALL_DISKS (0x01) +#define MFG_PAGE10_HIDE_IF_VOL_PRESENT (0x02) + +/** + * struct MPT3SAS_DEVICE - sdev private hostdata + * @sas_target: starget private hostdata + * @lun: lun number + * @flags: MPT_DEVICE_XXX flags + * @configured_lun: lun is configured + * @block: device is in SDEV_BLOCK state + * @tlr_snoop_check: flag used in determining whether to disable TLR + * @eedp_enable: eedp support enable bit + * @eedp_type: 0(type_1), 1(type_2), 2(type_3) + * @eedp_block_length: block size + * @ata_command_pending: SATL passthrough outstanding for device + */ +struct MPT3SAS_DEVICE { + struct MPT3SAS_TARGET *sas_target; + unsigned int lun; + u32 flags; + u8 configured_lun; + u8 block; + u8 tlr_snoop_check; + u8 ignore_delay_remove; + /* Iopriority Command Handling */ + u8 ncq_prio_enable; + /* + * Bug workaround for SATL handling: the mpt2/3sas firmware + * doesn't return BUSY or TASK_SET_FULL for subsequent + * commands while a SATL pass through is in operation as the + * spec requires, it simply does nothing with them until the + * pass through completes, causing them possibly to timeout if + * the passthrough is a long executing command (like format or + * secure erase). This variable allows us to do the right + * thing while a SATL command is pending. + */ + unsigned long ata_command_pending; + +}; + +#define MPT3_CMD_NOT_USED 0x8000 /* free */ +#define MPT3_CMD_COMPLETE 0x0001 /* completed */ +#define MPT3_CMD_PENDING 0x0002 /* pending */ +#define MPT3_CMD_REPLY_VALID 0x0004 /* reply is valid */ +#define MPT3_CMD_RESET 0x0008 /* host reset dropped the command */ +#define MPT3_CMD_COMPLETE_ASYNC 0x0010 /* tells whether cmd completes in same thread or not */ + +/** + * struct _internal_cmd - internal commands struct + * @mutex: mutex + * @done: completion + * @reply: reply message pointer + * @sense: sense data + * @status: MPT3_CMD_XXX status + * @smid: system message id + */ +struct _internal_cmd { + struct mutex mutex; + struct completion done; + void *reply; + void *sense; + u16 status; + u16 smid; +}; + + + +/** + * struct _sas_device - attached device information + * @list: sas device list + * @starget: starget object + * @sas_address: device sas address + * @device_name: retrieved from the SAS IDENTIFY frame. 
+ * @handle: device handle + * @sas_address_parent: sas address of parent expander or sas host + * @enclosure_handle: enclosure handle + * @enclosure_logical_id: enclosure logical identifier + * @volume_handle: volume handle (valid when hidden raid member) + * @volume_wwid: volume unique identifier + * @device_info: bitfield provides detailed info about the device + * @id: target id + * @channel: target channel + * @slot: number number + * @phy: phy identifier provided in sas device page 0 + * @responding: used in _scsih_sas_device_mark_responding + * @fast_path: fast path feature enable bit + * @pfa_led_on: flag for PFA LED status + * @pend_sas_rphy_add: flag to check if device is in sas_rphy_add() + * addition routine. + * @chassis_slot: chassis slot + * @is_chassis_slot_valid: chassis slot valid or not + * @port: hba port entry containing device's port number info + * @rphy: device's sas_rphy address used to identify this device structure in + * target_alloc callback function + */ +struct _sas_device { + struct list_head list; + struct scsi_target *starget; + u64 sas_address; + u64 device_name; + u16 handle; + u64 sas_address_parent; + u16 enclosure_handle; + u64 enclosure_logical_id; + u16 volume_handle; + u64 volume_wwid; + u32 device_info; + int id; + int channel; + u16 slot; + u8 phy; + u8 responding; + u8 fast_path; + u8 pfa_led_on; + u8 pend_sas_rphy_add; + u8 enclosure_level; + u8 chassis_slot; + u8 is_chassis_slot_valid; + u8 connector_name[5]; + struct kref refcount; + u8 port_type; + struct hba_port *port; + struct sas_rphy *rphy; +}; + +static inline void sas_device_get(struct _sas_device *s) +{ + kref_get(&s->refcount); +} + +static inline void sas_device_free(struct kref *r) +{ + kfree(container_of(r, struct _sas_device, refcount)); +} + +static inline void sas_device_put(struct _sas_device *s) +{ + kref_put(&s->refcount, sas_device_free); +} + +/* + * struct _pcie_device - attached PCIe device information + * @list: pcie device list + * @starget: starget object + * @wwid: device WWID + * @handle: device handle + * @device_info: bitfield provides detailed info about the device + * @id: target id + * @channel: target channel + * @slot: slot number + * @port_num: port number + * @responding: used in _scsih_pcie_device_mark_responding + * @fast_path: fast path feature enable bit + * @nvme_mdts: MaximumDataTransferSize from PCIe Device Page 2 for + * NVMe device only + * @enclosure_handle: enclosure handle + * @enclosure_logical_id: enclosure logical identifier + * @enclosure_level: The level of device's enclosure from the controller + * @connector_name: ASCII value of the Connector's name + * @serial_number: pointer of serial number string allocated runtime + * @access_status: Device's Access Status + * @shutdown_latency: NVMe device's RTD3 Entry Latency + * @refcount: reference count for deletion + */ +struct _pcie_device { + struct list_head list; + struct scsi_target *starget; + u64 wwid; + u16 handle; + u32 device_info; + int id; + int channel; + u16 slot; + u8 port_num; + u8 responding; + u8 fast_path; + u32 nvme_mdts; + u16 enclosure_handle; + u64 enclosure_logical_id; + u8 enclosure_level; + u8 connector_name[4]; + u8 *serial_number; + u8 reset_timeout; + u8 access_status; + u16 shutdown_latency; + struct kref refcount; +}; +/** + * pcie_device_get - Increment the pcie device reference count + * + * @p: pcie_device object + * + * When ever this function called it will increment the + * reference count of the pcie device for which this function called. 
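+ *
+ * A minimal usage sketch (editorial illustration assuming the usual
+ * lookup pattern, not a prototype taken from this header; pcie_device,
+ * handle and flags are declared by the caller): a reference is taken
+ * while walking the adapter's list under ioc->pcie_device_lock and is
+ * dropped with pcie_device_put() once the caller is finished:
+ *
+ *	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ *	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
+ *		if (pcie_device->handle == handle) {
+ *			pcie_device_get(pcie_device);
+ *			break;
+ *		}
+ *	}
+ *	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ *	...
+ *	pcie_device_put(pcie_device);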
+ * + */ +static inline void pcie_device_get(struct _pcie_device *p) +{ + kref_get(&p->refcount); +} + +/** + * pcie_device_free - Release the pcie device object + * @r - kref object + * + * Free's the pcie device object. It will be called when reference count + * reaches to zero. + */ +static inline void pcie_device_free(struct kref *r) +{ + kfree(container_of(r, struct _pcie_device, refcount)); +} + +/** + * pcie_device_put - Decrement the pcie device reference count + * + * @p: pcie_device object + * + * When ever this function called it will decrement the + * reference count of the pcie device for which this function called. + * + * When refernce count reaches to Zero, this will call pcie_device_free to the + * pcie_device object. + */ +static inline void pcie_device_put(struct _pcie_device *p) +{ + kref_put(&p->refcount, pcie_device_free); +} +/** + * struct _raid_device - raid volume link list + * @list: sas device list + * @starget: starget object + * @sdev: scsi device struct (volumes are single lun) + * @wwid: unique identifier for the volume + * @handle: device handle + * @block_size: Block size of the volume + * @id: target id + * @channel: target channel + * @volume_type: the raid level + * @device_info: bitfield provides detailed info about the hidden components + * @num_pds: number of hidden raid components + * @responding: used in _scsih_raid_device_mark_responding + * @percent_complete: resync percent complete + * @direct_io_enabled: Whether direct io to PDs are allowed or not + * @stripe_exponent: X where 2powX is the stripe sz in blocks + * @block_exponent: X where 2powX is the block sz in bytes + * @max_lba: Maximum number of LBA in the volume + * @stripe_sz: Stripe Size of the volume + * @device_info: Device info of the volume member disk + * @pd_handle: Array of handles of the physical drives for direct I/O in le16 + */ +#define MPT_MAX_WARPDRIVE_PDS 8 +struct _raid_device { + struct list_head list; + struct scsi_target *starget; + struct scsi_device *sdev; + u64 wwid; + u16 handle; + u16 block_sz; + int id; + int channel; + u8 volume_type; + u8 num_pds; + u8 responding; + u8 percent_complete; + u8 direct_io_enabled; + u8 stripe_exponent; + u8 block_exponent; + u64 max_lba; + u32 stripe_sz; + u32 device_info; + u16 pd_handle[MPT_MAX_WARPDRIVE_PDS]; +}; + +/** + * struct _boot_device - boot device info + * + * @channel: sas, raid, or pcie channel + * @device: holds pointer for struct _sas_device, struct _raid_device or + * struct _pcie_device + */ +struct _boot_device { + int channel; + void *device; +}; + +/** + * struct _sas_port - wide/narrow sas port information + * @port_list: list of ports belonging to expander + * @num_phys: number of phys belonging to this port + * @remote_identify: attached device identification + * @rphy: sas transport rphy object + * @port: sas transport wide/narrow port object + * @hba_port: hba port entry containing port's port number info + * @phy_list: _sas_phy list objects belonging to this port + */ +struct _sas_port { + struct list_head port_list; + u8 num_phys; + struct sas_identify remote_identify; + struct sas_rphy *rphy; + struct sas_port *port; + struct hba_port *hba_port; + struct list_head phy_list; +}; + +/** + * struct _sas_phy - phy information + * @port_siblings: list of phys belonging to a port + * @identify: phy identification + * @remote_identify: attached device identification + * @phy: sas transport phy object + * @phy_id: unique phy id + * @handle: device handle for this phy + * @attached_handle: device handle for 
attached device + * @phy_belongs_to_port: port has been created for this phy + * @port: hba port entry containing port number info + */ +struct _sas_phy { + struct list_head port_siblings; + struct sas_identify identify; + struct sas_identify remote_identify; + struct sas_phy *phy; + u8 phy_id; + u16 handle; + u16 attached_handle; + u8 phy_belongs_to_port; + u8 hba_vphy; + struct hba_port *port; +}; + +/** + * struct _sas_node - sas_host/expander information + * @list: list of expanders + * @parent_dev: parent device class + * @num_phys: number phys belonging to this sas_host/expander + * @sas_address: sas address of this sas_host/expander + * @handle: handle for this sas_host/expander + * @sas_address_parent: sas address of parent expander or sas host + * @enclosure_handle: handle for this a member of an enclosure + * @device_info: bitwise defining capabilities of this sas_host/expander + * @responding: used in _scsih_expander_device_mark_responding + * @nr_phys_allocated: Allocated memory for this many count phys + * @phy: a list of phys that make up this sas_host/expander + * @sas_port_list: list of ports attached to this sas_host/expander + * @port: hba port entry containing node's port number info + * @rphy: sas_rphy object of this expander + */ +struct _sas_node { + struct list_head list; + struct device *parent_dev; + u8 num_phys; + u64 sas_address; + u16 handle; + u64 sas_address_parent; + u16 enclosure_handle; + u64 enclosure_logical_id; + u8 responding; + u8 nr_phys_allocated; + struct hba_port *port; + struct _sas_phy *phy; + struct list_head sas_port_list; + struct sas_rphy *rphy; +}; + +/** + * struct _enclosure_node - enclosure information + * @list: list of enclosures + * @pg0: enclosure pg0; + */ +struct _enclosure_node { + struct list_head list; + Mpi2SasEnclosurePage0_t pg0; +}; + +/** + * enum reset_type - reset state + * @FORCE_BIG_HAMMER: issue diagnostic reset + * @SOFT_RESET: issue message_unit_reset, if fails to to big hammer + */ +enum reset_type { + FORCE_BIG_HAMMER, + SOFT_RESET, +}; + +/** + * struct pcie_sg_list - PCIe SGL buffer (contiguous per I/O) + * @pcie_sgl: PCIe native SGL for NVMe devices + * @pcie_sgl_dma: physical address + */ +struct pcie_sg_list { + void *pcie_sgl; + dma_addr_t pcie_sgl_dma; +}; + +/** + * struct chain_tracker - firmware chain tracker + * @chain_buffer: chain buffer + * @chain_buffer_dma: physical address + * @tracker_list: list of free request (ioc->free_chain_list) + */ +struct chain_tracker { + void *chain_buffer; + dma_addr_t chain_buffer_dma; +}; + +struct chain_lookup { + struct chain_tracker *chains_per_smid; + atomic_t chain_offset; +}; + +/** + * struct scsiio_tracker - scsi mf request tracker + * @smid: system message id + * @cb_idx: callback index + * @direct_io: To indicate whether I/O is direct (WARPDRIVE) + * @chain_list: list of associated firmware chain tracker + * @msix_io: IO's msix + */ +struct scsiio_tracker { + u16 smid; + struct scsi_cmnd *scmd; + u8 cb_idx; + u8 direct_io; + struct pcie_sg_list pcie_sg_list; + struct list_head chain_list; + u16 msix_io; +}; + +/** + * struct request_tracker - firmware request tracker + * @smid: system message id + * @cb_idx: callback index + * @tracker_list: list of free request (ioc->free_list) + */ +struct request_tracker { + u16 smid; + u8 cb_idx; + struct list_head tracker_list; +}; + +/** + * struct _tr_list - target reset list + * @handle: device handle + * @state: state machine + */ +struct _tr_list { + struct list_head list; + u16 handle; + u16 state; +}; + +/** + * 
struct _sc_list - delayed SAS_IO_UNIT_CONTROL message list + * @handle: device handle + */ +struct _sc_list { + struct list_head list; + u16 handle; +}; + +/** + * struct _event_ack_list - delayed event acknowledgment list + * @Event: Event ID + * @EventContext: used to track the event uniquely + */ +struct _event_ack_list { + struct list_head list; + U16 Event; + U32 EventContext; +}; + +/** + * struct adapter_reply_queue - the reply queue struct + * @ioc: per adapter object + * @msix_index: msix index into vector table + * @vector: irq vector + * @reply_post_host_index: head index in the pool where FW completes IO + * @reply_post_free: reply post base virt address + * @name: the name registered to request_irq() + * @busy: isr is actively processing replies on another cpu + * @os_irq: irq number + * @irqpoll: irq_poll object + * @irq_poll_scheduled: Tells whether irq poll is scheduled or not + * @is_iouring_poll_q: Tells whether reply queues is assigned + * to io uring poll queues or not + * @list: this list +*/ +struct adapter_reply_queue { + struct MPT3SAS_ADAPTER *ioc; + u8 msix_index; + u32 reply_post_host_index; + Mpi2ReplyDescriptorsUnion_t *reply_post_free; + char name[MPT_NAME_LENGTH]; + atomic_t busy; + u32 os_irq; + struct irq_poll irqpoll; + bool irq_poll_scheduled; + bool irq_line_enable; + bool is_iouring_poll_q; + struct list_head list; +}; + +/** + * struct io_uring_poll_queue - the io uring poll queue structure + * @busy: Tells whether io uring poll queue is busy or not + * @pause: Tells whether IOs are paused on io uring poll queue or not + * @reply_q: reply queue mapped for io uring poll queue + */ +struct io_uring_poll_queue { + atomic_t busy; + atomic_t pause; + struct adapter_reply_queue *reply_q; +}; + +typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); + +/* SAS3.0 support */ +typedef int (*MPT_BUILD_SG_SCMD)(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device); +typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge, + dma_addr_t data_out_dma, size_t data_out_sz, + dma_addr_t data_in_dma, size_t data_in_sz); +typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc, + void *paddr); + +/* SAS3.5 support */ +typedef void (*NVME_BUILD_PRP)(struct MPT3SAS_ADAPTER *ioc, u16 smid, + Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, + dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, + size_t data_in_sz); + +/* To support atomic and non atomic descriptors*/ +typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 funcdep); +typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid); +typedef u32 (*BASE_READ_REG) (const void __iomem *addr); +/* + * To get high iops reply queue's msix index when high iops mode is enabled + * else get the msix index of general reply queues. 
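+ *
+ * Hedged sketch of how this callback type is used (editorial note, not
+ * upstream text): the adapter keeps one such handler and the I/O path
+ * calls it per command, e.g.
+ *
+ *	u8 msix_index = ioc->get_msix_index_for_smlio(ioc, scmd);
+ *
+ * where get_msix_index_for_smlio is the GET_MSIX_INDEX member of
+ * struct MPT3SAS_ADAPTER defined later in this header.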
+ */ +typedef u8 (*GET_MSIX_INDEX) (struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd); + +/* IOC Facts and Port Facts converted from little endian to cpu */ +union mpi3_version_union { + MPI2_VERSION_STRUCT Struct; + u32 Word; +}; + +struct mpt3sas_facts { + u16 MsgVersion; + u16 HeaderVersion; + u8 IOCNumber; + u8 VP_ID; + u8 VF_ID; + u16 IOCExceptions; + u16 IOCStatus; + u32 IOCLogInfo; + u8 MaxChainDepth; + u8 WhoInit; + u8 NumberOfPorts; + u8 MaxMSIxVectors; + u16 RequestCredit; + u16 ProductID; + u32 IOCCapabilities; + union mpi3_version_union FWVersion; + u16 IOCRequestFrameSize; + u16 IOCMaxChainSegmentSize; + u16 MaxInitiators; + u16 MaxTargets; + u16 MaxSasExpanders; + u16 MaxEnclosures; + u16 ProtocolFlags; + u16 HighPriorityCredit; + u16 MaxReplyDescriptorPostQueueDepth; + u8 ReplyFrameSize; + u8 MaxVolumes; + u16 MaxDevHandle; + u16 MaxPersistentEntries; + u16 MinDevHandle; + u8 CurrentHostPageSize; +}; + +struct mpt3sas_port_facts { + u8 PortNumber; + u8 VP_ID; + u8 VF_ID; + u8 PortType; + u16 MaxPostedCmdBuffers; +}; + +struct reply_post_struct { + Mpi2ReplyDescriptorsUnion_t *reply_post_free; + dma_addr_t reply_post_free_dma; +}; + +/** + * struct virtual_phy - vSES phy structure + * sas_address: SAS Address of vSES device + * phy_mask: vSES device's phy number + * flags: flags used to manage this structure + */ +struct virtual_phy { + struct list_head list; + u64 sas_address; + u32 phy_mask; + u8 flags; +}; + +#define MPT_VPHY_FLAG_DIRTY_PHY 0x01 + +/** + * struct hba_port - Saves each HBA's Wide/Narrow port info + * @sas_address: sas address of this wide/narrow port's attached device + * @phy_mask: HBA PHY's belonging to this port + * @port_id: port number + * @flags: hba port flags + * @vphys_mask : mask of vSES devices Phy number + * @vphys_list : list containing vSES device structures + */ +struct hba_port { + struct list_head list; + u64 sas_address; + u32 phy_mask; + u8 port_id; + u8 flags; + u32 vphys_mask; + struct list_head vphys_list; +}; + +/* hba port flags */ +#define HBA_PORT_FLAG_DIRTY_PORT 0x01 +#define HBA_PORT_FLAG_NEW_PORT 0x02 + +#define MULTIPATH_DISABLED_PORT_ID 0xFF + +/** + * struct htb_rel_query - diagnostic buffer release reason + * @unique_id - unique id associated with this buffer. 
+ * @buffer_rel_condition - Release condition ioctl/sysfs/reset + * @reserved + * @trigger_type - Master/Event/scsi/MPI + * @trigger_info_dwords - Data Correspondig to trigger type + */ +struct htb_rel_query { + u16 buffer_rel_condition; + u16 reserved; + u32 trigger_type; + u32 trigger_info_dwords[2]; +}; + +/* Buffer_rel_condition bit fields */ + +/* Bit 0 - Diag Buffer not Released */ +#define MPT3_DIAG_BUFFER_NOT_RELEASED (0x00) +/* Bit 0 - Diag Buffer Released */ +#define MPT3_DIAG_BUFFER_RELEASED (0x01) + +/* + * Bit 1 - Diag Buffer Released by IOCTL, + * This bit is valid only if Bit 0 is one + */ +#define MPT3_DIAG_BUFFER_REL_IOCTL (0x02 | MPT3_DIAG_BUFFER_RELEASED) + +/* + * Bit 2 - Diag Buffer Released by Trigger, + * This bit is valid only if Bit 0 is one + */ +#define MPT3_DIAG_BUFFER_REL_TRIGGER (0x04 | MPT3_DIAG_BUFFER_RELEASED) + +/* + * Bit 3 - Diag Buffer Released by SysFs, + * This bit is valid only if Bit 0 is one + */ +#define MPT3_DIAG_BUFFER_REL_SYSFS (0x08 | MPT3_DIAG_BUFFER_RELEASED) + +/* DIAG RESET Master trigger flags */ +#define MPT_DIAG_RESET_ISSUED_BY_DRIVER 0x00000000 +#define MPT_DIAG_RESET_ISSUED_BY_USER 0x00000001 + +typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); +/** + * struct MPT3SAS_ADAPTER - per adapter struct + * @list: ioc_list + * @shost: shost object + * @id: unique adapter id + * @cpu_count: number online cpus + * @name: generic ioc string + * @tmp_string: tmp string used for logging + * @pdev: pci pdev object + * @pio_chip: physical io register space + * @chip: memory mapped register space + * @chip_phys: physical addrss prior to mapping + * @logging_level: see mpt3sas_debug.h + * @fwfault_debug: debuging FW timeouts + * @ir_firmware: IR firmware present + * @bars: bitmask of BAR's that must be configured + * @mask_interrupts: ignore interrupt + * @pci_access_mutex: Mutex to synchronize ioctl, sysfs show path and + * pci resource handling + * @fault_reset_work_q_name: fw fault work queue + * @fault_reset_work_q: "" + * @fault_reset_work: "" + * @firmware_event_name: fw event work queue + * @firmware_event_thread: "" + * @fw_event_lock: + * @fw_event_list: list of fw events + * @current_evet: current processing firmware event + * @fw_event_cleanup: set to one while cleaning up the fw events + * @aen_event_read_flag: event log was read + * @broadcast_aen_busy: broadcast aen waiting to be serviced + * @shost_recovery: host reset in progress + * @ioc_reset_in_progress_lock: + * @ioc_link_reset_in_progress: phy/hard reset in progress + * @ignore_loginfos: ignore loginfos during task management + * @remove_host: flag for when driver unloads, to avoid sending dev resets + * @pci_error_recovery: flag to prevent ioc access until slot reset completes + * @wait_for_discovery_to_complete: flag set at driver load time when + * waiting on reporting devices + * @is_driver_loading: flag set at driver load time + * @port_enable_failed: flag set when port enable has failed + * @start_scan: flag set from scan_start callback, cleared from _mpt3sas_fw_work + * @start_scan_failed: means port enable failed, return's the ioc_status + * @msix_enable: flag indicating msix is enabled + * @msix_vector_count: number msix vectors + * @cpu_msix_table: table for mapping cpus to msix index + * @cpu_msix_table_sz: table size + * @total_io_cnt: Gives total IO count, used to load balance the interrupts + * @ioc_coredump_loop: will have non-zero value when FW is in CoreDump state + * @timestamp_update_count: Counter to fire timeSync command + * 
time_sync_interval: Time sync interval read from man page 11 + * @high_iops_outstanding: used to load balance the interrupts + * within high iops reply queues + * @msix_load_balance: Enables load balancing of interrupts across + * the multiple MSIXs + * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands + * @thresh_hold: Max number of reply descriptors processed + * before updating Host Index + * @iopoll_q_start_index: starting index of io uring poll queues + * in reply queue list + * @drv_internal_flags: Bit map internal to driver + * @drv_support_bitmap: driver's supported feature bit map + * @use_32bit_dma: Flag to use 32 bit consistent dma mask + * @scsi_io_cb_idx: shost generated commands + * @tm_cb_idx: task management commands + * @scsih_cb_idx: scsih internal commands + * @transport_cb_idx: transport internal commands + * @ctl_cb_idx: clt internal commands + * @base_cb_idx: base internal commands + * @config_cb_idx: base internal commands + * @tm_tr_cb_idx : device removal target reset handshake + * @tm_tr_volume_cb_idx : volume removal target reset + * @base_cmds: + * @transport_cmds: + * @scsih_cmds: + * @tm_cmds: + * @ctl_cmds: + * @config_cmds: + * @base_add_sg_single: handler for either 32/64 bit sgl's + * @event_type: bits indicating which events to log + * @event_context: unique id for each logged event + * @event_log: event log pointer + * @event_masks: events that are masked + * @max_shutdown_latency: timeout value for NVMe shutdown operation, + * which is equal that NVMe drive's RTD3 Entry Latency + * which has reported maximum RTD3 Entry Latency value + * among attached NVMe drives. + * @facts: static facts data + * @prev_fw_facts: previous fw facts data + * @pfacts: static port facts data + * @manu_pg0: static manufacturing page 0 + * @manu_pg10: static manufacturing page 10 + * @manu_pg11: static manufacturing page 11 + * @bios_pg2: static bios page 2 + * @bios_pg3: static bios page 3 + * @ioc_pg8: static ioc page 8 + * @iounit_pg0: static iounit page 0 + * @iounit_pg1: static iounit page 1 + * @iounit_pg8: static iounit page 8 + * @sas_hba: sas host object + * @sas_expander_list: expander object list + * @enclosure_list: enclosure object list + * @sas_node_lock: + * @sas_device_list: sas device object list + * @sas_device_init_list: sas device object list (used only at init time) + * @sas_device_lock: + * @pcie_device_list: pcie device object list + * @pcie_device_init_list: pcie device object list (used only at init time) + * @pcie_device_lock: + * @io_missing_delay: time for IO completed by fw when PDR enabled + * @device_missing_delay: time for device missing by fw when PDR enabled + * @sas_id : used for setting volume target IDs + * @pcie_target_id: used for setting pcie target IDs + * @blocking_handles: bitmask used to identify which devices need blocking + * @pd_handles : bitmask for PD handles + * @pd_handles_sz : size of pd_handle bitmask + * @config_page_sz: config page size + * @config_page: reserve memory for config page payload + * @config_page_dma: + * @hba_queue_depth: hba request queue depth + * @sge_size: sg element size for either 32/64 bit + * @scsiio_depth: SCSI_IO queue depth + * @request_sz: per request frame size + * @request: pool of request frames + * @request_dma: + * @request_dma_sz: + * @scsi_lookup: firmware request tracker list + * @scsi_lookup_lock: + * @free_list: free list of request + * @pending_io_count: + * @reset_wq: + * @chain: pool of chains + * @chain_dma: + * @max_sges_in_main_message: number sg elements 
in main message + * @max_sges_in_chain_message: number sg elements per chain + * @chains_needed_per_io: max chains per io + * @chain_depth: total chains allocated + * @chain_segment_sz: gives the max number of + * SGEs accommodate on single chain buffer + * @hi_priority_smid: + * @hi_priority: + * @hi_priority_dma: + * @hi_priority_depth: + * @hpr_lookup: + * @hpr_free_list: + * @internal_smid: + * @internal: + * @internal_dma: + * @internal_depth: + * @internal_lookup: + * @internal_free_list: + * @sense: pool of sense + * @sense_dma: + * @sense_dma_pool: + * @reply_depth: hba reply queue depth: + * @reply_sz: per reply frame size: + * @reply: pool of replys: + * @reply_dma: + * @reply_dma_pool: + * @reply_free_queue_depth: reply free depth + * @reply_free: pool for reply free queue (32 bit addr) + * @reply_free_dma: + * @reply_free_dma_pool: + * @reply_free_host_index: tail index in pool to insert free replys + * @reply_post_queue_depth: reply post queue depth + * @reply_post_struct: struct for reply_post_free physical & virt address + * @rdpq_array_capable: FW supports multiple reply queue addresses in ioc_init + * @rdpq_array_enable: rdpq_array support is enabled in the driver + * @rdpq_array_enable_assigned: this ensures that rdpq_array_enable flag + * is assigned only ones + * @reply_queue_count: number of reply queue's + * @reply_queue_list: link list contaning the reply queue info + * @msix96_vector: 96 MSI-X vector support + * @replyPostRegisterIndex: index of next position in Reply Desc Post Queue + * @delayed_tr_list: target reset link list + * @delayed_tr_volume_list: volume target reset link list + * @delayed_sc_list: + * @delayed_event_ack_list: + * @temp_sensors_count: flag to carry the number of temperature sensors + * @pci_access_mutex: Mutex to synchronize ioctl,sysfs show path and + * pci resource handling. PCI resource freeing will lead to free + * vital hardware/memory resource, which might be in use by cli/sysfs + * path functions resulting in Null pointer reference followed by kernel + * crash. To avoid the above race condition we use mutex syncrhonization + * which ensures the syncrhonization between cli/sysfs_show path. + * @atomic_desc_capable: Atomic Request Descriptor support. + * @GET_MSIX_INDEX: Get the msix index of high iops queues. 
+ * @multipath_on_hba: flag to determine multipath on hba is enabled or not + * @port_table_list: list containing HBA's wide/narrow port's info + */ +struct MPT3SAS_ADAPTER { + struct list_head list; + struct Scsi_Host *shost; + u8 id; + int cpu_count; + char name[MPT_NAME_LENGTH]; + char driver_name[MPT_NAME_LENGTH - 8]; + char tmp_string[MPT_STRING_LENGTH]; + struct pci_dev *pdev; + Mpi2SystemInterfaceRegs_t __iomem *chip; + phys_addr_t chip_phys; + int logging_level; + int fwfault_debug; + u8 ir_firmware; + int bars; + u8 mask_interrupts; + + /* fw fault handler */ + char fault_reset_work_q_name[20]; + struct workqueue_struct *fault_reset_work_q; + struct delayed_work fault_reset_work; + + /* fw event handler */ + char firmware_event_name[20]; + struct workqueue_struct *firmware_event_thread; + spinlock_t fw_event_lock; + struct list_head fw_event_list; + struct fw_event_work *current_event; + u8 fw_events_cleanup; + + /* misc flags */ + int aen_event_read_flag; + u8 broadcast_aen_busy; + u16 broadcast_aen_pending; + u8 shost_recovery; + u8 got_task_abort_from_ioctl; + + struct mutex reset_in_progress_mutex; + spinlock_t ioc_reset_in_progress_lock; + u8 ioc_link_reset_in_progress; + + u8 ignore_loginfos; + u8 remove_host; + u8 pci_error_recovery; + u8 wait_for_discovery_to_complete; + u8 is_driver_loading; + u8 port_enable_failed; + u8 start_scan; + u16 start_scan_failed; + + u8 msix_enable; + u16 msix_vector_count; + u8 *cpu_msix_table; + u16 cpu_msix_table_sz; + resource_size_t __iomem **reply_post_host_index; + u32 ioc_reset_count; + MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds; + u32 non_operational_loop; + u8 ioc_coredump_loop; + u32 timestamp_update_count; + u32 time_sync_interval; + atomic64_t total_io_cnt; + atomic64_t high_iops_outstanding; + bool msix_load_balance; + u16 thresh_hold; + u8 high_iops_queues; + u8 iopoll_q_start_index; + u32 drv_internal_flags; + u32 drv_support_bitmap; + u32 dma_mask; + bool enable_sdev_max_qd; + bool use_32bit_dma; + struct io_uring_poll_queue *io_uring_poll_queues; + + /* internal commands, callback index */ + u8 scsi_io_cb_idx; + u8 tm_cb_idx; + u8 transport_cb_idx; + u8 scsih_cb_idx; + u8 ctl_cb_idx; + u8 base_cb_idx; + u8 port_enable_cb_idx; + u8 config_cb_idx; + u8 tm_tr_cb_idx; + u8 tm_tr_volume_cb_idx; + u8 tm_sas_control_cb_idx; + struct _internal_cmd base_cmds; + struct _internal_cmd port_enable_cmds; + struct _internal_cmd transport_cmds; + struct _internal_cmd scsih_cmds; + struct _internal_cmd tm_cmds; + struct _internal_cmd ctl_cmds; + struct _internal_cmd config_cmds; + + MPT_ADD_SGE base_add_sg_single; + + /* function ptr for either IEEE or MPI sg elements */ + MPT_BUILD_SG_SCMD build_sg_scmd; + MPT_BUILD_SG build_sg; + MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge; + u16 sge_size_ieee; + u16 hba_mpi_version_belonged; + + /* function ptr for MPI sg elements only */ + MPT_BUILD_SG build_sg_mpi; + MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi; + + /* function ptr for NVMe PRP elements only */ + NVME_BUILD_PRP build_nvme_prp; + + /* event log */ + u32 event_type[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; + u32 event_context; + void *event_log; + u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; + + u8 tm_custom_handling; + u8 nvme_abort_timeout; + u16 max_shutdown_latency; + u16 max_wideport_qd; + u16 max_narrowport_qd; + u16 max_nvme_qd; + u8 max_sata_qd; + + /* static config pages */ + struct mpt3sas_facts facts; + struct mpt3sas_facts prev_fw_facts; + struct mpt3sas_port_facts *pfacts; + Mpi2ManufacturingPage0_t 
manu_pg0; + struct Mpi2ManufacturingPage10_t manu_pg10; + struct Mpi2ManufacturingPage11_t manu_pg11; + Mpi2BiosPage2_t bios_pg2; + Mpi2BiosPage3_t bios_pg3; + Mpi2IOCPage8_t ioc_pg8; + Mpi2IOUnitPage0_t iounit_pg0; + Mpi2IOUnitPage1_t iounit_pg1; + Mpi2IOUnitPage8_t iounit_pg8; + Mpi2IOCPage1_t ioc_pg1_copy; + + struct _boot_device req_boot_device; + struct _boot_device req_alt_boot_device; + struct _boot_device current_boot_device; + + /* sas hba, expander, and device list */ + struct _sas_node sas_hba; + struct list_head sas_expander_list; + struct list_head enclosure_list; + spinlock_t sas_node_lock; + struct list_head sas_device_list; + struct list_head sas_device_init_list; + spinlock_t sas_device_lock; + struct list_head pcie_device_list; + struct list_head pcie_device_init_list; + spinlock_t pcie_device_lock; + + struct list_head raid_device_list; + spinlock_t raid_device_lock; + u8 io_missing_delay; + u16 device_missing_delay; + int sas_id; + int pcie_target_id; + + void *blocking_handles; + void *pd_handles; + u16 pd_handles_sz; + + void *pend_os_device_add; + u16 pend_os_device_add_sz; + + /* config page */ + u16 config_page_sz; + void *config_page; + dma_addr_t config_page_dma; + void *config_vaddr; + + /* scsiio request */ + u16 hba_queue_depth; + u16 sge_size; + u16 scsiio_depth; + u16 request_sz; + u8 *request; + dma_addr_t request_dma; + u32 request_dma_sz; + struct pcie_sg_list *pcie_sg_lookup; + spinlock_t scsi_lookup_lock; + int pending_io_count; + wait_queue_head_t reset_wq; + u16 *io_queue_num; + + /* PCIe SGL */ + struct dma_pool *pcie_sgl_dma_pool; + /* Host Page Size */ + u32 page_size; + + /* chain */ + struct chain_lookup *chain_lookup; + struct list_head free_chain_list; + struct dma_pool *chain_dma_pool; + ulong chain_pages; + u16 max_sges_in_main_message; + u16 max_sges_in_chain_message; + u16 chains_needed_per_io; + u32 chain_depth; + u16 chain_segment_sz; + u16 chains_per_prp_buffer; + + /* hi-priority queue */ + u16 hi_priority_smid; + u8 *hi_priority; + dma_addr_t hi_priority_dma; + u16 hi_priority_depth; + struct request_tracker *hpr_lookup; + struct list_head hpr_free_list; + + /* internal queue */ + u16 internal_smid; + u8 *internal; + dma_addr_t internal_dma; + u16 internal_depth; + struct request_tracker *internal_lookup; + struct list_head internal_free_list; + + /* sense */ + u8 *sense; + dma_addr_t sense_dma; + struct dma_pool *sense_dma_pool; + + /* reply */ + u16 reply_sz; + u8 *reply; + dma_addr_t reply_dma; + u32 reply_dma_max_address; + u32 reply_dma_min_address; + struct dma_pool *reply_dma_pool; + + /* reply free queue */ + u16 reply_free_queue_depth; + __le32 *reply_free; + dma_addr_t reply_free_dma; + struct dma_pool *reply_free_dma_pool; + u32 reply_free_host_index; + + /* reply post queue */ + u16 reply_post_queue_depth; + struct reply_post_struct *reply_post; + u8 rdpq_array_capable; + u8 rdpq_array_enable; + u8 rdpq_array_enable_assigned; + struct dma_pool *reply_post_free_dma_pool; + struct dma_pool *reply_post_free_array_dma_pool; + Mpi2IOCInitRDPQArrayEntry *reply_post_free_array; + dma_addr_t reply_post_free_array_dma; + u8 reply_queue_count; + struct list_head reply_queue_list; + + u8 combined_reply_queue; + u8 combined_reply_index_count; + u8 smp_affinity_enable; + /* reply post register index */ + resource_size_t __iomem **replyPostRegisterIndex; + + struct list_head delayed_tr_list; + struct list_head delayed_tr_volume_list; + struct list_head delayed_sc_list; + struct list_head delayed_event_ack_list; + u8 temp_sensors_count; + 
struct mutex pci_access_mutex; + + /* diag buffer support */ + u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT]; + u32 diag_buffer_sz[MPI2_DIAG_BUF_TYPE_COUNT]; + dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT]; + u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT]; + u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT]; + u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23]; + u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT]; + u32 ring_buffer_offset; + u32 ring_buffer_sz; + struct htb_rel_query htb_rel; + u8 reset_from_user; + u8 is_warpdrive; + u8 is_mcpu_endpoint; + u8 hide_ir_msg; + u8 mfg_pg10_hide_flag; + u8 hide_drives; + spinlock_t diag_trigger_lock; + u8 diag_trigger_active; + u8 atomic_desc_capable; + BASE_READ_REG base_readl; + BASE_READ_REG base_readl_ext_retry; + struct SL_WH_MASTER_TRIGGER_T diag_trigger_master; + struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event; + struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi; + struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi; + u8 supports_trigger_pages; + void *device_remove_in_progress; + u16 device_remove_in_progress_sz; + u8 is_gen35_ioc; + u8 is_aero_ioc; + struct dentry *debugfs_root; + struct dentry *ioc_dump; + PUT_SMID_IO_FP_HIP put_smid_scsi_io; + PUT_SMID_IO_FP_HIP put_smid_fast_path; + PUT_SMID_IO_FP_HIP put_smid_hi_priority; + PUT_SMID_DEFAULT put_smid_default; + GET_MSIX_INDEX get_msix_index_for_smlio; + + u8 multipath_on_hba; + struct list_head port_table_list; +}; + +struct mpt3sas_debugfs_buffer { + void *buf; + u32 len; +}; + +#define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001 +#define MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY 0x00000002 + +#define MPT_DRV_INTERNAL_FIRST_PE_ISSUED 0x00000001 + +typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); + +/* + * struct ATTO_SAS_NVRAM - ATTO NVRAM settings stored + * in Manufacturing page 1 used to get + * ATTO SasAddr. + */ +struct ATTO_SAS_NVRAM { + u8 Signature[4]; + u8 Version; +#define ATTO_SASNVR_VERSION 0 + + u8 Checksum; +#define ATTO_SASNVR_CKSUM_SEED 0x5A + u8 Pad[10]; + u8 SasAddr[8]; +#define ATTO_SAS_ADDR_ALIGN 64 + u8 Reserved[232]; +}; + +#define ATTO_SAS_ADDR_DEVNAME_BIAS 63 + +union ATTO_SAS_ADDRESS { + U8 b[8]; + U16 w[4]; + U32 d[2]; + U64 q; +}; + +/* base shared API */ +extern struct list_head mpt3sas_ioc_list; +extern char driver_name[MPT_NAME_LENGTH]; +/* spinlock on list operations over IOCs + * Case: when multiple warpdrive cards(IOCs) are in use + * Each IOC will added to the ioc list structure on initialization. + * Watchdog threads run at regular intervals to check IOC for any + * fault conditions which will trigger the dead_ioc thread to + * deallocate pci resource, resulting deleting the IOC netry from list, + * this deletion need to protected by spinlock to enusre that + * ioc removal is syncrhonized, if not synchronized it might lead to + * list_del corruption as the ioc list is traversed in cli path. 
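+ *
+ * Minimal traversal sketch (editorial illustration) showing the lock
+ * paired with the list it protects:
+ *
+ *	struct MPT3SAS_ADAPTER *ioc;
+ *
+ *	spin_lock(&gioc_lock);
+ *	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ *		// inspect or match the IOC here
+ *	}
+ *	spin_unlock(&gioc_lock);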
+ */ +extern spinlock_t gioc_lock; + +void mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc); + +int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc); +int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc); +int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, + enum reset_type type); + +void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid); +void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid); +__le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, + u16 smid); +void *mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid); +dma_addr_t mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid); +void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll); +void mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc); + +void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 handle); +void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 msix_task); +void mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid); +void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid); +/* hi-priority queue */ +u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); +u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, + struct scsi_cmnd *scmd); +void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc, + struct scsiio_tracker *st); + +u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); +void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid); +void mpt3sas_base_initialize_callback_handler(void); +u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func); +void mpt3sas_base_release_callback_handler(u8 cb_idx); + +u8 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +u8 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); +void *mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, + u32 phys_addr); + +u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked); + +void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code); +#define mpt3sas_print_fault_code(ioc, fault_code) \ +do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \ + mpt3sas_base_fault_info(ioc, fault_code); } while (0) + +void mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code); +#define mpt3sas_print_coredump_info(ioc, fault_code) \ +do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \ + mpt3sas_base_coredump_info(ioc, fault_code); } while (0) + +int mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc, + const char *caller); +int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, + Mpi2SasIoUnitControlReply_t *mpi_reply, + Mpi2SasIoUnitControlRequest_t *mpi_request); +int mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, + Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request); + +void mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, + u32 *event_type); + +void mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc); + +void 
mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, + u16 device_missing_delay, u8 io_missing_delay); + +int mpt3sas_base_check_for_fault_and_issue_reset( + struct MPT3SAS_ADAPTER *ioc); + +int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc); + +void +mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc); + +u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc, + u8 status, void *mpi_request, int sz); +#define mpt3sas_check_cmd_timeout(ioc, status, mpi_request, sz, issue_reset) \ +do { ioc_err(ioc, "In func: %s\n", __func__); \ + issue_reset = mpt3sas_base_check_cmd_timeout(ioc, \ + status, mpi_request, sz); } while (0) + +int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count); +int mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type); +void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc); +int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num); +void mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc); + +/* scsih shared API */ +struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, + u16 smid); +u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, + u32 reply); +void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_scsih_clear_outstanding_scsi_tm_commands( + struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc); + +int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, + uint channel, uint id, u64 lun, u8 type, u16 smid_task, + u16 msix_task, u8 timeout, u8 tr_method); +int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, + uint channel, uint id, u64 lun, u8 type, u16 smid_task, + u16 msix_task, u8 timeout, u8 tr_method); + +void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); +void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); +void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + struct hba_port *port); +void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct hba_port *port); +u8 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, + u16 smid); +struct hba_port * +mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, u8 port, + u8 bypass_dirty_port_flag); + +struct _sas_node *mpt3sas_scsih_expander_find_by_handle( + struct MPT3SAS_ADAPTER *ioc, u16 handle); +struct _sas_node *mpt3sas_scsih_expander_find_by_sas_address( + struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + struct hba_port *port); +struct _sas_device *mpt3sas_get_sdev_by_addr( + struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + struct hba_port *port); +struct _sas_device *__mpt3sas_get_sdev_by_addr( + struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + struct hba_port *port); +struct _sas_device *mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, + u16 handle); +struct _pcie_device *mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, + u16 handle); + +void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc); +struct _raid_device * +mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle); +void mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth); +struct _sas_device * +__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc, struct sas_rphy *rphy); +struct virtual_phy * 
+mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc, + struct hba_port *port, u32 phy); + +/* config shared API */ +u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +int mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, + u8 *num_phys); +int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page); +int mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page); + +int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page, + u16 sz); +int mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage10_t *config_page); + +int mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page); +int mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page); + +int mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2BiosPage2_t *config_page); +int mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2BiosPage3_t *config_page); +int mpt3sas_config_set_bios_pg4(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page, + int sz_config_page); +int mpt3sas_config_get_bios_pg4(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page, + int sz_config_page); +int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOUnitPage0_t *config_page); +int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page, + u16 sz); +int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, + u16 sz); +int mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOUnitPage1_t *config_page); +int mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz); +int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOUnitPage1_t *config_page); +int mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOUnitPage8_t *config_page); +int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz); +int mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + 
Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz); +int mpt3sas_config_get_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOCPage1_t *config_page); +int mpt3sas_config_set_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOCPage1_t *config_page); +int mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOCPage8_t *config_page); +int mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage0_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage1_t *config_page, + u32 phy_number, u16 handle); +int mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number); +int mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number); +int mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form, + u32 handle); +int mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 *num_pds); +int mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form, + u32 handle, u16 sz); +int mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, + u32 form, u32 form_specific); +int mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle, + u16 *volume_handle); +int mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, + u16 volume_handle, u64 *wwid); +int +mpt3sas_config_get_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage0_t *config_page); +int +mpt3sas_config_get_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage1_t *config_page); +int +mpt3sas_config_get_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage2_t *config_page); +int +mpt3sas_config_get_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage3_t *config_page); +int +mpt3sas_config_get_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage4_t *config_page); +int +mpt3sas_config_update_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_MASTER_TRIGGER_T *master_tg, bool set); +int +mpt3sas_config_update_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_EVENT_TRIGGERS_T *event_tg, bool set); +int +mpt3sas_config_update_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_SCSI_TRIGGERS_T *scsi_tg, bool set); +int +mpt3sas_config_update_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_MPI_TRIGGERS_T *mpi_tg, bool set); + +/* ctl shared API */ +extern const struct attribute_group *mpt3sas_host_groups[]; +extern const struct attribute_group *mpt3sas_dev_groups[]; +void mpt3sas_ctl_init(ushort hbas_to_enumerate); +void mpt3sas_ctl_exit(ushort hbas_to_enumerate); +u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 
msix_index, + u32 reply); +void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc); +u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, + u8 msix_index, u32 reply); +void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventNotificationReply_t *mpi_reply); + +void mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, + u8 bits_to_register); +int mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, + u8 *issue_reset); + +/* transport shared API */ +extern struct scsi_transport_template *mpt3sas_transport_template; +u8 mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +struct _sas_port *mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, + u16 handle, u64 sas_address, struct hba_port *port); +void mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + u64 sas_address_parent, struct hba_port *port); +int mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy + *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev); +int mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, + struct _sas_phy *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1, + struct device *parent_dev); +void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, u16 handle, u8 phy_number, u8 link_rate, + struct hba_port *port); +extern struct sas_function_template mpt3sas_transport_functions; +extern struct scsi_transport_template *mpt3sas_transport_template; +void +mpt3sas_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy); +void +mpt3sas_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy, + u64 sas_address, struct hba_port *port); +/* trigger data externs */ +void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data); +void mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data); +void mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, + u32 trigger_bitmask); +void mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event, + u16 log_entry_qualifier); +void mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, + u8 asc, u8 ascq); +void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, + u32 loginfo); + +/* warpdrive APIs */ +u8 mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc, + struct _raid_device *raid_device); +void +mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, + struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request); + +/* NCQ Prio Handling Check */ +bool scsih_ncq_prio_supp(struct scsi_device *sdev); + +void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_init_debugfs(void); +void mpt3sas_exit_debugfs(void); + +/** + * _scsih_is_pcie_scsi_device - determines if device is an pcie scsi device + * @device_info: bitfield providing information about the device. + * Context: none + * + * Returns 1 if scsi device. 
+ */ +static inline int +mpt3sas_scsih_is_pcie_scsi_device(u32 device_info) +{ + if ((device_info & + MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) == MPI26_PCIE_DEVINFO_SCSI) + return 1; + else + return 0; +} +#endif /* MPT3SAS_BASE_H_INCLUDED */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c new file mode 100644 index 000000000..d114ef381 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ -0,0 +1,2794 @@ +/* + * This module provides common API for accessing firmware configuration pages + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/blkdev.h> +#include <linux/sched.h> +#include <linux/workqueue.h> +#include <linux/delay.h> +#include <linux/pci.h> + +#include "mpt3sas_base.h" + +/* local definitions */ + +/* Timeout for config page request (in seconds) */ +#define MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT 15 + +/* Common sgl flags for READING a config page. */ +#define MPT3_CONFIG_COMMON_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \ + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \ + | MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT) + +/* Common sgl flags for WRITING a config page.
*/ +#define MPT3_CONFIG_COMMON_WRITE_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \ + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \ + | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC) \ + << MPI2_SGE_FLAGS_SHIFT) + +/** + * struct config_request - obtain dma memory via routine + * @sz: size + * @page: virt pointer + * @page_dma: phys pointer + * + */ +struct config_request { + u16 sz; + void *page; + dma_addr_t page_dma; +}; + +/** + * _config_display_some_debug - debug routine + * @ioc: per adapter object + * @smid: system request message index + * @calling_function_name: string pass from calling function + * @mpi_reply: reply message frame + * Context: none. + * + * Function for displaying debug info helpful when debugging issues + * in this module. + */ +static void +_config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, + char *calling_function_name, MPI2DefaultReply_t *mpi_reply) +{ + Mpi2ConfigRequest_t *mpi_request; + char *desc = NULL; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) { + case MPI2_CONFIG_PAGETYPE_IO_UNIT: + desc = "io_unit"; + break; + case MPI2_CONFIG_PAGETYPE_IOC: + desc = "ioc"; + break; + case MPI2_CONFIG_PAGETYPE_BIOS: + desc = "bios"; + break; + case MPI2_CONFIG_PAGETYPE_RAID_VOLUME: + desc = "raid_volume"; + break; + case MPI2_CONFIG_PAGETYPE_MANUFACTURING: + desc = "manufacturing"; + break; + case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK: + desc = "physdisk"; + break; + case MPI2_CONFIG_PAGETYPE_EXTENDED: + switch (mpi_request->ExtPageType) { + case MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT: + desc = "sas_io_unit"; + break; + case MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER: + desc = "sas_expander"; + break; + case MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE: + desc = "sas_device"; + break; + case MPI2_CONFIG_EXTPAGETYPE_SAS_PHY: + desc = "sas_phy"; + break; + case MPI2_CONFIG_EXTPAGETYPE_LOG: + desc = "log"; + break; + case MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE: + desc = "enclosure"; + break; + case MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG: + desc = "raid_config"; + break; + case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING: + desc = "driver_mapping"; + break; + case MPI2_CONFIG_EXTPAGETYPE_SAS_PORT: + desc = "sas_port"; + break; + case MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING: + desc = "ext_manufacturing"; + break; + case MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT: + desc = "pcie_io_unit"; + break; + case MPI2_CONFIG_EXTPAGETYPE_PCIE_SWITCH: + desc = "pcie_switch"; + break; + case MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE: + desc = "pcie_device"; + break; + case MPI2_CONFIG_EXTPAGETYPE_PCIE_LINK: + desc = "pcie_link"; + break; + } + break; + } + + if (!desc) + return; + + ioc_info(ioc, "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n", + calling_function_name, desc, + mpi_request->Header.PageNumber, mpi_request->Action, + le32_to_cpu(mpi_request->PageAddress), smid); + + if (!mpi_reply) + return; + + if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) + ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); +} + +/** + * _config_alloc_config_dma_memory - obtain physical memory + * @ioc: per adapter object + * @mem: struct config_request + * + * A wrapper for obtaining dma-able memory for config page request. + * + * Return: 0 for success, non-zero for failure. 
+ */ +static int +_config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc, + struct config_request *mem) +{ + int r = 0; + + if (mem->sz > ioc->config_page_sz) { + mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz, + &mem->page_dma, GFP_KERNEL); + if (!mem->page) { + ioc_err(ioc, "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n", + __func__, mem->sz); + r = -ENOMEM; + } + } else { /* use tmp buffer if less than 512 bytes */ + mem->page = ioc->config_page; + mem->page_dma = ioc->config_page_dma; + } + ioc->config_vaddr = mem->page; + return r; +} + +/** + * _config_free_config_dma_memory - wrapper to free the memory + * @ioc: per adapter object + * @mem: struct config_request + * + * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory. + * + * Return: 0 for success, non-zero for failure. + */ +static void +_config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc, + struct config_request *mem) +{ + if (mem->sz > ioc->config_page_sz) + dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page, + mem->page_dma); +} + +/** + * mpt3sas_config_done - config page completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: none. + * + * The callback handler when using _config_request. + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +u8 +mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + + if (ioc->config_cmds.status == MPT3_CMD_NOT_USED) + return 1; + if (ioc->config_cmds.smid != smid) + return 1; + ioc->config_cmds.status |= MPT3_CMD_COMPLETE; + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) { + ioc->config_cmds.status |= MPT3_CMD_REPLY_VALID; + memcpy(ioc->config_cmds.reply, mpi_reply, + mpi_reply->MsgLength*4); + } + ioc->config_cmds.status &= ~MPT3_CMD_PENDING; + if (ioc->logging_level & MPT_DEBUG_CONFIG) + _config_display_some_debug(ioc, smid, "config_done", mpi_reply); + ioc->config_cmds.smid = USHRT_MAX; + complete(&ioc->config_cmds.done); + return 1; +} + +/** + * _config_request - main routine for sending config page requests + * @ioc: per adapter object + * @mpi_request: request message frame + * @mpi_reply: reply mf payload returned from firmware + * @timeout: timeout in seconds + * @config_page: contents of the config page + * @config_page_sz: size of config page + * Context: sleep + * + * A generic API for config page requests to firmware. + * + * The ioc->config_cmds.status flag should be MPT3_CMD_NOT_USED before calling + * this API. + * + * The callback index is set inside `ioc->config_cb_idx. + * + * Return: 0 for success, non-zero for failure. 
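+ *
+ * Example (illustrative sketch, mirroring the exported accessors below):
+ * callers first issue a PAGE_HEADER action with a zero-length SGE so the
+ * firmware fills in the page header, then resend the same frame as a
+ * READ_CURRENT (or WRITE_CURRENT) action against a real buffer.  The
+ * locals (mpi_request, mpi_reply, ioc_pg8, r) are assumed to be declared
+ * by the caller:
+ *
+ *	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ *	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ *	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ *	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+ *	mpi_request.Header.PageNumber = 8;
+ *	mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
+ *	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ *	r = _config_request(ioc, &mpi_request, &mpi_reply,
+ *	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ *	if (!r) {
+ *		mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ *		r = _config_request(ioc, &mpi_request, &mpi_reply,
+ *		    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &ioc_pg8,
+ *		    sizeof(ioc_pg8));
+ *	}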
+ */ +static int +_config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t + *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout, + void *config_page, u16 config_page_sz) +{ + u16 smid; + Mpi2ConfigRequest_t *config_request; + int r; + u8 retry_count, issue_host_reset = 0; + struct config_request mem; + u32 ioc_status = UINT_MAX; + + mutex_lock(&ioc->config_cmds.mutex); + if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: config_cmd in use\n", __func__); + mutex_unlock(&ioc->config_cmds.mutex); + return -EAGAIN; + } + + retry_count = 0; + memset(&mem, 0, sizeof(struct config_request)); + + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + + if (config_page) { + mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion; + mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber; + mpi_request->Header.PageType = mpi_reply->Header.PageType; + mpi_request->Header.PageLength = mpi_reply->Header.PageLength; + mpi_request->ExtPageLength = mpi_reply->ExtPageLength; + mpi_request->ExtPageType = mpi_reply->ExtPageType; + if (mpi_request->Header.PageLength) + mem.sz = mpi_request->Header.PageLength * 4; + else + mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; + r = _config_alloc_config_dma_memory(ioc, &mem); + if (r != 0) + goto out; + if (mpi_request->Action == + MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT || + mpi_request->Action == + MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) { + ioc->base_add_sg_single(&mpi_request->PageBufferSGE, + MPT3_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz, + mem.page_dma); + memcpy(mem.page, config_page, min_t(u16, mem.sz, + config_page_sz)); + } else { + memset(config_page, 0, config_page_sz); + ioc->base_add_sg_single(&mpi_request->PageBufferSGE, + MPT3_CONFIG_COMMON_SGLFLAGS | mem.sz, mem.page_dma); + memset(mem.page, 0, min_t(u16, mem.sz, config_page_sz)); + } + } + + retry_config: + if (retry_count) { + if (retry_count > 2) { /* attempt only 2 retries */ + r = -EFAULT; + goto free_mem; + } + ioc_info(ioc, "%s: attempting retry (%d)\n", + __func__, retry_count); + } + + r = mpt3sas_wait_for_ioc(ioc, MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT); + if (r) { + if (r == -ETIME) + issue_host_reset = 1; + goto free_mem; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + ioc->config_cmds.status = MPT3_CMD_NOT_USED; + r = -EAGAIN; + goto free_mem; + } + + r = 0; + memset(ioc->config_cmds.reply, 0, sizeof(Mpi2ConfigReply_t)); + ioc->config_cmds.status = MPT3_CMD_PENDING; + config_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->config_cmds.smid = smid; + memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t)); + if (ioc->logging_level & MPT_DEBUG_CONFIG) + _config_display_some_debug(ioc, smid, "config_request", NULL); + init_completion(&ioc->config_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ); + if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, + smid, "config_request", NULL); + ioc_err(ioc, "%s: command timeout\n", __func__); + mpt3sas_base_check_cmd_timeout(ioc, ioc->config_cmds.status, + mpi_request, sizeof(Mpi2ConfigRequest_t) / 4); + retry_count++; + if (ioc->config_cmds.smid == smid) + mpt3sas_base_free_smid(ioc, smid); + if (ioc->config_cmds.status & MPT3_CMD_RESET) + goto retry_config; + if (ioc->shost_recovery || ioc->pci_error_recovery) { + issue_host_reset = 0; + r = -EFAULT; + } 
else + issue_host_reset = 1; + goto free_mem; + } + + if (ioc->config_cmds.status & MPT3_CMD_REPLY_VALID) { + memcpy(mpi_reply, ioc->config_cmds.reply, + sizeof(Mpi2ConfigReply_t)); + + /* Reply Frame Sanity Checks to workaround FW issues */ + if ((mpi_request->Header.PageType & 0xF) != + (mpi_reply->Header.PageType & 0xF)) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, + smid, "config_request", NULL); + _debug_dump_mf(mpi_request, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); + panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n", + ioc->name, __func__, + mpi_request->Header.PageType & 0xF, + mpi_reply->Header.PageType & 0xF); + } + + if (((mpi_request->Header.PageType & 0xF) == + MPI2_CONFIG_PAGETYPE_EXTENDED) && + mpi_request->ExtPageType != mpi_reply->ExtPageType) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, + smid, "config_request", NULL); + _debug_dump_mf(mpi_request, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); + panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n", + ioc->name, __func__, + mpi_request->ExtPageType, + mpi_reply->ExtPageType); + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) + & MPI2_IOCSTATUS_MASK; + } + + if (retry_count) + ioc_info(ioc, "%s: retry (%d) completed!!\n", + __func__, retry_count); + + if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) && + config_page && mpi_request->Action == + MPI2_CONFIG_ACTION_PAGE_READ_CURRENT) { + u8 *p = (u8 *)mem.page; + + /* Config Page Sanity Checks to workaround FW issues */ + if (p) { + if ((mpi_request->Header.PageType & 0xF) != + (p[3] & 0xF)) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, + smid, "config_request", NULL); + _debug_dump_mf(mpi_request, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); + _debug_dump_config(p, min_t(u16, mem.sz, + config_page_sz)/4); + panic("%s: %s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n", + ioc->name, __func__, + mpi_request->Header.PageType & 0xF, + p[3] & 0xF); + } + + if (((mpi_request->Header.PageType & 0xF) == + MPI2_CONFIG_PAGETYPE_EXTENDED) && + (mpi_request->ExtPageType != p[6])) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, + smid, "config_request", NULL); + _debug_dump_mf(mpi_request, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); + _debug_dump_config(p, min_t(u16, mem.sz, + config_page_sz)/4); + panic("%s: %s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n", + ioc->name, __func__, + mpi_request->ExtPageType, p[6]); + } + } + memcpy(config_page, mem.page, min_t(u16, mem.sz, + config_page_sz)); + } + + free_mem: + if (config_page) + _config_free_config_dma_memory(ioc, &mem); + out: + ioc->config_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->config_cmds.mutex); + + if (issue_host_reset) { + if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED) { + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + r = -EFAULT; + } else { + if (mpt3sas_base_check_for_fault_and_issue_reset(ioc)) + return -EFAULT; + r = -EAGAIN; + } + } + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg0 - obtain manufacturing page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the 
config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg1 - obtain manufacturing page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 7; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING7_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sz); + out: + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg10 - obtain manufacturing page 10 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage10_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 10; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg11 - obtain manufacturing page 11 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
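+ *
+ * Example (illustrative sketch only): manufacturing page 11 is typically
+ * handled as a read-modify-write pair with
+ * mpt3sas_config_set_manufacturing_pg11().  The EEDPTagMode field and the
+ * locals below are assumptions used purely for illustration:
+ *
+ *	Mpi2ConfigReply_t mpi_reply;
+ *	struct Mpi2ManufacturingPage11_t manu_pg11;
+ *
+ *	if (!mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply,
+ *	    &manu_pg11) && manu_pg11.EEDPTagMode == 0) {
+ *		manu_pg11.EEDPTagMode = 1;
+ *		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
+ *		    &manu_pg11);
+ *	}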
+ */ +int +mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 11; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_set_manufacturing_pg11 - set manufacturing page 11 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 11; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_bios_pg2 - obtain bios page 2 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage2_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 2; + mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_bios_pg3 - obtain bios page 3 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. 
+ * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2BiosPage3_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 3; + mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + + out: + return r; +} + +/** + * mpt3sas_config_set_bios_pg4 - write out bios page 4 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz_config_pg: sizeof the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_set_bios_pg4(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page, + int sz_config_pg) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 4; + mpi_request.Header.PageVersion = MPI2_BIOSPAGE4_PAGEVERSION; + + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sz_config_pg); + out: + return r; +} + +/** + * mpt3sas_config_get_bios_pg4 - read bios page 4 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz_config_pg: sizeof the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_bios_pg4(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page, + int sz_config_pg) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 4; + mpi_request.Header.PageVersion = MPI2_BIOSPAGE4_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + /* + * The sizeof the page is variable. 
Allow for just the + * size to be returned + */ + if (config_page && sz_config_pg) { + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sz_config_pg); + } + +out: + return r; +} + +/** + * mpt3sas_config_get_iounit_pg0 - obtain iounit page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage0_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_iounit_pg1 - obtain iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_set_iounit_pg1 - set iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_iounit_pg3 - obtain iounit page 3 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 3; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE3_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_get_iounit_pg8 - obtain iounit page 8 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 8; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_ioc_pg8 - obtain ioc page 8 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage8_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC; + mpi_request.Header.PageNumber = 8; + mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} +/** + * mpt3sas_config_get_ioc_pg1 - obtain ioc page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_set_ioc_pg1 - modify ioc page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_set_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_sas_device_pg0 - obtain sas device page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: device handle + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
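+ *
+ * Example (illustrative sketch only): fetching the page for one known
+ * device handle; MPI2_SAS_DEVICE_PGAD_FORM_HANDLE is the @form value from
+ * the MPI headers and the locals are assumed to be declared by the caller:
+ *
+ *	Mpi2ConfigReply_t mpi_reply;
+ *	Mpi2SasDevicePage0_t sas_device_pg0;
+ *	u64 sas_address;
+ *
+ *	if (!mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ *	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))
+ *		sas_address = le64_to_cpu(sas_device_pg0.SASAddress);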
+ */ +int +mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page, + u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE; + mpi_request.Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION; + mpi_request.Header.PageNumber = 0; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_sas_device_pg1 - obtain sas device page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: device handle + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page, + u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE; + mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION; + mpi_request.Header.PageNumber = 1; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_pcie_device_pg0 - obtain pcie device page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: device handle + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page, + u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE; + mpi_request.Header.PageVersion = MPI26_PCIEDEVICE0_PAGEVERSION; + mpi_request.Header.PageNumber = 0; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +/** + * mpt3sas_config_get_pcie_iounit_pg1 - obtain pcie iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT; + mpi_request.Header.PageVersion = MPI26_PCIEIOUNITPAGE1_PAGEVERSION; + mpi_request.Header.PageNumber = 1; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); +out: + return r; +} + +/** + * mpt3sas_config_get_pcie_device_pg2 - obtain pcie device page 2 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: device handle + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page, + u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE; + mpi_request.Header.PageVersion = MPI26_PCIEDEVICE2_PAGEVERSION; + mpi_request.Header.PageNumber = 2; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +/** + * mpt3sas_config_get_number_hba_phys - obtain number of phys on the host + * @ioc: per adapter object + * @num_phys: pointer returned with the number of phys + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + u16 ioc_status; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t config_page; + + *num_phys = 0; + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page, + sizeof(Mpi2SasIOUnitPage0_t)); + if (!r) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) + *num_phys = config_page.NumPhys; + } + out: + return r; +} + +/** + * mpt3sas_config_get_sas_iounit_pg0 - obtain sas iounit page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Calling function should call config_get_number_hba_phys prior to + * this function, so enough memory is allocated for config_page. + * + * Return: 0 for success, non-zero for failure. 
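+ *
+ * Example (illustrative sketch only): sizing the buffer from the phy count
+ * returned by mpt3sas_config_get_number_hba_phys() before reading the
+ * page; the locals (sz, num_phys, mpi_reply, sas_iounit_pg0, link_rate)
+ * are assumed to be declared by the caller:
+ *
+ *	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
+ *	    (num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
+ *	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ *	if (sas_iounit_pg0 &&
+ *	    !mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ *	    sas_iounit_pg0, sz))
+ *		link_rate = sas_iounit_pg0->PhyData[0].NegotiatedLinkRate;
+ *	kfree(sas_iounit_pg0);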
+ */ +int +mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Calling function should call config_get_number_hba_phys prior to + * this function, so enough memory is allocated for config_page. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_set_sas_iounit_pg1 - send sas iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Calling function should call config_get_number_hba_phys prior to + * this function, so enough memory is allocated for config_page. + * + * Return: 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_get_expander_pg0 - obtain expander page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: expander handle + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2ExpanderPage0_t *config_page, u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_expander_pg1 - obtain expander page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @phy_number: phy number + * @handle: expander handle + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2ExpanderPage1_t *config_page, u32 phy_number, + u16 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = + cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM | + (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_enclosure_pg0 - obtain enclosure page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: expander handle + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_phy_pg0 - obtain phy page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @phy_number: phy number + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
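+ *
+ * Illustrative sketch (the field read afterwards is only an example of
+ * what this page carries):
+ *
+ *	Mpi2SasPhyPage0_t phy_pg0;
+ *	Mpi2ConfigReply_t reply;
+ *	u16 attached_handle;
+ *
+ *	if (!mpt3sas_config_get_phy_pg0(ioc, &reply, &phy_pg0, phy_number))
+ *		attached_handle = le16_to_cpu(phy_pg0.AttachedDevHandle);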
+ */ +int +mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = + cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_phy_pg1 - obtain phy page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @phy_number: phy number + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = + cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_raid_volume_pg1 - obtain raid volume page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: volume handle + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
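+ *
+ * With the GET_NEXT_HANDLE form this routine can walk all volumes; a
+ * sketch of such a scan (illustrative only, local names are placeholders):
+ *
+ *	Mpi2RaidVolPage1_t vol_pg1;
+ *	Mpi2ConfigReply_t reply;
+ *	u16 handle = 0xFFFF, ioc_status;
+ *
+ *	while (!mpt3sas_config_get_raid_volume_pg1(ioc, &reply, &vol_pg1,
+ *	    MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle)) {
+ *		ioc_status = le16_to_cpu(reply.IOCStatus) &
+ *		    MPI2_IOCSTATUS_MASK;
+ *		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ *			break;
+ *		handle = le16_to_cpu(vol_pg1.DevHandle);
+ *	}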
+ */ +int +mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form, + u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_number_pds - obtain number of phys disk assigned to volume + * @ioc: per adapter object + * @handle: volume handle + * @num_pds: returns pds count + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 *num_pds) +{ + Mpi2ConfigRequest_t mpi_request; + Mpi2RaidVolPage0_t config_page; + Mpi2ConfigReply_t mpi_reply; + int r; + u16 ioc_status; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + *num_pds = 0; + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = + cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page, + sizeof(Mpi2RaidVolPage0_t)); + if (!r) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) + *num_pds = config_page.NumPhysDisks; + } + + out: + return r; +} + +/** + * mpt3sas_config_get_raid_volume_pg0 - obtain raid volume page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: volume handle + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
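+ *
+ * The buffer is normally sized from the physical disk count returned by
+ * mpt3sas_config_get_number_pds() above; a sketch of that sizing
+ * (illustrative only):
+ *
+ *	Mpi2RaidVolPage0_t *vol_pg0;
+ *	Mpi2ConfigReply_t reply;
+ *	u8 num_pds;
+ *	u16 sz;
+ *
+ *	if (!mpt3sas_config_get_number_pds(ioc, handle, &num_pds) && num_pds) {
+ *		sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) +
+ *		    (num_pds * sizeof(Mpi2RaidVol0PhysDisk_t));
+ *		vol_pg0 = kzalloc(sz, GFP_KERNEL);
+ *		if (vol_pg0)
+ *			mpt3sas_config_get_raid_volume_pg0(ioc, &reply,
+ *			    vol_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE,
+ *			    handle, sz);
+ *	}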
+ */ +int +mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form, + u32 handle, u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_get_phys_disk_pg0 - obtain phys disk page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_PHYSDISKNUM, PHYSDISKNUM, DEVHANDLE + * @form_specific: specific to the form + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, u32 form, + u32 form_specific) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | form_specific); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_driver_trigger_pg0 - obtain driver trigger page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
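+ *
+ * Illustrative sketch (the flag tested is only an example):
+ *
+ *	Mpi26DriverTriggerPage0_t tg_pg0;
+ *	Mpi2ConfigReply_t reply;
+ *
+ *	if (!mpt3sas_config_get_driver_trigger_pg0(ioc, &reply, &tg_pg0) &&
+ *	    (le16_to_cpu(tg_pg0.TriggerFlags) &
+ *	     MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID))
+ *		... a master trigger is currently persisted ...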
+ */ +int +mpt3sas_config_get_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage0_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * _config_set_driver_trigger_pg0 - write driver trigger page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +static int +_config_set_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage0_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_update_driver_trigger_pg0 - update driver trigger page 0 + * @ioc: per adapter object + * @trigger_flag: trigger type bit map + * @set: set ot clear trigger values + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
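+ *
+ * The page specific update helpers below use this routine to mark the
+ * matching trigger type valid (set) or invalid (clear), e.g. from the
+ * master trigger path:
+ *
+ *	rc = mpt3sas_config_update_driver_trigger_pg0(ioc,
+ *	    MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID, set);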
+ */ +static int +mpt3sas_config_update_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc, + u16 trigger_flag, bool set) +{ + Mpi26DriverTriggerPage0_t tg_pg0; + Mpi2ConfigReply_t mpi_reply; + int rc; + u16 flags, ioc_status; + + rc = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply, &tg_pg0); + if (rc) + return rc; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + ioc_err(ioc, + "%s: Failed to get trigger pg0, ioc_status(0x%04x)\n", + __func__, ioc_status)); + return -EFAULT; + } + + if (set) + flags = le16_to_cpu(tg_pg0.TriggerFlags) | trigger_flag; + else + flags = le16_to_cpu(tg_pg0.TriggerFlags) & ~trigger_flag; + + tg_pg0.TriggerFlags = cpu_to_le16(flags); + + rc = _config_set_driver_trigger_pg0(ioc, &mpi_reply, &tg_pg0); + if (rc) + return rc; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + ioc_err(ioc, + "%s: Failed to update trigger pg0, ioc_status(0x%04x)\n", + __func__, ioc_status)); + return -EFAULT; + } + + return 0; +} + +/** + * mpt3sas_config_get_driver_trigger_pg1 - obtain driver trigger page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * _config_set_driver_trigger_pg1 - write driver trigger page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
+ */ +static int +_config_set_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_update_driver_trigger_pg1 - update driver trigger page 1 + * @ioc: per adapter object + * @master_tg: Master trigger bit map + * @set: set ot clear trigger values + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_update_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_MASTER_TRIGGER_T *master_tg, bool set) +{ + Mpi26DriverTriggerPage1_t tg_pg1; + Mpi2ConfigReply_t mpi_reply; + int rc; + u16 ioc_status; + + rc = mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID, set); + if (rc) + return rc; + + rc = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply, &tg_pg1); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + ioc_err(ioc, + "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n", + __func__, ioc_status)); + rc = -EFAULT; + goto out; + } + + if (set) { + tg_pg1.NumMasterTrigger = cpu_to_le16(1); + tg_pg1.MasterTriggers[0].MasterTriggerFlags = cpu_to_le32( + master_tg->MasterData); + } else { + tg_pg1.NumMasterTrigger = 0; + tg_pg1.MasterTriggers[0].MasterTriggerFlags = 0; + } + + rc = _config_set_driver_trigger_pg1(ioc, &mpi_reply, &tg_pg1); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + ioc_err(ioc, + "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n", + __func__, ioc_status)); + rc = -EFAULT; + goto out; + } + + return 0; + +out: + mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID, !set); + + return rc; +} + +/** + * mpt3sas_config_get_driver_trigger_pg2 - obtain driver trigger page 2 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_get_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage2_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 2; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * _config_set_driver_trigger_pg2 - write driver trigger page 2 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +static int +_config_set_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage2_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 2; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_update_driver_trigger_pg2 - update driver trigger page 2 + * @ioc: per adapter object + * @event_tg: list of Event Triggers + * @set: set ot clear trigger values + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
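+ *
+ * Illustrative sketch of enabling a single event trigger (the event code
+ * chosen here and the initializer style are only an example):
+ *
+ *	struct SL_WH_EVENT_TRIGGERS_T event_tg = {
+ *		.ValidEntries = 1,
+ *		.EventTriggerEntry[0].EventValue = MPI2_EVENT_LOG_ENTRY_ADDED,
+ *		.EventTriggerEntry[0].LogEntryQualifier = 0,
+ *	};
+ *
+ *	rc = mpt3sas_config_update_driver_trigger_pg2(ioc, &event_tg, true);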
+ */ +int +mpt3sas_config_update_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_EVENT_TRIGGERS_T *event_tg, bool set) +{ + Mpi26DriverTriggerPage2_t tg_pg2; + Mpi2ConfigReply_t mpi_reply; + int rc, i, count; + u16 ioc_status; + + rc = mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID, set); + if (rc) + return rc; + + rc = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply, &tg_pg2); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + ioc_err(ioc, + "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n", + __func__, ioc_status)); + rc = -EFAULT; + goto out; + } + + if (set) { + count = event_tg->ValidEntries; + tg_pg2.NumMPIEventTrigger = cpu_to_le16(count); + for (i = 0; i < count; i++) { + tg_pg2.MPIEventTriggers[i].MPIEventCode = + cpu_to_le16( + event_tg->EventTriggerEntry[i].EventValue); + tg_pg2.MPIEventTriggers[i].MPIEventCodeSpecific = + cpu_to_le16( + event_tg->EventTriggerEntry[i].LogEntryQualifier); + } + } else { + tg_pg2.NumMPIEventTrigger = 0; + memset(&tg_pg2.MPIEventTriggers[0], 0, + NUM_VALID_ENTRIES * sizeof( + MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY)); + } + + rc = _config_set_driver_trigger_pg2(ioc, &mpi_reply, &tg_pg2); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + ioc_err(ioc, + "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n", + __func__, ioc_status)); + rc = -EFAULT; + goto out; + } + + return 0; + +out: + mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID, !set); + + return rc; +} + +/** + * mpt3sas_config_get_driver_trigger_pg3 - obtain driver trigger page 3 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage3_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 3; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * _config_set_driver_trigger_pg3 - write driver trigger page 3 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
+ */ +static int +_config_set_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage3_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 3; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_update_driver_trigger_pg3 - update driver trigger page 3 + * @ioc: per adapter object + * @scsi_tg: scsi trigger list + * @set: set ot clear trigger values + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_update_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_SCSI_TRIGGERS_T *scsi_tg, bool set) +{ + Mpi26DriverTriggerPage3_t tg_pg3; + Mpi2ConfigReply_t mpi_reply; + int rc, i, count; + u16 ioc_status; + + rc = mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID, set); + if (rc) + return rc; + + rc = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply, &tg_pg3); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + ioc_err(ioc, + "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n", + __func__, ioc_status)); + return -EFAULT; + } + + if (set) { + count = scsi_tg->ValidEntries; + tg_pg3.NumSCSISenseTrigger = cpu_to_le16(count); + for (i = 0; i < count; i++) { + tg_pg3.SCSISenseTriggers[i].ASCQ = + scsi_tg->SCSITriggerEntry[i].ASCQ; + tg_pg3.SCSISenseTriggers[i].ASC = + scsi_tg->SCSITriggerEntry[i].ASC; + tg_pg3.SCSISenseTriggers[i].SenseKey = + scsi_tg->SCSITriggerEntry[i].SenseKey; + } + } else { + tg_pg3.NumSCSISenseTrigger = 0; + memset(&tg_pg3.SCSISenseTriggers[0], 0, + NUM_VALID_ENTRIES * sizeof( + MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY)); + } + + rc = _config_set_driver_trigger_pg3(ioc, &mpi_reply, &tg_pg3); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + ioc_err(ioc, + "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n", + __func__, ioc_status)); + return -EFAULT; + } + + return 0; +out: + mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID, !set); + + return rc; +} + +/** + * mpt3sas_config_get_driver_trigger_pg4 - obtain driver trigger page 4 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_get_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage4_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 4; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * _config_set_driver_trigger_pg4 - write driver trigger page 4 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +static int +_config_set_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage4_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 4; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_update_driver_trigger_pg4 - update driver trigger page 4 + * @ioc: per adapter object + * @mpi_tg: mpi trigger list + * @set: set ot clear trigger values + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_update_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_MPI_TRIGGERS_T *mpi_tg, bool set) +{ + Mpi26DriverTriggerPage4_t tg_pg4; + Mpi2ConfigReply_t mpi_reply; + int rc, i, count; + u16 ioc_status; + + rc = mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID, set); + if (rc) + return rc; + + rc = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply, &tg_pg4); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + ioc_err(ioc, + "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n", + __func__, ioc_status)); + rc = -EFAULT; + goto out; + } + + if (set) { + count = mpi_tg->ValidEntries; + tg_pg4.NumIOCStatusLogInfoTrigger = cpu_to_le16(count); + for (i = 0; i < count; i++) { + tg_pg4.IOCStatusLoginfoTriggers[i].IOCStatus = + cpu_to_le16(mpi_tg->MPITriggerEntry[i].IOCStatus); + tg_pg4.IOCStatusLoginfoTriggers[i].LogInfo = + cpu_to_le32(mpi_tg->MPITriggerEntry[i].IocLogInfo); + } + } else { + tg_pg4.NumIOCStatusLogInfoTrigger = 0; + memset(&tg_pg4.IOCStatusLoginfoTriggers[0], 0, + NUM_VALID_ENTRIES * sizeof( + MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY)); + } + + rc = _config_set_driver_trigger_pg4(ioc, &mpi_reply, &tg_pg4); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + ioc_err(ioc, + "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n", + __func__, ioc_status)); + rc = -EFAULT; + goto out; + } + + return 0; + +out: + mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID, !set); + + return rc; +} + +/** + * mpt3sas_config_get_volume_handle - returns volume handle for give hidden + * raid components + * @ioc: per adapter object + * @pd_handle: phys disk handle + * @volume_handle: volume handle + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
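+ *
+ * Illustrative sketch: resolve the volume that owns a hidden phys disk,
+ * then fetch its WWID with mpt3sas_config_get_volume_wwid() below:
+ *
+ *	u16 volume_handle;
+ *	u64 wwid;
+ *
+ *	if (!mpt3sas_config_get_volume_handle(ioc, pd_handle, &volume_handle)
+ *	    && volume_handle &&
+ *	    !mpt3sas_config_get_volume_wwid(ioc, volume_handle, &wwid))
+ *		... wwid now identifies the owning RAID volume ...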
+ */ +int +mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle, + u16 *volume_handle) +{ + Mpi2RaidConfigurationPage0_t *config_page = NULL; + Mpi2ConfigRequest_t mpi_request; + Mpi2ConfigReply_t mpi_reply; + int r, i, config_page_sz; + u16 ioc_status; + int config_num; + u16 element_type; + u16 phys_disk_dev_handle; + + *volume_handle = 0; + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG; + mpi_request.Header.PageVersion = MPI2_RAIDCONFIG0_PAGEVERSION; + mpi_request.Header.PageNumber = 0; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4); + config_page = kmalloc(config_page_sz, GFP_KERNEL); + if (!config_page) { + r = -1; + goto out; + } + + config_num = 0xff; + while (1) { + mpi_request.PageAddress = cpu_to_le32(config_num + + MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM); + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + config_page_sz); + if (r) + goto out; + r = -1; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < config_page->NumElements; i++) { + element_type = le16_to_cpu(config_page-> + ConfigElement[i].ElementFlags) & + MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE; + if (element_type == + MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT || + element_type == + MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) { + phys_disk_dev_handle = + le16_to_cpu(config_page->ConfigElement[i]. + PhysDiskDevHandle); + if (phys_disk_dev_handle == pd_handle) { + *volume_handle = + le16_to_cpu(config_page-> + ConfigElement[i].VolDevHandle); + r = 0; + goto out; + } + } else if (element_type == + MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) { + *volume_handle = 0; + r = 0; + goto out; + } + } + config_num = config_page->ConfigNum; + } + out: + kfree(config_page); + return r; +} + +/** + * mpt3sas_config_get_volume_wwid - returns wwid given the volume handle + * @ioc: per adapter object + * @volume_handle: volume handle + * @wwid: volume wwid + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, u16 volume_handle, + u64 *wwid) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2RaidVolPage1_t raid_vol_pg1; + + *wwid = 0; + if (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + &raid_vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, + volume_handle))) { + *wwid = le64_to_cpu(raid_vol_pg1.WWID); + return 0; + } else + return -1; +} diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c new file mode 100644 index 000000000..efdb8178d --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -0,0 +1,4189 @@ +/* + * Management Module Support for MPT (Message Passing Technology) based + * controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mpt3sas_base.h" +#include "mpt3sas_ctl.h" + + +static struct fasync_struct *async_queue; +static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait); + + +/** + * enum block_state - blocking state + * @NON_BLOCKING: non blocking + * @BLOCKING: blocking + * + * These states are for ioctls that need to wait for a response + * from firmware, so they probably require sleep. 
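+ *
+ * A sketch of how a caller would typically pick the state (assumed
+ * pattern, keyed off the file's O_NONBLOCK flag):
+ *
+ *	enum block_state state = (file->f_flags & O_NONBLOCK) ?
+ *	    NON_BLOCKING : BLOCKING;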
+ */ +enum block_state { + NON_BLOCKING, + BLOCKING, +}; + +/** + * _ctl_display_some_debug - debug routine + * @ioc: per adapter object + * @smid: system request message index + * @calling_function_name: string pass from calling function + * @mpi_reply: reply message frame + * Context: none. + * + * Function for displaying debug info helpful when debugging issues + * in this module. + */ +static void +_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, + char *calling_function_name, MPI2DefaultReply_t *mpi_reply) +{ + Mpi2ConfigRequest_t *mpi_request; + char *desc = NULL; + + if (!(ioc->logging_level & MPT_DEBUG_IOCTL)) + return; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + switch (mpi_request->Function) { + case MPI2_FUNCTION_SCSI_IO_REQUEST: + { + Mpi2SCSIIORequest_t *scsi_request = + (Mpi2SCSIIORequest_t *)mpi_request; + + snprintf(ioc->tmp_string, MPT_STRING_LENGTH, + "scsi_io, cmd(0x%02x), cdb_len(%d)", + scsi_request->CDB.CDB32[0], + le16_to_cpu(scsi_request->IoFlags) & 0xF); + desc = ioc->tmp_string; + break; + } + case MPI2_FUNCTION_SCSI_TASK_MGMT: + desc = "task_mgmt"; + break; + case MPI2_FUNCTION_IOC_INIT: + desc = "ioc_init"; + break; + case MPI2_FUNCTION_IOC_FACTS: + desc = "ioc_facts"; + break; + case MPI2_FUNCTION_CONFIG: + { + Mpi2ConfigRequest_t *config_request = + (Mpi2ConfigRequest_t *)mpi_request; + + snprintf(ioc->tmp_string, MPT_STRING_LENGTH, + "config, type(0x%02x), ext_type(0x%02x), number(%d)", + (config_request->Header.PageType & + MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType, + config_request->Header.PageNumber); + desc = ioc->tmp_string; + break; + } + case MPI2_FUNCTION_PORT_FACTS: + desc = "port_facts"; + break; + case MPI2_FUNCTION_PORT_ENABLE: + desc = "port_enable"; + break; + case MPI2_FUNCTION_EVENT_NOTIFICATION: + desc = "event_notification"; + break; + case MPI2_FUNCTION_FW_DOWNLOAD: + desc = "fw_download"; + break; + case MPI2_FUNCTION_FW_UPLOAD: + desc = "fw_upload"; + break; + case MPI2_FUNCTION_RAID_ACTION: + desc = "raid_action"; + break; + case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: + { + Mpi2SCSIIORequest_t *scsi_request = + (Mpi2SCSIIORequest_t *)mpi_request; + + snprintf(ioc->tmp_string, MPT_STRING_LENGTH, + "raid_pass, cmd(0x%02x), cdb_len(%d)", + scsi_request->CDB.CDB32[0], + le16_to_cpu(scsi_request->IoFlags) & 0xF); + desc = ioc->tmp_string; + break; + } + case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: + desc = "sas_iounit_cntl"; + break; + case MPI2_FUNCTION_SATA_PASSTHROUGH: + desc = "sata_pass"; + break; + case MPI2_FUNCTION_DIAG_BUFFER_POST: + desc = "diag_buffer_post"; + break; + case MPI2_FUNCTION_DIAG_RELEASE: + desc = "diag_release"; + break; + case MPI2_FUNCTION_SMP_PASSTHROUGH: + desc = "smp_passthrough"; + break; + case MPI2_FUNCTION_TOOLBOX: + desc = "toolbox"; + break; + case MPI2_FUNCTION_NVME_ENCAPSULATED: + desc = "nvme_encapsulated"; + break; + } + + if (!desc) + return; + + ioc_info(ioc, "%s: %s, smid(%d)\n", calling_function_name, desc, smid); + + if (!mpi_reply) + return; + + if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) + ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + + if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + mpi_request->Function == + MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { + Mpi2SCSIIOReply_t *scsi_reply = + (Mpi2SCSIIOReply_t *)mpi_reply; + struct _sas_device *sas_device = NULL; + struct _pcie_device *pcie_device = NULL; + + sas_device = mpt3sas_get_sdev_by_handle(ioc, 
+ le16_to_cpu(scsi_reply->DevHandle)); + if (sas_device) { + ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n", + (u64)sas_device->sas_address, + sas_device->phy); + ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n", + (u64)sas_device->enclosure_logical_id, + sas_device->slot); + sas_device_put(sas_device); + } + if (!sas_device) { + pcie_device = mpt3sas_get_pdev_by_handle(ioc, + le16_to_cpu(scsi_reply->DevHandle)); + if (pcie_device) { + ioc_warn(ioc, "\tWWID(0x%016llx), port(%d)\n", + (unsigned long long)pcie_device->wwid, + pcie_device->port_num); + if (pcie_device->enclosure_handle != 0) + ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n", + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot); + pcie_device_put(pcie_device); + } + } + if (scsi_reply->SCSIState || scsi_reply->SCSIStatus) + ioc_info(ioc, "\tscsi_state(0x%02x), scsi_status(0x%02x)\n", + scsi_reply->SCSIState, + scsi_reply->SCSIStatus); + } +} + +/** + * mpt3sas_ctl_done - ctl module completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: none. + * + * The callback handler when using ioc->ctl_cb_idx. + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +u8 +mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + Mpi2SCSIIOReply_t *scsiio_reply; + Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply; + const void *sense_data; + u32 sz; + + if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED) + return 1; + if (ioc->ctl_cmds.smid != smid) + return 1; + ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE; + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) { + memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); + ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID; + /* get sense data */ + if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + mpi_reply->Function == + MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { + scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply; + if (scsiio_reply->SCSIState & + MPI2_SCSI_STATE_AUTOSENSE_VALID) { + sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, + le32_to_cpu(scsiio_reply->SenseCount)); + sense_data = mpt3sas_base_get_sense_buffer(ioc, + smid); + memcpy(ioc->ctl_cmds.sense, sense_data, sz); + } + } + /* + * Get Error Response data for NVMe device. The ctl_cmds.sense + * buffer is used to store the Error Response data. + */ + if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) { + nvme_error_reply = + (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply; + sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE, + le16_to_cpu(nvme_error_reply->ErrorResponseCount)); + sense_data = mpt3sas_base_get_sense_buffer(ioc, smid); + memcpy(ioc->ctl_cmds.sense, sense_data, sz); + } + } + + _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply); + ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING; + complete(&ioc->ctl_cmds.done); + return 1; +} + +/** + * _ctl_check_event_type - determines when an event needs logging + * @ioc: per adapter object + * @event: firmware event + * + * The bitmask in ioc->event_type[] indicates which events should be + * be saved in the driver event_log. This bitmask is set by application. + * + * Return: 1 when event should be captured, or zero means no match. 
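+ *
+ * Worked example of the bitmap lookup performed below: event 0x23 (35)
+ * selects word 35 / 32 = 1 of ioc->event_type[] and bit 35 % 32 = 3
+ * within it, so the event is logged only when the application has set
+ * (1 << 3) in ioc->event_type[1].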
+ */ +static int +_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event) +{ + u16 i; + u32 desired_event; + + if (event >= 128 || !event || !ioc->event_log) + return 0; + + desired_event = (1 << (event % 32)); + if (!desired_event) + desired_event = 1; + i = event / 32; + return desired_event & ioc->event_type[i]; +} + +/** + * mpt3sas_ctl_add_to_event_log - add event + * @ioc: per adapter object + * @mpi_reply: reply message frame + */ +void +mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventNotificationReply_t *mpi_reply) +{ + struct MPT3_IOCTL_EVENTS *event_log; + u16 event; + int i; + u32 sz, event_data_sz; + u8 send_aen = 0; + + if (!ioc->event_log) + return; + + event = le16_to_cpu(mpi_reply->Event); + + if (_ctl_check_event_type(ioc, event)) { + + /* insert entry into circular event_log */ + i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE; + event_log = ioc->event_log; + event_log[i].event = event; + event_log[i].context = ioc->event_context++; + + event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4; + sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE); + memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE); + memcpy(event_log[i].data, mpi_reply->EventData, sz); + send_aen = 1; + } + + /* This aen_event_read_flag flag is set until the + * application has read the event log. + * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify. + */ + if (event == MPI2_EVENT_LOG_ENTRY_ADDED || + (send_aen && !ioc->aen_event_read_flag)) { + ioc->aen_event_read_flag = 1; + wake_up_interruptible(&ctl_poll_wait); + if (async_queue) + kill_fasync(&async_queue, SIGIO, POLL_IN); + } +} + +/** + * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time) + * @ioc: per adapter object + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt. + * + * This function merely adds a new work task into ioc->firmware_event_thread. + * The tasks are worked from _firmware_event_work in user context. + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +u8 +mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, + u32 reply) +{ + Mpi2EventNotificationReply_t *mpi_reply; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) + mpt3sas_ctl_add_to_event_log(ioc, mpi_reply); + return 1; +} + +/** + * _ctl_verify_adapter - validates ioc_number passed from application + * @ioc_number: ? + * @iocpp: The ioc pointer is returned in this. + * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & + * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. + * + * Return: (-1) means error, else ioc_number. + */ +static int +_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp, + int mpi_version) +{ + struct MPT3SAS_ADAPTER *ioc; + int version = 0; + /* global ioc lock to protect controller on list operations */ + spin_lock(&gioc_lock); + list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { + if (ioc->id != ioc_number) + continue; + /* Check whether this ioctl command is from right + * ioctl device or not, if not continue the search. + */ + version = ioc->hba_mpi_version_belonged; + /* MPI25_VERSION and MPI26_VERSION uses same ioctl + * device. 
+ */ + if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) { + if ((version == MPI25_VERSION) || + (version == MPI26_VERSION)) + goto out; + else + continue; + } else { + if (version != mpi_version) + continue; + } +out: + spin_unlock(&gioc_lock); + *iocpp = ioc; + return ioc_number; + } + spin_unlock(&gioc_lock); + *iocpp = NULL; + return -1; +} + +/** + * mpt3sas_ctl_pre_reset_handler - reset callback handler (for ctl) + * @ioc: per adapter object + * + * The handler for doing any required cleanup or initialization. + */ +void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) +{ + int i; + u8 issue_reset; + + dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__)); + for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { + if (!(ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_REGISTERED)) + continue; + if ((ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_RELEASED)) + continue; + + /* + * add a log message to indicate the release + */ + ioc_info(ioc, + "%s: Releasing the trace buffer due to adapter reset.", + __func__); + ioc->htb_rel.buffer_rel_condition = + MPT3_DIAG_BUFFER_REL_TRIGGER; + mpt3sas_send_diag_release(ioc, i, &issue_reset); + } +} + +/** + * mpt3sas_ctl_clear_outstanding_ioctls - clears outstanding ioctl cmd. + * @ioc: per adapter object + * + * The handler for doing any required cleanup or initialization. + */ +void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc) +{ + dtmprintk(ioc, + ioc_info(ioc, "%s: clear outstanding ioctl cmd\n", __func__)); + if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) { + ioc->ctl_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid); + complete(&ioc->ctl_cmds.done); + } +} + +/** + * mpt3sas_ctl_reset_done_handler - reset callback handler (for ctl) + * @ioc: per adapter object + * + * The handler for doing any required cleanup or initialization. + */ +void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) +{ + int i; + + dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__)); + + for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { + if (!(ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_REGISTERED)) + continue; + if ((ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_RELEASED)) + continue; + ioc->diag_buffer_status[i] |= + MPT3_DIAG_BUFFER_IS_DIAG_RESET; + } +} + +/** + * _ctl_fasync - + * @fd: ? + * @filep: ? + * @mode: ? + * + * Called when application request fasyn callback handler. + */ +static int +_ctl_fasync(int fd, struct file *filep, int mode) +{ + return fasync_helper(fd, filep, mode, &async_queue); +} + +/** + * _ctl_poll - + * @filep: ? + * @wait: ? + * + */ +static __poll_t +_ctl_poll(struct file *filep, poll_table *wait) +{ + struct MPT3SAS_ADAPTER *ioc; + + poll_wait(filep, &ctl_poll_wait, wait); + + /* global ioc lock to protect controller on list operations */ + spin_lock(&gioc_lock); + list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { + if (ioc->aen_event_read_flag) { + spin_unlock(&gioc_lock); + return EPOLLIN | EPOLLRDNORM; + } + } + spin_unlock(&gioc_lock); + return 0; +} + +/** + * _ctl_set_task_mid - assign an active smid to tm request + * @ioc: per adapter object + * @karg: (struct mpt3_ioctl_command) + * @tm_request: pointer to mf from user space + * + * Return: 0 when an smid if found, else fail. + * during failure, the reply frame is filled. 
+ */ +static int +_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg, + Mpi2SCSITaskManagementRequest_t *tm_request) +{ + bool found = false; + u16 smid; + u16 handle; + struct scsi_cmnd *scmd; + struct MPT3SAS_DEVICE *priv_data; + Mpi2SCSITaskManagementReply_t *tm_reply; + u32 sz; + u32 lun; + char *desc = NULL; + + if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + desc = "abort_task"; + else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) + desc = "query_task"; + else + return 0; + + lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); + + handle = le16_to_cpu(tm_request->DevHandle); + for (smid = ioc->scsiio_depth; smid && !found; smid--) { + struct scsiio_tracker *st; + __le16 task_mid; + + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + if (lun != scmd->device->lun) + continue; + priv_data = scmd->device->hostdata; + if (priv_data->sas_target == NULL) + continue; + if (priv_data->sas_target->handle != handle) + continue; + st = scsi_cmd_priv(scmd); + + /* + * If the given TaskMID from the user space is zero, then the + * first outstanding smid will be picked up. Otherwise, + * targeted smid will be the one. + */ + task_mid = cpu_to_le16(st->smid); + if (!tm_request->TaskMID) + tm_request->TaskMID = task_mid; + found = tm_request->TaskMID == task_mid; + } + + if (!found) { + dctlprintk(ioc, + ioc_info(ioc, "%s: handle(0x%04x), lun(%d), no active mid!!\n", + desc, le16_to_cpu(tm_request->DevHandle), + lun)); + tm_reply = ioc->ctl_cmds.reply; + tm_reply->DevHandle = tm_request->DevHandle; + tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + tm_reply->TaskType = tm_request->TaskType; + tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4; + tm_reply->VP_ID = tm_request->VP_ID; + tm_reply->VF_ID = tm_request->VF_ID; + sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz); + if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply, + sz)) + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + return 1; + } + + dctlprintk(ioc, + ioc_info(ioc, "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", + desc, le16_to_cpu(tm_request->DevHandle), lun, + le16_to_cpu(tm_request->TaskMID))); + return 0; +} + +/** + * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode + * @ioc: per adapter object + * @karg: (struct mpt3_ioctl_command) + * @mf: pointer to mf in user space + */ +static long +_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, + void __user *mf) +{ + MPI2RequestHeader_t *mpi_request = NULL, *request; + MPI2DefaultReply_t *mpi_reply; + Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL; + struct _pcie_device *pcie_device = NULL; + u16 smid; + unsigned long timeout; + u8 issue_reset; + u32 sz, sz_arg; + void *psge; + void *data_out = NULL; + dma_addr_t data_out_dma = 0; + size_t data_out_sz = 0; + void *data_in = NULL; + dma_addr_t data_in_dma = 0; + size_t data_in_sz = 0; + long ret; + u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE; + + issue_reset = 0; + + if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: ctl_cmd in use\n", __func__); + ret = -EAGAIN; + goto out; + } + + ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + if (ret) + goto out; + + mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL); + if (!mpi_request) { + ioc_err(ioc, "%s: failed obtaining a memory for mpi_request\n", + __func__); + ret = -ENOMEM; + goto out; + } + + /* Check for overflow and wraparound */ + if 
(karg.data_sge_offset * 4 > ioc->request_sz || + karg.data_sge_offset > (UINT_MAX / 4)) { + ret = -EINVAL; + goto out; + } + + /* copy in request message frame from user */ + if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, + __func__); + ret = -EFAULT; + goto out; + } + + if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { + smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + ret = -EAGAIN; + goto out; + } + } else { + /* Use first reserved smid for passthrough ioctls */ + smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1; + } + + ret = 0; + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(request, 0, ioc->request_sz); + memcpy(request, mpi_request, karg.data_sge_offset*4); + ioc->ctl_cmds.smid = smid; + data_out_sz = karg.data_out_size; + data_in_sz = karg.data_in_size; + + if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || + mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT || + mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH || + mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) { + + device_handle = le16_to_cpu(mpi_request->FunctionDependent1); + if (!device_handle || (device_handle > + ioc->facts.MaxDevHandle)) { + ret = -EINVAL; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } + + /* obtain dma-able memory for data transfer */ + if (data_out_sz) /* WRITE */ { + data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz, + &data_out_dma, GFP_KERNEL); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + if (copy_from_user(data_out, karg.data_out_buf_ptr, + data_out_sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -EFAULT; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } + + if (data_in_sz) /* READ */ { + data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz, + &data_in_dma, GFP_KERNEL); + if (!data_in) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } + + psge = (void *)request + (karg.data_sge_offset*4); + + /* send command to firmware */ + _ctl_display_some_debug(ioc, smid, "ctl_request", NULL); + + init_completion(&ioc->ctl_cmds.done); + switch (mpi_request->Function) { + case MPI2_FUNCTION_NVME_ENCAPSULATED: + { + nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request; + if (!ioc->pcie_sg_lookup) { + dtmprintk(ioc, ioc_info(ioc, + "HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n" + )); + + if (ioc->logging_level & MPT_DEBUG_TM) + _debug_dump_mf(nvme_encap_request, + ioc->request_sz/4); + mpt3sas_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + /* + * Get the Physical Address of the sense buffer. + * Use Error Response buffer address field to hold the sense + * buffer address. + * Clear the internal sense buffer, which will potentially hold + * the Completion Queue Entry on return, or 0 if no Entry. + * Build the PRPs and set direction bits. + * Send the request. 
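+ *
+ * Address composition done below, shown with illustrative values:
+ *
+ *	ioc->sense_dma (pool base)        0x0000001234500000
+ *	per-smid sense buffer, low 32     0x34500060
+ *	ErrorResponseBaseAddress          0x0000001234500060
+ *
+ * i.e. the upper 32 bits come from the sense pool base and the lower
+ * 32 bits from mpt3sas_base_get_sense_buffer_dma() for this smid.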
+ */ + nvme_encap_request->ErrorResponseBaseAddress = + cpu_to_le64(ioc->sense_dma & 0xFFFFFFFF00000000UL); + nvme_encap_request->ErrorResponseBaseAddress |= + cpu_to_le64(le32_to_cpu( + mpt3sas_base_get_sense_buffer_dma(ioc, smid))); + nvme_encap_request->ErrorResponseAllocationLength = + cpu_to_le16(NVME_ERROR_RESPONSE_SIZE); + memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE); + ioc->build_nvme_prp(ioc, smid, nvme_encap_request, + data_out_dma, data_out_sz, data_in_dma, data_in_sz); + if (test_bit(device_handle, ioc->device_remove_in_progress)) { + dtmprintk(ioc, + ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n", + device_handle)); + mpt3sas_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + mpt3sas_base_put_smid_nvme_encap(ioc, smid); + break; + } + case MPI2_FUNCTION_SCSI_IO_REQUEST: + case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: + { + Mpi2SCSIIORequest_t *scsiio_request = + (Mpi2SCSIIORequest_t *)request; + scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; + scsiio_request->SenseBufferLowAddress = + mpt3sas_base_get_sense_buffer_dma(ioc, smid); + memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE); + if (test_bit(device_handle, ioc->device_remove_in_progress)) { + dtmprintk(ioc, + ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n", + device_handle)); + mpt3sas_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) + ioc->put_smid_scsi_io(ioc, smid, device_handle); + else + ioc->put_smid_default(ioc, smid); + break; + } + case MPI2_FUNCTION_SCSI_TASK_MGMT: + { + Mpi2SCSITaskManagementRequest_t *tm_request = + (Mpi2SCSITaskManagementRequest_t *)request; + + dtmprintk(ioc, + ioc_info(ioc, "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n", + le16_to_cpu(tm_request->DevHandle), + tm_request->TaskType)); + ioc->got_task_abort_from_ioctl = 1; + if (tm_request->TaskType == + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || + tm_request->TaskType == + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) { + if (_ctl_set_task_mid(ioc, &karg, tm_request)) { + mpt3sas_base_free_smid(ioc, smid); + ioc->got_task_abort_from_ioctl = 0; + goto out; + } + } + ioc->got_task_abort_from_ioctl = 0; + + if (test_bit(device_handle, ioc->device_remove_in_progress)) { + dtmprintk(ioc, + ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n", + device_handle)); + mpt3sas_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu( + tm_request->DevHandle)); + ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + ioc->put_smid_hi_priority(ioc, smid, 0); + break; + } + case MPI2_FUNCTION_SMP_PASSTHROUGH: + { + Mpi2SmpPassthroughRequest_t *smp_request = + (Mpi2SmpPassthroughRequest_t *)mpi_request; + u8 *data; + + if (!ioc->multipath_on_hba) { + /* ioc determines which port to use */ + smp_request->PhysicalPort = 0xFF; + } + if (smp_request->PassthroughFlags & + MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE) + data = (u8 *)&smp_request->SGL; + else { + if (unlikely(data_out == NULL)) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + mpt3sas_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + data = data_out; + } + + if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) { + ioc->ioc_link_reset_in_progress = 1; + ioc->ignore_loginfos = 1; + } + ioc->build_sg(ioc, psge, 
data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + ioc->put_smid_default(ioc, smid); + break; + } + case MPI2_FUNCTION_SATA_PASSTHROUGH: + { + if (test_bit(device_handle, ioc->device_remove_in_progress)) { + dtmprintk(ioc, + ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n", + device_handle)); + mpt3sas_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + ioc->put_smid_default(ioc, smid); + break; + } + case MPI2_FUNCTION_FW_DOWNLOAD: + { + if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) { + ioc_info(ioc, "Firmware download not supported for ATTO HBA.\n"); + ret = -EPERM; + break; + } + fallthrough; + } + case MPI2_FUNCTION_FW_UPLOAD: + { + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + ioc->put_smid_default(ioc, smid); + break; + } + case MPI2_FUNCTION_TOOLBOX: + { + Mpi2ToolboxCleanRequest_t *toolbox_request = + (Mpi2ToolboxCleanRequest_t *)mpi_request; + + if ((toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) + || (toolbox_request->Tool == + MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN)) + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + else if (toolbox_request->Tool == + MPI2_TOOLBOX_MEMORY_MOVE_TOOL) { + Mpi2ToolboxMemMoveRequest_t *mem_move_request = + (Mpi2ToolboxMemMoveRequest_t *)request; + Mpi2SGESimple64_t tmp, *src = NULL, *dst = NULL; + + ioc->build_sg_mpi(ioc, psge, data_out_dma, + data_out_sz, data_in_dma, data_in_sz); + if (data_out_sz && !data_in_sz) { + dst = + (Mpi2SGESimple64_t *)&mem_move_request->SGL; + src = (void *)dst + ioc->sge_size; + + memcpy(&tmp, src, ioc->sge_size); + memcpy(src, dst, ioc->sge_size); + memcpy(dst, &tmp, ioc->sge_size); + } + if (ioc->logging_level & MPT_DEBUG_TM) { + ioc_info(ioc, + "Mpi2ToolboxMemMoveRequest_t request msg\n"); + _debug_dump_mf(mem_move_request, + ioc->request_sz/4); + } + } else + ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + ioc->put_smid_default(ioc, smid); + break; + } + case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: + { + Mpi2SasIoUnitControlRequest_t *sasiounit_request = + (Mpi2SasIoUnitControlRequest_t *)mpi_request; + + if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET + || sasiounit_request->Operation == + MPI2_SAS_OP_PHY_LINK_RESET) { + ioc->ioc_link_reset_in_progress = 1; + ioc->ignore_loginfos = 1; + } + /* drop to default case for posting the request */ + } + fallthrough; + default: + ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + ioc->put_smid_default(ioc, smid); + break; + } + + if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT) + timeout = MPT3_IOCTL_DEFAULT_TIMEOUT; + else + timeout = karg.timeout; + wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ); + if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { + Mpi2SCSITaskManagementRequest_t *tm_request = + (Mpi2SCSITaskManagementRequest_t *)mpi_request; + mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu( + tm_request->DevHandle)); + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); + } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH || + mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) && + ioc->ioc_link_reset_in_progress) { + ioc->ioc_link_reset_in_progress = 0; + ioc->ignore_loginfos = 0; + } + if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + 
karg.data_sge_offset, issue_reset); + goto issue_host_reset; + } + + mpi_reply = ioc->ctl_cmds.reply; + + if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT && + (ioc->logging_level & MPT_DEBUG_TM)) { + Mpi2SCSITaskManagementReply_t *tm_reply = + (Mpi2SCSITaskManagementReply_t *)mpi_reply; + + ioc_info(ioc, "TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n", + le16_to_cpu(tm_reply->IOCStatus), + le32_to_cpu(tm_reply->IOCLogInfo), + le32_to_cpu(tm_reply->TerminationCount)); + } + + /* copy out xdata to user */ + if (data_in_sz) { + if (copy_to_user(karg.data_in_buf_ptr, data_in, + data_in_sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } + + /* copy out reply message frame to user */ + if (karg.max_reply_bytes) { + sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz); + if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply, + sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } + + /* copy out sense/NVMe Error Response to user */ + if (karg.max_sense_bytes && (mpi_request->Function == + MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function == + MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function == + MPI2_FUNCTION_NVME_ENCAPSULATED)) { + if (karg.sense_data_ptr == NULL) { + ioc_info(ioc, "Response buffer provided by application is NULL; Response data will not be returned\n"); + goto out; + } + sz_arg = (mpi_request->Function == + MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE : + SCSI_SENSE_BUFFERSIZE; + sz = min_t(u32, karg.max_sense_bytes, sz_arg); + if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, + sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } + + issue_host_reset: + if (issue_reset) { + ret = -ENODATA; + if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + mpi_request->Function == + MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || + mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) { + ioc_info(ioc, "issue target reset: handle = (0x%04x)\n", + le16_to_cpu(mpi_request->FunctionDependent1)); + mpt3sas_halt_firmware(ioc); + pcie_device = mpt3sas_get_pdev_by_handle(ioc, + le16_to_cpu(mpi_request->FunctionDependent1)); + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device( + pcie_device->device_info)))) + mpt3sas_scsih_issue_locked_tm(ioc, + le16_to_cpu(mpi_request->FunctionDependent1), + 0, 0, 0, + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, + 0, pcie_device->reset_timeout, + MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE); + else + mpt3sas_scsih_issue_locked_tm(ioc, + le16_to_cpu(mpi_request->FunctionDependent1), + 0, 0, 0, + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, + 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET); + } else + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + } + + out: + if (pcie_device) + pcie_device_put(pcie_device); + + /* free memory associated with sg buffers */ + if (data_in) + dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in, + data_in_dma); + + if (data_out) + dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out, + data_out_dma); + + kfree(mpi_request); + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + return ret; +} + +/** + * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + */ +static long +_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void 
__user *arg) +{ + struct mpt3_ioctl_iocinfo karg; + + dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", + __func__)); + + memset(&karg, 0 , sizeof(karg)); + if (ioc->pfacts) + karg.port_number = ioc->pfacts[0].PortNumber; + karg.hw_rev = ioc->pdev->revision; + karg.pci_id = ioc->pdev->device; + karg.subsystem_device = ioc->pdev->subsystem_device; + karg.subsystem_vendor = ioc->pdev->subsystem_vendor; + karg.pci_information.u.bits.bus = ioc->pdev->bus->number; + karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn); + karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn); + karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus); + karg.firmware_version = ioc->facts.FWVersion.Word; + strcpy(karg.driver_version, ioc->driver_name); + strcat(karg.driver_version, "-"); + switch (ioc->hba_mpi_version_belonged) { + case MPI2_VERSION: + if (ioc->is_warpdrive) + karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200; + else + karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2; + strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION); + break; + case MPI25_VERSION: + case MPI26_VERSION: + if (ioc->is_gen35_ioc) + karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35; + else + karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3; + strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION); + break; + } + karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +/** + * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + */ +static long +_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_eventquery karg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", + __func__)); + + karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE; + memcpy(karg.event_types, ioc->event_type, + MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); + + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +/** + * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + */ +static long +_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_eventenable karg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", + __func__)); + + memcpy(ioc->event_type, karg.event_types, + MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); + mpt3sas_base_validate_event_type(ioc, ioc->event_type); + + if (ioc->event_log) + return 0; + /* initialize event_log */ + ioc->event_context = 0; + ioc->aen_event_read_flag = 0; + ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE, + sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL); + if (!ioc->event_log) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENOMEM; + } + return 0; +} + +/** + * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + */ +static long +_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + 
struct mpt3_ioctl_eventreport karg; + u32 number_bytes, max_events, max; + struct mpt3_ioctl_eventreport __user *uarg = arg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", + __func__)); + + number_bytes = karg.hdr.max_data_size - + sizeof(struct mpt3_ioctl_header); + max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS); + max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events); + + /* If fewer than 1 event is requested, there must have + * been some type of error. + */ + if (!max || !ioc->event_log) + return -ENODATA; + + number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS); + if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + /* reset flag so SIGIO can restart */ + ioc->aen_event_read_flag = 0; + return 0; +} + +/** + * _ctl_do_reset - main handler for MPT3HARDRESET opcode + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + */ +static long +_ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_diag_reset karg; + int retval; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + if (ioc->shost_recovery || ioc->pci_error_recovery || + ioc->is_driver_loading) + return -EAGAIN; + + dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", + __func__)); + + ioc->reset_from_user = 1; + retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc_info(ioc, + "Ioctl: host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED")); + return 0; +} + +/** + * _ctl_btdh_search_sas_device - searching for sas device + * @ioc: per adapter object + * @btdh: btdh ioctl payload + */ +static int +_ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc, + struct mpt3_ioctl_btdh_mapping *btdh) +{ + struct _sas_device *sas_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->sas_device_list)) + return rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == sas_device->handle) { + btdh->bus = sas_device->channel; + btdh->id = sas_device->id; + rc = 1; + goto out; + } else if (btdh->bus == sas_device->channel && btdh->id == + sas_device->id && btdh->handle == 0xFFFF) { + btdh->handle = sas_device->handle; + rc = 1; + goto out; + } + } + out: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +/** + * _ctl_btdh_search_pcie_device - searching for pcie device + * @ioc: per adapter object + * @btdh: btdh ioctl payload + */ +static int +_ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc, + struct mpt3_ioctl_btdh_mapping *btdh) +{ + struct _pcie_device *pcie_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->pcie_device_list)) + return rc; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == pcie_device->handle) { + btdh->bus = pcie_device->channel; + btdh->id = pcie_device->id; + rc = 1; + goto out; + } else if (btdh->bus == pcie_device->channel && btdh->id == + pcie_device->id && btdh->handle == 0xFFFF) { + btdh->handle = pcie_device->handle; + rc = 1; + goto out; + } 
+ } + out: + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + return rc; +} + +/** + * _ctl_btdh_search_raid_device - searching for raid device + * @ioc: per adapter object + * @btdh: btdh ioctl payload + */ +static int +_ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc, + struct mpt3_ioctl_btdh_mapping *btdh) +{ + struct _raid_device *raid_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->raid_device_list)) + return rc; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == raid_device->handle) { + btdh->bus = raid_device->channel; + btdh->id = raid_device->id; + rc = 1; + goto out; + } else if (btdh->bus == raid_device->channel && btdh->id == + raid_device->id && btdh->handle == 0xFFFF) { + btdh->handle = raid_device->handle; + rc = 1; + goto out; + } + } + out: + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return rc; +} + +/** + * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + */ +static long +_ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_btdh_mapping karg; + int rc; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, ioc_info(ioc, "%s\n", + __func__)); + + rc = _ctl_btdh_search_sas_device(ioc, &karg); + if (!rc) + rc = _ctl_btdh_search_pcie_device(ioc, &karg); + if (!rc) + _ctl_btdh_search_raid_device(ioc, &karg); + + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +/** + * _ctl_diag_capability - return diag buffer capability + * @ioc: per adapter object + * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED + * + * returns 1 when diag buffer support is enabled in firmware + */ +static u8 +_ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type) +{ + u8 rc = 0; + + switch (buffer_type) { + case MPI2_DIAG_BUF_TYPE_TRACE: + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) + rc = 1; + break; + case MPI2_DIAG_BUF_TYPE_SNAPSHOT: + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) + rc = 1; + break; + case MPI2_DIAG_BUF_TYPE_EXTENDED: + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) + rc = 1; + } + + return rc; +} + +/** + * _ctl_diag_get_bufftype - return diag buffer type + * either TRACE, SNAPSHOT, or EXTENDED + * @ioc: per adapter object + * @unique_id: specifies the unique_id for the buffer + * + * returns MPT3_DIAG_UID_NOT_FOUND if the id not found + */ +static u8 +_ctl_diag_get_bufftype(struct MPT3SAS_ADAPTER *ioc, u32 unique_id) +{ + u8 index; + + for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) { + if (ioc->unique_id[index] == unique_id) + return index; + } + + return MPT3_DIAG_UID_NOT_FOUND; +} + +/** + * _ctl_diag_register_2 - wrapper for registering diag buffer support + * @ioc: per adapter object + * @diag_register: the diag_register struct passed in from user space + * + */ +static long +_ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, + struct mpt3_diag_register *diag_register) +{ + int rc, i; + void *request_data = NULL; + dma_addr_t request_data_dma; + u32 request_data_sz = 0; + Mpi2DiagBufferPostRequest_t *mpi_request; + 
Mpi2DiagBufferPostReply_t *mpi_reply; + u8 buffer_type; + u16 smid; + u16 ioc_status; + u32 ioc_state; + u8 issue_reset = 0; + + dctlprintk(ioc, ioc_info(ioc, "%s\n", + __func__)); + + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + ioc_err(ioc, "%s: failed due to ioc not operational\n", + __func__); + rc = -EAGAIN; + goto out; + } + + if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: ctl_cmd in use\n", __func__); + rc = -EAGAIN; + goto out; + } + + buffer_type = diag_register->buffer_type; + if (!_ctl_diag_capability(ioc, buffer_type)) { + ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -EPERM; + } + + if (diag_register->unique_id == 0) { + ioc_err(ioc, + "%s: Invalid UID(0x%08x), buffer_type(0x%02x)\n", __func__, + diag_register->unique_id, buffer_type); + return -EINVAL; + } + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_APP_OWNED) && + !(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED)) { + ioc_err(ioc, + "%s: buffer_type(0x%02x) is already registered by application with UID(0x%08x)\n", + __func__, buffer_type, ioc->unique_id[buffer_type]); + return -EINVAL; + } + + if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) { + /* + * If driver posts buffer initially, then an application wants + * to Register that buffer (own it) without Releasing first, + * the application Register command MUST have the same buffer + * type and size in the Register command (obtained from the + * Query command). Otherwise that Register command will be + * failed. If the application has released the buffer but wants + * to re-register it, it should be allowed as long as the + * Unique-Id/Size match. + */ + + if (ioc->unique_id[buffer_type] == MPT3DIAGBUFFUNIQUEID && + ioc->diag_buffer_sz[buffer_type] == + diag_register->requested_buffer_size) { + + if (!(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED)) { + dctlprintk(ioc, ioc_info(ioc, + "%s: diag_buffer (%d) ownership changed. old-ID(0x%08x), new-ID(0x%08x)\n", + __func__, buffer_type, + ioc->unique_id[buffer_type], + diag_register->unique_id)); + + /* + * Application wants to own the buffer with + * the same size. 
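+ *
+ * For instance (the unique_id value is only illustrative):
+ *
+ *	diag_register.buffer_type           = MPI2_DIAG_BUF_TYPE_TRACE;
+ *	diag_register.unique_id             = 0x12345678;
+ *	diag_register.requested_buffer_size = ioc->diag_buffer_sz[buffer_type];
+ *
+ * succeeds without re-posting or re-allocating the buffer: only the
+ * stored unique_id is updated to the application's value.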
+ */ + ioc->unique_id[buffer_type] = + diag_register->unique_id; + rc = 0; /* success */ + goto out; + } + } else if (ioc->unique_id[buffer_type] != + MPT3DIAGBUFFUNIQUEID) { + if (ioc->unique_id[buffer_type] != + diag_register->unique_id || + ioc->diag_buffer_sz[buffer_type] != + diag_register->requested_buffer_size || + !(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED)) { + ioc_err(ioc, + "%s: already has a registered buffer for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -EINVAL; + } + } else { + ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -EINVAL; + } + } else if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) { + + if (ioc->unique_id[buffer_type] != MPT3DIAGBUFFUNIQUEID || + ioc->diag_buffer_sz[buffer_type] != + diag_register->requested_buffer_size) { + + ioc_err(ioc, + "%s: already a buffer is allocated for buffer_type(0x%02x) of size %d bytes, so please try registering again with same size\n", + __func__, buffer_type, + ioc->diag_buffer_sz[buffer_type]); + return -EINVAL; + } + } + + if (diag_register->requested_buffer_size % 4) { + ioc_err(ioc, "%s: the requested_buffer_size is not 4 byte aligned\n", + __func__); + return -EINVAL; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, ioc->request_sz); + ioc->ctl_cmds.smid = smid; + + request_data = ioc->diag_buffer[buffer_type]; + request_data_sz = diag_register->requested_buffer_size; + ioc->unique_id[buffer_type] = diag_register->unique_id; + /* Reset ioc variables used for additional query commands */ + ioc->reset_from_user = 0; + memset(&ioc->htb_rel, 0, sizeof(struct htb_rel_query)); + ioc->diag_buffer_status[buffer_type] &= + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; + memcpy(ioc->product_specific[buffer_type], + diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS); + ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags; + + if (request_data) { + request_data_dma = ioc->diag_buffer_dma[buffer_type]; + if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) { + dma_free_coherent(&ioc->pdev->dev, + ioc->diag_buffer_sz[buffer_type], + request_data, request_data_dma); + request_data = NULL; + } + } + + if (request_data == NULL) { + ioc->diag_buffer_sz[buffer_type] = 0; + ioc->diag_buffer_dma[buffer_type] = 0; + request_data = dma_alloc_coherent(&ioc->pdev->dev, + request_data_sz, &request_data_dma, GFP_KERNEL); + if (request_data == NULL) { + ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n", + __func__, request_data_sz); + mpt3sas_base_free_smid(ioc, smid); + rc = -ENOMEM; + goto out; + } + ioc->diag_buffer[buffer_type] = request_data; + ioc->diag_buffer_sz[buffer_type] = request_data_sz; + ioc->diag_buffer_dma[buffer_type] = request_data_dma; + } + + mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; + mpi_request->BufferType = diag_register->buffer_type; + mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags); + mpi_request->BufferAddress = cpu_to_le64(request_data_dma); + mpi_request->BufferLength = cpu_to_le32(request_data_sz); + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + + dctlprintk(ioc, + ioc_info(ioc, 
"%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n", + __func__, request_data, + (unsigned long long)request_data_dma, + le32_to_cpu(mpi_request->BufferLength))); + + for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) + mpi_request->ProductSpecific[i] = + cpu_to_le32(ioc->product_specific[buffer_type][i]); + + init_completion(&ioc->ctl_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->ctl_cmds.done, + MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); + + if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset); + goto issue_host_reset; + } + + /* process the completed Reply Message Frame */ + if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { + ioc_err(ioc, "%s: no reply message\n", __func__); + rc = -EFAULT; + goto out; + } + + mpi_reply = ioc->ctl_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + ioc->diag_buffer_status[buffer_type] |= + MPT3_DIAG_BUFFER_IS_REGISTERED; + dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__)); + } else { + ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n", + __func__, + ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); + rc = -EFAULT; + } + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + + out: + + if (rc && request_data) { + dma_free_coherent(&ioc->pdev->dev, request_data_sz, + request_data, request_data_dma); + ioc->diag_buffer[buffer_type] = NULL; + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; + } + + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + return rc; +} + +/** + * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time + * @ioc: per adapter object + * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1 + * + * This is called when command line option diag_buffer_enable is enabled + * at driver load time. + */ +void +mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register) +{ + struct mpt3_diag_register diag_register; + u32 ret_val; + u32 trace_buff_size = ioc->manu_pg11.HostTraceBufferMaxSizeKB<<10; + u32 min_trace_buff_size = 0; + u32 decr_trace_buff_size = 0; + + memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); + + if (bits_to_register & 1) { + ioc_info(ioc, "registering trace buffer support\n"); + ioc->diag_trigger_master.MasterData = + (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; + diag_register.unique_id = + (ioc->hba_mpi_version_belonged == MPI2_VERSION) ? + (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID); + + if (trace_buff_size != 0) { + diag_register.requested_buffer_size = trace_buff_size; + min_trace_buff_size = + ioc->manu_pg11.HostTraceBufferMinSizeKB<<10; + decr_trace_buff_size = + ioc->manu_pg11.HostTraceBufferDecrementSizeKB<<10; + + if (min_trace_buff_size > trace_buff_size) { + /* The buff size is not set correctly */ + ioc_err(ioc, + "Min Trace Buff size (%d KB) greater than Max Trace Buff size (%d KB)\n", + min_trace_buff_size>>10, + trace_buff_size>>10); + ioc_err(ioc, + "Using zero Min Trace Buff Size\n"); + min_trace_buff_size = 0; + } + + if (decr_trace_buff_size == 0) { + /* + * retry the min size if decrement + * is not available. 
+ */ + decr_trace_buff_size = + trace_buff_size - min_trace_buff_size; + } + } else { + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + } + + do { + ret_val = _ctl_diag_register_2(ioc, &diag_register); + + if (ret_val == -ENOMEM && min_trace_buff_size && + (trace_buff_size - decr_trace_buff_size) >= + min_trace_buff_size) { + /* adjust the buffer size */ + trace_buff_size -= decr_trace_buff_size; + diag_register.requested_buffer_size = + trace_buff_size; + } else + break; + } while (true); + + if (ret_val == -ENOMEM) + ioc_err(ioc, + "Cannot allocate trace buffer memory. Last memory tried = %d KB\n", + diag_register.requested_buffer_size>>10); + else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] + & MPT3_DIAG_BUFFER_IS_REGISTERED) { + ioc_info(ioc, "Trace buffer memory %d KB allocated\n", + diag_register.requested_buffer_size>>10); + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) + ioc->diag_buffer_status[ + MPI2_DIAG_BUF_TYPE_TRACE] |= + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; + } + } + + if (bits_to_register & 2) { + ioc_info(ioc, "registering snapshot buffer support\n"); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT; + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + diag_register.unique_id = 0x7075901; + _ctl_diag_register_2(ioc, &diag_register); + } + + if (bits_to_register & 4) { + ioc_info(ioc, "registering extended buffer support\n"); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED; + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + diag_register.unique_id = 0x7075901; + _ctl_diag_register_2(ioc, &diag_register); + } +} + +/** + * _ctl_diag_register - application register with driver + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + * + * This will allow the driver to setup any required buffers that will be + * needed by firmware to communicate with the driver. + */ +static long +_ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_register karg; + long rc; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + rc = _ctl_diag_register_2(ioc, &karg); + + if (!rc && (ioc->diag_buffer_status[karg.buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED)) + ioc->diag_buffer_status[karg.buffer_type] |= + MPT3_DIAG_BUFFER_IS_APP_OWNED; + + return rc; +} + +/** + * _ctl_diag_unregister - application unregister with driver + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + * + * This will allow the driver to cleanup any memory allocated for diag + * messages and to free up any resources. 
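+ *
+ * Minimal user-space sketch (the unique_id is only an example):
+ *
+ *	karg.unique_id = 0x12345678;
+ *	ioctl(fd, MPT3DIAGUNREGISTER, &karg);
+ *
+ * The buffer must have been released (MPT3DIAGRELEASE) first, otherwise
+ * this handler returns -EINVAL; driver-allocated buffers are kept and
+ * handed back to the driver, application-owned ones are freed.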
+ */ +static long +_ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_unregister karg; + void *request_data; + dma_addr_t request_data_dma; + u32 request_data_sz; + u8 buffer_type; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, ioc_info(ioc, "%s\n", + __func__)); + + buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id); + if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) { + ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n", + __func__, karg.unique_id); + return -EINVAL; + } + + if (!_ctl_diag_capability(ioc, buffer_type)) { + ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -EPERM; + } + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n", + __func__, buffer_type); + return -EINVAL; + } + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { + ioc_err(ioc, "%s: buffer_type(0x%02x) has not been released\n", + __func__, buffer_type); + return -EINVAL; + } + + if (karg.unique_id != ioc->unique_id[buffer_type]) { + ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n", + __func__, karg.unique_id); + return -EINVAL; + } + + request_data = ioc->diag_buffer[buffer_type]; + if (!request_data) { + ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -ENOMEM; + } + + if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) { + ioc->unique_id[buffer_type] = MPT3DIAGBUFFUNIQUEID; + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_APP_OWNED; + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_REGISTERED; + } else { + request_data_sz = ioc->diag_buffer_sz[buffer_type]; + request_data_dma = ioc->diag_buffer_dma[buffer_type]; + dma_free_coherent(&ioc->pdev->dev, request_data_sz, + request_data, request_data_dma); + ioc->diag_buffer[buffer_type] = NULL; + ioc->diag_buffer_status[buffer_type] = 0; + } + return 0; +} + +/** + * _ctl_diag_query - query relevant info associated with diag buffers + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + * + * The application will send only buffer_type and unique_id. Driver will + * inspect unique_id first, if valid, fill in all the info. If unique_id is + * 0x00, the driver will return info specified by Buffer Type. 
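+ *
+ * Minimal query sketch (fd and values are only illustrative):
+ *
+ *	memset(&karg, 0, sizeof(karg));
+ *	karg.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+ *	karg.unique_id   = 0;		match by buffer type alone
+ *	ioctl(fd, MPT3DIAGQUERY, &karg);
+ *
+ * On success application_flags, total_buffer_size, diagnostic_flags and
+ * the currently registered unique_id are returned in the same struct.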
+ */ +static long +_ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_query karg; + void *request_data; + int i; + u8 buffer_type; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, ioc_info(ioc, "%s\n", + __func__)); + + karg.application_flags = 0; + buffer_type = karg.buffer_type; + + if (!_ctl_diag_capability(ioc, buffer_type)) { + ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -EPERM; + } + + if (!(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) { + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n", + __func__, buffer_type); + return -EINVAL; + } + } + + if (karg.unique_id) { + if (karg.unique_id != ioc->unique_id[buffer_type]) { + ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n", + __func__, karg.unique_id); + return -EINVAL; + } + } + + request_data = ioc->diag_buffer[buffer_type]; + if (!request_data) { + ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -ENOMEM; + } + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED)) + karg.application_flags |= MPT3_APP_FLAGS_BUFFER_VALID; + + if (!(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED)) + karg.application_flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS; + + if (!(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) + karg.application_flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC; + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_APP_OWNED)) + karg.application_flags |= MPT3_APP_FLAGS_APP_OWNED; + + for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) + karg.product_specific[i] = + ioc->product_specific[buffer_type][i]; + + karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type]; + karg.driver_added_buffer_size = 0; + karg.unique_id = ioc->unique_id[buffer_type]; + karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type]; + + if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) { + ioc_err(ioc, "%s: unable to write mpt3_diag_query data @ %p\n", + __func__, arg); + return -EFAULT; + } + return 0; +} + +/** + * mpt3sas_send_diag_release - Diag Release Message + * @ioc: per adapter object + * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED + * @issue_reset: specifies whether host reset is required. 
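+ *
+ * Call sketch, as used by the release path later in this file:
+ *
+ *	u8 issue_reset = 0;
+ *	rc = mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
+ *	    &issue_reset);
+ *	if (issue_reset)
+ *		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ *
+ * Return: 0 on success, -EAGAIN if the IOC is not operational or the
+ * ctl command slot is busy, -EFAULT on timeout or firmware error.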
+ * + */ +int +mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, + u8 *issue_reset) +{ + Mpi2DiagReleaseRequest_t *mpi_request; + Mpi2DiagReleaseReply_t *mpi_reply; + u16 smid; + u16 ioc_status; + u32 ioc_state; + int rc; + u8 reset_needed = 0; + + dctlprintk(ioc, ioc_info(ioc, "%s\n", + __func__)); + + rc = 0; + *issue_reset = 0; + + + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) + ioc->diag_buffer_status[buffer_type] |= + MPT3_DIAG_BUFFER_IS_RELEASED; + dctlprintk(ioc, + ioc_info(ioc, "%s: skipping due to FAULT state\n", + __func__)); + rc = -EAGAIN; + goto out; + } + + if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: ctl_cmd in use\n", __func__); + rc = -EAGAIN; + goto out; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto out; + } + + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, ioc->request_sz); + ioc->ctl_cmds.smid = smid; + + mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE; + mpi_request->BufferType = buffer_type; + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + + init_completion(&ioc->ctl_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->ctl_cmds.done, + MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); + + if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed); + *issue_reset = reset_needed; + rc = -EFAULT; + goto out; + } + + /* process the completed Reply Message Frame */ + if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { + ioc_err(ioc, "%s: no reply message\n", __func__); + rc = -EFAULT; + goto out; + } + + mpi_reply = ioc->ctl_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + ioc->diag_buffer_status[buffer_type] |= + MPT3_DIAG_BUFFER_IS_RELEASED; + dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__)); + } else { + ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n", + __func__, + ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); + rc = -EFAULT; + } + + out: + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + return rc; +} + +/** + * _ctl_diag_release - request to send Diag Release Message to firmware + * @ioc: ? + * @arg: user space buffer containing ioctl content + * + * This allows ownership of the specified buffer to returned to the driver, + * allowing an application to read the buffer without fear that firmware is + * overwriting information in the buffer. 
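+ *
+ * Usual ordering from user space (struct names abridged, values
+ * illustrative):
+ *
+ *	rel.unique_id = 0x12345678;
+ *	ioctl(fd, MPT3DIAGRELEASE, &rel);	stop firmware writes
+ *	ioctl(fd, MPT3DIAGREADBUFFER, &rb);	then snapshot the data
+ *
+ * Releasing a buffer that is already marked released fails with
+ * -EINVAL; a buffer torn down by a host reset is simply reported as
+ * released without another firmware request.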
+ */ +static long +_ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_release karg; + void *request_data; + int rc; + u8 buffer_type; + u8 issue_reset = 0; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, ioc_info(ioc, "%s\n", + __func__)); + + buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id); + if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) { + ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n", + __func__, karg.unique_id); + return -EINVAL; + } + + if (!_ctl_diag_capability(ioc, buffer_type)) { + ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -EPERM; + } + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n", + __func__, buffer_type); + return -EINVAL; + } + + if (karg.unique_id != ioc->unique_id[buffer_type]) { + ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n", + __func__, karg.unique_id); + return -EINVAL; + } + + if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n", + __func__, buffer_type); + return -EINVAL; + } + + request_data = ioc->diag_buffer[buffer_type]; + + if (!request_data) { + ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -ENOMEM; + } + + /* buffers were released by due to host reset */ + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DIAG_RESET)) { + ioc->diag_buffer_status[buffer_type] |= + MPT3_DIAG_BUFFER_IS_RELEASED; + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_DIAG_RESET; + ioc_err(ioc, "%s: buffer_type(0x%02x) was released due to host reset\n", + __func__, buffer_type); + return 0; + } + + rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset); + + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + + return rc; +} + +/** + * _ctl_diag_read_buffer - request for copy of the diag buffer + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + */ +static long +_ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_read_buffer karg; + struct mpt3_diag_read_buffer __user *uarg = arg; + void *request_data, *diag_data; + Mpi2DiagBufferPostRequest_t *mpi_request; + Mpi2DiagBufferPostReply_t *mpi_reply; + int rc, i; + u8 buffer_type; + unsigned long request_size, copy_size; + u16 smid; + u16 ioc_status; + u8 issue_reset = 0; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, ioc_info(ioc, "%s\n", + __func__)); + + buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id); + if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) { + ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n", + __func__, karg.unique_id); + return -EINVAL; + } + + if (!_ctl_diag_capability(ioc, buffer_type)) { + ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -EPERM; + } + + if (karg.unique_id != ioc->unique_id[buffer_type]) { + ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n", + __func__, karg.unique_id); + return -EINVAL; + } + + request_data = ioc->diag_buffer[buffer_type]; + if (!request_data) { + ioc_err(ioc, "%s: 
doesn't have buffer for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -ENOMEM; + } + + request_size = ioc->diag_buffer_sz[buffer_type]; + + if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) { + ioc_err(ioc, "%s: either the starting_offset or bytes_to_read are not 4 byte aligned\n", + __func__); + return -EINVAL; + } + + if (karg.starting_offset > request_size) + return -EINVAL; + + diag_data = (void *)(request_data + karg.starting_offset); + dctlprintk(ioc, + ioc_info(ioc, "%s: diag_buffer(%p), offset(%d), sz(%d)\n", + __func__, diag_data, karg.starting_offset, + karg.bytes_to_read)); + + /* Truncate data on requests that are too large */ + if ((diag_data + karg.bytes_to_read < diag_data) || + (diag_data + karg.bytes_to_read > request_data + request_size)) + copy_size = request_size - karg.starting_offset; + else + copy_size = karg.bytes_to_read; + + if (copy_to_user((void __user *)uarg->diagnostic_data, + diag_data, copy_size)) { + ioc_err(ioc, "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n", + __func__, diag_data); + return -EFAULT; + } + + if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0) + return 0; + + dctlprintk(ioc, + ioc_info(ioc, "%s: Reregister buffer_type(0x%02x)\n", + __func__, buffer_type)); + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { + dctlprintk(ioc, + ioc_info(ioc, "%s: buffer_type(0x%02x) is still registered\n", + __func__, buffer_type)); + return 0; + } + /* Get a free request frame and save the message context. + */ + + if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: ctl_cmd in use\n", __func__); + rc = -EAGAIN; + goto out; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, ioc->request_sz); + ioc->ctl_cmds.smid = smid; + + mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; + mpi_request->BufferType = buffer_type; + mpi_request->BufferLength = + cpu_to_le32(ioc->diag_buffer_sz[buffer_type]); + mpi_request->BufferAddress = + cpu_to_le64(ioc->diag_buffer_dma[buffer_type]); + for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) + mpi_request->ProductSpecific[i] = + cpu_to_le32(ioc->product_specific[buffer_type][i]); + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + + init_completion(&ioc->ctl_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->ctl_cmds.done, + MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); + + if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset); + goto issue_host_reset; + } + + /* process the completed Reply Message Frame */ + if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { + ioc_err(ioc, "%s: no reply message\n", __func__); + rc = -EFAULT; + goto out; + } + + mpi_reply = ioc->ctl_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + ioc->diag_buffer_status[buffer_type] |= + MPT3_DIAG_BUFFER_IS_REGISTERED; + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_RELEASED; + dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__)); + } else { + ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n", + __func__, 
ioc_status, + le32_to_cpu(mpi_reply->IOCLogInfo)); + rc = -EFAULT; + } + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + + out: + + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + return rc; +} + +/** + * _ctl_addnl_diag_query - query relevant info associated with diag buffers + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + * + * The application will send only unique_id. Driver will + * inspect unique_id first, if valid, fill the details related to cause + * for diag buffer release. + */ +static long +_ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_addnl_diag_query karg; + u32 buffer_type = 0; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("%s: failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -EFAULT; + } + dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + if (karg.unique_id == 0) { + ioc_err(ioc, "%s: unique_id is(0x%08x)\n", + __func__, karg.unique_id); + return -EPERM; + } + buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id); + if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) { + ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n", + __func__, karg.unique_id); + return -EPERM; + } + memset(&karg.rel_query, 0, sizeof(karg.rel_query)); + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + ioc_info(ioc, "%s: buffer_type(0x%02x) is not registered\n", + __func__, buffer_type); + goto out; + } + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { + ioc_err(ioc, "%s: buffer_type(0x%02x) is not released\n", + __func__, buffer_type); + return -EPERM; + } + memcpy(&karg.rel_query, &ioc->htb_rel, sizeof(karg.rel_query)); +out: + if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) { + ioc_err(ioc, "%s: unable to write mpt3_addnl_diag_query data @ %p\n", + __func__, arg); + return -EFAULT; + } + return 0; +} + +#ifdef CONFIG_COMPAT +/** + * _ctl_compat_mpt_command - convert 32bit pointers to 64bit. + * @ioc: per adapter object + * @cmd: ioctl opcode + * @arg: (struct mpt3_ioctl_command32) + * + * MPT3COMMAND32 - Handle 32bit applications running on 64bit os. 
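+ *
+ * Only the four pointer members differ between the 32-bit and native
+ * layouts; each is widened with compat_ptr(), e.g.
+ *
+ *	karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
+ *
+ * while the scalar fields are copied across unchanged before handing
+ * the request to _ctl_do_mpt_command().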
+ */ +static long +_ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd, + void __user *arg) +{ + struct mpt3_ioctl_command32 karg32; + struct mpt3_ioctl_command32 __user *uarg; + struct mpt3_ioctl_command karg; + + if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32)) + return -EINVAL; + + uarg = (struct mpt3_ioctl_command32 __user *) arg; + + if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + memset(&karg, 0, sizeof(struct mpt3_ioctl_command)); + karg.hdr.ioc_number = karg32.hdr.ioc_number; + karg.hdr.port_number = karg32.hdr.port_number; + karg.hdr.max_data_size = karg32.hdr.max_data_size; + karg.timeout = karg32.timeout; + karg.max_reply_bytes = karg32.max_reply_bytes; + karg.data_in_size = karg32.data_in_size; + karg.data_out_size = karg32.data_out_size; + karg.max_sense_bytes = karg32.max_sense_bytes; + karg.data_sge_offset = karg32.data_sge_offset; + karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr); + karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr); + karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr); + karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr); + return _ctl_do_mpt_command(ioc, karg, &uarg->mf); +} +#endif + +/** + * _ctl_ioctl_main - main ioctl entry point + * @file: (struct file) + * @cmd: ioctl opcode + * @arg: user space data buffer + * @compat: handles 32 bit applications in 64bit os + * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & + * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. + */ +static long +_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, + u8 compat, u16 mpi_version) +{ + struct MPT3SAS_ADAPTER *ioc; + struct mpt3_ioctl_header ioctl_header; + enum block_state state; + long ret = -ENOIOCTLCMD; + + /* get IOCTL header */ + if (copy_from_user(&ioctl_header, (char __user *)arg, + sizeof(struct mpt3_ioctl_header))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + if (_ctl_verify_adapter(ioctl_header.ioc_number, + &ioc, mpi_version) == -1 || !ioc) + return -ENODEV; + + /* pci_access_mutex lock acquired by ioctl path */ + mutex_lock(&ioc->pci_access_mutex); + + if (ioc->shost_recovery || ioc->pci_error_recovery || + ioc->is_driver_loading || ioc->remove_host) { + ret = -EAGAIN; + goto out_unlock_pciaccess; + } + + state = (file->f_flags & O_NONBLOCK) ? 
NON_BLOCKING : BLOCKING; + if (state == NON_BLOCKING) { + if (!mutex_trylock(&ioc->ctl_cmds.mutex)) { + ret = -EAGAIN; + goto out_unlock_pciaccess; + } + } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) { + ret = -ERESTARTSYS; + goto out_unlock_pciaccess; + } + + + switch (cmd) { + case MPT3IOCINFO: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo)) + ret = _ctl_getiocinfo(ioc, arg); + break; +#ifdef CONFIG_COMPAT + case MPT3COMMAND32: +#endif + case MPT3COMMAND: + { + struct mpt3_ioctl_command __user *uarg; + struct mpt3_ioctl_command karg; + +#ifdef CONFIG_COMPAT + if (compat) { + ret = _ctl_compat_mpt_command(ioc, cmd, arg); + break; + } +#endif + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + ret = -EFAULT; + break; + } + + if (karg.hdr.ioc_number != ioctl_header.ioc_number) { + ret = -EINVAL; + break; + } + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) { + uarg = arg; + ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf); + } + break; + } + case MPT3EVENTQUERY: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery)) + ret = _ctl_eventquery(ioc, arg); + break; + case MPT3EVENTENABLE: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable)) + ret = _ctl_eventenable(ioc, arg); + break; + case MPT3EVENTREPORT: + ret = _ctl_eventreport(ioc, arg); + break; + case MPT3HARDRESET: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset)) + ret = _ctl_do_reset(ioc, arg); + break; + case MPT3BTDHMAPPING: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping)) + ret = _ctl_btdh_mapping(ioc, arg); + break; + case MPT3DIAGREGISTER: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register)) + ret = _ctl_diag_register(ioc, arg); + break; + case MPT3DIAGUNREGISTER: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister)) + ret = _ctl_diag_unregister(ioc, arg); + break; + case MPT3DIAGQUERY: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query)) + ret = _ctl_diag_query(ioc, arg); + break; + case MPT3DIAGRELEASE: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release)) + ret = _ctl_diag_release(ioc, arg); + break; + case MPT3DIAGREADBUFFER: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer)) + ret = _ctl_diag_read_buffer(ioc, arg); + break; + case MPT3ADDNLDIAGQUERY: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_addnl_diag_query)) + ret = _ctl_addnl_diag_query(ioc, arg); + break; + default: + dctlprintk(ioc, + ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n", + cmd)); + break; + } + + mutex_unlock(&ioc->ctl_cmds.mutex); +out_unlock_pciaccess: + mutex_unlock(&ioc->pci_access_mutex); + return ret; +} + +/** + * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked) + * @file: (struct file) + * @cmd: ioctl opcode + * @arg: ? + */ +static long +_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; + + /* pass MPI25_VERSION | MPI26_VERSION value, + * to indicate that this ioctl cmd + * came from mpt3ctl ioctl device. + */ + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, + MPI25_VERSION | MPI26_VERSION); + return ret; +} + +/** + * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked) + * @file: (struct file) + * @cmd: ioctl opcode + * @arg: ? + */ +static long +_ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; + + /* pass MPI2_VERSION value, to indicate that this ioctl cmd + * came from mpt2ctl ioctl device. 
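+ *
+ * As a result an IOC whose hba_mpi_version_belonged is MPI25_VERSION or
+ * MPI26_VERSION is skipped by this entry point but matched through the
+ * mpt3ctl one, since _ctl_verify_adapter() accepts either generation
+ * when handed the combined MPI25_VERSION | MPI26_VERSION value and
+ * requires an exact match otherwise.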
+ */ + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION); + return ret; +} +#ifdef CONFIG_COMPAT +/** + * _ctl_ioctl_compat - main ioctl entry point (compat) + * @file: ? + * @cmd: ? + * @arg: ? + * + * This routine handles 32 bit applications in 64bit os. + */ +static long +_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) +{ + long ret; + + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, + MPI25_VERSION | MPI26_VERSION); + return ret; +} + +/** + * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat) + * @file: ? + * @cmd: ? + * @arg: ? + * + * This routine handles 32 bit applications in 64bit os. + */ +static long +_ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) +{ + long ret; + + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION); + return ret; +} +#endif + +/* scsi host attributes */ +/** + * version_fw_show - firmware version + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +version_fw_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", + (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, + (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, + (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, + ioc->facts.FWVersion.Word & 0x000000FF); +} +static DEVICE_ATTR_RO(version_fw); + +/** + * version_bios_show - bios version + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +version_bios_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + + return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", + (version & 0xFF000000) >> 24, + (version & 0x00FF0000) >> 16, + (version & 0x0000FF00) >> 8, + version & 0x000000FF); +} +static DEVICE_ATTR_RO(version_bios); + +/** + * version_mpi_show - MPI (message passing interface) version + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +version_mpi_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%03x.%02x\n", + ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8); +} +static DEVICE_ATTR_RO(version_mpi); + +/** + * version_product_show - product name + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +version_product_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName); +} +static DEVICE_ATTR_RO(version_product); + +/** + * version_nvdata_persistent_show - ndvata persistent version + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
+ */ +static ssize_t +version_nvdata_persistent_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%08xh\n", + le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word)); +} +static DEVICE_ATTR_RO(version_nvdata_persistent); + +/** + * version_nvdata_default_show - nvdata default version + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +version_nvdata_default_show(struct device *cdev, struct device_attribute + *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%08xh\n", + le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word)); +} +static DEVICE_ATTR_RO(version_nvdata_default); + +/** + * board_name_show - board name + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +board_name_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName); +} +static DEVICE_ATTR_RO(board_name); + +/** + * board_assembly_show - board assembly name + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +board_assembly_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly); +} +static DEVICE_ATTR_RO(board_assembly); + +/** + * board_tracer_show - board tracer number + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +board_tracer_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber); +} +static DEVICE_ATTR_RO(board_tracer); + +/** + * io_delay_show - io missing delay + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * This is used by the firmware implementation for debouncing device + * removal events. + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +io_delay_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay); +} +static DEVICE_ATTR_RO(io_delay); + +/** + * device_delay_show - device missing delay + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * This is used by the firmware implementation for debouncing device + * removal events. + * + * A sysfs 'read-only' shost attribute. 
+ */ +static ssize_t +device_delay_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay); +} +static DEVICE_ATTR_RO(device_delay); + +/** + * fw_queue_depth_show - global credits + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * This is firmware queue depth limit + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +fw_queue_depth_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit); +} +static DEVICE_ATTR_RO(fw_queue_depth); + +/** + * host_sas_address_show - sas address + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * This is the controller sas address + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +host_sas_address_show(struct device *cdev, struct device_attribute *attr, + char *buf) + +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "0x%016llx\n", + (unsigned long long)ioc->sas_hba.sas_address); +} +static DEVICE_ATTR_RO(host_sas_address); + +/** + * logging_level_show - logging level + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +logging_level_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level); +} +static ssize_t +logging_level_store(struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + int val = 0; + + if (sscanf(buf, "%x", &val) != 1) + return -EINVAL; + + ioc->logging_level = val; + ioc_info(ioc, "logging_level=%08xh\n", + ioc->logging_level); + return strlen(buf); +} +static DEVICE_ATTR_RW(logging_level); + +/** + * fwfault_debug_show - show/store fwfault_debug + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * mpt3sas_fwfault_debug is command line option + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +fwfault_debug_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug); +} +static ssize_t +fwfault_debug_store(struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + int val = 0; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + ioc->fwfault_debug = val; + ioc_info(ioc, "fwfault_debug=%d\n", + ioc->fwfault_debug); + return strlen(buf); +} +static DEVICE_ATTR_RW(fwfault_debug); + +/** + * ioc_reset_count_show - ioc reset count + * @cdev: pointer to embedded class device + * @attr: ? 
+ * @buf: the buffer returned + * + * This is the number of times the IOC has been reset + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +ioc_reset_count_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count); +} +static DEVICE_ATTR_RO(ioc_reset_count); + +/** + * reply_queue_count_show - number of reply queues + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * This is the number of reply queues + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +reply_queue_count_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + u8 reply_queue_count; + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + if ((ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable) + reply_queue_count = ioc->reply_queue_count; + else + reply_queue_count = 1; + + return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count); +} +static DEVICE_ATTR_RO(reply_queue_count); + +/** + * BRM_status_show - Backup Rail Monitor Status + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * This is the Backup Rail Monitor status (WarpDrive only) + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +BRM_status_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + Mpi2IOUnitPage3_t io_unit_pg3; + Mpi2ConfigReply_t mpi_reply; + u16 backup_rail_monitor_status = 0; + u16 ioc_status; + int sz; + ssize_t rc = 0; + + if (!ioc->is_warpdrive) { + ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n", + __func__); + return 0; + } + /* pci_access_mutex lock acquired by sysfs show path */ + mutex_lock(&ioc->pci_access_mutex); + if (ioc->pci_error_recovery || ioc->remove_host) + goto out; + + sz = sizeof(io_unit_pg3); + memset(&io_unit_pg3, 0, sz); + + if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, &io_unit_pg3, sz) != + 0) { + ioc_err(ioc, "%s: failed reading iounit_pg3\n", + __func__); + rc = -EINVAL; + goto out; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n", + __func__, ioc_status); + rc = -EINVAL; + goto out; + } + + if (io_unit_pg3.GPIOCount < 25) { + ioc_err(ioc, "%s: iounit_pg3.GPIOCount less than 25 entries, detected (%d) entries\n", + __func__, io_unit_pg3.GPIOCount); + rc = -EINVAL; + goto out; + } + + /* BRM status is in bit zero of GPIOVal[24] */ + backup_rail_monitor_status = le16_to_cpu(io_unit_pg3.GPIOVal[24]); + rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1)); + + out: + mutex_unlock(&ioc->pci_access_mutex); + return rc; +} +static DEVICE_ATTR_RO(BRM_status); + +struct DIAG_BUFFER_START { + __le32 Size; + __le32 DiagVersion; + u8 BufferType; + u8 Reserved[3]; + __le32 Reserved1; + __le32 Reserved2; + __le32 Reserved3; +}; + +/** + * host_trace_buffer_size_show - host buffer size (trace only) + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
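+ *
+ * Illustrative userspace sketch (added for documentation only, not part of
+ * the driver): the size reported here tells an application how many bytes
+ * to pull from the host_trace_buffer attribute below, which hands out at
+ * most one page per read and takes a decimal byte offset on write. The
+ * sysfs path, host number and variable names are placeholders.
+ *
+ *	int fd = open("/sys/class/scsi_host/host0/host_trace_buffer", O_RDWR);
+ *	unsigned int offset = 0, total = trace_buffer_size;	// value read here
+ *	char chunk[4096], off[16];
+ *	ssize_t n;
+ *
+ *	while (offset < total) {
+ *		n = snprintf(off, sizeof(off), "%u", offset);
+ *		write(fd, off, n);		// move the driver's read pointer
+ *		lseek(fd, 0, SEEK_SET);		// re-trigger the show method
+ *		n = read(fd, chunk, sizeof(chunk));
+ *		if (n <= 0)
+ *			break;
+ *		offset += n;
+ *	}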
+ */ +static ssize_t +host_trace_buffer_size_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + u32 size = 0; + struct DIAG_BUFFER_START *request_data; + + if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { + ioc_err(ioc, "%s: host_trace_buffer is not registered\n", + __func__); + return 0; + } + + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + ioc_err(ioc, "%s: host_trace_buffer is not registered\n", + __func__); + return 0; + } + + request_data = (struct DIAG_BUFFER_START *) + ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]; + if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 || + le32_to_cpu(request_data->DiagVersion) == 0x01000000 || + le32_to_cpu(request_data->DiagVersion) == 0x01010000) && + le32_to_cpu(request_data->Reserved3) == 0x4742444c) + size = le32_to_cpu(request_data->Size); + + ioc->ring_buffer_sz = size; + return snprintf(buf, PAGE_SIZE, "%d\n", size); +} +static DEVICE_ATTR_RO(host_trace_buffer_size); + +/** + * host_trace_buffer_show - firmware ring buffer (trace only) + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read/write' shost attribute. + * + * You will only be able to read 4k bytes of ring buffer at a time. + * In order to read beyond 4k bytes, you will have to write out the + * offset to the same attribute, it will move the pointer. + */ +static ssize_t +host_trace_buffer_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + void *request_data; + u32 size; + + if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { + ioc_err(ioc, "%s: host_trace_buffer is not registered\n", + __func__); + return 0; + } + + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + ioc_err(ioc, "%s: host_trace_buffer is not registered\n", + __func__); + return 0; + } + + if (ioc->ring_buffer_offset > ioc->ring_buffer_sz) + return 0; + + size = ioc->ring_buffer_sz - ioc->ring_buffer_offset; + size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; + request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset; + memcpy(buf, request_data, size); + return size; +} + +static ssize_t +host_trace_buffer_store(struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + int val = 0; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + ioc->ring_buffer_offset = val; + return strlen(buf); +} +static DEVICE_ATTR_RW(host_trace_buffer); + + +/*****************************************/ + +/** + * host_trace_buffer_enable_show - firmware ring buffer (trace only) + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read/write' shost attribute. 
+ * + * This is a mechanism to post/release host_trace_buffers + */ +static ssize_t +host_trace_buffer_enable_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) || + ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)) + return snprintf(buf, PAGE_SIZE, "off\n"); + else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED)) + return snprintf(buf, PAGE_SIZE, "release\n"); + else + return snprintf(buf, PAGE_SIZE, "post\n"); +} + +static ssize_t +host_trace_buffer_enable_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + char str[10] = ""; + struct mpt3_diag_register diag_register; + u8 issue_reset = 0; + + /* don't allow post/release to occur while recovery is active */ + if (ioc->shost_recovery || ioc->remove_host || + ioc->pci_error_recovery || ioc->is_driver_loading) + return -EBUSY; + + if (sscanf(buf, "%9s", str) != 1) + return -EINVAL; + + if (!strcmp(str, "post")) { + /* exit out if host buffers are already posted */ + if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) && + (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) && + ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0)) + goto out; + memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); + ioc_info(ioc, "posting host trace buffers\n"); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; + + if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0 && + ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) { + /* post the same buffer allocated previously */ + diag_register.requested_buffer_size = + ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE]; + } else { + /* + * Free the diag buffer memory which was previously + * allocated by an application. + */ + if ((ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) + && + (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_APP_OWNED)) { + dma_free_coherent(&ioc->pdev->dev, + ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE], + ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE], + ioc->diag_buffer_dma[MPI2_DIAG_BUF_TYPE_TRACE]); + ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] = + NULL; + } + + diag_register.requested_buffer_size = (1024 * 1024); + } + + diag_register.unique_id = + (ioc->hba_mpi_version_belonged == MPI2_VERSION) ? 
+ (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID); + ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0; + _ctl_diag_register_2(ioc, &diag_register); + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) { + ioc_info(ioc, + "Trace buffer %d KB allocated through sysfs\n", + diag_register.requested_buffer_size>>10); + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) + ioc->diag_buffer_status[ + MPI2_DIAG_BUF_TYPE_TRACE] |= + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; + } + } else if (!strcmp(str, "release")) { + /* exit out if host buffers are already released */ + if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) + goto out; + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) + goto out; + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED)) + goto out; + ioc_info(ioc, "releasing host trace buffer\n"); + ioc->htb_rel.buffer_rel_condition = MPT3_DIAG_BUFFER_REL_SYSFS; + mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, + &issue_reset); + } + + out: + return strlen(buf); +} +static DEVICE_ATTR_RW(host_trace_buffer_enable); + +/*********** diagnostic trigger suppport *********************************/ + +/** + * diag_trigger_master_show - show the diag_trigger_master attribute + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +diag_trigger_master_show(struct device *cdev, + struct device_attribute *attr, char *buf) + +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + unsigned long flags; + ssize_t rc; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + rc = sizeof(struct SL_WH_MASTER_TRIGGER_T); + memcpy(buf, &ioc->diag_trigger_master, rc); + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return rc; +} + +/** + * diag_trigger_master_store - store the diag_trigger_master attribute + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * @count: ? + * + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +diag_trigger_master_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) + +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct SL_WH_MASTER_TRIGGER_T *master_tg; + unsigned long flags; + ssize_t rc; + bool set = 1; + + rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count); + + if (ioc->supports_trigger_pages) { + master_tg = kzalloc(sizeof(struct SL_WH_MASTER_TRIGGER_T), + GFP_KERNEL); + if (!master_tg) + return -ENOMEM; + + memcpy(master_tg, buf, rc); + if (!master_tg->MasterData) + set = 0; + if (mpt3sas_config_update_driver_trigger_pg1(ioc, master_tg, + set)) { + kfree(master_tg); + return -EFAULT; + } + kfree(master_tg); + } + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + memset(&ioc->diag_trigger_master, 0, + sizeof(struct SL_WH_MASTER_TRIGGER_T)); + memcpy(&ioc->diag_trigger_master, buf, rc); + ioc->diag_trigger_master.MasterData |= + (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return rc; +} +static DEVICE_ATTR_RW(diag_trigger_master); + + +/** + * diag_trigger_event_show - show the diag_trigger_event attribute + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read/write' shost attribute. 
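+ *
+ * Note that, unlike the text attributes above, this attribute exchanges the
+ * raw binary struct SL_WH_EVENT_TRIGGERS_T: a read copies the structure out
+ * and a write is expected to supply one. A minimal userspace sketch (sysfs
+ * path and host number are assumptions; the struct definition comes from
+ * the driver's trigger headers):
+ *
+ *	struct SL_WH_EVENT_TRIGGERS_T tg;
+ *	int fd = open("/sys/class/scsi_host/host0/diag_trigger_event", O_RDONLY);
+ *
+ *	if (read(fd, &tg, sizeof(tg)) == sizeof(tg))
+ *		printf("%u valid event trigger entries\n",
+ *		       (unsigned int)tg.ValidEntries);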
+ */ +static ssize_t +diag_trigger_event_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + unsigned long flags; + ssize_t rc; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T); + memcpy(buf, &ioc->diag_trigger_event, rc); + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return rc; +} + +/** + * diag_trigger_event_store - store the diag_trigger_event attribute + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * @count: ? + * + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +diag_trigger_event_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) + +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct SL_WH_EVENT_TRIGGERS_T *event_tg; + unsigned long flags; + ssize_t sz; + bool set = 1; + + sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count); + if (ioc->supports_trigger_pages) { + event_tg = kzalloc(sizeof(struct SL_WH_EVENT_TRIGGERS_T), + GFP_KERNEL); + if (!event_tg) + return -ENOMEM; + + memcpy(event_tg, buf, sz); + if (!event_tg->ValidEntries) + set = 0; + if (mpt3sas_config_update_driver_trigger_pg2(ioc, event_tg, + set)) { + kfree(event_tg); + return -EFAULT; + } + kfree(event_tg); + } + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + memset(&ioc->diag_trigger_event, 0, + sizeof(struct SL_WH_EVENT_TRIGGERS_T)); + memcpy(&ioc->diag_trigger_event, buf, sz); + if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES) + ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES; + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return sz; +} +static DEVICE_ATTR_RW(diag_trigger_event); + + +/** + * diag_trigger_scsi_show - show the diag_trigger_scsi attribute + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +diag_trigger_scsi_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + unsigned long flags; + ssize_t rc; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T); + memcpy(buf, &ioc->diag_trigger_scsi, rc); + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return rc; +} + +/** + * diag_trigger_scsi_store - store the diag_trigger_scsi attribute + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * @count: ? + * + * A sysfs 'read/write' shost attribute. 
+ */ +static ssize_t +diag_trigger_scsi_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct SL_WH_SCSI_TRIGGERS_T *scsi_tg; + unsigned long flags; + ssize_t sz; + bool set = 1; + + sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count); + if (ioc->supports_trigger_pages) { + scsi_tg = kzalloc(sizeof(struct SL_WH_SCSI_TRIGGERS_T), + GFP_KERNEL); + if (!scsi_tg) + return -ENOMEM; + + memcpy(scsi_tg, buf, sz); + if (!scsi_tg->ValidEntries) + set = 0; + if (mpt3sas_config_update_driver_trigger_pg3(ioc, scsi_tg, + set)) { + kfree(scsi_tg); + return -EFAULT; + } + kfree(scsi_tg); + } + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi)); + memcpy(&ioc->diag_trigger_scsi, buf, sz); + if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES) + ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES; + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return sz; +} +static DEVICE_ATTR_RW(diag_trigger_scsi); + + +/** + * diag_trigger_mpi_show - show the diag_trigger_mpi attribute + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +diag_trigger_mpi_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + unsigned long flags; + ssize_t rc; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + rc = sizeof(struct SL_WH_MPI_TRIGGERS_T); + memcpy(buf, &ioc->diag_trigger_mpi, rc); + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return rc; +} + +/** + * diag_trigger_mpi_store - store the diag_trigger_mpi attribute + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * @count: ? + * + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +diag_trigger_mpi_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct SL_WH_MPI_TRIGGERS_T *mpi_tg; + unsigned long flags; + ssize_t sz; + bool set = 1; + + sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count); + if (ioc->supports_trigger_pages) { + mpi_tg = kzalloc(sizeof(struct SL_WH_MPI_TRIGGERS_T), + GFP_KERNEL); + if (!mpi_tg) + return -ENOMEM; + + memcpy(mpi_tg, buf, sz); + if (!mpi_tg->ValidEntries) + set = 0; + if (mpt3sas_config_update_driver_trigger_pg4(ioc, mpi_tg, + set)) { + kfree(mpi_tg); + return -EFAULT; + } + kfree(mpi_tg); + } + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + memset(&ioc->diag_trigger_mpi, 0, + sizeof(ioc->diag_trigger_mpi)); + memcpy(&ioc->diag_trigger_mpi, buf, sz); + if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES) + ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES; + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return sz; +} + +static DEVICE_ATTR_RW(diag_trigger_mpi); + +/*********** diagnostic trigger suppport *** END ****************************/ + +/*****************************************/ + +/** + * drv_support_bitmap_show - driver supported feature bitmap + * @cdev: pointer to embedded class device + * @attr: unused + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
+ */ +static ssize_t +drv_support_bitmap_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap); +} +static DEVICE_ATTR_RO(drv_support_bitmap); + +/** + * enable_sdev_max_qd_show - display whether sdev max qd is enabled/disabled + * @cdev: pointer to embedded class device + * @attr: unused + * @buf: the buffer returned + * + * A sysfs read/write shost attribute. This attribute is used to set the + * targets queue depth to HBA IO queue depth if this attribute is enabled. + */ +static ssize_t +enable_sdev_max_qd_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd); +} + +/** + * enable_sdev_max_qd_store - Enable/disable sdev max qd + * @cdev: pointer to embedded class device + * @attr: unused + * @buf: the buffer returned + * @count: unused + * + * A sysfs read/write shost attribute. This attribute is used to set the + * targets queue depth to HBA IO queue depth if this attribute is enabled. + * If this attribute is disabled then targets will have corresponding default + * queue depth. + */ +static ssize_t +enable_sdev_max_qd_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + int val = 0; + struct scsi_device *sdev; + struct _raid_device *raid_device; + int qdepth; + + if (kstrtoint(buf, 0, &val) != 0) + return -EINVAL; + + switch (val) { + case 0: + ioc->enable_sdev_max_qd = 0; + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + continue; + + if (sas_target_priv_data->flags & + MPT_TARGET_FLAGS_VOLUME) { + raid_device = + mpt3sas_raid_device_find_by_handle(ioc, + sas_target_priv_data->handle); + + switch (raid_device->volume_type) { + case MPI2_RAID_VOL_TYPE_RAID0: + if (raid_device->device_info & + MPI2_SAS_DEVICE_INFO_SSP_TARGET) + qdepth = + MPT3SAS_SAS_QUEUE_DEPTH; + else + qdepth = + MPT3SAS_SATA_QUEUE_DEPTH; + break; + case MPI2_RAID_VOL_TYPE_RAID1E: + case MPI2_RAID_VOL_TYPE_RAID1: + case MPI2_RAID_VOL_TYPE_RAID10: + case MPI2_RAID_VOL_TYPE_UNKNOWN: + default: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + } + } else if (sas_target_priv_data->flags & + MPT_TARGET_FLAGS_PCIE_DEVICE) + qdepth = ioc->max_nvme_qd; + else + qdepth = (sas_target_priv_data->sas_dev->port_type > 1) ? 
+ ioc->max_wideport_qd : ioc->max_narrowport_qd; + + mpt3sas_scsih_change_queue_depth(sdev, qdepth); + } + break; + case 1: + ioc->enable_sdev_max_qd = 1; + shost_for_each_device(sdev, ioc->shost) + mpt3sas_scsih_change_queue_depth(sdev, + shost->can_queue); + break; + default: + return -EINVAL; + } + + return strlen(buf); +} +static DEVICE_ATTR_RW(enable_sdev_max_qd); + +static struct attribute *mpt3sas_host_attrs[] = { + &dev_attr_version_fw.attr, + &dev_attr_version_bios.attr, + &dev_attr_version_mpi.attr, + &dev_attr_version_product.attr, + &dev_attr_version_nvdata_persistent.attr, + &dev_attr_version_nvdata_default.attr, + &dev_attr_board_name.attr, + &dev_attr_board_assembly.attr, + &dev_attr_board_tracer.attr, + &dev_attr_io_delay.attr, + &dev_attr_device_delay.attr, + &dev_attr_logging_level.attr, + &dev_attr_fwfault_debug.attr, + &dev_attr_fw_queue_depth.attr, + &dev_attr_host_sas_address.attr, + &dev_attr_ioc_reset_count.attr, + &dev_attr_host_trace_buffer_size.attr, + &dev_attr_host_trace_buffer.attr, + &dev_attr_host_trace_buffer_enable.attr, + &dev_attr_reply_queue_count.attr, + &dev_attr_diag_trigger_master.attr, + &dev_attr_diag_trigger_event.attr, + &dev_attr_diag_trigger_scsi.attr, + &dev_attr_diag_trigger_mpi.attr, + &dev_attr_drv_support_bitmap.attr, + &dev_attr_BRM_status.attr, + &dev_attr_enable_sdev_max_qd.attr, + NULL, +}; + +static const struct attribute_group mpt3sas_host_attr_group = { + .attrs = mpt3sas_host_attrs +}; + +const struct attribute_group *mpt3sas_host_groups[] = { + &mpt3sas_host_attr_group, + NULL +}; + +/* device attributes */ + +/** + * sas_address_show - sas address + * @dev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * This is the sas address for the target + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +sas_address_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf(buf, PAGE_SIZE, "0x%016llx\n", + (unsigned long long)sas_device_priv_data->sas_target->sas_address); +} +static DEVICE_ATTR_RO(sas_address); + +/** + * sas_device_handle_show - device handle + * @dev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * This is the firmware assigned device handle + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +sas_device_handle_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf(buf, PAGE_SIZE, "0x%04x\n", + sas_device_priv_data->sas_target->handle); +} +static DEVICE_ATTR_RO(sas_device_handle); + +/** + * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority + * @dev: pointer to embedded device + * @attr: sas_ncq_prio_supported attribute descriptor + * @buf: the buffer returned + * + * A sysfs 'read-only' sdev attribute, only works with SATA + */ +static ssize_t +sas_ncq_prio_supported_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev)); +} +static DEVICE_ATTR_RO(sas_ncq_prio_supported); + +/** + * sas_ncq_prio_enable_show - send prioritized io commands to device + * @dev: pointer to embedded device + * @attr: ? 
+ * @buf: the buffer returned + * + * A sysfs 'read/write' sdev attribute, only works with SATA + */ +static ssize_t +sas_ncq_prio_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf(buf, PAGE_SIZE, "%d\n", + sas_device_priv_data->ncq_prio_enable); +} + +static ssize_t +sas_ncq_prio_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; + bool ncq_prio_enable = 0; + + if (kstrtobool(buf, &ncq_prio_enable)) + return -EINVAL; + + if (!scsih_ncq_prio_supp(sdev)) + return -EINVAL; + + sas_device_priv_data->ncq_prio_enable = ncq_prio_enable; + return strlen(buf); +} +static DEVICE_ATTR_RW(sas_ncq_prio_enable); + +static struct attribute *mpt3sas_dev_attrs[] = { + &dev_attr_sas_address.attr, + &dev_attr_sas_device_handle.attr, + &dev_attr_sas_ncq_prio_supported.attr, + &dev_attr_sas_ncq_prio_enable.attr, + NULL, +}; + +static const struct attribute_group mpt3sas_dev_attr_group = { + .attrs = mpt3sas_dev_attrs +}; + +const struct attribute_group *mpt3sas_dev_groups[] = { + &mpt3sas_dev_attr_group, + NULL +}; + +/* file operations table for mpt3ctl device */ +static const struct file_operations ctl_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = _ctl_ioctl, + .poll = _ctl_poll, + .fasync = _ctl_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = _ctl_ioctl_compat, +#endif +}; + +/* file operations table for mpt2ctl device */ +static const struct file_operations ctl_gen2_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = _ctl_mpt2_ioctl, + .poll = _ctl_poll, + .fasync = _ctl_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = _ctl_mpt2_ioctl_compat, +#endif +}; + +static struct miscdevice ctl_dev = { + .minor = MPT3SAS_MINOR, + .name = MPT3SAS_DEV_NAME, + .fops = &ctl_fops, +}; + +static struct miscdevice gen2_ctl_dev = { + .minor = MPT2SAS_MINOR, + .name = MPT2SAS_DEV_NAME, + .fops = &ctl_gen2_fops, +}; + +/** + * mpt3sas_ctl_init - main entry point for ctl. + * @hbas_to_enumerate: when set to 1 the mpt3ctl device is not registered; + * when set to 2 the mpt2ctl device is not registered + */ +void +mpt3sas_ctl_init(ushort hbas_to_enumerate) +{ + async_queue = NULL; + + /* Don't register mpt3ctl ioctl device if + * hbas_to_enumerate is one. + */ + if (hbas_to_enumerate != 1) + if (misc_register(&ctl_dev) < 0) + pr_err("%s can't register misc device [minor=%d]\n", + MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR); + + /* Don't register mpt2ctl ioctl device if + * hbas_to_enumerate is two. + */ + if (hbas_to_enumerate != 2) + if (misc_register(&gen2_ctl_dev) < 0) + pr_err("%s can't register misc device [minor=%d]\n", + MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR); + + init_waitqueue_head(&ctl_poll_wait); +} + +/** + * mpt3sas_ctl_exit - exit point for ctl + * @hbas_to_enumerate: same device selector that was passed to mpt3sas_ctl_init() 
+ */ +void +mpt3sas_ctl_exit(ushort hbas_to_enumerate) +{ + struct MPT3SAS_ADAPTER *ioc; + int i; + + list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { + + /* free memory associated to diag buffers */ + for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { + if (!ioc->diag_buffer[i]) + continue; + dma_free_coherent(&ioc->pdev->dev, + ioc->diag_buffer_sz[i], + ioc->diag_buffer[i], + ioc->diag_buffer_dma[i]); + ioc->diag_buffer[i] = NULL; + ioc->diag_buffer_status[i] = 0; + } + + kfree(ioc->event_log); + } + if (hbas_to_enumerate != 1) + misc_deregister(&ctl_dev); + if (hbas_to_enumerate != 2) + misc_deregister(&gen2_ctl_dev); +} diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h new file mode 100644 index 000000000..8f6ffb402 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h @@ -0,0 +1,451 @@ +/* + * Management Module Support for MPT (Message Passing Technology) based + * controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#ifndef MPT3SAS_CTL_H_INCLUDED +#define MPT3SAS_CTL_H_INCLUDED + +#ifdef __KERNEL__ +#include +#endif + +#include "mpt3sas_base.h" + +#ifndef MPT2SAS_MINOR +#define MPT2SAS_MINOR (MPT_MINOR + 1) +#endif +#ifndef MPT3SAS_MINOR +#define MPT3SAS_MINOR (MPT_MINOR + 2) +#endif +#define MPT2SAS_DEV_NAME "mpt2ctl" +#define MPT3SAS_DEV_NAME "mpt3ctl" +#define MPT3_MAGIC_NUMBER 'L' +#define MPT3_IOCTL_DEFAULT_TIMEOUT (10) /* in seconds */ + +/** + * IOCTL opcodes + */ +#define MPT3IOCINFO _IOWR(MPT3_MAGIC_NUMBER, 17, \ + struct mpt3_ioctl_iocinfo) +#define MPT3COMMAND _IOWR(MPT3_MAGIC_NUMBER, 20, \ + struct mpt3_ioctl_command) +#ifdef CONFIG_COMPAT +#define MPT3COMMAND32 _IOWR(MPT3_MAGIC_NUMBER, 20, \ + struct mpt3_ioctl_command32) +#endif +#define MPT3EVENTQUERY _IOWR(MPT3_MAGIC_NUMBER, 21, \ + struct mpt3_ioctl_eventquery) +#define MPT3EVENTENABLE _IOWR(MPT3_MAGIC_NUMBER, 22, \ + struct mpt3_ioctl_eventenable) +#define MPT3EVENTREPORT _IOWR(MPT3_MAGIC_NUMBER, 23, \ + struct mpt3_ioctl_eventreport) +#define MPT3HARDRESET _IOWR(MPT3_MAGIC_NUMBER, 24, \ + struct mpt3_ioctl_diag_reset) +#define MPT3BTDHMAPPING _IOWR(MPT3_MAGIC_NUMBER, 31, \ + struct mpt3_ioctl_btdh_mapping) + +/* diag buffer support */ +#define MPT3DIAGREGISTER _IOWR(MPT3_MAGIC_NUMBER, 26, \ + struct mpt3_diag_register) +#define MPT3DIAGRELEASE _IOWR(MPT3_MAGIC_NUMBER, 27, \ + struct mpt3_diag_release) +#define MPT3DIAGUNREGISTER _IOWR(MPT3_MAGIC_NUMBER, 28, \ + struct mpt3_diag_unregister) +#define MPT3DIAGQUERY _IOWR(MPT3_MAGIC_NUMBER, 29, \ + struct mpt3_diag_query) +#define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \ + struct mpt3_diag_read_buffer) +#define MPT3ADDNLDIAGQUERY _IOWR(MPT3_MAGIC_NUMBER, 32, \ + struct mpt3_addnl_diag_query) + +/* Trace Buffer default UniqueId */ +#define MPT2DIAGBUFFUNIQUEID (0x07075900) +#define MPT3DIAGBUFFUNIQUEID (0x4252434D) + +/* UID not found */ +#define MPT3_DIAG_UID_NOT_FOUND (0xFF) + + +/** + * struct mpt3_ioctl_header - main header structure + * @ioc_number - IOC unit number + * @port_number - IOC port number + * @max_data_size - maximum number bytes to transfer on read + */ +struct mpt3_ioctl_header { + uint32_t ioc_number; + uint32_t port_number; + uint32_t max_data_size; +}; + +/** + * struct mpt3_ioctl_diag_reset - diagnostic reset + * @hdr - generic header + */ +struct mpt3_ioctl_diag_reset { + struct mpt3_ioctl_header hdr; +}; + + +/** + * struct mpt3_ioctl_pci_info - pci device info + * @device - pci device id + * @function - pci function id + * @bus - pci bus id + * @segment_id - pci segment id + */ +struct mpt3_ioctl_pci_info { + union { + struct { + uint32_t device:5; + uint32_t function:3; + uint32_t bus:24; + } bits; + uint32_t word; + } u; + uint32_t segment_id; +}; + + +#define MPT2_IOCTL_INTERFACE_SCSI (0x00) +#define MPT2_IOCTL_INTERFACE_FC (0x01) +#define MPT2_IOCTL_INTERFACE_FC_IP (0x02) +#define MPT2_IOCTL_INTERFACE_SAS (0x03) +#define MPT2_IOCTL_INTERFACE_SAS2 (0x04) +#define MPT2_IOCTL_INTERFACE_SAS2_SSS6200 (0x05) +#define MPT3_IOCTL_INTERFACE_SAS3 (0x06) +#define MPT3_IOCTL_INTERFACE_SAS35 (0x07) +#define MPT2_IOCTL_VERSION_LENGTH (32) + +/** + * struct mpt3_ioctl_iocinfo - generic controller info + * @hdr - generic header + * @adapter_type - type of adapter (spi, fc, sas) + * @port_number - port number + * @pci_id - PCI Id + * @hw_rev - hardware revision + * @sub_system_device - PCI subsystem Device ID + * @sub_system_vendor - PCI subsystem Vendor ID + * @rsvd0 - reserved + * @firmware_version - firmware version + * @bios_version - BIOS version + * 
@driver_version - driver version - 32 ASCII characters + * @rsvd1 - reserved + * @scsi_id - scsi id of adapter 0 + * @rsvd2 - reserved + * @pci_information - pci info (2nd revision) + */ +struct mpt3_ioctl_iocinfo { + struct mpt3_ioctl_header hdr; + uint32_t adapter_type; + uint32_t port_number; + uint32_t pci_id; + uint32_t hw_rev; + uint32_t subsystem_device; + uint32_t subsystem_vendor; + uint32_t rsvd0; + uint32_t firmware_version; + uint32_t bios_version; + uint8_t driver_version[MPT2_IOCTL_VERSION_LENGTH]; + uint8_t rsvd1; + uint8_t scsi_id; + uint16_t rsvd2; + struct mpt3_ioctl_pci_info pci_information; +}; + + +/* number of event log entries */ +#define MPT3SAS_CTL_EVENT_LOG_SIZE (200) + +/** + * struct mpt3_ioctl_eventquery - query event count and type + * @hdr - generic header + * @event_entries - number of events returned by get_event_report + * @rsvd - reserved + * @event_types - type of events currently being captured + */ +struct mpt3_ioctl_eventquery { + struct mpt3_ioctl_header hdr; + uint16_t event_entries; + uint16_t rsvd; + uint32_t event_types[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; +}; + +/** + * struct mpt3_ioctl_eventenable - enable/disable event capturing + * @hdr - generic header + * @event_types - toggle off/on type of events to be captured + */ +struct mpt3_ioctl_eventenable { + struct mpt3_ioctl_header hdr; + uint32_t event_types[4]; +}; + +#define MPT3_EVENT_DATA_SIZE (192) +/** + * struct MPT3_IOCTL_EVENTS - + * @event - the event that was reported + * @context - unique value for each event assigned by driver + * @data - event data returned in fw reply message + */ +struct MPT3_IOCTL_EVENTS { + uint32_t event; + uint32_t context; + uint8_t data[MPT3_EVENT_DATA_SIZE]; +}; + +/** + * struct mpt3_ioctl_eventreport - returing event log + * @hdr - generic header + * @event_data - (see struct MPT3_IOCTL_EVENTS) + */ +struct mpt3_ioctl_eventreport { + struct mpt3_ioctl_header hdr; + struct MPT3_IOCTL_EVENTS event_data[1]; +}; + +/** + * struct mpt3_ioctl_command - generic mpt firmware passthru ioctl + * @hdr - generic header + * @timeout - command timeout in seconds. (if zero then use driver default + * value). + * @reply_frame_buf_ptr - reply location + * @data_in_buf_ptr - destination for read + * @data_out_buf_ptr - data source for write + * @sense_data_ptr - sense data location + * @max_reply_bytes - maximum number of reply bytes to be sent to app. 
+ * @data_in_size - number bytes for data transfer in (read) + * @data_out_size - number bytes for data transfer out (write) + * @max_sense_bytes - maximum number of bytes for auto sense buffers + * @data_sge_offset - offset in words from the start of the request message to + * the first SGL + * @mf[1]; + */ +struct mpt3_ioctl_command { + struct mpt3_ioctl_header hdr; + uint32_t timeout; + void __user *reply_frame_buf_ptr; + void __user *data_in_buf_ptr; + void __user *data_out_buf_ptr; + void __user *sense_data_ptr; + uint32_t max_reply_bytes; + uint32_t data_in_size; + uint32_t data_out_size; + uint32_t max_sense_bytes; + uint32_t data_sge_offset; + uint8_t mf[1]; +}; + +#ifdef CONFIG_COMPAT +struct mpt3_ioctl_command32 { + struct mpt3_ioctl_header hdr; + uint32_t timeout; + uint32_t reply_frame_buf_ptr; + uint32_t data_in_buf_ptr; + uint32_t data_out_buf_ptr; + uint32_t sense_data_ptr; + uint32_t max_reply_bytes; + uint32_t data_in_size; + uint32_t data_out_size; + uint32_t max_sense_bytes; + uint32_t data_sge_offset; + uint8_t mf[1]; +}; +#endif + +/** + * struct mpt3_ioctl_btdh_mapping - mapping info + * @hdr - generic header + * @id - target device identification number + * @bus - SCSI bus number that the target device exists on + * @handle - device handle for the target device + * @rsvd - reserved + * + * To obtain a bus/id the application sets + * handle to valid handle, and bus/id to 0xFFFF. + * + * To obtain the device handle the application sets + * bus/id valid value, and the handle to 0xFFFF. + */ +struct mpt3_ioctl_btdh_mapping { + struct mpt3_ioctl_header hdr; + uint32_t id; + uint32_t bus; + uint16_t handle; + uint16_t rsvd; +}; + + + +/* application flags for mpt3_diag_register, mpt3_diag_query */ +#define MPT3_APP_FLAGS_APP_OWNED (0x0001) +#define MPT3_APP_FLAGS_BUFFER_VALID (0x0002) +#define MPT3_APP_FLAGS_FW_BUFFER_ACCESS (0x0004) +#define MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC (0x0008) + +/* flags for mpt3_diag_read_buffer */ +#define MPT3_FLAGS_REREGISTER (0x0001) + +#define MPT3_PRODUCT_SPECIFIC_DWORDS 23 + +/** + * struct mpt3_diag_register - application register with driver + * @hdr - generic header + * @reserved - + * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED + * @application_flags - misc flags + * @diagnostic_flags - specifies flags affecting command processing + * @product_specific - product specific information + * @requested_buffer_size - buffers size in bytes + * @unique_id - tag specified by application that is used to signal ownership + * of the buffer. + * + * This will allow the driver to setup any required buffers that will be + * needed by firmware to communicate with the driver. + */ +struct mpt3_diag_register { + struct mpt3_ioctl_header hdr; + uint8_t reserved; + uint8_t buffer_type; + uint16_t application_flags; + uint32_t diagnostic_flags; + uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS]; + uint32_t requested_buffer_size; + uint32_t unique_id; +}; + +/** + * struct mpt3_diag_unregister - application unregister with driver + * @hdr - generic header + * @unique_id - tag uniquely identifies the buffer to be unregistered + * + * This will allow the driver to cleanup any memory allocated for diag + * messages and to free up any resources. 
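+ *
+ * A minimal userspace sketch (the character device path is assumed; the
+ * default trace buffer unique id defined above is used purely for
+ * illustration):
+ *
+ *	struct mpt3_diag_unregister karg = { 0 };
+ *
+ *	karg.hdr.ioc_number = 0;	// first IOC
+ *	karg.unique_id = MPT3DIAGBUFFUNIQUEID;
+ *	if (ioctl(open("/dev/mpt3ctl", O_RDWR), MPT3DIAGUNREGISTER, &karg) < 0)
+ *		perror("MPT3DIAGUNREGISTER");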
+ */ +struct mpt3_diag_unregister { + struct mpt3_ioctl_header hdr; + uint32_t unique_id; +}; + +/** + * struct mpt3_diag_query - query relevant info associated with diag buffers + * @hdr - generic header + * @reserved - + * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED + * @application_flags - misc flags + * @diagnostic_flags - specifies flags affecting command processing + * @product_specific - product specific information + * @total_buffer_size - diag buffer size in bytes + * @driver_added_buffer_size - size of extra space appended to end of buffer + * @unique_id - unique id associated with this buffer. + * + * The application will send only buffer_type and unique_id. Driver will + * inspect unique_id first, if valid, fill in all the info. If unique_id is + * 0x00, the driver will return info specified by Buffer Type. + */ +struct mpt3_diag_query { + struct mpt3_ioctl_header hdr; + uint8_t reserved; + uint8_t buffer_type; + uint16_t application_flags; + uint32_t diagnostic_flags; + uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS]; + uint32_t total_buffer_size; + uint32_t driver_added_buffer_size; + uint32_t unique_id; +}; + +/** + * struct mpt3_diag_release - request to send Diag Release Message to firmware + * @hdr - generic header + * @unique_id - tag uniquely identifies the buffer to be released + * + * This allows ownership of the specified buffer to returned to the driver, + * allowing an application to read the buffer without fear that firmware is + * overwriting information in the buffer. + */ +struct mpt3_diag_release { + struct mpt3_ioctl_header hdr; + uint32_t unique_id; +}; + +/** + * struct mpt3_diag_read_buffer - request for copy of the diag buffer + * @hdr - generic header + * @status - + * @reserved - + * @flags - misc flags + * @starting_offset - starting offset within drivers buffer where to start + * reading data at into the specified application buffer + * @bytes_to_read - number of bytes to copy from the drivers buffer into the + * application buffer starting at starting_offset. + * @unique_id - unique id associated with this buffer. + * @diagnostic_data - data payload + */ +struct mpt3_diag_read_buffer { + struct mpt3_ioctl_header hdr; + uint8_t status; + uint8_t reserved; + uint16_t flags; + uint32_t starting_offset; + uint32_t bytes_to_read; + uint32_t unique_id; + uint32_t diagnostic_data[1]; +}; + +/** + * struct mpt3_addnl_diag_query - diagnostic buffer release reason + * @hdr - generic header + * @unique_id - unique id associated with this buffer. + * @rel_query - release query. + * @reserved2 + */ +struct mpt3_addnl_diag_query { + struct mpt3_ioctl_header hdr; + uint32_t unique_id; + struct htb_rel_query rel_query; + uint32_t reserved2[2]; +}; + +#endif /* MPT3SAS_CTL_H_INCLUDED */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_debug.h b/drivers/scsi/mpt3sas/mpt3sas_debug.h new file mode 100644 index 000000000..cceeb2c16 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_debug.h @@ -0,0 +1,206 @@ +/* + * Logging Support for MPT (Message Passing Technology) based controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#ifndef MPT3SAS_DEBUG_H_INCLUDED +#define MPT3SAS_DEBUG_H_INCLUDED + +#define MPT_DEBUG 0x00000001 +#define MPT_DEBUG_MSG_FRAME 0x00000002 +#define MPT_DEBUG_SG 0x00000004 +#define MPT_DEBUG_EVENTS 0x00000008 +#define MPT_DEBUG_EVENT_WORK_TASK 0x00000010 +#define MPT_DEBUG_INIT 0x00000020 +#define MPT_DEBUG_EXIT 0x00000040 +#define MPT_DEBUG_FAIL 0x00000080 +#define MPT_DEBUG_TM 0x00000100 +#define MPT_DEBUG_REPLY 0x00000200 +#define MPT_DEBUG_HANDSHAKE 0x00000400 +#define MPT_DEBUG_CONFIG 0x00000800 +#define MPT_DEBUG_DL 0x00001000 +#define MPT_DEBUG_RESET 0x00002000 +#define MPT_DEBUG_SCSI 0x00004000 +#define MPT_DEBUG_IOCTL 0x00008000 +#define MPT_DEBUG_SAS 0x00020000 +#define MPT_DEBUG_TRANSPORT 0x00040000 +#define MPT_DEBUG_TASK_SET_FULL 0x00080000 + +#define MPT_DEBUG_TRIGGER_DIAG 0x00200000 + + +#define MPT_CHECK_LOGGING(IOC, CMD, BITS) \ +{ \ + if (IOC->logging_level & BITS) \ + CMD; \ +} + +/* + * debug macros + */ + +#define dprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG) + +#define dsgprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SG) + +#define devtprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENTS) + +#define dewtprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENT_WORK_TASK) + +#define dinitprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_INIT) + +#define dexitprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EXIT) + +#define dfailprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_FAIL) + +#define dtmprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TM) + +#define dreplyprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_REPLY) + +#define dhsprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_HANDSHAKE) + +#define dcprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_CONFIG) + +#define 
ddlprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_DL) + +#define drsprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_RESET) + +#define dsprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SCSI) + +#define dctlprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_IOCTL) + +#define dsasprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS) + +#define dsastransport(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE) + +#define dmfprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_MSG_FRAME) + +#define dtsfprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TASK_SET_FULL) + +#define dtransportprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRANSPORT) + +#define dTriggerDiagPrintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRIGGER_DIAG) + + + +/* inline functions for dumping debug data*/ + +/** + * _debug_dump_mf - print message frame contents + * @mpi_request: pointer to message frame + * @sz: number of dwords + */ +static inline void +_debug_dump_mf(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *)mpi_request; + + pr_info("mf:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} +/** + * _debug_dump_reply - print message frame contents + * @mpi_request: pointer to message frame + * @sz: number of dwords + */ +static inline void +_debug_dump_reply(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *)mpi_request; + + pr_info("reply:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} +/** + * _debug_dump_config - print config page contents + * @mpi_request: pointer to message frame + * @sz: number of dwords + */ +static inline void +_debug_dump_config(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *)mpi_request; + + pr_info("config:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} + +#endif /* MPT3SAS_DEBUG_H_INCLUDED */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_debugfs.c b/drivers/scsi/mpt3sas/mpt3sas_debugfs.c new file mode 100644 index 000000000..a6ab1db81 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_debugfs.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Debugfs interface Support for MPT (Message Passing Technology) based + * controllers. + * + * Copyright (C) 2020 Broadcom Inc. + * + * Authors: Broadcom Inc. 
+ * Sreekanth Reddy + * Suganath Prabu + * + * Send feedback to : MPT-FusionLinux.pdl@broadcom.com) + * + **/ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include "mpt3sas_base.h" +#include + +static struct dentry *mpt3sas_debugfs_root; + +/* + * _debugfs_iocdump_read - copy ioc dump from debugfs buffer + * @filep: File Pointer + * @ubuf: Buffer to fill data + * @cnt: Length of the buffer + * @ppos: Offset in the file + */ + +static ssize_t +_debugfs_iocdump_read(struct file *filp, char __user *ubuf, size_t cnt, + loff_t *ppos) + +{ + struct mpt3sas_debugfs_buffer *debug = filp->private_data; + + if (!debug || !debug->buf) + return 0; + + return simple_read_from_buffer(ubuf, cnt, ppos, debug->buf, debug->len); +} + +/* + * _debugfs_iocdump_open : open the ioc_dump debugfs attribute file + */ +static int +_debugfs_iocdump_open(struct inode *inode, struct file *file) +{ + struct MPT3SAS_ADAPTER *ioc = inode->i_private; + struct mpt3sas_debugfs_buffer *debug; + + debug = kzalloc(sizeof(struct mpt3sas_debugfs_buffer), GFP_KERNEL); + if (!debug) + return -ENOMEM; + + debug->buf = (void *)ioc; + debug->len = sizeof(struct MPT3SAS_ADAPTER); + file->private_data = debug; + return 0; +} + +/* + * _debugfs_iocdump_release : release the ioc_dump debugfs attribute + * @inode: inode structure to the corresponds device + * @file: File pointer + */ +static int +_debugfs_iocdump_release(struct inode *inode, struct file *file) +{ + struct mpt3sas_debugfs_buffer *debug = file->private_data; + + if (!debug) + return 0; + + file->private_data = NULL; + kfree(debug); + return 0; +} + +static const struct file_operations mpt3sas_debugfs_iocdump_fops = { + .owner = THIS_MODULE, + .open = _debugfs_iocdump_open, + .read = _debugfs_iocdump_read, + .release = _debugfs_iocdump_release, +}; + +/* + * mpt3sas_init_debugfs : Create debugfs root for mpt3sas driver + */ +void mpt3sas_init_debugfs(void) +{ + mpt3sas_debugfs_root = debugfs_create_dir("mpt3sas", NULL); + if (!mpt3sas_debugfs_root) + pr_info("mpt3sas: Cannot create debugfs root\n"); +} + +/* + * mpt3sas_exit_debugfs : Remove debugfs root for mpt3sas driver + */ +void mpt3sas_exit_debugfs(void) +{ + debugfs_remove_recursive(mpt3sas_debugfs_root); +} + +/* + * mpt3sas_setup_debugfs : Setup debugfs per HBA adapter + * ioc: MPT3SAS_ADAPTER object + */ +void +mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc) +{ + char name[64]; + + snprintf(name, sizeof(name), "scsi_host%d", ioc->shost->host_no); + if (!ioc->debugfs_root) { + ioc->debugfs_root = + debugfs_create_dir(name, mpt3sas_debugfs_root); + if (!ioc->debugfs_root) { + dev_err(&ioc->pdev->dev, + "Cannot create per adapter debugfs directory\n"); + return; + } + } + + snprintf(name, sizeof(name), "ioc_dump"); + ioc->ioc_dump = debugfs_create_file(name, 0444, + ioc->debugfs_root, ioc, &mpt3sas_debugfs_iocdump_fops); + if (!ioc->ioc_dump) { + dev_err(&ioc->pdev->dev, + "Cannot create ioc_dump debugfs file\n"); + debugfs_remove(ioc->debugfs_root); + return; + } + + snprintf(name, sizeof(name), "host_recovery"); + debugfs_create_u8(name, 0444, ioc->debugfs_root, &ioc->shost_recovery); + +} + +/* + * mpt3sas_destroy_debugfs : Destroy debugfs per HBA adapter + * @ioc: MPT3SAS_ADAPTER object + */ +void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc) +{ + debugfs_remove_recursive(ioc->debugfs_root); +} + diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c new file mode 100644 index 000000000..605013d3e --- /dev/null +++ 
b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -0,0 +1,12942 @@ +/* + * Scsi Host Layer for MPT (Message Passing Technology) based controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mpt3sas_base.h" + +#define RAID_CHANNEL 1 + +#define PCIE_CHANNEL 2 + +/* forward proto's */ +static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander); +static void _firmware_event_work(struct work_struct *work); + +static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device); +static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 retry_count, u8 is_pd); +static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle); +static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, + struct _pcie_device *pcie_device); +static void +_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle); +static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid); +static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc); + +/* global parameters */ +LIST_HEAD(mpt3sas_ioc_list); +/* global ioc lock for list operations */ +DEFINE_SPINLOCK(gioc_lock); + +MODULE_AUTHOR(MPT3SAS_AUTHOR); +MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION); +MODULE_LICENSE("GPL"); +MODULE_VERSION(MPT3SAS_DRIVER_VERSION); +MODULE_ALIAS("mpt2sas"); + +/* local parameters */ +static u8 scsi_io_cb_idx = -1; +static u8 tm_cb_idx = -1; +static u8 ctl_cb_idx = -1; +static u8 base_cb_idx = -1; +static u8 port_enable_cb_idx = -1; +static u8 transport_cb_idx = -1; +static u8 scsih_cb_idx = -1; +static u8 config_cb_idx = -1; +static int mpt2_ids; +static int mpt3_ids; + +static u8 tm_tr_cb_idx = -1 ; +static u8 tm_tr_volume_cb_idx = -1 ; +static u8 tm_sas_control_cb_idx = -1; + +/* command line options */ +static u32 logging_level; +MODULE_PARM_DESC(logging_level, + " bits for enabling additional logging info (default=0)"); + + +static ushort max_sectors = 0xFFFF; +module_param(max_sectors, ushort, 0444); +MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767"); + + +static int missing_delay[2] = {-1, -1}; +module_param_array(missing_delay, int, NULL, 0444); +MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay"); + +/* scsi-mid layer global parmeter is max_report_luns, which is 511 */ +#define MPT3SAS_MAX_LUN (16895) +static u64 max_lun = MPT3SAS_MAX_LUN; +module_param(max_lun, ullong, 0444); +MODULE_PARM_DESC(max_lun, " max lun, default=16895 "); + +static ushort hbas_to_enumerate; +module_param(hbas_to_enumerate, ushort, 0444); +MODULE_PARM_DESC(hbas_to_enumerate, + " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \ + 1 - enumerates only SAS 2.0 generation HBAs\n \ + 2 - enumerates only SAS 3.0 generation HBAs (default=0)"); + +/* diag_buffer_enable is bitwise + * bit 0 set = TRACE + * bit 1 set = SNAPSHOT + * bit 2 set = EXTENDED + * + * Either bit can be set, or both + */ +static int diag_buffer_enable = -1; +module_param(diag_buffer_enable, int, 0444); +MODULE_PARM_DESC(diag_buffer_enable, + " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); +static int disable_discovery = -1; +module_param(disable_discovery, int, 0444); +MODULE_PARM_DESC(disable_discovery, " disable discovery "); + + +/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */ +static int prot_mask = -1; +module_param(prot_mask, int, 0444); +MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 "); + +static bool enable_sdev_max_qd; +module_param(enable_sdev_max_qd, 
bool, 0444); +MODULE_PARM_DESC(enable_sdev_max_qd, + "Enable sdev max qd as can_queue, def=disabled(0)"); + +static int multipath_on_hba = -1; +module_param(multipath_on_hba, int, 0); +MODULE_PARM_DESC(multipath_on_hba, + "Multipath support to add same target device\n\t\t" + "as many times as it is visible to HBA from various paths\n\t\t" + "(by default:\n\t\t" + "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t" + "\t SAS 3.5 HBA - This will be enabled)"); + +static int host_tagset_enable = 1; +module_param(host_tagset_enable, int, 0444); +MODULE_PARM_DESC(host_tagset_enable, + "Shared host tagset enable/disable Default: enable(1)"); + +/* raid transport support */ +static struct raid_template *mpt3sas_raid_template; +static struct raid_template *mpt2sas_raid_template; + + +/** + * struct sense_info - common structure for obtaining sense keys + * @skey: sense key + * @asc: additional sense code + * @ascq: additional sense code qualifier + */ +struct sense_info { + u8 skey; + u8 asc; + u8 ascq; +}; + +#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB) +#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC) +#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD) +#define MPT3SAS_ABRT_TASK_SET (0xFFFE) +#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF) +/** + * struct fw_event_work - firmware event struct + * @list: link list framework + * @work: work object (ioc->fault_reset_work_q) + * @ioc: per adapter object + * @device_handle: device handle + * @VF_ID: virtual function id + * @VP_ID: virtual port id + * @ignore: flag meaning this event has been marked to ignore + * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h + * @refcount: kref for this event + * @event_data: reply event data payload follows + * + * This object stored on ioc->fw_event_list. + */ +struct fw_event_work { + struct list_head list; + struct work_struct work; + + struct MPT3SAS_ADAPTER *ioc; + u16 device_handle; + u8 VF_ID; + u8 VP_ID; + u8 ignore; + u16 event; + struct kref refcount; + char event_data[] __aligned(4); +}; + +static void fw_event_work_free(struct kref *r) +{ + kfree(container_of(r, struct fw_event_work, refcount)); +} + +static void fw_event_work_get(struct fw_event_work *fw_work) +{ + kref_get(&fw_work->refcount); +} + +static void fw_event_work_put(struct fw_event_work *fw_work) +{ + kref_put(&fw_work->refcount, fw_event_work_free); +} + +static struct fw_event_work *alloc_fw_event_work(int len) +{ + struct fw_event_work *fw_event; + + fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC); + if (!fw_event) + return NULL; + + kref_init(&fw_event->refcount); + return fw_event; +} + +/** + * struct _scsi_io_transfer - scsi io transfer + * @handle: sas device handle (assigned by firmware) + * @is_raid: flag set for hidden raid components + * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE, + * @data_length: data transfer length + * @data_dma: dma pointer to data + * @sense: sense data + * @lun: lun number + * @cdb_length: cdb length + * @cdb: cdb contents + * @timeout: timeout for this command + * @VF_ID: virtual function id + * @VP_ID: virtual port id + * @valid_reply: flag set for reply message + * @sense_length: sense length + * @ioc_status: ioc status + * @scsi_state: scsi state + * @scsi_status: scsi staus + * @log_info: log information + * @transfer_length: data length transfer when there is a reply message + * + * Used for sending internal scsi commands to devices within this module. + * Refer to _scsi_send_scsi_io(). 
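+ *
+ * A caller is expected to fill in @handle, @dir, @data_length, @data_dma,
+ * @lun, @cdb_length, @cdb and @timeout before issuing the request, and to
+ * read @ioc_status, @scsi_state, @scsi_status, @sense_length and
+ * @transfer_length only when @valid_reply is set.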
+ */ +struct _scsi_io_transfer { + u16 handle; + u8 is_raid; + enum dma_data_direction dir; + u32 data_length; + dma_addr_t data_dma; + u8 sense[SCSI_SENSE_BUFFERSIZE]; + u32 lun; + u8 cdb_length; + u8 cdb[32]; + u8 timeout; + u8 VF_ID; + u8 VP_ID; + u8 valid_reply; + /* the following bits are only valid when 'valid_reply = 1' */ + u32 sense_length; + u16 ioc_status; + u8 scsi_state; + u8 scsi_status; + u32 log_info; + u32 transfer_length; +}; + +/** + * _scsih_set_debug_level - global setting of ioc->logging_level. + * @val: ? + * @kp: ? + * + * Note: The logging levels are defined in mpt3sas_debug.h. + */ +static int +_scsih_set_debug_level(const char *val, const struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + struct MPT3SAS_ADAPTER *ioc; + + if (ret) + return ret; + + pr_info("setting logging_level(0x%08x)\n", logging_level); + spin_lock(&gioc_lock); + list_for_each_entry(ioc, &mpt3sas_ioc_list, list) + ioc->logging_level = logging_level; + spin_unlock(&gioc_lock); + return 0; +} +module_param_call(logging_level, _scsih_set_debug_level, param_get_int, + &logging_level, 0644); + +/** + * _scsih_srch_boot_sas_address - search based on sas_address + * @sas_address: sas address + * @boot_device: boot device object from bios page 2 + * + * Return: 1 when there's a match, 0 means no match. + */ +static inline int +_scsih_srch_boot_sas_address(u64 sas_address, + Mpi2BootDeviceSasWwid_t *boot_device) +{ + return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0; +} + +/** + * _scsih_srch_boot_device_name - search based on device name + * @device_name: device name specified in INDENTIFY fram + * @boot_device: boot device object from bios page 2 + * + * Return: 1 when there's a match, 0 means no match. + */ +static inline int +_scsih_srch_boot_device_name(u64 device_name, + Mpi2BootDeviceDeviceName_t *boot_device) +{ + return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0; +} + +/** + * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot + * @enclosure_logical_id: enclosure logical id + * @slot_number: slot number + * @boot_device: boot device object from bios page 2 + * + * Return: 1 when there's a match, 0 means no match. + */ +static inline int +_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number, + Mpi2BootDeviceEnclosureSlot_t *boot_device) +{ + return (enclosure_logical_id == le64_to_cpu(boot_device-> + EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device-> + SlotNumber)) ? 1 : 0; +} + +/** + * mpt3sas_get_port_by_id - get hba port entry corresponding to provided + * port number from port list + * @ioc: per adapter object + * @port_id: port number + * @bypass_dirty_port_flag: when set look the matching hba port entry even + * if hba port entry is marked as dirty. + * + * Search for hba port entry corresponding to provided port number, + * if available return port object otherwise return NULL. + */ +struct hba_port * +mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, + u8 port_id, u8 bypass_dirty_port_flag) +{ + struct hba_port *port, *port_next; + + /* + * When multipath_on_hba is disabled then + * search the hba_port entry using default + * port id i.e. 
255 + */ + if (!ioc->multipath_on_hba) + port_id = MULTIPATH_DISABLED_PORT_ID; + + list_for_each_entry_safe(port, port_next, + &ioc->port_table_list, list) { + if (port->port_id != port_id) + continue; + if (bypass_dirty_port_flag) + return port; + if (port->flags & HBA_PORT_FLAG_DIRTY_PORT) + continue; + return port; + } + + /* + * Allocate hba_port object for default port id (i.e. 255) + * when multipath_on_hba is disabled for the HBA. + * And add this object to port_table_list. + */ + if (!ioc->multipath_on_hba) { + port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC); + if (!port) + return NULL; + + port->port_id = port_id; + ioc_info(ioc, + "hba_port entry: %p, port: %d is added to hba_port list\n", + port, port->port_id); + list_add_tail(&port->list, + &ioc->port_table_list); + return port; + } + return NULL; +} + +/** + * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number + * @ioc: per adapter object + * @port: hba_port object + * @phy: phy number + * + * Return virtual_phy object corresponding to phy number. + */ +struct virtual_phy * +mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc, + struct hba_port *port, u32 phy) +{ + struct virtual_phy *vphy, *vphy_next; + + if (!port->vphys_mask) + return NULL; + + list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) { + if (vphy->phy_mask & (1 << phy)) + return vphy; + } + return NULL; +} + +/** + * _scsih_is_boot_device - search for matching boot device. + * @sas_address: sas address + * @device_name: device name specified in INDENTIFY fram + * @enclosure_logical_id: enclosure logical id + * @slot: slot number + * @form: specifies boot device form + * @boot_device: boot device object from bios page 2 + * + * Return: 1 when there's a match, 0 means no match. + */ +static int +_scsih_is_boot_device(u64 sas_address, u64 device_name, + u64 enclosure_logical_id, u16 slot, u8 form, + Mpi2BiosPage2BootDevice_t *boot_device) +{ + int rc = 0; + + switch (form) { + case MPI2_BIOSPAGE2_FORM_SAS_WWID: + if (!sas_address) + break; + rc = _scsih_srch_boot_sas_address( + sas_address, &boot_device->SasWwid); + break; + case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT: + if (!enclosure_logical_id) + break; + rc = _scsih_srch_boot_encl_slot( + enclosure_logical_id, + slot, &boot_device->EnclosureSlot); + break; + case MPI2_BIOSPAGE2_FORM_DEVICE_NAME: + if (!device_name) + break; + rc = _scsih_srch_boot_device_name( + device_name, &boot_device->DeviceName); + break; + case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED: + break; + } + + return rc; +} + +/** + * _scsih_get_sas_address - set the sas_address for given device handle + * @ioc: ? + * @handle: device handle + * @sas_address: sas address + * + * Return: 0 success, non-zero when failure + */ +static int +_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u64 *sas_address) +{ + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2ConfigReply_t mpi_reply; + u32 ioc_status; + + *sas_address = 0; + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENXIO; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + /* For HBA, vSES doesn't return HBA SAS address. Instead return + * vSES's sas address. 
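+ * That is: when the handle belongs to one of the HBA's own phys
+ * (handle <= ioc->sas_hba.num_phys) and the device is not a SEP
+ * (vSES) device, report the HBA's SAS address; otherwise use the
+ * SASAddress from SAS Device Page 0.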
+ */ + if ((handle <= ioc->sas_hba.num_phys) && + (!(le32_to_cpu(sas_device_pg0.DeviceInfo) & + MPI2_SAS_DEVICE_INFO_SEP))) + *sas_address = ioc->sas_hba.sas_address; + else + *sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + return 0; + } + + /* we hit this because the given parent handle doesn't exist */ + if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + return -ENXIO; + + /* else error case */ + ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n", + handle, ioc_status, __FILE__, __LINE__, __func__); + return -EIO; +} + +/** + * _scsih_determine_boot_device - determine boot device. + * @ioc: per adapter object + * @device: sas_device or pcie_device object + * @channel: SAS or PCIe channel + * + * Determines whether this device should be first reported device to + * to scsi-ml or sas transport, this purpose is for persistent boot device. + * There are primary, alternate, and current entries in bios page 2. The order + * priority is primary, alternate, then current. This routine saves + * the corresponding device object. + * The saved data to be used later in _scsih_probe_boot_devices(). + */ +static void +_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device, + u32 channel) +{ + struct _sas_device *sas_device; + struct _pcie_device *pcie_device; + struct _raid_device *raid_device; + u64 sas_address; + u64 device_name; + u64 enclosure_logical_id; + u16 slot; + + /* only process this function when driver loads */ + if (!ioc->is_driver_loading) + return; + + /* no Bios, return immediately */ + if (!ioc->bios_pg3.BiosVersion) + return; + + if (channel == RAID_CHANNEL) { + raid_device = device; + sas_address = raid_device->wwid; + device_name = 0; + enclosure_logical_id = 0; + slot = 0; + } else if (channel == PCIE_CHANNEL) { + pcie_device = device; + sas_address = pcie_device->wwid; + device_name = 0; + enclosure_logical_id = 0; + slot = 0; + } else { + sas_device = device; + sas_address = sas_device->sas_address; + device_name = sas_device->device_name; + enclosure_logical_id = sas_device->enclosure_logical_id; + slot = sas_device->slot; + } + + if (!ioc->req_boot_device.device) { + if (_scsih_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.ReqBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.RequestedBootDevice)) { + dinitprintk(ioc, + ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n", + __func__, (u64)sas_address)); + ioc->req_boot_device.device = device; + ioc->req_boot_device.channel = channel; + } + } + + if (!ioc->req_alt_boot_device.device) { + if (_scsih_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.ReqAltBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.RequestedAltBootDevice)) { + dinitprintk(ioc, + ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n", + __func__, (u64)sas_address)); + ioc->req_alt_boot_device.device = device; + ioc->req_alt_boot_device.channel = channel; + } + } + + if (!ioc->current_boot_device.device) { + if (_scsih_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.CurrentBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.CurrentBootDevice)) { + dinitprintk(ioc, + ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n", + __func__, (u64)sas_address)); + ioc->current_boot_device.device = device; + ioc->current_boot_device.channel = channel; + } + } +} + +static struct _sas_device * +__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc, + struct 
MPT3SAS_TARGET *tgt_priv) +{ + struct _sas_device *ret; + + assert_spin_locked(&ioc->sas_device_lock); + + ret = tgt_priv->sas_dev; + if (ret) + sas_device_get(ret); + + return ret; +} + +static struct _sas_device * +mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc, + struct MPT3SAS_TARGET *tgt_priv) +{ + struct _sas_device *ret; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + return ret; +} + +static struct _pcie_device * +__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc, + struct MPT3SAS_TARGET *tgt_priv) +{ + struct _pcie_device *ret; + + assert_spin_locked(&ioc->pcie_device_lock); + + ret = tgt_priv->pcie_dev; + if (ret) + pcie_device_get(ret); + + return ret; +} + +/** + * mpt3sas_get_pdev_from_target - pcie device search + * @ioc: per adapter object + * @tgt_priv: starget private object + * + * Context: This function will acquire ioc->pcie_device_lock and will release + * before returning the pcie_device object. + * + * This searches for pcie_device from target, then return pcie_device object. + */ +static struct _pcie_device * +mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc, + struct MPT3SAS_TARGET *tgt_priv) +{ + struct _pcie_device *ret; + unsigned long flags; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + return ret; +} + + +/** + * __mpt3sas_get_sdev_by_rphy - sas device search + * @ioc: per adapter object + * @rphy: sas_rphy pointer + * + * Context: This function will acquire ioc->sas_device_lock and will release + * before returning the sas_device object. + * + * This searches for sas_device from rphy object + * then return sas_device object. + */ +struct _sas_device * +__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc, + struct sas_rphy *rphy) +{ + struct _sas_device *sas_device; + + assert_spin_locked(&ioc->sas_device_lock); + + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if (sas_device->rphy != rphy) + continue; + sas_device_get(sas_device); + return sas_device; + } + + sas_device = NULL; + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) { + if (sas_device->rphy != rphy) + continue; + sas_device_get(sas_device); + return sas_device; + } + + return NULL; +} + +/** + * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided + * sas address from sas_device_list list + * @ioc: per adapter object + * @sas_address: device sas address + * @port: port number + * + * Search for _sas_device object corresponding to provided sas address, + * if available return _sas_device object address otherwise return NULL. 
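+ *
+ * Must be called with ioc->sas_device_lock held (enforced by
+ * assert_spin_locked()); the reference taken here via sas_device_get()
+ * must later be dropped with sas_device_put() by the caller.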
+ */ +struct _sas_device * +__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct hba_port *port) +{ + struct _sas_device *sas_device; + + if (!port) + return NULL; + + assert_spin_locked(&ioc->sas_device_lock); + + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if (sas_device->sas_address != sas_address) + continue; + if (sas_device->port != port) + continue; + sas_device_get(sas_device); + return sas_device; + } + + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) { + if (sas_device->sas_address != sas_address) + continue; + if (sas_device->port != port) + continue; + sas_device_get(sas_device); + return sas_device; + } + + return NULL; +} + +/** + * mpt3sas_get_sdev_by_addr - sas device search + * @ioc: per adapter object + * @sas_address: sas address + * @port: hba port entry + * Context: Calling function should acquire ioc->sas_device_lock + * + * This searches for sas_device based on sas_address & port number, + * then return sas_device object. + */ +struct _sas_device * +mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct hba_port *port) +{ + struct _sas_device *sas_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + sas_address, port); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + return sas_device; +} + +static struct _sas_device * +__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _sas_device *sas_device; + + assert_spin_locked(&ioc->sas_device_lock); + + list_for_each_entry(sas_device, &ioc->sas_device_list, list) + if (sas_device->handle == handle) + goto found_device; + + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) + if (sas_device->handle == handle) + goto found_device; + + return NULL; + +found_device: + sas_device_get(sas_device); + return sas_device; +} + +/** + * mpt3sas_get_sdev_by_handle - sas device search + * @ioc: per adapter object + * @handle: sas device handle (assigned by firmware) + * Context: Calling function should acquire ioc->sas_device_lock + * + * This searches for sas_device based on sas_address, then return sas_device + * object. 
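+ *
+ * Illustrative usage (hypothetical caller, mirroring the get/put pattern
+ * used elsewhere in this file):
+ *
+ *	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
+ *	if (sas_device) {
+ *		... use sas_device ...
+ *		sas_device_put(sas_device);
+ *	}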
+ */ +struct _sas_device * +mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _sas_device *sas_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + return sas_device; +} + +/** + * _scsih_display_enclosure_chassis_info - display device location info + * @ioc: per adapter object + * @sas_device: per sas device object + * @sdev: scsi device struct + * @starget: scsi target struct + */ +static void +_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device, struct scsi_device *sdev, + struct scsi_target *starget) +{ + if (sdev) { + if (sas_device->enclosure_handle != 0) + sdev_printk(KERN_INFO, sdev, + "enclosure logical id (0x%016llx), slot(%d) \n", + (unsigned long long) + sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name[0] != '\0') + sdev_printk(KERN_INFO, sdev, + "enclosure level(0x%04x), connector name( %s)\n", + sas_device->enclosure_level, + sas_device->connector_name); + if (sas_device->is_chassis_slot_valid) + sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n", + sas_device->chassis_slot); + } else if (starget) { + if (sas_device->enclosure_handle != 0) + starget_printk(KERN_INFO, starget, + "enclosure logical id(0x%016llx), slot(%d) \n", + (unsigned long long) + sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name[0] != '\0') + starget_printk(KERN_INFO, starget, + "enclosure level(0x%04x), connector name( %s)\n", + sas_device->enclosure_level, + sas_device->connector_name); + if (sas_device->is_chassis_slot_valid) + starget_printk(KERN_INFO, starget, + "chassis slot(0x%04x)\n", + sas_device->chassis_slot); + } else { + if (sas_device->enclosure_handle != 0) + ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n", + (u64)sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name[0] != '\0') + ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n", + sas_device->enclosure_level, + sas_device->connector_name); + if (sas_device->is_chassis_slot_valid) + ioc_info(ioc, "chassis slot(0x%04x)\n", + sas_device->chassis_slot); + } +} + +/** + * _scsih_sas_device_remove - remove sas_device from list. + * @ioc: per adapter object + * @sas_device: the sas_device object + * Context: This function will acquire ioc->sas_device_lock. + * + * If sas_device is on the list, remove it and decrement its reference count. + */ +static void +_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + unsigned long flags; + + if (!sas_device) + return; + ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n", + sas_device->handle, (u64)sas_device->sas_address); + + _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); + + /* + * The lock serializes access to the list, but we still need to verify + * that nobody removed the entry while we were waiting on the lock. 
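+ * For example, a racing _scsih_device_remove_by_handle() may already
+ * have done list_del_init() on this entry; the list_empty() check
+ * below catches that case so the list reference is not dropped twice.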
+ */ + spin_lock_irqsave(&ioc->sas_device_lock, flags); + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +/** + * _scsih_device_remove_by_handle - removing device object by handle + * @ioc: per adapter object + * @handle: device handle + */ +static void +_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _sas_device *sas_device; + unsigned long flags; + + if (ioc->shost_recovery) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device) { + list_del_init(&sas_device->list); + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device) { + _scsih_remove_device(ioc, sas_device); + sas_device_put(sas_device); + } +} + +/** + * mpt3sas_device_remove_by_sas_address - removing device object by + * sas address & port number + * @ioc: per adapter object + * @sas_address: device sas_address + * @port: hba port entry + * + * Return nothing. + */ +void +mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct hba_port *port) +{ + struct _sas_device *sas_device; + unsigned long flags; + + if (ioc->shost_recovery) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port); + if (sas_device) { + list_del_init(&sas_device->list); + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device) { + _scsih_remove_device(ioc, sas_device); + sas_device_put(sas_device); + } +} + +/** + * _scsih_sas_device_add - insert sas_device to the list. + * @ioc: per adapter object + * @sas_device: the sas_device object + * Context: This function will acquire ioc->sas_device_lock. + * + * Adding new object to the ioc->sas_device_list. + */ +static void +_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + unsigned long flags; + + dewtprintk(ioc, + ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n", + __func__, sas_device->handle, + (u64)sas_device->sas_address)); + + dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, + NULL, NULL)); + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device_get(sas_device); + list_add_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (ioc->hide_drives) { + clear_bit(sas_device->handle, ioc->pend_os_device_add); + return; + } + + if (!mpt3sas_transport_port_add(ioc, sas_device->handle, + sas_device->sas_address_parent, sas_device->port)) { + _scsih_sas_device_remove(ioc, sas_device); + } else if (!sas_device->starget) { + /* + * When asyn scanning is enabled, its not possible to remove + * devices while scanning is turned on due to an oops in + * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start() + */ + if (!ioc->is_driver_loading) { + mpt3sas_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent, + sas_device->port); + _scsih_sas_device_remove(ioc, sas_device); + } + } else + clear_bit(sas_device->handle, ioc->pend_os_device_add); +} + +/** + * _scsih_sas_device_init_add - insert sas_device to the list. + * @ioc: per adapter object + * @sas_device: the sas_device object + * Context: This function will acquire ioc->sas_device_lock. 
+ * + * Adding new object at driver load time to the ioc->sas_device_init_list. + */ +static void +_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + unsigned long flags; + + dewtprintk(ioc, + ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n", + __func__, sas_device->handle, + (u64)sas_device->sas_address)); + + dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, + NULL, NULL)); + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device_get(sas_device); + list_add_tail(&sas_device->list, &ioc->sas_device_init_list); + _scsih_determine_boot_device(ioc, sas_device, 0); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + + +static struct _pcie_device * +__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) +{ + struct _pcie_device *pcie_device; + + assert_spin_locked(&ioc->pcie_device_lock); + + list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) + if (pcie_device->wwid == wwid) + goto found_device; + + list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) + if (pcie_device->wwid == wwid) + goto found_device; + + return NULL; + +found_device: + pcie_device_get(pcie_device); + return pcie_device; +} + + +/** + * mpt3sas_get_pdev_by_wwid - pcie device search + * @ioc: per adapter object + * @wwid: wwid + * + * Context: This function will acquire ioc->pcie_device_lock and will release + * before returning the pcie_device object. + * + * This searches for pcie_device based on wwid, then return pcie_device object. + */ +static struct _pcie_device * +mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) +{ + struct _pcie_device *pcie_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + return pcie_device; +} + + +static struct _pcie_device * +__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id, + int channel) +{ + struct _pcie_device *pcie_device; + + assert_spin_locked(&ioc->pcie_device_lock); + + list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) + if (pcie_device->id == id && pcie_device->channel == channel) + goto found_device; + + list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) + if (pcie_device->id == id && pcie_device->channel == channel) + goto found_device; + + return NULL; + +found_device: + pcie_device_get(pcie_device); + return pcie_device; +} + +static struct _pcie_device * +__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _pcie_device *pcie_device; + + assert_spin_locked(&ioc->pcie_device_lock); + + list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) + if (pcie_device->handle == handle) + goto found_device; + + list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) + if (pcie_device->handle == handle) + goto found_device; + + return NULL; + +found_device: + pcie_device_get(pcie_device); + return pcie_device; +} + + +/** + * mpt3sas_get_pdev_by_handle - pcie device search + * @ioc: per adapter object + * @handle: Firmware device handle + * + * Context: This function will acquire ioc->pcie_device_lock and will release + * before returning the pcie_device object. + * + * This searches for pcie_device based on handle, then return pcie_device + * object. 
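+ *
+ * The reference taken here via pcie_device_get() must be released by the
+ * caller with pcie_device_put().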
+ */ +struct _pcie_device * +mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _pcie_device *pcie_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + return pcie_device; +} + +/** + * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency. + * @ioc: per adapter object + * Context: This function will acquire ioc->pcie_device_lock + * + * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency + * which has reported maximum among all available NVMe drives. + * Minimum max_shutdown_latency will be six seconds. + */ +static void +_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc) +{ + struct _pcie_device *pcie_device; + unsigned long flags; + u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { + if (pcie_device->shutdown_latency) { + if (shutdown_latency < pcie_device->shutdown_latency) + shutdown_latency = + pcie_device->shutdown_latency; + } + } + ioc->max_shutdown_latency = shutdown_latency; + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); +} + +/** + * _scsih_pcie_device_remove - remove pcie_device from list. + * @ioc: per adapter object + * @pcie_device: the pcie_device object + * Context: This function will acquire ioc->pcie_device_lock. + * + * If pcie_device is on the list, remove it and decrement its reference count. + */ +static void +_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc, + struct _pcie_device *pcie_device) +{ + unsigned long flags; + int was_on_pcie_device_list = 0; + u8 update_latency = 0; + + if (!pcie_device) + return; + ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", + pcie_device->handle, (u64)pcie_device->wwid); + if (pcie_device->enclosure_handle != 0) + ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n", + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot); + if (pcie_device->connector_name[0] != '\0') + ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n", + pcie_device->enclosure_level, + pcie_device->connector_name); + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + if (!list_empty(&pcie_device->list)) { + list_del_init(&pcie_device->list); + was_on_pcie_device_list = 1; + } + if (pcie_device->shutdown_latency == ioc->max_shutdown_latency) + update_latency = 1; + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + if (was_on_pcie_device_list) { + kfree(pcie_device->serial_number); + pcie_device_put(pcie_device); + } + + /* + * This device's RTD3 Entry Latency matches IOC's + * max_shutdown_latency. Recalculate IOC's max_shutdown_latency + * from the available drives as current drive is getting removed. 
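+ * (update_latency was latched above, while ioc->pcie_device_lock was
+ * still held, so it reflects this device's latency at removal time.)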
+ */ + if (update_latency) + _scsih_set_nvme_max_shutdown_latency(ioc); +} + + +/** + * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle + * @ioc: per adapter object + * @handle: device handle + */ +static void +_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _pcie_device *pcie_device; + unsigned long flags; + int was_on_pcie_device_list = 0; + u8 update_latency = 0; + + if (ioc->shost_recovery) + return; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle); + if (pcie_device) { + if (!list_empty(&pcie_device->list)) { + list_del_init(&pcie_device->list); + was_on_pcie_device_list = 1; + pcie_device_put(pcie_device); + } + if (pcie_device->shutdown_latency == ioc->max_shutdown_latency) + update_latency = 1; + } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + if (was_on_pcie_device_list) { + _scsih_pcie_device_remove_from_sml(ioc, pcie_device); + pcie_device_put(pcie_device); + } + + /* + * This device's RTD3 Entry Latency matches IOC's + * max_shutdown_latency. Recalculate IOC's max_shutdown_latency + * from the available drives as current drive is getting removed. + */ + if (update_latency) + _scsih_set_nvme_max_shutdown_latency(ioc); +} + +/** + * _scsih_pcie_device_add - add pcie_device object + * @ioc: per adapter object + * @pcie_device: pcie_device object + * + * This is added to the pcie_device_list link list. + */ +static void +_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc, + struct _pcie_device *pcie_device) +{ + unsigned long flags; + + dewtprintk(ioc, + ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n", + __func__, + pcie_device->handle, (u64)pcie_device->wwid)); + if (pcie_device->enclosure_handle != 0) + dewtprintk(ioc, + ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n", + __func__, + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot)); + if (pcie_device->connector_name[0] != '\0') + dewtprintk(ioc, + ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n", + __func__, pcie_device->enclosure_level, + pcie_device->connector_name)); + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device_get(pcie_device); + list_add_tail(&pcie_device->list, &ioc->pcie_device_list); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + if (pcie_device->access_status == + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) { + clear_bit(pcie_device->handle, ioc->pend_os_device_add); + return; + } + if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) { + _scsih_pcie_device_remove(ioc, pcie_device); + } else if (!pcie_device->starget) { + if (!ioc->is_driver_loading) { +/*TODO-- Need to find out whether this condition will occur or not*/ + clear_bit(pcie_device->handle, ioc->pend_os_device_add); + } + } else + clear_bit(pcie_device->handle, ioc->pend_os_device_add); +} + +/* + * _scsih_pcie_device_init_add - insert pcie_device to the init list. + * @ioc: per adapter object + * @pcie_device: the pcie_device object + * Context: This function will acquire ioc->pcie_device_lock. + * + * Adding new object at driver load time to the ioc->pcie_device_init_list. 
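+ *
+ * Devices whose access_status is MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED are
+ * added to the init list but are not considered for boot device selection.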
+ */ +static void +_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc, + struct _pcie_device *pcie_device) +{ + unsigned long flags; + + dewtprintk(ioc, + ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n", + __func__, + pcie_device->handle, (u64)pcie_device->wwid)); + if (pcie_device->enclosure_handle != 0) + dewtprintk(ioc, + ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n", + __func__, + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot)); + if (pcie_device->connector_name[0] != '\0') + dewtprintk(ioc, + ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n", + __func__, pcie_device->enclosure_level, + pcie_device->connector_name)); + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device_get(pcie_device); + list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list); + if (pcie_device->access_status != + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) + _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); +} +/** + * _scsih_raid_device_find_by_id - raid device search + * @ioc: per adapter object + * @id: sas device target id + * @channel: sas device channel + * Context: Calling function should acquire ioc->raid_device_lock + * + * This searches for raid_device based on target id, then return raid_device + * object. + */ +static struct _raid_device * +_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel) +{ + struct _raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->id == id && raid_device->channel == channel) { + r = raid_device; + goto out; + } + } + + out: + return r; +} + +/** + * mpt3sas_raid_device_find_by_handle - raid device search + * @ioc: per adapter object + * @handle: sas device handle (assigned by firmware) + * Context: Calling function should acquire ioc->raid_device_lock + * + * This searches for raid_device based on handle, then return raid_device + * object. + */ +struct _raid_device * +mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->handle != handle) + continue; + r = raid_device; + goto out; + } + + out: + return r; +} + +/** + * _scsih_raid_device_find_by_wwid - raid device search + * @ioc: per adapter object + * @wwid: ? + * Context: Calling function should acquire ioc->raid_device_lock + * + * This searches for raid_device based on wwid, then return raid_device + * object. + */ +static struct _raid_device * +_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) +{ + struct _raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->wwid != wwid) + continue; + r = raid_device; + goto out; + } + + out: + return r; +} + +/** + * _scsih_raid_device_add - add raid_device object + * @ioc: per adapter object + * @raid_device: raid_device object + * + * This is added to the raid_device_list link list. 
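+ *
+ * The counterpart _scsih_raid_device_remove() below unlinks the object
+ * from the list and frees it.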
+ */ +static void +_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc, + struct _raid_device *raid_device) +{ + unsigned long flags; + + dewtprintk(ioc, + ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n", + __func__, + raid_device->handle, (u64)raid_device->wwid)); + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_add_tail(&raid_device->list, &ioc->raid_device_list); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +/** + * _scsih_raid_device_remove - delete raid_device object + * @ioc: per adapter object + * @raid_device: raid_device object + * + */ +static void +_scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc, + struct _raid_device *raid_device) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_del(&raid_device->list); + kfree(raid_device); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +/** + * mpt3sas_scsih_expander_find_by_handle - expander device search + * @ioc: per adapter object + * @handle: expander handle (assigned by firmware) + * Context: Calling function should acquire ioc->sas_device_lock + * + * This searches for expander device based on handle, then returns the + * sas_node object. + */ +struct _sas_node * +mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _sas_node *sas_expander, *r; + + r = NULL; + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->handle != handle) + continue; + r = sas_expander; + goto out; + } + out: + return r; +} + +/** + * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search + * @ioc: per adapter object + * @handle: enclosure handle (assigned by firmware) + * Context: Calling function should acquire ioc->sas_device_lock + * + * This searches for enclosure device based on handle, then returns the + * enclosure object. + */ +static struct _enclosure_node * +mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _enclosure_node *enclosure_dev, *r; + + r = NULL; + list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) { + if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle) + continue; + r = enclosure_dev; + goto out; + } +out: + return r; +} +/** + * mpt3sas_scsih_expander_find_by_sas_address - expander device search + * @ioc: per adapter object + * @sas_address: sas address + * @port: hba port entry + * Context: Calling function should acquire ioc->sas_node_lock. + * + * This searches for expander device based on sas_address & port number, + * then returns the sas_node object. + */ +struct _sas_node * +mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct hba_port *port) +{ + struct _sas_node *sas_expander, *r = NULL; + + if (!port) + return r; + + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->sas_address != sas_address) + continue; + if (sas_expander->port != port) + continue; + r = sas_expander; + goto out; + } + out: + return r; +} + +/** + * _scsih_expander_node_add - insert expander device to the list. + * @ioc: per adapter object + * @sas_expander: the sas_device object + * Context: This function will acquire ioc->sas_node_lock. + * + * Adding new object to the ioc->sas_expander_list. 
+ */ +static void +_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_add_tail(&sas_expander->list, &ioc->sas_expander_list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +} + +/** + * _scsih_is_end_device - determines if device is an end device + * @device_info: bitfield providing information about the device. + * Context: none + * + * Return: 1 if end device. + */ +static int +_scsih_is_end_device(u32 device_info) +{ + if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE && + ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) | + (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) | + (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE))) + return 1; + else + return 0; +} + +/** + * _scsih_is_nvme_pciescsi_device - determines if + * device is an pcie nvme/scsi device + * @device_info: bitfield providing information about the device. + * Context: none + * + * Returns 1 if device is pcie device type nvme/scsi. + */ +static int +_scsih_is_nvme_pciescsi_device(u32 device_info) +{ + if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) + == MPI26_PCIE_DEVINFO_NVME) || + ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) + == MPI26_PCIE_DEVINFO_SCSI)) + return 1; + else + return 0; +} + +/** + * _scsih_scsi_lookup_find_by_target - search for matching channel:id + * @ioc: per adapter object + * @id: target id + * @channel: channel + * Context: This function will acquire ioc->scsi_lookup_lock. + * + * This will search for a matching channel:id in the scsi_lookup array, + * returning 1 if found. + */ +static u8 +_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id, + int channel) +{ + int smid; + struct scsi_cmnd *scmd; + + for (smid = 1; + smid <= ioc->shost->can_queue; smid++) { + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + if (scmd->device->id == id && + scmd->device->channel == channel) + return 1; + } + return 0; +} + +/** + * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun + * @ioc: per adapter object + * @id: target id + * @lun: lun number + * @channel: channel + * Context: This function will acquire ioc->scsi_lookup_lock. + * + * This will search for a matching channel:id:lun in the scsi_lookup array, + * returning 1 if found. + */ +static u8 +_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id, + unsigned int lun, int channel) +{ + int smid; + struct scsi_cmnd *scmd; + + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + if (scmd->device->id == id && + scmd->device->channel == channel && + scmd->device->lun == lun) + return 1; + } + return 0; +} + +/** + * mpt3sas_scsih_scsi_lookup_get - returns scmd entry + * @ioc: per adapter object + * @smid: system request message index + * + * Return: the smid stored scmd pointer. + * Then will dereference the stored scmd pointer. + */ +struct scsi_cmnd * +mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + struct scsi_cmnd *scmd = NULL; + struct scsiio_tracker *st; + Mpi25SCSIIORequest_t *mpi_request; + u16 tag = smid - 1; + + if (smid > 0 && + smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) { + u32 unique_tag = + ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + /* + * If SCSI IO request is outstanding at driver level then + * DevHandle filed must be non-zero. 
If DevHandle is zero + * then it means that this smid is free at driver level, + * so return NULL. + */ + if (!mpi_request->DevHandle) + return scmd; + + scmd = scsi_host_find_tag(ioc->shost, unique_tag); + if (scmd) { + st = scsi_cmd_priv(scmd); + if (st->cb_idx == 0xFF || st->smid == 0) + scmd = NULL; + } + } + return scmd; +} + +/** + * scsih_change_queue_depth - setting device queue depth + * @sdev: scsi device struct + * @qdepth: requested queue depth + * + * Return: queue depth. + */ +static int +scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + struct Scsi_Host *shost = sdev->host; + int max_depth; + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + unsigned long flags; + + max_depth = shost->can_queue; + + /* + * limit max device queue for SATA to 32 if enable_sdev_max_qd + * is disabled. + */ + if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc) + goto not_sata; + + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + goto not_sata; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + goto not_sata; + if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) + goto not_sata; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data); + if (sas_device) { + if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + max_depth = MPT3SAS_SATA_QUEUE_DEPTH; + + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + not_sata: + + if (!sdev->tagged_supported) + max_depth = 1; + if (qdepth > max_depth) + qdepth = max_depth; + scsi_change_queue_depth(sdev, qdepth); + sdev_printk(KERN_INFO, sdev, + "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n", + sdev->queue_depth, sdev->tagged_supported, + sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1)); + return sdev->queue_depth; +} + +/** + * mpt3sas_scsih_change_queue_depth - setting device queue depth + * @sdev: scsi device struct + * @qdepth: requested queue depth + * + * Returns nothing. + */ +void +mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + struct Scsi_Host *shost = sdev->host; + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + if (ioc->enable_sdev_max_qd) + qdepth = shost->can_queue; + + scsih_change_queue_depth(sdev, qdepth); +} + +/** + * scsih_target_alloc - target add routine + * @starget: scsi target struct + * + * Return: 0 if ok. Any other return is assumed to be an error and + * the device is ignored. 
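+ *
+ * For SAS and PCIe devices this routine takes a reference on the matching
+ * device object (via __mpt3sas_get_sdev_by_rphy() /
+ * __mpt3sas_get_pdev_by_idchannel()); the corresponding put is done in
+ * scsih_target_destroy().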
+ */ +static int +scsih_target_alloc(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + struct _raid_device *raid_device; + struct _pcie_device *pcie_device; + unsigned long flags; + struct sas_rphy *rphy; + + sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data), + GFP_KERNEL); + if (!sas_target_priv_data) + return -ENOMEM; + + starget->hostdata = sas_target_priv_data; + sas_target_priv_data->starget = starget; + sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; + + /* RAID volumes */ + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, starget->id, + starget->channel); + if (raid_device) { + sas_target_priv_data->handle = raid_device->handle; + sas_target_priv_data->sas_address = raid_device->wwid; + sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; + if (ioc->is_warpdrive) + sas_target_priv_data->raid_device = raid_device; + raid_device->starget = starget; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return 0; + } + + /* PCIe devices */ + if (starget->channel == PCIE_CHANNEL) { + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id, + starget->channel); + if (pcie_device) { + sas_target_priv_data->handle = pcie_device->handle; + sas_target_priv_data->sas_address = pcie_device->wwid; + sas_target_priv_data->port = NULL; + sas_target_priv_data->pcie_dev = pcie_device; + pcie_device->starget = starget; + pcie_device->id = starget->id; + pcie_device->channel = starget->channel; + sas_target_priv_data->flags |= + MPT_TARGET_FLAGS_PCIE_DEVICE; + if (pcie_device->fast_path) + sas_target_priv_data->flags |= + MPT_TARGET_FASTPATH_IO; + } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + return 0; + } + + /* sas/sata devices */ + spin_lock_irqsave(&ioc->sas_device_lock, flags); + rphy = dev_to_rphy(starget->dev.parent); + sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy); + + if (sas_device) { + sas_target_priv_data->handle = sas_device->handle; + sas_target_priv_data->sas_address = sas_device->sas_address; + sas_target_priv_data->port = sas_device->port; + sas_target_priv_data->sas_dev = sas_device; + sas_device->starget = starget; + sas_device->id = starget->id; + sas_device->channel = starget->channel; + if (test_bit(sas_device->handle, ioc->pd_handles)) + sas_target_priv_data->flags |= + MPT_TARGET_FLAGS_RAID_COMPONENT; + if (sas_device->fast_path) + sas_target_priv_data->flags |= + MPT_TARGET_FASTPATH_IO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + return 0; +} + +/** + * scsih_target_destroy - target destroy routine + * @starget: scsi target struct + */ +static void +scsih_target_destroy(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + struct _raid_device *raid_device; + struct _pcie_device *pcie_device; + unsigned long flags; + + sas_target_priv_data = starget->hostdata; + if (!sas_target_priv_data) + return; + + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, starget->id, + starget->channel); + if (raid_device) { + 
raid_device->starget = NULL; + raid_device->sdev = NULL; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + goto out; + } + + if (starget->channel == PCIE_CHANNEL) { + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_from_target(ioc, + sas_target_priv_data); + if (pcie_device && (pcie_device->starget == starget) && + (pcie_device->id == starget->id) && + (pcie_device->channel == starget->channel)) + pcie_device->starget = NULL; + + if (pcie_device) { + /* + * Corresponding get() is in _scsih_target_alloc() + */ + sas_target_priv_data->pcie_dev = NULL; + pcie_device_put(pcie_device); + pcie_device_put(pcie_device); + } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + goto out; + } + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data); + if (sas_device && (sas_device->starget == starget) && + (sas_device->id == starget->id) && + (sas_device->channel == starget->channel)) + sas_device->starget = NULL; + + if (sas_device) { + /* + * Corresponding get() is in _scsih_target_alloc() + */ + sas_target_priv_data->sas_dev = NULL; + sas_device_put(sas_device); + + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + out: + kfree(sas_target_priv_data); + starget->hostdata = NULL; +} + +/** + * scsih_slave_alloc - device add routine + * @sdev: scsi device struct + * + * Return: 0 if ok. Any other return is assumed to be an error and + * the device is ignored. + */ +static int +scsih_slave_alloc(struct scsi_device *sdev) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_target *starget; + struct _raid_device *raid_device; + struct _sas_device *sas_device; + struct _pcie_device *pcie_device; + unsigned long flags; + + sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data), + GFP_KERNEL); + if (!sas_device_priv_data) + return -ENOMEM; + + sas_device_priv_data->lun = sdev->lun; + sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT; + + starget = scsi_target(sdev); + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->num_luns++; + sas_device_priv_data->sas_target = sas_target_priv_data; + sdev->hostdata = sas_device_priv_data; + if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT)) + sdev->no_uld_attach = 1; + + shost = dev_to_shost(&starget->dev); + ioc = shost_priv(shost); + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, + starget->id, starget->channel); + if (raid_device) + raid_device->sdev = sdev; /* raid is single lun */ + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } + if (starget->channel == PCIE_CHANNEL) { + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, + sas_target_priv_data->sas_address); + if (pcie_device && (pcie_device->starget == NULL)) { + sdev_printk(KERN_INFO, sdev, + "%s : pcie_device->starget set to starget @ %d\n", + __func__, __LINE__); + pcie_device->starget = starget; + } + + if (pcie_device) + pcie_device_put(pcie_device); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + sas_target_priv_data->sas_address, + 
sas_target_priv_data->port); + if (sas_device && (sas_device->starget == NULL)) { + sdev_printk(KERN_INFO, sdev, + "%s : sas_device->starget set to starget @ %d\n", + __func__, __LINE__); + sas_device->starget = starget; + } + + if (sas_device) + sas_device_put(sas_device); + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + + return 0; +} + +/** + * scsih_slave_destroy - device destroy routine + * @sdev: scsi device struct + */ +static void +scsih_slave_destroy(struct scsi_device *sdev) +{ + struct MPT3SAS_TARGET *sas_target_priv_data; + struct scsi_target *starget; + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + struct _sas_device *sas_device; + struct _pcie_device *pcie_device; + unsigned long flags; + + if (!sdev->hostdata) + return; + + starget = scsi_target(sdev); + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->num_luns--; + + shost = dev_to_shost(&starget->dev); + ioc = shost_priv(shost); + + if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_from_target(ioc, + sas_target_priv_data); + if (pcie_device && !sas_target_priv_data->num_luns) + pcie_device->starget = NULL; + + if (pcie_device) + pcie_device_put(pcie_device); + + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_from_target(ioc, + sas_target_priv_data); + if (sas_device && !sas_target_priv_data->num_luns) + sas_device->starget = NULL; + + if (sas_device) + sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + + kfree(sdev->hostdata); + sdev->hostdata = NULL; +} + +/** + * _scsih_display_sata_capabilities - sata capabilities + * @ioc: per adapter object + * @handle: device handle + * @sdev: scsi device struct + */ +static void +_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc, + u16 handle, struct scsi_device *sdev) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + u32 ioc_status; + u16 flags; + u32 device_info; + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + flags = le16_to_cpu(sas_device_pg0.Flags); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + + sdev_printk(KERN_INFO, sdev, + "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), " + "sw_preserve(%s)\n", + (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n", + (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n", + (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" : + "n", + (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n", + (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n", + (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n"); +} + +/* + * raid transport support - + * Enabled for SLES11 and newer, in older kernels the driver will panic when + * unloading the driver followed by a load - I believe that the subroutine + * raid_class_release() is not cleaning up properly. 
+ */ + +/** + * scsih_is_raid - return boolean indicating device is raid volume + * @dev: the device struct object + */ +static int +scsih_is_raid(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); + + if (ioc->is_warpdrive) + return 0; + return (sdev->channel == RAID_CHANNEL) ? 1 : 0; +} + +static int +scsih_is_nvme(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return (sdev->channel == PCIE_CHANNEL) ? 1 : 0; +} + +/** + * scsih_get_resync - get raid volume resync percent complete + * @dev: the device struct object + */ +static void +scsih_get_resync(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); + static struct _raid_device *raid_device; + unsigned long flags; + Mpi2RaidVolPage0_t vol_pg0; + Mpi2ConfigReply_t mpi_reply; + u32 volume_status_flags; + u8 percent_complete; + u16 handle; + + percent_complete = 0; + handle = 0; + if (ioc->is_warpdrive) + goto out; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, + sdev->channel); + if (raid_device) { + handle = raid_device->handle; + percent_complete = raid_device->percent_complete; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + + if (!handle) + goto out; + + if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, + MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, + sizeof(Mpi2RaidVolPage0_t))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + percent_complete = 0; + goto out; + } + + volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags); + if (!(volume_status_flags & + MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)) + percent_complete = 0; + + out: + + switch (ioc->hba_mpi_version_belonged) { + case MPI2_VERSION: + raid_set_resync(mpt2sas_raid_template, dev, percent_complete); + break; + case MPI25_VERSION: + case MPI26_VERSION: + raid_set_resync(mpt3sas_raid_template, dev, percent_complete); + break; + } +} + +/** + * scsih_get_state - get raid volume level + * @dev: the device struct object + */ +static void +scsih_get_state(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); + static struct _raid_device *raid_device; + unsigned long flags; + Mpi2RaidVolPage0_t vol_pg0; + Mpi2ConfigReply_t mpi_reply; + u32 volstate; + enum raid_state state = RAID_STATE_UNKNOWN; + u16 handle = 0; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, + sdev->channel); + if (raid_device) + handle = raid_device->handle; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + + if (!raid_device) + goto out; + + if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, + MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, + sizeof(Mpi2RaidVolPage0_t))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags); + if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) { + state = RAID_STATE_RESYNCING; + goto out; + } + + switch (vol_pg0.VolumeState) { + case MPI2_RAID_VOL_STATE_OPTIMAL: + case MPI2_RAID_VOL_STATE_ONLINE: + state = RAID_STATE_ACTIVE; + break; + case MPI2_RAID_VOL_STATE_DEGRADED: + state = RAID_STATE_DEGRADED; + break; + case MPI2_RAID_VOL_STATE_FAILED: + case MPI2_RAID_VOL_STATE_MISSING: + state = 
RAID_STATE_OFFLINE; + break; + } + out: + switch (ioc->hba_mpi_version_belonged) { + case MPI2_VERSION: + raid_set_state(mpt2sas_raid_template, dev, state); + break; + case MPI25_VERSION: + case MPI26_VERSION: + raid_set_state(mpt3sas_raid_template, dev, state); + break; + } +} + +/** + * _scsih_set_level - set raid level + * @ioc: ? + * @sdev: scsi device struct + * @volume_type: volume type + */ +static void +_scsih_set_level(struct MPT3SAS_ADAPTER *ioc, + struct scsi_device *sdev, u8 volume_type) +{ + enum raid_level level = RAID_LEVEL_UNKNOWN; + + switch (volume_type) { + case MPI2_RAID_VOL_TYPE_RAID0: + level = RAID_LEVEL_0; + break; + case MPI2_RAID_VOL_TYPE_RAID10: + level = RAID_LEVEL_10; + break; + case MPI2_RAID_VOL_TYPE_RAID1E: + level = RAID_LEVEL_1E; + break; + case MPI2_RAID_VOL_TYPE_RAID1: + level = RAID_LEVEL_1; + break; + } + + switch (ioc->hba_mpi_version_belonged) { + case MPI2_VERSION: + raid_set_level(mpt2sas_raid_template, + &sdev->sdev_gendev, level); + break; + case MPI25_VERSION: + case MPI26_VERSION: + raid_set_level(mpt3sas_raid_template, + &sdev->sdev_gendev, level); + break; + } +} + + +/** + * _scsih_get_volume_capabilities - volume capabilities + * @ioc: per adapter object + * @raid_device: the raid_device object + * + * Return: 0 for success, else 1 + */ +static int +_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc, + struct _raid_device *raid_device) +{ + Mpi2RaidVolPage0_t *vol_pg0; + Mpi2RaidPhysDiskPage0_t pd_pg0; + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 sz; + u8 num_pds; + + if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle, + &num_pds)) || !num_pds) { + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + return 1; + } + + raid_device->num_pds = num_pds; + sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds * + sizeof(Mpi2RaidVol0PhysDisk_t)); + vol_pg0 = kzalloc(sz, GFP_KERNEL); + if (!vol_pg0) { + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + return 1; + } + + if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, + MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + kfree(vol_pg0); + return 1; + } + + raid_device->volume_type = vol_pg0->VolumeType; + + /* figure out what the underlying devices are by + * obtaining the device_info bits for the 1st device + */ + if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, + vol_pg0->PhysDisk[0].PhysDiskNum))) { + if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + le16_to_cpu(pd_pg0.DevHandle)))) { + raid_device->device_info = + le32_to_cpu(sas_device_pg0.DeviceInfo); + } + } + + kfree(vol_pg0); + return 0; +} + +/** + * _scsih_enable_tlr - setting TLR flags + * @ioc: per adapter object + * @sdev: scsi device struct + * + * Enabling Transaction Layer Retries for tape devices when + * vpd page 0x90 is present + * + */ +static void +_scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev) +{ + + /* only for TAPE */ + if (sdev->type != TYPE_TAPE) + return; + + if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)) + return; + + sas_enable_tlr(sdev); + sdev_printk(KERN_INFO, sdev, "TLR %s\n", + sas_is_tlr_enabled(sdev) ? 
"Enabled" : "Disabled"); + return; + +} + +/** + * scsih_slave_configure - device configure routine. + * @sdev: scsi device struct + * + * Return: 0 if ok. Any other return is assumed to be an error and + * the device is ignored. + */ +static int +scsih_slave_configure(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + struct _pcie_device *pcie_device; + struct _raid_device *raid_device; + unsigned long flags; + int qdepth; + u8 ssp_target = 0; + char *ds = ""; + char *r_level = ""; + u16 handle, volume_handle = 0; + u64 volume_wwid = 0; + + qdepth = 1; + sas_device_priv_data = sdev->hostdata; + sas_device_priv_data->configured_lun = 1; + sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT; + sas_target_priv_data = sas_device_priv_data->sas_target; + handle = sas_target_priv_data->handle; + + /* raid volume handling */ + if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) { + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (!raid_device) { + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + return 1; + } + + if (_scsih_get_volume_capabilities(ioc, raid_device)) { + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + return 1; + } + + /* + * WARPDRIVE: Initialize the required data for Direct IO + */ + mpt3sas_init_warpdrive_properties(ioc, raid_device); + + /* RAID Queue Depth Support + * IS volume = underlying qdepth of drive type, either + * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH + * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH) + */ + if (raid_device->device_info & + MPI2_SAS_DEVICE_INFO_SSP_TARGET) { + qdepth = MPT3SAS_SAS_QUEUE_DEPTH; + ds = "SSP"; + } else { + qdepth = MPT3SAS_SATA_QUEUE_DEPTH; + if (raid_device->device_info & + MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "SATA"; + else + ds = "STP"; + } + + switch (raid_device->volume_type) { + case MPI2_RAID_VOL_TYPE_RAID0: + r_level = "RAID0"; + break; + case MPI2_RAID_VOL_TYPE_RAID1E: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + if (ioc->manu_pg10.OEMIdentifier && + (le32_to_cpu(ioc->manu_pg10.GenericFlags0) & + MFG10_GF0_R10_DISPLAY) && + !(raid_device->num_pds % 2)) + r_level = "RAID10"; + else + r_level = "RAID1E"; + break; + case MPI2_RAID_VOL_TYPE_RAID1: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + r_level = "RAID1"; + break; + case MPI2_RAID_VOL_TYPE_RAID10: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + r_level = "RAID10"; + break; + case MPI2_RAID_VOL_TYPE_UNKNOWN: + default: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + r_level = "RAIDX"; + break; + } + + if (!ioc->hide_ir_msg) + sdev_printk(KERN_INFO, sdev, + "%s: handle(0x%04x), wwid(0x%016llx)," + " pd_count(%d), type(%s)\n", + r_level, raid_device->handle, + (unsigned long long)raid_device->wwid, + raid_device->num_pds, ds); + + if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) { + blk_queue_max_hw_sectors(sdev->request_queue, + MPT3SAS_RAID_MAX_SECTORS); + sdev_printk(KERN_INFO, sdev, + "Set queue's max_sector to: %u\n", + MPT3SAS_RAID_MAX_SECTORS); + } + + mpt3sas_scsih_change_queue_depth(sdev, qdepth); + + /* raid transport support */ + if (!ioc->is_warpdrive) + _scsih_set_level(ioc, sdev, raid_device->volume_type); + return 0; + } + + /* non-raid handling */ 
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { + if (mpt3sas_config_get_volume_handle(ioc, handle, + &volume_handle)) { + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + return 1; + } + if (volume_handle && mpt3sas_config_get_volume_wwid(ioc, + volume_handle, &volume_wwid)) { + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + return 1; + } + } + + /* PCIe handling */ + if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, + sas_device_priv_data->sas_target->sas_address); + if (!pcie_device) { + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + return 1; + } + + qdepth = ioc->max_nvme_qd; + ds = "NVMe"; + sdev_printk(KERN_INFO, sdev, + "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n", + ds, handle, (unsigned long long)pcie_device->wwid, + pcie_device->port_num); + if (pcie_device->enclosure_handle != 0) + sdev_printk(KERN_INFO, sdev, + "%s: enclosure logical id(0x%016llx), slot(%d)\n", + ds, + (unsigned long long)pcie_device->enclosure_logical_id, + pcie_device->slot); + if (pcie_device->connector_name[0] != '\0') + sdev_printk(KERN_INFO, sdev, + "%s: enclosure level(0x%04x)," + "connector name( %s)\n", ds, + pcie_device->enclosure_level, + pcie_device->connector_name); + + if (pcie_device->nvme_mdts) + blk_queue_max_hw_sectors(sdev->request_queue, + pcie_device->nvme_mdts/512); + + pcie_device_put(pcie_device); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + mpt3sas_scsih_change_queue_depth(sdev, qdepth); + /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be + ** merged and can eliminate holes created during merging + ** operation. + **/ + blk_queue_flag_set(QUEUE_FLAG_NOMERGES, + sdev->request_queue); + blk_queue_virt_boundary(sdev->request_queue, + ioc->page_size - 1); + return 0; + } + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + sas_device_priv_data->sas_target->sas_address, + sas_device_priv_data->sas_target->port); + if (!sas_device) { + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + return 1; + } + + sas_device->volume_handle = volume_handle; + sas_device->volume_wwid = volume_wwid; + if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { + qdepth = (sas_device->port_type > 1) ? 
+ ioc->max_wideport_qd : ioc->max_narrowport_qd; + ssp_target = 1; + if (sas_device->device_info & + MPI2_SAS_DEVICE_INFO_SEP) { + sdev_printk(KERN_WARNING, sdev, + "set ignore_delay_remove for handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + sas_device_priv_data->ignore_delay_remove = 1; + ds = "SES"; + } else + ds = "SSP"; + } else { + qdepth = ioc->max_sata_qd; + if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) + ds = "STP"; + else if (sas_device->device_info & + MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "SATA"; + } + + sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \ + "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", + ds, handle, (unsigned long long)sas_device->sas_address, + sas_device->phy, (unsigned long long)sas_device->device_name); + + _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL); + + sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (!ssp_target) + _scsih_display_sata_capabilities(ioc, handle, sdev); + + + mpt3sas_scsih_change_queue_depth(sdev, qdepth); + + if (ssp_target) { + sas_read_port_mode_page(sdev); + _scsih_enable_tlr(ioc, sdev); + } + + return 0; +} + +/** + * scsih_bios_param - fetch head, sector, cylinder info for a disk + * @sdev: scsi device struct + * @bdev: pointer to block device context + * @capacity: device size (in 512 byte sectors) + * @params: three element array to place output: + * params[0] number of heads (max 255) + * params[1] number of sectors (max 63) + * params[2] number of cylinders + */ +static int +scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int params[]) +{ + int heads; + int sectors; + sector_t cylinders; + ulong dummy; + + heads = 64; + sectors = 32; + + dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + + /* + * Handle extended translation size for logical drives + * > 1Gb + */ + if ((ulong)capacity >= 0x200000) { + heads = 255; + sectors = 63; + dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + } + + /* return result */ + params[0] = heads; + params[1] = sectors; + params[2] = cylinders; + + return 0; +} + +/** + * _scsih_response_code - translation of device response code + * @ioc: per adapter object + * @response_code: response code returned by the device + */ +static void +_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code) +{ + char *desc; + + switch (response_code) { + case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: + desc = "task management request completed"; + break; + case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: + desc = "invalid frame"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: + desc = "task management request not supported"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_FAILED: + desc = "task management request failed"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: + desc = "task management request succeeded"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: + desc = "invalid lun"; + break; + case 0xA: + desc = "overlapped tag attempted"; + break; + case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: + desc = "task queued, however not sent to target"; + break; + default: + desc = "unknown"; + break; + } + ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc); +} + +/** + * _scsih_tm_done - tm completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + 
 * Context: none.
+ *
+ * The callback handler when using scsih_issue_tm.
+ *
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+	MPI2DefaultReply_t *mpi_reply;
+
+	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
+		return 1;
+	if (ioc->tm_cmds.smid != smid)
+		return 1;
+	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
+	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+	if (mpi_reply) {
+		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
+	}
+	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
+	complete(&ioc->tm_cmds.done);
+	return 1;
+}
+
+/**
+ * mpt3sas_scsih_set_tm_flag - set per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During a task management request, we need to freeze the device queue.
+ */
+void
+mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+	struct MPT3SAS_DEVICE *sas_device_priv_data;
+	struct scsi_device *sdev;
+	u8 skip = 0;
+
+	shost_for_each_device(sdev, ioc->shost) {
+		if (skip)
+			continue;
+		sas_device_priv_data = sdev->hostdata;
+		if (!sas_device_priv_data)
+			continue;
+		if (sas_device_priv_data->sas_target->handle == handle) {
+			sas_device_priv_data->sas_target->tm_busy = 1;
+			skip = 1;
+			ioc->ignore_loginfos = 1;
+		}
+	}
+}
+
+/**
+ * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During a task management request, we need to freeze the device queue.
+ */
+void
+mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+	struct MPT3SAS_DEVICE *sas_device_priv_data;
+	struct scsi_device *sdev;
+	u8 skip = 0;
+
+	shost_for_each_device(sdev, ioc->shost) {
+		if (skip)
+			continue;
+		sas_device_priv_data = sdev->hostdata;
+		if (!sas_device_priv_data)
+			continue;
+		if (sas_device_priv_data->sas_target->handle == handle) {
+			sas_device_priv_data->sas_target->tm_busy = 0;
+			skip = 1;
+			ioc->ignore_loginfos = 0;
+		}
+	}
+}
+
+/**
+ * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
+ * @ioc: per adapter object
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ *
+ * Look at whether the TM has aborted the timed out SCSI command; if
+ * it has aborted the IO, return SUCCESS, else return FAILED.
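+ *
+ * The check depends on which request the TM targeted: a normal SCSI IO
+ * smid is looked up through the scsi_lookup helpers above, while the
+ * driver's internal scsih_cmds/ctl_cmds smids are judged from their
+ * command status flags.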
+ */ +static int +scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel, + uint id, uint lun, u8 type, u16 smid_task) +{ + + if (smid_task <= ioc->shost->can_queue) { + switch (type) { + case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + if (!(_scsih_scsi_lookup_find_by_target(ioc, + id, channel))) + return SUCCESS; + break; + case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + if (!(_scsih_scsi_lookup_find_by_lun(ioc, id, + lun, channel))) + return SUCCESS; + break; + default: + return SUCCESS; + } + } else if (smid_task == ioc->scsih_cmds.smid) { + if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) || + (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED)) + return SUCCESS; + } else if (smid_task == ioc->ctl_cmds.smid) { + if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) || + (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED)) + return SUCCESS; + } + + return FAILED; +} + +/** + * scsih_tm_post_processing - post processing of target & LUN reset + * @ioc: per adapter object + * @handle: device handle + * @channel: the channel assigned by the OS + * @id: the id assigned by the OS + * @lun: lun number + * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) + * @smid_task: smid assigned to the task + * + * Post processing of target & LUN reset. Due to interrupt latency + * issue it possible that interrupt for aborted IO might not be + * received yet. So before returning failure status, poll the + * reply descriptor pools for the reply of timed out SCSI command. + * Return FAILED status if reply for timed out is not received + * otherwise return SUCCESS. + */ +static int +scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, u16 smid_task) +{ + int rc; + + rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task); + if (rc == SUCCESS) + return rc; + + ioc_info(ioc, + "Poll ReplyDescriptor queues for completion of" + " smid(%d), task_type(0x%02x), handle(0x%04x)\n", + smid_task, type, handle); + + /* + * Due to interrupt latency issues, driver may receive interrupt for + * TM first and then for aborted SCSI IO command. So, poll all the + * ReplyDescriptor pools before returning the FAILED status to SML. + */ + mpt3sas_base_mask_interrupts(ioc); + mpt3sas_base_sync_reply_irqs(ioc, 1); + mpt3sas_base_unmask_interrupts(ioc); + + return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task); +} + +/** + * mpt3sas_scsih_issue_tm - main routine for sending tm requests + * @ioc: per adapter struct + * @handle: device handle + * @channel: the channel assigned by the OS + * @id: the id assigned by the OS + * @lun: lun number + * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) + * @smid_task: smid assigned to the task + * @msix_task: MSIX table index supplied by the OS + * @timeout: timeout in seconds + * @tr_method: Target Reset Method + * Context: user + * + * A generic API for sending task management requests to firmware. + * + * The callback index is set inside `ioc->tm_cb_idx`. + * The caller is responsible to check for outstanding commands. + * + * Return: SUCCESS or FAILED. 
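+ *
+ * Illustrative only - the EH callbacks below go through the
+ * mutex-holding wrapper, e.g. scsih_abort() issues roughly:
+ *
+ *	mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+ *		scmd->device->id, scmd->device->lun,
+ *		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ *		st->smid, st->msix_io, timeout, 0);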
+ */ +int +mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, + uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task, + u8 timeout, u8 tr_method) +{ + Mpi2SCSITaskManagementRequest_t *mpi_request; + Mpi2SCSITaskManagementReply_t *mpi_reply; + Mpi25SCSIIORequest_t *request; + u16 smid = 0; + u32 ioc_state; + int rc; + u8 issue_reset = 0; + + lockdep_assert_held(&ioc->tm_cmds.mutex); + + if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) { + ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__); + return FAILED; + } + + if (ioc->shost_recovery || ioc->remove_host || + ioc->pci_error_recovery) { + ioc_info(ioc, "%s: host reset in progress!\n", __func__); + return FAILED; + } + + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + if (ioc_state & MPI2_DOORBELL_USED) { + dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n")); + rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } else if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } + + smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + return FAILED; + } + + dtmprintk(ioc, + ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n", + handle, type, smid_task, timeout, tr_method)); + ioc->tm_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->tm_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); + memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = type; + if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || + type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) + mpi_request->MsgFlags = tr_method; + mpi_request->TaskMID = cpu_to_le16(smid_task); + int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); + mpt3sas_scsih_set_tm_flag(ioc, handle); + init_completion(&ioc->tm_cmds.done); + ioc->put_smid_hi_priority(ioc, smid, msix_task); + wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); + if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->tm_cmds.status, mpi_request, + sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset); + if (issue_reset) { + rc = mpt3sas_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER); + rc = (!rc) ? SUCCESS : FAILED; + goto out; + } + } + + /* sync IRQs in case those were busy during flush. 
 */
+	mpt3sas_base_sync_reply_irqs(ioc, 0);
+
+	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
+		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
+		mpi_reply = ioc->tm_cmds.reply;
+		dtmprintk(ioc,
+		    ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
+			     le16_to_cpu(mpi_reply->IOCStatus),
+			     le32_to_cpu(mpi_reply->IOCLogInfo),
+			     le32_to_cpu(mpi_reply->TerminationCount)));
+		if (ioc->logging_level & MPT_DEBUG_TM) {
+			_scsih_response_code(ioc, mpi_reply->ResponseCode);
+			if (mpi_reply->IOCStatus)
+				_debug_dump_mf(mpi_request,
+				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+		}
+	}
+
+	switch (type) {
+	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+		rc = SUCCESS;
+		/*
+		 * If the DevHandle field in smid_task's entry of the request
+		 * pool doesn't match the device handle on which this task
+		 * abort TM was issued, it means that the TM has successfully
+		 * aborted the timed out command, since smid_task's entry in
+		 * the request pool is memset to zero once the timed out
+		 * command is returned to the SML. If the command was not
+		 * aborted, then smid_task's entry won't be cleared; it will
+		 * still hold the same DevHandle on which this task abort TM
+		 * was issued, and the driver will return the TM status as
+		 * FAILED.
+		 */
+		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
+		if (le16_to_cpu(request->DevHandle) != handle)
+			break;
+
+		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
+		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
+		    handle, timeout, tr_method, smid_task, msix_task);
+		rc = FAILED;
+		break;
+
+	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
+		    type, smid_task);
+		break;
+	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+		rc = SUCCESS;
+		break;
+	default:
+		rc = FAILED;
+		break;
+	}
+
+out:
+	mpt3sas_scsih_clear_tm_flag(ioc, handle);
+	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
+	return rc;
+}
+
+int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+	uint channel, uint id, u64 lun, u8 type, u16 smid_task,
+	u16 msix_task, u8 timeout, u8 tr_method)
+{
+	int ret;
+
+	mutex_lock(&ioc->tm_cmds.mutex);
+	ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
+	    smid_task, msix_task, timeout, tr_method);
+	mutex_unlock(&ioc->tm_cmds.mutex);
+
+	return ret;
+}
+
+/**
+ * _scsih_tm_display_info - displays info about the device
+ * @ioc: per adapter struct
+ * @scmd: pointer to scsi command object
+ *
+ * Called by task management callback handlers.
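+ * (scsih_abort(), scsih_dev_reset() and scsih_target_reset() below).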
+ */ +static void +_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd) +{ + struct scsi_target *starget = scmd->device->sdev_target; + struct MPT3SAS_TARGET *priv_target = starget->hostdata; + struct _sas_device *sas_device = NULL; + struct _pcie_device *pcie_device = NULL; + unsigned long flags; + char *device_str = NULL; + + if (!priv_target) + return; + if (ioc->hide_ir_msg) + device_str = "WarpDrive"; + else + device_str = "volume"; + + scsi_print_command(scmd); + if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { + starget_printk(KERN_INFO, starget, + "%s handle(0x%04x), %s wwid(0x%016llx)\n", + device_str, priv_target->handle, + device_str, (unsigned long long)priv_target->sas_address); + + } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target); + if (pcie_device) { + starget_printk(KERN_INFO, starget, + "handle(0x%04x), wwid(0x%016llx), port(%d)\n", + pcie_device->handle, + (unsigned long long)pcie_device->wwid, + pcie_device->port_num); + if (pcie_device->enclosure_handle != 0) + starget_printk(KERN_INFO, starget, + "enclosure logical id(0x%016llx), slot(%d)\n", + (unsigned long long) + pcie_device->enclosure_logical_id, + pcie_device->slot); + if (pcie_device->connector_name[0] != '\0') + starget_printk(KERN_INFO, starget, + "enclosure level(0x%04x), connector name( %s)\n", + pcie_device->enclosure_level, + pcie_device->connector_name); + pcie_device_put(pcie_device); + } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + } else { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target); + if (sas_device) { + if (priv_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) { + starget_printk(KERN_INFO, starget, + "volume handle(0x%04x), " + "volume wwid(0x%016llx)\n", + sas_device->volume_handle, + (unsigned long long)sas_device->volume_wwid); + } + starget_printk(KERN_INFO, starget, + "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", + sas_device->handle, + (unsigned long long)sas_device->sas_address, + sas_device->phy); + + _scsih_display_enclosure_chassis_info(NULL, sas_device, + NULL, starget); + + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } +} + +/** + * scsih_abort - eh threads main abort routine + * @scmd: pointer to scsi command object + * + * Return: SUCCESS if command aborted else FAILED + */ +static int +scsih_abort(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsiio_tracker *st = scsi_cmd_priv(scmd); + u16 handle; + int r; + + u8 timeout = 30; + struct _pcie_device *pcie_device = NULL; + sdev_printk(KERN_INFO, scmd->device, "attempting task abort!" + "scmd(0x%p), outstanding for %u ms & timeout %u ms\n", + scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc), + (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000); + _scsih_tm_display_info(ioc, scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, + "device been deleted! 
scmd(0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + r = SUCCESS; + goto out; + } + + /* check for completed command */ + if (st == NULL || st->cb_idx == 0xFF) { + sdev_printk(KERN_INFO, scmd->device, "No reference found at " + "driver, assuming scmd(0x%p) might have completed\n", scmd); + scmd->result = DID_RESET << 16; + r = SUCCESS; + goto out; + } + + /* for hidden raid components and volumes this is not supported */ + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT || + sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + + mpt3sas_halt_firmware(ioc); + + handle = sas_device_priv_data->sas_target->handle; + pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) + timeout = ioc->nvme_abort_timeout; + r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, + scmd->device->id, scmd->device->lun, + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, + st->smid, st->msix_io, timeout, 0); + /* Command must be cleared after abort */ + if (r == SUCCESS && st->cb_idx != 0xFF) + r = FAILED; + out: + sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + if (pcie_device) + pcie_device_put(pcie_device); + return r; +} + +/** + * scsih_dev_reset - eh threads main device reset routine + * @scmd: pointer to scsi command object + * + * Return: SUCCESS if command aborted else FAILED + */ +static int +scsih_dev_reset(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct _sas_device *sas_device = NULL; + struct _pcie_device *pcie_device = NULL; + u16 handle; + u8 tr_method = 0; + u8 tr_timeout = 30; + int r; + + struct scsi_target *starget = scmd->device->sdev_target; + struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; + + sdev_printk(KERN_INFO, scmd->device, + "attempting device reset! scmd(0x%p)\n", scmd); + _scsih_tm_display_info(ioc, scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, + "device been deleted! 
scmd(0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + r = SUCCESS; + goto out; + } + + /* for hidden raid components obtain the volume_handle */ + handle = 0; + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) { + sas_device = mpt3sas_get_sdev_from_target(ioc, + target_priv_data); + if (sas_device) + handle = sas_device->volume_handle; + } else + handle = sas_device_priv_data->sas_target->handle; + + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + + pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); + + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { + tr_timeout = pcie_device->reset_timeout; + tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; + } else + tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + + r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, + scmd->device->id, scmd->device->lun, + MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, + tr_timeout, tr_method); + /* Check for busy commands after reset */ + if (r == SUCCESS && scsi_device_busy(scmd->device)) + r = FAILED; + out: + sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + + if (sas_device) + sas_device_put(sas_device); + if (pcie_device) + pcie_device_put(pcie_device); + + return r; +} + +/** + * scsih_target_reset - eh threads main target reset routine + * @scmd: pointer to scsi command object + * + * Return: SUCCESS if command aborted else FAILED + */ +static int +scsih_target_reset(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct _sas_device *sas_device = NULL; + struct _pcie_device *pcie_device = NULL; + u16 handle; + u8 tr_method = 0; + u8 tr_timeout = 30; + int r; + struct scsi_target *starget = scmd->device->sdev_target; + struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; + + starget_printk(KERN_INFO, starget, + "attempting target reset! scmd(0x%p)\n", scmd); + _scsih_tm_display_info(ioc, scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + ioc->remove_host) { + starget_printk(KERN_INFO, starget, + "target been deleted! 
scmd(0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + r = SUCCESS; + goto out; + } + + /* for hidden raid components obtain the volume_handle */ + handle = 0; + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) { + sas_device = mpt3sas_get_sdev_from_target(ioc, + target_priv_data); + if (sas_device) + handle = sas_device->volume_handle; + } else + handle = sas_device_priv_data->sas_target->handle; + + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + + pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); + + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { + tr_timeout = pcie_device->reset_timeout; + tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; + } else + tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, + scmd->device->id, 0, + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, + tr_timeout, tr_method); + /* Check for busy commands after reset */ + if (r == SUCCESS && atomic_read(&starget->target_busy)) + r = FAILED; + out: + starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + + if (sas_device) + sas_device_put(sas_device); + if (pcie_device) + pcie_device_put(pcie_device); + return r; +} + + +/** + * scsih_host_reset - eh threads main host reset routine + * @scmd: pointer to scsi command object + * + * Return: SUCCESS if command aborted else FAILED + */ +static int +scsih_host_reset(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + int r, retval; + + ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd); + scsi_print_command(scmd); + + if (ioc->is_driver_loading || ioc->remove_host) { + ioc_info(ioc, "Blocking the host reset\n"); + r = FAILED; + goto out; + } + + retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + r = (retval < 0) ? FAILED : SUCCESS; +out: + ioc_info(ioc, "host reset: %s scmd(0x%p)\n", + r == SUCCESS ? "SUCCESS" : "FAILED", scmd); + + return r; +} + +/** + * _scsih_fw_event_add - insert and queue up fw_event + * @ioc: per adapter object + * @fw_event: object describing the event + * Context: This function will acquire ioc->fw_event_lock. + * + * This adds the firmware event object into link list, then queues it up to + * be processed from user context. + */ +static void +_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) +{ + unsigned long flags; + + if (ioc->firmware_event_thread == NULL) + return; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + fw_event_work_get(fw_event); + INIT_LIST_HEAD(&fw_event->list); + list_add_tail(&fw_event->list, &ioc->fw_event_list); + INIT_WORK(&fw_event->work, _firmware_event_work); + fw_event_work_get(fw_event); + queue_work(ioc->firmware_event_thread, &fw_event->work); + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +/** + * _scsih_fw_event_del_from_list - delete fw_event from the list + * @ioc: per adapter object + * @fw_event: object describing the event + * Context: This function will acquire ioc->fw_event_lock. + * + * If the fw_event is on the fw_event_list, remove it and do a put. 
+ */ +static void +_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work + *fw_event) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + if (!list_empty(&fw_event->list)) { + list_del_init(&fw_event->list); + fw_event_work_put(fw_event); + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + + + /** + * mpt3sas_send_trigger_data_event - send event for processing trigger data + * @ioc: per adapter object + * @event_data: trigger event data + */ +void +mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) +{ + struct fw_event_work *fw_event; + u16 sz; + + if (ioc->is_driver_loading) + return; + sz = sizeof(*event_data); + fw_event = alloc_fw_event_work(sz); + if (!fw_event) + return; + fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG; + fw_event->ioc = ioc; + memcpy(fw_event->event_data, event_data, sizeof(*event_data)); + _scsih_fw_event_add(ioc, fw_event); + fw_event_work_put(fw_event); +} + +/** + * _scsih_error_recovery_delete_devices - remove devices not responding + * @ioc: per adapter object + */ +static void +_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc) +{ + struct fw_event_work *fw_event; + + fw_event = alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES; + fw_event->ioc = ioc; + _scsih_fw_event_add(ioc, fw_event); + fw_event_work_put(fw_event); +} + +/** + * mpt3sas_port_enable_complete - port enable completed (fake event) + * @ioc: per adapter object + */ +void +mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc) +{ + struct fw_event_work *fw_event; + + fw_event = alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE; + fw_event->ioc = ioc; + _scsih_fw_event_add(ioc, fw_event); + fw_event_work_put(fw_event); +} + +static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned long flags; + struct fw_event_work *fw_event = NULL; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + if (!list_empty(&ioc->fw_event_list)) { + fw_event = list_first_entry(&ioc->fw_event_list, + struct fw_event_work, list); + list_del_init(&fw_event->list); + fw_event_work_put(fw_event); + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + + return fw_event; +} + +/** + * _scsih_fw_event_cleanup_queue - cleanup event queue + * @ioc: per adapter object + * + * Walk the firmware event queue, either killing timers, or waiting + * for outstanding events to complete + * + * Context: task, can sleep + */ +static void +_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) +{ + struct fw_event_work *fw_event; + + if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) || + !ioc->firmware_event_thread) + return; + /* + * Set current running event as ignore, so that + * current running event will exit quickly. + * As diag reset has occurred it is of no use + * to process remaining stale event data entries. + */ + if (ioc->shost_recovery && ioc->current_event) + ioc->current_event->ignore = 1; + + ioc->fw_events_cleanup = 1; + while ((fw_event = dequeue_next_fw_event(ioc)) || + (fw_event = ioc->current_event)) { + + /* + * Don't call cancel_work_sync() for current_event + * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES; + * otherwise we may observe deadlock if current + * hard reset issued as part of processing the current_event. 
+		 *
+		 * The original logic of cleaning the current_event was added
+		 * for handling the back to back host resets issued by the user,
+		 * i.e. during back to back host resets the driver used to process
+		 * the two instances of the MPT3SAS_REMOVE_UNRESPONDING_DEVICES
+		 * event back to back, and this made the driver unregister
+		 * the devices from SML.
+		 */
+
+		if (fw_event == ioc->current_event &&
+		    ioc->current_event->event !=
+		    MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
+			ioc->current_event = NULL;
+			continue;
+		}
+
+		/*
+		 * Driver has to clear ioc->start_scan flag when
+		 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
+		 * otherwise scsi_scan_host() API waits for the
+		 * 5 minute timer to expire. If we exit from
+		 * scsi_scan_host() early then we can issue the
+		 * new port enable request as part of current diag reset.
+		 */
+		if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
+			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
+			ioc->start_scan = 0;
+		}
+
+		/*
+		 * Wait on the fw_event to complete. If this returns 1, then
+		 * the event was never executed, and we need a put for the
+		 * reference the work had on the fw_event.
+		 *
+		 * If it did execute, we wait for it to finish, and the put will
+		 * happen from _firmware_event_work()
+		 */
+		if (cancel_work_sync(&fw_event->work))
+			fw_event_work_put(fw_event);
+
+	}
+	ioc->fw_events_cleanup = 0;
+}
+
+/**
+ * _scsih_internal_device_block - block the sdev device
+ * @sdev: per device object
+ * @sas_device_priv_data: per device driver private data
+ *
+ * Make sure the device is blocked without error; if not,
+ * print an error.
+ */
+static void
+_scsih_internal_device_block(struct scsi_device *sdev,
+			struct MPT3SAS_DEVICE *sas_device_priv_data)
+{
+	int r = 0;
+
+	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
+	    sas_device_priv_data->sas_target->handle);
+	sas_device_priv_data->block = 1;
+
+	r = scsi_internal_device_block_nowait(sdev);
+	if (r == -EINVAL)
+		sdev_printk(KERN_WARNING, sdev,
+		    "device_block failed with return(%d) for handle(0x%04x)\n",
+		    r, sas_device_priv_data->sas_target->handle);
+}
+
+/**
+ * _scsih_internal_device_unblock - unblock the sdev device
+ * @sdev: per device object
+ * @sas_device_priv_data: per device driver private data
+ *
+ * Make sure the device is unblocked without error; if not, retry
+ * by blocking and then unblocking.
+ */
+
+static void
+_scsih_internal_device_unblock(struct scsi_device *sdev,
+			struct MPT3SAS_DEVICE *sas_device_priv_data)
+{
+	int r = 0;
+
+	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
+	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
+	sas_device_priv_data->block = 0;
+	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+	if (r == -EINVAL) {
+		/* The device has been set to SDEV_RUNNING by SD layer during
+		 * device addition but the request queue is still stopped by
+		 * our earlier block call.
We need to perform a block again + * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */ + + sdev_printk(KERN_WARNING, sdev, + "device_unblock failed with return(%d) for handle(0x%04x) " + "performing a block followed by an unblock\n", + r, sas_device_priv_data->sas_target->handle); + sas_device_priv_data->block = 1; + r = scsi_internal_device_block_nowait(sdev); + if (r) + sdev_printk(KERN_WARNING, sdev, "retried device_block " + "failed with return(%d) for handle(0x%04x)\n", + r, sas_device_priv_data->sas_target->handle); + + sas_device_priv_data->block = 0; + r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); + if (r) + sdev_printk(KERN_WARNING, sdev, "retried device_unblock" + " failed with return(%d) for handle(0x%04x)\n", + r, sas_device_priv_data->sas_target->handle); + } +} + +/** + * _scsih_ublock_io_all_device - unblock every device + * @ioc: per adapter object + * + * change the device state from block to running + */ +static void +_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (!sas_device_priv_data->block) + continue; + + dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, + "device_running, handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle)); + _scsih_internal_device_unblock(sdev, sas_device_priv_data); + } +} + + +/** + * _scsih_ublock_io_device - prepare device to be deleted + * @ioc: per adapter object + * @sas_address: sas address + * @port: hba port entry + * + * unblock then put device in offline state + */ +static void +_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct hba_port *port) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) + continue; + if (sas_device_priv_data->sas_target->sas_address + != sas_address) + continue; + if (sas_device_priv_data->sas_target->port != port) + continue; + if (sas_device_priv_data->block) + _scsih_internal_device_unblock(sdev, + sas_device_priv_data); + } +} + +/** + * _scsih_block_io_all_device - set the device state to SDEV_BLOCK + * @ioc: per adapter object + * + * During device pull we need to appropriately set the sdev state. + */ +static void +_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->block) + continue; + if (sas_device_priv_data->ignore_delay_remove) { + sdev_printk(KERN_INFO, sdev, + "%s skip device_block for SES handle(0x%04x)\n", + __func__, sas_device_priv_data->sas_target->handle); + continue; + } + _scsih_internal_device_block(sdev, sas_device_priv_data); + } +} + +/** + * _scsih_block_io_device - set the device state to SDEV_BLOCK + * @ioc: per adapter object + * @handle: device handle + * + * During device pull we need to appropriately set the sdev state. 
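+ * Devices flagged with ignore_delay_remove (SES devices) and devices
+ * with a pending sas_rphy addition are deliberately left unblocked.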
+ */ +static void +_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + struct _sas_device *sas_device; + + sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->handle != handle) + continue; + if (sas_device_priv_data->block) + continue; + if (sas_device && sas_device->pend_sas_rphy_add) + continue; + if (sas_device_priv_data->ignore_delay_remove) { + sdev_printk(KERN_INFO, sdev, + "%s skip device_block for SES handle(0x%04x)\n", + __func__, sas_device_priv_data->sas_target->handle); + continue; + } + _scsih_internal_device_block(sdev, sas_device_priv_data); + } + + if (sas_device) + sas_device_put(sas_device); +} + +/** + * _scsih_block_io_to_children_attached_to_ex + * @ioc: per adapter object + * @sas_expander: the sas_device object + * + * This routine set sdev state to SDEV_BLOCK for all devices + * attached to this expander. This function called when expander is + * pulled. + */ +static void +_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander) +{ + struct _sas_port *mpt3sas_port; + struct _sas_device *sas_device; + struct _sas_node *expander_sibling; + unsigned long flags; + + if (!sas_expander) + return; + + list_for_each_entry(mpt3sas_port, + &sas_expander->sas_port_list, port_list) { + if (mpt3sas_port->remote_identify.device_type == + SAS_END_DEVICE) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); + if (sas_device) { + set_bit(sas_device->handle, + ioc->blocking_handles); + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + } + + list_for_each_entry(mpt3sas_port, + &sas_expander->sas_port_list, port_list) { + + if (mpt3sas_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE || + mpt3sas_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) { + expander_sibling = + mpt3sas_scsih_expander_find_by_sas_address( + ioc, mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); + _scsih_block_io_to_children_attached_to_ex(ioc, + expander_sibling); + } + } +} + +/** + * _scsih_block_io_to_children_attached_directly + * @ioc: per adapter object + * @event_data: topology change event data + * + * This routine set sdev state to SDEV_BLOCK for all devices + * direct attached during device pull. + */ +static void +_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasTopologyChangeList_t *event_data) +{ + int i; + u16 handle; + u16 reason_code; + + for (i = 0; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + reason_code = event_data->PHY[i].PhyStatus & + MPI2_EVENT_SAS_TOPO_RC_MASK; + if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) + _scsih_block_io_device(ioc, handle); + } +} + +/** + * _scsih_block_io_to_pcie_children_attached_directly + * @ioc: per adapter object + * @event_data: topology change event data + * + * This routine set sdev state to SDEV_BLOCK for all devices + * direct attached during device pull/reconnect. 
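+ *
+ * Only port entries whose PortStatus is
+ * MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING are blocked here;
+ * entries with other status values are left untouched.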
+ */ +static void +_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc, + Mpi26EventDataPCIeTopologyChangeList_t *event_data) +{ + int i; + u16 handle; + u16 reason_code; + + for (i = 0; i < event_data->NumEntries; i++) { + handle = + le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); + if (!handle) + continue; + reason_code = event_data->PortEntry[i].PortStatus; + if (reason_code == + MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING) + _scsih_block_io_device(ioc, handle); + } +} +/** + * _scsih_tm_tr_send - send task management request + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt time. + * + * This code is to initiate the device removal handshake protocol + * with controller firmware. This function will issue target reset + * using high priority request queue. It will send a sas iounit + * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion. + * + * This is designed to send muliple task management request at the same + * time to the fifo. If the fifo is full, we will append the request, + * and process it in a future completion. + */ +static void +_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + Mpi2SCSITaskManagementRequest_t *mpi_request; + u16 smid; + struct _sas_device *sas_device = NULL; + struct _pcie_device *pcie_device = NULL; + struct MPT3SAS_TARGET *sas_target_priv_data = NULL; + u64 sas_address = 0; + unsigned long flags; + struct _tr_list *delayed_tr; + u32 ioc_state; + u8 tr_method = 0; + struct hba_port *port = NULL; + + if (ioc->pci_error_recovery) { + dewtprintk(ioc, + ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n", + __func__, handle)); + return; + } + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, + ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n", + __func__, handle)); + return; + } + + /* if PD, then return */ + if (test_bit(handle, ioc->pd_handles)) + return; + + clear_bit(handle, ioc->pend_os_device_add); + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device && sas_device->starget && + sas_device->starget->hostdata) { + sas_target_priv_data = sas_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + sas_address = sas_device->sas_address; + port = sas_device->port; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!sas_device) { + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle); + if (pcie_device && pcie_device->starget && + pcie_device->starget->hostdata) { + sas_target_priv_data = pcie_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + sas_address = pcie_device->wwid; + } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device( + pcie_device->device_info)))) + tr_method = + MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; + else + tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + } + if (sas_target_priv_data) { + dewtprintk(ioc, + ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n", + handle, (u64)sas_address)); + if (sas_device) { + if (sas_device->enclosure_handle != 0) + dewtprintk(ioc, + ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n", + (u64)sas_device->enclosure_logical_id, + sas_device->slot)); + if (sas_device->connector_name[0] != '\0') + 
dewtprintk(ioc, + ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n", + sas_device->enclosure_level, + sas_device->connector_name)); + } else if (pcie_device) { + if (pcie_device->enclosure_handle != 0) + dewtprintk(ioc, + ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n", + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot)); + if (pcie_device->connector_name[0] != '\0') + dewtprintk(ioc, + ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n", + pcie_device->enclosure_level, + pcie_device->connector_name)); + } + _scsih_ublock_io_device(ioc, sas_address, port); + sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; + } + + smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx); + if (!smid) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + if (!delayed_tr) + goto out; + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); + dewtprintk(ioc, + ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n", + handle)); + goto out; + } + + dewtprintk(ioc, + ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + handle, smid, ioc->tm_tr_cb_idx)); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + mpi_request->MsgFlags = tr_method; + set_bit(handle, ioc->device_remove_in_progress); + ioc->put_smid_hi_priority(ioc, smid, 0); + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL); + +out: + if (sas_device) + sas_device_put(sas_device); + if (pcie_device) + pcie_device_put(pcie_device); +} + +/** + * _scsih_tm_tr_complete - + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt time. + * + * This is the target reset completion routine. + * This code is part of the code to initiate the device removal + * handshake protocol with controller firmware. + * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE) + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
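+ *
+ * If no free smid is available for the sas iounit control request,
+ * the handle is queued on ioc->delayed_sc_list and the request is
+ * issued later from mpt3sas_check_for_pending_internal_cmds().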
+ */ +static u8 +_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + u16 handle; + Mpi2SCSITaskManagementRequest_t *mpi_request_tm; + Mpi2SCSITaskManagementReply_t *mpi_reply = + mpt3sas_base_get_reply_virt_addr(ioc, reply); + Mpi2SasIoUnitControlRequest_t *mpi_request; + u16 smid_sas_ctrl; + u32 ioc_state; + struct _sc_list *delayed_sc; + + if (ioc->pci_error_recovery) { + dewtprintk(ioc, + ioc_info(ioc, "%s: host in pci error recovery\n", + __func__)); + return 1; + } + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, + ioc_info(ioc, "%s: host is not operational\n", + __func__)); + return 1; + } + if (unlikely(!mpi_reply)) { + ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return 1; + } + mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); + handle = le16_to_cpu(mpi_request_tm->DevHandle); + if (handle != le16_to_cpu(mpi_reply->DevHandle)) { + dewtprintk(ioc, + ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", + handle, + le16_to_cpu(mpi_reply->DevHandle), smid)); + return 0; + } + + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); + dewtprintk(ioc, + ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n", + handle, smid, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + + smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx); + if (!smid_sas_ctrl) { + delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC); + if (!delayed_sc) + return _scsih_check_for_pending_tm(ioc, smid); + INIT_LIST_HEAD(&delayed_sc->list); + delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle); + list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list); + dewtprintk(ioc, + ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n", + handle)); + return _scsih_check_for_pending_tm(ioc, smid); + } + + dewtprintk(ioc, + ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx)); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl); + memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; + mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; + mpi_request->DevHandle = mpi_request_tm->DevHandle; + ioc->put_smid_default(ioc, smid_sas_ctrl); + + return _scsih_check_for_pending_tm(ioc, smid); +} + +/** _scsih_allow_scmd_to_device - check whether scmd needs to + * issue to IOC or not. + * @ioc: per adapter object + * @scmd: pointer to scsi command object + * + * Returns true if scmd can be issued to IOC otherwise returns false. + */ +inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + + if (ioc->pci_error_recovery) + return false; + + if (ioc->hba_mpi_version_belonged == MPI2_VERSION) { + if (ioc->remove_host) + return false; + + return true; + } + + if (ioc->remove_host) { + + switch (scmd->cmnd[0]) { + case SYNCHRONIZE_CACHE: + case START_STOP: + return true; + default: + return false; + } + } + + return true; +} + +/** + * _scsih_sas_control_complete - completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt time. 
+ * + * This is the sas iounit control completion routine. + * This code is part of the code to initiate the device removal + * handshake protocol with controller firmware. + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +static u8 +_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + Mpi2SasIoUnitControlReply_t *mpi_reply = + mpt3sas_base_get_reply_virt_addr(ioc, reply); + + if (likely(mpi_reply)) { + dewtprintk(ioc, + ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply->DevHandle), smid, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo))); + if (le16_to_cpu(mpi_reply->IOCStatus) == + MPI2_IOCSTATUS_SUCCESS) { + clear_bit(le16_to_cpu(mpi_reply->DevHandle), + ioc->device_remove_in_progress); + } + } else { + ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + } + return mpt3sas_check_for_pending_internal_cmds(ioc, smid); +} + +/** + * _scsih_tm_tr_volume_send - send target reset request for volumes + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt time. + * + * This is designed to send muliple task management request at the same + * time to the fifo. If the fifo is full, we will append the request, + * and process it in a future completion. + */ +static void +_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + Mpi2SCSITaskManagementRequest_t *mpi_request; + u16 smid; + struct _tr_list *delayed_tr; + + if (ioc->pci_error_recovery) { + dewtprintk(ioc, + ioc_info(ioc, "%s: host reset in progress!\n", + __func__)); + return; + } + + smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx); + if (!smid) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + if (!delayed_tr) + return; + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list); + dewtprintk(ioc, + ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n", + handle)); + return; + } + + dewtprintk(ioc, + ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + handle, smid, ioc->tm_tr_volume_cb_idx)); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + ioc->put_smid_hi_priority(ioc, smid, 0); +} + +/** + * _scsih_tm_volume_tr_complete - target reset completion + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt time. + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
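+ *
+ * On a matching reply this feeds the next queued target reset, if
+ * any, via _scsih_check_for_pending_tm().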
+ */ +static u8 +_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + u16 handle; + Mpi2SCSITaskManagementRequest_t *mpi_request_tm; + Mpi2SCSITaskManagementReply_t *mpi_reply = + mpt3sas_base_get_reply_virt_addr(ioc, reply); + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + dewtprintk(ioc, + ioc_info(ioc, "%s: host reset in progress!\n", + __func__)); + return 1; + } + if (unlikely(!mpi_reply)) { + ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return 1; + } + + mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); + handle = le16_to_cpu(mpi_request_tm->DevHandle); + if (handle != le16_to_cpu(mpi_reply->DevHandle)) { + dewtprintk(ioc, + ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", + handle, le16_to_cpu(mpi_reply->DevHandle), + smid)); + return 0; + } + + dewtprintk(ioc, + ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n", + handle, smid, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + + return _scsih_check_for_pending_tm(ioc, smid); +} + +/** + * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages + * @ioc: per adapter object + * @smid: system request message index + * @event: Event ID + * @event_context: used to track events uniquely + * + * Context - processed in interrupt context. + */ +static void +_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event, + U32 event_context) +{ + Mpi2EventAckRequest_t *ack_request; + int i = smid - ioc->internal_smid; + unsigned long flags; + + /* Without releasing the smid just update the + * call back index and reuse the same smid for + * processing this delayed request + */ + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + dewtprintk(ioc, + ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n", + le16_to_cpu(event), smid, ioc->base_cb_idx)); + ack_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t)); + ack_request->Function = MPI2_FUNCTION_EVENT_ACK; + ack_request->Event = event; + ack_request->EventContext = event_context; + ack_request->VF_ID = 0; /* TODO */ + ack_request->VP_ID = 0; + ioc->put_smid_default(ioc, smid); +} + +/** + * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed + * sas_io_unit_ctrl messages + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle + * + * Context - processed in interrupt context. 
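+ *
+ * The smid is not released here; its callback index is switched to
+ * tm_sas_control_cb_idx so the same smid carries the delayed
+ * MPI2_SAS_OP_REMOVE_DEVICE request.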
+ */ +static void +_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc, + u16 smid, u16 handle) +{ + Mpi2SasIoUnitControlRequest_t *mpi_request; + u32 ioc_state; + int i = smid - ioc->internal_smid; + unsigned long flags; + + if (ioc->remove_host) { + dewtprintk(ioc, + ioc_info(ioc, "%s: host has been removed\n", + __func__)); + return; + } else if (ioc->pci_error_recovery) { + dewtprintk(ioc, + ioc_info(ioc, "%s: host in pci error recovery\n", + __func__)); + return; + } + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, + ioc_info(ioc, "%s: host is not operational\n", + __func__)); + return; + } + + /* Without releasing the smid just update the + * call back index and reuse the same smid for + * processing this delayed request + */ + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + dewtprintk(ioc, + ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + handle, smid, ioc->tm_sas_control_cb_idx)); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; + mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; + mpi_request->DevHandle = cpu_to_le16(handle); + ioc->put_smid_default(ioc, smid); +} + +/** + * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages + * @ioc: per adapter object + * @smid: system request message index + * + * Context: Executed in interrupt context + * + * This will check delayed internal messages list, and process the + * next request. + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +u8 +mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + struct _sc_list *delayed_sc; + struct _event_ack_list *delayed_event_ack; + + if (!list_empty(&ioc->delayed_event_ack_list)) { + delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next, + struct _event_ack_list, list); + _scsih_issue_delayed_event_ack(ioc, smid, + delayed_event_ack->Event, delayed_event_ack->EventContext); + list_del(&delayed_event_ack->list); + kfree(delayed_event_ack); + return 0; + } + + if (!list_empty(&ioc->delayed_sc_list)) { + delayed_sc = list_entry(ioc->delayed_sc_list.next, + struct _sc_list, list); + _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid, + delayed_sc->handle); + list_del(&delayed_sc->list); + kfree(delayed_sc); + return 0; + } + return 1; +} + +/** + * _scsih_check_for_pending_tm - check for pending task management + * @ioc: per adapter object + * @smid: system request message index + * + * This will check delayed target reset list, and feed the + * next reqeust. + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
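+ *
+ * Delayed volume target resets are serviced before delayed device
+ * target resets; in either case the passed-in smid is freed and the
+ * send routine obtains a fresh high-priority smid.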
+ */ +static u8 +_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + struct _tr_list *delayed_tr; + + if (!list_empty(&ioc->delayed_tr_volume_list)) { + delayed_tr = list_entry(ioc->delayed_tr_volume_list.next, + struct _tr_list, list); + mpt3sas_base_free_smid(ioc, smid); + _scsih_tm_tr_volume_send(ioc, delayed_tr->handle); + list_del(&delayed_tr->list); + kfree(delayed_tr); + return 0; + } + + if (!list_empty(&ioc->delayed_tr_list)) { + delayed_tr = list_entry(ioc->delayed_tr_list.next, + struct _tr_list, list); + mpt3sas_base_free_smid(ioc, smid); + _scsih_tm_tr_send(ioc, delayed_tr->handle); + list_del(&delayed_tr->list); + kfree(delayed_tr); + return 0; + } + + return 1; +} + +/** + * _scsih_check_topo_delete_events - sanity check on topo events + * @ioc: per adapter object + * @event_data: the event data payload + * + * This routine added to better handle cable breaker. + * + * This handles the case where driver receives multiple expander + * add and delete events in a single shot. When there is a delete event + * the routine will void any pending add events waiting in the event queue. + */ +static void +_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasTopologyChangeList_t *event_data) +{ + struct fw_event_work *fw_event; + Mpi2EventDataSasTopologyChangeList_t *local_event_data; + u16 expander_handle; + struct _sas_node *sas_expander; + unsigned long flags; + int i, reason_code; + u16 handle; + + for (i = 0 ; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + reason_code = event_data->PHY[i].PhyStatus & + MPI2_EVENT_SAS_TOPO_RC_MASK; + if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING) + _scsih_tm_tr_send(ioc, handle); + } + + expander_handle = le16_to_cpu(event_data->ExpanderDevHandle); + if (expander_handle < ioc->sas_hba.num_phys) { + _scsih_block_io_to_children_attached_directly(ioc, event_data); + return; + } + if (event_data->ExpStatus == + MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) { + /* put expander attached devices into blocking state */ + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc, + expander_handle); + _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + do { + handle = find_first_bit(ioc->blocking_handles, + ioc->facts.MaxDevHandle); + if (handle < ioc->facts.MaxDevHandle) + _scsih_block_io_device(ioc, handle); + } while (test_and_clear_bit(handle, ioc->blocking_handles)); + } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING) + _scsih_block_io_to_children_attached_directly(ioc, event_data); + + if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) + return; + + /* mark ignore flag for pending events */ + spin_lock_irqsave(&ioc->fw_event_lock, flags); + list_for_each_entry(fw_event, &ioc->fw_event_list, list) { + if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST || + fw_event->ignore) + continue; + local_event_data = (Mpi2EventDataSasTopologyChangeList_t *) + fw_event->event_data; + if (local_event_data->ExpStatus == + MPI2_EVENT_SAS_TOPO_ES_ADDED || + local_event_data->ExpStatus == + MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { + if (le16_to_cpu(local_event_data->ExpanderDevHandle) == + expander_handle) { + dewtprintk(ioc, + ioc_info(ioc, "setting ignoring flag\n")); + fw_event->ignore = 1; + } + } + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +/** + * 
_scsih_check_pcie_topo_remove_events - sanity check on topo + * events + * @ioc: per adapter object + * @event_data: the event data payload + * + * This handles the case where driver receives multiple switch + * or device add and delete events in a single shot. When there + * is a delete event the routine will void any pending add + * events waiting in the event queue. + */ +static void +_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc, + Mpi26EventDataPCIeTopologyChangeList_t *event_data) +{ + struct fw_event_work *fw_event; + Mpi26EventDataPCIeTopologyChangeList_t *local_event_data; + unsigned long flags; + int i, reason_code; + u16 handle, switch_handle; + + for (i = 0; i < event_data->NumEntries; i++) { + handle = + le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); + if (!handle) + continue; + reason_code = event_data->PortEntry[i].PortStatus; + if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING) + _scsih_tm_tr_send(ioc, handle); + } + + switch_handle = le16_to_cpu(event_data->SwitchDevHandle); + if (!switch_handle) { + _scsih_block_io_to_pcie_children_attached_directly( + ioc, event_data); + return; + } + /* TODO We are not supporting cascaded PCIe Switch removal yet*/ + if ((event_data->SwitchStatus + == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) || + (event_data->SwitchStatus == + MPI26_EVENT_PCIE_TOPO_SS_RESPONDING)) + _scsih_block_io_to_pcie_children_attached_directly( + ioc, event_data); + + if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) + return; + + /* mark ignore flag for pending events */ + spin_lock_irqsave(&ioc->fw_event_lock, flags); + list_for_each_entry(fw_event, &ioc->fw_event_list, list) { + if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST || + fw_event->ignore) + continue; + local_event_data = + (Mpi26EventDataPCIeTopologyChangeList_t *) + fw_event->event_data; + if (local_event_data->SwitchStatus == + MPI2_EVENT_SAS_TOPO_ES_ADDED || + local_event_data->SwitchStatus == + MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { + if (le16_to_cpu(local_event_data->SwitchDevHandle) == + switch_handle) { + dewtprintk(ioc, + ioc_info(ioc, "setting ignoring flag for switch event\n")); + fw_event->ignore = 1; + } + } + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +/** + * _scsih_set_volume_delete_flag - setting volume delete flag + * @ioc: per adapter object + * @handle: device handle + * + * This returns nothing. + */ +static void +_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _raid_device *raid_device; + struct MPT3SAS_TARGET *sas_target_priv_data; + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + if (raid_device && raid_device->starget && + raid_device->starget->hostdata) { + sas_target_priv_data = + raid_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + dewtprintk(ioc, + ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n", + handle, (u64)raid_device->wwid)); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +/** + * _scsih_set_volume_handle_for_tr - set handle for target reset to volume + * @handle: input handle + * @a: handle for volume a + * @b: handle for volume b + * + * IR firmware only supports two raid volumes. The purpose of this + * routine is to set the volume handle in either a or b. When the given + * input handle is non-zero, or when a and b have not been set before. 
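+ *
+ * A handle is recorded only when it is non-zero and not already
+ * stored in a or b. For example, if volume handles arrive in the
+ * order 0x9, 0x3, 0x9, the first call sets a = 0x9, the second sets
+ * b = 0x3, and the third is ignored because 0x9 is already recorded.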
+ */ +static void +_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b) +{ + if (!handle || handle == *a || handle == *b) + return; + if (!*a) + *a = handle; + else if (!*b) + *b = handle; +} + +/** + * _scsih_check_ir_config_unhide_events - check for UNHIDE events + * @ioc: per adapter object + * @event_data: the event data payload + * Context: interrupt time. + * + * This routine will send target reset to volume, followed by target + * resets to the PDs. This is called when a PD has been removed, or + * volume has been deleted or removed. When the target reset is sent + * to volume, the PD target resets need to be queued to start upon + * completion of the volume target reset. + */ +static void +_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrConfigChangeList_t *event_data) +{ + Mpi2EventIrConfigElement_t *element; + int i; + u16 handle, volume_handle, a, b; + struct _tr_list *delayed_tr; + + a = 0; + b = 0; + + if (ioc->is_warpdrive) + return; + + /* Volume Resets for Deleted or Removed */ + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) + continue; + if (element->ReasonCode == + MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED || + element->ReasonCode == + MPI2_EVENT_IR_CHANGE_RC_REMOVED) { + volume_handle = le16_to_cpu(element->VolDevHandle); + _scsih_set_volume_delete_flag(ioc, volume_handle); + _scsih_set_volume_handle_for_tr(volume_handle, &a, &b); + } + } + + /* Volume Resets for UNHIDE events */ + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) + continue; + if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) { + volume_handle = le16_to_cpu(element->VolDevHandle); + _scsih_set_volume_handle_for_tr(volume_handle, &a, &b); + } + } + + if (a) + _scsih_tm_tr_volume_send(ioc, a); + if (b) + _scsih_tm_tr_volume_send(ioc, b); + + /* PD target resets */ + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE) + continue; + handle = le16_to_cpu(element->PhysDiskDevHandle); + volume_handle = le16_to_cpu(element->VolDevHandle); + clear_bit(handle, ioc->pd_handles); + if (!volume_handle) + _scsih_tm_tr_send(ioc, handle); + else if (volume_handle == a || volume_handle == b) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + BUG_ON(!delayed_tr); + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); + dewtprintk(ioc, + ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n", + handle)); + } else + _scsih_tm_tr_send(ioc, handle); + } +} + + +/** + * _scsih_check_volume_delete_events - set delete flag for volumes + * @ioc: per adapter object + * @event_data: the event data payload + * Context: interrupt time. + * + * This will handle the case when the cable connected to entire volume is + * pulled. We will take care of setting the deleted flag so normal IO will + * not be sent. 
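+ *
+ * Only MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED events that move the
+ * volume to the MISSING or FAILED state set the delete flag.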
+ */ +static void +_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrVolume_t *event_data) +{ + u32 state; + + if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) + return; + state = le32_to_cpu(event_data->NewValue); + if (state == MPI2_RAID_VOL_STATE_MISSING || state == + MPI2_RAID_VOL_STATE_FAILED) + _scsih_set_volume_delete_flag(ioc, + le16_to_cpu(event_data->VolDevHandle)); +} + +/** + * _scsih_temp_threshold_events - display temperature threshold exceeded events + * @ioc: per adapter object + * @event_data: the temp threshold event data + * Context: interrupt time. + */ +static void +_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataTemperature_t *event_data) +{ + u32 doorbell; + if (ioc->temp_sensors_count >= event_data->SensorNum) { + ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n", + le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ", + le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ", + le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ", + le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ", + event_data->SensorNum); + ioc_err(ioc, "Current Temp In Celsius: %d\n", + event_data->CurrentTemperature); + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, + doorbell & MPI2_DOORBELL_DATA_MASK); + } else if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, + doorbell & MPI2_DOORBELL_DATA_MASK); + } + } + } +} + +static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending) +{ + struct MPT3SAS_DEVICE *priv = scmd->device->hostdata; + + if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16) + return 0; + + if (pending) + return test_and_set_bit(0, &priv->ata_command_pending); + + clear_bit(0, &priv->ata_command_pending); + return 0; +} + +/** + * _scsih_flush_running_cmds - completing outstanding commands. + * @ioc: per adapter object + * + * The flushing out of all pending scmd commands following host reset, + * where all IO is dropped to the floor. + */ +static void +_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) +{ + struct scsi_cmnd *scmd; + struct scsiio_tracker *st; + u16 smid; + int count = 0; + + for (smid = 1; smid <= ioc->scsiio_depth; smid++) { + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + count++; + _scsih_set_satl_pending(scmd, false); + st = scsi_cmd_priv(scmd); + mpt3sas_base_clear_st(ioc, st); + scsi_dma_unmap(scmd); + if (ioc->pci_error_recovery || ioc->remove_host) + scmd->result = DID_NO_CONNECT << 16; + else + scmd->result = DID_RESET << 16; + scsi_done(scmd); + } + dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count)); +} + +/** + * _scsih_setup_eedp - setup MPI request for EEDP transfer + * @ioc: per adapter object + * @scmd: pointer to scsi command object + * @mpi_request: pointer to the SCSI_IO request message frame + * + * Supporting protection 1 and 3. 
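+ *
+ * SCSI_PROT_READ_STRIP maps to the EEDP check-remove operation and
+ * SCSI_PROT_WRITE_INSERT to the insert operation; guard and
+ * reference tag checking are taken from the command's prot_flags,
+ * and the protection interval is programmed as the EEDP block size.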
+ */ +static void +_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, + Mpi25SCSIIORequest_t *mpi_request) +{ + u16 eedp_flags; + Mpi25SCSIIORequest_t *mpi_request_3v = + (Mpi25SCSIIORequest_t *)mpi_request; + + switch (scsi_get_prot_op(scmd)) { + case SCSI_PROT_READ_STRIP: + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; + break; + case SCSI_PROT_WRITE_INSERT: + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; + break; + default: + return; + } + + if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + + if (scmd->prot_flags & SCSI_PROT_REF_CHECK) + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG; + + if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) { + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG; + + mpi_request->CDB.EEDP32.PrimaryReferenceTag = + cpu_to_be32(scsi_prot_ref_tag(scmd)); + } + + mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd)); + + if (ioc->is_gen35_ioc) + eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; + mpi_request->EEDPFlags = cpu_to_le16(eedp_flags); +} + +/** + * _scsih_eedp_error_handling - return sense code for EEDP errors + * @scmd: pointer to scsi command object + * @ioc_status: ioc status + */ +static void +_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) +{ + u8 ascq; + + switch (ioc_status) { + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + ascq = 0x01; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + ascq = 0x02; + break; + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + ascq = 0x03; + break; + default: + ascq = 0x00; + break; + } + scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq); + set_host_byte(scmd, DID_ABORT); +} + +/** + * scsih_qcmd - main scsi request entry point + * @shost: SCSI host pointer + * @scmd: pointer to scsi command object + * + * The callback index is set inside `ioc->scsi_io_cb_idx`. + * + * Return: 0 on success. 
If there's a failure, return either: + * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or + * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full + */ +static int +scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _raid_device *raid_device; + struct request *rq = scsi_cmd_to_rq(scmd); + int class; + Mpi25SCSIIORequest_t *mpi_request; + struct _pcie_device *pcie_device = NULL; + u32 mpi_control; + u16 smid; + u16 handle; + + if (ioc->logging_level & MPT_DEBUG_SCSI) + scsi_print_command(scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + return 0; + } + + if (!(_scsih_allow_scmd_to_device(ioc, scmd))) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + return 0; + } + + sas_target_priv_data = sas_device_priv_data->sas_target; + + /* invalid device handle */ + handle = sas_target_priv_data->handle; + + /* + * Avoid error handling escallation when device is disconnected + */ + if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) { + if (scmd->device->host->shost_state == SHOST_RECOVERY && + scmd->cmnd[0] == TEST_UNIT_READY) { + scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); + scsi_done(scmd); + return 0; + } + } + + if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + return 0; + } + + + if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) { + /* host recovery or link resets sent via IOCTLs */ + return SCSI_MLQUEUE_HOST_BUSY; + } else if (sas_target_priv_data->deleted) { + /* device has been deleted */ + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + return 0; + } else if (sas_target_priv_data->tm_busy || + sas_device_priv_data->block) { + /* device busy with task management */ + return SCSI_MLQUEUE_DEVICE_BUSY; + } + + /* + * Bug work around for firmware SATL handling. The loop + * is based on atomic operations and ensures consistency + * since we're lockless at this point + */ + do { + if (test_bit(0, &sas_device_priv_data->ata_command_pending)) + return SCSI_MLQUEUE_DEVICE_BUSY; + } while (_scsih_set_satl_pending(scmd, true)); + + if (scmd->sc_data_direction == DMA_FROM_DEVICE) + mpi_control = MPI2_SCSIIO_CONTROL_READ; + else if (scmd->sc_data_direction == DMA_TO_DEVICE) + mpi_control = MPI2_SCSIIO_CONTROL_WRITE; + else + mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; + + /* set tags */ + mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; + /* NCQ Prio supported, make sure control indicated high priority */ + if (sas_device_priv_data->ncq_prio_enable) { + class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); + if (class == IOPRIO_CLASS_RT) + mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT; + } + /* Make sure Device is not raid volume. + * We do not expose raid functionality to upper layer for warpdrive. 
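+ * TLR is turned on below only when the adapter is not a
+ * warpdrive, the device is neither a RAID volume nor an NVMe
+ * device, TLR is enabled on the device, and the CDB length is
+ * not 32 bytes.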
+ */ + if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)) + && !scsih_is_nvme(&scmd->device->sdev_gendev)) + && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32) + mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; + + smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + _scsih_set_satl_pending(scmd, false); + goto out; + } + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, ioc->request_sz); + _scsih_setup_eedp(ioc, scmd, mpi_request); + + if (scmd->cmd_len == 32) + mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; + mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) + mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; + else + mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); + mpi_request->Control = cpu_to_le32(mpi_control); + mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len); + mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; + mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; + mpi_request->SenseBufferLowAddress = + mpt3sas_base_get_sense_buffer_dma(ioc, smid); + mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4; + int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *) + mpi_request->LUN); + memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); + + if (mpi_request->DataLength) { + pcie_device = sas_target_priv_data->pcie_dev; + if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) { + mpt3sas_base_free_smid(ioc, smid); + _scsih_set_satl_pending(scmd, false); + goto out; + } + } else + ioc->build_zero_len_sge(ioc, &mpi_request->SGL); + + raid_device = sas_target_priv_data->raid_device; + if (raid_device && raid_device->direct_io_enabled) + mpt3sas_setup_direct_io(ioc, scmd, + raid_device, mpi_request); + + if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) { + if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) { + mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | + MPI25_SCSIIO_IOFLAGS_FAST_PATH); + ioc->put_smid_fast_path(ioc, smid, handle); + } else + ioc->put_smid_scsi_io(ioc, smid, + le16_to_cpu(mpi_request->DevHandle)); + } else + ioc->put_smid_default(ioc, smid); + return 0; + + out: + return SCSI_MLQUEUE_HOST_BUSY; +} + +/** + * _scsih_normalize_sense - normalize descriptor and fixed format sense data + * @sense_buffer: sense data returned by target + * @data: normalized skey/asc/ascq + */ +static void +_scsih_normalize_sense(char *sense_buffer, struct sense_info *data) +{ + if ((sense_buffer[0] & 0x7F) >= 0x72) { + /* descriptor format */ + data->skey = sense_buffer[1] & 0x0F; + data->asc = sense_buffer[2]; + data->ascq = sense_buffer[3]; + } else { + /* fixed format */ + data->skey = sense_buffer[2] & 0x0F; + data->asc = sense_buffer[12]; + data->ascq = sense_buffer[13]; + } +} + +/** + * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request + * @ioc: per adapter object + * @scmd: pointer to scsi command object + * @mpi_reply: reply mf payload returned from firmware + * @smid: ? 
+ * + * scsi_status - SCSI Status code returned from target device + * scsi_state - state info associated with SCSI_IO determined by ioc + * ioc_status - ioc supplied status info + */ +static void +_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, + Mpi2SCSIIOReply_t *mpi_reply, u16 smid) +{ + u32 response_info; + u8 *response_bytes; + u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & + MPI2_IOCSTATUS_MASK; + u8 scsi_state = mpi_reply->SCSIState; + u8 scsi_status = mpi_reply->SCSIStatus; + char *desc_ioc_state = NULL; + char *desc_scsi_status = NULL; + char *desc_scsi_state = ioc->tmp_string; + u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + struct _sas_device *sas_device = NULL; + struct _pcie_device *pcie_device = NULL; + struct scsi_target *starget = scmd->device->sdev_target; + struct MPT3SAS_TARGET *priv_target = starget->hostdata; + char *device_str = NULL; + + if (!priv_target) + return; + if (ioc->hide_ir_msg) + device_str = "WarpDrive"; + else + device_str = "volume"; + + if (log_info == 0x31170000) + return; + + switch (ioc_status) { + case MPI2_IOCSTATUS_SUCCESS: + desc_ioc_state = "success"; + break; + case MPI2_IOCSTATUS_INVALID_FUNCTION: + desc_ioc_state = "invalid function"; + break; + case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: + desc_ioc_state = "scsi recovered error"; + break; + case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: + desc_ioc_state = "scsi invalid dev handle"; + break; + case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + desc_ioc_state = "scsi device not there"; + break; + case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: + desc_ioc_state = "scsi data overrun"; + break; + case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: + desc_ioc_state = "scsi data underrun"; + break; + case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: + desc_ioc_state = "scsi io data error"; + break; + case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: + desc_ioc_state = "scsi protocol error"; + break; + case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: + desc_ioc_state = "scsi task terminated"; + break; + case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + desc_ioc_state = "scsi residual mismatch"; + break; + case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + desc_ioc_state = "scsi task mgmt failed"; + break; + case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: + desc_ioc_state = "scsi ioc terminated"; + break; + case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: + desc_ioc_state = "scsi ext terminated"; + break; + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + desc_ioc_state = "eedp guard error"; + break; + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + desc_ioc_state = "eedp ref tag error"; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + desc_ioc_state = "eedp app tag error"; + break; + case MPI2_IOCSTATUS_INSUFFICIENT_POWER: + desc_ioc_state = "insufficient power"; + break; + default: + desc_ioc_state = "unknown"; + break; + } + + switch (scsi_status) { + case MPI2_SCSI_STATUS_GOOD: + desc_scsi_status = "good"; + break; + case MPI2_SCSI_STATUS_CHECK_CONDITION: + desc_scsi_status = "check condition"; + break; + case MPI2_SCSI_STATUS_CONDITION_MET: + desc_scsi_status = "condition met"; + break; + case MPI2_SCSI_STATUS_BUSY: + desc_scsi_status = "busy"; + break; + case MPI2_SCSI_STATUS_INTERMEDIATE: + desc_scsi_status = "intermediate"; + break; + case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET: + desc_scsi_status = "intermediate condmet"; + break; + case MPI2_SCSI_STATUS_RESERVATION_CONFLICT: + desc_scsi_status = "reservation conflict"; + break; + case MPI2_SCSI_STATUS_COMMAND_TERMINATED: + desc_scsi_status = "command terminated"; + break; + case 
MPI2_SCSI_STATUS_TASK_SET_FULL: + desc_scsi_status = "task set full"; + break; + case MPI2_SCSI_STATUS_ACA_ACTIVE: + desc_scsi_status = "aca active"; + break; + case MPI2_SCSI_STATUS_TASK_ABORTED: + desc_scsi_status = "task aborted"; + break; + default: + desc_scsi_status = "unknown"; + break; + } + + desc_scsi_state[0] = '\0'; + if (!scsi_state) + desc_scsi_state = " "; + if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) + strcat(desc_scsi_state, "response info "); + if (scsi_state & MPI2_SCSI_STATE_TERMINATED) + strcat(desc_scsi_state, "state terminated "); + if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) + strcat(desc_scsi_state, "no status "); + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED) + strcat(desc_scsi_state, "autosense failed "); + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) + strcat(desc_scsi_state, "autosense valid "); + + scsi_print_command(scmd); + + if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { + ioc_warn(ioc, "\t%s wwid(0x%016llx)\n", + device_str, (u64)priv_target->sas_address); + } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { + pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target); + if (pcie_device) { + ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n", + (u64)pcie_device->wwid, pcie_device->port_num); + if (pcie_device->enclosure_handle != 0) + ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n", + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot); + if (pcie_device->connector_name[0]) + ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n", + pcie_device->enclosure_level, + pcie_device->connector_name); + pcie_device_put(pcie_device); + } + } else { + sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target); + if (sas_device) { + ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n", + (u64)sas_device->sas_address, sas_device->phy); + + _scsih_display_enclosure_chassis_info(ioc, sas_device, + NULL, NULL); + + sas_device_put(sas_device); + } + } + + ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n", + le16_to_cpu(mpi_reply->DevHandle), + desc_ioc_state, ioc_status, smid); + ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n", + scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd)); + ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n", + le16_to_cpu(mpi_reply->TaskTag), + le32_to_cpu(mpi_reply->TransferCount), scmd->result); + ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n", + desc_scsi_status, scsi_status, desc_scsi_state, scsi_state); + + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { + struct sense_info data; + _scsih_normalize_sense(scmd->sense_buffer, &data); + ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n", + data.skey, data.asc, data.ascq, + le32_to_cpu(mpi_reply->SenseCount)); + } + if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { + response_info = le32_to_cpu(mpi_reply->ResponseInfo); + response_bytes = (u8 *)&response_info; + _scsih_response_code(ioc, response_bytes[0]); + } +} + +/** + * _scsih_turn_on_pfa_led - illuminate PFA LED + * @ioc: per adapter object + * @handle: device handle + * Context: process + */ +static void +_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + Mpi2SepReply_t mpi_reply; + Mpi2SepRequest_t mpi_request; + struct _sas_device *sas_device; + + sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); + if (!sas_device) + return; + + memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); + mpi_request.Function = 
MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = + cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); + mpi_request.DevHandle = cpu_to_le16(handle); + mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS; + if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + sas_device->pfa_led_on = 1; + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, + ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); + goto out; + } +out: + sas_device_put(sas_device); +} + +/** + * _scsih_turn_off_pfa_led - turn off Fault LED + * @ioc: per adapter object + * @sas_device: sas device whose PFA LED has to turned off + * Context: process + */ +static void +_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + Mpi2SepReply_t mpi_reply; + Mpi2SepRequest_t mpi_request; + + memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); + mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = 0; + mpi_request.Slot = cpu_to_le16(sas_device->slot); + mpi_request.DevHandle = 0; + mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle); + mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS; + if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, + ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); + return; + } +} + +/** + * _scsih_send_event_to_turn_on_pfa_led - fire delayed event + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt. + */ +static void +_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct fw_event_work *fw_event; + + fw_event = alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = MPT3SAS_TURN_ON_PFA_LED; + fw_event->device_handle = handle; + fw_event->ioc = ioc; + _scsih_fw_event_add(ioc, fw_event); + fw_event_work_put(fw_event); +} + +/** + * _scsih_smart_predicted_fault - process smart errors + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt. 
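+ *
+ * RAID components and volumes are skipped. For IBM-branded adapters
+ * an MPT3SAS_TURN_ON_PFA_LED event is queued, and an
+ * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE entry (SMART data, ASC 0x5D)
+ * is added to the driver event log.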
+ */ +static void +_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct scsi_target *starget; + struct MPT3SAS_TARGET *sas_target_priv_data; + Mpi2EventNotificationReply_t *event_reply; + Mpi2EventDataSasDeviceStatusChange_t *event_data; + struct _sas_device *sas_device; + ssize_t sz; + unsigned long flags; + + /* only handle non-raid devices */ + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); + if (!sas_device) + goto out_unlock; + + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + + if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) || + ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) + goto out_unlock; + + _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget); + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) + _scsih_send_event_to_turn_on_pfa_led(ioc, handle); + + /* insert into event log */ + sz = offsetof(Mpi2EventNotificationReply_t, EventData) + + sizeof(Mpi2EventDataSasDeviceStatusChange_t); + event_reply = kzalloc(sz, GFP_ATOMIC); + if (!event_reply) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; + event_reply->Event = + cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); + event_reply->MsgLength = sz/4; + event_reply->EventDataLength = + cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4); + event_data = (Mpi2EventDataSasDeviceStatusChange_t *) + event_reply->EventData; + event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA; + event_data->ASC = 0x5D; + event_data->DevHandle = cpu_to_le16(handle); + event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address); + mpt3sas_ctl_add_to_event_log(ioc, event_reply); + kfree(event_reply); +out: + if (sas_device) + sas_device_put(sas_device); + return; + +out_unlock: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + goto out; +} + +/** + * _scsih_io_done - scsi request callback + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Callback handler when using _scsih_qcmd. + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
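+ *
+ * A WARPDRIVE direct I/O that completes with any IOC status other
+ * than "scsi task terminated" is reissued to the owning volume's
+ * device handle rather than being completed back to the midlayer.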
+ */ +static u8 +_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) +{ + Mpi25SCSIIORequest_t *mpi_request; + Mpi2SCSIIOReply_t *mpi_reply; + struct scsi_cmnd *scmd; + struct scsiio_tracker *st; + u16 ioc_status; + u32 xfer_cnt; + u8 scsi_state; + u8 scsi_status; + u32 log_info; + struct MPT3SAS_DEVICE *sas_device_priv_data; + u32 response_code = 0; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (scmd == NULL) + return 1; + + _scsih_set_satl_pending(scmd, false); + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + if (mpi_reply == NULL) { + scmd->result = DID_OK << 16; + goto out; + } + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + sas_device_priv_data->sas_target->deleted) { + scmd->result = DID_NO_CONNECT << 16; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + + /* + * WARPDRIVE: If direct_io is set then it is directIO, + * the failed direct I/O should be redirected to volume + */ + st = scsi_cmd_priv(scmd); + if (st->direct_io && + ((ioc_status & MPI2_IOCSTATUS_MASK) + != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) { + st->direct_io = 0; + st->scmd = scmd; + memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); + mpi_request->DevHandle = + cpu_to_le16(sas_device_priv_data->sas_target->handle); + ioc->put_smid_scsi_io(ioc, smid, + sas_device_priv_data->sas_target->handle); + return 0; + } + /* turning off TLR */ + scsi_state = mpi_reply->SCSIState; + if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) + response_code = + le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; + if (!sas_device_priv_data->tlr_snoop_check) { + sas_device_priv_data->tlr_snoop_check++; + if ((!ioc->is_warpdrive && + !scsih_is_raid(&scmd->device->sdev_gendev) && + !scsih_is_nvme(&scmd->device->sdev_gendev)) + && sas_is_tlr_enabled(scmd->device) && + response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) { + sas_disable_tlr(scmd->device); + sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n"); + } + } + + xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); + scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); + if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) + log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + else + log_info = 0; + ioc_status &= MPI2_IOCSTATUS_MASK; + scsi_status = mpi_reply->SCSIStatus; + + if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 && + (scsi_status == MPI2_SCSI_STATUS_BUSY || + scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT || + scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) { + ioc_status = MPI2_IOCSTATUS_SUCCESS; + } + + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { + struct sense_info data; + const void *sense_data = mpt3sas_base_get_sense_buffer(ioc, + smid); + u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, + le32_to_cpu(mpi_reply->SenseCount)); + memcpy(scmd->sense_buffer, sense_data, sz); + _scsih_normalize_sense(scmd->sense_buffer, &data); + /* failure prediction threshold exceeded */ + if (data.asc == 0x5D) + _scsih_smart_predicted_fault(ioc, + le16_to_cpu(mpi_reply->DevHandle)); + mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq); + + if ((ioc->logging_level & MPT_DEBUG_REPLY) && + ((scmd->sense_buffer[2] == UNIT_ATTENTION) || + (scmd->sense_buffer[2] == MEDIUM_ERROR) || + (scmd->sense_buffer[2] == HARDWARE_ERROR))) + _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid); + } + switch (ioc_status) { + case MPI2_IOCSTATUS_BUSY: + case 
MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: + scmd->result = SAM_STAT_BUSY; + break; + + case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + scmd->result = DID_NO_CONNECT << 16; + break; + + case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: + if (sas_device_priv_data->block) { + scmd->result = DID_TRANSPORT_DISRUPTED << 16; + goto out; + } + if (log_info == 0x31110630) { + if (scmd->retries > 2) { + scmd->result = DID_NO_CONNECT << 16; + scsi_device_set_state(scmd->device, + SDEV_OFFLINE); + } else { + scmd->result = DID_SOFT_ERROR << 16; + scmd->device->expecting_cc_ua = 1; + } + break; + } else if (log_info == VIRTUAL_IO_FAILED_RETRY) { + scmd->result = DID_RESET << 16; + break; + } else if ((scmd->device->channel == RAID_CHANNEL) && + (scsi_state == (MPI2_SCSI_STATE_TERMINATED | + MPI2_SCSI_STATE_NO_SCSI_STATUS))) { + scmd->result = DID_RESET << 16; + break; + } + scmd->result = DID_SOFT_ERROR << 16; + break; + case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: + case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: + scmd->result = DID_RESET << 16; + break; + + case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt)) + scmd->result = DID_SOFT_ERROR << 16; + else + scmd->result = (DID_OK << 16) | scsi_status; + break; + + case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: + scmd->result = (DID_OK << 16) | scsi_status; + + if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)) + break; + + if (xfer_cnt < scmd->underflow) { + if (scsi_status == SAM_STAT_BUSY) + scmd->result = SAM_STAT_BUSY; + else + scmd->result = DID_SOFT_ERROR << 16; + } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | + MPI2_SCSI_STATE_NO_SCSI_STATUS)) + scmd->result = DID_SOFT_ERROR << 16; + else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) + scmd->result = DID_RESET << 16; + else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) { + mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID; + mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION; + scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, + 0x20, 0); + } + break; + + case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: + scsi_set_resid(scmd, 0); + fallthrough; + case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: + case MPI2_IOCSTATUS_SUCCESS: + scmd->result = (DID_OK << 16) | scsi_status; + if (response_code == + MPI2_SCSITASKMGMT_RSP_INVALID_FRAME || + (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | + MPI2_SCSI_STATE_NO_SCSI_STATUS))) + scmd->result = DID_SOFT_ERROR << 16; + else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) + scmd->result = DID_RESET << 16; + break; + + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + _scsih_eedp_error_handling(scmd, ioc_status); + break; + + case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: + case MPI2_IOCSTATUS_INVALID_FUNCTION: + case MPI2_IOCSTATUS_INVALID_SGL: + case MPI2_IOCSTATUS_INTERNAL_ERROR: + case MPI2_IOCSTATUS_INVALID_FIELD: + case MPI2_IOCSTATUS_INVALID_STATE: + case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: + case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + case MPI2_IOCSTATUS_INSUFFICIENT_POWER: + default: + scmd->result = DID_SOFT_ERROR << 16; + break; + + } + + if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY)) + _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid); + + out: + + scsi_dma_unmap(scmd); + mpt3sas_base_free_smid(ioc, smid); + scsi_done(scmd); + return 0; +} + +/** + * _scsih_update_vphys_after_reset - update the Port's + * vphys_list after reset + * @ioc: per adapter object + * + * Returns nothing. 
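+ *
+ * Descriptive note (summary of the code below): every virtual_phy object is
+ * first marked MPT_VPHY_FLAG_DIRTY_PHY, SAS IO Unit Page 0 is then re-read,
+ * and every vSES phy that is still present gets its object refreshed and
+ * un-marked; objects left dirty are later freed by _scsih_del_dirty_vphy().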
+ */ +static void +_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc) +{ + u16 sz, ioc_status; + int i; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u64 attached_sas_addr; + u8 found = 0, port_id; + Mpi2SasPhyPage0_t phy_pg0; + struct hba_port *port, *port_next, *mport; + struct virtual_phy *vphy, *vphy_next; + struct _sas_device *sas_device; + + /* + * Mark all the vphys objects as dirty. + */ + list_for_each_entry_safe(port, port_next, + &ioc->port_table_list, list) { + if (!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY; + } + } + + /* + * Read SASIOUnitPage0 to get each HBA Phy's data. + */ + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + + (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + goto out; + /* + * Loop over each HBA Phy. + */ + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + /* + * Check whether Phy's Negotiation Link Rate is > 1.5G or not. + */ + if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) < + MPI2_SAS_NEG_LINK_RATE_1_5) + continue; + /* + * Check whether Phy is connected to SEP device or not, + * if it is SEP device then read the Phy's SASPHYPage0 data to + * determine whether Phy is a virtual Phy or not. if it is + * virtual phy then it is conformed that the attached remote + * device is a HBA's vSES device. + */ + if (!(le32_to_cpu( + sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) & + MPI2_SAS_DEVICE_INFO_SEP)) + continue; + + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + i))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + + if (!(le32_to_cpu(phy_pg0.PhyInfo) & + MPI2_SAS_PHYINFO_VIRTUAL_PHY)) + continue; + /* + * Get the vSES device's SAS Address. + */ + attached_handle = le16_to_cpu( + sas_iounit_pg0->PhyData[i].AttachedDevHandle); + if (_scsih_get_sas_address(ioc, attached_handle, + &attached_sas_addr) != 0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + + found = 0; + port = port_next = NULL; + /* + * Loop over each virtual_phy object from + * each port's vphys_list. + */ + list_for_each_entry_safe(port, + port_next, &ioc->port_table_list, list) { + if (!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + /* + * Continue with next virtual_phy object + * if the object is not marked as dirty. + */ + if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY)) + continue; + + /* + * Continue with next virtual_phy object + * if the object's SAS Address is not equals + * to current Phy's vSES device SAS Address. + */ + if (vphy->sas_address != attached_sas_addr) + continue; + /* + * Enable current Phy number bit in object's + * phy_mask field. + */ + if (!(vphy->phy_mask & (1 << i))) + vphy->phy_mask = (1 << i); + /* + * Get hba_port object from hba_port table + * corresponding to current phy's Port ID. + * if there is no hba_port object corresponding + * to Phy's Port ID then create a new hba_port + * object & add to hba_port table. 
+ */ + port_id = sas_iounit_pg0->PhyData[i].Port; + mport = mpt3sas_get_port_by_id(ioc, port_id, 1); + if (!mport) { + mport = kzalloc( + sizeof(struct hba_port), GFP_KERNEL); + if (!mport) + break; + mport->port_id = port_id; + ioc_info(ioc, + "%s: hba_port entry: %p, port: %d is added to hba_port list\n", + __func__, mport, mport->port_id); + list_add_tail(&mport->list, + &ioc->port_table_list); + } + /* + * If mport & port pointers are not pointing to + * same hba_port object then it means that vSES + * device's Port ID got changed after reset and + * hence move current virtual_phy object from + * port's vphys_list to mport's vphys_list. + */ + if (port != mport) { + if (!mport->vphys_mask) + INIT_LIST_HEAD( + &mport->vphys_list); + mport->vphys_mask |= (1 << i); + port->vphys_mask &= ~(1 << i); + list_move(&vphy->list, + &mport->vphys_list); + sas_device = mpt3sas_get_sdev_by_addr( + ioc, attached_sas_addr, port); + if (sas_device) + sas_device->port = mport; + } + /* + * Earlier while updating the hba_port table, + * it is determined that there is no other + * direct attached device with mport's Port ID, + * Hence mport was marked as dirty. Only vSES + * device has this Port ID, so unmark the mport + * as dirt. + */ + if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) { + mport->sas_address = 0; + mport->phy_mask = 0; + mport->flags &= + ~HBA_PORT_FLAG_DIRTY_PORT; + } + /* + * Unmark current virtual_phy object as dirty. + */ + vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY; + found = 1; + break; + } + if (found) + break; + } + } +out: + kfree(sas_iounit_pg0); +} + +/** + * _scsih_get_port_table_after_reset - Construct temporary port table + * @ioc: per adapter object + * @port_table: address where port table needs to be constructed + * + * return number of HBA port entries available after reset. 
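+ *
+ * Typical usage, sketched from _scsih_sas_port_refresh() below (illustrative
+ * only):
+ *
+ *	struct hba_port *port_table;
+ *	u32 port_count;
+ *
+ *	port_table = kcalloc(ioc->sas_hba.num_phys,
+ *	    sizeof(struct hba_port), GFP_KERNEL);
+ *	if (port_table)
+ *		port_count = _scsih_get_port_table_after_reset(ioc,
+ *		    port_table);	// entries 0..port_count-1 are valid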
+ */ +static int +_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc, + struct hba_port *port_table) +{ + u16 sz, ioc_status; + int i, j; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u64 attached_sas_addr; + u8 found = 0, port_count = 0, port_id; + + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys + * sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return port_count; + } + + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + found = 0; + if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) < + MPI2_SAS_NEG_LINK_RATE_1_5) + continue; + attached_handle = + le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle); + if (_scsih_get_sas_address( + ioc, attached_handle, &attached_sas_addr) != 0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + + for (j = 0; j < port_count; j++) { + port_id = sas_iounit_pg0->PhyData[i].Port; + if (port_table[j].port_id == port_id && + port_table[j].sas_address == attached_sas_addr) { + port_table[j].phy_mask |= (1 << i); + found = 1; + break; + } + } + + if (found) + continue; + + port_id = sas_iounit_pg0->PhyData[i].Port; + port_table[port_count].port_id = port_id; + port_table[port_count].phy_mask = (1 << i); + port_table[port_count].sas_address = attached_sas_addr; + port_count++; + } +out: + kfree(sas_iounit_pg0); + return port_count; +} + +enum hba_port_matched_codes { + NOT_MATCHED = 0, + MATCHED_WITH_ADDR_AND_PHYMASK, + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT, + MATCHED_WITH_ADDR_AND_SUBPHYMASK, + MATCHED_WITH_ADDR, +}; + +/** + * _scsih_look_and_get_matched_port_entry - Get matched hba port entry + * from HBA port table + * @ioc: per adapter object + * @port_entry: hba port entry from temporary port table which needs to be + * searched for matched entry in the HBA port table + * @matched_port_entry: save matched hba port entry here + * @count: count of matched entries + * + * return type of matched entry found. 
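+ *
+ * Descriptive note (summary of the matching below): candidates are ranked as
+ * MATCHED_WITH_ADDR_AND_PHYMASK (exact match), then
+ * MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT, then
+ * MATCHED_WITH_ADDR_AND_SUBPHYMASK, and finally MATCHED_WITH_ADDR, in which
+ * case *count is set to the number of address-only matches found.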
+ */ +static enum hba_port_matched_codes +_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc, + struct hba_port *port_entry, + struct hba_port **matched_port_entry, int *count) +{ + struct hba_port *port_table_entry, *matched_port = NULL; + enum hba_port_matched_codes matched_code = NOT_MATCHED; + int lcount = 0; + *matched_port_entry = NULL; + + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { + if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT)) + continue; + + if ((port_table_entry->sas_address == port_entry->sas_address) + && (port_table_entry->phy_mask == port_entry->phy_mask)) { + matched_code = MATCHED_WITH_ADDR_AND_PHYMASK; + matched_port = port_table_entry; + break; + } + + if ((port_table_entry->sas_address == port_entry->sas_address) + && (port_table_entry->phy_mask & port_entry->phy_mask) + && (port_table_entry->port_id == port_entry->port_id)) { + matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT; + matched_port = port_table_entry; + continue; + } + + if ((port_table_entry->sas_address == port_entry->sas_address) + && (port_table_entry->phy_mask & port_entry->phy_mask)) { + if (matched_code == + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) + continue; + matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK; + matched_port = port_table_entry; + continue; + } + + if (port_table_entry->sas_address == port_entry->sas_address) { + if (matched_code == + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) + continue; + if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK) + continue; + matched_code = MATCHED_WITH_ADDR; + matched_port = port_table_entry; + lcount++; + } + } + + *matched_port_entry = matched_port; + if (matched_code == MATCHED_WITH_ADDR) + *count = lcount; + return matched_code; +} + +/** + * _scsih_del_phy_part_of_anther_port - remove phy if it + * is a part of anther port + *@ioc: per adapter object + *@port_table: port table after reset + *@index: hba port entry index + *@port_count: number of ports available after host reset + *@offset: HBA phy bit offset + * + */ +static void +_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc, + struct hba_port *port_table, + int index, u8 port_count, int offset) +{ + struct _sas_node *sas_node = &ioc->sas_hba; + u32 i, found = 0; + + for (i = 0; i < port_count; i++) { + if (i == index) + continue; + + if (port_table[i].phy_mask & (1 << offset)) { + mpt3sas_transport_del_phy_from_an_existing_port( + ioc, sas_node, &sas_node->phy[offset]); + found = 1; + break; + } + } + if (!found) + port_table[index].phy_mask |= (1 << offset); +} + +/** + * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from + * right port + *@ioc: per adapter object + *@hba_port_entry: hba port table entry + *@port_table: temporary port table + *@index: hba port entry index + *@port_count: number of ports available after host reset + * + */ +static void +_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc, + struct hba_port *hba_port_entry, struct hba_port *port_table, + int index, int port_count) +{ + u32 phy_mask, offset = 0; + struct _sas_node *sas_node = &ioc->sas_hba; + + phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask; + + for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) { + if (phy_mask & (1 << offset)) { + if (!(port_table[index].phy_mask & (1 << offset))) { + _scsih_del_phy_part_of_anther_port( + ioc, port_table, index, port_count, + offset); + continue; + } + if (sas_node->phy[offset].phy_belongs_to_port) + mpt3sas_transport_del_phy_from_an_existing_port( + 
ioc, sas_node, &sas_node->phy[offset]); + mpt3sas_transport_add_phy_to_an_existing_port( + ioc, sas_node, &sas_node->phy[offset], + hba_port_entry->sas_address, + hba_port_entry); + } + } +} + +/** + * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty. + * @ioc: per adapter object + * + * Returns nothing. + */ +static void +_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc) +{ + struct hba_port *port, *port_next; + struct virtual_phy *vphy, *vphy_next; + + list_for_each_entry_safe(port, port_next, + &ioc->port_table_list, list) { + if (!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) { + drsprintk(ioc, ioc_info(ioc, + "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n", + vphy, port->port_id, + vphy->phy_mask)); + port->vphys_mask &= ~vphy->phy_mask; + list_del(&vphy->list); + kfree(vphy); + } + } + if (!port->vphys_mask && !port->sas_address) + port->flags |= HBA_PORT_FLAG_DIRTY_PORT; + } +} + +/** + * _scsih_del_dirty_port_entries - delete dirty port entries from port list + * after host reset + *@ioc: per adapter object + * + */ +static void +_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc) +{ + struct hba_port *port, *port_next; + + list_for_each_entry_safe(port, port_next, + &ioc->port_table_list, list) { + if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) || + port->flags & HBA_PORT_FLAG_NEW_PORT) + continue; + + drsprintk(ioc, ioc_info(ioc, + "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n", + port, port->port_id, port->phy_mask)); + list_del(&port->list); + kfree(port); + } +} + +/** + * _scsih_sas_port_refresh - Update HBA port table after host reset + * @ioc: per adapter object + */ +static void +_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) +{ + u32 port_count = 0; + struct hba_port *port_table; + struct hba_port *port_table_entry; + struct hba_port *port_entry = NULL; + int i, j, count = 0, lcount = 0; + int ret; + u64 sas_addr; + u8 num_phys; + + drsprintk(ioc, ioc_info(ioc, + "updating ports for sas_host(0x%016llx)\n", + (unsigned long long)ioc->sas_hba.sas_address)); + + mpt3sas_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + if (num_phys > ioc->sas_hba.nr_phys_allocated) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + ioc->sas_hba.num_phys = num_phys; + + port_table = kcalloc(ioc->sas_hba.num_phys, + sizeof(struct hba_port), GFP_KERNEL); + if (!port_table) + return; + + port_count = _scsih_get_port_table_after_reset(ioc, port_table); + if (!port_count) + return; + + drsprintk(ioc, ioc_info(ioc, "New Port table\n")); + for (j = 0; j < port_count; j++) + drsprintk(ioc, ioc_info(ioc, + "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", + port_table[j].port_id, + port_table[j].phy_mask, port_table[j].sas_address)); + + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) + port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT; + + drsprintk(ioc, ioc_info(ioc, "Old Port table\n")); + port_table_entry = NULL; + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { + drsprintk(ioc, ioc_info(ioc, + "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", + port_table_entry->port_id, + port_table_entry->phy_mask, + port_table_entry->sas_address)); + } + + for (j = 0; j < port_count; j++) { + ret = _scsih_look_and_get_matched_port_entry(ioc, + 
&port_table[j], &port_entry, &count); + if (!port_entry) { + drsprintk(ioc, ioc_info(ioc, + "No Matched entry for sas_addr(0x%16llx), Port:%d\n", + port_table[j].sas_address, + port_table[j].port_id)); + continue; + } + + switch (ret) { + case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT: + case MATCHED_WITH_ADDR_AND_SUBPHYMASK: + _scsih_add_or_del_phys_from_existing_port(ioc, + port_entry, port_table, j, port_count); + break; + case MATCHED_WITH_ADDR: + sas_addr = port_table[j].sas_address; + for (i = 0; i < port_count; i++) { + if (port_table[i].sas_address == sas_addr) + lcount++; + } + + if (count > 1 || lcount > 1) + port_entry = NULL; + else + _scsih_add_or_del_phys_from_existing_port(ioc, + port_entry, port_table, j, port_count); + } + + if (!port_entry) + continue; + + if (port_entry->port_id != port_table[j].port_id) + port_entry->port_id = port_table[j].port_id; + port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT; + port_entry->phy_mask = port_table[j].phy_mask; + } + + port_table_entry = NULL; +} + +/** + * _scsih_alloc_vphy - allocate virtual_phy object + * @ioc: per adapter object + * @port_id: Port ID number + * @phy_num: HBA Phy number + * + * Returns allocated virtual_phy object. + */ +static struct virtual_phy * +_scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num) +{ + struct virtual_phy *vphy; + struct hba_port *port; + + port = mpt3sas_get_port_by_id(ioc, port_id, 0); + if (!port) + return NULL; + + vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num); + if (!vphy) { + vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL); + if (!vphy) + return NULL; + + if (!port->vphys_mask) + INIT_LIST_HEAD(&port->vphys_list); + + /* + * Enable bit corresponding to HBA phy number on its + * parent hba_port object's vphys_mask field. + */ + port->vphys_mask |= (1 << phy_num); + vphy->phy_mask |= (1 << phy_num); + + list_add_tail(&vphy->list, &port->vphys_list); + + ioc_info(ioc, + "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n", + vphy, port->port_id, phy_num); + } + return vphy; +} + +/** + * _scsih_sas_host_refresh - refreshing sas host object contents + * @ioc: per adapter object + * Context: user + * + * During port enable, fw will send topology events for every device. Its + * possible that the handles may change from the previous setting, so this + * code keeping handles updating if changed. 
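+ *
+ * Illustrative call pattern, as used by _scsih_sas_topology_change_event()
+ * (sketch only):
+ *
+ *	if (!ioc->sas_hba.num_phys)
+ *		_scsih_sas_host_add(ioc);	// first event: create sas_hba
+ *	else
+ *		_scsih_sas_host_refresh(ioc);	// later events: update handles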
+ */ +static void +_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) +{ + u16 sz; + u16 ioc_status; + int i; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u8 link_rate, port_id; + struct hba_port *port; + Mpi2SasPhyPage0_t phy_pg0; + + dtmprintk(ioc, + ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n", + (u64)ioc->sas_hba.sas_address)); + + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys + * sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < ioc->sas_hba.num_phys ; i++) { + link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4; + if (i == 0) + ioc->sas_hba.handle = le16_to_cpu( + sas_iounit_pg0->PhyData[0].ControllerDevHandle); + port_id = sas_iounit_pg0->PhyData[i].Port; + if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) { + port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); + if (!port) + goto out; + + port->port_id = port_id; + ioc_info(ioc, + "hba_port entry: %p, port: %d is added to hba_port list\n", + port, port->port_id); + if (ioc->shost_recovery) + port->flags = HBA_PORT_FLAG_NEW_PORT; + list_add_tail(&port->list, &ioc->port_table_list); + } + /* + * Check whether current Phy belongs to HBA vSES device or not. + */ + if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) & + MPI2_SAS_DEVICE_INFO_SEP && + (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) { + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, + &phy_pg0, i))) { + ioc_err(ioc, + "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + if (!(le32_to_cpu(phy_pg0.PhyInfo) & + MPI2_SAS_PHYINFO_VIRTUAL_PHY)) + continue; + /* + * Allocate a virtual_phy object for vSES device, if + * this vSES device is hot added. + */ + if (!_scsih_alloc_vphy(ioc, port_id, i)) + goto out; + ioc->sas_hba.phy[i].hba_vphy = 1; + } + + /* + * Add new HBA phys to STL if these new phys got added as part + * of HBA Firmware upgrade/downgrade operation. + */ + if (!ioc->sas_hba.phy[i].phy) { + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, + &phy_pg0, i))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + ioc->sas_hba.phy[i].phy_id = i; + mpt3sas_transport_add_host_phy(ioc, + &ioc->sas_hba.phy[i], phy_pg0, + ioc->sas_hba.parent_dev); + continue; + } + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; + attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. + AttachedDevHandle); + if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) + link_rate = MPI2_SAS_NEG_LINK_RATE_1_5; + ioc->sas_hba.phy[i].port = + mpt3sas_get_port_by_id(ioc, port_id, 0); + mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address, + attached_handle, i, link_rate, + ioc->sas_hba.phy[i].port); + } + /* + * Clear the phy details if this phy got disabled as part of + * HBA Firmware upgrade/downgrade operation. 
+ */ + for (i = ioc->sas_hba.num_phys; + i < ioc->sas_hba.nr_phys_allocated; i++) { + if (ioc->sas_hba.phy[i].phy && + ioc->sas_hba.phy[i].phy->negotiated_linkrate >= + SAS_LINK_RATE_1_5_GBPS) + mpt3sas_transport_update_links(ioc, + ioc->sas_hba.sas_address, 0, i, + MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL); + } + out: + kfree(sas_iounit_pg0); +} + +/** + * _scsih_sas_host_add - create sas host object + * @ioc: per adapter object + * + * Creating host side data object, stored in ioc->sas_hba + */ +static void +_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) +{ + int i; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi2SasPhyPage0_t phy_pg0; + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2SasEnclosurePage0_t enclosure_pg0; + u16 ioc_status; + u16 sz; + u8 device_missing_delay; + u8 num_phys, port_id; + struct hba_port *port; + + mpt3sas_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + ioc->sas_hba.nr_phys_allocated = max_t(u8, + MPT_MAX_HBA_NUM_PHYS, num_phys); + ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated, + sizeof(struct _sas_phy), GFP_KERNEL); + if (!ioc->sas_hba.phy) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + ioc->sas_hba.num_phys = num_phys; + + /* sas_iounit page 0 */ + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + /* sas_iounit page 1 */ + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit1PhyData_t)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + ioc->io_missing_delay = + sas_iounit_pg1->IODeviceMissingDelay; + device_missing_delay = + sas_iounit_pg1->ReportDeviceMissingDelay; + if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) + ioc->device_missing_delay = (device_missing_delay & + MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; + else + ioc->device_missing_delay = device_missing_delay & + MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; + + ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev; + for (i = 0; i < ioc->sas_hba.num_phys ; i++) { + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + i))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if 
(ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + if (i == 0) + ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> + PhyData[0].ControllerDevHandle); + + port_id = sas_iounit_pg0->PhyData[i].Port; + if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) { + port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); + if (!port) + goto out; + + port->port_id = port_id; + ioc_info(ioc, + "hba_port entry: %p, port: %d is added to hba_port list\n", + port, port->port_id); + list_add_tail(&port->list, + &ioc->port_table_list); + } + + /* + * Check whether current Phy belongs to HBA vSES device or not. + */ + if ((le32_to_cpu(phy_pg0.PhyInfo) & + MPI2_SAS_PHYINFO_VIRTUAL_PHY) && + (phy_pg0.NegotiatedLinkRate >> 4) >= + MPI2_SAS_NEG_LINK_RATE_1_5) { + /* + * Allocate a virtual_phy object for vSES device. + */ + if (!_scsih_alloc_vphy(ioc, port_id, i)) + goto out; + ioc->sas_hba.phy[i].hba_vphy = 1; + } + + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; + ioc->sas_hba.phy[i].phy_id = i; + ioc->sas_hba.phy[i].port = + mpt3sas_get_port_by_id(ioc, port_id, 0); + mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], + phy_pg0, ioc->sas_hba.parent_dev); + } + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + ioc->sas_hba.enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n", + ioc->sas_hba.handle, + (u64)ioc->sas_hba.sas_address, + ioc->sas_hba.num_phys); + + if (ioc->sas_hba.enclosure_handle) { + if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, + &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, + ioc->sas_hba.enclosure_handle))) + ioc->sas_hba.enclosure_logical_id = + le64_to_cpu(enclosure_pg0.EnclosureLogicalID); + } + + out: + kfree(sas_iounit_pg1); + kfree(sas_iounit_pg0); +} + +/** + * _scsih_expander_add - creating expander object + * @ioc: per adapter object + * @handle: expander handle + * + * Creating expander object, stored in ioc->sas_expander_list. + * + * Return: 0 for success, else error. 
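+ *
+ * Note (summary of the code below): topology events can arrive out of order,
+ * so when the parent is another expander that is not yet known, the routine
+ * first recurses on the parent handle, roughly (simplified sketch):
+ *
+ *	if (sas_address_parent != ioc->sas_hba.sas_address && !sas_expander)
+ *		rc = _scsih_expander_add(ioc, parent_handle);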
+ */ +static int +_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _sas_node *sas_expander; + struct _enclosure_node *enclosure_dev; + Mpi2ConfigReply_t mpi_reply; + Mpi2ExpanderPage0_t expander_pg0; + Mpi2ExpanderPage1_t expander_pg1; + u32 ioc_status; + u16 parent_handle; + u64 sas_address, sas_address_parent = 0; + int i; + unsigned long flags; + struct _sas_port *mpt3sas_port = NULL; + u8 port_id; + + int rc = 0; + + if (!handle) + return -1; + + if (ioc->shost_recovery || ioc->pci_error_recovery) + return -1; + + if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, + MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + + /* handle out of order topology events */ + parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); + if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent) + != 0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + + port_id = expander_pg0.PhysicalPort; + if (sas_address_parent != ioc->sas_hba.sas_address) { + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address_parent, + mpt3sas_get_port_by_id(ioc, port_id, 0)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (!sas_expander) { + rc = _scsih_expander_add(ioc, parent_handle); + if (rc != 0) + return rc; + } + } + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_address = le64_to_cpu(expander_pg0.SASAddress); + sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + if (sas_expander) + return 0; + + sas_expander = kzalloc(sizeof(struct _sas_node), + GFP_KERNEL); + if (!sas_expander) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + + sas_expander->handle = handle; + sas_expander->num_phys = expander_pg0.NumPhys; + sas_expander->sas_address_parent = sas_address_parent; + sas_expander->sas_address = sas_address; + sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0); + if (!sas_expander->port) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + + ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", + handle, parent_handle, + (u64)sas_expander->sas_address, sas_expander->num_phys); + + if (!sas_expander->num_phys) { + rc = -1; + goto out_fail; + } + sas_expander->phy = kcalloc(sas_expander->num_phys, + sizeof(struct _sas_phy), GFP_KERNEL); + if (!sas_expander->phy) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + + INIT_LIST_HEAD(&sas_expander->sas_port_list); + mpt3sas_port = mpt3sas_transport_port_add(ioc, handle, + sas_address_parent, sas_expander->port); + if (!mpt3sas_port) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + sas_expander->parent_dev = &mpt3sas_port->rphy->dev; + sas_expander->rphy = mpt3sas_port->rphy; + + for (i = 0 ; i < sas_expander->num_phys ; i++) { + if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, + 
&expander_pg1, i, handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + sas_expander->phy[i].handle = handle; + sas_expander->phy[i].phy_id = i; + sas_expander->phy[i].port = + mpt3sas_get_port_by_id(ioc, port_id, 0); + + if ((mpt3sas_transport_add_expander_phy(ioc, + &sas_expander->phy[i], expander_pg1, + sas_expander->parent_dev))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + } + + if (sas_expander->enclosure_handle) { + enclosure_dev = + mpt3sas_scsih_enclosure_find_by_handle(ioc, + sas_expander->enclosure_handle); + if (enclosure_dev) + sas_expander->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + } + + _scsih_expander_node_add(ioc, sas_expander); + return 0; + + out_fail: + + if (mpt3sas_port) + mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, + sas_address_parent, sas_expander->port); + kfree(sas_expander); + return rc; +} + +/** + * mpt3sas_expander_remove - removing expander object + * @ioc: per adapter object + * @sas_address: expander sas_address + * @port: hba port entry + */ +void +mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + struct hba_port *port) +{ + struct _sas_node *sas_expander; + unsigned long flags; + + if (ioc->shost_recovery) + return; + + if (!port) + return; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address, port); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (sas_expander) + _scsih_expander_node_remove(ioc, sas_expander); +} + +/** + * _scsih_done - internal SCSI_IO callback handler. + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Callback handler when sending internal generated SCSI_IO. + * The callback index passed is `ioc->scsih_cb_idx` + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
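+ *
+ * Hedged sketch of the issuing side (assumed, not shown in this excerpt):
+ * the caller typically marks ioc->scsih_cmds.status as MPT3_CMD_PENDING,
+ * posts the internal SCSI_IO and then sleeps on the completion that this
+ * callback signals; timeout_secs and the timed_out label below are
+ * placeholders, illustrative only:
+ *
+ *	init_completion(&ioc->scsih_cmds.done);
+ *	// ... build and post the internal SCSI_IO here ...
+ *	wait_for_completion_timeout(&ioc->scsih_cmds.done, timeout_secs * HZ);
+ *	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE))
+ *		goto timed_out;		// hypothetical label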
+ */ +static u8 +_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED) + return 1; + if (ioc->scsih_cmds.smid != smid) + return 1; + ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE; + if (mpi_reply) { + memcpy(ioc->scsih_cmds.reply, mpi_reply, + mpi_reply->MsgLength*4); + ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID; + } + ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING; + complete(&ioc->scsih_cmds.done); + return 1; +} + + + + +#define MPT3_MAX_LUNS (255) + + +/** + * _scsih_check_access_status - check access flags + * @ioc: per adapter object + * @sas_address: sas address + * @handle: sas device handle + * @access_status: errors returned during discovery of the device + * + * Return: 0 for success, else failure + */ +static u8 +_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + u16 handle, u8 access_status) +{ + u8 rc = 1; + char *desc = NULL; + + switch (access_status) { + case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS: + case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION: + rc = 0; + break; + case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED: + desc = "sata capability failed"; + break; + case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT: + desc = "sata affiliation conflict"; + break; + case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE: + desc = "route not addressable"; + break; + case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE: + desc = "smp error not addressable"; + break; + case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED: + desc = "device blocked"; + break; + case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX: + desc = "sata initialization failed"; + break; + default: + desc = "unknown"; + break; + } + + if (!rc) + return 0; + + ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n", + desc, (u64)sas_address, handle); + return rc; +} + +/** + * _scsih_check_device - checking device responsiveness + * @ioc: per adapter object + * @parent_sas_address: sas address of parent expander or sas host + * @handle: attached device handle + * @phy_number: phy number + * @link_rate: new link rate + */ +static void +_scsih_check_device(struct MPT3SAS_ADAPTER *ioc, + u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + struct _sas_device *sas_device = NULL; + struct _enclosure_node *enclosure_dev = NULL; + u32 ioc_status; + unsigned long flags; + u64 sas_address; + struct scsi_target *starget; + struct MPT3SAS_TARGET *sas_target_priv_data; + u32 device_info; + struct hba_port *port; + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) + return; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + return; + + /* wide port handling ~ we need 
only handle device once for the phy that + * is matched in sas device page zero + */ + if (phy_number != sas_device_pg0.PhyNum) + return; + + /* check if this is end device */ + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + if (!(_scsih_is_end_device(device_info))) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0); + if (!port) + goto out_unlock; + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + sas_address, port); + + if (!sas_device) + goto out_unlock; + + if (unlikely(sas_device->handle != handle)) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + starget_printk(KERN_INFO, starget, + "handle changed from(0x%04x) to (0x%04x)!!!\n", + sas_device->handle, handle); + sas_target_priv_data->handle = handle; + sas_device->handle = handle; + if (le16_to_cpu(sas_device_pg0.Flags) & + MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + sas_device->enclosure_level = + sas_device_pg0.EnclosureLevel; + memcpy(sas_device->connector_name, + sas_device_pg0.ConnectorName, 4); + sas_device->connector_name[4] = '\0'; + } else { + sas_device->enclosure_level = 0; + sas_device->connector_name[0] = '\0'; + } + + sas_device->enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + sas_device->is_chassis_slot_valid = 0; + enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc, + sas_device->enclosure_handle); + if (enclosure_dev) { + sas_device->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + if (le16_to_cpu(enclosure_dev->pg0.Flags) & + MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { + sas_device->is_chassis_slot_valid = 1; + sas_device->chassis_slot = + enclosure_dev->pg0.ChassisSlot; + } + } + } + + /* check if device is present */ + if (!(le16_to_cpu(sas_device_pg0.Flags) & + MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { + ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n", + handle); + goto out_unlock; + } + + /* check if there were any issues with discovery */ + if (_scsih_check_access_status(ioc, sas_address, handle, + sas_device_pg0.AccessStatus)) + goto out_unlock; + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + _scsih_ublock_io_device(ioc, sas_address, port); + + if (sas_device) + sas_device_put(sas_device); + return; + +out_unlock: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device) + sas_device_put(sas_device); +} + +/** + * _scsih_add_device - creating sas device object + * @ioc: per adapter object + * @handle: sas device handle + * @phy_num: phy number end device attached to + * @is_pd: is this hidden raid component + * + * Creating end device object, stored in ioc->sas_device_list. + * + * Return: 0 for success, non-zero for failure. 
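+ *
+ * Descriptive note (summary of the code below): on success the new
+ * _sas_device object is registered through _scsih_sas_device_init_add()
+ * while ioc->wait_for_discovery_to_complete is set, and through
+ * _scsih_sas_device_add() otherwise.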
+ */ +static int +_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, + u8 is_pd) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + struct _sas_device *sas_device; + struct _enclosure_node *enclosure_dev = NULL; + u32 ioc_status; + u64 sas_address; + u32 device_info; + u8 port_id; + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + + /* check if this is end device */ + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + if (!(_scsih_is_end_device(device_info))) + return -1; + set_bit(handle, ioc->pend_os_device_add); + sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + + /* check if device is present */ + if (!(le16_to_cpu(sas_device_pg0.Flags) & + MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { + ioc_err(ioc, "device is not present handle(0x04%x)!!!\n", + handle); + return -1; + } + + /* check if there were any issues with discovery */ + if (_scsih_check_access_status(ioc, sas_address, handle, + sas_device_pg0.AccessStatus)) + return -1; + + port_id = sas_device_pg0.PhysicalPort; + sas_device = mpt3sas_get_sdev_by_addr(ioc, + sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0)); + if (sas_device) { + clear_bit(handle, ioc->pend_os_device_add); + sas_device_put(sas_device); + return -1; + } + + if (sas_device_pg0.EnclosureHandle) { + enclosure_dev = + mpt3sas_scsih_enclosure_find_by_handle(ioc, + le16_to_cpu(sas_device_pg0.EnclosureHandle)); + if (enclosure_dev == NULL) + ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n", + sas_device_pg0.EnclosureHandle); + } + + sas_device = kzalloc(sizeof(struct _sas_device), + GFP_KERNEL); + if (!sas_device) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return 0; + } + + kref_init(&sas_device->refcount); + sas_device->handle = handle; + if (_scsih_get_sas_address(ioc, + le16_to_cpu(sas_device_pg0.ParentDevHandle), + &sas_device->sas_address_parent) != 0) + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + sas_device->enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + if (sas_device->enclosure_handle != 0) + sas_device->slot = + le16_to_cpu(sas_device_pg0.Slot); + sas_device->device_info = device_info; + sas_device->sas_address = sas_address; + sas_device->phy = sas_device_pg0.PhyNum; + sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & + MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 
1 : 0; + sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0); + if (!sas_device->port) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + if (le16_to_cpu(sas_device_pg0.Flags) + & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + sas_device->enclosure_level = + sas_device_pg0.EnclosureLevel; + memcpy(sas_device->connector_name, + sas_device_pg0.ConnectorName, 4); + sas_device->connector_name[4] = '\0'; + } else { + sas_device->enclosure_level = 0; + sas_device->connector_name[0] = '\0'; + } + /* get enclosure_logical_id & chassis_slot*/ + sas_device->is_chassis_slot_valid = 0; + if (enclosure_dev) { + sas_device->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + if (le16_to_cpu(enclosure_dev->pg0.Flags) & + MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { + sas_device->is_chassis_slot_valid = 1; + sas_device->chassis_slot = + enclosure_dev->pg0.ChassisSlot; + } + } + + /* get device name */ + sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); + sas_device->port_type = sas_device_pg0.MaxPortConnections; + ioc_info(ioc, + "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n", + handle, sas_device->sas_address, sas_device->port_type); + + if (ioc->wait_for_discovery_to_complete) + _scsih_sas_device_init_add(ioc, sas_device); + else + _scsih_sas_device_add(ioc, sas_device); + +out: + sas_device_put(sas_device); + return 0; +} + +/** + * _scsih_remove_device - removing sas device object + * @ioc: per adapter object + * @sas_device: the sas_device object + */ +static void +_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + struct MPT3SAS_TARGET *sas_target_priv_data; + + if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) && + (sas_device->pfa_led_on)) { + _scsih_turn_off_pfa_led(ioc, sas_device); + sas_device->pfa_led_on = 0; + } + + dewtprintk(ioc, + ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", + __func__, + sas_device->handle, (u64)sas_device->sas_address)); + + dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, + NULL, NULL)); + + if (sas_device->starget && sas_device->starget->hostdata) { + sas_target_priv_data = sas_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + _scsih_ublock_io_device(ioc, sas_device->sas_address, + sas_device->port); + sas_target_priv_data->handle = + MPT3SAS_INVALID_DEVICE_HANDLE; + } + + if (!ioc->hide_drives) + mpt3sas_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent, + sas_device->port); + + ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n", + sas_device->handle, (u64)sas_device->sas_address); + + _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); + + dewtprintk(ioc, + ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", + __func__, + sas_device->handle, (u64)sas_device->sas_address)); + dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, + NULL, NULL)); +} + +/** + * _scsih_sas_topology_change_event_debug - debug for topology event + * @ioc: per adapter object + * @event_data: event data payload + * Context: user. 
+ */ +static void +_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasTopologyChangeList_t *event_data) +{ + int i; + u16 handle; + u16 reason_code; + u8 phy_number; + char *status_str = NULL; + u8 link_rate, prev_link_rate; + + switch (event_data->ExpStatus) { + case MPI2_EVENT_SAS_TOPO_ES_ADDED: + status_str = "add"; + break; + case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING: + status_str = "remove"; + break; + case MPI2_EVENT_SAS_TOPO_ES_RESPONDING: + case 0: + status_str = "responding"; + break; + case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: + status_str = "remove delay"; + break; + default: + status_str = "unknown status"; + break; + } + ioc_info(ioc, "sas topology change: (%s)\n", status_str); + pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \ + "start_phy(%02d), count(%d)\n", + le16_to_cpu(event_data->ExpanderDevHandle), + le16_to_cpu(event_data->EnclosureHandle), + event_data->StartPhyNum, event_data->NumEntries); + for (i = 0; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + phy_number = event_data->StartPhyNum + i; + reason_code = event_data->PHY[i].PhyStatus & + MPI2_EVENT_SAS_TOPO_RC_MASK; + switch (reason_code) { + case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: + status_str = "target add"; + break; + case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: + status_str = "target remove"; + break; + case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: + status_str = "delay target remove"; + break; + case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: + status_str = "link rate change"; + break; + case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE: + status_str = "target responding"; + break; + default: + status_str = "unknown"; + break; + } + link_rate = event_data->PHY[i].LinkRate >> 4; + prev_link_rate = event_data->PHY[i].LinkRate & 0xF; + pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \ + " link rate: new(0x%02x), old(0x%02x)\n", phy_number, + handle, status_str, link_rate, prev_link_rate); + + } +} + +/** + * _scsih_sas_topology_change_event - handle topology changes + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. 
+ * + */ +static int +_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + int i; + u16 parent_handle, handle; + u16 reason_code; + u8 phy_number, max_phys; + struct _sas_node *sas_expander; + u64 sas_address; + unsigned long flags; + u8 link_rate, prev_link_rate; + struct hba_port *port; + Mpi2EventDataSasTopologyChangeList_t *event_data = + (Mpi2EventDataSasTopologyChangeList_t *) + fw_event->event_data; + + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + _scsih_sas_topology_change_event_debug(ioc, event_data); + + if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) + return 0; + + if (!ioc->sas_hba.num_phys) + _scsih_sas_host_add(ioc); + else + _scsih_sas_host_refresh(ioc); + + if (fw_event->ignore) { + dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n")); + return 0; + } + + parent_handle = le16_to_cpu(event_data->ExpanderDevHandle); + port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0); + + /* handle expander add */ + if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED) + if (_scsih_expander_add(ioc, parent_handle) != 0) + return 0; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc, + parent_handle); + if (sas_expander) { + sas_address = sas_expander->sas_address; + max_phys = sas_expander->num_phys; + port = sas_expander->port; + } else if (parent_handle < ioc->sas_hba.num_phys) { + sas_address = ioc->sas_hba.sas_address; + max_phys = ioc->sas_hba.num_phys; + } else { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return 0; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + /* handle siblings events */ + for (i = 0; i < event_data->NumEntries; i++) { + if (fw_event->ignore) { + dewtprintk(ioc, + ioc_info(ioc, "ignoring expander event\n")); + return 0; + } + if (ioc->remove_host || ioc->pci_error_recovery) + return 0; + phy_number = event_data->StartPhyNum + i; + if (phy_number >= max_phys) + continue; + reason_code = event_data->PHY[i].PhyStatus & + MPI2_EVENT_SAS_TOPO_RC_MASK; + if ((event_data->PHY[i].PhyStatus & + MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code != + MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) + continue; + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + link_rate = event_data->PHY[i].LinkRate >> 4; + prev_link_rate = event_data->PHY[i].LinkRate & 0xF; + switch (reason_code) { + case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: + + if (ioc->shost_recovery) + break; + + if (link_rate == prev_link_rate) + break; + + mpt3sas_transport_update_links(ioc, sas_address, + handle, phy_number, link_rate, port); + + if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) + break; + + _scsih_check_device(ioc, sas_address, handle, + phy_number, link_rate); + + if (!test_bit(handle, ioc->pend_os_device_add)) + break; + + fallthrough; + + case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: + + if (ioc->shost_recovery) + break; + + mpt3sas_transport_update_links(ioc, sas_address, + handle, phy_number, link_rate, port); + + _scsih_add_device(ioc, handle, phy_number, 0); + + break; + case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: + + _scsih_device_remove_by_handle(ioc, handle); + break; + } + } + + /* handle expander removal */ + if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING && + sas_expander) + mpt3sas_expander_remove(ioc, sas_address, port); + + return 0; +} + +/** + * _scsih_sas_device_status_change_event_debug - debug for device event + * @ioc: ? 
+ * @event_data: event data payload + * Context: user. + */ +static void +_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasDeviceStatusChange_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->ReasonCode) { + case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA: + reason_str = "smart data"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: + reason_str = "unsupported device discovered"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: + reason_str = "internal device reset"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: + reason_str = "internal task abort"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: + reason_str = "internal task abort set"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: + reason_str = "internal clear task set"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: + reason_str = "internal query task"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE: + reason_str = "sata init failure"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET: + reason_str = "internal device reset complete"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: + reason_str = "internal task abort complete"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: + reason_str = "internal async notification"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality complete"; + break; + default: + reason_str = "unknown reason"; + break; + } + ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)", + reason_str, le16_to_cpu(event_data->DevHandle), + (u64)le64_to_cpu(event_data->SASAddress), + le16_to_cpu(event_data->TaskTag)); + if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA) + pr_cont(", ASC(0x%x), ASCQ(0x%x)\n", + event_data->ASC, event_data->ASCQ); + pr_cont("\n"); +} + +/** + * _scsih_sas_device_status_change_event - handle device status change + * @ioc: per adapter object + * @event_data: The fw event + * Context: user. + */ +static void +_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasDeviceStatusChange_t *event_data) +{ + struct MPT3SAS_TARGET *target_priv_data; + struct _sas_device *sas_device; + u64 sas_address; + unsigned long flags; + + /* In MPI Revision K (0xC), the internal device reset complete was + * implemented, so avoid setting tm_busy flag for older firmware. 
+ */ + if ((ioc->facts.HeaderVersion >> 8) < 0xC) + return; + + if (event_data->ReasonCode != + MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && + event_data->ReasonCode != + MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_address = le64_to_cpu(event_data->SASAddress); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + sas_address, + mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0)); + + if (!sas_device || !sas_device->starget) + goto out; + + target_priv_data = sas_device->starget->hostdata; + if (!target_priv_data) + goto out; + + if (event_data->ReasonCode == + MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) + target_priv_data->tm_busy = 1; + else + target_priv_data->tm_busy = 0; + + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + ioc_info(ioc, + "%s tm_busy flag for handle(0x%04x)\n", + (target_priv_data->tm_busy == 1) ? "Enable" : "Disable", + target_priv_data->handle); + +out: + if (sas_device) + sas_device_put(sas_device); + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + + +/** + * _scsih_check_pcie_access_status - check access flags + * @ioc: per adapter object + * @wwid: wwid + * @handle: sas device handle + * @access_status: errors returned during discovery of the device + * + * Return: 0 for success, else failure + */ +static u8 +_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid, + u16 handle, u8 access_status) +{ + u8 rc = 1; + char *desc = NULL; + + switch (access_status) { + case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS: + case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION: + rc = 0; + break; + case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED: + desc = "PCIe device capability failed"; + break; + case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED: + desc = "PCIe device blocked"; + ioc_info(ioc, + "Device with Access Status (%s): wwid(0x%016llx), " + "handle(0x%04x)\n ll only be added to the internal list", + desc, (u64)wwid, handle); + rc = 0; + break; + case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED: + desc = "PCIe device mem space access failed"; + break; + case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE: + desc = "PCIe device unsupported"; + break; + case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED: + desc = "PCIe device MSIx Required"; + break; + case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX: + desc = "PCIe device init fail max"; + break; + case MPI26_PCIEDEV0_ASTATUS_UNKNOWN: + desc = "PCIe device status unknown"; + break; + case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT: + desc = "nvme ready timeout"; + break; + case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED: + desc = "nvme device configuration unsupported"; + break; + case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED: + desc = "nvme identify failed"; + break; + case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED: + desc = "nvme qconfig failed"; + break; + case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED: + desc = "nvme qcreation failed"; + break; + case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED: + desc = "nvme eventcfg failed"; + break; + case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED: + desc = "nvme get feature stat failed"; + break; + case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT: + desc = "nvme idle timeout"; + break; + case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS: + desc = "nvme failure status"; + break; + default: + ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n", + access_status, (u64)wwid, handle); + return rc; + } + + if (!rc) + return rc; + + ioc_info(ioc, "NVMe 
discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n", + desc, (u64)wwid, handle); + return rc; +} + +/** + * _scsih_pcie_device_remove_from_sml - removing pcie device + * from SML and free up associated memory + * @ioc: per adapter object + * @pcie_device: the pcie_device object + */ +static void +_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, + struct _pcie_device *pcie_device) +{ + struct MPT3SAS_TARGET *sas_target_priv_data; + + dewtprintk(ioc, + ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", + __func__, + pcie_device->handle, (u64)pcie_device->wwid)); + if (pcie_device->enclosure_handle != 0) + dewtprintk(ioc, + ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n", + __func__, + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot)); + if (pcie_device->connector_name[0] != '\0') + dewtprintk(ioc, + ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n", + __func__, + pcie_device->enclosure_level, + pcie_device->connector_name)); + + if (pcie_device->starget && pcie_device->starget->hostdata) { + sas_target_priv_data = pcie_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL); + sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; + } + + ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", + pcie_device->handle, (u64)pcie_device->wwid); + if (pcie_device->enclosure_handle != 0) + ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n", + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot); + if (pcie_device->connector_name[0] != '\0') + ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n", + pcie_device->enclosure_level, + pcie_device->connector_name); + + if (pcie_device->starget && (pcie_device->access_status != + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)) + scsi_remove_target(&pcie_device->starget->dev); + dewtprintk(ioc, + ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", + __func__, + pcie_device->handle, (u64)pcie_device->wwid)); + if (pcie_device->enclosure_handle != 0) + dewtprintk(ioc, + ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n", + __func__, + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot)); + if (pcie_device->connector_name[0] != '\0') + dewtprintk(ioc, + ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n", + __func__, + pcie_device->enclosure_level, + pcie_device->connector_name)); + + kfree(pcie_device->serial_number); +} + + +/** + * _scsih_pcie_check_device - checking device responsiveness + * @ioc: per adapter object + * @handle: attached device handle + */ +static void +_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi26PCIeDevicePage0_t pcie_device_pg0; + u32 ioc_status; + struct _pcie_device *pcie_device; + u64 wwid; + unsigned long flags; + struct scsi_target *starget; + struct MPT3SAS_TARGET *sas_target_priv_data; + u32 device_info; + + if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, + &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) + return; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + return; + + /* check if this is end device */ + device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); + if (!(_scsih_is_nvme_pciescsi_device(device_info))) + return; + + wwid = le64_to_cpu(pcie_device_pg0.WWID); + spin_lock_irqsave(&ioc->pcie_device_lock, 
flags);
+	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
+
+	if (!pcie_device) {
+		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+		return;
+	}
+
+	if (unlikely(pcie_device->handle != handle)) {
+		starget = pcie_device->starget;
+		sas_target_priv_data = starget->hostdata;
+		pcie_device->access_status = pcie_device_pg0.AccessStatus;
+		starget_printk(KERN_INFO, starget,
+		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
+		    pcie_device->handle, handle);
+		sas_target_priv_data->handle = handle;
+		pcie_device->handle = handle;
+
+		if (le32_to_cpu(pcie_device_pg0.Flags) &
+		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
+			pcie_device->enclosure_level =
+			    pcie_device_pg0.EnclosureLevel;
+			memcpy(&pcie_device->connector_name[0],
+			    &pcie_device_pg0.ConnectorName[0], 4);
+		} else {
+			pcie_device->enclosure_level = 0;
+			pcie_device->connector_name[0] = '\0';
+		}
+	}
+
+	/* check if device is present */
+	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
+	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
+		ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
+			 handle);
+		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+		pcie_device_put(pcie_device);
+		return;
+	}
+
+	/* check if there were any issues with discovery */
+	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
+	    pcie_device_pg0.AccessStatus)) {
+		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+		pcie_device_put(pcie_device);
+		return;
+	}
+
+	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+	pcie_device_put(pcie_device);
+
+	_scsih_ublock_io_device(ioc, wwid, NULL);
+
+	return;
+}
+
+/**
+ * _scsih_pcie_add_device - creating pcie device object
+ * @ioc: per adapter object
+ * @handle: pcie device handle
+ *
+ * Creating end device object, stored in ioc->pcie_device_list.
+ *
+ * Return: 1 means queue the event later, 0 means complete the event
+ */
+static int
+_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+	Mpi26PCIeDevicePage0_t pcie_device_pg0;
+	Mpi26PCIeDevicePage2_t pcie_device_pg2;
+	Mpi2ConfigReply_t mpi_reply;
+	struct _pcie_device *pcie_device;
+	struct _enclosure_node *enclosure_dev;
+	u32 ioc_status;
+	u64 wwid;
+
+	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
+		return 0;
+	}
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
+		return 0;
+	}
+
+	set_bit(handle, ioc->pend_os_device_add);
+	wwid = le64_to_cpu(pcie_device_pg0.WWID);
+
+	/* check if device is present */
+	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
+	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
+		ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
+			handle);
+		return 0;
+	}
+
+	/* check if there were any issues with discovery */
+	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
+	    pcie_device_pg0.AccessStatus))
+		return 0;
+
+	if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
+	    (pcie_device_pg0.DeviceInfo))))
+		return 0;
+
+	pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
+	if (pcie_device) {
+		clear_bit(handle, ioc->pend_os_device_add);
+		pcie_device_put(pcie_device);
+		return 0;
+	}
+
+	/* PCIe Device Page 2 contains read-only information about a
+	 * specific NVMe device; therefore, this page is only
+	 * valid for NVMe devices and is skipped for PCIe devices of type SCSI.
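+	 * Page 2 supplies the MDTS, shutdown latency and controller reset
+	 * timeout values that are copied into the new pcie_device below.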
+ */ + if (!(mpt3sas_scsih_is_pcie_scsi_device( + le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { + if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply, + &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + handle)) { + ioc_err(ioc, + "failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + return 0; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, + "failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + return 0; + } + } + + pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL); + if (!pcie_device) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return 0; + } + + kref_init(&pcie_device->refcount); + pcie_device->id = ioc->pcie_target_id++; + pcie_device->channel = PCIE_CHANNEL; + pcie_device->handle = handle; + pcie_device->access_status = pcie_device_pg0.AccessStatus; + pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); + pcie_device->wwid = wwid; + pcie_device->port_num = pcie_device_pg0.PortNum; + pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) & + MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0; + + pcie_device->enclosure_handle = + le16_to_cpu(pcie_device_pg0.EnclosureHandle); + if (pcie_device->enclosure_handle != 0) + pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot); + + if (le32_to_cpu(pcie_device_pg0.Flags) & + MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { + pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel; + memcpy(&pcie_device->connector_name[0], + &pcie_device_pg0.ConnectorName[0], 4); + } else { + pcie_device->enclosure_level = 0; + pcie_device->connector_name[0] = '\0'; + } + + /* get enclosure_logical_id */ + if (pcie_device->enclosure_handle) { + enclosure_dev = + mpt3sas_scsih_enclosure_find_by_handle(ioc, + pcie_device->enclosure_handle); + if (enclosure_dev) + pcie_device->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + } + /* TODO -- Add device name once FW supports it */ + if (!(mpt3sas_scsih_is_pcie_scsi_device( + le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { + pcie_device->nvme_mdts = + le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize); + pcie_device->shutdown_latency = + le16_to_cpu(pcie_device_pg2.ShutdownLatency); + /* + * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency + * if drive's RTD3 Entry Latency is greater then IOC's + * max_shutdown_latency. + */ + if (pcie_device->shutdown_latency > ioc->max_shutdown_latency) + ioc->max_shutdown_latency = + pcie_device->shutdown_latency; + if (pcie_device_pg2.ControllerResetTO) + pcie_device->reset_timeout = + pcie_device_pg2.ControllerResetTO; + else + pcie_device->reset_timeout = 30; + } else + pcie_device->reset_timeout = 30; + + if (ioc->wait_for_discovery_to_complete) + _scsih_pcie_device_init_add(ioc, pcie_device); + else + _scsih_pcie_device_add(ioc, pcie_device); + + pcie_device_put(pcie_device); + return 0; +} + +/** + * _scsih_pcie_topology_change_event_debug - debug for topology + * event + * @ioc: per adapter object + * @event_data: event data payload + * Context: user. 
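+ *
+ * Decodes the switch status and each port entry's reason code into
+ * human-readable strings and prints one line per attached device handle,
+ * including the new and previous link rates.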
+ */ +static void +_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi26EventDataPCIeTopologyChangeList_t *event_data) +{ + int i; + u16 handle; + u16 reason_code; + u8 port_number; + char *status_str = NULL; + u8 link_rate, prev_link_rate; + + switch (event_data->SwitchStatus) { + case MPI26_EVENT_PCIE_TOPO_SS_ADDED: + status_str = "add"; + break; + case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING: + status_str = "remove"; + break; + case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING: + case 0: + status_str = "responding"; + break; + case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING: + status_str = "remove delay"; + break; + default: + status_str = "unknown status"; + break; + } + ioc_info(ioc, "pcie topology change: (%s)\n", status_str); + pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)" + "start_port(%02d), count(%d)\n", + le16_to_cpu(event_data->SwitchDevHandle), + le16_to_cpu(event_data->EnclosureHandle), + event_data->StartPortNum, event_data->NumEntries); + for (i = 0; i < event_data->NumEntries; i++) { + handle = + le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); + if (!handle) + continue; + port_number = event_data->StartPortNum + i; + reason_code = event_data->PortEntry[i].PortStatus; + switch (reason_code) { + case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED: + status_str = "target add"; + break; + case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: + status_str = "target remove"; + break; + case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: + status_str = "delay target remove"; + break; + case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED: + status_str = "link rate change"; + break; + case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE: + status_str = "target responding"; + break; + default: + status_str = "unknown"; + break; + } + link_rate = event_data->PortEntry[i].CurrentPortInfo & + MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; + prev_link_rate = event_data->PortEntry[i].PreviousPortInfo & + MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; + pr_info("\tport(%02d), attached_handle(0x%04x): %s:" + " link rate: new(0x%02x), old(0x%02x)\n", port_number, + handle, status_str, link_rate, prev_link_rate); + } +} + +/** + * _scsih_pcie_topology_change_event - handle PCIe topology + * changes + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. 
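+ *
+ * Walks the port entries of the event: link-rate changes are re-validated
+ * through _scsih_pcie_check_device (and converted to a device add when the
+ * device is still pending), added devices go through _scsih_pcie_add_device,
+ * and devices that stopped responding are removed by handle.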
+ *
+ */
+static void
+_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
+	struct fw_event_work *fw_event)
+{
+	int i;
+	u16 handle;
+	u16 reason_code;
+	u8 link_rate, prev_link_rate;
+	unsigned long flags;
+	int rc;
+	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
+		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
+	struct _pcie_device *pcie_device;
+
+	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+		_scsih_pcie_topology_change_event_debug(ioc, event_data);
+
+	if (ioc->shost_recovery || ioc->remove_host ||
+	    ioc->pci_error_recovery)
+		return;
+
+	if (fw_event->ignore) {
+		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
+		return;
+	}
+
+	/* handle sibling events */
+	for (i = 0; i < event_data->NumEntries; i++) {
+		if (fw_event->ignore) {
+			dewtprintk(ioc,
+				   ioc_info(ioc, "ignoring switch event\n"));
+			return;
+		}
+		if (ioc->remove_host || ioc->pci_error_recovery)
+			return;
+		reason_code = event_data->PortEntry[i].PortStatus;
+		handle =
+			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+		if (!handle)
+			continue;
+
+		link_rate = event_data->PortEntry[i].CurrentPortInfo
+			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
+			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+
+		switch (reason_code) {
+		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+			if (ioc->shost_recovery)
+				break;
+			if (link_rate == prev_link_rate)
+				break;
+			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
+				break;
+
+			_scsih_pcie_check_device(ioc, handle);
+
+			/* The code after this point handles the case where a
+			 * device has been added but keeps returning BUSY for
+			 * some time. Then, before the Device Missing Delay
+			 * expires and the device becomes READY, the device is
+			 * removed and added back.
+			 */
+			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
+			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+			if (pcie_device) {
+				pcie_device_put(pcie_device);
+				break;
+			}
+
+			if (!test_bit(handle, ioc->pend_os_device_add))
+				break;
+
+			dewtprintk(ioc,
+				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
+					    handle));
+			event_data->PortEntry[i].PortStatus &= 0xF0;
+			event_data->PortEntry[i].PortStatus |=
+				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
+			fallthrough;
+		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
+			if (ioc->shost_recovery)
+				break;
+			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
+				break;
+
+			rc = _scsih_pcie_add_device(ioc, handle);
+			if (!rc) {
+				/* mark entry vacant */
+				/* TODO: this needs to be reviewed and fixed;
+				 * we don't have a reason code that marks an
+				 * entry as void/vacant.
+				 */
+				event_data->PortEntry[i].PortStatus |=
+					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
+			}
+			break;
+		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+			_scsih_pcie_device_remove_by_handle(ioc, handle);
+			break;
+		}
+	}
+}
+
+/**
+ * _scsih_pcie_device_status_change_event_debug - debug for device event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ */ +static void +_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi26EventDataPCIeDeviceStatusChange_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->ReasonCode) { + case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA: + reason_str = "smart data"; + break; + case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED: + reason_str = "unsupported device discovered"; + break; + case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET: + reason_str = "internal device reset"; + break; + case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL: + reason_str = "internal task abort"; + break; + case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL: + reason_str = "internal task abort set"; + break; + case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: + reason_str = "internal clear task set"; + break; + case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL: + reason_str = "internal query task"; + break; + case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE: + reason_str = "device init failure"; + break; + case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET: + reason_str = "internal device reset complete"; + break; + case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: + reason_str = "internal task abort complete"; + break; + case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION: + reason_str = "internal async notification"; + break; + case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED: + reason_str = "pcie hot reset failed"; + break; + default: + reason_str = "unknown reason"; + break; + } + + ioc_info(ioc, "PCIE device status change: (%s)\n" + "\thandle(0x%04x), WWID(0x%016llx), tag(%d)", + reason_str, le16_to_cpu(event_data->DevHandle), + (u64)le64_to_cpu(event_data->WWID), + le16_to_cpu(event_data->TaskTag)); + if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA) + pr_cont(", ASC(0x%x), ASCQ(0x%x)\n", + event_data->ASC, event_data->ASCQ); + pr_cont("\n"); +} + +/** + * _scsih_pcie_device_status_change_event - handle device status + * change + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + */ +static void +_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + struct MPT3SAS_TARGET *target_priv_data; + struct _pcie_device *pcie_device; + u64 wwid; + unsigned long flags; + Mpi26EventDataPCIeDeviceStatusChange_t *event_data = + (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data; + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + _scsih_pcie_device_status_change_event_debug(ioc, + event_data); + + if (event_data->ReasonCode != + MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET && + event_data->ReasonCode != + MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET) + return; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + wwid = le64_to_cpu(event_data->WWID); + pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); + + if (!pcie_device || !pcie_device->starget) + goto out; + + target_priv_data = pcie_device->starget->hostdata; + if (!target_priv_data) + goto out; + + if (event_data->ReasonCode == + MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET) + target_priv_data->tm_busy = 1; + else + target_priv_data->tm_busy = 0; +out: + if (pcie_device) + pcie_device_put(pcie_device); + + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); +} + +/** + * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure + * event + * @ioc: per adapter object + * @event_data: event data payload + * Context: user. 
+ */ +static void +_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasEnclDevStatusChange_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->ReasonCode) { + case MPI2_EVENT_SAS_ENCL_RC_ADDED: + reason_str = "enclosure add"; + break; + case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING: + reason_str = "enclosure remove"; + break; + default: + reason_str = "unknown reason"; + break; + } + + ioc_info(ioc, "enclosure status change: (%s)\n" + "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n", + reason_str, + le16_to_cpu(event_data->EnclosureHandle), + (u64)le64_to_cpu(event_data->EnclosureLogicalID), + le16_to_cpu(event_data->StartSlot)); +} + +/** + * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + */ +static void +_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi2ConfigReply_t mpi_reply; + struct _enclosure_node *enclosure_dev = NULL; + Mpi2EventDataSasEnclDevStatusChange_t *event_data = + (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data; + int rc; + u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle); + + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + _scsih_sas_enclosure_dev_status_change_event_debug(ioc, + (Mpi2EventDataSasEnclDevStatusChange_t *) + fw_event->event_data); + if (ioc->shost_recovery) + return; + + if (enclosure_handle) + enclosure_dev = + mpt3sas_scsih_enclosure_find_by_handle(ioc, + enclosure_handle); + switch (event_data->ReasonCode) { + case MPI2_EVENT_SAS_ENCL_RC_ADDED: + if (!enclosure_dev) { + enclosure_dev = + kzalloc(sizeof(struct _enclosure_node), + GFP_KERNEL); + if (!enclosure_dev) { + ioc_info(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, + &enclosure_dev->pg0, + MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, + enclosure_handle); + + if (rc || (le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK)) { + kfree(enclosure_dev); + return; + } + + list_add_tail(&enclosure_dev->list, + &ioc->enclosure_list); + } + break; + case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING: + if (enclosure_dev) { + list_del(&enclosure_dev->list); + kfree(enclosure_dev); + } + break; + default: + break; + } +} + +/** + * _scsih_sas_broadcast_primitive_event - handle broadcast events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. 
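+ *
+ * Blocks IO to all devices, then issues a QUERY_TASK for every outstanding
+ * SCSI IO; IOs whose query returns TM_SUCCEEDED or IO_QUEUED_ON_IOC are
+ * skipped, the rest are aborted with ABORT_TASK. The pass is retried when a
+ * task management request fails or another broadcast AEN arrives, giving up
+ * after five attempts.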
+ */ +static void +_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + struct scsi_cmnd *scmd; + struct scsi_device *sdev; + struct scsiio_tracker *st; + u16 smid, handle; + u32 lun; + struct MPT3SAS_DEVICE *sas_device_priv_data; + u32 termination_count; + u32 query_count; + Mpi2SCSITaskManagementReply_t *mpi_reply; + Mpi2EventDataSasBroadcastPrimitive_t *event_data = + (Mpi2EventDataSasBroadcastPrimitive_t *) + fw_event->event_data; + u16 ioc_status; + unsigned long flags; + int r; + u8 max_retries = 0; + u8 task_abort_retries; + + mutex_lock(&ioc->tm_cmds.mutex); + ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n", + __func__, event_data->PhyNum, event_data->PortWidth); + + _scsih_block_io_all_device(ioc); + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + mpi_reply = ioc->tm_cmds.reply; + broadcast_aen_retry: + + /* sanity checks for retrying this loop */ + if (max_retries++ == 5) { + dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__)); + goto out; + } else if (max_retries > 1) + dewtprintk(ioc, + ioc_info(ioc, "%s: %d retry\n", + __func__, max_retries - 1)); + + termination_count = 0; + query_count = 0; + for (smid = 1; smid <= ioc->scsiio_depth; smid++) { + if (ioc->shost_recovery) + goto out; + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + st = scsi_cmd_priv(scmd); + sdev = scmd->device; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) + continue; + /* skip hidden raid components */ + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) + continue; + /* skip volumes */ + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_VOLUME) + continue; + /* skip PCIe devices */ + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_PCIE_DEVICE) + continue; + + handle = sas_device_priv_data->sas_target->handle; + lun = sas_device_priv_data->lun; + query_count++; + + if (ioc->shost_recovery) + goto out; + + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun, + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid, + st->msix_io, 30, 0); + if (r == FAILED) { + sdev_printk(KERN_WARNING, sdev, + "mpt3sas_scsih_issue_tm: FAILED when sending " + "QUERY_TASK: scmd(%p)\n", scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) + & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + sdev_printk(KERN_WARNING, sdev, + "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n", + ioc_status, scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + + /* see if IO is still owned by IOC and target */ + if (mpi_reply->ResponseCode == + MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || + mpi_reply->ResponseCode == + MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) { + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + continue; + } + task_abort_retries = 0; + tm_retry: + if (task_abort_retries++ == 60) { + dewtprintk(ioc, + ioc_info(ioc, "%s: ABORT_TASK: giving up\n", + __func__)); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + + if (ioc->shost_recovery) + goto out_no_lock; + + r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, + sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, + st->smid, st->msix_io, 30, 0); + if (r == FAILED || st->cb_idx != 0xFF) { + sdev_printk(KERN_WARNING, sdev, + 
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " + "scmd(%p)\n", scmd); + goto tm_retry; + } + + if (task_abort_retries > 1) + sdev_printk(KERN_WARNING, sdev, + "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):" + " scmd(%p)\n", + task_abort_retries - 1, scmd); + + termination_count += le32_to_cpu(mpi_reply->TerminationCount); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + } + + if (ioc->broadcast_aen_pending) { + dewtprintk(ioc, + ioc_info(ioc, + "%s: loop back due to pending AEN\n", + __func__)); + ioc->broadcast_aen_pending = 0; + goto broadcast_aen_retry; + } + + out: + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + out_no_lock: + + dewtprintk(ioc, + ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n", + __func__, query_count, termination_count)); + + ioc->broadcast_aen_busy = 0; + if (!ioc->shost_recovery) + _scsih_ublock_io_all_device(ioc); + mutex_unlock(&ioc->tm_cmds.mutex); +} + +/** + * _scsih_sas_discovery_event - handle discovery events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + */ +static void +_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi2EventDataSasDiscovery_t *event_data = + (Mpi2EventDataSasDiscovery_t *) fw_event->event_data; + + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { + ioc_info(ioc, "discovery event: (%s)", + event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ? + "start" : "stop"); + if (event_data->DiscoveryStatus) + pr_cont("discovery_status(0x%08x)", + le32_to_cpu(event_data->DiscoveryStatus)); + pr_cont("\n"); + } + + if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED && + !ioc->sas_hba.num_phys) { + if (disable_discovery > 0 && ioc->shost_recovery) { + /* Wait for the reset to complete */ + while (ioc->shost_recovery) + ssleep(1); + } + _scsih_sas_host_add(ioc); + } +} + +/** + * _scsih_sas_device_discovery_error_event - display SAS device discovery error + * events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + */ +static void +_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi25EventDataSasDeviceDiscoveryError_t *event_data = + (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data; + + switch (event_data->ReasonCode) { + case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED: + ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n", + le16_to_cpu(event_data->DevHandle), + (u64)le64_to_cpu(event_data->SASAddress), + event_data->PhysicalPort); + break; + case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT: + ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n", + le16_to_cpu(event_data->DevHandle), + (u64)le64_to_cpu(event_data->SASAddress), + event_data->PhysicalPort); + break; + default: + break; + } +} + +/** + * _scsih_pcie_enumeration_event - handle enumeration events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + */ +static void +_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi26EventDataPCIeEnumeration_t *event_data = + (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data; + + if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)) + return; + + ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x", + (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ? 
+ "started" : "completed", + event_data->Flags); + if (event_data->EnumerationStatus) + pr_cont("enumeration_status(0x%08x)", + le32_to_cpu(event_data->EnumerationStatus)); + pr_cont("\n"); +} + +/** + * _scsih_ir_fastpath - turn on fastpath for IR physdisk + * @ioc: per adapter object + * @handle: device handle for physical disk + * @phys_disk_num: physical disk number + * + * Return: 0 for success, else failure. + */ +static int +_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) +{ + Mpi2RaidActionRequest_t *mpi_request; + Mpi2RaidActionReply_t *mpi_reply; + u16 smid; + u8 issue_reset = 0; + int rc = 0; + u16 ioc_status; + u32 log_info; + + if (ioc->hba_mpi_version_belonged == MPI2_VERSION) + return rc; + + mutex_lock(&ioc->scsih_cmds.mutex); + + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: scsih_cmd in use\n", __func__); + rc = -EAGAIN; + goto out; + } + ioc->scsih_cmds.status = MPT3_CMD_PENDING; + + smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + rc = -EAGAIN; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t)); + + mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; + mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN; + mpi_request->PhysDiskNum = phys_disk_num; + + dewtprintk(ioc, + ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n", + handle, phys_disk_num)); + + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); + + if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, mpi_request, + sizeof(Mpi2RaidActionRequest_t)/4, issue_reset); + rc = -EFAULT; + goto out; + } + + if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { + + mpi_reply = ioc->scsih_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) + log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + else + log_info = 0; + ioc_status &= MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dewtprintk(ioc, + ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n", + ioc_status, log_info)); + rc = -EFAULT; + } else + dewtprintk(ioc, + ioc_info(ioc, "IR RAID_ACTION: completed successfully\n")); + } + + out: + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); + + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return rc; +} + +/** + * _scsih_reprobe_lun - reprobing lun + * @sdev: scsi device struct + * @no_uld_attach: sdev->no_uld_attach flag setting + * + **/ +static void +_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach) +{ + sdev->no_uld_attach = no_uld_attach ? 1 : 0; + sdev_printk(KERN_INFO, sdev, "%s raid component\n", + sdev->no_uld_attach ? "hiding" : "exposing"); + WARN_ON(scsi_device_reprobe(sdev)); +} + +/** + * _scsih_sas_volume_add - add new volume + * @ioc: per adapter object + * @element: IR config element data + * Context: user. 
+ */ +static void +_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventIrConfigElement_t *element) +{ + struct _raid_device *raid_device; + unsigned long flags; + u64 wwid; + u16 handle = le16_to_cpu(element->VolDevHandle); + int rc; + + mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); + if (!wwid) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + + if (raid_device) + return; + + raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); + if (!raid_device) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + raid_device->id = ioc->sas_id++; + raid_device->channel = RAID_CHANNEL; + raid_device->handle = handle; + raid_device->wwid = wwid; + _scsih_raid_device_add(ioc, raid_device); + if (!ioc->wait_for_discovery_to_complete) { + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + } else { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + _scsih_determine_boot_device(ioc, raid_device, 1); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } +} + +/** + * _scsih_sas_volume_delete - delete volume + * @ioc: per adapter object + * @handle: volume device handle + * Context: user. + */ +static void +_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _raid_device *raid_device; + unsigned long flags; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct scsi_target *starget = NULL; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + if (raid_device) { + if (raid_device->starget) { + starget = raid_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->deleted = 1; + } + ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", + raid_device->handle, (u64)raid_device->wwid); + list_del(&raid_device->list); + kfree(raid_device); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (starget) + scsi_remove_target(&starget->dev); +} + +/** + * _scsih_sas_pd_expose - expose pd component to /dev/sdX + * @ioc: per adapter object + * @element: IR config element data + * Context: user. + */ +static void +_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventIrConfigElement_t *element) +{ + struct _sas_device *sas_device; + struct scsi_target *starget = NULL; + struct MPT3SAS_TARGET *sas_target_priv_data; + unsigned long flags; + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device) { + sas_device->volume_handle = 0; + sas_device->volume_wwid = 0; + clear_bit(handle, ioc->pd_handles); + if (sas_device->starget && sas_device->starget->hostdata) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->flags &= + ~MPT_TARGET_FLAGS_RAID_COMPONENT; + } + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!sas_device) + return; + + /* exposing raid component */ + if (starget) + starget_for_each_device(starget, NULL, _scsih_reprobe_lun); + + sas_device_put(sas_device); +} + +/** + * _scsih_sas_pd_hide - hide pd component from /dev/sdX + * @ioc: per adapter object + * @element: IR config element data + * Context: user. 
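+ *
+ * Records the owning volume handle/wwid, flags the target as a RAID
+ * component, enables the IR fast path for the disk and reprobes its LUNs
+ * with no_uld_attach set so they are no longer exposed as /dev/sdX.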
+ */ +static void +_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventIrConfigElement_t *element) +{ + struct _sas_device *sas_device; + struct scsi_target *starget = NULL; + struct MPT3SAS_TARGET *sas_target_priv_data; + unsigned long flags; + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + u16 volume_handle = 0; + u64 volume_wwid = 0; + + mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle); + if (volume_handle) + mpt3sas_config_get_volume_wwid(ioc, volume_handle, + &volume_wwid); + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device) { + set_bit(handle, ioc->pd_handles); + if (sas_device->starget && sas_device->starget->hostdata) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->flags |= + MPT_TARGET_FLAGS_RAID_COMPONENT; + sas_device->volume_handle = volume_handle; + sas_device->volume_wwid = volume_wwid; + } + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!sas_device) + return; + + /* hiding raid component */ + _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); + + if (starget) + starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun); + + sas_device_put(sas_device); +} + +/** + * _scsih_sas_pd_delete - delete pd component + * @ioc: per adapter object + * @element: IR config element data + * Context: user. + */ +static void +_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventIrConfigElement_t *element) +{ + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + + _scsih_device_remove_by_handle(ioc, handle); +} + +/** + * _scsih_sas_pd_add - remove pd component + * @ioc: per adapter object + * @element: IR config element data + * Context: user. + */ +static void +_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventIrConfigElement_t *element) +{ + struct _sas_device *sas_device; + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + u32 ioc_status; + u64 sas_address; + u16 parent_handle; + + set_bit(handle, ioc->pd_handles); + + sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device) { + _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); + sas_device_put(sas_device); + return; + } + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) + mpt3sas_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, + mpt3sas_get_port_by_id(ioc, + sas_device_pg0.PhysicalPort, 0)); + + _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); + _scsih_add_device(ioc, handle, 0, 1); +} + +/** + * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events + * @ioc: per adapter object + * @event_data: event data payload + * Context: user. 
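+ *
+ * Prints one line per configuration element, decoding the reason code
+ * (add, remove, hide, unhide, ...) and the element type (volume, phys disk,
+ * hot spare).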
+ */ +static void +_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrConfigChangeList_t *event_data) +{ + Mpi2EventIrConfigElement_t *element; + u8 element_type; + int i; + char *reason_str = NULL, *element_str = NULL; + + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + + ioc_info(ioc, "raid config change: (%s), elements(%d)\n", + le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ? + "foreign" : "native", + event_data->NumElements); + for (i = 0; i < event_data->NumElements; i++, element++) { + switch (element->ReasonCode) { + case MPI2_EVENT_IR_CHANGE_RC_ADDED: + reason_str = "add"; + break; + case MPI2_EVENT_IR_CHANGE_RC_REMOVED: + reason_str = "remove"; + break; + case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE: + reason_str = "no change"; + break; + case MPI2_EVENT_IR_CHANGE_RC_HIDE: + reason_str = "hide"; + break; + case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: + reason_str = "unhide"; + break; + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: + reason_str = "volume_created"; + break; + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: + reason_str = "volume_deleted"; + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: + reason_str = "pd_created"; + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: + reason_str = "pd_deleted"; + break; + default: + reason_str = "unknown reason"; + break; + } + element_type = le16_to_cpu(element->ElementFlags) & + MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK; + switch (element_type) { + case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT: + element_str = "volume"; + break; + case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT: + element_str = "phys disk"; + break; + case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT: + element_str = "hot spare"; + break; + default: + element_str = "unknown element"; + break; + } + pr_info("\t(%s:%s), vol handle(0x%04x), " \ + "pd handle(0x%04x), pd num(0x%02x)\n", element_str, + reason_str, le16_to_cpu(element->VolDevHandle), + le16_to_cpu(element->PhysDiskDevHandle), + element->PhysDiskNum); + } +} + +/** + * _scsih_sas_ir_config_change_event - handle ir configuration change events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + */ +static void +_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi2EventIrConfigElement_t *element; + int i; + u8 foreign_config; + Mpi2EventDataIrConfigChangeList_t *event_data = + (Mpi2EventDataIrConfigChangeList_t *) + fw_event->event_data; + + if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && + (!ioc->hide_ir_msg)) + _scsih_sas_ir_config_change_event_debug(ioc, event_data); + + foreign_config = (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 
1 : 0; + + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + if (ioc->shost_recovery && + ioc->hba_mpi_version_belonged != MPI2_VERSION) { + for (i = 0; i < event_data->NumElements; i++, element++) { + if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE) + _scsih_ir_fastpath(ioc, + le16_to_cpu(element->PhysDiskDevHandle), + element->PhysDiskNum); + } + return; + } + + for (i = 0; i < event_data->NumElements; i++, element++) { + + switch (element->ReasonCode) { + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: + case MPI2_EVENT_IR_CHANGE_RC_ADDED: + if (!foreign_config) + _scsih_sas_volume_add(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: + case MPI2_EVENT_IR_CHANGE_RC_REMOVED: + if (!foreign_config) + _scsih_sas_volume_delete(ioc, + le16_to_cpu(element->VolDevHandle)); + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: + if (!ioc->is_warpdrive) + _scsih_sas_pd_hide(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: + if (!ioc->is_warpdrive) + _scsih_sas_pd_expose(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_HIDE: + if (!ioc->is_warpdrive) + _scsih_sas_pd_add(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: + if (!ioc->is_warpdrive) + _scsih_sas_pd_delete(ioc, element); + break; + } + } +} + +/** + * _scsih_sas_ir_volume_event - IR volume event + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + */ +static void +_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + u64 wwid; + unsigned long flags; + struct _raid_device *raid_device; + u16 handle; + u32 state; + int rc; + Mpi2EventDataIrVolume_t *event_data = + (Mpi2EventDataIrVolume_t *) fw_event->event_data; + + if (ioc->shost_recovery) + return; + + if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) + return; + + handle = le16_to_cpu(event_data->VolDevHandle); + state = le32_to_cpu(event_data->NewValue); + if (!ioc->hide_ir_msg) + dewtprintk(ioc, + ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", + __func__, handle, + le32_to_cpu(event_data->PreviousValue), + state)); + switch (state) { + case MPI2_RAID_VOL_STATE_MISSING: + case MPI2_RAID_VOL_STATE_FAILED: + _scsih_sas_volume_delete(ioc, handle); + break; + + case MPI2_RAID_VOL_STATE_ONLINE: + case MPI2_RAID_VOL_STATE_DEGRADED: + case MPI2_RAID_VOL_STATE_OPTIMAL: + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + + if (raid_device) + break; + + mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); + if (!wwid) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + break; + } + + raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); + if (!raid_device) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + break; + } + + raid_device->id = ioc->sas_id++; + raid_device->channel = RAID_CHANNEL; + raid_device->handle = handle; + raid_device->wwid = wwid; + _scsih_raid_device_add(ioc, raid_device); + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + break; + + case MPI2_RAID_VOL_STATE_INITIALIZING: + default: + break; + } +} + +/** + * _scsih_sas_ir_physical_disk_event - PD event + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. 
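+ *
+ * For states that leave the disk usable (online, degraded, rebuilding,
+ * optimal, hot spare) the handle is recorded in pd_handles and the device is
+ * added if it is not known yet; offline/not-configured states are ignored.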
+ */ +static void +_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + u16 handle, parent_handle; + u32 state; + struct _sas_device *sas_device; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + u32 ioc_status; + Mpi2EventDataIrPhysicalDisk_t *event_data = + (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data; + u64 sas_address; + + if (ioc->shost_recovery) + return; + + if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) + return; + + handle = le16_to_cpu(event_data->PhysDiskDevHandle); + state = le32_to_cpu(event_data->NewValue); + + if (!ioc->hide_ir_msg) + dewtprintk(ioc, + ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", + __func__, handle, + le32_to_cpu(event_data->PreviousValue), + state)); + + switch (state) { + case MPI2_RAID_PD_STATE_ONLINE: + case MPI2_RAID_PD_STATE_DEGRADED: + case MPI2_RAID_PD_STATE_REBUILDING: + case MPI2_RAID_PD_STATE_OPTIMAL: + case MPI2_RAID_PD_STATE_HOT_SPARE: + + if (!ioc->is_warpdrive) + set_bit(handle, ioc->pd_handles); + + sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device) { + sas_device_put(sas_device); + return; + } + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) + mpt3sas_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, + mpt3sas_get_port_by_id(ioc, + sas_device_pg0.PhysicalPort, 0)); + + _scsih_add_device(ioc, handle, 0, 1); + + break; + + case MPI2_RAID_PD_STATE_OFFLINE: + case MPI2_RAID_PD_STATE_NOT_CONFIGURED: + case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: + default: + break; + } +} + +/** + * _scsih_sas_ir_operation_status_event_debug - debug for IR op event + * @ioc: per adapter object + * @event_data: event data payload + * Context: user. + */ +static void +_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrOperationStatus_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->RAIDOperation) { + case MPI2_EVENT_IR_RAIDOP_RESYNC: + reason_str = "resync"; + break; + case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION: + reason_str = "online capacity expansion"; + break; + case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: + reason_str = "consistency check"; + break; + case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT: + reason_str = "background init"; + break; + case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: + reason_str = "make data consistent"; + break; + } + + if (!reason_str) + return; + + ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n", + reason_str, + le16_to_cpu(event_data->VolDevHandle), + event_data->PercentComplete); +} + +/** + * _scsih_sas_ir_operation_status_event - handle RAID operation events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. 
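+ *
+ * For resync operations the reported percent complete is cached in the
+ * matching _raid_device for raid transport support.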
+ */ +static void +_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi2EventDataIrOperationStatus_t *event_data = + (Mpi2EventDataIrOperationStatus_t *) + fw_event->event_data; + static struct _raid_device *raid_device; + unsigned long flags; + u16 handle; + + if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && + (!ioc->hide_ir_msg)) + _scsih_sas_ir_operation_status_event_debug(ioc, + event_data); + + /* code added for raid transport support */ + if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) { + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + handle = le16_to_cpu(event_data->VolDevHandle); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + if (raid_device) + raid_device->percent_complete = + event_data->PercentComplete; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } +} + +/** + * _scsih_prep_device_scan - initialize parameters prior to device scan + * @ioc: per adapter object + * + * Set the deleted flag prior to device scan. If the device is found during + * the scan, then we clear the deleted flag. + */ +static void +_scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (sas_device_priv_data && sas_device_priv_data->sas_target) + sas_device_priv_data->sas_target->deleted = 1; + } +} + +/** + * _scsih_update_device_qdepth - Update QD during Reset. + * @ioc: per adapter object + * + */ +static void +_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + struct scsi_device *sdev; + u16 qdepth; + + ioc_info(ioc, "Update devices with firmware reported queue depth\n"); + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (sas_device_priv_data && sas_device_priv_data->sas_target) { + sas_target_priv_data = sas_device_priv_data->sas_target; + sas_device = sas_device_priv_data->sas_target->sas_dev; + if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) + qdepth = ioc->max_nvme_qd; + else if (sas_device && + sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) + qdepth = (sas_device->port_type > 1) ? + ioc->max_wideport_qd : ioc->max_narrowport_qd; + else if (sas_device && + sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + qdepth = ioc->max_sata_qd; + else + continue; + mpt3sas_scsih_change_queue_depth(sdev, qdepth); + } + } +} + +/** + * _scsih_mark_responding_sas_device - mark a sas_devices as responding + * @ioc: per adapter object + * @sas_device_pg0: SAS Device page 0 + * + * After host reset, find out whether devices are still responding. + * Used in _scsih_remove_unresponsive_sas_devices. 
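+ *
+ * The device is matched by SAS address, slot and port; its responding flag
+ * is set, tm_busy/deleted are cleared on the target, and the enclosure data
+ * and device handle are refreshed if the handle changed across the reset.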
+ */ +static void +_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, +Mpi2SasDevicePage0_t *sas_device_pg0) +{ + struct MPT3SAS_TARGET *sas_target_priv_data = NULL; + struct scsi_target *starget; + struct _sas_device *sas_device = NULL; + struct _enclosure_node *enclosure_dev = NULL; + unsigned long flags; + struct hba_port *port = mpt3sas_get_port_by_id( + ioc, sas_device_pg0->PhysicalPort, 0); + + if (sas_device_pg0->EnclosureHandle) { + enclosure_dev = + mpt3sas_scsih_enclosure_find_by_handle(ioc, + le16_to_cpu(sas_device_pg0->EnclosureHandle)); + if (enclosure_dev == NULL) + ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n", + sas_device_pg0->EnclosureHandle); + } + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if (sas_device->sas_address != le64_to_cpu( + sas_device_pg0->SASAddress)) + continue; + if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot)) + continue; + if (sas_device->port != port) + continue; + sas_device->responding = 1; + starget = sas_device->starget; + if (starget && starget->hostdata) { + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->tm_busy = 0; + sas_target_priv_data->deleted = 0; + } else + sas_target_priv_data = NULL; + if (starget) { + starget_printk(KERN_INFO, starget, + "handle(0x%04x), sas_addr(0x%016llx)\n", + le16_to_cpu(sas_device_pg0->DevHandle), + (unsigned long long) + sas_device->sas_address); + + if (sas_device->enclosure_handle != 0) + starget_printk(KERN_INFO, starget, + "enclosure logical id(0x%016llx), slot(%d)\n", + (unsigned long long) + sas_device->enclosure_logical_id, + sas_device->slot); + } + if (le16_to_cpu(sas_device_pg0->Flags) & + MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + sas_device->enclosure_level = + sas_device_pg0->EnclosureLevel; + memcpy(&sas_device->connector_name[0], + &sas_device_pg0->ConnectorName[0], 4); + } else { + sas_device->enclosure_level = 0; + sas_device->connector_name[0] = '\0'; + } + + sas_device->enclosure_handle = + le16_to_cpu(sas_device_pg0->EnclosureHandle); + sas_device->is_chassis_slot_valid = 0; + if (enclosure_dev) { + sas_device->enclosure_logical_id = le64_to_cpu( + enclosure_dev->pg0.EnclosureLogicalID); + if (le16_to_cpu(enclosure_dev->pg0.Flags) & + MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { + sas_device->is_chassis_slot_valid = 1; + sas_device->chassis_slot = + enclosure_dev->pg0.ChassisSlot; + } + } + + if (sas_device->handle == le16_to_cpu( + sas_device_pg0->DevHandle)) + goto out; + pr_info("\thandle changed from(0x%04x)!!!\n", + sas_device->handle); + sas_device->handle = le16_to_cpu( + sas_device_pg0->DevHandle); + if (sas_target_priv_data) + sas_target_priv_data->handle = + le16_to_cpu(sas_device_pg0->DevHandle); + goto out; + } + out: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +/** + * _scsih_create_enclosure_list_after_reset - Free Existing list, + * And create enclosure list by scanning all Enclosure Page(0)s + * @ioc: per adapter object + */ +static void +_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc) +{ + struct _enclosure_node *enclosure_dev; + Mpi2ConfigReply_t mpi_reply; + u16 enclosure_handle; + int rc; + + /* Free existing enclosure list */ + mpt3sas_free_enclosure_list(ioc); + + /* Re constructing enclosure list after reset*/ + enclosure_handle = 0xFFFF; + do { + enclosure_dev = + kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL); + if (!enclosure_dev) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + 
__FILE__, __LINE__, __func__);
+			return;
+		}
+		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+				&enclosure_dev->pg0,
+				MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
+				enclosure_handle);
+
+		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
+						MPI2_IOCSTATUS_MASK)) {
+			kfree(enclosure_dev);
+			return;
+		}
+		list_add_tail(&enclosure_dev->list,
+						&ioc->enclosure_list);
+		enclosure_handle =
+			le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
+	} while (1);
+}
+
+/**
+ * _scsih_search_responding_sas_devices - search for responding SAS end-devices
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not, remove them.
+ */
+static void
+_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+	Mpi2SasDevicePage0_t sas_device_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u16 ioc_status;
+	u16 handle;
+	u32 device_info;
+
+	ioc_info(ioc, "search for end-devices: start\n");
+
+	if (list_empty(&ioc->sas_device_list))
+		goto out;
+
+	handle = 0xFFFF;
+	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+	    handle))) {
+		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+		    MPI2_IOCSTATUS_MASK;
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+			break;
+		handle = le16_to_cpu(sas_device_pg0.DevHandle);
+		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+		if (!(_scsih_is_end_device(device_info)))
+			continue;
+		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
+	}
+
+ out:
+	ioc_info(ioc, "search for end-devices: complete\n");
+}
+
+/**
+ * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
+ * @ioc: per adapter object
+ * @pcie_device_pg0: PCIe Device page 0
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_devices.
+ */ +static void +_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, + Mpi26PCIeDevicePage0_t *pcie_device_pg0) +{ + struct MPT3SAS_TARGET *sas_target_priv_data = NULL; + struct scsi_target *starget; + struct _pcie_device *pcie_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { + if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID)) + && (pcie_device->slot == le16_to_cpu( + pcie_device_pg0->Slot))) { + pcie_device->access_status = + pcie_device_pg0->AccessStatus; + pcie_device->responding = 1; + starget = pcie_device->starget; + if (starget && starget->hostdata) { + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->tm_busy = 0; + sas_target_priv_data->deleted = 0; + } else + sas_target_priv_data = NULL; + if (starget) { + starget_printk(KERN_INFO, starget, + "handle(0x%04x), wwid(0x%016llx) ", + pcie_device->handle, + (unsigned long long)pcie_device->wwid); + if (pcie_device->enclosure_handle != 0) + starget_printk(KERN_INFO, starget, + "enclosure logical id(0x%016llx), " + "slot(%d)\n", + (unsigned long long) + pcie_device->enclosure_logical_id, + pcie_device->slot); + } + + if (((le32_to_cpu(pcie_device_pg0->Flags)) & + MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) && + (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { + pcie_device->enclosure_level = + pcie_device_pg0->EnclosureLevel; + memcpy(&pcie_device->connector_name[0], + &pcie_device_pg0->ConnectorName[0], 4); + } else { + pcie_device->enclosure_level = 0; + pcie_device->connector_name[0] = '\0'; + } + + if (pcie_device->handle == le16_to_cpu( + pcie_device_pg0->DevHandle)) + goto out; + pr_info("\thandle changed from(0x%04x)!!!\n", + pcie_device->handle); + pcie_device->handle = le16_to_cpu( + pcie_device_pg0->DevHandle); + if (sas_target_priv_data) + sas_target_priv_data->handle = + le16_to_cpu(pcie_device_pg0->DevHandle); + goto out; + } + } + + out: + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); +} + +/** + * _scsih_search_responding_pcie_devices - + * @ioc: per adapter object + * + * After host reset, find out whether devices are still responding. + * If not remove. + */ +static void +_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26PCIeDevicePage0_t pcie_device_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 ioc_status; + u16 handle; + u32 device_info; + + ioc_info(ioc, "search for end-devices: start\n"); + + if (list_empty(&ioc->pcie_device_list)) + goto out; + + handle = 0xFFFF; + while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, + &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", + __func__, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(pcie_device_pg0.DevHandle); + device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); + if (!(_scsih_is_nvme_pciescsi_device(device_info))) + continue; + _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0); + } +out: + ioc_info(ioc, "search for PCIe end-devices: complete\n"); +} + +/** + * _scsih_mark_responding_raid_device - mark a raid_device as responding + * @ioc: per adapter object + * @wwid: world wide identifier for raid volume + * @handle: device handle + * + * After host reset, find out whether devices are still responding. 
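+ * The volume is matched by WWID; its handle is refreshed if it changed and
+ * the WARPDRIVE direct-IO properties are re-initialized.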
+ * Used in _scsih_remove_unresponsive_raid_devices. + */ +static void +_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid, + u16 handle) +{ + struct MPT3SAS_TARGET *sas_target_priv_data = NULL; + struct scsi_target *starget; + struct _raid_device *raid_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->wwid == wwid && raid_device->starget) { + starget = raid_device->starget; + if (starget && starget->hostdata) { + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->deleted = 0; + } else + sas_target_priv_data = NULL; + raid_device->responding = 1; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + starget_printk(KERN_INFO, raid_device->starget, + "handle(0x%04x), wwid(0x%016llx)\n", handle, + (unsigned long long)raid_device->wwid); + + /* + * WARPDRIVE: The handles of the PDs might have changed + * across the host reset so re-initialize the + * required data for Direct IO + */ + mpt3sas_init_warpdrive_properties(ioc, raid_device); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + if (raid_device->handle == handle) { + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + return; + } + pr_info("\thandle changed from(0x%04x)!!!\n", + raid_device->handle); + raid_device->handle = handle; + if (sas_target_priv_data) + sas_target_priv_data->handle = handle; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return; + } + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +/** + * _scsih_search_responding_raid_devices - + * @ioc: per adapter object + * + * After host reset, find out whether devices are still responding. + * If not remove. + */ +static void +_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2RaidVolPage1_t volume_pg1; + Mpi2RaidVolPage0_t volume_pg0; + Mpi2RaidPhysDiskPage0_t pd_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 ioc_status; + u16 handle; + u8 phys_disk_num; + + if (!ioc->ir_firmware) + return; + + ioc_info(ioc, "search for raid volumes: start\n"); + + if (list_empty(&ioc->raid_device_list)) + goto out; + + handle = 0xFFFF; + while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + break; + handle = le16_to_cpu(volume_pg1.DevHandle); + + if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, + &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, + sizeof(Mpi2RaidVolPage0_t))) + continue; + + if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || + volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || + volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) + _scsih_mark_responding_raid_device(ioc, + le64_to_cpu(volume_pg1.WWID), handle); + } + + /* refresh the pd_handles */ + if (!ioc->is_warpdrive) { + phys_disk_num = 0xFF; + memset(ioc->pd_handles, 0, ioc->pd_handles_sz); + while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, + phys_disk_num))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + break; + phys_disk_num = pd_pg0.PhysDiskNum; + handle = le16_to_cpu(pd_pg0.DevHandle); + set_bit(handle, ioc->pd_handles); + } + } + out: + ioc_info(ioc, "search for responding raid volumes: complete\n"); +} + +/** + * 
_scsih_mark_responding_expander - mark a expander as responding + * @ioc: per adapter object + * @expander_pg0:SAS Expander Config Page0 + * + * After host reset, find out whether devices are still responding. + * Used in _scsih_remove_unresponsive_expanders. + */ +static void +_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, + Mpi2ExpanderPage0_t *expander_pg0) +{ + struct _sas_node *sas_expander = NULL; + unsigned long flags; + int i; + struct _enclosure_node *enclosure_dev = NULL; + u16 handle = le16_to_cpu(expander_pg0->DevHandle); + u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle); + u64 sas_address = le64_to_cpu(expander_pg0->SASAddress); + struct hba_port *port = mpt3sas_get_port_by_id( + ioc, expander_pg0->PhysicalPort, 0); + + if (enclosure_handle) + enclosure_dev = + mpt3sas_scsih_enclosure_find_by_handle(ioc, + enclosure_handle); + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->sas_address != sas_address) + continue; + if (sas_expander->port != port) + continue; + sas_expander->responding = 1; + + if (enclosure_dev) { + sas_expander->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + sas_expander->enclosure_handle = + le16_to_cpu(expander_pg0->EnclosureHandle); + } + + if (sas_expander->handle == handle) + goto out; + pr_info("\texpander(0x%016llx): handle changed" \ + " from(0x%04x) to (0x%04x)!!!\n", + (unsigned long long)sas_expander->sas_address, + sas_expander->handle, handle); + sas_expander->handle = handle; + for (i = 0 ; i < sas_expander->num_phys ; i++) + sas_expander->phy[i].handle = handle; + goto out; + } + out: + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +} + +/** + * _scsih_search_responding_expanders - + * @ioc: per adapter object + * + * After host reset, find out whether devices are still responding. + * If not remove. + */ +static void +_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2ExpanderPage0_t expander_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 ioc_status; + u64 sas_address; + u16 handle; + u8 port; + + ioc_info(ioc, "search for expanders: start\n"); + + if (list_empty(&ioc->sas_expander_list)) + goto out; + + handle = 0xFFFF; + while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, + MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + break; + + handle = le16_to_cpu(expander_pg0.DevHandle); + sas_address = le64_to_cpu(expander_pg0.SASAddress); + port = expander_pg0.PhysicalPort; + pr_info( + "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n", + handle, (unsigned long long)sas_address, + (ioc->multipath_on_hba ? 
+ port : MULTIPATH_DISABLED_PORT_ID)); + _scsih_mark_responding_expander(ioc, &expander_pg0); + } + + out: + ioc_info(ioc, "search for expanders: complete\n"); +} + +/** + * _scsih_remove_unresponding_devices - removing unresponding devices + * @ioc: per adapter object + */ +static void +_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc) +{ + struct _sas_device *sas_device, *sas_device_next; + struct _sas_node *sas_expander, *sas_expander_next; + struct _raid_device *raid_device, *raid_device_next; + struct _pcie_device *pcie_device, *pcie_device_next; + struct list_head tmp_list; + unsigned long flags; + LIST_HEAD(head); + + ioc_info(ioc, "removing unresponding devices: start\n"); + + /* removing unresponding end devices */ + ioc_info(ioc, "removing unresponding devices: end-devices\n"); + /* + * Iterate, pulling off devices marked as non-responding. We become the + * owner for the reference the list had on any object we prune. + */ + spin_lock_irqsave(&ioc->sas_device_lock, flags); + + /* + * Clean up the sas_device_init_list list as + * driver goes for fresh scan as part of diag reset. + */ + list_for_each_entry_safe(sas_device, sas_device_next, + &ioc->sas_device_init_list, list) { + list_del_init(&sas_device->list); + sas_device_put(sas_device); + } + + list_for_each_entry_safe(sas_device, sas_device_next, + &ioc->sas_device_list, list) { + if (!sas_device->responding) + list_move_tail(&sas_device->list, &head); + else + sas_device->responding = 0; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + /* + * Now, uninitialize and remove the unresponding devices we pruned. + */ + list_for_each_entry_safe(sas_device, sas_device_next, &head, list) { + _scsih_remove_device(ioc, sas_device); + list_del_init(&sas_device->list); + sas_device_put(sas_device); + } + + ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n"); + INIT_LIST_HEAD(&head); + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + /* + * Clean up the pcie_device_init_list list as + * driver goes for fresh scan as part of diag reset. 
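The removal routine below never tears a device down while holding the list lock: under the lock it only moves non-responding entries onto a private list (taking over the list's reference) and clears the responding flag on survivors, then drops the lock and removes the pruned entries. A minimal userspace sketch of that two-phase pattern, using a pthread mutex and a plain singly linked list in place of the driver's spinlock and intrusive list_head:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct dev {
	int id;
	bool responding;
	struct dev *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev *dev_list;

// Phase 1: under the lock, detach non-responding devices onto 'reap'
// and clear the responding flag on the survivors for the next reset.
static struct dev *prune_unresponding(void)
{
	struct dev *reap = NULL, **pp;

	pthread_mutex_lock(&list_lock);
	pp = &dev_list;
	while (*pp) {
		struct dev *d = *pp;

		if (!d->responding) {
			*pp = d->next;	// unlink
			d->next = reap;
			reap = d;
		} else {
			d->responding = false;
			pp = &d->next;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return reap;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct dev *d = calloc(1, sizeof(*d));

		if (!d)
			return 1;
		d->id = i;
		d->responding = (i != 1);	// pretend dev 1 went away
		d->next = dev_list;
		dev_list = d;
	}

	// Phase 2: slow teardown happens outside the lock.
	for (struct dev *d = prune_unresponding(); d; ) {
		struct dev *next = d->next;

		printf("removing dev %d\n", d->id);
		free(d);
		d = next;
	}
	return 0;
}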
+ */ + list_for_each_entry_safe(pcie_device, pcie_device_next, + &ioc->pcie_device_init_list, list) { + list_del_init(&pcie_device->list); + pcie_device_put(pcie_device); + } + + list_for_each_entry_safe(pcie_device, pcie_device_next, + &ioc->pcie_device_list, list) { + if (!pcie_device->responding) + list_move_tail(&pcie_device->list, &head); + else + pcie_device->responding = 0; + } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) { + _scsih_pcie_device_remove_from_sml(ioc, pcie_device); + list_del_init(&pcie_device->list); + pcie_device_put(pcie_device); + } + + /* removing unresponding volumes */ + if (ioc->ir_firmware) { + ioc_info(ioc, "removing unresponding devices: volumes\n"); + list_for_each_entry_safe(raid_device, raid_device_next, + &ioc->raid_device_list, list) { + if (!raid_device->responding) + _scsih_sas_volume_delete(ioc, + raid_device->handle); + else + raid_device->responding = 0; + } + } + + /* removing unresponding expanders */ + ioc_info(ioc, "removing unresponding devices: expanders\n"); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + INIT_LIST_HEAD(&tmp_list); + list_for_each_entry_safe(sas_expander, sas_expander_next, + &ioc->sas_expander_list, list) { + if (!sas_expander->responding) + list_move_tail(&sas_expander->list, &tmp_list); + else + sas_expander->responding = 0; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list, + list) { + _scsih_expander_node_remove(ioc, sas_expander); + } + + ioc_info(ioc, "removing unresponding devices: complete\n"); + + /* unblock devices */ + _scsih_ublock_io_all_device(ioc); +} + +static void +_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander, u16 handle) +{ + Mpi2ExpanderPage1_t expander_pg1; + Mpi2ConfigReply_t mpi_reply; + int i; + + for (i = 0 ; i < sas_expander->num_phys ; i++) { + if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, + &expander_pg1, i, handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + mpt3sas_transport_update_links(ioc, sas_expander->sas_address, + le16_to_cpu(expander_pg1.AttachedDevHandle), i, + expander_pg1.NegotiatedLinkRate >> 4, + sas_expander->port); + } +} + +/** + * _scsih_scan_for_devices_after_reset - scan for devices after host reset + * @ioc: per adapter object + */ +static void +_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2ExpanderPage0_t expander_pg0; + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi26PCIeDevicePage0_t pcie_device_pg0; + Mpi2RaidVolPage1_t *volume_pg1; + Mpi2RaidVolPage0_t *volume_pg0; + Mpi2RaidPhysDiskPage0_t pd_pg0; + Mpi2EventIrConfigElement_t element; + Mpi2ConfigReply_t mpi_reply; + u8 phys_disk_num, port_id; + u16 ioc_status; + u16 handle, parent_handle; + u64 sas_address; + struct _sas_device *sas_device; + struct _pcie_device *pcie_device; + struct _sas_node *expander_device; + static struct _raid_device *raid_device; + u8 retry_count; + unsigned long flags; + + volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL); + if (!volume_pg0) + return; + + volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL); + if (!volume_pg1) { + kfree(volume_pg0); + return; + } + + ioc_info(ioc, "scan devices: start\n"); + + _scsih_sas_host_refresh(ioc); + + ioc_info(ioc, "\tscan devices: expanders start\n"); + + /* expanders */ + handle = 0xFFFF; + while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, 
&expander_pg0, + MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(expander_pg0.DevHandle); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + port_id = expander_pg0.PhysicalPort; + expander_device = mpt3sas_scsih_expander_find_by_sas_address( + ioc, le64_to_cpu(expander_pg0.SASAddress), + mpt3sas_get_port_by_id(ioc, port_id, 0)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (expander_device) + _scsih_refresh_expander_links(ioc, expander_device, + handle); + else { + ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", + handle, + (u64)le64_to_cpu(expander_pg0.SASAddress)); + _scsih_expander_add(ioc, handle); + ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", + handle, + (u64)le64_to_cpu(expander_pg0.SASAddress)); + } + } + + ioc_info(ioc, "\tscan devices: expanders complete\n"); + + if (!ioc->ir_firmware) + goto skip_to_sas; + + ioc_info(ioc, "\tscan devices: phys disk start\n"); + + /* phys disk */ + phys_disk_num = 0xFF; + while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, + phys_disk_num))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + phys_disk_num = pd_pg0.PhysDiskNum; + handle = le16_to_cpu(pd_pg0.DevHandle); + sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device) { + sas_device_put(sas_device); + continue; + } + if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + handle) != 0) + continue; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, + &sas_address)) { + ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n", + handle, + (u64)le64_to_cpu(sas_device_pg0.SASAddress)); + port_id = sas_device_pg0.PhysicalPort; + mpt3sas_transport_update_links(ioc, sas_address, + handle, sas_device_pg0.PhyNum, + MPI2_SAS_NEG_LINK_RATE_1_5, + mpt3sas_get_port_by_id(ioc, port_id, 0)); + set_bit(handle, ioc->pd_handles); + retry_count = 0; + /* This will retry adding the end device. 
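Both the phys-disk and end-device branches of this scan use the same retry idiom noted in the surrounding comment: keep calling the add routine once a second until it stops asking for a retry, where a return of 1 means "try again". A sketch of that contract, with try_add_device() as a hypothetical callee that signals retry the same way _scsih_add_device() does:

#include <stdio.h>
#include <unistd.h>

// Hypothetical callee: returns 1 when the caller should retry,
// 0 once the device has been accepted.
static int try_add_device(unsigned int handle, unsigned int attempt)
{
	(void)handle;
	return attempt < 2;	// pretend the third attempt succeeds
}

static void add_device_with_retry(unsigned int handle)
{
	unsigned int retry_count = 0;

	while (try_add_device(handle, retry_count++))
		sleep(1);	// ssleep(1) in the driver

	printf("handle(0x%04x) added after %u attempt(s)\n",
	       handle, retry_count);
}

int main(void)
{
	add_device_with_retry(0x0011);
	return 0;
}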
+ * _scsih_add_device() will decide on retries and + * return "1" when it should be retried + */ + while (_scsih_add_device(ioc, handle, retry_count++, + 1)) { + ssleep(1); + } + ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n", + handle, + (u64)le64_to_cpu(sas_device_pg0.SASAddress)); + } + } + + ioc_info(ioc, "\tscan devices: phys disk complete\n"); + + ioc_info(ioc, "\tscan devices: volumes start\n"); + + /* volumes */ + handle = 0xFFFF; + while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(volume_pg1->DevHandle); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_wwid(ioc, + le64_to_cpu(volume_pg1->WWID)); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (raid_device) + continue; + if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, + volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, + sizeof(Mpi2RaidVolPage0_t))) + continue; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || + volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE || + volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) { + memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t)); + element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED; + element.VolDevHandle = volume_pg1->DevHandle; + ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n", + volume_pg1->DevHandle); + _scsih_sas_volume_add(ioc, &element); + ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n", + volume_pg1->DevHandle); + } + } + + ioc_info(ioc, "\tscan devices: volumes complete\n"); + + skip_to_sas: + + ioc_info(ioc, "\tscan devices: end devices start\n"); + + /* sas devices */ + handle = 0xFFFF; + while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(sas_device_pg0.DevHandle); + if (!(_scsih_is_end_device( + le32_to_cpu(sas_device_pg0.DeviceInfo)))) + continue; + port_id = sas_device_pg0.PhysicalPort; + sas_device = mpt3sas_get_sdev_by_addr(ioc, + le64_to_cpu(sas_device_pg0.SASAddress), + mpt3sas_get_port_by_id(ioc, port_id, 0)); + if (sas_device) { + sas_device_put(sas_device); + continue; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { + ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n", + handle, + (u64)le64_to_cpu(sas_device_pg0.SASAddress)); + mpt3sas_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, + mpt3sas_get_port_by_id(ioc, port_id, 0)); + retry_count = 0; + /* This 
will retry adding the end device. + * _scsih_add_device() will decide on retries and + * return "1" when it should be retried + */ + while (_scsih_add_device(ioc, handle, retry_count++, + 0)) { + ssleep(1); + } + ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n", + handle, + (u64)le64_to_cpu(sas_device_pg0.SASAddress)); + } + } + ioc_info(ioc, "\tscan devices: end devices complete\n"); + ioc_info(ioc, "\tscan devices: pcie end devices start\n"); + + /* pcie devices */ + handle = 0xFFFF; + while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, + &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) + & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(pcie_device_pg0.DevHandle); + if (!(_scsih_is_nvme_pciescsi_device( + le32_to_cpu(pcie_device_pg0.DeviceInfo)))) + continue; + pcie_device = mpt3sas_get_pdev_by_wwid(ioc, + le64_to_cpu(pcie_device_pg0.WWID)); + if (pcie_device) { + pcie_device_put(pcie_device); + continue; + } + retry_count = 0; + parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle); + _scsih_pcie_add_device(ioc, handle); + + ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n", + handle, (u64)le64_to_cpu(pcie_device_pg0.WWID)); + } + + kfree(volume_pg0); + kfree(volume_pg1); + + ioc_info(ioc, "\tpcie devices: pcie end devices complete\n"); + ioc_info(ioc, "scan devices: complete\n"); +} + +/** + * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih) + * @ioc: per adapter object + * + * The handler for doing any required cleanup or initialization. + */ +void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) +{ + dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__)); +} + +/** + * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding + * scsi & tm cmds. + * @ioc: per adapter object + * + * The handler for doing any required cleanup or initialization. + */ +void +mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc) +{ + dtmprintk(ioc, + ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__)); + if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) { + ioc->scsih_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid); + complete(&ioc->scsih_cmds.done); + } + if (ioc->tm_cmds.status & MPT3_CMD_PENDING) { + ioc->tm_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid); + complete(&ioc->tm_cmds.done); + } + + memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz); + memset(ioc->device_remove_in_progress, 0, + ioc->device_remove_in_progress_sz); + _scsih_fw_event_cleanup_queue(ioc); + _scsih_flush_running_cmds(ioc); +} + +/** + * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih) + * @ioc: per adapter object + * + * The handler for doing any required cleanup or initialization. 
+ */ +void +mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) +{ + dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__)); + if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) { + if (ioc->multipath_on_hba) { + _scsih_sas_port_refresh(ioc); + _scsih_update_vphys_after_reset(ioc); + } + _scsih_prep_device_scan(ioc); + _scsih_create_enclosure_list_after_reset(ioc); + _scsih_search_responding_sas_devices(ioc); + _scsih_search_responding_pcie_devices(ioc); + _scsih_search_responding_raid_devices(ioc); + _scsih_search_responding_expanders(ioc); + _scsih_error_recovery_delete_devices(ioc); + } +} + +/** + * _mpt3sas_fw_work - delayed task for processing firmware events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + */ +static void +_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) +{ + ioc->current_event = fw_event; + _scsih_fw_event_del_from_list(ioc, fw_event); + + /* the queue is being flushed so ignore this event */ + if (ioc->remove_host || ioc->pci_error_recovery) { + fw_event_work_put(fw_event); + ioc->current_event = NULL; + return; + } + + switch (fw_event->event) { + case MPT3SAS_PROCESS_TRIGGER_DIAG: + mpt3sas_process_trigger_data(ioc, + (struct SL_WH_TRIGGERS_EVENT_DATA_T *) + fw_event->event_data); + break; + case MPT3SAS_REMOVE_UNRESPONDING_DEVICES: + while (scsi_host_in_recovery(ioc->shost) || + ioc->shost_recovery) { + /* + * If we're unloading or cancelling the work, bail. + * Otherwise, this can become an infinite loop. + */ + if (ioc->remove_host || ioc->fw_events_cleanup) + goto out; + ssleep(1); + } + _scsih_remove_unresponding_devices(ioc); + _scsih_del_dirty_vphy(ioc); + _scsih_del_dirty_port_entries(ioc); + if (ioc->is_gen35_ioc) + _scsih_update_device_qdepth(ioc); + _scsih_scan_for_devices_after_reset(ioc); + /* + * If diag reset has occurred during the driver load + * then driver has to complete the driver load operation + * by executing the following items: + *- Register the devices from sas_device_init_list to SML + *- clear is_driver_loading flag, + *- start the watchdog thread. + * In happy driver load path, above things are taken care of when + * driver executes scsih_scan_finished(). 
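Before pruning devices, the worker above polls once a second until the SCSI midlayer has left recovery, and bails out if the driver is unloading or the event queue is being cancelled, so the loop cannot spin forever. A small sketch of that wait-with-escape-hatch, where host_in_recovery() and driver_unloading() are hypothetical predicates standing in for scsi_host_in_recovery()/shost_recovery and remove_host/fw_events_cleanup:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool host_in_recovery(void) { static int n; return n++ < 2; }
static bool driver_unloading(void) { return false; }

// Returns true when it is safe to proceed, false if we must give up.
static bool wait_for_recovery_to_finish(void)
{
	while (host_in_recovery()) {
		if (driver_unloading())
			return false;	// bail: otherwise this could loop forever
		sleep(1);		// ssleep(1) in the driver
	}
	return true;
}

int main(void)
{
	if (wait_for_recovery_to_finish())
		printf("recovery done, pruning unresponding devices\n");
	return 0;
}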
+ */ + if (ioc->is_driver_loading) + _scsih_complete_devices_scanning(ioc); + _scsih_set_nvme_max_shutdown_latency(ioc); + break; + case MPT3SAS_PORT_ENABLE_COMPLETE: + ioc->start_scan = 0; + if (missing_delay[0] != -1 && missing_delay[1] != -1) + mpt3sas_base_update_missing_delay(ioc, missing_delay[0], + missing_delay[1]); + dewtprintk(ioc, + ioc_info(ioc, "port enable: complete from worker thread\n")); + break; + case MPT3SAS_TURN_ON_PFA_LED: + _scsih_turn_on_pfa_led(ioc, fw_event->device_handle); + break; + case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + _scsih_sas_topology_change_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + _scsih_sas_device_status_change_event_debug(ioc, + (Mpi2EventDataSasDeviceStatusChange_t *) + fw_event->event_data); + break; + case MPI2_EVENT_SAS_DISCOVERY: + _scsih_sas_discovery_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR: + _scsih_sas_device_discovery_error_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: + _scsih_sas_broadcast_primitive_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + _scsih_sas_enclosure_dev_status_change_event(ioc, + fw_event); + break; + case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: + _scsih_sas_ir_config_change_event(ioc, fw_event); + break; + case MPI2_EVENT_IR_VOLUME: + _scsih_sas_ir_volume_event(ioc, fw_event); + break; + case MPI2_EVENT_IR_PHYSICAL_DISK: + _scsih_sas_ir_physical_disk_event(ioc, fw_event); + break; + case MPI2_EVENT_IR_OPERATION_STATUS: + _scsih_sas_ir_operation_status_event(ioc, fw_event); + break; + case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE: + _scsih_pcie_device_status_change_event(ioc, fw_event); + break; + case MPI2_EVENT_PCIE_ENUMERATION: + _scsih_pcie_enumeration_event(ioc, fw_event); + break; + case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: + _scsih_pcie_topology_change_event(ioc, fw_event); + ioc->current_event = NULL; + return; + } +out: + fw_event_work_put(fw_event); + ioc->current_event = NULL; +} + +/** + * _firmware_event_work + * @work: The fw_event_work object + * Context: user. + * + * wrappers for the work thread handling firmware events + */ + +static void +_firmware_event_work(struct work_struct *work) +{ + struct fw_event_work *fw_event = container_of(work, + struct fw_event_work, work); + + _mpt3sas_fw_work(fw_event->ioc, fw_event); +} + +/** + * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time) + * @ioc: per adapter object + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt. + * + * This function merely adds a new work task into ioc->firmware_event_thread. + * The tasks are worked from _firmware_event_work in user context. + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
+ */ +u8 +mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, + u32 reply) +{ + struct fw_event_work *fw_event; + Mpi2EventNotificationReply_t *mpi_reply; + u16 event; + u16 sz; + Mpi26EventDataActiveCableExcept_t *ActiveCableEventData; + + /* events turned off due to host reset */ + if (ioc->pci_error_recovery) + return 1; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + + if (unlikely(!mpi_reply)) { + ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return 1; + } + + event = le16_to_cpu(mpi_reply->Event); + + if (event != MPI2_EVENT_LOG_ENTRY_ADDED) + mpt3sas_trigger_event(ioc, event, 0); + + switch (event) { + /* handle these */ + case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: + { + Mpi2EventDataSasBroadcastPrimitive_t *baen_data = + (Mpi2EventDataSasBroadcastPrimitive_t *) + mpi_reply->EventData; + + if (baen_data->Primitive != + MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) + return 1; + + if (ioc->broadcast_aen_busy) { + ioc->broadcast_aen_pending++; + return 1; + } else + ioc->broadcast_aen_busy = 1; + break; + } + + case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + _scsih_check_topo_delete_events(ioc, + (Mpi2EventDataSasTopologyChangeList_t *) + mpi_reply->EventData); + /* + * No need to add the topology change list + * event to fw event work queue when + * diag reset is going on. Since during diag + * reset driver scan the devices by reading + * sas device page0's not by processing the + * events. + */ + if (ioc->shost_recovery) + return 1; + break; + case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: + _scsih_check_pcie_topo_remove_events(ioc, + (Mpi26EventDataPCIeTopologyChangeList_t *) + mpi_reply->EventData); + if (ioc->shost_recovery) + return 1; + break; + case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: + _scsih_check_ir_config_unhide_events(ioc, + (Mpi2EventDataIrConfigChangeList_t *) + mpi_reply->EventData); + break; + case MPI2_EVENT_IR_VOLUME: + _scsih_check_volume_delete_events(ioc, + (Mpi2EventDataIrVolume_t *) + mpi_reply->EventData); + break; + case MPI2_EVENT_LOG_ENTRY_ADDED: + { + Mpi2EventDataLogEntryAdded_t *log_entry; + u32 log_code; + + if (!ioc->is_warpdrive) + break; + + log_entry = (Mpi2EventDataLogEntryAdded_t *) + mpi_reply->EventData; + log_code = le32_to_cpu(*(__le32 *)log_entry->LogData); + + if (le16_to_cpu(log_entry->LogEntryQualifier) + != MPT2_WARPDRIVE_LOGENTRY) + break; + + switch (log_code) { + case MPT2_WARPDRIVE_LC_SSDT: + ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n"); + break; + case MPT2_WARPDRIVE_LC_SSDLW: + ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n"); + break; + case MPT2_WARPDRIVE_LC_SSDLF: + ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n"); + break; + case MPT2_WARPDRIVE_LC_BRMF: + ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. 
Check WarpDrive documentation for additional details.\n"); + break; + } + + break; + } + case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: + _scsih_sas_device_status_change_event(ioc, + (Mpi2EventDataSasDeviceStatusChange_t *) + mpi_reply->EventData); + break; + case MPI2_EVENT_IR_OPERATION_STATUS: + case MPI2_EVENT_SAS_DISCOVERY: + case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR: + case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + case MPI2_EVENT_IR_PHYSICAL_DISK: + case MPI2_EVENT_PCIE_ENUMERATION: + case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE: + break; + + case MPI2_EVENT_TEMP_THRESHOLD: + _scsih_temp_threshold_events(ioc, + (Mpi2EventDataTemperature_t *) + mpi_reply->EventData); + break; + case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: + ActiveCableEventData = + (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; + switch (ActiveCableEventData->ReasonCode) { + case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER: + ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n", + ActiveCableEventData->ReceptacleID); + pr_notice("cannot be powered and devices connected\n"); + pr_notice("to this active cable will not be seen\n"); + pr_notice("This active cable requires %d mW of power\n", + le32_to_cpu( + ActiveCableEventData->ActiveCablePowerRequirement)); + break; + + case MPI26_EVENT_ACTIVE_CABLE_DEGRADED: + ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n", + ActiveCableEventData->ReceptacleID); + pr_notice( + "is not running at optimal speed(12 Gb/s rate)\n"); + break; + } + + break; + + default: /* ignore the rest */ + return 1; + } + + sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; + fw_event = alloc_fw_event_work(sz); + if (!fw_event) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return 1; + } + + memcpy(fw_event->event_data, mpi_reply->EventData, sz); + fw_event->ioc = ioc; + fw_event->VF_ID = mpi_reply->VF_ID; + fw_event->VP_ID = mpi_reply->VP_ID; + fw_event->event = event; + _scsih_fw_event_add(ioc, fw_event); + fw_event_work_put(fw_event); + return 1; +} + +/** + * _scsih_expander_node_remove - removing expander device from list. + * @ioc: per adapter object + * @sas_expander: the sas_device object + * + * Removing object and freeing associated memory from the + * ioc->sas_expander_list. 
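When an event is accepted, the callback sizes the deferred work item from EventDataLength (a count of 4-byte words), copies the payload out of the reply frame, and queues it for the worker thread, since the reply frame is recycled once the ISR returns. A sketch of that copy-then-defer step using a flexible array member; alloc_event_work() is a simplified stand-in for alloc_fw_event_work(), not its real signature:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct event_work {
	uint16_t event;
	uint16_t data_len;	// bytes
	uint8_t  data[];	// flexible array member
};

static struct event_work *alloc_event_work(uint16_t event,
					   const void *payload, uint16_t words)
{
	uint16_t sz = words * 4;	// EventDataLength is in 4-byte units
	struct event_work *w = malloc(sizeof(*w) + sz);

	if (!w)
		return NULL;
	w->event = event;
	w->data_len = sz;
	memcpy(w->data, payload, sz);	// reply frame is reused after return
	return w;
}

int main(void)
{
	uint32_t payload[2] = { 0x12345678u, 0x9abcdef0u };
	struct event_work *w = alloc_event_work(0x001f, payload, 2);

	if (w) {
		printf("queued event 0x%04x, %u bytes\n",
		       (unsigned)w->event, (unsigned)w->data_len);
		free(w);
	}
	return 0;
}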
+ */ +static void +_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander) +{ + struct _sas_port *mpt3sas_port, *next; + unsigned long flags; + int port_id; + + /* remove sibling ports attached to this expander */ + list_for_each_entry_safe(mpt3sas_port, next, + &sas_expander->sas_port_list, port_list) { + if (ioc->shost_recovery) + return; + if (mpt3sas_port->remote_identify.device_type == + SAS_END_DEVICE) + mpt3sas_device_remove_by_sas_address(ioc, + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); + else if (mpt3sas_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE || + mpt3sas_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) + mpt3sas_expander_remove(ioc, + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); + } + + port_id = sas_expander->port->port_id; + + mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, + sas_expander->sas_address_parent, sas_expander->port); + + ioc_info(ioc, + "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n", + sas_expander->handle, (unsigned long long) + sas_expander->sas_address, + port_id); + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_del(&sas_expander->list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + kfree(sas_expander->phy); + kfree(sas_expander); +} + +/** + * _scsih_nvme_shutdown - NVMe shutdown notification + * @ioc: per adapter object + * + * Sending IoUnitControl request with shutdown operation code to alert IOC that + * the host system is shutting down so that IOC can issue NVMe shutdown to + * NVMe drives attached to it. + */ +static void +_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26IoUnitControlRequest_t *mpi_request; + Mpi26IoUnitControlReply_t *mpi_reply; + u16 smid; + + /* are there any NVMe devices ? 
*/ + if (list_empty(&ioc->pcie_device_list)) + return; + + mutex_lock(&ioc->scsih_cmds.mutex); + + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: scsih_cmd in use\n", __func__); + goto out; + } + + ioc->scsih_cmds.status = MPT3_CMD_PENDING; + + smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + ioc_err(ioc, + "%s: failed obtaining a smid\n", __func__); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t)); + mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL; + mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN; + + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + /* Wait for max_shutdown_latency seconds */ + ioc_info(ioc, + "Io Unit Control shutdown (sending), Shutdown latency %d sec\n", + ioc->max_shutdown_latency); + wait_for_completion_timeout(&ioc->scsih_cmds.done, + ioc->max_shutdown_latency*HZ); + + if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + goto out; + } + + if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + ioc_info(ioc, "Io Unit Control shutdown (complete):" + "ioc_status(0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + } + out: + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); +} + + +/** + * _scsih_ir_shutdown - IR shutdown notification + * @ioc: per adapter object + * + * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that + * the host system is shutting down. + */ +static void +_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2RaidActionRequest_t *mpi_request; + Mpi2RaidActionReply_t *mpi_reply; + u16 smid; + + /* is IR firmware build loaded ? */ + if (!ioc->ir_firmware) + return; + + /* are there any volumes ? 
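Both shutdown helpers funnel through the single scsih_cmds slot: take the mutex, verify the slot is NOT_USED, mark it PENDING, fire the request, wait on a completion with a timeout, then put the slot back to NOT_USED no matter what happened. A compressed single-threaded sketch of that slot lifecycle; the flag values and the submit/wait stubs are illustrative only, and the serializing mutex is omitted:

#include <stdbool.h>
#include <stdio.h>

enum cmd_status {
	CMD_NOT_USED	= 0,
	CMD_PENDING	= 1 << 0,
	CMD_COMPLETE	= 1 << 1,
	CMD_REPLY_VALID	= 1 << 2,
};

static unsigned int scsih_cmd_status = CMD_NOT_USED;

// Stubbed firmware interaction: pretend the reply arrives in time.
static void submit_request(void) { }
static void wait_for_reply(unsigned int timeout_s)
{
	(void)timeout_s;
	scsih_cmd_status |= CMD_COMPLETE | CMD_REPLY_VALID;
}

static int issue_shutdown_notification(unsigned int timeout_s)
{
	if (scsih_cmd_status != CMD_NOT_USED) {
		printf("scsih_cmd in use\n");
		return -1;	// another internal command owns the slot
	}

	scsih_cmd_status = CMD_PENDING;
	submit_request();
	wait_for_reply(timeout_s);

	if (!(scsih_cmd_status & CMD_COMPLETE))
		printf("timeout\n");
	else if (scsih_cmd_status & CMD_REPLY_VALID)
		printf("shutdown notification acknowledged\n");

	scsih_cmd_status = CMD_NOT_USED;	// always release the slot
	return 0;
}

int main(void)
{
	return issue_shutdown_notification(10);
}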
*/ + if (list_empty(&ioc->raid_device_list)) + return; + + mutex_lock(&ioc->scsih_cmds.mutex); + + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: scsih_cmd in use\n", __func__); + goto out; + } + ioc->scsih_cmds.status = MPT3_CMD_PENDING; + + smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t)); + + mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; + mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; + + if (!ioc->hide_ir_msg) + ioc_info(ioc, "IR shutdown (sending)\n"); + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); + + if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + goto out; + } + + if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + if (!ioc->hide_ir_msg) + ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + } + + out: + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); +} + +/** + * _scsih_get_shost_and_ioc - get shost and ioc + * and verify whether they are NULL or not + * @pdev: PCI device struct + * @shost: address of scsi host pointer + * @ioc: address of HBA adapter pointer + * + * Return zero if *shost and *ioc are not NULL otherwise return error number. + */ +static int +_scsih_get_shost_and_ioc(struct pci_dev *pdev, + struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc) +{ + *shost = pci_get_drvdata(pdev); + if (*shost == NULL) { + dev_err(&pdev->dev, "pdev's driver data is null\n"); + return -ENXIO; + } + + *ioc = shost_priv(*shost); + if (*ioc == NULL) { + dev_err(&pdev->dev, "shost's private data is null\n"); + return -ENXIO; + } + + return 0; +} + +/** + * scsih_remove - detach and remove add host + * @pdev: PCI device struct + * + * Routine called when unloading the driver. + */ +static void scsih_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + struct _sas_port *mpt3sas_port, *next_port; + struct _raid_device *raid_device, *next; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _pcie_device *pcie_device, *pcienext; + struct workqueue_struct *wq; + unsigned long flags; + Mpi2ConfigReply_t mpi_reply; + struct hba_port *port, *port_next; + + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) + return; + + ioc->remove_host = 1; + + if (!pci_device_is_present(pdev)) { + mpt3sas_base_pause_mq_polling(ioc); + _scsih_flush_running_cmds(ioc); + } + + _scsih_fw_event_cleanup_queue(ioc); + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + wq = ioc->firmware_event_thread; + ioc->firmware_event_thread = NULL; + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + if (wq) + destroy_workqueue(wq); + /* + * Copy back the unmodified ioc page1. so that on next driver load, + * current modified changes on ioc page1 won't take effect. 
+ */ + if (ioc->is_aero_ioc) + mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, + &ioc->ioc_pg1_copy); + /* release all the volumes */ + _scsih_ir_shutdown(ioc); + mpt3sas_destroy_debugfs(ioc); + sas_remove_host(shost); + list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, + list) { + if (raid_device->starget) { + sas_target_priv_data = + raid_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + scsi_remove_target(&raid_device->starget->dev); + } + ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", + raid_device->handle, (u64)raid_device->wwid); + _scsih_raid_device_remove(ioc, raid_device); + } + list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list, + list) { + _scsih_pcie_device_remove_from_sml(ioc, pcie_device); + list_del_init(&pcie_device->list); + pcie_device_put(pcie_device); + } + + /* free ports attached to the sas_host */ + list_for_each_entry_safe(mpt3sas_port, next_port, + &ioc->sas_hba.sas_port_list, port_list) { + if (mpt3sas_port->remote_identify.device_type == + SAS_END_DEVICE) + mpt3sas_device_remove_by_sas_address(ioc, + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); + else if (mpt3sas_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE || + mpt3sas_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) + mpt3sas_expander_remove(ioc, + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); + } + + list_for_each_entry_safe(port, port_next, + &ioc->port_table_list, list) { + list_del(&port->list); + kfree(port); + } + + /* free phys attached to the sas_host */ + if (ioc->sas_hba.num_phys) { + kfree(ioc->sas_hba.phy); + ioc->sas_hba.phy = NULL; + ioc->sas_hba.num_phys = 0; + } + + mpt3sas_base_detach(ioc); + spin_lock(&gioc_lock); + list_del(&ioc->list); + spin_unlock(&gioc_lock); + scsi_host_put(shost); +} + +/** + * scsih_shutdown - routine call during system shutdown + * @pdev: PCI device struct + */ +static void +scsih_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + struct workqueue_struct *wq; + unsigned long flags; + Mpi2ConfigReply_t mpi_reply; + + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) + return; + + ioc->remove_host = 1; + + if (!pci_device_is_present(pdev)) { + mpt3sas_base_pause_mq_polling(ioc); + _scsih_flush_running_cmds(ioc); + } + + _scsih_fw_event_cleanup_queue(ioc); + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + wq = ioc->firmware_event_thread; + ioc->firmware_event_thread = NULL; + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + if (wq) + destroy_workqueue(wq); + /* + * Copy back the unmodified ioc page1 so that on next driver load, + * current modified changes on ioc page1 won't take effect. + */ + if (ioc->is_aero_ioc) + mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, + &ioc->ioc_pg1_copy); + + _scsih_ir_shutdown(ioc); + _scsih_nvme_shutdown(ioc); + mpt3sas_base_mask_interrupts(ioc); + mpt3sas_base_stop_watchdog(ioc); + ioc->shost_recovery = 1; + mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); + ioc->shost_recovery = 0; + mpt3sas_base_free_irq(ioc); + mpt3sas_base_disable_msix(ioc); +} + + +/** + * _scsih_probe_boot_devices - reports 1st device + * @ioc: per adapter object + * + * If specified in bios page 2, this routine reports the 1st + * device scsi-ml or sas transport for persistent boot device + * purposes. 
Please refer to function _scsih_determine_boot_device() + */ +static void +_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc) +{ + u32 channel; + void *device; + struct _sas_device *sas_device; + struct _raid_device *raid_device; + struct _pcie_device *pcie_device; + u16 handle; + u64 sas_address_parent; + u64 sas_address; + unsigned long flags; + int rc; + int tid; + struct hba_port *port; + + /* no Bios, return immediately */ + if (!ioc->bios_pg3.BiosVersion) + return; + + device = NULL; + if (ioc->req_boot_device.device) { + device = ioc->req_boot_device.device; + channel = ioc->req_boot_device.channel; + } else if (ioc->req_alt_boot_device.device) { + device = ioc->req_alt_boot_device.device; + channel = ioc->req_alt_boot_device.channel; + } else if (ioc->current_boot_device.device) { + device = ioc->current_boot_device.device; + channel = ioc->current_boot_device.channel; + } + + if (!device) + return; + + if (channel == RAID_CHANNEL) { + raid_device = device; + /* + * If this boot vd is already registered with SML then + * no need to register it again as part of device scanning + * after diag reset during driver load operation. + */ + if (raid_device->starget) + return; + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + } else if (channel == PCIE_CHANNEL) { + pcie_device = device; + /* + * If this boot NVMe device is already registered with SML then + * no need to register it again as part of device scanning + * after diag reset during driver load operation. + */ + if (pcie_device->starget) + return; + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + tid = pcie_device->id; + list_move_tail(&pcie_device->list, &ioc->pcie_device_list); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0); + if (rc) + _scsih_pcie_device_remove(ioc, pcie_device); + } else { + sas_device = device; + /* + * If this boot sas/sata device is already registered with SML + * then no need to register it again as part of device scanning + * after diag reset during driver load operation. + */ + if (sas_device->starget) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + handle = sas_device->handle; + sas_address_parent = sas_device->sas_address_parent; + sas_address = sas_device->sas_address; + port = sas_device->port; + list_move_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (ioc->hide_drives) + return; + + if (!port) + return; + + if (!mpt3sas_transport_port_add(ioc, handle, + sas_address_parent, port)) { + _scsih_sas_device_remove(ioc, sas_device); + } else if (!sas_device->starget) { + if (!ioc->is_driver_loading) { + mpt3sas_transport_port_remove(ioc, + sas_address, + sas_address_parent, port); + _scsih_sas_device_remove(ioc, sas_device); + } + } + } +} + +/** + * _scsih_probe_raid - reporting raid volumes to scsi-ml + * @ioc: per adapter object + * + * Called during initial loading of the driver. 
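The boot-device probe above reports at most one device first, preferring the requested boot device, then the requested alternate, then whatever the BIOS currently boots from. The fallback chain reduces to a few lines; the structs below are a hypothetical simplification of the bios page 2 bookkeeping, not the driver's types:

#include <stdio.h>

struct boot_slot {
	void *device;	// NULL when bios page 2 did not name one
	int   channel;
};

struct boot_config {
	struct boot_slot req, req_alt, current;
};

// Highest-priority populated slot wins; NULL means nothing to report.
static const struct boot_slot *pick_boot_device(const struct boot_config *c)
{
	if (c->req.device)
		return &c->req;
	if (c->req_alt.device)
		return &c->req_alt;
	if (c->current.device)
		return &c->current;
	return NULL;
}

int main(void)
{
	int raid_vol = 0;
	struct boot_config cfg = { .current = { &raid_vol, 1 } };
	const struct boot_slot *slot = pick_boot_device(&cfg);

	if (slot)
		printf("reporting boot device on channel %d first\n",
		       slot->channel);
	return 0;
}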
+ */ +static void +_scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc) +{ + struct _raid_device *raid_device, *raid_next; + int rc; + + list_for_each_entry_safe(raid_device, raid_next, + &ioc->raid_device_list, list) { + if (raid_device->starget) + continue; + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + } +} + +static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc) +{ + struct _sas_device *sas_device = NULL; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + if (!list_empty(&ioc->sas_device_init_list)) { + sas_device = list_first_entry(&ioc->sas_device_init_list, + struct _sas_device, list); + sas_device_get(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + return sas_device; +} + +static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + + /* + * Since we dropped the lock during the call to port_add(), we need to + * be careful here that somebody else didn't move or delete this item + * while we were busy with other things. + * + * If it was on the list, we need a put() for the reference the list + * had. Either way, we need a get() for the destination list. + */ + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + sas_device_put(sas_device); + } + + sas_device_get(sas_device); + list_add_tail(&sas_device->list, &ioc->sas_device_list); + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +/** + * _scsih_probe_sas - reporting sas devices to sas transport + * @ioc: per adapter object + * + * Called during initial loading of the driver. + */ +static void +_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc) +{ + struct _sas_device *sas_device; + + if (ioc->hide_drives) + return; + + while ((sas_device = get_next_sas_device(ioc))) { + if (!mpt3sas_transport_port_add(ioc, sas_device->handle, + sas_device->sas_address_parent, sas_device->port)) { + _scsih_sas_device_remove(ioc, sas_device); + sas_device_put(sas_device); + continue; + } else if (!sas_device->starget) { + /* + * When asyn scanning is enabled, its not possible to + * remove devices while scanning is turned on due to an + * oops in scsi_sysfs_add_sdev()->add_device()-> + * sysfs_addrm_start() + */ + if (!ioc->is_driver_loading) { + mpt3sas_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent, + sas_device->port); + _scsih_sas_device_remove(ioc, sas_device); + sas_device_put(sas_device); + continue; + } + } + sas_device_make_active(ioc, sas_device); + sas_device_put(sas_device); + } +} + +/** + * get_next_pcie_device - Get the next pcie device + * @ioc: per adapter object + * + * Get the next pcie device from pcie_device_init_list list. 
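get_next_sas_device() and sas_device_make_active() treat list membership as owning a reference: popping an entry takes an extra get for the caller, and making it active drops the old list's reference before the new list takes its own. A sketch of that ownership rule with an explicit counter; obj_get()/obj_put() are stand-ins for sas_device_get()/sas_device_put(), and the locking is omitted:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
	int on_list;	// 0 = none, 1 = init list, 2 = active list (illustrative)
};

static void obj_get(struct obj *o) { o->refcount++; }
static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {
		printf("freeing object\n");
		free(o);
	}
}

// Caller receives its own reference, as get_next_sas_device() does.
static struct obj *take_from_init_list(struct obj *o)
{
	obj_get(o);
	return o;
}

// Move to the active list: the old list's reference is dropped and the
// active list takes a fresh one, as sas_device_make_active() does.
static void make_active(struct obj *o)
{
	if (o->on_list) {
		o->on_list = 0;
		obj_put(o);
	}
	obj_get(o);
	o->on_list = 2;
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	o->refcount = 1;	// reference held by the init list
	o->on_list = 1;

	struct obj *mine = take_from_init_list(o);	// refcount == 2
	make_active(mine);				// still 2: list + caller
	obj_put(mine);					// caller done: 1 left

	o->on_list = 0;		// list teardown at shutdown
	obj_put(o);		// last reference: object is freed
	return 0;
}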
+ * + * Return: pcie device structure if pcie_device_init_list list is not empty + * otherwise returns NULL + */ +static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc) +{ + struct _pcie_device *pcie_device = NULL; + unsigned long flags; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + if (!list_empty(&ioc->pcie_device_init_list)) { + pcie_device = list_first_entry(&ioc->pcie_device_init_list, + struct _pcie_device, list); + pcie_device_get(pcie_device); + } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + return pcie_device; +} + +/** + * pcie_device_make_active - Add pcie device to pcie_device_list list + * @ioc: per adapter object + * @pcie_device: pcie device object + * + * Add the pcie device which has registered with SCSI Transport Later to + * pcie_device_list list + */ +static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc, + struct _pcie_device *pcie_device) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + + if (!list_empty(&pcie_device->list)) { + list_del_init(&pcie_device->list); + pcie_device_put(pcie_device); + } + pcie_device_get(pcie_device); + list_add_tail(&pcie_device->list, &ioc->pcie_device_list); + + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); +} + +/** + * _scsih_probe_pcie - reporting PCIe devices to scsi-ml + * @ioc: per adapter object + * + * Called during initial loading of the driver. + */ +static void +_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc) +{ + struct _pcie_device *pcie_device; + int rc; + + /* PCIe Device List */ + while ((pcie_device = get_next_pcie_device(ioc))) { + if (pcie_device->starget) { + pcie_device_put(pcie_device); + continue; + } + if (pcie_device->access_status == + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) { + pcie_device_make_active(ioc, pcie_device); + pcie_device_put(pcie_device); + continue; + } + rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, + pcie_device->id, 0); + if (rc) { + _scsih_pcie_device_remove(ioc, pcie_device); + pcie_device_put(pcie_device); + continue; + } else if (!pcie_device->starget) { + /* + * When async scanning is enabled, its not possible to + * remove devices while scanning is turned on due to an + * oops in scsi_sysfs_add_sdev()->add_device()-> + * sysfs_addrm_start() + */ + if (!ioc->is_driver_loading) { + /* TODO-- Need to find out whether this condition will + * occur or not + */ + _scsih_pcie_device_remove(ioc, pcie_device); + pcie_device_put(pcie_device); + continue; + } + } + pcie_device_make_active(ioc, pcie_device); + pcie_device_put(pcie_device); + } +} + +/** + * _scsih_probe_devices - probing for devices + * @ioc: per adapter object + * + * Called during initial loading of the driver. 
+ */ +static void +_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc) +{ + u16 volume_mapping_flags; + + if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR)) + return; /* return when IOC doesn't support initiator mode */ + + _scsih_probe_boot_devices(ioc); + + if (ioc->ir_firmware) { + volume_mapping_flags = + le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) & + MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; + if (volume_mapping_flags == + MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { + _scsih_probe_raid(ioc); + _scsih_probe_sas(ioc); + } else { + _scsih_probe_sas(ioc); + _scsih_probe_raid(ioc); + } + } else { + _scsih_probe_sas(ioc); + _scsih_probe_pcie(ioc); + } +} + +/** + * scsih_scan_start - scsi lld callback for .scan_start + * @shost: SCSI host pointer + * + * The shost has the ability to discover targets on its own instead + * of scanning the entire bus. In our implemention, we will kick off + * firmware discovery. + */ +static void +scsih_scan_start(struct Scsi_Host *shost) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + int rc; + if (diag_buffer_enable != -1 && diag_buffer_enable != 0) + mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable); + else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0) + mpt3sas_enable_diag_buffer(ioc, 1); + + if (disable_discovery > 0) + return; + + ioc->start_scan = 1; + rc = mpt3sas_port_enable(ioc); + + if (rc != 0) + ioc_info(ioc, "port enable: FAILED\n"); +} + +/** + * _scsih_complete_devices_scanning - add the devices to sml and + * complete ioc initialization. + * @ioc: per adapter object + * + * Return nothing. + */ +static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc) +{ + + if (ioc->wait_for_discovery_to_complete) { + ioc->wait_for_discovery_to_complete = 0; + _scsih_probe_devices(ioc); + } + + mpt3sas_base_start_watchdog(ioc); + ioc->is_driver_loading = 0; +} + +/** + * scsih_scan_finished - scsi lld callback for .scan_finished + * @shost: SCSI host pointer + * @time: elapsed time of the scan in jiffies + * + * This function will be called periodicallyn until it returns 1 with the + * scsi_host and the elapsed time of the scan in jiffies. In our implemention, + * we wait for firmware discovery to complete, then return 1. 
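The scan_finished callback described here is polled by the midlayer with the elapsed scan time and must return 1 only once discovery is over, successfully or not; the driver also gives up after 300 seconds. A plain-C sketch of that polling contract, with discovery_still_running() as a hypothetical stand-in for the ioc->start_scan check:

#include <stdbool.h>
#include <stdio.h>

#define SCAN_TIMEOUT_SECONDS 300

static bool discovery_still_running(unsigned long elapsed)
{
	return elapsed < 5;	// pretend firmware discovery takes 5 seconds
}

// Return 1 when scanning is finished (or abandoned), 0 to be polled again.
static int scan_finished(unsigned long elapsed_seconds)
{
	if (elapsed_seconds >= SCAN_TIMEOUT_SECONDS) {
		printf("port enable: FAILED with timeout\n");
		return 1;	// give up, but tell the midlayer we are done
	}
	if (discovery_still_running(elapsed_seconds))
		return 0;	// keep polling

	printf("port enable: SUCCESS\n");
	return 1;
}

int main(void)
{
	for (unsigned long t = 0; !scan_finished(t); t++)
		;
	return 0;
}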
+ */ +static int +scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + u32 ioc_state; + int issue_hard_reset = 0; + + if (disable_discovery > 0) { + ioc->is_driver_loading = 0; + ioc->wait_for_discovery_to_complete = 0; + return 1; + } + + if (time >= (300 * HZ)) { + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n"); + ioc->is_driver_loading = 0; + return 1; + } + + if (ioc->start_scan) { + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + issue_hard_reset = 1; + goto out; + } else if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_base_coredump_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + mpt3sas_base_wait_for_coredump_completion(ioc, __func__); + issue_hard_reset = 1; + goto out; + } + return 0; + } + + if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) { + ioc_info(ioc, + "port enable: aborted due to diag reset\n"); + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + goto out; + } + if (ioc->start_scan_failed) { + ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n", + ioc->start_scan_failed); + ioc->is_driver_loading = 0; + ioc->wait_for_discovery_to_complete = 0; + ioc->remove_host = 1; + return 1; + } + + ioc_info(ioc, "port enable: SUCCESS\n"); + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + _scsih_complete_devices_scanning(ioc); + +out: + if (issue_hard_reset) { + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET)) + ioc->is_driver_loading = 0; + } + return 1; +} + +/** + * scsih_map_queues - map reply queues with request queues + * @shost: SCSI host pointer + */ +static void scsih_map_queues(struct Scsi_Host *shost) +{ + struct MPT3SAS_ADAPTER *ioc = + (struct MPT3SAS_ADAPTER *)shost->hostdata; + struct blk_mq_queue_map *map; + int i, qoff, offset; + int nr_msix_vectors = ioc->iopoll_q_start_index; + int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors; + + if (shost->nr_hw_queues == 1) + return; + + for (i = 0, qoff = 0; i < shost->nr_maps; i++) { + map = &shost->tag_set.map[i]; + map->nr_queues = 0; + offset = 0; + if (i == HCTX_TYPE_DEFAULT) { + map->nr_queues = + nr_msix_vectors - ioc->high_iops_queues; + offset = ioc->high_iops_queues; + } else if (i == HCTX_TYPE_POLL) + map->nr_queues = iopoll_q_count; + + if (!map->nr_queues) + BUG_ON(i == HCTX_TYPE_DEFAULT); + + /* + * The poll queue(s) doesn't have an IRQ (and hence IRQ + * affinity), so use the regular blk-mq cpu mapping + */ + map->queue_offset = qoff; + if (i != HCTX_TYPE_POLL) + blk_mq_pci_map_queues(map, ioc->pdev, offset); + else + blk_mq_map_queues(map); + + qoff += map->nr_queues; + } +} + +/* shost template for SAS 2.0 HBA devices */ +static const struct scsi_host_template mpt2sas_driver_template = { + .module = THIS_MODULE, + .name = "Fusion MPT SAS Host", + .proc_name = MPT2SAS_DRIVER_NAME, + .queuecommand = scsih_qcmd, + .target_alloc = scsih_target_alloc, + .slave_alloc = scsih_slave_alloc, + .slave_configure = scsih_slave_configure, + .target_destroy = scsih_target_destroy, + .slave_destroy = scsih_slave_destroy, + .scan_finished = scsih_scan_finished, + .scan_start = scsih_scan_start, + .change_queue_depth = scsih_change_queue_depth, + .eh_abort_handler = scsih_abort, + .eh_device_reset_handler = scsih_dev_reset, + 
.eh_target_reset_handler = scsih_target_reset, + .eh_host_reset_handler = scsih_host_reset, + .bios_param = scsih_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = MPT2SAS_SG_DEPTH, + .max_sectors = 32767, + .cmd_per_lun = 7, + .shost_groups = mpt3sas_host_groups, + .sdev_groups = mpt3sas_dev_groups, + .track_queue_depth = 1, + .cmd_size = sizeof(struct scsiio_tracker), +}; + +/* raid transport support for SAS 2.0 HBA devices */ +static struct raid_function_template mpt2sas_raid_functions = { + .cookie = &mpt2sas_driver_template, + .is_raid = scsih_is_raid, + .get_resync = scsih_get_resync, + .get_state = scsih_get_state, +}; + +/* shost template for SAS 3.0 HBA devices */ +static const struct scsi_host_template mpt3sas_driver_template = { + .module = THIS_MODULE, + .name = "Fusion MPT SAS Host", + .proc_name = MPT3SAS_DRIVER_NAME, + .queuecommand = scsih_qcmd, + .target_alloc = scsih_target_alloc, + .slave_alloc = scsih_slave_alloc, + .slave_configure = scsih_slave_configure, + .target_destroy = scsih_target_destroy, + .slave_destroy = scsih_slave_destroy, + .scan_finished = scsih_scan_finished, + .scan_start = scsih_scan_start, + .change_queue_depth = scsih_change_queue_depth, + .eh_abort_handler = scsih_abort, + .eh_device_reset_handler = scsih_dev_reset, + .eh_target_reset_handler = scsih_target_reset, + .eh_host_reset_handler = scsih_host_reset, + .bios_param = scsih_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = MPT3SAS_SG_DEPTH, + .max_sectors = 32767, + .max_segment_size = 0xffffffff, + .cmd_per_lun = 128, + .shost_groups = mpt3sas_host_groups, + .sdev_groups = mpt3sas_dev_groups, + .track_queue_depth = 1, + .cmd_size = sizeof(struct scsiio_tracker), + .map_queues = scsih_map_queues, + .mq_poll = mpt3sas_blk_mq_poll, +}; + +/* raid transport support for SAS 3.0 HBA devices */ +static struct raid_function_template mpt3sas_raid_functions = { + .cookie = &mpt3sas_driver_template, + .is_raid = scsih_is_raid, + .get_resync = scsih_get_resync, + .get_state = scsih_get_state, +}; + +/** + * _scsih_determine_hba_mpi_version - determine in which MPI version class + * this device belongs to. 
+ * @pdev: PCI device struct + * + * return MPI2_VERSION for SAS 2.0 HBA devices, + * MPI25_VERSION for SAS 3.0 HBA devices, and + * MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices + */ +static u16 +_scsih_determine_hba_mpi_version(struct pci_dev *pdev) +{ + + switch (pdev->device) { + case MPI2_MFGPAGE_DEVID_SSS6200: + case MPI2_MFGPAGE_DEVID_SAS2004: + case MPI2_MFGPAGE_DEVID_SAS2008: + case MPI2_MFGPAGE_DEVID_SAS2108_1: + case MPI2_MFGPAGE_DEVID_SAS2108_2: + case MPI2_MFGPAGE_DEVID_SAS2108_3: + case MPI2_MFGPAGE_DEVID_SAS2116_1: + case MPI2_MFGPAGE_DEVID_SAS2116_2: + case MPI2_MFGPAGE_DEVID_SAS2208_1: + case MPI2_MFGPAGE_DEVID_SAS2208_2: + case MPI2_MFGPAGE_DEVID_SAS2208_3: + case MPI2_MFGPAGE_DEVID_SAS2208_4: + case MPI2_MFGPAGE_DEVID_SAS2208_5: + case MPI2_MFGPAGE_DEVID_SAS2208_6: + case MPI2_MFGPAGE_DEVID_SAS2308_1: + case MPI2_MFGPAGE_DEVID_SAS2308_2: + case MPI2_MFGPAGE_DEVID_SAS2308_3: + case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: + case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: + return MPI2_VERSION; + case MPI25_MFGPAGE_DEVID_SAS3004: + case MPI25_MFGPAGE_DEVID_SAS3008: + case MPI25_MFGPAGE_DEVID_SAS3108_1: + case MPI25_MFGPAGE_DEVID_SAS3108_2: + case MPI25_MFGPAGE_DEVID_SAS3108_5: + case MPI25_MFGPAGE_DEVID_SAS3108_6: + return MPI25_VERSION; + case MPI26_MFGPAGE_DEVID_SAS3216: + case MPI26_MFGPAGE_DEVID_SAS3224: + case MPI26_MFGPAGE_DEVID_SAS3316_1: + case MPI26_MFGPAGE_DEVID_SAS3316_2: + case MPI26_MFGPAGE_DEVID_SAS3316_3: + case MPI26_MFGPAGE_DEVID_SAS3316_4: + case MPI26_MFGPAGE_DEVID_SAS3324_1: + case MPI26_MFGPAGE_DEVID_SAS3324_2: + case MPI26_MFGPAGE_DEVID_SAS3324_3: + case MPI26_MFGPAGE_DEVID_SAS3324_4: + case MPI26_MFGPAGE_DEVID_SAS3508: + case MPI26_MFGPAGE_DEVID_SAS3508_1: + case MPI26_MFGPAGE_DEVID_SAS3408: + case MPI26_MFGPAGE_DEVID_SAS3516: + case MPI26_MFGPAGE_DEVID_SAS3516_1: + case MPI26_MFGPAGE_DEVID_SAS3416: + case MPI26_MFGPAGE_DEVID_SAS3616: + case MPI26_ATLAS_PCIe_SWITCH_DEVID: + case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: + case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: + case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: + case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: + case MPI26_MFGPAGE_DEVID_INVALID0_3916: + case MPI26_MFGPAGE_DEVID_INVALID1_3916: + case MPI26_MFGPAGE_DEVID_INVALID0_3816: + case MPI26_MFGPAGE_DEVID_INVALID1_3816: + return MPI26_VERSION; + } + return 0; +} + +/** + * _scsih_probe - attach and add scsi host + * @pdev: PCI device struct + * @id: pci device id + * + * Return: 0 success, anything else error. 
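+ *
+ * In short: pick the generation specific host template, allocate the
+ * Scsi_Host plus the per adapter MPT3SAS_ADAPTER, initialize the lists,
+ * locks and callback indexes, bring the controller up through
+ * mpt3sas_base_attach(), and finally register and scan the host with
+ * scsi_add_host() and scsi_scan_host().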
+ */ +static int +_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct MPT3SAS_ADAPTER *ioc; + struct Scsi_Host *shost = NULL; + int rv; + u16 hba_mpi_version; + int iopoll_q_count = 0; + + /* Determine in which MPI version class this pci device belongs */ + hba_mpi_version = _scsih_determine_hba_mpi_version(pdev); + if (hba_mpi_version == 0) + return -ENODEV; + + /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one, + * for other generation HBA's return with -ENODEV + */ + if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION)) + return -ENODEV; + + /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two, + * for other generation HBA's return with -ENODEV + */ + if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION + || hba_mpi_version == MPI26_VERSION))) + return -ENODEV; + + switch (hba_mpi_version) { + case MPI2_VERSION: + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); + /* Use mpt2sas driver host template for SAS 2.0 HBA's */ + shost = scsi_host_alloc(&mpt2sas_driver_template, + sizeof(struct MPT3SAS_ADAPTER)); + if (!shost) + return -ENODEV; + ioc = shost_priv(shost); + memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); + ioc->hba_mpi_version_belonged = hba_mpi_version; + ioc->id = mpt2_ids++; + sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME); + switch (pdev->device) { + case MPI2_MFGPAGE_DEVID_SSS6200: + ioc->is_warpdrive = 1; + ioc->hide_ir_msg = 1; + break; + case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: + case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: + ioc->is_mcpu_endpoint = 1; + break; + default: + ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS; + break; + } + + if (multipath_on_hba == -1 || multipath_on_hba == 0) + ioc->multipath_on_hba = 0; + else + ioc->multipath_on_hba = 1; + + break; + case MPI25_VERSION: + case MPI26_VERSION: + /* Use mpt3sas driver host template for SAS 3.0 HBA's */ + shost = scsi_host_alloc(&mpt3sas_driver_template, + sizeof(struct MPT3SAS_ADAPTER)); + if (!shost) + return -ENODEV; + ioc = shost_priv(shost); + memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); + ioc->hba_mpi_version_belonged = hba_mpi_version; + ioc->id = mpt3_ids++; + sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME); + switch (pdev->device) { + case MPI26_MFGPAGE_DEVID_SAS3508: + case MPI26_MFGPAGE_DEVID_SAS3508_1: + case MPI26_MFGPAGE_DEVID_SAS3408: + case MPI26_MFGPAGE_DEVID_SAS3516: + case MPI26_MFGPAGE_DEVID_SAS3516_1: + case MPI26_MFGPAGE_DEVID_SAS3416: + case MPI26_MFGPAGE_DEVID_SAS3616: + case MPI26_ATLAS_PCIe_SWITCH_DEVID: + ioc->is_gen35_ioc = 1; + break; + case MPI26_MFGPAGE_DEVID_INVALID0_3816: + case MPI26_MFGPAGE_DEVID_INVALID0_3916: + dev_err(&pdev->dev, + "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid", + pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); + return 1; + case MPI26_MFGPAGE_DEVID_INVALID1_3816: + case MPI26_MFGPAGE_DEVID_INVALID1_3916: + dev_err(&pdev->dev, + "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered", + pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); + return 1; + case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: + case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: + dev_info(&pdev->dev, + "HBA is in Configurable Secure mode\n"); + fallthrough; + case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: + case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: + ioc->is_aero_ioc = ioc->is_gen35_ioc = 1; + break; + default: + ioc->is_gen35_ioc = ioc->is_aero_ioc = 0; + } + if 
((ioc->hba_mpi_version_belonged == MPI25_VERSION && + pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) || + (ioc->hba_mpi_version_belonged == MPI26_VERSION)) { + ioc->combined_reply_queue = 1; + if (ioc->is_gen35_ioc) + ioc->combined_reply_index_count = + MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35; + else + ioc->combined_reply_index_count = + MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3; + } + + switch (ioc->is_gen35_ioc) { + case 0: + if (multipath_on_hba == -1 || multipath_on_hba == 0) + ioc->multipath_on_hba = 0; + else + ioc->multipath_on_hba = 1; + break; + case 1: + if (multipath_on_hba == -1 || multipath_on_hba > 0) + ioc->multipath_on_hba = 1; + else + ioc->multipath_on_hba = 0; + break; + default: + break; + } + + break; + default: + return -ENODEV; + } + + INIT_LIST_HEAD(&ioc->list); + spin_lock(&gioc_lock); + list_add_tail(&ioc->list, &mpt3sas_ioc_list); + spin_unlock(&gioc_lock); + ioc->shost = shost; + ioc->pdev = pdev; + ioc->scsi_io_cb_idx = scsi_io_cb_idx; + ioc->tm_cb_idx = tm_cb_idx; + ioc->ctl_cb_idx = ctl_cb_idx; + ioc->base_cb_idx = base_cb_idx; + ioc->port_enable_cb_idx = port_enable_cb_idx; + ioc->transport_cb_idx = transport_cb_idx; + ioc->scsih_cb_idx = scsih_cb_idx; + ioc->config_cb_idx = config_cb_idx; + ioc->tm_tr_cb_idx = tm_tr_cb_idx; + ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; + ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; + ioc->logging_level = logging_level; + ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; + /* Host waits for minimum of six seconds */ + ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; + /* + * Enable MEMORY MOVE support flag. + */ + ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE; + /* Enable ADDITIONAL QUERY support flag. */ + ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY; + + ioc->enable_sdev_max_qd = enable_sdev_max_qd; + + /* misc semaphores and spin locks */ + mutex_init(&ioc->reset_in_progress_mutex); + /* initializing pci_access_mutex lock */ + mutex_init(&ioc->pci_access_mutex); + spin_lock_init(&ioc->ioc_reset_in_progress_lock); + spin_lock_init(&ioc->scsi_lookup_lock); + spin_lock_init(&ioc->sas_device_lock); + spin_lock_init(&ioc->sas_node_lock); + spin_lock_init(&ioc->fw_event_lock); + spin_lock_init(&ioc->raid_device_lock); + spin_lock_init(&ioc->pcie_device_lock); + spin_lock_init(&ioc->diag_trigger_lock); + + INIT_LIST_HEAD(&ioc->sas_device_list); + INIT_LIST_HEAD(&ioc->sas_device_init_list); + INIT_LIST_HEAD(&ioc->sas_expander_list); + INIT_LIST_HEAD(&ioc->enclosure_list); + INIT_LIST_HEAD(&ioc->pcie_device_list); + INIT_LIST_HEAD(&ioc->pcie_device_init_list); + INIT_LIST_HEAD(&ioc->fw_event_list); + INIT_LIST_HEAD(&ioc->raid_device_list); + INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); + INIT_LIST_HEAD(&ioc->delayed_tr_list); + INIT_LIST_HEAD(&ioc->delayed_sc_list); + INIT_LIST_HEAD(&ioc->delayed_event_ack_list); + INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); + INIT_LIST_HEAD(&ioc->reply_queue_list); + INIT_LIST_HEAD(&ioc->port_table_list); + + sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id); + + /* init shost parameters */ + shost->max_cmd_len = 32; + shost->max_lun = max_lun; + shost->transportt = mpt3sas_transport_template; + shost->unique_id = ioc->id; + + if (ioc->is_mcpu_endpoint) { + /* mCPU MPI support 64K max IO */ + shost->max_sectors = 128; + ioc_info(ioc, "The max_sectors value is set to %d\n", + shost->max_sectors); + } else { + if (max_sectors != 0xFFFF) { + if (max_sectors < 64) { + shost->max_sectors = 64; + ioc_warn(ioc, 
"Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n", + max_sectors); + } else if (max_sectors > 32767) { + shost->max_sectors = 32767; + ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n", + max_sectors); + } else { + shost->max_sectors = max_sectors & 0xFFFE; + ioc_info(ioc, "The max_sectors value is set to %d\n", + shost->max_sectors); + } + } + } + /* register EEDP capabilities with SCSI layer */ + if (prot_mask >= 0) + scsi_host_set_prot(shost, (prot_mask & 0x07)); + else + scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION + | SHOST_DIF_TYPE2_PROTECTION + | SHOST_DIF_TYPE3_PROTECTION); + + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + + /* event thread */ + snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), + "fw_event_%s%d", ioc->driver_name, ioc->id); + ioc->firmware_event_thread = alloc_ordered_workqueue( + ioc->firmware_event_name, 0); + if (!ioc->firmware_event_thread) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rv = -ENODEV; + goto out_thread_fail; + } + + shost->host_tagset = 0; + + if (ioc->is_gen35_ioc && host_tagset_enable) + shost->host_tagset = 1; + + ioc->is_driver_loading = 1; + if ((mpt3sas_base_attach(ioc))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rv = -ENODEV; + goto out_attach_fail; + } + + if (ioc->is_warpdrive) { + if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) + ioc->hide_drives = 0; + else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS) + ioc->hide_drives = 1; + else { + if (mpt3sas_get_num_volumes(ioc)) + ioc->hide_drives = 1; + else + ioc->hide_drives = 0; + } + } else + ioc->hide_drives = 0; + + shost->nr_hw_queues = 1; + + if (shost->host_tagset) { + shost->nr_hw_queues = + ioc->reply_queue_count - ioc->high_iops_queues; + + iopoll_q_count = + ioc->reply_queue_count - ioc->iopoll_q_start_index; + + shost->nr_maps = iopoll_q_count ? 3 : 1; + + dev_info(&ioc->pdev->dev, + "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n", + shost->can_queue, shost->nr_hw_queues); + } + + rv = scsi_add_host(shost, &pdev->dev); + if (rv) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_add_shost_fail; + } + + scsi_scan_host(shost); + mpt3sas_setup_debugfs(ioc); + return 0; +out_add_shost_fail: + mpt3sas_base_detach(ioc); + out_attach_fail: + destroy_workqueue(ioc->firmware_event_thread); + out_thread_fail: + spin_lock(&gioc_lock); + list_del(&ioc->list); + spin_unlock(&gioc_lock); + scsi_host_put(shost); + return rv; +} + +/** + * scsih_suspend - power management suspend main entry point + * @dev: Device struct + * + * Return: 0 success, anything else error. + */ +static int __maybe_unused +scsih_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + int rc; + + rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc); + if (rc) + return rc; + + mpt3sas_base_stop_watchdog(ioc); + scsi_block_requests(shost); + _scsih_nvme_shutdown(ioc); + ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n", + pdev, pci_name(pdev)); + + mpt3sas_base_free_resources(ioc); + return 0; +} + +/** + * scsih_resume - power management resume main entry point + * @dev: Device struct + * + * Return: 0 success, anything else error. 
+ */ +static int __maybe_unused +scsih_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + pci_power_t device_state = pdev->current_state; + int r; + + r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc); + if (r) + return r; + + ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", + pdev, pci_name(pdev), device_state); + + ioc->pdev = pdev; + r = mpt3sas_base_map_resources(ioc); + if (r) + return r; + ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n"); + mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET); + scsi_unblock_requests(shost); + mpt3sas_base_start_watchdog(ioc); + return 0; +} + +/** + * scsih_pci_error_detected - Called when a PCI error is detected. + * @pdev: PCI device struct + * @state: PCI channel state + * + * Description: Called when a PCI error is detected. + * + * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT. + */ +static pci_ers_result_t +scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) + return PCI_ERS_RESULT_DISCONNECT; + + ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state); + + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + /* Fatal error, prepare for slot reset */ + ioc->pci_error_recovery = 1; + scsi_block_requests(ioc->shost); + mpt3sas_base_stop_watchdog(ioc); + mpt3sas_base_free_resources(ioc); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + /* Permanent error, prepare for device removal */ + ioc->pci_error_recovery = 1; + mpt3sas_base_stop_watchdog(ioc); + mpt3sas_base_pause_mq_polling(ioc); + _scsih_flush_running_cmds(ioc); + return PCI_ERS_RESULT_DISCONNECT; + } + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * scsih_pci_slot_reset - Called when PCI slot has been reset. + * @pdev: PCI device struct + * + * Description: This routine is called by the pci error recovery + * code after the PCI slot has been reset, just before we + * should resume normal operations. + */ +static pci_ers_result_t +scsih_pci_slot_reset(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + int rc; + + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) + return PCI_ERS_RESULT_DISCONNECT; + + ioc_info(ioc, "PCI error: slot reset callback!!\n"); + + ioc->pci_error_recovery = 0; + ioc->pdev = pdev; + pci_restore_state(pdev); + rc = mpt3sas_base_map_resources(ioc); + if (rc) + return PCI_ERS_RESULT_DISCONNECT; + + ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n"); + rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + + ioc_warn(ioc, "hard reset: %s\n", + (rc == 0) ? "success" : "failed"); + + if (!rc) + return PCI_ERS_RESULT_RECOVERED; + else + return PCI_ERS_RESULT_DISCONNECT; +} + +/** + * scsih_pci_resume() - resume normal ops after PCI reset + * @pdev: pointer to PCI device + * + * Called when the error recovery driver tells us that its + * OK to resume normal operation. Use completion to allow + * halted scsi ops to resume. 
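+ *
+ * This is the last step of the PCI error recovery sequence handled here:
+ * scsih_pci_error_detected() blocks I/O and frees resources on a fatal
+ * error, scsih_pci_slot_reset() re-maps resources and issues a hard
+ * reset, and this callback then restarts the watchdog and unblocks
+ * requests.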
+ */ +static void +scsih_pci_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) + return; + + ioc_info(ioc, "PCI error: resume callback!!\n"); + + mpt3sas_base_start_watchdog(ioc); + scsi_unblock_requests(ioc->shost); +} + +/** + * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers + * @pdev: pointer to PCI device + */ +static pci_ers_result_t +scsih_pci_mmio_enabled(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) + return PCI_ERS_RESULT_DISCONNECT; + + ioc_info(ioc, "PCI error: mmio enabled callback!!\n"); + + /* TODO - dump whatever for debugging purposes */ + + /* This called only if scsih_pci_error_detected returns + * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still + * works, no need to reset slot. + */ + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * scsih_ncq_prio_supp - Check for NCQ command priority support + * @sdev: scsi device struct + * + * This is called when a user indicates they would like to enable + * ncq command priorities. This works only on SATA devices. + */ +bool scsih_ncq_prio_supp(struct scsi_device *sdev) +{ + struct scsi_vpd *vpd; + bool ncq_prio_supp = false; + + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + if (!vpd || vpd->len < 214) + goto out; + + ncq_prio_supp = (vpd->data[213] >> 4) & 1; +out: + rcu_read_unlock(); + + return ncq_prio_supp; +} +/* + * The pci device ids are defined in mpi/mpi2_cnfg.h. + */ +static const struct pci_device_id mpt3sas_pci_table[] = { + /* Spitfire ~ 2004 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004, + PCI_ANY_ID, PCI_ANY_ID }, + /* Falcon ~ 2008 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008, + PCI_ANY_ID, PCI_ANY_ID }, + /* Liberator ~ 2108 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3, + PCI_ANY_ID, PCI_ANY_ID }, + /* Meteor ~ 2116 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2, + PCI_ANY_ID, PCI_ANY_ID }, + /* Thunderbolt ~ 2208 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6, + PCI_ANY_ID, PCI_ANY_ID }, + /* Mustang ~ 2308 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1, + PCI_ANY_ID, PCI_ANY_ID }, + /* SSS6200 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200, + PCI_ANY_ID, PCI_ANY_ID }, + /* Fury ~ 3004 and 3008 */ + { MPI2_MFGPAGE_VENDORID_LSI, 
MPI25_MFGPAGE_DEVID_SAS3004, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008, + PCI_ANY_ID, PCI_ANY_ID }, + /* Invader ~ 3108 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6, + PCI_ANY_ID, PCI_ANY_ID }, + /* Cutlass ~ 3216 and 3224 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224, + PCI_ANY_ID, PCI_ANY_ID }, + /* Intruder ~ 3316 and 3324 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4, + PCI_ANY_ID, PCI_ANY_ID }, + /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416, + PCI_ANY_ID, PCI_ANY_ID }, + /* Mercator ~ 3616*/ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616, + PCI_ANY_ID, PCI_ANY_ID }, + + /* Aero SI 0x00E1 Configurable Secure + * 0x00E2 Hard Secure + */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916, + PCI_ANY_ID, PCI_ANY_ID }, + + /* + * Aero SI –> 0x00E0 Invalid, 0x00E3 Tampered + */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916, + PCI_ANY_ID, PCI_ANY_ID }, + + /* Atlas PCIe Switch Management Port */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID, + PCI_ANY_ID, PCI_ANY_ID }, + + /* Sea SI 0x00E5 Configurable Secure + * 0x00E6 Hard Secure + */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816, + PCI_ANY_ID, PCI_ANY_ID }, + + /* + * ATTO Branded ExpressSAS H12xx GT + */ + { MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816, + PCI_ANY_ID, PCI_ANY_ID }, + + /* + * Sea SI –> 0x00E4 Invalid, 0x00E7 Tampered + */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816, + PCI_ANY_ID, PCI_ANY_ID }, + + {0} /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(pci, 
mpt3sas_pci_table); + +static struct pci_error_handlers _mpt3sas_err_handler = { + .error_detected = scsih_pci_error_detected, + .mmio_enabled = scsih_pci_mmio_enabled, + .slot_reset = scsih_pci_slot_reset, + .resume = scsih_pci_resume, +}; + +static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume); + +static struct pci_driver mpt3sas_driver = { + .name = MPT3SAS_DRIVER_NAME, + .id_table = mpt3sas_pci_table, + .probe = _scsih_probe, + .remove = scsih_remove, + .shutdown = scsih_shutdown, + .err_handler = &_mpt3sas_err_handler, + .driver.pm = &scsih_pm_ops, +}; + +/** + * scsih_init - main entry point for this driver. + * + * Return: 0 success, anything else error. + */ +static int +scsih_init(void) +{ + mpt2_ids = 0; + mpt3_ids = 0; + + mpt3sas_base_initialize_callback_handler(); + + /* queuecommand callback handler */ + scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done); + + /* task management callback handler */ + tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done); + + /* base internal commands callback handler */ + base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done); + port_enable_cb_idx = mpt3sas_base_register_callback_handler( + mpt3sas_port_enable_done); + + /* transport internal commands callback handler */ + transport_cb_idx = mpt3sas_base_register_callback_handler( + mpt3sas_transport_done); + + /* scsih internal commands callback handler */ + scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done); + + /* configuration page API internal commands callback handler */ + config_cb_idx = mpt3sas_base_register_callback_handler( + mpt3sas_config_done); + + /* ctl module callback handler */ + ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done); + + tm_tr_cb_idx = mpt3sas_base_register_callback_handler( + _scsih_tm_tr_complete); + + tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler( + _scsih_tm_volume_tr_complete); + + tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler( + _scsih_sas_control_complete); + + mpt3sas_init_debugfs(); + return 0; +} + +/** + * scsih_exit - exit point for this driver (when it is a module). + */ +static void +scsih_exit(void) +{ + + mpt3sas_base_release_callback_handler(scsi_io_cb_idx); + mpt3sas_base_release_callback_handler(tm_cb_idx); + mpt3sas_base_release_callback_handler(base_cb_idx); + mpt3sas_base_release_callback_handler(port_enable_cb_idx); + mpt3sas_base_release_callback_handler(transport_cb_idx); + mpt3sas_base_release_callback_handler(scsih_cb_idx); + mpt3sas_base_release_callback_handler(config_cb_idx); + mpt3sas_base_release_callback_handler(ctl_cb_idx); + + mpt3sas_base_release_callback_handler(tm_tr_cb_idx); + mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx); + mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx); + +/* raid transport support */ + if (hbas_to_enumerate != 1) + raid_class_release(mpt3sas_raid_template); + if (hbas_to_enumerate != 2) + raid_class_release(mpt2sas_raid_template); + sas_release_transport(mpt3sas_transport_template); + mpt3sas_exit_debugfs(); +} + +/** + * _mpt3sas_init - main entry point for this driver. + * + * Return: 0 success, anything else error.
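+ *
+ * Load order below: attach the SAS transport template, attach whichever
+ * raid class templates hbas_to_enumerate allows, register the callback
+ * handlers via scsih_init(), initialize the ctl interface with
+ * mpt3sas_ctl_init(), and only then register the PCI driver so that
+ * probes can run.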
+ */ +static int __init +_mpt3sas_init(void) +{ + int error; + + pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME, + MPT3SAS_DRIVER_VERSION); + + mpt3sas_transport_template = + sas_attach_transport(&mpt3sas_transport_functions); + if (!mpt3sas_transport_template) + return -ENODEV; + + /* No need to attach mpt3sas raid functions template + * if hbas_to_enumerate value is one. + */ + if (hbas_to_enumerate != 1) { + mpt3sas_raid_template = + raid_class_attach(&mpt3sas_raid_functions); + if (!mpt3sas_raid_template) { + sas_release_transport(mpt3sas_transport_template); + return -ENODEV; + } + } + + /* No need to attach mpt2sas raid functions template + * if hbas_to_enumerate value is two. + */ + if (hbas_to_enumerate != 2) { + mpt2sas_raid_template = + raid_class_attach(&mpt2sas_raid_functions); + if (!mpt2sas_raid_template) { + sas_release_transport(mpt3sas_transport_template); + return -ENODEV; + } + } + + error = scsih_init(); + if (error) { + scsih_exit(); + return error; + } + + mpt3sas_ctl_init(hbas_to_enumerate); + + error = pci_register_driver(&mpt3sas_driver); + if (error) { + mpt3sas_ctl_exit(hbas_to_enumerate); + scsih_exit(); + } + + return error; +} + +/** + * _mpt3sas_exit - exit point for this driver (when it is a module). + * + */ +static void __exit +_mpt3sas_exit(void) +{ + pr_info("mpt3sas version %s unloading\n", + MPT3SAS_DRIVER_VERSION); + + pci_unregister_driver(&mpt3sas_driver); + + mpt3sas_ctl_exit(hbas_to_enumerate); + + scsih_exit(); +} + +module_init(_mpt3sas_init); +module_exit(_mpt3sas_exit); diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c new file mode 100644 index 000000000..e8a4750f6 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -0,0 +1,2200 @@ +/* + * SAS Transport Layer for MPT (Message Passing Technology) based controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations.
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mpt3sas_base.h" + +/** + * _transport_get_port_id_by_sas_phy - get zone's port id that Phy belong to + * @phy: sas_phy object + * + * Return Port number + */ +static inline u8 +_transport_get_port_id_by_sas_phy(struct sas_phy *phy) +{ + u8 port_id = 0xFF; + struct hba_port *port = phy->hostdata; + + if (port) + port_id = port->port_id; + + return port_id; +} + +/** + * _transport_sas_node_find_by_sas_address - sas node search + * @ioc: per adapter object + * @sas_address: sas address of expander or sas host + * @port: hba port entry + * Context: Calling function should acquire ioc->sas_node_lock. + * + * Search for either hba phys or expander device based on handle, then returns + * the sas_node object. + */ +static struct _sas_node * +_transport_sas_node_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct hba_port *port) +{ + if (ioc->sas_hba.sas_address == sas_address) + return &ioc->sas_hba; + else + return mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address, port); +} + +/** + * _transport_get_port_id_by_rphy - Get Port number from rphy object + * @ioc: per adapter object + * @rphy: sas_rphy object + * + * Returns Port number. + */ +static u8 +_transport_get_port_id_by_rphy(struct MPT3SAS_ADAPTER *ioc, + struct sas_rphy *rphy) +{ + struct _sas_node *sas_expander; + struct _sas_device *sas_device; + unsigned long flags; + u8 port_id = 0xFF; + + if (!rphy) + return port_id; + + if (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE || + rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) { + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_for_each_entry(sas_expander, + &ioc->sas_expander_list, list) { + if (sas_expander->rphy == rphy) { + port_id = sas_expander->port->port_id; + break; + } + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + } else if (rphy->identify.device_type == SAS_END_DEVICE) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy); + if (sas_device) { + port_id = sas_device->port->port_id; + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + + return port_id; +} + +/** + * _transport_convert_phy_link_rate - + * @link_rate: link rate returned from mpt firmware + * + * Convert link_rate from mpi fusion into sas_transport form. 
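+ *
+ * For example MPI2_SAS_NEG_LINK_RATE_6_0 becomes SAS_LINK_RATE_6_0_GBPS;
+ * rates the transport layer has no equivalent for (such as SATA OOB
+ * complete or unknown link rate) fall back to SAS_LINK_RATE_UNKNOWN.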
+ */ +static enum sas_linkrate +_transport_convert_phy_link_rate(u8 link_rate) +{ + enum sas_linkrate rc; + + switch (link_rate) { + case MPI2_SAS_NEG_LINK_RATE_1_5: + rc = SAS_LINK_RATE_1_5_GBPS; + break; + case MPI2_SAS_NEG_LINK_RATE_3_0: + rc = SAS_LINK_RATE_3_0_GBPS; + break; + case MPI2_SAS_NEG_LINK_RATE_6_0: + rc = SAS_LINK_RATE_6_0_GBPS; + break; + case MPI25_SAS_NEG_LINK_RATE_12_0: + rc = SAS_LINK_RATE_12_0_GBPS; + break; + case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED: + rc = SAS_PHY_DISABLED; + break; + case MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED: + rc = SAS_LINK_RATE_FAILED; + break; + case MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR: + rc = SAS_SATA_PORT_SELECTOR; + break; + case MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS: + rc = SAS_PHY_RESET_IN_PROGRESS; + break; + + default: + case MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE: + case MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE: + rc = SAS_LINK_RATE_UNKNOWN; + break; + } + return rc; +} + +/** + * _transport_set_identify - set identify for phys and end devices + * @ioc: per adapter object + * @handle: device handle + * @identify: sas identify info + * + * Populates sas identify info. + * + * Return: 0 for success, non-zero for failure. + */ +static int +_transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle, + struct sas_identify *identify) +{ + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2ConfigReply_t mpi_reply; + u32 device_info; + u32 ioc_status; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + ioc_info(ioc, "%s: host reset in progress!\n", __func__); + return -EFAULT; + } + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENXIO; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x) failure at %s:%d/%s()!\n", + handle, ioc_status, __FILE__, __LINE__, __func__); + return -EIO; + } + + memset(identify, 0, sizeof(struct sas_identify)); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + + /* sas_address */ + identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + + /* phy number of the parent device this device is linked to */ + identify->phy_identifier = sas_device_pg0.PhyNum; + + /* device_type */ + switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) { + case MPI2_SAS_DEVICE_INFO_NO_DEVICE: + identify->device_type = SAS_PHY_UNUSED; + break; + case MPI2_SAS_DEVICE_INFO_END_DEVICE: + identify->device_type = SAS_END_DEVICE; + break; + case MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER: + identify->device_type = SAS_EDGE_EXPANDER_DEVICE; + break; + case MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER: + identify->device_type = SAS_FANOUT_EXPANDER_DEVICE; + break; + } + + /* initiator_port_protocols */ + if (device_info & MPI2_SAS_DEVICE_INFO_SSP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SSP; + if (device_info & MPI2_SAS_DEVICE_INFO_STP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_STP; + if (device_info & MPI2_SAS_DEVICE_INFO_SMP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SMP; + if (device_info & MPI2_SAS_DEVICE_INFO_SATA_HOST) + identify->initiator_port_protocols |= SAS_PROTOCOL_SATA; + + /* target_port_protocols */ + if (device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SSP; + if (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) + 
identify->target_port_protocols |= SAS_PROTOCOL_STP; + if (device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SMP; + if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + identify->target_port_protocols |= SAS_PROTOCOL_SATA; + + return 0; +} + +/** + * mpt3sas_transport_done - internal transport layer callback handler. + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Callback handler when sending internal generated transport cmds. + * The callback index passed is `ioc->transport_cb_idx` + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +u8 +mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (ioc->transport_cmds.status == MPT3_CMD_NOT_USED) + return 1; + if (ioc->transport_cmds.smid != smid) + return 1; + ioc->transport_cmds.status |= MPT3_CMD_COMPLETE; + if (mpi_reply) { + memcpy(ioc->transport_cmds.reply, mpi_reply, + mpi_reply->MsgLength*4); + ioc->transport_cmds.status |= MPT3_CMD_REPLY_VALID; + } + ioc->transport_cmds.status &= ~MPT3_CMD_PENDING; + complete(&ioc->transport_cmds.done); + return 1; +} + +/* report manufacture request structure */ +struct rep_manu_request { + u8 smp_frame_type; + u8 function; + u8 reserved; + u8 request_length; +}; + +/* report manufacture reply structure */ +struct rep_manu_reply { + u8 smp_frame_type; /* 0x41 */ + u8 function; /* 0x01 */ + u8 function_result; + u8 response_length; + u16 expander_change_count; + u8 reserved0[2]; + u8 sas_format; + u8 reserved2[3]; + u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; + u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; + u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN]; + u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN]; + u16 component_id; + u8 component_revision_id; + u8 reserved3; + u8 vendor_specific[8]; +}; + +/** + * _transport_expander_report_manufacture - obtain SMP report_manufacture + * @ioc: per adapter object + * @sas_address: expander sas address + * @edev: the sas_expander_device object + * @port_id: Port ID number + * + * Fills in the sas_expander_device object when SMP port is created. + * + * Return: 0 for success, non-zero for failure. 
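+ *
+ * The SMP report manufacture request (frame type 0x40, function 0x01)
+ * and its reply share a single DMA-coherent buffer; the request is sent
+ * as an MPI SMP_PASSTHROUGH message under the transport_cmds mutex and
+ * waited on for up to 10 seconds before a host reset is attempted.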
+ */ +static int +_transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct sas_expander_device *edev, u8 port_id) +{ + Mpi2SmpPassthroughRequest_t *mpi_request; + Mpi2SmpPassthroughReply_t *mpi_reply; + struct rep_manu_reply *manufacture_reply; + struct rep_manu_request *manufacture_request; + int rc; + u16 smid; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + dma_addr_t data_in_dma; + size_t data_in_sz; + size_t data_out_sz; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + ioc_info(ioc, "%s: host reset in progress!\n", __func__); + return -EFAULT; + } + + mutex_lock(&ioc->transport_cmds.mutex); + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: transport_cmds in use\n", __func__); + rc = -EAGAIN; + goto out; + } + ioc->transport_cmds.status = MPT3_CMD_PENDING; + + rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + if (rc) + goto out; + + smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + + data_out_sz = sizeof(struct rep_manu_request); + data_in_sz = sizeof(struct rep_manu_reply); + data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz, + &data_out_dma, GFP_KERNEL); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + + data_in_dma = data_out_dma + sizeof(struct rep_manu_request); + + manufacture_request = data_out; + manufacture_request->smp_frame_type = 0x40; + manufacture_request->function = 1; + manufacture_request->reserved = 0; + manufacture_request->request_length = 0; + + memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = port_id; + mpi_request->SASAddress = cpu_to_le64(sas_address); + mpi_request->RequestDataLength = cpu_to_le16(data_out_sz); + psge = &mpi_request->SGL; + + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + + dtransportprintk(ioc, + ioc_info(ioc, "report_manufacture - send to sas_addr(0x%016llx)\n", + (u64)sas_address)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); + + if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SmpPassthroughRequest_t)/4); + if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + dtransportprintk(ioc, ioc_info(ioc, "report_manufacture - complete\n")); + + if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) { + u8 *tmp; + + mpi_reply = ioc->transport_cmds.reply; + + dtransportprintk(ioc, + ioc_info(ioc, "report_manufacture - reply data transfer size(%d)\n", + le16_to_cpu(mpi_reply->ResponseDataLength))); + + if (le16_to_cpu(mpi_reply->ResponseDataLength) != + sizeof(struct rep_manu_reply)) + goto out; + + manufacture_reply = data_out + sizeof(struct rep_manu_request); + strncpy(edev->vendor_id, manufacture_reply->vendor_id, + SAS_EXPANDER_VENDOR_ID_LEN); + strncpy(edev->product_id, manufacture_reply->product_id, + SAS_EXPANDER_PRODUCT_ID_LEN); + strncpy(edev->product_rev, manufacture_reply->product_rev, + 
SAS_EXPANDER_PRODUCT_REV_LEN); + edev->level = manufacture_reply->sas_format & 1; + if (edev->level) { + strncpy(edev->component_vendor_id, + manufacture_reply->component_vendor_id, + SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); + tmp = (u8 *)&manufacture_reply->component_id; + edev->component_id = tmp[0] << 8 | tmp[1]; + edev->component_revision_id = + manufacture_reply->component_revision_id; + } + } else + dtransportprintk(ioc, + ioc_info(ioc, "report_manufacture - no reply\n")); + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + out: + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + if (data_out) + dma_free_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz, + data_out, data_out_dma); + + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} + + +/** + * _transport_delete_port - helper function to removing a port + * @ioc: per adapter object + * @mpt3sas_port: mpt3sas per port object + */ +static void +_transport_delete_port(struct MPT3SAS_ADAPTER *ioc, + struct _sas_port *mpt3sas_port) +{ + u64 sas_address = mpt3sas_port->remote_identify.sas_address; + struct hba_port *port = mpt3sas_port->hba_port; + enum sas_device_type device_type = + mpt3sas_port->remote_identify.device_type; + + dev_printk(KERN_INFO, &mpt3sas_port->port->dev, + "remove: sas_addr(0x%016llx)\n", + (unsigned long long) sas_address); + + ioc->logging_level |= MPT_DEBUG_TRANSPORT; + if (device_type == SAS_END_DEVICE) + mpt3sas_device_remove_by_sas_address(ioc, + sas_address, port); + else if (device_type == SAS_EDGE_EXPANDER_DEVICE || + device_type == SAS_FANOUT_EXPANDER_DEVICE) + mpt3sas_expander_remove(ioc, sas_address, port); + ioc->logging_level &= ~MPT_DEBUG_TRANSPORT; +} + +/** + * _transport_delete_phy - helper function to removing single phy from port + * @ioc: per adapter object + * @mpt3sas_port: mpt3sas per port object + * @mpt3sas_phy: mpt3sas per phy object + */ +static void +_transport_delete_phy(struct MPT3SAS_ADAPTER *ioc, + struct _sas_port *mpt3sas_port, struct _sas_phy *mpt3sas_phy) +{ + u64 sas_address = mpt3sas_port->remote_identify.sas_address; + + dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev, + "remove: sas_addr(0x%016llx), phy(%d)\n", + (unsigned long long) sas_address, mpt3sas_phy->phy_id); + + list_del(&mpt3sas_phy->port_siblings); + mpt3sas_port->num_phys--; + sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy); + mpt3sas_phy->phy_belongs_to_port = 0; +} + +/** + * _transport_add_phy - helper function to adding single phy to port + * @ioc: per adapter object + * @mpt3sas_port: mpt3sas per port object + * @mpt3sas_phy: mpt3sas per phy object + */ +static void +_transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port, + struct _sas_phy *mpt3sas_phy) +{ + u64 sas_address = mpt3sas_port->remote_identify.sas_address; + + dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev, + "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long) + sas_address, mpt3sas_phy->phy_id); + + list_add_tail(&mpt3sas_phy->port_siblings, &mpt3sas_port->phy_list); + mpt3sas_port->num_phys++; + sas_port_add_phy(mpt3sas_port->port, mpt3sas_phy->phy); + mpt3sas_phy->phy_belongs_to_port = 1; +} + +/** + * mpt3sas_transport_add_phy_to_an_existing_port - adding new phy to existing port + * @ioc: per adapter object + * @sas_node: sas node object (either expander or sas host) + * @mpt3sas_phy: mpt3sas per phy object + * @sas_address: sas address of device/expander were phy needs to be added to + * @port: hba port entry + */ +void 
+mpt3sas_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy, + u64 sas_address, struct hba_port *port) +{ + struct _sas_port *mpt3sas_port; + struct _sas_phy *phy_srch; + + if (mpt3sas_phy->phy_belongs_to_port == 1) + return; + + if (!port) + return; + + list_for_each_entry(mpt3sas_port, &sas_node->sas_port_list, + port_list) { + if (mpt3sas_port->remote_identify.sas_address != + sas_address) + continue; + if (mpt3sas_port->hba_port != port) + continue; + list_for_each_entry(phy_srch, &mpt3sas_port->phy_list, + port_siblings) { + if (phy_srch == mpt3sas_phy) + return; + } + _transport_add_phy(ioc, mpt3sas_port, mpt3sas_phy); + return; + } + +} + +/** + * mpt3sas_transport_del_phy_from_an_existing_port - delete phy from existing port + * @ioc: per adapter object + * @sas_node: sas node object (either expander or sas host) + * @mpt3sas_phy: mpt3sas per phy object + */ +void +mpt3sas_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy) +{ + struct _sas_port *mpt3sas_port, *next; + struct _sas_phy *phy_srch; + + if (mpt3sas_phy->phy_belongs_to_port == 0) + return; + + list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list, + port_list) { + list_for_each_entry(phy_srch, &mpt3sas_port->phy_list, + port_siblings) { + if (phy_srch != mpt3sas_phy) + continue; + + /* + * Don't delete port during host reset, + * just delete phy. + */ + if (mpt3sas_port->num_phys == 1 && !ioc->shost_recovery) + _transport_delete_port(ioc, mpt3sas_port); + else + _transport_delete_phy(ioc, mpt3sas_port, + mpt3sas_phy); + return; + } + } +} + +/** + * _transport_sanity_check - sanity check when adding a new port + * @ioc: per adapter object + * @sas_node: sas node object (either expander or sas host) + * @sas_address: sas address of device being added + * @port: hba port entry + * + * See the explanation above from _transport_delete_duplicate_port + */ +static void +_transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node, + u64 sas_address, struct hba_port *port) +{ + int i; + + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address != sas_address) + continue; + if (sas_node->phy[i].port != port) + continue; + if (sas_node->phy[i].phy_belongs_to_port == 1) + mpt3sas_transport_del_phy_from_an_existing_port(ioc, + sas_node, &sas_node->phy[i]); + } +} + +/** + * mpt3sas_transport_port_add - insert port to the list + * @ioc: per adapter object + * @handle: handle of attached device + * @sas_address: sas address of parent expander or sas host + * @hba_port: hba port entry + * Context: This function will acquire ioc->sas_node_lock. + * + * Adding new port object to the sas_node->sas_port_list. + * + * Return: mpt3sas_port. 
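+ *
+ * Roughly: look up the parent sas_node, gather every phy whose remote
+ * identify and hba_port match the new device, allocate a sas_port and
+ * add those phys to it, then allocate an end-device or expander rphy,
+ * and for expanders fill in the report manufacture data before linking
+ * the new port into sas_node->sas_port_list.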
+ */ +struct _sas_port * +mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u64 sas_address, struct hba_port *hba_port) +{ + struct _sas_phy *mpt3sas_phy, *next; + struct _sas_port *mpt3sas_port; + unsigned long flags; + struct _sas_node *sas_node; + struct sas_rphy *rphy; + struct _sas_device *sas_device = NULL; + int i; + struct sas_port *port; + struct virtual_phy *vphy = NULL; + + if (!hba_port) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return NULL; + } + + mpt3sas_port = kzalloc(sizeof(struct _sas_port), + GFP_KERNEL); + if (!mpt3sas_port) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return NULL; + } + + INIT_LIST_HEAD(&mpt3sas_port->port_list); + INIT_LIST_HEAD(&mpt3sas_port->phy_list); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = _transport_sas_node_find_by_sas_address(ioc, + sas_address, hba_port); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + if (!sas_node) { + ioc_err(ioc, "%s: Could not find parent sas_address(0x%016llx)!\n", + __func__, (u64)sas_address); + goto out_fail; + } + + if ((_transport_set_identify(ioc, handle, + &mpt3sas_port->remote_identify))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + + if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + + mpt3sas_port->hba_port = hba_port; + _transport_sanity_check(ioc, sas_node, + mpt3sas_port->remote_identify.sas_address, hba_port); + + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address != + mpt3sas_port->remote_identify.sas_address) + continue; + if (sas_node->phy[i].port != hba_port) + continue; + list_add_tail(&sas_node->phy[i].port_siblings, + &mpt3sas_port->phy_list); + mpt3sas_port->num_phys++; + if (sas_node->handle <= ioc->sas_hba.num_phys) { + if (!sas_node->phy[i].hba_vphy) { + hba_port->phy_mask |= (1 << i); + continue; + } + + vphy = mpt3sas_get_vphy_by_phy(ioc, hba_port, i); + if (!vphy) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + } + } + + if (!mpt3sas_port->num_phys) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + + if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { + sas_device = mpt3sas_get_sdev_by_addr(ioc, + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); + if (!sas_device) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + sas_device->pend_sas_rphy_add = 1; + } + + if (!sas_node->parent_dev) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + port = sas_port_alloc_num(sas_node->parent_dev); + if (!port || (sas_port_add(port))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + + list_for_each_entry(mpt3sas_phy, &mpt3sas_port->phy_list, + port_siblings) { + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &port->dev, + "add: handle(0x%04x), sas_addr(0x%016llx), phy(%d)\n", + handle, (unsigned long long) + mpt3sas_port->remote_identify.sas_address, + mpt3sas_phy->phy_id); + sas_port_add_phy(port, mpt3sas_phy->phy); + mpt3sas_phy->phy_belongs_to_port = 1; + mpt3sas_phy->port = hba_port; + } + + mpt3sas_port->port = port; + if 
(mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { + rphy = sas_end_device_alloc(port); + sas_device->rphy = rphy; + if (sas_node->handle <= ioc->sas_hba.num_phys) { + if (!vphy) + hba_port->sas_address = + sas_device->sas_address; + else + vphy->sas_address = + sas_device->sas_address; + } + } else { + rphy = sas_expander_alloc(port, + mpt3sas_port->remote_identify.device_type); + if (sas_node->handle <= ioc->sas_hba.num_phys) + hba_port->sas_address = + mpt3sas_port->remote_identify.sas_address; + } + + if (!rphy) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_delete_port; + } + + rphy->identify = mpt3sas_port->remote_identify; + + if ((sas_rphy_add(rphy))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + sas_rphy_free(rphy); + rphy = NULL; + goto out_delete_port; + } + + if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { + sas_device->pend_sas_rphy_add = 0; + sas_device_put(sas_device); + } + + dev_info(&rphy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n", handle, + (unsigned long long)mpt3sas_port->remote_identify.sas_address); + + mpt3sas_port->rphy = rphy; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + /* fill in report manufacture */ + if (mpt3sas_port->remote_identify.device_type == + MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || + mpt3sas_port->remote_identify.device_type == + MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) + _transport_expander_report_manufacture(ioc, + mpt3sas_port->remote_identify.sas_address, + rphy_to_expander_device(rphy), hba_port->port_id); + return mpt3sas_port; + +out_delete_port: + sas_port_delete(port); + +out_fail: + list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list, + port_siblings) + list_del(&mpt3sas_phy->port_siblings); + kfree(mpt3sas_port); + return NULL; +} + +/** + * mpt3sas_transport_port_remove - remove port from the list + * @ioc: per adapter object + * @sas_address: sas address of attached device + * @sas_address_parent: sas address of parent expander or sas host + * @port: hba port entry + * Context: This function will acquire ioc->sas_node_lock. + * + * Removing object and freeing associated memory from the + * ioc->sas_port_list. 
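+ *
+ * For direct attached devices with multipath_on_hba enabled this also
+ * drops any virtual phy and hba_port bookkeeping tied to the departing
+ * SAS address; the transport phys and port themselves are only deleted
+ * when the host is not being removed as a whole (ioc->remove_host).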
+ */ +void +mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + u64 sas_address_parent, struct hba_port *port) +{ + int i; + unsigned long flags; + struct _sas_port *mpt3sas_port, *next; + struct _sas_node *sas_node; + u8 found = 0; + struct _sas_phy *mpt3sas_phy, *next_phy; + struct hba_port *hba_port_next, *hba_port = NULL; + struct virtual_phy *vphy, *vphy_next = NULL; + + if (!port) + return; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = _transport_sas_node_find_by_sas_address(ioc, + sas_address_parent, port); + if (!sas_node) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list, + port_list) { + if (mpt3sas_port->remote_identify.sas_address != sas_address) + continue; + if (mpt3sas_port->hba_port != port) + continue; + found = 1; + list_del(&mpt3sas_port->port_list); + goto out; + } + out: + if (!found) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + + if (sas_node->handle <= ioc->sas_hba.num_phys && + (ioc->multipath_on_hba)) { + if (port->vphys_mask) { + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + if (vphy->sas_address != sas_address) + continue; + ioc_info(ioc, + "remove vphy entry: %p of port:%p,from %d port's vphys list\n", + vphy, port, port->port_id); + port->vphys_mask &= ~vphy->phy_mask; + list_del(&vphy->list); + kfree(vphy); + } + } + + list_for_each_entry_safe(hba_port, hba_port_next, + &ioc->port_table_list, list) { + if (hba_port != port) + continue; + /* + * Delete hba_port object if + * - hba_port object's sas address matches with current + * removed device's sas address and no vphy's + * associated with it. + * - Current removed device is a vSES device and + * none of the other direct attached device have + * this vSES device's port number (hence hba_port + * object sas_address field will be zero). + */ + if ((hba_port->sas_address == sas_address || + !hba_port->sas_address) && !hba_port->vphys_mask) { + ioc_info(ioc, + "remove hba_port entry: %p port: %d from hba_port list\n", + hba_port, hba_port->port_id); + list_del(&hba_port->list); + kfree(hba_port); + } else if (hba_port->sas_address == sas_address && + hba_port->vphys_mask) { + /* + * Current removed device is a non vSES device + * and a vSES device has the same port number + * as of current device's port number. Hence + * only clear the sas_address filed, don't + * delete the hba_port object. 
+ */ + ioc_info(ioc, + "clearing sas_address from hba_port entry: %p port: %d from hba_port list\n", + hba_port, hba_port->port_id); + port->sas_address = 0; + } + break; + } + } + + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address == sas_address) + memset(&sas_node->phy[i].remote_identify, 0 , + sizeof(struct sas_identify)); + } + + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + list_for_each_entry_safe(mpt3sas_phy, next_phy, + &mpt3sas_port->phy_list, port_siblings) { + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &mpt3sas_port->port->dev, + "remove: sas_addr(0x%016llx), phy(%d)\n", + (unsigned long long) + mpt3sas_port->remote_identify.sas_address, + mpt3sas_phy->phy_id); + mpt3sas_phy->phy_belongs_to_port = 0; + if (!ioc->remove_host) + sas_port_delete_phy(mpt3sas_port->port, + mpt3sas_phy->phy); + list_del(&mpt3sas_phy->port_siblings); + } + if (!ioc->remove_host) + sas_port_delete(mpt3sas_port->port); + ioc_info(ioc, "%s: removed: sas_addr(0x%016llx)\n", + __func__, (unsigned long long)sas_address); + kfree(mpt3sas_port); +} + +/** + * mpt3sas_transport_add_host_phy - report sas_host phy to transport + * @ioc: per adapter object + * @mpt3sas_phy: mpt3sas per phy object + * @phy_pg0: sas phy page 0 + * @parent_dev: parent device class object + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy + *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev) +{ + struct sas_phy *phy; + int phy_index = mpt3sas_phy->phy_id; + + + INIT_LIST_HEAD(&mpt3sas_phy->port_siblings); + phy = sas_phy_alloc(parent_dev, phy_index); + if (!phy) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + if ((_transport_set_identify(ioc, mpt3sas_phy->handle, + &mpt3sas_phy->identify))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + phy->identify = mpt3sas_phy->identify; + mpt3sas_phy->attached_handle = le16_to_cpu(phy_pg0.AttachedDevHandle); + if (mpt3sas_phy->attached_handle) + _transport_set_identify(ioc, mpt3sas_phy->attached_handle, + &mpt3sas_phy->remote_identify); + phy->identify.phy_identifier = mpt3sas_phy->phy_id; + phy->negotiated_linkrate = _transport_convert_phy_link_rate( + phy_pg0.NegotiatedLinkRate & MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + phy->minimum_linkrate_hw = _transport_convert_phy_link_rate( + phy_pg0.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK); + phy->maximum_linkrate_hw = _transport_convert_phy_link_rate( + phy_pg0.HwLinkRate >> 4); + phy->minimum_linkrate = _transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = _transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate >> 4); + phy->hostdata = mpt3sas_phy->port; + + if ((sas_phy_add(phy))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &phy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + mpt3sas_phy->handle, (unsigned long long) + mpt3sas_phy->identify.sas_address, + mpt3sas_phy->attached_handle, + (unsigned long long) + mpt3sas_phy->remote_identify.sas_address); + mpt3sas_phy->phy = phy; + return 0; +} + + +/** + * mpt3sas_transport_add_expander_phy - report expander phy to 
transport + * @ioc: per adapter object + * @mpt3sas_phy: mpt3sas per phy object + * @expander_pg1: expander page 1 + * @parent_dev: parent device class object + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy + *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1, + struct device *parent_dev) +{ + struct sas_phy *phy; + int phy_index = mpt3sas_phy->phy_id; + + INIT_LIST_HEAD(&mpt3sas_phy->port_siblings); + phy = sas_phy_alloc(parent_dev, phy_index); + if (!phy) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + if ((_transport_set_identify(ioc, mpt3sas_phy->handle, + &mpt3sas_phy->identify))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + phy->identify = mpt3sas_phy->identify; + mpt3sas_phy->attached_handle = + le16_to_cpu(expander_pg1.AttachedDevHandle); + if (mpt3sas_phy->attached_handle) + _transport_set_identify(ioc, mpt3sas_phy->attached_handle, + &mpt3sas_phy->remote_identify); + phy->identify.phy_identifier = mpt3sas_phy->phy_id; + phy->negotiated_linkrate = _transport_convert_phy_link_rate( + expander_pg1.NegotiatedLinkRate & + MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + phy->minimum_linkrate_hw = _transport_convert_phy_link_rate( + expander_pg1.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK); + phy->maximum_linkrate_hw = _transport_convert_phy_link_rate( + expander_pg1.HwLinkRate >> 4); + phy->minimum_linkrate = _transport_convert_phy_link_rate( + expander_pg1.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = _transport_convert_phy_link_rate( + expander_pg1.ProgrammedLinkRate >> 4); + phy->hostdata = mpt3sas_phy->port; + + if ((sas_phy_add(phy))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &phy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + mpt3sas_phy->handle, (unsigned long long) + mpt3sas_phy->identify.sas_address, + mpt3sas_phy->attached_handle, + (unsigned long long) + mpt3sas_phy->remote_identify.sas_address); + mpt3sas_phy->phy = phy; + return 0; +} + +/** + * mpt3sas_transport_update_links - refreshing phy link changes + * @ioc: per adapter object + * @sas_address: sas address of parent expander or sas host + * @handle: attached device handle + * @phy_number: phy number + * @link_rate: new link rate + * @port: hba port entry + * + * Return nothing. 
+ */ +void +mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, u16 handle, u8 phy_number, u8 link_rate, + struct hba_port *port) +{ + unsigned long flags; + struct _sas_node *sas_node; + struct _sas_phy *mpt3sas_phy; + struct hba_port *hba_port = NULL; + + if (ioc->shost_recovery || ioc->pci_error_recovery) + return; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = _transport_sas_node_find_by_sas_address(ioc, + sas_address, port); + if (!sas_node) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + + mpt3sas_phy = &sas_node->phy[phy_number]; + mpt3sas_phy->attached_handle = handle; + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) { + _transport_set_identify(ioc, handle, + &mpt3sas_phy->remote_identify); + if ((sas_node->handle <= ioc->sas_hba.num_phys) && + (ioc->multipath_on_hba)) { + list_for_each_entry(hba_port, + &ioc->port_table_list, list) { + if (hba_port->sas_address == sas_address && + hba_port == port) + hba_port->phy_mask |= + (1 << mpt3sas_phy->phy_id); + } + } + mpt3sas_transport_add_phy_to_an_existing_port(ioc, sas_node, + mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address, + port); + } else + memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct + sas_identify)); + + if (mpt3sas_phy->phy) + mpt3sas_phy->phy->negotiated_linkrate = + _transport_convert_phy_link_rate(link_rate); + + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev, + "refresh: parent sas_addr(0x%016llx),\n" + "\tlink_rate(0x%02x), phy(%d)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + (unsigned long long)sas_address, + link_rate, phy_number, handle, (unsigned long long) + mpt3sas_phy->remote_identify.sas_address); +} + +static inline void * +phy_to_ioc(struct sas_phy *phy) +{ + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + return shost_priv(shost); +} + +static inline void * +rphy_to_ioc(struct sas_rphy *rphy) +{ + struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); + return shost_priv(shost); +} + +/* report phy error log structure */ +struct phy_error_log_request { + u8 smp_frame_type; /* 0x40 */ + u8 function; /* 0x11 */ + u8 allocated_response_length; + u8 request_length; /* 02 */ + u8 reserved_1[5]; + u8 phy_identifier; + u8 reserved_2[2]; +}; + +/* report phy error log reply structure */ +struct phy_error_log_reply { + u8 smp_frame_type; /* 0x41 */ + u8 function; /* 0x11 */ + u8 function_result; + u8 response_length; + __be16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 reserved_2[2]; + __be32 invalid_dword; + __be32 running_disparity_error; + __be32 loss_of_dword_sync; + __be32 phy_reset_problem; +}; + +/** + * _transport_get_expander_phy_error_log - return expander counters + * @ioc: per adapter object + * @phy: The sas phy object + * + * Return: 0 for success, non-zero for failure. 
+ * + */ +static int +_transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc, + struct sas_phy *phy) +{ + Mpi2SmpPassthroughRequest_t *mpi_request; + Mpi2SmpPassthroughReply_t *mpi_reply; + struct phy_error_log_request *phy_error_log_request; + struct phy_error_log_reply *phy_error_log_reply; + int rc; + u16 smid; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + u32 sz; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + ioc_info(ioc, "%s: host reset in progress!\n", __func__); + return -EFAULT; + } + + mutex_lock(&ioc->transport_cmds.mutex); + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: transport_cmds in use\n", __func__); + rc = -EAGAIN; + goto out; + } + ioc->transport_cmds.status = MPT3_CMD_PENDING; + + rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + if (rc) + goto out; + + smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + + sz = sizeof(struct phy_error_log_request) + + sizeof(struct phy_error_log_reply); + data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma, + GFP_KERNEL); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + + rc = -EINVAL; + memset(data_out, 0, sz); + phy_error_log_request = data_out; + phy_error_log_request->smp_frame_type = 0x40; + phy_error_log_request->function = 0x11; + phy_error_log_request->request_length = 2; + phy_error_log_request->allocated_response_length = 0; + phy_error_log_request->phy_identifier = phy->number; + + memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = _transport_get_port_id_by_sas_phy(phy); + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); + mpi_request->RequestDataLength = + cpu_to_le16(sizeof(struct phy_error_log_request)); + psge = &mpi_request->SGL; + + ioc->build_sg(ioc, psge, data_out_dma, + sizeof(struct phy_error_log_request), + data_out_dma + sizeof(struct phy_error_log_request), + sizeof(struct phy_error_log_reply)); + + dtransportprintk(ioc, + ioc_info(ioc, "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n", + (u64)phy->identify.sas_address, + phy->number)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); + + if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SmpPassthroughRequest_t)/4); + if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + dtransportprintk(ioc, ioc_info(ioc, "phy_error_log - complete\n")); + + if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) { + + mpi_reply = ioc->transport_cmds.reply; + + dtransportprintk(ioc, + ioc_info(ioc, "phy_error_log - reply data transfer size(%d)\n", + le16_to_cpu(mpi_reply->ResponseDataLength))); + + if (le16_to_cpu(mpi_reply->ResponseDataLength) != + sizeof(struct phy_error_log_reply)) + goto out; + + phy_error_log_reply = data_out + + sizeof(struct phy_error_log_request); + + dtransportprintk(ioc, + ioc_info(ioc, "phy_error_log - 
function_result(%d)\n", + phy_error_log_reply->function_result)); + + phy->invalid_dword_count = + be32_to_cpu(phy_error_log_reply->invalid_dword); + phy->running_disparity_error_count = + be32_to_cpu(phy_error_log_reply->running_disparity_error); + phy->loss_of_dword_sync_count = + be32_to_cpu(phy_error_log_reply->loss_of_dword_sync); + phy->phy_reset_problem_count = + be32_to_cpu(phy_error_log_reply->phy_reset_problem); + rc = 0; + } else + dtransportprintk(ioc, + ioc_info(ioc, "phy_error_log - no reply\n")); + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + out: + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + if (data_out) + dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma); + + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} + +/** + * _transport_get_linkerrors - return phy counters for both hba and expanders + * @phy: The sas phy object + * + * Return: 0 for success, non-zero for failure. + * + */ +static int +_transport_get_linkerrors(struct sas_phy *phy) +{ + struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy); + unsigned long flags; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasPhyPage1_t phy_pg1; + struct hba_port *port = phy->hostdata; + int port_id = port->port_id; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + if (_transport_sas_node_find_by_sas_address(ioc, + phy->identify.sas_address, + mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return _transport_get_expander_phy_error_log(ioc, phy); + + /* get hba phy error logs */ + if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1, + phy->number))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENXIO; + } + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) + ioc_info(ioc, "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n", + phy->number, + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo)); + + phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount); + phy->running_disparity_error_count = + le32_to_cpu(phy_pg1.RunningDisparityErrorCount); + phy->loss_of_dword_sync_count = + le32_to_cpu(phy_pg1.LossDwordSynchCount); + phy->phy_reset_problem_count = + le32_to_cpu(phy_pg1.PhyResetProblemCount); + return 0; +} + +/** + * _transport_get_enclosure_identifier - + * @rphy: The sas phy object + * @identifier: ? + * + * Obtain the enclosure logical id for an expander. + * Return: 0 for success, non-zero for failure. + */ +static int +_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) +{ + struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy); + struct _sas_device *sas_device; + unsigned long flags; + int rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy); + if (sas_device) { + *identifier = sas_device->enclosure_logical_id; + rc = 0; + sas_device_put(sas_device); + } else { + *identifier = 0; + rc = -ENXIO; + } + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +/** + * _transport_get_bay_identifier - + * @rphy: The sas phy object + * + * Return: the slot id for a device that resides inside an enclosure. 
+ */ +static int +_transport_get_bay_identifier(struct sas_rphy *rphy) +{ + struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy); + struct _sas_device *sas_device; + unsigned long flags; + int rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy); + if (sas_device) { + rc = sas_device->slot; + sas_device_put(sas_device); + } else { + rc = -ENXIO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +/* phy control request structure */ +struct phy_control_request { + u8 smp_frame_type; /* 0x40 */ + u8 function; /* 0x91 */ + u8 allocated_response_length; + u8 request_length; /* 0x09 */ + u16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 phy_operation; + u8 reserved_2[13]; + u64 attached_device_name; + u8 programmed_min_physical_link_rate; + u8 programmed_max_physical_link_rate; + u8 reserved_3[6]; +}; + +/* phy control reply structure */ +struct phy_control_reply { + u8 smp_frame_type; /* 0x41 */ + u8 function; /* 0x11 */ + u8 function_result; + u8 response_length; +}; + +#define SMP_PHY_CONTROL_LINK_RESET (0x01) +#define SMP_PHY_CONTROL_HARD_RESET (0x02) +#define SMP_PHY_CONTROL_DISABLE (0x03) + +/** + * _transport_expander_phy_control - expander phy control + * @ioc: per adapter object + * @phy: The sas phy object + * @phy_operation: ? + * + * Return: 0 for success, non-zero for failure. + * + */ +static int +_transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc, + struct sas_phy *phy, u8 phy_operation) +{ + Mpi2SmpPassthroughRequest_t *mpi_request; + Mpi2SmpPassthroughReply_t *mpi_reply; + struct phy_control_request *phy_control_request; + struct phy_control_reply *phy_control_reply; + int rc; + u16 smid; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + u32 sz; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + ioc_info(ioc, "%s: host reset in progress!\n", __func__); + return -EFAULT; + } + + mutex_lock(&ioc->transport_cmds.mutex); + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: transport_cmds in use\n", __func__); + rc = -EAGAIN; + goto out; + } + ioc->transport_cmds.status = MPT3_CMD_PENDING; + + rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + if (rc) + goto out; + + smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + + sz = sizeof(struct phy_control_request) + + sizeof(struct phy_control_reply); + data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma, + GFP_KERNEL); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + + rc = -EINVAL; + memset(data_out, 0, sz); + phy_control_request = data_out; + phy_control_request->smp_frame_type = 0x40; + phy_control_request->function = 0x91; + phy_control_request->request_length = 9; + phy_control_request->allocated_response_length = 0; + phy_control_request->phy_identifier = phy->number; + phy_control_request->phy_operation = phy_operation; + phy_control_request->programmed_min_physical_link_rate = + phy->minimum_linkrate << 4; + phy_control_request->programmed_max_physical_link_rate = + phy->maximum_linkrate << 4; + + memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); + mpi_request->Function = 
MPI2_FUNCTION_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = _transport_get_port_id_by_sas_phy(phy); + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); + mpi_request->RequestDataLength = + cpu_to_le16(sizeof(struct phy_error_log_request)); + psge = &mpi_request->SGL; + + ioc->build_sg(ioc, psge, data_out_dma, + sizeof(struct phy_control_request), + data_out_dma + sizeof(struct phy_control_request), + sizeof(struct phy_control_reply)); + + dtransportprintk(ioc, + ioc_info(ioc, "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", + (u64)phy->identify.sas_address, + phy->number, phy_operation)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); + + if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SmpPassthroughRequest_t)/4); + if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + dtransportprintk(ioc, ioc_info(ioc, "phy_control - complete\n")); + + if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) { + + mpi_reply = ioc->transport_cmds.reply; + + dtransportprintk(ioc, + ioc_info(ioc, "phy_control - reply data transfer size(%d)\n", + le16_to_cpu(mpi_reply->ResponseDataLength))); + + if (le16_to_cpu(mpi_reply->ResponseDataLength) != + sizeof(struct phy_control_reply)) + goto out; + + phy_control_reply = data_out + + sizeof(struct phy_control_request); + + dtransportprintk(ioc, + ioc_info(ioc, "phy_control - function_result(%d)\n", + phy_control_reply->function_result)); + + rc = 0; + } else + dtransportprintk(ioc, + ioc_info(ioc, "phy_control - no reply\n")); + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + out: + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + if (data_out) + dma_free_coherent(&ioc->pdev->dev, sz, data_out, + data_out_dma); + + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} + +/** + * _transport_phy_reset - + * @phy: The sas phy object + * @hard_reset: + * + * Return: 0 for success, non-zero for failure. + */ +static int +_transport_phy_reset(struct sas_phy *phy, int hard_reset) +{ + struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy); + Mpi2SasIoUnitControlReply_t mpi_reply; + Mpi2SasIoUnitControlRequest_t mpi_request; + struct hba_port *port = phy->hostdata; + int port_id = port->port_id; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + if (_transport_sas_node_find_by_sas_address(ioc, + phy->identify.sas_address, + mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + /* handle expander phys */ + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return _transport_expander_phy_control(ioc, phy, + (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET : + SMP_PHY_CONTROL_LINK_RESET); + + /* handle hba phys */ + memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); + mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; + mpi_request.Operation = hard_reset ? 
+ MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET; + mpi_request.PhyNum = phy->number; + + if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENXIO; + } + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) + ioc_info(ioc, "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", + phy->number, le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo)); + + return 0; +} + +/** + * _transport_phy_enable - enable/disable phys + * @phy: The sas phy object + * @enable: enable phy when true + * + * Only support sas_host direct attached phys. + * Return: 0 for success, non-zero for failure. + */ +static int +_transport_phy_enable(struct sas_phy *phy, int enable) +{ + struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy); + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + Mpi2ConfigReply_t mpi_reply; + u16 ioc_status; + u16 sz; + int rc = 0; + unsigned long flags; + int i, discovery_active; + struct hba_port *port = phy->hostdata; + int port_id = port->port_id; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + if (_transport_sas_node_find_by_sas_address(ioc, + phy->identify.sas_address, + mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + /* handle expander phys */ + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return _transport_expander_phy_control(ioc, phy, + (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET : + SMP_PHY_CONTROL_DISABLE); + + /* handle hba phys */ + + /* read sas_iounit page 0 */ + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + + /* unable to enable/disable phys when when discovery is active */ + for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) { + if (sas_iounit_pg0->PhyData[i].PortFlags & + MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) { + ioc_err(ioc, "discovery is active on port = %d, phy = %d: unable to enable/disable phys, try again later!\n", + sas_iounit_pg0->PhyData[i].Port, i); + discovery_active = 1; + } + } + + if (discovery_active) { + rc = -EAGAIN; + goto out; + } + + /* read sas_iounit page 1 */ + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit1PhyData_t)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure 
at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + + /* copy Port/PortFlags/PhyFlags from page 0 */ + for (i = 0; i < ioc->sas_hba.num_phys ; i++) { + sas_iounit_pg1->PhyData[i].Port = + sas_iounit_pg0->PhyData[i].Port; + sas_iounit_pg1->PhyData[i].PortFlags = + (sas_iounit_pg0->PhyData[i].PortFlags & + MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG); + sas_iounit_pg1->PhyData[i].PhyFlags = + (sas_iounit_pg0->PhyData[i].PhyFlags & + (MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED + + MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED)); + } + + if (enable) + sas_iounit_pg1->PhyData[phy->number].PhyFlags + &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + else + sas_iounit_pg1->PhyData[phy->number].PhyFlags + |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + + mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz); + + /* link reset */ + if (enable) + _transport_phy_reset(phy, 0); + + out: + kfree(sas_iounit_pg1); + kfree(sas_iounit_pg0); + return rc; +} + +/** + * _transport_phy_speed - set phy min/max link rates + * @phy: The sas phy object + * @rates: rates defined in sas_phy_linkrates + * + * Only support sas_host direct attached phys. + * + * Return: 0 for success, non-zero for failure. + */ +static int +_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) +{ + struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy); + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi2SasPhyPage0_t phy_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 ioc_status; + u16 sz; + int i; + int rc = 0; + unsigned long flags; + struct hba_port *port = phy->hostdata; + int port_id = port->port_id; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + if (_transport_sas_node_find_by_sas_address(ioc, + phy->identify.sas_address, + mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + if (!rates->minimum_linkrate) + rates->minimum_linkrate = phy->minimum_linkrate; + else if (rates->minimum_linkrate < phy->minimum_linkrate_hw) + rates->minimum_linkrate = phy->minimum_linkrate_hw; + + if (!rates->maximum_linkrate) + rates->maximum_linkrate = phy->maximum_linkrate; + else if (rates->maximum_linkrate > phy->maximum_linkrate_hw) + rates->maximum_linkrate = phy->maximum_linkrate_hw; + + /* handle expander phys */ + if (phy->identify.sas_address != ioc->sas_hba.sas_address) { + phy->minimum_linkrate = rates->minimum_linkrate; + phy->maximum_linkrate = rates->maximum_linkrate; + return _transport_expander_phy_control(ioc, phy, + SMP_PHY_CONTROL_LINK_RESET); + } + + /* handle hba phys */ + + /* sas_iounit page 1 */ + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit1PhyData_t)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + if (phy->number != i) { + sas_iounit_pg1->PhyData[i].MaxMinLinkRate = + (ioc->sas_hba.phy[i].phy->minimum_linkrate + + 
(ioc->sas_hba.phy[i].phy->maximum_linkrate << 4)); + } else { + sas_iounit_pg1->PhyData[i].MaxMinLinkRate = + (rates->minimum_linkrate + + (rates->maximum_linkrate << 4)); + } + } + + if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, + sz)) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + + /* link reset */ + _transport_phy_reset(phy, 0); + + /* read phy page 0, then update the rates in the sas transport phy */ + if (!mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + phy->number)) { + phy->minimum_linkrate = _transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = _transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate >> 4); + phy->negotiated_linkrate = _transport_convert_phy_link_rate( + phy_pg0.NegotiatedLinkRate & + MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + } + + out: + kfree(sas_iounit_pg1); + return rc; +} + +static int +_transport_map_smp_buffer(struct device *dev, struct bsg_buffer *buf, + dma_addr_t *dma_addr, size_t *dma_len, void **p) +{ + /* Check if the request is split across multiple segments */ + if (buf->sg_cnt > 1) { + *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr, + GFP_KERNEL); + if (!*p) + return -ENOMEM; + *dma_len = buf->payload_len; + } else { + if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL)) + return -ENOMEM; + *dma_addr = sg_dma_address(buf->sg_list); + *dma_len = sg_dma_len(buf->sg_list); + *p = NULL; + } + + return 0; +} + +static void +_transport_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf, + dma_addr_t dma_addr, void *p) +{ + if (p) + dma_free_coherent(dev, buf->payload_len, p, dma_addr); + else + dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL); +} + +/** + * _transport_smp_handler - transport portal for smp passthru + * @job: ? + * @shost: shost object + * @rphy: sas transport rphy object + * + * This used primarily for smp_utils. 
+ * Example: + * smp_rep_general /sys/class/bsg/expander-5:0 + */ +static void +_transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, + struct sas_rphy *rphy) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + Mpi2SmpPassthroughRequest_t *mpi_request; + Mpi2SmpPassthroughReply_t *mpi_reply; + int rc; + u16 smid; + void *psge; + dma_addr_t dma_addr_in; + dma_addr_t dma_addr_out; + void *addr_in = NULL; + void *addr_out = NULL; + size_t dma_len_in; + size_t dma_len_out; + unsigned int reslen = 0; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + ioc_info(ioc, "%s: host reset in progress!\n", __func__); + rc = -EFAULT; + goto job_done; + } + + rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex); + if (rc) + goto job_done; + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: transport_cmds in use\n", + __func__); + rc = -EAGAIN; + goto out; + } + ioc->transport_cmds.status = MPT3_CMD_PENDING; + + rc = _transport_map_smp_buffer(&ioc->pdev->dev, &job->request_payload, + &dma_addr_out, &dma_len_out, &addr_out); + if (rc) + goto out; + if (addr_out) { + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, addr_out, + job->request_payload.payload_len); + } + + rc = _transport_map_smp_buffer(&ioc->pdev->dev, &job->reply_payload, + &dma_addr_in, &dma_len_in, &addr_in); + if (rc) + goto unmap_out; + + rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + if (rc) + goto unmap_in; + + smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto unmap_in; + } + + rc = 0; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + + memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = _transport_get_port_id_by_rphy(ioc, rphy); + mpi_request->SASAddress = (rphy) ? 
+ cpu_to_le64(rphy->identify.sas_address) : + cpu_to_le64(ioc->sas_hba.sas_address); + mpi_request->RequestDataLength = cpu_to_le16(dma_len_out - 4); + psge = &mpi_request->SGL; + + ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in, + dma_len_in - 4); + + dtransportprintk(ioc, + ioc_info(ioc, "%s: sending smp request\n", __func__)); + + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); + + if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SmpPassthroughRequest_t)/4); + if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) { + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + rc = -ETIMEDOUT; + goto unmap_in; + } + } + + dtransportprintk(ioc, ioc_info(ioc, "%s - complete\n", __func__)); + + if (!(ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID)) { + dtransportprintk(ioc, + ioc_info(ioc, "%s: no reply\n", __func__)); + rc = -ENXIO; + goto unmap_in; + } + + mpi_reply = ioc->transport_cmds.reply; + + dtransportprintk(ioc, + ioc_info(ioc, "%s: reply data transfer size(%d)\n", + __func__, + le16_to_cpu(mpi_reply->ResponseDataLength))); + + memcpy(job->reply, mpi_reply, sizeof(*mpi_reply)); + job->reply_len = sizeof(*mpi_reply); + reslen = le16_to_cpu(mpi_reply->ResponseDataLength); + + if (addr_in) { + sg_copy_to_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, addr_in, + job->reply_payload.payload_len); + } + + rc = 0; + unmap_in: + _transport_unmap_smp_buffer(&ioc->pdev->dev, &job->reply_payload, + dma_addr_in, addr_in); + unmap_out: + _transport_unmap_smp_buffer(&ioc->pdev->dev, &job->request_payload, + dma_addr_out, addr_out); + out: + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->transport_cmds.mutex); +job_done: + bsg_job_done(job, rc, reslen); +} + +struct sas_function_template mpt3sas_transport_functions = { + .get_linkerrors = _transport_get_linkerrors, + .get_enclosure_identifier = _transport_get_enclosure_identifier, + .get_bay_identifier = _transport_get_bay_identifier, + .phy_reset = _transport_phy_reset, + .phy_enable = _transport_phy_enable, + .set_phy_speed = _transport_phy_speed, + .smp_handler = _transport_smp_handler, +}; + +struct scsi_transport_template *mpt3sas_transport_template; diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c new file mode 100644 index 000000000..d9b7d0ee2 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c @@ -0,0 +1,473 @@ +/* + * This module provides common API to set Diagnostic trigger for MPT + * (Message Passing Technology) based controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mpt3sas_base.h" + +/** + * _mpt3sas_raise_sigio - notifiy app + * @ioc: per adapter object + * @event_data: ? + */ +static void +_mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) +{ + Mpi2EventNotificationReply_t *mpi_reply; + u16 sz, event_data_sz; + unsigned long flags; + + dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__)); + + sz = offsetof(Mpi2EventNotificationReply_t, EventData) + + sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4; + mpi_reply = kzalloc(sz, GFP_KERNEL); + if (!mpi_reply) + goto out; + mpi_reply->Event = cpu_to_le16(MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED); + event_data_sz = (sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4) / 4; + mpi_reply->EventDataLength = cpu_to_le16(event_data_sz); + memcpy(&mpi_reply->EventData, event_data, + sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: add to driver event log\n", + __func__)); + mpt3sas_ctl_add_to_event_log(ioc, mpi_reply); + kfree(mpi_reply); + out: + + /* clearing the diag_trigger_active flag */ + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: clearing diag_trigger_active flag\n", + __func__)); + ioc->diag_trigger_active = 0; + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", + __func__)); +} + +/** + * mpt3sas_process_trigger_data - process the event data for the trigger + * @ioc: per adapter object + * @event_data: ? 
+ */ +void +mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) +{ + u8 issue_reset = 0; + u32 *trig_data = (u32 *)&event_data->u.master; + + dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__)); + + /* release the diag buffer trace */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { + /* + * add a log message so that user knows which event caused + * the release + */ + ioc_info(ioc, + "%s: Releasing the trace buffer. Trigger_Type 0x%08x, Data[0] 0x%08x, Data[1] 0x%08x\n", + __func__, event_data->trigger_type, + trig_data[0], trig_data[1]); + mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, + &issue_reset); + } + + ioc->htb_rel.buffer_rel_condition = MPT3_DIAG_BUFFER_REL_TRIGGER; + if (event_data) { + ioc->htb_rel.trigger_type = event_data->trigger_type; + switch (event_data->trigger_type) { + case MPT3SAS_TRIGGER_SCSI: + memcpy(&ioc->htb_rel.trigger_info_dwords, + &event_data->u.scsi, + sizeof(struct SL_WH_SCSI_TRIGGER_T)); + break; + case MPT3SAS_TRIGGER_MPI: + memcpy(&ioc->htb_rel.trigger_info_dwords, + &event_data->u.mpi, + sizeof(struct SL_WH_MPI_TRIGGER_T)); + break; + case MPT3SAS_TRIGGER_MASTER: + ioc->htb_rel.trigger_info_dwords[0] = + event_data->u.master.MasterData; + break; + case MPT3SAS_TRIGGER_EVENT: + memcpy(&ioc->htb_rel.trigger_info_dwords, + &event_data->u.event, + sizeof(struct SL_WH_EVENT_TRIGGER_T)); + break; + default: + ioc_err(ioc, "%d - Is not a valid Trigger type\n", + event_data->trigger_type); + break; + } + } + _mpt3sas_raise_sigio(ioc, event_data); + + dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", + __func__)); +} + +/** + * mpt3sas_trigger_master - Master trigger handler + * @ioc: per adapter object + * @trigger_bitmask: + * + */ +void +mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + unsigned long flags; + u8 found_match = 0; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT || + trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET) + goto by_pass_checks; + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + by_pass_checks: + + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: enter - trigger_bitmask = 0x%08x\n", + __func__, trigger_bitmask)); + + /* don't send trigger if an trigger is currently active */ + if (ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + if (ioc->diag_trigger_master.MasterData & trigger_bitmask) { + found_match = 1; + ioc->diag_trigger_active = 1; + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: setting diag_trigger_active flag\n", + __func__)); + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_MASTER; + event_data.u.master.MasterData = trigger_bitmask; + + if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT || + trigger_bitmask & 
MASTER_TRIGGER_ADAPTER_RESET) { + ioc->htb_rel.trigger_type = MPT3SAS_TRIGGER_MASTER; + ioc->htb_rel.trigger_info_dwords[0] = trigger_bitmask; + if (ioc->reset_from_user) + ioc->htb_rel.trigger_info_dwords[1] = + MPT_DIAG_RESET_ISSUED_BY_USER; + _mpt3sas_raise_sigio(ioc, &event_data); + } else + mpt3sas_send_trigger_data_event(ioc, &event_data); + + out: + dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", + __func__)); +} + +/** + * mpt3sas_trigger_event - Event trigger handler + * @ioc: per adapter object + * @event: ? + * @log_entry_qualifier: ? + * + */ +void +mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event, + u16 log_entry_qualifier) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + struct SL_WH_EVENT_TRIGGER_T *event_trigger; + int i; + unsigned long flags; + u8 found_match; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n", + __func__, event, log_entry_qualifier)); + + /* don't send trigger if an trigger is currently active */ + if (ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + event_trigger = ioc->diag_trigger_event.EventTriggerEntry; + for (i = 0 , found_match = 0; i < ioc->diag_trigger_event.ValidEntries + && !found_match; i++, event_trigger++) { + if (event_trigger->EventValue != event) + continue; + if (event == MPI2_EVENT_LOG_ENTRY_ADDED) { + if (event_trigger->LogEntryQualifier == + log_entry_qualifier) + found_match = 1; + continue; + } + found_match = 1; + ioc->diag_trigger_active = 1; + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: setting diag_trigger_active flag\n", + __func__)); + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: setting diag_trigger_active flag\n", + __func__)); + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_EVENT; + event_data.u.event.EventValue = event; + event_data.u.event.LogEntryQualifier = log_entry_qualifier; + mpt3sas_send_trigger_data_event(ioc, &event_data); + out: + dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", + __func__)); +} + +/** + * mpt3sas_trigger_scsi - SCSI trigger handler + * @ioc: per adapter object + * @sense_key: ? + * @asc: ? + * @ascq: ? 
+ * + */ +void +mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc, + u8 ascq) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + struct SL_WH_SCSI_TRIGGER_T *scsi_trigger; + int i; + unsigned long flags; + u8 found_match; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n", + __func__, sense_key, asc, ascq)); + + /* don't send trigger if an trigger is currently active */ + if (ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + scsi_trigger = ioc->diag_trigger_scsi.SCSITriggerEntry; + for (i = 0 , found_match = 0; i < ioc->diag_trigger_scsi.ValidEntries + && !found_match; i++, scsi_trigger++) { + if (scsi_trigger->SenseKey != sense_key) + continue; + if (!(scsi_trigger->ASC == 0xFF || scsi_trigger->ASC == asc)) + continue; + if (!(scsi_trigger->ASCQ == 0xFF || scsi_trigger->ASCQ == ascq)) + continue; + found_match = 1; + ioc->diag_trigger_active = 1; + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: setting diag_trigger_active flag\n", + __func__)); + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_SCSI; + event_data.u.scsi.SenseKey = sense_key; + event_data.u.scsi.ASC = asc; + event_data.u.scsi.ASCQ = ascq; + mpt3sas_send_trigger_data_event(ioc, &event_data); + out: + dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", + __func__)); +} + +/** + * mpt3sas_trigger_mpi - MPI trigger handler + * @ioc: per adapter object + * @ioc_status: ? + * @loginfo: ? 
+ * + */ +void +mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + struct SL_WH_MPI_TRIGGER_T *mpi_trigger; + int i; + unsigned long flags; + u8 found_match; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n", + __func__, ioc_status, loginfo)); + + /* don't send trigger if an trigger is currently active */ + if (ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + mpi_trigger = ioc->diag_trigger_mpi.MPITriggerEntry; + for (i = 0 , found_match = 0; i < ioc->diag_trigger_mpi.ValidEntries + && !found_match; i++, mpi_trigger++) { + if (mpi_trigger->IOCStatus != ioc_status) + continue; + if (!(mpi_trigger->IocLogInfo == 0xFFFFFFFF || + mpi_trigger->IocLogInfo == loginfo)) + continue; + found_match = 1; + ioc->diag_trigger_active = 1; + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: setting diag_trigger_active flag\n", + __func__)); + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_MPI; + event_data.u.mpi.IOCStatus = ioc_status; + event_data.u.mpi.IocLogInfo = loginfo; + mpt3sas_send_trigger_data_event(ioc, &event_data); + out: + dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", + __func__)); +} diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h new file mode 100644 index 000000000..405eada26 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h @@ -0,0 +1,194 @@ +/* + * This is the Fusion MPT base driver providing common API layer interface + * to set Diagnostic triggers for MPT (Message Passing Technology) based + * controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + /* Diagnostic Trigger Configuration Data Structures */ + +#ifndef MPT3SAS_TRIGGER_DIAG_H_INCLUDED +#define MPT3SAS_TRIGGER_DIAG_H_INCLUDED + +/* limitation on number of entries */ +#define NUM_VALID_ENTRIES (20) + +/* trigger types */ +#define MPT3SAS_TRIGGER_MASTER (1) +#define MPT3SAS_TRIGGER_EVENT (2) +#define MPT3SAS_TRIGGER_SCSI (3) +#define MPT3SAS_TRIGGER_MPI (4) + +/* trigger names */ +#define MASTER_TRIGGER_FILE_NAME "diag_trigger_master" +#define EVENT_TRIGGERS_FILE_NAME "diag_trigger_event" +#define SCSI_TRIGGERS_FILE_NAME "diag_trigger_scsi" +#define MPI_TRIGGER_FILE_NAME "diag_trigger_mpi" + +/* master trigger bitmask */ +#define MASTER_TRIGGER_FW_FAULT (0x00000001) +#define MASTER_TRIGGER_ADAPTER_RESET (0x00000002) +#define MASTER_TRIGGER_TASK_MANAGMENT (0x00000004) +#define MASTER_TRIGGER_DEVICE_REMOVAL (0x00000008) + +/* fake firmware event for trigger */ +#define MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED (0x6E) + +/** + * MasterTrigger is a single U32 passed to/from sysfs. + * + * Bit Flags (enables) include: + * 1. FW Faults + * 2. Adapter Reset issued by driver + * 3. TMs + * 4. Device Remove Event sent by FW + */ + +struct SL_WH_MASTER_TRIGGER_T { + uint32_t MasterData; +}; + +/** + * struct SL_WH_EVENT_TRIGGER_T - Definition of an event trigger element + * @EventValue: Event Code to trigger on + * @LogEntryQualifier: Type of FW event that logged (Log Entry Added Event only) + * + * Defines an event that should induce a DIAG_TRIGGER driver event if observed. + */ +struct SL_WH_EVENT_TRIGGER_T { + uint16_t EventValue; + uint16_t LogEntryQualifier; +}; + +/** + * struct SL_WH_EVENT_TRIGGERS_T - Structure passed to/from sysfs containing a + * list of Event Triggers to be monitored for. + * @ValidEntries: Number of _SL_WH_EVENT_TRIGGER_T structures contained in this + * structure. + * @EventTriggerEntry: List of Event trigger elements. + * + * This binary structure is transferred via sysfs to get/set Event Triggers + * in the Linux Driver. + */ + +struct SL_WH_EVENT_TRIGGERS_T { + uint32_t ValidEntries; + struct SL_WH_EVENT_TRIGGER_T EventTriggerEntry[NUM_VALID_ENTRIES]; +}; + +/** + * struct SL_WH_SCSI_TRIGGER_T - Definition of a SCSI trigger element + * @ASCQ: Additional Sense Code Qualifier. Can be specific or 0xFF for + * wildcard. + * @ASC: Additional Sense Code. 
Can be specific or 0xFF for wildcard + * @SenseKey: SCSI Sense Key + * + * Defines a sense key (single or many variants) that should induce a + * DIAG_TRIGGER driver event if observed. + */ +struct SL_WH_SCSI_TRIGGER_T { + U8 ASCQ; + U8 ASC; + U8 SenseKey; + U8 Reserved; +}; + +/** + * struct SL_WH_SCSI_TRIGGERS_T - Structure passed to/from sysfs containing a + * list of SCSI sense codes that should trigger a DIAG_SERVICE event when + * observed. + * @ValidEntries: Number of _SL_WH_SCSI_TRIGGER_T structures contained in this + * structure. + * @SCSITriggerEntry: List of SCSI Sense Code trigger elements. + * + * This binary structure is transferred via sysfs to get/set SCSI Sense Code + * Triggers in the Linux Driver. + */ +struct SL_WH_SCSI_TRIGGERS_T { + uint32_t ValidEntries; + struct SL_WH_SCSI_TRIGGER_T SCSITriggerEntry[NUM_VALID_ENTRIES]; +}; + +/** + * struct SL_WH_MPI_TRIGGER_T - Definition of an MPI trigger element + * @IOCStatus: MPI IOCStatus + * @IocLogInfo: MPI IocLogInfo. Can be specific or 0xFFFFFFFF for wildcard + * + * Defines a MPI IOCStatus/IocLogInfo pair that should induce a DIAG_TRIGGER + * driver event if observed. + */ +struct SL_WH_MPI_TRIGGER_T { + uint16_t IOCStatus; + uint16_t Reserved; + uint32_t IocLogInfo; +}; + +/** + * struct SL_WH_MPI_TRIGGERS_T - Structure passed to/from sysfs containing a + * list of MPI IOCStatus/IocLogInfo pairs that should trigger a DIAG_SERVICE + * event when observed. + * @ValidEntries: Number of _SL_WH_MPI_TRIGGER_T structures contained in this + * structure. + * @MPITriggerEntry: List of MPI IOCStatus/IocLogInfo trigger elements. + * + * This binary structure is transferred via sysfs to get/set MPI Error Triggers + * in the Linux Driver. + */ +struct SL_WH_MPI_TRIGGERS_T { + uint32_t ValidEntries; + struct SL_WH_MPI_TRIGGER_T MPITriggerEntry[NUM_VALID_ENTRIES]; +}; + +/** + * struct SL_WH_TRIGGERS_EVENT_DATA_T - event data for trigger + * @trigger_type: trigger type (see MPT3SAS_TRIGGER_XXXX) + * @u: trigger condition that caused trigger to be sent + */ +struct SL_WH_TRIGGERS_EVENT_DATA_T { + uint32_t trigger_type; + union { + struct SL_WH_MASTER_TRIGGER_T master; + struct SL_WH_EVENT_TRIGGER_T event; + struct SL_WH_SCSI_TRIGGER_T scsi; + struct SL_WH_MPI_TRIGGER_T mpi; + } u; +}; +#endif /* MPT3SAS_TRIGGER_DIAG_H_INCLUDED */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h new file mode 100644 index 000000000..5f3328f01 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* + * This is the Fusion MPT base driver providing common API layer interface + * to store diag trigger values into persistent driver triggers pages + * for MPT (Message Passing Technology) based controllers. + * + * Copyright (C) 2020 Broadcom Inc. + * + * Authors: Broadcom Inc. 
+ * Sreekanth Reddy + * + * Send feedback to : MPT-FusionLinux.pdl@broadcom.com) + */ + +#include "mpi/mpi2_cnfg.h" + +#ifndef MPI2_TRIGGER_PAGES_H +#define MPI2_TRIGGER_PAGES_H + +#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER (0xE0) +#define MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION (0x01) +typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 TriggerFlags; /* 0x08 */ + U16 Reserved0xA; /* 0x0A */ + U32 Reserved0xC[61]; /* 0x0C */ +} _MPI26_CONFIG_PAGE_DRIVER_TIGGER_0, Mpi26DriverTriggerPage0_t; + +/* Trigger Flags */ +#define MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID (0x0001) +#define MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID (0x0002) +#define MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID (0x0004) +#define MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID (0x0008) + +#define MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION (0x01) +typedef struct _MPI26_DRIVER_MASTER_TIGGER_ENTRY { + U32 MasterTriggerFlags; +} MPI26_DRIVER_MASTER_TIGGER_ENTRY; + +#define MPI26_MAX_MASTER_TRIGGERS (1) +typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 NumMasterTrigger; /* 0x08 */ + U16 Reserved0xA; /* 0x0A */ + MPI26_DRIVER_MASTER_TIGGER_ENTRY MasterTriggers[MPI26_MAX_MASTER_TRIGGERS]; /* 0x0C */ +} MPI26_CONFIG_PAGE_DRIVER_TIGGER_1, Mpi26DriverTriggerPage1_t; + +#define MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION (0x01) +typedef struct _MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY { + U16 MPIEventCode; /* 0x00 */ + U16 MPIEventCodeSpecific; /* 0x02 */ +} MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY; + +#define MPI26_MAX_MPI_EVENT_TRIGGERS (20) +typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_2 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 NumMPIEventTrigger; /* 0x08 */ + U16 Reserved0xA; /* 0x0A */ + MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY MPIEventTriggers[MPI26_MAX_MPI_EVENT_TRIGGERS]; /* 0x0C */ +} MPI26_CONFIG_PAGE_DRIVER_TIGGER_2, Mpi26DriverTriggerPage2_t; + +#define MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION (0x01) +typedef struct _MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY { + U8 ASCQ; /* 0x00 */ + U8 ASC; /* 0x01 */ + U8 SenseKey; /* 0x02 */ + U8 Reserved; /* 0x03 */ +} MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY; + +#define MPI26_MAX_SCSI_SENSE_TRIGGERS (20) +typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_3 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 NumSCSISenseTrigger; /* 0x08 */ + U16 Reserved0xA; /* 0x0A */ + MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY SCSISenseTriggers[MPI26_MAX_SCSI_SENSE_TRIGGERS]; /* 0x0C */ +} MPI26_CONFIG_PAGE_DRIVER_TIGGER_3, Mpi26DriverTriggerPage3_t; + +#define MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION (0x01) +typedef struct _MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY { + U16 IOCStatus; /* 0x00 */ + U16 Reserved; /* 0x02 */ + U32 LogInfo; /* 0x04 */ +} MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY; + +#define MPI26_MAX_LOGINFO_TRIGGERS (20) +typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_4 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 NumIOCStatusLogInfoTrigger; /* 0x08 */ + U16 Reserved0xA; /* 0x0A */ + MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY IOCStatusLoginfoTriggers[MPI26_MAX_LOGINFO_TRIGGERS]; /* 0x0C */ +} MPI26_CONFIG_PAGE_DRIVER_TIGGER_4, Mpi26DriverTriggerPage4_t; + +#endif diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c new file mode 100644 index 000000000..cc07ba41f --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c @@ -0,0 +1,299 @@ +/* + * Scsi Host Layer for 
MPT (Message Passing Technology) based controllers + * + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2015 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program. 
+ */ +#include +#include +#include +#include +#include + +#include "mpt3sas_base.h" + +/** + * _warpdrive_disable_ddio - Disable direct I/O for all the volumes + * @ioc: per adapter object + */ +static void +_warpdrive_disable_ddio(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2RaidVolPage1_t vol_pg1; + Mpi2ConfigReply_t mpi_reply; + struct _raid_device *raid_device; + u16 handle; + u16 ioc_status; + unsigned long flags; + + handle = 0xFFFF; + while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + break; + handle = le16_to_cpu(vol_pg1.DevHandle); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + if (raid_device) + raid_device->direct_io_enabled = 0; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } + return; +} + + +/** + * mpt3sas_get_num_volumes - Get number of volumes in the ioc + * @ioc: per adapter object + */ +u8 +mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2RaidVolPage1_t vol_pg1; + Mpi2ConfigReply_t mpi_reply; + u16 handle; + u8 vol_cnt = 0; + u16 ioc_status; + + handle = 0xFFFF; + while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + break; + vol_cnt++; + handle = le16_to_cpu(vol_pg1.DevHandle); + } + return vol_cnt; +} + + +/** + * mpt3sas_init_warpdrive_properties - Set properties for warpdrive direct I/O. + * @ioc: per adapter object + * @raid_device: the raid_device object + */ +void +mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc, + struct _raid_device *raid_device) +{ + Mpi2RaidVolPage0_t *vol_pg0; + Mpi2RaidPhysDiskPage0_t pd_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 sz; + u8 num_pds, count; + unsigned long stripe_sz, block_sz; + u8 stripe_exp, block_exp; + u64 dev_max_lba; + + if (!ioc->is_warpdrive) + return; + + if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as drives are exposed\n"); + return; + } + if (mpt3sas_get_num_volumes(ioc) > 1) { + _warpdrive_disable_ddio(ioc); + ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as number of drives > 1\n"); + return; + } + if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle, + &num_pds)) || !num_pds) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled Failure in computing number of drives\n"); + return; + } + + sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds * + sizeof(Mpi2RaidVol0PhysDisk_t)); + vol_pg0 = kzalloc(sz, GFP_KERNEL); + if (!vol_pg0) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled Memory allocation failure for RVPG0\n"); + return; + } + + if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, + MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled Failure in retrieving RVPG0\n"); + kfree(vol_pg0); + return; + } + + /* + * WARPDRIVE:If number of physical disks in a volume exceeds the max pds + * assumed for WARPDRIVE, disable direct I/O + */ + if (num_pds > MPT_MAX_WARPDRIVE_PDS) { + ioc_warn(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): num_mem=%d, max_mem_allowed=%d\n", + raid_device->handle, num_pds, 
MPT_MAX_WARPDRIVE_PDS); + kfree(vol_pg0); + return; + } + for (count = 0; count < num_pds; count++) { + if (mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, + vol_pg0->PhysDisk[count].PhysDiskNum) || + le16_to_cpu(pd_pg0.DevHandle) == + MPT3SAS_INVALID_DEVICE_HANDLE) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle retrieval failed for member number=%d\n", + raid_device->handle, + vol_pg0->PhysDisk[count].PhysDiskNum); + goto out_error; + } + /* Disable direct I/O if member drive lba exceeds 4 bytes */ + dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA); + if (dev_max_lba >> 32) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle (0x%04x) unsupported max lba 0x%016llx\n", + raid_device->handle, + le16_to_cpu(pd_pg0.DevHandle), + (u64)dev_max_lba); + goto out_error; + } + + raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle); + } + + /* + * Assumption for WD: Direct I/O is not supported if the volume is + * not RAID0 + */ + if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): type=%d, s_sz=%uK, blk_size=%u\n", + raid_device->handle, raid_device->volume_type, + (le32_to_cpu(vol_pg0->StripeSize) * + le16_to_cpu(vol_pg0->BlockSize)) / 1024, + le16_to_cpu(vol_pg0->BlockSize)); + goto out_error; + } + + stripe_sz = le32_to_cpu(vol_pg0->StripeSize); + stripe_exp = find_first_bit(&stripe_sz, 32); + if (stripe_exp == 32) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid stripe sz %uK\n", + raid_device->handle, + (le32_to_cpu(vol_pg0->StripeSize) * + le16_to_cpu(vol_pg0->BlockSize)) / 1024); + goto out_error; + } + raid_device->stripe_exponent = stripe_exp; + block_sz = le16_to_cpu(vol_pg0->BlockSize); + block_exp = find_first_bit(&block_sz, 16); + if (block_exp == 16) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid block sz %u\n", + raid_device->handle, le16_to_cpu(vol_pg0->BlockSize)); + goto out_error; + } + raid_device->block_exponent = block_exp; + raid_device->direct_io_enabled = 1; + + ioc_info(ioc, "WarpDrive : Direct IO is Enabled for the drive with handle(0x%04x)\n", + raid_device->handle); + /* + * WARPDRIVE: Though the following fields are not used for direct IO, + * stored for future purpose: + */ + raid_device->max_lba = le64_to_cpu(vol_pg0->MaxLBA); + raid_device->stripe_sz = le32_to_cpu(vol_pg0->StripeSize); + raid_device->block_sz = le16_to_cpu(vol_pg0->BlockSize); + + + kfree(vol_pg0); + return; + +out_error: + raid_device->direct_io_enabled = 0; + for (count = 0; count < num_pds; count++) + raid_device->pd_handle[count] = 0; + kfree(vol_pg0); + return; +} + +/** + * mpt3sas_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O + * @ioc: per adapter object + * @scmd: pointer to scsi command object + * @raid_device: pointer to raid device data structure + * @mpi_request: pointer to the SCSI_IO reqest message frame + */ +void +mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, + struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request) +{ + sector_t v_lba, p_lba, stripe_off, column, io_size; + u32 stripe_sz, stripe_exp; + u8 num_pds, cmd = scmd->cmnd[0]; + struct scsiio_tracker *st = scsi_cmd_priv(scmd); + + if (cmd != READ_10 && cmd != WRITE_10 && + cmd != READ_16 && cmd != WRITE_16) + return; + + if 
(cmd == READ_10 || cmd == WRITE_10) + v_lba = get_unaligned_be32(&mpi_request->CDB.CDB32[2]); + else + v_lba = get_unaligned_be64(&mpi_request->CDB.CDB32[2]); + + io_size = scsi_bufflen(scmd) >> raid_device->block_exponent; + + if (v_lba + io_size - 1 > raid_device->max_lba) + return; + + stripe_sz = raid_device->stripe_sz; + stripe_exp = raid_device->stripe_exponent; + stripe_off = v_lba & (stripe_sz - 1); + + /* Return unless IO falls within a stripe */ + if (stripe_off + io_size > stripe_sz) + return; + + num_pds = raid_device->num_pds; + p_lba = v_lba >> stripe_exp; + column = sector_div(p_lba, num_pds); + p_lba = (p_lba << stripe_exp) + stripe_off; + mpi_request->DevHandle = cpu_to_le16(raid_device->pd_handle[column]); + + if (cmd == READ_10 || cmd == WRITE_10) + put_unaligned_be32(lower_32_bits(p_lba), + &mpi_request->CDB.CDB32[2]); + else + put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]); + + st->direct_io = 1; +} diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c new file mode 100644 index 000000000..98b99c0f5 --- /dev/null +++ b/drivers/scsi/mvme147.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "wd33c93.h" +#include "mvme147.h" + +static irqreturn_t mvme147_intr(int irq, void *data) +{ + struct Scsi_Host *instance = data; + + if (irq == MVME147_IRQ_SCSI_PORT) + wd33c93_intr(instance); + else + m147_pcc->dma_intr = 0x89; /* Ack and enable ints */ + return IRQ_HANDLED; +} + +static int dma_setup(struct scsi_cmnd *cmd, int dir_in) +{ + struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); + struct Scsi_Host *instance = cmd->device->host; + struct WD33C93_hostdata *hdata = shost_priv(instance); + unsigned char flags = 0x01; + unsigned long addr = virt_to_bus(scsi_pointer->ptr); + + /* setup dma direction */ + if (!dir_in) + flags |= 0x04; + + /* remember direction */ + hdata->dma_dir = dir_in; + + if (dir_in) { + /* invalidate any cache */ + cache_clear(addr, scsi_pointer->this_residual); + } else { + /* push any dirty cache */ + cache_push(addr, scsi_pointer->this_residual); + } + + /* start DMA */ + m147_pcc->dma_bcr = scsi_pointer->this_residual | (1 << 24); + m147_pcc->dma_dadr = addr; + m147_pcc->dma_cntrl = flags; + + /* return success */ + return 0; +} + +static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, + int status) +{ + m147_pcc->dma_cntrl = 0; +} + +static const struct scsi_host_template mvme147_host_template = { + .module = THIS_MODULE, + .proc_name = "MVME147", + .name = "MVME147 built-in SCSI", + .queuecommand = wd33c93_queuecommand, + .eh_abort_handler = wd33c93_abort, + .eh_host_reset_handler = wd33c93_host_reset, + .show_info = wd33c93_show_info, + .write_info = wd33c93_write_info, + .can_queue = CAN_QUEUE, + .this_id = 7, + .sg_tablesize = SG_ALL, + .cmd_per_lun = CMD_PER_LUN, + .cmd_size = sizeof(struct scsi_pointer), +}; + +static struct Scsi_Host *mvme147_shost; + +static int __init mvme147_init(void) +{ + wd33c93_regs regs; + struct WD33C93_hostdata *hdata; + int error = -ENOMEM; + + if (!MACH_IS_MVME147) + return 0; + + mvme147_shost = scsi_host_alloc(&mvme147_host_template, + sizeof(struct WD33C93_hostdata)); + if (!mvme147_shost) + goto err_out; + mvme147_shost->base = 0xfffe4000; + mvme147_shost->irq = MVME147_IRQ_SCSI_PORT; + + regs.SASR = (volatile unsigned char *)0xfffe4000; + regs.SCMD = (volatile unsigned char *)0xfffe4001; 
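+	/*
+	 * The MVME147's built-in WD33C93 is programmed through two
+	 * memory-mapped byte registers: SASR (address/status) at 0xfffe4000
+	 * and SCMD (command/data) at 0xfffe4001, the same window recorded in
+	 * mvme147_shost->base above.
+	 */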
+ + hdata = shost_priv(mvme147_shost); + hdata->no_sync = 0xff; + hdata->fast = 0; + hdata->dma_mode = CTRL_DMA; + + wd33c93_init(mvme147_shost, regs, dma_setup, dma_stop, WD33C93_FS_8_10); + + error = request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, + "MVME147 SCSI PORT", mvme147_shost); + if (error) + goto err_unregister; + error = request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0, + "MVME147 SCSI DMA", mvme147_shost); + if (error) + goto err_free_irq; +#if 0 /* Disabled; causes problems booting */ + m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */ + udelay(100); + m147_pcc->scsi_interrupt = 0x00; /* Negate SCSI bus reset */ + udelay(2000); + m147_pcc->scsi_interrupt = 0x40; /* Clear bus reset interrupt */ +#endif + m147_pcc->scsi_interrupt = 0x09; /* Enable interrupt */ + + m147_pcc->dma_cntrl = 0x00; /* ensure DMA is stopped */ + m147_pcc->dma_intr = 0x89; /* Ack and enable ints */ + + error = scsi_add_host(mvme147_shost, NULL); + if (error) + goto err_free_irq; + scsi_scan_host(mvme147_shost); + return 0; + +err_free_irq: + free_irq(MVME147_IRQ_SCSI_PORT, mvme147_shost); +err_unregister: + scsi_host_put(mvme147_shost); +err_out: + return error; +} + +static void __exit mvme147_exit(void) +{ + scsi_remove_host(mvme147_shost); + + /* XXX Make sure DMA is stopped! */ + free_irq(MVME147_IRQ_SCSI_PORT, mvme147_shost); + free_irq(MVME147_IRQ_SCSI_DMA, mvme147_shost); + + scsi_host_put(mvme147_shost); +} + +module_init(mvme147_init); +module_exit(mvme147_exit); diff --git a/drivers/scsi/mvme147.h b/drivers/scsi/mvme147.h new file mode 100644 index 000000000..f75ff58dd --- /dev/null +++ b/drivers/scsi/mvme147.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef MVME147_H + +/* $Id: mvme147.h,v 1.4 1997/01/19 23:07:10 davem Exp $ + * + * Header file for the MVME147 built-in SCSI controller for Linux + * + * Written and (C) 1993, Hamish Macdonald, see mvme147.c for more info + * + */ + +#include + +int mvme147_detect(struct scsi_host_template *); +int mvme147_release(struct Scsi_Host *); + +#ifndef CMD_PER_LUN +#define CMD_PER_LUN 2 +#endif + +#ifndef CAN_QUEUE +#define CAN_QUEUE 16 +#endif + +#endif /* MVME147_H */ diff --git a/drivers/scsi/mvme16x_scsi.c b/drivers/scsi/mvme16x_scsi.c new file mode 100644 index 000000000..21d638299 --- /dev/null +++ b/drivers/scsi/mvme16x_scsi.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Detection routine for the NCR53c710 based MVME16x SCSI Controllers for Linux. 
+ * + * Based on work by Alan Hourihane + * + * Rewritten to use 53c700.c by Kars de Jong + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "53c700.h" + +MODULE_AUTHOR("Kars de Jong "); +MODULE_DESCRIPTION("MVME16x NCR53C710 driver"); +MODULE_LICENSE("GPL"); + +static struct scsi_host_template mvme16x_scsi_driver_template = { + .name = "MVME16x NCR53c710 SCSI", + .proc_name = "MVME16x", + .this_id = 7, + .module = THIS_MODULE, +}; + +static struct platform_device *mvme16x_scsi_device; + +static int mvme16x_probe(struct platform_device *dev) +{ + struct Scsi_Host * host = NULL; + struct NCR_700_Host_Parameters *hostdata; + + if (!MACH_IS_MVME16x) + goto out; + + if (mvme16x_config & MVME16x_CONFIG_NO_SCSICHIP) { + printk(KERN_INFO "mvme16x-scsi: detection disabled, " + "SCSI chip not present\n"); + goto out; + } + + hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); + if (hostdata == NULL) { + printk(KERN_ERR "mvme16x-scsi: " + "Failed to allocate host data\n"); + goto out; + } + + /* Fill in the required pieces of hostdata */ + hostdata->base = (void __iomem *)0xfff47000UL; + hostdata->clock = 50; /* XXX - depends on the CPU clock! */ + hostdata->chip710 = 1; + hostdata->dmode_extra = DMODE_FC2; + hostdata->dcntl_extra = EA_710; + hostdata->ctest7_extra = CTEST7_TT1; + + /* and register the chip */ + host = NCR_700_detect(&mvme16x_scsi_driver_template, hostdata, + &dev->dev); + if (!host) { + printk(KERN_ERR "mvme16x-scsi: No host detected; " + "board configuration problem?\n"); + goto out_free; + } + host->this_id = 7; + host->base = 0xfff47000UL; + host->irq = MVME16x_IRQ_SCSI; + if (request_irq(host->irq, NCR_700_intr, 0, "mvme16x-scsi", host)) { + printk(KERN_ERR "mvme16x-scsi: request_irq failed\n"); + goto out_put_host; + } + + /* Enable scsi chip ints */ + { + volatile unsigned long v; + + /* Enable scsi interrupts at level 4 in PCCchip2 */ + v = in_be32(0xfff4202c); + v = (v & ~0xff) | 0x10 | 4; + out_be32(0xfff4202c, v); + } + + platform_set_drvdata(dev, host); + scsi_scan_host(host); + + return 0; + + out_put_host: + scsi_host_put(host); + out_free: + kfree(hostdata); + out: + return -ENODEV; +} + +static int mvme16x_device_remove(struct platform_device *dev) +{ + struct Scsi_Host *host = platform_get_drvdata(dev); + struct NCR_700_Host_Parameters *hostdata = shost_priv(host); + + /* Disable scsi chip ints */ + { + volatile unsigned long v; + + v = in_be32(0xfff4202c); + v &= ~0x10; + out_be32(0xfff4202c, v); + } + scsi_remove_host(host); + NCR_700_release(host); + kfree(hostdata); + free_irq(host->irq, host); + + return 0; +} + +static struct platform_driver mvme16x_scsi_driver = { + .driver = { + .name = "mvme16x-scsi", + }, + .probe = mvme16x_probe, + .remove = mvme16x_device_remove, +}; + +static int __init mvme16x_scsi_init(void) +{ + int err; + + err = platform_driver_register(&mvme16x_scsi_driver); + if (err) + return err; + + mvme16x_scsi_device = platform_device_register_simple("mvme16x-scsi", + -1, NULL, 0); + if (IS_ERR(mvme16x_scsi_device)) { + platform_driver_unregister(&mvme16x_scsi_driver); + return PTR_ERR(mvme16x_scsi_device); + } + + return 0; +} + +static void __exit mvme16x_scsi_exit(void) +{ + platform_device_unregister(mvme16x_scsi_device); + platform_driver_unregister(&mvme16x_scsi_driver); +} + +module_init(mvme16x_scsi_init); +module_exit(mvme16x_scsi_exit); diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig new file mode 100644 
index 000000000..5ac7fd593 --- /dev/null +++ b/drivers/scsi/mvsas/Kconfig @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver. +# +# Copyright 2007 Red Hat, Inc. +# Copyright 2008 Marvell. +# Copyright 2009-2011 Marvell. +# + +config SCSI_MVSAS + tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support" + depends on PCI && HAS_IOPORT + select SCSI_SAS_LIBSAS + select FW_LOADER + help + This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX and 6Gb/s + PCI-E 88SE94XX chip based host adapters. + +config SCSI_MVSAS_DEBUG + bool "Compile in debug mode" + default y + depends on SCSI_MVSAS + help + Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode, + the driver prints some messages to the console. +config SCSI_MVSAS_TASKLET + bool "Support for interrupt tasklet" + default n + depends on SCSI_MVSAS + help + Compiles the 88SE64xx/88SE94xx driver in interrupt tasklet mode.In this mode, + the interrupt will schedule a tasklet. diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile new file mode 100644 index 000000000..75849258e --- /dev/null +++ b/drivers/scsi/mvsas/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for Marvell 88SE64xx/88SE84xx SAS/SATA driver. +# +# Copyright 2007 Red Hat, Inc. +# Copyright 2008 Marvell. +# Copyright 2009-2011 Marvell. +# + +ccflags-$(CONFIG_SCSI_MVSAS_DEBUG) := -DMV_DEBUG + +obj-$(CONFIG_SCSI_MVSAS) += mvsas.o +mvsas-y += mv_init.o \ + mv_sas.o \ + mv_64xx.o \ + mv_94xx.o diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c new file mode 100644 index 000000000..1f2b61de8 --- /dev/null +++ b/drivers/scsi/mvsas/mv_64xx.c @@ -0,0 +1,814 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Marvell 88SE64xx hardware specific + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. 
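+ *
+ * This file supplies the 88SE64xx-specific PHY, interrupt and SPI helpers
+ * that are collected into the mvs_64xx_dispatch table at the end of the
+ * file.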
+*/ + +#include "mv_sas.h" +#include "mv_64xx.h" +#include "mv_chips.h" + +static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i) +{ + void __iomem *regs = mvi->regs; + u32 reg; + struct mvs_phy *phy = &mvi->phy[i]; + + reg = mr32(MVS_GBL_PORT_TYPE); + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + if (reg & MODE_SAS_SATA & (1 << i)) + phy->phy_type |= PORT_TYPE_SAS; + else + phy->phy_type |= PORT_TYPE_SATA; +} + +static void mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(MVS_PCS); + if (mvi->chip->n_phy <= MVS_SOC_PORTS) + tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT); + else + tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); + mw32(MVS_PCS, tmp); +} + +static void mvs_64xx_phy_hacks(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + int i; + + mvs_phy_hacks(mvi); + + if (!(mvi->flags & MVF_FLAG_SOC)) { + for (i = 0; i < MVS_SOC_PORTS; i++) { + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE8); + mvs_write_port_vsr_data(mvi, i, 0x2F0); + } + } else { + /* disable auto port detection */ + mw32(MVS_GBL_PORT_TYPE, 0); + for (i = 0; i < mvi->chip->n_phy; i++) { + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7); + mvs_write_port_vsr_data(mvi, i, 0x90000000); + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9); + mvs_write_port_vsr_data(mvi, i, 0x50f2); + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11); + mvs_write_port_vsr_data(mvi, i, 0x0e); + } + } +} + +static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id) +{ + void __iomem *regs = mvi->regs; + u32 reg, tmp; + + if (!(mvi->flags & MVF_FLAG_SOC)) { + if (phy_id < MVS_SOC_PORTS) + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, ®); + else + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, ®); + + } else + reg = mr32(MVS_PHY_CTL); + + tmp = reg; + if (phy_id < MVS_SOC_PORTS) + tmp |= (1U << phy_id) << PCTL_LINK_OFFS; + else + tmp |= (1U << (phy_id - MVS_SOC_PORTS)) << PCTL_LINK_OFFS; + + if (!(mvi->flags & MVF_FLAG_SOC)) { + if (phy_id < MVS_SOC_PORTS) { + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); + mdelay(10); + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg); + } else { + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); + mdelay(10); + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg); + } + } else { + mw32(MVS_PHY_CTL, tmp); + mdelay(10); + mw32(MVS_PHY_CTL, reg); + } +} + +static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) +{ + u32 tmp; + tmp = mvs_read_port_irq_stat(mvi, phy_id); + tmp &= ~PHYEV_RDY_CH; + mvs_write_port_irq_stat(mvi, phy_id, tmp); + tmp = mvs_read_phy_ctl(mvi, phy_id); + if (hard == MVS_HARD_RESET) + tmp |= PHY_RST_HARD; + else if (hard == MVS_SOFT_RESET) + tmp |= PHY_RST; + mvs_write_phy_ctl(mvi, phy_id, tmp); + if (hard) { + do { + tmp = mvs_read_phy_ctl(mvi, phy_id); + } while (tmp & PHY_RST_HARD); + } +} + +static void +mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + if (clear_all) { + tmp = mr32(MVS_INT_STAT_SRS_0); + if (tmp) { + printk(KERN_DEBUG "check SRS 0 %08X.\n", tmp); + mw32(MVS_INT_STAT_SRS_0, tmp); + } + } else { + tmp = mr32(MVS_INT_STAT_SRS_0); + if (tmp & (1 << (reg_set % 32))) { + printk(KERN_DEBUG "register set 0x%x was stopped.\n", + reg_set); + mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32)); + } + } +} + +static int mvs_64xx_chip_reset(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + int i; + + /* make sure interrupts are masked immediately (paranoia) */ + 
mw32(MVS_GBL_CTL, 0); + tmp = mr32(MVS_GBL_CTL); + + /* Reset Controller */ + if (!(tmp & HBA_RST)) { + if (mvi->flags & MVF_PHY_PWR_FIX) { + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_PHY_DSBL; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); + + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_PHY_DSBL; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); + } + } + + /* make sure interrupts are masked immediately (paranoia) */ + mw32(MVS_GBL_CTL, 0); + tmp = mr32(MVS_GBL_CTL); + + /* Reset Controller */ + if (!(tmp & HBA_RST)) { + /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */ + mw32_f(MVS_GBL_CTL, HBA_RST); + } + + /* wait for reset to finish; timeout is just a guess */ + i = 1000; + while (i-- > 0) { + msleep(10); + + if (!(mr32(MVS_GBL_CTL) & HBA_RST)) + break; + } + if (mr32(MVS_GBL_CTL) & HBA_RST) { + dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n"); + return -EBUSY; + } + return 0; +} + +static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + if (!(mvi->flags & MVF_FLAG_SOC)) { + u32 offs; + if (phy_id < 4) + offs = PCR_PHY_CTL; + else { + offs = PCR_PHY_CTL2; + phy_id -= 4; + } + pci_read_config_dword(mvi->pdev, offs, &tmp); + tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id); + pci_write_config_dword(mvi->pdev, offs, tmp); + } else { + tmp = mr32(MVS_PHY_CTL); + tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id); + mw32(MVS_PHY_CTL, tmp); + } +} + +static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + if (!(mvi->flags & MVF_FLAG_SOC)) { + u32 offs; + if (phy_id < 4) + offs = PCR_PHY_CTL; + else { + offs = PCR_PHY_CTL2; + phy_id -= 4; + } + pci_read_config_dword(mvi->pdev, offs, &tmp); + tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id)); + pci_write_config_dword(mvi->pdev, offs, tmp); + } else { + tmp = mr32(MVS_PHY_CTL); + tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id)); + mw32(MVS_PHY_CTL, tmp); + } +} + +static int mvs_64xx_init(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + int i; + u32 tmp, cctl; + + if (mvi->pdev && mvi->pdev->revision == 0) + mvi->flags |= MVF_PHY_PWR_FIX; + if (!(mvi->flags & MVF_FLAG_SOC)) { + mvs_show_pcie_usage(mvi); + tmp = mvs_64xx_chip_reset(mvi); + if (tmp) + return tmp; + } else { + tmp = mr32(MVS_PHY_CTL); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_PHY_DSBL; + mw32(MVS_PHY_CTL, tmp); + } + + /* Init Chip */ + /* make sure RST is set; HBA_RST /should/ have done that for us */ + cctl = mr32(MVS_CTL) & 0xFFFF; + if (cctl & CCTL_RST) + cctl &= ~CCTL_RST; + else + mw32_f(MVS_CTL, cctl | CCTL_RST); + + if (!(mvi->flags & MVF_FLAG_SOC)) { + /* write to device control _AND_ device status register */ + pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp); + tmp &= ~PRD_REQ_MASK; + tmp |= PRD_REQ_SIZE; + pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp); + + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); + tmp &= ~PCTL_PWR_OFF; + tmp &= ~PCTL_PHY_DSBL; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); + + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); + tmp &= PCTL_PWR_OFF; + tmp &= ~PCTL_PHY_DSBL; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); + } else { + tmp = mr32(MVS_PHY_CTL); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_COM_ON; + tmp &= ~PCTL_PHY_DSBL; + tmp |= PCTL_LINK_RST; + mw32(MVS_PHY_CTL, tmp); + msleep(100); + tmp &= ~PCTL_LINK_RST; + mw32(MVS_PHY_CTL, tmp); + msleep(100); + } + + /* reset 
control */ + mw32(MVS_PCS, 0); /* MVS_PCS */ + /* init phys */ + mvs_64xx_phy_hacks(mvi); + + tmp = mvs_cr32(mvi, CMD_PHY_MODE_21); + tmp &= 0x0000ffff; + tmp |= 0x00fa0000; + mvs_cw32(mvi, CMD_PHY_MODE_21, tmp); + + /* enable auto port detection */ + mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN); + + mw32(MVS_CMD_LIST_LO, mvi->slot_dma); + mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); + + mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma); + mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); + + mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ); + mw32(MVS_TX_LO, mvi->tx_dma); + mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16); + + mw32(MVS_RX_CFG, MVS_RX_RING_SZ); + mw32(MVS_RX_LO, mvi->rx_dma); + mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16); + + for (i = 0; i < mvi->chip->n_phy; i++) { + /* set phy local SAS address */ + /* should set little endian SAS address to 64xx chip */ + mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI, + cpu_to_be64(mvi->phy[i].dev_sas_addr)); + + mvs_64xx_enable_xmt(mvi, i); + + mvs_64xx_phy_reset(mvi, i, MVS_HARD_RESET); + msleep(500); + mvs_64xx_detect_porttype(mvi, i); + } + if (mvi->flags & MVF_FLAG_SOC) { + /* set select registers */ + writel(0x0E008000, regs + 0x000); + writel(0x59000008, regs + 0x004); + writel(0x20, regs + 0x008); + writel(0x20, regs + 0x00c); + writel(0x20, regs + 0x010); + writel(0x20, regs + 0x014); + writel(0x20, regs + 0x018); + writel(0x20, regs + 0x01c); + } + for (i = 0; i < mvi->chip->n_phy; i++) { + /* clear phy int status */ + tmp = mvs_read_port_irq_stat(mvi, i); + tmp &= ~PHYEV_SIG_FIS; + mvs_write_port_irq_stat(mvi, i, tmp); + + /* set phy int mask */ + tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS | + PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR | + PHYEV_DEC_ERR; + mvs_write_port_irq_mask(mvi, i, tmp); + + msleep(100); + mvs_update_phyinfo(mvi, i, 1); + } + + /* little endian for open address and command table, etc. */ + cctl = mr32(MVS_CTL); + cctl |= CCTL_ENDIAN_CMD; + cctl |= CCTL_ENDIAN_DATA; + cctl &= ~CCTL_ENDIAN_OPEN; + cctl |= CCTL_ENDIAN_RSP; + mw32_f(MVS_CTL, cctl); + + /* reset CMD queue */ + tmp = mr32(MVS_PCS); + tmp |= PCS_CMD_RST; + tmp &= ~PCS_SELF_CLEAR; + mw32(MVS_PCS, tmp); + /* + * the max count is 0x1ff, while our max slot is 0x200, + * it will make count 0. 
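+	 * (0x200 & 0x1ff wraps to 0); the count is therefore clamped to
+	 * 0x1ff before being written to MVS_INT_COAL below.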
+ */ + tmp = 0; + if (MVS_CHIP_SLOT_SZ > 0x1ff) + mw32(MVS_INT_COAL, 0x1ff | COAL_EN); + else + mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN); + + tmp = 0x10000 | interrupt_coalescing; + mw32(MVS_INT_COAL_TMOUT, tmp); + + /* ladies and gentlemen, start your engines */ + mw32(MVS_TX_CFG, 0); + mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); + mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN); + /* enable CMD/CMPL_Q/RESP mode */ + mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | + PCS_CMD_EN | PCS_CMD_STOP_ERR); + + /* enable completion queue interrupt */ + tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP | + CINT_DMA_PCIE); + + mw32(MVS_INT_MASK, tmp); + + /* Enable SRS interrupt */ + mw32(MVS_INT_MASK_SRS_0, 0xFFFF); + + return 0; +} + +static int mvs_64xx_ioremap(struct mvs_info *mvi) +{ + if (!mvs_ioremap(mvi, 4, 2)) + return 0; + return -1; +} + +static void mvs_64xx_iounmap(struct mvs_info *mvi) +{ + mvs_iounmap(mvi->regs); + mvs_iounmap(mvi->regs_ex); +} + +static void mvs_64xx_interrupt_enable(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(MVS_GBL_CTL); + mw32(MVS_GBL_CTL, tmp | INT_EN); +} + +static void mvs_64xx_interrupt_disable(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(MVS_GBL_CTL); + mw32(MVS_GBL_CTL, tmp & ~INT_EN); +} + +static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq) +{ + void __iomem *regs = mvi->regs; + u32 stat; + + if (!(mvi->flags & MVF_FLAG_SOC)) { + stat = mr32(MVS_GBL_INT_STAT); + + if (stat == 0 || stat == 0xffffffff) + return 0; + } else + stat = 1; + return stat; +} + +static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat) +{ + void __iomem *regs = mvi->regs; + + /* clear CMD_CMPLT ASAP */ + mw32_f(MVS_INT_STAT, CINT_DONE); + + spin_lock(&mvi->lock); + mvs_int_full(mvi); + spin_unlock(&mvi->lock); + + return IRQ_HANDLED; +} + +static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx) +{ + u32 tmp; + mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32)); + mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32)); + do { + tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3)); + } while (tmp & 1 << (slot_idx % 32)); + do { + tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3)); + } while (tmp & 1 << (slot_idx % 32)); +} + +static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, + u32 tfs) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + if (type == PORT_TYPE_SATA) { + tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs); + mw32(MVS_INT_STAT_SRS_0, tmp); + } + mw32(MVS_INT_STAT, CINT_CI_STOP); + tmp = mr32(MVS_PCS) | 0xFF00; + mw32(MVS_PCS, tmp); +} + +static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) +{ + void __iomem *regs = mvi->regs; + u32 tmp, offs; + + if (*tfs == MVS_ID_NOT_MAPPED) + return; + + offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); + if (*tfs < 16) { + tmp = mr32(MVS_PCS); + mw32(MVS_PCS, tmp & ~offs); + } else { + tmp = mr32(MVS_CTL); + mw32(MVS_CTL, tmp & ~offs); + } + + tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs); + if (tmp) + mw32(MVS_INT_STAT_SRS_0, tmp); + + *tfs = MVS_ID_NOT_MAPPED; + return; +} + +static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs) +{ + int i; + u32 tmp, offs; + void __iomem *regs = mvi->regs; + + if (*tfs != MVS_ID_NOT_MAPPED) + return 0; + + tmp = mr32(MVS_PCS); + + for (i = 0; i < mvi->chip->srs_sz; i++) { + if (i == 16) + tmp = mr32(MVS_CTL); + offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); + if (!(tmp & offs)) { + *tfs = i; + + if (i < 16) + 
mw32(MVS_PCS, tmp | offs); + else + mw32(MVS_CTL, tmp | offs); + tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i); + if (tmp) + mw32(MVS_INT_STAT_SRS_0, tmp); + return 0; + } + } + return MVS_ID_NOT_MAPPED; +} + +static void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd) +{ + int i; + struct scatterlist *sg; + struct mvs_prd *buf_prd = prd; + for_each_sg(scatter, sg, nr, i) { + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); + buf_prd->len = cpu_to_le32(sg_dma_len(sg)); + buf_prd++; + } +} + +static int mvs_64xx_oob_done(struct mvs_info *mvi, int i) +{ + u32 phy_st; + mvs_write_port_cfg_addr(mvi, i, + PHYR_PHY_STAT); + phy_st = mvs_read_port_cfg_data(mvi, i); + if (phy_st & PHY_OOB_DTCTD) + return 1; + return 0; +} + +static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i, + struct sas_identify_frame *id) + +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + sas_phy->linkrate = + (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; + + phy->minimum_linkrate = + (phy->phy_status & + PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; + phy->maximum_linkrate = + (phy->phy_status & + PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; + + mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); + phy->dev_info = mvs_read_port_cfg_data(mvi, i); + + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); + phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); + + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); + phy->att_dev_sas_addr = + (u64) mvs_read_port_cfg_data(mvi, i) << 32; + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); + phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); + phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr); +} + +static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i) +{ + u32 tmp; + struct mvs_phy *phy = &mvi->phy[i]; + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); + tmp = mvs_read_port_vsr_data(mvi, i); + if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == + SAS_LINK_RATE_1_5_GBPS) + tmp &= ~PHY_MODE6_LATECLK; + else + tmp |= PHY_MODE6_LATECLK; + mvs_write_port_vsr_data(mvi, i, tmp); +} + +static void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, + struct sas_phy_linkrates *rates) +{ + u32 lrmin = 0, lrmax = 0; + u32 tmp; + + tmp = mvs_read_phy_ctl(mvi, phy_id); + lrmin = (rates->minimum_linkrate << 8); + lrmax = (rates->maximum_linkrate << 12); + + if (lrmin) { + tmp &= ~(0xf << 8); + tmp |= lrmin; + } + if (lrmax) { + tmp &= ~(0xf << 12); + tmp |= lrmax; + } + mvs_write_phy_ctl(mvi, phy_id, tmp); + mvs_64xx_phy_reset(mvi, phy_id, MVS_HARD_RESET); +} + +static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi) +{ + u32 tmp; + void __iomem *regs = mvi->regs; + tmp = mr32(MVS_PCS); + mw32(MVS_PCS, tmp & 0xFFFF); + mw32(MVS_PCS, tmp); + tmp = mr32(MVS_CTL); + mw32(MVS_CTL, tmp & 0xFFFF); + mw32(MVS_CTL, tmp); +} + + +static u32 mvs_64xx_spi_read_data(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs_ex; + return ior32(SPI_DATA_REG_64XX); +} + +static void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data) +{ + void __iomem *regs = mvi->regs_ex; + + iow32(SPI_DATA_REG_64XX, data); +} + + +static int mvs_64xx_spi_buildcmd(struct mvs_info *mvi, + u32 *dwCmd, + u8 cmd, + u8 read, + u8 length, + u32 addr + ) +{ + u32 dwTmp; + + dwTmp = ((u32)cmd << 24) | ((u32)length << 19); + if (read) + dwTmp |= 1U<<23; + + if (addr != MV_MAX_U32) { + dwTmp |= 1U<<22; + dwTmp |= (addr & 0x0003FFFF); + } + + 
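+	/*
+	 * Resulting command word: opcode in bits 31:24, transfer length from
+	 * bit 19, bit 23 set for a read, bit 22 set when a target address
+	 * (addr != MV_MAX_U32) is supplied in the low 18 bits.
+	 */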
*dwCmd = dwTmp; + return 0; +} + + +static int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd) +{ + void __iomem *regs = mvi->regs_ex; + int retry; + + for (retry = 0; retry < 1; retry++) { + iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE); + iow32(SPI_CMD_REG_64XX, cmd); + iow32(SPI_CTRL_REG_64XX, + SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART); + } + + return 0; +} + +static int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) +{ + void __iomem *regs = mvi->regs_ex; + u32 i, dwTmp; + + for (i = 0; i < timeout; i++) { + dwTmp = ior32(SPI_CTRL_REG_64XX); + if (!(dwTmp & SPI_CTRL_SPISTART)) + return 0; + msleep(10); + } + + return -1; +} + +static void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask, + int buf_len, int from, void *prd) +{ + int i; + struct mvs_prd *buf_prd = prd; + dma_addr_t buf_dma = mvi->bulk_buffer_dma; + + buf_prd += from; + for (i = 0; i < MAX_SG_ENTRY - from; i++) { + buf_prd->addr = cpu_to_le64(buf_dma); + buf_prd->len = cpu_to_le32(buf_len); + ++buf_prd; + } +} + +static void mvs_64xx_tune_interrupt(struct mvs_info *mvi, u32 time) +{ + void __iomem *regs = mvi->regs; + u32 tmp = 0; + /* + * the max count is 0x1ff, while our max slot is 0x200, + * it will make count 0. + */ + if (time == 0) { + mw32(MVS_INT_COAL, 0); + mw32(MVS_INT_COAL_TMOUT, 0x10000); + } else { + if (MVS_CHIP_SLOT_SZ > 0x1ff) + mw32(MVS_INT_COAL, 0x1ff|COAL_EN); + else + mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN); + + tmp = 0x10000 | time; + mw32(MVS_INT_COAL_TMOUT, tmp); + } +} + +const struct mvs_dispatch mvs_64xx_dispatch = { + "mv64xx", + mvs_64xx_init, + NULL, + mvs_64xx_ioremap, + mvs_64xx_iounmap, + mvs_64xx_isr, + mvs_64xx_isr_status, + mvs_64xx_interrupt_enable, + mvs_64xx_interrupt_disable, + mvs_read_phy_ctl, + mvs_write_phy_ctl, + mvs_read_port_cfg_data, + mvs_write_port_cfg_data, + mvs_write_port_cfg_addr, + mvs_read_port_vsr_data, + mvs_write_port_vsr_data, + mvs_write_port_vsr_addr, + mvs_read_port_irq_stat, + mvs_write_port_irq_stat, + mvs_read_port_irq_mask, + mvs_write_port_irq_mask, + mvs_64xx_command_active, + mvs_64xx_clear_srs_irq, + mvs_64xx_issue_stop, + mvs_start_delivery, + mvs_rx_update, + mvs_int_full, + mvs_64xx_assign_reg_set, + mvs_64xx_free_reg_set, + mvs_get_prd_size, + mvs_get_prd_count, + mvs_64xx_make_prd, + mvs_64xx_detect_porttype, + mvs_64xx_oob_done, + mvs_64xx_fix_phy_info, + mvs_64xx_phy_work_around, + mvs_64xx_phy_set_link_rate, + mvs_hw_max_link_rate, + mvs_64xx_phy_disable, + mvs_64xx_phy_enable, + mvs_64xx_phy_reset, + mvs_64xx_stp_reset, + mvs_64xx_clear_active_cmds, + mvs_64xx_spi_read_data, + mvs_64xx_spi_write_data, + mvs_64xx_spi_buildcmd, + mvs_64xx_spi_issuecmd, + mvs_64xx_spi_waitdataready, + mvs_64xx_fix_dma, + mvs_64xx_tune_interrupt, + NULL, +}; + diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h new file mode 100644 index 000000000..c25a5dfe7 --- /dev/null +++ b/drivers/scsi/mvsas/mv_64xx.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Marvell 88SE64xx hardware specific head file + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. 
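+ *
+ * The register offsets below cover the enhanced-mode window (BAR4), the
+ * PCI configuration registers and the vendor-specific PHY registers used
+ * by mv_64xx.c.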
+*/ + +#ifndef _MVS64XX_REG_H_ +#define _MVS64XX_REG_H_ + +#include + +#define MAX_LINK_RATE SAS_LINK_RATE_3_0_GBPS + +/* enhanced mode registers (BAR4) */ +enum hw_registers { + MVS_GBL_CTL = 0x04, /* global control */ + MVS_GBL_INT_STAT = 0x08, /* global irq status */ + MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ + + MVS_PHY_CTL = 0x40, /* SOC PHY Control */ + MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */ + + MVS_GBL_PORT_TYPE = 0xa0, /* port type */ + + MVS_CTL = 0x100, /* SAS/SATA port configuration */ + MVS_PCS = 0x104, /* SAS/SATA port control/status */ + MVS_CMD_LIST_LO = 0x108, /* cmd list addr */ + MVS_CMD_LIST_HI = 0x10C, + MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */ + MVS_RX_FIS_HI = 0x114, + + MVS_TX_CFG = 0x120, /* TX configuration */ + MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ + MVS_TX_HI = 0x128, + + MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */ + MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */ + MVS_RX_CFG = 0x134, /* RX configuration */ + MVS_RX_LO = 0x138, /* RX (completion) ring addr */ + MVS_RX_HI = 0x13C, + MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */ + + MVS_INT_COAL = 0x148, /* Int coalescing config */ + MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ + MVS_INT_STAT = 0x150, /* Central int status */ + MVS_INT_MASK = 0x154, /* Central int enable */ + MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */ + MVS_INT_MASK_SRS_0 = 0x15C, + + /* ports 1-3 follow after this */ + MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */ + MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */ + /* ports 5-7 follow after this */ + MVS_P4_INT_STAT = 0x200, /* Port4 interrupt status */ + MVS_P4_INT_MASK = 0x204, /* Port4 interrupt enable mask */ + + /* ports 1-3 follow after this */ + MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */ + /* ports 5-7 follow after this */ + MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */ + + MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */ + MVS_CMD_DATA = 0x1BC, /* Command register port (data) */ + + /* ports 1-3 follow after this */ + MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */ + MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */ + /* ports 5-7 follow after this */ + MVS_P4_CFG_ADDR = 0x230, /* Port4 config address */ + MVS_P4_CFG_DATA = 0x234, /* Port4 config data */ + + /* ports 1-3 follow after this */ + MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */ + MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */ + /* ports 5-7 follow after this */ + MVS_P4_VSR_ADDR = 0x250, /* port4 VSR addr */ + MVS_P4_VSR_DATA = 0x254, /* port4 VSR data */ +}; + +enum pci_cfg_registers { + PCR_PHY_CTL = 0x40, + PCR_PHY_CTL2 = 0x90, + PCR_DEV_CTRL = 0xE8, + PCR_LINK_STAT = 0xF2, +}; + +/* SAS/SATA Vendor Specific Port Registers */ +enum sas_sata_vsp_regs { + VSR_PHY_STAT = 0x00, /* Phy Status */ + VSR_PHY_MODE1 = 0x01, /* phy tx */ + VSR_PHY_MODE2 = 0x02, /* tx scc */ + VSR_PHY_MODE3 = 0x03, /* pll */ + VSR_PHY_MODE4 = 0x04, /* VCO */ + VSR_PHY_MODE5 = 0x05, /* Rx */ + VSR_PHY_MODE6 = 0x06, /* CDR */ + VSR_PHY_MODE7 = 0x07, /* Impedance */ + VSR_PHY_MODE8 = 0x08, /* Voltage */ + VSR_PHY_MODE9 = 0x09, /* Test */ + VSR_PHY_MODE10 = 0x0A, /* Power */ + VSR_PHY_MODE11 = 0x0B, /* Phy Mode */ + VSR_PHY_VS0 = 0x0C, /* Vednor Specific 0 */ + VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */ +}; + +enum chip_register_bits { + PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8), + PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12), + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), + 
PHY_NEG_SPP_PHYS_LINK_RATE_MASK = + (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), +}; + +#define MAX_SG_ENTRY 64 + +struct mvs_prd { + __le64 addr; /* 64-bit buffer address */ + __le32 reserved; + __le32 len; /* 16-bit length */ +}; + +#define SPI_CTRL_REG 0xc0 +#define SPI_CTRL_VENDOR_ENABLE (1U<<29) +#define SPI_CTRL_SPIRDY (1U<<22) +#define SPI_CTRL_SPISTART (1U<<20) + +#define SPI_CMD_REG 0xc4 +#define SPI_DATA_REG 0xc8 + +#define SPI_CTRL_REG_64XX 0x10 +#define SPI_CMD_REG_64XX 0x14 +#define SPI_DATA_REG_64XX 0x18 + +#endif diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c new file mode 100644 index 000000000..fc0b8eb68 --- /dev/null +++ b/drivers/scsi/mvsas/mv_94xx.c @@ -0,0 +1,1182 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Marvell 88SE94xx hardware specific + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. +*/ + +#include "mv_sas.h" +#include "mv_94xx.h" +#include "mv_chips.h" + +static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i) +{ + u32 reg; + struct mvs_phy *phy = &mvi->phy[i]; + u32 phy_status; + + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3); + reg = mvs_read_port_vsr_data(mvi, i); + phy_status = ((reg & 0x3f0000) >> 16) & 0xff; + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + switch (phy_status) { + case 0x10: + phy->phy_type |= PORT_TYPE_SAS; + break; + case 0x1d: + default: + phy->phy_type |= PORT_TYPE_SATA; + break; + } +} + +static void set_phy_tuning(struct mvs_info *mvi, int phy_id, + struct phy_tuning phy_tuning) +{ + u32 tmp, setting_0 = 0, setting_1 = 0; + u8 i; + + /* Remap information for B0 chip: + * + * R0Ch -> R118h[15:0] (Adapted DFE F3 - F5 coefficient) + * R0Dh -> R118h[31:16] (Generation 1 Setting 0) + * R0Eh -> R11Ch[15:0] (Generation 1 Setting 1) + * R0Fh -> R11Ch[31:16] (Generation 2 Setting 0) + * R10h -> R120h[15:0] (Generation 2 Setting 1) + * R11h -> R120h[31:16] (Generation 3 Setting 0) + * R12h -> R124h[15:0] (Generation 3 Setting 1) + * R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved)) + */ + + /* A0 has a different set of registers */ + if (mvi->pdev->revision == VANIR_A0_REV) + return; + + for (i = 0; i < 3; i++) { + /* loop 3 times, set Gen 1, Gen 2, Gen 3 */ + switch (i) { + case 0: + setting_0 = GENERATION_1_SETTING; + setting_1 = GENERATION_1_2_SETTING; + break; + case 1: + setting_0 = GENERATION_1_2_SETTING; + setting_1 = GENERATION_2_3_SETTING; + break; + case 2: + setting_0 = GENERATION_2_3_SETTING; + setting_1 = GENERATION_3_4_SETTING; + break; + } + + /* Set: + * + * Transmitter Emphasis Enable + * Transmitter Emphasis Amplitude + * Transmitter Amplitude + */ + mvs_write_port_vsr_addr(mvi, phy_id, setting_0); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp &= ~(0xFBE << 16); + tmp |= (((phy_tuning.trans_emp_en << 11) | + (phy_tuning.trans_emp_amp << 7) | + (phy_tuning.trans_amp << 1)) << 16); + mvs_write_port_vsr_data(mvi, phy_id, tmp); + + /* Set Transmitter Amplitude Adjust */ + mvs_write_port_vsr_addr(mvi, phy_id, setting_1); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp &= ~(0xC000); + tmp |= (phy_tuning.trans_amp_adj << 14); + mvs_write_port_vsr_data(mvi, phy_id, tmp); + } +} + +static void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id, + struct ffe_control ffe) +{ + u32 tmp; + + /* Don't run this if A0/B0 */ + if ((mvi->pdev->revision == VANIR_A0_REV) + || (mvi->pdev->revision == VANIR_B0_REV)) + return; + + /* FFE Resistor and Capacitor */ + /* R10Ch DFE Resolution Control/Squelch and FFE Setting + * + * 
FFE_FORCE [7] + * FFE_RES_SEL [6:4] + * FFE_CAP_SEL [3:0] + */ + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_FFE_CONTROL); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp &= ~0xFF; + + /* Read from HBA_Info_Page */ + tmp |= ((0x1 << 7) | + (ffe.ffe_rss_sel << 4) | + (ffe.ffe_cap_sel << 0)); + + mvs_write_port_vsr_data(mvi, phy_id, tmp); + + /* R064h PHY Mode Register 1 + * + * DFE_DIS 18 + */ + mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp &= ~0x40001; + /* Hard coding */ + /* No defines in HBA_Info_Page */ + tmp |= (0 << 18); + mvs_write_port_vsr_data(mvi, phy_id, tmp); + + /* R110h DFE F0-F1 Coefficient Control/DFE Update Control + * + * DFE_UPDATE_EN [11:6] + * DFE_FX_FORCE [5:0] + */ + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_DFE_UPDATE_CRTL); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp &= ~0xFFF; + /* Hard coding */ + /* No defines in HBA_Info_Page */ + tmp |= ((0x3F << 6) | (0x0 << 0)); + mvs_write_port_vsr_data(mvi, phy_id, tmp); + + /* R1A0h Interface and Digital Reference Clock Control/Reserved_50h + * + * FFE_TRAIN_EN 3 + */ + mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp &= ~0x8; + /* Hard coding */ + /* No defines in HBA_Info_Page */ + tmp |= (0 << 3); + mvs_write_port_vsr_data(mvi, phy_id, tmp); +} + +/*Notice: this function must be called when phy is disabled*/ +static void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate) +{ + union reg_phy_cfg phy_cfg, phy_cfg_tmp; + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); + phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, phy_id); + phy_cfg.v = 0; + phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy; + phy_cfg.u.sas_support = 1; + phy_cfg.u.sata_support = 1; + phy_cfg.u.sata_host_mode = 1; + + switch (rate) { + case 0x0: + /* support 1.5 Gbps */ + phy_cfg.u.speed_support = 1; + phy_cfg.u.snw_3_support = 0; + phy_cfg.u.tx_lnk_parity = 1; + phy_cfg.u.tx_spt_phs_lnk_rate = 0x30; + break; + case 0x1: + + /* support 1.5, 3.0 Gbps */ + phy_cfg.u.speed_support = 3; + phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c; + phy_cfg.u.tx_lgcl_lnk_rate = 0x08; + break; + case 0x2: + default: + /* support 1.5, 3.0, 6.0 Gbps */ + phy_cfg.u.speed_support = 7; + phy_cfg.u.snw_3_support = 1; + phy_cfg.u.tx_lnk_parity = 1; + phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f; + phy_cfg.u.tx_lgcl_lnk_rate = 0x09; + break; + } + mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v); +} + +static void mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id) +{ + u32 temp; + temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]); + if (temp == 0xFFFFFFFFL) { + mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6; + mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A; + mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3; + } + + temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]); + if (temp == 0xFFL) { + switch (mvi->pdev->revision) { + case VANIR_A0_REV: + case VANIR_B0_REV: + mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7; + mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7; + break; + case VANIR_C0_REV: + case VANIR_C1_REV: + case VANIR_C2_REV: + default: + mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7; + mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC; + break; + } + } + + temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]); + if (temp == 0xFFL) + /*set default phy_rate = 6Gbps*/ + mvi->hba_info_param.phy_rate[phy_id] = 0x2; + + set_phy_tuning(mvi, phy_id, + 
mvi->hba_info_param.phy_tuning[phy_id]); + set_phy_ffe_tuning(mvi, phy_id, + mvi->hba_info_param.ffe_ctl[phy_id]); + set_phy_rate(mvi, phy_id, + mvi->hba_info_param.phy_rate[phy_id]); +} + +static void mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(MVS_PCS); + tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); + mw32(MVS_PCS, tmp); +} + +static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) +{ + u32 tmp; + u32 delay = 5000; + if (hard == MVS_PHY_TUNE) { + mvs_write_port_cfg_addr(mvi, phy_id, PHYR_SATA_CTL); + tmp = mvs_read_port_cfg_data(mvi, phy_id); + mvs_write_port_cfg_data(mvi, phy_id, tmp|0x20000000); + mvs_write_port_cfg_data(mvi, phy_id, tmp|0x100000); + return; + } + tmp = mvs_read_port_irq_stat(mvi, phy_id); + tmp &= ~PHYEV_RDY_CH; + mvs_write_port_irq_stat(mvi, phy_id, tmp); + if (hard) { + tmp = mvs_read_phy_ctl(mvi, phy_id); + tmp |= PHY_RST_HARD; + mvs_write_phy_ctl(mvi, phy_id, tmp); + do { + tmp = mvs_read_phy_ctl(mvi, phy_id); + udelay(10); + delay--; + } while ((tmp & PHY_RST_HARD) && delay); + if (!delay) + mv_dprintk("phy hard reset failed.\n"); + } else { + tmp = mvs_read_phy_ctl(mvi, phy_id); + tmp |= PHY_RST; + mvs_write_phy_ctl(mvi, phy_id, tmp); + } +} + +static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id) +{ + u32 tmp; + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000); +} + +static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id) +{ + u32 tmp; + u8 revision = 0; + + revision = mvi->pdev->revision; + if (revision == VANIR_A0_REV) { + mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA); + mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1); + } + if (revision == VANIR_B0_REV) { + mvs_write_port_vsr_addr(mvi, phy_id, CMD_APP_MEM_CTL); + mvs_write_port_vsr_data(mvi, phy_id, 0x08001006); + mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA); + mvs_write_port_vsr_data(mvi, phy_id, 0x0000705f); + } + + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp |= bit(0); + mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff); +} + +static void mvs_94xx_sgpio_init(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + u32 tmp; + + tmp = mr32(MVS_HST_CHIP_CONFIG); + tmp |= 0x100; + mw32(MVS_HST_CHIP_CONFIG, tmp); + + mw32(MVS_SGPIO_CTRL + MVS_SGPIO_HOST_OFFSET * mvi->id, + MVS_SGPIO_CTRL_SDOUT_AUTO << MVS_SGPIO_CTRL_SDOUT_SHIFT); + + mw32(MVS_SGPIO_CFG1 + MVS_SGPIO_HOST_OFFSET * mvi->id, + 8 << MVS_SGPIO_CFG1_LOWA_SHIFT | + 8 << MVS_SGPIO_CFG1_HIA_SHIFT | + 4 << MVS_SGPIO_CFG1_LOWB_SHIFT | + 4 << MVS_SGPIO_CFG1_HIB_SHIFT | + 2 << MVS_SGPIO_CFG1_MAXACTON_SHIFT | + 1 << MVS_SGPIO_CFG1_FORCEACTOFF_SHIFT + ); + + mw32(MVS_SGPIO_CFG2 + MVS_SGPIO_HOST_OFFSET * mvi->id, + (300000 / 100) << MVS_SGPIO_CFG2_CLK_SHIFT | /* 100kHz clock */ + 66 << MVS_SGPIO_CFG2_BLINK_SHIFT /* (66 * 0,121 Hz?)*/ + ); + + mw32(MVS_SGPIO_CFG0 + MVS_SGPIO_HOST_OFFSET * mvi->id, + MVS_SGPIO_CFG0_ENABLE | + MVS_SGPIO_CFG0_BLINKA | + MVS_SGPIO_CFG0_BLINKB | + /* 3*4 data bits / PDU */ + (12 - 1) << MVS_SGPIO_CFG0_AUT_BITLEN_SHIFT + ); + + mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id, + DEFAULT_SGPIO_BITS); + + mw32(MVS_SGPIO_DSRC + MVS_SGPIO_HOST_OFFSET * mvi->id, + ((mvi->id * 4) + 3) << (8 * 3) | + ((mvi->id * 4) + 2) << (8 * 2) | + ((mvi->id * 4) + 1) << (8 * 1) | + ((mvi->id * 4) + 0) << (8 * 0)); + +} + +static 
int mvs_94xx_init(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + int i; + u32 tmp, cctl; + u8 revision; + + revision = mvi->pdev->revision; + mvs_show_pcie_usage(mvi); + if (mvi->flags & MVF_FLAG_SOC) { + tmp = mr32(MVS_PHY_CTL); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_PHY_DSBL; + mw32(MVS_PHY_CTL, tmp); + } + + /* Init Chip */ + /* make sure RST is set; HBA_RST /should/ have done that for us */ + cctl = mr32(MVS_CTL) & 0xFFFF; + if (cctl & CCTL_RST) + cctl &= ~CCTL_RST; + else + mw32_f(MVS_CTL, cctl | CCTL_RST); + + if (mvi->flags & MVF_FLAG_SOC) { + tmp = mr32(MVS_PHY_CTL); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_COM_ON; + tmp &= ~PCTL_PHY_DSBL; + tmp |= PCTL_LINK_RST; + mw32(MVS_PHY_CTL, tmp); + msleep(100); + tmp &= ~PCTL_LINK_RST; + mw32(MVS_PHY_CTL, tmp); + msleep(100); + } + + /* disable Multiplexing, enable phy implemented */ + mw32(MVS_PORTS_IMP, 0xFF); + + if (revision == VANIR_A0_REV) { + mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET); + mw32(MVS_PA_VSR_PORT, 0x00018080); + } + mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2); + if (revision == VANIR_A0_REV || revision == VANIR_B0_REV) + /* set 6G/3G/1.5G, multiplexing, without SSC */ + mw32(MVS_PA_VSR_PORT, 0x0084d4fe); + else + /* set 6G/3G/1.5G, multiplexing, with and without SSC */ + mw32(MVS_PA_VSR_PORT, 0x0084fffe); + + if (revision == VANIR_B0_REV) { + mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL); + mw32(MVS_PA_VSR_PORT, 0x08001006); + mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA); + mw32(MVS_PA_VSR_PORT, 0x0000705f); + } + + /* reset control */ + mw32(MVS_PCS, 0); /* MVS_PCS */ + mw32(MVS_STP_REG_SET_0, 0); + mw32(MVS_STP_REG_SET_1, 0); + + /* init phys */ + mvs_phy_hacks(mvi); + + /* disable non data frame retry */ + tmp = mvs_cr32(mvi, CMD_SAS_CTL1); + if ((revision == VANIR_A0_REV) || + (revision == VANIR_B0_REV) || + (revision == VANIR_C0_REV)) { + tmp &= ~0xffff; + tmp |= 0x007f; + mvs_cw32(mvi, CMD_SAS_CTL1, tmp); + } + + /* set LED blink when IO*/ + mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED); + tmp = mr32(MVS_PA_VSR_PORT); + tmp &= 0xFFFF00FF; + tmp |= 0x00003300; + mw32(MVS_PA_VSR_PORT, tmp); + + mw32(MVS_CMD_LIST_LO, mvi->slot_dma); + mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); + + mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma); + mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); + + mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ); + mw32(MVS_TX_LO, mvi->tx_dma); + mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16); + + mw32(MVS_RX_CFG, MVS_RX_RING_SZ); + mw32(MVS_RX_LO, mvi->rx_dma); + mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16); + + for (i = 0; i < mvi->chip->n_phy; i++) { + mvs_94xx_phy_disable(mvi, i); + /* set phy local SAS address */ + mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4, + cpu_to_le64(mvi->phy[i].dev_sas_addr)); + + mvs_94xx_enable_xmt(mvi, i); + mvs_94xx_config_reg_from_hba(mvi, i); + mvs_94xx_phy_enable(mvi, i); + + mvs_94xx_phy_reset(mvi, i, PHY_RST_HARD); + msleep(500); + mvs_94xx_detect_porttype(mvi, i); + } + + if (mvi->flags & MVF_FLAG_SOC) { + /* set select registers */ + writel(0x0E008000, regs + 0x000); + writel(0x59000008, regs + 0x004); + writel(0x20, regs + 0x008); + writel(0x20, regs + 0x00c); + writel(0x20, regs + 0x010); + writel(0x20, regs + 0x014); + writel(0x20, regs + 0x018); + writel(0x20, regs + 0x01c); + } + for (i = 0; i < mvi->chip->n_phy; i++) { + /* clear phy int status */ + tmp = mvs_read_port_irq_stat(mvi, i); + tmp &= ~PHYEV_SIG_FIS; + mvs_write_port_irq_stat(mvi, i, tmp); + + /* set phy int mask */ + tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | + PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ; + 
mvs_write_port_irq_mask(mvi, i, tmp); + + msleep(100); + mvs_update_phyinfo(mvi, i, 1); + } + + /* little endian for open address and command table, etc. */ + cctl = mr32(MVS_CTL); + cctl |= CCTL_ENDIAN_CMD; + cctl &= ~CCTL_ENDIAN_OPEN; + cctl |= CCTL_ENDIAN_RSP; + mw32_f(MVS_CTL, cctl); + + /* reset CMD queue */ + tmp = mr32(MVS_PCS); + tmp |= PCS_CMD_RST; + tmp &= ~PCS_SELF_CLEAR; + mw32(MVS_PCS, tmp); + /* + * the max count is 0x1ff, while our max slot is 0x200, + * it will make count 0. + */ + tmp = 0; + if (MVS_CHIP_SLOT_SZ > 0x1ff) + mw32(MVS_INT_COAL, 0x1ff | COAL_EN); + else + mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN); + + /* default interrupt coalescing time is 128us */ + tmp = 0x10000 | interrupt_coalescing; + mw32(MVS_INT_COAL_TMOUT, tmp); + + /* ladies and gentlemen, start your engines */ + mw32(MVS_TX_CFG, 0); + mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); + mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN); + /* enable CMD/CMPL_Q/RESP mode */ + mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN | + PCS_CMD_EN | PCS_CMD_STOP_ERR); + + /* enable completion queue interrupt */ + tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP | + CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR); + tmp |= CINT_PHY_MASK; + mw32(MVS_INT_MASK, tmp); + + tmp = mvs_cr32(mvi, CMD_LINK_TIMER); + tmp |= 0xFFFF0000; + mvs_cw32(mvi, CMD_LINK_TIMER, tmp); + + /* tune STP performance */ + tmp = 0x003F003F; + mvs_cw32(mvi, CMD_PL_TIMER, tmp); + + /* This can improve expander large block size seq write performance */ + tmp = mvs_cr32(mvi, CMD_PORT_LAYER_TIMER1); + tmp |= 0xFFFF007F; + mvs_cw32(mvi, CMD_PORT_LAYER_TIMER1, tmp); + + /* change the connection open-close behavior (bit 9) + * set bit8 to 1 for performance tuning */ + tmp = mvs_cr32(mvi, CMD_SL_MODE0); + tmp |= 0x00000300; + /* set bit0 to 0 to enable retry for no_dest reject case */ + tmp &= 0xFFFFFFFE; + mvs_cw32(mvi, CMD_SL_MODE0, tmp); + + /* Enable SRS interrupt */ + mw32(MVS_INT_MASK_SRS_0, 0xFFFF); + + mvs_94xx_sgpio_init(mvi); + + return 0; +} + +static int mvs_94xx_ioremap(struct mvs_info *mvi) +{ + if (!mvs_ioremap(mvi, 2, -1)) { + mvi->regs_ex = mvi->regs + 0x10200; + mvi->regs += 0x20000; + if (mvi->id == 1) + mvi->regs += 0x4000; + return 0; + } + return -1; +} + +static void mvs_94xx_iounmap(struct mvs_info *mvi) +{ + if (mvi->regs) { + mvi->regs -= 0x20000; + if (mvi->id == 1) + mvi->regs -= 0x4000; + mvs_iounmap(mvi->regs); + } +} + +static void mvs_94xx_interrupt_enable(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs_ex; + u32 tmp; + + tmp = mr32(MVS_GBL_CTL); + tmp |= (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B); + mw32(MVS_GBL_INT_STAT, tmp); + writel(tmp, regs + 0x0C); + writel(tmp, regs + 0x10); + writel(tmp, regs + 0x14); + writel(tmp, regs + 0x18); + mw32(MVS_GBL_CTL, tmp); +} + +static void mvs_94xx_interrupt_disable(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs_ex; + u32 tmp; + + tmp = mr32(MVS_GBL_CTL); + + tmp &= ~(MVS_IRQ_SAS_A | MVS_IRQ_SAS_B); + mw32(MVS_GBL_INT_STAT, tmp); + writel(tmp, regs + 0x0C); + writel(tmp, regs + 0x10); + writel(tmp, regs + 0x14); + writel(tmp, regs + 0x18); + mw32(MVS_GBL_CTL, tmp); +} + +static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq) +{ + void __iomem *regs = mvi->regs_ex; + u32 stat = 0; + if (!(mvi->flags & MVF_FLAG_SOC)) { + stat = mr32(MVS_GBL_INT_STAT); + + if (!(stat & (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B))) + return 0; + } + return stat; +} + +static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat) +{ + void __iomem *regs = mvi->regs; + + if 
(((stat & MVS_IRQ_SAS_A) && mvi->id == 0) || + ((stat & MVS_IRQ_SAS_B) && mvi->id == 1)) { + mw32_f(MVS_INT_STAT, CINT_DONE); + + spin_lock(&mvi->lock); + mvs_int_full(mvi); + spin_unlock(&mvi->lock); + } + return IRQ_HANDLED; +} + +static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx) +{ + u32 tmp; + tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3)); + if (tmp & 1 << (slot_idx % 32)) { + mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx); + mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3), + 1 << (slot_idx % 32)); + do { + tmp = mvs_cr32(mvi, + MVS_COMMAND_ACTIVE + (slot_idx >> 3)); + } while (tmp & 1 << (slot_idx % 32)); + } +} + +static void +mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + if (clear_all) { + tmp = mr32(MVS_INT_STAT_SRS_0); + if (tmp) { + mv_dprintk("check SRS 0 %08X.\n", tmp); + mw32(MVS_INT_STAT_SRS_0, tmp); + } + tmp = mr32(MVS_INT_STAT_SRS_1); + if (tmp) { + mv_dprintk("check SRS 1 %08X.\n", tmp); + mw32(MVS_INT_STAT_SRS_1, tmp); + } + } else { + if (reg_set > 31) + tmp = mr32(MVS_INT_STAT_SRS_1); + else + tmp = mr32(MVS_INT_STAT_SRS_0); + + if (tmp & (1 << (reg_set % 32))) { + mv_dprintk("register set 0x%x was stopped.\n", reg_set); + if (reg_set > 31) + mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32)); + else + mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32)); + } + } +} + +static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, + u32 tfs) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + mvs_94xx_clear_srs_irq(mvi, 0, 1); + + tmp = mr32(MVS_INT_STAT); + mw32(MVS_INT_STAT, tmp | CINT_CI_STOP); + tmp = mr32(MVS_PCS) | 0xFF00; + mw32(MVS_PCS, tmp); +} + +static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 err_0, err_1; + u8 i; + struct mvs_device *device; + + err_0 = mr32(MVS_NON_NCQ_ERR_0); + err_1 = mr32(MVS_NON_NCQ_ERR_1); + + mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n", + err_0, err_1); + for (i = 0; i < 32; i++) { + if (err_0 & bit(i)) { + device = mvs_find_dev_by_reg_set(mvi, i); + if (device) + mvs_release_task(mvi, device->sas_device); + } + if (err_1 & bit(i)) { + device = mvs_find_dev_by_reg_set(mvi, i+32); + if (device) + mvs_release_task(mvi, device->sas_device); + } + } + + mw32(MVS_NON_NCQ_ERR_0, err_0); + mw32(MVS_NON_NCQ_ERR_1, err_1); +} + +static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) +{ + void __iomem *regs = mvi->regs; + u8 reg_set = *tfs; + + if (*tfs == MVS_ID_NOT_MAPPED) + return; + + mvi->sata_reg_set &= ~bit(reg_set); + if (reg_set < 32) + w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set); + else + w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32)); + + *tfs = MVS_ID_NOT_MAPPED; + + return; +} + +static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs) +{ + int i; + void __iomem *regs = mvi->regs; + + if (*tfs != MVS_ID_NOT_MAPPED) + return 0; + + i = mv_ffc64(mvi->sata_reg_set); + if (i >= 32) { + mvi->sata_reg_set |= bit(i); + w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32)); + *tfs = i; + return 0; + } else if (i >= 0) { + mvi->sata_reg_set |= bit(i); + w_reg_set_enable(i, (u32)mvi->sata_reg_set); + *tfs = i; + return 0; + } + return MVS_ID_NOT_MAPPED; +} + +static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd) +{ + int i; + struct scatterlist *sg; + struct mvs_prd *buf_prd = prd; + struct mvs_prd_imt im_len; + *(u32 *)&im_len = 0; + for_each_sg(scatter, sg, nr, i) { + buf_prd->addr = 
cpu_to_le64(sg_dma_address(sg)); + im_len.len = sg_dma_len(sg); + buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len); + buf_prd++; + } +} + +static int mvs_94xx_oob_done(struct mvs_info *mvi, int i) +{ + u32 phy_st; + phy_st = mvs_read_phy_ctl(mvi, i); + if (phy_st & PHY_READY_MASK) + return 1; + return 0; +} + +static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id, + struct sas_identify_frame *id) +{ + int i; + u32 id_frame[7]; + + for (i = 0; i < 7; i++) { + mvs_write_port_cfg_addr(mvi, port_id, + CONFIG_ID_FRAME0 + i * 4); + id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id)); + } + memcpy(id, id_frame, 28); +} + +static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id, + struct sas_identify_frame *id) +{ + int i; + u32 id_frame[7]; + + for (i = 0; i < 7; i++) { + mvs_write_port_cfg_addr(mvi, port_id, + CONFIG_ATT_ID_FRAME0 + i * 4); + id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id)); + mv_dprintk("94xx phy %d atta frame %d %x.\n", + port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]); + } + memcpy(id, id_frame, 28); +} + +static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id) +{ + u32 att_dev_info = 0; + + att_dev_info |= id->dev_type; + if (id->stp_iport) + att_dev_info |= PORT_DEV_STP_INIT; + if (id->smp_iport) + att_dev_info |= PORT_DEV_SMP_INIT; + if (id->ssp_iport) + att_dev_info |= PORT_DEV_SSP_INIT; + if (id->stp_tport) + att_dev_info |= PORT_DEV_STP_TRGT; + if (id->smp_tport) + att_dev_info |= PORT_DEV_SMP_TRGT; + if (id->ssp_tport) + att_dev_info |= PORT_DEV_SSP_TRGT; + + att_dev_info |= (u32)id->phy_id<<24; + return att_dev_info; +} + +static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id) +{ + return mvs_94xx_make_dev_info(id); +} + +static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i, + struct sas_identify_frame *id) +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status); + sas_phy->linkrate = + (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; + sas_phy->linkrate += 0x8; + mv_dprintk("get link rate is %d\n", sas_phy->linkrate); + phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; + phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS; + mvs_94xx_get_dev_identify_frame(mvi, i, id); + phy->dev_info = mvs_94xx_make_dev_info(id); + + if (phy->phy_type & PORT_TYPE_SAS) { + mvs_94xx_get_att_identify_frame(mvi, i, id); + phy->att_dev_info = mvs_94xx_make_att_info(id); + phy->att_dev_sas_addr = *(u64 *)id->sas_addr; + } else { + phy->att_dev_info = PORT_DEV_STP_TRGT | 1; + } + + /* enable spin up bit */ + mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); + mvs_write_port_cfg_data(mvi, i, 0x04); + +} + +static void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, + struct sas_phy_linkrates *rates) +{ + u32 lrmax = 0; + u32 tmp; + + tmp = mvs_read_phy_ctl(mvi, phy_id); + lrmax = (rates->maximum_linkrate - SAS_LINK_RATE_1_5_GBPS) << 12; + + if (lrmax) { + tmp &= ~(0x3 << 12); + tmp |= lrmax; + } + mvs_write_phy_ctl(mvi, phy_id, tmp); + mvs_94xx_phy_reset(mvi, phy_id, PHY_RST_HARD); +} + +static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi) +{ + u32 tmp; + void __iomem *regs = mvi->regs; + tmp = mr32(MVS_STP_REG_SET_0); + mw32(MVS_STP_REG_SET_0, 0); + mw32(MVS_STP_REG_SET_0, tmp); + tmp = mr32(MVS_STP_REG_SET_1); + mw32(MVS_STP_REG_SET_1, 0); + mw32(MVS_STP_REG_SET_1, tmp); +} + + +static u32 mvs_94xx_spi_read_data(struct mvs_info 
*mvi) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + return mr32(SPI_RD_DATA_REG_94XX); +} + +static void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + + mw32(SPI_RD_DATA_REG_94XX, data); +} + + +static int mvs_94xx_spi_buildcmd(struct mvs_info *mvi, + u32 *dwCmd, + u8 cmd, + u8 read, + u8 length, + u32 addr + ) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + u32 dwTmp; + + dwTmp = ((u32)cmd << 8) | ((u32)length << 4); + if (read) + dwTmp |= SPI_CTRL_READ_94XX; + + if (addr != MV_MAX_U32) { + mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL)); + dwTmp |= SPI_ADDR_VLD_94XX; + } + + *dwCmd = dwTmp; + return 0; +} + + +static int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX); + + return 0; +} + +static int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + u32 i, dwTmp; + + for (i = 0; i < timeout; i++) { + dwTmp = mr32(SPI_CTRL_REG_94XX); + if (!(dwTmp & SPI_CTRL_SpiStart_94XX)) + return 0; + msleep(10); + } + + return -1; +} + +static void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask, + int buf_len, int from, void *prd) +{ + int i; + struct mvs_prd *buf_prd = prd; + dma_addr_t buf_dma; + struct mvs_prd_imt im_len; + + *(u32 *)&im_len = 0; + buf_prd += from; + +#define PRD_CHAINED_ENTRY 0x01 + if ((mvi->pdev->revision == VANIR_A0_REV) || + (mvi->pdev->revision == VANIR_B0_REV)) + buf_dma = (phy_mask <= 0x08) ? + mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1; + else + return; + + for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) { + if (i == MAX_SG_ENTRY - 1) { + buf_prd->addr = cpu_to_le64(virt_to_phys(buf_prd - 1)); + im_len.len = 2; + im_len.misc_ctl = PRD_CHAINED_ENTRY; + } else { + buf_prd->addr = cpu_to_le64(buf_dma); + im_len.len = buf_len; + } + buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len); + } +} + +static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time) +{ + void __iomem *regs = mvi->regs; + u32 tmp = 0; + /* + * the max count is 0x1ff, while our max slot is 0x200, + * it will make count 0. + */ + if (time == 0) { + mw32(MVS_INT_COAL, 0); + mw32(MVS_INT_COAL_TMOUT, 0x10000); + } else { + if (MVS_CHIP_SLOT_SZ > 0x1ff) + mw32(MVS_INT_COAL, 0x1ff|COAL_EN); + else + mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN); + + tmp = 0x10000 | time; + mw32(MVS_INT_COAL_TMOUT, tmp); + } + +} + +static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv, + u8 reg_type, u8 reg_index, + u8 reg_count, u8 *write_data) +{ + int i; + + switch (reg_type) { + + case SAS_GPIO_REG_TX_GP: + if (reg_index == 0) + return -EINVAL; + + if (reg_count > 1) + return -EINVAL; + + if (reg_count == 0) + return 0; + + /* maximum supported bits = hosts * 4 drives * 3 bits */ + for (i = 0; i < mvs_prv->n_host * 4 * 3; i++) { + + /* select host */ + struct mvs_info *mvi = mvs_prv->mvi[i/(4*3)]; + + void __iomem *regs = mvi->regs_ex - 0x10200; + + int drive = (i/3) & (4-1); /* drive number on host */ + int driveshift = drive * 8; /* bit offset of drive */ + u32 block = ioread32be(regs + MVS_SGPIO_DCTRL + + MVS_SGPIO_HOST_OFFSET * mvi->id); + + /* + * if bit is set then create a mask with the first + * bit of the drive set in the mask ... + */ + u32 bit = get_unaligned_be32(write_data) & (1 << i) ? + 1 << driveshift : 0; + + /* + * ... 
and then shift it to the right position based + * on the led type (activity/id/fail) + */ + switch (i%3) { + case 0: /* activity */ + block &= ~((0x7 << MVS_SGPIO_DCTRL_ACT_SHIFT) + << driveshift); + /* hardwire activity bit to SOF */ + block |= LED_BLINKA_SOF << ( + MVS_SGPIO_DCTRL_ACT_SHIFT + + driveshift); + break; + case 1: /* id */ + block &= ~((0x3 << MVS_SGPIO_DCTRL_LOC_SHIFT) + << driveshift); + block |= bit << MVS_SGPIO_DCTRL_LOC_SHIFT; + break; + case 2: /* fail */ + block &= ~((0x7 << MVS_SGPIO_DCTRL_ERR_SHIFT) + << driveshift); + block |= bit << MVS_SGPIO_DCTRL_ERR_SHIFT; + break; + } + + iowrite32be(block, + regs + MVS_SGPIO_DCTRL + + MVS_SGPIO_HOST_OFFSET * mvi->id); + + } + + return reg_count; + + case SAS_GPIO_REG_TX: + if (reg_index + reg_count > mvs_prv->n_host) + return -EINVAL; + + for (i = 0; i < reg_count; i++) { + struct mvs_info *mvi = mvs_prv->mvi[i+reg_index]; + void __iomem *regs = mvi->regs_ex - 0x10200; + + mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id, + ((u32 *) write_data)[i]); + } + return reg_count; + } + return -ENOSYS; +} + +const struct mvs_dispatch mvs_94xx_dispatch = { + "mv94xx", + mvs_94xx_init, + NULL, + mvs_94xx_ioremap, + mvs_94xx_iounmap, + mvs_94xx_isr, + mvs_94xx_isr_status, + mvs_94xx_interrupt_enable, + mvs_94xx_interrupt_disable, + mvs_read_phy_ctl, + mvs_write_phy_ctl, + mvs_read_port_cfg_data, + mvs_write_port_cfg_data, + mvs_write_port_cfg_addr, + mvs_read_port_vsr_data, + mvs_write_port_vsr_data, + mvs_write_port_vsr_addr, + mvs_read_port_irq_stat, + mvs_write_port_irq_stat, + mvs_read_port_irq_mask, + mvs_write_port_irq_mask, + mvs_94xx_command_active, + mvs_94xx_clear_srs_irq, + mvs_94xx_issue_stop, + mvs_start_delivery, + mvs_rx_update, + mvs_int_full, + mvs_94xx_assign_reg_set, + mvs_94xx_free_reg_set, + mvs_get_prd_size, + mvs_get_prd_count, + mvs_94xx_make_prd, + mvs_94xx_detect_porttype, + mvs_94xx_oob_done, + mvs_94xx_fix_phy_info, + NULL, + mvs_94xx_phy_set_link_rate, + mvs_hw_max_link_rate, + mvs_94xx_phy_disable, + mvs_94xx_phy_enable, + mvs_94xx_phy_reset, + NULL, + mvs_94xx_clear_active_cmds, + mvs_94xx_spi_read_data, + mvs_94xx_spi_write_data, + mvs_94xx_spi_buildcmd, + mvs_94xx_spi_issuecmd, + mvs_94xx_spi_waitdataready, + mvs_94xx_fix_dma, + mvs_94xx_tune_interrupt, + mvs_94xx_non_spec_ncq_error, + mvs_94xx_gpio_write, +}; + diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h new file mode 100644 index 000000000..a243182c1 --- /dev/null +++ b/drivers/scsi/mvsas/mv_94xx.h @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Marvell 88SE94xx hardware specific head file + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. 
+*/ + +#ifndef _MVS94XX_REG_H_ +#define _MVS94XX_REG_H_ + +#include + +#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS + +enum VANIR_REVISION_ID { + VANIR_A0_REV = 0xA0, + VANIR_B0_REV = 0x01, + VANIR_C0_REV = 0x02, + VANIR_C1_REV = 0x03, + VANIR_C2_REV = 0xC2, +}; + +enum host_registers { + MVS_HST_CHIP_CONFIG = 0x10104, /* chip configuration */ +}; + +enum hw_registers { + MVS_GBL_CTL = 0x04, /* global control */ + MVS_GBL_INT_STAT = 0x00, /* global irq status */ + MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ + + MVS_PHY_CTL = 0x40, /* SOC PHY Control */ + MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */ + + MVS_GBL_PORT_TYPE = 0xa0, /* port type */ + + MVS_CTL = 0x100, /* SAS/SATA port configuration */ + MVS_PCS = 0x104, /* SAS/SATA port control/status */ + MVS_CMD_LIST_LO = 0x108, /* cmd list addr */ + MVS_CMD_LIST_HI = 0x10C, + MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */ + MVS_RX_FIS_HI = 0x114, + MVS_STP_REG_SET_0 = 0x118, /* STP/SATA Register Set Enable */ + MVS_STP_REG_SET_1 = 0x11C, + MVS_TX_CFG = 0x120, /* TX configuration */ + MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ + MVS_TX_HI = 0x128, + + MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */ + MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */ + MVS_RX_CFG = 0x134, /* RX configuration */ + MVS_RX_LO = 0x138, /* RX (completion) ring addr */ + MVS_RX_HI = 0x13C, + MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */ + + MVS_INT_COAL = 0x148, /* Int coalescing config */ + MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ + MVS_INT_STAT = 0x150, /* Central int status */ + MVS_INT_MASK = 0x154, /* Central int enable */ + MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */ + MVS_INT_MASK_SRS_0 = 0x15C, + MVS_INT_STAT_SRS_1 = 0x160, + MVS_INT_MASK_SRS_1 = 0x164, + MVS_NON_NCQ_ERR_0 = 0x168, /* SRS Non-specific NCQ Error */ + MVS_NON_NCQ_ERR_1 = 0x16C, + MVS_CMD_ADDR = 0x170, /* Command register port (addr) */ + MVS_CMD_DATA = 0x174, /* Command register port (data) */ + MVS_MEM_PARITY_ERR = 0x178, /* Memory parity error */ + + /* ports 1-3 follow after this */ + MVS_P0_INT_STAT = 0x180, /* port0 interrupt status */ + MVS_P0_INT_MASK = 0x184, /* port0 interrupt mask */ + /* ports 5-7 follow after this */ + MVS_P4_INT_STAT = 0x1A0, /* Port4 interrupt status */ + MVS_P4_INT_MASK = 0x1A4, /* Port4 interrupt enable mask */ + + /* ports 1-3 follow after this */ + MVS_P0_SER_CTLSTAT = 0x1D0, /* port0 serial control/status */ + /* ports 5-7 follow after this */ + MVS_P4_SER_CTLSTAT = 0x1E0, /* port4 serial control/status */ + + /* ports 1-3 follow after this */ + MVS_P0_CFG_ADDR = 0x200, /* port0 phy register address */ + MVS_P0_CFG_DATA = 0x204, /* port0 phy register data */ + /* ports 5-7 follow after this */ + MVS_P4_CFG_ADDR = 0x220, /* Port4 config address */ + MVS_P4_CFG_DATA = 0x224, /* Port4 config data */ + + /* phys 1-3 follow after this */ + MVS_P0_VSR_ADDR = 0x250, /* phy0 VSR address */ + MVS_P0_VSR_DATA = 0x254, /* phy0 VSR data */ + /* phys 1-3 follow after this */ + /* multiplexing */ + MVS_P4_VSR_ADDR = 0x250, /* phy4 VSR address */ + MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */ + MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */ + MVS_PA_VSR_PORT = 0x294, /* All port VSR data */ + MVS_COMMAND_ACTIVE = 0x300, +}; + +enum pci_cfg_registers { + PCR_PHY_CTL = 0x40, + PCR_PHY_CTL2 = 0x90, + PCR_DEV_CTRL = 0x78, + PCR_LINK_STAT = 0x82, +}; + +/* SAS/SATA Vendor Specific Port Registers */ +enum sas_sata_vsp_regs { + VSR_PHY_STAT = 0x00 * 4, /* Phy Interrupt Status */ + VSR_PHY_MODE1 = 0x01 * 4, /* 
phy Interrupt Enable */ + VSR_PHY_MODE2 = 0x02 * 4, /* Phy Configuration */ + VSR_PHY_MODE3 = 0x03 * 4, /* Phy Status */ + VSR_PHY_MODE4 = 0x04 * 4, /* Phy Counter 0 */ + VSR_PHY_MODE5 = 0x05 * 4, /* Phy Counter 1 */ + VSR_PHY_MODE6 = 0x06 * 4, /* Event Counter Control */ + VSR_PHY_MODE7 = 0x07 * 4, /* Event Counter Select */ + VSR_PHY_MODE8 = 0x08 * 4, /* Event Counter 0 */ + VSR_PHY_MODE9 = 0x09 * 4, /* Event Counter 1 */ + VSR_PHY_MODE10 = 0x0A * 4, /* Event Counter 2 */ + VSR_PHY_MODE11 = 0x0B * 4, /* Event Counter 3 */ + VSR_PHY_ACT_LED = 0x0C * 4, /* Activity LED control */ + + VSR_PHY_FFE_CONTROL = 0x10C, + VSR_PHY_DFE_UPDATE_CRTL = 0x110, + VSR_REF_CLOCK_CRTL = 0x1A0, +}; + +enum chip_register_bits { + PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8), + PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 12), + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), + PHY_NEG_SPP_PHYS_LINK_RATE_MASK = + (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), +}; + +enum pci_interrupt_cause { + /* MAIN_IRQ_CAUSE (R10200) Bits*/ + MVS_IRQ_COM_IN_I2O_IOP0 = (1 << 0), + MVS_IRQ_COM_IN_I2O_IOP1 = (1 << 1), + MVS_IRQ_COM_IN_I2O_IOP2 = (1 << 2), + MVS_IRQ_COM_IN_I2O_IOP3 = (1 << 3), + MVS_IRQ_COM_OUT_I2O_HOS0 = (1 << 4), + MVS_IRQ_COM_OUT_I2O_HOS1 = (1 << 5), + MVS_IRQ_COM_OUT_I2O_HOS2 = (1 << 6), + MVS_IRQ_COM_OUT_I2O_HOS3 = (1 << 7), + MVS_IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8), + MVS_IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9), + MVS_IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10), + MVS_IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11), + MVS_IRQ_PCIF_DRBL0 = (1 << 12), + MVS_IRQ_PCIF_DRBL1 = (1 << 13), + MVS_IRQ_PCIF_DRBL2 = (1 << 14), + MVS_IRQ_PCIF_DRBL3 = (1 << 15), + MVS_IRQ_XOR_A = (1 << 16), + MVS_IRQ_XOR_B = (1 << 17), + MVS_IRQ_SAS_A = (1 << 18), + MVS_IRQ_SAS_B = (1 << 19), + MVS_IRQ_CPU_CNTRL = (1 << 20), + MVS_IRQ_GPIO = (1 << 21), + MVS_IRQ_UART = (1 << 22), + MVS_IRQ_SPI = (1 << 23), + MVS_IRQ_I2C = (1 << 24), + MVS_IRQ_SGPIO = (1 << 25), + MVS_IRQ_COM_ERR = (1 << 29), + MVS_IRQ_I2O_ERR = (1 << 30), + MVS_IRQ_PCIE_ERR = (1 << 31), +}; + +union reg_phy_cfg { + u32 v; + struct { + u32 phy_reset:1; + u32 sas_support:1; + u32 sata_support:1; + u32 sata_host_mode:1; + /* + * bit 2: 6Gbps support + * bit 1: 3Gbps support + * bit 0: 1.5Gbps support + */ + u32 speed_support:3; + u32 snw_3_support:1; + u32 tx_lnk_parity:1; + /* + * bit 5: G1 (1.5Gbps) Without SSC + * bit 4: G1 (1.5Gbps) with SSC + * bit 3: G2 (3.0Gbps) Without SSC + * bit 2: G2 (3.0Gbps) with SSC + * bit 1: G3 (6.0Gbps) without SSC + * bit 0: G3 (6.0Gbps) with SSC + */ + u32 tx_spt_phs_lnk_rate:6; + /* 8h: 1.5Gbps 9h: 3Gbps Ah: 6Gbps */ + u32 tx_lgcl_lnk_rate:4; + u32 tx_ssc_type:1; + u32 sata_spin_up_spt:1; + u32 sata_spin_up_en:1; + u32 bypass_oob:1; + u32 disable_phy:1; + u32 rsvd:8; + } u; +}; + +#define MAX_SG_ENTRY 255 + +struct mvs_prd_imt { +#ifndef __BIG_ENDIAN + __le32 len:22; + u8 _r_a:2; + u8 misc_ctl:4; + u8 inter_sel:4; +#else + u32 inter_sel:4; + u32 misc_ctl:4; + u32 _r_a:2; + u32 len:22; +#endif +}; + +struct mvs_prd { + /* 64-bit buffer address */ + __le64 addr; + /* 22-bit length */ + __le32 im_len; +} __attribute__ ((packed)); + +enum sgpio_registers { + MVS_SGPIO_HOST_OFFSET = 0x100, /* offset between hosts */ + + MVS_SGPIO_CFG0 = 0xc200, + MVS_SGPIO_CFG0_ENABLE = (1 << 0), /* enable pins */ + MVS_SGPIO_CFG0_BLINKB = (1 << 1), /* blink generators */ + MVS_SGPIO_CFG0_BLINKA = (1 << 2), + MVS_SGPIO_CFG0_INVSCLK = (1 << 3), /* invert signal? 
*/ + MVS_SGPIO_CFG0_INVSLOAD = (1 << 4), + MVS_SGPIO_CFG0_INVSDOUT = (1 << 5), + MVS_SGPIO_CFG0_SLOAD_FALLEDGE = (1 << 6), /* rise/fall edge? */ + MVS_SGPIO_CFG0_SDOUT_FALLEDGE = (1 << 7), + MVS_SGPIO_CFG0_SDIN_RISEEDGE = (1 << 8), + MVS_SGPIO_CFG0_MAN_BITLEN_SHIFT = 18, /* bits/frame manual mode */ + MVS_SGPIO_CFG0_AUT_BITLEN_SHIFT = 24, /* bits/frame auto mode */ + + MVS_SGPIO_CFG1 = 0xc204, /* blink timing register */ + MVS_SGPIO_CFG1_LOWA_SHIFT = 0, /* A off time */ + MVS_SGPIO_CFG1_HIA_SHIFT = 4, /* A on time */ + MVS_SGPIO_CFG1_LOWB_SHIFT = 8, /* B off time */ + MVS_SGPIO_CFG1_HIB_SHIFT = 12, /* B on time */ + MVS_SGPIO_CFG1_MAXACTON_SHIFT = 16, /* max activity on time */ + + /* force activity off time */ + MVS_SGPIO_CFG1_FORCEACTOFF_SHIFT = 20, + /* stretch activity on time */ + MVS_SGPIO_CFG1_STRCHACTON_SHIFT = 24, + /* stretch activiity off time */ + MVS_SGPIO_CFG1_STRCHACTOFF_SHIFT = 28, + + + MVS_SGPIO_CFG2 = 0xc208, /* clock speed register */ + MVS_SGPIO_CFG2_CLK_SHIFT = 0, + MVS_SGPIO_CFG2_BLINK_SHIFT = 20, + + MVS_SGPIO_CTRL = 0xc20c, /* SDOUT/SDIN mode control */ + MVS_SGPIO_CTRL_SDOUT_AUTO = 2, + MVS_SGPIO_CTRL_SDOUT_SHIFT = 2, + + MVS_SGPIO_DSRC = 0xc220, /* map ODn bits to drives */ + + MVS_SGPIO_DCTRL = 0xc238, + MVS_SGPIO_DCTRL_ERR_SHIFT = 0, + MVS_SGPIO_DCTRL_LOC_SHIFT = 3, + MVS_SGPIO_DCTRL_ACT_SHIFT = 5, +}; + +enum sgpio_led_status { + LED_OFF = 0, + LED_ON = 1, + LED_BLINKA = 2, + LED_BLINKA_INV = 3, + LED_BLINKA_SOF = 4, + LED_BLINKA_EOF = 5, + LED_BLINKB = 6, + LED_BLINKB_INV = 7, +}; + +#define DEFAULT_SGPIO_BITS ((LED_BLINKA_SOF << \ + MVS_SGPIO_DCTRL_ACT_SHIFT) << (8 * 3) | \ + (LED_BLINKA_SOF << \ + MVS_SGPIO_DCTRL_ACT_SHIFT) << (8 * 2) | \ + (LED_BLINKA_SOF << \ + MVS_SGPIO_DCTRL_ACT_SHIFT) << (8 * 1) | \ + (LED_BLINKA_SOF << \ + MVS_SGPIO_DCTRL_ACT_SHIFT) << (8 * 0)) + +/* + * these registers are accessed through port vendor + * specific address/data registers + */ +enum sas_sata_phy_regs { + GENERATION_1_SETTING = 0x118, + GENERATION_1_2_SETTING = 0x11C, + GENERATION_2_3_SETTING = 0x120, + GENERATION_3_4_SETTING = 0x124, +}; + +#define SPI_CTRL_REG_94XX 0xc800 +#define SPI_ADDR_REG_94XX 0xc804 +#define SPI_WR_DATA_REG_94XX 0xc808 +#define SPI_RD_DATA_REG_94XX 0xc80c +#define SPI_CTRL_READ_94XX (1U << 2) +#define SPI_ADDR_VLD_94XX (1U << 1) +#define SPI_CTRL_SpiStart_94XX (1U << 0) + +static inline int +mv_ffc64(u64 v) +{ + u64 x = ~v; + return x ? __ffs64(x) : -1; +} + +#define r_reg_set_enable(i) \ + (((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \ + mr32(MVS_STP_REG_SET_0)) + +#define w_reg_set_enable(i, tmp) \ + (((i) > 31) ? mw32(MVS_STP_REG_SET_1, tmp) : \ + mw32(MVS_STP_REG_SET_0, tmp)) + +extern const struct mvs_dispatch mvs_94xx_dispatch; +#endif + diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h new file mode 100644 index 000000000..0e7366faf --- /dev/null +++ b/drivers/scsi/mvsas/mv_chips.h @@ -0,0 +1,254 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Marvell 88SE64xx/88SE94xx register IO interface + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. 
+*/ + + +#ifndef _MV_CHIPS_H_ +#define _MV_CHIPS_H_ + +#define mr32(reg) readl(regs + reg) +#define mw32(reg, val) writel((val), regs + reg) +#define mw32_f(reg, val) do { \ + mw32(reg, val); \ + mr32(reg); \ + } while (0) + +#define iow32(reg, val) outl(val, (unsigned long)(regs + reg)) +#define ior32(reg) inl((unsigned long)(regs + reg)) +#define iow16(reg, val) outw((unsigned long)(val, regs + reg)) +#define ior16(reg) inw((unsigned long)(regs + reg)) +#define iow8(reg, val) outb((unsigned long)(val, regs + reg)) +#define ior8(reg) inb((unsigned long)(regs + reg)) + +static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr) +{ + void __iomem *regs = mvi->regs; + mw32(MVS_CMD_ADDR, addr); + return mr32(MVS_CMD_DATA); +} + +static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val) +{ + void __iomem *regs = mvi->regs; + mw32(MVS_CMD_ADDR, addr); + mw32(MVS_CMD_DATA, val); +} + +static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port) +{ + void __iomem *regs = mvi->regs; + return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) : + mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4); +} + +static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val) +{ + void __iomem *regs = mvi->regs; + if (port < 4) + mw32(MVS_P0_SER_CTLSTAT + port * 4, val); + else + mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val); +} + +static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off, + u32 off2, u32 port) +{ + void __iomem *regs = mvi->regs + off; + void __iomem *regs2 = mvi->regs + off2; + return (port < 4) ? readl(regs + port * 8) : + readl(regs2 + (port - 4) * 8); +} + +static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2, + u32 port, u32 val) +{ + void __iomem *regs = mvi->regs + off; + void __iomem *regs2 = mvi->regs + off2; + if (port < 4) + writel(val, regs + port * 8); + else + writel(val, regs2 + (port - 4) * 8); +} + +static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_CFG_DATA, + MVS_P4_CFG_DATA, port); +} + +static inline void mvs_write_port_cfg_data(struct mvs_info *mvi, + u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_CFG_DATA, + MVS_P4_CFG_DATA, port, val); +} + +static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi, + u32 port, u32 addr) +{ + mvs_write_port(mvi, MVS_P0_CFG_ADDR, + MVS_P4_CFG_ADDR, port, addr); + mdelay(10); +} + +static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_VSR_DATA, + MVS_P4_VSR_DATA, port); +} + +static inline void mvs_write_port_vsr_data(struct mvs_info *mvi, + u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_VSR_DATA, + MVS_P4_VSR_DATA, port, val); +} + +static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi, + u32 port, u32 addr) +{ + mvs_write_port(mvi, MVS_P0_VSR_ADDR, + MVS_P4_VSR_ADDR, port, addr); + mdelay(10); +} + +static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_INT_STAT, + MVS_P4_INT_STAT, port); +} + +static inline void mvs_write_port_irq_stat(struct mvs_info *mvi, + u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_INT_STAT, + MVS_P4_INT_STAT, port, val); +} + +static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_INT_MASK, + MVS_P4_INT_MASK, port); + +} + +static inline void mvs_write_port_irq_mask(struct mvs_info *mvi, + u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_INT_MASK, + MVS_P4_INT_MASK, port, val); +} + +static inline void 
mvs_phy_hacks(struct mvs_info *mvi) +{ + u32 tmp; + + tmp = mvs_cr32(mvi, CMD_PHY_TIMER); + tmp &= ~(1 << 9); + tmp |= (1 << 10); + mvs_cw32(mvi, CMD_PHY_TIMER, tmp); + + /* enable retry 127 times */ + mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f); + + /* extend open frame timeout to max */ + tmp = mvs_cr32(mvi, CMD_SAS_CTL0); + tmp &= ~0xffff; + tmp |= 0x3fff; + mvs_cw32(mvi, CMD_SAS_CTL0, tmp); + + mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000); + + /* not to halt for different port op during wideport link change */ + mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d); +} + +static inline void mvs_int_sata(struct mvs_info *mvi) +{ + u32 tmp; + void __iomem *regs = mvi->regs; + tmp = mr32(MVS_INT_STAT_SRS_0); + if (tmp) + mw32(MVS_INT_STAT_SRS_0, tmp); + MVS_CHIP_DISP->clear_active_cmds(mvi); +} + +static inline void mvs_int_full(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp, stat; + int i; + + stat = mr32(MVS_INT_STAT); + mvs_int_rx(mvi, false); + + for (i = 0; i < mvi->chip->n_phy; i++) { + tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); + if (tmp) + mvs_int_port(mvi, i, tmp); + } + + if (stat & CINT_NON_SPEC_NCQ_ERROR) + MVS_CHIP_DISP->non_spec_ncq_error(mvi); + + if (stat & CINT_SRS) + mvs_int_sata(mvi); + + mw32(MVS_INT_STAT, stat); +} + +static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx) +{ + void __iomem *regs = mvi->regs; + mw32(MVS_TX_PROD_IDX, tx); +} + +static inline u32 mvs_rx_update(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + return mr32(MVS_RX_CONS_IDX); +} + +static inline u32 mvs_get_prd_size(void) +{ + return sizeof(struct mvs_prd); +} + +static inline u32 mvs_get_prd_count(void) +{ + return MAX_SG_ENTRY; +} + +static inline void mvs_show_pcie_usage(struct mvs_info *mvi) +{ + u16 link_stat, link_spd; + const char *spd[] = { + "UnKnown", + "2.5", + "5.0", + }; + if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0) + return; + + pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat); + link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS; + if (link_spd >= 3) + link_spd = 0; + dev_printk(KERN_INFO, mvi->dev, + "mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n", + (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS, + spd[link_spd]); +} + +static inline u32 mvs_hw_max_link_rate(void) +{ + return MAX_LINK_RATE; +} + +#endif /* _MV_CHIPS_H_ */ + diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h new file mode 100644 index 000000000..8ef174cd4 --- /dev/null +++ b/drivers/scsi/mvsas/mv_defs.h @@ -0,0 +1,490 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Marvell 88SE64xx/88SE94xx const head file + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. 
+*/ + +#ifndef _MV_DEFS_H_ +#define _MV_DEFS_H_ + +#define PCI_DEVICE_ID_ARECA_1300 0x1300 +#define PCI_DEVICE_ID_ARECA_1320 0x1320 + +enum chip_flavors { + chip_6320, + chip_6440, + chip_6485, + chip_9480, + chip_9180, + chip_9445, + chip_9485, + chip_1300, + chip_1320 +}; + +/* driver compile-time configuration */ +enum driver_configuration { + MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ + MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ + /* software requires power-of-2 + ring size */ + MVS_SOC_SLOTS = 64, + MVS_SOC_TX_RING_SZ = MVS_SOC_SLOTS * 2, + MVS_SOC_RX_RING_SZ = MVS_SOC_SLOTS * 2, + + MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */ + MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ + MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ + MVS_OAF_SZ = 64, /* Open address frame buffer size */ + MVS_QUEUE_SIZE = 64, /* Support Queue depth */ + MVS_RSVD_SLOTS = 4, + MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2, +}; + +/* unchangeable hardware details */ +enum hardware_details { + MVS_MAX_PHYS = 8, /* max. possible phys */ + MVS_MAX_PORTS = 8, /* max. possible ports */ + MVS_SOC_PHYS = 4, /* soc phys */ + MVS_SOC_PORTS = 4, /* soc phys */ + MVS_MAX_DEVICES = 1024, /* max supported device */ +}; + +/* peripheral registers (BAR2) */ +enum peripheral_registers { + SPI_CTL = 0x10, /* EEPROM control */ + SPI_CMD = 0x14, /* EEPROM command */ + SPI_DATA = 0x18, /* EEPROM data */ +}; + +enum peripheral_register_bits { + TWSI_RDY = (1U << 7), /* EEPROM interface ready */ + TWSI_RD = (1U << 4), /* EEPROM read access */ + + SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */ +}; + +enum hw_register_bits { + /* MVS_GBL_CTL */ + INT_EN = (1U << 1), /* Global int enable */ + HBA_RST = (1U << 0), /* HBA reset */ + + /* MVS_GBL_INT_STAT */ + INT_XOR = (1U << 4), /* XOR engine event */ + INT_SAS_SATA = (1U << 0), /* SAS/SATA event */ + + /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */ + SATA_TARGET = (1U << 16), /* port0 SATA target enable */ + MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */ + MODE_AUTO_DET_PORT6 = (1U << 14), + MODE_AUTO_DET_PORT5 = (1U << 13), + MODE_AUTO_DET_PORT4 = (1U << 12), + MODE_AUTO_DET_PORT3 = (1U << 11), + MODE_AUTO_DET_PORT2 = (1U << 10), + MODE_AUTO_DET_PORT1 = (1U << 9), + MODE_AUTO_DET_PORT0 = (1U << 8), + MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 | + MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 | + MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 | + MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7, + MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */ + MODE_SAS_PORT6_MASK = (1U << 6), + MODE_SAS_PORT5_MASK = (1U << 5), + MODE_SAS_PORT4_MASK = (1U << 4), + MODE_SAS_PORT3_MASK = (1U << 3), + MODE_SAS_PORT2_MASK = (1U << 2), + MODE_SAS_PORT1_MASK = (1U << 1), + MODE_SAS_PORT0_MASK = (1U << 0), + MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK | + MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK | + MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK | + MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK, + + /* SAS_MODE value may be + * dictated (in hw) by values + * of SATA_TARGET & AUTO_DET + */ + + /* MVS_TX_CFG */ + TX_EN = (1U << 16), /* Enable TX */ + TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */ + + /* MVS_RX_CFG */ + RX_EN = (1U << 16), /* Enable RX */ + RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */ + + /* MVS_INT_COAL */ + COAL_EN = (1U << 16), /* Enable int coalescing */ + + /* MVS_INT_STAT, MVS_INT_MASK */ + CINT_I2C = (1U << 31), /* I2C event */ + CINT_SW0 = (1U << 30), /* software event 0 */ + CINT_SW1 = 
(1U << 29), /* software event 1 */ + CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */ + CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */ + CINT_MEM = (1U << 26), /* int mem parity err */ + CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */ + CINT_NON_SPEC_NCQ_ERROR = (1U << 25), /* Non specific NCQ error */ + CINT_SRS = (1U << 3), /* SRS event */ + CINT_CI_STOP = (1U << 1), /* cmd issue stopped */ + CINT_DONE = (1U << 0), /* cmd completion */ + + /* shl for ports 1-3 */ + CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */ + CINT_PORT = (1U << 8), /* port0 event */ + CINT_PORT_MASK_OFFSET = 8, + CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET), + CINT_PHY_MASK_OFFSET = 4, + CINT_PHY_MASK = (0x0F << CINT_PHY_MASK_OFFSET), + + /* TX (delivery) ring bits */ + TXQ_CMD_SHIFT = 29, + TXQ_CMD_SSP = 1, /* SSP protocol */ + TXQ_CMD_SMP = 2, /* SMP protocol */ + TXQ_CMD_STP = 3, /* STP/SATA protocol */ + TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP target free list */ + TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ + TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ + TXQ_MODE_TARGET = 0, + TXQ_MODE_INITIATOR = 1, + TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */ + TXQ_PRI_NORMAL = 0, + TXQ_PRI_HIGH = 1, + TXQ_SRS_SHIFT = 20, /* SATA register set */ + TXQ_SRS_MASK = 0x7f, + TXQ_PHY_SHIFT = 12, /* PHY bitmap */ + TXQ_PHY_MASK = 0xff, + TXQ_SLOT_MASK = 0xfff, /* slot number */ + + /* RX (completion) ring bits */ + RXQ_GOOD = (1U << 23), /* Response good */ + RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */ + RXQ_CMD_RX = (1U << 20), /* target cmd received */ + RXQ_ATTN = (1U << 19), /* attention */ + RXQ_RSP = (1U << 18), /* response frame xfer'd */ + RXQ_ERR = (1U << 17), /* err info rec xfer'd */ + RXQ_DONE = (1U << 16), /* cmd complete */ + RXQ_SLOT_MASK = 0xfff, /* slot number */ + + /* mvs_cmd_hdr bits */ + MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */ + MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */ + + /* SSP initiator only */ + MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */ + + /* SSP initiator or target */ + MCH_SSP_FR_TASK = 0x1, /* TASK frame */ + + /* SSP target only */ + MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */ + MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */ + MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */ + MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */ + + MCH_SSP_MODE_PASSTHRU = 1, + MCH_SSP_MODE_NORMAL = 0, + MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */ + MCH_FBURST = (1U << 11), /* first burst (SSP) */ + MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */ + MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */ + MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */ + MCH_RESET = (1U << 7), /* Reset (STP/SATA) */ + MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */ + MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */ + MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */ + MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/ + + CCTL_RST = (1U << 5), /* port logic reset */ + + /* 0(LSB first), 1(MSB first) */ + CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */ + CCTL_ENDIAN_RSP = (1U << 2), /* response frame */ + CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */ + CCTL_ENDIAN_CMD = (1U << 0), /* command table */ + + /* MVS_Px_SER_CTLSTAT (per-phy control) */ + PHY_SSP_RST = (1U << 3), /* reset SSP link layer */ + PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */ + PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */ + PHY_RST = (1U << 0), /* phy reset */ + PHY_READY_MASK = (1U << 20), + + /* MVS_Px_INT_STAT, 
MVS_Px_INT_MASK (per-phy events) */ + PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */ + PHYEV_DCDR_ERR = (1U << 23), /* STP Deocder Error */ + PHYEV_CRC_ERR = (1U << 22), /* STP CRC Error */ + PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */ + PHYEV_AN = (1U << 18), /* SATA async notification */ + PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */ + PHYEV_SIG_FIS = (1U << 16), /* signature FIS */ + PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */ + PHYEV_IU_BIG = (1U << 11), /* IU too long err */ + PHYEV_IU_SMALL = (1U << 10), /* IU too short err */ + PHYEV_UNK_TAG = (1U << 9), /* unknown tag */ + PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */ + PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */ + PHYEV_PORT_SEL = (1U << 6), /* port selector present */ + PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */ + PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */ + PHYEV_ID_FAIL = (1U << 3), /* identify failed */ + PHYEV_ID_DONE = (1U << 2), /* identify done */ + PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */ + PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */ + + /* MVS_PCS */ + PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */ + PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */ + PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6485 */ + PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ + PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ + PCS_SATA_RETRY_2 = (1U << 6), /* For 9180 */ + PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ + PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ + PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ + PCS_CMD_RST = (1U << 1), /* reset cmd issue */ + PCS_CMD_EN = (1U << 0), /* enable cmd issue */ + + /* Port n Attached Device Info */ + PORT_DEV_SSP_TRGT = (1U << 19), + PORT_DEV_SMP_TRGT = (1U << 18), + PORT_DEV_STP_TRGT = (1U << 17), + PORT_DEV_SSP_INIT = (1U << 11), + PORT_DEV_SMP_INIT = (1U << 10), + PORT_DEV_STP_INIT = (1U << 9), + PORT_PHY_ID_MASK = (0xFFU << 24), + PORT_SSP_TRGT_MASK = (0x1U << 19), + PORT_SSP_INIT_MASK = (0x1U << 11), + PORT_DEV_TRGT_MASK = (0x7U << 17), + PORT_DEV_INIT_MASK = (0x7U << 9), + PORT_DEV_TYPE_MASK = (0x7U << 0), + + /* Port n PHY Status */ + PHY_RDY = (1U << 2), + PHY_DW_SYNC = (1U << 1), + PHY_OOB_DTCTD = (1U << 0), + + /* VSR */ + /* PHYMODE 6 (CDB) */ + PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */ + PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */ + PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/ + PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */ + PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */ + PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */ + PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */ + PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */ + PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */ + PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */ + PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */ + PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */ + PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */ + PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */ +}; + +/* SAS/SATA configuration port registers, aka phy registers */ +enum sas_sata_config_port_regs { + PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */ + PHYR_ADDR_LO = 0x04, /* my SAS address (low) */ + PHYR_ADDR_HI = 0x08, /* my SAS address (high) */ + PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */ + PHYR_ATT_ADDR_LO = 0x10, /* 
attached dev SAS addr (low) */ + PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */ + PHYR_SATA_CTL = 0x18, /* SATA control */ + PHYR_PHY_STAT = 0x1C, /* PHY status */ + PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */ + PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */ + PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */ + PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */ + PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */ + PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */ + PHYR_WIDE_PORT = 0x38, /* wide port participating */ + PHYR_CURRENT0 = 0x80, /* current connection info 0 */ + PHYR_CURRENT1 = 0x84, /* current connection info 1 */ + PHYR_CURRENT2 = 0x88, /* current connection info 2 */ + CONFIG_ID_FRAME0 = 0x100, /* Port device ID frame register 0 */ + CONFIG_ID_FRAME1 = 0x104, /* Port device ID frame register 1 */ + CONFIG_ID_FRAME2 = 0x108, /* Port device ID frame register 2 */ + CONFIG_ID_FRAME3 = 0x10c, /* Port device ID frame register 3 */ + CONFIG_ID_FRAME4 = 0x110, /* Port device ID frame register 4 */ + CONFIG_ID_FRAME5 = 0x114, /* Port device ID frame register 5 */ + CONFIG_ID_FRAME6 = 0x118, /* Port device ID frame register 6 */ + CONFIG_ATT_ID_FRAME0 = 0x11c, /* attached ID frame register 0 */ + CONFIG_ATT_ID_FRAME1 = 0x120, /* attached ID frame register 1 */ + CONFIG_ATT_ID_FRAME2 = 0x124, /* attached ID frame register 2 */ + CONFIG_ATT_ID_FRAME3 = 0x128, /* attached ID frame register 3 */ + CONFIG_ATT_ID_FRAME4 = 0x12c, /* attached ID frame register 4 */ + CONFIG_ATT_ID_FRAME5 = 0x130, /* attached ID frame register 5 */ + CONFIG_ATT_ID_FRAME6 = 0x134, /* attached ID frame register 6 */ +}; + +enum sas_cmd_port_registers { + CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */ + CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */ + CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */ + CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */ + CMD_OOB_SPACE = 0x110, /* OOB space control register */ + CMD_OOB_BURST = 0x114, /* OOB burst control register */ + CMD_PHY_TIMER = 0x118, /* PHY timer control register */ + CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */ + CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */ + CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */ + CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */ + CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */ + CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */ + CMD_ID_TEST = 0x134, /* ID test register */ + CMD_PL_TIMER = 0x138, /* PL timer register */ + CMD_WD_TIMER = 0x13c, /* WD timer register */ + CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */ + CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */ + CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */ + CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */ + CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */ + CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */ + CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */ + CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */ + CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */ + CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memroy BIST Status */ + CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */ + CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */ + CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */ + CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */ + CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory 
BIST Status 1 */ + CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */ + CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */ + CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */ + CMD_RESET_COUNT = 0x188, /* Reset Count */ + CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */ + CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */ + CMD_PHY_CTL = 0x194, /* PHY Control and Status */ + CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */ + CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */ + CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */ + CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */ + CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */ + CMD_HOST_CTL = 0x1AC, /* Host Control Status */ + CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */ + CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */ + CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */ + CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */ + CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */ + CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */ + CMD_PORT_LAYER_TIMER1 = 0x1E0, /* Port Layer Timer 1 */ + CMD_LINK_TIMER = 0x1E4, /* Link Timer */ +}; + +enum mvs_info_flags { + MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ + MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */ +}; + +enum mvs_event_flags { + PHY_PLUG_EVENT = (3U), + PHY_PLUG_IN = (1U << 0), /* phy plug in */ + PHY_PLUG_OUT = (1U << 1), /* phy plug out */ + EXP_BRCT_CHG = (1U << 2), /* broadcast change */ +}; + +enum mvs_port_type { + PORT_TGT_MASK = (1U << 5), + PORT_INIT_PORT = (1U << 4), + PORT_TGT_PORT = (1U << 3), + PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT), + PORT_TYPE_SAS = (1U << 1), + PORT_TYPE_SATA = (1U << 0), +}; + +/* Command Table Format */ +enum ct_format { + /* SSP */ + SSP_F_H = 0x00, + SSP_F_IU = 0x18, + SSP_F_MAX = 0x4D, + /* STP */ + STP_CMD_FIS = 0x00, + STP_ATAPI_CMD = 0x40, + STP_F_MAX = 0x10, + /* SMP */ + SMP_F_T = 0x00, + SMP_F_DEP = 0x01, + SMP_F_MAX = 0x101, +}; + +enum status_buffer { + SB_EIR_OFF = 0x00, /* Error Information Record */ + SB_RFB_OFF = 0x08, /* Response Frame Buffer */ + SB_RFB_MAX = 0x400, /* RFB size*/ +}; + +enum error_info_rec { + CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */ + CMD_PI_ERR = (1U << 30), /* Protection info error. 
see flags2 */ + RSP_OVER = (1U << 29), /* rsp buffer overflow */ + RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */ + UNK_FIS = (1U << 27), /* unknown FIS */ + DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */ + SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */ + TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */ + R_ERR = (1U << 23), /* SATA returned R_ERR prim */ + RD_OFS = (1U << 20), /* Read DATA frame invalid offset */ + XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */ + UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */ + DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */ + INTERLOCK = (1U << 15), /* interlock error */ + NAK = (1U << 14), /* NAK rx'd */ + ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */ + CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */ + OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */ + PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */ + NO_DEST = (1U << 9), /* I_T nexus lost, no destination */ + STP_RES_BSY = (1U << 8), /* STP resources busy */ + BREAK = (1U << 7), /* break received */ + BAD_DEST = (1U << 6), /* bad destination */ + BAD_PROTO = (1U << 5), /* protocol not supported */ + BAD_RATE = (1U << 4), /* cxn rate not supported */ + WRONG_DEST = (1U << 3), /* wrong destination error */ + CREDIT_TO = (1U << 2), /* credit timeout */ + WDOG_TO = (1U << 1), /* watchdog timeout */ + BUF_PAR = (1U << 0), /* buffer parity error */ +}; + +enum error_info_rec_2 { + SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */ + GRD_CHK_ERR = (1U << 14), /* Guard Check Error */ + APP_CHK_ERR = (1U << 13), /* Application Check error */ + REF_CHK_ERR = (1U << 12), /* Reference Check Error */ + USR_BLK_NM = (1U << 0), /* User Block Number */ +}; + +enum pci_cfg_register_bits { + PCTL_PWR_OFF = (0xFU << 24), + PCTL_COM_ON = (0xFU << 20), + PCTL_LINK_RST = (0xFU << 16), + PCTL_LINK_OFFS = (16), + PCTL_PHY_DSBL = (0xFU << 12), + PCTL_PHY_DSBL_OFFS = (12), + PRD_REQ_SIZE = (0x4000), + PRD_REQ_MASK = (0x00007000), + PLS_NEG_LINK_WD = (0x3FU << 4), + PLS_NEG_LINK_WD_OFFS = 4, + PLS_LINK_SPD = (0x0FU << 0), + PLS_LINK_SPD_OFFS = 0, +}; + +enum open_frame_protocol { + PROTOCOL_SMP = 0x0, + PROTOCOL_SSP = 0x1, + PROTOCOL_STP = 0x2, +}; + +/* define for response frame datapres field */ +enum datapres_field { + NO_DATA = 0, + RESPONSE_DATA = 1, + SENSE_DATA = 2, +}; + +#endif diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c new file mode 100644 index 000000000..43ebb331e --- /dev/null +++ b/drivers/scsi/mvsas/mv_init.c @@ -0,0 +1,791 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Marvell 88SE64xx/88SE94xx pci init + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. 
+*/ + + +#include "mv_sas.h" + +int interrupt_coalescing = 0x80; + +static struct scsi_transport_template *mvs_stt; +static const struct mvs_chip_info mvs_chips[] = { + [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, + [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, + [chip_6485] = { 1, 8, 0x800, 33, 32, 6, 10, &mvs_64xx_dispatch, }, + [chip_9180] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, }, + [chip_9480] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, }, + [chip_9445] = { 1, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, }, + [chip_9485] = { 2, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, }, + [chip_1300] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, + [chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, }, +}; + +static const struct attribute_group *mvst_host_groups[]; + +#define SOC_SAS_NUM 2 + +static const struct scsi_host_template mvs_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .queuecommand = sas_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, + .target_alloc = sas_target_alloc, + .slave_configure = sas_slave_configure, + .scan_finished = mvs_scan_finished, + .scan_start = mvs_scan_start, + .change_queue_depth = sas_change_queue_depth, + .bios_param = sas_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, + .slave_alloc = sas_slave_alloc, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif + .shost_groups = mvst_host_groups, + .track_queue_depth = 1, +}; + +static struct sas_domain_function_template mvs_transport_ops = { + .lldd_dev_found = mvs_dev_found, + .lldd_dev_gone = mvs_dev_gone, + .lldd_execute_task = mvs_queue_command, + .lldd_control_phy = mvs_phy_control, + + .lldd_abort_task = mvs_abort_task, + .lldd_abort_task_set = sas_abort_task_set, + .lldd_clear_task_set = sas_clear_task_set, + .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, + .lldd_lu_reset = mvs_lu_reset, + .lldd_query_task = mvs_query_task, + .lldd_port_formed = mvs_port_formed, + .lldd_port_deformed = mvs_port_deformed, + + .lldd_write_gpio = mvs_gpio_write, + +}; + +static void mvs_phy_init(struct mvs_info *mvi, int phy_id) +{ + struct mvs_phy *phy = &mvi->phy[phy_id]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + phy->mvi = mvi; + phy->port = NULL; + timer_setup(&phy->timer, NULL, 0); + sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 
1 : 0; + sas_phy->iproto = SAS_PROTOCOL_ALL; + sas_phy->tproto = 0; + sas_phy->role = PHY_ROLE_INITIATOR; + sas_phy->oob_mode = OOB_NOT_CONNECTED; + sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; + + sas_phy->id = phy_id; + sas_phy->sas_addr = &mvi->sas_addr[0]; + sas_phy->frame_rcvd = &phy->frame_rcvd[0]; + sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata; + sas_phy->lldd_phy = phy; +} + +static void mvs_free(struct mvs_info *mvi) +{ + struct mvs_wq *mwq; + int slot_nr; + + if (!mvi) + return; + + if (mvi->flags & MVF_FLAG_SOC) + slot_nr = MVS_SOC_SLOTS; + else + slot_nr = MVS_CHIP_SLOT_SZ; + + dma_pool_destroy(mvi->dma_pool); + + if (mvi->tx) + dma_free_coherent(mvi->dev, + sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, + mvi->tx, mvi->tx_dma); + if (mvi->rx_fis) + dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ, + mvi->rx_fis, mvi->rx_fis_dma); + if (mvi->rx) + dma_free_coherent(mvi->dev, + sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), + mvi->rx, mvi->rx_dma); + if (mvi->slot) + dma_free_coherent(mvi->dev, + sizeof(*mvi->slot) * slot_nr, + mvi->slot, mvi->slot_dma); + + if (mvi->bulk_buffer) + dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE, + mvi->bulk_buffer, mvi->bulk_buffer_dma); + if (mvi->bulk_buffer1) + dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE, + mvi->bulk_buffer1, mvi->bulk_buffer_dma1); + + MVS_CHIP_DISP->chip_iounmap(mvi); + if (mvi->shost) + scsi_host_put(mvi->shost); + list_for_each_entry(mwq, &mvi->wq_list, entry) + cancel_delayed_work(&mwq->work_q); + kfree(mvi->rsvd_tags); + kfree(mvi); +} + +#ifdef CONFIG_SCSI_MVSAS_TASKLET +static void mvs_tasklet(unsigned long opaque) +{ + u32 stat; + u16 core_nr, i = 0; + + struct mvs_info *mvi; + struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque; + + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; + + if (unlikely(!mvi)) + BUG_ON(1); + + stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq); + if (!stat) + goto out; + + for (i = 0; i < core_nr; i++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; + MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat); + } +out: + MVS_CHIP_DISP->interrupt_enable(mvi); + +} +#endif + +static irqreturn_t mvs_interrupt(int irq, void *opaque) +{ + u32 stat; + struct mvs_info *mvi; + struct sas_ha_struct *sha = opaque; +#ifndef CONFIG_SCSI_MVSAS_TASKLET + u32 i; + u32 core_nr; + + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; +#endif + + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; + + if (unlikely(!mvi)) + return IRQ_NONE; +#ifdef CONFIG_SCSI_MVSAS_TASKLET + MVS_CHIP_DISP->interrupt_disable(mvi); +#endif + + stat = MVS_CHIP_DISP->isr_status(mvi, irq); + if (!stat) { + #ifdef CONFIG_SCSI_MVSAS_TASKLET + MVS_CHIP_DISP->interrupt_enable(mvi); + #endif + return IRQ_NONE; + } + +#ifdef CONFIG_SCSI_MVSAS_TASKLET + tasklet_schedule(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet); +#else + for (i = 0; i < core_nr; i++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; + MVS_CHIP_DISP->isr(mvi, irq, stat); + } +#endif + return IRQ_HANDLED; +} + +static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) +{ + int i = 0, slot_nr; + char pool_name[32]; + + if (mvi->flags & MVF_FLAG_SOC) + slot_nr = MVS_SOC_SLOTS; + else + slot_nr = MVS_CHIP_SLOT_SZ; + + spin_lock_init(&mvi->lock); + for (i = 0; i < mvi->chip->n_phy; i++) { + mvs_phy_init(mvi, i); + mvi->port[i].wide_port_phymap = 0; + mvi->port[i].port_attached = 0; + INIT_LIST_HEAD(&mvi->port[i].list); + } + for (i = 0; i < MVS_MAX_DEVICES; i++) { + 
mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED; + mvi->devices[i].dev_type = SAS_PHY_UNUSED; + mvi->devices[i].device_id = i; + mvi->devices[i].dev_status = MVS_DEV_NORMAL; + } + + /* + * alloc and init our DMA areas + */ + mvi->tx = dma_alloc_coherent(mvi->dev, + sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, + &mvi->tx_dma, GFP_KERNEL); + if (!mvi->tx) + goto err_out; + mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ, + &mvi->rx_fis_dma, GFP_KERNEL); + if (!mvi->rx_fis) + goto err_out; + + mvi->rx = dma_alloc_coherent(mvi->dev, + sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), + &mvi->rx_dma, GFP_KERNEL); + if (!mvi->rx) + goto err_out; + mvi->rx[0] = cpu_to_le32(0xfff); + mvi->rx_cons = 0xfff; + + mvi->slot = dma_alloc_coherent(mvi->dev, + sizeof(*mvi->slot) * slot_nr, + &mvi->slot_dma, GFP_KERNEL); + if (!mvi->slot) + goto err_out; + + mvi->bulk_buffer = dma_alloc_coherent(mvi->dev, + TRASH_BUCKET_SIZE, + &mvi->bulk_buffer_dma, GFP_KERNEL); + if (!mvi->bulk_buffer) + goto err_out; + + mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev, + TRASH_BUCKET_SIZE, + &mvi->bulk_buffer_dma1, GFP_KERNEL); + if (!mvi->bulk_buffer1) + goto err_out; + + sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id); + mvi->dma_pool = dma_pool_create(pool_name, &mvi->pdev->dev, + MVS_SLOT_BUF_SZ, 16, 0); + if (!mvi->dma_pool) { + printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name); + goto err_out; + } + + return 0; +err_out: + return 1; +} + + +int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex) +{ + unsigned long res_start, res_len, res_flag_ex = 0; + struct pci_dev *pdev = mvi->pdev; + if (bar_ex != -1) { + /* + * ioremap main and peripheral registers + */ + res_start = pci_resource_start(pdev, bar_ex); + res_len = pci_resource_len(pdev, bar_ex); + if (!res_start || !res_len) + goto err_out; + + res_flag_ex = pci_resource_flags(pdev, bar_ex); + if (res_flag_ex & IORESOURCE_MEM) + mvi->regs_ex = ioremap(res_start, res_len); + else + mvi->regs_ex = (void *)res_start; + if (!mvi->regs_ex) + goto err_out; + } + + res_start = pci_resource_start(pdev, bar); + res_len = pci_resource_len(pdev, bar); + if (!res_start || !res_len) { + iounmap(mvi->regs_ex); + mvi->regs_ex = NULL; + goto err_out; + } + + mvi->regs = ioremap(res_start, res_len); + + if (!mvi->regs) { + if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM)) + iounmap(mvi->regs_ex); + mvi->regs_ex = NULL; + goto err_out; + } + + return 0; +err_out: + return -1; +} + +void mvs_iounmap(void __iomem *regs) +{ + iounmap(regs); +} + +static struct mvs_info *mvs_pci_alloc(struct pci_dev *pdev, + const struct pci_device_id *ent, + struct Scsi_Host *shost, unsigned int id) +{ + struct mvs_info *mvi = NULL; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + mvi = kzalloc(sizeof(*mvi) + + (1L << mvs_chips[ent->driver_data].slot_width) * + sizeof(struct mvs_slot_info), GFP_KERNEL); + if (!mvi) + return NULL; + + mvi->pdev = pdev; + mvi->dev = &pdev->dev; + mvi->chip_id = ent->driver_data; + mvi->chip = &mvs_chips[mvi->chip_id]; + INIT_LIST_HEAD(&mvi->wq_list); + + ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi; + ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy; + + mvi->id = id; + mvi->sas = sha; + mvi->shost = shost; + + mvi->rsvd_tags = bitmap_zalloc(MVS_RSVD_SLOTS, GFP_KERNEL); + if (!mvi->rsvd_tags) + goto err_out; + + if (MVS_CHIP_DISP->chip_ioremap(mvi)) + goto err_out; + if (!mvs_alloc(mvi, shost)) + return mvi; +err_out: + mvs_free(mvi); + return NULL; +} + +static int pci_go_64(struct pci_dev *pdev) +{ + int rc; + + rc = 
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) { + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "32-bit DMA enable failed\n"); + return rc; + } + } + + return rc; +} + +static int mvs_prep_sas_ha_init(struct Scsi_Host *shost, + const struct mvs_chip_info *chip_info) +{ + int phy_nr, port_nr; unsigned short core_nr; + struct asd_sas_phy **arr_phy; + struct asd_sas_port **arr_port; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + core_nr = chip_info->n_host; + phy_nr = core_nr * chip_info->n_phy; + port_nr = phy_nr; + + memset(sha, 0x00, sizeof(struct sas_ha_struct)); + arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL); + arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL); + if (!arr_phy || !arr_port) + goto exit_free; + + sha->sas_phy = arr_phy; + sha->sas_port = arr_port; + sha->shost = shost; + + sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL); + if (!sha->lldd_ha) + goto exit_free; + + ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr; + + shost->transportt = mvs_stt; + shost->max_id = MVS_MAX_DEVICES; + shost->max_lun = ~0; + shost->max_channel = 1; + shost->max_cmd_len = 16; + + return 0; +exit_free: + kfree(arr_phy); + kfree(arr_port); + return -1; + +} + +static void mvs_post_sas_ha_init(struct Scsi_Host *shost, + const struct mvs_chip_info *chip_info) +{ + int can_queue, i = 0, j = 0; + struct mvs_info *mvi = NULL; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + + for (j = 0; j < nr_core; j++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; + for (i = 0; i < chip_info->n_phy; i++) { + sha->sas_phy[j * chip_info->n_phy + i] = + &mvi->phy[i].sas_phy; + sha->sas_port[j * chip_info->n_phy + i] = + &mvi->port[i].sas_port; + } + } + + sha->sas_ha_name = DRV_NAME; + sha->dev = mvi->dev; + sha->sas_addr = &mvi->sas_addr[0]; + + sha->num_phys = nr_core * chip_info->n_phy; + + if (mvi->flags & MVF_FLAG_SOC) + can_queue = MVS_SOC_CAN_QUEUE; + else + can_queue = MVS_CHIP_SLOT_SZ; + + can_queue -= MVS_RSVD_SLOTS; + + shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG); + shost->can_queue = can_queue; + mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE; + sha->shost = mvi->shost; +} + +static void mvs_init_sas_add(struct mvs_info *mvi) +{ + u8 i; + for (i = 0; i < mvi->chip->n_phy; i++) { + mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL; + mvi->phy[i].dev_sas_addr = + cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr)); + } + + memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE); +} + +static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + unsigned int rc, nhost = 0; + struct mvs_info *mvi; + irq_handler_t irq_handler = mvs_interrupt; + struct Scsi_Host *shost = NULL; + const struct mvs_chip_info *chip; + + dev_printk(KERN_INFO, &pdev->dev, + "mvsas: driver version %s\n", DRV_VERSION); + rc = pci_enable_device(pdev); + if (rc) + goto err_out_enable; + + pci_set_master(pdev); + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out_disable; + + rc = pci_go_64(pdev); + if (rc) + goto err_out_regions; + + shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); + if (!shost) { + rc = -ENOMEM; + goto err_out_regions; + } + + chip = &mvs_chips[ent->driver_data]; + SHOST_TO_SAS_HA(shost) = + kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL); + if (!SHOST_TO_SAS_HA(shost)) { + scsi_host_put(shost); + rc = -ENOMEM; + goto err_out_regions; + } + 
+ rc = mvs_prep_sas_ha_init(shost, chip); + if (rc) { + scsi_host_put(shost); + rc = -ENOMEM; + goto err_out_regions; + } + + pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); + + do { + mvi = mvs_pci_alloc(pdev, ent, shost, nhost); + if (!mvi) { + rc = -ENOMEM; + goto err_out_regions; + } + + memset(&mvi->hba_info_param, 0xFF, + sizeof(struct hba_info_page)); + + mvs_init_sas_add(mvi); + + mvi->instance = nhost; + rc = MVS_CHIP_DISP->chip_init(mvi); + if (rc) { + mvs_free(mvi); + goto err_out_regions; + } + nhost++; + } while (nhost < chip->n_host); +#ifdef CONFIG_SCSI_MVSAS_TASKLET + { + struct mvs_prv_info *mpi = SHOST_TO_SAS_HA(shost)->lldd_ha; + + tasklet_init(&(mpi->mv_tasklet), mvs_tasklet, + (unsigned long)SHOST_TO_SAS_HA(shost)); + } +#endif + + mvs_post_sas_ha_init(shost, chip); + + rc = scsi_add_host(shost, &pdev->dev); + if (rc) + goto err_out_shost; + + rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); + if (rc) + goto err_out_shost; + rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, + DRV_NAME, SHOST_TO_SAS_HA(shost)); + if (rc) + goto err_not_sas; + + MVS_CHIP_DISP->interrupt_enable(mvi); + + scsi_scan_host(mvi->shost); + + return 0; + +err_not_sas: + sas_unregister_ha(SHOST_TO_SAS_HA(shost)); +err_out_shost: + scsi_remove_host(mvi->shost); +err_out_regions: + pci_release_regions(pdev); +err_out_disable: + pci_disable_device(pdev); +err_out_enable: + return rc; +} + +static void mvs_pci_remove(struct pci_dev *pdev) +{ + unsigned short core_nr, i = 0; + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct mvs_info *mvi = NULL; + + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; + +#ifdef CONFIG_SCSI_MVSAS_TASKLET + tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet); +#endif + + sas_unregister_ha(sha); + sas_remove_host(mvi->shost); + + MVS_CHIP_DISP->interrupt_disable(mvi); + free_irq(mvi->pdev->irq, sha); + for (i = 0; i < core_nr; i++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; + mvs_free(mvi); + } + kfree(sha->sas_phy); + kfree(sha->sas_port); + kfree(sha); + pci_release_regions(pdev); + pci_disable_device(pdev); + return; +} + +static struct pci_device_id mvs_pci_table[] = { + { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 }, + { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 }, + { + .vendor = PCI_VENDOR_ID_MARVELL, + .device = 0x6440, + .subvendor = PCI_ANY_ID, + .subdevice = 0x6480, + .class = 0, + .class_mask = 0, + .driver_data = chip_6485, + }, + { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 }, + { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 }, + { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 }, + { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 }, + { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, + { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, + { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, + { PCI_VDEVICE(TTI, 0x2640), chip_6440 }, + { PCI_VDEVICE(TTI, 0x2710), chip_9480 }, + { PCI_VDEVICE(TTI, 0x2720), chip_9480 }, + { PCI_VDEVICE(TTI, 0x2721), chip_9480 }, + { PCI_VDEVICE(TTI, 0x2722), chip_9480 }, + { PCI_VDEVICE(TTI, 0x2740), chip_9480 }, + { PCI_VDEVICE(TTI, 0x2744), chip_9480 }, + { PCI_VDEVICE(TTI, 0x2760), chip_9480 }, + { + .vendor = PCI_VENDOR_ID_MARVELL_EXT, + .device = 0x9480, + .subvendor = PCI_ANY_ID, + .subdevice = 0x9480, + .class = 0, + .class_mask = 0, + .driver_data = chip_9480, + }, + { + .vendor = PCI_VENDOR_ID_MARVELL_EXT, + .device = 0x9445, + .subvendor = PCI_ANY_ID, + .subdevice = 0x9480, + .class = 0, + .class_mask = 0, + .driver_data = chip_9445, 
+ }, + { PCI_VDEVICE(MARVELL_EXT, 0x9485), chip_9485 }, /* Marvell 9480/9485 (any vendor/model) */ + { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */ + { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ + { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ + { PCI_VDEVICE(OCZ, 0x1041), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ + { PCI_VDEVICE(OCZ, 0x1042), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ + { PCI_VDEVICE(OCZ, 0x1043), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ + { PCI_VDEVICE(OCZ, 0x1044), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ + { PCI_VDEVICE(OCZ, 0x1080), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ + { PCI_VDEVICE(OCZ, 0x1083), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ + { PCI_VDEVICE(OCZ, 0x1084), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ + + { } /* terminate list */ +}; + +static struct pci_driver mvs_pci_driver = { + .name = DRV_NAME, + .id_table = mvs_pci_table, + .probe = mvs_pci_init, + .remove = mvs_pci_remove, +}; + +static ssize_t driver_version_show(struct device *cdev, + struct device_attribute *attr, char *buffer) +{ + return sysfs_emit(buffer, "%s\n", DRV_VERSION); +} + +static DEVICE_ATTR_RO(driver_version); + +static ssize_t interrupt_coalescing_store(struct device *cdev, + struct device_attribute *attr, + const char *buffer, size_t size) +{ + unsigned int val = 0; + struct mvs_info *mvi = NULL; + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + u8 i, core_nr; + if (buffer == NULL) + return size; + + if (sscanf(buffer, "%u", &val) != 1) + return -EINVAL; + + if (val >= 0x10000) { + mv_dprintk("interrupt coalescing timer %d us is" + "too long\n", val); + return strlen(buffer); + } + + interrupt_coalescing = val; + + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; + + if (unlikely(!mvi)) + return -EINVAL; + + for (i = 0; i < core_nr; i++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; + if (MVS_CHIP_DISP->tune_interrupt) + MVS_CHIP_DISP->tune_interrupt(mvi, + interrupt_coalescing); + } + mv_dprintk("set interrupt coalescing time to %d us\n", + interrupt_coalescing); + return strlen(buffer); +} + +static ssize_t interrupt_coalescing_show(struct device *cdev, + struct device_attribute *attr, char *buffer) +{ + return sysfs_emit(buffer, "%d\n", interrupt_coalescing); +} + +static DEVICE_ATTR_RW(interrupt_coalescing); + +static int __init mvs_init(void) +{ + int rc; + mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); + if (!mvs_stt) + return -ENOMEM; + + rc = pci_register_driver(&mvs_pci_driver); + if (rc) + goto err_out; + + return 0; + +err_out: + sas_release_transport(mvs_stt); + return rc; +} + +static void __exit mvs_exit(void) +{ + pci_unregister_driver(&mvs_pci_driver); + sas_release_transport(mvs_stt); +} + +static struct attribute *mvst_host_attrs[] = { + &dev_attr_driver_version.attr, + &dev_attr_interrupt_coalescing.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(mvst_host); + +module_init(mvs_init); +module_exit(mvs_exit); + +MODULE_AUTHOR("Jeff Garzik "); +MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); +#ifdef CONFIG_PCI +MODULE_DEVICE_TABLE(pci, mvs_pci_table); +#endif diff --git a/drivers/scsi/mvsas/mv_sas.c 
b/drivers/scsi/mvsas/mv_sas.c new file mode 100644 index 000000000..1444b1f1c --- /dev/null +++ b/drivers/scsi/mvsas/mv_sas.c @@ -0,0 +1,1933 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Marvell 88SE64xx/88SE94xx main function + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. +*/ + +#include "mv_sas.h" + +static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) +{ + if (task->lldd_task) { + struct mvs_slot_info *slot; + slot = task->lldd_task; + *tag = slot->slot_tag; + return 1; + } + return 0; +} + +static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) +{ + void *bitmap = mvi->rsvd_tags; + clear_bit(tag, bitmap); +} + +static void mvs_tag_free(struct mvs_info *mvi, u32 tag) +{ + if (tag >= MVS_RSVD_SLOTS) + return; + + mvs_tag_clear(mvi, tag); +} + +static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) +{ + void *bitmap = mvi->rsvd_tags; + set_bit(tag, bitmap); +} + +static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) +{ + unsigned int index, tag; + void *bitmap = mvi->rsvd_tags; + + index = find_first_zero_bit(bitmap, MVS_RSVD_SLOTS); + tag = index; + if (tag >= MVS_RSVD_SLOTS) + return -SAS_QUEUE_FULL; + mvs_tag_set(mvi, tag); + *tag_out = tag; + return 0; +} + +static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) +{ + unsigned long i = 0, j = 0, hi = 0; + struct sas_ha_struct *sha = dev->port->ha; + struct mvs_info *mvi = NULL; + struct asd_sas_phy *phy; + + while (sha->sas_port[i]) { + if (sha->sas_port[i] == dev->port) { + spin_lock(&sha->sas_port[i]->phy_list_lock); + phy = container_of(sha->sas_port[i]->phy_list.next, + struct asd_sas_phy, port_phy_el); + spin_unlock(&sha->sas_port[i]->phy_list_lock); + j = 0; + while (sha->sas_phy[j]) { + if (sha->sas_phy[j] == phy) + break; + j++; + } + break; + } + i++; + } + hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; + + return mvi; + +} + +static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) +{ + unsigned long i = 0, j = 0, n = 0, num = 0; + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; + struct mvs_info *mvi = mvi_dev->mvi_info; + struct sas_ha_struct *sha = dev->port->ha; + + while (sha->sas_port[i]) { + if (sha->sas_port[i] == dev->port) { + struct asd_sas_phy *phy; + + spin_lock(&sha->sas_port[i]->phy_list_lock); + list_for_each_entry(phy, + &sha->sas_port[i]->phy_list, port_phy_el) { + j = 0; + while (sha->sas_phy[j]) { + if (sha->sas_phy[j] == phy) + break; + j++; + } + phyno[n] = (j >= mvi->chip->n_phy) ? 
+ (j - mvi->chip->n_phy) : j; + num++; + n++; + } + spin_unlock(&sha->sas_port[i]->phy_list_lock); + break; + } + i++; + } + return num; +} + +struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi, + u8 reg_set) +{ + u32 dev_no; + for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) { + if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED) + continue; + + if (mvi->devices[dev_no].taskfileset == reg_set) + return &mvi->devices[dev_no]; + } + return NULL; +} + +static inline void mvs_free_reg_set(struct mvs_info *mvi, + struct mvs_device *dev) +{ + if (!dev) { + mv_printk("device has been free.\n"); + return; + } + if (dev->taskfileset == MVS_ID_NOT_MAPPED) + return; + MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset); +} + +static inline u8 mvs_assign_reg_set(struct mvs_info *mvi, + struct mvs_device *dev) +{ + if (dev->taskfileset != MVS_ID_NOT_MAPPED) + return 0; + return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset); +} + +void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard) +{ + u32 no; + for_each_phy(phy_mask, phy_mask, no) { + if (!(phy_mask & 1)) + continue; + MVS_CHIP_DISP->phy_reset(mvi, no, hard); + } +} + +int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata) +{ + int rc = 0, phy_id = sas_phy->id; + u32 tmp, i = 0, hi; + struct sas_ha_struct *sha = sas_phy->ha; + struct mvs_info *mvi = NULL; + + while (sha->sas_phy[i]) { + if (sha->sas_phy[i] == sas_phy) + break; + i++; + } + hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; + + switch (func) { + case PHY_FUNC_SET_LINK_RATE: + MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata); + break; + + case PHY_FUNC_HARD_RESET: + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id); + if (tmp & PHY_RST_HARD) + break; + MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET); + break; + + case PHY_FUNC_LINK_RESET: + MVS_CHIP_DISP->phy_enable(mvi, phy_id); + MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET); + break; + + case PHY_FUNC_DISABLE: + MVS_CHIP_DISP->phy_disable(mvi, phy_id); + break; + case PHY_FUNC_RELEASE_SPINUP_HOLD: + default: + rc = -ENOSYS; + } + msleep(200); + return rc; +} + +void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo, + u32 off_hi, u64 sas_addr) +{ + u32 lo = (u32)sas_addr; + u32 hi = (u32)(sas_addr>>32); + + MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo); + MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo); + MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi); + MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi); +} + +static void mvs_bytes_dmaed(struct mvs_info *mvi, int i, gfp_t gfp_flags) +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + if (!phy->phy_attached) + return; + + if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK) + && phy->phy_type & PORT_TYPE_SAS) { + return; + } + + sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags); + + if (sas_phy->phy) { + struct sas_phy *sphy = sas_phy->phy; + + sphy->negotiated_linkrate = sas_phy->linkrate; + sphy->minimum_linkrate = phy->minimum_linkrate; + sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; + sphy->maximum_linkrate = phy->maximum_linkrate; + sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate(); + } + + if (phy->phy_type & PORT_TYPE_SAS) { + struct sas_identify_frame *id; + + id = (struct sas_identify_frame *)phy->frame_rcvd; + id->dev_type = phy->identify.device_type; + id->initiator_bits = SAS_PROTOCOL_ALL; + id->target_bits = 
phy->identify.target_port_protocols; + + /* direct attached SAS device */ + if (phy->att_dev_info & PORT_SSP_TRGT_MASK) { + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); + MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x00); + } + } else if (phy->phy_type & PORT_TYPE_SATA) { + /*Nothing*/ + } + mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy); + + sas_phy->frame_rcvd_size = phy->frame_rcvd_size; + + sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags); +} + +void mvs_scan_start(struct Scsi_Host *shost) +{ + int i, j; + unsigned short core_nr; + struct mvs_info *mvi; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct mvs_prv_info *mvs_prv = sha->lldd_ha; + + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + + for (j = 0; j < core_nr; j++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; + for (i = 0; i < mvi->chip->n_phy; ++i) + mvs_bytes_dmaed(mvi, i, GFP_KERNEL); + } + mvs_prv->scan_finished = 1; +} + +int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct mvs_prv_info *mvs_prv = sha->lldd_ha; + + if (mvs_prv->scan_finished == 0) + return 0; + + sas_drain_work(sha); + return 1; +} + +static int mvs_task_prep_smp(struct mvs_info *mvi, + struct mvs_task_exec_info *tei) +{ + int elem, rc, i; + struct sas_ha_struct *sha = mvi->sas; + struct sas_task *task = tei->task; + struct mvs_cmd_hdr *hdr = tei->hdr; + struct domain_device *dev = task->dev; + struct asd_sas_port *sas_port = dev->port; + struct sas_phy *sphy = dev->phy; + struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number]; + struct scatterlist *sg_req, *sg_resp; + u32 req_len, resp_len, tag = tei->tag; + void *buf_tmp; + u8 *buf_oaf; + dma_addr_t buf_tmp_dma; + void *buf_prd; + struct mvs_slot_info *slot = &mvi->slot_info[tag]; + u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); + + /* + * DMA-map SMP request, response buffers + */ + sg_req = &task->smp_task.smp_req; + elem = dma_map_sg(mvi->dev, sg_req, 1, DMA_TO_DEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + + sg_resp = &task->smp_task.smp_resp; + elem = dma_map_sg(mvi->dev, sg_resp, 1, DMA_FROM_DEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out; + } + resp_len = SB_RFB_MAX; + + /* must be in dwords */ + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto err_out_2; + } + + /* + * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs + */ + + /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */ + buf_tmp = slot->buf; + buf_tmp_dma = slot->buf_dma; + + hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); + + /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ + buf_oaf = buf_tmp; + hdr->open_frame = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_OAF_SZ; + buf_tmp_dma += MVS_OAF_SZ; + + /* region 3: PRD table *********************************** */ + buf_prd = buf_tmp; + if (tei->n_elem) + hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); + else + hdr->prd_tbl = 0; + + i = MVS_CHIP_DISP->prd_size() * tei->n_elem; + buf_tmp += i; + buf_tmp_dma += i; + + /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ + slot->response = buf_tmp; + hdr->status_buf = cpu_to_le64(buf_tmp_dma); + if (mvi->flags & MVF_FLAG_SOC) + hdr->reserved[0] = 0; + + /* + * Fill in TX ring and command slot header + */ + slot->tx = mvi->tx_prod; + mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | + TXQ_MODE_I | tag | + (MVS_PHY_ID << TXQ_PHY_SHIFT)); + + hdr->flags |= 
flags; + hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); + hdr->tags = cpu_to_le32(tag); + hdr->data_len = 0; + + /* generate open address frame hdr (first 12 bytes) */ + /* initiator, SMP, ftype 1h */ + buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01; + buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf; + *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ + memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); + + /* fill in PRD (scatter/gather) table, if any */ + MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); + + return 0; + +err_out_2: + dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); +err_out: + dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + return rc; +} + +static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) +{ + struct ata_queued_cmd *qc = task->uldd_task; + + if (qc) { + if (qc->tf.command == ATA_CMD_FPDMA_WRITE || + qc->tf.command == ATA_CMD_FPDMA_READ || + qc->tf.command == ATA_CMD_FPDMA_RECV || + qc->tf.command == ATA_CMD_FPDMA_SEND || + qc->tf.command == ATA_CMD_NCQ_NON_DATA) { + *tag = qc->tag; + return 1; + } + } + + return 0; +} + +static int mvs_task_prep_ata(struct mvs_info *mvi, + struct mvs_task_exec_info *tei) +{ + struct sas_task *task = tei->task; + struct domain_device *dev = task->dev; + struct mvs_device *mvi_dev = dev->lldd_dev; + struct mvs_cmd_hdr *hdr = tei->hdr; + struct asd_sas_port *sas_port = dev->port; + struct mvs_slot_info *slot; + void *buf_prd; + u32 tag = tei->tag, hdr_tag; + u32 flags, del_q; + void *buf_tmp; + u8 *buf_cmd, *buf_oaf; + dma_addr_t buf_tmp_dma; + u32 i, req_len, resp_len; + const u32 max_resp_len = SB_RFB_MAX; + + if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) { + mv_dprintk("Have not enough regiset for dev %d.\n", + mvi_dev->device_id); + return -EBUSY; + } + slot = &mvi->slot_info[tag]; + slot->tx = mvi->tx_prod; + del_q = TXQ_MODE_I | tag | + (TXQ_CMD_STP << TXQ_CMD_SHIFT) | + ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) | + (mvi_dev->taskfileset << TXQ_SRS_SHIFT); + mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); + + if (task->data_dir == DMA_FROM_DEVICE) + flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT); + else + flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); + + if (task->ata_task.use_ncq) + flags |= MCH_FPDMA; + if (dev->sata_dev.class == ATA_DEV_ATAPI) { + if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI) + flags |= MCH_ATAPI; + } + + hdr->flags = cpu_to_le32(flags); + + if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + else + hdr_tag = tag; + + hdr->tags = cpu_to_le32(hdr_tag); + + hdr->data_len = cpu_to_le32(task->total_xfer_len); + + /* + * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs + */ + + /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */ + buf_cmd = buf_tmp = slot->buf; + buf_tmp_dma = slot->buf_dma; + + hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_ATA_CMD_SZ; + buf_tmp_dma += MVS_ATA_CMD_SZ; + + /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ + /* used for STP. unused for SATA? 
*/ + buf_oaf = buf_tmp; + hdr->open_frame = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_OAF_SZ; + buf_tmp_dma += MVS_OAF_SZ; + + /* region 3: PRD table ********************************************* */ + buf_prd = buf_tmp; + + if (tei->n_elem) + hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); + else + hdr->prd_tbl = 0; + i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count(); + + buf_tmp += i; + buf_tmp_dma += i; + + /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ + slot->response = buf_tmp; + hdr->status_buf = cpu_to_le64(buf_tmp_dma); + if (mvi->flags & MVF_FLAG_SOC) + hdr->reserved[0] = 0; + + req_len = sizeof(struct host_to_dev_fis); + resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - + sizeof(struct mvs_err_info) - i; + + /* request, response lengths */ + resp_len = min(resp_len, max_resp_len); + hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); + + if (likely(!task->ata_task.device_control_reg_update)) + task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ + /* fill in command FIS and ATAPI CDB */ + memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); + if (dev->sata_dev.class == ATA_DEV_ATAPI) + memcpy(buf_cmd + STP_ATAPI_CMD, + task->ata_task.atapi_packet, 16); + + /* generate open address frame hdr (first 12 bytes) */ + /* initiator, STP, ftype 1h */ + buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1; + buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf; + *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); + memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); + + /* fill in PRD (scatter/gather) table, if any */ + MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); + + if (task->data_dir == DMA_FROM_DEVICE) + MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask, + TRASH_BUCKET_SIZE, tei->n_elem, buf_prd); + + return 0; +} + +static int mvs_task_prep_ssp(struct mvs_info *mvi, + struct mvs_task_exec_info *tei, int is_tmf, + struct sas_tmf_task *tmf) +{ + struct sas_task *task = tei->task; + struct mvs_cmd_hdr *hdr = tei->hdr; + struct mvs_port *port = tei->port; + struct domain_device *dev = task->dev; + struct mvs_device *mvi_dev = dev->lldd_dev; + struct asd_sas_port *sas_port = dev->port; + struct mvs_slot_info *slot; + void *buf_prd; + struct ssp_frame_hdr *ssp_hdr; + void *buf_tmp; + u8 *buf_cmd, *buf_oaf; + dma_addr_t buf_tmp_dma; + u32 flags; + u32 resp_len, req_len, i, tag = tei->tag; + const u32 max_resp_len = SB_RFB_MAX; + u32 phy_mask; + + slot = &mvi->slot_info[tag]; + + phy_mask = ((port->wide_port_phymap) ? 
port->wide_port_phymap : + sas_port->phy_mask) & TXQ_PHY_MASK; + + slot->tx = mvi->tx_prod; + mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | + (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | + (phy_mask << TXQ_PHY_SHIFT)); + + flags = MCH_RETRY; + if (is_tmf) + flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT); + else + flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT); + + hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT)); + hdr->tags = cpu_to_le32(tag); + hdr->data_len = cpu_to_le32(task->total_xfer_len); + + /* + * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs + */ + + /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ + buf_cmd = buf_tmp = slot->buf; + buf_tmp_dma = slot->buf_dma; + + hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_SSP_CMD_SZ; + buf_tmp_dma += MVS_SSP_CMD_SZ; + + /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ + buf_oaf = buf_tmp; + hdr->open_frame = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_OAF_SZ; + buf_tmp_dma += MVS_OAF_SZ; + + /* region 3: PRD table ********************************************* */ + buf_prd = buf_tmp; + if (tei->n_elem) + hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); + else + hdr->prd_tbl = 0; + + i = MVS_CHIP_DISP->prd_size() * tei->n_elem; + buf_tmp += i; + buf_tmp_dma += i; + + /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ + slot->response = buf_tmp; + hdr->status_buf = cpu_to_le64(buf_tmp_dma); + if (mvi->flags & MVF_FLAG_SOC) + hdr->reserved[0] = 0; + + resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - + sizeof(struct mvs_err_info) - i; + resp_len = min(resp_len, max_resp_len); + + req_len = sizeof(struct ssp_frame_hdr) + 28; + + /* request, response lengths */ + hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); + + /* generate open address frame hdr (first 12 bytes) */ + /* initiator, SSP, ftype 1h */ + buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1; + buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf; + *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); + memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); + + /* fill in SSP frame header (Command Table.SSP frame header) */ + ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; + + if (is_tmf) + ssp_hdr->frame_type = SSP_TASK; + else + ssp_hdr->frame_type = SSP_COMMAND; + + memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr, + HASHED_SAS_ADDR_SIZE); + memcpy(ssp_hdr->hashed_src_addr, + dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + ssp_hdr->tag = cpu_to_be16(tag); + + /* fill in IU for TASK and Command Frame */ + buf_cmd += sizeof(*ssp_hdr); + memcpy(buf_cmd, &task->ssp_task.LUN, 8); + + if (ssp_hdr->frame_type != SSP_TASK) { + buf_cmd[9] = task->ssp_task.task_attr; + memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, + task->ssp_task.cmd->cmd_len); + } else{ + buf_cmd[10] = tmf->tmf; + switch (tmf->tmf) { + case TMF_ABORT_TASK: + case TMF_QUERY_TASK: + buf_cmd[12] = + (tmf->tag_of_task_to_be_managed >> 8) & 0xff; + buf_cmd[13] = + tmf->tag_of_task_to_be_managed & 0xff; + break; + default: + break; + } + } + /* fill in PRD (scatter/gather) table, if any */ + MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); + return 0; +} + +#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED))) +static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf, + struct sas_tmf_task *tmf, int *pass) +{ + struct domain_device *dev = task->dev; + struct mvs_device *mvi_dev = dev->lldd_dev; + struct 
mvs_task_exec_info tei; + struct mvs_slot_info *slot; + u32 tag = 0xdeadbeef, n_elem = 0; + struct request *rq; + int rc = 0; + + if (!dev->port) { + struct task_status_struct *tsm = &task->task_status; + + tsm->resp = SAS_TASK_UNDELIVERED; + tsm->stat = SAS_PHY_DOWN; + /* + * libsas will use dev->port, should + * not call task_done for sata + */ + if (dev->dev_type != SAS_SATA_DEV) + task->task_done(task); + return rc; + } + + if (DEV_IS_GONE(mvi_dev)) { + if (mvi_dev) + mv_dprintk("device %d not ready.\n", + mvi_dev->device_id); + else + mv_dprintk("device %016llx not ready.\n", + SAS_ADDR(dev->sas_addr)); + + rc = SAS_PHY_DOWN; + return rc; + } + tei.port = dev->port->lldd_port; + if (tei.port && !tei.port->port_attached && !tmf) { + if (sas_protocol_ata(task->task_proto)) { + struct task_status_struct *ts = &task->task_status; + mv_dprintk("SATA/STP port %d does not attach" + "device.\n", dev->port->id); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PHY_DOWN; + + task->task_done(task); + + } else { + struct task_status_struct *ts = &task->task_status; + mv_dprintk("SAS port %d does not attach" + "device.\n", dev->port->id); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + task->task_done(task); + } + return rc; + } + + if (!sas_protocol_ata(task->task_proto)) { + if (task->num_scatter) { + n_elem = dma_map_sg(mvi->dev, + task->scatter, + task->num_scatter, + task->data_dir); + if (!n_elem) { + rc = -ENOMEM; + goto prep_out; + } + } + } else { + n_elem = task->num_scatter; + } + + rq = sas_task_find_rq(task); + if (rq) { + tag = rq->tag + MVS_RSVD_SLOTS; + } else { + rc = mvs_tag_alloc(mvi, &tag); + if (rc) + goto err_out; + } + + slot = &mvi->slot_info[tag]; + + task->lldd_task = NULL; + slot->n_elem = n_elem; + slot->slot_tag = tag; + + slot->buf = dma_pool_zalloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma); + if (!slot->buf) { + rc = -ENOMEM; + goto err_out_tag; + } + + tei.task = task; + tei.hdr = &mvi->slot[tag]; + tei.tag = tag; + tei.n_elem = n_elem; + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + rc = mvs_task_prep_smp(mvi, &tei); + break; + case SAS_PROTOCOL_SSP: + rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf); + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + rc = mvs_task_prep_ata(mvi, &tei); + break; + default: + dev_printk(KERN_ERR, mvi->dev, + "unknown sas_task proto: 0x%x\n", + task->task_proto); + rc = -EINVAL; + break; + } + + if (rc) { + mv_dprintk("rc is %x\n", rc); + goto err_out_slot_buf; + } + slot->task = task; + slot->port = tei.port; + task->lldd_task = slot; + list_add_tail(&slot->entry, &tei.port->list); + + mvi_dev->running_req++; + ++(*pass); + mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); + + return rc; + +err_out_slot_buf: + dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma); +err_out_tag: + mvs_tag_free(mvi, tag); +err_out: + + dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc); + if (!sas_protocol_ata(task->task_proto)) + if (n_elem) + dma_unmap_sg(mvi->dev, task->scatter, n_elem, + task->data_dir); +prep_out: + return rc; +} + +int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags) +{ + struct mvs_info *mvi = NULL; + u32 rc = 0; + u32 pass = 0; + unsigned long flags = 0; + struct sas_tmf_task *tmf = task->tmf; + int is_tmf = !!task->tmf; + + mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info; + + spin_lock_irqsave(&mvi->lock, flags); + rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass); + if (rc) + dev_printk(KERN_ERR, 
mvi->dev, "mvsas exec failed[%d]!\n", rc); + + if (likely(pass)) + MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) & + (MVS_CHIP_SLOT_SZ - 1)); + spin_unlock_irqrestore(&mvi->lock, flags); + + return rc; +} + +static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) +{ + u32 slot_idx = rx_desc & RXQ_SLOT_MASK; + mvs_tag_free(mvi, slot_idx); +} + +static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, + struct mvs_slot_info *slot, u32 slot_idx) +{ + if (!slot) + return; + if (!slot->task) + return; + if (!sas_protocol_ata(task->task_proto)) + if (slot->n_elem) + dma_unmap_sg(mvi->dev, task->scatter, + slot->n_elem, task->data_dir); + + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); + dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + break; + + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SSP: + default: + /* do nothing */ + break; + } + + if (slot->buf) { + dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma); + slot->buf = NULL; + } + list_del_init(&slot->entry); + task->lldd_task = NULL; + slot->task = NULL; + slot->port = NULL; + slot->slot_tag = 0xFFFFFFFF; + mvs_slot_free(mvi, slot_idx); +} + +static void mvs_update_wideport(struct mvs_info *mvi, int phy_no) +{ + struct mvs_phy *phy = &mvi->phy[phy_no]; + struct mvs_port *port = phy->port; + int j, no; + + for_each_phy(port->wide_port_phymap, j, no) { + if (j & 1) { + MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, + PHYR_WIDE_PORT); + MVS_CHIP_DISP->write_port_cfg_data(mvi, no, + port->wide_port_phymap); + } else { + MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, + PHYR_WIDE_PORT); + MVS_CHIP_DISP->write_port_cfg_data(mvi, no, + 0); + } + } +} + +static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) +{ + u32 tmp; + struct mvs_phy *phy = &mvi->phy[i]; + struct mvs_port *port = phy->port; + + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i); + if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { + if (!port) + phy->phy_attached = 1; + return tmp; + } + + if (port) { + if (phy->phy_type & PORT_TYPE_SAS) { + port->wide_port_phymap &= ~(1U << i); + if (!port->wide_port_phymap) + port->port_attached = 0; + mvs_update_wideport(mvi, i); + } else if (phy->phy_type & PORT_TYPE_SATA) + port->port_attached = 0; + phy->port = NULL; + phy->phy_attached = 0; + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + } + return 0; +} + +static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) +{ + u32 *s = (u32 *) buf; + + if (!s) + return NULL; + + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); + s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); + + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); + s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); + + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); + s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); + + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); + s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); + + if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) + s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); + + return s; +} + +static u32 mvs_is_sig_fis_received(u32 irq_status) +{ + return irq_status & PHYEV_SIG_FIS; +} + +static void mvs_sig_remove_timer(struct mvs_phy *phy) +{ + if (phy->timer.function) + del_timer(&phy->timer); + phy->timer.function = NULL; +} + +void mvs_update_phyinfo(struct mvs_info *mvi, int i, int 
get_st) +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct sas_identify_frame *id; + + id = (struct sas_identify_frame *)phy->frame_rcvd; + + if (get_st) { + phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i); + phy->phy_status = mvs_is_phy_ready(mvi, i); + } + + if (phy->phy_status) { + int oob_done = 0; + struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy; + + oob_done = MVS_CHIP_DISP->oob_done(mvi, i); + + MVS_CHIP_DISP->fix_phy_info(mvi, i, id); + if (phy->phy_type & PORT_TYPE_SATA) { + phy->identify.target_port_protocols = SAS_PROTOCOL_STP; + if (mvs_is_sig_fis_received(phy->irq_status)) { + mvs_sig_remove_timer(phy); + phy->phy_attached = 1; + phy->att_dev_sas_addr = + i + mvi->id * mvi->chip->n_phy; + if (oob_done) + sas_phy->oob_mode = SATA_OOB_MODE; + phy->frame_rcvd_size = + sizeof(struct dev_to_host_fis); + mvs_get_d2h_reg(mvi, i, id); + } else { + u32 tmp; + dev_printk(KERN_DEBUG, mvi->dev, + "Phy%d : No sig fis\n", i); + tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i); + MVS_CHIP_DISP->write_port_irq_mask(mvi, i, + tmp | PHYEV_SIG_FIS); + phy->phy_attached = 0; + phy->phy_type &= ~PORT_TYPE_SATA; + goto out_done; + } + } else if (phy->phy_type & PORT_TYPE_SAS + || phy->att_dev_info & PORT_SSP_INIT_MASK) { + phy->phy_attached = 1; + phy->identify.device_type = + phy->att_dev_info & PORT_DEV_TYPE_MASK; + + if (phy->identify.device_type == SAS_END_DEVICE) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != SAS_PHY_UNUSED) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SMP; + if (oob_done) + sas_phy->oob_mode = SAS_OOB_MODE; + phy->frame_rcvd_size = + sizeof(struct sas_identify_frame); + } + memcpy(sas_phy->attached_sas_addr, + &phy->att_dev_sas_addr, SAS_ADDR_SIZE); + + if (MVS_CHIP_DISP->phy_work_around) + MVS_CHIP_DISP->phy_work_around(mvi, i); + } + mv_dprintk("phy %d attach dev info is %x\n", + i + mvi->id * mvi->chip->n_phy, phy->att_dev_info); + mv_dprintk("phy %d attach sas addr is %llx\n", + i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr); +out_done: + if (get_st) + MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status); +} + +static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock) +{ + struct sas_ha_struct *sas_ha = sas_phy->ha; + struct mvs_info *mvi = NULL; int i = 0, hi; + struct mvs_phy *phy = sas_phy->lldd_phy; + struct asd_sas_port *sas_port = sas_phy->port; + struct mvs_port *port; + unsigned long flags = 0; + if (!sas_port) + return; + + while (sas_ha->sas_phy[i]) { + if (sas_ha->sas_phy[i] == sas_phy) + break; + i++; + } + hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy; + mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi]; + if (i >= mvi->chip->n_phy) + port = &mvi->port[i - mvi->chip->n_phy]; + else + port = &mvi->port[i]; + if (lock) + spin_lock_irqsave(&mvi->lock, flags); + port->port_attached = 1; + phy->port = port; + sas_port->lldd_port = port; + if (phy->phy_type & PORT_TYPE_SAS) { + port->wide_port_phymap = sas_port->phy_mask; + mv_printk("set wide port phy map %x\n", sas_port->phy_mask); + mvs_update_wideport(mvi, sas_phy->id); + + /* direct attached SAS device */ + if (phy->att_dev_info & PORT_SSP_TRGT_MASK) { + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); + MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x04); + } + } + if (lock) + spin_unlock_irqrestore(&mvi->lock, flags); +} + +static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock) +{ + struct domain_device *dev; + struct mvs_phy *phy = sas_phy->lldd_phy; + struct 
mvs_info *mvi = phy->mvi; + struct asd_sas_port *port = sas_phy->port; + int phy_no = 0; + + while (phy != &mvi->phy[phy_no]) { + phy_no++; + if (phy_no >= MVS_MAX_PHYS) + return; + } + list_for_each_entry(dev, &port->dev_list, dev_list_node) + mvs_do_release_task(phy->mvi, phy_no, dev); + +} + + +void mvs_port_formed(struct asd_sas_phy *sas_phy) +{ + mvs_port_notify_formed(sas_phy, 1); +} + +void mvs_port_deformed(struct asd_sas_phy *sas_phy) +{ + mvs_port_notify_deformed(sas_phy, 1); +} + +static struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi) +{ + u32 dev; + for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { + if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) { + mvi->devices[dev].device_id = dev; + return &mvi->devices[dev]; + } + } + + if (dev == MVS_MAX_DEVICES) + mv_printk("max support %d devices, ignore ..\n", + MVS_MAX_DEVICES); + + return NULL; +} + +static void mvs_free_dev(struct mvs_device *mvi_dev) +{ + u32 id = mvi_dev->device_id; + memset(mvi_dev, 0, sizeof(*mvi_dev)); + mvi_dev->device_id = id; + mvi_dev->dev_type = SAS_PHY_UNUSED; + mvi_dev->dev_status = MVS_DEV_NORMAL; + mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; +} + +static int mvs_dev_found_notify(struct domain_device *dev, int lock) +{ + unsigned long flags = 0; + int res = 0; + struct mvs_info *mvi = NULL; + struct domain_device *parent_dev = dev->parent; + struct mvs_device *mvi_device; + + mvi = mvs_find_dev_mvi(dev); + + if (lock) + spin_lock_irqsave(&mvi->lock, flags); + + mvi_device = mvs_alloc_dev(mvi); + if (!mvi_device) { + res = -1; + goto found_out; + } + dev->lldd_dev = mvi_device; + mvi_device->dev_status = MVS_DEV_NORMAL; + mvi_device->dev_type = dev->dev_type; + mvi_device->mvi_info = mvi; + mvi_device->sas_device = dev; + if (parent_dev && dev_is_expander(parent_dev->dev_type)) { + int phy_id; + + phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev); + if (phy_id < 0) { + mv_printk("Error: no attached dev:%016llx" + "at ex:%016llx.\n", + SAS_ADDR(dev->sas_addr), + SAS_ADDR(parent_dev->sas_addr)); + res = phy_id; + } else { + mvi_device->attached_phy = phy_id; + } + } + +found_out: + if (lock) + spin_unlock_irqrestore(&mvi->lock, flags); + return res; +} + +int mvs_dev_found(struct domain_device *dev) +{ + return mvs_dev_found_notify(dev, 1); +} + +static void mvs_dev_gone_notify(struct domain_device *dev) +{ + unsigned long flags = 0; + struct mvs_device *mvi_dev = dev->lldd_dev; + struct mvs_info *mvi; + + if (!mvi_dev) { + mv_dprintk("found dev has gone.\n"); + return; + } + + mvi = mvi_dev->mvi_info; + + spin_lock_irqsave(&mvi->lock, flags); + + mv_dprintk("found dev[%d:%x] is gone.\n", + mvi_dev->device_id, mvi_dev->dev_type); + mvs_release_task(mvi, dev); + mvs_free_reg_set(mvi, mvi_dev); + mvs_free_dev(mvi_dev); + + dev->lldd_dev = NULL; + mvi_dev->sas_device = NULL; + + spin_unlock_irqrestore(&mvi->lock, flags); +} + + +void mvs_dev_gone(struct domain_device *dev) +{ + mvs_dev_gone_notify(dev); +} + +/* Standard mandates link reset for ATA (type 0) + and hard reset for SSP (type 1) , only for RECOVERY */ +static int mvs_debug_I_T_nexus_reset(struct domain_device *dev) +{ + int rc; + struct sas_phy *phy = sas_get_local_phy(dev); + int reset_type = (dev->dev_type == SAS_SATA_DEV || + (dev->tproto & SAS_PROTOCOL_STP)) ? 
0 : 1; + rc = sas_phy_reset(phy, reset_type); + sas_put_local_phy(phy); + msleep(2000); + return rc; +} + +/* mandatory SAM-3 */ +int mvs_lu_reset(struct domain_device *dev, u8 *lun) +{ + unsigned long flags; + int rc = TMF_RESP_FUNC_FAILED; + struct mvs_device * mvi_dev = dev->lldd_dev; + struct mvs_info *mvi = mvi_dev->mvi_info; + + mvi_dev->dev_status = MVS_DEV_EH; + rc = sas_lu_reset(dev, lun); + if (rc == TMF_RESP_FUNC_COMPLETE) { + spin_lock_irqsave(&mvi->lock, flags); + mvs_release_task(mvi, dev); + spin_unlock_irqrestore(&mvi->lock, flags); + } + /* If failed, fall-through I_T_Nexus reset */ + mv_printk("%s for device[%x]:rc= %d\n", __func__, + mvi_dev->device_id, rc); + return rc; +} + +int mvs_I_T_nexus_reset(struct domain_device *dev) +{ + unsigned long flags; + int rc = TMF_RESP_FUNC_FAILED; + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; + struct mvs_info *mvi = mvi_dev->mvi_info; + + if (mvi_dev->dev_status != MVS_DEV_EH) + return TMF_RESP_FUNC_COMPLETE; + else + mvi_dev->dev_status = MVS_DEV_NORMAL; + rc = mvs_debug_I_T_nexus_reset(dev); + mv_printk("%s for device[%x]:rc= %d\n", + __func__, mvi_dev->device_id, rc); + + spin_lock_irqsave(&mvi->lock, flags); + mvs_release_task(mvi, dev); + spin_unlock_irqrestore(&mvi->lock, flags); + + return rc; +} +/* optional SAM-3 */ +int mvs_query_task(struct sas_task *task) +{ + u32 tag; + int rc = TMF_RESP_FUNC_FAILED; + + if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { + struct domain_device *dev = task->dev; + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; + struct mvs_info *mvi = mvi_dev->mvi_info; + + rc = mvs_find_tag(mvi, task, &tag); + if (rc == 0) { + rc = TMF_RESP_FUNC_FAILED; + return rc; + } + + rc = sas_query_task(task, tag); + switch (rc) { + /* The task is still in Lun, release it then */ + case TMF_RESP_FUNC_SUCC: + /* The task is not in Lun or failed, reset the phy */ + case TMF_RESP_FUNC_FAILED: + case TMF_RESP_FUNC_COMPLETE: + break; + } + } + mv_printk("%s:rc= %d\n", __func__, rc); + return rc; +} + +/* mandatory SAM-3, still need free task/slot info */ +int mvs_abort_task(struct sas_task *task) +{ + struct domain_device *dev = task->dev; + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; + struct mvs_info *mvi; + int rc = TMF_RESP_FUNC_FAILED; + unsigned long flags; + u32 tag; + + if (!mvi_dev) { + mv_printk("Device has removed\n"); + return TMF_RESP_FUNC_FAILED; + } + + mvi = mvi_dev->mvi_info; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + rc = TMF_RESP_FUNC_COMPLETE; + goto out; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + mvi_dev->dev_status = MVS_DEV_EH; + if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { + rc = mvs_find_tag(mvi, task, &tag); + if (rc == 0) { + mv_printk("No such tag in %s\n", __func__); + rc = TMF_RESP_FUNC_FAILED; + return rc; + } + + rc = sas_abort_task(task, tag); + + /* if successful, clear the task and callback forwards.*/ + if (rc == TMF_RESP_FUNC_COMPLETE) { + u32 slot_no; + struct mvs_slot_info *slot; + + if (task->lldd_task) { + slot = task->lldd_task; + slot_no = (u32) (slot - mvi->slot_info); + spin_lock_irqsave(&mvi->lock, flags); + mvs_slot_complete(mvi, slot_no, 1); + spin_unlock_irqrestore(&mvi->lock, flags); + } + } + + } else if (task->task_proto & SAS_PROTOCOL_SATA || + task->task_proto & SAS_PROTOCOL_STP) { + if (SAS_SATA_DEV == dev->dev_type) { + struct 
mvs_slot_info *slot = task->lldd_task; + u32 slot_idx = (u32)(slot - mvi->slot_info); + mv_dprintk("mvs_abort_task() mvi=%p task=%p " + "slot=%p slot_idx=x%x\n", + mvi, task, slot, slot_idx); + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + mvs_slot_task_free(mvi, task, slot, slot_idx); + rc = TMF_RESP_FUNC_COMPLETE; + goto out; + } + + } +out: + if (rc != TMF_RESP_FUNC_COMPLETE) + mv_printk("%s:rc= %d\n", __func__, rc); + return rc; +} + +static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, + u32 slot_idx, int err) +{ + struct mvs_device *mvi_dev = task->dev->lldd_dev; + struct task_status_struct *tstat = &task->task_status; + struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; + int stat = SAM_STAT_GOOD; + + + resp->frame_len = sizeof(struct dev_to_host_fis); + memcpy(&resp->ending_fis[0], + SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset), + sizeof(struct dev_to_host_fis)); + tstat->buf_valid_size = sizeof(*resp); + if (unlikely(err)) { + if (unlikely(err & CMD_ISS_STPD)) + stat = SAS_OPEN_REJECT; + else + stat = SAS_PROTO_RESPONSE; + } + + return stat; +} + +static void mvs_set_sense(u8 *buffer, int len, int d_sense, + int key, int asc, int ascq) +{ + memset(buffer, 0, len); + + if (d_sense) { + /* Descriptor format */ + if (len < 4) { + mv_printk("Length %d of sense buffer too small to " + "fit sense %x:%x:%x", len, key, asc, ascq); + } + + buffer[0] = 0x72; /* Response Code */ + if (len > 1) + buffer[1] = key; /* Sense Key */ + if (len > 2) + buffer[2] = asc; /* ASC */ + if (len > 3) + buffer[3] = ascq; /* ASCQ */ + } else { + if (len < 14) { + mv_printk("Length %d of sense buffer too small to " + "fit sense %x:%x:%x", len, key, asc, ascq); + } + + buffer[0] = 0x70; /* Response Code */ + if (len > 2) + buffer[2] = key; /* Sense Key */ + if (len > 7) + buffer[7] = 0x0a; /* Additional Sense Length */ + if (len > 12) + buffer[12] = asc; /* ASC */ + if (len > 13) + buffer[13] = ascq; /* ASCQ */ + } + + return; +} + +static void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu, + u8 key, u8 asc, u8 asc_q) +{ + iu->datapres = SAS_DATAPRES_SENSE_DATA; + iu->response_data_len = 0; + iu->sense_data_len = 17; + iu->status = 02; + mvs_set_sense(iu->sense_data, 17, 0, + key, asc, asc_q); +} + +static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, + u32 slot_idx) +{ + struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; + int stat; + u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response); + u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1)); + u32 tfs = 0; + enum mvs_port_type type = PORT_TYPE_SAS; + + if (err_dw0 & CMD_ISS_STPD) + MVS_CHIP_DISP->issue_stop(mvi, type, tfs); + + MVS_CHIP_DISP->command_active(mvi, slot_idx); + + stat = SAM_STAT_CHECK_CONDITION; + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + { + stat = SAS_ABORTED_TASK; + if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) { + struct ssp_response_iu *iu = slot->response + + sizeof(struct mvs_err_info); + mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01); + sas_ssp_task_response(mvi->dev, task, iu); + stat = SAM_STAT_CHECK_CONDITION; + } + if (err_dw1 & bit(31)) + mv_printk("reuse same slot, retry command.\n"); + break; + } + case SAS_PROTOCOL_SMP: + stat = SAM_STAT_CHECK_CONDITION; + break; + + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + { + task->ata_task.use_ncq = 0; + stat = SAS_PROTO_RESPONSE; + mvs_sata_done(mvi, task, slot_idx, err_dw0); + } + break; + default: + break; + } + + return stat; +} + +int mvs_slot_complete(struct 
mvs_info *mvi, u32 rx_desc, u32 flags) +{ + u32 slot_idx = rx_desc & RXQ_SLOT_MASK; + struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; + struct sas_task *task = slot->task; + struct mvs_device *mvi_dev = NULL; + struct task_status_struct *tstat; + struct domain_device *dev; + u32 aborted; + + void *to; + enum exec_status sts; + + if (unlikely(!task || !task->lldd_task || !task->dev)) + return -1; + + tstat = &task->task_status; + dev = task->dev; + mvi_dev = dev->lldd_dev; + + spin_lock(&task->task_state_lock); + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags |= SAS_TASK_STATE_DONE; + /* race condition */ + aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; + spin_unlock(&task->task_state_lock); + + memset(tstat, 0, sizeof(*tstat)); + tstat->resp = SAS_TASK_COMPLETE; + + if (unlikely(aborted)) { + tstat->stat = SAS_ABORTED_TASK; + if (mvi_dev && mvi_dev->running_req) + mvi_dev->running_req--; + if (sas_protocol_ata(task->task_proto)) + mvs_free_reg_set(mvi, mvi_dev); + + mvs_slot_task_free(mvi, task, slot, slot_idx); + return -1; + } + + /* when no device is attached, go ahead and complete via error handling */ + if (unlikely(!mvi_dev || flags)) { + if (!mvi_dev) + mv_dprintk("port has no device.\n"); + tstat->stat = SAS_PHY_DOWN; + goto out; + } + + /* + * error info record present; slot->response is 32 bit aligned but may + * not be 64 bit aligned, so check for zero in two 32 bit reads + */ + if (unlikely((rx_desc & RXQ_ERR) + && (*((u32 *)slot->response) + || *(((u32 *)slot->response) + 1)))) { + mv_dprintk("port %d slot %d rx_desc %X has error info" + "%016llX.\n", slot->port->sas_port.id, slot_idx, + rx_desc, get_unaligned_le64(slot->response)); + tstat->stat = mvs_slot_err(mvi, task, slot_idx); + tstat->resp = SAS_TASK_COMPLETE; + goto out; + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + /* hw says status == 0, datapres == 0 */ + if (rx_desc & RXQ_GOOD) { + tstat->stat = SAS_SAM_STAT_GOOD; + tstat->resp = SAS_TASK_COMPLETE; + } + /* response frame present */ + else if (rx_desc & RXQ_RSP) { + struct ssp_response_iu *iu = slot->response + + sizeof(struct mvs_err_info); + sas_ssp_task_response(mvi->dev, task, iu); + } else + tstat->stat = SAS_SAM_STAT_CHECK_CONDITION; + break; + + case SAS_PROTOCOL_SMP: { + struct scatterlist *sg_resp = &task->smp_task.smp_resp; + tstat->stat = SAS_SAM_STAT_GOOD; + to = kmap_atomic(sg_page(sg_resp)); + memcpy(to + sg_resp->offset, + slot->response + sizeof(struct mvs_err_info), + sg_dma_len(sg_resp)); + kunmap_atomic(to); + break; + } + + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { + tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); + break; + } + + default: + tstat->stat = SAS_SAM_STAT_CHECK_CONDITION; + break; + } + if (!slot->port->port_attached) { + mv_dprintk("port %d has been removed.\n", slot->port->sas_port.id); + tstat->stat = SAS_PHY_DOWN; + } + + +out: + if (mvi_dev && mvi_dev->running_req) { + mvi_dev->running_req--; + if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req) + mvs_free_reg_set(mvi, mvi_dev); + } + mvs_slot_task_free(mvi, task, slot, slot_idx); + sts = tstat->stat; + + spin_unlock(&mvi->lock); + if (task->task_done) + task->task_done(task); + + spin_lock(&mvi->lock); + + return sts; +} + +void mvs_do_release_task(struct mvs_info *mvi, + int phy_no, struct domain_device *dev) +{ + u32 slot_idx; + struct mvs_phy *phy; + struct mvs_port *port; + struct mvs_slot_info *slot, *slot2; + + phy = &mvi->phy[phy_no]; + port = 
phy->port; + if (!port) + return; + /* clean cmpl queue in case request is already finished */ + mvs_int_rx(mvi, false); + + + + list_for_each_entry_safe(slot, slot2, &port->list, entry) { + struct sas_task *task; + slot_idx = (u32) (slot - mvi->slot_info); + task = slot->task; + + if (dev && task->dev != dev) + continue; + + mv_printk("Release slot [%x] tag[%x], task [%p]:\n", + slot_idx, slot->slot_tag, task); + MVS_CHIP_DISP->command_active(mvi, slot_idx); + + mvs_slot_complete(mvi, slot_idx, 1); + } +} + +void mvs_release_task(struct mvs_info *mvi, + struct domain_device *dev) +{ + int i, phyno[WIDE_PORT_MAX_PHY], num; + num = mvs_find_dev_phyno(dev, phyno); + for (i = 0; i < num; i++) + mvs_do_release_task(mvi, phyno[i], dev); +} + +static void mvs_phy_disconnected(struct mvs_phy *phy) +{ + phy->phy_attached = 0; + phy->att_dev_info = 0; + phy->att_dev_sas_addr = 0; +} + +static void mvs_work_queue(struct work_struct *work) +{ + struct delayed_work *dw = container_of(work, struct delayed_work, work); + struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q); + struct mvs_info *mvi = mwq->mvi; + unsigned long flags; + u32 phy_no = (unsigned long) mwq->data; + struct mvs_phy *phy = &mvi->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + spin_lock_irqsave(&mvi->lock, flags); + if (mwq->handler & PHY_PLUG_EVENT) { + + if (phy->phy_event & PHY_PLUG_OUT) { + u32 tmp; + + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no); + phy->phy_event &= ~PHY_PLUG_OUT; + if (!(tmp & PHY_READY_MASK)) { + sas_phy_disconnected(sas_phy); + mvs_phy_disconnected(phy); + sas_notify_phy_event(sas_phy, + PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC); + mv_dprintk("phy%d Removed Device\n", phy_no); + } else { + MVS_CHIP_DISP->detect_porttype(mvi, phy_no); + mvs_update_phyinfo(mvi, phy_no, 1); + mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC); + mvs_port_notify_formed(sas_phy, 0); + mv_dprintk("phy%d Attached Device\n", phy_no); + } + } + } else if (mwq->handler & EXP_BRCT_CHG) { + phy->phy_event &= ~EXP_BRCT_CHG; + sas_notify_port_event(sas_phy, + PORTE_BROADCAST_RCVD, GFP_ATOMIC); + mv_dprintk("phy%d Got Broadcast Change\n", phy_no); + } + list_del(&mwq->entry); + spin_unlock_irqrestore(&mvi->lock, flags); + kfree(mwq); +} + +static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler) +{ + struct mvs_wq *mwq; + int ret = 0; + + mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC); + if (mwq) { + mwq->mvi = mvi; + mwq->data = data; + mwq->handler = handler; + MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq); + list_add_tail(&mwq->entry, &mvi->wq_list); + schedule_delayed_work(&mwq->work_q, HZ * 2); + } else + ret = -ENOMEM; + + return ret; +} + +static void mvs_sig_time_out(struct timer_list *t) +{ + struct mvs_phy *phy = from_timer(phy, t, timer); + struct mvs_info *mvi = phy->mvi; + u8 phy_no; + + for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) { + if (&mvi->phy[phy_no] == phy) { + mv_dprintk("Get signature time out, reset phy %d\n", + phy_no+mvi->id*mvi->chip->n_phy); + MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET); + } + } +} + +void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) +{ + u32 tmp; + struct mvs_phy *phy = &mvi->phy[phy_no]; + + phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no); + MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status); + mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy, + MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no)); + mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy, + 
phy->irq_status); + + /* + * events is the port event now; + * we need to check the interrupt status that belongs to each port. + */ + + if (phy->irq_status & PHYEV_DCDR_ERR) { + mv_dprintk("phy %d STP decoding error.\n", + phy_no + mvi->id*mvi->chip->n_phy); + } + + if (phy->irq_status & PHYEV_POOF) { + mdelay(500); + if (!(phy->phy_event & PHY_PLUG_OUT)) { + int dev_sata = phy->phy_type & PORT_TYPE_SATA; + int ready; + mvs_do_release_task(mvi, phy_no, NULL); + phy->phy_event |= PHY_PLUG_OUT; + MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1); + mvs_handle_event(mvi, + (void *)(unsigned long)phy_no, + PHY_PLUG_EVENT); + ready = mvs_is_phy_ready(mvi, phy_no); + if (ready || dev_sata) { + if (MVS_CHIP_DISP->stp_reset) + MVS_CHIP_DISP->stp_reset(mvi, + phy_no); + else + MVS_CHIP_DISP->phy_reset(mvi, + phy_no, MVS_SOFT_RESET); + return; + } + } + } + + if (phy->irq_status & PHYEV_COMWAKE) { + tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no); + MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no, + tmp | PHYEV_SIG_FIS); + if (phy->timer.function == NULL) { + phy->timer.function = mvs_sig_time_out; + phy->timer.expires = jiffies + 5*HZ; + add_timer(&phy->timer); + } + } + if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { + phy->phy_status = mvs_is_phy_ready(mvi, phy_no); + mv_dprintk("notify plug in on phy[%d]\n", phy_no); + if (phy->phy_status) { + mdelay(10); + MVS_CHIP_DISP->detect_porttype(mvi, phy_no); + if (phy->phy_type & PORT_TYPE_SATA) { + tmp = MVS_CHIP_DISP->read_port_irq_mask( + mvi, phy_no); + tmp &= ~PHYEV_SIG_FIS; + MVS_CHIP_DISP->write_port_irq_mask(mvi, + phy_no, tmp); + } + mvs_update_phyinfo(mvi, phy_no, 0); + if (phy->phy_type & PORT_TYPE_SAS) { + MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE); + mdelay(10); + } + + mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC); + /* whether the driver is going to handle hot plug */ + if (phy->phy_event & PHY_PLUG_OUT) { + mvs_port_notify_formed(&phy->sas_phy, 0); + phy->phy_event &= ~PHY_PLUG_OUT; + } + } else { + mv_dprintk("plugin interrupt but phy%d is gone\n", + phy_no + mvi->id*mvi->chip->n_phy); + } + } else if (phy->irq_status & PHYEV_BROAD_CH) { + mv_dprintk("phy %d broadcast change.\n", + phy_no + mvi->id*mvi->chip->n_phy); + mvs_handle_event(mvi, (void *)(unsigned long)phy_no, + EXP_BRCT_CHG); + } +} + +int mvs_int_rx(struct mvs_info *mvi, bool self_clear) +{ + u32 rx_prod_idx, rx_desc; + bool attn = false; + + /* the first dword in the RX ring is special: it contains + * a mirror of the hardware's RX producer index, so that + * we don't have to stall the CPU reading that register. + * The actual RX ring is offset by one dword, due to this. 
+ */ + rx_prod_idx = mvi->rx_cons; + mvi->rx_cons = le32_to_cpu(mvi->rx[0]); + if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ + return 0; + + /* The CMPL_Q may come late, read from register and try again + * note: if coalescing is enabled, + * it will need to read from register every time for sure + */ + if (unlikely(mvi->rx_cons == rx_prod_idx)) + mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK; + + if (mvi->rx_cons == rx_prod_idx) + return 0; + + while (mvi->rx_cons != rx_prod_idx) { + /* increment our internal RX consumer pointer */ + rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); + rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); + + if (likely(rx_desc & RXQ_DONE)) + mvs_slot_complete(mvi, rx_desc, 0); + if (rx_desc & RXQ_ATTN) { + attn = true; + } else if (rx_desc & RXQ_ERR) { + if (!(rx_desc & RXQ_DONE)) + mvs_slot_complete(mvi, rx_desc, 0); + } else if (rx_desc & RXQ_SLOT_RESET) { + mvs_slot_free(mvi, rx_desc); + } + } + + if (attn && self_clear) + MVS_CHIP_DISP->int_full(mvi); + return 0; +} + +int mvs_gpio_write(struct sas_ha_struct *sha, u8 reg_type, u8 reg_index, + u8 reg_count, u8 *write_data) +{ + struct mvs_prv_info *mvs_prv = sha->lldd_ha; + struct mvs_info *mvi = mvs_prv->mvi[0]; + + if (MVS_CHIP_DISP->gpio_write) { + return MVS_CHIP_DISP->gpio_write(mvs_prv, reg_type, + reg_index, reg_count, write_data); + } + + return -ENOSYS; +} diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h new file mode 100644 index 000000000..68df771e2 --- /dev/null +++ b/drivers/scsi/mvsas/mv_sas.h @@ -0,0 +1,456 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Marvell 88SE64xx/88SE94xx main function head file + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. +*/ + +#ifndef _MV_SAS_H_ +#define _MV_SAS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mv_defs.h" + +#define DRV_NAME "mvsas" +#define DRV_VERSION "0.8.16" +#define MVS_ID_NOT_MAPPED 0x7f +#define WIDE_PORT_MAX_PHY 4 +#define mv_printk(fmt, arg ...) \ + printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg) +#ifdef MV_DEBUG +#define mv_dprintk(format, arg...) \ + printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg) +#else +#define mv_dprintk(format, arg...) 
no_printk(format, ## arg) +#endif +#define MV_MAX_U32 0xffffffff + +extern int interrupt_coalescing; +extern struct mvs_tgt_initiator mvs_tgt; +extern struct mvs_info *tgt_mvi; +extern const struct mvs_dispatch mvs_64xx_dispatch; +extern const struct mvs_dispatch mvs_94xx_dispatch; + +#define bit(n) ((u64)1 << n) + +#define for_each_phy(__lseq_mask, __mc, __lseq) \ + for ((__mc) = (__lseq_mask), (__lseq) = 0; \ + (__mc) != 0 ; \ + (++__lseq), (__mc) >>= 1) + +#define MVS_PHY_ID (1U << sas_phy->id) +#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f) +#define UNASSOC_D2H_FIS(id) \ + ((void *) mvi->rx_fis + 0x100 * id) +#define SATA_RECEIVED_FIS_LIST(reg_set) \ + ((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set) +#define SATA_RECEIVED_SDB_FIS(reg_set) \ + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x58) +#define SATA_RECEIVED_D2H_FIS(reg_set) \ + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x40) +#define SATA_RECEIVED_PIO_FIS(reg_set) \ + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x20) +#define SATA_RECEIVED_DMA_FIS(reg_set) \ + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x00) + +enum dev_status { + MVS_DEV_NORMAL = 0x0, + MVS_DEV_EH = 0x1, +}; + +enum dev_reset { + MVS_SOFT_RESET = 0, + MVS_HARD_RESET = 1, + MVS_PHY_TUNE = 2, +}; + +struct mvs_info; +struct mvs_prv_info; + +struct mvs_dispatch { + char *name; + int (*chip_init)(struct mvs_info *mvi); + int (*spi_init)(struct mvs_info *mvi); + int (*chip_ioremap)(struct mvs_info *mvi); + void (*chip_iounmap)(struct mvs_info *mvi); + irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat); + u32 (*isr_status)(struct mvs_info *mvi, int irq); + void (*interrupt_enable)(struct mvs_info *mvi); + void (*interrupt_disable)(struct mvs_info *mvi); + + u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port); + void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val); + + u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port); + void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val); + void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr); + + u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port); + void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val); + void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr); + + u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port); + void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val); + + u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port); + void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val); + + void (*command_active)(struct mvs_info *mvi, u32 slot_idx); + void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all); + void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type, + u32 tfs); + void (*start_delivery)(struct mvs_info *mvi, u32 tx); + u32 (*rx_update)(struct mvs_info *mvi); + void (*int_full)(struct mvs_info *mvi); + u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs); + void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs); + u32 (*prd_size)(void); + u32 (*prd_count)(void); + void (*make_prd)(struct scatterlist *scatter, int nr, void *prd); + void (*detect_porttype)(struct mvs_info *mvi, int i); + int (*oob_done)(struct mvs_info *mvi, int i); + void (*fix_phy_info)(struct mvs_info *mvi, int i, + struct sas_identify_frame *id); + void (*phy_work_around)(struct mvs_info *mvi, int i); + void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id, + struct sas_phy_linkrates *rates); + u32 (*phy_max_link_rate)(void); + void (*phy_disable)(struct mvs_info *mvi, u32 phy_id); + void 
(*phy_enable)(struct mvs_info *mvi, u32 phy_id); + void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard); + void (*stp_reset)(struct mvs_info *mvi, u32 phy_id); + void (*clear_active_cmds)(struct mvs_info *mvi); + u32 (*spi_read_data)(struct mvs_info *mvi); + void (*spi_write_data)(struct mvs_info *mvi, u32 data); + int (*spi_buildcmd)(struct mvs_info *mvi, + u32 *dwCmd, + u8 cmd, + u8 read, + u8 length, + u32 addr + ); + int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd); + int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout); + void (*dma_fix)(struct mvs_info *mvi, u32 phy_mask, + int buf_len, int from, void *prd); + void (*tune_interrupt)(struct mvs_info *mvi, u32 time); + void (*non_spec_ncq_error)(struct mvs_info *mvi); + int (*gpio_write)(struct mvs_prv_info *mvs_prv, u8 reg_type, + u8 reg_index, u8 reg_count, u8 *write_data); + +}; + +struct mvs_chip_info { + u32 n_host; + u32 n_phy; + u32 fis_offs; + u32 fis_count; + u32 srs_sz; + u32 sg_width; + u32 slot_width; + const struct mvs_dispatch *dispatch; +}; +#define MVS_MAX_SG (1U << mvi->chip->sg_width) +#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) +#define MVS_RX_FISL_SZ \ + (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100)) +#define MVS_CHIP_DISP (mvi->chip->dispatch) + +struct mvs_err_info { + __le32 flags; + __le32 flags2; +}; + +struct mvs_cmd_hdr { + __le32 flags; /* PRD tbl len; SAS, SATA ctl */ + __le32 lens; /* cmd, max resp frame len */ + __le32 tags; /* targ port xfer tag; tag */ + __le32 data_len; /* data xfer len */ + __le64 cmd_tbl; /* command table address */ + __le64 open_frame; /* open addr frame address */ + __le64 status_buf; /* status buffer address */ + __le64 prd_tbl; /* PRD tbl address */ + __le32 reserved[4]; +}; + +struct mvs_port { + struct asd_sas_port sas_port; + u8 port_attached; + u8 wide_port_phymap; + struct list_head list; +}; + +struct mvs_phy { + struct mvs_info *mvi; + struct mvs_port *port; + struct asd_sas_phy sas_phy; + struct sas_identify identify; + struct scsi_device *sdev; + struct timer_list timer; + u64 dev_sas_addr; + u64 att_dev_sas_addr; + u32 att_dev_info; + u32 dev_info; + u32 phy_type; + u32 phy_status; + u32 irq_status; + u32 frame_rcvd_size; + u8 frame_rcvd[32]; + u8 phy_attached; + u8 phy_mode; + u8 reserved[2]; + u32 phy_event; + enum sas_linkrate minimum_linkrate; + enum sas_linkrate maximum_linkrate; +}; + +struct mvs_device { + struct list_head dev_entry; + enum sas_device_type dev_type; + struct mvs_info *mvi_info; + struct domain_device *sas_device; + u32 attached_phy; + u32 device_id; + u32 running_req; + u8 taskfileset; + u8 dev_status; + u16 reserved; +}; + +/* Generate PHY tuning parameters */ +struct phy_tuning { + /* 1 bit, transmitter emphasis enable */ + u8 trans_emp_en:1; + /* 4 bits, transmitter emphasis amplitude */ + u8 trans_emp_amp:4; + /* 3 bits, reserved space */ + u8 Reserved_2bit_1:3; + /* 5 bits, transmitter amplitude */ + u8 trans_amp:5; + /* 2 bits, transmitter amplitude adjust */ + u8 trans_amp_adj:2; + /* 1 bit, reserved space */ + u8 resv_2bit_2:1; + /* 2 bytes, reserved space */ + u8 reserved[2]; +}; + +struct ffe_control { + /* 4 bits, FFE Capacitor Select (value range 0~F) */ + u8 ffe_cap_sel:4; + /* 3 bits, FFE Resistor Select (value range 0~7) */ + u8 ffe_rss_sel:3; + /* 1 bit reserved */ + u8 reserved:1; +}; + +/* + * HBA_Info_Page is saved in Flash/NVRAM, total 256 bytes. + * The data area is valid only if Signature="MRVL". + * If any member is filled with 0xFF, the member is invalid. 
+ */ +struct hba_info_page { + /* Dword 0 */ + /* 4 bytes, structure signature, should be "MRVL" at first initial */ + u8 signature[4]; + + /* Dword 1-13 */ + u32 reserved1[13]; + + /* Dword 14-29 */ + /* 64 bytes, SAS address for each port */ + u64 sas_addr[8]; + + /* Dword 30-31 */ + /* 8 bytes for vanir 8 port PHY FFE setting + * BIT 0~3 : FFE Capacitor select(value range 0~F) + * BIT 4~6 : FFE Resistor select(value range 0~7) + * BIT 7: reserved. + */ + + struct ffe_control ffe_ctl[8]; + /* Dword 32 -43 */ + u32 reserved2[12]; + + /* Dword 44-45 */ + /* 8 bytes, 0: 1.5G, 1: 3.0G, should be 0x01 at first initial */ + u8 phy_rate[8]; + + /* Dword 46-53 */ + /* 32 bytes, PHY tuning parameters for each PHY */ + struct phy_tuning phy_tuning[8]; + + /* Dword 54-63 */ + u32 reserved3[10]; +}; /* total 256 bytes */ + +struct mvs_slot_info { + struct list_head entry; + union { + struct sas_task *task; + void *tdata; + }; + u32 n_elem; + u32 tx; + u32 slot_tag; + + /* DMA buffer for storing cmd tbl, open addr frame, status buffer, + * and PRD table + */ + void *buf; + dma_addr_t buf_dma; + void *response; + struct mvs_port *port; + struct mvs_device *device; + void *open_frame; +}; + +struct mvs_info { + unsigned long flags; + + /* host-wide lock */ + spinlock_t lock; + + /* our device */ + struct pci_dev *pdev; + struct device *dev; + + /* enhanced mode registers */ + void __iomem *regs; + + /* peripheral or soc registers */ + void __iomem *regs_ex; + u8 sas_addr[SAS_ADDR_SIZE]; + + /* SCSI/SAS glue */ + struct sas_ha_struct *sas; + struct Scsi_Host *shost; + + /* TX (delivery) DMA ring */ + __le32 *tx; + dma_addr_t tx_dma; + + /* cached next-producer idx */ + u32 tx_prod; + + /* RX (completion) DMA ring */ + __le32 *rx; + dma_addr_t rx_dma; + + /* RX consumer idx */ + u32 rx_cons; + + /* RX'd FIS area */ + __le32 *rx_fis; + dma_addr_t rx_fis_dma; + + /* DMA command header slots */ + struct mvs_cmd_hdr *slot; + dma_addr_t slot_dma; + + u32 chip_id; + const struct mvs_chip_info *chip; + + unsigned long *rsvd_tags; + /* further per-slot information */ + struct mvs_phy phy[MVS_MAX_PHYS]; + struct mvs_port port[MVS_MAX_PHYS]; + u32 id; + u64 sata_reg_set; + struct list_head *hba_list; + struct list_head soc_entry; + struct list_head wq_list; + unsigned long instance; + u16 flashid; + u32 flashsize; + u32 flashsectSize; + + void *addon; + struct hba_info_page hba_info_param; + struct mvs_device devices[MVS_MAX_DEVICES]; + void *bulk_buffer; + dma_addr_t bulk_buffer_dma; + void *bulk_buffer1; + dma_addr_t bulk_buffer_dma1; +#define TRASH_BUCKET_SIZE 0x20000 + void *dma_pool; + struct mvs_slot_info slot_info[]; +}; + +struct mvs_prv_info{ + u8 n_host; + u8 n_phy; + u8 scan_finished; + u8 reserve; + struct mvs_info *mvi[2]; + struct tasklet_struct mv_tasklet; +}; + +struct mvs_wq { + struct delayed_work work_q; + struct mvs_info *mvi; + void *data; + int handler; + struct list_head entry; +}; + +struct mvs_task_exec_info { + struct sas_task *task; + struct mvs_cmd_hdr *hdr; + struct mvs_port *port; + u32 tag; + int n_elem; +}; + +/******************** function prototypes *********************/ +void mvs_get_sas_addr(void *buf, u32 buflen); +void mvs_iounmap(void __iomem *regs); +int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex); +void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard); +int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata); +void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo, + u32 off_hi, u64 sas_addr); +void 
mvs_scan_start(struct Scsi_Host *shost); +int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time); +int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags); +int mvs_abort_task(struct sas_task *task); +void mvs_port_formed(struct asd_sas_phy *sas_phy); +void mvs_port_deformed(struct asd_sas_phy *sas_phy); +int mvs_dev_found(struct domain_device *dev); +void mvs_dev_gone(struct domain_device *dev); +int mvs_lu_reset(struct domain_device *dev, u8 *lun); +int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags); +int mvs_I_T_nexus_reset(struct domain_device *dev); +int mvs_query_task(struct sas_task *task); +void mvs_release_task(struct mvs_info *mvi, + struct domain_device *dev); +void mvs_do_release_task(struct mvs_info *mvi, int phy_no, + struct domain_device *dev); +void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events); +void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); +int mvs_int_rx(struct mvs_info *mvi, bool self_clear); +struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi, u8 reg_set); +int mvs_gpio_write(struct sas_ha_struct *, u8 reg_type, u8 reg_index, + u8 reg_count, u8 *write_data); +#endif + diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c new file mode 100644 index 000000000..d9d366ec1 --- /dev/null +++ b/drivers/scsi/mvumi.c @@ -0,0 +1,2632 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Marvell UMI driver + * + * Copyright 2011 Marvell. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mvumi.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("jyli@marvell.com"); +MODULE_DESCRIPTION("Marvell UMI Driver"); + +static const struct pci_device_id mvumi_pci_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, mvumi_pci_table); + +static void tag_init(struct mvumi_tag *st, unsigned short size) +{ + unsigned short i; + BUG_ON(size != st->size); + st->top = size; + for (i = 0; i < size; i++) + st->stack[i] = size - 1 - i; +} + +static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st) +{ + BUG_ON(st->top <= 0); + return st->stack[--st->top]; +} + +static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st, + unsigned short tag) +{ + BUG_ON(st->top >= st->size); + st->stack[st->top++] = tag; +} + +static bool tag_is_empty(struct mvumi_tag *st) +{ + if (st->top == 0) + return true; + else + return false; +} + +static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array) +{ + int i; + + for (i = 0; i < MAX_BASE_ADDRESS; i++) + if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) && + addr_array[i]) + pci_iounmap(dev, addr_array[i]); +} + +static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array) +{ + int i; + + for (i = 0; i < MAX_BASE_ADDRESS; i++) { + if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { + addr_array[i] = pci_iomap(dev, i, 0); + if (!addr_array[i]) { + dev_err(&dev->dev, "failed to map Bar[%d]\n", + i); + mvumi_unmap_pci_addr(dev, addr_array); + return -ENOMEM; + } + } else + addr_array[i] = NULL; + + dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]); + } + + return 0; +} + +static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, + enum resource_type type, unsigned int size) +{ + struct mvumi_res *res 
= kzalloc(sizeof(*res), GFP_ATOMIC); + + if (!res) { + dev_err(&mhba->pdev->dev, + "Failed to allocate memory for resource manager.\n"); + return NULL; + } + + switch (type) { + case RESOURCE_CACHED_MEMORY: + res->virt_addr = kzalloc(size, GFP_ATOMIC); + if (!res->virt_addr) { + dev_err(&mhba->pdev->dev, + "unable to allocate memory,size = %d.\n", size); + kfree(res); + return NULL; + } + break; + + case RESOURCE_UNCACHED_MEMORY: + size = round_up(size, 8); + res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, + &res->bus_addr, + GFP_KERNEL); + if (!res->virt_addr) { + dev_err(&mhba->pdev->dev, + "unable to allocate consistent mem," + "size = %d.\n", size); + kfree(res); + return NULL; + } + break; + + default: + dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type); + kfree(res); + return NULL; + } + + res->type = type; + res->size = size; + INIT_LIST_HEAD(&res->entry); + list_add_tail(&res->entry, &mhba->res_list); + + return res; +} + +static void mvumi_release_mem_resource(struct mvumi_hba *mhba) +{ + struct mvumi_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) { + switch (res->type) { + case RESOURCE_UNCACHED_MEMORY: + dma_free_coherent(&mhba->pdev->dev, res->size, + res->virt_addr, res->bus_addr); + break; + case RESOURCE_CACHED_MEMORY: + kfree(res->virt_addr); + break; + default: + dev_err(&mhba->pdev->dev, + "unknown resource type %d\n", res->type); + break; + } + list_del(&res->entry); + kfree(res); + } + mhba->fw_flag &= ~MVUMI_FW_ALLOC; +} + +/** + * mvumi_make_sgl - Prepares SGL + * @mhba: Adapter soft state + * @scmd: SCSI command from the mid-layer + * @sgl_p: SGL to be filled in + * @sg_count: return the number of SG elements + * + * If successful, this function returns 0. otherwise, it returns -1. 
+ */ +static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd, + void *sgl_p, unsigned char *sg_count) +{ + struct scatterlist *sg; + struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p; + unsigned int i; + unsigned int sgnum = scsi_sg_count(scmd); + dma_addr_t busaddr; + + *sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum, + scmd->sc_data_direction); + if (*sg_count > mhba->max_sge) { + dev_err(&mhba->pdev->dev, + "sg count[0x%x] is bigger than max sg[0x%x].\n", + *sg_count, mhba->max_sge); + dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum, + scmd->sc_data_direction); + return -1; + } + scsi_for_each_sg(scmd, sg, *sg_count, i) { + busaddr = sg_dma_address(sg); + m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); + m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); + m_sg->flags = 0; + sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg))); + if ((i + 1) == *sg_count) + m_sg->flags |= 1U << mhba->eot_flag; + + sgd_inc(mhba, m_sg); + } + + return 0; +} + +static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, + unsigned int size) +{ + struct mvumi_sgl *m_sg; + void *virt_addr; + dma_addr_t phy_addr; + + if (size == 0) + return 0; + + virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr, + GFP_KERNEL); + if (!virt_addr) + return -1; + + m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0]; + cmd->frame->sg_counts = 1; + cmd->data_buf = virt_addr; + + m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr)); + m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr)); + m_sg->flags = 1U << mhba->eot_flag; + sgd_setsz(mhba, m_sg, cpu_to_le32(size)); + + return 0; +} + +static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba, + unsigned int buf_size) +{ + struct mvumi_cmd *cmd; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n"); + return NULL; + } + INIT_LIST_HEAD(&cmd->queue_pointer); + + cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size, + &cmd->frame_phys, GFP_KERNEL); + if (!cmd->frame) { + dev_err(&mhba->pdev->dev, "failed to allocate memory for FW" + " frame, size = %d.\n", mhba->ib_max_size); + kfree(cmd); + return NULL; + } + + if (buf_size) { + if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) { + dev_err(&mhba->pdev->dev, "failed to allocate memory" + " for internal frame\n"); + dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size, + cmd->frame, cmd->frame_phys); + kfree(cmd); + return NULL; + } + } else + cmd->frame->sg_counts = 0; + + return cmd; +} + +static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba, + struct mvumi_cmd *cmd) +{ + struct mvumi_sgl *m_sg; + unsigned int size; + dma_addr_t phy_addr; + + if (cmd && cmd->frame) { + if (cmd->frame->sg_counts) { + m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0]; + sgd_getsz(mhba, m_sg, size); + + phy_addr = (dma_addr_t) m_sg->baseaddr_l | + (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16); + + dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf, + phy_addr); + } + dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size, + cmd->frame, cmd->frame_phys); + kfree(cmd); + } +} + +/** + * mvumi_get_cmd - Get a command from the free pool + * @mhba: Adapter soft state + * + * Returns a free command from the pool + */ +static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba) +{ + struct mvumi_cmd *cmd = NULL; + + if (likely(!list_empty(&mhba->cmd_pool))) { + cmd = list_entry((&mhba->cmd_pool)->next, + struct mvumi_cmd, queue_pointer); + 
list_del_init(&cmd->queue_pointer); + } else + dev_warn(&mhba->pdev->dev, "command pool is empty!\n"); + + return cmd; +} + +/** + * mvumi_return_cmd - Return a cmd to free command pool + * @mhba: Adapter soft state + * @cmd: Command packet to be returned to free command pool + */ +static inline void mvumi_return_cmd(struct mvumi_hba *mhba, + struct mvumi_cmd *cmd) +{ + cmd->scmd = NULL; + list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool); +} + +/** + * mvumi_free_cmds - Free all the cmds in the free cmd pool + * @mhba: Adapter soft state + */ +static void mvumi_free_cmds(struct mvumi_hba *mhba) +{ + struct mvumi_cmd *cmd; + + while (!list_empty(&mhba->cmd_pool)) { + cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, + queue_pointer); + list_del(&cmd->queue_pointer); + if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)) + kfree(cmd->frame); + kfree(cmd); + } +} + +/** + * mvumi_alloc_cmds - Allocates the command packets + * @mhba: Adapter soft state + * + */ +static int mvumi_alloc_cmds(struct mvumi_hba *mhba) +{ + int i; + struct mvumi_cmd *cmd; + + for (i = 0; i < mhba->max_io; i++) { + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + goto err_exit; + + INIT_LIST_HEAD(&cmd->queue_pointer); + list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool); + if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { + cmd->frame = mhba->ib_frame + i * mhba->ib_max_size; + cmd->frame_phys = mhba->ib_frame_phys + + i * mhba->ib_max_size; + } else + cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL); + if (!cmd->frame) + goto err_exit; + } + return 0; + +err_exit: + dev_err(&mhba->pdev->dev, + "failed to allocate memory for cmd[0x%x].\n", i); + while (!list_empty(&mhba->cmd_pool)) { + cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, + queue_pointer); + list_del(&cmd->queue_pointer); + if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)) + kfree(cmd->frame); + kfree(cmd); + } + return -ENOMEM; +} + +static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba) +{ + unsigned int ib_rp_reg; + struct mvumi_hw_regs *regs = mhba->regs; + + ib_rp_reg = ioread32(mhba->regs->inb_read_pointer); + + if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) == + (mhba->ib_cur_slot & regs->cl_slot_num_mask)) && + ((ib_rp_reg & regs->cl_pointer_toggle) + != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) { + dev_warn(&mhba->pdev->dev, "no free slot to use.\n"); + return 0; + } + if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) { + dev_warn(&mhba->pdev->dev, "firmware io overflow.\n"); + return 0; + } else { + return mhba->max_io - atomic_read(&mhba->fw_outstanding); + } +} + +static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba) +{ + unsigned int count; + if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1)) + return 0; + count = ioread32(mhba->ib_shadow); + if (count == 0xffff) + return 0; + return count; +} + +static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry) +{ + unsigned int cur_ib_entry; + + cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask; + cur_ib_entry++; + if (cur_ib_entry >= mhba->list_num_io) { + cur_ib_entry -= mhba->list_num_io; + mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle; + } + mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask; + mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask); + if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { + *ib_entry = mhba->ib_list + cur_ib_entry * + sizeof(struct mvumi_dyn_list_entry); + } else { + *ib_entry = mhba->ib_list + 
cur_ib_entry * mhba->ib_max_size; + } + atomic_inc(&mhba->fw_outstanding); +} + +static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba) +{ + iowrite32(0xffff, mhba->ib_shadow); + iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer); +} + +static char mvumi_check_ob_frame(struct mvumi_hba *mhba, + unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame) +{ + unsigned short tag, request_id; + + udelay(1); + p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size; + request_id = p_outb_frame->request_id; + tag = p_outb_frame->tag; + if (tag > mhba->tag_pool.size) { + dev_err(&mhba->pdev->dev, "ob frame data error\n"); + return -1; + } + if (mhba->tag_cmd[tag] == NULL) { + dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag); + return -1; + } else if (mhba->tag_cmd[tag]->request_id != request_id && + mhba->request_id_enabled) { + dev_err(&mhba->pdev->dev, "request ID from FW:0x%x," + "cmd request ID:0x%x\n", request_id, + mhba->tag_cmd[tag]->request_id); + return -1; + } + + return 0; +} + +static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba, + unsigned int *cur_obf, unsigned int *assign_obf_end) +{ + unsigned int ob_write, ob_write_shadow; + struct mvumi_hw_regs *regs = mhba->regs; + + do { + ob_write = ioread32(regs->outb_copy_pointer); + ob_write_shadow = ioread32(mhba->ob_shadow); + } while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow); + + *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask; + *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask; + + if ((ob_write & regs->cl_pointer_toggle) != + (mhba->ob_cur_slot & regs->cl_pointer_toggle)) { + *assign_obf_end += mhba->list_num_io; + } + return 0; +} + +static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba, + unsigned int *cur_obf, unsigned int *assign_obf_end) +{ + unsigned int ob_write; + struct mvumi_hw_regs *regs = mhba->regs; + + ob_write = ioread32(regs->outb_read_pointer); + ob_write = ioread32(regs->outb_copy_pointer); + *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask; + *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask; + if (*assign_obf_end < *cur_obf) + *assign_obf_end += mhba->list_num_io; + else if (*assign_obf_end == *cur_obf) + return -1; + return 0; +} + +static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba) +{ + unsigned int cur_obf, assign_obf_end, i; + struct mvumi_ob_data *ob_data; + struct mvumi_rsp_frame *p_outb_frame; + struct mvumi_hw_regs *regs = mhba->regs; + + if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end)) + return; + + for (i = (assign_obf_end - cur_obf); i != 0; i--) { + cur_obf++; + if (cur_obf >= mhba->list_num_io) { + cur_obf -= mhba->list_num_io; + mhba->ob_cur_slot ^= regs->cl_pointer_toggle; + } + + p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size; + + /* Copy pointer may point to entry in outbound list + * before entry has valid data + */ + if (unlikely(p_outb_frame->tag > mhba->tag_pool.size || + mhba->tag_cmd[p_outb_frame->tag] == NULL || + p_outb_frame->request_id != + mhba->tag_cmd[p_outb_frame->tag]->request_id)) + if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame)) + continue; + + if (!list_empty(&mhba->ob_data_list)) { + ob_data = (struct mvumi_ob_data *) + list_first_entry(&mhba->ob_data_list, + struct mvumi_ob_data, list); + list_del_init(&ob_data->list); + } else { + ob_data = NULL; + if (cur_obf == 0) { + cur_obf = mhba->list_num_io - 1; + mhba->ob_cur_slot ^= regs->cl_pointer_toggle; + } else + cur_obf -= 1; + break; + } + + memcpy(ob_data->data, p_outb_frame, 
mhba->ob_max_size); + p_outb_frame->tag = 0xff; + + list_add_tail(&ob_data->list, &mhba->free_ob_list); + } + mhba->ob_cur_slot &= ~regs->cl_slot_num_mask; + mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask); + iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer); +} + +static void mvumi_reset(struct mvumi_hba *mhba) +{ + struct mvumi_hw_regs *regs = mhba->regs; + + iowrite32(0, regs->enpointa_mask_reg); + if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE) + return; + + iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg); +} + +static unsigned char mvumi_start(struct mvumi_hba *mhba); + +static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba) +{ + mhba->fw_state = FW_STATE_ABORT; + mvumi_reset(mhba); + + if (mvumi_start(mhba)) + return FAILED; + else + return SUCCESS; +} + +static int mvumi_wait_for_fw(struct mvumi_hba *mhba) +{ + struct mvumi_hw_regs *regs = mhba->regs; + u32 tmp; + unsigned long before; + before = jiffies; + + iowrite32(0, regs->enpointa_mask_reg); + tmp = ioread32(regs->arm_to_pciea_msg1); + while (tmp != HANDSHAKE_READYSTATE) { + iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg); + if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) { + dev_err(&mhba->pdev->dev, + "FW reset failed [0x%x].\n", tmp); + return FAILED; + } + + msleep(500); + rmb(); + tmp = ioread32(regs->arm_to_pciea_msg1); + } + + return SUCCESS; +} + +static void mvumi_backup_bar_addr(struct mvumi_hba *mhba) +{ + unsigned char i; + + for (i = 0; i < MAX_BASE_ADDRESS; i++) { + pci_read_config_dword(mhba->pdev, 0x10 + i * 4, + &mhba->pci_base[i]); + } +} + +static void mvumi_restore_bar_addr(struct mvumi_hba *mhba) +{ + unsigned char i; + + for (i = 0; i < MAX_BASE_ADDRESS; i++) { + if (mhba->pci_base[i]) + pci_write_config_dword(mhba->pdev, 0x10 + i * 4, + mhba->pci_base[i]); + } +} + +static int mvumi_pci_set_master(struct pci_dev *pdev) +{ + int ret = 0; + + pci_set_master(pdev); + + if (IS_DMA64) { + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + } else + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + + return ret; +} + +static int mvumi_reset_host_9580(struct mvumi_hba *mhba) +{ + mhba->fw_state = FW_STATE_ABORT; + + iowrite32(0, mhba->regs->reset_enable); + iowrite32(0xf, mhba->regs->reset_request); + + iowrite32(0x10, mhba->regs->reset_enable); + iowrite32(0x10, mhba->regs->reset_request); + msleep(100); + pci_disable_device(mhba->pdev); + + if (pci_enable_device(mhba->pdev)) { + dev_err(&mhba->pdev->dev, "enable device failed\n"); + return FAILED; + } + if (mvumi_pci_set_master(mhba->pdev)) { + dev_err(&mhba->pdev->dev, "set master failed\n"); + return FAILED; + } + mvumi_restore_bar_addr(mhba); + if (mvumi_wait_for_fw(mhba) == FAILED) + return FAILED; + + return mvumi_wait_for_outstanding(mhba); +} + +static int mvumi_reset_host_9143(struct mvumi_hba *mhba) +{ + return mvumi_wait_for_outstanding(mhba); +} + +static int mvumi_host_reset(struct scsi_cmnd *scmd) +{ + struct mvumi_hba *mhba; + + mhba = (struct mvumi_hba *) scmd->device->host->hostdata; + + scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n", + scsi_cmd_to_rq(scmd)->tag, scmd->cmnd[0], scmd->retries); + + return mhba->instancet->reset_host(mhba); +} + +static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba, + struct mvumi_cmd *cmd) +{ + unsigned long flags; + + cmd->cmd_status = REQ_STATUS_PENDING; + + if (atomic_read(&cmd->sync_cmd)) { + dev_err(&mhba->pdev->dev, + "last blocked cmd not finished, sync_cmd = %d\n", + 
atomic_read(&cmd->sync_cmd)); + BUG_ON(1); + return -1; + } + atomic_inc(&cmd->sync_cmd); + spin_lock_irqsave(mhba->shost->host_lock, flags); + mhba->instancet->fire_cmd(mhba, cmd); + spin_unlock_irqrestore(mhba->shost->host_lock, flags); + + wait_event_timeout(mhba->int_cmd_wait_q, + (cmd->cmd_status != REQ_STATUS_PENDING), + MVUMI_INTERNAL_CMD_WAIT_TIME * HZ); + + /* command timeout */ + if (atomic_read(&cmd->sync_cmd)) { + spin_lock_irqsave(mhba->shost->host_lock, flags); + atomic_dec(&cmd->sync_cmd); + if (mhba->tag_cmd[cmd->frame->tag]) { + mhba->tag_cmd[cmd->frame->tag] = NULL; + dev_warn(&mhba->pdev->dev, "TIMEOUT: release tag [%d]\n", + cmd->frame->tag); + tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag); + } + if (!list_empty(&cmd->queue_pointer)) { + dev_warn(&mhba->pdev->dev, + "TIMEOUT: an internal command was not sent!\n"); + list_del_init(&cmd->queue_pointer); + } else + atomic_dec(&mhba->fw_outstanding); + + spin_unlock_irqrestore(mhba->shost->host_lock, flags); + } + return 0; +} + +static void mvumi_release_fw(struct mvumi_hba *mhba) +{ + mvumi_free_cmds(mhba); + mvumi_release_mem_resource(mhba); + mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); + dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE, + mhba->handshake_page, mhba->handshake_page_phys); + kfree(mhba->regs); + pci_release_regions(mhba->pdev); +} + +static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba) +{ + struct mvumi_cmd *cmd; + struct mvumi_msg_frame *frame; + unsigned char device_id, retry = 0; + unsigned char bitcount = sizeof(unsigned char) * 8; + + for (device_id = 0; device_id < mhba->max_target_id; device_id++) { + if (!(mhba->target_map[device_id / bitcount] & + (1 << (device_id % bitcount)))) + continue; +get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0); + if (!cmd) { + if (retry++ >= 5) { + dev_err(&mhba->pdev->dev, "failed to get memory" + " for internal flush cache cmd for " + "device %d", device_id); + retry = 0; + continue; + } else + goto get_cmd; + } + cmd->scmd = NULL; + cmd->cmd_status = REQ_STATUS_PENDING; + atomic_set(&cmd->sync_cmd, 0); + frame = cmd->frame; + frame->req_function = CL_FUN_SCSI_CMD; + frame->device_id = device_id; + frame->cmd_flag = CMD_FLAG_NON_DATA; + frame->data_transfer_length = 0; + frame->cdb_length = MAX_COMMAND_SIZE; + memset(frame->cdb, 0, MAX_COMMAND_SIZE); + frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC; + frame->cdb[1] = CDB_CORE_MODULE; + frame->cdb[2] = CDB_CORE_SHUTDOWN; + + mvumi_issue_blocked_cmd(mhba, cmd); + if (cmd->cmd_status != SAM_STAT_GOOD) { + dev_err(&mhba->pdev->dev, + "device %d flush cache failed, status=0x%x.\n", + device_id, cmd->cmd_status); + } + + mvumi_delete_internal_cmd(mhba, cmd); + } + return 0; +} + +static unsigned char +mvumi_calculate_checksum(struct mvumi_hs_header *p_header, + unsigned short len) +{ + unsigned char *ptr; + unsigned char ret = 0, i; + + ptr = (unsigned char *) p_header->frame_content; + for (i = 0; i < len; i++) { + ret ^= *ptr; + ptr++; + } + + return ret; +} + +static void mvumi_hs_build_page(struct mvumi_hba *mhba, + struct mvumi_hs_header *hs_header) +{ + struct mvumi_hs_page2 *hs_page2; + struct mvumi_hs_page4 *hs_page4; + struct mvumi_hs_page3 *hs_page3; + u64 time; + u64 local_time; + + switch (hs_header->page_code) { + case HS_PAGE_HOST_INFO: + hs_page2 = (struct mvumi_hs_page2 *) hs_header; + hs_header->frame_length = sizeof(*hs_page2) - 4; + memset(hs_header->frame_content, 0, hs_header->frame_length); + hs_page2->host_type = 3; /* 3 means Linux */ + if (mhba->hba_capability & 
HS_CAPABILITY_SUPPORT_DYN_SRC) + hs_page2->host_cap = 0x08; /* host dynamic source mode */ + hs_page2->host_ver.ver_major = VER_MAJOR; + hs_page2->host_ver.ver_minor = VER_MINOR; + hs_page2->host_ver.ver_oem = VER_OEM; + hs_page2->host_ver.ver_build = VER_BUILD; + hs_page2->system_io_bus = 0; + hs_page2->slot_number = 0; + hs_page2->intr_level = 0; + hs_page2->intr_vector = 0; + time = ktime_get_real_seconds(); + local_time = (time - (sys_tz.tz_minuteswest * 60)); + hs_page2->seconds_since1970 = local_time; + hs_header->checksum = mvumi_calculate_checksum(hs_header, + hs_header->frame_length); + break; + + case HS_PAGE_FIRM_CTL: + hs_page3 = (struct mvumi_hs_page3 *) hs_header; + hs_header->frame_length = sizeof(*hs_page3) - 4; + memset(hs_header->frame_content, 0, hs_header->frame_length); + hs_header->checksum = mvumi_calculate_checksum(hs_header, + hs_header->frame_length); + break; + + case HS_PAGE_CL_INFO: + hs_page4 = (struct mvumi_hs_page4 *) hs_header; + hs_header->frame_length = sizeof(*hs_page4) - 4; + memset(hs_header->frame_content, 0, hs_header->frame_length); + hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys); + hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys); + + hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys); + hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys); + hs_page4->ib_entry_size = mhba->ib_max_size_setting; + hs_page4->ob_entry_size = mhba->ob_max_size_setting; + if (mhba->hba_capability + & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) { + hs_page4->ob_depth = find_first_bit((unsigned long *) + &mhba->list_num_io, + BITS_PER_LONG); + hs_page4->ib_depth = find_first_bit((unsigned long *) + &mhba->list_num_io, + BITS_PER_LONG); + } else { + hs_page4->ob_depth = (u8) mhba->list_num_io; + hs_page4->ib_depth = (u8) mhba->list_num_io; + } + hs_header->checksum = mvumi_calculate_checksum(hs_header, + hs_header->frame_length); + break; + + default: + dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n", + hs_header->page_code); + break; + } +} + +/** + * mvumi_init_data - Initialize requested data for FW + * @mhba: Adapter soft state + */ +static int mvumi_init_data(struct mvumi_hba *mhba) +{ + struct mvumi_ob_data *ob_pool; + struct mvumi_res *res_mgnt; + unsigned int tmp_size, offset, i; + void *virmem, *v; + dma_addr_t p; + + if (mhba->fw_flag & MVUMI_FW_ALLOC) + return 0; + + tmp_size = mhba->ib_max_size * mhba->max_io; + if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) + tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io; + + tmp_size += 128 + mhba->ob_max_size * mhba->max_io; + tmp_size += 8 + sizeof(u32)*2 + 16; + + res_mgnt = mvumi_alloc_mem_resource(mhba, + RESOURCE_UNCACHED_MEMORY, tmp_size); + if (!res_mgnt) { + dev_err(&mhba->pdev->dev, + "failed to allocate memory for inbound list\n"); + goto fail_alloc_dma_buf; + } + + p = res_mgnt->bus_addr; + v = res_mgnt->virt_addr; + /* ib_list */ + offset = round_up(p, 128) - p; + p += offset; + v += offset; + mhba->ib_list = v; + mhba->ib_list_phys = p; + if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { + v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io; + p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io; + mhba->ib_frame = v; + mhba->ib_frame_phys = p; + } + v += mhba->ib_max_size * mhba->max_io; + p += mhba->ib_max_size * mhba->max_io; + + /* ib shadow */ + offset = round_up(p, 8) - p; + p += offset; + v += offset; + mhba->ib_shadow = v; + mhba->ib_shadow_phys = p; + p += sizeof(u32)*2; + v += sizeof(u32)*2; + /* ob shadow */ + if 
(mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) { + offset = round_up(p, 8) - p; + p += offset; + v += offset; + mhba->ob_shadow = v; + mhba->ob_shadow_phys = p; + p += 8; + v += 8; + } else { + offset = round_up(p, 4) - p; + p += offset; + v += offset; + mhba->ob_shadow = v; + mhba->ob_shadow_phys = p; + p += 4; + v += 4; + } + + /* ob list */ + offset = round_up(p, 128) - p; + p += offset; + v += offset; + + mhba->ob_list = v; + mhba->ob_list_phys = p; + + /* ob data pool */ + tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool)); + tmp_size = round_up(tmp_size, 8); + + res_mgnt = mvumi_alloc_mem_resource(mhba, + RESOURCE_CACHED_MEMORY, tmp_size); + if (!res_mgnt) { + dev_err(&mhba->pdev->dev, + "failed to allocate memory for outbound data buffer\n"); + goto fail_alloc_dma_buf; + } + virmem = res_mgnt->virt_addr; + + for (i = mhba->max_io; i != 0; i--) { + ob_pool = (struct mvumi_ob_data *) virmem; + list_add_tail(&ob_pool->list, &mhba->ob_data_list); + virmem += mhba->ob_max_size + sizeof(*ob_pool); + } + + tmp_size = sizeof(unsigned short) * mhba->max_io + + sizeof(struct mvumi_cmd *) * mhba->max_io; + tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) / + (sizeof(unsigned char) * 8); + + res_mgnt = mvumi_alloc_mem_resource(mhba, + RESOURCE_CACHED_MEMORY, tmp_size); + if (!res_mgnt) { + dev_err(&mhba->pdev->dev, + "failed to allocate memory for tag and target map\n"); + goto fail_alloc_dma_buf; + } + + virmem = res_mgnt->virt_addr; + mhba->tag_pool.stack = virmem; + mhba->tag_pool.size = mhba->max_io; + tag_init(&mhba->tag_pool, mhba->max_io); + virmem += sizeof(unsigned short) * mhba->max_io; + + mhba->tag_cmd = virmem; + virmem += sizeof(struct mvumi_cmd *) * mhba->max_io; + + mhba->target_map = virmem; + + mhba->fw_flag |= MVUMI_FW_ALLOC; + return 0; + +fail_alloc_dma_buf: + mvumi_release_mem_resource(mhba); + return -1; +} + +static int mvumi_hs_process_page(struct mvumi_hba *mhba, + struct mvumi_hs_header *hs_header) +{ + struct mvumi_hs_page1 *hs_page1; + unsigned char page_checksum; + + page_checksum = mvumi_calculate_checksum(hs_header, + hs_header->frame_length); + if (page_checksum != hs_header->checksum) { + dev_err(&mhba->pdev->dev, "checksum error\n"); + return -1; + } + + switch (hs_header->page_code) { + case HS_PAGE_FIRM_CAP: + hs_page1 = (struct mvumi_hs_page1 *) hs_header; + + mhba->max_io = hs_page1->max_io_support; + mhba->list_num_io = hs_page1->cl_inout_list_depth; + mhba->max_transfer_size = hs_page1->max_transfer_size; + mhba->max_target_id = hs_page1->max_devices_support; + mhba->hba_capability = hs_page1->capability; + mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size; + mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2; + + mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size; + mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2; + + dev_dbg(&mhba->pdev->dev, "FW version:%d\n", + hs_page1->fw_ver.ver_build); + + if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) + mhba->eot_flag = 22; + else + mhba->eot_flag = 27; + if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) + mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth; + break; + default: + dev_err(&mhba->pdev->dev, "handshake: page code error\n"); + return -1; + } + return 0; +} + +/** + * mvumi_handshake - Move the FW to READY state + * @mhba: Adapter soft state + * + * During the initialization, FW passes can potentially be in any one of + * several possible states. 
If the FW in operational, waiting-for-handshake + * states, driver must take steps to bring it to ready state. Otherwise, it + * has to wait for the ready state. + */ +static int mvumi_handshake(struct mvumi_hba *mhba) +{ + unsigned int hs_state, tmp, hs_fun; + struct mvumi_hs_header *hs_header; + struct mvumi_hw_regs *regs = mhba->regs; + + if (mhba->fw_state == FW_STATE_STARTING) + hs_state = HS_S_START; + else { + tmp = ioread32(regs->arm_to_pciea_msg0); + hs_state = HS_GET_STATE(tmp); + dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state); + if (HS_GET_STATUS(tmp) != HS_STATUS_OK) { + mhba->fw_state = FW_STATE_STARTING; + return -1; + } + } + + hs_fun = 0; + switch (hs_state) { + case HS_S_START: + mhba->fw_state = FW_STATE_HANDSHAKING; + HS_SET_STATUS(hs_fun, HS_STATUS_OK); + HS_SET_STATE(hs_fun, HS_S_RESET); + iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1); + iowrite32(hs_fun, regs->pciea_to_arm_msg0); + iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg); + break; + + case HS_S_RESET: + iowrite32(lower_32_bits(mhba->handshake_page_phys), + regs->pciea_to_arm_msg1); + iowrite32(upper_32_bits(mhba->handshake_page_phys), + regs->arm_to_pciea_msg1); + HS_SET_STATUS(hs_fun, HS_STATUS_OK); + HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR); + iowrite32(hs_fun, regs->pciea_to_arm_msg0); + iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg); + break; + + case HS_S_PAGE_ADDR: + case HS_S_QUERY_PAGE: + case HS_S_SEND_PAGE: + hs_header = (struct mvumi_hs_header *) mhba->handshake_page; + if (hs_header->page_code == HS_PAGE_FIRM_CAP) { + mhba->hba_total_pages = + ((struct mvumi_hs_page1 *) hs_header)->total_pages; + + if (mhba->hba_total_pages == 0) + mhba->hba_total_pages = HS_PAGE_TOTAL-1; + } + + if (hs_state == HS_S_QUERY_PAGE) { + if (mvumi_hs_process_page(mhba, hs_header)) { + HS_SET_STATE(hs_fun, HS_S_ABORT); + return -1; + } + if (mvumi_init_data(mhba)) { + HS_SET_STATE(hs_fun, HS_S_ABORT); + return -1; + } + } else if (hs_state == HS_S_PAGE_ADDR) { + hs_header->page_code = 0; + mhba->hba_total_pages = HS_PAGE_TOTAL-1; + } + + if ((hs_header->page_code + 1) <= mhba->hba_total_pages) { + hs_header->page_code++; + if (hs_header->page_code != HS_PAGE_FIRM_CAP) { + mvumi_hs_build_page(mhba, hs_header); + HS_SET_STATE(hs_fun, HS_S_SEND_PAGE); + } else + HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE); + } else + HS_SET_STATE(hs_fun, HS_S_END); + + HS_SET_STATUS(hs_fun, HS_STATUS_OK); + iowrite32(hs_fun, regs->pciea_to_arm_msg0); + iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg); + break; + + case HS_S_END: + /* Set communication list ISR */ + tmp = ioread32(regs->enpointa_mask_reg); + tmp |= regs->int_comaout | regs->int_comaerr; + iowrite32(tmp, regs->enpointa_mask_reg); + iowrite32(mhba->list_num_io, mhba->ib_shadow); + /* Set InBound List Available count shadow */ + iowrite32(lower_32_bits(mhba->ib_shadow_phys), + regs->inb_aval_count_basel); + iowrite32(upper_32_bits(mhba->ib_shadow_phys), + regs->inb_aval_count_baseh); + + if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) { + /* Set OutBound List Available count shadow */ + iowrite32((mhba->list_num_io-1) | + regs->cl_pointer_toggle, + mhba->ob_shadow); + iowrite32(lower_32_bits(mhba->ob_shadow_phys), + regs->outb_copy_basel); + iowrite32(upper_32_bits(mhba->ob_shadow_phys), + regs->outb_copy_baseh); + } + + mhba->ib_cur_slot = (mhba->list_num_io - 1) | + regs->cl_pointer_toggle; + mhba->ob_cur_slot = (mhba->list_num_io - 1) | + regs->cl_pointer_toggle; + mhba->fw_state = FW_STATE_STARTED; + + break; + default: + 
dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n", + hs_state); + return -1; + } + return 0; +} + +static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba) +{ + unsigned int isr_status; + unsigned long before; + + before = jiffies; + mvumi_handshake(mhba); + do { + isr_status = mhba->instancet->read_fw_status_reg(mhba); + + if (mhba->fw_state == FW_STATE_STARTED) + return 0; + if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) { + dev_err(&mhba->pdev->dev, + "no handshake response at state 0x%x.\n", + mhba->fw_state); + dev_err(&mhba->pdev->dev, + "isr : global=0x%x,status=0x%x.\n", + mhba->global_isr, isr_status); + return -1; + } + rmb(); + usleep_range(1000, 2000); + } while (!(isr_status & DRBL_HANDSHAKE_ISR)); + + return 0; +} + +static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba) +{ + unsigned int tmp; + unsigned long before; + + before = jiffies; + tmp = ioread32(mhba->regs->arm_to_pciea_msg1); + while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) { + if (tmp != HANDSHAKE_READYSTATE) + iowrite32(DRBL_MU_RESET, + mhba->regs->pciea_to_arm_drbl_reg); + if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) { + dev_err(&mhba->pdev->dev, + "invalid signature [0x%x].\n", tmp); + return -1; + } + usleep_range(1000, 2000); + rmb(); + tmp = ioread32(mhba->regs->arm_to_pciea_msg1); + } + + mhba->fw_state = FW_STATE_STARTING; + dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n"); + do { + if (mvumi_handshake_event(mhba)) { + dev_err(&mhba->pdev->dev, + "handshake failed at state 0x%x.\n", + mhba->fw_state); + return -1; + } + } while (mhba->fw_state != FW_STATE_STARTED); + + dev_dbg(&mhba->pdev->dev, "firmware handshake done\n"); + + return 0; +} + +static unsigned char mvumi_start(struct mvumi_hba *mhba) +{ + unsigned int tmp; + struct mvumi_hw_regs *regs = mhba->regs; + + /* clear Door bell */ + tmp = ioread32(regs->arm_to_pciea_drbl_reg); + iowrite32(tmp, regs->arm_to_pciea_drbl_reg); + + iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg); + tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea; + iowrite32(tmp, regs->enpointa_mask_reg); + msleep(100); + if (mvumi_check_handshake(mhba)) + return -1; + + return 0; +} + +/** + * mvumi_complete_cmd - Completes a command + * @mhba: Adapter soft state + * @cmd: Command to be completed + * @ob_frame: Command response + */ +static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, + struct mvumi_rsp_frame *ob_frame) +{ + struct scsi_cmnd *scmd = cmd->scmd; + + mvumi_priv(cmd->scmd)->cmd_priv = NULL; + scmd->result = ob_frame->req_status; + + switch (ob_frame->req_status) { + case SAM_STAT_GOOD: + scmd->result |= DID_OK << 16; + break; + case SAM_STAT_BUSY: + scmd->result |= DID_BUS_BUSY << 16; + break; + case SAM_STAT_CHECK_CONDITION: + scmd->result |= (DID_OK << 16); + if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) { + memcpy(cmd->scmd->sense_buffer, ob_frame->payload, + sizeof(struct mvumi_sense_data)); + } + break; + default: + scmd->result |= (DID_ABORT << 16); + break; + } + + if (scsi_bufflen(scmd)) + dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), + scsi_sg_count(scmd), + scmd->sc_data_direction); + scsi_done(scmd); + mvumi_return_cmd(mhba, cmd); +} + +static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba, + struct mvumi_cmd *cmd, + struct mvumi_rsp_frame *ob_frame) +{ + if (atomic_read(&cmd->sync_cmd)) { + cmd->cmd_status = ob_frame->req_status; + + if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) && + 
(ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) && + cmd->data_buf) { + memcpy(cmd->data_buf, ob_frame->payload, + sizeof(struct mvumi_sense_data)); + } + atomic_dec(&cmd->sync_cmd); + wake_up(&mhba->int_cmd_wait_q); + } +} + +static void mvumi_show_event(struct mvumi_hba *mhba, + struct mvumi_driver_event *ptr) +{ + unsigned int i; + + dev_warn(&mhba->pdev->dev, + "Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n", + ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id); + if (ptr->param_count) { + printk(KERN_WARNING "Event param(len 0x%x): ", + ptr->param_count); + for (i = 0; i < ptr->param_count; i++) + printk(KERN_WARNING "0x%x ", ptr->params[i]); + + printk(KERN_WARNING "\n"); + } + + if (ptr->sense_data_length) { + printk(KERN_WARNING "Event sense data(len 0x%x): ", + ptr->sense_data_length); + for (i = 0; i < ptr->sense_data_length; i++) + printk(KERN_WARNING "0x%x ", ptr->sense_data[i]); + printk(KERN_WARNING "\n"); + } +} + +static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status) +{ + struct scsi_device *sdev; + int ret = -1; + + if (status == DEVICE_OFFLINE) { + sdev = scsi_device_lookup(mhba->shost, 0, devid, 0); + if (sdev) { + dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0, + sdev->id, 0); + scsi_remove_device(sdev); + scsi_device_put(sdev); + ret = 0; + } else + dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n", + devid); + } else if (status == DEVICE_ONLINE) { + sdev = scsi_device_lookup(mhba->shost, 0, devid, 0); + if (!sdev) { + scsi_add_device(mhba->shost, 0, devid, 0); + dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0, + devid, 0); + ret = 0; + } else { + dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n", + 0, devid, 0); + scsi_device_put(sdev); + } + } + return ret; +} + +static u64 mvumi_inquiry(struct mvumi_hba *mhba, + unsigned int id, struct mvumi_cmd *cmd) +{ + struct mvumi_msg_frame *frame; + u64 wwid = 0; + int cmd_alloc = 0; + int data_buf_len = 64; + + if (!cmd) { + cmd = mvumi_create_internal_cmd(mhba, data_buf_len); + if (cmd) + cmd_alloc = 1; + else + return 0; + } else { + memset(cmd->data_buf, 0, data_buf_len); + } + cmd->scmd = NULL; + cmd->cmd_status = REQ_STATUS_PENDING; + atomic_set(&cmd->sync_cmd, 0); + frame = cmd->frame; + frame->device_id = (u16) id; + frame->cmd_flag = CMD_FLAG_DATA_IN; + frame->req_function = CL_FUN_SCSI_CMD; + frame->cdb_length = 6; + frame->data_transfer_length = MVUMI_INQUIRY_LENGTH; + memset(frame->cdb, 0, frame->cdb_length); + frame->cdb[0] = INQUIRY; + frame->cdb[4] = frame->data_transfer_length; + + mvumi_issue_blocked_cmd(mhba, cmd); + + if (cmd->cmd_status == SAM_STAT_GOOD) { + if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) + wwid = id + 1; + else + memcpy((void *)&wwid, + (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF), + MVUMI_INQUIRY_UUID_LEN); + dev_dbg(&mhba->pdev->dev, + "inquiry device(0:%d:0) wwid(%llx)\n", id, wwid); + } else { + wwid = 0; + } + if (cmd_alloc) + mvumi_delete_internal_cmd(mhba, cmd); + + return wwid; +} + +static void mvumi_detach_devices(struct mvumi_hba *mhba) +{ + struct mvumi_device *mv_dev = NULL , *dev_next; + struct scsi_device *sdev = NULL; + + mutex_lock(&mhba->device_lock); + + /* detach Hard Disk */ + list_for_each_entry_safe(mv_dev, dev_next, + &mhba->shost_dev_list, list) { + mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE); + list_del_init(&mv_dev->list); + dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n", + mv_dev->id, mv_dev->wwid); + kfree(mv_dev); + } + list_for_each_entry_safe(mv_dev, dev_next, 
&mhba->mhba_dev_list, list) { + list_del_init(&mv_dev->list); + dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n", + mv_dev->id, mv_dev->wwid); + kfree(mv_dev); + } + + /* detach virtual device */ + if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) + sdev = scsi_device_lookup(mhba->shost, 0, + mhba->max_target_id - 1, 0); + + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } + + mutex_unlock(&mhba->device_lock); +} + +static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id) +{ + struct scsi_device *sdev; + + sdev = scsi_device_lookup(mhba->shost, 0, id, 0); + if (sdev) { + scsi_rescan_device(sdev); + scsi_device_put(sdev); + } +} + +static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid) +{ + struct mvumi_device *mv_dev = NULL; + + list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) { + if (mv_dev->wwid == wwid) { + if (mv_dev->id != id) { + dev_err(&mhba->pdev->dev, + "%s has same wwid[%llx] ," + " but different id[%d %d]\n", + __func__, mv_dev->wwid, mv_dev->id, id); + return -1; + } else { + if (mhba->pdev->device == + PCI_DEVICE_ID_MARVELL_MV9143) + mvumi_rescan_devices(mhba, id); + return 1; + } + } + } + return 0; +} + +static void mvumi_remove_devices(struct mvumi_hba *mhba, int id) +{ + struct mvumi_device *mv_dev = NULL, *dev_next; + + list_for_each_entry_safe(mv_dev, dev_next, + &mhba->shost_dev_list, list) { + if (mv_dev->id == id) { + dev_dbg(&mhba->pdev->dev, + "detach device(0:%d:0) wwid(%llx) from HOST\n", + mv_dev->id, mv_dev->wwid); + mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE); + list_del_init(&mv_dev->list); + kfree(mv_dev); + } + } +} + +static int mvumi_probe_devices(struct mvumi_hba *mhba) +{ + int id, maxid; + u64 wwid = 0; + struct mvumi_device *mv_dev = NULL; + struct mvumi_cmd *cmd = NULL; + int found = 0; + + cmd = mvumi_create_internal_cmd(mhba, 64); + if (!cmd) + return -1; + + if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) + maxid = mhba->max_target_id; + else + maxid = mhba->max_target_id - 1; + + for (id = 0; id < maxid; id++) { + wwid = mvumi_inquiry(mhba, id, cmd); + if (!wwid) { + /* device no response, remove it */ + mvumi_remove_devices(mhba, id); + } else { + /* device response, add it */ + found = mvumi_match_devices(mhba, id, wwid); + if (!found) { + mvumi_remove_devices(mhba, id); + mv_dev = kzalloc(sizeof(struct mvumi_device), + GFP_KERNEL); + if (!mv_dev) { + dev_err(&mhba->pdev->dev, + "%s alloc mv_dev failed\n", + __func__); + continue; + } + mv_dev->id = id; + mv_dev->wwid = wwid; + mv_dev->sdev = NULL; + INIT_LIST_HEAD(&mv_dev->list); + list_add_tail(&mv_dev->list, + &mhba->mhba_dev_list); + dev_dbg(&mhba->pdev->dev, + "probe a new device(0:%d:0)" + " wwid(%llx)\n", id, mv_dev->wwid); + } else if (found == -1) + return -1; + else + continue; + } + } + + if (cmd) + mvumi_delete_internal_cmd(mhba, cmd); + + return 0; +} + +static int mvumi_rescan_bus(void *data) +{ + int ret = 0; + struct mvumi_hba *mhba = (struct mvumi_hba *) data; + struct mvumi_device *mv_dev = NULL , *dev_next; + + while (!kthread_should_stop()) { + + set_current_state(TASK_INTERRUPTIBLE); + if (!atomic_read(&mhba->pnp_count)) + schedule(); + msleep(1000); + atomic_set(&mhba->pnp_count, 0); + __set_current_state(TASK_RUNNING); + + mutex_lock(&mhba->device_lock); + ret = mvumi_probe_devices(mhba); + if (!ret) { + list_for_each_entry_safe(mv_dev, dev_next, + &mhba->mhba_dev_list, list) { + if (mvumi_handle_hotplug(mhba, mv_dev->id, + DEVICE_ONLINE)) { + dev_err(&mhba->pdev->dev, + "%s add 
device(0:%d:0) failed" + "wwid(%llx) has exist\n", + __func__, + mv_dev->id, mv_dev->wwid); + list_del_init(&mv_dev->list); + kfree(mv_dev); + } else { + list_move_tail(&mv_dev->list, + &mhba->shost_dev_list); + } + } + } + mutex_unlock(&mhba->device_lock); + } + return 0; +} + +static void mvumi_proc_msg(struct mvumi_hba *mhba, + struct mvumi_hotplug_event *param) +{ + u16 size = param->size; + const unsigned long *ar_bitmap; + const unsigned long *re_bitmap; + int index; + + if (mhba->fw_flag & MVUMI_FW_ATTACH) { + index = -1; + ar_bitmap = (const unsigned long *) param->bitmap; + re_bitmap = (const unsigned long *) ¶m->bitmap[size >> 3]; + + mutex_lock(&mhba->sas_discovery_mutex); + do { + index = find_next_zero_bit(ar_bitmap, size, index + 1); + if (index >= size) + break; + mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE); + } while (1); + + index = -1; + do { + index = find_next_zero_bit(re_bitmap, size, index + 1); + if (index >= size) + break; + mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE); + } while (1); + mutex_unlock(&mhba->sas_discovery_mutex); + } +} + +static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) +{ + if (msg == APICDB1_EVENT_GETEVENT) { + int i, count; + struct mvumi_driver_event *param = NULL; + struct mvumi_event_req *er = buffer; + count = er->count; + if (count > MAX_EVENTS_RETURNED) { + dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger" + " than max event count[0x%x].\n", + count, MAX_EVENTS_RETURNED); + return; + } + for (i = 0; i < count; i++) { + param = &er->events[i]; + mvumi_show_event(mhba, param); + } + } else if (msg == APICDB1_HOST_GETEVENT) { + mvumi_proc_msg(mhba, buffer); + } +} + +static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg) +{ + struct mvumi_cmd *cmd; + struct mvumi_msg_frame *frame; + + cmd = mvumi_create_internal_cmd(mhba, 512); + if (!cmd) + return -1; + cmd->scmd = NULL; + cmd->cmd_status = REQ_STATUS_PENDING; + atomic_set(&cmd->sync_cmd, 0); + frame = cmd->frame; + frame->device_id = 0; + frame->cmd_flag = CMD_FLAG_DATA_IN; + frame->req_function = CL_FUN_SCSI_CMD; + frame->cdb_length = MAX_COMMAND_SIZE; + frame->data_transfer_length = sizeof(struct mvumi_event_req); + memset(frame->cdb, 0, MAX_COMMAND_SIZE); + frame->cdb[0] = APICDB0_EVENT; + frame->cdb[1] = msg; + mvumi_issue_blocked_cmd(mhba, cmd); + + if (cmd->cmd_status != SAM_STAT_GOOD) + dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n", + cmd->cmd_status); + else + mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf); + + mvumi_delete_internal_cmd(mhba, cmd); + return 0; +} + +static void mvumi_scan_events(struct work_struct *work) +{ + struct mvumi_events_wq *mu_ev = + container_of(work, struct mvumi_events_wq, work_q); + + mvumi_get_event(mu_ev->mhba, mu_ev->event); + kfree(mu_ev); +} + +static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status) +{ + struct mvumi_events_wq *mu_ev; + + while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) { + if (isr_status & DRBL_BUS_CHANGE) { + atomic_inc(&mhba->pnp_count); + wake_up_process(mhba->dm_thread); + isr_status &= ~(DRBL_BUS_CHANGE); + continue; + } + + mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC); + if (mu_ev) { + INIT_WORK(&mu_ev->work_q, mvumi_scan_events); + mu_ev->mhba = mhba; + mu_ev->event = APICDB1_EVENT_GETEVENT; + isr_status &= ~(DRBL_EVENT_NOTIFY); + mu_ev->param = NULL; + schedule_work(&mu_ev->work_q); + } + } +} + +static void mvumi_handle_clob(struct mvumi_hba *mhba) +{ + struct mvumi_rsp_frame *ob_frame; + struct mvumi_cmd *cmd; + 
struct mvumi_ob_data *pool; + + while (!list_empty(&mhba->free_ob_list)) { + pool = list_first_entry(&mhba->free_ob_list, + struct mvumi_ob_data, list); + list_del_init(&pool->list); + list_add_tail(&pool->list, &mhba->ob_data_list); + + ob_frame = (struct mvumi_rsp_frame *) &pool->data[0]; + cmd = mhba->tag_cmd[ob_frame->tag]; + + atomic_dec(&mhba->fw_outstanding); + mhba->tag_cmd[ob_frame->tag] = NULL; + tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag); + if (cmd->scmd) + mvumi_complete_cmd(mhba, cmd, ob_frame); + else + mvumi_complete_internal_cmd(mhba, cmd, ob_frame); + } + mhba->instancet->fire_cmd(mhba, NULL); +} + +static irqreturn_t mvumi_isr_handler(int irq, void *devp) +{ + struct mvumi_hba *mhba = (struct mvumi_hba *) devp; + unsigned long flags; + + spin_lock_irqsave(mhba->shost->host_lock, flags); + if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) { + spin_unlock_irqrestore(mhba->shost->host_lock, flags); + return IRQ_NONE; + } + + if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) { + if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) + mvumi_launch_events(mhba, mhba->isr_status); + if (mhba->isr_status & DRBL_HANDSHAKE_ISR) { + dev_warn(&mhba->pdev->dev, "enter handshake again!\n"); + mvumi_handshake(mhba); + } + + } + + if (mhba->global_isr & mhba->regs->int_comaout) + mvumi_receive_ob_list_entry(mhba); + + mhba->global_isr = 0; + mhba->isr_status = 0; + if (mhba->fw_state == FW_STATE_STARTED) + mvumi_handle_clob(mhba); + spin_unlock_irqrestore(mhba->shost->host_lock, flags); + return IRQ_HANDLED; +} + +static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba, + struct mvumi_cmd *cmd) +{ + void *ib_entry; + struct mvumi_msg_frame *ib_frame; + unsigned int frame_len; + + ib_frame = cmd->frame; + if (unlikely(mhba->fw_state != FW_STATE_STARTED)) { + dev_dbg(&mhba->pdev->dev, "firmware not ready.\n"); + return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; + } + if (tag_is_empty(&mhba->tag_pool)) { + dev_dbg(&mhba->pdev->dev, "no free tag.\n"); + return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; + } + mvumi_get_ib_list_entry(mhba, &ib_entry); + + cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool); + cmd->frame->request_id = mhba->io_seq++; + cmd->request_id = cmd->frame->request_id; + mhba->tag_cmd[cmd->frame->tag] = cmd; + frame_len = sizeof(*ib_frame) + + ib_frame->sg_counts * sizeof(struct mvumi_sgl); + if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { + struct mvumi_dyn_list_entry *dle; + dle = ib_entry; + dle->src_low_addr = + cpu_to_le32(lower_32_bits(cmd->frame_phys)); + dle->src_high_addr = + cpu_to_le32(upper_32_bits(cmd->frame_phys)); + dle->if_length = (frame_len >> 2) & 0xFFF; + } else { + memcpy(ib_entry, ib_frame, frame_len); + } + return MV_QUEUE_COMMAND_RESULT_SENT; +} + +static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) +{ + unsigned short num_of_cl_sent = 0; + unsigned int count; + enum mvumi_qc_result result; + + if (cmd) + list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list); + count = mhba->instancet->check_ib_list(mhba); + if (list_empty(&mhba->waiting_req_list) || !count) + return; + + do { + cmd = list_first_entry(&mhba->waiting_req_list, + struct mvumi_cmd, queue_pointer); + list_del_init(&cmd->queue_pointer); + result = mvumi_send_command(mhba, cmd); + switch (result) { + case MV_QUEUE_COMMAND_RESULT_SENT: + num_of_cl_sent++; + break; + case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE: + list_add(&cmd->queue_pointer, &mhba->waiting_req_list); + if (num_of_cl_sent > 0) + 
mvumi_send_ib_list_entry(mhba); + + return; + } + } while (!list_empty(&mhba->waiting_req_list) && count--); + + if (num_of_cl_sent > 0) + mvumi_send_ib_list_entry(mhba); +} + +/** + * mvumi_enable_intr - Enables interrupts + * @mhba: Adapter soft state + */ +static void mvumi_enable_intr(struct mvumi_hba *mhba) +{ + unsigned int mask; + struct mvumi_hw_regs *regs = mhba->regs; + + iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg); + mask = ioread32(regs->enpointa_mask_reg); + mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr; + iowrite32(mask, regs->enpointa_mask_reg); +} + +/** + * mvumi_disable_intr -Disables interrupt + * @mhba: Adapter soft state + */ +static void mvumi_disable_intr(struct mvumi_hba *mhba) +{ + unsigned int mask; + struct mvumi_hw_regs *regs = mhba->regs; + + iowrite32(0, regs->arm_to_pciea_mask_reg); + mask = ioread32(regs->enpointa_mask_reg); + mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout | + regs->int_comaerr); + iowrite32(mask, regs->enpointa_mask_reg); +} + +static int mvumi_clear_intr(void *extend) +{ + struct mvumi_hba *mhba = (struct mvumi_hba *) extend; + unsigned int status, isr_status = 0, tmp = 0; + struct mvumi_hw_regs *regs = mhba->regs; + + status = ioread32(regs->main_int_cause_reg); + if (!(status & regs->int_mu) || status == 0xFFFFFFFF) + return 1; + if (unlikely(status & regs->int_comaerr)) { + tmp = ioread32(regs->outb_isr_cause); + if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) { + if (tmp & regs->clic_out_err) { + iowrite32(tmp & regs->clic_out_err, + regs->outb_isr_cause); + } + } else { + if (tmp & (regs->clic_in_err | regs->clic_out_err)) + iowrite32(tmp & (regs->clic_in_err | + regs->clic_out_err), + regs->outb_isr_cause); + } + status ^= mhba->regs->int_comaerr; + /* inbound or outbound parity error, command will timeout */ + } + if (status & regs->int_comaout) { + tmp = ioread32(regs->outb_isr_cause); + if (tmp & regs->clic_irq) + iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause); + } + if (status & regs->int_dl_cpu2pciea) { + isr_status = ioread32(regs->arm_to_pciea_drbl_reg); + if (isr_status) + iowrite32(isr_status, regs->arm_to_pciea_drbl_reg); + } + + mhba->global_isr = status; + mhba->isr_status = isr_status; + + return 0; +} + +/** + * mvumi_read_fw_status_reg - returns the current FW status value + * @mhba: Adapter soft state + */ +static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba) +{ + unsigned int status; + + status = ioread32(mhba->regs->arm_to_pciea_drbl_reg); + if (status) + iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg); + return status; +} + +static struct mvumi_instance_template mvumi_instance_9143 = { + .fire_cmd = mvumi_fire_cmd, + .enable_intr = mvumi_enable_intr, + .disable_intr = mvumi_disable_intr, + .clear_intr = mvumi_clear_intr, + .read_fw_status_reg = mvumi_read_fw_status_reg, + .check_ib_list = mvumi_check_ib_list_9143, + .check_ob_list = mvumi_check_ob_list_9143, + .reset_host = mvumi_reset_host_9143, +}; + +static struct mvumi_instance_template mvumi_instance_9580 = { + .fire_cmd = mvumi_fire_cmd, + .enable_intr = mvumi_enable_intr, + .disable_intr = mvumi_disable_intr, + .clear_intr = mvumi_clear_intr, + .read_fw_status_reg = mvumi_read_fw_status_reg, + .check_ib_list = mvumi_check_ib_list_9580, + .check_ob_list = mvumi_check_ob_list_9580, + .reset_host = mvumi_reset_host_9580, +}; + +static int mvumi_slave_configure(struct scsi_device *sdev) +{ + struct mvumi_hba *mhba; + unsigned char bitcount = sizeof(unsigned char) * 8; + + mhba 
= (struct mvumi_hba *) sdev->host->hostdata; + if (sdev->id >= mhba->max_target_id) + return -EINVAL; + + mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount)); + return 0; +} + +/** + * mvumi_build_frame - Prepares a direct cdb (DCDB) command + * @mhba: Adapter soft state + * @scmd: SCSI command + * @cmd: Command to be prepared in + * + * This function prepares CDB commands. These are typcially pass-through + * commands to the devices. + */ +static unsigned char mvumi_build_frame(struct mvumi_hba *mhba, + struct scsi_cmnd *scmd, struct mvumi_cmd *cmd) +{ + struct mvumi_msg_frame *pframe; + + cmd->scmd = scmd; + cmd->cmd_status = REQ_STATUS_PENDING; + pframe = cmd->frame; + pframe->device_id = ((unsigned short) scmd->device->id) | + (((unsigned short) scmd->device->lun) << 8); + pframe->cmd_flag = 0; + + switch (scmd->sc_data_direction) { + case DMA_NONE: + pframe->cmd_flag |= CMD_FLAG_NON_DATA; + break; + case DMA_FROM_DEVICE: + pframe->cmd_flag |= CMD_FLAG_DATA_IN; + break; + case DMA_TO_DEVICE: + pframe->cmd_flag |= CMD_FLAG_DATA_OUT; + break; + case DMA_BIDIRECTIONAL: + default: + dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] " + "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]); + goto error; + } + + pframe->cdb_length = scmd->cmd_len; + memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length); + pframe->req_function = CL_FUN_SCSI_CMD; + if (scsi_bufflen(scmd)) { + if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0], + &pframe->sg_counts)) + goto error; + + pframe->data_transfer_length = scsi_bufflen(scmd); + } else { + pframe->sg_counts = 0; + pframe->data_transfer_length = 0; + } + return 0; + +error: + scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); + return -1; +} + +/** + * mvumi_queue_command - Queue entry point + * @shost: Scsi host to queue command on + * @scmd: SCSI command to be queued + */ +static int mvumi_queue_command(struct Scsi_Host *shost, + struct scsi_cmnd *scmd) +{ + struct mvumi_cmd *cmd; + struct mvumi_hba *mhba; + unsigned long irq_flags; + + spin_lock_irqsave(shost->host_lock, irq_flags); + + mhba = (struct mvumi_hba *) shost->hostdata; + scmd->result = 0; + cmd = mvumi_get_cmd(mhba); + if (unlikely(!cmd)) { + spin_unlock_irqrestore(shost->host_lock, irq_flags); + return SCSI_MLQUEUE_HOST_BUSY; + } + + if (unlikely(mvumi_build_frame(mhba, scmd, cmd))) + goto out_return_cmd; + + cmd->scmd = scmd; + mvumi_priv(scmd)->cmd_priv = cmd; + mhba->instancet->fire_cmd(mhba, cmd); + spin_unlock_irqrestore(shost->host_lock, irq_flags); + return 0; + +out_return_cmd: + mvumi_return_cmd(mhba, cmd); + scsi_done(scmd); + spin_unlock_irqrestore(shost->host_lock, irq_flags); + return 0; +} + +static enum scsi_timeout_action mvumi_timed_out(struct scsi_cmnd *scmd) +{ + struct mvumi_cmd *cmd = mvumi_priv(scmd)->cmd_priv; + struct Scsi_Host *host = scmd->device->host; + struct mvumi_hba *mhba = shost_priv(host); + unsigned long flags; + + spin_lock_irqsave(mhba->shost->host_lock, flags); + + if (mhba->tag_cmd[cmd->frame->tag]) { + mhba->tag_cmd[cmd->frame->tag] = NULL; + tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag); + } + if (!list_empty(&cmd->queue_pointer)) + list_del_init(&cmd->queue_pointer); + else + atomic_dec(&mhba->fw_outstanding); + + scmd->result = (DID_ABORT << 16); + mvumi_priv(scmd)->cmd_priv = NULL; + if (scsi_bufflen(scmd)) { + dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), + scsi_sg_count(scmd), + scmd->sc_data_direction); + } + mvumi_return_cmd(mhba, cmd); + spin_unlock_irqrestore(mhba->shost->host_lock, flags); + + 
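+	/*
+	 * The command has already been torn down here (tag released, data
+	 * buffer unmapped, mvumi_cmd returned to the pool), so let the SCSI
+	 * midlayer continue with its normal timeout handling.
+	 */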
return SCSI_EH_NOT_HANDLED; +} + +static int +mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int geom[]) +{ + int heads, sectors; + sector_t cylinders; + unsigned long tmp; + + heads = 64; + sectors = 32; + tmp = heads * sectors; + cylinders = capacity; + sector_div(cylinders, tmp); + + if (capacity >= 0x200000) { + heads = 255; + sectors = 63; + tmp = heads * sectors; + cylinders = capacity; + sector_div(cylinders, tmp); + } + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + + return 0; +} + +static const struct scsi_host_template mvumi_template = { + + .module = THIS_MODULE, + .name = "Marvell Storage Controller", + .slave_configure = mvumi_slave_configure, + .queuecommand = mvumi_queue_command, + .eh_timed_out = mvumi_timed_out, + .eh_host_reset_handler = mvumi_host_reset, + .bios_param = mvumi_bios_param, + .dma_boundary = PAGE_SIZE - 1, + .this_id = -1, + .cmd_size = sizeof(struct mvumi_cmd_priv), +}; + +static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba) +{ + void *base = NULL; + struct mvumi_hw_regs *regs; + + switch (mhba->pdev->device) { + case PCI_DEVICE_ID_MARVELL_MV9143: + mhba->mmio = mhba->base_addr[0]; + base = mhba->mmio; + if (!mhba->regs) { + mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); + if (mhba->regs == NULL) + return -ENOMEM; + } + regs = mhba->regs; + + /* For Arm */ + regs->ctrl_sts_reg = base + 0x20104; + regs->rstoutn_mask_reg = base + 0x20108; + regs->sys_soft_rst_reg = base + 0x2010C; + regs->main_int_cause_reg = base + 0x20200; + regs->enpointa_mask_reg = base + 0x2020C; + regs->rstoutn_en_reg = base + 0xF1400; + /* For Doorbell */ + regs->pciea_to_arm_drbl_reg = base + 0x20400; + regs->arm_to_pciea_drbl_reg = base + 0x20408; + regs->arm_to_pciea_mask_reg = base + 0x2040C; + regs->pciea_to_arm_msg0 = base + 0x20430; + regs->pciea_to_arm_msg1 = base + 0x20434; + regs->arm_to_pciea_msg0 = base + 0x20438; + regs->arm_to_pciea_msg1 = base + 0x2043C; + + /* For Message Unit */ + + regs->inb_aval_count_basel = base + 0x508; + regs->inb_aval_count_baseh = base + 0x50C; + regs->inb_write_pointer = base + 0x518; + regs->inb_read_pointer = base + 0x51C; + regs->outb_coal_cfg = base + 0x568; + regs->outb_copy_basel = base + 0x5B0; + regs->outb_copy_baseh = base + 0x5B4; + regs->outb_copy_pointer = base + 0x544; + regs->outb_read_pointer = base + 0x548; + regs->outb_isr_cause = base + 0x560; + regs->outb_coal_cfg = base + 0x568; + /* Bit setting for HW */ + regs->int_comaout = 1 << 8; + regs->int_comaerr = 1 << 6; + regs->int_dl_cpu2pciea = 1 << 1; + regs->cl_pointer_toggle = 1 << 12; + regs->clic_irq = 1 << 1; + regs->clic_in_err = 1 << 8; + regs->clic_out_err = 1 << 12; + regs->cl_slot_num_mask = 0xFFF; + regs->int_drbl_int_mask = 0x3FFFFFFF; + regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout | + regs->int_comaerr; + break; + case PCI_DEVICE_ID_MARVELL_MV9580: + mhba->mmio = mhba->base_addr[2]; + base = mhba->mmio; + if (!mhba->regs) { + mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); + if (mhba->regs == NULL) + return -ENOMEM; + } + regs = mhba->regs; + /* For Arm */ + regs->ctrl_sts_reg = base + 0x20104; + regs->rstoutn_mask_reg = base + 0x1010C; + regs->sys_soft_rst_reg = base + 0x10108; + regs->main_int_cause_reg = base + 0x10200; + regs->enpointa_mask_reg = base + 0x1020C; + regs->rstoutn_en_reg = base + 0xF1400; + + /* For Doorbell */ + regs->pciea_to_arm_drbl_reg = base + 0x10460; + regs->arm_to_pciea_drbl_reg = base + 0x10480; + regs->arm_to_pciea_mask_reg = base + 0x10484; + 
regs->pciea_to_arm_msg0 = base + 0x10400; + regs->pciea_to_arm_msg1 = base + 0x10404; + regs->arm_to_pciea_msg0 = base + 0x10420; + regs->arm_to_pciea_msg1 = base + 0x10424; + + /* For reset*/ + regs->reset_request = base + 0x10108; + regs->reset_enable = base + 0x1010c; + + /* For Message Unit */ + regs->inb_aval_count_basel = base + 0x4008; + regs->inb_aval_count_baseh = base + 0x400C; + regs->inb_write_pointer = base + 0x4018; + regs->inb_read_pointer = base + 0x401C; + regs->outb_copy_basel = base + 0x4058; + regs->outb_copy_baseh = base + 0x405C; + regs->outb_copy_pointer = base + 0x406C; + regs->outb_read_pointer = base + 0x4070; + regs->outb_coal_cfg = base + 0x4080; + regs->outb_isr_cause = base + 0x4088; + /* Bit setting for HW */ + regs->int_comaout = 1 << 4; + regs->int_dl_cpu2pciea = 1 << 12; + regs->int_comaerr = 1 << 29; + regs->cl_pointer_toggle = 1 << 14; + regs->cl_slot_num_mask = 0x3FFF; + regs->clic_irq = 1 << 0; + regs->clic_out_err = 1 << 1; + regs->int_drbl_int_mask = 0x3FFFFFFF; + regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout; + break; + default: + return -1; + } + + return 0; +} + +/** + * mvumi_init_fw - Initializes the FW + * @mhba: Adapter soft state + * + * This is the main function for initializing firmware. + */ +static int mvumi_init_fw(struct mvumi_hba *mhba) +{ + int ret = 0; + + if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) { + dev_err(&mhba->pdev->dev, "IO memory region busy!\n"); + return -EBUSY; + } + ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); + if (ret) + goto fail_ioremap; + + switch (mhba->pdev->device) { + case PCI_DEVICE_ID_MARVELL_MV9143: + mhba->instancet = &mvumi_instance_9143; + mhba->io_seq = 0; + mhba->max_sge = MVUMI_MAX_SG_ENTRY; + mhba->request_id_enabled = 1; + break; + case PCI_DEVICE_ID_MARVELL_MV9580: + mhba->instancet = &mvumi_instance_9580; + mhba->io_seq = 0; + mhba->max_sge = MVUMI_MAX_SG_ENTRY; + break; + default: + dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n", + mhba->pdev->device); + mhba->instancet = NULL; + ret = -EINVAL; + goto fail_alloc_mem; + } + dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n", + mhba->pdev->device); + ret = mvumi_cfg_hw_reg(mhba); + if (ret) { + dev_err(&mhba->pdev->dev, + "failed to allocate memory for reg\n"); + ret = -ENOMEM; + goto fail_alloc_mem; + } + mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev, + HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL); + if (!mhba->handshake_page) { + dev_err(&mhba->pdev->dev, + "failed to allocate memory for handshake\n"); + ret = -ENOMEM; + goto fail_alloc_page; + } + + if (mvumi_start(mhba)) { + ret = -EINVAL; + goto fail_ready_state; + } + ret = mvumi_alloc_cmds(mhba); + if (ret) + goto fail_ready_state; + + return 0; + +fail_ready_state: + mvumi_release_mem_resource(mhba); + dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE, + mhba->handshake_page, mhba->handshake_page_phys); +fail_alloc_page: + kfree(mhba->regs); +fail_alloc_mem: + mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); +fail_ioremap: + pci_release_regions(mhba->pdev); + + return ret; +} + +/** + * mvumi_io_attach - Attaches this driver to SCSI mid-layer + * @mhba: Adapter soft state + */ +static int mvumi_io_attach(struct mvumi_hba *mhba) +{ + struct Scsi_Host *host = mhba->shost; + struct scsi_device *sdev = NULL; + int ret; + unsigned int max_sg = (mhba->ib_max_size - + sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl); + + host->irq = mhba->pdev->irq; + host->unique_id = mhba->unique_id; + host->can_queue = (mhba->max_io 
- 1) ? (mhba->max_io - 1) : 1; + host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge; + host->max_sectors = mhba->max_transfer_size / 512; + host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; + host->max_id = mhba->max_target_id; + host->max_cmd_len = MAX_COMMAND_SIZE; + + ret = scsi_add_host(host, &mhba->pdev->dev); + if (ret) { + dev_err(&mhba->pdev->dev, "scsi_add_host failed\n"); + return ret; + } + mhba->fw_flag |= MVUMI_FW_ATTACH; + + mutex_lock(&mhba->sas_discovery_mutex); + if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) + ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0); + else + ret = 0; + if (ret) { + dev_err(&mhba->pdev->dev, "add virtual device failed\n"); + mutex_unlock(&mhba->sas_discovery_mutex); + goto fail_add_device; + } + + mhba->dm_thread = kthread_create(mvumi_rescan_bus, + mhba, "mvumi_scanthread"); + if (IS_ERR(mhba->dm_thread)) { + dev_err(&mhba->pdev->dev, + "failed to create device scan thread\n"); + ret = PTR_ERR(mhba->dm_thread); + mutex_unlock(&mhba->sas_discovery_mutex); + goto fail_create_thread; + } + atomic_set(&mhba->pnp_count, 1); + wake_up_process(mhba->dm_thread); + + mutex_unlock(&mhba->sas_discovery_mutex); + return 0; + +fail_create_thread: + if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) + sdev = scsi_device_lookup(mhba->shost, 0, + mhba->max_target_id - 1, 0); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } +fail_add_device: + scsi_remove_host(mhba->shost); + return ret; +} + +/** + * mvumi_probe_one - PCI hotplug entry point + * @pdev: PCI device structure + * @id: PCI ids of supported hotplugged adapter + */ +static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct Scsi_Host *host; + struct mvumi_hba *mhba; + int ret; + + dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ", + pdev->vendor, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + ret = mvumi_pci_set_master(pdev); + if (ret) + goto fail_set_dma_mask; + + host = scsi_host_alloc(&mvumi_template, sizeof(*mhba)); + if (!host) { + dev_err(&pdev->dev, "scsi_host_alloc failed\n"); + ret = -ENOMEM; + goto fail_alloc_instance; + } + mhba = shost_priv(host); + + INIT_LIST_HEAD(&mhba->cmd_pool); + INIT_LIST_HEAD(&mhba->ob_data_list); + INIT_LIST_HEAD(&mhba->free_ob_list); + INIT_LIST_HEAD(&mhba->res_list); + INIT_LIST_HEAD(&mhba->waiting_req_list); + mutex_init(&mhba->device_lock); + INIT_LIST_HEAD(&mhba->mhba_dev_list); + INIT_LIST_HEAD(&mhba->shost_dev_list); + atomic_set(&mhba->fw_outstanding, 0); + init_waitqueue_head(&mhba->int_cmd_wait_q); + mutex_init(&mhba->sas_discovery_mutex); + + mhba->pdev = pdev; + mhba->shost = host; + mhba->unique_id = pci_dev_id(pdev); + + ret = mvumi_init_fw(mhba); + if (ret) + goto fail_init_fw; + + ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED, + "mvumi", mhba); + if (ret) { + dev_err(&pdev->dev, "failed to register IRQ\n"); + goto fail_init_irq; + } + + mhba->instancet->enable_intr(mhba); + pci_set_drvdata(pdev, mhba); + + ret = mvumi_io_attach(mhba); + if (ret) + goto fail_io_attach; + + mvumi_backup_bar_addr(mhba); + dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n"); + + return 0; + +fail_io_attach: + mhba->instancet->disable_intr(mhba); + free_irq(mhba->pdev->irq, mhba); +fail_init_irq: + mvumi_release_fw(mhba); +fail_init_fw: + scsi_host_put(host); + +fail_alloc_instance: +fail_set_dma_mask: + pci_disable_device(pdev); + + return 
ret; +} + +static void mvumi_detach_one(struct pci_dev *pdev) +{ + struct Scsi_Host *host; + struct mvumi_hba *mhba; + + mhba = pci_get_drvdata(pdev); + if (mhba->dm_thread) { + kthread_stop(mhba->dm_thread); + mhba->dm_thread = NULL; + } + + mvumi_detach_devices(mhba); + host = mhba->shost; + scsi_remove_host(mhba->shost); + mvumi_flush_cache(mhba); + + mhba->instancet->disable_intr(mhba); + free_irq(mhba->pdev->irq, mhba); + mvumi_release_fw(mhba); + scsi_host_put(host); + pci_disable_device(pdev); + dev_dbg(&pdev->dev, "driver is removed!\n"); +} + +/** + * mvumi_shutdown - Shutdown entry point + * @pdev: PCI device structure + */ +static void mvumi_shutdown(struct pci_dev *pdev) +{ + struct mvumi_hba *mhba = pci_get_drvdata(pdev); + + mvumi_flush_cache(mhba); +} + +static int __maybe_unused mvumi_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct mvumi_hba *mhba = pci_get_drvdata(pdev); + + mvumi_flush_cache(mhba); + + mhba->instancet->disable_intr(mhba); + mvumi_unmap_pci_addr(pdev, mhba->base_addr); + + return 0; +} + +static int __maybe_unused mvumi_resume(struct device *dev) +{ + int ret; + struct pci_dev *pdev = to_pci_dev(dev); + struct mvumi_hba *mhba = pci_get_drvdata(pdev); + + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) + goto fail; + ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); + if (ret) + goto release_regions; + + if (mvumi_cfg_hw_reg(mhba)) { + ret = -EINVAL; + goto unmap_pci_addr; + } + + mhba->mmio = mhba->base_addr[0]; + mvumi_reset(mhba); + + if (mvumi_start(mhba)) { + ret = -EINVAL; + goto unmap_pci_addr; + } + + mhba->instancet->enable_intr(mhba); + + return 0; + +unmap_pci_addr: + mvumi_unmap_pci_addr(pdev, mhba->base_addr); +release_regions: + pci_release_regions(pdev); +fail: + + return ret; +} + +static SIMPLE_DEV_PM_OPS(mvumi_pm_ops, mvumi_suspend, mvumi_resume); + +static struct pci_driver mvumi_pci_driver = { + + .name = MV_DRIVER_NAME, + .id_table = mvumi_pci_table, + .probe = mvumi_probe_one, + .remove = mvumi_detach_one, + .shutdown = mvumi_shutdown, + .driver.pm = &mvumi_pm_ops, +}; + +module_pci_driver(mvumi_pci_driver); diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h new file mode 100644 index 000000000..1306a4abf --- /dev/null +++ b/drivers/scsi/mvumi.h @@ -0,0 +1,566 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Marvell UMI head file + * + * Copyright 2011 Marvell. 
+ */ + +#ifndef MVUMI_H +#define MVUMI_H + +#define MAX_BASE_ADDRESS 6 + +#define VER_MAJOR 1 +#define VER_MINOR 1 +#define VER_OEM 0 +#define VER_BUILD 1500 + +#define MV_DRIVER_NAME "mvumi" +#define PCI_DEVICE_ID_MARVELL_MV9143 0x9143 +#define PCI_DEVICE_ID_MARVELL_MV9580 0x9580 + +#define MVUMI_INTERNAL_CMD_WAIT_TIME 45 +#define MVUMI_INQUIRY_LENGTH 44 +#define MVUMI_INQUIRY_UUID_OFF 36 +#define MVUMI_INQUIRY_UUID_LEN 8 + +#define IS_DMA64 (sizeof(dma_addr_t) == 8) + +enum mvumi_qc_result { + MV_QUEUE_COMMAND_RESULT_SENT = 0, + MV_QUEUE_COMMAND_RESULT_NO_RESOURCE, +}; + +struct mvumi_hw_regs { + /* For CPU */ + void *main_int_cause_reg; + void *enpointa_mask_reg; + void *enpointb_mask_reg; + void *rstoutn_en_reg; + void *ctrl_sts_reg; + void *rstoutn_mask_reg; + void *sys_soft_rst_reg; + + /* For Doorbell */ + void *pciea_to_arm_drbl_reg; + void *arm_to_pciea_drbl_reg; + void *arm_to_pciea_mask_reg; + void *pciea_to_arm_msg0; + void *pciea_to_arm_msg1; + void *arm_to_pciea_msg0; + void *arm_to_pciea_msg1; + + /* reset register */ + void *reset_request; + void *reset_enable; + + /* For Message Unit */ + void *inb_list_basel; + void *inb_list_baseh; + void *inb_aval_count_basel; + void *inb_aval_count_baseh; + void *inb_write_pointer; + void *inb_read_pointer; + void *outb_list_basel; + void *outb_list_baseh; + void *outb_copy_basel; + void *outb_copy_baseh; + void *outb_copy_pointer; + void *outb_read_pointer; + void *inb_isr_cause; + void *outb_isr_cause; + void *outb_coal_cfg; + void *outb_coal_timeout; + + /* Bit setting for HW */ + u32 int_comaout; + u32 int_comaerr; + u32 int_dl_cpu2pciea; + u32 int_mu; + u32 int_drbl_int_mask; + u32 int_main_int_mask; + u32 cl_pointer_toggle; + u32 cl_slot_num_mask; + u32 clic_irq; + u32 clic_in_err; + u32 clic_out_err; +}; + +struct mvumi_dyn_list_entry { + u32 src_low_addr; + u32 src_high_addr; + u32 if_length; + u32 reserve; +}; + +#define SCSI_CMD_MARVELL_SPECIFIC 0xE1 +#define CDB_CORE_MODULE 0x1 +#define CDB_CORE_SHUTDOWN 0xB + +enum { + DRBL_HANDSHAKE = 1 << 0, + DRBL_SOFT_RESET = 1 << 1, + DRBL_BUS_CHANGE = 1 << 2, + DRBL_EVENT_NOTIFY = 1 << 3, + DRBL_MU_RESET = 1 << 4, + DRBL_HANDSHAKE_ISR = DRBL_HANDSHAKE, + + /* + * Command flag is the flag for the CDB command itself + */ + /* 1-non data; 0-data command */ + CMD_FLAG_NON_DATA = 1 << 0, + CMD_FLAG_DMA = 1 << 1, + CMD_FLAG_PIO = 1 << 2, + /* 1-host read data */ + CMD_FLAG_DATA_IN = 1 << 3, + /* 1-host write data */ + CMD_FLAG_DATA_OUT = 1 << 4, + CMD_FLAG_PRDT_IN_HOST = 1 << 5, +}; + +#define APICDB0_EVENT 0xF4 +#define APICDB1_EVENT_GETEVENT 0 +#define APICDB1_HOST_GETEVENT 1 +#define MAX_EVENTS_RETURNED 6 + +#define DEVICE_OFFLINE 0 +#define DEVICE_ONLINE 1 + +struct mvumi_hotplug_event { + u16 size; + u8 dummy[2]; + u8 bitmap[]; +}; + +struct mvumi_driver_event { + u32 time_stamp; + u32 sequence_no; + u32 event_id; + u8 severity; + u8 param_count; + u16 device_id; + u32 params[4]; + u8 sense_data_length; + u8 Reserved1; + u8 sense_data[30]; +}; + +struct mvumi_event_req { + unsigned char count; + unsigned char reserved[3]; + struct mvumi_driver_event events[MAX_EVENTS_RETURNED]; +}; + +struct mvumi_events_wq { + struct work_struct work_q; + struct mvumi_hba *mhba; + unsigned int event; + void *param; +}; + +#define HS_CAPABILITY_SUPPORT_COMPACT_SG (1U << 4) +#define HS_CAPABILITY_SUPPORT_PRD_HOST (1U << 5) +#define HS_CAPABILITY_SUPPORT_DYN_SRC (1U << 6) +#define HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF (1U << 14) + +#define MVUMI_MAX_SG_ENTRY 32 +#define SGD_EOT (1L << 27) +#define 
SGD_EOT_CP (1L << 22) + +struct mvumi_sgl { + u32 baseaddr_l; + u32 baseaddr_h; + u32 flags; + u32 size; +}; +struct mvumi_compact_sgl { + u32 baseaddr_l; + u32 baseaddr_h; + u32 flags; +}; + +#define GET_COMPACT_SGD_SIZE(sgd) \ + ((((struct mvumi_compact_sgl *)(sgd))->flags) & 0x3FFFFFL) + +#define SET_COMPACT_SGD_SIZE(sgd, sz) do { \ + (((struct mvumi_compact_sgl *)(sgd))->flags) &= ~0x3FFFFFL; \ + (((struct mvumi_compact_sgl *)(sgd))->flags) |= (sz); \ +} while (0) +#define sgd_getsz(_mhba, sgd, sz) do { \ + if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \ + (sz) = GET_COMPACT_SGD_SIZE(sgd); \ + else \ + (sz) = (sgd)->size; \ +} while (0) + +#define sgd_setsz(_mhba, sgd, sz) do { \ + if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \ + SET_COMPACT_SGD_SIZE(sgd, sz); \ + else \ + (sgd)->size = (sz); \ +} while (0) + +#define sgd_inc(_mhba, sgd) do { \ + if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \ + sgd = (struct mvumi_sgl *)(((unsigned char *) (sgd)) + 12); \ + else \ + sgd = (struct mvumi_sgl *)(((unsigned char *) (sgd)) + 16); \ +} while (0) + +struct mvumi_res { + struct list_head entry; + dma_addr_t bus_addr; + void *virt_addr; + unsigned int size; + unsigned short type; /* enum Resource_Type */ +}; + +/* Resource type */ +enum resource_type { + RESOURCE_CACHED_MEMORY = 0, + RESOURCE_UNCACHED_MEMORY +}; + +struct mvumi_sense_data { + u8 error_code:7; + u8 valid:1; + u8 segment_number; + u8 sense_key:4; + u8 reserved:1; + u8 incorrect_length:1; + u8 end_of_media:1; + u8 file_mark:1; + u8 information[4]; + u8 additional_sense_length; + u8 command_specific_information[4]; + u8 additional_sense_code; + u8 additional_sense_code_qualifier; + u8 field_replaceable_unit_code; + u8 sense_key_specific[3]; +}; + +/* Request initiator must set the status to REQ_STATUS_PENDING. 
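+ * For internal commands, mvumi_complete_internal_cmd() overwrites it with
+ * the req_status reported in the response frame.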
*/ +#define REQ_STATUS_PENDING 0x80 + +struct mvumi_cmd { + struct list_head queue_pointer; + struct mvumi_msg_frame *frame; + dma_addr_t frame_phys; + struct scsi_cmnd *scmd; + atomic_t sync_cmd; + void *data_buf; + unsigned short request_id; + unsigned char cmd_status; +}; + +struct mvumi_cmd_priv { + struct mvumi_cmd *cmd_priv; +}; + +static inline struct mvumi_cmd_priv *mvumi_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +/* + * the function type of the in bound frame + */ +#define CL_FUN_SCSI_CMD 0x1 + +struct mvumi_msg_frame { + u16 device_id; + u16 tag; + u8 cmd_flag; + u8 req_function; + u8 cdb_length; + u8 sg_counts; + u32 data_transfer_length; + u16 request_id; + u16 reserved1; + u8 cdb[MAX_COMMAND_SIZE]; + u32 payload[]; +}; + +/* + * the respond flag for data_payload of the out bound frame + */ +#define CL_RSP_FLAG_NODATA 0x0 +#define CL_RSP_FLAG_SENSEDATA 0x1 + +struct mvumi_rsp_frame { + u16 device_id; + u16 tag; + u8 req_status; + u8 rsp_flag; /* Indicates the type of Data_Payload.*/ + u16 request_id; + u32 payload[]; +}; + +struct mvumi_ob_data { + struct list_head list; + unsigned char data[]; +}; + +struct version_info { + u32 ver_major; + u32 ver_minor; + u32 ver_oem; + u32 ver_build; +}; + +#define FW_MAX_DELAY 30 +#define MVUMI_FW_BUSY (1U << 0) +#define MVUMI_FW_ATTACH (1U << 1) +#define MVUMI_FW_ALLOC (1U << 2) + +/* + * State is the state of the MU + */ +#define FW_STATE_IDLE 0 +#define FW_STATE_STARTING 1 +#define FW_STATE_HANDSHAKING 2 +#define FW_STATE_STARTED 3 +#define FW_STATE_ABORT 4 + +#define HANDSHAKE_SIGNATURE 0x5A5A5A5AL +#define HANDSHAKE_READYSTATE 0x55AA5AA5L +#define HANDSHAKE_DONESTATE 0x55AAA55AL + +/* HandShake Status definition */ +#define HS_STATUS_OK 1 +#define HS_STATUS_ERR 2 +#define HS_STATUS_INVALID 3 + +/* HandShake State/Cmd definition */ +#define HS_S_START 1 +#define HS_S_RESET 2 +#define HS_S_PAGE_ADDR 3 +#define HS_S_QUERY_PAGE 4 +#define HS_S_SEND_PAGE 5 +#define HS_S_END 6 +#define HS_S_ABORT 7 +#define HS_PAGE_VERIFY_SIZE 128 + +#define HS_GET_STATE(a) (a & 0xFFFF) +#define HS_GET_STATUS(a) ((a & 0xFFFF0000) >> 16) +#define HS_SET_STATE(a, b) (a |= (b & 0xFFFF)) +#define HS_SET_STATUS(a, b) (a |= ((b & 0xFFFF) << 16)) + +/* handshake frame */ +struct mvumi_hs_frame { + u16 size; + /* host information */ + u8 host_type; + u8 reserved_1[1]; + struct version_info host_ver; /* bios or driver version */ + + /* controller information */ + u32 system_io_bus; + u32 slot_number; + u32 intr_level; + u32 intr_vector; + + /* communication list configuration */ + u32 ib_baseaddr_l; + u32 ib_baseaddr_h; + u32 ob_baseaddr_l; + u32 ob_baseaddr_h; + + u8 ib_entry_size; + u8 ob_entry_size; + u8 ob_depth; + u8 ib_depth; + + /* system time */ + u64 seconds_since1970; +}; + +struct mvumi_hs_header { + u8 page_code; + u8 checksum; + u16 frame_length; + u32 frame_content[]; +}; + +/* + * the page code type of the handshake header + */ +#define HS_PAGE_FIRM_CAP 0x1 +#define HS_PAGE_HOST_INFO 0x2 +#define HS_PAGE_FIRM_CTL 0x3 +#define HS_PAGE_CL_INFO 0x4 +#define HS_PAGE_TOTAL 0x5 + +#define HSP_SIZE(i) sizeof(struct mvumi_hs_page##i) + +#define HSP_MAX_SIZE ({ \ + int size, m1, m2; \ + m1 = max(HSP_SIZE(1), HSP_SIZE(3)); \ + m2 = max(HSP_SIZE(2), HSP_SIZE(4)); \ + size = max(m1, m2); \ + size; \ +}) + +/* The format of the page code for Firmware capability */ +struct mvumi_hs_page1 { + u8 pagecode; + u8 checksum; + u16 frame_length; + + u16 number_of_ports; + u16 max_devices_support; + u16 max_io_support; + u16 umi_ver; + u32 
max_transfer_size; + struct version_info fw_ver; + u8 cl_in_max_entry_size; + u8 cl_out_max_entry_size; + u8 cl_inout_list_depth; + u8 total_pages; + u16 capability; + u16 reserved1; +}; + +/* The format of the page code for Host information */ +struct mvumi_hs_page2 { + u8 pagecode; + u8 checksum; + u16 frame_length; + + u8 host_type; + u8 host_cap; + u8 reserved[2]; + struct version_info host_ver; + u32 system_io_bus; + u32 slot_number; + u32 intr_level; + u32 intr_vector; + u64 seconds_since1970; +}; + +/* The format of the page code for firmware control */ +struct mvumi_hs_page3 { + u8 pagecode; + u8 checksum; + u16 frame_length; + u16 control; + u8 reserved[2]; + u32 host_bufferaddr_l; + u32 host_bufferaddr_h; + u32 host_eventaddr_l; + u32 host_eventaddr_h; +}; + +struct mvumi_hs_page4 { + u8 pagecode; + u8 checksum; + u16 frame_length; + u32 ib_baseaddr_l; + u32 ib_baseaddr_h; + u32 ob_baseaddr_l; + u32 ob_baseaddr_h; + u8 ib_entry_size; + u8 ob_entry_size; + u8 ob_depth; + u8 ib_depth; +}; + +struct mvumi_tag { + unsigned short *stack; + unsigned short top; + unsigned short size; +}; + +struct mvumi_device { + struct list_head list; + struct scsi_device *sdev; + u64 wwid; + u8 dev_type; + int id; +}; + +struct mvumi_hba { + void *base_addr[MAX_BASE_ADDRESS]; + u32 pci_base[MAX_BASE_ADDRESS]; + void *mmio; + struct list_head cmd_pool; + struct Scsi_Host *shost; + wait_queue_head_t int_cmd_wait_q; + struct pci_dev *pdev; + unsigned int unique_id; + atomic_t fw_outstanding; + struct mvumi_instance_template *instancet; + + void *ib_list; + dma_addr_t ib_list_phys; + + void *ib_frame; + dma_addr_t ib_frame_phys; + + void *ob_list; + dma_addr_t ob_list_phys; + + void *ib_shadow; + dma_addr_t ib_shadow_phys; + + void *ob_shadow; + dma_addr_t ob_shadow_phys; + + void *handshake_page; + dma_addr_t handshake_page_phys; + + unsigned int global_isr; + unsigned int isr_status; + + unsigned short max_sge; + unsigned short max_target_id; + unsigned char *target_map; + unsigned int max_io; + unsigned int list_num_io; + unsigned int ib_max_size; + unsigned int ob_max_size; + unsigned int ib_max_size_setting; + unsigned int ob_max_size_setting; + unsigned int max_transfer_size; + unsigned char hba_total_pages; + unsigned char fw_flag; + unsigned char request_id_enabled; + unsigned char eot_flag; + unsigned short hba_capability; + unsigned short io_seq; + + unsigned int ib_cur_slot; + unsigned int ob_cur_slot; + unsigned int fw_state; + struct mutex sas_discovery_mutex; + + struct list_head ob_data_list; + struct list_head free_ob_list; + struct list_head res_list; + struct list_head waiting_req_list; + + struct mvumi_tag tag_pool; + struct mvumi_cmd **tag_cmd; + struct mvumi_hw_regs *regs; + struct mutex device_lock; + struct list_head mhba_dev_list; + struct list_head shost_dev_list; + struct task_struct *dm_thread; + atomic_t pnp_count; +}; + +struct mvumi_instance_template { + void (*fire_cmd) (struct mvumi_hba *, struct mvumi_cmd *); + void (*enable_intr) (struct mvumi_hba *); + void (*disable_intr) (struct mvumi_hba *); + int (*clear_intr) (void *); + unsigned int (*read_fw_status_reg) (struct mvumi_hba *); + unsigned int (*check_ib_list) (struct mvumi_hba *); + int (*check_ob_list) (struct mvumi_hba *, unsigned int *, + unsigned int *); + int (*reset_host) (struct mvumi_hba *); +}; + +extern struct timezone sys_tz; +#endif diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c new file mode 100644 index 000000000..ca2e932dd --- /dev/null +++ b/drivers/scsi/myrb.c @@ -0,0 +1,3562 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers + * + * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH + * + * Based on the original DAC960 driver, + * Copyright 1998-2001 by Leonard N. Zubkoff + * Portions Copyright 2002 by Mylex (An IBM Business Unit) + * + */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/raid_class.h> +#include <asm/unaligned.h> +#include <scsi/scsi.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_tcq.h> +#include "myrb.h" + +static struct raid_template *myrb_raid_template; + +static void myrb_monitor(struct work_struct *work); +static inline void myrb_translate_devstate(void *DeviceState); + +static inline int myrb_logical_channel(struct Scsi_Host *shost) +{ + return shost->max_channel - 1; +} + +static struct myrb_devstate_name_entry { + enum myrb_devstate state; + const char *name; +} myrb_devstate_name_list[] = { + { MYRB_DEVICE_DEAD, "Dead" }, + { MYRB_DEVICE_WO, "WriteOnly" }, + { MYRB_DEVICE_ONLINE, "Online" }, + { MYRB_DEVICE_CRITICAL, "Critical" }, + { MYRB_DEVICE_STANDBY, "Standby" }, + { MYRB_DEVICE_OFFLINE, "Offline" }, +}; + +static const char *myrb_devstate_name(enum myrb_devstate state) +{ + struct myrb_devstate_name_entry *entry = myrb_devstate_name_list; + int i; + + for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) { + if (entry[i].state == state) + return entry[i].name; + } + return "Unknown"; +} + +static struct myrb_raidlevel_name_entry { + enum myrb_raidlevel level; + const char *name; +} myrb_raidlevel_name_list[] = { + { MYRB_RAID_LEVEL0, "RAID0" }, + { MYRB_RAID_LEVEL1, "RAID1" }, + { MYRB_RAID_LEVEL3, "RAID3" }, + { MYRB_RAID_LEVEL5, "RAID5" }, + { MYRB_RAID_LEVEL6, "RAID6" }, + { MYRB_RAID_JBOD, "JBOD" }, +}; + +static const char *myrb_raidlevel_name(enum myrb_raidlevel level) +{ + struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list; + int i; + + for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) { + if (entry[i].level == level) + return entry[i].name; + } + return NULL; +} + +/* + * myrb_create_mempools - allocates auxiliary data structures + * + * Return: true on success, false otherwise. + */ +static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb) +{ + size_t elem_size, elem_align; + + elem_align = sizeof(struct myrb_sge); + elem_size = cb->host->sg_tablesize * elem_align; + cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev, + elem_size, elem_align, 0); + if (cb->sg_pool == NULL) { + shost_printk(KERN_ERR, cb->host, + "Failed to allocate SG pool\n"); + return false; + } + + cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev, + sizeof(struct myrb_dcdb), + sizeof(unsigned int), 0); + if (!cb->dcdb_pool) { + dma_pool_destroy(cb->sg_pool); + cb->sg_pool = NULL; + shost_printk(KERN_ERR, cb->host, + "Failed to allocate DCDB pool\n"); + return false; + } + + snprintf(cb->work_q_name, sizeof(cb->work_q_name), + "myrb_wq_%d", cb->host->host_no); + cb->work_q = create_singlethread_workqueue(cb->work_q_name); + if (!cb->work_q) { + dma_pool_destroy(cb->dcdb_pool); + cb->dcdb_pool = NULL; + dma_pool_destroy(cb->sg_pool); + cb->sg_pool = NULL; + shost_printk(KERN_ERR, cb->host, + "Failed to create workqueue\n"); + return false; + } + + /* + * Initialize the Monitoring Timer. 
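+	 * The work is queued with a one-jiffy delay, so the first status
+	 * poll runs almost immediately after the controller is set up.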
+ */ + INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor); + queue_delayed_work(cb->work_q, &cb->monitor_work, 1); + + return true; +} + +/* + * myrb_destroy_mempools - tears down the memory pools for the controller + */ +static void myrb_destroy_mempools(struct myrb_hba *cb) +{ + cancel_delayed_work_sync(&cb->monitor_work); + destroy_workqueue(cb->work_q); + + dma_pool_destroy(cb->sg_pool); + dma_pool_destroy(cb->dcdb_pool); +} + +/* + * myrb_reset_cmd - reset command block + */ +static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk) +{ + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + + memset(mbox, 0, sizeof(union myrb_cmd_mbox)); + cmd_blk->status = 0; +} + +/* + * myrb_qcmd - queues command block for execution + */ +static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) +{ + void __iomem *base = cb->io_base; + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox; + + cb->write_cmd_mbox(next_mbox, mbox); + if (cb->prev_cmd_mbox1->words[0] == 0 || + cb->prev_cmd_mbox2->words[0] == 0) + cb->get_cmd_mbox(base); + cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1; + cb->prev_cmd_mbox1 = next_mbox; + if (++next_mbox > cb->last_cmd_mbox) + next_mbox = cb->first_cmd_mbox; + cb->next_cmd_mbox = next_mbox; +} + +/* + * myrb_exec_cmd - executes command block and waits for completion. + * + * Return: command status + */ +static unsigned short myrb_exec_cmd(struct myrb_hba *cb, + struct myrb_cmdblk *cmd_blk) +{ + DECLARE_COMPLETION_ONSTACK(cmpl); + unsigned long flags; + + cmd_blk->completion = &cmpl; + + spin_lock_irqsave(&cb->queue_lock, flags); + cb->qcmd(cb, cmd_blk); + spin_unlock_irqrestore(&cb->queue_lock, flags); + + wait_for_completion(&cmpl); + return cmd_blk->status; +} + +/* + * myrb_exec_type3 - executes a type 3 command and waits for completion. + * + * Return: command status + */ +static unsigned short myrb_exec_type3(struct myrb_hba *cb, + enum myrb_cmd_opcode op, dma_addr_t addr) +{ + struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk; + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + unsigned short status; + + mutex_lock(&cb->dcmd_mutex); + myrb_reset_cmd(cmd_blk); + mbox->type3.id = MYRB_DCMD_TAG; + mbox->type3.opcode = op; + mbox->type3.addr = addr; + status = myrb_exec_cmd(cb, cmd_blk); + mutex_unlock(&cb->dcmd_mutex); + return status; +} + +/* + * myrb_exec_type3D - executes a type 3D command and waits for completion. 
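+ *
+ * Type 3D commands address a single physical device: the mailbox carries
+ * the channel and target taken from the scsi_device, and the device-state
+ * buffer is DMA-mapped for the duration of the command.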
+ * + * Return: command status + */ +static unsigned short myrb_exec_type3D(struct myrb_hba *cb, + enum myrb_cmd_opcode op, struct scsi_device *sdev, + struct myrb_pdev_state *pdev_info) +{ + struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk; + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + unsigned short status; + dma_addr_t pdev_info_addr; + + pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info, + sizeof(struct myrb_pdev_state), + DMA_FROM_DEVICE); + if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr)) + return MYRB_STATUS_SUBSYS_FAILED; + + mutex_lock(&cb->dcmd_mutex); + myrb_reset_cmd(cmd_blk); + mbox->type3D.id = MYRB_DCMD_TAG; + mbox->type3D.opcode = op; + mbox->type3D.channel = sdev->channel; + mbox->type3D.target = sdev->id; + mbox->type3D.addr = pdev_info_addr; + status = myrb_exec_cmd(cb, cmd_blk); + mutex_unlock(&cb->dcmd_mutex); + dma_unmap_single(&cb->pdev->dev, pdev_info_addr, + sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE); + if (status == MYRB_STATUS_SUCCESS && + mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD) + myrb_translate_devstate(pdev_info); + + return status; +} + +static char *myrb_event_msg[] = { + "killed because write recovery failed", + "killed because of SCSI bus reset failure", + "killed because of double check condition", + "killed because it was removed", + "killed because of gross error on SCSI chip", + "killed because of bad tag returned from drive", + "killed because of timeout on SCSI command", + "killed because of reset SCSI command issued from system", + "killed because busy or parity error count exceeded limit", + "killed because of 'kill drive' command from system", + "killed because of selection timeout", + "killed due to SCSI phase sequence error", + "killed due to unknown status", +}; + +/** + * myrb_get_event - get event log from HBA + * @cb: pointer to the hba structure + * @event: number of the event + * + * Execute a type 3E command and logs the event message + */ +static void myrb_get_event(struct myrb_hba *cb, unsigned int event) +{ + struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + struct myrb_log_entry *ev_buf; + dma_addr_t ev_addr; + unsigned short status; + + ev_buf = dma_alloc_coherent(&cb->pdev->dev, + sizeof(struct myrb_log_entry), + &ev_addr, GFP_KERNEL); + if (!ev_buf) + return; + + myrb_reset_cmd(cmd_blk); + mbox->type3E.id = MYRB_MCMD_TAG; + mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION; + mbox->type3E.optype = DAC960_V1_GetEventLogEntry; + mbox->type3E.opqual = 1; + mbox->type3E.ev_seq = event; + mbox->type3E.addr = ev_addr; + status = myrb_exec_cmd(cb, cmd_blk); + if (status != MYRB_STATUS_SUCCESS) + shost_printk(KERN_INFO, cb->host, + "Failed to get event log %d, status %04x\n", + event, status); + + else if (ev_buf->seq_num == event) { + struct scsi_sense_hdr sshdr; + + memset(&sshdr, 0, sizeof(sshdr)); + scsi_normalize_sense(ev_buf->sense, 32, &sshdr); + + if (sshdr.sense_key == VENDOR_SPECIFIC && + sshdr.asc == 0x80 && + sshdr.ascq < ARRAY_SIZE(myrb_event_msg)) + shost_printk(KERN_CRIT, cb->host, + "Physical drive %d:%d: %s\n", + ev_buf->channel, ev_buf->target, + myrb_event_msg[sshdr.ascq]); + else + shost_printk(KERN_CRIT, cb->host, + "Physical drive %d:%d: Sense: %X/%02X/%02X\n", + ev_buf->channel, ev_buf->target, + sshdr.sense_key, sshdr.asc, sshdr.ascq); + } + + dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry), + ev_buf, ev_addr); +} + +/* + * myrb_get_errtable - retrieves the error table from the controller + * + * Executes a type 3 command and 
logs the error table from the controller. + */ +static void myrb_get_errtable(struct myrb_hba *cb) +{ + struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + unsigned short status; + struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS]; + + memcpy(&old_table, cb->err_table, sizeof(old_table)); + + myrb_reset_cmd(cmd_blk); + mbox->type3.id = MYRB_MCMD_TAG; + mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE; + mbox->type3.addr = cb->err_table_addr; + status = myrb_exec_cmd(cb, cmd_blk); + if (status == MYRB_STATUS_SUCCESS) { + struct myrb_error_entry *table = cb->err_table; + struct myrb_error_entry *new, *old; + size_t err_table_offset; + struct scsi_device *sdev; + + shost_for_each_device(sdev, cb->host) { + if (sdev->channel >= myrb_logical_channel(cb->host)) + continue; + err_table_offset = sdev->channel * MYRB_MAX_TARGETS + + sdev->id; + new = table + err_table_offset; + old = &old_table[err_table_offset]; + if (new->parity_err == old->parity_err && + new->soft_err == old->soft_err && + new->hard_err == old->hard_err && + new->misc_err == old->misc_err) + continue; + sdev_printk(KERN_CRIT, sdev, + "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n", + new->parity_err, new->soft_err, + new->hard_err, new->misc_err); + } + } +} + +/* + * myrb_get_ldev_info - retrieves the logical device table from the controller + * + * Executes a type 3 command and updates the logical device table. + * + * Return: command status + */ +static unsigned short myrb_get_ldev_info(struct myrb_hba *cb) +{ + unsigned short status; + int ldev_num, ldev_cnt = cb->enquiry->ldev_count; + struct Scsi_Host *shost = cb->host; + + status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO, + cb->ldev_info_addr); + if (status != MYRB_STATUS_SUCCESS) + return status; + + for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) { + struct myrb_ldev_info *old = NULL; + struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num; + struct scsi_device *sdev; + + sdev = scsi_device_lookup(shost, myrb_logical_channel(shost), + ldev_num, 0); + if (!sdev) { + if (new->state == MYRB_DEVICE_OFFLINE) + continue; + shost_printk(KERN_INFO, shost, + "Adding Logical Drive %d in state %s\n", + ldev_num, myrb_devstate_name(new->state)); + scsi_add_device(shost, myrb_logical_channel(shost), + ldev_num, 0); + continue; + } + old = sdev->hostdata; + if (new->state != old->state) + shost_printk(KERN_INFO, shost, + "Logical Drive %d is now %s\n", + ldev_num, myrb_devstate_name(new->state)); + if (new->wb_enabled != old->wb_enabled) + sdev_printk(KERN_INFO, sdev, + "Logical Drive is now WRITE %s\n", + (new->wb_enabled ? "BACK" : "THRU")); + memcpy(old, new, sizeof(*new)); + scsi_device_put(sdev); + } + return status; +} + +/* + * myrb_get_rbld_progress - get rebuild progress information + * + * Executes a type 3 command and returns the rebuild progress + * information. 
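+ * The rbld pointer may be NULL when the caller is only interested in the
+ * returned status.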
+ * + * Return: command status + */ +static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb, + struct myrb_rbld_progress *rbld) +{ + struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + struct myrb_rbld_progress *rbld_buf; + dma_addr_t rbld_addr; + unsigned short status; + + rbld_buf = dma_alloc_coherent(&cb->pdev->dev, + sizeof(struct myrb_rbld_progress), + &rbld_addr, GFP_KERNEL); + if (!rbld_buf) + return MYRB_STATUS_RBLD_NOT_CHECKED; + + myrb_reset_cmd(cmd_blk); + mbox->type3.id = MYRB_MCMD_TAG; + mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS; + mbox->type3.addr = rbld_addr; + status = myrb_exec_cmd(cb, cmd_blk); + if (rbld) + memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress)); + dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress), + rbld_buf, rbld_addr); + return status; +} + +/* + * myrb_update_rbld_progress - updates the rebuild status + * + * Updates the rebuild status for the attached logical devices. + */ +static void myrb_update_rbld_progress(struct myrb_hba *cb) +{ + struct myrb_rbld_progress rbld_buf; + unsigned short status; + + status = myrb_get_rbld_progress(cb, &rbld_buf); + if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS && + cb->last_rbld_status == MYRB_STATUS_SUCCESS) + status = MYRB_STATUS_RBLD_SUCCESS; + if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) { + unsigned int blocks_done = + rbld_buf.ldev_size - rbld_buf.blocks_left; + struct scsi_device *sdev; + + sdev = scsi_device_lookup(cb->host, + myrb_logical_channel(cb->host), + rbld_buf.ldev_num, 0); + if (!sdev) + return; + + switch (status) { + case MYRB_STATUS_SUCCESS: + sdev_printk(KERN_INFO, sdev, + "Rebuild in Progress, %d%% completed\n", + (100 * (blocks_done >> 7)) + / (rbld_buf.ldev_size >> 7)); + break; + case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE: + sdev_printk(KERN_INFO, sdev, + "Rebuild Failed due to Logical Drive Failure\n"); + break; + case MYRB_STATUS_RBLD_FAILED_BADBLOCKS: + sdev_printk(KERN_INFO, sdev, + "Rebuild Failed due to Bad Blocks on Other Drives\n"); + break; + case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED: + sdev_printk(KERN_INFO, sdev, + "Rebuild Failed due to Failure of Drive Being Rebuilt\n"); + break; + case MYRB_STATUS_RBLD_SUCCESS: + sdev_printk(KERN_INFO, sdev, + "Rebuild Completed Successfully\n"); + break; + case MYRB_STATUS_RBLD_SUCCESS_TERMINATED: + sdev_printk(KERN_INFO, sdev, + "Rebuild Successfully Terminated\n"); + break; + default: + break; + } + scsi_device_put(sdev); + } + cb->last_rbld_status = status; +} + +/* + * myrb_get_cc_progress - retrieve the rebuild status + * + * Execute a type 3 Command and fetch the rebuild / consistency check + * status. 
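+ * + * The progress reports below scale both block counts by 128 (>> 7) before + * multiplying by 100, presumably so the product stays within 32 bits for + * large logical drives; e.g. 250000 of 1000000 blocks done gives + * (100 * (250000 >> 7)) / (1000000 >> 7) = (100 * 1953) / 7812 = 25 percent.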
+ */ +static void myrb_get_cc_progress(struct myrb_hba *cb) +{ + struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + struct myrb_rbld_progress *rbld_buf; + dma_addr_t rbld_addr; + unsigned short status; + + rbld_buf = dma_alloc_coherent(&cb->pdev->dev, + sizeof(struct myrb_rbld_progress), + &rbld_addr, GFP_KERNEL); + if (!rbld_buf) { + cb->need_cc_status = true; + return; + } + myrb_reset_cmd(cmd_blk); + mbox->type3.id = MYRB_MCMD_TAG; + mbox->type3.opcode = MYRB_CMD_REBUILD_STAT; + mbox->type3.addr = rbld_addr; + status = myrb_exec_cmd(cb, cmd_blk); + if (status == MYRB_STATUS_SUCCESS) { + unsigned int ldev_num = rbld_buf->ldev_num; + unsigned int ldev_size = rbld_buf->ldev_size; + unsigned int blocks_done = + ldev_size - rbld_buf->blocks_left; + struct scsi_device *sdev; + + sdev = scsi_device_lookup(cb->host, + myrb_logical_channel(cb->host), + ldev_num, 0); + if (sdev) { + sdev_printk(KERN_INFO, sdev, + "Consistency Check in Progress: %d%% completed\n", + (100 * (blocks_done >> 7)) + / (ldev_size >> 7)); + scsi_device_put(sdev); + } + } + dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress), + rbld_buf, rbld_addr); +} + +/* + * myrb_bgi_control - updates background initialisation status + * + * Executes a type 3B command and updates the background initialisation status + */ +static void myrb_bgi_control(struct myrb_hba *cb) +{ + struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + struct myrb_bgi_status *bgi, *last_bgi; + dma_addr_t bgi_addr; + struct scsi_device *sdev = NULL; + unsigned short status; + + bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status), + &bgi_addr, GFP_KERNEL); + if (!bgi) { + shost_printk(KERN_ERR, cb->host, + "Failed to allocate bgi memory\n"); + return; + } + myrb_reset_cmd(cmd_blk); + mbox->type3B.id = MYRB_DCMD_TAG; + mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL; + mbox->type3B.optype = 0x20; + mbox->type3B.addr = bgi_addr; + status = myrb_exec_cmd(cb, cmd_blk); + last_bgi = &cb->bgi_status; + sdev = scsi_device_lookup(cb->host, + myrb_logical_channel(cb->host), + bgi->ldev_num, 0); + switch (status) { + case MYRB_STATUS_SUCCESS: + switch (bgi->status) { + case MYRB_BGI_INVALID: + break; + case MYRB_BGI_STARTED: + if (!sdev) + break; + sdev_printk(KERN_INFO, sdev, + "Background Initialization Started\n"); + break; + case MYRB_BGI_INPROGRESS: + if (!sdev) + break; + if (bgi->blocks_done == last_bgi->blocks_done && + bgi->ldev_num == last_bgi->ldev_num) + break; + sdev_printk(KERN_INFO, sdev, + "Background Initialization in Progress: %d%% completed\n", + (100 * (bgi->blocks_done >> 7)) + / (bgi->ldev_size >> 7)); + break; + case MYRB_BGI_SUSPENDED: + if (!sdev) + break; + sdev_printk(KERN_INFO, sdev, + "Background Initialization Suspended\n"); + break; + case MYRB_BGI_CANCELLED: + if (!sdev) + break; + sdev_printk(KERN_INFO, sdev, + "Background Initialization Cancelled\n"); + break; + } + memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status)); + break; + case MYRB_STATUS_BGI_SUCCESS: + if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS) + sdev_printk(KERN_INFO, sdev, + "Background Initialization Completed Successfully\n"); + cb->bgi_status.status = MYRB_BGI_INVALID; + break; + case MYRB_STATUS_BGI_ABORTED: + if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS) + sdev_printk(KERN_INFO, sdev, + "Background Initialization Aborted\n"); + fallthrough; + case MYRB_STATUS_NO_BGI_INPROGRESS: + cb->bgi_status.status = MYRB_BGI_INVALID; + 
break; + } + if (sdev) + scsi_device_put(sdev); + dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status), + bgi, bgi_addr); +} + +/* + * myrb_hba_enquiry - updates the controller status + * + * Executes a DAC_V1_Enquiry command and updates the controller status. + * + * Return: command status + */ +static unsigned short myrb_hba_enquiry(struct myrb_hba *cb) +{ + struct myrb_enquiry old, *new; + unsigned short status; + + memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry)); + + status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr); + if (status != MYRB_STATUS_SUCCESS) + return status; + + new = cb->enquiry; + if (new->ldev_count > old.ldev_count) { + int ldev_num = old.ldev_count - 1; + + while (++ldev_num < new->ldev_count) + shost_printk(KERN_CRIT, cb->host, + "Logical Drive %d Now Exists\n", + ldev_num); + } + if (new->ldev_count < old.ldev_count) { + int ldev_num = new->ldev_count - 1; + + while (++ldev_num < old.ldev_count) + shost_printk(KERN_CRIT, cb->host, + "Logical Drive %d No Longer Exists\n", + ldev_num); + } + if (new->status.deferred != old.status.deferred) + shost_printk(KERN_CRIT, cb->host, + "Deferred Write Error Flag is now %s\n", + (new->status.deferred ? "TRUE" : "FALSE")); + if (new->ev_seq != old.ev_seq) { + cb->new_ev_seq = new->ev_seq; + cb->need_err_info = true; + shost_printk(KERN_INFO, cb->host, + "Event log %d/%d (%d/%d) available\n", + cb->old_ev_seq, cb->new_ev_seq, + old.ev_seq, new->ev_seq); + } + if ((new->ldev_critical > 0 && + new->ldev_critical != old.ldev_critical) || + (new->ldev_offline > 0 && + new->ldev_offline != old.ldev_offline) || + (new->ldev_count != old.ldev_count)) { + shost_printk(KERN_INFO, cb->host, + "Logical drive count changed (%d/%d/%d)\n", + new->ldev_critical, + new->ldev_offline, + new->ldev_count); + cb->need_ldev_info = true; + } + if (new->pdev_dead > 0 || + new->pdev_dead != old.pdev_dead || + time_after_eq(jiffies, cb->secondary_monitor_time + + MYRB_SECONDARY_MONITOR_INTERVAL)) { + cb->need_bgi_status = cb->bgi_status_supported; + cb->secondary_monitor_time = jiffies; + } + if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS || + new->rbld == MYRB_BG_RBLD_IN_PROGRESS || + old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS || + old.rbld == MYRB_BG_RBLD_IN_PROGRESS) { + cb->need_rbld = true; + cb->rbld_first = (new->ldev_critical < old.ldev_critical); + } + if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS) + switch (new->rbld) { + case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS: + shost_printk(KERN_INFO, cb->host, + "Consistency Check Completed Successfully\n"); + break; + case MYRB_STDBY_RBLD_IN_PROGRESS: + case MYRB_BG_RBLD_IN_PROGRESS: + break; + case MYRB_BG_CHECK_IN_PROGRESS: + cb->need_cc_status = true; + break; + case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR: + shost_printk(KERN_INFO, cb->host, + "Consistency Check Completed with Error\n"); + break; + case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED: + shost_printk(KERN_INFO, cb->host, + "Consistency Check Failed - Physical Device Failed\n"); + break; + case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED: + shost_printk(KERN_INFO, cb->host, + "Consistency Check Failed - Logical Drive Failed\n"); + break; + case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER: + shost_printk(KERN_INFO, cb->host, + "Consistency Check Failed - Other Causes\n"); + break; + case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED: + shost_printk(KERN_INFO, cb->host, + "Consistency Check Successfully Terminated\n"); + break; + } + else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS) + cb->need_cc_status = true; + + return 
MYRB_STATUS_SUCCESS; +} + +/* + * myrb_set_pdev_state - sets the device state for a physical device + * + * Return: command status + */ +static unsigned short myrb_set_pdev_state(struct myrb_hba *cb, + struct scsi_device *sdev, enum myrb_devstate state) +{ + struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk; + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + unsigned short status; + + mutex_lock(&cb->dcmd_mutex); + mbox->type3D.opcode = MYRB_CMD_START_DEVICE; + mbox->type3D.id = MYRB_DCMD_TAG; + mbox->type3D.channel = sdev->channel; + mbox->type3D.target = sdev->id; + mbox->type3D.state = state & 0x1F; + status = myrb_exec_cmd(cb, cmd_blk); + mutex_unlock(&cb->dcmd_mutex); + + return status; +} + +/* + * myrb_enable_mmio - enables the Memory Mailbox Interface + * + * PD and P controller types have no memory mailbox, but still need the + * other dma mapped memory. + * + * Return: true on success, false otherwise. + */ +static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn) +{ + void __iomem *base = cb->io_base; + struct pci_dev *pdev = cb->pdev; + size_t err_table_size; + size_t ldev_info_size; + union myrb_cmd_mbox *cmd_mbox_mem; + struct myrb_stat_mbox *stat_mbox_mem; + union myrb_cmd_mbox mbox; + unsigned short status; + + memset(&mbox, 0, sizeof(union myrb_cmd_mbox)); + + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { + dev_err(&pdev->dev, "DMA mask out of range\n"); + return false; + } + + cb->enquiry = dma_alloc_coherent(&pdev->dev, + sizeof(struct myrb_enquiry), + &cb->enquiry_addr, GFP_KERNEL); + if (!cb->enquiry) + return false; + + err_table_size = sizeof(struct myrb_error_entry) * + MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS; + cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size, + &cb->err_table_addr, GFP_KERNEL); + if (!cb->err_table) + return false; + + ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS; + cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size, + &cb->ldev_info_addr, GFP_KERNEL); + if (!cb->ldev_info_buf) + return false; + + /* + * Skip mailbox initialisation for PD and P Controllers + */ + if (!mmio_init_fn) + return true; + + /* These are the base addresses for the command memory mailbox array */ + cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox); + cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev, + cb->cmd_mbox_size, + &cb->cmd_mbox_addr, + GFP_KERNEL); + if (!cb->first_cmd_mbox) + return false; + + cmd_mbox_mem = cb->first_cmd_mbox; + cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1; + cb->last_cmd_mbox = cmd_mbox_mem; + cb->next_cmd_mbox = cb->first_cmd_mbox; + cb->prev_cmd_mbox1 = cb->last_cmd_mbox; + cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1; + + /* These are the base addresses for the status memory mailbox array */ + cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT * + sizeof(struct myrb_stat_mbox); + cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev, + cb->stat_mbox_size, + &cb->stat_mbox_addr, + GFP_KERNEL); + if (!cb->first_stat_mbox) + return false; + + stat_mbox_mem = cb->first_stat_mbox; + stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1; + cb->last_stat_mbox = stat_mbox_mem; + cb->next_stat_mbox = cb->first_stat_mbox; + + /* Enable the Memory Mailbox Interface. 
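+ * The init command below uses opcode 0x2B with opcode2 0x14 to request the + * dual-mode (command plus status mailbox) interface; if that fails it is + * retried with opcode2 0x10, which appears to select the single-mode + * interface instead.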
*/ + cb->dual_mode_interface = true; + mbox.typeX.opcode = 0x2B; + mbox.typeX.id = 0; + mbox.typeX.opcode2 = 0x14; + mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr; + mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr; + + status = mmio_init_fn(pdev, base, &mbox); + if (status != MYRB_STATUS_SUCCESS) { + cb->dual_mode_interface = false; + mbox.typeX.opcode2 = 0x10; + status = mmio_init_fn(pdev, base, &mbox); + if (status != MYRB_STATUS_SUCCESS) { + dev_err(&pdev->dev, + "Failed to enable mailbox, status %02X\n", + status); + return false; + } + } + return true; +} + +/* + * myrb_get_hba_config - reads the configuration information + * + * Reads the configuration information from the controller and + * initializes the controller structure. + * + * Return: 0 on success, errno otherwise + */ +static int myrb_get_hba_config(struct myrb_hba *cb) +{ + struct myrb_enquiry2 *enquiry2; + dma_addr_t enquiry2_addr; + struct myrb_config2 *config2; + dma_addr_t config2_addr; + struct Scsi_Host *shost = cb->host; + struct pci_dev *pdev = cb->pdev; + int pchan_max = 0, pchan_cur = 0; + unsigned short status; + int ret = -ENODEV, memsize = 0; + + enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2), + &enquiry2_addr, GFP_KERNEL); + if (!enquiry2) { + shost_printk(KERN_ERR, cb->host, + "Failed to allocate V1 enquiry2 memory\n"); + return -ENOMEM; + } + config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2), + &config2_addr, GFP_KERNEL); + if (!config2) { + shost_printk(KERN_ERR, cb->host, + "Failed to allocate V1 config2 memory\n"); + dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2), + enquiry2, enquiry2_addr); + return -ENOMEM; + } + mutex_lock(&cb->dma_mutex); + status = myrb_hba_enquiry(cb); + mutex_unlock(&cb->dma_mutex); + if (status != MYRB_STATUS_SUCCESS) { + shost_printk(KERN_WARNING, cb->host, + "Failed to issue V1 Enquiry\n"); + goto out_free; + } + + status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr); + if (status != MYRB_STATUS_SUCCESS) { + shost_printk(KERN_WARNING, cb->host, + "Failed to issue V1 Enquiry2\n"); + goto out_free; + } + + status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr); + if (status != MYRB_STATUS_SUCCESS) { + shost_printk(KERN_WARNING, cb->host, + "Failed to issue ReadConfig2\n"); + goto out_free; + } + + status = myrb_get_ldev_info(cb); + if (status != MYRB_STATUS_SUCCESS) { + shost_printk(KERN_WARNING, cb->host, + "Failed to get logical drive information\n"); + goto out_free; + } + + /* + * Initialize the Controller Model Name and Full Model Name fields.
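+ * The name is selected from enquiry2->hw.sub_model below; an unrecognised + * sub-model code logs "Unknown Model" and the probe fails with -ENODEV.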
+ */ + switch (enquiry2->hw.sub_model) { + case DAC960_V1_P_PD_PU: + if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA) + strcpy(cb->model_name, "DAC960PU"); + else + strcpy(cb->model_name, "DAC960PD"); + break; + case DAC960_V1_PL: + strcpy(cb->model_name, "DAC960PL"); + break; + case DAC960_V1_PG: + strcpy(cb->model_name, "DAC960PG"); + break; + case DAC960_V1_PJ: + strcpy(cb->model_name, "DAC960PJ"); + break; + case DAC960_V1_PR: + strcpy(cb->model_name, "DAC960PR"); + break; + case DAC960_V1_PT: + strcpy(cb->model_name, "DAC960PT"); + break; + case DAC960_V1_PTL0: + strcpy(cb->model_name, "DAC960PTL0"); + break; + case DAC960_V1_PRL: + strcpy(cb->model_name, "DAC960PRL"); + break; + case DAC960_V1_PTL1: + strcpy(cb->model_name, "DAC960PTL1"); + break; + case DAC960_V1_1164P: + strcpy(cb->model_name, "eXtremeRAID 1100"); + break; + default: + shost_printk(KERN_WARNING, cb->host, + "Unknown Model %X\n", + enquiry2->hw.sub_model); + goto out; + } + /* + * Initialize the Controller Firmware Version field and verify that it + * is a supported firmware version. + * The supported firmware versions are: + * + * DAC1164P 5.06 and above + * DAC960PTL/PRL/PJ/PG 4.06 and above + * DAC960PU/PD/PL 3.51 and above + * DAC960PU/PD/PL/P 2.73 and above + */ +#if defined(CONFIG_ALPHA) + /* + * DEC Alpha machines were often equipped with DAC960 cards that were + * OEMed from Mylex, and had their own custom firmware. Version 2.70, + * the last custom FW revision to be released by DEC for these older + * controllers, appears to work quite well with this driver. + * + * Cards tested successfully were several versions each of the PD and + * PU, called by DEC the KZPSC and KZPAC, respectively, and having + * the Manufacturer Numbers (from Mylex), usually on a sticker on the + * back of the board, of: + * + * KZPSC: D040347 (1-channel) or D040348 (2-channel) + * or D040349 (3-channel) + * KZPAC: D040395 (1-channel) or D040396 (2-channel) + * or D040397 (3-channel) + */ +# define FIRMWARE_27X "2.70" +#else +# define FIRMWARE_27X "2.73" +#endif + + if (enquiry2->fw.major_version == 0) { + enquiry2->fw.major_version = cb->enquiry->fw_major_version; + enquiry2->fw.minor_version = cb->enquiry->fw_minor_version; + enquiry2->fw.firmware_type = '0'; + enquiry2->fw.turn_id = 0; + } + snprintf(cb->fw_version, sizeof(cb->fw_version), + "%u.%02u-%c-%02u", + enquiry2->fw.major_version, + enquiry2->fw.minor_version, + enquiry2->fw.firmware_type, + enquiry2->fw.turn_id); + if (!((enquiry2->fw.major_version == 5 && + enquiry2->fw.minor_version >= 6) || + (enquiry2->fw.major_version == 4 && + enquiry2->fw.minor_version >= 6) || + (enquiry2->fw.major_version == 3 && + enquiry2->fw.minor_version >= 51) || + (enquiry2->fw.major_version == 2 && + strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) { + shost_printk(KERN_WARNING, cb->host, + "Firmware Version '%s' unsupported\n", + cb->fw_version); + goto out; + } + /* + * Initialize the Channels, Targets, Memory Size, and SAF-TE + * Enclosure Management Enabled fields. 
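+ * + * enquiry2->mem_size is evidently reported in bytes, so the ">> 20" below + * converts it to megabytes for the configuration banner (e.g. 0x4000000 + * becomes 64MB).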
+ */ + switch (enquiry2->hw.model) { + case MYRB_5_CHANNEL_BOARD: + pchan_max = 5; + break; + case MYRB_3_CHANNEL_BOARD: + case MYRB_3_CHANNEL_ASIC_DAC: + pchan_max = 3; + break; + case MYRB_2_CHANNEL_BOARD: + pchan_max = 2; + break; + default: + pchan_max = enquiry2->cfg_chan; + break; + } + pchan_cur = enquiry2->cur_chan; + if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT) + cb->bus_width = 32; + else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT) + cb->bus_width = 16; + else + cb->bus_width = 8; + cb->ldev_block_size = enquiry2->ldev_block_size; + shost->max_channel = pchan_cur; + shost->max_id = enquiry2->max_targets; + memsize = enquiry2->mem_size >> 20; + cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE); + /* + * Initialize the Controller Queue Depth, Driver Queue Depth, + * Logical Drive Count, Maximum Blocks per Command, Controller + * Scatter/Gather Limit, and Driver Scatter/Gather Limit. + * The Driver Queue Depth must be at most one less than the + * Controller Queue Depth to allow for an automatic drive + * rebuild operation. + */ + shost->can_queue = cb->enquiry->max_tcq; + if (shost->can_queue < 3) + shost->can_queue = enquiry2->max_cmds; + if (shost->can_queue < 3) + /* Play safe and disable TCQ */ + shost->can_queue = 1; + + if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2) + shost->can_queue = MYRB_CMD_MBOX_COUNT - 2; + shost->max_sectors = enquiry2->max_sectors; + shost->sg_tablesize = enquiry2->max_sge; + if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT) + shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT; + /* + * Initialize the Stripe Size, Segment Size, and Geometry Translation. + */ + cb->stripe_size = config2->blocks_per_stripe * config2->block_factor + >> (10 - MYRB_BLKSIZE_BITS); + cb->segment_size = config2->blocks_per_cacheline * config2->block_factor + >> (10 - MYRB_BLKSIZE_BITS); + /* Assume 255/63 translation */ + cb->ldev_geom_heads = 255; + cb->ldev_geom_sectors = 63; + if (config2->drive_geometry) { + cb->ldev_geom_heads = 128; + cb->ldev_geom_sectors = 32; + } + + /* + * Initialize the Background Initialization Status. + */ + if ((cb->fw_version[0] == '4' && + strcmp(cb->fw_version, "4.08") >= 0) || + (cb->fw_version[0] == '5' && + strcmp(cb->fw_version, "5.08") >= 0)) { + cb->bgi_status_supported = true; + myrb_bgi_control(cb); + } + cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS; + ret = 0; + +out: + shost_printk(KERN_INFO, cb->host, + "Configuring %s PCI RAID Controller\n", cb->model_name); + shost_printk(KERN_INFO, cb->host, + " Firmware Version: %s, Memory Size: %dMB\n", + cb->fw_version, memsize); + if (cb->io_addr == 0) + shost_printk(KERN_INFO, cb->host, + " I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n", + (unsigned long)cb->pci_addr, cb->irq); + else + shost_printk(KERN_INFO, cb->host, + " I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n", + (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr, + cb->irq); + shost_printk(KERN_INFO, cb->host, + " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n", + cb->host->can_queue, cb->host->max_sectors); + shost_printk(KERN_INFO, cb->host, + " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n", + cb->host->can_queue, cb->host->sg_tablesize, + MYRB_SCATTER_GATHER_LIMIT); + shost_printk(KERN_INFO, cb->host, + " Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n", + cb->stripe_size, cb->segment_size, + cb->ldev_geom_heads, cb->ldev_geom_sectors, + cb->safte_enabled ? 
+ " SAF-TE Enclosure Management Enabled" : ""); + shost_printk(KERN_INFO, cb->host, + " Physical: %d/%d channels %d/%d/%d devices\n", + pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead, + cb->host->max_id); + + shost_printk(KERN_INFO, cb->host, + " Logical: 1/1 channels, %d/%d disks\n", + cb->enquiry->ldev_count, MYRB_MAX_LDEVS); + +out_free: + dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2), + enquiry2, enquiry2_addr); + dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2), + config2, config2_addr); + + return ret; +} + +/* + * myrb_unmap - unmaps controller structures + */ +static void myrb_unmap(struct myrb_hba *cb) +{ + if (cb->ldev_info_buf) { + size_t ldev_info_size = sizeof(struct myrb_ldev_info) * + MYRB_MAX_LDEVS; + dma_free_coherent(&cb->pdev->dev, ldev_info_size, + cb->ldev_info_buf, cb->ldev_info_addr); + cb->ldev_info_buf = NULL; + } + if (cb->err_table) { + size_t err_table_size = sizeof(struct myrb_error_entry) * + MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS; + dma_free_coherent(&cb->pdev->dev, err_table_size, + cb->err_table, cb->err_table_addr); + cb->err_table = NULL; + } + if (cb->enquiry) { + dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry), + cb->enquiry, cb->enquiry_addr); + cb->enquiry = NULL; + } + if (cb->first_stat_mbox) { + dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size, + cb->first_stat_mbox, cb->stat_mbox_addr); + cb->first_stat_mbox = NULL; + } + if (cb->first_cmd_mbox) { + dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size, + cb->first_cmd_mbox, cb->cmd_mbox_addr); + cb->first_cmd_mbox = NULL; + } +} + +/* + * myrb_cleanup - cleanup controller structures + */ +static void myrb_cleanup(struct myrb_hba *cb) +{ + struct pci_dev *pdev = cb->pdev; + + /* Free the memory mailbox, status, and related structures */ + myrb_unmap(cb); + + if (cb->mmio_base) { + if (cb->disable_intr) + cb->disable_intr(cb->io_base); + iounmap(cb->mmio_base); + } + if (cb->irq) + free_irq(cb->irq, cb); + if (cb->io_addr) + release_region(cb->io_addr, 0x80); + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); + scsi_host_put(cb->host); +} + +static int myrb_host_reset(struct scsi_cmnd *scmd) +{ + struct Scsi_Host *shost = scmd->device->host; + struct myrb_hba *cb = shost_priv(shost); + + cb->reset(cb->io_base); + return SUCCESS; +} + +static int myrb_pthru_queuecommand(struct Scsi_Host *shost, + struct scsi_cmnd *scmd) +{ + struct request *rq = scsi_cmd_to_rq(scmd); + struct myrb_hba *cb = shost_priv(shost); + struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd); + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + struct myrb_dcdb *dcdb; + dma_addr_t dcdb_addr; + struct scsi_device *sdev = scmd->device; + struct scatterlist *sgl; + unsigned long flags; + int nsge; + + myrb_reset_cmd(cmd_blk); + dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr); + if (!dcdb) + return SCSI_MLQUEUE_HOST_BUSY; + nsge = scsi_dma_map(scmd); + if (nsge > 1) { + dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr); + scmd->result = (DID_ERROR << 16); + scsi_done(scmd); + return 0; + } + + mbox->type3.opcode = MYRB_CMD_DCDB; + mbox->type3.id = rq->tag + 3; + mbox->type3.addr = dcdb_addr; + dcdb->channel = sdev->channel; + dcdb->target = sdev->id; + switch (scmd->sc_data_direction) { + case DMA_NONE: + dcdb->data_xfer = MYRB_DCDB_XFER_NONE; + break; + case DMA_TO_DEVICE: + dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE; + break; + case DMA_FROM_DEVICE: + dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM; + break; + default: + dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL; + break; + } 
+ dcdb->early_status = false; + if (rq->timeout <= 10) + dcdb->timeout = MYRB_DCDB_TMO_10_SECS; + else if (rq->timeout <= 60) + dcdb->timeout = MYRB_DCDB_TMO_60_SECS; + else if (rq->timeout <= 600) + dcdb->timeout = MYRB_DCDB_TMO_10_MINS; + else + dcdb->timeout = MYRB_DCDB_TMO_24_HRS; + dcdb->no_autosense = false; + dcdb->allow_disconnect = true; + sgl = scsi_sglist(scmd); + dcdb->dma_addr = sg_dma_address(sgl); + if (sg_dma_len(sgl) > USHRT_MAX) { + dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff; + dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16; + } else { + dcdb->xfer_len_lo = sg_dma_len(sgl); + dcdb->xfer_len_hi4 = 0; + } + dcdb->cdb_len = scmd->cmd_len; + dcdb->sense_len = sizeof(dcdb->sense); + memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len); + + spin_lock_irqsave(&cb->queue_lock, flags); + cb->qcmd(cb, cmd_blk); + spin_unlock_irqrestore(&cb->queue_lock, flags); + return 0; +} + +static void myrb_inquiry(struct myrb_hba *cb, + struct scsi_cmnd *scmd) +{ + unsigned char inq[36] = { + 0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00, + 0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, + }; + + if (cb->bus_width > 16) + inq[7] |= 1 << 6; + if (cb->bus_width > 8) + inq[7] |= 1 << 5; + memcpy(&inq[16], cb->model_name, 16); + memcpy(&inq[32], cb->fw_version, 1); + memcpy(&inq[33], &cb->fw_version[2], 2); + memcpy(&inq[35], &cb->fw_version[7], 1); + + scsi_sg_copy_from_buffer(scmd, (void *)inq, 36); +} + +static void +myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd, + struct myrb_ldev_info *ldev_info) +{ + unsigned char modes[32], *mode_pg; + bool dbd; + size_t mode_len; + + dbd = (scmd->cmnd[1] & 0x08) == 0x08; + if (dbd) { + mode_len = 24; + mode_pg = &modes[4]; + } else { + mode_len = 32; + mode_pg = &modes[12]; + } + memset(modes, 0, sizeof(modes)); + modes[0] = mode_len - 1; + if (!dbd) { + unsigned char *block_desc = &modes[4]; + + modes[3] = 8; + put_unaligned_be32(ldev_info->size, &block_desc[0]); + put_unaligned_be32(cb->ldev_block_size, &block_desc[5]); + } + mode_pg[0] = 0x08; + mode_pg[1] = 0x12; + if (ldev_info->wb_enabled) + mode_pg[2] |= 0x04; + if (cb->segment_size) { + mode_pg[2] |= 0x08; + put_unaligned_be16(cb->segment_size, &mode_pg[14]); + } + + scsi_sg_copy_from_buffer(scmd, modes, mode_len); +} + +static void myrb_request_sense(struct myrb_hba *cb, + struct scsi_cmnd *scmd) +{ + scsi_build_sense(scmd, 0, NO_SENSE, 0, 0); + scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE); +} + +static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd, + struct myrb_ldev_info *ldev_info) +{ + unsigned char data[8]; + + dev_dbg(&scmd->device->sdev_gendev, + "Capacity %u, blocksize %u\n", + ldev_info->size, cb->ldev_block_size); + put_unaligned_be32(ldev_info->size - 1, &data[0]); + put_unaligned_be32(cb->ldev_block_size, &data[4]); + scsi_sg_copy_from_buffer(scmd, data, 8); +} + +static int myrb_ldev_queuecommand(struct Scsi_Host *shost, + struct scsi_cmnd *scmd) +{ + struct myrb_hba *cb = shost_priv(shost); + struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd); + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + struct myrb_ldev_info *ldev_info; + struct scsi_device *sdev = scmd->device; + struct scatterlist *sgl; + unsigned long flags; + u64 lba; + u32 block_cnt; + int nsge; + + ldev_info = sdev->hostdata; + if (ldev_info->state != MYRB_DEVICE_ONLINE && + ldev_info->state != MYRB_DEVICE_WO) { + dev_dbg(&shost->shost_gendev, 
"ldev %u in state %x, skip\n", + sdev->id, ldev_info ? ldev_info->state : 0xff); + scmd->result = (DID_BAD_TARGET << 16); + scsi_done(scmd); + return 0; + } + switch (scmd->cmnd[0]) { + case TEST_UNIT_READY: + scmd->result = (DID_OK << 16); + scsi_done(scmd); + return 0; + case INQUIRY: + if (scmd->cmnd[1] & 1) { + /* Illegal request, invalid field in CDB */ + scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); + } else { + myrb_inquiry(cb, scmd); + scmd->result = (DID_OK << 16); + } + scsi_done(scmd); + return 0; + case SYNCHRONIZE_CACHE: + scmd->result = (DID_OK << 16); + scsi_done(scmd); + return 0; + case MODE_SENSE: + if ((scmd->cmnd[2] & 0x3F) != 0x3F && + (scmd->cmnd[2] & 0x3F) != 0x08) { + /* Illegal request, invalid field in CDB */ + scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); + } else { + myrb_mode_sense(cb, scmd, ldev_info); + scmd->result = (DID_OK << 16); + } + scsi_done(scmd); + return 0; + case READ_CAPACITY: + if ((scmd->cmnd[1] & 1) || + (scmd->cmnd[8] & 1)) { + /* Illegal request, invalid field in CDB */ + scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); + scsi_done(scmd); + return 0; + } + lba = get_unaligned_be32(&scmd->cmnd[2]); + if (lba) { + /* Illegal request, invalid field in CDB */ + scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); + scsi_done(scmd); + return 0; + } + myrb_read_capacity(cb, scmd, ldev_info); + scsi_done(scmd); + return 0; + case REQUEST_SENSE: + myrb_request_sense(cb, scmd); + scmd->result = (DID_OK << 16); + return 0; + case SEND_DIAGNOSTIC: + if (scmd->cmnd[1] != 0x04) { + /* Illegal request, invalid field in CDB */ + scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); + } else { + /* Assume good status */ + scmd->result = (DID_OK << 16); + } + scsi_done(scmd); + return 0; + case READ_6: + if (ldev_info->state == MYRB_DEVICE_WO) { + /* Data protect, attempt to read invalid data */ + scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06); + scsi_done(scmd); + return 0; + } + fallthrough; + case WRITE_6: + lba = (((scmd->cmnd[1] & 0x1F) << 16) | + (scmd->cmnd[2] << 8) | + scmd->cmnd[3]); + block_cnt = scmd->cmnd[4]; + break; + case READ_10: + if (ldev_info->state == MYRB_DEVICE_WO) { + /* Data protect, attempt to read invalid data */ + scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06); + scsi_done(scmd); + return 0; + } + fallthrough; + case WRITE_10: + case VERIFY: /* 0x2F */ + case WRITE_VERIFY: /* 0x2E */ + lba = get_unaligned_be32(&scmd->cmnd[2]); + block_cnt = get_unaligned_be16(&scmd->cmnd[7]); + break; + case READ_12: + if (ldev_info->state == MYRB_DEVICE_WO) { + /* Data protect, attempt to read invalid data */ + scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06); + scsi_done(scmd); + return 0; + } + fallthrough; + case WRITE_12: + case VERIFY_12: /* 0xAF */ + case WRITE_VERIFY_12: /* 0xAE */ + lba = get_unaligned_be32(&scmd->cmnd[2]); + block_cnt = get_unaligned_be32(&scmd->cmnd[6]); + break; + default: + /* Illegal request, invalid opcode */ + scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0); + scsi_done(scmd); + return 0; + } + + myrb_reset_cmd(cmd_blk); + mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3; + if (scmd->sc_data_direction == DMA_NONE) + goto submit; + nsge = scsi_dma_map(scmd); + if (nsge == 1) { + sgl = scsi_sglist(scmd); + if (scmd->sc_data_direction == DMA_FROM_DEVICE) + mbox->type5.opcode = MYRB_CMD_READ; + else + mbox->type5.opcode = MYRB_CMD_WRITE; + + mbox->type5.ld.xfer_len = block_cnt; + mbox->type5.ld.ldev_num = sdev->id; + mbox->type5.lba = lba; + mbox->type5.addr = (u32)sg_dma_address(sgl); + 
} else { + struct myrb_sge *hw_sgl; + dma_addr_t hw_sgl_addr; + int i; + + hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr); + if (!hw_sgl) + return SCSI_MLQUEUE_HOST_BUSY; + + cmd_blk->sgl = hw_sgl; + cmd_blk->sgl_addr = hw_sgl_addr; + + if (scmd->sc_data_direction == DMA_FROM_DEVICE) + mbox->type5.opcode = MYRB_CMD_READ_SG; + else + mbox->type5.opcode = MYRB_CMD_WRITE_SG; + + mbox->type5.ld.xfer_len = block_cnt; + mbox->type5.ld.ldev_num = sdev->id; + mbox->type5.lba = lba; + mbox->type5.addr = hw_sgl_addr; + mbox->type5.sg_count = nsge; + + scsi_for_each_sg(scmd, sgl, nsge, i) { + hw_sgl->sge_addr = (u32)sg_dma_address(sgl); + hw_sgl->sge_count = (u32)sg_dma_len(sgl); + hw_sgl++; + } + } +submit: + spin_lock_irqsave(&cb->queue_lock, flags); + cb->qcmd(cb, cmd_blk); + spin_unlock_irqrestore(&cb->queue_lock, flags); + + return 0; +} + +static int myrb_queuecommand(struct Scsi_Host *shost, + struct scsi_cmnd *scmd) +{ + struct scsi_device *sdev = scmd->device; + + if (sdev->channel > myrb_logical_channel(shost)) { + scmd->result = (DID_BAD_TARGET << 16); + scsi_done(scmd); + return 0; + } + if (sdev->channel == myrb_logical_channel(shost)) + return myrb_ldev_queuecommand(shost, scmd); + + return myrb_pthru_queuecommand(shost, scmd); +} + +static int myrb_ldev_slave_alloc(struct scsi_device *sdev) +{ + struct myrb_hba *cb = shost_priv(sdev->host); + struct myrb_ldev_info *ldev_info; + unsigned short ldev_num = sdev->id; + enum raid_level level; + + ldev_info = cb->ldev_info_buf + ldev_num; + if (!ldev_info) + return -ENXIO; + + sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL); + if (!sdev->hostdata) + return -ENOMEM; + dev_dbg(&sdev->sdev_gendev, + "slave alloc ldev %d state %x\n", + ldev_num, ldev_info->state); + memcpy(sdev->hostdata, ldev_info, + sizeof(*ldev_info)); + switch (ldev_info->raid_level) { + case MYRB_RAID_LEVEL0: + level = RAID_LEVEL_LINEAR; + break; + case MYRB_RAID_LEVEL1: + level = RAID_LEVEL_1; + break; + case MYRB_RAID_LEVEL3: + level = RAID_LEVEL_3; + break; + case MYRB_RAID_LEVEL5: + level = RAID_LEVEL_5; + break; + case MYRB_RAID_LEVEL6: + level = RAID_LEVEL_6; + break; + case MYRB_RAID_JBOD: + level = RAID_LEVEL_JBOD; + break; + default: + level = RAID_LEVEL_UNKNOWN; + break; + } + raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level); + return 0; +} + +static int myrb_pdev_slave_alloc(struct scsi_device *sdev) +{ + struct myrb_hba *cb = shost_priv(sdev->host); + struct myrb_pdev_state *pdev_info; + unsigned short status; + + if (sdev->id > MYRB_MAX_TARGETS) + return -ENXIO; + + pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL); + if (!pdev_info) + return -ENOMEM; + + status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE, + sdev, pdev_info); + if (status != MYRB_STATUS_SUCCESS) { + dev_dbg(&sdev->sdev_gendev, + "Failed to get device state, status %x\n", + status); + kfree(pdev_info); + return -ENXIO; + } + if (!pdev_info->present) { + dev_dbg(&sdev->sdev_gendev, + "device not present, skip\n"); + kfree(pdev_info); + return -ENXIO; + } + dev_dbg(&sdev->sdev_gendev, + "slave alloc pdev %d:%d state %x\n", + sdev->channel, sdev->id, pdev_info->state); + sdev->hostdata = pdev_info; + + return 0; +} + +static int myrb_slave_alloc(struct scsi_device *sdev) +{ + if (sdev->channel > myrb_logical_channel(sdev->host)) + return -ENXIO; + + if (sdev->lun > 0) + return -ENXIO; + + if (sdev->channel == myrb_logical_channel(sdev->host)) + return myrb_ldev_slave_alloc(sdev); + + return myrb_pdev_slave_alloc(sdev); +} + +static int 
myrb_slave_configure(struct scsi_device *sdev) +{ + struct myrb_ldev_info *ldev_info; + + if (sdev->channel > myrb_logical_channel(sdev->host)) + return -ENXIO; + + if (sdev->channel < myrb_logical_channel(sdev->host)) { + sdev->no_uld_attach = 1; + return 0; + } + if (sdev->lun != 0) + return -ENXIO; + + ldev_info = sdev->hostdata; + if (!ldev_info) + return -ENXIO; + if (ldev_info->state != MYRB_DEVICE_ONLINE) + sdev_printk(KERN_INFO, sdev, + "Logical drive is %s\n", + myrb_devstate_name(ldev_info->state)); + + sdev->tagged_supported = 1; + return 0; +} + +static void myrb_slave_destroy(struct scsi_device *sdev) +{ + kfree(sdev->hostdata); +} + +static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int geom[]) +{ + struct myrb_hba *cb = shost_priv(sdev->host); + + geom[0] = cb->ldev_geom_heads; + geom[1] = cb->ldev_geom_sectors; + geom[2] = sector_div(capacity, geom[0] * geom[1]); + + return 0; +} + +static ssize_t raid_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrb_hba *cb = shost_priv(sdev->host); + int ret; + + if (!sdev->hostdata) + return snprintf(buf, 16, "Unknown\n"); + + if (sdev->channel == myrb_logical_channel(sdev->host)) { + struct myrb_ldev_info *ldev_info = sdev->hostdata; + const char *name; + + name = myrb_devstate_name(ldev_info->state); + if (name) + ret = snprintf(buf, 32, "%s\n", name); + else + ret = snprintf(buf, 32, "Invalid (%02X)\n", + ldev_info->state); + } else { + struct myrb_pdev_state *pdev_info = sdev->hostdata; + unsigned short status; + const char *name; + + status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE, + sdev, pdev_info); + if (status != MYRB_STATUS_SUCCESS) + sdev_printk(KERN_INFO, sdev, + "Failed to get device state, status %x\n", + status); + + if (!pdev_info->present) + name = "Removed"; + else + name = myrb_devstate_name(pdev_info->state); + if (name) + ret = snprintf(buf, 32, "%s\n", name); + else + ret = snprintf(buf, 32, "Invalid (%02X)\n", + pdev_info->state); + } + return ret; +} + +static ssize_t raid_state_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrb_hba *cb = shost_priv(sdev->host); + struct myrb_pdev_state *pdev_info; + enum myrb_devstate new_state; + unsigned short status; + + if (!strncmp(buf, "kill", 4) || + !strncmp(buf, "offline", 7)) + new_state = MYRB_DEVICE_DEAD; + else if (!strncmp(buf, "online", 6)) + new_state = MYRB_DEVICE_ONLINE; + else if (!strncmp(buf, "standby", 7)) + new_state = MYRB_DEVICE_STANDBY; + else + return -EINVAL; + + pdev_info = sdev->hostdata; + if (!pdev_info) { + sdev_printk(KERN_INFO, sdev, + "Failed - no physical device information\n"); + return -ENXIO; + } + if (!pdev_info->present) { + sdev_printk(KERN_INFO, sdev, + "Failed - device not present\n"); + return -ENXIO; + } + + if (pdev_info->state == new_state) + return count; + + status = myrb_set_pdev_state(cb, sdev, new_state); + switch (status) { + case MYRB_STATUS_SUCCESS: + break; + case MYRB_STATUS_START_DEVICE_FAILED: + sdev_printk(KERN_INFO, sdev, + "Failed - Unable to Start Device\n"); + count = -EAGAIN; + break; + case MYRB_STATUS_NO_DEVICE: + sdev_printk(KERN_INFO, sdev, + "Failed - No Device at Address\n"); + count = -ENODEV; + break; + case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET: + sdev_printk(KERN_INFO, sdev, + "Failed - Invalid Channel or Target or Modifier\n"); + count = -EINVAL; + 
break; + case MYRB_STATUS_CHANNEL_BUSY: + sdev_printk(KERN_INFO, sdev, + "Failed - Channel Busy\n"); + count = -EBUSY; + break; + default: + sdev_printk(KERN_INFO, sdev, + "Failed - Unexpected Status %04X\n", status); + count = -EIO; + break; + } + return count; +} +static DEVICE_ATTR_RW(raid_state); + +static ssize_t raid_level_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + if (sdev->channel == myrb_logical_channel(sdev->host)) { + struct myrb_ldev_info *ldev_info = sdev->hostdata; + const char *name; + + if (!ldev_info) + return -ENXIO; + + name = myrb_raidlevel_name(ldev_info->raid_level); + if (!name) + return snprintf(buf, 32, "Invalid (%02X)\n", + ldev_info->state); + return snprintf(buf, 32, "%s\n", name); + } + return snprintf(buf, 32, "Physical Drive\n"); +} +static DEVICE_ATTR_RO(raid_level); + +static ssize_t rebuild_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrb_hba *cb = shost_priv(sdev->host); + struct myrb_rbld_progress rbld_buf; + unsigned char status; + + if (sdev->channel < myrb_logical_channel(sdev->host)) + return snprintf(buf, 32, "physical device - not rebuilding\n"); + + status = myrb_get_rbld_progress(cb, &rbld_buf); + + if (rbld_buf.ldev_num != sdev->id || + status != MYRB_STATUS_SUCCESS) + return snprintf(buf, 32, "not rebuilding\n"); + + return snprintf(buf, 32, "rebuilding block %u of %u\n", + rbld_buf.ldev_size - rbld_buf.blocks_left, + rbld_buf.ldev_size); +} + +static ssize_t rebuild_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrb_hba *cb = shost_priv(sdev->host); + struct myrb_cmdblk *cmd_blk; + union myrb_cmd_mbox *mbox; + unsigned short status; + int rc, start; + const char *msg; + + rc = kstrtoint(buf, 0, &start); + if (rc) + return rc; + + if (sdev->channel >= myrb_logical_channel(sdev->host)) + return -ENXIO; + + status = myrb_get_rbld_progress(cb, NULL); + if (start) { + if (status == MYRB_STATUS_SUCCESS) { + sdev_printk(KERN_INFO, sdev, + "Rebuild Not Initiated; already in progress\n"); + return -EALREADY; + } + mutex_lock(&cb->dcmd_mutex); + cmd_blk = &cb->dcmd_blk; + myrb_reset_cmd(cmd_blk); + mbox = &cmd_blk->mbox; + mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC; + mbox->type3D.id = MYRB_DCMD_TAG; + mbox->type3D.channel = sdev->channel; + mbox->type3D.target = sdev->id; + status = myrb_exec_cmd(cb, cmd_blk); + mutex_unlock(&cb->dcmd_mutex); + } else { + struct pci_dev *pdev = cb->pdev; + unsigned char *rate; + dma_addr_t rate_addr; + + if (status != MYRB_STATUS_SUCCESS) { + sdev_printk(KERN_INFO, sdev, + "Rebuild Not Cancelled; not in progress\n"); + return 0; + } + + rate = dma_alloc_coherent(&pdev->dev, sizeof(char), + &rate_addr, GFP_KERNEL); + if (rate == NULL) { + sdev_printk(KERN_INFO, sdev, + "Cancellation of Rebuild Failed - Out of Memory\n"); + return -ENOMEM; + } + mutex_lock(&cb->dcmd_mutex); + cmd_blk = &cb->dcmd_blk; + myrb_reset_cmd(cmd_blk); + mbox = &cmd_blk->mbox; + mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL; + mbox->type3R.id = MYRB_DCMD_TAG; + mbox->type3R.rbld_rate = 0xFF; + mbox->type3R.addr = rate_addr; + status = myrb_exec_cmd(cb, cmd_blk); + dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr); + mutex_unlock(&cb->dcmd_mutex); + } + if (status == MYRB_STATUS_SUCCESS) { + sdev_printk(KERN_INFO, sdev, "Rebuild %s\n", + start ? 
"Initiated" : "Cancelled"); + return count; + } + if (!start) { + sdev_printk(KERN_INFO, sdev, + "Rebuild Not Cancelled, status 0x%x\n", + status); + return -EIO; + } + + switch (status) { + case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE: + msg = "Attempt to Rebuild Online or Unresponsive Drive"; + break; + case MYRB_STATUS_RBLD_NEW_DISK_FAILED: + msg = "New Disk Failed During Rebuild"; + break; + case MYRB_STATUS_INVALID_ADDRESS: + msg = "Invalid Device Address"; + break; + case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS: + msg = "Already in Progress"; + break; + default: + msg = NULL; + break; + } + if (msg) + sdev_printk(KERN_INFO, sdev, + "Rebuild Failed - %s\n", msg); + else + sdev_printk(KERN_INFO, sdev, + "Rebuild Failed, status 0x%x\n", status); + + return -EIO; +} +static DEVICE_ATTR_RW(rebuild); + +static ssize_t consistency_check_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrb_hba *cb = shost_priv(sdev->host); + struct myrb_rbld_progress rbld_buf; + struct myrb_cmdblk *cmd_blk; + union myrb_cmd_mbox *mbox; + unsigned short ldev_num = 0xFFFF; + unsigned short status; + int rc, start; + const char *msg; + + rc = kstrtoint(buf, 0, &start); + if (rc) + return rc; + + if (sdev->channel < myrb_logical_channel(sdev->host)) + return -ENXIO; + + status = myrb_get_rbld_progress(cb, &rbld_buf); + if (start) { + if (status == MYRB_STATUS_SUCCESS) { + sdev_printk(KERN_INFO, sdev, + "Check Consistency Not Initiated; already in progress\n"); + return -EALREADY; + } + mutex_lock(&cb->dcmd_mutex); + cmd_blk = &cb->dcmd_blk; + myrb_reset_cmd(cmd_blk); + mbox = &cmd_blk->mbox; + mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC; + mbox->type3C.id = MYRB_DCMD_TAG; + mbox->type3C.ldev_num = sdev->id; + mbox->type3C.auto_restore = true; + + status = myrb_exec_cmd(cb, cmd_blk); + mutex_unlock(&cb->dcmd_mutex); + } else { + struct pci_dev *pdev = cb->pdev; + unsigned char *rate; + dma_addr_t rate_addr; + + if (ldev_num != sdev->id) { + sdev_printk(KERN_INFO, sdev, + "Check Consistency Not Cancelled; not in progress\n"); + return 0; + } + rate = dma_alloc_coherent(&pdev->dev, sizeof(char), + &rate_addr, GFP_KERNEL); + if (rate == NULL) { + sdev_printk(KERN_INFO, sdev, + "Cancellation of Check Consistency Failed - Out of Memory\n"); + return -ENOMEM; + } + mutex_lock(&cb->dcmd_mutex); + cmd_blk = &cb->dcmd_blk; + myrb_reset_cmd(cmd_blk); + mbox = &cmd_blk->mbox; + mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL; + mbox->type3R.id = MYRB_DCMD_TAG; + mbox->type3R.rbld_rate = 0xFF; + mbox->type3R.addr = rate_addr; + status = myrb_exec_cmd(cb, cmd_blk); + dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr); + mutex_unlock(&cb->dcmd_mutex); + } + if (status == MYRB_STATUS_SUCCESS) { + sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n", + start ? 
"Initiated" : "Cancelled"); + return count; + } + if (!start) { + sdev_printk(KERN_INFO, sdev, + "Check Consistency Not Cancelled, status 0x%x\n", + status); + return -EIO; + } + + switch (status) { + case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE: + msg = "Dependent Physical Device is DEAD"; + break; + case MYRB_STATUS_RBLD_NEW_DISK_FAILED: + msg = "New Disk Failed During Rebuild"; + break; + case MYRB_STATUS_INVALID_ADDRESS: + msg = "Invalid or Nonredundant Logical Drive"; + break; + case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS: + msg = "Already in Progress"; + break; + default: + msg = NULL; + break; + } + if (msg) + sdev_printk(KERN_INFO, sdev, + "Check Consistency Failed - %s\n", msg); + else + sdev_printk(KERN_INFO, sdev, + "Check Consistency Failed, status 0x%x\n", status); + + return -EIO; +} + +static ssize_t consistency_check_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return rebuild_show(dev, attr, buf); +} +static DEVICE_ATTR_RW(consistency_check); + +static ssize_t ctlr_num_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct myrb_hba *cb = shost_priv(shost); + + return snprintf(buf, 20, "%u\n", cb->ctlr_num); +} +static DEVICE_ATTR_RO(ctlr_num); + +static ssize_t firmware_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct myrb_hba *cb = shost_priv(shost); + + return snprintf(buf, 16, "%s\n", cb->fw_version); +} +static DEVICE_ATTR_RO(firmware); + +static ssize_t model_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct myrb_hba *cb = shost_priv(shost); + + return snprintf(buf, 16, "%s\n", cb->model_name); +} +static DEVICE_ATTR_RO(model); + +static ssize_t flush_cache_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct myrb_hba *cb = shost_priv(shost); + unsigned short status; + + status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0); + if (status == MYRB_STATUS_SUCCESS) { + shost_printk(KERN_INFO, shost, + "Cache Flush Completed\n"); + return count; + } + shost_printk(KERN_INFO, shost, + "Cache Flush Failed, status %x\n", status); + return -EIO; +} +static DEVICE_ATTR_WO(flush_cache); + +static struct attribute *myrb_sdev_attrs[] = { + &dev_attr_rebuild.attr, + &dev_attr_consistency_check.attr, + &dev_attr_raid_state.attr, + &dev_attr_raid_level.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(myrb_sdev); + +static struct attribute *myrb_shost_attrs[] = { + &dev_attr_ctlr_num.attr, + &dev_attr_model.attr, + &dev_attr_firmware.attr, + &dev_attr_flush_cache.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(myrb_shost); + +static const struct scsi_host_template myrb_template = { + .module = THIS_MODULE, + .name = "DAC960", + .proc_name = "myrb", + .queuecommand = myrb_queuecommand, + .eh_host_reset_handler = myrb_host_reset, + .slave_alloc = myrb_slave_alloc, + .slave_configure = myrb_slave_configure, + .slave_destroy = myrb_slave_destroy, + .bios_param = myrb_biosparam, + .cmd_size = sizeof(struct myrb_cmdblk), + .shost_groups = myrb_shost_groups, + .sdev_groups = myrb_sdev_groups, + .this_id = -1, +}; + +/** + * myrb_is_raid - return boolean indicating device is raid volume + * @dev: the device struct object + */ +static int myrb_is_raid(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return sdev->channel == 
myrb_logical_channel(sdev->host); +} + +/** + * myrb_get_resync - get raid volume resync percent complete + * @dev: the device struct object + */ +static void myrb_get_resync(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrb_hba *cb = shost_priv(sdev->host); + struct myrb_rbld_progress rbld_buf; + unsigned int percent_complete = 0; + unsigned short status; + unsigned int ldev_size = 0, remaining = 0; + + if (sdev->channel < myrb_logical_channel(sdev->host)) + return; + status = myrb_get_rbld_progress(cb, &rbld_buf); + if (status == MYRB_STATUS_SUCCESS) { + if (rbld_buf.ldev_num == sdev->id) { + ldev_size = rbld_buf.ldev_size; + remaining = rbld_buf.blocks_left; + } + } + if (remaining && ldev_size) + percent_complete = (ldev_size - remaining) * 100 / ldev_size; + raid_set_resync(myrb_raid_template, dev, percent_complete); +} + +/** + * myrb_get_state - get raid volume status + * @dev: the device struct object + */ +static void myrb_get_state(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrb_hba *cb = shost_priv(sdev->host); + struct myrb_ldev_info *ldev_info = sdev->hostdata; + enum raid_state state = RAID_STATE_UNKNOWN; + unsigned short status; + + if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info) + state = RAID_STATE_UNKNOWN; + else { + status = myrb_get_rbld_progress(cb, NULL); + if (status == MYRB_STATUS_SUCCESS) + state = RAID_STATE_RESYNCING; + else { + switch (ldev_info->state) { + case MYRB_DEVICE_ONLINE: + state = RAID_STATE_ACTIVE; + break; + case MYRB_DEVICE_WO: + case MYRB_DEVICE_CRITICAL: + state = RAID_STATE_DEGRADED; + break; + default: + state = RAID_STATE_OFFLINE; + } + } + } + raid_set_state(myrb_raid_template, dev, state); +} + +static struct raid_function_template myrb_raid_functions = { + .cookie = &myrb_template, + .is_raid = myrb_is_raid, + .get_resync = myrb_get_resync, + .get_state = myrb_get_state, +}; + +static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk, + struct scsi_cmnd *scmd) +{ + unsigned short status; + + if (!cmd_blk) + return; + + scsi_dma_unmap(scmd); + + if (cmd_blk->dcdb) { + memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64); + dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb, + cmd_blk->dcdb_addr); + cmd_blk->dcdb = NULL; + } + if (cmd_blk->sgl) { + dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr); + cmd_blk->sgl = NULL; + cmd_blk->sgl_addr = 0; + } + status = cmd_blk->status; + switch (status) { + case MYRB_STATUS_SUCCESS: + case MYRB_STATUS_DEVICE_BUSY: + scmd->result = (DID_OK << 16) | status; + break; + case MYRB_STATUS_BAD_DATA: + dev_dbg(&scmd->device->sdev_gendev, + "Bad Data Encountered\n"); + if (scmd->sc_data_direction == DMA_FROM_DEVICE) + /* Unrecovered read error */ + scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0); + else + /* Write error */ + scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0); + break; + case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR: + scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n"); + if (scmd->sc_data_direction == DMA_FROM_DEVICE) + /* Unrecovered read error, auto-reallocation failed */ + scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04); + else + /* Write error, auto-reallocation failed */ + scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02); + break; + case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE: + dev_dbg(&scmd->device->sdev_gendev, + "Logical Drive Nonexistent or Offline"); + scmd->result = (DID_BAD_TARGET << 16); + break; + case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV: + 
dev_dbg(&scmd->device->sdev_gendev, + "Attempt to Access Beyond End of Logical Drive"); + /* Logical block address out of range */ + scsi_build_sense(scmd, 0, NOT_READY, 0x21, 0); + break; + case MYRB_STATUS_DEVICE_NONRESPONSIVE: + dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n"); + scmd->result = (DID_BAD_TARGET << 16); + break; + default: + scmd_printk(KERN_ERR, scmd, + "Unexpected Error Status %04X", status); + scmd->result = (DID_ERROR << 16); + break; + } + scsi_done(scmd); +} + +static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) +{ + if (!cmd_blk) + return; + + if (cmd_blk->completion) { + complete(cmd_blk->completion); + cmd_blk->completion = NULL; + } +} + +static void myrb_monitor(struct work_struct *work) +{ + struct myrb_hba *cb = container_of(work, + struct myrb_hba, monitor_work.work); + struct Scsi_Host *shost = cb->host; + unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL; + + dev_dbg(&shost->shost_gendev, "monitor tick\n"); + + if (cb->new_ev_seq > cb->old_ev_seq) { + int event = cb->old_ev_seq; + + dev_dbg(&shost->shost_gendev, + "get event log no %d/%d\n", + cb->new_ev_seq, event); + myrb_get_event(cb, event); + cb->old_ev_seq = event + 1; + interval = 10; + } else if (cb->need_err_info) { + cb->need_err_info = false; + dev_dbg(&shost->shost_gendev, "get error table\n"); + myrb_get_errtable(cb); + interval = 10; + } else if (cb->need_rbld && cb->rbld_first) { + cb->need_rbld = false; + dev_dbg(&shost->shost_gendev, + "get rebuild progress\n"); + myrb_update_rbld_progress(cb); + interval = 10; + } else if (cb->need_ldev_info) { + cb->need_ldev_info = false; + dev_dbg(&shost->shost_gendev, + "get logical drive info\n"); + myrb_get_ldev_info(cb); + interval = 10; + } else if (cb->need_rbld) { + cb->need_rbld = false; + dev_dbg(&shost->shost_gendev, + "get rebuild progress\n"); + myrb_update_rbld_progress(cb); + interval = 10; + } else if (cb->need_cc_status) { + cb->need_cc_status = false; + dev_dbg(&shost->shost_gendev, + "get consistency check progress\n"); + myrb_get_cc_progress(cb); + interval = 10; + } else if (cb->need_bgi_status) { + cb->need_bgi_status = false; + dev_dbg(&shost->shost_gendev, "get background init status\n"); + myrb_bgi_control(cb); + interval = 10; + } else { + dev_dbg(&shost->shost_gendev, "new enquiry\n"); + mutex_lock(&cb->dma_mutex); + myrb_hba_enquiry(cb); + mutex_unlock(&cb->dma_mutex); + if ((cb->new_ev_seq - cb->old_ev_seq > 0) || + cb->need_err_info || cb->need_rbld || + cb->need_ldev_info || cb->need_cc_status || + cb->need_bgi_status) { + dev_dbg(&shost->shost_gendev, + "reschedule monitor\n"); + interval = 0; + } + } + if (interval > 1) + cb->primary_monitor_time = jiffies; + queue_delayed_work(cb->work_q, &cb->monitor_work, interval); +} + +/* + * myrb_err_status - reports controller BIOS messages + * + * Controller BIOS messages are passed through the Error Status Register + * when the driver performs the BIOS handshaking. + * + * Return: true for fatal errors and false otherwise. 
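+ * + * The (error, parm0, parm1) triple is read from the controller's error + * status registers by the hardware-specific *_read_error_status() helpers + * while the *_hw_init() routines poll for initialisation to complete.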
+ */ +static bool myrb_err_status(struct myrb_hba *cb, unsigned char error, + unsigned char parm0, unsigned char parm1) +{ + struct pci_dev *pdev = cb->pdev; + + switch (error) { + case 0x00: + dev_info(&pdev->dev, + "Physical Device %d:%d Not Responding\n", + parm1, parm0); + break; + case 0x08: + dev_notice(&pdev->dev, "Spinning Up Drives\n"); + break; + case 0x30: + dev_notice(&pdev->dev, "Configuration Checksum Error\n"); + break; + case 0x60: + dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n"); + break; + case 0x70: + dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n"); + break; + case 0x90: + dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n", + parm1, parm0); + break; + case 0xA0: + dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n"); + break; + case 0xB0: + dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n"); + break; + case 0xD0: + dev_notice(&pdev->dev, "New Controller Configuration Found\n"); + break; + case 0xF0: + dev_err(&pdev->dev, "Fatal Memory Parity Error\n"); + return true; + default: + dev_err(&pdev->dev, "Unknown Initialization Error %02X\n", + error); + return true; + } + return false; +} + +/* + * Hardware-specific functions + */ + +/* + * DAC960 LA Series Controllers + */ + +static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base) +{ + writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET); +} + +static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base) +{ + writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET); +} + +static inline void DAC960_LA_reset_ctrl(void __iomem *base) +{ + writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET); +} + +static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base) +{ + writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET); +} + +static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base) +{ + unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET); + + return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY); +} + +static inline bool DAC960_LA_init_in_progress(void __iomem *base) +{ + unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET); + + return !(idb & DAC960_LA_IDB_INIT_DONE); +} + +static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base) +{ + writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET); +} + +static inline void DAC960_LA_ack_intr(void __iomem *base) +{ + writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ, + base + DAC960_LA_ODB_OFFSET); +} + +static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base) +{ + unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET); + + return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL; +} + +static inline void DAC960_LA_enable_intr(void __iomem *base) +{ + unsigned char odb = 0xFF; + + odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ; + writeb(odb, base + DAC960_LA_IRQMASK_OFFSET); +} + +static inline void DAC960_LA_disable_intr(void __iomem *base) +{ + unsigned char odb = 0xFF; + + odb |= DAC960_LA_IRQMASK_DISABLE_IRQ; + writeb(odb, base + DAC960_LA_IRQMASK_OFFSET); +} + +static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox, + union myrb_cmd_mbox *mbox) +{ + mem_mbox->words[1] = mbox->words[1]; + mem_mbox->words[2] = mbox->words[2]; + mem_mbox->words[3] = mbox->words[3]; + /* Memory barrier to prevent reordering */ + wmb(); + mem_mbox->words[0] = mbox->words[0]; + /* Memory barrier to force PCI access */ + mb(); +} + +static inline void DAC960_LA_write_hw_mbox(void __iomem *base, + union myrb_cmd_mbox *mbox) +{ + 
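+ /* + * The command mailbox is pushed to the controller as three 32-bit writes + * (words 0-2) followed by a single byte write for byte 12, using the + * CMDOP/MBOX4/MBOX8/MBOX12 register offsets. + */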
writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET); + writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET); + writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET); + writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET); +} + +static inline unsigned short DAC960_LA_read_status(void __iomem *base) +{ + return readw(base + DAC960_LA_STS_OFFSET); +} + +static inline bool +DAC960_LA_read_error_status(void __iomem *base, unsigned char *error, + unsigned char *param0, unsigned char *param1) +{ + unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET); + + if (!(errsts & DAC960_LA_ERRSTS_PENDING)) + return false; + errsts &= ~DAC960_LA_ERRSTS_PENDING; + + *error = errsts; + *param0 = readb(base + DAC960_LA_CMDOP_OFFSET); + *param1 = readb(base + DAC960_LA_CMDID_OFFSET); + writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET); + return true; +} + +static inline unsigned short +DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base, + union myrb_cmd_mbox *mbox) +{ + unsigned short status; + int timeout = 0; + + while (timeout < MYRB_MAILBOX_TIMEOUT) { + if (!DAC960_LA_hw_mbox_is_full(base)) + break; + udelay(10); + timeout++; + } + if (DAC960_LA_hw_mbox_is_full(base)) { + dev_err(&pdev->dev, + "Timeout waiting for empty mailbox\n"); + return MYRB_STATUS_SUBSYS_TIMEOUT; + } + DAC960_LA_write_hw_mbox(base, mbox); + DAC960_LA_hw_mbox_new_cmd(base); + timeout = 0; + while (timeout < MYRB_MAILBOX_TIMEOUT) { + if (DAC960_LA_hw_mbox_status_available(base)) + break; + udelay(10); + timeout++; + } + if (!DAC960_LA_hw_mbox_status_available(base)) { + dev_err(&pdev->dev, "Timeout waiting for mailbox status\n"); + return MYRB_STATUS_SUBSYS_TIMEOUT; + } + status = DAC960_LA_read_status(base); + DAC960_LA_ack_hw_mbox_intr(base); + DAC960_LA_ack_hw_mbox_status(base); + + return status; +} + +static int DAC960_LA_hw_init(struct pci_dev *pdev, + struct myrb_hba *cb, void __iomem *base) +{ + int timeout = 0; + unsigned char error, parm0, parm1; + + DAC960_LA_disable_intr(base); + DAC960_LA_ack_hw_mbox_status(base); + udelay(1000); + while (DAC960_LA_init_in_progress(base) && + timeout < MYRB_MAILBOX_TIMEOUT) { + if (DAC960_LA_read_error_status(base, &error, + &parm0, &parm1) && + myrb_err_status(cb, error, parm0, parm1)) + return -ENODEV; + udelay(10); + timeout++; + } + if (timeout == MYRB_MAILBOX_TIMEOUT) { + dev_err(&pdev->dev, + "Timeout waiting for Controller Initialisation\n"); + return -ETIMEDOUT; + } + if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) { + dev_err(&pdev->dev, + "Unable to Enable Memory Mailbox Interface\n"); + DAC960_LA_reset_ctrl(base); + return -ENODEV; + } + DAC960_LA_enable_intr(base); + cb->qcmd = myrb_qcmd; + cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox; + if (cb->dual_mode_interface) + cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd; + else + cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd; + cb->disable_intr = DAC960_LA_disable_intr; + cb->reset = DAC960_LA_reset_ctrl; + + return 0; +} + +static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg) +{ + struct myrb_hba *cb = arg; + void __iomem *base = cb->io_base; + struct myrb_stat_mbox *next_stat_mbox; + unsigned long flags; + + spin_lock_irqsave(&cb->queue_lock, flags); + DAC960_LA_ack_intr(base); + next_stat_mbox = cb->next_stat_mbox; + while (next_stat_mbox->valid) { + unsigned char id = next_stat_mbox->id; + struct scsi_cmnd *scmd = NULL; + struct myrb_cmdblk *cmd_blk = NULL; + + if (id == MYRB_DCMD_TAG) + cmd_blk = &cb->dcmd_blk; + else if (id == MYRB_MCMD_TAG) + cmd_blk = &cb->mcmd_blk; + else { + scmd = 
scsi_host_find_tag(cb->host, id - 3); + if (scmd) + cmd_blk = scsi_cmd_priv(scmd); + } + if (cmd_blk) + cmd_blk->status = next_stat_mbox->status; + else + dev_err(&cb->pdev->dev, + "Unhandled command completion %d\n", id); + + memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox)); + if (++next_stat_mbox > cb->last_stat_mbox) + next_stat_mbox = cb->first_stat_mbox; + + if (cmd_blk) { + if (id < 3) + myrb_handle_cmdblk(cb, cmd_blk); + else + myrb_handle_scsi(cb, cmd_blk, scmd); + } + } + cb->next_stat_mbox = next_stat_mbox; + spin_unlock_irqrestore(&cb->queue_lock, flags); + return IRQ_HANDLED; +} + +static struct myrb_privdata DAC960_LA_privdata = { + .hw_init = DAC960_LA_hw_init, + .irq_handler = DAC960_LA_intr_handler, + .mmio_size = DAC960_LA_mmio_size, +}; + +/* + * DAC960 PG Series Controllers + */ +static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base) +{ + writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET); +} + +static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base) +{ + writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET); +} + +static inline void DAC960_PG_reset_ctrl(void __iomem *base) +{ + writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET); +} + +static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base) +{ + writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET); +} + +static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base) +{ + unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET); + + return idb & DAC960_PG_IDB_HWMBOX_FULL; +} + +static inline bool DAC960_PG_init_in_progress(void __iomem *base) +{ + unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET); + + return idb & DAC960_PG_IDB_INIT_IN_PROGRESS; +} + +static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base) +{ + writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET); +} + +static inline void DAC960_PG_ack_intr(void __iomem *base) +{ + writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ, + base + DAC960_PG_ODB_OFFSET); +} + +static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base) +{ + unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET); + + return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL; +} + +static inline void DAC960_PG_enable_intr(void __iomem *base) +{ + unsigned int imask = (unsigned int)-1; + + imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ; + writel(imask, base + DAC960_PG_IRQMASK_OFFSET); +} + +static inline void DAC960_PG_disable_intr(void __iomem *base) +{ + unsigned int imask = (unsigned int)-1; + + writel(imask, base + DAC960_PG_IRQMASK_OFFSET); +} + +static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox, + union myrb_cmd_mbox *mbox) +{ + mem_mbox->words[1] = mbox->words[1]; + mem_mbox->words[2] = mbox->words[2]; + mem_mbox->words[3] = mbox->words[3]; + /* Memory barrier to prevent reordering */ + wmb(); + mem_mbox->words[0] = mbox->words[0]; + /* Memory barrier to force PCI access */ + mb(); +} + +static inline void DAC960_PG_write_hw_mbox(void __iomem *base, + union myrb_cmd_mbox *mbox) +{ + writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET); + writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET); + writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET); + writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET); +} + +static inline unsigned short +DAC960_PG_read_status(void __iomem *base) +{ + return readw(base + DAC960_PG_STS_OFFSET); +} + +static inline bool +DAC960_PG_read_error_status(void __iomem *base, unsigned char *error, + unsigned char 
*param0, unsigned char *param1) +{ + unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET); + + if (!(errsts & DAC960_PG_ERRSTS_PENDING)) + return false; + errsts &= ~DAC960_PG_ERRSTS_PENDING; + *error = errsts; + *param0 = readb(base + DAC960_PG_CMDOP_OFFSET); + *param1 = readb(base + DAC960_PG_CMDID_OFFSET); + writeb(0, base + DAC960_PG_ERRSTS_OFFSET); + return true; +} + +static inline unsigned short +DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base, + union myrb_cmd_mbox *mbox) +{ + unsigned short status; + int timeout = 0; + + while (timeout < MYRB_MAILBOX_TIMEOUT) { + if (!DAC960_PG_hw_mbox_is_full(base)) + break; + udelay(10); + timeout++; + } + if (DAC960_PG_hw_mbox_is_full(base)) { + dev_err(&pdev->dev, + "Timeout waiting for empty mailbox\n"); + return MYRB_STATUS_SUBSYS_TIMEOUT; + } + DAC960_PG_write_hw_mbox(base, mbox); + DAC960_PG_hw_mbox_new_cmd(base); + + timeout = 0; + while (timeout < MYRB_MAILBOX_TIMEOUT) { + if (DAC960_PG_hw_mbox_status_available(base)) + break; + udelay(10); + timeout++; + } + if (!DAC960_PG_hw_mbox_status_available(base)) { + dev_err(&pdev->dev, + "Timeout waiting for mailbox status\n"); + return MYRB_STATUS_SUBSYS_TIMEOUT; + } + status = DAC960_PG_read_status(base); + DAC960_PG_ack_hw_mbox_intr(base); + DAC960_PG_ack_hw_mbox_status(base); + + return status; +} + +static int DAC960_PG_hw_init(struct pci_dev *pdev, + struct myrb_hba *cb, void __iomem *base) +{ + int timeout = 0; + unsigned char error, parm0, parm1; + + DAC960_PG_disable_intr(base); + DAC960_PG_ack_hw_mbox_status(base); + udelay(1000); + while (DAC960_PG_init_in_progress(base) && + timeout < MYRB_MAILBOX_TIMEOUT) { + if (DAC960_PG_read_error_status(base, &error, + &parm0, &parm1) && + myrb_err_status(cb, error, parm0, parm1)) + return -EIO; + udelay(10); + timeout++; + } + if (timeout == MYRB_MAILBOX_TIMEOUT) { + dev_err(&pdev->dev, + "Timeout waiting for Controller Initialisation\n"); + return -ETIMEDOUT; + } + if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) { + dev_err(&pdev->dev, + "Unable to Enable Memory Mailbox Interface\n"); + DAC960_PG_reset_ctrl(base); + return -ENODEV; + } + DAC960_PG_enable_intr(base); + cb->qcmd = myrb_qcmd; + cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox; + if (cb->dual_mode_interface) + cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd; + else + cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd; + cb->disable_intr = DAC960_PG_disable_intr; + cb->reset = DAC960_PG_reset_ctrl; + + return 0; +} + +static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg) +{ + struct myrb_hba *cb = arg; + void __iomem *base = cb->io_base; + struct myrb_stat_mbox *next_stat_mbox; + unsigned long flags; + + spin_lock_irqsave(&cb->queue_lock, flags); + DAC960_PG_ack_intr(base); + next_stat_mbox = cb->next_stat_mbox; + while (next_stat_mbox->valid) { + unsigned char id = next_stat_mbox->id; + struct scsi_cmnd *scmd = NULL; + struct myrb_cmdblk *cmd_blk = NULL; + + if (id == MYRB_DCMD_TAG) + cmd_blk = &cb->dcmd_blk; + else if (id == MYRB_MCMD_TAG) + cmd_blk = &cb->mcmd_blk; + else { + scmd = scsi_host_find_tag(cb->host, id - 3); + if (scmd) + cmd_blk = scsi_cmd_priv(scmd); + } + if (cmd_blk) + cmd_blk->status = next_stat_mbox->status; + else + dev_err(&cb->pdev->dev, + "Unhandled command completion %d\n", id); + + memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox)); + if (++next_stat_mbox > cb->last_stat_mbox) + next_stat_mbox = cb->first_stat_mbox; + + if (id < 3) + myrb_handle_cmdblk(cb, cmd_blk); + else + myrb_handle_scsi(cb, cmd_blk, scmd); + } + 
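+	/* Remember where to resume scanning on the next interrupt. */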
cb->next_stat_mbox = next_stat_mbox; + spin_unlock_irqrestore(&cb->queue_lock, flags); + return IRQ_HANDLED; +} + +static struct myrb_privdata DAC960_PG_privdata = { + .hw_init = DAC960_PG_hw_init, + .irq_handler = DAC960_PG_intr_handler, + .mmio_size = DAC960_PG_mmio_size, +}; + + +/* + * DAC960 PD Series Controllers + */ + +static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base) +{ + writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET); +} + +static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base) +{ + writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET); +} + +static inline void DAC960_PD_reset_ctrl(void __iomem *base) +{ + writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET); +} + +static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base) +{ + unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET); + + return idb & DAC960_PD_IDB_HWMBOX_FULL; +} + +static inline bool DAC960_PD_init_in_progress(void __iomem *base) +{ + unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET); + + return idb & DAC960_PD_IDB_INIT_IN_PROGRESS; +} + +static inline void DAC960_PD_ack_intr(void __iomem *base) +{ + writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET); +} + +static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base) +{ + unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET); + + return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL; +} + +static inline void DAC960_PD_enable_intr(void __iomem *base) +{ + writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET); +} + +static inline void DAC960_PD_disable_intr(void __iomem *base) +{ + writeb(0, base + DAC960_PD_IRQEN_OFFSET); +} + +static inline void DAC960_PD_write_cmd_mbox(void __iomem *base, + union myrb_cmd_mbox *mbox) +{ + writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET); + writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET); + writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET); + writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET); +} + +static inline unsigned char +DAC960_PD_read_status_cmd_ident(void __iomem *base) +{ + return readb(base + DAC960_PD_STSID_OFFSET); +} + +static inline unsigned short +DAC960_PD_read_status(void __iomem *base) +{ + return readw(base + DAC960_PD_STS_OFFSET); +} + +static inline bool +DAC960_PD_read_error_status(void __iomem *base, unsigned char *error, + unsigned char *param0, unsigned char *param1) +{ + unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET); + + if (!(errsts & DAC960_PD_ERRSTS_PENDING)) + return false; + errsts &= ~DAC960_PD_ERRSTS_PENDING; + *error = errsts; + *param0 = readb(base + DAC960_PD_CMDOP_OFFSET); + *param1 = readb(base + DAC960_PD_CMDID_OFFSET); + writeb(0, base + DAC960_PD_ERRSTS_OFFSET); + return true; +} + +static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) +{ + void __iomem *base = cb->io_base; + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + + while (DAC960_PD_hw_mbox_is_full(base)) + udelay(1); + DAC960_PD_write_cmd_mbox(base, mbox); + DAC960_PD_hw_mbox_new_cmd(base); +} + +static int DAC960_PD_hw_init(struct pci_dev *pdev, + struct myrb_hba *cb, void __iomem *base) +{ + int timeout = 0; + unsigned char error, parm0, parm1; + + if (!request_region(cb->io_addr, 0x80, "myrb")) { + dev_err(&pdev->dev, "IO port 0x%lx busy\n", + (unsigned long)cb->io_addr); + return -EBUSY; + } + DAC960_PD_disable_intr(base); + DAC960_PD_ack_hw_mbox_status(base); + udelay(1000); + while (DAC960_PD_init_in_progress(base) && + timeout < MYRB_MAILBOX_TIMEOUT) { + if 
(DAC960_PD_read_error_status(base, &error, + &parm0, &parm1) && + myrb_err_status(cb, error, parm0, parm1)) + return -EIO; + udelay(10); + timeout++; + } + if (timeout == MYRB_MAILBOX_TIMEOUT) { + dev_err(&pdev->dev, + "Timeout waiting for Controller Initialisation\n"); + return -ETIMEDOUT; + } + if (!myrb_enable_mmio(cb, NULL)) { + dev_err(&pdev->dev, + "Unable to Enable Memory Mailbox Interface\n"); + DAC960_PD_reset_ctrl(base); + return -ENODEV; + } + DAC960_PD_enable_intr(base); + cb->qcmd = DAC960_PD_qcmd; + cb->disable_intr = DAC960_PD_disable_intr; + cb->reset = DAC960_PD_reset_ctrl; + + return 0; +} + +static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg) +{ + struct myrb_hba *cb = arg; + void __iomem *base = cb->io_base; + unsigned long flags; + + spin_lock_irqsave(&cb->queue_lock, flags); + while (DAC960_PD_hw_mbox_status_available(base)) { + unsigned char id = DAC960_PD_read_status_cmd_ident(base); + struct scsi_cmnd *scmd = NULL; + struct myrb_cmdblk *cmd_blk = NULL; + + if (id == MYRB_DCMD_TAG) + cmd_blk = &cb->dcmd_blk; + else if (id == MYRB_MCMD_TAG) + cmd_blk = &cb->mcmd_blk; + else { + scmd = scsi_host_find_tag(cb->host, id - 3); + if (scmd) + cmd_blk = scsi_cmd_priv(scmd); + } + if (cmd_blk) + cmd_blk->status = DAC960_PD_read_status(base); + else + dev_err(&cb->pdev->dev, + "Unhandled command completion %d\n", id); + + DAC960_PD_ack_intr(base); + DAC960_PD_ack_hw_mbox_status(base); + + if (id < 3) + myrb_handle_cmdblk(cb, cmd_blk); + else + myrb_handle_scsi(cb, cmd_blk, scmd); + } + spin_unlock_irqrestore(&cb->queue_lock, flags); + return IRQ_HANDLED; +} + +static struct myrb_privdata DAC960_PD_privdata = { + .hw_init = DAC960_PD_hw_init, + .irq_handler = DAC960_PD_intr_handler, + .mmio_size = DAC960_PD_mmio_size, +}; + + +/* + * DAC960 P Series Controllers + * + * Similar to the DAC960 PD Series Controllers, but some commands have + * to be translated. 
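+ * DAC960_P_qcmd() rewrites ENQUIRY, GET DEVICE STATE and the
+ * READ/WRITE (scatter/gather) opcodes to their Version 2.xx
+ * equivalents; for READ/WRITE the logical drive number and the high
+ * LBA bits also sit in different mailbox bytes, which is handled by
+ * myrb_translate_to_rw_command() on submission and undone by
+ * myrb_translate_from_rw_command() on completion.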
+ */ + +static inline void myrb_translate_enquiry(void *enq) +{ + memcpy(enq + 132, enq + 36, 64); + memset(enq + 36, 0, 96); +} + +static inline void myrb_translate_devstate(void *state) +{ + memcpy(state + 2, state + 3, 1); + memmove(state + 4, state + 5, 2); + memmove(state + 6, state + 8, 4); +} + +static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk) +{ + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + int ldev_num = mbox->type5.ld.ldev_num; + + mbox->bytes[3] &= 0x7; + mbox->bytes[3] |= mbox->bytes[7] << 6; + mbox->bytes[7] = ldev_num; +} + +static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk) +{ + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + int ldev_num = mbox->bytes[7]; + + mbox->bytes[7] = mbox->bytes[3] >> 6; + mbox->bytes[3] &= 0x7; + mbox->bytes[3] |= ldev_num << 3; +} + +static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) +{ + void __iomem *base = cb->io_base; + union myrb_cmd_mbox *mbox = &cmd_blk->mbox; + + switch (mbox->common.opcode) { + case MYRB_CMD_ENQUIRY: + mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD; + break; + case MYRB_CMD_GET_DEVICE_STATE: + mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD; + break; + case MYRB_CMD_READ: + mbox->common.opcode = MYRB_CMD_READ_OLD; + myrb_translate_to_rw_command(cmd_blk); + break; + case MYRB_CMD_WRITE: + mbox->common.opcode = MYRB_CMD_WRITE_OLD; + myrb_translate_to_rw_command(cmd_blk); + break; + case MYRB_CMD_READ_SG: + mbox->common.opcode = MYRB_CMD_READ_SG_OLD; + myrb_translate_to_rw_command(cmd_blk); + break; + case MYRB_CMD_WRITE_SG: + mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD; + myrb_translate_to_rw_command(cmd_blk); + break; + default: + break; + } + while (DAC960_PD_hw_mbox_is_full(base)) + udelay(1); + DAC960_PD_write_cmd_mbox(base, mbox); + DAC960_PD_hw_mbox_new_cmd(base); +} + + +static int DAC960_P_hw_init(struct pci_dev *pdev, + struct myrb_hba *cb, void __iomem *base) +{ + int timeout = 0; + unsigned char error, parm0, parm1; + + if (!request_region(cb->io_addr, 0x80, "myrb")) { + dev_err(&pdev->dev, "IO port 0x%lx busy\n", + (unsigned long)cb->io_addr); + return -EBUSY; + } + DAC960_PD_disable_intr(base); + DAC960_PD_ack_hw_mbox_status(base); + udelay(1000); + while (DAC960_PD_init_in_progress(base) && + timeout < MYRB_MAILBOX_TIMEOUT) { + if (DAC960_PD_read_error_status(base, &error, + &parm0, &parm1) && + myrb_err_status(cb, error, parm0, parm1)) + return -EAGAIN; + udelay(10); + timeout++; + } + if (timeout == MYRB_MAILBOX_TIMEOUT) { + dev_err(&pdev->dev, + "Timeout waiting for Controller Initialisation\n"); + return -ETIMEDOUT; + } + if (!myrb_enable_mmio(cb, NULL)) { + dev_err(&pdev->dev, + "Unable to allocate DMA mapped memory\n"); + DAC960_PD_reset_ctrl(base); + return -ETIMEDOUT; + } + DAC960_PD_enable_intr(base); + cb->qcmd = DAC960_P_qcmd; + cb->disable_intr = DAC960_PD_disable_intr; + cb->reset = DAC960_PD_reset_ctrl; + + return 0; +} + +static irqreturn_t DAC960_P_intr_handler(int irq, void *arg) +{ + struct myrb_hba *cb = arg; + void __iomem *base = cb->io_base; + unsigned long flags; + + spin_lock_irqsave(&cb->queue_lock, flags); + while (DAC960_PD_hw_mbox_status_available(base)) { + unsigned char id = DAC960_PD_read_status_cmd_ident(base); + struct scsi_cmnd *scmd = NULL; + struct myrb_cmdblk *cmd_blk = NULL; + union myrb_cmd_mbox *mbox; + enum myrb_cmd_opcode op; + + + if (id == MYRB_DCMD_TAG) + cmd_blk = &cb->dcmd_blk; + else if (id == MYRB_MCMD_TAG) + cmd_blk = &cb->mcmd_blk; + else { + scmd = scsi_host_find_tag(cb->host, 
id - 3); + if (scmd) + cmd_blk = scsi_cmd_priv(scmd); + } + if (cmd_blk) + cmd_blk->status = DAC960_PD_read_status(base); + else + dev_err(&cb->pdev->dev, + "Unhandled command completion %d\n", id); + + DAC960_PD_ack_intr(base); + DAC960_PD_ack_hw_mbox_status(base); + + if (!cmd_blk) + continue; + + mbox = &cmd_blk->mbox; + op = mbox->common.opcode; + switch (op) { + case MYRB_CMD_ENQUIRY_OLD: + mbox->common.opcode = MYRB_CMD_ENQUIRY; + myrb_translate_enquiry(cb->enquiry); + break; + case MYRB_CMD_READ_OLD: + mbox->common.opcode = MYRB_CMD_READ; + myrb_translate_from_rw_command(cmd_blk); + break; + case MYRB_CMD_WRITE_OLD: + mbox->common.opcode = MYRB_CMD_WRITE; + myrb_translate_from_rw_command(cmd_blk); + break; + case MYRB_CMD_READ_SG_OLD: + mbox->common.opcode = MYRB_CMD_READ_SG; + myrb_translate_from_rw_command(cmd_blk); + break; + case MYRB_CMD_WRITE_SG_OLD: + mbox->common.opcode = MYRB_CMD_WRITE_SG; + myrb_translate_from_rw_command(cmd_blk); + break; + default: + break; + } + if (id < 3) + myrb_handle_cmdblk(cb, cmd_blk); + else + myrb_handle_scsi(cb, cmd_blk, scmd); + } + spin_unlock_irqrestore(&cb->queue_lock, flags); + return IRQ_HANDLED; +} + +static struct myrb_privdata DAC960_P_privdata = { + .hw_init = DAC960_P_hw_init, + .irq_handler = DAC960_P_intr_handler, + .mmio_size = DAC960_PD_mmio_size, +}; + +static struct myrb_hba *myrb_detect(struct pci_dev *pdev, + const struct pci_device_id *entry) +{ + struct myrb_privdata *privdata = + (struct myrb_privdata *)entry->driver_data; + irq_handler_t irq_handler = privdata->irq_handler; + unsigned int mmio_size = privdata->mmio_size; + struct Scsi_Host *shost; + struct myrb_hba *cb = NULL; + + shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba)); + if (!shost) { + dev_err(&pdev->dev, "Unable to allocate Controller\n"); + return NULL; + } + shost->max_cmd_len = 12; + shost->max_lun = 256; + cb = shost_priv(shost); + mutex_init(&cb->dcmd_mutex); + mutex_init(&cb->dma_mutex); + cb->pdev = pdev; + cb->host = shost; + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + scsi_host_put(shost); + return NULL; + } + + if (privdata->hw_init == DAC960_PD_hw_init || + privdata->hw_init == DAC960_P_hw_init) { + cb->io_addr = pci_resource_start(pdev, 0); + cb->pci_addr = pci_resource_start(pdev, 1); + } else + cb->pci_addr = pci_resource_start(pdev, 0); + + pci_set_drvdata(pdev, cb); + spin_lock_init(&cb->queue_lock); + if (mmio_size < PAGE_SIZE) + mmio_size = PAGE_SIZE; + cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size); + if (cb->mmio_base == NULL) { + dev_err(&pdev->dev, + "Unable to map Controller Register Window\n"); + goto failure; + } + + cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK); + if (privdata->hw_init(pdev, cb, cb->io_base)) + goto failure; + + if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) { + dev_err(&pdev->dev, + "Unable to acquire IRQ Channel %d\n", pdev->irq); + goto failure; + } + cb->irq = pdev->irq; + return cb; + +failure: + dev_err(&pdev->dev, + "Failed to initialize Controller\n"); + myrb_cleanup(cb); + return NULL; +} + +static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry) +{ + struct myrb_hba *cb; + int ret; + + cb = myrb_detect(dev, entry); + if (!cb) + return -ENODEV; + + ret = myrb_get_hba_config(cb); + if (ret < 0) { + myrb_cleanup(cb); + return ret; + } + + if (!myrb_create_mempools(dev, cb)) { + ret = -ENOMEM; + goto failed; + } + + ret = scsi_add_host(cb->host, &dev->dev); + if (ret) { + 
dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret); + myrb_destroy_mempools(cb); + goto failed; + } + scsi_scan_host(cb->host); + return 0; +failed: + myrb_cleanup(cb); + return ret; +} + + +static void myrb_remove(struct pci_dev *pdev) +{ + struct myrb_hba *cb = pci_get_drvdata(pdev); + + shost_printk(KERN_NOTICE, cb->host, "Flushing Cache..."); + myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0); + myrb_cleanup(cb); + myrb_destroy_mempools(cb); +} + + +static const struct pci_device_id myrb_id_table[] = { + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC, + PCI_DEVICE_ID_DEC_21285, + PCI_VENDOR_ID_MYLEX, + PCI_DEVICE_ID_MYLEX_DAC960_LA), + .driver_data = (unsigned long) &DAC960_LA_privdata, + }, + { + PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata), + }, + { + PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata), + }, + { + PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata), + }, + {0, }, +}; + +MODULE_DEVICE_TABLE(pci, myrb_id_table); + +static struct pci_driver myrb_pci_driver = { + .name = "myrb", + .id_table = myrb_id_table, + .probe = myrb_probe, + .remove = myrb_remove, +}; + +static int __init myrb_init_module(void) +{ + int ret; + + myrb_raid_template = raid_class_attach(&myrb_raid_functions); + if (!myrb_raid_template) + return -ENODEV; + + ret = pci_register_driver(&myrb_pci_driver); + if (ret) + raid_class_release(myrb_raid_template); + + return ret; +} + +static void __exit myrb_cleanup_module(void) +{ + pci_unregister_driver(&myrb_pci_driver); + raid_class_release(myrb_raid_template); +} + +module_init(myrb_init_module); +module_exit(myrb_cleanup_module); + +MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)"); +MODULE_AUTHOR("Hannes Reinecke "); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h new file mode 100644 index 000000000..fb8eacfce --- /dev/null +++ b/drivers/scsi/myrb.h @@ -0,0 +1,958 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers + * + * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH + * + * Based on the original DAC960 driver, + * Copyright 1998-2001 by Leonard N. Zubkoff + * Portions Copyright 2002 by Mylex (An IBM Business Unit) + * + */ + +#ifndef MYRB_H +#define MYRB_H + +#define MYRB_MAX_LDEVS 32 +#define MYRB_MAX_CHANNELS 3 +#define MYRB_MAX_TARGETS 16 +#define MYRB_MAX_PHYSICAL_DEVICES 45 +#define MYRB_SCATTER_GATHER_LIMIT 32 +#define MYRB_CMD_MBOX_COUNT 256 +#define MYRB_STAT_MBOX_COUNT 1024 + +#define MYRB_BLKSIZE_BITS 9 +#define MYRB_MAILBOX_TIMEOUT 1000000 + +#define MYRB_DCMD_TAG 1 +#define MYRB_MCMD_TAG 2 + +#define MYRB_PRIMARY_MONITOR_INTERVAL (10 * HZ) +#define MYRB_SECONDARY_MONITOR_INTERVAL (60 * HZ) + +/* + * DAC960 V1 Firmware Command Opcodes. 
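+ *
+ * The Version 2.xx opcodes at the end of the enum are used only by the
+ * DAC960 P series support code, which translates to them in
+ * DAC960_P_qcmd() and back in its interrupt handler.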
+ */ +enum myrb_cmd_opcode { + /* I/O Commands */ + MYRB_CMD_READ_EXTENDED = 0x33, + MYRB_CMD_WRITE_EXTENDED = 0x34, + MYRB_CMD_READAHEAD_EXTENDED = 0x35, + MYRB_CMD_READ_EXTENDED_SG = 0xB3, + MYRB_CMD_WRITE_EXTENDED_SG = 0xB4, + MYRB_CMD_READ = 0x36, + MYRB_CMD_READ_SG = 0xB6, + MYRB_CMD_WRITE = 0x37, + MYRB_CMD_WRITE_SG = 0xB7, + MYRB_CMD_DCDB = 0x04, + MYRB_CMD_DCDB_SG = 0x84, + MYRB_CMD_FLUSH = 0x0A, + /* Controller Status Related Commands */ + MYRB_CMD_ENQUIRY = 0x53, + MYRB_CMD_ENQUIRY2 = 0x1C, + MYRB_CMD_GET_LDRV_ELEMENT = 0x55, + MYRB_CMD_GET_LDEV_INFO = 0x19, + MYRB_CMD_IOPORTREAD = 0x39, + MYRB_CMD_IOPORTWRITE = 0x3A, + MYRB_CMD_GET_SD_STATS = 0x3E, + MYRB_CMD_GET_PD_STATS = 0x3F, + MYRB_CMD_EVENT_LOG_OPERATION = 0x72, + /* Device Related Commands */ + MYRB_CMD_START_DEVICE = 0x10, + MYRB_CMD_GET_DEVICE_STATE = 0x50, + MYRB_CMD_STOP_CHANNEL = 0x13, + MYRB_CMD_START_CHANNEL = 0x12, + MYRB_CMD_RESET_CHANNEL = 0x1A, + /* Commands Associated with Data Consistency and Errors */ + MYRB_CMD_REBUILD = 0x09, + MYRB_CMD_REBUILD_ASYNC = 0x16, + MYRB_CMD_CHECK_CONSISTENCY = 0x0F, + MYRB_CMD_CHECK_CONSISTENCY_ASYNC = 0x1E, + MYRB_CMD_REBUILD_STAT = 0x0C, + MYRB_CMD_GET_REBUILD_PROGRESS = 0x27, + MYRB_CMD_REBUILD_CONTROL = 0x1F, + MYRB_CMD_READ_BADBLOCK_TABLE = 0x0B, + MYRB_CMD_READ_BADDATA_TABLE = 0x25, + MYRB_CMD_CLEAR_BADDATA_TABLE = 0x26, + MYRB_CMD_GET_ERROR_TABLE = 0x17, + MYRB_CMD_ADD_CAPACITY_ASYNC = 0x2A, + MYRB_CMD_BGI_CONTROL = 0x2B, + /* Configuration Related Commands */ + MYRB_CMD_READ_CONFIG2 = 0x3D, + MYRB_CMD_WRITE_CONFIG2 = 0x3C, + MYRB_CMD_READ_CONFIG_ONDISK = 0x4A, + MYRB_CMD_WRITE_CONFIG_ONDISK = 0x4B, + MYRB_CMD_READ_CONFIG = 0x4E, + MYRB_CMD_READ_BACKUP_CONFIG = 0x4D, + MYRB_CMD_WRITE_CONFIG = 0x4F, + MYRB_CMD_ADD_CONFIG = 0x4C, + MYRB_CMD_READ_CONFIG_LABEL = 0x48, + MYRB_CMD_WRITE_CONFIG_LABEL = 0x49, + /* Firmware Upgrade Related Commands */ + MYRB_CMD_LOAD_IMAGE = 0x20, + MYRB_CMD_STORE_IMAGE = 0x21, + MYRB_CMD_PROGRAM_IMAGE = 0x22, + /* Diagnostic Commands */ + MYRB_CMD_SET_DIAGNOSTIC_MODE = 0x31, + MYRB_CMD_RUN_DIAGNOSTIC = 0x32, + /* Subsystem Service Commands */ + MYRB_CMD_GET_SUBSYS_DATA = 0x70, + MYRB_CMD_SET_SUBSYS_PARAM = 0x71, + /* Version 2.xx Firmware Commands */ + MYRB_CMD_ENQUIRY_OLD = 0x05, + MYRB_CMD_GET_DEVICE_STATE_OLD = 0x14, + MYRB_CMD_READ_OLD = 0x02, + MYRB_CMD_WRITE_OLD = 0x03, + MYRB_CMD_READ_SG_OLD = 0x82, + MYRB_CMD_WRITE_SG_OLD = 0x83 +} __packed; + +/* + * DAC960 V1 Firmware Command Status Codes. 
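+ *
+ * Several status codes share a numeric value; the meaning depends on
+ * the command class noted in the trailing comment of each definition.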
+ */ +#define MYRB_STATUS_SUCCESS 0x0000 /* Common */ +#define MYRB_STATUS_CHECK_CONDITION 0x0002 /* Common */ +#define MYRB_STATUS_NO_DEVICE 0x0102 /* Common */ +#define MYRB_STATUS_INVALID_ADDRESS 0x0105 /* Common */ +#define MYRB_STATUS_INVALID_PARAM 0x0105 /* Common */ +#define MYRB_STATUS_IRRECOVERABLE_DATA_ERROR 0x0001 /* I/O */ +#define MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE 0x0002 /* I/O */ +#define MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV 0x0105 /* I/O */ +#define MYRB_STATUS_BAD_DATA 0x010C /* I/O */ +#define MYRB_STATUS_DEVICE_BUSY 0x0008 /* DCDB */ +#define MYRB_STATUS_DEVICE_NONRESPONSIVE 0x000E /* DCDB */ +#define MYRB_STATUS_COMMAND_TERMINATED 0x000F /* DCDB */ +#define MYRB_STATUS_START_DEVICE_FAILED 0x0002 /* Device */ +#define MYRB_STATUS_INVALID_CHANNEL_OR_TARGET 0x0105 /* Device */ +#define MYRB_STATUS_CHANNEL_BUSY 0x0106 /* Device */ +#define MYRB_STATUS_OUT_OF_MEMORY 0x0107 /* Device */ +#define MYRB_STATUS_CHANNEL_NOT_STOPPED 0x0002 /* Device */ +#define MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE 0x0002 /* Consistency */ +#define MYRB_STATUS_RBLD_BADBLOCKS 0x0003 /* Consistency */ +#define MYRB_STATUS_RBLD_NEW_DISK_FAILED 0x0004 /* Consistency */ +#define MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS 0x0106 /* Consistency */ +#define MYRB_STATUS_DEPENDENT_DISK_DEAD 0x0002 /* Consistency */ +#define MYRB_STATUS_INCONSISTENT_BLOCKS 0x0003 /* Consistency */ +#define MYRB_STATUS_INVALID_OR_NONREDUNDANT_LDRV 0x0105 /* Consistency */ +#define MYRB_STATUS_NO_RBLD_OR_CHECK_INPROGRESS 0x0105 /* Consistency */ +#define MYRB_STATUS_RBLD_IN_PROGRESS_DATA_VALID 0x0000 /* Consistency */ +#define MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE 0x0002 /* Consistency */ +#define MYRB_STATUS_RBLD_FAILED_BADBLOCKS 0x0003 /* Consistency */ +#define MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED 0x0004 /* Consistency */ +#define MYRB_STATUS_RBLD_SUCCESS 0x0100 /* Consistency */ +#define MYRB_STATUS_RBLD_SUCCESS_TERMINATED 0x0107 /* Consistency */ +#define MYRB_STATUS_RBLD_NOT_CHECKED 0x0108 /* Consistency */ +#define MYRB_STATUS_BGI_SUCCESS 0x0100 /* Consistency */ +#define MYRB_STATUS_BGI_ABORTED 0x0005 /* Consistency */ +#define MYRB_STATUS_NO_BGI_INPROGRESS 0x0105 /* Consistency */ +#define MYRB_STATUS_ADD_CAPACITY_INPROGRESS 0x0004 /* Consistency */ +#define MYRB_STATUS_ADD_CAPACITY_FAILED_OR_SUSPENDED 0x00F4 /* Consistency */ +#define MYRB_STATUS_CONFIG2_CSUM_ERROR 0x0002 /* Configuration */ +#define MYRB_STATUS_CONFIGURATION_SUSPENDED 0x0106 /* Configuration */ +#define MYRB_STATUS_FAILED_TO_CONFIGURE_NVRAM 0x0105 /* Configuration */ +#define MYRB_STATUS_CONFIGURATION_NOT_SAVED 0x0106 /* Configuration */ +#define MYRB_STATUS_SUBSYS_NOTINSTALLED 0x0001 /* Subsystem */ +#define MYRB_STATUS_SUBSYS_FAILED 0x0002 /* Subsystem */ +#define MYRB_STATUS_SUBSYS_BUSY 0x0106 /* Subsystem */ +#define MYRB_STATUS_SUBSYS_TIMEOUT 0x0108 /* Subsystem */ + +/* + * DAC960 V1 Firmware Enquiry Command reply structure. 
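+ *
+ * Refreshed periodically from the monitor work via myrb_hba_enquiry();
+ * the event sequence number and drive state counts reported here are
+ * what prompt myrb_monitor() to fetch the event log, error table and
+ * rebuild/consistency-check progress.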
+ */ +struct myrb_enquiry { + unsigned char ldev_count; /* Byte 0 */ + unsigned int rsvd1:24; /* Bytes 1-3 */ + unsigned int ldev_sizes[32]; /* Bytes 4-131 */ + unsigned short flash_age; /* Bytes 132-133 */ + struct { + unsigned char deferred:1; /* Byte 134 Bit 0 */ + unsigned char low_bat:1; /* Byte 134 Bit 1 */ + unsigned char rsvd2:6; /* Byte 134 Bits 2-7 */ + } status; + unsigned char rsvd3:8; /* Byte 135 */ + unsigned char fw_minor_version; /* Byte 136 */ + unsigned char fw_major_version; /* Byte 137 */ + enum { + MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS = 0x00, + MYRB_STDBY_RBLD_IN_PROGRESS = 0x01, + MYRB_BG_RBLD_IN_PROGRESS = 0x02, + MYRB_BG_CHECK_IN_PROGRESS = 0x03, + MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR = 0xFF, + MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED = 0xF0, + MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED = 0xF1, + MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER = 0xF2, + MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED = 0xF3 + } __packed rbld; /* Byte 138 */ + unsigned char max_tcq; /* Byte 139 */ + unsigned char ldev_offline; /* Byte 140 */ + unsigned char rsvd4:8; /* Byte 141 */ + unsigned short ev_seq; /* Bytes 142-143 */ + unsigned char ldev_critical; /* Byte 144 */ + unsigned int rsvd5:24; /* Bytes 145-147 */ + unsigned char pdev_dead; /* Byte 148 */ + unsigned char rsvd6:8; /* Byte 149 */ + unsigned char rbld_count; /* Byte 150 */ + struct { + unsigned char rsvd7:3; /* Byte 151 Bits 0-2 */ + unsigned char bbu_present:1; /* Byte 151 Bit 3 */ + unsigned char rsvd8:4; /* Byte 151 Bits 4-7 */ + } misc; + struct { + unsigned char target; + unsigned char channel; + } dead_drives[21]; /* Bytes 152-194 */ + unsigned char rsvd9[62]; /* Bytes 195-255 */ +} __packed; + +/* + * DAC960 V1 Firmware Enquiry2 Command reply structure. + */ +struct myrb_enquiry2 { + struct { + enum { + DAC960_V1_P_PD_PU = 0x01, + DAC960_V1_PL = 0x02, + DAC960_V1_PG = 0x10, + DAC960_V1_PJ = 0x11, + DAC960_V1_PR = 0x12, + DAC960_V1_PT = 0x13, + DAC960_V1_PTL0 = 0x14, + DAC960_V1_PRL = 0x15, + DAC960_V1_PTL1 = 0x16, + DAC960_V1_1164P = 0x20 + } __packed sub_model; /* Byte 0 */ + unsigned char actual_channels; /* Byte 1 */ + enum { + MYRB_5_CHANNEL_BOARD = 0x01, + MYRB_3_CHANNEL_BOARD = 0x02, + MYRB_2_CHANNEL_BOARD = 0x03, + MYRB_3_CHANNEL_ASIC_DAC = 0x04 + } __packed model; /* Byte 2 */ + enum { + MYRB_EISA_CONTROLLER = 0x01, + MYRB_MCA_CONTROLLER = 0x02, + MYRB_PCI_CONTROLLER = 0x03, + MYRB_SCSI_TO_SCSI = 0x08 + } __packed controller; /* Byte 3 */ + } hw; /* Bytes 0-3 */ + /* MajorVersion.MinorVersion-FirmwareType-TurnID */ + struct { + unsigned char major_version; /* Byte 4 */ + unsigned char minor_version; /* Byte 5 */ + unsigned char turn_id; /* Byte 6 */ + char firmware_type; /* Byte 7 */ + } fw; /* Bytes 4-7 */ + unsigned int rsvd1; /* Byte 8-11 */ + unsigned char cfg_chan; /* Byte 12 */ + unsigned char cur_chan; /* Byte 13 */ + unsigned char max_targets; /* Byte 14 */ + unsigned char max_tcq; /* Byte 15 */ + unsigned char max_ldev; /* Byte 16 */ + unsigned char max_arms; /* Byte 17 */ + unsigned char max_spans; /* Byte 18 */ + unsigned char rsvd2; /* Byte 19 */ + unsigned int rsvd3; /* Bytes 20-23 */ + unsigned int mem_size; /* Bytes 24-27 */ + unsigned int cache_size; /* Bytes 28-31 */ + unsigned int flash_size; /* Bytes 32-35 */ + unsigned int nvram_size; /* Bytes 36-39 */ + struct { + enum { + MYRB_RAM_TYPE_DRAM = 0x0, + MYRB_RAM_TYPE_EDO = 0x1, + MYRB_RAM_TYPE_SDRAM = 0x2, + MYRB_RAM_TYPE_Last = 0x7 + } __packed ram:3; /* Byte 40 Bits 0-2 */ + enum { + MYRB_ERR_CORR_None = 0x0, + MYRB_ERR_CORR_Parity = 0x1, + 
MYRB_ERR_CORR_ECC = 0x2, + MYRB_ERR_CORR_Last = 0x7 + } __packed ec:3; /* Byte 40 Bits 3-5 */ + unsigned char fast_page:1; /* Byte 40 Bit 6 */ + unsigned char low_power:1; /* Byte 40 Bit 7 */ + unsigned char rsvd4; /* Bytes 41 */ + } mem_type; + unsigned short clock_speed; /* Bytes 42-43 */ + unsigned short mem_speed; /* Bytes 44-45 */ + unsigned short hw_speed; /* Bytes 46-47 */ + unsigned char rsvd5[12]; /* Bytes 48-59 */ + unsigned short max_cmds; /* Bytes 60-61 */ + unsigned short max_sge; /* Bytes 62-63 */ + unsigned short max_drv_cmds; /* Bytes 64-65 */ + unsigned short max_io_desc; /* Bytes 66-67 */ + unsigned short max_sectors; /* Bytes 68-69 */ + unsigned char latency; /* Byte 70 */ + unsigned char rsvd6; /* Byte 71 */ + unsigned char scsi_tmo; /* Byte 72 */ + unsigned char rsvd7; /* Byte 73 */ + unsigned short min_freelines; /* Bytes 74-75 */ + unsigned char rsvd8[8]; /* Bytes 76-83 */ + unsigned char rbld_rate_const; /* Byte 84 */ + unsigned char rsvd9[11]; /* Byte 85-95 */ + unsigned short pdrv_block_size; /* Bytes 96-97 */ + unsigned short ldev_block_size; /* Bytes 98-99 */ + unsigned short max_blocks_per_cmd; /* Bytes 100-101 */ + unsigned short block_factor; /* Bytes 102-103 */ + unsigned short cacheline_size; /* Bytes 104-105 */ + struct { + enum { + MYRB_WIDTH_NARROW_8BIT = 0x0, + MYRB_WIDTH_WIDE_16BIT = 0x1, + MYRB_WIDTH_WIDE_32BIT = 0x2 + } __packed bus_width:2; /* Byte 106 Bits 0-1 */ + enum { + MYRB_SCSI_SPEED_FAST = 0x0, + MYRB_SCSI_SPEED_ULTRA = 0x1, + MYRB_SCSI_SPEED_ULTRA2 = 0x2 + } __packed bus_speed:2; /* Byte 106 Bits 2-3 */ + unsigned char differential:1; /* Byte 106 Bit 4 */ + unsigned char rsvd10:3; /* Byte 106 Bits 5-7 */ + } scsi_cap; + unsigned char rsvd11[5]; /* Byte 107-111 */ + unsigned short fw_build; /* Bytes 112-113 */ + enum { + MYRB_FAULT_AEMI = 0x01, + MYRB_FAULT_OEM1 = 0x02, + MYRB_FAULT_OEM2 = 0x04, + MYRB_FAULT_OEM3 = 0x08, + MYRB_FAULT_CONNER = 0x10, + MYRB_FAULT_SAFTE = 0x20 + } __packed fault_mgmt; /* Byte 114 */ + unsigned char rsvd12; /* Byte 115 */ + struct { + unsigned int clustering:1; /* Byte 116 Bit 0 */ + unsigned int online_RAID_expansion:1; /* Byte 116 Bit 1 */ + unsigned int readahead:1; /* Byte 116 Bit 2 */ + unsigned int bgi:1; /* Byte 116 Bit 3 */ + unsigned int rsvd13:28; /* Bytes 116-119 */ + } fw_features; + unsigned char rsvd14[8]; /* Bytes 120-127 */ +} __packed; + +/* + * DAC960 V1 Firmware Logical Drive State type. + */ +enum myrb_devstate { + MYRB_DEVICE_DEAD = 0x00, + MYRB_DEVICE_WO = 0x02, + MYRB_DEVICE_ONLINE = 0x03, + MYRB_DEVICE_CRITICAL = 0x04, + MYRB_DEVICE_STANDBY = 0x10, + MYRB_DEVICE_OFFLINE = 0xFF +} __packed; + +/* + * DAC960 V1 RAID Levels + */ +enum myrb_raidlevel { + MYRB_RAID_LEVEL0 = 0x0, /* RAID 0 */ + MYRB_RAID_LEVEL1 = 0x1, /* RAID 1 */ + MYRB_RAID_LEVEL3 = 0x3, /* RAID 3 */ + MYRB_RAID_LEVEL5 = 0x5, /* RAID 5 */ + MYRB_RAID_LEVEL6 = 0x6, /* RAID 6 */ + MYRB_RAID_JBOD = 0x7, /* RAID 7 (JBOD) */ +} __packed; + +/* + * DAC960 V1 Firmware Logical Drive Information structure. + */ +struct myrb_ldev_info { + unsigned int size; /* Bytes 0-3 */ + enum myrb_devstate state; /* Byte 4 */ + unsigned int raid_level:7; /* Byte 5 Bits 0-6 */ + unsigned int wb_enabled:1; /* Byte 5 Bit 7 */ + unsigned int rsvd:16; /* Bytes 6-7 */ +}; + +/* + * DAC960 V1 Firmware Perform Event Log Operation Types. + */ +#define DAC960_V1_GetEventLogEntry 0x00 + +/* + * DAC960 V1 Firmware Get Event Log Entry Command reply structure. 
+ */ +struct myrb_log_entry { + unsigned char msg_type; /* Byte 0 */ + unsigned char msg_len; /* Byte 1 */ + unsigned char target:5; /* Byte 2 Bits 0-4 */ + unsigned char channel:3; /* Byte 2 Bits 5-7 */ + unsigned char lun:6; /* Byte 3 Bits 0-5 */ + unsigned char rsvd1:2; /* Byte 3 Bits 6-7 */ + unsigned short seq_num; /* Bytes 4-5 */ + unsigned char sense[26]; /* Bytes 6-31 */ +}; + +/* + * DAC960 V1 Firmware Get Device State Command reply structure. + * The structure is padded by 2 bytes for compatibility with Version 2.xx + * Firmware. + */ +struct myrb_pdev_state { + unsigned int present:1; /* Byte 0 Bit 0 */ + unsigned int :7; /* Byte 0 Bits 1-7 */ + enum { + MYRB_TYPE_OTHER = 0x0, + MYRB_TYPE_DISK = 0x1, + MYRB_TYPE_TAPE = 0x2, + MYRB_TYPE_CDROM_OR_WORM = 0x3 + } __packed devtype:2; /* Byte 1 Bits 0-1 */ + unsigned int rsvd1:1; /* Byte 1 Bit 2 */ + unsigned int fast20:1; /* Byte 1 Bit 3 */ + unsigned int sync:1; /* Byte 1 Bit 4 */ + unsigned int fast:1; /* Byte 1 Bit 5 */ + unsigned int wide:1; /* Byte 1 Bit 6 */ + unsigned int tcq_supported:1; /* Byte 1 Bit 7 */ + enum myrb_devstate state; /* Byte 2 */ + unsigned int rsvd2:8; /* Byte 3 */ + unsigned int sync_multiplier; /* Byte 4 */ + unsigned int sync_offset:5; /* Byte 5 Bits 0-4 */ + unsigned int rsvd3:3; /* Byte 5 Bits 5-7 */ + unsigned int size; /* Bytes 6-9 */ + unsigned int rsvd4:16; /* Bytes 10-11 */ +} __packed; + +/* + * DAC960 V1 Firmware Get Rebuild Progress Command reply structure. + */ +struct myrb_rbld_progress { + unsigned int ldev_num; /* Bytes 0-3 */ + unsigned int ldev_size; /* Bytes 4-7 */ + unsigned int blocks_left; /* Bytes 8-11 */ +}; + +/* + * DAC960 V1 Firmware Background Initialization Status Command reply structure. + */ +struct myrb_bgi_status { + unsigned int ldev_size; /* Bytes 0-3 */ + unsigned int blocks_done; /* Bytes 4-7 */ + unsigned char rsvd1[12]; /* Bytes 8-19 */ + unsigned int ldev_num; /* Bytes 20-23 */ + unsigned char raid_level; /* Byte 24 */ + enum { + MYRB_BGI_INVALID = 0x00, + MYRB_BGI_STARTED = 0x02, + MYRB_BGI_INPROGRESS = 0x04, + MYRB_BGI_SUSPENDED = 0x05, + MYRB_BGI_CANCELLED = 0x06 + } __packed status; /* Byte 25 */ + unsigned char rsvd2[6]; /* Bytes 26-31 */ +}; + +/* + * DAC960 V1 Firmware Error Table Entry structure. + */ +struct myrb_error_entry { + unsigned char parity_err; /* Byte 0 */ + unsigned char soft_err; /* Byte 1 */ + unsigned char hard_err; /* Byte 2 */ + unsigned char misc_err; /* Byte 3 */ +}; + +/* + * DAC960 V1 Firmware Read Config2 Command reply structure. 
+ */ +struct myrb_config2 { + unsigned rsvd1:1; /* Byte 0 Bit 0 */ + unsigned active_negation:1; /* Byte 0 Bit 1 */ + unsigned rsvd2:5; /* Byte 0 Bits 2-6 */ + unsigned no_rescan_on_reset_during_scan:1; /* Byte 0 Bit 7 */ + unsigned StorageWorks_support:1; /* Byte 1 Bit 0 */ + unsigned HewlettPackard_support:1; /* Byte 1 Bit 1 */ + unsigned no_disconnect_on_first_command:1; /* Byte 1 Bit 2 */ + unsigned rsvd3:2; /* Byte 1 Bits 3-4 */ + unsigned AEMI_ARM:1; /* Byte 1 Bit 5 */ + unsigned AEMI_OFM:1; /* Byte 1 Bit 6 */ + unsigned rsvd4:1; /* Byte 1 Bit 7 */ + enum { + MYRB_OEMID_MYLEX = 0x00, + MYRB_OEMID_IBM = 0x08, + MYRB_OEMID_HP = 0x0A, + MYRB_OEMID_DEC = 0x0C, + MYRB_OEMID_SIEMENS = 0x10, + MYRB_OEMID_INTEL = 0x12 + } __packed OEMID; /* Byte 2 */ + unsigned char oem_model_number; /* Byte 3 */ + unsigned char physical_sector; /* Byte 4 */ + unsigned char logical_sector; /* Byte 5 */ + unsigned char block_factor; /* Byte 6 */ + unsigned readahead_enabled:1; /* Byte 7 Bit 0 */ + unsigned low_BIOS_delay:1; /* Byte 7 Bit 1 */ + unsigned rsvd5:2; /* Byte 7 Bits 2-3 */ + unsigned restrict_reassign_to_one_sector:1; /* Byte 7 Bit 4 */ + unsigned rsvd6:1; /* Byte 7 Bit 5 */ + unsigned FUA_during_write_recovery:1; /* Byte 7 Bit 6 */ + unsigned enable_LeftSymmetricRAID5Algorithm:1; /* Byte 7 Bit 7 */ + unsigned char default_rebuild_rate; /* Byte 8 */ + unsigned char rsvd7; /* Byte 9 */ + unsigned char blocks_per_cacheline; /* Byte 10 */ + unsigned char blocks_per_stripe; /* Byte 11 */ + struct { + enum { + MYRB_SPEED_ASYNC = 0x0, + MYRB_SPEED_SYNC_8MHz = 0x1, + MYRB_SPEED_SYNC_5MHz = 0x2, + MYRB_SPEED_SYNC_10_OR_20MHz = 0x3 + } __packed speed:2; /* Byte 11 Bits 0-1 */ + unsigned force_8bit:1; /* Byte 11 Bit 2 */ + unsigned disable_fast20:1; /* Byte 11 Bit 3 */ + unsigned rsvd8:3; /* Byte 11 Bits 4-6 */ + unsigned enable_tcq:1; /* Byte 11 Bit 7 */ + } __packed channelparam[6]; /* Bytes 12-17 */ + unsigned char SCSIInitiatorID; /* Byte 18 */ + unsigned char rsvd9; /* Byte 19 */ + enum { + MYRB_STARTUP_CONTROLLER_SPINUP = 0x00, + MYRB_STARTUP_POWERON_SPINUP = 0x01 + } __packed startup; /* Byte 20 */ + unsigned char simultaneous_device_spinup_count; /* Byte 21 */ + unsigned char seconds_delay_between_spinups; /* Byte 22 */ + unsigned char rsvd10[29]; /* Bytes 23-51 */ + unsigned BIOS_disabled:1; /* Byte 52 Bit 0 */ + unsigned CDROM_boot_enabled:1; /* Byte 52 Bit 1 */ + unsigned rsvd11:3; /* Byte 52 Bits 2-4 */ + enum { + MYRB_GEOM_128_32 = 0x0, + MYRB_GEOM_255_63 = 0x1, + MYRB_GEOM_RESERVED1 = 0x2, + MYRB_GEOM_RESERVED2 = 0x3 + } __packed drive_geometry:2; /* Byte 52 Bits 5-6 */ + unsigned rsvd12:1; /* Byte 52 Bit 7 */ + unsigned char rsvd13[9]; /* Bytes 53-61 */ + unsigned short csum; /* Bytes 62-63 */ +}; + +/* + * DAC960 V1 Firmware DCDB request structure. 
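+ *
+ * The transfer length is 20 bits wide, split across xfer_len_lo
+ * (low 16 bits) and xfer_len_hi4 (high 4 bits).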
+ */ +struct myrb_dcdb { + unsigned target:4; /* Byte 0 Bits 0-3 */ + unsigned channel:4; /* Byte 0 Bits 4-7 */ + enum { + MYRB_DCDB_XFER_NONE = 0, + MYRB_DCDB_XFER_DEVICE_TO_SYSTEM = 1, + MYRB_DCDB_XFER_SYSTEM_TO_DEVICE = 2, + MYRB_DCDB_XFER_ILLEGAL = 3 + } __packed data_xfer:2; /* Byte 1 Bits 0-1 */ + unsigned early_status:1; /* Byte 1 Bit 2 */ + unsigned rsvd1:1; /* Byte 1 Bit 3 */ + enum { + MYRB_DCDB_TMO_24_HRS = 0, + MYRB_DCDB_TMO_10_SECS = 1, + MYRB_DCDB_TMO_60_SECS = 2, + MYRB_DCDB_TMO_10_MINS = 3 + } __packed timeout:2; /* Byte 1 Bits 4-5 */ + unsigned no_autosense:1; /* Byte 1 Bit 6 */ + unsigned allow_disconnect:1; /* Byte 1 Bit 7 */ + unsigned short xfer_len_lo; /* Bytes 2-3 */ + u32 dma_addr; /* Bytes 4-7 */ + unsigned char cdb_len:4; /* Byte 8 Bits 0-3 */ + unsigned char xfer_len_hi4:4; /* Byte 8 Bits 4-7 */ + unsigned char sense_len; /* Byte 9 */ + unsigned char cdb[12]; /* Bytes 10-21 */ + unsigned char sense[64]; /* Bytes 22-85 */ + unsigned char status; /* Byte 86 */ + unsigned char rsvd2; /* Byte 87 */ +}; + +/* + * DAC960 V1 Firmware Scatter/Gather List Type 1 32 Bit Address + *32 Bit Byte Count structure. + */ +struct myrb_sge { + u32 sge_addr; /* Bytes 0-3 */ + u32 sge_count; /* Bytes 4-7 */ +}; + +/* + * 13 Byte DAC960 V1 Firmware Command Mailbox structure. + * Bytes 13-15 are not used. The structure is padded to 16 bytes for + * efficient access. + */ +union myrb_cmd_mbox { + unsigned int words[4]; /* Words 0-3 */ + unsigned char bytes[16]; /* Bytes 0-15 */ + struct { + enum myrb_cmd_opcode opcode; /* Byte 0 */ + unsigned char id; /* Byte 1 */ + unsigned char rsvd[14]; /* Bytes 2-15 */ + } __packed common; + struct { + enum myrb_cmd_opcode opcode; /* Byte 0 */ + unsigned char id; /* Byte 1 */ + unsigned char rsvd1[6]; /* Bytes 2-7 */ + u32 addr; /* Bytes 8-11 */ + unsigned char rsvd2[4]; /* Bytes 12-15 */ + } __packed type3; + struct { + enum myrb_cmd_opcode opcode; /* Byte 0 */ + unsigned char id; /* Byte 1 */ + unsigned char optype; /* Byte 2 */ + unsigned char rsvd1[5]; /* Bytes 3-7 */ + u32 addr; /* Bytes 8-11 */ + unsigned char rsvd2[4]; /* Bytes 12-15 */ + } __packed type3B; + struct { + enum myrb_cmd_opcode opcode; /* Byte 0 */ + unsigned char id; /* Byte 1 */ + unsigned char rsvd1[5]; /* Bytes 2-6 */ + unsigned char ldev_num:6; /* Byte 7 Bits 0-6 */ + unsigned char auto_restore:1; /* Byte 7 Bit 7 */ + unsigned char rsvd2[8]; /* Bytes 8-15 */ + } __packed type3C; + struct { + enum myrb_cmd_opcode opcode; /* Byte 0 */ + unsigned char id; /* Byte 1 */ + unsigned char channel; /* Byte 2 */ + unsigned char target; /* Byte 3 */ + enum myrb_devstate state; /* Byte 4 */ + unsigned char rsvd1[3]; /* Bytes 5-7 */ + u32 addr; /* Bytes 8-11 */ + unsigned char rsvd2[4]; /* Bytes 12-15 */ + } __packed type3D; + struct { + enum myrb_cmd_opcode opcode; /* Byte 0 */ + unsigned char id; /* Byte 1 */ + unsigned char optype; /* Byte 2 */ + unsigned char opqual; /* Byte 3 */ + unsigned short ev_seq; /* Bytes 4-5 */ + unsigned char rsvd1[2]; /* Bytes 6-7 */ + u32 addr; /* Bytes 8-11 */ + unsigned char rsvd2[4]; /* Bytes 12-15 */ + } __packed type3E; + struct { + enum myrb_cmd_opcode opcode; /* Byte 0 */ + unsigned char id; /* Byte 1 */ + unsigned char rsvd1[2]; /* Bytes 2-3 */ + unsigned char rbld_rate; /* Byte 4 */ + unsigned char rsvd2[3]; /* Bytes 5-7 */ + u32 addr; /* Bytes 8-11 */ + unsigned char rsvd3[4]; /* Bytes 12-15 */ + } __packed type3R; + struct { + enum myrb_cmd_opcode opcode; /* Byte 0 */ + unsigned char id; /* Byte 1 */ + unsigned short xfer_len; /* Bytes 2-3 */ + 
unsigned int lba; /* Bytes 4-7 */ + u32 addr; /* Bytes 8-11 */ + unsigned char ldev_num; /* Byte 12 */ + unsigned char rsvd[3]; /* Bytes 13-15 */ + } __packed type4; + struct { + enum myrb_cmd_opcode opcode; /* Byte 0 */ + unsigned char id; /* Byte 1 */ + struct { + unsigned short xfer_len:11; /* Bytes 2-3 */ + unsigned char ldev_num:5; /* Byte 3 Bits 3-7 */ + } __packed ld; + unsigned int lba; /* Bytes 4-7 */ + u32 addr; /* Bytes 8-11 */ + unsigned char sg_count:6; /* Byte 12 Bits 0-5 */ + enum { + MYRB_SGL_ADDR32_COUNT32 = 0x0, + MYRB_SGL_ADDR32_COUNT16 = 0x1, + MYRB_SGL_COUNT32_ADDR32 = 0x2, + MYRB_SGL_COUNT16_ADDR32 = 0x3 + } __packed sg_type:2; /* Byte 12 Bits 6-7 */ + unsigned char rsvd[3]; /* Bytes 13-15 */ + } __packed type5; + struct { + enum myrb_cmd_opcode opcode; /* Byte 0 */ + unsigned char id; /* Byte 1 */ + unsigned char opcode2; /* Byte 2 */ + unsigned char rsvd1:8; /* Byte 3 */ + u32 cmd_mbox_addr; /* Bytes 4-7 */ + u32 stat_mbox_addr; /* Bytes 8-11 */ + unsigned char rsvd2[4]; /* Bytes 12-15 */ + } __packed typeX; +}; + +/* + * DAC960 V1 Firmware Controller Status Mailbox structure. + */ +struct myrb_stat_mbox { + unsigned char id; /* Byte 0 */ + unsigned char rsvd:7; /* Byte 1 Bits 0-6 */ + unsigned char valid:1; /* Byte 1 Bit 7 */ + unsigned short status; /* Bytes 2-3 */ +}; + +struct myrb_cmdblk { + union myrb_cmd_mbox mbox; + unsigned short status; + struct completion *completion; + struct myrb_dcdb *dcdb; + dma_addr_t dcdb_addr; + struct myrb_sge *sgl; + dma_addr_t sgl_addr; +}; + +struct myrb_hba { + unsigned int ldev_block_size; + unsigned char ldev_geom_heads; + unsigned char ldev_geom_sectors; + unsigned char bus_width; + unsigned short stripe_size; + unsigned short segment_size; + unsigned short new_ev_seq; + unsigned short old_ev_seq; + bool dual_mode_interface; + bool bgi_status_supported; + bool safte_enabled; + bool need_ldev_info; + bool need_err_info; + bool need_rbld; + bool need_cc_status; + bool need_bgi_status; + bool rbld_first; + + struct pci_dev *pdev; + struct Scsi_Host *host; + + struct workqueue_struct *work_q; + char work_q_name[20]; + struct delayed_work monitor_work; + unsigned long primary_monitor_time; + unsigned long secondary_monitor_time; + + struct dma_pool *sg_pool; + struct dma_pool *dcdb_pool; + + spinlock_t queue_lock; + + void (*qcmd)(struct myrb_hba *cs, struct myrb_cmdblk *cmd_blk); + void (*write_cmd_mbox)(union myrb_cmd_mbox *next_mbox, + union myrb_cmd_mbox *cmd_mbox); + void (*get_cmd_mbox)(void __iomem *base); + void (*disable_intr)(void __iomem *base); + void (*reset)(void __iomem *base); + + unsigned int ctlr_num; + unsigned char model_name[20]; + unsigned char fw_version[12]; + + unsigned int irq; + phys_addr_t io_addr; + phys_addr_t pci_addr; + void __iomem *io_base; + void __iomem *mmio_base; + + size_t cmd_mbox_size; + dma_addr_t cmd_mbox_addr; + union myrb_cmd_mbox *first_cmd_mbox; + union myrb_cmd_mbox *last_cmd_mbox; + union myrb_cmd_mbox *next_cmd_mbox; + union myrb_cmd_mbox *prev_cmd_mbox1; + union myrb_cmd_mbox *prev_cmd_mbox2; + + size_t stat_mbox_size; + dma_addr_t stat_mbox_addr; + struct myrb_stat_mbox *first_stat_mbox; + struct myrb_stat_mbox *last_stat_mbox; + struct myrb_stat_mbox *next_stat_mbox; + + struct myrb_cmdblk dcmd_blk; + struct myrb_cmdblk mcmd_blk; + struct mutex dcmd_mutex; + + struct myrb_enquiry *enquiry; + dma_addr_t enquiry_addr; + + struct myrb_error_entry *err_table; + dma_addr_t err_table_addr; + + unsigned short last_rbld_status; + + struct myrb_ldev_info *ldev_info_buf; + dma_addr_t 
ldev_info_addr; + + struct myrb_bgi_status bgi_status; + + struct mutex dma_mutex; +}; + +/* + * DAC960 LA Series Controller Interface Register Offsets. + */ +#define DAC960_LA_mmio_size 0x80 + +enum DAC960_LA_reg_offset { + DAC960_LA_IRQMASK_OFFSET = 0x34, + DAC960_LA_CMDOP_OFFSET = 0x50, + DAC960_LA_CMDID_OFFSET = 0x51, + DAC960_LA_MBOX2_OFFSET = 0x52, + DAC960_LA_MBOX3_OFFSET = 0x53, + DAC960_LA_MBOX4_OFFSET = 0x54, + DAC960_LA_MBOX5_OFFSET = 0x55, + DAC960_LA_MBOX6_OFFSET = 0x56, + DAC960_LA_MBOX7_OFFSET = 0x57, + DAC960_LA_MBOX8_OFFSET = 0x58, + DAC960_LA_MBOX9_OFFSET = 0x59, + DAC960_LA_MBOX10_OFFSET = 0x5A, + DAC960_LA_MBOX11_OFFSET = 0x5B, + DAC960_LA_MBOX12_OFFSET = 0x5C, + DAC960_LA_STSID_OFFSET = 0x5D, + DAC960_LA_STS_OFFSET = 0x5E, + DAC960_LA_IDB_OFFSET = 0x60, + DAC960_LA_ODB_OFFSET = 0x61, + DAC960_LA_ERRSTS_OFFSET = 0x63, +}; + +/* + * DAC960 LA Series Inbound Door Bell Register. + */ +#define DAC960_LA_IDB_HWMBOX_NEW_CMD 0x01 +#define DAC960_LA_IDB_HWMBOX_ACK_STS 0x02 +#define DAC960_LA_IDB_GEN_IRQ 0x04 +#define DAC960_LA_IDB_CTRL_RESET 0x08 +#define DAC960_LA_IDB_MMBOX_NEW_CMD 0x10 + +#define DAC960_LA_IDB_HWMBOX_EMPTY 0x01 +#define DAC960_LA_IDB_INIT_DONE 0x02 + +/* + * DAC960 LA Series Outbound Door Bell Register. + */ +#define DAC960_LA_ODB_HWMBOX_ACK_IRQ 0x01 +#define DAC960_LA_ODB_MMBOX_ACK_IRQ 0x02 +#define DAC960_LA_ODB_HWMBOX_STS_AVAIL 0x01 +#define DAC960_LA_ODB_MMBOX_STS_AVAIL 0x02 + +/* + * DAC960 LA Series Interrupt Mask Register. + */ +#define DAC960_LA_IRQMASK_DISABLE_IRQ 0x04 + +/* + * DAC960 LA Series Error Status Register. + */ +#define DAC960_LA_ERRSTS_PENDING 0x02 + +/* + * DAC960 PG Series Controller Interface Register Offsets. + */ +#define DAC960_PG_mmio_size 0x2000 + +enum DAC960_PG_reg_offset { + DAC960_PG_IDB_OFFSET = 0x0020, + DAC960_PG_ODB_OFFSET = 0x002C, + DAC960_PG_IRQMASK_OFFSET = 0x0034, + DAC960_PG_CMDOP_OFFSET = 0x1000, + DAC960_PG_CMDID_OFFSET = 0x1001, + DAC960_PG_MBOX2_OFFSET = 0x1002, + DAC960_PG_MBOX3_OFFSET = 0x1003, + DAC960_PG_MBOX4_OFFSET = 0x1004, + DAC960_PG_MBOX5_OFFSET = 0x1005, + DAC960_PG_MBOX6_OFFSET = 0x1006, + DAC960_PG_MBOX7_OFFSET = 0x1007, + DAC960_PG_MBOX8_OFFSET = 0x1008, + DAC960_PG_MBOX9_OFFSET = 0x1009, + DAC960_PG_MBOX10_OFFSET = 0x100A, + DAC960_PG_MBOX11_OFFSET = 0x100B, + DAC960_PG_MBOX12_OFFSET = 0x100C, + DAC960_PG_STSID_OFFSET = 0x1018, + DAC960_PG_STS_OFFSET = 0x101A, + DAC960_PG_ERRSTS_OFFSET = 0x103F, +}; + +/* + * DAC960 PG Series Inbound Door Bell Register. + */ +#define DAC960_PG_IDB_HWMBOX_NEW_CMD 0x01 +#define DAC960_PG_IDB_HWMBOX_ACK_STS 0x02 +#define DAC960_PG_IDB_GEN_IRQ 0x04 +#define DAC960_PG_IDB_CTRL_RESET 0x08 +#define DAC960_PG_IDB_MMBOX_NEW_CMD 0x10 + +#define DAC960_PG_IDB_HWMBOX_FULL 0x01 +#define DAC960_PG_IDB_INIT_IN_PROGRESS 0x02 + +/* + * DAC960 PG Series Outbound Door Bell Register. + */ +#define DAC960_PG_ODB_HWMBOX_ACK_IRQ 0x01 +#define DAC960_PG_ODB_MMBOX_ACK_IRQ 0x02 +#define DAC960_PG_ODB_HWMBOX_STS_AVAIL 0x01 +#define DAC960_PG_ODB_MMBOX_STS_AVAIL 0x02 + +/* + * DAC960 PG Series Interrupt Mask Register. + */ +#define DAC960_PG_IRQMASK_MSI_MASK1 0x03 +#define DAC960_PG_IRQMASK_DISABLE_IRQ 0x04 +#define DAC960_PG_IRQMASK_MSI_MASK2 0xF8 + +/* + * DAC960 PG Series Error Status Register. + */ +#define DAC960_PG_ERRSTS_PENDING 0x04 + +/* + * DAC960 PD Series Controller Interface Register Offsets. 
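+ *
+ * The DAC960 P series shares this register layout; DAC960_P_privdata
+ * reuses the PD register accessors and DAC960_PD_mmio_size.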
+ */ +#define DAC960_PD_mmio_size 0x80 + +enum DAC960_PD_reg_offset { + DAC960_PD_CMDOP_OFFSET = 0x00, + DAC960_PD_CMDID_OFFSET = 0x01, + DAC960_PD_MBOX2_OFFSET = 0x02, + DAC960_PD_MBOX3_OFFSET = 0x03, + DAC960_PD_MBOX4_OFFSET = 0x04, + DAC960_PD_MBOX5_OFFSET = 0x05, + DAC960_PD_MBOX6_OFFSET = 0x06, + DAC960_PD_MBOX7_OFFSET = 0x07, + DAC960_PD_MBOX8_OFFSET = 0x08, + DAC960_PD_MBOX9_OFFSET = 0x09, + DAC960_PD_MBOX10_OFFSET = 0x0A, + DAC960_PD_MBOX11_OFFSET = 0x0B, + DAC960_PD_MBOX12_OFFSET = 0x0C, + DAC960_PD_STSID_OFFSET = 0x0D, + DAC960_PD_STS_OFFSET = 0x0E, + DAC960_PD_ERRSTS_OFFSET = 0x3F, + DAC960_PD_IDB_OFFSET = 0x40, + DAC960_PD_ODB_OFFSET = 0x41, + DAC960_PD_IRQEN_OFFSET = 0x43, +}; + +/* + * DAC960 PD Series Inbound Door Bell Register. + */ +#define DAC960_PD_IDB_HWMBOX_NEW_CMD 0x01 +#define DAC960_PD_IDB_HWMBOX_ACK_STS 0x02 +#define DAC960_PD_IDB_GEN_IRQ 0x04 +#define DAC960_PD_IDB_CTRL_RESET 0x08 + +#define DAC960_PD_IDB_HWMBOX_FULL 0x01 +#define DAC960_PD_IDB_INIT_IN_PROGRESS 0x02 + +/* + * DAC960 PD Series Outbound Door Bell Register. + */ +#define DAC960_PD_ODB_HWMBOX_ACK_IRQ 0x01 +#define DAC960_PD_ODB_HWMBOX_STS_AVAIL 0x01 + +/* + * DAC960 PD Series Interrupt Enable Register. + */ +#define DAC960_PD_IRQMASK_ENABLE_IRQ 0x01 + +/* + * DAC960 PD Series Error Status Register. + */ +#define DAC960_PD_ERRSTS_PENDING 0x04 + +typedef int (*myrb_hw_init_t)(struct pci_dev *pdev, + struct myrb_hba *cb, void __iomem *base); +typedef unsigned short (*mbox_mmio_init_t)(struct pci_dev *pdev, + void __iomem *base, + union myrb_cmd_mbox *mbox); + +struct myrb_privdata { + myrb_hw_init_t hw_init; + irq_handler_t irq_handler; + unsigned int mmio_size; +}; + +#endif /* MYRB_H */ diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c new file mode 100644 index 000000000..a1eec65a9 --- /dev/null +++ b/drivers/scsi/myrs.c @@ -0,0 +1,3168 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers + * + * This driver supports the newer, SCSI-based firmware interface only. + * + * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH + * + * Based on the original DAC960 driver, which has + * Copyright 1998-2001 by Leonard N. 
Zubkoff + * Portions Copyright 2002 by Mylex (An IBM Business Unit) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "myrs.h" + +static struct raid_template *myrs_raid_template; + +static struct myrs_devstate_name_entry { + enum myrs_devstate state; + char *name; +} myrs_devstate_name_list[] = { + { MYRS_DEVICE_UNCONFIGURED, "Unconfigured" }, + { MYRS_DEVICE_ONLINE, "Online" }, + { MYRS_DEVICE_REBUILD, "Rebuild" }, + { MYRS_DEVICE_MISSING, "Missing" }, + { MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" }, + { MYRS_DEVICE_OFFLINE, "Offline" }, + { MYRS_DEVICE_CRITICAL, "Critical" }, + { MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" }, + { MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" }, + { MYRS_DEVICE_STANDBY, "Standby" }, + { MYRS_DEVICE_INVALID_STATE, "Invalid" }, +}; + +static char *myrs_devstate_name(enum myrs_devstate state) +{ + struct myrs_devstate_name_entry *entry = myrs_devstate_name_list; + int i; + + for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) { + if (entry[i].state == state) + return entry[i].name; + } + return NULL; +} + +static struct myrs_raid_level_name_entry { + enum myrs_raid_level level; + char *name; +} myrs_raid_level_name_list[] = { + { MYRS_RAID_LEVEL0, "RAID0" }, + { MYRS_RAID_LEVEL1, "RAID1" }, + { MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" }, + { MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" }, + { MYRS_RAID_LEVEL6, "RAID6" }, + { MYRS_RAID_JBOD, "JBOD" }, + { MYRS_RAID_NEWSPAN, "New Mylex SPAN" }, + { MYRS_RAID_LEVEL3F, "RAID3 fixed parity" }, + { MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" }, + { MYRS_RAID_SPAN, "Mylex SPAN" }, + { MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" }, + { MYRS_RAID_LEVELE, "RAIDE (concatenation)" }, + { MYRS_RAID_PHYSICAL, "Physical device" }, +}; + +static char *myrs_raid_level_name(enum myrs_raid_level level) +{ + struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list; + int i; + + for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) { + if (entry[i].level == level) + return entry[i].name; + } + return NULL; +} + +/* + * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk + */ +static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk) +{ + union myrs_cmd_mbox *mbox = &cmd_blk->mbox; + + memset(mbox, 0, sizeof(union myrs_cmd_mbox)); + cmd_blk->status = 0; +} + +/* + * myrs_qcmd - queues Command for DAC960 V2 Series Controllers. + */ +static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk) +{ + void __iomem *base = cs->io_base; + union myrs_cmd_mbox *mbox = &cmd_blk->mbox; + union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox; + + cs->write_cmd_mbox(next_mbox, mbox); + + if (cs->prev_cmd_mbox1->words[0] == 0 || + cs->prev_cmd_mbox2->words[0] == 0) + cs->get_cmd_mbox(base); + + cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1; + cs->prev_cmd_mbox1 = next_mbox; + + if (++next_mbox > cs->last_cmd_mbox) + next_mbox = cs->first_cmd_mbox; + + cs->next_cmd_mbox = next_mbox; +} + +/* + * myrs_exec_cmd - executes V2 Command and waits for completion. 
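+ *
+ * Sleeps until the controller posts the completion, so it must not be
+ * called from atomic context; callers using the shared dcmd_blk
+ * serialize access with dcmd_mutex.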
+ */ +static void myrs_exec_cmd(struct myrs_hba *cs, + struct myrs_cmdblk *cmd_blk) +{ + DECLARE_COMPLETION_ONSTACK(complete); + unsigned long flags; + + cmd_blk->complete = &complete; + spin_lock_irqsave(&cs->queue_lock, flags); + myrs_qcmd(cs, cmd_blk); + spin_unlock_irqrestore(&cs->queue_lock, flags); + + wait_for_completion(&complete); +} + +/* + * myrs_report_progress - prints progress message + */ +static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num, + unsigned char *msg, unsigned long blocks, + unsigned long size) +{ + shost_printk(KERN_INFO, cs->host, + "Logical Drive %d: %s in Progress: %d%% completed\n", + ldev_num, msg, + (100 * (int)(blocks >> 7)) / (int)(size >> 7)); +} + +/* + * myrs_get_ctlr_info - executes a Controller Information IOCTL Command + */ +static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs) +{ + struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; + union myrs_cmd_mbox *mbox = &cmd_blk->mbox; + dma_addr_t ctlr_info_addr; + union myrs_sgl *sgl; + unsigned char status; + unsigned short ldev_present, ldev_critical, ldev_offline; + + ldev_present = cs->ctlr_info->ldev_present; + ldev_critical = cs->ctlr_info->ldev_critical; + ldev_offline = cs->ctlr_info->ldev_offline; + + ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info, + sizeof(struct myrs_ctlr_info), + DMA_FROM_DEVICE); + if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr)) + return MYRS_STATUS_FAILED; + + mutex_lock(&cs->dcmd_mutex); + myrs_reset_cmd(cmd_blk); + mbox->ctlr_info.id = MYRS_DCMD_TAG; + mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL; + mbox->ctlr_info.control.dma_ctrl_to_host = true; + mbox->ctlr_info.control.no_autosense = true; + mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info); + mbox->ctlr_info.ctlr_num = 0; + mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO; + sgl = &mbox->ctlr_info.dma_addr; + sgl->sge[0].sge_addr = ctlr_info_addr; + sgl->sge[0].sge_count = mbox->ctlr_info.dma_size; + dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n"); + myrs_exec_cmd(cs, cmd_blk); + status = cmd_blk->status; + mutex_unlock(&cs->dcmd_mutex); + dma_unmap_single(&cs->pdev->dev, ctlr_info_addr, + sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE); + if (status == MYRS_STATUS_SUCCESS) { + if (cs->ctlr_info->bg_init_active + + cs->ctlr_info->ldev_init_active + + cs->ctlr_info->pdev_init_active + + cs->ctlr_info->cc_active + + cs->ctlr_info->rbld_active + + cs->ctlr_info->exp_active != 0) + cs->needs_update = true; + if (cs->ctlr_info->ldev_present != ldev_present || + cs->ctlr_info->ldev_critical != ldev_critical || + cs->ctlr_info->ldev_offline != ldev_offline) + shost_printk(KERN_INFO, cs->host, + "Logical drive count changes (%d/%d/%d)\n", + cs->ctlr_info->ldev_critical, + cs->ctlr_info->ldev_offline, + cs->ctlr_info->ldev_present); + } + + return status; +} + +/* + * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command + */ +static unsigned char myrs_get_ldev_info(struct myrs_hba *cs, + unsigned short ldev_num, struct myrs_ldev_info *ldev_info) +{ + struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; + union myrs_cmd_mbox *mbox = &cmd_blk->mbox; + dma_addr_t ldev_info_addr; + struct myrs_ldev_info ldev_info_orig; + union myrs_sgl *sgl; + unsigned char status; + + memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info)); + ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info, + sizeof(struct myrs_ldev_info), + DMA_FROM_DEVICE); + if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr)) + return MYRS_STATUS_FAILED; + + 
mutex_lock(&cs->dcmd_mutex); + myrs_reset_cmd(cmd_blk); + mbox->ldev_info.id = MYRS_DCMD_TAG; + mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL; + mbox->ldev_info.control.dma_ctrl_to_host = true; + mbox->ldev_info.control.no_autosense = true; + mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info); + mbox->ldev_info.ldev.ldev_num = ldev_num; + mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID; + sgl = &mbox->ldev_info.dma_addr; + sgl->sge[0].sge_addr = ldev_info_addr; + sgl->sge[0].sge_count = mbox->ldev_info.dma_size; + dev_dbg(&cs->host->shost_gendev, + "Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num); + myrs_exec_cmd(cs, cmd_blk); + status = cmd_blk->status; + mutex_unlock(&cs->dcmd_mutex); + dma_unmap_single(&cs->pdev->dev, ldev_info_addr, + sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE); + if (status == MYRS_STATUS_SUCCESS) { + unsigned short ldev_num = ldev_info->ldev_num; + struct myrs_ldev_info *new = ldev_info; + struct myrs_ldev_info *old = &ldev_info_orig; + unsigned long ldev_size = new->cfg_devsize; + + if (new->dev_state != old->dev_state) { + const char *name; + + name = myrs_devstate_name(new->dev_state); + shost_printk(KERN_INFO, cs->host, + "Logical Drive %d is now %s\n", + ldev_num, name ? name : "Invalid"); + } + if ((new->soft_errs != old->soft_errs) || + (new->cmds_failed != old->cmds_failed) || + (new->deferred_write_errs != old->deferred_write_errs)) + shost_printk(KERN_INFO, cs->host, + "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n", + ldev_num, new->soft_errs, + new->cmds_failed, + new->deferred_write_errs); + if (new->bg_init_active) + myrs_report_progress(cs, ldev_num, + "Background Initialization", + new->bg_init_lba, ldev_size); + else if (new->fg_init_active) + myrs_report_progress(cs, ldev_num, + "Foreground Initialization", + new->fg_init_lba, ldev_size); + else if (new->migration_active) + myrs_report_progress(cs, ldev_num, + "Data Migration", + new->migration_lba, ldev_size); + else if (new->patrol_active) + myrs_report_progress(cs, ldev_num, + "Patrol Operation", + new->patrol_lba, ldev_size); + if (old->bg_init_active && !new->bg_init_active) + shost_printk(KERN_INFO, cs->host, + "Logical Drive %d: Background Initialization %s\n", + ldev_num, + (new->ldev_control.ldev_init_done ? 
+ "Completed" : "Failed")); + } + return status; +} + +/* + * myrs_get_pdev_info - executes a "Read Physical Device Information" Command + */ +static unsigned char myrs_get_pdev_info(struct myrs_hba *cs, + unsigned char channel, unsigned char target, unsigned char lun, + struct myrs_pdev_info *pdev_info) +{ + struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; + union myrs_cmd_mbox *mbox = &cmd_blk->mbox; + dma_addr_t pdev_info_addr; + union myrs_sgl *sgl; + unsigned char status; + + pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info, + sizeof(struct myrs_pdev_info), + DMA_FROM_DEVICE); + if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr)) + return MYRS_STATUS_FAILED; + + mutex_lock(&cs->dcmd_mutex); + myrs_reset_cmd(cmd_blk); + mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL; + mbox->pdev_info.id = MYRS_DCMD_TAG; + mbox->pdev_info.control.dma_ctrl_to_host = true; + mbox->pdev_info.control.no_autosense = true; + mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info); + mbox->pdev_info.pdev.lun = lun; + mbox->pdev_info.pdev.target = target; + mbox->pdev_info.pdev.channel = channel; + mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID; + sgl = &mbox->pdev_info.dma_addr; + sgl->sge[0].sge_addr = pdev_info_addr; + sgl->sge[0].sge_count = mbox->pdev_info.dma_size; + dev_dbg(&cs->host->shost_gendev, + "Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n", + channel, target, lun); + myrs_exec_cmd(cs, cmd_blk); + status = cmd_blk->status; + mutex_unlock(&cs->dcmd_mutex); + dma_unmap_single(&cs->pdev->dev, pdev_info_addr, + sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE); + return status; +} + +/* + * myrs_dev_op - executes a "Device Operation" Command + */ +static unsigned char myrs_dev_op(struct myrs_hba *cs, + enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev) +{ + struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; + union myrs_cmd_mbox *mbox = &cmd_blk->mbox; + unsigned char status; + + mutex_lock(&cs->dcmd_mutex); + myrs_reset_cmd(cmd_blk); + mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL; + mbox->dev_op.id = MYRS_DCMD_TAG; + mbox->dev_op.control.dma_ctrl_to_host = true; + mbox->dev_op.control.no_autosense = true; + mbox->dev_op.ioctl_opcode = opcode; + mbox->dev_op.opdev = opdev; + myrs_exec_cmd(cs, cmd_blk); + status = cmd_blk->status; + mutex_unlock(&cs->dcmd_mutex); + return status; +} + +/* + * myrs_translate_pdev - translates a Physical Device Channel and + * TargetID into a Logical Device. 
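+ * The translation is performed by the controller via the XLATE_PDEV_TO_LDEV IOCTL and returned in *devmap.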
+ */ +static unsigned char myrs_translate_pdev(struct myrs_hba *cs, + unsigned char channel, unsigned char target, unsigned char lun, + struct myrs_devmap *devmap) +{ + struct pci_dev *pdev = cs->pdev; + dma_addr_t devmap_addr; + struct myrs_cmdblk *cmd_blk; + union myrs_cmd_mbox *mbox; + union myrs_sgl *sgl; + unsigned char status; + + memset(devmap, 0x0, sizeof(struct myrs_devmap)); + devmap_addr = dma_map_single(&pdev->dev, devmap, + sizeof(struct myrs_devmap), + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, devmap_addr)) + return MYRS_STATUS_FAILED; + + mutex_lock(&cs->dcmd_mutex); + cmd_blk = &cs->dcmd_blk; + mbox = &cmd_blk->mbox; + mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL; + mbox->pdev_info.control.dma_ctrl_to_host = true; + mbox->pdev_info.control.no_autosense = true; + mbox->pdev_info.dma_size = sizeof(struct myrs_devmap); + mbox->pdev_info.pdev.target = target; + mbox->pdev_info.pdev.channel = channel; + mbox->pdev_info.pdev.lun = lun; + mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV; + sgl = &mbox->pdev_info.dma_addr; + sgl->sge[0].sge_addr = devmap_addr; + sgl->sge[0].sge_count = mbox->pdev_info.dma_size; + + myrs_exec_cmd(cs, cmd_blk); + status = cmd_blk->status; + mutex_unlock(&cs->dcmd_mutex); + dma_unmap_single(&pdev->dev, devmap_addr, + sizeof(struct myrs_devmap), DMA_FROM_DEVICE); + return status; +} + +/* + * myrs_get_event - executes a Get Event Command + */ +static unsigned char myrs_get_event(struct myrs_hba *cs, + unsigned int event_num, struct myrs_event *event_buf) +{ + struct pci_dev *pdev = cs->pdev; + dma_addr_t event_addr; + struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk; + union myrs_cmd_mbox *mbox = &cmd_blk->mbox; + union myrs_sgl *sgl; + unsigned char status; + + event_addr = dma_map_single(&pdev->dev, event_buf, + sizeof(struct myrs_event), DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, event_addr)) + return MYRS_STATUS_FAILED; + + mbox->get_event.opcode = MYRS_CMD_OP_IOCTL; + mbox->get_event.dma_size = sizeof(struct myrs_event); + mbox->get_event.evnum_upper = event_num >> 16; + mbox->get_event.ctlr_num = 0; + mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT; + mbox->get_event.evnum_lower = event_num & 0xFFFF; + sgl = &mbox->get_event.dma_addr; + sgl->sge[0].sge_addr = event_addr; + sgl->sge[0].sge_count = mbox->get_event.dma_size; + myrs_exec_cmd(cs, cmd_blk); + status = cmd_blk->status; + dma_unmap_single(&pdev->dev, event_addr, + sizeof(struct myrs_event), DMA_FROM_DEVICE); + + return status; +} + +/* + * myrs_get_fwstatus - executes a Get Health Status Command + */ +static unsigned char myrs_get_fwstatus(struct myrs_hba *cs) +{ + struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk; + union myrs_cmd_mbox *mbox = &cmd_blk->mbox; + union myrs_sgl *sgl; + unsigned char status = cmd_blk->status; + + myrs_reset_cmd(cmd_blk); + mbox->common.opcode = MYRS_CMD_OP_IOCTL; + mbox->common.id = MYRS_MCMD_TAG; + mbox->common.control.dma_ctrl_to_host = true; + mbox->common.control.no_autosense = true; + mbox->common.dma_size = sizeof(struct myrs_fwstat); + mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS; + sgl = &mbox->common.dma_addr; + sgl->sge[0].sge_addr = cs->fwstat_addr; + sgl->sge[0].sge_count = mbox->ctlr_info.dma_size; + dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n"); + myrs_exec_cmd(cs, cmd_blk); + status = cmd_blk->status; + + return status; +} + +/* + * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface + */ +static bool myrs_enable_mmio_mbox(struct myrs_hba *cs, + enable_mbox_t enable_mbox_fn) +{ + 
void __iomem *base = cs->io_base; + struct pci_dev *pdev = cs->pdev; + union myrs_cmd_mbox *cmd_mbox; + struct myrs_stat_mbox *stat_mbox; + union myrs_cmd_mbox *mbox; + dma_addr_t mbox_addr; + unsigned char status = MYRS_STATUS_FAILED; + + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { + dev_err(&pdev->dev, "DMA mask out of range\n"); + return false; + } + + /* Temporary dma mapping, used only in the scope of this function */ + mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox), + &mbox_addr, GFP_KERNEL); + if (dma_mapping_error(&pdev->dev, mbox_addr)) + return false; + + /* These are the base addresses for the command memory mailbox array */ + cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox); + cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size, + &cs->cmd_mbox_addr, GFP_KERNEL); + if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) { + dev_err(&pdev->dev, "Failed to map command mailbox\n"); + goto out_free; + } + cs->first_cmd_mbox = cmd_mbox; + cmd_mbox += MYRS_MAX_CMD_MBOX - 1; + cs->last_cmd_mbox = cmd_mbox; + cs->next_cmd_mbox = cs->first_cmd_mbox; + cs->prev_cmd_mbox1 = cs->last_cmd_mbox; + cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1; + + /* These are the base addresses for the status memory mailbox array */ + cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox); + stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size, + &cs->stat_mbox_addr, GFP_KERNEL); + if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) { + dev_err(&pdev->dev, "Failed to map status mailbox\n"); + goto out_free; + } + + cs->first_stat_mbox = stat_mbox; + stat_mbox += MYRS_MAX_STAT_MBOX - 1; + cs->last_stat_mbox = stat_mbox; + cs->next_stat_mbox = cs->first_stat_mbox; + + cs->fwstat_buf = dma_alloc_coherent(&pdev->dev, + sizeof(struct myrs_fwstat), + &cs->fwstat_addr, GFP_KERNEL); + if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) { + dev_err(&pdev->dev, "Failed to map firmware health buffer\n"); + cs->fwstat_buf = NULL; + goto out_free; + } + cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info), GFP_KERNEL); + if (!cs->ctlr_info) + goto out_free; + + cs->event_buf = kzalloc(sizeof(struct myrs_event), GFP_KERNEL); + if (!cs->event_buf) + goto out_free; + + /* Enable the Memory Mailbox Interface. 
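The MYRS_IOCTL_SET_MEM_MBOX command passes the controller the addresses of the command and status mailbox arrays and of the health status buffer.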
*/ + memset(mbox, 0, sizeof(union myrs_cmd_mbox)); + mbox->set_mbox.id = 1; + mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL; + mbox->set_mbox.control.no_autosense = true; + mbox->set_mbox.first_cmd_mbox_size_kb = + (MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10; + mbox->set_mbox.first_stat_mbox_size_kb = + (MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10; + mbox->set_mbox.second_cmd_mbox_size_kb = 0; + mbox->set_mbox.second_stat_mbox_size_kb = 0; + mbox->set_mbox.sense_len = 0; + mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX; + mbox->set_mbox.fwstat_buf_size_kb = 1; + mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr; + mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr; + mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr; + status = enable_mbox_fn(base, mbox_addr); + +out_free: + dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox), + mbox, mbox_addr); + if (status != MYRS_STATUS_SUCCESS) + dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n", + status); + return (status == MYRS_STATUS_SUCCESS); +} + +/* + * myrs_get_config - reads the Configuration Information + */ +static int myrs_get_config(struct myrs_hba *cs) +{ + struct myrs_ctlr_info *info = cs->ctlr_info; + struct Scsi_Host *shost = cs->host; + unsigned char status; + unsigned char model[20]; + unsigned char fw_version[12]; + int i, model_len; + + /* Get data into dma-able area, then copy into permanent location */ + mutex_lock(&cs->cinfo_mutex); + status = myrs_get_ctlr_info(cs); + mutex_unlock(&cs->cinfo_mutex); + if (status != MYRS_STATUS_SUCCESS) { + shost_printk(KERN_ERR, shost, + "Failed to get controller information\n"); + return -ENODEV; + } + + /* Initialize the Controller Model Name and Full Model Name fields. */ + model_len = sizeof(info->ctlr_name); + if (model_len > sizeof(model)-1) + model_len = sizeof(model)-1; + memcpy(model, info->ctlr_name, model_len); + model_len--; + while (model[model_len] == ' ' || model[model_len] == '\0') + model_len--; + model[++model_len] = '\0'; + strcpy(cs->model_name, "DAC960 "); + strcat(cs->model_name, model); + /* Initialize the Controller Firmware Version field. */ + sprintf(fw_version, "%d.%02d-%02d", + info->fw_major_version, info->fw_minor_version, + info->fw_turn_number); + if (info->fw_major_version == 6 && + info->fw_minor_version == 0 && + info->fw_turn_number < 1) { + shost_printk(KERN_WARNING, shost, + "FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n" + "STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n" + "PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n", + fw_version); + return -ENODEV; + } + /* Initialize the Controller Channels and Targets. */ + shost->max_channel = info->physchan_present + info->virtchan_present; + shost->max_id = info->max_targets[0]; + for (i = 1; i < 16; i++) { + if (!info->max_targets[i]) + continue; + if (shost->max_id < info->max_targets[i]) + shost->max_id = info->max_targets[i]; + } + + /* + * Initialize the Controller Queue Depth, Driver Queue Depth, + * Logical Drive Count, Maximum Blocks per Command, Controller + * Scatter/Gather Limit, and Driver Scatter/Gather Limit. + * The Driver Queue Depth must be at most three less than + * the Controller Queue Depth; tag '1' is reserved for + * direct commands, and tag '2' for monitoring commands. 
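+ * For example, a controller reporting a maximum of 256 tagged commands yields a driver queue depth of 253, further capped at MYRS_MAX_CMD_MBOX - 3.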
+ */ + shost->can_queue = info->max_tcq - 3; + if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3) + shost->can_queue = MYRS_MAX_CMD_MBOX - 3; + shost->max_sectors = info->max_transfer_size; + shost->sg_tablesize = info->max_sge; + if (shost->sg_tablesize > MYRS_SG_LIMIT) + shost->sg_tablesize = MYRS_SG_LIMIT; + + shost_printk(KERN_INFO, shost, + "Configuring %s PCI RAID Controller\n", model); + shost_printk(KERN_INFO, shost, + " Firmware Version: %s, Channels: %d, Memory Size: %dMB\n", + fw_version, info->physchan_present, info->mem_size_mb); + + shost_printk(KERN_INFO, shost, + " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n", + shost->can_queue, shost->max_sectors); + + shost_printk(KERN_INFO, shost, + " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n", + shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT); + for (i = 0; i < info->physchan_max; i++) { + if (!info->max_targets[i]) + continue; + shost_printk(KERN_INFO, shost, + " Device Channel %d: max %d devices\n", + i, info->max_targets[i]); + } + shost_printk(KERN_INFO, shost, + " Physical: %d/%d channels, %d disks, %d devices\n", + info->physchan_present, info->physchan_max, + info->pdisk_present, info->pdev_present); + + shost_printk(KERN_INFO, shost, + " Logical: %d/%d channels, %d disks\n", + info->virtchan_present, info->virtchan_max, + info->ldev_present); + return 0; +} + +/* + * myrs_log_event - prints a Controller Event message + */ +static struct { + int ev_code; + unsigned char *ev_msg; +} myrs_ev_list[] = { + /* Physical Device Events (0x0000 - 0x007F) */ + { 0x0001, "P Online" }, + { 0x0002, "P Standby" }, + { 0x0005, "P Automatic Rebuild Started" }, + { 0x0006, "P Manual Rebuild Started" }, + { 0x0007, "P Rebuild Completed" }, + { 0x0008, "P Rebuild Cancelled" }, + { 0x0009, "P Rebuild Failed for Unknown Reasons" }, + { 0x000A, "P Rebuild Failed due to New Physical Device" }, + { 0x000B, "P Rebuild Failed due to Logical Drive Failure" }, + { 0x000C, "S Offline" }, + { 0x000D, "P Found" }, + { 0x000E, "P Removed" }, + { 0x000F, "P Unconfigured" }, + { 0x0010, "P Expand Capacity Started" }, + { 0x0011, "P Expand Capacity Completed" }, + { 0x0012, "P Expand Capacity Failed" }, + { 0x0013, "P Command Timed Out" }, + { 0x0014, "P Command Aborted" }, + { 0x0015, "P Command Retried" }, + { 0x0016, "P Parity Error" }, + { 0x0017, "P Soft Error" }, + { 0x0018, "P Miscellaneous Error" }, + { 0x0019, "P Reset" }, + { 0x001A, "P Active Spare Found" }, + { 0x001B, "P Warm Spare Found" }, + { 0x001C, "S Sense Data Received" }, + { 0x001D, "P Initialization Started" }, + { 0x001E, "P Initialization Completed" }, + { 0x001F, "P Initialization Failed" }, + { 0x0020, "P Initialization Cancelled" }, + { 0x0021, "P Failed because Write Recovery Failed" }, + { 0x0022, "P Failed because SCSI Bus Reset Failed" }, + { 0x0023, "P Failed because of Double Check Condition" }, + { 0x0024, "P Failed because Device Cannot Be Accessed" }, + { 0x0025, "P Failed because of Gross Error on SCSI Processor" }, + { 0x0026, "P Failed because of Bad Tag from Device" }, + { 0x0027, "P Failed because of Command Timeout" }, + { 0x0028, "P Failed because of System Reset" }, + { 0x0029, "P Failed because of Busy Status or Parity Error" }, + { 0x002A, "P Failed because Host Set Device to Failed State" }, + { 0x002B, "P Failed because of Selection Timeout" }, + { 0x002C, "P Failed because of SCSI Bus Phase Error" }, + { 0x002D, "P Failed because Device Returned Unknown Status" }, + { 0x002E, "P Failed because Device Not Ready" }, + { 
0x002F, "P Failed because Device Not Found at Startup" }, + { 0x0030, "P Failed because COD Write Operation Failed" }, + { 0x0031, "P Failed because BDT Write Operation Failed" }, + { 0x0039, "P Missing at Startup" }, + { 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" }, + { 0x003C, "P Temporarily Offline Device Automatically Made Online" }, + { 0x003D, "P Standby Rebuild Started" }, + /* Logical Device Events (0x0080 - 0x00FF) */ + { 0x0080, "M Consistency Check Started" }, + { 0x0081, "M Consistency Check Completed" }, + { 0x0082, "M Consistency Check Cancelled" }, + { 0x0083, "M Consistency Check Completed With Errors" }, + { 0x0084, "M Consistency Check Failed due to Logical Drive Failure" }, + { 0x0085, "M Consistency Check Failed due to Physical Device Failure" }, + { 0x0086, "L Offline" }, + { 0x0087, "L Critical" }, + { 0x0088, "L Online" }, + { 0x0089, "M Automatic Rebuild Started" }, + { 0x008A, "M Manual Rebuild Started" }, + { 0x008B, "M Rebuild Completed" }, + { 0x008C, "M Rebuild Cancelled" }, + { 0x008D, "M Rebuild Failed for Unknown Reasons" }, + { 0x008E, "M Rebuild Failed due to New Physical Device" }, + { 0x008F, "M Rebuild Failed due to Logical Drive Failure" }, + { 0x0090, "M Initialization Started" }, + { 0x0091, "M Initialization Completed" }, + { 0x0092, "M Initialization Cancelled" }, + { 0x0093, "M Initialization Failed" }, + { 0x0094, "L Found" }, + { 0x0095, "L Deleted" }, + { 0x0096, "M Expand Capacity Started" }, + { 0x0097, "M Expand Capacity Completed" }, + { 0x0098, "M Expand Capacity Failed" }, + { 0x0099, "L Bad Block Found" }, + { 0x009A, "L Size Changed" }, + { 0x009B, "L Type Changed" }, + { 0x009C, "L Bad Data Block Found" }, + { 0x009E, "L Read of Data Block in BDT" }, + { 0x009F, "L Write Back Data for Disk Block Lost" }, + { 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" }, + { 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" }, + { 0x00A2, "L Standby Rebuild Started" }, + /* Fault Management Events (0x0100 - 0x017F) */ + { 0x0140, "E Fan %d Failed" }, + { 0x0141, "E Fan %d OK" }, + { 0x0142, "E Fan %d Not Present" }, + { 0x0143, "E Power Supply %d Failed" }, + { 0x0144, "E Power Supply %d OK" }, + { 0x0145, "E Power Supply %d Not Present" }, + { 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" }, + { 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" }, + { 0x0148, "E Temperature Sensor %d Temperature Normal" }, + { 0x0149, "E Temperature Sensor %d Not Present" }, + { 0x014A, "E Enclosure Management Unit %d Access Critical" }, + { 0x014B, "E Enclosure Management Unit %d Access OK" }, + { 0x014C, "E Enclosure Management Unit %d Access Offline" }, + /* Controller Events (0x0180 - 0x01FF) */ + { 0x0181, "C Cache Write Back Error" }, + { 0x0188, "C Battery Backup Unit Found" }, + { 0x0189, "C Battery Backup Unit Charge Level Low" }, + { 0x018A, "C Battery Backup Unit Charge Level OK" }, + { 0x0193, "C Installation Aborted" }, + { 0x0195, "C Battery Backup Unit Physically Removed" }, + { 0x0196, "C Memory Error During Warm Boot" }, + { 0x019E, "C Memory Soft ECC Error Corrected" }, + { 0x019F, "C Memory Hard ECC Error Corrected" }, + { 0x01A2, "C Battery Backup Unit Failed" }, + { 0x01AB, "C Mirror Race Recovery Failed" }, + { 0x01AC, "C Mirror Race on Critical Drive" }, + /* Controller Internal Processor Events */ + { 0x0380, "C Internal Controller Hung" }, + { 0x0381, "C Internal Controller Firmware Breakpoint" }, + { 0x0390, "C Internal Controller i960 Processor Specific 
Error" }, + { 0x03A0, "C Internal Controller StrongARM Processor Specific Error" }, + { 0, "" } +}; + +static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev) +{ + unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE]; + int ev_idx = 0, ev_code; + unsigned char ev_type, *ev_msg; + struct Scsi_Host *shost = cs->host; + struct scsi_device *sdev; + struct scsi_sense_hdr sshdr = {0}; + unsigned char sense_info[4]; + unsigned char cmd_specific[4]; + + if (ev->ev_code == 0x1C) { + if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) { + memset(&sshdr, 0x0, sizeof(sshdr)); + memset(sense_info, 0x0, sizeof(sense_info)); + memset(cmd_specific, 0x0, sizeof(cmd_specific)); + } else { + memcpy(sense_info, &ev->sense_data[3], 4); + memcpy(cmd_specific, &ev->sense_data[7], 4); + } + } + if (sshdr.sense_key == VENDOR_SPECIFIC && + (sshdr.asc == 0x80 || sshdr.asc == 0x81)) + ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq); + while (true) { + ev_code = myrs_ev_list[ev_idx].ev_code; + if (ev_code == ev->ev_code || ev_code == 0) + break; + ev_idx++; + } + ev_type = myrs_ev_list[ev_idx].ev_msg[0]; + ev_msg = &myrs_ev_list[ev_idx].ev_msg[2]; + if (ev_code == 0) { + shost_printk(KERN_WARNING, shost, + "Unknown Controller Event Code %04X\n", + ev->ev_code); + return; + } + switch (ev_type) { + case 'P': + sdev = scsi_device_lookup(shost, ev->channel, + ev->target, 0); + sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n", + ev->ev_seq, ev_msg); + if (sdev && sdev->hostdata && + sdev->channel < cs->ctlr_info->physchan_present) { + struct myrs_pdev_info *pdev_info = sdev->hostdata; + + switch (ev->ev_code) { + case 0x0001: + case 0x0007: + pdev_info->dev_state = MYRS_DEVICE_ONLINE; + break; + case 0x0002: + pdev_info->dev_state = MYRS_DEVICE_STANDBY; + break; + case 0x000C: + pdev_info->dev_state = MYRS_DEVICE_OFFLINE; + break; + case 0x000E: + pdev_info->dev_state = MYRS_DEVICE_MISSING; + break; + case 0x000F: + pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED; + break; + } + } + break; + case 'L': + shost_printk(KERN_INFO, shost, + "event %d: Logical Drive %d %s\n", + ev->ev_seq, ev->lun, ev_msg); + cs->needs_update = true; + break; + case 'M': + shost_printk(KERN_INFO, shost, + "event %d: Logical Drive %d %s\n", + ev->ev_seq, ev->lun, ev_msg); + cs->needs_update = true; + break; + case 'S': + if (sshdr.sense_key == NO_SENSE || + (sshdr.sense_key == NOT_READY && + sshdr.asc == 0x04 && (sshdr.ascq == 0x01 || + sshdr.ascq == 0x02))) + break; + shost_printk(KERN_INFO, shost, + "event %d: Physical Device %d:%d %s\n", + ev->ev_seq, ev->channel, ev->target, ev_msg); + shost_printk(KERN_INFO, shost, + "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n", + ev->channel, ev->target, + sshdr.sense_key, sshdr.asc, sshdr.ascq); + shost_printk(KERN_INFO, shost, + "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n", + ev->channel, ev->target, + sense_info[0], sense_info[1], + sense_info[2], sense_info[3], + cmd_specific[0], cmd_specific[1], + cmd_specific[2], cmd_specific[3]); + break; + case 'E': + if (cs->disable_enc_msg) + break; + sprintf(msg_buf, ev_msg, ev->lun); + shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n", + ev->ev_seq, ev->target, msg_buf); + break; + case 'C': + shost_printk(KERN_INFO, shost, "event %d: Controller %s\n", + ev->ev_seq, ev_msg); + break; + default: + shost_printk(KERN_INFO, shost, + "event %d: Unknown Event Code %04X\n", + ev->ev_seq, ev->ev_code); + break; + } +} + +/* + * SCSI sysfs interface functions + */ +static 
ssize_t raid_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrs_hba *cs = shost_priv(sdev->host); + int ret; + + if (!sdev->hostdata) + return snprintf(buf, 16, "Unknown\n"); + + if (sdev->channel >= cs->ctlr_info->physchan_present) { + struct myrs_ldev_info *ldev_info = sdev->hostdata; + const char *name; + + name = myrs_devstate_name(ldev_info->dev_state); + if (name) + ret = snprintf(buf, 32, "%s\n", name); + else + ret = snprintf(buf, 32, "Invalid (%02X)\n", + ldev_info->dev_state); + } else { + struct myrs_pdev_info *pdev_info; + const char *name; + + pdev_info = sdev->hostdata; + name = myrs_devstate_name(pdev_info->dev_state); + if (name) + ret = snprintf(buf, 32, "%s\n", name); + else + ret = snprintf(buf, 32, "Invalid (%02X)\n", + pdev_info->dev_state); + } + return ret; +} + +static ssize_t raid_state_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrs_hba *cs = shost_priv(sdev->host); + struct myrs_cmdblk *cmd_blk; + union myrs_cmd_mbox *mbox; + enum myrs_devstate new_state; + unsigned short ldev_num; + unsigned char status; + + if (!strncmp(buf, "offline", 7) || + !strncmp(buf, "kill", 4)) + new_state = MYRS_DEVICE_OFFLINE; + else if (!strncmp(buf, "online", 6)) + new_state = MYRS_DEVICE_ONLINE; + else if (!strncmp(buf, "standby", 7)) + new_state = MYRS_DEVICE_STANDBY; + else + return -EINVAL; + + if (sdev->channel < cs->ctlr_info->physchan_present) { + struct myrs_pdev_info *pdev_info = sdev->hostdata; + struct myrs_devmap *pdev_devmap = + (struct myrs_devmap *)&pdev_info->rsvd13; + + if (pdev_info->dev_state == new_state) { + sdev_printk(KERN_INFO, sdev, + "Device already in %s\n", + myrs_devstate_name(new_state)); + return count; + } + status = myrs_translate_pdev(cs, sdev->channel, sdev->id, + sdev->lun, pdev_devmap); + if (status != MYRS_STATUS_SUCCESS) + return -ENXIO; + ldev_num = pdev_devmap->ldev_num; + } else { + struct myrs_ldev_info *ldev_info = sdev->hostdata; + + if (ldev_info->dev_state == new_state) { + sdev_printk(KERN_INFO, sdev, + "Device already in %s\n", + myrs_devstate_name(new_state)); + return count; + } + ldev_num = ldev_info->ldev_num; + } + mutex_lock(&cs->dcmd_mutex); + cmd_blk = &cs->dcmd_blk; + myrs_reset_cmd(cmd_blk); + mbox = &cmd_blk->mbox; + mbox->common.opcode = MYRS_CMD_OP_IOCTL; + mbox->common.id = MYRS_DCMD_TAG; + mbox->common.control.dma_ctrl_to_host = true; + mbox->common.control.no_autosense = true; + mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE; + mbox->set_devstate.state = new_state; + mbox->set_devstate.ldev.ldev_num = ldev_num; + myrs_exec_cmd(cs, cmd_blk); + status = cmd_blk->status; + mutex_unlock(&cs->dcmd_mutex); + if (status == MYRS_STATUS_SUCCESS) { + if (sdev->channel < cs->ctlr_info->physchan_present) { + struct myrs_pdev_info *pdev_info = sdev->hostdata; + + pdev_info->dev_state = new_state; + } else { + struct myrs_ldev_info *ldev_info = sdev->hostdata; + + ldev_info->dev_state = new_state; + } + sdev_printk(KERN_INFO, sdev, + "Set device state to %s\n", + myrs_devstate_name(new_state)); + return count; + } + sdev_printk(KERN_INFO, sdev, + "Failed to set device state to %s, status 0x%02x\n", + myrs_devstate_name(new_state), status); + return -EINVAL; +} +static DEVICE_ATTR_RW(raid_state); + +static ssize_t raid_level_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev 
= to_scsi_device(dev); + struct myrs_hba *cs = shost_priv(sdev->host); + const char *name = NULL; + + if (!sdev->hostdata) + return snprintf(buf, 16, "Unknown\n"); + + if (sdev->channel >= cs->ctlr_info->physchan_present) { + struct myrs_ldev_info *ldev_info; + + ldev_info = sdev->hostdata; + name = myrs_raid_level_name(ldev_info->raid_level); + if (!name) + return snprintf(buf, 32, "Invalid (%02X)\n", + ldev_info->dev_state); + + } else + name = myrs_raid_level_name(MYRS_RAID_PHYSICAL); + + return snprintf(buf, 32, "%s\n", name); +} +static DEVICE_ATTR_RO(raid_level); + +static ssize_t rebuild_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrs_hba *cs = shost_priv(sdev->host); + struct myrs_ldev_info *ldev_info; + unsigned short ldev_num; + unsigned char status; + + if (sdev->channel < cs->ctlr_info->physchan_present) + return snprintf(buf, 32, "physical device - not rebuilding\n"); + + ldev_info = sdev->hostdata; + ldev_num = ldev_info->ldev_num; + status = myrs_get_ldev_info(cs, ldev_num, ldev_info); + if (status != MYRS_STATUS_SUCCESS) { + sdev_printk(KERN_INFO, sdev, + "Failed to get device information, status 0x%02x\n", + status); + return -EIO; + } + if (ldev_info->rbld_active) { + return snprintf(buf, 32, "rebuilding block %zu of %zu\n", + (size_t)ldev_info->rbld_lba, + (size_t)ldev_info->cfg_devsize); + } else + return snprintf(buf, 32, "not rebuilding\n"); +} + +static ssize_t rebuild_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrs_hba *cs = shost_priv(sdev->host); + struct myrs_ldev_info *ldev_info; + struct myrs_cmdblk *cmd_blk; + union myrs_cmd_mbox *mbox; + unsigned short ldev_num; + unsigned char status; + int rebuild, ret; + + if (sdev->channel < cs->ctlr_info->physchan_present) + return -EINVAL; + + ldev_info = sdev->hostdata; + if (!ldev_info) + return -ENXIO; + ldev_num = ldev_info->ldev_num; + + ret = kstrtoint(buf, 0, &rebuild); + if (ret) + return ret; + + status = myrs_get_ldev_info(cs, ldev_num, ldev_info); + if (status != MYRS_STATUS_SUCCESS) { + sdev_printk(KERN_INFO, sdev, + "Failed to get device information, status 0x%02x\n", + status); + return -EIO; + } + + if (rebuild && ldev_info->rbld_active) { + sdev_printk(KERN_INFO, sdev, + "Rebuild Not Initiated; already in progress\n"); + return -EALREADY; + } + if (!rebuild && !ldev_info->rbld_active) { + sdev_printk(KERN_INFO, sdev, + "Rebuild Not Cancelled; no rebuild in progress\n"); + return count; + } + + mutex_lock(&cs->dcmd_mutex); + cmd_blk = &cs->dcmd_blk; + myrs_reset_cmd(cmd_blk); + mbox = &cmd_blk->mbox; + mbox->common.opcode = MYRS_CMD_OP_IOCTL; + mbox->common.id = MYRS_DCMD_TAG; + mbox->common.control.dma_ctrl_to_host = true; + mbox->common.control.no_autosense = true; + if (rebuild) { + mbox->ldev_info.ldev.ldev_num = ldev_num; + mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START; + } else { + mbox->ldev_info.ldev.ldev_num = ldev_num; + mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP; + } + myrs_exec_cmd(cs, cmd_blk); + status = cmd_blk->status; + mutex_unlock(&cs->dcmd_mutex); + if (status) { + sdev_printk(KERN_INFO, sdev, + "Rebuild Not %s, status 0x%02x\n", + rebuild ? "Initiated" : "Cancelled", status); + ret = -EIO; + } else { + sdev_printk(KERN_INFO, sdev, "Rebuild %s\n", + rebuild ? 
"Initiated" : "Cancelled"); + ret = count; + } + + return ret; +} +static DEVICE_ATTR_RW(rebuild); + +static ssize_t consistency_check_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrs_hba *cs = shost_priv(sdev->host); + struct myrs_ldev_info *ldev_info; + unsigned short ldev_num; + + if (sdev->channel < cs->ctlr_info->physchan_present) + return snprintf(buf, 32, "physical device - not checking\n"); + + ldev_info = sdev->hostdata; + if (!ldev_info) + return -ENXIO; + ldev_num = ldev_info->ldev_num; + myrs_get_ldev_info(cs, ldev_num, ldev_info); + if (ldev_info->cc_active) + return snprintf(buf, 32, "checking block %zu of %zu\n", + (size_t)ldev_info->cc_lba, + (size_t)ldev_info->cfg_devsize); + else + return snprintf(buf, 32, "not checking\n"); +} + +static ssize_t consistency_check_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrs_hba *cs = shost_priv(sdev->host); + struct myrs_ldev_info *ldev_info; + struct myrs_cmdblk *cmd_blk; + union myrs_cmd_mbox *mbox; + unsigned short ldev_num; + unsigned char status; + int check, ret; + + if (sdev->channel < cs->ctlr_info->physchan_present) + return -EINVAL; + + ldev_info = sdev->hostdata; + if (!ldev_info) + return -ENXIO; + ldev_num = ldev_info->ldev_num; + + ret = kstrtoint(buf, 0, &check); + if (ret) + return ret; + + status = myrs_get_ldev_info(cs, ldev_num, ldev_info); + if (status != MYRS_STATUS_SUCCESS) { + sdev_printk(KERN_INFO, sdev, + "Failed to get device information, status 0x%02x\n", + status); + return -EIO; + } + if (check && ldev_info->cc_active) { + sdev_printk(KERN_INFO, sdev, + "Consistency Check Not Initiated; " + "already in progress\n"); + return -EALREADY; + } + if (!check && !ldev_info->cc_active) { + sdev_printk(KERN_INFO, sdev, + "Consistency Check Not Cancelled; " + "check not in progress\n"); + return count; + } + + mutex_lock(&cs->dcmd_mutex); + cmd_blk = &cs->dcmd_blk; + myrs_reset_cmd(cmd_blk); + mbox = &cmd_blk->mbox; + mbox->common.opcode = MYRS_CMD_OP_IOCTL; + mbox->common.id = MYRS_DCMD_TAG; + mbox->common.control.dma_ctrl_to_host = true; + mbox->common.control.no_autosense = true; + if (check) { + mbox->cc.ldev.ldev_num = ldev_num; + mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START; + mbox->cc.restore_consistency = true; + mbox->cc.initialized_area_only = false; + } else { + mbox->cc.ldev.ldev_num = ldev_num; + mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP; + } + myrs_exec_cmd(cs, cmd_blk); + status = cmd_blk->status; + mutex_unlock(&cs->dcmd_mutex); + if (status != MYRS_STATUS_SUCCESS) { + sdev_printk(KERN_INFO, sdev, + "Consistency Check Not %s, status 0x%02x\n", + check ? "Initiated" : "Cancelled", status); + ret = -EIO; + } else { + sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n", + check ? 
"Initiated" : "Cancelled"); + ret = count; + } + + return ret; +} +static DEVICE_ATTR_RW(consistency_check); + +static struct attribute *myrs_sdev_attrs[] = { + &dev_attr_consistency_check.attr, + &dev_attr_rebuild.attr, + &dev_attr_raid_state.attr, + &dev_attr_raid_level.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(myrs_sdev); + +static ssize_t serial_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct myrs_hba *cs = shost_priv(shost); + char serial[17]; + + memcpy(serial, cs->ctlr_info->serial_number, 16); + serial[16] = '\0'; + return snprintf(buf, 16, "%s\n", serial); +} +static DEVICE_ATTR_RO(serial); + +static ssize_t ctlr_num_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct myrs_hba *cs = shost_priv(shost); + + return snprintf(buf, 20, "%d\n", cs->host->host_no); +} +static DEVICE_ATTR_RO(ctlr_num); + +static struct myrs_cpu_type_tbl { + enum myrs_cpu_type type; + char *name; +} myrs_cpu_type_names[] = { + { MYRS_CPUTYPE_i960CA, "i960CA" }, + { MYRS_CPUTYPE_i960RD, "i960RD" }, + { MYRS_CPUTYPE_i960RN, "i960RN" }, + { MYRS_CPUTYPE_i960RP, "i960RP" }, + { MYRS_CPUTYPE_NorthBay, "NorthBay" }, + { MYRS_CPUTYPE_StrongArm, "StrongARM" }, + { MYRS_CPUTYPE_i960RM, "i960RM" }, +}; + +static ssize_t processor_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct myrs_hba *cs = shost_priv(shost); + struct myrs_cpu_type_tbl *tbl; + const char *first_processor = NULL; + const char *second_processor = NULL; + struct myrs_ctlr_info *info = cs->ctlr_info; + ssize_t ret; + int i; + + if (info->cpu[0].cpu_count) { + tbl = myrs_cpu_type_names; + for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) { + if (tbl[i].type == info->cpu[0].cpu_type) { + first_processor = tbl[i].name; + break; + } + } + } + if (info->cpu[1].cpu_count) { + tbl = myrs_cpu_type_names; + for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) { + if (tbl[i].type == info->cpu[1].cpu_type) { + second_processor = tbl[i].name; + break; + } + } + } + if (first_processor && second_processor) + ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n" + "2: %s (%s, %d cpus)\n", + info->cpu[0].cpu_name, + first_processor, info->cpu[0].cpu_count, + info->cpu[1].cpu_name, + second_processor, info->cpu[1].cpu_count); + else if (first_processor && !second_processor) + ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n", + info->cpu[0].cpu_name, + first_processor, info->cpu[0].cpu_count); + else if (!first_processor && second_processor) + ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n", + info->cpu[1].cpu_name, + second_processor, info->cpu[1].cpu_count); + else + ret = snprintf(buf, 64, "1: absent\n2: absent\n"); + + return ret; +} +static DEVICE_ATTR_RO(processor); + +static ssize_t model_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct myrs_hba *cs = shost_priv(shost); + + return snprintf(buf, 28, "%s\n", cs->model_name); +} +static DEVICE_ATTR_RO(model); + +static ssize_t ctlr_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct myrs_hba *cs = shost_priv(shost); + + return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type); +} +static DEVICE_ATTR_RO(ctlr_type); + +static ssize_t cache_size_show(struct device *dev, + struct device_attribute *attr, 
char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct myrs_hba *cs = shost_priv(shost); + + return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb); +} +static DEVICE_ATTR_RO(cache_size); + +static ssize_t firmware_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct myrs_hba *cs = shost_priv(shost); + + return snprintf(buf, 16, "%d.%02d-%02d\n", + cs->ctlr_info->fw_major_version, + cs->ctlr_info->fw_minor_version, + cs->ctlr_info->fw_turn_number); +} +static DEVICE_ATTR_RO(firmware); + +static ssize_t discovery_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct myrs_hba *cs = shost_priv(shost); + struct myrs_cmdblk *cmd_blk; + union myrs_cmd_mbox *mbox; + unsigned char status; + + mutex_lock(&cs->dcmd_mutex); + cmd_blk = &cs->dcmd_blk; + myrs_reset_cmd(cmd_blk); + mbox = &cmd_blk->mbox; + mbox->common.opcode = MYRS_CMD_OP_IOCTL; + mbox->common.id = MYRS_DCMD_TAG; + mbox->common.control.dma_ctrl_to_host = true; + mbox->common.control.no_autosense = true; + mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY; + myrs_exec_cmd(cs, cmd_blk); + status = cmd_blk->status; + mutex_unlock(&cs->dcmd_mutex); + if (status != MYRS_STATUS_SUCCESS) { + shost_printk(KERN_INFO, shost, + "Discovery Not Initiated, status %02X\n", + status); + return -EINVAL; + } + shost_printk(KERN_INFO, shost, "Discovery Initiated\n"); + cs->next_evseq = 0; + cs->needs_update = true; + queue_delayed_work(cs->work_q, &cs->monitor_work, 1); + flush_delayed_work(&cs->monitor_work); + shost_printk(KERN_INFO, shost, "Discovery Completed\n"); + + return count; +} +static DEVICE_ATTR_WO(discovery); + +static ssize_t flush_cache_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct myrs_hba *cs = shost_priv(shost); + unsigned char status; + + status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, + MYRS_RAID_CONTROLLER); + if (status == MYRS_STATUS_SUCCESS) { + shost_printk(KERN_INFO, shost, "Cache Flush Completed\n"); + return count; + } + shost_printk(KERN_INFO, shost, + "Cache Flush failed, status 0x%02x\n", status); + return -EIO; +} +static DEVICE_ATTR_WO(flush_cache); + +static ssize_t disable_enclosure_messages_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct myrs_hba *cs = shost_priv(shost); + + return snprintf(buf, 3, "%d\n", cs->disable_enc_msg); +} + +static ssize_t disable_enclosure_messages_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrs_hba *cs = shost_priv(sdev->host); + int value, ret; + + ret = kstrtoint(buf, 0, &value); + if (ret) + return ret; + + if (value > 2) + return -EINVAL; + + cs->disable_enc_msg = value; + return count; +} +static DEVICE_ATTR_RW(disable_enclosure_messages); + +static struct attribute *myrs_shost_attrs[] = { + &dev_attr_serial.attr, + &dev_attr_ctlr_num.attr, + &dev_attr_processor.attr, + &dev_attr_model.attr, + &dev_attr_ctlr_type.attr, + &dev_attr_cache_size.attr, + &dev_attr_firmware.attr, + &dev_attr_discovery.attr, + &dev_attr_flush_cache.attr, + &dev_attr_disable_enclosure_messages.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(myrs_shost); + +/* + * SCSI midlayer interface + */ +static int 
myrs_host_reset(struct scsi_cmnd *scmd) +{ + struct Scsi_Host *shost = scmd->device->host; + struct myrs_hba *cs = shost_priv(shost); + + cs->reset(cs->io_base); + return SUCCESS; +} + +static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd, + struct myrs_ldev_info *ldev_info) +{ + unsigned char modes[32], *mode_pg; + bool dbd; + size_t mode_len; + + dbd = (scmd->cmnd[1] & 0x08) == 0x08; + if (dbd) { + mode_len = 24; + mode_pg = &modes[4]; + } else { + mode_len = 32; + mode_pg = &modes[12]; + } + memset(modes, 0, sizeof(modes)); + modes[0] = mode_len - 1; + modes[2] = 0x10; /* Enable FUA */ + if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO) + modes[2] |= 0x80; + if (!dbd) { + unsigned char *block_desc = &modes[4]; + + modes[3] = 8; + put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]); + put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]); + } + mode_pg[0] = 0x08; + mode_pg[1] = 0x12; + if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED) + mode_pg[2] |= 0x01; + if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED || + ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED) + mode_pg[2] |= 0x04; + if (ldev_info->cacheline_size) { + mode_pg[2] |= 0x08; + put_unaligned_be16(1 << ldev_info->cacheline_size, + &mode_pg[14]); + } + + scsi_sg_copy_from_buffer(scmd, modes, mode_len); +} + +static int myrs_queuecommand(struct Scsi_Host *shost, + struct scsi_cmnd *scmd) +{ + struct request *rq = scsi_cmd_to_rq(scmd); + struct myrs_hba *cs = shost_priv(shost); + struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd); + union myrs_cmd_mbox *mbox = &cmd_blk->mbox; + struct scsi_device *sdev = scmd->device; + union myrs_sgl *hw_sge; + dma_addr_t sense_addr; + struct scatterlist *sgl; + unsigned long flags, timeout; + int nsge; + + if (!scmd->device->hostdata) { + scmd->result = (DID_NO_CONNECT << 16); + scsi_done(scmd); + return 0; + } + + switch (scmd->cmnd[0]) { + case REPORT_LUNS: + scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0x0); + scsi_done(scmd); + return 0; + case MODE_SENSE: + if (scmd->device->channel >= cs->ctlr_info->physchan_present) { + struct myrs_ldev_info *ldev_info = sdev->hostdata; + + if ((scmd->cmnd[2] & 0x3F) != 0x3F && + (scmd->cmnd[2] & 0x3F) != 0x08) { + /* Illegal request, invalid field in CDB */ + scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); + } else { + myrs_mode_sense(cs, scmd, ldev_info); + scmd->result = (DID_OK << 16); + } + scsi_done(scmd); + return 0; + } + break; + } + + myrs_reset_cmd(cmd_blk); + cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC, + &sense_addr); + if (!cmd_blk->sense) + return SCSI_MLQUEUE_HOST_BUSY; + cmd_blk->sense_addr = sense_addr; + + timeout = rq->timeout; + if (scmd->cmd_len <= 10) { + if (scmd->device->channel >= cs->ctlr_info->physchan_present) { + struct myrs_ldev_info *ldev_info = sdev->hostdata; + + mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10; + mbox->SCSI_10.pdev.lun = ldev_info->lun; + mbox->SCSI_10.pdev.target = ldev_info->target; + mbox->SCSI_10.pdev.channel = ldev_info->channel; + mbox->SCSI_10.pdev.ctlr = 0; + } else { + mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU; + mbox->SCSI_10.pdev.lun = sdev->lun; + mbox->SCSI_10.pdev.target = sdev->id; + mbox->SCSI_10.pdev.channel = sdev->channel; + } + mbox->SCSI_10.id = rq->tag + 3; + mbox->SCSI_10.control.dma_ctrl_to_host = + (scmd->sc_data_direction == DMA_FROM_DEVICE); + if (rq->cmd_flags & REQ_FUA) + mbox->SCSI_10.control.fua = true; + mbox->SCSI_10.dma_size = scsi_bufflen(scmd); + 
mbox->SCSI_10.sense_addr = cmd_blk->sense_addr; + mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE; + mbox->SCSI_10.cdb_len = scmd->cmd_len; + if (timeout > 60) { + mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES; + mbox->SCSI_10.tmo.tmo_val = timeout / 60; + } else { + mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS; + mbox->SCSI_10.tmo.tmo_val = timeout; + } + memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len); + hw_sge = &mbox->SCSI_10.dma_addr; + cmd_blk->dcdb = NULL; + } else { + dma_addr_t dcdb_dma; + + cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC, + &dcdb_dma); + if (!cmd_blk->dcdb) { + dma_pool_free(cs->sense_pool, cmd_blk->sense, + cmd_blk->sense_addr); + cmd_blk->sense = NULL; + cmd_blk->sense_addr = 0; + return SCSI_MLQUEUE_HOST_BUSY; + } + cmd_blk->dcdb_dma = dcdb_dma; + if (scmd->device->channel >= cs->ctlr_info->physchan_present) { + struct myrs_ldev_info *ldev_info = sdev->hostdata; + + mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256; + mbox->SCSI_255.pdev.lun = ldev_info->lun; + mbox->SCSI_255.pdev.target = ldev_info->target; + mbox->SCSI_255.pdev.channel = ldev_info->channel; + mbox->SCSI_255.pdev.ctlr = 0; + } else { + mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU; + mbox->SCSI_255.pdev.lun = sdev->lun; + mbox->SCSI_255.pdev.target = sdev->id; + mbox->SCSI_255.pdev.channel = sdev->channel; + } + mbox->SCSI_255.id = rq->tag + 3; + mbox->SCSI_255.control.dma_ctrl_to_host = + (scmd->sc_data_direction == DMA_FROM_DEVICE); + if (rq->cmd_flags & REQ_FUA) + mbox->SCSI_255.control.fua = true; + mbox->SCSI_255.dma_size = scsi_bufflen(scmd); + mbox->SCSI_255.sense_addr = cmd_blk->sense_addr; + mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE; + mbox->SCSI_255.cdb_len = scmd->cmd_len; + mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma; + if (timeout > 60) { + mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES; + mbox->SCSI_255.tmo.tmo_val = timeout / 60; + } else { + mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS; + mbox->SCSI_255.tmo.tmo_val = timeout; + } + memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len); + hw_sge = &mbox->SCSI_255.dma_addr; + } + if (scmd->sc_data_direction == DMA_NONE) + goto submit; + nsge = scsi_dma_map(scmd); + if (nsge == 1) { + sgl = scsi_sglist(scmd); + hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl); + hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl); + } else { + struct myrs_sge *hw_sgl; + dma_addr_t hw_sgl_addr; + int i; + + if (nsge > 2) { + hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC, + &hw_sgl_addr); + if (WARN_ON(!hw_sgl)) { + if (cmd_blk->dcdb) { + dma_pool_free(cs->dcdb_pool, + cmd_blk->dcdb, + cmd_blk->dcdb_dma); + cmd_blk->dcdb = NULL; + cmd_blk->dcdb_dma = 0; + } + dma_pool_free(cs->sense_pool, + cmd_blk->sense, + cmd_blk->sense_addr); + cmd_blk->sense = NULL; + cmd_blk->sense_addr = 0; + return SCSI_MLQUEUE_HOST_BUSY; + } + cmd_blk->sgl = hw_sgl; + cmd_blk->sgl_addr = hw_sgl_addr; + if (scmd->cmd_len <= 10) + mbox->SCSI_10.control.add_sge_mem = true; + else + mbox->SCSI_255.control.add_sge_mem = true; + hw_sge->ext.sge0_len = nsge; + hw_sge->ext.sge0_addr = cmd_blk->sgl_addr; + } else + hw_sgl = hw_sge->sge; + + scsi_for_each_sg(scmd, sgl, nsge, i) { + if (WARN_ON(!hw_sgl)) { + scsi_dma_unmap(scmd); + scmd->result = (DID_ERROR << 16); + scsi_done(scmd); + return 0; + } + hw_sgl->sge_addr = (u64)sg_dma_address(sgl); + hw_sgl->sge_count = (u64)sg_dma_len(sgl); + hw_sgl++; + } + } +submit: + spin_lock_irqsave(&cs->queue_lock, flags); + myrs_qcmd(cs, cmd_blk); + spin_unlock_irqrestore(&cs->queue_lock, flags); + + 
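/* The command now belongs to the controller; scsi_done() is called from the completion path. */ +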
return 0; +} + +static unsigned short myrs_translate_ldev(struct myrs_hba *cs, + struct scsi_device *sdev) +{ + unsigned short ldev_num; + unsigned int chan_offset = + sdev->channel - cs->ctlr_info->physchan_present; + + ldev_num = sdev->id + chan_offset * sdev->host->max_id; + + return ldev_num; +} + +static int myrs_slave_alloc(struct scsi_device *sdev) +{ + struct myrs_hba *cs = shost_priv(sdev->host); + unsigned char status; + + if (sdev->channel > sdev->host->max_channel) + return 0; + + if (sdev->channel >= cs->ctlr_info->physchan_present) { + struct myrs_ldev_info *ldev_info; + unsigned short ldev_num; + + if (sdev->lun > 0) + return -ENXIO; + + ldev_num = myrs_translate_ldev(cs, sdev); + + ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL); + if (!ldev_info) + return -ENOMEM; + + status = myrs_get_ldev_info(cs, ldev_num, ldev_info); + if (status != MYRS_STATUS_SUCCESS) { + sdev->hostdata = NULL; + kfree(ldev_info); + } else { + enum raid_level level; + + dev_dbg(&sdev->sdev_gendev, + "Logical device mapping %d:%d:%d -> %d\n", + ldev_info->channel, ldev_info->target, + ldev_info->lun, ldev_info->ldev_num); + + sdev->hostdata = ldev_info; + switch (ldev_info->raid_level) { + case MYRS_RAID_LEVEL0: + level = RAID_LEVEL_LINEAR; + break; + case MYRS_RAID_LEVEL1: + level = RAID_LEVEL_1; + break; + case MYRS_RAID_LEVEL3: + case MYRS_RAID_LEVEL3F: + case MYRS_RAID_LEVEL3L: + level = RAID_LEVEL_3; + break; + case MYRS_RAID_LEVEL5: + case MYRS_RAID_LEVEL5L: + level = RAID_LEVEL_5; + break; + case MYRS_RAID_LEVEL6: + level = RAID_LEVEL_6; + break; + case MYRS_RAID_LEVELE: + case MYRS_RAID_NEWSPAN: + case MYRS_RAID_SPAN: + level = RAID_LEVEL_LINEAR; + break; + case MYRS_RAID_JBOD: + level = RAID_LEVEL_JBOD; + break; + default: + level = RAID_LEVEL_UNKNOWN; + break; + } + raid_set_level(myrs_raid_template, + &sdev->sdev_gendev, level); + if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) { + const char *name; + + name = myrs_devstate_name(ldev_info->dev_state); + sdev_printk(KERN_DEBUG, sdev, + "logical device in state %s\n", + name ? 
name : "Invalid"); + } + } + } else { + struct myrs_pdev_info *pdev_info; + + pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL); + if (!pdev_info) + return -ENOMEM; + + status = myrs_get_pdev_info(cs, sdev->channel, + sdev->id, sdev->lun, + pdev_info); + if (status != MYRS_STATUS_SUCCESS) { + sdev->hostdata = NULL; + kfree(pdev_info); + return -ENXIO; + } + sdev->hostdata = pdev_info; + } + return 0; +} + +static int myrs_slave_configure(struct scsi_device *sdev) +{ + struct myrs_hba *cs = shost_priv(sdev->host); + struct myrs_ldev_info *ldev_info; + + if (sdev->channel > sdev->host->max_channel) + return -ENXIO; + + if (sdev->channel < cs->ctlr_info->physchan_present) { + /* Skip HBA device */ + if (sdev->type == TYPE_RAID) + return -ENXIO; + sdev->no_uld_attach = 1; + return 0; + } + if (sdev->lun != 0) + return -ENXIO; + + ldev_info = sdev->hostdata; + if (!ldev_info) + return -ENXIO; + if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED || + ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED) + sdev->wce_default_on = 1; + sdev->tagged_supported = 1; + return 0; +} + +static void myrs_slave_destroy(struct scsi_device *sdev) +{ + kfree(sdev->hostdata); +} + +static const struct scsi_host_template myrs_template = { + .module = THIS_MODULE, + .name = "DAC960", + .proc_name = "myrs", + .queuecommand = myrs_queuecommand, + .eh_host_reset_handler = myrs_host_reset, + .slave_alloc = myrs_slave_alloc, + .slave_configure = myrs_slave_configure, + .slave_destroy = myrs_slave_destroy, + .cmd_size = sizeof(struct myrs_cmdblk), + .shost_groups = myrs_shost_groups, + .sdev_groups = myrs_sdev_groups, + .this_id = -1, +}; + +static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev, + const struct pci_device_id *entry) +{ + struct Scsi_Host *shost; + struct myrs_hba *cs; + + shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba)); + if (!shost) + return NULL; + + shost->max_cmd_len = 16; + shost->max_lun = 256; + cs = shost_priv(shost); + mutex_init(&cs->dcmd_mutex); + mutex_init(&cs->cinfo_mutex); + cs->host = shost; + + return cs; +} + +/* + * RAID template functions + */ + +/** + * myrs_is_raid - return boolean indicating device is raid volume + * @dev: the device struct object + */ +static int +myrs_is_raid(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrs_hba *cs = shost_priv(sdev->host); + + return (sdev->channel >= cs->ctlr_info->physchan_present) ? 
1 : 0; +} + +/** + * myrs_get_resync - get raid volume resync percent complete + * @dev: the device struct object + */ +static void +myrs_get_resync(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrs_hba *cs = shost_priv(sdev->host); + struct myrs_ldev_info *ldev_info = sdev->hostdata; + u64 percent_complete = 0; + + if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info) + return; + if (ldev_info->rbld_active) { + unsigned short ldev_num = ldev_info->ldev_num; + + myrs_get_ldev_info(cs, ldev_num, ldev_info); + percent_complete = ldev_info->rbld_lba * 100; + do_div(percent_complete, ldev_info->cfg_devsize); + } + raid_set_resync(myrs_raid_template, dev, percent_complete); +} + +/** + * myrs_get_state - get raid volume status + * @dev: the device struct object + */ +static void +myrs_get_state(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct myrs_hba *cs = shost_priv(sdev->host); + struct myrs_ldev_info *ldev_info = sdev->hostdata; + enum raid_state state = RAID_STATE_UNKNOWN; + + if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info) + state = RAID_STATE_UNKNOWN; + else { + switch (ldev_info->dev_state) { + case MYRS_DEVICE_ONLINE: + state = RAID_STATE_ACTIVE; + break; + case MYRS_DEVICE_SUSPECTED_CRITICAL: + case MYRS_DEVICE_CRITICAL: + state = RAID_STATE_DEGRADED; + break; + case MYRS_DEVICE_REBUILD: + state = RAID_STATE_RESYNCING; + break; + case MYRS_DEVICE_UNCONFIGURED: + case MYRS_DEVICE_INVALID_STATE: + state = RAID_STATE_UNKNOWN; + break; + default: + state = RAID_STATE_OFFLINE; + } + } + raid_set_state(myrs_raid_template, dev, state); +} + +static struct raid_function_template myrs_raid_functions = { + .cookie = &myrs_template, + .is_raid = myrs_is_raid, + .get_resync = myrs_get_resync, + .get_state = myrs_get_state, +}; + +/* + * PCI interface functions + */ +static void myrs_flush_cache(struct myrs_hba *cs) +{ + myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER); +} + +static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk, + struct scsi_cmnd *scmd) +{ + unsigned char status; + + if (!cmd_blk) + return; + + scsi_dma_unmap(scmd); + status = cmd_blk->status; + if (cmd_blk->sense) { + if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) { + unsigned int sense_len = SCSI_SENSE_BUFFERSIZE; + + if (sense_len > cmd_blk->sense_len) + sense_len = cmd_blk->sense_len; + memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len); + } + dma_pool_free(cs->sense_pool, cmd_blk->sense, + cmd_blk->sense_addr); + cmd_blk->sense = NULL; + cmd_blk->sense_addr = 0; + } + if (cmd_blk->dcdb) { + dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb, + cmd_blk->dcdb_dma); + cmd_blk->dcdb = NULL; + cmd_blk->dcdb_dma = 0; + } + if (cmd_blk->sgl) { + dma_pool_free(cs->sg_pool, cmd_blk->sgl, + cmd_blk->sgl_addr); + cmd_blk->sgl = NULL; + cmd_blk->sgl_addr = 0; + } + if (cmd_blk->residual) + scsi_set_resid(scmd, cmd_blk->residual); + if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE || + status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2) + scmd->result = (DID_BAD_TARGET << 16); + else + scmd->result = (DID_OK << 16) | status; + scsi_done(scmd); +} + +static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk) +{ + if (!cmd_blk) + return; + + if (cmd_blk->complete) { + complete(cmd_blk->complete); + cmd_blk->complete = NULL; + } +} + +static void myrs_monitor(struct work_struct *work) +{ + struct myrs_hba *cs = container_of(work, struct myrs_hba, + monitor_work.work); + struct 
Scsi_Host *shost = cs->host; + struct myrs_ctlr_info *info = cs->ctlr_info; + unsigned int epoch = cs->fwstat_buf->epoch; + unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL; + unsigned char status; + + dev_dbg(&shost->shost_gendev, "monitor tick\n"); + + status = myrs_get_fwstatus(cs); + + if (cs->needs_update) { + cs->needs_update = false; + mutex_lock(&cs->cinfo_mutex); + status = myrs_get_ctlr_info(cs); + mutex_unlock(&cs->cinfo_mutex); + } + if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) { + status = myrs_get_event(cs, cs->next_evseq, + cs->event_buf); + if (status == MYRS_STATUS_SUCCESS) { + myrs_log_event(cs, cs->event_buf); + cs->next_evseq++; + interval = 1; + } + } + + if (time_after(jiffies, cs->secondary_monitor_time + + MYRS_SECONDARY_MONITOR_INTERVAL)) + cs->secondary_monitor_time = jiffies; + + if (info->bg_init_active + + info->ldev_init_active + + info->pdev_init_active + + info->cc_active + + info->rbld_active + + info->exp_active != 0) { + struct scsi_device *sdev; + + shost_for_each_device(sdev, shost) { + struct myrs_ldev_info *ldev_info; + int ldev_num; + + if (sdev->channel < info->physchan_present) + continue; + ldev_info = sdev->hostdata; + if (!ldev_info) + continue; + ldev_num = ldev_info->ldev_num; + myrs_get_ldev_info(cs, ldev_num, ldev_info); + } + cs->needs_update = true; + } + if (epoch == cs->epoch && + cs->fwstat_buf->next_evseq == cs->next_evseq && + (cs->needs_update == false || + time_before(jiffies, cs->primary_monitor_time + + MYRS_PRIMARY_MONITOR_INTERVAL))) { + interval = MYRS_SECONDARY_MONITOR_INTERVAL; + } + + if (interval > 1) + cs->primary_monitor_time = jiffies; + queue_delayed_work(cs->work_q, &cs->monitor_work, interval); +} + +static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs) +{ + struct Scsi_Host *shost = cs->host; + size_t elem_size, elem_align; + + elem_align = sizeof(struct myrs_sge); + elem_size = shost->sg_tablesize * elem_align; + cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev, + elem_size, elem_align, 0); + if (cs->sg_pool == NULL) { + shost_printk(KERN_ERR, shost, + "Failed to allocate SG pool\n"); + return false; + } + + cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev, + MYRS_SENSE_SIZE, sizeof(int), 0); + if (cs->sense_pool == NULL) { + dma_pool_destroy(cs->sg_pool); + cs->sg_pool = NULL; + shost_printk(KERN_ERR, shost, + "Failed to allocate sense data pool\n"); + return false; + } + + cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev, + MYRS_DCDB_SIZE, + sizeof(unsigned char), 0); + if (!cs->dcdb_pool) { + dma_pool_destroy(cs->sg_pool); + cs->sg_pool = NULL; + dma_pool_destroy(cs->sense_pool); + cs->sense_pool = NULL; + shost_printk(KERN_ERR, shost, + "Failed to allocate DCDB pool\n"); + return false; + } + + snprintf(cs->work_q_name, sizeof(cs->work_q_name), + "myrs_wq_%d", shost->host_no); + cs->work_q = create_singlethread_workqueue(cs->work_q_name); + if (!cs->work_q) { + dma_pool_destroy(cs->dcdb_pool); + cs->dcdb_pool = NULL; + dma_pool_destroy(cs->sg_pool); + cs->sg_pool = NULL; + dma_pool_destroy(cs->sense_pool); + cs->sense_pool = NULL; + shost_printk(KERN_ERR, shost, + "Failed to create workqueue\n"); + return false; + } + + /* Initialize the Monitoring Timer. 
*/ + INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor); + queue_delayed_work(cs->work_q, &cs->monitor_work, 1); + + return true; +} + +static void myrs_destroy_mempools(struct myrs_hba *cs) +{ + cancel_delayed_work_sync(&cs->monitor_work); + destroy_workqueue(cs->work_q); + + dma_pool_destroy(cs->sg_pool); + dma_pool_destroy(cs->dcdb_pool); + dma_pool_destroy(cs->sense_pool); +} + +static void myrs_unmap(struct myrs_hba *cs) +{ + kfree(cs->event_buf); + kfree(cs->ctlr_info); + if (cs->fwstat_buf) { + dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat), + cs->fwstat_buf, cs->fwstat_addr); + cs->fwstat_buf = NULL; + } + if (cs->first_stat_mbox) { + dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size, + cs->first_stat_mbox, cs->stat_mbox_addr); + cs->first_stat_mbox = NULL; + } + if (cs->first_cmd_mbox) { + dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size, + cs->first_cmd_mbox, cs->cmd_mbox_addr); + cs->first_cmd_mbox = NULL; + } +} + +static void myrs_cleanup(struct myrs_hba *cs) +{ + struct pci_dev *pdev = cs->pdev; + + /* Free the memory mailbox, status, and related structures */ + myrs_unmap(cs); + + if (cs->mmio_base) { + if (cs->disable_intr) + cs->disable_intr(cs); + iounmap(cs->mmio_base); + cs->mmio_base = NULL; + } + if (cs->irq) + free_irq(cs->irq, cs); + if (cs->io_addr) + release_region(cs->io_addr, 0x80); + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); + scsi_host_put(cs->host); +} + +static struct myrs_hba *myrs_detect(struct pci_dev *pdev, + const struct pci_device_id *entry) +{ + struct myrs_privdata *privdata = + (struct myrs_privdata *)entry->driver_data; + irq_handler_t irq_handler = privdata->irq_handler; + unsigned int mmio_size = privdata->mmio_size; + struct myrs_hba *cs = NULL; + + cs = myrs_alloc_host(pdev, entry); + if (!cs) { + dev_err(&pdev->dev, "Unable to allocate Controller\n"); + return NULL; + } + cs->pdev = pdev; + + if (pci_enable_device(pdev)) + goto Failure; + + cs->pci_addr = pci_resource_start(pdev, 0); + + pci_set_drvdata(pdev, cs); + spin_lock_init(&cs->queue_lock); + /* Map the Controller Register Window. */ + if (mmio_size < PAGE_SIZE) + mmio_size = PAGE_SIZE; + cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size); + if (cs->mmio_base == NULL) { + dev_err(&pdev->dev, + "Unable to map Controller Register Window\n"); + goto Failure; + } + + cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK); + if (privdata->hw_init(pdev, cs, cs->io_base)) + goto Failure; + + /* Acquire shared access to the IRQ Channel. */ + if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) { + dev_err(&pdev->dev, + "Unable to acquire IRQ Channel %d\n", pdev->irq); + goto Failure; + } + cs->irq = pdev->irq; + return cs; + +Failure: + dev_err(&pdev->dev, + "Failed to initialize Controller\n"); + myrs_cleanup(cs); + return NULL; +} + +/* + * myrs_err_status reports Controller BIOS Messages passed through + * the Error Status Register when the driver performs the BIOS handshaking. + * It returns true for fatal errors and false otherwise. 
+ */ + +static bool myrs_err_status(struct myrs_hba *cs, unsigned char status, + unsigned char parm0, unsigned char parm1) +{ + struct pci_dev *pdev = cs->pdev; + + switch (status) { + case 0x00: + dev_info(&pdev->dev, + "Physical Device %d:%d Not Responding\n", + parm1, parm0); + break; + case 0x08: + dev_notice(&pdev->dev, "Spinning Up Drives\n"); + break; + case 0x30: + dev_notice(&pdev->dev, "Configuration Checksum Error\n"); + break; + case 0x60: + dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n"); + break; + case 0x70: + dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n"); + break; + case 0x90: + dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n", + parm1, parm0); + break; + case 0xA0: + dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n"); + break; + case 0xB0: + dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n"); + break; + case 0xD0: + dev_notice(&pdev->dev, "New Controller Configuration Found\n"); + break; + case 0xF0: + dev_err(&pdev->dev, "Fatal Memory Parity Error\n"); + return true; + default: + dev_err(&pdev->dev, "Unknown Initialization Error %02X\n", + status); + return true; + } + return false; +} + +/* + * Hardware-specific functions + */ + +/* + * DAC960 GEM Series Controllers. + */ + +static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base) +{ + __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24); + + writel(val, base + DAC960_GEM_IDB_READ_OFFSET); +} + +static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base) +{ + __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24); + + writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET); +} + +static inline void DAC960_GEM_reset_ctrl(void __iomem *base) +{ + __le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24); + + writel(val, base + DAC960_GEM_IDB_READ_OFFSET); +} + +static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base) +{ + __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24); + + writel(val, base + DAC960_GEM_IDB_READ_OFFSET); +} + +static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base) +{ + __le32 val; + + val = readl(base + DAC960_GEM_IDB_READ_OFFSET); + return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL; +} + +static inline bool DAC960_GEM_init_in_progress(void __iomem *base) +{ + __le32 val; + + val = readl(base + DAC960_GEM_IDB_READ_OFFSET); + return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS; +} + +static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base) +{ + __le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24); + + writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET); +} + +static inline void DAC960_GEM_ack_intr(void __iomem *base) +{ + __le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ | + DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24); + + writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET); +} + +static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base) +{ + __le32 val; + + val = readl(base + DAC960_GEM_ODB_READ_OFFSET); + return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL; +} + +static inline void DAC960_GEM_enable_intr(void __iomem *base) +{ + __le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ | + DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24); + writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET); +} + +static inline void DAC960_GEM_disable_intr(void __iomem *base) +{ + __le32 val = 0; + + writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET); +} + +static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox, + union 
myrs_cmd_mbox *mbox) +{ + memcpy(&mem_mbox->words[1], &mbox->words[1], + sizeof(union myrs_cmd_mbox) - sizeof(unsigned int)); + /* Barrier to avoid reordering */ + wmb(); + mem_mbox->words[0] = mbox->words[0]; + /* Barrier to force PCI access */ + mb(); +} + +static inline void DAC960_GEM_write_hw_mbox(void __iomem *base, + dma_addr_t cmd_mbox_addr) +{ + dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET); +} + +static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base) +{ + return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2); +} + +static inline bool +DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error, + unsigned char *param0, unsigned char *param1) +{ + __le32 val; + + val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET); + if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING)) + return false; + *error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24); + *param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0); + *param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1); + writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET); + return true; +} + +static inline unsigned char +DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr) +{ + unsigned char status; + + while (DAC960_GEM_hw_mbox_is_full(base)) + udelay(1); + DAC960_GEM_write_hw_mbox(base, mbox_addr); + DAC960_GEM_hw_mbox_new_cmd(base); + while (!DAC960_GEM_hw_mbox_status_available(base)) + udelay(1); + status = DAC960_GEM_read_cmd_status(base); + DAC960_GEM_ack_hw_mbox_intr(base); + DAC960_GEM_ack_hw_mbox_status(base); + + return status; +} + +static int DAC960_GEM_hw_init(struct pci_dev *pdev, + struct myrs_hba *cs, void __iomem *base) +{ + int timeout = 0; + unsigned char status, parm0, parm1; + + DAC960_GEM_disable_intr(base); + DAC960_GEM_ack_hw_mbox_status(base); + udelay(1000); + while (DAC960_GEM_init_in_progress(base) && + timeout < MYRS_MAILBOX_TIMEOUT) { + if (DAC960_GEM_read_error_status(base, &status, + &parm0, &parm1) && + myrs_err_status(cs, status, parm0, parm1)) + return -EIO; + udelay(10); + timeout++; + } + if (timeout == MYRS_MAILBOX_TIMEOUT) { + dev_err(&pdev->dev, + "Timeout waiting for Controller Initialisation\n"); + return -ETIMEDOUT; + } + if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) { + dev_err(&pdev->dev, + "Unable to Enable Memory Mailbox Interface\n"); + DAC960_GEM_reset_ctrl(base); + return -EAGAIN; + } + DAC960_GEM_enable_intr(base); + cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox; + cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd; + cs->disable_intr = DAC960_GEM_disable_intr; + cs->reset = DAC960_GEM_reset_ctrl; + return 0; +} + +static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg) +{ + struct myrs_hba *cs = arg; + void __iomem *base = cs->io_base; + struct myrs_stat_mbox *next_stat_mbox; + unsigned long flags; + + spin_lock_irqsave(&cs->queue_lock, flags); + DAC960_GEM_ack_intr(base); + next_stat_mbox = cs->next_stat_mbox; + while (next_stat_mbox->id > 0) { + unsigned short id = next_stat_mbox->id; + struct scsi_cmnd *scmd = NULL; + struct myrs_cmdblk *cmd_blk = NULL; + + if (id == MYRS_DCMD_TAG) + cmd_blk = &cs->dcmd_blk; + else if (id == MYRS_MCMD_TAG) + cmd_blk = &cs->mcmd_blk; + else { + scmd = scsi_host_find_tag(cs->host, id - 3); + if (scmd) + cmd_blk = scsi_cmd_priv(scmd); + } + if (cmd_blk) { + cmd_blk->status = next_stat_mbox->status; + cmd_blk->sense_len = next_stat_mbox->sense_len; + cmd_blk->residual = next_stat_mbox->residual; + } else + dev_err(&cs->pdev->dev, + "Unhandled command completion %d\n", id); + + 
memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox)); + if (++next_stat_mbox > cs->last_stat_mbox) + next_stat_mbox = cs->first_stat_mbox; + + if (cmd_blk) { + if (id < 3) + myrs_handle_cmdblk(cs, cmd_blk); + else + myrs_handle_scsi(cs, cmd_blk, scmd); + } + } + cs->next_stat_mbox = next_stat_mbox; + spin_unlock_irqrestore(&cs->queue_lock, flags); + return IRQ_HANDLED; +} + +static struct myrs_privdata DAC960_GEM_privdata = { + .hw_init = DAC960_GEM_hw_init, + .irq_handler = DAC960_GEM_intr_handler, + .mmio_size = DAC960_GEM_mmio_size, +}; + +/* + * DAC960 BA Series Controllers. + */ + +static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base) +{ + writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET); +} + +static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base) +{ + writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET); +} + +static inline void DAC960_BA_reset_ctrl(void __iomem *base) +{ + writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET); +} + +static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base) +{ + writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET); +} + +static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base) +{ + u8 val; + + val = readb(base + DAC960_BA_IDB_OFFSET); + return !(val & DAC960_BA_IDB_HWMBOX_EMPTY); +} + +static inline bool DAC960_BA_init_in_progress(void __iomem *base) +{ + u8 val; + + val = readb(base + DAC960_BA_IDB_OFFSET); + return !(val & DAC960_BA_IDB_INIT_DONE); +} + +static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base) +{ + writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET); +} + +static inline void DAC960_BA_ack_intr(void __iomem *base) +{ + writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ, + base + DAC960_BA_ODB_OFFSET); +} + +static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base) +{ + u8 val; + + val = readb(base + DAC960_BA_ODB_OFFSET); + return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL; +} + +static inline void DAC960_BA_enable_intr(void __iomem *base) +{ + writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET); +} + +static inline void DAC960_BA_disable_intr(void __iomem *base) +{ + writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET); +} + +static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox, + union myrs_cmd_mbox *mbox) +{ + memcpy(&mem_mbox->words[1], &mbox->words[1], + sizeof(union myrs_cmd_mbox) - sizeof(unsigned int)); + /* Barrier to avoid reordering */ + wmb(); + mem_mbox->words[0] = mbox->words[0]; + /* Barrier to force PCI access */ + mb(); +} + + +static inline void DAC960_BA_write_hw_mbox(void __iomem *base, + dma_addr_t cmd_mbox_addr) +{ + dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET); +} + +static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base) +{ + return readw(base + DAC960_BA_CMDSTS_OFFSET + 2); +} + +static inline bool +DAC960_BA_read_error_status(void __iomem *base, unsigned char *error, + unsigned char *param0, unsigned char *param1) +{ + u8 val; + + val = readb(base + DAC960_BA_ERRSTS_OFFSET); + if (!(val & DAC960_BA_ERRSTS_PENDING)) + return false; + val &= ~DAC960_BA_ERRSTS_PENDING; + *error = val; + *param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0); + *param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1); + writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET); + return true; +} + +static inline unsigned char +DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr) +{ + unsigned char status; + + while 
(DAC960_BA_hw_mbox_is_full(base)) + udelay(1); + DAC960_BA_write_hw_mbox(base, mbox_addr); + DAC960_BA_hw_mbox_new_cmd(base); + while (!DAC960_BA_hw_mbox_status_available(base)) + udelay(1); + status = DAC960_BA_read_cmd_status(base); + DAC960_BA_ack_hw_mbox_intr(base); + DAC960_BA_ack_hw_mbox_status(base); + + return status; +} + +static int DAC960_BA_hw_init(struct pci_dev *pdev, + struct myrs_hba *cs, void __iomem *base) +{ + int timeout = 0; + unsigned char status, parm0, parm1; + + DAC960_BA_disable_intr(base); + DAC960_BA_ack_hw_mbox_status(base); + udelay(1000); + while (DAC960_BA_init_in_progress(base) && + timeout < MYRS_MAILBOX_TIMEOUT) { + if (DAC960_BA_read_error_status(base, &status, + &parm0, &parm1) && + myrs_err_status(cs, status, parm0, parm1)) + return -EIO; + udelay(10); + timeout++; + } + if (timeout == MYRS_MAILBOX_TIMEOUT) { + dev_err(&pdev->dev, + "Timeout waiting for Controller Initialisation\n"); + return -ETIMEDOUT; + } + if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) { + dev_err(&pdev->dev, + "Unable to Enable Memory Mailbox Interface\n"); + DAC960_BA_reset_ctrl(base); + return -EAGAIN; + } + DAC960_BA_enable_intr(base); + cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox; + cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd; + cs->disable_intr = DAC960_BA_disable_intr; + cs->reset = DAC960_BA_reset_ctrl; + return 0; +} + +static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg) +{ + struct myrs_hba *cs = arg; + void __iomem *base = cs->io_base; + struct myrs_stat_mbox *next_stat_mbox; + unsigned long flags; + + spin_lock_irqsave(&cs->queue_lock, flags); + DAC960_BA_ack_intr(base); + next_stat_mbox = cs->next_stat_mbox; + while (next_stat_mbox->id > 0) { + unsigned short id = next_stat_mbox->id; + struct scsi_cmnd *scmd = NULL; + struct myrs_cmdblk *cmd_blk = NULL; + + if (id == MYRS_DCMD_TAG) + cmd_blk = &cs->dcmd_blk; + else if (id == MYRS_MCMD_TAG) + cmd_blk = &cs->mcmd_blk; + else { + scmd = scsi_host_find_tag(cs->host, id - 3); + if (scmd) + cmd_blk = scsi_cmd_priv(scmd); + } + if (cmd_blk) { + cmd_blk->status = next_stat_mbox->status; + cmd_blk->sense_len = next_stat_mbox->sense_len; + cmd_blk->residual = next_stat_mbox->residual; + } else + dev_err(&cs->pdev->dev, + "Unhandled command completion %d\n", id); + + memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox)); + if (++next_stat_mbox > cs->last_stat_mbox) + next_stat_mbox = cs->first_stat_mbox; + + if (cmd_blk) { + if (id < 3) + myrs_handle_cmdblk(cs, cmd_blk); + else + myrs_handle_scsi(cs, cmd_blk, scmd); + } + } + cs->next_stat_mbox = next_stat_mbox; + spin_unlock_irqrestore(&cs->queue_lock, flags); + return IRQ_HANDLED; +} + +static struct myrs_privdata DAC960_BA_privdata = { + .hw_init = DAC960_BA_hw_init, + .irq_handler = DAC960_BA_intr_handler, + .mmio_size = DAC960_BA_mmio_size, +}; + +/* + * DAC960 LP Series Controllers. 
+ */ + +static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base) +{ + writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET); +} + +static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base) +{ + writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET); +} + +static inline void DAC960_LP_reset_ctrl(void __iomem *base) +{ + writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET); +} + +static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base) +{ + writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET); +} + +static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base) +{ + u8 val; + + val = readb(base + DAC960_LP_IDB_OFFSET); + return val & DAC960_LP_IDB_HWMBOX_FULL; +} + +static inline bool DAC960_LP_init_in_progress(void __iomem *base) +{ + u8 val; + + val = readb(base + DAC960_LP_IDB_OFFSET); + return val & DAC960_LP_IDB_INIT_IN_PROGRESS; +} + +static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base) +{ + writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET); +} + +static inline void DAC960_LP_ack_intr(void __iomem *base) +{ + writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ, + base + DAC960_LP_ODB_OFFSET); +} + +static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base) +{ + u8 val; + + val = readb(base + DAC960_LP_ODB_OFFSET); + return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL; +} + +static inline void DAC960_LP_enable_intr(void __iomem *base) +{ + writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET); +} + +static inline void DAC960_LP_disable_intr(void __iomem *base) +{ + writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET); +} + +static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox, + union myrs_cmd_mbox *mbox) +{ + memcpy(&mem_mbox->words[1], &mbox->words[1], + sizeof(union myrs_cmd_mbox) - sizeof(unsigned int)); + /* Barrier to avoid reordering */ + wmb(); + mem_mbox->words[0] = mbox->words[0]; + /* Barrier to force PCI access */ + mb(); +} + +static inline void DAC960_LP_write_hw_mbox(void __iomem *base, + dma_addr_t cmd_mbox_addr) +{ + dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET); +} + +static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base) +{ + return readw(base + DAC960_LP_CMDSTS_OFFSET + 2); +} + +static inline bool +DAC960_LP_read_error_status(void __iomem *base, unsigned char *error, + unsigned char *param0, unsigned char *param1) +{ + u8 val; + + val = readb(base + DAC960_LP_ERRSTS_OFFSET); + if (!(val & DAC960_LP_ERRSTS_PENDING)) + return false; + val &= ~DAC960_LP_ERRSTS_PENDING; + *error = val; + *param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0); + *param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1); + writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET); + return true; +} + +static inline unsigned char +DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr) +{ + unsigned char status; + + while (DAC960_LP_hw_mbox_is_full(base)) + udelay(1); + DAC960_LP_write_hw_mbox(base, mbox_addr); + DAC960_LP_hw_mbox_new_cmd(base); + while (!DAC960_LP_hw_mbox_status_available(base)) + udelay(1); + status = DAC960_LP_read_cmd_status(base); + DAC960_LP_ack_hw_mbox_intr(base); + DAC960_LP_ack_hw_mbox_status(base); + + return status; +} + +static int DAC960_LP_hw_init(struct pci_dev *pdev, + struct myrs_hba *cs, void __iomem *base) +{ + int timeout = 0; + unsigned char status, parm0, parm1; + + DAC960_LP_disable_intr(base); + DAC960_LP_ack_hw_mbox_status(base); + udelay(1000); + while 
(DAC960_LP_init_in_progress(base) && + timeout < MYRS_MAILBOX_TIMEOUT) { + if (DAC960_LP_read_error_status(base, &status, + &parm0, &parm1) && + myrs_err_status(cs, status, parm0, parm1)) + return -EIO; + udelay(10); + timeout++; + } + if (timeout == MYRS_MAILBOX_TIMEOUT) { + dev_err(&pdev->dev, + "Timeout waiting for Controller Initialisation\n"); + return -ETIMEDOUT; + } + if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) { + dev_err(&pdev->dev, + "Unable to Enable Memory Mailbox Interface\n"); + DAC960_LP_reset_ctrl(base); + return -ENODEV; + } + DAC960_LP_enable_intr(base); + cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox; + cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd; + cs->disable_intr = DAC960_LP_disable_intr; + cs->reset = DAC960_LP_reset_ctrl; + + return 0; +} + +static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg) +{ + struct myrs_hba *cs = arg; + void __iomem *base = cs->io_base; + struct myrs_stat_mbox *next_stat_mbox; + unsigned long flags; + + spin_lock_irqsave(&cs->queue_lock, flags); + DAC960_LP_ack_intr(base); + next_stat_mbox = cs->next_stat_mbox; + while (next_stat_mbox->id > 0) { + unsigned short id = next_stat_mbox->id; + struct scsi_cmnd *scmd = NULL; + struct myrs_cmdblk *cmd_blk = NULL; + + if (id == MYRS_DCMD_TAG) + cmd_blk = &cs->dcmd_blk; + else if (id == MYRS_MCMD_TAG) + cmd_blk = &cs->mcmd_blk; + else { + scmd = scsi_host_find_tag(cs->host, id - 3); + if (scmd) + cmd_blk = scsi_cmd_priv(scmd); + } + if (cmd_blk) { + cmd_blk->status = next_stat_mbox->status; + cmd_blk->sense_len = next_stat_mbox->sense_len; + cmd_blk->residual = next_stat_mbox->residual; + } else + dev_err(&cs->pdev->dev, + "Unhandled command completion %d\n", id); + + memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox)); + if (++next_stat_mbox > cs->last_stat_mbox) + next_stat_mbox = cs->first_stat_mbox; + + if (cmd_blk) { + if (id < 3) + myrs_handle_cmdblk(cs, cmd_blk); + else + myrs_handle_scsi(cs, cmd_blk, scmd); + } + } + cs->next_stat_mbox = next_stat_mbox; + spin_unlock_irqrestore(&cs->queue_lock, flags); + return IRQ_HANDLED; +} + +static struct myrs_privdata DAC960_LP_privdata = { + .hw_init = DAC960_LP_hw_init, + .irq_handler = DAC960_LP_intr_handler, + .mmio_size = DAC960_LP_mmio_size, +}; + +/* + * Module functions + */ +static int +myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry) +{ + struct myrs_hba *cs; + int ret; + + cs = myrs_detect(dev, entry); + if (!cs) + return -ENODEV; + + ret = myrs_get_config(cs); + if (ret < 0) { + myrs_cleanup(cs); + return ret; + } + + if (!myrs_create_mempools(dev, cs)) { + ret = -ENOMEM; + goto failed; + } + + ret = scsi_add_host(cs->host, &dev->dev); + if (ret) { + dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret); + myrs_destroy_mempools(cs); + goto failed; + } + scsi_scan_host(cs->host); + return 0; +failed: + myrs_cleanup(cs); + return ret; +} + + +static void myrs_remove(struct pci_dev *pdev) +{ + struct myrs_hba *cs = pci_get_drvdata(pdev); + + if (cs == NULL) + return; + + shost_printk(KERN_NOTICE, cs->host, "Flushing Cache..."); + myrs_flush_cache(cs); + myrs_destroy_mempools(cs); + myrs_cleanup(cs); +} + + +static const struct pci_device_id myrs_id_table[] = { + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX, + PCI_DEVICE_ID_MYLEX_DAC960_GEM, + PCI_VENDOR_ID_MYLEX, PCI_ANY_ID), + .driver_data = (unsigned long) &DAC960_GEM_privdata, + }, + { + PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata), + }, + { + PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata), + }, + {0, }, +}; + 
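The GEM, BA, and LP blocks above all feed one probe path: each pci_device_id entry stores a pointer to its myrs_privdata in .driver_data, and myrs_detect() casts that back to pick up the variant's hw_init, irq_handler, and mmio_size. A rough, self-contained sketch of that dispatch pattern only (all demo_* names and the device ID are hypothetical, not taken from this patch):

#include <linux/module.h>
#include <linux/pci.h>

struct demo_privdata {
	int (*hw_init)(struct pci_dev *pdev);	/* variant-specific bring-up */
	unsigned int mmio_size;			/* size of the register window */
};

static int demo_gem_hw_init(struct pci_dev *pdev)
{
	return 0;	/* a real driver would map BAR 0 and set up its mailbox here */
}

static const struct demo_privdata demo_gem_privdata = {
	.hw_init	= demo_gem_hw_init,
	.mmio_size	= 0x600,
};

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *entry)
{
	/* Recover the per-variant hooks stashed in .driver_data. */
	const struct demo_privdata *priv = (void *)entry->driver_data;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	return priv->hw_init(pdev);
}

static void demo_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static const struct pci_device_id demo_id_table[] = {
	{ PCI_DEVICE(0x1069, 0xb166),	/* hypothetical Mylex device ID */
	  .driver_data = (kernel_ulong_t)&demo_gem_privdata },
	{ }
};
MODULE_DEVICE_TABLE(pci, demo_id_table);

static struct pci_driver demo_pci_driver = {
	.name		= "demo",
	.id_table	= demo_id_table,
	.probe		= demo_probe,
	.remove		= demo_remove,
};
module_pci_driver(demo_pci_driver);

MODULE_LICENSE("GPL");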
+MODULE_DEVICE_TABLE(pci, myrs_id_table); + +static struct pci_driver myrs_pci_driver = { + .name = "myrs", + .id_table = myrs_id_table, + .probe = myrs_probe, + .remove = myrs_remove, +}; + +static int __init myrs_init_module(void) +{ + int ret; + + myrs_raid_template = raid_class_attach(&myrs_raid_functions); + if (!myrs_raid_template) + return -ENODEV; + + ret = pci_register_driver(&myrs_pci_driver); + if (ret) + raid_class_release(myrs_raid_template); + + return ret; +} + +static void __exit myrs_cleanup_module(void) +{ + pci_unregister_driver(&myrs_pci_driver); + raid_class_release(myrs_raid_template); +} + +module_init(myrs_init_module); +module_exit(myrs_cleanup_module); + +MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)"); +MODULE_AUTHOR("Hannes Reinecke "); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h new file mode 100644 index 000000000..9f6696d0d --- /dev/null +++ b/drivers/scsi/myrs.h @@ -0,0 +1,1134 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers + * + * This driver supports the newer, SCSI-based firmware interface only. + * + * Copyright 2018 Hannes Reinecke, SUSE Linux GmbH + * + * Based on the original DAC960 driver, which has + * Copyright 1998-2001 by Leonard N. Zubkoff + * Portions Copyright 2002 by Mylex (An IBM Business Unit) + */ + +#ifndef _MYRS_H +#define _MYRS_H + +#define MYRS_MAILBOX_TIMEOUT 1000000 + +#define MYRS_DCMD_TAG 1 +#define MYRS_MCMD_TAG 2 + +#define MYRS_LINE_BUFFER_SIZE 128 + +#define MYRS_PRIMARY_MONITOR_INTERVAL (10 * HZ) +#define MYRS_SECONDARY_MONITOR_INTERVAL (60 * HZ) + +/* Maximum number of Scatter/Gather Segments supported */ +#define MYRS_SG_LIMIT 128 + +/* + * Number of Command and Status Mailboxes used by the + * DAC960 V2 Firmware Memory Mailbox Interface. + */ +#define MYRS_MAX_CMD_MBOX 512 +#define MYRS_MAX_STAT_MBOX 512 + +#define MYRS_DCDB_SIZE 16 +#define MYRS_SENSE_SIZE 14 + +/* + * DAC960 V2 Firmware Command Opcodes. + */ +enum myrs_cmd_opcode { + MYRS_CMD_OP_MEMCOPY = 0x01, + MYRS_CMD_OP_SCSI_10_PASSTHRU = 0x02, + MYRS_CMD_OP_SCSI_255_PASSTHRU = 0x03, + MYRS_CMD_OP_SCSI_10 = 0x04, + MYRS_CMD_OP_SCSI_256 = 0x05, + MYRS_CMD_OP_IOCTL = 0x20, +} __packed; + +/* + * DAC960 V2 Firmware IOCTL Opcodes. + */ +enum myrs_ioctl_opcode { + MYRS_IOCTL_GET_CTLR_INFO = 0x01, + MYRS_IOCTL_GET_LDEV_INFO_VALID = 0x03, + MYRS_IOCTL_GET_PDEV_INFO_VALID = 0x05, + MYRS_IOCTL_GET_HEALTH_STATUS = 0x11, + MYRS_IOCTL_GET_EVENT = 0x15, + MYRS_IOCTL_START_DISCOVERY = 0x81, + MYRS_IOCTL_SET_DEVICE_STATE = 0x82, + MYRS_IOCTL_INIT_PDEV_START = 0x84, + MYRS_IOCTL_INIT_PDEV_STOP = 0x85, + MYRS_IOCTL_INIT_LDEV_START = 0x86, + MYRS_IOCTL_INIT_LDEV_STOP = 0x87, + MYRS_IOCTL_RBLD_DEVICE_START = 0x88, + MYRS_IOCTL_RBLD_DEVICE_STOP = 0x89, + MYRS_IOCTL_MAKE_CONSISTENT_START = 0x8A, + MYRS_IOCTL_MAKE_CONSISTENT_STOP = 0x8B, + MYRS_IOCTL_CC_START = 0x8C, + MYRS_IOCTL_CC_STOP = 0x8D, + MYRS_IOCTL_SET_MEM_MBOX = 0x8E, + MYRS_IOCTL_RESET_DEVICE = 0x90, + MYRS_IOCTL_FLUSH_DEVICE_DATA = 0x91, + MYRS_IOCTL_PAUSE_DEVICE = 0x92, + MYRS_IOCTL_UNPAUS_EDEVICE = 0x93, + MYRS_IOCTL_LOCATE_DEVICE = 0x94, + MYRS_IOCTL_CREATE_CONFIGURATION = 0xC0, + MYRS_IOCTL_DELETE_LDEV = 0xC1, + MYRS_IOCTL_REPLACE_INTERNALDEVICE = 0xC2, + MYRS_IOCTL_RENAME_LDEV = 0xC3, + MYRS_IOCTL_ADD_CONFIGURATION = 0xC4, + MYRS_IOCTL_XLATE_PDEV_TO_LDEV = 0xC5, + MYRS_IOCTL_CLEAR_CONFIGURATION = 0xCA, +} __packed; + +/* + * DAC960 V2 Firmware Command Status Codes. 
+ */ +#define MYRS_STATUS_SUCCESS 0x00 +#define MYRS_STATUS_FAILED 0x02 +#define MYRS_STATUS_DEVICE_BUSY 0x08 +#define MYRS_STATUS_DEVICE_NON_RESPONSIVE 0x0E +#define MYRS_STATUS_DEVICE_NON_RESPONSIVE2 0x0F +#define MYRS_STATUS_RESERVATION_CONFLICT 0x18 + +/* + * DAC960 V2 Firmware Memory Type structure. + */ +struct myrs_mem_type { + enum { + MYRS_MEMTYPE_RESERVED = 0x00, + MYRS_MEMTYPE_DRAM = 0x01, + MYRS_MEMTYPE_EDRAM = 0x02, + MYRS_MEMTYPE_EDO = 0x03, + MYRS_MEMTYPE_SDRAM = 0x04, + MYRS_MEMTYPE_LAST = 0x1F, + } __packed mem_type:5; /* Byte 0 Bits 0-4 */ + unsigned rsvd:1; /* Byte 0 Bit 5 */ + unsigned mem_parity:1; /* Byte 0 Bit 6 */ + unsigned mem_ecc:1; /* Byte 0 Bit 7 */ +}; + +/* + * DAC960 V2 Firmware Processor Type structure. + */ +enum myrs_cpu_type { + MYRS_CPUTYPE_i960CA = 0x01, + MYRS_CPUTYPE_i960RD = 0x02, + MYRS_CPUTYPE_i960RN = 0x03, + MYRS_CPUTYPE_i960RP = 0x04, + MYRS_CPUTYPE_NorthBay = 0x05, + MYRS_CPUTYPE_StrongArm = 0x06, + MYRS_CPUTYPE_i960RM = 0x07, +} __packed; + +/* + * DAC960 V2 Firmware Get Controller Info reply structure. + */ +struct myrs_ctlr_info { + unsigned char rsvd1; /* Byte 0 */ + enum { + MYRS_SCSI_BUS = 0x00, + MYRS_Fibre_BUS = 0x01, + MYRS_PCI_BUS = 0x03 + } __packed bus; /* Byte 1 */ + enum { + MYRS_CTLR_DAC960E = 0x01, + MYRS_CTLR_DAC960M = 0x08, + MYRS_CTLR_DAC960PD = 0x10, + MYRS_CTLR_DAC960PL = 0x11, + MYRS_CTLR_DAC960PU = 0x12, + MYRS_CTLR_DAC960PE = 0x13, + MYRS_CTLR_DAC960PG = 0x14, + MYRS_CTLR_DAC960PJ = 0x15, + MYRS_CTLR_DAC960PTL0 = 0x16, + MYRS_CTLR_DAC960PR = 0x17, + MYRS_CTLR_DAC960PRL = 0x18, + MYRS_CTLR_DAC960PT = 0x19, + MYRS_CTLR_DAC1164P = 0x1A, + MYRS_CTLR_DAC960PTL1 = 0x1B, + MYRS_CTLR_EXR2000P = 0x1C, + MYRS_CTLR_EXR3000P = 0x1D, + MYRS_CTLR_ACCELERAID352 = 0x1E, + MYRS_CTLR_ACCELERAID170 = 0x1F, + MYRS_CTLR_ACCELERAID160 = 0x20, + MYRS_CTLR_DAC960S = 0x60, + MYRS_CTLR_DAC960SU = 0x61, + MYRS_CTLR_DAC960SX = 0x62, + MYRS_CTLR_DAC960SF = 0x63, + MYRS_CTLR_DAC960SS = 0x64, + MYRS_CTLR_DAC960FL = 0x65, + MYRS_CTLR_DAC960LL = 0x66, + MYRS_CTLR_DAC960FF = 0x67, + MYRS_CTLR_DAC960HP = 0x68, + MYRS_CTLR_RAIDBRICK = 0x69, + MYRS_CTLR_METEOR_FL = 0x6A, + MYRS_CTLR_METEOR_FF = 0x6B + } __packed ctlr_type; /* Byte 2 */ + unsigned char rsvd2; /* Byte 3 */ + unsigned short bus_speed_mhz; /* Bytes 4-5 */ + unsigned char bus_width; /* Byte 6 */ + unsigned char flash_code; /* Byte 7 */ + unsigned char ports_present; /* Byte 8 */ + unsigned char rsvd3[7]; /* Bytes 9-15 */ + unsigned char bus_name[16]; /* Bytes 16-31 */ + unsigned char ctlr_name[16]; /* Bytes 32-47 */ + unsigned char rsvd4[16]; /* Bytes 48-63 */ + /* Firmware Release Information */ + unsigned char fw_major_version; /* Byte 64 */ + unsigned char fw_minor_version; /* Byte 65 */ + unsigned char fw_turn_number; /* Byte 66 */ + unsigned char fw_build_number; /* Byte 67 */ + unsigned char fw_release_day; /* Byte 68 */ + unsigned char fw_release_month; /* Byte 69 */ + unsigned char fw_release_year_hi; /* Byte 70 */ + unsigned char fw_release_year_lo; /* Byte 71 */ + /* Hardware Release Information */ + unsigned char hw_rev; /* Byte 72 */ + unsigned char rsvd5[3]; /* Bytes 73-75 */ + unsigned char hw_release_day; /* Byte 76 */ + unsigned char hw_release_month; /* Byte 77 */ + unsigned char hw_release_year_hi; /* Byte 78 */ + unsigned char hw_release_year_lo; /* Byte 79 */ + /* Hardware Manufacturing Information */ + unsigned char manuf_batch_num; /* Byte 80 */ + unsigned char rsvd6; /* Byte 81 */ + unsigned char manuf_plant_num; /* Byte 82 */ + unsigned char rsvd7; /* Byte 83 */ + 
unsigned char hw_manuf_day; /* Byte 84 */ + unsigned char hw_manuf_month; /* Byte 85 */ + unsigned char hw_manuf_year_hi; /* Byte 86 */ + unsigned char hw_manuf_year_lo; /* Byte 87 */ + unsigned char max_pd_per_xld; /* Byte 88 */ + unsigned char max_ild_per_xld; /* Byte 89 */ + unsigned short nvram_size_kb; /* Bytes 90-91 */ + unsigned char max_xld; /* Byte 92 */ + unsigned char rsvd8[3]; /* Bytes 93-95 */ + /* Unique Information per Controller */ + unsigned char serial_number[16]; /* Bytes 96-111 */ + unsigned char rsvd9[16]; /* Bytes 112-127 */ + /* Vendor Information */ + unsigned char rsvd10[3]; /* Bytes 128-130 */ + unsigned char oem_code; /* Byte 131 */ + unsigned char vendor[16]; /* Bytes 132-147 */ + /* Other Physical/Controller/Operation Information */ + unsigned char bbu_present:1; /* Byte 148 Bit 0 */ + unsigned char cluster_mode:1; /* Byte 148 Bit 1 */ + unsigned char rsvd11:6; /* Byte 148 Bits 2-7 */ + unsigned char rsvd12[3]; /* Bytes 149-151 */ + /* Physical Device Scan Information */ + unsigned char pscan_active:1; /* Byte 152 Bit 0 */ + unsigned char rsvd13:7; /* Byte 152 Bits 1-7 */ + unsigned char pscan_chan; /* Byte 153 */ + unsigned char pscan_target; /* Byte 154 */ + unsigned char pscan_lun; /* Byte 155 */ + /* Maximum Command Data Transfer Sizes */ + unsigned short max_transfer_size; /* Bytes 156-157 */ + unsigned short max_sge; /* Bytes 158-159 */ + /* Logical/Physical Device Counts */ + unsigned short ldev_present; /* Bytes 160-161 */ + unsigned short ldev_critical; /* Bytes 162-163 */ + unsigned short ldev_offline; /* Bytes 164-165 */ + unsigned short pdev_present; /* Bytes 166-167 */ + unsigned short pdisk_present; /* Bytes 168-169 */ + unsigned short pdisk_critical; /* Bytes 170-171 */ + unsigned short pdisk_offline; /* Bytes 172-173 */ + unsigned short max_tcq; /* Bytes 174-175 */ + /* Channel and Target ID Information */ + unsigned char physchan_present; /* Byte 176 */ + unsigned char virtchan_present; /* Byte 177 */ + unsigned char physchan_max; /* Byte 178 */ + unsigned char virtchan_max; /* Byte 179 */ + unsigned char max_targets[16]; /* Bytes 180-195 */ + unsigned char rsvd14[12]; /* Bytes 196-207 */ + /* Memory/Cache Information */ + unsigned short mem_size_mb; /* Bytes 208-209 */ + unsigned short cache_size_mb; /* Bytes 210-211 */ + unsigned int valid_cache_bytes; /* Bytes 212-215 */ + unsigned int dirty_cache_bytes; /* Bytes 216-219 */ + unsigned short mem_speed_mhz; /* Bytes 220-221 */ + unsigned char mem_data_width; /* Byte 222 */ + struct myrs_mem_type mem_type; /* Byte 223 */ + unsigned char cache_mem_type_name[16]; /* Bytes 224-239 */ + /* Execution Memory Information */ + unsigned short exec_mem_size_mb; /* Bytes 240-241 */ + unsigned short exec_l2_cache_size_mb; /* Bytes 242-243 */ + unsigned char rsvd15[8]; /* Bytes 244-251 */ + unsigned short exec_mem_speed_mhz; /* Bytes 252-253 */ + unsigned char exec_mem_data_width; /* Byte 254 */ + struct myrs_mem_type exec_mem_type; /* Byte 255 */ + unsigned char exec_mem_type_name[16]; /* Bytes 256-271 */ + /* CPU Type Information */ + struct { /* Bytes 272-335 */ + unsigned short cpu_speed_mhz; + enum myrs_cpu_type cpu_type; + unsigned char cpu_count; + unsigned char rsvd16[12]; + unsigned char cpu_name[16]; + } __packed cpu[2]; + /* Debugging/Profiling/Command Time Tracing Information */ + unsigned short cur_prof_page_num; /* Bytes 336-337 */ + unsigned short num_prof_waiters; /* Bytes 338-339 */ + unsigned short cur_trace_page_num; /* Bytes 340-341 */ + unsigned short num_trace_waiters; /* Bytes 342-343 
*/ + unsigned char rsvd18[8]; /* Bytes 344-351 */ + /* Error Counters on Physical Devices */ + unsigned short pdev_bus_resets; /* Bytes 352-353 */ + unsigned short pdev_parity_errors; /* Bytes 355-355 */ + unsigned short pdev_soft_errors; /* Bytes 356-357 */ + unsigned short pdev_cmds_failed; /* Bytes 358-359 */ + unsigned short pdev_misc_errors; /* Bytes 360-361 */ + unsigned short pdev_cmd_timeouts; /* Bytes 362-363 */ + unsigned short pdev_sel_timeouts; /* Bytes 364-365 */ + unsigned short pdev_retries_done; /* Bytes 366-367 */ + unsigned short pdev_aborts_done; /* Bytes 368-369 */ + unsigned short pdev_host_aborts_done; /* Bytes 370-371 */ + unsigned short pdev_predicted_failures; /* Bytes 372-373 */ + unsigned short pdev_host_cmds_failed; /* Bytes 374-375 */ + unsigned short pdev_hard_errors; /* Bytes 376-377 */ + unsigned char rsvd19[6]; /* Bytes 378-383 */ + /* Error Counters on Logical Devices */ + unsigned short ldev_soft_errors; /* Bytes 384-385 */ + unsigned short ldev_cmds_failed; /* Bytes 386-387 */ + unsigned short ldev_host_aborts_done; /* Bytes 388-389 */ + unsigned char rsvd20[2]; /* Bytes 390-391 */ + /* Error Counters on Controller */ + unsigned short ctlr_mem_errors; /* Bytes 392-393 */ + unsigned short ctlr_host_aborts_done; /* Bytes 394-395 */ + unsigned char rsvd21[4]; /* Bytes 396-399 */ + /* Long Duration Activity Information */ + unsigned short bg_init_active; /* Bytes 400-401 */ + unsigned short ldev_init_active; /* Bytes 402-403 */ + unsigned short pdev_init_active; /* Bytes 404-405 */ + unsigned short cc_active; /* Bytes 406-407 */ + unsigned short rbld_active; /* Bytes 408-409 */ + unsigned short exp_active; /* Bytes 410-411 */ + unsigned short patrol_active; /* Bytes 412-413 */ + unsigned char rsvd22[2]; /* Bytes 414-415 */ + /* Flash ROM Information */ + unsigned char flash_type; /* Byte 416 */ + unsigned char rsvd23; /* Byte 417 */ + unsigned short flash_size_MB; /* Bytes 418-419 */ + unsigned int flash_limit; /* Bytes 420-423 */ + unsigned int flash_count; /* Bytes 424-427 */ + unsigned char rsvd24[4]; /* Bytes 428-431 */ + unsigned char flash_type_name[16]; /* Bytes 432-447 */ + /* Firmware Run Time Information */ + unsigned char rbld_rate; /* Byte 448 */ + unsigned char bg_init_rate; /* Byte 449 */ + unsigned char fg_init_rate; /* Byte 450 */ + unsigned char cc_rate; /* Byte 451 */ + unsigned char rsvd25[4]; /* Bytes 452-455 */ + unsigned int max_dp; /* Bytes 456-459 */ + unsigned int free_dp; /* Bytes 460-463 */ + unsigned int max_iop; /* Bytes 464-467 */ + unsigned int free_iop; /* Bytes 468-471 */ + unsigned short max_combined_len; /* Bytes 472-473 */ + unsigned short num_cfg_groups; /* Bytes 474-475 */ + unsigned installation_abort_status:1; /* Byte 476 Bit 0 */ + unsigned maint_mode_status:1; /* Byte 476 Bit 1 */ + unsigned rsvd26:6; /* Byte 476 Bits 2-7 */ + unsigned char rsvd27[6]; /* Bytes 477-511 */ + unsigned char rsvd28[512]; /* Bytes 512-1023 */ +}; + +/* + * DAC960 V2 Firmware Device State type. 
+ */ +enum myrs_devstate { + MYRS_DEVICE_UNCONFIGURED = 0x00, + MYRS_DEVICE_ONLINE = 0x01, + MYRS_DEVICE_REBUILD = 0x03, + MYRS_DEVICE_MISSING = 0x04, + MYRS_DEVICE_SUSPECTED_CRITICAL = 0x05, + MYRS_DEVICE_OFFLINE = 0x08, + MYRS_DEVICE_CRITICAL = 0x09, + MYRS_DEVICE_SUSPECTED_DEAD = 0x0C, + MYRS_DEVICE_COMMANDED_OFFLINE = 0x10, + MYRS_DEVICE_STANDBY = 0x21, + MYRS_DEVICE_INVALID_STATE = 0xFF, +} __packed; + +/* + * DAC960 V2 RAID Levels + */ +enum myrs_raid_level { + MYRS_RAID_LEVEL0 = 0x0, /* RAID 0 */ + MYRS_RAID_LEVEL1 = 0x1, /* RAID 1 */ + MYRS_RAID_LEVEL3 = 0x3, /* RAID 3 right asymmetric parity */ + MYRS_RAID_LEVEL5 = 0x5, /* RAID 5 right asymmetric parity */ + MYRS_RAID_LEVEL6 = 0x6, /* RAID 6 (Mylex RAID 6) */ + MYRS_RAID_JBOD = 0x7, /* RAID 7 (JBOD) */ + MYRS_RAID_NEWSPAN = 0x8, /* New Mylex SPAN */ + MYRS_RAID_LEVEL3F = 0x9, /* RAID 3 fixed parity */ + MYRS_RAID_LEVEL3L = 0xb, /* RAID 3 left symmetric parity */ + MYRS_RAID_SPAN = 0xc, /* current spanning implementation */ + MYRS_RAID_LEVEL5L = 0xd, /* RAID 5 left symmetric parity */ + MYRS_RAID_LEVELE = 0xe, /* RAID E (concatenation) */ + MYRS_RAID_PHYSICAL = 0xf, /* physical device */ +} __packed; + +enum myrs_stripe_size { + MYRS_STRIPE_SIZE_0 = 0x0, /* no stripe (RAID 1, RAID 7, etc) */ + MYRS_STRIPE_SIZE_512B = 0x1, + MYRS_STRIPE_SIZE_1K = 0x2, + MYRS_STRIPE_SIZE_2K = 0x3, + MYRS_STRIPE_SIZE_4K = 0x4, + MYRS_STRIPE_SIZE_8K = 0x5, + MYRS_STRIPE_SIZE_16K = 0x6, + MYRS_STRIPE_SIZE_32K = 0x7, + MYRS_STRIPE_SIZE_64K = 0x8, + MYRS_STRIPE_SIZE_128K = 0x9, + MYRS_STRIPE_SIZE_256K = 0xa, + MYRS_STRIPE_SIZE_512K = 0xb, + MYRS_STRIPE_SIZE_1M = 0xc, +} __packed; + +enum myrs_cacheline_size { + MYRS_CACHELINE_ZERO = 0x0, /* caching cannot be enabled */ + MYRS_CACHELINE_512B = 0x1, + MYRS_CACHELINE_1K = 0x2, + MYRS_CACHELINE_2K = 0x3, + MYRS_CACHELINE_4K = 0x4, + MYRS_CACHELINE_8K = 0x5, + MYRS_CACHELINE_16K = 0x6, + MYRS_CACHELINE_32K = 0x7, + MYRS_CACHELINE_64K = 0x8, +} __packed; + +/* + * DAC960 V2 Firmware Get Logical Device Info reply structure. 
+ */ +struct myrs_ldev_info { + unsigned char ctlr; /* Byte 0 */ + unsigned char channel; /* Byte 1 */ + unsigned char target; /* Byte 2 */ + unsigned char lun; /* Byte 3 */ + enum myrs_devstate dev_state; /* Byte 4 */ + unsigned char raid_level; /* Byte 5 */ + enum myrs_stripe_size stripe_size; /* Byte 6 */ + enum myrs_cacheline_size cacheline_size; /* Byte 7 */ + struct { + enum { + MYRS_READCACHE_DISABLED = 0x0, + MYRS_READCACHE_ENABLED = 0x1, + MYRS_READAHEAD_ENABLED = 0x2, + MYRS_INTELLIGENT_READAHEAD_ENABLED = 0x3, + MYRS_READCACHE_LAST = 0x7, + } __packed rce:3; /* Byte 8 Bits 0-2 */ + enum { + MYRS_WRITECACHE_DISABLED = 0x0, + MYRS_LOGICALDEVICE_RO = 0x1, + MYRS_WRITECACHE_ENABLED = 0x2, + MYRS_INTELLIGENT_WRITECACHE_ENABLED = 0x3, + MYRS_WRITECACHE_LAST = 0x7, + } __packed wce:3; /* Byte 8 Bits 3-5 */ + unsigned rsvd1:1; /* Byte 8 Bit 6 */ + unsigned ldev_init_done:1; /* Byte 8 Bit 7 */ + } ldev_control; /* Byte 8 */ + /* Logical Device Operations Status */ + unsigned char cc_active:1; /* Byte 9 Bit 0 */ + unsigned char rbld_active:1; /* Byte 9 Bit 1 */ + unsigned char bg_init_active:1; /* Byte 9 Bit 2 */ + unsigned char fg_init_active:1; /* Byte 9 Bit 3 */ + unsigned char migration_active:1; /* Byte 9 Bit 4 */ + unsigned char patrol_active:1; /* Byte 9 Bit 5 */ + unsigned char rsvd2:2; /* Byte 9 Bits 6-7 */ + unsigned char raid5_writeupdate; /* Byte 10 */ + unsigned char raid5_algo; /* Byte 11 */ + unsigned short ldev_num; /* Bytes 12-13 */ + /* BIOS Info */ + unsigned char bios_disabled:1; /* Byte 14 Bit 0 */ + unsigned char cdrom_boot:1; /* Byte 14 Bit 1 */ + unsigned char drv_coercion:1; /* Byte 14 Bit 2 */ + unsigned char write_same_disabled:1; /* Byte 14 Bit 3 */ + unsigned char hba_mode:1; /* Byte 14 Bit 4 */ + enum { + MYRS_GEOMETRY_128_32 = 0x0, + MYRS_GEOMETRY_255_63 = 0x1, + MYRS_GEOMETRY_RSVD1 = 0x2, + MYRS_GEOMETRY_RSVD2 = 0x3 + } __packed drv_geom:2; /* Byte 14 Bits 5-6 */ + unsigned char super_ra_enabled:1; /* Byte 14 Bit 7 */ + unsigned char rsvd3; /* Byte 15 */ + /* Error Counters */ + unsigned short soft_errs; /* Bytes 16-17 */ + unsigned short cmds_failed; /* Bytes 18-19 */ + unsigned short cmds_aborted; /* Bytes 20-21 */ + unsigned short deferred_write_errs; /* Bytes 22-23 */ + unsigned int rsvd4; /* Bytes 24-27 */ + unsigned int rsvd5; /* Bytes 28-31 */ + /* Device Size Information */ + unsigned short rsvd6; /* Bytes 32-33 */ + unsigned short devsize_bytes; /* Bytes 34-35 */ + unsigned int orig_devsize; /* Bytes 36-39 */ + unsigned int cfg_devsize; /* Bytes 40-43 */ + unsigned int rsvd7; /* Bytes 44-47 */ + unsigned char ldev_name[32]; /* Bytes 48-79 */ + unsigned char inquiry[36]; /* Bytes 80-115 */ + unsigned char rsvd8[12]; /* Bytes 116-127 */ + u64 last_read_lba; /* Bytes 128-135 */ + u64 last_write_lba; /* Bytes 136-143 */ + u64 cc_lba; /* Bytes 144-151 */ + u64 rbld_lba; /* Bytes 152-159 */ + u64 bg_init_lba; /* Bytes 160-167 */ + u64 fg_init_lba; /* Bytes 168-175 */ + u64 migration_lba; /* Bytes 176-183 */ + u64 patrol_lba; /* Bytes 184-191 */ + unsigned char rsvd9[64]; /* Bytes 192-255 */ +}; + +/* + * DAC960 V2 Firmware Get Physical Device Info reply structure. 
+ */ +struct myrs_pdev_info { + unsigned char rsvd1; /* Byte 0 */ + unsigned char channel; /* Byte 1 */ + unsigned char target; /* Byte 2 */ + unsigned char lun; /* Byte 3 */ + /* Configuration Status Bits */ + unsigned char pdev_fault_tolerant:1; /* Byte 4 Bit 0 */ + unsigned char pdev_connected:1; /* Byte 4 Bit 1 */ + unsigned char pdev_local_to_ctlr:1; /* Byte 4 Bit 2 */ + unsigned char rsvd2:5; /* Byte 4 Bits 3-7 */ + /* Multiple Host/Controller Status Bits */ + unsigned char remote_host_dead:1; /* Byte 5 Bit 0 */ + unsigned char remove_ctlr_dead:1; /* Byte 5 Bit 1 */ + unsigned char rsvd3:6; /* Byte 5 Bits 2-7 */ + enum myrs_devstate dev_state; /* Byte 6 */ + unsigned char nego_data_width; /* Byte 7 */ + unsigned short nego_sync_rate; /* Bytes 8-9 */ + /* Multiported Physical Device Information */ + unsigned char num_ports; /* Byte 10 */ + unsigned char drv_access_bitmap; /* Byte 11 */ + unsigned int rsvd4; /* Bytes 12-15 */ + unsigned char ip_address[16]; /* Bytes 16-31 */ + unsigned short max_tags; /* Bytes 32-33 */ + /* Physical Device Operations Status */ + unsigned char cc_in_progress:1; /* Byte 34 Bit 0 */ + unsigned char rbld_in_progress:1; /* Byte 34 Bit 1 */ + unsigned char makecc_in_progress:1; /* Byte 34 Bit 2 */ + unsigned char pdevinit_in_progress:1; /* Byte 34 Bit 3 */ + unsigned char migration_in_progress:1; /* Byte 34 Bit 4 */ + unsigned char patrol_in_progress:1; /* Byte 34 Bit 5 */ + unsigned char rsvd5:2; /* Byte 34 Bits 6-7 */ + unsigned char long_op_status; /* Byte 35 */ + unsigned char parity_errs; /* Byte 36 */ + unsigned char soft_errs; /* Byte 37 */ + unsigned char hard_errs; /* Byte 38 */ + unsigned char misc_errs; /* Byte 39 */ + unsigned char cmd_timeouts; /* Byte 40 */ + unsigned char retries; /* Byte 41 */ + unsigned char aborts; /* Byte 42 */ + unsigned char pred_failures; /* Byte 43 */ + unsigned int rsvd6; /* Bytes 44-47 */ + unsigned short rsvd7; /* Bytes 48-49 */ + unsigned short devsize_bytes; /* Bytes 50-51 */ + unsigned int orig_devsize; /* Bytes 52-55 */ + unsigned int cfg_devsize; /* Bytes 56-59 */ + unsigned int rsvd8; /* Bytes 60-63 */ + unsigned char pdev_name[16]; /* Bytes 64-79 */ + unsigned char rsvd9[16]; /* Bytes 80-95 */ + unsigned char rsvd10[32]; /* Bytes 96-127 */ + unsigned char inquiry[36]; /* Bytes 128-163 */ + unsigned char rsvd11[20]; /* Bytes 164-183 */ + unsigned char rsvd12[8]; /* Bytes 184-191 */ + u64 last_read_lba; /* Bytes 192-199 */ + u64 last_write_lba; /* Bytes 200-207 */ + u64 cc_lba; /* Bytes 208-215 */ + u64 rbld_lba; /* Bytes 216-223 */ + u64 makecc_lba; /* Bytes 224-231 */ + u64 devinit_lba; /* Bytes 232-239 */ + u64 migration_lba; /* Bytes 240-247 */ + u64 patrol_lba; /* Bytes 248-255 */ + unsigned char rsvd13[256]; /* Bytes 256-511 */ +}; + +/* + * DAC960 V2 Firmware Health Status Buffer structure. + */ +struct myrs_fwstat { + unsigned int uptime_usecs; /* Bytes 0-3 */ + unsigned int uptime_msecs; /* Bytes 4-7 */ + unsigned int seconds; /* Bytes 8-11 */ + unsigned char rsvd1[4]; /* Bytes 12-15 */ + unsigned int epoch; /* Bytes 16-19 */ + unsigned char rsvd2[4]; /* Bytes 20-23 */ + unsigned int dbg_msgbuf_idx; /* Bytes 24-27 */ + unsigned int coded_msgbuf_idx; /* Bytes 28-31 */ + unsigned int cur_timetrace_page; /* Bytes 32-35 */ + unsigned int cur_prof_page; /* Bytes 36-39 */ + unsigned int next_evseq; /* Bytes 40-43 */ + unsigned char rsvd3[4]; /* Bytes 44-47 */ + unsigned char rsvd4[16]; /* Bytes 48-63 */ + unsigned char rsvd5[64]; /* Bytes 64-127 */ +}; + +/* + * DAC960 V2 Firmware Get Event reply structure. 
+ */ +struct myrs_event { + unsigned int ev_seq; /* Bytes 0-3 */ + unsigned int ev_time; /* Bytes 4-7 */ + unsigned int ev_code; /* Bytes 8-11 */ + unsigned char rsvd1; /* Byte 12 */ + unsigned char channel; /* Byte 13 */ + unsigned char target; /* Byte 14 */ + unsigned char lun; /* Byte 15 */ + unsigned int rsvd2; /* Bytes 16-19 */ + unsigned int ev_parm; /* Bytes 20-23 */ + unsigned char sense_data[40]; /* Bytes 24-63 */ +}; + +/* + * DAC960 V2 Firmware Command Control Bits structure. + */ +struct myrs_cmd_ctrl { + unsigned char fua:1; /* Byte 0 Bit 0 */ + unsigned char disable_pgout:1; /* Byte 0 Bit 1 */ + unsigned char rsvd1:1; /* Byte 0 Bit 2 */ + unsigned char add_sge_mem:1; /* Byte 0 Bit 3 */ + unsigned char dma_ctrl_to_host:1; /* Byte 0 Bit 4 */ + unsigned char rsvd2:1; /* Byte 0 Bit 5 */ + unsigned char no_autosense:1; /* Byte 0 Bit 6 */ + unsigned char disc_prohibited:1; /* Byte 0 Bit 7 */ +}; + +/* + * DAC960 V2 Firmware Command Timeout structure. + */ +struct myrs_cmd_tmo { + unsigned char tmo_val:6; /* Byte 0 Bits 0-5 */ + enum { + MYRS_TMO_SCALE_SECONDS = 0, + MYRS_TMO_SCALE_MINUTES = 1, + MYRS_TMO_SCALE_HOURS = 2, + MYRS_TMO_SCALE_RESERVED = 3 + } __packed tmo_scale:2; /* Byte 0 Bits 6-7 */ +}; + +/* + * DAC960 V2 Firmware Physical Device structure. + */ +struct myrs_pdev { + unsigned char lun; /* Byte 0 */ + unsigned char target; /* Byte 1 */ + unsigned char channel:3; /* Byte 2 Bits 0-2 */ + unsigned char ctlr:5; /* Byte 2 Bits 3-7 */ +} __packed; + +/* + * DAC960 V2 Firmware Logical Device structure. + */ +struct myrs_ldev { + unsigned short ldev_num; /* Bytes 0-1 */ + unsigned char rsvd:3; /* Byte 2 Bits 0-2 */ + unsigned char ctlr:5; /* Byte 2 Bits 3-7 */ +} __packed; + +/* + * DAC960 V2 Firmware Operation Device type. + */ +enum myrs_opdev { + MYRS_PHYSICAL_DEVICE = 0x00, + MYRS_RAID_DEVICE = 0x01, + MYRS_PHYSICAL_CHANNEL = 0x02, + MYRS_RAID_CHANNEL = 0x03, + MYRS_PHYSICAL_CONTROLLER = 0x04, + MYRS_RAID_CONTROLLER = 0x05, + MYRS_CONFIGURATION_GROUP = 0x10, + MYRS_ENCLOSURE = 0x11, +} __packed; + +/* + * DAC960 V2 Firmware Translate Physical To Logical Device structure. + */ +struct myrs_devmap { + unsigned short ldev_num; /* Bytes 0-1 */ + unsigned short rsvd; /* Bytes 2-3 */ + unsigned char prev_boot_ctlr; /* Byte 4 */ + unsigned char prev_boot_channel; /* Byte 5 */ + unsigned char prev_boot_target; /* Byte 6 */ + unsigned char prev_boot_lun; /* Byte 7 */ +}; + +/* + * DAC960 V2 Firmware Scatter/Gather List Entry structure. + */ +struct myrs_sge { + u64 sge_addr; /* Bytes 0-7 */ + u64 sge_count; /* Bytes 8-15 */ +}; + +/* + * DAC960 V2 Firmware Data Transfer Memory Address structure. + */ +union myrs_sgl { + struct myrs_sge sge[2]; /* Bytes 0-31 */ + struct { + unsigned short sge0_len; /* Bytes 0-1 */ + unsigned short sge1_len; /* Bytes 2-3 */ + unsigned short sge2_len; /* Bytes 4-5 */ + unsigned short rsvd; /* Bytes 6-7 */ + u64 sge0_addr; /* Bytes 8-15 */ + u64 sge1_addr; /* Bytes 16-23 */ + u64 sge2_addr; /* Bytes 24-31 */ + } ext; +}; + +/* + * 64 Byte DAC960 V2 Firmware Command Mailbox structure. 
+ */ +union myrs_cmd_mbox { + unsigned int words[16]; /* Words 0-15 */ + struct { + unsigned short id; /* Bytes 0-1 */ + enum myrs_cmd_opcode opcode; /* Byte 2 */ + struct myrs_cmd_ctrl control; /* Byte 3 */ + u32 dma_size:24; /* Bytes 4-6 */ + unsigned char dma_num; /* Byte 7 */ + u64 sense_addr; /* Bytes 8-15 */ + unsigned int rsvd1:24; /* Bytes 16-18 */ + struct myrs_cmd_tmo tmo; /* Byte 19 */ + unsigned char sense_len; /* Byte 20 */ + enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */ + unsigned char rsvd2[10]; /* Bytes 22-31 */ + union myrs_sgl dma_addr; /* Bytes 32-63 */ + } common; + struct { + unsigned short id; /* Bytes 0-1 */ + enum myrs_cmd_opcode opcode; /* Byte 2 */ + struct myrs_cmd_ctrl control; /* Byte 3 */ + u32 dma_size; /* Bytes 4-7 */ + u64 sense_addr; /* Bytes 8-15 */ + struct myrs_pdev pdev; /* Bytes 16-18 */ + struct myrs_cmd_tmo tmo; /* Byte 19 */ + unsigned char sense_len; /* Byte 20 */ + unsigned char cdb_len; /* Byte 21 */ + unsigned char cdb[10]; /* Bytes 22-31 */ + union myrs_sgl dma_addr; /* Bytes 32-63 */ + } SCSI_10; + struct { + unsigned short id; /* Bytes 0-1 */ + enum myrs_cmd_opcode opcode; /* Byte 2 */ + struct myrs_cmd_ctrl control; /* Byte 3 */ + u32 dma_size; /* Bytes 4-7 */ + u64 sense_addr; /* Bytes 8-15 */ + struct myrs_pdev pdev; /* Bytes 16-18 */ + struct myrs_cmd_tmo tmo; /* Byte 19 */ + unsigned char sense_len; /* Byte 20 */ + unsigned char cdb_len; /* Byte 21 */ + unsigned short rsvd; /* Bytes 22-23 */ + u64 cdb_addr; /* Bytes 24-31 */ + union myrs_sgl dma_addr; /* Bytes 32-63 */ + } SCSI_255; + struct { + unsigned short id; /* Bytes 0-1 */ + enum myrs_cmd_opcode opcode; /* Byte 2 */ + struct myrs_cmd_ctrl control; /* Byte 3 */ + u32 dma_size:24; /* Bytes 4-6 */ + unsigned char dma_num; /* Byte 7 */ + u64 sense_addr; /* Bytes 8-15 */ + unsigned short rsvd1; /* Bytes 16-17 */ + unsigned char ctlr_num; /* Byte 18 */ + struct myrs_cmd_tmo tmo; /* Byte 19 */ + unsigned char sense_len; /* Byte 20 */ + enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */ + unsigned char rsvd2[10]; /* Bytes 22-31 */ + union myrs_sgl dma_addr; /* Bytes 32-63 */ + } ctlr_info; + struct { + unsigned short id; /* Bytes 0-1 */ + enum myrs_cmd_opcode opcode; /* Byte 2 */ + struct myrs_cmd_ctrl control; /* Byte 3 */ + u32 dma_size:24; /* Bytes 4-6 */ + unsigned char dma_num; /* Byte 7 */ + u64 sense_addr; /* Bytes 8-15 */ + struct myrs_ldev ldev; /* Bytes 16-18 */ + struct myrs_cmd_tmo tmo; /* Byte 19 */ + unsigned char sense_len; /* Byte 20 */ + enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */ + unsigned char rsvd[10]; /* Bytes 22-31 */ + union myrs_sgl dma_addr; /* Bytes 32-63 */ + } ldev_info; + struct { + unsigned short id; /* Bytes 0-1 */ + enum myrs_cmd_opcode opcode; /* Byte 2 */ + struct myrs_cmd_ctrl control; /* Byte 3 */ + u32 dma_size:24; /* Bytes 4-6 */ + unsigned char dma_num; /* Byte 7 */ + u64 sense_addr; /* Bytes 8-15 */ + struct myrs_pdev pdev; /* Bytes 16-18 */ + struct myrs_cmd_tmo tmo; /* Byte 19 */ + unsigned char sense_len; /* Byte 20 */ + enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */ + unsigned char rsvd[10]; /* Bytes 22-31 */ + union myrs_sgl dma_addr; /* Bytes 32-63 */ + } pdev_info; + struct { + unsigned short id; /* Bytes 0-1 */ + enum myrs_cmd_opcode opcode; /* Byte 2 */ + struct myrs_cmd_ctrl control; /* Byte 3 */ + u32 dma_size:24; /* Bytes 4-6 */ + unsigned char dma_num; /* Byte 7 */ + u64 sense_addr; /* Bytes 8-15 */ + unsigned short evnum_upper; /* Bytes 16-17 */ + unsigned char ctlr_num; /* Byte 18 */ + struct myrs_cmd_tmo tmo; /* 
Byte 19 */ + unsigned char sense_len; /* Byte 20 */ + enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */ + unsigned short evnum_lower; /* Bytes 22-23 */ + unsigned char rsvd[8]; /* Bytes 24-31 */ + union myrs_sgl dma_addr; /* Bytes 32-63 */ + } get_event; + struct { + unsigned short id; /* Bytes 0-1 */ + enum myrs_cmd_opcode opcode; /* Byte 2 */ + struct myrs_cmd_ctrl control; /* Byte 3 */ + u32 dma_size:24; /* Bytes 4-6 */ + unsigned char dma_num; /* Byte 7 */ + u64 sense_addr; /* Bytes 8-15 */ + union { + struct myrs_ldev ldev; /* Bytes 16-18 */ + struct myrs_pdev pdev; /* Bytes 16-18 */ + }; + struct myrs_cmd_tmo tmo; /* Byte 19 */ + unsigned char sense_len; /* Byte 20 */ + enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */ + enum myrs_devstate state; /* Byte 22 */ + unsigned char rsvd[9]; /* Bytes 23-31 */ + union myrs_sgl dma_addr; /* Bytes 32-63 */ + } set_devstate; + struct { + unsigned short id; /* Bytes 0-1 */ + enum myrs_cmd_opcode opcode; /* Byte 2 */ + struct myrs_cmd_ctrl control; /* Byte 3 */ + u32 dma_size:24; /* Bytes 4-6 */ + unsigned char dma_num; /* Byte 7 */ + u64 sense_addr; /* Bytes 8-15 */ + struct myrs_ldev ldev; /* Bytes 16-18 */ + struct myrs_cmd_tmo tmo; /* Byte 19 */ + unsigned char sense_len; /* Byte 20 */ + enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */ + unsigned char restore_consistency:1; /* Byte 22 Bit 0 */ + unsigned char initialized_area_only:1; /* Byte 22 Bit 1 */ + unsigned char rsvd1:6; /* Byte 22 Bits 2-7 */ + unsigned char rsvd2[9]; /* Bytes 23-31 */ + union myrs_sgl dma_addr; /* Bytes 32-63 */ + } cc; + struct { + unsigned short id; /* Bytes 0-1 */ + enum myrs_cmd_opcode opcode; /* Byte 2 */ + struct myrs_cmd_ctrl control; /* Byte 3 */ + unsigned char first_cmd_mbox_size_kb; /* Byte 4 */ + unsigned char first_stat_mbox_size_kb; /* Byte 5 */ + unsigned char second_cmd_mbox_size_kb; /* Byte 6 */ + unsigned char second_stat_mbox_size_kb; /* Byte 7 */ + u64 sense_addr; /* Bytes 8-15 */ + unsigned int rsvd1:24; /* Bytes 16-18 */ + struct myrs_cmd_tmo tmo; /* Byte 19 */ + unsigned char sense_len; /* Byte 20 */ + enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */ + unsigned char fwstat_buf_size_kb; /* Byte 22 */ + unsigned char rsvd2; /* Byte 23 */ + u64 fwstat_buf_addr; /* Bytes 24-31 */ + u64 first_cmd_mbox_addr; /* Bytes 32-39 */ + u64 first_stat_mbox_addr; /* Bytes 40-47 */ + u64 second_cmd_mbox_addr; /* Bytes 48-55 */ + u64 second_stat_mbox_addr; /* Bytes 56-63 */ + } set_mbox; + struct { + unsigned short id; /* Bytes 0-1 */ + enum myrs_cmd_opcode opcode; /* Byte 2 */ + struct myrs_cmd_ctrl control; /* Byte 3 */ + u32 dma_size:24; /* Bytes 4-6 */ + unsigned char dma_num; /* Byte 7 */ + u64 sense_addr; /* Bytes 8-15 */ + struct myrs_pdev pdev; /* Bytes 16-18 */ + struct myrs_cmd_tmo tmo; /* Byte 19 */ + unsigned char sense_len; /* Byte 20 */ + enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */ + enum myrs_opdev opdev; /* Byte 22 */ + unsigned char rsvd[9]; /* Bytes 23-31 */ + union myrs_sgl dma_addr; /* Bytes 32-63 */ + } dev_op; +}; + +/* + * DAC960 V2 Firmware Controller Status Mailbox structure. 
+ */ +struct myrs_stat_mbox { + unsigned short id; /* Bytes 0-1 */ + unsigned char status; /* Byte 2 */ + unsigned char sense_len; /* Byte 3 */ + int residual; /* Bytes 4-7 */ +}; + +struct myrs_cmdblk { + union myrs_cmd_mbox mbox; + unsigned char status; + unsigned char sense_len; + int residual; + struct completion *complete; + struct myrs_sge *sgl; + dma_addr_t sgl_addr; + unsigned char *dcdb; + dma_addr_t dcdb_dma; + unsigned char *sense; + dma_addr_t sense_addr; +}; + +/* + * DAC960 Driver Controller structure. + */ +struct myrs_hba { + void __iomem *io_base; + void __iomem *mmio_base; + phys_addr_t io_addr; + phys_addr_t pci_addr; + unsigned int irq; + + unsigned char model_name[28]; + unsigned char fw_version[12]; + + struct Scsi_Host *host; + struct pci_dev *pdev; + + unsigned int epoch; + unsigned int next_evseq; + /* Monitor flags */ + bool needs_update; + bool disable_enc_msg; + + struct workqueue_struct *work_q; + char work_q_name[20]; + struct delayed_work monitor_work; + unsigned long primary_monitor_time; + unsigned long secondary_monitor_time; + + spinlock_t queue_lock; + + struct dma_pool *sg_pool; + struct dma_pool *sense_pool; + struct dma_pool *dcdb_pool; + + void (*write_cmd_mbox)(union myrs_cmd_mbox *next_mbox, + union myrs_cmd_mbox *cmd_mbox); + void (*get_cmd_mbox)(void __iomem *base); + void (*disable_intr)(void __iomem *base); + void (*reset)(void __iomem *base); + + dma_addr_t cmd_mbox_addr; + size_t cmd_mbox_size; + union myrs_cmd_mbox *first_cmd_mbox; + union myrs_cmd_mbox *last_cmd_mbox; + union myrs_cmd_mbox *next_cmd_mbox; + union myrs_cmd_mbox *prev_cmd_mbox1; + union myrs_cmd_mbox *prev_cmd_mbox2; + + dma_addr_t stat_mbox_addr; + size_t stat_mbox_size; + struct myrs_stat_mbox *first_stat_mbox; + struct myrs_stat_mbox *last_stat_mbox; + struct myrs_stat_mbox *next_stat_mbox; + + struct myrs_cmdblk dcmd_blk; + struct myrs_cmdblk mcmd_blk; + struct mutex dcmd_mutex; + + struct myrs_fwstat *fwstat_buf; + dma_addr_t fwstat_addr; + + struct myrs_ctlr_info *ctlr_info; + struct mutex cinfo_mutex; + + struct myrs_event *event_buf; +}; + +typedef unsigned char (*enable_mbox_t)(void __iomem *base, dma_addr_t addr); +typedef int (*myrs_hwinit_t)(struct pci_dev *pdev, + struct myrs_hba *c, void __iomem *base); + +struct myrs_privdata { + myrs_hwinit_t hw_init; + irq_handler_t irq_handler; + unsigned int mmio_size; +}; + +/* + * DAC960 GEM Series Controller Interface Register Offsets. + */ + +#define DAC960_GEM_mmio_size 0x600 + +enum DAC960_GEM_reg_offset { + DAC960_GEM_IDB_READ_OFFSET = 0x214, + DAC960_GEM_IDB_CLEAR_OFFSET = 0x218, + DAC960_GEM_ODB_READ_OFFSET = 0x224, + DAC960_GEM_ODB_CLEAR_OFFSET = 0x228, + DAC960_GEM_IRQSTS_OFFSET = 0x208, + DAC960_GEM_IRQMASK_READ_OFFSET = 0x22C, + DAC960_GEM_IRQMASK_CLEAR_OFFSET = 0x230, + DAC960_GEM_CMDMBX_OFFSET = 0x510, + DAC960_GEM_CMDSTS_OFFSET = 0x518, + DAC960_GEM_ERRSTS_READ_OFFSET = 0x224, + DAC960_GEM_ERRSTS_CLEAR_OFFSET = 0x228, +}; + +/* + * DAC960 GEM Series Inbound Door Bell Register. + */ +#define DAC960_GEM_IDB_HWMBOX_NEW_CMD 0x01 +#define DAC960_GEM_IDB_HWMBOX_ACK_STS 0x02 +#define DAC960_GEM_IDB_GEN_IRQ 0x04 +#define DAC960_GEM_IDB_CTRL_RESET 0x08 +#define DAC960_GEM_IDB_MMBOX_NEW_CMD 0x10 + +#define DAC960_GEM_IDB_HWMBOX_FULL 0x01 +#define DAC960_GEM_IDB_INIT_IN_PROGRESS 0x02 + +/* + * DAC960 GEM Series Outbound Door Bell Register. 
+ */ +#define DAC960_GEM_ODB_HWMBOX_ACK_IRQ 0x01 +#define DAC960_GEM_ODB_MMBOX_ACK_IRQ 0x02 +#define DAC960_GEM_ODB_HWMBOX_STS_AVAIL 0x01 +#define DAC960_GEM_ODB_MMBOX_STS_AVAIL 0x02 + +/* + * DAC960 GEM Series Interrupt Mask Register. + */ +#define DAC960_GEM_IRQMASK_HWMBOX_IRQ 0x01 +#define DAC960_GEM_IRQMASK_MMBOX_IRQ 0x02 + +/* + * DAC960 GEM Series Error Status Register. + */ +#define DAC960_GEM_ERRSTS_PENDING 0x20 + +/* + * dma_addr_writeql is provided to write dma_addr_t types + * to a 64-bit pci address space register. The controller + * will accept having the register written as two 32-bit + * values. + * + * In HIGHMEM kernels, dma_addr_t is a 64-bit value. + * without HIGHMEM, dma_addr_t is a 32-bit value. + * + * The compiler should always fix up the assignment + * to u.wq appropriately, depending upon the size of + * dma_addr_t. + */ +static inline +void dma_addr_writeql(dma_addr_t addr, void __iomem *write_address) +{ + union { + u64 wq; + uint wl[2]; + } u; + + u.wq = addr; + + writel(u.wl[0], write_address); + writel(u.wl[1], write_address + 4); +} + +/* + * DAC960 BA Series Controller Interface Register Offsets. + */ + +#define DAC960_BA_mmio_size 0x80 + +enum DAC960_BA_reg_offset { + DAC960_BA_IRQSTS_OFFSET = 0x30, + DAC960_BA_IRQMASK_OFFSET = 0x34, + DAC960_BA_CMDMBX_OFFSET = 0x50, + DAC960_BA_CMDSTS_OFFSET = 0x58, + DAC960_BA_IDB_OFFSET = 0x60, + DAC960_BA_ODB_OFFSET = 0x61, + DAC960_BA_ERRSTS_OFFSET = 0x63, +}; + +/* + * DAC960 BA Series Inbound Door Bell Register. + */ +#define DAC960_BA_IDB_HWMBOX_NEW_CMD 0x01 +#define DAC960_BA_IDB_HWMBOX_ACK_STS 0x02 +#define DAC960_BA_IDB_GEN_IRQ 0x04 +#define DAC960_BA_IDB_CTRL_RESET 0x08 +#define DAC960_BA_IDB_MMBOX_NEW_CMD 0x10 + +#define DAC960_BA_IDB_HWMBOX_EMPTY 0x01 +#define DAC960_BA_IDB_INIT_DONE 0x02 + +/* + * DAC960 BA Series Outbound Door Bell Register. + */ +#define DAC960_BA_ODB_HWMBOX_ACK_IRQ 0x01 +#define DAC960_BA_ODB_MMBOX_ACK_IRQ 0x02 + +#define DAC960_BA_ODB_HWMBOX_STS_AVAIL 0x01 +#define DAC960_BA_ODB_MMBOX_STS_AVAIL 0x02 + +/* + * DAC960 BA Series Interrupt Mask Register. + */ +#define DAC960_BA_IRQMASK_DISABLE_IRQ 0x04 +#define DAC960_BA_IRQMASK_DISABLEW_I2O 0x08 + +/* + * DAC960 BA Series Error Status Register. + */ +#define DAC960_BA_ERRSTS_PENDING 0x04 + +/* + * DAC960 LP Series Controller Interface Register Offsets. + */ + +#define DAC960_LP_mmio_size 0x80 + +enum DAC960_LP_reg_offset { + DAC960_LP_CMDMBX_OFFSET = 0x10, + DAC960_LP_CMDSTS_OFFSET = 0x18, + DAC960_LP_IDB_OFFSET = 0x20, + DAC960_LP_ODB_OFFSET = 0x2C, + DAC960_LP_ERRSTS_OFFSET = 0x2E, + DAC960_LP_IRQSTS_OFFSET = 0x30, + DAC960_LP_IRQMASK_OFFSET = 0x34, +}; + +/* + * DAC960 LP Series Inbound Door Bell Register. + */ +#define DAC960_LP_IDB_HWMBOX_NEW_CMD 0x01 +#define DAC960_LP_IDB_HWMBOX_ACK_STS 0x02 +#define DAC960_LP_IDB_GEN_IRQ 0x04 +#define DAC960_LP_IDB_CTRL_RESET 0x08 +#define DAC960_LP_IDB_MMBOX_NEW_CMD 0x10 + +#define DAC960_LP_IDB_HWMBOX_FULL 0x01 +#define DAC960_LP_IDB_INIT_IN_PROGRESS 0x02 + +/* + * DAC960 LP Series Outbound Door Bell Register. + */ +#define DAC960_LP_ODB_HWMBOX_ACK_IRQ 0x01 +#define DAC960_LP_ODB_MMBOX_ACK_IRQ 0x02 + +#define DAC960_LP_ODB_HWMBOX_STS_AVAIL 0x01 +#define DAC960_LP_ODB_MMBOX_STS_AVAIL 0x02 + +/* + * DAC960 LP Series Interrupt Mask Register. + */ +#define DAC960_LP_IRQMASK_DISABLE_IRQ 0x04 + +/* + * DAC960 LP Series Error Status Register. 
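/*
 * Editor's sketch -- not part of the upstream header: a minimal example of
 * how dma_addr_writeql() above might be used to program a 64-bit mailbox
 * base address into a controller register pair.  The function name is the
 * editor's own; the real myrs driver does this inside its per-controller
 * mailbox-enable helpers.  DAC960_GEM_CMDMBX_OFFSET is the GEM series
 * command-mailbox register offset defined earlier in this header.
 */
static inline void example_write_gem_cmd_mbox(void __iomem *base,
					      dma_addr_t mbox_addr)
{
	/* Low 32 bits land at the register, high 32 bits at register + 4. */
	dma_addr_writeql(mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
}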
+ */ +#define DAC960_LP_ERRSTS_PENDING 0x04 + +#endif /* _MYRS_H */ diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c new file mode 100644 index 000000000..35869b4f9 --- /dev/null +++ b/drivers/scsi/ncr53c8xx.c @@ -0,0 +1,8410 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/****************************************************************************** +** Device driver for the PCI-SCSI NCR538XX controller family. +** +** Copyright (C) 1994 Wolfgang Stanglmeier +** +** +**----------------------------------------------------------------------------- +** +** This driver has been ported to Linux from the FreeBSD NCR53C8XX driver +** and is currently maintained by +** +** Gerard Roudier +** +** Being given that this driver originates from the FreeBSD version, and +** in order to keep synergy on both, any suggested enhancements and corrections +** received on Linux are automatically a potential candidate for the FreeBSD +** version. +** +** The original driver has been written for 386bsd and FreeBSD by +** Wolfgang Stanglmeier +** Stefan Esser +** +** And has been ported to NetBSD by +** Charles M. Hannum +** +**----------------------------------------------------------------------------- +** +** Brief history +** +** December 10 1995 by Gerard Roudier: +** Initial port to Linux. +** +** June 23 1996 by Gerard Roudier: +** Support for 64 bits architectures (Alpha). +** +** November 30 1996 by Gerard Roudier: +** Support for Fast-20 scsi. +** Support for large DMA fifo and 128 dwords bursting. +** +** February 27 1997 by Gerard Roudier: +** Support for Fast-40 scsi. +** Support for on-Board RAM. +** +** May 3 1997 by Gerard Roudier: +** Full support for scsi scripts instructions pre-fetching. +** +** May 19 1997 by Richard Waltham : +** Support for NvRAM detection and reading. +** +** August 18 1997 by Cort : +** Support for Power/PC (Big Endian). +** +** June 20 1998 by Gerard Roudier +** Support for up to 64 tags per lun. +** O(1) everywhere (C and SCRIPTS) for normal cases. +** Low PCI traffic for command handling when on-chip RAM is present. +** Aggressive SCSI SCRIPTS optimizations. +** +** 2005 by Matthew Wilcox and James Bottomley +** PCI-ectomy. This driver now supports only the 720 chip (see the +** NCR_Q720 and zalon drivers for the bus probe logic). +** +******************************************************************************* +*/ + +/* +** Supported SCSI-II features: +** Synchronous negotiation +** Wide negotiation (depends on the NCR Chip) +** Enable disconnection +** Tagged command queuing +** Parity checking +** Etc... 
+** +** Supported NCR/SYMBIOS chips: +** 53C720 (Wide, Fast SCSI-2, intfly problems) +*/ + +/* Name and version of the driver */ +#define SCSI_NCR_DRIVER_NAME "ncr53c8xx-3.4.3g" + +#define SCSI_NCR_DEBUG_FLAGS (0) + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "ncr53c8xx.h" + +#define NAME53C8XX "ncr53c8xx" + +/*========================================================== +** +** Debugging tags +** +**========================================================== +*/ + +#define DEBUG_ALLOC (0x0001) +#define DEBUG_PHASE (0x0002) +#define DEBUG_QUEUE (0x0008) +#define DEBUG_RESULT (0x0010) +#define DEBUG_POINTER (0x0020) +#define DEBUG_SCRIPT (0x0040) +#define DEBUG_TINY (0x0080) +#define DEBUG_TIMING (0x0100) +#define DEBUG_NEGO (0x0200) +#define DEBUG_TAGS (0x0400) +#define DEBUG_SCATTER (0x0800) +#define DEBUG_IC (0x1000) + +/* +** Enable/Disable debug messages. +** Can be changed at runtime too. +*/ + +#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT +static int ncr_debug = SCSI_NCR_DEBUG_FLAGS; + #define DEBUG_FLAGS ncr_debug +#else + #define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS +#endif + +/* + * Locally used status flag + */ +#define SAM_STAT_ILLEGAL 0xff + +static inline struct list_head *ncr_list_pop(struct list_head *head) +{ + if (!list_empty(head)) { + struct list_head *elem = head->next; + + list_del(elem); + return elem; + } + + return NULL; +} + +/*========================================================== +** +** Simple power of two buddy-like allocator. +** +** This simple code is not intended to be fast, but to +** provide power of 2 aligned memory allocations. +** Since the SCRIPTS processor only supplies 8 bit +** arithmetic, this allocator allows simple and fast +** address calculations from the SCRIPTS code. +** In addition, cache line alignment is guaranteed for +** power of 2 cache line size. +** Enhanced in linux-2.3.44 to provide a memory pool +** per pcidev to support dynamic dma mapping. (I would +** have preferred a real bus abstraction, btw). 
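**
** (Editor's worked example, not part of the original comment: with the
** macros defined just below, the smallest chunk is 1 << MEMO_SHIFT = 16
** bytes and a cluster is PAGE_SIZE << MEMO_PAGE_ORDER bytes.
** ___m_alloc() rounds a request up to the next power of two, e.g.
** 24 -> 32, 100 -> 128, 700 -> 1024, splitting a larger free chunk in
** half as needed; ___m_free() walks the other way, merging a freed
** chunk with its buddy at address (a ^ s) whenever that buddy is also
** on the free list.)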
+** +**========================================================== +*/ + +#define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */ +#if PAGE_SIZE >= 8192 +#define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */ +#else +#define MEMO_PAGE_ORDER 1 /* 2 PAGES maximum */ +#endif +#define MEMO_FREE_UNUSED /* Free unused pages immediately */ +#define MEMO_WARN 1 +#define MEMO_GFP_FLAGS GFP_ATOMIC +#define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER) +#define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT) +#define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1) + +typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */ +typedef struct device *m_bush_t; /* Something that addresses DMAable */ + +typedef struct m_link { /* Link between free memory chunks */ + struct m_link *next; +} m_link_s; + +typedef struct m_vtob { /* Virtual to Bus address translation */ + struct m_vtob *next; + m_addr_t vaddr; + m_addr_t baddr; +} m_vtob_s; +#define VTOB_HASH_SHIFT 5 +#define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT) +#define VTOB_HASH_MASK (VTOB_HASH_SIZE-1) +#define VTOB_HASH_CODE(m) \ + ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK) + +typedef struct m_pool { /* Memory pool of a given kind */ + m_bush_t bush; + m_addr_t (*getp)(struct m_pool *); + void (*freep)(struct m_pool *, m_addr_t); + int nump; + m_vtob_s *(vtob[VTOB_HASH_SIZE]); + struct m_pool *next; + struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1]; +} m_pool_s; + +static void *___m_alloc(m_pool_s *mp, int size) +{ + int i = 0; + int s = (1 << MEMO_SHIFT); + int j; + m_addr_t a; + m_link_s *h = mp->h; + + if (size > (PAGE_SIZE << MEMO_PAGE_ORDER)) + return NULL; + + while (size > s) { + s <<= 1; + ++i; + } + + j = i; + while (!h[j].next) { + if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) { + h[j].next = (m_link_s *)mp->getp(mp); + if (h[j].next) + h[j].next->next = NULL; + break; + } + ++j; + s <<= 1; + } + a = (m_addr_t) h[j].next; + if (a) { + h[j].next = h[j].next->next; + while (j > i) { + j -= 1; + s >>= 1; + h[j].next = (m_link_s *) (a+s); + h[j].next->next = NULL; + } + } +#ifdef DEBUG + printk("___m_alloc(%d) = %p\n", size, (void *) a); +#endif + return (void *) a; +} + +static void ___m_free(m_pool_s *mp, void *ptr, int size) +{ + int i = 0; + int s = (1 << MEMO_SHIFT); + m_link_s *q; + m_addr_t a, b; + m_link_s *h = mp->h; + +#ifdef DEBUG + printk("___m_free(%p, %d)\n", ptr, size); +#endif + + if (size > (PAGE_SIZE << MEMO_PAGE_ORDER)) + return; + + while (size > s) { + s <<= 1; + ++i; + } + + a = (m_addr_t) ptr; + + while (1) { +#ifdef MEMO_FREE_UNUSED + if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) { + mp->freep(mp, a); + break; + } +#endif + b = a ^ s; + q = &h[i]; + while (q->next && q->next != (m_link_s *) b) { + q = q->next; + } + if (!q->next) { + ((m_link_s *) a)->next = h[i].next; + h[i].next = (m_link_s *) a; + break; + } + q->next = q->next->next; + a = a & b; + s <<= 1; + ++i; + } +} + +static DEFINE_SPINLOCK(ncr53c8xx_lock); + +static void *__m_calloc2(m_pool_s *mp, int size, char *name, int uflags) +{ + void *p; + + p = ___m_alloc(mp, size); + + if (DEBUG_FLAGS & DEBUG_ALLOC) + printk ("new %-10s[%4d] @%p.\n", name, size, p); + + if (p) + memset(p, 0, size); + else if (uflags & MEMO_WARN) + printk (NAME53C8XX ": failed to allocate %s[%d]\n", name, size); + + return p; +} + +#define __m_calloc(mp, s, n) __m_calloc2(mp, s, n, MEMO_WARN) + +static void __m_free(m_pool_s *mp, void *ptr, int size, char *name) +{ + if (DEBUG_FLAGS & DEBUG_ALLOC) + printk ("freeing %-10s[%4d] @%p.\n", name, size, ptr); + + ___m_free(mp, ptr, 
size); + +} + +/* + * With pci bus iommu support, we use a default pool of unmapped memory + * for memory we donnot need to DMA from/to and one pool per pcidev for + * memory accessed by the PCI chip. `mp0' is the default not DMAable pool. + */ + +static m_addr_t ___mp0_getp(m_pool_s *mp) +{ + m_addr_t m = __get_free_pages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER); + if (m) + ++mp->nump; + return m; +} + +static void ___mp0_freep(m_pool_s *mp, m_addr_t m) +{ + free_pages(m, MEMO_PAGE_ORDER); + --mp->nump; +} + +static m_pool_s mp0 = {NULL, ___mp0_getp, ___mp0_freep}; + +/* + * DMAable pools. + */ + +/* + * With pci bus iommu support, we maintain one pool per pcidev and a + * hashed reverse table for virtual to bus physical address translations. + */ +static m_addr_t ___dma_getp(m_pool_s *mp) +{ + m_addr_t vp; + m_vtob_s *vbp; + + vbp = __m_calloc(&mp0, sizeof(*vbp), "VTOB"); + if (vbp) { + dma_addr_t daddr; + vp = (m_addr_t) dma_alloc_coherent(mp->bush, + PAGE_SIZE<<MEMO_PAGE_ORDER, + &daddr, GFP_ATOMIC); + if (vp) { + int hc = VTOB_HASH_CODE(vp); + vbp->vaddr = vp; + vbp->baddr = daddr; + vbp->next = mp->vtob[hc]; + mp->vtob[hc] = vbp; + ++mp->nump; + return vp; + } + } + if (vbp) + __m_free(&mp0, vbp, sizeof(*vbp), "VTOB"); + return 0; +} + +static void ___dma_freep(m_pool_s *mp, m_addr_t m) +{ + m_vtob_s **vbpp, *vbp; + int hc = VTOB_HASH_CODE(m); + + vbpp = &mp->vtob[hc]; + while (*vbpp && (*vbpp)->vaddr != m) + vbpp = &(*vbpp)->next; + if (*vbpp) { + vbp = *vbpp; + *vbpp = (*vbpp)->next; + dma_free_coherent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER, + (void *)vbp->vaddr, (dma_addr_t)vbp->baddr); + __m_free(&mp0, vbp, sizeof(*vbp), "VTOB"); + --mp->nump; + } +} + +static inline m_pool_s *___get_dma_pool(m_bush_t bush) +{ + m_pool_s *mp; + for (mp = mp0.next; mp && mp->bush != bush; mp = mp->next); + return mp; +} + +static m_pool_s *___cre_dma_pool(m_bush_t bush) +{ + m_pool_s *mp; + mp = __m_calloc(&mp0, sizeof(*mp), "MPOOL"); + if (mp) { + memset(mp, 0, sizeof(*mp)); + mp->bush = bush; + mp->getp = ___dma_getp; + mp->freep = ___dma_freep; + mp->next = mp0.next; + mp0.next = mp; + } + return mp; +} + +static void ___del_dma_pool(m_pool_s *p) +{ + struct m_pool **pp = &mp0.next; + + while (*pp && *pp != p) + pp = &(*pp)->next; + if (*pp) { + *pp = (*pp)->next; + __m_free(&mp0, p, sizeof(*p), "MPOOL"); + } +} + +static void *__m_calloc_dma(m_bush_t bush, int size, char *name) +{ + u_long flags; + struct m_pool *mp; + void *m = NULL; + + spin_lock_irqsave(&ncr53c8xx_lock, flags); + mp = ___get_dma_pool(bush); + if (!mp) + mp = ___cre_dma_pool(bush); + if (mp) + m = __m_calloc(mp, size, name); + if (mp && !mp->nump) + ___del_dma_pool(mp); + spin_unlock_irqrestore(&ncr53c8xx_lock, flags); + + return m; +} + +static void __m_free_dma(m_bush_t bush, void *m, int size, char *name) +{ + u_long flags; + struct m_pool *mp; + + spin_lock_irqsave(&ncr53c8xx_lock, flags); + mp = ___get_dma_pool(bush); + if (mp) + __m_free(mp, m, size, name); + if (mp && !mp->nump) + ___del_dma_pool(mp); + spin_unlock_irqrestore(&ncr53c8xx_lock, flags); +} + +static m_addr_t __vtobus(m_bush_t bush, void *m) +{ + u_long flags; + m_pool_s *mp; + int hc = VTOB_HASH_CODE(m); + m_vtob_s *vp = NULL; + m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK; + + spin_lock_irqsave(&ncr53c8xx_lock, flags); + mp = ___get_dma_pool(bush); + if (mp) { + vp = mp->vtob[hc]; + while (vp && (m_addr_t) vp->vaddr != a) + vp = vp->next; + } + spin_unlock_irqrestore(&ncr53c8xx_lock, flags); + return vp ?
vp->baddr + (((m_addr_t) m) - a) : 0; +} + +#define _m_calloc_dma(np, s, n) __m_calloc_dma(np->dev, s, n) +#define _m_free_dma(np, p, s, n) __m_free_dma(np->dev, p, s, n) +#define m_calloc_dma(s, n) _m_calloc_dma(np, s, n) +#define m_free_dma(p, s, n) _m_free_dma(np, p, s, n) +#define _vtobus(np, p) __vtobus(np->dev, p) +#define vtobus(p) _vtobus(np, p) + +/* + * Deal with DMA mapping/unmapping. + */ + +static void __unmap_scsi_data(struct device *dev, struct scsi_cmnd *cmd) +{ + struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd); + + switch(cmd_priv->data_mapped) { + case 2: + scsi_dma_unmap(cmd); + break; + } + cmd_priv->data_mapped = 0; +} + +static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd) +{ + struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd); + int use_sg; + + use_sg = scsi_dma_map(cmd); + if (!use_sg) + return 0; + + cmd_priv->data_mapped = 2; + cmd_priv->data_mapping = use_sg; + + return use_sg; +} + +#define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->dev, cmd) +#define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->dev, cmd) + +/*========================================================== +** +** Driver setup. +** +** This structure is initialized from linux config +** options. It can be overridden at boot-up by the boot +** command line. +** +**========================================================== +*/ +static struct ncr_driver_setup + driver_setup = SCSI_NCR_DRIVER_SETUP; + +#ifndef MODULE +#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT +static struct ncr_driver_setup + driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP; +#endif +#endif /* !MODULE */ + +#define initverbose (driver_setup.verbose) +#define bootverbose (np->verbose) + + +/*=================================================================== +** +** Driver setup from the boot command line +** +**=================================================================== +*/ + +#ifdef MODULE +#define ARG_SEP ' ' +#else +#define ARG_SEP ',' +#endif + +#define OPT_TAGS 1 +#define OPT_MASTER_PARITY 2 +#define OPT_SCSI_PARITY 3 +#define OPT_DISCONNECTION 4 +#define OPT_SPECIAL_FEATURES 5 +#define OPT_UNUSED_1 6 +#define OPT_FORCE_SYNC_NEGO 7 +#define OPT_REVERSE_PROBE 8 +#define OPT_DEFAULT_SYNC 9 +#define OPT_VERBOSE 10 +#define OPT_DEBUG 11 +#define OPT_BURST_MAX 12 +#define OPT_LED_PIN 13 +#define OPT_MAX_WIDE 14 +#define OPT_SETTLE_DELAY 15 +#define OPT_DIFF_SUPPORT 16 +#define OPT_IRQM 17 +#define OPT_PCI_FIX_UP 18 +#define OPT_BUS_CHECK 19 +#define OPT_OPTIMIZE 20 +#define OPT_RECOVERY 21 +#define OPT_SAFE_SETUP 22 +#define OPT_USE_NVRAM 23 +#define OPT_EXCLUDE 24 +#define OPT_HOST_ID 25 + +#ifdef SCSI_NCR_IARB_SUPPORT +#define OPT_IARB 26 +#endif + +#ifdef MODULE +#define ARG_SEP ' ' +#else +#define ARG_SEP ',' +#endif + +#ifndef MODULE +static char setup_token[] __initdata = + "tags:" "mpar:" + "spar:" "disc:" + "specf:" "ultra:" + "fsn:" "revprob:" + "sync:" "verb:" + "debug:" "burst:" + "led:" "wide:" + "settle:" "diff:" + "irqm:" "pcifix:" + "buschk:" "optim:" + "recovery:" + "safe:" "nvram:" + "excl:" "hostid:" +#ifdef SCSI_NCR_IARB_SUPPORT + "iarb:" +#endif + ; /* DONNOT REMOVE THIS ';' */ + +static int __init get_setup_token(char *p) +{ + char *cur = setup_token; + char *pc; + int i = 0; + + while (cur != NULL && (pc = strchr(cur, ':')) != NULL) { + ++pc; + ++i; + if (!strncmp(p, cur, pc - cur)) + return i; + cur = pc; + } + return 0; +} + +static int __init sym53c8xx__setup(char *str) +{ +#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT + char *cur = str; + char *pc, *pv; + int i, val, 
c; + int xi = 0; + + while (cur != NULL && (pc = strchr(cur, ':')) != NULL) { + char *pe; + + val = 0; + pv = pc; + c = *++pv; + + if (c == 'n') + val = 0; + else if (c == 'y') + val = 1; + else + val = (int) simple_strtoul(pv, &pe, 0); + + switch (get_setup_token(cur)) { + case OPT_TAGS: + driver_setup.default_tags = val; + if (pe && *pe == '/') { + i = 0; + while (*pe && *pe != ARG_SEP && + i < sizeof(driver_setup.tag_ctrl)-1) { + driver_setup.tag_ctrl[i++] = *pe++; + } + driver_setup.tag_ctrl[i] = '\0'; + } + break; + case OPT_MASTER_PARITY: + driver_setup.master_parity = val; + break; + case OPT_SCSI_PARITY: + driver_setup.scsi_parity = val; + break; + case OPT_DISCONNECTION: + driver_setup.disconnection = val; + break; + case OPT_SPECIAL_FEATURES: + driver_setup.special_features = val; + break; + case OPT_FORCE_SYNC_NEGO: + driver_setup.force_sync_nego = val; + break; + case OPT_REVERSE_PROBE: + driver_setup.reverse_probe = val; + break; + case OPT_DEFAULT_SYNC: + driver_setup.default_sync = val; + break; + case OPT_VERBOSE: + driver_setup.verbose = val; + break; + case OPT_DEBUG: + driver_setup.debug = val; + break; + case OPT_BURST_MAX: + driver_setup.burst_max = val; + break; + case OPT_LED_PIN: + driver_setup.led_pin = val; + break; + case OPT_MAX_WIDE: + driver_setup.max_wide = val? 1:0; + break; + case OPT_SETTLE_DELAY: + driver_setup.settle_delay = val; + break; + case OPT_DIFF_SUPPORT: + driver_setup.diff_support = val; + break; + case OPT_IRQM: + driver_setup.irqm = val; + break; + case OPT_PCI_FIX_UP: + driver_setup.pci_fix_up = val; + break; + case OPT_BUS_CHECK: + driver_setup.bus_check = val; + break; + case OPT_OPTIMIZE: + driver_setup.optimize = val; + break; + case OPT_RECOVERY: + driver_setup.recovery = val; + break; + case OPT_USE_NVRAM: + driver_setup.use_nvram = val; + break; + case OPT_SAFE_SETUP: + memcpy(&driver_setup, &driver_safe_setup, + sizeof(driver_setup)); + break; + case OPT_EXCLUDE: + if (xi < SCSI_NCR_MAX_EXCLUDES) + driver_setup.excludes[xi++] = val; + break; + case OPT_HOST_ID: + driver_setup.host_id = val; + break; +#ifdef SCSI_NCR_IARB_SUPPORT + case OPT_IARB: + driver_setup.iarb = val; + break; +#endif + default: + printk("sym53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur); + break; + } + + if ((cur = strchr(cur, ARG_SEP)) != NULL) + ++cur; + } +#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */ + return 1; +} +#endif /* !MODULE */ + +/*=================================================================== +** +** Get device queue depth from boot command line. +** +**=================================================================== +*/ +#define DEF_DEPTH (driver_setup.default_tags) +#define ALL_TARGETS -2 +#define NO_TARGET -1 +#define ALL_LUNS -2 +#define NO_LUN -1 + +static int device_queue_depth(int unit, int target, int lun) +{ + int c, h, t, u, v; + char *p = driver_setup.tag_ctrl; + char *ep; + + h = -1; + t = NO_TARGET; + u = NO_LUN; + while ((c = *p++) != 0) { + v = simple_strtoul(p, &ep, 0); + switch(c) { + case '/': + ++h; + t = ALL_TARGETS; + u = ALL_LUNS; + break; + case 't': + if (t != target) + t = (target == v) ? v : NO_TARGET; + u = ALL_LUNS; + break; + case 'u': + if (u != lun) + u = (lun == v) ? 
v : NO_LUN; + break; + case 'q': + if (h == unit && + (t == ALL_TARGETS || t == target) && + (u == ALL_LUNS || u == lun)) + return v; + break; + case '-': + t = ALL_TARGETS; + u = ALL_LUNS; + break; + default: + break; + } + p = ep; + } + return DEF_DEPTH; +} + + +/*========================================================== +** +** The CCB done queue uses an array of CCB virtual +** addresses. Empty entries are flagged using the bogus +** virtual address 0xffffffff. +** +** Since PCI ensures that only aligned DWORDs are accessed +** atomically, 64 bit little-endian architecture requires +** to test the high order DWORD of the entry to determine +** if it is empty or valid. +** +** BTW, I will make things differently as soon as I will +** have a better idea, but this is simple and should work. +** +**========================================================== +*/ + +#define SCSI_NCR_CCB_DONE_SUPPORT +#ifdef SCSI_NCR_CCB_DONE_SUPPORT + +#define MAX_DONE 24 +#define CCB_DONE_EMPTY 0xffffffffUL + +/* All 32 bit architectures */ +#if BITS_PER_LONG == 32 +#define CCB_DONE_VALID(cp) (((u_long) cp) != CCB_DONE_EMPTY) + +/* All > 32 bit (64 bit) architectures regardless endian-ness */ +#else +#define CCB_DONE_VALID(cp) \ + ((((u_long) cp) & 0xffffffff00000000ul) && \ + (((u_long) cp) & 0xfffffffful) != CCB_DONE_EMPTY) +#endif + +#endif /* SCSI_NCR_CCB_DONE_SUPPORT */ + +/*========================================================== +** +** Configuration and Debugging +** +**========================================================== +*/ + +/* +** SCSI address of this device. +** The boot routines should have set it. +** If not, use this. +*/ + +#ifndef SCSI_NCR_MYADDR +#define SCSI_NCR_MYADDR (7) +#endif + +/* +** The maximum number of tags per logic unit. +** Used only for disk devices that support tags. +*/ + +#ifndef SCSI_NCR_MAX_TAGS +#define SCSI_NCR_MAX_TAGS (8) +#endif + +/* +** TAGS are actually limited to 64 tags/lun. +** We need to deal with power of 2, for alignment constraints. +*/ +#if SCSI_NCR_MAX_TAGS > 64 +#define MAX_TAGS (64) +#else +#define MAX_TAGS SCSI_NCR_MAX_TAGS +#endif + +#define NO_TAG (255) + +/* +** Choose appropriate type for tag bitmap. +*/ +#if MAX_TAGS > 32 +typedef u64 tagmap_t; +#else +typedef u32 tagmap_t; +#endif + +/* +** Number of targets supported by the driver. +** n permits target numbers 0..n-1. +** Default is 16, meaning targets #0..#15. +** #7 .. is myself. +*/ + +#ifdef SCSI_NCR_MAX_TARGET +#define MAX_TARGET (SCSI_NCR_MAX_TARGET) +#else +#define MAX_TARGET (16) +#endif + +/* +** Number of logic units supported by the driver. +** n enables logic unit numbers 0..n-1. +** The common SCSI devices require only +** one lun, so take 1 as the default. +*/ + +#ifdef SCSI_NCR_MAX_LUN +#define MAX_LUN SCSI_NCR_MAX_LUN +#else +#define MAX_LUN (1) +#endif + +/* +** Asynchronous pre-scaler (ns). Shall be 40 +*/ + +#ifndef SCSI_NCR_MIN_ASYNC +#define SCSI_NCR_MIN_ASYNC (40) +#endif + +/* +** The maximum number of jobs scheduled for starting. +** There should be one slot per target, and one slot +** for each tag of each target in use. +** The calculation below is actually quite silly ... +*/ + +#ifdef SCSI_NCR_CAN_QUEUE +#define MAX_START (SCSI_NCR_CAN_QUEUE + 4) +#else +#define MAX_START (MAX_TARGET + 7 * MAX_TAGS) +#endif + +/* +** We limit the max number of pending IO to 250. +** since we donnot want to allocate more than 1 +** PAGE for 'scripth'. 
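/*
 * Editor's sketch -- not upstream code: a function-style rendering of the
 * CCB_DONE_VALID() test defined above, to make the 64-bit case easier to
 * read.  A done-queue slot is "empty" only when its low dword is the
 * 0xffffffff filler; on 64-bit machines the high dword must be checked as
 * well, because a real CCB virtual address has non-zero upper bits while
 * the filler does not.  The helper name is the editor's own.
 */
static inline int example_ccb_done_valid(void *cp)
{
#if BITS_PER_LONG == 32
	return (u_long)cp != CCB_DONE_EMPTY;
#else
	return ((u_long)cp & 0xffffffff00000000ul) != 0 &&
	       ((u_long)cp & 0xfffffffful) != CCB_DONE_EMPTY;
#endif
}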
+*/ +#if MAX_START > 250 +#undef MAX_START +#define MAX_START 250 +#endif + +/* +** The maximum number of segments a transfer is split into. +** We support up to 127 segments for both read and write. +** The data scripts are broken into 2 sub-scripts. +** 80 (MAX_SCATTERL) segments are moved from a sub-script +** in on-chip RAM. This makes data transfers shorter than +** 80k (assuming 1k fs) as fast as possible. +*/ + +#define MAX_SCATTER (SCSI_NCR_MAX_SCATTER) + +#if (MAX_SCATTER > 80) +#define MAX_SCATTERL 80 +#define MAX_SCATTERH (MAX_SCATTER - MAX_SCATTERL) +#else +#define MAX_SCATTERL (MAX_SCATTER-1) +#define MAX_SCATTERH 1 +#endif + +/* +** other +*/ + +#define NCR_SNOOP_TIMEOUT (1000000) + +/* +** Other definitions +*/ + +#define initverbose (driver_setup.verbose) +#define bootverbose (np->verbose) + +/*========================================================== +** +** Command control block states. +** +**========================================================== +*/ + +#define HS_IDLE (0) +#define HS_BUSY (1) +#define HS_NEGOTIATE (2) /* sync/wide data transfer*/ +#define HS_DISCONNECT (3) /* Disconnected by target */ + +#define HS_DONEMASK (0x80) +#define HS_COMPLETE (4|HS_DONEMASK) +#define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */ +#define HS_RESET (6|HS_DONEMASK) /* SCSI reset */ +#define HS_ABORTED (7|HS_DONEMASK) /* Transfer aborted */ +#define HS_TIMEOUT (8|HS_DONEMASK) /* Software timeout */ +#define HS_FAIL (9|HS_DONEMASK) /* SCSI or PCI bus errors */ +#define HS_UNEXPECTED (10|HS_DONEMASK)/* Unexpected disconnect */ + +/* +** Invalid host status values used by the SCRIPTS processor +** when the nexus is not fully identified. +** Shall never appear in a CCB. +*/ + +#define HS_INVALMASK (0x40) +#define HS_SELECTING (0|HS_INVALMASK) +#define HS_IN_RESELECT (1|HS_INVALMASK) +#define HS_STARTING (2|HS_INVALMASK) + +/* +** Flags set by the SCRIPT processor for commands +** that have been skipped. +*/ +#define HS_SKIPMASK (0x20) + +/*========================================================== +** +** Software Interrupt Codes +** +**========================================================== +*/ + +#define SIR_BAD_STATUS (1) +#define SIR_XXXXXXXXXX (2) +#define SIR_NEGO_SYNC (3) +#define SIR_NEGO_WIDE (4) +#define SIR_NEGO_FAILED (5) +#define SIR_NEGO_PROTO (6) +#define SIR_REJECT_RECEIVED (7) +#define SIR_REJECT_SENT (8) +#define SIR_IGN_RESIDUE (9) +#define SIR_MISSING_SAVE (10) +#define SIR_RESEL_NO_MSG_IN (11) +#define SIR_RESEL_NO_IDENTIFY (12) +#define SIR_RESEL_BAD_LUN (13) +#define SIR_RESEL_BAD_TARGET (14) +#define SIR_RESEL_BAD_I_T_L (15) +#define SIR_RESEL_BAD_I_T_L_Q (16) +#define SIR_DONE_OVERFLOW (17) +#define SIR_INTFLY (18) +#define SIR_MAX (18) + +/*========================================================== +** +** Extended error codes. +** xerr_status field of struct ccb. +** +**========================================================== +*/ + +#define XE_OK (0) +#define XE_EXTRA_DATA (1) /* unexpected data phase */ +#define XE_BAD_PHASE (2) /* illegal phase (4/5) */ + +/*========================================================== +** +** Negotiation status. +** nego_status field of struct ccb. +** +**========================================================== +*/ + +#define NS_NOCHANGE (0) +#define NS_SYNC (1) +#define NS_WIDE (2) +#define NS_PPR (4) + +/*========================================================== +** +** Misc. 
+** +**========================================================== +*/ + +#define CCB_MAGIC (0xf2691ad2) + +/*========================================================== +** +** Declaration of structs. +** +**========================================================== +*/ + +static struct scsi_transport_template *ncr53c8xx_transport_template = NULL; + +struct tcb; +struct lcb; +struct ccb; +struct ncb; +struct script; + +struct link { + ncrcmd l_cmd; + ncrcmd l_paddr; +}; + +struct usrcmd { + u_long target; + u_long lun; + u_long data; + u_long cmd; +}; + +#define UC_SETSYNC 10 +#define UC_SETTAGS 11 +#define UC_SETDEBUG 12 +#define UC_SETORDER 13 +#define UC_SETWIDE 14 +#define UC_SETFLAG 15 +#define UC_SETVERBOSE 17 + +#define UF_TRACE (0x01) +#define UF_NODISC (0x02) +#define UF_NOSCAN (0x04) + +/*======================================================================== +** +** Declaration of structs: target control block +** +**======================================================================== +*/ +struct tcb { + /*---------------------------------------------------------------- + ** During reselection the ncr jumps to this point with SFBR + ** set to the encoded target number with bit 7 set. + ** if it's not this target, jump to the next. + ** + ** JUMP IF (SFBR != #target#), @(next tcb) + **---------------------------------------------------------------- + */ + struct link jump_tcb; + + /*---------------------------------------------------------------- + ** Load the actual values for the sxfer and the scntl3 + ** register (sync/wide mode). + ** + ** SCR_COPY (1), @(sval field of this tcb), @(sxfer register) + ** SCR_COPY (1), @(wval field of this tcb), @(scntl3 register) + **---------------------------------------------------------------- + */ + ncrcmd getscr[6]; + + /*---------------------------------------------------------------- + ** Get the IDENTIFY message and load the LUN to SFBR. + ** + ** CALL, + **---------------------------------------------------------------- + */ + struct link call_lun; + + /*---------------------------------------------------------------- + ** Now look for the right lun. + ** + ** For i = 0 to 3 + ** SCR_JUMP ^ IFTRUE(MASK(i, 3)), @(first lcb mod. i) + ** + ** Recent chips will prefetch the 4 JUMPS using only 1 burst. + ** It is kind of hashcoding. + **---------------------------------------------------------------- + */ + struct link jump_lcb[4]; /* JUMPs for reselection */ + struct lcb * lp[MAX_LUN]; /* The lcb's of this tcb */ + + /*---------------------------------------------------------------- + ** Pointer to the ccb used for negotiation. + ** Prevent from starting a negotiation for all queued commands + ** when tagged command queuing is enabled. + **---------------------------------------------------------------- + */ + struct ccb * nego_cp; + + /*---------------------------------------------------------------- + ** statistical data + **---------------------------------------------------------------- + */ + u_long transfers; + u_long bytes; + + /*---------------------------------------------------------------- + ** negotiation of wide and synch transfer and device quirks. 
+ **---------------------------------------------------------------- + */ +#ifdef SCSI_NCR_BIG_ENDIAN +/*0*/ u16 period; +/*2*/ u_char sval; +/*3*/ u_char minsync; +/*0*/ u_char wval; +/*1*/ u_char widedone; +/*2*/ u_char quirks; +/*3*/ u_char maxoffs; +#else +/*0*/ u_char minsync; +/*1*/ u_char sval; +/*2*/ u16 period; +/*0*/ u_char maxoffs; +/*1*/ u_char quirks; +/*2*/ u_char widedone; +/*3*/ u_char wval; +#endif + + /* User settable limits and options. */ + u_char usrsync; + u_char usrwide; + u_char usrtags; + u_char usrflag; + struct scsi_target *starget; +}; + +/*======================================================================== +** +** Declaration of structs: lun control block +** +**======================================================================== +*/ +struct lcb { + /*---------------------------------------------------------------- + ** During reselection the ncr jumps to this point + ** with SFBR set to the "Identify" message. + ** if it's not this lun, jump to the next. + ** + ** JUMP IF (SFBR != #lun#), @(next lcb of this target) + ** + ** It is this lun. Load TEMP with the nexus jumps table + ** address and jump to RESEL_TAG (or RESEL_NOTAG). + ** + ** SCR_COPY (4), p_jump_ccb, TEMP, + ** SCR_JUMP, + **---------------------------------------------------------------- + */ + struct link jump_lcb; + ncrcmd load_jump_ccb[3]; + struct link jump_tag; + ncrcmd p_jump_ccb; /* Jump table bus address */ + + /*---------------------------------------------------------------- + ** Jump table used by the script processor to directly jump + ** to the CCB corresponding to the reselected nexus. + ** Address is allocated on 256 bytes boundary in order to + ** allow 8 bit calculation of the tag jump entry for up to + ** 64 possible tags. + **---------------------------------------------------------------- + */ + u32 jump_ccb_0; /* Default table if no tags */ + u32 *jump_ccb; /* Virtual address */ + + /*---------------------------------------------------------------- + ** CCB queue management. + **---------------------------------------------------------------- + */ + struct list_head free_ccbq; /* Queue of available CCBs */ + struct list_head busy_ccbq; /* Queue of busy CCBs */ + struct list_head wait_ccbq; /* Queue of waiting for IO CCBs */ + struct list_head skip_ccbq; /* Queue of skipped CCBs */ + u_char actccbs; /* Number of allocated CCBs */ + u_char busyccbs; /* CCBs busy for this lun */ + u_char queuedccbs; /* CCBs queued to the controller*/ + u_char queuedepth; /* Queue depth for this lun */ + u_char scdev_depth; /* SCSI device queue depth */ + u_char maxnxs; /* Max possible nexuses */ + + /*---------------------------------------------------------------- + ** Control of tagged command queuing. + ** Tags allocation is performed using a circular buffer. + ** This avoids using a loop for tag allocation. + **---------------------------------------------------------------- + */ + u_char ia_tag; /* Allocation index */ + u_char if_tag; /* Freeing index */ + u_char cb_tags[MAX_TAGS]; /* Circular tags buffer */ + u_char usetags; /* Command queuing is active */ + u_char maxtags; /* Max nr of tags asked by user */ + u_char numtags; /* Current number of tags */ + + /*---------------------------------------------------------------- + ** QUEUE FULL control and ORDERED tag control. + **---------------------------------------------------------------- + */ + /*---------------------------------------------------------------- + ** QUEUE FULL and ORDERED tag control. 
+ **---------------------------------------------------------------- + */ + u16 num_good; /* Nr of GOOD since QUEUE FULL */ + tagmap_t tags_umap; /* Used tags bitmap */ + tagmap_t tags_smap; /* Tags in use at 'tag_stime' */ + u_long tags_stime; /* Last time we set smap=umap */ + struct ccb * held_ccb; /* CCB held for QUEUE FULL */ +}; + +/*======================================================================== +** +** Declaration of structs: the launch script. +** +**======================================================================== +** +** It is part of the CCB and is called by the scripts processor to +** start or restart the data structure (nexus). +** This 6 DWORDs mini script makes use of prefetching. +** +**------------------------------------------------------------------------ +*/ +struct launch { + /*---------------------------------------------------------------- + ** SCR_COPY(4), @(p_phys), @(dsa register) + ** SCR_JUMP, @(scheduler_point) + **---------------------------------------------------------------- + */ + ncrcmd setup_dsa[3]; /* Copy 'phys' address to dsa */ + struct link schedule; /* Jump to scheduler point */ + ncrcmd p_phys; /* 'phys' header bus address */ +}; + +/*======================================================================== +** +** Declaration of structs: global HEADER. +** +**======================================================================== +** +** This substructure is copied from the ccb to a global address after +** selection (or reselection) and copied back before disconnect. +** +** These fields are accessible to the script processor. +** +**------------------------------------------------------------------------ +*/ + +struct head { + /*---------------------------------------------------------------- + ** Saved data pointer. + ** Points to the position in the script responsible for the + ** actual transfer transfer of data. + ** It's written after reception of a SAVE_DATA_POINTER message. + ** The goalpointer points after the last transfer command. + **---------------------------------------------------------------- + */ + u32 savep; + u32 lastp; + u32 goalp; + + /*---------------------------------------------------------------- + ** Alternate data pointer. + ** They are copied back to savep/lastp/goalp by the SCRIPTS + ** when the direction is unknown and the device claims data out. + **---------------------------------------------------------------- + */ + u32 wlastp; + u32 wgoalp; + + /*---------------------------------------------------------------- + ** The virtual address of the ccb containing this header. + **---------------------------------------------------------------- + */ + struct ccb * cp; + + /*---------------------------------------------------------------- + ** Status fields. + **---------------------------------------------------------------- + */ + u_char scr_st[4]; /* script status */ + u_char status[4]; /* host status. must be the */ + /* last DWORD of the header. */ +}; + +/* +** The status bytes are used by the host and the script processor. +** +** The byte corresponding to the host_status must be stored in the +** last DWORD of the CCB header since it is used for command +** completion (ncr_wakeup()). Doing so, we are sure that the header +** has been entirely copied back to the CCB when the host_status is +** seen complete by the CPU. 
+** +** The last four bytes (status[4]) are copied to the scratchb register +** (declared as scr0..scr3 in ncr_reg.h) just after the select/reselect, +** and copied back just after disconnecting. +** Inside the script the XX_REG are used. +** +** The first four bytes (scr_st[4]) are used inside the script by +** "COPY" commands. +** Because source and destination must have the same alignment +** in a DWORD, the fields HAVE to be at the chosen offsets. +** xerr_st 0 (0x34) scratcha +** sync_st 1 (0x05) sxfer +** wide_st 3 (0x03) scntl3 +*/ + +/* +** Last four bytes (script) +*/ +#define QU_REG scr0 +#define HS_REG scr1 +#define HS_PRT nc_scr1 +#define SS_REG scr2 +#define SS_PRT nc_scr2 +#define PS_REG scr3 + +/* +** Last four bytes (host) +*/ +#ifdef SCSI_NCR_BIG_ENDIAN +#define actualquirks phys.header.status[3] +#define host_status phys.header.status[2] +#define scsi_status phys.header.status[1] +#define parity_status phys.header.status[0] +#else +#define actualquirks phys.header.status[0] +#define host_status phys.header.status[1] +#define scsi_status phys.header.status[2] +#define parity_status phys.header.status[3] +#endif + +/* +** First four bytes (script) +*/ +#define xerr_st header.scr_st[0] +#define sync_st header.scr_st[1] +#define nego_st header.scr_st[2] +#define wide_st header.scr_st[3] + +/* +** First four bytes (host) +*/ +#define xerr_status phys.xerr_st +#define nego_status phys.nego_st + +/*========================================================== +** +** Declaration of structs: Data structure block +** +**========================================================== +** +** During execution of a ccb by the script processor, +** the DSA (data structure address) register points +** to this substructure of the ccb. +** This substructure contains the header with +** the script-processor-changeable data and +** data blocks for the indirect move commands. +** +**---------------------------------------------------------- +*/ + +struct dsb { + + /* + ** Header. + */ + + struct head header; + + /* + ** Table data for Script + */ + + struct scr_tblsel select; + struct scr_tblmove smsg ; + struct scr_tblmove cmd ; + struct scr_tblmove sense ; + struct scr_tblmove data[MAX_SCATTER]; +}; + + +/*======================================================================== +** +** Declaration of structs: Command control block. +** +**======================================================================== +*/ +struct ccb { + /*---------------------------------------------------------------- + ** This is the data structure which is pointed by the DSA + ** register when it is executed by the script processor. + ** It must be the first entry because it contains the header + ** as first entry that must be cache line aligned. + **---------------------------------------------------------------- + */ + struct dsb phys; + + /*---------------------------------------------------------------- + ** Mini-script used at CCB execution start-up. + ** Load the DSA with the data structure address (phys) and + ** jump to SELECT. Jump to CANCEL if CCB is to be canceled. + **---------------------------------------------------------------- + */ + struct launch start; + + /*---------------------------------------------------------------- + ** Mini-script used at CCB relection to restart the nexus. + ** Load the DSA with the data structure address (phys) and + ** jump to RESEL_DSA. Jump to ABORT if CCB is to be aborted. 
+ **---------------------------------------------------------------- + */ + struct launch restart; + + /*---------------------------------------------------------------- + ** If a data transfer phase is terminated too early + ** (after reception of a message (i.e. DISCONNECT)), + ** we have to prepare a mini script to transfer + ** the rest of the data. + **---------------------------------------------------------------- + */ + ncrcmd patch[8]; + + /*---------------------------------------------------------------- + ** The general SCSI driver provides a + ** pointer to a control block. + **---------------------------------------------------------------- + */ + struct scsi_cmnd *cmd; /* SCSI command */ + u_char cdb_buf[16]; /* Copy of CDB */ + u_char sense_buf[64]; + int data_len; /* Total data length */ + + /*---------------------------------------------------------------- + ** Message areas. + ** We prepare a message to be sent after selection. + ** We may use a second one if the command is rescheduled + ** due to GETCC or QFULL. + ** Contents are IDENTIFY and SIMPLE_TAG. + ** While negotiating sync or wide transfer, + ** a SDTR or WDTR message is appended. + **---------------------------------------------------------------- + */ + u_char scsi_smsg [8]; + u_char scsi_smsg2[8]; + + /*---------------------------------------------------------------- + ** Other fields. + **---------------------------------------------------------------- + */ + u_long p_ccb; /* BUS address of this CCB */ + u_char sensecmd[6]; /* Sense command */ + u_char tag; /* Tag for this transfer */ + /* 255 means no tag */ + u_char target; + u_char lun; + u_char queued; + u_char auto_sense; + struct ccb * link_ccb; /* Host adapter CCB chain */ + struct list_head link_ccbq; /* Link to unit CCB queue */ + u32 startp; /* Initial data pointer */ + u_long magic; /* Free / busy CCB flag */ +}; + +#define CCB_PHYS(cp,lbl) (cp->p_ccb + offsetof(struct ccb, lbl)) + + +/*======================================================================== +** +** Declaration of structs: NCR device descriptor +** +**======================================================================== +*/ +struct ncb { + /*---------------------------------------------------------------- + ** The global header. + ** It is accessible to both the host and the script processor. + ** Must be cache line size aligned (32 for x86) in order to + ** allow cache line bursting when it is copied to/from CCB. + **---------------------------------------------------------------- + */ + struct head header; + + /*---------------------------------------------------------------- + ** CCBs management queues. + **---------------------------------------------------------------- + */ + struct scsi_cmnd *waiting_list; /* Commands waiting for a CCB */ + /* when lcb is not allocated. */ + struct scsi_cmnd *done_list; /* Commands waiting for done() */ + /* callback to be invoked. */ + spinlock_t smp_lock; /* Lock for SMP threading */ + + /*---------------------------------------------------------------- + ** Chip and controller identification. + **---------------------------------------------------------------- + */ + int unit; /* Unit number */ + char inst_name[16]; /* ncb instance name */ + + /*---------------------------------------------------------------- + ** Initial value of some IO register bits. + ** These values are assumed to have been set by BIOS, and may + ** be used for probing adapter implementation differences. 
+ **---------------------------------------------------------------- + */ + u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest0, sv_ctest3, + sv_ctest4, sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4; + + /*---------------------------------------------------------------- + ** Actual initial value of IO register bits used by the + ** driver. They are loaded at initialisation according to + ** features that are to be enabled. + **---------------------------------------------------------------- + */ + u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest0, rv_ctest3, + rv_ctest4, rv_ctest5, rv_stest2; + + /*---------------------------------------------------------------- + ** Targets management. + ** During reselection the ncr jumps to jump_tcb. + ** The SFBR register is loaded with the encoded target id. + ** For i = 0 to 3 + ** SCR_JUMP ^ IFTRUE(MASK(i, 3)), @(next tcb mod. i) + ** + ** Recent chips will prefetch the 4 JUMPS using only 1 burst. + ** It is kind of hashcoding. + **---------------------------------------------------------------- + */ + struct link jump_tcb[4]; /* JUMPs for reselection */ + struct tcb target[MAX_TARGET]; /* Target data */ + + /*---------------------------------------------------------------- + ** Virtual and physical bus addresses of the chip. + **---------------------------------------------------------------- + */ + void __iomem *vaddr; /* Virtual and bus address of */ + unsigned long paddr; /* chip's IO registers. */ + unsigned long paddr2; /* On-chip RAM bus address. */ + volatile /* Pointer to volatile for */ + struct ncr_reg __iomem *reg; /* memory mapped IO. */ + + /*---------------------------------------------------------------- + ** SCRIPTS virtual and physical bus addresses. + ** 'script' is loaded in the on-chip RAM if present. + ** 'scripth' stays in main memory. + **---------------------------------------------------------------- + */ + struct script *script0; /* Copies of script and scripth */ + struct scripth *scripth0; /* relocated for this ncb. */ + struct scripth *scripth; /* Actual scripth virt. address */ + u_long p_script; /* Actual script and scripth */ + u_long p_scripth; /* bus addresses. */ + + /*---------------------------------------------------------------- + ** General controller parameters and configuration. + **---------------------------------------------------------------- + */ + struct device *dev; + u_char revision_id; /* PCI device revision id */ + u32 irq; /* IRQ level */ + u32 features; /* Chip features map */ + u_char myaddr; /* SCSI id of the adapter */ + u_char maxburst; /* log base 2 of dwords burst */ + u_char maxwide; /* Maximum transfer width */ + u_char minsync; /* Minimum sync period factor */ + u_char maxsync; /* Maximum sync period factor */ + u_char maxoffs; /* Max scsi offset */ + u_char multiplier; /* Clock multiplier (1,2,4) */ + u_char clock_divn; /* Number of clock divisors */ + u_long clock_khz; /* SCSI clock frequency in KHz */ + + /*---------------------------------------------------------------- + ** Start queue management. + ** It is filled up by the host processor and accessed by the + ** SCRIPTS processor in order to start SCSI commands. + **---------------------------------------------------------------- + */ + u16 squeueput; /* Next free slot of the queue */ + u16 actccbs; /* Number of allocated CCBs */ + u16 queuedccbs; /* Number of CCBs in start queue*/ + u16 queuedepth; /* Start queue depth */ + + /*---------------------------------------------------------------- + ** Timeout handler. 
+ **---------------------------------------------------------------- + */ + struct timer_list timer; /* Timer handler link header */ + u_long lasttime; + u_long settle_time; /* Resetting the SCSI BUS */ + + /*---------------------------------------------------------------- + ** Debugging and profiling. + **---------------------------------------------------------------- + */ + struct ncr_reg regdump; /* Register dump */ + u_long regtime; /* Time it has been done */ + + /*---------------------------------------------------------------- + ** Miscellaneous buffers accessed by the scripts-processor. + ** They shall be DWORD aligned, because they may be read or + ** written with a SCR_COPY script command. + **---------------------------------------------------------------- + */ + u_char msgout[8]; /* Buffer for MESSAGE OUT */ + u_char msgin [8]; /* Buffer for MESSAGE IN */ + u32 lastmsg; /* Last SCSI message sent */ + u_char scratch; /* Scratch for SCSI receive */ + + /*---------------------------------------------------------------- + ** Miscellaneous configuration and status parameters. + **---------------------------------------------------------------- + */ + u_char disc; /* Disconnection allowed */ + u_char scsi_mode; /* Current SCSI BUS mode */ + u_char order; /* Tag order to use */ + u_char verbose; /* Verbosity for this controller*/ + int ncr_cache; /* Used for cache test at init. */ + u_long p_ncb; /* BUS address of this NCB */ + + /*---------------------------------------------------------------- + ** Command completion handling. + **---------------------------------------------------------------- + */ +#ifdef SCSI_NCR_CCB_DONE_SUPPORT + struct ccb *(ccb_done[MAX_DONE]); + int ccb_done_ic; +#endif + /*---------------------------------------------------------------- + ** Fields that should be removed or changed. + **---------------------------------------------------------------- + */ + struct ccb *ccb; /* Global CCB */ + struct usrcmd user; /* Command from user */ + volatile u_char release_stage; /* Synchronisation stage on release */ +}; + +#define NCB_SCRIPT_PHYS(np,lbl) (np->p_script + offsetof (struct script, lbl)) +#define NCB_SCRIPTH_PHYS(np,lbl) (np->p_scripth + offsetof (struct scripth,lbl)) + +/*========================================================== +** +** +** Script for NCR-Processor. +** +** Use ncr_script_fill() to create the variable parts. +** Use ncr_script_copy_and_bind() to make a copy and +** bind to physical addresses. +** +** +**========================================================== +** +** We have to know the offsets of all labels before +** we reach them (for forward jumps). +** Therefore we declare a struct here. +** If you make changes inside the script, +** DONT FORGET TO CHANGE THE LENGTHS HERE! +** +**---------------------------------------------------------- +*/ + +/* +** For HP Zalon/53c720 systems, the Zalon interface +** between CPU and 53c720 does prefetches, which causes +** problems with self modifying scripts. The problem +** is overcome by calling a dummy subroutine after each +** modification, to force a refetch of the script on +** return from the subroutine. +*/ + +#ifdef CONFIG_NCR53C8XX_PREFETCH +#define PREFETCH_FLUSH_CNT 2 +#define PREFETCH_FLUSH SCR_CALL, PADDRH (wait_dma), +#else +#define PREFETCH_FLUSH_CNT 0 +#define PREFETCH_FLUSH +#endif + +/* +** Script fragments which are loaded into the on-chip RAM +** of 825A, 875 and 895 chips. 
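**
** (Editor's note, illustrative only: after each self-modifying SCR_COPY
** the SCRIPTS insert PREFETCH_FLUSH, defined above, so that the dummy
** CALL to wait_dma forces the Zalon/53c720 to refetch the patched
** instructions.  The typical pattern, as used around the LOADPOS label
** further down, is:
**
**	SCR_COPY_F (4),
**	RADDR (dsa),
**	PADDR (loadpos),
**	PREFETCH_FLUSH
**	SCR_COPY (sizeof (struct head)),
** )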
+*/ +struct script { + ncrcmd start [ 5]; + ncrcmd startpos [ 1]; + ncrcmd select [ 6]; + ncrcmd select2 [ 9 + PREFETCH_FLUSH_CNT]; + ncrcmd loadpos [ 4]; + ncrcmd send_ident [ 9]; + ncrcmd prepare [ 6]; + ncrcmd prepare2 [ 7]; + ncrcmd command [ 6]; + ncrcmd dispatch [ 32]; + ncrcmd clrack [ 4]; + ncrcmd no_data [ 17]; + ncrcmd status [ 8]; + ncrcmd msg_in [ 2]; + ncrcmd msg_in2 [ 16]; + ncrcmd msg_bad [ 4]; + ncrcmd setmsg [ 7]; + ncrcmd cleanup [ 6]; + ncrcmd complete [ 9]; + ncrcmd cleanup_ok [ 8 + PREFETCH_FLUSH_CNT]; + ncrcmd cleanup0 [ 1]; +#ifndef SCSI_NCR_CCB_DONE_SUPPORT + ncrcmd signal [ 12]; +#else + ncrcmd signal [ 9]; + ncrcmd done_pos [ 1]; + ncrcmd done_plug [ 2]; + ncrcmd done_end [ 7]; +#endif + ncrcmd save_dp [ 7]; + ncrcmd restore_dp [ 5]; + ncrcmd disconnect [ 10]; + ncrcmd msg_out [ 9]; + ncrcmd msg_out_done [ 7]; + ncrcmd idle [ 2]; + ncrcmd reselect [ 8]; + ncrcmd reselected [ 8]; + ncrcmd resel_dsa [ 6 + PREFETCH_FLUSH_CNT]; + ncrcmd loadpos1 [ 4]; + ncrcmd resel_lun [ 6]; + ncrcmd resel_tag [ 6]; + ncrcmd jump_to_nexus [ 4 + PREFETCH_FLUSH_CNT]; + ncrcmd nexus_indirect [ 4]; + ncrcmd resel_notag [ 4]; + ncrcmd data_in [MAX_SCATTERL * 4]; + ncrcmd data_in2 [ 4]; + ncrcmd data_out [MAX_SCATTERL * 4]; + ncrcmd data_out2 [ 4]; +}; + +/* +** Script fragments which stay in main memory for all chips. +*/ +struct scripth { + ncrcmd tryloop [MAX_START*2]; + ncrcmd tryloop2 [ 2]; +#ifdef SCSI_NCR_CCB_DONE_SUPPORT + ncrcmd done_queue [MAX_DONE*5]; + ncrcmd done_queue2 [ 2]; +#endif + ncrcmd select_no_atn [ 8]; + ncrcmd cancel [ 4]; + ncrcmd skip [ 9 + PREFETCH_FLUSH_CNT]; + ncrcmd skip2 [ 19]; + ncrcmd par_err_data_in [ 6]; + ncrcmd par_err_other [ 4]; + ncrcmd msg_reject [ 8]; + ncrcmd msg_ign_residue [ 24]; + ncrcmd msg_extended [ 10]; + ncrcmd msg_ext_2 [ 10]; + ncrcmd msg_wdtr [ 14]; + ncrcmd send_wdtr [ 7]; + ncrcmd msg_ext_3 [ 10]; + ncrcmd msg_sdtr [ 14]; + ncrcmd send_sdtr [ 7]; + ncrcmd nego_bad_phase [ 4]; + ncrcmd msg_out_abort [ 10]; + ncrcmd hdata_in [MAX_SCATTERH * 4]; + ncrcmd hdata_in2 [ 2]; + ncrcmd hdata_out [MAX_SCATTERH * 4]; + ncrcmd hdata_out2 [ 2]; + ncrcmd reset [ 4]; + ncrcmd aborttag [ 4]; + ncrcmd abort [ 2]; + ncrcmd abort_resel [ 20]; + ncrcmd resend_ident [ 4]; + ncrcmd clratn_go_on [ 3]; + ncrcmd nxtdsp_go_on [ 1]; + ncrcmd sdata_in [ 8]; + ncrcmd data_io [ 18]; + ncrcmd bad_identify [ 12]; + ncrcmd bad_i_t_l [ 4]; + ncrcmd bad_i_t_l_q [ 4]; + ncrcmd bad_target [ 8]; + ncrcmd bad_status [ 8]; + ncrcmd start_ram [ 4 + PREFETCH_FLUSH_CNT]; + ncrcmd start_ram0 [ 4]; + ncrcmd sto_restart [ 5]; + ncrcmd wait_dma [ 2]; + ncrcmd snooptest [ 9]; + ncrcmd snoopend [ 2]; +}; + +/*========================================================== +** +** +** Function headers. 
+** +** +**========================================================== +*/ + +static void ncr_alloc_ccb (struct ncb *np, u_char tn, u_char ln); +static void ncr_complete (struct ncb *np, struct ccb *cp); +static void ncr_exception (struct ncb *np); +static void ncr_free_ccb (struct ncb *np, struct ccb *cp); +static void ncr_init_ccb (struct ncb *np, struct ccb *cp); +static void ncr_init_tcb (struct ncb *np, u_char tn); +static struct lcb * ncr_alloc_lcb (struct ncb *np, u_char tn, u_char ln); +static struct lcb * ncr_setup_lcb (struct ncb *np, struct scsi_device *sdev); +static void ncr_getclock (struct ncb *np, int mult); +static void ncr_selectclock (struct ncb *np, u_char scntl3); +static struct ccb *ncr_get_ccb (struct ncb *np, struct scsi_cmnd *cmd); +static void ncr_chip_reset (struct ncb *np, int delay); +static void ncr_init (struct ncb *np, int reset, char * msg, u_long code); +static int ncr_int_sbmc (struct ncb *np); +static int ncr_int_par (struct ncb *np); +static void ncr_int_ma (struct ncb *np); +static void ncr_int_sir (struct ncb *np); +static void ncr_int_sto (struct ncb *np); +static void ncr_negotiate (struct ncb* np, struct tcb* tp); +static int ncr_prepare_nego(struct ncb *np, struct ccb *cp, u_char *msgptr); + +static void ncr_script_copy_and_bind + (struct ncb *np, ncrcmd *src, ncrcmd *dst, int len); +static void ncr_script_fill (struct script * scr, struct scripth * scripth); +static int ncr_scatter (struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd); +static void ncr_getsync (struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl3p); +static void ncr_setsync (struct ncb *np, struct ccb *cp, u_char scntl3, u_char sxfer); +static void ncr_setup_tags (struct ncb *np, struct scsi_device *sdev); +static void ncr_setwide (struct ncb *np, struct ccb *cp, u_char wide, u_char ack); +static int ncr_snooptest (struct ncb *np); +static void ncr_timeout (struct ncb *np); +static void ncr_wakeup (struct ncb *np, u_long code); +static void ncr_wakeup_done (struct ncb *np); +static void ncr_start_next_ccb (struct ncb *np, struct lcb * lp, int maxn); +static void ncr_put_start_queue(struct ncb *np, struct ccb *cp); + +static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd); +static void process_waiting_list(struct ncb *np, int sts); + +#define requeue_waiting_list(np) process_waiting_list((np), DID_OK) +#define reset_waiting_list(np) process_waiting_list((np), DID_RESET) + +static inline char *ncr_name (struct ncb *np) +{ + return np->inst_name; +} + + +/*========================================================== +** +** +** Scripts for NCR-Processor. +** +** Use ncr_script_bind for binding to physical addresses. +** +** +**========================================================== +** +** NADDR generates a reference to a field of the controller data. +** PADDR generates a reference to another part of the script. +** RADDR generates a reference to a script processor register. +** FADDR generates a reference to a script processor register +** with offset. 
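+**
+** For example, NADDR (msgout) expands to
+** RELOC_SOFTC | offsetof(struct ncb, msgout); when the script is
+** copied by ncr_script_copy_and_bind(), the RELOC_SOFTC tag is
+** replaced by np->p_ncb, so the SCRIPTS processor ends up with the
+** bus address of the msgout buffer inside the NCB.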
+** +**---------------------------------------------------------- +*/ + +#define RELOC_SOFTC 0x40000000 +#define RELOC_LABEL 0x50000000 +#define RELOC_REGISTER 0x60000000 +#define RELOC_LABELH 0x80000000 +#define RELOC_MASK 0xf0000000 + +#define NADDR(label) (RELOC_SOFTC | offsetof(struct ncb, label)) +#define PADDR(label) (RELOC_LABEL | offsetof(struct script, label)) +#define PADDRH(label) (RELOC_LABELH | offsetof(struct scripth, label)) +#define RADDR(label) (RELOC_REGISTER | REG(label)) +#define FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs))) + + +static struct script script0 __initdata = { +/*--------------------------< START >-----------------------*/ { + /* + ** This NOP will be patched with LED ON + ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) + */ + SCR_NO_OP, + 0, + /* + ** Clear SIGP. + */ + SCR_FROM_REG (ctest2), + 0, + /* + ** Then jump to a certain point in tryloop. + ** Due to the lack of indirect addressing the code + ** is self modifying here. + */ + SCR_JUMP, +}/*-------------------------< STARTPOS >--------------------*/,{ + PADDRH(tryloop), + +}/*-------------------------< SELECT >----------------------*/,{ + /* + ** DSA contains the address of a scheduled + ** data structure. + ** + ** SCRATCHA contains the address of the script, + ** which starts the next entry. + ** + ** Set Initiator mode. + ** + ** (Target mode is left as an exercise for the reader) + */ + + SCR_CLR (SCR_TRG), + 0, + SCR_LOAD_REG (HS_REG, HS_SELECTING), + 0, + + /* + ** And try to select this target. + */ + SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select), + PADDR (reselect), + +}/*-------------------------< SELECT2 >----------------------*/,{ + /* + ** Now there are 4 possibilities: + ** + ** (1) The ncr loses arbitration. + ** This is ok, because it will try again, + ** when the bus becomes idle. + ** (But beware of the timeout function!) + ** + ** (2) The ncr is reselected. + ** Then the script processor takes the jump + ** to the RESELECT label. + ** + ** (3) The ncr wins arbitration. + ** Then it will execute SCRIPTS instruction until + ** the next instruction that checks SCSI phase. + ** Then will stop and wait for selection to be + ** complete or selection time-out to occur. + ** As a result the SCRIPTS instructions until + ** LOADPOS + 2 should be executed in parallel with + ** the SCSI core performing selection. + */ + + /* + ** The MESSAGE_REJECT problem seems to be due to a selection + ** timing problem. + ** Wait immediately for the selection to complete. + ** (2.5x behaves so) + */ + SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)), + 0, + + /* + ** Next time use the next slot. + */ + SCR_COPY (4), + RADDR (temp), + PADDR (startpos), + /* + ** The ncr doesn't have an indirect load + ** or store command. So we have to + ** copy part of the control block to a + ** fixed place, where we can access it. + ** + ** We patch the address part of a + ** COPY command with the DSA-register. + */ + SCR_COPY_F (4), + RADDR (dsa), + PADDR (loadpos), + /* + ** Flush script prefetch if required + */ + PREFETCH_FLUSH + /* + ** then we do the actual copy. + */ + SCR_COPY (sizeof (struct head)), + /* + ** continued after the next label ... + */ +}/*-------------------------< LOADPOS >---------------------*/,{ + 0, + NADDR (header), + /* + ** Wait for the next phase or the selection + ** to complete or time-out. + */ + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)), + PADDR (prepare), + +}/*-------------------------< SEND_IDENT >----------------------*/,{ + /* + ** Selection complete. 
+ ** Send the IDENTIFY and SIMPLE_TAG messages + ** (and the EXTENDED_SDTR message) + */ + SCR_MOVE_TBL ^ SCR_MSG_OUT, + offsetof (struct dsb, smsg), + SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)), + PADDRH (resend_ident), + SCR_LOAD_REG (scratcha, 0x80), + 0, + SCR_COPY (1), + RADDR (scratcha), + NADDR (lastmsg), +}/*-------------------------< PREPARE >----------------------*/,{ + /* + ** load the savep (saved pointer) into + ** the TEMP register (actual pointer) + */ + SCR_COPY (4), + NADDR (header.savep), + RADDR (temp), + /* + ** Initialize the status registers + */ + SCR_COPY (4), + NADDR (header.status), + RADDR (scr0), +}/*-------------------------< PREPARE2 >---------------------*/,{ + /* + ** Initialize the msgout buffer with a NOOP message. + */ + SCR_LOAD_REG (scratcha, NOP), + 0, + SCR_COPY (1), + RADDR (scratcha), + NADDR (msgout), + /* + ** Anticipate the COMMAND phase. + ** This is the normal case for initial selection. + */ + SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)), + PADDR (dispatch), + +}/*-------------------------< COMMAND >--------------------*/,{ + /* + ** ... and send the command + */ + SCR_MOVE_TBL ^ SCR_COMMAND, + offsetof (struct dsb, cmd), + /* + ** If status is still HS_NEGOTIATE, negotiation failed. + ** We check this here, since we want to do that + ** only once. + */ + SCR_FROM_REG (HS_REG), + 0, + SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), + SIR_NEGO_FAILED, + +}/*-----------------------< DISPATCH >----------------------*/,{ + /* + ** MSG_IN is the only phase that shall be + ** entered at least once for each (re)selection. + ** So we test it first. + */ + SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)), + PADDR (msg_in), + + SCR_RETURN ^ IFTRUE (IF (SCR_DATA_OUT)), + 0, + /* + ** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 4. + ** Possible data corruption during Memory Write and Invalidate. + ** This work-around resets the addressing logic prior to the + ** start of the first MOVE of a DATA IN phase. + ** (See Documentation/scsi/ncr53c8xx.rst for more information) + */ + SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)), + 20, + SCR_COPY (4), + RADDR (scratcha), + RADDR (scratcha), + SCR_RETURN, + 0, + SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)), + PADDR (status), + SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)), + PADDR (command), + SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)), + PADDR (msg_out), + /* + ** Discard one illegal phase byte, if required. + */ + SCR_LOAD_REG (scratcha, XE_BAD_PHASE), + 0, + SCR_COPY (1), + RADDR (scratcha), + NADDR (xerr_st), + SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_OUT)), + 8, + SCR_MOVE_ABS (1) ^ SCR_ILG_OUT, + NADDR (scratch), + SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_IN)), + 8, + SCR_MOVE_ABS (1) ^ SCR_ILG_IN, + NADDR (scratch), + SCR_JUMP, + PADDR (dispatch), + +}/*-------------------------< CLRACK >----------------------*/,{ + /* + ** Terminate possible pending message phase. + */ + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP, + PADDR (dispatch), + +}/*-------------------------< NO_DATA >--------------------*/,{ + /* + ** The target wants to tranfer too much data + ** or in the wrong direction. + ** Remember that in extended error. + */ + SCR_LOAD_REG (scratcha, XE_EXTRA_DATA), + 0, + SCR_COPY (1), + RADDR (scratcha), + NADDR (xerr_st), + /* + ** Discard one data byte, if required. + */ + SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)), + 8, + SCR_MOVE_ABS (1) ^ SCR_DATA_OUT, + NADDR (scratch), + SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)), + 8, + SCR_MOVE_ABS (1) ^ SCR_DATA_IN, + NADDR (scratch), + /* + ** .. and repeat as required. 
+ */ + SCR_CALL, + PADDR (dispatch), + SCR_JUMP, + PADDR (no_data), + +}/*-------------------------< STATUS >--------------------*/,{ + /* + ** get the status + */ + SCR_MOVE_ABS (1) ^ SCR_STATUS, + NADDR (scratch), + /* + ** save status to scsi_status. + ** mark as complete. + */ + SCR_TO_REG (SS_REG), + 0, + SCR_LOAD_REG (HS_REG, HS_COMPLETE), + 0, + SCR_JUMP, + PADDR (dispatch), +}/*-------------------------< MSG_IN >--------------------*/,{ + /* + ** Get the first byte of the message + ** and save it to SCRATCHA. + ** + ** The script processor doesn't negate the + ** ACK signal after this transfer. + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + NADDR (msgin[0]), +}/*-------------------------< MSG_IN2 >--------------------*/,{ + /* + ** Handle this message. + */ + SCR_JUMP ^ IFTRUE (DATA (COMMAND_COMPLETE)), + PADDR (complete), + SCR_JUMP ^ IFTRUE (DATA (DISCONNECT)), + PADDR (disconnect), + SCR_JUMP ^ IFTRUE (DATA (SAVE_POINTERS)), + PADDR (save_dp), + SCR_JUMP ^ IFTRUE (DATA (RESTORE_POINTERS)), + PADDR (restore_dp), + SCR_JUMP ^ IFTRUE (DATA (EXTENDED_MESSAGE)), + PADDRH (msg_extended), + SCR_JUMP ^ IFTRUE (DATA (NOP)), + PADDR (clrack), + SCR_JUMP ^ IFTRUE (DATA (MESSAGE_REJECT)), + PADDRH (msg_reject), + SCR_JUMP ^ IFTRUE (DATA (IGNORE_WIDE_RESIDUE)), + PADDRH (msg_ign_residue), + /* + ** Rest of the messages left as + ** an exercise ... + ** + ** Unimplemented messages: + ** fall through to MSG_BAD. + */ +}/*-------------------------< MSG_BAD >------------------*/,{ + /* + ** unimplemented message - reject it. + */ + SCR_INT, + SIR_REJECT_SENT, + SCR_LOAD_REG (scratcha, MESSAGE_REJECT), + 0, +}/*-------------------------< SETMSG >----------------------*/,{ + SCR_COPY (1), + RADDR (scratcha), + NADDR (msgout), + SCR_SET (SCR_ATN), + 0, + SCR_JUMP, + PADDR (clrack), +}/*-------------------------< CLEANUP >-------------------*/,{ + /* + ** dsa: Pointer to ccb + ** or xxxxxxFF (no ccb) + ** + ** HS_REG: Host-Status (<>0!) + */ + SCR_FROM_REG (dsa), + 0, + SCR_JUMP ^ IFTRUE (DATA (0xff)), + PADDR (start), + /* + ** dsa is valid. + ** complete the cleanup. + */ + SCR_JUMP, + PADDR (cleanup_ok), + +}/*-------------------------< COMPLETE >-----------------*/,{ + /* + ** Complete message. + ** + ** Copy TEMP register to LASTP in header. + */ + SCR_COPY (4), + RADDR (temp), + NADDR (header.lastp), + /* + ** When we terminate the cycle by clearing ACK, + ** the target may disconnect immediately. + ** + ** We don't want to be told of an + ** "unexpected disconnect", + ** so we disable this feature. + */ + SCR_REG_REG (scntl2, SCR_AND, 0x7f), + 0, + /* + ** Terminate cycle ... + */ + SCR_CLR (SCR_ACK|SCR_ATN), + 0, + /* + ** ... and wait for the disconnect. + */ + SCR_WAIT_DISC, + 0, +}/*-------------------------< CLEANUP_OK >----------------*/,{ + /* + ** Save host status to header. + */ + SCR_COPY (4), + RADDR (scr0), + NADDR (header.status), + /* + ** and copy back the header to the ccb. + */ + SCR_COPY_F (4), + RADDR (dsa), + PADDR (cleanup0), + /* + ** Flush script prefetch if required + */ + PREFETCH_FLUSH + SCR_COPY (sizeof (struct head)), + NADDR (header), +}/*-------------------------< CLEANUP0 >--------------------*/,{ + 0, +}/*-------------------------< SIGNAL >----------------------*/,{ + /* + ** if job not completed ... + */ + SCR_FROM_REG (HS_REG), + 0, + /* + ** ... start the next command. + */ + SCR_JUMP ^ IFTRUE (MASK (0, (HS_DONEMASK|HS_SKIPMASK))), + PADDR(start), + /* + ** If command resulted in not GOOD status, + ** call the C code if needed. 
+ */ + SCR_FROM_REG (SS_REG), + 0, + SCR_CALL ^ IFFALSE (DATA (SAM_STAT_GOOD)), + PADDRH (bad_status), + +#ifndef SCSI_NCR_CCB_DONE_SUPPORT + + /* + ** ... signal completion to the host + */ + SCR_INT, + SIR_INTFLY, + /* + ** Auf zu neuen Schandtaten! + */ + SCR_JUMP, + PADDR(start), + +#else /* defined SCSI_NCR_CCB_DONE_SUPPORT */ + + /* + ** ... signal completion to the host + */ + SCR_JUMP, +}/*------------------------< DONE_POS >---------------------*/,{ + PADDRH (done_queue), +}/*------------------------< DONE_PLUG >--------------------*/,{ + SCR_INT, + SIR_DONE_OVERFLOW, +}/*------------------------< DONE_END >---------------------*/,{ + SCR_INT, + SIR_INTFLY, + SCR_COPY (4), + RADDR (temp), + PADDR (done_pos), + SCR_JUMP, + PADDR (start), + +#endif /* SCSI_NCR_CCB_DONE_SUPPORT */ + +}/*-------------------------< SAVE_DP >------------------*/,{ + /* + ** SAVE_DP message: + ** Copy TEMP register to SAVEP in header. + */ + SCR_COPY (4), + RADDR (temp), + NADDR (header.savep), + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP, + PADDR (dispatch), +}/*-------------------------< RESTORE_DP >---------------*/,{ + /* + ** RESTORE_DP message: + ** Copy SAVEP in header to TEMP register. + */ + SCR_COPY (4), + NADDR (header.savep), + RADDR (temp), + SCR_JUMP, + PADDR (clrack), + +}/*-------------------------< DISCONNECT >---------------*/,{ + /* + ** DISCONNECTing ... + ** + ** disable the "unexpected disconnect" feature, + ** and remove the ACK signal. + */ + SCR_REG_REG (scntl2, SCR_AND, 0x7f), + 0, + SCR_CLR (SCR_ACK|SCR_ATN), + 0, + /* + ** Wait for the disconnect. + */ + SCR_WAIT_DISC, + 0, + /* + ** Status is: DISCONNECTED. + */ + SCR_LOAD_REG (HS_REG, HS_DISCONNECT), + 0, + SCR_JUMP, + PADDR (cleanup_ok), + +}/*-------------------------< MSG_OUT >-------------------*/,{ + /* + ** The target requests a message. + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, + NADDR (msgout), + SCR_COPY (1), + NADDR (msgout), + NADDR (lastmsg), + /* + ** If it was no ABORT message ... + */ + SCR_JUMP ^ IFTRUE (DATA (ABORT_TASK_SET)), + PADDRH (msg_out_abort), + /* + ** ... wait for the next phase + ** if it's a message out, send it again, ... + */ + SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)), + PADDR (msg_out), +}/*-------------------------< MSG_OUT_DONE >--------------*/,{ + /* + ** ... else clear the message ... + */ + SCR_LOAD_REG (scratcha, NOP), + 0, + SCR_COPY (4), + RADDR (scratcha), + NADDR (msgout), + /* + ** ... and process the next phase + */ + SCR_JUMP, + PADDR (dispatch), +}/*-------------------------< IDLE >------------------------*/,{ + /* + ** Nothing to do? + ** Wait for reselect. + ** This NOP will be patched with LED OFF + ** SCR_REG_REG (gpreg, SCR_OR, 0x01) + */ + SCR_NO_OP, + 0, +}/*-------------------------< RESELECT >--------------------*/,{ + /* + ** make the DSA invalid. + */ + SCR_LOAD_REG (dsa, 0xff), + 0, + SCR_CLR (SCR_TRG), + 0, + SCR_LOAD_REG (HS_REG, HS_IN_RESELECT), + 0, + /* + ** Sleep waiting for a reselection. + ** If SIGP is set, special treatment. + ** + ** Zu allem bereit .. + */ + SCR_WAIT_RESEL, + PADDR(start), +}/*-------------------------< RESELECTED >------------------*/,{ + /* + ** This NOP will be patched with LED ON + ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) + */ + SCR_NO_OP, + 0, + /* + ** ... zu nichts zu gebrauchen ? + ** + ** load the target id into the SFBR + ** and jump to the control block. + ** + ** Look at the declarations of + ** - struct ncb + ** - struct tcb + ** - struct lcb + ** - struct ccb + ** to understand what's going on. 
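+	**
+	** Roughly: the reselecting target's id is read from the ssid
+	** register (masked with 0x8F), copied to sdid, and the jump
+	** through NADDR (jump_tcb) then walks the per-target and
+	** per-lun jump tables (see RESEL_LUN, RESEL_TAG and
+	** JUMP_TO_NEXUS below) until the CCB owning this nexus is
+	** reached.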
+ */ + SCR_REG_SFBR (ssid, SCR_AND, 0x8F), + 0, + SCR_TO_REG (sdid), + 0, + SCR_JUMP, + NADDR (jump_tcb), + +}/*-------------------------< RESEL_DSA >-------------------*/,{ + /* + ** Ack the IDENTIFY or TAG previously received. + */ + SCR_CLR (SCR_ACK), + 0, + /* + ** The ncr doesn't have an indirect load + ** or store command. So we have to + ** copy part of the control block to a + ** fixed place, where we can access it. + ** + ** We patch the address part of a + ** COPY command with the DSA-register. + */ + SCR_COPY_F (4), + RADDR (dsa), + PADDR (loadpos1), + /* + ** Flush script prefetch if required + */ + PREFETCH_FLUSH + /* + ** then we do the actual copy. + */ + SCR_COPY (sizeof (struct head)), + /* + ** continued after the next label ... + */ + +}/*-------------------------< LOADPOS1 >-------------------*/,{ + 0, + NADDR (header), + /* + ** The DSA contains the data structure address. + */ + SCR_JUMP, + PADDR (prepare), + +}/*-------------------------< RESEL_LUN >-------------------*/,{ + /* + ** come back to this point + ** to get an IDENTIFY message + ** Wait for a msg_in phase. + */ + SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)), + SIR_RESEL_NO_MSG_IN, + /* + ** message phase. + ** Read the data directly from the BUS DATA lines. + ** This helps to support very old SCSI devices that + ** may reselect without sending an IDENTIFY. + */ + SCR_FROM_REG (sbdl), + 0, + /* + ** It should be an Identify message. + */ + SCR_RETURN, + 0, +}/*-------------------------< RESEL_TAG >-------------------*/,{ + /* + ** Read IDENTIFY + SIMPLE + TAG using a single MOVE. + ** Aggressive optimization, is'nt it? + ** No need to test the SIMPLE TAG message, since the + ** driver only supports conformant devices for tags. ;-) + */ + SCR_MOVE_ABS (3) ^ SCR_MSG_IN, + NADDR (msgin), + /* + ** Read the TAG from the SIDL. + ** Still an aggressive optimization. ;-) + ** Compute the CCB indirect jump address which + ** is (#TAG*2 & 0xfc) due to tag numbering using + ** 1,3,5..MAXTAGS*2+1 actual values. + */ + SCR_REG_SFBR (sidl, SCR_SHL, 0), + 0, + SCR_SFBR_REG (temp, SCR_AND, 0xfc), + 0, +}/*-------------------------< JUMP_TO_NEXUS >-------------------*/,{ + SCR_COPY_F (4), + RADDR (temp), + PADDR (nexus_indirect), + /* + ** Flush script prefetch if required + */ + PREFETCH_FLUSH + SCR_COPY (4), +}/*-------------------------< NEXUS_INDIRECT >-------------------*/,{ + 0, + RADDR (temp), + SCR_RETURN, + 0, +}/*-------------------------< RESEL_NOTAG >-------------------*/,{ + /* + ** No tag expected. + ** Read an throw away the IDENTIFY. + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + NADDR (msgin), + SCR_JUMP, + PADDR (jump_to_nexus), +}/*-------------------------< DATA_IN >--------------------*/,{ +/* +** Because the size depends on the +** #define MAX_SCATTERL parameter, +** it is filled in at runtime. +** +** ##===========< i=0; i========= +** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)), +** || PADDR (dispatch), +** || SCR_MOVE_TBL ^ SCR_DATA_IN, +** || offsetof (struct dsb, data[ i]), +** ##========================================== +** +**--------------------------------------------------------- +*/ +0 +}/*-------------------------< DATA_IN2 >-------------------*/,{ + SCR_CALL, + PADDR (dispatch), + SCR_JUMP, + PADDR (no_data), +}/*-------------------------< DATA_OUT >--------------------*/,{ +/* +** Because the size depends on the +** #define MAX_SCATTERL parameter, +** it is filled in at runtime. 
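+**
+** ncr_script_fill() emits one four-word entry per scatter segment
+** (MAX_SCATTERL of them), as sketched below: a CALL to the
+** dispatcher taken while the phase is not DATA OUT, followed by a
+** table indirect MOVE of data[i].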
+** +** ##===========< i=0; i========= +** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)), +** || PADDR (dispatch), +** || SCR_MOVE_TBL ^ SCR_DATA_OUT, +** || offsetof (struct dsb, data[ i]), +** ##========================================== +** +**--------------------------------------------------------- +*/ +0 +}/*-------------------------< DATA_OUT2 >-------------------*/,{ + SCR_CALL, + PADDR (dispatch), + SCR_JUMP, + PADDR (no_data), +}/*--------------------------------------------------------*/ +}; + +static struct scripth scripth0 __initdata = { +/*-------------------------< TRYLOOP >---------------------*/{ +/* +** Start the next entry. +** Called addresses point to the launch script in the CCB. +** They are patched by the main processor. +** +** Because the size depends on the +** #define MAX_START parameter, it is filled +** in at runtime. +** +**----------------------------------------------------------- +** +** ##===========< I=0; i=========== +** || SCR_CALL, +** || PADDR (idle), +** ##========================================== +** +**----------------------------------------------------------- +*/ +0 +}/*------------------------< TRYLOOP2 >---------------------*/,{ + SCR_JUMP, + PADDRH(tryloop), + +#ifdef SCSI_NCR_CCB_DONE_SUPPORT + +}/*------------------------< DONE_QUEUE >-------------------*/,{ +/* +** Copy the CCB address to the next done entry. +** Because the size depends on the +** #define MAX_DONE parameter, it is filled +** in at runtime. +** +**----------------------------------------------------------- +** +** ##===========< I=0; i=========== +** || SCR_COPY (sizeof(struct ccb *), +** || NADDR (header.cp), +** || NADDR (ccb_done[i]), +** || SCR_CALL, +** || PADDR (done_end), +** ##========================================== +** +**----------------------------------------------------------- +*/ +0 +}/*------------------------< DONE_QUEUE2 >------------------*/,{ + SCR_JUMP, + PADDRH (done_queue), + +#endif /* SCSI_NCR_CCB_DONE_SUPPORT */ +}/*------------------------< SELECT_NO_ATN >-----------------*/,{ + /* + ** Set Initiator mode. + ** And try to select this target without ATN. + */ + + SCR_CLR (SCR_TRG), + 0, + SCR_LOAD_REG (HS_REG, HS_SELECTING), + 0, + SCR_SEL_TBL ^ offsetof (struct dsb, select), + PADDR (reselect), + SCR_JUMP, + PADDR (select2), + +}/*-------------------------< CANCEL >------------------------*/,{ + + SCR_LOAD_REG (scratcha, HS_ABORTED), + 0, + SCR_JUMPR, + 8, +}/*-------------------------< SKIP >------------------------*/,{ + SCR_LOAD_REG (scratcha, 0), + 0, + /* + ** This entry has been canceled. + ** Next time use the next slot. + */ + SCR_COPY (4), + RADDR (temp), + PADDR (startpos), + /* + ** The ncr doesn't have an indirect load + ** or store command. So we have to + ** copy part of the control block to a + ** fixed place, where we can access it. + ** + ** We patch the address part of a + ** COPY command with the DSA-register. + */ + SCR_COPY_F (4), + RADDR (dsa), + PADDRH (skip2), + /* + ** Flush script prefetch if required + */ + PREFETCH_FLUSH + /* + ** then we do the actual copy. + */ + SCR_COPY (sizeof (struct head)), + /* + ** continued after the next label ... + */ +}/*-------------------------< SKIP2 >---------------------*/,{ + 0, + NADDR (header), + /* + ** Initialize the status registers + */ + SCR_COPY (4), + NADDR (header.status), + RADDR (scr0), + /* + ** Force host status. 
+ */ + SCR_FROM_REG (scratcha), + 0, + SCR_JUMPR ^ IFFALSE (MASK (0, HS_DONEMASK)), + 16, + SCR_REG_REG (HS_REG, SCR_OR, HS_SKIPMASK), + 0, + SCR_JUMPR, + 8, + SCR_TO_REG (HS_REG), + 0, + SCR_LOAD_REG (SS_REG, SAM_STAT_GOOD), + 0, + SCR_JUMP, + PADDR (cleanup_ok), + +},/*-------------------------< PAR_ERR_DATA_IN >---------------*/{ + /* + ** Ignore all data in byte, until next phase + */ + SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)), + PADDRH (par_err_other), + SCR_MOVE_ABS (1) ^ SCR_DATA_IN, + NADDR (scratch), + SCR_JUMPR, + -24, +},/*-------------------------< PAR_ERR_OTHER >------------------*/{ + /* + ** count it. + */ + SCR_REG_REG (PS_REG, SCR_ADD, 0x01), + 0, + /* + ** jump to dispatcher. + */ + SCR_JUMP, + PADDR (dispatch), +}/*-------------------------< MSG_REJECT >---------------*/,{ + /* + ** If a negotiation was in progress, + ** negotiation failed. + ** Otherwise, let the C code print + ** some message. + */ + SCR_FROM_REG (HS_REG), + 0, + SCR_INT ^ IFFALSE (DATA (HS_NEGOTIATE)), + SIR_REJECT_RECEIVED, + SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), + SIR_NEGO_FAILED, + SCR_JUMP, + PADDR (clrack), + +}/*-------------------------< MSG_IGN_RESIDUE >----------*/,{ + /* + ** Terminate cycle + */ + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), + PADDR (dispatch), + /* + ** get residue size. + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + NADDR (msgin[1]), + /* + ** Size is 0 .. ignore message. + */ + SCR_JUMP ^ IFTRUE (DATA (0)), + PADDR (clrack), + /* + ** Size is not 1 .. have to interrupt. + */ + SCR_JUMPR ^ IFFALSE (DATA (1)), + 40, + /* + ** Check for residue byte in swide register + */ + SCR_FROM_REG (scntl2), + 0, + SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)), + 16, + /* + ** There IS data in the swide register. + ** Discard it. + */ + SCR_REG_REG (scntl2, SCR_OR, WSR), + 0, + SCR_JUMP, + PADDR (clrack), + /* + ** Load again the size to the sfbr register. + */ + SCR_FROM_REG (scratcha), + 0, + SCR_INT, + SIR_IGN_RESIDUE, + SCR_JUMP, + PADDR (clrack), + +}/*-------------------------< MSG_EXTENDED >-------------*/,{ + /* + ** Terminate cycle + */ + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), + PADDR (dispatch), + /* + ** get length. + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + NADDR (msgin[1]), + /* + */ + SCR_JUMP ^ IFTRUE (DATA (3)), + PADDRH (msg_ext_3), + SCR_JUMP ^ IFFALSE (DATA (2)), + PADDR (msg_bad), +}/*-------------------------< MSG_EXT_2 >----------------*/,{ + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), + PADDR (dispatch), + /* + ** get extended message code. + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + NADDR (msgin[2]), + SCR_JUMP ^ IFTRUE (DATA (EXTENDED_WDTR)), + PADDRH (msg_wdtr), + /* + ** unknown extended message + */ + SCR_JUMP, + PADDR (msg_bad) +}/*-------------------------< MSG_WDTR >-----------------*/,{ + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), + PADDR (dispatch), + /* + ** get data bus width + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + NADDR (msgin[3]), + /* + ** let the host do the real work. + */ + SCR_INT, + SIR_NEGO_WIDE, + /* + ** let the target fetch our answer. 
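+	**
+	** (The SIR_NEGO_WIDE interrupt above lets the C code build the
+	** WDTR reply in the msgout buffer; ATN is then raised so the
+	** target switches to MSG OUT, and SEND_WDTR below transmits
+	** the 4 byte answer.)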
+ */ + SCR_SET (SCR_ATN), + 0, + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)), + PADDRH (nego_bad_phase), + +}/*-------------------------< SEND_WDTR >----------------*/,{ + /* + ** Send the EXTENDED_WDTR + */ + SCR_MOVE_ABS (4) ^ SCR_MSG_OUT, + NADDR (msgout), + SCR_COPY (1), + NADDR (msgout), + NADDR (lastmsg), + SCR_JUMP, + PADDR (msg_out_done), + +}/*-------------------------< MSG_EXT_3 >----------------*/,{ + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), + PADDR (dispatch), + /* + ** get extended message code. + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + NADDR (msgin[2]), + SCR_JUMP ^ IFTRUE (DATA (EXTENDED_SDTR)), + PADDRH (msg_sdtr), + /* + ** unknown extended message + */ + SCR_JUMP, + PADDR (msg_bad) + +}/*-------------------------< MSG_SDTR >-----------------*/,{ + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), + PADDR (dispatch), + /* + ** get period and offset + */ + SCR_MOVE_ABS (2) ^ SCR_MSG_IN, + NADDR (msgin[3]), + /* + ** let the host do the real work. + */ + SCR_INT, + SIR_NEGO_SYNC, + /* + ** let the target fetch our answer. + */ + SCR_SET (SCR_ATN), + 0, + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)), + PADDRH (nego_bad_phase), + +}/*-------------------------< SEND_SDTR >-------------*/,{ + /* + ** Send the EXTENDED_SDTR + */ + SCR_MOVE_ABS (5) ^ SCR_MSG_OUT, + NADDR (msgout), + SCR_COPY (1), + NADDR (msgout), + NADDR (lastmsg), + SCR_JUMP, + PADDR (msg_out_done), + +}/*-------------------------< NEGO_BAD_PHASE >------------*/,{ + SCR_INT, + SIR_NEGO_PROTO, + SCR_JUMP, + PADDR (dispatch), + +}/*-------------------------< MSG_OUT_ABORT >-------------*/,{ + /* + ** After ABORT message, + ** + ** expect an immediate disconnect, ... + */ + SCR_REG_REG (scntl2, SCR_AND, 0x7f), + 0, + SCR_CLR (SCR_ACK|SCR_ATN), + 0, + SCR_WAIT_DISC, + 0, + /* + ** ... and set the status to "ABORTED" + */ + SCR_LOAD_REG (HS_REG, HS_ABORTED), + 0, + SCR_JUMP, + PADDR (cleanup), + +}/*-------------------------< HDATA_IN >-------------------*/,{ +/* +** Because the size depends on the +** #define MAX_SCATTERH parameter, +** it is filled in at runtime. +** +** ##==< i=MAX_SCATTERL; i== +** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)), +** || PADDR (dispatch), +** || SCR_MOVE_TBL ^ SCR_DATA_IN, +** || offsetof (struct dsb, data[ i]), +** ##=================================================== +** +**--------------------------------------------------------- +*/ +0 +}/*-------------------------< HDATA_IN2 >------------------*/,{ + SCR_JUMP, + PADDR (data_in), + +}/*-------------------------< HDATA_OUT >-------------------*/,{ +/* +** Because the size depends on the +** #define MAX_SCATTERH parameter, +** it is filled in at runtime. +** +** ##==< i=MAX_SCATTERL; i== +** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)), +** || PADDR (dispatch), +** || SCR_MOVE_TBL ^ SCR_DATA_OUT, +** || offsetof (struct dsb, data[ i]), +** ##=================================================== +** +**--------------------------------------------------------- +*/ +0 +}/*-------------------------< HDATA_OUT2 >------------------*/,{ + SCR_JUMP, + PADDR (data_out), + +}/*-------------------------< RESET >----------------------*/,{ + /* + ** Send a TARGET_RESET message if bad IDENTIFY + ** received on reselection. + */ + SCR_LOAD_REG (scratcha, ABORT_TASK), + 0, + SCR_JUMP, + PADDRH (abort_resel), +}/*-------------------------< ABORTTAG >-------------------*/,{ + /* + ** Abort a wrong tag received on reselection. 
+ */ + SCR_LOAD_REG (scratcha, ABORT_TASK), + 0, + SCR_JUMP, + PADDRH (abort_resel), +}/*-------------------------< ABORT >----------------------*/,{ + /* + ** Abort a reselection when no active CCB. + */ + SCR_LOAD_REG (scratcha, ABORT_TASK_SET), + 0, +}/*-------------------------< ABORT_RESEL >----------------*/,{ + SCR_COPY (1), + RADDR (scratcha), + NADDR (msgout), + SCR_SET (SCR_ATN), + 0, + SCR_CLR (SCR_ACK), + 0, + /* + ** and send it. + ** we expect an immediate disconnect + */ + SCR_REG_REG (scntl2, SCR_AND, 0x7f), + 0, + SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, + NADDR (msgout), + SCR_COPY (1), + NADDR (msgout), + NADDR (lastmsg), + SCR_CLR (SCR_ACK|SCR_ATN), + 0, + SCR_WAIT_DISC, + 0, + SCR_JUMP, + PADDR (start), +}/*-------------------------< RESEND_IDENT >-------------------*/,{ + /* + ** The target stays in MSG OUT phase after having acked + ** Identify [+ Tag [+ Extended message ]]. Targets shall + ** behave this way on parity error. + ** We must send it again all the messages. + */ + SCR_SET (SCR_ATN), /* Shall be asserted 2 deskew delays before the */ + 0, /* 1rst ACK = 90 ns. Hope the NCR is'nt too fast */ + SCR_JUMP, + PADDR (send_ident), +}/*-------------------------< CLRATN_GO_ON >-------------------*/,{ + SCR_CLR (SCR_ATN), + 0, + SCR_JUMP, +}/*-------------------------< NXTDSP_GO_ON >-------------------*/,{ + 0, +}/*-------------------------< SDATA_IN >-------------------*/,{ + SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)), + PADDR (dispatch), + SCR_MOVE_TBL ^ SCR_DATA_IN, + offsetof (struct dsb, sense), + SCR_CALL, + PADDR (dispatch), + SCR_JUMP, + PADDR (no_data), +}/*-------------------------< DATA_IO >--------------------*/,{ + /* + ** We jump here if the data direction was unknown at the + ** time we had to queue the command to the scripts processor. + ** Pointers had been set as follow in this situation: + ** savep --> DATA_IO + ** lastp --> start pointer when DATA_IN + ** goalp --> goal pointer when DATA_IN + ** wlastp --> start pointer when DATA_OUT + ** wgoalp --> goal pointer when DATA_OUT + ** This script sets savep/lastp/goalp according to the + ** direction chosen by the target. + */ + SCR_JUMPR ^ IFTRUE (WHEN (SCR_DATA_OUT)), + 32, + /* + ** Direction is DATA IN. + ** Warning: we jump here, even when phase is DATA OUT. + */ + SCR_COPY (4), + NADDR (header.lastp), + NADDR (header.savep), + + /* + ** Jump to the SCRIPTS according to actual direction. + */ + SCR_COPY (4), + NADDR (header.savep), + RADDR (temp), + SCR_RETURN, + 0, + /* + ** Direction is DATA OUT. + */ + SCR_COPY (4), + NADDR (header.wlastp), + NADDR (header.lastp), + SCR_COPY (4), + NADDR (header.wgoalp), + NADDR (header.goalp), + SCR_JUMPR, + -64, +}/*-------------------------< BAD_IDENTIFY >---------------*/,{ + /* + ** If message phase but not an IDENTIFY, + ** get some help from the C code. + ** Old SCSI device may behave so. + */ + SCR_JUMPR ^ IFTRUE (MASK (0x80, 0x80)), + 16, + SCR_INT, + SIR_RESEL_NO_IDENTIFY, + SCR_JUMP, + PADDRH (reset), + /* + ** Message is an IDENTIFY, but lun is unknown. + ** Read the message, since we got it directly + ** from the SCSI BUS data lines. + ** Signal problem to C code for logging the event. + ** Send an ABORT_TASK_SET to clear all pending tasks. + */ + SCR_INT, + SIR_RESEL_BAD_LUN, + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + NADDR (msgin), + SCR_JUMP, + PADDRH (abort), +}/*-------------------------< BAD_I_T_L >------------------*/,{ + /* + ** We donnot have a task for that I_T_L. + ** Signal problem to C code for logging the event. + ** Send an ABORT_TASK_SET message. 
+ */ + SCR_INT, + SIR_RESEL_BAD_I_T_L, + SCR_JUMP, + PADDRH (abort), +}/*-------------------------< BAD_I_T_L_Q >----------------*/,{ + /* + ** We donnot have a task that matches the tag. + ** Signal problem to C code for logging the event. + ** Send an ABORT_TASK message. + */ + SCR_INT, + SIR_RESEL_BAD_I_T_L_Q, + SCR_JUMP, + PADDRH (aborttag), +}/*-------------------------< BAD_TARGET >-----------------*/,{ + /* + ** We donnot know the target that reselected us. + ** Grab the first message if any (IDENTIFY). + ** Signal problem to C code for logging the event. + ** TARGET_RESET message. + */ + SCR_INT, + SIR_RESEL_BAD_TARGET, + SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), + 8, + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + NADDR (msgin), + SCR_JUMP, + PADDRH (reset), +}/*-------------------------< BAD_STATUS >-----------------*/,{ + /* + ** If command resulted in either TASK_SET FULL, + ** CHECK CONDITION or COMMAND TERMINATED, + ** call the C code. + */ + SCR_INT ^ IFTRUE (DATA (SAM_STAT_TASK_SET_FULL)), + SIR_BAD_STATUS, + SCR_INT ^ IFTRUE (DATA (SAM_STAT_CHECK_CONDITION)), + SIR_BAD_STATUS, + SCR_INT ^ IFTRUE (DATA (SAM_STAT_COMMAND_TERMINATED)), + SIR_BAD_STATUS, + SCR_RETURN, + 0, +}/*-------------------------< START_RAM >-------------------*/,{ + /* + ** Load the script into on-chip RAM, + ** and jump to start point. + */ + SCR_COPY_F (4), + RADDR (scratcha), + PADDRH (start_ram0), + /* + ** Flush script prefetch if required + */ + PREFETCH_FLUSH + SCR_COPY (sizeof (struct script)), +}/*-------------------------< START_RAM0 >--------------------*/,{ + 0, + PADDR (start), + SCR_JUMP, + PADDR (start), +}/*-------------------------< STO_RESTART >-------------------*/,{ + /* + ** + ** Repair start queue (e.g. next time use the next slot) + ** and jump to start point. + */ + SCR_COPY (4), + RADDR (temp), + PADDR (startpos), + SCR_JUMP, + PADDR (start), +}/*-------------------------< WAIT_DMA >-------------------*/,{ + /* + ** For HP Zalon/53c720 systems, the Zalon interface + ** between CPU and 53c720 does prefetches, which causes + ** problems with self modifying scripts. The problem + ** is overcome by calling a dummy subroutine after each + ** modification, to force a refetch of the script on + ** return from the subroutine. + */ + SCR_RETURN, + 0, +}/*-------------------------< SNOOPTEST >-------------------*/,{ + /* + ** Read the variable. + */ + SCR_COPY (4), + NADDR(ncr_cache), + RADDR (scratcha), + /* + ** Write the variable. + */ + SCR_COPY (4), + RADDR (temp), + NADDR(ncr_cache), + /* + ** Read back the variable. + */ + SCR_COPY (4), + NADDR(ncr_cache), + RADDR (temp), +}/*-------------------------< SNOOPEND >-------------------*/,{ + /* + ** And stop. 
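+	**
+	** Interrupt code 99 tells ncr_snooptest() that the three
+	** SCR_COPY transfers above have completed; the host code then
+	** compares what went through ncr_cache with what the CPU
+	** sees, to detect cache incoherency between CPU and SCRIPTS.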
+ */
+	SCR_INT,
+	99,
+}/*--------------------------------------------------------*/
+};
+
+/*==========================================================
+**
+**
+**	Fill in #define dependent parts of the script
+**
+**
+**==========================================================
+*/
+
+void __init ncr_script_fill (struct script * scr, struct scripth * scrh)
+{
+	int	i;
+	ncrcmd	*p;
+
+	p = scrh->tryloop;
+	for (i=0; i<MAX_START; i++) {
+		*p++ =SCR_CALL;
+		*p++ =PADDR (idle);
+	}
+
+	BUG_ON((u_long)p != (u_long)&scrh->tryloop + sizeof (scrh->tryloop));
+
+#ifdef SCSI_NCR_CCB_DONE_SUPPORT
+
+	p = scrh->done_queue;
+	for (i = 0; i<MAX_DONE; i++) {
+		*p++ =SCR_COPY (sizeof(struct ccb *));
+		*p++ =NADDR (header.cp);
+		*p++ =NADDR (ccb_done[i]);
+		*p++ =SCR_CALL;
+		*p++ =PADDR (done_end);
+	}
+
+	BUG_ON((u_long)p != (u_long)&scrh->done_queue+sizeof(scrh->done_queue));
+
+#endif /* SCSI_NCR_CCB_DONE_SUPPORT */
+
+	p = scrh->hdata_in;
+	for (i=0; i<MAX_SCATTERH; i++) {
+		*p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN));
+		*p++ =PADDR (dispatch);
+		*p++ =SCR_MOVE_TBL ^ SCR_DATA_IN;
+		*p++ =offsetof (struct dsb, data[i]);
+	}
+
+	BUG_ON((u_long)p != (u_long)&scrh->hdata_in + sizeof (scrh->hdata_in));
+
+	p = scr->data_in;
+	for (i=MAX_SCATTERH; i<MAX_SCATTERH+MAX_SCATTERL; i++) {
+		*p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN));
+		*p++ =PADDR (dispatch);
+		*p++ =SCR_MOVE_TBL ^ SCR_DATA_IN;
+		*p++ =offsetof (struct dsb, data[i]);
+	}
+
+	BUG_ON((u_long)p != (u_long)&scr->data_in + sizeof (scr->data_in));
+
+	p = scrh->hdata_out;
+	for (i=0; i<MAX_SCATTERH; i++) {
+		*p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT));
+		*p++ =PADDR (dispatch);
+		*p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT;
+		*p++ =offsetof (struct dsb, data[i]);
+	}
+
+	BUG_ON((u_long)p != (u_long)&scrh->hdata_out + sizeof (scrh->hdata_out));
+
+	p = scr->data_out;
+	for (i=MAX_SCATTERH; i<MAX_SCATTERH+MAX_SCATTERL; i++) {
+		*p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT));
+		*p++ =PADDR (dispatch);
+		*p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT;
+		*p++ =offsetof (struct dsb, data[i]);
+	}
+
+	BUG_ON((u_long)p != (u_long)&scr->data_out + sizeof (scr->data_out));
+}
+
+/*==========================================================
+**
+**
+**	Copy and rebind a script.
+**
+**
+**==========================================================
+*/
+
+static void __init
+ncr_script_copy_and_bind (struct ncb *np, ncrcmd *src, ncrcmd *dst, int len)
+{
+	ncrcmd  opcode, new, old, tmp1, tmp2;
+	ncrcmd	*start, *end;
+	int relocs;
+	int opchanged = 0;
+
+	start = src;
+	end = src + len/4;
+
+	while (src < end) {
+
+		opcode = *src++;
+		*dst++ = cpu_to_scr(opcode);
+
+		/*
+		**	If we forget to change the length
+		**	in struct script, a field will be
+		**	padded with 0. This is an illegal
+		**	command.
+		*/
+
+		if (opcode == 0) {
+			printk (KERN_ERR "%s: ERROR0 IN SCRIPT at %d.\n",
+				ncr_name(np), (int) (src-start-1));
+			mdelay(1000);
+		}
+
+		if (DEBUG_FLAGS & DEBUG_SCRIPT)
+			printk (KERN_DEBUG "%p:  <%x>\n",
+				(src-1), (unsigned)opcode);
+
+		/*
+		**	We don't have to decode ALL commands
+		*/
+		switch (opcode >> 28) {
+
+		case 0xc:
+			/*
+			**	COPY has TWO arguments.
+			*/
+			relocs = 2;
+			tmp1 = src[0];
+#ifdef RELOC_KVAR
+			if ((tmp1 & RELOC_MASK) == RELOC_KVAR)
+				tmp1 = 0;
+#endif
+			tmp2 = src[1];
+#ifdef RELOC_KVAR
+			if ((tmp2 & RELOC_MASK) == RELOC_KVAR)
+				tmp2 = 0;
+#endif
+			if ((tmp1 ^ tmp2) & 3) {
+				printk (KERN_ERR"%s: ERROR1 IN SCRIPT at %d.\n",
+					ncr_name(np), (int) (src-start-1));
+				mdelay(1000);
+			}
+			/*
+			**	If PREFETCH feature not enabled, remove
+			**	the NO FLUSH bit if present.
+			*/
+			if ((opcode & SCR_NO_FLUSH) && !(np->features & FE_PFEN)) {
+				dst[-1] = cpu_to_scr(opcode & ~SCR_NO_FLUSH);
+				++opchanged;
+			}
+			break;
+
+		case 0x0:
+			/*
+			**	MOVE (absolute address)
+			*/
+			relocs = 1;
+			break;
+
+		case 0x8:
+			/*
+			**	JUMP / CALL
+			**	don't relocate if relative :-)
+			*/
+			if (opcode & 0x00800000)
+				relocs = 0;
+			else
+				relocs = 1;
+			break;
+
+		case 0x4:
+		case 0x5:
+		case 0x6:
+		case 0x7:
+			relocs = 1;
+			break;
+
+		default:
+			relocs = 0;
+			break;
+		}
+
+		if (relocs) {
+			while (relocs--) {
+				old = *src++;
+
+				switch (old & RELOC_MASK) {
+				case RELOC_REGISTER:
+					new = (old & ~RELOC_MASK) + np->paddr;
+					break;
+				case RELOC_LABEL:
+					new = (old & ~RELOC_MASK) + np->p_script;
+					break;
+				case RELOC_LABELH:
+					new = (old & ~RELOC_MASK) + np->p_scripth;
+					break;
+				case RELOC_SOFTC:
+					new = (old & ~RELOC_MASK) + np->p_ncb;
+					break;
+#ifdef RELOC_KVAR
+				case RELOC_KVAR:
+					if (((old & ~RELOC_MASK) <
+					     SCRIPT_KVAR_FIRST) ||
+					    ((old & ~RELOC_MASK) >
+					     SCRIPT_KVAR_LAST))
+						panic("ncr KVAR out of range");
+					new = vtophys(script_kvars[old &
+					    ~RELOC_MASK]);
+					break;
+#endif
+				case 0:
+					/* Don't relocate a 0 address.
*/ + if (old == 0) { + new = old; + break; + } + fallthrough; + default: + panic("ncr_script_copy_and_bind: weird relocation %x\n", old); + break; + } + + *dst++ = cpu_to_scr(new); + } + } else + *dst++ = cpu_to_scr(*src++); + + } +} + +/* +** Linux host data structure +*/ + +struct host_data { + struct ncb *ncb; +}; + +#define PRINT_ADDR(cmd, arg...) dev_info(&cmd->device->sdev_gendev , ## arg) + +static void ncr_print_msg(struct ccb *cp, char *label, u_char *msg) +{ + PRINT_ADDR(cp->cmd, "%s: ", label); + + spi_print_msg(msg); + printk("\n"); +} + +/*========================================================== +** +** NCR chip clock divisor table. +** Divisors are multiplied by 10,000,000 in order to make +** calculations more simple. +** +**========================================================== +*/ + +#define _5M 5000000 +static u_long div_10M[] = + {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; + + +/*=============================================================== +** +** Prepare io register values used by ncr_init() according +** to selected and supported features. +** +** NCR chips allow burst lengths of 2, 4, 8, 16, 32, 64, 128 +** transfers. 32,64,128 are only supported by 875 and 895 chips. +** We use log base 2 (burst length) as internal code, with +** value 0 meaning "burst disabled". +** +**=============================================================== +*/ + +/* + * Burst length from burst code. + */ +#define burst_length(bc) (!(bc))? 0 : 1 << (bc) + +/* + * Burst code from io register bits. Burst enable is ctest0 for c720 + */ +#define burst_code(dmode, ctest0) \ + (ctest0) & 0x80 ? 0 : (((dmode) & 0xc0) >> 6) + 1 + +/* + * Set initial io register bits from burst code. + */ +static inline void ncr_init_burst(struct ncb *np, u_char bc) +{ + u_char *be = &np->rv_ctest0; + *be &= ~0x80; + np->rv_dmode &= ~(0x3 << 6); + np->rv_ctest5 &= ~0x4; + + if (!bc) { + *be |= 0x80; + } else { + --bc; + np->rv_dmode |= ((bc & 0x3) << 6); + np->rv_ctest5 |= (bc & 0x4); + } +} + +static void __init ncr_prepare_setting(struct ncb *np) +{ + u_char burst_max; + u_long period; + int i; + + /* + ** Save assumed BIOS setting + */ + + np->sv_scntl0 = INB(nc_scntl0) & 0x0a; + np->sv_scntl3 = INB(nc_scntl3) & 0x07; + np->sv_dmode = INB(nc_dmode) & 0xce; + np->sv_dcntl = INB(nc_dcntl) & 0xa8; + np->sv_ctest0 = INB(nc_ctest0) & 0x84; + np->sv_ctest3 = INB(nc_ctest3) & 0x01; + np->sv_ctest4 = INB(nc_ctest4) & 0x80; + np->sv_ctest5 = INB(nc_ctest5) & 0x24; + np->sv_gpcntl = INB(nc_gpcntl); + np->sv_stest2 = INB(nc_stest2) & 0x20; + np->sv_stest4 = INB(nc_stest4); + + /* + ** Wide ? + */ + + np->maxwide = (np->features & FE_WIDE)? 1 : 0; + + /* + * Guess the frequency of the chip's clock. + */ + if (np->features & FE_ULTRA) + np->clock_khz = 80000; + else + np->clock_khz = 40000; + + /* + * Get the clock multiplier factor. + */ + if (np->features & FE_QUAD) + np->multiplier = 4; + else if (np->features & FE_DBLR) + np->multiplier = 2; + else + np->multiplier = 1; + + /* + * Measure SCSI clock frequency for chips + * it may vary from assumed one. + */ + if (np->features & FE_VARCLK) + ncr_getclock(np, np->multiplier); + + /* + * Divisor to be used for async (timer pre-scaler). + */ + i = np->clock_divn - 1; + while (--i >= 0) { + if (10ul * SCSI_NCR_MIN_ASYNC * np->clock_khz > div_10M[i]) { + ++i; + break; + } + } + np->rv_scntl3 = i+1; + + /* + * Minimum synchronous period factor supported by the chip. + * Btw, 'period' is in tenths of nanoseconds. 
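+	 *
+	 * For example, with a 40 MHz clock (clock_khz = 40000) and
+	 * div_10M[0] = 10000000, period = 4*10000000/40000 = 1000,
+	 * i.e. 100 ns, so minsync below becomes 25 (Fast-10).
+	 * An 80 MHz Ultra clock gives period = 500 and minsync = 12
+	 * (Fast-20).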
+ */ + + period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz; + if (period <= 250) np->minsync = 10; + else if (period <= 303) np->minsync = 11; + else if (period <= 500) np->minsync = 12; + else np->minsync = (period + 40 - 1) / 40; + + /* + * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). + */ + + if (np->minsync < 25 && !(np->features & FE_ULTRA)) + np->minsync = 25; + + /* + * Maximum synchronous period factor supported by the chip. + */ + + period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); + np->maxsync = period > 2540 ? 254 : period / 10; + + /* + ** Prepare initial value of other IO registers + */ +#if defined SCSI_NCR_TRUST_BIOS_SETTING + np->rv_scntl0 = np->sv_scntl0; + np->rv_dmode = np->sv_dmode; + np->rv_dcntl = np->sv_dcntl; + np->rv_ctest0 = np->sv_ctest0; + np->rv_ctest3 = np->sv_ctest3; + np->rv_ctest4 = np->sv_ctest4; + np->rv_ctest5 = np->sv_ctest5; + burst_max = burst_code(np->sv_dmode, np->sv_ctest0); +#else + + /* + ** Select burst length (dwords) + */ + burst_max = driver_setup.burst_max; + if (burst_max == 255) + burst_max = burst_code(np->sv_dmode, np->sv_ctest0); + if (burst_max > 7) + burst_max = 7; + if (burst_max > np->maxburst) + burst_max = np->maxburst; + + /* + ** Select all supported special features + */ + if (np->features & FE_ERL) + np->rv_dmode |= ERL; /* Enable Read Line */ + if (np->features & FE_BOF) + np->rv_dmode |= BOF; /* Burst Opcode Fetch */ + if (np->features & FE_ERMP) + np->rv_dmode |= ERMP; /* Enable Read Multiple */ + if (np->features & FE_PFEN) + np->rv_dcntl |= PFEN; /* Prefetch Enable */ + if (np->features & FE_CLSE) + np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ + if (np->features & FE_WRIE) + np->rv_ctest3 |= WRIE; /* Write and Invalidate */ + if (np->features & FE_DFS) + np->rv_ctest5 |= DFS; /* Dma Fifo Size */ + if (np->features & FE_MUX) + np->rv_ctest4 |= MUX; /* Host bus multiplex mode */ + if (np->features & FE_EA) + np->rv_dcntl |= EA; /* Enable ACK */ + if (np->features & FE_EHP) + np->rv_ctest0 |= EHP; /* Even host parity */ + + /* + ** Select some other + */ + if (driver_setup.master_parity) + np->rv_ctest4 |= MPEE; /* Master parity checking */ + if (driver_setup.scsi_parity) + np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */ + + /* + ** Get SCSI addr of host adapter (set by bios?). + */ + if (np->myaddr == 255) { + np->myaddr = INB(nc_scid) & 0x07; + if (!np->myaddr) + np->myaddr = SCSI_NCR_MYADDR; + } + +#endif /* SCSI_NCR_TRUST_BIOS_SETTING */ + + /* + * Prepare initial io register bits for burst length + */ + ncr_init_burst(np, burst_max); + + /* + ** Set SCSI BUS mode. + ** + ** - ULTRA2 chips (895/895A/896) report the current + ** BUS mode through the STEST4 IO register. + ** - For previous generation chips (825/825A/875), + ** user has to tell us how to check against HVD, + ** since a 100% safe algorithm is not possible. 
+ */ + np->scsi_mode = SMODE_SE; + if (np->features & FE_DIFF) { + switch(driver_setup.diff_support) { + case 4: /* Trust previous settings if present, then GPIO3 */ + if (np->sv_scntl3) { + if (np->sv_stest2 & 0x20) + np->scsi_mode = SMODE_HVD; + break; + } + fallthrough; + case 3: /* SYMBIOS controllers report HVD through GPIO3 */ + if (INB(nc_gpreg) & 0x08) + break; + fallthrough; + case 2: /* Set HVD unconditionally */ + np->scsi_mode = SMODE_HVD; + fallthrough; + case 1: /* Trust previous settings for HVD */ + if (np->sv_stest2 & 0x20) + np->scsi_mode = SMODE_HVD; + break; + default:/* Don't care about HVD */ + break; + } + } + if (np->scsi_mode == SMODE_HVD) + np->rv_stest2 |= 0x20; + + /* + ** Set LED support from SCRIPTS. + ** Ignore this feature for boards known to use a + ** specific GPIO wiring and for the 895A or 896 + ** that drive the LED directly. + ** Also probe initial setting of GPIO0 as output. + */ + if ((driver_setup.led_pin) && + !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01)) + np->features |= FE_LED0; + + /* + ** Set irq mode. + */ + switch(driver_setup.irqm & 3) { + case 2: + np->rv_dcntl |= IRQM; + break; + case 1: + np->rv_dcntl |= (np->sv_dcntl & IRQM); + break; + default: + break; + } + + /* + ** Configure targets according to driver setup. + ** Allow to override sync, wide and NOSCAN from + ** boot command line. + */ + for (i = 0 ; i < MAX_TARGET ; i++) { + struct tcb *tp = &np->target[i]; + + tp->usrsync = driver_setup.default_sync; + tp->usrwide = driver_setup.max_wide; + tp->usrtags = MAX_TAGS; + tp->period = 0xffff; + if (!driver_setup.disconnection) + np->target[i].usrflag = UF_NODISC; + } + + /* + ** Announce all that stuff to user. + */ + + printk(KERN_INFO "%s: ID %d, Fast-%d%s%s\n", ncr_name(np), + np->myaddr, + np->minsync < 12 ? 40 : (np->minsync < 25 ? 20 : 10), + (np->rv_scntl0 & 0xa) ? ", Parity Checking" : ", NO Parity", + (np->rv_stest2 & 0x20) ? ", Differential" : ""); + + if (bootverbose > 1) { + printk (KERN_INFO "%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " + "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", + ncr_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl, + np->sv_ctest3, np->sv_ctest4, np->sv_ctest5); + + printk (KERN_INFO "%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " + "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", + ncr_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl, + np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); + } + + if (bootverbose && np->paddr2) + printk (KERN_INFO "%s: on-chip RAM at 0x%lx\n", + ncr_name(np), np->paddr2); +} + +/*========================================================== +** +** +** Done SCSI commands list management. +** +** We donnot enter the scsi_done() callback immediately +** after a command has been seen as completed but we +** insert it into a list which is flushed outside any kind +** of driver critical section. +** This allows to do minimal stuff under interrupt and +** inside critical sections and to also avoid locking up +** on recursive calls to driver entry points under SMP. +** In fact, the only kernel point which is entered by the +** driver with a driver lock set is kmalloc(GFP_ATOMIC) +** that shall not reenter the driver under any circumstances, +** AFAIK. 
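+**
+** Concretely, ncr_queue_done_cmd() below links the command into
+** np->done_list through cmd->host_scribble while the driver lock is
+** held, and ncr_flush_done_cmds() later walks that list and calls
+** scsi_done() once the lock has been dropped.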
+** +**========================================================== +*/ +static inline void ncr_queue_done_cmd(struct ncb *np, struct scsi_cmnd *cmd) +{ + unmap_scsi_data(np, cmd); + cmd->host_scribble = (char *) np->done_list; + np->done_list = cmd; +} + +static inline void ncr_flush_done_cmds(struct scsi_cmnd *lcmd) +{ + struct scsi_cmnd *cmd; + + while (lcmd) { + cmd = lcmd; + lcmd = (struct scsi_cmnd *) cmd->host_scribble; + scsi_done(cmd); + } +} + +/*========================================================== +** +** +** Prepare the next negotiation message if needed. +** +** Fill in the part of message buffer that contains the +** negotiation and the nego_status field of the CCB. +** Returns the size of the message in bytes. +** +** +**========================================================== +*/ + + +static int ncr_prepare_nego(struct ncb *np, struct ccb *cp, u_char *msgptr) +{ + struct tcb *tp = &np->target[cp->target]; + int msglen = 0; + int nego = 0; + struct scsi_target *starget = tp->starget; + + /* negotiate wide transfers ? */ + if (!tp->widedone) { + if (spi_support_wide(starget)) { + nego = NS_WIDE; + } else + tp->widedone=1; + } + + /* negotiate synchronous transfers? */ + if (!nego && !tp->period) { + if (spi_support_sync(starget)) { + nego = NS_SYNC; + } else { + tp->period =0xffff; + dev_info(&starget->dev, "target did not report SYNC.\n"); + } + } + + switch (nego) { + case NS_SYNC: + msglen += spi_populate_sync_msg(msgptr + msglen, + tp->maxoffs ? tp->minsync : 0, tp->maxoffs); + break; + case NS_WIDE: + msglen += spi_populate_width_msg(msgptr + msglen, tp->usrwide); + break; + } + + cp->nego_status = nego; + + if (nego) { + tp->nego_cp = cp; + if (DEBUG_FLAGS & DEBUG_NEGO) { + ncr_print_msg(cp, nego == NS_WIDE ? + "wide msgout":"sync_msgout", msgptr); + } + } + + return msglen; +} + + + +/*========================================================== +** +** +** Start execution of a SCSI command. +** This is called from the generic SCSI driver. +** +** +**========================================================== +*/ +static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd) +{ + struct scsi_device *sdev = cmd->device; + struct tcb *tp = &np->target[sdev->id]; + struct lcb *lp = tp->lp[sdev->lun]; + struct ccb *cp; + + int segments; + u_char idmsg, *msgptr; + u32 msglen; + int direction; + u32 lastp, goalp; + + /*--------------------------------------------- + ** + ** Some shortcuts ... + ** + **--------------------------------------------- + */ + if ((sdev->id == np->myaddr ) || + (sdev->id >= MAX_TARGET) || + (sdev->lun >= MAX_LUN )) { + return(DID_BAD_TARGET); + } + + /*--------------------------------------------- + ** + ** Complete the 1st TEST UNIT READY command + ** with error condition if the device is + ** flagged NOSCAN, in order to speed up + ** the boot. + ** + **--------------------------------------------- + */ + if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12) && + (tp->usrflag & UF_NOSCAN)) { + tp->usrflag &= ~UF_NOSCAN; + return DID_BAD_TARGET; + } + + if (DEBUG_FLAGS & DEBUG_TINY) { + PRINT_ADDR(cmd, "CMD=%x ", cmd->cmnd[0]); + } + + /*--------------------------------------------------- + ** + ** Assign a ccb / bind cmd. + ** If resetting, shorten settle_time if necessary + ** in order to avoid spurious timeouts. + ** If resetting or no free ccb, + ** insert cmd into the waiting list. 
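+	**
+	** For example, if a bus reset is still settling and this
+	** command's own timeout would expire before settle_time,
+	** settle_time is pulled back to one second before that
+	** timeout, so the command is not timed out while it merely
+	** sits in the waiting list.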
+ ** + **---------------------------------------------------- + */ + if (np->settle_time && scsi_cmd_to_rq(cmd)->timeout >= HZ) { + u_long tlimit = jiffies + scsi_cmd_to_rq(cmd)->timeout - HZ; + if (time_after(np->settle_time, tlimit)) + np->settle_time = tlimit; + } + + if (np->settle_time || !(cp=ncr_get_ccb (np, cmd))) { + insert_into_waiting_list(np, cmd); + return(DID_OK); + } + cp->cmd = cmd; + + /*---------------------------------------------------- + ** + ** Build the identify / tag / sdtr message + ** + **---------------------------------------------------- + */ + + idmsg = IDENTIFY(0, sdev->lun); + + if (cp ->tag != NO_TAG || + (cp != np->ccb && np->disc && !(tp->usrflag & UF_NODISC))) + idmsg |= 0x40; + + msgptr = cp->scsi_smsg; + msglen = 0; + msgptr[msglen++] = idmsg; + + if (cp->tag != NO_TAG) { + char order = np->order; + + /* + ** Force ordered tag if necessary to avoid timeouts + ** and to preserve interactivity. + */ + if (lp && time_after(jiffies, lp->tags_stime)) { + if (lp->tags_smap) { + order = ORDERED_QUEUE_TAG; + if ((DEBUG_FLAGS & DEBUG_TAGS)||bootverbose>2){ + PRINT_ADDR(cmd, + "ordered tag forced.\n"); + } + } + lp->tags_stime = jiffies + 3*HZ; + lp->tags_smap = lp->tags_umap; + } + + if (order == 0) { + /* + ** Ordered write ops, unordered read ops. + */ + switch (cmd->cmnd[0]) { + case 0x08: /* READ_SMALL (6) */ + case 0x28: /* READ_BIG (10) */ + case 0xa8: /* READ_HUGE (12) */ + order = SIMPLE_QUEUE_TAG; + break; + default: + order = ORDERED_QUEUE_TAG; + } + } + msgptr[msglen++] = order; + /* + ** Actual tags are numbered 1,3,5,..2*MAXTAGS+1, + ** since we may have to deal with devices that have + ** problems with #TAG 0 or too great #TAG numbers. + */ + msgptr[msglen++] = (cp->tag << 1) + 1; + } + + /*---------------------------------------------------- + ** + ** Build the data descriptors + ** + **---------------------------------------------------- + */ + + direction = cmd->sc_data_direction; + if (direction != DMA_NONE) { + segments = ncr_scatter(np, cp, cp->cmd); + if (segments < 0) { + ncr_free_ccb(np, cp); + return(DID_ERROR); + } + } + else { + cp->data_len = 0; + segments = 0; + } + + /*--------------------------------------------------- + ** + ** negotiation required? + ** + ** (nego_status is filled by ncr_prepare_nego()) + ** + **--------------------------------------------------- + */ + + cp->nego_status = 0; + + if ((!tp->widedone || !tp->period) && !tp->nego_cp && lp) { + msglen += ncr_prepare_nego (np, cp, msgptr + msglen); + } + + /*---------------------------------------------------- + ** + ** Determine xfer direction. + ** + **---------------------------------------------------- + */ + if (!cp->data_len) + direction = DMA_NONE; + + /* + ** If data direction is BIDIRECTIONAL, speculate FROM_DEVICE + ** but prepare alternate pointers for TO_DEVICE in case + ** of our speculation will be just wrong. + ** SCRIPTS will swap values if needed. 
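+	**
+	** For the BIDIRECTIONAL case savep is pointed at the DATA_IO
+	** fragment of scripth0 (see above): at run time it either
+	** keeps the DATA IN pointers or copies wlastp/wgoalp over
+	** lastp/goalp when the target actually enters DATA OUT.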
+ */ + switch(direction) { + case DMA_BIDIRECTIONAL: + case DMA_TO_DEVICE: + goalp = NCB_SCRIPT_PHYS (np, data_out2) + 8; + if (segments <= MAX_SCATTERL) + lastp = goalp - 8 - (segments * 16); + else { + lastp = NCB_SCRIPTH_PHYS (np, hdata_out2); + lastp -= (segments - MAX_SCATTERL) * 16; + } + if (direction != DMA_BIDIRECTIONAL) + break; + cp->phys.header.wgoalp = cpu_to_scr(goalp); + cp->phys.header.wlastp = cpu_to_scr(lastp); + fallthrough; + case DMA_FROM_DEVICE: + goalp = NCB_SCRIPT_PHYS (np, data_in2) + 8; + if (segments <= MAX_SCATTERL) + lastp = goalp - 8 - (segments * 16); + else { + lastp = NCB_SCRIPTH_PHYS (np, hdata_in2); + lastp -= (segments - MAX_SCATTERL) * 16; + } + break; + default: + case DMA_NONE: + lastp = goalp = NCB_SCRIPT_PHYS (np, no_data); + break; + } + + /* + ** Set all pointers values needed by SCRIPTS. + ** If direction is unknown, start at data_io. + */ + cp->phys.header.lastp = cpu_to_scr(lastp); + cp->phys.header.goalp = cpu_to_scr(goalp); + + if (direction == DMA_BIDIRECTIONAL) + cp->phys.header.savep = + cpu_to_scr(NCB_SCRIPTH_PHYS (np, data_io)); + else + cp->phys.header.savep= cpu_to_scr(lastp); + + /* + ** Save the initial data pointer in order to be able + ** to redo the command. + */ + cp->startp = cp->phys.header.savep; + + /*---------------------------------------------------- + ** + ** fill in ccb + ** + **---------------------------------------------------- + ** + ** + ** physical -> virtual backlink + ** Generic SCSI command + */ + + /* + ** Startqueue + */ + cp->start.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, select)); + cp->restart.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_dsa)); + /* + ** select + */ + cp->phys.select.sel_id = sdev_id(sdev); + cp->phys.select.sel_scntl3 = tp->wval; + cp->phys.select.sel_sxfer = tp->sval; + /* + ** message + */ + cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg)); + cp->phys.smsg.size = cpu_to_scr(msglen); + + /* + ** command + */ + memcpy(cp->cdb_buf, cmd->cmnd, min_t(int, cmd->cmd_len, sizeof(cp->cdb_buf))); + cp->phys.cmd.addr = cpu_to_scr(CCB_PHYS (cp, cdb_buf[0])); + cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len); + + /* + ** status + */ + cp->actualquirks = 0; + cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; + cp->scsi_status = SAM_STAT_ILLEGAL; + cp->parity_status = 0; + + cp->xerr_status = XE_OK; + + /*---------------------------------------------------- + ** + ** Critical region: start this job. + ** + **---------------------------------------------------- + */ + + /* activate this job. */ + cp->magic = CCB_MAGIC; + + /* + ** insert next CCBs into start queue. + ** 2 max at a time is enough to flush the CCB wait queue. + */ + cp->auto_sense = 0; + if (lp) + ncr_start_next_ccb(np, lp, 2); + else + ncr_put_start_queue(np, cp); + + /* Command is successfully queued. */ + + return DID_OK; +} + + +/*========================================================== +** +** +** Insert a CCB into the start queue and wake up the +** SCRIPTS processor. +** +** +**========================================================== +*/ + +static void ncr_start_next_ccb(struct ncb *np, struct lcb *lp, int maxn) +{ + struct list_head *qp; + struct ccb *cp; + + if (lp->held_ccb) + return; + + while (maxn-- && lp->queuedccbs < lp->queuedepth) { + qp = ncr_list_pop(&lp->wait_ccbq); + if (!qp) + break; + ++lp->queuedccbs; + cp = list_entry(qp, struct ccb, link_ccbq); + list_add_tail(qp, &lp->busy_ccbq); + lp->jump_ccb[cp->tag == NO_TAG ? 
0 : cp->tag] = + cpu_to_scr(CCB_PHYS (cp, restart)); + ncr_put_start_queue(np, cp); + } +} + +static void ncr_put_start_queue(struct ncb *np, struct ccb *cp) +{ + u16 qidx; + + /* + ** insert into start queue. + */ + if (!np->squeueput) np->squeueput = 1; + qidx = np->squeueput + 2; + if (qidx >= MAX_START + MAX_START) qidx = 1; + + np->scripth->tryloop [qidx] = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle)); + MEMORY_BARRIER(); + np->scripth->tryloop [np->squeueput] = cpu_to_scr(CCB_PHYS (cp, start)); + + np->squeueput = qidx; + ++np->queuedccbs; + cp->queued = 1; + + if (DEBUG_FLAGS & DEBUG_QUEUE) + printk ("%s: queuepos=%d.\n", ncr_name (np), np->squeueput); + + /* + ** Script processor may be waiting for reselect. + ** Wake it up. + */ + MEMORY_BARRIER(); + OUTB (nc_istat, SIGP); +} + + +static int ncr_reset_scsi_bus(struct ncb *np, int enab_int, int settle_delay) +{ + u32 term; + int retv = 0; + + np->settle_time = jiffies + settle_delay * HZ; + + if (bootverbose > 1) + printk("%s: resetting, " + "command processing suspended for %d seconds\n", + ncr_name(np), settle_delay); + + ncr_chip_reset(np, 100); + udelay(2000); /* The 895 needs time for the bus mode to settle */ + if (enab_int) + OUTW (nc_sien, RST); + /* + ** Enable Tolerant, reset IRQD if present and + ** properly set IRQ mode, prior to resetting the bus. + */ + OUTB (nc_stest3, TE); + OUTB (nc_scntl1, CRST); + udelay(200); + + if (!driver_setup.bus_check) + goto out; + /* + ** Check for no terminators or SCSI bus shorts to ground. + ** Read SCSI data bus, data parity bits and control signals. + ** We are expecting RESET to be TRUE and other signals to be + ** FALSE. + */ + + term = INB(nc_sstat0); + term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */ + term |= ((INB(nc_sstat2) & 0x01) << 26) | /* sdp1 */ + ((INW(nc_sbdl) & 0xff) << 9) | /* d7-0 */ + ((INW(nc_sbdl) & 0xff00) << 10) | /* d15-8 */ + INB(nc_sbcl); /* req ack bsy sel atn msg cd io */ + + if (!(np->features & FE_WIDE)) + term &= 0x3ffff; + + if (term != (2<<7)) { + printk("%s: suspicious SCSI data while resetting the BUS.\n", + ncr_name(np)); + printk("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = " + "0x%lx, expecting 0x%lx\n", + ncr_name(np), + (np->features & FE_WIDE) ? "dp1,d15-8," : "", + (u_long)term, (u_long)(2<<7)); + if (driver_setup.bus_check == 1) + retv = 1; + } +out: + OUTB (nc_scntl1, 0); + return retv; +} + +/* + * Start reset process. + * If reset in progress do nothing. + * The interrupt handler will reinitialize the chip. + * The timeout handler will wait for settle_time before + * clearing it and so resuming command processing. + */ +static void ncr_start_reset(struct ncb *np) +{ + if (!np->settle_time) { + ncr_reset_scsi_bus(np, 1, driver_setup.settle_delay); + } +} + +/*========================================================== +** +** +** Reset the SCSI BUS. +** This is called from the generic SCSI driver. +** +** +**========================================================== +*/ +static int ncr_reset_bus (struct ncb *np) +{ +/* + * Return immediately if reset is in progress. + */ + if (np->settle_time) { + return FAILED; + } +/* + * Start the reset process. + * The script processor is then assumed to be stopped. + * Commands will now be queued in the waiting list until a settle + * delay of 2 seconds will be completed. + */ + ncr_start_reset(np); +/* + * Wake-up all awaiting commands with DID_RESET. + */ + reset_waiting_list(np); +/* + * Wake-up all pending commands with HS_RESET -> DID_RESET. 
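+ *
+ * (Two different queues are flushed here: reset_waiting_list() above
+ * returns the commands that never got a CCB -- for instance commands
+ * queued while settle_time was still running -- while ncr_wakeup()
+ * below completes the CCBs the chip already owns.)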
+ */ + ncr_wakeup(np, HS_RESET); + + return SUCCESS; +} + +static void ncr_detach(struct ncb *np) +{ + struct ccb *cp; + struct tcb *tp; + struct lcb *lp; + int target, lun; + int i; + char inst_name[16]; + + /* Local copy so we don't access np after freeing it! */ + strscpy(inst_name, ncr_name(np), sizeof(inst_name)); + + printk("%s: releasing host resources\n", ncr_name(np)); + +/* +** Stop the ncr_timeout process +** Set release_stage to 1 and wait that ncr_timeout() set it to 2. +*/ + +#ifdef DEBUG_NCR53C8XX + printk("%s: stopping the timer\n", ncr_name(np)); +#endif + np->release_stage = 1; + for (i = 50 ; i && np->release_stage != 2 ; i--) + mdelay(100); + if (np->release_stage != 2) + printk("%s: the timer seems to be already stopped\n", ncr_name(np)); + else np->release_stage = 2; + +/* +** Disable chip interrupts +*/ + +#ifdef DEBUG_NCR53C8XX + printk("%s: disabling chip interrupts\n", ncr_name(np)); +#endif + OUTW (nc_sien , 0); + OUTB (nc_dien , 0); + + /* + ** Reset NCR chip + ** Restore bios setting for automatic clock detection. + */ + + printk("%s: resetting chip\n", ncr_name(np)); + ncr_chip_reset(np, 100); + + OUTB(nc_dmode, np->sv_dmode); + OUTB(nc_dcntl, np->sv_dcntl); + OUTB(nc_ctest0, np->sv_ctest0); + OUTB(nc_ctest3, np->sv_ctest3); + OUTB(nc_ctest4, np->sv_ctest4); + OUTB(nc_ctest5, np->sv_ctest5); + OUTB(nc_gpcntl, np->sv_gpcntl); + OUTB(nc_stest2, np->sv_stest2); + + ncr_selectclock(np, np->sv_scntl3); + + /* + ** Free allocated ccb(s) + */ + + while ((cp=np->ccb->link_ccb) != NULL) { + np->ccb->link_ccb = cp->link_ccb; + if (cp->host_status) { + printk("%s: shall free an active ccb (host_status=%d)\n", + ncr_name(np), cp->host_status); + } +#ifdef DEBUG_NCR53C8XX + printk("%s: freeing ccb (%lx)\n", ncr_name(np), (u_long) cp); +#endif + m_free_dma(cp, sizeof(*cp), "CCB"); + } + + /* Free allocated tp(s) */ + + for (target = 0; target < MAX_TARGET ; target++) { + tp=&np->target[target]; + for (lun = 0 ; lun < MAX_LUN ; lun++) { + lp = tp->lp[lun]; + if (lp) { +#ifdef DEBUG_NCR53C8XX + printk("%s: freeing lp (%lx)\n", ncr_name(np), (u_long) lp); +#endif + if (lp->jump_ccb != &lp->jump_ccb_0) + m_free_dma(lp->jump_ccb,256,"JUMP_CCB"); + m_free_dma(lp, sizeof(*lp), "LCB"); + } + } + } + + if (np->scripth0) + m_free_dma(np->scripth0, sizeof(struct scripth), "SCRIPTH"); + if (np->script0) + m_free_dma(np->script0, sizeof(struct script), "SCRIPT"); + if (np->ccb) + m_free_dma(np->ccb, sizeof(struct ccb), "CCB"); + m_free_dma(np, sizeof(struct ncb), "NCB"); + + printk("%s: host resources successfully released\n", inst_name); +} + +/*========================================================== +** +** +** Complete execution of a SCSI command. +** Signal completion to the generic SCSI driver. +** +** +**========================================================== +*/ + +void ncr_complete (struct ncb *np, struct ccb *cp) +{ + struct scsi_cmnd *cmd; + struct tcb *tp; + struct lcb *lp; + + /* + ** Sanity check + */ + + if (!cp || cp->magic != CCB_MAGIC || !cp->cmd) + return; + + /* + ** Print minimal debug information. + */ + + if (DEBUG_FLAGS & DEBUG_TINY) + printk ("CCB=%lx STAT=%x/%x\n", (unsigned long)cp, + cp->host_status,cp->scsi_status); + + /* + ** Get command, target and lun pointers. + */ + + cmd = cp->cmd; + cp->cmd = NULL; + tp = &np->target[cmd->device->id]; + lp = tp->lp[cmd->device->lun]; + + /* + ** We donnot queue more than 1 ccb per target + ** with negotiation at any time. If this ccb was + ** used for negotiation, clear this info in the tcb. 
+ */ + + if (cp == tp->nego_cp) + tp->nego_cp = NULL; + + /* + ** If auto-sense performed, change scsi status. + */ + if (cp->auto_sense) { + cp->scsi_status = cp->auto_sense; + } + + /* + ** If we were recovering from queue full or performing + ** auto-sense, requeue skipped CCBs to the wait queue. + */ + + if (lp && lp->held_ccb) { + if (cp == lp->held_ccb) { + list_splice_init(&lp->skip_ccbq, &lp->wait_ccbq); + lp->held_ccb = NULL; + } + } + + /* + ** Check for parity errors. + */ + + if (cp->parity_status > 1) { + PRINT_ADDR(cmd, "%d parity error(s).\n",cp->parity_status); + } + + /* + ** Check for extended errors. + */ + + if (cp->xerr_status != XE_OK) { + switch (cp->xerr_status) { + case XE_EXTRA_DATA: + PRINT_ADDR(cmd, "extraneous data discarded.\n"); + break; + case XE_BAD_PHASE: + PRINT_ADDR(cmd, "invalid scsi phase (4/5).\n"); + break; + default: + PRINT_ADDR(cmd, "extended error %d.\n", + cp->xerr_status); + break; + } + if (cp->host_status==HS_COMPLETE) + cp->host_status = HS_FAIL; + } + + /* + ** Print out any error for debugging purpose. + */ + if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) { + if (cp->host_status != HS_COMPLETE || + cp->scsi_status != SAM_STAT_GOOD) { + PRINT_ADDR(cmd, "ERROR: cmd=%x host_status=%x " + "scsi_status=%x\n", cmd->cmnd[0], + cp->host_status, cp->scsi_status); + } + } + + /* + ** Check the status. + */ + cmd->result = 0; + if ( (cp->host_status == HS_COMPLETE) + && (cp->scsi_status == SAM_STAT_GOOD || + cp->scsi_status == SAM_STAT_CONDITION_MET)) { + /* + * All went well (GOOD status). + * CONDITION MET status is returned on + * `Pre-Fetch' or `Search data' success. + */ + set_status_byte(cmd, cp->scsi_status); + + /* + ** @RESID@ + ** Could dig out the correct value for resid, + ** but it would be quite complicated. + */ + /* if (cp->phys.header.lastp != cp->phys.header.goalp) */ + + /* + ** Allocate the lcb if not yet. + */ + if (!lp) + ncr_alloc_lcb (np, cmd->device->id, cmd->device->lun); + + tp->bytes += cp->data_len; + tp->transfers ++; + + /* + ** If tags was reduced due to queue full, + ** increase tags if 1000 good status received. + */ + if (lp && lp->usetags && lp->numtags < lp->maxtags) { + ++lp->num_good; + if (lp->num_good >= 1000) { + lp->num_good = 0; + ++lp->numtags; + ncr_setup_tags (np, cmd->device); + } + } + } else if ((cp->host_status == HS_COMPLETE) + && (cp->scsi_status == SAM_STAT_CHECK_CONDITION)) { + /* + ** Check condition code + */ + set_status_byte(cmd, SAM_STAT_CHECK_CONDITION); + + /* + ** Copy back sense data to caller's buffer. + */ + memcpy(cmd->sense_buffer, cp->sense_buf, + min_t(size_t, SCSI_SENSE_BUFFERSIZE, + sizeof(cp->sense_buf))); + + if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) { + u_char *p = cmd->sense_buffer; + int i; + PRINT_ADDR(cmd, "sense data:"); + for (i=0; i<14; i++) printk (" %x", *p++); + printk (".\n"); + } + } else if ((cp->host_status == HS_COMPLETE) + && (cp->scsi_status == SAM_STAT_RESERVATION_CONFLICT)) { + /* + ** Reservation Conflict condition code + */ + set_status_byte(cmd, SAM_STAT_RESERVATION_CONFLICT); + + } else if ((cp->host_status == HS_COMPLETE) + && (cp->scsi_status == SAM_STAT_BUSY || + cp->scsi_status == SAM_STAT_TASK_SET_FULL)) { + + /* + ** Target is busy. 
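+ ** (The status byte is simply passed back to the midlayer here; the
+ ** tagged queue depth adjustment for TASK SET FULL is handled by
+ ** ncr_sir_to_redo() further down, which shrinks numtags to the
+ ** number of disconnected CCBs, never below 2.)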
+ */
+ set_status_byte(cmd, cp->scsi_status);
+
+ } else if ((cp->host_status == HS_SEL_TIMEOUT)
+ || (cp->host_status == HS_TIMEOUT)) {
+
+ /*
+ ** No response
+ */
+ set_status_byte(cmd, cp->scsi_status);
+ set_host_byte(cmd, DID_TIME_OUT);
+
+ } else if (cp->host_status == HS_RESET) {
+
+ /*
+ ** SCSI bus reset
+ */
+ set_status_byte(cmd, cp->scsi_status);
+ set_host_byte(cmd, DID_RESET);
+
+ } else if (cp->host_status == HS_ABORTED) {
+
+ /*
+ ** Transfer aborted
+ */
+ set_status_byte(cmd, cp->scsi_status);
+ set_host_byte(cmd, DID_ABORT);
+
+ } else {
+
+ /*
+ ** Other protocol messes
+ */
+ PRINT_ADDR(cmd, "COMMAND FAILED (%x %x) @%p.\n",
+ cp->host_status, cp->scsi_status, cp);
+
+ set_status_byte(cmd, cp->scsi_status);
+ set_host_byte(cmd, DID_ERROR);
+ }
+
+ /*
+ ** trace output
+ */
+
+ if (tp->usrflag & UF_TRACE) {
+ u_char * p;
+ int i;
+ PRINT_ADDR(cmd, " CMD:");
+ p = (u_char*) &cmd->cmnd[0];
+ for (i=0; i<cmd->cmd_len; i++) printk (" %x", *p++);
+
+ if (cp->host_status==HS_COMPLETE) {
+ switch (cp->scsi_status) {
+ case SAM_STAT_GOOD:
+ printk (" GOOD");
+ break;
+ case SAM_STAT_CHECK_CONDITION:
+ printk (" SENSE:");
+ p = (u_char*) &cmd->sense_buffer;
+ for (i=0; i<14; i++)
+ printk (" %x", *p++);
+ break;
+ default:
+ printk (" STAT: %x\n", cp->scsi_status);
+ break;
+ }
+ } else printk (" HOSTERROR: %x", cp->host_status);
+ printk ("\n");
+ }
+
+ /*
+ ** Free this ccb
+ */
+ ncr_free_ccb (np, cp);
+
+ /*
+ ** requeue awaiting scsi commands for this lun.
+ */
+ if (lp && lp->queuedccbs < lp->queuedepth &&
+ !list_empty(&lp->wait_ccbq))
+ ncr_start_next_ccb(np, lp, 2);
+
+ /*
+ ** requeue awaiting scsi commands for this controller.
+ */
+ if (np->waiting_list)
+ requeue_waiting_list(np);
+
+ /*
+ ** signal completion to generic driver.
+ */
+ ncr_queue_done_cmd(np, cmd);
+}
+
+/*==========================================================
+**
+**
+** Signal all (or one) control block done.
+**
+**
+**==========================================================
+*/
+
+/*
+** This CCB has been skipped by the NCR.
+** Queue it in the corresponding unit queue.
+*/
+static void ncr_ccb_skipped(struct ncb *np, struct ccb *cp)
+{
+ struct tcb *tp = &np->target[cp->target];
+ struct lcb *lp = tp->lp[cp->lun];
+
+ if (lp && cp != np->ccb) {
+ cp->host_status &= ~HS_SKIPMASK;
+ cp->start.schedule.l_paddr =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, select));
+ list_move_tail(&cp->link_ccbq, &lp->skip_ccbq);
+ if (cp->queued) {
+ --lp->queuedccbs;
+ }
+ }
+ if (cp->queued) {
+ --np->queuedccbs;
+ cp->queued = 0;
+ }
+}
+
+/*
+** The NCR has completed CCBs.
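+** (With SCSI_NCR_CCB_DONE_SUPPORT each done queue entry is five
+** script words; as entries are consumed below, word 4 of the emptied
+** slot is re-armed as a jump to "done_plug" and word 4 of the
+** previous slot as a jump to "done_end", so the end marker follows
+** the driver around the ring.)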
+** Look at the DONE QUEUE if enabled, otherwise scan all CCBs +*/ +void ncr_wakeup_done (struct ncb *np) +{ + struct ccb *cp; +#ifdef SCSI_NCR_CCB_DONE_SUPPORT + int i, j; + + i = np->ccb_done_ic; + while (1) { + j = i+1; + if (j >= MAX_DONE) + j = 0; + + cp = np->ccb_done[j]; + if (!CCB_DONE_VALID(cp)) + break; + + np->ccb_done[j] = (struct ccb *)CCB_DONE_EMPTY; + np->scripth->done_queue[5*j + 4] = + cpu_to_scr(NCB_SCRIPT_PHYS (np, done_plug)); + MEMORY_BARRIER(); + np->scripth->done_queue[5*i + 4] = + cpu_to_scr(NCB_SCRIPT_PHYS (np, done_end)); + + if (cp->host_status & HS_DONEMASK) + ncr_complete (np, cp); + else if (cp->host_status & HS_SKIPMASK) + ncr_ccb_skipped (np, cp); + + i = j; + } + np->ccb_done_ic = i; +#else + cp = np->ccb; + while (cp) { + if (cp->host_status & HS_DONEMASK) + ncr_complete (np, cp); + else if (cp->host_status & HS_SKIPMASK) + ncr_ccb_skipped (np, cp); + cp = cp->link_ccb; + } +#endif +} + +/* +** Complete all active CCBs. +*/ +void ncr_wakeup (struct ncb *np, u_long code) +{ + struct ccb *cp = np->ccb; + + while (cp) { + if (cp->host_status != HS_IDLE) { + cp->host_status = code; + ncr_complete (np, cp); + } + cp = cp->link_ccb; + } +} + +/* +** Reset ncr chip. +*/ + +/* Some initialisation must be done immediately following reset, for 53c720, + * at least. EA (dcntl bit 5) isn't set here as it is set once only in + * the _detect function. + */ +static void ncr_chip_reset(struct ncb *np, int delay) +{ + OUTB (nc_istat, SRST); + udelay(delay); + OUTB (nc_istat, 0 ); + + if (np->features & FE_EHP) + OUTB (nc_ctest0, EHP); + if (np->features & FE_MUX) + OUTB (nc_ctest4, MUX); +} + + +/*========================================================== +** +** +** Start NCR chip. +** +** +**========================================================== +*/ + +void ncr_init (struct ncb *np, int reset, char * msg, u_long code) +{ + int i; + + /* + ** Reset chip if asked, otherwise just clear fifos. + */ + + if (reset) { + OUTB (nc_istat, SRST); + udelay(100); + } + else { + OUTB (nc_stest3, TE|CSF); + OUTONB (nc_ctest3, CLF); + } + + /* + ** Message. + */ + + if (msg) printk (KERN_INFO "%s: restart (%s).\n", ncr_name (np), msg); + + /* + ** Clear Start Queue + */ + np->queuedepth = MAX_START - 1; /* 1 entry needed as end marker */ + for (i = 1; i < MAX_START + MAX_START; i += 2) + np->scripth0->tryloop[i] = + cpu_to_scr(NCB_SCRIPT_PHYS (np, idle)); + + /* + ** Start at first entry. + */ + np->squeueput = 0; + np->script0->startpos[0] = cpu_to_scr(NCB_SCRIPTH_PHYS (np, tryloop)); + +#ifdef SCSI_NCR_CCB_DONE_SUPPORT + /* + ** Clear Done Queue + */ + for (i = 0; i < MAX_DONE; i++) { + np->ccb_done[i] = (struct ccb *)CCB_DONE_EMPTY; + np->scripth0->done_queue[5*i + 4] = + cpu_to_scr(NCB_SCRIPT_PHYS (np, done_end)); + } +#endif + + /* + ** Start at first entry. + */ + np->script0->done_pos[0] = cpu_to_scr(NCB_SCRIPTH_PHYS (np,done_queue)); + np->ccb_done_ic = MAX_DONE-1; + np->scripth0->done_queue[5*(MAX_DONE-1) + 4] = + cpu_to_scr(NCB_SCRIPT_PHYS (np, done_plug)); + + /* + ** Wakeup all pending jobs. + */ + ncr_wakeup (np, code); + + /* + ** Init chip. + */ + + /* + ** Remove reset; big delay because the 895 needs time for the + ** bus mode to settle + */ + ncr_chip_reset(np, 2000); + + OUTB (nc_scntl0, np->rv_scntl0 | 0xc0); + /* full arb., ena parity, par->ATN */ + OUTB (nc_scntl1, 0x00); /* odd parity, and remove CRST!! 
*/
+
+ ncr_selectclock(np, np->rv_scntl3); /* Select SCSI clock */
+
+ OUTB (nc_scid , RRE|np->myaddr); /* Adapter SCSI address */
+ OUTW (nc_respid, 1ul<<np->myaddr); /* Id to respond to */
+ OUTB (nc_istat , SIGP ); /* Signal Process */
+ OUTB (nc_dmode , np->rv_dmode); /* Burst length, dma mode */
+ OUTB (nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */
+
+ OUTB (nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */
+ OUTB (nc_ctest0, np->rv_ctest0); /* 720: CDIS and EHP */
+ OUTB (nc_ctest3, np->rv_ctest3); /* Write and invalidate */
+ OUTB (nc_ctest4, np->rv_ctest4); /* Master parity checking */
+
+ OUTB (nc_stest2, EXT|np->rv_stest2); /* Extended Sreq/Sack filtering */
+ OUTB (nc_stest3, TE); /* TolerANT enable */
+ OUTB (nc_stime0, 0x0c ); /* HTH disabled STO 0.25 sec */
+
+ /*
+ ** Disable disconnects.
+ */
+
+ np->disc = 0;
+
+ /*
+ ** Enable GPIO0 pin for writing if LED support.
+ */
+
+ if (np->features & FE_LED0) {
+ OUTOFFB (nc_gpcntl, 0x01);
+ }
+
+ /*
+ ** enable ints
+ */
+
+ OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
+ OUTB (nc_dien , MDPE|BF|ABRT|SSI|SIR|IID);
+
+ /*
+ ** Fill in target structure.
+ ** Reinitialize usrsync.
+ ** Reinitialize usrwide.
+ ** Prepare sync negotiation according to actual SCSI bus mode.
+ */
+
+ for (i=0;i<MAX_TARGET;i++) {
+ struct tcb *tp = &np->target[i];
+
+ tp->sval = 0;
+ tp->wval = np->rv_scntl3;
+
+ if (tp->usrsync != 255) {
+ if (tp->usrsync <= np->maxsync) {
+ if (tp->usrsync < np->minsync) {
+ tp->usrsync = np->minsync;
+ }
+ }
+ else
+ tp->usrsync = 255;
+ }
+
+ if (tp->usrwide > np->maxwide)
+ tp->usrwide = np->maxwide;
+
+ }
+
+ /*
+ ** Start script processor.
+ */
+ if (np->paddr2) {
+ if (bootverbose)
+ printk ("%s: Downloading SCSI SCRIPTS.\n",
+ ncr_name(np));
+ OUTL (nc_scratcha, vtobus(np->script0));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, start_ram));
+ }
+ else
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, start));
+}
+
+/*==========================================================
+**
+** Prepare the negotiation values for wide and
+** synchronous transfers.
+**
+**==========================================================
+*/
+
+static void ncr_negotiate (struct ncb* np, struct tcb* tp)
+{
+ /*
+ ** minsync unit is 4ns !
+ */
+
+ u_long minsync = tp->usrsync;
+
+ /*
+ ** SCSI bus mode limit
+ */
+
+ if (np->scsi_mode && np->scsi_mode == SMODE_SE) {
+ if (minsync < 12) minsync = 12;
+ }
+
+ /*
+ ** our limit ..
+ */
+
+ if (minsync < np->minsync)
+ minsync = np->minsync;
+
+ /*
+ ** divider limit
+ */
+
+ if (minsync > np->maxsync)
+ minsync = 255;
+
+ if (tp->maxoffs > np->maxoffs)
+ tp->maxoffs = np->maxoffs;
+
+ tp->minsync = minsync;
+ tp->maxoffs = (minsync<255 ? tp->maxoffs : 0);
+
+ /*
+ ** period=0: has to negotiate sync transfer
+ */
+
+ tp->period=0;
+
+ /*
+ ** widedone=0: has to negotiate wide transfer
+ */
+ tp->widedone=0;
+}
+
+/*==========================================================
+**
+** Get clock factor and sync divisor for a given
+** synchronous factor period.
+** Returns the clock factor (in sxfer) and scntl3
+** synchronous divisor field.
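+**
+** As a worked example of the factor/period mapping used below:
+** factors up to 10 map to a 25 ns period, 11 to 30.3 ns, 12 to
+** 500 * 0.1 ns = 50 ns (i.e. 20 MHz), and any larger factor simply
+** means factor * 4 ns.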
+** +**========================================================== +*/ + +static void ncr_getsync(struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl3p) +{ + u_long clk = np->clock_khz; /* SCSI clock frequency in kHz */ + int div = np->clock_divn; /* Number of divisors supported */ + u_long fak; /* Sync factor in sxfer */ + u_long per; /* Period in tenths of ns */ + u_long kpc; /* (per * clk) */ + + /* + ** Compute the synchronous period in tenths of nano-seconds + */ + if (sfac <= 10) per = 250; + else if (sfac == 11) per = 303; + else if (sfac == 12) per = 500; + else per = 40 * sfac; + + /* + ** Look for the greatest clock divisor that allows an + ** input speed faster than the period. + */ + kpc = per * clk; + while (--div > 0) + if (kpc >= (div_10M[div] << 2)) break; + + /* + ** Calculate the lowest clock factor that allows an output + ** speed not faster than the period. + */ + fak = (kpc - 1) / div_10M[div] + 1; + + if (fak < 4) fak = 4; /* Should never happen, too bad ... */ + + /* + ** Compute and return sync parameters for the ncr + */ + *fakp = fak - 4; + *scntl3p = ((div+1) << 4) + (sfac < 25 ? 0x80 : 0); +} + + +/*========================================================== +** +** Set actual values, sync status and patch all ccbs of +** a target according to new sync/wide agreement. +** +**========================================================== +*/ + +static void ncr_set_sync_wide_status (struct ncb *np, u_char target) +{ + struct ccb *cp; + struct tcb *tp = &np->target[target]; + + /* + ** set actual value and sync_status + */ + OUTB (nc_sxfer, tp->sval); + np->sync_st = tp->sval; + OUTB (nc_scntl3, tp->wval); + np->wide_st = tp->wval; + + /* + ** patch ALL ccbs of this target. + */ + for (cp = np->ccb; cp; cp = cp->link_ccb) { + if (!cp->cmd) continue; + if (scmd_id(cp->cmd) != target) continue; + cp->phys.select.sel_scntl3 = tp->wval; + cp->phys.select.sel_sxfer = tp->sval; + } +} + +/*========================================================== +** +** Switch sync mode for current job and it's target +** +**========================================================== +*/ + +static void ncr_setsync (struct ncb *np, struct ccb *cp, u_char scntl3, u_char sxfer) +{ + struct scsi_cmnd *cmd = cp->cmd; + struct tcb *tp; + u_char target = INB (nc_sdid) & 0x0f; + u_char idiv; + + BUG_ON(target != (scmd_id(cmd) & 0xf)); + + tp = &np->target[target]; + + if (!scntl3 || !(sxfer & 0x1f)) + scntl3 = np->rv_scntl3; + scntl3 = (scntl3 & 0xf0) | (tp->wval & EWS) | (np->rv_scntl3 & 0x07); + + /* + ** Deduce the value of controller sync period from scntl3. + ** period is in tenths of nano-seconds. + */ + + idiv = ((scntl3 >> 4) & 0x7); + if ((sxfer & 0x1f) && idiv) + tp->period = (((sxfer>>5)+4)*div_10M[idiv-1])/np->clock_khz; + else + tp->period = 0xffff; + + /* Stop there if sync parameters are unchanged */ + if (tp->sval == sxfer && tp->wval == scntl3) + return; + tp->sval = sxfer; + tp->wval = scntl3; + + if (sxfer & 0x01f) { + /* Disable extended Sreq/Sack filtering */ + if (tp->period <= 2000) + OUTOFFB(nc_stest2, EXT); + } + + spi_display_xfer_agreement(tp->starget); + + /* + ** set actual value and sync_status + ** patch ALL ccbs of this target. + */ + ncr_set_sync_wide_status(np, target); +} + +/*========================================================== +** +** Switch wide mode for current job and it's target +** SCSI specs say: a SCSI device that accepts a WDTR +** message shall reset the synchronous agreement to +** asynchronous mode. 
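+** That is why, when a WDTR exchange is actually acknowledged
+** (ack != 0), the code below drops sxfer to 0 (async) and leaves it
+** to a later SDTR negotiation to restore synchronous transfers.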
+** +**========================================================== +*/ + +static void ncr_setwide (struct ncb *np, struct ccb *cp, u_char wide, u_char ack) +{ + struct scsi_cmnd *cmd = cp->cmd; + u16 target = INB (nc_sdid) & 0x0f; + struct tcb *tp; + u_char scntl3; + u_char sxfer; + + BUG_ON(target != (scmd_id(cmd) & 0xf)); + + tp = &np->target[target]; + tp->widedone = wide+1; + scntl3 = (tp->wval & (~EWS)) | (wide ? EWS : 0); + + sxfer = ack ? 0 : tp->sval; + + /* + ** Stop there if sync/wide parameters are unchanged + */ + if (tp->sval == sxfer && tp->wval == scntl3) return; + tp->sval = sxfer; + tp->wval = scntl3; + + /* + ** Bells and whistles ;-) + */ + if (bootverbose >= 2) { + dev_info(&cmd->device->sdev_target->dev, "WIDE SCSI %sabled.\n", + (scntl3 & EWS) ? "en" : "dis"); + } + + /* + ** set actual value and sync_status + ** patch ALL ccbs of this target. + */ + ncr_set_sync_wide_status(np, target); +} + +/*========================================================== +** +** Switch tagged mode for a target. +** +**========================================================== +*/ + +static void ncr_setup_tags (struct ncb *np, struct scsi_device *sdev) +{ + unsigned char tn = sdev->id, ln = sdev->lun; + struct tcb *tp = &np->target[tn]; + struct lcb *lp = tp->lp[ln]; + u_char reqtags, maxdepth; + + /* + ** Just in case ... + */ + if ((!tp) || (!lp) || !sdev) + return; + + /* + ** If SCSI device queue depth is not yet set, leave here. + */ + if (!lp->scdev_depth) + return; + + /* + ** Donnot allow more tags than the SCSI driver can queue + ** for this device. + ** Donnot allow more tags than we can handle. + */ + maxdepth = lp->scdev_depth; + if (maxdepth > lp->maxnxs) maxdepth = lp->maxnxs; + if (lp->maxtags > maxdepth) lp->maxtags = maxdepth; + if (lp->numtags > maxdepth) lp->numtags = maxdepth; + + /* + ** only devices conformant to ANSI Version >= 2 + ** only devices capable of tagged commands + ** only if enabled by user .. + */ + if (sdev->tagged_supported && lp->numtags > 1) { + reqtags = lp->numtags; + } else { + reqtags = 1; + } + + /* + ** Update max number of tags + */ + lp->numtags = reqtags; + if (lp->numtags > lp->maxtags) + lp->maxtags = lp->numtags; + + /* + ** If we want to switch tag mode, we must wait + ** for no CCB to be active. + */ + if (reqtags > 1 && lp->usetags) { /* Stay in tagged mode */ + if (lp->queuedepth == reqtags) /* Already announced */ + return; + lp->queuedepth = reqtags; + } + else if (reqtags <= 1 && !lp->usetags) { /* Stay in untagged mode */ + lp->queuedepth = reqtags; + return; + } + else { /* Want to switch tag mode */ + if (lp->busyccbs) /* If not yet safe, return */ + return; + lp->queuedepth = reqtags; + lp->usetags = reqtags > 1 ? 1 : 0; + } + + /* + ** Patch the lun mini-script, according to tag mode. + */ + lp->jump_tag.l_paddr = lp->usetags? + cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_tag)) : + cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_notag)); + + /* + ** Announce change to user. + */ + if (bootverbose) { + if (lp->usetags) { + dev_info(&sdev->sdev_gendev, + "tagged command queue depth set to %d\n", + reqtags); + } else { + dev_info(&sdev->sdev_gendev, + "tagged command queueing disabled\n"); + } + } +} + +/*========================================================== +** +** +** ncr timeout handler. +** +** +**========================================================== +** +** Misused to keep the driver running when +** interrupts are not configured correctly. 
+** +**---------------------------------------------------------- +*/ + +static void ncr_timeout (struct ncb *np) +{ + u_long thistime = jiffies; + + /* + ** If release process in progress, let's go + ** Set the release stage from 1 to 2 to synchronize + ** with the release process. + */ + + if (np->release_stage) { + if (np->release_stage == 1) np->release_stage = 2; + return; + } + + np->timer.expires = jiffies + SCSI_NCR_TIMER_INTERVAL; + add_timer(&np->timer); + + /* + ** If we are resetting the ncr, wait for settle_time before + ** clearing it. Then command processing will be resumed. + */ + if (np->settle_time) { + if (np->settle_time <= thistime) { + if (bootverbose > 1) + printk("%s: command processing resumed\n", ncr_name(np)); + np->settle_time = 0; + np->disc = 1; + requeue_waiting_list(np); + } + return; + } + + /* + ** Since the generic scsi driver only allows us 0.5 second + ** to perform abort of a command, we must look at ccbs about + ** every 0.25 second. + */ + if (np->lasttime + 4*HZ < thistime) { + /* + ** block ncr interrupts + */ + np->lasttime = thistime; + } + +#ifdef SCSI_NCR_BROKEN_INTR + if (INB(nc_istat) & (INTF|SIP|DIP)) { + + /* + ** Process pending interrupts. + */ + if (DEBUG_FLAGS & DEBUG_TINY) printk ("{"); + ncr_exception (np); + if (DEBUG_FLAGS & DEBUG_TINY) printk ("}"); + } +#endif /* SCSI_NCR_BROKEN_INTR */ +} + +/*========================================================== +** +** log message for real hard errors +** +** "ncr0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc)." +** " reg: r0 r1 r2 r3 r4 r5 r6 ..... rf." +** +** exception register: +** ds: dstat +** si: sist +** +** SCSI bus lines: +** so: control lines as driver by NCR. +** si: control lines as seen by NCR. +** sd: scsi data lines as seen by NCR. +** +** wide/fastmode: +** sxfer: (see the manual) +** scntl3: (see the manual) +** +** current script command: +** dsp: script address (relative to start of script). +** dbc: first word of script command. 
+** +** First 16 register of the chip: +** r0..rf +** +**========================================================== +*/ + +static void ncr_log_hard_error(struct ncb *np, u16 sist, u_char dstat) +{ + u32 dsp; + int script_ofs; + int script_size; + char *script_name; + u_char *script_base; + int i; + + dsp = INL (nc_dsp); + + if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) { + script_ofs = dsp - np->p_script; + script_size = sizeof(struct script); + script_base = (u_char *) np->script0; + script_name = "script"; + } + else if (np->p_scripth < dsp && + dsp <= np->p_scripth + sizeof(struct scripth)) { + script_ofs = dsp - np->p_scripth; + script_size = sizeof(struct scripth); + script_base = (u_char *) np->scripth0; + script_name = "scripth"; + } else { + script_ofs = dsp; + script_size = 0; + script_base = NULL; + script_name = "mem"; + } + + printk ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n", + ncr_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist, + (unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl), (unsigned)INB (nc_sbdl), + (unsigned)INB (nc_sxfer),(unsigned)INB (nc_scntl3), script_name, script_ofs, + (unsigned)INL (nc_dbc)); + + if (((script_ofs & 3) == 0) && + (unsigned)script_ofs < script_size) { + printk ("%s: script cmd = %08x\n", ncr_name(np), + scr_to_cpu((int) *(ncrcmd *)(script_base + script_ofs))); + } + + printk ("%s: regdump:", ncr_name(np)); + for (i=0; i<16;i++) + printk (" %02x", (unsigned)INB_OFF(i)); + printk (".\n"); +} + +/*============================================================ +** +** ncr chip exception handler. +** +**============================================================ +** +** In normal cases, interrupt conditions occur one at a +** time. The ncr is able to stack in some extra registers +** other interrupts that will occur after the first one. +** But, several interrupts may occur at the same time. +** +** We probably should only try to deal with the normal +** case, but it seems that multiple interrupts occur in +** some cases that are not abnormal at all. +** +** The most frequent interrupt condition is Phase Mismatch. +** We should want to service this interrupt quickly. +** A SCSI parity error may be delivered at the same time. +** The SIR interrupt is not very frequent in this driver, +** since the INTFLY is likely used for command completion +** signaling. +** The Selection Timeout interrupt may be triggered with +** IID and/or UDC. +** The SBMC interrupt (SCSI Bus Mode Change) may probably +** occur at any time. +** +** This handler try to deal as cleverly as possible with all +** the above. +** +**============================================================ +*/ + +void ncr_exception (struct ncb *np) +{ + u_char istat, dstat; + u16 sist; + int i; + + /* + ** interrupt on the fly ? + ** Since the global header may be copied back to a CCB + ** using a posted PCI memory write, the last operation on + ** the istat register is a READ in order to flush posted + ** PCI write commands. + */ + istat = INB (nc_istat); + if (istat & INTF) { + OUTB (nc_istat, (istat & SIGP) | INTF); + istat = INB (nc_istat); + if (DEBUG_FLAGS & DEBUG_TINY) printk ("F "); + ncr_wakeup_done (np); + } + + if (!(istat & (SIP|DIP))) + return; + + if (istat & CABRT) + OUTB (nc_istat, CABRT); + + /* + ** Steinbach's Guideline for Systems Programming: + ** Never test for an error condition you don't know how to handle. + */ + + sist = (istat & SIP) ? INW (nc_sist) : 0; + dstat = (istat & DIP) ? 
INB (nc_dstat) : 0;
+
+ if (DEBUG_FLAGS & DEBUG_TINY)
+ printk ("<%d|%x:%x|%x:%x>",
+ (int)INB(nc_scr0),
+ dstat,sist,
+ (unsigned)INL(nc_dsp),
+ (unsigned)INL(nc_dbc));
+
+ /*========================================================
+ ** First, interrupts we want to service cleanly.
+ **
+ ** Phase mismatch is the most frequent interrupt, and
+ ** so we have to service it as quickly and as cleanly
+ ** as possible.
+ ** Programmed interrupts are rarely used in this driver,
+ ** but we must handle them cleanly anyway.
+ ** We try to deal with PAR and SBMC combined with
+ ** some other interrupt(s).
+ **=========================================================
+ */
+
+ if (!(sist & (STO|GEN|HTH|SGE|UDC|RST)) &&
+ !(dstat & (MDPE|BF|ABRT|IID))) {
+ if ((sist & SBMC) && ncr_int_sbmc (np))
+ return;
+ if ((sist & PAR) && ncr_int_par (np))
+ return;
+ if (sist & MA) {
+ ncr_int_ma (np);
+ return;
+ }
+ if (dstat & SIR) {
+ ncr_int_sir (np);
+ return;
+ }
+ /*
+ ** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 2.
+ */
+ if (!(sist & (SBMC|PAR)) && !(dstat & SSI)) {
+ printk( "%s: unknown interrupt(s) ignored, "
+ "ISTAT=%x DSTAT=%x SIST=%x\n",
+ ncr_name(np), istat, dstat, sist);
+ return;
+ }
+ OUTONB_STD ();
+ return;
+ }
+
+ /*========================================================
+ ** Now, interrupts that need some fixing up.
+ ** Order and multiple interrupts is so less important.
+ **
+ ** If SRST has been asserted, we just reset the chip.
+ **
+ ** Selection is intirely handled by the chip. If the
+ ** chip says STO, we trust it. Seems some other
+ ** interrupts may occur at the same time (UDC, IID), so
+ ** we ignore them. In any case we do enough fix-up
+ ** in the service routine.
+ ** We just exclude some fatal dma errors.
+ **=========================================================
+ */
+
+ if (sist & RST) {
+ ncr_init (np, 1, bootverbose ? "scsi reset" : NULL, HS_RESET);
+ return;
+ }
+
+ if ((sist & STO) &&
+ !(dstat & (MDPE|BF|ABRT))) {
+ /*
+ ** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 1.
+ */
+ OUTONB (nc_ctest3, CLF);
+
+ ncr_int_sto (np);
+ return;
+ }
+
+ /*=========================================================
+ ** Now, interrupts we are not able to recover cleanly.
+ ** (At least for the moment).
+ **
+ ** Do the register dump.
+ ** Log message for real hard errors.
+ ** Clear all fifos.
+ ** For MDPE, BF, ABORT, IID, SGE and HTH we reset the
+ ** BUS and the chip.
+ ** We are more soft for UDC.
+ **=========================================================
+ */
+
+ if (time_after(jiffies, np->regtime)) {
+ np->regtime = jiffies + 10*HZ;
+ for (i = 0; i<sizeof(np->regdump); i++)
+ ((char*)&np->regdump)[i] = INB_OFF(i);
+ np->regdump.nc_dstat = dstat;
+ np->regdump.nc_sist = sist;
+ }
+
+ ncr_log_hard_error(np, sist, dstat);
+
+ printk ("%s: have to clear fifos.\n", ncr_name (np));
+ OUTB (nc_stest3, TE|CSF);
+ OUTONB (nc_ctest3, CLF);
+
+ if ((sist & (SGE)) ||
+ (dstat & (MDPE|BF|ABRT|IID))) {
+ ncr_start_reset(np);
+ return;
+ }
+
+ if (sist & HTH) {
+ printk ("%s: handshake timeout\n", ncr_name(np));
+ ncr_start_reset(np);
+ return;
+ }
+
+ if (sist & UDC) {
+ printk ("%s: unexpected disconnect\n", ncr_name(np));
+ OUTB (HS_PRT, HS_UNEXPECTED);
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, cleanup));
+ return;
+ }
+
+ /*=========================================================
+ ** We just miss the cause of the interrupt. :(
+ ** Print a message. The timeout will do the real work.
+ **========================================================= + */ + printk ("%s: unknown interrupt\n", ncr_name(np)); +} + +/*========================================================== +** +** ncr chip exception handler for selection timeout +** +**========================================================== +** +** There seems to be a bug in the 53c810. +** Although a STO-Interrupt is pending, +** it continues executing script commands. +** But it will fail and interrupt (IID) on +** the next instruction where it's looking +** for a valid phase. +** +**---------------------------------------------------------- +*/ + +void ncr_int_sto (struct ncb *np) +{ + u_long dsa; + struct ccb *cp; + if (DEBUG_FLAGS & DEBUG_TINY) printk ("T"); + + /* + ** look for ccb and set the status. + */ + + dsa = INL (nc_dsa); + cp = np->ccb; + while (cp && (CCB_PHYS (cp, phys) != dsa)) + cp = cp->link_ccb; + + if (cp) { + cp-> host_status = HS_SEL_TIMEOUT; + ncr_complete (np, cp); + } + + /* + ** repair start queue and jump to start point. + */ + + OUTL_DSP (NCB_SCRIPTH_PHYS (np, sto_restart)); + return; +} + +/*========================================================== +** +** ncr chip exception handler for SCSI bus mode change +** +**========================================================== +** +** spi2-r12 11.2.3 says a transceiver mode change must +** generate a reset event and a device that detects a reset +** event shall initiate a hard reset. It says also that a +** device that detects a mode change shall set data transfer +** mode to eight bit asynchronous, etc... +** So, just resetting should be enough. +** +** +**---------------------------------------------------------- +*/ + +static int ncr_int_sbmc (struct ncb *np) +{ + u_char scsi_mode = INB (nc_stest4) & SMODE; + + if (scsi_mode != np->scsi_mode) { + printk("%s: SCSI bus mode change from %x to %x.\n", + ncr_name(np), np->scsi_mode, scsi_mode); + + np->scsi_mode = scsi_mode; + + + /* + ** Suspend command processing for 1 second and + ** reinitialize all except the chip. + */ + np->settle_time = jiffies + HZ; + ncr_init (np, 0, bootverbose ? "scsi mode change" : NULL, HS_RESET); + return 1; + } + return 0; +} + +/*========================================================== +** +** ncr chip exception handler for SCSI parity error. +** +**========================================================== +** +** +**---------------------------------------------------------- +*/ + +static int ncr_int_par (struct ncb *np) +{ + u_char hsts = INB (HS_PRT); + u32 dbc = INL (nc_dbc); + u_char sstat1 = INB (nc_sstat1); + int phase = -1; + int msg = -1; + u32 jmp; + + printk("%s: SCSI parity error detected: SCR1=%d DBC=%x SSTAT1=%x\n", + ncr_name(np), hsts, dbc, sstat1); + + /* + * Ignore the interrupt if the NCR is not connected + * to the SCSI bus, since the right work should have + * been done on unexpected disconnection handling. + */ + if (!(INB (nc_scntl1) & ISCON)) + return 0; + + /* + * If the nexus is not clearly identified, reset the bus. + * We will try to do better later. + */ + if (hsts & HS_INVALMASK) + goto reset_all; + + /* + * If the SCSI parity error occurs in MSG IN phase, prepare a + * MSG PARITY message. Otherwise, prepare a INITIATOR DETECTED + * ERROR message and let the device decide to retry the command + * or to terminate with check condition. If we were in MSG IN + * phase waiting for the response of a negotiation, we will + * get SIR_NEGO_FAILED at dispatch. 
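+ *
+ * (The phase is taken from bits 24..26 of DBC, which hold the phase
+ * field of the interrupted block MOVE when the two top bits are
+ * clear: 1 is DATA IN and 7 is MSG IN, hence the special handling
+ * of those two values below.)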
+ */ + if (!(dbc & 0xc0000000)) + phase = (dbc >> 24) & 7; + if (phase == 7) + msg = MSG_PARITY_ERROR; + else + msg = INITIATOR_ERROR; + + + /* + * If the NCR stopped on a MOVE ^ DATA_IN, we jump to a + * script that will ignore all data in bytes until phase + * change, since we are not sure the chip will wait the phase + * change prior to delivering the interrupt. + */ + if (phase == 1) + jmp = NCB_SCRIPTH_PHYS (np, par_err_data_in); + else + jmp = NCB_SCRIPTH_PHYS (np, par_err_other); + + OUTONB (nc_ctest3, CLF ); /* clear dma fifo */ + OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ + + np->msgout[0] = msg; + OUTL_DSP (jmp); + return 1; + +reset_all: + ncr_start_reset(np); + return 1; +} + +/*========================================================== +** +** +** ncr chip exception handler for phase errors. +** +** +**========================================================== +** +** We have to construct a new transfer descriptor, +** to transfer the rest of the current block. +** +**---------------------------------------------------------- +*/ + +static void ncr_int_ma (struct ncb *np) +{ + u32 dbc; + u32 rest; + u32 dsp; + u32 dsa; + u32 nxtdsp; + u32 newtmp; + u32 *vdsp; + u32 oadr, olen; + u32 *tblp; + ncrcmd *newcmd; + u_char cmd, sbcl; + struct ccb *cp; + + dsp = INL (nc_dsp); + dbc = INL (nc_dbc); + sbcl = INB (nc_sbcl); + + cmd = dbc >> 24; + rest = dbc & 0xffffff; + + /* + ** Take into account dma fifo and various buffers and latches, + ** only if the interrupted phase is an OUTPUT phase. + */ + + if ((cmd & 1) == 0) { + u_char ctest5, ss0, ss2; + u16 delta; + + ctest5 = (np->rv_ctest5 & DFS) ? INB (nc_ctest5) : 0; + if (ctest5 & DFS) + delta=(((ctest5 << 8) | (INB (nc_dfifo) & 0xff)) - rest) & 0x3ff; + else + delta=(INB (nc_dfifo) - rest) & 0x7f; + + /* + ** The data in the dma fifo has not been transferred to + ** the target -> add the amount to the rest + ** and clear the data. + ** Check the sstat2 register in case of wide transfer. + */ + + rest += delta; + ss0 = INB (nc_sstat0); + if (ss0 & OLF) rest++; + if (ss0 & ORF) rest++; + if (INB(nc_scntl3) & EWS) { + ss2 = INB (nc_sstat2); + if (ss2 & OLF1) rest++; + if (ss2 & ORF1) rest++; + } + + if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) + printk ("P%x%x RL=%d D=%d SS0=%x ", cmd&7, sbcl&7, + (unsigned) rest, (unsigned) delta, ss0); + + } else { + if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) + printk ("P%x%x RL=%d ", cmd&7, sbcl&7, rest); + } + + /* + ** Clear fifos. + */ + OUTONB (nc_ctest3, CLF ); /* clear dma fifo */ + OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ + + /* + ** locate matching cp. + ** if the interrupted phase is DATA IN or DATA OUT, + ** trust the global header. + */ + dsa = INL (nc_dsa); + if (!(cmd & 6)) { + cp = np->header.cp; + if (CCB_PHYS(cp, phys) != dsa) + cp = NULL; + } else { + cp = np->ccb; + while (cp && (CCB_PHYS (cp, phys) != dsa)) + cp = cp->link_ccb; + } + + /* + ** try to find the interrupted script command, + ** and the address at which to continue. 
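+ **
+ ** (DSP points just past the instruction that was interrupted; a
+ ** block MOVE is two 32-bit words, which is where the "- 8"
+ ** adjustments below and in the unexpected_phase path come from.)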
+ */ + vdsp = NULL; + nxtdsp = 0; + if (dsp > np->p_script && + dsp <= np->p_script + sizeof(struct script)) { + vdsp = (u32 *)((char*)np->script0 + (dsp-np->p_script-8)); + nxtdsp = dsp; + } + else if (dsp > np->p_scripth && + dsp <= np->p_scripth + sizeof(struct scripth)) { + vdsp = (u32 *)((char*)np->scripth0 + (dsp-np->p_scripth-8)); + nxtdsp = dsp; + } + else if (cp) { + if (dsp == CCB_PHYS (cp, patch[2])) { + vdsp = &cp->patch[0]; + nxtdsp = scr_to_cpu(vdsp[3]); + } + else if (dsp == CCB_PHYS (cp, patch[6])) { + vdsp = &cp->patch[4]; + nxtdsp = scr_to_cpu(vdsp[3]); + } + } + + /* + ** log the information + */ + + if (DEBUG_FLAGS & DEBUG_PHASE) { + printk ("\nCP=%p CP2=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", + cp, np->header.cp, + (unsigned)dsp, + (unsigned)nxtdsp, vdsp, cmd); + } + + /* + ** cp=0 means that the DSA does not point to a valid control + ** block. This should not happen since we donnot use multi-byte + ** move while we are being reselected ot after command complete. + ** We are not able to recover from such a phase error. + */ + if (!cp) { + printk ("%s: SCSI phase error fixup: " + "CCB already dequeued (0x%08lx)\n", + ncr_name (np), (u_long) np->header.cp); + goto reset_all; + } + + /* + ** get old startaddress and old length. + */ + + oadr = scr_to_cpu(vdsp[1]); + + if (cmd & 0x10) { /* Table indirect */ + tblp = (u32 *) ((char*) &cp->phys + oadr); + olen = scr_to_cpu(tblp[0]); + oadr = scr_to_cpu(tblp[1]); + } else { + tblp = (u32 *) 0; + olen = scr_to_cpu(vdsp[0]) & 0xffffff; + } + + if (DEBUG_FLAGS & DEBUG_PHASE) { + printk ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n", + (unsigned) (scr_to_cpu(vdsp[0]) >> 24), + tblp, + (unsigned) olen, + (unsigned) oadr); + } + + /* + ** check cmd against assumed interrupted script command. + */ + + if (cmd != (scr_to_cpu(vdsp[0]) >> 24)) { + PRINT_ADDR(cp->cmd, "internal error: cmd=%02x != %02x=(vdsp[0] " + ">> 24)\n", cmd, scr_to_cpu(vdsp[0]) >> 24); + + goto reset_all; + } + + /* + ** cp != np->header.cp means that the header of the CCB + ** currently being processed has not yet been copied to + ** the global header area. That may happen if the device did + ** not accept all our messages after having been selected. + */ + if (cp != np->header.cp) { + printk ("%s: SCSI phase error fixup: " + "CCB address mismatch (0x%08lx != 0x%08lx)\n", + ncr_name (np), (u_long) cp, (u_long) np->header.cp); + } + + /* + ** if old phase not dataphase, leave here. + */ + + if (cmd & 0x06) { + PRINT_ADDR(cp->cmd, "phase change %x-%x %d@%08x resid=%d.\n", + cmd&7, sbcl&7, (unsigned)olen, + (unsigned)oadr, (unsigned)rest); + goto unexpected_phase; + } + + /* + ** choose the correct patch area. + ** if savep points to one, choose the other. + */ + + newcmd = cp->patch; + newtmp = CCB_PHYS (cp, patch); + if (newtmp == scr_to_cpu(cp->phys.header.savep)) { + newcmd = &cp->patch[4]; + newtmp = CCB_PHYS (cp, patch[4]); + } + + /* + ** fillin the commands + */ + + newcmd[0] = cpu_to_scr(((cmd & 0x0f) << 24) | rest); + newcmd[1] = cpu_to_scr(oadr + olen - rest); + newcmd[2] = cpu_to_scr(SCR_JUMP); + newcmd[3] = cpu_to_scr(nxtdsp); + + if (DEBUG_FLAGS & DEBUG_PHASE) { + PRINT_ADDR(cp->cmd, "newcmd[%d] %x %x %x %x.\n", + (int) (newcmd - cp->patch), + (unsigned)scr_to_cpu(newcmd[0]), + (unsigned)scr_to_cpu(newcmd[1]), + (unsigned)scr_to_cpu(newcmd[2]), + (unsigned)scr_to_cpu(newcmd[3])); + } + /* + ** fake the return address (to the patch). + ** and restart script processor at dispatcher. 
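+ **
+ ** Example: if the interrupted MOVE covered olen = 4096 bytes and
+ ** rest = 512 of them are still outstanding, the patch built above
+ ** is a MOVE of 512 bytes starting at oadr + 3584 followed by a
+ ** JUMP to nxtdsp; TEMP is pointed at the patch so that the normal
+ ** SCRIPTS return path executes it.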
+ */ + OUTL (nc_temp, newtmp); + OUTL_DSP (NCB_SCRIPT_PHYS (np, dispatch)); + return; + + /* + ** Unexpected phase changes that occurs when the current phase + ** is not a DATA IN or DATA OUT phase are due to error conditions. + ** Such event may only happen when the SCRIPTS is using a + ** multibyte SCSI MOVE. + ** + ** Phase change Some possible cause + ** + ** COMMAND --> MSG IN SCSI parity error detected by target. + ** COMMAND --> STATUS Bad command or refused by target. + ** MSG OUT --> MSG IN Message rejected by target. + ** MSG OUT --> COMMAND Bogus target that discards extended + ** negotiation messages. + ** + ** The code below does not care of the new phase and so + ** trusts the target. Why to annoy it ? + ** If the interrupted phase is COMMAND phase, we restart at + ** dispatcher. + ** If a target does not get all the messages after selection, + ** the code assumes blindly that the target discards extended + ** messages and clears the negotiation status. + ** If the target does not want all our response to negotiation, + ** we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids + ** bloat for such a should_not_happen situation). + ** In all other situation, we reset the BUS. + ** Are these assumptions reasonable ? (Wait and see ...) + */ +unexpected_phase: + dsp -= 8; + nxtdsp = 0; + + switch (cmd & 7) { + case 2: /* COMMAND phase */ + nxtdsp = NCB_SCRIPT_PHYS (np, dispatch); + break; +#if 0 + case 3: /* STATUS phase */ + nxtdsp = NCB_SCRIPT_PHYS (np, dispatch); + break; +#endif + case 6: /* MSG OUT phase */ + np->scripth->nxtdsp_go_on[0] = cpu_to_scr(dsp + 8); + if (dsp == NCB_SCRIPT_PHYS (np, send_ident)) { + cp->host_status = HS_BUSY; + nxtdsp = NCB_SCRIPTH_PHYS (np, clratn_go_on); + } + else if (dsp == NCB_SCRIPTH_PHYS (np, send_wdtr) || + dsp == NCB_SCRIPTH_PHYS (np, send_sdtr)) { + nxtdsp = NCB_SCRIPTH_PHYS (np, nego_bad_phase); + } + break; +#if 0 + case 7: /* MSG IN phase */ + nxtdsp = NCB_SCRIPT_PHYS (np, clrack); + break; +#endif + } + + if (nxtdsp) { + OUTL_DSP (nxtdsp); + return; + } + +reset_all: + ncr_start_reset(np); +} + + +static void ncr_sir_to_redo(struct ncb *np, int num, struct ccb *cp) +{ + struct scsi_cmnd *cmd = cp->cmd; + struct tcb *tp = &np->target[cmd->device->id]; + struct lcb *lp = tp->lp[cmd->device->lun]; + struct list_head *qp; + struct ccb * cp2; + int disc_cnt = 0; + int busy_cnt = 0; + u32 startp; + u_char s_status = INB (SS_PRT); + + /* + ** Let the SCRIPTS processor skip all not yet started CCBs, + ** and count disconnected CCBs. Since the busy queue is in + ** the same order as the chip start queue, disconnected CCBs + ** are before cp and busy ones after. + */ + if (lp) { + qp = lp->busy_ccbq.prev; + while (qp != &lp->busy_ccbq) { + cp2 = list_entry(qp, struct ccb, link_ccbq); + qp = qp->prev; + ++busy_cnt; + if (cp2 == cp) + break; + cp2->start.schedule.l_paddr = + cpu_to_scr(NCB_SCRIPTH_PHYS (np, skip)); + } + lp->held_ccb = cp; /* Requeue when this one completes */ + disc_cnt = lp->queuedccbs - busy_cnt; + } + + switch(s_status) { + default: /* Just for safety, should never happen */ + case SAM_STAT_TASK_SET_FULL: + /* + ** Decrease number of tags to the number of + ** disconnected commands. + */ + if (!lp) + goto out; + if (bootverbose >= 1) { + PRINT_ADDR(cmd, "QUEUE FULL! %d busy, %d disconnected " + "CCBs\n", busy_cnt, disc_cnt); + } + if (disc_cnt < lp->numtags) { + lp->numtags = disc_cnt > 2 ? disc_cnt : 2; + lp->num_good = 0; + ncr_setup_tags (np, cmd->device); + } + /* + ** Requeue the command to the start queue. 
+ ** If any disconnected commands, + ** Clear SIGP. + ** Jump to reselect. + */ + cp->phys.header.savep = cp->startp; + cp->host_status = HS_BUSY; + cp->scsi_status = SAM_STAT_ILLEGAL; + + ncr_put_start_queue(np, cp); + if (disc_cnt) + INB (nc_ctest2); /* Clear SIGP */ + OUTL_DSP (NCB_SCRIPT_PHYS (np, reselect)); + return; + case SAM_STAT_COMMAND_TERMINATED: + case SAM_STAT_CHECK_CONDITION: + /* + ** If we were requesting sense, give up. + */ + if (cp->auto_sense) + goto out; + + /* + ** Device returned CHECK CONDITION status. + ** Prepare all needed data strutures for getting + ** sense data. + ** + ** identify message + */ + cp->scsi_smsg2[0] = IDENTIFY(0, cmd->device->lun); + cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg2)); + cp->phys.smsg.size = cpu_to_scr(1); + + /* + ** sense command + */ + cp->phys.cmd.addr = cpu_to_scr(CCB_PHYS (cp, sensecmd)); + cp->phys.cmd.size = cpu_to_scr(6); + + /* + ** patch requested size into sense command + */ + cp->sensecmd[0] = 0x03; + cp->sensecmd[1] = (cmd->device->lun & 0x7) << 5; + cp->sensecmd[4] = sizeof(cp->sense_buf); + + /* + ** sense data + */ + memset(cp->sense_buf, 0, sizeof(cp->sense_buf)); + cp->phys.sense.addr = cpu_to_scr(CCB_PHYS(cp,sense_buf[0])); + cp->phys.sense.size = cpu_to_scr(sizeof(cp->sense_buf)); + + /* + ** requeue the command. + */ + startp = cpu_to_scr(NCB_SCRIPTH_PHYS (np, sdata_in)); + + cp->phys.header.savep = startp; + cp->phys.header.goalp = startp + 24; + cp->phys.header.lastp = startp; + cp->phys.header.wgoalp = startp + 24; + cp->phys.header.wlastp = startp; + + cp->host_status = HS_BUSY; + cp->scsi_status = SAM_STAT_ILLEGAL; + cp->auto_sense = s_status; + + cp->start.schedule.l_paddr = + cpu_to_scr(NCB_SCRIPT_PHYS (np, select)); + + /* + ** Select without ATN for quirky devices. + */ + if (cmd->device->select_no_atn) + cp->start.schedule.l_paddr = + cpu_to_scr(NCB_SCRIPTH_PHYS (np, select_no_atn)); + + ncr_put_start_queue(np, cp); + + OUTL_DSP (NCB_SCRIPT_PHYS (np, start)); + return; + } + +out: + OUTONB_STD (); + return; +} + + +/*========================================================== +** +** +** ncr chip exception handler for programmed interrupts. +** +** +**========================================================== +*/ + +void ncr_int_sir (struct ncb *np) +{ + u_char scntl3; + u_char chg, ofs, per, fak, wide; + u_char num = INB (nc_dsps); + struct ccb *cp=NULL; + u_long dsa = INL (nc_dsa); + u_char target = INB (nc_sdid) & 0x0f; + struct tcb *tp = &np->target[target]; + struct scsi_target *starget = tp->starget; + + if (DEBUG_FLAGS & DEBUG_TINY) printk ("I#%d", num); + + switch (num) { + case SIR_INTFLY: + /* + ** This is used for HP Zalon/53c720 where INTFLY + ** operation is currently broken. + */ + ncr_wakeup_done(np); +#ifdef SCSI_NCR_CCB_DONE_SUPPORT + OUTL(nc_dsp, NCB_SCRIPT_PHYS (np, done_end) + 8); +#else + OUTL(nc_dsp, NCB_SCRIPT_PHYS (np, start)); +#endif + return; + case SIR_RESEL_NO_MSG_IN: + case SIR_RESEL_NO_IDENTIFY: + /* + ** If devices reselecting without sending an IDENTIFY + ** message still exist, this should help. + ** We just assume lun=0, 1 CCB, no tag. 
+ */ + if (tp->lp[0]) { + OUTL_DSP (scr_to_cpu(tp->lp[0]->jump_ccb[0])); + return; + } + fallthrough; + case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */ + case SIR_RESEL_BAD_LUN: /* Will send a TARGET RESET message */ + case SIR_RESEL_BAD_I_T_L_Q: /* Will send an ABORT TAG message */ + case SIR_RESEL_BAD_I_T_L: /* Will send an ABORT message */ + printk ("%s:%d: SIR %d, " + "incorrect nexus identification on reselection\n", + ncr_name (np), target, num); + goto out; + case SIR_DONE_OVERFLOW: + printk ("%s:%d: SIR %d, " + "CCB done queue overflow\n", + ncr_name (np), target, num); + goto out; + case SIR_BAD_STATUS: + cp = np->header.cp; + if (!cp || CCB_PHYS (cp, phys) != dsa) + goto out; + ncr_sir_to_redo(np, num, cp); + return; + default: + /* + ** lookup the ccb + */ + cp = np->ccb; + while (cp && (CCB_PHYS (cp, phys) != dsa)) + cp = cp->link_ccb; + + BUG_ON(!cp); + BUG_ON(cp != np->header.cp); + + if (!cp || cp != np->header.cp) + goto out; + } + + switch (num) { +/*----------------------------------------------------------------------------- +** +** Was Sie schon immer ueber transfermode negotiation wissen wollten ... +** ("Everything you've always wanted to know about transfer mode +** negotiation") +** +** We try to negotiate sync and wide transfer only after +** a successful inquire command. We look at byte 7 of the +** inquire data to determine the capabilities of the target. +** +** When we try to negotiate, we append the negotiation message +** to the identify and (maybe) simple tag message. +** The host status field is set to HS_NEGOTIATE to mark this +** situation. +** +** If the target doesn't answer this message immediately +** (as required by the standard), the SIR_NEGO_FAIL interrupt +** will be raised eventually. +** The handler removes the HS_NEGOTIATE status, and sets the +** negotiated value to the default (async / nowide). +** +** If we receive a matching answer immediately, we check it +** for validity, and set the values. +** +** If we receive a Reject message immediately, we assume the +** negotiation has failed, and fall back to standard values. +** +** If we receive a negotiation message while not in HS_NEGOTIATE +** state, it's a target initiated negotiation. We prepare a +** (hopefully) valid answer, set our parameters, and send back +** this answer to the target. +** +** If the target doesn't fetch the answer (no message out phase), +** we assume the negotiation has failed, and fall back to default +** settings. +** +** When we set the values, we adjust them in all ccbs belonging +** to this target, in the controller's register, and in the "phys" +** field of the controller's struct ncb. +** +** Possible cases: hs sir msg_in value send goto +** We try to negotiate: +** -> target doesn't msgin NEG FAIL noop defa. - dispatch +** -> target rejected our msg NEG FAIL reject defa. - dispatch +** -> target answered (ok) NEG SYNC sdtr set - clrack +** -> target answered (!ok) NEG SYNC sdtr defa. REJ--->msg_bad +** -> target answered (ok) NEG WIDE wdtr set - clrack +** -> target answered (!ok) NEG WIDE wdtr defa. REJ--->msg_bad +** -> any other msgin NEG FAIL noop defa. - dispatch +** +** Target tries to negotiate: +** -> incoming message --- SYNC sdtr set SDTR - +** -> incoming message --- WIDE wdtr set WDTR - +** We sent our answer: +** -> target doesn't msgout --- PROTO ? defa. 
- dispatch +** +**----------------------------------------------------------------------------- +*/ + + case SIR_NEGO_FAILED: + /*------------------------------------------------------- + ** + ** Negotiation failed. + ** Target doesn't send an answer message, + ** or target rejected our message. + ** + ** Remove negotiation request. + ** + **------------------------------------------------------- + */ + OUTB (HS_PRT, HS_BUSY); + + fallthrough; + + case SIR_NEGO_PROTO: + /*------------------------------------------------------- + ** + ** Negotiation failed. + ** Target doesn't fetch the answer message. + ** + **------------------------------------------------------- + */ + + if (DEBUG_FLAGS & DEBUG_NEGO) { + PRINT_ADDR(cp->cmd, "negotiation failed sir=%x " + "status=%x.\n", num, cp->nego_status); + } + + /* + ** any error in negotiation: + ** fall back to default mode. + */ + switch (cp->nego_status) { + + case NS_SYNC: + spi_period(starget) = 0; + spi_offset(starget) = 0; + ncr_setsync (np, cp, 0, 0xe0); + break; + + case NS_WIDE: + spi_width(starget) = 0; + ncr_setwide (np, cp, 0, 0); + break; + + } + np->msgin [0] = NOP; + np->msgout[0] = NOP; + cp->nego_status = 0; + break; + + case SIR_NEGO_SYNC: + if (DEBUG_FLAGS & DEBUG_NEGO) { + ncr_print_msg(cp, "sync msgin", np->msgin); + } + + chg = 0; + per = np->msgin[3]; + ofs = np->msgin[4]; + if (ofs==0) per=255; + + /* + ** if target sends SDTR message, + ** it CAN transfer synch. + */ + + if (ofs && starget) + spi_support_sync(starget) = 1; + + /* + ** check values against driver limits. + */ + + if (per < np->minsync) + {chg = 1; per = np->minsync;} + if (per < tp->minsync) + {chg = 1; per = tp->minsync;} + if (ofs > tp->maxoffs) + {chg = 1; ofs = tp->maxoffs;} + + /* + ** Check against controller limits. + */ + fak = 7; + scntl3 = 0; + if (ofs != 0) { + ncr_getsync(np, per, &fak, &scntl3); + if (fak > 7) { + chg = 1; + ofs = 0; + } + } + if (ofs == 0) { + fak = 7; + per = 0; + scntl3 = 0; + tp->minsync = 0; + } + + if (DEBUG_FLAGS & DEBUG_NEGO) { + PRINT_ADDR(cp->cmd, "sync: per=%d scntl3=0x%x ofs=%d " + "fak=%d chg=%d.\n", per, scntl3, ofs, fak, chg); + } + + if (INB (HS_PRT) == HS_NEGOTIATE) { + OUTB (HS_PRT, HS_BUSY); + switch (cp->nego_status) { + + case NS_SYNC: + /* This was an answer message */ + if (chg) { + /* Answer wasn't acceptable. */ + spi_period(starget) = 0; + spi_offset(starget) = 0; + ncr_setsync(np, cp, 0, 0xe0); + OUTL_DSP(NCB_SCRIPT_PHYS (np, msg_bad)); + } else { + /* Answer is ok. */ + spi_period(starget) = per; + spi_offset(starget) = ofs; + ncr_setsync(np, cp, scntl3, (fak<<5)|ofs); + OUTL_DSP(NCB_SCRIPT_PHYS (np, clrack)); + } + return; + + case NS_WIDE: + spi_width(starget) = 0; + ncr_setwide(np, cp, 0, 0); + break; + } + } + + /* + ** It was a request. Set value and + ** prepare an answer message + */ + + spi_period(starget) = per; + spi_offset(starget) = ofs; + ncr_setsync(np, cp, scntl3, (fak<<5)|ofs); + + spi_populate_sync_msg(np->msgout, per, ofs); + cp->nego_status = NS_SYNC; + + if (DEBUG_FLAGS & DEBUG_NEGO) { + ncr_print_msg(cp, "sync msgout", np->msgout); + } + + if (!ofs) { + OUTL_DSP (NCB_SCRIPT_PHYS (np, msg_bad)); + return; + } + np->msgin [0] = NOP; + + break; + + case SIR_NEGO_WIDE: + /* + ** Wide request message received. + */ + if (DEBUG_FLAGS & DEBUG_NEGO) { + ncr_print_msg(cp, "wide msgin", np->msgin); + } + + /* + ** get requested values. + */ + + chg = 0; + wide = np->msgin[3]; + + /* + ** if target sends WDTR message, + ** it CAN transfer wide. 
+ */ + + if (wide && starget) + spi_support_wide(starget) = 1; + + /* + ** check values against driver limits. + */ + + if (wide > tp->usrwide) + {chg = 1; wide = tp->usrwide;} + + if (DEBUG_FLAGS & DEBUG_NEGO) { + PRINT_ADDR(cp->cmd, "wide: wide=%d chg=%d.\n", wide, + chg); + } + + if (INB (HS_PRT) == HS_NEGOTIATE) { + OUTB (HS_PRT, HS_BUSY); + switch (cp->nego_status) { + + case NS_WIDE: + /* + ** This was an answer message + */ + if (chg) { + /* Answer wasn't acceptable. */ + spi_width(starget) = 0; + ncr_setwide(np, cp, 0, 1); + OUTL_DSP (NCB_SCRIPT_PHYS (np, msg_bad)); + } else { + /* Answer is ok. */ + spi_width(starget) = wide; + ncr_setwide(np, cp, wide, 1); + OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack)); + } + return; + + case NS_SYNC: + spi_period(starget) = 0; + spi_offset(starget) = 0; + ncr_setsync(np, cp, 0, 0xe0); + break; + } + } + + /* + ** It was a request, set value and + ** prepare an answer message + */ + + spi_width(starget) = wide; + ncr_setwide(np, cp, wide, 1); + spi_populate_width_msg(np->msgout, wide); + + np->msgin [0] = NOP; + + cp->nego_status = NS_WIDE; + + if (DEBUG_FLAGS & DEBUG_NEGO) { + ncr_print_msg(cp, "wide msgout", np->msgin); + } + break; + +/*-------------------------------------------------------------------- +** +** Processing of special messages +** +**-------------------------------------------------------------------- +*/ + + case SIR_REJECT_RECEIVED: + /*----------------------------------------------- + ** + ** We received a MESSAGE_REJECT. + ** + **----------------------------------------------- + */ + + PRINT_ADDR(cp->cmd, "MESSAGE_REJECT received (%x:%x).\n", + (unsigned)scr_to_cpu(np->lastmsg), np->msgout[0]); + break; + + case SIR_REJECT_SENT: + /*----------------------------------------------- + ** + ** We received an unknown message + ** + **----------------------------------------------- + */ + + ncr_print_msg(cp, "MESSAGE_REJECT sent for", np->msgin); + break; + +/*-------------------------------------------------------------------- +** +** Processing of special messages +** +**-------------------------------------------------------------------- +*/ + + case SIR_IGN_RESIDUE: + /*----------------------------------------------- + ** + ** We received an IGNORE RESIDUE message, + ** which couldn't be handled by the script. + ** + **----------------------------------------------- + */ + + PRINT_ADDR(cp->cmd, "IGNORE_WIDE_RESIDUE received, but not yet " + "implemented.\n"); + break; +#if 0 + case SIR_MISSING_SAVE: + /*----------------------------------------------- + ** + ** We received an DISCONNECT message, + ** but the datapointer wasn't saved before. + ** + **----------------------------------------------- + */ + + PRINT_ADDR(cp->cmd, "DISCONNECT received, but datapointer " + "not saved: data=%x save=%x goal=%x.\n", + (unsigned) INL (nc_temp), + (unsigned) scr_to_cpu(np->header.savep), + (unsigned) scr_to_cpu(np->header.goalp)); + break; +#endif + } + +out: + OUTONB_STD (); +} + +/*========================================================== +** +** +** Acquire a control block +** +** +**========================================================== +*/ + +static struct ccb *ncr_get_ccb(struct ncb *np, struct scsi_cmnd *cmd) +{ + u_char tn = cmd->device->id; + u_char ln = cmd->device->lun; + struct tcb *tp = &np->target[tn]; + struct lcb *lp = tp->lp[ln]; + u_char tag = NO_TAG; + struct ccb *cp = NULL; + + /* + ** Lun structure available ? + */ + if (lp) { + struct list_head *qp; + /* + ** Keep from using more tags than we can handle. 
+ */ + if (lp->usetags && lp->busyccbs >= lp->maxnxs) + return NULL; + + /* + ** Allocate a new CCB if needed. + */ + if (list_empty(&lp->free_ccbq)) + ncr_alloc_ccb(np, tn, ln); + + /* + ** Look for free CCB + */ + qp = ncr_list_pop(&lp->free_ccbq); + if (qp) { + cp = list_entry(qp, struct ccb, link_ccbq); + if (cp->magic) { + PRINT_ADDR(cmd, "ccb free list corrupted " + "(@%p)\n", cp); + cp = NULL; + } else { + list_add_tail(qp, &lp->wait_ccbq); + ++lp->busyccbs; + } + } + + /* + ** If a CCB is available, + ** Get a tag for this nexus if required. + */ + if (cp) { + if (lp->usetags) + tag = lp->cb_tags[lp->ia_tag]; + } + else if (lp->actccbs > 0) + return NULL; + } + + /* + ** if nothing available, take the default. + */ + if (!cp) + cp = np->ccb; + + /* + ** Wait until available. + */ +#if 0 + while (cp->magic) { + if (flags & SCSI_NOSLEEP) break; + if (tsleep ((caddr_t)cp, PRIBIO|PCATCH, "ncr", 0)) + break; + } +#endif + + if (cp->magic) + return NULL; + + cp->magic = 1; + + /* + ** Move to next available tag if tag used. + */ + if (lp) { + if (tag != NO_TAG) { + ++lp->ia_tag; + if (lp->ia_tag == MAX_TAGS) + lp->ia_tag = 0; + lp->tags_umap |= (((tagmap_t) 1) << tag); + } + } + + /* + ** Remember all informations needed to free this CCB. + */ + cp->tag = tag; + cp->target = tn; + cp->lun = ln; + + if (DEBUG_FLAGS & DEBUG_TAGS) { + PRINT_ADDR(cmd, "ccb @%p using tag %d.\n", cp, tag); + } + + return cp; +} + +/*========================================================== +** +** +** Release one control block +** +** +**========================================================== +*/ + +static void ncr_free_ccb (struct ncb *np, struct ccb *cp) +{ + struct tcb *tp = &np->target[cp->target]; + struct lcb *lp = tp->lp[cp->lun]; + + if (DEBUG_FLAGS & DEBUG_TAGS) { + PRINT_ADDR(cp->cmd, "ccb @%p freeing tag %d.\n", cp, cp->tag); + } + + /* + ** If lun control block available, + ** decrement active commands and increment credit, + ** free the tag if any and remove the JUMP for reselect. + */ + if (lp) { + if (cp->tag != NO_TAG) { + lp->cb_tags[lp->if_tag++] = cp->tag; + if (lp->if_tag == MAX_TAGS) + lp->if_tag = 0; + lp->tags_umap &= ~(((tagmap_t) 1) << cp->tag); + lp->tags_smap &= lp->tags_umap; + lp->jump_ccb[cp->tag] = + cpu_to_scr(NCB_SCRIPTH_PHYS(np, bad_i_t_l_q)); + } else { + lp->jump_ccb[0] = + cpu_to_scr(NCB_SCRIPTH_PHYS(np, bad_i_t_l)); + } + } + + /* + ** Make this CCB available. + */ + + if (lp) { + if (cp != np->ccb) + list_move(&cp->link_ccbq, &lp->free_ccbq); + --lp->busyccbs; + if (cp->queued) { + --lp->queuedccbs; + } + } + cp -> host_status = HS_IDLE; + cp -> magic = 0; + if (cp->queued) { + --np->queuedccbs; + cp->queued = 0; + } + +#if 0 + if (cp == np->ccb) + wakeup ((caddr_t) cp); +#endif +} + + +#define ncr_reg_bus_addr(r) (np->paddr + offsetof (struct ncr_reg, r)) + +/*------------------------------------------------------------------------ +** Initialize the fixed part of a CCB structure. +**------------------------------------------------------------------------ +**------------------------------------------------------------------------ +*/ +static void ncr_init_ccb(struct ncb *np, struct ccb *cp) +{ + ncrcmd copy_4 = np->features & FE_PFEN ? SCR_COPY(4) : SCR_COPY_F(4); + + /* + ** Remember virtual and bus address of this ccb. + */ + cp->p_ccb = vtobus(cp); + cp->phys.header.cp = cp; + + /* + ** This allows list_del to work for the default ccb. + */ + INIT_LIST_HEAD(&cp->link_ccbq); + + /* + ** Initialyze the start and restart launch script. 
+ ** + ** COPY(4) @(...p_phys), @(dsa) + ** JUMP @(sched_point) + */ + cp->start.setup_dsa[0] = cpu_to_scr(copy_4); + cp->start.setup_dsa[1] = cpu_to_scr(CCB_PHYS(cp, start.p_phys)); + cp->start.setup_dsa[2] = cpu_to_scr(ncr_reg_bus_addr(nc_dsa)); + cp->start.schedule.l_cmd = cpu_to_scr(SCR_JUMP); + cp->start.p_phys = cpu_to_scr(CCB_PHYS(cp, phys)); + + memcpy(&cp->restart, &cp->start, sizeof(cp->restart)); + + cp->start.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle)); + cp->restart.schedule.l_paddr = cpu_to_scr(NCB_SCRIPTH_PHYS (np, abort)); +} + + +/*------------------------------------------------------------------------ +** Allocate a CCB and initialize its fixed part. +**------------------------------------------------------------------------ +**------------------------------------------------------------------------ +*/ +static void ncr_alloc_ccb(struct ncb *np, u_char tn, u_char ln) +{ + struct tcb *tp = &np->target[tn]; + struct lcb *lp = tp->lp[ln]; + struct ccb *cp = NULL; + + /* + ** Allocate memory for this CCB. + */ + cp = m_calloc_dma(sizeof(struct ccb), "CCB"); + if (!cp) + return; + + /* + ** Count it and initialyze it. + */ + lp->actccbs++; + np->actccbs++; + memset(cp, 0, sizeof (*cp)); + ncr_init_ccb(np, cp); + + /* + ** Chain into wakeup list and free ccb queue and take it + ** into account for tagged commands. + */ + cp->link_ccb = np->ccb->link_ccb; + np->ccb->link_ccb = cp; + + list_add(&cp->link_ccbq, &lp->free_ccbq); +} + +/*========================================================== +** +** +** Allocation of resources for Targets/Luns/Tags. +** +** +**========================================================== +*/ + + +/*------------------------------------------------------------------------ +** Target control block initialisation. +**------------------------------------------------------------------------ +** This data structure is fully initialized after a SCSI command +** has been successfully completed for this target. +** It contains a SCRIPT that is called on target reselection. +**------------------------------------------------------------------------ +*/ +static void ncr_init_tcb (struct ncb *np, u_char tn) +{ + struct tcb *tp = &np->target[tn]; + ncrcmd copy_1 = np->features & FE_PFEN ? SCR_COPY(1) : SCR_COPY_F(1); + int th = tn & 3; + int i; + + /* + ** Jump to next tcb if SFBR does not match this target. + ** JUMP IF (SFBR != #target#), @(next tcb) + */ + tp->jump_tcb.l_cmd = + cpu_to_scr((SCR_JUMP ^ IFFALSE (DATA (0x80 + tn)))); + tp->jump_tcb.l_paddr = np->jump_tcb[th].l_paddr; + + /* + ** Load the synchronous transfer register. + ** COPY @(tp->sval), @(sxfer) + */ + tp->getscr[0] = cpu_to_scr(copy_1); + tp->getscr[1] = cpu_to_scr(vtobus (&tp->sval)); +#ifdef SCSI_NCR_BIG_ENDIAN + tp->getscr[2] = cpu_to_scr(ncr_reg_bus_addr(nc_sxfer) ^ 3); +#else + tp->getscr[2] = cpu_to_scr(ncr_reg_bus_addr(nc_sxfer)); +#endif + + /* + ** Load the timing register. + ** COPY @(tp->wval), @(scntl3) + */ + tp->getscr[3] = cpu_to_scr(copy_1); + tp->getscr[4] = cpu_to_scr(vtobus (&tp->wval)); +#ifdef SCSI_NCR_BIG_ENDIAN + tp->getscr[5] = cpu_to_scr(ncr_reg_bus_addr(nc_scntl3) ^ 3); +#else + tp->getscr[5] = cpu_to_scr(ncr_reg_bus_addr(nc_scntl3)); +#endif + + /* + ** Get the IDENTIFY message and the lun. + ** CALL @script(resel_lun) + */ + tp->call_lun.l_cmd = cpu_to_scr(SCR_CALL); + tp->call_lun.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_lun)); + + /* + ** Look for the lun control block of this nexus. 
+ ** For i = 0 to 3 + ** JUMP ^ IFTRUE (MASK (i, 3)), @(next_lcb) + */ + for (i = 0 ; i < 4 ; i++) { + tp->jump_lcb[i].l_cmd = + cpu_to_scr((SCR_JUMP ^ IFTRUE (MASK (i, 3)))); + tp->jump_lcb[i].l_paddr = + cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_identify)); + } + + /* + ** Link this target control block to the JUMP chain. + */ + np->jump_tcb[th].l_paddr = cpu_to_scr(vtobus (&tp->jump_tcb)); + + /* + ** These assert's should be moved at driver initialisations. + */ +#ifdef SCSI_NCR_BIG_ENDIAN + BUG_ON(((offsetof(struct ncr_reg, nc_sxfer) ^ + offsetof(struct tcb , sval )) &3) != 3); + BUG_ON(((offsetof(struct ncr_reg, nc_scntl3) ^ + offsetof(struct tcb , wval )) &3) != 3); +#else + BUG_ON(((offsetof(struct ncr_reg, nc_sxfer) ^ + offsetof(struct tcb , sval )) &3) != 0); + BUG_ON(((offsetof(struct ncr_reg, nc_scntl3) ^ + offsetof(struct tcb , wval )) &3) != 0); +#endif +} + + +/*------------------------------------------------------------------------ +** Lun control block allocation and initialization. +**------------------------------------------------------------------------ +** This data structure is allocated and initialized after a SCSI +** command has been successfully completed for this target/lun. +**------------------------------------------------------------------------ +*/ +static struct lcb *ncr_alloc_lcb (struct ncb *np, u_char tn, u_char ln) +{ + struct tcb *tp = &np->target[tn]; + struct lcb *lp = tp->lp[ln]; + ncrcmd copy_4 = np->features & FE_PFEN ? SCR_COPY(4) : SCR_COPY_F(4); + int lh = ln & 3; + + /* + ** Already done, return. + */ + if (lp) + return lp; + + /* + ** Allocate the lcb. + */ + lp = m_calloc_dma(sizeof(struct lcb), "LCB"); + if (!lp) + goto fail; + memset(lp, 0, sizeof(*lp)); + tp->lp[ln] = lp; + + /* + ** Initialize the target control block if not yet. + */ + if (!tp->jump_tcb.l_cmd) + ncr_init_tcb(np, tn); + + /* + ** Initialize the CCB queue headers. + */ + INIT_LIST_HEAD(&lp->free_ccbq); + INIT_LIST_HEAD(&lp->busy_ccbq); + INIT_LIST_HEAD(&lp->wait_ccbq); + INIT_LIST_HEAD(&lp->skip_ccbq); + + /* + ** Set max CCBs to 1 and use the default 1 entry + ** jump table by default. + */ + lp->maxnxs = 1; + lp->jump_ccb = &lp->jump_ccb_0; + lp->p_jump_ccb = cpu_to_scr(vtobus(lp->jump_ccb)); + + /* + ** Initilialyze the reselect script: + ** + ** Jump to next lcb if SFBR does not match this lun. + ** Load TEMP with the CCB direct jump table bus address. + ** Get the SIMPLE TAG message and the tag. + ** + ** JUMP IF (SFBR != #lun#), @(next lcb) + ** COPY @(lp->p_jump_ccb), @(temp) + ** JUMP @script(resel_notag) + */ + lp->jump_lcb.l_cmd = + cpu_to_scr((SCR_JUMP ^ IFFALSE (MASK (0x80+ln, 0xff)))); + lp->jump_lcb.l_paddr = tp->jump_lcb[lh].l_paddr; + + lp->load_jump_ccb[0] = cpu_to_scr(copy_4); + lp->load_jump_ccb[1] = cpu_to_scr(vtobus (&lp->p_jump_ccb)); + lp->load_jump_ccb[2] = cpu_to_scr(ncr_reg_bus_addr(nc_temp)); + + lp->jump_tag.l_cmd = cpu_to_scr(SCR_JUMP); + lp->jump_tag.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_notag)); + + /* + ** Link this lun control block to the JUMP chain. + */ + tp->jump_lcb[lh].l_paddr = cpu_to_scr(vtobus (&lp->jump_lcb)); + + /* + ** Initialize command queuing control. + */ + lp->busyccbs = 1; + lp->queuedccbs = 1; + lp->queuedepth = 1; +fail: + return lp; +} + + +/*------------------------------------------------------------------------ +** Lun control block setup on INQUIRY data received. +**------------------------------------------------------------------------ +** We only support WIDE, SYNC for targets and CMDQ for logical units. 
+** This setup is done on each INQUIRY since we are expecting user +** will play with CHANGE DEFINITION commands. :-) +**------------------------------------------------------------------------ +*/ +static struct lcb *ncr_setup_lcb (struct ncb *np, struct scsi_device *sdev) +{ + unsigned char tn = sdev->id, ln = sdev->lun; + struct tcb *tp = &np->target[tn]; + struct lcb *lp = tp->lp[ln]; + + /* If no lcb, try to allocate it. */ + if (!lp && !(lp = ncr_alloc_lcb(np, tn, ln))) + goto fail; + + /* + ** If unit supports tagged commands, allocate the + ** CCB JUMP table if not yet. + */ + if (sdev->tagged_supported && lp->jump_ccb == &lp->jump_ccb_0) { + int i; + lp->jump_ccb = m_calloc_dma(256, "JUMP_CCB"); + if (!lp->jump_ccb) { + lp->jump_ccb = &lp->jump_ccb_0; + goto fail; + } + lp->p_jump_ccb = cpu_to_scr(vtobus(lp->jump_ccb)); + for (i = 0 ; i < 64 ; i++) + lp->jump_ccb[i] = + cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l_q)); + for (i = 0 ; i < MAX_TAGS ; i++) + lp->cb_tags[i] = i; + lp->maxnxs = MAX_TAGS; + lp->tags_stime = jiffies + 3*HZ; + ncr_setup_tags (np, sdev); + } + + +fail: + return lp; +} + +/*========================================================== +** +** +** Build Scatter Gather Block +** +** +**========================================================== +** +** The transfer area may be scattered among +** several non adjacent physical pages. +** +** We may use MAX_SCATTER blocks. +** +**---------------------------------------------------------- +*/ + +/* +** We try to reduce the number of interrupts caused +** by unexpected phase changes due to disconnects. +** A typical harddisk may disconnect before ANY block. +** If we wanted to avoid unexpected phase changes at all +** we had to use a break point every 512 bytes. +** Of course the number of scatter/gather blocks is +** limited. +** Under Linux, the scatter/gatter blocks are provided by +** the generic driver. We just have to copy addresses and +** sizes to the data segment array. +*/ + +static int ncr_scatter(struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd) +{ + int segment = 0; + int use_sg = scsi_sg_count(cmd); + + cp->data_len = 0; + + use_sg = map_scsi_sg_data(np, cmd); + if (use_sg > 0) { + struct scatterlist *sg; + struct scr_tblmove *data; + + if (use_sg > MAX_SCATTER) { + unmap_scsi_data(np, cmd); + return -1; + } + + data = &cp->phys.data[MAX_SCATTER - use_sg]; + + scsi_for_each_sg(cmd, sg, use_sg, segment) { + dma_addr_t baddr = sg_dma_address(sg); + unsigned int len = sg_dma_len(sg); + + ncr_build_sge(np, &data[segment], baddr, len); + cp->data_len += len; + } + } else + segment = -2; + + return segment; +} + +/*========================================================== +** +** +** Test the bus snoop logic :-( +** +** Has to be called with interrupts disabled. +** +** +**========================================================== +*/ + +static int __init ncr_regtest (struct ncb* np) +{ + register volatile u32 data; + /* + ** ncr registers may NOT be cached. + ** write 0xffffffff to a read only register area, + ** and try to read it back. 
+ */
+	data = 0xffffffff;
+	OUTL_OFF(offsetof(struct ncr_reg, nc_dstat), data);
+	data = INL_OFF(offsetof(struct ncr_reg, nc_dstat));
+#if 1
+	if (data == 0xffffffff) {
+#else
+	if ((data & 0xe2f0fffd) != 0x02000080) {
+#endif
+		printk ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
+			(unsigned) data);
+		return (0x10);
+	}
+	return (0);
+}
+
+static int __init ncr_snooptest (struct ncb* np)
+{
+	u32	ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc;
+	int	i, err=0;
+	if (np->reg) {
+		err |= ncr_regtest (np);
+		if (err)
+			return (err);
+	}
+
+	/* init */
+	pc  = NCB_SCRIPTH_PHYS (np, snooptest);
+	host_wr = 1;
+	ncr_wr  = 2;
+	/*
+	**	Set memory and register.
+	*/
+	np->ncr_cache = cpu_to_scr(host_wr);
+	OUTL (nc_temp, ncr_wr);
+	/*
+	**	Start script (exchange values)
+	*/
+	OUTL_DSP (pc);
+	/*
+	**	Wait 'til done (with timeout)
+	*/
+	for (i=0; i<NCR_SNOOP_TIMEOUT; i++)
+		if (INB(nc_istat) & (INTF|SIP|DIP))
+			break;
+	/*
+	**	Save termination position.
+	*/
+	pc = INL (nc_dsp);
+	/*
+	**	Read memory and register.
+	*/
+	host_rd = scr_to_cpu(np->ncr_cache);
+	ncr_rd  = INL (nc_scratcha);
+	ncr_bk  = INL (nc_temp);
+	/*
+	**	Reset ncr chip
+	*/
+	ncr_chip_reset(np, 100);
+	/*
+	**	check for timeout
+	*/
+	if (i>=NCR_SNOOP_TIMEOUT) {
+		printk ("CACHE TEST FAILED: timeout.\n");
+		return (0x20);
+	}
+	/*
+	**	Check termination position.
+	*/
+	if (pc != NCB_SCRIPTH_PHYS (np, snoopend)+8) {
+		printk ("CACHE TEST FAILED: script execution failed.\n");
+		printk ("start=%08lx, pc=%08lx, end=%08lx\n",
+			(u_long) NCB_SCRIPTH_PHYS (np, snooptest), (u_long) pc,
+			(u_long) NCB_SCRIPTH_PHYS (np, snoopend) +8);
+		return (0x40);
+	}
+	/*
+	**	Show results.
+	*/
+	if (host_wr != ncr_rd) {
+		printk ("CACHE TEST FAILED: host wrote %d, ncr read %d.\n",
+			(int) host_wr, (int) ncr_rd);
+		err |= 1;
+	}
+	if (host_rd != ncr_wr) {
+		printk ("CACHE TEST FAILED: ncr wrote %d, host read %d.\n",
+			(int) ncr_wr, (int) host_rd);
+		err |= 2;
+	}
+	if (ncr_bk != ncr_wr) {
+		printk ("CACHE TEST FAILED: ncr wrote %d, read back %d.\n",
+			(int) ncr_wr, (int) ncr_bk);
+		err |= 4;
+	}
+	return (err);
+}
+
+/*==========================================================
+**
+**	Determine the ncr's clock frequency.
+**	This is essential for the negotiation
+**	of the synchronous transfer rate.
+**
+**==========================================================
+**
+**	Note: we have to return the correct value.
+**	THERE IS NO SAFE DEFAULT VALUE.
+**
+**	Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock.
+**	53C860 and 53C875 rev. 1 support fast20 transfers but
+**	do not have a clock doubler and so are provided with a
+**	80 MHz clock. All other fast20 boards incorporate a doubler
+**	and so should be delivered with a 40 MHz clock.
+**	The future fast40 chips (895/895) use a 40 Mhz base clock
+**	and provide a clock quadrupler (160 Mhz). The code below
+**	tries to deal as cleverly as possible with all this stuff.
+**
+**----------------------------------------------------------
+*/
+
+/*
+ *	Select NCR SCSI clock frequency
+ */
+static void ncr_selectclock(struct ncb *np, u_char scntl3)
+{
+	if (np->multiplier < 2) {
+		OUTB(nc_scntl3, scntl3);
+		return;
+	}
+
+	if (bootverbose >= 2)
+		printk ("%s: enabling clock multiplier\n", ncr_name(np));
+
+	OUTB(nc_stest1, DBLEN);	   /* Enable clock multiplier		  */
+	if (np->multiplier > 2) {  /* Poll bit 5 of stest4 for quadrupler */
+		int i = 20;
+		while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
+			udelay(20);
+		if (!i)
+			printk("%s: the chip cannot lock the frequency\n", ncr_name(np));
+	} else			/* Wait 20 micro-seconds for doubler	*/
+		udelay(20);
+	OUTB(nc_stest3, HSC);		/* Halt the scsi clock		*/
+	OUTB(nc_scntl3, scntl3);
+	OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier	*/
+	OUTB(nc_stest3, 0x00);		/* Restart scsi clock 		*/
+}
+
+
+/*
+ *	calculate NCR SCSI clock frequency (in KHz)
+ */
+static unsigned __init ncrgetfreq (struct ncb *np, int gen)
+{
+	unsigned ms = 0;
+	char count = 0;
+
+	/*
+	 * Measure GEN timer delay in order
+	 * to calculate SCSI clock frequency
+	 *
+	 * This code will never execute too
+	 * many loop iterations (if DELAY is
+	 * reasonably correct). It could get
+	 * too low a delay (too high a freq.)
+	 * if the CPU is slow executing the
+	 * loop for some reason (an NMI, for
+	 * example). For this reason we will
+	 * if multiple measurements are to be
+	 * performed trust the higher delay
+	 * (lower frequency returned).
+	 */
+	OUTB (nc_stest1, 0);	/* make sure clock doubler is OFF */
+	OUTW (nc_sien , 0);	/* mask all scsi interrupts */
+	(void) INW (nc_sist);	/* clear pending scsi interrupt */
+	OUTB (nc_dien , 0);	/* mask all dma interrupts */
+	(void) INW (nc_sist);	/* another one, just to be sure :) */
+	OUTB (nc_scntl3, 4);	/* set pre-scaler to divide by 3 */
+	OUTB (nc_stime1, 0);	/* disable general purpose timer */
+	OUTB (nc_stime1, gen);	/* set to nominal delay of 1<<gen * 125us */
+	while (!(INW(nc_sist) & GEN) && ms++ < 100000) {
+		for (count = 0; count < 10; count++)
+			udelay(100);	/* count in 1/10 of ms */
+	}
+	OUTB (nc_stime1, 0);	/* disable general purpose timer */
+	/*
+	 * set prescaler to divide by whatever 0 means
+	 * 0 ought to choose divide by 2, but appears
+	 * to set divide by 3.5 mode in my 53c810 ...
+	 */
+	OUTB (nc_scntl3, 0);
+
+	if (bootverbose >= 2)
+		printk ("%s: Delay (GEN=%d): %u msec\n", ncr_name(np), gen, ms);
+	/*
+	 * adjust for prescaler, and convert into KHz
+	 */
+	return ms ? ((1 << gen) * 4340) / ms : 0;
+}
+
+/*
+ *	Get/probe NCR SCSI clock frequency
+ */
+static void __init ncr_getclock (struct ncb *np, int mult)
+{
+	unsigned char scntl3 = INB(nc_scntl3);
+	unsigned char stest1 = INB(nc_stest1);
+	unsigned f1;
+
+	np->multiplier = 1;
+	f1 = 40000;
+
+	/*
+	**	True with 875 or 895 with clock multiplier selected
+	*/
+	if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
+		if (bootverbose >= 2)
+			printk ("%s: clock multiplier found\n", ncr_name(np));
+		np->multiplier = mult;
+	}
+
+	/*
+	**	If multiplier not found or scntl3 not 7,5,3,
+	**	reset chip and get frequency from general purpose timer.
+	**	Otherwise trust scntl3 BIOS setting.
+ */ + if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) { + unsigned f2; + + ncr_chip_reset(np, 5); + + (void) ncrgetfreq (np, 11); /* throw away first result */ + f1 = ncrgetfreq (np, 11); + f2 = ncrgetfreq (np, 11); + + if(bootverbose) + printk ("%s: NCR clock is %uKHz, %uKHz\n", ncr_name(np), f1, f2); + + if (f1 > f2) f1 = f2; /* trust lower result */ + + if (f1 < 45000) f1 = 40000; + else if (f1 < 55000) f1 = 50000; + else f1 = 80000; + + if (f1 < 80000 && mult > 1) { + if (bootverbose >= 2) + printk ("%s: clock multiplier assumed\n", ncr_name(np)); + np->multiplier = mult; + } + } else { + if ((scntl3 & 7) == 3) f1 = 40000; + else if ((scntl3 & 7) == 5) f1 = 80000; + else f1 = 160000; + + f1 /= np->multiplier; + } + + /* + ** Compute controller synchronous parameters. + */ + f1 *= np->multiplier; + np->clock_khz = f1; +} + +/*===================== LINUX ENTRY POINTS SECTION ==========================*/ + +static int ncr53c8xx_slave_alloc(struct scsi_device *device) +{ + struct Scsi_Host *host = device->host; + struct ncb *np = ((struct host_data *) host->hostdata)->ncb; + struct tcb *tp = &np->target[device->id]; + tp->starget = device->sdev_target; + + return 0; +} + +static int ncr53c8xx_slave_configure(struct scsi_device *device) +{ + struct Scsi_Host *host = device->host; + struct ncb *np = ((struct host_data *) host->hostdata)->ncb; + struct tcb *tp = &np->target[device->id]; + struct lcb *lp = tp->lp[device->lun]; + int numtags, depth_to_use; + + ncr_setup_lcb(np, device); + + /* + ** Select queue depth from driver setup. + ** Donnot use more than configured by user. + ** Use at least 2. + ** Donnot use more than our maximum. + */ + numtags = device_queue_depth(np->unit, device->id, device->lun); + if (numtags > tp->usrtags) + numtags = tp->usrtags; + if (!device->tagged_supported) + numtags = 1; + depth_to_use = numtags; + if (depth_to_use < 2) + depth_to_use = 2; + if (depth_to_use > MAX_TAGS) + depth_to_use = MAX_TAGS; + + scsi_change_queue_depth(device, depth_to_use); + + /* + ** Since the queue depth is not tunable under Linux, + ** we need to know this value in order not to + ** announce stupid things to user. + ** + ** XXX(hch): As of Linux 2.6 it certainly _is_ tunable.. + ** In fact we just tuned it, or did I miss + ** something important? 
:) + */ + if (lp) { + lp->numtags = lp->maxtags = numtags; + lp->scdev_depth = depth_to_use; + } + ncr_setup_tags (np, device); + +#ifdef DEBUG_NCR53C8XX + printk("ncr53c8xx_select_queue_depth: host=%d, id=%d, lun=%d, depth=%d\n", + np->unit, device->id, device->lun, depth_to_use); +#endif + + if (spi_support_sync(device->sdev_target) && + !spi_initial_dv(device->sdev_target)) + spi_dv_device(device); + return 0; +} + +static int ncr53c8xx_queue_command_lck(struct scsi_cmnd *cmd) +{ + struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd); + void (*done)(struct scsi_cmnd *) = scsi_done; + struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb; + unsigned long flags; + int sts; + +#ifdef DEBUG_NCR53C8XX +printk("ncr53c8xx_queue_command\n"); +#endif + + cmd->host_scribble = NULL; + cmd_priv->data_mapped = 0; + cmd_priv->data_mapping = 0; + + spin_lock_irqsave(&np->smp_lock, flags); + + if ((sts = ncr_queue_command(np, cmd)) != DID_OK) { + set_host_byte(cmd, sts); +#ifdef DEBUG_NCR53C8XX +printk("ncr53c8xx : command not queued - result=%d\n", sts); +#endif + } +#ifdef DEBUG_NCR53C8XX + else +printk("ncr53c8xx : command successfully queued\n"); +#endif + + spin_unlock_irqrestore(&np->smp_lock, flags); + + if (sts != DID_OK) { + unmap_scsi_data(np, cmd); + done(cmd); + sts = 0; + } + + return sts; +} + +static DEF_SCSI_QCMD(ncr53c8xx_queue_command) + +irqreturn_t ncr53c8xx_intr(int irq, void *dev_id) +{ + unsigned long flags; + struct Scsi_Host *shost = (struct Scsi_Host *)dev_id; + struct host_data *host_data = (struct host_data *)shost->hostdata; + struct ncb *np = host_data->ncb; + struct scsi_cmnd *done_list; + +#ifdef DEBUG_NCR53C8XX + printk("ncr53c8xx : interrupt received\n"); +#endif + + if (DEBUG_FLAGS & DEBUG_TINY) printk ("["); + + spin_lock_irqsave(&np->smp_lock, flags); + ncr_exception(np); + done_list = np->done_list; + np->done_list = NULL; + spin_unlock_irqrestore(&np->smp_lock, flags); + + if (DEBUG_FLAGS & DEBUG_TINY) printk ("]\n"); + + if (done_list) + ncr_flush_done_cmds(done_list); + return IRQ_HANDLED; +} + +static void ncr53c8xx_timeout(struct timer_list *t) +{ + struct ncb *np = from_timer(np, t, timer); + unsigned long flags; + struct scsi_cmnd *done_list; + + spin_lock_irqsave(&np->smp_lock, flags); + ncr_timeout(np); + done_list = np->done_list; + np->done_list = NULL; + spin_unlock_irqrestore(&np->smp_lock, flags); + + if (done_list) + ncr_flush_done_cmds(done_list); +} + +static int ncr53c8xx_bus_reset(struct scsi_cmnd *cmd) +{ + struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb; + int sts; + unsigned long flags; + struct scsi_cmnd *done_list; + + /* + * If the mid-level driver told us reset is synchronous, it seems + * that we must call the done() callback for the involved command, + * even if this command was not queued to the low-level driver, + * before returning SUCCESS. + */ + + spin_lock_irqsave(&np->smp_lock, flags); + sts = ncr_reset_bus(np); + + done_list = np->done_list; + np->done_list = NULL; + spin_unlock_irqrestore(&np->smp_lock, flags); + + ncr_flush_done_cmds(done_list); + + return sts; +} + + +/* +** Scsi command waiting list management. +** +** It may happen that we cannot insert a scsi command into the start queue, +** in the following circumstances. +** Too few preallocated ccb(s), +** maxtags < cmd_per_lun of the Linux host control block, +** etc... +** Such scsi commands are inserted into a waiting list. +** When a scsi command complete, we try to requeue the commands of the +** waiting list. 
+*/ + +#define next_wcmd host_scribble + +static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd) +{ + struct scsi_cmnd *wcmd; + +#ifdef DEBUG_WAITING_LIST + printk("%s: cmd %lx inserted into waiting list\n", ncr_name(np), (u_long) cmd); +#endif + cmd->next_wcmd = NULL; + if (!(wcmd = np->waiting_list)) np->waiting_list = cmd; + else { + while (wcmd->next_wcmd) + wcmd = (struct scsi_cmnd *) wcmd->next_wcmd; + wcmd->next_wcmd = (char *) cmd; + } +} + +static void process_waiting_list(struct ncb *np, int sts) +{ + struct scsi_cmnd *waiting_list, *wcmd; + + waiting_list = np->waiting_list; + np->waiting_list = NULL; + +#ifdef DEBUG_WAITING_LIST + if (waiting_list) printk("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts); +#endif + while ((wcmd = waiting_list) != NULL) { + waiting_list = (struct scsi_cmnd *) wcmd->next_wcmd; + wcmd->next_wcmd = NULL; + if (sts == DID_OK) { +#ifdef DEBUG_WAITING_LIST + printk("%s: cmd %lx trying to requeue\n", ncr_name(np), (u_long) wcmd); +#endif + sts = ncr_queue_command(np, wcmd); + } + if (sts != DID_OK) { +#ifdef DEBUG_WAITING_LIST + printk("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts); +#endif + set_host_byte(wcmd, sts); + ncr_queue_done_cmd(np, wcmd); + } + } +} + +#undef next_wcmd + +static ssize_t show_ncr53c8xx_revision(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + struct host_data *host_data = (struct host_data *)host->hostdata; + + return snprintf(buf, 20, "0x%x\n", host_data->ncb->revision_id); +} + +static struct device_attribute ncr53c8xx_revision_attr = { + .attr = { .name = "revision", .mode = S_IRUGO, }, + .show = show_ncr53c8xx_revision, +}; + +static struct attribute *ncr53c8xx_host_attrs[] = { + &ncr53c8xx_revision_attr.attr, + NULL +}; + +ATTRIBUTE_GROUPS(ncr53c8xx_host); + +/*========================================================== +** +** Boot command line. +** +**========================================================== +*/ +#ifdef MODULE +char *ncr53c8xx; /* command line passed by insmod */ +module_param(ncr53c8xx, charp, 0); +#endif + +#ifndef MODULE +static int __init ncr53c8xx_setup(char *str) +{ + return sym53c8xx__setup(str); +} + +__setup("ncr53c8xx=", ncr53c8xx_setup); +#endif + + +/* + * Host attach and initialisations. + * + * Allocate host data and ncb structure. + * Request IO region and remap MMIO region. + * Do chip initialization. + * If all is OK, install interrupt handling and + * start the timer daemon. 
+ */ +struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt, + int unit, struct ncr_device *device) +{ + struct host_data *host_data; + struct ncb *np = NULL; + struct Scsi_Host *instance = NULL; + u_long flags = 0; + int i; + + WARN_ON_ONCE(tpnt->cmd_size < sizeof(struct ncr_cmd_priv)); + + if (!tpnt->name) + tpnt->name = SCSI_NCR_DRIVER_NAME; + if (!tpnt->shost_groups) + tpnt->shost_groups = ncr53c8xx_host_groups; + + tpnt->queuecommand = ncr53c8xx_queue_command; + tpnt->slave_configure = ncr53c8xx_slave_configure; + tpnt->slave_alloc = ncr53c8xx_slave_alloc; + tpnt->eh_bus_reset_handler = ncr53c8xx_bus_reset; + tpnt->can_queue = SCSI_NCR_CAN_QUEUE; + tpnt->this_id = 7; + tpnt->sg_tablesize = SCSI_NCR_SG_TABLESIZE; + tpnt->cmd_per_lun = SCSI_NCR_CMD_PER_LUN; + + if (device->differential) + driver_setup.diff_support = device->differential; + + printk(KERN_INFO "ncr53c720-%d: rev 0x%x irq %d\n", + unit, device->chip.revision_id, device->slot.irq); + + instance = scsi_host_alloc(tpnt, sizeof(*host_data)); + if (!instance) + goto attach_error; + host_data = (struct host_data *) instance->hostdata; + + np = __m_calloc_dma(device->dev, sizeof(struct ncb), "NCB"); + if (!np) + goto attach_error; + spin_lock_init(&np->smp_lock); + np->dev = device->dev; + np->p_ncb = vtobus(np); + host_data->ncb = np; + + np->ccb = m_calloc_dma(sizeof(struct ccb), "CCB"); + if (!np->ccb) + goto attach_error; + + /* Store input information in the host data structure. */ + np->unit = unit; + np->verbose = driver_setup.verbose; + sprintf(np->inst_name, "ncr53c720-%d", np->unit); + np->revision_id = device->chip.revision_id; + np->features = device->chip.features; + np->clock_divn = device->chip.nr_divisor; + np->maxoffs = device->chip.offset_max; + np->maxburst = device->chip.burst_max; + np->myaddr = device->host_id; + + /* Allocate SCRIPTS areas. */ + np->script0 = m_calloc_dma(sizeof(struct script), "SCRIPT"); + if (!np->script0) + goto attach_error; + np->scripth0 = m_calloc_dma(sizeof(struct scripth), "SCRIPTH"); + if (!np->scripth0) + goto attach_error; + + timer_setup(&np->timer, ncr53c8xx_timeout, 0); + + /* Try to map the controller chip to virtual and physical memory. */ + + np->paddr = device->slot.base; + np->paddr2 = (np->features & FE_RAM) ? device->slot.base_2 : 0; + + if (device->slot.base_v) + np->vaddr = device->slot.base_v; + else + np->vaddr = ioremap(device->slot.base_c, 128); + + if (!np->vaddr) { + printk(KERN_ERR + "%s: can't map memory mapped IO region\n",ncr_name(np)); + goto attach_error; + } else { + if (bootverbose > 1) + printk(KERN_INFO + "%s: using memory mapped IO at virtual address 0x%lx\n", ncr_name(np), (u_long) np->vaddr); + } + + /* Make the controller's registers available. Now the INB INW INL + * OUTB OUTW OUTL macros can be used safely. + */ + + np->reg = (struct ncr_reg __iomem *)np->vaddr; + + /* Do chip dependent initialization. */ + ncr_prepare_setting(np); + + if (np->paddr2 && sizeof(struct script) > 4096) { + np->paddr2 = 0; + printk(KERN_WARNING "%s: script too large, NOT using on chip RAM.\n", + ncr_name(np)); + } + + instance->max_channel = 0; + instance->this_id = np->myaddr; + instance->max_id = np->maxwide ? 
16 : 8; + instance->max_lun = SCSI_NCR_MAX_LUN; + instance->base = (unsigned long) np->reg; + instance->irq = device->slot.irq; + instance->unique_id = device->slot.base; + instance->dma_channel = 0; + instance->cmd_per_lun = MAX_TAGS; + instance->can_queue = (MAX_START-4); + /* This can happen if you forget to call ncr53c8xx_init from + * your module_init */ + BUG_ON(!ncr53c8xx_transport_template); + instance->transportt = ncr53c8xx_transport_template; + + /* Patch script to physical addresses */ + ncr_script_fill(&script0, &scripth0); + + np->scripth = np->scripth0; + np->p_scripth = vtobus(np->scripth); + np->p_script = (np->paddr2) ? np->paddr2 : vtobus(np->script0); + + ncr_script_copy_and_bind(np, (ncrcmd *) &script0, + (ncrcmd *) np->script0, sizeof(struct script)); + ncr_script_copy_and_bind(np, (ncrcmd *) &scripth0, + (ncrcmd *) np->scripth0, sizeof(struct scripth)); + np->ccb->p_ccb = vtobus (np->ccb); + + /* Patch the script for LED support. */ + + if (np->features & FE_LED0) { + np->script0->idle[0] = + cpu_to_scr(SCR_REG_REG(gpreg, SCR_OR, 0x01)); + np->script0->reselected[0] = + cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe)); + np->script0->start[0] = + cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe)); + } + + /* + * Look for the target control block of this nexus. + * For i = 0 to 3 + * JUMP ^ IFTRUE (MASK (i, 3)), @(next_lcb) + */ + for (i = 0 ; i < 4 ; i++) { + np->jump_tcb[i].l_cmd = + cpu_to_scr((SCR_JUMP ^ IFTRUE (MASK (i, 3)))); + np->jump_tcb[i].l_paddr = + cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_target)); + } + + ncr_chip_reset(np, 100); + + /* Now check the cache handling of the chipset. */ + + if (ncr_snooptest(np)) { + printk(KERN_ERR "CACHE INCORRECTLY CONFIGURED.\n"); + goto attach_error; + } + + /* Install the interrupt handler. */ + np->irq = device->slot.irq; + + /* Initialize the fixed part of the default ccb. */ + ncr_init_ccb(np, np->ccb); + + /* + * After SCSI devices have been opened, we cannot reset the bus + * safely, so we do it here. Interrupt handler does the real work. + * Process the reset exception if interrupts are not enabled yet. + * Then enable disconnects. + */ + spin_lock_irqsave(&np->smp_lock, flags); + if (ncr_reset_scsi_bus(np, 0, driver_setup.settle_delay) != 0) { + printk(KERN_ERR "%s: FATAL ERROR: CHECK SCSI BUS - CABLES, TERMINATION, DEVICE POWER etc.!\n", ncr_name(np)); + + spin_unlock_irqrestore(&np->smp_lock, flags); + goto attach_error; + } + ncr_exception(np); + + np->disc = 1; + + /* + * The middle-level SCSI driver does not wait for devices to settle. + * Wait synchronously if more than 2 seconds. 
+ */ + if (driver_setup.settle_delay > 2) { + printk(KERN_INFO "%s: waiting %d seconds for scsi devices to settle...\n", + ncr_name(np), driver_setup.settle_delay); + mdelay(1000 * driver_setup.settle_delay); + } + + /* start the timeout daemon */ + np->lasttime=0; + ncr_timeout (np); + + /* use SIMPLE TAG messages by default */ +#ifdef SCSI_NCR_ALWAYS_SIMPLE_TAG + np->order = SIMPLE_QUEUE_TAG; +#endif + + spin_unlock_irqrestore(&np->smp_lock, flags); + + return instance; + + attach_error: + if (!instance) + return NULL; + printk(KERN_INFO "%s: detaching...\n", ncr_name(np)); + if (!np) + goto unregister; + if (np->scripth0) + m_free_dma(np->scripth0, sizeof(struct scripth), "SCRIPTH"); + if (np->script0) + m_free_dma(np->script0, sizeof(struct script), "SCRIPT"); + if (np->ccb) + m_free_dma(np->ccb, sizeof(struct ccb), "CCB"); + m_free_dma(np, sizeof(struct ncb), "NCB"); + host_data->ncb = NULL; + + unregister: + scsi_host_put(instance); + + return NULL; +} + + +void ncr53c8xx_release(struct Scsi_Host *host) +{ + struct host_data *host_data = shost_priv(host); +#ifdef DEBUG_NCR53C8XX + printk("ncr53c8xx: release\n"); +#endif + if (host_data->ncb) + ncr_detach(host_data->ncb); + scsi_host_put(host); +} + +static void ncr53c8xx_set_period(struct scsi_target *starget, int period) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ncb *np = ((struct host_data *)shost->hostdata)->ncb; + struct tcb *tp = &np->target[starget->id]; + + if (period > np->maxsync) + period = np->maxsync; + else if (period < np->minsync) + period = np->minsync; + + tp->usrsync = period; + + ncr_negotiate(np, tp); +} + +static void ncr53c8xx_set_offset(struct scsi_target *starget, int offset) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ncb *np = ((struct host_data *)shost->hostdata)->ncb; + struct tcb *tp = &np->target[starget->id]; + + if (offset > np->maxoffs) + offset = np->maxoffs; + else if (offset < 0) + offset = 0; + + tp->maxoffs = offset; + + ncr_negotiate(np, tp); +} + +static void ncr53c8xx_set_width(struct scsi_target *starget, int width) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ncb *np = ((struct host_data *)shost->hostdata)->ncb; + struct tcb *tp = &np->target[starget->id]; + + if (width > np->maxwide) + width = np->maxwide; + else if (width < 0) + width = 0; + + tp->usrwide = width; + + ncr_negotiate(np, tp); +} + +static void ncr53c8xx_get_signalling(struct Scsi_Host *shost) +{ + struct ncb *np = ((struct host_data *)shost->hostdata)->ncb; + enum spi_signal_type type; + + switch (np->scsi_mode) { + case SMODE_SE: + type = SPI_SIGNAL_SE; + break; + case SMODE_HVD: + type = SPI_SIGNAL_HVD; + break; + default: + type = SPI_SIGNAL_UNKNOWN; + break; + } + spi_signalling(shost) = type; +} + +static struct spi_function_template ncr53c8xx_transport_functions = { + .set_period = ncr53c8xx_set_period, + .show_period = 1, + .set_offset = ncr53c8xx_set_offset, + .show_offset = 1, + .set_width = ncr53c8xx_set_width, + .show_width = 1, + .get_signalling = ncr53c8xx_get_signalling, +}; + +int __init ncr53c8xx_init(void) +{ + ncr53c8xx_transport_template = spi_attach_transport(&ncr53c8xx_transport_functions); + if (!ncr53c8xx_transport_template) + return -ENODEV; + return 0; +} + +void ncr53c8xx_exit(void) +{ + spi_release_transport(ncr53c8xx_transport_template); +} diff --git a/drivers/scsi/ncr53c8xx.h b/drivers/scsi/ncr53c8xx.h new file mode 100644 index 000000000..be38c9028 --- /dev/null +++ b/drivers/scsi/ncr53c8xx.h @@ -0,0 
+1,1303 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/****************************************************************************** +** Device driver for the PCI-SCSI NCR538XX controller family. +** +** Copyright (C) 1994 Wolfgang Stanglmeier +** Copyright (C) 1998-2001 Gerard Roudier +** +** +**----------------------------------------------------------------------------- +** +** This driver has been ported to Linux from the FreeBSD NCR53C8XX driver +** and is currently maintained by +** +** Gerard Roudier +** +** Being given that this driver originates from the FreeBSD version, and +** in order to keep synergy on both, any suggested enhancements and corrections +** received on Linux are automatically a potential candidate for the FreeBSD +** version. +** +** The original driver has been written for 386bsd and FreeBSD by +** Wolfgang Stanglmeier +** Stefan Esser +** +** And has been ported to NetBSD by +** Charles M. Hannum +** +** NVRAM detection and reading. +** Copyright (C) 1997 Richard Waltham +** +** Added support for MIPS big endian systems. +** Carsten Langgaard, carstenl@mips.com +** Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. +** +** Added support for HP PARISC big endian systems. +** Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. +** +******************************************************************************* +*/ + +#ifndef NCR53C8XX_H +#define NCR53C8XX_H + +#include + + +/* +** If you want a driver as small as possible, donnot define the +** following options. +*/ +#define SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT +#define SCSI_NCR_DEBUG_INFO_SUPPORT + +/* +** To disable integrity checking, do not define the +** following option. +*/ +#ifdef CONFIG_SCSI_NCR53C8XX_INTEGRITY_CHECK +# define SCSI_NCR_ENABLE_INTEGRITY_CHECK +#endif + +/* --------------------------------------------------------------------- +** Take into account kernel configured parameters. +** Most of these options can be overridden at startup by a command line. +** --------------------------------------------------------------------- +*/ + +/* + * For Ultra2 and Ultra3 SCSI support option, use special features. + * + * Value (default) means: + * bit 0 : all features enabled, except: + * bit 1 : PCI Write And Invalidate. + * bit 2 : Data Phase Mismatch handling from SCRIPTS. + * + * Use boot options ncr53c8xx=specf:1 if you want all chip features to be + * enabled by the driver. + */ +#define SCSI_NCR_SETUP_SPECIAL_FEATURES (3) + +#define SCSI_NCR_MAX_SYNC (80) + +/* + * Allow tags from 2 to 256, default 8 + */ +#ifdef CONFIG_SCSI_NCR53C8XX_MAX_TAGS +#if CONFIG_SCSI_NCR53C8XX_MAX_TAGS < 2 +#define SCSI_NCR_MAX_TAGS (2) +#elif CONFIG_SCSI_NCR53C8XX_MAX_TAGS > 256 +#define SCSI_NCR_MAX_TAGS (256) +#else +#define SCSI_NCR_MAX_TAGS CONFIG_SCSI_NCR53C8XX_MAX_TAGS +#endif +#else +#define SCSI_NCR_MAX_TAGS (8) +#endif + +/* + * Allow tagged command queuing support if configured with default number + * of tags set to max (see above). + */ +#ifdef CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS +#define SCSI_NCR_SETUP_DEFAULT_TAGS CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS +#elif defined CONFIG_SCSI_NCR53C8XX_TAGGED_QUEUE +#define SCSI_NCR_SETUP_DEFAULT_TAGS SCSI_NCR_MAX_TAGS +#else +#define SCSI_NCR_SETUP_DEFAULT_TAGS (0) +#endif + +/* + * Immediate arbitration + */ +#if defined(CONFIG_SCSI_NCR53C8XX_IARB) +#define SCSI_NCR_IARB_SUPPORT +#endif + +/* + * Sync transfer frequency at startup. + * Allow from 5Mhz to 80Mhz default 20 Mhz. 
+ */ +#ifndef CONFIG_SCSI_NCR53C8XX_SYNC +#define CONFIG_SCSI_NCR53C8XX_SYNC (20) +#elif CONFIG_SCSI_NCR53C8XX_SYNC > SCSI_NCR_MAX_SYNC +#undef CONFIG_SCSI_NCR53C8XX_SYNC +#define CONFIG_SCSI_NCR53C8XX_SYNC SCSI_NCR_MAX_SYNC +#endif + +#if CONFIG_SCSI_NCR53C8XX_SYNC == 0 +#define SCSI_NCR_SETUP_DEFAULT_SYNC (255) +#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 5 +#define SCSI_NCR_SETUP_DEFAULT_SYNC (50) +#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 20 +#define SCSI_NCR_SETUP_DEFAULT_SYNC (250/(CONFIG_SCSI_NCR53C8XX_SYNC)) +#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 33 +#define SCSI_NCR_SETUP_DEFAULT_SYNC (11) +#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 40 +#define SCSI_NCR_SETUP_DEFAULT_SYNC (10) +#else +#define SCSI_NCR_SETUP_DEFAULT_SYNC (9) +#endif + +/* + * Disallow disconnections at boot-up + */ +#ifdef CONFIG_SCSI_NCR53C8XX_NO_DISCONNECT +#define SCSI_NCR_SETUP_DISCONNECTION (0) +#else +#define SCSI_NCR_SETUP_DISCONNECTION (1) +#endif + +/* + * Force synchronous negotiation for all targets + */ +#ifdef CONFIG_SCSI_NCR53C8XX_FORCE_SYNC_NEGO +#define SCSI_NCR_SETUP_FORCE_SYNC_NEGO (1) +#else +#define SCSI_NCR_SETUP_FORCE_SYNC_NEGO (0) +#endif + +/* + * Disable master parity checking (flawed hardwares need that) + */ +#ifdef CONFIG_SCSI_NCR53C8XX_DISABLE_MPARITY_CHECK +#define SCSI_NCR_SETUP_MASTER_PARITY (0) +#else +#define SCSI_NCR_SETUP_MASTER_PARITY (1) +#endif + +/* + * Disable scsi parity checking (flawed devices may need that) + */ +#ifdef CONFIG_SCSI_NCR53C8XX_DISABLE_PARITY_CHECK +#define SCSI_NCR_SETUP_SCSI_PARITY (0) +#else +#define SCSI_NCR_SETUP_SCSI_PARITY (1) +#endif + +/* + * Settle time after reset at boot-up + */ +#define SCSI_NCR_SETUP_SETTLE_TIME (2) + +/* +** Bridge quirks work-around option defaulted to 1. +*/ +#ifndef SCSI_NCR_PCIQ_WORK_AROUND_OPT +#define SCSI_NCR_PCIQ_WORK_AROUND_OPT 1 +#endif + +/* +** Work-around common bridge misbehaviour. +** +** - Do not flush posted writes in the opposite +** direction on read. +** - May reorder DMA writes to memory. +** +** This option should not affect performances +** significantly, so it is the default. +*/ +#if SCSI_NCR_PCIQ_WORK_AROUND_OPT == 1 +#define SCSI_NCR_PCIQ_MAY_NOT_FLUSH_PW_UPSTREAM +#define SCSI_NCR_PCIQ_MAY_REORDER_WRITES +#define SCSI_NCR_PCIQ_MAY_MISS_COMPLETIONS + +/* +** Same as option 1, but also deal with +** misconfigured interrupts. +** +** - Edge triggered instead of level sensitive. +** - No interrupt line connected. +** - IRQ number misconfigured. +** +** If no interrupt is delivered, the driver will +** catch the interrupt conditions 10 times per +** second. No need to say that this option is +** not recommended. +*/ +#elif SCSI_NCR_PCIQ_WORK_AROUND_OPT == 2 +#define SCSI_NCR_PCIQ_MAY_NOT_FLUSH_PW_UPSTREAM +#define SCSI_NCR_PCIQ_MAY_REORDER_WRITES +#define SCSI_NCR_PCIQ_MAY_MISS_COMPLETIONS +#define SCSI_NCR_PCIQ_BROKEN_INTR + +/* +** Some bridge designers decided to flush +** everything prior to deliver the interrupt. +** This option tries to deal with such a +** behaviour. +*/ +#elif SCSI_NCR_PCIQ_WORK_AROUND_OPT == 3 +#define SCSI_NCR_PCIQ_SYNC_ON_INTR +#endif + +/* +** Other parameters not configurable with "make config" +** Avoid to change these constants, unless you know what you are doing. +*/ + +#define SCSI_NCR_ALWAYS_SIMPLE_TAG +#define SCSI_NCR_MAX_SCATTER (127) +#define SCSI_NCR_MAX_TARGET (16) + +/* +** Compute some desirable value for CAN_QUEUE +** and CMD_PER_LUN. +** The driver will use lower values if these +** ones appear to be too large. 
+*/ +#define SCSI_NCR_CAN_QUEUE (8*SCSI_NCR_MAX_TAGS + 2*SCSI_NCR_MAX_TARGET) +#define SCSI_NCR_CMD_PER_LUN (SCSI_NCR_MAX_TAGS) + +#define SCSI_NCR_SG_TABLESIZE (SCSI_NCR_MAX_SCATTER) +#define SCSI_NCR_TIMER_INTERVAL (HZ) + +#define SCSI_NCR_MAX_LUN (16) + +/* + * IO functions definition for big/little endian CPU support. + * For now, the NCR is only supported in little endian addressing mode, + */ + +#ifdef __BIG_ENDIAN + +#define inw_l2b inw +#define inl_l2b inl +#define outw_b2l outw +#define outl_b2l outl + +#define readb_raw readb +#define writeb_raw writeb + +#if defined(SCSI_NCR_BIG_ENDIAN) +#define readw_l2b __raw_readw +#define readl_l2b __raw_readl +#define writew_b2l __raw_writew +#define writel_b2l __raw_writel +#define readw_raw __raw_readw +#define readl_raw __raw_readl +#define writew_raw __raw_writew +#define writel_raw __raw_writel +#else /* Other big-endian */ +#define readw_l2b readw +#define readl_l2b readl +#define writew_b2l writew +#define writel_b2l writel +#define readw_raw readw +#define readl_raw readl +#define writew_raw writew +#define writel_raw writel +#endif + +#else /* little endian */ + +#define inw_raw inw +#define inl_raw inl +#define outw_raw outw +#define outl_raw outl + +#define readb_raw readb +#define readw_raw readw +#define readl_raw readl +#define writeb_raw writeb +#define writew_raw writew +#define writel_raw writel + +#endif + +#if !defined(__hppa__) && !defined(__mips__) +#ifdef SCSI_NCR_BIG_ENDIAN +#error "The NCR in BIG ENDIAN addressing mode is not (yet) supported" +#endif +#endif + +#define MEMORY_BARRIER() mb() + + +/* + * If the NCR uses big endian addressing mode over the + * PCI, actual io register addresses for byte and word + * accesses must be changed according to lane routing. + * Btw, ncr_offb() and ncr_offw() macros only apply to + * constants and so donnot generate bloated code. + */ + +#if defined(SCSI_NCR_BIG_ENDIAN) + +#define ncr_offb(o) (((o)&~3)+((~((o)&3))&3)) +#define ncr_offw(o) (((o)&~3)+((~((o)&3))&2)) + +#else + +#define ncr_offb(o) (o) +#define ncr_offw(o) (o) + +#endif + +/* + * If the CPU and the NCR use same endian-ness addressing, + * no byte reordering is needed for script patching. + * Macro cpu_to_scr() is to be used for script patching. + * Macro scr_to_cpu() is to be used for getting a DWORD + * from the script. + */ + +#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN) + +#define cpu_to_scr(dw) cpu_to_le32(dw) +#define scr_to_cpu(dw) le32_to_cpu(dw) + +#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN) + +#define cpu_to_scr(dw) cpu_to_be32(dw) +#define scr_to_cpu(dw) be32_to_cpu(dw) + +#else + +#define cpu_to_scr(dw) (dw) +#define scr_to_cpu(dw) (dw) + +#endif + +/* + * Access to the controller chip. + * + * If the CPU and the NCR use same endian-ness addressing, + * no byte reordering is needed for accessing chip io + * registers. Functions suffixed by '_raw' are assumed + * to access the chip over the PCI without doing byte + * reordering. Functions suffixed by '_l2b' are + * assumed to perform little-endian to big-endian byte + * reordering, those suffixed by '_b2l' blah, blah, + * blah, ... 
+ */ + +/* + * MEMORY mapped IO input / output + */ + +#define INB_OFF(o) readb_raw((char __iomem *)np->reg + ncr_offb(o)) +#define OUTB_OFF(o, val) writeb_raw((val), (char __iomem *)np->reg + ncr_offb(o)) + +#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN) + +#define INW_OFF(o) readw_l2b((char __iomem *)np->reg + ncr_offw(o)) +#define INL_OFF(o) readl_l2b((char __iomem *)np->reg + (o)) + +#define OUTW_OFF(o, val) writew_b2l((val), (char __iomem *)np->reg + ncr_offw(o)) +#define OUTL_OFF(o, val) writel_b2l((val), (char __iomem *)np->reg + (o)) + +#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN) + +#define INW_OFF(o) readw_b2l((char __iomem *)np->reg + ncr_offw(o)) +#define INL_OFF(o) readl_b2l((char __iomem *)np->reg + (o)) + +#define OUTW_OFF(o, val) writew_l2b((val), (char __iomem *)np->reg + ncr_offw(o)) +#define OUTL_OFF(o, val) writel_l2b((val), (char __iomem *)np->reg + (o)) + +#else + +#ifdef CONFIG_SCSI_NCR53C8XX_NO_WORD_TRANSFERS +/* Only 8 or 32 bit transfers allowed */ +#define INW_OFF(o) (readb((char __iomem *)np->reg + ncr_offw(o)) << 8 | readb((char __iomem *)np->reg + ncr_offw(o) + 1)) +#else +#define INW_OFF(o) readw_raw((char __iomem *)np->reg + ncr_offw(o)) +#endif +#define INL_OFF(o) readl_raw((char __iomem *)np->reg + (o)) + +#ifdef CONFIG_SCSI_NCR53C8XX_NO_WORD_TRANSFERS +/* Only 8 or 32 bit transfers allowed */ +#define OUTW_OFF(o, val) do { writeb((char)((val) >> 8), (char __iomem *)np->reg + ncr_offw(o)); writeb((char)(val), (char __iomem *)np->reg + ncr_offw(o) + 1); } while (0) +#else +#define OUTW_OFF(o, val) writew_raw((val), (char __iomem *)np->reg + ncr_offw(o)) +#endif +#define OUTL_OFF(o, val) writel_raw((val), (char __iomem *)np->reg + (o)) + +#endif + +#define INB(r) INB_OFF (offsetof(struct ncr_reg,r)) +#define INW(r) INW_OFF (offsetof(struct ncr_reg,r)) +#define INL(r) INL_OFF (offsetof(struct ncr_reg,r)) + +#define OUTB(r, val) OUTB_OFF (offsetof(struct ncr_reg,r), (val)) +#define OUTW(r, val) OUTW_OFF (offsetof(struct ncr_reg,r), (val)) +#define OUTL(r, val) OUTL_OFF (offsetof(struct ncr_reg,r), (val)) + +/* + * Set bit field ON, OFF + */ + +#define OUTONB(r, m) OUTB(r, INB(r) | (m)) +#define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m)) +#define OUTONW(r, m) OUTW(r, INW(r) | (m)) +#define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m)) +#define OUTONL(r, m) OUTL(r, INL(r) | (m)) +#define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m)) + +/* + * We normally want the chip to have a consistent view + * of driver internal data structures when we restart it. + * Thus these macros. + */ +#define OUTL_DSP(v) \ + do { \ + MEMORY_BARRIER(); \ + OUTL (nc_dsp, (v)); \ + } while (0) + +#define OUTONB_STD() \ + do { \ + MEMORY_BARRIER(); \ + OUTONB (nc_dcntl, (STD|NOCOM)); \ + } while (0) + + +/* +** NCR53C8XX devices features table. 
+*/ +struct ncr_chip { + unsigned short revision_id; + unsigned char burst_max; /* log-base-2 of max burst */ + unsigned char offset_max; + unsigned char nr_divisor; + unsigned int features; +#define FE_LED0 (1<<0) +#define FE_WIDE (1<<1) /* Wide data transfers */ +#define FE_ULTRA (1<<2) /* Ultra speed 20Mtrans/sec */ +#define FE_DBLR (1<<4) /* Clock doubler present */ +#define FE_QUAD (1<<5) /* Clock quadrupler present */ +#define FE_ERL (1<<6) /* Enable read line */ +#define FE_CLSE (1<<7) /* Cache line size enable */ +#define FE_WRIE (1<<8) /* Write & Invalidate enable */ +#define FE_ERMP (1<<9) /* Enable read multiple */ +#define FE_BOF (1<<10) /* Burst opcode fetch */ +#define FE_DFS (1<<11) /* DMA fifo size */ +#define FE_PFEN (1<<12) /* Prefetch enable */ +#define FE_LDSTR (1<<13) /* Load/Store supported */ +#define FE_RAM (1<<14) /* On chip RAM present */ +#define FE_VARCLK (1<<15) /* SCSI clock may vary */ +#define FE_RAM8K (1<<16) /* On chip RAM sized 8Kb */ +#define FE_64BIT (1<<17) /* Have a 64-bit PCI interface */ +#define FE_IO256 (1<<18) /* Requires full 256 bytes in PCI space */ +#define FE_NOPM (1<<19) /* Scripts handles phase mismatch */ +#define FE_LEDC (1<<20) /* Hardware control of LED */ +#define FE_DIFF (1<<21) /* Support Differential SCSI */ +#define FE_66MHZ (1<<23) /* 66MHz PCI Support */ +#define FE_DAC (1<<24) /* Support DAC cycles (64 bit addressing) */ +#define FE_ISTAT1 (1<<25) /* Have ISTAT1, MBOX0, MBOX1 registers */ +#define FE_DAC_IN_USE (1<<26) /* Platform does DAC cycles */ +#define FE_EHP (1<<27) /* 720: Even host parity */ +#define FE_MUX (1<<28) /* 720: Multiplexed bus */ +#define FE_EA (1<<29) /* 720: Enable Ack */ + +#define FE_CACHE_SET (FE_ERL|FE_CLSE|FE_WRIE|FE_ERMP) +#define FE_SCSI_SET (FE_WIDE|FE_ULTRA|FE_DBLR|FE_QUAD|F_CLK80) +#define FE_SPECIAL_SET (FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM) +}; + + +/* +** Driver setup structure. +** +** This structure is initialized from linux config options. +** It can be overridden at boot-up by the boot command line. +*/ +#define SCSI_NCR_MAX_EXCLUDES 8 +struct ncr_driver_setup { + u8 master_parity; + u8 scsi_parity; + u8 disconnection; + u8 special_features; + u8 force_sync_nego; + u8 reverse_probe; + u8 pci_fix_up; + u8 use_nvram; + u8 verbose; + u8 default_tags; + u16 default_sync; + u16 debug; + u8 burst_max; + u8 led_pin; + u8 max_wide; + u8 settle_delay; + u8 diff_support; + u8 irqm; + u8 bus_check; + u8 optimize; + u8 recovery; + u8 host_id; + u16 iarb; + u32 excludes[SCSI_NCR_MAX_EXCLUDES]; + char tag_ctrl[100]; +}; + +/* +** Initial setup. +** Can be overridden at startup by a command line. +*/ +#define SCSI_NCR_DRIVER_SETUP \ +{ \ + SCSI_NCR_SETUP_MASTER_PARITY, \ + SCSI_NCR_SETUP_SCSI_PARITY, \ + SCSI_NCR_SETUP_DISCONNECTION, \ + SCSI_NCR_SETUP_SPECIAL_FEATURES, \ + SCSI_NCR_SETUP_FORCE_SYNC_NEGO, \ + 0, \ + 0, \ + 1, \ + 0, \ + SCSI_NCR_SETUP_DEFAULT_TAGS, \ + SCSI_NCR_SETUP_DEFAULT_SYNC, \ + 0x00, \ + 7, \ + 0, \ + 1, \ + SCSI_NCR_SETUP_SETTLE_TIME, \ + 0, \ + 0, \ + 1, \ + 0, \ + 0, \ + 255, \ + 0x00 \ +} + +/* +** Boot fail safe setup. 
+** Override initial setup from boot command line: +** ncr53c8xx=safe:y +*/ +#define SCSI_NCR_DRIVER_SAFE_SETUP \ +{ \ + 0, \ + 1, \ + 0, \ + 0, \ + 0, \ + 0, \ + 0, \ + 1, \ + 2, \ + 0, \ + 255, \ + 0x00, \ + 255, \ + 0, \ + 0, \ + 10, \ + 1, \ + 1, \ + 1, \ + 0, \ + 0, \ + 255 \ +} + +/**************** ORIGINAL CONTENT of ncrreg.h from FreeBSD ******************/ + +/*----------------------------------------------------------------- +** +** The ncr 53c810 register structure. +** +**----------------------------------------------------------------- +*/ + +struct ncr_reg { +/*00*/ u8 nc_scntl0; /* full arb., ena parity, par->ATN */ + +/*01*/ u8 nc_scntl1; /* no reset */ + #define ISCON 0x10 /* connected to scsi */ + #define CRST 0x08 /* force reset */ + #define IARB 0x02 /* immediate arbitration */ + +/*02*/ u8 nc_scntl2; /* no disconnect expected */ + #define SDU 0x80 /* cmd: disconnect will raise error */ + #define CHM 0x40 /* sta: chained mode */ + #define WSS 0x08 /* sta: wide scsi send [W]*/ + #define WSR 0x01 /* sta: wide scsi received [W]*/ + +/*03*/ u8 nc_scntl3; /* cnf system clock dependent */ + #define EWS 0x08 /* cmd: enable wide scsi [W]*/ + #define ULTRA 0x80 /* cmd: ULTRA enable */ + /* bits 0-2, 7 rsvd for C1010 */ + +/*04*/ u8 nc_scid; /* cnf host adapter scsi address */ + #define RRE 0x40 /* r/w:e enable response to resel. */ + #define SRE 0x20 /* r/w:e enable response to select */ + +/*05*/ u8 nc_sxfer; /* ### Sync speed and count */ + /* bits 6-7 rsvd for C1010 */ + +/*06*/ u8 nc_sdid; /* ### Destination-ID */ + +/*07*/ u8 nc_gpreg; /* ??? IO-Pins */ + +/*08*/ u8 nc_sfbr; /* ### First byte in phase */ + +/*09*/ u8 nc_socl; + #define CREQ 0x80 /* r/w: SCSI-REQ */ + #define CACK 0x40 /* r/w: SCSI-ACK */ + #define CBSY 0x20 /* r/w: SCSI-BSY */ + #define CSEL 0x10 /* r/w: SCSI-SEL */ + #define CATN 0x08 /* r/w: SCSI-ATN */ + #define CMSG 0x04 /* r/w: SCSI-MSG */ + #define CC_D 0x02 /* r/w: SCSI-C_D */ + #define CI_O 0x01 /* r/w: SCSI-I_O */ + +/*0a*/ u8 nc_ssid; + +/*0b*/ u8 nc_sbcl; + +/*0c*/ u8 nc_dstat; + #define DFE 0x80 /* sta: dma fifo empty */ + #define MDPE 0x40 /* int: master data parity error */ + #define BF 0x20 /* int: script: bus fault */ + #define ABRT 0x10 /* int: script: command aborted */ + #define SSI 0x08 /* int: script: single step */ + #define SIR 0x04 /* int: script: interrupt instruct. */ + #define IID 0x01 /* int: script: illegal instruct. 
*/ + +/*0d*/ u8 nc_sstat0; + #define ILF 0x80 /* sta: data in SIDL register lsb */ + #define ORF 0x40 /* sta: data in SODR register lsb */ + #define OLF 0x20 /* sta: data in SODL register lsb */ + #define AIP 0x10 /* sta: arbitration in progress */ + #define LOA 0x08 /* sta: arbitration lost */ + #define WOA 0x04 /* sta: arbitration won */ + #define IRST 0x02 /* sta: scsi reset signal */ + #define SDP 0x01 /* sta: scsi parity signal */ + +/*0e*/ u8 nc_sstat1; + #define FF3210 0xf0 /* sta: bytes in the scsi fifo */ + +/*0f*/ u8 nc_sstat2; + #define ILF1 0x80 /* sta: data in SIDL register msb[W]*/ + #define ORF1 0x40 /* sta: data in SODR register msb[W]*/ + #define OLF1 0x20 /* sta: data in SODL register msb[W]*/ + #define DM 0x04 /* sta: DIFFSENS mismatch (895/6 only) */ + #define LDSC 0x02 /* sta: disconnect & reconnect */ + +/*10*/ u8 nc_dsa; /* --> Base page */ +/*11*/ u8 nc_dsa1; +/*12*/ u8 nc_dsa2; +/*13*/ u8 nc_dsa3; + +/*14*/ u8 nc_istat; /* --> Main Command and status */ + #define CABRT 0x80 /* cmd: abort current operation */ + #define SRST 0x40 /* mod: reset chip */ + #define SIGP 0x20 /* r/w: message from host to ncr */ + #define SEM 0x10 /* r/w: message between host + ncr */ + #define CON 0x08 /* sta: connected to scsi */ + #define INTF 0x04 /* sta: int on the fly (reset by wr)*/ + #define SIP 0x02 /* sta: scsi-interrupt */ + #define DIP 0x01 /* sta: host/script interrupt */ + +/*15*/ u8 nc_istat1; /* 896 and later cores only */ + #define FLSH 0x04 /* sta: chip is flushing */ + #define SRUN 0x02 /* sta: scripts are running */ + #define SIRQD 0x01 /* r/w: disable INT pin */ + +/*16*/ u8 nc_mbox0; /* 896 and later cores only */ +/*17*/ u8 nc_mbox1; /* 896 and later cores only */ + +/*18*/ u8 nc_ctest0; + #define EHP 0x04 /* 720 even host parity */ +/*19*/ u8 nc_ctest1; + +/*1a*/ u8 nc_ctest2; + #define CSIGP 0x40 + /* bits 0-2,7 rsvd for C1010 */ + +/*1b*/ u8 nc_ctest3; + #define FLF 0x08 /* cmd: flush dma fifo */ + #define CLF 0x04 /* cmd: clear dma fifo */ + #define FM 0x02 /* mod: fetch pin mode */ + #define WRIE 0x01 /* mod: write and invalidate enable */ + /* bits 4-7 rsvd for C1010 */ + +/*1c*/ u32 nc_temp; /* ### Temporary stack */ + +/*20*/ u8 nc_dfifo; +/*21*/ u8 nc_ctest4; + #define MUX 0x80 /* 720 host bus multiplex mode */ + #define BDIS 0x80 /* mod: burst disable */ + #define MPEE 0x08 /* mod: master parity error enable */ + +/*22*/ u8 nc_ctest5; + #define DFS 0x20 /* mod: dma fifo size */ + /* bits 0-1, 3-7 rsvd for C1010 */ +/*23*/ u8 nc_ctest6; + +/*24*/ u32 nc_dbc; /* ### Byte count and command */ +/*28*/ u32 nc_dnad; /* ### Next command register */ +/*2c*/ u32 nc_dsp; /* --> Script Pointer */ +/*30*/ u32 nc_dsps; /* --> Script pointer save/opcode#2 */ + +/*34*/ u8 nc_scratcha; /* Temporary register a */ +/*35*/ u8 nc_scratcha1; +/*36*/ u8 nc_scratcha2; +/*37*/ u8 nc_scratcha3; + +/*38*/ u8 nc_dmode; + #define BL_2 0x80 /* mod: burst length shift value +2 */ + #define BL_1 0x40 /* mod: burst length shift value +1 */ + #define ERL 0x08 /* mod: enable read line */ + #define ERMP 0x04 /* mod: enable read multiple */ + #define BOF 0x02 /* mod: burst op code fetch */ + +/*39*/ u8 nc_dien; +/*3a*/ u8 nc_sbr; + +/*3b*/ u8 nc_dcntl; /* --> Script execution control */ + #define CLSE 0x80 /* mod: cache line size enable */ + #define PFF 0x40 /* cmd: pre-fetch flush */ + #define PFEN 0x20 /* mod: pre-fetch enable */ + #define EA 0x20 /* mod: 720 enable-ack */ + #define SSM 0x10 /* mod: single step mode */ + #define IRQM 0x08 /* mod: irq mode (1 = totem pole !) 
*/ + #define STD 0x04 /* cmd: start dma mode */ + #define IRQD 0x02 /* mod: irq disable */ + #define NOCOM 0x01 /* cmd: protect sfbr while reselect */ + /* bits 0-1 rsvd for C1010 */ + +/*3c*/ u32 nc_adder; + +/*40*/ u16 nc_sien; /* -->: interrupt enable */ +/*42*/ u16 nc_sist; /* <--: interrupt status */ + #define SBMC 0x1000/* sta: SCSI Bus Mode Change (895/6 only) */ + #define STO 0x0400/* sta: timeout (select) */ + #define GEN 0x0200/* sta: timeout (general) */ + #define HTH 0x0100/* sta: timeout (handshake) */ + #define MA 0x80 /* sta: phase mismatch */ + #define CMP 0x40 /* sta: arbitration complete */ + #define SEL 0x20 /* sta: selected by another device */ + #define RSL 0x10 /* sta: reselected by another device*/ + #define SGE 0x08 /* sta: gross error (over/underflow)*/ + #define UDC 0x04 /* sta: unexpected disconnect */ + #define RST 0x02 /* sta: scsi bus reset detected */ + #define PAR 0x01 /* sta: scsi parity error */ + +/*44*/ u8 nc_slpar; +/*45*/ u8 nc_swide; +/*46*/ u8 nc_macntl; +/*47*/ u8 nc_gpcntl; +/*48*/ u8 nc_stime0; /* cmd: timeout for select&handshake*/ +/*49*/ u8 nc_stime1; /* cmd: timeout user defined */ +/*4a*/ u16 nc_respid; /* sta: Reselect-IDs */ + +/*4c*/ u8 nc_stest0; + +/*4d*/ u8 nc_stest1; + #define SCLK 0x80 /* Use the PCI clock as SCSI clock */ + #define DBLEN 0x08 /* clock doubler running */ + #define DBLSEL 0x04 /* clock doubler selected */ + + +/*4e*/ u8 nc_stest2; + #define ROF 0x40 /* reset scsi offset (after gross error!) */ + #define DIF 0x20 /* 720 SCSI differential mode */ + #define EXT 0x02 /* extended filtering */ + +/*4f*/ u8 nc_stest3; + #define TE 0x80 /* c: tolerAnt enable */ + #define HSC 0x20 /* c: Halt SCSI Clock */ + #define CSF 0x02 /* c: clear scsi fifo */ + +/*50*/ u16 nc_sidl; /* Lowlevel: latched from scsi data */ +/*52*/ u8 nc_stest4; + #define SMODE 0xc0 /* SCSI bus mode (895/6 only) */ + #define SMODE_HVD 0x40 /* High Voltage Differential */ + #define SMODE_SE 0x80 /* Single Ended */ + #define SMODE_LVD 0xc0 /* Low Voltage Differential */ + #define LCKFRQ 0x20 /* Frequency Lock (895/6 only) */ + /* bits 0-5 rsvd for C1010 */ + +/*53*/ u8 nc_53_; +/*54*/ u16 nc_sodl; /* Lowlevel: data out to scsi data */ +/*56*/ u8 nc_ccntl0; /* Chip Control 0 (896) */ + #define ENPMJ 0x80 /* Enable Phase Mismatch Jump */ + #define PMJCTL 0x40 /* Phase Mismatch Jump Control */ + #define ENNDJ 0x20 /* Enable Non Data PM Jump */ + #define DISFC 0x10 /* Disable Auto FIFO Clear */ + #define DILS 0x02 /* Disable Internal Load/Store */ + #define DPR 0x01 /* Disable Pipe Req */ + +/*57*/ u8 nc_ccntl1; /* Chip Control 1 (896) */ + #define ZMOD 0x80 /* High Impedance Mode */ + #define DIC 0x10 /* Disable Internal Cycles */ + #define DDAC 0x08 /* Disable Dual Address Cycle */ + #define XTIMOD 0x04 /* 64-bit Table Ind. Indexing Mode */ + #define EXTIBMV 0x02 /* Enable 64-bit Table Ind. 
BMOV */ + #define EXDBMV 0x01 /* Enable 64-bit Direct BMOV */ + +/*58*/ u16 nc_sbdl; /* Lowlevel: data from scsi data */ +/*5a*/ u16 nc_5a_; + +/*5c*/ u8 nc_scr0; /* Working register B */ +/*5d*/ u8 nc_scr1; /* */ +/*5e*/ u8 nc_scr2; /* */ +/*5f*/ u8 nc_scr3; /* */ + +/*60*/ u8 nc_scrx[64]; /* Working register C-R */ +/*a0*/ u32 nc_mmrs; /* Memory Move Read Selector */ +/*a4*/ u32 nc_mmws; /* Memory Move Write Selector */ +/*a8*/ u32 nc_sfs; /* Script Fetch Selector */ +/*ac*/ u32 nc_drs; /* DSA Relative Selector */ +/*b0*/ u32 nc_sbms; /* Static Block Move Selector */ +/*b4*/ u32 nc_dbms; /* Dynamic Block Move Selector */ +/*b8*/ u32 nc_dnad64; /* DMA Next Address 64 */ +/*bc*/ u16 nc_scntl4; /* C1010 only */ + #define U3EN 0x80 /* Enable Ultra 3 */ + #define AIPEN 0x40 /* Allow check upper byte lanes */ + #define XCLKH_DT 0x08 /* Extra clock of data hold on DT + transfer edge */ + #define XCLKH_ST 0x04 /* Extra clock of data hold on ST + transfer edge */ + +/*be*/ u8 nc_aipcntl0; /* Epat Control 1 C1010 only */ +/*bf*/ u8 nc_aipcntl1; /* AIP Control C1010_66 Only */ + +/*c0*/ u32 nc_pmjad1; /* Phase Mismatch Jump Address 1 */ +/*c4*/ u32 nc_pmjad2; /* Phase Mismatch Jump Address 2 */ +/*c8*/ u8 nc_rbc; /* Remaining Byte Count */ +/*c9*/ u8 nc_rbc1; /* */ +/*ca*/ u8 nc_rbc2; /* */ +/*cb*/ u8 nc_rbc3; /* */ + +/*cc*/ u8 nc_ua; /* Updated Address */ +/*cd*/ u8 nc_ua1; /* */ +/*ce*/ u8 nc_ua2; /* */ +/*cf*/ u8 nc_ua3; /* */ +/*d0*/ u32 nc_esa; /* Entry Storage Address */ +/*d4*/ u8 nc_ia; /* Instruction Address */ +/*d5*/ u8 nc_ia1; +/*d6*/ u8 nc_ia2; +/*d7*/ u8 nc_ia3; +/*d8*/ u32 nc_sbc; /* SCSI Byte Count (3 bytes only) */ +/*dc*/ u32 nc_csbc; /* Cumulative SCSI Byte Count */ + + /* Following for C1010 only */ +/*e0*/ u16 nc_crcpad; /* CRC Value */ +/*e2*/ u8 nc_crccntl0; /* CRC control register */ + #define SNDCRC 0x10 /* Send CRC Request */ +/*e3*/ u8 nc_crccntl1; /* CRC control register */ +/*e4*/ u32 nc_crcdata; /* CRC data register */ +/*e8*/ u32 nc_e8_; /* rsvd */ +/*ec*/ u32 nc_ec_; /* rsvd */ +/*f0*/ u16 nc_dfbc; /* DMA FIFO byte count */ + +}; + +/*----------------------------------------------------------- +** +** Utility macros for the script. +** +**----------------------------------------------------------- +*/ + +#define REGJ(p,r) (offsetof(struct ncr_reg, p ## r)) +#define REG(r) REGJ (nc_, r) + +typedef u32 ncrcmd; + +/*----------------------------------------------------------- +** +** SCSI phases +** +** DT phases illegal for ncr driver. +** +**----------------------------------------------------------- +*/ + +#define SCR_DATA_OUT 0x00000000 +#define SCR_DATA_IN 0x01000000 +#define SCR_COMMAND 0x02000000 +#define SCR_STATUS 0x03000000 +#define SCR_DT_DATA_OUT 0x04000000 +#define SCR_DT_DATA_IN 0x05000000 +#define SCR_MSG_OUT 0x06000000 +#define SCR_MSG_IN 0x07000000 + +#define SCR_ILG_OUT 0x04000000 +#define SCR_ILG_IN 0x05000000 + +/*----------------------------------------------------------- +** +** Data transfer via SCSI. 
+** +**----------------------------------------------------------- +** +** MOVE_ABS (LEN) +** <> +** +** MOVE_IND (LEN) +** <> +** +** MOVE_TBL +** <> +** +**----------------------------------------------------------- +*/ + +#define OPC_MOVE 0x08000000 + +#define SCR_MOVE_ABS(l) ((0x00000000 | OPC_MOVE) | (l)) +#define SCR_MOVE_IND(l) ((0x20000000 | OPC_MOVE) | (l)) +#define SCR_MOVE_TBL (0x10000000 | OPC_MOVE) + +#define SCR_CHMOV_ABS(l) ((0x00000000) | (l)) +#define SCR_CHMOV_IND(l) ((0x20000000) | (l)) +#define SCR_CHMOV_TBL (0x10000000) + +struct scr_tblmove { + u32 size; + u32 addr; +}; + +/*----------------------------------------------------------- +** +** Selection +** +**----------------------------------------------------------- +** +** SEL_ABS | SCR_ID (0..15) [ | REL_JMP] +** <> +** +** SEL_TBL | << dnad_offset>> [ | REL_JMP] +** <> +** +**----------------------------------------------------------- +*/ + +#define SCR_SEL_ABS 0x40000000 +#define SCR_SEL_ABS_ATN 0x41000000 +#define SCR_SEL_TBL 0x42000000 +#define SCR_SEL_TBL_ATN 0x43000000 + + +#ifdef SCSI_NCR_BIG_ENDIAN +struct scr_tblsel { + u8 sel_scntl3; + u8 sel_id; + u8 sel_sxfer; + u8 sel_scntl4; +}; +#else +struct scr_tblsel { + u8 sel_scntl4; + u8 sel_sxfer; + u8 sel_id; + u8 sel_scntl3; +}; +#endif + +#define SCR_JMP_REL 0x04000000 +#define SCR_ID(id) (((u32)(id)) << 16) + +/*----------------------------------------------------------- +** +** Waiting for Disconnect or Reselect +** +**----------------------------------------------------------- +** +** WAIT_DISC +** dummy: <> +** +** WAIT_RESEL +** <> +** +**----------------------------------------------------------- +*/ + +#define SCR_WAIT_DISC 0x48000000 +#define SCR_WAIT_RESEL 0x50000000 + +/*----------------------------------------------------------- +** +** Bit Set / Reset +** +**----------------------------------------------------------- +** +** SET (flags {|.. }) +** +** CLR (flags {|.. }) +** +**----------------------------------------------------------- +*/ + +#define SCR_SET(f) (0x58000000 | (f)) +#define SCR_CLR(f) (0x60000000 | (f)) + +#define SCR_CARRY 0x00000400 +#define SCR_TRG 0x00000200 +#define SCR_ACK 0x00000040 +#define SCR_ATN 0x00000008 + + + + +/*----------------------------------------------------------- +** +** Memory to memory move +** +**----------------------------------------------------------- +** +** COPY (bytecount) +** << source_address >> +** << destination_address >> +** +** SCR_COPY sets the NO FLUSH option by default. +** SCR_COPY_F does not set this option. +** +** For chips which do not support this option, +** ncr_copy_and_bind() will remove this bit. +**----------------------------------------------------------- +*/ + +#define SCR_NO_FLUSH 0x01000000 + +#define SCR_COPY(n) (0xc0000000 | SCR_NO_FLUSH | (n)) +#define SCR_COPY_F(n) (0xc0000000 | (n)) + +/*----------------------------------------------------------- +** +** Register move and binary operations +** +**----------------------------------------------------------- +** +** SFBR_REG (reg, op, data) reg = SFBR op data +** << 0 >> +** +** REG_SFBR (reg, op, data) SFBR = reg op data +** << 0 >> +** +** REG_REG (reg, op, data) reg = reg op data +** << 0 >> +** +**----------------------------------------------------------- +** On 810A, 860, 825A, 875, 895 and 896 chips the content +** of SFBR register can be used as data (SCR_SFBR_DATA). +** The 896 has additional IO registers starting at +** offset 0x80. 
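+** An illustrative use of these macros (assumed from the LED-control
+** fragments typical of this driver family, not a definitive quote):
+** each instruction is a pair of 32-bit words, the second one being
+** the "<< 0 >>" shown above, e.g.
+**
+**	SCR_REG_REG (gpreg, SCR_AND, 0xfe),
+**		0,
+**
+** which clears bit 0 of the GPREG register.
+**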
Bit 7 of register offset is stored in +** bit 7 of the SCRIPTS instruction first DWORD. +**----------------------------------------------------------- +*/ + +#define SCR_REG_OFS(ofs) ((((ofs) & 0x7f) << 16ul) + ((ofs) & 0x80)) + +#define SCR_SFBR_REG(reg,op,data) \ + (0x68000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul)) + +#define SCR_REG_SFBR(reg,op,data) \ + (0x70000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul)) + +#define SCR_REG_REG(reg,op,data) \ + (0x78000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul)) + + +#define SCR_LOAD 0x00000000 +#define SCR_SHL 0x01000000 +#define SCR_OR 0x02000000 +#define SCR_XOR 0x03000000 +#define SCR_AND 0x04000000 +#define SCR_SHR 0x05000000 +#define SCR_ADD 0x06000000 +#define SCR_ADDC 0x07000000 + +#define SCR_SFBR_DATA (0x00800000>>8ul) /* Use SFBR as data */ + +/*----------------------------------------------------------- +** +** FROM_REG (reg) SFBR = reg +** << 0 >> +** +** TO_REG (reg) reg = SFBR +** << 0 >> +** +** LOAD_REG (reg, data) reg = +** << 0 >> +** +** LOAD_SFBR(data) SFBR = +** << 0 >> +** +**----------------------------------------------------------- +*/ + +#define SCR_FROM_REG(reg) \ + SCR_REG_SFBR(reg,SCR_OR,0) + +#define SCR_TO_REG(reg) \ + SCR_SFBR_REG(reg,SCR_OR,0) + +#define SCR_LOAD_REG(reg,data) \ + SCR_REG_REG(reg,SCR_LOAD,data) + +#define SCR_LOAD_SFBR(data) \ + (SCR_REG_SFBR (gpreg, SCR_LOAD, data)) + +/*----------------------------------------------------------- +** +** LOAD from memory to register. +** STORE from register to memory. +** +** Only supported by 810A, 860, 825A, 875, 895 and 896. +** +**----------------------------------------------------------- +** +** LOAD_ABS (LEN) +** <> +** +** LOAD_REL (LEN) (DSA relative) +** <> +** +**----------------------------------------------------------- +*/ + +#define SCR_REG_OFS2(ofs) (((ofs) & 0xff) << 16ul) +#define SCR_NO_FLUSH2 0x02000000 +#define SCR_DSA_REL2 0x10000000 + +#define SCR_LOAD_R(reg, how, n) \ + (0xe1000000 | how | (SCR_REG_OFS2(REG(reg))) | (n)) + +#define SCR_STORE_R(reg, how, n) \ + (0xe0000000 | how | (SCR_REG_OFS2(REG(reg))) | (n)) + +#define SCR_LOAD_ABS(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2, n) +#define SCR_LOAD_REL(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2, n) +#define SCR_LOAD_ABS_F(reg, n) SCR_LOAD_R(reg, 0, n) +#define SCR_LOAD_REL_F(reg, n) SCR_LOAD_R(reg, SCR_DSA_REL2, n) + +#define SCR_STORE_ABS(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2, n) +#define SCR_STORE_REL(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2,n) +#define SCR_STORE_ABS_F(reg, n) SCR_STORE_R(reg, 0, n) +#define SCR_STORE_REL_F(reg, n) SCR_STORE_R(reg, SCR_DSA_REL2, n) + + +/*----------------------------------------------------------- +** +** Waiting for Disconnect or Reselect +** +**----------------------------------------------------------- +** +** JUMP [ | IFTRUE/IFFALSE ( ... ) ] +** <
> +** +** JUMPR [ | IFTRUE/IFFALSE ( ... ) ] +** <> +** +** CALL [ | IFTRUE/IFFALSE ( ... ) ] +** <
> +** +** CALLR [ | IFTRUE/IFFALSE ( ... ) ] +** <> +** +** RETURN [ | IFTRUE/IFFALSE ( ... ) ] +** <> +** +** INT [ | IFTRUE/IFFALSE ( ... ) ] +** <> +** +** INT_FLY [ | IFTRUE/IFFALSE ( ... ) ] +** <> +** +** Conditions: +** WHEN (phase) +** IF (phase) +** CARRYSET +** DATA (data, mask) +** +**----------------------------------------------------------- +*/ + +#define SCR_NO_OP 0x80000000 +#define SCR_JUMP 0x80080000 +#define SCR_JUMP64 0x80480000 +#define SCR_JUMPR 0x80880000 +#define SCR_CALL 0x88080000 +#define SCR_CALLR 0x88880000 +#define SCR_RETURN 0x90080000 +#define SCR_INT 0x98080000 +#define SCR_INT_FLY 0x98180000 + +#define IFFALSE(arg) (0x00080000 | (arg)) +#define IFTRUE(arg) (0x00000000 | (arg)) + +#define WHEN(phase) (0x00030000 | (phase)) +#define IF(phase) (0x00020000 | (phase)) + +#define DATA(D) (0x00040000 | ((D) & 0xff)) +#define MASK(D,M) (0x00040000 | (((M ^ 0xff) & 0xff) << 8ul)|((D) & 0xff)) + +#define CARRYSET (0x00200000) + +/*----------------------------------------------------------- +** +** SCSI constants. +** +**----------------------------------------------------------- +*/ + +/* + * End of ncrreg from FreeBSD + */ + +/* + Build a scatter/gather entry. + see sym53c8xx_2/sym_hipd.h for more detailed sym_build_sge() + implementation ;) + */ + +#define ncr_build_sge(np, data, badd, len) \ +do { \ + (data)->addr = cpu_to_scr(badd); \ + (data)->size = cpu_to_scr(len); \ +} while (0) + +/*========================================================== +** +** Structures used by the detection routine to transmit +** device configuration to the attach function. +** +**========================================================== +*/ +struct ncr_slot { + u_long base; + u_long base_2; + u_long base_c; + u_long base_2_c; + void __iomem *base_v; + void __iomem *base_2_v; + int irq; +/* port and reg fields to use INB, OUTB macros */ + volatile struct ncr_reg __iomem *reg; +}; + +/*========================================================== +** +** Structure used by detection routine to save data on +** each detected board for attach. +** +**========================================================== +*/ +struct ncr_device { + struct device *dev; + struct ncr_slot slot; + struct ncr_chip chip; + u_char host_id; + u8 differential; +}; + +/* To keep track of the dma mapping (sg/single) that has been set */ +struct ncr_cmd_priv { + int data_mapped; + int data_mapping; +}; + +extern struct Scsi_Host *ncr_attach(struct scsi_host_template *tpnt, int unit, struct ncr_device *device); +extern void ncr53c8xx_release(struct Scsi_Host *host); +irqreturn_t ncr53c8xx_intr(int irq, void *dev_id); +extern int ncr53c8xx_init(void); +extern void ncr53c8xx_exit(void); + +#endif /* NCR53C8XX_H */ diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c new file mode 100644 index 000000000..b79870196 --- /dev/null +++ b/drivers/scsi/nsp32.c @@ -0,0 +1,3404 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * NinjaSCSI-32Bi Cardbus, NinjaSCSI-32UDE PCI/CardBus SCSI driver + * Copyright (C) 2001, 2002, 2003 + * YOKOTA Hiroshi + * GOTO Masanori , + * + * Revision History: + * 1.0: Initial Release. + * 1.1: Add /proc SDTR status. + * Remove obsolete error handler nsp32_reset. + * Some clean up. + * 1.2: PowerPC (big endian) support. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +#include "nsp32.h" + + +/*********************************************************************** + * Module parameters + */ +static int trans_mode = 0; /* default: BIOS */ +module_param (trans_mode, int, 0); +MODULE_PARM_DESC(trans_mode, "transfer mode (0: BIOS(default) 1: Async 2: Ultra20M"); +#define ASYNC_MODE 1 +#define ULTRA20M_MODE 2 + +static bool auto_param = 0; /* default: ON */ +module_param (auto_param, bool, 0); +MODULE_PARM_DESC(auto_param, "AutoParameter mode (0: ON(default) 1: OFF)"); + +static bool disc_priv = 1; /* default: OFF */ +module_param (disc_priv, bool, 0); +MODULE_PARM_DESC(disc_priv, "disconnection privilege mode (0: ON 1: OFF(default))"); + +MODULE_AUTHOR("YOKOTA Hiroshi , GOTO Masanori "); +MODULE_DESCRIPTION("Workbit NinjaSCSI-32Bi/UDE CardBus/PCI SCSI host bus adapter module"); +MODULE_LICENSE("GPL"); + +static const char *nsp32_release_version = "1.2"; + + +/**************************************************************************** + * Supported hardware + */ +static struct pci_device_id nsp32_pci_table[] = { + { + .vendor = PCI_VENDOR_ID_IODATA, + .device = PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = MODEL_IODATA, + }, + { + .vendor = PCI_VENDOR_ID_WORKBIT, + .device = PCI_DEVICE_ID_NINJASCSI_32BI_KME, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = MODEL_KME, + }, + { + .vendor = PCI_VENDOR_ID_WORKBIT, + .device = PCI_DEVICE_ID_NINJASCSI_32BI_WBT, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = MODEL_WORKBIT, + }, + { + .vendor = PCI_VENDOR_ID_WORKBIT, + .device = PCI_DEVICE_ID_WORKBIT_STANDARD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = MODEL_PCI_WORKBIT, + }, + { + .vendor = PCI_VENDOR_ID_WORKBIT, + .device = PCI_DEVICE_ID_NINJASCSI_32BI_LOGITEC, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = MODEL_LOGITEC, + }, + { + .vendor = PCI_VENDOR_ID_WORKBIT, + .device = PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = MODEL_PCI_LOGITEC, + }, + { + .vendor = PCI_VENDOR_ID_WORKBIT, + .device = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = MODEL_PCI_MELCO, + }, + { + .vendor = PCI_VENDOR_ID_WORKBIT, + .device = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO_II, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = MODEL_PCI_MELCO, + }, + {0,0,}, +}; +MODULE_DEVICE_TABLE(pci, nsp32_pci_table); + +static nsp32_hw_data nsp32_data_base; /* probe <-> detect glue */ + + +/* + * Period/AckWidth speed conversion table + * + * Note: This period/ackwidth speed table must be in descending order. 
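+ *
+ * For illustration: on the 40MHz table below, a negotiated period
+ * factor of 0x1b lies between the start value 0x1a and the end value
+ * 0x1f, so entry number 0x4 is chosen: 125ns period, AckWidth 1,
+ * i.e. roughly 8.0MB/s.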
+ */ +static nsp32_sync_table nsp32_sync_table_40M[] = { + /* {PNo, AW, SP, EP, SREQ smpl} Speed(MB/s) Period AckWidth */ + {0x1, 0, 0x0c, 0x0c, SMPL_40M}, /* 20.0 : 50ns, 25ns */ + {0x2, 0, 0x0d, 0x18, SMPL_40M}, /* 13.3 : 75ns, 25ns */ + {0x3, 1, 0x19, 0x19, SMPL_40M}, /* 10.0 : 100ns, 50ns */ + {0x4, 1, 0x1a, 0x1f, SMPL_20M}, /* 8.0 : 125ns, 50ns */ + {0x5, 2, 0x20, 0x25, SMPL_20M}, /* 6.7 : 150ns, 75ns */ + {0x6, 2, 0x26, 0x31, SMPL_20M}, /* 5.7 : 175ns, 75ns */ + {0x7, 3, 0x32, 0x32, SMPL_20M}, /* 5.0 : 200ns, 100ns */ + {0x8, 3, 0x33, 0x38, SMPL_10M}, /* 4.4 : 225ns, 100ns */ + {0x9, 3, 0x39, 0x3e, SMPL_10M}, /* 4.0 : 250ns, 100ns */ +}; + +static nsp32_sync_table nsp32_sync_table_20M[] = { + {0x1, 0, 0x19, 0x19, SMPL_40M}, /* 10.0 : 100ns, 50ns */ + {0x2, 0, 0x1a, 0x25, SMPL_20M}, /* 6.7 : 150ns, 50ns */ + {0x3, 1, 0x26, 0x32, SMPL_20M}, /* 5.0 : 200ns, 100ns */ + {0x4, 1, 0x33, 0x3e, SMPL_10M}, /* 4.0 : 250ns, 100ns */ + {0x5, 2, 0x3f, 0x4b, SMPL_10M}, /* 3.3 : 300ns, 150ns */ + {0x6, 2, 0x4c, 0x57, SMPL_10M}, /* 2.8 : 350ns, 150ns */ + {0x7, 3, 0x58, 0x64, SMPL_10M}, /* 2.5 : 400ns, 200ns */ + {0x8, 3, 0x65, 0x70, SMPL_10M}, /* 2.2 : 450ns, 200ns */ + {0x9, 3, 0x71, 0x7d, SMPL_10M}, /* 2.0 : 500ns, 200ns */ +}; + +static nsp32_sync_table nsp32_sync_table_pci[] = { + {0x1, 0, 0x0c, 0x0f, SMPL_40M}, /* 16.6 : 60ns, 30ns */ + {0x2, 0, 0x10, 0x16, SMPL_40M}, /* 11.1 : 90ns, 30ns */ + {0x3, 1, 0x17, 0x1e, SMPL_20M}, /* 8.3 : 120ns, 60ns */ + {0x4, 1, 0x1f, 0x25, SMPL_20M}, /* 6.7 : 150ns, 60ns */ + {0x5, 2, 0x26, 0x2d, SMPL_20M}, /* 5.6 : 180ns, 90ns */ + {0x6, 2, 0x2e, 0x34, SMPL_10M}, /* 4.8 : 210ns, 90ns */ + {0x7, 3, 0x35, 0x3c, SMPL_10M}, /* 4.2 : 240ns, 120ns */ + {0x8, 3, 0x3d, 0x43, SMPL_10M}, /* 3.7 : 270ns, 120ns */ + {0x9, 3, 0x44, 0x4b, SMPL_10M}, /* 3.3 : 300ns, 120ns */ +}; + +/* + * function declaration + */ +/* module entry point */ +static int nsp32_probe (struct pci_dev *, const struct pci_device_id *); +static void nsp32_remove(struct pci_dev *); +static int __init init_nsp32 (void); +static void __exit exit_nsp32 (void); + +/* struct struct scsi_host_template */ +static int nsp32_show_info (struct seq_file *, struct Scsi_Host *); + +static int nsp32_detect (struct pci_dev *pdev); +static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); +static const char *nsp32_info (struct Scsi_Host *); +static int nsp32_release (struct Scsi_Host *); + +/* SCSI error handler */ +static int nsp32_eh_abort (struct scsi_cmnd *); +static int nsp32_eh_host_reset(struct scsi_cmnd *); + +/* generate SCSI message */ +static void nsp32_build_identify(struct scsi_cmnd *); +static void nsp32_build_nop (struct scsi_cmnd *); +static void nsp32_build_reject (struct scsi_cmnd *); +static void nsp32_build_sdtr (struct scsi_cmnd *, unsigned char, + unsigned char); + +/* SCSI message handler */ +static int nsp32_busfree_occur(struct scsi_cmnd *, unsigned short); +static void nsp32_msgout_occur (struct scsi_cmnd *); +static void nsp32_msgin_occur (struct scsi_cmnd *, unsigned long, + unsigned short); + +static int nsp32_setup_sg_table (struct scsi_cmnd *); +static int nsp32_selection_autopara(struct scsi_cmnd *); +static int nsp32_selection_autoscsi(struct scsi_cmnd *); +static void nsp32_scsi_done (struct scsi_cmnd *); +static int nsp32_arbitration (struct scsi_cmnd *, unsigned int); +static int nsp32_reselection (struct scsi_cmnd *, unsigned char); +static void nsp32_adjust_busfree (struct scsi_cmnd *, unsigned int); +static void nsp32_restart_autoscsi (struct scsi_cmnd *, unsigned 
short); + +/* SCSI SDTR */ +static void nsp32_analyze_sdtr (struct scsi_cmnd *); +static int nsp32_search_period_entry(nsp32_hw_data *, nsp32_target *, + unsigned char); +static void nsp32_set_async (nsp32_hw_data *, nsp32_target *); +static void nsp32_set_max_sync (nsp32_hw_data *, nsp32_target *, + unsigned char *, unsigned char *); +static void nsp32_set_sync_entry (nsp32_hw_data *, nsp32_target *, + int, unsigned char); + +/* SCSI bus status handler */ +static void nsp32_wait_req (nsp32_hw_data *, int); +static void nsp32_wait_sack (nsp32_hw_data *, int); +static void nsp32_sack_assert (nsp32_hw_data *); +static void nsp32_sack_negate (nsp32_hw_data *); +static void nsp32_do_bus_reset(nsp32_hw_data *); + +/* hardware interrupt handler */ +static irqreturn_t do_nsp32_isr(int, void *); + +/* initialize hardware */ +static int nsp32hw_init(nsp32_hw_data *); + +/* EEPROM handler */ +static int nsp32_getprom_param (nsp32_hw_data *); +static int nsp32_getprom_at24 (nsp32_hw_data *); +static int nsp32_getprom_c16 (nsp32_hw_data *); +static void nsp32_prom_start (nsp32_hw_data *); +static void nsp32_prom_stop (nsp32_hw_data *); +static int nsp32_prom_read (nsp32_hw_data *, int); +static int nsp32_prom_read_bit (nsp32_hw_data *); +static void nsp32_prom_write_bit(nsp32_hw_data *, int); +static void nsp32_prom_set (nsp32_hw_data *, int, int); +static int nsp32_prom_get (nsp32_hw_data *, int); + +/* debug/warning/info message */ +static void nsp32_message (const char *, int, char *, char *, ...); +#ifdef NSP32_DEBUG +static void nsp32_dmessage(const char *, int, int, char *, ...); +#endif + +/* + * max_sectors is currently limited up to 128. + */ +static const struct scsi_host_template nsp32_template = { + .proc_name = "nsp32", + .name = "Workbit NinjaSCSI-32Bi/UDE", + .show_info = nsp32_show_info, + .info = nsp32_info, + .queuecommand = nsp32_queuecommand, + .can_queue = 1, + .sg_tablesize = NSP32_SG_SIZE, + .max_sectors = 128, + .this_id = NSP32_HOST_SCSIID, + .dma_boundary = PAGE_SIZE - 1, + .eh_abort_handler = nsp32_eh_abort, + .eh_host_reset_handler = nsp32_eh_host_reset, +/* .highmem_io = 1, */ + .cmd_size = sizeof(struct nsp32_cmd_priv), +}; + +#include "nsp32_io.h" + +/*********************************************************************** + * debug, error print + */ +#ifndef NSP32_DEBUG +# define NSP32_DEBUG_MASK 0x000000 +# define nsp32_msg(type, args...) nsp32_message ("", 0, (type), args) +# define nsp32_dbg(mask, args...) /* */ +#else +# define NSP32_DEBUG_MASK 0xffffff +# define nsp32_msg(type, args...) \ + nsp32_message (__func__, __LINE__, (type), args) +# define nsp32_dbg(mask, args...) \ + nsp32_dmessage(__func__, __LINE__, (mask), args) +#endif + +#define NSP32_DEBUG_QUEUECOMMAND BIT(0) +#define NSP32_DEBUG_REGISTER BIT(1) +#define NSP32_DEBUG_AUTOSCSI BIT(2) +#define NSP32_DEBUG_INTR BIT(3) +#define NSP32_DEBUG_SGLIST BIT(4) +#define NSP32_DEBUG_BUSFREE BIT(5) +#define NSP32_DEBUG_CDB_CONTENTS BIT(6) +#define NSP32_DEBUG_RESELECTION BIT(7) +#define NSP32_DEBUG_MSGINOCCUR BIT(8) +#define NSP32_DEBUG_EEPROM BIT(9) +#define NSP32_DEBUG_MSGOUTOCCUR BIT(10) +#define NSP32_DEBUG_BUSRESET BIT(11) +#define NSP32_DEBUG_RESTART BIT(12) +#define NSP32_DEBUG_SYNC BIT(13) +#define NSP32_DEBUG_WAIT BIT(14) +#define NSP32_DEBUG_TARGETFLAG BIT(15) +#define NSP32_DEBUG_PROC BIT(16) +#define NSP32_DEBUG_INIT BIT(17) +#define NSP32_SPECIAL_PRINT_REGISTER BIT(20) + +#define NSP32_DEBUG_BUF_LEN 100 + +__printf(4, 5) +static void nsp32_message(const char *func, int line, char *type, char *fmt, ...) 
+{ + va_list args; + char buf[NSP32_DEBUG_BUF_LEN]; + + va_start(args, fmt); + vsnprintf(buf, sizeof(buf), fmt, args); + va_end(args); + +#ifndef NSP32_DEBUG + printk("%snsp32: %s\n", type, buf); +#else + printk("%snsp32: %s (%d): %s\n", type, func, line, buf); +#endif +} + +#ifdef NSP32_DEBUG +static void nsp32_dmessage(const char *func, int line, int mask, char *fmt, ...) +{ + va_list args; + char buf[NSP32_DEBUG_BUF_LEN]; + + va_start(args, fmt); + vsnprintf(buf, sizeof(buf), fmt, args); + va_end(args); + + if (mask & NSP32_DEBUG_MASK) { + printk("nsp32-debug: 0x%x %s (%d): %s\n", mask, func, line, buf); + } +} +#endif + +#ifdef NSP32_DEBUG +# include "nsp32_debug.c" +#else +# define show_command(arg) /* */ +# define show_busphase(arg) /* */ +# define show_autophase(arg) /* */ +#endif + +/* + * IDENTIFY Message + */ +static void nsp32_build_identify(struct scsi_cmnd *SCpnt) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + int pos = data->msgout_len; + int mode = FALSE; + + /* XXX: Auto DiscPriv detection is progressing... */ + if (disc_priv == 0) { + /* mode = TRUE; */ + } + + data->msgoutbuf[pos] = IDENTIFY(mode, SCpnt->device->lun); pos++; + + data->msgout_len = pos; +} + +/* + * SDTR Message Routine + */ +static void nsp32_build_sdtr(struct scsi_cmnd *SCpnt, + unsigned char period, + unsigned char offset) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + int pos = data->msgout_len; + + data->msgoutbuf[pos] = EXTENDED_MESSAGE; pos++; + data->msgoutbuf[pos] = EXTENDED_SDTR_LEN; pos++; + data->msgoutbuf[pos] = EXTENDED_SDTR; pos++; + data->msgoutbuf[pos] = period; pos++; + data->msgoutbuf[pos] = offset; pos++; + + data->msgout_len = pos; +} + +/* + * No Operation Message + */ +static void nsp32_build_nop(struct scsi_cmnd *SCpnt) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + int pos = data->msgout_len; + + if (pos != 0) { + nsp32_msg(KERN_WARNING, + "Some messages are already contained!"); + return; + } + + data->msgoutbuf[pos] = NOP; pos++; + data->msgout_len = pos; +} + +/* + * Reject Message + */ +static void nsp32_build_reject(struct scsi_cmnd *SCpnt) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + int pos = data->msgout_len; + + data->msgoutbuf[pos] = MESSAGE_REJECT; pos++; + data->msgout_len = pos; +} + +/* + * timer + */ +#if 0 +static void nsp32_start_timer(struct scsi_cmnd *SCpnt, int time) +{ + unsigned int base = SCpnt->host->io_port; + + nsp32_dbg(NSP32_DEBUG_INTR, "timer=%d", time); + + if (time & (~TIMER_CNT_MASK)) { + nsp32_dbg(NSP32_DEBUG_INTR, "timer set overflow"); + } + + nsp32_write2(base, TIMER_SET, time & TIMER_CNT_MASK); +} +#endif + + +/* + * set SCSI command and other parameter to asic, and start selection phase + */ +static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + unsigned int base = SCpnt->device->host->io_port; + unsigned int host_id = SCpnt->device->host->this_id; + unsigned char target = scmd_id(SCpnt); + nsp32_autoparam *param = data->autoparam; + unsigned char phase; + int i, ret; + unsigned int msgout; + u16_le s; + + nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in"); + + /* + * check bus free + */ + phase = nsp32_read1(base, SCSI_BUS_MONITOR); + if (phase != BUSMON_BUS_FREE) { + nsp32_msg(KERN_WARNING, "bus busy"); + show_busphase(phase & BUSMON_PHASE_MASK); + SCpnt->result = DID_BUS_BUSY << 16; + return FALSE; + } + + /* + * message out + * + * Note: If the range 
of msgout_len is 1 - 3, fill scsi_msgout. + * over 3 messages needs another routine. + */ + if (data->msgout_len == 0) { + nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!"); + SCpnt->result = DID_ERROR << 16; + return FALSE; + } else if (data->msgout_len > 0 && data->msgout_len <= 3) { + msgout = 0; + for (i = 0; i < data->msgout_len; i++) { + /* + * the sending order of the message is: + * MCNT 3: MSG#0 -> MSG#1 -> MSG#2 + * MCNT 2: MSG#1 -> MSG#2 + * MCNT 1: MSG#2 + */ + msgout >>= 8; + msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24); + } + msgout |= MV_VALID; /* MV valid */ + msgout |= (unsigned int)data->msgout_len; /* len */ + } else { + /* data->msgout_len > 3 */ + msgout = 0; + } + + // nsp_dbg(NSP32_DEBUG_AUTOSCSI, "sel time out=0x%x\n", + // nsp32_read2(base, SEL_TIME_OUT)); + // nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME); + + /* + * setup asic parameter + */ + memset(param, 0, sizeof(nsp32_autoparam)); + + /* cdb */ + for (i = 0; i < SCpnt->cmd_len; i++) { + param->cdb[4 * i] = SCpnt->cmnd[i]; + } + + /* outgoing messages */ + param->msgout = cpu_to_le32(msgout); + + /* syncreg, ackwidth, target id, SREQ sampling rate */ + param->syncreg = data->cur_target->syncreg; + param->ackwidth = data->cur_target->ackwidth; + param->target_id = BIT(host_id) | BIT(target); + param->sample_reg = data->cur_target->sample_reg; + + // nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "sample rate=0x%x\n", data->cur_target->sample_reg); + + /* command control */ + param->command_control = cpu_to_le16(CLEAR_CDB_FIFO_POINTER | + AUTOSCSI_START | + AUTO_MSGIN_00_OR_04 | + AUTO_MSGIN_02 | + AUTO_ATN ); + + + /* transfer control */ + s = 0; + switch (data->trans_method) { + case NSP32_TRANSFER_BUSMASTER: + s |= BM_START; + break; + case NSP32_TRANSFER_MMIO: + s |= CB_MMIO_MODE; + break; + case NSP32_TRANSFER_PIO: + s |= CB_IO_MODE; + break; + default: + nsp32_msg(KERN_ERR, "unknown trans_method"); + break; + } + /* + * OR-ed BLIEND_MODE, FIFO intr is decreased, instead of PCI bus waits. + * For bus master transfer, it's taken off. 
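+ *
+ * In other words, the final transfer_control word is
+ * TRANSFER_GO | ALL_COUNTER_CLR OR-ed with exactly one of BM_START,
+ * CB_MMIO_MODE or CB_IO_MODE, as selected by the trans_method
+ * switch above.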
+ */ + s |= (TRANSFER_GO | ALL_COUNTER_CLR); + param->transfer_control = cpu_to_le16(s); + + /* sg table addr */ + param->sgt_pointer = cpu_to_le32(data->cur_lunt->sglun_paddr); + + /* + * transfer parameter to ASIC + */ + nsp32_write4(base, SGT_ADR, data->auto_paddr); + nsp32_write2(base, COMMAND_CONTROL, + CLEAR_CDB_FIFO_POINTER | AUTO_PARAMETER ); + + /* + * Check arbitration + */ + ret = nsp32_arbitration(SCpnt, base); + + return ret; +} + + +/* + * Selection with AUTO SCSI (without AUTO PARAMETER) + */ +static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + unsigned int base = SCpnt->device->host->io_port; + unsigned int host_id = SCpnt->device->host->this_id; + unsigned char target = scmd_id(SCpnt); + unsigned char phase; + int status; + unsigned short command = 0; + unsigned int msgout = 0; + int i; + + nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in"); + + /* + * IRQ disable + */ + nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK); + + /* + * check bus line + */ + phase = nsp32_read1(base, SCSI_BUS_MONITOR); + if ((phase & BUSMON_BSY) || (phase & BUSMON_SEL)) { + nsp32_msg(KERN_WARNING, "bus busy"); + SCpnt->result = DID_BUS_BUSY << 16; + status = 1; + goto out; + } + + /* + * clear execph + */ + nsp32_read2(base, SCSI_EXECUTE_PHASE); + + /* + * clear FIFO counter to set CDBs + */ + nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER); + + /* + * set CDB0 - CDB15 + */ + for (i = 0; i < SCpnt->cmd_len; i++) { + nsp32_write1(base, COMMAND_DATA, SCpnt->cmnd[i]); + } + nsp32_dbg(NSP32_DEBUG_CDB_CONTENTS, "CDB[0]=[0x%x]", SCpnt->cmnd[0]); + + /* + * set SCSIOUT LATCH(initiator)/TARGET(target) (OR-ed) ID + */ + nsp32_write1(base, SCSI_OUT_LATCH_TARGET_ID, + BIT(host_id) | BIT(target)); + + /* + * set SCSI MSGOUT REG + * + * Note: If the range of msgout_len is 1 - 3, fill scsi_msgout. + * over 3 messages needs another routine. + */ + if (data->msgout_len == 0) { + nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!"); + SCpnt->result = DID_ERROR << 16; + status = 1; + goto out; + } else if (data->msgout_len > 0 && data->msgout_len <= 3) { + msgout = 0; + for (i = 0; i < data->msgout_len; i++) { + /* + * the sending order of the message is: + * MCNT 3: MSG#0 -> MSG#1 -> MSG#2 + * MCNT 2: MSG#1 -> MSG#2 + * MCNT 1: MSG#2 + */ + msgout >>= 8; + msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24); + } + msgout |= MV_VALID; /* MV valid */ + msgout |= (unsigned int)data->msgout_len; /* len */ + nsp32_write4(base, SCSI_MSG_OUT, msgout); + } else { + /* data->msgout_len > 3 */ + nsp32_write4(base, SCSI_MSG_OUT, 0); + } + + /* + * set selection timeout(= 250ms) + */ + nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME); + + /* + * set SREQ hazard killer sampling rate + * + * TODO: sample_rate (BASE+0F) is 0 when internal clock = 40MHz. + * check other internal clock! + */ + nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg); + + /* + * clear Arbit + */ + nsp32_write1(base, SET_ARBIT, ARBIT_CLEAR); + + /* + * set SYNCREG + * Don't set BM_START_ADR before setting this register. 
+ */ + nsp32_write1(base, SYNC_REG, data->cur_target->syncreg); + + /* + * set ACKWIDTH + */ + nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth); + + nsp32_dbg(NSP32_DEBUG_AUTOSCSI, + "syncreg=0x%x, ackwidth=0x%x, sgtpaddr=0x%x, id=0x%x", + nsp32_read1(base, SYNC_REG), nsp32_read1(base, ACK_WIDTH), + nsp32_read4(base, SGT_ADR), + nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID)); + nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "msgout_len=%d, msgout=0x%x", + data->msgout_len, msgout); + + /* + * set SGT ADDR (physical address) + */ + nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr); + + /* + * set TRANSFER CONTROL REG + */ + command = 0; + command |= (TRANSFER_GO | ALL_COUNTER_CLR); + if (data->trans_method & NSP32_TRANSFER_BUSMASTER) { + if (scsi_bufflen(SCpnt) > 0) { + command |= BM_START; + } + } else if (data->trans_method & NSP32_TRANSFER_MMIO) { + command |= CB_MMIO_MODE; + } else if (data->trans_method & NSP32_TRANSFER_PIO) { + command |= CB_IO_MODE; + } + nsp32_write2(base, TRANSFER_CONTROL, command); + + /* + * start AUTO SCSI, kick off arbitration + */ + command = (CLEAR_CDB_FIFO_POINTER | + AUTOSCSI_START | + AUTO_MSGIN_00_OR_04 | + AUTO_MSGIN_02 | + AUTO_ATN); + nsp32_write2(base, COMMAND_CONTROL, command); + + /* + * Check arbitration + */ + status = nsp32_arbitration(SCpnt, base); + + out: + /* + * IRQ enable + */ + nsp32_write2(base, IRQ_CONTROL, 0); + + return status; +} + + +/* + * Arbitration Status Check + * + * Note: Arbitration counter is waited during ARBIT_GO is not lifting. + * Using udelay(1) consumes CPU time and system time, but + * arbitration delay time is defined minimal 2.4us in SCSI + * specification, thus udelay works as coarse grained wait timer. + */ +static int nsp32_arbitration(struct scsi_cmnd *SCpnt, unsigned int base) +{ + unsigned char arbit; + int status = TRUE; + int time = 0; + + do { + arbit = nsp32_read1(base, ARBIT_STATUS); + time++; + } while ((arbit & (ARBIT_WIN | ARBIT_FAIL)) == 0 && + (time <= ARBIT_TIMEOUT_TIME)); + + nsp32_dbg(NSP32_DEBUG_AUTOSCSI, + "arbit: 0x%x, delay time: %d", arbit, time); + + if (arbit & ARBIT_WIN) { + /* Arbitration succeeded */ + SCpnt->result = DID_OK << 16; + nsp32_index_write1(base, EXT_PORT, LED_ON); /* PCI LED on */ + } else if (arbit & ARBIT_FAIL) { + /* Arbitration failed */ + SCpnt->result = DID_BUS_BUSY << 16; + status = FALSE; + } else { + /* + * unknown error or ARBIT_GO timeout, + * something lock up! guess no connection. + */ + nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "arbit timeout"); + SCpnt->result = DID_NO_CONNECT << 16; + status = FALSE; + } + + /* + * clear Arbit + */ + nsp32_write1(base, SET_ARBIT, ARBIT_CLEAR); + + return status; +} + + +/* + * reselection + * + * Note: This reselection routine is called from msgin_occur, + * reselection target id&lun must be already set. + * SCSI-2 says IDENTIFY implies RESTORE_POINTER operation. + */ +static int nsp32_reselection(struct scsi_cmnd *SCpnt, unsigned char newlun) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + unsigned int host_id = SCpnt->device->host->this_id; + unsigned int base = SCpnt->device->host->io_port; + unsigned char tmpid, newid; + + nsp32_dbg(NSP32_DEBUG_RESELECTION, "enter"); + + /* + * calculate reselected SCSI ID + */ + tmpid = nsp32_read1(base, RESELECT_ID); + tmpid &= (~BIT(host_id)); + newid = 0; + while (tmpid) { + if (tmpid & 1) { + break; + } + tmpid >>= 1; + newid++; + } + + /* + * If reselected New ID:LUN is not existed + * or current nexus is not existed, unexpected + * reselection is occurred. 
Send reject message. + */ + if (newid >= ARRAY_SIZE(data->lunt) || + newlun >= ARRAY_SIZE(data->lunt[0])) { + nsp32_msg(KERN_WARNING, "unknown id/lun"); + return FALSE; + } else if(data->lunt[newid][newlun].SCpnt == NULL) { + nsp32_msg(KERN_WARNING, "no SCSI command is processing"); + return FALSE; + } + + data->cur_id = newid; + data->cur_lun = newlun; + data->cur_target = &(data->target[newid]); + data->cur_lunt = &(data->lunt[newid][newlun]); + + /* reset SACK/SavedACK counter (or ALL clear?) */ + nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK); + + return TRUE; +} + + +/* + * nsp32_setup_sg_table - build scatter gather list for transfer data + * with bus master. + * + * Note: NinjaSCSI-32Bi/UDE bus master can not transfer over 64KB at a time. + */ +static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + struct scatterlist *sg; + nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt; + int num, i; + u32_le l; + + if (sgt == NULL) { + nsp32_dbg(NSP32_DEBUG_SGLIST, "SGT == null"); + return FALSE; + } + + num = scsi_dma_map(SCpnt); + if (!num) + return TRUE; + else if (num < 0) + return FALSE; + else { + scsi_for_each_sg(SCpnt, sg, num, i) { + /* + * Build nsp32_sglist, substitute sg dma addresses. + */ + sgt[i].addr = cpu_to_le32(sg_dma_address(sg)); + sgt[i].len = cpu_to_le32(sg_dma_len(sg)); + + if (le32_to_cpu(sgt[i].len) > 0x10000) { + nsp32_msg(KERN_ERR, + "can't transfer over 64KB at a time, " + "size=0x%x", le32_to_cpu(sgt[i].len)); + return FALSE; + } + nsp32_dbg(NSP32_DEBUG_SGLIST, + "num 0x%x : addr 0x%lx len 0x%lx", + i, + le32_to_cpu(sgt[i].addr), + le32_to_cpu(sgt[i].len )); + } + + /* set end mark */ + l = le32_to_cpu(sgt[num-1].len); + sgt[num-1].len = cpu_to_le32(l | SGTEND); + } + + return TRUE; +} + +static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt) +{ + void (*done)(struct scsi_cmnd *) = scsi_done; + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + nsp32_target *target; + nsp32_lunt *cur_lunt; + int ret; + + nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, + "enter. target: 0x%x LUN: 0x%llx cmnd: 0x%x cmndlen: 0x%x " + "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x", + SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], + SCpnt->cmd_len, scsi_sg_count(SCpnt), scsi_sglist(SCpnt), + scsi_bufflen(SCpnt)); + + if (data->CurrentSC != NULL) { + nsp32_msg(KERN_ERR, "Currentsc != NULL. 
Cancel this command request"); + data->CurrentSC = NULL; + SCpnt->result = DID_NO_CONNECT << 16; + done(SCpnt); + return 0; + } + + /* check target ID is not same as this initiator ID */ + if (scmd_id(SCpnt) == SCpnt->device->host->this_id) { + nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "target==host???"); + SCpnt->result = DID_BAD_TARGET << 16; + done(SCpnt); + return 0; + } + + /* check target LUN is allowable value */ + if (SCpnt->device->lun >= MAX_LUN) { + nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "no more lun"); + SCpnt->result = DID_BAD_TARGET << 16; + done(SCpnt); + return 0; + } + + show_command(SCpnt); + + data->CurrentSC = SCpnt; + nsp32_priv(SCpnt)->status = SAM_STAT_CHECK_CONDITION; + scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); + + /* initialize data */ + data->msgout_len = 0; + data->msgin_len = 0; + cur_lunt = &(data->lunt[SCpnt->device->id][SCpnt->device->lun]); + cur_lunt->SCpnt = SCpnt; + cur_lunt->save_datp = 0; + cur_lunt->msgin03 = FALSE; + data->cur_lunt = cur_lunt; + data->cur_id = SCpnt->device->id; + data->cur_lun = SCpnt->device->lun; + + ret = nsp32_setup_sg_table(SCpnt); + if (ret == FALSE) { + nsp32_msg(KERN_ERR, "SGT fail"); + SCpnt->result = DID_ERROR << 16; + nsp32_scsi_done(SCpnt); + return 0; + } + + /* Build IDENTIFY */ + nsp32_build_identify(SCpnt); + + /* + * If target is the first time to transfer after the reset + * (target don't have SDTR_DONE and SDTR_INITIATOR), sync + * message SDTR is needed to do synchronous transfer. + */ + target = &data->target[scmd_id(SCpnt)]; + data->cur_target = target; + + if (!(target->sync_flag & (SDTR_DONE | SDTR_INITIATOR | SDTR_TARGET))) { + unsigned char period, offset; + + if (trans_mode != ASYNC_MODE) { + nsp32_set_max_sync(data, target, &period, &offset); + nsp32_build_sdtr(SCpnt, period, offset); + target->sync_flag |= SDTR_INITIATOR; + } else { + nsp32_set_async(data, target); + target->sync_flag |= SDTR_DONE; + } + + nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, + "SDTR: entry: %d start_period: 0x%x offset: 0x%x\n", + target->limit_entry, period, offset); + } else if (target->sync_flag & SDTR_INITIATOR) { + /* + * It was negotiating SDTR with target, sending from the + * initiator, but there are no chance to remove this flag. + * Set async because we don't get proper negotiation. + */ + nsp32_set_async(data, target); + target->sync_flag &= ~SDTR_INITIATOR; + target->sync_flag |= SDTR_DONE; + + nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, + "SDTR_INITIATOR: fall back to async"); + } else if (target->sync_flag & SDTR_TARGET) { + /* + * It was negotiating SDTR with target, sending from target, + * but there are no chance to remove this flag. Set async + * because we don't get proper negotiation. 
+ */ + nsp32_set_async(data, target); + target->sync_flag &= ~SDTR_TARGET; + target->sync_flag |= SDTR_DONE; + + nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, + "Unknown SDTR from target is reached, fall back to async."); + } + + nsp32_dbg(NSP32_DEBUG_TARGETFLAG, + "target: %d sync_flag: 0x%x syncreg: 0x%x ackwidth: 0x%x", + SCpnt->device->id, target->sync_flag, target->syncreg, + target->ackwidth); + + /* Selection */ + if (auto_param == 0) { + ret = nsp32_selection_autopara(SCpnt); + } else { + ret = nsp32_selection_autoscsi(SCpnt); + } + + if (ret != TRUE) { + nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "selection fail"); + nsp32_scsi_done(SCpnt); + } + + return 0; +} + +static DEF_SCSI_QCMD(nsp32_queuecommand) + +/* initialize asic */ +static int nsp32hw_init(nsp32_hw_data *data) +{ + unsigned int base = data->BaseAddress; + unsigned short irq_stat; + unsigned long lc_reg; + unsigned char power; + + lc_reg = nsp32_index_read4(base, CFG_LATE_CACHE); + if ((lc_reg & 0xff00) == 0) { + lc_reg |= (0x20 << 8); + nsp32_index_write2(base, CFG_LATE_CACHE, lc_reg & 0xffff); + } + + nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK); + nsp32_write2(base, TRANSFER_CONTROL, 0); + nsp32_write4(base, BM_CNT, 0); + nsp32_write2(base, SCSI_EXECUTE_PHASE, 0); + + do { + irq_stat = nsp32_read2(base, IRQ_STATUS); + nsp32_dbg(NSP32_DEBUG_INIT, "irq_stat 0x%x", irq_stat); + } while (irq_stat & IRQSTATUS_ANY_IRQ); + + /* + * Fill FIFO_FULL_SHLD, FIFO_EMPTY_SHLD. Below parameter is + * designated by specification. + */ + if ((data->trans_method & NSP32_TRANSFER_PIO) || + (data->trans_method & NSP32_TRANSFER_MMIO)) { + nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT, 0x40); + nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x40); + } else if (data->trans_method & NSP32_TRANSFER_BUSMASTER) { + nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT, 0x10); + nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x60); + } else { + nsp32_dbg(NSP32_DEBUG_INIT, "unknown transfer mode"); + } + + nsp32_dbg(NSP32_DEBUG_INIT, "full 0x%x emp 0x%x", + nsp32_index_read1(base, FIFO_FULL_SHLD_COUNT), + nsp32_index_read1(base, FIFO_EMPTY_SHLD_COUNT)); + + nsp32_index_write1(base, CLOCK_DIV, data->clock); + nsp32_index_write1(base, BM_CYCLE, + MEMRD_CMD1 | SGT_AUTO_PARA_MEMED_CMD); + nsp32_write1(base, PARITY_CONTROL, 0); /* parity check is disable */ + + /* + * initialize MISC_WRRD register + * + * Note: Designated parameters is obeyed as following: + * MISC_SCSI_DIRECTION_DETECTOR_SELECT: It must be set. + * MISC_MASTER_TERMINATION_SELECT: It must be set. + * MISC_BMREQ_NEGATE_TIMING_SEL: It should be set. + * MISC_AUTOSEL_TIMING_SEL: It should be set. + * MISC_BMSTOP_CHANGE2_NONDATA_PHASE: It should be set. + * MISC_DELAYED_BMSTART: It's selected for safety. + * + * Note: If MISC_BMSTOP_CHANGE2_NONDATA_PHASE is set, then + * we have to set TRANSFERCONTROL_BM_START as 0 and set + * appropriate value before restarting bus master transfer. 
+ */ + nsp32_index_write2(base, MISC_WR, + (SCSI_DIRECTION_DETECTOR_SELECT | + DELAYED_BMSTART | + MASTER_TERMINATION_SELECT | + BMREQ_NEGATE_TIMING_SEL | + AUTOSEL_TIMING_SEL | + BMSTOP_CHANGE2_NONDATA_PHASE)); + + nsp32_index_write1(base, TERM_PWR_CONTROL, 0); + power = nsp32_index_read1(base, TERM_PWR_CONTROL); + if (!(power & SENSE)) { + nsp32_msg(KERN_INFO, "term power on"); + nsp32_index_write1(base, TERM_PWR_CONTROL, BPWR); + } + + nsp32_write2(base, TIMER_SET, TIMER_STOP); + nsp32_write2(base, TIMER_SET, TIMER_STOP); /* Required 2 times */ + + nsp32_write1(base, SYNC_REG, 0); + nsp32_write1(base, ACK_WIDTH, 0); + nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME); + + /* + * enable to select designated IRQ (except for + * IRQSELECT_SERR, IRQSELECT_PERR, IRQSELECT_BMCNTERR) + */ + nsp32_index_write2(base, IRQ_SELECT, + IRQSELECT_TIMER_IRQ | + IRQSELECT_SCSIRESET_IRQ | + IRQSELECT_FIFO_SHLD_IRQ | + IRQSELECT_RESELECT_IRQ | + IRQSELECT_PHASE_CHANGE_IRQ | + IRQSELECT_AUTO_SCSI_SEQ_IRQ | + // IRQSELECT_BMCNTERR_IRQ | + IRQSELECT_TARGET_ABORT_IRQ | + IRQSELECT_MASTER_ABORT_IRQ ); + nsp32_write2(base, IRQ_CONTROL, 0); + + /* PCI LED off */ + nsp32_index_write1(base, EXT_PORT_DDR, LED_OFF); + nsp32_index_write1(base, EXT_PORT, LED_OFF); + + return TRUE; +} + + +/* interrupt routine */ +static irqreturn_t do_nsp32_isr(int irq, void *dev_id) +{ + nsp32_hw_data *data = dev_id; + unsigned int base = data->BaseAddress; + struct scsi_cmnd *SCpnt = data->CurrentSC; + unsigned short auto_stat, irq_stat, trans_stat; + unsigned char busmon, busphase; + unsigned long flags; + int ret; + int handled = 0; + struct Scsi_Host *host = data->Host; + + spin_lock_irqsave(host->host_lock, flags); + + /* + * IRQ check, then enable IRQ mask + */ + irq_stat = nsp32_read2(base, IRQ_STATUS); + nsp32_dbg(NSP32_DEBUG_INTR, + "enter IRQ: %d, IRQstatus: 0x%x", irq, irq_stat); + /* is this interrupt comes from Ninja asic? */ + if ((irq_stat & IRQSTATUS_ANY_IRQ) == 0) { + nsp32_dbg(NSP32_DEBUG_INTR, + "shared interrupt: irq other 0x%x", irq_stat); + goto out2; + } + handled = 1; + nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK); + + busmon = nsp32_read1(base, SCSI_BUS_MONITOR); + busphase = busmon & BUSMON_PHASE_MASK; + + trans_stat = nsp32_read2(base, TRANSFER_STATUS); + if ((irq_stat == 0xffff) && (trans_stat == 0xffff)) { + nsp32_msg(KERN_INFO, "card disconnect"); + if (data->CurrentSC != NULL) { + nsp32_msg(KERN_INFO, "clean up current SCSI command"); + SCpnt->result = DID_BAD_TARGET << 16; + nsp32_scsi_done(SCpnt); + } + goto out; + } + + /* Timer IRQ */ + if (irq_stat & IRQSTATUS_TIMER_IRQ) { + nsp32_dbg(NSP32_DEBUG_INTR, "timer stop"); + nsp32_write2(base, TIMER_SET, TIMER_STOP); + goto out; + } + + /* SCSI reset */ + if (irq_stat & IRQSTATUS_SCSIRESET_IRQ) { + nsp32_msg(KERN_INFO, "detected someone do bus reset"); + nsp32_do_bus_reset(data); + if (SCpnt != NULL) { + SCpnt->result = DID_RESET << 16; + nsp32_scsi_done(SCpnt); + } + goto out; + } + + if (SCpnt == NULL) { + nsp32_msg(KERN_WARNING, "SCpnt==NULL this can't be happened"); + nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x", + irq_stat, trans_stat); + goto out; + } + + /* + * AutoSCSI Interrupt. + * Note: This interrupt is occurred when AutoSCSI is finished. Then + * check SCSIEXECUTEPHASE, and do appropriate action. Each phases are + * recorded when AutoSCSI sequencer has been processed. 
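+ *
+ * The handling below checks the recorded bits one by one --
+ * SELECTION_TIMEOUT, MSGOUT_PHASE, the data phases,
+ * MSG_IN_OCCUER/MSG_OUT_OCCUER, BUS_FREE_OCCUER, STATUS_PHASE,
+ * ILLEGAL_PHASE and COMMAND_PHASE -- so several of them may be
+ * acted upon for a single interrupt.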
+ */ + if(irq_stat & IRQSTATUS_AUTOSCSI_IRQ) { + /* getting SCSI executed phase */ + auto_stat = nsp32_read2(base, SCSI_EXECUTE_PHASE); + nsp32_write2(base, SCSI_EXECUTE_PHASE, 0); + + /* Selection Timeout, go busfree phase. */ + if (auto_stat & SELECTION_TIMEOUT) { + nsp32_dbg(NSP32_DEBUG_INTR, + "selection timeout occurred"); + + SCpnt->result = DID_TIME_OUT << 16; + nsp32_scsi_done(SCpnt); + goto out; + } + + if (auto_stat & MSGOUT_PHASE) { + /* + * MsgOut phase was processed. + * If MSG_IN_OCCUER is not set, then MsgOut phase is + * completed. Thus, msgout_len must reset. Otherwise, + * nothing to do here. If MSG_OUT_OCCUER is occurred, + * then we will encounter the condition and check. + */ + if (!(auto_stat & MSG_IN_OCCUER) && + (data->msgout_len <= 3)) { + /* + * !MSG_IN_OCCUER && msgout_len <=3 + * ---> AutoSCSI with MSGOUTreg is processed. + */ + data->msgout_len = 0; + } + + nsp32_dbg(NSP32_DEBUG_INTR, "MsgOut phase processed"); + } + + if ((auto_stat & DATA_IN_PHASE) && + (scsi_get_resid(SCpnt) > 0) && + ((nsp32_read2(base, FIFO_REST_CNT) & FIFO_REST_MASK) != 0)) { + printk( "auto+fifo\n"); + //nsp32_pio_read(SCpnt); + } + + if (auto_stat & (DATA_IN_PHASE | DATA_OUT_PHASE)) { + /* DATA_IN_PHASE/DATA_OUT_PHASE was processed. */ + nsp32_dbg(NSP32_DEBUG_INTR, + "Data in/out phase processed"); + + /* read BMCNT, SGT pointer addr */ + nsp32_dbg(NSP32_DEBUG_INTR, "BMCNT=0x%lx", + nsp32_read4(base, BM_CNT)); + nsp32_dbg(NSP32_DEBUG_INTR, "addr=0x%lx", + nsp32_read4(base, SGT_ADR)); + nsp32_dbg(NSP32_DEBUG_INTR, "SACK=0x%lx", + nsp32_read4(base, SACK_CNT)); + nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx", + nsp32_read4(base, SAVED_SACK_CNT)); + + scsi_set_resid(SCpnt, 0); /* all data transferred! */ + } + + /* + * MsgIn Occur + */ + if (auto_stat & MSG_IN_OCCUER) { + nsp32_msgin_occur(SCpnt, irq_stat, auto_stat); + } + + /* + * MsgOut Occur + */ + if (auto_stat & MSG_OUT_OCCUER) { + nsp32_msgout_occur(SCpnt); + } + + /* + * Bus Free Occur + */ + if (auto_stat & BUS_FREE_OCCUER) { + ret = nsp32_busfree_occur(SCpnt, auto_stat); + if (ret == TRUE) { + goto out; + } + } + + if (auto_stat & STATUS_PHASE) { + /* + * Read CSB and substitute CSB for SCpnt->result + * to save status phase stutas byte. + * scsi error handler checks host_byte (DID_*: + * low level driver to indicate status), then checks + * status_byte (SCSI status byte). + */ + SCpnt->result = (int)nsp32_read1(base, SCSI_CSB_IN); + } + + if (auto_stat & ILLEGAL_PHASE) { + /* Illegal phase is detected. SACK is not back. */ + nsp32_msg(KERN_WARNING, + "AUTO SCSI ILLEGAL PHASE OCCUR!!!!"); + + /* TODO: currently we don't have any action... bus reset? */ + + /* + * To send back SACK, assert, wait, and negate. 
+ */ + nsp32_sack_assert(data); + nsp32_wait_req(data, NEGATE); + nsp32_sack_negate(data); + + } + + if (auto_stat & COMMAND_PHASE) { + /* nothing to do */ + nsp32_dbg(NSP32_DEBUG_INTR, "Command phase processed"); + } + + if (auto_stat & AUTOSCSI_BUSY) { + /* AutoSCSI is running */ + } + + show_autophase(auto_stat); + } + + /* FIFO_SHLD_IRQ */ + if (irq_stat & IRQSTATUS_FIFO_SHLD_IRQ) { + nsp32_dbg(NSP32_DEBUG_INTR, "FIFO IRQ"); + + switch(busphase) { + case BUSPHASE_DATA_OUT: + nsp32_dbg(NSP32_DEBUG_INTR, "fifo/write"); + + //nsp32_pio_write(SCpnt); + + break; + + case BUSPHASE_DATA_IN: + nsp32_dbg(NSP32_DEBUG_INTR, "fifo/read"); + + //nsp32_pio_read(SCpnt); + + break; + + case BUSPHASE_STATUS: + nsp32_dbg(NSP32_DEBUG_INTR, "fifo/status"); + + nsp32_priv(SCpnt)->status = nsp32_read1(base, SCSI_CSB_IN); + + break; + default: + nsp32_dbg(NSP32_DEBUG_INTR, "fifo/other phase"); + nsp32_dbg(NSP32_DEBUG_INTR, "irq_stat=0x%x trans_stat=0x%x", + irq_stat, trans_stat); + show_busphase(busphase); + break; + } + + goto out; + } + + /* Phase Change IRQ */ + if (irq_stat & IRQSTATUS_PHASE_CHANGE_IRQ) { + nsp32_dbg(NSP32_DEBUG_INTR, "phase change IRQ"); + + switch(busphase) { + case BUSPHASE_MESSAGE_IN: + nsp32_dbg(NSP32_DEBUG_INTR, "phase chg/msg in"); + nsp32_msgin_occur(SCpnt, irq_stat, 0); + break; + default: + nsp32_msg(KERN_WARNING, "phase chg/other phase?"); + nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x\n", + irq_stat, trans_stat); + show_busphase(busphase); + break; + } + goto out; + } + + /* PCI_IRQ */ + if (irq_stat & IRQSTATUS_PCI_IRQ) { + nsp32_dbg(NSP32_DEBUG_INTR, "PCI IRQ occurred"); + /* Do nothing */ + } + + /* BMCNTERR_IRQ */ + if (irq_stat & IRQSTATUS_BMCNTERR_IRQ) { + nsp32_msg(KERN_ERR, "Received unexpected BMCNTERR IRQ! "); + /* + * TODO: To be implemented improving bus master + * transfer reliability when BMCNTERR is occurred in + * AutoSCSI phase described in specification. + */ + } + +#if 0 + nsp32_dbg(NSP32_DEBUG_INTR, + "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat); + show_busphase(busphase); +#endif + + out: + /* disable IRQ mask */ + nsp32_write2(base, IRQ_CONTROL, 0); + + out2: + spin_unlock_irqrestore(host->host_lock, flags); + + nsp32_dbg(NSP32_DEBUG_INTR, "exit"); + + return IRQ_RETVAL(handled); +} + + +static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host) +{ + unsigned long flags; + nsp32_hw_data *data; + int hostno; + unsigned int base; + unsigned char mode_reg; + int id, speed; + long model; + + hostno = host->host_no; + data = (nsp32_hw_data *)host->hostdata; + base = host->io_port; + + seq_puts(m, "NinjaSCSI-32 status\n\n"); + seq_printf(m, "Driver version: %s, $Revision: 1.33 $\n", + nsp32_release_version); + seq_printf(m, "SCSI host No.: %d\n", hostno); + seq_printf(m, "IRQ: %d\n", host->irq); + seq_printf(m, "IO: 0x%lx-0x%lx\n", + host->io_port, host->io_port + host->n_io_port - 1); + seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", + host->base, host->base + data->MmioLength - 1); + seq_printf(m, "sg_tablesize: %d\n", + host->sg_tablesize); + seq_printf(m, "Chip revision: 0x%x\n", + (nsp32_read2(base, INDEX_REG) >> 8) & 0xff); + + mode_reg = nsp32_index_read1(base, CHIP_MODE); + model = data->pci_devid->driver_data; + +#ifdef CONFIG_PM + seq_printf(m, "Power Management: %s\n", + (mode_reg & OPTF) ? 
"yes" : "no"); +#endif + seq_printf(m, "OEM: %ld, %s\n", + (mode_reg & (OEM0|OEM1)), nsp32_model[model]); + + spin_lock_irqsave(&(data->Lock), flags); + seq_printf(m, "CurrentSC: 0x%p\n\n", data->CurrentSC); + spin_unlock_irqrestore(&(data->Lock), flags); + + + seq_puts(m, "SDTR status\n"); + for (id = 0; id < ARRAY_SIZE(data->target); id++) { + + seq_printf(m, "id %d: ", id); + + if (id == host->this_id) { + seq_puts(m, "----- NinjaSCSI-32 host adapter\n"); + continue; + } + + if (data->target[id].sync_flag == SDTR_DONE) { + if (data->target[id].period == 0 && + data->target[id].offset == ASYNC_OFFSET ) { + seq_puts(m, "async"); + } else { + seq_puts(m, " sync"); + } + } else { + seq_puts(m, " none"); + } + + if (data->target[id].period != 0) { + + speed = 1000000 / (data->target[id].period * 4); + + seq_printf(m, " transfer %d.%dMB/s, offset %d", + speed / 1000, + speed % 1000, + data->target[id].offset + ); + } + seq_putc(m, '\n'); + } + return 0; +} + + + +/* + * Reset parameters and call scsi_done for data->cur_lunt. + * Be careful setting SCpnt->result = DID_* before calling this function. + */ +static void nsp32_scsi_done(struct scsi_cmnd *SCpnt) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + unsigned int base = SCpnt->device->host->io_port; + + scsi_dma_unmap(SCpnt); + + /* + * clear TRANSFERCONTROL_BM_START + */ + nsp32_write2(base, TRANSFER_CONTROL, 0); + nsp32_write4(base, BM_CNT, 0); + + /* + * call scsi_done + */ + scsi_done(SCpnt); + + /* + * reset parameters + */ + data->cur_lunt->SCpnt = NULL; + data->cur_lunt = NULL; + data->cur_target = NULL; + data->CurrentSC = NULL; +} + + +/* + * Bus Free Occur + * + * Current Phase is BUSFREE. AutoSCSI is automatically execute BUSFREE phase + * with ACK reply when below condition is matched: + * MsgIn 00: Command Complete. + * MsgIn 02: Save Data Pointer. + * MsgIn 04: Disconnect. + * In other case, unexpected BUSFREE is detected. + */ +static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + unsigned int base = SCpnt->device->host->io_port; + + nsp32_dbg(NSP32_DEBUG_BUSFREE, "enter execph=0x%x", execph); + show_autophase(execph); + + nsp32_write4(base, BM_CNT, 0); + nsp32_write2(base, TRANSFER_CONTROL, 0); + + /* + * MsgIn 02: Save Data Pointer + * + * VALID: + * Save Data Pointer is received. Adjust pointer. + * + * NO-VALID: + * SCSI-3 says if Save Data Pointer is not received, then we restart + * processing and we can't adjust any SCSI data pointer in next data + * phase. + */ + if (execph & MSGIN_02_VALID) { + nsp32_dbg(NSP32_DEBUG_BUSFREE, "MsgIn02_Valid"); + + /* + * Check sack_cnt/saved_sack_cnt, then adjust sg table if + * needed. + */ + if (!(execph & MSGIN_00_VALID) && + ((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE))) { + unsigned int sacklen, s_sacklen; + + /* + * Read SACK count and SAVEDSACK count, then compare. + */ + sacklen = nsp32_read4(base, SACK_CNT ); + s_sacklen = nsp32_read4(base, SAVED_SACK_CNT); + + /* + * If SAVEDSACKCNT == 0, it means SavedDataPointer is + * come after data transferring. + */ + if (s_sacklen > 0) { + /* + * Comparing between sack and savedsack to + * check the condition of AutoMsgIn03. + * + * If they are same, set msgin03 == TRUE, + * COMMANDCONTROL_AUTO_MSGIN_03 is enabled at + * reselection. On the other hand, if they + * aren't same, set msgin03 == FALSE, and + * COMMANDCONTROL_AUTO_MSGIN_03 is disabled at + * reselection. 
+ */ + if (sacklen != s_sacklen) { + data->cur_lunt->msgin03 = FALSE; + } else { + data->cur_lunt->msgin03 = TRUE; + } + + nsp32_adjust_busfree(SCpnt, s_sacklen); + } + } + + /* This value has not substitude with valid value yet... */ + //data->cur_lunt->save_datp = data->cur_datp; + } else { + /* + * no processing. + */ + } + + if (execph & MSGIN_03_VALID) { + /* MsgIn03 was valid to be processed. No need processing. */ + } + + /* + * target SDTR check + */ + if (data->cur_target->sync_flag & SDTR_INITIATOR) { + /* + * SDTR negotiation pulled by the initiator has not + * finished yet. Fall back to ASYNC mode. + */ + nsp32_set_async(data, data->cur_target); + data->cur_target->sync_flag &= ~SDTR_INITIATOR; + data->cur_target->sync_flag |= SDTR_DONE; + } else if (data->cur_target->sync_flag & SDTR_TARGET) { + /* + * SDTR negotiation pulled by the target has been + * negotiating. + */ + if (execph & (MSGIN_00_VALID | MSGIN_04_VALID)) { + /* + * If valid message is received, then + * negotiation is succeeded. + */ + } else { + /* + * On the contrary, if unexpected bus free is + * occurred, then negotiation is failed. Fall + * back to ASYNC mode. + */ + nsp32_set_async(data, data->cur_target); + } + data->cur_target->sync_flag &= ~SDTR_TARGET; + data->cur_target->sync_flag |= SDTR_DONE; + } + + /* + * It is always ensured by SCSI standard that initiator + * switches into Bus Free Phase after + * receiving message 00 (Command Complete), 04 (Disconnect). + * It's the reason that processing here is valid. + */ + if (execph & MSGIN_00_VALID) { + /* MsgIn 00: Command Complete */ + nsp32_dbg(NSP32_DEBUG_BUSFREE, "command complete"); + + nsp32_priv(SCpnt)->status = nsp32_read1(base, SCSI_CSB_IN); + nsp32_dbg(NSP32_DEBUG_BUSFREE, + "normal end stat=0x%x resid=0x%x\n", + nsp32_priv(SCpnt)->status, scsi_get_resid(SCpnt)); + SCpnt->result = (DID_OK << 16) | + (nsp32_priv(SCpnt)->status << 0); + nsp32_scsi_done(SCpnt); + /* All operation is done */ + return TRUE; + } else if (execph & MSGIN_04_VALID) { + /* MsgIn 04: Disconnect */ + nsp32_priv(SCpnt)->status = nsp32_read1(base, SCSI_CSB_IN); + + nsp32_dbg(NSP32_DEBUG_BUSFREE, "disconnect"); + return TRUE; + } else { + /* Unexpected bus free */ + nsp32_msg(KERN_WARNING, "unexpected bus free occurred"); + + SCpnt->result = DID_ERROR << 16; + nsp32_scsi_done(SCpnt); + return TRUE; + } + return FALSE; +} + + +/* + * nsp32_adjust_busfree - adjusting SG table + * + * Note: This driver adjust the SG table using SCSI ACK + * counter instead of BMCNT counter! 
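+ *
+ * Illustrative example (hypothetical 4 KiB segments, the 4-byte
+ * alignment adjustment ignored): with sgt[] lengths {4096, 4096, 4096}
+ * and s_sacklen == 6144, sentlen first exceeds s_sacklen at entry 1
+ * (sentlen == 8192), so restlen = 8192 - 6144 = 2048; entry 1 is then
+ * rewritten to cover only its last 2048 bytes and becomes cur_entry.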
+ */ +static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + int old_entry = data->cur_entry; + int new_entry; + int sg_num = data->cur_lunt->sg_num; + nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt; + unsigned int restlen, sentlen; + u32_le len, addr; + + nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", scsi_get_resid(SCpnt)); + + /* adjust saved SACK count with 4 byte start address boundary */ + s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3; + + /* + * calculate new_entry from sack count and each sgt[].len + * calculate the byte which is intent to send + */ + sentlen = 0; + for (new_entry = old_entry; new_entry < sg_num; new_entry++) { + sentlen += (le32_to_cpu(sgt[new_entry].len) & ~SGTEND); + if (sentlen > s_sacklen) { + break; + } + } + + /* all sgt is processed */ + if (new_entry == sg_num) { + goto last; + } + + if (sentlen == s_sacklen) { + /* XXX: confirm it's ok or not */ + /* In this case, it's ok because we are at + * the head element of the sg. restlen is correctly + * calculated. + */ + } + + /* calculate the rest length for transferring */ + restlen = sentlen - s_sacklen; + + /* update adjusting current SG table entry */ + len = le32_to_cpu(sgt[new_entry].len); + addr = le32_to_cpu(sgt[new_entry].addr); + addr += (len - restlen); + sgt[new_entry].addr = cpu_to_le32(addr); + sgt[new_entry].len = cpu_to_le32(restlen); + + /* set cur_entry with new_entry */ + data->cur_entry = new_entry; + + return; + + last: + if (scsi_get_resid(SCpnt) < sentlen) { + nsp32_msg(KERN_ERR, "resid underflow"); + } + + scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) - sentlen); + nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x", scsi_get_resid(SCpnt)); + + /* update hostdata and lun */ + + return; +} + + +/* + * It's called MsgOut phase occur. + * NinjaSCSI-32Bi/UDE automatically processes up to 3 messages in + * message out phase. It, however, has more than 3 messages, + * HBA creates the interrupt and we have to process by hand. + */ +static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + unsigned int base = SCpnt->device->host->io_port; + int i; + + nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, + "enter: msgout_len: 0x%x", data->msgout_len); + + /* + * If MsgOut phase is occurred without having any + * message, then No_Operation is sent (SCSI-2). + */ + if (data->msgout_len == 0) { + nsp32_build_nop(SCpnt); + } + + /* + * send messages + */ + for (i = 0; i < data->msgout_len; i++) { + nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, + "%d : 0x%x", i, data->msgoutbuf[i]); + + /* + * Check REQ is asserted. + */ + nsp32_wait_req(data, ASSERT); + + if (i == (data->msgout_len - 1)) { + /* + * If the last message, set the AutoSCSI restart + * before send back the ack message. AutoSCSI + * restart automatically negate ATN signal. + */ + //command = (AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02); + //nsp32_restart_autoscsi(SCpnt, command); + nsp32_write2(base, COMMAND_CONTROL, + (CLEAR_CDB_FIFO_POINTER | + AUTO_COMMAND_PHASE | + AUTOSCSI_RESTART | + AUTO_MSGIN_00_OR_04 | + AUTO_MSGIN_02 )); + } + /* + * Write data with SACK, then wait sack is + * automatically negated. 
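+ * (Judging by the register name, a write to SCSI_DATA_WITH_ACK puts
+ * the byte on the bus and lets the chip drive the ACK strobe itself;
+ * nsp32_wait_sack(data, NEGATE) below then only has to wait for ACK
+ * to be released again.)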
+ */ + nsp32_write1(base, SCSI_DATA_WITH_ACK, data->msgoutbuf[i]); + nsp32_wait_sack(data, NEGATE); + + nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "bus: 0x%x\n", + nsp32_read1(base, SCSI_BUS_MONITOR)); + } + + data->msgout_len = 0; + + nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "exit"); +} + +/* + * Restart AutoSCSI + * + * Note: Restarting AutoSCSI needs set: + * SYNC_REG, ACK_WIDTH, SGT_ADR, TRANSFER_CONTROL + */ +static void nsp32_restart_autoscsi(struct scsi_cmnd *SCpnt, unsigned short command) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + unsigned int base = data->BaseAddress; + unsigned short transfer = 0; + + nsp32_dbg(NSP32_DEBUG_RESTART, "enter"); + + if (data->cur_target == NULL || data->cur_lunt == NULL) { + nsp32_msg(KERN_ERR, "Target or Lun is invalid"); + } + + /* + * set SYNC_REG + * Don't set BM_START_ADR before setting this register. + */ + nsp32_write1(base, SYNC_REG, data->cur_target->syncreg); + + /* + * set ACKWIDTH + */ + nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth); + + /* + * set SREQ hazard killer sampling rate + */ + nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg); + + /* + * set SGT ADDR (physical address) + */ + nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr); + + /* + * set TRANSFER CONTROL REG + */ + transfer = 0; + transfer |= (TRANSFER_GO | ALL_COUNTER_CLR); + if (data->trans_method & NSP32_TRANSFER_BUSMASTER) { + if (scsi_bufflen(SCpnt) > 0) { + transfer |= BM_START; + } + } else if (data->trans_method & NSP32_TRANSFER_MMIO) { + transfer |= CB_MMIO_MODE; + } else if (data->trans_method & NSP32_TRANSFER_PIO) { + transfer |= CB_IO_MODE; + } + nsp32_write2(base, TRANSFER_CONTROL, transfer); + + /* + * restart AutoSCSI + * + * TODO: COMMANDCONTROL_AUTO_COMMAND_PHASE is needed ? + */ + command |= (CLEAR_CDB_FIFO_POINTER | + AUTO_COMMAND_PHASE | + AUTOSCSI_RESTART ); + nsp32_write2(base, COMMAND_CONTROL, command); + + nsp32_dbg(NSP32_DEBUG_RESTART, "exit"); +} + + +/* + * cannot run automatically message in occur + */ +static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt, + unsigned long irq_status, + unsigned short execph) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + unsigned int base = SCpnt->device->host->io_port; + unsigned char msg; + unsigned char msgtype; + unsigned char newlun; + unsigned short command = 0; + int msgclear = TRUE; + long new_sgtp; + int ret; + + /* + * read first message + * Use SCSIDATA_W_ACK instead of SCSIDATAIN, because the procedure + * of Message-In have to be processed before sending back SCSI ACK. + */ + msg = nsp32_read1(base, SCSI_DATA_IN); + data->msginbuf[(unsigned char)data->msgin_len] = msg; + msgtype = data->msginbuf[0]; + nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, + "enter: msglen: 0x%x msgin: 0x%x msgtype: 0x%x", + data->msgin_len, msg, msgtype); + + /* + * TODO: We need checking whether bus phase is message in? + */ + + /* + * assert SCSI ACK + */ + nsp32_sack_assert(data); + + /* + * processing IDENTIFY + */ + if (msgtype & 0x80) { + if (!(irq_status & IRQSTATUS_RESELECT_OCCUER)) { + /* Invalid (non reselect) phase */ + goto reject; + } + + newlun = msgtype & 0x1f; /* TODO: SPI-3 compliant? */ + ret = nsp32_reselection(SCpnt, newlun); + if (ret == TRUE) { + goto restart; + } else { + goto reject; + } + } + + /* + * processing messages except for IDENTIFY + * + * TODO: Messages are all SCSI-2 terminology. SCSI-3 compliance is TODO. 
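+ *
+ * The switch below handles the one-byte messages first, then the
+ * two-byte ones (SIMPLE_QUEUE_TAG and Ignore Wide Residue, 0x23), and
+ * finally EXTENDED_MESSAGE; anything unrecognised falls through to
+ * the reject path, which builds a MESSAGE REJECT and restarts
+ * AutoSCSI.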
+ */ + switch (msgtype) { + /* + * 1-byte message + */ + case COMMAND_COMPLETE: + case DISCONNECT: + /* + * These messages should not be occurred. + * They should be processed on AutoSCSI sequencer. + */ + nsp32_msg(KERN_WARNING, + "unexpected message of AutoSCSI MsgIn: 0x%x", msg); + break; + + case RESTORE_POINTERS: + /* + * AutoMsgIn03 is disabled, and HBA gets this message. + */ + + if ((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE)) { + unsigned int s_sacklen; + + s_sacklen = nsp32_read4(base, SAVED_SACK_CNT); + if ((execph & MSGIN_02_VALID) && (s_sacklen > 0)) { + nsp32_adjust_busfree(SCpnt, s_sacklen); + } else { + /* No need to rewrite SGT */ + } + } + data->cur_lunt->msgin03 = FALSE; + + /* Update with the new value */ + + /* reset SACK/SavedACK counter (or ALL clear?) */ + nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK); + + /* + * set new sg pointer + */ + new_sgtp = data->cur_lunt->sglun_paddr + + (data->cur_lunt->cur_entry * sizeof(nsp32_sgtable)); + nsp32_write4(base, SGT_ADR, new_sgtp); + + break; + + case SAVE_POINTERS: + /* + * These messages should not be occurred. + * They should be processed on AutoSCSI sequencer. + */ + nsp32_msg (KERN_WARNING, + "unexpected message of AutoSCSI MsgIn: SAVE_POINTERS"); + + break; + + case MESSAGE_REJECT: + /* If previous message_out is sending SDTR, and get + message_reject from target, SDTR negotiation is failed */ + if (data->cur_target->sync_flag & + (SDTR_INITIATOR | SDTR_TARGET)) { + /* + * Current target is negotiating SDTR, but it's + * failed. Fall back to async transfer mode, and set + * SDTR_DONE. + */ + nsp32_set_async(data, data->cur_target); + data->cur_target->sync_flag &= ~SDTR_INITIATOR; + data->cur_target->sync_flag |= SDTR_DONE; + + } + break; + + case LINKED_CMD_COMPLETE: + case LINKED_FLG_CMD_COMPLETE: + /* queue tag is not supported currently */ + nsp32_msg (KERN_WARNING, + "unsupported message: 0x%x", msgtype); + break; + + case INITIATE_RECOVERY: + /* staring ECA (Extended Contingent Allegiance) state. */ + /* This message is declined in SPI2 or later. */ + + goto reject; + + /* + * 2-byte message + */ + case SIMPLE_QUEUE_TAG: + case 0x23: + /* + * 0x23: Ignore_Wide_Residue is not declared in scsi.h. + * No support is needed. + */ + if (data->msgin_len >= 1) { + goto reject; + } + + /* current position is 1-byte of 2 byte */ + msgclear = FALSE; + + break; + + /* + * extended message + */ + case EXTENDED_MESSAGE: + if (data->msgin_len < 1) { + /* + * Current position does not reach 2-byte + * (2-byte is extended message length). + */ + msgclear = FALSE; + break; + } + + if ((data->msginbuf[1] + 1) > data->msgin_len) { + /* + * Current extended message has msginbuf[1] + 2 + * (msgin_len starts counting from 0, so buf[1] + 1). + * If current message position is not finished, + * continue receiving message. + */ + msgclear = FALSE; + break; + } + + /* + * Reach here means regular length of each type of + * extended messages. + */ + switch (data->msginbuf[2]) { + case EXTENDED_MODIFY_DATA_POINTER: + /* TODO */ + goto reject; /* not implemented yet */ + break; + + case EXTENDED_SDTR: + /* + * Exchange this message between initiator and target. + */ + if (data->msgin_len != EXTENDED_SDTR_LEN + 1) { + /* + * received inappropriate message. + */ + goto reject; + break; + } + + nsp32_analyze_sdtr(SCpnt); + + break; + + case EXTENDED_EXTENDED_IDENTIFY: + /* SCSI-I only, not supported. 
*/ + goto reject; /* not implemented yet */ + + break; + + case EXTENDED_WDTR: + goto reject; /* not implemented yet */ + + break; + + default: + goto reject; + } + break; + + default: + goto reject; + } + + restart: + if (msgclear == TRUE) { + data->msgin_len = 0; + + /* + * If restarting AutoSCSI, but there are some message to out + * (msgout_len > 0), set AutoATN, and set SCSIMSGOUT as 0 + * (MV_VALID = 0). When commandcontrol is written with + * AutoSCSI restart, at the same time MsgOutOccur should be + * happened (however, such situation is really possible...?). + */ + if (data->msgout_len > 0) { + nsp32_write4(base, SCSI_MSG_OUT, 0); + command |= AUTO_ATN; + } + + /* + * restart AutoSCSI + * If it's failed, COMMANDCONTROL_AUTO_COMMAND_PHASE is needed. + */ + command |= (AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02); + + /* + * If current msgin03 is TRUE, then flag on. + */ + if (data->cur_lunt->msgin03 == TRUE) { + command |= AUTO_MSGIN_03; + } + data->cur_lunt->msgin03 = FALSE; + } else { + data->msgin_len++; + } + + /* + * restart AutoSCSI + */ + nsp32_restart_autoscsi(SCpnt, command); + + /* + * wait SCSI REQ negate for REQ-ACK handshake + */ + nsp32_wait_req(data, NEGATE); + + /* + * negate SCSI ACK + */ + nsp32_sack_negate(data); + + nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit"); + + return; + + reject: + nsp32_msg(KERN_WARNING, + "invalid or unsupported MessageIn, rejected. " + "current msg: 0x%x (len: 0x%x), processing msg: 0x%x", + msg, data->msgin_len, msgtype); + nsp32_build_reject(SCpnt); + data->msgin_len = 0; + + goto restart; +} + +/* + * + */ +static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + nsp32_target *target = data->cur_target; + unsigned char get_period = data->msginbuf[3]; + unsigned char get_offset = data->msginbuf[4]; + int entry; + + nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "enter"); + + /* + * If this inititor sent the SDTR message, then target responds SDTR, + * initiator SYNCREG, ACKWIDTH from SDTR parameter. + * Messages are not appropriate, then send back reject message. + * If initiator did not send the SDTR, but target sends SDTR, + * initiator calculator the appropriate parameter and send back SDTR. + */ + if (target->sync_flag & SDTR_INITIATOR) { + /* + * Initiator sent SDTR, the target responds and + * send back negotiation SDTR. + */ + nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target responds SDTR"); + + target->sync_flag &= ~SDTR_INITIATOR; + target->sync_flag |= SDTR_DONE; + + /* + * offset: + */ + if (get_offset > SYNC_OFFSET) { + /* + * Negotiation is failed, the target send back + * unexpected offset value. + */ + goto reject; + } + + if (get_offset == ASYNC_OFFSET) { + /* + * Negotiation is succeeded, the target want + * to fall back into asynchronous transfer mode. + */ + goto async; + } + + /* + * period: + * Check whether sync period is too short. If too short, + * fall back to async mode. If it's ok, then investigate + * the received sync period. If sync period is acceptable + * between sync table start_period and end_period, then + * set this I_T nexus as sent offset and period. + * If it's not acceptable, send back reject and fall back + * to async mode. + */ + if (get_period < data->synct[0].period_num) { + /* + * Negotiation is failed, the target send back + * unexpected period value. + */ + goto reject; + } + + entry = nsp32_search_period_entry(data, target, get_period); + + if (entry < 0) { + /* + * Target want to use long period which is not + * acceptable NinjaSCSI-32Bi/UDE. 
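+ * (nsp32_search_period_entry() found no sync_table entry covering the
+ * requested period; the reject path below also drops this I_T nexus
+ * back to asynchronous transfer.)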
+ */ + goto reject; + } + + /* + * Set new sync table and offset in this I_T nexus. + */ + nsp32_set_sync_entry(data, target, entry, get_offset); + } else { + /* Target send SDTR to initiator. */ + nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target send SDTR"); + + target->sync_flag |= SDTR_INITIATOR; + + /* offset: */ + if (get_offset > SYNC_OFFSET) { + /* send back as SYNC_OFFSET */ + get_offset = SYNC_OFFSET; + } + + /* period: */ + if (get_period < data->synct[0].period_num) { + get_period = data->synct[0].period_num; + } + + entry = nsp32_search_period_entry(data, target, get_period); + + if (get_offset == ASYNC_OFFSET || entry < 0) { + nsp32_set_async(data, target); + nsp32_build_sdtr(SCpnt, 0, ASYNC_OFFSET); + } else { + nsp32_set_sync_entry(data, target, entry, get_offset); + nsp32_build_sdtr(SCpnt, get_period, get_offset); + } + } + + target->period = get_period; + nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit"); + return; + + reject: + /* + * If the current message is unacceptable, send back to the target + * with reject message. + */ + nsp32_build_reject(SCpnt); + + async: + nsp32_set_async(data, target); /* set as ASYNC transfer mode */ + + target->period = 0; + nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit: set async"); + return; +} + + +/* + * Search config entry number matched in sync_table from given + * target and speed period value. If failed to search, return negative value. + */ +static int nsp32_search_period_entry(nsp32_hw_data *data, + nsp32_target *target, + unsigned char period) +{ + int i; + + if (target->limit_entry >= data->syncnum) { + nsp32_msg(KERN_ERR, "limit_entry exceeds syncnum!"); + target->limit_entry = 0; + } + + for (i = target->limit_entry; i < data->syncnum; i++) { + if (period >= data->synct[i].start_period && + period <= data->synct[i].end_period) { + break; + } + } + + /* + * Check given period value is over the sync_table value. + * If so, return max value. 
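+ * (The code below actually returns -1 in that case; callers test for
+ * a negative return value rather than a maximum entry.)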
+ */ + if (i == data->syncnum) { + i = -1; + } + + return i; +} + + +/* + * target <-> initiator use ASYNC transfer + */ +static void nsp32_set_async(nsp32_hw_data *data, nsp32_target *target) +{ + unsigned char period = data->synct[target->limit_entry].period_num; + + target->offset = ASYNC_OFFSET; + target->period = 0; + target->syncreg = TO_SYNCREG(period, ASYNC_OFFSET); + target->ackwidth = 0; + target->sample_reg = 0; + + nsp32_dbg(NSP32_DEBUG_SYNC, "set async"); +} + + +/* + * target <-> initiator use maximum SYNC transfer + */ +static void nsp32_set_max_sync(nsp32_hw_data *data, + nsp32_target *target, + unsigned char *period, + unsigned char *offset) +{ + unsigned char period_num, ackwidth; + + period_num = data->synct[target->limit_entry].period_num; + *period = data->synct[target->limit_entry].start_period; + ackwidth = data->synct[target->limit_entry].ackwidth; + *offset = SYNC_OFFSET; + + target->syncreg = TO_SYNCREG(period_num, *offset); + target->ackwidth = ackwidth; + target->offset = *offset; + target->sample_reg = 0; /* disable SREQ sampling */ +} + + +/* + * target <-> initiator use entry number speed + */ +static void nsp32_set_sync_entry(nsp32_hw_data *data, + nsp32_target *target, + int entry, + unsigned char offset) +{ + unsigned char period, ackwidth, sample_rate; + + period = data->synct[entry].period_num; + ackwidth = data->synct[entry].ackwidth; + sample_rate = data->synct[entry].sample_rate; + + target->syncreg = TO_SYNCREG(period, offset); + target->ackwidth = ackwidth; + target->offset = offset; + target->sample_reg = sample_rate | SAMPLING_ENABLE; + + nsp32_dbg(NSP32_DEBUG_SYNC, "set sync"); +} + + +/* + * It waits until SCSI REQ becomes assertion or negation state. + * + * Note: If nsp32_msgin_occur is called, we asserts SCSI ACK. Then + * connected target responds SCSI REQ negation. We have to wait + * SCSI REQ becomes negation in order to negate SCSI ACK signal for + * REQ-ACK handshake. + */ +static void nsp32_wait_req(nsp32_hw_data *data, int state) +{ + unsigned int base = data->BaseAddress; + int wait_time = 0; + unsigned char bus, req_bit; + + if (!((state == ASSERT) || (state == NEGATE))) { + nsp32_msg(KERN_ERR, "unknown state designation"); + } + /* REQ is BIT(5) */ + req_bit = (state == ASSERT ? BUSMON_REQ : 0); + + do { + bus = nsp32_read1(base, SCSI_BUS_MONITOR); + if ((bus & BUSMON_REQ) == req_bit) { + nsp32_dbg(NSP32_DEBUG_WAIT, + "wait_time: %d", wait_time); + return; + } + udelay(1); + wait_time++; + } while (wait_time < REQSACK_TIMEOUT_TIME); + + nsp32_msg(KERN_WARNING, "wait REQ timeout, req_bit: 0x%x", req_bit); +} + +/* + * It waits until SCSI SACK becomes assertion or negation state. + */ +static void nsp32_wait_sack(nsp32_hw_data *data, int state) +{ + unsigned int base = data->BaseAddress; + int wait_time = 0; + unsigned char bus, ack_bit; + + if (!((state == ASSERT) || (state == NEGATE))) { + nsp32_msg(KERN_ERR, "unknown state designation"); + } + /* ACK is BIT(4) */ + ack_bit = (state == ASSERT ? BUSMON_ACK : 0); + + do { + bus = nsp32_read1(base, SCSI_BUS_MONITOR); + if ((bus & BUSMON_ACK) == ack_bit) { + nsp32_dbg(NSP32_DEBUG_WAIT, + "wait_time: %d", wait_time); + return; + } + udelay(1); + wait_time++; + } while (wait_time < REQSACK_TIMEOUT_TIME); + + nsp32_msg(KERN_WARNING, "wait SACK timeout, ack_bit: 0x%x", ack_bit); +} + +/* + * assert SCSI ACK + * + * Note: SCSI ACK assertion needs with ACKENB=1, AUTODIRECTION=1. 
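+ * That is why the function below ORs BUSCTL_ACK, AUTODIRECTION and
+ * ACKENB into SCSI_BUS_CONTROL in a single read-modify-write.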
+ */ +static void nsp32_sack_assert(nsp32_hw_data *data) +{ + unsigned int base = data->BaseAddress; + unsigned char busctrl; + + busctrl = nsp32_read1(base, SCSI_BUS_CONTROL); + busctrl |= (BUSCTL_ACK | AUTODIRECTION | ACKENB); + nsp32_write1(base, SCSI_BUS_CONTROL, busctrl); +} + +/* + * negate SCSI ACK + */ +static void nsp32_sack_negate(nsp32_hw_data *data) +{ + unsigned int base = data->BaseAddress; + unsigned char busctrl; + + busctrl = nsp32_read1(base, SCSI_BUS_CONTROL); + busctrl &= ~BUSCTL_ACK; + nsp32_write1(base, SCSI_BUS_CONTROL, busctrl); +} + + + +/* + * Note: n_io_port is defined as 0x7f because I/O register port is + * assigned as: + * 0x800-0x8ff: memory mapped I/O port + * 0x900-0xbff: (map same 0x800-0x8ff I/O port image repeatedly) + * 0xc00-0xfff: CardBus status registers + */ +static int nsp32_detect(struct pci_dev *pdev) +{ + struct Scsi_Host *host; /* registered host structure */ + struct resource *res; + nsp32_hw_data *data; + int ret; + int i, j; + + nsp32_dbg(NSP32_DEBUG_REGISTER, "enter"); + + /* + * register this HBA as SCSI device + */ + host = scsi_host_alloc(&nsp32_template, sizeof(nsp32_hw_data)); + if (host == NULL) { + nsp32_msg (KERN_ERR, "failed to scsi register"); + goto err; + } + + /* + * set nsp32_hw_data + */ + data = (nsp32_hw_data *)host->hostdata; + + memcpy(data, &nsp32_data_base, sizeof(nsp32_hw_data)); + + host->irq = data->IrqNumber; + host->io_port = data->BaseAddress; + host->unique_id = data->BaseAddress; + host->n_io_port = data->NumAddress; + host->base = (unsigned long)data->MmioAddress; + + data->Host = host; + spin_lock_init(&(data->Lock)); + + data->cur_lunt = NULL; + data->cur_target = NULL; + + /* + * Bus master transfer mode is supported currently. + */ + data->trans_method = NSP32_TRANSFER_BUSMASTER; + + /* + * Set clock div, CLOCK_4 (HBA has own external clock, and + * dividing * 100ns/4). + * Currently CLOCK_4 has only tested, not for CLOCK_2/PCICLK yet. + */ + data->clock = CLOCK_4; + + /* + * Select appropriate nsp32_sync_table and set I_CLOCKDIV. + */ + switch (data->clock) { + case CLOCK_4: + /* If data->clock is CLOCK_4, then select 40M sync table. */ + data->synct = nsp32_sync_table_40M; + data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M); + break; + case CLOCK_2: + /* If data->clock is CLOCK_2, then select 20M sync table. */ + data->synct = nsp32_sync_table_20M; + data->syncnum = ARRAY_SIZE(nsp32_sync_table_20M); + break; + case PCICLK: + /* If data->clock is PCICLK, then select pci sync table. */ + data->synct = nsp32_sync_table_pci; + data->syncnum = ARRAY_SIZE(nsp32_sync_table_pci); + break; + default: + nsp32_msg(KERN_WARNING, + "Invalid clock div is selected, set CLOCK_4."); + /* Use default value CLOCK_4 */ + data->clock = CLOCK_4; + data->synct = nsp32_sync_table_40M; + data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M); + } + + /* + * setup nsp32_lunt + */ + + /* + * setup DMA + */ + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) { + nsp32_msg (KERN_ERR, "failed to set PCI DMA mask"); + goto scsi_unregister; + } + + /* + * allocate autoparam DMA resource. + */ + data->autoparam = dma_alloc_coherent(&pdev->dev, + sizeof(nsp32_autoparam), &(data->auto_paddr), + GFP_KERNEL); + if (data->autoparam == NULL) { + nsp32_msg(KERN_ERR, "failed to allocate DMA memory"); + goto scsi_unregister; + } + + /* + * allocate scatter-gather DMA resource. 
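+ * NSP32_SG_TABLE_SIZE is sized to give one nsp32_sglun SG table per
+ * target/LUN pair; the loop that follows hands each lunt[i][j] its
+ * own slice of this buffer together with the matching bus address.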
+ */ + data->sg_list = dma_alloc_coherent(&pdev->dev, NSP32_SG_TABLE_SIZE, + &data->sg_paddr, GFP_KERNEL); + if (data->sg_list == NULL) { + nsp32_msg(KERN_ERR, "failed to allocate DMA memory"); + goto free_autoparam; + } + + for (i = 0; i < ARRAY_SIZE(data->lunt); i++) { + for (j = 0; j < ARRAY_SIZE(data->lunt[0]); j++) { + int offset = i * ARRAY_SIZE(data->lunt[0]) + j; + nsp32_lunt tmp = { + .SCpnt = NULL, + .save_datp = 0, + .msgin03 = FALSE, + .sg_num = 0, + .cur_entry = 0, + .sglun = &(data->sg_list[offset]), + .sglun_paddr = data->sg_paddr + (offset * sizeof(nsp32_sglun)), + }; + + data->lunt[i][j] = tmp; + } + } + + /* + * setup target + */ + for (i = 0; i < ARRAY_SIZE(data->target); i++) { + nsp32_target *target = &(data->target[i]); + + target->limit_entry = 0; + target->sync_flag = 0; + nsp32_set_async(data, target); + } + + /* + * EEPROM check + */ + ret = nsp32_getprom_param(data); + if (ret == FALSE) { + data->resettime = 3; /* default 3 */ + } + + /* + * setup HBA + */ + nsp32hw_init(data); + + snprintf(data->info_str, sizeof(data->info_str), + "NinjaSCSI-32Bi/UDE: irq %d, io 0x%lx+0x%x", + host->irq, host->io_port, host->n_io_port); + + /* + * SCSI bus reset + * + * Note: It's important to reset SCSI bus in initialization phase. + * NinjaSCSI-32Bi/UDE HBA EEPROM seems to exchange SDTR when + * system is coming up, so SCSI devices connected to HBA is set as + * un-asynchronous mode. It brings the merit that this HBA is + * ready to start synchronous transfer without any preparation, + * but we are difficult to control transfer speed. In addition, + * it prevents device transfer speed from effecting EEPROM start-up + * SDTR. NinjaSCSI-32Bi/UDE has the feature if EEPROM is set as + * Auto Mode, then FAST-10M is selected when SCSI devices are + * connected same or more than 4 devices. It should be avoided + * depending on this specification. Thus, resetting the SCSI bus + * restores all connected SCSI devices to asynchronous mode, then + * this driver set SDTR safely later, and we can control all SCSI + * device transfer mode. + */ + nsp32_do_bus_reset(data); + + ret = request_irq(host->irq, do_nsp32_isr, IRQF_SHARED, "nsp32", data); + if (ret < 0) { + nsp32_msg(KERN_ERR, "Unable to allocate IRQ for NinjaSCSI32 " + "SCSI PCI controller. 
Interrupt: %d", host->irq); + goto free_sg_list; + } + + /* + * PCI IO register + */ + res = request_region(host->io_port, host->n_io_port, "nsp32"); + if (res == NULL) { + nsp32_msg(KERN_ERR, + "I/O region 0x%x+0x%x is already used", + data->BaseAddress, data->NumAddress); + goto free_irq; + } + + ret = scsi_add_host(host, &pdev->dev); + if (ret) { + nsp32_msg(KERN_ERR, "failed to add scsi host"); + goto free_region; + } + scsi_scan_host(host); + pci_set_drvdata(pdev, host); + return 0; + + free_region: + release_region(host->io_port, host->n_io_port); + + free_irq: + free_irq(host->irq, data); + + free_sg_list: + dma_free_coherent(&pdev->dev, NSP32_SG_TABLE_SIZE, + data->sg_list, data->sg_paddr); + + free_autoparam: + dma_free_coherent(&pdev->dev, sizeof(nsp32_autoparam), + data->autoparam, data->auto_paddr); + + scsi_unregister: + scsi_host_put(host); + + err: + return 1; +} + +static int nsp32_release(struct Scsi_Host *host) +{ + nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata; + + if (data->autoparam) { + dma_free_coherent(&data->Pci->dev, sizeof(nsp32_autoparam), + data->autoparam, data->auto_paddr); + } + + if (data->sg_list) { + dma_free_coherent(&data->Pci->dev, NSP32_SG_TABLE_SIZE, + data->sg_list, data->sg_paddr); + } + + if (host->irq) { + free_irq(host->irq, data); + } + + if (host->io_port && host->n_io_port) { + release_region(host->io_port, host->n_io_port); + } + + if (data->MmioAddress) { + iounmap(data->MmioAddress); + } + + return 0; +} + +static const char *nsp32_info(struct Scsi_Host *shpnt) +{ + nsp32_hw_data *data = (nsp32_hw_data *)shpnt->hostdata; + + return data->info_str; +} + + +/**************************************************************************** + * error handler + */ +static int nsp32_eh_abort(struct scsi_cmnd *SCpnt) +{ + nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; + unsigned int base = SCpnt->device->host->io_port; + + nsp32_msg(KERN_WARNING, "abort"); + + if (data->cur_lunt->SCpnt == NULL) { + nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort failed"); + return FAILED; + } + + if (data->cur_target->sync_flag & (SDTR_INITIATOR | SDTR_TARGET)) { + /* reset SDTR negotiation */ + data->cur_target->sync_flag = 0; + nsp32_set_async(data, data->cur_target); + } + + nsp32_write2(base, TRANSFER_CONTROL, 0); + nsp32_write2(base, BM_CNT, 0); + + SCpnt->result = DID_ABORT << 16; + nsp32_scsi_done(SCpnt); + + nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort success"); + return SUCCESS; +} + +static void nsp32_do_bus_reset(nsp32_hw_data *data) +{ + unsigned int base = data->BaseAddress; + int i; + unsigned short __maybe_unused intrdat; + + nsp32_dbg(NSP32_DEBUG_BUSRESET, "in"); + + /* + * stop all transfer + * clear TRANSFERCONTROL_BM_START + * clear counter + */ + nsp32_write2(base, TRANSFER_CONTROL, 0); + nsp32_write4(base, BM_CNT, 0); + nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK); + + /* + * fall back to asynchronous transfer mode + * initialize SDTR negotiation flag + */ + for (i = 0; i < ARRAY_SIZE(data->target); i++) { + nsp32_target *target = &data->target[i]; + + target->sync_flag = 0; + nsp32_set_async(data, target); + } + + /* + * reset SCSI bus + */ + nsp32_write1(base, SCSI_BUS_CONTROL, BUSCTL_RST); + mdelay(RESET_HOLD_TIME / 1000); + nsp32_write1(base, SCSI_BUS_CONTROL, 0); + for(i = 0; i < 5; i++) { + intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */ + nsp32_dbg(NSP32_DEBUG_BUSRESET, "irq:1: 0x%x", intrdat); + } + + data->CurrentSC = NULL; +} + +static int nsp32_eh_host_reset(struct scsi_cmnd *SCpnt) +{ + struct 
Scsi_Host *host = SCpnt->device->host; + unsigned int base = SCpnt->device->host->io_port; + nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata; + + nsp32_msg(KERN_INFO, "Host Reset"); + nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt); + + spin_lock_irq(SCpnt->device->host->host_lock); + + nsp32hw_init(data); + nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK); + nsp32_do_bus_reset(data); + nsp32_write2(base, IRQ_CONTROL, 0); + + spin_unlock_irq(SCpnt->device->host->host_lock); + return SUCCESS; /* Host reset is succeeded at any time. */ +} + + +/************************************************************************** + * EEPROM handler + */ + +/* + * getting EEPROM parameter + */ +static int nsp32_getprom_param(nsp32_hw_data *data) +{ + int vendor = data->pci_devid->vendor; + int device = data->pci_devid->device; + int ret, i; + int __maybe_unused val; + + /* + * EEPROM checking. + */ + ret = nsp32_prom_read(data, 0x7e); + if (ret != 0x55) { + nsp32_msg(KERN_INFO, "No EEPROM detected: 0x%x", ret); + return FALSE; + } + ret = nsp32_prom_read(data, 0x7f); + if (ret != 0xaa) { + nsp32_msg(KERN_INFO, "Invalid number: 0x%x", ret); + return FALSE; + } + + /* + * check EEPROM type + */ + if (vendor == PCI_VENDOR_ID_WORKBIT && + device == PCI_DEVICE_ID_WORKBIT_STANDARD) { + ret = nsp32_getprom_c16(data); + } else if (vendor == PCI_VENDOR_ID_WORKBIT && + device == PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC) { + ret = nsp32_getprom_at24(data); + } else if (vendor == PCI_VENDOR_ID_WORKBIT && + device == PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO ) { + ret = nsp32_getprom_at24(data); + } else { + nsp32_msg(KERN_WARNING, "Unknown EEPROM"); + ret = FALSE; + } + + /* for debug : SPROM data full checking */ + for (i = 0; i <= 0x1f; i++) { + val = nsp32_prom_read(data, i); + nsp32_dbg(NSP32_DEBUG_EEPROM, + "rom address 0x%x : 0x%x", i, val); + } + + return ret; +} + + +/* + * AT24C01A (Logitec: LHA-600S), AT24C02 (Melco Buffalo: IFC-USLP) data map: + * + * ROMADDR + * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6) + * Value 0x0: ASYNC, 0x0c: Ultra-20M, 0x19: Fast-10M + * 0x07 : HBA Synchronous Transfer Period + * Value 0: AutoSync, 1: Manual Setting + * 0x08 - 0x0f : Not Used? (0x0) + * 0x10 : Bus Termination + * Value 0: Auto[ON], 1: ON, 2: OFF + * 0x11 : Not Used? (0) + * 0x12 : Bus Reset Delay Time (0x03) + * 0x13 : Bootable CD Support + * Value 0: Disable, 1: Enable + * 0x14 : Device Scan + * Bit 7 6 5 4 3 2 1 0 + * | <-----------------> + * | SCSI ID: Value 0: Skip, 1: YES + * |-> Value 0: ALL scan, Value 1: Manual + * 0x15 - 0x1b : Not Used? (0) + * 0x1c : Constant? (0x01) (clock div?) + * 0x1d - 0x7c : Not Used (0xff) + * 0x7d : Not Used? (0xff) + * 0x7e : Constant (0x55), Validity signature + * 0x7f : Constant (0xaa), Validity signature + */ +static int nsp32_getprom_at24(nsp32_hw_data *data) +{ + int ret, i; + int auto_sync; + nsp32_target *target; + int entry; + + /* + * Reset time which is designated by EEPROM. + * + * TODO: Not used yet. + */ + data->resettime = nsp32_prom_read(data, 0x12); + + /* + * HBA Synchronous Transfer Period + * + * Note: auto_sync = 0: auto, 1: manual. Ninja SCSI HBA spec says + * that if auto_sync is 0 (auto), and connected SCSI devices are + * same or lower than 3, then transfer speed is set as ULTRA-20M. + * On the contrary if connected SCSI devices are same or higher + * than 4, then transfer speed is set as FAST-10M. + * + * I break this rule. The number of connected SCSI devices are + * only ignored. 
If auto_sync is 0 (auto), then transfer speed is + * forced as ULTRA-20M. + */ + ret = nsp32_prom_read(data, 0x07); + switch (ret) { + case 0: + auto_sync = TRUE; + break; + case 1: + auto_sync = FALSE; + break; + default: + nsp32_msg(KERN_WARNING, + "Unsupported Auto Sync mode. Fall back to manual mode."); + auto_sync = TRUE; + } + + if (trans_mode == ULTRA20M_MODE) { + auto_sync = TRUE; + } + + /* + * each device Synchronous Transfer Period + */ + for (i = 0; i < NSP32_HOST_SCSIID; i++) { + target = &data->target[i]; + if (auto_sync == TRUE) { + target->limit_entry = 0; /* set as ULTRA20M */ + } else { + ret = nsp32_prom_read(data, i); + entry = nsp32_search_period_entry(data, target, ret); + if (entry < 0) { + /* search failed... set maximum speed */ + entry = 0; + } + target->limit_entry = entry; + } + } + + return TRUE; +} + + +/* + * C16 110 (I-O Data: SC-NBD) data map: + * + * ROMADDR + * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6) + * Value 0x0: 20MB/S, 0x1: 10MB/S, 0x2: 5MB/S, 0x3: ASYNC + * 0x07 : 0 (HBA Synchronous Transfer Period: Auto Sync) + * 0x08 - 0x0f : Not Used? (0x0) + * 0x10 : Transfer Mode + * Value 0: PIO, 1: Busmater + * 0x11 : Bus Reset Delay Time (0x00-0x20) + * 0x12 : Bus Termination + * Value 0: Disable, 1: Enable + * 0x13 - 0x19 : Disconnection + * Value 0: Disable, 1: Enable + * 0x1a - 0x7c : Not Used? (0) + * 0x7d : Not Used? (0xf8) + * 0x7e : Constant (0x55), Validity signature + * 0x7f : Constant (0xaa), Validity signature + */ +static int nsp32_getprom_c16(nsp32_hw_data *data) +{ + int ret, i; + nsp32_target *target; + int entry, val; + + /* + * Reset time which is designated by EEPROM. + * + * TODO: Not used yet. + */ + data->resettime = nsp32_prom_read(data, 0x11); + + /* + * each device Synchronous Transfer Period + */ + for (i = 0; i < NSP32_HOST_SCSIID; i++) { + target = &data->target[i]; + ret = nsp32_prom_read(data, i); + switch (ret) { + case 0: /* 20MB/s */ + val = 0x0c; + break; + case 1: /* 10MB/s */ + val = 0x19; + break; + case 2: /* 5MB/s */ + val = 0x32; + break; + case 3: /* ASYNC */ + val = 0x00; + break; + default: /* default 20MB/s */ + val = 0x0c; + break; + } + entry = nsp32_search_period_entry(data, target, val); + if (entry < 0 || trans_mode == ULTRA20M_MODE) { + /* search failed... 
set maximum speed */ + entry = 0; + } + target->limit_entry = entry; + } + + return TRUE; +} + + +/* + * Atmel AT24C01A (drived in 5V) serial EEPROM routines + */ +static int nsp32_prom_read(nsp32_hw_data *data, int romaddr) +{ + int i, val; + + /* start condition */ + nsp32_prom_start(data); + + /* device address */ + nsp32_prom_write_bit(data, 1); /* 1 */ + nsp32_prom_write_bit(data, 0); /* 0 */ + nsp32_prom_write_bit(data, 1); /* 1 */ + nsp32_prom_write_bit(data, 0); /* 0 */ + nsp32_prom_write_bit(data, 0); /* A2: 0 (GND) */ + nsp32_prom_write_bit(data, 0); /* A1: 0 (GND) */ + nsp32_prom_write_bit(data, 0); /* A0: 0 (GND) */ + + /* R/W: W for dummy write */ + nsp32_prom_write_bit(data, 0); + + /* ack */ + nsp32_prom_write_bit(data, 0); + + /* word address */ + for (i = 7; i >= 0; i--) { + nsp32_prom_write_bit(data, ((romaddr >> i) & 1)); + } + + /* ack */ + nsp32_prom_write_bit(data, 0); + + /* start condition */ + nsp32_prom_start(data); + + /* device address */ + nsp32_prom_write_bit(data, 1); /* 1 */ + nsp32_prom_write_bit(data, 0); /* 0 */ + nsp32_prom_write_bit(data, 1); /* 1 */ + nsp32_prom_write_bit(data, 0); /* 0 */ + nsp32_prom_write_bit(data, 0); /* A2: 0 (GND) */ + nsp32_prom_write_bit(data, 0); /* A1: 0 (GND) */ + nsp32_prom_write_bit(data, 0); /* A0: 0 (GND) */ + + /* R/W: R */ + nsp32_prom_write_bit(data, 1); + + /* ack */ + nsp32_prom_write_bit(data, 0); + + /* data... */ + val = 0; + for (i = 7; i >= 0; i--) { + val += (nsp32_prom_read_bit(data) << i); + } + + /* no ack */ + nsp32_prom_write_bit(data, 1); + + /* stop condition */ + nsp32_prom_stop(data); + + return val; +} + +static void nsp32_prom_set(nsp32_hw_data *data, int bit, int val) +{ + int base = data->BaseAddress; + int tmp; + + tmp = nsp32_index_read1(base, SERIAL_ROM_CTL); + + if (val == 0) { + tmp &= ~bit; + } else { + tmp |= bit; + } + + nsp32_index_write1(base, SERIAL_ROM_CTL, tmp); + + udelay(10); +} + +static int nsp32_prom_get(nsp32_hw_data *data, int bit) +{ + int base = data->BaseAddress; + int tmp, ret; + + if (bit != SDA) { + nsp32_msg(KERN_ERR, "return value is not appropriate"); + return 0; + } + + + tmp = nsp32_index_read1(base, SERIAL_ROM_CTL) & bit; + + if (tmp == 0) { + ret = 0; + } else { + ret = 1; + } + + udelay(10); + + return ret; +} + +static void nsp32_prom_start (nsp32_hw_data *data) +{ + /* start condition */ + nsp32_prom_set(data, SCL, 1); + nsp32_prom_set(data, SDA, 1); + nsp32_prom_set(data, ENA, 1); /* output mode */ + nsp32_prom_set(data, SDA, 0); /* keeping SCL=1 and transiting + * SDA 1->0 is start condition */ + nsp32_prom_set(data, SCL, 0); +} + +static void nsp32_prom_stop (nsp32_hw_data *data) +{ + /* stop condition */ + nsp32_prom_set(data, SCL, 1); + nsp32_prom_set(data, SDA, 0); + nsp32_prom_set(data, ENA, 1); /* output mode */ + nsp32_prom_set(data, SDA, 1); + nsp32_prom_set(data, SCL, 0); +} + +static void nsp32_prom_write_bit(nsp32_hw_data *data, int val) +{ + /* write */ + nsp32_prom_set(data, SDA, val); + nsp32_prom_set(data, SCL, 1 ); + nsp32_prom_set(data, SCL, 0 ); +} + +static int nsp32_prom_read_bit(nsp32_hw_data *data) +{ + int val; + + /* read */ + nsp32_prom_set(data, ENA, 0); /* input mode */ + nsp32_prom_set(data, SCL, 1); + + val = nsp32_prom_get(data, SDA); + + nsp32_prom_set(data, SCL, 0); + nsp32_prom_set(data, ENA, 1); /* output mode */ + + return val; +} + + +/************************************************************************** + * Power Management + */ +#ifdef CONFIG_PM + +/* Device suspended */ +static int nsp32_suspend(struct pci_dev *pdev, 
pm_message_t state) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + + nsp32_msg(KERN_INFO, "pci-suspend: pdev=0x%p, state.event=%x, slot=%s, host=0x%p", + pdev, state.event, pci_name(pdev), host); + + pci_save_state (pdev); + pci_disable_device (pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +/* Device woken up */ +static int nsp32_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata; + unsigned short reg; + + nsp32_msg(KERN_INFO, "pci-resume: pdev=0x%p, slot=%s, host=0x%p", + pdev, pci_name(pdev), host); + + pci_set_power_state(pdev, PCI_D0); + pci_enable_wake (pdev, PCI_D0, 0); + pci_restore_state (pdev); + + reg = nsp32_read2(data->BaseAddress, INDEX_REG); + + nsp32_msg(KERN_INFO, "io=0x%x reg=0x%x", data->BaseAddress, reg); + + if (reg == 0xffff) { + nsp32_msg(KERN_INFO, "missing device. abort resume."); + return 0; + } + + nsp32hw_init (data); + nsp32_do_bus_reset(data); + + nsp32_msg(KERN_INFO, "resume success"); + + return 0; +} + +#endif + +/************************************************************************ + * PCI/Cardbus probe/remove routine + */ +static int nsp32_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int ret; + nsp32_hw_data *data = &nsp32_data_base; + + nsp32_dbg(NSP32_DEBUG_REGISTER, "enter"); + + ret = pci_enable_device(pdev); + if (ret) { + nsp32_msg(KERN_ERR, "failed to enable pci device"); + return ret; + } + + data->Pci = pdev; + data->pci_devid = id; + data->IrqNumber = pdev->irq; + data->BaseAddress = pci_resource_start(pdev, 0); + data->NumAddress = pci_resource_len (pdev, 0); + data->MmioAddress = pci_ioremap_bar(pdev, 1); + data->MmioLength = pci_resource_len (pdev, 1); + + pci_set_master(pdev); + + ret = nsp32_detect(pdev); + + nsp32_msg(KERN_INFO, "irq: %i mmio: %p+0x%lx slot: %s model: %s", + pdev->irq, + data->MmioAddress, data->MmioLength, + pci_name(pdev), + nsp32_model[id->driver_data]); + + nsp32_dbg(NSP32_DEBUG_REGISTER, "exit %d", ret); + + return ret; +} + +static void nsp32_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + + nsp32_dbg(NSP32_DEBUG_REGISTER, "enter"); + + scsi_remove_host(host); + + nsp32_release(host); + + scsi_host_put(host); +} + +static struct pci_driver nsp32_driver = { + .name = "nsp32", + .id_table = nsp32_pci_table, + .probe = nsp32_probe, + .remove = nsp32_remove, +#ifdef CONFIG_PM + .suspend = nsp32_suspend, + .resume = nsp32_resume, +#endif +}; + +/********************************************************************* + * Moule entry point + */ +static int __init init_nsp32(void) { + nsp32_msg(KERN_INFO, "loading..."); + return pci_register_driver(&nsp32_driver); +} + +static void __exit exit_nsp32(void) { + nsp32_msg(KERN_INFO, "unloading..."); + pci_unregister_driver(&nsp32_driver); +} + +module_init(init_nsp32); +module_exit(exit_nsp32); + +/* end */ diff --git a/drivers/scsi/nsp32.h b/drivers/scsi/nsp32.h new file mode 100644 index 000000000..924889f8b --- /dev/null +++ b/drivers/scsi/nsp32.h @@ -0,0 +1,617 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Workbit NinjaSCSI-32Bi/UDE PCI/CardBus SCSI Host Bus Adapter driver + * Basic data header +*/ + +#ifndef _NSP32_H +#define _NSP32_H + +//#define NSP32_DEBUG 9 + +/* + * VENDOR/DEVICE ID + */ +#define PCI_VENDOR_ID_IODATA 0x10fc +#define PCI_VENDOR_ID_WORKBIT 0x1145 + +#define PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II 0x0005 +#define PCI_DEVICE_ID_NINJASCSI_32BI_KME 0xf007 
+#define PCI_DEVICE_ID_NINJASCSI_32BI_WBT 0x8007 +#define PCI_DEVICE_ID_WORKBIT_STANDARD 0xf010 +#define PCI_DEVICE_ID_WORKBIT_DUALEDGE 0xf011 +#define PCI_DEVICE_ID_NINJASCSI_32BI_LOGITEC 0xf012 +#define PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC 0xf013 +#define PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO 0xf015 +#define PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO_II 0x8009 + +/* + * MODEL + */ +enum { + MODEL_IODATA = 0, + MODEL_KME = 1, + MODEL_WORKBIT = 2, + MODEL_LOGITEC = 3, + MODEL_PCI_WORKBIT = 4, + MODEL_PCI_LOGITEC = 5, + MODEL_PCI_MELCO = 6, +}; + +static char * nsp32_model[] = { + "I-O DATA CBSC-II CardBus card", + "KME SCSI CardBus card", + "Workbit duo SCSI CardBus card", + "Logitec CardBus card with external ROM", + "Workbit / I-O DATA PCI card", + "Logitec PCI card with external ROM", + "Melco CardBus/PCI card with external ROM", +}; + + +/* + * SCSI Generic Definitions + */ +#define EXTENDED_SDTR_LEN 0x03 + +/* Little Endian */ +typedef u32 u32_le; +typedef u16 u16_le; + +/* + * BASIC Definitions + */ +#ifndef TRUE +# define TRUE 1 +#endif +#ifndef FALSE +# define FALSE 0 +#endif +#define ASSERT 1 +#define NEGATE 0 + + +/*******************/ +/* normal register */ +/*******************/ +/* + * Don't access below register with Double Word: + * +00, +04, +08, +0c, +64, +80, +84, +88, +90, +c4, +c8, +cc, +d0. + */ +#define IRQ_CONTROL 0x00 /* BASE+00, W, W */ +#define IRQ_STATUS 0x00 /* BASE+00, W, R */ +# define IRQSTATUS_LATCHED_MSG BIT(0) +# define IRQSTATUS_LATCHED_IO BIT(1) +# define IRQSTATUS_LATCHED_CD BIT(2) +# define IRQSTATUS_LATCHED_BUS_FREE BIT(3) +# define IRQSTATUS_RESELECT_OCCUER BIT(4) +# define IRQSTATUS_PHASE_CHANGE_IRQ BIT(5) +# define IRQSTATUS_SCSIRESET_IRQ BIT(6) +# define IRQSTATUS_TIMER_IRQ BIT(7) +# define IRQSTATUS_FIFO_SHLD_IRQ BIT(8) +# define IRQSTATUS_PCI_IRQ BIT(9) +# define IRQSTATUS_BMCNTERR_IRQ BIT(10) +# define IRQSTATUS_AUTOSCSI_IRQ BIT(11) +# define PCI_IRQ_MASK BIT(12) +# define TIMER_IRQ_MASK BIT(13) +# define FIFO_IRQ_MASK BIT(14) +# define SCSI_IRQ_MASK BIT(15) +# define IRQ_CONTROL_ALL_IRQ_MASK (PCI_IRQ_MASK | \ + TIMER_IRQ_MASK | \ + FIFO_IRQ_MASK | \ + SCSI_IRQ_MASK ) +# define IRQSTATUS_ANY_IRQ (IRQSTATUS_RESELECT_OCCUER | \ + IRQSTATUS_PHASE_CHANGE_IRQ | \ + IRQSTATUS_SCSIRESET_IRQ | \ + IRQSTATUS_TIMER_IRQ | \ + IRQSTATUS_FIFO_SHLD_IRQ | \ + IRQSTATUS_PCI_IRQ | \ + IRQSTATUS_BMCNTERR_IRQ | \ + IRQSTATUS_AUTOSCSI_IRQ ) + +#define TRANSFER_CONTROL 0x02 /* BASE+02, W, W */ +#define TRANSFER_STATUS 0x02 /* BASE+02, W, R */ +# define CB_MMIO_MODE BIT(0) +# define CB_IO_MODE BIT(1) +# define BM_TEST BIT(2) +# define BM_TEST_DIR BIT(3) +# define DUAL_EDGE_ENABLE BIT(4) +# define NO_TRANSFER_TO_HOST BIT(5) +# define TRANSFER_GO BIT(7) +# define BLIEND_MODE BIT(8) +# define BM_START BIT(9) +# define ADVANCED_BM_WRITE BIT(10) +# define BM_SINGLE_MODE BIT(11) +# define FIFO_TRUE_FULL BIT(12) +# define FIFO_TRUE_EMPTY BIT(13) +# define ALL_COUNTER_CLR BIT(14) +# define FIFOTEST BIT(15) + +#define INDEX_REG 0x04 /* BASE+04, Byte(R/W), Word(R) */ + +#define TIMER_SET 0x06 /* BASE+06, W, R/W */ +# define TIMER_CNT_MASK (0xff) +# define TIMER_STOP BIT(8) + +#define DATA_REG_LOW 0x08 /* BASE+08, LowW, R/W */ +#define DATA_REG_HI 0x0a /* BASE+0a, Hi-W, R/W */ + +#define FIFO_REST_CNT 0x0c /* BASE+0c, W, R/W */ +# define FIFO_REST_MASK 0x1ff +# define FIFO_EMPTY_SHLD_FLAG BIT(14) +# define FIFO_FULL_SHLD_FLAG BIT(15) + +#define SREQ_SMPL_RATE 0x0f /* BASE+0f, B, R/W */ +# define SREQSMPLRATE_RATE0 BIT(0) +# define SREQSMPLRATE_RATE1 BIT(1) +# define SAMPLING_ENABLE 
BIT(2) +# define SMPL_40M (0) /* 40MHz: 0-100ns/period */ +# define SMPL_20M (SREQSMPLRATE_RATE0) /* 20MHz: 100-200ns/period */ +# define SMPL_10M (SREQSMPLRATE_RATE1) /* 10Mhz: 200- ns/period */ + +#define SCSI_BUS_CONTROL 0x10 /* BASE+10, B, R/W */ +# define BUSCTL_SEL BIT(0) +# define BUSCTL_RST BIT(1) +# define BUSCTL_DATAOUT_ENB BIT(2) +# define BUSCTL_ATN BIT(3) +# define BUSCTL_ACK BIT(4) +# define BUSCTL_BSY BIT(5) +# define AUTODIRECTION BIT(6) +# define ACKENB BIT(7) + +#define CLR_COUNTER 0x12 /* BASE+12, B, W */ +# define ACK_COUNTER_CLR BIT(0) +# define SREQ_COUNTER_CLR BIT(1) +# define FIFO_HOST_POINTER_CLR BIT(2) +# define FIFO_REST_COUNT_CLR BIT(3) +# define BM_COUNTER_CLR BIT(4) +# define SAVED_ACK_CLR BIT(5) +# define CLRCOUNTER_ALLMASK (ACK_COUNTER_CLR | \ + SREQ_COUNTER_CLR | \ + FIFO_HOST_POINTER_CLR | \ + FIFO_REST_COUNT_CLR | \ + BM_COUNTER_CLR | \ + SAVED_ACK_CLR ) + +#define SCSI_BUS_MONITOR 0x12 /* BASE+12, B, R */ +# define BUSMON_MSG BIT(0) +# define BUSMON_IO BIT(1) +# define BUSMON_CD BIT(2) +# define BUSMON_BSY BIT(3) +# define BUSMON_ACK BIT(4) +# define BUSMON_REQ BIT(5) +# define BUSMON_SEL BIT(6) +# define BUSMON_ATN BIT(7) + +#define COMMAND_DATA 0x14 /* BASE+14, B, R/W */ + +#define PARITY_CONTROL 0x16 /* BASE+16, B, W */ +# define PARITY_CHECK_ENABLE BIT(0) +# define PARITY_ERROR_CLEAR BIT(1) +#define PARITY_STATUS 0x16 /* BASE+16, B, R */ +//# define PARITY_CHECK_ENABLE BIT(0) +# define PARITY_ERROR_NORMAL BIT(1) +# define PARITY_ERROR_LSB BIT(1) +# define PARITY_ERROR_MSB BIT(2) + +#define RESELECT_ID 0x18 /* BASE+18, B, R */ + +#define COMMAND_CONTROL 0x18 /* BASE+18, W, W */ +# define CLEAR_CDB_FIFO_POINTER BIT(0) +# define AUTO_COMMAND_PHASE BIT(1) +# define AUTOSCSI_START BIT(2) +# define AUTOSCSI_RESTART BIT(3) +# define AUTO_PARAMETER BIT(4) +# define AUTO_ATN BIT(5) +# define AUTO_MSGIN_00_OR_04 BIT(6) +# define AUTO_MSGIN_02 BIT(7) +# define AUTO_MSGIN_03 BIT(8) + +#define SET_ARBIT 0x1a /* BASE+1a, B, W */ +# define ARBIT_GO BIT(0) +# define ARBIT_CLEAR BIT(1) + +#define ARBIT_STATUS 0x1a /* BASE+1a, B, R */ +//# define ARBIT_GO BIT(0) +# define ARBIT_WIN BIT(1) +# define ARBIT_FAIL BIT(2) +# define AUTO_PARAMETER_VALID BIT(3) +# define SGT_VALID BIT(4) + +#define SYNC_REG 0x1c /* BASE+1c, B, R/W */ + +#define ACK_WIDTH 0x1d /* BASE+1d, B, R/W */ + +#define SCSI_DATA_WITH_ACK 0x20 /* BASE+20, B, R/W */ +#define SCSI_OUT_LATCH_TARGET_ID 0x22 /* BASE+22, B, W */ +#define SCSI_DATA_IN 0x22 /* BASE+22, B, R */ + +#define SCAM_CONTROL 0x24 /* BASE+24, B, W */ +#define SCAM_STATUS 0x24 /* BASE+24, B, R */ +# define SCAM_MSG BIT(0) +# define SCAM_IO BIT(1) +# define SCAM_CD BIT(2) +# define SCAM_BSY BIT(3) +# define SCAM_SEL BIT(4) +# define SCAM_XFEROK BIT(5) + +#define SCAM_DATA 0x26 /* BASE+26, B, R/W */ +# define SD0 BIT(0) +# define SD1 BIT(1) +# define SD2 BIT(2) +# define SD3 BIT(3) +# define SD4 BIT(4) +# define SD5 BIT(5) +# define SD6 BIT(6) +# define SD7 BIT(7) + +#define SACK_CNT 0x28 /* BASE+28, DW, R/W */ +#define SREQ_CNT 0x2c /* BASE+2c, DW, R/W */ + +#define FIFO_DATA_LOW 0x30 /* BASE+30, B/W/DW, R/W */ +#define FIFO_DATA_HIGH 0x32 /* BASE+32, B/W, R/W */ +#define BM_START_ADR 0x34 /* BASE+34, DW, R/W */ + +#define BM_CNT 0x38 /* BASE+38, DW, R/W */ +# define BM_COUNT_MASK 0x0001ffffUL +# define SGTEND BIT(31) /* Last SGT marker */ + +#define SGT_ADR 0x3c /* BASE+3c, DW, R/W */ +#define WAIT_REG 0x40 /* Bi only */ + +#define SCSI_EXECUTE_PHASE 0x40 /* BASE+40, W, R */ +# define COMMAND_PHASE BIT(0) +# define DATA_IN_PHASE BIT(1) +# 
define DATA_OUT_PHASE BIT(2) +# define MSGOUT_PHASE BIT(3) +# define STATUS_PHASE BIT(4) +# define ILLEGAL_PHASE BIT(5) +# define BUS_FREE_OCCUER BIT(6) +# define MSG_IN_OCCUER BIT(7) +# define MSG_OUT_OCCUER BIT(8) +# define SELECTION_TIMEOUT BIT(9) +# define MSGIN_00_VALID BIT(10) +# define MSGIN_02_VALID BIT(11) +# define MSGIN_03_VALID BIT(12) +# define MSGIN_04_VALID BIT(13) +# define AUTOSCSI_BUSY BIT(15) + +#define SCSI_CSB_IN 0x42 /* BASE+42, B, R */ + +#define SCSI_MSG_OUT 0x44 /* BASE+44, DW, R/W */ +# define MSGOUT_COUNT_MASK (BIT(0)|BIT(1)) +# define MV_VALID BIT(7) + +#define SEL_TIME_OUT 0x48 /* BASE+48, W, R/W */ +#define SAVED_SACK_CNT 0x4c /* BASE+4c, DW, R */ + +#define HTOSDATADELAY 0x50 /* BASE+50, B, R/W */ +#define STOHDATADELAY 0x54 /* BASE+54, B, R/W */ +#define ACKSUMCHECKRD 0x58 /* BASE+58, W, R */ +#define REQSUMCHECKRD 0x5c /* BASE+5c, W, R */ + + +/********************/ +/* indexed register */ +/********************/ + +#define CLOCK_DIV 0x00 /* BASE+08, IDX+00, B, R/W */ +# define CLOCK_2 BIT(0) /* MCLK/2 */ +# define CLOCK_4 BIT(1) /* MCLK/4 */ +# define PCICLK BIT(7) /* PCICLK (33MHz) */ + +#define TERM_PWR_CONTROL 0x01 /* BASE+08, IDX+01, B, R/W */ +# define BPWR BIT(0) +# define SENSE BIT(1) /* Read Only */ + +#define EXT_PORT_DDR 0x02 /* BASE+08, IDX+02, B, R/W */ +#define EXT_PORT 0x03 /* BASE+08, IDX+03, B, R/W */ +# define LED_ON (0) +# define LED_OFF BIT(0) + +#define IRQ_SELECT 0x04 /* BASE+08, IDX+04, W, R/W */ +# define IRQSELECT_RESELECT_IRQ BIT(0) +# define IRQSELECT_PHASE_CHANGE_IRQ BIT(1) +# define IRQSELECT_SCSIRESET_IRQ BIT(2) +# define IRQSELECT_TIMER_IRQ BIT(3) +# define IRQSELECT_FIFO_SHLD_IRQ BIT(4) +# define IRQSELECT_TARGET_ABORT_IRQ BIT(5) +# define IRQSELECT_MASTER_ABORT_IRQ BIT(6) +# define IRQSELECT_SERR_IRQ BIT(7) +# define IRQSELECT_PERR_IRQ BIT(8) +# define IRQSELECT_BMCNTERR_IRQ BIT(9) +# define IRQSELECT_AUTO_SCSI_SEQ_IRQ BIT(10) + +#define OLD_SCSI_PHASE 0x05 /* BASE+08, IDX+05, B, R */ +# define OLD_MSG BIT(0) +# define OLD_IO BIT(1) +# define OLD_CD BIT(2) +# define OLD_BUSY BIT(3) + +#define FIFO_FULL_SHLD_COUNT 0x06 /* BASE+08, IDX+06, B, R/W */ +#define FIFO_EMPTY_SHLD_COUNT 0x07 /* BASE+08, IDX+07, B, R/W */ + +#define EXP_ROM_CONTROL 0x08 /* BASE+08, IDX+08, B, R/W */ /* external ROM control */ +# define ROM_WRITE_ENB BIT(0) +# define IO_ACCESS_ENB BIT(1) +# define ROM_ADR_CLEAR BIT(2) + +#define EXP_ROM_ADR 0x09 /* BASE+08, IDX+09, W, R/W */ + +#define EXP_ROM_DATA 0x0a /* BASE+08, IDX+0a, B, R/W */ + +#define CHIP_MODE 0x0b /* BASE+08, IDX+0b, B, R */ /* NinjaSCSI-32Bi only */ +# define OEM0 BIT(1) /* OEM select */ /* 00=I-O DATA, 01=KME, 10=Workbit, 11=Ext ROM */ +# define OEM1 BIT(2) /* OEM select */ +# define OPTB BIT(3) /* KME mode select */ +# define OPTC BIT(4) /* KME mode select */ +# define OPTD BIT(5) /* KME mode select */ +# define OPTE BIT(6) /* KME mode select */ +# define OPTF BIT(7) /* Power management */ + +#define MISC_WR 0x0c /* BASE+08, IDX+0c, W, R/W */ +#define MISC_RD 0x0c +# define SCSI_DIRECTION_DETECTOR_SELECT BIT(0) +# define SCSI2_HOST_DIRECTION_VALID BIT(1) /* Read only */ +# define HOST2_SCSI_DIRECTION_VALID BIT(2) /* Read only */ +# define DELAYED_BMSTART BIT(3) +# define MASTER_TERMINATION_SELECT BIT(4) +# define BMREQ_NEGATE_TIMING_SEL BIT(5) +# define AUTOSEL_TIMING_SEL BIT(6) +# define MISC_MABORT_MASK BIT(7) +# define BMSTOP_CHANGE2_NONDATA_PHASE BIT(8) + +#define BM_CYCLE 0x0d /* BASE+08, IDX+0d, B, R/W */ +# define BM_CYCLE0 BIT(0) +# define BM_CYCLE1 BIT(1) +# define 
BM_FRAME_ASSERT_TIMING BIT(2) +# define BM_IRDY_ASSERT_TIMING BIT(3) +# define BM_SINGLE_BUS_MASTER BIT(4) +# define MEMRD_CMD0 BIT(5) +# define SGT_AUTO_PARA_MEMED_CMD BIT(6) +# define MEMRD_CMD1 BIT(7) + + +#define SREQ_EDGH 0x0e /* BASE+08, IDX+0e, B, W */ +# define SREQ_EDGH_SELECT BIT(0) + +#define UP_CNT 0x0f /* BASE+08, IDX+0f, B, W */ +# define REQCNT_UP BIT(0) +# define ACKCNT_UP BIT(1) +# define BMADR_UP BIT(4) +# define BMCNT_UP BIT(5) +# define SGT_CNT_UP BIT(7) + +#define CFG_CMD_STR 0x10 /* BASE+08, IDX+10, W, R */ +#define CFG_LATE_CACHE 0x11 /* BASE+08, IDX+11, W, R/W */ +#define CFG_BASE_ADR_1 0x12 /* BASE+08, IDX+12, W, R */ +#define CFG_BASE_ADR_2 0x13 /* BASE+08, IDX+13, W, R */ +#define CFG_INLINE 0x14 /* BASE+08, IDX+14, W, R */ + +#define SERIAL_ROM_CTL 0x15 /* BASE+08, IDX+15, B, R */ +# define SCL BIT(0) +# define ENA BIT(1) +# define SDA BIT(2) + +#define FIFO_HST_POINTER 0x16 /* BASE+08, IDX+16, B, R/W */ +#define SREQ_DELAY 0x17 /* BASE+08, IDX+17, B, R/W */ +#define SACK_DELAY 0x18 /* BASE+08, IDX+18, B, R/W */ +#define SREQ_NOISE_CANCEL 0x19 /* BASE+08, IDX+19, B, R/W */ +#define SDP_NOISE_CANCEL 0x1a /* BASE+08, IDX+1a, B, R/W */ +#define DELAY_TEST 0x1b /* BASE+08, IDX+1b, B, R/W */ +#define SD0_NOISE_CANCEL 0x20 /* BASE+08, IDX+20, B, R/W */ +#define SD1_NOISE_CANCEL 0x21 /* BASE+08, IDX+21, B, R/W */ +#define SD2_NOISE_CANCEL 0x22 /* BASE+08, IDX+22, B, R/W */ +#define SD3_NOISE_CANCEL 0x23 /* BASE+08, IDX+23, B, R/W */ +#define SD4_NOISE_CANCEL 0x24 /* BASE+08, IDX+24, B, R/W */ +#define SD5_NOISE_CANCEL 0x25 /* BASE+08, IDX+25, B, R/W */ +#define SD6_NOISE_CANCEL 0x26 /* BASE+08, IDX+26, B, R/W */ +#define SD7_NOISE_CANCEL 0x27 /* BASE+08, IDX+27, B, R/W */ + + +/* + * Useful Bus Monitor status combinations. + */ +#define BUSMON_BUS_FREE 0 +#define BUSMON_COMMAND ( BUSMON_BSY | BUSMON_CD | BUSMON_REQ ) +#define BUSMON_MESSAGE_IN ( BUSMON_BSY | BUSMON_MSG | BUSMON_IO | BUSMON_CD | BUSMON_REQ ) +#define BUSMON_MESSAGE_OUT ( BUSMON_BSY | BUSMON_MSG | BUSMON_CD | BUSMON_REQ ) +#define BUSMON_DATA_IN ( BUSMON_BSY | BUSMON_IO | BUSMON_REQ ) +#define BUSMON_DATA_OUT ( BUSMON_BSY | BUSMON_REQ ) +#define BUSMON_STATUS ( BUSMON_BSY | BUSMON_IO | BUSMON_CD | BUSMON_REQ ) +#define BUSMON_RESELECT ( BUSMON_IO | BUSMON_SEL) +#define BUSMON_PHASE_MASK ( BUSMON_MSG | BUSMON_IO | BUSMON_CD | BUSMON_SEL) + +#define BUSPHASE_COMMAND ( BUSMON_COMMAND & BUSMON_PHASE_MASK ) +#define BUSPHASE_MESSAGE_IN ( BUSMON_MESSAGE_IN & BUSMON_PHASE_MASK ) +#define BUSPHASE_MESSAGE_OUT ( BUSMON_MESSAGE_OUT & BUSMON_PHASE_MASK ) +#define BUSPHASE_DATA_IN ( BUSMON_DATA_IN & BUSMON_PHASE_MASK ) +#define BUSPHASE_DATA_OUT ( BUSMON_DATA_OUT & BUSMON_PHASE_MASK ) +#define BUSPHASE_STATUS ( BUSMON_STATUS & BUSMON_PHASE_MASK ) +#define BUSPHASE_SELECT ( BUSMON_SEL | BUSMON_IO ) + + +/************************************************************************ + * structure for DMA/Scatter Gather list + */ +#define NSP32_SG_SIZE SG_ALL + +typedef struct _nsp32_sgtable { + /* values must be little endian */ + u32_le addr; /* transfer address */ + u32_le len; /* transfer length. BIT(31) is for SGT_END mark */ +} __attribute__ ((packed)) nsp32_sgtable; + +typedef struct _nsp32_sglun { + nsp32_sgtable sgt[NSP32_SG_SIZE+1]; /* SG table */ +} __attribute__ ((packed)) nsp32_sglun; +#define NSP32_SG_TABLE_SIZE (sizeof(nsp32_sgtable) * NSP32_SG_SIZE * MAX_TARGET * MAX_LUN) + +/* Auto parameter mode memory map. */ +/* All values must be little endian. 
*/ +typedef struct _nsp32_autoparam { + u8 cdb[4 * 0x10]; /* SCSI Command */ + u32_le msgout; /* outgoing messages */ + u8 syncreg; /* sync register value */ + u8 ackwidth; /* ack width register value */ + u8 target_id; /* target/host device id */ + u8 sample_reg; /* hazard killer sampling rate */ + u16_le command_control; /* command control register */ + u16_le transfer_control; /* transfer control register */ + u32_le sgt_pointer; /* SG table physical address for DMA */ + u32_le dummy[2]; +} __attribute__ ((packed)) nsp32_autoparam; /* must be packed struct */ + +/* + * host data structure + */ +/* message in/out buffer */ +#define MSGOUTBUF_MAX 20 +#define MSGINBUF_MAX 20 + +/* flag for trans_method */ +#define NSP32_TRANSFER_BUSMASTER BIT(0) +#define NSP32_TRANSFER_MMIO BIT(1) /* Not supported yet */ +#define NSP32_TRANSFER_PIO BIT(2) /* Not supported yet */ + + +/* + * structure for connected LUN dynamic data + * + * Note: Currently tagged queuing is disabled, each nsp32_lunt holds + * one SCSI command and one state. + */ +#define DISCPRIV_OK BIT(0) /* DISCPRIV Enable mode */ +#define MSGIN03 BIT(1) /* Auto Msg In 03 Flag */ + +typedef struct _nsp32_lunt { + struct scsi_cmnd *SCpnt; /* Current Handling struct scsi_cmnd */ + unsigned long save_datp; /* Save Data Pointer - saved position from initial address */ + int msgin03; /* auto msg in 03 flag */ + unsigned int sg_num; /* Total number of SG entries */ + int cur_entry; /* Current SG entry number */ + nsp32_sglun *sglun; /* sg table per lun */ + dma_addr_t sglun_paddr; /* sglun physical address */ +} nsp32_lunt; + + +/* + * SCSI TARGET/LUN definition + */ +#define NSP32_HOST_SCSIID 7 /* SCSI initiator ID is always defined as 7 */ +#define MAX_TARGET 8 +#define MAX_LUN 8 /* XXX: In SPI3, max number of LUN is 64. 
*/ + + +typedef struct _nsp32_sync_table { + unsigned char period_num; /* period number */ + unsigned char ackwidth; /* ack width designated by period */ + unsigned char start_period; /* search range - start period */ + unsigned char end_period; /* search range - end period */ + unsigned char sample_rate; /* hazard killer parameter */ +} nsp32_sync_table; + + +/* + * structure for target device static data + */ +/* flag for nsp32_target.sync_flag */ +#define SDTR_INITIATOR BIT(0) /* sending SDTR from initiator */ +#define SDTR_TARGET BIT(1) /* sending SDTR from target */ +#define SDTR_DONE BIT(2) /* exchanging SDTR has been processed */ + +/* synchronous period value for nsp32_target.config_max */ +#define FAST5M 0x32 +#define FAST10M 0x19 +#define ULTRA20M 0x0c + +/* flag for nsp32_target.{sync_offset}, period */ +#define ASYNC_OFFSET 0 /* asynchronous transfer */ +#define SYNC_OFFSET 0xf /* synchronous transfer max offset */ + +/* syncreg: + bit:07 06 05 04 03 02 01 00 + ---PERIOD-- ---OFFSET-- */ +#define TO_SYNCREG(period, offset) (((period) & 0x0f) << 4 | ((offset) & 0x0f)) + +struct nsp32_cmd_priv { + enum sam_status status; +}; + +static inline struct nsp32_cmd_priv *nsp32_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +typedef struct _nsp32_target { + unsigned char syncreg; /* value for SYNCREG */ + unsigned char ackwidth; /* value for ACKWIDTH */ + unsigned char period; /* sync period (0-255) */ + unsigned char offset; /* sync offset (0-15) */ + int sync_flag; /* SDTR_*, 0 */ + int limit_entry; /* max speed limit entry designated + by EEPROM configuration */ + unsigned char sample_reg; /* SREQ hazard killer register */ +} nsp32_target; + +typedef struct _nsp32_hw_data { + int IrqNumber; + int BaseAddress; + int NumAddress; + void __iomem *MmioAddress; +#define NSP32_MMIO_OFFSET 0x0800 + unsigned long MmioLength; + + struct scsi_cmnd *CurrentSC; + + struct pci_dev *Pci; + const struct pci_device_id *pci_devid; + struct Scsi_Host *Host; + spinlock_t Lock; + + char info_str[100]; + + /* allocated memory region */ + nsp32_sglun *sg_list; /* sglist virtual address */ + dma_addr_t sg_paddr; /* physical address of hw_sg_table */ + nsp32_autoparam *autoparam; /* auto parameter transfer region */ + dma_addr_t auto_paddr; /* physical address of autoparam */ + int cur_entry; /* current sgt entry */ + + /* target/LUN */ + nsp32_lunt *cur_lunt; /* Current connected LUN table */ + nsp32_lunt lunt[MAX_TARGET][MAX_LUN]; /* All LUN table */ + + nsp32_target *cur_target; /* Current connected SCSI ID */ + nsp32_target target[MAX_TARGET]; /* SCSI ID */ + int cur_id; /* Current connected target ID */ + int cur_lun; /* Current connected target LUN */ + + /* behavior setting parameters */ + int trans_method; /* transfer method flag */ + int resettime; /* Reset time */ + int clock; /* clock dividing flag */ + nsp32_sync_table *synct; /* sync_table determined by clock */ + int syncnum; /* the max number of synct elements */ + + /* message buffer */ + unsigned char msgoutbuf[MSGOUTBUF_MAX]; /* msgout buffer */ + char msgout_len; /* msgoutbuf length */ + unsigned char msginbuf [MSGINBUF_MAX]; /* msgin buffer */ + char msgin_len; /* msginbuf length */ + +} nsp32_hw_data; + +/* + * TIME definition + */ +#define RESET_HOLD_TIME 10000 /* reset time in us (SCSI-2 says the + minimum is 25us) */ +#define SEL_TIMEOUT_TIME 10000 /* 250ms defined in SCSI specification + (25.6us/1unit) */ +#define ARBIT_TIMEOUT_TIME 100 /* 100us */ +#define REQSACK_TIMEOUT_TIME 10000 /* max wait time for REQ/SACK 
assertion + or negation, 10000us == 10ms */ + +#endif /* _NSP32_H */ +/* end */ diff --git a/drivers/scsi/nsp32_debug.c b/drivers/scsi/nsp32_debug.c new file mode 100644 index 000000000..4f1d4bf9c --- /dev/null +++ b/drivers/scsi/nsp32_debug.c @@ -0,0 +1,263 @@ +/* + * Workbit NinjaSCSI-32Bi/UDE PCI/CardBus SCSI Host Bus Adapter driver + * Debug routine + * + * This software may be used and distributed according to the terms of + * the GNU General Public License. + */ + +/* + * Show the command data of a command + */ +static const char unknown[] = "UNKNOWN"; + +static const char * group_0_commands[] = { +/* 00-03 */ "Test Unit Ready", "Rezero Unit", unknown, "Request Sense", +/* 04-07 */ "Format Unit", "Read Block Limits", unknown, "Reassign Blocks", +/* 08-0d */ "Read (6)", unknown, "Write (6)", "Seek (6)", unknown, unknown, +/* 0e-12 */ unknown, "Read Reverse", "Write Filemarks", "Space", "Inquiry", +/* 13-16 */ unknown, "Recover Buffered Data", "Mode Select", "Reserve", +/* 17-1b */ "Release", "Copy", "Erase", "Mode Sense", "Start/Stop Unit", +/* 1c-1d */ "Receive Diagnostic", "Send Diagnostic", +/* 1e-1f */ "Prevent/Allow Medium Removal", unknown, +}; + + +static const char *group_1_commands[] = { +/* 20-22 */ unknown, unknown, unknown, +/* 23-28 */ unknown, unknown, "Read Capacity", unknown, unknown, "Read (10)", +/* 29-2d */ unknown, "Write (10)", "Seek (10)", unknown, unknown, +/* 2e-31 */ "Write Verify","Verify", "Search High", "Search Equal", +/* 32-34 */ "Search Low", "Set Limits", "Prefetch or Read Position", +/* 35-37 */ "Synchronize Cache","Lock/Unlock Cache", "Read Defect Data", +/* 38-3c */ "Medium Scan", "Compare","Copy Verify", "Write Buffer", "Read Buffer", +/* 3d-3f */ "Update Block", "Read Long", "Write Long", +}; + + +static const char *group_2_commands[] = { +/* 40-41 */ "Change Definition", "Write Same", +/* 42-48 */ "Read Sub-Ch(cd)", "Read TOC", "Read Header(cd)", "Play Audio(cd)", unknown, "Play Audio MSF(cd)", "Play Audio Track/Index(cd)", +/* 49-4f */ "Play Track Relative(10)(cd)", unknown, "Pause/Resume(cd)", "Log Select", "Log Sense", unknown, unknown, +/* 50-55 */ unknown, unknown, unknown, unknown, unknown, "Mode Select (10)", +/* 56-5b */ unknown, unknown, unknown, unknown, "Mode Sense (10)", unknown, +/* 5c-5f */ unknown, unknown, unknown, +}; + +#define group(opcode) (((opcode) >> 5) & 7) + +#define RESERVED_GROUP 0 +#define VENDOR_GROUP 1 +#define NOTEXT_GROUP 2 + +static const char **commands[] = { + group_0_commands, group_1_commands, group_2_commands, + (const char **) RESERVED_GROUP, (const char **) RESERVED_GROUP, + (const char **) NOTEXT_GROUP, (const char **) VENDOR_GROUP, + (const char **) VENDOR_GROUP +}; + +static const char reserved[] = "RESERVED"; +static const char vendor[] = "VENDOR SPECIFIC"; + +static void print_opcodek(unsigned char opcode) +{ + const char **table = commands[ group(opcode) ]; + + switch ((unsigned long) table) { + case RESERVED_GROUP: + printk("%s[%02x] ", reserved, opcode); + break; + case NOTEXT_GROUP: + printk("%s(notext)[%02x] ", unknown, opcode); + break; + case VENDOR_GROUP: + printk("%s[%02x] ", vendor, opcode); + break; + default: + if (table[opcode & 0x1f] != unknown) + printk("%s[%02x] ", table[opcode & 0x1f], opcode); + else + printk("%s[%02x] ", unknown, opcode); + break; + } +} + +static void print_commandk (unsigned char *command) +{ + int i,s; +// printk(KERN_DEBUG); + print_opcodek(command[0]); + /*printk(KERN_DEBUG "%s ", __func__);*/ + if ((command[0] >> 5) == 6 || + (command[0] >> 5) == 7 ) { + s = 12; 
/* vender specific */ + } else { + s = COMMAND_SIZE(command[0]); + } + + for ( i = 1; i < s; ++i) { + printk("%02x ", command[i]); + } + + switch (s) { + case 6: + printk("LBA=%d len=%d", + (((unsigned int)command[1] & 0x0f) << 16) | + ( (unsigned int)command[2] << 8) | + ( (unsigned int)command[3] ), + (unsigned int)command[4] + ); + break; + case 10: + printk("LBA=%d len=%d", + ((unsigned int)command[2] << 24) | + ((unsigned int)command[3] << 16) | + ((unsigned int)command[4] << 8) | + ((unsigned int)command[5] ), + ((unsigned int)command[7] << 8) | + ((unsigned int)command[8] ) + ); + break; + case 12: + printk("LBA=%d len=%d", + ((unsigned int)command[2] << 24) | + ((unsigned int)command[3] << 16) | + ((unsigned int)command[4] << 8) | + ((unsigned int)command[5] ), + ((unsigned int)command[6] << 24) | + ((unsigned int)command[7] << 16) | + ((unsigned int)command[8] << 8) | + ((unsigned int)command[9] ) + ); + break; + default: + break; + } + printk("\n"); +} + +static void show_command(struct scsi_cmnd *SCpnt) +{ + print_commandk(SCpnt->cmnd); +} + +static void show_busphase(unsigned char stat) +{ + switch(stat) { + case BUSPHASE_COMMAND: + printk( "BUSPHASE_COMMAND\n"); + break; + case BUSPHASE_MESSAGE_IN: + printk( "BUSPHASE_MESSAGE_IN\n"); + break; + case BUSPHASE_MESSAGE_OUT: + printk( "BUSPHASE_MESSAGE_OUT\n"); + break; + case BUSPHASE_DATA_IN: + printk( "BUSPHASE_DATA_IN\n"); + break; + case BUSPHASE_DATA_OUT: + printk( "BUSPHASE_DATA_OUT\n"); + break; + case BUSPHASE_STATUS: + printk( "BUSPHASE_STATUS\n"); + break; + case BUSPHASE_SELECT: + printk( "BUSPHASE_SELECT\n"); + break; + default: + printk( "BUSPHASE_other: 0x%x\n", stat); + break; + } +} + +static void show_autophase(unsigned short i) +{ + printk("auto: 0x%x,", i); + + if(i & COMMAND_PHASE) { + printk(" cmd"); + } + if(i & DATA_IN_PHASE) { + printk(" din"); + } + if(i & DATA_OUT_PHASE) { + printk(" dout"); + } + if(i & MSGOUT_PHASE) { + printk(" mout"); + } + if(i & STATUS_PHASE) { + printk(" stat"); + } + if(i & ILLEGAL_PHASE) { + printk(" ill"); + } + if(i & BUS_FREE_OCCUER) { + printk(" bfree-o"); + } + if(i & MSG_IN_OCCUER) { + printk(" min-o"); + } + if(i & MSG_OUT_OCCUER) { + printk(" mout-o"); + } + if(i & SELECTION_TIMEOUT) { + printk(" sel"); + } + if(i & MSGIN_00_VALID) { + printk(" m0"); + } + if(i & MSGIN_02_VALID) { + printk(" m2"); + } + if(i & MSGIN_03_VALID) { + printk(" m3"); + } + if(i & MSGIN_04_VALID) { + printk(" m4"); + } + if(i & AUTOSCSI_BUSY) { + printk(" busy"); + } + + printk("\n"); +} + +static void nsp32_print_register(int base) +{ + if (!(NSP32_DEBUG_MASK & NSP32_SPECIAL_PRINT_REGISTER)) + return; + + printk("Phase=0x%x, ", nsp32_read1(base, SCSI_BUS_MONITOR)); + printk("OldPhase=0x%x, ", nsp32_index_read1(base, OLD_SCSI_PHASE)); + printk("syncreg=0x%x, ", nsp32_read1(base, SYNC_REG)); + printk("ackwidth=0x%x, ", nsp32_read1(base, ACK_WIDTH)); + printk("sgtpaddr=0x%lx, ", nsp32_read4(base, SGT_ADR)); + printk("scsioutlatch=0x%x, ", nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID)); + printk("msgout=0x%lx, ", nsp32_read4(base, SCSI_MSG_OUT)); + printk("miscrd=0x%x, ", nsp32_index_read2(base, MISC_WR)); + printk("seltimeout=0x%x, ", nsp32_read2(base, SEL_TIME_OUT)); + printk("sreqrate=0x%x, ", nsp32_read1(base, SREQ_SMPL_RATE)); + printk("transStatus=0x%x, ", nsp32_read2(base, TRANSFER_STATUS)); + printk("reselectid=0x%x, ", nsp32_read2(base, COMMAND_CONTROL)); + printk("arbit=0x%x, ", nsp32_read1(base, ARBIT_STATUS)); + printk("BmStart=0x%lx, ", nsp32_read4(base, BM_START_ADR)); + 
printk("BmCount=0x%lx, ", nsp32_read4(base, BM_CNT)); + printk("SackCnt=0x%lx, ", nsp32_read4(base, SACK_CNT)); + printk("SReqCnt=0x%lx, ", nsp32_read4(base, SREQ_CNT)); + printk("SavedSackCnt=0x%lx, ", nsp32_read4(base, SAVED_SACK_CNT)); + printk("ScsiBusControl=0x%x, ", nsp32_read1(base, SCSI_BUS_CONTROL)); + printk("FifoRestCnt=0x%x, ", nsp32_read2(base, FIFO_REST_CNT)); + printk("CdbIn=0x%x, ", nsp32_read1(base, SCSI_CSB_IN)); + printk("\n"); + + if (0) { + printk("execph=0x%x, ", nsp32_read2(base, SCSI_EXECUTE_PHASE)); + printk("IrqStatus=0x%x, ", nsp32_read2(base, IRQ_STATUS)); + printk("\n"); + } +} + +/* end */ diff --git a/drivers/scsi/nsp32_io.h b/drivers/scsi/nsp32_io.h new file mode 100644 index 000000000..e3f3c27b0 --- /dev/null +++ b/drivers/scsi/nsp32_io.h @@ -0,0 +1,259 @@ +/* + * Workbit NinjaSCSI-32Bi/UDE PCI/CardBus SCSI Host Bus Adapter driver + * I/O routine + * + * This software may be used and distributed according to the terms of + * the GNU General Public License. + */ + +#ifndef _NSP32_IO_H +#define _NSP32_IO_H + +static inline void nsp32_write1(unsigned int base, + unsigned int index, + unsigned char val) +{ + outb(val, (base + index)); +} + +static inline unsigned char nsp32_read1(unsigned int base, + unsigned int index) +{ + return inb(base + index); +} + +static inline void nsp32_write2(unsigned int base, + unsigned int index, + unsigned short val) +{ + outw(val, (base + index)); +} + +static inline unsigned short nsp32_read2(unsigned int base, + unsigned int index) +{ + return inw(base + index); +} + +static inline void nsp32_write4(unsigned int base, + unsigned int index, + unsigned long val) +{ + outl(val, (base + index)); +} + +static inline unsigned long nsp32_read4(unsigned int base, + unsigned int index) +{ + return inl(base + index); +} + +/*==============================================*/ + +static inline void nsp32_mmio_write1(unsigned long base, + unsigned int index, + unsigned char val) +{ + volatile unsigned char *ptr; + + ptr = (unsigned char *)(base + NSP32_MMIO_OFFSET + index); + + writeb(val, ptr); +} + +static inline unsigned char nsp32_mmio_read1(unsigned long base, + unsigned int index) +{ + volatile unsigned char *ptr; + + ptr = (unsigned char *)(base + NSP32_MMIO_OFFSET + index); + + return readb(ptr); +} + +static inline void nsp32_mmio_write2(unsigned long base, + unsigned int index, + unsigned short val) +{ + volatile unsigned short *ptr; + + ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + index); + + writew(cpu_to_le16(val), ptr); +} + +static inline unsigned short nsp32_mmio_read2(unsigned long base, + unsigned int index) +{ + volatile unsigned short *ptr; + + ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + index); + + return le16_to_cpu(readw(ptr)); +} + +static inline void nsp32_mmio_write4(unsigned long base, + unsigned int index, + unsigned long val) +{ + volatile unsigned long *ptr; + + ptr = (unsigned long *)(base + NSP32_MMIO_OFFSET + index); + + writel(cpu_to_le32(val), ptr); +} + +static inline unsigned long nsp32_mmio_read4(unsigned long base, + unsigned int index) +{ + volatile unsigned long *ptr; + + ptr = (unsigned long *)(base + NSP32_MMIO_OFFSET + index); + + return le32_to_cpu(readl(ptr)); +} + +/*==============================================*/ + +static inline unsigned char nsp32_index_read1(unsigned int base, + unsigned int reg) +{ + outb(reg, base + INDEX_REG); + return inb(base + DATA_REG_LOW); +} + +static inline void nsp32_index_write1(unsigned int base, + unsigned int reg, + unsigned char val) +{ + 
outb(reg, base + INDEX_REG ); + outb(val, base + DATA_REG_LOW); +} + +static inline unsigned short nsp32_index_read2(unsigned int base, + unsigned int reg) +{ + outb(reg, base + INDEX_REG); + return inw(base + DATA_REG_LOW); +} + +static inline void nsp32_index_write2(unsigned int base, + unsigned int reg, + unsigned short val) +{ + outb(reg, base + INDEX_REG ); + outw(val, base + DATA_REG_LOW); +} + +static inline unsigned long nsp32_index_read4(unsigned int base, + unsigned int reg) +{ + unsigned long h,l; + + outb(reg, base + INDEX_REG); + l = inw(base + DATA_REG_LOW); + h = inw(base + DATA_REG_HI ); + + return ((h << 16) | l); +} + +static inline void nsp32_index_write4(unsigned int base, + unsigned int reg, + unsigned long val) +{ + unsigned long h,l; + + h = (val & 0xffff0000) >> 16; + l = (val & 0x0000ffff) >> 0; + + outb(reg, base + INDEX_REG ); + outw(l, base + DATA_REG_LOW); + outw(h, base + DATA_REG_HI ); +} + +/*==============================================*/ + +static inline unsigned char nsp32_mmio_index_read1(unsigned long base, + unsigned int reg) +{ + volatile unsigned short *index_ptr, *data_ptr; + + index_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + INDEX_REG); + data_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + DATA_REG_LOW); + + writeb(reg, index_ptr); + return readb(data_ptr); +} + +static inline void nsp32_mmio_index_write1(unsigned long base, + unsigned int reg, + unsigned char val) +{ + volatile unsigned short *index_ptr, *data_ptr; + + index_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + INDEX_REG); + data_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + DATA_REG_LOW); + + writeb(reg, index_ptr); + writeb(val, data_ptr ); +} + +static inline unsigned short nsp32_mmio_index_read2(unsigned long base, + unsigned int reg) +{ + volatile unsigned short *index_ptr, *data_ptr; + + index_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + INDEX_REG); + data_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + DATA_REG_LOW); + + writeb(reg, index_ptr); + return le16_to_cpu(readw(data_ptr)); +} + +static inline void nsp32_mmio_index_write2(unsigned long base, + unsigned int reg, + unsigned short val) +{ + volatile unsigned short *index_ptr, *data_ptr; + + index_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + INDEX_REG); + data_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + DATA_REG_LOW); + + writeb(reg, index_ptr); + writew(cpu_to_le16(val), data_ptr ); +} + +/*==============================================*/ + +static inline void nsp32_multi_read4(unsigned int base, + unsigned int reg, + void *buf, + unsigned long count) +{ + insl(base + reg, buf, count); +} + +static inline void nsp32_fifo_read(unsigned int base, + void *buf, + unsigned long count) +{ + nsp32_multi_read4(base, FIFO_DATA_LOW, buf, count); +} + +static inline void nsp32_multi_write4(unsigned int base, + unsigned int reg, + void *buf, + unsigned long count) +{ + outsl(base + reg, buf, count); +} + +static inline void nsp32_fifo_write(unsigned int base, + void *buf, + unsigned long count) +{ + nsp32_multi_write4(base, FIFO_DATA_LOW, buf, count); +} + +#endif /* _NSP32_IO_H */ +/* end */ diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig new file mode 100644 index 000000000..449bd85db --- /dev/null +++ b/drivers/scsi/pcmcia/Kconfig @@ -0,0 +1,89 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# PCMCIA SCSI adapter configuration +# + +menuconfig SCSI_LOWLEVEL_PCMCIA + bool "PCMCIA SCSI adapter support" + depends on SCSI!=n && PCMCIA!=n + +# drivers have problems 
when build in, so require modules +if SCSI_LOWLEVEL_PCMCIA && SCSI && PCMCIA && m + +config PCMCIA_AHA152X + tristate "Adaptec AHA152X PCMCIA support" + depends on HAS_IOPORT + select SCSI_SPI_ATTRS + help + Say Y here if you intend to attach this type of PCMCIA SCSI host + adapter to your computer. + + To compile this driver as a module, choose M here: the + module will be called aha152x_cs. + +config PCMCIA_FDOMAIN + tristate "Future Domain PCMCIA support" + depends on HAS_IOPORT + select SCSI_FDOMAIN + help + Say Y here if you intend to attach this type of PCMCIA SCSI host + adapter to your computer. + + To compile this driver as a module, choose M here: the + module will be called fdomain_cs. + +config PCMCIA_NINJA_SCSI + tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support" + depends on (!64BIT || COMPILE_TEST) && HAS_IOPORT + help + If you intend to attach this type of PCMCIA SCSI host adapter to + your computer, say Y here and read + . + + Supported cards: + + NinjaSCSI-3: (version string: "WBT","NinjaSCSI-3","R1.0") + IO-DATA PCSC-FP + ALPHA DATA AD-PCS201 + CyQ've SFC-201 + LOGITECH LPM-SCSI2E + Pioneer PCR-PR24's card + I-O DATA CDPS-PX24's card (PCSC-F) + Panasonic KXL-RW10AN CD-RW's card + etc. + + NinjaSCSI-32Bit (in 16bit mode): + [Workbit (version string: "WORKBIT","UltraNinja-16","1")] + Jazz SCP050 + [I-O DATA (OEM) (version string: "IO DATA","CBSC16 ","1")] + I-O DATA CBSC-II + [Kyusyu Matsushita Kotobuki (OEM) + (version string: "KME ","SCSI-CARD-001","1")] + KME KXL-820AN's card + HP M820e CDRW's card + etc. + + To compile this driver as a module, choose M here: the + module will be called nsp_cs. + +config PCMCIA_QLOGIC + tristate "Qlogic PCMCIA support" + depends on HAS_IOPORT + help + Say Y here if you intend to attach this type of PCMCIA SCSI host + adapter to your computer. + + To compile this driver as a module, choose M here: the + module will be called qlogic_cs. + +config PCMCIA_SYM53C500 + tristate "Symbios 53c500 PCMCIA support" + depends on HAS_IOPORT + help + Say Y here if you have a New Media Bus Toaster or other PCMCIA + SCSI adapter based on the Symbios 53c500 controller. + + To compile this driver as a module, choose M here: the + module will be called sym53c500_cs. + +endif # SCSI_LOWLEVEL_PCMCIA diff --git a/drivers/scsi/pcmcia/Makefile b/drivers/scsi/pcmcia/Makefile new file mode 100644 index 000000000..02f5b44a2 --- /dev/null +++ b/drivers/scsi/pcmcia/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 + +ccflags-y := -I $(srctree)/drivers/scsi + +# 16-bit client drivers +obj-$(CONFIG_PCMCIA_QLOGIC) += qlogic_cs.o +obj-$(CONFIG_PCMCIA_FDOMAIN) += fdomain_cs.o +obj-$(CONFIG_PCMCIA_AHA152X) += aha152x_cs.o +obj-$(CONFIG_PCMCIA_NINJA_SCSI) += nsp_cs.o +obj-$(CONFIG_PCMCIA_SYM53C500) += sym53c500_cs.o + +aha152x_cs-objs := aha152x_stub.o aha152x_core.o +qlogic_cs-objs := qlogic_stub.o diff --git a/drivers/scsi/pcmcia/aha152x_core.c b/drivers/scsi/pcmcia/aha152x_core.c new file mode 100644 index 000000000..24b89228b --- /dev/null +++ b/drivers/scsi/pcmcia/aha152x_core.c @@ -0,0 +1,3 @@ +#define AHA152X_PCMCIA 1 +#define AHA152X_STAT 1 +#include "aha152x.c" diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c new file mode 100644 index 000000000..6a6621728 --- /dev/null +++ b/drivers/scsi/pcmcia/aha152x_stub.c @@ -0,0 +1,226 @@ +/*====================================================================== + + A driver for Adaptec AHA152X-compatible PCMCIA SCSI cards. 
+ + This driver supports the Adaptec AHA-1460, the New Media Bus + Toaster, and the New Media Toast & Jam. + + aha152x_cs.c 1.54 2000/06/12 21:27:25 + + The contents of this file are subject to the Mozilla Public + License Version 1.1 (the "License"); you may not use this file + except in compliance with the License. You may obtain a copy of + the License at http://www.mozilla.org/MPL/ + + Software distributed under the License is distributed on an "AS + IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + implied. See the License for the specific language governing + rights and limitations under the License. + + The initial developer of the original code is David A. Hinds + . Portions created by David A. Hinds + are Copyright (C) 1999 David A. Hinds. All Rights Reserved. + + Alternatively, the contents of this file may be used under the + terms of the GNU General Public License version 2 (the "GPL"), in which + case the provisions of the GPL are applicable instead of the + above. If you wish to allow the use of your version of this file + only under the terms of the GPL and not to allow others to use + your version of this file under the MPL, indicate your decision + by deleting the provisions above and replace them with the notice + and other provisions required by the GPL. If you do not delete + the provisions above, a recipient may use your version of this + file under either the MPL or the GPL. + +======================================================================*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include "aha152x.h" + +#include +#include + + +/*====================================================================*/ + +/* Parameters that can be set with 'insmod' */ + +/* SCSI bus setup options */ +static int host_id = 7; +static int reconnect = 1; +static int parity = 1; +static int synchronous = 1; +static int reset_delay = 100; +static int ext_trans = 0; + +module_param(host_id, int, 0); +module_param(reconnect, int, 0); +module_param(parity, int, 0); +module_param(synchronous, int, 0); +module_param(reset_delay, int, 0); +module_param(ext_trans, int, 0); + +MODULE_LICENSE("Dual MPL/GPL"); + +/*====================================================================*/ + +typedef struct scsi_info_t { + struct pcmcia_device *p_dev; + struct Scsi_Host *host; +} scsi_info_t; + +static void aha152x_release_cs(struct pcmcia_device *link); +static void aha152x_detach(struct pcmcia_device *p_dev); +static int aha152x_config_cs(struct pcmcia_device *link); + +static int aha152x_probe(struct pcmcia_device *link) +{ + scsi_info_t *info; + + dev_dbg(&link->dev, "aha152x_attach()\n"); + + /* Create new SCSI device */ + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) return -ENOMEM; + info->p_dev = link; + link->priv = info; + + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + link->config_regs = PRESENT_OPTION; + + return aha152x_config_cs(link); +} /* aha152x_attach */ + +/*====================================================================*/ + +static void aha152x_detach(struct pcmcia_device *link) +{ + dev_dbg(&link->dev, "aha152x_detach\n"); + + aha152x_release_cs(link); + + /* Unlink device structure, free bits */ + kfree(link->priv); +} /* aha152x_detach */ + +/*====================================================================*/ + +static int aha152x_config_check(struct pcmcia_device *p_dev, void *priv_data) +{ + p_dev->io_lines = 10; + + 
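/* the card decodes only 10 I/O address lines */ +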
/* For New Media T&J, look for a SCSI window */ + if ((p_dev->resource[0]->end < 0x20) && + (p_dev->resource[1]->end >= 0x20)) + p_dev->resource[0]->start = p_dev->resource[1]->start; + + if (p_dev->resource[0]->start >= 0xffff) + return -EINVAL; + + p_dev->resource[1]->start = p_dev->resource[1]->end = 0; + p_dev->resource[0]->end = 0x20; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + + return pcmcia_request_io(p_dev); +} + +static int aha152x_config_cs(struct pcmcia_device *link) +{ + scsi_info_t *info = link->priv; + struct aha152x_setup s; + int ret; + struct Scsi_Host *host; + + dev_dbg(&link->dev, "aha152x_config\n"); + + ret = pcmcia_loop_config(link, aha152x_config_check, NULL); + if (ret) + goto failed; + + if (!link->irq) + goto failed; + + ret = pcmcia_enable_device(link); + if (ret) + goto failed; + + /* Set configuration options for the aha152x driver */ + memset(&s, 0, sizeof(s)); + s.conf = "PCMCIA setup"; + s.io_port = link->resource[0]->start; + s.irq = link->irq; + s.scsiid = host_id; + s.reconnect = reconnect; + s.parity = parity; + s.synchronous = synchronous; + s.delay = reset_delay; + if (ext_trans) + s.ext_trans = ext_trans; + + host = aha152x_probe_one(&s); + if (host == NULL) { + printk(KERN_INFO "aha152x_cs: no SCSI devices found\n"); + goto failed; + } + + info->host = host; + + return 0; + +failed: + aha152x_release_cs(link); + return -ENODEV; +} + +static void aha152x_release_cs(struct pcmcia_device *link) +{ + scsi_info_t *info = link->priv; + + aha152x_release(info->host); + pcmcia_disable_device(link); +} + +static int aha152x_resume(struct pcmcia_device *link) +{ + scsi_info_t *info = link->priv; + + aha152x_host_reset_host(info->host); + + return 0; +} + +static const struct pcmcia_device_id aha152x_ids[] = { + PCMCIA_DEVICE_PROD_ID123("New Media", "SCSI", "Bus Toaster", 0xcdf7e4cc, 0x35f26476, 0xa8851d6e), + PCMCIA_DEVICE_PROD_ID123("NOTEWORTHY", "SCSI", "Bus Toaster", 0xad89c6e8, 0x35f26476, 0xa8851d6e), + PCMCIA_DEVICE_PROD_ID12("Adaptec, Inc.", "APA-1460 SCSI Host Adapter", 0x24ba9738, 0x3a3c3d20), + PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "Multimedia Sound/SCSI", 0x085a850b, 0x80a6535c), + PCMCIA_DEVICE_PROD_ID12("NOTEWORTHY", "NWCOMB02 SCSI/AUDIO COMBO CARD", 0xad89c6e8, 0x5f9a615b), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, aha152x_ids); + +static struct pcmcia_driver aha152x_cs_driver = { + .owner = THIS_MODULE, + .name = "aha152x_cs", + .probe = aha152x_probe, + .remove = aha152x_detach, + .id_table = aha152x_ids, + .resume = aha152x_resume, +}; +module_pcmcia_driver(aha152x_cs_driver); diff --git a/drivers/scsi/pcmcia/fdomain_cs.c b/drivers/scsi/pcmcia/fdomain_cs.c new file mode 100644 index 000000000..33df6a9ba --- /dev/null +++ b/drivers/scsi/pcmcia/fdomain_cs.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) +/* + * Driver for Future Domain-compatible PCMCIA SCSI cards + * Copyright 2019 Ondrej Zary + * + * The initial developer of the original code is David A. Hinds + * . Portions created by David A. Hinds + * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. 
+ */ + +#include +#include +#include +#include +#include +#include "fdomain.h" + +MODULE_AUTHOR("Ondrej Zary, David Hinds"); +MODULE_DESCRIPTION("Future Domain PCMCIA SCSI driver"); +MODULE_LICENSE("Dual MPL/GPL"); + +static int fdomain_config_check(struct pcmcia_device *p_dev, void *priv_data) +{ + p_dev->io_lines = 10; + p_dev->resource[0]->end = FDOMAIN_REGION_SIZE; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + return pcmcia_request_io(p_dev); +} + +static int fdomain_probe(struct pcmcia_device *link) +{ + int ret; + struct Scsi_Host *sh; + + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + link->config_regs = PRESENT_OPTION; + + ret = pcmcia_loop_config(link, fdomain_config_check, NULL); + if (ret) + return ret; + + ret = pcmcia_enable_device(link); + if (ret) + goto fail_disable; + + if (!request_region(link->resource[0]->start, FDOMAIN_REGION_SIZE, + "fdomain_cs")) { + ret = -EBUSY; + goto fail_disable; + } + + sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev); + if (!sh) { + dev_err(&link->dev, "Controller initialization failed"); + ret = -ENODEV; + goto fail_release; + } + + link->priv = sh; + + return 0; + +fail_release: + release_region(link->resource[0]->start, FDOMAIN_REGION_SIZE); +fail_disable: + pcmcia_disable_device(link); + return ret; +} + +static void fdomain_remove(struct pcmcia_device *link) +{ + fdomain_destroy(link->priv); + release_region(link->resource[0]->start, FDOMAIN_REGION_SIZE); + pcmcia_disable_device(link); +} + +static const struct pcmcia_device_id fdomain_ids[] = { + PCMCIA_DEVICE_PROD_ID12("IBM Corp.", "SCSI PCMCIA Card", 0xe3736c88, + 0x859cad20), + PCMCIA_DEVICE_PROD_ID1("SCSI PCMCIA Adapter Card", 0x8dacb57e), + PCMCIA_DEVICE_PROD_ID12(" SIMPLE TECHNOLOGY Corporation", + "SCSI PCMCIA Credit Card Controller", + 0x182bdafe, 0xc80d106f), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, fdomain_ids); + +static struct pcmcia_driver fdomain_cs_driver = { + .owner = THIS_MODULE, + .name = "fdomain_cs", + .probe = fdomain_probe, + .remove = fdomain_remove, + .id_table = fdomain_ids, +}; + +module_pcmcia_driver(fdomain_cs_driver); diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c new file mode 100644 index 000000000..a5a1406a2 --- /dev/null +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -0,0 +1,1758 @@ +/*====================================================================== + + NinjaSCSI-3 / NinjaSCSI-32Bi PCMCIA SCSI host adapter card driver + By: YOKOTA Hiroshi + + Ver.2.8 Support 32bit MMIO mode + Support Synchronous Data Transfer Request (SDTR) mode + Ver.2.0 Support 32bit PIO mode + Ver.1.1.2 Fix for scatter list buffer exceeds + Ver.1.1 Support scatter list + Ver.0.1 Initial version + + This software may be used and distributed according to the terms of + the GNU General Public License. + +======================================================================*/ + +/*********************************************************************** + This driver is for these PCcards. 
+ + I-O DATA PCSC-F (Workbit NinjaSCSI-3) + "WBT", "NinjaSCSI-3", "R1.0" + I-O DATA CBSC-II (Workbit NinjaSCSI-32Bi in 16bit mode) + "IO DATA", "CBSC16 ", "1" + +***********************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include "nsp_cs.h" + +MODULE_AUTHOR("YOKOTA Hiroshi "); +MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module"); +MODULE_LICENSE("GPL"); + +#include "nsp_io.h" + +/*====================================================================*/ +/* Parameters that can be set with 'insmod' */ + +static int nsp_burst_mode = BURST_MEM32; +module_param(nsp_burst_mode, int, 0); +MODULE_PARM_DESC(nsp_burst_mode, "Burst transfer mode (0=io8, 1=io32, 2=mem32(default))"); + +/* Release IO ports after configuration? */ +static bool free_ports = 0; +module_param(free_ports, bool, 0); +MODULE_PARM_DESC(free_ports, "Release IO ports after configuration? (default: 0 (=no))"); + +static struct scsi_pointer *nsp_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +static struct scsi_host_template nsp_driver_template = { + .proc_name = "nsp_cs", + .show_info = nsp_show_info, + .name = "WorkBit NinjaSCSI-3/32Bi(16bit)", + .info = nsp_info, + .queuecommand = nsp_queuecommand, +/* .eh_abort_handler = nsp_eh_abort,*/ + .eh_bus_reset_handler = nsp_eh_bus_reset, + .eh_host_reset_handler = nsp_eh_host_reset, + .can_queue = 1, + .this_id = NSP_INITIATOR_ID, + .sg_tablesize = SG_ALL, + .dma_boundary = PAGE_SIZE - 1, + .cmd_size = sizeof(struct scsi_pointer), +}; + +static nsp_hw_data nsp_data_base; /* attach <-> detect glue */ + + + +/* + * debug, error print + */ +#ifndef NSP_DEBUG +# define NSP_DEBUG_MASK 0x000000 +# define nsp_msg(type, args...) nsp_cs_message("", 0, (type), args) +# define nsp_dbg(mask, args...) /* */ +#else +# define NSP_DEBUG_MASK 0xffffff +# define nsp_msg(type, args...) \ + nsp_cs_message (__func__, __LINE__, (type), args) +# define nsp_dbg(mask, args...) \ + nsp_cs_dmessage(__func__, __LINE__, (mask), args) +#endif + +#define NSP_DEBUG_QUEUECOMMAND BIT(0) +#define NSP_DEBUG_REGISTER BIT(1) +#define NSP_DEBUG_AUTOSCSI BIT(2) +#define NSP_DEBUG_INTR BIT(3) +#define NSP_DEBUG_SGLIST BIT(4) +#define NSP_DEBUG_BUSFREE BIT(5) +#define NSP_DEBUG_CDB_CONTENTS BIT(6) +#define NSP_DEBUG_RESELECTION BIT(7) +#define NSP_DEBUG_MSGINOCCUR BIT(8) +#define NSP_DEBUG_EEPROM BIT(9) +#define NSP_DEBUG_MSGOUTOCCUR BIT(10) +#define NSP_DEBUG_BUSRESET BIT(11) +#define NSP_DEBUG_RESTART BIT(12) +#define NSP_DEBUG_SYNC BIT(13) +#define NSP_DEBUG_WAIT BIT(14) +#define NSP_DEBUG_TARGETFLAG BIT(15) +#define NSP_DEBUG_PROC BIT(16) +#define NSP_DEBUG_INIT BIT(17) +#define NSP_DEBUG_DATA_IO BIT(18) +#define NSP_SPECIAL_PRINT_REGISTER BIT(20) + +#define NSP_DEBUG_BUF_LEN 150 + +static inline void nsp_inc_resid(struct scsi_cmnd *SCpnt, int residInc) +{ + scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) + residInc); +} + +__printf(4, 5) +static void nsp_cs_message(const char *func, int line, char *type, char *fmt, ...) 
+{ + va_list args; + char buf[NSP_DEBUG_BUF_LEN]; + + va_start(args, fmt); + vsnprintf(buf, sizeof(buf), fmt, args); + va_end(args); + +#ifndef NSP_DEBUG + printk("%snsp_cs: %s\n", type, buf); +#else + printk("%snsp_cs: %s (%d): %s\n", type, func, line, buf); +#endif +} + +#ifdef NSP_DEBUG +static void nsp_cs_dmessage(const char *func, int line, int mask, char *fmt, ...) +{ + va_list args; + char buf[NSP_DEBUG_BUF_LEN]; + + va_start(args, fmt); + vsnprintf(buf, sizeof(buf), fmt, args); + va_end(args); + + if (mask & NSP_DEBUG_MASK) { + printk("nsp_cs-debug: 0x%x %s (%d): %s\n", mask, func, line, buf); + } +} +#endif + +/***********************************************************/ + +/*==================================================== + * Clean up parameters and call the done() function. + * You must set SCpnt->result before calling this function. + */ +static void nsp_scsi_done(struct scsi_cmnd *SCpnt) +{ + nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; + + data->CurrentSC = NULL; + + scsi_done(SCpnt); +} + +static int nsp_queuecommand_lck(struct scsi_cmnd *const SCpnt) +{ + struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt); +#ifdef NSP_DEBUG + /*unsigned int host_id = SCpnt->device->host->this_id;*/ + /*unsigned int base = SCpnt->device->host->io_port;*/ + unsigned char target = scmd_id(SCpnt); +#endif + nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; + + nsp_dbg(NSP_DEBUG_QUEUECOMMAND, + "SCpnt=0x%p target=%d lun=%llu sglist=0x%p bufflen=%d sg_count=%d", + SCpnt, target, SCpnt->device->lun, scsi_sglist(SCpnt), + scsi_bufflen(SCpnt), scsi_sg_count(SCpnt)); + //nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC); + + if (data->CurrentSC != NULL) { + nsp_msg(KERN_DEBUG, "CurrentSC!=NULL, this can't happen"); + SCpnt->result = DID_BAD_TARGET << 16; + nsp_scsi_done(SCpnt); + return 0; + } + +#if 0 + /* XXX: pcmcia-cs generates SCSI commands with the "scsi_info" utility. + This makes the kernel crash when suspending... */ + if (data->ScsiInfo->stop != 0) { + nsp_msg(KERN_INFO, "suspending device. 
reject command."); + SCpnt->result = DID_BAD_TARGET << 16; + nsp_scsi_done(SCpnt); + return SCSI_MLQUEUE_HOST_BUSY; + } +#endif + + show_command(SCpnt); + + data->CurrentSC = SCpnt; + + scsi_pointer->Status = SAM_STAT_CHECK_CONDITION; + scsi_pointer->Message = 0; + scsi_pointer->have_data_in = IO_UNKNOWN; + scsi_pointer->sent_command = 0; + scsi_pointer->phase = PH_UNDETERMINED; + scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); + + /* setup scratch area + SCp.ptr : buffer pointer + SCp.this_residual : buffer length + SCp.buffer : next buffer + SCp.buffers_residual : left buffers in list + SCp.phase : current state of the command */ + if (scsi_bufflen(SCpnt)) { + scsi_pointer->buffer = scsi_sglist(SCpnt); + scsi_pointer->ptr = BUFFER_ADDR(SCpnt); + scsi_pointer->this_residual = scsi_pointer->buffer->length; + scsi_pointer->buffers_residual = scsi_sg_count(SCpnt) - 1; + } else { + scsi_pointer->ptr = NULL; + scsi_pointer->this_residual = 0; + scsi_pointer->buffer = NULL; + scsi_pointer->buffers_residual = 0; + } + + if (!nsphw_start_selection(SCpnt)) { + nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "selection fail"); + SCpnt->result = DID_BUS_BUSY << 16; + nsp_scsi_done(SCpnt); + return 0; + } + + + //nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "out"); +#ifdef NSP_DEBUG + data->CmdId++; +#endif + return 0; +} + +static DEF_SCSI_QCMD(nsp_queuecommand) + +/* + * setup PIO FIFO transfer mode and enable/disable to data out + */ +static void nsp_setup_fifo(nsp_hw_data *data, bool enabled) +{ + unsigned int base = data->BaseAddress; + unsigned char transfer_mode_reg; + + //nsp_dbg(NSP_DEBUG_DATA_IO, "enabled=%d", enabled); + + if (enabled) { + transfer_mode_reg = TRANSFER_GO | BRAIND; + } else { + transfer_mode_reg = 0; + } + + transfer_mode_reg |= data->TransferMode; + + nsp_index_write(base, TRANSFERMODE, transfer_mode_reg); +} + +static void nsphw_init_sync(nsp_hw_data *data) +{ + sync_data tmp_sync = { .SyncNegotiation = SYNC_NOT_YET, + .SyncPeriod = 0, + .SyncOffset = 0 + }; + int i; + + /* setup sync data */ + for ( i = 0; i < ARRAY_SIZE(data->Sync); i++ ) { + data->Sync[i] = tmp_sync; + } +} + +/* + * Initialize Ninja hardware + */ +static void nsphw_init(nsp_hw_data *data) +{ + unsigned int base = data->BaseAddress; + + nsp_dbg(NSP_DEBUG_INIT, "in base=0x%x", base); + + data->ScsiClockDiv = CLOCK_40M | FAST_20; + data->CurrentSC = NULL; + data->FifoCount = 0; + data->TransferMode = MODE_IO8; + + nsphw_init_sync(data); + + /* block all interrupts */ + nsp_write(base, IRQCONTROL, IRQCONTROL_ALLMASK); + + /* setup SCSI interface */ + nsp_write(base, IFSELECT, IF_IFSEL); + + nsp_index_write(base, SCSIIRQMODE, 0); + + nsp_index_write(base, TRANSFERMODE, MODE_IO8); + nsp_index_write(base, CLOCKDIV, data->ScsiClockDiv); + + nsp_index_write(base, PARITYCTRL, 0); + nsp_index_write(base, POINTERCLR, POINTER_CLEAR | + ACK_COUNTER_CLEAR | + REQ_COUNTER_CLEAR | + HOST_COUNTER_CLEAR); + + /* setup fifo asic */ + nsp_write(base, IFSELECT, IF_REGSEL); + nsp_index_write(base, TERMPWRCTRL, 0); + if ((nsp_index_read(base, OTHERCONTROL) & TPWR_SENSE) == 0) { + nsp_msg(KERN_INFO, "terminator power on"); + nsp_index_write(base, TERMPWRCTRL, POWER_ON); + } + + nsp_index_write(base, TIMERCOUNT, 0); + nsp_index_write(base, TIMERCOUNT, 0); /* requires 2 times!! 
*/ + + nsp_index_write(base, SYNCREG, 0); + nsp_index_write(base, ACKWIDTH, 0); + + /* enable interrupts and ack them */ + nsp_index_write(base, SCSIIRQMODE, SCSI_PHASE_CHANGE_EI | + RESELECT_EI | + SCSI_RESET_IRQ_EI ); + nsp_write(base, IRQCONTROL, IRQCONTROL_ALLCLEAR); + + nsp_setup_fifo(data, false); +} + +/* + * Start selection phase + */ +static bool nsphw_start_selection(struct scsi_cmnd *const SCpnt) +{ + struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt); + unsigned int host_id = SCpnt->device->host->this_id; + unsigned int base = SCpnt->device->host->io_port; + unsigned char target = scmd_id(SCpnt); + nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; + int time_out; + unsigned char phase, arbit; + + //nsp_dbg(NSP_DEBUG_RESELECTION, "in"); + + phase = nsp_index_read(base, SCSIBUSMON); + if(phase != BUSMON_BUS_FREE) { + //nsp_dbg(NSP_DEBUG_RESELECTION, "bus busy"); + return false; + } + + /* start arbitration */ + //nsp_dbg(NSP_DEBUG_RESELECTION, "start arbit"); + scsi_pointer->phase = PH_ARBSTART; + nsp_index_write(base, SETARBIT, ARBIT_GO); + + time_out = 1000; + do { + /* XXX: what a stupid chip! */ + arbit = nsp_index_read(base, ARBITSTATUS); + //nsp_dbg(NSP_DEBUG_RESELECTION, "arbit=%d, wait_count=%d", arbit, wait_count); + udelay(1); /* hold 1.2us */ + } while((arbit & (ARBIT_WIN | ARBIT_FAIL)) == 0 && + (time_out-- != 0)); + + if (!(arbit & ARBIT_WIN)) { + //nsp_dbg(NSP_DEBUG_RESELECTION, "arbit fail"); + nsp_index_write(base, SETARBIT, ARBIT_FLAG_CLEAR); + return false; + } + + /* assert select line */ + //nsp_dbg(NSP_DEBUG_RESELECTION, "assert SEL line"); + scsi_pointer->phase = PH_SELSTART; + udelay(3); /* wait 2.4us */ + nsp_index_write(base, SCSIDATALATCH, BIT(host_id) | BIT(target)); + nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_BSY | SCSI_ATN); + udelay(2); /* wait >1.2us */ + nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_BSY | SCSI_DATAOUT_ENB | SCSI_ATN); + nsp_index_write(base, SETARBIT, ARBIT_FLAG_CLEAR); + /*udelay(1);*/ /* wait >90ns */ + nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_DATAOUT_ENB | SCSI_ATN); + + /* check selection timeout */ + nsp_start_timer(SCpnt, 1000/51); + data->SelectionTimeOut = 1; + + return true; +} + +struct nsp_sync_table { + unsigned int min_period; + unsigned int max_period; + unsigned int chip_period; + unsigned int ack_width; +}; + +static struct nsp_sync_table nsp_sync_table_40M[] = { + {0x0c, 0x0c, 0x1, 0}, /* 20MB 50ns*/ + {0x19, 0x19, 0x3, 1}, /* 10MB 100ns*/ + {0x1a, 0x25, 0x5, 2}, /* 7.5MB 150ns*/ + {0x26, 0x32, 0x7, 3}, /* 5MB 200ns*/ + { 0, 0, 0, 0}, +}; + +static struct nsp_sync_table nsp_sync_table_20M[] = { + {0x19, 0x19, 0x1, 0}, /* 10MB 100ns*/ + {0x1a, 0x25, 0x2, 0}, /* 7.5MB 150ns*/ + {0x26, 0x32, 0x3, 1}, /* 5MB 200ns*/ + { 0, 0, 0, 0}, +}; + +/* + * setup synchronous data transfer mode + */ +static int nsp_analyze_sdtr(struct scsi_cmnd *SCpnt) +{ + unsigned char target = scmd_id(SCpnt); +// unsigned char lun = SCpnt->device->lun; + nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; + sync_data *sync = &(data->Sync[target]); + struct nsp_sync_table *sync_table; + unsigned int period, offset; + + nsp_dbg(NSP_DEBUG_SYNC, "in"); + + period = sync->SyncPeriod; + offset = sync->SyncOffset; + + nsp_dbg(NSP_DEBUG_SYNC, "period=0x%x, offset=0x%x", period, offset); + + if ((data->ScsiClockDiv & (BIT(0)|BIT(1))) == CLOCK_20M) { + sync_table = nsp_sync_table_20M; + } else { + sync_table = nsp_sync_table_40M; + } + + for (; sync_table->max_period != 0; sync_table++) { + if ( 
period >= sync_table->min_period && + period <= sync_table->max_period ) { + break; + } + } + + if (period != 0 && sync_table->max_period == 0) { + /* + * No proper period/offset found + */ + nsp_dbg(NSP_DEBUG_SYNC, "no proper period/offset"); + + sync->SyncPeriod = 0; + sync->SyncOffset = 0; + sync->SyncRegister = 0; + sync->AckWidth = 0; + + return false; + } + + sync->SyncRegister = (sync_table->chip_period << SYNCREG_PERIOD_SHIFT) | + (offset & SYNCREG_OFFSET_MASK); + sync->AckWidth = sync_table->ack_width; + + nsp_dbg(NSP_DEBUG_SYNC, "sync_reg=0x%x, ack_width=0x%x", sync->SyncRegister, sync->AckWidth); + + return true; +} + + +/* + * start ninja hardware timer + */ +static void nsp_start_timer(struct scsi_cmnd *SCpnt, int time) +{ + unsigned int base = SCpnt->device->host->io_port; + nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; + + //nsp_dbg(NSP_DEBUG_INTR, "in SCpnt=0x%p, time=%d", SCpnt, time); + data->TimerCount = time; + nsp_index_write(base, TIMERCOUNT, time); +} + +/* + * wait for bus phase change + */ +static int nsp_negate_signal(struct scsi_cmnd *SCpnt, unsigned char mask, + char *str) +{ + unsigned int base = SCpnt->device->host->io_port; + unsigned char reg; + int time_out; + + //nsp_dbg(NSP_DEBUG_INTR, "in"); + + time_out = 100; + + do { + reg = nsp_index_read(base, SCSIBUSMON); + if (reg == 0xff) { + break; + } + } while ((--time_out != 0) && (reg & mask) != 0); + + if (time_out == 0) { + nsp_msg(KERN_DEBUG, " %s signal off timeout", str); + } + + return 0; +} + +/* + * expect Ninja Irq + */ +static int nsp_expect_signal(struct scsi_cmnd *SCpnt, + unsigned char current_phase, + unsigned char mask) +{ + unsigned int base = SCpnt->device->host->io_port; + int time_out; + unsigned char phase, i_src; + + //nsp_dbg(NSP_DEBUG_INTR, "current_phase=0x%x, mask=0x%x", current_phase, mask); + + time_out = 100; + do { + phase = nsp_index_read(base, SCSIBUSMON); + if (phase == 0xff) { + //nsp_dbg(NSP_DEBUG_INTR, "ret -1"); + return -1; + } + i_src = nsp_read(base, IRQSTATUS); + if (i_src & IRQSTATUS_SCSI) { + //nsp_dbg(NSP_DEBUG_INTR, "ret 0 found scsi signal"); + return 0; + } + if ((phase & mask) != 0 && (phase & BUSMON_PHASE_MASK) == current_phase) { + //nsp_dbg(NSP_DEBUG_INTR, "ret 1 phase=0x%x", phase); + return 1; + } + } while(time_out-- != 0); + + //nsp_dbg(NSP_DEBUG_INTR, "timeout"); + return -1; +} + +/* + * transfer SCSI message + */ +static int nsp_xfer(struct scsi_cmnd *const SCpnt, int phase) +{ + struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt); + unsigned int base = SCpnt->device->host->io_port; + nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; + char *buf = data->MsgBuffer; + int len = min(MSGBUF_SIZE, data->MsgLen); + int ptr; + int ret; + + //nsp_dbg(NSP_DEBUG_DATA_IO, "in"); + for (ptr = 0; len > 0; len--, ptr++) { + + ret = nsp_expect_signal(SCpnt, phase, BUSMON_REQ); + if (ret <= 0) { + nsp_dbg(NSP_DEBUG_DATA_IO, "xfer quit"); + return 0; + } + + /* if last byte, negate ATN */ + if (len == 1 && scsi_pointer->phase == PH_MSG_OUT) { + nsp_index_write(base, SCSIBUSCTRL, AUTODIRECTION | ACKENB); + } + + /* read & write message */ + if (phase & BUSMON_IO) { + nsp_dbg(NSP_DEBUG_DATA_IO, "read msg"); + buf[ptr] = nsp_index_read(base, SCSIDATAWITHACK); + } else { + nsp_dbg(NSP_DEBUG_DATA_IO, "write msg"); + nsp_index_write(base, SCSIDATAWITHACK, buf[ptr]); + } + nsp_negate_signal(SCpnt, BUSMON_ACK, "xfer"); + + } + return len; +} + +/* + * get extra SCSI data from fifo + */ +static int nsp_dataphase_bypass(struct scsi_cmnd *const 
SCpnt) +{ + struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt); + nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; + unsigned int count; + + //nsp_dbg(NSP_DEBUG_DATA_IO, "in"); + + if (scsi_pointer->have_data_in != IO_IN) { + return 0; + } + + count = nsp_fifo_count(SCpnt); + if (data->FifoCount == count) { + //nsp_dbg(NSP_DEBUG_DATA_IO, "not use bypass quirk"); + return 0; + } + + /* + * XXX: NSP_QUIRK + * data phase skip only occurs in case of SCSI_LOW_READ + */ + nsp_dbg(NSP_DEBUG_DATA_IO, "use bypass quirk"); + scsi_pointer->phase = PH_DATA; + nsp_pio_read(SCpnt); + nsp_setup_fifo(data, false); + + return 0; +} + +/* + * accept reselection + */ +static void nsp_reselected(struct scsi_cmnd *SCpnt) +{ + unsigned int base = SCpnt->device->host->io_port; + unsigned int host_id = SCpnt->device->host->this_id; + //nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; + unsigned char bus_reg; + unsigned char id_reg, tmp; + int target; + + nsp_dbg(NSP_DEBUG_RESELECTION, "in"); + + id_reg = nsp_index_read(base, RESELECTID); + tmp = id_reg & (~BIT(host_id)); + target = 0; + while(tmp != 0) { + if (tmp & BIT(0)) { + break; + } + tmp >>= 1; + target++; + } + + if (scmd_id(SCpnt) != target) { + nsp_msg(KERN_ERR, "XXX: reselect ID must be %d in this implementation.", target); + } + + nsp_negate_signal(SCpnt, BUSMON_SEL, "reselect"); + + nsp_nexus(SCpnt); + bus_reg = nsp_index_read(base, SCSIBUSCTRL) & ~(SCSI_BSY | SCSI_ATN); + nsp_index_write(base, SCSIBUSCTRL, bus_reg); + nsp_index_write(base, SCSIBUSCTRL, bus_reg | AUTODIRECTION | ACKENB); +} + +/* + * count how much data has been transferred + */ +static int nsp_fifo_count(struct scsi_cmnd *SCpnt) +{ + unsigned int base = SCpnt->device->host->io_port; + unsigned int count; + unsigned int l, m, h; + + nsp_index_write(base, POINTERCLR, POINTER_CLEAR | ACK_COUNTER); + + l = nsp_index_read(base, TRANSFERCOUNT); + m = nsp_index_read(base, TRANSFERCOUNT); + h = nsp_index_read(base, TRANSFERCOUNT); + nsp_index_read(base, TRANSFERCOUNT); /* this extra read is required! */ + + count = (h << 16) | (m << 8) | (l << 0); + + //nsp_dbg(NSP_DEBUG_DATA_IO, "count=0x%x", count); + + return count; +} + +/* fifo size */ +#define RFIFO_CRIT 64 +#define WFIFO_CRIT 64 + +/* + * read data in DATA IN phase + */ +static void nsp_pio_read(struct scsi_cmnd *const SCpnt) +{ + struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt); + unsigned int base = SCpnt->device->host->io_port; + unsigned long mmio_base = SCpnt->device->host->base; + nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; + long time_out; + int ocount, res; + unsigned char stat, fifo_stat; + + ocount = data->FifoCount; + + nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p resid=%d ocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d", + SCpnt, scsi_get_resid(SCpnt), ocount, scsi_pointer->ptr, + scsi_pointer->this_residual, scsi_pointer->buffer, + scsi_pointer->buffers_residual); + + time_out = 1000; + + while ((time_out-- != 0) && + (scsi_pointer->this_residual > 0 || + scsi_pointer->buffers_residual > 0)) { + + stat = nsp_index_read(base, SCSIBUSMON); + stat &= BUSMON_PHASE_MASK; + + + res = nsp_fifo_count(SCpnt) - ocount; + //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x ocount=0x%x res=0x%x", scsi_pointer->ptr, scsi_pointer->this_residual, ocount, res); + if (res == 0) { /* if some data available ? */ + if (stat == BUSPHASE_DATA_IN) { /* phase changed? 
*/ + //nsp_dbg(NSP_DEBUG_DATA_IO, " wait for data this=%d", scsi_pointer->this_residual); + continue; + } else { + nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x", stat); + break; + } + } + + fifo_stat = nsp_read(base, FIFOSTATUS); + if ((fifo_stat & FIFOSTATUS_FULL_EMPTY) == 0 && + stat == BUSPHASE_DATA_IN) { + continue; + } + + res = min(res, scsi_pointer->this_residual); + + switch (data->TransferMode) { + case MODE_IO32: + res &= ~(BIT(1)|BIT(0)); /* align 4 */ + nsp_fifo32_read(base, scsi_pointer->ptr, res >> 2); + break; + case MODE_IO8: + nsp_fifo8_read(base, scsi_pointer->ptr, res); + break; + + case MODE_MEM32: + res &= ~(BIT(1)|BIT(0)); /* align 4 */ + nsp_mmio_fifo32_read(mmio_base, scsi_pointer->ptr, + res >> 2); + break; + + default: + nsp_dbg(NSP_DEBUG_DATA_IO, "unknown read mode"); + return; + } + + nsp_inc_resid(SCpnt, -res); + scsi_pointer->ptr += res; + scsi_pointer->this_residual -= res; + ocount += res; + //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this_residual=0x%x ocount=0x%x", scsi_pointer->ptr, scsi_pointer->this_residual, ocount); + + /* go to next scatter list if available */ + if (scsi_pointer->this_residual == 0 && + scsi_pointer->buffers_residual != 0 ) { + //nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next timeout=%d", time_out); + scsi_pointer->buffers_residual--; + scsi_pointer->buffer = sg_next(scsi_pointer->buffer); + scsi_pointer->ptr = BUFFER_ADDR(SCpnt); + scsi_pointer->this_residual = + scsi_pointer->buffer->length; + time_out = 1000; + + //nsp_dbg(NSP_DEBUG_DATA_IO, "page: 0x%p, off: 0x%x", scsi_pointer->buffer->page, scsi_pointer->buffer->offset); + } + } + + data->FifoCount = ocount; + + if (time_out < 0) { + nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d", + scsi_get_resid(SCpnt), scsi_pointer->this_residual, + scsi_pointer->buffers_residual); + } + nsp_dbg(NSP_DEBUG_DATA_IO, "read ocount=0x%x", ocount); + nsp_dbg(NSP_DEBUG_DATA_IO, "r cmd=%d resid=0x%x\n", data->CmdId, + scsi_get_resid(SCpnt)); +} + +/* + * write data in DATA OUT phase + */ +static void nsp_pio_write(struct scsi_cmnd *SCpnt) +{ + struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt); + unsigned int base = SCpnt->device->host->io_port; + unsigned long mmio_base = SCpnt->device->host->base; + nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; + int time_out; + int ocount, res; + unsigned char stat; + + ocount = data->FifoCount; + + nsp_dbg(NSP_DEBUG_DATA_IO, "in fifocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d resid=0x%x", + data->FifoCount, scsi_pointer->ptr, scsi_pointer->this_residual, + scsi_pointer->buffer, scsi_pointer->buffers_residual, + scsi_get_resid(SCpnt)); + + time_out = 1000; + + while ((time_out-- != 0) && + (scsi_pointer->this_residual > 0 || + scsi_pointer->buffers_residual > 0)) { + stat = nsp_index_read(base, SCSIBUSMON); + stat &= BUSMON_PHASE_MASK; + + if (stat != BUSPHASE_DATA_OUT) { + res = ocount - nsp_fifo_count(SCpnt); + + nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x, res=%d\n", stat, res); + /* Put back pointer */ + nsp_inc_resid(SCpnt, res); + scsi_pointer->ptr -= res; + scsi_pointer->this_residual += res; + ocount -= res; + + break; + } + + res = ocount - nsp_fifo_count(SCpnt); + if (res > 0) { /* write all data? */ + nsp_dbg(NSP_DEBUG_DATA_IO, "wait for all data out. 
ocount=0x%x res=%d", ocount, res); + continue; + } + + res = min(scsi_pointer->this_residual, WFIFO_CRIT); + + //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x res=0x%x", scsi_pointer->ptr, scsi_pointer->this_residual, res); + switch (data->TransferMode) { + case MODE_IO32: + res &= ~(BIT(1)|BIT(0)); /* align 4 */ + nsp_fifo32_write(base, scsi_pointer->ptr, res >> 2); + break; + case MODE_IO8: + nsp_fifo8_write(base, scsi_pointer->ptr, res); + break; + + case MODE_MEM32: + res &= ~(BIT(1)|BIT(0)); /* align 4 */ + nsp_mmio_fifo32_write(mmio_base, scsi_pointer->ptr, + res >> 2); + break; + + default: + nsp_dbg(NSP_DEBUG_DATA_IO, "unknown write mode"); + break; + } + + nsp_inc_resid(SCpnt, -res); + scsi_pointer->ptr += res; + scsi_pointer->this_residual -= res; + ocount += res; + + /* go to next scatter list if available */ + if (scsi_pointer->this_residual == 0 && + scsi_pointer->buffers_residual != 0 ) { + //nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next"); + scsi_pointer->buffers_residual--; + scsi_pointer->buffer = sg_next(scsi_pointer->buffer); + scsi_pointer->ptr = BUFFER_ADDR(SCpnt); + scsi_pointer->this_residual = + scsi_pointer->buffer->length; + time_out = 1000; + } + } + + data->FifoCount = ocount; + + if (time_out < 0) { + nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x", + scsi_get_resid(SCpnt)); + } + nsp_dbg(NSP_DEBUG_DATA_IO, "write ocount=0x%x", ocount); + nsp_dbg(NSP_DEBUG_DATA_IO, "w cmd=%d resid=0x%x\n", data->CmdId, + scsi_get_resid(SCpnt)); +} +#undef RFIFO_CRIT +#undef WFIFO_CRIT + +/* + * setup synchronous/asynchronous data transfer mode + */ +static int nsp_nexus(struct scsi_cmnd *SCpnt) +{ + unsigned int base = SCpnt->device->host->io_port; + unsigned char target = scmd_id(SCpnt); +// unsigned char lun = SCpnt->device->lun; + nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; + sync_data *sync = &(data->Sync[target]); + + //nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p", SCpnt); + + /* setup synch transfer registers */ + nsp_index_write(base, SYNCREG, sync->SyncRegister); + nsp_index_write(base, ACKWIDTH, sync->AckWidth); + + if (scsi_get_resid(SCpnt) % 4 != 0 || + scsi_get_resid(SCpnt) <= PAGE_SIZE ) { + data->TransferMode = MODE_IO8; + } else if (nsp_burst_mode == BURST_MEM32) { + data->TransferMode = MODE_MEM32; + } else if (nsp_burst_mode == BURST_IO32) { + data->TransferMode = MODE_IO32; + } else { + data->TransferMode = MODE_IO8; + } + + /* setup pdma fifo */ + nsp_setup_fifo(data, true); + + /* clear ack counter */ + data->FifoCount = 0; + nsp_index_write(base, POINTERCLR, POINTER_CLEAR | + ACK_COUNTER_CLEAR | + REQ_COUNTER_CLEAR | + HOST_COUNTER_CLEAR); + + return 0; +} + +#include "nsp_message.c" +/* + * interrupt handler + */ +static irqreturn_t nspintr(int irq, void *dev_id) +{ + unsigned int base; + unsigned char irq_status, irq_phase, phase; + struct scsi_cmnd *tmpSC; + struct scsi_pointer *scsi_pointer; + unsigned char target, lun; + unsigned int *sync_neg; + int i, tmp; + nsp_hw_data *data; + + + //nsp_dbg(NSP_DEBUG_INTR, "dev_id=0x%p", dev_id); + //nsp_dbg(NSP_DEBUG_INTR, "host=0x%p", ((scsi_info_t *)dev_id)->host); + + if ( dev_id != NULL && + ((scsi_info_t *)dev_id)->host != NULL ) { + scsi_info_t *info = (scsi_info_t *)dev_id; + + data = (nsp_hw_data *)info->host->hostdata; + } else { + nsp_dbg(NSP_DEBUG_INTR, "host data wrong"); + return IRQ_NONE; + } + + //nsp_dbg(NSP_DEBUG_INTR, "&nsp_data_base=0x%p, dev_id=0x%p", &nsp_data_base, dev_id); + + base = data->BaseAddress; + //nsp_dbg(NSP_DEBUG_INTR, "base=0x%x", base); + + /* + * 
interrupt check + */ + nsp_write(base, IRQCONTROL, IRQCONTROL_IRQDISABLE); + irq_status = nsp_read(base, IRQSTATUS); + //nsp_dbg(NSP_DEBUG_INTR, "irq_status=0x%x", irq_status); + if ((irq_status == 0xff) || ((irq_status & IRQSTATUS_MASK) == 0)) { + nsp_write(base, IRQCONTROL, 0); + //nsp_dbg(NSP_DEBUG_INTR, "no irq/shared irq"); + return IRQ_NONE; + } + + /* XXX: IMPORTANT + * Do not read an irq_phase register if no scsi phase interrupt. + * Unless, you should lose a scsi phase interrupt. + */ + phase = nsp_index_read(base, SCSIBUSMON); + if((irq_status & IRQSTATUS_SCSI) != 0) { + irq_phase = nsp_index_read(base, IRQPHASESENCE); + } else { + irq_phase = 0; + } + + //nsp_dbg(NSP_DEBUG_INTR, "irq_phase=0x%x", irq_phase); + + /* + * timer interrupt handler (scsi vs timer interrupts) + */ + //nsp_dbg(NSP_DEBUG_INTR, "timercount=%d", data->TimerCount); + if (data->TimerCount != 0) { + //nsp_dbg(NSP_DEBUG_INTR, "stop timer"); + nsp_index_write(base, TIMERCOUNT, 0); + nsp_index_write(base, TIMERCOUNT, 0); + data->TimerCount = 0; + } + + if ((irq_status & IRQSTATUS_MASK) == IRQSTATUS_TIMER && + data->SelectionTimeOut == 0) { + //nsp_dbg(NSP_DEBUG_INTR, "timer start"); + nsp_write(base, IRQCONTROL, IRQCONTROL_TIMER_CLEAR); + return IRQ_HANDLED; + } + + nsp_write(base, IRQCONTROL, IRQCONTROL_TIMER_CLEAR | IRQCONTROL_FIFO_CLEAR); + + if ((irq_status & IRQSTATUS_SCSI) && + (irq_phase & SCSI_RESET_IRQ)) { + nsp_msg(KERN_ERR, "bus reset (power off?)"); + + nsphw_init(data); + nsp_bus_reset(data); + + if(data->CurrentSC != NULL) { + tmpSC = data->CurrentSC; + scsi_pointer = nsp_priv(tmpSC); + tmpSC->result = (DID_RESET << 16) | + ((scsi_pointer->Message & 0xff) << 8) | + ((scsi_pointer->Status & 0xff) << 0); + nsp_scsi_done(tmpSC); + } + return IRQ_HANDLED; + } + + if (data->CurrentSC == NULL) { + nsp_msg(KERN_ERR, "CurrentSC==NULL irq_status=0x%x phase=0x%x irq_phase=0x%x this can't be happen. 
reset everything", irq_status, phase, irq_phase); + nsphw_init(data); + nsp_bus_reset(data); + return IRQ_HANDLED; + } + + tmpSC = data->CurrentSC; + scsi_pointer = nsp_priv(tmpSC); + target = tmpSC->device->id; + lun = tmpSC->device->lun; + sync_neg = &(data->Sync[target].SyncNegotiation); + + /* + * parse hardware SCSI irq reasons register + */ + if (irq_status & IRQSTATUS_SCSI) { + if (irq_phase & RESELECT_IRQ) { + nsp_dbg(NSP_DEBUG_INTR, "reselect"); + nsp_write(base, IRQCONTROL, IRQCONTROL_RESELECT_CLEAR); + nsp_reselected(tmpSC); + return IRQ_HANDLED; + } + + if ((irq_phase & (PHASE_CHANGE_IRQ | LATCHED_BUS_FREE)) == 0) { + return IRQ_HANDLED; + } + } + + //show_phase(tmpSC); + + switch (scsi_pointer->phase) { + case PH_SELSTART: + // *sync_neg = SYNC_NOT_YET; + if ((phase & BUSMON_BSY) == 0) { + //nsp_dbg(NSP_DEBUG_INTR, "selection count=%d", data->SelectionTimeOut); + if (data->SelectionTimeOut >= NSP_SELTIMEOUT) { + nsp_dbg(NSP_DEBUG_INTR, "selection time out"); + data->SelectionTimeOut = 0; + nsp_index_write(base, SCSIBUSCTRL, 0); + + tmpSC->result = DID_TIME_OUT << 16; + nsp_scsi_done(tmpSC); + + return IRQ_HANDLED; + } + data->SelectionTimeOut += 1; + nsp_start_timer(tmpSC, 1000/51); + return IRQ_HANDLED; + } + + /* attention assert */ + //nsp_dbg(NSP_DEBUG_INTR, "attention assert"); + data->SelectionTimeOut = 0; + scsi_pointer->phase = PH_SELECTED; + nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN); + udelay(1); + nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN | AUTODIRECTION | ACKENB); + return IRQ_HANDLED; + + case PH_RESELECT: + //nsp_dbg(NSP_DEBUG_INTR, "phase reselect"); + // *sync_neg = SYNC_NOT_YET; + if ((phase & BUSMON_PHASE_MASK) != BUSPHASE_MESSAGE_IN) { + + tmpSC->result = DID_ABORT << 16; + nsp_scsi_done(tmpSC); + return IRQ_HANDLED; + } + fallthrough; + default: + if ((irq_status & (IRQSTATUS_SCSI | IRQSTATUS_FIFO)) == 0) { + return IRQ_HANDLED; + } + break; + } + + /* + * SCSI sequencer + */ + //nsp_dbg(NSP_DEBUG_INTR, "start scsi seq"); + + /* normal disconnect */ + if ((scsi_pointer->phase == PH_MSG_IN || + scsi_pointer->phase == PH_MSG_OUT) && + (irq_phase & LATCHED_BUS_FREE) != 0) { + nsp_dbg(NSP_DEBUG_INTR, "normal disconnect irq_status=0x%x, phase=0x%x, irq_phase=0x%x", irq_status, phase, irq_phase); + + //*sync_neg = SYNC_NOT_YET; + + /* all command complete and return status */ + if (scsi_pointer->Message == COMMAND_COMPLETE) { + tmpSC->result = (DID_OK << 16) | + ((scsi_pointer->Message & 0xff) << 8) | + ((scsi_pointer->Status & 0xff) << 0); + nsp_dbg(NSP_DEBUG_INTR, "command complete result=0x%x", tmpSC->result); + nsp_scsi_done(tmpSC); + + return IRQ_HANDLED; + } + + return IRQ_HANDLED; + } + + + /* check unexpected bus free state */ + if (phase == 0) { + nsp_msg(KERN_DEBUG, "unexpected bus free. 
irq_status=0x%x, phase=0x%x, irq_phase=0x%x", irq_status, phase, irq_phase); + + *sync_neg = SYNC_NG; + tmpSC->result = DID_ERROR << 16; + nsp_scsi_done(tmpSC); + return IRQ_HANDLED; + } + + switch (phase & BUSMON_PHASE_MASK) { + case BUSPHASE_COMMAND: + nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_COMMAND"); + if ((phase & BUSMON_REQ) == 0) { + nsp_dbg(NSP_DEBUG_INTR, "REQ == 0"); + return IRQ_HANDLED; + } + + scsi_pointer->phase = PH_COMMAND; + + nsp_nexus(tmpSC); + + /* write scsi command */ + nsp_dbg(NSP_DEBUG_INTR, "cmd_len=%d", tmpSC->cmd_len); + nsp_index_write(base, COMMANDCTRL, CLEAR_COMMAND_POINTER); + for (i = 0; i < tmpSC->cmd_len; i++) { + nsp_index_write(base, COMMANDDATA, tmpSC->cmnd[i]); + } + nsp_index_write(base, COMMANDCTRL, CLEAR_COMMAND_POINTER | AUTO_COMMAND_GO); + break; + + case BUSPHASE_DATA_OUT: + nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_OUT"); + + scsi_pointer->phase = PH_DATA; + scsi_pointer->have_data_in = IO_OUT; + + nsp_pio_write(tmpSC); + + break; + + case BUSPHASE_DATA_IN: + nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_IN"); + + scsi_pointer->phase = PH_DATA; + scsi_pointer->have_data_in = IO_IN; + + nsp_pio_read(tmpSC); + + break; + + case BUSPHASE_STATUS: + nsp_dataphase_bypass(tmpSC); + nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_STATUS"); + + scsi_pointer->phase = PH_STATUS; + + scsi_pointer->Status = nsp_index_read(base, SCSIDATAWITHACK); + nsp_dbg(NSP_DEBUG_INTR, "message=0x%x status=0x%x", + scsi_pointer->Message, scsi_pointer->Status); + + break; + + case BUSPHASE_MESSAGE_OUT: + nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_MESSAGE_OUT"); + if ((phase & BUSMON_REQ) == 0) { + goto timer_out; + } + + scsi_pointer->phase = PH_MSG_OUT; + + //*sync_neg = SYNC_NOT_YET; + + data->MsgLen = i = 0; + data->MsgBuffer[i] = IDENTIFY(true, lun); i++; + + if (*sync_neg == SYNC_NOT_YET) { + data->Sync[target].SyncPeriod = 0; + data->Sync[target].SyncOffset = 0; + + /**/ + data->MsgBuffer[i] = EXTENDED_MESSAGE; i++; + data->MsgBuffer[i] = 3; i++; + data->MsgBuffer[i] = EXTENDED_SDTR; i++; + data->MsgBuffer[i] = 0x0c; i++; + data->MsgBuffer[i] = 15; i++; + /**/ + } + data->MsgLen = i; + + nsp_analyze_sdtr(tmpSC); + show_message(data); + nsp_message_out(tmpSC); + break; + + case BUSPHASE_MESSAGE_IN: + nsp_dataphase_bypass(tmpSC); + nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_MESSAGE_IN"); + if ((phase & BUSMON_REQ) == 0) { + goto timer_out; + } + + scsi_pointer->phase = PH_MSG_IN; + nsp_message_in(tmpSC); + + /**/ + if (*sync_neg == SYNC_NOT_YET) { + //nsp_dbg(NSP_DEBUG_INTR, "sync target=%d,lun=%d",target,lun); + + if (data->MsgLen >= 5 && + data->MsgBuffer[0] == EXTENDED_MESSAGE && + data->MsgBuffer[1] == 3 && + data->MsgBuffer[2] == EXTENDED_SDTR ) { + data->Sync[target].SyncPeriod = data->MsgBuffer[3]; + data->Sync[target].SyncOffset = data->MsgBuffer[4]; + //nsp_dbg(NSP_DEBUG_INTR, "sync ok, %d %d", data->MsgBuffer[3], data->MsgBuffer[4]); + *sync_neg = SYNC_OK; + } else { + data->Sync[target].SyncPeriod = 0; + data->Sync[target].SyncOffset = 0; + *sync_neg = SYNC_NG; + } + nsp_analyze_sdtr(tmpSC); + } + /**/ + + /* search last messeage byte */ + tmp = -1; + for (i = 0; i < data->MsgLen; i++) { + tmp = data->MsgBuffer[i]; + if (data->MsgBuffer[i] == EXTENDED_MESSAGE) { + i += (1 + data->MsgBuffer[i+1]); + } + } + scsi_pointer->Message = tmp; + + nsp_dbg(NSP_DEBUG_INTR, "message=0x%x len=%d", + scsi_pointer->Message, data->MsgLen); + show_message(data); + + break; + + case BUSPHASE_SELECT: + default: + nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE other"); + + break; + } + + //nsp_dbg(NSP_DEBUG_INTR, "out"); + return 
IRQ_HANDLED; + +timer_out: + nsp_start_timer(tmpSC, 1000/102); + return IRQ_HANDLED; +} + +#ifdef NSP_DEBUG +#include "nsp_debug.c" +#endif /* NSP_DEBUG */ + +/*----------------------------------------------------------------*/ +/* look for ninja3 card and init if found */ +/*----------------------------------------------------------------*/ +static struct Scsi_Host *nsp_detect(struct scsi_host_template *sht) +{ + struct Scsi_Host *host; /* registered host structure */ + nsp_hw_data *data_b = &nsp_data_base, *data; + + nsp_dbg(NSP_DEBUG_INIT, "this_id=%d", sht->this_id); + host = scsi_host_alloc(&nsp_driver_template, sizeof(nsp_hw_data)); + if (host == NULL) { + nsp_dbg(NSP_DEBUG_INIT, "host failed"); + return NULL; + } + + memcpy(host->hostdata, data_b, sizeof(nsp_hw_data)); + data = (nsp_hw_data *)host->hostdata; + data->ScsiInfo->host = host; +#ifdef NSP_DEBUG + data->CmdId = 0; +#endif + + nsp_dbg(NSP_DEBUG_INIT, "irq=%d,%d", data_b->IrqNumber, ((nsp_hw_data *)host->hostdata)->IrqNumber); + + host->unique_id = data->BaseAddress; + host->io_port = data->BaseAddress; + host->n_io_port = data->NumAddress; + host->irq = data->IrqNumber; + host->base = data->MmioAddress; + + spin_lock_init(&(data->Lock)); + + snprintf(data->nspinfo, + sizeof(data->nspinfo), + "NinjaSCSI-3/32Bi Driver $Revision: 1.23 $ IO:0x%04lx-0x%04lx MMIO(virt addr):0x%04lx IRQ:%02d", + host->io_port, host->io_port + host->n_io_port - 1, + host->base, + host->irq); + sht->name = data->nspinfo; + + nsp_dbg(NSP_DEBUG_INIT, "end"); + + + return host; /* detect done. */ +} + +/*----------------------------------------------------------------*/ +/* return info string */ +/*----------------------------------------------------------------*/ +static const char *nsp_info(struct Scsi_Host *shpnt) +{ + nsp_hw_data *data = (nsp_hw_data *)shpnt->hostdata; + + return data->nspinfo; +} + +static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host) +{ + int id; + int speed; + unsigned long flags; + nsp_hw_data *data; + int hostno; + + hostno = host->host_no; + data = (nsp_hw_data *)host->hostdata; + + seq_puts(m, "NinjaSCSI status\n\n" + "Driver version: $Revision: 1.23 $\n"); + seq_printf(m, "SCSI host No.: %d\n", hostno); + seq_printf(m, "IRQ: %d\n", host->irq); + seq_printf(m, "IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1); + seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1); + seq_printf(m, "sg_tablesize: %d\n", host->sg_tablesize); + + seq_puts(m, "burst transfer mode: "); + switch (nsp_burst_mode) { + case BURST_IO8: + seq_puts(m, "io8"); + break; + case BURST_IO32: + seq_puts(m, "io32"); + break; + case BURST_MEM32: + seq_puts(m, "mem32"); + break; + default: + seq_puts(m, "???"); + break; + } + seq_putc(m, '\n'); + + + spin_lock_irqsave(&(data->Lock), flags); + seq_printf(m, "CurrentSC: 0x%p\n\n", data->CurrentSC); + spin_unlock_irqrestore(&(data->Lock), flags); + + seq_puts(m, "SDTR status\n"); + for(id = 0; id < ARRAY_SIZE(data->Sync); id++) { + + seq_printf(m, "id %d: ", id); + + if (id == host->this_id) { + seq_puts(m, "----- NinjaSCSI-3 host adapter\n"); + continue; + } + + switch(data->Sync[id].SyncNegotiation) { + case SYNC_OK: + seq_puts(m, " sync"); + break; + case SYNC_NG: + seq_puts(m, "async"); + break; + case SYNC_NOT_YET: + seq_puts(m, " none"); + break; + default: + seq_puts(m, "?????"); + break; + } + + if (data->Sync[id].SyncPeriod != 0) { + speed = 1000000 / (data->Sync[id].SyncPeriod * 4); + + seq_printf(m, " transfer 
%d.%dMB/s, offset %d", + speed / 1000, + speed % 1000, + data->Sync[id].SyncOffset + ); + } + seq_putc(m, '\n'); + } + return 0; +} + +/*---------------------------------------------------------------*/ +/* error handler */ +/*---------------------------------------------------------------*/ + +/* +static int nsp_eh_abort(struct scsi_cmnd *SCpnt) +{ + nsp_dbg(NSP_DEBUG_BUSRESET, "SCpnt=0x%p", SCpnt); + + return nsp_eh_bus_reset(SCpnt); +}*/ + +static int nsp_bus_reset(nsp_hw_data *data) +{ + unsigned int base = data->BaseAddress; + int i; + + nsp_write(base, IRQCONTROL, IRQCONTROL_ALLMASK); + + nsp_index_write(base, SCSIBUSCTRL, SCSI_RST); + mdelay(100); /* 100ms */ + nsp_index_write(base, SCSIBUSCTRL, 0); + for(i = 0; i < 5; i++) { + nsp_index_read(base, IRQPHASESENCE); /* dummy read */ + } + + nsphw_init_sync(data); + + nsp_write(base, IRQCONTROL, IRQCONTROL_ALLCLEAR); + + return SUCCESS; +} + +static int nsp_eh_bus_reset(struct scsi_cmnd *SCpnt) +{ + nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; + + nsp_dbg(NSP_DEBUG_BUSRESET, "SCpnt=0x%p", SCpnt); + + return nsp_bus_reset(data); +} + +static int nsp_eh_host_reset(struct scsi_cmnd *SCpnt) +{ + nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; + + nsp_dbg(NSP_DEBUG_BUSRESET, "in"); + + nsphw_init(data); + + return SUCCESS; +} + + +/********************************************************************** + PCMCIA functions +**********************************************************************/ + +static int nsp_cs_probe(struct pcmcia_device *link) +{ + scsi_info_t *info; + nsp_hw_data *data = &nsp_data_base; + int ret; + + nsp_dbg(NSP_DEBUG_INIT, "in"); + + /* Create new SCSI device */ + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (info == NULL) { return -ENOMEM; } + info->p_dev = link; + link->priv = info; + data->ScsiInfo = info; + + nsp_dbg(NSP_DEBUG_INIT, "info=0x%p", info); + + ret = nsp_cs_config(link); + + nsp_dbg(NSP_DEBUG_INIT, "link=0x%p", link); + return ret; +} /* nsp_cs_attach */ + + +static void nsp_cs_detach(struct pcmcia_device *link) +{ + nsp_dbg(NSP_DEBUG_INIT, "in, link=0x%p", link); + + ((scsi_info_t *)link->priv)->stop = 1; + nsp_cs_release(link); + + kfree(link->priv); + link->priv = NULL; +} /* nsp_cs_detach */ + + +static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) +{ + nsp_hw_data *data = priv_data; + + if (p_dev->config_index == 0) + return -ENODEV; + + /* This reserves IO space but doesn't actually enable it */ + if (pcmcia_request_io(p_dev) != 0) + goto next_entry; + + if (resource_size(p_dev->resource[2])) { + p_dev->resource[2]->flags |= (WIN_DATA_WIDTH_16 | + WIN_MEMORY_TYPE_CM | + WIN_ENABLE); + if (p_dev->resource[2]->end < 0x1000) + p_dev->resource[2]->end = 0x1000; + if (pcmcia_request_window(p_dev, p_dev->resource[2], 0) != 0) + goto next_entry; + if (pcmcia_map_mem_page(p_dev, p_dev->resource[2], + p_dev->card_addr) != 0) + goto next_entry; + + data->MmioAddress = (unsigned long) + ioremap(p_dev->resource[2]->start, + resource_size(p_dev->resource[2])); + if (!data->MmioAddress) + goto next_entry; + + data->MmioLength = resource_size(p_dev->resource[2]); + } + /* If we got this far, we're cool! 
*/ + return 0; + +next_entry: + nsp_dbg(NSP_DEBUG_INIT, "next"); + pcmcia_disable_device(p_dev); + return -ENODEV; +} + +static int nsp_cs_config(struct pcmcia_device *link) +{ + int ret; + scsi_info_t *info = link->priv; + struct Scsi_Host *host; + nsp_hw_data *data = &nsp_data_base; + + nsp_dbg(NSP_DEBUG_INIT, "in"); + + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_CHECK_VCC | + CONF_AUTO_SET_VPP | CONF_AUTO_AUDIO | CONF_AUTO_SET_IOMEM | + CONF_AUTO_SET_IO; + + ret = pcmcia_loop_config(link, nsp_cs_config_check, data); + if (ret) + goto cs_failed; + + if (pcmcia_request_irq(link, nspintr)) + goto cs_failed; + + ret = pcmcia_enable_device(link); + if (ret) + goto cs_failed; + + if (free_ports) { + if (link->resource[0]) { + release_region(link->resource[0]->start, + resource_size(link->resource[0])); + } + if (link->resource[1]) { + release_region(link->resource[1]->start, + resource_size(link->resource[1])); + } + } + + /* Set port and IRQ */ + data->BaseAddress = link->resource[0]->start; + data->NumAddress = resource_size(link->resource[0]); + data->IrqNumber = link->irq; + + nsp_dbg(NSP_DEBUG_INIT, "I/O[0x%x+0x%x] IRQ %d", + data->BaseAddress, data->NumAddress, data->IrqNumber); + + nsphw_init(data); + + host = nsp_detect(&nsp_driver_template); + + if (host == NULL) { + nsp_dbg(NSP_DEBUG_INIT, "detect failed"); + goto cs_failed; + } + + + ret = scsi_add_host (host, NULL); + if (ret) + goto cs_failed; + + scsi_scan_host(host); + + info->host = host; + + return 0; + + cs_failed: + nsp_dbg(NSP_DEBUG_INIT, "config fail"); + nsp_cs_release(link); + + return -ENODEV; +} /* nsp_cs_config */ + + +static void nsp_cs_release(struct pcmcia_device *link) +{ + scsi_info_t *info = link->priv; + nsp_hw_data *data = NULL; + + if (info->host == NULL) { + nsp_msg(KERN_DEBUG, "unexpected card release call."); + } else { + data = (nsp_hw_data *)info->host->hostdata; + } + + nsp_dbg(NSP_DEBUG_INIT, "link=0x%p", link); + + /* Unlink the device chain */ + if (info->host != NULL) { + scsi_remove_host(info->host); + } + + if (resource_size(link->resource[2])) { + if (data != NULL) { + iounmap((void *)(data->MmioAddress)); + } + } + pcmcia_disable_device(link); + + if (info->host != NULL) { + scsi_host_put(info->host); + } +} /* nsp_cs_release */ + +static int nsp_cs_suspend(struct pcmcia_device *link) +{ + scsi_info_t *info = link->priv; + nsp_hw_data *data; + + nsp_dbg(NSP_DEBUG_INIT, "event: suspend"); + + if (info->host != NULL) { + nsp_msg(KERN_INFO, "clear SDTR status"); + + data = (nsp_hw_data *)info->host->hostdata; + + nsphw_init_sync(data); + } + + info->stop = 1; + + return 0; +} + +static int nsp_cs_resume(struct pcmcia_device *link) +{ + scsi_info_t *info = link->priv; + nsp_hw_data *data; + + nsp_dbg(NSP_DEBUG_INIT, "event: resume"); + + info->stop = 0; + + if (info->host != NULL) { + nsp_msg(KERN_INFO, "reset host and bus"); + + data = (nsp_hw_data *)info->host->hostdata; + + nsphw_init (data); + nsp_bus_reset(data); + } + + return 0; +} + +/*======================================================================* + * module entry point + *====================================================================*/ +static const struct pcmcia_device_id nsp_cs_ids[] = { + PCMCIA_DEVICE_PROD_ID123("IO DATA", "CBSC16 ", "1", 0x547e66dc, 0x0d63a3fd, 0x51de003a), + PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-001", "1", 0x534c02bc, 0x52008408, 0x51de003a), + PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-002", "1", 0x534c02bc, 0xcb09d5b2, 0x51de003a), + PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-003", 
"1", 0x534c02bc, 0xbc0ee524, 0x51de003a), + PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-004", "1", 0x534c02bc, 0x226a7087, 0x51de003a), + PCMCIA_DEVICE_PROD_ID123("WBT", "NinjaSCSI-3", "R1.0", 0xc7ba805f, 0xfdc7c97d, 0x6973710e), + PCMCIA_DEVICE_PROD_ID123("WORKBIT", "UltraNinja-16", "1", 0x28191418, 0xb70f4b09, 0x51de003a), + PCMCIA_DEVICE_NULL +}; +MODULE_DEVICE_TABLE(pcmcia, nsp_cs_ids); + +static struct pcmcia_driver nsp_driver = { + .owner = THIS_MODULE, + .name = "nsp_cs", + .probe = nsp_cs_probe, + .remove = nsp_cs_detach, + .id_table = nsp_cs_ids, + .suspend = nsp_cs_suspend, + .resume = nsp_cs_resume, +}; +module_pcmcia_driver(nsp_driver); + +/* end */ diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h new file mode 100644 index 000000000..e1ee8ef90 --- /dev/null +++ b/drivers/scsi/pcmcia/nsp_cs.h @@ -0,0 +1,377 @@ +/*=======================================================/ + Header file for nsp_cs.c + By: YOKOTA Hiroshi + + Ver.1.0 : Cut unused lines. + Ver 0.1 : Initial version. + + This software may be used and distributed according to the terms of + the GNU General Public License. + +=========================================================*/ + +#ifndef __nsp_cs__ +#define __nsp_cs__ + +/* for debugging */ +//#define NSP_DEBUG 9 + +/* +#define static +#define inline +*/ + +/************************************ + * Some useful macros... + */ + +/* SCSI initiator must be ID 7 */ +#define NSP_INITIATOR_ID 7 + +#define NSP_SELTIMEOUT 200 + +/*************************************************************************** + * register definitions + ***************************************************************************/ +/*======================================================================== + * base register + ========================================================================*/ +#define IRQCONTROL 0x00 /* R */ +# define IRQCONTROL_RESELECT_CLEAR BIT(0) +# define IRQCONTROL_PHASE_CHANGE_CLEAR BIT(1) +# define IRQCONTROL_TIMER_CLEAR BIT(2) +# define IRQCONTROL_FIFO_CLEAR BIT(3) +# define IRQCONTROL_ALLMASK 0xff +# define IRQCONTROL_ALLCLEAR (IRQCONTROL_RESELECT_CLEAR | \ + IRQCONTROL_PHASE_CHANGE_CLEAR | \ + IRQCONTROL_TIMER_CLEAR | \ + IRQCONTROL_FIFO_CLEAR ) +# define IRQCONTROL_IRQDISABLE 0xf0 + +#define IRQSTATUS 0x00 /* W */ +# define IRQSTATUS_SCSI BIT(0) +# define IRQSTATUS_TIMER BIT(2) +# define IRQSTATUS_FIFO BIT(3) +# define IRQSTATUS_MASK 0x0f + +#define IFSELECT 0x01 /* W */ +# define IF_IFSEL BIT(0) +# define IF_REGSEL BIT(2) + +#define FIFOSTATUS 0x01 /* R */ +# define FIFOSTATUS_CHIP_REVISION_MASK 0x0f +# define FIFOSTATUS_CHIP_ID_MASK 0x70 +# define FIFOSTATUS_FULL_EMPTY BIT(7) + +#define INDEXREG 0x02 /* R/W */ +#define DATAREG 0x03 /* R/W */ +#define FIFODATA 0x04 /* R/W */ +#define FIFODATA1 0x05 /* R/W */ +#define FIFODATA2 0x06 /* R/W */ +#define FIFODATA3 0x07 /* R/W */ + +/*==================================================================== + * indexed register + ====================================================================*/ +#define EXTBUSCTRL 0x10 /* R/W,deleted */ + +#define CLOCKDIV 0x11 /* R/W */ +# define CLOCK_40M 0x02 +# define CLOCK_20M 0x01 +# define FAST_20 BIT(2) + +#define TERMPWRCTRL 0x13 /* R/W */ +# define POWER_ON BIT(0) + +#define SCSIIRQMODE 0x15 /* R/W */ +# define SCSI_PHASE_CHANGE_EI BIT(0) +# define RESELECT_EI BIT(4) +# define FIFO_IRQ_EI BIT(5) +# define SCSI_RESET_IRQ_EI BIT(6) + +#define IRQPHASESENCE 0x16 /* R */ +# define LATCHED_MSG BIT(0) +# define LATCHED_IO BIT(1) +# define LATCHED_CD 
BIT(2) +# define LATCHED_BUS_FREE BIT(3) +# define PHASE_CHANGE_IRQ BIT(4) +# define RESELECT_IRQ BIT(5) +# define FIFO_IRQ BIT(6) +# define SCSI_RESET_IRQ BIT(7) + +#define TIMERCOUNT 0x17 /* R/W */ + +#define SCSIBUSCTRL 0x18 /* R/W */ +# define SCSI_SEL BIT(0) +# define SCSI_RST BIT(1) +# define SCSI_DATAOUT_ENB BIT(2) +# define SCSI_ATN BIT(3) +# define SCSI_ACK BIT(4) +# define SCSI_BSY BIT(5) +# define AUTODIRECTION BIT(6) +# define ACKENB BIT(7) + +#define SCSIBUSMON 0x19 /* R */ + +#define SETARBIT 0x1A /* W */ +# define ARBIT_GO BIT(0) +# define ARBIT_FLAG_CLEAR BIT(1) + +#define ARBITSTATUS 0x1A /* R */ +/*# define ARBIT_GO BIT(0)*/ +# define ARBIT_WIN BIT(1) +# define ARBIT_FAIL BIT(2) +# define RESELECT_FLAG BIT(3) + +#define PARITYCTRL 0x1B /* W */ +#define PARITYSTATUS 0x1B /* R */ + +#define COMMANDCTRL 0x1C /* W */ +# define CLEAR_COMMAND_POINTER BIT(0) +# define AUTO_COMMAND_GO BIT(1) + +#define RESELECTID 0x1C /* R */ +#define COMMANDDATA 0x1D /* R/W */ + +#define POINTERCLR 0x1E /* W */ +# define POINTER_CLEAR BIT(0) +# define ACK_COUNTER_CLEAR BIT(1) +# define REQ_COUNTER_CLEAR BIT(2) +# define HOST_COUNTER_CLEAR BIT(3) +# define READ_SOURCE (BIT(4) | BIT(5)) +# define ACK_COUNTER (0) +# define REQ_COUNTER (BIT(4)) +# define HOST_COUNTER (BIT(5)) + +#define TRANSFERCOUNT 0x1E /* R */ + +#define TRANSFERMODE 0x20 /* R/W */ +# define MODE_MEM8 BIT(0) +# define MODE_MEM32 BIT(1) +# define MODE_ADR24 BIT(2) +# define MODE_ADR32 BIT(3) +# define MODE_IO8 BIT(4) +# define MODE_IO32 BIT(5) +# define TRANSFER_GO BIT(6) +# define BRAIND BIT(7) + +#define SYNCREG 0x21 /* R/W */ +# define SYNCREG_OFFSET_MASK 0x0f +# define SYNCREG_PERIOD_MASK 0xf0 +# define SYNCREG_PERIOD_SHIFT 4 + +#define SCSIDATALATCH 0x22 /* W */ +#define SCSIDATAIN 0x22 /* R */ +#define SCSIDATAWITHACK 0x23 /* R/W */ +#define SCAMCONTROL 0x24 /* W */ +#define SCAMSTATUS 0x24 /* R */ +#define SCAMDATA 0x25 /* R/W */ + +#define OTHERCONTROL 0x26 /* R/W */ +# define TPL_ROM_WRITE_EN BIT(0) +# define TPWR_OUT BIT(1) +# define TPWR_SENSE BIT(2) +# define RA8_CONTROL BIT(3) + +#define ACKWIDTH 0x27 /* R/W */ +#define CLRTESTPNT 0x28 /* W */ +#define ACKCNTLD 0x29 /* W */ +#define REQCNTLD 0x2A /* W */ +#define HSTCNTLD 0x2B /* W */ +#define CHECKSUM 0x2C /* R/W */ + +/************************************************************************ + * Input status bit definitions. + ************************************************************************/ +#define S_MESSAGE BIT(0) /* Message line from SCSI bus */ +#define S_IO BIT(1) /* Input/Output line from SCSI bus */ +#define S_CD BIT(2) /* Command/Data line from SCSI bus */ +#define S_BUSY BIT(3) /* Busy line from SCSI bus */ +#define S_ACK BIT(4) /* Acknowledge line from SCSI bus */ +#define S_REQUEST BIT(5) /* Request line from SCSI bus */ +#define S_SELECT BIT(6) /* */ +#define S_ATN BIT(7) /* */ + +/*********************************************************************** + * Useful Bus Monitor status combinations. 
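+ * (each BUSPHASE_* value below is the corresponding BUSMON_* combination masked with BUSMON_PHASE_MASK)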
+ ***********************************************************************/ +#define BUSMON_SEL S_SELECT +#define BUSMON_BSY S_BUSY +#define BUSMON_REQ S_REQUEST +#define BUSMON_IO S_IO +#define BUSMON_ACK S_ACK +#define BUSMON_BUS_FREE 0 +#define BUSMON_COMMAND ( S_BUSY | S_CD | S_REQUEST ) +#define BUSMON_MESSAGE_IN ( S_BUSY | S_CD | S_IO | S_MESSAGE | S_REQUEST ) +#define BUSMON_MESSAGE_OUT ( S_BUSY | S_CD | S_MESSAGE | S_REQUEST ) +#define BUSMON_DATA_IN ( S_BUSY | S_IO | S_REQUEST ) +#define BUSMON_DATA_OUT ( S_BUSY | S_REQUEST ) +#define BUSMON_STATUS ( S_BUSY | S_CD | S_IO | S_REQUEST ) +#define BUSMON_SELECT ( S_IO | S_SELECT ) +#define BUSMON_RESELECT ( S_IO | S_SELECT ) +#define BUSMON_PHASE_MASK ( S_CD | S_IO | S_MESSAGE | S_SELECT ) + +#define BUSPHASE_SELECT ( BUSMON_SELECT & BUSMON_PHASE_MASK ) +#define BUSPHASE_COMMAND ( BUSMON_COMMAND & BUSMON_PHASE_MASK ) +#define BUSPHASE_MESSAGE_IN ( BUSMON_MESSAGE_IN & BUSMON_PHASE_MASK ) +#define BUSPHASE_MESSAGE_OUT ( BUSMON_MESSAGE_OUT & BUSMON_PHASE_MASK ) +#define BUSPHASE_DATA_IN ( BUSMON_DATA_IN & BUSMON_PHASE_MASK ) +#define BUSPHASE_DATA_OUT ( BUSMON_DATA_OUT & BUSMON_PHASE_MASK ) +#define BUSPHASE_STATUS ( BUSMON_STATUS & BUSMON_PHASE_MASK ) + +/*====================================================================*/ + +typedef struct scsi_info_t { + struct pcmcia_device *p_dev; + struct Scsi_Host *host; + int stop; +} scsi_info_t; + + +/* synchronous transfer negotiation data */ +typedef struct _sync_data { + unsigned int SyncNegotiation; +#define SYNC_NOT_YET 0 +#define SYNC_OK 1 +#define SYNC_NG 2 + + unsigned int SyncPeriod; + unsigned int SyncOffset; + unsigned char SyncRegister; + unsigned char AckWidth; +} sync_data; + +typedef struct _nsp_hw_data { + unsigned int BaseAddress; + unsigned int NumAddress; + unsigned int IrqNumber; + + unsigned long MmioAddress; +#define NSP_MMIO_OFFSET 0x0800 + unsigned long MmioLength; + + unsigned char ScsiClockDiv; + + unsigned char TransferMode; + + int TimerCount; + int SelectionTimeOut; + struct scsi_cmnd *CurrentSC; + //int CurrnetTarget; + + int FifoCount; + +#define MSGBUF_SIZE 20 + unsigned char MsgBuffer[MSGBUF_SIZE]; + int MsgLen; + +#define N_TARGET 8 + sync_data Sync[N_TARGET]; + + char nspinfo[110]; /* description */ + spinlock_t Lock; + + scsi_info_t *ScsiInfo; /* attach <-> detect glue */ + + +#ifdef NSP_DEBUG + int CmdId; /* Accepted command serial number. + Used for debugging. 
*/ +#endif +} nsp_hw_data; + +/**************************************************************************** + * + */ + +/* Card service functions */ +static void nsp_cs_detach (struct pcmcia_device *p_dev); +static void nsp_cs_release(struct pcmcia_device *link); +static int nsp_cs_config (struct pcmcia_device *link); + +/* Linux SCSI subsystem specific functions */ +static struct Scsi_Host *nsp_detect (struct scsi_host_template *sht); +static const char *nsp_info (struct Scsi_Host *shpnt); +static int nsp_show_info (struct seq_file *m, + struct Scsi_Host *host); +static int nsp_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *SCpnt); + +/* Error handler */ +/*static int nsp_eh_abort (struct scsi_cmnd *SCpnt);*/ +/*static int nsp_eh_device_reset(struct scsi_cmnd *SCpnt);*/ +static int nsp_eh_bus_reset (struct scsi_cmnd *SCpnt); +static int nsp_eh_host_reset (struct scsi_cmnd *SCpnt); +static int nsp_bus_reset (nsp_hw_data *data); + +/* */ +static void nsphw_init (nsp_hw_data *data); +static bool nsphw_start_selection(struct scsi_cmnd *SCpnt); +static void nsp_start_timer (struct scsi_cmnd *SCpnt, int time); +static int nsp_fifo_count (struct scsi_cmnd *SCpnt); +static void nsp_pio_read (struct scsi_cmnd *SCpnt); +static void nsp_pio_write (struct scsi_cmnd *SCpnt); +static int nsp_nexus (struct scsi_cmnd *SCpnt); +static void nsp_scsi_done (struct scsi_cmnd *SCpnt); +static int nsp_analyze_sdtr (struct scsi_cmnd *SCpnt); +static int nsp_negate_signal (struct scsi_cmnd *SCpnt, + unsigned char mask, char *str); +static int nsp_expect_signal (struct scsi_cmnd *SCpnt, + unsigned char current_phase, + unsigned char mask); +static int nsp_xfer (struct scsi_cmnd *SCpnt, int phase); +static int nsp_dataphase_bypass (struct scsi_cmnd *SCpnt); +static void nsp_reselected (struct scsi_cmnd *SCpnt); +static struct Scsi_Host *nsp_detect(struct scsi_host_template *sht); + +/* Interrupt handler */ +//static irqreturn_t nspintr(int irq, void *dev_id); + +/* Debug */ +#ifdef NSP_DEBUG +static void show_command (struct scsi_cmnd *SCpnt); +static void show_phase (struct scsi_cmnd *SCpnt); +static void show_busphase(unsigned char stat); +static void show_message (nsp_hw_data *data); +#else +# define show_command(ptr) /* */ +# define show_phase(SCpnt) /* */ +# define show_busphase(stat) /* */ +# define show_message(data) /* */ +#endif + +/* + * SCSI phase + */ +enum _scsi_phase { + PH_UNDETERMINED , + PH_ARBSTART , + PH_SELSTART , + PH_SELECTED , + PH_COMMAND , + PH_DATA , + PH_STATUS , + PH_MSG_IN , + PH_MSG_OUT , + PH_DISCONNECT , + PH_RESELECT , + PH_ABORT , + PH_RESET +}; + +enum _data_in_out { + IO_UNKNOWN, + IO_IN, + IO_OUT +}; + +enum _burst_mode { + BURST_IO8 = 0, + BURST_IO32 = 1, + BURST_MEM32 = 2, +}; + +/* scatter-gather table */ +#define BUFFER_ADDR(SCpnt) ((char *)(sg_virt(nsp_priv(SCpnt)->buffer))) + +#endif /*__nsp_cs__*/ +/* end */ diff --git a/drivers/scsi/pcmcia/nsp_debug.c b/drivers/scsi/pcmcia/nsp_debug.c new file mode 100644 index 000000000..23b68dd26 --- /dev/null +++ b/drivers/scsi/pcmcia/nsp_debug.c @@ -0,0 +1,215 @@ +/*======================================================================== + Debug routines for nsp_cs + By: YOKOTA Hiroshi + + This software may be used and distributed according to the terms of + the GNU General Public License. 
+=========================================================================*/ + +/* $Id: nsp_debug.c,v 1.3 2003/07/26 14:21:09 elca Exp $ */ + +/* + * Show the command data of a command + */ +static const char unknown[] = "UNKNOWN"; + +static const char * group_0_commands[] = { +/* 00-03 */ "Test Unit Ready", "Rezero Unit", unknown, "Request Sense", +/* 04-07 */ "Format Unit", "Read Block Limits", unknown, "Reassign Blocks", +/* 08-0d */ "Read (6)", unknown, "Write (6)", "Seek (6)", unknown, unknown, +/* 0e-12 */ unknown, "Read Reverse", "Write Filemarks", "Space", "Inquiry", +/* 13-16 */ unknown, "Recover Buffered Data", "Mode Select", "Reserve", +/* 17-1b */ "Release", "Copy", "Erase", "Mode Sense", "Start/Stop Unit", +/* 1c-1d */ "Receive Diagnostic", "Send Diagnostic", +/* 1e-1f */ "Prevent/Allow Medium Removal", unknown, +}; + + +static const char *group_1_commands[] = { +/* 20-22 */ unknown, unknown, unknown, +/* 23-28 */ unknown, unknown, "Read Capacity", unknown, unknown, "Read (10)", +/* 29-2d */ unknown, "Write (10)", "Seek (10)", unknown, unknown, +/* 2e-31 */ "Write Verify","Verify", "Search High", "Search Equal", +/* 32-34 */ "Search Low", "Set Limits", "Prefetch or Read Position", +/* 35-37 */ "Synchronize Cache","Lock/Unlock Cache", "Read Defect Data", +/* 38-3c */ "Medium Scan", "Compare","Copy Verify", "Write Buffer", "Read Buffer", +/* 3d-3f */ "Update Block", "Read Long", "Write Long", +}; + + +static const char *group_2_commands[] = { +/* 40-41 */ "Change Definition", "Write Same", +/* 42-48 */ "Read Sub-Ch(cd)", "Read TOC", "Read Header(cd)", "Play Audio(cd)", unknown, "Play Audio MSF(cd)", "Play Audio Track/Index(cd)", +/* 49-4f */ "Play Track Relative(10)(cd)", unknown, "Pause/Resume(cd)", "Log Select", "Log Sense", unknown, unknown, +/* 50-55 */ unknown, unknown, unknown, unknown, unknown, "Mode Select (10)", +/* 56-5b */ unknown, unknown, unknown, unknown, "Mode Sense (10)", unknown, +/* 5c-5f */ unknown, unknown, unknown, +}; + +#define group(opcode) (((opcode) >> 5) & 7) + +#define RESERVED_GROUP 0 +#define VENDOR_GROUP 1 +#define NOTEXT_GROUP 2 + +static const char **commands[] = { + group_0_commands, group_1_commands, group_2_commands, + (const char **) RESERVED_GROUP, (const char **) RESERVED_GROUP, + (const char **) NOTEXT_GROUP, (const char **) VENDOR_GROUP, + (const char **) VENDOR_GROUP +}; + +static const char reserved[] = "RESERVED"; +static const char vendor[] = "VENDOR SPECIFIC"; + +static void print_opcodek(unsigned char opcode) +{ + const char **table = commands[ group(opcode) ]; + + switch ((unsigned long) table) { + case RESERVED_GROUP: + printk("%s[%02x] ", reserved, opcode); + break; + case NOTEXT_GROUP: + printk("%s(notext)[%02x] ", unknown, opcode); + break; + case VENDOR_GROUP: + printk("%s[%02x] ", vendor, opcode); + break; + default: + if (table[opcode & 0x1f] != unknown) + printk("%s[%02x] ", table[opcode & 0x1f], opcode); + else + printk("%s[%02x] ", unknown, opcode); + break; + } +} + +static void print_commandk (unsigned char *command) +{ + int i, s; + printk(KERN_DEBUG); + print_opcodek(command[0]); + /*printk(KERN_DEBUG "%s ", __func__);*/ + if ((command[0] >> 5) == 6 || + (command[0] >> 5) == 7 ) { + s = 12; /* vender specific */ + } else { + s = COMMAND_SIZE(command[0]); + } + for ( i = 1; i < s; ++i) { + printk("%02x ", command[i]); + } + + switch (s) { + case 6: + printk("LBA=%d len=%d", + (((unsigned int)command[1] & 0x0f) << 16) | + ( (unsigned int)command[2] << 8) | + ( (unsigned int)command[3] ), + (unsigned int)command[4] + ); 
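+ /* 6-byte CDB: LBA assembled from the low bits of byte 1 and bytes 2-3, transfer length taken from byte 4 */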
+ break; + case 10: + printk("LBA=%d len=%d", + ((unsigned int)command[2] << 24) | + ((unsigned int)command[3] << 16) | + ((unsigned int)command[4] << 8) | + ((unsigned int)command[5] ), + ((unsigned int)command[7] << 8) | + ((unsigned int)command[8] ) + ); + break; + case 12: + printk("LBA=%d len=%d", + ((unsigned int)command[2] << 24) | + ((unsigned int)command[3] << 16) | + ((unsigned int)command[4] << 8) | + ((unsigned int)command[5] ), + ((unsigned int)command[6] << 24) | + ((unsigned int)command[7] << 16) | + ((unsigned int)command[8] << 8) | + ((unsigned int)command[9] ) + ); + break; + default: + break; + } + printk("\n"); +} + +static void show_command(struct scsi_cmnd *SCpnt) +{ + print_commandk(SCpnt->cmnd); +} + +static void show_phase(struct scsi_cmnd *SCpnt) +{ + int i = nsp_scsi_pointer(SCpnt)->phase; + + char *ph[] = { + "PH_UNDETERMINED", + "PH_ARBSTART", + "PH_SELSTART", + "PH_SELECTED", + "PH_COMMAND", + "PH_DATA", + "PH_STATUS", + "PH_MSG_IN", + "PH_MSG_OUT", + "PH_DISCONNECT", + "PH_RESELECT" + }; + + if ( i < PH_UNDETERMINED || i > PH_RESELECT ) { + printk(KERN_DEBUG "scsi phase: unknown(%d)\n", i); + return; + } + + printk(KERN_DEBUG "scsi phase: %s\n", ph[i]); + + return; +} + +static void show_busphase(unsigned char stat) +{ + switch(stat) { + case BUSPHASE_COMMAND: + printk(KERN_DEBUG "BUSPHASE_COMMAND\n"); + break; + case BUSPHASE_MESSAGE_IN: + printk(KERN_DEBUG "BUSPHASE_MESSAGE_IN\n"); + break; + case BUSPHASE_MESSAGE_OUT: + printk(KERN_DEBUG "BUSPHASE_MESSAGE_OUT\n"); + break; + case BUSPHASE_DATA_IN: + printk(KERN_DEBUG "BUSPHASE_DATA_IN\n"); + break; + case BUSPHASE_DATA_OUT: + printk(KERN_DEBUG "BUSPHASE_DATA_OUT\n"); + break; + case BUSPHASE_STATUS: + printk(KERN_DEBUG "BUSPHASE_STATUS\n"); + break; + case BUSPHASE_SELECT: + printk(KERN_DEBUG "BUSPHASE_SELECT\n"); + break; + default: + printk(KERN_DEBUG "BUSPHASE_other\n"); + break; + } +} + +static void show_message(nsp_hw_data *data) +{ + int i; + + printk(KERN_DEBUG "msg:"); + for(i=0; i < data->MsgLen; i++) { + printk(" %02x", data->MsgBuffer[i]); + } + printk("\n"); +} + +/* end */ diff --git a/drivers/scsi/pcmcia/nsp_io.h b/drivers/scsi/pcmcia/nsp_io.h new file mode 100644 index 000000000..3b8746f85 --- /dev/null +++ b/drivers/scsi/pcmcia/nsp_io.h @@ -0,0 +1,274 @@ +/* + NinjaSCSI I/O funtions + By: YOKOTA Hiroshi + + This software may be used and distributed according to the terms of + the GNU General Public License. 
+ + */ + +/* $Id: nsp_io.h,v 1.3 2003/08/04 21:15:26 elca Exp $ */ + +#ifndef __NSP_IO_H__ +#define __NSP_IO_H__ + +static inline void nsp_write(unsigned int base, + unsigned int index, + unsigned char val); +static inline unsigned char nsp_read(unsigned int base, + unsigned int index); +static inline void nsp_index_write(unsigned int BaseAddr, + unsigned int Register, + unsigned char Value); +static inline unsigned char nsp_index_read(unsigned int BaseAddr, + unsigned int Register); + +/******************************************************************* + * Basic IO + */ + +static inline void nsp_write(unsigned int base, + unsigned int index, + unsigned char val) +{ + outb(val, (base + index)); +} + +static inline unsigned char nsp_read(unsigned int base, + unsigned int index) +{ + return inb(base + index); +} + + +/********************************************************************** + * Indexed IO + */ +static inline unsigned char nsp_index_read(unsigned int BaseAddr, + unsigned int Register) +{ + outb(Register, BaseAddr + INDEXREG); + return inb(BaseAddr + DATAREG); +} + +static inline void nsp_index_write(unsigned int BaseAddr, + unsigned int Register, + unsigned char Value) +{ + outb(Register, BaseAddr + INDEXREG); + outb(Value, BaseAddr + DATAREG); +} + +/********************************************************************* + * fifo func + */ + +/* read 8 bit FIFO */ +static inline void nsp_multi_read_1(unsigned int BaseAddr, + unsigned int Register, + void *buf, + unsigned long count) +{ + insb(BaseAddr + Register, buf, count); +} + +static inline void nsp_fifo8_read(unsigned int base, + void *buf, + unsigned long count) +{ + /*nsp_dbg(NSP_DEBUG_DATA_IO, "buf=0x%p, count=0x%lx", buf, count);*/ + nsp_multi_read_1(base, FIFODATA, buf, count); +} + +/*--------------------------------------------------------------*/ + +/* read 16 bit FIFO */ +static inline void nsp_multi_read_2(unsigned int BaseAddr, + unsigned int Register, + void *buf, + unsigned long count) +{ + insw(BaseAddr + Register, buf, count); +} + +static inline void nsp_fifo16_read(unsigned int base, + void *buf, + unsigned long count) +{ + //nsp_dbg(NSP_DEBUG_DATA_IO, "buf=0x%p, count=0x%lx*2", buf, count); + nsp_multi_read_2(base, FIFODATA, buf, count); +} + +/*--------------------------------------------------------------*/ + +/* read 32bit FIFO */ +static inline void nsp_multi_read_4(unsigned int BaseAddr, + unsigned int Register, + void *buf, + unsigned long count) +{ + insl(BaseAddr + Register, buf, count); +} + +static inline void nsp_fifo32_read(unsigned int base, + void *buf, + unsigned long count) +{ + //nsp_dbg(NSP_DEBUG_DATA_IO, "buf=0x%p, count=0x%lx*4", buf, count); + nsp_multi_read_4(base, FIFODATA, buf, count); +} + +/*----------------------------------------------------------*/ + +/* write 8bit FIFO */ +static inline void nsp_multi_write_1(unsigned int BaseAddr, + unsigned int Register, + void *buf, + unsigned long count) +{ + outsb(BaseAddr + Register, buf, count); +} + +static inline void nsp_fifo8_write(unsigned int base, + void *buf, + unsigned long count) +{ + nsp_multi_write_1(base, FIFODATA, buf, count); +} + +/*---------------------------------------------------------*/ + +/* write 16bit FIFO */ +static inline void nsp_multi_write_2(unsigned int BaseAddr, + unsigned int Register, + void *buf, + unsigned long count) +{ + outsw(BaseAddr + Register, buf, count); +} + +static inline void nsp_fifo16_write(unsigned int base, + void *buf, + unsigned long count) +{ + nsp_multi_write_2(base, FIFODATA, buf, 
count); +} + +/*---------------------------------------------------------*/ + +/* write 32bit FIFO */ +static inline void nsp_multi_write_4(unsigned int BaseAddr, + unsigned int Register, + void *buf, + unsigned long count) +{ + outsl(BaseAddr + Register, buf, count); +} + +static inline void nsp_fifo32_write(unsigned int base, + void *buf, + unsigned long count) +{ + nsp_multi_write_4(base, FIFODATA, buf, count); +} + + +/*====================================================================*/ + +static inline void nsp_mmio_write(unsigned long base, + unsigned int index, + unsigned char val) +{ + unsigned char *ptr = (unsigned char *)(base + NSP_MMIO_OFFSET + index); + + writeb(val, ptr); +} + +static inline unsigned char nsp_mmio_read(unsigned long base, + unsigned int index) +{ + unsigned char *ptr = (unsigned char *)(base + NSP_MMIO_OFFSET + index); + + return readb(ptr); +} + +/*-----------*/ + +static inline unsigned char nsp_mmio_index_read(unsigned long base, + unsigned int reg) +{ + unsigned char *index_ptr = (unsigned char *)(base + NSP_MMIO_OFFSET + INDEXREG); + unsigned char *data_ptr = (unsigned char *)(base + NSP_MMIO_OFFSET + DATAREG); + + writeb((unsigned char)reg, index_ptr); + return readb(data_ptr); +} + +static inline void nsp_mmio_index_write(unsigned long base, + unsigned int reg, + unsigned char val) +{ + unsigned char *index_ptr = (unsigned char *)(base + NSP_MMIO_OFFSET + INDEXREG); + unsigned char *data_ptr = (unsigned char *)(base + NSP_MMIO_OFFSET + DATAREG); + + writeb((unsigned char)reg, index_ptr); + writeb(val, data_ptr); +} + +/* read 32bit FIFO */ +static inline void nsp_mmio_multi_read_4(unsigned long base, + unsigned int Register, + void *buf, + unsigned long count) +{ + unsigned long *ptr = (unsigned long *)(base + Register); + unsigned long *tmp = (unsigned long *)buf; + int i; + + //nsp_dbg(NSP_DEBUG_DATA_IO, "base 0x%0lx ptr 0x%p",base,ptr); + + for (i = 0; i < count; i++) { + *tmp = readl(ptr); + //nsp_dbg(NSP_DEBUG_DATA_IO, "<%d,%p,%p,%lx>", i, ptr, tmp, *tmp); + tmp++; + } +} + +static inline void nsp_mmio_fifo32_read(unsigned int base, + void *buf, + unsigned long count) +{ + //nsp_dbg(NSP_DEBUG_DATA_IO, "buf=0x%p, count=0x%lx*4", buf, count); + nsp_mmio_multi_read_4(base, FIFODATA, buf, count); +} + +static inline void nsp_mmio_multi_write_4(unsigned long base, + unsigned int Register, + void *buf, + unsigned long count) +{ + unsigned long *ptr = (unsigned long *)(base + Register); + unsigned long *tmp = (unsigned long *)buf; + int i; + + //nsp_dbg(NSP_DEBUG_DATA_IO, "base 0x%0lx ptr 0x%p",base,ptr); + + for (i = 0; i < count; i++) { + writel(*tmp, ptr); + //nsp_dbg(NSP_DEBUG_DATA_IO, "<%d,%p,%p,%lx>", i, ptr, tmp, *tmp); + tmp++; + } +} + +static inline void nsp_mmio_fifo32_write(unsigned int base, + void *buf, + unsigned long count) +{ + //nsp_dbg(NSP_DEBUG_DATA_IO, "buf=0x%p, count=0x%lx*4", buf, count); + nsp_mmio_multi_write_4(base, FIFODATA, buf, count); +} + + + +#endif +/* end */ diff --git a/drivers/scsi/pcmcia/nsp_message.c b/drivers/scsi/pcmcia/nsp_message.c new file mode 100644 index 000000000..ef593b70d --- /dev/null +++ b/drivers/scsi/pcmcia/nsp_message.c @@ -0,0 +1,78 @@ +/*========================================================================== + NinjaSCSI-3 message handler + By: YOKOTA Hiroshi + + This software may be used and distributed according to the terms of + the GNU General Public License. 
+ */ + +/* $Id: nsp_message.c,v 1.6 2003/07/26 14:21:09 elca Exp $ */ + +static void nsp_message_in(struct scsi_cmnd *SCpnt) +{ + unsigned int base = SCpnt->device->host->io_port; + nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; + unsigned char data_reg, control_reg; + int ret, len; + + /* + * XXX: NSP QUIRK + * NSP invoke interrupts only in the case of scsi phase changes, + * therefore we should poll the scsi phase here to catch + * the next "msg in" if exists (no scsi phase changes). + */ + ret = 16; + len = 0; + + nsp_dbg(NSP_DEBUG_MSGINOCCUR, "msgin loop"); + do { + /* read data */ + data_reg = nsp_index_read(base, SCSIDATAIN); + + /* assert ACK */ + control_reg = nsp_index_read(base, SCSIBUSCTRL); + control_reg |= SCSI_ACK; + nsp_index_write(base, SCSIBUSCTRL, control_reg); + nsp_negate_signal(SCpnt, BUSMON_REQ, "msgin"); + + data->MsgBuffer[len] = data_reg; len++; + + /* deassert ACK */ + control_reg = nsp_index_read(base, SCSIBUSCTRL); + control_reg &= ~SCSI_ACK; + nsp_index_write(base, SCSIBUSCTRL, control_reg); + + /* catch a next signal */ + ret = nsp_expect_signal(SCpnt, BUSPHASE_MESSAGE_IN, BUSMON_REQ); + } while (ret > 0 && MSGBUF_SIZE > len); + + data->MsgLen = len; + +} + +static void nsp_message_out(struct scsi_cmnd *SCpnt) +{ + nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; + int ret = 1; + int len = data->MsgLen; + + /* + * XXX: NSP QUIRK + * NSP invoke interrupts only in the case of scsi phase changes, + * therefore we should poll the scsi phase here to catch + * the next "msg out" if exists (no scsi phase changes). + */ + + nsp_dbg(NSP_DEBUG_MSGOUTOCCUR, "msgout loop"); + do { + if (nsp_xfer(SCpnt, BUSPHASE_MESSAGE_OUT)) { + nsp_msg(KERN_DEBUG, "msgout: xfer short"); + } + + /* catch a next signal */ + ret = nsp_expect_signal(SCpnt, BUSPHASE_MESSAGE_OUT, BUSMON_REQ); + } while (ret > 0 && len-- > 0); + +} + +/* end */ diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c new file mode 100644 index 000000000..310d0b658 --- /dev/null +++ b/drivers/scsi/pcmcia/qlogic_stub.c @@ -0,0 +1,313 @@ +/*====================================================================== + + A driver for the Qlogic SCSI card + + qlogic_cs.c 1.79 2000/06/12 21:27:26 + + The contents of this file are subject to the Mozilla Public + License Version 1.1 (the "License"); you may not use this file + except in compliance with the License. You may obtain a copy of + the License at http://www.mozilla.org/MPL/ + + Software distributed under the License is distributed on an "AS + IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + implied. See the License for the specific language governing + rights and limitations under the License. + + The initial developer of the original code is David A. Hinds + . Portions created by David A. Hinds + are Copyright (C) 1999 David A. Hinds. All Rights Reserved. + + Alternatively, the contents of this file may be used under the + terms of the GNU General Public License version 2 (the "GPL"), in which + case the provisions of the GPL are applicable instead of the + above. If you wish to allow the use of your version of this file + only under the terms of the GPL and not to allow others to use + your version of this file under the MPL, indicate your decision + by deleting the provisions above and replace them with the notice + and other provisions required by the GPL. If you do not delete + the provisions above, a recipient may use your version of this + file under either the MPL or the GPL. 
+ +======================================================================*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include "../qlogicfas408.h" + +#include +#include +#include + +/* Set the following to 2 to use normal interrupt (active high/totempole- + * tristate), otherwise use 0 (REQUIRED FOR PCMCIA) for active low, open + * drain + */ +#define INT_TYPE 0 + +static char qlogic_name[] = "qlogic_cs"; + +static struct scsi_host_template qlogicfas_driver_template = { + .module = THIS_MODULE, + .name = qlogic_name, + .proc_name = qlogic_name, + .info = qlogicfas408_info, + .queuecommand = qlogicfas408_queuecommand, + .eh_abort_handler = qlogicfas408_abort, + .eh_host_reset_handler = qlogicfas408_host_reset, + .bios_param = qlogicfas408_biosparam, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .dma_boundary = PAGE_SIZE - 1, +}; + +/*====================================================================*/ + +typedef struct scsi_info_t { + struct pcmcia_device *p_dev; + struct Scsi_Host *host; + unsigned short manf_id; +} scsi_info_t; + +static void qlogic_release(struct pcmcia_device *link); +static void qlogic_detach(struct pcmcia_device *p_dev); +static int qlogic_config(struct pcmcia_device * link); + +static struct Scsi_Host *qlogic_detect(struct scsi_host_template *host, + struct pcmcia_device *link, int qbase, int qlirq) +{ + int qltyp; /* type of chip */ + int qinitid; + struct Scsi_Host *shost; /* registered host structure */ + struct qlogicfas408_priv *priv; + + qltyp = qlogicfas408_get_chip_type(qbase, INT_TYPE); + qinitid = host->this_id; + if (qinitid < 0) + qinitid = 7; /* if no ID, use 7 */ + + qlogicfas408_setup(qbase, qinitid, INT_TYPE); + + host->name = qlogic_name; + shost = scsi_host_alloc(host, sizeof(struct qlogicfas408_priv)); + if (!shost) + goto err; + shost->io_port = qbase; + shost->n_io_port = 16; + shost->dma_channel = -1; + if (qlirq != -1) + shost->irq = qlirq; + + priv = get_priv_by_host(shost); + priv->qlirq = qlirq; + priv->qbase = qbase; + priv->qinitid = qinitid; + priv->shost = shost; + priv->int_type = INT_TYPE; + + if (request_irq(qlirq, qlogicfas408_ihandl, 0, qlogic_name, shost)) + goto free_scsi_host; + + sprintf(priv->qinfo, + "Qlogicfas Driver version 0.46, chip %02X at %03X, IRQ %d, TPdma:%d", + qltyp, qbase, qlirq, QL_TURBO_PDMA); + + if (scsi_add_host(shost, NULL)) + goto free_interrupt; + + scsi_scan_host(shost); + + return shost; + +free_interrupt: + free_irq(qlirq, shost); + +free_scsi_host: + scsi_host_put(shost); + +err: + return NULL; +} +static int qlogic_probe(struct pcmcia_device *link) +{ + scsi_info_t *info; + + dev_dbg(&link->dev, "qlogic_attach()\n"); + + /* Create new SCSI device */ + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + info->p_dev = link; + link->priv = info; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + link->config_regs = PRESENT_OPTION; + + return qlogic_config(link); +} /* qlogic_attach */ + +/*====================================================================*/ + +static void qlogic_detach(struct pcmcia_device *link) +{ + dev_dbg(&link->dev, "qlogic_detach\n"); + + qlogic_release(link); + kfree(link->priv); + +} /* qlogic_detach */ + +/*====================================================================*/ + +static int qlogic_config_check(struct pcmcia_device *p_dev, void *priv_data) +{ + p_dev->io_lines = 10; + 
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + + if (p_dev->resource[0]->start == 0) + return -ENODEV; + + return pcmcia_request_io(p_dev); +} + +static int qlogic_config(struct pcmcia_device * link) +{ + scsi_info_t *info = link->priv; + int ret; + struct Scsi_Host *host; + + dev_dbg(&link->dev, "qlogic_config\n"); + + ret = pcmcia_loop_config(link, qlogic_config_check, NULL); + if (ret) + goto failed; + + if (!link->irq) + goto failed; + + ret = pcmcia_enable_device(link); + if (ret) + goto failed; + + if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) { + /* set ATAcmd */ + outb(0xb4, link->resource[0]->start + 0xd); + outb(0x24, link->resource[0]->start + 0x9); + outb(0x04, link->resource[0]->start + 0xd); + } + + /* The KXL-810AN has a bigger IO port window */ + if (resource_size(link->resource[0]) == 32) + host = qlogic_detect(&qlogicfas_driver_template, link, + link->resource[0]->start + 16, link->irq); + else + host = qlogic_detect(&qlogicfas_driver_template, link, + link->resource[0]->start, link->irq); + + if (!host) { + printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name); + goto failed; + } + + info->host = host; + + return 0; + +failed: + pcmcia_disable_device(link); + return -ENODEV; +} /* qlogic_config */ + +/*====================================================================*/ + +static void qlogic_release(struct pcmcia_device *link) +{ + scsi_info_t *info = link->priv; + + dev_dbg(&link->dev, "qlogic_release\n"); + + scsi_remove_host(info->host); + + free_irq(link->irq, info->host); + pcmcia_disable_device(link); + + scsi_host_put(info->host); +} + +/*====================================================================*/ + +static int qlogic_resume(struct pcmcia_device *link) +{ + scsi_info_t *info = link->priv; + int ret; + + ret = pcmcia_enable_device(link); + if (ret) + return ret; + + if ((info->manf_id == MANFID_MACNICA) || + (info->manf_id == MANFID_PIONEER) || + (info->manf_id == 0x0098)) { + outb(0x80, link->resource[0]->start + 0xd); + outb(0x24, link->resource[0]->start + 0x9); + outb(0x04, link->resource[0]->start + 0xd); + } + /* Ugggglllyyyy!!! */ + qlogicfas408_host_reset(NULL); + + return 0; +} + +static const struct pcmcia_device_id qlogic_ids[] = { + PCMCIA_DEVICE_PROD_ID12("EIger Labs", "PCMCIA-to-SCSI Adapter", 0x88395fa7, 0x33b7a5e6), + PCMCIA_DEVICE_PROD_ID12("EPSON", "SCSI-2 PC Card SC200", 0xd361772f, 0x299d1751), + PCMCIA_DEVICE_PROD_ID12("MACNICA", "MIRACLE SCSI-II mPS110", 0x20841b68, 0xab3c3b6d), + PCMCIA_DEVICE_PROD_ID12("MIDORI ELECTRONICS ", "CN-SC43", 0x6534382a, 0xd67eee79), + PCMCIA_DEVICE_PROD_ID12("NEC", "PC-9801N-J03R", 0x18df0ba0, 0x24662e8a), + PCMCIA_DEVICE_PROD_ID12("KME ", "KXLC003", 0x82375a27, 0xf68e5bf7), + PCMCIA_DEVICE_PROD_ID12("KME ", "KXLC004", 0x82375a27, 0x68eace54), + PCMCIA_DEVICE_PROD_ID12("KME", "KXLC101", 0x3faee676, 0x194250ec), + PCMCIA_DEVICE_PROD_ID12("QLOGIC CORPORATION", "pc05", 0xd77b2930, 0xa85b2735), + PCMCIA_DEVICE_PROD_ID12("QLOGIC CORPORATION", "pc05 rev 1.10", 0xd77b2930, 0x70f8b5f8), + PCMCIA_DEVICE_PROD_ID123("KME", "KXLC002", "00", 0x3faee676, 0x81896b61, 0xf99f065f), + PCMCIA_DEVICE_PROD_ID12("RATOC System Inc.", "SCSI2 CARD 37", 0x85c10e17, 0x1a2640c1), + PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "SCSC200A PC CARD SCSI", 0xb4585a1a, 0xa6f06ebe), + PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "SCSC200B PC CARD SCSI-10", 0xb4585a1a, 0x0a88dea0), + /* these conflict with other cards! 
*/ + /* PCMCIA_DEVICE_PROD_ID123("MACNICA", "MIRACLE SCSI", "mPS100", 0x20841b68, 0xf8dedaeb, 0x89f7fafb), */ + /* PCMCIA_DEVICE_PROD_ID123("MACNICA", "MIRACLE SCSI", "mPS100", 0x20841b68, 0xf8dedaeb, 0x89f7fafb), */ + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, qlogic_ids); + +static struct pcmcia_driver qlogic_cs_driver = { + .owner = THIS_MODULE, + .name = "qlogic_cs", + .probe = qlogic_probe, + .remove = qlogic_detach, + .id_table = qlogic_ids, + .resume = qlogic_resume, +}; + +MODULE_AUTHOR("Tom Zerucha, Michael Griffith"); +MODULE_DESCRIPTION("Driver for the PCMCIA Qlogic FAS SCSI controllers"); +MODULE_LICENSE("GPL"); +module_pcmcia_driver(qlogic_cs_driver); diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c new file mode 100644 index 000000000..278c78d06 --- /dev/null +++ b/drivers/scsi/pcmcia/sym53c500_cs.c @@ -0,0 +1,882 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* +* sym53c500_cs.c Bob Tracy (rct@frus.com) +* +* A rewrite of the pcmcia-cs add-on driver for newer (circa 1997) +* New Media Bus Toaster PCMCIA SCSI cards using the Symbios Logic +* 53c500 controller: intended for use with 2.6 and later kernels. +* The pcmcia-cs add-on version of this driver is not supported +* beyond 2.4. It consisted of three files with history/copyright +* information as follows: +* +* SYM53C500.h +* Bob Tracy (rct@frus.com) +* Original by Tom Corner (tcorner@via.at). +* Adapted from NCR53c406a.h which is Copyrighted (C) 1994 +* Normunds Saumanis (normunds@rx.tech.swh.lv) +* +* SYM53C500.c +* Bob Tracy (rct@frus.com) +* Original driver by Tom Corner (tcorner@via.at) was adapted +* from NCR53c406a.c which is Copyrighted (C) 1994, 1995, 1996 +* Normunds Saumanis (normunds@fi.ibm.com) +* +* sym53c500.c +* Bob Tracy (rct@frus.com) +* Original by Tom Corner (tcorner@via.at) was adapted from a +* driver for the Qlogic SCSI card written by +* David Hinds (dhinds@allegro.stanford.edu). +*/ + +#define SYM53C500_DEBUG 0 +#define VERBOSE_SYM53C500_DEBUG 0 + +/* +* Set this to 0 if you encounter kernel lockups while transferring +* data in PIO mode. Note this can be changed via "sysfs". +*/ +#define USE_FAST_PIO 1 + +/* =============== End of user configurable parameters ============== */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + + +/* ================================================================== */ + +#define SYNC_MODE 0 /* Synchronous transfer mode */ + +/* Default configuration */ +#define C1_IMG 0x07 /* ID=7 */ +#define C2_IMG 0x48 /* FE SCSI2 */ +#define C3_IMG 0x20 /* CDB */ +#define C4_IMG 0x04 /* ANE */ +#define C5_IMG 0xa4 /* ? changed from b6= AA PI SIE POL */ +#define C7_IMG 0x80 /* added for SYM53C500 t. 
corner */ + +/* Hardware Registers: offsets from io_port (base) */ + +/* Control Register Set 0 */ +#define TC_LSB 0x00 /* transfer counter lsb */ +#define TC_MSB 0x01 /* transfer counter msb */ +#define SCSI_FIFO 0x02 /* scsi fifo register */ +#define CMD_REG 0x03 /* command register */ +#define STAT_REG 0x04 /* status register */ +#define DEST_ID 0x04 /* selection/reselection bus id */ +#define INT_REG 0x05 /* interrupt status register */ +#define SRTIMOUT 0x05 /* select/reselect timeout reg */ +#define SEQ_REG 0x06 /* sequence step register */ +#define SYNCPRD 0x06 /* synchronous transfer period */ +#define FIFO_FLAGS 0x07 /* indicates # of bytes in fifo */ +#define SYNCOFF 0x07 /* synchronous offset register */ +#define CONFIG1 0x08 /* configuration register */ +#define CLKCONV 0x09 /* clock conversion register */ +/* #define TESTREG 0x0A */ /* test mode register */ +#define CONFIG2 0x0B /* configuration 2 register */ +#define CONFIG3 0x0C /* configuration 3 register */ +#define CONFIG4 0x0D /* configuration 4 register */ +#define TC_HIGH 0x0E /* transfer counter high */ +/* #define FIFO_BOTTOM 0x0F */ /* reserve FIFO byte register */ + +/* Control Register Set 1 */ +/* #define JUMPER_SENSE 0x00 */ /* jumper sense port reg (r/w) */ +/* #define SRAM_PTR 0x01 */ /* SRAM address pointer reg (r/w) */ +/* #define SRAM_DATA 0x02 */ /* SRAM data register (r/w) */ +#define PIO_FIFO 0x04 /* PIO FIFO registers (r/w) */ +/* #define PIO_FIFO1 0x05 */ /* */ +/* #define PIO_FIFO2 0x06 */ /* */ +/* #define PIO_FIFO3 0x07 */ /* */ +#define PIO_STATUS 0x08 /* PIO status (r/w) */ +/* #define ATA_CMD 0x09 */ /* ATA command/status reg (r/w) */ +/* #define ATA_ERR 0x0A */ /* ATA features/error reg (r/w) */ +#define PIO_FLAG 0x0B /* PIO flag interrupt enable (r/w) */ +#define CONFIG5 0x09 /* configuration 5 register */ +/* #define SIGNATURE 0x0E */ /* signature register (r) */ +/* #define CONFIG6 0x0F */ /* configuration 6 register (r) */ +#define CONFIG7 0x0d + +/* select register set 0 */ +#define REG0(x) (outb(C4_IMG, (x) + CONFIG4)) +/* select register set 1 */ +#define REG1(x) outb(C7_IMG, (x) + CONFIG7); outb(C5_IMG, (x) + CONFIG5) + +#if SYM53C500_DEBUG +#define DEB(x) x +#else +#define DEB(x) +#endif + +#if VERBOSE_SYM53C500_DEBUG +#define VDEB(x) x +#else +#define VDEB(x) +#endif + +#define LOAD_DMA_COUNT(x, count) \ + outb(count & 0xff, (x) + TC_LSB); \ + outb((count >> 8) & 0xff, (x) + TC_MSB); \ + outb((count >> 16) & 0xff, (x) + TC_HIGH); + +/* Chip commands */ +#define DMA_OP 0x80 + +#define SCSI_NOP 0x00 +#define FLUSH_FIFO 0x01 +#define CHIP_RESET 0x02 +#define SCSI_RESET 0x03 +#define RESELECT 0x40 +#define SELECT_NO_ATN 0x41 +#define SELECT_ATN 0x42 +#define SELECT_ATN_STOP 0x43 +#define ENABLE_SEL 0x44 +#define DISABLE_SEL 0x45 +#define SELECT_ATN3 0x46 +#define RESELECT3 0x47 +#define TRANSFER_INFO 0x10 +#define INIT_CMD_COMPLETE 0x11 +#define MSG_ACCEPT 0x12 +#define TRANSFER_PAD 0x18 +#define SET_ATN 0x1a +#define RESET_ATN 0x1b +#define SEND_MSG 0x20 +#define SEND_STATUS 0x21 +#define SEND_DATA 0x22 +#define DISCONN_SEQ 0x23 +#define TERMINATE_SEQ 0x24 +#define TARG_CMD_COMPLETE 0x25 +#define DISCONN 0x27 +#define RECV_MSG 0x28 +#define RECV_CMD 0x29 +#define RECV_DATA 0x2a +#define RECV_CMD_SEQ 0x2b +#define TARGET_ABORT_DMA 0x04 + +/* ================================================================== */ + +struct scsi_info_t { + struct pcmcia_device *p_dev; + struct Scsi_Host *host; + unsigned short manf_id; +}; + +/* +* Repository for per-instance host data. 
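+* current_SC holds the single outstanding command (the host template sets +* can_queue = 1); fast_pio selects 32-bit insl()/outsl() burst transfers +* instead of byte-at-a-time PIO and defaults to USE_FAST_PIO (tunable via +* the fast_pio sysfs attribute).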
+*/ +struct sym53c500_data { + struct scsi_cmnd *current_SC; + int fast_pio; +}; + +struct sym53c500_cmd_priv { + int status; + int message; + int phase; +}; + +enum Phase { + idle, + data_out, + data_in, + command_ph, + status_ph, + message_out, + message_in +}; + +/* ================================================================== */ + +static void +chip_init(int io_port) +{ + REG1(io_port); + outb(0x01, io_port + PIO_STATUS); + outb(0x00, io_port + PIO_FLAG); + + outb(C4_IMG, io_port + CONFIG4); /* REG0(io_port); */ + outb(C3_IMG, io_port + CONFIG3); + outb(C2_IMG, io_port + CONFIG2); + outb(C1_IMG, io_port + CONFIG1); + + outb(0x05, io_port + CLKCONV); /* clock conversion factor */ + outb(0x9C, io_port + SRTIMOUT); /* Selection timeout */ + outb(0x05, io_port + SYNCPRD); /* Synchronous transfer period */ + outb(SYNC_MODE, io_port + SYNCOFF); /* synchronous mode */ +} + +static void +SYM53C500_int_host_reset(int io_port) +{ + outb(C4_IMG, io_port + CONFIG4); /* REG0(io_port); */ + outb(CHIP_RESET, io_port + CMD_REG); + outb(SCSI_NOP, io_port + CMD_REG); /* required after reset */ + outb(SCSI_RESET, io_port + CMD_REG); + chip_init(io_port); +} + +static __inline__ int +SYM53C500_pio_read(int fast_pio, int base, unsigned char *request, unsigned int reqlen) +{ + int i; + int len; /* current scsi fifo size */ + + REG1(base); + while (reqlen) { + i = inb(base + PIO_STATUS); + /* VDEB(printk("pio_status=%x\n", i)); */ + if (i & 0x80) + return 0; + + switch (i & 0x1e) { + default: + case 0x10: /* fifo empty */ + len = 0; + break; + case 0x0: + len = 1; + break; + case 0x8: /* fifo 1/3 full */ + len = 42; + break; + case 0xc: /* fifo 2/3 full */ + len = 84; + break; + case 0xe: /* fifo full */ + len = 128; + break; + } + + if ((i & 0x40) && len == 0) { /* fifo empty and interrupt occurred */ + return 0; + } + + if (len) { + if (len > reqlen) + len = reqlen; + + if (fast_pio && len > 3) { + insl(base + PIO_FIFO, request, len >> 2); + request += len & 0xfc; + reqlen -= len & 0xfc; + } else { + while (len--) { + *request++ = inb(base + PIO_FIFO); + reqlen--; + } + } + } + } + return 0; +} + +static __inline__ int +SYM53C500_pio_write(int fast_pio, int base, unsigned char *request, unsigned int reqlen) +{ + int i = 0; + int len; /* current scsi fifo size */ + + REG1(base); + while (reqlen && !(i & 0x40)) { + i = inb(base + PIO_STATUS); + /* VDEB(printk("pio_status=%x\n", i)); */ + if (i & 0x80) /* error */ + return 0; + + switch (i & 0x1e) { + case 0x10: + len = 128; + break; + case 0x0: + len = 84; + break; + case 0x8: + len = 42; + break; + case 0xc: + len = 1; + break; + default: + case 0xe: + len = 0; + break; + } + + if (len) { + if (len > reqlen) + len = reqlen; + + if (fast_pio && len > 3) { + outsl(base + PIO_FIFO, request, len >> 2); + request += len & 0xfc; + reqlen -= len & 0xfc; + } else { + while (len--) { + outb(*request++, base + PIO_FIFO); + reqlen--; + } + } + } + } + return 0; +} + +static irqreturn_t +SYM53C500_intr(int irq, void *dev_id) +{ + unsigned long flags; + struct Scsi_Host *dev = dev_id; + DEB(unsigned char fifo_size;) + DEB(unsigned char seq_reg;) + unsigned char status, int_reg; + unsigned char pio_status; + int port_base = dev->io_port; + struct sym53c500_data *data = + (struct sym53c500_data *)dev->hostdata; + struct scsi_cmnd *curSC = data->current_SC; + struct sym53c500_cmd_priv *scp = scsi_cmd_priv(curSC); + int fast_pio = data->fast_pio; + + spin_lock_irqsave(dev->host_lock, flags); + + VDEB(printk("SYM53C500_intr called\n")); + + REG1(port_base); + pio_status 
= inb(port_base + PIO_STATUS); + REG0(port_base); + status = inb(port_base + STAT_REG); + DEB(seq_reg = inb(port_base + SEQ_REG)); + int_reg = inb(port_base + INT_REG); + DEB(fifo_size = inb(port_base + FIFO_FLAGS) & 0x1f); + +#if SYM53C500_DEBUG + printk("status=%02x, seq_reg=%02x, int_reg=%02x, fifo_size=%02x", + status, seq_reg, int_reg, fifo_size); + printk(", pio=%02x\n", pio_status); +#endif /* SYM53C500_DEBUG */ + + if (int_reg & 0x80) { /* SCSI reset intr */ + DEB(printk("SYM53C500: reset intr received\n")); + curSC->result = DID_RESET << 16; + goto idle_out; + } + + if (pio_status & 0x80) { + printk("SYM53C500: Warning: PIO error!\n"); + curSC->result = DID_ERROR << 16; + goto idle_out; + } + + if (status & 0x20) { /* Parity error */ + printk("SYM53C500: Warning: parity error!\n"); + curSC->result = DID_PARITY << 16; + goto idle_out; + } + + if (status & 0x40) { /* Gross error */ + printk("SYM53C500: Warning: gross error!\n"); + curSC->result = DID_ERROR << 16; + goto idle_out; + } + + if (int_reg & 0x20) { /* Disconnect */ + DEB(printk("SYM53C500: disconnect intr received\n")); + if (scp->phase != message_in) { /* Unexpected disconnect */ + curSC->result = DID_NO_CONNECT << 16; + } else { /* Command complete, return status and message */ + curSC->result = (scp->status & 0xff) | + ((scp->message & 0xff) << 8) | (DID_OK << 16); + } + goto idle_out; + } + + switch (status & 0x07) { /* scsi phase */ + case 0x00: /* DATA-OUT */ + if (int_reg & 0x10) { /* Target requesting info transfer */ + struct scatterlist *sg; + int i; + + scp->phase = data_out; + VDEB(printk("SYM53C500: Data-Out phase\n")); + outb(FLUSH_FIFO, port_base + CMD_REG); + LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */ + outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG); + + scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) { + SYM53C500_pio_write(fast_pio, port_base, + sg_virt(sg), sg->length); + } + REG0(port_base); + } + break; + + case 0x01: /* DATA-IN */ + if (int_reg & 0x10) { /* Target requesting info transfer */ + struct scatterlist *sg; + int i; + + scp->phase = data_in; + VDEB(printk("SYM53C500: Data-In phase\n")); + outb(FLUSH_FIFO, port_base + CMD_REG); + LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */ + outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG); + + scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) { + SYM53C500_pio_read(fast_pio, port_base, + sg_virt(sg), sg->length); + } + REG0(port_base); + } + break; + + case 0x02: /* COMMAND */ + scp->phase = command_ph; + printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n"); + break; + + case 0x03: /* STATUS */ + scp->phase = status_ph; + VDEB(printk("SYM53C500: Status phase\n")); + outb(FLUSH_FIFO, port_base + CMD_REG); + outb(INIT_CMD_COMPLETE, port_base + CMD_REG); + break; + + case 0x04: /* Reserved */ + case 0x05: /* Reserved */ + printk("SYM53C500: WARNING: Reserved phase!!!\n"); + break; + + case 0x06: /* MESSAGE-OUT */ + DEB(printk("SYM53C500: Message-Out phase\n")); + scp->phase = message_out; + outb(SET_ATN, port_base + CMD_REG); /* Reject the message */ + outb(MSG_ACCEPT, port_base + CMD_REG); + break; + + case 0x07: /* MESSAGE-IN */ + VDEB(printk("SYM53C500: Message-In phase\n")); + scp->phase = message_in; + + scp->status = inb(port_base + SCSI_FIFO); + scp->message = inb(port_base + SCSI_FIFO); + + VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f)); + DEB(printk("Status = %02x Message = %02x\n", scp->status, scp->message)); + + if (scp->message == 
SAVE_POINTERS || scp->message == DISCONNECT) { + outb(SET_ATN, port_base + CMD_REG); /* Reject message */ + DEB(printk("Discarding SAVE_POINTERS message\n")); + } + outb(MSG_ACCEPT, port_base + CMD_REG); + break; + } +out: + spin_unlock_irqrestore(dev->host_lock, flags); + return IRQ_HANDLED; + +idle_out: + scp->phase = idle; + scsi_done(curSC); + goto out; +} + +static void +SYM53C500_release(struct pcmcia_device *link) +{ + struct scsi_info_t *info = link->priv; + struct Scsi_Host *shost = info->host; + + dev_dbg(&link->dev, "SYM53C500_release\n"); + + /* + * Do this before releasing/freeing resources. + */ + scsi_remove_host(shost); + + /* + * Interrupts getting hosed on card removal. Try + * the following code, mostly from qlogicfas.c. + */ + if (shost->irq) + free_irq(shost->irq, shost); + if (shost->io_port && shost->n_io_port) + release_region(shost->io_port, shost->n_io_port); + + pcmcia_disable_device(link); + + scsi_host_put(shost); +} /* SYM53C500_release */ + +static const char* +SYM53C500_info(struct Scsi_Host *SChost) +{ + static char info_msg[256]; + struct sym53c500_data *data = + (struct sym53c500_data *)SChost->hostdata; + + DEB(printk("SYM53C500_info called\n")); + (void)snprintf(info_msg, sizeof(info_msg), + "SYM53C500 at 0x%lx, IRQ %d, %s PIO mode.", + SChost->io_port, SChost->irq, data->fast_pio ? "fast" : "slow"); + return (info_msg); +} + +static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt) +{ + struct sym53c500_cmd_priv *scp = scsi_cmd_priv(SCpnt); + int i; + int port_base = SCpnt->device->host->io_port; + struct sym53c500_data *data = + (struct sym53c500_data *)SCpnt->device->host->hostdata; + + VDEB(printk("SYM53C500_queue called\n")); + + DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", + SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->id, + (u8)SCpnt->device->lun, scsi_bufflen(SCpnt))); + + VDEB(for (i = 0; i < SCpnt->cmd_len; i++) + printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i])); + VDEB(printk("\n")); + + data->current_SC = SCpnt; + scp->phase = command_ph; + scp->status = 0; + scp->message = 0; + + /* We are locked here already by the mid layer */ + REG0(port_base); + outb(scmd_id(SCpnt), port_base + DEST_ID); /* set destination */ + outb(FLUSH_FIFO, port_base + CMD_REG); /* reset the fifos */ + + for (i = 0; i < SCpnt->cmd_len; i++) { + outb(SCpnt->cmnd[i], port_base + SCSI_FIFO); + } + outb(SELECT_NO_ATN, port_base + CMD_REG); + + return 0; +} + +static DEF_SCSI_QCMD(SYM53C500_queue) + +static int +SYM53C500_host_reset(struct scsi_cmnd *SCpnt) +{ + int port_base = SCpnt->device->host->io_port; + + DEB(printk("SYM53C500_host_reset called\n")); + spin_lock_irq(SCpnt->device->host->host_lock); + SYM53C500_int_host_reset(port_base); + spin_unlock_irq(SCpnt->device->host->host_lock); + + return SUCCESS; +} + +static int +SYM53C500_biosparm(struct scsi_device *disk, + struct block_device *dev, + sector_t capacity, int *info_array) +{ + int size; + + DEB(printk("SYM53C500_biosparm called\n")); + + size = capacity; + info_array[0] = 64; /* heads */ + info_array[1] = 32; /* sectors */ + info_array[2] = size >> 11; /* cylinders */ + if (info_array[2] > 1024) { /* big disk */ + info_array[0] = 255; + info_array[1] = 63; + info_array[2] = size / (255 * 63); + } + return 0; +} + +static ssize_t +SYM53C500_show_pio(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *SHp = class_to_shost(dev); + struct sym53c500_data *data = + (struct sym53c500_data *)SHp->hostdata; + + return snprintf(buf, 4, "%d\n", 
data->fast_pio); +} + +static ssize_t +SYM53C500_store_pio(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int pio; + struct Scsi_Host *SHp = class_to_shost(dev); + struct sym53c500_data *data = + (struct sym53c500_data *)SHp->hostdata; + + pio = simple_strtoul(buf, NULL, 0); + if (pio == 0 || pio == 1) { + data->fast_pio = pio; + return count; + } + else + return -EINVAL; +} + +/* +* SCSI HBA device attributes we want to +* make available via sysfs. +*/ +static struct device_attribute SYM53C500_pio_attr = { + .attr = { + .name = "fast_pio", + .mode = (S_IRUGO | S_IWUSR), + }, + .show = SYM53C500_show_pio, + .store = SYM53C500_store_pio, +}; + +static struct attribute *SYM53C500_shost_attrs[] = { + &SYM53C500_pio_attr.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(SYM53C500_shost); + +/* +* scsi_host_template initializer +*/ +static const struct scsi_host_template sym53c500_driver_template = { + .module = THIS_MODULE, + .name = "SYM53C500", + .info = SYM53C500_info, + .queuecommand = SYM53C500_queue, + .eh_host_reset_handler = SYM53C500_host_reset, + .bios_param = SYM53C500_biosparm, + .proc_name = "SYM53C500", + .can_queue = 1, + .this_id = 7, + .sg_tablesize = 32, + .shost_groups = SYM53C500_shost_groups, + .cmd_size = sizeof(struct sym53c500_cmd_priv), +}; + +static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data) +{ + p_dev->io_lines = 10; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + + if (p_dev->resource[0]->start == 0) + return -ENODEV; + + return pcmcia_request_io(p_dev); +} + +static int +SYM53C500_config(struct pcmcia_device *link) +{ + struct scsi_info_t *info = link->priv; + int ret; + int irq_level, port_base; + struct Scsi_Host *host; + const struct scsi_host_template *tpnt = &sym53c500_driver_template; + struct sym53c500_data *data; + + dev_dbg(&link->dev, "SYM53C500_config\n"); + + info->manf_id = link->manf_id; + + ret = pcmcia_loop_config(link, SYM53C500_config_check, NULL); + if (ret) + goto failed; + + if (!link->irq) + goto failed; + + ret = pcmcia_enable_device(link); + if (ret) + goto failed; + + /* + * That's the trouble with copying liberally from another driver. + * Some things probably aren't relevant, and I suspect this entire + * section dealing with manufacturer IDs can be scrapped. --rct + */ + if ((info->manf_id == MANFID_MACNICA) || + (info->manf_id == MANFID_PIONEER) || + (info->manf_id == 0x0098)) { + /* set ATAcmd */ + outb(0xb4, link->resource[0]->start + 0xd); + outb(0x24, link->resource[0]->start + 0x9); + outb(0x04, link->resource[0]->start + 0xd); + } + + /* + * irq_level == 0 implies tpnt->can_queue == 0, which + * is not supported in 2.6. Thus, only irq_level > 0 + * will be allowed. 
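+ * (link->irq has already been checked as non-zero earlier in this + * function, so the irq_level == 0 branch below is purely defensive.)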
+ * + * Possible port_base values are as follows: + * + * 0x130, 0x230, 0x280, 0x290, + * 0x320, 0x330, 0x340, 0x350 + */ + port_base = link->resource[0]->start; + irq_level = link->irq; + + DEB(printk("SYM53C500: port_base=0x%x, irq=%d, fast_pio=%d\n", + port_base, irq_level, USE_FAST_PIO);) + + chip_init(port_base); + + host = scsi_host_alloc(tpnt, sizeof(struct sym53c500_data)); + if (!host) { + printk("SYM53C500: Unable to register host, giving up.\n"); + goto err_release; + } + + data = (struct sym53c500_data *)host->hostdata; + + if (irq_level > 0) { + if (request_irq(irq_level, SYM53C500_intr, IRQF_SHARED, "SYM53C500", host)) { + printk("SYM53C500: unable to allocate IRQ %d\n", irq_level); + goto err_free_scsi; + } + DEB(printk("SYM53C500: allocated IRQ %d\n", irq_level)); + } else if (irq_level == 0) { + DEB(printk("SYM53C500: No interrupts detected\n")); + goto err_free_scsi; + } else { + DEB(printk("SYM53C500: Shouldn't get here!\n")); + goto err_free_scsi; + } + + host->unique_id = port_base; + host->irq = irq_level; + host->io_port = port_base; + host->n_io_port = 0x10; + host->dma_channel = -1; + + /* + * Note fast_pio is set to USE_FAST_PIO by + * default, but can be changed via "sysfs". + */ + data->fast_pio = USE_FAST_PIO; + + info->host = host; + + if (scsi_add_host(host, NULL)) + goto err_free_irq; + + scsi_scan_host(host); + + return 0; + +err_free_irq: + free_irq(irq_level, host); +err_free_scsi: + scsi_host_put(host); +err_release: + release_region(port_base, 0x10); + printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n"); + return -ENODEV; + +failed: + SYM53C500_release(link); + return -ENODEV; +} /* SYM53C500_config */ + +static int sym53c500_resume(struct pcmcia_device *link) +{ + struct scsi_info_t *info = link->priv; + + /* See earlier comment about manufacturer IDs. */ + if ((info->manf_id == MANFID_MACNICA) || + (info->manf_id == MANFID_PIONEER) || + (info->manf_id == 0x0098)) { + outb(0x80, link->resource[0]->start + 0xd); + outb(0x24, link->resource[0]->start + 0x9); + outb(0x04, link->resource[0]->start + 0xd); + } + /* + * If things don't work after a "resume", + * this is a good place to start looking. 
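+ * SYM53C500_int_host_reset() issues CHIP_RESET and SCSI_RESET and then + * reruns chip_init(), returning the controller to its default register + * configuration.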
+ */ + SYM53C500_int_host_reset(link->resource[0]->start); + + return 0; +} + +static void +SYM53C500_detach(struct pcmcia_device *link) +{ + dev_dbg(&link->dev, "SYM53C500_detach\n"); + + SYM53C500_release(link); + + kfree(link->priv); + link->priv = NULL; +} /* SYM53C500_detach */ + +static int +SYM53C500_probe(struct pcmcia_device *link) +{ + struct scsi_info_t *info; + + dev_dbg(&link->dev, "SYM53C500_attach()\n"); + + /* Create new SCSI device */ + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + info->p_dev = link; + link->priv = info; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + + return SYM53C500_config(link); +} /* SYM53C500_attach */ + +MODULE_AUTHOR("Bob Tracy "); +MODULE_DESCRIPTION("SYM53C500 PCMCIA SCSI driver"); +MODULE_LICENSE("GPL"); + +static const struct pcmcia_device_id sym53c500_ids[] = { + PCMCIA_DEVICE_PROD_ID12("BASICS by New Media Corporation", "SCSI Sym53C500", 0x23c78a9d, 0x0099e7f7), + PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "SCSI Bus Toaster Sym53C500", 0x085a850b, 0x45432eb8), + PCMCIA_DEVICE_PROD_ID2("SCSI9000", 0x21648f44), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, sym53c500_ids); + +static struct pcmcia_driver sym53c500_cs_driver = { + .owner = THIS_MODULE, + .name = "sym53c500_cs", + .probe = SYM53C500_probe, + .remove = SYM53C500_detach, + .id_table = sym53c500_ids, + .resume = sym53c500_resume, +}; +module_pcmcia_driver(sym53c500_cs_driver); diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile new file mode 100644 index 000000000..bbb51b731 --- /dev/null +++ b/drivers/scsi/pm8001/Makefile @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Kernel configuration file for the PM8001 SAS/SATA 8x6G based HBA driver +# +# Copyright (C) 2008-2009 USI Co., Ltd. + + +obj-$(CONFIG_SCSI_PM8001) += pm80xx.o + +CFLAGS_pm80xx_tracepoints.o := -I$(src) + +pm80xx-y += pm8001_init.o \ + pm8001_sas.o \ + pm8001_ctl.o \ + pm8001_hwi.o \ + pm80xx_hwi.o \ + pm80xx_tracepoints.o diff --git a/drivers/scsi/pm8001/pm8001_chips.h b/drivers/scsi/pm8001/pm8001_chips.h new file mode 100644 index 000000000..9241c7826 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_chips.h @@ -0,0 +1,89 @@ +/* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +#ifndef _PM8001_CHIPS_H_ +#define _PM8001_CHIPS_H_ + +static inline u32 pm8001_read_32(void *virt_addr) +{ + return *((u32 *)virt_addr); +} + +static inline void pm8001_write_32(void *addr, u32 offset, __le32 val) +{ + *((__le32 *)(addr + offset)) = val; +} + +static inline u32 pm8001_cr32(struct pm8001_hba_info *pm8001_ha, u32 bar, + u32 offset) +{ + return readl(pm8001_ha->io_mem[bar].memvirtaddr + offset); +} + +static inline void pm8001_cw32(struct pm8001_hba_info *pm8001_ha, u32 bar, + u32 addr, u32 val) +{ + writel(val, pm8001_ha->io_mem[bar].memvirtaddr + addr); +} +static inline u32 pm8001_mr32(void __iomem *addr, u32 offset) +{ + return readl(addr + offset); +} +static inline void pm8001_mw32(void __iomem *addr, u32 offset, u32 val) +{ + writel(val, addr + offset); +} +static inline u32 get_pci_bar_index(u32 pcibar) +{ + switch (pcibar) { + case 0x18: + case 0x1C: + return 1; + case 0x20: + return 2; + case 0x24: + return 3; + default: + return 0; + } +} + +#endif /* _PM8001_CHIPS_H_ */ + diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c new file mode 100644 index 000000000..5c26a13ff --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -0,0 +1,1041 @@ +/* + * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ +#include +#include +#include "pm8001_sas.h" +#include "pm8001_ctl.h" +#include "pm8001_chips.h" + +/* scsi host attributes */ + +/** + * pm8001_ctl_mpi_interface_rev_show - MPI interface revision number + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + if (pm8001_ha->chip_id == chip_8001) { + return sysfs_emit(buf, "%d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev); + } else { + return sysfs_emit(buf, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev); + } +} +static +DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL); + +/** + * controller_fatal_error_show - check controller is under fatal err + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t controller_fatal_error_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + return sysfs_emit(buf, "%d\n", + pm8001_ha->controller_fatal_error); +} +static DEVICE_ATTR_RO(controller_fatal_error); + +/** + * pm8001_ctl_fw_version_show - firmware version + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_fw_version_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + if (pm8001_ha->chip_id == chip_8001) { + return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev)); + } else { + return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev)); + } +} +static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL); + +/** + * pm8001_ctl_ila_version_show - ila version + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
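+ * Only meaningful for controllers other than the 8001; reading it on an + * 8001 returns an empty string.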
+ */ +static ssize_t pm8001_ctl_ila_version_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + if (pm8001_ha->chip_id != chip_8001) { + return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version)); + } + return 0; +} +static DEVICE_ATTR(ila_version, 0444, pm8001_ctl_ila_version_show, NULL); + +/** + * pm8001_ctl_inactive_fw_version_show - Inactive firmware version number + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_inactive_fw_version_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + if (pm8001_ha->chip_id != chip_8001) { + return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version)); + } + return 0; +} +static +DEVICE_ATTR(inc_fw_ver, 0444, pm8001_ctl_inactive_fw_version_show, NULL); + +/** + * pm8001_ctl_max_out_io_show - max outstanding io supported + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + if (pm8001_ha->chip_id == chip_8001) { + return sysfs_emit(buf, "%d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io); + } else { + return sysfs_emit(buf, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io); + } +} +static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL); +/** + * pm8001_ctl_max_devices_show - max devices support + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_max_devices_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + if (pm8001_ha->chip_id == chip_8001) { + return sysfs_emit(buf, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16)); + } else { + return sysfs_emit(buf, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16)); + } +} +static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL); +/** + * pm8001_ctl_max_sg_list_show - max sg list supported iff not 0.0 for no + * hardware limitation + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
+ */ +static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + if (pm8001_ha->chip_id == chip_8001) { + return sysfs_emit(buf, "%04d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF); + } else { + return sysfs_emit(buf, "%04d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF); + } +} +static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL); + +#define SAS_1_0 0x1 +#define SAS_1_1 0x2 +#define SAS_2_0 0x4 + +static ssize_t +show_sas_spec_support_status(unsigned int mode, char *buf) +{ + ssize_t len = 0; + + if (mode & SAS_1_1) + len = sprintf(buf, "%s", "SAS1.1"); + if (mode & SAS_2_0) + len += sprintf(buf + len, "%s%s", len ? ", " : "", "SAS2.0"); + len += sprintf(buf + len, "\n"); + + return len; +} + +/** + * pm8001_ctl_sas_spec_support_show - sas spec supported + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + unsigned int mode; + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + /* fe000000 means supports SAS2.1 */ + if (pm8001_ha->chip_id == chip_8001) + mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag & + 0xfe000000)>>25; + else + /* fe000000 means supports SAS2.1 */ + mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag & + 0xfe000000)>>25; + return show_sas_spec_support_status(mode, buf); +} +static DEVICE_ATTR(sas_spec_support, S_IRUGO, + pm8001_ctl_sas_spec_support_show, NULL); + +/** + * pm8001_ctl_host_sas_address_show - sas address + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * This is the controller sas address + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_host_sas_address_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + return sysfs_emit(buf, "0x%016llx\n", + be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr)); +} +static DEVICE_ATTR(host_sas_address, S_IRUGO, + pm8001_ctl_host_sas_address_show, NULL); + +/** + * pm8001_ctl_logging_level_show - logging level + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read/write' shost attribute. 
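+ * Values written to this attribute are parsed as hexadecimal.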
+ */ +static ssize_t pm8001_ctl_logging_level_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + return sysfs_emit(buf, "%08xh\n", pm8001_ha->logging_level); +} + +static ssize_t pm8001_ctl_logging_level_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int val = 0; + + if (sscanf(buf, "%x", &val) != 1) + return -EINVAL; + + pm8001_ha->logging_level = val; + return strlen(buf); +} + +static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, + pm8001_ctl_logging_level_show, pm8001_ctl_logging_level_store); +/** + * pm8001_ctl_aap_log_show - aap1 event log + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_aap_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + u8 *ptr = (u8 *)pm8001_ha->memoryMap.region[AAP1].virt_ptr; + int i; + + char *str = buf; + int max = 2; + for (i = 0; i < max; i++) { + str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" + "0x%08x 0x%08x\n", + pm8001_ctl_aap1_memmap(ptr, i, 0), + pm8001_ctl_aap1_memmap(ptr, i, 4), + pm8001_ctl_aap1_memmap(ptr, i, 8), + pm8001_ctl_aap1_memmap(ptr, i, 12), + pm8001_ctl_aap1_memmap(ptr, i, 16), + pm8001_ctl_aap1_memmap(ptr, i, 20), + pm8001_ctl_aap1_memmap(ptr, i, 24), + pm8001_ctl_aap1_memmap(ptr, i, 28)); + } + + return str - buf; +} +static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL); +/** + * pm8001_ctl_ib_queue_log_show - Out bound Queue log + * @cdev:pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int offset; + char *str = buf; + int start = 0; + u32 ib_offset = pm8001_ha->ib_offset; + u32 queue_size = pm8001_ha->max_q_num * PM8001_MPI_QUEUE * 128; +#define IB_MEMMAP(c) \ + (*(u32 *)((u8 *)pm8001_ha-> \ + memoryMap.region[ib_offset].virt_ptr + \ + pm8001_ha->evtlog_ib_offset + (c))) + + for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { + str += sprintf(str, "0x%08x\n", IB_MEMMAP(start)); + start = start + 4; + } + pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET; + if (((pm8001_ha->evtlog_ib_offset) % queue_size) == 0) + pm8001_ha->evtlog_ib_offset = 0; + + return str - buf; +} + +static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL); +/** + * pm8001_ctl_ob_queue_log_show - Out bound Queue log + * @cdev:pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
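+ * Each read returns IB_OB_READ_TIMES dwords and then advances the dump + * window by SYSFS_OFFSET bytes, wrapping around at the end of the queue + * memory.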
+ */ + +static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int offset; + char *str = buf; + int start = 0; + u32 ob_offset = pm8001_ha->ob_offset; + u32 queue_size = pm8001_ha->max_q_num * PM8001_MPI_QUEUE * 128; +#define OB_MEMMAP(c) \ + (*(u32 *)((u8 *)pm8001_ha-> \ + memoryMap.region[ob_offset].virt_ptr + \ + pm8001_ha->evtlog_ob_offset + (c))) + + for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { + str += sprintf(str, "0x%08x\n", OB_MEMMAP(start)); + start = start + 4; + } + pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET; + if (((pm8001_ha->evtlog_ob_offset) % queue_size) == 0) + pm8001_ha->evtlog_ob_offset = 0; + + return str - buf; +} +static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL); +/** + * pm8001_ctl_bios_version_show - Bios version Display + * @cdev:pointer to embedded class device + * @attr: device attribute (unused) + * @buf:the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + char *str = buf; + int bios_index; + DECLARE_COMPLETION_ONSTACK(completion); + struct pm8001_ioctl_payload payload; + + pm8001_ha->nvmd_completion = &completion; + payload.minor_function = 7; + payload.offset = 0; + payload.rd_length = 4096; + payload.func_specific = kzalloc(4096, GFP_KERNEL); + if (!payload.func_specific) + return -ENOMEM; + if (PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload)) { + kfree(payload.func_specific); + return -ENOMEM; + } + wait_for_completion(&completion); + for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT; + bios_index++) + str += sprintf(str, "%c", + *(payload.func_specific+bios_index)); + kfree(payload.func_specific); + return str - buf; +} +static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL); +/** + * event_log_size_show - event log size + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs read shost attribute. + */ +static ssize_t event_log_size_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + return sysfs_emit(buf, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size); +} +static DEVICE_ATTR_RO(event_log_size); +/** + * pm8001_ctl_iop_log_show - IOP event log + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
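+ * The version string is read through a 4096-byte NVMD request + * (minor_function 7); bytes BIOSOFFSET through BIOS_OFFSET_LIMIT - 1 of + * the returned buffer are emitted as characters.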
+ */ +static ssize_t pm8001_ctl_iop_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + char *str = buf; + u32 read_size = + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size / 1024; + static u32 start, end, count; + u32 max_read_times = 32; + u32 max_count = (read_size * 1024) / (max_read_times * 4); + u32 *temp = (u32 *)pm8001_ha->memoryMap.region[IOP].virt_ptr; + + if ((count % max_count) == 0) { + start = 0; + end = max_read_times; + count = 0; + } else { + start = end; + end = end + max_read_times; + } + + for (; start < end; start++) + str += sprintf(str, "%08x ", *(temp+start)); + count++; + return str - buf; +} +static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL); + +/** + * pm8001_ctl_fatal_log_show - fatal error logging + * @cdev:pointer to embedded class device + * @attr: device attribute + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ + +static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + ssize_t count; + + count = pm80xx_get_fatal_dump(cdev, attr, buf); + return count; +} + +static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL); + +/** + * non_fatal_log_show - non fatal error logging + * @cdev:pointer to embedded class device + * @attr: device attribute + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t non_fatal_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + u32 count; + + count = pm80xx_get_non_fatal_dump(cdev, attr, buf); + return count; +} +static DEVICE_ATTR_RO(non_fatal_log); + +static ssize_t non_fatal_count_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + return sysfs_emit(buf, "%08x\n", + pm8001_ha->non_fatal_count); +} + +static ssize_t non_fatal_count_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int val = 0; + + if (kstrtoint(buf, 16, &val) != 0) + return -EINVAL; + + pm8001_ha->non_fatal_count = val; + return strlen(buf); +} +static DEVICE_ATTR_RW(non_fatal_count); + +/** + * pm8001_ctl_gsm_log_show - gsm dump collection + * @cdev:pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + ssize_t count; + + count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf); + return count; +} + +static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL); + +#define FLASH_CMD_NONE 0x00 +#define FLASH_CMD_UPDATE 0x01 +#define FLASH_CMD_SET_NVMD 0x02 + +struct flash_command { + u8 command[8]; + int code; +}; + +static const struct flash_command flash_command_table[] = { + {"set_nvmd", FLASH_CMD_SET_NVMD}, + {"update", FLASH_CMD_UPDATE}, + {"", FLASH_CMD_NONE} /* Last entry should be NULL. 
*/ +}; + +struct error_fw { + char *reason; + int err_code; +}; + +static const struct error_fw flash_error_table[] = { + {"Failed to open fw image file", FAIL_OPEN_BIOS_FILE}, + {"image header mismatch", FLASH_UPDATE_HDR_ERR}, + {"image offset mismatch", FLASH_UPDATE_OFFSET_ERR}, + {"image CRC Error", FLASH_UPDATE_CRC_ERR}, + {"image length Error.", FLASH_UPDATE_LENGTH_ERR}, + {"Failed to program flash chip", FLASH_UPDATE_HW_ERR}, + {"Flash chip not supported.", FLASH_UPDATE_DNLD_NOT_SUPPORTED}, + {"Flash update disabled.", FLASH_UPDATE_DISABLED}, + {"Flash in progress", FLASH_IN_PROGRESS}, + {"Image file size Error", FAIL_FILE_SIZE}, + {"Input parameter error", FAIL_PARAMETERS}, + {"Out of memory", FAIL_OUT_MEMORY}, + {"OK", 0} /* Last entry err_code = 0. */ +}; + +static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha) +{ + struct pm8001_ioctl_payload *payload; + DECLARE_COMPLETION_ONSTACK(completion); + u8 *ioctlbuffer; + u32 ret; + u32 length = 1024 * 5 + sizeof(*payload) - 1; + + if (pm8001_ha->fw_image->size > 4096) { + pm8001_ha->fw_status = FAIL_FILE_SIZE; + return -EFAULT; + } + + ioctlbuffer = kzalloc(length, GFP_KERNEL); + if (!ioctlbuffer) { + pm8001_ha->fw_status = FAIL_OUT_MEMORY; + return -ENOMEM; + } + payload = (struct pm8001_ioctl_payload *)ioctlbuffer; + memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data, + pm8001_ha->fw_image->size); + payload->wr_length = pm8001_ha->fw_image->size; + payload->id = 0; + payload->minor_function = 0x1; + pm8001_ha->nvmd_completion = &completion; + ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload); + if (ret) { + pm8001_ha->fw_status = FAIL_OUT_MEMORY; + goto out; + } + wait_for_completion(&completion); +out: + kfree(ioctlbuffer); + return ret; +} + +static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha) +{ + struct pm8001_ioctl_payload *payload; + DECLARE_COMPLETION_ONSTACK(completion); + u8 *ioctlbuffer; + struct fw_control_info *fwControl; + __be32 partitionSizeTmp; + u32 partitionSize; + u32 loopNumber, loopcount; + struct pm8001_fw_image_header *image_hdr; + u32 sizeRead = 0; + u32 ret = 0; + u32 length = 1024 * 16 + sizeof(*payload) - 1; + u32 fc_len; + u8 *read_buf; + + if (pm8001_ha->fw_image->size < 28) { + pm8001_ha->fw_status = FAIL_FILE_SIZE; + return -EFAULT; + } + ioctlbuffer = kzalloc(length, GFP_KERNEL); + if (!ioctlbuffer) { + pm8001_ha->fw_status = FAIL_OUT_MEMORY; + return -ENOMEM; + } + image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data; + while (sizeRead < pm8001_ha->fw_image->size) { + partitionSizeTmp = + *(__be32 *)((u8 *)&image_hdr->image_length + sizeRead); + partitionSize = be32_to_cpu(partitionSizeTmp); + loopcount = DIV_ROUND_UP(partitionSize + HEADER_LEN, + IOCTL_BUF_SIZE); + for (loopNumber = 0; loopNumber < loopcount; loopNumber++) { + payload = (struct pm8001_ioctl_payload *)ioctlbuffer; + payload->wr_length = 1024*16; + payload->id = 0; + fwControl = + (struct fw_control_info *)&payload->func_specific; + fwControl->len = IOCTL_BUF_SIZE; /* IN */ + fwControl->size = partitionSize + HEADER_LEN;/* IN */ + fwControl->retcode = 0;/* OUT */ + fwControl->offset = loopNumber * IOCTL_BUF_SIZE;/*OUT */ + + /* + * for the last chunk of data in case file size is + * not even with 4k, load only the rest + */ + + read_buf = (u8 *)pm8001_ha->fw_image->data + sizeRead; + fc_len = (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE; + + if (loopcount - loopNumber == 1 && fc_len) { + fwControl->len = fc_len; + memcpy((u8 *)fwControl->buffer, read_buf, fc_len); + 
sizeRead += fc_len; + } else { + memcpy((u8 *)fwControl->buffer, read_buf, IOCTL_BUF_SIZE); + sizeRead += IOCTL_BUF_SIZE; + } + + pm8001_ha->nvmd_completion = &completion; + ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload); + if (ret) { + pm8001_ha->fw_status = FAIL_OUT_MEMORY; + goto out; + } + wait_for_completion(&completion); + if (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS) { + pm8001_ha->fw_status = fwControl->retcode; + ret = -EFAULT; + goto out; + } + } + } +out: + kfree(ioctlbuffer); + return ret; +} +static ssize_t pm8001_store_update_fw(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + char *cmd_ptr, *filename_ptr; + int res, i; + int flash_command = FLASH_CMD_NONE; + int ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + /* this test protects us from running two flash processes at once, + * so we should start with this test */ + if (pm8001_ha->fw_status == FLASH_IN_PROGRESS) + return -EINPROGRESS; + pm8001_ha->fw_status = FLASH_IN_PROGRESS; + + cmd_ptr = kcalloc(count, 2, GFP_KERNEL); + if (!cmd_ptr) { + pm8001_ha->fw_status = FAIL_OUT_MEMORY; + return -ENOMEM; + } + + filename_ptr = cmd_ptr + count; + res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr); + if (res != 2) { + pm8001_ha->fw_status = FAIL_PARAMETERS; + ret = -EINVAL; + goto out; + } + + for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) { + if (!memcmp(flash_command_table[i].command, + cmd_ptr, strlen(cmd_ptr))) { + flash_command = flash_command_table[i].code; + break; + } + } + if (flash_command == FLASH_CMD_NONE) { + pm8001_ha->fw_status = FAIL_PARAMETERS; + ret = -EINVAL; + goto out; + } + + ret = request_firmware(&pm8001_ha->fw_image, + filename_ptr, + pm8001_ha->dev); + + if (ret) { + pm8001_dbg(pm8001_ha, FAIL, + "Failed to load firmware image file %s, error %d\n", + filename_ptr, ret); + pm8001_ha->fw_status = FAIL_OPEN_BIOS_FILE; + goto out; + } + + if (FLASH_CMD_UPDATE == flash_command) + ret = pm8001_update_flash(pm8001_ha); + else + ret = pm8001_set_nvmd(pm8001_ha); + + release_firmware(pm8001_ha->fw_image); +out: + kfree(cmd_ptr); + + if (ret) + return ret; + + pm8001_ha->fw_status = FLASH_OK; + return count; +} + +static ssize_t pm8001_show_update_fw(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + int i; + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + for (i = 0; flash_error_table[i].err_code != 0; i++) { + if (flash_error_table[i].err_code == pm8001_ha->fw_status) + break; + } + if (pm8001_ha->fw_status != FLASH_IN_PROGRESS) + pm8001_ha->fw_status = FLASH_OK; + + return snprintf(buf, PAGE_SIZE, "status=%x %s\n", + flash_error_table[i].err_code, + flash_error_table[i].reason); +} +static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP, + pm8001_show_update_fw, pm8001_store_update_fw); + +static const char *const mpiStateText[] = { + "MPI is not initialized", + "MPI is successfully initialized", + "MPI termination is in progress", + "MPI initialization failed with error in [31:16]" +}; + +/** + * ctl_mpi_state_show - controller MPI state check + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
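+ * The state is taken from bits [1:0] of dword 0 of the general status + * table and mapped through mpiStateText[].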
+ */ +static ssize_t ctl_mpi_state_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + unsigned int mpidw0; + + mpidw0 = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 0); + return sysfs_emit(buf, "%s\n", mpiStateText[mpidw0 & 0x0003]); +} +static DEVICE_ATTR_RO(ctl_mpi_state); + +/** + * ctl_hmi_error_show - controller MPI initialization fails + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t ctl_hmi_error_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + unsigned int mpidw0; + + mpidw0 = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 0); + return sysfs_emit(buf, "0x%08x\n", (mpidw0 >> 16)); +} +static DEVICE_ATTR_RO(ctl_hmi_error); + +/** + * ctl_raae_count_show - controller raae count check + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t ctl_raae_count_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + unsigned int raaecnt; + + raaecnt = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 12); + return sysfs_emit(buf, "0x%08x\n", raaecnt); +} +static DEVICE_ATTR_RO(ctl_raae_count); + +/** + * ctl_iop0_count_show - controller iop0 count check + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t ctl_iop0_count_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + unsigned int iop0cnt; + + iop0cnt = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 16); + return sysfs_emit(buf, "0x%08x\n", iop0cnt); +} +static DEVICE_ATTR_RO(ctl_iop0_count); + +/** + * ctl_iop1_count_show - controller iop1 count check + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
+ */ +static ssize_t ctl_iop1_count_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + unsigned int iop1cnt; + + iop1cnt = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 20); + return sysfs_emit(buf, "0x%08x\n", iop1cnt); + +} +static DEVICE_ATTR_RO(ctl_iop1_count); + +static struct attribute *pm8001_host_attrs[] = { + &dev_attr_interface_rev.attr, + &dev_attr_controller_fatal_error.attr, + &dev_attr_fw_version.attr, + &dev_attr_update_fw.attr, + &dev_attr_aap_log.attr, + &dev_attr_iop_log.attr, + &dev_attr_fatal_log.attr, + &dev_attr_non_fatal_log.attr, + &dev_attr_non_fatal_count.attr, + &dev_attr_gsm_log.attr, + &dev_attr_max_out_io.attr, + &dev_attr_max_devices.attr, + &dev_attr_max_sg_list.attr, + &dev_attr_sas_spec_support.attr, + &dev_attr_logging_level.attr, + &dev_attr_event_log_size.attr, + &dev_attr_host_sas_address.attr, + &dev_attr_bios_version.attr, + &dev_attr_ib_log.attr, + &dev_attr_ob_log.attr, + &dev_attr_ila_version.attr, + &dev_attr_inc_fw_ver.attr, + &dev_attr_ctl_mpi_state.attr, + &dev_attr_ctl_hmi_error.attr, + &dev_attr_ctl_raae_count.attr, + &dev_attr_ctl_iop0_count.attr, + &dev_attr_ctl_iop1_count.attr, + NULL, +}; + +static const struct attribute_group pm8001_host_attr_group = { + .attrs = pm8001_host_attrs +}; + +const struct attribute_group *pm8001_host_groups[] = { + &pm8001_host_attr_group, + NULL +}; diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h new file mode 100644 index 000000000..4743f0de2 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_ctl.h @@ -0,0 +1,68 @@ + /* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +#ifndef PM8001_CTL_H_INCLUDED +#define PM8001_CTL_H_INCLUDED + +#define IOCTL_BUF_SIZE 4096 +#define HEADER_LEN 28 +#define SIZE_OFFSET 16 + +#define BIOSOFFSET 56 +#define BIOS_OFFSET_LIMIT 61 + +#define FLASH_OK 0x000000 +#define FAIL_OPEN_BIOS_FILE 0x000100 +#define FAIL_FILE_SIZE 0x000a00 +#define FAIL_PARAMETERS 0x000b00 +#define FAIL_OUT_MEMORY 0x000c00 +#define FLASH_IN_PROGRESS 0x001000 + +#define IB_OB_READ_TIMES 256 +#define SYSFS_OFFSET 1024 +#define PM80XX_IB_OB_QUEUE_SIZE (32 * 1024) +#define PM8001_IB_OB_QUEUE_SIZE (16 * 1024) + +static inline u32 pm8001_ctl_aap1_memmap(u8 *ptr, int idx, int off) +{ + return *(u32 *)(ptr + idx * 32 + off); +} +#endif /* PM8001_CTL_H_INCLUDED */ + diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h new file mode 100644 index 000000000..501b57423 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_defs.h @@ -0,0 +1,143 @@ +/* + * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + */ + +#ifndef _PM8001_DEFS_H_ +#define _PM8001_DEFS_H_ + +enum chip_flavors { + chip_8001, + chip_8008, + chip_8009, + chip_8018, + chip_8019, + chip_8074, + chip_8076, + chip_8077, + chip_8006, + chip_8070, + chip_8072 +}; + +enum phy_speed { + PHY_SPEED_15 = 0x01, + PHY_SPEED_30 = 0x02, + PHY_SPEED_60 = 0x04, + PHY_SPEED_120 = 0x08, +}; + +enum data_direction { + DATA_DIR_NONE = 0x0, /* NO TRANSFER */ + DATA_DIR_IN = 0x01, /* INBOUND */ + DATA_DIR_OUT = 0x02, /* OUTBOUND */ + DATA_DIR_BYRECIPIENT = 0x04, /* UNSPECIFIED */ +}; + +enum port_type { + PORT_TYPE_SAS = (1L << 1), + PORT_TYPE_SATA = (1L << 0), +}; + +/* driver compile-time configuration */ +#define PM8001_MAX_CCB 1024 /* max ccbs supported */ +#define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */ +#define PM8001_MAX_INB_NUM 64 +#define PM8001_MAX_OUTB_NUM 64 +#define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */ + +/* Inbound/Outbound queue size */ +#define IOMB_SIZE_SPC 64 +#define IOMB_SIZE_SPCV 128 + +/* unchangeable hardware details */ +#define PM8001_MAX_PHYS 16 /* max. possible phys */ +#define PM8001_MAX_PORTS 16 /* max. possible ports */ +#define PM8001_MAX_DEVICES 2048 /* max supported device */ +#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */ +#define PM8001_RESERVE_SLOT 8 + +#define CONFIG_SCSI_PM8001_MAX_DMA_SG 528 +#define PM8001_MAX_DMA_SG CONFIG_SCSI_PM8001_MAX_DMA_SG + +enum memory_region_num { + AAP1 = 0x0, /* application acceleration processor */ + IOP, /* IO processor */ + NVMD, /* NVM device */ + FW_FLASH, /* memory for fw flash update */ + FORENSIC_MEM, /* memory for fw forensic data */ + USI_MAX_MEMCNT_BASE +}; +#define PM8001_EVENT_LOG_SIZE (128 * 1024) + +/** + * maximum DMA memory regions(number of IBQ + number of IBQ CI + * + number of OBQ + number of OBQ PI) + */ +#define USI_MAX_MEMCNT (USI_MAX_MEMCNT_BASE + ((2 * PM8001_MAX_INB_NUM) \ + + (2 * PM8001_MAX_OUTB_NUM))) +/*error code*/ +enum mpi_err { + MPI_IO_STATUS_SUCCESS = 0x0, + MPI_IO_STATUS_BUSY = 0x01, + MPI_IO_STATUS_FAIL = 0x02, +}; + +/** + * Phy Control constants + */ +enum phy_control_type { + PHY_LINK_RESET = 0x01, + PHY_HARD_RESET = 0x02, + PHY_NOTIFY_ENABLE_SPINUP = 0x10, +}; + +enum pm8001_hba_info_flags { + PM8001F_INIT_TIME = (1U << 0), + PM8001F_RUN_TIME = (1U << 1), +}; + +/** + * Phy Status + */ +#define PHY_LINK_DISABLE 0x00 +#define PHY_LINK_DOWN 0x01 +#define PHY_STATE_LINK_UP_SPCV 0x2 +#define PHY_STATE_LINK_UP_SPC 0x1 + +#endif diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c new file mode 100644 index 000000000..90069c7b1 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -0,0 +1,4838 @@ +/* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + #include <linux/slab.h> + #include "pm8001_sas.h" + #include "pm8001_hwi.h" + #include "pm8001_chips.h" + #include "pm8001_ctl.h" + #include "pm80xx_tracepoints.h" + +/** + * read_main_config_table - read the configure table and save it. + * @pm8001_ha: our hba card information + */ +static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + pm8001_ha->main_cfg_tbl.pm8001_tbl.signature = + pm8001_mr32(address, 0x00); + pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev = + pm8001_mr32(address, 0x04); + pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev = + pm8001_mr32(address, 0x08); + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io = + pm8001_mr32(address, 0x0C); + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl = + pm8001_mr32(address, 0x10); + pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag = + pm8001_mr32(address, 0x14); + pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset = + pm8001_mr32(address, 0x18); + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset = + pm8001_mr32(address, MAIN_IBQ_OFFSET); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset = + pm8001_mr32(address, MAIN_OBQ_OFFSET); + pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag = + pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET); + + /* read analog Setting offset from the configuration table */ + pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset = + pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); + + /* read Error Dump Offset and Length */ + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); +} + +/** + * read_general_status_table - read the general status table and save it.
+ * @pm8001_ha: our hba card information + */ +static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->general_stat_tbl_addr; + pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate = + pm8001_mr32(address, 0x00); + pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0 = + pm8001_mr32(address, 0x04); + pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1 = + pm8001_mr32(address, 0x08); + pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt = + pm8001_mr32(address, 0x0C); + pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt = + pm8001_mr32(address, 0x10); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd = + pm8001_mr32(address, 0x14); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0] = + pm8001_mr32(address, 0x18); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1] = + pm8001_mr32(address, 0x1C); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2] = + pm8001_mr32(address, 0x20); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3] = + pm8001_mr32(address, 0x24); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4] = + pm8001_mr32(address, 0x28); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5] = + pm8001_mr32(address, 0x2C); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6] = + pm8001_mr32(address, 0x30); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7] = + pm8001_mr32(address, 0x34); + pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val = + pm8001_mr32(address, 0x38); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0] = + pm8001_mr32(address, 0x3C); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1] = + pm8001_mr32(address, 0x40); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0] = + pm8001_mr32(address, 0x44); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1] = + pm8001_mr32(address, 0x48); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2] = + pm8001_mr32(address, 0x4C); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3] = + pm8001_mr32(address, 0x50); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4] = + pm8001_mr32(address, 0x54); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5] = + pm8001_mr32(address, 0x58); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6] = + pm8001_mr32(address, 0x5C); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7] = + pm8001_mr32(address, 0x60); +} + +/** + * read_inbnd_queue_table - read the inbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_INB_NUM; i++) { + u32 offset = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(address, (offset + 0x18)); + } +} + +/** + * read_outbnd_queue_table - read the outbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { + u32 offset = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(address, (offset + 0x18)); + } +} + +/** + * init_default_table_values - init the default table. 
+ * @pm8001_ha: our hba card information + */ +static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) +{ + int i; + u32 offsetib, offsetob; + void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; + void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; + u32 ib_offset = pm8001_ha->ib_offset; + u32 ob_offset = pm8001_ha->ob_offset; + u32 ci_offset = pm8001_ha->ci_offset; + u32 pi_offset = pm8001_ha->pi_offset; + + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 = + 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 = + 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0; + + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; + for (i = 0; i < pm8001_ha->max_q_num; i++) { + pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = + PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); + pm8001_ha->inbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[ib_offset + i].virt_ptr; + pm8001_ha->inbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[ib_offset + i].total_len; + pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = + pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = + pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].ci_virt = + pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr; + pm8001_write_32(pm8001_ha->inbnd_q_tbl[i].ci_virt, 0, 0); + offsetib = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(addressib, + (offsetib + 0x14))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(addressib, (offsetib + 0x18)); + pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; + pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; + } + for (i = 0; i < pm8001_ha->max_q_num; i++) { + pm8001_ha->outbnd_q_tbl[i].element_size_cnt = + PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); + pm8001_ha->outbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_hi; + 
pm8001_ha->outbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_lo; + pm8001_ha->outbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[ob_offset + i].virt_ptr; + pm8001_ha->outbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[ob_offset + i].total_len; + pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = + pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = + pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_lo; + pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = + 0 | (10 << 16) | (i << 24); + pm8001_ha->outbnd_q_tbl[i].pi_virt = + pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr; + pm8001_write_32(pm8001_ha->outbnd_q_tbl[i].pi_virt, 0, 0); + offsetob = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(addressob, + offsetob + 0x14)); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(addressob, (offsetob + 0x18)); + pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0; + pm8001_ha->outbnd_q_tbl[i].producer_index = 0; + } +} + +/** + * update_main_config_table - update the main default table to the HBA. + * @pm8001_ha: our hba card information + */ +static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + pm8001_mw32(address, 0x24, + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd); + pm8001_mw32(address, 0x28, + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3); + pm8001_mw32(address, 0x2C, + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7); + pm8001_mw32(address, 0x30, + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3); + pm8001_mw32(address, 0x34, + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7); + pm8001_mw32(address, 0x38, + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ITNexus_event_pid0_3); + pm8001_mw32(address, 0x3C, + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ITNexus_event_pid4_7); + pm8001_mw32(address, 0x40, + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ssp_event_pid0_3); + pm8001_mw32(address, 0x44, + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ssp_event_pid4_7); + pm8001_mw32(address, 0x48, + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_smp_event_pid0_3); + pm8001_mw32(address, 0x4C, + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_smp_event_pid4_7); + pm8001_mw32(address, 0x50, + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr); + pm8001_mw32(address, 0x54, + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr); + pm8001_mw32(address, 0x58, + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size); + pm8001_mw32(address, 0x5C, + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option); + pm8001_mw32(address, 0x60, + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr); + pm8001_mw32(address, 0x64, + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr); + pm8001_mw32(address, 0x68, + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size); + pm8001_mw32(address, 0x6C, + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option); + pm8001_mw32(address, 0x70, + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt); +} + +/** + * update_inbnd_queue_table - update the inbound queue table to the HBA. 
+ * @pm8001_ha: our hba card information + * @number: entry in the queue + */ +static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + u16 offset = number * 0x20; + pm8001_mw32(address, offset + 0x00, + pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt); + pm8001_mw32(address, offset + 0x04, + pm8001_ha->inbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + 0x08, + pm8001_ha->inbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + 0x0C, + pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr); + pm8001_mw32(address, offset + 0x10, + pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr); +} + +/** + * update_outbnd_queue_table - update the outbound queue table to the HBA. + * @pm8001_ha: our hba card information + * @number: entry in the queue + */ +static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + u16 offset = number * 0x24; + pm8001_mw32(address, offset + 0x00, + pm8001_ha->outbnd_q_tbl[number].element_size_cnt); + pm8001_mw32(address, offset + 0x04, + pm8001_ha->outbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + 0x08, + pm8001_ha->outbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + 0x0C, + pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr); + pm8001_mw32(address, offset + 0x10, + pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr); + pm8001_mw32(address, offset + 0x1C, + pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay); +} + +/** + * pm8001_bar4_shift - function is called to shift BAR base address + * @pm8001_ha : our hba card information + * @shiftValue : shifting value in memory bar. + */ +int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue) +{ + u32 regVal; + unsigned long start; + + /* program the inbound AXI translation Lower Address */ + pm8001_cw32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW, shiftValue); + + /* confirm the setting is written */ + start = jiffies + HZ; /* 1 sec */ + do { + regVal = pm8001_cr32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW); + } while ((regVal != shiftValue) && time_before(jiffies, start)); + + if (regVal != shiftValue) { + pm8001_dbg(pm8001_ha, INIT, + "TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW = 0x%x\n", + regVal); + return -1; + } + return 0; +} + +/** + * mpi_set_phys_g3_with_ssc + * @pm8001_ha: our hba card information + * @SSCbit: set SSCbit to 0 to disable all phys ssc; 1 to enable all phys ssc. 
+ */ +static void mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, + u32 SSCbit) +{ + u32 offset, i; + unsigned long flags; + +#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000 +#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000 +#define SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET 0x1074 +#define SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET 0x1074 +#define PHY_G3_WITHOUT_SSC_BIT_SHIFT 12 +#define PHY_G3_WITH_SSC_BIT_SHIFT 13 +#define SNW3_PHY_CAPABILITIES_PARITY 31 + + /* + * Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3) + * Using shifted destination address 0x4_0000:0x1074 + 0x4000*(N-4) (N=4:7) + */ + spin_lock_irqsave(&pm8001_ha->lock, flags); + if (-1 == pm8001_bar4_shift(pm8001_ha, + SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return; + } + + for (i = 0; i < 4; i++) { + offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i; + pm8001_cw32(pm8001_ha, 2, offset, 0x80001501); + } + /* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */ + if (-1 == pm8001_bar4_shift(pm8001_ha, + SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return; + } + for (i = 4; i < 8; i++) { + offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4); + pm8001_cw32(pm8001_ha, 2, offset, 0x80001501); + } + /************************************************************* + Change the SSC upspreading value to 0x0 so that upspreading is disabled. + Device MABC SMOD0 Controls + Address: (via MEMBASE-III): + Using shifted destination address 0x0_0000: with Offset 0xD8 + + 31:28 R/W Reserved Do not change + 27:24 R/W SAS_SMOD_SPRDUP 0000 + 23:20 R/W SAS_SMOD_SPRDDN 0000 + 19:0 R/W Reserved Do not change + Upon power-up this register will read as 0x8990c016, + and I would like you to change the SAS_SMOD_SPRDUP bits to 0b0000 + so that the written value will be 0x8090c016. + This will ensure only down-spreading SSC is enabled on the SPC. + *************************************************************/ + pm8001_cr32(pm8001_ha, 2, 0xd8); + pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016); + + /*set the shifted destination address to 0x0 to avoid error operation */ + pm8001_bar4_shift(pm8001_ha, 0x0); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return; +} + +/** + * mpi_set_open_retry_interval_reg + * @pm8001_ha: our hba card information + * @interval: interval time for each OPEN_REJECT (RETRY). The units are in 1us. 
+ */ +static void mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha, + u32 interval) +{ + u32 offset; + u32 value; + u32 i; + unsigned long flags; + +#define OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR 0x00030000 +#define OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR 0x00040000 +#define OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET 0x30B4 +#define OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET 0x30B4 +#define OPEN_RETRY_INTERVAL_REG_MASK 0x0000FFFF + + value = interval & OPEN_RETRY_INTERVAL_REG_MASK; + spin_lock_irqsave(&pm8001_ha->lock, flags); + /* shift bar and set the OPEN_REJECT(RETRY) interval time of PHY 0 -3.*/ + if (-1 == pm8001_bar4_shift(pm8001_ha, + OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return; + } + for (i = 0; i < 4; i++) { + offset = OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET + 0x4000 * i; + pm8001_cw32(pm8001_ha, 2, offset, value); + } + + if (-1 == pm8001_bar4_shift(pm8001_ha, + OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return; + } + for (i = 4; i < 8; i++) { + offset = OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET + 0x4000 * (i-4); + pm8001_cw32(pm8001_ha, 2, offset, value); + } + /*set the shifted destination address to 0x0 to avoid error operation */ + pm8001_bar4_shift(pm8001_ha, 0x0); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return; +} + +/** + * mpi_init_check - check firmware initialization status. + * @pm8001_ha: our hba card information + */ +static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the + table is updated */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_UPDATE); + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 1 * 1000 * 1000;/* 1 sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPC_MSGU_CFG_TABLE_UPDATE; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) + return -1; + /* check the MPI-State for initialization */ + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + if (GST_MPI_STATE_INIT != (gst_len_mpistate & GST_MPI_STATE_MASK)) + return -1; + /* check MPI Initialization error */ + gst_len_mpistate = gst_len_mpistate >> 16; + if (0x0000 != gst_len_mpistate) + return -1; + return 0; +} + +/** + * check_fw_ready - The LLDD check if the FW is ready, if not, return error. 
+ * @pm8001_ha: our hba card information + */ +static int check_fw_ready(struct pm8001_hba_info *pm8001_ha) +{ + u32 value, value1; + u32 max_wait_count; + /* check error state */ + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2); + /* check AAP error */ + if (SCRATCH_PAD1_ERR == (value & SCRATCH_PAD_STATE_MASK)) { + /* error state */ + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); + return -1; + } + + /* check IOP error */ + if (SCRATCH_PAD2_ERR == (value1 & SCRATCH_PAD_STATE_MASK)) { + /* error state */ + value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); + return -1; + } + + /* bit 4-31 of scratch pad1 should be zeros if it is not + in error state*/ + if (value & SCRATCH_PAD1_STATE_MASK) { + /* error case */ + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); + return -1; + } + + /* bit 2, 4-31 of scratch pad2 should be zeros if it is not + in error state */ + if (value1 & SCRATCH_PAD2_STATE_MASK) { + /* error case */ + return -1; + } + + max_wait_count = 1 * 1000 * 1000;/* 1 sec timeout */ + + /* wait until scratch pad 1 and 2 registers in ready state */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) + & SCRATCH_PAD1_RDY; + value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) + & SCRATCH_PAD2_RDY; + if ((--max_wait_count) == 0) + return -1; + } while ((value != SCRATCH_PAD1_RDY) || (value1 != SCRATCH_PAD2_RDY)); + return 0; +} + +static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *base_addr; + u32 value; + u32 offset; + u32 pcibar; + u32 pcilogic; + + value = pm8001_cr32(pm8001_ha, 0, 0x44); + offset = value & 0x03FFFFFF; + pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 Offset: %x\n", offset); + pcilogic = (value & 0xFC000000) >> 26; + pcibar = get_pci_bar_index(pcilogic); + pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar); + pm8001_ha->main_cfg_tbl_addr = base_addr = + pm8001_ha->io_mem[pcibar].memvirtaddr + offset; + pm8001_ha->general_stat_tbl_addr = + base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18); + pm8001_ha->inbnd_q_tbl_addr = + base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C); + pm8001_ha->outbnd_q_tbl_addr = + base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20); +} + +/** + * pm8001_chip_init - the main init function that initialize whole PM8001 chip. 
+ * @pm8001_ha: our hba card information + */ +static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) +{ + u32 i = 0; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + /* 8081 controllers need BAR shift to access MPI space + * as this is shared with BIOS data */ + if (deviceid == 0x8081 || deviceid == 0x0042) { + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { + pm8001_dbg(pm8001_ha, FAIL, + "Shift Bar4 to 0x%x failed\n", + GSM_SM_BASE); + return -1; + } + } + /* check the firmware status */ + if (-1 == check_fw_ready(pm8001_ha)) { + pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n"); + return -EBUSY; + } + + /* Initialize pci space address eg: mpi offset */ + init_pci_device_addresses(pm8001_ha); + init_default_table_values(pm8001_ha); + read_main_config_table(pm8001_ha); + read_general_status_table(pm8001_ha); + read_inbnd_queue_table(pm8001_ha); + read_outbnd_queue_table(pm8001_ha); + /* update main config table ,inbound table and outbound table */ + update_main_config_table(pm8001_ha); + for (i = 0; i < pm8001_ha->max_q_num; i++) + update_inbnd_queue_table(pm8001_ha, i); + for (i = 0; i < pm8001_ha->max_q_num; i++) + update_outbnd_queue_table(pm8001_ha, i); + /* 8081 controller donot require these operations */ + if (deviceid != 0x8081 && deviceid != 0x0042) { + mpi_set_phys_g3_with_ssc(pm8001_ha, 0); + /* 7->130ms, 34->500ms, 119->1.5s */ + mpi_set_open_retry_interval_reg(pm8001_ha, 119); + } + /* notify firmware update finished and check initialization status */ + if (0 == mpi_init_check(pm8001_ha)) { + pm8001_dbg(pm8001_ha, INIT, "MPI initialize successful!\n"); + } else + return -EBUSY; + /*This register is a 16-bit timer with a resolution of 1us. This is the + timer used for interrupt delay/coalescing in the PCIe Application Layer. + Zero is not a valid value. A value of 1 in the register will cause the + interrupts to be normal. 
A value greater than 1 will cause coalescing + delays.*/ + pm8001_cw32(pm8001_ha, 1, 0x0033c0, 0x1); + pm8001_cw32(pm8001_ha, 1, 0x0033c4, 0x0); + return 0; +} + +static void pm8001_chip_post_init(struct pm8001_hba_info *pm8001_ha) +{ +} + +static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + if (deviceid == 0x8081 || deviceid == 0x0042) { + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { + pm8001_dbg(pm8001_ha, FAIL, + "Shift Bar4 to 0x%x failed\n", + GSM_SM_BASE); + return -1; + } + } + init_pci_device_addresses(pm8001_ha); + /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the + table is stop */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_RESET); + + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 1 * 1000 * 1000;/* 1 sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPC_MSGU_CFG_TABLE_RESET; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) { + pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:IBDB value/=0x%x\n", + value); + return -1; + } + + /* check the MPI-State for termination in progress */ + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 1 * 1000 * 1000; /* 1 sec */ + do { + udelay(1); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + if (GST_MPI_STATE_UNINIT == + (gst_len_mpistate & GST_MPI_STATE_MASK)) + break; + } while (--max_wait_count); + if (!max_wait_count) { + pm8001_dbg(pm8001_ha, FAIL, " TIME OUT MPI State = 0x%x\n", + gst_len_mpistate & GST_MPI_STATE_MASK); + return -1; + } + return 0; +} + +/** + * soft_reset_ready_check - Function to check FW is ready for soft reset. 
+ * @pm8001_ha: our hba card information + */ +static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 regVal, regVal1, regVal2; + if (mpi_uninit_check(pm8001_ha) != 0) { + pm8001_dbg(pm8001_ha, FAIL, "MPI state is not ready\n"); + return -1; + } + /* read the scratch pad 2 register bit 2 */ + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) + & SCRATCH_PAD2_FWRDY_RST; + if (regVal == SCRATCH_PAD2_FWRDY_RST) { + pm8001_dbg(pm8001_ha, INIT, "Firmware is ready for reset.\n"); + } else { + unsigned long flags; + /* Trigger NMI twice via RB6 */ + spin_lock_irqsave(&pm8001_ha->lock, flags); + if (-1 == pm8001_bar4_shift(pm8001_ha, RB6_ACCESS_REG)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, FAIL, + "Shift Bar4 to 0x%x failed\n", + RB6_ACCESS_REG); + return -1; + } + pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET, + RB6_MAGIC_NUMBER_RST); + pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET, RB6_MAGIC_NUMBER_RST); + /* wait for 100 ms */ + mdelay(100); + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) & + SCRATCH_PAD2_FWRDY_RST; + if (regVal != SCRATCH_PAD2_FWRDY_RST) { + regVal1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + regVal2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2); + pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:MSGU_SCRATCH_PAD1=0x%x, MSGU_SCRATCH_PAD2=0x%x\n", + regVal1, regVal2); + pm8001_dbg(pm8001_ha, FAIL, + "SCRATCH_PAD0 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)); + pm8001_dbg(pm8001_ha, FAIL, + "SCRATCH_PAD3 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return -1; + } + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + } + return 0; +} + +/** + * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all + * the FW register status to the originated status. 
+ * @pm8001_ha: our hba card information + */ +static int +pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 regVal, toggleVal; + u32 max_wait_count; + u32 regVal1, regVal2, regVal3; + u32 signature = 0x252acbcd; /* for host scratch pad0 */ + unsigned long flags; + + /* step1: Check FW is ready for soft reset */ + if (soft_reset_ready_check(pm8001_ha) != 0) { + pm8001_dbg(pm8001_ha, FAIL, "FW is not ready\n"); + return -1; + } + + /* step 2: clear NMI status register on AAP1 and IOP, write the same + value to clear */ + /* map 0x60000 to BAR4(0x20), BAR2(win) */ + spin_lock_irqsave(&pm8001_ha->lock, flags); + if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n", + MBIC_AAP1_ADDR_BASE); + return -1; + } + regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP); + pm8001_dbg(pm8001_ha, INIT, "MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", + regVal); + pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0); + /* map 0x70000 to BAR4(0x20), BAR2(win) */ + if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n", + MBIC_IOP_ADDR_BASE); + return -1; + } + regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1); + pm8001_dbg(pm8001_ha, INIT, "MBIC - NMI Enable VPE0 (AAP1)= 0x%x\n", + regVal); + pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1, 0x0); + + regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE); + pm8001_dbg(pm8001_ha, INIT, "PCIE -Event Interrupt Enable = 0x%x\n", + regVal); + pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE, 0x0); + + regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT); + pm8001_dbg(pm8001_ha, INIT, "PCIE - Event Interrupt = 0x%x\n", + regVal); + pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT, regVal); + + regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE); + pm8001_dbg(pm8001_ha, INIT, "PCIE -Error Interrupt Enable = 0x%x\n", + regVal); + pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE, 0x0); + + regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT); + pm8001_dbg(pm8001_ha, INIT, "PCIE - Error Interrupt = 0x%x\n", regVal); + pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT, regVal); + + /* read the scratch pad 1 register bit 2 */ + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) + & SCRATCH_PAD1_RST; + toggleVal = regVal ^ SCRATCH_PAD1_RST; + + /* set signature in host scratch pad0 register to tell SPC that the + host performs the soft reset */ + pm8001_cw32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0, signature); + + /* read required registers for confirmming */ + /* map 0x0700000 to BAR4(0x20), BAR2(win) */ + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n", + GSM_ADDR_BASE); + return -1; + } + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x0(0x00007b88)-GSM Configuration and Reset = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)); + + /* step 3: host read GSM Configuration and Reset register */ + regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET); + /* Put those bits to low */ + /* GSM XCBI offset = 0x70 0000 + 0x00 Bit 13 COM_SLV_SW_RSTB 1 + 0x00 Bit 12 QSSP_SW_RSTB 1 + 0x00 Bit 11 RAAE_SW_RSTB 1 + 0x00 Bit 9 RB_1_SW_RSTB 1 + 0x00 Bit 8 SM_SW_RSTB 1 + */ + regVal &= ~(0x00003b00); + /* host write GSM Configuration and Reset register */ + 
pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x0 (0x00007b88 ==> 0x00004088) - GSM Configuration and Reset is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)); + + /* step 4: */ + /* disable GSM - Read Address Parity Check */ + regVal1 = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x700038 - Read Address Parity Check Enable = 0x%x\n", + regVal1); + pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, 0x0); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x700038 - Read Address Parity Check Enable is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)); + + /* disable GSM - Write Address Parity Check */ + regVal2 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x700040 - Write Address Parity Check Enable = 0x%x\n", + regVal2); + pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, 0x0); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x700040 - Write Address Parity Check Enable is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)); + + /* disable GSM - Write Data Parity Check */ + regVal3 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK); + pm8001_dbg(pm8001_ha, INIT, "GSM 0x300048 - Write Data Parity Check Enable = 0x%x\n", + regVal3); + pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, 0x0); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x300048 - Write Data Parity Check Enable is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)); + + /* step 5: delay 10 usec */ + udelay(10); + /* step 5-b: set GPIO-0 output control to tristate anyway */ + if (-1 == pm8001_bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, INIT, "Shift Bar4 to 0x%x failed\n", + GPIO_ADDR_BASE); + return -1; + } + regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET); + pm8001_dbg(pm8001_ha, INIT, "GPIO Output Control Register: = 0x%x\n", + regVal); + /* set GPIO-0 output control to tri-state */ + regVal &= 0xFFFFFFFC; + pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal); + + /* Step 6: Reset the IOP and AAP1 */ + /* map 0x00000 to BAR4(0x20), BAR2(win) */ + if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "SPC Shift Bar4 to 0x%x failed\n", + SPC_TOP_LEVEL_ADDR_BASE); + return -1; + } + regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET); + pm8001_dbg(pm8001_ha, INIT, "Top Register before resetting IOP/AAP1:= 0x%x\n", + regVal); + regVal &= ~(SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS); + pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal); + + /* step 7: Reset the BDMA/OSSP */ + regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET); + pm8001_dbg(pm8001_ha, INIT, "Top Register before resetting BDMA/OSSP: = 0x%x\n", + regVal); + regVal &= ~(SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP); + pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal); + + /* step 8: delay 10 usec */ + udelay(10); + + /* step 9: bring the BDMA and OSSP out of reset */ + regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET); + pm8001_dbg(pm8001_ha, INIT, + "Top Register before bringing up BDMA/OSSP:= 0x%x\n", + regVal); + regVal |= (SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP); + pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal); + + /* step 10: delay 10 usec */ + udelay(10); + + /* step 11: reads and sets the GSM Configuration and Reset Register */ + /* map 
0x0700000 to BAR4(0x20), BAR2(win) */ + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "SPC Shift Bar4 to 0x%x failed\n", + GSM_ADDR_BASE); + return -1; + } + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x0 (0x00007b88)-GSM Configuration and Reset = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)); + regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET); + /* Put those bits to high */ + /* GSM XCBI offset = 0x70 0000 + 0x00 Bit 13 COM_SLV_SW_RSTB 1 + 0x00 Bit 12 QSSP_SW_RSTB 1 + 0x00 Bit 11 RAAE_SW_RSTB 1 + 0x00 Bit 9 RB_1_SW_RSTB 1 + 0x00 Bit 8 SM_SW_RSTB 1 + */ + regVal |= (GSM_CONFIG_RESET_VALUE); + pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal); + pm8001_dbg(pm8001_ha, INIT, "GSM (0x00004088 ==> 0x00007b88) - GSM Configuration and Reset is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)); + + /* step 12: Restore GSM - Read Address Parity Check */ + regVal = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK); + /* just for debugging */ + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x700038 - Read Address Parity Check Enable = 0x%x\n", + regVal); + pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, regVal1); + pm8001_dbg(pm8001_ha, INIT, "GSM 0x700038 - Read Address Parity Check Enable is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)); + /* Restore GSM - Write Address Parity Check */ + regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK); + pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, regVal2); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x700040 - Write Address Parity Check Enable is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)); + /* Restore GSM - Write Data Parity Check */ + regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK); + pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, regVal3); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x700048 - Write Data Parity Check Enable is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)); + + /* step 13: bring the IOP and AAP1 out of reset */ + /* map 0x00000 to BAR4(0x20), BAR2(win) */ + if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n", + SPC_TOP_LEVEL_ADDR_BASE); + return -1; + } + regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET); + regVal |= (SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS); + pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal); + + /* step 14: delay 10 usec - Normal Mode */ + udelay(10); + /* check Soft Reset Normal mode or Soft Reset HDA mode */ + if (signature == SPC_SOFT_RESET_SIGNATURE) { + /* step 15 (Normal Mode): wait until scratch pad1 register + bit 2 toggled */ + max_wait_count = 2 * 1000 * 1000;/* 2 sec */ + do { + udelay(1); + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) & + SCRATCH_PAD1_RST; + } while ((regVal != toggleVal) && (--max_wait_count)); + + if (!max_wait_count) { + regVal = pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_1); + pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT : ToggleVal 0x%x,MSGU_SCRATCH_PAD1 = 0x%x\n", + toggleVal, regVal); + pm8001_dbg(pm8001_ha, FAIL, + "SCRATCH_PAD0 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_0)); + pm8001_dbg(pm8001_ha, FAIL, + "SCRATCH_PAD2 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_2)); + pm8001_dbg(pm8001_ha, FAIL, + "SCRATCH_PAD3 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, + 
MSGU_SCRATCH_PAD_3)); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return -1; + } + + /* step 16 (Normal) - Clear ODMR and ODCR */ + pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL); + + /* step 17 (Normal Mode): wait for the FW and IOP to get + ready - 1 sec timeout */ + /* Wait for the SPC Configuration Table to be ready */ + if (check_fw_ready(pm8001_ha) == -1) { + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + /* return error if MPI Configuration Table not ready */ + pm8001_dbg(pm8001_ha, INIT, + "FW not ready SCRATCH_PAD1 = 0x%x\n", + regVal); + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2); + /* return error if MPI Configuration Table not ready */ + pm8001_dbg(pm8001_ha, INIT, + "FW not ready SCRATCH_PAD2 = 0x%x\n", + regVal); + pm8001_dbg(pm8001_ha, INIT, + "SCRATCH_PAD0 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_0)); + pm8001_dbg(pm8001_ha, INIT, + "SCRATCH_PAD3 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_3)); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return -1; + } + } + pm8001_bar4_shift(pm8001_ha, 0); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + + pm8001_dbg(pm8001_ha, INIT, "SPC soft reset Complete\n"); + return 0; +} + +static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 i; + u32 regVal; + pm8001_dbg(pm8001_ha, INIT, "chip reset start\n"); + + /* do SPC chip reset. */ + regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET); + regVal &= ~(SPC_REG_RESET_DEVICE); + pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal); + + /* delay 10 usec */ + udelay(10); + + /* bring chip reset out of reset */ + regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET); + regVal |= SPC_REG_RESET_DEVICE; + pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal); + + /* delay 10 usec */ + udelay(10); + + /* wait for 20 msec until the firmware gets reloaded */ + i = 20; + do { + mdelay(1); + } while ((--i) != 0); + + pm8001_dbg(pm8001_ha, INIT, "chip reset finished\n"); +} + +/** + * pm8001_chip_iounmap - which mapped when initialized. 
+ * @pm8001_ha: our hba card information + */ +void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) +{ + s8 bar, logical = 0; + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { + /* + ** logical BARs for SPC: + ** bar 0 and 1 - logical BAR0 + ** bar 2 and 3 - logical BAR1 + ** bar4 - logical BAR2 + ** bar5 - logical BAR3 + ** Skip the appropriate assignments: + */ + if ((bar == 1) || (bar == 3)) + continue; + if (pm8001_ha->io_mem[logical].memvirtaddr) { + iounmap(pm8001_ha->io_mem[logical].memvirtaddr); + logical++; + } + } +} + +#ifndef PM8001_USE_MSIX +/** + * pm8001_chip_intx_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL); + pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); +} + +/** + * pm8001_chip_intx_interrupt_disable - disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL); +} + +#else + +/** + * pm8001_chip_msix_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + * @int_vec_idx: interrupt number to enable + */ +static void +pm8001_chip_msix_interrupt_enable(struct pm8001_hba_info *pm8001_ha, + u32 int_vec_idx) +{ + u32 msi_index; + u32 value; + msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE; + msi_index += MSIX_TABLE_BASE; + pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_ENABLE); + value = (1 << int_vec_idx); + pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, value); + +} + +/** + * pm8001_chip_msix_interrupt_disable - disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + * @int_vec_idx: interrupt number to disable + */ +static void +pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha, + u32 int_vec_idx) +{ + u32 msi_index; + msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE; + msi_index += MSIX_TABLE_BASE; + pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_DISABLE); +} +#endif + +/** + * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + * @vec: unused + */ +static void +pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ +#ifdef PM8001_USE_MSIX + pm8001_chip_msix_interrupt_enable(pm8001_ha, 0); +#else + pm8001_chip_intx_interrupt_enable(pm8001_ha); +#endif +} + +/** + * pm8001_chip_interrupt_disable - disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + * @vec: unused + */ +static void +pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ +#ifdef PM8001_USE_MSIX + pm8001_chip_msix_interrupt_disable(pm8001_ha, 0); +#else + pm8001_chip_intx_interrupt_disable(pm8001_ha); +#endif +} + +/** + * pm8001_mpi_msg_free_get - get the free message buffer for transfer + * inbound queue. + * @circularQ: the inbound queue we want to transfer to HBA. + * @messageSize: the message size of this transfer, normally it is 64 bytes + * @messagePtr: the pointer to message. 
+ */ +int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, + u16 messageSize, void **messagePtr) +{ + u32 offset, consumer_index; + struct mpi_msg_hdr *msgHeader; + u8 bcCount = 1; /* only support single buffer */ + + /* Checks is the requested message size can be allocated in this queue*/ + if (messageSize > IOMB_SIZE_SPCV) { + *messagePtr = NULL; + return -1; + } + + /* Stores the new consumer index */ + consumer_index = pm8001_read_32(circularQ->ci_virt); + circularQ->consumer_index = cpu_to_le32(consumer_index); + if (((circularQ->producer_idx + bcCount) % PM8001_MPI_QUEUE) == + le32_to_cpu(circularQ->consumer_index)) { + *messagePtr = NULL; + return -1; + } + /* get memory IOMB buffer address */ + offset = circularQ->producer_idx * messageSize; + /* increment to next bcCount element */ + circularQ->producer_idx = (circularQ->producer_idx + bcCount) + % PM8001_MPI_QUEUE; + /* Adds that distance to the base of the region virtual address plus + the message header size*/ + msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + offset); + *messagePtr = ((void *)msgHeader) + sizeof(struct mpi_msg_hdr); + return 0; +} + +/** + * pm8001_mpi_build_cmd- build the message queue for transfer, update the PI to + * FW to tell the fw to get this message from IOMB. + * @pm8001_ha: our hba card information + * @q_index: the index in the inbound queue we want to transfer to HBA. + * @opCode: the operation code represents commands which LLDD and fw recognized. + * @payload: the command payload of each operation command. + * @nb: size in bytes of the command payload + * @responseQueue: queue to interrupt on w/ command response (if any) + */ +int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, + u32 q_index, u32 opCode, void *payload, size_t nb, + u32 responseQueue) +{ + u32 Header = 0, hpriority = 0, bc = 1, category = 0x02; + void *pMessage; + unsigned long flags; + struct inbound_queue_table *circularQ = &pm8001_ha->inbnd_q_tbl[q_index]; + int rv; + u32 htag = le32_to_cpu(*(__le32 *)payload); + + trace_pm80xx_mpi_build_cmd(pm8001_ha->id, opCode, htag, q_index, + circularQ->producer_idx, le32_to_cpu(circularQ->consumer_index)); + + if (WARN_ON(q_index >= pm8001_ha->max_q_num)) + return -EINVAL; + + spin_lock_irqsave(&circularQ->iq_lock, flags); + rv = pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size, + &pMessage); + if (rv < 0) { + pm8001_dbg(pm8001_ha, IO, "No free mpi buffer\n"); + rv = -ENOMEM; + goto done; + } + + if (nb > (pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr))) + nb = pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr); + memcpy(pMessage, payload, nb); + if (nb + sizeof(struct mpi_msg_hdr) < pm8001_ha->iomb_size) + memset(pMessage + nb, 0, pm8001_ha->iomb_size - + (nb + sizeof(struct mpi_msg_hdr))); + + /*Build the header*/ + Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24) + | ((responseQueue & 0x3F) << 16) + | ((category & 0xF) << 12) | (opCode & 0xFFF)); + + pm8001_write_32((pMessage - 4), 0, cpu_to_le32(Header)); + /*Update the PI to the firmware*/ + pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar, + circularQ->pi_offset, circularQ->producer_idx); + pm8001_dbg(pm8001_ha, DEVIO, + "INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n", + responseQueue, opCode, circularQ->producer_idx, + circularQ->consumer_index); +done: + spin_unlock_irqrestore(&circularQ->iq_lock, flags); + return rv; +} + +u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, + struct outbound_queue_table *circularQ, u8 bc) +{ + u32 producer_index; + struct 
mpi_msg_hdr *msgHeader; + struct mpi_msg_hdr *pOutBoundMsgHeader; + + msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr)); + pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + + circularQ->consumer_idx * pm8001_ha->iomb_size); + if (pOutBoundMsgHeader != msgHeader) { + pm8001_dbg(pm8001_ha, FAIL, + "consumer_idx = %d msgHeader = %p\n", + circularQ->consumer_idx, msgHeader); + + /* Update the producer index from SPC */ + producer_index = pm8001_read_32(circularQ->pi_virt); + circularQ->producer_index = cpu_to_le32(producer_index); + pm8001_dbg(pm8001_ha, FAIL, + "consumer_idx = %d producer_index = %dmsgHeader = %p\n", + circularQ->consumer_idx, + circularQ->producer_index, msgHeader); + return 0; + } + /* free the circular queue buffer elements associated with the message*/ + circularQ->consumer_idx = (circularQ->consumer_idx + bc) + % PM8001_MPI_QUEUE; + /* update the CI of outbound queue */ + pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset, + circularQ->consumer_idx); + /* Update the producer index from SPC*/ + producer_index = pm8001_read_32(circularQ->pi_virt); + circularQ->producer_index = cpu_to_le32(producer_index); + pm8001_dbg(pm8001_ha, IO, " CI=%d PI=%d\n", + circularQ->consumer_idx, circularQ->producer_index); + return 0; +} + +/** + * pm8001_mpi_msg_consume- get the MPI message from outbound queue + * message table. + * @pm8001_ha: our hba card information + * @circularQ: the outbound queue table. + * @messagePtr1: the message contents of this outbound message. + * @pBC: the message size. + */ +u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, + void **messagePtr1, u8 *pBC) +{ + struct mpi_msg_hdr *msgHeader; + __le32 msgHeader_tmp; + u32 header_tmp; + do { + /* If there are not-yet-delivered messages ... 
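The 32-bit header packed by pm8001_mpi_build_cmd() above is re-read by the consume loop that follows: bit 31 as the valid flag, bits 28..24 as the buffer count, and bits 11..0 as the opcode (OPC_OUB_SKIP_ENTRY entries are stepped over). A standalone sketch of that bit layout, with illustrative field values:

    /*
     * Standalone sketch of the IOMB header layout packed in
     * pm8001_mpi_build_cmd() and decoded in pm8001_mpi_msg_consume():
     *   bit 31      valid
     *   bit 30      high priority
     *   bits 28..24 buffer count (bc)
     *   bits 21..16 response (outbound) queue
     *   bits 15..12 category
     *   bits 11..0  opcode
     * Field values below are illustrative.
     */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t sketch_pack_header(uint32_t hprio, uint32_t bc,
                                       uint32_t obq, uint32_t cat, uint32_t opc)
    {
        return (1u << 31) | ((hprio & 0x1) << 30) | ((bc & 0x1f) << 24) |
               ((obq & 0x3f) << 16) | ((cat & 0xf) << 12) | (opc & 0xfff);
    }

    int main(void)
    {
        /* category 0x02 and bc == 1 match what the driver uses above */
        uint32_t hdr = sketch_pack_header(0, 1, 0, 0x02, 0x123);

        printf("header  0x%08x\n", (unsigned)hdr);
        printf("valid   %u\n", (unsigned)((hdr >> 31) & 0x1));
        printf("bc      %u\n", (unsigned)((hdr >> 24) & 0x1f));
        printf("opcode  0x%03x\n", (unsigned)(hdr & 0xfff));
        return 0;
    }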
*/ + if (le32_to_cpu(circularQ->producer_index) + != circularQ->consumer_idx) { + /*Get the pointer to the circular queue buffer element*/ + msgHeader = (struct mpi_msg_hdr *) + (circularQ->base_virt + + circularQ->consumer_idx * pm8001_ha->iomb_size); + /* read header */ + header_tmp = pm8001_read_32(msgHeader); + msgHeader_tmp = cpu_to_le32(header_tmp); + pm8001_dbg(pm8001_ha, DEVIO, + "outbound opcode msgheader:%x ci=%d pi=%d\n", + msgHeader_tmp, circularQ->consumer_idx, + circularQ->producer_index); + if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) { + if (OPC_OUB_SKIP_ENTRY != + (le32_to_cpu(msgHeader_tmp) & 0xfff)) { + *messagePtr1 = + ((u8 *)msgHeader) + + sizeof(struct mpi_msg_hdr); + *pBC = (u8)((le32_to_cpu(msgHeader_tmp) + >> 24) & 0x1f); + pm8001_dbg(pm8001_ha, IO, + ": CI=%d PI=%d msgHeader=%x\n", + circularQ->consumer_idx, + circularQ->producer_index, + msgHeader_tmp); + return MPI_IO_STATUS_SUCCESS; + } else { + circularQ->consumer_idx = + (circularQ->consumer_idx + + ((le32_to_cpu(msgHeader_tmp) + >> 24) & 0x1f)) + % PM8001_MPI_QUEUE; + msgHeader_tmp = 0; + pm8001_write_32(msgHeader, 0, 0); + /* update the CI of outbound queue */ + pm8001_cw32(pm8001_ha, + circularQ->ci_pci_bar, + circularQ->ci_offset, + circularQ->consumer_idx); + } + } else { + circularQ->consumer_idx = + (circularQ->consumer_idx + + ((le32_to_cpu(msgHeader_tmp) >> 24) & + 0x1f)) % PM8001_MPI_QUEUE; + msgHeader_tmp = 0; + pm8001_write_32(msgHeader, 0, 0); + /* update the CI of outbound queue */ + pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, + circularQ->ci_offset, + circularQ->consumer_idx); + return MPI_IO_STATUS_FAIL; + } + } else { + u32 producer_index; + void *pi_virt = circularQ->pi_virt; + /* spurious interrupt during setup if + * kexec-ing and driver doing a doorbell access + * with the pre-kexec oq interrupt setup + */ + if (!pi_virt) + break; + /* Update the producer index from SPC */ + producer_index = pm8001_read_32(pi_virt); + circularQ->producer_index = cpu_to_le32(producer_index); + } + } while (le32_to_cpu(circularQ->producer_index) != + circularQ->consumer_idx); + /* while we don't have any more not-yet-delivered message */ + /* report empty */ + return MPI_IO_STATUS_BUSY; +} + +void pm8001_work_fn(struct work_struct *work) +{ + struct pm8001_work *pw = container_of(work, struct pm8001_work, work); + struct pm8001_device *pm8001_dev; + struct domain_device *dev; + + /* + * So far, all users of this stash an associated structure here. + * If we get here, and this pointer is null, then the action + * was cancelled. This nullification happens when the device + * goes away. 
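Several handlers below search the CCB table with for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++): the comma expression clears ccb on every condition test, so a normal loop exit leaves ccb NULL ("not found") while a break leaves it pointing at the matching entry. A minimal standalone illustration of the idiom (table and values are invented):

    /*
     * Standalone illustration of the comma-in-condition search idiom
     * used by the CCB lookups below: the cursor is reset to NULL each
     * time the condition runs, so only a break leaves it non-NULL.
     */
    #include <stdio.h>

    #define N 4

    int main(void)
    {
        int table[N] = { 10, 20, 30, 40 };
        int *hit;
        int i;

        for (i = 0; hit = NULL, i < N; i++) {
            hit = &table[i];
            if (*hit == 30)
                break;        /* found: hit stays set */
        }
        printf("30 %s\n", hit ? "found" : "not found");

        for (i = 0; hit = NULL, i < N; i++) {
            hit = &table[i];
            if (*hit == 99)
                break;
        }
        printf("99 %s\n", hit ? "found" : "not found");
        return 0;
    }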
+ */ + if (pw->handler != IO_FATAL_ERROR) { + pm8001_dev = pw->data; /* Most stash device structure */ + if ((pm8001_dev == NULL) + || ((pw->handler != IO_XFER_ERROR_BREAK) + && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) { + kfree(pw); + return; + } + } + + switch (pw->handler) { + case IO_XFER_ERROR_BREAK: + { /* This one stashes the sas_task instead */ + struct sas_task *t = (struct sas_task *)pm8001_dev; + struct pm8001_ccb_info *ccb; + struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha; + unsigned long flags, flags1; + struct task_status_struct *ts; + int i; + + if (pm8001_query_task(t) == TMF_RESP_FUNC_SUCC) + break; /* Task still on lu */ + spin_lock_irqsave(&pm8001_ha->lock, flags); + + spin_lock_irqsave(&t->task_state_lock, flags1); + if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) { + spin_unlock_irqrestore(&t->task_state_lock, flags1); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + break; /* Task got completed by another */ + } + spin_unlock_irqrestore(&t->task_state_lock, flags1); + + /* Search for a possible ccb that matches the task */ + for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) { + ccb = &pm8001_ha->ccb_info[i]; + if ((ccb->ccb_tag != PM8001_INVALID_TAG) && + (ccb->task == t)) + break; + } + if (!ccb) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + break; /* Task got freed by another */ + } + ts = &t->task_status; + ts->resp = SAS_TASK_COMPLETE; + /* Force the midlayer to retry */ + ts->stat = SAS_QUEUE_FULL; + pm8001_dev = ccb->device; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + spin_lock_irqsave(&t->task_state_lock, flags1); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags1); + pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, pw->handler, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags1); + pm8001_ccb_task_free(pm8001_ha, ccb); + mb();/* in order to force CPU ordering */ + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + t->task_done(t); + } + } break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + { /* This one stashes the sas_task instead */ + struct sas_task *t = (struct sas_task *)pm8001_dev; + struct pm8001_ccb_info *ccb; + struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha; + unsigned long flags, flags1; + int i, ret = 0; + + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + + ret = pm8001_query_task(t); + + if (ret == TMF_RESP_FUNC_SUCC) + pm8001_dbg(pm8001_ha, IO, "...Task on lu\n"); + else if (ret == TMF_RESP_FUNC_COMPLETE) + pm8001_dbg(pm8001_ha, IO, "...Task NOT on lu\n"); + else + pm8001_dbg(pm8001_ha, DEVIO, "...query task failed!!!\n"); + + spin_lock_irqsave(&pm8001_ha->lock, flags); + + spin_lock_irqsave(&t->task_state_lock, flags1); + + if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) { + spin_unlock_irqrestore(&t->task_state_lock, flags1); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */ + (void)pm8001_abort_task(t); + break; /* Task got completed by another */ + } + + spin_unlock_irqrestore(&t->task_state_lock, flags1); + + /* Search for a possible ccb that matches the task */ + for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) { + ccb = &pm8001_ha->ccb_info[i]; + if ((ccb->ccb_tag != 
PM8001_INVALID_TAG) && + (ccb->task == t)) + break; + } + if (!ccb) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */ + (void)pm8001_abort_task(t); + break; /* Task got freed by another */ + } + + pm8001_dev = ccb->device; + dev = pm8001_dev->sas_device; + + switch (ret) { + case TMF_RESP_FUNC_SUCC: /* task on lu */ + ccb->open_retry = 1; /* Snub completion */ + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + ret = pm8001_abort_task(t); + ccb->open_retry = 0; + switch (ret) { + case TMF_RESP_FUNC_SUCC: + case TMF_RESP_FUNC_COMPLETE: + break; + default: /* device misbehavior */ + ret = TMF_RESP_FUNC_FAILED; + pm8001_dbg(pm8001_ha, IO, "...Reset phy\n"); + pm8001_I_T_nexus_reset(dev); + break; + } + break; + + case TMF_RESP_FUNC_COMPLETE: /* task not on lu */ + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + /* Do we need to abort the task locally? */ + break; + + default: /* device misbehavior */ + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + ret = TMF_RESP_FUNC_FAILED; + pm8001_dbg(pm8001_ha, IO, "...Reset phy\n"); + pm8001_I_T_nexus_reset(dev); + } + + if (ret == TMF_RESP_FUNC_FAILED) + t = NULL; + pm8001_open_reject_retry(pm8001_ha, t, pm8001_dev); + pm8001_dbg(pm8001_ha, IO, "...Complete\n"); + } break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + dev = pm8001_dev->sas_device; + pm8001_I_T_nexus_event_handler(dev); + break; + case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: + dev = pm8001_dev->sas_device; + pm8001_I_T_nexus_reset(dev); + break; + case IO_DS_IN_ERROR: + dev = pm8001_dev->sas_device; + pm8001_I_T_nexus_reset(dev); + break; + case IO_DS_NON_OPERATIONAL: + dev = pm8001_dev->sas_device; + pm8001_I_T_nexus_reset(dev); + break; + case IO_FATAL_ERROR: + { + struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha; + struct pm8001_ccb_info *ccb; + struct task_status_struct *ts; + struct sas_task *task; + int i; + u32 device_id; + + for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) { + ccb = &pm8001_ha->ccb_info[i]; + task = ccb->task; + ts = &task->task_status; + + if (task != NULL) { + dev = task->dev; + if (!dev) { + pm8001_dbg(pm8001_ha, FAIL, + "dev is NULL\n"); + continue; + } + /*complete sas task and update to top layer */ + pm8001_ccb_task_free(pm8001_ha, ccb); + ts->resp = SAS_TASK_COMPLETE; + task->task_done(task); + } else if (ccb->ccb_tag != PM8001_INVALID_TAG) { + /* complete the internal commands/non-sas task */ + pm8001_dev = ccb->device; + if (pm8001_dev->dcompletion) { + complete(pm8001_dev->dcompletion); + pm8001_dev->dcompletion = NULL; + } + complete(pm8001_ha->nvmd_completion); + pm8001_ccb_free(pm8001_ha, ccb); + } + } + /* Deregister all the device ids */ + for (i = 0; i < PM8001_MAX_DEVICES; i++) { + pm8001_dev = &pm8001_ha->devices[i]; + device_id = pm8001_dev->device_id; + if (device_id) { + PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id); + pm8001_free_dev(pm8001_dev); + } + } + } + break; + case IO_XFER_ERROR_ABORTED_NCQ_MODE: + { + dev = pm8001_dev->sas_device; + sas_ata_device_link_abort(dev, false); + } + break; + } + kfree(pw); +} + +int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, + int handler) +{ + struct pm8001_work *pw; + int ret = 0; + + pw = kmalloc(sizeof(struct pm8001_work), GFP_ATOMIC); + if (pw) { + pw->pm8001_ha = pm8001_ha; + pw->data = data; + pw->handler = handler; + INIT_WORK(&pw->work, pm8001_work_fn); + queue_work(pm8001_wq, &pw->work); + } else + ret = -ENOMEM; + + return ret; +} + +/** + * mpi_ssp_completion- process the event that FW response to the 
SSP request. + * @pm8001_ha: our hba card information + * @piomb: the message contents of this outbound message. + * + * When FW has completed a ssp request for example a IO request, after it has + * filled the SG data with the data, it will trigger this event representing + * that he has finished the job; please check the corresponding buffer. + * So we will tell the caller who maybe waiting the result to tell upper layer + * that the task has been finished. + */ +static void +mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 param; + u32 tag; + struct ssp_completion_resp *psspPayload; + struct task_status_struct *ts; + struct ssp_response_iu *iu; + struct pm8001_device *pm8001_dev; + psspPayload = (struct ssp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psspPayload->status); + tag = le32_to_cpu(psspPayload->tag); + ccb = &pm8001_ha->ccb_info[tag]; + if ((status == IO_ABORTED) && ccb->open_retry) { + /* Being completed by another */ + ccb->open_retry = 0; + return; + } + pm8001_dev = ccb->device; + param = le32_to_cpu(psspPayload->param); + + t = ccb->task; + + if (status && status != IO_UNDERFLOW) + pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", status); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + /* Print sas address of IO failed device */ + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) + pm8001_dbg(pm8001_ha, FAIL, "SAS Address of IO Failure Drive:%016llx\n", + SAS_ADDR(t->dev->sas_addr)); + + if (status) + pm8001_dbg(pm8001_ha, IOERR, + "status:0x%x, tag:0x%x, task:0x%p\n", + status, tag, t); + + switch (status) { + case IO_SUCCESS: + pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS,param = %d\n", + param); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_GOOD; + } else { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + iu = &psspPayload->ssp_resp_iu; + sas_ssp_task_response(pm8001_ha->dev, t, iu); + } + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + break; + case IO_UNDERFLOW: + /* SSP Completion with error */ + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW,param = %d\n", + param); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_NO_DEVICE: + pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + /* Force the midlayer to retry */ + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = 
SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_DMA: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_PORT_IN_RESET: + pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_DS_NON_OPERATIONAL: + pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_DS_NON_OPERATIONAL); + break; + case IO_DS_IN_RECOVERY: + pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_TM_TAG_NOT_FOUND: + pm8001_dbg(pm8001_ha, IO, "IO_TM_TAG_NOT_FOUND\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_SSP_EXT_IU_ZERO_LEN_ERROR: + pm8001_dbg(pm8001_ha, IO, "IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + } + pm8001_dbg(pm8001_ha, IO, "scsi_status = %x\n", + psspPayload->ssp_resp_iu.status); + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, ccb); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + unsigned long flags; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct ssp_event_resp *psspPayload = + (struct ssp_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psspPayload->event); + u32 tag = le32_to_cpu(psspPayload->tag); + u32 port_id = le32_to_cpu(psspPayload->port_id); + u32 dev_id = le32_to_cpu(psspPayload->device_id); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + if (event) + pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", event); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + pm8001_dbg(pm8001_ha, DEVIO, "port_id = %x,device_id = %x\n", + port_id, dev_id); + switch (event) { + case IO_OVERFLOW: + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK); + return; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case 
IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT); + return; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_CMD_FRAME_ISSUED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n"); + return; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, event, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, ccb); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + u32 param; + u32 status; + u32 tag; + int i, j; + u8 sata_addr_low[4]; + u32 temp_sata_addr_low; + u8 sata_addr_hi[4]; + u32 temp_sata_addr_hi; + struct sata_completion_resp *psataPayload; + struct task_status_struct *ts; + struct ata_task_resp *resp ; + u32 *sata_resp; + struct pm8001_device *pm8001_dev; + unsigned long flags; + + psataPayload = (struct sata_completion_resp *)(piomb + 4); + status = le32_to_cpu(psataPayload->status); + param = le32_to_cpu(psataPayload->param); + tag = le32_to_cpu(psataPayload->tag); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + + if (t) { + if (t->dev && (t->dev->lldd_dev)) + pm8001_dev = t->dev->lldd_dev; + } else { + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); + return; + } + + if (pm8001_dev && unlikely(!t || !t->lldd_task || !t->dev)) { + pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n"); + return; + } + + ts = &t->task_status; + + if (status) + pm8001_dbg(pm8001_ha, IOERR, + "status:0x%x, tag:0x%x, task::0x%p\n", + status, tag, t); + + /* Print sas address of IO failed device */ + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) { + if (!((t->dev->parent) && + (dev_is_expander(t->dev->parent->dev_type)))) { + for (i = 0, j = 4; j <= 7 && i <= 3; i++, j++) + sata_addr_low[i] = pm8001_ha->sas_addr[j]; + for (i = 0, j = 0; j <= 3 && i <= 3; i++, j++) + sata_addr_hi[i] = pm8001_ha->sas_addr[j]; + memcpy(&temp_sata_addr_low, sata_addr_low, + sizeof(sata_addr_low)); + memcpy(&temp_sata_addr_hi, sata_addr_hi, + sizeof(sata_addr_hi)); + temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff) + |((temp_sata_addr_hi << 8) & + 0xff0000) | + ((temp_sata_addr_hi >> 8) + & 0xff00) | + ((temp_sata_addr_hi << 24) & + 0xff000000)); + temp_sata_addr_low = ((((temp_sata_addr_low >> 24) + & 0xff) | + ((temp_sata_addr_low << 8) + & 0xff0000) | + ((temp_sata_addr_low >> 8) + & 0xff00) | + ((temp_sata_addr_low << 24) + & 0xff000000)) + + pm8001_dev->attached_phy + + 0x10); + pm8001_dbg(pm8001_ha, FAIL, + "SAS Address of IO Failure Drive:%08x%08x\n", + temp_sata_addr_hi, + temp_sata_addr_low); + } else { + pm8001_dbg(pm8001_ha, FAIL, + "SAS Address of IO Failure Drive:%016llx\n", + SAS_ADDR(t->dev->sas_addr)); + } + } + switch (status) { + case IO_SUCCESS: + pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n"); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_GOOD; + } else { + u8 len; + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + pm8001_dbg(pm8001_ha, IO, + 
"SAS_PROTO_RESPONSE len = %d\n", + param); + sata_resp = &psataPayload->sata_resp[0]; + resp = (struct ata_task_resp *)ts->buf; + if (t->ata_task.dma_xfer == 0 && + t->data_dir == DMA_FROM_DEVICE) { + len = sizeof(struct pio_setup_fis); + pm8001_dbg(pm8001_ha, IO, + "PIO read len = %d\n", len); + } else if (t->ata_task.use_ncq && + t->data_dir != DMA_NONE) { + len = sizeof(struct set_dev_bits_fis); + pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n", + len); + } else { + len = sizeof(struct dev_to_host_fis); + pm8001_dbg(pm8001_ha, IO, "other len = %d\n", + len); + } + if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { + resp->frame_len = len; + memcpy(&resp->ending_fis[0], sata_resp, len); + ts->buf_valid_size = sizeof(*resp); + } else + pm8001_dbg(pm8001_ha, IO, + "response too large\n"); + } + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + /* following cases are to do cases */ + case IO_UNDERFLOW: + /* SATA Completion with error */ + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW param = %d\n", param); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_NO_DEVICE: + pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free_done(pm8001_ha, ccb); + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + if (!t->uldd_task) { + 
pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free_done(pm8001_ha, ccb); + return; + } + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free_done(pm8001_ha, ccb); + return; + } + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_DMA: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_SATA_LINK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_PORT_IN_RESET: + pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_DS_NON_OPERATIONAL: + pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_NON_OPERATIONAL); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free_done(pm8001_ha, ccb); + return; + } + break; + case IO_DS_IN_RECOVERY: + pm8001_dbg(pm8001_ha, IO, " IO_DS_IN_RECOVERY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_DS_IN_ERROR: + pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_ERROR\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if 
(!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_IN_ERROR); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free_done(pm8001_ha, ccb); + return; + } + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status); + /* not allowed case. Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, + "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct sata_event_resp *psataPayload = + (struct sata_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psataPayload->event); + u32 tag = le32_to_cpu(psataPayload->tag); + u32 port_id = le32_to_cpu(psataPayload->port_id); + u32 dev_id = le32_to_cpu(psataPayload->device_id); + + if (event) + pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event); + + /* Check if this is NCQ error */ + if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* find device using device id */ + pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); + if (pm8001_dev) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_XFER_ERROR_ABORTED_NCQ_MODE); + return; + } + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + if (event) + pm8001_dbg(pm8001_ha, FAIL, "sata IO status 0x%x\n", event); + + if (unlikely(!t)) { + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); + return; + } + + if (unlikely(!t->lldd_task || !t->dev)) + return; + + ts = &t->task_status; + pm8001_dbg(pm8001_ha, DEVIO, + "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, dev_id, tag, event); + switch (event) { + case IO_OVERFLOW: + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case 
IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_PEER_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PEER_ABORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_CMD_FRAME_ISSUED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n"); + break; + case IO_XFER_PIO_SETUP_ERROR: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_PIO_SETUP_ERROR\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 tag; + struct smp_completion_resp *psmpPayload; + struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; + + psmpPayload = (struct smp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psmpPayload->status); + tag = le32_to_cpu(psmpPayload->tag); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + ts = &t->task_status; + pm8001_dev = ccb->device; + if (status) { + pm8001_dbg(pm8001_ha, FAIL, "smp IO status 0x%x\n", status); + pm8001_dbg(pm8001_ha, IOERR, + "status:0x%x, tag:0x%x, task:0x%p\n", + status, tag, t); + } + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + + switch (status) { + case IO_SUCCESS: + pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_GOOD; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OVERFLOW: + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_NO_DEVICE: + pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PHY_DOWN; + break; + case IO_ERROR_HW_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_ERROR_HW_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_BUSY; + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_BUSY; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_BUSY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + 
ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_RX_FRAME: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_RX_FRAME\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_ERROR_INTERNAL_SMP_RESOURCE: + pm8001_dbg(pm8001_ha, IO, "IO_ERROR_INTERNAL_SMP_RESOURCE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + break; + case IO_PORT_IN_RESET: + pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_DS_NON_OPERATIONAL: + pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_IN_RECOVERY: + pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + /* not allowed case. 
Therefore, return failed status */ + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + } +} + +void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct set_dev_state_resp *pPayload = + (struct set_dev_state_resp *)(piomb + 4); + u32 tag = le32_to_cpu(pPayload->tag); + struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; + struct pm8001_device *pm8001_dev = ccb->device; + u32 status = le32_to_cpu(pPayload->status); + u32 device_id = le32_to_cpu(pPayload->device_id); + u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS; + u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS; + + pm8001_dbg(pm8001_ha, MSG, + "Set device id = 0x%x state from 0x%x to 0x%x status = 0x%x!\n", + device_id, pds, nds, status); + complete(pm8001_dev->setds_completion); + pm8001_ccb_free(pm8001_ha, ccb); +} + +void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct get_nvm_data_resp *pPayload = + (struct get_nvm_data_resp *)(piomb + 4); + u32 tag = le32_to_cpu(pPayload->tag); + struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; + u32 dlen_status = le32_to_cpu(pPayload->dlen_status); + + complete(pm8001_ha->nvmd_completion); + pm8001_dbg(pm8001_ha, MSG, "Set nvm data complete!\n"); + if ((dlen_status & NVMD_STAT) != 0) { + pm8001_dbg(pm8001_ha, FAIL, "Set nvm data error %x\n", + dlen_status); + } + pm8001_ccb_free(pm8001_ha, ccb); +} + +void +pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct fw_control_ex *fw_control_context; + struct get_nvm_data_resp *pPayload = + (struct get_nvm_data_resp *)(piomb + 4); + u32 tag = le32_to_cpu(pPayload->tag); + struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; + u32 dlen_status = le32_to_cpu(pPayload->dlen_status); + u32 ir_tds_bn_dps_das_nvm = + le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm); + void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; + fw_control_context = ccb->fw_control_context; + + pm8001_dbg(pm8001_ha, MSG, "Get nvm data complete!\n"); + if ((dlen_status & NVMD_STAT) != 0) { + pm8001_dbg(pm8001_ha, FAIL, "Get nvm data error %x\n", + dlen_status); + complete(pm8001_ha->nvmd_completion); + /* We should free tag during failure also, the tag is not being + * freed by requesting path anywhere. 
+ */ + pm8001_ccb_free(pm8001_ha, ccb); + return; + } + if (ir_tds_bn_dps_das_nvm & IPMode) { + /* indirect mode - IR bit set */ + pm8001_dbg(pm8001_ha, MSG, "Get NVMD success, IR=1\n"); + if ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == TWI_DEVICE) { + if (ir_tds_bn_dps_das_nvm == 0x80a80200) { + memcpy(pm8001_ha->sas_addr, + ((u8 *)virt_addr + 4), + SAS_ADDR_SIZE); + pm8001_dbg(pm8001_ha, MSG, "Get SAS address from VPD successfully!\n"); + } + } else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == C_SEEPROM) + || ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == VPD_FLASH) || + ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == EXPAN_ROM)) { + ; + } else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == AAP1_RDUMP) + || ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == IOP_RDUMP)) { + ; + } else { + /* Should not be happened*/ + pm8001_dbg(pm8001_ha, MSG, + "(IR=1)Wrong Device type 0x%x\n", + ir_tds_bn_dps_das_nvm); + } + } else /* direct mode */{ + pm8001_dbg(pm8001_ha, MSG, + "Get NVMD success, IR=0, dataLen=%d\n", + (dlen_status & NVMD_LEN) >> 24); + } + /* Though fw_control_context is freed below, usrAddr still needs + * to be updated as this holds the response to the request function + */ + memcpy(fw_control_context->usrAddr, + pm8001_ha->memoryMap.region[NVMD].virt_ptr, + fw_control_context->len); + kfree(ccb->fw_control_context); + /* To avoid race condition, complete should be + * called after the message is copied to + * fw_control_context->usrAddr + */ + complete(pm8001_ha->nvmd_completion); + pm8001_dbg(pm8001_ha, MSG, "Get nvmd data complete!\n"); + pm8001_ccb_free(pm8001_ha, ccb); +} + +int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 tag; + struct local_phy_ctl_resp *pPayload = + (struct local_phy_ctl_resp *)(piomb + 4); + u32 status = le32_to_cpu(pPayload->status); + u32 phy_id = le32_to_cpu(pPayload->phyop_phyid) & ID_BITS; + u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS; + tag = le32_to_cpu(pPayload->tag); + if (status != 0) { + pm8001_dbg(pm8001_ha, MSG, + "%x phy execute %x phy op failed!\n", + phy_id, phy_op); + } else { + pm8001_dbg(pm8001_ha, MSG, + "%x phy execute %x phy op success!\n", + phy_id, phy_op); + pm8001_ha->phy[phy_id].reset_success = true; + } + if (pm8001_ha->phy[phy_id].enable_completion) { + complete(pm8001_ha->phy[phy_id].enable_completion); + pm8001_ha->phy[phy_id].enable_completion = NULL; + } + pm8001_tag_free(pm8001_ha, tag); + return 0; +} + +/** + * pm8001_bytes_dmaed - one of the interface function communication with libsas + * @pm8001_ha: our hba card information + * @i: which phy that received the event. + * + * when HBA driver received the identify done event or initiate FIS received + * event(for SATA), it will invoke this function to notify the sas layer that + * the sas toplogy has formed, please discover the whole sas domain, + * while receive a broadcast(change) primitive just tell the sas + * layer to discover the changed domain rather than the whole domain. 
+ */ +void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) +{ + struct pm8001_phy *phy = &pm8001_ha->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + if (!phy->phy_attached) + return; + + if (phy->phy_type & PORT_TYPE_SAS) { + struct sas_identify_frame *id; + id = (struct sas_identify_frame *)phy->frame_rcvd; + id->dev_type = phy->identify.device_type; + id->initiator_bits = SAS_PROTOCOL_ALL; + id->target_bits = phy->identify.target_port_protocols; + } else if (phy->phy_type & PORT_TYPE_SATA) { + /*Nothing*/ + } + pm8001_dbg(pm8001_ha, MSG, "phy %d byte dmaded.\n", i); + + sas_phy->frame_rcvd_size = phy->frame_rcvd_size; + sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC); +} + +/* Get the link rate speed */ +void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) +{ + struct sas_phy *sas_phy = phy->sas_phy.phy; + + switch (link_rate) { + case PHY_SPEED_120: + phy->sas_phy.linkrate = SAS_LINK_RATE_12_0_GBPS; + break; + case PHY_SPEED_60: + phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS; + break; + case PHY_SPEED_30: + phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS; + break; + case PHY_SPEED_15: + phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS; + break; + } + sas_phy->negotiated_linkrate = phy->sas_phy.linkrate; + sas_phy->maximum_linkrate_hw = phy->maximum_linkrate; + sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; + sas_phy->maximum_linkrate = phy->maximum_linkrate; + sas_phy->minimum_linkrate = phy->minimum_linkrate; +} + +/** + * pm8001_get_attached_sas_addr - extract/generate attached SAS address + * @phy: pointer to asd_phy + * @sas_addr: pointer to buffer where the SAS address is to be written + * + * This function extracts the SAS address from an IDENTIFY frame + * received. If OOB is SATA, then a SAS address is generated from the + * HA tables. + * + * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame + * buffer. + */ +void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, + u8 *sas_addr) +{ + if (phy->sas_phy.frame_rcvd[0] == 0x34 + && phy->sas_phy.oob_mode == SATA_OOB_MODE) { + struct pm8001_hba_info *pm8001_ha = phy->sas_phy.ha->lldd_ha; + /* FIS device-to-host */ + u64 addr = be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr); + addr += phy->sas_phy.id; + *(__be64 *)sas_addr = cpu_to_be64(addr); + } else { + struct sas_identify_frame *idframe = + (void *) phy->sas_phy.frame_rcvd; + memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE); + } +} + +/** + * pm8001_hw_event_ack_req- For PM8001,some events need to acknowage to FW. + * @pm8001_ha: our hba card information + * @Qnum: the outbound queue message number. + * @SEA: source of event to ack + * @port_id: port id. + * @phyId: phy id. + * @param0: parameter 0. + * @param1: parameter 1. + */ +static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, + u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1) +{ + struct hw_event_ack_req payload; + u32 opc = OPC_INB_SAS_HW_EVENT_ACK; + + memset((u8 *)&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(1); + payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) | + ((phyId & 0x0F) << 4) | (port_id & 0x0F)); + payload.param0 = cpu_to_le32(param0); + payload.param1 = cpu_to_le32(param1); + + pm8001_mpi_build_cmd(pm8001_ha, Qnum, opc, &payload, sizeof(payload), 0); +} + +static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op); + +/** + * hw_event_sas_phy_up -FW tells me a SAS phy up event. 
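This handler, like its SATA counterpart further down, unpacks the firmware's lr_evt_status_phyid_portid word by hand: bits 31..28 carry the link rate, bits 7..4 the phy id, bits 3..0 the port id, and the low nibble of npip_portstate the port state. A standalone sketch of that decode, using an invented sample value rather than a captured event:

    /*
     * Standalone sketch of the hw-event field decode performed by the
     * phy-up handlers below.  The sample word is illustrative.
     */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t lr_evt_status_phyid_portid = 0x90000052;
        uint32_t npip_portstate = 0x00000001;

        unsigned int link_rate = (lr_evt_status_phyid_portid & 0xF0000000) >> 28;
        unsigned int phy_id    = (lr_evt_status_phyid_portid & 0x000000F0) >> 4;
        unsigned int port_id   =  lr_evt_status_phyid_portid & 0x0000000F;
        unsigned int portstate =  npip_portstate & 0x0000000F;

        printf("link_rate=0x%x phy_id=%u port_id=%u portstate=%u\n",
               link_rate, phy_id, port_id, portstate);
        return 0;
    }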
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_evt_status_phyid_portid = + le32_to_cpu(pPayload->lr_evt_status_phyid_portid); + u8 link_rate = + (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28); + u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F); + u8 phy_id = + (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); + u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate); + u8 portstate = (u8)(npip_portstate & 0x0000000F); + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + unsigned long flags; + u8 deviceType = pPayload->sas_identify.dev_type; + phy->port = port; + port->port_id = port_id; + port->port_state = portstate; + phy->phy_state = PHY_STATE_LINK_UP_SPC; + pm8001_dbg(pm8001_ha, MSG, + "HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n", + port_id, phy_id); + + switch (deviceType) { + case SAS_PHY_UNUSED: + pm8001_dbg(pm8001_ha, MSG, "device type no device.\n"); + break; + case SAS_END_DEVICE: + pm8001_dbg(pm8001_ha, MSG, "end device.\n"); + pm8001_chip_phy_ctl_req(pm8001_ha, phy_id, + PHY_NOTIFY_ENABLE_SPINUP); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_EDGE_EXPANDER_DEVICE: + pm8001_dbg(pm8001_ha, MSG, "expander device.\n"); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_FANOUT_EXPANDER_DEVICE: + pm8001_dbg(pm8001_ha, MSG, "fanout expander device.\n"); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "unknown device type(%x)\n", + deviceType); + break; + } + phy->phy_type |= PORT_TYPE_SAS; + phy->identify.device_type = deviceType; + phy->phy_attached = 1; + if (phy->identify.device_type == SAS_END_DEVICE) + phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != SAS_PHY_UNUSED) + phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; + phy->sas_phy.oob_mode = SAS_OOB_MODE; + sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC); + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->frame_rcvd, &pPayload->sas_identify, + sizeof(struct sas_identify_frame)-4); + phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4; + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + if (pm8001_ha->flags == PM8001F_RUN_TIME) + mdelay(200);/*delay a moment to wait disk to spinup*/ + pm8001_bytes_dmaed(pm8001_ha, phy_id); +} + +/** + * hw_event_sata_phy_up -FW tells me a SATA phy up event. 
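The SATA phy-up path below has no IDENTIFY frame to parse, so pm8001_get_attached_sas_addr() (above) synthesises the attached address by adding the phy id to the HBA's own big-endian SAS address. A standalone model of that arithmetic, with an illustrative SAS address:

    /*
     * Standalone model of the SATA branch of pm8001_get_attached_sas_addr():
     * the attached address is the HBA's big-endian SAS address plus the
     * phy id.  The address below is illustrative.
     */
    #include <stdio.h>
    #include <stdint.h>

    static uint64_t be64_load(const uint8_t *p)
    {
        uint64_t v = 0;
        for (int i = 0; i < 8; i++)
            v = (v << 8) | p[i];
        return v;
    }

    static void be64_store(uint8_t *p, uint64_t v)
    {
        for (int i = 7; i >= 0; i--) {
            p[i] = v & 0xff;
            v >>= 8;
        }
    }

    int main(void)
    {
        uint8_t hba_sas_addr[8] = { 0x50, 0x01, 0x04, 0xf0,
                                    0x00, 0x00, 0x00, 0x01 };
        uint8_t attached[8];
        unsigned int phy_id = 3;

        be64_store(attached, be64_load(hba_sas_addr) + phy_id);

        printf("attached SAS address: ");
        for (int i = 0; i < 8; i++)
            printf("%02x", (unsigned)attached[i]);
        printf("\n");    /* 500104f000000004 for phy 3 */
        return 0;
    }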
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct hw_event_resp *pPayload =
+		(struct hw_event_resp *)(piomb + 4);
+	u32 lr_evt_status_phyid_portid =
+		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
+	u8 link_rate =
+		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
+	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
+	u8 phy_id =
+		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+	u8 portstate = (u8)(npip_portstate & 0x0000000F);
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	unsigned long flags;
+	pm8001_dbg(pm8001_ha, DEVIO, "HW_EVENT_SATA_PHY_UP port id = %d, phy id = %d\n",
+		   port_id, phy_id);
+	phy->port = port;
+	port->port_id = port_id;
+	port->port_state = portstate;
+	phy->phy_state = PHY_STATE_LINK_UP_SPC;
+	port->port_attached = 1;
+	pm8001_get_lrate_mode(phy, link_rate);
+	phy->phy_type |= PORT_TYPE_SATA;
+	phy->phy_attached = 1;
+	phy->sas_phy.oob_mode = SATA_OOB_MODE;
+	sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
+	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+	memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+		sizeof(struct dev_to_host_fis));
+	phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+	phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+	phy->identify.device_type = SAS_SATA_DEV;
+	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+	pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_phy_down - notify libsas that the phy is down.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct hw_event_resp *pPayload =
+		(struct hw_event_resp *)(piomb + 4);
+	u32 lr_evt_status_phyid_portid =
+		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
+	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
+	u8 phy_id =
+		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+	u8 portstate = (u8)(npip_portstate & 0x0000000F);
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	port->port_state = portstate;
+	phy->phy_type = 0;
+	phy->identify.device_type = 0;
+	phy->phy_attached = 0;
+	memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
+	switch (portstate) {
+	case PORT_VALID:
+		break;
+	case PORT_INVALID:
+		pm8001_dbg(pm8001_ha, MSG, " PortInvalid portID %d\n",
+			   port_id);
+		pm8001_dbg(pm8001_ha, MSG,
+			   " Last phy Down and port invalid\n");
+		port->port_attached = 0;
+		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+			port_id, phy_id, 0, 0);
+		break;
+	case PORT_IN_RESET:
+		pm8001_dbg(pm8001_ha, MSG, " Port In Reset portID %d\n",
+			   port_id);
+		break;
+	case PORT_NOT_ESTABLISHED:
+		pm8001_dbg(pm8001_ha, MSG,
+			   " phy Down and PORT_NOT_ESTABLISHED\n");
+		port->port_attached = 0;
+		break;
+	case PORT_LOSTCOMM:
+		pm8001_dbg(pm8001_ha, MSG, " phy Down and PORT_LOSTCOMM\n");
+		pm8001_dbg(pm8001_ha, MSG,
+			   " Last phy Down and port invalid\n");
+		port->port_attached = 0;
+		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+			port_id, phy_id, 0, 0);
+		break;
+	default:
+		port->port_attached = 0;
+		pm8001_dbg(pm8001_ha, DEVIO, " phy Down and(default) = %x\n",
+			   portstate);
+		break;
+
+	}
+}
+
+/**
+ * pm8001_mpi_reg_resp - process the register device ID response.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ *
+ * When the SAS layer finds a device it notifies the LLDD, and the driver then
+ * registers the domain device with the FW. This response returns the device
+ * ID that the FW has assigned; from then on, communication with the FW no
+ * longer uses the SAS address but the FW-assigned device ID.
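+ *
+ * On completion the FW status is logged; on DEVREG_SUCCESS the assigned
+ * device ID is stored in the pm8001_device and dcompletion is signalled so
+ * the waiting registration path can continue.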
+ */ +int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 status; + u32 device_id; + u32 htag; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct dev_reg_resp *registerRespPayload = + (struct dev_reg_resp *)(piomb + 4); + + htag = le32_to_cpu(registerRespPayload->tag); + ccb = &pm8001_ha->ccb_info[htag]; + pm8001_dev = ccb->device; + status = le32_to_cpu(registerRespPayload->status); + device_id = le32_to_cpu(registerRespPayload->device_id); + pm8001_dbg(pm8001_ha, INIT, + "register device status %d phy_id 0x%x device_id %d\n", + status, pm8001_dev->attached_phy, device_id); + switch (status) { + case DEVREG_SUCCESS: + pm8001_dbg(pm8001_ha, MSG, "DEVREG_SUCCESS\n"); + pm8001_dev->device_id = device_id; + break; + case DEVREG_FAILURE_OUT_OF_RESOURCE: + pm8001_dbg(pm8001_ha, MSG, "DEVREG_FAILURE_OUT_OF_RESOURCE\n"); + break; + case DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED: + pm8001_dbg(pm8001_ha, MSG, + "DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED\n"); + break; + case DEVREG_FAILURE_INVALID_PHY_ID: + pm8001_dbg(pm8001_ha, MSG, "DEVREG_FAILURE_INVALID_PHY_ID\n"); + break; + case DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED: + pm8001_dbg(pm8001_ha, MSG, + "DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED\n"); + break; + case DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE: + pm8001_dbg(pm8001_ha, MSG, + "DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE\n"); + break; + case DEVREG_FAILURE_PORT_NOT_VALID_STATE: + pm8001_dbg(pm8001_ha, MSG, + "DEVREG_FAILURE_PORT_NOT_VALID_STATE\n"); + break; + case DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID: + pm8001_dbg(pm8001_ha, MSG, + "DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID\n"); + break; + default: + pm8001_dbg(pm8001_ha, MSG, + "DEVREG_FAILURE_DEVICE_TYPE_NOT_SUPPORTED\n"); + break; + } + complete(pm8001_dev->dcompletion); + pm8001_ccb_free(pm8001_ha, ccb); + return 0; +} + +int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 status; + u32 device_id; + struct dev_reg_resp *registerRespPayload = + (struct dev_reg_resp *)(piomb + 4); + + status = le32_to_cpu(registerRespPayload->status); + device_id = le32_to_cpu(registerRespPayload->device_id); + if (status != 0) + pm8001_dbg(pm8001_ha, MSG, + " deregister device failed ,status = %x, device_id = %x\n", + status, device_id); + return 0; +} + +/** + * pm8001_mpi_fw_flash_update_resp - Response from FW for flash update command. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + u32 status; + struct fw_flash_Update_resp *ppayload = + (struct fw_flash_Update_resp *)(piomb + 4); + u32 tag = le32_to_cpu(ppayload->tag); + struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; + + status = le32_to_cpu(ppayload->status); + switch (status) { + case FLASH_UPDATE_COMPLETE_PENDING_REBOOT: + pm8001_dbg(pm8001_ha, MSG, + ": FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n"); + break; + case FLASH_UPDATE_IN_PROGRESS: + pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_IN_PROGRESS\n"); + break; + case FLASH_UPDATE_HDR_ERR: + pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_HDR_ERR\n"); + break; + case FLASH_UPDATE_OFFSET_ERR: + pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_OFFSET_ERR\n"); + break; + case FLASH_UPDATE_CRC_ERR: + pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_CRC_ERR\n"); + break; + case FLASH_UPDATE_LENGTH_ERR: + pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_LENGTH_ERR\n"); + break; + case FLASH_UPDATE_HW_ERR: + pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_HW_ERR\n"); + break; + case FLASH_UPDATE_DNLD_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, MSG, + ": FLASH_UPDATE_DNLD_NOT_SUPPORTED\n"); + break; + case FLASH_UPDATE_DISABLED: + pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_DISABLED\n"); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "No matched status = %d\n", + status); + break; + } + kfree(ccb->fw_control_context); + pm8001_ccb_free(pm8001_ha, ccb); + complete(pm8001_ha->nvmd_completion); + return 0; +} + +int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 status; + int i; + struct general_event_resp *pPayload = + (struct general_event_resp *)(piomb + 4); + status = le32_to_cpu(pPayload->status); + pm8001_dbg(pm8001_ha, MSG, " status = 0x%x\n", status); + for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++) + pm8001_dbg(pm8001_ha, MSG, "inb_IOMB_payload[0x%x] 0x%x,\n", + i, + pPayload->inb_IOMB_payload[i]); + return 0; +} + +int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status ; + u32 tag, scp; + struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; + + struct task_abort_resp *pPayload = + (struct task_abort_resp *)(piomb + 4); + + status = le32_to_cpu(pPayload->status); + tag = le32_to_cpu(pPayload->tag); + + scp = le32_to_cpu(pPayload->scp); + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; /* retrieve device */ + + if (!t) { + pm8001_dbg(pm8001_ha, FAIL, " TASK NULL. RETURNING !!!\n"); + return -1; + } + + if (t->task_proto == SAS_PROTOCOL_INTERNAL_ABORT) + atomic_dec(&pm8001_dev->running_req); + + ts = &t->task_status; + if (status != 0) + pm8001_dbg(pm8001_ha, FAIL, "task abort failed status 0x%x ,tag = 0x%x, scp= 0x%x\n", + status, tag, scp); + switch (status) { + case IO_SUCCESS: + pm8001_dbg(pm8001_ha, EH, "IO_SUCCESS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_GOOD; + break; + case IO_NOT_VALID: + pm8001_dbg(pm8001_ha, EH, "IO_NOT_VALID\n"); + ts->resp = TMF_RESP_FUNC_FAILED; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, ccb); + mb(); + + t->task_done(t); + + return 0; +} + +/** + * mpi_hw_event -The hw event has come. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + unsigned long flags; + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_evt_status_phyid_portid = + le32_to_cpu(pPayload->lr_evt_status_phyid_portid); + u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F); + u8 phy_id = + (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); + u16 eventType = + (u16)((lr_evt_status_phyid_portid & 0x00FFFF00) >> 8); + u8 status = + (u8)((lr_evt_status_phyid_portid & 0x0F000000) >> 24); + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + pm8001_dbg(pm8001_ha, DEVIO, + "SPC HW event for portid:%d, phyid:%d, event:%x, status:%x\n", + port_id, phy_id, eventType, status); + switch (eventType) { + case HW_EVENT_PHY_START_STATUS: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS status = %x\n", + status); + if (status == 0) + phy->phy_state = 1; + + if (pm8001_ha->flags == PM8001F_RUN_TIME && + phy->enable_completion != NULL) { + complete(phy->enable_completion); + phy->enable_completion = NULL; + } + break; + case HW_EVENT_SAS_PHY_UP: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS\n"); + hw_event_sas_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_PHY_UP: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_PHY_UP\n"); + hw_event_sata_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_PHY_STOP_STATUS: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_STOP_STATUS status = %x\n", + status); + if (status == 0) + phy->phy_state = 0; + break; + case HW_EVENT_SATA_SPINUP_HOLD: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n"); + sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD, + GFP_ATOMIC); + break; + case HW_EVENT_PHY_DOWN: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n"); + sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL, + GFP_ATOMIC); + phy->phy_attached = 0; + phy->phy_state = 0; + hw_event_phy_down(pm8001_ha, piomb); + break; + case HW_EVENT_PORT_INVALID: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n"); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + /* the broadcast change primitive received, tell the LIBSAS this event + to revalidate the sas domain*/ + case HW_EVENT_BROADCAST_CHANGE: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_CHANGE\n"); + pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE, + port_id, phy_id, 1, 0); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, + GFP_ATOMIC); + break; + case HW_EVENT_PHY_ERROR: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n"); + sas_phy_disconnected(&phy->sas_phy); + phy->phy_attached = 0; + sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC); + break; + case HW_EVENT_BROADCAST_EXP: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n"); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, + GFP_ATOMIC); + break; + case HW_EVENT_LINK_ERR_INVALID_DWORD: + pm8001_dbg(pm8001_ha, MSG, + "HW_EVENT_LINK_ERR_INVALID_DWORD\n"); + 
pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_LINK_ERR_DISPARITY_ERROR: + pm8001_dbg(pm8001_ha, MSG, + "HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"); + pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_DISPARITY_ERROR, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_LINK_ERR_CODE_VIOLATION: + pm8001_dbg(pm8001_ha, MSG, + "HW_EVENT_LINK_ERR_CODE_VIOLATION\n"); + pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_CODE_VIOLATION, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH: + pm8001_dbg(pm8001_ha, MSG, + "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"); + pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_MALFUNCTION: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n"); + break; + case HW_EVENT_BROADCAST_SES: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_SES\n"); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_SES; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, + GFP_ATOMIC); + break; + case HW_EVENT_INBOUND_CRC_ERROR: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n"); + pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_INBOUND_CRC_ERROR, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_HARD_RESET_RECEIVED: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n"); + sas_notify_port_event(sas_phy, PORTE_HARD_RESET, GFP_ATOMIC); + break; + case HW_EVENT_ID_FRAME_TIMEOUT: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n"); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: + pm8001_dbg(pm8001_ha, MSG, + "HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"); + pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_PHY_RESET_FAILED, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_PORT_RESET_TIMER_TMO: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n"); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_PORT_RECOVERY_TIMER_TMO: + pm8001_dbg(pm8001_ha, MSG, + "HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_PORT_RECOVER: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n"); + break; + case HW_EVENT_PORT_RESET_COMPLETE: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_COMPLETE\n"); + break; + case EVENT_BROADCAST_ASYNCH_EVENT: + pm8001_dbg(pm8001_ha, MSG, "EVENT_BROADCAST_ASYNCH_EVENT\n"); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown event type = 
%x\n", + eventType); + break; + } + return 0; +} + +/** + * process_one_iomb - process one outbound Queue memory block + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + __le32 pHeader = *(__le32 *)piomb; + u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF); + + pm8001_dbg(pm8001_ha, MSG, "process_one_iomb:\n"); + + switch (opc) { + case OPC_OUB_ECHO: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_ECHO\n"); + break; + case OPC_OUB_HW_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_HW_EVENT\n"); + mpi_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_COMP\n"); + mpi_ssp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SMP_COMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_COMP\n"); + mpi_smp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_LOCAL_PHY_CNTRL: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_LOCAL_PHY_CNTRL\n"); + pm8001_mpi_local_phy_ctl(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_REGIST: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_REGIST\n"); + pm8001_mpi_reg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEREG_DEV: + pm8001_dbg(pm8001_ha, MSG, "unregister the device\n"); + pm8001_mpi_dereg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEV_HANDLE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEV_HANDLE\n"); + break; + case OPC_OUB_SATA_COMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_COMP\n"); + mpi_sata_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_EVENT\n"); + mpi_sata_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_EVENT\n"); + mpi_ssp_event(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_HANDLE_ARRIV: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_HANDLE_ARRIV\n"); + /*This is for target*/ + break; + case OPC_OUB_SSP_RECV_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_RECV_EVENT\n"); + /*This is for target*/ + break; + case OPC_OUB_DEV_INFO: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_INFO\n"); + break; + case OPC_OUB_FW_FLASH_UPDATE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_FW_FLASH_UPDATE\n"); + pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GPIO_RESPONSE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_RESPONSE\n"); + break; + case OPC_OUB_GPIO_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_EVENT\n"); + break; + case OPC_OUB_GENERAL_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GENERAL_EVENT\n"); + pm8001_mpi_general_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_ABORT_RSP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_ABORT_RSP\n"); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_ABORT_RSP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_ABORT_RSP\n"); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SAS_DIAG_MODE_START_END: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_SAS_DIAG_MODE_START_END\n"); + break; + case OPC_OUB_SAS_DIAG_EXECUTE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_DIAG_EXECUTE\n"); + break; + case OPC_OUB_GET_TIME_STAMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_TIME_STAMP\n"); + break; + case OPC_OUB_SAS_HW_EVENT_ACK: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_HW_EVENT_ACK\n"); + break; + case OPC_OUB_PORT_CONTROL: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_PORT_CONTROL\n"); + break; + case OPC_OUB_SMP_ABORT_RSP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_ABORT_RSP\n"); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case 
OPC_OUB_GET_NVMD_DATA: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_NVMD_DATA\n"); + pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_NVMD_DATA: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_NVMD_DATA\n"); + pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEVICE_HANDLE_REMOVAL: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEVICE_HANDLE_REMOVAL\n"); + break; + case OPC_OUB_SET_DEVICE_STATE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEVICE_STATE\n"); + pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEVICE_STATE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEVICE_STATE\n"); + break; + case OPC_OUB_SET_DEV_INFO: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n"); + break; + case OPC_OUB_SAS_RE_INITIALIZE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_RE_INITIALIZE\n"); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, + "Unknown outbound Queue IOMB OPC = %x\n", + opc); + break; + } +} + +static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + struct outbound_queue_table *circularQ; + void *pMsg1 = NULL; + u8 bc; + u32 ret = MPI_IO_STATUS_FAIL; + unsigned long flags; + + spin_lock_irqsave(&pm8001_ha->lock, flags); + circularQ = &pm8001_ha->outbnd_q_tbl[vec]; + do { + ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); + if (MPI_IO_STATUS_SUCCESS == ret) { + /* process the outbound message */ + process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4)); + /* free the message from the outbound circular buffer */ + pm8001_mpi_msg_free_set(pm8001_ha, pMsg1, + circularQ, bc); + } + if (MPI_IO_STATUS_BUSY == ret) { + /* Update the producer index from SPC */ + circularQ->producer_index = + cpu_to_le32(pm8001_read_32(circularQ->pi_virt)); + if (le32_to_cpu(circularQ->producer_index) == + circularQ->consumer_idx) + /* OQ is empty */ + break; + } + } while (1); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return ret; +} + +/* DMA_... to our direction translation. */ +static const u8 data_dir_flags[] = { + [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */ + [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */ + [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */ + [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */ +}; +void +pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd) +{ + int i; + struct scatterlist *sg; + struct pm8001_prd *buf_prd = prd; + + for_each_sg(scatter, sg, nr, i) { + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); + buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg)); + buf_prd->im_len.e = 0; + buf_prd++; + } +} + +static void build_smp_cmd(u32 deviceID, __le32 hTag, struct smp_req *psmp_cmd) +{ + psmp_cmd->tag = hTag; + psmp_cmd->device_id = cpu_to_le32(deviceID); + psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1)); +} + +/** + * pm8001_chip_smp_req - send a SMP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. 
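+ *
+ * DMA-maps the SMP request and response buffers, checks that both lengths
+ * are dword-aligned and then queues an OPC_INB_SMP_REQUEST IOMB whose long
+ * SMP descriptor points at the mapped buffers.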
+ */ +static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + int elem, rc; + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct scatterlist *sg_req, *sg_resp; + u32 req_len, resp_len; + struct smp_req smp_cmd; + u32 opc; + + memset(&smp_cmd, 0, sizeof(smp_cmd)); + /* + * DMA-map SMP request, response buffers + */ + sg_req = &task->smp_task.smp_req; + elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + + sg_resp = &task->smp_task.smp_resp; + elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out; + } + resp_len = sg_dma_len(sg_resp); + /* must be in dwords */ + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto err_out_2; + } + + opc = OPC_INB_SMP_REQUEST; + smp_cmd.tag = cpu_to_le32(ccb->ccb_tag); + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); + build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd); + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, + &smp_cmd, sizeof(smp_cmd), 0); + if (rc) + goto err_out_2; + + return 0; + +err_out_2: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); +err_out: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + return rc; +} + +/** + * pm8001_chip_ssp_io_req - send a SSP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. 
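+ *
+ * Builds an SSP INI IO START IOMB: the LUN, task attribute and CDB are
+ * copied into the SSP IU, and the data buffer is described either inline
+ * (zero or one scatter element) or via an extended SGL built from the PRD
+ * table when more than one scatter element is used.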
+ */ +static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct ssp_ini_io_start_req ssp_cmd; + u32 tag = ccb->ccb_tag; + u64 phys_addr; + u32 opc = OPC_INB_SSPINIIOSTART; + memset(&ssp_cmd, 0, sizeof(ssp_cmd)); + memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); + ssp_cmd.dir_m_tlr = + cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);/*0 for + SAS 1.1 compatible TLR*/ + ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); + ssp_cmd.tag = cpu_to_le32(tag); + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); + memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd, + task->ssp_task.cmd->cmd_len); + + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } else if (task->num_scatter == 0) { + ssp_cmd.addr_low = 0; + ssp_cmd.addr_high = 0; + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &ssp_cmd, + sizeof(ssp_cmd), 0); +} + +static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; + u32 tag = ccb->ccb_tag; + struct sata_start_req sata_cmd; + u32 hdr_tag, ncg_tag = 0; + u64 phys_addr; + u32 ATAP = 0x0; + u32 dir, retfis = 0; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + + memset(&sata_cmd, 0, sizeof(sata_cmd)); + + if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) { + ATAP = 0x04; /* no data*/ + pm8001_dbg(pm8001_ha, IO, "no data\n"); + } else if (likely(!task->ata_task.device_control_reg_update)) { + if (task->ata_task.use_ncq && + dev->sata_dev.class != ATA_DEV_ATAPI) { + ATAP = 0x07; /* FPDMA */ + pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); + } else if (task->ata_task.dma_xfer) { + ATAP = 0x06; /* DMA */ + pm8001_dbg(pm8001_ha, IO, "DMA\n"); + } else { + ATAP = 0x05; /* PIO*/ + pm8001_dbg(pm8001_ha, IO, "PIO\n"); + } + } + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + ncg_tag = hdr_tag; + } + dir = data_dir_flags[task->data_dir] << 8; + sata_cmd.tag = cpu_to_le32(tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.data_len = cpu_to_le32(task->total_xfer_len); + if (task->ata_task.return_fis_on_success) + retfis = 1; + sata_cmd.retfis_ncqtag_atap_dir_m = + cpu_to_le32((retfis << 24) | ((ncg_tag & 0xff) << 16) | + ((ATAP & 0x3f) << 10) | dir); + sata_cmd.sata_fis = task->ata_task.fis; + if (likely(!task->ata_task.device_control_reg_update)) + sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */ + sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */ + /* fill in PRD (scatter/gather) table, if any */ + if 
(task->num_scatter > 1) {
+		pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
+		phys_addr = ccb->ccb_dma_handle;
+		sata_cmd.addr_low = lower_32_bits(phys_addr);
+		sata_cmd.addr_high = upper_32_bits(phys_addr);
+		sata_cmd.esgl = cpu_to_le32(1 << 31);
+	} else if (task->num_scatter == 1) {
+		u64 dma_addr = sg_dma_address(task->scatter);
+		sata_cmd.addr_low = lower_32_bits(dma_addr);
+		sata_cmd.addr_high = upper_32_bits(dma_addr);
+		sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+		sata_cmd.esgl = 0;
+	} else if (task->num_scatter == 0) {
+		sata_cmd.addr_low = 0;
+		sata_cmd.addr_high = 0;
+		sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+		sata_cmd.esgl = 0;
+	}
+
+	return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd,
+				    sizeof(sata_cmd), 0);
+}
+
+/**
+ * pm8001_chip_phy_start_req - start phy via PHY_START COMMAND
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id which we want to start up.
+ */
+static int
+pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+{
+	struct phy_start_req payload;
+	u32 tag = 0x01;
+	u32 opcode = OPC_INB_PHYSTART;
+
+	memset(&payload, 0, sizeof(payload));
+	payload.tag = cpu_to_le32(tag);
+	/*
+	 ** [0:7]   PHY Identifier
+	 ** [8:11]  link rate 1.5G, 3G, 6G
+	 ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b both
+	 ** [14]    0b disable spin up hold; 1b enable spin up hold
+	 */
+	payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+		LINKMODE_AUTO | LINKRATE_15 |
+		LINKRATE_30 | LINKRATE_60 | phy_id);
+	payload.sas_identify.dev_type = SAS_END_DEVICE;
+	payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
+	memcpy(payload.sas_identify.sas_addr,
+		&pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
+	payload.sas_identify.phy_id = phy_id;
+
+	return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+				    sizeof(payload), 0);
+}
+
+/**
+ * pm8001_chip_phy_stop_req - stop phy via PHY_STOP COMMAND
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id which we want to stop.
+ */
+static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+	u8 phy_id)
+{
+	struct phy_stop_req payload;
+	u32 tag = 0x01;
+	u32 opcode = OPC_INB_PHYSTOP;
+
+	memset(&payload, 0, sizeof(payload));
+	payload.tag = cpu_to_le32(tag);
+	payload.phy_id = cpu_to_le32(phy_id);
+
+	return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+				    sizeof(payload), 0);
+}
+
+/*
+ * see comments on pm8001_mpi_reg_resp.
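+ *
+ * A REGISTER DEVICE IOMB is built with the device type (SSP/SMP, STP or
+ * directly attached SATA), the negotiated link rate, the phy/port id and the
+ * SAS address; the FW replies with the assigned device ID, which is handled
+ * by pm8001_mpi_reg_resp().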
+ */ +static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 flag) +{ + struct reg_dev_req payload; + u32 opc; + u32 stp_sspsmp_sata = 0x4; + u32 linkrate, phy_id; + int rc; + struct pm8001_ccb_info *ccb; + u8 retryFlag = 0x1; + u16 firstBurstSize = 0; + u16 ITNT = 2000; + struct domain_device *dev = pm8001_dev->sas_device; + struct domain_device *parent_dev = dev->parent; + struct pm8001_port *port = dev->port->lldd_port; + + memset(&payload, 0, sizeof(payload)); + ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL); + if (!ccb) + return -SAS_QUEUE_FULL; + + payload.tag = cpu_to_le32(ccb->ccb_tag); + if (flag == 1) + stp_sspsmp_sata = 0x02; /*direct attached sata */ + else { + if (pm8001_dev->dev_type == SAS_SATA_DEV) + stp_sspsmp_sata = 0x00; /* stp*/ + else if (pm8001_dev->dev_type == SAS_END_DEVICE || + dev_is_expander(pm8001_dev->dev_type)) + stp_sspsmp_sata = 0x01; /*ssp or smp*/ + } + if (parent_dev && dev_is_expander(parent_dev->dev_type)) + phy_id = parent_dev->ex_dev.ex_phy->phy_id; + else + phy_id = pm8001_dev->attached_phy; + opc = OPC_INB_REG_DEV; + linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ? + pm8001_dev->sas_device->linkrate : dev->port->linkrate; + payload.phyid_portid = + cpu_to_le32(((port->port_id) & 0x0F) | + ((phy_id & 0x0F) << 4)); + payload.dtype_dlr_retry = cpu_to_le32((retryFlag & 0x01) | + ((linkrate & 0x0F) * 0x1000000) | + ((stp_sspsmp_sata & 0x03) * 0x10000000)); + payload.firstburstsize_ITNexustimeout = + cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); + memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, + SAS_ADDR_SIZE); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_ccb_free(pm8001_ha, ccb); + + return rc; +} + +/* + * see comments on pm8001_mpi_reg_resp. + */ +int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, + u32 device_id) +{ + struct dereg_dev_req payload; + u32 opc = OPC_INB_DEREG_DEV_HANDLE; + + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(1); + payload.device_id = cpu_to_le32(device_id); + pm8001_dbg(pm8001_ha, INIT, "unregister device device_id %d\n", + device_id); + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); +} + +/** + * pm8001_chip_phy_ctl_req - support the local phy operation + * @pm8001_ha: our hba card information. + * @phyId: the phy id which we wanted to operate + * @phy_op: the phy operation to request + */ +static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op) +{ + struct local_phy_ctl_req payload; + u32 opc = OPC_INB_LOCAL_PHY_CONTROL; + + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(1); + payload.phyop_phyid = + cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); +} + +static u32 pm8001_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha) +{ +#ifdef PM8001_USE_MSIX + return 1; +#else + u32 value; + + value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR); + if (value) + return 1; + return 0; +#endif +} + +/** + * pm8001_chip_isr - PM8001 isr handler. + * @pm8001_ha: our hba card information. 
+ * @vec: IRQ number + */ +static irqreturn_t +pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + pm8001_chip_interrupt_disable(pm8001_ha, vec); + pm8001_dbg(pm8001_ha, DEVIO, + "irq vec %d, ODMR:0x%x\n", + vec, pm8001_cr32(pm8001_ha, 0, 0x30)); + process_oq(pm8001_ha, vec); + pm8001_chip_interrupt_enable(pm8001_ha, vec); + return IRQ_HANDLED; +} + +static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, + u32 dev_id, enum sas_internal_abort type, u32 task_tag, u32 cmd_tag) +{ + struct task_abort_req task_abort; + + memset(&task_abort, 0, sizeof(task_abort)); + if (type == SAS_INTERNAL_ABORT_SINGLE) { + task_abort.abort_all = 0; + task_abort.device_id = cpu_to_le32(dev_id); + task_abort.tag_to_abort = cpu_to_le32(task_tag); + } else if (type == SAS_INTERNAL_ABORT_DEV) { + task_abort.abort_all = cpu_to_le32(1); + task_abort.device_id = cpu_to_le32(dev_id); + } else { + pm8001_dbg(pm8001_ha, EH, "unknown type (%d)\n", type); + return -EIO; + } + + task_abort.tag = cpu_to_le32(cmd_tag); + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &task_abort, + sizeof(task_abort), 0); +} + +/* + * pm8001_chip_abort_task - SAS abort task when error or exception happened. + */ +int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct sas_internal_abort_task *abort = &task->abort_task; + struct pm8001_device *pm8001_dev = ccb->device; + int rc = TMF_RESP_FUNC_FAILED; + u32 opc, device_id; + + pm8001_dbg(pm8001_ha, EH, "cmd_tag = %x, abort task tag = 0x%x\n", + ccb->ccb_tag, abort->tag); + if (pm8001_dev->dev_type == SAS_END_DEVICE) + opc = OPC_INB_SSP_ABORT; + else if (pm8001_dev->dev_type == SAS_SATA_DEV) + opc = OPC_INB_SATA_ABORT; + else + opc = OPC_INB_SMP_ABORT;/* SMP */ + device_id = pm8001_dev->device_id; + rc = send_task_abort(pm8001_ha, opc, device_id, abort->type, + abort->tag, ccb->ccb_tag); + if (rc != TMF_RESP_FUNC_COMPLETE) + pm8001_dbg(pm8001_ha, EH, "rc= %d\n", rc); + return rc; +} + +/** + * pm8001_chip_ssp_tm_req - built the task management command. + * @pm8001_ha: our hba card information. + * @ccb: the ccb information. + * @tmf: task management function. 
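+ *
+ * Builds an SSP INI TM START IOMB carrying the TMF code, the LUN and the
+ * tag of the task to be managed.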
+ */ +int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + u32 opc = OPC_INB_SSPINITMSTART; + struct ssp_ini_tm_start_req sspTMCmd; + + memset(&sspTMCmd, 0, sizeof(sspTMCmd)); + sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id); + sspTMCmd.relate_tag = cpu_to_le32((u32)tmf->tag_of_task_to_be_managed); + sspTMCmd.tmf = cpu_to_le32(tmf->tmf); + memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); + sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); + if (pm8001_ha->chip_id != chip_8001) + sspTMCmd.ds_ads_m = cpu_to_le32(0x08); + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sspTMCmd, + sizeof(sspTMCmd), 0); +} + +int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, + void *payload) +{ + u32 opc = OPC_INB_GET_NVMD_DATA; + u32 nvmd_type; + int rc; + struct pm8001_ccb_info *ccb; + struct get_nvm_data_req nvmd_req; + struct fw_control_ex *fw_control_context; + struct pm8001_ioctl_payload *ioctl_payload = payload; + + nvmd_type = ioctl_payload->minor_function; + fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); + if (!fw_control_context) + return -ENOMEM; + fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific; + fw_control_context->len = ioctl_payload->rd_length; + memset(&nvmd_req, 0, sizeof(nvmd_req)); + + ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL); + if (!ccb) { + kfree(fw_control_context); + return -SAS_QUEUE_FULL; + } + ccb->fw_control_context = fw_control_context; + + nvmd_req.tag = cpu_to_le32(ccb->ccb_tag); + + switch (nvmd_type) { + case TWI_DEVICE: { + u32 twi_addr, twi_page_size; + twi_addr = 0xa8; + twi_page_size = 2; + + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 | + twi_page_size << 8 | TWI_DEVICE); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + case C_SEEPROM: { + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + case VPD_FLASH: { + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + case EXPAN_ROM: { + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + case IOP_RDUMP: { + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length); + nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + default: + break; + } + + rc 
= pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &nvmd_req, + sizeof(nvmd_req), 0); + if (rc) { + kfree(fw_control_context); + pm8001_ccb_free(pm8001_ha, ccb); + } + return rc; +} + +int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, + void *payload) +{ + u32 opc = OPC_INB_SET_NVMD_DATA; + u32 nvmd_type; + int rc; + struct pm8001_ccb_info *ccb; + struct set_nvm_data_req nvmd_req; + struct fw_control_ex *fw_control_context; + struct pm8001_ioctl_payload *ioctl_payload = payload; + + nvmd_type = ioctl_payload->minor_function; + fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); + if (!fw_control_context) + return -ENOMEM; + + memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr, + &ioctl_payload->func_specific, + ioctl_payload->wr_length); + memset(&nvmd_req, 0, sizeof(nvmd_req)); + + ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL); + if (!ccb) { + kfree(fw_control_context); + return -SAS_QUEUE_FULL; + } + ccb->fw_control_context = fw_control_context; + + nvmd_req.tag = cpu_to_le32(ccb->ccb_tag); + switch (nvmd_type) { + case TWI_DEVICE: { + u32 twi_addr, twi_page_size; + twi_addr = 0xa8; + twi_page_size = 2; + nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 | + twi_page_size << 8 | TWI_DEVICE); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + case C_SEEPROM: + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length); + nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + case VPD_FLASH: + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length); + nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + case EXPAN_ROM: + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length); + nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + default: + break; + } + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &nvmd_req, + sizeof(nvmd_req), 0); + if (rc) { + kfree(fw_control_context); + pm8001_ccb_free(pm8001_ha, ccb); + } + return rc; +} + +/** + * pm8001_chip_fw_flash_update_build - support the firmware update operation + * @pm8001_ha: our hba card information. 
+ * @fw_flash_updata_info: firmware flash update param + * @tag: Tag to apply to the payload + */ +int +pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, + void *fw_flash_updata_info, u32 tag) +{ + struct fw_flash_Update_req payload; + struct fw_flash_updata_info *info; + u32 opc = OPC_INB_FW_FLASH_UPDATE; + + memset(&payload, 0, sizeof(struct fw_flash_Update_req)); + info = fw_flash_updata_info; + payload.tag = cpu_to_le32(tag); + payload.cur_image_len = cpu_to_le32(info->cur_image_len); + payload.cur_image_offset = cpu_to_le32(info->cur_image_offset); + payload.total_image_len = cpu_to_le32(info->total_image_len); + payload.len = info->sgl.im_len.len ; + payload.sgl_addr_lo = + cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr))); + payload.sgl_addr_hi = + cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr))); + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); +} + +int +pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, + void *payload) +{ + struct fw_flash_updata_info flash_update_info; + struct fw_control_info *fw_control; + struct fw_control_ex *fw_control_context; + int rc; + struct pm8001_ccb_info *ccb; + void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr; + dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr; + struct pm8001_ioctl_payload *ioctl_payload = payload; + + fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); + if (!fw_control_context) + return -ENOMEM; + fw_control = (struct fw_control_info *)&ioctl_payload->func_specific; + pm8001_dbg(pm8001_ha, DEVIO, + "dma fw_control context input length :%x\n", + fw_control->len); + memcpy(buffer, fw_control->buffer, fw_control->len); + flash_update_info.sgl.addr = cpu_to_le64(phys_addr); + flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); + flash_update_info.sgl.im_len.e = 0; + flash_update_info.cur_image_offset = fw_control->offset; + flash_update_info.cur_image_len = fw_control->len; + flash_update_info.total_image_len = fw_control->size; + fw_control_context->fw_control = fw_control; + fw_control_context->virtAddr = buffer; + fw_control_context->phys_addr = phys_addr; + fw_control_context->len = fw_control->len; + + ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL); + if (!ccb) { + kfree(fw_control_context); + return -SAS_QUEUE_FULL; + } + ccb->fw_control_context = fw_control_context; + + rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info, + ccb->ccb_tag); + if (rc) { + kfree(fw_control_context); + pm8001_ccb_free(pm8001_ha, ccb); + } + + return rc; +} + +ssize_t +pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf) +{ + u32 value, rem, offset = 0, bar = 0; + u32 index, work_offset, dw_length; + u32 shift_value, gsm_base, gsm_dump_offset; + char *direct_data; + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + direct_data = buf; + gsm_dump_offset = pm8001_ha->fatal_forensic_shift_offset; + + /* check max is 1 Mbytes */ + if ((length > 0x100000) || (gsm_dump_offset & 3) || + ((gsm_dump_offset + length) > 0x1000000)) + return -EINVAL; + + if (pm8001_ha->chip_id == chip_8001) + bar = 2; + else + bar = 1; + + work_offset = gsm_dump_offset & 0xFFFF0000; + offset = gsm_dump_offset & 0x0000FFFF; + gsm_dump_offset = work_offset; + /* adjust length to dword boundary */ + rem = length & 3; + dw_length = length >> 2; + + for (index = 0; index < dw_length; index++) { + if 
((work_offset + offset) & 0xFFFF0000) { + if (pm8001_ha->chip_id == chip_8001) + shift_value = ((gsm_dump_offset + offset) & + SHIFT_REG_64K_MASK); + else + shift_value = (((gsm_dump_offset + offset) & + SHIFT_REG_64K_MASK) >> + SHIFT_REG_BIT_SHIFT); + + if (pm8001_ha->chip_id == chip_8001) { + gsm_base = GSM_BASE; + if (-1 == pm8001_bar4_shift(pm8001_ha, + (gsm_base + shift_value))) + return -EIO; + } else { + gsm_base = 0; + if (-1 == pm80xx_bar4_shift(pm8001_ha, + (gsm_base + shift_value))) + return -EIO; + } + gsm_dump_offset = (gsm_dump_offset + offset) & + 0xFFFF0000; + work_offset = 0; + offset = offset & 0x0000FFFF; + } + value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) & + 0x0000FFFF); + direct_data += sprintf(direct_data, "%08x ", value); + offset += 4; + } + if (rem != 0) { + value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) & + 0x0000FFFF); + /* xfr for non_dw */ + direct_data += sprintf(direct_data, "%08x ", value); + } + /* Shift back to BAR4 original address */ + if (-1 == pm8001_bar4_shift(pm8001_ha, 0)) + return -EIO; + pm8001_ha->fatal_forensic_shift_offset += 1024; + + if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000) + pm8001_ha->fatal_forensic_shift_offset = 0; + return direct_data - buf; +} + +int +pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 state) +{ + struct set_dev_state_req payload; + struct pm8001_ccb_info *ccb; + int rc; + u32 opc = OPC_INB_SET_DEVICE_STATE; + + memset(&payload, 0, sizeof(payload)); + + ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL); + if (!ccb) + return -SAS_QUEUE_FULL; + + payload.tag = cpu_to_le32(ccb->ccb_tag); + payload.device_id = cpu_to_le32(pm8001_dev->device_id); + payload.nds = cpu_to_le32(state); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_ccb_free(pm8001_ha, ccb); + + return rc; +} + +static int +pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha) +{ + struct sas_re_initialization_req payload; + struct pm8001_ccb_info *ccb; + int rc; + u32 opc = OPC_INB_SAS_RE_INITIALIZE; + + memset(&payload, 0, sizeof(payload)); + + ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL); + if (!ccb) + return -SAS_QUEUE_FULL; + + payload.tag = cpu_to_le32(ccb->ccb_tag); + payload.SSAHOLT = cpu_to_le32(0xd << 25); + payload.sata_hol_tmo = cpu_to_le32(80); + payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_ccb_free(pm8001_ha, ccb); + + return rc; +} + +const struct pm8001_dispatch pm8001_8001_dispatch = { + .name = "pmc8001", + .chip_init = pm8001_chip_init, + .chip_post_init = pm8001_chip_post_init, + .chip_soft_rst = pm8001_chip_soft_rst, + .chip_rst = pm8001_hw_chip_rst, + .chip_iounmap = pm8001_chip_iounmap, + .isr = pm8001_chip_isr, + .is_our_interrupt = pm8001_chip_is_our_interrupt, + .isr_process_oq = process_oq, + .interrupt_enable = pm8001_chip_interrupt_enable, + .interrupt_disable = pm8001_chip_interrupt_disable, + .make_prd = pm8001_chip_make_sg, + .smp_req = pm8001_chip_smp_req, + .ssp_io_req = pm8001_chip_ssp_io_req, + .sata_req = pm8001_chip_sata_req, + .phy_start_req = pm8001_chip_phy_start_req, + .phy_stop_req = pm8001_chip_phy_stop_req, + .reg_dev_req = pm8001_chip_reg_dev_req, + .dereg_dev_req = pm8001_chip_dereg_dev_req, + .phy_ctl_req = pm8001_chip_phy_ctl_req, + .task_abort = pm8001_chip_abort_task, + .ssp_tm_req = pm8001_chip_ssp_tm_req, + .get_nvmd_req = 
pm8001_chip_get_nvmd_req, + .set_nvmd_req = pm8001_chip_set_nvmd_req, + .fw_flash_update_req = pm8001_chip_fw_flash_update_req, + .set_dev_state_req = pm8001_chip_set_dev_state_req, + .sas_re_init_req = pm8001_chip_sas_re_initialization, + .fatal_errors = pm80xx_fatal_errors, +}; diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h new file mode 100644 index 000000000..fc2127dcb --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_hwi.h @@ -0,0 +1,1030 @@ +/* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + */ +#ifndef _PMC8001_REG_H_ +#define _PMC8001_REG_H_ + +#include +#include + + +/* for Request Opcode of IOMB */ +#define OPC_INB_ECHO 1 /* 0x000 */ +#define OPC_INB_PHYSTART 4 /* 0x004 */ +#define OPC_INB_PHYSTOP 5 /* 0x005 */ +#define OPC_INB_SSPINIIOSTART 6 /* 0x006 */ +#define OPC_INB_SSPINITMSTART 7 /* 0x007 */ +#define OPC_INB_SSPINIEXTIOSTART 8 /* 0x008 */ +#define OPC_INB_DEV_HANDLE_ACCEPT 9 /* 0x009 */ +#define OPC_INB_SSPTGTIOSTART 10 /* 0x00A */ +#define OPC_INB_SSPTGTRSPSTART 11 /* 0x00B */ +#define OPC_INB_SSPINIEDCIOSTART 12 /* 0x00C */ +#define OPC_INB_SSPINIEXTEDCIOSTART 13 /* 0x00D */ +#define OPC_INB_SSPTGTEDCIOSTART 14 /* 0x00E */ +#define OPC_INB_SSP_ABORT 15 /* 0x00F */ +#define OPC_INB_DEREG_DEV_HANDLE 16 /* 0x010 */ +#define OPC_INB_GET_DEV_HANDLE 17 /* 0x011 */ +#define OPC_INB_SMP_REQUEST 18 /* 0x012 */ +/* SMP_RESPONSE is removed */ +#define OPC_INB_SMP_RESPONSE 19 /* 0x013 */ +#define OPC_INB_SMP_ABORT 20 /* 0x014 */ +#define OPC_INB_REG_DEV 22 /* 0x016 */ +#define OPC_INB_SATA_HOST_OPSTART 23 /* 0x017 */ +#define OPC_INB_SATA_ABORT 24 /* 0x018 */ +#define OPC_INB_LOCAL_PHY_CONTROL 25 /* 0x019 */ +#define OPC_INB_GET_DEV_INFO 26 /* 0x01A */ +#define OPC_INB_FW_FLASH_UPDATE 32 /* 0x020 */ +#define OPC_INB_GPIO 34 /* 0x022 */ +#define OPC_INB_SAS_DIAG_MODE_START_END 35 /* 0x023 */ +#define OPC_INB_SAS_DIAG_EXECUTE 36 /* 0x024 */ +#define OPC_INB_SAS_HW_EVENT_ACK 37 /* 0x025 */ +#define OPC_INB_GET_TIME_STAMP 38 /* 0x026 */ +#define OPC_INB_PORT_CONTROL 39 /* 0x027 */ +#define OPC_INB_GET_NVMD_DATA 40 /* 0x028 */ +#define OPC_INB_SET_NVMD_DATA 41 /* 0x029 */ +#define OPC_INB_SET_DEVICE_STATE 42 /* 0x02A */ +#define OPC_INB_GET_DEVICE_STATE 43 /* 0x02B */ +#define OPC_INB_SET_DEV_INFO 44 /* 0x02C */ +#define OPC_INB_SAS_RE_INITIALIZE 45 /* 0x02D */ + +/* for Response Opcode of IOMB */ +#define OPC_OUB_ECHO 1 /* 0x001 */ +#define OPC_OUB_HW_EVENT 4 /* 0x004 */ +#define OPC_OUB_SSP_COMP 5 /* 0x005 */ +#define OPC_OUB_SMP_COMP 6 /* 0x006 */ +#define OPC_OUB_LOCAL_PHY_CNTRL 7 /* 0x007 */ +#define OPC_OUB_DEV_REGIST 10 /* 0x00A */ +#define OPC_OUB_DEREG_DEV 11 /* 0x00B */ +#define OPC_OUB_GET_DEV_HANDLE 12 /* 0x00C */ +#define OPC_OUB_SATA_COMP 13 /* 0x00D */ +#define OPC_OUB_SATA_EVENT 14 /* 0x00E */ +#define OPC_OUB_SSP_EVENT 15 /* 0x00F */ +#define OPC_OUB_DEV_HANDLE_ARRIV 16 /* 0x010 */ +/* SMP_RECEIVED Notification is removed */ +#define OPC_OUB_SMP_RECV_EVENT 17 /* 0x011 */ +#define OPC_OUB_SSP_RECV_EVENT 18 /* 0x012 */ +#define OPC_OUB_DEV_INFO 19 /* 0x013 */ +#define OPC_OUB_FW_FLASH_UPDATE 20 /* 0x014 */ +#define OPC_OUB_GPIO_RESPONSE 22 /* 0x016 */ +#define OPC_OUB_GPIO_EVENT 23 /* 0x017 */ +#define OPC_OUB_GENERAL_EVENT 24 /* 0x018 */ +#define OPC_OUB_SSP_ABORT_RSP 26 /* 0x01A */ +#define OPC_OUB_SATA_ABORT_RSP 27 /* 0x01B */ +#define OPC_OUB_SAS_DIAG_MODE_START_END 28 /* 0x01C */ +#define OPC_OUB_SAS_DIAG_EXECUTE 29 /* 0x01D */ +#define OPC_OUB_GET_TIME_STAMP 30 /* 0x01E */ +#define OPC_OUB_SAS_HW_EVENT_ACK 31 /* 0x01F */ +#define OPC_OUB_PORT_CONTROL 32 /* 0x020 */ +#define OPC_OUB_SKIP_ENTRY 33 /* 0x021 */ +#define OPC_OUB_SMP_ABORT_RSP 34 /* 0x022 */ +#define OPC_OUB_GET_NVMD_DATA 35 /* 0x023 */ +#define OPC_OUB_SET_NVMD_DATA 36 /* 0x024 */ +#define OPC_OUB_DEVICE_HANDLE_REMOVAL 37 /* 0x025 */ +#define OPC_OUB_SET_DEVICE_STATE 38 /* 0x026 */ +#define OPC_OUB_GET_DEVICE_STATE 39 /* 0x027 */ +#define OPC_OUB_SET_DEV_INFO 40 /* 0x028 */ +#define OPC_OUB_SAS_RE_INITIALIZE 41 /* 0x029 */ + +/* for phy start*/ +#define SPINHOLD_DISABLE (0x00 << 14) +#define 
SPINHOLD_ENABLE (0x01 << 14) +#define LINKMODE_SAS (0x01 << 12) +#define LINKMODE_DSATA (0x02 << 12) +#define LINKMODE_AUTO (0x03 << 12) +#define LINKRATE_15 (0x01 << 8) +#define LINKRATE_30 (0x02 << 8) +#define LINKRATE_60 (0x04 << 8) + +/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */ +#define GSM_SM_BASE 0x4F0000 +struct mpi_msg_hdr{ + __le32 header; /* Bits [11:0] - Message operation code */ + /* Bits [15:12] - Message Category */ + /* Bits [21:16] - Outboundqueue ID for the + operation completion message */ + /* Bits [23:22] - Reserved */ + /* Bits [28:24] - Buffer Count, indicates how + many buffer are allocated for the massage */ + /* Bits [30:29] - Reserved */ + /* Bits [31] - Message Valid bit */ +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of PHY Start Command + * use to describe enable the phy (64 bytes) + */ +struct phy_start_req { + __le32 tag; + __le32 ase_sh_lm_slr_phyid; + struct sas_identify_frame sas_identify; + u32 reserved[5]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of PHY Start Command + * use to disable the phy (64 bytes) + */ +struct phy_stop_req { + __le32 tag; + __le32 phy_id; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + + +/* set device bits fis - device to host */ +struct set_dev_bits_fis { + u8 fis_type; /* 0xA1*/ + u8 n_i_pmport; + /* b7 : n Bit. Notification bit. If set device needs attention. */ + /* b6 : i Bit. Interrupt Bit */ + /* b5-b4: reserved2 */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u32 _r_a; +} __attribute__ ((packed)); +/* PIO setup FIS - device to host */ +struct pio_setup_fis { + u8 fis_type; /* 0x5f */ + u8 i_d_pmPort; + /* b7 : reserved */ + /* b6 : i bit. Interrupt bit */ + /* b5 : d bit. data transfer direction. 
set to 1 for device to host + xfer */ + /* b4 : reserved */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + u8 lbal_exp; + u8 lbam_exp; + u8 lbah_exp; + u8 _r_a; + u8 sector_count; + u8 sector_count_exp; + u8 _r_b; + u8 e_status; + u8 _r_c[2]; + u8 transfer_count; +} __attribute__ ((packed)); + +/* + * brief the data structure of SATA Completion Response + * use to describe the sata task response (64 bytes) + */ +struct sata_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u32 sata_resp[12]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of SAS HW Event Notification + * use to alert the host about the hardware event(64 bytes) + */ +struct hw_event_resp { + __le32 lr_evt_status_phyid_portid; + __le32 evt_param; + __le32 npip_portstate; + struct sas_identify_frame sas_identify; + struct dev_to_host_fis sata_fis; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of REGISTER DEVICE Command + * use to describe MPI REGISTER DEVICE Command (64 bytes) + */ + +struct reg_dev_req { + __le32 tag; + __le32 phyid_portid; + __le32 dtype_dlr_retry; + __le32 firstburstsize_ITNexustimeout; + u8 sas_addr[SAS_ADDR_SIZE]; + __le32 upper_device_id; + u32 reserved[8]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of DEREGISTER DEVICE Command + * use to request spc to remove all internal resources associated + * with the device id (64 bytes) + */ + +struct dereg_dev_req { + __le32 tag; + __le32 device_id; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of DEVICE_REGISTRATION Response + * use to notify the completion of the device registration (64 bytes) + */ + +struct dev_reg_resp { + __le32 tag; + __le32 status; + __le32 device_id; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of Local PHY Control Command + * use to issue PHY CONTROL to local phy (64 bytes) + */ +struct local_phy_ctl_req { + __le32 tag; + __le32 phyop_phyid; + u32 reserved1[13]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of Local Phy Control Response + * use to describe MPI Local Phy Control Response (64 bytes) + */ +struct local_phy_ctl_resp { + __le32 tag; + __le32 phyop_phyid; + __le32 status; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + + +#define OP_BITS 0x0000FF00 +#define ID_BITS 0x000000FF + +/* + * brief the data structure of PORT Control Command + * use to control port properties (64 bytes) + */ + +struct port_ctl_req { + __le32 tag; + __le32 portop_portid; + __le32 param0; + __le32 param1; + u32 reserved1[11]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of HW Event Ack Command + * use to acknowledge receive HW event (64 bytes) + */ + +struct hw_event_ack_req { + __le32 tag; + __le32 sea_phyid_portid; + __le32 param0; + __le32 param1; + u32 reserved1[11]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of SSP Completion Response + * use to indicate a SSP Completion (n bytes) + */ +struct ssp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + __le32 ssptag_rescv_rescpad; + struct ssp_response_iu ssp_resp_iu; + __le32 residual_count; +} __attribute__((packed, aligned(4))); + + +#define SSP_RESCV_BIT 0x00010000 + +/* + * brief the data structure of SATA EVNET esponse + * use to indicate a SATA Completion (64 bytes) + */ + +struct sata_event_resp { + __le32 tag; 
+ __le32 event; + __le32 port_id; + __le32 device_id; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP EVNET esponse + * use to indicate a SSP Completion (64 bytes) + */ + +struct ssp_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of General Event Notification Response + * use to describe MPI General Event Notification Response (64 bytes) + */ +struct general_event_resp { + __le32 status; + __le32 inb_IOMB_payload[14]; +} __attribute__((packed, aligned(4))); + + +#define GENERAL_EVENT_PAYLOAD 14 +#define OPCODE_BITS 0x00000fff + +/* + * brief the data structure of SMP Request Command + * use to describe MPI SMP REQUEST Command (64 bytes) + */ +struct smp_req { + __le32 tag; + __le32 device_id; + __le32 len_ip_ir; + /* Bits [0] - Indirect response */ + /* Bits [1] - Indirect Payload */ + /* Bits [15:2] - Reserved */ + /* Bits [23:16] - direct payload Len */ + /* Bits [31:24] - Reserved */ + u8 smp_req16[16]; + union { + u8 smp_req[32]; + struct { + __le64 long_req_addr;/* sg dma address, LE */ + __le32 long_req_size;/* LE */ + u32 _r_a; + __le64 long_resp_addr;/* sg dma address, LE */ + __le32 long_resp_size;/* LE */ + u32 _r_b; + } long_smp_req;/* sequencer extension */ + }; +} __attribute__((packed, aligned(4))); +/* + * brief the data structure of SMP Completion Response + * use to describe MPI SMP Completion Response (64 bytes) + */ +struct smp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + __le32 _r_a[12]; +} __attribute__((packed, aligned(4))); + +/* + *brief the data structure of SSP SMP SATA Abort Command + * use to describe MPI SSP SMP & SATA Abort Command (64 bytes) + */ +struct task_abort_req { + __le32 tag; + __le32 device_id; + __le32 tag_to_abort; + __le32 abort_all; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP SATA SMP Abort Response + * use to describe SSP SMP & SATA Abort Response ( 64 bytes) + */ +struct task_abort_resp { + __le32 tag; + __le32 status; + __le32 scp; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of SAS Diagnostic Start/End Command + * use to describe MPI SAS Diagnostic Start/End Command (64 bytes) + */ +struct sas_diag_start_end_req { + __le32 tag; + __le32 operation_phyid; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of SAS Diagnostic Execute Command + * use to describe MPI SAS Diagnostic Execute Command (64 bytes) + */ +struct sas_diag_execute_req{ + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 pat1_pat2; + __le32 threshold; + __le32 codepat_errmsk; + __le32 pmon; + __le32 pERF1CTL; + u32 reserved[8]; +} __attribute__((packed, aligned(4))); + + +#define SAS_DIAG_PARAM_BYTES 24 + +/* + * brief the data structure of Set Device State Command + * use to describe MPI Set Device State Command (64 bytes) + */ +struct set_dev_state_req { + __le32 tag; + __le32 device_id; + __le32 nds; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of sas_re_initialization + */ +struct sas_re_initialization_req { + + __le32 tag; + __le32 SSAHOLT;/* bit29-set max port; + ** bit28-set open reject cmd retries. + ** bit27-set open reject data retries. + ** bit26-set open reject option, remap:1 or not:0. + ** bit25-set sata head of line time out. 
+ */ + __le32 reserved_maxPorts; + __le32 open_reject_cmdretries_data_retries;/* cmd retries: 31-bit16; + * data retries: bit15-bit0. + */ + __le32 sata_hol_tmo; + u32 reserved1[10]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SATA Start Command + * use to describe MPI SATA IO Start Command (64 bytes) + */ + +struct sata_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 retfis_ncqtag_atap_dir_m; + struct host_to_dev_fis sata_fis; + u32 reserved1; + u32 reserved2; + u32 addr_low; + u32 addr_high; + __le32 len; + __le32 esgl; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI TM Start Command + * use to describe MPI SSP INI TM Start Command (64 bytes) + */ +struct ssp_ini_tm_start_req { + __le32 tag; + __le32 device_id; + __le32 relate_tag; + __le32 tmf; + u8 lun[8]; + __le32 ds_ads_m; + u32 reserved[8]; +} __attribute__((packed, aligned(4))); + + +struct ssp_info_unit { + u8 lun[8];/* SCSI Logical Unit Number */ + u8 reserved1;/* reserved */ + u8 efb_prio_attr; + /* B7 : enabledFirstBurst */ + /* B6-3 : taskPriority */ + /* B2-0 : taskAttribute */ + u8 reserved2; /* reserved */ + u8 additional_cdb_len; + /* B7-2 : additional_cdb_len */ + /* B1-0 : reserved */ + u8 cdb[16];/* The SCSI CDB up to 16 bytes length */ +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of SSP INI IO Start Command + * use to describe MPI SSP INI IO Start Command (64 bytes) + */ +struct ssp_ini_io_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dir_m_tlr; + struct ssp_info_unit ssp_iu; + __le32 addr_low; + __le32 addr_high; + __le32 len; + __le32 esgl; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of Firmware download + * use to describe MPI FW DOWNLOAD Command (64 bytes) + */ +struct fw_flash_Update_req { + __le32 tag; + __le32 cur_image_offset; + __le32 cur_image_len; + __le32 total_image_len; + u32 reserved0[7]; + __le32 sgl_addr_lo; + __le32 sgl_addr_hi; + __le32 len; + __le32 ext_reserved; +} __attribute__((packed, aligned(4))); + + +#define FWFLASH_IOMB_RESERVED_LEN 0x07 +/** + * brief the data structure of FW_FLASH_UPDATE Response + * use to describe MPI FW_FLASH_UPDATE Response (64 bytes) + * + */ +struct fw_flash_Update_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of Get NVM Data Command + * use to get data from NVM in HBA(64 bytes) + */ +struct get_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1; +} __attribute__((packed, aligned(4))); + + +struct set_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + __le32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1; +} __attribute__((packed, aligned(4))); + + +#define TWI_DEVICE 0x0 +#define C_SEEPROM 0x1 +#define VPD_FLASH 0x4 +#define AAP1_RDUMP 0x5 +#define IOP_RDUMP 0x6 +#define EXPAN_ROM 0x7 + +#define IPMode 0x80000000 +#define NVMD_TYPE 0x0000000F +#define NVMD_STAT 0x0000FFFF +#define NVMD_LEN 0xFF000000 +/** + * brief the data structure of Get NVMD Data Response + * use to describe MPI Get NVMD Data Response (64 bytes) + */ +struct get_nvm_data_resp { + __le32 tag; + __le32 ir_tda_bn_dps_das_nvm; + __le32 dlen_status; + __le32 nvm_data[12]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the 
data structure of SAS Diagnostic Start/End Response + * use to describe MPI SAS Diagnostic Start/End Response (64 bytes) + * + */ +struct sas_diag_start_end_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of SAS Diagnostic Execute Response + * use to describe MPI SAS Diagnostic Execute Response (64 bytes) + * + */ +struct sas_diag_execute_resp { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 Status; + __le32 ReportData; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of Set Device State Response + * use to describe MPI Set Device State Response (64 bytes) + * + */ +struct set_dev_state_resp { + __le32 tag; + __le32 status; + __le32 device_id; + __le32 pds_nds; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + + +#define NDS_BITS 0x0F +#define PDS_BITS 0xF0 + +/* + * HW Events type + */ + +#define HW_EVENT_RESET_START 0x01 +#define HW_EVENT_CHIP_RESET_COMPLETE 0x02 +#define HW_EVENT_PHY_STOP_STATUS 0x03 +#define HW_EVENT_SAS_PHY_UP 0x04 +#define HW_EVENT_SATA_PHY_UP 0x05 +#define HW_EVENT_SATA_SPINUP_HOLD 0x06 +#define HW_EVENT_PHY_DOWN 0x07 +#define HW_EVENT_PORT_INVALID 0x08 +#define HW_EVENT_BROADCAST_CHANGE 0x09 +#define HW_EVENT_PHY_ERROR 0x0A +#define HW_EVENT_BROADCAST_SES 0x0B +#define HW_EVENT_INBOUND_CRC_ERROR 0x0C +#define HW_EVENT_HARD_RESET_RECEIVED 0x0D +#define HW_EVENT_MALFUNCTION 0x0E +#define HW_EVENT_ID_FRAME_TIMEOUT 0x0F +#define HW_EVENT_BROADCAST_EXP 0x10 +#define HW_EVENT_PHY_START_STATUS 0x11 +#define HW_EVENT_LINK_ERR_INVALID_DWORD 0x12 +#define HW_EVENT_LINK_ERR_DISPARITY_ERROR 0x13 +#define HW_EVENT_LINK_ERR_CODE_VIOLATION 0x14 +#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH 0x15 +#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED 0x16 +#define HW_EVENT_PORT_RECOVERY_TIMER_TMO 0x17 +#define HW_EVENT_PORT_RECOVER 0x18 +#define HW_EVENT_PORT_RESET_TIMER_TMO 0x19 +#define HW_EVENT_PORT_RESET_COMPLETE 0x20 +#define EVENT_BROADCAST_ASYNCH_EVENT 0x21 + +/* port state */ +#define PORT_NOT_ESTABLISHED 0x00 +#define PORT_VALID 0x01 +#define PORT_LOSTCOMM 0x02 +#define PORT_IN_RESET 0x04 +#define PORT_INVALID 0x08 + +/* + * SSP/SMP/SATA IO Completion Status values + */ + +#define IO_SUCCESS 0x00 +#define IO_ABORTED 0x01 +#define IO_OVERFLOW 0x02 +#define IO_UNDERFLOW 0x03 +#define IO_FAILED 0x04 +#define IO_ABORT_RESET 0x05 +#define IO_NOT_VALID 0x06 +#define IO_NO_DEVICE 0x07 +#define IO_ILLEGAL_PARAMETER 0x08 +#define IO_LINK_FAILURE 0x09 +#define IO_PROG_ERROR 0x0A +#define IO_EDC_IN_ERROR 0x0B +#define IO_EDC_OUT_ERROR 0x0C +#define IO_ERROR_HW_TIMEOUT 0x0D +#define IO_XFER_ERROR_BREAK 0x0E +#define IO_XFER_ERROR_PHY_NOT_READY 0x0F +#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED 0x10 +#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11 +#define IO_OPEN_CNX_ERROR_BREAK 0x12 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13 +#define IO_OPEN_CNX_ERROR_BAD_DESTINATION 0x14 +#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15 +#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16 +#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17 +#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18 +#define IO_XFER_ERROR_NAK_RECEIVED 0x19 +#define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A +#define IO_XFER_ERROR_PEER_ABORTED 0x1B +#define IO_XFER_ERROR_RX_FRAME 0x1C +#define IO_XFER_ERROR_DMA 0x1D +#define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E +#define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F +#define IO_XFER_ERROR_SATA 0x20 +#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22 
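/*
 * Editor's note (illustrative sketch, not part of the upstream patch): the
 * codes above are reported in the "status" word of the SSP/SMP/SATA
 * completion IOMBs defined earlier in this header.  A completion handler
 * might coarsely classify them along the lines of the hypothetical helper
 * below; the name and the grouping are examples only, not the driver's
 * actual policy, and only codes already defined above are used.
 */
static inline bool pm8001_io_status_needs_retry(u32 status)
{
	switch (status) {
	case IO_SUCCESS:
	case IO_UNDERFLOW:	/* command completed, data was just short */
		return false;
	case IO_XFER_ERROR_BREAK:
	case IO_XFER_ERROR_PHY_NOT_READY:
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
	case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
		return true;	/* transient link/connection conditions */
	default:
		return false;	/* treat everything else as a hard failure */
	}
}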
+#define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21 +#define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23 +#define IO_XFER_OPEN_RETRY_TIMEOUT 0x24 +#define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25 +#define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26 +#define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27 +#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28 + +#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30 +#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31 +#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32 + +#define IO_XFER_ERROR_OFFSET_MISMATCH 0x34 +#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35 +#define IO_XFER_CMD_FRAME_ISSUED 0x36 +#define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37 +#define IO_PORT_IN_RESET 0x38 +#define IO_DS_NON_OPERATIONAL 0x39 +#define IO_DS_IN_RECOVERY 0x3A +#define IO_TM_TAG_NOT_FOUND 0x3B +#define IO_XFER_PIO_SETUP_ERROR 0x3C +#define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D +#define IO_DS_IN_ERROR 0x3E +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F +#define IO_ABORT_IN_PROGRESS 0x40 +#define IO_ABORT_DELAYED 0x41 +#define IO_INVALID_LENGTH 0x42 +#define IO_FATAL_ERROR 0x51 + +/* WARNING: This error code must always be the last number. + * If you add error code, modify this code also + * It is used as an index + */ +#define IO_ERROR_UNKNOWN_GENERIC 0x43 + +/* MSGU CONFIGURATION TABLE*/ + +#define SPC_MSGU_CFG_TABLE_UPDATE 0x01/* Inbound doorbell bit0 */ +#define SPC_MSGU_CFG_TABLE_RESET 0x02/* Inbound doorbell bit1 */ +#define SPC_MSGU_CFG_TABLE_FREEZE 0x04/* Inbound doorbell bit2 */ +#define SPC_MSGU_CFG_TABLE_UNFREEZE 0x08/* Inbound doorbell bit4 */ +#define MSGU_IBDB_SET 0x04 +#define MSGU_HOST_INT_STATUS 0x08 +#define MSGU_HOST_INT_MASK 0x0C +#define MSGU_IOPIB_INT_STATUS 0x18 +#define MSGU_IOPIB_INT_MASK 0x1C +#define MSGU_IBDB_CLEAR 0x20/* RevB - Host not use */ +#define MSGU_MSGU_CONTROL 0x24 +#define MSGU_ODR 0x3C/* RevB */ +#define MSGU_ODCR 0x40/* RevB */ +#define MSGU_SCRATCH_PAD_0 0x44 +#define MSGU_SCRATCH_PAD_1 0x48 +#define MSGU_SCRATCH_PAD_2 0x4C +#define MSGU_SCRATCH_PAD_3 0x50 +#define MSGU_HOST_SCRATCH_PAD_0 0x54 +#define MSGU_HOST_SCRATCH_PAD_1 0x58 +#define MSGU_HOST_SCRATCH_PAD_2 0x5C +#define MSGU_HOST_SCRATCH_PAD_3 0x60 +#define MSGU_HOST_SCRATCH_PAD_4 0x64 +#define MSGU_HOST_SCRATCH_PAD_5 0x68 +#define MSGU_HOST_SCRATCH_PAD_6 0x6C +#define MSGU_HOST_SCRATCH_PAD_7 0x70 +#define MSGU_ODMR 0x74/* RevB */ + +/* bit definition for ODMR register */ +#define ODMR_MASK_ALL 0xFFFFFFFF/* mask all + interrupt vector */ +#define ODMR_CLEAR_ALL 0/* clear all + interrupt vector */ +/* bit definition for ODCR register */ +#define ODCR_CLEAR_ALL 0xFFFFFFFF /* mask all + interrupt vector*/ +/* MSIX Interupts */ +#define MSIX_TABLE_OFFSET 0x2000 +#define MSIX_TABLE_ELEMENT_SIZE 0x10 +#define MSIX_INTERRUPT_CONTROL_OFFSET 0xC +#define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + MSIX_INTERRUPT_CONTROL_OFFSET) +#define MSIX_INTERRUPT_DISABLE 0x1 +#define MSIX_INTERRUPT_ENABLE 0x0 + + +/* state definition for Scratch Pad1 register */ +#define SCRATCH_PAD1_POR 0x00 /* power on reset state */ +#define SCRATCH_PAD1_SFR 0x01 /* soft reset state */ +#define SCRATCH_PAD1_ERR 0x02 /* error state */ +#define SCRATCH_PAD1_RDY 0x03 /* ready state */ +#define SCRATCH_PAD1_RST 0x04 /* soft reset toggle flag */ +#define SCRATCH_PAD1_AAP1RDY_RST 0x08 /* AAP1 ready for soft reset */ +#define SCRATCH_PAD1_STATE_MASK 0xFFFFFFF0 /* ScratchPad1 + Mask, bit1-0 State, bit2 Soft Reset, bit3 FW RDY for Soft Reset */ +#define SCRATCH_PAD1_RESERVED 0x000003F8 /* Scratch Pad1 + Reserved bit 3 to 9 */ + + /* state 
definition for Scratch Pad2 register */ +#define SCRATCH_PAD2_POR 0x00 /* power on state */ +#define SCRATCH_PAD2_SFR 0x01 /* soft reset state */ +#define SCRATCH_PAD2_ERR 0x02 /* error state */ +#define SCRATCH_PAD2_RDY 0x03 /* ready state */ +#define SCRATCH_PAD2_FWRDY_RST 0x04 /* FW ready for soft reset flag*/ +#define SCRATCH_PAD2_IOPRDY_RST 0x08 /* IOP ready for soft reset */ +#define SCRATCH_PAD2_STATE_MASK 0xFFFFFFF4 /* ScratchPad 2 + Mask, bit1-0 State */ +#define SCRATCH_PAD2_RESERVED 0x000003FC /* Scratch Pad1 + Reserved bit 2 to 9 */ + +#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */ +#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */ + +/* main configuration offset - byte offset */ +#define MAIN_SIGNATURE_OFFSET 0x00/* DWORD 0x00 */ +#define MAIN_INTERFACE_REVISION 0x04/* DWORD 0x01 */ +#define MAIN_FW_REVISION 0x08/* DWORD 0x02 */ +#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C/* DWORD 0x03 */ +#define MAIN_MAX_SGL_OFFSET 0x10/* DWORD 0x04 */ +#define MAIN_CNTRL_CAP_OFFSET 0x14/* DWORD 0x05 */ +#define MAIN_GST_OFFSET 0x18/* DWORD 0x06 */ +#define MAIN_IBQ_OFFSET 0x1C/* DWORD 0x07 */ +#define MAIN_OBQ_OFFSET 0x20/* DWORD 0x08 */ +#define MAIN_IQNPPD_HPPD_OFFSET 0x24/* DWORD 0x09 */ +#define MAIN_OB_HW_EVENT_PID03_OFFSET 0x28/* DWORD 0x0A */ +#define MAIN_OB_HW_EVENT_PID47_OFFSET 0x2C/* DWORD 0x0B */ +#define MAIN_OB_NCQ_EVENT_PID03_OFFSET 0x30/* DWORD 0x0C */ +#define MAIN_OB_NCQ_EVENT_PID47_OFFSET 0x34/* DWORD 0x0D */ +#define MAIN_TITNX_EVENT_PID03_OFFSET 0x38/* DWORD 0x0E */ +#define MAIN_TITNX_EVENT_PID47_OFFSET 0x3C/* DWORD 0x0F */ +#define MAIN_OB_SSP_EVENT_PID03_OFFSET 0x40/* DWORD 0x10 */ +#define MAIN_OB_SSP_EVENT_PID47_OFFSET 0x44/* DWORD 0x11 */ +#define MAIN_OB_SMP_EVENT_PID03_OFFSET 0x48/* DWORD 0x12 */ +#define MAIN_OB_SMP_EVENT_PID47_OFFSET 0x4C/* DWORD 0x13 */ +#define MAIN_EVENT_LOG_ADDR_HI 0x50/* DWORD 0x14 */ +#define MAIN_EVENT_LOG_ADDR_LO 0x54/* DWORD 0x15 */ +#define MAIN_EVENT_LOG_BUFF_SIZE 0x58/* DWORD 0x16 */ +#define MAIN_EVENT_LOG_OPTION 0x5C/* DWORD 0x17 */ +#define MAIN_IOP_EVENT_LOG_ADDR_HI 0x60/* DWORD 0x18 */ +#define MAIN_IOP_EVENT_LOG_ADDR_LO 0x64/* DWORD 0x19 */ +#define MAIN_IOP_EVENT_LOG_BUFF_SIZE 0x68/* DWORD 0x1A */ +#define MAIN_IOP_EVENT_LOG_OPTION 0x6C/* DWORD 0x1B */ +#define MAIN_FATAL_ERROR_INTERRUPT 0x70/* DWORD 0x1C */ +#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74/* DWORD 0x1D */ +#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78/* DWORD 0x1E */ +#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C/* DWORD 0x1F */ +#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80/* DWORD 0x20 */ +#define MAIN_HDA_FLAGS_OFFSET 0x84/* DWORD 0x21 */ +#define MAIN_ANALOG_SETUP_OFFSET 0x88/* DWORD 0x22 */ + +/* Gereral Status Table offset - byte offset */ +#define GST_GSTLEN_MPIS_OFFSET 0x00 +#define GST_IQ_FREEZE_STATE0_OFFSET 0x04 +#define GST_IQ_FREEZE_STATE1_OFFSET 0x08 +#define GST_MSGUTCNT_OFFSET 0x0C +#define GST_IOPTCNT_OFFSET 0x10 +#define GST_PHYSTATE_OFFSET 0x18 +#define GST_PHYSTATE0_OFFSET 0x18 +#define GST_PHYSTATE1_OFFSET 0x1C +#define GST_PHYSTATE2_OFFSET 0x20 +#define GST_PHYSTATE3_OFFSET 0x24 +#define GST_PHYSTATE4_OFFSET 0x28 +#define GST_PHYSTATE5_OFFSET 0x2C +#define GST_PHYSTATE6_OFFSET 0x30 +#define GST_PHYSTATE7_OFFSET 0x34 +#define GST_RERRINFO_OFFSET 0x44 + +/* General Status Table - MPI state */ +#define GST_MPI_STATE_UNINIT 0x00 +#define GST_MPI_STATE_INIT 0x01 +#define GST_MPI_STATE_TERMINATION 0x02 +#define GST_MPI_STATE_ERROR 0x03 +#define GST_MPI_STATE_MASK 0x07 + +#define MBIC_NMI_ENABLE_VPE0_IOP 0x000418 +#define 
MBIC_NMI_ENABLE_VPE0_AAP1 0x000418 +/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */ +#define PCIE_EVENT_INTERRUPT_ENABLE 0x003040 +#define PCIE_EVENT_INTERRUPT 0x003044 +#define PCIE_ERROR_INTERRUPT_ENABLE 0x003048 +#define PCIE_ERROR_INTERRUPT 0x00304C +/* signature definition for host scratch pad0 register */ +#define SPC_SOFT_RESET_SIGNATURE 0x252acbcd +/* Signature for Soft Reset */ + +/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */ +#define SPC_REG_RESET 0x000000/* reset register */ + +/* bit difination for SPC_RESET register */ +#define SPC_REG_RESET_OSSP 0x00000001 +#define SPC_REG_RESET_RAAE 0x00000002 +#define SPC_REG_RESET_PCS_SPBC 0x00000004 +#define SPC_REG_RESET_PCS_IOP_SS 0x00000008 +#define SPC_REG_RESET_PCS_AAP1_SS 0x00000010 +#define SPC_REG_RESET_PCS_AAP2_SS 0x00000020 +#define SPC_REG_RESET_PCS_LM 0x00000040 +#define SPC_REG_RESET_PCS 0x00000080 +#define SPC_REG_RESET_GSM 0x00000100 +#define SPC_REG_RESET_DDR2 0x00010000 +#define SPC_REG_RESET_BDMA_CORE 0x00020000 +#define SPC_REG_RESET_BDMA_SXCBI 0x00040000 +#define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000 +#define SPC_REG_RESET_PCIE_PWR 0x00100000 +#define SPC_REG_RESET_PCIE_SFT 0x00200000 +#define SPC_REG_RESET_PCS_SXCBI 0x00400000 +#define SPC_REG_RESET_LMS_SXCBI 0x00800000 +#define SPC_REG_RESET_PMIC_SXCBI 0x01000000 +#define SPC_REG_RESET_PMIC_CORE 0x02000000 +#define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000 +#define SPC_REG_RESET_DEVICE 0x80000000 + +/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */ +#define SPC_IBW_AXI_TRANSLATION_LOW 0x003258 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +/* Dynamic map through Bar4 - 0x00700000 */ +#define GSM_CONFIG_RESET 0x00000000 +#define RAM_ECC_DB_ERR 0x00000018 +#define GSM_READ_ADDR_PARITY_INDIC 0x00000058 +#define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060 +#define GSM_WRITE_DATA_PARITY_INDIC 0x00000068 +#define GSM_READ_ADDR_PARITY_CHECK 0x00000038 +#define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040 +#define GSM_WRITE_DATA_PARITY_CHECK 0x00000048 + +#define RB6_ACCESS_REG 0x6A0000 +#define HDAC_EXEC_CMD 0x0002 +#define HDA_C_PA 0xcb +#define HDA_SEQ_ID_BITS 0x00ff0000 +#define HDA_GSM_OFFSET_BITS 0x00FFFFFF +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +#define SPC_TOP_LEVEL_ADDR_BASE 0x000000 +#define GSM_CONFIG_RESET_VALUE 0x00003b00 +#define GPIO_ADDR_BASE 0x00090000 +#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c + +/* RB6 offset */ +#define SPC_RB6_OFFSET 0x80C0 +/* Magic number of soft reset for RB6 */ +#define RB6_MAGIC_NUMBER_RST 0x1234 + +/* Device Register status */ +#define DEVREG_SUCCESS 0x00 +#define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01 +#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02 +#define DEVREG_FAILURE_INVALID_PHY_ID 0x03 +#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04 +#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05 +#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 +#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 + +#define GSM_BASE 0x4F0000 +#define SHIFT_REG_64K_MASK 0xffff0000 +#define SHIFT_REG_BIT_SHIFT 8 +#endif + diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c new file mode 100644 index 000000000..443a3176c --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -0,0 +1,1569 @@ +/* + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + */ + +#include +#include "pm8001_sas.h" +#include "pm8001_chips.h" +#include "pm80xx_hwi.h" + +static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING | + PM8001_EVENT_LOGGING | PM8001_INIT_LOGGING; +module_param(logging_level, ulong, 0644); +MODULE_PARM_DESC(logging_level, " bits for enabling logging info."); + +static ulong link_rate = LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | LINKRATE_120; +module_param(link_rate, ulong, 0644); +MODULE_PARM_DESC(link_rate, "Enable link rate.\n" + " 1: Link rate 1.5G\n" + " 2: Link rate 3.0G\n" + " 4: Link rate 6.0G\n" + " 8: Link rate 12.0G\n"); + +static struct scsi_transport_template *pm8001_stt; +static int pm8001_init_ccb_tag(struct pm8001_hba_info *); + +/* + * chip info structure to identify chip key functionality as + * encryption available/not, no of ports, hw specific function ref + */ +static const struct pm8001_chip_info pm8001_chips[] = { + [chip_8001] = {0, 8, &pm8001_8001_dispatch,}, + [chip_8008] = {0, 8, &pm8001_80xx_dispatch,}, + [chip_8009] = {1, 8, &pm8001_80xx_dispatch,}, + [chip_8018] = {0, 16, &pm8001_80xx_dispatch,}, + [chip_8019] = {1, 16, &pm8001_80xx_dispatch,}, + [chip_8074] = {0, 8, &pm8001_80xx_dispatch,}, + [chip_8076] = {0, 16, &pm8001_80xx_dispatch,}, + [chip_8077] = {0, 16, &pm8001_80xx_dispatch,}, + [chip_8006] = {0, 16, &pm8001_80xx_dispatch,}, + [chip_8070] = {0, 8, &pm8001_80xx_dispatch,}, + [chip_8072] = {0, 16, &pm8001_80xx_dispatch,}, +}; +static int pm8001_id; + +LIST_HEAD(hba_list); + +struct workqueue_struct *pm8001_wq; + +static void pm8001_map_queues(struct Scsi_Host *shost) +{ + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; + + if (pm8001_ha->number_of_intr > 1) + blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1); + + return blk_mq_map_queues(qmap); +} + +/* + * The main structure which LLDD must register for scsi core. + */ +static const struct scsi_host_template pm8001_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .proc_name = DRV_NAME, + .queuecommand = sas_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, + .target_alloc = sas_target_alloc, + .slave_configure = sas_slave_configure, + .scan_finished = pm8001_scan_finished, + .scan_start = pm8001_scan_start, + .change_queue_depth = sas_change_queue_depth, + .bios_param = sas_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = PM8001_MAX_DMA_SG, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, + .slave_alloc = sas_slave_alloc, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif + .shost_groups = pm8001_host_groups, + .track_queue_depth = 1, + .cmd_per_lun = 32, + .map_queues = pm8001_map_queues, +}; + +/* + * Sas layer call this function to execute specific task. 
+ */ +static struct sas_domain_function_template pm8001_transport_ops = { + .lldd_dev_found = pm8001_dev_found, + .lldd_dev_gone = pm8001_dev_gone, + + .lldd_execute_task = pm8001_queue_command, + .lldd_control_phy = pm8001_phy_control, + + .lldd_abort_task = pm8001_abort_task, + .lldd_abort_task_set = sas_abort_task_set, + .lldd_clear_task_set = pm8001_clear_task_set, + .lldd_I_T_nexus_reset = pm8001_I_T_nexus_reset, + .lldd_lu_reset = pm8001_lu_reset, + .lldd_query_task = pm8001_query_task, + .lldd_port_formed = pm8001_port_formed, + .lldd_tmf_exec_complete = pm8001_setds_completion, + .lldd_tmf_aborted = pm8001_tmf_aborted, +}; + +/** + * pm8001_phy_init - initiate our adapter phys + * @pm8001_ha: our hba structure. + * @phy_id: phy id. + */ +static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id) +{ + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + phy->phy_state = PHY_LINK_DISABLE; + phy->pm8001_ha = pm8001_ha; + phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; + phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS; + sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0; + sas_phy->iproto = SAS_PROTOCOL_ALL; + sas_phy->tproto = 0; + sas_phy->role = PHY_ROLE_INITIATOR; + sas_phy->oob_mode = OOB_NOT_CONNECTED; + sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; + sas_phy->id = phy_id; + sas_phy->sas_addr = (u8 *)&phy->dev_sas_addr; + sas_phy->frame_rcvd = &phy->frame_rcvd[0]; + sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata; + sas_phy->lldd_phy = phy; +} + +/** + * pm8001_free - free hba + * @pm8001_ha: our hba structure. + */ +static void pm8001_free(struct pm8001_hba_info *pm8001_ha) +{ + int i; + + if (!pm8001_ha) + return; + + for (i = 0; i < USI_MAX_MEMCNT; i++) { + if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) { + dma_free_coherent(&pm8001_ha->pdev->dev, + (pm8001_ha->memoryMap.region[i].total_len + + pm8001_ha->memoryMap.region[i].alignment), + pm8001_ha->memoryMap.region[i].virt_ptr, + pm8001_ha->memoryMap.region[i].phys_addr); + } + } + PM8001_CHIP_DISP->chip_iounmap(pm8001_ha); + flush_workqueue(pm8001_wq); + bitmap_free(pm8001_ha->rsvd_tags); + kfree(pm8001_ha); +} + +#ifdef PM8001_USE_TASKLET + +/** + * pm8001_tasklet() - tasklet for 64 msi-x interrupt handler + * @opaque: the passed general host adapter struct + * Note: pm8001_tasklet is common for pm8001 & pm80xx + */ +static void pm8001_tasklet(unsigned long opaque) +{ + struct pm8001_hba_info *pm8001_ha; + struct isr_param *irq_vector; + + irq_vector = (struct isr_param *)opaque; + pm8001_ha = irq_vector->drv_inst; + if (unlikely(!pm8001_ha)) + BUG_ON(1); + PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id); +} +#endif + +/** + * pm8001_interrupt_handler_msix - main MSIX interrupt handler. + * It obtains the vector number and calls the equivalent bottom + * half or services directly. + * @irq: interrupt number + * @opaque: the passed outbound queue/vector. Host structure is + * retrieved from the same. 
+ */ +static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque) +{ + struct isr_param *irq_vector; + struct pm8001_hba_info *pm8001_ha; + irqreturn_t ret = IRQ_HANDLED; + irq_vector = (struct isr_param *)opaque; + pm8001_ha = irq_vector->drv_inst; + + if (unlikely(!pm8001_ha)) + return IRQ_NONE; + if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha)) + return IRQ_NONE; +#ifdef PM8001_USE_TASKLET + tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]); +#else + ret = PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id); +#endif + return ret; +} + +/** + * pm8001_interrupt_handler_intx - main INTx interrupt handler. + * @irq: interrupt number + * @dev_id: sas_ha structure. The HBA is retrieved from sas_ha structure. + */ + +static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id) +{ + struct pm8001_hba_info *pm8001_ha; + irqreturn_t ret = IRQ_HANDLED; + struct sas_ha_struct *sha = dev_id; + pm8001_ha = sha->lldd_ha; + if (unlikely(!pm8001_ha)) + return IRQ_NONE; + if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha)) + return IRQ_NONE; + +#ifdef PM8001_USE_TASKLET + tasklet_schedule(&pm8001_ha->tasklet[0]); +#else + ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0); +#endif + return ret; +} + +static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha); + +/** + * pm8001_alloc - initiate our hba structure and 6 DMAs area. + * @pm8001_ha: our hba structure. + * @ent: PCI device ID structure to match on + */ +static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, + const struct pci_device_id *ent) +{ + int i, count = 0, rc = 0; + u32 ci_offset, ib_offset, ob_offset, pi_offset; + struct inbound_queue_table *ibq; + struct outbound_queue_table *obq; + + spin_lock_init(&pm8001_ha->lock); + spin_lock_init(&pm8001_ha->bitmap_lock); + pm8001_dbg(pm8001_ha, INIT, "pm8001_alloc: PHY:%x\n", + pm8001_ha->chip->n_phy); + + /* Request Interrupt */ + rc = pm8001_request_irq(pm8001_ha); + if (rc) + goto err_out; + + count = pm8001_ha->max_q_num; + /* Queues are chosen based on the number of cores/msix availability */ + ib_offset = pm8001_ha->ib_offset = USI_MAX_MEMCNT_BASE; + ci_offset = pm8001_ha->ci_offset = ib_offset + count; + ob_offset = pm8001_ha->ob_offset = ci_offset + count; + pi_offset = pm8001_ha->pi_offset = ob_offset + count; + pm8001_ha->max_memcnt = pi_offset + count; + + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + pm8001_phy_init(pm8001_ha, i); + pm8001_ha->port[i].wide_port_phymap = 0; + pm8001_ha->port[i].port_attached = 0; + pm8001_ha->port[i].port_state = 0; + INIT_LIST_HEAD(&pm8001_ha->port[i].list); + } + + /* MPI Memory region 1 for AAP Event Log for fw */ + pm8001_ha->memoryMap.region[AAP1].num_elements = 1; + pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE; + pm8001_ha->memoryMap.region[AAP1].total_len = PM8001_EVENT_LOG_SIZE; + pm8001_ha->memoryMap.region[AAP1].alignment = 32; + + /* MPI Memory region 2 for IOP Event Log for fw */ + pm8001_ha->memoryMap.region[IOP].num_elements = 1; + pm8001_ha->memoryMap.region[IOP].element_size = PM8001_EVENT_LOG_SIZE; + pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE; + pm8001_ha->memoryMap.region[IOP].alignment = 32; + + for (i = 0; i < count; i++) { + ibq = &pm8001_ha->inbnd_q_tbl[i]; + spin_lock_init(&ibq->iq_lock); + /* MPI Memory region 3 for consumer Index of inbound queues */ + pm8001_ha->memoryMap.region[ci_offset+i].num_elements = 1; + pm8001_ha->memoryMap.region[ci_offset+i].element_size = 4; + pm8001_ha->memoryMap.region[ci_offset+i].total_len = 4; + 
pm8001_ha->memoryMap.region[ci_offset+i].alignment = 4; + + if ((ent->driver_data) != chip_8001) { + /* MPI Memory region 5 inbound queues */ + pm8001_ha->memoryMap.region[ib_offset+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[ib_offset+i].element_size + = 128; + pm8001_ha->memoryMap.region[ib_offset+i].total_len = + PM8001_MPI_QUEUE * 128; + pm8001_ha->memoryMap.region[ib_offset+i].alignment + = 128; + } else { + pm8001_ha->memoryMap.region[ib_offset+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[ib_offset+i].element_size + = 64; + pm8001_ha->memoryMap.region[ib_offset+i].total_len = + PM8001_MPI_QUEUE * 64; + pm8001_ha->memoryMap.region[ib_offset+i].alignment = 64; + } + } + + for (i = 0; i < count; i++) { + obq = &pm8001_ha->outbnd_q_tbl[i]; + spin_lock_init(&obq->oq_lock); + /* MPI Memory region 4 for producer Index of outbound queues */ + pm8001_ha->memoryMap.region[pi_offset+i].num_elements = 1; + pm8001_ha->memoryMap.region[pi_offset+i].element_size = 4; + pm8001_ha->memoryMap.region[pi_offset+i].total_len = 4; + pm8001_ha->memoryMap.region[pi_offset+i].alignment = 4; + + if (ent->driver_data != chip_8001) { + /* MPI Memory region 6 Outbound queues */ + pm8001_ha->memoryMap.region[ob_offset+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[ob_offset+i].element_size + = 128; + pm8001_ha->memoryMap.region[ob_offset+i].total_len = + PM8001_MPI_QUEUE * 128; + pm8001_ha->memoryMap.region[ob_offset+i].alignment + = 128; + } else { + /* MPI Memory region 6 Outbound queues */ + pm8001_ha->memoryMap.region[ob_offset+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[ob_offset+i].element_size + = 64; + pm8001_ha->memoryMap.region[ob_offset+i].total_len = + PM8001_MPI_QUEUE * 64; + pm8001_ha->memoryMap.region[ob_offset+i].alignment = 64; + } + + } + /* Memory region write DMA*/ + pm8001_ha->memoryMap.region[NVMD].num_elements = 1; + pm8001_ha->memoryMap.region[NVMD].element_size = 4096; + pm8001_ha->memoryMap.region[NVMD].total_len = 4096; + + /* Memory region for fw flash */ + pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096; + + pm8001_ha->memoryMap.region[FORENSIC_MEM].num_elements = 1; + pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000; + pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000; + pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000; + for (i = 0; i < pm8001_ha->max_memcnt; i++) { + struct mpi_mem *region = &pm8001_ha->memoryMap.region[i]; + + if (pm8001_mem_alloc(pm8001_ha->pdev, + ®ion->virt_ptr, + ®ion->phys_addr, + ®ion->phys_addr_hi, + ®ion->phys_addr_lo, + region->total_len, + region->alignment) != 0) { + pm8001_dbg(pm8001_ha, FAIL, "Mem%d alloc failed\n", i); + goto err_out; + } + } + + /* Memory region for devices*/ + pm8001_ha->devices = kzalloc(PM8001_MAX_DEVICES + * sizeof(struct pm8001_device), GFP_KERNEL); + if (!pm8001_ha->devices) { + rc = -ENOMEM; + goto err_out_nodev; + } + for (i = 0; i < PM8001_MAX_DEVICES; i++) { + pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED; + pm8001_ha->devices[i].id = i; + pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES; + atomic_set(&pm8001_ha->devices[i].running_req, 0); + } + pm8001_ha->flags = PM8001F_INIT_TIME; + return 0; + +err_out_nodev: + for (i = 0; i < pm8001_ha->max_memcnt; i++) { + if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) { + dma_free_coherent(&pm8001_ha->pdev->dev, + (pm8001_ha->memoryMap.region[i].total_len + + pm8001_ha->memoryMap.region[i].alignment), + 
pm8001_ha->memoryMap.region[i].virt_ptr, + pm8001_ha->memoryMap.region[i].phys_addr); + } + } +err_out: + return 1; +} + +/** + * pm8001_ioremap - remap the pci high physical address to kernel virtual + * address so that we can access them. + * @pm8001_ha: our hba structure. + */ +static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) +{ + u32 bar; + u32 logicalBar = 0; + struct pci_dev *pdev; + + pdev = pm8001_ha->pdev; + /* map pci mem (PMC pci base 0-3)*/ + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { + /* + ** logical BARs for SPC: + ** bar 0 and 1 - logical BAR0 + ** bar 2 and 3 - logical BAR1 + ** bar4 - logical BAR2 + ** bar5 - logical BAR3 + ** Skip the appropriate assignments: + */ + if ((bar == 1) || (bar == 3)) + continue; + if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { + pm8001_ha->io_mem[logicalBar].membase = + pci_resource_start(pdev, bar); + pm8001_ha->io_mem[logicalBar].memsize = + pci_resource_len(pdev, bar); + pm8001_ha->io_mem[logicalBar].memvirtaddr = + ioremap(pm8001_ha->io_mem[logicalBar].membase, + pm8001_ha->io_mem[logicalBar].memsize); + if (!pm8001_ha->io_mem[logicalBar].memvirtaddr) { + pm8001_dbg(pm8001_ha, INIT, + "Failed to ioremap bar %d, logicalBar %d", + bar, logicalBar); + return -ENOMEM; + } + pm8001_dbg(pm8001_ha, INIT, + "base addr %llx virt_addr=%llx len=%d\n", + (u64)pm8001_ha->io_mem[logicalBar].membase, + (u64)(unsigned long) + pm8001_ha->io_mem[logicalBar].memvirtaddr, + pm8001_ha->io_mem[logicalBar].memsize); + } else { + pm8001_ha->io_mem[logicalBar].membase = 0; + pm8001_ha->io_mem[logicalBar].memsize = 0; + pm8001_ha->io_mem[logicalBar].memvirtaddr = NULL; + } + logicalBar++; + } + return 0; +} + +/** + * pm8001_pci_alloc - initialize our ha card structure + * @pdev: pci device. + * @ent: ent + * @shost: scsi host struct which has been initialized before. 
+ */ +static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, + const struct pci_device_id *ent, + struct Scsi_Host *shost) + +{ + struct pm8001_hba_info *pm8001_ha; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + int j; + + pm8001_ha = sha->lldd_ha; + if (!pm8001_ha) + return NULL; + + pm8001_ha->pdev = pdev; + pm8001_ha->dev = &pdev->dev; + pm8001_ha->chip_id = ent->driver_data; + pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id]; + pm8001_ha->irq = pdev->irq; + pm8001_ha->sas = sha; + pm8001_ha->shost = shost; + pm8001_ha->id = pm8001_id++; + pm8001_ha->logging_level = logging_level; + pm8001_ha->non_fatal_count = 0; + if (link_rate >= 1 && link_rate <= 15) + pm8001_ha->link_rate = (link_rate << 8); + else { + pm8001_ha->link_rate = LINKRATE_15 | LINKRATE_30 | + LINKRATE_60 | LINKRATE_120; + pm8001_dbg(pm8001_ha, FAIL, + "Setting link rate to default value\n"); + } + sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id); + /* IOMB size is 128 for 8088/89 controllers */ + if (pm8001_ha->chip_id != chip_8001) + pm8001_ha->iomb_size = IOMB_SIZE_SPCV; + else + pm8001_ha->iomb_size = IOMB_SIZE_SPC; + +#ifdef PM8001_USE_TASKLET + /* Tasklet for non msi-x interrupt handler */ + if ((!pdev->msix_cap || !pci_msi_enabled()) + || (pm8001_ha->chip_id == chip_8001)) + tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[0])); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[j])); +#endif + if (pm8001_ioremap(pm8001_ha)) + goto failed_pci_alloc; + if (!pm8001_alloc(pm8001_ha, ent)) + return pm8001_ha; +failed_pci_alloc: + pm8001_free(pm8001_ha); + return NULL; +} + +/** + * pci_go_44 - pm8001 specified, its DMA is 44 bit rather than 64 bit + * @pdev: pci device. + */ +static int pci_go_44(struct pci_dev *pdev) +{ + int rc; + + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); + if (rc) { + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rc) + dev_printk(KERN_ERR, &pdev->dev, + "32-bit DMA enable failed\n"); + } + return rc; +} + +/** + * pm8001_prep_sas_ha_init - allocate memory in general hba struct && init them. + * @shost: scsi host which has been allocated outside. + * @chip_info: our ha struct. + */ +static int pm8001_prep_sas_ha_init(struct Scsi_Host *shost, + const struct pm8001_chip_info *chip_info) +{ + int phy_nr, port_nr; + struct asd_sas_phy **arr_phy; + struct asd_sas_port **arr_port; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + phy_nr = chip_info->n_phy; + port_nr = phy_nr; + memset(sha, 0x00, sizeof(*sha)); + arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL); + if (!arr_phy) + goto exit; + arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL); + if (!arr_port) + goto exit_free2; + + sha->sas_phy = arr_phy; + sha->sas_port = arr_port; + sha->lldd_ha = kzalloc(sizeof(struct pm8001_hba_info), GFP_KERNEL); + if (!sha->lldd_ha) + goto exit_free1; + + shost->transportt = pm8001_stt; + shost->max_id = PM8001_MAX_DEVICES; + shost->unique_id = pm8001_id; + shost->max_cmd_len = 16; + return 0; +exit_free1: + kfree(arr_port); +exit_free2: + kfree(arr_phy); +exit: + return -1; +} + +/** + * pm8001_post_sas_ha_init - initialize general hba struct defined in libsas + * @shost: scsi host which has been allocated outside + * @chip_info: our ha struct. 
+ */ +static void pm8001_post_sas_ha_init(struct Scsi_Host *shost, + const struct pm8001_chip_info *chip_info) +{ + int i = 0; + struct pm8001_hba_info *pm8001_ha; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + pm8001_ha = sha->lldd_ha; + for (i = 0; i < chip_info->n_phy; i++) { + sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy; + sha->sas_port[i] = &pm8001_ha->port[i].sas_port; + sha->sas_phy[i]->sas_addr = + (u8 *)&pm8001_ha->phy[i].dev_sas_addr; + } + sha->sas_ha_name = DRV_NAME; + sha->dev = pm8001_ha->dev; + sha->strict_wide_ports = 1; + sha->sas_addr = &pm8001_ha->sas_addr[0]; + sha->num_phys = chip_info->n_phy; + sha->shost = shost; +} + +/** + * pm8001_init_sas_add - initialize sas address + * @pm8001_ha: our ha struct. + * + * Currently we just set the fixed SAS address to our HBA, for manufacture, + * it should read from the EEPROM + */ +static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) +{ + u8 i, j; + u8 sas_add[8]; +#ifdef PM8001_READ_VPD + /* For new SPC controllers WWN is stored in flash vpd + * For SPC/SPCve controllers WWN is stored in EEPROM + * For Older SPC WWN is stored in NVMD + */ + DECLARE_COMPLETION_ONSTACK(completion); + struct pm8001_ioctl_payload payload; + u16 deviceid; + int rc; + unsigned long time_remaining; + + if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) { + pm8001_dbg(pm8001_ha, FAIL, "controller is in fatal error state\n"); + return -EIO; + } + + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + pm8001_ha->nvmd_completion = &completion; + + if (pm8001_ha->chip_id == chip_8001) { + if (deviceid == 0x8081 || deviceid == 0x0042) { + payload.minor_function = 4; + payload.rd_length = 4096; + } else { + payload.minor_function = 0; + payload.rd_length = 128; + } + } else if ((pm8001_ha->chip_id == chip_8070 || + pm8001_ha->chip_id == chip_8072) && + pm8001_ha->pdev->subsystem_vendor == PCI_VENDOR_ID_ATTO) { + payload.minor_function = 4; + payload.rd_length = 4096; + } else { + payload.minor_function = 1; + payload.rd_length = 4096; + } + payload.offset = 0; + payload.func_specific = kzalloc(payload.rd_length, GFP_KERNEL); + if (!payload.func_specific) { + pm8001_dbg(pm8001_ha, FAIL, "mem alloc fail\n"); + return -ENOMEM; + } + rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); + if (rc) { + kfree(payload.func_specific); + pm8001_dbg(pm8001_ha, FAIL, "nvmd failed\n"); + return -EIO; + } + time_remaining = wait_for_completion_timeout(&completion, + msecs_to_jiffies(60*1000)); // 1 min + if (!time_remaining) { + kfree(payload.func_specific); + pm8001_dbg(pm8001_ha, FAIL, "get_nvmd_req timeout\n"); + return -EIO; + } + + + for (i = 0, j = 0; i <= 7; i++, j++) { + if (pm8001_ha->chip_id == chip_8001) { + if (deviceid == 0x8081) + pm8001_ha->sas_addr[j] = + payload.func_specific[0x704 + i]; + else if (deviceid == 0x0042) + pm8001_ha->sas_addr[j] = + payload.func_specific[0x010 + i]; + } else if ((pm8001_ha->chip_id == chip_8070 || + pm8001_ha->chip_id == chip_8072) && + pm8001_ha->pdev->subsystem_vendor == PCI_VENDOR_ID_ATTO) { + pm8001_ha->sas_addr[j] = + payload.func_specific[0x010 + i]; + } else + pm8001_ha->sas_addr[j] = + payload.func_specific[0x804 + i]; + } + memcpy(sas_add, pm8001_ha->sas_addr, SAS_ADDR_SIZE); + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + if (i && ((i % 4) == 0)) + sas_add[7] = sas_add[7] + 4; + memcpy(&pm8001_ha->phy[i].dev_sas_addr, + sas_add, SAS_ADDR_SIZE); + pm8001_dbg(pm8001_ha, INIT, "phy %d sas_addr = %016llx\n", i, + pm8001_ha->phy[i].dev_sas_addr); + } + 
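/*
 * Editor's note: the loop above derives each phy's SAS address from the WWN
 * read out of NVM -- the low byte of the base address is stepped by 4 for
 * every group of four phys, so phys 0-3, 4-7, 8-11, ... each share one
 * address, presumably so that each four-phy group can form its own wide port.
 */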
kfree(payload.func_specific); +#else + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL; + pm8001_ha->phy[i].dev_sas_addr = + cpu_to_be64((u64) + (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr)); + } + memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr, + SAS_ADDR_SIZE); +#endif + return 0; +} + +/* + * pm8001_get_phy_settings_info : Read phy setting values. + * @pm8001_ha : our hba. + */ +static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) +{ + +#ifdef PM8001_READ_VPD + /*OPTION ROM FLASH read for the SPC cards */ + DECLARE_COMPLETION_ONSTACK(completion); + struct pm8001_ioctl_payload payload; + int rc; + + pm8001_ha->nvmd_completion = &completion; + /* SAS ADDRESS read from flash / EEPROM */ + payload.minor_function = 6; + payload.offset = 0; + payload.rd_length = 4096; + payload.func_specific = kzalloc(4096, GFP_KERNEL); + if (!payload.func_specific) + return -ENOMEM; + /* Read phy setting values from flash */ + rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); + if (rc) { + kfree(payload.func_specific); + pm8001_dbg(pm8001_ha, INIT, "nvmd failed\n"); + return -ENOMEM; + } + wait_for_completion(&completion); + pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific); + kfree(payload.func_specific); +#endif + return 0; +} + +struct pm8001_mpi3_phy_pg_trx_config { + u32 LaneLosCfg; + u32 LanePgaCfg1; + u32 LanePisoCfg1; + u32 LanePisoCfg2; + u32 LanePisoCfg3; + u32 LanePisoCfg4; + u32 LanePisoCfg5; + u32 LanePisoCfg6; + u32 LaneBctCtrl; +}; + +/** + * pm8001_get_internal_phy_settings - Retrieves the internal PHY settings + * @pm8001_ha : our adapter + * @phycfg : PHY config page to populate + */ +static +void pm8001_get_internal_phy_settings(struct pm8001_hba_info *pm8001_ha, + struct pm8001_mpi3_phy_pg_trx_config *phycfg) +{ + phycfg->LaneLosCfg = 0x00000132; + phycfg->LanePgaCfg1 = 0x00203949; + phycfg->LanePisoCfg1 = 0x000000FF; + phycfg->LanePisoCfg2 = 0xFF000001; + phycfg->LanePisoCfg3 = 0xE7011300; + phycfg->LanePisoCfg4 = 0x631C40C0; + phycfg->LanePisoCfg5 = 0xF8102036; + phycfg->LanePisoCfg6 = 0xF74A1000; + phycfg->LaneBctCtrl = 0x00FB33F8; +} + +/** + * pm8001_get_external_phy_settings - Retrieves the external PHY settings + * @pm8001_ha : our adapter + * @phycfg : PHY config page to populate + */ +static +void pm8001_get_external_phy_settings(struct pm8001_hba_info *pm8001_ha, + struct pm8001_mpi3_phy_pg_trx_config *phycfg) +{ + phycfg->LaneLosCfg = 0x00000132; + phycfg->LanePgaCfg1 = 0x00203949; + phycfg->LanePisoCfg1 = 0x000000FF; + phycfg->LanePisoCfg2 = 0xFF000001; + phycfg->LanePisoCfg3 = 0xE7011300; + phycfg->LanePisoCfg4 = 0x63349140; + phycfg->LanePisoCfg5 = 0xF8102036; + phycfg->LanePisoCfg6 = 0xF80D9300; + phycfg->LaneBctCtrl = 0x00FB33F8; +} + +/** + * pm8001_get_phy_mask - Retrieves the mask that denotes if a PHY is int/ext + * @pm8001_ha : our adapter + * @phymask : The PHY mask + */ +static +void pm8001_get_phy_mask(struct pm8001_hba_info *pm8001_ha, int *phymask) +{ + switch (pm8001_ha->pdev->subsystem_device) { + case 0x0070: /* H1280 - 8 external 0 internal */ + case 0x0072: /* H12F0 - 16 external 0 internal */ + *phymask = 0x0000; + break; + + case 0x0071: /* H1208 - 0 external 8 internal */ + case 0x0073: /* H120F - 0 external 16 internal */ + *phymask = 0xFFFF; + break; + + case 0x0080: /* H1244 - 4 external 4 internal */ + *phymask = 0x00F0; + break; + + case 0x0081: /* H1248 - 4 external 8 internal */ + *phymask = 0x0FF0; + break; + + case 0x0082: /* H1288 - 
8 external 8 internal */ + *phymask = 0xFF00; + break; + + default: + pm8001_dbg(pm8001_ha, INIT, + "Unknown subsystem device=0x%.04x\n", + pm8001_ha->pdev->subsystem_device); + } +} + +/** + * pm8001_set_phy_settings_ven_117c_12G() - Configure ATTO 12Gb PHY settings + * @pm8001_ha : our adapter + */ +static +int pm8001_set_phy_settings_ven_117c_12G(struct pm8001_hba_info *pm8001_ha) +{ + struct pm8001_mpi3_phy_pg_trx_config phycfg_int; + struct pm8001_mpi3_phy_pg_trx_config phycfg_ext; + int phymask = 0; + int i = 0; + + memset(&phycfg_int, 0, sizeof(phycfg_int)); + memset(&phycfg_ext, 0, sizeof(phycfg_ext)); + + pm8001_get_internal_phy_settings(pm8001_ha, &phycfg_int); + pm8001_get_external_phy_settings(pm8001_ha, &phycfg_ext); + pm8001_get_phy_mask(pm8001_ha, &phymask); + + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + if (phymask & (1 << i)) {/* Internal PHY */ + pm8001_set_phy_profile_single(pm8001_ha, i, + sizeof(phycfg_int) / sizeof(u32), + (u32 *)&phycfg_int); + + } else { /* External PHY */ + pm8001_set_phy_profile_single(pm8001_ha, i, + sizeof(phycfg_ext) / sizeof(u32), + (u32 *)&phycfg_ext); + } + } + + return 0; +} + +/** + * pm8001_configure_phy_settings - Configures PHY settings based on vendor ID. + * @pm8001_ha : our hba. + */ +static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha) +{ + switch (pm8001_ha->pdev->subsystem_vendor) { + case PCI_VENDOR_ID_ATTO: + if (pm8001_ha->pdev->device == 0x0042) /* 6Gb */ + return 0; + else + return pm8001_set_phy_settings_ven_117c_12G(pm8001_ha); + + case PCI_VENDOR_ID_ADAPTEC2: + case 0: + return 0; + + default: + return pm8001_get_phy_settings_info(pm8001_ha); + } +} + +#ifdef PM8001_USE_MSIX +/** + * pm8001_setup_msix - enable MSI-X interrupt + * @pm8001_ha: our ha struct. + */ +static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) +{ + unsigned int allocated_irq_vectors; + int rc; + + /* SPCv controllers supports 64 msi-x */ + if (pm8001_ha->chip_id == chip_8001) { + rc = pci_alloc_irq_vectors(pm8001_ha->pdev, 1, 1, + PCI_IRQ_MSIX); + } else { + /* + * Queue index #0 is used always for housekeeping, so don't + * include in the affinity spreading. 
+ */ + struct irq_affinity desc = { + .pre_vectors = 1, + }; + rc = pci_alloc_irq_vectors_affinity( + pm8001_ha->pdev, 2, PM8001_MAX_MSIX_VEC, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc); + } + + allocated_irq_vectors = rc; + if (rc < 0) + return rc; + + /* Assigns the number of interrupts */ + pm8001_ha->number_of_intr = allocated_irq_vectors; + + /* Maximum queue number updating in HBA structure */ + pm8001_ha->max_q_num = allocated_irq_vectors; + + pm8001_dbg(pm8001_ha, INIT, + "pci_alloc_irq_vectors request ret:%d no of intr %d\n", + rc, pm8001_ha->number_of_intr); + return 0; +} + +static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha) +{ + u32 i = 0, j = 0; + int flag = 0, rc = 0; + int nr_irqs = pm8001_ha->number_of_intr; + + if (pm8001_ha->chip_id != chip_8001) + flag &= ~IRQF_SHARED; + + pm8001_dbg(pm8001_ha, INIT, + "pci_enable_msix request number of intr %d\n", + pm8001_ha->number_of_intr); + + if (nr_irqs > ARRAY_SIZE(pm8001_ha->intr_drvname)) + nr_irqs = ARRAY_SIZE(pm8001_ha->intr_drvname); + + for (i = 0; i < nr_irqs; i++) { + snprintf(pm8001_ha->intr_drvname[i], + sizeof(pm8001_ha->intr_drvname[0]), + "%s-%d", pm8001_ha->name, i); + pm8001_ha->irq_vector[i].irq_id = i; + pm8001_ha->irq_vector[i].drv_inst = pm8001_ha; + + rc = request_irq(pci_irq_vector(pm8001_ha->pdev, i), + pm8001_interrupt_handler_msix, flag, + pm8001_ha->intr_drvname[i], + &(pm8001_ha->irq_vector[i])); + if (rc) { + for (j = 0; j < i; j++) { + free_irq(pci_irq_vector(pm8001_ha->pdev, i), + &(pm8001_ha->irq_vector[i])); + } + pci_free_irq_vectors(pm8001_ha->pdev); + break; + } + } + + return rc; +} +#endif + +/** + * pm8001_request_irq - register interrupt + * @pm8001_ha: our ha struct. + */ +static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) +{ + struct pci_dev *pdev = pm8001_ha->pdev; +#ifdef PM8001_USE_MSIX + int rc; + + if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) { + rc = pm8001_setup_msix(pm8001_ha); + if (rc) { + pm8001_dbg(pm8001_ha, FAIL, + "pm8001_setup_irq failed [ret: %d]\n", rc); + return rc; + } + + if (pdev->msix_cap && pci_msi_enabled()) + return pm8001_request_msix(pm8001_ha); + } + + pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n"); +#endif + + /* initialize the INT-X interrupt */ + pm8001_ha->irq_vector[0].irq_id = 0; + pm8001_ha->irq_vector[0].drv_inst = pm8001_ha; + + return request_irq(pdev->irq, pm8001_interrupt_handler_intx, + IRQF_SHARED, pm8001_ha->name, + SHOST_TO_SAS_HA(pm8001_ha->shost)); +} + +/** + * pm8001_pci_probe - probe supported device + * @pdev: pci device which kernel has been prepared for. + * @ent: pci device id + * + * This function is the main initialization function, when register a new + * pci driver it is invoked, all struct and hardware initialization should be + * done here, also, register interrupt. + */ +static int pm8001_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + unsigned int rc; + u32 pci_reg; + u8 i = 0; + struct pm8001_hba_info *pm8001_ha; + struct Scsi_Host *shost = NULL; + const struct pm8001_chip_info *chip; + struct sas_ha_struct *sha; + + dev_printk(KERN_INFO, &pdev->dev, + "pm80xx: driver version %s\n", DRV_VERSION); + rc = pci_enable_device(pdev); + if (rc) + goto err_out_enable; + pci_set_master(pdev); + /* + * Enable pci slot busmaster by setting pci command register. + * This is required by FW for Cyclone card. 
+ */ + + pci_read_config_dword(pdev, PCI_COMMAND, &pci_reg); + pci_reg |= 0x157; + pci_write_config_dword(pdev, PCI_COMMAND, pci_reg); + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out_disable; + rc = pci_go_44(pdev); + if (rc) + goto err_out_regions; + + shost = scsi_host_alloc(&pm8001_sht, sizeof(void *)); + if (!shost) { + rc = -ENOMEM; + goto err_out_regions; + } + chip = &pm8001_chips[ent->driver_data]; + sha = kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL); + if (!sha) { + rc = -ENOMEM; + goto err_out_free_host; + } + SHOST_TO_SAS_HA(shost) = sha; + + rc = pm8001_prep_sas_ha_init(shost, chip); + if (rc) { + rc = -ENOMEM; + goto err_out_free; + } + pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); + /* ent->driver variable is used to differentiate between controllers */ + pm8001_ha = pm8001_pci_alloc(pdev, ent, shost); + if (!pm8001_ha) { + rc = -ENOMEM; + goto err_out_free; + } + + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); + rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); + if (rc) { + pm8001_dbg(pm8001_ha, FAIL, + "chip_init failed [ret: %d]\n", rc); + goto err_out_ha_free; + } + + rc = pm8001_init_ccb_tag(pm8001_ha); + if (rc) + goto err_out_enable; + + + PM8001_CHIP_DISP->chip_post_init(pm8001_ha); + + if (pm8001_ha->number_of_intr > 1) { + shost->nr_hw_queues = pm8001_ha->number_of_intr - 1; + /* + * For now, ensure we're not sent too many commands by setting + * host_tagset. This is also required if we start using request + * tag. + */ + shost->host_tagset = 1; + } + + rc = scsi_add_host(shost, &pdev->dev); + if (rc) + goto err_out_ha_free; + + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); + if (pm8001_ha->chip_id != chip_8001) { + for (i = 1; i < pm8001_ha->number_of_intr; i++) + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); + /* setup thermal configuration. */ + pm80xx_set_thermal_config(pm8001_ha); + } + + rc = pm8001_init_sas_add(pm8001_ha); + if (rc) + goto err_out_shost; + /* phy setting support for motherboard controller */ + rc = pm8001_configure_phy_settings(pm8001_ha); + if (rc) + goto err_out_shost; + + pm8001_post_sas_ha_init(shost, chip); + rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); + if (rc) { + pm8001_dbg(pm8001_ha, FAIL, + "sas_register_ha failed [ret: %d]\n", rc); + goto err_out_shost; + } + list_add_tail(&pm8001_ha->list, &hba_list); + pm8001_ha->flags = PM8001F_RUN_TIME; + scsi_scan_host(pm8001_ha->shost); + return 0; + +err_out_shost: + scsi_remove_host(pm8001_ha->shost); +err_out_ha_free: + pm8001_free(pm8001_ha); +err_out_free: + kfree(sha); +err_out_free_host: + scsi_host_put(shost); +err_out_regions: + pci_release_regions(pdev); +err_out_disable: + pci_disable_device(pdev); +err_out_enable: + return rc; +} + +/** + * pm8001_init_ccb_tag - allocate memory to CCB and tag. + * @pm8001_ha: our hba card information. 
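For reference, a worked decode (plain userspace C) of the 0x157 value OR-ed into PCI_COMMAND above, using the standard PCI command register bit layout; only the bits actually set in 0x157 are printed.

    #include <stdio.h>

    int main(void)
    {
        static const struct { unsigned int bit; const char *name; } bits[] = {
            { 0, "I/O Space Enable" },
            { 1, "Memory Space Enable" },
            { 2, "Bus Master Enable" },
            { 3, "Special Cycles" },
            { 4, "Memory Write and Invalidate" },
            { 5, "VGA Palette Snoop" },
            { 6, "Parity Error Response" },
            { 8, "SERR# Enable" },
        };
        unsigned int val = 0x157;
        unsigned int i;

        /* prints bits 0, 1, 2, 4, 6 and 8 */
        for (i = 0; i < sizeof(bits) / sizeof(bits[0]); i++)
            if (val & (1u << bits[i].bit))
                printf("bit %u: %s\n", bits[i].bit, bits[i].name);
        return 0;
    }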
+ */ +static int pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha) +{ + struct Scsi_Host *shost = pm8001_ha->shost; + struct device *dev = pm8001_ha->dev; + u32 max_out_io, ccb_count; + int i; + + max_out_io = pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io; + ccb_count = min_t(int, PM8001_MAX_CCB, max_out_io); + + shost->can_queue = ccb_count - PM8001_RESERVE_SLOT; + + pm8001_ha->rsvd_tags = bitmap_zalloc(PM8001_RESERVE_SLOT, GFP_KERNEL); + if (!pm8001_ha->rsvd_tags) + goto err_out; + + /* Memory region for ccb_info*/ + pm8001_ha->ccb_count = ccb_count; + pm8001_ha->ccb_info = + kcalloc(ccb_count, sizeof(struct pm8001_ccb_info), GFP_KERNEL); + if (!pm8001_ha->ccb_info) { + pm8001_dbg(pm8001_ha, FAIL, + "Unable to allocate memory for ccb\n"); + goto err_out_noccb; + } + for (i = 0; i < ccb_count; i++) { + pm8001_ha->ccb_info[i].buf_prd = dma_alloc_coherent(dev, + sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG, + &pm8001_ha->ccb_info[i].ccb_dma_handle, + GFP_KERNEL); + if (!pm8001_ha->ccb_info[i].buf_prd) { + pm8001_dbg(pm8001_ha, FAIL, + "ccb prd memory allocation error\n"); + goto err_out; + } + pm8001_ha->ccb_info[i].task = NULL; + pm8001_ha->ccb_info[i].ccb_tag = PM8001_INVALID_TAG; + pm8001_ha->ccb_info[i].device = NULL; + } + + return 0; + +err_out_noccb: + kfree(pm8001_ha->devices); +err_out: + return -ENOMEM; +} + +static void pm8001_pci_remove(struct pci_dev *pdev) +{ + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct pm8001_hba_info *pm8001_ha; + int i, j; + pm8001_ha = sha->lldd_ha; + sas_unregister_ha(sha); + sas_remove_host(pm8001_ha->shost); + list_del(&pm8001_ha->list); + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); + +#ifdef PM8001_USE_MSIX + for (i = 0; i < pm8001_ha->number_of_intr; i++) + synchronize_irq(pci_irq_vector(pdev, i)); + for (i = 0; i < pm8001_ha->number_of_intr; i++) + free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]); + pci_free_irq_vectors(pdev); +#else + free_irq(pm8001_ha->irq, sha); +#endif +#ifdef PM8001_USE_TASKLET + /* For non-msix and msix interrupts */ + if ((!pdev->msix_cap || !pci_msi_enabled()) || + (pm8001_ha->chip_id == chip_8001)) + tasklet_kill(&pm8001_ha->tasklet[0]); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_kill(&pm8001_ha->tasklet[j]); +#endif + scsi_host_put(pm8001_ha->shost); + + for (i = 0; i < pm8001_ha->ccb_count; i++) { + dma_free_coherent(&pm8001_ha->pdev->dev, + sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG, + pm8001_ha->ccb_info[i].buf_prd, + pm8001_ha->ccb_info[i].ccb_dma_handle); + } + kfree(pm8001_ha->ccb_info); + kfree(pm8001_ha->devices); + + pm8001_free(pm8001_ha); + kfree(sha->sas_phy); + kfree(sha->sas_port); + kfree(sha); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +/** + * pm8001_pci_suspend - power management suspend main entry point + * @dev: Device struct + * + * Return: 0 on success, anything else on error. 
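A hedged sketch of the per-descriptor DMA pattern used by pm8001_init_ccb_tag() above: one coherent PRD buffer is allocated per CCB, and one way to unwind is to free, in reverse, whatever was allocated before the failure. struct my_ccb and my_alloc_ccbs are illustrative names, not driver symbols.

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    struct my_ccb {
        void        *buf;
        dma_addr_t  dma;
    };

    static struct my_ccb *my_alloc_ccbs(struct device *dev, int nbuf, size_t sz)
    {
        struct my_ccb *ccbs;
        int i;

        ccbs = kcalloc(nbuf, sizeof(*ccbs), GFP_KERNEL);
        if (!ccbs)
            return NULL;

        for (i = 0; i < nbuf; i++) {
            ccbs[i].buf = dma_alloc_coherent(dev, sz, &ccbs[i].dma,
                                             GFP_KERNEL);
            if (!ccbs[i].buf)
                goto err_unwind;
        }
        return ccbs;

    err_unwind:
        while (--i >= 0)
            dma_free_coherent(dev, sz, ccbs[i].buf, ccbs[i].dma);
        kfree(ccbs);
        return NULL;
    }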
+ */ +static int __maybe_unused pm8001_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int i, j; + sas_suspend_ha(sha); + flush_workqueue(pm8001_wq); + scsi_block_requests(pm8001_ha->shost); + if (!pdev->pm_cap) { + dev_err(dev, " PCI PM not supported\n"); + return -ENODEV; + } + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); +#ifdef PM8001_USE_MSIX + for (i = 0; i < pm8001_ha->number_of_intr; i++) + synchronize_irq(pci_irq_vector(pdev, i)); + for (i = 0; i < pm8001_ha->number_of_intr; i++) + free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]); + pci_free_irq_vectors(pdev); +#else + free_irq(pm8001_ha->irq, sha); +#endif +#ifdef PM8001_USE_TASKLET + /* For non-msix and msix interrupts */ + if ((!pdev->msix_cap || !pci_msi_enabled()) || + (pm8001_ha->chip_id == chip_8001)) + tasklet_kill(&pm8001_ha->tasklet[0]); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_kill(&pm8001_ha->tasklet[j]); +#endif + pm8001_info(pm8001_ha, "pdev=0x%p, slot=%s, entering " + "suspended state\n", pdev, + pm8001_ha->name); + return 0; +} + +/** + * pm8001_pci_resume - power management resume main entry point + * @dev: Device struct + * + * Return: 0 on success, anything else on error. + */ +static int __maybe_unused pm8001_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct pm8001_hba_info *pm8001_ha; + int rc; + u8 i = 0, j; + DECLARE_COMPLETION_ONSTACK(completion); + + pm8001_ha = sha->lldd_ha; + + pm8001_info(pm8001_ha, + "pdev=0x%p, slot=%s, resuming from previous operating state [D%d]\n", + pdev, pm8001_ha->name, pdev->current_state); + + rc = pci_go_44(pdev); + if (rc) + goto err_out_disable; + sas_prep_resume_ha(sha); + /* chip soft rst only for spc */ + if (pm8001_ha->chip_id == chip_8001) { + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); + pm8001_dbg(pm8001_ha, INIT, "chip soft reset successful\n"); + } + rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); + if (rc) + goto err_out_disable; + + /* disable all the interrupt bits */ + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + + rc = pm8001_request_irq(pm8001_ha); + if (rc) + goto err_out_disable; +#ifdef PM8001_USE_TASKLET + /* Tasklet for non msi-x interrupt handler */ + if ((!pdev->msix_cap || !pci_msi_enabled()) || + (pm8001_ha->chip_id == chip_8001)) + tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[0])); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[j])); +#endif + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); + if (pm8001_ha->chip_id != chip_8001) { + for (i = 1; i < pm8001_ha->number_of_intr; i++) + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); + } + + /* Chip documentation for the 8070 and 8072 SPCv */ + /* states that a 500ms minimum delay is required */ + /* before issuing commands. Otherwise, the firmware */ + /* will enter an unrecoverable state. 
*/ + + if (pm8001_ha->chip_id == chip_8070 || + pm8001_ha->chip_id == chip_8072) { + mdelay(500); + } + + /* Spin up the PHYs */ + + pm8001_ha->flags = PM8001F_RUN_TIME; + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + pm8001_ha->phy[i].enable_completion = &completion; + PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); + wait_for_completion(&completion); + } + sas_resume_ha(sha); + return 0; + +err_out_disable: + scsi_remove_host(pm8001_ha->shost); + + return rc; +} + +/* update of pci device, vendor id and driver data with + * unique value for each of the controller + */ +static struct pci_device_id pm8001_pci_table[] = { + { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 }, + { PCI_VDEVICE(PMC_Sierra, 0x8006), chip_8006 }, + { PCI_VDEVICE(ADAPTEC2, 0x8006), chip_8006 }, + { PCI_VDEVICE(ATTO, 0x0042), chip_8001 }, + /* Support for SPC/SPCv/SPCve controllers */ + { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 }, + { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 }, + { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 }, + { PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 }, + { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 }, + { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 }, + { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 }, + { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 }, + { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 }, + { PCI_VDEVICE(PMC_Sierra, 0x8074), chip_8074 }, + { PCI_VDEVICE(ADAPTEC2, 0x8074), chip_8074 }, + { PCI_VDEVICE(PMC_Sierra, 0x8076), chip_8076 }, + { PCI_VDEVICE(ADAPTEC2, 0x8076), chip_8076 }, + { PCI_VDEVICE(PMC_Sierra, 0x8077), chip_8077 }, + { PCI_VDEVICE(ADAPTEC2, 0x8077), chip_8077 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8081, + PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8081, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8074, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8074 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8076, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8076 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8077, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8077 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8074, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8074 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8076, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8076 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8077, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8077 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8076, + PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8076 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8077, + PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8077 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8074, + PCI_VENDOR_ID_ADAPTEC2, 0x0404, 0, 0, chip_8074 }, + { PCI_VENDOR_ID_ATTO, 0x8070, + PCI_VENDOR_ID_ATTO, 0x0070, 0, 0, chip_8070 }, + { PCI_VENDOR_ID_ATTO, 0x8070, + PCI_VENDOR_ID_ATTO, 0x0071, 0, 0, chip_8070 }, + { PCI_VENDOR_ID_ATTO, 0x8072, + PCI_VENDOR_ID_ATTO, 0x0072, 0, 0, chip_8072 }, + { 
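A minimal sketch of the completion handshake behind the PHY spin-up loop in pm8001_pci_resume() above, assuming, as the enable_completion field suggests, that the PHY-start event handler calls complete() once the firmware acknowledges the request. struct my_phy and the function names are illustrative placeholders.

    #include <linux/completion.h>

    struct my_phy {
        struct completion *enable_completion;
    };

    static void my_start_phy_and_wait(struct my_phy *phy,
                                      void (*fire_start_req)(struct my_phy *))
    {
        DECLARE_COMPLETION_ONSTACK(done);

        phy->enable_completion = &done; /* arm the completion first */
        fire_start_req(phy);            /* asynchronous request to firmware */
        wait_for_completion(&done);     /* event handler signals it later */
    }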
PCI_VENDOR_ID_ATTO, 0x8072, + PCI_VENDOR_ID_ATTO, 0x0073, 0, 0, chip_8072 }, + { PCI_VENDOR_ID_ATTO, 0x8070, + PCI_VENDOR_ID_ATTO, 0x0080, 0, 0, chip_8070 }, + { PCI_VENDOR_ID_ATTO, 0x8072, + PCI_VENDOR_ID_ATTO, 0x0081, 0, 0, chip_8072 }, + { PCI_VENDOR_ID_ATTO, 0x8072, + PCI_VENDOR_ID_ATTO, 0x0082, 0, 0, chip_8072 }, + {} /* terminate list */ +}; + +static SIMPLE_DEV_PM_OPS(pm8001_pci_pm_ops, + pm8001_pci_suspend, + pm8001_pci_resume); + +static struct pci_driver pm8001_pci_driver = { + .name = DRV_NAME, + .id_table = pm8001_pci_table, + .probe = pm8001_pci_probe, + .remove = pm8001_pci_remove, + .driver.pm = &pm8001_pci_pm_ops, +}; + +/** + * pm8001_init - initialize scsi transport template + */ +static int __init pm8001_init(void) +{ + int rc = -ENOMEM; + + pm8001_wq = alloc_workqueue("pm80xx", 0, 0); + if (!pm8001_wq) + goto err; + + pm8001_id = 0; + pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops); + if (!pm8001_stt) + goto err_wq; + rc = pci_register_driver(&pm8001_pci_driver); + if (rc) + goto err_tp; + return 0; + +err_tp: + sas_release_transport(pm8001_stt); +err_wq: + destroy_workqueue(pm8001_wq); +err: + return rc; +} + +static void __exit pm8001_exit(void) +{ + pci_unregister_driver(&pm8001_pci_driver); + sas_release_transport(pm8001_stt); + destroy_workqueue(pm8001_wq); +} + +module_init(pm8001_init); +module_exit(pm8001_exit); + +MODULE_AUTHOR("Jack Wang "); +MODULE_AUTHOR("Anand Kumar Santhanam "); +MODULE_AUTHOR("Sangeetha Gnanasekaran "); +MODULE_AUTHOR("Nikith Ganigarakoppal "); +MODULE_DESCRIPTION( + "PMC-Sierra PM8001/8006/8081/8088/8089/8074/8076/8077/8070/8072 " + "SAS/SATA controller driver"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, pm8001_pci_table); + diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c new file mode 100644 index 000000000..a5a31dfa4 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -0,0 +1,1195 @@ +/* + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
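A note on the ID table above: PCI_VDEVICE() entries match any subsystem IDs, while the longer entries pin a specific subsystem vendor/device pair so boards sharing a PCI device ID can map to different chip_XXXX driver_data values. For example, { PCI_VDEVICE(ATTO, 0x0042), chip_8001 } is equivalent to the open-coded form shown below.

    { PCI_VENDOR_ID_ATTO, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, chip_8001 },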
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +#include +#include "pm8001_sas.h" +#include "pm80xx_tracepoints.h" + +/** + * pm8001_find_tag - from sas task to find out tag that belongs to this task + * @task: the task sent to the LLDD + * @tag: the found tag associated with the task + */ +static int pm8001_find_tag(struct sas_task *task, u32 *tag) +{ + if (task->lldd_task) { + struct pm8001_ccb_info *ccb; + ccb = task->lldd_task; + *tag = ccb->ccb_tag; + return 1; + } + return 0; +} + +/** + * pm8001_tag_free - free the no more needed tag + * @pm8001_ha: our hba struct + * @tag: the found tag associated with the task + */ +void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) +{ + void *bitmap = pm8001_ha->rsvd_tags; + unsigned long flags; + + if (tag >= PM8001_RESERVE_SLOT) + return; + + spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags); + __clear_bit(tag, bitmap); + spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); +} + +/** + * pm8001_tag_alloc - allocate a empty tag for task used. + * @pm8001_ha: our hba struct + * @tag_out: the found empty tag . + */ +int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out) +{ + void *bitmap = pm8001_ha->rsvd_tags; + unsigned long flags; + unsigned int tag; + + spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags); + tag = find_first_zero_bit(bitmap, PM8001_RESERVE_SLOT); + if (tag >= PM8001_RESERVE_SLOT) { + spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); + return -SAS_QUEUE_FULL; + } + __set_bit(tag, bitmap); + spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); + + /* reserved tags are in the lower region of the tagset */ + *tag_out = tag; + return 0; +} + +/** + * pm8001_mem_alloc - allocate memory for pm8001. + * @pdev: pci device. + * @virt_addr: the allocated virtual address + * @pphys_addr: DMA address for this device + * @pphys_addr_hi: the physical address high byte address. + * @pphys_addr_lo: the physical address low byte address. + * @mem_size: memory size. + * @align: requested byte alignment + */ +int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, + dma_addr_t *pphys_addr, u32 *pphys_addr_hi, + u32 *pphys_addr_lo, u32 mem_size, u32 align) +{ + caddr_t mem_virt_alloc; + dma_addr_t mem_dma_handle; + u64 phys_align; + u64 align_offset = 0; + if (align) + align_offset = (dma_addr_t)align - 1; + mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align, + &mem_dma_handle, GFP_KERNEL); + if (!mem_virt_alloc) + return -ENOMEM; + *pphys_addr = mem_dma_handle; + phys_align = (*pphys_addr + align_offset) & ~align_offset; + *virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr; + *pphys_addr_hi = upper_32_bits(phys_align); + *pphys_addr_lo = lower_32_bits(phys_align); + return 0; +} + +/** + * pm8001_find_ha_by_dev - from domain device which come from sas layer to + * find out our hba struct. + * @dev: the domain device which from sas layer. 
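A standalone demonstration (plain userspace C) of the align-up arithmetic used by pm8001_mem_alloc() above. Like that function, it assumes the requested alignment is a power of two; the 0x12345678 base address is just an example value.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t base  = 0x12345678; /* address handed back by the allocator */
        uint64_t align = 64;         /* requested alignment, power of two */
        uint64_t mask  = align - 1;
        uint64_t aligned = (base + mask) & ~mask;

        printf("base    = 0x%llx\n", (unsigned long long)base);
        printf("aligned = 0x%llx (offset into the buffer: %llu bytes)\n",
               (unsigned long long)aligned,
               (unsigned long long)(aligned - base));
        /* 0x12345678 rounded up to a 64-byte boundary is 0x12345680 */
        return 0;
    }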
+ */ +static +struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev) +{ + struct sas_ha_struct *sha = dev->port->ha; + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + return pm8001_ha; +} + +/** + * pm8001_phy_control - this function should be registered to + * sas_domain_function_template to provide libsas used, note: this is just + * control the HBA phy rather than other expander phy if you want control + * other phy, you should use SMP command. + * @sas_phy: which phy in HBA phys. + * @func: the operation. + * @funcdata: always NULL. + */ +int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata) +{ + int rc = 0, phy_id = sas_phy->id; + struct pm8001_hba_info *pm8001_ha = NULL; + struct sas_phy_linkrates *rates; + struct pm8001_phy *phy; + DECLARE_COMPLETION_ONSTACK(completion); + unsigned long flags; + pm8001_ha = sas_phy->ha->lldd_ha; + phy = &pm8001_ha->phy[phy_id]; + pm8001_ha->phy[phy_id].enable_completion = &completion; + + if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) { + /* + * If the controller is in fatal error state, + * we will not get a response from the controller + */ + pm8001_dbg(pm8001_ha, FAIL, + "Phy control failed due to fatal errors\n"); + return -EFAULT; + } + + switch (func) { + case PHY_FUNC_SET_LINK_RATE: + rates = funcdata; + if (rates->minimum_linkrate) { + pm8001_ha->phy[phy_id].minimum_linkrate = + rates->minimum_linkrate; + } + if (rates->maximum_linkrate) { + pm8001_ha->phy[phy_id].maximum_linkrate = + rates->maximum_linkrate; + } + if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) { + PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); + wait_for_completion(&completion); + } + PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, + PHY_LINK_RESET); + break; + case PHY_FUNC_HARD_RESET: + if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) { + PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); + wait_for_completion(&completion); + } + PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, + PHY_HARD_RESET); + break; + case PHY_FUNC_LINK_RESET: + if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) { + PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); + wait_for_completion(&completion); + } + PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, + PHY_LINK_RESET); + break; + case PHY_FUNC_RELEASE_SPINUP_HOLD: + PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, + PHY_LINK_RESET); + break; + case PHY_FUNC_DISABLE: + if (pm8001_ha->chip_id != chip_8001) { + if (pm8001_ha->phy[phy_id].phy_state == + PHY_STATE_LINK_UP_SPCV) { + sas_phy_disconnected(&phy->sas_phy); + sas_notify_phy_event(&phy->sas_phy, + PHYE_LOSS_OF_SIGNAL, GFP_KERNEL); + phy->phy_attached = 0; + } + } else { + if (pm8001_ha->phy[phy_id].phy_state == + PHY_STATE_LINK_UP_SPC) { + sas_phy_disconnected(&phy->sas_phy); + sas_notify_phy_event(&phy->sas_phy, + PHYE_LOSS_OF_SIGNAL, GFP_KERNEL); + phy->phy_attached = 0; + } + } + PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id); + break; + case PHY_FUNC_GET_EVENTS: + spin_lock_irqsave(&pm8001_ha->lock, flags); + if (pm8001_ha->chip_id == chip_8001) { + if (-1 == pm8001_bar4_shift(pm8001_ha, + (phy_id < 4) ? 
0x30000 : 0x40000)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return -EINVAL; + } + } + { + struct sas_phy *phy = sas_phy->phy; + u32 __iomem *qp = pm8001_ha->io_mem[2].memvirtaddr + + 0x1034 + (0x4000 * (phy_id & 3)); + + phy->invalid_dword_count = readl(qp); + phy->running_disparity_error_count = readl(&qp[1]); + phy->loss_of_dword_sync_count = readl(&qp[3]); + phy->phy_reset_problem_count = readl(&qp[4]); + } + if (pm8001_ha->chip_id == chip_8001) + pm8001_bar4_shift(pm8001_ha, 0); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return 0; + default: + pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func); + rc = -EOPNOTSUPP; + } + msleep(300); + return rc; +} + +/** + * pm8001_scan_start - we should enable all HBA phys by sending the phy_start + * command to HBA. + * @shost: the scsi host data. + */ +void pm8001_scan_start(struct Scsi_Host *shost) +{ + int i; + struct pm8001_hba_info *pm8001_ha; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + DECLARE_COMPLETION_ONSTACK(completion); + pm8001_ha = sha->lldd_ha; + /* SAS_RE_INITIALIZATION not available in SPCv/ve */ + if (pm8001_ha->chip_id == chip_8001) + PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); + for (i = 0; i < pm8001_ha->chip->n_phy; ++i) { + pm8001_ha->phy[i].enable_completion = &completion; + PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); + wait_for_completion(&completion); + msleep(300); + } +} + +int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); + + /* give the phy enabling interrupt event time to come in (1s + * is empirically about all it takes) */ + if (time < HZ) + return 0; + /* Wait for discovery to finish */ + sas_drain_work(ha); + return 1; +} + +/** + * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached to smp task + */ +static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb); +} + +u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag) +{ + struct ata_queued_cmd *qc = task->uldd_task; + + if (qc && ata_is_ncq(qc->tf.protocol)) { + *tag = qc->tag; + return 1; + } + + return 0; +} + +/** + * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached to sata task + */ +static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb); +} + +/** + * pm8001_task_prep_internal_abort - the dispatcher function, prepare data + * for internal abort task + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached to sata task + */ +static int pm8001_task_prep_internal_abort(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + return PM8001_CHIP_DISP->task_abort(pm8001_ha, ccb); +} + +/** + * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached to TM + * @tmf: the task management IU + */ +static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf) +{ + return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf); +} + +/** + * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached 
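A worked example (plain userspace C) of the per-PHY offset arithmetic in the PHY_FUNC_GET_EVENTS branch above: each PHY's counter block sits 0x4000 bytes apart starting at offset 0x1034, and on the SPC (chip_8001) PHYs 0-3 and 4-7 are reached through the 0x30000 and 0x40000 BAR4 windows respectively. Counter byte offsets within a block follow the qp[0], qp[1], qp[3], qp[4] reads shown in the code.

    #include <stdio.h>

    int main(void)
    {
        int phy_id;

        for (phy_id = 0; phy_id < 8; phy_id++) {
            unsigned int window = (phy_id < 4) ? 0x30000 : 0x40000;
            unsigned int block  = 0x1034 + 0x4000 * (phy_id & 3);

            printf("phy %d: window 0x%05x, block 0x%04x, "
                   "invalid_dword @ +0x0, disparity @ +0x4, "
                   "dword_sync_loss @ +0xc, reset_problem @ +0x10\n",
                   phy_id, window, block);
        }
        return 0;
    }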
to ssp task + */ +static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb); +} + + /* Find the local port id that's attached to this device */ +static int sas_find_local_port_id(struct domain_device *dev) +{ + struct domain_device *pdev = dev->parent; + + /* Directly attached device */ + if (!pdev) + return dev->port->id; + while (pdev) { + struct domain_device *pdev_p = pdev->parent; + if (!pdev_p) + return pdev->port->id; + pdev = pdev->parent; + } + return 0; +} + +#define DEV_IS_GONE(pm8001_dev) \ + ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))) + + +static int pm8001_deliver_command(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + enum sas_protocol task_proto = task->task_proto; + struct sas_tmf_task *tmf = task->tmf; + int is_tmf = !!tmf; + + switch (task_proto) { + case SAS_PROTOCOL_SMP: + return pm8001_task_prep_smp(pm8001_ha, ccb); + case SAS_PROTOCOL_SSP: + if (is_tmf) + return pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf); + return pm8001_task_prep_ssp(pm8001_ha, ccb); + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + return pm8001_task_prep_ata(pm8001_ha, ccb); + case SAS_PROTOCOL_INTERNAL_ABORT: + return pm8001_task_prep_internal_abort(pm8001_ha, ccb); + default: + dev_err(pm8001_ha->dev, "unknown sas_task proto: 0x%x\n", + task_proto); + } + + return -EINVAL; +} + +/** + * pm8001_queue_command - register for upper layer used, all IO commands sent + * to HBA are from this interface. + * @task: the task to be execute. + * @gfp_flags: gfp_flags + */ +int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags) +{ + struct task_status_struct *ts = &task->task_status; + enum sas_protocol task_proto = task->task_proto; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + bool internal_abort = sas_is_internal_abort(task); + struct pm8001_hba_info *pm8001_ha; + struct pm8001_port *port = NULL; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 n_elem = 0; + int rc = 0; + + if (!internal_abort && !dev->port) { + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + if (dev->dev_type != SAS_SATA_DEV) + task->task_done(task); + return 0; + } + + pm8001_ha = pm8001_find_ha_by_dev(dev); + if (pm8001_ha->controller_fatal_error) { + ts->resp = SAS_TASK_UNDELIVERED; + task->task_done(task); + return 0; + } + + pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n"); + + spin_lock_irqsave(&pm8001_ha->lock, flags); + + pm8001_dev = dev->lldd_dev; + port = &pm8001_ha->port[sas_find_local_port_id(dev)]; + + if (!internal_abort && + (DEV_IS_GONE(pm8001_dev) || !port->port_attached)) { + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + if (sas_protocol_ata(task_proto)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + task->task_done(task); + spin_lock_irqsave(&pm8001_ha->lock, flags); + } else { + task->task_done(task); + } + rc = -ENODEV; + goto err_out; + } + + ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task); + if (!ccb) { + rc = -SAS_QUEUE_FULL; + goto err_out; + } + + if (!sas_protocol_ata(task_proto)) { + if (task->num_scatter) { + n_elem = dma_map_sg(pm8001_ha->dev, task->scatter, + task->num_scatter, task->data_dir); + if (!n_elem) { + rc = -ENOMEM; + goto err_out_ccb; + } + } + } else { + n_elem = task->num_scatter; + } + + task->lldd_task = ccb; + ccb->n_elem = n_elem; + + atomic_inc(&pm8001_dev->running_req); + + rc = 
pm8001_deliver_command(pm8001_ha, ccb); + if (rc) { + atomic_dec(&pm8001_dev->running_req); + if (!sas_protocol_ata(task_proto) && n_elem) + dma_unmap_sg(pm8001_ha->dev, task->scatter, + task->num_scatter, task->data_dir); +err_out_ccb: + pm8001_ccb_free(pm8001_ha, ccb); + +err_out: + pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec failed[%d]!\n", rc); + } + + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + + return rc; +} + +/** + * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb. + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached to ssp task to free + */ +void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct ata_queued_cmd *qc; + struct pm8001_device *pm8001_dev; + + if (!task) + return; + + if (!sas_protocol_ata(task->task_proto) && ccb->n_elem) + dma_unmap_sg(pm8001_ha->dev, task->scatter, + task->num_scatter, task->data_dir); + + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); + dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + break; + + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SSP: + default: + /* do nothing */ + break; + } + + if (sas_protocol_ata(task->task_proto)) { + /* For SCSI/ATA commands uldd_task points to ata_queued_cmd */ + qc = task->uldd_task; + pm8001_dev = ccb->device; + trace_pm80xx_request_complete(pm8001_ha->id, + pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS, + ccb->ccb_tag, 0 /* ctlr_opcode not known */, + qc ? qc->tf.command : 0, // ata opcode + pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1); + } + + task->lldd_task = NULL; + pm8001_ccb_free(pm8001_ha, ccb); +} + +/** + * pm8001_alloc_dev - find a empty pm8001_device + * @pm8001_ha: our hba card information + */ +static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) +{ + u32 dev; + for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { + if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) { + pm8001_ha->devices[dev].id = dev; + return &pm8001_ha->devices[dev]; + } + } + if (dev == PM8001_MAX_DEVICES) { + pm8001_dbg(pm8001_ha, FAIL, + "max support %d devices, ignore ..\n", + PM8001_MAX_DEVICES); + } + return NULL; +} +/** + * pm8001_find_dev - find a matching pm8001_device + * @pm8001_ha: our hba card information + * @device_id: device ID to match against + */ +struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, + u32 device_id) +{ + u32 dev; + for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { + if (pm8001_ha->devices[dev].device_id == device_id) + return &pm8001_ha->devices[dev]; + } + if (dev == PM8001_MAX_DEVICES) { + pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n"); + } + return NULL; +} + +void pm8001_free_dev(struct pm8001_device *pm8001_dev) +{ + u32 id = pm8001_dev->id; + memset(pm8001_dev, 0, sizeof(*pm8001_dev)); + pm8001_dev->id = id; + pm8001_dev->dev_type = SAS_PHY_UNUSED; + pm8001_dev->device_id = PM8001_MAX_DEVICES; + pm8001_dev->sas_device = NULL; +} + +/** + * pm8001_dev_found_notify - libsas notify a device is found. + * @dev: the device structure which sas layer used. 
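A hedged sketch of the scatterlist mapping discipline followed by pm8001_queue_command() and pm8001_ccb_task_free() above: map before the command is handed to the firmware, and unmap with the same device, scatterlist, nents and direction on both the error path and the completion path. my_submit and fire are illustrative placeholders.

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    static int my_submit(struct device *dev, struct scatterlist *sg,
                         int nents, enum dma_data_direction dir,
                         int (*fire)(struct scatterlist *sg, int n_mapped))
    {
        int n_mapped, rc;

        n_mapped = dma_map_sg(dev, sg, nents, dir);
        if (!n_mapped)
            return -ENOMEM;

        rc = fire(sg, n_mapped);        /* hand off to hardware */
        if (rc)                         /* on failure, undo the mapping here */
            dma_unmap_sg(dev, sg, nents, dir);
        /* on success, the completion path issues the same dma_unmap_sg() */
        return rc;
    }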
+ * + * when libsas find a sas domain device, it should tell the LLDD that + * device is found, and then LLDD register this device to HBA firmware + * by the command "OPC_INB_REG_DEV", after that the HBA will assign a + * device ID(according to device's sas address) and returned it to LLDD. From + * now on, we communicate with HBA FW with the device ID which HBA assigned + * rather than sas address. it is the necessary step for our HBA but it is + * the optional for other HBA driver. + */ +static int pm8001_dev_found_notify(struct domain_device *dev) +{ + unsigned long flags = 0; + int res = 0; + struct pm8001_hba_info *pm8001_ha = NULL; + struct domain_device *parent_dev = dev->parent; + struct pm8001_device *pm8001_device; + DECLARE_COMPLETION_ONSTACK(completion); + u32 flag = 0; + pm8001_ha = pm8001_find_ha_by_dev(dev); + spin_lock_irqsave(&pm8001_ha->lock, flags); + + pm8001_device = pm8001_alloc_dev(pm8001_ha); + if (!pm8001_device) { + res = -1; + goto found_out; + } + pm8001_device->sas_device = dev; + dev->lldd_dev = pm8001_device; + pm8001_device->dev_type = dev->dev_type; + pm8001_device->dcompletion = &completion; + if (parent_dev && dev_is_expander(parent_dev->dev_type)) { + int phy_id; + + phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev); + if (phy_id < 0) { + pm8001_dbg(pm8001_ha, FAIL, + "Error: no attached dev:%016llx at ex:%016llx.\n", + SAS_ADDR(dev->sas_addr), + SAS_ADDR(parent_dev->sas_addr)); + res = phy_id; + } else { + pm8001_device->attached_phy = phy_id; + } + } else { + if (dev->dev_type == SAS_SATA_DEV) { + pm8001_device->attached_phy = + dev->rphy->identify.phy_identifier; + flag = 1; /* directly sata */ + } + } /*register this device to HBA*/ + pm8001_dbg(pm8001_ha, DISC, "Found device\n"); + PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + wait_for_completion(&completion); + if (dev->dev_type == SAS_END_DEVICE) + msleep(50); + pm8001_ha->flags = PM8001F_RUN_TIME; + return 0; +found_out: + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return res; +} + +int pm8001_dev_found(struct domain_device *dev) +{ + return pm8001_dev_found_notify(dev); +} + +#define PM8001_TASK_TIMEOUT 20 + +/** + * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify" + * @dev: the device structure which sas layer used. 
+ */ +static void pm8001_dev_gone_notify(struct domain_device *dev) +{ + unsigned long flags = 0; + struct pm8001_hba_info *pm8001_ha; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + + pm8001_ha = pm8001_find_ha_by_dev(dev); + spin_lock_irqsave(&pm8001_ha->lock, flags); + if (pm8001_dev) { + u32 device_id = pm8001_dev->device_id; + + pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n", + pm8001_dev->device_id, pm8001_dev->dev_type); + if (atomic_read(&pm8001_dev->running_req)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + sas_execute_internal_abort_dev(dev, 0, NULL); + while (atomic_read(&pm8001_dev->running_req)) + msleep(20); + spin_lock_irqsave(&pm8001_ha->lock, flags); + } + PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id); + pm8001_free_dev(pm8001_dev); + } else { + pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n"); + } + dev->lldd_dev = NULL; + spin_unlock_irqrestore(&pm8001_ha->lock, flags); +} + +void pm8001_dev_gone(struct domain_device *dev) +{ + pm8001_dev_gone_notify(dev); +} + +/* retry commands by ha, by task and/or by device */ +void pm8001_open_reject_retry( + struct pm8001_hba_info *pm8001_ha, + struct sas_task *task_to_close, + struct pm8001_device *device_to_close) +{ + int i; + unsigned long flags; + + if (pm8001_ha == NULL) + return; + + spin_lock_irqsave(&pm8001_ha->lock, flags); + + for (i = 0; i < PM8001_MAX_CCB; i++) { + struct sas_task *task; + struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; + unsigned long flags1; + struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i]; + + if (ccb->ccb_tag == PM8001_INVALID_TAG) + continue; + + pm8001_dev = ccb->device; + if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)) + continue; + if (!device_to_close) { + uintptr_t d = (uintptr_t)pm8001_dev + - (uintptr_t)&pm8001_ha->devices; + if (((d % sizeof(*pm8001_dev)) != 0) + || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES)) + continue; + } else if (pm8001_dev != device_to_close) + continue; + task = ccb->task; + if (!task || !task->task_done) + continue; + if (task_to_close && (task != task_to_close)) + continue; + ts = &task->task_status; + ts->resp = SAS_TASK_COMPLETE; + /* Force the midlayer to retry */ + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + spin_lock_irqsave(&task->task_state_lock, flags1); + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((task->task_state_flags + & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&task->task_state_lock, + flags1); + pm8001_ccb_task_free(pm8001_ha, ccb); + } else { + spin_unlock_irqrestore(&task->task_state_lock, + flags1); + pm8001_ccb_task_free(pm8001_ha, ccb); + mb();/* in order to force CPU ordering */ + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + task->task_done(task); + spin_lock_irqsave(&pm8001_ha->lock, flags); + } + } + + spin_unlock_irqrestore(&pm8001_ha->lock, flags); +} + +/** + * pm8001_I_T_nexus_reset() - reset the initiator/target connection + * @dev: the device structure for the device to reset. 
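A standalone illustration (plain userspace C) of the pointer check used in pm8001_open_reject_retry() above: a candidate pointer is accepted only if its byte offset from the array base is a multiple of the element size and the resulting index is in bounds. struct elem and in_array are illustrative names.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct elem { char pad[24]; };

    static int in_array(const struct elem *p, const struct elem *base, size_t n)
    {
        uintptr_t d = (uintptr_t)p - (uintptr_t)base;

        return (d % sizeof(*p)) == 0 && (d / sizeof(*p)) < n;
    }

    int main(void)
    {
        struct elem devs[4];

        printf("%d\n", in_array(&devs[2], devs, 4));                   /* 1 */
        printf("%d\n", in_array((struct elem *)((char *)devs + 5),
                                devs, 4));                             /* 0 */
        return 0;
    }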
+ * + * Standard mandates link reset for ATA (type 0) and hard reset for + * SSP (type 1), only for RECOVERY + */ +int pm8001_I_T_nexus_reset(struct domain_device *dev) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_device *pm8001_dev; + struct pm8001_hba_info *pm8001_ha; + struct sas_phy *phy; + + if (!dev || !dev->lldd_dev) + return -ENODEV; + + pm8001_dev = dev->lldd_dev; + pm8001_ha = pm8001_find_ha_by_dev(dev); + phy = sas_get_local_phy(dev); + + if (dev_is_sata(dev)) { + if (scsi_is_sas_phy_local(phy)) { + rc = 0; + goto out; + } + rc = sas_phy_reset(phy, 1); + if (rc) { + pm8001_dbg(pm8001_ha, EH, + "phy reset failed for device %x\n" + "with rc %d\n", pm8001_dev->device_id, rc); + rc = TMF_RESP_FUNC_FAILED; + goto out; + } + msleep(2000); + rc = sas_execute_internal_abort_dev(dev, 0, NULL); + if (rc) { + pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n" + "with rc %d\n", pm8001_dev->device_id, rc); + rc = TMF_RESP_FUNC_FAILED; + } + } else { + rc = sas_phy_reset(phy, 1); + msleep(2000); + } + pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n", + pm8001_dev->device_id, rc); + out: + sas_put_local_phy(phy); + return rc; +} + +/* +* This function handle the IT_NEXUS_XXX event or completion +* status code for SSP/SATA/SMP I/O request. +*/ +int pm8001_I_T_nexus_event_handler(struct domain_device *dev) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_device *pm8001_dev; + struct pm8001_hba_info *pm8001_ha; + struct sas_phy *phy; + + if (!dev || !dev->lldd_dev) + return -1; + + pm8001_dev = dev->lldd_dev; + pm8001_ha = pm8001_find_ha_by_dev(dev); + + pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n"); + + phy = sas_get_local_phy(dev); + + if (dev_is_sata(dev)) { + DECLARE_COMPLETION_ONSTACK(completion_setstate); + if (scsi_is_sas_phy_local(phy)) { + rc = 0; + goto out; + } + /* send internal ssp/sata/smp abort command to FW */ + sas_execute_internal_abort_dev(dev, 0, NULL); + msleep(100); + + /* deregister the target device */ + pm8001_dev_gone_notify(dev); + msleep(200); + + /*send phy reset to hard reset target */ + rc = sas_phy_reset(phy, 1); + msleep(2000); + pm8001_dev->setds_completion = &completion_setstate; + + wait_for_completion(&completion_setstate); + } else { + /* send internal ssp/sata/smp abort command to FW */ + sas_execute_internal_abort_dev(dev, 0, NULL); + msleep(100); + + /* deregister the target device */ + pm8001_dev_gone_notify(dev); + msleep(200); + + /*send phy reset to hard reset target */ + rc = sas_phy_reset(phy, 1); + msleep(2000); + } + pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n", + pm8001_dev->device_id, rc); +out: + sas_put_local_phy(phy); + + return rc; +} +/* mandatory SAM-3, the task reset the specified LUN*/ +int pm8001_lu_reset(struct domain_device *dev, u8 *lun) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); + DECLARE_COMPLETION_ONSTACK(completion_setstate); + + if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) { + /* + * If the controller is in fatal error state, + * we will not get a response from the controller + */ + pm8001_dbg(pm8001_ha, FAIL, + "LUN reset failed due to fatal errors\n"); + return rc; + } + + if (dev_is_sata(dev)) { + struct sas_phy *phy = sas_get_local_phy(dev); + sas_execute_internal_abort_dev(dev, 0, NULL); + rc = sas_phy_reset(phy, 1); + sas_put_local_phy(phy); + pm8001_dev->setds_completion = &completion_setstate; + rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, + pm8001_dev, DS_OPERATIONAL); 
+ wait_for_completion(&completion_setstate); + } else { + rc = sas_lu_reset(dev, lun); + } + /* If failed, fall-through I_T_Nexus reset */ + pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n", + pm8001_dev->device_id, rc); + return rc; +} + +/* optional SAM-3 */ +int pm8001_query_task(struct sas_task *task) +{ + u32 tag = 0xdeadbeef; + int rc = TMF_RESP_FUNC_FAILED; + if (unlikely(!task || !task->lldd_task || !task->dev)) + return rc; + + if (task->task_proto & SAS_PROTOCOL_SSP) { + struct scsi_cmnd *cmnd = task->uldd_task; + struct domain_device *dev = task->dev; + struct pm8001_hba_info *pm8001_ha = + pm8001_find_ha_by_dev(dev); + + rc = pm8001_find_tag(task, &tag); + if (rc == 0) { + rc = TMF_RESP_FUNC_FAILED; + return rc; + } + pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd); + + rc = sas_query_task(task, tag); + switch (rc) { + /* The task is still in Lun, release it then */ + case TMF_RESP_FUNC_SUCC: + pm8001_dbg(pm8001_ha, EH, + "The task is still in Lun\n"); + break; + /* The task is not in Lun or failed, reset the phy */ + case TMF_RESP_FUNC_FAILED: + case TMF_RESP_FUNC_COMPLETE: + pm8001_dbg(pm8001_ha, EH, + "The task is not in Lun or failed, reset the phy\n"); + break; + } + } + pr_err("pm80xx: rc= %d\n", rc); + return rc; +} + +/* mandatory SAM-3, still need free task/ccb info, abort the specified task */ +int pm8001_abort_task(struct sas_task *task) +{ + struct pm8001_ccb_info *ccb = task->lldd_task; + unsigned long flags; + u32 tag; + struct domain_device *dev ; + struct pm8001_hba_info *pm8001_ha; + struct pm8001_device *pm8001_dev; + int rc = TMF_RESP_FUNC_FAILED, ret; + u32 phy_id, port_id; + struct sas_task_slow slow_task; + + if (!task->lldd_task || !task->dev) + return TMF_RESP_FUNC_FAILED; + + dev = task->dev; + pm8001_dev = dev->lldd_dev; + pm8001_ha = pm8001_find_ha_by_dev(dev); + phy_id = pm8001_dev->attached_phy; + + if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) { + // If the controller is seeing fatal errors + // abort task will not get a response from the controller + return TMF_RESP_FUNC_FAILED; + } + + ret = pm8001_find_tag(task, &tag); + if (ret == 0) { + pm8001_info(pm8001_ha, "no tag for task:%p\n", task); + return TMF_RESP_FUNC_FAILED; + } + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + return TMF_RESP_FUNC_COMPLETE; + } + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + if (task->slow_task == NULL) { + init_completion(&slow_task.completion); + task->slow_task = &slow_task; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + if (task->task_proto & SAS_PROTOCOL_SSP) { + rc = sas_abort_task(task, tag); + sas_execute_internal_abort_single(dev, tag, 0, NULL); + } else if (task->task_proto & SAS_PROTOCOL_SATA || + task->task_proto & SAS_PROTOCOL_STP) { + if (pm8001_ha->chip_id == chip_8006) { + DECLARE_COMPLETION_ONSTACK(completion_reset); + DECLARE_COMPLETION_ONSTACK(completion); + struct pm8001_phy *phy = pm8001_ha->phy + phy_id; + port_id = phy->port->port_id; + + /* 1. Set Device state as Recovery */ + pm8001_dev->setds_completion = &completion; + PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, + pm8001_dev, DS_IN_RECOVERY); + wait_for_completion(&completion); + + /* 2. 
Send Phy Control Hard Reset */ + reinit_completion(&completion); + phy->port_reset_status = PORT_RESET_TMO; + phy->reset_success = false; + phy->enable_completion = &completion; + phy->reset_completion = &completion_reset; + ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, + PHY_HARD_RESET); + if (ret) { + phy->enable_completion = NULL; + phy->reset_completion = NULL; + goto out; + } + + /* In the case of the reset timeout/fail we still + * abort the command at the firmware. The assumption + * here is that the drive is off doing something so + * that it's not processing requests, and we want to + * avoid getting a completion for this and either + * leaking the task in libsas or losing the race and + * getting a double free. + */ + pm8001_dbg(pm8001_ha, MSG, + "Waiting for local phy ctl\n"); + ret = wait_for_completion_timeout(&completion, + PM8001_TASK_TIMEOUT * HZ); + if (!ret || !phy->reset_success) { + phy->enable_completion = NULL; + phy->reset_completion = NULL; + } else { + /* 3. Wait for Port Reset complete or + * Port reset TMO + */ + pm8001_dbg(pm8001_ha, MSG, + "Waiting for Port reset\n"); + ret = wait_for_completion_timeout( + &completion_reset, + PM8001_TASK_TIMEOUT * HZ); + if (!ret) + phy->reset_completion = NULL; + WARN_ON(phy->port_reset_status == + PORT_RESET_TMO); + if (phy->port_reset_status == PORT_RESET_TMO) { + pm8001_dev_gone_notify(dev); + PM8001_CHIP_DISP->hw_event_ack_req( + pm8001_ha, 0, + 0x07, /*HW_EVENT_PHY_DOWN ack*/ + port_id, phy_id, 0, 0); + goto out; + } + } + + /* + * 4. SATA Abort ALL + * we wait for the task to be aborted so that the task + * is removed from the ccb. on success the caller is + * going to free the task. + */ + ret = sas_execute_internal_abort_dev(dev, 0, NULL); + if (ret) + goto out; + ret = wait_for_completion_timeout( + &task->slow_task->completion, + PM8001_TASK_TIMEOUT * HZ); + if (!ret) + goto out; + + /* 5. Set Device State as Operational */ + reinit_completion(&completion); + pm8001_dev->setds_completion = &completion; + PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, + pm8001_dev, DS_OPERATIONAL); + wait_for_completion(&completion); + } else { + /* + * Ensure that if we see a completion for the ccb + * associated with the task which we are trying to + * abort then we should not touch the sas_task as it + * may race with libsas freeing it when return here. 
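A brief note on the timed waits above: wait_for_completion_timeout() returns 0 on timeout and the remaining jiffies otherwise, which is why the code treats !ret as "the firmware never signalled us". A minimal illustration, with my_wait and my_done as placeholder names:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    static bool my_wait(struct completion *my_done, unsigned long secs)
    {
        unsigned long left = wait_for_completion_timeout(my_done, secs * HZ);

        return left != 0;  /* true only if complete() arrived in time */
    }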
+ */ + ccb->task = NULL; + ret = sas_execute_internal_abort_single(dev, tag, 0, NULL); + } + rc = TMF_RESP_FUNC_COMPLETE; + } else if (task->task_proto & SAS_PROTOCOL_SMP) { + /* SMP */ + rc = sas_execute_internal_abort_single(dev, tag, 0, NULL); + + } +out: + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->slow_task == &slow_task) + task->slow_task = NULL; + spin_unlock_irqrestore(&task->task_state_lock, flags); + if (rc != TMF_RESP_FUNC_COMPLETE) + pm8001_info(pm8001_ha, "rc= %d\n", rc); + return rc; +} + +int pm8001_clear_task_set(struct domain_device *dev, u8 *lun) +{ + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); + + pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n", + pm8001_dev->device_id); + return sas_clear_task_set(dev, lun); +} + +void pm8001_port_formed(struct asd_sas_phy *sas_phy) +{ + struct sas_ha_struct *sas_ha = sas_phy->ha; + struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha; + struct pm8001_phy *phy = sas_phy->lldd_phy; + struct asd_sas_port *sas_port = sas_phy->port; + struct pm8001_port *port = phy->port; + + if (!sas_port) { + pm8001_dbg(pm8001_ha, FAIL, "Received null port\n"); + return; + } + sas_port->lldd_port = port; +} + +void pm8001_setds_completion(struct domain_device *dev) +{ + struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); + struct pm8001_device *pm8001_dev = dev->lldd_dev; + DECLARE_COMPLETION_ONSTACK(completion_setstate); + + if (pm8001_ha->chip_id != chip_8001) { + pm8001_dev->setds_completion = &completion_setstate; + PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, + pm8001_dev, DS_OPERATIONAL); + wait_for_completion(&completion_setstate); + } +} + +void pm8001_tmf_aborted(struct sas_task *task) +{ + struct pm8001_ccb_info *ccb = task->lldd_task; + + if (ccb) + ccb->task = NULL; +} diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h new file mode 100644 index 000000000..2fadd353f --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -0,0 +1,794 @@ +/* + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +#ifndef _PM8001_SAS_H_ +#define _PM8001_SAS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "pm8001_defs.h" + +#define DRV_NAME "pm80xx" +#define DRV_VERSION "0.1.40" +#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */ +#define PM8001_INIT_LOGGING 0x02 /* driver init logging */ +#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ +#define PM8001_IO_LOGGING 0x08 /* I/O path logging */ +#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/ +#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */ +#define PM8001_MSG_LOGGING 0x40 /* misc message logging */ +#define PM8001_DEV_LOGGING 0x80 /* development message logging */ +#define PM8001_DEVIO_LOGGING 0x100 /* development io message logging */ +#define PM8001_IOERR_LOGGING 0x200 /* development io err message logging */ +#define PM8001_EVENT_LOGGING 0x400 /* HW event logging */ + +#define pm8001_info(HBA, fmt, ...) \ + pr_info("%s:: %s %d: " fmt, \ + (HBA)->name, __func__, __LINE__, ##__VA_ARGS__) + +#define pm8001_dbg(HBA, level, fmt, ...) \ +do { \ + if (unlikely((HBA)->logging_level & PM8001_##level##_LOGGING)) \ + pm8001_info(HBA, fmt, ##__VA_ARGS__); \ +} while (0) + +#define PM8001_USE_TASKLET +#define PM8001_USE_MSIX +#define PM8001_READ_VPD + + +#define IS_SPCV_12G(dev) ((dev->device == 0X8074) \ + || (dev->device == 0X8076) \ + || (dev->device == 0X8077) \ + || (dev->device == 0X8070) \ + || (dev->device == 0X8072)) + +#define PM8001_NAME_LENGTH 32/* generic length of strings */ +extern struct list_head hba_list; +extern const struct pm8001_dispatch pm8001_8001_dispatch; +extern const struct pm8001_dispatch pm8001_80xx_dispatch; + +struct pm8001_hba_info; +struct pm8001_ccb_info; +struct pm8001_device; + +struct pm8001_ioctl_payload { + u32 signature; + u16 major_function; + u16 minor_function; + u16 status; + u16 offset; + u16 id; + u32 wr_length; + u32 rd_length; + u8 *func_specific; +}; + +#define MPI_FATAL_ERROR_TABLE_OFFSET_MASK 0xFFFFFF +#define MPI_FATAL_ERROR_TABLE_SIZE(value) ((0xFF000000 & value) >> SHIFT24) +#define MPI_FATAL_EDUMP_TABLE_LO_OFFSET 0x00 /* HNFBUFL */ +#define MPI_FATAL_EDUMP_TABLE_HI_OFFSET 0x04 /* HNFBUFH */ +#define MPI_FATAL_EDUMP_TABLE_LENGTH 0x08 /* HNFBLEN */ +#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x0C /* FDDHSHK */ +#define MPI_FATAL_EDUMP_TABLE_STATUS 0x10 /* FDDTSTAT */ +#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN 0x14 /* ACCDDLEN */ +#define MPI_FATAL_EDUMP_TABLE_TOTAL_LEN 0x18 /* TOTALLEN */ +#define MPI_FATAL_EDUMP_TABLE_SIGNATURE 0x1C /* SIGNITURE */ +#define MPI_FATAL_EDUMP_HANDSHAKE_RDY 0x1 +#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY 0x0 +#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD 0x0 +#define MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED 0x1 +#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA 0x2 +#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE 0x3 +#define TYPE_GSM_SPACE 1 +#define TYPE_QUEUE 2 +#define TYPE_FATAL 3 +#define 
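A short usage sketch for the pm8001_dbg() macro defined above: a message is emitted only when the matching PM8001_*_LOGGING bit is set in the adapter's logging_level, so classes are enabled by OR-ing their bits. The assignment below is an illustrative example, not a driver default.

    static void my_logging_example(struct pm8001_hba_info *pm8001_ha)
    {
        pm8001_ha->logging_level = PM8001_FAIL_LOGGING | PM8001_INIT_LOGGING;

        /* emitted: PM8001_INIT_LOGGING (0x02) is set */
        pm8001_dbg(pm8001_ha, INIT, "logging_level=0x%x\n",
                   pm8001_ha->logging_level);

        /* suppressed: PM8001_EH_LOGGING (0x10) is not set */
        pm8001_dbg(pm8001_ha, EH, "this line is filtered out\n");
    }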
TYPE_NON_FATAL 4 +#define TYPE_INBOUND 1 +#define TYPE_OUTBOUND 2 +struct forensic_data { + u32 data_type; + union { + struct { + u32 direct_len; + u32 direct_offset; + void *direct_data; + } gsm_buf; + struct { + u16 queue_type; + u16 queue_index; + u32 direct_len; + void *direct_data; + } queue_buf; + struct { + u32 direct_len; + u32 direct_offset; + u32 read_len; + void *direct_data; + } data_buf; + }; +}; + +/* bit31-26 - mask bar */ +#define SCRATCH_PAD0_BAR_MASK 0xFC000000 +/* bit25-0 - offset mask */ +#define SCRATCH_PAD0_OFFSET_MASK 0x03FFFFFF +/* if AAP error state */ +#define SCRATCH_PAD0_AAPERR_MASK 0xFFFFFFFF +/* Inbound doorbell bit7 */ +#define SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP 0x80 +/* Inbound doorbell bit7 SPCV */ +#define SPCV_MSGU_CFG_TABLE_TRANSFER_DEBUG_INFO 0x80 +#define MAIN_MERRDCTO_MERRDCES 0xA0/* DWORD 0x28) */ + +struct pm8001_dispatch { + char *name; + int (*chip_init)(struct pm8001_hba_info *pm8001_ha); + void (*chip_post_init)(struct pm8001_hba_info *pm8001_ha); + int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha); + void (*chip_rst)(struct pm8001_hba_info *pm8001_ha); + int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha); + void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha); + irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec); + u32 (*is_our_interrupt)(struct pm8001_hba_info *pm8001_ha); + int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*make_prd)(struct scatterlist *scatter, int nr, void *prd); + int (*smp_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb); + int (*ssp_io_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb); + int (*sata_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb); + int (*phy_start_req)(struct pm8001_hba_info *pm8001_ha, u8 phy_id); + int (*phy_stop_req)(struct pm8001_hba_info *pm8001_ha, u8 phy_id); + int (*reg_dev_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 flag); + int (*dereg_dev_req)(struct pm8001_hba_info *pm8001_ha, u32 device_id); + int (*phy_ctl_req)(struct pm8001_hba_info *pm8001_ha, + u32 phy_id, u32 phy_op); + int (*task_abort)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb); + int (*ssp_tm_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf); + int (*get_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload); + int (*set_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload); + int (*fw_flash_update_req)(struct pm8001_hba_info *pm8001_ha, + void *payload); + int (*set_dev_state_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 state); + int (*sas_diag_start_end_req)(struct pm8001_hba_info *pm8001_ha, + u32 state); + int (*sas_diag_execute_req)(struct pm8001_hba_info *pm8001_ha, + u32 state); + int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha); + int (*fatal_errors)(struct pm8001_hba_info *pm8001_ha); + void (*hw_event_ack_req)(struct pm8001_hba_info *pm8001_ha, + u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, + u32 param1); +}; + +struct pm8001_chip_info { + u32 encrypt; + u32 n_phy; + const struct pm8001_dispatch *dispatch; +}; +#define PM8001_CHIP_DISP (pm8001_ha->chip->dispatch) + +struct pm8001_port { + struct asd_sas_port sas_port; + u8 port_attached; + u16 wide_port_phymap; + u8 port_state; + u8 port_id; + struct 
list_head list; +}; + +struct pm8001_phy { + struct pm8001_hba_info *pm8001_ha; + struct pm8001_port *port; + struct asd_sas_phy sas_phy; + struct sas_identify identify; + struct scsi_device *sdev; + u64 dev_sas_addr; + u32 phy_type; + struct completion *enable_completion; + u32 frame_rcvd_size; + u8 frame_rcvd[32]; + u8 phy_attached; + u8 phy_state; + enum sas_linkrate minimum_linkrate; + enum sas_linkrate maximum_linkrate; + struct completion *reset_completion; + bool port_reset_status; + bool reset_success; +}; + +/* port reset status */ +#define PORT_RESET_SUCCESS 0x00 +#define PORT_RESET_TMO 0x01 + +struct pm8001_device { + enum sas_device_type dev_type; + struct domain_device *sas_device; + u32 attached_phy; + u32 id; + struct completion *dcompletion; + struct completion *setds_completion; + u32 device_id; + atomic_t running_req; +}; + +struct pm8001_prd_imt { + __le32 len; + __le32 e; +}; + +struct pm8001_prd { + __le64 addr; /* 64-bit buffer address */ + struct pm8001_prd_imt im_len; /* 64-bit length */ +} __attribute__ ((packed)); +/* + * CCB(Command Control Block) + */ +struct pm8001_ccb_info { + struct sas_task *task; + u32 n_elem; + u32 ccb_tag; + dma_addr_t ccb_dma_handle; + struct pm8001_device *device; + struct pm8001_prd *buf_prd; + struct fw_control_ex *fw_control_context; + u8 open_retry; +}; + +struct mpi_mem { + void *virt_ptr; + dma_addr_t phys_addr; + u32 phys_addr_hi; + u32 phys_addr_lo; + u32 total_len; + u32 num_elements; + u32 element_size; + u32 alignment; +}; + +struct mpi_mem_req { + /* The number of element in the mpiMemory array */ + u32 count; + /* The array of structures that define memroy regions*/ + struct mpi_mem region[USI_MAX_MEMCNT]; +}; + +struct encrypt { + u32 cipher_mode; + u32 sec_mode; + u32 status; + u32 flag; +}; + +struct sas_phy_attribute_table { + u32 phystart1_16[16]; + u32 outbound_hw_event_pid1_16[16]; +}; + +union main_cfg_table { + struct { + u32 signature; + u32 interface_rev; + u32 firmware_rev; + u32 max_out_io; + u32 max_sgl; + u32 ctrl_cap_flag; + u32 gst_offset; + u32 inbound_queue_offset; + u32 outbound_queue_offset; + u32 inbound_q_nppd_hppd; + u32 outbound_hw_event_pid0_3; + u32 outbound_hw_event_pid4_7; + u32 outbound_ncq_event_pid0_3; + u32 outbound_ncq_event_pid4_7; + u32 outbound_tgt_ITNexus_event_pid0_3; + u32 outbound_tgt_ITNexus_event_pid4_7; + u32 outbound_tgt_ssp_event_pid0_3; + u32 outbound_tgt_ssp_event_pid4_7; + u32 outbound_tgt_smp_event_pid0_3; + u32 outbound_tgt_smp_event_pid4_7; + u32 upper_event_log_addr; + u32 lower_event_log_addr; + u32 event_log_size; + u32 event_log_option; + u32 upper_iop_event_log_addr; + u32 lower_iop_event_log_addr; + u32 iop_event_log_size; + u32 iop_event_log_option; + u32 fatal_err_interrupt; + u32 fatal_err_dump_offset0; + u32 fatal_err_dump_length0; + u32 fatal_err_dump_offset1; + u32 fatal_err_dump_length1; + u32 hda_mode_flag; + u32 anolog_setup_table_offset; + u32 rsvd[4]; + } pm8001_tbl; + + struct { + u32 signature; + u32 interface_rev; + u32 firmware_rev; + u32 max_out_io; + u32 max_sgl; + u32 ctrl_cap_flag; + u32 gst_offset; + u32 inbound_queue_offset; + u32 outbound_queue_offset; + u32 inbound_q_nppd_hppd; + u32 rsvd[8]; + u32 crc_core_dump; + u32 rsvd1; + u32 upper_event_log_addr; + u32 lower_event_log_addr; + u32 event_log_size; + u32 event_log_severity; + u32 upper_pcs_event_log_addr; + u32 lower_pcs_event_log_addr; + u32 pcs_event_log_size; + u32 pcs_event_log_severity; + u32 fatal_err_interrupt; + u32 fatal_err_dump_offset0; + u32 fatal_err_dump_length0; + u32 
fatal_err_dump_offset1; + u32 fatal_err_dump_length1; + u32 gpio_led_mapping; + u32 analog_setup_table_offset; + u32 int_vec_table_offset; + u32 phy_attr_table_offset; + u32 port_recovery_timer; + u32 interrupt_reassertion_delay; + u32 fatal_n_non_fatal_dump; /* 0x28 */ + u32 ila_version; + u32 inc_fw_version; + } pm80xx_tbl; +}; + +union general_status_table { + struct { + u32 gst_len_mpistate; + u32 iq_freeze_state0; + u32 iq_freeze_state1; + u32 msgu_tcnt; + u32 iop_tcnt; + u32 rsvd; + u32 phy_state[8]; + u32 gpio_input_val; + u32 rsvd1[2]; + u32 recover_err_info[8]; + } pm8001_tbl; + struct { + u32 gst_len_mpistate; + u32 iq_freeze_state0; + u32 iq_freeze_state1; + u32 msgu_tcnt; + u32 iop_tcnt; + u32 rsvd[9]; + u32 gpio_input_val; + u32 rsvd1[2]; + u32 recover_err_info[8]; + } pm80xx_tbl; +}; +struct inbound_queue_table { + u32 element_pri_size_cnt; + u32 upper_base_addr; + u32 lower_base_addr; + u32 ci_upper_base_addr; + u32 ci_lower_base_addr; + u32 pi_pci_bar; + u32 pi_offset; + u32 total_length; + void *base_virt; + void *ci_virt; + u32 reserved; + __le32 consumer_index; + u32 producer_idx; + spinlock_t iq_lock; +}; +struct outbound_queue_table { + u32 element_size_cnt; + u32 upper_base_addr; + u32 lower_base_addr; + void *base_virt; + u32 pi_upper_base_addr; + u32 pi_lower_base_addr; + u32 ci_pci_bar; + u32 ci_offset; + u32 total_length; + void *pi_virt; + u32 interrup_vec_cnt_delay; + u32 dinterrup_to_pci_offset; + __le32 producer_index; + u32 consumer_idx; + spinlock_t oq_lock; + unsigned long lock_flags; +}; +struct pm8001_hba_memspace { + void __iomem *memvirtaddr; + u64 membase; + u32 memsize; +}; +struct isr_param { + struct pm8001_hba_info *drv_inst; + u32 irq_id; +}; +struct pm8001_hba_info { + char name[PM8001_NAME_LENGTH]; + struct list_head list; + unsigned long flags; + spinlock_t lock;/* host-wide lock */ + spinlock_t bitmap_lock; + struct pci_dev *pdev;/* our device */ + struct device *dev; + struct pm8001_hba_memspace io_mem[6]; + struct mpi_mem_req memoryMap; + struct encrypt encrypt_info; /* support encryption */ + struct forensic_data forensic_info; + u32 fatal_bar_loc; + u32 forensic_last_offset; + u32 fatal_forensic_shift_offset; + u32 forensic_fatal_step; + u32 forensic_preserved_accumulated_transfer; + u32 evtlog_ib_offset; + u32 evtlog_ob_offset; + void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/ + void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/ + void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/ + void __iomem *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/ + void __iomem *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/ + void __iomem *pspa_q_tbl_addr; + /*MPI SAS PHY attributes Queue Config Table Addr*/ + void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */ + void __iomem *fatal_tbl_addr; /*MPI Fatal Error Dump Table Addr */ + union main_cfg_table main_cfg_tbl; + union general_status_table gs_tbl; + struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_INB_NUM]; + struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM]; + struct sas_phy_attribute_table phy_attr_table; + /* MPI SAS PHY attributes */ + u8 sas_addr[SAS_ADDR_SIZE]; + struct sas_ha_struct *sas;/* SCSI/SAS glue */ + struct Scsi_Host *shost; + u32 chip_id; + const struct pm8001_chip_info *chip; + struct completion *nvmd_completion; + unsigned long *rsvd_tags; + struct pm8001_phy phy[PM8001_MAX_PHYS]; + struct pm8001_port port[PM8001_MAX_PHYS]; + u32 id; + u32 irq; + u32 iomb_size; /* SPC and SPCV IOMB size */ + struct pm8001_device *devices; + struct 
pm8001_ccb_info *ccb_info; + u32 ccb_count; +#ifdef PM8001_USE_MSIX + int number_of_intr;/*will be used in remove()*/ + char intr_drvname[PM8001_MAX_MSIX_VEC] + [PM8001_NAME_LENGTH+1+3+1]; +#endif +#ifdef PM8001_USE_TASKLET + struct tasklet_struct tasklet[PM8001_MAX_MSIX_VEC]; +#endif + u32 logging_level; + u32 link_rate; + u32 fw_status; + u32 smp_exp_mode; + bool controller_fatal_error; + const struct firmware *fw_image; + struct isr_param irq_vector[PM8001_MAX_MSIX_VEC]; + u32 non_fatal_count; + u32 non_fatal_read_length; + u32 max_q_num; + u32 ib_offset; + u32 ob_offset; + u32 ci_offset; + u32 pi_offset; + u32 max_memcnt; +}; + +struct pm8001_work { + struct work_struct work; + struct pm8001_hba_info *pm8001_ha; + void *data; + int handler; +}; + +struct pm8001_fw_image_header { + u8 vender_id[8]; + u8 product_id; + u8 hardware_rev; + u8 dest_partition; + u8 reserved; + u8 fw_rev[4]; + __be32 image_length; + __be32 image_crc; + __be32 startup_entry; +} __attribute__((packed, aligned(4))); + + +/** + * FW Flash Update status values + */ +#define FLASH_UPDATE_COMPLETE_PENDING_REBOOT 0x00 +#define FLASH_UPDATE_IN_PROGRESS 0x01 +#define FLASH_UPDATE_HDR_ERR 0x02 +#define FLASH_UPDATE_OFFSET_ERR 0x03 +#define FLASH_UPDATE_CRC_ERR 0x04 +#define FLASH_UPDATE_LENGTH_ERR 0x05 +#define FLASH_UPDATE_HW_ERR 0x06 +#define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10 +#define FLASH_UPDATE_DISABLED 0x11 + +/* Device states */ +#define DS_OPERATIONAL 0x01 +#define DS_PORT_IN_RESET 0x02 +#define DS_IN_RECOVERY 0x03 +#define DS_IN_ERROR 0x04 +#define DS_NON_OPERATIONAL 0x07 + +/** + * brief param structure for firmware flash update. + */ +struct fw_flash_updata_info { + u32 cur_image_offset; + u32 cur_image_len; + u32 total_image_len; + struct pm8001_prd sgl; +}; + +struct fw_control_info { + u32 retcode;/*ret code (status)*/ + u32 phase;/*ret code phase*/ + u32 phaseCmplt;/*percent complete for the current + update phase */ + u32 version;/*Hex encoded firmware version number*/ + u32 offset;/*Used for downloading firmware */ + u32 len; /*len of buffer*/ + u32 size;/* Used in OS VPD and Trace get size + operations.*/ + u32 reserved;/* padding required for 64 bit + alignment */ + u8 buffer[];/* Start of buffer */ +}; +struct fw_control_ex { + struct fw_control_info *fw_control; + void *buffer;/* keep buffer pointer to be + freed when the response comes*/ + void *virtAddr;/* keep virtual address of the data */ + void *usrAddr;/* keep virtual address of the + user data */ + dma_addr_t phys_addr; + u32 len; /* len of buffer */ + void *payload; /* pointer to IOCTL Payload */ + u8 inProgress;/*if 1 - the IOCTL request is in + progress */ + void *param1; + void *param2; + void *param3; +}; + +/* pm8001 workqueue */ +extern struct workqueue_struct *pm8001_wq; + +/******************** function prototype *********************/ +int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out); +u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag); +void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb); +int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata); +void pm8001_scan_start(struct Scsi_Host *shost); +int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time); +int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags); +int pm8001_abort_task(struct sas_task *task); +int pm8001_clear_task_set(struct domain_device *dev, u8 *lun); +int pm8001_dev_found(struct domain_device *dev); +void pm8001_dev_gone(struct domain_device 
*dev); +int pm8001_lu_reset(struct domain_device *dev, u8 *lun); +int pm8001_I_T_nexus_reset(struct domain_device *dev); +int pm8001_I_T_nexus_event_handler(struct domain_device *dev); +int pm8001_query_task(struct sas_task *task); +void pm8001_port_formed(struct asd_sas_phy *sas_phy); +void pm8001_open_reject_retry( + struct pm8001_hba_info *pm8001_ha, + struct sas_task *task_to_close, + struct pm8001_device *device_to_close); +int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, + dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, + u32 mem_size, u32 align); + +void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha); +int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, + u32 q_index, u32 opCode, void *payload, size_t nb, + u32 responseQueue); +int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, + u16 messageSize, void **messagePtr); +u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, + struct outbound_queue_table *circularQ, u8 bc); +u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, + void **messagePtr1, u8 *pBC); +int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 state); +int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, + void *payload); +int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, + void *fw_flash_updata_info, u32 tag); +int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload); +int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload); +int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, + struct sas_tmf_task *tmf); +int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb); +int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id); +void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd); +void pm8001_work_fn(struct work_struct *work); +int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, + void *data, int handler); +void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate); +void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr); +void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i); +int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag); +struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, + u32 device_id); +int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha); + +int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); +void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha, + u32 length, u8 *buf); +void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha, 
+ u32 phy, u32 length, u32 *buf); +int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); +ssize_t pm80xx_get_fatal_dump(struct device *cdev, + struct device_attribute *attr, char *buf); +ssize_t pm80xx_get_non_fatal_dump(struct device *cdev, + struct device_attribute *attr, char *buf); +ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf); +int pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha); +void pm8001_free_dev(struct pm8001_device *pm8001_dev); +/* ctl shared API */ +extern const struct attribute_group *pm8001_host_groups[]; + +#define PM8001_INVALID_TAG ((u32)-1) + +/* + * Allocate a new tag and return the corresponding ccb after initializing it. + */ +static inline struct pm8001_ccb_info * +pm8001_ccb_alloc(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *dev, struct sas_task *task) +{ + struct pm8001_ccb_info *ccb; + struct request *rq = NULL; + u32 tag; + + if (task) + rq = sas_task_find_rq(task); + + if (rq) { + tag = rq->tag + PM8001_RESERVE_SLOT; + } else if (pm8001_tag_alloc(pm8001_ha, &tag)) { + pm8001_dbg(pm8001_ha, FAIL, "Failed to allocate a tag\n"); + return NULL; + } + + ccb = &pm8001_ha->ccb_info[tag]; + ccb->task = task; + ccb->n_elem = 0; + ccb->ccb_tag = tag; + ccb->device = dev; + ccb->fw_control_context = NULL; + ccb->open_retry = 0; + + return ccb; +} + +/* + * Free the tag of an initialized ccb. + */ +static inline void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + u32 tag = ccb->ccb_tag; + + /* + * Cleanup the ccb to make sure that a manual scan of the adapter + * ccb_info array can detect ccb's that are in use. + * C.f. pm8001_open_reject_retry() + */ + ccb->task = NULL; + ccb->ccb_tag = PM8001_INVALID_TAG; + ccb->device = NULL; + ccb->fw_control_context = NULL; + + pm8001_tag_free(pm8001_ha, tag); +} + +static inline void pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + + pm8001_ccb_task_free(pm8001_ha, ccb); + smp_mb(); /*in order to force CPU ordering*/ + task->task_done(task); +} +void pm8001_setds_completion(struct domain_device *dev); +void pm8001_tmf_aborted(struct sas_task *task); + +#endif + diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c new file mode 100644 index 000000000..3afd9443c --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -0,0 +1,4940 @@ +/* + * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 PMC-Sierra, Inc., + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + #include + #include "pm8001_sas.h" + #include "pm80xx_hwi.h" + #include "pm8001_chips.h" + #include "pm8001_ctl.h" +#include "pm80xx_tracepoints.h" + +#define SMP_DIRECT 1 +#define SMP_INDIRECT 2 + + +int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value) +{ + u32 reg_val; + unsigned long start; + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, shift_value); + /* confirm the setting is written */ + start = jiffies + HZ; /* 1 sec */ + do { + reg_val = pm8001_cr32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER); + } while ((reg_val != shift_value) && time_before(jiffies, start)); + if (reg_val != shift_value) { + pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:MEMBASE_II_SHIFT_REGISTER = 0x%x\n", + reg_val); + return -1; + } + return 0; +} + +static void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset, + __le32 *destination, + u32 dw_count, u32 bus_base_number) +{ + u32 index, value, offset; + + for (index = 0; index < dw_count; index += 4, destination++) { + offset = (soffset + index); + if (offset < (64 * 1024)) { + value = pm8001_cr32(pm8001_ha, bus_base_number, offset); + *destination = cpu_to_le32(value); + } + } + return; +} + +ssize_t pm80xx_get_fatal_dump(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr; + u32 accum_len, reg_val, index, *temp; + u32 status = 1; + unsigned long start; + u8 *direct_data; + char *fatal_error_data = buf; + u32 length_to_read; + u32 offset; + + pm8001_ha->forensic_info.data_buf.direct_data = buf; + if (pm8001_ha->chip_id == chip_8001) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "Not supported for SPC controller"); + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf; + } + /* initialize variables for very first call from host application */ + if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) { + pm8001_dbg(pm8001_ha, IO, + "forensic_info TYPE_NON_FATAL..............\n"); + direct_data = (u8 *)fatal_error_data; + pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL; + pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET; + pm8001_ha->forensic_info.data_buf.direct_offset = 0; + pm8001_ha->forensic_info.data_buf.read_len = 0; + pm8001_ha->forensic_preserved_accumulated_transfer = 0; + + /* Write signature to fatal dump table */ + 
pm8001_mw32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_SIGNATURE, 0x1234abcd); + + pm8001_ha->forensic_info.data_buf.direct_data = direct_data; + pm8001_dbg(pm8001_ha, IO, "ossaHwCB: status1 %d\n", status); + pm8001_dbg(pm8001_ha, IO, "ossaHwCB: read_len 0x%x\n", + pm8001_ha->forensic_info.data_buf.read_len); + pm8001_dbg(pm8001_ha, IO, "ossaHwCB: direct_len 0x%x\n", + pm8001_ha->forensic_info.data_buf.direct_len); + pm8001_dbg(pm8001_ha, IO, "ossaHwCB: direct_offset 0x%x\n", + pm8001_ha->forensic_info.data_buf.direct_offset); + } + if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) { + /* start to get data */ + /* Program the MEMBASE II Shifting Register with 0x00.*/ + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, + pm8001_ha->fatal_forensic_shift_offset); + pm8001_ha->forensic_last_offset = 0; + pm8001_ha->forensic_fatal_step = 0; + pm8001_ha->fatal_bar_loc = 0; + } + + /* Read until accum_len is retrieved */ + accum_len = pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); + /* Determine length of data between previously stored transfer length + * and current accumulated transfer length + */ + length_to_read = + accum_len - pm8001_ha->forensic_preserved_accumulated_transfer; + pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: accum_len 0x%x\n", + accum_len); + pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: length_to_read 0x%x\n", + length_to_read); + pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: last_offset 0x%x\n", + pm8001_ha->forensic_last_offset); + pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: read_len 0x%x\n", + pm8001_ha->forensic_info.data_buf.read_len); + pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv:: direct_len 0x%x\n", + pm8001_ha->forensic_info.data_buf.direct_len); + pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv:: direct_offset 0x%x\n", + pm8001_ha->forensic_info.data_buf.direct_offset); + + /* If accumulated length failed to read correctly fail the attempt.*/ + if (accum_len == 0xFFFFFFFF) { + pm8001_dbg(pm8001_ha, IO, + "Possible PCI issue 0x%x not expected\n", + accum_len); + return status; + } + /* If accumulated length is zero fail the attempt */ + if (accum_len == 0) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 0xFFFFFFFF); + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf; + } + /* Accumulated length is good so start capturing the first data */ + temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr; + if (pm8001_ha->forensic_fatal_step == 0) { +moreData: + /* If data to read is less than SYSFS_OFFSET then reduce the + * length of dataLen + */ + if (pm8001_ha->forensic_last_offset + SYSFS_OFFSET + > length_to_read) { + pm8001_ha->forensic_info.data_buf.direct_len = + length_to_read - + pm8001_ha->forensic_last_offset; + } else { + pm8001_ha->forensic_info.data_buf.direct_len = + SYSFS_OFFSET; + } + if (pm8001_ha->forensic_info.data_buf.direct_data) { + /* Data is in bar, copy to host memory */ + pm80xx_pci_mem_copy(pm8001_ha, + pm8001_ha->fatal_bar_loc, + pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr, + pm8001_ha->forensic_info.data_buf.direct_len, 1); + } + pm8001_ha->fatal_bar_loc += + pm8001_ha->forensic_info.data_buf.direct_len; + pm8001_ha->forensic_info.data_buf.direct_offset += + pm8001_ha->forensic_info.data_buf.direct_len; + pm8001_ha->forensic_last_offset += + pm8001_ha->forensic_info.data_buf.direct_len; + pm8001_ha->forensic_info.data_buf.read_len = + pm8001_ha->forensic_info.data_buf.direct_len; + + if 
(pm8001_ha->forensic_last_offset >= length_to_read) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 3); + for (index = 0; index < + (pm8001_ha->forensic_info.data_buf.direct_len + / 4); index++) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf( + pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", *(temp + index)); + } + + pm8001_ha->fatal_bar_loc = 0; + pm8001_ha->forensic_fatal_step = 1; + pm8001_ha->fatal_forensic_shift_offset = 0; + pm8001_ha->forensic_last_offset = 0; + status = 0; + offset = (int) + ((char *)pm8001_ha->forensic_info.data_buf.direct_data + - (char *)buf); + pm8001_dbg(pm8001_ha, IO, + "get_fatal_spcv:return1 0x%x\n", offset); + return (char *)pm8001_ha-> + forensic_info.data_buf.direct_data - + (char *)buf; + } + if (pm8001_ha->fatal_bar_loc < (64 * 1024)) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha-> + forensic_info.data_buf.direct_data, + "%08x ", 2); + for (index = 0; index < + (pm8001_ha->forensic_info.data_buf.direct_len + / 4); index++) { + pm8001_ha->forensic_info.data_buf.direct_data + += sprintf(pm8001_ha-> + forensic_info.data_buf.direct_data, + "%08x ", *(temp + index)); + } + status = 0; + offset = (int) + ((char *)pm8001_ha->forensic_info.data_buf.direct_data + - (char *)buf); + pm8001_dbg(pm8001_ha, IO, + "get_fatal_spcv:return2 0x%x\n", offset); + return (char *)pm8001_ha-> + forensic_info.data_buf.direct_data - + (char *)buf; + } + + /* Increment the MEMBASE II Shifting Register value by 0x100.*/ + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 2); + for (index = 0; index < + (pm8001_ha->forensic_info.data_buf.direct_len + / 4) ; index++) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha-> + forensic_info.data_buf.direct_data, + "%08x ", *(temp + index)); + } + pm8001_ha->fatal_forensic_shift_offset += 0x100; + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, + pm8001_ha->fatal_forensic_shift_offset); + pm8001_ha->fatal_bar_loc = 0; + status = 0; + offset = (int) + ((char *)pm8001_ha->forensic_info.data_buf.direct_data + - (char *)buf); + pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: return3 0x%x\n", + offset); + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf; + } + if (pm8001_ha->forensic_fatal_step == 1) { + /* store previous accumulated length before triggering next + * accumulated length update + */ + pm8001_ha->forensic_preserved_accumulated_transfer = + pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); + + /* continue capturing the fatal log until Dump status is 0x3 */ + if (pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_STATUS) < + MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) { + + /* reset fddstat bit by writing to zero*/ + pm8001_mw32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_STATUS, 0x0); + + /* set dump control value to '1' so that new data will + * be transferred to shared memory + */ + pm8001_mw32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_HANDSHAKE, + MPI_FATAL_EDUMP_HANDSHAKE_RDY); + + /*Poll FDDHSHK until clear */ + start = jiffies + (2 * HZ); /* 2 sec */ + + do { + reg_val = pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_HANDSHAKE); + } while ((reg_val) && time_before(jiffies, start)); + + if (reg_val != 0) { + pm8001_dbg(pm8001_ha, FAIL, + "TIMEOUT:MPI_FATAL_EDUMP_TABLE_HDSHAKE 0x%x\n", + reg_val); + /* Fail the dump if a timeout occurs */ + 
pm8001_ha->forensic_info.data_buf.direct_data += + sprintf( + pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 0xFFFFFFFF); + return((char *) + pm8001_ha->forensic_info.data_buf.direct_data + - (char *)buf); + } + /* Poll status register until set to 2 or + * 3 for up to 2 seconds + */ + start = jiffies + (2 * HZ); /* 2 sec */ + + do { + reg_val = pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_STATUS); + } while (((reg_val != 2) && (reg_val != 3)) && + time_before(jiffies, start)); + + if (reg_val < 2) { + pm8001_dbg(pm8001_ha, FAIL, + "TIMEOUT:MPI_FATAL_EDUMP_TABLE_STATUS = 0x%x\n", + reg_val); + /* Fail the dump if a timeout occurs */ + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf( + pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 0xFFFFFFFF); + return((char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf); + } + /* reset fatal_forensic_shift_offset back to zero and reset MEMBASE 2 register to zero */ + pm8001_ha->fatal_forensic_shift_offset = 0; /* location in 64k region */ + pm8001_cw32(pm8001_ha, 0, + MEMBASE_II_SHIFT_REGISTER, + pm8001_ha->fatal_forensic_shift_offset); + } + /* Read the next block of the debug data.*/ + length_to_read = pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_ACCUM_LEN) - + pm8001_ha->forensic_preserved_accumulated_transfer; + if (length_to_read != 0x0) { + pm8001_ha->forensic_fatal_step = 0; + goto moreData; + } else { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 4); + pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF; + pm8001_ha->forensic_info.data_buf.direct_len = 0; + pm8001_ha->forensic_info.data_buf.direct_offset = 0; + pm8001_ha->forensic_info.data_buf.read_len = 0; + } + } + offset = (int)((char *)pm8001_ha->forensic_info.data_buf.direct_data + - (char *)buf); + pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: return4 0x%x\n", offset); + return ((char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf); +} + +/* pm80xx_get_non_fatal_dump - dump the nonfatal data from the dma + * location by the firmware. + */ +ssize_t pm80xx_get_non_fatal_dump(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + void __iomem *nonfatal_table_address = pm8001_ha->fatal_tbl_addr; + u32 accum_len = 0; + u32 total_len = 0; + u32 reg_val = 0; + u32 *temp = NULL; + u32 index = 0; + u32 output_length; + unsigned long start = 0; + char *buf_copy = buf; + + temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr; + if (++pm8001_ha->non_fatal_count == 1) { + if (pm8001_ha->chip_id == chip_8001) { + snprintf(pm8001_ha->forensic_info.data_buf.direct_data, + PAGE_SIZE, "Not supported for SPC controller"); + return 0; + } + pm8001_dbg(pm8001_ha, IO, "forensic_info TYPE_NON_FATAL...\n"); + /* + * Step 1: Write the host buffer parameters in the MPI Fatal and + * Non-Fatal Error Dump Capture Table.This is the buffer + * where debug data will be DMAed to. 
+ */ + pm8001_mw32(nonfatal_table_address, + MPI_FATAL_EDUMP_TABLE_LO_OFFSET, + pm8001_ha->memoryMap.region[FORENSIC_MEM].phys_addr_lo); + + pm8001_mw32(nonfatal_table_address, + MPI_FATAL_EDUMP_TABLE_HI_OFFSET, + pm8001_ha->memoryMap.region[FORENSIC_MEM].phys_addr_hi); + + pm8001_mw32(nonfatal_table_address, + MPI_FATAL_EDUMP_TABLE_LENGTH, SYSFS_OFFSET); + + /* Optionally, set the DUMPCTRL bit to 1 if the host + * keeps sending active I/Os while capturing the non-fatal + * debug data. Otherwise, leave this bit set to zero + */ + pm8001_mw32(nonfatal_table_address, + MPI_FATAL_EDUMP_TABLE_HANDSHAKE, MPI_FATAL_EDUMP_HANDSHAKE_RDY); + + /* + * Step 2: Clear Accumulative Length of Debug Data Transferred + * [ACCDDLEN] field in the MPI Fatal and Non-Fatal Error Dump + * Capture Table to zero. + */ + pm8001_mw32(nonfatal_table_address, + MPI_FATAL_EDUMP_TABLE_ACCUM_LEN, 0); + + /* initialize previous accumulated length to 0 */ + pm8001_ha->forensic_preserved_accumulated_transfer = 0; + pm8001_ha->non_fatal_read_length = 0; + } + + total_len = pm8001_mr32(nonfatal_table_address, + MPI_FATAL_EDUMP_TABLE_TOTAL_LEN); + /* + * Step 3: Clear Fatal/Non-Fatal Debug Data Transfer Status [FDDTSTAT] + * field and then request that the SPCv controller transfer the debug + * data by setting bit 7 of the Inbound Doorbell Set Register. + */ + pm8001_mw32(nonfatal_table_address, MPI_FATAL_EDUMP_TABLE_STATUS, 0); + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, + SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP); + + /* + * Step 4.1: Read back the Inbound Doorbell Set Register (by polling for + * 2 seconds) until register bit 7 is cleared. + * This step only indicates the request is accepted by the controller. + */ + start = jiffies + (2 * HZ); /* 2 sec */ + do { + reg_val = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET) & + SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP; + } while ((reg_val != 0) && time_before(jiffies, start)); + + /* Step 4.2: To check the completion of the transfer, poll the Fatal/Non + * Fatal Debug Data Transfer Status [FDDTSTAT] field for 2 seconds in + * the MPI Fatal and Non-Fatal Error Dump Capture Table. 
+ */ + start = jiffies + (2 * HZ); /* 2 sec */ + do { + reg_val = pm8001_mr32(nonfatal_table_address, + MPI_FATAL_EDUMP_TABLE_STATUS); + } while ((!reg_val) && time_before(jiffies, start)); + + if ((reg_val == 0x00) || + (reg_val == MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED) || + (reg_val > MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE)) { + pm8001_ha->non_fatal_read_length = 0; + buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 0xFFFFFFFF); + pm8001_ha->non_fatal_count = 0; + return (buf_copy - buf); + } else if (reg_val == + MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA) { + buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 2); + } else if ((reg_val == MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) || + (pm8001_ha->non_fatal_read_length >= total_len)) { + pm8001_ha->non_fatal_read_length = 0; + buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 4); + pm8001_ha->non_fatal_count = 0; + } + accum_len = pm8001_mr32(nonfatal_table_address, + MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); + output_length = accum_len - + pm8001_ha->forensic_preserved_accumulated_transfer; + + for (index = 0; index < output_length/4; index++) + buf_copy += snprintf(buf_copy, PAGE_SIZE, + "%08x ", *(temp+index)); + + pm8001_ha->non_fatal_read_length += output_length; + + /* store current accumulated length to use in next iteration as + * the previous accumulated length + */ + pm8001_ha->forensic_preserved_accumulated_transfer = accum_len; + return (buf_copy - buf); +} + +/** + * read_main_config_table - read the configure table and save it. + * @pm8001_ha: our hba card information + */ +static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature = + pm8001_mr32(address, MAIN_SIGNATURE_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev = + pm8001_mr32(address, MAIN_INTERFACE_REVISION); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev = + pm8001_mr32(address, MAIN_FW_REVISION); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io = + pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl = + pm8001_mr32(address, MAIN_MAX_SGL_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag = + pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset = + pm8001_mr32(address, MAIN_GST_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset = + pm8001_mr32(address, MAIN_IBQ_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset = + pm8001_mr32(address, MAIN_OBQ_OFFSET); + + /* read Error Dump Offset and Length */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); + + /* read GPIO LED settings from the configuration table */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping = + pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET); + + /* read analog Setting offset from the configuration table */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset = + pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset = + pm8001_mr32(address, 
MAIN_INT_VECTOR_TABLE_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset = + pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET); + /* read port recover and reset timeout */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer = + pm8001_mr32(address, MAIN_PORT_RECOVERY_TIMER); + /* read ILA and inactive firmware version */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version = + pm8001_mr32(address, MAIN_MPI_ILA_RELEASE_TYPE); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version = + pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION); + + pm8001_dbg(pm8001_ha, DEV, + "Main cfg table: sign:%x interface rev:%x fw_rev:%x\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev); + + pm8001_dbg(pm8001_ha, DEV, + "table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset); + + pm8001_dbg(pm8001_ha, DEV, + "Main cfg table; ila rev:%x Inactive fw rev:%x\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version); +} + +/** + * read_general_status_table - read the general status table and save it. + * @pm8001_ha: our hba card information + */ +static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->general_stat_tbl_addr; + pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate = + pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0 = + pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1 = + pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt = + pm8001_mr32(address, GST_MSGUTCNT_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt = + pm8001_mr32(address, GST_IOPTCNT_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val = + pm8001_mr32(address, GST_GPIO_INPUT_VAL); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] = + pm8001_mr32(address, GST_RERRINFO_OFFSET0); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] = + pm8001_mr32(address, GST_RERRINFO_OFFSET1); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] = + pm8001_mr32(address, GST_RERRINFO_OFFSET2); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] = + pm8001_mr32(address, GST_RERRINFO_OFFSET3); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] = + pm8001_mr32(address, GST_RERRINFO_OFFSET4); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] = + pm8001_mr32(address, GST_RERRINFO_OFFSET5); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] = + pm8001_mr32(address, GST_RERRINFO_OFFSET6); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] = + pm8001_mr32(address, GST_RERRINFO_OFFSET7); +} +/** + * read_phy_attr_table - read the phy attribute table and save it. 
+ * @pm8001_ha: our hba card information + */ +static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->pspa_q_tbl_addr; + pm8001_ha->phy_attr_table.phystart1_16[0] = + pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[1] = + pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[2] = + pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[3] = + pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[4] = + pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[5] = + pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[6] = + pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[7] = + pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[8] = + pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[9] = + pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[10] = + pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[11] = + pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[12] = + pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[13] = + pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[14] = + pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[15] = + pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET); + + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET); + +} + +/** + * read_inbnd_queue_table - read the inbound 
queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_INB_NUM; i++) { + u32 offset = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(address, + (offset + IB_PIPCI_BAR))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET)); + } +} + +/** + * read_outbnd_queue_table - read the outbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { + u32 offset = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(address, + (offset + OB_CIPCI_BAR))); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET)); + } +} + +/** + * init_default_table_values - init the default table. + * @pm8001_ha: our hba card information + */ +static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) +{ + int i; + u32 offsetib, offsetob; + void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; + void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; + u32 ib_offset = pm8001_ha->ib_offset; + u32 ob_offset = pm8001_ha->ob_offset; + u32 ci_offset = pm8001_ha->ci_offset; + u32 pi_offset = pm8001_ha->pi_offset; + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01; + + /* Enable higher IQs and OQs, 32 to 63, bit 16 */ + if (pm8001_ha->max_q_num > 32) + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |= + 1 << 16; + /* Disable end to end CRC checking */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16); + + for (i = 0; i < pm8001_ha->max_q_num; i++) { + pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = + PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); + pm8001_ha->inbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[ib_offset + i].virt_ptr; + pm8001_ha->inbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[ib_offset + i].total_len; + pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = + pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = + pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].ci_virt = + pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr; + pm8001_write_32(pm8001_ha->inbnd_q_tbl[i].ci_virt, 0, 
0); + offsetib = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(addressib, + (offsetib + 0x14))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(addressib, (offsetib + 0x18)); + pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; + pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; + + pm8001_dbg(pm8001_ha, DEV, + "IQ %d pi_bar 0x%x pi_offset 0x%x\n", i, + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar, + pm8001_ha->inbnd_q_tbl[i].pi_offset); + } + for (i = 0; i < pm8001_ha->max_q_num; i++) { + pm8001_ha->outbnd_q_tbl[i].element_size_cnt = + PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); + pm8001_ha->outbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_lo; + pm8001_ha->outbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[ob_offset + i].virt_ptr; + pm8001_ha->outbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[ob_offset + i].total_len; + pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = + pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = + pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_lo; + /* interrupt vector based on oq */ + pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24); + pm8001_ha->outbnd_q_tbl[i].pi_virt = + pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr; + pm8001_write_32(pm8001_ha->outbnd_q_tbl[i].pi_virt, 0, 0); + offsetob = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(addressob, + offsetob + 0x14)); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(addressob, (offsetob + 0x18)); + pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0; + pm8001_ha->outbnd_q_tbl[i].producer_index = 0; + + pm8001_dbg(pm8001_ha, DEV, + "OQ %d ci_bar 0x%x ci_offset 0x%x\n", i, + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar, + pm8001_ha->outbnd_q_tbl[i].ci_offset); + } +} + +/** + * update_main_config_table - update the main default table to the HBA. 
+ * @pm8001_ha: our hba card information + */ +static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd); + pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr); + pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr); + pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size); + pm8001_mw32(address, MAIN_EVENT_LOG_OPTION, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity); + /* Update Fatal error interrupt vector */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |= + ((pm8001_ha->max_q_num - 1) << 8); + pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt); + pm8001_dbg(pm8001_ha, DEV, + "Updated Fatal error interrupt vector 0x%x\n", + pm8001_mr32(address, MAIN_FATAL_ERROR_INTERRUPT)); + + pm8001_mw32(address, MAIN_EVENT_CRC_CHECK, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump); + + /* SPCv specific */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF; + /* Set GPIOLED to 0x2 for LED indicator */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000; + pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping); + pm8001_dbg(pm8001_ha, DEV, + "Programming DW 0x21 in main cfg table with 0x%x\n", + pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET)); + + pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer); + pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay); + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &= 0xffff0000; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |= + PORT_RECOVERY_TIMEOUT; + if (pm8001_ha->chip_id == chip_8006) { + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &= + 0x0000ffff; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |= + CHIP_8006_PORT_RECOVERY_TIMEOUT; + } + pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer); +} + +/** + * update_inbnd_queue_table - update the inbound queue table to the HBA. 
+ * @pm8001_ha: our hba card information + * @number: entry in the queue + */ +static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + u16 offset = number * 0x20; + pm8001_mw32(address, offset + IB_PROPERITY_OFFSET, + pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt); + pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET, + pm8001_ha->inbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET, + pm8001_ha->inbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET, + pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr); + pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET, + pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr); + + pm8001_dbg(pm8001_ha, DEV, + "IQ %d: Element pri size 0x%x\n", + number, + pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt); + + pm8001_dbg(pm8001_ha, DEV, + "IQ upr base addr 0x%x IQ lwr base addr 0x%x\n", + pm8001_ha->inbnd_q_tbl[number].upper_base_addr, + pm8001_ha->inbnd_q_tbl[number].lower_base_addr); + + pm8001_dbg(pm8001_ha, DEV, + "CI upper base addr 0x%x CI lower base addr 0x%x\n", + pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr, + pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr); +} + +/** + * update_outbnd_queue_table - update the outbound queue table to the HBA. + * @pm8001_ha: our hba card information + * @number: entry in the queue + */ +static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + u16 offset = number * 0x24; + pm8001_mw32(address, offset + OB_PROPERITY_OFFSET, + pm8001_ha->outbnd_q_tbl[number].element_size_cnt); + pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET, + pm8001_ha->outbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET, + pm8001_ha->outbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET, + pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr); + pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET, + pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr); + pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET, + pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay); + + pm8001_dbg(pm8001_ha, DEV, + "OQ %d: Element pri size 0x%x\n", + number, + pm8001_ha->outbnd_q_tbl[number].element_size_cnt); + + pm8001_dbg(pm8001_ha, DEV, + "OQ upr base addr 0x%x OQ lwr base addr 0x%x\n", + pm8001_ha->outbnd_q_tbl[number].upper_base_addr, + pm8001_ha->outbnd_q_tbl[number].lower_base_addr); + + pm8001_dbg(pm8001_ha, DEV, + "PI upper base addr 0x%x PI lower base addr 0x%x\n", + pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr, + pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr); +} + +/** + * mpi_init_check - check firmware initialization status. 
+ * @pm8001_ha: our hba card information + */ +static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + + /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the + table is updated */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE); + /* wait until Inbound DoorBell Clear Register toggled */ + if (IS_SPCV_12G(pm8001_ha->pdev)) { + max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT; + } else { + max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT; + } + do { + msleep(FW_READY_INTERVAL); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPCv_MSGU_CFG_TABLE_UPDATE; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) { + /* additional check */ + pm8001_dbg(pm8001_ha, FAIL, + "Inb doorbell clear not toggled[value:%x]\n", + value); + return -EBUSY; + } + /* check the MPI-State for initialization up to 100ms*/ + max_wait_count = 5;/* 100 msec */ + do { + msleep(FW_READY_INTERVAL); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + } while ((GST_MPI_STATE_INIT != + (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count)); + if (!max_wait_count) + return -EBUSY; + + /* check MPI Initialization error */ + gst_len_mpistate = gst_len_mpistate >> 16; + if (0x0000 != gst_len_mpistate) + return -EBUSY; + + /* + * As per controller datasheet, after successful MPI + * initialization minimum 500ms delay is required before + * issuing commands. + */ + msleep(500); + + return 0; +} + +/** + * check_fw_ready - The LLDD check if the FW is ready, if not, return error. + * This function sleeps hence it must not be used in atomic context. + * @pm8001_ha: our hba card information + */ +static int check_fw_ready(struct pm8001_hba_info *pm8001_ha) +{ + u32 value; + u32 max_wait_count; + u32 max_wait_time; + u32 expected_mask; + int ret = 0; + + /* reset / PCIe ready */ + max_wait_time = max_wait_count = 5; /* 100 milli sec */ + do { + msleep(FW_READY_INTERVAL); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while ((value == 0xFFFFFFFF) && (--max_wait_count)); + + /* check ila, RAAE and iops status */ + if ((pm8001_ha->chip_id != chip_8008) && + (pm8001_ha->chip_id != chip_8009)) { + max_wait_time = max_wait_count = 180; /* 3600 milli sec */ + expected_mask = SCRATCH_PAD_ILA_READY | + SCRATCH_PAD_RAAE_READY | + SCRATCH_PAD_IOP0_READY | + SCRATCH_PAD_IOP1_READY; + } else { + max_wait_time = max_wait_count = 170; /* 3400 milli sec */ + expected_mask = SCRATCH_PAD_ILA_READY | + SCRATCH_PAD_RAAE_READY | + SCRATCH_PAD_IOP0_READY; + } + do { + msleep(FW_READY_INTERVAL); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & expected_mask) != + expected_mask) && (--max_wait_count)); + if (!max_wait_count) { + pm8001_dbg(pm8001_ha, INIT, + "At least one FW component failed to load within %d millisec: Scratchpad1: 0x%x\n", + max_wait_time * FW_READY_INTERVAL, value); + ret = -1; + } else { + pm8001_dbg(pm8001_ha, MSG, + "All FW components ready by %d ms\n", + (max_wait_time - max_wait_count) * FW_READY_INTERVAL); + } + return ret; +} + +static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *base_addr; + u32 value; + u32 offset; + u32 pcibar; + u32 pcilogic; + + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); + + /* + * lower 26 bits of SCRATCHPAD0 register describes offset within the + * PCIe BAR where the MPI configuration table is present + */ + offset = value & 0x03FFFFFF; 
/* scratch pad 0 TBL address */ + + pm8001_dbg(pm8001_ha, DEV, "Scratchpad 0 Offset: 0x%x value 0x%x\n", + offset, value); + /* + * Upper 6 bits describe the offset within PCI config space where BAR + * is located. + */ + pcilogic = (value & 0xFC000000) >> 26; + pcibar = get_pci_bar_index(pcilogic); + pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar); + + /* + * Make sure the offset falls inside the ioremapped PCI BAR + */ + if (offset > pm8001_ha->io_mem[pcibar].memsize) { + pm8001_dbg(pm8001_ha, FAIL, + "Main cfg tbl offset outside %u > %u\n", + offset, pm8001_ha->io_mem[pcibar].memsize); + return -EBUSY; + } + pm8001_ha->main_cfg_tbl_addr = base_addr = + pm8001_ha->io_mem[pcibar].memvirtaddr + offset; + + /* + * Validate main configuration table address: first DWord should read + * "PMCS" + */ + value = pm8001_mr32(pm8001_ha->main_cfg_tbl_addr, 0); + if (memcmp(&value, "PMCS", 4) != 0) { + pm8001_dbg(pm8001_ha, FAIL, + "BAD main config signature 0x%x\n", + value); + return -EBUSY; + } + pm8001_dbg(pm8001_ha, INIT, + "VALID main config signature 0x%x\n", value); + pm8001_ha->general_stat_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) & + 0xFFFFFF); + pm8001_ha->inbnd_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) & + 0xFFFFFF); + pm8001_ha->outbnd_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) & + 0xFFFFFF); + pm8001_ha->ivt_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) & + 0xFFFFFF); + pm8001_ha->pspa_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) & + 0xFFFFFF); + pm8001_ha->fatal_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) & + 0xFFFFFF); + + pm8001_dbg(pm8001_ha, INIT, "GST OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18)); + pm8001_dbg(pm8001_ha, INIT, "INBND OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C)); + pm8001_dbg(pm8001_ha, INIT, "OBND OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20)); + pm8001_dbg(pm8001_ha, INIT, "IVT OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C)); + pm8001_dbg(pm8001_ha, INIT, "PSPA OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x90)); + pm8001_dbg(pm8001_ha, INIT, "addr - main cfg %p general status %p\n", + pm8001_ha->main_cfg_tbl_addr, + pm8001_ha->general_stat_tbl_addr); + pm8001_dbg(pm8001_ha, INIT, "addr - inbnd %p obnd %p\n", + pm8001_ha->inbnd_q_tbl_addr, + pm8001_ha->outbnd_q_tbl_addr); + pm8001_dbg(pm8001_ha, INIT, "addr - pspa %p ivt %p\n", + pm8001_ha->pspa_q_tbl_addr, + pm8001_ha->ivt_tbl_addr); + return 0; +} + +/** + * pm80xx_set_thermal_config - support the thermal configuration + * @pm8001_ha: our hba card information. + */ +int +pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha) +{ + struct set_ctrl_cfg_req payload; + int rc; + u32 tag; + u32 opc = OPC_INB_SET_CONTROLLER_CONFIG; + u32 page_code; + + memset(&payload, 0, sizeof(struct set_ctrl_cfg_req)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + + payload.tag = cpu_to_le32(tag); + + if (IS_SPCV_12G(pm8001_ha->pdev)) + page_code = THERMAL_PAGE_CODE_7H; + else + page_code = THERMAL_PAGE_CODE_8H; + + payload.cfg_pg[0] = + cpu_to_le32((THERMAL_LOG_ENABLE << 9) | + (THERMAL_ENABLE << 8) | page_code); + payload.cfg_pg[1] = + cpu_to_le32((LTEMPHIL << 24) | (RTEMPHIL << 8)); + + pm8001_dbg(pm8001_ha, DEV, + "Setting up thermal config. 
cfg_pg 0 0x%x cfg_pg 1 0x%x\n", + payload.cfg_pg[0], payload.cfg_pg[1]); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + return rc; + +} + +/** +* pm80xx_set_sas_protocol_timer_config - support the SAS Protocol +* Timer configuration page +* @pm8001_ha: our hba card information. +*/ +static int +pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha) +{ + struct set_ctrl_cfg_req payload; + SASProtocolTimerConfig_t SASConfigPage; + int rc; + u32 tag; + u32 opc = OPC_INB_SET_CONTROLLER_CONFIG; + + memset(&payload, 0, sizeof(struct set_ctrl_cfg_req)); + memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t)); + + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + + payload.tag = cpu_to_le32(tag); + + SASConfigPage.pageCode = cpu_to_le32(SAS_PROTOCOL_TIMER_CONFIG_PAGE); + SASConfigPage.MST_MSI = cpu_to_le32(3 << 15); + SASConfigPage.STP_SSP_MCT_TMO = + cpu_to_le32((STP_MCT_TMO << 16) | SSP_MCT_TMO); + SASConfigPage.STP_FRM_TMO = + cpu_to_le32((SAS_MAX_OPEN_TIME << 24) | + (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER); + SASConfigPage.STP_IDLE_TMO = cpu_to_le32(STP_IDLE_TIME); + + SASConfigPage.OPNRJT_RTRY_INTVL = + cpu_to_le32((SAS_MFD << 16) | SAS_OPNRJT_RTRY_INTVL); + SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = + cpu_to_le32((SAS_DOPNRJT_RTRY_TMO << 16) | SAS_COPNRJT_RTRY_TMO); + SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = + cpu_to_le32((SAS_DOPNRJT_RTRY_THR << 16) | SAS_COPNRJT_RTRY_THR); + SASConfigPage.MAX_AIP = cpu_to_le32(SAS_MAX_AIP); + + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.pageCode 0x%08x\n", + le32_to_cpu(SASConfigPage.pageCode)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MST_MSI 0x%08x\n", + le32_to_cpu(SASConfigPage.MST_MSI)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_SSP_MCT_TMO 0x%08x\n", + le32_to_cpu(SASConfigPage.STP_SSP_MCT_TMO)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_FRM_TMO 0x%08x\n", + le32_to_cpu(SASConfigPage.STP_FRM_TMO)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_IDLE_TMO 0x%08x\n", + le32_to_cpu(SASConfigPage.STP_IDLE_TMO)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.OPNRJT_RTRY_INTVL 0x%08x\n", + le32_to_cpu(SASConfigPage.OPNRJT_RTRY_INTVL)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO 0x%08x\n", + le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR 0x%08x\n", + le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MAX_AIP 0x%08x\n", + le32_to_cpu(SASConfigPage.MAX_AIP)); + + memcpy(&payload.cfg_pg, &SASConfigPage, + sizeof(SASProtocolTimerConfig_t)); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + + return rc; +} + +/** + * pm80xx_get_encrypt_info - Check for encryption + * @pm8001_ha: our hba card information. 
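+ *
+ * Return: 0 when the encryption state is read successfully (encryption ready
+ * or disabled), -1 when the controller reports an encryption error state.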
+ */ +static int +pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha) +{ + u32 scratch3_value; + int ret = -1; + + /* Read encryption status from SCRATCH PAD 3 */ + scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); + + if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_READY) { + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + pm8001_ha->encrypt_info.status = 0; + pm8001_dbg(pm8001_ha, INIT, + "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X.Cipher mode 0x%x Sec mode 0x%x status 0x%x\n", + scratch3_value, + pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status); + ret = 0; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) == + SCRATCH_PAD3_ENC_DISABLED) { + pm8001_dbg(pm8001_ha, INIT, + "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n", + scratch3_value); + pm8001_ha->encrypt_info.status = 0xFFFFFFFF; + pm8001_ha->encrypt_info.cipher_mode = 0; + pm8001_ha->encrypt_info.sec_mode = 0; + ret = 0; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_DIS_ERR) { + pm8001_ha->encrypt_info.status = + (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16; + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + pm8001_dbg(pm8001_ha, INIT, + "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X.Cipher mode 0x%x sec mode 0x%x status 0x%x\n", + scratch3_value, + pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status); + } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_ENA_ERR) { + + pm8001_ha->encrypt_info.status = + (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16; + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + + pm8001_dbg(pm8001_ha, INIT, + "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X.Cipher mode 0x%x sec mode 0x%x status 0x%x\n", + scratch3_value, + pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status); + } + return ret; +} + +/** + * pm80xx_encrypt_update - update flash with encryption information + * @pm8001_ha: our hba card information. 
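+ *
+ * Return: 0 if the KEK management command was queued to the firmware,
+ * otherwise the error code from tag allocation or command submission.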
+ */ +static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) +{ + struct kek_mgmt_req payload; + int rc; + u32 tag; + u32 opc = OPC_INB_KEK_MANAGEMENT; + + memset(&payload, 0, sizeof(struct kek_mgmt_req)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + + payload.tag = cpu_to_le32(tag); + /* Currently only one key is used. New KEK index is 1. + * Current KEK index is 1. Store KEK to NVRAM is 1. + */ + payload.new_curidx_ksop = + cpu_to_le32(((1 << 24) | (1 << 16) | (1 << 8) | + KEK_MGMT_SUBOP_KEYCARDUPDATE)); + + pm8001_dbg(pm8001_ha, DEV, + "Saving Encryption info to flash. payload 0x%x\n", + le32_to_cpu(payload.new_curidx_ksop)); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + + return rc; +} + +/** + * pm80xx_chip_init - the main init function that initializes whole PM8001 chip. + * @pm8001_ha: our hba card information + */ +static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha) +{ + int ret; + u8 i = 0; + + /* check the firmware status */ + if (-1 == check_fw_ready(pm8001_ha)) { + pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n"); + return -EBUSY; + } + + /* Initialize the controller fatal error flag */ + pm8001_ha->controller_fatal_error = false; + + /* Initialize pci space address eg: mpi offset */ + ret = init_pci_device_addresses(pm8001_ha); + if (ret) { + pm8001_dbg(pm8001_ha, FAIL, + "Failed to init pci addresses"); + return ret; + } + init_default_table_values(pm8001_ha); + read_main_config_table(pm8001_ha); + read_general_status_table(pm8001_ha); + read_inbnd_queue_table(pm8001_ha); + read_outbnd_queue_table(pm8001_ha); + read_phy_attr_table(pm8001_ha); + + /* update main config table ,inbound table and outbound table */ + update_main_config_table(pm8001_ha); + for (i = 0; i < pm8001_ha->max_q_num; i++) { + update_inbnd_queue_table(pm8001_ha, i); + update_outbnd_queue_table(pm8001_ha, i); + } + /* notify firmware update finished and check initialization status */ + if (0 == mpi_init_check(pm8001_ha)) { + pm8001_dbg(pm8001_ha, INIT, "MPI initialize successful!\n"); + } else + return -EBUSY; + + return 0; +} + +static void pm80xx_chip_post_init(struct pm8001_hba_info *pm8001_ha) +{ + /* send SAS protocol timer configuration page to FW */ + pm80xx_set_sas_protocol_timer_config(pm8001_ha); + + /* Check for encryption */ + if (pm8001_ha->chip->encrypt) { + int ret; + + pm8001_dbg(pm8001_ha, INIT, "Checking for encryption\n"); + ret = pm80xx_get_encrypt_info(pm8001_ha); + if (ret == -1) { + pm8001_dbg(pm8001_ha, INIT, "Encryption error !!\n"); + if (pm8001_ha->encrypt_info.status == 0x81) { + pm8001_dbg(pm8001_ha, INIT, + "Encryption enabled with error.Saving encryption key to flash\n"); + pm80xx_encrypt_update(pm8001_ha); + } + } + } +} + +static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + int ret; + + ret = init_pci_device_addresses(pm8001_ha); + if (ret) { + pm8001_dbg(pm8001_ha, FAIL, + "Failed to init pci addresses"); + return ret; + } + + /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the + table is stop */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET); + + /* wait until Inbound DoorBell Clear Register toggled */ + if (IS_SPCV_12G(pm8001_ha->pdev)) { + max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT; + } else { + max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT; + } + do { + msleep(FW_READY_INTERVAL); + value = pm8001_cr32(pm8001_ha, 0, 
MSGU_IBDB_SET); + value &= SPCv_MSGU_CFG_TABLE_RESET; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) { + pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:IBDB value/=%x\n", value); + return -1; + } + + /* check the MPI-State for termination in progress */ + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 100; /* 2 sec for spcv/ve */ + do { + msleep(FW_READY_INTERVAL); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + if (GST_MPI_STATE_UNINIT == + (gst_len_mpistate & GST_MPI_STATE_MASK)) + break; + } while (--max_wait_count); + if (!max_wait_count) { + pm8001_dbg(pm8001_ha, FAIL, " TIME OUT MPI State = 0x%x\n", + gst_len_mpistate & GST_MPI_STATE_MASK); + return -1; + } + + return 0; +} + +/** + * pm80xx_fatal_errors - returns non-zero *ONLY* when fatal errors + * @pm8001_ha: our hba card information + * + * Fatal errors are recoverable only after a host reboot. + */ +int +pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha) +{ + int ret = 0; + u32 scratch_pad_rsvd0 = pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_RSVD_0); + u32 scratch_pad_rsvd1 = pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_RSVD_1); + u32 scratch_pad1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + u32 scratch_pad2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2); + u32 scratch_pad3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); + + if (pm8001_ha->chip_id != chip_8006 && + pm8001_ha->chip_id != chip_8074 && + pm8001_ha->chip_id != chip_8076) { + return 0; + } + + if (MSGU_SCRATCHPAD1_STATE_FATAL_ERROR(scratch_pad1)) { + pm8001_dbg(pm8001_ha, FAIL, + "Fatal error SCRATCHPAD1 = 0x%x SCRATCHPAD2 = 0x%x SCRATCHPAD3 = 0x%x SCRATCHPAD_RSVD0 = 0x%x SCRATCHPAD_RSVD1 = 0x%x\n", + scratch_pad1, scratch_pad2, scratch_pad3, + scratch_pad_rsvd0, scratch_pad_rsvd1); + ret = 1; + } + + return ret; +} + +/** + * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that all + * FW register status are reset to the originated status. + * @pm8001_ha: our hba card information + */ + +static int +pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 regval; + u32 bootloader_state; + u32 ibutton0, ibutton1; + + /* Process MPI table uninitialization only if FW is ready */ + if (!pm8001_ha->controller_fatal_error) { + /* Check if MPI is in ready state to reset */ + if (mpi_uninit_check(pm8001_ha) != 0) { + u32 r0 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); + u32 r1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + u32 r2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2); + u32 r3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); + pm8001_dbg(pm8001_ha, FAIL, + "MPI state is not ready scratch: %x:%x:%x:%x\n", + r0, r1, r2, r3); + /* if things aren't ready but the bootloader is ok then + * try the reset anyway. 
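+ * Only bail out when the SCRATCH_PAD1 bootloader state bits are set.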
+ */ + if (r1 & SCRATCH_PAD1_BOOTSTATE_MASK) + return -1; + } + } + /* checked for reset register normal state; 0x0 */ + regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); + pm8001_dbg(pm8001_ha, INIT, "reset register before write : 0x%x\n", + regval); + + pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE); + msleep(500); + + regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); + pm8001_dbg(pm8001_ha, INIT, "reset register after write 0x%x\n", + regval); + + if ((regval & SPCv_SOFT_RESET_READ_MASK) == + SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) { + pm8001_dbg(pm8001_ha, MSG, + " soft reset successful [regval: 0x%x]\n", + regval); + } else { + pm8001_dbg(pm8001_ha, MSG, + " soft reset failed [regval: 0x%x]\n", + regval); + + /* check bootloader is successfully executed or in HDA mode */ + bootloader_state = + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) & + SCRATCH_PAD1_BOOTSTATE_MASK; + + if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) { + pm8001_dbg(pm8001_ha, MSG, + "Bootloader state - HDA mode SEEPROM\n"); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) { + pm8001_dbg(pm8001_ha, MSG, + "Bootloader state - HDA mode Bootstrap Pin\n"); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) { + pm8001_dbg(pm8001_ha, MSG, + "Bootloader state - HDA mode soft reset\n"); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) { + pm8001_dbg(pm8001_ha, MSG, + "Bootloader state-HDA mode critical error\n"); + } + return -EBUSY; + } + + /* check the firmware status after reset */ + if (-1 == check_fw_ready(pm8001_ha)) { + pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n"); + /* check iButton feature support for motherboard controller */ + if (pm8001_ha->pdev->subsystem_vendor != + PCI_VENDOR_ID_ADAPTEC2 && + pm8001_ha->pdev->subsystem_vendor != + PCI_VENDOR_ID_ATTO && + pm8001_ha->pdev->subsystem_vendor != 0) { + ibutton0 = pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_RSVD_0); + ibutton1 = pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_RSVD_1); + if (!ibutton0 && !ibutton1) { + pm8001_dbg(pm8001_ha, FAIL, + "iButton Feature is not Available!!!\n"); + return -EBUSY; + } + if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) { + pm8001_dbg(pm8001_ha, FAIL, + "CRC Check for iButton Feature Failed!!!\n"); + return -EBUSY; + } + } + } + pm8001_dbg(pm8001_ha, INIT, "SPCv soft reset Complete\n"); + return 0; +} + +static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 i; + + pm8001_dbg(pm8001_ha, INIT, "chip reset start\n"); + + /* do SPCv chip reset. 
*/ + pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11); + pm8001_dbg(pm8001_ha, INIT, "SPC soft reset Complete\n"); + + /* Check this ..whether delay is required or no */ + /* delay 10 usec */ + udelay(10); + + /* wait for 20 msec until the firmware gets reloaded */ + i = 20; + do { + mdelay(1); + } while ((--i) != 0); + + pm8001_dbg(pm8001_ha, INIT, "chip reset finished\n"); +} + +/** + * pm80xx_chip_intx_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL); + pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); +} + +/** + * pm80xx_chip_intx_interrupt_disable - disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL); +} + +/** + * pm80xx_chip_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + * @vec: interrupt number to enable + */ +static void +pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ +#ifdef PM8001_USE_MSIX + if (vec < 32) + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec); + else + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U, + 1U << (vec - 32)); + return; +#endif + pm80xx_chip_intx_interrupt_enable(pm8001_ha); + +} + +/** + * pm80xx_chip_interrupt_disable - disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + * @vec: interrupt number to disable + */ +static void +pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ +#ifdef PM8001_USE_MSIX + if (vec == 0xFF) { + /* disable all vectors 0-31, 32-63 */ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF); + } else if (vec < 32) + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec); + else + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, + 1U << (vec - 32)); + return; +#endif + pm80xx_chip_intx_interrupt_disable(pm8001_ha); +} + +/** + * mpi_ssp_completion - process the event that FW response to the SSP request. + * @pm8001_ha: our hba card information + * @piomb: the message contents of this outbound message. + * + * When FW has completed a ssp request for example a IO request, after it has + * filled the SG data with the data, it will trigger this event representing + * that he has finished the job; please check the corresponding buffer. + * So we will tell the caller who maybe waiting the result to tell upper layer + * that the task has been finished. 
+ */ +static void +mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 param; + u32 tag; + struct ssp_completion_resp *psspPayload; + struct task_status_struct *ts; + struct ssp_response_iu *iu; + struct pm8001_device *pm8001_dev; + psspPayload = (struct ssp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psspPayload->status); + tag = le32_to_cpu(psspPayload->tag); + ccb = &pm8001_ha->ccb_info[tag]; + if ((status == IO_ABORTED) && ccb->open_retry) { + /* Being completed by another */ + ccb->open_retry = 0; + return; + } + pm8001_dev = ccb->device; + param = le32_to_cpu(psspPayload->param); + t = ccb->task; + + if (status && status != IO_UNDERFLOW) + pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", status); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + + pm8001_dbg(pm8001_ha, DEV, + "tag::0x%x, status::0x%x task::0x%p\n", tag, status, t); + + /* Print sas address of IO failed device */ + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) + pm8001_dbg(pm8001_ha, FAIL, "SAS Address of IO Failure Drive:%016llx\n", + SAS_ADDR(t->dev->sas_addr)); + + switch (status) { + case IO_SUCCESS: + pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS ,param = 0x%x\n", + param); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_GOOD; + } else { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + iu = &psspPayload->ssp_resp_iu; + sas_ssp_task_response(pm8001_ha->dev, t, iu); + } + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_UNDERFLOW: + /* SSP Completion with error */ + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW ,param = 0x%x\n", + param); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_NO_DEVICE: + pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + /* Force the midlayer to retry */ + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_INVALID_SSP_RSP_FRAME: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_INVALID_SSP_RSP_FRAME\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case 
IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_DMA: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_PORT_IN_RESET: + pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_DS_NON_OPERATIONAL: + pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = 
SAS_OPEN_REJECT; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_DS_NON_OPERATIONAL); + break; + case IO_DS_IN_RECOVERY: + pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_TM_TAG_NOT_FOUND: + pm8001_dbg(pm8001_ha, IO, "IO_TM_TAG_NOT_FOUND\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_SSP_EXT_IU_ZERO_LEN_ERROR: + pm8001_dbg(pm8001_ha, IO, "IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status); + /* not allowed case. Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + } + pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n ", + psspPayload->ssp_resp_iu.status); + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, + "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + if (t->slow_task) + complete(&t->slow_task->completion); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + unsigned long flags; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct ssp_event_resp *psspPayload = + (struct ssp_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psspPayload->event); + u32 tag = le32_to_cpu(psspPayload->tag); + u32 port_id = le32_to_cpu(psspPayload->port_id); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + if (event) + pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", event); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + pm8001_dbg(pm8001_ha, IOERR, "port_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, tag, event); + switch (event) { + case IO_OVERFLOW: + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK); + return; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + 
"IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT); + return; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case 
IO_XFER_ERROR_INTERNAL_CRC_ERROR: + pm8001_dbg(pm8001_ha, IOERR, + "IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_CMD_FRAME_ISSUED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n"); + return; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event); + /* not allowed case. Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, + "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, event, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + u32 param; + u32 status; + u32 tag; + int i, j; + u8 sata_addr_low[4]; + u32 temp_sata_addr_low, temp_sata_addr_hi; + u8 sata_addr_hi[4]; + struct sata_completion_resp *psataPayload; + struct task_status_struct *ts; + struct ata_task_resp *resp ; + u32 *sata_resp; + struct pm8001_device *pm8001_dev; + unsigned long flags; + + psataPayload = (struct sata_completion_resp *)(piomb + 4); + status = le32_to_cpu(psataPayload->status); + param = le32_to_cpu(psataPayload->param); + tag = le32_to_cpu(psataPayload->tag); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + + if (t) { + if (t->dev && (t->dev->lldd_dev)) + pm8001_dev = t->dev->lldd_dev; + } else { + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); + return; + } + + + if (pm8001_dev && unlikely(!t->lldd_task || !t->dev)) + return; + + ts = &t->task_status; + + if (status != IO_SUCCESS) { + pm8001_dbg(pm8001_ha, FAIL, + "IO failed device_id %u status 0x%x tag %d\n", + pm8001_dev->device_id, status, tag); + } + + /* Print sas address of IO failed device */ + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) { + if (!((t->dev->parent) && + (dev_is_expander(t->dev->parent->dev_type)))) { + for (i = 0, j = 4; i <= 3 && j <= 7; i++, j++) + sata_addr_low[i] = pm8001_ha->sas_addr[j]; + for (i = 0, j = 0; i <= 3 && j <= 3; i++, j++) + sata_addr_hi[i] = pm8001_ha->sas_addr[j]; + memcpy(&temp_sata_addr_low, sata_addr_low, + sizeof(sata_addr_low)); + memcpy(&temp_sata_addr_hi, sata_addr_hi, + sizeof(sata_addr_hi)); + temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff) + |((temp_sata_addr_hi << 8) & + 0xff0000) | + ((temp_sata_addr_hi >> 8) + & 0xff00) | + ((temp_sata_addr_hi << 24) & + 0xff000000)); + temp_sata_addr_low = ((((temp_sata_addr_low >> 24) + & 0xff) | + ((temp_sata_addr_low << 8) + & 0xff0000) | + ((temp_sata_addr_low >> 8) + & 0xff00) | + ((temp_sata_addr_low << 24) + & 0xff000000)) + + pm8001_dev->attached_phy + + 0x10); + pm8001_dbg(pm8001_ha, FAIL, + "SAS Address of IO Failure Drive:%08x%08x\n", + temp_sata_addr_hi, + temp_sata_addr_low); + + } else { + pm8001_dbg(pm8001_ha, FAIL, + "SAS Address of 
IO Failure Drive:%016llx\n", + SAS_ADDR(t->dev->sas_addr)); + } + } + switch (status) { + case IO_SUCCESS: + pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n"); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_GOOD; + } else { + u8 len; + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + pm8001_dbg(pm8001_ha, IO, + "SAS_PROTO_RESPONSE len = %d\n", + param); + sata_resp = &psataPayload->sata_resp[0]; + resp = (struct ata_task_resp *)ts->buf; + if (t->ata_task.dma_xfer == 0 && + t->data_dir == DMA_FROM_DEVICE) { + len = sizeof(struct pio_setup_fis); + pm8001_dbg(pm8001_ha, IO, + "PIO read len = %d\n", len); + } else if (t->ata_task.use_ncq && + t->data_dir != DMA_NONE) { + len = sizeof(struct set_dev_bits_fis); + pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n", + len); + } else { + len = sizeof(struct dev_to_host_fis); + pm8001_dbg(pm8001_ha, IO, "other len = %d\n", + len); + } + if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { + resp->frame_len = len; + memcpy(&resp->ending_fis[0], sata_resp, len); + ts->buf_valid_size = sizeof(*resp); + } else + pm8001_dbg(pm8001_ha, IO, + "response too large\n"); + } + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + /* following cases are to do cases */ + case IO_UNDERFLOW: + /* SATA Completion with error */ + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW param = %d\n", param); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_NO_DEVICE: + pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case 
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + spin_unlock_irqrestore(&circularQ->oq_lock, + circularQ->lock_flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + spin_lock_irqsave(&circularQ->oq_lock, + circularQ->lock_flags); + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + spin_unlock_irqrestore(&circularQ->oq_lock, + circularQ->lock_flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + spin_lock_irqsave(&circularQ->oq_lock, + circularQ->lock_flags); + return; + } + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + spin_unlock_irqrestore(&circularQ->oq_lock, + circularQ->lock_flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + spin_lock_irqsave(&circularQ->oq_lock, + circularQ->lock_flags); + return; + } + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_DMA: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_SATA_LINK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, 
"IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_PORT_IN_RESET: + pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_DS_NON_OPERATIONAL: + pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_NON_OPERATIONAL); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + spin_unlock_irqrestore(&circularQ->oq_lock, + circularQ->lock_flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + spin_lock_irqsave(&circularQ->oq_lock, + circularQ->lock_flags); + return; + } + break; + case IO_DS_IN_RECOVERY: + pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_DS_IN_ERROR: + pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_ERROR\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_IN_ERROR); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + spin_unlock_irqrestore(&circularQ->oq_lock, + circularQ->lock_flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + spin_lock_irqsave(&circularQ->oq_lock, + circularQ->lock_flags); + return; + } + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, + "Unknown status device_id %u status 0x%x tag %d\n", + pm8001_dev->device_id, status, tag); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, + "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + if (t->slow_task) + complete(&t->slow_task->completion); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + spin_unlock_irqrestore(&circularQ->oq_lock, + circularQ->lock_flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + spin_lock_irqsave(&circularQ->oq_lock, + circularQ->lock_flags); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, void *piomb) +{ + struct sas_task *t; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct sata_event_resp *psataPayload = + (struct sata_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psataPayload->event); + u32 tag = le32_to_cpu(psataPayload->tag); + u32 port_id = le32_to_cpu(psataPayload->port_id); + u32 dev_id = le32_to_cpu(psataPayload->device_id); + + if (event) + pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event); + + /* Check if this is NCQ error */ + if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* find device using device id */ + pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); + /* send read log extension by aborting the link - libata does what we want */ + if (pm8001_dev) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_XFER_ERROR_ABORTED_NCQ_MODE); + return; + } + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + if (unlikely(!t)) { + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); + return; + } + + if (unlikely(!t->lldd_task || !t->dev)) + return; + + ts = &t->task_status; + pm8001_dbg(pm8001_ha, IOERR, "port_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, tag, event); + switch (event) { + case IO_OVERFLOW: + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = 
SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + pm8001_dbg(pm8001_ha, FAIL, + "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_PEER_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PEER_ABORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_CMD_FRAME_ISSUED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n"); + break; + case IO_XFER_PIO_SETUP_ERROR: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_PIO_SETUP_ERROR\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_INTERNAL_CRC_ERROR: + pm8001_dbg(pm8001_ha, FAIL, + "IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_DMA_ACTIVATE_TIMEOUT: + pm8001_dbg(pm8001_ha, FAIL, "IO_XFR_DMA_ACTIVATE_TIMEOUT\n"); + /* TBC: used default set values */ + ts->resp = 
SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + default: + pm8001_dbg(pm8001_ha, IO, "Unknown status 0x%x\n", event); + /* not allowed case. Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 param, i; + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 tag; + struct smp_completion_resp *psmpPayload; + struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; + + psmpPayload = (struct smp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psmpPayload->status); + tag = le32_to_cpu(psmpPayload->tag); + + ccb = &pm8001_ha->ccb_info[tag]; + param = le32_to_cpu(psmpPayload->param); + t = ccb->task; + ts = &t->task_status; + pm8001_dev = ccb->device; + if (status) + pm8001_dbg(pm8001_ha, FAIL, "smp IO status 0x%x\n", status); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + + pm8001_dbg(pm8001_ha, DEV, "tag::0x%x status::0x%x\n", tag, status); + + switch (status) { + + case IO_SUCCESS: + pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_GOOD; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { + struct scatterlist *sg_resp = &t->smp_task.smp_resp; + u8 *payload; + void *to; + + pm8001_dbg(pm8001_ha, IO, + "DIRECT RESPONSE Length:%d\n", + param); + to = kmap_atomic(sg_page(sg_resp)); + payload = to + sg_resp->offset; + for (i = 0; i < param; i++) { + *(payload + i) = psmpPayload->_r_a[i]; + pm8001_dbg(pm8001_ha, IO, + "SMP Byte%d DMA data 0x%x psmp 0x%x\n", + i, *(payload + i), + psmpPayload->_r_a[i]); + } + kunmap_atomic(to); + } + break; + case IO_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OVERFLOW: + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_NO_DEVICE: + pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PHY_DOWN; + break; + case IO_ERROR_HW_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_ERROR_HW_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_BUSY; + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_BUSY; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_BUSY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case 
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_RX_FRAME: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_RX_FRAME\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_ERROR_INTERNAL_SMP_RESOURCE: + pm8001_dbg(pm8001_ha, IO, "IO_ERROR_INTERNAL_SMP_RESOURCE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + break; + case IO_PORT_IN_RESET: + pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_DS_NON_OPERATIONAL: + pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_IN_RECOVERY: + pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + /* not allowed case. Therefore, return failed status */ + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, + "task 0x%p done with io_status 0x%x resp 0x%xstat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, ccb); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/** + * pm80xx_hw_event_ack_req- For PM8001, some events need to acknowledge to FW. 
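+ *
+ * Builds an OPC_INB_SAS_HW_EVENT_ACK IOMB carrying the source event,
+ * port id, phy id and the two event parameters, and posts it to the
+ * firmware on the requested queue.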
+ * @pm8001_ha: our hba card information + * @Qnum: the outbound queue message number. + * @SEA: source of event to ack + * @port_id: port id. + * @phyId: phy id. + * @param0: parameter 0. + * @param1: parameter 1. + */ +static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, + u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1) +{ + struct hw_event_ack_req payload; + u32 opc = OPC_INB_SAS_HW_EVENT_ACK; + + memset((u8 *)&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(1); + payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) | + ((phyId & 0xFF) << 24) | (port_id & 0xFF)); + payload.param0 = cpu_to_le32(param0); + payload.param1 = cpu_to_le32(param1); + + pm8001_mpi_build_cmd(pm8001_ha, Qnum, opc, &payload, + sizeof(payload), 0); +} + +static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op); + +static void hw_event_port_recover(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct hw_event_resp *pPayload = (struct hw_event_resp *)(piomb + 4); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 phy_id = (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u8 deviceType = pPayload->sas_identify.dev_type; + u8 link_rate = (u8)((lr_status_evt_portid & 0xF0000000) >> 28); + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + struct pm8001_port *port = &pm8001_ha->port[port_id]; + + if (deviceType == SAS_END_DEVICE) { + pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id, + PHY_NOTIFY_ENABLE_SPINUP); + } + + port->wide_port_phymap |= (1U << phy_id); + pm8001_get_lrate_mode(phy, link_rate); + phy->sas_phy.oob_mode = SAS_OOB_MODE; + phy->phy_state = PHY_STATE_LINK_UP_SPCV; + phy->phy_attached = 1; +} + +/** + * hw_event_sas_phy_up - FW tells me a SAS phy up event. 
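+ *
+ * Records the new port and phy state, requests notify-spinup for end
+ * devices, copies the received IDENTIFY address frame into the phy and
+ * reports the phy to libsas via pm8001_bytes_dmaed().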
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + + u8 link_rate = + (u8)((lr_status_evt_portid & 0xF0000000) >> 28); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + unsigned long flags; + u8 deviceType = pPayload->sas_identify.dev_type; + phy->port = port; + port->port_id = port_id; + port->port_state = portstate; + port->wide_port_phymap |= (1U << phy_id); + phy->phy_state = PHY_STATE_LINK_UP_SPCV; + pm8001_dbg(pm8001_ha, MSG, + "portid:%d; phyid:%d; linkrate:%d; portstate:%x; devicetype:%x\n", + port_id, phy_id, link_rate, portstate, deviceType); + + switch (deviceType) { + case SAS_PHY_UNUSED: + pm8001_dbg(pm8001_ha, MSG, "device type no device.\n"); + break; + case SAS_END_DEVICE: + pm8001_dbg(pm8001_ha, MSG, "end device.\n"); + pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id, + PHY_NOTIFY_ENABLE_SPINUP); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_EDGE_EXPANDER_DEVICE: + pm8001_dbg(pm8001_ha, MSG, "expander device.\n"); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_FANOUT_EXPANDER_DEVICE: + pm8001_dbg(pm8001_ha, MSG, "fanout expander device.\n"); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "unknown device type(%x)\n", + deviceType); + break; + } + phy->phy_type |= PORT_TYPE_SAS; + phy->identify.device_type = deviceType; + phy->phy_attached = 1; + if (phy->identify.device_type == SAS_END_DEVICE) + phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != SAS_PHY_UNUSED) + phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; + phy->sas_phy.oob_mode = SAS_OOB_MODE; + sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC); + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->frame_rcvd, &pPayload->sas_identify, + sizeof(struct sas_identify_frame)-4); + phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4; + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + if (pm8001_ha->flags == PM8001F_RUN_TIME) + mdelay(200); /* delay a moment to wait for disk to spin up */ + pm8001_bytes_dmaed(pm8001_ha, phy_id); +} + +/** + * hw_event_sata_phy_up - FW tells me a SATA phy up event. 
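+ *
+ * Records the new port and phy state, copies the received D2H register
+ * FIS into the phy and reports the SATA device to libsas via
+ * pm8001_bytes_dmaed().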
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u8 link_rate = + (u8)((lr_status_evt_portid & 0xF0000000) >> 28); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + unsigned long flags; + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_SATA_PHY_UP phyid:%#x port_id:%#x link_rate:%d portstate:%#x\n", + phy_id, port_id, link_rate, portstate); + + phy->port = port; + port->port_id = port_id; + port->port_state = portstate; + phy->phy_state = PHY_STATE_LINK_UP_SPCV; + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + phy->phy_type |= PORT_TYPE_SATA; + phy->phy_attached = 1; + phy->sas_phy.oob_mode = SATA_OOB_MODE; + sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC); + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4), + sizeof(struct dev_to_host_fis)); + phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); + phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; + phy->identify.device_type = SAS_SATA_DEV; + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + pm8001_bytes_dmaed(pm8001_ha, phy_id); +} + +/** + * hw_event_phy_down - we should notify the libsas the phy is down. 
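+ *
+ * Updates the port state and marks the phy as detached; for SATA phys
+ * the firmware is sent a HW_EVENT_PHY_DOWN acknowledgement when the
+ * port becomes invalid or loses communication, and libsas is told of
+ * the loss of signal unless the port is in reset.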
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + u32 port_sata = (phy->phy_type & PORT_TYPE_SATA); + port->port_state = portstate; + phy->identify.device_type = 0; + phy->phy_attached = 0; + switch (portstate) { + case PORT_VALID: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_VALID\n", + phy_id, port_id); + break; + case PORT_INVALID: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_INVALID\n", + phy_id, port_id); + pm8001_dbg(pm8001_ha, MSG, + " Last phy Down and port invalid\n"); + if (port_sata) { + phy->phy_type = 0; + port->port_attached = 0; + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + } + sas_phy_disconnected(&phy->sas_phy); + break; + case PORT_IN_RESET: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_IN_RESET\n", + phy_id, port_id); + break; + case PORT_NOT_ESTABLISHED: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_NOT_ESTABLISHED\n", + phy_id, port_id); + port->port_attached = 0; + break; + case PORT_LOSTCOMM: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_LOSTCOMM\n", + phy_id, port_id); + pm8001_dbg(pm8001_ha, MSG, " Last phy Down and port invalid\n"); + if (port_sata) { + port->port_attached = 0; + phy->phy_type = 0; + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + } + sas_phy_disconnected(&phy->sas_phy); + break; + default: + port->port_attached = 0; + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate:%#x\n", + phy_id, port_id, portstate); + break; + + } + if (port_sata && (portstate != PORT_IN_RESET)) + sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL, + GFP_ATOMIC); +} + +static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct phy_start_resp *pPayload = + (struct phy_start_resp *)(piomb + 4); + u32 status = + le32_to_cpu(pPayload->status); + u32 phy_id = + le32_to_cpu(pPayload->phyid) & 0xFF; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + + pm8001_dbg(pm8001_ha, INIT, + "phy start resp status:0x%x, phyid:0x%x\n", + status, phy_id); + if (status == 0) + phy->phy_state = PHY_LINK_DOWN; + + if (pm8001_ha->flags == PM8001F_RUN_TIME && + phy->enable_completion != NULL) { + complete(phy->enable_completion); + phy->enable_completion = NULL; + } + return 0; + +} + +/** + * mpi_thermal_hw_event - a thermal hw event has come. 
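+ *
+ * Decodes the thermal_event flags and logs the measured local and/or
+ * remote high-temperature readings carried in rht_lht.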
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct thermal_hw_event *pPayload = + (struct thermal_hw_event *)(piomb + 4); + + u32 thermal_event = le32_to_cpu(pPayload->thermal_event); + u32 rht_lht = le32_to_cpu(pPayload->rht_lht); + + if (thermal_event & 0x40) { + pm8001_dbg(pm8001_ha, IO, + "Thermal Event: Local high temperature violated!\n"); + pm8001_dbg(pm8001_ha, IO, + "Thermal Event: Measured local high temperature %d\n", + ((rht_lht & 0xFF00) >> 8)); + } + if (thermal_event & 0x10) { + pm8001_dbg(pm8001_ha, IO, + "Thermal Event: Remote high temperature violated!\n"); + pm8001_dbg(pm8001_ha, IO, + "Thermal Event: Measured remote high temperature %d\n", + ((rht_lht & 0xFF000000) >> 24)); + } + return 0; +} + +/** + * mpi_hw_event - The hw event has come. + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + unsigned long flags, i; + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + u16 eventType = + (u16)((lr_status_evt_portid & 0x00FFFF00) >> 8); + u8 status = + (u8)((lr_status_evt_portid & 0x0F000000) >> 24); + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + pm8001_dbg(pm8001_ha, DEV, + "portid:%d phyid:%d event:0x%x status:0x%x\n", + port_id, phy_id, eventType, status); + + switch (eventType) { + + case HW_EVENT_SAS_PHY_UP: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_SAS_PHY_UP phyid:%#x port_id:%#x\n", + phy_id, port_id); + hw_event_sas_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_PHY_UP: + hw_event_sata_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_SPINUP_HOLD: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_SATA_SPINUP_HOLD phyid:%#x port_id:%#x\n", + phy_id, port_id); + sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD, + GFP_ATOMIC); + break; + case HW_EVENT_PHY_DOWN: + hw_event_phy_down(pm8001_ha, piomb); + phy->phy_state = PHY_LINK_DISABLE; + break; + case HW_EVENT_PORT_INVALID: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PORT_INVALID phyid:%#x port_id:%#x\n", + phy_id, port_id); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + /* the broadcast change primitive received, tell the LIBSAS this event + to revalidate the sas domain*/ + case HW_EVENT_BROADCAST_CHANGE: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_CHANGE\n"); + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE, + port_id, phy_id, 1, 0); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, + GFP_ATOMIC); + break; + case HW_EVENT_PHY_ERROR: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_ERROR phyid:%#x port_id:%#x\n", + phy_id, port_id); + sas_phy_disconnected(&phy->sas_phy); + phy->phy_attached = 0; + 
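+ /* report the failed phy to libsas as an OOB error */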
sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC); + break; + case HW_EVENT_BROADCAST_EXP: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n"); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, + GFP_ATOMIC); + break; + case HW_EVENT_LINK_ERR_INVALID_DWORD: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_LINK_ERR_INVALID_DWORD phyid:%#x port_id:%#x\n", + phy_id, port_id); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0); + break; + case HW_EVENT_LINK_ERR_DISPARITY_ERROR: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_LINK_ERR_DISPARITY_ERROR phyid:%#x port_id:%#x\n", + phy_id, port_id); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_DISPARITY_ERROR, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_LINK_ERR_CODE_VIOLATION: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_LINK_ERR_CODE_VIOLATION phyid:%#x port_id:%#x\n", + phy_id, port_id); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_CODE_VIOLATION, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH phyid:%#x port_id:%#x\n", + phy_id, port_id); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_MALFUNCTION: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_MALFUNCTION phyid:%#x\n", phy_id); + break; + case HW_EVENT_BROADCAST_SES: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_SES\n"); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_SES; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, + GFP_ATOMIC); + break; + case HW_EVENT_INBOUND_CRC_ERROR: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_INBOUND_CRC_ERROR phyid:%#x port_id:%#x\n", + phy_id, port_id); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_INBOUND_CRC_ERROR, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_HARD_RESET_RECEIVED: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_HARD_RESET_RECEIVED phyid:%#x\n", phy_id); + sas_notify_port_event(sas_phy, PORTE_HARD_RESET, GFP_ATOMIC); + break; + case HW_EVENT_ID_FRAME_TIMEOUT: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_ID_FRAME_TIMEOUT phyid:%#x\n", phy_id); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_LINK_ERR_PHY_RESET_FAILED phyid:%#x port_id:%#x\n", + phy_id, port_id); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_PHY_RESET_FAILED, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_PORT_RESET_TIMER_TMO: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PORT_RESET_TIMER_TMO phyid:%#x port_id:%#x portstate:%#x\n", + phy_id, port_id, portstate); + if (!pm8001_ha->phy[phy_id].reset_completion) { + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + } + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + port->port_state = portstate; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + if (pm8001_ha->phy[phy_id].reset_completion) { + 
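+ /* a port reset initiator is waiting on this phy: record the
+ * timeout status and wake it up */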
pm8001_ha->phy[phy_id].port_reset_status = + PORT_RESET_TMO; + complete(pm8001_ha->phy[phy_id].reset_completion); + pm8001_ha->phy[phy_id].reset_completion = NULL; + } + break; + case HW_EVENT_PORT_RECOVERY_TIMER_TMO: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PORT_RECOVERY_TIMER_TMO phyid:%#x port_id:%#x\n", + phy_id, port_id); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_PORT_RECOVERY_TIMER_TMO, + port_id, phy_id, 0, 0); + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + if (port->wide_port_phymap & (1 << i)) { + phy = &pm8001_ha->phy[i]; + sas_notify_phy_event(&phy->sas_phy, + PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC); + port->wide_port_phymap &= ~(1 << i); + } + } + break; + case HW_EVENT_PORT_RECOVER: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PORT_RECOVER phyid:%#x port_id:%#x\n", + phy_id, port_id); + hw_event_port_recover(pm8001_ha, piomb); + break; + case HW_EVENT_PORT_RESET_COMPLETE: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PORT_RESET_COMPLETE phyid:%#x port_id:%#x portstate:%#x\n", + phy_id, port_id, portstate); + if (pm8001_ha->phy[phy_id].reset_completion) { + pm8001_ha->phy[phy_id].port_reset_status = + PORT_RESET_SUCCESS; + complete(pm8001_ha->phy[phy_id].reset_completion); + pm8001_ha->phy[phy_id].reset_completion = NULL; + } + phy->phy_attached = 1; + phy->phy_state = PHY_STATE_LINK_UP_SPCV; + port->port_state = portstate; + break; + case EVENT_BROADCAST_ASYNCH_EVENT: + pm8001_dbg(pm8001_ha, MSG, "EVENT_BROADCAST_ASYNCH_EVENT\n"); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, + "Unknown event portid:%d phyid:%d event:0x%x status:0x%x\n", + port_id, phy_id, eventType, status); + break; + } + return 0; +} + +/** + * mpi_phy_stop_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct phy_stop_resp *pPayload = + (struct phy_stop_resp *)(piomb + 4); + u32 status = + le32_to_cpu(pPayload->status); + u32 phyid = + le32_to_cpu(pPayload->phyid) & 0xFF; + struct pm8001_phy *phy = &pm8001_ha->phy[phyid]; + pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x\n", + phyid, status); + if (status == PHY_STOP_SUCCESS || + status == PHY_STOP_ERR_DEVICE_ATTACHED) { + phy->phy_state = PHY_LINK_DISABLE; + phy->sas_phy.phy->negotiated_linkrate = SAS_PHY_DISABLED; + phy->sas_phy.linkrate = SAS_PHY_DISABLED; + } + + return 0; +} + +/** + * mpi_set_controller_config_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct set_ctrl_cfg_resp *pPayload = + (struct set_ctrl_cfg_resp *)(piomb + 4); + u32 status = le32_to_cpu(pPayload->status); + u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd); + u32 tag = le32_to_cpu(pPayload->tag); + + pm8001_dbg(pm8001_ha, MSG, + "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n", + status, err_qlfr_pgcd); + pm8001_tag_free(pm8001_ha, tag); + + return 0; +} + +/** + * mpi_get_controller_config_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n"); + + return 0; +} + +/** + * mpi_get_phy_profile_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + 
pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n"); + + return 0; +} + +/** + * mpi_flash_op_ext_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n"); + + return 0; +} + +/** + * mpi_set_phy_profile_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + u32 tag; + u8 page_code; + int rc = 0; + struct set_phy_profile_resp *pPayload = + (struct set_phy_profile_resp *)(piomb + 4); + u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid); + u32 status = le32_to_cpu(pPayload->status); + + tag = le32_to_cpu(pPayload->tag); + page_code = (u8)((ppc_phyid & 0xFF00) >> 8); + if (status) { + /* status is FAILED */ + pm8001_dbg(pm8001_ha, FAIL, + "PhyProfile command failed with status 0x%08X\n", + status); + rc = -1; + } else { + if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) { + pm8001_dbg(pm8001_ha, FAIL, "Invalid page code 0x%X\n", + page_code); + rc = -1; + } + } + pm8001_tag_free(pm8001_ha, tag); + return rc; +} + +/** + * mpi_kek_management_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4); + + u32 status = le32_to_cpu(pPayload->status); + u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop); + u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr); + + pm8001_dbg(pm8001_ha, MSG, + "KEK MGMT RESP. Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n", + status, kidx_new_curr_ksop, err_qlfr); + + return 0; +} + +/** + * mpi_dek_management_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n"); + + return 0; +} + +/** + * ssp_coalesced_comp_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n"); + + return 0; +} + +/** + * process_one_iomb - process one outbound Queue memory block + * @pm8001_ha: our hba card information + * @circularQ: outbound circular queue + * @piomb: IO message buffer + */ +static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, void *piomb) +{ + __le32 pHeader = *(__le32 *)piomb; + u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF); + + switch (opc) { + case OPC_OUB_ECHO: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_ECHO\n"); + break; + case OPC_OUB_HW_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_HW_EVENT\n"); + mpi_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_THERM_HW_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_THERMAL_EVENT\n"); + mpi_thermal_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_COMP\n"); + mpi_ssp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SMP_COMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_COMP\n"); + mpi_smp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_LOCAL_PHY_CNTRL: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_LOCAL_PHY_CNTRL\n"); 
+ pm8001_mpi_local_phy_ctl(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_REGIST: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_REGIST\n"); + pm8001_mpi_reg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEREG_DEV: + pm8001_dbg(pm8001_ha, MSG, "unregister the device\n"); + pm8001_mpi_dereg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEV_HANDLE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEV_HANDLE\n"); + break; + case OPC_OUB_SATA_COMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_COMP\n"); + mpi_sata_completion(pm8001_ha, circularQ, piomb); + break; + case OPC_OUB_SATA_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_EVENT\n"); + mpi_sata_event(pm8001_ha, circularQ, piomb); + break; + case OPC_OUB_SSP_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_EVENT\n"); + mpi_ssp_event(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_HANDLE_ARRIV: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_HANDLE_ARRIV\n"); + /*This is for target*/ + break; + case OPC_OUB_SSP_RECV_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_RECV_EVENT\n"); + /*This is for target*/ + break; + case OPC_OUB_FW_FLASH_UPDATE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_FW_FLASH_UPDATE\n"); + pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GPIO_RESPONSE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_RESPONSE\n"); + break; + case OPC_OUB_GPIO_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_EVENT\n"); + break; + case OPC_OUB_GENERAL_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GENERAL_EVENT\n"); + pm8001_mpi_general_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_ABORT_RSP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_ABORT_RSP\n"); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_ABORT_RSP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_ABORT_RSP\n"); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SAS_DIAG_MODE_START_END: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_SAS_DIAG_MODE_START_END\n"); + break; + case OPC_OUB_SAS_DIAG_EXECUTE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_DIAG_EXECUTE\n"); + break; + case OPC_OUB_GET_TIME_STAMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_TIME_STAMP\n"); + break; + case OPC_OUB_SAS_HW_EVENT_ACK: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_HW_EVENT_ACK\n"); + break; + case OPC_OUB_PORT_CONTROL: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_PORT_CONTROL\n"); + break; + case OPC_OUB_SMP_ABORT_RSP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_ABORT_RSP\n"); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_NVMD_DATA: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_NVMD_DATA\n"); + pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_NVMD_DATA: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_NVMD_DATA\n"); + pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEVICE_HANDLE_REMOVAL: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEVICE_HANDLE_REMOVAL\n"); + break; + case OPC_OUB_SET_DEVICE_STATE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEVICE_STATE\n"); + pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEVICE_STATE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEVICE_STATE\n"); + break; + case OPC_OUB_SET_DEV_INFO: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n"); + break; + /* spcv specific commands */ + case OPC_OUB_PHY_START_RESP: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_PHY_START_RESP opcode:%x\n", opc); + mpi_phy_start_resp(pm8001_ha, piomb); + break; + case OPC_OUB_PHY_STOP_RESP: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc); + 
mpi_phy_stop_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_CONTROLLER_CONFIG: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc); + mpi_set_controller_config_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_CONTROLLER_CONFIG: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc); + mpi_get_controller_config_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_PHY_PROFILE: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc); + mpi_get_phy_profile_resp(pm8001_ha, piomb); + break; + case OPC_OUB_FLASH_OP_EXT: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc); + mpi_flash_op_ext_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_PHY_PROFILE: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc); + mpi_set_phy_profile_resp(pm8001_ha, piomb); + break; + case OPC_OUB_KEK_MANAGEMENT_RESP: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc); + mpi_kek_management_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEK_MANAGEMENT_RESP: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc); + mpi_dek_management_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COALESCED_COMP_RESP: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc); + ssp_coalesced_comp_resp(pm8001_ha, piomb); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, + "Unknown outbound Queue IOMB OPC = 0x%x\n", opc); + break; + } +} + +static void print_scratchpad_registers(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_0: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_1:0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_2: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_3: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_0: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_1: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_2: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_3: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_4: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_5: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_RSVD_0)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_RSVD_1)); +} + +static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + struct outbound_queue_table *circularQ; + void *pMsg1 = NULL; + u8 bc; + u32 ret = MPI_IO_STATUS_FAIL; + u32 regval; + + /* + * Fatal errors are programmed to be signalled in irq vector + * pm8001_ha->max_q_num - 1 through pm8001_ha->main_cfg_tbl.pm80xx_tbl. 
+ * fatal_err_interrupt + */ + if (vec == (pm8001_ha->max_q_num - 1)) { + u32 mipsall_ready; + + if (pm8001_ha->chip_id == chip_8008 || + pm8001_ha->chip_id == chip_8009) + mipsall_ready = SCRATCH_PAD_MIPSALL_READY_8PORT; + else + mipsall_ready = SCRATCH_PAD_MIPSALL_READY_16PORT; + + regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + if ((regval & mipsall_ready) != mipsall_ready) { + pm8001_ha->controller_fatal_error = true; + pm8001_dbg(pm8001_ha, FAIL, + "Firmware Fatal error! Regval:0x%x\n", + regval); + pm8001_handle_event(pm8001_ha, NULL, IO_FATAL_ERROR); + print_scratchpad_registers(pm8001_ha); + return ret; + } else { + /*read scratchpad rsvd 0 register*/ + regval = pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_RSVD_0); + switch (regval) { + case NON_FATAL_SPBC_LBUS_ECC_ERR: + case NON_FATAL_BDMA_ERR: + case NON_FATAL_THERM_OVERTEMP_ERR: + /*Clear the register*/ + pm8001_cw32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_RSVD_0, + 0x00000000); + break; + default: + break; + } + } + } + circularQ = &pm8001_ha->outbnd_q_tbl[vec]; + spin_lock_irqsave(&circularQ->oq_lock, circularQ->lock_flags); + do { + /* spurious interrupt during setup if kexec-ing and + * driver doing a doorbell access w/ the pre-kexec oq + * interrupt setup. + */ + if (!circularQ->pi_virt) + break; + ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); + if (MPI_IO_STATUS_SUCCESS == ret) { + /* process the outbound message */ + process_one_iomb(pm8001_ha, circularQ, + (void *)(pMsg1 - 4)); + /* free the message from the outbound circular buffer */ + pm8001_mpi_msg_free_set(pm8001_ha, pMsg1, + circularQ, bc); + } + if (MPI_IO_STATUS_BUSY == ret) { + /* Update the producer index from SPC */ + circularQ->producer_index = + cpu_to_le32(pm8001_read_32(circularQ->pi_virt)); + if (le32_to_cpu(circularQ->producer_index) == + circularQ->consumer_idx) + /* OQ is empty */ + break; + } + } while (1); + spin_unlock_irqrestore(&circularQ->oq_lock, circularQ->lock_flags); + return ret; +} + +/* DMA_... to our direction translation. */ +static const u8 data_dir_flags[] = { + [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */ + [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */ + [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */ + [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */ +}; + +static void build_smp_cmd(u32 deviceID, __le32 hTag, + struct smp_req *psmp_cmd, int mode, int length) +{ + psmp_cmd->tag = hTag; + psmp_cmd->device_id = cpu_to_le32(deviceID); + if (mode == SMP_DIRECT) { + length = length - 4; /* subtract crc */ + psmp_cmd->len_ip_ir = cpu_to_le32(length << 16); + } else { + psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1)); + } +} + +/** + * pm80xx_chip_smp_req - send an SMP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. 
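+ *
+ * The SMP request and response buffers are DMA-mapped and must both be
+ * a multiple of 4 bytes long. An 8-byte request is issued in SMP_DIRECT
+ * mode with the frame copied into the IOMB; longer requests use
+ * SMP_INDIRECT mode and pass the mapped buffer addresses instead.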
+ */ +static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + int elem, rc; + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct scatterlist *sg_req, *sg_resp, *smp_req; + u32 req_len, resp_len; + struct smp_req smp_cmd; + u32 opc; + u32 i, length; + u8 *payload; + u8 *to; + + memset(&smp_cmd, 0, sizeof(smp_cmd)); + /* + * DMA-map SMP request, response buffers + */ + sg_req = &task->smp_task.smp_req; + elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + + sg_resp = &task->smp_task.smp_resp; + elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out; + } + resp_len = sg_dma_len(sg_resp); + /* must be in dwords */ + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto err_out_2; + } + + opc = OPC_INB_SMP_REQUEST; + smp_cmd.tag = cpu_to_le32(ccb->ccb_tag); + + length = sg_req->length; + pm8001_dbg(pm8001_ha, IO, "SMP Frame Length %d\n", sg_req->length); + if (!(length - 8)) + pm8001_ha->smp_exp_mode = SMP_DIRECT; + else + pm8001_ha->smp_exp_mode = SMP_INDIRECT; + + + smp_req = &task->smp_task.smp_req; + to = kmap_atomic(sg_page(smp_req)); + payload = to + smp_req->offset; + + /* INDIRECT MODE command settings. Use DMA */ + if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) { + pm8001_dbg(pm8001_ha, IO, "SMP REQUEST INDIRECT MODE\n"); + /* for SPCv indirect mode. Place the top 4 bytes of + * SMP Request header here. */ + for (i = 0; i < 4; i++) + smp_cmd.smp_req16[i] = *(payload + i); + /* exclude top 4 bytes for SMP req header */ + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_req) + 4); + /* exclude 4 bytes for SMP req header and CRC */ + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32((u32)sg_dma_len + (&task->smp_task.smp_resp)-4); + } else { /* DIRECT MODE */ + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_req)); + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32 + ((u32)sg_dma_len(&task->smp_task.smp_resp)-4); + } + if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { + pm8001_dbg(pm8001_ha, IO, "SMP REQUEST DIRECT MODE\n"); + for (i = 0; i < length; i++) + if (i < 16) { + smp_cmd.smp_req16[i] = *(payload + i); + pm8001_dbg(pm8001_ha, IO, + "Byte[%d]:%x (DMA data:%x)\n", + i, smp_cmd.smp_req16[i], + *(payload)); + } else { + smp_cmd.smp_req[i] = *(payload + i); + pm8001_dbg(pm8001_ha, IO, + "Byte[%d]:%x (DMA data:%x)\n", + i, smp_cmd.smp_req[i], + *(payload)); + } + } + kunmap_atomic(to); + build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, + &smp_cmd, pm8001_ha->smp_exp_mode, length); + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &smp_cmd, + sizeof(smp_cmd), 0); + if (rc) + goto err_out_2; + return 0; + +err_out_2: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); +err_out: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + return rc; +} + +static int 
check_enc_sas_cmd(struct sas_task *task) +{ + u8 cmd = task->ssp_task.cmd->cmnd[0]; + + if (cmd == READ_10 || cmd == WRITE_10 || cmd == WRITE_VERIFY) + return 1; + else + return 0; +} + +static int check_enc_sat_cmd(struct sas_task *task) +{ + int ret = 0; + switch (task->ata_task.fis.command) { + case ATA_CMD_FPDMA_READ: + case ATA_CMD_READ_EXT: + case ATA_CMD_READ: + case ATA_CMD_FPDMA_WRITE: + case ATA_CMD_WRITE_EXT: + case ATA_CMD_WRITE: + case ATA_CMD_PIO_READ: + case ATA_CMD_PIO_READ_EXT: + case ATA_CMD_PIO_WRITE: + case ATA_CMD_PIO_WRITE_EXT: + ret = 1; + break; + default: + ret = 0; + break; + } + return ret; +} + +static u32 pm80xx_chip_get_q_index(struct sas_task *task) +{ + struct request *rq = sas_task_find_rq(task); + + if (!rq) + return 0; + + return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(rq)); +} + +/** + * pm80xx_chip_ssp_io_req - send an SSP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. + */ +static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct ssp_ini_io_start_req ssp_cmd; + u32 tag = ccb->ccb_tag; + u64 phys_addr, end_addr; + u32 end_addr_high, end_addr_low; + u32 q_index; + u32 opc = OPC_INB_SSPINIIOSTART; + + memset(&ssp_cmd, 0, sizeof(ssp_cmd)); + memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); + + /* data address domain added for spcv; set to 0 by host, + * used internally by controller + * 0 for SAS 1.1 and SAS 2.0 compatible TLR + */ + ssp_cmd.dad_dir_m_tlr = + cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0); + ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); + ssp_cmd.tag = cpu_to_le32(tag); + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); + memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd, + task->ssp_task.cmd->cmd_len); + q_index = pm80xx_chip_get_q_index(task); + + /* Check if encryption is set */ + if (pm8001_ha->chip->encrypt && + !(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) { + pm8001_dbg(pm8001_ha, IO, + "Encryption enabled.Sending Encrypt SAS command 0x%x\n", + task->ssp_task.cmd->cmnd[0]); + opc = OPC_INB_SSP_INI_DIF_ENC_IO; + /* enable encryption. 
0 for SAS 1.1 and SAS 2.0 compatible TLR*/ + ssp_cmd.dad_dir_m_tlr = cpu_to_le32 + ((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0); + + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.enc_esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.enc_esgl = 0; + + /* Check 4G Boundary */ + end_addr = dma_addr + le32_to_cpu(ssp_cmd.enc_len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + + if (end_addr_high != le32_to_cpu(ssp_cmd.enc_addr_high)) { + pm8001_dbg(pm8001_ha, FAIL, + "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", + dma_addr, + le32_to_cpu(ssp_cmd.enc_len), + end_addr_high, end_addr_low); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.enc_esgl = cpu_to_le32(1U<<31); + } + } else if (task->num_scatter == 0) { + ssp_cmd.enc_addr_low = 0; + ssp_cmd.enc_addr_high = 0; + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.enc_esgl = 0; + } + + /* XTS mode. All other fields are 0 */ + ssp_cmd.key_cmode = cpu_to_le32(0x6 << 4); + + /* set tweak values. 
Should be the start lba */ + ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) | + (task->ssp_task.cmd->cmnd[3] << 16) | + (task->ssp_task.cmd->cmnd[4] << 8) | + (task->ssp_task.cmd->cmnd[5])); + } else { + pm8001_dbg(pm8001_ha, IO, + "Sending Normal SAS command 0x%x inb q %x\n", + task->ssp_task.cmd->cmnd[0], q_index); + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, ccb->n_elem, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + ssp_cmd.addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + + ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + + /* Check 4G Boundary */ + end_addr = dma_addr + le32_to_cpu(ssp_cmd.len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + if (end_addr_high != le32_to_cpu(ssp_cmd.addr_high)) { + pm8001_dbg(pm8001_ha, FAIL, + "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", + dma_addr, + le32_to_cpu(ssp_cmd.len), + end_addr_high, end_addr_low); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + ssp_cmd.addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.esgl = cpu_to_le32(1<<31); + } + } else if (task->num_scatter == 0) { + ssp_cmd.addr_low = 0; + ssp_cmd.addr_high = 0; + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } + } + + return pm8001_mpi_build_cmd(pm8001_ha, q_index, opc, &ssp_cmd, + sizeof(ssp_cmd), q_index); +} + +static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; + struct ata_queued_cmd *qc = task->uldd_task; + u32 tag = ccb->ccb_tag, q_index; + struct sata_start_req sata_cmd; + u32 hdr_tag, ncg_tag = 0; + u64 phys_addr, end_addr; + u32 end_addr_high, end_addr_low; + u32 ATAP = 0x0; + u32 dir, retfis = 0; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + memset(&sata_cmd, 0, sizeof(sata_cmd)); + + q_index = pm80xx_chip_get_q_index(task); + + if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) { + ATAP = 0x04; /* no data*/ + pm8001_dbg(pm8001_ha, IO, "no data\n"); + } else if (likely(!task->ata_task.device_control_reg_update)) { + if (task->ata_task.use_ncq && + dev->sata_dev.class != ATA_DEV_ATAPI) { + ATAP = 0x07; /* FPDMA */ + pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); + } else if (task->ata_task.dma_xfer) { + ATAP = 0x06; /* DMA */ + pm8001_dbg(pm8001_ha, IO, "DMA\n"); + } else { + ATAP = 0x05; /* PIO*/ + pm8001_dbg(pm8001_ha, IO, "PIO\n"); + } + } + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + ncg_tag = hdr_tag; + } + dir = data_dir_flags[task->data_dir] << 8; + sata_cmd.tag = cpu_to_le32(tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.data_len = cpu_to_le32(task->total_xfer_len); + if (task->ata_task.return_fis_on_success) + retfis = 1; + sata_cmd.sata_fis = task->ata_task.fis; + if 
(likely(!task->ata_task.device_control_reg_update)) + sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */ + sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */ + + /* Check if encryption is set */ + if (pm8001_ha->chip->encrypt && + !(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) { + pm8001_dbg(pm8001_ha, IO, + "Encryption enabled.Sending Encrypt SATA cmd 0x%x\n", + sata_cmd.sata_fis.command); + opc = OPC_INB_SATA_DIF_ENC_IO; + /* set encryption bit; dad (bits 0-1) is 0 */ + sata_cmd.retfis_ncqtag_atap_dir_m_dad = + cpu_to_le32((retfis << 24) | ((ncg_tag & 0xff) << 16) | + ((ATAP & 0x3f) << 10) | 0x20 | dir); + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + sata_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + sata_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + sata_cmd.enc_esgl = cpu_to_le32(1 << 31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + + sata_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(dma_addr)); + sata_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + sata_cmd.enc_esgl = 0; + + /* Check 4G Boundary */ + end_addr = dma_addr + le32_to_cpu(sata_cmd.enc_len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + if (end_addr_high != le32_to_cpu(sata_cmd.enc_addr_high)) { + pm8001_dbg(pm8001_ha, FAIL, + "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", + dma_addr, + le32_to_cpu(sata_cmd.enc_len), + end_addr_high, end_addr_low); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + sata_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + sata_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + sata_cmd.enc_esgl = + cpu_to_le32(1 << 31); + } + } else if (task->num_scatter == 0) { + sata_cmd.enc_addr_low = 0; + sata_cmd.enc_addr_high = 0; + sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + sata_cmd.enc_esgl = 0; + } + /* XTS mode. All other fields are 0 */ + sata_cmd.key_index_mode = cpu_to_le32(0x6 << 4); + + /* set tweak values. 
Should be the start lba */ + sata_cmd.twk_val0 = + cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) | + (sata_cmd.sata_fis.lbah << 16) | + (sata_cmd.sata_fis.lbam << 8) | + (sata_cmd.sata_fis.lbal)); + sata_cmd.twk_val1 = + cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) | + (sata_cmd.sata_fis.lbam_exp)); + } else { + pm8001_dbg(pm8001_ha, IO, + "Sending Normal SATA command 0x%x inb %x\n", + sata_cmd.sata_fis.command, q_index); + /* dad (bits 0-1) is 0 */ + sata_cmd.retfis_ncqtag_atap_dir_m_dad = + cpu_to_le32((retfis << 24) | ((ncg_tag & 0xff) << 16) | + ((ATAP & 0x3f) << 10) | dir); + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + sata_cmd.addr_low = lower_32_bits(phys_addr); + sata_cmd.addr_high = upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1U << 31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + + sata_cmd.addr_low = lower_32_bits(dma_addr); + sata_cmd.addr_high = upper_32_bits(dma_addr); + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + + /* Check 4G Boundary */ + end_addr = dma_addr + le32_to_cpu(sata_cmd.len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + if (end_addr_high != sata_cmd.addr_high) { + pm8001_dbg(pm8001_ha, FAIL, + "The sg list address start_addr=0x%016llx data_len=0x%xend_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", + dma_addr, + le32_to_cpu(sata_cmd.len), + end_addr_high, end_addr_low); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + sata_cmd.addr_low = lower_32_bits(phys_addr); + sata_cmd.addr_high = upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1U << 31); + } + } else if (task->num_scatter == 0) { + sata_cmd.addr_low = 0; + sata_cmd.addr_high = 0; + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + } + + /* scsi cdb */ + sata_cmd.atapi_scsi_cdb[0] = + cpu_to_le32(((task->ata_task.atapi_packet[0]) | + (task->ata_task.atapi_packet[1] << 8) | + (task->ata_task.atapi_packet[2] << 16) | + (task->ata_task.atapi_packet[3] << 24))); + sata_cmd.atapi_scsi_cdb[1] = + cpu_to_le32(((task->ata_task.atapi_packet[4]) | + (task->ata_task.atapi_packet[5] << 8) | + (task->ata_task.atapi_packet[6] << 16) | + (task->ata_task.atapi_packet[7] << 24))); + sata_cmd.atapi_scsi_cdb[2] = + cpu_to_le32(((task->ata_task.atapi_packet[8]) | + (task->ata_task.atapi_packet[9] << 8) | + (task->ata_task.atapi_packet[10] << 16) | + (task->ata_task.atapi_packet[11] << 24))); + sata_cmd.atapi_scsi_cdb[3] = + cpu_to_le32(((task->ata_task.atapi_packet[12]) | + (task->ata_task.atapi_packet[13] << 8) | + (task->ata_task.atapi_packet[14] << 16) | + (task->ata_task.atapi_packet[15] << 24))); + } + + trace_pm80xx_request_issue(pm8001_ha->id, + ccb->device ? ccb->device->attached_phy : PM8001_MAX_PHYS, + ccb->ccb_tag, opc, + qc ? qc->tf.command : 0, // ata opcode + ccb->device ? atomic_read(&ccb->device->running_req) : 0); + return pm8001_mpi_build_cmd(pm8001_ha, q_index, opc, &sata_cmd, + sizeof(sata_cmd), q_index); +} + +/** + * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND + * @pm8001_ha: our hba card information. + * @phy_id: the phy id which we wanted to start up. 
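+ *
+ * The phy is started with spin-hold disabled, automatic link-mode
+ * negotiation and the link rate configured for the HBA, and identifies
+ * itself as a SAS end device initiator with SAS_PROTOCOL_ALL.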
+ */ +static int +pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) +{ + struct phy_start_req payload; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTART; + + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + + pm8001_dbg(pm8001_ha, INIT, "PHY START REQ for phy_id %d\n", phy_id); + + payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | + LINKMODE_AUTO | pm8001_ha->link_rate | phy_id); + /* SSC Disable and SAS Analog ST configuration */ + /* + payload.ase_sh_lm_slr_phyid = + cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE | + LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | + phy_id); + Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need + */ + + payload.sas_identify.dev_type = SAS_END_DEVICE; + payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; + memcpy(payload.sas_identify.sas_addr, + &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE); + payload.sas_identify.phy_id = phy_id; + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload, + sizeof(payload), 0); +} + +/** + * pm80xx_chip_phy_stop_req - start phy via PHY_STOP COMMAND + * @pm8001_ha: our hba card information. + * @phy_id: the phy id which we wanted to start up. + */ +static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, + u8 phy_id) +{ + struct phy_stop_req payload; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTOP; + + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + payload.phy_id = cpu_to_le32(phy_id); + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload, + sizeof(payload), 0); +} + +/* + * see comments on pm8001_mpi_reg_resp. + */ +static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 flag) +{ + struct reg_dev_req payload; + u32 opc; + u32 stp_sspsmp_sata = 0x4; + u32 linkrate, phy_id; + int rc; + struct pm8001_ccb_info *ccb; + u8 retryFlag = 0x1; + u16 firstBurstSize = 0; + u16 ITNT = 2000; + struct domain_device *dev = pm8001_dev->sas_device; + struct domain_device *parent_dev = dev->parent; + struct pm8001_port *port = dev->port->lldd_port; + + memset(&payload, 0, sizeof(payload)); + ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL); + if (!ccb) + return -SAS_QUEUE_FULL; + + payload.tag = cpu_to_le32(ccb->ccb_tag); + + if (flag == 1) { + stp_sspsmp_sata = 0x02; /*direct attached sata */ + } else { + if (pm8001_dev->dev_type == SAS_SATA_DEV) + stp_sspsmp_sata = 0x00; /* stp*/ + else if (pm8001_dev->dev_type == SAS_END_DEVICE || + dev_is_expander(pm8001_dev->dev_type)) + stp_sspsmp_sata = 0x01; /*ssp or smp*/ + } + if (parent_dev && dev_is_expander(parent_dev->dev_type)) + phy_id = parent_dev->ex_dev.ex_phy->phy_id; + else + phy_id = pm8001_dev->attached_phy; + + opc = OPC_INB_REG_DEV; + + linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ? 
+ pm8001_dev->sas_device->linkrate : dev->port->linkrate; + + payload.phyid_portid = + cpu_to_le32(((port->port_id) & 0xFF) | + ((phy_id & 0xFF) << 8)); + + payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) | + ((linkrate & 0x0F) << 24) | + ((stp_sspsmp_sata & 0x03) << 28)); + payload.firstburstsize_ITNexustimeout = + cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); + + memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, + SAS_ADDR_SIZE); + + pm8001_dbg(pm8001_ha, INIT, + "register device req phy_id 0x%x port_id 0x%x\n", phy_id, + (port->port_id & 0xFF)); + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_ccb_free(pm8001_ha, ccb); + + return rc; +} + +/** + * pm80xx_chip_phy_ctl_req - support the local phy operation + * @pm8001_ha: our hba card information. + * @phyId: the phy id which we wanted to operate + * @phy_op: phy operation to request + */ +static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op) +{ + u32 tag; + int rc; + struct local_phy_ctl_req payload; + u32 opc = OPC_INB_LOCAL_PHY_CONTROL; + + memset(&payload, 0, sizeof(payload)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + + payload.tag = cpu_to_le32(tag); + payload.phyop_phyid = + cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF)); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + + return rc; +} + +static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha) +{ +#ifdef PM8001_USE_MSIX + return 1; +#else + u32 value; + + value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR); + if (value) + return 1; + return 0; +#endif +} + +/** + * pm80xx_chip_isr - PM8001 isr handler. + * @pm8001_ha: our hba card information. + * @vec: irq number. 
+ */ +static irqreturn_t +pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + pm80xx_chip_interrupt_disable(pm8001_ha, vec); + pm8001_dbg(pm8001_ha, DEVIO, + "irq vec %d, ODMR:0x%x\n", + vec, pm8001_cr32(pm8001_ha, 0, 0x30)); + process_oq(pm8001_ha, vec); + pm80xx_chip_interrupt_enable(pm8001_ha, vec); + return IRQ_HANDLED; +} + +static void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha, + u32 operation, u32 phyid, + u32 length, u32 *buf) +{ + u32 tag, i, j = 0; + int rc; + struct set_phy_profile_req payload; + u32 opc = OPC_INB_SET_PHY_PROFILE; + + memset(&payload, 0, sizeof(payload)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) { + pm8001_dbg(pm8001_ha, FAIL, "Invalid tag\n"); + return; + } + + payload.tag = cpu_to_le32(tag); + payload.ppc_phyid = + cpu_to_le32(((operation & 0xF) << 8) | (phyid & 0xFF)); + pm8001_dbg(pm8001_ha, DISC, + " phy profile command for phy %x ,length is %d\n", + le32_to_cpu(payload.ppc_phyid), length); + for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) { + payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i)); + j++; + } + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); +} + +void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha, + u32 length, u8 *buf) +{ + u32 i; + + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + mpi_set_phy_profile_req(pm8001_ha, + SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf); + length = length + PHY_DWORD_LENGTH; + } + pm8001_dbg(pm8001_ha, INIT, "phy settings completed\n"); +} + +void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha, + u32 phy, u32 length, u32 *buf) +{ + u32 tag, opc; + int rc, i; + struct set_phy_profile_req payload; + + memset(&payload, 0, sizeof(payload)); + + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) { + pm8001_dbg(pm8001_ha, INIT, "Invalid tag\n"); + return; + } + + opc = OPC_INB_SET_PHY_PROFILE; + + payload.tag = cpu_to_le32(tag); + payload.ppc_phyid = + cpu_to_le32(((SAS_PHY_ANALOG_SETTINGS_PAGE & 0xF) << 8) + | (phy & 0xFF)); + + for (i = 0; i < length; i++) + payload.reserved[i] = cpu_to_le32(*(buf + i)); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + + pm8001_dbg(pm8001_ha, INIT, "PHY %d settings applied\n", phy); +} +const struct pm8001_dispatch pm8001_80xx_dispatch = { + .name = "pmc80xx", + .chip_init = pm80xx_chip_init, + .chip_post_init = pm80xx_chip_post_init, + .chip_soft_rst = pm80xx_chip_soft_rst, + .chip_rst = pm80xx_hw_chip_rst, + .chip_iounmap = pm8001_chip_iounmap, + .isr = pm80xx_chip_isr, + .is_our_interrupt = pm80xx_chip_is_our_interrupt, + .isr_process_oq = process_oq, + .interrupt_enable = pm80xx_chip_interrupt_enable, + .interrupt_disable = pm80xx_chip_interrupt_disable, + .make_prd = pm8001_chip_make_sg, + .smp_req = pm80xx_chip_smp_req, + .ssp_io_req = pm80xx_chip_ssp_io_req, + .sata_req = pm80xx_chip_sata_req, + .phy_start_req = pm80xx_chip_phy_start_req, + .phy_stop_req = pm80xx_chip_phy_stop_req, + .reg_dev_req = pm80xx_chip_reg_dev_req, + .dereg_dev_req = pm8001_chip_dereg_dev_req, + .phy_ctl_req = pm80xx_chip_phy_ctl_req, + .task_abort = pm8001_chip_abort_task, + .ssp_tm_req = pm8001_chip_ssp_tm_req, + .get_nvmd_req = pm8001_chip_get_nvmd_req, + .set_nvmd_req = pm8001_chip_set_nvmd_req, + .fw_flash_update_req = pm8001_chip_fw_flash_update_req, + .set_dev_state_req = pm8001_chip_set_dev_state_req, + .fatal_errors = pm80xx_fatal_errors, + .hw_event_ack_req 
= pm80xx_hw_event_ack_req, +}; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h new file mode 100644 index 000000000..eb8fd37b2 --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_hwi.h @@ -0,0 +1,1665 @@ +/* + * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + */ + +#ifndef _PMC8001_REG_H_ +#define _PMC8001_REG_H_ + +#include +#include + +/* for Request Opcode of IOMB */ +#define OPC_INB_ECHO 1 /* 0x000 */ +#define OPC_INB_PHYSTART 4 /* 0x004 */ +#define OPC_INB_PHYSTOP 5 /* 0x005 */ +#define OPC_INB_SSPINIIOSTART 6 /* 0x006 */ +#define OPC_INB_SSPINITMSTART 7 /* 0x007 */ +/* 0x8 RESV IN SPCv */ +#define OPC_INB_RSVD 8 /* 0x008 */ +#define OPC_INB_DEV_HANDLE_ACCEPT 9 /* 0x009 */ +#define OPC_INB_SSPTGTIOSTART 10 /* 0x00A */ +#define OPC_INB_SSPTGTRSPSTART 11 /* 0x00B */ +/* 0xC, 0xD, 0xE removed in SPCv */ +#define OPC_INB_SSP_ABORT 15 /* 0x00F */ +#define OPC_INB_DEREG_DEV_HANDLE 16 /* 0x010 */ +#define OPC_INB_GET_DEV_HANDLE 17 /* 0x011 */ +#define OPC_INB_SMP_REQUEST 18 /* 0x012 */ +/* 0x13 SMP_RESPONSE is removed in SPCv */ +#define OPC_INB_SMP_ABORT 20 /* 0x014 */ +/* 0x16 RESV IN SPCv */ +#define OPC_INB_RSVD1 22 /* 0x016 */ +#define OPC_INB_SATA_HOST_OPSTART 23 /* 0x017 */ +#define OPC_INB_SATA_ABORT 24 /* 0x018 */ +#define OPC_INB_LOCAL_PHY_CONTROL 25 /* 0x019 */ +/* 0x1A RESV IN SPCv */ +#define OPC_INB_RSVD2 26 /* 0x01A */ +#define OPC_INB_FW_FLASH_UPDATE 32 /* 0x020 */ +#define OPC_INB_GPIO 34 /* 0x022 */ +#define OPC_INB_SAS_DIAG_MODE_START_END 35 /* 0x023 */ +#define OPC_INB_SAS_DIAG_EXECUTE 36 /* 0x024 */ +/* 0x25 RESV IN SPCv */ +#define OPC_INB_RSVD3 37 /* 0x025 */ +#define OPC_INB_GET_TIME_STAMP 38 /* 0x026 */ +#define OPC_INB_PORT_CONTROL 39 /* 0x027 */ +#define OPC_INB_GET_NVMD_DATA 40 /* 0x028 */ +#define OPC_INB_SET_NVMD_DATA 41 /* 0x029 */ +#define OPC_INB_SET_DEVICE_STATE 42 /* 0x02A */ +#define OPC_INB_GET_DEVICE_STATE 43 /* 0x02B */ +#define OPC_INB_SET_DEV_INFO 44 /* 0x02C */ +/* 0x2D RESV IN SPCv */ +#define OPC_INB_RSVD4 45 /* 0x02D */ +#define OPC_INB_SGPIO_REGISTER 46 /* 0x02E */ +#define OPC_INB_PCIE_DIAG_EXEC 47 /* 0x02F */ +#define OPC_INB_SET_CONTROLLER_CONFIG 48 /* 0x030 */ +#define OPC_INB_GET_CONTROLLER_CONFIG 49 /* 0x031 */ +#define OPC_INB_REG_DEV 50 /* 0x032 */ +#define OPC_INB_SAS_HW_EVENT_ACK 51 /* 0x033 */ +#define OPC_INB_GET_DEVICE_INFO 52 /* 0x034 */ +#define OPC_INB_GET_PHY_PROFILE 53 /* 0x035 */ +#define OPC_INB_FLASH_OP_EXT 54 /* 0x036 */ +#define OPC_INB_SET_PHY_PROFILE 55 /* 0x037 */ +#define OPC_INB_KEK_MANAGEMENT 256 /* 0x100 */ +#define OPC_INB_DEK_MANAGEMENT 257 /* 0x101 */ +#define OPC_INB_SSP_INI_DIF_ENC_IO 258 /* 0x102 */ +#define OPC_INB_SATA_DIF_ENC_IO 259 /* 0x103 */ + +/* for Response Opcode of IOMB */ +#define OPC_OUB_ECHO 1 /* 0x001 */ +#define OPC_OUB_RSVD 4 /* 0x004 */ +#define OPC_OUB_SSP_COMP 5 /* 0x005 */ +#define OPC_OUB_SMP_COMP 6 /* 0x006 */ +#define OPC_OUB_LOCAL_PHY_CNTRL 7 /* 0x007 */ +#define OPC_OUB_RSVD1 10 /* 0x00A */ +#define OPC_OUB_DEREG_DEV 11 /* 0x00B */ +#define OPC_OUB_GET_DEV_HANDLE 12 /* 0x00C */ +#define OPC_OUB_SATA_COMP 13 /* 0x00D */ +#define OPC_OUB_SATA_EVENT 14 /* 0x00E */ +#define OPC_OUB_SSP_EVENT 15 /* 0x00F */ +#define OPC_OUB_RSVD2 16 /* 0x010 */ +/* 0x11 - SMP_RECEIVED Notification removed in SPCv*/ +#define OPC_OUB_SSP_RECV_EVENT 18 /* 0x012 */ +#define OPC_OUB_RSVD3 19 /* 0x013 */ +#define OPC_OUB_FW_FLASH_UPDATE 20 /* 0x014 */ +#define OPC_OUB_GPIO_RESPONSE 22 /* 0x016 */ +#define OPC_OUB_GPIO_EVENT 23 /* 0x017 */ +#define OPC_OUB_GENERAL_EVENT 24 /* 0x018 */ +#define OPC_OUB_SSP_ABORT_RSP 26 /* 0x01A */ +#define OPC_OUB_SATA_ABORT_RSP 27 /* 0x01B */ +#define OPC_OUB_SAS_DIAG_MODE_START_END 28 /* 0x01C */ +#define OPC_OUB_SAS_DIAG_EXECUTE 29 /* 0x01D */ +#define OPC_OUB_GET_TIME_STAMP 30 /* 0x01E */ +#define OPC_OUB_RSVD4 31 /* 0x01F */ 
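For reference, a minimal sketch (not part of this patch) of how an outbound-queue consumer typically dispatches on the OPC_OUB_* response opcodes defined above. The helper name and the empty case bodies are illustrative assumptions; only the opcode macros above and the kernel u32 type are relied on.

static void example_handle_oub_opcode(u32 opc)
{
	switch (opc) {
	case OPC_OUB_SSP_COMP:		/* 0x005: an SSP I/O completed */
	case OPC_OUB_SATA_COMP:		/* 0x00D: a SATA I/O completed */
		/* completion path: look up the ccb by its tag and finish the I/O */
		break;
	case OPC_OUB_SATA_EVENT:	/* 0x00E: SATA link/protocol event */
	case OPC_OUB_SSP_EVENT:		/* 0x00F: SSP link/protocol event */
		/* event path: decode the event code and notify the upper layer */
		break;
	default:
		/* unhandled opcodes are logged and the queue entry is consumed */
		break;
	}
}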
+#define OPC_OUB_PORT_CONTROL 32 /* 0x020 */ +#define OPC_OUB_SKIP_ENTRY 33 /* 0x021 */ +#define OPC_OUB_SMP_ABORT_RSP 34 /* 0x022 */ +#define OPC_OUB_GET_NVMD_DATA 35 /* 0x023 */ +#define OPC_OUB_SET_NVMD_DATA 36 /* 0x024 */ +#define OPC_OUB_DEVICE_HANDLE_REMOVAL 37 /* 0x025 */ +#define OPC_OUB_SET_DEVICE_STATE 38 /* 0x026 */ +#define OPC_OUB_GET_DEVICE_STATE 39 /* 0x027 */ +#define OPC_OUB_SET_DEV_INFO 40 /* 0x028 */ +#define OPC_OUB_RSVD5 41 /* 0x029 */ +#define OPC_OUB_HW_EVENT 1792 /* 0x700 */ +#define OPC_OUB_DEV_HANDLE_ARRIV 1824 /* 0x720 */ +#define OPC_OUB_THERM_HW_EVENT 1840 /* 0x730 */ +#define OPC_OUB_SGPIO_RESP 2094 /* 0x82E */ +#define OPC_OUB_PCIE_DIAG_EXECUTE 2095 /* 0x82F */ +#define OPC_OUB_DEV_REGIST 2098 /* 0x832 */ +#define OPC_OUB_SAS_HW_EVENT_ACK 2099 /* 0x833 */ +#define OPC_OUB_GET_DEVICE_INFO 2100 /* 0x834 */ +/* spcv specific commands */ +#define OPC_OUB_PHY_START_RESP 2052 /* 0x804 */ +#define OPC_OUB_PHY_STOP_RESP 2053 /* 0x805 */ +#define OPC_OUB_SET_CONTROLLER_CONFIG 2096 /* 0x830 */ +#define OPC_OUB_GET_CONTROLLER_CONFIG 2097 /* 0x831 */ +#define OPC_OUB_GET_PHY_PROFILE 2101 /* 0x835 */ +#define OPC_OUB_FLASH_OP_EXT 2102 /* 0x836 */ +#define OPC_OUB_SET_PHY_PROFILE 2103 /* 0x837 */ +#define OPC_OUB_KEK_MANAGEMENT_RESP 2304 /* 0x900 */ +#define OPC_OUB_DEK_MANAGEMENT_RESP 2305 /* 0x901 */ +#define OPC_OUB_SSP_COALESCED_COMP_RESP 2306 /* 0x902 */ + +/* for phy start*/ +#define SSC_DISABLE_15 (0x01 << 16) +#define SSC_DISABLE_30 (0x02 << 16) +#define SSC_DISABLE_60 (0x04 << 16) +#define SAS_ASE (0x01 << 15) +#define SPINHOLD_DISABLE (0x00 << 14) +#define SPINHOLD_ENABLE (0x01 << 14) +#define LINKMODE_SAS (0x01 << 12) +#define LINKMODE_DSATA (0x02 << 12) +#define LINKMODE_AUTO (0x03 << 12) +#define LINKRATE_15 (0x01 << 8) +#define LINKRATE_30 (0x02 << 8) +#define LINKRATE_60 (0x04 << 8) +#define LINKRATE_120 (0x08 << 8) + +/*phy_stop*/ +#define PHY_STOP_SUCCESS 0x00 +#define PHY_STOP_ERR_DEVICE_ATTACHED 0x1046 + +/* phy_profile */ +#define SAS_PHY_ANALOG_SETTINGS_PAGE 0x04 +#define PHY_DWORD_LENGTH 0xC + +/* Thermal related */ +#define THERMAL_ENABLE 0x1 +#define THERMAL_LOG_ENABLE 0x1 +#define THERMAL_PAGE_CODE_7H 0x6 +#define THERMAL_PAGE_CODE_8H 0x7 +#define LTEMPHIL 70 +#define RTEMPHIL 100 + +/* Encryption info */ +#define SCRATCH_PAD3_ENC_DISABLED 0x00000000 +#define SCRATCH_PAD3_ENC_DIS_ERR 0x00000001 +#define SCRATCH_PAD3_ENC_ENA_ERR 0x00000002 +#define SCRATCH_PAD3_ENC_READY 0x00000003 +#define SCRATCH_PAD3_ENC_MASK SCRATCH_PAD3_ENC_READY + +#define SCRATCH_PAD3_XTS_ENABLED (1 << 14) +#define SCRATCH_PAD3_SMA_ENABLED (1 << 4) +#define SCRATCH_PAD3_SMB_ENABLED (1 << 5) +#define SCRATCH_PAD3_SMF_ENABLED 0 +#define SCRATCH_PAD3_SM_MASK 0x000000F0 +#define SCRATCH_PAD3_ERR_CODE 0x00FF0000 + +#define SEC_MODE_SMF 0x0 +#define SEC_MODE_SMA 0x100 +#define SEC_MODE_SMB 0x200 +#define CIPHER_MODE_ECB 0x00000001 +#define CIPHER_MODE_XTS 0x00000002 +#define KEK_MGMT_SUBOP_KEYCARDUPDATE 0x4 + +/* SAS protocol timer configuration page */ +#define SAS_PROTOCOL_TIMER_CONFIG_PAGE 0x04 +#define STP_MCT_TMO 32 +#define SSP_MCT_TMO 32 +#define SAS_MAX_OPEN_TIME 5 +#define SMP_MAX_CONN_TIMER 0xFF +#define STP_FRM_TIMER 0 +#define STP_IDLE_TIME 5 /* 5 us; controller default */ +#define SAS_MFD 0 +#define SAS_OPNRJT_RTRY_INTVL 2 +#define SAS_DOPNRJT_RTRY_TMO 128 +#define SAS_COPNRJT_RTRY_TMO 128 + +#define SPCV_DOORBELL_CLEAR_TIMEOUT (30 * 50) /* 30 sec */ +#define SPC_DOORBELL_CLEAR_TIMEOUT (15 * 50) /* 15 sec */ + +/* + Making ORR bigger than IT NEXUS LOSS which is 
2000000us = 2 second. + Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128 + is DOPNRJT_RTRY_TMO +*/ +#define SAS_DOPNRJT_RTRY_THR 23438 +#define SAS_COPNRJT_RTRY_THR 23438 +#define SAS_MAX_AIP 0x200000 +#define IT_NEXUS_TIMEOUT 0x7D0 +#define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30) +/* Port recovery timeout, 10000 ms for PM8006 controller */ +#define CHIP_8006_PORT_RECOVERY_TIMEOUT 0x640000 + +#ifdef __LITTLE_ENDIAN_BITFIELD +struct sas_identify_frame_local { + /* Byte 0 */ + u8 frame_type:4; + u8 dev_type:3; + u8 _un0:1; + + /* Byte 1 */ + u8 _un1; + + /* Byte 2 */ + union { + struct { + u8 _un20:1; + u8 smp_iport:1; + u8 stp_iport:1; + u8 ssp_iport:1; + u8 _un247:4; + }; + u8 initiator_bits; + }; + + /* Byte 3 */ + union { + struct { + u8 _un30:1; + u8 smp_tport:1; + u8 stp_tport:1; + u8 ssp_tport:1; + u8 _un347:4; + }; + u8 target_bits; + }; + + /* Byte 4 - 11 */ + u8 _un4_11[8]; + + /* Byte 12 - 19 */ + u8 sas_addr[SAS_ADDR_SIZE]; + + /* Byte 20 */ + u8 phy_id; + + u8 _un21_27[7]; + +} __packed; + +#elif defined(__BIG_ENDIAN_BITFIELD) +struct sas_identify_frame_local { + /* Byte 0 */ + u8 _un0:1; + u8 dev_type:3; + u8 frame_type:4; + + /* Byte 1 */ + u8 _un1; + + /* Byte 2 */ + union { + struct { + u8 _un247:4; + u8 ssp_iport:1; + u8 stp_iport:1; + u8 smp_iport:1; + u8 _un20:1; + }; + u8 initiator_bits; + }; + + /* Byte 3 */ + union { + struct { + u8 _un347:4; + u8 ssp_tport:1; + u8 stp_tport:1; + u8 smp_tport:1; + u8 _un30:1; + }; + u8 target_bits; + }; + + /* Byte 4 - 11 */ + u8 _un4_11[8]; + + /* Byte 12 - 19 */ + u8 sas_addr[SAS_ADDR_SIZE]; + + /* Byte 20 */ + u8 phy_id; + + u8 _un21_27[7]; +} __packed; +#else +#error "Bitfield order not defined!" +#endif + +struct mpi_msg_hdr { + __le32 header; /* Bits [11:0] - Message operation code */ + /* Bits [15:12] - Message Category */ + /* Bits [21:16] - Outboundqueue ID for the + operation completion message */ + /* Bits [23:22] - Reserved */ + /* Bits [28:24] - Buffer Count, indicates how + many buffer are allocated for the massage */ + /* Bits [30:29] - Reserved */ + /* Bits [31] - Message Valid bit */ +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY Start Command + * use to describe enable the phy (128 bytes) + */ +struct phy_start_req { + __le32 tag; + __le32 ase_sh_lm_slr_phyid; + struct sas_identify_frame_local sas_identify; /* 28 Bytes */ + __le32 spasti; + u32 reserved[21]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY Start Command + * use to disable the phy (128 bytes) + */ +struct phy_stop_req { + __le32 tag; + __le32 phy_id; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/* set device bits fis - device to host */ +struct set_dev_bits_fis { + u8 fis_type; /* 0xA1*/ + u8 n_i_pmport; + /* b7 : n Bit. Notification bit. If set device needs attention. */ + /* b6 : i Bit. Interrupt Bit */ + /* b5-b4: reserved2 */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u32 _r_a; +} __attribute__ ((packed)); +/* PIO setup FIS - device to host */ +struct pio_setup_fis { + u8 fis_type; /* 0x5f */ + u8 i_d_pmPort; + /* b7 : reserved */ + /* b6 : i bit. Interrupt bit */ + /* b5 : d bit. data transfer direction. 
set to 1 for device to host + xfer */ + /* b4 : reserved */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + u8 lbal_exp; + u8 lbam_exp; + u8 lbah_exp; + u8 _r_a; + u8 sector_count; + u8 sector_count_exp; + u8 _r_b; + u8 e_status; + u8 _r_c[2]; + u8 transfer_count; +} __attribute__ ((packed)); + +/* + * brief the data structure of SATA Completion Response + * use to describe the sata task response (64 bytes) + */ +struct sata_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u32 sata_resp[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SAS HW Event Notification + * use to alert the host about the hardware event(64 bytes) + */ +/* updated outbound struct for spcv */ + +struct hw_event_resp { + __le32 lr_status_evt_portid; + __le32 evt_param; + __le32 phyid_npip_portstate; + struct sas_identify_frame sas_identify; + struct dev_to_host_fis sata_fis; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure for thermal event notification + */ + +struct thermal_hw_event { + __le32 thermal_event; + __le32 rht_lht; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of REGISTER DEVICE Command + * use to describe MPI REGISTER DEVICE Command (64 bytes) + */ + +struct reg_dev_req { + __le32 tag; + __le32 phyid_portid; + __le32 dtype_dlr_mcn_ir_retry; + __le32 firstburstsize_ITNexustimeout; + u8 sas_addr[SAS_ADDR_SIZE]; + __le32 upper_device_id; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of DEREGISTER DEVICE Command + * use to request spc to remove all internal resources associated + * with the device id (64 bytes) + */ + +struct dereg_dev_req { + __le32 tag; + __le32 device_id; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of DEVICE_REGISTRATION Response + * use to notify the completion of the device registration (64 bytes) + */ +struct dev_reg_resp { + __le32 tag; + __le32 status; + __le32 device_id; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of Local PHY Control Command + * use to issue PHY CONTROL to local phy (64 bytes) + */ +struct local_phy_ctl_req { + __le32 tag; + __le32 phyop_phyid; + u32 reserved1[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Local Phy Control Response + * use to describe MPI Local Phy Control Response (64 bytes) + */ + struct local_phy_ctl_resp { + __le32 tag; + __le32 phyop_phyid; + __le32 status; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +#define OP_BITS 0x0000FF00 +#define ID_BITS 0x000000FF + +/* + * brief the data structure of PORT Control Command + * use to control port properties (64 bytes) + */ + +struct port_ctl_req { + __le32 tag; + __le32 portop_portid; + __le32 param0; + __le32 param1; + u32 reserved1[27]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of HW Event Ack Command + * use to acknowledge receive HW event (64 bytes) + */ +struct hw_event_ack_req { + __le32 tag; + __le32 phyid_sea_portid; + __le32 param0; + __le32 param1; + u32 reserved1[27]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY_START Response Command + * indicates the completion of PHY_START command (64 bytes) + */ +struct phy_start_resp { + __le32 tag; + __le32 status; + __le32 phyid; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of 
PHY_STOP Response Command
+ * indicates the completion of PHY_STOP command (64 bytes)
+ */
+struct phy_stop_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 phyid;
+ u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP Completion Response
+ * use to indicate a SSP Completion (n bytes)
+ */
+struct ssp_completion_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 param;
+ __le32 ssptag_rescv_rescpad;
+ struct ssp_response_iu ssp_resp_iu;
+ __le32 residual_count;
+} __attribute__((packed, aligned(4)));
+
+#define SSP_RESCV_BIT 0x00010000
+
+/*
+ * brief the data structure of SATA EVENT Response
+ * use to indicate a SATA Event (64 bytes)
+ */
+struct sata_event_resp {
+ __le32 tag;
+ __le32 event;
+ __le32 port_id;
+ __le32 device_id;
+ u32 reserved;
+ __le32 event_param0;
+ __le32 event_param1;
+ __le32 sata_addr_h32;
+ __le32 sata_addr_l32;
+ __le32 e_udt1_udt0_crc;
+ __le32 e_udt5_udt4_udt3_udt2;
+ __le32 a_udt1_udt0_crc;
+ __le32 a_udt5_udt4_udt3_udt2;
+ __le32 hwdevid_diferr;
+ __le32 err_framelen_byteoffset;
+ __le32 err_dataframe;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP EVENT Response
+ * use to indicate an SSP Event (64 bytes)
+ */
+struct ssp_event_resp {
+ __le32 tag;
+ __le32 event;
+ __le32 port_id;
+ __le32 device_id;
+ __le32 ssp_tag;
+ __le32 event_param0;
+ __le32 event_param1;
+ __le32 sas_addr_h32;
+ __le32 sas_addr_l32;
+ __le32 e_udt1_udt0_crc;
+ __le32 e_udt5_udt4_udt3_udt2;
+ __le32 a_udt1_udt0_crc;
+ __le32 a_udt5_udt4_udt3_udt2;
+ __le32 hwdevid_diferr;
+ __le32 err_framelen_byteoffset;
+ __le32 err_dataframe;
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of General Event Notification Response
+ * use to describe MPI General Event Notification Response (64 bytes)
+ */
+struct general_event_resp {
+ __le32 status;
+ __le32 inb_IOMB_payload[14];
+} __attribute__((packed, aligned(4)));
+
+#define GENERAL_EVENT_PAYLOAD 14
+#define OPCODE_BITS 0x00000fff
+
+/*
+ * brief the data structure of SMP Request Command
+ * use to describe MPI SMP REQUEST Command (64 bytes)
+ */
+struct smp_req {
+ __le32 tag;
+ __le32 device_id;
+ __le32 len_ip_ir;
+ /* Bits [0] - Indirect response */
+ /* Bits [1] - Indirect Payload */
+ /* Bits [15:2] - Reserved */
+ /* Bits [23:16] - direct payload Len */
+ /* Bits [31:24] - Reserved */
+ u8 smp_req16[16];
+ union {
+ u8 smp_req[32];
+ struct {
+ __le64 long_req_addr;/* sg dma address, LE */
+ __le32 long_req_size;/* LE */
+ u32 _r_a;
+ __le64 long_resp_addr;/* sg dma address, LE */
+ __le32 long_resp_size;/* LE */
+ u32 _r_b;
+ } long_smp_req;/* sequencer extension */
+ };
+ __le32 rsvd[16];
+} __attribute__((packed, aligned(4)));
+/*
+ * brief the data structure of SMP Completion Response
+ * use to describe MPI SMP Completion Response (64 bytes)
+ */
+struct smp_completion_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 param;
+ u8 _r_a[252];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP SMP SATA Abort Command
+ * use to describe MPI SSP SMP & SATA Abort Command (64 bytes)
+ */
+struct task_abort_req {
+ __le32 tag;
+ __le32 device_id;
+ __le32 tag_to_abort;
+ __le32 abort_all;
+ u32 reserved[27];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SSP SATA SMP Abort Response
+ * use to describe SSP SMP & SATA Abort Response (64 bytes)
+ */
+struct task_abort_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 scp;
+ u32 reserved[12];
+} __attribute__((packed,
aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Start/End Command + * use to describe MPI SAS Diagnostic Start/End Command (64 bytes) + */ +struct sas_diag_start_end_req { + __le32 tag; + __le32 operation_phyid; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Execute Command + * use to describe MPI SAS Diagnostic Execute Command (64 bytes) + */ +struct sas_diag_execute_req { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 pat1_pat2; + __le32 threshold; + __le32 codepat_errmsk; + __le32 pmon; + __le32 pERF1CTL; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +#define SAS_DIAG_PARAM_BYTES 24 + +/* + * brief the data structure of Set Device State Command + * use to describe MPI Set Device State Command (64 bytes) + */ +struct set_dev_state_req { + __le32 tag; + __le32 device_id; + __le32 nds; + u32 reserved[28]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SATA Start Command + * use to describe MPI SATA IO Start Command (64 bytes) + * Note: This structure is common for normal / encryption I/O + */ + +struct sata_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 retfis_ncqtag_atap_dir_m_dad; + struct host_to_dev_fis sata_fis; + u32 reserved1; + u32 reserved2; /* dword 11. rsvd for normal I/O. */ + /* EPLE Descl for enc I/O */ + u32 addr_low; /* dword 12. rsvd for enc I/O */ + u32 addr_high; /* dword 13. reserved for enc I/O */ + __le32 len; /* dword 14: length for normal I/O. */ + /* EPLE Desch for enc I/O */ + __le32 esgl; /* dword 15. rsvd for enc I/O */ + __le32 atapi_scsi_cdb[4]; /* dword 16-19. rsvd for enc I/O */ + /* The below fields are reserved for normal I/O */ + __le32 key_index_mode; /* dword 20 */ + __le32 sector_cnt_enss;/* dword 21 */ + __le32 keytagl; /* dword 22 */ + __le32 keytagh; /* dword 23 */ + __le32 twk_val0; /* dword 24 */ + __le32 twk_val1; /* dword 25 */ + __le32 twk_val2; /* dword 26 */ + __le32 twk_val3; /* dword 27 */ + __le32 enc_addr_low; /* dword 28. Encryption SGL address high */ + __le32 enc_addr_high; /* dword 29. Encryption SGL address low */ + __le32 enc_len; /* dword 30. Encryption length */ + __le32 enc_esgl; /* dword 31. Encryption esgl bit */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI TM Start Command + * use to describe MPI SSP INI TM Start Command (64 bytes) + */ +struct ssp_ini_tm_start_req { + __le32 tag; + __le32 device_id; + __le32 relate_tag; + __le32 tmf; + u8 lun[8]; + __le32 ds_ads_m; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +struct ssp_info_unit { + u8 lun[8];/* SCSI Logical Unit Number */ + u8 reserved1;/* reserved */ + u8 efb_prio_attr; + /* B7 : enabledFirstBurst */ + /* B6-3 : taskPriority */ + /* B2-0 : taskAttribute */ + u8 reserved2; /* reserved */ + u8 additional_cdb_len; + /* B7-2 : additional_cdb_len */ + /* B1-0 : reserved */ + u8 cdb[16];/* The SCSI CDB up to 16 bytes length */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI IO Start Command + * use to describe MPI SSP INI IO Start Command (64 bytes) + * Note: This structure is common for normal / encryption I/O + */ +struct ssp_ini_io_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dad_dir_m_tlr; + struct ssp_info_unit ssp_iu; + __le32 addr_low; /* dword 12: sgl low for normal I/O. 
*/ + /* epl_descl for encryption I/O */ + __le32 addr_high; /* dword 13: sgl hi for normal I/O */ + /* dpl_descl for encryption I/O */ + __le32 len; /* dword 14: len for normal I/O. */ + /* edpl_desch for encryption I/O */ + __le32 esgl; /* dword 15: ESGL bit for normal I/O. */ + /* user defined tag mask for enc I/O */ + /* The below fields are reserved for normal I/O */ + u8 udt[12]; /* dword 16-18 */ + __le32 sectcnt_ios; /* dword 19 */ + __le32 key_cmode; /* dword 20 */ + __le32 ks_enss; /* dword 21 */ + __le32 keytagl; /* dword 22 */ + __le32 keytagh; /* dword 23 */ + __le32 twk_val0; /* dword 24 */ + __le32 twk_val1; /* dword 25 */ + __le32 twk_val2; /* dword 26 */ + __le32 twk_val3; /* dword 27 */ + __le32 enc_addr_low; /* dword 28: Encryption sgl addr low */ + __le32 enc_addr_high; /* dword 29: Encryption sgl addr hi */ + __le32 enc_len; /* dword 30: Encryption length */ + __le32 enc_esgl; /* dword 31: ESGL bit for encryption */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SSP_INI_DIF_ENC_IO COMMAND + * use to initiate SSP I/O operation with optional DIF/ENC + */ +struct ssp_dif_enc_io_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dirMTlr; + __le32 sspiu0; + __le32 sspiu1; + __le32 sspiu2; + __le32 sspiu3; + __le32 sspiu4; + __le32 sspiu5; + __le32 sspiu6; + __le32 epl_des; + __le32 dpl_desl_ndplr; + __le32 dpl_desh; + __le32 uum_uuv_bss_difbits; + u8 udt[12]; + __le32 sectcnt_ios; + __le32 key_cmode; + __le32 ks_enss; + __le32 keytagl; + __le32 keytagh; + __le32 twk_val0; + __le32 twk_val1; + __le32 twk_val2; + __le32 twk_val3; + __le32 addr_low; + __le32 addr_high; + __le32 len; + __le32 esgl; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Firmware download + * use to describe MPI FW DOWNLOAD Command (64 bytes) + */ +struct fw_flash_Update_req { + __le32 tag; + __le32 cur_image_offset; + __le32 cur_image_len; + __le32 total_image_len; + u32 reserved0[7]; + __le32 sgl_addr_lo; + __le32 sgl_addr_hi; + __le32 len; + __le32 ext_reserved; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +#define FWFLASH_IOMB_RESERVED_LEN 0x07 +/** + * brief the data structure of FW_FLASH_UPDATE Response + * use to describe MPI FW_FLASH_UPDATE Response (64 bytes) + * + */ + struct fw_flash_Update_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Get NVM Data Command + * use to get data from NVM in HBA(64 bytes) + */ +struct get_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1[17]; +} __attribute__((packed, aligned(4))); + +struct set_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1[17]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SET CONTROLLER CONFIG COMMAND + * use to modify controller configuration + */ +struct set_ctrl_cfg_req { + __le32 tag; + __le32 cfg_pg[14]; + u32 reserved[16]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for GET CONTROLLER CONFIG COMMAND + * use to get controller configuration page + */ +struct get_ctrl_cfg_req { + __le32 tag; + __le32 pgcd; + __le32 int_vec; + u32 reserved[28]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for KEK_MANAGEMENT COMMAND + * 
use for KEK management
+ */
+struct kek_mgmt_req {
+ __le32 tag;
+ __le32 new_curidx_ksop;
+ u32 reserved;
+ __le32 kblob[12];
+ u32 reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for DEK_MANAGEMENT COMMAND
+ * use for DEK management
+ */
+struct dek_mgmt_req {
+ __le32 tag;
+ __le32 kidx_dsop;
+ __le32 dekidx;
+ __le32 addr_l;
+ __le32 addr_h;
+ __le32 nent;
+ __le32 dbf_tblsize;
+ u32 reserved[24];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SET PHY PROFILE COMMAND
+ * use to set phy specific information
+ */
+struct set_phy_profile_req {
+ __le32 tag;
+ __le32 ppc_phyid;
+ __le32 reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for GET PHY PROFILE COMMAND
+ * use to retrieve phy specific information
+ */
+struct get_phy_profile_req {
+ __le32 tag;
+ __le32 ppc_phyid;
+ __le32 profile[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for EXT FLASH PARTITION
+ * use to manage ext flash partition
+ */
+struct ext_flash_partition_req {
+ __le32 tag;
+ __le32 cmd;
+ __le32 offset;
+ __le32 len;
+ u32 reserved[7];
+ __le32 addr_low;
+ __le32 addr_high;
+ __le32 len1;
+ __le32 ext;
+ u32 reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+#define TWI_DEVICE 0x0
+#define C_SEEPROM 0x1
+#define VPD_FLASH 0x4
+#define AAP1_RDUMP 0x5
+#define IOP_RDUMP 0x6
+#define EXPAN_ROM 0x7
+
+#define IPMode 0x80000000
+#define NVMD_TYPE 0x0000000F
+#define NVMD_STAT 0x0000FFFF
+#define NVMD_LEN 0xFF000000
+/**
+ * brief the data structure of Get NVMD Data Response
+ * use to describe MPI Get NVMD Data Response (64 bytes)
+ */
+struct get_nvm_data_resp {
+ __le32 tag;
+ __le32 ir_tda_bn_dps_das_nvm;
+ __le32 dlen_status;
+ __le32 nvm_data[12];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Response
+ * use to describe MPI SAS Diagnostic Start/End Response (64 bytes)
+ *
+ */
+struct sas_diag_start_end_resp {
+ __le32 tag;
+ __le32 status;
+ u32 reserved[13];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Response
+ * use to describe MPI SAS Diagnostic Execute Response (64 bytes)
+ *
+ */
+struct sas_diag_execute_resp {
+ __le32 tag;
+ __le32 cmdtype_cmddesc_phyid;
+ __le32 Status;
+ __le32 ReportData;
+ u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Set Device State Response
+ * use to describe MPI Set Device State Response (64 bytes)
+ *
+ */
+struct set_dev_state_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 device_id;
+ __le32 pds_nds;
+ u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/* new outbound structure for spcv - begins */
+/**
+ * brief the data structure for SET CONTROLLER CONFIG COMMAND
+ * use to modify controller configuration
+ */
+struct set_ctrl_cfg_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 err_qlfr_pgcd;
+ u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+struct get_ctrl_cfg_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 err_qlfr;
+ __le32 confg_page[12];
+} __attribute__((packed, aligned(4)));
+
+struct kek_mgmt_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 kidx_new_curr_ksop;
+ __le32 err_qlfr;
+ u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+struct dek_mgmt_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 kekidx_tbls_dsop;
+ __le32 dekidx;
+ __le32 err_qlfr;
+ u32 reserved[10];
+} __attribute__((packed, aligned(4)));
+
+struct
get_phy_profile_resp { + __le32 tag; + __le32 status; + __le32 ppc_phyid; + __le32 ppc_specific_rsp[12]; +} __attribute__((packed, aligned(4))); + +struct flash_op_ext_resp { + __le32 tag; + __le32 cmd; + __le32 status; + __le32 epart_size; + __le32 epart_sect_size; + u32 reserved[10]; +} __attribute__((packed, aligned(4))); + +struct set_phy_profile_resp { + __le32 tag; + __le32 status; + __le32 ppc_phyid; + __le32 ppc_specific_rsp[12]; +} __attribute__((packed, aligned(4))); + +struct ssp_coalesced_comp_resp { + __le32 coal_cnt; + __le32 tag0; + __le32 ssp_tag0; + __le32 tag1; + __le32 ssp_tag1; + __le32 add_tag_ssp_tag[10]; +} __attribute__((packed, aligned(4))); + +/* new outbound structure for spcv - ends */ + +/* brief data structure for SAS protocol timer configuration page. + * + */ +struct SASProtocolTimerConfig { + __le32 pageCode; /* 0 */ + __le32 MST_MSI; /* 1 */ + __le32 STP_SSP_MCT_TMO; /* 2 */ + __le32 STP_FRM_TMO; /* 3 */ + __le32 STP_IDLE_TMO; /* 4 */ + __le32 OPNRJT_RTRY_INTVL; /* 5 */ + __le32 Data_Cmd_OPNRJT_RTRY_TMO; /* 6 */ + __le32 Data_Cmd_OPNRJT_RTRY_THR; /* 7 */ + __le32 MAX_AIP; /* 8 */ +} __attribute__((packed, aligned(4))); + +typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t; + +#define NDS_BITS 0x0F +#define PDS_BITS 0xF0 + +/* + * HW Events type + */ + +#define HW_EVENT_RESET_START 0x01 +#define HW_EVENT_CHIP_RESET_COMPLETE 0x02 +#define HW_EVENT_PHY_STOP_STATUS 0x03 +#define HW_EVENT_SAS_PHY_UP 0x04 +#define HW_EVENT_SATA_PHY_UP 0x05 +#define HW_EVENT_SATA_SPINUP_HOLD 0x06 +#define HW_EVENT_PHY_DOWN 0x07 +#define HW_EVENT_PORT_INVALID 0x08 +#define HW_EVENT_BROADCAST_CHANGE 0x09 +#define HW_EVENT_PHY_ERROR 0x0A +#define HW_EVENT_BROADCAST_SES 0x0B +#define HW_EVENT_INBOUND_CRC_ERROR 0x0C +#define HW_EVENT_HARD_RESET_RECEIVED 0x0D +#define HW_EVENT_MALFUNCTION 0x0E +#define HW_EVENT_ID_FRAME_TIMEOUT 0x0F +#define HW_EVENT_BROADCAST_EXP 0x10 +#define HW_EVENT_PHY_START_STATUS 0x11 +#define HW_EVENT_LINK_ERR_INVALID_DWORD 0x12 +#define HW_EVENT_LINK_ERR_DISPARITY_ERROR 0x13 +#define HW_EVENT_LINK_ERR_CODE_VIOLATION 0x14 +#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH 0x15 +#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED 0x16 +#define HW_EVENT_PORT_RECOVERY_TIMER_TMO 0x17 +#define HW_EVENT_PORT_RECOVER 0x18 +#define HW_EVENT_PORT_RESET_TIMER_TMO 0x19 +#define HW_EVENT_PORT_RESET_COMPLETE 0x20 +#define EVENT_BROADCAST_ASYNCH_EVENT 0x21 + +/* port state */ +#define PORT_NOT_ESTABLISHED 0x00 +#define PORT_VALID 0x01 +#define PORT_LOSTCOMM 0x02 +#define PORT_IN_RESET 0x04 +#define PORT_3RD_PARTY_RESET 0x07 +#define PORT_INVALID 0x08 + +/* + * SSP/SMP/SATA IO Completion Status values + */ + +#define IO_SUCCESS 0x00 +#define IO_ABORTED 0x01 +#define IO_OVERFLOW 0x02 +#define IO_UNDERFLOW 0x03 +#define IO_FAILED 0x04 +#define IO_ABORT_RESET 0x05 +#define IO_NOT_VALID 0x06 +#define IO_NO_DEVICE 0x07 +#define IO_ILLEGAL_PARAMETER 0x08 +#define IO_LINK_FAILURE 0x09 +#define IO_PROG_ERROR 0x0A + +#define IO_EDC_IN_ERROR 0x0B +#define IO_EDC_OUT_ERROR 0x0C +#define IO_ERROR_HW_TIMEOUT 0x0D +#define IO_XFER_ERROR_BREAK 0x0E +#define IO_XFER_ERROR_PHY_NOT_READY 0x0F +#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED 0x10 +#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11 +#define IO_OPEN_CNX_ERROR_BREAK 0x12 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13 +#define IO_OPEN_CNX_ERROR_BAD_DESTINATION 0x14 +#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15 +#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16 +#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17 +/* This 
error code 0x18 is not used on SPCv */ +#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18 +#define IO_XFER_ERROR_NAK_RECEIVED 0x19 +#define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A +#define IO_XFER_ERROR_PEER_ABORTED 0x1B +#define IO_XFER_ERROR_RX_FRAME 0x1C +#define IO_XFER_ERROR_DMA 0x1D +#define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E +#define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F +#define IO_XFER_ERROR_SATA 0x20 + +/* This error code 0x22 is not used on SPCv */ +#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22 +#define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21 +#define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23 +#define IO_XFER_OPEN_RETRY_TIMEOUT 0x24 +/* This error code 0x25 is not used on SPCv */ +#define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25 +#define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26 +#define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27 +#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28 +#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30 + +/* The following error code 0x31 and 0x32 are not using (obsolete) */ +#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31 +#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32 + +#define IO_XFER_ERROR_OFFSET_MISMATCH 0x34 +#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35 +#define IO_XFER_CMD_FRAME_ISSUED 0x36 +#define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37 +#define IO_PORT_IN_RESET 0x38 +#define IO_DS_NON_OPERATIONAL 0x39 +#define IO_DS_IN_RECOVERY 0x3A +#define IO_TM_TAG_NOT_FOUND 0x3B +#define IO_XFER_PIO_SETUP_ERROR 0x3C +#define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D +#define IO_DS_IN_ERROR 0x3E +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F +#define IO_ABORT_IN_PROGRESS 0x40 +#define IO_ABORT_DELAYED 0x41 +#define IO_INVALID_LENGTH 0x42 + +/********** additional response event values *****************/ + +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY_ALT 0x43 +#define IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED 0x44 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO 0x45 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST 0x46 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE 0x47 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED 0x48 +#define IO_DS_INVALID 0x49 +#define IO_FATAL_ERROR 0x51 +/* WARNING: the value is not contiguous from here */ +#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR 0x52 +#define IO_XFER_DMA_ACTIVATE_TIMEOUT 0x53 +#define IO_XFER_ERROR_INTERNAL_CRC_ERROR 0x54 +#define MPI_IO_RQE_BUSY_FULL 0x55 +#define IO_XFER_ERR_EOB_DATA_OVERRUN 0x56 +#define IO_XFER_ERROR_INVALID_SSP_RSP_FRAME 0x57 +#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED 0x58 + +#define MPI_ERR_IO_RESOURCE_UNAVAILABLE 0x1004 +#define MPI_ERR_ATAPI_DEVICE_BUSY 0x1024 + +#define IO_XFR_ERROR_DEK_KEY_CACHE_MISS 0x2040 +/* + * An encryption IO request failed due to DEK Key Tag mismatch. + * The key tag supplied in the encryption IOMB does not match with + * the Key Tag in the referenced DEK Entry. + */ +#define IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH 0x2041 +#define IO_XFR_ERROR_CIPHER_MODE_INVALID 0x2042 +/* + * An encryption I/O request failed because the initial value (IV) + * in the unwrapped DEK blob didn't match the IV used to unwrap it. + */ +#define IO_XFR_ERROR_DEK_IV_MISMATCH 0x2043 +/* An encryption I/O request failed due to an internal RAM ECC or + * interface error while unwrapping the DEK. */ +#define IO_XFR_ERROR_DEK_RAM_INTERFACE_ERROR 0x2044 +/* An encryption I/O request failed due to an internal RAM ECC or + * interface error while unwrapping the DEK. 
*/ +#define IO_XFR_ERROR_INTERNAL_RAM 0x2045 +/* + * An encryption I/O request failed + * because the DEK index specified in the I/O was outside the bounds of + * the total number of entries in the host DEK table. + */ +#define IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS0x2046 + +/* define DIF IO response error status code */ +#define IO_XFR_ERROR_DIF_MISMATCH 0x3000 +#define IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH 0x3001 +#define IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH 0x3002 +#define IO_XFR_ERROR_DIF_CRC_MISMATCH 0x3003 + +/* define operator management response status and error qualifier code */ +#define OPR_MGMT_OP_NOT_SUPPORTED 0x2060 +#define OPR_MGMT_MPI_ENC_ERR_OPR_PARAM_ILLEGAL 0x2061 +#define OPR_MGMT_MPI_ENC_ERR_OPR_ID_NOT_FOUND 0x2062 +#define OPR_MGMT_MPI_ENC_ERR_OPR_ROLE_NOT_MATCH 0x2063 +#define OPR_MGMT_MPI_ENC_ERR_OPR_MAX_NUM_EXCEEDED 0x2064 +#define OPR_MGMT_MPI_ENC_ERR_KEK_UNWRAP_FAIL 0x2022 +#define OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE 0x2023 +/***************** additional response event values ***************/ + +/* WARNING: This error code must always be the last number. + * If you add error code, modify this code also + * It is used as an index + */ +#define IO_ERROR_UNKNOWN_GENERIC 0x2023 + +/* MSGU CONFIGURATION TABLE*/ + +#define SPCv_MSGU_CFG_TABLE_UPDATE 0x001 +#define SPCv_MSGU_CFG_TABLE_RESET 0x002 +#define SPCv_MSGU_CFG_TABLE_FREEZE 0x004 +#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x008 +#define MSGU_IBDB_SET 0x00 +#define MSGU_HOST_INT_STATUS 0x08 +#define MSGU_HOST_INT_MASK 0x0C +#define MSGU_IOPIB_INT_STATUS 0x18 +#define MSGU_IOPIB_INT_MASK 0x1C +#define MSGU_IBDB_CLEAR 0x20 + +#define MSGU_MSGU_CONTROL 0x24 +#define MSGU_ODR 0x20 +#define MSGU_ODCR 0x28 + +#define MSGU_ODMR 0x30 +#define MSGU_ODMR_U 0x34 +#define MSGU_ODMR_CLR 0x38 +#define MSGU_ODMR_CLR_U 0x3C +#define MSGU_OD_RSVD 0x40 + +#define MSGU_SCRATCH_PAD_0 0x44 +#define MSGU_SCRATCH_PAD_1 0x48 +#define MSGU_SCRATCH_PAD_2 0x4C +#define MSGU_SCRATCH_PAD_3 0x50 +#define MSGU_HOST_SCRATCH_PAD_0 0x54 +#define MSGU_HOST_SCRATCH_PAD_1 0x58 +#define MSGU_HOST_SCRATCH_PAD_2 0x5C +#define MSGU_HOST_SCRATCH_PAD_3 0x60 +#define MSGU_HOST_SCRATCH_PAD_4 0x64 +#define MSGU_HOST_SCRATCH_PAD_5 0x68 +#define MSGU_SCRATCH_PAD_RSVD_0 0x6C +#define MSGU_SCRATCH_PAD_RSVD_1 0x70 + +#define MSGU_SCRATCHPAD1_RAAE_STATE_ERR(x) ((x & 0x3) == 0x2) +#define MSGU_SCRATCHPAD1_ILA_STATE_ERR(x) (((x >> 2) & 0x3) == 0x2) +#define MSGU_SCRATCHPAD1_BOOTLDR_STATE_ERR(x) ((((x >> 4) & 0x7) == 0x7) || \ + (((x >> 4) & 0x7) == 0x4)) +#define MSGU_SCRATCHPAD1_IOP0_STATE_ERR(x) (((x >> 10) & 0x3) == 0x2) +#define MSGU_SCRATCHPAD1_IOP1_STATE_ERR(x) (((x >> 12) & 0x3) == 0x2) +#define MSGU_SCRATCHPAD1_STATE_FATAL_ERROR(x) \ + (MSGU_SCRATCHPAD1_RAAE_STATE_ERR(x) || \ + MSGU_SCRATCHPAD1_ILA_STATE_ERR(x) || \ + MSGU_SCRATCHPAD1_BOOTLDR_STATE_ERR(x) || \ + MSGU_SCRATCHPAD1_IOP0_STATE_ERR(x) || \ + MSGU_SCRATCHPAD1_IOP1_STATE_ERR(x)) + +/* bit definition for ODMR register */ +#define ODMR_MASK_ALL 0xFFFFFFFF/* mask all + interrupt vector */ +#define ODMR_CLEAR_ALL 0 /* clear all + interrupt vector */ +/* bit definition for ODCR register */ +#define ODCR_CLEAR_ALL 0xFFFFFFFF /* mask all + interrupt vector*/ +/* MSIX Interupts */ +#define MSIX_TABLE_OFFSET 0x2000 +#define MSIX_TABLE_ELEMENT_SIZE 0x10 +#define MSIX_INTERRUPT_CONTROL_OFFSET 0xC +#define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + \ + MSIX_INTERRUPT_CONTROL_OFFSET) +#define MSIX_INTERRUPT_DISABLE 0x1 +#define MSIX_INTERRUPT_ENABLE 0x0 + +/* state definition for Scratch Pad1 register */ 
+#define SCRATCH_PAD_RAAE_READY 0x3 +#define SCRATCH_PAD_ILA_READY 0xC +#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0 +#define SCRATCH_PAD_IOP0_READY 0xC00 +#define SCRATCH_PAD_IOP1_READY 0x3000 +#define SCRATCH_PAD_MIPSALL_READY_16PORT (SCRATCH_PAD_IOP1_READY | \ + SCRATCH_PAD_IOP0_READY | \ + SCRATCH_PAD_ILA_READY | \ + SCRATCH_PAD_RAAE_READY) +#define SCRATCH_PAD_MIPSALL_READY_8PORT (SCRATCH_PAD_IOP0_READY | \ + SCRATCH_PAD_ILA_READY | \ + SCRATCH_PAD_RAAE_READY) + +/* boot loader state */ +#define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */ +#define SCRATCH_PAD1_BOOTSTATE_SUCESS 0x0 /* Load successful */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM 0x10 /* HDA SEEPROM */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP 0x20 /* HDA BootStrap Pins */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET 0x30 /* HDA Soft Reset */ +#define SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR 0x40 /* HDA critical error */ +#define SCRATCH_PAD1_BOOTSTATE_R1 0x50 /* Reserved */ +#define SCRATCH_PAD1_BOOTSTATE_R2 0x60 /* Reserved */ +#define SCRATCH_PAD1_BOOTSTATE_FATAL 0x70 /* Fatal Error */ + + /* state definition for Scratch Pad2 register */ +#define SCRATCH_PAD2_POR 0x00 /* power on state */ +#define SCRATCH_PAD2_SFR 0x01 /* soft reset state */ +#define SCRATCH_PAD2_ERR 0x02 /* error state */ +#define SCRATCH_PAD2_RDY 0x03 /* ready state */ +#define SCRATCH_PAD2_FWRDY_RST 0x04 /* FW rdy for soft reset flag */ +#define SCRATCH_PAD2_IOPRDY_RST 0x08 /* IOP ready for soft reset */ +#define SCRATCH_PAD2_STATE_MASK 0xFFFFFFF4 /* ScratchPad 2 + Mask, bit1-0 State */ +#define SCRATCH_PAD2_RESERVED 0x000003FC/* Scratch Pad1 + Reserved bit 2 to 9 */ + +#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */ +#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */ + +/*state definition for Scratchpad Rsvd 0, Offset 0x6C, Non-fatal*/ +#define NON_FATAL_SPBC_LBUS_ECC_ERR 0x70000001 +#define NON_FATAL_BDMA_ERR 0xE0000001 +#define NON_FATAL_THERM_OVERTEMP_ERR 0x80000001 + +/* main configuration offset - byte offset */ +#define MAIN_SIGNATURE_OFFSET 0x00 /* DWORD 0x00 */ +#define MAIN_INTERFACE_REVISION 0x04 /* DWORD 0x01 */ +#define MAIN_FW_REVISION 0x08 /* DWORD 0x02 */ +#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C /* DWORD 0x03 */ +#define MAIN_MAX_SGL_OFFSET 0x10 /* DWORD 0x04 */ +#define MAIN_CNTRL_CAP_OFFSET 0x14 /* DWORD 0x05 */ +#define MAIN_GST_OFFSET 0x18 /* DWORD 0x06 */ +#define MAIN_IBQ_OFFSET 0x1C /* DWORD 0x07 */ +#define MAIN_OBQ_OFFSET 0x20 /* DWORD 0x08 */ +#define MAIN_IQNPPD_HPPD_OFFSET 0x24 /* DWORD 0x09 */ + +/* 0x28 - 0x4C - RSVD */ +#define MAIN_EVENT_CRC_CHECK 0x48 /* DWORD 0x12 */ +#define MAIN_EVENT_LOG_ADDR_HI 0x50 /* DWORD 0x14 */ +#define MAIN_EVENT_LOG_ADDR_LO 0x54 /* DWORD 0x15 */ +#define MAIN_EVENT_LOG_BUFF_SIZE 0x58 /* DWORD 0x16 */ +#define MAIN_EVENT_LOG_OPTION 0x5C /* DWORD 0x17 */ +#define MAIN_PCS_EVENT_LOG_ADDR_HI 0x60 /* DWORD 0x18 */ +#define MAIN_PCS_EVENT_LOG_ADDR_LO 0x64 /* DWORD 0x19 */ +#define MAIN_PCS_EVENT_LOG_BUFF_SIZE 0x68 /* DWORD 0x1A */ +#define MAIN_PCS_EVENT_LOG_OPTION 0x6C /* DWORD 0x1B */ +#define MAIN_FATAL_ERROR_INTERRUPT 0x70 /* DWORD 0x1C */ +#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74 /* DWORD 0x1D */ +#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78 /* DWORD 0x1E */ +#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C /* DWORD 0x1F */ +#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80 /* DWORD 0x20 */ +#define MAIN_GPIO_LED_FLAGS_OFFSET 0x84 /* DWORD 0x21 */ +#define MAIN_ANALOG_SETUP_OFFSET 0x88 /* DWORD 0x22 */ + +#define MAIN_INT_VECTOR_TABLE_OFFSET 0x8C /* DWORD 0x23 */ 
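As a rough illustration (again, not code from this patch) of how the Scratch Pad 1 definitions above are typically combined: the host reads MSGU_SCRATCH_PAD_1 through pm8001_cr32(), the register accessor already used in pm80xx_hwi.c, rejects any fatal per-block state, and then requires every MIPS block to report its ready pattern. The function name below is a hypothetical example and assumes the surrounding driver types (struct pm8001_hba_info, bool from linux/types.h).

static bool example_fw_ready_16port(struct pm8001_hba_info *pm8001_ha)
{
	u32 sp1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);

	/* any RAAE/ILA/bootloader/IOP error state is treated as fatal */
	if (MSGU_SCRATCHPAD1_STATE_FATAL_ERROR(sp1))
		return false;

	/* RAAE, ILA, IOP0 and IOP1 must all report their ready pattern */
	return (sp1 & SCRATCH_PAD_MIPSALL_READY_16PORT) ==
			SCRATCH_PAD_MIPSALL_READY_16PORT;
}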
+#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */ +#define MAIN_PORT_RECOVERY_TIMER 0x94 /* DWORD 0x25 */ +#define MAIN_INT_REASSERTION_DELAY 0x98 /* DWORD 0x26 */ +#define MAIN_MPI_ILA_RELEASE_TYPE 0xA4 /* DWORD 0x29 */ +#define MAIN_MPI_INACTIVE_FW_VERSION 0XB0 /* DWORD 0x2C */ + +/* Gereral Status Table offset - byte offset */ +#define GST_GSTLEN_MPIS_OFFSET 0x00 +#define GST_IQ_FREEZE_STATE0_OFFSET 0x04 +#define GST_IQ_FREEZE_STATE1_OFFSET 0x08 +#define GST_MSGUTCNT_OFFSET 0x0C +#define GST_IOPTCNT_OFFSET 0x10 +/* 0x14 - 0x34 - RSVD */ +#define GST_GPIO_INPUT_VAL 0x38 +/* 0x3c - 0x40 - RSVD */ +#define GST_RERRINFO_OFFSET0 0x44 +#define GST_RERRINFO_OFFSET1 0x48 +#define GST_RERRINFO_OFFSET2 0x4c +#define GST_RERRINFO_OFFSET3 0x50 +#define GST_RERRINFO_OFFSET4 0x54 +#define GST_RERRINFO_OFFSET5 0x58 +#define GST_RERRINFO_OFFSET6 0x5c +#define GST_RERRINFO_OFFSET7 0x60 + +/* General Status Table - MPI state */ +#define GST_MPI_STATE_UNINIT 0x00 +#define GST_MPI_STATE_INIT 0x01 +#define GST_MPI_STATE_TERMINATION 0x02 +#define GST_MPI_STATE_ERROR 0x03 +#define GST_MPI_STATE_MASK 0x07 + +/* Per SAS PHY Attributes */ + +#define PSPA_PHYSTATE0_OFFSET 0x00 /* Dword V */ +#define PSPA_OB_HW_EVENT_PID0_OFFSET 0x04 /* DWORD V+1 */ +#define PSPA_PHYSTATE1_OFFSET 0x08 /* Dword V+2 */ +#define PSPA_OB_HW_EVENT_PID1_OFFSET 0x0C /* DWORD V+3 */ +#define PSPA_PHYSTATE2_OFFSET 0x10 /* Dword V+4 */ +#define PSPA_OB_HW_EVENT_PID2_OFFSET 0x14 /* DWORD V+5 */ +#define PSPA_PHYSTATE3_OFFSET 0x18 /* Dword V+6 */ +#define PSPA_OB_HW_EVENT_PID3_OFFSET 0x1C /* DWORD V+7 */ +#define PSPA_PHYSTATE4_OFFSET 0x20 /* Dword V+8 */ +#define PSPA_OB_HW_EVENT_PID4_OFFSET 0x24 /* DWORD V+9 */ +#define PSPA_PHYSTATE5_OFFSET 0x28 /* Dword V+10 */ +#define PSPA_OB_HW_EVENT_PID5_OFFSET 0x2C /* DWORD V+11 */ +#define PSPA_PHYSTATE6_OFFSET 0x30 /* Dword V+12 */ +#define PSPA_OB_HW_EVENT_PID6_OFFSET 0x34 /* DWORD V+13 */ +#define PSPA_PHYSTATE7_OFFSET 0x38 /* Dword V+14 */ +#define PSPA_OB_HW_EVENT_PID7_OFFSET 0x3C /* DWORD V+15 */ +#define PSPA_PHYSTATE8_OFFSET 0x40 /* DWORD V+16 */ +#define PSPA_OB_HW_EVENT_PID8_OFFSET 0x44 /* DWORD V+17 */ +#define PSPA_PHYSTATE9_OFFSET 0x48 /* DWORD V+18 */ +#define PSPA_OB_HW_EVENT_PID9_OFFSET 0x4C /* DWORD V+19 */ +#define PSPA_PHYSTATE10_OFFSET 0x50 /* DWORD V+20 */ +#define PSPA_OB_HW_EVENT_PID10_OFFSET 0x54 /* DWORD V+21 */ +#define PSPA_PHYSTATE11_OFFSET 0x58 /* DWORD V+22 */ +#define PSPA_OB_HW_EVENT_PID11_OFFSET 0x5C /* DWORD V+23 */ +#define PSPA_PHYSTATE12_OFFSET 0x60 /* DWORD V+24 */ +#define PSPA_OB_HW_EVENT_PID12_OFFSET 0x64 /* DWORD V+25 */ +#define PSPA_PHYSTATE13_OFFSET 0x68 /* DWORD V+26 */ +#define PSPA_OB_HW_EVENT_PID13_OFFSET 0x6c /* DWORD V+27 */ +#define PSPA_PHYSTATE14_OFFSET 0x70 /* DWORD V+28 */ +#define PSPA_OB_HW_EVENT_PID14_OFFSET 0x74 /* DWORD V+29 */ +#define PSPA_PHYSTATE15_OFFSET 0x78 /* DWORD V+30 */ +#define PSPA_OB_HW_EVENT_PID15_OFFSET 0x7c /* DWORD V+31 */ +/* end PSPA */ + +/* inbound queue configuration offset - byte offset */ +#define IB_PROPERITY_OFFSET 0x00 +#define IB_BASE_ADDR_HI_OFFSET 0x04 +#define IB_BASE_ADDR_LO_OFFSET 0x08 +#define IB_CI_BASE_ADDR_HI_OFFSET 0x0C +#define IB_CI_BASE_ADDR_LO_OFFSET 0x10 +#define IB_PIPCI_BAR 0x14 +#define IB_PIPCI_BAR_OFFSET 0x18 +#define IB_RESERVED_OFFSET 0x1C + +/* outbound queue configuration offset - byte offset */ +#define OB_PROPERITY_OFFSET 0x00 +#define OB_BASE_ADDR_HI_OFFSET 0x04 +#define OB_BASE_ADDR_LO_OFFSET 0x08 +#define OB_PI_BASE_ADDR_HI_OFFSET 0x0C +#define OB_PI_BASE_ADDR_LO_OFFSET 
0x10 +#define OB_CIPCI_BAR 0x14 +#define OB_CIPCI_BAR_OFFSET 0x18 +#define OB_INTERRUPT_COALES_OFFSET 0x1C +#define OB_DYNAMIC_COALES_OFFSET 0x20 +#define OB_PROPERTY_INT_ENABLE 0x40000000 + +#define MBIC_NMI_ENABLE_VPE0_IOP 0x000418 +#define MBIC_NMI_ENABLE_VPE0_AAP1 0x000418 +/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */ +#define PCIE_EVENT_INTERRUPT_ENABLE 0x003040 +#define PCIE_EVENT_INTERRUPT 0x003044 +#define PCIE_ERROR_INTERRUPT_ENABLE 0x003048 +#define PCIE_ERROR_INTERRUPT 0x00304C + +/* SPCV soft reset */ +#define SPC_REG_SOFT_RESET 0x00001000 +#define SPCv_NORMAL_RESET_VALUE 0x1 + +#define SPCv_SOFT_RESET_READ_MASK 0xC0 +#define SPCv_SOFT_RESET_NO_RESET 0x0 +#define SPCv_SOFT_RESET_NORMAL_RESET_OCCURED 0x40 +#define SPCv_SOFT_RESET_HDA_MODE_OCCURED 0x80 +#define SPCv_SOFT_RESET_CHIP_RESET_OCCURED 0xC0 + +/* signature definition for host scratch pad0 register */ +#define SPC_SOFT_RESET_SIGNATURE 0x252acbcd +/* Signature for Soft Reset */ + +/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */ +#define SPC_REG_RESET 0x000000/* reset register */ + +/* bit definition for SPC_RESET register */ +#define SPC_REG_RESET_OSSP 0x00000001 +#define SPC_REG_RESET_RAAE 0x00000002 +#define SPC_REG_RESET_PCS_SPBC 0x00000004 +#define SPC_REG_RESET_PCS_IOP_SS 0x00000008 +#define SPC_REG_RESET_PCS_AAP1_SS 0x00000010 +#define SPC_REG_RESET_PCS_AAP2_SS 0x00000020 +#define SPC_REG_RESET_PCS_LM 0x00000040 +#define SPC_REG_RESET_PCS 0x00000080 +#define SPC_REG_RESET_GSM 0x00000100 +#define SPC_REG_RESET_DDR2 0x00010000 +#define SPC_REG_RESET_BDMA_CORE 0x00020000 +#define SPC_REG_RESET_BDMA_SXCBI 0x00040000 +#define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000 +#define SPC_REG_RESET_PCIE_PWR 0x00100000 +#define SPC_REG_RESET_PCIE_SFT 0x00200000 +#define SPC_REG_RESET_PCS_SXCBI 0x00400000 +#define SPC_REG_RESET_LMS_SXCBI 0x00800000 +#define SPC_REG_RESET_PMIC_SXCBI 0x01000000 +#define SPC_REG_RESET_PMIC_CORE 0x02000000 +#define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000 +#define SPC_REG_RESET_DEVICE 0x80000000 + +/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */ +#define SPCV_IBW_AXI_TRANSLATION_LOW 0x001010 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +/* Dynamic map through Bar4 - 0x00700000 */ +#define GSM_CONFIG_RESET 0x00000000 +#define RAM_ECC_DB_ERR 0x00000018 +#define GSM_READ_ADDR_PARITY_INDIC 0x00000058 +#define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060 +#define GSM_WRITE_DATA_PARITY_INDIC 0x00000068 +#define GSM_READ_ADDR_PARITY_CHECK 0x00000038 +#define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040 +#define GSM_WRITE_DATA_PARITY_CHECK 0x00000048 + +#define RB6_ACCESS_REG 0x6A0000 +#define HDAC_EXEC_CMD 0x0002 +#define HDA_C_PA 0xcb +#define HDA_SEQ_ID_BITS 0x00ff0000 +#define HDA_GSM_OFFSET_BITS 0x00FFFFFF +#define HDA_GSM_CMD_OFFSET_BITS 0x42C0 +#define HDA_GSM_RSP_OFFSET_BITS 0x42E0 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +#define SPC_TOP_LEVEL_ADDR_BASE 0x000000 +#define GSM_CONFIG_RESET_VALUE 0x00003b00 +#define GPIO_ADDR_BASE 0x00090000 +#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c + +/* RB6 offset */ +#define SPC_RB6_OFFSET 0x80C0 +/* Magic number of soft reset for RB6 */ +#define RB6_MAGIC_NUMBER_RST 0x1234 + +/* Device Register status */ +#define DEVREG_SUCCESS 0x00 +#define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01 +#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02 +#define DEVREG_FAILURE_INVALID_PHY_ID 0x03 +#define 
DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04 +#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05 +#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 +#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 + + +#define MEMBASE_II_SHIFT_REGISTER 0x1010 +#endif + +/** + * As we know sleep (1~20) ms may result in sleep longer than ~20 ms, hence we + * choose 20 ms interval. + */ +#define FW_READY_INTERVAL 20 diff --git a/drivers/scsi/pm8001/pm80xx_tracepoints.c b/drivers/scsi/pm8001/pm80xx_tracepoints.c new file mode 100644 index 000000000..344aface9 --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_tracepoints.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Trace events in pm8001 driver. + * + * Copyright 2020 Google LLC + * Author: Akshat Jain + */ + +#define CREATE_TRACE_POINTS +#include "pm80xx_tracepoints.h" diff --git a/drivers/scsi/pm8001/pm80xx_tracepoints.h b/drivers/scsi/pm8001/pm80xx_tracepoints.h new file mode 100644 index 000000000..5e669a8a9 --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_tracepoints.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Trace events in pm8001 driver. + * + * Copyright 2020 Google LLC + * Author: Akshat Jain + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM pm80xx + +#if !defined(_TRACE_PM80XX_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PM80XX_H + +#include +#include "pm8001_sas.h" + +TRACE_EVENT(pm80xx_request_issue, + TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode, + u16 ata_opcode, int running_req), + + TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req), + + TP_STRUCT__entry( + __field(u32, id) + __field(u32, phy_id) + __field(u32, htag) + __field(u32, ctlr_opcode) + __field(u16, ata_opcode) + __field(int, running_req) + ), + + TP_fast_assign( + __entry->id = id; + __entry->phy_id = phy_id; + __entry->htag = htag; + __entry->ctlr_opcode = ctlr_opcode; + __entry->ata_opcode = ata_opcode; + __entry->running_req = running_req; + ), + + TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d", + __entry->id, __entry->phy_id, __entry->htag, + __entry->ctlr_opcode, __entry->ata_opcode, + __entry->running_req) +); + +TRACE_EVENT(pm80xx_request_complete, + TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode, + u16 ata_opcode, int running_req), + + TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req), + + TP_STRUCT__entry( + __field(u32, id) + __field(u32, phy_id) + __field(u32, htag) + __field(u32, ctlr_opcode) + __field(u16, ata_opcode) + __field(int, running_req) + ), + + TP_fast_assign( + __entry->id = id; + __entry->phy_id = phy_id; + __entry->htag = htag; + __entry->ctlr_opcode = ctlr_opcode; + __entry->ata_opcode = ata_opcode; + __entry->running_req = running_req; + ), + + TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d", + __entry->id, __entry->phy_id, __entry->htag, + __entry->ctlr_opcode, __entry->ata_opcode, + __entry->running_req) +); + +TRACE_EVENT(pm80xx_mpi_build_cmd, + TP_PROTO(u32 id, u32 opc, u32 htag, u32 qi, u32 pi, u32 ci), + + TP_ARGS(id, opc, htag, qi, pi, ci), + + TP_STRUCT__entry( + __field(u32, id) + __field(u32, opc) + __field(u32, htag) + __field(u32, qi) + __field(u32, pi) + __field(u32, ci) + ), + + TP_fast_assign( + __entry->id = id; + __entry->opc = opc; + __entry->htag = htag; + __entry->qi = qi; + __entry->pi = pi; + __entry->ci = ci; + ), + + TP_printk("ctlr_id = %u opc = %#x htag = %#x QI = %u PI = %u CI = %u", + __entry->id, __entry->opc, __entry->htag, 
__entry->qi, + __entry->pi, __entry->ci) +); + +#endif /* _TRACE_PM80XX_H_ */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE pm80xx_tracepoints + +#include diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c new file mode 100644 index 000000000..50dc30051 --- /dev/null +++ b/drivers/scsi/pmcraid.c @@ -0,0 +1,5394 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters + * + * Written By: Anil Ravindranath + * PMC-Sierra Inc + * + * Copyright (C) 2008, 2009 PMC Sierra Inc + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pmcraid.h" + +/* + * Module configuration parameters + */ +static unsigned int pmcraid_debug_log; +static unsigned int pmcraid_disable_aen; +static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST; +static unsigned int pmcraid_enable_msix; + +/* + * Data structures to support multiple adapters by the LLD. + * pmcraid_adapter_count - count of configured adapters + */ +static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0); + +/* + * Supporting user-level control interface through IOCTL commands. + * pmcraid_major - major number to use + * pmcraid_minor - minor number(s) to use + */ +static unsigned int pmcraid_major; +static struct class *pmcraid_class; +static DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS); + +/* + * Module parameters + */ +MODULE_AUTHOR("Anil Ravindranath"); +MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(PMCRAID_DRIVER_VERSION); + +module_param_named(log_level, pmcraid_log_level, uint, (S_IRUGO | S_IWUSR)); +MODULE_PARM_DESC(log_level, + "Enables firmware error code logging, default :1 high-severity" + " errors, 2: all errors including high-severity errors," + " 0: disables logging"); + +module_param_named(debug, pmcraid_debug_log, uint, (S_IRUGO | S_IWUSR)); +MODULE_PARM_DESC(debug, + "Enable driver verbose message logging. Set 1 to enable." + "(default: 0)"); + +module_param_named(disable_aen, pmcraid_disable_aen, uint, (S_IRUGO | S_IWUSR)); +MODULE_PARM_DESC(disable_aen, + "Disable driver aen notifications to apps. Set 1 to disable." + "(default: 0)"); + +/* chip specific constants for PMC MaxRAID controllers (same for + * 0x5220 and 0x8010 + */ +static struct pmcraid_chip_details pmcraid_chip_cfg[] = { + { + .ioastatus = 0x0, + .ioarrin = 0x00040, + .mailbox = 0x7FC30, + .global_intr_mask = 0x00034, + .ioa_host_intr = 0x0009C, + .ioa_host_intr_clr = 0x000A0, + .ioa_host_msix_intr = 0x7FC40, + .ioa_host_mask = 0x7FC28, + .ioa_host_mask_clr = 0x7FC28, + .host_ioa_intr = 0x00020, + .host_ioa_intr_clr = 0x00020, + .transop_timeout = 300 + } +}; + +/* + * PCI device ids supported by pmcraid driver + */ +static struct pci_device_id pmcraid_pci_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID), + 0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0] + }, + {} +}; + +MODULE_DEVICE_TABLE(pci, pmcraid_pci_table); + + + +/** + * pmcraid_slave_alloc - Prepare for commands to a device + * @scsi_dev: scsi device struct + * + * This function is called by mid-layer prior to sending any command to the new + * device. Stores resource entry details of the device in scsi_device struct. 
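+ * (The matching entry is located by walking pinstance->used_res_q and
+ * comparing the bus/target/lun derived from the config table entry with
+ * the scsi_device address, as done below.)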
+ * Queuecommand uses the resource handle and other details to fill up IOARCB + * while sending commands to the device. + * + * Return value: + * 0 on success / -ENXIO if device does not exist + */ +static int pmcraid_slave_alloc(struct scsi_device *scsi_dev) +{ + struct pmcraid_resource_entry *temp, *res = NULL; + struct pmcraid_instance *pinstance; + u8 target, bus, lun; + unsigned long lock_flags; + int rc = -ENXIO; + u16 fw_version; + + pinstance = shost_priv(scsi_dev->host); + + fw_version = be16_to_cpu(pinstance->inq_data->fw_version); + + /* Driver exposes VSET and GSCSI resources only; all other device types + * are not exposed. Resource list is synchronized using resource lock + * so any traversal or modifications to the list should be done inside + * this lock + */ + spin_lock_irqsave(&pinstance->resource_lock, lock_flags); + list_for_each_entry(temp, &pinstance->used_res_q, queue) { + + /* do not expose VSETs with order-ids > MAX_VSET_TARGETS */ + if (RES_IS_VSET(temp->cfg_entry)) { + if (fw_version <= PMCRAID_FW_VERSION_1) + target = temp->cfg_entry.unique_flags1; + else + target = le16_to_cpu(temp->cfg_entry.array_id) & 0xFF; + + if (target > PMCRAID_MAX_VSET_TARGETS) + continue; + bus = PMCRAID_VSET_BUS_ID; + lun = 0; + } else if (RES_IS_GSCSI(temp->cfg_entry)) { + target = RES_TARGET(temp->cfg_entry.resource_address); + bus = PMCRAID_PHYS_BUS_ID; + lun = RES_LUN(temp->cfg_entry.resource_address); + } else { + continue; + } + + if (bus == scsi_dev->channel && + target == scsi_dev->id && + lun == scsi_dev->lun) { + res = temp; + break; + } + } + + if (res) { + res->scsi_dev = scsi_dev; + scsi_dev->hostdata = res; + res->change_detected = 0; + atomic_set(&res->read_failures, 0); + atomic_set(&res->write_failures, 0); + rc = 0; + } + spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); + return rc; +} + +/** + * pmcraid_slave_configure - Configures a SCSI device + * @scsi_dev: scsi device struct + * + * This function is executed by SCSI mid layer just after a device is first + * scanned (i.e. it has responded to an INQUIRY). For VSET resources, the + * timeout value (default 30s) will be over-written to a higher value (60s) + * and max_sectors value will be over-written to 512. It also sets queue depth + * to host->cmd_per_lun value + * + * Return value: + * 0 on success + */ +static int pmcraid_slave_configure(struct scsi_device *scsi_dev) +{ + struct pmcraid_resource_entry *res = scsi_dev->hostdata; + + if (!res) + return 0; + + /* LLD exposes VSETs and Enclosure devices only */ + if (RES_IS_GSCSI(res->cfg_entry) && + scsi_dev->type != TYPE_ENCLOSURE) + return -ENXIO; + + pmcraid_info("configuring %x:%x:%x:%x\n", + scsi_dev->host->unique_id, + scsi_dev->channel, + scsi_dev->id, + (u8)scsi_dev->lun); + + if (RES_IS_GSCSI(res->cfg_entry)) { + scsi_dev->allow_restart = 1; + } else if (RES_IS_VSET(res->cfg_entry)) { + scsi_dev->allow_restart = 1; + blk_queue_rq_timeout(scsi_dev->request_queue, + PMCRAID_VSET_IO_TIMEOUT); + blk_queue_max_hw_sectors(scsi_dev->request_queue, + PMCRAID_VSET_MAX_SECTORS); + } + + /* + * We never want to report TCQ support for these types of devices. + */ + if (!RES_IS_GSCSI(res->cfg_entry) && !RES_IS_VSET(res->cfg_entry)) + scsi_dev->tagged_supported = 0; + + return 0; +} + +/** + * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it + * + * @scsi_dev: scsi device struct + * + * This is called by mid-layer before removing a device. Pointer assignments + * done in pmcraid_slave_alloc will be reset to NULL here. 
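+ * The resource entry itself stays on pinstance->used_res_q; only the
+ * mutual pointers between the resource and the scsi_device are cleared.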
+ * + * Return value + * none + */ +static void pmcraid_slave_destroy(struct scsi_device *scsi_dev) +{ + struct pmcraid_resource_entry *res; + + res = (struct pmcraid_resource_entry *)scsi_dev->hostdata; + + if (res) + res->scsi_dev = NULL; + + scsi_dev->hostdata = NULL; +} + +/** + * pmcraid_change_queue_depth - Change the device's queue depth + * @scsi_dev: scsi device struct + * @depth: depth to set + * + * Return value + * actual depth set + */ +static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth) +{ + if (depth > PMCRAID_MAX_CMD_PER_LUN) + depth = PMCRAID_MAX_CMD_PER_LUN; + return scsi_change_queue_depth(scsi_dev, depth); +} + +/** + * pmcraid_init_cmdblk - initializes a command block + * + * @cmd: pointer to struct pmcraid_cmd to be initialized + * @index: if >=0 first time initialization; otherwise reinitialization + * + * Return Value + * None + */ +static void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index) +{ + struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb); + dma_addr_t dma_addr = cmd->ioa_cb_bus_addr; + + if (index >= 0) { + /* first time initialization (called from probe) */ + u32 ioasa_offset = + offsetof(struct pmcraid_control_block, ioasa); + + cmd->index = index; + ioarcb->response_handle = cpu_to_le32(index << 2); + ioarcb->ioarcb_bus_addr = cpu_to_le64(dma_addr); + ioarcb->ioasa_bus_addr = cpu_to_le64(dma_addr + ioasa_offset); + ioarcb->ioasa_len = cpu_to_le16(sizeof(struct pmcraid_ioasa)); + } else { + /* re-initialization of various lengths, called once command is + * processed by IOA + */ + memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN); + ioarcb->hrrq_id = 0; + ioarcb->request_flags0 = 0; + ioarcb->request_flags1 = 0; + ioarcb->cmd_timeout = 0; + ioarcb->ioarcb_bus_addr &= cpu_to_le64(~0x1FULL); + ioarcb->ioadl_bus_addr = 0; + ioarcb->ioadl_length = 0; + ioarcb->data_transfer_length = 0; + ioarcb->add_cmd_param_length = 0; + ioarcb->add_cmd_param_offset = 0; + cmd->ioa_cb->ioasa.ioasc = 0; + cmd->ioa_cb->ioasa.residual_data_length = 0; + cmd->time_left = 0; + } + + cmd->cmd_done = NULL; + cmd->scsi_cmd = NULL; + cmd->release = 0; + cmd->completion_req = 0; + cmd->sense_buffer = NULL; + cmd->sense_buffer_dma = 0; + cmd->dma_handle = 0; + timer_setup(&cmd->timer, NULL, 0); +} + +/** + * pmcraid_reinit_cmdblk - reinitialize a command block + * + * @cmd: pointer to struct pmcraid_cmd to be reinitialized + * + * Return Value + * None + */ +static void pmcraid_reinit_cmdblk(struct pmcraid_cmd *cmd) +{ + pmcraid_init_cmdblk(cmd, -1); +} + +/** + * pmcraid_get_free_cmd - get a free cmd block from command block pool + * @pinstance: adapter instance structure + * + * Return Value: + * returns pointer to cmd block or NULL if no blocks are available + */ +static struct pmcraid_cmd *pmcraid_get_free_cmd( + struct pmcraid_instance *pinstance +) +{ + struct pmcraid_cmd *cmd = NULL; + unsigned long lock_flags; + + /* free cmd block list is protected by free_pool_lock */ + spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags); + + if (!list_empty(&pinstance->free_cmd_pool)) { + cmd = list_entry(pinstance->free_cmd_pool.next, + struct pmcraid_cmd, free_list); + list_del(&cmd->free_list); + } + spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags); + + /* Initialize the command block before giving it the caller */ + if (cmd != NULL) + pmcraid_reinit_cmdblk(cmd); + return cmd; +} + +/** + * pmcraid_return_cmd - return a completed command block back into free pool + * @cmd: pointer to the command block + * + * Return Value: 
+ * nothing + */ +static void pmcraid_return_cmd(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + unsigned long lock_flags; + + spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags); + list_add_tail(&cmd->free_list, &pinstance->free_cmd_pool); + spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags); +} + +/** + * pmcraid_read_interrupts - reads IOA interrupts + * + * @pinstance: pointer to adapter instance structure + * + * Return value + * interrupts read from IOA + */ +static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance) +{ + return (pinstance->interrupt_mode) ? + ioread32(pinstance->int_regs.ioa_host_msix_interrupt_reg) : + ioread32(pinstance->int_regs.ioa_host_interrupt_reg); +} + +/** + * pmcraid_disable_interrupts - Masks and clears all specified interrupts + * + * @pinstance: pointer to per adapter instance structure + * @intrs: interrupts to disable + * + * Return Value + * None + */ +static void pmcraid_disable_interrupts( + struct pmcraid_instance *pinstance, + u32 intrs +) +{ + u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg); + u32 nmask = gmask | GLOBAL_INTERRUPT_MASK; + + iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg); + iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg); + ioread32(pinstance->int_regs.global_interrupt_mask_reg); + + if (!pinstance->interrupt_mode) { + iowrite32(intrs, + pinstance->int_regs.ioa_host_interrupt_mask_reg); + ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg); + } +} + +/** + * pmcraid_enable_interrupts - Enables specified interrupts + * + * @pinstance: pointer to per adapter instance structure + * @intrs: interrupts to enable + * + * Return Value + * None + */ +static void pmcraid_enable_interrupts( + struct pmcraid_instance *pinstance, + u32 intrs) +{ + u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg); + u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK); + + iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg); + + if (!pinstance->interrupt_mode) { + iowrite32(~intrs, + pinstance->int_regs.ioa_host_interrupt_mask_reg); + ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg); + } + + pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n", + ioread32(pinstance->int_regs.global_interrupt_mask_reg), + ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg)); +} + +/** + * pmcraid_clr_trans_op - clear trans to op interrupt + * + * @pinstance: pointer to per adapter instance structure + * + * Return Value + * None + */ +static void pmcraid_clr_trans_op( + struct pmcraid_instance *pinstance +) +{ + unsigned long lock_flags; + + if (!pinstance->interrupt_mode) { + iowrite32(INTRS_TRANSITION_TO_OPERATIONAL, + pinstance->int_regs.ioa_host_interrupt_mask_reg); + ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg); + iowrite32(INTRS_TRANSITION_TO_OPERATIONAL, + pinstance->int_regs.ioa_host_interrupt_clr_reg); + ioread32(pinstance->int_regs.ioa_host_interrupt_clr_reg); + } + + if (pinstance->reset_cmd != NULL) { + del_timer(&pinstance->reset_cmd->timer); + spin_lock_irqsave( + pinstance->host->host_lock, lock_flags); + pinstance->reset_cmd->cmd_done(pinstance->reset_cmd); + spin_unlock_irqrestore( + pinstance->host->host_lock, lock_flags); + } +} + +/** + * pmcraid_reset_type - Determine the required reset type + * @pinstance: pointer to adapter instance structure + * + * IOA requires hard reset if any of the following conditions is true. + * 1. 
If HRRQ valid interrupt is not masked + * 2. IOA reset alert doorbell is set + * 3. If there are any error interrupts + */ +static void pmcraid_reset_type(struct pmcraid_instance *pinstance) +{ + u32 mask; + u32 intrs; + u32 alerts; + + mask = ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg); + intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); + alerts = ioread32(pinstance->int_regs.host_ioa_interrupt_reg); + + if ((mask & INTRS_HRRQ_VALID) == 0 || + (alerts & DOORBELL_IOA_RESET_ALERT) || + (intrs & PMCRAID_ERROR_INTERRUPTS)) { + pmcraid_info("IOA requires hard reset\n"); + pinstance->ioa_hard_reset = 1; + } + + /* If unit check is active, trigger the dump */ + if (intrs & INTRS_IOA_UNIT_CHECK) + pinstance->ioa_unit_check = 1; +} + +static void pmcraid_ioa_reset(struct pmcraid_cmd *); +/** + * pmcraid_bist_done - completion function for PCI BIST + * @t: pointer to reset command + * Return Value + * none + */ +static void pmcraid_bist_done(struct timer_list *t) +{ + struct pmcraid_cmd *cmd = from_timer(cmd, t, timer); + struct pmcraid_instance *pinstance = cmd->drv_inst; + unsigned long lock_flags; + int rc; + u16 pci_reg; + + rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg); + + /* If PCI config space can't be accessed wait for another two secs */ + if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) && + cmd->time_left > 0) { + pmcraid_info("BIST not complete, waiting another 2 secs\n"); + cmd->timer.expires = jiffies + cmd->time_left; + cmd->time_left = 0; + add_timer(&cmd->timer); + } else { + cmd->time_left = 0; + pmcraid_info("BIST is complete, proceeding with reset\n"); + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + pmcraid_ioa_reset(cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + } +} + +/** + * pmcraid_start_bist - starts BIST + * @cmd: pointer to reset cmd + * Return Value + * none + */ +static void pmcraid_start_bist(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + u32 doorbells, intrs; + + /* proceed with bist and wait for 2 seconds */ + iowrite32(DOORBELL_IOA_START_BIST, + pinstance->int_regs.host_ioa_interrupt_reg); + doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg); + intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); + pmcraid_info("doorbells after start bist: %x intrs: %x\n", + doorbells, intrs); + + cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT); + cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT); + cmd->timer.function = pmcraid_bist_done; + add_timer(&cmd->timer); +} + +/** + * pmcraid_reset_alert_done - completion routine for reset_alert + * @t: pointer to command block used in reset sequence + * Return value + * None + */ +static void pmcraid_reset_alert_done(struct timer_list *t) +{ + struct pmcraid_cmd *cmd = from_timer(cmd, t, timer); + struct pmcraid_instance *pinstance = cmd->drv_inst; + u32 status = ioread32(pinstance->ioa_status); + unsigned long lock_flags; + + /* if the critical operation in progress bit is set or the wait times + * out, invoke reset engine to proceed with hard reset. 
If there is + * some more time to wait, restart the timer + */ + if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) || + cmd->time_left <= 0) { + pmcraid_info("critical op is reset proceeding with reset\n"); + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + pmcraid_ioa_reset(cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + } else { + pmcraid_info("critical op is not yet reset waiting again\n"); + /* restart timer if some more time is available to wait */ + cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT; + cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT; + cmd->timer.function = pmcraid_reset_alert_done; + add_timer(&cmd->timer); + } +} + +static void pmcraid_notify_ioastate(struct pmcraid_instance *, u32); +/** + * pmcraid_reset_alert - alerts IOA for a possible reset + * @cmd: command block to be used for reset sequence. + * + * Return Value + * returns 0 if pci config-space is accessible and RESET_DOORBELL is + * successfully written to IOA. Returns non-zero in case pci_config_space + * is not accessible + */ +static void pmcraid_reset_alert(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + u32 doorbells; + int rc; + u16 pci_reg; + + /* If we are able to access IOA PCI config space, alert IOA that we are + * going to reset it soon. This enables IOA to preserv persistent error + * data if any. In case memory space is not accessible, proceed with + * BIST or slot_reset + */ + rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg); + if ((rc == PCIBIOS_SUCCESSFUL) && (pci_reg & PCI_COMMAND_MEMORY)) { + + /* wait for IOA permission i.e until CRITICAL_OPERATION bit is + * reset IOA doesn't generate any interrupts when CRITICAL + * OPERATION bit is reset. A timer is started to wait for this + * bit to be reset. + */ + cmd->time_left = PMCRAID_RESET_TIMEOUT; + cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT; + cmd->timer.function = pmcraid_reset_alert_done; + add_timer(&cmd->timer); + + iowrite32(DOORBELL_IOA_RESET_ALERT, + pinstance->int_regs.host_ioa_interrupt_reg); + doorbells = + ioread32(pinstance->int_regs.host_ioa_interrupt_reg); + pmcraid_info("doorbells after reset alert: %x\n", doorbells); + } else { + pmcraid_info("PCI config is not accessible starting BIST\n"); + pinstance->ioa_state = IOA_STATE_IN_HARD_RESET; + pmcraid_start_bist(cmd); + } +} + +/** + * pmcraid_timeout_handler - Timeout handler for internally generated ops + * + * @t: pointer to command structure, that got timedout + * + * This function blocks host requests and initiates an adapter reset. + * + * Return value: + * None + */ +static void pmcraid_timeout_handler(struct timer_list *t) +{ + struct pmcraid_cmd *cmd = from_timer(cmd, t, timer); + struct pmcraid_instance *pinstance = cmd->drv_inst; + unsigned long lock_flags; + + dev_info(&pinstance->pdev->dev, + "Adapter being reset due to cmd(CDB[0] = %x) timeout\n", + cmd->ioa_cb->ioarcb.cdb[0]); + + /* Command timeouts result in hard reset sequence. The command that got + * timed out may be the one used as part of reset sequence. In this + * case restart reset sequence using the same command block even if + * reset is in progress. Otherwise fail this command and get a free + * command block to restart the reset sequence. 
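+ * (This handler runs in timer context; all of the reset bookkeeping
+ * below is done under the host lock.)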
+ */ + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + if (!pinstance->ioa_reset_in_progress) { + pinstance->ioa_reset_attempts = 0; + cmd = pmcraid_get_free_cmd(pinstance); + + /* If we are out of command blocks, just return here itself. + * Some other command's timeout handler can do the reset job + */ + if (cmd == NULL) { + spin_unlock_irqrestore(pinstance->host->host_lock, + lock_flags); + pmcraid_err("no free cmnd block for timeout handler\n"); + return; + } + + pinstance->reset_cmd = cmd; + pinstance->ioa_reset_in_progress = 1; + } else { + pmcraid_info("reset is already in progress\n"); + + if (pinstance->reset_cmd != cmd) { + /* This command should have been given to IOA, this + * command will be completed by fail_outstanding_cmds + * anyway + */ + pmcraid_err("cmd is pending but reset in progress\n"); + } + + /* If this command was being used as part of the reset + * sequence, set cmd_done pointer to pmcraid_ioa_reset. This + * causes fail_outstanding_commands not to return the command + * block back to free pool + */ + if (cmd == pinstance->reset_cmd) + cmd->cmd_done = pmcraid_ioa_reset; + } + + /* Notify apps of important IOA bringup/bringdown sequences */ + if (pinstance->scn.ioa_state != PMC_DEVICE_EVENT_RESET_START && + pinstance->scn.ioa_state != PMC_DEVICE_EVENT_SHUTDOWN_START) + pmcraid_notify_ioastate(pinstance, + PMC_DEVICE_EVENT_RESET_START); + + pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; + scsi_block_requests(pinstance->host); + pmcraid_reset_alert(cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); +} + +/** + * pmcraid_internal_done - completion routine for internally generated cmds + * + * @cmd: command that got response from IOA + * + * Return Value: + * none + */ +static void pmcraid_internal_done(struct pmcraid_cmd *cmd) +{ + pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n", + cmd->ioa_cb->ioarcb.cdb[0], + le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)); + + /* Some of the internal commands are sent with callers blocking for the + * response. Same will be indicated as part of cmd->completion_req + * field. Response path needs to wake up any waiters waiting for cmd + * completion if this flag is set. + */ + if (cmd->completion_req) { + cmd->completion_req = 0; + complete(&cmd->wait_for_completion); + } + + /* most of the internal commands are completed by caller itself, so + * no need to return the command block back to free pool until we are + * required to do so (e.g once done with initialization). + */ + if (cmd->release) { + cmd->release = 0; + pmcraid_return_cmd(cmd); + } +} + +/** + * pmcraid_reinit_cfgtable_done - done function for cfg table reinitialization + * + * @cmd: command that got response from IOA + * + * This routine is called after driver re-reads configuration table due to a + * lost CCN. It returns the command block back to free pool and schedules + * worker thread to add/delete devices into the system. 
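+ * The add/delete itself happens later, in the worker scheduled on
+ * pinstance->worker_q below.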
+ * + * Return Value: + * none + */ +static void pmcraid_reinit_cfgtable_done(struct pmcraid_cmd *cmd) +{ + pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n", + cmd->ioa_cb->ioarcb.cdb[0], + le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)); + + if (cmd->release) { + cmd->release = 0; + pmcraid_return_cmd(cmd); + } + pmcraid_info("scheduling worker for config table reinitialization\n"); + schedule_work(&cmd->drv_inst->worker_q); +} + +/** + * pmcraid_erp_done - Process completion of SCSI error response from device + * @cmd: pmcraid_command + * + * This function copies the sense buffer into the scsi_cmd struct and completes + * scsi_cmd by calling scsi_done function. + * + * Return value: + * none + */ +static void pmcraid_erp_done(struct pmcraid_cmd *cmd) +{ + struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; + struct pmcraid_instance *pinstance = cmd->drv_inst; + u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); + + if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) { + scsi_cmd->result |= (DID_ERROR << 16); + scmd_printk(KERN_INFO, scsi_cmd, + "command CDB[0] = %x failed with IOASC: 0x%08X\n", + cmd->ioa_cb->ioarcb.cdb[0], ioasc); + } + + if (cmd->sense_buffer) { + dma_unmap_single(&pinstance->pdev->dev, cmd->sense_buffer_dma, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + cmd->sense_buffer = NULL; + cmd->sense_buffer_dma = 0; + } + + scsi_dma_unmap(scsi_cmd); + pmcraid_return_cmd(cmd); + scsi_done(scsi_cmd); +} + +/** + * _pmcraid_fire_command - sends an IOA command to adapter + * + * This function adds the given block into pending command list + * and returns without waiting + * + * @cmd : command to be sent to the device + * + * Return Value + * None + */ +static void _pmcraid_fire_command(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + unsigned long lock_flags; + + /* Add this command block to pending cmd pool. We do this prior to + * writting IOARCB to ioarrin because IOA might complete the command + * by the time we are about to add it to the list. Response handler + * (isr/tasklet) looks for cmd block in the pending pending list. 
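+ * The mb() issued before writing IOARRIN below orders the list update
+ * (and the outstanding_cmds increment) ahead of the MMIO write, so the
+ * response path never sees a completion for a command that is not yet
+ * on the pending list.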
+ */ + spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags); + list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool); + spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags); + atomic_inc(&pinstance->outstanding_cmds); + + /* driver writes lower 32-bit value of IOARCB address only */ + mb(); + iowrite32(le64_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr), pinstance->ioarrin); +} + +/** + * pmcraid_send_cmd - fires a command to IOA + * + * This function also sets up timeout function, and command completion + * function + * + * @cmd: pointer to the command block to be fired to IOA + * @cmd_done: command completion function, called once IOA responds + * @timeout: timeout to wait for this command completion + * @timeout_func: timeout handler + * + * Return value + * none + */ +static void pmcraid_send_cmd( + struct pmcraid_cmd *cmd, + void (*cmd_done) (struct pmcraid_cmd *), + unsigned long timeout, + void (*timeout_func) (struct timer_list *) +) +{ + /* initialize done function */ + cmd->cmd_done = cmd_done; + + if (timeout_func) { + /* setup timeout handler */ + cmd->timer.expires = jiffies + timeout; + cmd->timer.function = timeout_func; + add_timer(&cmd->timer); + } + + /* fire the command to IOA */ + _pmcraid_fire_command(cmd); +} + +/** + * pmcraid_ioa_shutdown_done - completion function for IOA shutdown command + * @cmd: pointer to the command block used for sending IOA shutdown command + * + * Return value + * None + */ +static void pmcraid_ioa_shutdown_done(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + unsigned long lock_flags; + + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + pmcraid_ioa_reset(cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); +} + +/** + * pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa + * + * @cmd: pointer to the command block used as part of reset sequence + * + * Return Value + * None + */ +static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd) +{ + pmcraid_info("response for Cancel CCN CDB[0] = %x ioasc = %x\n", + cmd->ioa_cb->ioarcb.cdb[0], + le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)); + + /* Note that commands sent during reset require next command to be sent + * to IOA. Hence reinit the done function as well as timeout function + */ + pmcraid_reinit_cmdblk(cmd); + cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD; + cmd->ioa_cb->ioarcb.resource_handle = + cpu_to_le32(PMCRAID_IOA_RES_HANDLE); + cmd->ioa_cb->ioarcb.cdb[0] = PMCRAID_IOA_SHUTDOWN; + cmd->ioa_cb->ioarcb.cdb[1] = PMCRAID_SHUTDOWN_NORMAL; + + /* fire shutdown command to hardware. */ + pmcraid_info("firing normal shutdown command (%d) to IOA\n", + le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle)); + + pmcraid_notify_ioastate(cmd->drv_inst, PMC_DEVICE_EVENT_SHUTDOWN_START); + + pmcraid_send_cmd(cmd, pmcraid_ioa_shutdown_done, + PMCRAID_SHUTDOWN_TIMEOUT, + pmcraid_timeout_handler); +} + +static void pmcraid_querycfg(struct pmcraid_cmd *); +/** + * pmcraid_get_fwversion_done - completion function for get_fwversion + * + * @cmd: pointer to command block used to send INQUIRY command + * + * Return Value + * none + */ +static void pmcraid_get_fwversion_done(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); + unsigned long lock_flags; + + /* configuration table entry size depends on firmware version. 
If fw + * version is not known, it is not possible to interpret IOA config + * table + */ + if (ioasc) { + pmcraid_err("IOA Inquiry failed with %x\n", ioasc); + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; + pmcraid_reset_alert(cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + } else { + pmcraid_querycfg(cmd); + } +} + +/** + * pmcraid_get_fwversion - reads firmware version information + * + * @cmd: pointer to command block used to send INQUIRY command + * + * Return Value + * none + */ +static void pmcraid_get_fwversion(struct pmcraid_cmd *cmd) +{ + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + struct pmcraid_ioadl_desc *ioadl; + struct pmcraid_instance *pinstance = cmd->drv_inst; + u16 data_size = sizeof(struct pmcraid_inquiry_data); + + pmcraid_reinit_cmdblk(cmd); + ioarcb->request_type = REQ_TYPE_SCSI; + ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); + ioarcb->cdb[0] = INQUIRY; + ioarcb->cdb[1] = 1; + ioarcb->cdb[2] = 0xD0; + ioarcb->cdb[3] = (data_size >> 8) & 0xFF; + ioarcb->cdb[4] = data_size & 0xFF; + + /* Since entire inquiry data it can be part of IOARCB itself + */ + ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + + offsetof(struct pmcraid_ioarcb, + add_data.u.ioadl[0])); + ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); + ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL)); + + ioarcb->request_flags0 |= NO_LINK_DESCS; + ioarcb->data_transfer_length = cpu_to_le32(data_size); + ioadl = &(ioarcb->add_data.u.ioadl[0]); + ioadl->flags = IOADL_FLAGS_LAST_DESC; + ioadl->address = cpu_to_le64(pinstance->inq_data_baddr); + ioadl->data_len = cpu_to_le32(data_size); + + pmcraid_send_cmd(cmd, pmcraid_get_fwversion_done, + PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); +} + +/** + * pmcraid_identify_hrrq - registers host rrq buffers with IOA + * @cmd: pointer to command block to be used for identify hrrq + * + * Return Value + * none + */ +static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + int index = cmd->hrrq_index; + __be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]); + __be32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD); + void (*done_function)(struct pmcraid_cmd *); + + pmcraid_reinit_cmdblk(cmd); + cmd->hrrq_index = index + 1; + + if (cmd->hrrq_index < pinstance->num_hrrq) { + done_function = pmcraid_identify_hrrq; + } else { + cmd->hrrq_index = 0; + done_function = pmcraid_get_fwversion; + } + + /* Initialize ioarcb */ + ioarcb->request_type = REQ_TYPE_IOACMD; + ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); + + /* initialize the hrrq number where IOA will respond to this command */ + ioarcb->hrrq_id = index; + ioarcb->cdb[0] = PMCRAID_IDENTIFY_HRRQ; + ioarcb->cdb[1] = index; + + /* IOA expects 64-bit pci address to be written in B.E format + * (i.e cdb[2]=MSByte..cdb[9]=LSB. + */ + pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb:index => %llx:%llx:%x\n", + hrrq_addr, ioarcb->ioarcb_bus_addr, index); + + memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr)); + memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size)); + + /* Subsequent commands require HRRQ identification to be successful. 
+ * Note that this gets called even during reset from SCSI mid-layer + * or tasklet + */ + pmcraid_send_cmd(cmd, done_function, + PMCRAID_INTERNAL_TIMEOUT, + pmcraid_timeout_handler); +} + +static void pmcraid_process_ccn(struct pmcraid_cmd *cmd); +static void pmcraid_process_ldn(struct pmcraid_cmd *cmd); + +/** + * pmcraid_send_hcam_cmd - send an initialized command block(HCAM) to IOA + * + * @cmd: initialized command block pointer + * + * Return Value + * none + */ +static void pmcraid_send_hcam_cmd(struct pmcraid_cmd *cmd) +{ + if (cmd->ioa_cb->ioarcb.cdb[1] == PMCRAID_HCAM_CODE_CONFIG_CHANGE) + atomic_set(&(cmd->drv_inst->ccn.ignore), 0); + else + atomic_set(&(cmd->drv_inst->ldn.ignore), 0); + + pmcraid_send_cmd(cmd, cmd->cmd_done, 0, NULL); +} + +/** + * pmcraid_init_hcam - send an initialized command block(HCAM) to IOA + * + * @pinstance: pointer to adapter instance structure + * @type: HCAM type + * + * Return Value + * pointer to initialized pmcraid_cmd structure or NULL + */ +static struct pmcraid_cmd *pmcraid_init_hcam +( + struct pmcraid_instance *pinstance, + u8 type +) +{ + struct pmcraid_cmd *cmd; + struct pmcraid_ioarcb *ioarcb; + struct pmcraid_ioadl_desc *ioadl; + struct pmcraid_hostrcb *hcam; + void (*cmd_done) (struct pmcraid_cmd *); + dma_addr_t dma; + int rcb_size; + + cmd = pmcraid_get_free_cmd(pinstance); + + if (!cmd) { + pmcraid_err("no free command blocks for hcam\n"); + return cmd; + } + + if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) { + rcb_size = sizeof(struct pmcraid_hcam_ccn_ext); + cmd_done = pmcraid_process_ccn; + dma = pinstance->ccn.baddr + PMCRAID_AEN_HDR_SIZE; + hcam = &pinstance->ccn; + } else { + rcb_size = sizeof(struct pmcraid_hcam_ldn); + cmd_done = pmcraid_process_ldn; + dma = pinstance->ldn.baddr + PMCRAID_AEN_HDR_SIZE; + hcam = &pinstance->ldn; + } + + /* initialize command pointer used for HCAM registration */ + hcam->cmd = cmd; + + ioarcb = &cmd->ioa_cb->ioarcb; + ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + + offsetof(struct pmcraid_ioarcb, + add_data.u.ioadl[0])); + ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); + ioadl = ioarcb->add_data.u.ioadl; + + /* Initialize ioarcb */ + ioarcb->request_type = REQ_TYPE_HCAM; + ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); + ioarcb->cdb[0] = PMCRAID_HOST_CONTROLLED_ASYNC; + ioarcb->cdb[1] = type; + ioarcb->cdb[7] = (rcb_size >> 8) & 0xFF; + ioarcb->cdb[8] = (rcb_size) & 0xFF; + + ioarcb->data_transfer_length = cpu_to_le32(rcb_size); + + ioadl[0].flags |= IOADL_FLAGS_READ_LAST; + ioadl[0].data_len = cpu_to_le32(rcb_size); + ioadl[0].address = cpu_to_le64(dma); + + cmd->cmd_done = cmd_done; + return cmd; +} + +/** + * pmcraid_send_hcam - Send an HCAM to IOA + * @pinstance: ioa config struct + * @type: HCAM type + * + * This function will send a Host Controlled Async command to IOA. 
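+ * Typical use is the (re)registration done by pmcraid_register_hcams():
+ *
+ *	pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
+ *	pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
+ *
+ * Note that pmcraid_init_hcam() returns NULL when no free command
+ * blocks are available.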
+ * + * Return value: + * none + */ +static void pmcraid_send_hcam(struct pmcraid_instance *pinstance, u8 type) +{ + struct pmcraid_cmd *cmd = pmcraid_init_hcam(pinstance, type); + pmcraid_send_hcam_cmd(cmd); +} + + +/** + * pmcraid_prepare_cancel_cmd - prepares a command block to abort another + * + * @cmd: pointer to cmd that is used as cancelling command + * @cmd_to_cancel: pointer to the command that needs to be cancelled + */ +static void pmcraid_prepare_cancel_cmd( + struct pmcraid_cmd *cmd, + struct pmcraid_cmd *cmd_to_cancel +) +{ + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + __be64 ioarcb_addr; + + /* IOARCB address of the command to be cancelled is given in + * cdb[2]..cdb[9] is Big-Endian format. Note that length bits in + * IOARCB address are not masked. + */ + ioarcb_addr = cpu_to_be64(le64_to_cpu(cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr)); + + /* Get the resource handle to where the command to be aborted has been + * sent. + */ + ioarcb->resource_handle = cmd_to_cancel->ioa_cb->ioarcb.resource_handle; + ioarcb->request_type = REQ_TYPE_IOACMD; + memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN); + ioarcb->cdb[0] = PMCRAID_ABORT_CMD; + + memcpy(&(ioarcb->cdb[2]), &ioarcb_addr, sizeof(ioarcb_addr)); +} + +/** + * pmcraid_cancel_hcam - sends ABORT task to abort a given HCAM + * + * @cmd: command to be used as cancelling command + * @type: HCAM type + * @cmd_done: op done function for the cancelling command + */ +static void pmcraid_cancel_hcam( + struct pmcraid_cmd *cmd, + u8 type, + void (*cmd_done) (struct pmcraid_cmd *) +) +{ + struct pmcraid_instance *pinstance; + struct pmcraid_hostrcb *hcam; + + pinstance = cmd->drv_inst; + hcam = (type == PMCRAID_HCAM_CODE_LOG_DATA) ? + &pinstance->ldn : &pinstance->ccn; + + /* prepare for cancelling previous hcam command. 
If the HCAM is + * currently not pending with IOA, we would have hcam->cmd as non-null + */ + if (hcam->cmd == NULL) + return; + + pmcraid_prepare_cancel_cmd(cmd, hcam->cmd); + + /* writing to IOARRIN must be protected by host_lock, as mid-layer + * schedule queuecommand while we are doing this + */ + pmcraid_send_cmd(cmd, cmd_done, + PMCRAID_INTERNAL_TIMEOUT, + pmcraid_timeout_handler); +} + +/** + * pmcraid_cancel_ccn - cancel CCN HCAM already registered with IOA + * + * @cmd: command block to be used for cancelling the HCAM + */ +static void pmcraid_cancel_ccn(struct pmcraid_cmd *cmd) +{ + pmcraid_info("response for Cancel LDN CDB[0] = %x ioasc = %x\n", + cmd->ioa_cb->ioarcb.cdb[0], + le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)); + + pmcraid_reinit_cmdblk(cmd); + + pmcraid_cancel_hcam(cmd, + PMCRAID_HCAM_CODE_CONFIG_CHANGE, + pmcraid_ioa_shutdown); +} + +/** + * pmcraid_cancel_ldn - cancel LDN HCAM already registered with IOA + * + * @cmd: command block to be used for cancelling the HCAM + */ +static void pmcraid_cancel_ldn(struct pmcraid_cmd *cmd) +{ + pmcraid_cancel_hcam(cmd, + PMCRAID_HCAM_CODE_LOG_DATA, + pmcraid_cancel_ccn); +} + +/** + * pmcraid_expose_resource - check if the resource can be exposed to OS + * + * @fw_version: firmware version code + * @cfgte: pointer to configuration table entry of the resource + * + * Return value: + * true if resource can be added to midlayer, false(0) otherwise + */ +static int pmcraid_expose_resource(u16 fw_version, + struct pmcraid_config_table_entry *cfgte) +{ + int retval = 0; + + if (cfgte->resource_type == RES_TYPE_VSET) { + if (fw_version <= PMCRAID_FW_VERSION_1) + retval = ((cfgte->unique_flags1 & 0x80) == 0); + else + retval = ((cfgte->unique_flags0 & 0x80) == 0 && + (cfgte->unique_flags1 & 0x80) == 0); + + } else if (cfgte->resource_type == RES_TYPE_GSCSI) + retval = (RES_BUS(cfgte->resource_address) != + PMCRAID_VIRTUAL_ENCL_BUS_ID); + return retval; +} + +/* attributes supported by pmcraid_event_family */ +enum { + PMCRAID_AEN_ATTR_UNSPEC, + PMCRAID_AEN_ATTR_EVENT, + __PMCRAID_AEN_ATTR_MAX, +}; +#define PMCRAID_AEN_ATTR_MAX (__PMCRAID_AEN_ATTR_MAX - 1) + +/* commands supported by pmcraid_event_family */ +enum { + PMCRAID_AEN_CMD_UNSPEC, + PMCRAID_AEN_CMD_EVENT, + __PMCRAID_AEN_CMD_MAX, +}; +#define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1) + +static struct genl_multicast_group pmcraid_mcgrps[] = { + { .name = "events", /* not really used - see ID discussion below */ }, +}; + +static struct genl_family pmcraid_event_family __ro_after_init = { + .module = THIS_MODULE, + .name = "pmcraid", + .version = 1, + .maxattr = PMCRAID_AEN_ATTR_MAX, + .mcgrps = pmcraid_mcgrps, + .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps), +}; + +/** + * pmcraid_netlink_init - registers pmcraid_event_family + * + * Return value: + * 0 if the pmcraid_event_family is successfully registered + * with netlink generic, non-zero otherwise + */ +static int __init pmcraid_netlink_init(void) +{ + int result; + + result = genl_register_family(&pmcraid_event_family); + + if (result) + return result; + + pmcraid_info("registered NETLINK GENERIC group: %d\n", + pmcraid_event_family.id); + + return result; +} + +/** + * pmcraid_netlink_release - unregisters pmcraid_event_family + * + * Return value: + * none + */ +static void pmcraid_netlink_release(void) +{ + genl_unregister_family(&pmcraid_event_family); +} + +/* + * pmcraid_notify_aen - sends event msg to user space application + * @pinstance: pointer to adapter instance structure + * + * Return value: + * 0 if success, error 
value in case of any failure. + */ +static int pmcraid_notify_aen( + struct pmcraid_instance *pinstance, + struct pmcraid_aen_msg *aen_msg, + u32 data_size) +{ + struct sk_buff *skb; + void *msg_header; + u32 total_size, nla_genl_hdr_total_size; + int result; + + aen_msg->hostno = (pinstance->host->unique_id << 16 | + MINOR(pinstance->cdev.dev)); + aen_msg->length = data_size; + + data_size += sizeof(*aen_msg); + + total_size = nla_total_size(data_size); + /* Add GENL_HDR to total_size */ + nla_genl_hdr_total_size = + (total_size + (GENL_HDRLEN + + ((struct genl_family *)&pmcraid_event_family)->hdrsize) + + NLMSG_HDRLEN); + skb = genlmsg_new(nla_genl_hdr_total_size, GFP_ATOMIC); + + + if (!skb) { + pmcraid_err("Failed to allocate aen data SKB of size: %x\n", + total_size); + return -ENOMEM; + } + + /* add the genetlink message header */ + msg_header = genlmsg_put(skb, 0, 0, + &pmcraid_event_family, 0, + PMCRAID_AEN_CMD_EVENT); + if (!msg_header) { + pmcraid_err("failed to copy command details\n"); + nlmsg_free(skb); + return -ENOMEM; + } + + result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg); + + if (result) { + pmcraid_err("failed to copy AEN attribute data\n"); + nlmsg_free(skb); + return -EINVAL; + } + + /* send genetlink multicast message to notify applications */ + genlmsg_end(skb, msg_header); + + result = genlmsg_multicast(&pmcraid_event_family, skb, + 0, 0, GFP_ATOMIC); + + /* If there are no listeners, genlmsg_multicast may return non-zero + * value. + */ + if (result) + pmcraid_info("error (%x) sending aen event message\n", result); + return result; +} + +/** + * pmcraid_notify_ccn - notifies about CCN event msg to user space + * @pinstance: pointer adapter instance structure + * + * Return value: + * 0 if success, error value in case of any failure + */ +static int pmcraid_notify_ccn(struct pmcraid_instance *pinstance) +{ + return pmcraid_notify_aen(pinstance, + pinstance->ccn.msg, + le32_to_cpu(pinstance->ccn.hcam->data_len) + + sizeof(struct pmcraid_hcam_hdr)); +} + +/** + * pmcraid_notify_ldn - notifies about CCN event msg to user space + * @pinstance: pointer adapter instance structure + * + * Return value: + * 0 if success, error value in case of any failure + */ +static int pmcraid_notify_ldn(struct pmcraid_instance *pinstance) +{ + return pmcraid_notify_aen(pinstance, + pinstance->ldn.msg, + le32_to_cpu(pinstance->ldn.hcam->data_len) + + sizeof(struct pmcraid_hcam_hdr)); +} + +/** + * pmcraid_notify_ioastate - sends IOA state event msg to user space + * @pinstance: pointer adapter instance structure + * @evt: controller state event to be sent + * + * Return value: + * 0 if success, error value in case of any failure + */ +static void pmcraid_notify_ioastate(struct pmcraid_instance *pinstance, u32 evt) +{ + pinstance->scn.ioa_state = evt; + pmcraid_notify_aen(pinstance, + &pinstance->scn.msg, + sizeof(u32)); +} + +/** + * pmcraid_handle_config_change - Handle a config change from the adapter + * @pinstance: pointer to per adapter instance structure + * + * Return value: + * none + */ + +static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance) +{ + struct pmcraid_config_table_entry *cfg_entry; + struct pmcraid_hcam_ccn *ccn_hcam; + struct pmcraid_cmd *cmd; + struct pmcraid_cmd *cfgcmd; + struct pmcraid_resource_entry *res = NULL; + unsigned long lock_flags; + unsigned long host_lock_flags; + u32 new_entry = 1; + u32 hidden_entry = 0; + u16 fw_version; + int rc; + + ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam; + cfg_entry = 
&ccn_hcam->cfg_entry; + fw_version = be16_to_cpu(pinstance->inq_data->fw_version); + + pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x \ + res: %x:%x:%x:%x\n", + le32_to_cpu(pinstance->ccn.hcam->ilid), + pinstance->ccn.hcam->op_code, + (le32_to_cpu(pinstance->ccn.hcam->timestamp1) | + ((le32_to_cpu(pinstance->ccn.hcam->timestamp2) & 0xffffffffLL) << 32)), + pinstance->ccn.hcam->notification_type, + pinstance->ccn.hcam->notification_lost, + pinstance->ccn.hcam->flags, + pinstance->host->unique_id, + RES_IS_VSET(*cfg_entry) ? PMCRAID_VSET_BUS_ID : + (RES_IS_GSCSI(*cfg_entry) ? PMCRAID_PHYS_BUS_ID : + RES_BUS(cfg_entry->resource_address)), + RES_IS_VSET(*cfg_entry) ? + (fw_version <= PMCRAID_FW_VERSION_1 ? + cfg_entry->unique_flags1 : + le16_to_cpu(cfg_entry->array_id) & 0xFF) : + RES_TARGET(cfg_entry->resource_address), + RES_LUN(cfg_entry->resource_address)); + + + /* If this HCAM indicates a lost notification, read the config table */ + if (pinstance->ccn.hcam->notification_lost) { + cfgcmd = pmcraid_get_free_cmd(pinstance); + if (cfgcmd) { + pmcraid_info("lost CCN, reading config table\b"); + pinstance->reinit_cfg_table = 1; + pmcraid_querycfg(cfgcmd); + } else { + pmcraid_err("lost CCN, no free cmd for querycfg\n"); + } + goto out_notify_apps; + } + + /* If this resource is not going to be added to mid-layer, just notify + * applications and return. If this notification is about hiding a VSET + * resource, check if it was exposed already. + */ + if (pinstance->ccn.hcam->notification_type == + NOTIFICATION_TYPE_ENTRY_CHANGED && + cfg_entry->resource_type == RES_TYPE_VSET) { + hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0; + } else if (!pmcraid_expose_resource(fw_version, cfg_entry)) { + goto out_notify_apps; + } + + spin_lock_irqsave(&pinstance->resource_lock, lock_flags); + list_for_each_entry(res, &pinstance->used_res_q, queue) { + rc = memcmp(&res->cfg_entry.resource_address, + &cfg_entry->resource_address, + sizeof(cfg_entry->resource_address)); + if (!rc) { + new_entry = 0; + break; + } + } + + if (new_entry) { + + if (hidden_entry) { + spin_unlock_irqrestore(&pinstance->resource_lock, + lock_flags); + goto out_notify_apps; + } + + /* If there are more number of resources than what driver can + * manage, do not notify the applications about the CCN. 
Just + * ignore this notifications and re-register the same HCAM + */ + if (list_empty(&pinstance->free_res_q)) { + spin_unlock_irqrestore(&pinstance->resource_lock, + lock_flags); + pmcraid_err("too many resources attached\n"); + spin_lock_irqsave(pinstance->host->host_lock, + host_lock_flags); + pmcraid_send_hcam(pinstance, + PMCRAID_HCAM_CODE_CONFIG_CHANGE); + spin_unlock_irqrestore(pinstance->host->host_lock, + host_lock_flags); + return; + } + + res = list_entry(pinstance->free_res_q.next, + struct pmcraid_resource_entry, queue); + + list_del(&res->queue); + res->scsi_dev = NULL; + res->reset_progress = 0; + list_add_tail(&res->queue, &pinstance->used_res_q); + } + + memcpy(&res->cfg_entry, cfg_entry, pinstance->config_table_entry_size); + + if (pinstance->ccn.hcam->notification_type == + NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) { + if (res->scsi_dev) { + if (fw_version <= PMCRAID_FW_VERSION_1) + res->cfg_entry.unique_flags1 &= 0x7F; + else + res->cfg_entry.array_id &= cpu_to_le16(0xFF); + res->change_detected = RES_CHANGE_DEL; + res->cfg_entry.resource_handle = + PMCRAID_INVALID_RES_HANDLE; + schedule_work(&pinstance->worker_q); + } else { + /* This may be one of the non-exposed resources */ + list_move_tail(&res->queue, &pinstance->free_res_q); + } + } else if (!res->scsi_dev) { + res->change_detected = RES_CHANGE_ADD; + schedule_work(&pinstance->worker_q); + } + spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); + +out_notify_apps: + + /* Notify configuration changes to registered applications.*/ + if (!pmcraid_disable_aen) + pmcraid_notify_ccn(pinstance); + + cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE); + if (cmd) + pmcraid_send_hcam_cmd(cmd); +} + +/** + * pmcraid_get_error_info - return error string for an ioasc + * @ioasc: ioasc code + * Return Value + * none + */ +static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc) +{ + int i; + for (i = 0; i < ARRAY_SIZE(pmcraid_ioasc_error_table); i++) { + if (pmcraid_ioasc_error_table[i].ioasc_code == ioasc) + return &pmcraid_ioasc_error_table[i]; + } + return NULL; +} + +/** + * pmcraid_ioasc_logger - log IOASC information based user-settings + * @ioasc: ioasc code + * @cmd: pointer to command that resulted in 'ioasc' + */ +static void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd) +{ + struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc); + + if (error_info == NULL || + cmd->drv_inst->current_log_level < error_info->log_level) + return; + + /* log the error string */ + pmcraid_err("cmd [%x] for resource %x failed with %x(%s)\n", + cmd->ioa_cb->ioarcb.cdb[0], + le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle), + ioasc, error_info->error_string); +} + +/** + * pmcraid_handle_error_log - Handle a config change (error log) from the IOA + * + * @pinstance: pointer to per adapter instance structure + * + * Return value: + * none + */ +static void pmcraid_handle_error_log(struct pmcraid_instance *pinstance) +{ + struct pmcraid_hcam_ldn *hcam_ldn; + u32 ioasc; + + hcam_ldn = (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam; + + pmcraid_info + ("LDN(%x): %x type: %x lost: %x flags: %x overlay id: %x\n", + pinstance->ldn.hcam->ilid, + pinstance->ldn.hcam->op_code, + pinstance->ldn.hcam->notification_type, + pinstance->ldn.hcam->notification_lost, + pinstance->ldn.hcam->flags, + pinstance->ldn.hcam->overlay_id); + + /* log only the errors, no need to log informational log entries */ + if (pinstance->ldn.hcam->notification_type != + NOTIFICATION_TYPE_ERROR_LOG) + 
return; + + if (pinstance->ldn.hcam->notification_lost == + HOSTRCB_NOTIFICATIONS_LOST) + dev_info(&pinstance->pdev->dev, "Error notifications lost\n"); + + ioasc = le32_to_cpu(hcam_ldn->error_log.fd_ioasc); + + if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET || + ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER) { + dev_info(&pinstance->pdev->dev, + "UnitAttention due to IOA Bus Reset\n"); + scsi_report_bus_reset( + pinstance->host, + RES_BUS(hcam_ldn->error_log.fd_ra)); + } + + return; +} + +/** + * pmcraid_process_ccn - Op done function for a CCN. + * @cmd: pointer to command struct + * + * This function is the op done function for a configuration + * change notification + * + * Return value: + * none + */ +static void pmcraid_process_ccn(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); + unsigned long lock_flags; + + pinstance->ccn.cmd = NULL; + pmcraid_return_cmd(cmd); + + /* If driver initiated IOA reset happened while this hcam was pending + * with IOA, or IOA bringdown sequence is in progress, no need to + * re-register the hcam + */ + if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET || + atomic_read(&pinstance->ccn.ignore) == 1) { + return; + } else if (ioasc) { + dev_info(&pinstance->pdev->dev, + "Host RCB (CCN) failed with IOASC: 0x%08X\n", ioasc); + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE); + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + } else { + pmcraid_handle_config_change(pinstance); + } +} + +static void pmcraid_initiate_reset(struct pmcraid_instance *); +static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd); +/** + * pmcraid_process_ldn - op done function for an LDN + * @cmd: pointer to command block + * + * Return value + * none + */ +static void pmcraid_process_ldn(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + struct pmcraid_hcam_ldn *ldn_hcam = + (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam; + u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); + u32 fd_ioasc = le32_to_cpu(ldn_hcam->error_log.fd_ioasc); + unsigned long lock_flags; + + /* return the command block back to freepool */ + pinstance->ldn.cmd = NULL; + pmcraid_return_cmd(cmd); + + /* If driver initiated IOA reset happened while this hcam was pending + * with IOA, no need to re-register the hcam as reset engine will do it + * once reset sequence is complete + */ + if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET || + atomic_read(&pinstance->ccn.ignore) == 1) { + return; + } else if (!ioasc) { + pmcraid_handle_error_log(pinstance); + if (fd_ioasc == PMCRAID_IOASC_NR_IOA_RESET_REQUIRED) { + spin_lock_irqsave(pinstance->host->host_lock, + lock_flags); + pmcraid_initiate_reset(pinstance); + spin_unlock_irqrestore(pinstance->host->host_lock, + lock_flags); + return; + } + if (fd_ioasc == PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC) { + pinstance->timestamp_error = 1; + pmcraid_set_timestamp(cmd); + } + } else { + dev_info(&pinstance->pdev->dev, + "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc); + } + /* send netlink message for HCAM notification if enabled */ + if (!pmcraid_disable_aen) + pmcraid_notify_ldn(pinstance); + + cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA); + if (cmd) + pmcraid_send_hcam_cmd(cmd); +} + +/** + * pmcraid_register_hcams - register HCAMs for CCN and LDN + * + * @pinstance: pointer per adapter instance structure + * + * Return Value + * none + */ +static void 
pmcraid_register_hcams(struct pmcraid_instance *pinstance) +{ + pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE); + pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA); +} + +/** + * pmcraid_unregister_hcams - cancel HCAMs registered already + * @cmd: pointer to command used as part of reset sequence + */ +static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + + /* During IOA bringdown, HCAM gets fired and tasklet proceeds with + * handling hcam response though it is not necessary. In order to + * prevent this, set 'ignore', so that bring-down sequence doesn't + * re-send any more hcams + */ + atomic_set(&pinstance->ccn.ignore, 1); + atomic_set(&pinstance->ldn.ignore, 1); + + /* If adapter reset was forced as part of runtime reset sequence, + * start the reset sequence. Reset will be triggered even in case + * IOA unit_check. + */ + if ((pinstance->force_ioa_reset && !pinstance->ioa_bringdown) || + pinstance->ioa_unit_check) { + pinstance->force_ioa_reset = 0; + pinstance->ioa_unit_check = 0; + pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; + pmcraid_reset_alert(cmd); + return; + } + + /* Driver tries to cancel HCAMs by sending ABORT TASK for each HCAM + * one after the other. So CCN cancellation will be triggered by + * pmcraid_cancel_ldn itself. + */ + pmcraid_cancel_ldn(cmd); +} + +static void pmcraid_reinit_buffers(struct pmcraid_instance *); + +/** + * pmcraid_reset_enable_ioa - re-enable IOA after a hard reset + * @pinstance: pointer to adapter instance structure + * Return Value + * 1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0 + */ +static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance) +{ + u32 intrs; + + pmcraid_reinit_buffers(pinstance); + intrs = pmcraid_read_interrupts(pinstance); + + pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS); + + if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) { + if (!pinstance->interrupt_mode) { + iowrite32(INTRS_TRANSITION_TO_OPERATIONAL, + pinstance->int_regs. + ioa_host_interrupt_mask_reg); + iowrite32(INTRS_TRANSITION_TO_OPERATIONAL, + pinstance->int_regs.ioa_host_interrupt_clr_reg); + } + return 1; + } else { + return 0; + } +} + +/** + * pmcraid_soft_reset - performs a soft reset and makes IOA become ready + * @cmd : pointer to reset command block + * + * Return Value + * none + */ +static void pmcraid_soft_reset(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + u32 int_reg; + u32 doorbell; + + /* There will be an interrupt when Transition to Operational bit is + * set so tasklet would execute next reset task. 
The timeout handler + * would re-initiate a reset + */ + cmd->cmd_done = pmcraid_ioa_reset; + cmd->timer.expires = jiffies + + msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT); + cmd->timer.function = pmcraid_timeout_handler; + + if (!timer_pending(&cmd->timer)) + add_timer(&cmd->timer); + + /* Enable destructive diagnostics on IOA if it is not yet in + * operational state + */ + doorbell = DOORBELL_RUNTIME_RESET | + DOORBELL_ENABLE_DESTRUCTIVE_DIAGS; + + /* Since we do RESET_ALERT and Start BIST we have to again write + * MSIX Doorbell to indicate the interrupt mode + */ + if (pinstance->interrupt_mode) { + iowrite32(DOORBELL_INTR_MODE_MSIX, + pinstance->int_regs.host_ioa_interrupt_reg); + ioread32(pinstance->int_regs.host_ioa_interrupt_reg); + } + + iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg); + ioread32(pinstance->int_regs.host_ioa_interrupt_reg), + int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); + + pmcraid_info("Waiting for IOA to become operational %x:%x\n", + ioread32(pinstance->int_regs.host_ioa_interrupt_reg), + int_reg); +} + +/** + * pmcraid_get_dump - retrieves IOA dump in case of Unit Check interrupt + * + * @pinstance: pointer to adapter instance structure + * + * Return Value + * none + */ +static void pmcraid_get_dump(struct pmcraid_instance *pinstance) +{ + pmcraid_info("%s is not yet implemented\n", __func__); +} + +/** + * pmcraid_fail_outstanding_cmds - Fails all outstanding ops. + * @pinstance: pointer to adapter instance structure + * + * This function fails all outstanding ops. If they are submitted to IOA + * already, it sends cancel all messages if IOA is still accepting IOARCBs, + * otherwise just completes the commands and returns the cmd blocks to free + * pool. + * + * Return value: + * none + */ +static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance) +{ + struct pmcraid_cmd *cmd, *temp; + unsigned long lock_flags; + + /* pending command list is protected by pending_pool_lock. Its + * traversal must be done as within this lock + */ + spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags); + list_for_each_entry_safe(cmd, temp, &pinstance->pending_cmd_pool, + free_list) { + list_del(&cmd->free_list); + spin_unlock_irqrestore(&pinstance->pending_pool_lock, + lock_flags); + cmd->ioa_cb->ioasa.ioasc = + cpu_to_le32(PMCRAID_IOASC_IOA_WAS_RESET); + cmd->ioa_cb->ioasa.ilid = + cpu_to_le32(PMCRAID_DRIVER_ILID); + + /* In case the command timer is still running */ + del_timer(&cmd->timer); + + /* If this is an IO command, complete it by invoking scsi_done + * function. 
If this is one of the internal commands other + * than pmcraid_ioa_reset and HCAM commands invoke cmd_done to + * complete it + */ + if (cmd->scsi_cmd) { + + struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; + __le32 resp = cmd->ioa_cb->ioarcb.response_handle; + + scsi_cmd->result |= DID_ERROR << 16; + + scsi_dma_unmap(scsi_cmd); + pmcraid_return_cmd(cmd); + + pmcraid_info("failing(%d) CDB[0] = %x result: %x\n", + le32_to_cpu(resp) >> 2, + cmd->ioa_cb->ioarcb.cdb[0], + scsi_cmd->result); + scsi_done(scsi_cmd); + } else if (cmd->cmd_done == pmcraid_internal_done || + cmd->cmd_done == pmcraid_erp_done) { + cmd->cmd_done(cmd); + } else if (cmd->cmd_done != pmcraid_ioa_reset && + cmd->cmd_done != pmcraid_ioa_shutdown_done) { + pmcraid_return_cmd(cmd); + } + + atomic_dec(&pinstance->outstanding_cmds); + spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags); + } + + spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags); +} + +/** + * pmcraid_ioa_reset - Implementation of IOA reset logic + * + * @cmd: pointer to the cmd block to be used for entire reset process + * + * This function executes most of the steps required for IOA reset. This gets + * called by user threads (modprobe/insmod/rmmod) timer, tasklet and midlayer's + * 'eh_' thread. Access to variables used for controlling the reset sequence is + * synchronized using host lock. Various functions called during reset process + * would make use of a single command block, pointer to which is also stored in + * adapter instance structure. + * + * Return Value + * None + */ +static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + u8 reset_complete = 0; + + pinstance->ioa_reset_in_progress = 1; + + if (pinstance->reset_cmd != cmd) { + pmcraid_err("reset is called with different command block\n"); + pinstance->reset_cmd = cmd; + } + + pmcraid_info("reset_engine: state = %d, command = %p\n", + pinstance->ioa_state, cmd); + + switch (pinstance->ioa_state) { + + case IOA_STATE_DEAD: + /* If IOA is offline, whatever may be the reset reason, just + * return. callers might be waiting on the reset wait_q, wake + * up them + */ + pmcraid_err("IOA is offline no reset is possible\n"); + reset_complete = 1; + break; + + case IOA_STATE_IN_BRINGDOWN: + /* we enter here, once ioa shutdown command is processed by IOA + * Alert IOA for a possible reset. If reset alert fails, IOA + * goes through hard-reset + */ + pmcraid_disable_interrupts(pinstance, ~0); + pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; + pmcraid_reset_alert(cmd); + break; + + case IOA_STATE_UNKNOWN: + /* We may be called during probe or resume. Some pre-processing + * is required for prior to reset + */ + scsi_block_requests(pinstance->host); + + /* If asked to reset while IOA was processing responses or + * there are any error responses then IOA may require + * hard-reset. 
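+ *
+ * In the typical bring-up path the reset engine then walks
+ * IOA_STATE_IN_RESET_ALERT -> IOA_STATE_IN_HARD_RESET ->
+ * IOA_STATE_IN_BRINGUP (or IOA_STATE_IN_SOFT_RESET) ->
+ * IOA_STATE_OPERATIONAL, and marks the IOA dead once more than
+ * PMCRAID_RESET_ATTEMPTS hard-reset attempts have failed (see the
+ * remaining cases of this switch).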
+ */ + if (pinstance->ioa_hard_reset == 0) { + if (ioread32(pinstance->ioa_status) & + INTRS_TRANSITION_TO_OPERATIONAL) { + pmcraid_info("sticky bit set, bring-up\n"); + pinstance->ioa_state = IOA_STATE_IN_BRINGUP; + pmcraid_reinit_cmdblk(cmd); + pmcraid_identify_hrrq(cmd); + } else { + pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET; + pmcraid_soft_reset(cmd); + } + } else { + /* Alert IOA of a possible reset and wait for critical + * operation in progress bit to reset + */ + pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; + pmcraid_reset_alert(cmd); + } + break; + + case IOA_STATE_IN_RESET_ALERT: + /* If critical operation in progress bit is reset or wait gets + * timed out, reset proceeds with starting BIST on the IOA. + * pmcraid_ioa_hard_reset keeps a count of reset attempts. If + * they are 3 or more, reset engine marks IOA dead and returns + */ + pinstance->ioa_state = IOA_STATE_IN_HARD_RESET; + pmcraid_start_bist(cmd); + break; + + case IOA_STATE_IN_HARD_RESET: + pinstance->ioa_reset_attempts++; + + /* retry reset if we haven't reached maximum allowed limit */ + if (pinstance->ioa_reset_attempts > PMCRAID_RESET_ATTEMPTS) { + pinstance->ioa_reset_attempts = 0; + pmcraid_err("IOA didn't respond marking it as dead\n"); + pinstance->ioa_state = IOA_STATE_DEAD; + + if (pinstance->ioa_bringdown) + pmcraid_notify_ioastate(pinstance, + PMC_DEVICE_EVENT_SHUTDOWN_FAILED); + else + pmcraid_notify_ioastate(pinstance, + PMC_DEVICE_EVENT_RESET_FAILED); + reset_complete = 1; + break; + } + + /* Once either bist or pci reset is done, restore PCI config + * space. If this fails, proceed with hard reset again + */ + pci_restore_state(pinstance->pdev); + + /* fail all pending commands */ + pmcraid_fail_outstanding_cmds(pinstance); + + /* check if unit check is active, if so extract dump */ + if (pinstance->ioa_unit_check) { + pmcraid_info("unit check is active\n"); + pinstance->ioa_unit_check = 0; + pmcraid_get_dump(pinstance); + pinstance->ioa_reset_attempts--; + pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; + pmcraid_reset_alert(cmd); + break; + } + + /* if the reset reason is to bring-down the ioa, we might be + * done with the reset restore pci_config_space and complete + * the reset + */ + if (pinstance->ioa_bringdown) { + pmcraid_info("bringing down the adapter\n"); + pinstance->ioa_shutdown_type = SHUTDOWN_NONE; + pinstance->ioa_bringdown = 0; + pinstance->ioa_state = IOA_STATE_UNKNOWN; + pmcraid_notify_ioastate(pinstance, + PMC_DEVICE_EVENT_SHUTDOWN_SUCCESS); + reset_complete = 1; + } else { + /* bring-up IOA, so proceed with soft reset + * Reinitialize hrrq_buffers and their indices also + * enable interrupts after a pci_restore_state + */ + if (pmcraid_reset_enable_ioa(pinstance)) { + pinstance->ioa_state = IOA_STATE_IN_BRINGUP; + pmcraid_info("bringing up the adapter\n"); + pmcraid_reinit_cmdblk(cmd); + pmcraid_identify_hrrq(cmd); + } else { + pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET; + pmcraid_soft_reset(cmd); + } + } + break; + + case IOA_STATE_IN_SOFT_RESET: + /* TRANSITION TO OPERATIONAL is on so start initialization + * sequence + */ + pmcraid_info("In softreset proceeding with bring-up\n"); + pinstance->ioa_state = IOA_STATE_IN_BRINGUP; + + /* Initialization commands start with HRRQ identification. 
From + * now on tasklet completes most of the commands as IOA is up + * and intrs are enabled + */ + pmcraid_identify_hrrq(cmd); + break; + + case IOA_STATE_IN_BRINGUP: + /* we are done with bringing up of IOA, change the ioa_state to + * operational and wake up any waiters + */ + pinstance->ioa_state = IOA_STATE_OPERATIONAL; + reset_complete = 1; + break; + + case IOA_STATE_OPERATIONAL: + default: + /* When IOA is operational and a reset is requested, check for + * the reset reason. If reset is to bring down IOA, unregister + * HCAMs and initiate shutdown; if adapter reset is forced then + * restart reset sequence again + */ + if (pinstance->ioa_shutdown_type == SHUTDOWN_NONE && + pinstance->force_ioa_reset == 0) { + pmcraid_notify_ioastate(pinstance, + PMC_DEVICE_EVENT_RESET_SUCCESS); + reset_complete = 1; + } else { + if (pinstance->ioa_shutdown_type != SHUTDOWN_NONE) + pinstance->ioa_state = IOA_STATE_IN_BRINGDOWN; + pmcraid_reinit_cmdblk(cmd); + pmcraid_unregister_hcams(cmd); + } + break; + } + + /* reset will be completed if ioa_state is either DEAD or UNKNOWN or + * OPERATIONAL. Reset all control variables used during reset, wake up + * any waiting threads and let the SCSI mid-layer send commands. Note + * that host_lock must be held before invoking scsi_report_bus_reset. + */ + if (reset_complete) { + pinstance->ioa_reset_in_progress = 0; + pinstance->ioa_reset_attempts = 0; + pinstance->reset_cmd = NULL; + pinstance->ioa_shutdown_type = SHUTDOWN_NONE; + pinstance->ioa_bringdown = 0; + pmcraid_return_cmd(cmd); + + /* If target state is to bring up the adapter, proceed with + * hcam registration and resource exposure to mid-layer. + */ + if (pinstance->ioa_state == IOA_STATE_OPERATIONAL) + pmcraid_register_hcams(pinstance); + + wake_up_all(&pinstance->reset_wait_q); + } + + return; +} + +/** + * pmcraid_initiate_reset - initiates reset sequence. This is called from + * ISR/tasklet during error interrupts including IOA unit check. If reset + * is already in progress, it just returns, otherwise initiates IOA reset + * to bring IOA up to operational state. + * + * @pinstance: pointer to adapter instance structure + * + * Return value + * none + */ +static void pmcraid_initiate_reset(struct pmcraid_instance *pinstance) +{ + struct pmcraid_cmd *cmd; + + /* If the reset is already in progress, just return, otherwise start + * reset sequence and return + */ + if (!pinstance->ioa_reset_in_progress) { + scsi_block_requests(pinstance->host); + cmd = pmcraid_get_free_cmd(pinstance); + + if (cmd == NULL) { + pmcraid_err("no cmnd blocks for initiate_reset\n"); + return; + } + + pinstance->ioa_shutdown_type = SHUTDOWN_NONE; + pinstance->reset_cmd = cmd; + pinstance->force_ioa_reset = 1; + pmcraid_notify_ioastate(pinstance, + PMC_DEVICE_EVENT_RESET_START); + pmcraid_ioa_reset(cmd); + } +} + +/** + * pmcraid_reset_reload - utility routine for doing IOA reset either to bringup + * or bringdown IOA + * @pinstance: pointer adapter instance structure + * @shutdown_type: shutdown type to be used NONE, NORMAL or ABRREV + * @target_state: expected target state after reset + * + * Note: This command initiates reset and waits for its completion. Hence this + * should not be called from isr/timer/tasklet functions (timeout handlers, + * error response handlers and interrupt handlers). + * + * Return Value + * 1 in case ioa_state is not target_state, 0 otherwise. 
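+ *
+ * For illustration, the bring-up/bring-down wrappers further below reduce
+ * to calls of the form
+ *
+ *   pmcraid_reset_reload(pinstance, SHUTDOWN_NONE, IOA_STATE_OPERATIONAL);
+ *   pmcraid_reset_reload(pinstance, SHUTDOWN_NORMAL, IOA_STATE_UNKNOWN);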
+ */ +static int pmcraid_reset_reload( + struct pmcraid_instance *pinstance, + u8 shutdown_type, + u8 target_state +) +{ + struct pmcraid_cmd *reset_cmd = NULL; + unsigned long lock_flags; + int reset = 1; + + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + + if (pinstance->ioa_reset_in_progress) { + pmcraid_info("reset_reload: reset is already in progress\n"); + + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + + wait_event(pinstance->reset_wait_q, + !pinstance->ioa_reset_in_progress); + + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + + if (pinstance->ioa_state == IOA_STATE_DEAD) { + pmcraid_info("reset_reload: IOA is dead\n"); + goto out_unlock; + } + + if (pinstance->ioa_state == target_state) { + reset = 0; + goto out_unlock; + } + } + + pmcraid_info("reset_reload: proceeding with reset\n"); + scsi_block_requests(pinstance->host); + reset_cmd = pmcraid_get_free_cmd(pinstance); + if (reset_cmd == NULL) { + pmcraid_err("no free cmnd for reset_reload\n"); + goto out_unlock; + } + + if (shutdown_type == SHUTDOWN_NORMAL) + pinstance->ioa_bringdown = 1; + + pinstance->ioa_shutdown_type = shutdown_type; + pinstance->reset_cmd = reset_cmd; + pinstance->force_ioa_reset = reset; + pmcraid_info("reset_reload: initiating reset\n"); + pmcraid_ioa_reset(reset_cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + pmcraid_info("reset_reload: waiting for reset to complete\n"); + wait_event(pinstance->reset_wait_q, + !pinstance->ioa_reset_in_progress); + + pmcraid_info("reset_reload: reset is complete !!\n"); + scsi_unblock_requests(pinstance->host); + return pinstance->ioa_state != target_state; + +out_unlock: + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + return reset; +} + +/** + * pmcraid_reset_bringdown - wrapper over pmcraid_reset_reload to bringdown IOA + * + * @pinstance: pointer to adapter instance structure + * + * Return Value + * whatever is returned from pmcraid_reset_reload + */ +static int pmcraid_reset_bringdown(struct pmcraid_instance *pinstance) +{ + return pmcraid_reset_reload(pinstance, + SHUTDOWN_NORMAL, + IOA_STATE_UNKNOWN); +} + +/** + * pmcraid_reset_bringup - wrapper over pmcraid_reset_reload to bring up IOA + * + * @pinstance: pointer to adapter instance structure + * + * Return Value + * whatever is returned from pmcraid_reset_reload + */ +static int pmcraid_reset_bringup(struct pmcraid_instance *pinstance) +{ + pmcraid_notify_ioastate(pinstance, PMC_DEVICE_EVENT_RESET_START); + + return pmcraid_reset_reload(pinstance, + SHUTDOWN_NONE, + IOA_STATE_OPERATIONAL); +} + +/** + * pmcraid_request_sense - Send request sense to a device + * @cmd: pmcraid command struct + * + * This function sends a request sense to a device as a result of a check + * condition. This method re-uses the same command block that failed earlier. 
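+ * In either outcome (a failed DMA mapping of the sense buffer or completion
+ * of the REQUEST SENSE) the command is finished through pmcraid_erp_done().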
+ */ +static void pmcraid_request_sense(struct pmcraid_cmd *cmd) +{ + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl; + struct device *dev = &cmd->drv_inst->pdev->dev; + + cmd->sense_buffer = cmd->scsi_cmd->sense_buffer; + cmd->sense_buffer_dma = dma_map_single(dev, cmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, cmd->sense_buffer_dma)) { + pmcraid_err + ("couldn't allocate sense buffer for request sense\n"); + pmcraid_erp_done(cmd); + return; + } + + /* re-use the command block */ + memset(&cmd->ioa_cb->ioasa, 0, sizeof(struct pmcraid_ioasa)); + memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN); + ioarcb->request_flags0 = (SYNC_COMPLETE | + NO_LINK_DESCS | + INHIBIT_UL_CHECK); + ioarcb->request_type = REQ_TYPE_SCSI; + ioarcb->cdb[0] = REQUEST_SENSE; + ioarcb->cdb[4] = SCSI_SENSE_BUFFERSIZE; + + ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + + offsetof(struct pmcraid_ioarcb, + add_data.u.ioadl[0])); + ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); + + ioarcb->data_transfer_length = cpu_to_le32(SCSI_SENSE_BUFFERSIZE); + + ioadl->address = cpu_to_le64(cmd->sense_buffer_dma); + ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE); + ioadl->flags = IOADL_FLAGS_LAST_DESC; + + /* request sense might be called as part of error response processing + * which runs in tasklet context. It is possible that mid-layer might + * schedule queuecommand during this time, hence, writing to IOARRIN + * must be protected by host_lock + */ + pmcraid_send_cmd(cmd, pmcraid_erp_done, + PMCRAID_REQUEST_SENSE_TIMEOUT, + pmcraid_timeout_handler); +} + +/** + * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery + * @cmd: command that failed + * @need_sense: true if request_sense is required after cancel all + * + * This function sends a cancel all to a device to clear the queue. + */ +static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, bool need_sense) +{ + struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata; + + memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN); + ioarcb->request_flags0 = SYNC_OVERRIDE; + ioarcb->request_type = REQ_TYPE_IOACMD; + ioarcb->cdb[0] = PMCRAID_CANCEL_ALL_REQUESTS; + + if (RES_IS_GSCSI(res->cfg_entry)) + ioarcb->cdb[1] = PMCRAID_SYNC_COMPLETE_AFTER_CANCEL; + + ioarcb->ioadl_bus_addr = 0; + ioarcb->ioadl_length = 0; + ioarcb->data_transfer_length = 0; + ioarcb->ioarcb_bus_addr &= cpu_to_le64((~0x1FULL)); + + /* writing to IOARRIN must be protected by host_lock, as mid-layer + * might schedule queuecommand while we are doing this + */ + pmcraid_send_cmd(cmd, need_sense ?
+ pmcraid_erp_done : pmcraid_request_sense, + PMCRAID_REQUEST_SENSE_TIMEOUT, + pmcraid_timeout_handler); +} + +/** + * pmcraid_frame_auto_sense: frame fixed format sense information + * + * @cmd: pointer to failing command block + * + * Return value + * none + */ +static void pmcraid_frame_auto_sense(struct pmcraid_cmd *cmd) +{ + u8 *sense_buf = cmd->scsi_cmd->sense_buffer; + struct pmcraid_resource_entry *res = cmd->scsi_cmd->device->hostdata; + struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa; + u32 ioasc = le32_to_cpu(ioasa->ioasc); + u32 failing_lba = 0; + + memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); + cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION; + + if (RES_IS_VSET(res->cfg_entry) && + ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC && + ioasa->u.vset.failing_lba_hi != 0) { + + sense_buf[0] = 0x72; + sense_buf[1] = PMCRAID_IOASC_SENSE_KEY(ioasc); + sense_buf[2] = PMCRAID_IOASC_SENSE_CODE(ioasc); + sense_buf[3] = PMCRAID_IOASC_SENSE_QUAL(ioasc); + + sense_buf[7] = 12; + sense_buf[8] = 0; + sense_buf[9] = 0x0A; + sense_buf[10] = 0x80; + + failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_hi); + + sense_buf[12] = (failing_lba & 0xff000000) >> 24; + sense_buf[13] = (failing_lba & 0x00ff0000) >> 16; + sense_buf[14] = (failing_lba & 0x0000ff00) >> 8; + sense_buf[15] = failing_lba & 0x000000ff; + + failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_lo); + + sense_buf[16] = (failing_lba & 0xff000000) >> 24; + sense_buf[17] = (failing_lba & 0x00ff0000) >> 16; + sense_buf[18] = (failing_lba & 0x0000ff00) >> 8; + sense_buf[19] = failing_lba & 0x000000ff; + } else { + sense_buf[0] = 0x70; + sense_buf[2] = PMCRAID_IOASC_SENSE_KEY(ioasc); + sense_buf[12] = PMCRAID_IOASC_SENSE_CODE(ioasc); + sense_buf[13] = PMCRAID_IOASC_SENSE_QUAL(ioasc); + + if (ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC) { + if (RES_IS_VSET(res->cfg_entry)) + failing_lba = + le32_to_cpu(ioasa->u. + vset.failing_lba_lo); + sense_buf[0] |= 0x80; + sense_buf[3] = (failing_lba >> 24) & 0xff; + sense_buf[4] = (failing_lba >> 16) & 0xff; + sense_buf[5] = (failing_lba >> 8) & 0xff; + sense_buf[6] = failing_lba & 0xff; + } + + sense_buf[7] = 6; /* additional length */ + } +} + +/** + * pmcraid_error_handler - Error response handlers for a SCSI op + * @cmd: pointer to pmcraid_cmd that has failed + * + * This function determines whether or not to initiate ERP on the affected + * device. This is called from a tasklet, which doesn't hold any locks. 
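+ *
+ * For a check-condition style device status the handler copies any
+ * auto-sense data supplied in the IOASA and then either issues a CANCEL ALL
+ * (for generic SCSI resources), completes the command through
+ * pmcraid_erp_done(), or sends a REQUEST SENSE to fetch the missing sense
+ * data.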
+ * + * Return value: + * 0 it caller can complete the request, otherwise 1 where in error + * handler itself completes the request and returns the command block + * back to free-pool + */ +static int pmcraid_error_handler(struct pmcraid_cmd *cmd) +{ + struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; + struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata; + struct pmcraid_instance *pinstance = cmd->drv_inst; + struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa; + u32 ioasc = le32_to_cpu(ioasa->ioasc); + u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK; + bool sense_copied = false; + + if (!res) { + pmcraid_info("resource pointer is NULL\n"); + return 0; + } + + /* If this was a SCSI read/write command keep count of errors */ + if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD) + atomic_inc(&res->read_failures); + else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD) + atomic_inc(&res->write_failures); + + if (!RES_IS_GSCSI(res->cfg_entry) && + masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) { + pmcraid_frame_auto_sense(cmd); + } + + /* Log IOASC/IOASA information based on user settings */ + pmcraid_ioasc_logger(ioasc, cmd); + + switch (masked_ioasc) { + + case PMCRAID_IOASC_AC_TERMINATED_BY_HOST: + scsi_cmd->result |= (DID_ABORT << 16); + break; + + case PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE: + case PMCRAID_IOASC_HW_CANNOT_COMMUNICATE: + scsi_cmd->result |= (DID_NO_CONNECT << 16); + break; + + case PMCRAID_IOASC_NR_SYNC_REQUIRED: + res->sync_reqd = 1; + scsi_cmd->result |= (DID_IMM_RETRY << 16); + break; + + case PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC: + scsi_cmd->result |= (DID_PASSTHROUGH << 16); + break; + + case PMCRAID_IOASC_UA_BUS_WAS_RESET: + case PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER: + if (!res->reset_progress) + scsi_report_bus_reset(pinstance->host, + scsi_cmd->device->channel); + scsi_cmd->result |= (DID_ERROR << 16); + break; + + case PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR: + scsi_cmd->result |= PMCRAID_IOASC_SENSE_STATUS(ioasc); + res->sync_reqd = 1; + + /* if check_condition is not active return with error otherwise + * get/frame the sense buffer + */ + if (PMCRAID_IOASC_SENSE_STATUS(ioasc) != + SAM_STAT_CHECK_CONDITION && + PMCRAID_IOASC_SENSE_STATUS(ioasc) != SAM_STAT_ACA_ACTIVE) + return 0; + + /* If we have auto sense data as part of IOASA pass it to + * mid-layer + */ + if (ioasa->auto_sense_length != 0) { + short sense_len = le16_to_cpu(ioasa->auto_sense_length); + int data_size = min_t(u16, sense_len, + SCSI_SENSE_BUFFERSIZE); + + memcpy(scsi_cmd->sense_buffer, + ioasa->sense_data, + data_size); + sense_copied = true; + } + + if (RES_IS_GSCSI(res->cfg_entry)) + pmcraid_cancel_all(cmd, sense_copied); + else if (sense_copied) + pmcraid_erp_done(cmd); + else + pmcraid_request_sense(cmd); + + return 1; + + case PMCRAID_IOASC_NR_INIT_CMD_REQUIRED: + break; + + default: + if (PMCRAID_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) + scsi_cmd->result |= (DID_ERROR << 16); + break; + } + return 0; +} + +/** + * pmcraid_reset_device - device reset handler functions + * + * @scsi_cmd: scsi command struct + * @timeout: command timeout + * @modifier: reset modifier indicating the reset sequence to be performed + * + * This function issues a device reset to the affected device. + * A LUN reset will be sent to the device first. If that does + * not work, a target reset will be sent. 
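+ *
+ * The error-handling callbacks further below invoke this as, for example,
+ *
+ *   pmcraid_reset_device(scmd, PMCRAID_INTERNAL_TIMEOUT, RESET_DEVICE_LUN);
+ *   pmcraid_reset_device(scmd, PMCRAID_RESET_BUS_TIMEOUT, RESET_DEVICE_BUS);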
+ * + * Return value: + * SUCCESS / FAILED + */ +static int pmcraid_reset_device( + struct scsi_cmnd *scsi_cmd, + unsigned long timeout, + u8 modifier) +{ + struct pmcraid_cmd *cmd; + struct pmcraid_instance *pinstance; + struct pmcraid_resource_entry *res; + struct pmcraid_ioarcb *ioarcb; + unsigned long lock_flags; + u32 ioasc; + + pinstance = + (struct pmcraid_instance *)scsi_cmd->device->host->hostdata; + res = scsi_cmd->device->hostdata; + + if (!res) { + sdev_printk(KERN_ERR, scsi_cmd->device, + "reset_device: NULL resource pointer\n"); + return FAILED; + } + + /* If adapter is currently going through reset/reload, return failed. + * This will force the mid-layer to call _eh_bus/host reset, which + * will then go to sleep and wait for the reset to complete + */ + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + if (pinstance->ioa_reset_in_progress || + pinstance->ioa_state == IOA_STATE_DEAD) { + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + return FAILED; + } + + res->reset_progress = 1; + pmcraid_info("Resetting %s resource with addr %x\n", + ((modifier & RESET_DEVICE_LUN) ? "LUN" : + ((modifier & RESET_DEVICE_TARGET) ? "TARGET" : "BUS")), + le32_to_cpu(res->cfg_entry.resource_address)); + + /* get a free cmd block */ + cmd = pmcraid_get_free_cmd(pinstance); + + if (cmd == NULL) { + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + pmcraid_err("%s: no cmd blocks are available\n", __func__); + return FAILED; + } + + ioarcb = &cmd->ioa_cb->ioarcb; + ioarcb->resource_handle = res->cfg_entry.resource_handle; + ioarcb->request_type = REQ_TYPE_IOACMD; + ioarcb->cdb[0] = PMCRAID_RESET_DEVICE; + + /* Initialize reset modifier bits */ + if (modifier) + modifier = ENABLE_RESET_MODIFIER | modifier; + + ioarcb->cdb[1] = modifier; + + init_completion(&cmd->wait_for_completion); + cmd->completion_req = 1; + + pmcraid_info("cmd(CDB[0] = %x) for %x with index = %d\n", + cmd->ioa_cb->ioarcb.cdb[0], + le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle), + le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2); + + pmcraid_send_cmd(cmd, + pmcraid_internal_done, + timeout, + pmcraid_timeout_handler); + + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + + /* RESET_DEVICE command completes after all pending IOARCBs are + * completed. Once this command is completed, pmcraind_internal_done + * will wake up the 'completion' queue. + */ + wait_for_completion(&cmd->wait_for_completion); + + /* complete the command here itself and return the command block + * to free list + */ + pmcraid_return_cmd(cmd); + res->reset_progress = 0; + ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); + + /* set the return value based on the returned ioasc */ + return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS; +} + +/** + * _pmcraid_io_done - helper for pmcraid_io_done function + * + * @cmd: pointer to pmcraid command struct + * @reslen: residual data length to be set in the ioasa + * @ioasc: ioasc either returned by IOA or set by driver itself. + * + * This function is invoked by pmcraid_io_done to complete mid-layer + * scsi ops. + * + * Return value: + * 0 if caller is required to return it to free_pool. Returns 1 if + * caller need not worry about freeing command block as error handler + * will take care of that. 
+ */ + +static int _pmcraid_io_done(struct pmcraid_cmd *cmd, int reslen, int ioasc) +{ + struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; + int rc = 0; + + scsi_set_resid(scsi_cmd, reslen); + + pmcraid_info("response(%d) CDB[0] = %x ioasc:result: %x:%x\n", + le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2, + cmd->ioa_cb->ioarcb.cdb[0], + ioasc, scsi_cmd->result); + + if (PMCRAID_IOASC_SENSE_KEY(ioasc) != 0) + rc = pmcraid_error_handler(cmd); + + if (rc == 0) { + scsi_dma_unmap(scsi_cmd); + scsi_done(scsi_cmd); + } + + return rc; +} + +/** + * pmcraid_io_done - SCSI completion function + * + * @cmd: pointer to pmcraid command struct + * + * This function is invoked by tasklet/mid-layer error handler to completing + * the SCSI ops sent from mid-layer. + * + * Return value + * none + */ + +static void pmcraid_io_done(struct pmcraid_cmd *cmd) +{ + u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); + u32 reslen = le32_to_cpu(cmd->ioa_cb->ioasa.residual_data_length); + + if (_pmcraid_io_done(cmd, reslen, ioasc) == 0) + pmcraid_return_cmd(cmd); +} + +/** + * pmcraid_abort_cmd - Aborts a single IOARCB already submitted to IOA + * + * @cmd: command block of the command to be aborted + * + * Return Value: + * returns pointer to command structure used as cancelling cmd + */ +static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd) +{ + struct pmcraid_cmd *cancel_cmd; + struct pmcraid_instance *pinstance; + + pinstance = (struct pmcraid_instance *)cmd->drv_inst; + + cancel_cmd = pmcraid_get_free_cmd(pinstance); + + if (cancel_cmd == NULL) { + pmcraid_err("%s: no cmd blocks are available\n", __func__); + return NULL; + } + + pmcraid_prepare_cancel_cmd(cancel_cmd, cmd); + + pmcraid_info("aborting command CDB[0]= %x with index = %d\n", + cmd->ioa_cb->ioarcb.cdb[0], + le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2); + + init_completion(&cancel_cmd->wait_for_completion); + cancel_cmd->completion_req = 1; + + pmcraid_info("command (%d) CDB[0] = %x for %x\n", + le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.response_handle) >> 2, + cancel_cmd->ioa_cb->ioarcb.cdb[0], + le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.resource_handle)); + + pmcraid_send_cmd(cancel_cmd, + pmcraid_internal_done, + PMCRAID_INTERNAL_TIMEOUT, + pmcraid_timeout_handler); + return cancel_cmd; +} + +/** + * pmcraid_abort_complete - Waits for ABORT TASK completion + * + * @cancel_cmd: command block use as cancelling command + * + * Return Value: + * returns SUCCESS if ABORT TASK has good completion + * otherwise FAILED + */ +static int pmcraid_abort_complete(struct pmcraid_cmd *cancel_cmd) +{ + struct pmcraid_resource_entry *res; + u32 ioasc; + + wait_for_completion(&cancel_cmd->wait_for_completion); + res = cancel_cmd->res; + cancel_cmd->res = NULL; + ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc); + + /* If the abort task is not timed out we will get a Good completion + * as sense_key, otherwise we may get one the following responses + * due to subsequent bus reset or device reset. In case IOASC is + * NR_SYNC_REQUIRED, set sync_reqd flag for the corresponding resource + */ + if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET || + ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) { + if (ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) + res->sync_reqd = 1; + ioasc = 0; + } + + /* complete the command here itself */ + pmcraid_return_cmd(cancel_cmd); + return PMCRAID_IOASC_SENSE_KEY(ioasc) ? 
FAILED : SUCCESS; +} + +/** + * pmcraid_eh_abort_handler - entry point for aborting a single task on errors + * + * @scsi_cmd: scsi command struct given by mid-layer. When this is called + * mid-layer ensures that no other commands are queued. This + * never gets called under interrupt, but a separate eh thread. + * + * Return value: + * SUCCESS / FAILED + */ +static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd) +{ + struct pmcraid_instance *pinstance; + struct pmcraid_cmd *cmd; + struct pmcraid_resource_entry *res; + unsigned long host_lock_flags; + unsigned long pending_lock_flags; + struct pmcraid_cmd *cancel_cmd = NULL; + int cmd_found = 0; + int rc = FAILED; + + pinstance = + (struct pmcraid_instance *)scsi_cmd->device->host->hostdata; + + scmd_printk(KERN_INFO, scsi_cmd, + "I/O command timed out, aborting it.\n"); + + res = scsi_cmd->device->hostdata; + + if (res == NULL) + return rc; + + /* If we are currently going through reset/reload, return failed. + * This will force the mid-layer to eventually call + * pmcraid_eh_host_reset which will then go to sleep and wait for the + * reset to complete + */ + spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags); + + if (pinstance->ioa_reset_in_progress || + pinstance->ioa_state == IOA_STATE_DEAD) { + spin_unlock_irqrestore(pinstance->host->host_lock, + host_lock_flags); + return rc; + } + + /* loop over pending cmd list to find cmd corresponding to this + * scsi_cmd. Note that this command might not have been completed + * already. locking: all pending commands are protected with + * pending_pool_lock. + */ + spin_lock_irqsave(&pinstance->pending_pool_lock, pending_lock_flags); + list_for_each_entry(cmd, &pinstance->pending_cmd_pool, free_list) { + + if (cmd->scsi_cmd == scsi_cmd) { + cmd_found = 1; + break; + } + } + + spin_unlock_irqrestore(&pinstance->pending_pool_lock, + pending_lock_flags); + + /* If the command to be aborted was given to IOA and still pending with + * it, send ABORT_TASK to abort this and wait for its completion + */ + if (cmd_found) + cancel_cmd = pmcraid_abort_cmd(cmd); + + spin_unlock_irqrestore(pinstance->host->host_lock, + host_lock_flags); + + if (cancel_cmd) { + cancel_cmd->res = cmd->scsi_cmd->device->hostdata; + rc = pmcraid_abort_complete(cancel_cmd); + } + + return cmd_found ? rc : SUCCESS; +} + +/** + * pmcraid_eh_device_reset_handler - bus/target/device reset handler callbacks + * + * @scmd: pointer to scsi_cmd that was sent to the resource to be reset. + * + * All these routines invokve pmcraid_reset_device with appropriate parameters. + * Since these are called from mid-layer EH thread, no other IO will be queued + * to the resource being reset. However, control path (IOCTL) may be active so + * it is necessary to synchronize IOARRIN writes which pmcraid_reset_device + * takes care by locking/unlocking host_lock. 
+ * + * Return value + * SUCCESS or FAILED + */ +static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd) +{ + scmd_printk(KERN_INFO, scmd, + "resetting device due to an I/O command timeout.\n"); + return pmcraid_reset_device(scmd, + PMCRAID_INTERNAL_TIMEOUT, + RESET_DEVICE_LUN); +} + +static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd) +{ + scmd_printk(KERN_INFO, scmd, + "Doing bus reset due to an I/O command timeout.\n"); + return pmcraid_reset_device(scmd, + PMCRAID_RESET_BUS_TIMEOUT, + RESET_DEVICE_BUS); +} + +static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd) +{ + scmd_printk(KERN_INFO, scmd, + "Doing target reset due to an I/O command timeout.\n"); + return pmcraid_reset_device(scmd, + PMCRAID_INTERNAL_TIMEOUT, + RESET_DEVICE_TARGET); +} + +/** + * pmcraid_eh_host_reset_handler - adapter reset handler callback + * + * @scmd: pointer to scsi_cmd that was sent to a resource of adapter + * + * Initiates adapter reset to bring it up to operational state + * + * Return value + * SUCCESS or FAILED + */ +static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd) +{ + unsigned long interval = 10000; /* 10 seconds interval */ + int waits = jiffies_to_msecs(PMCRAID_RESET_HOST_TIMEOUT) / interval; + struct pmcraid_instance *pinstance = + (struct pmcraid_instance *)(scmd->device->host->hostdata); + + + /* wait for an additional 150 seconds just in case firmware could come + * up and if it could complete all the pending commands excluding the + * two HCAM (CCN and LDN). + */ + while (waits--) { + if (atomic_read(&pinstance->outstanding_cmds) <= + PMCRAID_MAX_HCAM_CMD) + return SUCCESS; + msleep(interval); + } + + dev_err(&pinstance->pdev->dev, + "Adapter being reset due to an I/O command timeout.\n"); + return pmcraid_reset_bringup(pinstance) == 0 ? SUCCESS : FAILED; +} + +/** + * pmcraid_init_ioadls - initializes IOADL related fields in IOARCB + * @cmd: pmcraid command struct + * @sgcount: count of scatter-gather elements + * + * Return value + * returns pointer pmcraid_ioadl_desc, initialized to point to internal + * or external IOADLs + */ +static struct pmcraid_ioadl_desc * +pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount) +{ + struct pmcraid_ioadl_desc *ioadl; + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + int ioadl_count = 0; + + if (ioarcb->add_cmd_param_length) + ioadl_count = DIV_ROUND_UP(le16_to_cpu(ioarcb->add_cmd_param_length), 16); + ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc) * sgcount); + + if ((sgcount + ioadl_count) > (ARRAY_SIZE(ioarcb->add_data.u.ioadl))) { + /* external ioadls start at offset 0x80 from control_block + * structure, re-using 24 out of 27 ioadls part of IOARCB. + * It is necessary to indicate to firmware that driver is + * using ioadls to be treated as external to IOARCB. 
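+ *
+ * For example, when the additional command parameters already occupy the
+ * first embedded descriptors and the scatter list cannot fit in the
+ * remainder, the list is built from add_data.u.ioadl[3] onwards and
+ * ioadl_bus_addr is pointed at that 0x80 offset.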
+ */ + ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL)); + ioarcb->ioadl_bus_addr = + cpu_to_le64((cmd->ioa_cb_bus_addr) + + offsetof(struct pmcraid_ioarcb, + add_data.u.ioadl[3])); + ioadl = &ioarcb->add_data.u.ioadl[3]; + } else { + ioarcb->ioadl_bus_addr = + cpu_to_le64((cmd->ioa_cb_bus_addr) + + offsetof(struct pmcraid_ioarcb, + add_data.u.ioadl[ioadl_count])); + + ioadl = &ioarcb->add_data.u.ioadl[ioadl_count]; + ioarcb->ioarcb_bus_addr |= + cpu_to_le64(DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8)); + } + + return ioadl; +} + +/** + * pmcraid_build_ioadl - Build a scatter/gather list and map the buffer + * @pinstance: pointer to adapter instance structure + * @cmd: pmcraid command struct + * + * This function is invoked by queuecommand entry point while sending a command + * to firmware. This builds ioadl descriptors and sets up ioarcb fields. + * + * Return value: + * 0 on success or -1 on failure + */ +static int pmcraid_build_ioadl( + struct pmcraid_instance *pinstance, + struct pmcraid_cmd *cmd +) +{ + int i, nseg; + struct scatterlist *sglist; + + struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; + struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb); + struct pmcraid_ioadl_desc *ioadl; + + u32 length = scsi_bufflen(scsi_cmd); + + if (!length) + return 0; + + nseg = scsi_dma_map(scsi_cmd); + + if (nseg < 0) { + scmd_printk(KERN_ERR, scsi_cmd, "scsi_map_dma failed!\n"); + return -1; + } else if (nseg > PMCRAID_MAX_IOADLS) { + scsi_dma_unmap(scsi_cmd); + scmd_printk(KERN_ERR, scsi_cmd, + "sg count is (%d) more than allowed!\n", nseg); + return -1; + } + + /* Initialize IOARCB data transfer length fields */ + if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) + ioarcb->request_flags0 |= TRANSFER_DIR_WRITE; + + ioarcb->request_flags0 |= NO_LINK_DESCS; + ioarcb->data_transfer_length = cpu_to_le32(length); + ioadl = pmcraid_init_ioadls(cmd, nseg); + + /* Initialize IOADL descriptor addresses */ + scsi_for_each_sg(scsi_cmd, sglist, nseg, i) { + ioadl[i].data_len = cpu_to_le32(sg_dma_len(sglist)); + ioadl[i].address = cpu_to_le64(sg_dma_address(sglist)); + ioadl[i].flags = 0; + } + /* setup last descriptor */ + ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC; + + return 0; +} + +/** + * pmcraid_queuecommand_lck - Queue a mid-layer request + * @scsi_cmd: scsi command struct + * + * This function queues a request generated by the mid-layer. Midlayer calls + * this routine within host->lock. 
Some of the functions called by queuecommand + * would use cmd block queue locks (free_pool_lock and pending_pool_lock) + * + * Return value: + * 0 on success + * SCSI_MLQUEUE_DEVICE_BUSY if device is busy + * SCSI_MLQUEUE_HOST_BUSY if host is busy + */ +static int pmcraid_queuecommand_lck(struct scsi_cmnd *scsi_cmd) +{ + struct pmcraid_instance *pinstance; + struct pmcraid_resource_entry *res; + struct pmcraid_ioarcb *ioarcb; + struct pmcraid_cmd *cmd; + u32 fw_version; + int rc = 0; + + pinstance = + (struct pmcraid_instance *)scsi_cmd->device->host->hostdata; + fw_version = be16_to_cpu(pinstance->inq_data->fw_version); + res = scsi_cmd->device->hostdata; + scsi_cmd->result = (DID_OK << 16); + + /* if adapter is marked as dead, set result to DID_NO_CONNECT complete + * the command + */ + if (pinstance->ioa_state == IOA_STATE_DEAD) { + pmcraid_info("IOA is dead, but queuecommand is scheduled\n"); + scsi_cmd->result = (DID_NO_CONNECT << 16); + scsi_done(scsi_cmd); + return 0; + } + + /* If IOA reset is in progress, can't queue the commands */ + if (pinstance->ioa_reset_in_progress) + return SCSI_MLQUEUE_HOST_BUSY; + + /* Firmware doesn't support SYNCHRONIZE_CACHE command (0x35), complete + * the command here itself with success return + */ + if (scsi_cmd->cmnd[0] == SYNCHRONIZE_CACHE) { + pmcraid_info("SYNC_CACHE(0x35), completing in driver itself\n"); + scsi_done(scsi_cmd); + return 0; + } + + /* initialize the command and IOARCB to be sent to IOA */ + cmd = pmcraid_get_free_cmd(pinstance); + + if (cmd == NULL) { + pmcraid_err("free command block is not available\n"); + return SCSI_MLQUEUE_HOST_BUSY; + } + + cmd->scsi_cmd = scsi_cmd; + ioarcb = &(cmd->ioa_cb->ioarcb); + memcpy(ioarcb->cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); + ioarcb->resource_handle = res->cfg_entry.resource_handle; + ioarcb->request_type = REQ_TYPE_SCSI; + + /* set hrrq number where the IOA should respond to. Note that all cmds + * generated internally uses hrrq_id 0, exception to this is the cmd + * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses + * hrrq_id assigned here in queuecommand + */ + ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) % + pinstance->num_hrrq; + cmd->cmd_done = pmcraid_io_done; + + if (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry)) { + if (scsi_cmd->underflow == 0) + ioarcb->request_flags0 |= INHIBIT_UL_CHECK; + + if (res->sync_reqd) { + ioarcb->request_flags0 |= SYNC_COMPLETE; + res->sync_reqd = 0; + } + + ioarcb->request_flags0 |= NO_LINK_DESCS; + + if (scsi_cmd->flags & SCMD_TAGGED) + ioarcb->request_flags1 |= TASK_TAG_SIMPLE; + + if (RES_IS_GSCSI(res->cfg_entry)) + ioarcb->request_flags1 |= DELAY_AFTER_RESET; + } + + rc = pmcraid_build_ioadl(pinstance, cmd); + + pmcraid_info("command (%d) CDB[0] = %x for %x:%x:%x:%x\n", + le32_to_cpu(ioarcb->response_handle) >> 2, + scsi_cmd->cmnd[0], pinstance->host->unique_id, + RES_IS_VSET(res->cfg_entry) ? PMCRAID_VSET_BUS_ID : + PMCRAID_PHYS_BUS_ID, + RES_IS_VSET(res->cfg_entry) ? + (fw_version <= PMCRAID_FW_VERSION_1 ? 
+ res->cfg_entry.unique_flags1 : + le16_to_cpu(res->cfg_entry.array_id) & 0xFF) : + RES_TARGET(res->cfg_entry.resource_address), + RES_LUN(res->cfg_entry.resource_address)); + + if (likely(rc == 0)) { + _pmcraid_fire_command(cmd); + } else { + pmcraid_err("queuecommand could not build ioadl\n"); + pmcraid_return_cmd(cmd); + rc = SCSI_MLQUEUE_HOST_BUSY; + } + + return rc; +} + +static DEF_SCSI_QCMD(pmcraid_queuecommand) + +/* + * pmcraid_open -char node "open" entry, allowed only users with admin access + */ +static int pmcraid_chr_open(struct inode *inode, struct file *filep) +{ + struct pmcraid_instance *pinstance; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + /* Populate adapter instance * pointer for use by ioctl */ + pinstance = container_of(inode->i_cdev, struct pmcraid_instance, cdev); + filep->private_data = pinstance; + + return 0; +} + +/* + * pmcraid_fasync - Async notifier registration from applications + * + * This function adds the calling process to a driver global queue. When an + * event occurs, SIGIO will be sent to all processes in this queue. + */ +static int pmcraid_chr_fasync(int fd, struct file *filep, int mode) +{ + struct pmcraid_instance *pinstance; + int rc; + + pinstance = filep->private_data; + mutex_lock(&pinstance->aen_queue_lock); + rc = fasync_helper(fd, filep, mode, &pinstance->aen_queue); + mutex_unlock(&pinstance->aen_queue_lock); + + return rc; +} + +/** + * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself + * + * @pinstance: pointer to adapter instance structure + * @cmd: ioctl command passed in + * @buflen: length of user_buffer + * @user_buffer: user buffer pointer + * + * Return Value + * 0 in case of success, otherwise appropriate error code + */ +static long pmcraid_ioctl_driver( + struct pmcraid_instance *pinstance, + unsigned int cmd, + unsigned int buflen, + void __user *user_buffer +) +{ + int rc = -ENOSYS; + + switch (cmd) { + case PMCRAID_IOCTL_RESET_ADAPTER: + pmcraid_reset_bringup(pinstance); + rc = 0; + break; + + default: + break; + } + + return rc; +} + +/** + * pmcraid_check_ioctl_buffer - check for proper access to user buffer + * + * @cmd: ioctl command + * @arg: user buffer + * @hdr: pointer to kernel memory for pmcraid_ioctl_header + * + * Return Value + * negetive error code if there are access issues, otherwise zero. + * Upon success, returns ioctl header copied out of user buffer. 
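+ *
+ * The user buffer is expected to begin with a struct pmcraid_ioctl_header
+ * carrying PMCRAID_IOCTL_SIGNATURE and the length of any payload that
+ * follows the header; only after this header validates does
+ * pmcraid_chr_ioctl() dispatch to the command-specific handler.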
+ */ + +static int pmcraid_check_ioctl_buffer( + int cmd, + void __user *arg, + struct pmcraid_ioctl_header *hdr +) +{ + int rc; + + if (copy_from_user(hdr, arg, sizeof(struct pmcraid_ioctl_header))) { + pmcraid_err("couldn't copy ioctl header from user buffer\n"); + return -EFAULT; + } + + /* check for valid driver signature */ + rc = memcmp(hdr->signature, + PMCRAID_IOCTL_SIGNATURE, + sizeof(hdr->signature)); + if (rc) { + pmcraid_err("signature verification failed\n"); + return -EINVAL; + } + + return 0; +} + +/* + * pmcraid_ioctl - char node ioctl entry point + */ +static long pmcraid_chr_ioctl( + struct file *filep, + unsigned int cmd, + unsigned long arg +) +{ + struct pmcraid_instance *pinstance = NULL; + struct pmcraid_ioctl_header *hdr = NULL; + void __user *argp = (void __user *)arg; + int retval = -ENOTTY; + + hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL); + + if (!hdr) { + pmcraid_err("failed to allocate memory for ioctl header\n"); + return -ENOMEM; + } + + retval = pmcraid_check_ioctl_buffer(cmd, argp, hdr); + + if (retval) { + pmcraid_info("chr_ioctl: header check failed\n"); + kfree(hdr); + return retval; + } + + pinstance = filep->private_data; + + if (!pinstance) { + pmcraid_info("adapter instance is not found\n"); + kfree(hdr); + return -ENOTTY; + } + + switch (_IOC_TYPE(cmd)) { + + case PMCRAID_DRIVER_IOCTL: + arg += sizeof(struct pmcraid_ioctl_header); + retval = pmcraid_ioctl_driver(pinstance, cmd, + hdr->buffer_length, argp); + break; + + default: + retval = -ENOTTY; + break; + } + + kfree(hdr); + + return retval; +} + +/* + * File operations structure for management interface + */ +static const struct file_operations pmcraid_fops = { + .owner = THIS_MODULE, + .open = pmcraid_chr_open, + .fasync = pmcraid_chr_fasync, + .unlocked_ioctl = pmcraid_chr_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .llseek = noop_llseek, +}; + + + + +/** + * pmcraid_show_log_level - Display adapter's error logging level + * @dev: class device struct + * @attr: unused + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + */ +static ssize_t pmcraid_show_log_level( + struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pmcraid_instance *pinstance = + (struct pmcraid_instance *)shost->hostdata; + return snprintf(buf, PAGE_SIZE, "%d\n", pinstance->current_log_level); +} + +/** + * pmcraid_store_log_level - Change the adapter's error logging level + * @dev: class device struct + * @attr: unused + * @buf: buffer + * @count: not used + * + * Return value: + * number of bytes printed to buffer + */ +static ssize_t pmcraid_store_log_level( + struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count +) +{ + struct Scsi_Host *shost; + struct pmcraid_instance *pinstance; + u8 val; + + if (kstrtou8(buf, 10, &val)) + return -EINVAL; + /* log-level should be from 0 to 2 */ + if (val > 2) + return -EINVAL; + + shost = class_to_shost(dev); + pinstance = (struct pmcraid_instance *)shost->hostdata; + pinstance->current_log_level = val; + + return strlen(buf); +} + +static struct device_attribute pmcraid_log_level_attr = { + .attr = { + .name = "log_level", + .mode = S_IRUGO | S_IWUSR, + }, + .show = pmcraid_show_log_level, + .store = pmcraid_store_log_level, +}; + +/** + * pmcraid_show_drv_version - Display driver version + * @dev: class device struct + * @attr: unused + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + */ +static ssize_t 
pmcraid_show_drv_version( + struct device *dev, + struct device_attribute *attr, + char *buf +) +{ + return snprintf(buf, PAGE_SIZE, "version: %s\n", + PMCRAID_DRIVER_VERSION); +} + +static struct device_attribute pmcraid_driver_version_attr = { + .attr = { + .name = "drv_version", + .mode = S_IRUGO, + }, + .show = pmcraid_show_drv_version, +}; + +/** + * pmcraid_show_adapter_id - Display driver assigned adapter id + * @dev: class device struct + * @attr: unused + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + */ +static ssize_t pmcraid_show_adapter_id( + struct device *dev, + struct device_attribute *attr, + char *buf +) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pmcraid_instance *pinstance = + (struct pmcraid_instance *)shost->hostdata; + u32 adapter_id = pci_dev_id(pinstance->pdev); + u32 aen_group = pmcraid_event_family.id; + + return snprintf(buf, PAGE_SIZE, + "adapter id: %d\nminor: %d\naen group: %d\n", + adapter_id, MINOR(pinstance->cdev.dev), aen_group); +} + +static struct device_attribute pmcraid_adapter_id_attr = { + .attr = { + .name = "adapter_id", + .mode = S_IRUGO, + }, + .show = pmcraid_show_adapter_id, +}; + +static struct attribute *pmcraid_host_attrs[] = { + &pmcraid_log_level_attr.attr, + &pmcraid_driver_version_attr.attr, + &pmcraid_adapter_id_attr.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(pmcraid_host); + +/* host template structure for pmcraid driver */ +static const struct scsi_host_template pmcraid_host_template = { + .module = THIS_MODULE, + .name = PMCRAID_DRIVER_NAME, + .queuecommand = pmcraid_queuecommand, + .eh_abort_handler = pmcraid_eh_abort_handler, + .eh_bus_reset_handler = pmcraid_eh_bus_reset_handler, + .eh_target_reset_handler = pmcraid_eh_target_reset_handler, + .eh_device_reset_handler = pmcraid_eh_device_reset_handler, + .eh_host_reset_handler = pmcraid_eh_host_reset_handler, + + .slave_alloc = pmcraid_slave_alloc, + .slave_configure = pmcraid_slave_configure, + .slave_destroy = pmcraid_slave_destroy, + .change_queue_depth = pmcraid_change_queue_depth, + .can_queue = PMCRAID_MAX_IO_CMD, + .this_id = -1, + .sg_tablesize = PMCRAID_MAX_IOADLS, + .max_sectors = PMCRAID_IOA_MAX_SECTORS, + .no_write_same = 1, + .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN, + .shost_groups = pmcraid_host_groups, + .proc_name = PMCRAID_DRIVER_NAME, +}; + +/* + * pmcraid_isr_msix - implements MSI-X interrupt handling routine + * @irq: interrupt vector number + * @dev_id: pointer hrrq_vector + * + * Return Value + * IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored + */ + +static irqreturn_t pmcraid_isr_msix(int irq, void *dev_id) +{ + struct pmcraid_isr_param *hrrq_vector; + struct pmcraid_instance *pinstance; + unsigned long lock_flags; + u32 intrs_val; + int hrrq_id; + + hrrq_vector = (struct pmcraid_isr_param *)dev_id; + hrrq_id = hrrq_vector->hrrq_id; + pinstance = hrrq_vector->drv_inst; + + if (!hrrq_id) { + /* Read the interrupt */ + intrs_val = pmcraid_read_interrupts(pinstance); + if (intrs_val && + ((ioread32(pinstance->int_regs.host_ioa_interrupt_reg) + & DOORBELL_INTR_MSIX_CLR) == 0)) { + /* Any error interrupts including unit_check, + * initiate IOA reset.In case of unit check indicate + * to reset_sequence that IOA unit checked and prepare + * for a dump during reset sequence + */ + if (intrs_val & PMCRAID_ERROR_INTERRUPTS) { + if (intrs_val & INTRS_IOA_UNIT_CHECK) + pinstance->ioa_unit_check = 1; + + pmcraid_err("ISR: error interrupts: %x \ + initiating reset\n", intrs_val); + 
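+ /* the reset engine state is synchronized using host_lock */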
spin_lock_irqsave(pinstance->host->host_lock, + lock_flags); + pmcraid_initiate_reset(pinstance); + spin_unlock_irqrestore( + pinstance->host->host_lock, + lock_flags); + } + /* If interrupt was as part of the ioa initialization, + * clear it. Delete the timer and wakeup the + * reset engine to proceed with reset sequence + */ + if (intrs_val & INTRS_TRANSITION_TO_OPERATIONAL) + pmcraid_clr_trans_op(pinstance); + + /* Clear the interrupt register by writing + * to host to ioa doorbell. Once done + * FW will clear the interrupt. + */ + iowrite32(DOORBELL_INTR_MSIX_CLR, + pinstance->int_regs.host_ioa_interrupt_reg); + ioread32(pinstance->int_regs.host_ioa_interrupt_reg); + + + } + } + + tasklet_schedule(&(pinstance->isr_tasklet[hrrq_id])); + + return IRQ_HANDLED; +} + +/** + * pmcraid_isr - implements legacy interrupt handling routine + * + * @irq: interrupt vector number + * @dev_id: pointer hrrq_vector + * + * Return Value + * IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored + */ +static irqreturn_t pmcraid_isr(int irq, void *dev_id) +{ + struct pmcraid_isr_param *hrrq_vector; + struct pmcraid_instance *pinstance; + u32 intrs; + unsigned long lock_flags; + int hrrq_id = 0; + + /* In case of legacy interrupt mode where interrupts are shared across + * isrs, it may be possible that the current interrupt is not from IOA + */ + if (!dev_id) { + printk(KERN_INFO "%s(): NULL host pointer\n", __func__); + return IRQ_NONE; + } + hrrq_vector = (struct pmcraid_isr_param *)dev_id; + pinstance = hrrq_vector->drv_inst; + + intrs = pmcraid_read_interrupts(pinstance); + + if (unlikely((intrs & PMCRAID_PCI_INTERRUPTS) == 0)) + return IRQ_NONE; + + /* Any error interrupts including unit_check, initiate IOA reset. + * In case of unit check indicate to reset_sequence that IOA unit + * checked and prepare for a dump during reset sequence + */ + if (intrs & PMCRAID_ERROR_INTERRUPTS) { + + if (intrs & INTRS_IOA_UNIT_CHECK) + pinstance->ioa_unit_check = 1; + + iowrite32(intrs, + pinstance->int_regs.ioa_host_interrupt_clr_reg); + pmcraid_err("ISR: error interrupts: %x initiating reset\n", + intrs); + intrs = ioread32( + pinstance->int_regs.ioa_host_interrupt_clr_reg); + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + pmcraid_initiate_reset(pinstance); + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + } else { + /* If interrupt was as part of the ioa initialization, + * clear. 
Delete the timer and wakeup the + * reset engine to proceed with reset sequence + */ + if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) { + pmcraid_clr_trans_op(pinstance); + } else { + iowrite32(intrs, + pinstance->int_regs.ioa_host_interrupt_clr_reg); + ioread32( + pinstance->int_regs.ioa_host_interrupt_clr_reg); + + tasklet_schedule( + &(pinstance->isr_tasklet[hrrq_id])); + } + } + + return IRQ_HANDLED; +} + + +/** + * pmcraid_worker_function - worker thread function + * + * @workp: pointer to struct work queue + * + * Return Value + * None + */ + +static void pmcraid_worker_function(struct work_struct *workp) +{ + struct pmcraid_instance *pinstance; + struct pmcraid_resource_entry *res; + struct pmcraid_resource_entry *temp; + struct scsi_device *sdev; + unsigned long lock_flags; + unsigned long host_lock_flags; + u16 fw_version; + u8 bus, target, lun; + + pinstance = container_of(workp, struct pmcraid_instance, worker_q); + /* add resources only after host is added into system */ + if (!atomic_read(&pinstance->expose_resources)) + return; + + fw_version = be16_to_cpu(pinstance->inq_data->fw_version); + + spin_lock_irqsave(&pinstance->resource_lock, lock_flags); + list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) { + + if (res->change_detected == RES_CHANGE_DEL && res->scsi_dev) { + sdev = res->scsi_dev; + + /* host_lock must be held before calling + * scsi_device_get + */ + spin_lock_irqsave(pinstance->host->host_lock, + host_lock_flags); + if (!scsi_device_get(sdev)) { + spin_unlock_irqrestore( + pinstance->host->host_lock, + host_lock_flags); + pmcraid_info("deleting %x from midlayer\n", + res->cfg_entry.resource_address); + list_move_tail(&res->queue, + &pinstance->free_res_q); + spin_unlock_irqrestore( + &pinstance->resource_lock, + lock_flags); + scsi_remove_device(sdev); + scsi_device_put(sdev); + spin_lock_irqsave(&pinstance->resource_lock, + lock_flags); + res->change_detected = 0; + } else { + spin_unlock_irqrestore( + pinstance->host->host_lock, + host_lock_flags); + } + } + } + + list_for_each_entry(res, &pinstance->used_res_q, queue) { + + if (res->change_detected == RES_CHANGE_ADD) { + + if (!pmcraid_expose_resource(fw_version, + &res->cfg_entry)) + continue; + + if (RES_IS_VSET(res->cfg_entry)) { + bus = PMCRAID_VSET_BUS_ID; + if (fw_version <= PMCRAID_FW_VERSION_1) + target = res->cfg_entry.unique_flags1; + else + target = le16_to_cpu(res->cfg_entry.array_id) & 0xFF; + lun = PMCRAID_VSET_LUN_ID; + } else { + bus = PMCRAID_PHYS_BUS_ID; + target = + RES_TARGET( + res->cfg_entry.resource_address); + lun = RES_LUN(res->cfg_entry.resource_address); + } + + res->change_detected = 0; + spin_unlock_irqrestore(&pinstance->resource_lock, + lock_flags); + scsi_add_device(pinstance->host, bus, target, lun); + spin_lock_irqsave(&pinstance->resource_lock, + lock_flags); + } + } + + spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); +} + +/** + * pmcraid_tasklet_function - Tasklet function + * + * @instance: pointer to msix param structure + * + * Return Value + * None + */ +static void pmcraid_tasklet_function(unsigned long instance) +{ + struct pmcraid_isr_param *hrrq_vector; + struct pmcraid_instance *pinstance; + unsigned long hrrq_lock_flags; + unsigned long pending_lock_flags; + unsigned long host_lock_flags; + spinlock_t *lockp; /* hrrq buffer lock */ + int id; + u32 resp; + + hrrq_vector = (struct pmcraid_isr_param *)instance; + pinstance = hrrq_vector->drv_inst; + id = hrrq_vector->hrrq_id; + lockp = &(pinstance->hrrq_lock[id]); + + /* loop through 
each of the commands responded by IOA. Each HRRQ buf is + * protected by its own lock. Traversals must be done within this lock + * as there may be multiple tasklets running on multiple CPUs. Note + * that the lock is held just for picking up the response handle and + * manipulating hrrq_curr/toggle_bit values. + */ + spin_lock_irqsave(lockp, hrrq_lock_flags); + + resp = le32_to_cpu(*(pinstance->hrrq_curr[id])); + + while ((resp & HRRQ_TOGGLE_BIT) == + pinstance->host_toggle_bit[id]) { + + int cmd_index = resp >> 2; + struct pmcraid_cmd *cmd = NULL; + + if (pinstance->hrrq_curr[id] < pinstance->hrrq_end[id]) { + pinstance->hrrq_curr[id]++; + } else { + pinstance->hrrq_curr[id] = pinstance->hrrq_start[id]; + pinstance->host_toggle_bit[id] ^= 1u; + } + + if (cmd_index >= PMCRAID_MAX_CMD) { + /* In case of invalid response handle, log message */ + pmcraid_err("Invalid response handle %d\n", cmd_index); + resp = le32_to_cpu(*(pinstance->hrrq_curr[id])); + continue; + } + + cmd = pinstance->cmd_list[cmd_index]; + spin_unlock_irqrestore(lockp, hrrq_lock_flags); + + spin_lock_irqsave(&pinstance->pending_pool_lock, + pending_lock_flags); + list_del(&cmd->free_list); + spin_unlock_irqrestore(&pinstance->pending_pool_lock, + pending_lock_flags); + del_timer(&cmd->timer); + atomic_dec(&pinstance->outstanding_cmds); + + if (cmd->cmd_done == pmcraid_ioa_reset) { + spin_lock_irqsave(pinstance->host->host_lock, + host_lock_flags); + cmd->cmd_done(cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, + host_lock_flags); + } else if (cmd->cmd_done != NULL) { + cmd->cmd_done(cmd); + } + /* loop over until we are done with all responses */ + spin_lock_irqsave(lockp, hrrq_lock_flags); + resp = le32_to_cpu(*(pinstance->hrrq_curr[id])); + } + + spin_unlock_irqrestore(lockp, hrrq_lock_flags); +} + +/** + * pmcraid_unregister_interrupt_handler - de-register interrupts handlers + * @pinstance: pointer to adapter instance structure + * + * This routine un-registers registered interrupt handler and + * also frees irqs/vectors. + * + * Retun Value + * None + */ +static +void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance) +{ + struct pci_dev *pdev = pinstance->pdev; + int i; + + for (i = 0; i < pinstance->num_hrrq; i++) + free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]); + + pinstance->interrupt_mode = 0; + pci_free_irq_vectors(pdev); +} + +/** + * pmcraid_register_interrupt_handler - registers interrupt handler + * @pinstance: pointer to per-adapter instance structure + * + * Return Value + * 0 on success, non-zero error code otherwise. 
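+ *
+ * Up to PMCRAID_NUM_MSIX_VECTORS MSI-X vectors are requested when
+ * pmcraid_enable_msix is set, falling back to a single shared legacy
+ * interrupt otherwise; each allocated vector is serviced by
+ * pmcraid_isr_msix (pmcraid_isr in legacy mode) with its hrrq_vector
+ * entry passed as dev_id.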
+ */ +static int +pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance) +{ + struct pci_dev *pdev = pinstance->pdev; + unsigned int irq_flag = PCI_IRQ_LEGACY, flag; + int num_hrrq, rc, i; + irq_handler_t isr; + + if (pmcraid_enable_msix) + irq_flag |= PCI_IRQ_MSIX; + + num_hrrq = pci_alloc_irq_vectors(pdev, 1, PMCRAID_NUM_MSIX_VECTORS, + irq_flag); + if (num_hrrq < 0) + return num_hrrq; + + if (pdev->msix_enabled) { + flag = 0; + isr = pmcraid_isr_msix; + } else { + flag = IRQF_SHARED; + isr = pmcraid_isr; + } + + for (i = 0; i < num_hrrq; i++) { + struct pmcraid_isr_param *vec = &pinstance->hrrq_vector[i]; + + vec->hrrq_id = i; + vec->drv_inst = pinstance; + rc = request_irq(pci_irq_vector(pdev, i), isr, flag, + PMCRAID_DRIVER_NAME, vec); + if (rc) + goto out_unwind; + } + + pinstance->num_hrrq = num_hrrq; + if (pdev->msix_enabled) { + pinstance->interrupt_mode = 1; + iowrite32(DOORBELL_INTR_MODE_MSIX, + pinstance->int_regs.host_ioa_interrupt_reg); + ioread32(pinstance->int_regs.host_ioa_interrupt_reg); + } + + return 0; + +out_unwind: + while (--i >= 0) + free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]); + pci_free_irq_vectors(pdev); + return rc; +} + +/** + * pmcraid_release_cmd_blocks - release buufers allocated for command blocks + * @pinstance: per adapter instance structure pointer + * @max_index: number of buffer blocks to release + * + * Return Value + * None + */ +static void +pmcraid_release_cmd_blocks(struct pmcraid_instance *pinstance, int max_index) +{ + int i; + for (i = 0; i < max_index; i++) { + kmem_cache_free(pinstance->cmd_cachep, pinstance->cmd_list[i]); + pinstance->cmd_list[i] = NULL; + } + kmem_cache_destroy(pinstance->cmd_cachep); + pinstance->cmd_cachep = NULL; +} + +/** + * pmcraid_release_control_blocks - releases buffers alloced for control blocks + * @pinstance: pointer to per adapter instance structure + * @max_index: number of buffers (from 0 onwards) to release + * + * This function assumes that the command blocks for which control blocks are + * linked are not released. + * + * Return Value + * None + */ +static void +pmcraid_release_control_blocks( + struct pmcraid_instance *pinstance, + int max_index +) +{ + int i; + + if (pinstance->control_pool == NULL) + return; + + for (i = 0; i < max_index; i++) { + dma_pool_free(pinstance->control_pool, + pinstance->cmd_list[i]->ioa_cb, + pinstance->cmd_list[i]->ioa_cb_bus_addr); + pinstance->cmd_list[i]->ioa_cb = NULL; + pinstance->cmd_list[i]->ioa_cb_bus_addr = 0; + } + dma_pool_destroy(pinstance->control_pool); + pinstance->control_pool = NULL; +} + +/** + * pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures + * @pinstance: pointer to per adapter instance structure + * + * Allocates memory for command blocks using kernel slab allocator. 
+ * + * Return Value + * 0 in case of success; -ENOMEM in case of failure + */ +static int pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance) +{ + int i; + + sprintf(pinstance->cmd_pool_name, "pmcraid_cmd_pool_%d", + pinstance->host->unique_id); + + + pinstance->cmd_cachep = kmem_cache_create( + pinstance->cmd_pool_name, + sizeof(struct pmcraid_cmd), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!pinstance->cmd_cachep) + return -ENOMEM; + + for (i = 0; i < PMCRAID_MAX_CMD; i++) { + pinstance->cmd_list[i] = + kmem_cache_alloc(pinstance->cmd_cachep, GFP_KERNEL); + if (!pinstance->cmd_list[i]) { + pmcraid_release_cmd_blocks(pinstance, i); + return -ENOMEM; + } + } + return 0; +} + +/** + * pmcraid_allocate_control_blocks - allocates memory control blocks + * @pinstance : pointer to per adapter instance structure + * + * This function allocates PCI memory for DMAable buffers like IOARCB, IOADLs + * and IOASAs. This is called after command blocks are already allocated. + * + * Return Value + * 0 in case it can allocate all control blocks, otherwise -ENOMEM + */ +static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance) +{ + int i; + + sprintf(pinstance->ctl_pool_name, "pmcraid_control_pool_%d", + pinstance->host->unique_id); + + pinstance->control_pool = + dma_pool_create(pinstance->ctl_pool_name, + &pinstance->pdev->dev, + sizeof(struct pmcraid_control_block), + PMCRAID_IOARCB_ALIGNMENT, 0); + + if (!pinstance->control_pool) + return -ENOMEM; + + for (i = 0; i < PMCRAID_MAX_CMD; i++) { + pinstance->cmd_list[i]->ioa_cb = + dma_pool_zalloc( + pinstance->control_pool, + GFP_KERNEL, + &(pinstance->cmd_list[i]->ioa_cb_bus_addr)); + + if (!pinstance->cmd_list[i]->ioa_cb) { + pmcraid_release_control_blocks(pinstance, i); + return -ENOMEM; + } + } + return 0; +} + +/** + * pmcraid_release_host_rrqs - release memory allocated for hrrq buffer(s) + * @pinstance: pointer to per adapter instance structure + * @maxindex: size of hrrq buffer pointer array + * + * Return Value + * None + */ +static void +pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex) +{ + int i; + + for (i = 0; i < maxindex; i++) { + dma_free_coherent(&pinstance->pdev->dev, + HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD, + pinstance->hrrq_start[i], + pinstance->hrrq_start_bus_addr[i]); + + /* reset pointers and toggle bit to zeros */ + pinstance->hrrq_start[i] = NULL; + pinstance->hrrq_start_bus_addr[i] = 0; + pinstance->host_toggle_bit[i] = 0; + } +} + +/** + * pmcraid_allocate_host_rrqs - Allocate and initialize host RRQ buffers + * @pinstance: pointer to per adapter instance structure + * + * Return value + * 0 hrrq buffers are allocated, -ENOMEM otherwise. 
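The allocators in this area (command blocks, control blocks, and the host RRQ buffers below) all use the same partial-unwind idiom: if allocation of element i fails, only elements 0..i-1 are released before returning -ENOMEM. A minimal user-space model of that idiom, using plain malloc/free instead of kmem_cache and dma_pool, follows; it is a sketch, not driver code.

#include <stdio.h>
#include <stdlib.h>

#define NBLOCKS 16

/* mirrors the release helpers: frees blocks 0..max_index-1 only */
static void release_blocks(void *blk[], int max_index)
{
        for (int i = 0; i < max_index; i++) {
                free(blk[i]);
                blk[i] = NULL;
        }
}

static int allocate_blocks(void *blk[])
{
        for (int i = 0; i < NBLOCKS; i++) {
                blk[i] = malloc(64);
                if (!blk[i]) {
                        release_blocks(blk, i);   /* unwind only what exists */
                        return -1;                /* -ENOMEM in the driver */
                }
        }
        return 0;
}

int main(void)
{
        void *blocks[NBLOCKS] = { NULL };

        if (allocate_blocks(blocks) == 0) {
                puts("all blocks allocated");
                release_blocks(blocks, NBLOCKS);
        }
        return 0;
}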
+ */ +static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance) +{ + int i, buffer_size; + + buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD; + + for (i = 0; i < pinstance->num_hrrq; i++) { + pinstance->hrrq_start[i] = + dma_alloc_coherent(&pinstance->pdev->dev, buffer_size, + &pinstance->hrrq_start_bus_addr[i], + GFP_KERNEL); + if (!pinstance->hrrq_start[i]) { + pmcraid_err("pci_alloc failed for hrrq vector : %d\n", + i); + pmcraid_release_host_rrqs(pinstance, i); + return -ENOMEM; + } + + pinstance->hrrq_curr[i] = pinstance->hrrq_start[i]; + pinstance->hrrq_end[i] = + pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1; + pinstance->host_toggle_bit[i] = 1; + spin_lock_init(&pinstance->hrrq_lock[i]); + } + return 0; +} + +/** + * pmcraid_release_hcams - release HCAM buffers + * + * @pinstance: pointer to per adapter instance structure + * + * Return value + * none + */ +static void pmcraid_release_hcams(struct pmcraid_instance *pinstance) +{ + if (pinstance->ccn.msg != NULL) { + dma_free_coherent(&pinstance->pdev->dev, + PMCRAID_AEN_HDR_SIZE + + sizeof(struct pmcraid_hcam_ccn_ext), + pinstance->ccn.msg, + pinstance->ccn.baddr); + + pinstance->ccn.msg = NULL; + pinstance->ccn.hcam = NULL; + pinstance->ccn.baddr = 0; + } + + if (pinstance->ldn.msg != NULL) { + dma_free_coherent(&pinstance->pdev->dev, + PMCRAID_AEN_HDR_SIZE + + sizeof(struct pmcraid_hcam_ldn), + pinstance->ldn.msg, + pinstance->ldn.baddr); + + pinstance->ldn.msg = NULL; + pinstance->ldn.hcam = NULL; + pinstance->ldn.baddr = 0; + } +} + +/** + * pmcraid_allocate_hcams - allocates HCAM buffers + * @pinstance : pointer to per adapter instance structure + * + * Return Value: + * 0 in case of successful allocation, non-zero otherwise + */ +static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance) +{ + pinstance->ccn.msg = dma_alloc_coherent(&pinstance->pdev->dev, + PMCRAID_AEN_HDR_SIZE + + sizeof(struct pmcraid_hcam_ccn_ext), + &pinstance->ccn.baddr, GFP_KERNEL); + + pinstance->ldn.msg = dma_alloc_coherent(&pinstance->pdev->dev, + PMCRAID_AEN_HDR_SIZE + + sizeof(struct pmcraid_hcam_ldn), + &pinstance->ldn.baddr, GFP_KERNEL); + + if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) { + pmcraid_release_hcams(pinstance); + } else { + pinstance->ccn.hcam = + (void *)pinstance->ccn.msg + PMCRAID_AEN_HDR_SIZE; + pinstance->ldn.hcam = + (void *)pinstance->ldn.msg + PMCRAID_AEN_HDR_SIZE; + + atomic_set(&pinstance->ccn.ignore, 0); + atomic_set(&pinstance->ldn.ignore, 0); + } + + return (pinstance->ldn.msg == NULL) ? 
-ENOMEM : 0; +} + +/** + * pmcraid_release_config_buffers - release config.table buffers + * @pinstance: pointer to per adapter instance structure + * + * Return Value + * none + */ +static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance) +{ + if (pinstance->cfg_table != NULL && + pinstance->cfg_table_bus_addr != 0) { + dma_free_coherent(&pinstance->pdev->dev, + sizeof(struct pmcraid_config_table), + pinstance->cfg_table, + pinstance->cfg_table_bus_addr); + pinstance->cfg_table = NULL; + pinstance->cfg_table_bus_addr = 0; + } + + if (pinstance->res_entries != NULL) { + int i; + + for (i = 0; i < PMCRAID_MAX_RESOURCES; i++) + list_del(&pinstance->res_entries[i].queue); + kfree(pinstance->res_entries); + pinstance->res_entries = NULL; + } + + pmcraid_release_hcams(pinstance); +} + +/** + * pmcraid_allocate_config_buffers - allocates DMAable memory for config table + * @pinstance : pointer to per adapter instance structure + * + * Return Value + * 0 for successful allocation, -ENOMEM for any failure + */ +static int pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance) +{ + int i; + + pinstance->res_entries = + kcalloc(PMCRAID_MAX_RESOURCES, + sizeof(struct pmcraid_resource_entry), + GFP_KERNEL); + + if (NULL == pinstance->res_entries) { + pmcraid_err("failed to allocate memory for resource table\n"); + return -ENOMEM; + } + + for (i = 0; i < PMCRAID_MAX_RESOURCES; i++) + list_add_tail(&pinstance->res_entries[i].queue, + &pinstance->free_res_q); + + pinstance->cfg_table = dma_alloc_coherent(&pinstance->pdev->dev, + sizeof(struct pmcraid_config_table), + &pinstance->cfg_table_bus_addr, + GFP_KERNEL); + + if (NULL == pinstance->cfg_table) { + pmcraid_err("couldn't alloc DMA memory for config table\n"); + pmcraid_release_config_buffers(pinstance); + return -ENOMEM; + } + + if (pmcraid_allocate_hcams(pinstance)) { + pmcraid_err("could not alloc DMA memory for HCAMS\n"); + pmcraid_release_config_buffers(pinstance); + return -ENOMEM; + } + + return 0; +} + +/** + * pmcraid_init_tasklets - registers tasklets for response handling + * + * @pinstance: pointer adapter instance structure + * + * Return value + * none + */ +static void pmcraid_init_tasklets(struct pmcraid_instance *pinstance) +{ + int i; + for (i = 0; i < pinstance->num_hrrq; i++) + tasklet_init(&pinstance->isr_tasklet[i], + pmcraid_tasklet_function, + (unsigned long)&pinstance->hrrq_vector[i]); +} + +/** + * pmcraid_kill_tasklets - destroys tasklets registered for response handling + * + * @pinstance: pointer to adapter instance structure + * + * Return value + * none + */ +static void pmcraid_kill_tasklets(struct pmcraid_instance *pinstance) +{ + int i; + for (i = 0; i < pinstance->num_hrrq; i++) + tasklet_kill(&pinstance->isr_tasklet[i]); +} + +/** + * pmcraid_release_buffers - release per-adapter buffers allocated + * + * @pinstance: pointer to adapter soft state + * + * Return Value + * none + */ +static void pmcraid_release_buffers(struct pmcraid_instance *pinstance) +{ + pmcraid_release_config_buffers(pinstance); + pmcraid_release_control_blocks(pinstance, PMCRAID_MAX_CMD); + pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD); + pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); + + if (pinstance->inq_data != NULL) { + dma_free_coherent(&pinstance->pdev->dev, + sizeof(struct pmcraid_inquiry_data), + pinstance->inq_data, + pinstance->inq_data_baddr); + + pinstance->inq_data = NULL; + pinstance->inq_data_baddr = 0; + } + + if (pinstance->timestamp_data != NULL) { + 
dma_free_coherent(&pinstance->pdev->dev, + sizeof(struct pmcraid_timestamp_data), + pinstance->timestamp_data, + pinstance->timestamp_data_baddr); + + pinstance->timestamp_data = NULL; + pinstance->timestamp_data_baddr = 0; + } +} + +/** + * pmcraid_init_buffers - allocates memory and initializes various structures + * @pinstance: pointer to per adapter instance structure + * + * This routine pre-allocates memory based on the type of block as below: + * cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator, + * IOARCBs(PMCRAID_MAX_CMD) : DMAable memory, using pci pool allocator + * config-table entries : DMAable memory using dma_alloc_coherent + * HostRRQs : DMAable memory, using dma_alloc_coherent + * + * Return Value + * 0 in case all of the blocks are allocated, -ENOMEM otherwise. + */ +static int pmcraid_init_buffers(struct pmcraid_instance *pinstance) +{ + int i; + + if (pmcraid_allocate_host_rrqs(pinstance)) { + pmcraid_err("couldn't allocate memory for %d host rrqs\n", + pinstance->num_hrrq); + return -ENOMEM; + } + + if (pmcraid_allocate_config_buffers(pinstance)) { + pmcraid_err("couldn't allocate memory for config buffers\n"); + pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); + return -ENOMEM; + } + + if (pmcraid_allocate_cmd_blocks(pinstance)) { + pmcraid_err("couldn't allocate memory for cmd blocks\n"); + pmcraid_release_config_buffers(pinstance); + pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); + return -ENOMEM; + } + + if (pmcraid_allocate_control_blocks(pinstance)) { + pmcraid_err("couldn't allocate memory control blocks\n"); + pmcraid_release_config_buffers(pinstance); + pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD); + pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); + return -ENOMEM; + } + + /* allocate DMAable memory for page D0 INQUIRY buffer */ + pinstance->inq_data = dma_alloc_coherent(&pinstance->pdev->dev, + sizeof(struct pmcraid_inquiry_data), + &pinstance->inq_data_baddr, GFP_KERNEL); + if (pinstance->inq_data == NULL) { + pmcraid_err("couldn't allocate DMA memory for INQUIRY\n"); + pmcraid_release_buffers(pinstance); + return -ENOMEM; + } + + /* allocate DMAable memory for set timestamp data buffer */ + pinstance->timestamp_data = dma_alloc_coherent(&pinstance->pdev->dev, + sizeof(struct pmcraid_timestamp_data), + &pinstance->timestamp_data_baddr, + GFP_KERNEL); + if (pinstance->timestamp_data == NULL) { + pmcraid_err("couldn't allocate DMA memory for \ + set time_stamp \n"); + pmcraid_release_buffers(pinstance); + return -ENOMEM; + } + + + /* Initialize all the command blocks and add them to free pool. 
No + * need to lock (free_pool_lock) as this is done in initialization + * itself + */ + for (i = 0; i < PMCRAID_MAX_CMD; i++) { + struct pmcraid_cmd *cmdp = pinstance->cmd_list[i]; + pmcraid_init_cmdblk(cmdp, i); + cmdp->drv_inst = pinstance; + list_add_tail(&cmdp->free_list, &pinstance->free_cmd_pool); + } + + return 0; +} + +/** + * pmcraid_reinit_buffers - resets various buffer pointers + * @pinstance: pointer to adapter instance + * Return value + * none + */ +static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance) +{ + int i; + int buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD; + + for (i = 0; i < pinstance->num_hrrq; i++) { + memset(pinstance->hrrq_start[i], 0, buffer_size); + pinstance->hrrq_curr[i] = pinstance->hrrq_start[i]; + pinstance->hrrq_end[i] = + pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1; + pinstance->host_toggle_bit[i] = 1; + } +} + +/** + * pmcraid_init_instance - initialize per instance data structure + * @pdev: pointer to pci device structure + * @host: pointer to Scsi_Host structure + * @mapped_pci_addr: memory mapped IOA configuration registers + * + * Return Value + * 0 on success, non-zero in case of any failure + */ +static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host, + void __iomem *mapped_pci_addr) +{ + struct pmcraid_instance *pinstance = + (struct pmcraid_instance *)host->hostdata; + + pinstance->host = host; + pinstance->pdev = pdev; + + /* Initialize register addresses */ + pinstance->mapped_dma_addr = mapped_pci_addr; + + /* Initialize chip-specific details */ + { + struct pmcraid_chip_details *chip_cfg = pinstance->chip_cfg; + struct pmcraid_interrupts *pint_regs = &pinstance->int_regs; + + pinstance->ioarrin = mapped_pci_addr + chip_cfg->ioarrin; + + pint_regs->ioa_host_interrupt_reg = + mapped_pci_addr + chip_cfg->ioa_host_intr; + pint_regs->ioa_host_interrupt_clr_reg = + mapped_pci_addr + chip_cfg->ioa_host_intr_clr; + pint_regs->ioa_host_msix_interrupt_reg = + mapped_pci_addr + chip_cfg->ioa_host_msix_intr; + pint_regs->host_ioa_interrupt_reg = + mapped_pci_addr + chip_cfg->host_ioa_intr; + pint_regs->host_ioa_interrupt_clr_reg = + mapped_pci_addr + chip_cfg->host_ioa_intr_clr; + + /* Current version of firmware exposes interrupt mask set + * and mask clr registers through memory mapped bar0. 
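Register addresses in pmcraid_init_instance() are simply the ioremapped BAR0 base plus per-chip byte offsets carried in pmcraid_chip_details. The small sketch below shows that arithmetic only; the base address and offsets are hypothetical and are not taken from the driver or hardware documentation.

#include <stdint.h>
#include <stdio.h>

struct chip_offsets {                   /* subset of pmcraid_chip_details */
        unsigned long mailbox;
        unsigned long ioarrin;
        unsigned long ioa_host_intr;
};

int main(void)
{
        /* stands in for the cookie returned by pci_iomap(pdev, 0, 0) */
        uintptr_t mapped_pci_addr = 0xfebf0000u;              /* hypothetical */
        struct chip_offsets chip = { 0x7fc, 0x9c, 0x30c };    /* hypothetical */

        printf("mailbox   register at 0x%lx\n",
               (unsigned long)(mapped_pci_addr + chip.mailbox));
        printf("ioarrin   register at 0x%lx\n",
               (unsigned long)(mapped_pci_addr + chip.ioarrin));
        printf("host intr register at 0x%lx\n",
               (unsigned long)(mapped_pci_addr + chip.ioa_host_intr));
        return 0;
}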
+ */ + pinstance->mailbox = mapped_pci_addr + chip_cfg->mailbox; + pinstance->ioa_status = mapped_pci_addr + chip_cfg->ioastatus; + pint_regs->ioa_host_interrupt_mask_reg = + mapped_pci_addr + chip_cfg->ioa_host_mask; + pint_regs->ioa_host_interrupt_mask_clr_reg = + mapped_pci_addr + chip_cfg->ioa_host_mask_clr; + pint_regs->global_interrupt_mask_reg = + mapped_pci_addr + chip_cfg->global_intr_mask; + } + + pinstance->ioa_reset_attempts = 0; + init_waitqueue_head(&pinstance->reset_wait_q); + + atomic_set(&pinstance->outstanding_cmds, 0); + atomic_set(&pinstance->last_message_id, 0); + atomic_set(&pinstance->expose_resources, 0); + + INIT_LIST_HEAD(&pinstance->free_res_q); + INIT_LIST_HEAD(&pinstance->used_res_q); + INIT_LIST_HEAD(&pinstance->free_cmd_pool); + INIT_LIST_HEAD(&pinstance->pending_cmd_pool); + + spin_lock_init(&pinstance->free_pool_lock); + spin_lock_init(&pinstance->pending_pool_lock); + spin_lock_init(&pinstance->resource_lock); + mutex_init(&pinstance->aen_queue_lock); + + /* Work-queue (Shared) for deferred processing error handling */ + INIT_WORK(&pinstance->worker_q, pmcraid_worker_function); + + /* Initialize the default log_level */ + pinstance->current_log_level = pmcraid_log_level; + + /* Setup variables required for reset engine */ + pinstance->ioa_state = IOA_STATE_UNKNOWN; + pinstance->reset_cmd = NULL; + return 0; +} + +/** + * pmcraid_shutdown - shutdown adapter controller. + * @pdev: pci device struct + * + * Issues an adapter shutdown to the card waits for its completion + * + * Return value + * none + */ +static void pmcraid_shutdown(struct pci_dev *pdev) +{ + struct pmcraid_instance *pinstance = pci_get_drvdata(pdev); + pmcraid_reset_bringdown(pinstance); +} + + +/* + * pmcraid_get_minor - returns unused minor number from minor number bitmap + */ +static unsigned short pmcraid_get_minor(void) +{ + int minor; + + minor = find_first_zero_bit(pmcraid_minor, PMCRAID_MAX_ADAPTERS); + __set_bit(minor, pmcraid_minor); + return minor; +} + +/* + * pmcraid_release_minor - releases given minor back to minor number bitmap + */ +static void pmcraid_release_minor(unsigned short minor) +{ + __clear_bit(minor, pmcraid_minor); +} + +/** + * pmcraid_setup_chrdev - allocates a minor number and registers a char device + * + * @pinstance: pointer to adapter instance for which to register device + * + * Return value + * 0 in case of success, otherwise non-zero + */ +static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance) +{ + int minor; + int error; + + minor = pmcraid_get_minor(); + cdev_init(&pinstance->cdev, &pmcraid_fops); + pinstance->cdev.owner = THIS_MODULE; + + error = cdev_add(&pinstance->cdev, MKDEV(pmcraid_major, minor), 1); + + if (error) + pmcraid_release_minor(minor); + else + device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor), + NULL, "%s%u", PMCRAID_DEVFILE, minor); + return error; +} + +/** + * pmcraid_release_chrdev - unregisters per-adapter management interface + * + * @pinstance: pointer to adapter instance structure + * + * Return value + * none + */ +static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance) +{ + pmcraid_release_minor(MINOR(pinstance->cdev.dev)); + device_destroy(pmcraid_class, + MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev))); + cdev_del(&pinstance->cdev); +} + +/** + * pmcraid_remove - IOA hot plug remove entry point + * @pdev: pci device struct + * + * Return value + * none + */ +static void pmcraid_remove(struct pci_dev *pdev) +{ + struct pmcraid_instance *pinstance = pci_get_drvdata(pdev); + + /* 
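The character-device minor numbers handed out by pmcraid_get_minor() above come from a driver-global bitmap, one bit per adapter, and MKDEV(pmcraid_major, minor) combines that minor with the major obtained at module load. A user-space model of the same allocator (a bool array in place of find_first_zero_bit()/__set_bit(); illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define MAX_ADAPTERS 1024               /* PMCRAID_MAX_ADAPTERS */

static bool minor_used[MAX_ADAPTERS];

static int get_minor(void)
{
        for (int i = 0; i < MAX_ADAPTERS; i++) {
                if (!minor_used[i]) {
                        minor_used[i] = true;
                        return i;
                }
        }
        return -1;      /* probe caps the adapter count before this can happen */
}

static void release_minor(int minor)
{
        minor_used[minor] = false;
}

int main(void)
{
        int a = get_minor(), b = get_minor();

        release_minor(a);
        printf("minors: %d %d, reused: %d\n", a, b, get_minor());
        return 0;
}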
remove the management interface (/dev file) for this device */ + pmcraid_release_chrdev(pinstance); + + /* remove host template from scsi midlayer */ + scsi_remove_host(pinstance->host); + + /* block requests from mid-layer */ + scsi_block_requests(pinstance->host); + + /* initiate shutdown adapter */ + pmcraid_shutdown(pdev); + + pmcraid_disable_interrupts(pinstance, ~0); + flush_work(&pinstance->worker_q); + + pmcraid_kill_tasklets(pinstance); + pmcraid_unregister_interrupt_handler(pinstance); + pmcraid_release_buffers(pinstance); + iounmap(pinstance->mapped_dma_addr); + pci_release_regions(pdev); + scsi_host_put(pinstance->host); + pci_disable_device(pdev); + + return; +} + +/** + * pmcraid_suspend - driver suspend entry point for power management + * @dev: Device structure + * + * Return Value - 0 always + */ +static int __maybe_unused pmcraid_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct pmcraid_instance *pinstance = pci_get_drvdata(pdev); + + pmcraid_shutdown(pdev); + pmcraid_disable_interrupts(pinstance, ~0); + pmcraid_kill_tasklets(pinstance); + pmcraid_unregister_interrupt_handler(pinstance); + + return 0; +} + +/** + * pmcraid_resume - driver resume entry point PCI power management + * @dev: Device structure + * + * Return Value - 0 in case of success. Error code in case of any failure + */ +static int __maybe_unused pmcraid_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct pmcraid_instance *pinstance = pci_get_drvdata(pdev); + struct Scsi_Host *host = pinstance->host; + int rc = 0; + + if (sizeof(dma_addr_t) == 4 || + dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + + if (rc == 0) + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + + if (rc != 0) { + dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n"); + goto disable_device; + } + + pmcraid_disable_interrupts(pinstance, ~0); + atomic_set(&pinstance->outstanding_cmds, 0); + rc = pmcraid_register_interrupt_handler(pinstance); + + if (rc) { + dev_err(&pdev->dev, + "resume: couldn't register interrupt handlers\n"); + rc = -ENODEV; + goto release_host; + } + + pmcraid_init_tasklets(pinstance); + pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS); + + /* Start with hard reset sequence which brings up IOA to operational + * state as well as completes the reset sequence. + */ + pinstance->ioa_hard_reset = 1; + + /* Start IOA firmware initialization and bring card to Operational + * state. 
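Both the resume path above and the probe path later pick DMA masks the same way: keep a 64-bit streaming mask if the platform and device allow it, otherwise fall back to 32-bit, and always restrict the coherent mask to 32 bits so coherent buffers such as IOARCBs and HRRQs stay below 4GB as the firmware requires. The decision reduces to the small helper sketched below, where booleans stand in for the dma_set_mask() calls (illustrative only).

#include <stdbool.h>
#include <stdio.h>

/* Returns the streaming DMA mask width the driver ends up with. */
static int choose_streaming_mask(bool dma_addr_is_32bit, bool dev_accepts_64bit)
{
        if (dma_addr_is_32bit || !dev_accepts_64bit)
                return 32;              /* dma_set_mask(dev, DMA_BIT_MASK(32)) */
        return 64;                      /* 64-bit mask already set successfully */
}

int main(void)
{
        printf("64-bit capable platform and device -> %d-bit streaming mask\n",
               choose_streaming_mask(false, true));
        printf("32-bit dma_addr_t                  -> %d-bit streaming mask\n",
               choose_streaming_mask(true, true));
        /* coherent mask is set to 32 bits in either case */
        return 0;
}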
+ */ + if (pmcraid_reset_bringup(pinstance)) { + dev_err(&pdev->dev, "couldn't initialize IOA\n"); + rc = -ENODEV; + goto release_tasklets; + } + + return 0; + +release_tasklets: + pmcraid_disable_interrupts(pinstance, ~0); + pmcraid_kill_tasklets(pinstance); + pmcraid_unregister_interrupt_handler(pinstance); + +release_host: + scsi_host_put(host); + +disable_device: + + return rc; +} + +/** + * pmcraid_complete_ioa_reset - Called by either timer or tasklet during + * completion of the ioa reset + * @cmd: pointer to reset command block + */ +static void pmcraid_complete_ioa_reset(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + unsigned long flags; + + spin_lock_irqsave(pinstance->host->host_lock, flags); + pmcraid_ioa_reset(cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, flags); + scsi_unblock_requests(pinstance->host); + schedule_work(&pinstance->worker_q); +} + +/** + * pmcraid_set_supported_devs - sends SET SUPPORTED DEVICES to IOAFP + * + * @cmd: pointer to pmcraid_cmd structure + * + * Return Value + * 0 for success or non-zero for failure cases + */ +static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd) +{ + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + void (*cmd_done) (struct pmcraid_cmd *) = pmcraid_complete_ioa_reset; + + pmcraid_reinit_cmdblk(cmd); + + ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); + ioarcb->request_type = REQ_TYPE_IOACMD; + ioarcb->cdb[0] = PMCRAID_SET_SUPPORTED_DEVICES; + ioarcb->cdb[1] = ALL_DEVICES_SUPPORTED; + + /* If this was called as part of resource table reinitialization due to + * lost CCN, it is enough to return the command block back to free pool + * as part of set_supported_devs completion function. + */ + if (cmd->drv_inst->reinit_cfg_table) { + cmd->drv_inst->reinit_cfg_table = 0; + cmd->release = 1; + cmd_done = pmcraid_reinit_cfgtable_done; + } + + /* we will be done with the reset sequence after set supported devices, + * setup the done function to return the command block back to free + * pool + */ + pmcraid_send_cmd(cmd, + cmd_done, + PMCRAID_SET_SUP_DEV_TIMEOUT, + pmcraid_timeout_handler); + return; +} + +/** + * pmcraid_set_timestamp - set the timestamp to IOAFP + * + * @cmd: pointer to pmcraid_cmd structure + * + * Return Value + * 0 for success or non-zero for failure cases + */ +static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + __be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN); + struct pmcraid_ioadl_desc *ioadl; + u64 timestamp; + + timestamp = ktime_get_real_seconds() * 1000; + + pinstance->timestamp_data->timestamp[0] = (__u8)(timestamp); + pinstance->timestamp_data->timestamp[1] = (__u8)((timestamp) >> 8); + pinstance->timestamp_data->timestamp[2] = (__u8)((timestamp) >> 16); + pinstance->timestamp_data->timestamp[3] = (__u8)((timestamp) >> 24); + pinstance->timestamp_data->timestamp[4] = (__u8)((timestamp) >> 32); + pinstance->timestamp_data->timestamp[5] = (__u8)((timestamp) >> 40); + + pmcraid_reinit_cmdblk(cmd); + ioarcb->request_type = REQ_TYPE_SCSI; + ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); + ioarcb->cdb[0] = PMCRAID_SCSI_SET_TIMESTAMP; + ioarcb->cdb[1] = PMCRAID_SCSI_SERVICE_ACTION; + memcpy(&(ioarcb->cdb[6]), &time_stamp_len, sizeof(time_stamp_len)); + + ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + + offsetof(struct pmcraid_ioarcb, + add_data.u.ioadl[0])); + 
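pmcraid_set_timestamp() packs the current time, in milliseconds since the Unix epoch, least-significant byte first into the six timestamp bytes of struct pmcraid_timestamp_data. A stand-alone illustration of that encoding (not driver code; six bytes comfortably cover any realistic millisecond count):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        /* same value the driver derives from ktime_get_real_seconds() * 1000 */
        uint64_t ms = (uint64_t)time(NULL) * 1000;
        uint8_t ts[6];

        for (int i = 0; i < 6; i++)
                ts[i] = (uint8_t)(ms >> (8 * i));       /* byte 0 = LSB ... byte 5 */

        printf("ms=%llu -> %02x %02x %02x %02x %02x %02x\n",
               (unsigned long long)ms,
               (unsigned)ts[0], (unsigned)ts[1], (unsigned)ts[2],
               (unsigned)ts[3], (unsigned)ts[4], (unsigned)ts[5]);
        return 0;
}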
ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); + ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL)); + + ioarcb->request_flags0 |= NO_LINK_DESCS; + ioarcb->request_flags0 |= TRANSFER_DIR_WRITE; + ioarcb->data_transfer_length = + cpu_to_le32(sizeof(struct pmcraid_timestamp_data)); + ioadl = &(ioarcb->add_data.u.ioadl[0]); + ioadl->flags = IOADL_FLAGS_LAST_DESC; + ioadl->address = cpu_to_le64(pinstance->timestamp_data_baddr); + ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_timestamp_data)); + + if (!pinstance->timestamp_error) { + pinstance->timestamp_error = 0; + pmcraid_send_cmd(cmd, pmcraid_set_supported_devs, + PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); + } else { + pmcraid_send_cmd(cmd, pmcraid_return_cmd, + PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); + return; + } +} + + +/** + * pmcraid_init_res_table - Initialize the resource table + * @cmd: pointer to pmcraid command struct + * + * This function looks through the existing resource table, comparing + * it with the config table. This function will take care of old/new + * devices and schedule adding/removing them from the mid-layer + * as appropriate. + * + * Return value + * None + */ +static void pmcraid_init_res_table(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + struct pmcraid_resource_entry *res, *temp; + struct pmcraid_config_table_entry *cfgte; + unsigned long lock_flags; + int found, rc, i; + u16 fw_version; + LIST_HEAD(old_res); + + if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED) + pmcraid_err("IOA requires microcode download\n"); + + fw_version = be16_to_cpu(pinstance->inq_data->fw_version); + + /* resource list is protected by pinstance->resource_lock. + * init_res_table can be called from probe (user-thread) or runtime + * reset (timer/tasklet) + */ + spin_lock_irqsave(&pinstance->resource_lock, lock_flags); + + list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) + list_move_tail(&res->queue, &old_res); + + for (i = 0; i < le16_to_cpu(pinstance->cfg_table->num_entries); i++) { + if (be16_to_cpu(pinstance->inq_data->fw_version) <= + PMCRAID_FW_VERSION_1) + cfgte = &pinstance->cfg_table->entries[i]; + else + cfgte = (struct pmcraid_config_table_entry *) + &pinstance->cfg_table->entries_ext[i]; + + if (!pmcraid_expose_resource(fw_version, cfgte)) + continue; + + found = 0; + + /* If this entry was already detected and initialized */ + list_for_each_entry_safe(res, temp, &old_res, queue) { + + rc = memcmp(&res->cfg_entry.resource_address, + &cfgte->resource_address, + sizeof(cfgte->resource_address)); + if (!rc) { + list_move_tail(&res->queue, + &pinstance->used_res_q); + found = 1; + break; + } + } + + /* If this is new entry, initialize it and add it the queue */ + if (!found) { + + if (list_empty(&pinstance->free_res_q)) { + pmcraid_err("Too many devices attached\n"); + break; + } + + found = 1; + res = list_entry(pinstance->free_res_q.next, + struct pmcraid_resource_entry, queue); + + res->scsi_dev = NULL; + res->change_detected = RES_CHANGE_ADD; + res->reset_progress = 0; + list_move_tail(&res->queue, &pinstance->used_res_q); + } + + /* copy new configuration table entry details into driver + * maintained resource entry + */ + if (found) { + memcpy(&res->cfg_entry, cfgte, + pinstance->config_table_entry_size); + pmcraid_info("New res type:%x, vset:%x, addr:%x:\n", + res->cfg_entry.resource_type, + (fw_version <= PMCRAID_FW_VERSION_1 ? 
+ res->cfg_entry.unique_flags1 : + le16_to_cpu(res->cfg_entry.array_id) & 0xFF), + le32_to_cpu(res->cfg_entry.resource_address)); + } + } + + /* Detect any deleted entries, mark them for deletion from mid-layer */ + list_for_each_entry_safe(res, temp, &old_res, queue) { + + if (res->scsi_dev) { + res->change_detected = RES_CHANGE_DEL; + res->cfg_entry.resource_handle = + PMCRAID_INVALID_RES_HANDLE; + list_move_tail(&res->queue, &pinstance->used_res_q); + } else { + list_move_tail(&res->queue, &pinstance->free_res_q); + } + } + + /* release the resource list lock */ + spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); + pmcraid_set_timestamp(cmd); +} + +/** + * pmcraid_querycfg - Send a Query IOA Config to the adapter. + * @cmd: pointer pmcraid_cmd struct + * + * This function sends a Query IOA Configuration command to the adapter to + * retrieve the IOA configuration table. + * + * Return value: + * none + */ +static void pmcraid_querycfg(struct pmcraid_cmd *cmd) +{ + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + struct pmcraid_ioadl_desc *ioadl; + struct pmcraid_instance *pinstance = cmd->drv_inst; + __be32 cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table)); + + if (be16_to_cpu(pinstance->inq_data->fw_version) <= + PMCRAID_FW_VERSION_1) + pinstance->config_table_entry_size = + sizeof(struct pmcraid_config_table_entry); + else + pinstance->config_table_entry_size = + sizeof(struct pmcraid_config_table_entry_ext); + + ioarcb->request_type = REQ_TYPE_IOACMD; + ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); + + ioarcb->cdb[0] = PMCRAID_QUERY_IOA_CONFIG; + + /* firmware requires 4-byte length field, specified in B.E format */ + memcpy(&(ioarcb->cdb[10]), &cfg_table_size, sizeof(cfg_table_size)); + + /* Since entire config table can be described by single IOADL, it can + * be part of IOARCB itself + */ + ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + + offsetof(struct pmcraid_ioarcb, + add_data.u.ioadl[0])); + ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); + ioarcb->ioarcb_bus_addr &= cpu_to_le64(~0x1FULL); + + ioarcb->request_flags0 |= NO_LINK_DESCS; + ioarcb->data_transfer_length = + cpu_to_le32(sizeof(struct pmcraid_config_table)); + + ioadl = &(ioarcb->add_data.u.ioadl[0]); + ioadl->flags = IOADL_FLAGS_LAST_DESC; + ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr); + ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table)); + + pmcraid_send_cmd(cmd, pmcraid_init_res_table, + PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); +} + + +/** + * pmcraid_probe - PCI probe entry pointer for PMC MaxRAID controller driver + * @pdev: pointer to pci device structure + * @dev_id: pointer to device ids structure + * + * Return Value + * returns 0 if the device is claimed and successfully configured. 
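pmcraid_querycfg(), like pmcraid_set_timestamp() above, can point ioadl_bus_addr into the IOARCB itself because the single descriptor it needs lives inside the same DMA-mapped control block: its bus address is just the block's bus address plus the structure offset of the embedded IOADL array. A toy version of that offsetof() arithmetic, with invented structures and a hypothetical DMA address (field layout differs from the real pmcraid_ioarcb):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ioadl_desc { uint64_t address; uint32_t data_len; uint8_t rsvd[3], flags; };

struct ioarcb {
        uint64_t ioadl_bus_addr;
        uint32_t ioadl_length;
        struct ioadl_desc ioadl[4];     /* descriptors embedded in the IOARCB */
};

int main(void)
{
        uint64_t ioarcb_bus_addr = 0x12340000;          /* hypothetical DMA address */
        uint64_t first_ioadl_bus =
                ioarcb_bus_addr + offsetof(struct ioarcb, ioadl[0]);

        printf("embedded IOADL[0] bus address: 0x%llx (offset %zu)\n",
               (unsigned long long)first_ioadl_bus,
               offsetof(struct ioarcb, ioadl[0]));
        return 0;
}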
+ * returns non-zero error code in case of any failure + */ +static int pmcraid_probe(struct pci_dev *pdev, + const struct pci_device_id *dev_id) +{ + struct pmcraid_instance *pinstance; + struct Scsi_Host *host; + void __iomem *mapped_pci_addr; + int rc = PCIBIOS_SUCCESSFUL; + + if (atomic_read(&pmcraid_adapter_count) >= PMCRAID_MAX_ADAPTERS) { + pmcraid_err + ("maximum number(%d) of supported adapters reached\n", + atomic_read(&pmcraid_adapter_count)); + return -ENOMEM; + } + + atomic_inc(&pmcraid_adapter_count); + rc = pci_enable_device(pdev); + + if (rc) { + dev_err(&pdev->dev, "Cannot enable adapter\n"); + atomic_dec(&pmcraid_adapter_count); + return rc; + } + + dev_info(&pdev->dev, + "Found new IOA(%x:%x), Total IOA count: %d\n", + pdev->vendor, pdev->device, + atomic_read(&pmcraid_adapter_count)); + + rc = pci_request_regions(pdev, PMCRAID_DRIVER_NAME); + + if (rc < 0) { + dev_err(&pdev->dev, + "Couldn't register memory range of registers\n"); + goto out_disable_device; + } + + mapped_pci_addr = pci_iomap(pdev, 0, 0); + + if (!mapped_pci_addr) { + dev_err(&pdev->dev, "Couldn't map PCI registers memory\n"); + rc = -ENOMEM; + goto out_release_regions; + } + + pci_set_master(pdev); + + /* Firmware requires the system bus address of IOARCB to be within + * 32-bit addressable range though it has 64-bit IOARRIN register. + * However, firmware supports 64-bit streaming DMA buffers, whereas + * coherent buffers are to be 32-bit. Since dma_alloc_coherent always + * returns memory within 4GB (if not, change this logic), coherent + * buffers are within firmware acceptable address ranges. + */ + if (sizeof(dma_addr_t) == 4 || + dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + + /* firmware expects 32-bit DMA addresses for IOARRIN register; set 32 + * bit mask for dma_alloc_coherent to return addresses within 4GB + */ + if (rc == 0) + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + + if (rc != 0) { + dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); + goto cleanup_nomem; + } + + host = scsi_host_alloc(&pmcraid_host_template, + sizeof(struct pmcraid_instance)); + + if (!host) { + dev_err(&pdev->dev, "scsi_host_alloc failed!\n"); + rc = -ENOMEM; + goto cleanup_nomem; + } + + host->max_id = PMCRAID_MAX_NUM_TARGETS_PER_BUS; + host->max_lun = PMCRAID_MAX_NUM_LUNS_PER_TARGET; + host->unique_id = host->host_no; + host->max_channel = PMCRAID_MAX_BUS_TO_SCAN; + host->max_cmd_len = PMCRAID_MAX_CDB_LEN; + + /* zero out entire instance structure */ + pinstance = (struct pmcraid_instance *)host->hostdata; + memset(pinstance, 0, sizeof(*pinstance)); + + pinstance->chip_cfg = + (struct pmcraid_chip_details *)(dev_id->driver_data); + + rc = pmcraid_init_instance(pdev, host, mapped_pci_addr); + + if (rc < 0) { + dev_err(&pdev->dev, "failed to initialize adapter instance\n"); + goto out_scsi_host_put; + } + + pci_set_drvdata(pdev, pinstance); + + /* Save PCI config-space for use following the reset */ + rc = pci_save_state(pinstance->pdev); + + if (rc != 0) { + dev_err(&pdev->dev, "Failed to save PCI config space\n"); + goto out_scsi_host_put; + } + + pmcraid_disable_interrupts(pinstance, ~0); + + rc = pmcraid_register_interrupt_handler(pinstance); + + if (rc) { + dev_err(&pdev->dev, "couldn't register interrupt handler\n"); + goto out_scsi_host_put; + } + + pmcraid_init_tasklets(pinstance); + + /* allocate verious buffers used by LLD.*/ + rc = pmcraid_init_buffers(pinstance); + + if (rc) { + pmcraid_err("couldn't allocate memory blocks\n"); + goto 
out_unregister_isr; + } + + /* check the reset type required */ + pmcraid_reset_type(pinstance); + + pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS); + + /* Start IOA firmware initialization and bring card to Operational + * state. + */ + pmcraid_info("starting IOA initialization sequence\n"); + if (pmcraid_reset_bringup(pinstance)) { + dev_err(&pdev->dev, "couldn't initialize IOA\n"); + rc = 1; + goto out_release_bufs; + } + + /* Add adapter instance into mid-layer list */ + rc = scsi_add_host(pinstance->host, &pdev->dev); + if (rc != 0) { + pmcraid_err("couldn't add host into mid-layer: %d\n", rc); + goto out_release_bufs; + } + + scsi_scan_host(pinstance->host); + + rc = pmcraid_setup_chrdev(pinstance); + + if (rc != 0) { + pmcraid_err("couldn't create mgmt interface, error: %x\n", + rc); + goto out_remove_host; + } + + /* Schedule worker thread to handle CCN and take care of adding and + * removing devices to OS + */ + atomic_set(&pinstance->expose_resources, 1); + schedule_work(&pinstance->worker_q); + return rc; + +out_remove_host: + scsi_remove_host(host); + +out_release_bufs: + pmcraid_release_buffers(pinstance); + +out_unregister_isr: + pmcraid_kill_tasklets(pinstance); + pmcraid_unregister_interrupt_handler(pinstance); + +out_scsi_host_put: + scsi_host_put(host); + +cleanup_nomem: + iounmap(mapped_pci_addr); + +out_release_regions: + pci_release_regions(pdev); + +out_disable_device: + atomic_dec(&pmcraid_adapter_count); + pci_disable_device(pdev); + return -ENODEV; +} + +static SIMPLE_DEV_PM_OPS(pmcraid_pm_ops, pmcraid_suspend, pmcraid_resume); + +/* + * PCI driver structure of pmcraid driver + */ +static struct pci_driver pmcraid_driver = { + .name = PMCRAID_DRIVER_NAME, + .id_table = pmcraid_pci_table, + .probe = pmcraid_probe, + .remove = pmcraid_remove, + .driver.pm = &pmcraid_pm_ops, + .shutdown = pmcraid_shutdown +}; + +/** + * pmcraid_init - module load entry point + */ +static int __init pmcraid_init(void) +{ + dev_t dev; + int error; + + pmcraid_info("%s Device Driver version: %s\n", + PMCRAID_DRIVER_NAME, PMCRAID_DRIVER_VERSION); + + error = alloc_chrdev_region(&dev, 0, + PMCRAID_MAX_ADAPTERS, + PMCRAID_DEVFILE); + + if (error) { + pmcraid_err("failed to get a major number for adapters\n"); + goto out_init; + } + + pmcraid_major = MAJOR(dev); + pmcraid_class = class_create(PMCRAID_DEVFILE); + + if (IS_ERR(pmcraid_class)) { + error = PTR_ERR(pmcraid_class); + pmcraid_err("failed to register with sysfs, error = %x\n", + error); + goto out_unreg_chrdev; + } + + error = pmcraid_netlink_init(); + + if (error) { + class_destroy(pmcraid_class); + goto out_unreg_chrdev; + } + + error = pci_register_driver(&pmcraid_driver); + + if (error == 0) + goto out_init; + + pmcraid_err("failed to register pmcraid driver, error = %x\n", + error); + class_destroy(pmcraid_class); + pmcraid_netlink_release(); + +out_unreg_chrdev: + unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS); + +out_init: + return error; +} + +/** + * pmcraid_exit - module unload entry point + */ +static void __exit pmcraid_exit(void) +{ + pmcraid_netlink_release(); + unregister_chrdev_region(MKDEV(pmcraid_major, 0), + PMCRAID_MAX_ADAPTERS); + pci_unregister_driver(&pmcraid_driver); + class_destroy(pmcraid_class); +} + +module_init(pmcraid_init); +module_exit(pmcraid_exit); diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h new file mode 100644 index 000000000..9f59930e8 --- /dev/null +++ b/drivers/scsi/pmcraid.h @@ -0,0 +1,1047 @@ +/* SPDX-License-Identifier: 
GPL-2.0-or-later */ +/* + * pmcraid.h -- PMC Sierra MaxRAID controller driver header file + * + * Written By: Anil Ravindranath + * PMC-Sierra Inc + * + * Copyright (C) 2008, 2009 PMC Sierra Inc. + */ + +#ifndef _PMCRAID_H +#define _PMCRAID_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* + * Driver name : string representing the driver name + * Device file : /dev file to be used for management interfaces + * Driver version: version string in major_version.minor_version.patch format + * Driver date : date information in "Mon dd yyyy" format + */ +#define PMCRAID_DRIVER_NAME "PMC MaxRAID" +#define PMCRAID_DEVFILE "pmcsas" +#define PMCRAID_DRIVER_VERSION "1.0.3" + +#define PMCRAID_FW_VERSION_1 0x002 + +/* Maximum number of adapters supported by current version of the driver */ +#define PMCRAID_MAX_ADAPTERS 1024 + +/* Bit definitions as per firmware, bit position [0][1][2].....[31] */ +#define PMC_BIT8(n) (1 << (7-n)) +#define PMC_BIT16(n) (1 << (15-n)) +#define PMC_BIT32(n) (1 << (31-n)) + +/* PMC PCI vendor ID and device ID values */ +#define PCI_VENDOR_ID_PMC 0x11F8 +#define PCI_DEVICE_ID_PMC_MAXRAID 0x5220 + +/* + * MAX_CMD : maximum commands that can be outstanding with IOA + * MAX_IO_CMD : command blocks available for IO commands + * MAX_HCAM_CMD : command blocks available for HCAMS + * MAX_INTERNAL_CMD : command blocks available for internal commands like reset + */ +#define PMCRAID_MAX_CMD 1024 +#define PMCRAID_MAX_IO_CMD 1020 +#define PMCRAID_MAX_HCAM_CMD 2 +#define PMCRAID_MAX_INTERNAL_CMD 2 + +/* MAX_IOADLS : max number of scatter-gather lists supported by IOA + * IOADLS_INTERNAL : number of ioadls included as part of IOARCB. + * IOADLS_EXTERNAL : number of ioadls allocated external to IOARCB + */ +#define PMCRAID_IOADLS_INTERNAL 27 +#define PMCRAID_IOADLS_EXTERNAL 37 +#define PMCRAID_MAX_IOADLS PMCRAID_IOADLS_INTERNAL + +/* HRRQ_ENTRY_SIZE : size of hrrq buffer + * IOARCB_ALIGNMENT : alignment required for IOARCB + * IOADL_ALIGNMENT : alignment requirement for IOADLs + * MSIX_VECTORS : number of MSIX vectors supported + */ +#define HRRQ_ENTRY_SIZE sizeof(__le32) +#define PMCRAID_IOARCB_ALIGNMENT 32 +#define PMCRAID_IOADL_ALIGNMENT 16 +#define PMCRAID_IOASA_ALIGNMENT 4 +#define PMCRAID_NUM_MSIX_VECTORS 16 + +/* various other limits */ +#define PMCRAID_VENDOR_ID_LEN 8 +#define PMCRAID_PRODUCT_ID_LEN 16 +#define PMCRAID_SERIAL_NUM_LEN 8 +#define PMCRAID_LUN_LEN 8 +#define PMCRAID_MAX_CDB_LEN 16 +#define PMCRAID_DEVICE_ID_LEN 8 +#define PMCRAID_SENSE_DATA_LEN 256 +#define PMCRAID_ADD_CMD_PARAM_LEN 48 + +#define PMCRAID_MAX_BUS_TO_SCAN 1 +#define PMCRAID_MAX_NUM_TARGETS_PER_BUS 256 +#define PMCRAID_MAX_NUM_LUNS_PER_TARGET 8 + +/* IOA bus/target/lun number of IOA resources */ +#define PMCRAID_IOA_BUS_ID 0xfe +#define PMCRAID_IOA_TARGET_ID 0xff +#define PMCRAID_IOA_LUN_ID 0xff +#define PMCRAID_VSET_BUS_ID 0x1 +#define PMCRAID_VSET_LUN_ID 0x0 +#define PMCRAID_PHYS_BUS_ID 0x0 +#define PMCRAID_VIRTUAL_ENCL_BUS_ID 0x8 +#define PMCRAID_MAX_VSET_TARGETS 0x7F +#define PMCRAID_MAX_VSET_LUNS_PER_TARGET 8 + +#define PMCRAID_IOA_MAX_SECTORS 32767 +#define PMCRAID_VSET_MAX_SECTORS 512 +#define PMCRAID_MAX_CMD_PER_LUN 254 + +/* Number of configuration table entries (resources), includes 1 FP, + * 1 Enclosure device + */ +#define PMCRAID_MAX_RESOURCES 256 + +/* Adapter Commands used by driver */ +#define PMCRAID_QUERY_RESOURCE_STATE 0xC2 +#define PMCRAID_RESET_DEVICE 0xC3 +/* options to select reset target */ +#define ENABLE_RESET_MODIFIER 0x80 +#define 
RESET_DEVICE_LUN 0x40 +#define RESET_DEVICE_TARGET 0x20 +#define RESET_DEVICE_BUS 0x10 + +#define PMCRAID_IDENTIFY_HRRQ 0xC4 +#define PMCRAID_QUERY_IOA_CONFIG 0xC5 +#define PMCRAID_QUERY_CMD_STATUS 0xCB +#define PMCRAID_ABORT_CMD 0xC7 + +/* CANCEL ALL command, provides option for setting SYNC_COMPLETE + * on the target resources for which commands got cancelled + */ +#define PMCRAID_CANCEL_ALL_REQUESTS 0xCE +#define PMCRAID_SYNC_COMPLETE_AFTER_CANCEL PMC_BIT8(0) + +/* HCAM command and types of HCAM supported by IOA */ +#define PMCRAID_HOST_CONTROLLED_ASYNC 0xCF +#define PMCRAID_HCAM_CODE_CONFIG_CHANGE 0x01 +#define PMCRAID_HCAM_CODE_LOG_DATA 0x02 + +/* IOA shutdown command and various shutdown types */ +#define PMCRAID_IOA_SHUTDOWN 0xF7 +#define PMCRAID_SHUTDOWN_NORMAL 0x00 +#define PMCRAID_SHUTDOWN_PREPARE_FOR_NORMAL 0x40 +#define PMCRAID_SHUTDOWN_NONE 0x100 +#define PMCRAID_SHUTDOWN_ABBREV 0x80 + +/* SET SUPPORTED DEVICES command and the option to select all the + * devices to be supported + */ +#define PMCRAID_SET_SUPPORTED_DEVICES 0xFB +#define ALL_DEVICES_SUPPORTED PMC_BIT8(0) + +/* This option is used with SCSI WRITE_BUFFER command */ +#define PMCRAID_WR_BUF_DOWNLOAD_AND_SAVE 0x05 + +/* IOASC Codes used by driver */ +#define PMCRAID_IOASC_SENSE_MASK 0xFFFFFF00 +#define PMCRAID_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24) +#define PMCRAID_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16) +#define PMCRAID_IOASC_SENSE_QUAL(ioasc) (((ioasc) & 0x0000ff00) >> 8) +#define PMCRAID_IOASC_SENSE_STATUS(ioasc) ((ioasc) & 0x000000ff) + +#define PMCRAID_IOASC_GOOD_COMPLETION 0x00000000 +#define PMCRAID_IOASC_GC_IOARCB_NOTFOUND 0x005A0000 +#define PMCRAID_IOASC_NR_INIT_CMD_REQUIRED 0x02040200 +#define PMCRAID_IOASC_NR_IOA_RESET_REQUIRED 0x02048000 +#define PMCRAID_IOASC_NR_SYNC_REQUIRED 0x023F0000 +#define PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC 0x03110C00 +#define PMCRAID_IOASC_HW_CANNOT_COMMUNICATE 0x04050000 +#define PMCRAID_IOASC_HW_DEVICE_TIMEOUT 0x04080100 +#define PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR 0x04448500 +#define PMCRAID_IOASC_HW_IOA_RESET_REQUIRED 0x04448600 +#define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE 0x05250000 +#define PMCRAID_IOASC_AC_TERMINATED_BY_HOST 0x0B5A0000 +#define PMCRAID_IOASC_UA_BUS_WAS_RESET 0x06290000 +#define PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC 0x06908B00 +#define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER 0x06298000 + +/* Driver defined IOASCs */ +#define PMCRAID_IOASC_IOA_WAS_RESET 0x10000001 +#define PMCRAID_IOASC_PCI_ACCESS_ERROR 0x10000002 + +/* Various timeout values (in milliseconds) used. If any of these are chip + * specific, move them to pmcraid_chip_details structure. 
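Two conventions from this header are worth spelling out: the PMC_BIT8/16/32 macros number bits from the most-significant end (bit 0 is the MSB), and an IOASC packs the SCSI sense key, ASC and ASCQ into its top three bytes. The snippet below restates both (macro arguments parenthesised here purely for hygiene) and decodes PMCRAID_IOASC_UA_BUS_WAS_RESET, 0x06290000, which comes out as sense key 0x06 (UNIT ATTENTION) with ASC/ASCQ 0x29/0x00, the standard reset-occurred code.

#include <stdint.h>
#include <stdio.h>

#define PMC_BIT8(n)   (1u << (7 - (n)))         /* bit 0 is the MSB */
#define PMC_BIT32(n)  (1u << (31 - (n)))

#define IOASC_SENSE_KEY(x)   ((x) >> 24)
#define IOASC_SENSE_CODE(x)  (((x) & 0x00ff0000) >> 16)
#define IOASC_SENSE_QUAL(x)  (((x) & 0x0000ff00) >> 8)

int main(void)
{
        uint32_t ioasc = 0x06290000;            /* PMCRAID_IOASC_UA_BUS_WAS_RESET */

        printf("PMC_BIT8(0)=0x%02x  PMC_BIT32(0)=0x%08x  PMC_BIT32(31)=0x%08x\n",
               PMC_BIT8(0), PMC_BIT32(0), PMC_BIT32(31));
        printf("sense key 0x%02x, asc 0x%02x, ascq 0x%02x\n",
               (unsigned)IOASC_SENSE_KEY(ioasc),
               (unsigned)IOASC_SENSE_CODE(ioasc),
               (unsigned)IOASC_SENSE_QUAL(ioasc));
        return 0;
}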
+ */ +#define PMCRAID_PCI_DEASSERT_TIMEOUT 2000 +#define PMCRAID_BIST_TIMEOUT 2000 +#define PMCRAID_AENWAIT_TIMEOUT 5000 +#define PMCRAID_TRANSOP_TIMEOUT 60000 + +#define PMCRAID_RESET_TIMEOUT (2 * HZ) +#define PMCRAID_CHECK_FOR_RESET_TIMEOUT ((HZ / 10)) +#define PMCRAID_VSET_IO_TIMEOUT (60 * HZ) +#define PMCRAID_INTERNAL_TIMEOUT (60 * HZ) +#define PMCRAID_SHUTDOWN_TIMEOUT (150 * HZ) +#define PMCRAID_RESET_BUS_TIMEOUT (60 * HZ) +#define PMCRAID_RESET_HOST_TIMEOUT (150 * HZ) +#define PMCRAID_REQUEST_SENSE_TIMEOUT (30 * HZ) +#define PMCRAID_SET_SUP_DEV_TIMEOUT (2 * 60 * HZ) + +/* structure to represent a scatter-gather element (IOADL descriptor) */ +struct pmcraid_ioadl_desc { + __le64 address; + __le32 data_len; + __u8 reserved[3]; + __u8 flags; +} __attribute__((packed, aligned(PMCRAID_IOADL_ALIGNMENT))); + +/* pmcraid_ioadl_desc.flags values */ +#define IOADL_FLAGS_CHAINED PMC_BIT8(0) +#define IOADL_FLAGS_LAST_DESC PMC_BIT8(1) +#define IOADL_FLAGS_READ_LAST PMC_BIT8(1) +#define IOADL_FLAGS_WRITE_LAST PMC_BIT8(1) + + +/* additional IOARCB data which can be CDB or additional request parameters + * or list of IOADLs. Firmware supports max of 512 bytes for IOARCB, hence then + * number of IOADLs are limted to 27. In case they are more than 27, they will + * be used in chained form + */ +struct pmcraid_ioarcb_add_data { + union { + struct pmcraid_ioadl_desc ioadl[PMCRAID_IOADLS_INTERNAL]; + __u8 add_cmd_params[PMCRAID_ADD_CMD_PARAM_LEN]; + } u; +}; + +/* + * IOA Request Control Block + */ +struct pmcraid_ioarcb { + __le64 ioarcb_bus_addr; + __le32 resource_handle; + __le32 response_handle; + __le64 ioadl_bus_addr; + __le32 ioadl_length; + __le32 data_transfer_length; + __le64 ioasa_bus_addr; + __le16 ioasa_len; + __le16 cmd_timeout; + __le16 add_cmd_param_offset; + __le16 add_cmd_param_length; + __le32 reserved1[2]; + __le32 reserved2; + __u8 request_type; + __u8 request_flags0; + __u8 request_flags1; + __u8 hrrq_id; + __u8 cdb[PMCRAID_MAX_CDB_LEN]; + struct pmcraid_ioarcb_add_data add_data; +}; + +/* well known resource handle values */ +#define PMCRAID_IOA_RES_HANDLE 0xffffffff +#define PMCRAID_INVALID_RES_HANDLE 0 + +/* pmcraid_ioarcb.request_type values */ +#define REQ_TYPE_SCSI 0x00 +#define REQ_TYPE_IOACMD 0x01 +#define REQ_TYPE_HCAM 0x02 + +/* pmcraid_ioarcb.flags0 values */ +#define TRANSFER_DIR_WRITE PMC_BIT8(0) +#define INHIBIT_UL_CHECK PMC_BIT8(2) +#define SYNC_OVERRIDE PMC_BIT8(3) +#define SYNC_COMPLETE PMC_BIT8(4) +#define NO_LINK_DESCS PMC_BIT8(5) + +/* pmcraid_ioarcb.flags1 values */ +#define DELAY_AFTER_RESET PMC_BIT8(0) +#define TASK_TAG_SIMPLE 0x10 +#define TASK_TAG_ORDERED 0x20 +#define TASK_TAG_QUEUE_HEAD 0x30 + +/* toggle bit offset in response handle */ +#define HRRQ_TOGGLE_BIT 0x01 +#define HRRQ_RESPONSE_BIT 0x02 + +/* IOA Status Area */ +struct pmcraid_ioasa_vset { + __le32 failing_lba_hi; + __le32 failing_lba_lo; + __le32 reserved; +} __attribute__((packed, aligned(4))); + +struct pmcraid_ioasa { + __le32 ioasc; + __le16 returned_status_length; + __le16 available_status_length; + __le32 residual_data_length; + __le32 ilid; + __le32 fd_ioasc; + __le32 fd_res_address; + __le32 fd_res_handle; + __le32 reserved; + + /* resource specific sense information */ + union { + struct pmcraid_ioasa_vset vset; + } u; + + /* IOA autosense data */ + __le16 auto_sense_length; + __le16 error_data_length; + __u8 sense_data[PMCRAID_SENSE_DATA_LEN]; +} __attribute__((packed, aligned(4))); + +#define PMCRAID_DRIVER_ILID 0xffffffff + +/* Config Table Entry per Resource */ +struct 
pmcraid_config_table_entry { + __u8 resource_type; + __u8 bus_protocol; + __le16 array_id; + __u8 common_flags0; + __u8 common_flags1; + __u8 unique_flags0; + __u8 unique_flags1; /*also used as vset target_id */ + __le32 resource_handle; + __le32 resource_address; + __u8 device_id[PMCRAID_DEVICE_ID_LEN]; + __u8 lun[PMCRAID_LUN_LEN]; +} __attribute__((packed, aligned(4))); + +/* extended configuration table sizes are also of 32 bytes in size */ +struct pmcraid_config_table_entry_ext { + struct pmcraid_config_table_entry cfgte; +}; + +/* resource types (config_table_entry.resource_type values) */ +#define RES_TYPE_AF_DASD 0x00 +#define RES_TYPE_GSCSI 0x01 +#define RES_TYPE_VSET 0x02 +#define RES_TYPE_IOA_FP 0xFF + +#define RES_IS_IOA(res) ((res).resource_type == RES_TYPE_IOA_FP) +#define RES_IS_GSCSI(res) ((res).resource_type == RES_TYPE_GSCSI) +#define RES_IS_VSET(res) ((res).resource_type == RES_TYPE_VSET) +#define RES_IS_AFDASD(res) ((res).resource_type == RES_TYPE_AF_DASD) + +/* bus_protocol values used by driver */ +#define RES_TYPE_VENCLOSURE 0x8 + +/* config_table_entry.common_flags0 */ +#define MULTIPATH_RESOURCE PMC_BIT32(0) + +/* unique_flags1 */ +#define IMPORT_MODE_MANUAL PMC_BIT8(0) + +/* well known resource handle values */ +#define RES_HANDLE_IOA 0xFFFFFFFF +#define RES_HANDLE_NONE 0x00000000 + +/* well known resource address values */ +#define RES_ADDRESS_IOAFP 0xFEFFFFFF +#define RES_ADDRESS_INVALID 0xFFFFFFFF + +/* BUS/TARGET/LUN values from resource_addrr */ +#define RES_BUS(res_addr) (le32_to_cpu(res_addr) & 0xFF) +#define RES_TARGET(res_addr) ((le32_to_cpu(res_addr) >> 16) & 0xFF) +#define RES_LUN(res_addr) 0x0 + +/* configuration table structure */ +struct pmcraid_config_table { + __le16 num_entries; + __u8 table_format; + __u8 reserved1; + __u8 flags; + __u8 reserved2[11]; + union { + struct pmcraid_config_table_entry + entries[PMCRAID_MAX_RESOURCES]; + struct pmcraid_config_table_entry_ext + entries_ext[PMCRAID_MAX_RESOURCES]; + }; +} __attribute__((packed, aligned(4))); + +/* config_table.flags value */ +#define MICROCODE_UPDATE_REQUIRED PMC_BIT32(0) + +/* + * HCAM format + */ +#define PMCRAID_HOSTRCB_LDNSIZE 4056 + +/* Error log notification format */ +struct pmcraid_hostrcb_error { + __le32 fd_ioasc; + __le32 fd_ra; + __le32 fd_rh; + __le32 prc; + union { + __u8 data[PMCRAID_HOSTRCB_LDNSIZE]; + } u; +} __attribute__ ((packed, aligned(4))); + +struct pmcraid_hcam_hdr { + __u8 op_code; + __u8 notification_type; + __u8 notification_lost; + __u8 flags; + __u8 overlay_id; + __u8 reserved1[3]; + __le32 ilid; + __le32 timestamp1; + __le32 timestamp2; + __le32 data_len; +} __attribute__((packed, aligned(4))); + +#define PMCRAID_AEN_GROUP 0x3 + +struct pmcraid_hcam_ccn { + struct pmcraid_hcam_hdr header; + struct pmcraid_config_table_entry cfg_entry; + struct pmcraid_config_table_entry cfg_entry_old; +} __attribute__((packed, aligned(4))); + +#define PMCRAID_CCN_EXT_SIZE 3944 +struct pmcraid_hcam_ccn_ext { + struct pmcraid_hcam_hdr header; + struct pmcraid_config_table_entry_ext cfg_entry; + struct pmcraid_config_table_entry_ext cfg_entry_old; + __u8 reserved[PMCRAID_CCN_EXT_SIZE]; +} __attribute__((packed, aligned(4))); + +struct pmcraid_hcam_ldn { + struct pmcraid_hcam_hdr header; + struct pmcraid_hostrcb_error error_log; +} __attribute__((packed, aligned(4))); + +/* pmcraid_hcam.op_code values */ +#define HOSTRCB_TYPE_CCN 0xE1 +#define HOSTRCB_TYPE_LDN 0xE2 + +/* pmcraid_hcam.notification_type values */ +#define NOTIFICATION_TYPE_ENTRY_CHANGED 0x0 +#define 
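For physical devices, the worker thread earlier derives bus/target/lun directly from the 32-bit resource_address using the RES_BUS/RES_TARGET/RES_LUN macros above: the low byte is the bus, bits 16-23 are the target, and the LUN is always reported as 0. A quick decode of a made-up address value (already converted to CPU endianness; the real macros apply le32_to_cpu first):

#include <stdint.h>
#include <stdio.h>

#define RES_BUS(addr)     ((addr) & 0xFF)
#define RES_TARGET(addr)  (((addr) >> 16) & 0xFF)
#define RES_LUN(addr)     0x0           /* LUN is always reported as 0 */

int main(void)
{
        uint32_t resource_address = 0x00350002;         /* hypothetical sample */

        printf("bus %u, target %u, lun %u\n",
               (unsigned)RES_BUS(resource_address),
               (unsigned)RES_TARGET(resource_address),
               (unsigned)RES_LUN(resource_address));
        return 0;
}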
NOTIFICATION_TYPE_ENTRY_NEW 0x1 +#define NOTIFICATION_TYPE_ENTRY_DELETED 0x2 +#define NOTIFICATION_TYPE_STATE_CHANGE 0x3 +#define NOTIFICATION_TYPE_ENTRY_STATECHANGED 0x4 +#define NOTIFICATION_TYPE_ERROR_LOG 0x10 +#define NOTIFICATION_TYPE_INFORMATION_LOG 0x11 + +#define HOSTRCB_NOTIFICATIONS_LOST PMC_BIT8(0) + +/* pmcraid_hcam.flags values */ +#define HOSTRCB_INTERNAL_OP_ERROR PMC_BIT8(0) +#define HOSTRCB_ERROR_RESPONSE_SENT PMC_BIT8(1) + +/* pmcraid_hcam.overlay_id values */ +#define HOSTRCB_OVERLAY_ID_08 0x08 +#define HOSTRCB_OVERLAY_ID_09 0x09 +#define HOSTRCB_OVERLAY_ID_11 0x11 +#define HOSTRCB_OVERLAY_ID_12 0x12 +#define HOSTRCB_OVERLAY_ID_13 0x13 +#define HOSTRCB_OVERLAY_ID_14 0x14 +#define HOSTRCB_OVERLAY_ID_16 0x16 +#define HOSTRCB_OVERLAY_ID_17 0x17 +#define HOSTRCB_OVERLAY_ID_20 0x20 +#define HOSTRCB_OVERLAY_ID_FF 0xFF + +/* Implementation specific card details */ +struct pmcraid_chip_details { + /* hardware register offsets */ + unsigned long ioastatus; + unsigned long ioarrin; + unsigned long mailbox; + unsigned long global_intr_mask; + unsigned long ioa_host_intr; + unsigned long ioa_host_msix_intr; + unsigned long ioa_host_intr_clr; + unsigned long ioa_host_mask; + unsigned long ioa_host_mask_clr; + unsigned long host_ioa_intr; + unsigned long host_ioa_intr_clr; + + /* timeout used during transitional to operational state */ + unsigned long transop_timeout; +}; + +/* IOA to HOST doorbells (interrupts) */ +#define INTRS_TRANSITION_TO_OPERATIONAL PMC_BIT32(0) +#define INTRS_IOARCB_TRANSFER_FAILED PMC_BIT32(3) +#define INTRS_IOA_UNIT_CHECK PMC_BIT32(4) +#define INTRS_NO_HRRQ_FOR_CMD_RESPONSE PMC_BIT32(5) +#define INTRS_CRITICAL_OP_IN_PROGRESS PMC_BIT32(6) +#define INTRS_IO_DEBUG_ACK PMC_BIT32(7) +#define INTRS_IOARRIN_LOST PMC_BIT32(27) +#define INTRS_SYSTEM_BUS_MMIO_ERROR PMC_BIT32(28) +#define INTRS_IOA_PROCESSOR_ERROR PMC_BIT32(29) +#define INTRS_HRRQ_VALID PMC_BIT32(30) +#define INTRS_OPERATIONAL_STATUS PMC_BIT32(0) +#define INTRS_ALLOW_MSIX_VECTOR0 PMC_BIT32(31) + +/* Host to IOA Doorbells */ +#define DOORBELL_RUNTIME_RESET PMC_BIT32(1) +#define DOORBELL_IOA_RESET_ALERT PMC_BIT32(7) +#define DOORBELL_IOA_DEBUG_ALERT PMC_BIT32(9) +#define DOORBELL_ENABLE_DESTRUCTIVE_DIAGS PMC_BIT32(8) +#define DOORBELL_IOA_START_BIST PMC_BIT32(23) +#define DOORBELL_INTR_MODE_MSIX PMC_BIT32(25) +#define DOORBELL_INTR_MSIX_CLR PMC_BIT32(26) +#define DOORBELL_RESET_IOA PMC_BIT32(31) + +/* Global interrupt mask register value */ +#define GLOBAL_INTERRUPT_MASK 0x5ULL + +#define PMCRAID_ERROR_INTERRUPTS (INTRS_IOARCB_TRANSFER_FAILED | \ + INTRS_IOA_UNIT_CHECK | \ + INTRS_NO_HRRQ_FOR_CMD_RESPONSE | \ + INTRS_IOARRIN_LOST | \ + INTRS_SYSTEM_BUS_MMIO_ERROR | \ + INTRS_IOA_PROCESSOR_ERROR) + +#define PMCRAID_PCI_INTERRUPTS (PMCRAID_ERROR_INTERRUPTS | \ + INTRS_HRRQ_VALID | \ + INTRS_TRANSITION_TO_OPERATIONAL |\ + INTRS_ALLOW_MSIX_VECTOR0) + +/* control_block, associated with each of the commands contains IOARCB, IOADLs + * memory for IOASA. Additional 3 * 16 bytes are allocated in order to support + * additional request parameters (of max size 48) any command. 
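The control block declared just below reserves PMCRAID_IOADLS_EXTERNAL + 3 descriptors; since each pmcraid_ioadl_desc is 16 bytes, the three extra slots are exactly the 48 bytes (PMCRAID_ADD_CMD_PARAM_LEN) that additional command parameters may occupy inside the IOARCB, as the comment above notes. A mirror-struct check of that arithmetic (layout copied from the descriptor definition earlier; not driver code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct ioadl_desc {                     /* mirrors struct pmcraid_ioadl_desc */
        uint64_t address;
        uint32_t data_len;
        uint8_t  reserved[3];
        uint8_t  flags;
};

#define ADD_CMD_PARAM_LEN 48            /* PMCRAID_ADD_CMD_PARAM_LEN */

int main(void)
{
        printf("sizeof(struct ioadl_desc) = %zu\n", sizeof(struct ioadl_desc));
        assert(3 * sizeof(struct ioadl_desc) == ADD_CMD_PARAM_LEN);
        puts("3 extra IOADL slots reserve the 48-byte add_cmd_params area");
        return 0;
}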
+ */ +struct pmcraid_control_block { + struct pmcraid_ioarcb ioarcb; + struct pmcraid_ioadl_desc ioadl[PMCRAID_IOADLS_EXTERNAL + 3]; + struct pmcraid_ioasa ioasa; +} __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT))); + +/* pmcraid_sglist - Scatter-gather list allocated for passthrough ioctls + */ +struct pmcraid_sglist { + u32 order; + u32 num_sg; + u32 num_dma_sg; + struct scatterlist *scatterlist; +}; + +/* page D0 inquiry data of focal point resource */ +struct pmcraid_inquiry_data { + __u8 ph_dev_type; + __u8 page_code; + __u8 reserved1; + __u8 add_page_len; + __u8 length; + __u8 reserved2; + __be16 fw_version; + __u8 reserved3[16]; +}; + +#define PMCRAID_TIMESTAMP_LEN 12 +#define PMCRAID_REQ_TM_STR_LEN 6 +#define PMCRAID_SCSI_SET_TIMESTAMP 0xA4 +#define PMCRAID_SCSI_SERVICE_ACTION 0x0F + +struct pmcraid_timestamp_data { + __u8 reserved1[4]; + __u8 timestamp[PMCRAID_REQ_TM_STR_LEN]; /* current time value */ + __u8 reserved2[2]; +}; + +/* pmcraid_cmd - LLD representation of SCSI command */ +struct pmcraid_cmd { + + /* Ptr and bus address of DMA.able control block for this command */ + struct pmcraid_control_block *ioa_cb; + dma_addr_t ioa_cb_bus_addr; + dma_addr_t dma_handle; + + /* pointer to mid layer structure of SCSI commands */ + struct scsi_cmnd *scsi_cmd; + + struct list_head free_list; + struct completion wait_for_completion; + struct timer_list timer; /* needed for internal commands */ + u32 timeout; /* current timeout value */ + u32 index; /* index into the command list */ + u8 completion_req; /* for handling internal commands */ + u8 release; /* for handling completions */ + + void (*cmd_done) (struct pmcraid_cmd *); + struct pmcraid_instance *drv_inst; + + struct pmcraid_sglist *sglist; /* used for passthrough IOCTLs */ + + /* scratch used */ + union { + /* during reset sequence */ + unsigned long time_left; + struct pmcraid_resource_entry *res; + int hrrq_index; + + /* used during IO command error handling. 
Sense buffer + * for REQUEST SENSE command if firmware is not sending + * auto sense data + */ + struct { + u8 *sense_buffer; + dma_addr_t sense_buffer_dma; + }; + }; +}; + +/* + * Interrupt registers of IOA + */ +struct pmcraid_interrupts { + void __iomem *ioa_host_interrupt_reg; + void __iomem *ioa_host_msix_interrupt_reg; + void __iomem *ioa_host_interrupt_clr_reg; + void __iomem *ioa_host_interrupt_mask_reg; + void __iomem *ioa_host_interrupt_mask_clr_reg; + void __iomem *global_interrupt_mask_reg; + void __iomem *host_ioa_interrupt_reg; + void __iomem *host_ioa_interrupt_clr_reg; +}; + +/* ISR parameters LLD allocates (one for each MSI-X if enabled) vectors */ +struct pmcraid_isr_param { + struct pmcraid_instance *drv_inst; + u8 hrrq_id; /* hrrq entry index */ +}; + + +/* AEN message header sent as part of event data to applications */ +struct pmcraid_aen_msg { + u32 hostno; + u32 length; + u8 reserved[8]; + u8 data[]; +}; + +/* Controller state event message type */ +struct pmcraid_state_msg { + struct pmcraid_aen_msg msg; + u32 ioa_state; +}; + +#define PMC_DEVICE_EVENT_RESET_START 0x11000000 +#define PMC_DEVICE_EVENT_RESET_SUCCESS 0x11000001 +#define PMC_DEVICE_EVENT_RESET_FAILED 0x11000002 +#define PMC_DEVICE_EVENT_SHUTDOWN_START 0x11000003 +#define PMC_DEVICE_EVENT_SHUTDOWN_SUCCESS 0x11000004 +#define PMC_DEVICE_EVENT_SHUTDOWN_FAILED 0x11000005 + +struct pmcraid_hostrcb { + struct pmcraid_instance *drv_inst; + struct pmcraid_aen_msg *msg; + struct pmcraid_hcam_hdr *hcam; /* pointer to hcam buffer */ + struct pmcraid_cmd *cmd; /* pointer to command block used */ + dma_addr_t baddr; /* system address of hcam buffer */ + atomic_t ignore; /* process HCAM response ? */ +}; + +#define PMCRAID_AEN_HDR_SIZE sizeof(struct pmcraid_aen_msg) + + + +/* + * Per adapter structure maintained by LLD + */ +struct pmcraid_instance { + /* Array of allowed-to-be-exposed resources, initialized from + * Configutation Table, later updated with CCNs + */ + struct pmcraid_resource_entry *res_entries; + + struct list_head free_res_q; /* res_entries lists for easy lookup */ + struct list_head used_res_q; /* List of to be exposed resources */ + spinlock_t resource_lock; /* spinlock to protect resource list */ + + void __iomem *mapped_dma_addr; + void __iomem *ioa_status; /* Iomapped IOA status register */ + void __iomem *mailbox; /* Iomapped mailbox register */ + void __iomem *ioarrin; /* IOmapped IOARR IN register */ + + struct pmcraid_interrupts int_regs; + struct pmcraid_chip_details *chip_cfg; + + /* HostRCBs needed for HCAM */ + struct pmcraid_hostrcb ldn; + struct pmcraid_hostrcb ccn; + struct pmcraid_state_msg scn; /* controller state change msg */ + + + /* Bus address of start of HRRQ */ + dma_addr_t hrrq_start_bus_addr[PMCRAID_NUM_MSIX_VECTORS]; + + /* Pointer to 1st entry of HRRQ */ + __le32 *hrrq_start[PMCRAID_NUM_MSIX_VECTORS]; + + /* Pointer to last entry of HRRQ */ + __le32 *hrrq_end[PMCRAID_NUM_MSIX_VECTORS]; + + /* Pointer to current pointer of hrrq */ + __le32 *hrrq_curr[PMCRAID_NUM_MSIX_VECTORS]; + + /* Lock for HRRQ access */ + spinlock_t hrrq_lock[PMCRAID_NUM_MSIX_VECTORS]; + + struct pmcraid_inquiry_data *inq_data; + dma_addr_t inq_data_baddr; + + struct pmcraid_timestamp_data *timestamp_data; + dma_addr_t timestamp_data_baddr; + + /* size of configuration table entry, varies based on the firmware */ + u32 config_table_entry_size; + + /* Expected toggle bit at host */ + u8 host_toggle_bit[PMCRAID_NUM_MSIX_VECTORS]; + + + /* Wait Q for threads to wait for Reset IOA completion */ + 
wait_queue_head_t reset_wait_q; + struct pmcraid_cmd *reset_cmd; + + /* structures for supporting SIGIO based AEN. */ + struct fasync_struct *aen_queue; + struct mutex aen_queue_lock; /* lock for aen subscribers list */ + struct cdev cdev; + + struct Scsi_Host *host; /* mid layer interface structure handle */ + struct pci_dev *pdev; /* PCI device structure handle */ + + /* No of Reset IOA retries . IOA marked dead if threshold exceeds */ + u8 ioa_reset_attempts; +#define PMCRAID_RESET_ATTEMPTS 3 + + u8 current_log_level; /* default level for logging IOASC errors */ + + u8 num_hrrq; /* Number of interrupt vectors allocated */ + u8 interrupt_mode; /* current interrupt mode legacy or msix */ + dev_t dev; /* Major-Minor numbers for Char device */ + + /* Used as ISR handler argument */ + struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS]; + + /* Message id as filled in last fired IOARCB, used to identify HRRQ */ + atomic_t last_message_id; + + /* configuration table */ + struct pmcraid_config_table *cfg_table; + dma_addr_t cfg_table_bus_addr; + + /* structures related to command blocks */ + struct kmem_cache *cmd_cachep; /* cache for cmd blocks */ + struct dma_pool *control_pool; /* pool for control blocks */ + char cmd_pool_name[64]; /* name of cmd cache */ + char ctl_pool_name[64]; /* name of control cache */ + + struct pmcraid_cmd *cmd_list[PMCRAID_MAX_CMD]; + + struct list_head free_cmd_pool; + struct list_head pending_cmd_pool; + spinlock_t free_pool_lock; /* free pool lock */ + spinlock_t pending_pool_lock; /* pending pool lock */ + + /* Tasklet to handle deferred processing */ + struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS]; + + /* Work-queue (Shared) for deferred reset processing */ + struct work_struct worker_q; + + /* No of IO commands pending with FW */ + atomic_t outstanding_cmds; + + /* should add/delete resources to mid-layer now ?*/ + atomic_t expose_resources; + + + + u32 ioa_state:4; /* For IOA Reset sequence FSM */ +#define IOA_STATE_OPERATIONAL 0x0 +#define IOA_STATE_UNKNOWN 0x1 +#define IOA_STATE_DEAD 0x2 +#define IOA_STATE_IN_SOFT_RESET 0x3 +#define IOA_STATE_IN_HARD_RESET 0x4 +#define IOA_STATE_IN_RESET_ALERT 0x5 +#define IOA_STATE_IN_BRINGDOWN 0x6 +#define IOA_STATE_IN_BRINGUP 0x7 + + u32 ioa_reset_in_progress:1; /* true if IOA reset is in progress */ + u32 ioa_hard_reset:1; /* TRUE if Hard Reset is needed */ + u32 ioa_unit_check:1; /* Indicates Unit Check condition */ + u32 ioa_bringdown:1; /* whether IOA needs to be brought down */ + u32 force_ioa_reset:1; /* force adapter reset ? 
*/ + u32 reinit_cfg_table:1; /* reinit config table due to lost CCN */ + u32 ioa_shutdown_type:2;/* shutdown type used during reset */ +#define SHUTDOWN_NONE 0x0 +#define SHUTDOWN_NORMAL 0x1 +#define SHUTDOWN_ABBREV 0x2 + u32 timestamp_error:1; /* indicate set timestamp for out of sync */ + +}; + +/* LLD maintained resource entry structure */ +struct pmcraid_resource_entry { + struct list_head queue; /* link to "to be exposed" resources */ + union { + struct pmcraid_config_table_entry cfg_entry; + struct pmcraid_config_table_entry_ext cfg_entry_ext; + }; + struct scsi_device *scsi_dev; /* Link scsi_device structure */ + atomic_t read_failures; /* count of failed READ commands */ + atomic_t write_failures; /* count of failed WRITE commands */ + + /* To indicate add/delete/modify during CCN */ + u8 change_detected; +#define RES_CHANGE_ADD 0x1 /* add this to mid-layer */ +#define RES_CHANGE_DEL 0x2 /* remove this from mid-layer */ + + u8 reset_progress; /* Device is resetting */ + + /* + * When IOA asks for sync (i.e. IOASC = Not Ready, Sync Required), this + * flag will be set, mid layer will be asked to retry. In the next + * attempt, this flag will be checked in queuecommand() to set + * SYNC_COMPLETE flag in IOARCB (flag_0). + */ + u8 sync_reqd; + + /* target indicates the mapped target_id assigned to this resource if + * this is VSET resource. For non-VSET resources this will be un-used + * or zero + */ + u8 target; +}; + +/* Data structures used in IOASC error code logging */ +struct pmcraid_ioasc_error { + u32 ioasc_code; /* IOASC code */ + u8 log_level; /* default log level assignment. */ + char *error_string; +}; + +/* Initial log_level assignments for various IOASCs */ +#define IOASC_LOG_LEVEL_NONE 0x0 /* no logging */ +#define IOASC_LOG_LEVEL_MUST 0x1 /* must log: all high-severity errors */ +#define IOASC_LOG_LEVEL_HARD 0x2 /* optional – low severity errors */ + +/* Error information maintained by LLD. LLD initializes the pmcraid_error_table + * statically. 
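+ * Each entry pairs an IOASC code with a default log level and a human
+ * readable string; the lookup that walks this table when an IOASC comes
+ * back from the adapter presumably lives in pmcraid.c rather than in
+ * this header.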
+ */ +static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = { + {0x01180600, IOASC_LOG_LEVEL_HARD, + "Recovered Error, soft media error, sector reassignment suggested"}, + {0x015D0000, IOASC_LOG_LEVEL_HARD, + "Recovered Error, failure prediction threshold exceeded"}, + {0x015D9200, IOASC_LOG_LEVEL_HARD, + "Recovered Error, soft Cache Card Battery error threshold"}, + {0x015D9200, IOASC_LOG_LEVEL_HARD, + "Recovered Error, soft Cache Card Battery error threshold"}, + {0x02048000, IOASC_LOG_LEVEL_HARD, + "Not Ready, IOA Reset Required"}, + {0x02408500, IOASC_LOG_LEVEL_HARD, + "Not Ready, IOA microcode download required"}, + {0x03110B00, IOASC_LOG_LEVEL_HARD, + "Medium Error, data unreadable, reassignment suggested"}, + {0x03110C00, IOASC_LOG_LEVEL_MUST, + "Medium Error, data unreadable do not reassign"}, + {0x03310000, IOASC_LOG_LEVEL_HARD, + "Medium Error, media corrupted"}, + {0x04050000, IOASC_LOG_LEVEL_HARD, + "Hardware Error, IOA can't communicate with device"}, + {0x04080000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, device bus error"}, + {0x04088000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, device bus is not functioning"}, + {0x04118000, IOASC_LOG_LEVEL_HARD, + "Hardware Error, IOA reserved area data check"}, + {0x04118100, IOASC_LOG_LEVEL_HARD, + "Hardware Error, IOA reserved area invalid data pattern"}, + {0x04118200, IOASC_LOG_LEVEL_HARD, + "Hardware Error, IOA reserved area LRC error"}, + {0x04320000, IOASC_LOG_LEVEL_HARD, + "Hardware Error, reassignment space exhausted"}, + {0x04330000, IOASC_LOG_LEVEL_HARD, + "Hardware Error, data transfer underlength error"}, + {0x04330000, IOASC_LOG_LEVEL_HARD, + "Hardware Error, data transfer overlength error"}, + {0x04418000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, PCI bus error"}, + {0x04440000, IOASC_LOG_LEVEL_HARD, + "Hardware Error, device error"}, + {0x04448200, IOASC_LOG_LEVEL_MUST, + "Hardware Error, IOA error"}, + {0x04448300, IOASC_LOG_LEVEL_HARD, + "Hardware Error, undefined device response"}, + {0x04448400, IOASC_LOG_LEVEL_HARD, + "Hardware Error, IOA microcode error"}, + {0x04448600, IOASC_LOG_LEVEL_HARD, + "Hardware Error, IOA reset required"}, + {0x04449200, IOASC_LOG_LEVEL_HARD, + "Hardware Error, hard Cache Fearuee Card Battery error"}, + {0x0444A000, IOASC_LOG_LEVEL_HARD, + "Hardware Error, failed device altered"}, + {0x0444A200, IOASC_LOG_LEVEL_HARD, + "Hardware Error, data check after reassignment"}, + {0x0444A300, IOASC_LOG_LEVEL_HARD, + "Hardware Error, LRC error after reassignment"}, + {0x044A0000, IOASC_LOG_LEVEL_HARD, + "Hardware Error, device bus error (msg/cmd phase)"}, + {0x04670400, IOASC_LOG_LEVEL_HARD, + "Hardware Error, new device can't be used"}, + {0x04678000, IOASC_LOG_LEVEL_HARD, + "Hardware Error, invalid multiadapter configuration"}, + {0x04678100, IOASC_LOG_LEVEL_HARD, + "Hardware Error, incorrect connection between enclosures"}, + {0x04678200, IOASC_LOG_LEVEL_HARD, + "Hardware Error, connections exceed IOA design limits"}, + {0x04678300, IOASC_LOG_LEVEL_HARD, + "Hardware Error, incorrect multipath connection"}, + {0x04679000, IOASC_LOG_LEVEL_HARD, + "Hardware Error, command to LUN failed"}, + {0x064C8000, IOASC_LOG_LEVEL_HARD, + "Unit Attention, cache exists for missing/failed device"}, + {0x06670100, IOASC_LOG_LEVEL_HARD, + "Unit Attention, incompatible exposed mode device"}, + {0x06670600, IOASC_LOG_LEVEL_HARD, + "Unit Attention, attachment of logical unit failed"}, + {0x06678000, IOASC_LOG_LEVEL_HARD, + "Unit Attention, cables exceed connective design limit"}, + {0x06678300, 
IOASC_LOG_LEVEL_HARD, + "Unit Attention, incomplete multipath connection between" \ + "IOA and enclosure"}, + {0x06678400, IOASC_LOG_LEVEL_HARD, + "Unit Attention, incomplete multipath connection between" \ + "device and enclosure"}, + {0x06678500, IOASC_LOG_LEVEL_HARD, + "Unit Attention, incomplete multipath connection between" \ + "IOA and remote IOA"}, + {0x06678600, IOASC_LOG_LEVEL_HARD, + "Unit Attention, missing remote IOA"}, + {0x06679100, IOASC_LOG_LEVEL_HARD, + "Unit Attention, enclosure doesn't support required multipath" \ + "function"}, + {0x06698200, IOASC_LOG_LEVEL_HARD, + "Unit Attention, corrupt array parity detected on device"}, + {0x066B0200, IOASC_LOG_LEVEL_HARD, + "Unit Attention, array exposed"}, + {0x066B8200, IOASC_LOG_LEVEL_HARD, + "Unit Attention, exposed array is still protected"}, + {0x066B9200, IOASC_LOG_LEVEL_HARD, + "Unit Attention, Multipath redundancy level got worse"}, + {0x07270000, IOASC_LOG_LEVEL_HARD, + "Data Protect, device is read/write protected by IOA"}, + {0x07278000, IOASC_LOG_LEVEL_HARD, + "Data Protect, IOA doesn't support device attribute"}, + {0x07278100, IOASC_LOG_LEVEL_HARD, + "Data Protect, NVRAM mirroring prohibited"}, + {0x07278400, IOASC_LOG_LEVEL_HARD, + "Data Protect, array is short 2 or more devices"}, + {0x07278600, IOASC_LOG_LEVEL_HARD, + "Data Protect, exposed array is short a required device"}, + {0x07278700, IOASC_LOG_LEVEL_HARD, + "Data Protect, array members not at required addresses"}, + {0x07278800, IOASC_LOG_LEVEL_HARD, + "Data Protect, exposed mode device resource address conflict"}, + {0x07278900, IOASC_LOG_LEVEL_HARD, + "Data Protect, incorrect resource address of exposed mode device"}, + {0x07278A00, IOASC_LOG_LEVEL_HARD, + "Data Protect, Array is missing a device and parity is out of sync"}, + {0x07278B00, IOASC_LOG_LEVEL_HARD, + "Data Protect, maximum number of arrays already exist"}, + {0x07278C00, IOASC_LOG_LEVEL_HARD, + "Data Protect, cannot locate cache data for device"}, + {0x07278D00, IOASC_LOG_LEVEL_HARD, + "Data Protect, cache data exits for a changed device"}, + {0x07279100, IOASC_LOG_LEVEL_HARD, + "Data Protect, detection of a device requiring format"}, + {0x07279200, IOASC_LOG_LEVEL_HARD, + "Data Protect, IOA exceeds maximum number of devices"}, + {0x07279600, IOASC_LOG_LEVEL_HARD, + "Data Protect, missing array, volume set is not functional"}, + {0x07279700, IOASC_LOG_LEVEL_HARD, + "Data Protect, single device for a volume set"}, + {0x07279800, IOASC_LOG_LEVEL_HARD, + "Data Protect, missing multiple devices for a volume set"}, + {0x07279900, IOASC_LOG_LEVEL_HARD, + "Data Protect, maximum number of volument sets already exists"}, + {0x07279A00, IOASC_LOG_LEVEL_HARD, + "Data Protect, other volume set problem"}, +}; + +/* macros to help in debugging */ +#define pmcraid_err(...) \ + printk(KERN_ERR "MaxRAID: "__VA_ARGS__) + +#define pmcraid_info(...) 
\ + if (pmcraid_debug_log) \ + printk(KERN_INFO "MaxRAID: "__VA_ARGS__) + +/* check if given command is a SCSI READ or SCSI WRITE command */ +#define SCSI_READ_CMD 0x1 /* any of SCSI READ commands */ +#define SCSI_WRITE_CMD 0x2 /* any of SCSI WRITE commands */ +#define SCSI_CMD_TYPE(opcode) \ +({ u8 op = opcode; u8 __type = 0;\ + if (op == READ_6 || op == READ_10 || op == READ_12 || op == READ_16)\ + __type = SCSI_READ_CMD;\ + else if (op == WRITE_6 || op == WRITE_10 || op == WRITE_12 || \ + op == WRITE_16)\ + __type = SCSI_WRITE_CMD;\ + __type;\ +}) + +#define IS_SCSI_READ_WRITE(opcode) \ +({ u8 __type = SCSI_CMD_TYPE(opcode); \ + (__type == SCSI_READ_CMD || __type == SCSI_WRITE_CMD) ? 1 : 0;\ +}) + + +/* + * pmcraid_ioctl_header - definition of header structure that precedes all the + * buffers given as ioctl arguments. + * + * .signature : always ASCII string, "PMCRAID" + * .reserved : not used + * .buffer_length : length of the buffer following the header + */ +struct pmcraid_ioctl_header { + u8 signature[8]; + u32 reserved; + u32 buffer_length; +}; + +#define PMCRAID_IOCTL_SIGNATURE "PMCRAID" + +/* + * keys to differentiate between driver handled IOCTLs and passthrough + * IOCTLs passed to IOA. driver determines the ioctl type using macro + * _IOC_TYPE + */ +#define PMCRAID_DRIVER_IOCTL 'D' + +#define DRV_IOCTL(n, size) \ + _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size)) + +/* + * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd. + * This is to facilitate applications avoiding un-necessary memory allocations. + * For example, most of driver handled ioctls do not require ioarcb, ioasa. + */ +#define _ARGSIZE(arg) (sizeof(struct pmcraid_ioctl_header) + sizeof(arg)) + +/* Driver handled IOCTL command definitions */ + +#define PMCRAID_IOCTL_RESET_ADAPTER \ + DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header)) + +#endif /* _PMCRAID_H */ diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c new file mode 100644 index 000000000..d592ee917 --- /dev/null +++ b/drivers/scsi/ppa.c @@ -0,0 +1,1164 @@ +/* ppa.c -- low level driver for the IOMEGA PPA3 + * parallel port SCSI host adapter. + * + * (The PPA3 is the embedded controller in the ZIP drive.) + * + * (c) 1995,1996 Grant R. Guenther, grant@torque.net, + * under the terms of the GNU General Public License. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + + +static void ppa_reset_pulse(unsigned int base); + +typedef struct { + struct pardevice *dev; /* Parport device entry */ + int base; /* Actual port address */ + int mode; /* Transfer mode */ + struct scsi_cmnd *cur_cmd; /* Current queued command */ + struct delayed_work ppa_tq; /* Polling interrupt stuff */ + unsigned long jstart; /* Jiffies at start */ + unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */ + unsigned int failed:1; /* Failure flag */ + unsigned wanted:1; /* Parport sharing busy flag */ + unsigned int dev_no; /* Device number */ + wait_queue_head_t *waiting; + struct Scsi_Host *host; + struct list_head list; +} ppa_struct; + +#include "ppa.h" + +static unsigned int mode = PPA_AUTODETECT; +module_param(mode, uint, 0644); +MODULE_PARM_DESC(mode, "Transfer mode (0 = Autodetect, 1 = SPP 4-bit, " + "2 = SPP 8-bit, 3 = EPP 8-bit, 4 = EPP 16-bit, 5 = EPP 32-bit"); + +static struct scsi_pointer *ppa_scsi_pointer(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +static inline ppa_struct *ppa_dev(struct Scsi_Host *host) +{ + return *(ppa_struct **)&host->hostdata; +} + +static DEFINE_SPINLOCK(arbitration_lock); + +static void got_it(ppa_struct *dev) +{ + dev->base = dev->dev->port->base; + if (dev->cur_cmd) + ppa_scsi_pointer(dev->cur_cmd)->phase = 1; + else + wake_up(dev->waiting); +} + +static void ppa_wakeup(void *ref) +{ + ppa_struct *dev = (ppa_struct *) ref; + unsigned long flags; + + spin_lock_irqsave(&arbitration_lock, flags); + if (dev->wanted) { + parport_claim(dev->dev); + got_it(dev); + dev->wanted = 0; + } + spin_unlock_irqrestore(&arbitration_lock, flags); + return; +} + +static int ppa_pb_claim(ppa_struct *dev) +{ + unsigned long flags; + int res = 1; + spin_lock_irqsave(&arbitration_lock, flags); + if (parport_claim(dev->dev) == 0) { + got_it(dev); + res = 0; + } + dev->wanted = res; + spin_unlock_irqrestore(&arbitration_lock, flags); + return res; +} + +static void ppa_pb_dismiss(ppa_struct *dev) +{ + unsigned long flags; + int wanted; + spin_lock_irqsave(&arbitration_lock, flags); + wanted = dev->wanted; + dev->wanted = 0; + spin_unlock_irqrestore(&arbitration_lock, flags); + if (!wanted) + parport_release(dev->dev); +} + +static inline void ppa_pb_release(ppa_struct *dev) +{ + parport_release(dev->dev); +} + +/* + * Start of Chipset kludges + */ + +/* This is to give the ppa driver a way to modify the timings (and other + * parameters) by writing to the /proc/scsi/ppa/0 file. + * Very simple method really... (To simple, no error checking :( ) + * Reason: Kernel hackers HATE having to unload and reload modules for + * testing... 
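+ * As a rough example (assuming this host came up as instance 0, so the
+ * file is /proc/scsi/ppa/0):
+ *     echo mode=3 > /proc/scsi/ppa/0         (force EPP 8-bit)
+ *     echo recon_tmo=500 > /proc/scsi/ppa/0  (reconnection wait, usecs)
+ * These are the two keys parsed by ppa_write_info() below.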
+ * Also gives a method to use a script to obtain optimum timings (TODO) + */ + +static inline int ppa_write_info(struct Scsi_Host *host, char *buffer, int length) +{ + ppa_struct *dev = ppa_dev(host); + unsigned long x; + + if ((length > 5) && (strncmp(buffer, "mode=", 5) == 0)) { + x = simple_strtoul(buffer + 5, NULL, 0); + dev->mode = x; + return length; + } + if ((length > 10) && (strncmp(buffer, "recon_tmo=", 10) == 0)) { + x = simple_strtoul(buffer + 10, NULL, 0); + dev->recon_tmo = x; + printk(KERN_INFO "ppa: recon_tmo set to %ld\n", x); + return length; + } + printk(KERN_WARNING "ppa /proc: invalid variable\n"); + return -EINVAL; +} + +static int ppa_show_info(struct seq_file *m, struct Scsi_Host *host) +{ + ppa_struct *dev = ppa_dev(host); + + seq_printf(m, "Version : %s\n", PPA_VERSION); + seq_printf(m, "Parport : %s\n", dev->dev->port->name); + seq_printf(m, "Mode : %s\n", PPA_MODE_STRING[dev->mode]); +#if PPA_DEBUG > 0 + seq_printf(m, "recon_tmo : %lu\n", dev->recon_tmo); +#endif + return 0; +} + +static int device_check(ppa_struct *dev, bool autodetect); + +#if PPA_DEBUG > 0 +#define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\ + y, __func__, __LINE__); ppa_fail_func(x,y); +static inline void ppa_fail_func(ppa_struct *dev, int error_code) +#else +static inline void ppa_fail(ppa_struct *dev, int error_code) +#endif +{ + /* If we fail a device then we trash status / message bytes */ + if (dev->cur_cmd) { + dev->cur_cmd->result = error_code << 16; + dev->failed = 1; + } +} + +/* + * Wait for the high bit to be set. + * + * In principle, this could be tied to an interrupt, but the adapter + * doesn't appear to be designed to support interrupts. We spin on + * the 0x80 ready bit. + */ +static unsigned char ppa_wait(ppa_struct *dev) +{ + int k; + unsigned short ppb = dev->base; + unsigned char r; + + k = PPA_SPIN_TMO; + /* Wait for bit 6 and 7 - PJC */ + for (r = r_str(ppb); ((r & 0xc0) != 0xc0) && (k); k--) { + udelay(1); + r = r_str(ppb); + } + + /* + * return some status information. + * Semantics: 0xc0 = ZIP wants more data + * 0xd0 = ZIP wants to send more data + * 0xe0 = ZIP is expecting SCSI command data + * 0xf0 = end of transfer, ZIP is sending status + */ + if (k) + return (r & 0xf0); + + /* Counter expired - Time out occurred */ + ppa_fail(dev, DID_TIME_OUT); + printk(KERN_WARNING "ppa timeout in ppa_wait\n"); + return 0; /* command timed out */ +} + +/* + * Clear EPP Timeout Bit + */ +static inline void epp_reset(unsigned short ppb) +{ + int i; + + i = r_str(ppb); + w_str(ppb, i); + w_str(ppb, i & 0xfe); +} + +/* + * Wait for empty ECP fifo (if we are in ECP fifo mode only) + */ +static inline void ecp_sync(ppa_struct *dev) +{ + int i, ppb_hi = dev->dev->port->base_hi; + + if (ppb_hi == 0) + return; + + if ((r_ecr(ppb_hi) & 0xe0) == 0x60) { /* mode 011 == ECP fifo mode */ + for (i = 0; i < 100; i++) { + if (r_ecr(ppb_hi) & 0x01) + return; + udelay(5); + } + printk(KERN_WARNING "ppa: ECP sync failed as data still present in FIFO.\n"); + } +} + +static int ppa_byte_out(unsigned short base, const char *buffer, int len) +{ + int i; + + for (i = len; i; i--) { + w_dtr(base, *buffer++); + w_ctr(base, 0xe); + w_ctr(base, 0xc); + } + return 1; /* All went well - we hope! */ +} + +static int ppa_byte_in(unsigned short base, char *buffer, int len) +{ + int i; + + for (i = len; i; i--) { + *buffer++ = r_dtr(base); + w_ctr(base, 0x27); + w_ctr(base, 0x25); + } + return 1; /* All went well - we hope! 
*/ +} + +static int ppa_nibble_in(unsigned short base, char *buffer, int len) +{ + for (; len; len--) { + unsigned char h; + + w_ctr(base, 0x4); + h = r_str(base) & 0xf0; + w_ctr(base, 0x6); + *buffer++ = h | ((r_str(base) & 0xf0) >> 4); + } + return 1; /* All went well - we hope! */ +} + +static int ppa_out(ppa_struct *dev, char *buffer, int len) +{ + int r; + unsigned short ppb = dev->base; + + r = ppa_wait(dev); + + if ((r & 0x50) != 0x40) { + ppa_fail(dev, DID_ERROR); + return 0; + } + switch (dev->mode) { + case PPA_NIBBLE: + case PPA_PS2: + /* 8 bit output, with a loop */ + r = ppa_byte_out(ppb, buffer, len); + break; + + case PPA_EPP_32: + case PPA_EPP_16: + case PPA_EPP_8: + epp_reset(ppb); + w_ctr(ppb, 0x4); + if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03)) + outsl(ppb + 4, buffer, len >> 2); + else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01)) + outsw(ppb + 4, buffer, len >> 1); + else + outsb(ppb + 4, buffer, len); + w_ctr(ppb, 0xc); + r = !(r_str(ppb) & 0x01); + w_ctr(ppb, 0xc); + ecp_sync(dev); + break; + + default: + printk(KERN_ERR "PPA: bug in ppa_out()\n"); + r = 0; + } + return r; +} + +static int ppa_in(ppa_struct *dev, char *buffer, int len) +{ + int r; + unsigned short ppb = dev->base; + + r = ppa_wait(dev); + + if ((r & 0x50) != 0x50) { + ppa_fail(dev, DID_ERROR); + return 0; + } + switch (dev->mode) { + case PPA_NIBBLE: + /* 4 bit input, with a loop */ + r = ppa_nibble_in(ppb, buffer, len); + w_ctr(ppb, 0xc); + break; + + case PPA_PS2: + /* 8 bit input, with a loop */ + w_ctr(ppb, 0x25); + r = ppa_byte_in(ppb, buffer, len); + w_ctr(ppb, 0x4); + w_ctr(ppb, 0xc); + break; + + case PPA_EPP_32: + case PPA_EPP_16: + case PPA_EPP_8: + epp_reset(ppb); + w_ctr(ppb, 0x24); + if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03)) + insl(ppb + 4, buffer, len >> 2); + else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01)) + insw(ppb + 4, buffer, len >> 1); + else + insb(ppb + 4, buffer, len); + w_ctr(ppb, 0x2c); + r = !(r_str(ppb) & 0x01); + w_ctr(ppb, 0x2c); + ecp_sync(dev); + break; + + default: + printk(KERN_ERR "PPA: bug in ppa_ins()\n"); + r = 0; + break; + } + return r; +} + +/* end of ppa_io.h */ +static inline void ppa_d_pulse(unsigned short ppb, unsigned char b) +{ + w_dtr(ppb, b); + w_ctr(ppb, 0xc); + w_ctr(ppb, 0xe); + w_ctr(ppb, 0xc); + w_ctr(ppb, 0x4); + w_ctr(ppb, 0xc); +} + +static void ppa_disconnect(ppa_struct *dev) +{ + unsigned short ppb = dev->base; + + ppa_d_pulse(ppb, 0); + ppa_d_pulse(ppb, 0x3c); + ppa_d_pulse(ppb, 0x20); + ppa_d_pulse(ppb, 0xf); +} + +static inline void ppa_c_pulse(unsigned short ppb, unsigned char b) +{ + w_dtr(ppb, b); + w_ctr(ppb, 0x4); + w_ctr(ppb, 0x6); + w_ctr(ppb, 0x4); + w_ctr(ppb, 0xc); +} + +static inline void ppa_connect(ppa_struct *dev, int flag) +{ + unsigned short ppb = dev->base; + + ppa_c_pulse(ppb, 0); + ppa_c_pulse(ppb, 0x3c); + ppa_c_pulse(ppb, 0x20); + if ((flag == CONNECT_EPP_MAYBE) && IN_EPP_MODE(dev->mode)) + ppa_c_pulse(ppb, 0xcf); + else + ppa_c_pulse(ppb, 0x8f); +} + +static int ppa_select(ppa_struct *dev, int target) +{ + int k; + unsigned short ppb = dev->base; + + /* + * Bit 6 (0x40) is the device selected bit. + * First we must wait till the current device goes off line... 
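+ * then we put a single-bit mask for the wanted target on the data lines,
+ * strobe the control port and wait for bit 6 to come back high, meaning
+ * the target has latched the selection.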
+ */ + k = PPA_SELECT_TMO; + do { + k--; + udelay(1); + } while ((r_str(ppb) & 0x40) && (k)); + if (!k) + return 0; + + w_dtr(ppb, (1 << target)); + w_ctr(ppb, 0xe); + w_ctr(ppb, 0xc); + w_dtr(ppb, 0x80); /* This is NOT the initator */ + w_ctr(ppb, 0x8); + + k = PPA_SELECT_TMO; + do { + k--; + udelay(1); + } + while (!(r_str(ppb) & 0x40) && (k)); + if (!k) + return 0; + + return 1; +} + +/* + * This is based on a trace of what the Iomega DOS 'guest' driver does. + * I've tried several different kinds of parallel ports with guest and + * coded this to react in the same ways that it does. + * + * The return value from this function is just a hint about where the + * handshaking failed. + * + */ +static int ppa_init(ppa_struct *dev) +{ + int retv; + unsigned short ppb = dev->base; + bool autodetect = dev->mode == PPA_AUTODETECT; + + if (autodetect) { + int modes = dev->dev->port->modes; + int ppb_hi = dev->dev->port->base_hi; + + /* Mode detection works up the chain of speed + * This avoids a nasty if-then-else-if-... tree + */ + dev->mode = PPA_NIBBLE; + + if (modes & PARPORT_MODE_TRISTATE) + dev->mode = PPA_PS2; + + if (modes & PARPORT_MODE_ECP) { + w_ecr(ppb_hi, 0x20); + dev->mode = PPA_PS2; + } + if ((modes & PARPORT_MODE_EPP) && (modes & PARPORT_MODE_ECP)) + w_ecr(ppb_hi, 0x80); + } + + ppa_disconnect(dev); + ppa_connect(dev, CONNECT_NORMAL); + + retv = 2; /* Failed */ + + w_ctr(ppb, 0xe); + if ((r_str(ppb) & 0x08) == 0x08) + retv--; + + w_ctr(ppb, 0xc); + if ((r_str(ppb) & 0x08) == 0x00) + retv--; + + if (!retv) + ppa_reset_pulse(ppb); + udelay(1000); /* Allow devices to settle down */ + ppa_disconnect(dev); + udelay(1000); /* Another delay to allow devices to settle */ + + if (retv) + return -EIO; + + return device_check(dev, autodetect); +} + +static inline int ppa_send_command(struct scsi_cmnd *cmd) +{ + ppa_struct *dev = ppa_dev(cmd->device->host); + int k; + + w_ctr(dev->base, 0x0c); + + for (k = 0; k < cmd->cmd_len; k++) + if (!ppa_out(dev, &cmd->cmnd[k], 1)) + return 0; + return 1; +} + +/* + * The bulk flag enables some optimisations in the data transfer loops, + * it should be true for any command that transfers data in integral + * numbers of sectors. + * + * The driver appears to remain stable if we speed up the parallel port + * i/o in this function, but not elsewhere. + */ +static int ppa_completion(struct scsi_cmnd *const cmd) +{ + /* Return codes: + * -1 Error + * 0 Told to schedule + * 1 Finished data transfer + */ + struct scsi_pointer *scsi_pointer = ppa_scsi_pointer(cmd); + ppa_struct *dev = ppa_dev(cmd->device->host); + unsigned short ppb = dev->base; + unsigned long start_jiffies = jiffies; + + unsigned char r, v; + int fast, bulk, status; + + v = cmd->cmnd[0]; + bulk = ((v == READ_6) || + (v == READ_10) || (v == WRITE_6) || (v == WRITE_10)); + + /* + * We only get here if the drive is ready to comunicate, + * hence no need for a full ppa_wait. + */ + r = (r_str(ppb) & 0xf0); + + while (r != (unsigned char) 0xf0) { + /* + * If we have been running for more than a full timer tick + * then take a rest. + */ + if (time_after(jiffies, start_jiffies + 1)) + return 0; + + if (scsi_pointer->this_residual <= 0) { + ppa_fail(dev, DID_ERROR); + return -1; /* ERROR_RETURN */ + } + + /* On some hardware we have SCSI disconnected (6th bit low) + * for about 100usecs. It is too expensive to wait a + * tick on every loop so we busy wait for no more than + * 500usecs to give the drive a chance first. 
We do not + * change things for "normal" hardware since generally + * the 6th bit is always high. + * This makes the CPU load higher on some hardware + * but otherwise we can not get more than 50K/secs + * on this problem hardware. + */ + if ((r & 0xc0) != 0xc0) { + /* Wait for reconnection should be no more than + * jiffy/2 = 5ms = 5000 loops + */ + unsigned long k = dev->recon_tmo; + for (; k && ((r = (r_str(ppb) & 0xf0)) & 0xc0) != 0xc0; + k--) + udelay(1); + + if (!k) + return 0; + } + + /* determine if we should use burst I/O */ + fast = bulk && scsi_pointer->this_residual >= PPA_BURST_SIZE ? + PPA_BURST_SIZE : 1; + + if (r == (unsigned char) 0xc0) + status = ppa_out(dev, scsi_pointer->ptr, fast); + else + status = ppa_in(dev, scsi_pointer->ptr, fast); + + scsi_pointer->ptr += fast; + scsi_pointer->this_residual -= fast; + + if (!status) { + ppa_fail(dev, DID_BUS_BUSY); + return -1; /* ERROR_RETURN */ + } + if (scsi_pointer->buffer && !scsi_pointer->this_residual) { + /* if scatter/gather, advance to the next segment */ + if (scsi_pointer->buffers_residual--) { + scsi_pointer->buffer = + sg_next(scsi_pointer->buffer); + scsi_pointer->this_residual = + scsi_pointer->buffer->length; + scsi_pointer->ptr = + sg_virt(scsi_pointer->buffer); + } + } + /* Now check to see if the drive is ready to comunicate */ + r = (r_str(ppb) & 0xf0); + /* If not, drop back down to the scheduler and wait a timer tick */ + if (!(r & 0x80)) + return 0; + } + return 1; /* FINISH_RETURN */ +} + +/* + * Since the PPA itself doesn't generate interrupts, we use + * the scheduler's task queue to generate a stream of call-backs and + * complete the request when the drive is ready. + */ +static void ppa_interrupt(struct work_struct *work) +{ + ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work); + struct scsi_cmnd *cmd = dev->cur_cmd; + + if (!cmd) { + printk(KERN_ERR "PPA: bug in ppa_interrupt\n"); + return; + } + if (ppa_engine(dev, cmd)) { + schedule_delayed_work(&dev->ppa_tq, 1); + return; + } + /* Command must of completed hence it is safe to let go... 
*/ +#if PPA_DEBUG > 0 + switch ((cmd->result >> 16) & 0xff) { + case DID_OK: + break; + case DID_NO_CONNECT: + printk(KERN_DEBUG "ppa: no device at SCSI ID %i\n", scmd_id(cmd)); + break; + case DID_BUS_BUSY: + printk(KERN_DEBUG "ppa: BUS BUSY - EPP timeout detected\n"); + break; + case DID_TIME_OUT: + printk(KERN_DEBUG "ppa: unknown timeout\n"); + break; + case DID_ABORT: + printk(KERN_DEBUG "ppa: told to abort\n"); + break; + case DID_PARITY: + printk(KERN_DEBUG "ppa: parity error (???)\n"); + break; + case DID_ERROR: + printk(KERN_DEBUG "ppa: internal driver error\n"); + break; + case DID_RESET: + printk(KERN_DEBUG "ppa: told to reset device\n"); + break; + case DID_BAD_INTR: + printk(KERN_WARNING "ppa: bad interrupt (???)\n"); + break; + default: + printk(KERN_WARNING "ppa: bad return code (%02x)\n", + (cmd->result >> 16) & 0xff); + } +#endif + + if (ppa_scsi_pointer(cmd)->phase > 1) + ppa_disconnect(dev); + + ppa_pb_dismiss(dev); + + dev->cur_cmd = NULL; + + scsi_done(cmd); +} + +static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd) +{ + struct scsi_pointer *scsi_pointer = ppa_scsi_pointer(cmd); + unsigned short ppb = dev->base; + unsigned char l = 0, h = 0; + int retv; + + /* First check for any errors that may of occurred + * Here we check for internal errors + */ + if (dev->failed) + return 0; + + switch (scsi_pointer->phase) { + case 0: /* Phase 0 - Waiting for parport */ + if (time_after(jiffies, dev->jstart + HZ)) { + /* + * We waited more than a second + * for parport to call us + */ + ppa_fail(dev, DID_BUS_BUSY); + return 0; + } + return 1; /* wait until ppa_wakeup claims parport */ + case 1: /* Phase 1 - Connected */ + { /* Perform a sanity check for cable unplugged */ + int retv = 2; /* Failed */ + + ppa_connect(dev, CONNECT_EPP_MAYBE); + + w_ctr(ppb, 0xe); + if ((r_str(ppb) & 0x08) == 0x08) + retv--; + + w_ctr(ppb, 0xc); + if ((r_str(ppb) & 0x08) == 0x00) + retv--; + + if (retv) { + if (time_after(jiffies, dev->jstart + (1 * HZ))) { + printk(KERN_ERR "ppa: Parallel port cable is unplugged.\n"); + ppa_fail(dev, DID_BUS_BUSY); + return 0; + } else { + ppa_disconnect(dev); + return 1; /* Try again in a jiffy */ + } + } + scsi_pointer->phase++; + } + fallthrough; + + case 2: /* Phase 2 - We are now talking to the scsi bus */ + if (!ppa_select(dev, scmd_id(cmd))) { + ppa_fail(dev, DID_NO_CONNECT); + return 0; + } + scsi_pointer->phase++; + fallthrough; + + case 3: /* Phase 3 - Ready to accept a command */ + w_ctr(ppb, 0x0c); + if (!(r_str(ppb) & 0x80)) + return 1; + + if (!ppa_send_command(cmd)) + return 0; + scsi_pointer->phase++; + fallthrough; + + case 4: /* Phase 4 - Setup scatter/gather buffers */ + if (scsi_bufflen(cmd)) { + scsi_pointer->buffer = scsi_sglist(cmd); + scsi_pointer->this_residual = + scsi_pointer->buffer->length; + scsi_pointer->ptr = sg_virt(scsi_pointer->buffer); + } else { + scsi_pointer->buffer = NULL; + scsi_pointer->this_residual = 0; + scsi_pointer->ptr = NULL; + } + scsi_pointer->buffers_residual = scsi_sg_count(cmd) - 1; + scsi_pointer->phase++; + fallthrough; + + case 5: /* Phase 5 - Data transfer stage */ + w_ctr(ppb, 0x0c); + if (!(r_str(ppb) & 0x80)) + return 1; + + retv = ppa_completion(cmd); + if (retv == -1) + return 0; + if (retv == 0) + return 1; + scsi_pointer->phase++; + fallthrough; + + case 6: /* Phase 6 - Read status/message */ + cmd->result = DID_OK << 16; + /* Check for data overrun */ + if (ppa_wait(dev) != (unsigned char) 0xf0) { + ppa_fail(dev, DID_ERROR); + return 0; + } + if (ppa_in(dev, &l, 1)) { /* read status byte */ + 
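+		/* cmd->result below packs the host byte (DID_*) in bits
+		 * 16-23, the optional message byte in bits 8-15 and the
+		 * SCSI status in bits 0-7. */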
/* Check for optional message byte */ + if (ppa_wait(dev) == (unsigned char) 0xf0) + ppa_in(dev, &h, 1); + cmd->result = + (DID_OK << 16) + (h << 8) + (l & STATUS_MASK); + } + return 0; /* Finished */ + + default: + printk(KERN_ERR "ppa: Invalid scsi phase\n"); + } + return 0; +} + +static int ppa_queuecommand_lck(struct scsi_cmnd *cmd) +{ + ppa_struct *dev = ppa_dev(cmd->device->host); + + if (dev->cur_cmd) { + printk(KERN_ERR "PPA: bug in ppa_queuecommand\n"); + return 0; + } + dev->failed = 0; + dev->jstart = jiffies; + dev->cur_cmd = cmd; + cmd->result = DID_ERROR << 16; /* default return code */ + ppa_scsi_pointer(cmd)->phase = 0; /* bus free */ + + schedule_delayed_work(&dev->ppa_tq, 0); + + ppa_pb_claim(dev); + + return 0; +} + +static DEF_SCSI_QCMD(ppa_queuecommand) + +/* + * Apparently the disk->capacity attribute is off by 1 sector + * for all disk drives. We add the one here, but it should really + * be done in sd.c. Even if it gets fixed there, this will still + * work. + */ +static int ppa_biosparam(struct scsi_device *sdev, struct block_device *dev, + sector_t capacity, int ip[]) +{ + ip[0] = 0x40; + ip[1] = 0x20; + ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]); + if (ip[2] > 1024) { + ip[0] = 0xff; + ip[1] = 0x3f; + ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]); + if (ip[2] > 1023) + ip[2] = 1023; + } + return 0; +} + +static int ppa_abort(struct scsi_cmnd *cmd) +{ + ppa_struct *dev = ppa_dev(cmd->device->host); + /* + * There is no method for aborting commands since Iomega + * have tied the SCSI_MESSAGE line high in the interface + */ + + switch (ppa_scsi_pointer(cmd)->phase) { + case 0: /* Do not have access to parport */ + case 1: /* Have not connected to interface */ + dev->cur_cmd = NULL; /* Forget the problem */ + return SUCCESS; + default: /* SCSI command sent, can not abort */ + return FAILED; + } +} + +static void ppa_reset_pulse(unsigned int base) +{ + w_dtr(base, 0x40); + w_ctr(base, 0x8); + udelay(30); + w_ctr(base, 0xc); +} + +static int ppa_reset(struct scsi_cmnd *cmd) +{ + ppa_struct *dev = ppa_dev(cmd->device->host); + + if (ppa_scsi_pointer(cmd)->phase) + ppa_disconnect(dev); + dev->cur_cmd = NULL; /* Forget the problem */ + + ppa_connect(dev, CONNECT_NORMAL); + ppa_reset_pulse(dev->base); + mdelay(1); /* device settle delay */ + ppa_disconnect(dev); + mdelay(1); /* device settle delay */ + return SUCCESS; +} + +static int device_check(ppa_struct *dev, bool autodetect) +{ + /* This routine looks for a device and then attempts to use EPP + to send a command. If all goes as planned then EPP is available. 
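+	   If the EPP attempt fails, the loop below drops back to the original
+	   (pre-EPP) mode via the second_pass label and re-issues the all-zero
+	   TEST UNIT READY CDB in that mode before failing the detection with
+	   -EIO.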
*/ + + static u8 cmd[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + int loop, old_mode, status, k, ppb = dev->base; + unsigned char l; + + old_mode = dev->mode; + for (loop = 0; loop < 8; loop++) { + /* Attempt to use EPP for Test Unit Ready */ + if (autodetect && (ppb & 0x0007) == 0x0000) + dev->mode = PPA_EPP_8; + +second_pass: + ppa_connect(dev, CONNECT_EPP_MAYBE); + /* Select SCSI device */ + if (!ppa_select(dev, loop)) { + ppa_disconnect(dev); + continue; + } + printk(KERN_INFO "ppa: Found device at ID %i, Attempting to use %s\n", + loop, PPA_MODE_STRING[dev->mode]); + + /* Send SCSI command */ + status = 1; + w_ctr(ppb, 0x0c); + for (l = 0; (l < 6) && (status); l++) + status = ppa_out(dev, cmd, 1); + + if (!status) { + ppa_disconnect(dev); + ppa_connect(dev, CONNECT_EPP_MAYBE); + w_dtr(ppb, 0x40); + w_ctr(ppb, 0x08); + udelay(30); + w_ctr(ppb, 0x0c); + udelay(1000); + ppa_disconnect(dev); + udelay(1000); + if (dev->mode != old_mode) { + dev->mode = old_mode; + goto second_pass; + } + return -EIO; + } + w_ctr(ppb, 0x0c); + k = 1000000; /* 1 Second */ + do { + l = r_str(ppb); + k--; + udelay(1); + } while (!(l & 0x80) && (k)); + + l &= 0xf0; + + if (l != 0xf0) { + ppa_disconnect(dev); + ppa_connect(dev, CONNECT_EPP_MAYBE); + ppa_reset_pulse(ppb); + udelay(1000); + ppa_disconnect(dev); + udelay(1000); + if (dev->mode != old_mode) { + dev->mode = old_mode; + goto second_pass; + } + return -EIO; + } + ppa_disconnect(dev); + printk(KERN_INFO "ppa: Communication established with ID %i using %s\n", + loop, PPA_MODE_STRING[dev->mode]); + ppa_connect(dev, CONNECT_EPP_MAYBE); + ppa_reset_pulse(ppb); + udelay(1000); + ppa_disconnect(dev); + udelay(1000); + return 0; + } + return -ENODEV; +} + +static int ppa_adjust_queue(struct scsi_device *device) +{ + blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH); + return 0; +} + +static const struct scsi_host_template ppa_template = { + .module = THIS_MODULE, + .proc_name = "ppa", + .show_info = ppa_show_info, + .write_info = ppa_write_info, + .name = "Iomega VPI0 (ppa) interface", + .queuecommand = ppa_queuecommand, + .eh_abort_handler = ppa_abort, + .eh_host_reset_handler = ppa_reset, + .bios_param = ppa_biosparam, + .this_id = -1, + .sg_tablesize = SG_ALL, + .can_queue = 1, + .slave_alloc = ppa_adjust_queue, + .cmd_size = sizeof(struct scsi_pointer), +}; + +/*************************************************************************** + * Parallel port probing routines * + ***************************************************************************/ + +static LIST_HEAD(ppa_hosts); + +/* + * Finds the first available device number that can be alloted to the + * new ppa device and returns the address of the previous node so that + * we can add to the tail and have a list in the ascending order. + */ + +static inline ppa_struct *find_parent(void) +{ + ppa_struct *dev, *par = NULL; + unsigned int cnt = 0; + + if (list_empty(&ppa_hosts)) + return NULL; + + list_for_each_entry(dev, &ppa_hosts, list) { + if (dev->dev_no != cnt) + return par; + cnt++; + par = dev; + } + + return par; +} + +static int __ppa_attach(struct parport *pb) +{ + struct Scsi_Host *host; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waiting); + DEFINE_WAIT(wait); + ppa_struct *dev, *temp; + int ports; + int err = -ENOMEM; + struct pardev_cb ppa_cb; + + dev = kzalloc(sizeof(ppa_struct), GFP_KERNEL); + if (!dev) + return -ENOMEM; + dev->base = -1; + dev->mode = mode < PPA_UNKNOWN ? 
mode : PPA_AUTODETECT; + dev->recon_tmo = PPA_RECON_TMO; + init_waitqueue_head(&waiting); + temp = find_parent(); + if (temp) + dev->dev_no = temp->dev_no + 1; + + memset(&ppa_cb, 0, sizeof(ppa_cb)); + ppa_cb.private = dev; + ppa_cb.wakeup = ppa_wakeup; + + dev->dev = parport_register_dev_model(pb, "ppa", &ppa_cb, dev->dev_no); + + if (!dev->dev) + goto out; + + /* Claim the bus so it remembers what we do to the control + * registers. [ CTR and ECP ] + */ + err = -EBUSY; + dev->waiting = &waiting; + prepare_to_wait(&waiting, &wait, TASK_UNINTERRUPTIBLE); + if (ppa_pb_claim(dev)) + schedule_timeout(3 * HZ); + if (dev->wanted) { + printk(KERN_ERR "ppa%d: failed to claim parport because " + "a pardevice is owning the port for too long " + "time!\n", pb->number); + ppa_pb_dismiss(dev); + dev->waiting = NULL; + finish_wait(&waiting, &wait); + goto out1; + } + dev->waiting = NULL; + finish_wait(&waiting, &wait); + dev->base = dev->dev->port->base; + w_ctr(dev->base, 0x0c); + + /* Done configuration */ + + err = ppa_init(dev); + ppa_pb_release(dev); + + if (err) + goto out1; + + /* now the glue ... */ + if (dev->mode == PPA_NIBBLE || dev->mode == PPA_PS2) + ports = 3; + else + ports = 8; + + INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt); + + err = -ENOMEM; + host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *)); + if (!host) + goto out1; + host->io_port = pb->base; + host->n_io_port = ports; + host->dma_channel = -1; + host->unique_id = pb->number; + *(ppa_struct **)&host->hostdata = dev; + dev->host = host; + list_add_tail(&dev->list, &ppa_hosts); + err = scsi_add_host(host, NULL); + if (err) + goto out2; + scsi_scan_host(host); + return 0; +out2: + list_del_init(&dev->list); + scsi_host_put(host); +out1: + parport_unregister_device(dev->dev); +out: + kfree(dev); + return err; +} + +static void ppa_attach(struct parport *pb) +{ + __ppa_attach(pb); +} + +static void ppa_detach(struct parport *pb) +{ + ppa_struct *dev; + list_for_each_entry(dev, &ppa_hosts, list) { + if (dev->dev->port == pb) { + list_del_init(&dev->list); + scsi_remove_host(dev->host); + scsi_host_put(dev->host); + parport_unregister_device(dev->dev); + kfree(dev); + break; + } + } +} + +static struct parport_driver ppa_driver = { + .name = "ppa", + .match_port = ppa_attach, + .detach = ppa_detach, + .devmodel = true, +}; +module_parport_driver(ppa_driver); + +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/ppa.h b/drivers/scsi/ppa.h new file mode 100644 index 000000000..098bcf7b9 --- /dev/null +++ b/drivers/scsi/ppa.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Driver for the PPA3 parallel port SCSI HBA embedded in + * the Iomega ZIP drive + * + * (c) 1996 Grant R. Guenther grant@torque.net + * David Campbell + * + * All comments to David. + */ + +#ifndef _PPA_H +#define _PPA_H + +#define PPA_VERSION "2.07 (for Linux 2.4.x)" + +/* + * this driver has been hacked by Matteo Frigo (athena@theory.lcs.mit.edu) + * to support EPP and scatter-gather. [0.26-athena] + * + * additional hacks by David Campbell + * in response to this driver "mis-behaving" on his machine. + * Fixed EPP to handle "software" changing of EPP port data direction. + * Chased down EPP timeouts + * Made this driver "kernel version friendly" [0.28-athena] + * + * [ Stuff removed ] + * + * Corrected ppa.h for 2.1.x kernels (>=2.1.85) + * Modified "Nat Semi Kludge" for extended chipsets + * [1.41] + * + * Fixed id_probe for EPP 1.9 chipsets (misdetected as EPP 1.7) + * [1.42] + * + * Development solely for 2.1.x kernels from now on! 
+ * [2.00] + * + * Hack and slash at the init code (EPP device check routine) + * Added INSANE option. + * [2.01] + * + * Patch applied to sync against the 2.1.x kernel code + * Included qboot_zip.sh + * [2.02] + * + * Cleaned up the mess left by someone else trying to fix the + * asm section to keep egcc happy. The asm section no longer + * exists, the nibble code is *almost* as fast as the asm code + * providing it is compiled with egcc. + * + * Other clean ups include the follow changes: + * CONFIG_SCSI_PPA_HAVE_PEDANTIC => CONFIG_SCSI_IZIP_EPP16 + * added CONFIG_SCSI_IZIP_SLOW_CTR option + * [2.03] + * + * Use ppa_wait() to check for ready AND connected status bits + * Add ppa_wait() calls to ppa_completion() + * by Peter Cherriman and + * Tim Waugh + * [2.04] + * + * Fix kernel panic on scsi timeout, 2000-08-18 [2.05] + * + * Avoid io_request_lock problems. + * John Cavan [2.06] + * + * Busy wait for connected status bit in ppa_completion() + * in order to cope with some hardware that has this bit low + * for short periods of time. + * Add udelay() to ppa_select() + * by Peter Cherriman and + * Oleg Makarenko + * [2.07] + */ +/* ------ END OF USER CONFIGURABLE PARAMETERS ----- */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +/* batteries not included :-) */ + +/* + * modes in which the driver can operate + */ +#define PPA_AUTODETECT 0 /* Autodetect mode */ +#define PPA_NIBBLE 1 /* work in standard 4 bit mode */ +#define PPA_PS2 2 /* PS/2 byte mode */ +#define PPA_EPP_8 3 /* EPP mode, 8 bit */ +#define PPA_EPP_16 4 /* EPP mode, 16 bit */ +#define PPA_EPP_32 5 /* EPP mode, 32 bit */ +#define PPA_UNKNOWN 6 /* Just in case... */ + +static char *PPA_MODE_STRING[] = +{ + "Autodetect", + "SPP", + "PS/2", + "EPP 8 bit", + "EPP 16 bit", + "EPP 32 bit", + "Unknown"}; + +/* other options */ +#define PPA_BURST_SIZE 512 /* data burst size */ +#define PPA_SELECT_TMO 5000 /* how long to wait for target ? */ +#define PPA_SPIN_TMO 50000 /* ppa_wait loop limiter */ +#define PPA_RECON_TMO 500 /* scsi reconnection loop limiter */ +#define PPA_DEBUG 0 /* debugging option */ +#define IN_EPP_MODE(x) (x == PPA_EPP_8 || x == PPA_EPP_16 || x == PPA_EPP_32) + +/* args to ppa_connect */ +#define CONNECT_EPP_MAYBE 1 +#define CONNECT_NORMAL 0 + +#define r_dtr(x) (unsigned char)inb((x)) +#define r_str(x) (unsigned char)inb((x)+1) +#define r_ctr(x) (unsigned char)inb((x)+2) +#define r_epp(x) (unsigned char)inb((x)+4) +#define r_fifo(x) (unsigned char)inb((x)) /* x must be base_hi */ + /* On PCI is base+0x400 != base_hi */ +#define r_ecr(x) (unsigned char)inb((x)+0x2) /* x must be base_hi */ + +#define w_dtr(x,y) outb(y, (x)) +#define w_str(x,y) outb(y, (x)+1) +#define w_epp(x,y) outb(y, (x)+4) +#define w_fifo(x,y) outb(y, (x)) /* x must be base_hi */ +#define w_ecr(x,y) outb(y, (x)+0x2)/* x must be base_hi */ + +#ifdef CONFIG_SCSI_IZIP_SLOW_CTR +#define w_ctr(x,y) outb_p(y, (x)+2) +#else +#define w_ctr(x,y) outb(y, (x)+2) +#endif + +static int ppa_engine(ppa_struct *, struct scsi_cmnd *); + +#endif /* _PPA_H */ diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c new file mode 100644 index 000000000..90495a832 --- /dev/null +++ b/drivers/scsi/ps3rom.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PS3 BD/DVD/CD-ROM Storage Driver + * + * Copyright (C) 2007 Sony Computer Entertainment Inc. + * Copyright 2007 Sony Corp. 
+ */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + + +#define DEVICE_NAME "ps3rom" + +#define BOUNCE_SIZE (64*1024) + +#define PS3ROM_MAX_SECTORS (BOUNCE_SIZE >> 9) + + +struct ps3rom_private { + struct ps3_storage_device *dev; + struct scsi_cmnd *curr_cmd; +}; + + +#define LV1_STORAGE_SEND_ATAPI_COMMAND (1) + +struct lv1_atapi_cmnd_block { + u8 pkt[32]; /* packet command block */ + u32 pktlen; /* should be 12 for ATAPI 8020 */ + u32 blocks; + u32 block_size; + u32 proto; /* transfer mode */ + u32 in_out; /* transfer direction */ + u64 buffer; /* parameter except command block */ + u32 arglen; /* length above */ +}; + +enum lv1_atapi_proto { + NON_DATA_PROTO = 0, + PIO_DATA_IN_PROTO = 1, + PIO_DATA_OUT_PROTO = 2, + DMA_PROTO = 3 +}; + +enum lv1_atapi_in_out { + DIR_WRITE = 0, /* memory -> device */ + DIR_READ = 1 /* device -> memory */ +}; + + +static int ps3rom_slave_configure(struct scsi_device *scsi_dev) +{ + struct ps3rom_private *priv = shost_priv(scsi_dev->host); + struct ps3_storage_device *dev = priv->dev; + + dev_dbg(&dev->sbd.core, "%s:%u: id %u, lun %llu, channel %u\n", __func__, + __LINE__, scsi_dev->id, scsi_dev->lun, scsi_dev->channel); + + /* + * ATAPI SFF8020 devices use MODE_SENSE_10, + * so we can prohibit MODE_SENSE_6 + */ + scsi_dev->use_10_for_ms = 1; + + /* we don't support {READ,WRITE}_6 */ + scsi_dev->use_10_for_rw = 1; + + return 0; +} + +static int ps3rom_atapi_request(struct ps3_storage_device *dev, + struct scsi_cmnd *cmd) +{ + struct lv1_atapi_cmnd_block atapi_cmnd; + unsigned char opcode = cmd->cmnd[0]; + int res; + u64 lpar; + + dev_dbg(&dev->sbd.core, "%s:%u: send ATAPI command 0x%02x\n", __func__, + __LINE__, opcode); + + memset(&atapi_cmnd, 0, sizeof(struct lv1_atapi_cmnd_block)); + memcpy(&atapi_cmnd.pkt, cmd->cmnd, 12); + atapi_cmnd.pktlen = 12; + atapi_cmnd.block_size = 1; /* transfer size is block_size * blocks */ + atapi_cmnd.blocks = atapi_cmnd.arglen = scsi_bufflen(cmd); + atapi_cmnd.buffer = dev->bounce_lpar; + + switch (cmd->sc_data_direction) { + case DMA_FROM_DEVICE: + if (scsi_bufflen(cmd) >= CD_FRAMESIZE) + atapi_cmnd.proto = DMA_PROTO; + else + atapi_cmnd.proto = PIO_DATA_IN_PROTO; + atapi_cmnd.in_out = DIR_READ; + break; + + case DMA_TO_DEVICE: + if (scsi_bufflen(cmd) >= CD_FRAMESIZE) + atapi_cmnd.proto = DMA_PROTO; + else + atapi_cmnd.proto = PIO_DATA_OUT_PROTO; + atapi_cmnd.in_out = DIR_WRITE; + scsi_sg_copy_to_buffer(cmd, dev->bounce_buf, dev->bounce_size); + break; + + default: + atapi_cmnd.proto = NON_DATA_PROTO; + break; + } + + lpar = ps3_mm_phys_to_lpar(__pa(&atapi_cmnd)); + res = lv1_storage_send_device_command(dev->sbd.dev_id, + LV1_STORAGE_SEND_ATAPI_COMMAND, + lpar, sizeof(atapi_cmnd), + atapi_cmnd.buffer, + atapi_cmnd.arglen, &dev->tag); + if (res == LV1_DENIED_BY_POLICY) { + dev_dbg(&dev->sbd.core, + "%s:%u: ATAPI command 0x%02x denied by policy\n", + __func__, __LINE__, opcode); + return DID_ERROR << 16; + } + + if (res) { + dev_err(&dev->sbd.core, + "%s:%u: ATAPI command 0x%02x failed %d\n", __func__, + __LINE__, opcode, res); + return DID_ERROR << 16; + } + + return 0; +} + +static inline unsigned int srb10_lba(const struct scsi_cmnd *cmd) +{ + return cmd->cmnd[2] << 24 | cmd->cmnd[3] << 16 | cmd->cmnd[4] << 8 | + cmd->cmnd[5]; +} + +static inline unsigned int srb10_len(const struct scsi_cmnd *cmd) +{ + return cmd->cmnd[7] << 8 | cmd->cmnd[8]; +} + +static int ps3rom_read_request(struct ps3_storage_device *dev, + struct scsi_cmnd *cmd, u32 
start_sector, + u32 sectors) +{ + int res; + + dev_dbg(&dev->sbd.core, "%s:%u: read %u sectors starting at %u\n", + __func__, __LINE__, sectors, start_sector); + + res = lv1_storage_read(dev->sbd.dev_id, + dev->regions[dev->region_idx].id, start_sector, + sectors, 0, dev->bounce_lpar, &dev->tag); + if (res) { + dev_err(&dev->sbd.core, "%s:%u: read failed %d\n", __func__, + __LINE__, res); + return DID_ERROR << 16; + } + + return 0; +} + +static int ps3rom_write_request(struct ps3_storage_device *dev, + struct scsi_cmnd *cmd, u32 start_sector, + u32 sectors) +{ + int res; + + dev_dbg(&dev->sbd.core, "%s:%u: write %u sectors starting at %u\n", + __func__, __LINE__, sectors, start_sector); + + scsi_sg_copy_to_buffer(cmd, dev->bounce_buf, dev->bounce_size); + + res = lv1_storage_write(dev->sbd.dev_id, + dev->regions[dev->region_idx].id, start_sector, + sectors, 0, dev->bounce_lpar, &dev->tag); + if (res) { + dev_err(&dev->sbd.core, "%s:%u: write failed %d\n", __func__, + __LINE__, res); + return DID_ERROR << 16; + } + + return 0; +} + +static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd) +{ + struct ps3rom_private *priv = shost_priv(cmd->device->host); + struct ps3_storage_device *dev = priv->dev; + unsigned char opcode; + int res; + + priv->curr_cmd = cmd; + + opcode = cmd->cmnd[0]; + /* + * While we can submit READ/WRITE SCSI commands as ATAPI commands, + * it's recommended for various reasons (performance, error handling, + * ...) to use lv1_storage_{read,write}() instead + */ + switch (opcode) { + case READ_10: + res = ps3rom_read_request(dev, cmd, srb10_lba(cmd), + srb10_len(cmd)); + break; + + case WRITE_10: + res = ps3rom_write_request(dev, cmd, srb10_lba(cmd), + srb10_len(cmd)); + break; + + default: + res = ps3rom_atapi_request(dev, cmd); + break; + } + + if (res) { + scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0, 0); + cmd->result = res; + priv->curr_cmd = NULL; + scsi_done(cmd); + } + + return 0; +} + +static DEF_SCSI_QCMD(ps3rom_queuecommand) + +static int decode_lv1_status(u64 status, unsigned char *sense_key, + unsigned char *asc, unsigned char *ascq) +{ + if (((status >> 24) & 0xff) != SAM_STAT_CHECK_CONDITION) + return -1; + + *sense_key = (status >> 16) & 0xff; + *asc = (status >> 8) & 0xff; + *ascq = status & 0xff; + return 0; +} + +static irqreturn_t ps3rom_interrupt(int irq, void *data) +{ + struct ps3_storage_device *dev = data; + struct Scsi_Host *host; + struct ps3rom_private *priv; + struct scsi_cmnd *cmd; + int res; + u64 tag, status; + unsigned char sense_key, asc, ascq; + + res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status); + /* + * status = -1 may mean that ATAPI transport completed OK, but + * ATAPI command itself resulted CHECK CONDITION + * so, upper layer should issue REQUEST_SENSE to check the sense data + */ + + if (tag != dev->tag) + dev_err(&dev->sbd.core, + "%s:%u: tag mismatch, got %llx, expected %llx\n", + __func__, __LINE__, tag, dev->tag); + + if (res) { + dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n", + __func__, __LINE__, res, status); + return IRQ_HANDLED; + } + + host = ps3_system_bus_get_drvdata(&dev->sbd); + priv = shost_priv(host); + cmd = priv->curr_cmd; + + if (!status) { + /* OK, completed */ + if (cmd->sc_data_direction == DMA_FROM_DEVICE) { + int len; + + len = scsi_sg_copy_from_buffer(cmd, + dev->bounce_buf, + dev->bounce_size); + + scsi_set_resid(cmd, scsi_bufflen(cmd) - len); + } + cmd->result = DID_OK << 16; + goto done; + } + + if (cmd->cmnd[0] == REQUEST_SENSE) { + /* SCSI spec says request sense should 
never get error */ + dev_err(&dev->sbd.core, "%s:%u: end error without autosense\n", + __func__, __LINE__); + cmd->result = DID_ERROR << 16 | SAM_STAT_CHECK_CONDITION; + goto done; + } + + if (decode_lv1_status(status, &sense_key, &asc, &ascq)) { + cmd->result = DID_ERROR << 16; + goto done; + } + + scsi_build_sense(cmd, 0, sense_key, asc, ascq); + +done: + priv->curr_cmd = NULL; + scsi_done(cmd); + return IRQ_HANDLED; +} + +static const struct scsi_host_template ps3rom_host_template = { + .name = DEVICE_NAME, + .slave_configure = ps3rom_slave_configure, + .queuecommand = ps3rom_queuecommand, + .can_queue = 1, + .this_id = 7, + .sg_tablesize = SG_ALL, + .emulated = 1, /* only sg driver uses this */ + .max_sectors = PS3ROM_MAX_SECTORS, + .module = THIS_MODULE, +}; + + +static int ps3rom_probe(struct ps3_system_bus_device *_dev) +{ + struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); + int error; + struct Scsi_Host *host; + struct ps3rom_private *priv; + + if (dev->blk_size != CD_FRAMESIZE) { + dev_err(&dev->sbd.core, + "%s:%u: cannot handle block size %llu\n", __func__, + __LINE__, dev->blk_size); + return -EINVAL; + } + + dev->bounce_size = BOUNCE_SIZE; + dev->bounce_buf = kmalloc(BOUNCE_SIZE, GFP_DMA); + if (!dev->bounce_buf) + return -ENOMEM; + + error = ps3stor_setup(dev, ps3rom_interrupt); + if (error) + goto fail_free_bounce; + + host = scsi_host_alloc(&ps3rom_host_template, + sizeof(struct ps3rom_private)); + if (!host) { + dev_err(&dev->sbd.core, "%s:%u: scsi_host_alloc failed\n", + __func__, __LINE__); + error = -ENOMEM; + goto fail_teardown; + } + + priv = shost_priv(host); + ps3_system_bus_set_drvdata(&dev->sbd, host); + priv->dev = dev; + + /* One device/LUN per SCSI bus */ + host->max_id = 1; + host->max_lun = 1; + + error = scsi_add_host(host, &dev->sbd.core); + if (error) { + dev_err(&dev->sbd.core, "%s:%u: scsi_host_alloc failed %d\n", + __func__, __LINE__, error); + error = -ENODEV; + goto fail_host_put; + } + + scsi_scan_host(host); + return 0; + +fail_host_put: + scsi_host_put(host); + ps3_system_bus_set_drvdata(&dev->sbd, NULL); +fail_teardown: + ps3stor_teardown(dev); +fail_free_bounce: + kfree(dev->bounce_buf); + return error; +} + +static void ps3rom_remove(struct ps3_system_bus_device *_dev) +{ + struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); + struct Scsi_Host *host = ps3_system_bus_get_drvdata(&dev->sbd); + + scsi_remove_host(host); + ps3stor_teardown(dev); + scsi_host_put(host); + ps3_system_bus_set_drvdata(&dev->sbd, NULL); + kfree(dev->bounce_buf); +} + +static struct ps3_system_bus_driver ps3rom = { + .match_id = PS3_MATCH_ID_STOR_ROM, + .core.name = DEVICE_NAME, + .core.owner = THIS_MODULE, + .probe = ps3rom_probe, + .remove = ps3rom_remove +}; + + +static int __init ps3rom_init(void) +{ + return ps3_system_bus_driver_register(&ps3rom); +} + +static void __exit ps3rom_exit(void) +{ + ps3_system_bus_driver_unregister(&ps3rom); +} + +module_init(ps3rom_init); +module_exit(ps3rom_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("PS3 BD/DVD/CD-ROM Storage Driver"); +MODULE_AUTHOR("Sony Corporation"); +MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_ROM); diff --git a/drivers/scsi/qedf/Kconfig b/drivers/scsi/qedf/Kconfig new file mode 100644 index 000000000..eb81a1b03 --- /dev/null +++ b/drivers/scsi/qedf/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +config QEDF + tristate "QLogic QEDF 25/40/100Gb FCoE Initiator Driver Support" + depends on PCI && SCSI + depends on QED + depends on LIBFC + depends on LIBFCOE + 
select QED_LL2 + select QED_FCOE + help + This driver supports FCoE offload for the QLogic FastLinQ + 41000 Series Converged Network Adapters. diff --git a/drivers/scsi/qedf/Makefile b/drivers/scsi/qedf/Makefile new file mode 100644 index 000000000..c46287826 --- /dev/null +++ b/drivers/scsi/qedf/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_QEDF) := qedf.o +qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \ + qedf_attr.o qedf_els.o drv_scsi_fw_funcs.o drv_fcoe_fw_funcs.o + +qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c new file mode 100644 index 000000000..e8bc8d9e4 --- /dev/null +++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* QLogic FCoE Offload Driver + * Copyright (c) 2016-2018 Cavium Inc. + */ +#include "drv_fcoe_fw_funcs.h" +#include "drv_scsi_fw_funcs.h" + +#define FCOE_RX_ID (0xFFFFu) + +static inline void init_common_sqe(struct fcoe_task_params *task_params, + enum fcoe_sqe_request_type request_type) +{ + memset(task_params->sqe, 0, sizeof(*(task_params->sqe))); + SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE, + request_type); + task_params->sqe->task_id = task_params->itid; +} + +int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params, + struct scsi_sgl_task_params *sgl_task_params, + struct regpair sense_data_buffer_phys_addr, + u32 task_retry_id, + u8 fcp_cmd_payload[32]) +{ + struct fcoe_task_context *ctx = task_params->context; + const u8 val_byte = ctx->ystorm_ag_context.byte0; + struct ustorm_fcoe_task_ag_ctx *u_ag_ctx; + struct ystorm_fcoe_task_st_ctx *y_st_ctx; + struct tstorm_fcoe_task_st_ctx *t_st_ctx; + struct mstorm_fcoe_task_st_ctx *m_st_ctx; + u32 io_size, val; + bool slow_sgl; + + memset(ctx, 0, sizeof(*(ctx))); + ctx->ystorm_ag_context.byte0 = val_byte; + slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges, + sgl_task_params->small_mid_sge); + io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ? + task_params->tx_io_size : task_params->rx_io_size); + + /* Ystorm ctx */ + y_st_ctx = &ctx->ystorm_st_context; + y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size); + y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id); + y_st_ctx->task_type = (u8)task_params->task_type; + memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload, + fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload)); + + /* Tstorm ctx */ + t_st_ctx = &ctx->tstorm_st_context; + t_st_ctx->read_only.dev_type = (u8)(task_params->is_tape_device == 1 ? 
+ FCOE_TASK_DEV_TYPE_TAPE : + FCOE_TASK_DEV_TYPE_DISK); + t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid); + val = cpu_to_le32(task_params->cq_rss_number); + t_st_ctx->read_only.glbl_q_num = val; + t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size); + t_st_ctx->read_only.task_type = (u8)task_params->task_type; + SET_FIELD(t_st_ctx->read_write.flags, + FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1); + t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID); + + /* Ustorm ctx */ + u_ag_ctx = &ctx->ustorm_ag_context; + u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number); + + /* Mstorm buffer for sense/rsp data placement */ + m_st_ctx = &ctx->mstorm_st_context; + val = cpu_to_le32(sense_data_buffer_phys_addr.hi); + m_st_ctx->rsp_buf_addr.hi = val; + val = cpu_to_le32(sense_data_buffer_phys_addr.lo); + m_st_ctx->rsp_buf_addr.lo = val; + + if (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) { + /* Ystorm ctx */ + y_st_ctx->expect_first_xfer = 1; + + /* Set the amount of super SGEs. Can be up to 4. */ + SET_FIELD(y_st_ctx->sgl_mode, + YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, + (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL)); + init_scsi_sgl_context(&y_st_ctx->sgl_params, + &y_st_ctx->data_desc, + sgl_task_params); + + /* Mstorm ctx */ + SET_FIELD(m_st_ctx->flags, + MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, + (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL)); + m_st_ctx->sgl_params.sgl_num_sges = + cpu_to_le16(sgl_task_params->num_sges); + } else { + /* Tstorm ctx */ + SET_FIELD(t_st_ctx->read_write.flags, + FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE, + (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL)); + + /* Mstorm ctx */ + m_st_ctx->data_2_trns_rem = cpu_to_le32(io_size); + init_scsi_sgl_context(&m_st_ctx->sgl_params, + &m_st_ctx->data_desc, + sgl_task_params); + } + + /* Init Sqe */ + init_common_sqe(task_params, SEND_FCOE_CMD); + + return 0; +} + +int init_initiator_midpath_unsolicited_fcoe_task( + struct fcoe_task_params *task_params, + struct fcoe_tx_mid_path_params *mid_path_fc_header, + struct scsi_sgl_task_params *tx_sgl_task_params, + struct scsi_sgl_task_params *rx_sgl_task_params, + u8 fw_to_place_fc_header) +{ + struct fcoe_task_context *ctx = task_params->context; + const u8 val_byte = ctx->ystorm_ag_context.byte0; + struct ustorm_fcoe_task_ag_ctx *u_ag_ctx; + struct ystorm_fcoe_task_st_ctx *y_st_ctx; + struct tstorm_fcoe_task_st_ctx *t_st_ctx; + struct mstorm_fcoe_task_st_ctx *m_st_ctx; + u32 val; + + memset(ctx, 0, sizeof(*(ctx))); + ctx->ystorm_ag_context.byte0 = val_byte; + + /* Init Ystorm */ + y_st_ctx = &ctx->ystorm_st_context; + init_scsi_sgl_context(&y_st_ctx->sgl_params, + &y_st_ctx->data_desc, + tx_sgl_task_params); + SET_FIELD(y_st_ctx->sgl_mode, + YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL); + y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size); + y_st_ctx->task_type = (u8)task_params->task_type; + memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path, + mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params)); + + /* Init Mstorm */ + m_st_ctx = &ctx->mstorm_st_context; + init_scsi_sgl_context(&m_st_ctx->sgl_params, + &m_st_ctx->data_desc, + rx_sgl_task_params); + SET_FIELD(m_st_ctx->flags, + MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER, + fw_to_place_fc_header); + m_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->rx_io_size); + + /* Init Tstorm */ + t_st_ctx = &ctx->tstorm_st_context; + t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid); + val = cpu_to_le32(task_params->cq_rss_number); 
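 /* The HSI context members written here are declared __le32/__le16, so each
 * value is byte-swapped with cpu_to_le32()/cpu_to_le16() and staged through
 * the local 'val' before being stored, as done for glbl_q_num just below.
 */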
+ t_st_ctx->read_only.glbl_q_num = val; + t_st_ctx->read_only.task_type = (u8)task_params->task_type; + SET_FIELD(t_st_ctx->read_write.flags, + FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1); + t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID); + + /* Init Ustorm */ + u_ag_ctx = &ctx->ustorm_ag_context; + u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number); + + /* Init SQE */ + init_common_sqe(task_params, SEND_FCOE_MIDPATH); + task_params->sqe->additional_info_union.burst_length = + tx_sgl_task_params->total_buffer_size; + SET_FIELD(task_params->sqe->flags, + FCOE_WQE_NUM_SGES, tx_sgl_task_params->num_sges); + SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE, + SCSI_FAST_SGL); + + return 0; +} + +int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params) +{ + init_common_sqe(task_params, SEND_FCOE_ABTS_REQUEST); + return 0; +} + +int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params) +{ + init_common_sqe(task_params, FCOE_EXCHANGE_CLEANUP); + return 0; +} + +int init_initiator_sequence_recovery_fcoe_task( + struct fcoe_task_params *task_params, u32 desired_offset) +{ + init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY); + task_params->sqe->additional_info_union.seq_rec_updated_offset = + desired_offset; + return 0; +} diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h new file mode 100644 index 000000000..7125e484b --- /dev/null +++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* QLogic FCoE Offload Driver + * Copyright (c) 2016-2018 Cavium Inc. + */ +#ifndef _FCOE_FW_FUNCS_H +#define _FCOE_FW_FUNCS_H +#include "drv_scsi_fw_funcs.h" +#include "qedf_hsi.h" +#include + +struct fcoe_task_params { + /* Output parameter [set/filled by the HSI function] */ + struct fcoe_task_context *context; + + /* Output parameter [set/filled by the HSI function] */ + struct fcoe_wqe *sqe; + enum fcoe_task_type task_type; + u32 tx_io_size; /* in bytes */ + u32 rx_io_size; /* in bytes */ + u32 conn_cid; + u16 itid; + u8 cq_rss_number; + + /* Whether it's Tape device or not (0=Disk, 1=Tape) */ + u8 is_tape_device; +}; + +/** + * @brief init_initiator_rw_fcoe_task - Initializes FCoE task context for + * read/write task types and init fcoe_sqe + * + * @param task_params - Pointer to task parameters struct + * @param sgl_task_params - Pointer to SGL task params + * @param sense_data_buffer_phys_addr - Pointer to sense data buffer + * @param task_retry_id - retry identification - Used only for Tape device + * @param fcp_cmnd_payload - FCP CMD Payload + */ +int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params, + struct scsi_sgl_task_params *sgl_task_params, + struct regpair sense_data_buffer_phys_addr, + u32 task_retry_id, + u8 fcp_cmd_payload[32]); + +/** + * @brief init_initiator_midpath_fcoe_task - Initializes FCoE task context for + * midpath/unsolicited task types and init fcoe_sqe + * + * @param task_params - Pointer to task parameters struct + * @param mid_path_fc_header - FC header + * @param tx_sgl_task_params - Pointer to Tx SGL task params + * @param rx_sgl_task_params - Pointer to Rx SGL task params + * @param fw_to_place_fc_header - Indication if the FW will place the FC header + * in addition to the data arrives. 
+ */ +int init_initiator_midpath_unsolicited_fcoe_task( + struct fcoe_task_params *task_params, + struct fcoe_tx_mid_path_params *mid_path_fc_header, + struct scsi_sgl_task_params *tx_sgl_task_params, + struct scsi_sgl_task_params *rx_sgl_task_params, + u8 fw_to_place_fc_header); + +/** + * @brief init_initiator_abort_fcoe_task - Initializes FCoE task context for + * abort task types and init fcoe_sqe + * + * @param task_params - Pointer to task parameters struct + */ +int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params); + +/** + * @brief init_initiator_cleanup_fcoe_task - Initializes FCoE task context for + * cleanup task types and init fcoe_sqe + * + * + * @param task_params - Pointer to task parameters struct + */ +int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params); + +/** + * @brief init_initiator_sequence_recovery_fcoe_task - Initializes FCoE task context for + * sequence recovery task types and init fcoe_sqe + * + * + * @param task_params - Pointer to task parameters struct + * @param desired_offset - The desired offset the task will be re-sent from + */ +int init_initiator_sequence_recovery_fcoe_task( + struct fcoe_task_params *task_params, + u32 desired_offset); +#endif diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.c b/drivers/scsi/qedf/drv_scsi_fw_funcs.c new file mode 100644 index 000000000..3289b7103 --- /dev/null +++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* QLogic FCoE Offload Driver + * Copyright (c) 2016-2018 Cavium Inc. + */ +#include "drv_scsi_fw_funcs.h" + +#define SCSI_NUM_SGES_IN_CACHE 0x4 + +bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge) +{ + return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge); +} + +void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params, + struct scsi_cached_sges *ctx_data_desc, + struct scsi_sgl_task_params *sgl_task_params) +{ + /* no need to check for sgl_task_params->sgl validity */ + u8 num_sges_to_init = sgl_task_params->num_sges > + SCSI_NUM_SGES_IN_CACHE ? SCSI_NUM_SGES_IN_CACHE : + sgl_task_params->num_sges; + u8 sge_index; + u32 val; + + val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo); + ctx_sgl_params->sgl_addr.lo = val; + val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi); + ctx_sgl_params->sgl_addr.hi = val; + val = cpu_to_le32(sgl_task_params->total_buffer_size); + ctx_sgl_params->sgl_total_length = val; + ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges); + + for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) { + val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo); + ctx_data_desc->sge[sge_index].sge_addr.lo = val; + val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi); + ctx_data_desc->sge[sge_index].sge_addr.hi = val; + val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len); + ctx_data_desc->sge[sge_index].sge_len = val; + } +} diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.h b/drivers/scsi/qedf/drv_scsi_fw_funcs.h new file mode 100644 index 000000000..6195f13de --- /dev/null +++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* QLogic FCoE Offload Driver + * Copyright (c) 2016-2018 Cavium Inc. 
+ */ +#ifndef _SCSI_FW_FUNCS_H +#define _SCSI_FW_FUNCS_H +#include +#include +#include + +struct scsi_sgl_task_params { + struct scsi_sge *sgl; + struct regpair sgl_phys_addr; + u32 total_buffer_size; + u16 num_sges; + + /* true if SGL contains a small (< 4KB) SGE in the middle (not 1st or last) + * -> relevant for tx only + */ + bool small_mid_sge; +}; + +struct scsi_dif_task_params { + u32 initial_ref_tag; + bool initial_ref_tag_is_valid; + u16 application_tag; + u16 application_tag_mask; + u16 dif_block_size_log; + bool dif_on_network; + bool dif_on_host; + u8 host_guard_type; + u8 protection_type; + u8 ref_tag_mask; + bool crc_seed; + + /* Enable Connection error upon DIF error (segments with DIF errors are + * dropped) + */ + bool tx_dif_conn_err_en; + bool ignore_app_tag; + bool keep_ref_tag_const; + bool validate_guard; + bool validate_app_tag; + bool validate_ref_tag; + bool forward_guard; + bool forward_app_tag; + bool forward_ref_tag; + bool forward_app_tag_with_mask; + bool forward_ref_tag_with_mask; +}; + +struct scsi_initiator_cmd_params { + /* for cdb_size > default CDB size (extended CDB > 16 bytes) -> + * pointer to the CDB buffer SGE + */ + struct scsi_sge extended_cdb_sge; + + /* Physical address of sense data buffer for sense data - 256B buffer */ + struct regpair sense_data_buffer_phys_addr; +}; + +/** + * @brief scsi_is_slow_sgl - checks for slow SGL + * + * @param num_sges - number of sges in SGL + * @param small_mid_sge - True if the SGL contains an SGE which is smaller than + * 4KB and it's not the 1st or last SGE in the SGL + */ +bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge); + +/** + * @brief init_scsi_sgl_context - initializes SGL task context + * + * @param sgl_params - SGL context parameters to initialize (output parameter) + * @param data_desc - context struct containing SGEs array to set (output + * parameter) + * @param sgl_task_params - SGL parameters (input) + */ +void init_scsi_sgl_context(struct scsi_sgl_params *sgl_params, + struct scsi_cached_sges *ctx_data_desc, + struct scsi_sgl_task_params *sgl_task_params); +#endif diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h new file mode 100644 index 000000000..1619cc330 --- /dev/null +++ b/drivers/scsi/qedf/qedf.h @@ -0,0 +1,603 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016-2018 Cavium Inc. 
+ */ +#ifndef _QEDFC_H_ +#define _QEDFC_H_ + +#include +#include +#include +#include +#include + +/* qedf_hsi.h needs to before included any qed includes */ +#include "qedf_hsi.h" + +#include +#include +#include +#include "qedf_version.h" +#include "qedf_dbg.h" +#include "drv_fcoe_fw_funcs.h" + +/* Helpers to extract upper and lower 32-bits of pointer */ +#define U64_HI(val) ((u32)(((u64)(val)) >> 32)) +#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff)) + +#define QEDF_DESCR "QLogic FCoE Offload Driver" +#define QEDF_MODULE_NAME "qedf" + +#define QEDF_FLOGI_RETRY_CNT 3 +#define QEDF_RPORT_RETRY_CNT 255 +#define QEDF_MAX_SESSIONS 1024 +#define QEDF_MAX_PAYLOAD 2048 +#define QEDF_MAX_BDS_PER_CMD 256 +#define QEDF_MAX_BD_LEN 0xffff +#define QEDF_BD_SPLIT_SZ 0x1000 +#define QEDF_PAGE_SIZE 4096 +#define QED_HW_DMA_BOUNDARY 0xfff +#define QEDF_MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1) +#define QEDF_MFS (QEDF_MAX_PAYLOAD + \ + sizeof(struct fc_frame_header)) +#define QEDF_MAX_NPIV 64 +#define QEDF_TM_TIMEOUT 10 +#define QEDF_ABORT_TIMEOUT (10 * 1000) +#define QEDF_CLEANUP_TIMEOUT 1 +#define QEDF_MAX_CDB_LEN 16 +#define QEDF_LL2_BUF_SIZE 2500 /* Buffer size required for LL2 Rx */ + +#define UPSTREAM_REMOVE 1 +#define UPSTREAM_KEEP 1 + +struct qedf_mp_req { + uint32_t req_len; + void *req_buf; + dma_addr_t req_buf_dma; + struct scsi_sge *mp_req_bd; + dma_addr_t mp_req_bd_dma; + struct fc_frame_header req_fc_hdr; + + uint32_t resp_len; + void *resp_buf; + dma_addr_t resp_buf_dma; + struct scsi_sge *mp_resp_bd; + dma_addr_t mp_resp_bd_dma; + struct fc_frame_header resp_fc_hdr; +}; + +struct qedf_els_cb_arg { + struct qedf_ioreq *aborted_io_req; + struct qedf_ioreq *io_req; + u8 op; /* Used to keep track of ELS op */ + uint16_t l2_oxid; + u32 offset; /* Used for sequence cleanup */ + u8 r_ctl; /* Used for sequence cleanup */ +}; + +enum qedf_ioreq_event { + QEDF_IOREQ_EV_NONE, + QEDF_IOREQ_EV_ABORT_SUCCESS, + QEDF_IOREQ_EV_ABORT_FAILED, + QEDF_IOREQ_EV_SEND_RRQ, + QEDF_IOREQ_EV_ELS_TMO, + QEDF_IOREQ_EV_ELS_ERR_DETECT, + QEDF_IOREQ_EV_ELS_FLUSH, + QEDF_IOREQ_EV_CLEANUP_SUCCESS, + QEDF_IOREQ_EV_CLEANUP_FAILED, +}; + +#define FC_GOOD 0 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2) +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3) +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0) +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1) +struct qedf_ioreq { + struct list_head link; + uint16_t xid; + struct scsi_cmnd *sc_cmd; +#define QEDF_SCSI_CMD 1 +#define QEDF_TASK_MGMT_CMD 2 +#define QEDF_ABTS 3 +#define QEDF_ELS 4 +#define QEDF_CLEANUP 5 +#define QEDF_SEQ_CLEANUP 6 + u8 cmd_type; +#define QEDF_CMD_OUTSTANDING 0x0 +#define QEDF_CMD_IN_ABORT 0x1 +#define QEDF_CMD_IN_CLEANUP 0x2 +#define QEDF_CMD_SRR_SENT 0x3 +#define QEDF_CMD_DIRTY 0x4 +#define QEDF_CMD_ERR_SCSI_DONE 0x5 + u8 io_req_flags; + uint8_t tm_flags; + struct qedf_rport *fcport; +#define QEDF_CMD_ST_INACTIVE 0 +#define QEDFC_CMD_ST_IO_ACTIVE 1 +#define QEDFC_CMD_ST_ABORT_ACTIVE 2 +#define QEDFC_CMD_ST_ABORT_ACTIVE_EH 3 +#define QEDFC_CMD_ST_CLEANUP_ACTIVE 4 +#define QEDFC_CMD_ST_CLEANUP_ACTIVE_EH 5 +#define QEDFC_CMD_ST_RRQ_ACTIVE 6 +#define QEDFC_CMD_ST_RRQ_WAIT 7 +#define QEDFC_CMD_ST_OXID_RETIRE_WAIT 8 +#define QEDFC_CMD_ST_TMF_ACTIVE 9 +#define QEDFC_CMD_ST_DRAIN_ACTIVE 10 +#define QEDFC_CMD_ST_CLEANED 11 +#define QEDFC_CMD_ST_ELS_ACTIVE 12 + atomic_t state; + unsigned long flags; + enum qedf_ioreq_event event; + size_t data_xfer_len; + /* ID: 001: Alloc cmd (qedf_alloc_cmd) */ + /* ID: 002: Initiate ABTS (qedf_initiate_abts) 
*/ + /* ID: 003: For RRQ (qedf_process_abts_compl) */ + struct kref refcount; + struct qedf_cmd_mgr *cmd_mgr; + struct io_bdt *bd_tbl; + struct delayed_work timeout_work; + struct completion tm_done; + struct completion abts_done; + struct completion cleanup_done; + struct fcoe_task_context *task; + struct fcoe_task_params *task_params; + struct scsi_sgl_task_params *sgl_task_params; + int idx; + int lun; +/* + * Need to allocate enough room for both sense data and FCP response data + * which has a max length of 8 bytes according to spec. + */ +#define QEDF_SCSI_SENSE_BUFFERSIZE (SCSI_SENSE_BUFFERSIZE + 8) + uint8_t *sense_buffer; + dma_addr_t sense_buffer_dma; + u32 fcp_resid; + u32 fcp_rsp_len; + u32 fcp_sns_len; + u8 cdb_status; + u8 fcp_status; + u8 fcp_rsp_code; + u8 scsi_comp_flags; +#define QEDF_MAX_REUSE 0xfff + u16 reuse_count; + struct qedf_mp_req mp_req; + void (*cb_func)(struct qedf_els_cb_arg *cb_arg); + struct qedf_els_cb_arg *cb_arg; + int fp_idx; + unsigned int cpu; + unsigned int int_cpu; +#define QEDF_IOREQ_UNKNOWN_SGE 1 +#define QEDF_IOREQ_SLOW_SGE 2 +#define QEDF_IOREQ_FAST_SGE 3 + u8 sge_type; + struct delayed_work rrq_work; + + /* Used for sequence level recovery; i.e. REC/SRR */ + uint32_t rx_buf_off; + uint32_t tx_buf_off; + uint32_t rx_id; + uint32_t task_retry_identifier; + + /* + * Used to tell if we need to return a SCSI command + * during some form of error processing. + */ + bool return_scsi_cmd_on_abts; + + unsigned int alloc; +}; + +struct qedf_cmd_priv { + struct qedf_ioreq *io_req; +}; + +static inline struct qedf_cmd_priv *qedf_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +extern struct workqueue_struct *qedf_io_wq; + +struct qedf_rport { + spinlock_t rport_lock; +#define QEDF_RPORT_SESSION_READY 1 +#define QEDF_RPORT_UPLOADING_CONNECTION 2 +#define QEDF_RPORT_IN_RESET 3 +#define QEDF_RPORT_IN_LUN_RESET 4 +#define QEDF_RPORT_IN_TARGET_RESET 5 + unsigned long flags; + int lun_reset_lun; + unsigned long retry_delay_timestamp; + struct fc_rport *rport; + struct fc_rport_priv *rdata; + struct qedf_ctx *qedf; + u32 handle; /* Handle from qed */ + u32 fw_cid; /* fw_cid from qed */ + void __iomem *p_doorbell; + /* Send queue management */ + atomic_t free_sqes; + atomic_t ios_to_queue; + atomic_t num_active_ios; + struct fcoe_wqe *sq; + dma_addr_t sq_dma; + u16 sq_prod_idx; + u16 fw_sq_prod_idx; + u16 sq_con_idx; + u32 sq_mem_size; + void *sq_pbl; + dma_addr_t sq_pbl_dma; + u32 sq_pbl_size; + u32 sid; +#define QEDF_RPORT_TYPE_DISK 0 +#define QEDF_RPORT_TYPE_TAPE 1 + uint dev_type; /* Disk or tape */ + struct list_head peers; +}; + +/* Used to contain LL2 skb's in ll2_skb_list */ +struct qedf_skb_work { + struct work_struct work; + struct sk_buff *skb; + struct qedf_ctx *qedf; +}; + +struct qedf_fastpath { +#define QEDF_SB_ID_NULL 0xffff + u16 sb_id; + struct qed_sb_info *sb_info; + struct qedf_ctx *qedf; + /* Keep track of number of completions on this fastpath */ + unsigned long completions; + uint32_t cq_num_entries; +}; + +/* Used to pass fastpath information needed to process CQEs */ +struct qedf_io_work { + struct work_struct work; + struct fcoe_cqe cqe; + struct qedf_ctx *qedf; + struct fc_frame *fp; +}; + +struct qedf_glbl_q_params { + u64 hw_p_cq; /* Completion queue PBL */ + u64 hw_p_rq; /* Request queue PBL */ + u64 hw_p_cmdq; /* Command queue PBL */ +}; + +struct global_queue { + struct fcoe_cqe *cq; + dma_addr_t cq_dma; + u32 cq_mem_size; + u32 cq_cons_idx; /* Completion queue consumer index */ + u32 cq_prod_idx; + + void *cq_pbl; 
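 /* cq_pbl, cq_pbl_dma and cq_pbl_size (below) describe the page base list
 * for this completion queue: a table of page addresses covering the CQ ring
 * that is passed down to the qed core when the global queues are set up.
 */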
+ dma_addr_t cq_pbl_dma; + u32 cq_pbl_size; +}; + +/* I/O tracing entry */ +#define QEDF_IO_TRACE_SIZE 2048 +struct qedf_io_log { +#define QEDF_IO_TRACE_REQ 0 +#define QEDF_IO_TRACE_RSP 1 + uint8_t direction; + uint16_t task_id; + uint32_t port_id; /* Remote port fabric ID */ + int lun; + unsigned char op; /* SCSI CDB */ + uint8_t lba[4]; + unsigned int bufflen; /* SCSI buffer length */ + unsigned int sg_count; /* Number of SG elements */ + int result; /* Result passed back to mid-layer */ + unsigned long jiffies; /* Time stamp when I/O logged */ + int refcount; /* Reference count for task id */ + unsigned int req_cpu; /* CPU that the task is queued on */ + unsigned int int_cpu; /* Interrupt CPU that the task is received on */ + unsigned int rsp_cpu; /* CPU that task is returned on */ + u8 sge_type; /* Did we take the slow, single or fast SGE path */ +}; + +/* Number of entries in BDQ */ +#define QEDF_BDQ_SIZE 256 +#define QEDF_BDQ_BUF_SIZE 2072 + +/* DMA coherent buffers for BDQ */ +struct qedf_bdq_buf { + void *buf_addr; + dma_addr_t buf_dma; +}; + +/* Main adapter struct */ +struct qedf_ctx { + struct qedf_dbg_ctx dbg_ctx; + struct fcoe_ctlr ctlr; + struct fc_lport *lport; + u8 data_src_addr[ETH_ALEN]; +#define QEDF_LINK_DOWN 0 +#define QEDF_LINK_UP 1 + atomic_t link_state; +#define QEDF_DCBX_PENDING 0 +#define QEDF_DCBX_DONE 1 + atomic_t dcbx; +#define QEDF_NULL_VLAN_ID -1 +#define QEDF_FALLBACK_VLAN 1002 +#define QEDF_DEFAULT_PRIO 3 + int vlan_id; + u8 prio; + struct qed_dev *cdev; + struct qed_dev_fcoe_info dev_info; + struct qed_int_info int_info; + uint16_t last_command; + spinlock_t hba_lock; + struct pci_dev *pdev; + u64 wwnn; + u64 wwpn; + u8 __aligned(16) mac[ETH_ALEN]; + struct list_head fcports; + atomic_t num_offloads; + unsigned int curr_conn_id; + struct workqueue_struct *ll2_recv_wq; + struct workqueue_struct *link_update_wq; + struct devlink *devlink; + struct delayed_work link_update; + struct delayed_work link_recovery; + struct completion flogi_compl; + struct completion fipvlan_compl; + + /* + * Used to tell if we're in the window where we are waiting for + * the link to come back up before informting fcoe that the link is + * done. 
+ */ + atomic_t link_down_tmo_valid; +#define QEDF_TIMER_INTERVAL (1 * HZ) + struct timer_list timer; /* One second book keeping timer */ +#define QEDF_DRAIN_ACTIVE 1 +#define QEDF_LL2_STARTED 2 +#define QEDF_UNLOADING 3 +#define QEDF_GRCDUMP_CAPTURE 4 +#define QEDF_IN_RECOVERY 5 +#define QEDF_DBG_STOP_IO 6 +#define QEDF_PROBING 8 + unsigned long flags; /* Miscellaneous state flags */ + int fipvlan_retries; + u8 num_queues; + struct global_queue **global_queues; + /* Pointer to array of queue structures */ + struct qedf_glbl_q_params *p_cpuq; + /* Physical address of array of queue structures */ + dma_addr_t hw_p_cpuq; + + struct qedf_bdq_buf bdq[QEDF_BDQ_SIZE]; + void *bdq_pbl; + dma_addr_t bdq_pbl_dma; + size_t bdq_pbl_mem_size; + void *bdq_pbl_list; + dma_addr_t bdq_pbl_list_dma; + u8 bdq_pbl_list_num_entries; + void __iomem *bdq_primary_prod; + void __iomem *bdq_secondary_prod; + uint16_t bdq_prod_idx; + + /* Structure for holding all the fastpath for this qedf_ctx */ + struct qedf_fastpath *fp_array; + struct qed_fcoe_tid tasks; + struct qedf_cmd_mgr *cmd_mgr; + /* Holds the PF parameters we pass to qed to start he FCoE function */ + struct qed_pf_params pf_params; + /* Used to time middle path ELS and TM commands */ + struct workqueue_struct *timer_work_queue; + +#define QEDF_IO_WORK_MIN 64 + mempool_t *io_mempool; + struct workqueue_struct *dpc_wq; + struct delayed_work recovery_work; + struct delayed_work board_disable_work; + struct delayed_work grcdump_work; + struct delayed_work stag_work; + + u32 slow_sge_ios; + u32 fast_sge_ios; + + uint8_t *grcdump; + uint32_t grcdump_size; + + struct qedf_io_log io_trace_buf[QEDF_IO_TRACE_SIZE]; + spinlock_t io_trace_lock; + uint16_t io_trace_idx; + + bool stop_io_on_error; + + u32 flogi_cnt; + u32 flogi_failed; + u32 flogi_pending; + + /* Used for fc statistics */ + struct mutex stats_mutex; + u64 input_requests; + u64 output_requests; + u64 control_requests; + u64 packet_aborts; + u64 alloc_failures; + u8 lun_resets; + u8 target_resets; + u8 task_set_fulls; + u8 busy; + /* Used for flush routine */ + struct mutex flush_mutex; +}; + +struct io_bdt { + struct qedf_ioreq *io_req; + struct scsi_sge *bd_tbl; + dma_addr_t bd_tbl_dma; + u16 bd_valid; +}; + +struct qedf_cmd_mgr { + struct qedf_ctx *qedf; + u16 idx; + struct io_bdt **io_bdt_pool; +#define FCOE_PARAMS_NUM_TASKS 2048 + struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS]; + spinlock_t lock; + atomic_t free_list_cnt; +}; + +/* Stolen from qed_cxt_api.h and adapted for qed_fcoe_info + * Usage: + * + * void *ptr; + * ptr = qedf_get_task_mem(&qedf->tasks, 128); + */ +static inline void *qedf_get_task_mem(struct qed_fcoe_tid *info, u32 tid) +{ + return (void *)(info->blocks[tid / info->num_tids_per_block] + + (tid % info->num_tids_per_block) * info->size); +} + +static inline void qedf_stop_all_io(struct qedf_ctx *qedf) +{ + set_bit(QEDF_DBG_STOP_IO, &qedf->flags); +} + +/* + * Externs + */ + +/* + * (QEDF_LOG_NPIV | QEDF_LOG_SESS | QEDF_LOG_LPORT | QEDF_LOG_ELS | QEDF_LOG_MQ + * | QEDF_LOG_IO | QEDF_LOG_UNSOL | QEDF_LOG_SCSI_TM | QEDF_LOG_MP_REQ | + * QEDF_LOG_EVT | QEDF_LOG_CONN | QEDF_LOG_DISC | QEDF_LOG_INFO) + */ +#define QEDF_DEFAULT_LOG_MASK 0x3CFB6 +extern const struct qed_fcoe_ops *qed_ops; +extern uint qedf_dump_frames; +extern uint qedf_io_tracing; +extern uint qedf_stop_io_on_error; +extern uint qedf_link_down_tmo; +#define QEDF_RETRY_DELAY_MAX 600 /* 60 seconds */ +extern bool qedf_retry_delay; +extern uint qedf_debug; + +extern struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct 
qedf_ctx *qedf); +extern void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr); +extern int qedf_queuecommand(struct Scsi_Host *host, + struct scsi_cmnd *sc_cmd); +extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb); +extern u8 *qedf_get_src_mac(struct fc_lport *lport); +extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb); +extern void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf); +extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req); +extern void qedf_process_warning_compl(struct qedf_ctx *qedf, + struct fcoe_cqe *cqe, struct qedf_ioreq *io_req); +extern void qedf_process_error_detect(struct qedf_ctx *qedf, + struct fcoe_cqe *cqe, struct qedf_ioreq *io_req); +extern void qedf_flush_active_ios(struct qedf_rport *fcport, int lun); +extern void qedf_release_cmd(struct kref *ref); +extern int qedf_initiate_abts(struct qedf_ioreq *io_req, + bool return_scsi_cmd_on_abts); +extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req); +extern struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, + u8 cmd_type); + +extern const struct attribute_group *qedf_host_groups[]; +extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, + unsigned int timer_msec); +extern int qedf_init_mp_req(struct qedf_ioreq *io_req); +extern void qedf_init_mp_task(struct qedf_ioreq *io_req, + struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe); +extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport); +extern void qedf_ring_doorbell(struct qedf_rport *fcport); +extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *els_req); +extern int qedf_send_rrq(struct qedf_ioreq *aborted_io_req); +extern int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp); +extern int qedf_initiate_cleanup(struct qedf_ioreq *io_req, + bool return_scsi_cmd_on_abts); +extern void qedf_process_cleanup_compl(struct qedf_ctx *qedf, + struct fcoe_cqe *cqe, struct qedf_ioreq *io_req); +extern int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags); +extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req); +extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe); +extern void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, + int result); +extern void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id); +extern void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf); +extern void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf); +extern void qedf_capture_grc_dump(struct qedf_ctx *qedf); +bool qedf_wait_for_upload(struct qedf_ctx *qedf); +extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx, + struct fcoe_cqe *cqe); +extern void qedf_restart_rport(struct qedf_rport *fcport); +extern int qedf_send_rec(struct qedf_ioreq *orig_io_req); +extern int qedf_post_io_req(struct qedf_rport *fcport, + struct qedf_ioreq *io_req); +extern void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf, + struct fcoe_cqe *cqe, struct qedf_ioreq *io_req); +extern int qedf_send_flogi(struct qedf_ctx *qedf); +extern void qedf_get_protocol_tlv_data(void *dev, void *data); +extern void qedf_fp_io_handler(struct work_struct *work); +extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data); +extern void qedf_wq_grcdump(struct work_struct *work); +void qedf_stag_change_work(struct work_struct *work); +void 
qedf_ctx_soft_reset(struct fc_lport *lport); +extern void qedf_schedule_hw_err_handler(void *dev, + enum qed_hw_err_type err_type); + +#define FCOE_WORD_TO_BYTE 4 +#define QEDF_MAX_TASK_NUM 0xFFFF +#define QL45xxx 0x165C +#define QL41xxx 0x8080 +#define MAX_CT_PAYLOAD 2048 +#define DISCOVERED_PORTS 4 +#define NUMBER_OF_PORTS 1 + +struct fip_vlan { + struct ethhdr eth; + struct fip_header fip; + struct { + struct fip_mac_desc mac; + struct fip_wwn_desc wwnn; + } desc; +}; + +/* SQ/CQ Sizes */ +#define GBL_RSVD_TASKS 16 +#define NUM_TASKS_PER_CONNECTION 1024 +#define NUM_RW_TASKS_PER_CONNECTION 512 +#define FCOE_PARAMS_CQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS + +#define FCOE_PARAMS_CMDQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS +#define SQ_NUM_ENTRIES NUM_TASKS_PER_CONNECTION + +#define QEDF_FCOE_PARAMS_GL_RQ_PI 0 +#define QEDF_FCOE_PARAMS_GL_CMD_PI 1 + +#define QEDF_READ (1 << 1) +#define QEDF_WRITE (1 << 0) +#define MAX_FIBRE_LUNS 0xffffffff + +#define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \ + num_online_cpus()) + +/* + * PCI function probe defines + */ +/* Probe/remove called during normal PCI probe */ +#define QEDF_MODE_NORMAL 0 +/* Probe/remove called from qed error recovery */ +#define QEDF_MODE_RECOVERY 1 + +#define SUPPORTED_25000baseKR_Full (1<<27) +#define SUPPORTED_50000baseKR2_Full (1<<28) +#define SUPPORTED_100000baseKR4_Full (1<<29) +#define SUPPORTED_100000baseCR4_Full (1<<30) + +#endif diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c new file mode 100644 index 000000000..8d8c760ee --- /dev/null +++ b/drivers/scsi/qedf/qedf_attr.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016-2018 Cavium Inc. + */ +#include "qedf.h" + +inline bool qedf_is_vport(struct qedf_ctx *qedf) +{ + return qedf->lport->vport != NULL; +} + +/* Get base qedf for physical port from vport */ +static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf) +{ + struct fc_lport *lport; + struct fc_lport *base_lport; + + if (!(qedf_is_vport(qedf))) + return NULL; + + lport = qedf->lport; + base_lport = shost_priv(vport_to_shost(lport->vport)); + return lport_priv(base_lport); +} + +static ssize_t fcoe_mac_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fc_lport *lport = shost_priv(class_to_shost(dev)); + u32 port_id; + u8 lport_src_id[3]; + u8 fcoe_mac[6]; + + port_id = fc_host_port_id(lport->host); + lport_src_id[2] = (port_id & 0x000000FF); + lport_src_id[1] = (port_id & 0x0000FF00) >> 8; + lport_src_id[0] = (port_id & 0x00FF0000) >> 16; + fc_fcoe_set_mac(fcoe_mac, lport_src_id); + + return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac); +} + +static ssize_t fka_period_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fc_lport *lport = shost_priv(class_to_shost(dev)); + struct qedf_ctx *qedf = lport_priv(lport); + int fka_period = -1; + + if (qedf_is_vport(qedf)) + qedf = qedf_get_base_qedf(qedf); + + if (qedf->ctlr.sel_fcf) + fka_period = qedf->ctlr.sel_fcf->fka_period; + + return scnprintf(buf, PAGE_SIZE, "%d\n", fka_period); +} + +static DEVICE_ATTR_RO(fcoe_mac); +static DEVICE_ATTR_RO(fka_period); + +static struct attribute *qedf_host_attrs[] = { + &dev_attr_fcoe_mac.attr, + &dev_attr_fka_period.attr, + NULL, +}; + +static const struct attribute_group qedf_host_attr_group = { + .attrs = qedf_host_attrs +}; + +const struct attribute_group *qedf_host_groups[] = { + &qedf_host_attr_group, + NULL +}; + +extern const struct qed_fcoe_ops *qed_ops; + 
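/*
 * Illustrative sketch only, under the assumption that the host template in
 * qedf_main.c consumes the qedf_host_groups array defined above through the
 * SCSI midlayer's shost_groups hook; the template and field names here are
 * an example of that wiring, not code from this file:
 */
#if 0
static struct scsi_host_template qedf_example_sht = {
	.module		= THIS_MODULE,
	.name		= QEDF_MODULE_NAME,
	.shost_groups	= qedf_host_groups,	/* exported via qedf.h */
};
#endif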
+void qedf_capture_grc_dump(struct qedf_ctx *qedf) +{ + struct qedf_ctx *base_qedf; + + /* Make sure we use the base qedf to take the GRC dump */ + if (qedf_is_vport(qedf)) + base_qedf = qedf_get_base_qedf(qedf); + else + base_qedf = qedf; + + if (test_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags)) { + QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_INFO, + "GRC Dump already captured.\n"); + return; + } + + + qedf_get_grc_dump(base_qedf->cdev, qed_ops->common, + &base_qedf->grcdump, &base_qedf->grcdump_size); + QEDF_ERR(&(base_qedf->dbg_ctx), "GRC Dump captured.\n"); + set_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags); + qedf_uevent_emit(base_qedf->lport->host, QEDF_UEVENT_CODE_GRCDUMP, + NULL); +} + +static ssize_t +qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj, + struct bin_attribute *ba, char *buf, loff_t off, + size_t count) +{ + ssize_t ret = 0; + struct fc_lport *lport = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qedf_ctx *qedf = lport_priv(lport); + + if (test_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags)) { + ret = memory_read_from_buffer(buf, count, &off, + qedf->grcdump, qedf->grcdump_size); + } else { + QEDF_ERR(&(qedf->dbg_ctx), "GRC Dump not captured!\n"); + } + + return ret; +} + +static ssize_t +qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj, + struct bin_attribute *ba, char *buf, loff_t off, + size_t count) +{ + struct fc_lport *lport = NULL; + struct qedf_ctx *qedf = NULL; + long reading; + int ret = 0; + + if (off != 0) + return ret; + + + lport = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + qedf = lport_priv(lport); + + buf[1] = 0; + ret = kstrtol(buf, 10, &reading); + if (ret) { + QEDF_ERR(&(qedf->dbg_ctx), "Invalid input, err(%d)\n", ret); + return ret; + } + + switch (reading) { + case 0: + memset(qedf->grcdump, 0, qedf->grcdump_size); + clear_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags); + break; + case 1: + qedf_capture_grc_dump(qedf); + break; + } + + return count; +} + +static struct bin_attribute sysfs_grcdump_attr = { + .attr = { + .name = "grcdump", + .mode = S_IRUSR | S_IWUSR, + }, + .size = 0, + .read = qedf_sysfs_read_grcdump, + .write = qedf_sysfs_write_grcdump, +}; + +static struct sysfs_bin_attrs bin_file_entries[] = { + {"grcdump", &sysfs_grcdump_attr}, + {NULL}, +}; + +void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf) +{ + qedf_create_sysfs_attr(qedf->lport->host, bin_file_entries); +} + +void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf) +{ + qedf_remove_sysfs_attr(qedf->lport->host, bin_file_entries); +} diff --git a/drivers/scsi/qedf/qedf_dbg.c b/drivers/scsi/qedf/qedf_dbg.c new file mode 100644 index 000000000..0d2aed828 --- /dev/null +++ b/drivers/scsi/qedf/qedf_dbg.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016-2018 Cavium Inc. + */ +#include "qedf_dbg.h" +#include + +void +qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (likely(qedf) && likely(qedf->pdev)) + pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), + func, line, qedf->host_no, &vaf); + else + pr_err("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); + + va_end(va); +} + +void +qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + const char *fmt, ...) 
+{ + va_list va; + struct va_format vaf; + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!(qedf_debug & QEDF_LOG_WARN)) + goto ret; + + if (likely(qedf) && likely(qedf->pdev)) + pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), + func, line, qedf->host_no, &vaf); + else + pr_warn("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); + +ret: + va_end(va); +} + +void +qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!(qedf_debug & QEDF_LOG_NOTICE)) + goto ret; + + if (likely(qedf) && likely(qedf->pdev)) + pr_notice("[%s]:[%s:%d]:%d: %pV", + dev_name(&(qedf->pdev->dev)), func, line, + qedf->host_no, &vaf); + else + pr_notice("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); + +ret: + va_end(va); +} + +void +qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + u32 level, const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!(qedf_debug & level)) + goto ret; + + if (likely(qedf) && likely(qedf->pdev)) + pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), + func, line, qedf->host_no, &vaf); + else + pr_info("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); + +ret: + va_end(va); +} + +int +qedf_alloc_grc_dump_buf(u8 **buf, uint32_t len) +{ + *buf = vzalloc(len); + if (!(*buf)) + return -ENOMEM; + + return 0; +} + +void +qedf_free_grc_dump_buf(uint8_t **buf) +{ + vfree(*buf); + *buf = NULL; +} + +int +qedf_get_grc_dump(struct qed_dev *cdev, const struct qed_common_ops *common, + u8 **buf, uint32_t *grcsize) +{ + if (!*buf) + return -EINVAL; + + return common->dbg_all_data(cdev, *buf); +} + +void +qedf_uevent_emit(struct Scsi_Host *shost, u32 code, char *msg) +{ + char event_string[40]; + char *envp[] = {event_string, NULL}; + + memset(event_string, 0, sizeof(event_string)); + switch (code) { + case QEDF_UEVENT_CODE_GRCDUMP: + if (msg) + strscpy(event_string, msg, sizeof(event_string)); + else + sprintf(event_string, "GRCDUMP=%u", shost->host_no); + break; + default: + /* do nothing */ + break; + } + + kobject_uevent_env(&shost->shost_gendev.kobj, KOBJ_CHANGE, envp); +} + +int +qedf_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) +{ + int ret = 0; + + for (; iter->name; iter++) { + ret = sysfs_create_bin_file(&shost->shost_gendev.kobj, + iter->attr); + if (ret) + pr_err("Unable to create sysfs %s attr, err(%d).\n", + iter->name, ret); + } + return ret; +} + +void +qedf_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) +{ + for (; iter->name; iter++) + sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr); +} diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h new file mode 100644 index 000000000..5ec2b817c --- /dev/null +++ b/drivers/scsi/qedf/qedf_dbg.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016-2018 Cavium Inc. 
+ */ +#ifndef _QEDF_DBG_H_ +#define _QEDF_DBG_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +extern uint qedf_debug; + +/* Debug print level definitions */ +#define QEDF_LOG_DEFAULT 0x1 /* Set default logging mask */ +#define QEDF_LOG_INFO 0x2 /* + * Informational logs, + * MAC address, WWPN, WWNN + */ +#define QEDF_LOG_DISC 0x4 /* Init, discovery, rport */ +#define QEDF_LOG_LL2 0x8 /* LL2, VLAN logs */ +#define QEDF_LOG_CONN 0x10 /* Connection setup, cleanup */ +#define QEDF_LOG_EVT 0x20 /* Events, link, mtu */ +#define QEDF_LOG_TIMER 0x40 /* Timer events */ +#define QEDF_LOG_MP_REQ 0x80 /* Middle Path (MP) logs */ +#define QEDF_LOG_SCSI_TM 0x100 /* SCSI Aborts, Task Mgmt */ +#define QEDF_LOG_UNSOL 0x200 /* unsolicited event logs */ +#define QEDF_LOG_IO 0x400 /* scsi cmd, completion */ +#define QEDF_LOG_MQ 0x800 /* Multi Queue logs */ +#define QEDF_LOG_BSG 0x1000 /* BSG logs */ +#define QEDF_LOG_DEBUGFS 0x2000 /* debugFS logs */ +#define QEDF_LOG_LPORT 0x4000 /* lport logs */ +#define QEDF_LOG_ELS 0x8000 /* ELS logs */ +#define QEDF_LOG_NPIV 0x10000 /* NPIV logs */ +#define QEDF_LOG_SESS 0x20000 /* Connection setup, cleanup */ +#define QEDF_LOG_TID 0x80000 /* + * FW TID context acquire + * free + */ +#define QEDF_TRACK_TID 0x100000 /* + * Track TID state. To be + * enabled only at module load + * and not run-time. + */ +#define QEDF_TRACK_CMD_LIST 0x300000 /* + * Track active cmd list nodes, + * done with reference to TID, + * hence TRACK_TID also enabled. + */ +#define QEDF_LOG_NOTICE 0x40000000 /* Notice logs */ +#define QEDF_LOG_WARN 0x80000000 /* Warning logs */ + +#define QEDF_DEBUGFS_LOG_LEN (2 * PAGE_SIZE) + +/* Debug context structure */ +struct qedf_dbg_ctx { + unsigned int host_no; + struct pci_dev *pdev; +#ifdef CONFIG_DEBUG_FS + struct dentry *bdf_dentry; +#endif +}; + +#define QEDF_ERR(pdev, fmt, ...) \ + qedf_dbg_err(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__) +#define QEDF_WARN(pdev, fmt, ...) \ + qedf_dbg_warn(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__) +#define QEDF_NOTICE(pdev, fmt, ...) \ + qedf_dbg_notice(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__) +#define QEDF_INFO(pdev, level, fmt, ...) 
\ + qedf_dbg_info(pdev, __func__, __LINE__, level, fmt, \ + ## __VA_ARGS__) +__printf(4, 5) +void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + const char *fmt, ...); +__printf(4, 5) +void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + const char *, ...); +__printf(4, 5) +void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, + u32 line, const char *, ...); +__printf(5, 6) +void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + u32 info, const char *fmt, ...); + +/* GRC Dump related defines */ + +struct Scsi_Host; + +#define QEDF_UEVENT_CODE_GRCDUMP 0 + +struct sysfs_bin_attrs { + char *name; + struct bin_attribute *attr; +}; + +extern int qedf_alloc_grc_dump_buf(uint8_t **buf, uint32_t len); +extern void qedf_free_grc_dump_buf(uint8_t **buf); +extern int qedf_get_grc_dump(struct qed_dev *cdev, + const struct qed_common_ops *common, uint8_t **buf, + uint32_t *grcsize); +extern void qedf_uevent_emit(struct Scsi_Host *shost, u32 code, char *msg); +extern int qedf_create_sysfs_attr(struct Scsi_Host *shost, + struct sysfs_bin_attrs *iter); +extern void qedf_remove_sysfs_attr(struct Scsi_Host *shost, + struct sysfs_bin_attrs *iter); + +struct qedf_debugfs_ops { + char *name; + struct qedf_list_of_funcs *qedf_funcs; +}; + +extern const struct qedf_debugfs_ops qedf_debugfs_ops[]; +extern const struct file_operations qedf_dbg_fops[]; + +#ifdef CONFIG_DEBUG_FS +/* DebugFS related code */ +struct qedf_list_of_funcs { + char *oper_str; + ssize_t (*oper_func)(struct qedf_dbg_ctx *qedf); +}; + +#define qedf_dbg_fileops(drv, ops) \ +{ \ + .owner = THIS_MODULE, \ + .open = simple_open, \ + .read = drv##_dbg_##ops##_cmd_read, \ + .write = drv##_dbg_##ops##_cmd_write \ +} + +/* Used for debugfs sequential files */ +#define qedf_dbg_fileops_seq(drv, ops) \ +{ \ + .owner = THIS_MODULE, \ + .open = drv##_dbg_##ops##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +extern void qedf_dbg_host_init(struct qedf_dbg_ctx *qedf, + const struct qedf_debugfs_ops *dops, + const struct file_operations *fops); +extern void qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf); +extern void qedf_dbg_init(char *drv_name); +extern void qedf_dbg_exit(void); +#endif /* CONFIG_DEBUG_FS */ + +#endif /* _QEDF_DBG_H_ */ diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c new file mode 100644 index 000000000..451fd236b --- /dev/null +++ b/drivers/scsi/qedf/qedf_debugfs.c @@ -0,0 +1,494 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016-2018 QLogic Corporation + */ +#ifdef CONFIG_DEBUG_FS + +#include +#include +#include +#include + +#include "qedf.h" +#include "qedf_dbg.h" + +static struct dentry *qedf_dbg_root; + +/* + * qedf_dbg_host_init - setup the debugfs file for the pf + */ +void +qedf_dbg_host_init(struct qedf_dbg_ctx *qedf, + const struct qedf_debugfs_ops *dops, + const struct file_operations *fops) +{ + char host_dirname[32]; + + QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Creating debugfs host node\n"); + /* create pf dir */ + sprintf(host_dirname, "host%u", qedf->host_no); + qedf->bdf_dentry = debugfs_create_dir(host_dirname, qedf_dbg_root); + + /* create debugfs files */ + while (dops) { + if (!(dops->name)) + break; + + debugfs_create_file(dops->name, 0600, qedf->bdf_dentry, qedf, + fops); + dops++; + fops++; + } +} + +/* + * qedf_dbg_host_exit - clear out the pf's debugfs entries + */ +void +qedf_dbg_host_exit(struct qedf_dbg_ctx 
*qedf_dbg) +{ + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Destroying debugfs host " + "entry\n"); + /* remove debugfs entries of this PF */ + debugfs_remove_recursive(qedf_dbg->bdf_dentry); + qedf_dbg->bdf_dentry = NULL; +} + +/* + * qedf_dbg_init - start up debugfs for the driver + */ +void +qedf_dbg_init(char *drv_name) +{ + QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Creating debugfs root node\n"); + + /* create qed dir in root of debugfs. NULL means debugfs root */ + qedf_dbg_root = debugfs_create_dir(drv_name, NULL); +} + +/* + * qedf_dbg_exit - clean out the driver's debugfs entries + */ +void +qedf_dbg_exit(void) +{ + QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Destroying debugfs root " + "entry\n"); + + /* remove qed dir in root of debugfs */ + debugfs_remove_recursive(qedf_dbg_root); + qedf_dbg_root = NULL; +} + +const struct qedf_debugfs_ops qedf_debugfs_ops[] = { + { "fp_int", NULL }, + { "io_trace", NULL }, + { "debug", NULL }, + { "stop_io_on_error", NULL}, + { "driver_stats", NULL}, + { "clear_stats", NULL}, + { "offload_stats", NULL}, + /* This must be last */ + { NULL, NULL } +}; + +DECLARE_PER_CPU(struct qedf_percpu_iothread_s, qedf_percpu_iothreads); + +static ssize_t +qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + ssize_t ret; + size_t cnt = 0; + char *cbuf; + int id; + struct qedf_fastpath *fp = NULL; + struct qedf_dbg_ctx *qedf_dbg = + (struct qedf_dbg_ctx *)filp->private_data; + struct qedf_ctx *qedf = container_of(qedf_dbg, + struct qedf_ctx, dbg_ctx); + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); + + cbuf = vmalloc(QEDF_DEBUGFS_LOG_LEN); + if (!cbuf) + return 0; + + cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt, "\nFastpath I/O completions\n\n"); + + for (id = 0; id < qedf->num_queues; id++) { + fp = &(qedf->fp_array[id]); + if (fp->sb_id == QEDF_SB_ID_NULL) + continue; + cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt, + "#%d: %lu\n", id, fp->completions); + } + + ret = simple_read_from_buffer(buffer, count, ppos, cbuf, cnt); + + vfree(cbuf); + + return ret; +} + +static ssize_t +qedf_dbg_fp_int_cmd_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + if (!count || *ppos) + return 0; + + return count; +} + +static ssize_t +qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + int cnt; + char cbuf[32]; + struct qedf_dbg_ctx *qedf_dbg = + (struct qedf_dbg_ctx *)filp->private_data; + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "debug mask=0x%x\n", qedf_debug); + cnt = scnprintf(cbuf, sizeof(cbuf), "debug mask = 0x%x\n", qedf_debug); + + return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt); +} + +static ssize_t +qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + uint32_t val; + void *kern_buf; + int rval; + struct qedf_dbg_ctx *qedf_dbg = + (struct qedf_dbg_ctx *)filp->private_data; + + if (!count || *ppos) + return 0; + + kern_buf = memdup_user(buffer, count); + if (IS_ERR(kern_buf)) + return PTR_ERR(kern_buf); + + rval = kstrtouint(kern_buf, 10, &val); + kfree(kern_buf); + if (rval) + return rval; + + if (val == 1) + qedf_debug = QEDF_DEFAULT_LOG_MASK; + else + qedf_debug = val; + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val); + return count; +} + +static ssize_t +qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + int cnt; + char cbuf[7]; + struct qedf_dbg_ctx *qedf_dbg = + (struct 
qedf_dbg_ctx *)filp->private_data; + struct qedf_ctx *qedf = container_of(qedf_dbg, + struct qedf_ctx, dbg_ctx); + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); + cnt = scnprintf(cbuf, sizeof(cbuf), "%s\n", + qedf->stop_io_on_error ? "true" : "false"); + + return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt); +} + +static ssize_t +qedf_dbg_stop_io_on_error_cmd_write(struct file *filp, + const char __user *buffer, size_t count, + loff_t *ppos) +{ + void *kern_buf; + struct qedf_dbg_ctx *qedf_dbg = + (struct qedf_dbg_ctx *)filp->private_data; + struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx, + dbg_ctx); + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); + + if (!count || *ppos) + return 0; + + kern_buf = memdup_user(buffer, 6); + if (IS_ERR(kern_buf)) + return PTR_ERR(kern_buf); + + if (strncmp(kern_buf, "false", 5) == 0) + qedf->stop_io_on_error = false; + else if (strncmp(kern_buf, "true", 4) == 0) + qedf->stop_io_on_error = true; + else if (strncmp(kern_buf, "now", 3) == 0) + /* Trigger from user to stop all I/O on this host */ + set_bit(QEDF_DBG_STOP_IO, &qedf->flags); + + kfree(kern_buf); + return count; +} + +static int +qedf_io_trace_show(struct seq_file *s, void *unused) +{ + int i, idx = 0; + struct qedf_ctx *qedf = s->private; + struct qedf_dbg_ctx *qedf_dbg = &qedf->dbg_ctx; + struct qedf_io_log *io_log; + unsigned long flags; + + if (!qedf_io_tracing) { + seq_puts(s, "I/O tracing not enabled.\n"); + goto out; + } + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); + + spin_lock_irqsave(&qedf->io_trace_lock, flags); + idx = qedf->io_trace_idx; + for (i = 0; i < QEDF_IO_TRACE_SIZE; i++) { + io_log = &qedf->io_trace_buf[idx]; + seq_printf(s, "%d:", io_log->direction); + seq_printf(s, "0x%x:", io_log->task_id); + seq_printf(s, "0x%06x:", io_log->port_id); + seq_printf(s, "%d:", io_log->lun); + seq_printf(s, "0x%02x:", io_log->op); + seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0], + io_log->lba[1], io_log->lba[2], io_log->lba[3]); + seq_printf(s, "%d:", io_log->bufflen); + seq_printf(s, "%d:", io_log->sg_count); + seq_printf(s, "0x%08x:", io_log->result); + seq_printf(s, "%lu:", io_log->jiffies); + seq_printf(s, "%d:", io_log->refcount); + seq_printf(s, "%d:", io_log->req_cpu); + seq_printf(s, "%d:", io_log->int_cpu); + seq_printf(s, "%d:", io_log->rsp_cpu); + seq_printf(s, "%d\n", io_log->sge_type); + + idx++; + if (idx == QEDF_IO_TRACE_SIZE) + idx = 0; + } + spin_unlock_irqrestore(&qedf->io_trace_lock, flags); + +out: + return 0; +} + +static int +qedf_dbg_io_trace_open(struct inode *inode, struct file *file) +{ + struct qedf_dbg_ctx *qedf_dbg = inode->i_private; + struct qedf_ctx *qedf = container_of(qedf_dbg, + struct qedf_ctx, dbg_ctx); + + return single_open(file, qedf_io_trace_show, qedf); +} + +/* Based on fip_state enum from libfcoe.h */ +static char *fip_state_names[] = { + "FIP_ST_DISABLED", + "FIP_ST_LINK_WAIT", + "FIP_ST_AUTO", + "FIP_ST_NON_FIP", + "FIP_ST_ENABLED", + "FIP_ST_VNMP_START", + "FIP_ST_VNMP_PROBE1", + "FIP_ST_VNMP_PROBE2", + "FIP_ST_VNMP_CLAIM", + "FIP_ST_VNMP_UP", +}; + +/* Based on fc_rport_state enum from libfc.h */ +static char *fc_rport_state_names[] = { + "RPORT_ST_INIT", + "RPORT_ST_FLOGI", + "RPORT_ST_PLOGI_WAIT", + "RPORT_ST_PLOGI", + "RPORT_ST_PRLI", + "RPORT_ST_RTV", + "RPORT_ST_READY", + "RPORT_ST_ADISC", + "RPORT_ST_DELETE", +}; + +static int +qedf_driver_stats_show(struct seq_file *s, void *unused) +{ + struct qedf_ctx *qedf = s->private; + struct qedf_rport *fcport; + struct fc_rport_priv *rdata; 
+ + seq_printf(s, "Host WWNN/WWPN: %016llx/%016llx\n", + qedf->wwnn, qedf->wwpn); + seq_printf(s, "Host NPortID: %06x\n", qedf->lport->port_id); + seq_printf(s, "Link State: %s\n", atomic_read(&qedf->link_state) ? + "Up" : "Down"); + seq_printf(s, "Logical Link State: %s\n", qedf->lport->link_up ? + "Up" : "Down"); + seq_printf(s, "FIP state: %s\n", fip_state_names[qedf->ctlr.state]); + seq_printf(s, "FIP VLAN ID: %d\n", qedf->vlan_id & 0xfff); + seq_printf(s, "FIP 802.1Q Priority: %d\n", qedf->prio); + if (qedf->ctlr.sel_fcf) { + seq_printf(s, "FCF WWPN: %016llx\n", + qedf->ctlr.sel_fcf->switch_name); + seq_printf(s, "FCF MAC: %pM\n", qedf->ctlr.sel_fcf->fcf_mac); + } else { + seq_puts(s, "FCF not selected\n"); + } + + seq_puts(s, "\nSGE stats:\n\n"); + seq_printf(s, "cmg_mgr free io_reqs: %d\n", + atomic_read(&qedf->cmd_mgr->free_list_cnt)); + seq_printf(s, "slow SGEs: %d\n", qedf->slow_sge_ios); + seq_printf(s, "fast SGEs: %d\n\n", qedf->fast_sge_ios); + + seq_puts(s, "Offloaded ports:\n\n"); + + rcu_read_lock(); + list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { + rdata = fcport->rdata; + if (rdata == NULL) + continue; + seq_printf(s, "%016llx/%016llx/%06x: state=%s, free_sqes=%d, num_active_ios=%d\n", + rdata->rport->node_name, rdata->rport->port_name, + rdata->ids.port_id, + fc_rport_state_names[rdata->rp_state], + atomic_read(&fcport->free_sqes), + atomic_read(&fcport->num_active_ios)); + } + rcu_read_unlock(); + + return 0; +} + +static int +qedf_dbg_driver_stats_open(struct inode *inode, struct file *file) +{ + struct qedf_dbg_ctx *qedf_dbg = inode->i_private; + struct qedf_ctx *qedf = container_of(qedf_dbg, + struct qedf_ctx, dbg_ctx); + + return single_open(file, qedf_driver_stats_show, qedf); +} + +static ssize_t +qedf_dbg_clear_stats_cmd_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + int cnt = 0; + + /* Essentially a read stub */ + cnt = min_t(int, count, cnt - *ppos); + *ppos += cnt; + return cnt; +} + +static ssize_t +qedf_dbg_clear_stats_cmd_write(struct file *filp, + const char __user *buffer, size_t count, + loff_t *ppos) +{ + struct qedf_dbg_ctx *qedf_dbg = + (struct qedf_dbg_ctx *)filp->private_data; + struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx, + dbg_ctx); + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Clearing stat counters.\n"); + + if (!count || *ppos) + return 0; + + /* Clear stat counters exposed by 'stats' node */ + qedf->slow_sge_ios = 0; + qedf->fast_sge_ios = 0; + + return count; +} + +static int +qedf_offload_stats_show(struct seq_file *s, void *unused) +{ + struct qedf_ctx *qedf = s->private; + struct qed_fcoe_stats *fw_fcoe_stats; + + fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL); + if (!fw_fcoe_stats) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for " + "fw_fcoe_stats.\n"); + goto out; + } + + /* Query firmware for offload stats */ + qed_ops->get_stats(qedf->cdev, fw_fcoe_stats); + + seq_printf(s, "fcoe_rx_byte_cnt=%llu\n" + "fcoe_rx_data_pkt_cnt=%llu\n" + "fcoe_rx_xfer_pkt_cnt=%llu\n" + "fcoe_rx_other_pkt_cnt=%llu\n" + "fcoe_silent_drop_pkt_cmdq_full_cnt=%u\n" + "fcoe_silent_drop_pkt_crc_error_cnt=%u\n" + "fcoe_silent_drop_pkt_task_invalid_cnt=%u\n" + "fcoe_silent_drop_total_pkt_cnt=%u\n" + "fcoe_silent_drop_pkt_rq_full_cnt=%u\n" + "fcoe_tx_byte_cnt=%llu\n" + "fcoe_tx_data_pkt_cnt=%llu\n" + "fcoe_tx_xfer_pkt_cnt=%llu\n" + "fcoe_tx_other_pkt_cnt=%llu\n", + fw_fcoe_stats->fcoe_rx_byte_cnt, + fw_fcoe_stats->fcoe_rx_data_pkt_cnt, + fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt, + 
fw_fcoe_stats->fcoe_rx_other_pkt_cnt, + fw_fcoe_stats->fcoe_silent_drop_pkt_cmdq_full_cnt, + fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt, + fw_fcoe_stats->fcoe_silent_drop_pkt_task_invalid_cnt, + fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt, + fw_fcoe_stats->fcoe_silent_drop_pkt_rq_full_cnt, + fw_fcoe_stats->fcoe_tx_byte_cnt, + fw_fcoe_stats->fcoe_tx_data_pkt_cnt, + fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt, + fw_fcoe_stats->fcoe_tx_other_pkt_cnt); + + kfree(fw_fcoe_stats); +out: + return 0; +} + +static int +qedf_dbg_offload_stats_open(struct inode *inode, struct file *file) +{ + struct qedf_dbg_ctx *qedf_dbg = inode->i_private; + struct qedf_ctx *qedf = container_of(qedf_dbg, + struct qedf_ctx, dbg_ctx); + + return single_open(file, qedf_offload_stats_show, qedf); +} + +const struct file_operations qedf_dbg_fops[] = { + qedf_dbg_fileops(qedf, fp_int), + qedf_dbg_fileops_seq(qedf, io_trace), + qedf_dbg_fileops(qedf, debug), + qedf_dbg_fileops(qedf, stop_io_on_error), + qedf_dbg_fileops_seq(qedf, driver_stats), + qedf_dbg_fileops(qedf, clear_stats), + qedf_dbg_fileops_seq(qedf, offload_stats), + /* This must be last */ + { }, +}; + +#else /* CONFIG_DEBUG_FS */ +void qedf_dbg_host_init(struct qedf_dbg_ctx *); +void qedf_dbg_host_exit(struct qedf_dbg_ctx *); +void qedf_dbg_init(char *); +void qedf_dbg_exit(void); +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c new file mode 100644 index 000000000..1ff5bc314 --- /dev/null +++ b/drivers/scsi/qedf/qedf_els.c @@ -0,0 +1,1066 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016-2018 Cavium Inc. + */ +#include "qedf.h" + +/* It's assumed that the lock is held when calling this function. */ +static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op, + void *data, uint32_t data_len, + void (*cb_func)(struct qedf_els_cb_arg *cb_arg), + struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec) +{ + struct qedf_ctx *qedf; + struct fc_lport *lport; + struct qedf_ioreq *els_req; + struct qedf_mp_req *mp_req; + struct fc_frame_header *fc_hdr; + struct fcoe_task_context *task; + int rc = 0; + uint32_t did, sid; + uint16_t xid; + struct fcoe_wqe *sqe; + unsigned long flags; + u16 sqe_idx; + + if (!fcport) { + QEDF_ERR(NULL, "fcport is NULL"); + rc = -EINVAL; + goto els_err; + } + + qedf = fcport->qedf; + lport = qedf->lport; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n"); + + rc = fc_remote_port_chkready(fcport->rport); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op); + rc = -EAGAIN; + goto els_err; + } + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n", + op); + rc = -EAGAIN; + goto els_err; + } + + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op); + rc = -EINVAL; + goto els_err; + } + + els_req = qedf_alloc_cmd(fcport, QEDF_ELS); + if (!els_req) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS, + "Failed to alloc ELS request 0x%x\n", op); + rc = -ENOMEM; + goto els_err; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = " + "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg, + els_req->xid); + els_req->sc_cmd = NULL; + els_req->cmd_type = QEDF_ELS; + els_req->fcport = fcport; + els_req->cb_func = cb_func; + cb_arg->io_req = els_req; + cb_arg->op = op; + els_req->cb_arg = cb_arg; + els_req->data_xfer_len = data_len; + + /* Record which cpu this 
request is associated with */ + els_req->cpu = smp_processor_id(); + + mp_req = (struct qedf_mp_req *)&(els_req->mp_req); + rc = qedf_init_mp_req(els_req); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n"); + kref_put(&els_req->refcount, qedf_release_cmd); + goto els_err; + } else { + rc = 0; + } + + /* Fill ELS Payload */ + if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) { + memcpy(mp_req->req_buf, data, data_len); + } else { + QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op); + els_req->cb_func = NULL; + els_req->cb_arg = NULL; + kref_put(&els_req->refcount, qedf_release_cmd); + rc = -EINVAL; + } + + if (rc) + goto els_err; + + /* Fill FC header */ + fc_hdr = &(mp_req->req_fc_hdr); + + did = fcport->rdata->ids.port_id; + sid = fcport->sid; + + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid, + FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + + /* Obtain exchange id */ + xid = els_req->xid; + + spin_lock_irqsave(&fcport->rport_lock, flags); + + sqe_idx = qedf_get_sqe_idx(fcport); + sqe = &fcport->sq[sqe_idx]; + memset(sqe, 0, sizeof(struct fcoe_wqe)); + + /* Initialize task context for this IO request */ + task = qedf_get_task_mem(&qedf->tasks, xid); + qedf_init_mp_task(els_req, task, sqe); + + /* Put timer on els request */ + if (timer_msec) + qedf_cmd_timer_set(qedf, els_req, timer_msec); + + /* Ring doorbell */ + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS " + "req\n"); + qedf_ring_doorbell(fcport); + set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags); + + spin_unlock_irqrestore(&fcport->rport_lock, flags); +els_err: + return rc; +} + +void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *els_req) +{ + struct fcoe_cqe_midpath_info *mp_info; + struct qedf_rport *fcport; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x" + " cmd_type = %d.\n", els_req->xid, els_req->cmd_type); + + if ((els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) + || (els_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) + || (els_req->event == QEDF_IOREQ_EV_CLEANUP_FAILED)) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "ELS completion xid=0x%x after flush event=0x%x", + els_req->xid, els_req->event); + return; + } + + fcport = els_req->fcport; + + /* When flush is active, + * let the cmds be completed from the cleanup context + */ + if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || + test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Dropping ELS completion xid=0x%x as fcport is flushing", + els_req->xid); + return; + } + + clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags); + + /* Kill the ELS timer */ + cancel_delayed_work(&els_req->timeout_work); + + /* Get ELS response length from CQE */ + mp_info = &cqe->cqe_info.midpath_info; + els_req->mp_req.resp_len = mp_info->data_placement_size; + + /* Parse ELS response */ + if ((els_req->cb_func) && (els_req->cb_arg)) { + els_req->cb_func(els_req->cb_arg); + els_req->cb_arg = NULL; + } + + kref_put(&els_req->refcount, qedf_release_cmd); +} + +static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg) +{ + struct qedf_ioreq *orig_io_req; + struct qedf_ioreq *rrq_req; + struct qedf_ctx *qedf; + int refcount; + + rrq_req = cb_arg->io_req; + qedf = rrq_req->fcport->qedf; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n"); + + orig_io_req = cb_arg->aborted_io_req; + + if (!orig_io_req) { + QEDF_ERR(&qedf->dbg_ctx, + "Original io_req is NULL, rrq_req = %p.\n", rrq_req); + goto out_free; + } 
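+	/*
+	 * Hedged sketch of the reference pairing this handler relies on
+	 * (illustrative comment only, no extra driver logic): qedf_send_rrq()
+	 * documents that its caller already holds a kref on the aborted
+	 * io_req, so the lifetime across the RRQ exchange is expected to be
+	 *
+	 *	caller:	kref_get(&aborted_io_req->refcount);
+	 *		qedf_send_rrq(aborted_io_req);
+	 *	here:	kref_put(&orig_io_req->refcount, qedf_release_cmd);
+	 *
+	 * The kref_read() check below guards the put against the case where
+	 * the original request was already flushed before this completion.
+	 */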
+ + refcount = kref_read(&orig_io_req->refcount); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p," + " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n", + orig_io_req, orig_io_req->xid, rrq_req->xid, refcount); + + /* + * This should return the aborted io_req to the command pool. Note that + * we need to check the refcound in case the original request was + * flushed but we get a completion on this xid. + */ + if (orig_io_req && refcount > 0) + kref_put(&orig_io_req->refcount, qedf_release_cmd); + +out_free: + /* + * Release a reference to the rrq request if we timed out as the + * rrq completion handler is called directly from the timeout handler + * and not from els_compl where the reference would have normally been + * released. + */ + if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO) + kref_put(&rrq_req->refcount, qedf_release_cmd); + kfree(cb_arg); +} + +/* Assumes kref is already held by caller */ +int qedf_send_rrq(struct qedf_ioreq *aborted_io_req) +{ + + struct fc_els_rrq rrq; + struct qedf_rport *fcport; + struct fc_lport *lport; + struct qedf_els_cb_arg *cb_arg = NULL; + struct qedf_ctx *qedf; + uint32_t sid; + uint32_t r_a_tov; + int rc; + int refcount; + + if (!aborted_io_req) { + QEDF_ERR(NULL, "abort_io_req is NULL.\n"); + return -EINVAL; + } + + fcport = aborted_io_req->fcport; + + if (!fcport) { + refcount = kref_read(&aborted_io_req->refcount); + QEDF_ERR(NULL, + "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n", + aborted_io_req->xid, refcount); + kref_put(&aborted_io_req->refcount, qedf_release_cmd); + return -EINVAL; + } + + /* Check that fcport is still offloaded */ + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); + return -EINVAL; + } + + if (!fcport->qedf) { + QEDF_ERR(NULL, "fcport->qedf is NULL.\n"); + return -EINVAL; + } + + qedf = fcport->qedf; + + /* + * Sanity check that we can send a RRQ to make sure that refcount isn't + * 0 + */ + refcount = kref_read(&aborted_io_req->refcount); + if (refcount != 1) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS, + "refcount for xid=%x io_req=%p refcount=%d is not 1.\n", + aborted_io_req->xid, aborted_io_req, refcount); + return -EINVAL; + } + + lport = qedf->lport; + sid = fcport->sid; + r_a_tov = lport->r_a_tov; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig " + "io = %p, orig_xid = 0x%x\n", aborted_io_req, + aborted_io_req->xid); + memset(&rrq, 0, sizeof(rrq)); + + cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " + "RRQ\n"); + rc = -ENOMEM; + goto rrq_err; + } + + cb_arg->aborted_io_req = aborted_io_req; + + rrq.rrq_cmd = ELS_RRQ; + hton24(rrq.rrq_s_id, sid); + rrq.rrq_ox_id = htons(aborted_io_req->xid); + rrq.rrq_rx_id = + htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id); + + rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq), + qedf_rrq_compl, cb_arg, r_a_tov); + +rrq_err: + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io " + "req 0x%x\n", aborted_io_req->xid); + kfree(cb_arg); + kref_put(&aborted_io_req->refcount, qedf_release_cmd); + } + return rc; +} + +static void qedf_process_l2_frame_compl(struct qedf_rport *fcport, + struct fc_frame *fp, + u16 l2_oxid) +{ + struct fc_lport *lport = fcport->qedf->lport; + struct fc_frame_header *fh; + u32 crc; + + fh = (struct fc_frame_header *)fc_frame_header_get(fp); + + /* Set the OXID we return to what libfc used */ + if (l2_oxid != 
FC_XID_UNKNOWN) + fh->fh_ox_id = htons(l2_oxid); + + /* Setup header fields */ + fh->fh_r_ctl = FC_RCTL_ELS_REP; + fh->fh_type = FC_TYPE_ELS; + /* Last sequence, end sequence */ + fh->fh_f_ctl[0] = 0x98; + hton24(fh->fh_d_id, lport->port_id); + hton24(fh->fh_s_id, fcport->rdata->ids.port_id); + fh->fh_rx_id = 0xffff; + + /* Set frame attributes */ + crc = fcoe_fc_crc(fp); + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = FC_SOF_I3; + fr_eof(fp) = FC_EOF_T; + fr_crc(fp) = cpu_to_le32(~crc); + + /* Send completed request to libfc */ + fc_exch_recv(lport, fp); +} + +/* + * In instances where an ELS command times out we may need to restart the + * rport by logging out and then logging back in. + */ +void qedf_restart_rport(struct qedf_rport *fcport) +{ + struct fc_lport *lport; + struct fc_rport_priv *rdata; + u32 port_id; + unsigned long flags; + + if (!fcport) { + QEDF_ERR(NULL, "fcport is NULL.\n"); + return; + } + + spin_lock_irqsave(&fcport->rport_lock, flags); + if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) || + !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) || + test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n", + fcport); + spin_unlock_irqrestore(&fcport->rport_lock, flags); + return; + } + + /* Set that we are now in reset */ + set_bit(QEDF_RPORT_IN_RESET, &fcport->flags); + spin_unlock_irqrestore(&fcport->rport_lock, flags); + + rdata = fcport->rdata; + if (rdata && !kref_get_unless_zero(&rdata->kref)) { + fcport->rdata = NULL; + rdata = NULL; + } + + if (rdata && rdata->rp_state == RPORT_ST_READY) { + lport = fcport->qedf->lport; + port_id = rdata->ids.port_id; + QEDF_ERR(&(fcport->qedf->dbg_ctx), + "LOGO port_id=%x.\n", port_id); + fc_rport_logoff(rdata); + kref_put(&rdata->kref, fc_rport_destroy); + mutex_lock(&lport->disc.disc_mutex); + /* Recreate the rport and log back in */ + rdata = fc_rport_create(lport, port_id); + mutex_unlock(&lport->disc.disc_mutex); + if (rdata) + fc_rport_login(rdata); + fcport->rdata = rdata; + } + clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags); +} + +static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg) +{ + struct qedf_ioreq *els_req; + struct qedf_rport *fcport; + struct qedf_mp_req *mp_req; + struct fc_frame *fp; + struct fc_frame_header *fh, *mp_fc_hdr; + void *resp_buf, *fc_payload; + u32 resp_len; + u16 l2_oxid; + + l2_oxid = cb_arg->l2_oxid; + els_req = cb_arg->io_req; + + if (!els_req) { + QEDF_ERR(NULL, "els_req is NULL.\n"); + goto free_arg; + } + + /* + * If we are flushing the command just free the cb_arg as none of the + * response data will be valid. + */ + if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) { + QEDF_ERR(NULL, "els_req xid=0x%x event is flush.\n", + els_req->xid); + goto free_arg; + } + + fcport = els_req->fcport; + mp_req = &(els_req->mp_req); + mp_fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + resp_buf = mp_req->resp_buf; + + /* + * If a middle path ELS command times out, don't try to return + * the command but rather do any internal cleanup and then libfc + * timeout the command and clean up its internal resources. + */ + if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) { + /* + * If ADISC times out, libfc will timeout the exchange and then + * try to send a PLOGI which will timeout since the session is + * still offloaded. Force libfc to logout the session which + * will offload the connection and allow the PLOGI response to + * flow over the LL2 path. 
+ */ + if (cb_arg->op == ELS_ADISC) + qedf_restart_rport(fcport); + return; + } + + if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is " + "beyond page size.\n"); + goto free_arg; + } + + fp = fc_frame_alloc(fcport->qedf->lport, resp_len); + if (!fp) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), + "fc_frame_alloc failure.\n"); + return; + } + + /* Copy frame header from firmware into fp */ + fh = (struct fc_frame_header *)fc_frame_header_get(fp); + memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header)); + + /* Copy payload from firmware into fp */ + fc_payload = fc_frame_payload_get(fp, resp_len); + memcpy(fc_payload, resp_buf, resp_len); + + QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS, + "Completing OX_ID 0x%x back to libfc.\n", l2_oxid); + qedf_process_l2_frame_compl(fcport, fp, l2_oxid); + +free_arg: + kfree(cb_arg); +} + +int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp) +{ + struct fc_els_adisc *adisc; + struct fc_frame_header *fh; + struct fc_lport *lport = fcport->qedf->lport; + struct qedf_els_cb_arg *cb_arg = NULL; + struct qedf_ctx *qedf; + uint32_t r_a_tov = lport->r_a_tov; + int rc; + + qedf = fcport->qedf; + fh = fc_frame_header_get(fp); + + cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " + "ADISC\n"); + rc = -ENOMEM; + goto adisc_err; + } + cb_arg->l2_oxid = ntohs(fh->fh_ox_id); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid); + + adisc = fc_frame_payload_get(fp, sizeof(*adisc)); + + rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc), + qedf_l2_els_compl, cb_arg, r_a_tov); + +adisc_err: + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n"); + kfree(cb_arg); + } + return rc; +} + +static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg) +{ + struct qedf_ioreq *orig_io_req; + struct qedf_ioreq *srr_req; + struct qedf_mp_req *mp_req; + struct fc_frame_header *mp_fc_hdr, *fh; + struct fc_frame *fp; + void *resp_buf, *fc_payload; + u32 resp_len; + struct fc_lport *lport; + struct qedf_ctx *qedf; + int refcount; + u8 opcode; + + srr_req = cb_arg->io_req; + qedf = srr_req->fcport->qedf; + lport = qedf->lport; + + orig_io_req = cb_arg->aborted_io_req; + + if (!orig_io_req) { + QEDF_ERR(NULL, "orig_io_req is NULL.\n"); + goto out_free; + } + + clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags); + + if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO && + srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) + cancel_delayed_work_sync(&orig_io_req->timeout_work); + + refcount = kref_read(&orig_io_req->refcount); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p," + " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n", + orig_io_req, orig_io_req->xid, srr_req->xid, refcount); + + /* If a SRR times out, simply free resources */ + if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) { + QEDF_ERR(&qedf->dbg_ctx, + "ELS timeout rec_xid=0x%x.\n", srr_req->xid); + goto out_put; + } + + /* Normalize response data into struct fc_frame */ + mp_req = &(srr_req->mp_req); + mp_fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + resp_buf = mp_req->resp_buf; + + fp = fc_frame_alloc(lport, resp_len); + if (!fp) { + QEDF_ERR(&(qedf->dbg_ctx), + "fc_frame_alloc failure.\n"); + goto out_put; + } + + /* Copy frame header from firmware into fp */ + fh = (struct fc_frame_header *)fc_frame_header_get(fp); + memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header)); + + /* 
Copy payload from firmware into fp */ + fc_payload = fc_frame_payload_get(fp, resp_len); + memcpy(fc_payload, resp_buf, resp_len); + + opcode = fc_frame_payload_op(fp); + switch (opcode) { + case ELS_LS_ACC: + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "SRR success.\n"); + break; + case ELS_LS_RJT: + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS, + "SRR rejected.\n"); + qedf_initiate_abts(orig_io_req, true); + break; + } + + fc_frame_free(fp); +out_put: + /* Put reference for original command since SRR completed */ + kref_put(&orig_io_req->refcount, qedf_release_cmd); +out_free: + kfree(cb_arg); +} + +static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl) +{ + struct fcp_srr srr; + struct qedf_ctx *qedf; + struct qedf_rport *fcport; + struct fc_lport *lport; + struct qedf_els_cb_arg *cb_arg = NULL; + u32 r_a_tov; + int rc; + + if (!orig_io_req) { + QEDF_ERR(NULL, "orig_io_req is NULL.\n"); + return -EINVAL; + } + + fcport = orig_io_req->fcport; + + /* Check that fcport is still offloaded */ + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); + return -EINVAL; + } + + if (!fcport->qedf) { + QEDF_ERR(NULL, "fcport->qedf is NULL.\n"); + return -EINVAL; + } + + /* Take reference until SRR command completion */ + kref_get(&orig_io_req->refcount); + + qedf = fcport->qedf; + lport = qedf->lport; + r_a_tov = lport->r_a_tov; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, " + "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid); + memset(&srr, 0, sizeof(srr)); + + cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " + "SRR\n"); + rc = -ENOMEM; + goto srr_err; + } + + cb_arg->aborted_io_req = orig_io_req; + + srr.srr_op = ELS_SRR; + srr.srr_ox_id = htons(orig_io_req->xid); + srr.srr_rx_id = htons(orig_io_req->rx_id); + srr.srr_rel_off = htonl(offset); + srr.srr_r_ctl = r_ctl; + + rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr), + qedf_srr_compl, cb_arg, r_a_tov); + +srr_err: + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req" + "=0x%x\n", orig_io_req->xid); + kfree(cb_arg); + /* If we fail to queue SRR, send ABTS to orig_io */ + qedf_initiate_abts(orig_io_req, true); + kref_put(&orig_io_req->refcount, qedf_release_cmd); + } else + /* Tell other threads that SRR is in progress */ + set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags); + + return rc; +} + +static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req, + u32 offset, u8 r_ctl) +{ + struct qedf_rport *fcport; + unsigned long flags; + struct qedf_els_cb_arg *cb_arg; + struct fcoe_wqe *sqe; + u16 sqe_idx; + + fcport = orig_io_req->fcport; + + QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS, + "Doing sequence cleanup for xid=0x%x offset=%u.\n", + orig_io_req->xid, offset); + + cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg " + "for sequence cleanup\n"); + return; + } + + /* Get reference for cleanup request */ + kref_get(&orig_io_req->refcount); + + orig_io_req->cmd_type = QEDF_SEQ_CLEANUP; + cb_arg->offset = offset; + cb_arg->r_ctl = r_ctl; + orig_io_req->cb_arg = cb_arg; + + qedf_cmd_timer_set(fcport->qedf, orig_io_req, + QEDF_CLEANUP_TIMEOUT * HZ); + + spin_lock_irqsave(&fcport->rport_lock, flags); + + sqe_idx = qedf_get_sqe_idx(fcport); + sqe = &fcport->sq[sqe_idx]; + memset(sqe, 0, sizeof(struct fcoe_wqe)); + 
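+	/*
+	 * Descriptive note (a sketch of the pattern, not additional logic):
+	 * sequence cleanup posts its work request the same way
+	 * qedf_initiate_els() does, i.e. a send queue entry is claimed and
+	 * submitted under fcport->rport_lock:
+	 *
+	 *	spin_lock_irqsave(&fcport->rport_lock, flags);
+	 *	sqe_idx = qedf_get_sqe_idx(fcport);
+	 *	sqe = &fcport->sq[sqe_idx];
+	 *	memset(sqe, 0, sizeof(*sqe));
+	 *	... point the task parameters at sqe and init the task ...
+	 *	qedf_ring_doorbell(fcport);
+	 *	spin_unlock_irqrestore(&fcport->rport_lock, flags);
+	 *
+	 * The remaining steps of that sequence continue directly below.
+	 */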
orig_io_req->task_params->sqe = sqe; + + init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params, + offset); + qedf_ring_doorbell(fcport); + + spin_unlock_irqrestore(&fcport->rport_lock, flags); +} + +void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf, + struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) +{ + int rc; + struct qedf_els_cb_arg *cb_arg; + + cb_arg = io_req->cb_arg; + + /* If we timed out just free resources */ + if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) { + QEDF_ERR(&qedf->dbg_ctx, + "cqe is NULL or timeout event (0x%x)", io_req->event); + goto free; + } + + /* Kill the timer we put on the request */ + cancel_delayed_work_sync(&io_req->timeout_work); + + rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl); + if (rc) + QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will " + "abort, xid=0x%x.\n", io_req->xid); +free: + kfree(cb_arg); + kref_put(&io_req->refcount, qedf_release_cmd); +} + +static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req) +{ + struct qedf_rport *fcport; + struct qedf_ioreq *new_io_req; + unsigned long flags; + bool rc = false; + + fcport = orig_io_req->fcport; + if (!fcport) { + QEDF_ERR(NULL, "fcport is NULL.\n"); + goto out; + } + + if (!orig_io_req->sc_cmd) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for " + "xid=0x%x.\n", orig_io_req->xid); + goto out; + } + + new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD); + if (!new_io_req) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new " + "io_req.\n"); + goto out; + } + + new_io_req->sc_cmd = orig_io_req->sc_cmd; + + /* + * This keeps the sc_cmd struct from being returned to the tape + * driver and being requeued twice. We do need to put a reference + * for the original I/O request since we will not do a SCSI completion + * for it. + */ + orig_io_req->sc_cmd = NULL; + kref_put(&orig_io_req->refcount, qedf_release_cmd); + + spin_lock_irqsave(&fcport->rport_lock, flags); + + /* kref for new command released in qedf_post_io_req on error */ + if (qedf_post_io_req(fcport, new_io_req)) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n"); + /* Return SQE to pool */ + atomic_inc(&fcport->free_sqes); + } else { + QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS, + "Reissued SCSI command from orig_xid=0x%x on " + "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid); + /* + * Abort the original I/O but do not return SCSI command as + * it has been reissued on another OX_ID. 
+ */ + spin_unlock_irqrestore(&fcport->rport_lock, flags); + qedf_initiate_abts(orig_io_req, false); + goto out; + } + + spin_unlock_irqrestore(&fcport->rport_lock, flags); +out: + return rc; +} + + +static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg) +{ + struct qedf_ioreq *orig_io_req; + struct qedf_ioreq *rec_req; + struct qedf_mp_req *mp_req; + struct fc_frame_header *mp_fc_hdr, *fh; + struct fc_frame *fp; + void *resp_buf, *fc_payload; + u32 resp_len; + struct fc_lport *lport; + struct qedf_ctx *qedf; + int refcount; + enum fc_rctl r_ctl; + struct fc_els_ls_rjt *rjt; + struct fc_els_rec_acc *acc; + u8 opcode; + u32 offset, e_stat; + struct scsi_cmnd *sc_cmd; + bool srr_needed = false; + + rec_req = cb_arg->io_req; + qedf = rec_req->fcport->qedf; + lport = qedf->lport; + + orig_io_req = cb_arg->aborted_io_req; + + if (!orig_io_req) { + QEDF_ERR(NULL, "orig_io_req is NULL.\n"); + goto out_free; + } + + if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO && + rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) + cancel_delayed_work_sync(&orig_io_req->timeout_work); + + refcount = kref_read(&orig_io_req->refcount); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p," + " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n", + orig_io_req, orig_io_req->xid, rec_req->xid, refcount); + + /* If a REC times out, free resources */ + if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) { + QEDF_ERR(&qedf->dbg_ctx, + "Got TMO event, orig_io_req %p orig_io_xid=0x%x.\n", + orig_io_req, orig_io_req->xid); + goto out_put; + } + + /* Normalize response data into struct fc_frame */ + mp_req = &(rec_req->mp_req); + mp_fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + acc = resp_buf = mp_req->resp_buf; + + fp = fc_frame_alloc(lport, resp_len); + if (!fp) { + QEDF_ERR(&(qedf->dbg_ctx), + "fc_frame_alloc failure.\n"); + goto out_put; + } + + /* Copy frame header from firmware into fp */ + fh = (struct fc_frame_header *)fc_frame_header_get(fp); + memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header)); + + /* Copy payload from firmware into fp */ + fc_payload = fc_frame_payload_get(fp, resp_len); + memcpy(fc_payload, resp_buf, resp_len); + + opcode = fc_frame_payload_op(fp); + if (opcode == ELS_LS_RJT) { + rjt = fc_frame_payload_get(fp, sizeof(*rjt)); + if (!rjt) { + QEDF_ERR(&qedf->dbg_ctx, "payload get failed"); + goto out_free_frame; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Received LS_RJT for REC: er_reason=0x%x, " + "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan); + /* + * The following response(s) mean that we need to reissue the + * request on another exchange. We need to do this without + * informing the upper layers lest it cause an application + * error. 
+ */ + if ((rjt->er_reason == ELS_RJT_LOGIC || + rjt->er_reason == ELS_RJT_UNAB) && + rjt->er_explan == ELS_EXPL_OXID_RXID) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Handle CMD LOST case.\n"); + qedf_requeue_io_req(orig_io_req); + } + } else if (opcode == ELS_LS_ACC) { + offset = ntohl(acc->reca_fc4value); + e_stat = ntohl(acc->reca_e_stat); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n", + offset, e_stat); + if (e_stat & ESB_ST_SEQ_INIT) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Target has the seq init\n"); + goto out_free_frame; + } + sc_cmd = orig_io_req->sc_cmd; + if (!sc_cmd) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "sc_cmd is NULL for xid=0x%x.\n", + orig_io_req->xid); + goto out_free_frame; + } + /* SCSI write case */ + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + if (offset == orig_io_req->data_xfer_len) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "WRITE - response lost.\n"); + r_ctl = FC_RCTL_DD_CMD_STATUS; + srr_needed = true; + offset = 0; + } else { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "WRITE - XFER_RDY/DATA lost.\n"); + r_ctl = FC_RCTL_DD_DATA_DESC; + /* Use data from warning CQE instead of REC */ + offset = orig_io_req->tx_buf_off; + } + /* SCSI read case */ + } else { + if (orig_io_req->rx_buf_off == + orig_io_req->data_xfer_len) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "READ - response lost.\n"); + srr_needed = true; + r_ctl = FC_RCTL_DD_CMD_STATUS; + offset = 0; + } else { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "READ - DATA lost.\n"); + /* + * For read case we always set the offset to 0 + * for sequence recovery task. + */ + offset = 0; + r_ctl = FC_RCTL_DD_SOL_DATA; + } + } + + if (srr_needed) + qedf_send_srr(orig_io_req, offset, r_ctl); + else + qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl); + } + +out_free_frame: + fc_frame_free(fp); +out_put: + /* Put reference for original command since REC completed */ + kref_put(&orig_io_req->refcount, qedf_release_cmd); +out_free: + kfree(cb_arg); +} + +/* Assumes kref is already held by caller */ +int qedf_send_rec(struct qedf_ioreq *orig_io_req) +{ + + struct fc_els_rec rec; + struct qedf_rport *fcport; + struct fc_lport *lport; + struct qedf_els_cb_arg *cb_arg = NULL; + struct qedf_ctx *qedf; + uint32_t sid; + uint32_t r_a_tov; + int rc; + + if (!orig_io_req) { + QEDF_ERR(NULL, "orig_io_req is NULL.\n"); + return -EINVAL; + } + + fcport = orig_io_req->fcport; + + /* Check that fcport is still offloaded */ + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); + return -EINVAL; + } + + if (!fcport->qedf) { + QEDF_ERR(NULL, "fcport->qedf is NULL.\n"); + return -EINVAL; + } + + /* Take reference until REC command completion */ + kref_get(&orig_io_req->refcount); + + qedf = fcport->qedf; + lport = qedf->lport; + sid = fcport->sid; + r_a_tov = lport->r_a_tov; + + memset(&rec, 0, sizeof(rec)); + + cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " + "REC\n"); + rc = -ENOMEM; + goto rec_err; + } + + cb_arg->aborted_io_req = orig_io_req; + + rec.rec_cmd = ELS_REC; + hton24(rec.rec_s_id, sid); + rec.rec_ox_id = htons(orig_io_req->xid); + rec.rec_rx_id = + htons(orig_io_req->task->tstorm_st_context.read_write.rx_id); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, " + "orig_xid=0x%x rx_id=0x%x\n", orig_io_req, + orig_io_req->xid, rec.rec_rx_id); + rc = 
qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec), + qedf_rec_compl, cb_arg, r_a_tov); + +rec_err: + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req" + "=0x%x\n", orig_io_req->xid); + kfree(cb_arg); + kref_put(&orig_io_req->refcount, qedf_release_cmd); + } + return rc; +} diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c new file mode 100644 index 000000000..ad6a56ce7 --- /dev/null +++ b/drivers/scsi/qedf/qedf_fip.c @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016-2018 Cavium Inc. + */ +#include +#include +#include "qedf.h" + +extern const struct qed_fcoe_ops *qed_ops; +/* + * FIP VLAN functions that will eventually move to libfcoe. + */ + +void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf) +{ + struct sk_buff *skb; + char *eth_fr; + struct fip_vlan *vlan; +#define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 }) + static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS; + unsigned long flags = 0; + int rc; + + skb = dev_alloc_skb(sizeof(struct fip_vlan)); + if (!skb) { + QEDF_ERR(&qedf->dbg_ctx, + "Failed to allocate skb.\n"); + return; + } + + eth_fr = (char *)skb->data; + vlan = (struct fip_vlan *)eth_fr; + + memset(vlan, 0, sizeof(*vlan)); + ether_addr_copy(vlan->eth.h_source, qedf->mac); + ether_addr_copy(vlan->eth.h_dest, my_fcoe_all_fcfs); + vlan->eth.h_proto = htons(ETH_P_FIP); + + vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + vlan->fip.fip_op = htons(FIP_OP_VLAN); + vlan->fip.fip_subcode = FIP_SC_VL_REQ; + vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); + + vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; + vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW; + ether_addr_copy(vlan->desc.mac.fd_mac, qedf->mac); + + vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; + vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; + put_unaligned_be64(qedf->lport->wwnn, &vlan->desc.wwnn.fd_wwn); + + skb_put(skb, sizeof(*vlan)); + skb->protocol = htons(ETH_P_FIP); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Sending FIP VLAN " + "request."); + + if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { + QEDF_WARN(&(qedf->dbg_ctx), "Cannot send vlan request " + "because link is not up.\n"); + + kfree_skb(skb); + return; + } + + set_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &flags); + rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, flags); + if (rc) { + QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc); + kfree_skb(skb); + return; + } + +} + +static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf, + struct sk_buff *skb) +{ + struct fip_header *fiph; + struct fip_desc *desc; + u16 vid = 0; + ssize_t rlen; + size_t dlen; + + fiph = (struct fip_header *)(((void *)skb->data) + 2 * ETH_ALEN + 2); + + rlen = ntohs(fiph->fip_dl_len) * 4; + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + switch (desc->fip_dtype) { + case FIP_DT_VLAN: + vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan); + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + + if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "Dropping VLAN response as link is down.\n"); + return; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "VLAN response, " + "vid=0x%x.\n", vid); + + if (vid > 0 && qedf->vlan_id != vid) { + qedf_set_vlan_id(qedf, vid); + + /* Inform 
waiter that it's ok to call fcoe_ctlr_link up() */ + if (!completion_done(&qedf->fipvlan_compl)) + complete(&qedf->fipvlan_compl); + } +} + +void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct qedf_ctx *qedf = container_of(fip, struct qedf_ctx, ctlr); + struct ethhdr *eth_hdr; + struct fip_header *fiph; + u16 op, vlan_tci = 0; + u8 sub; + int rc = -1; + + if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) { + QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n"); + kfree_skb(skb); + return; + } + + fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2); + eth_hdr = (struct ethhdr *)skb_mac_header(skb); + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + /* + * Add VLAN tag to non-offload FIP frame based on current stored VLAN + * for FIP/FCoE traffic. + */ + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id); + + /* Get VLAN ID from skb for printing purposes */ + __vlan_hwaccel_get_tag(skb, &vlan_tci); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame send: " + "dest=%pM op=%x sub=%x vlan=%04x.", eth_hdr->h_dest, op, sub, + vlan_tci); + if (qedf_dump_frames) + print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb->len, false); + + rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); + if (rc) { + QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc); + kfree_skb(skb); + return; + } +} + +static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS; + +/* Process incoming FIP frames. */ +void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb) +{ + struct ethhdr *eth_hdr; + struct fip_header *fiph; + struct fip_desc *desc; + struct fip_mac_desc *mp; + struct fip_wwn_desc *wp; + struct fip_vn_desc *vp; + size_t rlen, dlen; + u16 op; + u8 sub; + bool fcf_valid = false; + /* Default is to handle CVL regardless of fabric id descriptor */ + bool fabric_id_valid = true; + bool fc_wwpn_valid = false; + u64 switch_name; + u16 vlan = 0; + + eth_hdr = (struct ethhdr *)skb_mac_header(skb); + fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2); + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, + "FIP frame received: skb=%p fiph=%p source=%pM destn=%pM op=%x sub=%x vlan=%04x", + skb, fiph, eth_hdr->h_source, eth_hdr->h_dest, op, + sub, vlan); + if (qedf_dump_frames) + print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb->len, false); + + if (!ether_addr_equal(eth_hdr->h_dest, qedf->mac) && + !ether_addr_equal(eth_hdr->h_dest, fcoe_all_enode) && + !ether_addr_equal(eth_hdr->h_dest, qedf->data_src_addr)) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, + "Dropping FIP type 0x%x pkt due to destination MAC mismatch dest_mac=%pM ctlr.dest_addr=%pM data_src_addr=%pM.\n", + op, eth_hdr->h_dest, qedf->mac, + qedf->data_src_addr); + kfree_skb(skb); + return; + } + + /* Handle FIP VLAN resp in the driver */ + if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) { + qedf_fcoe_process_vlan_resp(qedf, skb); + kfree_skb(skb); + } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Clear virtual " + "link received.\n"); + + /* Check that an FCF has been selected by fcoe */ + if (qedf->ctlr.sel_fcf == NULL) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Dropping CVL since FCF has not been selected " + "yet."); + kfree_skb(skb); + return; + } + + /* + * We need to loop through the CVL descriptors to determine + * if we want to reset the fcoe link + */ + rlen = ntohs(fiph->fip_dl_len) * FIP_BPW; + desc 
= (struct fip_desc *)(fiph + 1); + while (rlen >= sizeof(*desc)) { + dlen = desc->fip_dlen * FIP_BPW; + switch (desc->fip_dtype) { + case FIP_DT_MAC: + mp = (struct fip_mac_desc *)desc; + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "Switch fd_mac=%pM.\n", mp->fd_mac); + if (ether_addr_equal(mp->fd_mac, + qedf->ctlr.sel_fcf->fcf_mac)) + fcf_valid = true; + break; + case FIP_DT_NAME: + wp = (struct fip_wwn_desc *)desc; + switch_name = get_unaligned_be64(&wp->fd_wwn); + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "Switch fd_wwn=%016llx fcf_switch_name=%016llx.\n", + switch_name, + qedf->ctlr.sel_fcf->switch_name); + if (switch_name == + qedf->ctlr.sel_fcf->switch_name) + fc_wwpn_valid = true; + break; + case FIP_DT_VN_ID: + fabric_id_valid = false; + vp = (struct fip_vn_desc *)desc; + + QEDF_ERR(&qedf->dbg_ctx, + "CVL vx_port fd_fc_id=0x%x fd_mac=%pM fd_wwpn=%016llx.\n", + ntoh24(vp->fd_fc_id), vp->fd_mac, + get_unaligned_be64(&vp->fd_wwpn)); + /* Check for vx_port wwpn OR Check vx_port + * fabric ID OR Check vx_port MAC + */ + if ((get_unaligned_be64(&vp->fd_wwpn) == + qedf->wwpn) || + (ntoh24(vp->fd_fc_id) == + qedf->lport->port_id) || + (ether_addr_equal(vp->fd_mac, + qedf->data_src_addr))) { + fabric_id_valid = true; + } + break; + default: + /* Ignore anything else */ + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "fcf_valid=%d fabric_id_valid=%d fc_wwpn_valid=%d.\n", + fcf_valid, fabric_id_valid, fc_wwpn_valid); + if (fcf_valid && fabric_id_valid && fc_wwpn_valid) + qedf_ctx_soft_reset(qedf->lport); + kfree_skb(skb); + } else { + /* Everything else is handled by libfcoe */ + __skb_pull(skb, ETH_HLEN); + fcoe_ctlr_recv(&qedf->ctlr, skb); + } +} + +u8 *qedf_get_src_mac(struct fc_lport *lport) +{ + struct qedf_ctx *qedf = lport_priv(lport); + + return qedf->data_src_addr; +} diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h new file mode 100644 index 000000000..ecd5cb53b --- /dev/null +++ b/drivers/scsi/qedf/qedf_hsi.h @@ -0,0 +1,351 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016-2018 Cavium Inc. + */ +#ifndef __QEDF_HSI__ +#define __QEDF_HSI__ +/* + * Add include to common target + */ +#include + +/* + * Add include to common storage target + */ +#include + +/* + * Add include to common fcoe target for both eCore and protocol driver + */ +#include + + +/* + * FCoE CQ element ABTS information + */ +struct fcoe_abts_info { + u8 r_ctl /* R_CTL in the ABTS response frame */; + u8 reserved0; + __le16 rx_id; + __le32 reserved2[2]; + __le32 fc_payload[3] /* ABTS FC payload response frame */; +}; + + +/* + * FCoE class type + */ +enum fcoe_class_type { + FCOE_TASK_CLASS_TYPE_3, + FCOE_TASK_CLASS_TYPE_2, + MAX_FCOE_CLASS_TYPE +}; + + +/* + * FCoE CMDQ element control information + */ +struct fcoe_cmdqe_control { + __le16 conn_id; + u8 num_additional_cmdqes; + u8 cmdType; + /* true for ABTS request cmdqe. 
used in Target mode */ +#define FCOE_CMDQE_CONTROL_ABTSREQCMD_MASK 0x1 +#define FCOE_CMDQE_CONTROL_ABTSREQCMD_SHIFT 0 +#define FCOE_CMDQE_CONTROL_RESERVED1_MASK 0x7F +#define FCOE_CMDQE_CONTROL_RESERVED1_SHIFT 1 + u8 reserved2[4]; +}; + +/* + * FCoE control + payload CMDQ element + */ +struct fcoe_cmdqe { + struct fcoe_cmdqe_control hdr; + u8 fc_header[24]; + __le32 fcp_cmd_payload[8]; +}; + + + +/* + * FCP RSP flags + */ +struct fcoe_fcp_rsp_flags { + u8 flags; +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_MASK 0x1 +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0 +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_MASK 0x1 +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_MASK 0x1 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_MASK 0x1 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3 +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_MASK 0x1 +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4 +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_MASK 0x7 +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5 +}; + +/* + * FCoE CQ element response information + */ +struct fcoe_cqe_rsp_info { + struct fcoe_fcp_rsp_flags rsp_flags; + u8 scsi_status_code; + __le16 retry_delay_timer; + __le32 fcp_resid; + __le32 fcp_sns_len; + __le32 fcp_rsp_len; + __le16 rx_id; + u8 fw_error_flags; +#define FCOE_CQE_RSP_INFO_FW_UNDERRUN_MASK 0x1 /* FW detected underrun */ +#define FCOE_CQE_RSP_INFO_FW_UNDERRUN_SHIFT 0 +#define FCOE_CQE_RSP_INFO_RESREVED_MASK 0x7F +#define FCOE_CQE_RSP_INFO_RESREVED_SHIFT 1 + u8 reserved; + __le32 fw_residual /* Residual bytes calculated by FW */; +}; + +/* + * FCoE CQ element Target completion information + */ +struct fcoe_cqe_target_info { + __le16 rx_id; + __le16 reserved0; + __le32 reserved1[5]; +}; + +/* + * FCoE error/warning reporting entry + */ +struct fcoe_err_report_entry { + __le32 err_warn_bitmap_lo /* Error bitmap lower 32 bits */; + __le32 err_warn_bitmap_hi /* Error bitmap higher 32 bits */; + /* Buffer offset the beginning of the Sequence last transmitted */ + __le32 tx_buf_off; + /* Buffer offset from the beginning of the Sequence last received */ + __le32 rx_buf_off; + __le16 rx_id /* RX_ID of the associated task */; + __le16 reserved1; + __le32 reserved2; +}; + +/* + * FCoE CQ element middle path information + */ +struct fcoe_cqe_midpath_info { + __le32 data_placement_size; + __le16 rx_id; + __le16 reserved0; + __le32 reserved1[4]; +}; + +/* + * FCoE CQ element unsolicited information + */ +struct fcoe_unsolic_info { + /* BD information: Physical address and opaque data */ + struct scsi_bd bd_info; + __le16 conn_id /* Connection ID the frame is associated to */; + __le16 pkt_len /* Packet length */; + u8 reserved1[4]; +}; + +/* + * FCoE warning reporting entry + */ +struct fcoe_warning_report_entry { + /* BD information: Physical address and opaque data */ + struct scsi_bd bd_info; + /* Buffer offset the beginning of the Sequence last transmitted */ + __le32 buf_off; + __le16 rx_id /* RX_ID of the associated task */; + __le16 reserved1; +}; + +/* + * FCoE CQ element information + */ +union fcoe_cqe_info { + struct fcoe_cqe_rsp_info rsp_info /* Response completion information */; + /* Target completion information */ + struct fcoe_cqe_target_info target_info; + /* Error completion information */ + struct fcoe_err_report_entry err_info; + struct fcoe_abts_info abts_info /* ABTS completion information */; + /* Middle path completion information */ + struct fcoe_cqe_midpath_info midpath_info; 
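+	/*
+	 * Hedged usage sketch (illustrative only, not part of the firmware
+	 * interface definition): which member of this union is valid is
+	 * selected by the CQE type encoded in struct fcoe_cqe::cqe_data
+	 * below, extracted with the FCOE_CQE_CQE_TYPE_{MASK,SHIFT} pair:
+	 *
+	 *	u32 data = le32_to_cpu(cqe->cqe_data);
+	 *	u8 cqe_type = (data >> FCOE_CQE_CQE_TYPE_SHIFT) &
+	 *		       FCOE_CQE_CQE_TYPE_MASK;
+	 *	u16 xid = (data >> FCOE_CQE_TASK_ID_SHIFT) &
+	 *		   FCOE_CQE_TASK_ID_MASK;
+	 *
+	 * For example, rsp_info is the member that applies to a solicited
+	 * good completion, while err_info carries the error report entry.
+	 */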
+ /* Unsolicited packet completion information */ + struct fcoe_unsolic_info unsolic_info; + /* Warning completion information (Rec Tov expiration) */ + struct fcoe_warning_report_entry warn_info; +}; + +/* + * FCoE CQ element + */ +struct fcoe_cqe { + __le32 cqe_data; + /* The task identifier (OX_ID) to be completed */ +#define FCOE_CQE_TASK_ID_MASK 0xFFFF +#define FCOE_CQE_TASK_ID_SHIFT 0 + /* + * The CQE type: 0x0 Indicating on a pending work request completion. + * 0x1 - Indicating on an unsolicited event notification. use enum + * fcoe_cqe_type (use enum fcoe_cqe_type) + */ +#define FCOE_CQE_CQE_TYPE_MASK 0xF +#define FCOE_CQE_CQE_TYPE_SHIFT 16 +#define FCOE_CQE_RESERVED0_MASK 0xFFF +#define FCOE_CQE_RESERVED0_SHIFT 20 + __le16 reserved1; + __le16 fw_cq_prod; + union fcoe_cqe_info cqe_info; +}; + +/* + * FCoE CQE type + */ +enum fcoe_cqe_type { + /* solicited response on a R/W or middle-path SQE */ + FCOE_GOOD_COMPLETION_CQE_TYPE, + FCOE_UNSOLIC_CQE_TYPE /* unsolicited packet, RQ consumed */, + FCOE_ERROR_DETECTION_CQE_TYPE /* timer expiration, validation error */, + FCOE_WARNING_CQE_TYPE /* rec_tov or rr_tov timer expiration */, + FCOE_EXCH_CLEANUP_CQE_TYPE /* task cleanup completed */, + FCOE_ABTS_CQE_TYPE /* ABTS received and task cleaned */, + FCOE_DUMMY_CQE_TYPE /* just increment SQ CONS */, + /* Task was completed wight after sending a pkt to the target */ + FCOE_LOCAL_COMP_CQE_TYPE, + MAX_FCOE_CQE_TYPE +}; + +/* + * FCoE fast path error codes + */ +enum fcoe_fp_error_warning_code { + FCOE_ERROR_CODE_XFER_OOO_RO /* XFER error codes */, + FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED, + FCOE_ERROR_CODE_XFER_NULL_BURST_LEN, + FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS, + FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE, + FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE, + FCOE_ERROR_CODE_XFER_PEND_XFER_SET, + FCOE_ERROR_CODE_XFER_OPENED_SEQ, + FCOE_ERROR_CODE_XFER_FCTL, + FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET /* FCP RSP error codes */, + FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD, + FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD, + FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE, + FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET, + FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ, + FCOE_ERROR_CODE_FCP_RSP_FCTL, + FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET, + FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET, + FCOE_ERROR_CODE_DATA_OOO_RO /* FCP DATA error codes */, + FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE, + FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS, + FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET, + FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET, + FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET, + FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET, + FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ, + FCOE_ERROR_CODE_DATA_FCTL_INITIATIR, + FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE /* Middle path error codes */, + FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET, + FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET, + FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET, + FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET, + FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL, + FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY, + FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL, + FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD /* Common error codes */, + FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE, + FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH, + FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT, + FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH, + FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES, + FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR, + FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG, + FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED, + 
FCOE_ERROR_CODE_COMMON_TASK_DDF_RCTL_INFO_FIELD, + FCOE_ERROR_CODE_COMMON_TASK_INVALID_RCTL, + FCOE_ERROR_CODE_COMMON_TASK_RCTL_GENERAL_MISMATCH, + FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION /* Timer error codes */, + FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION /* Timer error codes */, + FCOE_ERROR_CODE_RR_TOV_TIMER_EXPIRATION /* Timer error codes */, + /* ABTSrsp pckt arrived unexpected */ + FCOE_ERROR_CODE_ABTS_REPLY_UNEXPECTED, + FCOE_ERROR_CODE_TARGET_MODE_FCP_RSP, + FCOE_ERROR_CODE_TARGET_MODE_FCP_XFER, + FCOE_ERROR_CODE_TARGET_MODE_DATA_TASK_TYPE_NOT_WRITE, + FCOE_ERROR_CODE_DATA_FCTL_TARGET, + FCOE_ERROR_CODE_TARGET_DATA_SIZE_NO_MATCH_XFER, + FCOE_ERROR_CODE_TARGET_DIF_CRC_CHECKSUM_ERROR, + FCOE_ERROR_CODE_TARGET_DIF_REF_TAG_ERROR, + FCOE_ERROR_CODE_TARGET_DIF_APP_TAG_ERROR, + MAX_FCOE_FP_ERROR_WARNING_CODE +}; + + +/* + * FCoE RESPQ element + */ +struct fcoe_respqe { + __le16 ox_id /* OX_ID that is located in the FCP_RSP FC header */; + __le16 rx_id /* RX_ID that is located in the FCP_RSP FC header */; + __le32 additional_info; +/* PARAM that is located in the FCP_RSP FC header */ +#define FCOE_RESPQE_PARAM_MASK 0xFFFFFF +#define FCOE_RESPQE_PARAM_SHIFT 0 +/* Indication whther its Target-auto-rsp mode or not */ +#define FCOE_RESPQE_TARGET_AUTO_RSP_MASK 0xFF +#define FCOE_RESPQE_TARGET_AUTO_RSP_SHIFT 24 +}; + + +/* + * FCoE slow path error codes + */ +enum fcoe_sp_error_code { + /* Error codes for Error Reporting in slow path flows */ + FCOE_ERROR_CODE_SLOW_PATH_TOO_MANY_FUNCS, + FCOE_ERROR_SLOW_PATH_CODE_NO_LICENSE, + MAX_FCOE_SP_ERROR_CODE +}; + +/* + * FCoE task TX state + */ +enum fcoe_task_tx_state { + /* Initiate state after driver has initialized the task */ + FCOE_TASK_TX_STATE_NORMAL, + /* Updated by TX path after complete transmitting unsolicited packet */ + FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED, + /* + * Updated by TX path after start processing the task requesting the + * cleanup/abort operation + */ + FCOE_TASK_TX_STATE_CLEAN_REQ, + FCOE_TASK_TX_STATE_ABTS /* Updated by TX path during abort procedure */, + /* Updated by TX path during exchange cleanup procedure */ + FCOE_TASK_TX_STATE_EXCLEANUP, + /* + * Updated by TX path during exchange cleanup continuation task + * procedure + */ + FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE_CONT, + /* Updated by TX path during exchange cleanup first xfer procedure */ + FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE, + /* Updated by TX path during exchange cleanup read task in Target */ + FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_READ_OR_RSP, + /* Updated by TX path during target exchange cleanup procedure */ + FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE_LAST_CYCLE, + /* Updated by TX path during sequence recovery procedure */ + FCOE_TASK_TX_STATE_SEQRECOVERY, + MAX_FCOE_TASK_TX_STATE +}; + +#endif /* __QEDF_HSI__ */ diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c new file mode 100644 index 000000000..10fe33838 --- /dev/null +++ b/drivers/scsi/qedf/qedf_io.c @@ -0,0 +1,2630 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016-2018 Cavium Inc. 
+ */ +#include +#include +#include "qedf.h" +#include + +void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, + unsigned int timer_msec) +{ + queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work, + msecs_to_jiffies(timer_msec)); +} + +static void qedf_cmd_timeout(struct work_struct *work) +{ + + struct qedf_ioreq *io_req = + container_of(work, struct qedf_ioreq, timeout_work.work); + struct qedf_ctx *qedf; + struct qedf_rport *fcport; + + fcport = io_req->fcport; + if (io_req->fcport == NULL) { + QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n"); + return; + } + + qedf = fcport->qedf; + + switch (io_req->cmd_type) { + case QEDF_ABTS: + if (qedf == NULL) { + QEDF_INFO(NULL, QEDF_LOG_IO, + "qedf is NULL for ABTS xid=0x%x.\n", + io_req->xid); + return; + } + + QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n", + io_req->xid); + /* Cleanup timed out ABTS */ + qedf_initiate_cleanup(io_req, true); + complete(&io_req->abts_done); + + /* + * Need to call kref_put for reference taken when initiate_abts + * was called since abts_compl won't be called now that we've + * cleaned up the task. + */ + kref_put(&io_req->refcount, qedf_release_cmd); + + /* Clear in abort bit now that we're done with the command */ + clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags); + + /* + * Now that the original I/O and the ABTS are complete see + * if we need to reconnect to the target. + */ + qedf_restart_rport(fcport); + break; + case QEDF_ELS: + if (!qedf) { + QEDF_INFO(NULL, QEDF_LOG_IO, + "qedf is NULL for ELS xid=0x%x.\n", + io_req->xid); + return; + } + /* ELS request no longer outstanding since it timed out */ + clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); + + kref_get(&io_req->refcount); + /* + * Don't attempt to clean an ELS timeout as any subseqeunt + * ABTS or cleanup requests just hang. 
For now just free + * the resources of the original I/O and the RRQ + */ + QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n", + io_req->xid); + qedf_initiate_cleanup(io_req, true); + io_req->event = QEDF_IOREQ_EV_ELS_TMO; + /* Call callback function to complete command */ + if (io_req->cb_func && io_req->cb_arg) { + io_req->cb_func(io_req->cb_arg); + io_req->cb_arg = NULL; + } + kref_put(&io_req->refcount, qedf_release_cmd); + break; + case QEDF_SEQ_CLEANUP: + QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, " + "xid=0x%x.\n", io_req->xid); + qedf_initiate_cleanup(io_req, true); + io_req->event = QEDF_IOREQ_EV_ELS_TMO; + qedf_process_seq_cleanup_compl(qedf, NULL, io_req); + break; + default: + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Hit default case, xid=0x%x.\n", io_req->xid); + break; + } +} + +void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr) +{ + struct io_bdt *bdt_info; + struct qedf_ctx *qedf = cmgr->qedf; + size_t bd_tbl_sz; + u16 min_xid = 0; + u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1); + int num_ios; + int i; + struct qedf_ioreq *io_req; + + num_ios = max_xid - min_xid + 1; + + /* Free fcoe_bdt_ctx structures */ + if (!cmgr->io_bdt_pool) { + QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n"); + goto free_cmd_pool; + } + + bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge); + for (i = 0; i < num_ios; i++) { + bdt_info = cmgr->io_bdt_pool[i]; + if (bdt_info->bd_tbl) { + dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz, + bdt_info->bd_tbl, bdt_info->bd_tbl_dma); + bdt_info->bd_tbl = NULL; + } + } + + /* Destroy io_bdt pool */ + for (i = 0; i < num_ios; i++) { + kfree(cmgr->io_bdt_pool[i]); + cmgr->io_bdt_pool[i] = NULL; + } + + kfree(cmgr->io_bdt_pool); + cmgr->io_bdt_pool = NULL; + +free_cmd_pool: + + for (i = 0; i < num_ios; i++) { + io_req = &cmgr->cmds[i]; + kfree(io_req->sgl_task_params); + kfree(io_req->task_params); + /* Make sure we free per command sense buffer */ + if (io_req->sense_buffer) + dma_free_coherent(&qedf->pdev->dev, + QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer, + io_req->sense_buffer_dma); + cancel_delayed_work_sync(&io_req->rrq_work); + } + + /* Free command manager itself */ + vfree(cmgr); +} + +static void qedf_handle_rrq(struct work_struct *work) +{ + struct qedf_ioreq *io_req = + container_of(work, struct qedf_ioreq, rrq_work.work); + + atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE); + qedf_send_rrq(io_req); + +} + +struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf) +{ + struct qedf_cmd_mgr *cmgr; + struct io_bdt *bdt_info; + struct qedf_ioreq *io_req; + u16 xid; + int i; + int num_ios; + u16 min_xid = 0; + u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1); + + /* Make sure num_queues is already set before calling this function */ + if (!qedf->num_queues) { + QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n"); + return NULL; + } + + if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { + QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and " + "max_xid 0x%x.\n", min_xid, max_xid); + return NULL; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid " + "0x%x.\n", min_xid, max_xid); + + num_ios = max_xid - min_xid + 1; + + cmgr = vzalloc(sizeof(struct qedf_cmd_mgr)); + if (!cmgr) { + QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n"); + return NULL; + } + + cmgr->qedf = qedf; + spin_lock_init(&cmgr->lock); + + /* + * Initialize I/O request fields. 
+ */ + xid = 0; + + for (i = 0; i < num_ios; i++) { + io_req = &cmgr->cmds[i]; + INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout); + + io_req->xid = xid++; + + INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq); + + /* Allocate DMA memory to hold sense buffer */ + io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma, + GFP_KERNEL); + if (!io_req->sense_buffer) { + QEDF_ERR(&qedf->dbg_ctx, + "Failed to alloc sense buffer.\n"); + goto mem_err; + } + + /* Allocate task parameters to pass to f/w init funcions */ + io_req->task_params = kzalloc(sizeof(*io_req->task_params), + GFP_KERNEL); + if (!io_req->task_params) { + QEDF_ERR(&(qedf->dbg_ctx), + "Failed to allocate task_params for xid=0x%x\n", + i); + goto mem_err; + } + + /* + * Allocate scatter/gather list info to pass to f/w init + * functions. + */ + io_req->sgl_task_params = kzalloc( + sizeof(struct scsi_sgl_task_params), GFP_KERNEL); + if (!io_req->sgl_task_params) { + QEDF_ERR(&(qedf->dbg_ctx), + "Failed to allocate sgl_task_params for xid=0x%x\n", + i); + goto mem_err; + } + } + + /* Allocate pool of io_bdts - one for each qedf_ioreq */ + cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *), + GFP_KERNEL); + + if (!cmgr->io_bdt_pool) { + QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n"); + goto mem_err; + } + + for (i = 0; i < num_ios; i++) { + cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt), + GFP_KERNEL); + if (!cmgr->io_bdt_pool[i]) { + QEDF_WARN(&(qedf->dbg_ctx), + "Failed to alloc io_bdt_pool[%d].\n", i); + goto mem_err; + } + } + + for (i = 0; i < num_ios; i++) { + bdt_info = cmgr->io_bdt_pool[i]; + bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge), + &bdt_info->bd_tbl_dma, GFP_KERNEL); + if (!bdt_info->bd_tbl) { + QEDF_WARN(&(qedf->dbg_ctx), + "Failed to alloc bdt_tbl[%d].\n", i); + goto mem_err; + } + } + atomic_set(&cmgr->free_list_cnt, num_ios); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "cmgr->free_list_cnt=%d.\n", + atomic_read(&cmgr->free_list_cnt)); + + return cmgr; + +mem_err: + qedf_cmd_mgr_free(cmgr); + return NULL; +} + +struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type) +{ + struct qedf_ctx *qedf = fcport->qedf; + struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr; + struct qedf_ioreq *io_req = NULL; + struct io_bdt *bd_tbl; + u16 xid; + uint32_t free_sqes; + int i; + unsigned long flags; + + free_sqes = atomic_read(&fcport->free_sqes); + + if (!free_sqes) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Returning NULL, free_sqes=%d.\n ", + free_sqes); + goto out_failed; + } + + /* Limit the number of outstanding R/W tasks */ + if ((atomic_read(&fcport->num_active_ios) >= + NUM_RW_TASKS_PER_CONNECTION)) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Returning NULL, num_active_ios=%d.\n", + atomic_read(&fcport->num_active_ios)); + goto out_failed; + } + + /* Limit global TIDs certain tasks */ + if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Returning NULL, free_list_cnt=%d.\n", + atomic_read(&cmd_mgr->free_list_cnt)); + goto out_failed; + } + + spin_lock_irqsave(&cmd_mgr->lock, flags); + for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) { + io_req = &cmd_mgr->cmds[cmd_mgr->idx]; + cmd_mgr->idx++; + if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS) + cmd_mgr->idx = 0; + + /* Check to make sure command was previously freed */ + if (!io_req->alloc) + break; + } + + if (i == 
FCOE_PARAMS_NUM_TASKS) { + spin_unlock_irqrestore(&cmd_mgr->lock, flags); + goto out_failed; + } + + if (test_bit(QEDF_CMD_DIRTY, &io_req->flags)) + QEDF_ERR(&qedf->dbg_ctx, + "io_req found to be dirty ox_id = 0x%x.\n", + io_req->xid); + + /* Clear any flags now that we've reallocated the xid */ + io_req->flags = 0; + io_req->alloc = 1; + spin_unlock_irqrestore(&cmd_mgr->lock, flags); + + atomic_inc(&fcport->num_active_ios); + atomic_dec(&fcport->free_sqes); + xid = io_req->xid; + atomic_dec(&cmd_mgr->free_list_cnt); + + io_req->cmd_mgr = cmd_mgr; + io_req->fcport = fcport; + + /* Clear any stale sc_cmd back pointer */ + io_req->sc_cmd = NULL; + io_req->lun = -1; + + /* Hold the io_req against deletion */ + kref_init(&io_req->refcount); /* ID: 001 */ + atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE); + + /* Bind io_bdt for this io_req */ + /* Have a static link between io_req and io_bdt_pool */ + bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; + if (bd_tbl == NULL) { + QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid); + kref_put(&io_req->refcount, qedf_release_cmd); + goto out_failed; + } + bd_tbl->io_req = io_req; + io_req->cmd_type = cmd_type; + io_req->tm_flags = 0; + + /* Reset sequence offset data */ + io_req->rx_buf_off = 0; + io_req->tx_buf_off = 0; + io_req->rx_id = 0xffff; /* No OX_ID */ + + return io_req; + +out_failed: + /* Record failure for stats and return NULL to caller */ + qedf->alloc_failures++; + return NULL; +} + +static void qedf_free_mp_resc(struct qedf_ioreq *io_req) +{ + struct qedf_mp_req *mp_req = &(io_req->mp_req); + struct qedf_ctx *qedf = io_req->fcport->qedf; + uint64_t sz = sizeof(struct scsi_sge); + + /* clear tm flags */ + if (mp_req->mp_req_bd) { + dma_free_coherent(&qedf->pdev->dev, sz, + mp_req->mp_req_bd, mp_req->mp_req_bd_dma); + mp_req->mp_req_bd = NULL; + } + if (mp_req->mp_resp_bd) { + dma_free_coherent(&qedf->pdev->dev, sz, + mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma); + mp_req->mp_resp_bd = NULL; + } + if (mp_req->req_buf) { + dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, + mp_req->req_buf, mp_req->req_buf_dma); + mp_req->req_buf = NULL; + } + if (mp_req->resp_buf) { + dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, + mp_req->resp_buf, mp_req->resp_buf_dma); + mp_req->resp_buf = NULL; + } +} + +void qedf_release_cmd(struct kref *ref) +{ + struct qedf_ioreq *io_req = + container_of(ref, struct qedf_ioreq, refcount); + struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr; + struct qedf_rport *fcport = io_req->fcport; + unsigned long flags; + + if (io_req->cmd_type == QEDF_SCSI_CMD) { + QEDF_WARN(&fcport->qedf->dbg_ctx, + "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n", + io_req, io_req->xid); + WARN_ON(io_req->sc_cmd); + } + + if (io_req->cmd_type == QEDF_ELS || + io_req->cmd_type == QEDF_TASK_MGMT_CMD) + qedf_free_mp_resc(io_req); + + atomic_inc(&cmd_mgr->free_list_cnt); + atomic_dec(&fcport->num_active_ios); + atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE); + if (atomic_read(&fcport->num_active_ios) < 0) { + QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n"); + WARN_ON(1); + } + + /* Increment task retry identifier now that the request is released */ + io_req->task_retry_identifier++; + io_req->fcport = NULL; + + clear_bit(QEDF_CMD_DIRTY, &io_req->flags); + io_req->cpu = 0; + spin_lock_irqsave(&cmd_mgr->lock, flags); + io_req->fcport = NULL; + io_req->alloc = 0; + spin_unlock_irqrestore(&cmd_mgr->lock, flags); +} + +static int qedf_map_sg(struct qedf_ioreq *io_req) +{ + struct scsi_cmnd *sc = 
io_req->sc_cmd; + struct Scsi_Host *host = sc->device->host; + struct fc_lport *lport = shost_priv(host); + struct qedf_ctx *qedf = lport_priv(lport); + struct scsi_sge *bd = io_req->bd_tbl->bd_tbl; + struct scatterlist *sg; + int byte_count = 0; + int sg_count = 0; + int bd_count = 0; + u32 sg_len; + u64 addr; + int i = 0; + + sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc), + scsi_sg_count(sc), sc->sc_data_direction); + sg = scsi_sglist(sc); + + io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE; + + if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ) + io_req->sge_type = QEDF_IOREQ_FAST_SGE; + + scsi_for_each_sg(sc, sg, sg_count, i) { + sg_len = (u32)sg_dma_len(sg); + addr = (u64)sg_dma_address(sg); + + /* + * Intermediate s/g element so check if start address + * is page aligned. Only required for writes and only if the + * number of scatter/gather elements is 8 or more. + */ + if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) && + (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE) + io_req->sge_type = QEDF_IOREQ_SLOW_SGE; + + bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr)); + bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr)); + bd[bd_count].sge_len = cpu_to_le32(sg_len); + + bd_count++; + byte_count += sg_len; + } + + /* To catch a case where FAST and SLOW nothing is set, set FAST */ + if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE) + io_req->sge_type = QEDF_IOREQ_FAST_SGE; + + if (byte_count != scsi_bufflen(sc)) + QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != " + "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count, + scsi_bufflen(sc), io_req->xid); + + return bd_count; +} + +static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req) +{ + struct scsi_cmnd *sc = io_req->sc_cmd; + struct scsi_sge *bd = io_req->bd_tbl->bd_tbl; + int bd_count; + + if (scsi_sg_count(sc)) { + bd_count = qedf_map_sg(io_req); + if (bd_count == 0) + return -ENOMEM; + } else { + bd_count = 0; + bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0; + bd[0].sge_len = 0; + } + io_req->bd_tbl->bd_valid = bd_count; + + return 0; +} + +static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req, + struct fcp_cmnd *fcp_cmnd) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + + /* fcp_cmnd is 32 bytes */ + memset(fcp_cmnd, 0, FCP_CMND_LEN); + + /* 8 bytes: SCSI LUN info */ + int_to_scsilun(sc_cmd->device->lun, + (struct scsi_lun *)&fcp_cmnd->fc_lun); + + /* 4 bytes: flag info */ + fcp_cmnd->fc_pri_ta = 0; + fcp_cmnd->fc_tm_flags = io_req->tm_flags; + fcp_cmnd->fc_flags = io_req->io_req_flags; + fcp_cmnd->fc_cmdref = 0; + + /* Populate data direction */ + if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) { + fcp_cmnd->fc_flags |= FCP_CFL_RDDATA; + } else { + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) + fcp_cmnd->fc_flags |= FCP_CFL_WRDATA; + else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) + fcp_cmnd->fc_flags |= FCP_CFL_RDDATA; + } + + fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; + + /* 16 bytes: CDB information */ + if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) + memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); + + /* 4 bytes: FCP data length */ + fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len); +} + +static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, + struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx, + struct fcoe_wqe *sqe) +{ + enum fcoe_task_type task_type; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct io_bdt *bd_tbl = io_req->bd_tbl; + u8 fcp_cmnd[32]; + u32 tmp_fcp_cmnd[8]; + int bd_count = 0; + struct qedf_ctx *qedf = fcport->qedf; + uint16_t cq_idx = 
smp_processor_id() % qedf->num_queues; + struct regpair sense_data_buffer_phys_addr; + u32 tx_io_size = 0; + u32 rx_io_size = 0; + int i, cnt; + + /* Note init_initiator_rw_fcoe_task memsets the task context */ + io_req->task = task_ctx; + memset(task_ctx, 0, sizeof(struct fcoe_task_context)); + memset(io_req->task_params, 0, sizeof(struct fcoe_task_params)); + memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); + + /* Set task type bassed on DMA directio of command */ + if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) { + task_type = FCOE_TASK_TYPE_READ_INITIATOR; + } else { + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + task_type = FCOE_TASK_TYPE_WRITE_INITIATOR; + tx_io_size = io_req->data_xfer_len; + } else { + task_type = FCOE_TASK_TYPE_READ_INITIATOR; + rx_io_size = io_req->data_xfer_len; + } + } + + /* Setup the fields for fcoe_task_params */ + io_req->task_params->context = task_ctx; + io_req->task_params->sqe = sqe; + io_req->task_params->task_type = task_type; + io_req->task_params->tx_io_size = tx_io_size; + io_req->task_params->rx_io_size = rx_io_size; + io_req->task_params->conn_cid = fcport->fw_cid; + io_req->task_params->itid = io_req->xid; + io_req->task_params->cq_rss_number = cq_idx; + io_req->task_params->is_tape_device = fcport->dev_type; + + /* Fill in information for scatter/gather list */ + if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) { + bd_count = bd_tbl->bd_valid; + io_req->sgl_task_params->sgl = bd_tbl->bd_tbl; + io_req->sgl_task_params->sgl_phys_addr.lo = + U64_LO(bd_tbl->bd_tbl_dma); + io_req->sgl_task_params->sgl_phys_addr.hi = + U64_HI(bd_tbl->bd_tbl_dma); + io_req->sgl_task_params->num_sges = bd_count; + io_req->sgl_task_params->total_buffer_size = + scsi_bufflen(io_req->sc_cmd); + if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE) + io_req->sgl_task_params->small_mid_sge = 1; + else + io_req->sgl_task_params->small_mid_sge = 0; + } + + /* Fill in physical address of sense buffer */ + sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma); + sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma); + + /* fill FCP_CMND IU */ + qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd); + + /* Swap fcp_cmnd since FC is big endian */ + cnt = sizeof(struct fcp_cmnd) / sizeof(u32); + for (i = 0; i < cnt; i++) { + tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]); + } + memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd)); + + init_initiator_rw_fcoe_task(io_req->task_params, + io_req->sgl_task_params, + sense_data_buffer_phys_addr, + io_req->task_retry_identifier, fcp_cmnd); + + /* Increment SGL type counters */ + if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE) + qedf->slow_sge_ios++; + else + qedf->fast_sge_ios++; +} + +void qedf_init_mp_task(struct qedf_ioreq *io_req, + struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) +{ + struct qedf_mp_req *mp_req = &(io_req->mp_req); + struct qedf_rport *fcport = io_req->fcport; + struct qedf_ctx *qedf = io_req->fcport->qedf; + struct fc_frame_header *fc_hdr; + struct fcoe_tx_mid_path_params task_fc_hdr; + struct scsi_sgl_task_params tx_sgl_task_params; + struct scsi_sgl_task_params rx_sgl_task_params; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Initializing MP task for cmd_type=%d\n", + io_req->cmd_type); + + qedf->control_requests++; + + memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); + memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); + memset(task_ctx, 0, sizeof(struct fcoe_task_context)); + memset(&task_fc_hdr, 0, sizeof(struct 
fcoe_tx_mid_path_params)); + + /* Setup the task from io_req for easy reference */ + io_req->task = task_ctx; + + /* Setup the fields for fcoe_task_params */ + io_req->task_params->context = task_ctx; + io_req->task_params->sqe = sqe; + io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH; + io_req->task_params->tx_io_size = io_req->data_xfer_len; + /* rx_io_size tells the f/w how large a response buffer we have */ + io_req->task_params->rx_io_size = PAGE_SIZE; + io_req->task_params->conn_cid = fcport->fw_cid; + io_req->task_params->itid = io_req->xid; + /* Return middle path commands on CQ 0 */ + io_req->task_params->cq_rss_number = 0; + io_req->task_params->is_tape_device = fcport->dev_type; + + fc_hdr = &(mp_req->req_fc_hdr); + /* Set OX_ID and RX_ID based on driver task id */ + fc_hdr->fh_ox_id = io_req->xid; + fc_hdr->fh_rx_id = htons(0xffff); + + /* Set up FC header information */ + task_fc_hdr.parameter = fc_hdr->fh_parm_offset; + task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl; + task_fc_hdr.type = fc_hdr->fh_type; + task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl; + task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl; + task_fc_hdr.rx_id = fc_hdr->fh_rx_id; + task_fc_hdr.ox_id = fc_hdr->fh_ox_id; + + /* Set up s/g list parameters for request buffer */ + tx_sgl_task_params.sgl = mp_req->mp_req_bd; + tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma); + tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma); + tx_sgl_task_params.num_sges = 1; + /* Set PAGE_SIZE for now since sg element is that size ??? */ + tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len; + tx_sgl_task_params.small_mid_sge = 0; + + /* Set up s/g list parameters for request buffer */ + rx_sgl_task_params.sgl = mp_req->mp_resp_bd; + rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma); + rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma); + rx_sgl_task_params.num_sges = 1; + /* Set PAGE_SIZE for now since sg element is that size ??? */ + rx_sgl_task_params.total_buffer_size = PAGE_SIZE; + rx_sgl_task_params.small_mid_sge = 0; + + + /* + * Last arg is 0 as previous code did not set that we wanted the + * fc header information. + */ + init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params, + &task_fc_hdr, + &tx_sgl_task_params, + &rx_sgl_task_params, 0); +} + +/* Presumed that fcport->rport_lock is held */ +u16 qedf_get_sqe_idx(struct qedf_rport *fcport) +{ + uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe)); + u16 rval; + + rval = fcport->sq_prod_idx; + + /* Adjust ring index */ + fcport->sq_prod_idx++; + fcport->fw_sq_prod_idx++; + if (fcport->sq_prod_idx == total_sqe) + fcport->sq_prod_idx = 0; + + return rval; +} + +void qedf_ring_doorbell(struct qedf_rport *fcport) +{ + struct fcoe_db_data dbell = { 0 }; + + dbell.agg_flags = 0; + + dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT; + dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT; + dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD << + FCOE_DB_DATA_AGG_VAL_SEL_SHIFT; + + dbell.sq_prod = fcport->fw_sq_prod_idx; + /* wmb makes sure that the BDs data is updated before updating the + * producer, otherwise FW may read old data from the BDs. + */ + wmb(); + barrier(); + writel(*(u32 *)&dbell, fcport->p_doorbell); + /* + * Fence required to flush the write combined buffer, since another + * CPU may write to the same doorbell address and data may be lost + * due to relaxed order nature of write combined bar. 
+ */ + wmb(); +} + +static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req, + int8_t direction) +{ + struct qedf_ctx *qedf = fcport->qedf; + struct qedf_io_log *io_log; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + unsigned long flags; + + spin_lock_irqsave(&qedf->io_trace_lock, flags); + + io_log = &qedf->io_trace_buf[qedf->io_trace_idx]; + io_log->direction = direction; + io_log->task_id = io_req->xid; + io_log->port_id = fcport->rdata->ids.port_id; + io_log->lun = sc_cmd->device->lun; + io_log->op = sc_cmd->cmnd[0]; + io_log->lba[0] = sc_cmd->cmnd[2]; + io_log->lba[1] = sc_cmd->cmnd[3]; + io_log->lba[2] = sc_cmd->cmnd[4]; + io_log->lba[3] = sc_cmd->cmnd[5]; + io_log->bufflen = scsi_bufflen(sc_cmd); + io_log->sg_count = scsi_sg_count(sc_cmd); + io_log->result = sc_cmd->result; + io_log->jiffies = jiffies; + io_log->refcount = kref_read(&io_req->refcount); + + if (direction == QEDF_IO_TRACE_REQ) { + /* For requests we only care abot the submission CPU */ + io_log->req_cpu = io_req->cpu; + io_log->int_cpu = 0; + io_log->rsp_cpu = 0; + } else if (direction == QEDF_IO_TRACE_RSP) { + io_log->req_cpu = io_req->cpu; + io_log->int_cpu = io_req->int_cpu; + io_log->rsp_cpu = smp_processor_id(); + } + + io_log->sge_type = io_req->sge_type; + + qedf->io_trace_idx++; + if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE) + qedf->io_trace_idx = 0; + + spin_unlock_irqrestore(&qedf->io_trace_lock, flags); +} + +int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct Scsi_Host *host = sc_cmd->device->host; + struct fc_lport *lport = shost_priv(host); + struct qedf_ctx *qedf = lport_priv(lport); + struct fcoe_task_context *task_ctx; + u16 xid; + struct fcoe_wqe *sqe; + u16 sqe_idx; + + /* Initialize rest of io_req fileds */ + io_req->data_xfer_len = scsi_bufflen(sc_cmd); + qedf_priv(sc_cmd)->io_req = io_req; + io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */ + + /* Record which cpu this request is associated with */ + io_req->cpu = smp_processor_id(); + + if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { + io_req->io_req_flags = QEDF_READ; + qedf->input_requests++; + } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + io_req->io_req_flags = QEDF_WRITE; + qedf->output_requests++; + } else { + io_req->io_req_flags = 0; + qedf->control_requests++; + } + + xid = io_req->xid; + + /* Build buffer descriptor list for firmware from sg list */ + if (qedf_build_bd_list_from_sg(io_req)) { + QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n"); + /* Release cmd will release io_req, but sc_cmd is assigned */ + io_req->sc_cmd = NULL; + kref_put(&io_req->refcount, qedf_release_cmd); + return -EAGAIN; + } + + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) || + test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n"); + /* Release cmd will release io_req, but sc_cmd is assigned */ + io_req->sc_cmd = NULL; + kref_put(&io_req->refcount, qedf_release_cmd); + return -EINVAL; + } + + /* Record LUN number for later use if we need them */ + io_req->lun = (int)sc_cmd->device->lun; + + /* Obtain free SQE */ + sqe_idx = qedf_get_sqe_idx(fcport); + sqe = &fcport->sq[sqe_idx]; + memset(sqe, 0, sizeof(struct fcoe_wqe)); + + /* Get the task context */ + task_ctx = qedf_get_task_mem(&qedf->tasks, xid); + if (!task_ctx) { + QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n", + xid); + /* Release cmd will release io_req, but 
sc_cmd is assigned */ + io_req->sc_cmd = NULL; + kref_put(&io_req->refcount, qedf_release_cmd); + return -EINVAL; + } + + qedf_init_task(fcport, lport, io_req, task_ctx, sqe); + + /* Ring doorbell */ + qedf_ring_doorbell(fcport); + + /* Set that command is with the firmware now */ + set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); + + if (qedf_io_tracing && io_req->sc_cmd) + qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ); + + return false; +} + +int +qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd) +{ + struct fc_lport *lport = shost_priv(host); + struct qedf_ctx *qedf = lport_priv(lport); + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct qedf_rport *fcport; + struct qedf_ioreq *io_req; + int rc = 0; + int rval; + unsigned long flags = 0; + int num_sgs = 0; + + num_sgs = scsi_sg_count(sc_cmd); + if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) { + QEDF_ERR(&qedf->dbg_ctx, + "Number of SG elements %d exceeds what hardware limitation of %d.\n", + num_sgs, QEDF_MAX_BDS_PER_CMD); + sc_cmd->result = DID_ERROR; + scsi_done(sc_cmd); + return 0; + } + + if (test_bit(QEDF_UNLOADING, &qedf->flags) || + test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Returning DNC as unloading or stop io, flags 0x%lx.\n", + qedf->flags); + sc_cmd->result = DID_NO_CONNECT << 16; + scsi_done(sc_cmd); + return 0; + } + + if (!qedf->pdev->msix_enabled) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n", + sc_cmd); + sc_cmd->result = DID_NO_CONNECT << 16; + scsi_done(sc_cmd); + return 0; + } + + rval = fc_remote_port_chkready(rport); + if (rval) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n", + rval, rport->port_id); + sc_cmd->result = rval; + scsi_done(sc_cmd); + return 0; + } + + /* Retry command if we are doing a qed drain operation */ + if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n"); + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd; + } + + if (lport->state != LPORT_ST_READY || + atomic_read(&qedf->link_state) != QEDF_LINK_UP) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n"); + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd; + } + + /* rport and tgt are allocated together, so tgt should be non-NULL */ + fcport = (struct qedf_rport *)&rp[1]; + + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) || + test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { + /* + * Session is not offloaded yet. Let SCSI-ml retry + * the command. 
+ */ + rc = SCSI_MLQUEUE_TARGET_BUSY; + goto exit_qcmd; + } + + atomic_inc(&fcport->ios_to_queue); + + if (fcport->retry_delay_timestamp) { + /* Take fcport->rport_lock for resetting the delay_timestamp */ + spin_lock_irqsave(&fcport->rport_lock, flags); + if (time_after(jiffies, fcport->retry_delay_timestamp)) { + fcport->retry_delay_timestamp = 0; + } else { + spin_unlock_irqrestore(&fcport->rport_lock, flags); + /* If retry_delay timer is active, flow off the ML */ + rc = SCSI_MLQUEUE_TARGET_BUSY; + atomic_dec(&fcport->ios_to_queue); + goto exit_qcmd; + } + spin_unlock_irqrestore(&fcport->rport_lock, flags); + } + + io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD); + if (!io_req) { + rc = SCSI_MLQUEUE_HOST_BUSY; + atomic_dec(&fcport->ios_to_queue); + goto exit_qcmd; + } + + io_req->sc_cmd = sc_cmd; + + /* Take fcport->rport_lock for posting to fcport send queue */ + spin_lock_irqsave(&fcport->rport_lock, flags); + if (qedf_post_io_req(fcport, io_req)) { + QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n"); + /* Return SQE to pool */ + atomic_inc(&fcport->free_sqes); + rc = SCSI_MLQUEUE_HOST_BUSY; + } + spin_unlock_irqrestore(&fcport->rport_lock, flags); + atomic_dec(&fcport->ios_to_queue); + +exit_qcmd: + return rc; +} + +static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req, + struct fcoe_cqe_rsp_info *fcp_rsp) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct qedf_ctx *qedf = io_req->fcport->qedf; + u8 rsp_flags = fcp_rsp->rsp_flags.flags; + int fcp_sns_len = 0; + int fcp_rsp_len = 0; + uint8_t *rsp_info, *sense_data; + + io_req->fcp_status = FC_GOOD; + io_req->fcp_resid = 0; + if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER | + FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER)) + io_req->fcp_resid = fcp_rsp->fcp_resid; + + io_req->scsi_comp_flags = rsp_flags; + io_req->cdb_status = fcp_rsp->scsi_status_code; + + if (rsp_flags & + FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) + fcp_rsp_len = fcp_rsp->fcp_rsp_len; + + if (rsp_flags & + FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) + fcp_sns_len = fcp_rsp->fcp_sns_len; + + io_req->fcp_rsp_len = fcp_rsp_len; + io_req->fcp_sns_len = fcp_sns_len; + rsp_info = sense_data = io_req->sense_buffer; + + /* fetch fcp_rsp_code */ + if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) { + /* Only for task management function */ + io_req->fcp_rsp_code = rsp_info[3]; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "fcp_rsp_code = %d\n", io_req->fcp_rsp_code); + /* Adjust sense-data location. 
*/ + sense_data += fcp_rsp_len; + } + + if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Truncating sense buffer\n"); + fcp_sns_len = SCSI_SENSE_BUFFERSIZE; + } + + /* The sense buffer can be NULL for TMF commands */ + if (sc_cmd->sense_buffer) { + memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + if (fcp_sns_len) + memcpy(sc_cmd->sense_buffer, sense_data, + fcp_sns_len); + } +} + +static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req) +{ + struct scsi_cmnd *sc = io_req->sc_cmd; + + if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) { + dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc), + scsi_sg_count(sc), sc->sc_data_direction); + io_req->bd_tbl->bd_valid = 0; + } +} + +void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + struct scsi_cmnd *sc_cmd; + struct fcoe_cqe_rsp_info *fcp_rsp; + struct qedf_rport *fcport; + int refcount; + u16 scope, qualifier = 0; + u8 fw_residual_flag = 0; + unsigned long flags = 0; + u16 chk_scope = 0; + + if (!io_req) + return; + if (!cqe) + return; + + if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || + test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) || + test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) { + QEDF_ERR(&qedf->dbg_ctx, + "io_req xid=0x%x already in cleanup or abort processing or already completed.\n", + io_req->xid); + return; + } + + sc_cmd = io_req->sc_cmd; + fcp_rsp = &cqe->cqe_info.rsp_info; + + if (!sc_cmd) { + QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n"); + return; + } + + if (!qedf_priv(sc_cmd)->io_req) { + QEDF_WARN(&(qedf->dbg_ctx), + "io_req is NULL, returned in another context.\n"); + return; + } + + if (!sc_cmd->device) { + QEDF_ERR(&qedf->dbg_ctx, + "Device for sc_cmd %p is NULL.\n", sc_cmd); + return; + } + + if (!scsi_cmd_to_rq(sc_cmd)->q) { + QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request " + "is not valid, sc_cmd=%p.\n", sc_cmd); + return; + } + + fcport = io_req->fcport; + + /* + * When flush is active, let the cmds be completed from the cleanup + * context + */ + if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || + (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) && + sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Dropping good completion xid=0x%x as fcport is flushing", + io_req->xid); + return; + } + + qedf_parse_fcp_rsp(io_req, fcp_rsp); + + qedf_unmap_sg_list(qedf, io_req); + + /* Check for FCP transport error */ + if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) { + QEDF_ERR(&(qedf->dbg_ctx), + "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d " + "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len, + io_req->fcp_rsp_code); + sc_cmd->result = DID_BUS_BUSY << 16; + goto out; + } + + fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags, + FCOE_CQE_RSP_INFO_FW_UNDERRUN); + if (fw_residual_flag) { + QEDF_ERR(&qedf->dbg_ctx, + "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n", + io_req->xid, fcp_rsp->rsp_flags.flags, + io_req->fcp_resid, + cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2], + sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]); + + if (io_req->cdb_status == 0) + sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status; + else + sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + + /* + * Set resid to the whole buffer length so we won't try to resue + * any previously data. 
+ */ + scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); + goto out; + } + + switch (io_req->fcp_status) { + case FC_GOOD: + if (io_req->cdb_status == 0) { + /* Good I/O completion */ + sc_cmd->result = DID_OK << 16; + } else { + refcount = kref_read(&io_req->refcount); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "%d:0:%d:%lld xid=0x%0x op=0x%02x " + "lba=%02x%02x%02x%02x cdb_status=%d " + "fcp_resid=0x%x refcount=%d.\n", + qedf->lport->host->host_no, sc_cmd->device->id, + sc_cmd->device->lun, io_req->xid, + sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3], + sc_cmd->cmnd[4], sc_cmd->cmnd[5], + io_req->cdb_status, io_req->fcp_resid, + refcount); + sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + + if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL || + io_req->cdb_status == SAM_STAT_BUSY) { + /* + * Check whether we need to set retry_delay at + * all based on retry_delay module parameter + * and the status qualifier. + */ + + /* Upper 2 bits */ + scope = fcp_rsp->retry_delay_timer & 0xC000; + /* Lower 14 bits */ + qualifier = fcp_rsp->retry_delay_timer & 0x3FFF; + + if (qedf_retry_delay) + chk_scope = 1; + /* Record stats */ + if (io_req->cdb_status == + SAM_STAT_TASK_SET_FULL) + qedf->task_set_fulls++; + else + qedf->busy++; + } + } + if (io_req->fcp_resid) + scsi_set_resid(sc_cmd, io_req->fcp_resid); + + if (chk_scope == 1) { + if ((scope == 1 || scope == 2) && + (qualifier > 0 && qualifier <= 0x3FEF)) { + /* Check we don't go over the max */ + if (qualifier > QEDF_RETRY_DELAY_MAX) { + qualifier = QEDF_RETRY_DELAY_MAX; + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "qualifier = %d\n", + (fcp_rsp->retry_delay_timer & + 0x3FFF)); + } + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Scope = %d and qualifier = %d", + scope, qualifier); + /* Take fcport->rport_lock to + * update the retry_delay_timestamp + */ + spin_lock_irqsave(&fcport->rport_lock, flags); + fcport->retry_delay_timestamp = + jiffies + (qualifier * HZ / 10); + spin_unlock_irqrestore(&fcport->rport_lock, + flags); + + } else { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "combination of scope = %d and qualifier = %d is not handled in qedf.\n", + scope, qualifier); + } + } + break; + default: + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n", + io_req->fcp_status); + break; + } + +out: + if (qedf_io_tracing) + qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP); + + /* + * We wait till the end of the function to clear the + * outstanding bit in case we need to send an abort + */ + clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); + + io_req->sc_cmd = NULL; + qedf_priv(sc_cmd)->io_req = NULL; + scsi_done(sc_cmd); + kref_put(&io_req->refcount, qedf_release_cmd); +} + +/* Return a SCSI command in some other context besides a normal completion */ +void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, + int result) +{ + struct scsi_cmnd *sc_cmd; + int refcount; + + if (!io_req) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n"); + return; + } + + if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "io_req:%p scsi_done handling already done\n", + io_req); + return; + } + + /* + * We will be done with this command after this call so clear the + * outstanding bit. 
+ */ + clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); + + sc_cmd = io_req->sc_cmd; + + if (!sc_cmd) { + QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n"); + return; + } + + if (!virt_addr_valid(sc_cmd)) { + QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd); + goto bad_scsi_ptr; + } + + if (!qedf_priv(sc_cmd)->io_req) { + QEDF_WARN(&(qedf->dbg_ctx), + "io_req is NULL, returned in another context.\n"); + return; + } + + if (!sc_cmd->device) { + QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n", + sc_cmd); + goto bad_scsi_ptr; + } + + if (!virt_addr_valid(sc_cmd->device)) { + QEDF_ERR(&qedf->dbg_ctx, + "Device pointer for sc_cmd %p is bad.\n", sc_cmd); + goto bad_scsi_ptr; + } + + if (!sc_cmd->sense_buffer) { + QEDF_ERR(&qedf->dbg_ctx, + "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n", + sc_cmd); + goto bad_scsi_ptr; + } + + if (!virt_addr_valid(sc_cmd->sense_buffer)) { + QEDF_ERR(&qedf->dbg_ctx, + "sc_cmd->sense_buffer for sc_cmd %p is bad.\n", + sc_cmd); + goto bad_scsi_ptr; + } + + qedf_unmap_sg_list(qedf, io_req); + + sc_cmd->result = result << 16; + refcount = kref_read(&io_req->refcount); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing " + "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, " + "allowed=%d retries=%d refcount=%d.\n", + qedf->lport->host->host_no, sc_cmd->device->id, + sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0], + sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4], + sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries, + refcount); + + /* + * Set resid to the whole buffer length so we won't try to resue any + * previously read data + */ + scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); + + if (qedf_io_tracing) + qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP); + + io_req->sc_cmd = NULL; + qedf_priv(sc_cmd)->io_req = NULL; + scsi_done(sc_cmd); + kref_put(&io_req->refcount, qedf_release_cmd); + return; + +bad_scsi_ptr: + /* + * Clear the io_req->sc_cmd backpointer so we don't try to process + * this again + */ + io_req->sc_cmd = NULL; + kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 001 */ +} + +/* + * Handle warning type CQE completions. This is mainly used for REC timer + * popping. 
+ */ +void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + int rval, i; + struct qedf_rport *fcport = io_req->fcport; + u64 err_warn_bit_map; + u8 err_warn = 0xff; + + if (!cqe) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "cqe is NULL for io_req %p xid=0x%x\n", + io_req, io_req->xid); + return; + } + + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, " + "xid=0x%x\n", io_req->xid); + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), + "err_warn_bitmap=%08x:%08x\n", + le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi), + le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo)); + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " + "rx_buff_off=%08x, rx_id=%04x\n", + le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off), + le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off), + le32_to_cpu(cqe->cqe_info.err_info.rx_id)); + + /* Normalize the error bitmap value to an just an unsigned int */ + err_warn_bit_map = (u64) + ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) | + (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo; + for (i = 0; i < 64; i++) { + if (err_warn_bit_map & (u64)((u64)1 << i)) { + err_warn = i; + break; + } + } + + /* Check if REC TOV expired if this is a tape device */ + if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) { + if (err_warn == + FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) { + QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n"); + if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) { + io_req->rx_buf_off = + cqe->cqe_info.err_info.rx_buf_off; + io_req->tx_buf_off = + cqe->cqe_info.err_info.tx_buf_off; + io_req->rx_id = cqe->cqe_info.err_info.rx_id; + rval = qedf_send_rec(io_req); + /* + * We only want to abort the io_req if we + * can't queue the REC command as we want to + * keep the exchange open for recovery. 
+ */ + if (rval) + goto send_abort; + } + return; + } + } + +send_abort: + init_completion(&io_req->abts_done); + rval = qedf_initiate_abts(io_req, true); + if (rval) + QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); +} + +/* Cleanup a command when we receive an error detection completion */ +void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + int rval; + + if (io_req == NULL) { + QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n"); + return; + } + + if (io_req->fcport == NULL) { + QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n"); + return; + } + + if (!cqe) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "cqe is NULL for io_req %p\n", io_req); + return; + } + + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, " + "xid=0x%x\n", io_req->xid); + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), + "err_warn_bitmap=%08x:%08x\n", + le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi), + le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo)); + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " + "rx_buff_off=%08x, rx_id=%04x\n", + le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off), + le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off), + le32_to_cpu(cqe->cqe_info.err_info.rx_id)); + + /* When flush is active, let the cmds be flushed out from the cleanup context */ + if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) || + (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) && + io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) { + QEDF_ERR(&qedf->dbg_ctx, + "Dropping EQE for xid=0x%x as fcport is flushing", + io_req->xid); + return; + } + + if (qedf->stop_io_on_error) { + qedf_stop_all_io(qedf); + return; + } + + init_completion(&io_req->abts_done); + rval = qedf_initiate_abts(io_req, true); + if (rval) + QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); +} + +static void qedf_flush_els_req(struct qedf_ctx *qedf, + struct qedf_ioreq *els_req) +{ + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid, + kref_read(&els_req->refcount)); + + /* + * Need to distinguish this from a timeout when calling the + * els_req->cb_func. + */ + els_req->event = QEDF_IOREQ_EV_ELS_FLUSH; + + clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags); + + /* Cancel the timer */ + cancel_delayed_work_sync(&els_req->timeout_work); + + /* Call callback function to complete command */ + if (els_req->cb_func && els_req->cb_arg) { + els_req->cb_func(els_req->cb_arg); + els_req->cb_arg = NULL; + } + + /* Release kref for original initiate_els */ + kref_put(&els_req->refcount, qedf_release_cmd); +} + +/* A value of -1 for lun is a wild card that means flush all + * active SCSI I/Os for the target. 
+ */ +void qedf_flush_active_ios(struct qedf_rport *fcport, int lun) +{ + struct qedf_ioreq *io_req; + struct qedf_ctx *qedf; + struct qedf_cmd_mgr *cmd_mgr; + int i, rc; + unsigned long flags; + int flush_cnt = 0; + int wait_cnt = 100; + int refcount = 0; + + if (!fcport) { + QEDF_ERR(NULL, "fcport is NULL\n"); + return; + } + + /* Check that fcport is still offloaded */ + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); + return; + } + + qedf = fcport->qedf; + + if (!qedf) { + QEDF_ERR(NULL, "qedf is NULL.\n"); + return; + } + + /* Only wait for all commands to be queued in the Upload context */ + if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) && + (lun == -1)) { + while (atomic_read(&fcport->ios_to_queue)) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Waiting for %d I/Os to be queued\n", + atomic_read(&fcport->ios_to_queue)); + if (wait_cnt == 0) { + QEDF_ERR(NULL, + "%d IOs request could not be queued\n", + atomic_read(&fcport->ios_to_queue)); + } + msleep(20); + wait_cnt--; + } + } + + cmd_mgr = qedf->cmd_mgr; + + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n", + atomic_read(&fcport->num_active_ios), fcport, + fcport->rdata->ids.port_id, fcport->rport->scsi_target_id); + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n"); + + mutex_lock(&qedf->flush_mutex); + if (lun == -1) { + set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags); + } else { + set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags); + fcport->lun_reset_lun = lun; + } + + for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) { + io_req = &cmd_mgr->cmds[i]; + + if (!io_req) + continue; + if (!io_req->fcport) + continue; + + spin_lock_irqsave(&cmd_mgr->lock, flags); + + if (io_req->alloc) { + if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) { + if (io_req->cmd_type == QEDF_SCSI_CMD) + QEDF_ERR(&qedf->dbg_ctx, + "Allocated but not queued, xid=0x%x\n", + io_req->xid); + } + spin_unlock_irqrestore(&cmd_mgr->lock, flags); + } else { + spin_unlock_irqrestore(&cmd_mgr->lock, flags); + continue; + } + + if (io_req->fcport != fcport) + continue; + + /* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response, + * but RRQ is still pending. + * Workaround: Within qedf_send_rrq, we check if the fcport is + * NULL, and we drop the ref on the io_req to clean it up. + */ + if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) { + refcount = kref_read(&io_req->refcount); + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n", + io_req->xid, io_req->cmd_type, refcount); + /* If RRQ work has been queue, try to cancel it and + * free the io_req + */ + if (atomic_read(&io_req->state) == + QEDFC_CMD_ST_RRQ_WAIT) { + if (cancel_delayed_work_sync + (&io_req->rrq_work)) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Putting reference for pending RRQ work xid=0x%x.\n", + io_req->xid); + /* ID: 003 */ + kref_put(&io_req->refcount, + qedf_release_cmd); + } + } + continue; + } + + /* Only consider flushing ELS during target reset */ + if (io_req->cmd_type == QEDF_ELS && + lun == -1) { + rc = kref_get_unless_zero(&io_req->refcount); + if (!rc) { + QEDF_ERR(&(qedf->dbg_ctx), + "Could not get kref for ELS io_req=0x%p xid=0x%x.\n", + io_req, io_req->xid); + continue; + } + qedf_initiate_cleanup(io_req, false); + flush_cnt++; + qedf_flush_els_req(qedf, io_req); + + /* + * Release the kref and go back to the top of the + * loop. 
+ */ + goto free_cmd; + } + + if (io_req->cmd_type == QEDF_ABTS) { + /* ID: 004 */ + rc = kref_get_unless_zero(&io_req->refcount); + if (!rc) { + QEDF_ERR(&(qedf->dbg_ctx), + "Could not get kref for abort io_req=0x%p xid=0x%x.\n", + io_req, io_req->xid); + continue; + } + if (lun != -1 && io_req->lun != lun) + goto free_cmd; + + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Flushing abort xid=0x%x.\n", io_req->xid); + + if (cancel_delayed_work_sync(&io_req->rrq_work)) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Putting ref for cancelled RRQ work xid=0x%x.\n", + io_req->xid); + kref_put(&io_req->refcount, qedf_release_cmd); + } + + if (cancel_delayed_work_sync(&io_req->timeout_work)) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Putting ref for cancelled tmo work xid=0x%x.\n", + io_req->xid); + qedf_initiate_cleanup(io_req, true); + /* Notify eh_abort handler that ABTS is + * complete + */ + complete(&io_req->abts_done); + clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags); + /* ID: 002 */ + kref_put(&io_req->refcount, qedf_release_cmd); + } + flush_cnt++; + goto free_cmd; + } + + if (!io_req->sc_cmd) + continue; + if (!io_req->sc_cmd->device) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Device backpointer NULL for sc_cmd=%p.\n", + io_req->sc_cmd); + /* Put reference for non-existent scsi_cmnd */ + io_req->sc_cmd = NULL; + qedf_initiate_cleanup(io_req, false); + kref_put(&io_req->refcount, qedf_release_cmd); + continue; + } + if (lun > -1) { + if (io_req->lun != lun) + continue; + } + + /* + * Use kref_get_unless_zero in the unlikely case the command + * we're about to flush was completed in the normal SCSI path + */ + rc = kref_get_unless_zero(&io_req->refcount); + if (!rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for " + "io_req=0x%p xid=0x%x\n", io_req, io_req->xid); + continue; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Cleanup xid=0x%x.\n", io_req->xid); + flush_cnt++; + + /* Cleanup task and return I/O mid-layer */ + qedf_initiate_cleanup(io_req, true); + +free_cmd: + kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 004 */ + } + + wait_cnt = 60; + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Flushed 0x%x I/Os, active=0x%x.\n", + flush_cnt, atomic_read(&fcport->num_active_ios)); + /* Only wait for all commands to complete in the Upload context */ + if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) && + (lun == -1)) { + while (atomic_read(&fcport->num_active_ios)) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n", + flush_cnt, + atomic_read(&fcport->num_active_ios), + wait_cnt); + if (wait_cnt == 0) { + QEDF_ERR(&qedf->dbg_ctx, + "Flushed %d I/Os, active=%d.\n", + flush_cnt, + atomic_read(&fcport->num_active_ios)); + for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) { + io_req = &cmd_mgr->cmds[i]; + if (io_req->fcport && + io_req->fcport == fcport) { + refcount = + kref_read(&io_req->refcount); + set_bit(QEDF_CMD_DIRTY, + &io_req->flags); + QEDF_ERR(&qedf->dbg_ctx, + "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n", + io_req, io_req->xid, + io_req->flags, + io_req->sc_cmd, + refcount, + io_req->cmd_type); + } + } + WARN_ON(1); + break; + } + msleep(500); + wait_cnt--; + } + } + + clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags); + clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags); + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n"); + mutex_unlock(&qedf->flush_mutex); +} + +/* + * Initiate a ABTS middle path command. 
Note that we don't have to initialize + * the task context for an ABTS task. + */ +int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts) +{ + struct fc_lport *lport; + struct qedf_rport *fcport = io_req->fcport; + struct fc_rport_priv *rdata; + struct qedf_ctx *qedf; + u16 xid; + int rc = 0; + unsigned long flags; + struct fcoe_wqe *sqe; + u16 sqe_idx; + int refcount = 0; + + /* Sanity check qedf_rport before dereferencing any pointers */ + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(NULL, "tgt not offloaded\n"); + rc = 1; + goto out; + } + + qedf = fcport->qedf; + rdata = fcport->rdata; + + if (!rdata || !kref_get_unless_zero(&rdata->kref)) { + QEDF_ERR(&qedf->dbg_ctx, "stale rport\n"); + rc = 1; + goto out; + } + + lport = qedf->lport; + + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n"); + rc = 1; + goto drop_rdata_kref; + } + + if (atomic_read(&qedf->link_down_tmo_valid) > 0) { + QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n"); + rc = 1; + goto drop_rdata_kref; + } + + /* Ensure room on SQ */ + if (!atomic_read(&fcport->free_sqes)) { + QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n"); + rc = 1; + goto drop_rdata_kref; + } + + if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n"); + rc = 1; + goto drop_rdata_kref; + } + + spin_lock_irqsave(&fcport->rport_lock, flags); + if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || + test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) || + test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) { + QEDF_ERR(&qedf->dbg_ctx, + "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n", + io_req->xid, io_req->sc_cmd); + rc = 1; + spin_unlock_irqrestore(&fcport->rport_lock, flags); + goto drop_rdata_kref; + } + + /* Set the command type to abort */ + io_req->cmd_type = QEDF_ABTS; + spin_unlock_irqrestore(&fcport->rport_lock, flags); + + kref_get(&io_req->refcount); + + xid = io_req->xid; + qedf->control_requests++; + qedf->packet_aborts++; + + io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts; + + set_bit(QEDF_CMD_IN_ABORT, &io_req->flags); + refcount = kref_read(&io_req->refcount); + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, + "ABTS io_req xid = 0x%x refcount=%d\n", + xid, refcount); + + qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT); + + spin_lock_irqsave(&fcport->rport_lock, flags); + + sqe_idx = qedf_get_sqe_idx(fcport); + sqe = &fcport->sq[sqe_idx]; + memset(sqe, 0, sizeof(struct fcoe_wqe)); + io_req->task_params->sqe = sqe; + + init_initiator_abort_fcoe_task(io_req->task_params); + qedf_ring_doorbell(fcport); + + spin_unlock_irqrestore(&fcport->rport_lock, flags); + +drop_rdata_kref: + kref_put(&rdata->kref, fc_rport_destroy); +out: + return rc; +} + +void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + uint32_t r_ctl; + int rc; + struct qedf_rport *fcport = io_req->fcport; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = " + "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type); + + r_ctl = cqe->cqe_info.abts_info.r_ctl; + + /* This was added at a point when we were scheduling abts_compl & + * cleanup_compl on different CPUs and there was a possibility of + * the io_req to be freed from the other context before we got here. 
+ */ + if (!fcport) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Dropping ABTS completion xid=0x%x as fcport is NULL", + io_req->xid); + return; + } + + /* + * When flush is active, let the cmds be completed from the cleanup + * context + */ + if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || + test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Dropping ABTS completion xid=0x%x as fcport is flushing", + io_req->xid); + return; + } + + if (!cancel_delayed_work(&io_req->timeout_work)) { + QEDF_ERR(&qedf->dbg_ctx, + "Wasn't able to cancel abts timeout work.\n"); + } + + switch (r_ctl) { + case FC_RCTL_BA_ACC: + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, + "ABTS response - ACC Send RRQ after R_A_TOV\n"); + io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS; + rc = kref_get_unless_zero(&io_req->refcount); /* ID: 003 */ + if (!rc) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, + "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n", + io_req->xid); + return; + } + /* + * Dont release this cmd yet. It will be relesed + * after we get RRQ response + */ + queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work, + msecs_to_jiffies(qedf->lport->r_a_tov)); + atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT); + break; + /* For error cases let the cleanup return the command */ + case FC_RCTL_BA_RJT: + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, + "ABTS response - RJT\n"); + io_req->event = QEDF_IOREQ_EV_ABORT_FAILED; + break; + default: + QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n"); + break; + } + + clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags); + + if (io_req->sc_cmd) { + if (!io_req->return_scsi_cmd_on_abts) + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, + "Not call scsi_done for xid=0x%x.\n", + io_req->xid); + if (io_req->return_scsi_cmd_on_abts) + qedf_scsi_done(qedf, io_req, DID_ERROR); + } + + /* Notify eh_abort handler that ABTS is complete */ + complete(&io_req->abts_done); + + kref_put(&io_req->refcount, qedf_release_cmd); +} + +int qedf_init_mp_req(struct qedf_ioreq *io_req) +{ + struct qedf_mp_req *mp_req; + struct scsi_sge *mp_req_bd; + struct scsi_sge *mp_resp_bd; + struct qedf_ctx *qedf = io_req->fcport->qedf; + dma_addr_t addr; + uint64_t sz; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n"); + + mp_req = (struct qedf_mp_req *)&(io_req->mp_req); + memset(mp_req, 0, sizeof(struct qedf_mp_req)); + + if (io_req->cmd_type != QEDF_ELS) { + mp_req->req_len = sizeof(struct fcp_cmnd); + io_req->data_xfer_len = mp_req->req_len; + } else + mp_req->req_len = io_req->data_xfer_len; + + mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, + &mp_req->req_buf_dma, GFP_KERNEL); + if (!mp_req->req_buf) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n"); + qedf_free_mp_resc(io_req); + return -ENOMEM; + } + + mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL); + if (!mp_req->resp_buf) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp " + "buffer\n"); + qedf_free_mp_resc(io_req); + return -ENOMEM; + } + + /* Allocate and map mp_req_bd and mp_resp_bd */ + sz = sizeof(struct scsi_sge); + mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz, + &mp_req->mp_req_bd_dma, GFP_KERNEL); + if (!mp_req->mp_req_bd) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n"); + qedf_free_mp_resc(io_req); + return -ENOMEM; + } + + mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz, + &mp_req->mp_resp_bd_dma, 
GFP_KERNEL); + if (!mp_req->mp_resp_bd) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n"); + qedf_free_mp_resc(io_req); + return -ENOMEM; + } + + /* Fill bd table */ + addr = mp_req->req_buf_dma; + mp_req_bd = mp_req->mp_req_bd; + mp_req_bd->sge_addr.lo = U64_LO(addr); + mp_req_bd->sge_addr.hi = U64_HI(addr); + mp_req_bd->sge_len = QEDF_PAGE_SIZE; + + /* + * MP buffer is either a task mgmt command or an ELS. + * So the assumption is that it consumes a single bd + * entry in the bd table + */ + mp_resp_bd = mp_req->mp_resp_bd; + addr = mp_req->resp_buf_dma; + mp_resp_bd->sge_addr.lo = U64_LO(addr); + mp_resp_bd->sge_addr.hi = U64_HI(addr); + mp_resp_bd->sge_len = QEDF_PAGE_SIZE; + + return 0; +} + +/* + * Last ditch effort to clear the port if it's stuck. Used only after a + * cleanup task times out. + */ +static void qedf_drain_request(struct qedf_ctx *qedf) +{ + if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n"); + return; + } + + /* Set bit to return all queuecommand requests as busy */ + set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags); + + /* Call qed drain request for function. Should be synchronous */ + qed_ops->common->drain(qedf->cdev); + + /* Settle time for CQEs to be returned */ + msleep(100); + + /* Unplug and continue */ + clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags); +} + +/* + * Returns SUCCESS if the cleanup task does not timeout, otherwise return + * FAILURE. + */ +int qedf_initiate_cleanup(struct qedf_ioreq *io_req, + bool return_scsi_cmd_on_abts) +{ + struct qedf_rport *fcport; + struct qedf_ctx *qedf; + int tmo = 0; + int rc = SUCCESS; + unsigned long flags; + struct fcoe_wqe *sqe; + u16 sqe_idx; + int refcount = 0; + + fcport = io_req->fcport; + if (!fcport) { + QEDF_ERR(NULL, "fcport is NULL.\n"); + return SUCCESS; + } + + /* Sanity check qedf_rport before dereferencing any pointers */ + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(NULL, "tgt not offloaded\n"); + return SUCCESS; + } + + qedf = fcport->qedf; + if (!qedf) { + QEDF_ERR(NULL, "qedf is NULL.\n"); + return SUCCESS; + } + + if (io_req->cmd_type == QEDF_ELS) { + goto process_els; + } + + if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || + test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in " + "cleanup processing or already completed.\n", + io_req->xid); + return SUCCESS; + } + set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); + +process_els: + /* Ensure room on SQ */ + if (!atomic_read(&fcport->free_sqes)) { + QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n"); + /* Need to make sure we clear the flag since it was set */ + clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); + return FAILED; + } + + if (io_req->cmd_type == QEDF_CLEANUP) { + QEDF_ERR(&qedf->dbg_ctx, + "io_req=0x%x is already a cleanup command cmd_type=%d.\n", + io_req->xid, io_req->cmd_type); + clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); + return SUCCESS; + } + + refcount = kref_read(&io_req->refcount); + + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n", + io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags, + refcount, fcport, fcport->rdata->ids.port_id); + + /* Cleanup cmds re-use the same TID as the original I/O */ + spin_lock_irqsave(&fcport->rport_lock, flags); + io_req->cmd_type = QEDF_CLEANUP; + spin_unlock_irqrestore(&fcport->rport_lock, flags); + io_req->return_scsi_cmd_on_abts = 
return_scsi_cmd_on_abts; + + init_completion(&io_req->cleanup_done); + + spin_lock_irqsave(&fcport->rport_lock, flags); + + sqe_idx = qedf_get_sqe_idx(fcport); + sqe = &fcport->sq[sqe_idx]; + memset(sqe, 0, sizeof(struct fcoe_wqe)); + io_req->task_params->sqe = sqe; + + init_initiator_cleanup_fcoe_task(io_req->task_params); + qedf_ring_doorbell(fcport); + + spin_unlock_irqrestore(&fcport->rport_lock, flags); + + tmo = wait_for_completion_timeout(&io_req->cleanup_done, + QEDF_CLEANUP_TIMEOUT * HZ); + + if (!tmo) { + rc = FAILED; + /* Timeout case */ + QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, " + "xid=%x.\n", io_req->xid); + clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); + /* Issue a drain request if cleanup task times out */ + QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n"); + qedf_drain_request(qedf); + } + + /* If it TASK MGMT handle it, reference will be decreased + * in qedf_execute_tmf + */ + if (io_req->tm_flags == FCP_TMF_LUN_RESET || + io_req->tm_flags == FCP_TMF_TGT_RESET) { + clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); + io_req->sc_cmd = NULL; + kref_put(&io_req->refcount, qedf_release_cmd); + complete(&io_req->tm_done); + } + + if (io_req->sc_cmd) { + if (!io_req->return_scsi_cmd_on_abts) + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, + "Not call scsi_done for xid=0x%x.\n", + io_req->xid); + if (io_req->return_scsi_cmd_on_abts) + qedf_scsi_done(qedf, io_req, DID_ERROR); + } + + if (rc == SUCCESS) + io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS; + else + io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED; + + return rc; +} + +void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n", + io_req->xid); + + clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); + + /* Complete so we can finish cleaning up the I/O */ + complete(&io_req->cleanup_done); +} + +static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, + uint8_t tm_flags) +{ + struct qedf_ioreq *io_req; + struct fcoe_task_context *task; + struct qedf_ctx *qedf = fcport->qedf; + struct fc_lport *lport = qedf->lport; + int rc = 0; + uint16_t xid; + int tmo = 0; + int lun = 0; + unsigned long flags; + struct fcoe_wqe *sqe; + u16 sqe_idx; + + if (!sc_cmd) { + QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n"); + return FAILED; + } + + lun = (int)sc_cmd->device->lun; + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n"); + rc = FAILED; + goto no_flush; + } + + io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD); + if (!io_req) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF"); + rc = -EAGAIN; + goto no_flush; + } + + if (tm_flags == FCP_TMF_LUN_RESET) + qedf->lun_resets++; + else if (tm_flags == FCP_TMF_TGT_RESET) + qedf->target_resets++; + + /* Initialize rest of io_req fields */ + io_req->sc_cmd = sc_cmd; + io_req->fcport = fcport; + io_req->cmd_type = QEDF_TASK_MGMT_CMD; + + /* Record which cpu this request is associated with */ + io_req->cpu = smp_processor_id(); + + /* Set TM flags */ + io_req->io_req_flags = QEDF_READ; + io_req->data_xfer_len = 0; + io_req->tm_flags = tm_flags; + + /* Default is to return a SCSI command when an error occurs */ + io_req->return_scsi_cmd_on_abts = false; + + /* Obtain exchange id */ + xid = io_req->xid; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = " + "0x%x\n", xid); + + /* Initialize task context for this IO request */ + task = qedf_get_task_mem(&qedf->tasks, xid); 
+ + init_completion(&io_req->tm_done); + + spin_lock_irqsave(&fcport->rport_lock, flags); + + sqe_idx = qedf_get_sqe_idx(fcport); + sqe = &fcport->sq[sqe_idx]; + memset(sqe, 0, sizeof(struct fcoe_wqe)); + + qedf_init_task(fcport, lport, io_req, task, sqe); + qedf_ring_doorbell(fcport); + + spin_unlock_irqrestore(&fcport->rport_lock, flags); + + set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); + tmo = wait_for_completion_timeout(&io_req->tm_done, + QEDF_TM_TIMEOUT * HZ); + + if (!tmo) { + rc = FAILED; + QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n"); + /* Clear outstanding bit since command timed out */ + clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); + io_req->sc_cmd = NULL; + } else { + /* Check TMF response code */ + if (io_req->fcp_rsp_code == 0) + rc = SUCCESS; + else + rc = FAILED; + } + /* + * Double check that fcport has not gone into an uploading state before + * executing the command flush for the LUN/target. + */ + if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { + QEDF_ERR(&qedf->dbg_ctx, + "fcport is uploading, not executing flush.\n"); + goto no_flush; + } + /* We do not need this io_req any more */ + kref_put(&io_req->refcount, qedf_release_cmd); + + + if (tm_flags == FCP_TMF_LUN_RESET) + qedf_flush_active_ios(fcport, lun); + else + qedf_flush_active_ios(fcport, -1); + +no_flush: + if (rc != SUCCESS) { + QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n"); + rc = FAILED; + } else { + QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n"); + rc = SUCCESS; + } + return rc; +} + +int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct qedf_rport *fcport = (struct qedf_rport *)&rp[1]; + struct qedf_ctx *qedf; + struct fc_lport *lport = shost_priv(sc_cmd->device->host); + int rc = SUCCESS; + int rval; + struct qedf_ioreq *io_req = NULL; + int ref_cnt = 0; + struct fc_rport_priv *rdata = fcport->rdata; + + QEDF_ERR(NULL, + "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n", + tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff, + rport->scsi_target_id, (int)sc_cmd->device->lun); + + if (!rdata || !kref_get_unless_zero(&rdata->kref)) { + QEDF_ERR(NULL, "stale rport\n"); + return FAILED; + } + + QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id, + (tm_flags == FCP_TMF_TGT_RESET) ? 
"TARGET RESET" : + "LUN RESET"); + + if (qedf_priv(sc_cmd)->io_req) { + io_req = qedf_priv(sc_cmd)->io_req; + ref_cnt = kref_read(&io_req->refcount); + QEDF_ERR(NULL, + "orig io_req = %p xid = 0x%x ref_cnt = %d.\n", + io_req, io_req->xid, ref_cnt); + } + + rval = fc_remote_port_chkready(rport); + if (rval) { + QEDF_ERR(NULL, "device_reset rport not ready\n"); + rc = FAILED; + goto tmf_err; + } + + rc = fc_block_scsi_eh(sc_cmd); + if (rc) + goto tmf_err; + + if (!fcport) { + QEDF_ERR(NULL, "device_reset: rport is NULL\n"); + rc = FAILED; + goto tmf_err; + } + + qedf = fcport->qedf; + + if (!qedf) { + QEDF_ERR(NULL, "qedf is NULL.\n"); + rc = FAILED; + goto tmf_err; + } + + if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n"); + rc = SUCCESS; + goto tmf_err; + } + + if (test_bit(QEDF_UNLOADING, &qedf->flags) || + test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) { + rc = SUCCESS; + goto tmf_err; + } + + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n"); + rc = FAILED; + goto tmf_err; + } + + if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { + if (!fcport->rdata) + QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n", + fcport); + else + QEDF_ERR(&qedf->dbg_ctx, + "fcport %p port_id=%06x is uploading.\n", + fcport, fcport->rdata->ids.port_id); + rc = FAILED; + goto tmf_err; + } + + rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags); + +tmf_err: + kref_put(&rdata->kref, fc_rport_destroy); + return rc; +} + +void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + struct fcoe_cqe_rsp_info *fcp_rsp; + + clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); + + fcp_rsp = &cqe->cqe_info.rsp_info; + qedf_parse_fcp_rsp(io_req, fcp_rsp); + + io_req->sc_cmd = NULL; + complete(&io_req->tm_done); +} + +void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx, + struct fcoe_cqe *cqe) +{ + unsigned long flags; + uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len; + u32 payload_len, crc; + struct fc_frame_header *fh; + struct fc_frame *fp; + struct qedf_io_work *io_work; + u32 bdq_idx; + void *bdq_addr; + struct scsi_bd *p_bd_info; + + p_bd_info = &cqe->cqe_info.unsolic_info.bd_info; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, + "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n", + le32_to_cpu(p_bd_info->address.hi), + le32_to_cpu(p_bd_info->address.lo), + le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi), + le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo), + qedf->bdq_prod_idx, pktlen); + + bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo); + if (bdq_idx >= QEDF_BDQ_SIZE) { + QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n", + bdq_idx); + goto increment_prod; + } + + bdq_addr = qedf->bdq[bdq_idx].buf_addr; + if (!bdq_addr) { + QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping " + "unsolicited packet.\n"); + goto increment_prod; + } + + if (qedf_dump_frames) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, + "BDQ frame is at addr=%p.\n", bdq_addr); + print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1, + (void *)bdq_addr, pktlen, false); + } + + /* Allocate frame */ + payload_len = pktlen - sizeof(struct fc_frame_header); + fp = fc_frame_alloc(qedf->lport, payload_len); + if (!fp) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n"); + goto increment_prod; + } + + /* Copy data from BDQ buffer into fc_frame struct */ + fh = (struct 
fc_frame_header *)fc_frame_header_get(fp); + memcpy(fh, (void *)bdq_addr, pktlen); + + QEDF_WARN(&qedf->dbg_ctx, + "Processing Unsolicated frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n", + ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, + fh->fh_type, fc_frame_payload_op(fp)); + + /* Initialize the frame so libfc sees it as a valid frame */ + crc = fcoe_fc_crc(fp); + fc_frame_init(fp); + fr_dev(fp) = qedf->lport; + fr_sof(fp) = FC_SOF_I3; + fr_eof(fp) = FC_EOF_T; + fr_crc(fp) = cpu_to_le32(~crc); + + /* + * We need to return the frame back up to libfc in a non-atomic + * context + */ + io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC); + if (!io_work) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " + "work for I/O completion.\n"); + fc_frame_free(fp); + goto increment_prod; + } + memset(io_work, 0, sizeof(struct qedf_io_work)); + + INIT_WORK(&io_work->work, qedf_fp_io_handler); + + /* Copy contents of CQE for deferred processing */ + memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe)); + + io_work->qedf = qedf; + io_work->fp = fp; + + queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work); +increment_prod: + spin_lock_irqsave(&qedf->hba_lock, flags); + + /* Increment producer to let f/w know we've handled the frame */ + qedf->bdq_prod_idx++; + + /* Producer index wraps at uint16_t boundary */ + if (qedf->bdq_prod_idx == 0xffff) + qedf->bdq_prod_idx = 0; + + writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod); + readw(qedf->bdq_primary_prod); + writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod); + readw(qedf->bdq_secondary_prod); + + spin_unlock_irqrestore(&qedf->hba_lock, flags); +} diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c new file mode 100644 index 000000000..91f3f1d70 --- /dev/null +++ b/drivers/scsi/qedf/qedf_main.c @@ -0,0 +1,4195 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016-2018 Cavium Inc. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qedf.h" +#include "qedf_dbg.h" +#include + +const struct qed_fcoe_ops *qed_ops; + +static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id); +static void qedf_remove(struct pci_dev *pdev); +static void qedf_shutdown(struct pci_dev *pdev); +static void qedf_schedule_recovery_handler(void *dev); +static void qedf_recovery_handler(struct work_struct *work); +static int qedf_suspend(struct pci_dev *pdev, pm_message_t state); + +/* + * Driver module parameters. + */ +static unsigned int qedf_dev_loss_tmo = 60; +module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, int, S_IRUGO); +MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached " + "remote ports (default 60)"); + +uint qedf_debug = QEDF_LOG_INFO; +module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug, " Debug mask. 
Pass '1' to enable default debugging" + " mask"); + +static uint qedf_fipvlan_retries = 60; +module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO); +MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt " + "before giving up (default 60)"); + +static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN; +module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO); +MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails " + "(default 1002)."); + +static int qedf_default_prio = -1; +module_param_named(default_prio, qedf_default_prio, int, S_IRUGO); +MODULE_PARM_DESC(default_prio, " Override 802.1q priority for FIP and FCoE" + " traffic (value between 0 and 7, default 3)."); + +uint qedf_dump_frames; +module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames " + "(default off)"); + +static uint qedf_queue_depth; +module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO); +MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered " + "by the qedf driver. Default is 0 (use OS default)."); + +uint qedf_io_tracing; +module_param_named(io_tracing, qedf_io_tracing, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions " + "into trace buffer. (default off)."); + +static uint qedf_max_lun = MAX_FIBRE_LUNS; +module_param_named(max_lun, qedf_max_lun, int, S_IRUGO); +MODULE_PARM_DESC(max_lun, " Sets the maximum luns per target that the driver " + "supports. (default 0xffffffff)"); + +uint qedf_link_down_tmo; +module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO); +MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the " + "link is down by N seconds."); + +bool qedf_retry_delay; +module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(retry_delay, " Enable/disable handling of FCP_RSP IU retry " + "delay handling (default off)."); + +static bool qedf_dcbx_no_wait; +module_param_named(dcbx_no_wait, qedf_dcbx_no_wait, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dcbx_no_wait, " Do not wait for DCBX convergence to start " + "sending FIP VLAN requests on link up (Default: off)."); + +static uint qedf_dp_module; +module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO); +MODULE_PARM_DESC(dp_module, " bit flags control for verbose printk passed " + "qed module during probe."); + +static uint qedf_dp_level = QED_LEVEL_NOTICE; +module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO); +MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module " + "during probe (0-3: 0 more verbose)."); + +static bool qedf_enable_recovery = true; +module_param_named(enable_recovery, qedf_enable_recovery, + bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware " + "interface level errors 0 = Disabled, 1 = Enabled (Default: 1)."); + +struct workqueue_struct *qedf_io_wq; + +static struct fcoe_percpu_s qedf_global; +static DEFINE_SPINLOCK(qedf_global_lock); + +static struct kmem_cache *qedf_io_work_cache; + +void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id) +{ + int vlan_id_tmp = 0; + + vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT); + qedf->vlan_id = vlan_id_tmp; + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "Setting vlan_id=0x%04x prio=%d.\n", + vlan_id_tmp, qedf->prio); +} + +/* Returns true if we have a valid vlan, false otherwise */ +static bool 
qedf_initiate_fipvlan_req(struct qedf_ctx *qedf) +{ + + while (qedf->fipvlan_retries--) { + /* This is to catch if link goes down during fipvlan retries */ + if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { + QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n"); + return false; + } + + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n"); + return false; + } + + if (qedf->vlan_id > 0) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "vlan = 0x%x already set, calling ctlr_link_up.\n", + qedf->vlan_id); + if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) + fcoe_ctlr_link_up(&qedf->ctlr); + return true; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Retry %d.\n", qedf->fipvlan_retries); + init_completion(&qedf->fipvlan_compl); + qedf_fcoe_send_vlan_req(qedf); + wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ); + } + + return false; +} + +static void qedf_handle_link_update(struct work_struct *work) +{ + struct qedf_ctx *qedf = + container_of(work, struct qedf_ctx, link_update.work); + int rc; + + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n", + atomic_read(&qedf->link_state)); + + if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { + rc = qedf_initiate_fipvlan_req(qedf); + if (rc) + return; + + if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "Link is down, resetting vlan_id.\n"); + qedf->vlan_id = 0; + return; + } + + /* + * If we get here then we never received a repsonse to our + * fip vlan request so set the vlan_id to the default and + * tell FCoE that the link is up + */ + QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN " + "response, falling back to default VLAN %d.\n", + qedf_fallback_vlan); + qedf_set_vlan_id(qedf, qedf_fallback_vlan); + + /* + * Zero out data_src_addr so we'll update it with the new + * lport port_id + */ + eth_zero_addr(qedf->data_src_addr); + fcoe_ctlr_link_up(&qedf->ctlr); + } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { + /* + * If we hit here and link_down_tmo_valid is still 1 it means + * that link_down_tmo timed out so set it to 0 to make sure any + * other readers have accurate state. + */ + atomic_set(&qedf->link_down_tmo_valid, 0); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Calling fcoe_ctlr_link_down().\n"); + fcoe_ctlr_link_down(&qedf->ctlr); + if (qedf_wait_for_upload(qedf) == false) + QEDF_ERR(&qedf->dbg_ctx, + "Could not upload all sessions.\n"); + /* Reset the number of FIP VLAN retries */ + qedf->fipvlan_retries = qedf_fipvlan_retries; + } +} + +#define QEDF_FCOE_MAC_METHOD_GRANGED_MAC 1 +#define QEDF_FCOE_MAC_METHOD_FCF_MAP 2 +#define QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC 3 +static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp) +{ + u8 *granted_mac; + struct fc_frame_header *fh = fc_frame_header_get(fp); + u8 fc_map[3]; + int method = 0; + + /* Get granted MAC address from FIP FLOGI payload */ + granted_mac = fr_cb(fp)->granted_mac; + + /* + * We set the source MAC for FCoE traffic based on the Granted MAC + * address from the switch. + * + * If granted_mac is non-zero, we used that. + * If the granted_mac is zeroed out, created the FCoE MAC based on + * the sel_fcf->fc_map and the d_id fo the FLOGI frame. + * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the + * d_id of the FLOGI frame. 
+ */ + if (!is_zero_ether_addr(granted_mac)) { + ether_addr_copy(qedf->data_src_addr, granted_mac); + method = QEDF_FCOE_MAC_METHOD_GRANGED_MAC; + } else if (qedf->ctlr.sel_fcf->fc_map != 0) { + hton24(fc_map, qedf->ctlr.sel_fcf->fc_map); + qedf->data_src_addr[0] = fc_map[0]; + qedf->data_src_addr[1] = fc_map[1]; + qedf->data_src_addr[2] = fc_map[2]; + qedf->data_src_addr[3] = fh->fh_d_id[0]; + qedf->data_src_addr[4] = fh->fh_d_id[1]; + qedf->data_src_addr[5] = fh->fh_d_id[2]; + method = QEDF_FCOE_MAC_METHOD_FCF_MAP; + } else { + fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id); + method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method); +} + +static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg) +{ + struct fc_exch *exch = fc_seq_exch(seq); + struct fc_lport *lport = exch->lp; + struct qedf_ctx *qedf = lport_priv(lport); + + if (!qedf) { + QEDF_ERR(NULL, "qedf is NULL.\n"); + return; + } + + /* + * If ERR_PTR is set then don't try to stat anything as it will cause + * a crash when we access fp. + */ + if (IS_ERR(fp)) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "fp has IS_ERR() set.\n"); + goto skip_stat; + } + + /* Log stats for FLOGI reject */ + if (fc_frame_payload_op(fp) == ELS_LS_RJT) + qedf->flogi_failed++; + else if (fc_frame_payload_op(fp) == ELS_LS_ACC) { + /* Set the source MAC we will use for FCoE traffic */ + qedf_set_data_src_addr(qedf, fp); + qedf->flogi_pending = 0; + } + + /* Complete flogi_compl so we can proceed to sending ADISCs */ + complete(&qedf->flogi_compl); + +skip_stat: + /* Report response to libfc */ + fc_lport_flogi_resp(seq, fp, lport); +} + +static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timeout) +{ + struct qedf_ctx *qedf = lport_priv(lport); + + /* + * Intercept FLOGI for statistic purposes. Note we use the resp + * callback to tell if this is really a flogi. + */ + if (resp == fc_lport_flogi_resp) { + qedf->flogi_cnt++; + if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) { + schedule_delayed_work(&qedf->stag_work, 2); + return NULL; + } + qedf->flogi_pending++; + return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp, + arg, timeout); + } + + return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); +} + +int qedf_send_flogi(struct qedf_ctx *qedf) +{ + struct fc_lport *lport; + struct fc_frame *fp; + + lport = qedf->lport; + + if (!lport->tt.elsct_send) { + QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n"); + return -EINVAL; + } + + fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); + if (!fp) { + QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n"); + return -ENOMEM; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Sending FLOGI to reestablish session with switch.\n"); + lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, + ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov); + + init_completion(&qedf->flogi_compl); + + return 0; +} + +/* + * This function is called if link_down_tmo is in use. If we get a link up and + * link_down_tmo has not expired then use just FLOGI/ADISC to recover our + * sessions with targets. Otherwise, just call fcoe_ctlr_link_up(). 
+ */ +static void qedf_link_recovery(struct work_struct *work) +{ + struct qedf_ctx *qedf = + container_of(work, struct qedf_ctx, link_recovery.work); + struct fc_lport *lport = qedf->lport; + struct fc_rport_priv *rdata; + bool rc; + int retries = 30; + int rval, i; + struct list_head rdata_login_list; + + INIT_LIST_HEAD(&rdata_login_list); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Link down tmo did not expire.\n"); + + /* + * Essentially reset the fcoe_ctlr here without affecting the state + * of the libfc structs. + */ + qedf->ctlr.state = FIP_ST_LINK_WAIT; + fcoe_ctlr_link_down(&qedf->ctlr); + + /* + * Bring the link up before we send the fipvlan request so libfcoe + * can select a new fcf in parallel + */ + fcoe_ctlr_link_up(&qedf->ctlr); + + /* Since the link when down and up to verify which vlan we're on */ + qedf->fipvlan_retries = qedf_fipvlan_retries; + rc = qedf_initiate_fipvlan_req(qedf); + /* If getting the VLAN fails, set the VLAN to the fallback one */ + if (!rc) + qedf_set_vlan_id(qedf, qedf_fallback_vlan); + + /* + * We need to wait for an FCF to be selected due to the + * fcoe_ctlr_link_up other the FLOGI will be rejected. + */ + while (retries > 0) { + if (qedf->ctlr.sel_fcf) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "FCF reselected, proceeding with FLOGI.\n"); + break; + } + msleep(500); + retries--; + } + + if (retries < 1) { + QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for " + "FCF selection.\n"); + return; + } + + rval = qedf_send_flogi(qedf); + if (rval) + return; + + /* Wait for FLOGI completion before proceeding with sending ADISCs */ + i = wait_for_completion_timeout(&qedf->flogi_compl, + qedf->lport->r_a_tov); + if (i == 0) { + QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n"); + return; + } + + /* + * Call lport->tt.rport_login which will cause libfc to send an + * ADISC since the rport is in state ready. + */ + mutex_lock(&lport->disc.disc_mutex); + list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { + if (kref_get_unless_zero(&rdata->kref)) { + fc_rport_login(rdata); + kref_put(&rdata->kref, fc_rport_destroy); + } + } + mutex_unlock(&lport->disc.disc_mutex); +} + +static void qedf_update_link_speed(struct qedf_ctx *qedf, + struct qed_link_output *link) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps); + struct fc_lport *lport = qedf->lport; + + lport->link_speed = FC_PORTSPEED_UNKNOWN; + lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN; + + /* Set fc_host link speed */ + switch (link->speed) { + case 10000: + lport->link_speed = FC_PORTSPEED_10GBIT; + break; + case 25000: + lport->link_speed = FC_PORTSPEED_25GBIT; + break; + case 40000: + lport->link_speed = FC_PORTSPEED_40GBIT; + break; + case 50000: + lport->link_speed = FC_PORTSPEED_50GBIT; + break; + case 100000: + lport->link_speed = FC_PORTSPEED_100GBIT; + break; + case 20000: + lport->link_speed = FC_PORTSPEED_20GBIT; + break; + default: + lport->link_speed = FC_PORTSPEED_UNKNOWN; + break; + } + + /* + * Set supported link speed by querying the supported + * capabilities of the link. 
+ */ + + phylink_zero(sup_caps); + phylink_set(sup_caps, 10000baseT_Full); + phylink_set(sup_caps, 10000baseKX4_Full); + phylink_set(sup_caps, 10000baseR_FEC); + phylink_set(sup_caps, 10000baseCR_Full); + phylink_set(sup_caps, 10000baseSR_Full); + phylink_set(sup_caps, 10000baseLR_Full); + phylink_set(sup_caps, 10000baseLRM_Full); + phylink_set(sup_caps, 10000baseKR_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) + lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 25000baseKR_Full); + phylink_set(sup_caps, 25000baseCR_Full); + phylink_set(sup_caps, 25000baseSR_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) + lport->link_supported_speeds |= FC_PORTSPEED_25GBIT; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 40000baseLR4_Full); + phylink_set(sup_caps, 40000baseKR4_Full); + phylink_set(sup_caps, 40000baseCR4_Full); + phylink_set(sup_caps, 40000baseSR4_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) + lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 50000baseKR2_Full); + phylink_set(sup_caps, 50000baseCR2_Full); + phylink_set(sup_caps, 50000baseSR2_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) + lport->link_supported_speeds |= FC_PORTSPEED_50GBIT; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 100000baseKR4_Full); + phylink_set(sup_caps, 100000baseSR4_Full); + phylink_set(sup_caps, 100000baseCR4_Full); + phylink_set(sup_caps, 100000baseLR4_ER4_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) + lport->link_supported_speeds |= FC_PORTSPEED_100GBIT; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 20000baseKR2_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) + lport->link_supported_speeds |= FC_PORTSPEED_20GBIT; + + if (lport->host && lport->host->shost_data) + fc_host_supported_speeds(lport->host) = + lport->link_supported_speeds; +} + +static void qedf_bw_update(void *dev) +{ + struct qedf_ctx *qedf = (struct qedf_ctx *)dev; + struct qed_link_output link; + + /* Get the latest status of the link */ + qed_ops->common->get_link(qedf->cdev, &link); + + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, + "Ignore link update, driver getting unload.\n"); + return; + } + + if (link.link_up) { + if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) + qedf_update_link_speed(qedf, &link); + else + QEDF_ERR(&qedf->dbg_ctx, + "Ignore bw update, link is down.\n"); + + } else { + QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n"); + } +} + +static void qedf_link_update(void *dev, struct qed_link_output *link) +{ + struct qedf_ctx *qedf = (struct qedf_ctx *)dev; + + /* + * Prevent race where we're removing the module and we get link update + * for qed. 
+ */ + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, + "Ignore link update, driver getting unload.\n"); + return; + } + + if (link->link_up) { + if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { + QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC, + "Ignoring link up event as link is already up.\n"); + return; + } + QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n", + link->speed / 1000); + + /* Cancel any pending link down work */ + cancel_delayed_work(&qedf->link_update); + + atomic_set(&qedf->link_state, QEDF_LINK_UP); + qedf_update_link_speed(qedf, link); + + if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE || + qedf_dcbx_no_wait) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "DCBx done.\n"); + if (atomic_read(&qedf->link_down_tmo_valid) > 0) + queue_delayed_work(qedf->link_update_wq, + &qedf->link_recovery, 0); + else + queue_delayed_work(qedf->link_update_wq, + &qedf->link_update, 0); + atomic_set(&qedf->link_down_tmo_valid, 0); + } + + } else { + QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n"); + + atomic_set(&qedf->link_state, QEDF_LINK_DOWN); + atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING); + /* + * Flag that we're waiting for the link to come back up before + * informing the fcoe layer of the event. + */ + if (qedf_link_down_tmo > 0) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Starting link down tmo.\n"); + atomic_set(&qedf->link_down_tmo_valid, 1); + } + qedf->vlan_id = 0; + qedf_update_link_speed(qedf, link); + queue_delayed_work(qedf->link_update_wq, &qedf->link_update, + qedf_link_down_tmo * HZ); + } +} + + +static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type) +{ + struct qedf_ctx *qedf = (struct qedf_ctx *)dev; + u8 tmp_prio; + + QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe " + "prio=%d.\n", get->operational.valid, get->operational.enabled, + get->operational.app_prio.fcoe); + + if (get->operational.enabled && get->operational.valid) { + /* If DCBX was already negotiated on link up then just exit */ + if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "DCBX already set on link up.\n"); + return; + } + + atomic_set(&qedf->dcbx, QEDF_DCBX_DONE); + + /* + * Set the 8021q priority in the following manner: + * + * 1. If a modparam is set use that + * 2. If the value is not between 0..7 use the default + * 3. 
Use the priority we get from the DCBX app tag + */ + tmp_prio = get->operational.app_prio.fcoe; + if (qedf_default_prio > -1) + qedf->prio = qedf_default_prio; + else if (tmp_prio > 7) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "FIP/FCoE prio %d out of range, setting to %d.\n", + tmp_prio, QEDF_DEFAULT_PRIO); + qedf->prio = QEDF_DEFAULT_PRIO; + } else + qedf->prio = tmp_prio; + + if (atomic_read(&qedf->link_state) == QEDF_LINK_UP && + !qedf_dcbx_no_wait) { + if (atomic_read(&qedf->link_down_tmo_valid) > 0) + queue_delayed_work(qedf->link_update_wq, + &qedf->link_recovery, 0); + else + queue_delayed_work(qedf->link_update_wq, + &qedf->link_update, 0); + atomic_set(&qedf->link_down_tmo_valid, 0); + } + } + +} + +static u32 qedf_get_login_failures(void *cookie) +{ + struct qedf_ctx *qedf; + + qedf = (struct qedf_ctx *)cookie; + return qedf->flogi_failed; +} + +static struct qed_fcoe_cb_ops qedf_cb_ops = { + { + .link_update = qedf_link_update, + .bw_update = qedf_bw_update, + .schedule_recovery_handler = qedf_schedule_recovery_handler, + .dcbx_aen = qedf_dcbx_handler, + .get_generic_tlv_data = qedf_get_generic_tlv_data, + .get_protocol_tlv_data = qedf_get_protocol_tlv_data, + .schedule_hw_err_handler = qedf_schedule_hw_err_handler, + } +}; + +/* + * Various transport templates. + */ + +static struct scsi_transport_template *qedf_fc_transport_template; +static struct scsi_transport_template *qedf_fc_vport_transport_template; + +/* + * SCSI EH handlers + */ +static int qedf_eh_abort(struct scsi_cmnd *sc_cmd) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_lport *lport; + struct qedf_ctx *qedf; + struct qedf_ioreq *io_req; + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct fc_rport_priv *rdata; + struct qedf_rport *fcport = NULL; + int rc = FAILED; + int wait_count = 100; + int refcount = 0; + int rval; + int got_ref = 0; + + lport = shost_priv(sc_cmd->device->host); + qedf = (struct qedf_ctx *)lport_priv(lport); + + /* rport and tgt are allocated together, so tgt should be non-NULL */ + fcport = (struct qedf_rport *)&rp[1]; + rdata = fcport->rdata; + if (!rdata || !kref_get_unless_zero(&rdata->kref)) { + QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd); + rc = SUCCESS; + goto out; + } + + + io_req = qedf_priv(sc_cmd)->io_req; + if (!io_req) { + QEDF_ERR(&qedf->dbg_ctx, + "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n", + sc_cmd, sc_cmd->cmnd[0], + rdata->ids.port_id); + rc = SUCCESS; + goto drop_rdata_kref; + } + + rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */ + if (rval) + got_ref = 1; + + /* If we got a valid io_req, confirm it belongs to this sc_cmd. 
*/ + if (!rval || io_req->sc_cmd != sc_cmd) { + QEDF_ERR(&qedf->dbg_ctx, + "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n", + io_req->sc_cmd, sc_cmd, rdata->ids.port_id); + + goto drop_rdata_kref; + } + + if (fc_remote_port_chkready(rport)) { + refcount = kref_read(&io_req->refcount); + QEDF_ERR(&qedf->dbg_ctx, + "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n", + io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0], + refcount, rdata->ids.port_id); + + goto drop_rdata_kref; + } + + rc = fc_block_scsi_eh(sc_cmd); + if (rc) + goto drop_rdata_kref; + + if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { + QEDF_ERR(&qedf->dbg_ctx, + "Connection uploading, xid=0x%x., port_id=%06x\n", + io_req->xid, rdata->ids.port_id); + while (io_req->sc_cmd && (wait_count != 0)) { + msleep(100); + wait_count--; + } + if (wait_count) { + QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n"); + rc = SUCCESS; + } else { + QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n"); + rc = FAILED; + } + goto drop_rdata_kref; + } + + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n"); + goto drop_rdata_kref; + } + + QEDF_ERR(&qedf->dbg_ctx, + "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n", + io_req, sc_cmd, io_req->xid, io_req->fp_idx, + rdata->ids.port_id); + + if (qedf->stop_io_on_error) { + qedf_stop_all_io(qedf); + rc = SUCCESS; + goto drop_rdata_kref; + } + + init_completion(&io_req->abts_done); + rval = qedf_initiate_abts(io_req, true); + if (rval) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); + /* + * If we fail to queue the ABTS then return this command to + * the SCSI layer as it will own and free the xid + */ + rc = SUCCESS; + qedf_scsi_done(qedf, io_req, DID_ERROR); + goto drop_rdata_kref; + } + + wait_for_completion(&io_req->abts_done); + + if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS || + io_req->event == QEDF_IOREQ_EV_ABORT_FAILED || + io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) { + /* + * If we get a reponse to the abort this is success from + * the perspective that all references to the command have + * been removed from the driver and firmware + */ + rc = SUCCESS; + } else { + /* If the abort and cleanup failed then return a failure */ + rc = FAILED; + } + + if (rc == SUCCESS) + QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n", + io_req->xid); + else + QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n", + io_req->xid); + +drop_rdata_kref: + kref_put(&rdata->kref, fc_rport_destroy); +out: + if (got_ref) + kref_put(&io_req->refcount, qedf_release_cmd); + return rc; +} + +static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd) +{ + QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...", + sc_cmd->device->host->host_no, sc_cmd->device->id, + sc_cmd->device->lun); + return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET); +} + +static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd) +{ + QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... 
", + sc_cmd->device->host->host_no, sc_cmd->device->id, + sc_cmd->device->lun); + return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); +} + +bool qedf_wait_for_upload(struct qedf_ctx *qedf) +{ + struct qedf_rport *fcport; + int wait_cnt = 120; + + while (wait_cnt--) { + if (atomic_read(&qedf->num_offloads)) + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "Waiting for all uploads to complete num_offloads = 0x%x.\n", + atomic_read(&qedf->num_offloads)); + else + return true; + msleep(500); + } + + rcu_read_lock(); + list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { + if (test_bit(QEDF_RPORT_SESSION_READY, + &fcport->flags)) { + if (fcport->rdata) + QEDF_ERR(&qedf->dbg_ctx, + "Waiting for fcport %p portid=%06x.\n", + fcport, fcport->rdata->ids.port_id); + } else { + QEDF_ERR(&qedf->dbg_ctx, + "Waiting for fcport %p.\n", fcport); + } + } + + rcu_read_unlock(); + return false; +} + +/* Performs soft reset of qedf_ctx by simulating a link down/up */ +void qedf_ctx_soft_reset(struct fc_lport *lport) +{ + struct qedf_ctx *qedf; + struct qed_link_output if_link; + + if (lport->vport) { + printk_ratelimited("Cannot issue host reset on NPIV port.\n"); + return; + } + + qedf = lport_priv(lport); + + qedf->flogi_pending = 0; + /* For host reset, essentially do a soft link up/down */ + atomic_set(&qedf->link_state, QEDF_LINK_DOWN); + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "Queuing link down work.\n"); + queue_delayed_work(qedf->link_update_wq, &qedf->link_update, + 0); + + if (qedf_wait_for_upload(qedf) == false) { + QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); + WARN_ON(atomic_read(&qedf->num_offloads)); + } + + /* Before setting link up query physical link state */ + qed_ops->common->get_link(qedf->cdev, &if_link); + /* Bail if the physical link is not up */ + if (!if_link.link_up) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "Physical link is not up.\n"); + return; + } + /* Flush and wait to make sure link down is processed */ + flush_delayed_work(&qedf->link_update); + msleep(500); + + atomic_set(&qedf->link_state, QEDF_LINK_UP); + qedf->vlan_id = 0; + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "Queue link up work.\n"); + queue_delayed_work(qedf->link_update_wq, &qedf->link_update, + 0); +} + +/* Reset the host by gracefully logging out and then logging back in */ +static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd) +{ + struct fc_lport *lport; + struct qedf_ctx *qedf; + + lport = shost_priv(sc_cmd->device->host); + qedf = lport_priv(lport); + + if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN || + test_bit(QEDF_UNLOADING, &qedf->flags)) + return FAILED; + + QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued..."); + + qedf_ctx_soft_reset(lport); + + return SUCCESS; +} + +static int qedf_slave_configure(struct scsi_device *sdev) +{ + if (qedf_queue_depth) { + scsi_change_queue_depth(sdev, qedf_queue_depth); + } + + return 0; +} + +static const struct scsi_host_template qedf_host_template = { + .module = THIS_MODULE, + .name = QEDF_MODULE_NAME, + .this_id = -1, + .cmd_per_lun = 32, + .max_sectors = 0xffff, + .queuecommand = qedf_queuecommand, + .shost_groups = qedf_host_groups, + .eh_abort_handler = qedf_eh_abort, + .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */ + .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */ + .eh_host_reset_handler = qedf_eh_host_reset, + .slave_configure = qedf_slave_configure, + .dma_boundary = QED_HW_DMA_BOUNDARY, + .sg_tablesize = QEDF_MAX_BDS_PER_CMD, + .can_queue = FCOE_PARAMS_NUM_TASKS, + 
.change_queue_depth = scsi_change_queue_depth, + .cmd_size = sizeof(struct qedf_cmd_priv), +}; + +static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen) +{ + int rc; + + spin_lock(&qedf_global_lock); + rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global); + spin_unlock(&qedf_global_lock); + + return rc; +} + +static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id) +{ + struct qedf_rport *fcport; + struct fc_rport_priv *rdata; + + rcu_read_lock(); + list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { + rdata = fcport->rdata; + if (rdata == NULL) + continue; + if (rdata->ids.port_id == port_id) { + rcu_read_unlock(); + return fcport; + } + } + rcu_read_unlock(); + + /* Return NULL to caller to let them know fcport was not found */ + return NULL; +} + +/* Transmits an ELS frame over an offloaded session */ +static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp) +{ + struct fc_frame_header *fh; + int rc = 0; + + fh = fc_frame_header_get(fp); + if ((fh->fh_type == FC_TYPE_ELS) && + (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { + switch (fc_frame_payload_op(fp)) { + case ELS_ADISC: + qedf_send_adisc(fcport, fp); + rc = 1; + break; + } + } + + return rc; +} + +/* + * qedf_xmit - qedf FCoE frame transmit function + */ +static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_lport *base_lport; + struct qedf_ctx *qedf; + struct ethhdr *eh; + struct fcoe_crc_eof *cp; + struct sk_buff *skb; + struct fc_frame_header *fh; + struct fcoe_hdr *hp; + u8 sof, eof; + u32 crc; + unsigned int hlen, tlen, elen; + int wlen; + struct fc_lport *tmp_lport; + struct fc_lport *vn_port = NULL; + struct qedf_rport *fcport; + int rc; + u16 vlan_tci = 0; + + qedf = (struct qedf_ctx *)lport_priv(lport); + + fh = fc_frame_header_get(fp); + skb = fp_skb(fp); + + /* Filter out traffic to other NPIV ports on the same host */ + if (lport->vport) + base_lport = shost_priv(vport_to_shost(lport->vport)); + else + base_lport = lport; + + /* Flag if the destination is the base port */ + if (base_lport->port_id == ntoh24(fh->fh_d_id)) { + vn_port = base_lport; + } else { + /* Got through the list of vports attached to the base_lport + * and see if we have a match with the destination address. 
+ */ + list_for_each_entry(tmp_lport, &base_lport->vports, list) { + if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) { + vn_port = tmp_lport; + break; + } + } + } + if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) { + struct fc_rport_priv *rdata = NULL; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id)); + kfree_skb(skb); + rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id)); + if (rdata) { + rdata->retries = lport->max_rport_retry_count; + kref_put(&rdata->kref, fc_rport_destroy); + } + return -EINVAL; + } + /* End NPIV filtering */ + + if (!qedf->ctlr.sel_fcf) { + kfree_skb(skb); + return 0; + } + + if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) { + QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n"); + kfree_skb(skb); + return 0; + } + + if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { + QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n"); + kfree_skb(skb); + return 0; + } + + if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { + if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb)) + return 0; + } + + /* Check to see if this needs to be sent on an offloaded session */ + fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id)); + + if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + rc = qedf_xmit_l2_frame(fcport, fp); + /* + * If the frame was successfully sent over the middle path + * then do not try to also send it over the LL2 path + */ + if (rc) + return 0; + } + + sof = fr_sof(fp); + eof = fr_eof(fp); + + elen = sizeof(struct ethhdr); + hlen = sizeof(struct fcoe_hdr); + tlen = sizeof(struct fcoe_crc_eof); + wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; + + skb->ip_summed = CHECKSUM_NONE; + crc = fcoe_fc_crc(fp); + + /* copy port crc and eof to the skb buff */ + if (skb_is_nonlinear(skb)) { + skb_frag_t *frag; + + if (qedf_get_paged_crc_eof(skb, tlen)) { + kfree_skb(skb); + return -ENOMEM; + } + frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; + cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); + } else { + cp = skb_put(skb, tlen); + } + + memset(cp, 0, sizeof(*cp)); + cp->fcoe_eof = eof; + cp->fcoe_crc32 = cpu_to_le32(~crc); + if (skb_is_nonlinear(skb)) { + kunmap_atomic(cp); + cp = NULL; + } + + + /* adjust skb network/transport offsets to match mac/fcoe/port */ + skb_push(skb, elen + hlen); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb->mac_len = elen; + skb->protocol = htons(ETH_P_FCOE); + + /* + * Add VLAN tag to non-offload FCoE frame based on current stored VLAN + * for FIP/FCoE traffic. 
+ */ + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id); + + /* fill up mac and fcoe headers */ + eh = eth_hdr(skb); + eh->h_proto = htons(ETH_P_FCOE); + if (qedf->ctlr.map_dest) + fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); + else + /* insert GW address */ + ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr); + + /* Set the source MAC address */ + ether_addr_copy(eh->h_source, qedf->data_src_addr); + + hp = (struct fcoe_hdr *)(eh + 1); + memset(hp, 0, sizeof(*hp)); + if (FC_FCOE_VER) + FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); + hp->fcoe_sof = sof; + + /*update tx stats */ + this_cpu_inc(lport->stats->TxFrames); + this_cpu_add(lport->stats->TxWords, wlen); + + /* Get VLAN ID from skb for printing purposes */ + __vlan_hwaccel_get_tag(skb, &vlan_tci); + + /* send down to lld */ + fr_dev(fp) = lport; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: " + "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n", + ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type, + vlan_tci); + if (qedf_dump_frames) + print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, + 1, skb->data, skb->len, false); + rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); + if (rc) { + QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc); + kfree_skb(skb); + return rc; + } + + return 0; +} + +static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) +{ + int rval = 0; + u32 *pbl; + dma_addr_t page; + int num_pages; + + /* Calculate appropriate queue and PBL sizes */ + fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe); + fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE); + fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) * + sizeof(void *); + fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; + + fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size, + &fcport->sq_dma, GFP_KERNEL); + if (!fcport->sq) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n"); + rval = 1; + goto out; + } + + fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev, + fcport->sq_pbl_size, + &fcport->sq_pbl_dma, GFP_KERNEL); + if (!fcport->sq_pbl) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n"); + rval = 1; + goto out_free_sq; + } + + /* Create PBL */ + num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE; + page = fcport->sq_dma; + pbl = (u32 *)fcport->sq_pbl; + + while (num_pages--) { + *pbl = U64_LO(page); + pbl++; + *pbl = U64_HI(page); + pbl++; + page += QEDF_PAGE_SIZE; + } + + return rval; + +out_free_sq: + dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq, + fcport->sq_dma); +out: + return rval; +} + +static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) +{ + if (fcport->sq_pbl) + dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size, + fcport->sq_pbl, fcport->sq_pbl_dma); + if (fcport->sq) + dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, + fcport->sq, fcport->sq_dma); +} + +static int qedf_offload_connection(struct qedf_ctx *qedf, + struct qedf_rport *fcport) +{ + struct qed_fcoe_params_offload conn_info; + u32 port_id; + int rval; + uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe)); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection " + "portid=%06x.\n", fcport->rdata->ids.port_id); + rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle, + &fcport->fw_cid, &fcport->p_doorbell); + if (rval) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection " + "for portid=%06x.\n", 
fcport->rdata->ids.port_id); + rval = 1; /* For some reason qed returns 0 on failure here */ + goto out; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x " + "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id, + fcport->fw_cid, fcport->handle); + + memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload)); + + /* Fill in the offload connection info */ + conn_info.sq_pbl_addr = fcport->sq_pbl_dma; + + conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl); + conn_info.sq_next_page_addr = + (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8)); + + /* Need to use our FCoE MAC for the offload session */ + ether_addr_copy(conn_info.src_mac, qedf->data_src_addr); + + ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr); + + conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size; + conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov; + conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */ + conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size; + + /* Set VLAN data */ + conn_info.vlan_tag = qedf->vlan_id << + FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT; + conn_info.vlan_tag |= + qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT; + conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK << + FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT); + + /* Set host port source id */ + port_id = fc_host_port_id(qedf->lport->host); + fcport->sid = port_id; + conn_info.s_id.addr_hi = (port_id & 0x000000FF); + conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8; + conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16; + + conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq; + + /* Set remote port destination id */ + port_id = fcport->rdata->rport->port_id; + conn_info.d_id.addr_hi = (port_id & 0x000000FF); + conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8; + conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16; + + conn_info.def_q_idx = 0; /* Default index for send queue? */ + + /* Set FC-TAPE specific flags if needed */ + if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, + "Enable CONF, REC for portid=%06x.\n", + fcport->rdata->ids.port_id); + conn_info.flags |= 1 << + FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT; + conn_info.flags |= + ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << + FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT; + } + + rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info); + if (rval) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection " + "for portid=%06x.\n", fcport->rdata->ids.port_id); + goto out_free_conn; + } else + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload " + "succeeded portid=%06x total_sqe=%d.\n", + fcport->rdata->ids.port_id, total_sqe); + + spin_lock_init(&fcport->rport_lock); + atomic_set(&fcport->free_sqes, total_sqe); + return 0; +out_free_conn: + qed_ops->release_conn(qedf->cdev, fcport->handle); +out: + return rval; +} + +#define QEDF_TERM_BUFF_SIZE 10 +static void qedf_upload_connection(struct qedf_ctx *qedf, + struct qedf_rport *fcport) +{ + void *term_params; + dma_addr_t term_params_dma; + + /* Term params needs to be a DMA coherent buffer as qed shared the + * physical DMA address with the firmware. The buffer may be used in + * the receive path so we may eventually have to move this. 
+ */ + term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, + &term_params_dma, GFP_KERNEL); + if (!term_params) + return; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection " + "port_id=%06x.\n", fcport->rdata->ids.port_id); + + qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma); + qed_ops->release_conn(qedf->cdev, fcport->handle); + + dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params, + term_params_dma); +} + +static void qedf_cleanup_fcport(struct qedf_ctx *qedf, + struct qedf_rport *fcport) +{ + struct fc_rport_priv *rdata = fcport->rdata; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n", + fcport->rdata->ids.port_id); + + /* Flush any remaining i/o's before we upload the connection */ + qedf_flush_active_ios(fcport, -1); + + if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) + qedf_upload_connection(qedf, fcport); + qedf_free_sq(qedf, fcport); + fcport->rdata = NULL; + fcport->qedf = NULL; + kref_put(&rdata->kref, fc_rport_destroy); +} + +/* + * This event_callback is called after successful completion of libfc + * initiated target login. qedf can proceed with initiating the session + * establishment. + */ +static void qedf_rport_event_handler(struct fc_lport *lport, + struct fc_rport_priv *rdata, + enum fc_rport_event event) +{ + struct qedf_ctx *qedf = lport_priv(lport); + struct fc_rport *rport = rdata->rport; + struct fc_rport_libfc_priv *rp; + struct qedf_rport *fcport; + u32 port_id; + int rval; + unsigned long flags; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, " + "port_id = 0x%x\n", event, rdata->ids.port_id); + + switch (event) { + case RPORT_EV_READY: + if (!rport) { + QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n"); + break; + } + + rp = rport->dd_data; + fcport = (struct qedf_rport *)&rp[1]; + fcport->qedf = qedf; + + if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) { + QEDF_ERR(&(qedf->dbg_ctx), "Not offloading " + "portid=0x%x as max number of offloaded sessions " + "reached.\n", rdata->ids.port_id); + return; + } + + /* + * Don't try to offload the session again. Can happen when we + * get an ADISC + */ + if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_WARN(&(qedf->dbg_ctx), "Session already " + "offloaded, portid=0x%x.\n", + rdata->ids.port_id); + return; + } + + if (rport->port_id == FC_FID_DIR_SERV) { + /* + * qedf_rport structure doesn't exist for + * directory server. 
+ * We should not come here, as lport will + * take care of fabric login + */ + QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not " + "exist for dir server port_id=%x\n", + rdata->ids.port_id); + break; + } + + if (rdata->spp_type != FC_TYPE_FCP) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Not offloading since spp type isn't FCP\n"); + break; + } + if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Not FCP target so not offloading\n"); + break; + } + + /* Initial reference held on entry, so this can't fail */ + kref_get(&rdata->kref); + fcport->rdata = rdata; + fcport->rport = rport; + + rval = qedf_alloc_sq(qedf, fcport); + if (rval) { + qedf_cleanup_fcport(qedf, fcport); + break; + } + + /* Set device type */ + if (rdata->flags & FC_RP_FLAGS_RETRY && + rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET && + !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) { + fcport->dev_type = QEDF_RPORT_TYPE_TAPE; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "portid=%06x is a TAPE device.\n", + rdata->ids.port_id); + } else { + fcport->dev_type = QEDF_RPORT_TYPE_DISK; + } + + rval = qedf_offload_connection(qedf, fcport); + if (rval) { + qedf_cleanup_fcport(qedf, fcport); + break; + } + + /* Add fcport to list of qedf_ctx list of offloaded ports */ + spin_lock_irqsave(&qedf->hba_lock, flags); + list_add_rcu(&fcport->peers, &qedf->fcports); + spin_unlock_irqrestore(&qedf->hba_lock, flags); + + /* + * Set the session ready bit to let everyone know that this + * connection is ready for I/O + */ + set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags); + atomic_inc(&qedf->num_offloads); + + break; + case RPORT_EV_LOGO: + case RPORT_EV_FAILED: + case RPORT_EV_STOP: + port_id = rdata->ids.port_id; + if (port_id == FC_FID_DIR_SERV) + break; + + if (rdata->spp_type != FC_TYPE_FCP) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "No action since spp type isn't FCP\n"); + break; + } + if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Not FCP target so no action\n"); + break; + } + + if (!rport) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "port_id=%x - rport notcreated Yet!!\n", port_id); + break; + } + rp = rport->dd_data; + /* + * Perform session upload. Note that rdata->peers is already + * removed from disc->rports list before we get this event. + */ + fcport = (struct qedf_rport *)&rp[1]; + + spin_lock_irqsave(&fcport->rport_lock, flags); + /* Only free this fcport if it is offloaded already */ + if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) && + !test_bit(QEDF_RPORT_UPLOADING_CONNECTION, + &fcport->flags)) { + set_bit(QEDF_RPORT_UPLOADING_CONNECTION, + &fcport->flags); + spin_unlock_irqrestore(&fcport->rport_lock, flags); + qedf_cleanup_fcport(qedf, fcport); + /* + * Remove fcport to list of qedf_ctx list of offloaded + * ports + */ + spin_lock_irqsave(&qedf->hba_lock, flags); + list_del_rcu(&fcport->peers); + spin_unlock_irqrestore(&qedf->hba_lock, flags); + + clear_bit(QEDF_RPORT_UPLOADING_CONNECTION, + &fcport->flags); + atomic_dec(&qedf->num_offloads); + } else { + spin_unlock_irqrestore(&fcport->rport_lock, flags); + } + break; + + case RPORT_EV_NONE: + break; + } +} + +static void qedf_abort_io(struct fc_lport *lport) +{ + /* NO-OP but need to fill in the template */ +} + +static void qedf_fcp_cleanup(struct fc_lport *lport) +{ + /* + * NO-OP but need to fill in template to prevent a NULL + * function pointer dereference during link down. I/Os + * will be flushed when port is uploaded. 
+ */ +} + +static struct libfc_function_template qedf_lport_template = { + .frame_send = qedf_xmit, + .fcp_abort_io = qedf_abort_io, + .fcp_cleanup = qedf_fcp_cleanup, + .rport_event_callback = qedf_rport_event_handler, + .elsct_send = qedf_elsct_send, +}; + +static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf) +{ + fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO); + + qedf->ctlr.send = qedf_fip_send; + qedf->ctlr.get_src_addr = qedf_get_src_mac; + ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac); +} + +static void qedf_setup_fdmi(struct qedf_ctx *qedf) +{ + struct fc_lport *lport = qedf->lport; + u8 buf[8]; + int pos; + uint32_t i; + + /* + * fdmi_enabled needs to be set for libfc + * to execute FDMI registration + */ + lport->fdmi_enabled = 1; + + /* + * Setup the necessary fc_host attributes to that will be used to fill + * in the FDMI information. + */ + + /* Get the PCI-e Device Serial Number Capability */ + pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN); + if (pos) { + pos += 4; + for (i = 0; i < 8; i++) + pci_read_config_byte(qedf->pdev, pos + i, &buf[i]); + + snprintf(fc_host_serial_number(lport->host), + FC_SERIAL_NUMBER_SIZE, + "%02X%02X%02X%02X%02X%02X%02X%02X", + buf[7], buf[6], buf[5], buf[4], + buf[3], buf[2], buf[1], buf[0]); + } else + snprintf(fc_host_serial_number(lport->host), + FC_SERIAL_NUMBER_SIZE, "Unknown"); + + snprintf(fc_host_manufacturer(lport->host), + FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc."); + + if (qedf->pdev->device == QL45xxx) { + snprintf(fc_host_model(lport->host), + FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx"); + + snprintf(fc_host_model_description(lport->host), + FC_SYMBOLIC_NAME_SIZE, "%s", + "Marvell FastLinQ QL45xxx FCoE Adapter"); + } + + if (qedf->pdev->device == QL41xxx) { + snprintf(fc_host_model(lport->host), + FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx"); + + snprintf(fc_host_model_description(lport->host), + FC_SYMBOLIC_NAME_SIZE, "%s", + "Marvell FastLinQ QL41xxx FCoE Adapter"); + } + + snprintf(fc_host_hardware_version(lport->host), + FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision); + + snprintf(fc_host_driver_version(lport->host), + FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION); + + snprintf(fc_host_firmware_version(lport->host), + FC_VERSION_STRING_SIZE, "%d.%d.%d.%d", + FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, + FW_ENGINEERING_VERSION); + + snprintf(fc_host_vendor_identifier(lport->host), + FC_VENDOR_IDENTIFIER, "%s", "Marvell"); + +} + +static int qedf_lport_setup(struct qedf_ctx *qedf) +{ + struct fc_lport *lport = qedf->lport; + + lport->link_up = 0; + lport->max_retry_count = QEDF_FLOGI_RETRY_CNT; + lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT; + lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); + lport->boot_time = jiffies; + lport->e_d_tov = 2 * 1000; + lport->r_a_tov = 10 * 1000; + + /* Set NPIV support */ + lport->does_npiv = 1; + fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV; + + fc_set_wwnn(lport, qedf->wwnn); + fc_set_wwpn(lport, qedf->wwpn); + + if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) { + QEDF_ERR(&qedf->dbg_ctx, + "fcoe_libfc_config failed.\n"); + return -ENOMEM; + } + + /* Allocate the exchange manager */ + fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS, + 0xfffe, NULL); + + if (fc_lport_init_stats(lport)) + return -ENOMEM; + + /* Finish lport config */ + fc_lport_config(lport); + + /* Set max frame size */ + fc_set_mfs(lport, QEDF_MFS); + 
fc_host_maxframe_size(lport->host) = lport->mfs; + + /* Set default dev_loss_tmo based on module parameter */ + fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo; + + /* Set symbolic node name */ + if (qedf->pdev->device == QL45xxx) + snprintf(fc_host_symbolic_name(lport->host), 256, + "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION); + + if (qedf->pdev->device == QL41xxx) + snprintf(fc_host_symbolic_name(lport->host), 256, + "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION); + + qedf_setup_fdmi(qedf); + + return 0; +} + +/* + * NPIV functions + */ + +static int qedf_vport_libfc_config(struct fc_vport *vport, + struct fc_lport *lport) +{ + lport->link_up = 0; + lport->qfull = 0; + lport->max_retry_count = QEDF_FLOGI_RETRY_CNT; + lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT; + lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); + lport->boot_time = jiffies; + lport->e_d_tov = 2 * 1000; + lport->r_a_tov = 10 * 1000; + lport->does_npiv = 1; /* Temporary until we add NPIV support */ + + /* Allocate stats for vport */ + if (fc_lport_init_stats(lport)) + return -ENOMEM; + + /* Finish lport config */ + fc_lport_config(lport); + + /* offload related configuration */ + lport->crc_offload = 0; + lport->seq_offload = 0; + lport->lro_enabled = 0; + lport->lro_xid = 0; + lport->lso_max = 0; + + return 0; +} + +static int qedf_vport_create(struct fc_vport *vport, bool disabled) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port; + struct qedf_ctx *base_qedf = lport_priv(n_port); + struct qedf_ctx *vport_qedf; + + char buf[32]; + int rc = 0; + + rc = fcoe_validate_vport_create(vport); + if (rc) { + fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); + QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, " + "WWPN (0x%s) already exists.\n", buf); + return rc; + } + + if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) { + QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport " + "because link is not up.\n"); + return -EIO; + } + + vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx)); + if (!vn_port) { + QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport " + "for vport.\n"); + return -ENOMEM; + } + + fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); + QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n", + buf); + + /* Copy some fields from base_qedf */ + vport_qedf = lport_priv(vn_port); + memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx)); + + /* Set qedf data specific to this vport */ + vport_qedf->lport = vn_port; + /* Use same hba_lock as base_qedf */ + vport_qedf->hba_lock = base_qedf->hba_lock; + vport_qedf->pdev = base_qedf->pdev; + vport_qedf->cmd_mgr = base_qedf->cmd_mgr; + init_completion(&vport_qedf->flogi_compl); + INIT_LIST_HEAD(&vport_qedf->fcports); + INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work); + + rc = qedf_vport_libfc_config(vport, vn_port); + if (rc) { + QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory " + "for lport stats.\n"); + goto err; + } + + fc_set_wwnn(vn_port, vport->node_name); + fc_set_wwpn(vn_port, vport->port_name); + vport_qedf->wwnn = vn_port->wwnn; + vport_qedf->wwpn = vn_port->wwpn; + + vn_port->host->transportt = qedf_fc_vport_transport_template; + vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS; + vn_port->host->max_lun = qedf_max_lun; + vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD; + vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN; + 
vn_port->host->max_id = QEDF_MAX_SESSIONS; + + rc = scsi_add_host(vn_port->host, &vport->dev); + if (rc) { + QEDF_WARN(&base_qedf->dbg_ctx, + "Error adding Scsi_Host rc=0x%x.\n", rc); + goto err; + } + + /* Set default dev_loss_tmo based on module parameter */ + fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo; + + /* Init libfc stuffs */ + memcpy(&vn_port->tt, &qedf_lport_template, + sizeof(qedf_lport_template)); + fc_exch_init(vn_port); + fc_elsct_init(vn_port); + fc_lport_init(vn_port); + fc_disc_init(vn_port); + fc_disc_config(vn_port, vn_port); + + + /* Allocate the exchange manager */ + shost = vport_to_shost(vport); + n_port = shost_priv(shost); + fc_exch_mgr_list_clone(n_port, vn_port); + + /* Set max frame size */ + fc_set_mfs(vn_port, QEDF_MFS); + + fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN; + + if (disabled) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + } else { + vn_port->boot_time = jiffies; + fc_fabric_login(vn_port); + fc_vport_setlink(vn_port); + } + + /* Set symbolic node name */ + if (base_qedf->pdev->device == QL45xxx) + snprintf(fc_host_symbolic_name(vn_port->host), 256, + "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION); + + if (base_qedf->pdev->device == QL41xxx) + snprintf(fc_host_symbolic_name(vn_port->host), 256, + "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION); + + /* Set supported speed */ + fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds; + + /* Set speed */ + vn_port->link_speed = n_port->link_speed; + + /* Set port type */ + fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV; + + /* Set maxframe size */ + fc_host_maxframe_size(vn_port->host) = n_port->mfs; + + QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n", + vn_port); + + /* Set up debug context for vport */ + vport_qedf->dbg_ctx.host_no = vn_port->host->host_no; + vport_qedf->dbg_ctx.pdev = base_qedf->pdev; + + return 0; + +err: + scsi_host_put(vn_port->host); + return rc; +} + +static int qedf_vport_destroy(struct fc_vport *vport) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port = vport->dd_data; + struct qedf_ctx *qedf = lport_priv(vn_port); + + if (!qedf) { + QEDF_ERR(NULL, "qedf is NULL.\n"); + goto out; + } + + /* Set unloading bit on vport qedf_ctx to prevent more I/O */ + set_bit(QEDF_UNLOADING, &qedf->flags); + + mutex_lock(&n_port->lp_mutex); + list_del(&vn_port->list); + mutex_unlock(&n_port->lp_mutex); + + fc_fabric_logoff(vn_port); + fc_lport_destroy(vn_port); + + /* Detach from scsi-ml */ + fc_remove_host(vn_port->host); + scsi_remove_host(vn_port->host); + + /* + * Only try to release the exchange manager if the vn_port + * configuration is complete. 
+ */ + if (vn_port->state == LPORT_ST_READY) + fc_exch_mgr_free(vn_port); + + /* Free memory used by statistical counters */ + fc_lport_free_stats(vn_port); + + /* Release Scsi_Host */ + scsi_host_put(vn_port->host); + +out: + return 0; +} + +static int qedf_vport_disable(struct fc_vport *vport, bool disable) +{ + struct fc_lport *lport = vport->dd_data; + + if (disable) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + fc_fabric_logoff(lport); + } else { + lport->boot_time = jiffies; + fc_fabric_login(lport); + fc_vport_setlink(lport); + } + return 0; +} + +/* + * During removal we need to wait for all the vports associated with a port + * to be destroyed so we avoid a race condition where libfc is still trying + * to reap vports while the driver remove function has already reaped the + * driver contexts associated with the physical port. + */ +static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf) +{ + struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV, + "Entered.\n"); + while (fc_host->npiv_vports_inuse > 0) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV, + "Waiting for all vports to be reaped.\n"); + msleep(1000); + } +} + +/** + * qedf_fcoe_reset - Resets the fcoe + * + * @shost: shost the reset is from + * + * Returns: always 0 + */ +static int qedf_fcoe_reset(struct Scsi_Host *shost) +{ + struct fc_lport *lport = shost_priv(shost); + + qedf_ctx_soft_reset(lport); + return 0; +} + +static void qedf_get_host_port_id(struct Scsi_Host *shost) +{ + struct fc_lport *lport = shost_priv(shost); + + fc_host_port_id(shost) = lport->port_id; +} + +static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host + *shost) +{ + struct fc_host_statistics *qedf_stats; + struct fc_lport *lport = shost_priv(shost); + struct qedf_ctx *qedf = lport_priv(lport); + struct qed_fcoe_stats *fw_fcoe_stats; + + qedf_stats = fc_get_host_stats(shost); + + /* We don't collect offload stats for specific NPIV ports */ + if (lport->vport) + goto out; + + fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL); + if (!fw_fcoe_stats) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for " + "fw_fcoe_stats.\n"); + goto out; + } + + mutex_lock(&qedf->stats_mutex); + + /* Query firmware for offload stats */ + qed_ops->get_stats(qedf->cdev, fw_fcoe_stats); + + /* + * The expectation is that we add our offload stats to the stats + * being maintained by libfc each time the fc_get_host_status callback + * is invoked. The additions are not carried over for each call to + * the fc_get_host_stats callback. 
+ */ + qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt + + fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt + + fw_fcoe_stats->fcoe_tx_other_pkt_cnt; + qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt + + fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt + + fw_fcoe_stats->fcoe_rx_other_pkt_cnt; + qedf_stats->fcp_input_megabytes += + do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000); + qedf_stats->fcp_output_megabytes += + do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000); + qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4; + qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4; + qedf_stats->invalid_crc_count += + fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt; + qedf_stats->dumped_frames = + fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt; + qedf_stats->error_frames += + fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt; + qedf_stats->fcp_input_requests += qedf->input_requests; + qedf_stats->fcp_output_requests += qedf->output_requests; + qedf_stats->fcp_control_requests += qedf->control_requests; + qedf_stats->fcp_packet_aborts += qedf->packet_aborts; + qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures; + + mutex_unlock(&qedf->stats_mutex); + kfree(fw_fcoe_stats); +out: + return qedf_stats; +} + +static struct fc_function_template qedf_fc_transport_fn = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + + .get_host_port_id = qedf_get_host_port_id, + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + + /* + * Tell FC transport to allocate enough space to store the backpointer + * for the associate qedf_rport struct. 
+ */ + .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + + sizeof(struct qedf_rport)), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = qedf_fc_get_host_stats, + .issue_fc_host_lip = qedf_fcoe_reset, + .vport_create = qedf_vport_create, + .vport_delete = qedf_vport_destroy, + .vport_disable = qedf_vport_disable, + .bsg_request = fc_lport_bsg_request, +}; + +static struct fc_function_template qedf_fc_vport_transport_fn = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + + sizeof(struct qedf_rport)), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = fc_get_host_stats, + .issue_fc_host_lip = qedf_fcoe_reset, + .bsg_request = fc_lport_bsg_request, +}; + +static bool qedf_fp_has_work(struct qedf_fastpath *fp) +{ + struct qedf_ctx *qedf = fp->qedf; + struct global_queue *que; + struct qed_sb_info *sb_info = fp->sb_info; + struct status_block *sb = sb_info->sb_virt; + u16 prod_idx; + + /* Get the pointer to the global CQ this completion is on */ + que = qedf->global_queues[fp->sb_id]; + + /* Be sure all responses have been written to PI */ + rmb(); + + /* Get the current firmware producer index */ + prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI]; + + return (que->cq_prod_idx != prod_idx); +} + +/* + * Interrupt handler code. + */ + +/* Process completion queue and copy CQE contents for deferred processesing + * + * Return true if we should wake the I/O thread, false if not. + */ +static bool qedf_process_completions(struct qedf_fastpath *fp) +{ + struct qedf_ctx *qedf = fp->qedf; + struct qed_sb_info *sb_info = fp->sb_info; + struct status_block *sb = sb_info->sb_virt; + struct global_queue *que; + u16 prod_idx; + struct fcoe_cqe *cqe; + struct qedf_io_work *io_work; + unsigned int cpu; + struct qedf_ioreq *io_req = NULL; + u16 xid; + u16 new_cqes; + u32 comp_type; + + /* Get the current firmware producer index */ + prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI]; + + /* Get the pointer to the global CQ this completion is on */ + que = qedf->global_queues[fp->sb_id]; + + /* Calculate the amount of new elements since last processing */ + new_cqes = (prod_idx >= que->cq_prod_idx) ? 
+ (prod_idx - que->cq_prod_idx) : + 0x10000 - que->cq_prod_idx + prod_idx; + + /* Save producer index */ + que->cq_prod_idx = prod_idx; + + while (new_cqes) { + fp->completions++; + cqe = &que->cq[que->cq_cons_idx]; + + comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & + FCOE_CQE_CQE_TYPE_MASK; + + /* + * Process unsolicited CQEs directly in the interrupt handler + * since we need the fastpath ID + */ + if (comp_type == FCOE_UNSOLIC_CQE_TYPE) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, + "Unsolicited CQE.\n"); + qedf_process_unsol_compl(qedf, fp->sb_id, cqe); + /* + * Don't add a work list item. Increment the consumer + * index and move on. + */ + goto inc_idx; + } + + xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK; + io_req = &qedf->cmd_mgr->cmds[xid]; + + /* + * Figure out which percpu thread we should queue this I/O + * on. + */ + if (!io_req) + /* If there is no io_req associated with this CQE, + * just queue it on CPU 0 + */ + cpu = 0; + else { + cpu = io_req->cpu; + io_req->int_cpu = smp_processor_id(); + } + + io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC); + if (!io_work) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " + "work for I/O completion.\n"); + continue; + } + memset(io_work, 0, sizeof(struct qedf_io_work)); + + INIT_WORK(&io_work->work, qedf_fp_io_handler); + + /* Copy contents of CQE for deferred processing */ + memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe)); + + io_work->qedf = fp->qedf; + io_work->fp = NULL; /* Only used for unsolicited frames */ + + queue_work_on(cpu, qedf_io_wq, &io_work->work); + +inc_idx: + que->cq_cons_idx++; + if (que->cq_cons_idx == fp->cq_num_entries) + que->cq_cons_idx = 0; + new_cqes--; + } + + return true; +} + + +/* MSI-X fastpath handler code */ +static irqreturn_t qedf_msix_handler(int irq, void *dev_id) +{ + struct qedf_fastpath *fp = dev_id; + + if (!fp) { + QEDF_ERR(NULL, "fp is null.\n"); + return IRQ_HANDLED; + } + if (!fp->sb_info) { + QEDF_ERR(NULL, "fp->sb_info is null."); + return IRQ_HANDLED; + } + + /* + * Disable interrupts for this status block while we process new + * completions + */ + qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); + + while (1) { + qedf_process_completions(fp); + + if (qedf_fp_has_work(fp) == 0) { + /* Update the sb information */ + qed_sb_update_sb_idx(fp->sb_info); + + /* Check for more work */ + rmb(); + + if (qedf_fp_has_work(fp) == 0) { + /* Re-enable interrupts */ + qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); + return IRQ_HANDLED; + } + } + } + + /* Do we ever want to break out of the above loop?
*/ + return IRQ_HANDLED; +} + +/* simd handler for MSI/INTa */ +static void qedf_simd_int_handler(void *cookie) +{ + /* Cookie is qedf_ctx struct */ + struct qedf_ctx *qedf = (struct qedf_ctx *)cookie; + + QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf); +} + +#define QEDF_SIMD_HANDLER_NUM 0 +static void qedf_sync_free_irqs(struct qedf_ctx *qedf) +{ + int i; + u16 vector_idx = 0; + u32 vector; + + if (qedf->int_info.msix_cnt) { + for (i = 0; i < qedf->int_info.used_cnt; i++) { + vector_idx = i * qedf->dev_info.common.num_hwfns + + qed_ops->common->get_affin_hwfn_idx(qedf->cdev); + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "Freeing IRQ #%d vector_idx=%d.\n", + i, vector_idx); + vector = qedf->int_info.msix[vector_idx].vector; + synchronize_irq(vector); + irq_set_affinity_hint(vector, NULL); + irq_set_affinity_notifier(vector, NULL); + free_irq(vector, &qedf->fp_array[i]); + } + } else + qed_ops->common->simd_handler_clean(qedf->cdev, + QEDF_SIMD_HANDLER_NUM); + + qedf->int_info.used_cnt = 0; + qed_ops->common->set_fp_int(qedf->cdev, 0); +} + +static int qedf_request_msix_irq(struct qedf_ctx *qedf) +{ + int i, rc, cpu; + u16 vector_idx = 0; + u32 vector; + + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < qedf->num_queues; i++) { + vector_idx = i * qedf->dev_info.common.num_hwfns + + qed_ops->common->get_affin_hwfn_idx(qedf->cdev); + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "Requesting IRQ #%d vector_idx=%d.\n", + i, vector_idx); + vector = qedf->int_info.msix[vector_idx].vector; + rc = request_irq(vector, qedf_msix_handler, 0, "qedf", + &qedf->fp_array[i]); + + if (rc) { + QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n"); + qedf_sync_free_irqs(qedf); + return rc; + } + + qedf->int_info.used_cnt++; + rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu)); + cpu = cpumask_next(cpu, cpu_online_mask); + } + + return 0; +} + +static int qedf_setup_int(struct qedf_ctx *qedf) +{ + int rc = 0; + + /* + * Learn interrupt configuration + */ + rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus()); + if (rc <= 0) + return 0; + + rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info); + if (rc) + return 0; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of msix_cnt = " + "0x%x num of cpus = 0x%x\n", qedf->int_info.msix_cnt, + num_online_cpus()); + + if (qedf->int_info.msix_cnt) + return qedf_request_msix_irq(qedf); + + qed_ops->common->simd_handler_config(qedf->cdev, &qedf, + QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler); + qedf->int_info.used_cnt = 1; + + QEDF_ERR(&qedf->dbg_ctx, + "Cannot load driver due to a lack of MSI-X vectors.\n"); + return -EINVAL; +} + +/* Main function for libfc frame reception */ +static void qedf_recv_frame(struct qedf_ctx *qedf, + struct sk_buff *skb) +{ + u32 fr_len; + struct fc_lport *lport; + struct fc_frame_header *fh; + struct fcoe_crc_eof crc_eof; + struct fc_frame *fp; + u8 *mac = NULL; + u8 *dest_mac = NULL; + struct fcoe_hdr *hp; + struct qedf_rport *fcport; + struct fc_lport *vn_port; + u32 f_ctl; + + lport = qedf->lport; + if (lport == NULL || lport->state == LPORT_ST_DISABLED) { + QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n"); + kfree_skb(skb); + return; + } + + if (skb_is_nonlinear(skb)) + skb_linearize(skb); + mac = eth_hdr(skb)->h_source; + dest_mac = eth_hdr(skb)->h_dest; + + /* Pull the header */ + hp = (struct fcoe_hdr *)skb->data; + fh = (struct fc_frame_header *) skb_transport_header(skb); + skb_pull(skb, sizeof(struct fcoe_hdr)); + fr_len = skb->len - sizeof(struct fcoe_crc_eof); + + fp = (struct 
fc_frame *)skb; + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = hp->fcoe_sof; + if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { + QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n"); + kfree_skb(skb); + return; + } + fr_eof(fp) = crc_eof.fcoe_eof; + fr_crc(fp) = crc_eof.fcoe_crc32; + if (pskb_trim(skb, fr_len)) { + QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n"); + kfree_skb(skb); + return; + } + + fh = fc_frame_header_get(fp); + + /* + * Invalid frame filters. + */ + + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && + fh->fh_type == FC_TYPE_FCP) { + /* Drop FCP data. We dont this in L2 path */ + kfree_skb(skb); + return; + } + if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && + fh->fh_type == FC_TYPE_ELS) { + switch (fc_frame_payload_op(fp)) { + case ELS_LOGO: + if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { + /* drop non-FIP LOGO */ + kfree_skb(skb); + return; + } + break; + } + } + + if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { + /* Drop incoming ABTS */ + kfree_skb(skb); + return; + } + + if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "FC frame d_id mismatch with MAC %pM.\n", dest_mac); + kfree_skb(skb); + return; + } + + if (qedf->ctlr.state) { + if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "Wrong source address: mac:%pM dest_addr:%pM.\n", + mac, qedf->ctlr.dest_addr); + kfree_skb(skb); + return; + } + } + + vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); + + /* + * If the destination ID from the frame header does not match what we + * have on record for lport and the search for a NPIV port came up + * empty then this is not addressed to our port so simply drop it. + */ + if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, + "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n", + lport->port_id, ntoh24(fh->fh_d_id)); + kfree_skb(skb); + return; + } + + f_ctl = ntoh24(fh->fh_f_ctl); + if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) && + (f_ctl & FC_FC_EX_CTX)) { + /* Drop incoming ABTS response that has both SEQ/EX CTX set */ + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, + "Dropping ABTS response as both SEQ/EX CTX set.\n"); + kfree_skb(skb); + return; + } + + /* + * If a connection is uploading, drop incoming FCoE frames as there + * is a small window where we could try to return a frame while libfc + * is trying to clean things up. 
+ */ + + /* Get fcport associated with d_id if it exists */ + fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id)); + + if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION, + &fcport->flags)) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "Connection uploading, dropping fp=%p.\n", fp); + kfree_skb(skb); + return; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: " + "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp, + ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, + fh->fh_type); + if (qedf_dump_frames) + print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, + 1, skb->data, skb->len, false); + fc_exch_recv(lport, fp); +} + +static void qedf_ll2_process_skb(struct work_struct *work) +{ + struct qedf_skb_work *skb_work = + container_of(work, struct qedf_skb_work, work); + struct qedf_ctx *qedf = skb_work->qedf; + struct sk_buff *skb = skb_work->skb; + struct ethhdr *eh; + + if (!qedf) { + QEDF_ERR(NULL, "qedf is NULL\n"); + goto err_out; + } + + eh = (struct ethhdr *)skb->data; + + /* Undo VLAN encapsulation */ + if (eh->h_proto == htons(ETH_P_8021Q)) { + memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); + eh = skb_pull(skb, VLAN_HLEN); + skb_reset_mac_header(skb); + } + + /* + * Process either a FIP frame or FCoE frame based on the + * protocol value. If it's not either just drop the + * frame. + */ + if (eh->h_proto == htons(ETH_P_FIP)) { + qedf_fip_recv(qedf, skb); + goto out; + } else if (eh->h_proto == htons(ETH_P_FCOE)) { + __skb_pull(skb, ETH_HLEN); + qedf_recv_frame(qedf, skb); + goto out; + } else + goto err_out; + +err_out: + kfree_skb(skb); +out: + kfree(skb_work); + return; +} + +static int qedf_ll2_rx(void *cookie, struct sk_buff *skb, + u32 arg1, u32 arg2) +{ + struct qedf_ctx *qedf = (struct qedf_ctx *)cookie; + struct qedf_skb_work *skb_work; + + if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, + "Dropping frame as link state is down.\n"); + kfree_skb(skb); + return 0; + } + + skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC); + if (!skb_work) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so " + "dropping frame.\n"); + kfree_skb(skb); + return 0; + } + + INIT_WORK(&skb_work->work, qedf_ll2_process_skb); + skb_work->skb = skb; + skb_work->qedf = qedf; + queue_work(qedf->ll2_recv_wq, &skb_work->work); + + return 0; +} + +static struct qed_ll2_cb_ops qedf_ll2_cb_ops = { + .rx_cb = qedf_ll2_rx, + .tx_cb = NULL, +}; + +/* Main thread to process I/O completions */ +void qedf_fp_io_handler(struct work_struct *work) +{ + struct qedf_io_work *io_work = + container_of(work, struct qedf_io_work, work); + u32 comp_type; + + /* + * Deferred part of unsolicited CQE sends + * frame to libfc. 
+ */ + comp_type = (io_work->cqe.cqe_data >> + FCOE_CQE_CQE_TYPE_SHIFT) & + FCOE_CQE_CQE_TYPE_MASK; + if (comp_type == FCOE_UNSOLIC_CQE_TYPE && + io_work->fp) + fc_exch_recv(io_work->qedf->lport, io_work->fp); + else + qedf_process_cqe(io_work->qedf, &io_work->cqe); + + kfree(io_work); +} + +static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf, + struct qed_sb_info *sb_info, u16 sb_id) +{ + struct status_block *sb_virt; + dma_addr_t sb_phys; + int ret; + + sb_virt = dma_alloc_coherent(&qedf->pdev->dev, + sizeof(struct status_block), &sb_phys, GFP_KERNEL); + + if (!sb_virt) { + QEDF_ERR(&qedf->dbg_ctx, + "Status block allocation failed for id = %d.\n", + sb_id); + return -ENOMEM; + } + + ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys, + sb_id, QED_SB_TYPE_STORAGE); + + if (ret) { + QEDF_ERR(&qedf->dbg_ctx, + "Status block initialization failed (0x%x) for id = %d.\n", + ret, sb_id); + return ret; + } + + return 0; +} + +static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info) +{ + if (sb_info->sb_virt) + dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt), + (void *)sb_info->sb_virt, sb_info->sb_phys); +} + +static void qedf_destroy_sb(struct qedf_ctx *qedf) +{ + int id; + struct qedf_fastpath *fp = NULL; + + for (id = 0; id < qedf->num_queues; id++) { + fp = &(qedf->fp_array[id]); + if (fp->sb_id == QEDF_SB_ID_NULL) + break; + qedf_free_sb(qedf, fp->sb_info); + kfree(fp->sb_info); + } + kfree(qedf->fp_array); +} + +static int qedf_prepare_sb(struct qedf_ctx *qedf) +{ + int id; + struct qedf_fastpath *fp; + int ret; + + qedf->fp_array = + kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath), + GFP_KERNEL); + + if (!qedf->fp_array) { + QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation " + "failed.\n"); + return -ENOMEM; + } + + for (id = 0; id < qedf->num_queues; id++) { + fp = &(qedf->fp_array[id]); + fp->sb_id = QEDF_SB_ID_NULL; + fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL); + if (!fp->sb_info) { + QEDF_ERR(&(qedf->dbg_ctx), "SB info struct " + "allocation failed.\n"); + goto err; + } + ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id); + if (ret) { + QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and " + "initialization failed.\n"); + goto err; + } + fp->sb_id = id; + fp->qedf = qedf; + fp->cq_num_entries = + qedf->global_queues[id]->cq_mem_size / + sizeof(struct fcoe_cqe); + } +err: + return 0; +} + +void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe) +{ + u16 xid; + struct qedf_ioreq *io_req; + struct qedf_rport *fcport; + u32 comp_type; + u8 io_comp_type; + unsigned long flags; + + comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & + FCOE_CQE_CQE_TYPE_MASK; + + xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK; + io_req = &qedf->cmd_mgr->cmds[xid]; + + /* Completion not for a valid I/O anymore so just return */ + if (!io_req) { + QEDF_ERR(&qedf->dbg_ctx, + "io_req is NULL for xid=0x%x.\n", xid); + return; + } + + fcport = io_req->fcport; + + if (fcport == NULL) { + QEDF_ERR(&qedf->dbg_ctx, + "fcport is NULL for xid=0x%x io_req=%p.\n", + xid, io_req); + return; + } + + /* + * Check that fcport is offloaded. If it isn't then the spinlock + * isn't valid and shouldn't be taken. We should just return. 
+ */ + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(&qedf->dbg_ctx, + "Session not offloaded yet, fcport = %p.\n", fcport); + return; + } + + spin_lock_irqsave(&fcport->rport_lock, flags); + io_comp_type = io_req->cmd_type; + spin_unlock_irqrestore(&fcport->rport_lock, flags); + + switch (comp_type) { + case FCOE_GOOD_COMPLETION_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + switch (io_comp_type) { + case QEDF_SCSI_CMD: + qedf_scsi_completion(qedf, cqe, io_req); + break; + case QEDF_ELS: + qedf_process_els_compl(qedf, cqe, io_req); + break; + case QEDF_TASK_MGMT_CMD: + qedf_process_tmf_compl(qedf, cqe, io_req); + break; + case QEDF_SEQ_CLEANUP: + qedf_process_seq_cleanup_compl(qedf, cqe, io_req); + break; + } + break; + case FCOE_ERROR_DETECTION_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Error detect CQE.\n"); + qedf_process_error_detect(qedf, cqe, io_req); + break; + case FCOE_EXCH_CLEANUP_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Cleanup CQE.\n"); + qedf_process_cleanup_compl(qedf, cqe, io_req); + break; + case FCOE_ABTS_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Abort CQE.\n"); + qedf_process_abts_compl(qedf, cqe, io_req); + break; + case FCOE_DUMMY_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Dummy CQE.\n"); + break; + case FCOE_LOCAL_COMP_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Local completion CQE.\n"); + break; + case FCOE_WARNING_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Warning CQE.\n"); + qedf_process_warning_compl(qedf, cqe, io_req); + break; + case MAX_FCOE_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Max FCoE CQE.\n"); + break; + default: + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Default CQE.\n"); + break; + } +} + +static void qedf_free_bdq(struct qedf_ctx *qedf) +{ + int i; + + if (qedf->bdq_pbl_list) + dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, + qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma); + + if (qedf->bdq_pbl) + dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size, + qedf->bdq_pbl, qedf->bdq_pbl_dma); + + for (i = 0; i < QEDF_BDQ_SIZE; i++) { + if (qedf->bdq[i].buf_addr) { + dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE, + qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma); + } + } +} + +static void qedf_free_global_queues(struct qedf_ctx *qedf) +{ + int i; + struct global_queue **gl = qedf->global_queues; + + for (i = 0; i < qedf->num_queues; i++) { + if (!gl[i]) + continue; + + if (gl[i]->cq) + dma_free_coherent(&qedf->pdev->dev, + gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma); + if (gl[i]->cq_pbl) + dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size, + gl[i]->cq_pbl, gl[i]->cq_pbl_dma); + + kfree(gl[i]); + } + + qedf_free_bdq(qedf); +} + +static int qedf_alloc_bdq(struct qedf_ctx *qedf) +{ + int i; + struct scsi_bd *pbl; + u64 *list; + + /* Alloc dma memory for BDQ buffers */ + for (i = 0; i < QEDF_BDQ_SIZE; i++) { + qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL); + if (!qedf->bdq[i].buf_addr) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ " + "buffer %d.\n", i); + return -ENOMEM; + } + } + + /* Alloc dma memory for BDQ page buffer list */ + qedf->bdq_pbl_mem_size = + QEDF_BDQ_SIZE * sizeof(struct scsi_bd); + 
qedf->bdq_pbl_mem_size = + ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE); + + qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev, + qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL); + if (!qedf->bdq_pbl) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n"); + return -ENOMEM; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "BDQ PBL addr=0x%p dma=%pad\n", + qedf->bdq_pbl, &qedf->bdq_pbl_dma); + + /* + * Populate BDQ PBL with physical and virtual address of individual + * BDQ buffers + */ + pbl = (struct scsi_bd *)qedf->bdq_pbl; + for (i = 0; i < QEDF_BDQ_SIZE; i++) { + pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma)); + pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma)); + pbl->opaque.fcoe_opaque.hi = 0; + /* Opaque lo data is an index into the BDQ array */ + pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i); + pbl++; + } + + /* Allocate list of PBL pages */ + qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_PAGE_SIZE, + &qedf->bdq_pbl_list_dma, + GFP_KERNEL); + if (!qedf->bdq_pbl_list) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n"); + return -ENOMEM; + } + + /* + * Now populate PBL list with pages that contain pointers to the + * individual buffers. + */ + qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size / + QEDF_PAGE_SIZE; + list = (u64 *)qedf->bdq_pbl_list; + for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) { + *list = qedf->bdq_pbl_dma; + list++; + } + + return 0; +} + +static int qedf_alloc_global_queues(struct qedf_ctx *qedf) +{ + u32 *list; + int i; + int status; + u32 *pbl; + dma_addr_t page; + int num_pages; + + /* Allocate and map CQs, RQs */ + /* + * Number of global queues (CQ / RQ). This should + * be <= number of available MSIX vectors for the PF + */ + if (!qedf->num_queues) { + QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n"); + return -ENOMEM; + } + + /* + * Make sure we allocated the PBL that will contain the physical + * addresses of our queues + */ + if (!qedf->p_cpuq) { + QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n"); + return -EINVAL; + } + + qedf->global_queues = kzalloc((sizeof(struct global_queue *) + * qedf->num_queues), GFP_KERNEL); + if (!qedf->global_queues) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global " + "queues array ptr memory\n"); + return -ENOMEM; + } + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "qedf->global_queues=%p.\n", qedf->global_queues); + + /* Allocate DMA coherent buffers for BDQ */ + status = qedf_alloc_bdq(qedf); + if (status) { + QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n"); + goto mem_alloc_failure; + } + + /* Allocate a CQ and an associated PBL for each MSI-X vector */ + for (i = 0; i < qedf->num_queues; i++) { + qedf->global_queues[i] = kzalloc(sizeof(struct global_queue), + GFP_KERNEL); + if (!qedf->global_queues[i]) { + QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate " + "global queue %d.\n", i); + status = -ENOMEM; + goto mem_alloc_failure; + } + + qedf->global_queues[i]->cq_mem_size = + FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe); + qedf->global_queues[i]->cq_mem_size = + ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE); + + qedf->global_queues[i]->cq_pbl_size = + (qedf->global_queues[i]->cq_mem_size / + PAGE_SIZE) * sizeof(void *); + qedf->global_queues[i]->cq_pbl_size = + ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); + + qedf->global_queues[i]->cq = + dma_alloc_coherent(&qedf->pdev->dev, + qedf->global_queues[i]->cq_mem_size, + &qedf->global_queues[i]->cq_dma, + GFP_KERNEL); + + 
if (!qedf->global_queues[i]->cq) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n"); + status = -ENOMEM; + goto mem_alloc_failure; + } + + qedf->global_queues[i]->cq_pbl = + dma_alloc_coherent(&qedf->pdev->dev, + qedf->global_queues[i]->cq_pbl_size, + &qedf->global_queues[i]->cq_pbl_dma, + GFP_KERNEL); + + if (!qedf->global_queues[i]->cq_pbl) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n"); + status = -ENOMEM; + goto mem_alloc_failure; + } + + /* Create PBL */ + num_pages = qedf->global_queues[i]->cq_mem_size / + QEDF_PAGE_SIZE; + page = qedf->global_queues[i]->cq_dma; + pbl = (u32 *)qedf->global_queues[i]->cq_pbl; + + while (num_pages--) { + *pbl = U64_LO(page); + pbl++; + *pbl = U64_HI(page); + pbl++; + page += QEDF_PAGE_SIZE; + } + /* Set the initial consumer index for cq */ + qedf->global_queues[i]->cq_cons_idx = 0; + } + + list = (u32 *)qedf->p_cpuq; + + /* + * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer, + * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points + * to the physical address which contains an array of pointers to + * the physical addresses of the specific queue pages. + */ + for (i = 0; i < qedf->num_queues; i++) { + *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma); + list++; + *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma); + list++; + *list = U64_LO(0); + list++; + *list = U64_HI(0); + list++; + } + + return 0; + +mem_alloc_failure: + qedf_free_global_queues(qedf); + return status; +} + +static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf) +{ + u8 sq_num_pbl_pages; + u32 sq_mem_size; + u32 cq_mem_size; + u32 cq_num_entries; + int rval; + + /* + * The number of completion queues/fastpath interrupts/status blocks + * we allocation is the minimum off: + * + * Number of CPUs + * Number allocated by qed for our PCI function + */ + qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", + qedf->num_queues); + + qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev, + qedf->num_queues * sizeof(struct qedf_glbl_q_params), + &qedf->hw_p_cpuq, GFP_KERNEL); + + if (!qedf->p_cpuq) { + QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n"); + return 1; + } + + rval = qedf_alloc_global_queues(qedf); + if (rval) { + QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation " + "failed.\n"); + return 1; + } + + /* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */ + sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe); + sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE); + sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE); + + /* Calculate CQ num entries */ + cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe); + cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE); + cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe); + + memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params)); + + /* Setup the value for fcoe PF */ + qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS; + qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS; + qedf->pf_params.fcoe_pf_params.glbl_q_params_addr = + (u64)qedf->hw_p_cpuq; + qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages; + + qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0; + + qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries; + qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues; + + /* log_page_size: 12 for 4KB pages */ + qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE); + + qedf->pf_params.fcoe_pf_params.mtu = 9000; + 
qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI; + qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI; + + /* BDQ address and size */ + qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] = + qedf->bdq_pbl_list_dma; + qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] = + qedf->bdq_pbl_list_num_entries; + qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n", + qedf->bdq_pbl_list, + qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0], + qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "cq_num_entries=%d.\n", + qedf->pf_params.fcoe_pf_params.cq_num_entries); + + return 0; +} + +/* Free DMA coherent memory for array of queue pointers we pass to qed */ +static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf) +{ + size_t size = 0; + + if (qedf->p_cpuq) { + size = qedf->num_queues * sizeof(struct qedf_glbl_q_params); + dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq, + qedf->hw_p_cpuq); + } + + qedf_free_global_queues(qedf); + + kfree(qedf->global_queues); +} + +/* + * PCI driver functions + */ + +static const struct pci_device_id qedf_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) }, + {0} +}; +MODULE_DEVICE_TABLE(pci, qedf_pci_tbl); + +static struct pci_driver qedf_pci_driver = { + .name = QEDF_MODULE_NAME, + .id_table = qedf_pci_tbl, + .probe = qedf_probe, + .remove = qedf_remove, + .shutdown = qedf_shutdown, + .suspend = qedf_suspend, +}; + +static int __qedf_probe(struct pci_dev *pdev, int mode) +{ + int rc = -EINVAL; + struct fc_lport *lport; + struct qedf_ctx *qedf = NULL; + struct Scsi_Host *host; + bool is_vf = false; + struct qed_ll2_params params; + char host_buf[20]; + struct qed_link_params link_params; + int status; + void *task_start, *task_end; + struct qed_slowpath_params slowpath_params; + struct qed_probe_params qed_params; + u16 retry_cnt = 10; + + /* + * When doing error recovery we didn't reap the lport so don't try + * to reallocate it. 
+ */ +retry_probe: + if (mode == QEDF_MODE_RECOVERY) + msleep(2000); + + if (mode != QEDF_MODE_RECOVERY) { + lport = libfc_host_alloc(&qedf_host_template, + sizeof(struct qedf_ctx)); + + if (!lport) { + QEDF_ERR(NULL, "Could not allocate lport.\n"); + rc = -ENOMEM; + goto err0; + } + + fc_disc_init(lport); + + /* Initialize qedf_ctx */ + qedf = lport_priv(lport); + set_bit(QEDF_PROBING, &qedf->flags); + qedf->lport = lport; + qedf->ctlr.lp = lport; + qedf->pdev = pdev; + qedf->dbg_ctx.pdev = pdev; + qedf->dbg_ctx.host_no = lport->host->host_no; + spin_lock_init(&qedf->hba_lock); + INIT_LIST_HEAD(&qedf->fcports); + qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1; + atomic_set(&qedf->num_offloads, 0); + qedf->stop_io_on_error = false; + pci_set_drvdata(pdev, qedf); + init_completion(&qedf->fipvlan_compl); + mutex_init(&qedf->stats_mutex); + mutex_init(&qedf->flush_mutex); + qedf->flogi_pending = 0; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, + "QLogic FastLinQ FCoE Module qedf %s, " + "FW %d.%d.%d.%d\n", QEDF_VERSION, + FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, + FW_ENGINEERING_VERSION); + } else { + /* Init pointers during recovery */ + qedf = pci_get_drvdata(pdev); + set_bit(QEDF_PROBING, &qedf->flags); + lport = qedf->lport; + } + + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n"); + + host = lport->host; + + /* Allocate mempool for qedf_io_work structs */ + qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN, + qedf_io_work_cache); + if (qedf->io_mempool == NULL) { + QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n"); + goto err1; + } + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n", + qedf->io_mempool); + + sprintf(host_buf, "qedf_%u_link", + qedf->lport->host->host_no); + qedf->link_update_wq = create_workqueue(host_buf); + INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update); + INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery); + INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump); + INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work); + qedf->fipvlan_retries = qedf_fipvlan_retries; + /* Set a default prio in case DCBX doesn't converge */ + if (qedf_default_prio > -1) { + /* + * This is the case where we pass a modparam in so we want to + * honor it even if dcbx doesn't converge. + */ + qedf->prio = qedf_default_prio; + } else + qedf->prio = QEDF_DEFAULT_PRIO; + + /* + * Common probe. Takes care of basic hardware init and pci_* + * functions. 
+ */ + memset(&qed_params, 0, sizeof(qed_params)); + qed_params.protocol = QED_PROTOCOL_FCOE; + qed_params.dp_module = qedf_dp_module; + qed_params.dp_level = qedf_dp_level; + qed_params.is_vf = is_vf; + qedf->cdev = qed_ops->common->probe(pdev, &qed_params); + if (!qedf->cdev) { + if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) { + QEDF_ERR(&qedf->dbg_ctx, + "Retry %d initialize hardware\n", retry_cnt); + retry_cnt--; + goto retry_probe; + } + QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n"); + rc = -ENODEV; + goto err1; + } + + /* Learn information crucial for qedf to progress */ + rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); + goto err1; + } + + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n", + qedf->dev_info.common.num_hwfns, + qed_ops->common->get_affin_hwfn_idx(qedf->cdev)); + + /* queue allocation code should come here + * order should be + * slowpath_start + * status block allocation + * interrupt registration (to get min number of queues) + * set_fcoe_pf_param + * qed_sp_fcoe_func_start + */ + rc = qedf_set_fcoe_pf_param(qedf); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n"); + goto err2; + } + qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); + + /* Learn information crucial for qedf to progress */ + rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); + if (rc) { + QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n"); + goto err2; + } + + if (mode != QEDF_MODE_RECOVERY) { + qedf->devlink = qed_ops->common->devlink_register(qedf->cdev); + if (IS_ERR(qedf->devlink)) { + QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n"); + rc = PTR_ERR(qedf->devlink); + qedf->devlink = NULL; + goto err2; + } + } + + /* Record BDQ producer doorbell addresses */ + qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; + qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod, + qedf->bdq_secondary_prod); + + qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf); + + rc = qedf_prepare_sb(qedf); + if (rc) { + + QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); + goto err2; + } + + /* Start the Slowpath-process */ + slowpath_params.int_mode = QED_INT_MODE_MSIX; + slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER; + slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER; + slowpath_params.drv_rev = QEDF_DRIVER_REV_VER; + slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER; + strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE); + rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); + goto err2; + } + + /* + * update_pf_params needs to be called before and after slowpath + * start + */ + qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); + + /* Setup interrupts */ + rc = qedf_setup_int(qedf); + if (rc) { + QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n"); + goto err3; + } + + rc = qed_ops->start(qedf->cdev, &qedf->tasks); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n"); + goto err4; + } + task_start = qedf_get_task_mem(&qedf->tasks, 0); + task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, " + "end=%p block_size=%u.\n", task_start, task_end, + qedf->tasks.size); + + /* + * We need to write the 
number of BDs in the BDQ we've preallocated so + * the f/w will do a prefetch and we'll get an unsolicited CQE when a + * packet arrives. + */ + qedf->bdq_prod_idx = QEDF_BDQ_SIZE; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Writing %d to primary and secondary BDQ doorbell registers.\n", + qedf->bdq_prod_idx); + writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod); + readw(qedf->bdq_primary_prod); + writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod); + readw(qedf->bdq_secondary_prod); + + qed_ops->common->set_power_state(qedf->cdev, PCI_D0); + + /* Now that the dev_info struct has been filled in set the MAC + * address + */ + ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n", + qedf->mac); + + /* + * Set the WWNN and WWPN in the following way: + * + * If the info we get from qed is non-zero then use that to set the + * WWPN and WWNN. Otherwise fall back to use fcoe_wwn_from_mac() based + * on the MAC address. + */ + if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Setting WWPN and WWNN from qed dev_info.\n"); + qedf->wwnn = qedf->dev_info.wwnn; + qedf->wwpn = qedf->dev_info.wwpn; + } else { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Setting WWPN and WWNN using fcoe_wwn_from_mac().\n"); + qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0); + qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0); + } + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx " + "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn); + + sprintf(host_buf, "host_%d", host->host_no); + qed_ops->common->set_name(qedf->cdev, host_buf); + + /* Allocate cmd mgr */ + qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf); + if (!qedf->cmd_mgr) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n"); + rc = -ENOMEM; + goto err5; + } + + if (mode != QEDF_MODE_RECOVERY) { + host->transportt = qedf_fc_transport_template; + host->max_lun = qedf_max_lun; + host->max_cmd_len = QEDF_MAX_CDB_LEN; + host->max_id = QEDF_MAX_SESSIONS; + host->can_queue = FCOE_PARAMS_NUM_TASKS; + rc = scsi_add_host(host, &pdev->dev); + if (rc) { + QEDF_WARN(&qedf->dbg_ctx, + "Error adding Scsi_Host rc=0x%x.\n", rc); + goto err6; + } + } + + memset(¶ms, 0, sizeof(params)); + params.mtu = QEDF_LL2_BUF_SIZE; + ether_addr_copy(params.ll2_mac_address, qedf->mac); + + /* Start LL2 processing thread */ + snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no); + qedf->ll2_recv_wq = + create_workqueue(host_buf); + if (!qedf->ll2_recv_wq) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n"); + rc = -ENOMEM; + goto err7; + } + +#ifdef CONFIG_DEBUG_FS + qedf_dbg_host_init(&(qedf->dbg_ctx), qedf_debugfs_ops, + qedf_dbg_fops); +#endif + + /* Start LL2 */ + qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf); + rc = qed_ops->ll2->start(qedf->cdev, ¶ms); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n"); + goto err7; + } + set_bit(QEDF_LL2_STARTED, &qedf->flags); + + /* Set initial FIP/FCoE VLAN to NULL */ + qedf->vlan_id = 0; + + /* + * No need to setup fcoe_ctlr or fc_lport objects during recovery since + * they were not reaped during the unload process. 
+ */ + if (mode != QEDF_MODE_RECOVERY) { + /* Setup imbedded fcoe controller */ + qedf_fcoe_ctlr_setup(qedf); + + /* Setup lport */ + rc = qedf_lport_setup(qedf); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), + "qedf_lport_setup failed.\n"); + goto err7; + } + } + + sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no); + qedf->timer_work_queue = + create_workqueue(host_buf); + if (!qedf->timer_work_queue) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer " + "workqueue.\n"); + rc = -ENOMEM; + goto err7; + } + + /* DPC workqueue is not reaped during recovery unload */ + if (mode != QEDF_MODE_RECOVERY) { + sprintf(host_buf, "qedf_%u_dpc", + qedf->lport->host->host_no); + qedf->dpc_wq = create_workqueue(host_buf); + } + INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler); + + /* + * GRC dump and sysfs parameters are not reaped during the recovery + * unload process. + */ + if (mode != QEDF_MODE_RECOVERY) { + qedf->grcdump_size = + qed_ops->common->dbg_all_data_size(qedf->cdev); + if (qedf->grcdump_size) { + rc = qedf_alloc_grc_dump_buf(&qedf->grcdump, + qedf->grcdump_size); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), + "GRC Dump buffer alloc failed.\n"); + qedf->grcdump = NULL; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "grcdump: addr=%p, size=%u.\n", + qedf->grcdump, qedf->grcdump_size); + } + qedf_create_sysfs_ctx_attr(qedf); + + /* Initialize I/O tracing for this adapter */ + spin_lock_init(&qedf->io_trace_lock); + qedf->io_trace_idx = 0; + } + + init_completion(&qedf->flogi_compl); + + status = qed_ops->common->update_drv_state(qedf->cdev, true); + if (status) + QEDF_ERR(&(qedf->dbg_ctx), + "Failed to send drv state to MFW.\n"); + + memset(&link_params, 0, sizeof(struct qed_link_params)); + link_params.link_up = true; + status = qed_ops->common->set_link(qedf->cdev, &link_params); + if (status) + QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n"); + + /* Start/restart discovery */ + if (mode == QEDF_MODE_RECOVERY) + fcoe_ctlr_link_up(&qedf->ctlr); + else + fc_fabric_login(lport); + + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); + + clear_bit(QEDF_PROBING, &qedf->flags); + + /* All good */ + return 0; + +err7: + if (qedf->ll2_recv_wq) + destroy_workqueue(qedf->ll2_recv_wq); + fc_remove_host(qedf->lport->host); + scsi_remove_host(qedf->lport->host); +#ifdef CONFIG_DEBUG_FS + qedf_dbg_host_exit(&(qedf->dbg_ctx)); +#endif +err6: + qedf_cmd_mgr_free(qedf->cmd_mgr); +err5: + qed_ops->stop(qedf->cdev); +err4: + qedf_free_fcoe_pf_param(qedf); + qedf_sync_free_irqs(qedf); +err3: + qed_ops->common->slowpath_stop(qedf->cdev); +err2: + qed_ops->common->remove(qedf->cdev); +err1: + scsi_host_put(lport->host); +err0: + return rc; +} + +static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + return __qedf_probe(pdev, QEDF_MODE_NORMAL); +} + +static void __qedf_remove(struct pci_dev *pdev, int mode) +{ + struct qedf_ctx *qedf; + int rc; + + if (!pdev) { + QEDF_ERR(NULL, "pdev is NULL.\n"); + return; + } + + qedf = pci_get_drvdata(pdev); + + /* + * Prevent race where we're in board disable work and then try to + * rmmod the module. 
+ */ + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n"); + return; + } + + if (mode != QEDF_MODE_RECOVERY) + set_bit(QEDF_UNLOADING, &qedf->flags); + + /* Logoff the fabric to upload all connections */ + if (mode == QEDF_MODE_RECOVERY) + fcoe_ctlr_link_down(&qedf->ctlr); + else + fc_fabric_logoff(qedf->lport); + + if (!qedf_wait_for_upload(qedf)) + QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); + +#ifdef CONFIG_DEBUG_FS + qedf_dbg_host_exit(&(qedf->dbg_ctx)); +#endif + + /* Stop any link update handling */ + cancel_delayed_work_sync(&qedf->link_update); + destroy_workqueue(qedf->link_update_wq); + qedf->link_update_wq = NULL; + + if (qedf->timer_work_queue) + destroy_workqueue(qedf->timer_work_queue); + + /* Stop Light L2 */ + clear_bit(QEDF_LL2_STARTED, &qedf->flags); + qed_ops->ll2->stop(qedf->cdev); + if (qedf->ll2_recv_wq) + destroy_workqueue(qedf->ll2_recv_wq); + + /* Stop fastpath */ + qedf_sync_free_irqs(qedf); + qedf_destroy_sb(qedf); + + /* + * During recovery don't destroy OS constructs that represent the + * physical port. + */ + if (mode != QEDF_MODE_RECOVERY) { + qedf_free_grc_dump_buf(&qedf->grcdump); + qedf_remove_sysfs_ctx_attr(qedf); + + /* Remove all SCSI/libfc/libfcoe structures */ + fcoe_ctlr_destroy(&qedf->ctlr); + fc_lport_destroy(qedf->lport); + fc_remove_host(qedf->lport->host); + scsi_remove_host(qedf->lport->host); + } + + qedf_cmd_mgr_free(qedf->cmd_mgr); + + if (mode != QEDF_MODE_RECOVERY) { + fc_exch_mgr_free(qedf->lport); + fc_lport_free_stats(qedf->lport); + + /* Wait for all vports to be reaped */ + qedf_wait_for_vport_destroy(qedf); + } + + /* + * Now that all connections have been uploaded we can stop the + * rest of the qed operations + */ + qed_ops->stop(qedf->cdev); + + if (mode != QEDF_MODE_RECOVERY) { + if (qedf->dpc_wq) { + /* Stop general DPC handling */ + destroy_workqueue(qedf->dpc_wq); + qedf->dpc_wq = NULL; + } + } + + /* Final shutdown for the board */ + qedf_free_fcoe_pf_param(qedf); + if (mode != QEDF_MODE_RECOVERY) { + qed_ops->common->set_power_state(qedf->cdev, PCI_D0); + pci_set_drvdata(pdev, NULL); + } + + rc = qed_ops->common->update_drv_state(qedf->cdev, false); + if (rc) + QEDF_ERR(&(qedf->dbg_ctx), + "Failed to send drv state to MFW.\n"); + + if (mode != QEDF_MODE_RECOVERY && qedf->devlink) { + qed_ops->common->devlink_unregister(qedf->devlink); + qedf->devlink = NULL; + } + + qed_ops->common->slowpath_stop(qedf->cdev); + qed_ops->common->remove(qedf->cdev); + + mempool_destroy(qedf->io_mempool); + + /* Only reap the Scsi_host on a real removal */ + if (mode != QEDF_MODE_RECOVERY) + scsi_host_put(qedf->lport->host); +} + +static void qedf_remove(struct pci_dev *pdev) +{ + /* Check to make sure this function wasn't already disabled */ + if (!atomic_read(&pdev->enable_cnt)) + return; + + __qedf_remove(pdev, QEDF_MODE_NORMAL); +} + +void qedf_wq_grcdump(struct work_struct *work) +{ + struct qedf_ctx *qedf = + container_of(work, struct qedf_ctx, grcdump_work.work); + + QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n"); + qedf_capture_grc_dump(qedf); +} + +void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type) +{ + struct qedf_ctx *qedf = dev; + + QEDF_ERR(&(qedf->dbg_ctx), + "Hardware error handler scheduled, event=%d.\n", + err_type); + + if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), + "Already in recovery, not scheduling board disable work.\n"); + return; + } + + switch (err_type) { + case 
QED_HW_ERR_FAN_FAIL: + schedule_delayed_work(&qedf->board_disable_work, 0); + break; + case QED_HW_ERR_MFW_RESP_FAIL: + case QED_HW_ERR_HW_ATTN: + case QED_HW_ERR_DMAE_FAIL: + case QED_HW_ERR_FW_ASSERT: + /* Prevent HW attentions from being reasserted */ + qed_ops->common->attn_clr_enable(qedf->cdev, true); + break; + case QED_HW_ERR_RAMROD_FAIL: + /* Prevent HW attentions from being reasserted */ + qed_ops->common->attn_clr_enable(qedf->cdev, true); + + if (qedf_enable_recovery && qedf->devlink) + qed_ops->common->report_fatal_error(qedf->devlink, + err_type); + + break; + default: + break; + } +} + +/* + * Protocol TLV handler + */ +void qedf_get_protocol_tlv_data(void *dev, void *data) +{ + struct qedf_ctx *qedf = dev; + struct qed_mfw_tlv_fcoe *fcoe = data; + struct fc_lport *lport; + struct Scsi_Host *host; + struct fc_host_attrs *fc_host; + struct fc_host_statistics *hst; + + if (!qedf) { + QEDF_ERR(NULL, "qedf is null.\n"); + return; + } + + if (test_bit(QEDF_PROBING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n"); + return; + } + + lport = qedf->lport; + host = lport->host; + fc_host = shost_to_fc_host(host); + + /* Force a refresh of the fc_host stats including offload stats */ + hst = qedf_fc_get_host_stats(host); + + fcoe->qos_pri_set = true; + fcoe->qos_pri = 3; /* Hard coded to 3 in driver */ + + fcoe->ra_tov_set = true; + fcoe->ra_tov = lport->r_a_tov; + + fcoe->ed_tov_set = true; + fcoe->ed_tov = lport->e_d_tov; + + fcoe->npiv_state_set = true; + fcoe->npiv_state = 1; /* NPIV always enabled */ + + fcoe->num_npiv_ids_set = true; + fcoe->num_npiv_ids = fc_host->npiv_vports_inuse; + + /* Certain attributes we only want to set if we've selected an FCF */ + if (qedf->ctlr.sel_fcf) { + fcoe->switch_name_set = true; + u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name); + } + + fcoe->port_state_set = true; + /* For qedf we're either link down or fabric attach */ + if (lport->link_up) + fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC; + else + fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE; + + fcoe->link_failures_set = true; + fcoe->link_failures = (u16)hst->link_failure_count; + + fcoe->fcoe_txq_depth_set = true; + fcoe->fcoe_rxq_depth_set = true; + fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS; + fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS; + + fcoe->fcoe_rx_frames_set = true; + fcoe->fcoe_rx_frames = hst->rx_frames; + + fcoe->fcoe_tx_frames_set = true; + fcoe->fcoe_tx_frames = hst->tx_frames; + + fcoe->fcoe_rx_bytes_set = true; + fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000; + + fcoe->fcoe_tx_bytes_set = true; + fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000; + + fcoe->crc_count_set = true; + fcoe->crc_count = hst->invalid_crc_count; + + fcoe->tx_abts_set = true; + fcoe->tx_abts = hst->fcp_packet_aborts; + + fcoe->tx_lun_rst_set = true; + fcoe->tx_lun_rst = qedf->lun_resets; + + fcoe->abort_task_sets_set = true; + fcoe->abort_task_sets = qedf->packet_aborts; + + fcoe->scsi_busy_set = true; + fcoe->scsi_busy = qedf->busy; + + fcoe->scsi_tsk_full_set = true; + fcoe->scsi_tsk_full = qedf->task_set_fulls; +} + +/* Deferred work function to perform soft context reset on STAG change */ +void qedf_stag_change_work(struct work_struct *work) +{ + struct qedf_ctx *qedf = + container_of(work, struct qedf_ctx, stag_work.work); + + printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.", + dev_name(&qedf->pdev->dev), __func__, __LINE__, + qedf->dbg_ctx.host_no); + qedf_ctx_soft_reset(qedf->lport); +} + 
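[Illustrative note, not part of the patch] The board-disable path above and the recovery handler that follows use a common kernel pattern: the qed core invokes these callbacks in a context where the heavy remove/re-probe cycle cannot run, so the driver only records a guard bit and queues a delayed work item, and the real teardown and re-initialisation happen later in process context. A minimal sketch of that pattern is below; my_dev, my_err_cb and my_recovery_fn are hypothetical names for illustration only, not qedf symbols.

/* Sketch of the "schedule now, recover later" pattern; all names hypothetical. */
#include <linux/workqueue.h>
#include <linux/bitops.h>

struct my_dev {
	unsigned long flags;
#define MY_IN_RECOVERY	0
	struct delayed_work recovery_work;
};

/* Runs later in process context: safe to sleep, tear down and re-probe. */
static void my_recovery_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev,
					  recovery_work.work);

	/* ... remove and re-initialise the PCI function here ... */

	clear_bit(MY_IN_RECOVERY, &dev->flags);
}

/* Called by the core when a fatal error is detected; must not block. */
static void my_err_cb(struct my_dev *dev)
{
	/* Schedule at most one recovery; the worker clears the bit. */
	if (test_and_set_bit(MY_IN_RECOVERY, &dev->flags))
		return;

	schedule_delayed_work(&dev->recovery_work, 0);
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->recovery_work, my_recovery_fn);
}

qedf itself splits this slightly differently (the recovery worker takes the QEDF_IN_RECOVERY bit before calling __qedf_remove/__qedf_probe), but the division between a non-blocking callback and a sleeping worker is the same.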
+static void qedf_shutdown(struct pci_dev *pdev) +{ + __qedf_remove(pdev, QEDF_MODE_NORMAL); +} + +static int qedf_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct qedf_ctx *qedf; + + if (!pdev) { + QEDF_ERR(NULL, "pdev is NULL.\n"); + return -ENODEV; + } + + qedf = pci_get_drvdata(pdev); + + QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__); + + return -EPERM; +} + +/* + * Recovery handler code + */ +static void qedf_schedule_recovery_handler(void *dev) +{ + struct qedf_ctx *qedf = dev; + + QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n"); + schedule_delayed_work(&qedf->recovery_work, 0); +} + +static void qedf_recovery_handler(struct work_struct *work) +{ + struct qedf_ctx *qedf = + container_of(work, struct qedf_ctx, recovery_work.work); + + if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags)) + return; + + /* + * Call common_ops->recovery_prolog to allow the MFW to quiesce + * any PCI transactions. + */ + qed_ops->common->recovery_prolog(qedf->cdev); + + QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n"); + __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY); + /* + * Reset link and dcbx to down state since we will not get a link down + * event from the MFW but calling __qedf_remove will essentially be a + * link down event. + */ + atomic_set(&qedf->link_state, QEDF_LINK_DOWN); + atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING); + __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY); + clear_bit(QEDF_IN_RECOVERY, &qedf->flags); + QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n"); +} + +/* Generic TLV data callback */ +void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data) +{ + struct qedf_ctx *qedf; + + if (!dev) { + QEDF_INFO(NULL, QEDF_LOG_EVT, + "dev is NULL so ignoring get_generic_tlv_data request.\n"); + return; + } + qedf = (struct qedf_ctx *)dev; + + memset(data, 0, sizeof(struct qed_generic_tlvs)); + ether_addr_copy(data->mac[0], qedf->mac); +} + +/* + * Module Init/Remove + */ + +static int __init qedf_init(void) +{ + int ret; + + /* If debug=1 passed, set the default log mask */ + if (qedf_debug == QEDF_LOG_DEFAULT) + qedf_debug = QEDF_DEFAULT_LOG_MASK; + + /* + * Check that default prio for FIP/FCoE traffic is between 0..7 if a + * value has been set + */ + if (qedf_default_prio > -1) + if (qedf_default_prio > 7) { + qedf_default_prio = QEDF_DEFAULT_PRIO; + QEDF_ERR(NULL, "FCoE/FIP priority out of range, resetting to %d.\n", + QEDF_DEFAULT_PRIO); + } + + /* Print driver banner */ + QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR, + QEDF_VERSION); + + /* Create kmem_cache for qedf_io_work structs */ + qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache", + sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL); + if (qedf_io_work_cache == NULL) { + QEDF_ERR(NULL, "qedf_io_work_cache is NULL.\n"); + goto err1; + } + QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n", + qedf_io_work_cache); + + qed_ops = qed_get_fcoe_ops(); + if (!qed_ops) { + QEDF_ERR(NULL, "Failed to get qed fcoe operations\n"); + goto err1; + } + +#ifdef CONFIG_DEBUG_FS + qedf_dbg_init("qedf"); +#endif + + qedf_fc_transport_template = + fc_attach_transport(&qedf_fc_transport_fn); + if (!qedf_fc_transport_template) { + QEDF_ERR(NULL, "Could not register with FC transport\n"); + goto err2; + } + + qedf_fc_vport_transport_template = + fc_attach_transport(&qedf_fc_vport_transport_fn); + if (!qedf_fc_vport_transport_template) { + QEDF_ERR(NULL, "Could not register vport template with FC " + "transport\n"); + goto err3; + } + + 
qedf_io_wq = create_workqueue("qedf_io_wq"); + if (!qedf_io_wq) { + QEDF_ERR(NULL, "Could not create qedf_io_wq.\n"); + goto err4; + } + + qedf_cb_ops.get_login_failures = qedf_get_login_failures; + + ret = pci_register_driver(&qedf_pci_driver); + if (ret) { + QEDF_ERR(NULL, "Failed to register driver\n"); + goto err5; + } + + return 0; + +err5: + destroy_workqueue(qedf_io_wq); +err4: + fc_release_transport(qedf_fc_vport_transport_template); +err3: + fc_release_transport(qedf_fc_transport_template); +err2: +#ifdef CONFIG_DEBUG_FS + qedf_dbg_exit(); +#endif + qed_put_fcoe_ops(); +err1: + return -EINVAL; +} + +static void __exit qedf_cleanup(void) +{ + pci_unregister_driver(&qedf_pci_driver); + + destroy_workqueue(qedf_io_wq); + + fc_release_transport(qedf_fc_vport_transport_template); + fc_release_transport(qedf_fc_transport_template); +#ifdef CONFIG_DEBUG_FS + qedf_dbg_exit(); +#endif + qed_put_fcoe_ops(); + + kmem_cache_destroy(qedf_io_work_cache); +} + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module"); +MODULE_AUTHOR("QLogic Corporation"); +MODULE_VERSION(QEDF_VERSION); +module_init(qedf_init); +module_exit(qedf_cleanup); diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h new file mode 100644 index 000000000..b0e37afe5 --- /dev/null +++ b/drivers/scsi/qedf/qedf_version.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016-2018 Cavium Inc. + */ + +#define QEDF_VERSION "8.42.3.0" +#define QEDF_DRIVER_MAJOR_VER 8 +#define QEDF_DRIVER_MINOR_VER 42 +#define QEDF_DRIVER_REV_VER 3 +#define QEDF_DRIVER_ENG_VER 0 + diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig new file mode 100644 index 000000000..2091d883a --- /dev/null +++ b/drivers/scsi/qedi/Kconfig @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only +config QEDI + tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support" + depends on PCI && SCSI && UIO + depends on QED + select SCSI_ISCSI_ATTRS + select QED_LL2 + select QED_OOO + select QED_ISCSI + select ISCSI_BOOT_SYSFS + help + This driver supports iSCSI offload for the QLogic FastLinQ + 41000 Series Converged Network Adapters. diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile new file mode 100644 index 000000000..d84eedfd0 --- /dev/null +++ b/drivers/scsi/qedi/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_QEDI) := qedi.o +qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \ + qedi_dbg.o qedi_fw_api.o + +qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h new file mode 100644 index 000000000..ce199a7a1 --- /dev/null +++ b/drivers/scsi/qedi/qedi.h @@ -0,0 +1,393 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. 
+ */ + +#ifndef _QEDI_H_ +#define _QEDI_H_ + +#define __PREVENT_QED_HSI__ + +#include +#include +#include +#include + +#include "qedi_hsi.h" +#include +#include "qedi_dbg.h" +#include +#include +#include "qedi_version.h" +#include "qedi_nvm_iscsi_cfg.h" + +#define QEDI_MODULE_NAME "qedi" + +struct qedi_endpoint; + +#ifndef GET_FIELD2 +#define GET_FIELD2(value, name) \ + (((value) & (name ## _MASK)) >> (name ## _OFFSET)) +#endif + +/* + * PCI function probe defines + */ +#define QEDI_MODE_NORMAL 0 +#define QEDI_MODE_RECOVERY 1 +#define QEDI_MODE_SHUTDOWN 2 + +#define ISCSI_WQE_SET_PTU_INVALIDATE 1 +#define QEDI_MAX_ISCSI_TASK 4096 +#define QEDI_MAX_TASK_NUM 0x0FFF +#define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024 +#define QEDI_ISCSI_MAX_BDS_PER_CMD 255 /* Firmware max BDs is 255 */ +#define MAX_OUTSTANDING_TASKS_PER_CON 1024 + +#define QEDI_MAX_BD_LEN 0xffff +#define QEDI_BD_SPLIT_SZ 0x1000 +#define QEDI_PAGE_SIZE 4096 +#define QEDI_FAST_SGE_COUNT 4 +/* MAX Length for cached SGL */ +#define MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1) + +#define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \ + num_online_cpus()) + +#define QEDI_LOCAL_PORT_MIN 60000 +#define QEDI_LOCAL_PORT_MAX 61024 +#define QEDI_LOCAL_PORT_RANGE (QEDI_LOCAL_PORT_MAX - QEDI_LOCAL_PORT_MIN) +#define QEDI_LOCAL_PORT_INVALID 0xffff +#define TX_RX_RING 16 +#define RX_RING (TX_RX_RING - 1) +#define QEDI_PAGE_ALIGN(addr) ALIGN(addr, QEDI_PAGE_SIZE) +#define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1)) + +#define QEDI_HW_DMA_BOUNDARY 0xfff +#define QEDI_PATH_HANDLE 0xFE0000000UL + +enum qedi_nvm_tgts { + QEDI_NVM_TGT_PRI, + QEDI_NVM_TGT_SEC, +}; + +struct qedi_nvm_iscsi_image { + struct nvm_iscsi_cfg iscsi_cfg; + u32 crc; +}; + +struct qedi_uio_ctrl { + /* meta data */ + u32 uio_hsi_version; + + /* user writes */ + u32 host_tx_prod; + u32 host_rx_cons; + u32 host_rx_bd_cons; + u32 host_tx_pkt_len; + u32 host_rx_cons_cnt; + + /* driver writes */ + u32 hw_tx_cons; + u32 hw_rx_prod; + u32 hw_rx_bd_prod; + u32 hw_rx_prod_cnt; + + /* other */ + u8 mac_addr[6]; + u8 reserve[2]; +}; + +struct qedi_rx_bd { + u32 rx_pkt_index; + u32 rx_pkt_len; + u16 vlan_id; +}; + +#define QEDI_RX_DESC_CNT (QEDI_PAGE_SIZE / sizeof(struct qedi_rx_bd)) +#define QEDI_MAX_RX_DESC_CNT (QEDI_RX_DESC_CNT - 1) +#define QEDI_NUM_RX_BD (QEDI_RX_DESC_CNT * 1) +#define QEDI_MAX_RX_BD (QEDI_NUM_RX_BD - 1) + +#define QEDI_NEXT_RX_IDX(x) ((((x) & (QEDI_MAX_RX_DESC_CNT)) == \ + (QEDI_MAX_RX_DESC_CNT - 1)) ? 
\ + (x) + 2 : (x) + 1) + +struct qedi_uio_dev { + struct uio_info qedi_uinfo; + u32 uio_dev; + struct list_head list; + + u32 ll2_ring_size; + void *ll2_ring; + + u32 ll2_buf_size; + void *ll2_buf; + + void *rx_pkt; + void *tx_pkt; + + struct qedi_ctx *qedi; + struct pci_dev *pdev; + void *uctrl; +}; + +/* List to maintain the skb pointers */ +struct skb_work_list { + struct list_head list; + struct sk_buff *skb; + u16 vlan_id; +}; + +/* Queue sizes in number of elements */ +#define QEDI_SQ_SIZE MAX_OUTSTANDING_TASKS_PER_CON +#define QEDI_CQ_SIZE 2048 +#define QEDI_CMDQ_SIZE QEDI_MAX_ISCSI_TASK +#define QEDI_PROTO_CQ_PROD_IDX 0 + +struct qedi_glbl_q_params { + u64 hw_p_cq; /* Completion queue PBL */ + u64 hw_p_rq; /* Request queue PBL */ + u64 hw_p_cmdq; /* Command queue PBL */ +}; + +struct global_queue { + union iscsi_cqe *cq; + dma_addr_t cq_dma; + u32 cq_mem_size; + u32 cq_cons_idx; /* Completion queue consumer index */ + + void *cq_pbl; + dma_addr_t cq_pbl_dma; + u32 cq_pbl_size; + +}; + +struct qedi_fastpath { + struct qed_sb_info *sb_info; + u16 sb_id; +#define QEDI_NAME_SIZE 16 + char name[QEDI_NAME_SIZE]; + struct qedi_ctx *qedi; +}; + +/* Used to pass fastpath information needed to process CQEs */ +struct qedi_io_work { + struct list_head list; + struct iscsi_cqe_solicited cqe; + u16 que_idx; +}; + +/** + * struct iscsi_cid_queue - Per adapter iscsi cid queue + * + * @cid_que_base: queue base memory + * @cid_que: queue memory pointer + * @cid_q_prod_idx: produce index + * @cid_q_cons_idx: consumer index + * @cid_q_max_idx: max index. used to detect wrap around condition + * @cid_free_cnt: queue size + * @conn_cid_tbl: iscsi cid to conn structure mapping table + * + * Per adapter iSCSI CID Queue + */ +struct iscsi_cid_queue { + void *cid_que_base; + u32 *cid_que; + u32 cid_q_prod_idx; + u32 cid_q_cons_idx; + u32 cid_q_max_idx; + u32 cid_free_cnt; + struct qedi_conn **conn_cid_tbl; +}; + +struct qedi_portid_tbl { + spinlock_t lock; /* Port id lock */ + u16 start; + u16 max; + u16 next; + unsigned long *table; +}; + +struct qedi_itt_map { + __le32 itt; + struct qedi_cmd *p_cmd; +}; + +/* I/O tracing entry */ +#define QEDI_IO_TRACE_SIZE 2048 +struct qedi_io_log { +#define QEDI_IO_TRACE_REQ 0 +#define QEDI_IO_TRACE_RSP 1 + u8 direction; + u16 task_id; + u32 cid; + u32 port_id; /* Remote port fabric ID */ + int lun; + u8 op; /* SCSI CDB */ + u8 lba[4]; + unsigned int bufflen; /* SCSI buffer length */ + unsigned int sg_count; /* Number of SG elements */ + u8 fast_sgs; /* number of fast sgls */ + u8 slow_sgs; /* number of slow sgls */ + u8 cached_sgs; /* number of cached sgls */ + int result; /* Result passed back to mid-layer */ + unsigned long jiffies; /* Time stamp when I/O logged */ + int refcount; /* Reference count for task id */ + unsigned int blk_req_cpu; /* CPU that the task is queued on by + * blk layer + */ + unsigned int req_cpu; /* CPU that the task is queued on */ + unsigned int intr_cpu; /* Interrupt CPU that the task is received on */ + unsigned int blk_rsp_cpu;/* CPU that task is actually processed and + * returned to blk layer + */ + bool cached_sge; + bool slow_sge; + bool fast_sge; +}; + +/* Number of entries in BDQ */ +#define QEDI_BDQ_NUM 256 +#define QEDI_BDQ_BUF_SIZE 256 + +/* DMA coherent buffers for BDQ */ +struct qedi_bdq_buf { + void *buf_addr; + dma_addr_t buf_dma; +}; + +/* Main port level struct */ +struct qedi_ctx { + struct qedi_dbg_ctx dbg_ctx; + struct Scsi_Host *shost; + struct pci_dev *pdev; + struct qed_dev *cdev; + struct qed_dev_iscsi_info 
dev_info; + struct qed_int_info int_info; + struct qedi_glbl_q_params *p_cpuq; + struct global_queue **global_queues; + /* uio declaration */ + struct qedi_uio_dev *udev; + struct list_head ll2_skb_list; + spinlock_t ll2_lock; /* Light L2 lock */ + spinlock_t hba_lock; /* per port lock */ + struct task_struct *ll2_recv_thread; + unsigned long qedi_err_flags; +#define QEDI_ERR_ATTN_CLR_EN 0 +#define QEDI_ERR_IS_RECOVERABLE 2 +#define QEDI_ERR_OVERRIDE_EN 31 + unsigned long flags; +#define UIO_DEV_OPENED 1 +#define QEDI_IOTHREAD_WAKE 2 +#define QEDI_IN_RECOVERY 5 +#define QEDI_IN_OFFLINE 6 +#define QEDI_IN_SHUTDOWN 7 +#define QEDI_BLOCK_IO 8 + + u8 mac[ETH_ALEN]; + u32 src_ip[4]; + u8 ip_type; + + /* Physical address of above array */ + dma_addr_t hw_p_cpuq; + + struct qedi_bdq_buf bdq[QEDI_BDQ_NUM]; + void *bdq_pbl; + dma_addr_t bdq_pbl_dma; + size_t bdq_pbl_mem_size; + void *bdq_pbl_list; + dma_addr_t bdq_pbl_list_dma; + u8 bdq_pbl_list_num_entries; + struct qedi_nvm_iscsi_image *iscsi_image; + dma_addr_t nvm_buf_dma; + void __iomem *bdq_primary_prod; + void __iomem *bdq_secondary_prod; + u16 bdq_prod_idx; + u16 rq_num_entries; + + u32 max_sqes; + u8 num_queues; + u32 max_active_conns; + s32 msix_count; + + struct iscsi_cid_queue cid_que; + struct qedi_endpoint **ep_tbl; + struct qedi_portid_tbl lcl_port_tbl; + + /* Rx fast path intr context */ + struct qed_sb_info *sb_array; + struct qedi_fastpath *fp_array; + struct qed_iscsi_tid tasks; + +#define QEDI_LINK_DOWN 0 +#define QEDI_LINK_UP 1 + atomic_t link_state; + +#define QEDI_RESERVE_TASK_ID 0 +#define MAX_ISCSI_TASK_ENTRIES 4096 +#define QEDI_INVALID_TASK_ID (MAX_ISCSI_TASK_ENTRIES + 1) + unsigned long task_idx_map[MAX_ISCSI_TASK_ENTRIES / BITS_PER_LONG]; + struct qedi_itt_map *itt_map; + u16 tid_reuse_count[QEDI_MAX_ISCSI_TASK]; + struct qed_pf_params pf_params; + + struct workqueue_struct *tmf_thread; + struct workqueue_struct *offload_thread; + + u16 ll2_mtu; + + struct workqueue_struct *dpc_wq; + struct delayed_work recovery_work; + struct delayed_work board_disable_work; + + spinlock_t task_idx_lock; /* To protect gbl context */ + s32 last_tidx_alloc; + s32 last_tidx_clear; + + struct qedi_io_log io_trace_buf[QEDI_IO_TRACE_SIZE]; + spinlock_t io_trace_lock; /* prtect trace Log buf */ + u16 io_trace_idx; + unsigned int intr_cpu; + u32 cached_sgls; + bool use_cached_sge; + u32 slow_sgls; + bool use_slow_sge; + u32 fast_sgls; + bool use_fast_sge; + + atomic_t num_offloads; +#define SYSFS_FLAG_FW_SEL_BOOT 2 +#define IPV6_LEN 41 +#define IPV4_LEN 17 + struct iscsi_boot_kset *boot_kset; + + /* Used for iscsi statistics */ + struct mutex stats_lock; +}; + +struct qedi_work { + struct list_head list; + struct qedi_ctx *qedi; + union iscsi_cqe cqe; + u16 que_idx; + bool is_solicited; +}; + +struct qedi_percpu_s { + struct task_struct *iothread; + struct list_head work_list; + spinlock_t p_work_lock; /* Per cpu worker lock */ +}; + +static inline void *qedi_get_task_mem(struct qed_iscsi_tid *info, u32 tid) +{ + return (info->blocks[tid / info->num_tids_per_block] + + (tid % info->num_tids_per_block) * info->size); +} + +#define QEDI_U64_HI(val) ((u32)(((u64)(val)) >> 32)) +#define QEDI_U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff)) + +#endif /* _QEDI_H_ */ diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c new file mode 100644 index 000000000..2ebef4d20 --- /dev/null +++ b/drivers/scsi/qedi/qedi_dbg.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 
Cavium Inc. + */ + +#include "qedi_dbg.h" +#include + +void +qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (likely(qedi) && likely(qedi->pdev)) + pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), + func, line, qedi->host_no, &vaf); + else + pr_err("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); + + va_end(va); +} + +void +qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!(qedi_dbg_log & QEDI_LOG_WARN)) + goto ret; + + if (likely(qedi) && likely(qedi->pdev)) + pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), + func, line, qedi->host_no, &vaf); + else + pr_warn("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); + +ret: + va_end(va); +} + +void +qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!(qedi_dbg_log & QEDI_LOG_NOTICE)) + goto ret; + + if (likely(qedi) && likely(qedi->pdev)) + pr_notice("[%s]:[%s:%d]:%d: %pV", + dev_name(&qedi->pdev->dev), func, line, + qedi->host_no, &vaf); + else + pr_notice("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); + +ret: + va_end(va); +} + +void +qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + u32 level, const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!(qedi_dbg_log & level)) + goto ret; + + if (likely(qedi) && likely(qedi->pdev)) + pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), + func, line, qedi->host_no, &vaf); + else + pr_info("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); + +ret: + va_end(va); +} + +int +qedi_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) +{ + int ret = 0; + + for (; iter->name; iter++) { + ret = sysfs_create_bin_file(&shost->shost_gendev.kobj, + iter->attr); + if (ret) + pr_err("Unable to create sysfs %s attr, err(%d).\n", + iter->name, ret); + } + return ret; +} + +void +qedi_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) +{ + for (; iter->name; iter++) + sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr); +} diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h new file mode 100644 index 000000000..fdda12ef1 --- /dev/null +++ b/drivers/scsi/qedi/qedi_dbg.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. 
+ */ + +#ifndef _QEDI_DBG_H_ +#define _QEDI_DBG_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define __PREVENT_QED_HSI__ +#include +#include + +extern uint qedi_dbg_log; + +/* Debug print level definitions */ +#define QEDI_LOG_DEFAULT 0x1 /* Set default logging mask */ +#define QEDI_LOG_INFO 0x2 /* Informational logs, + * MAC address, WWPN, WWNN + */ +#define QEDI_LOG_DISC 0x4 /* Init, discovery, rport */ +#define QEDI_LOG_LL2 0x8 /* LL2, VLAN logs */ +#define QEDI_LOG_CONN 0x10 /* Connection setup, cleanup */ +#define QEDI_LOG_EVT 0x20 /* Events, link, mtu */ +#define QEDI_LOG_TIMER 0x40 /* Timer events */ +#define QEDI_LOG_MP_REQ 0x80 /* Middle Path (MP) logs */ +#define QEDI_LOG_SCSI_TM 0x100 /* SCSI Aborts, Task Mgmt */ +#define QEDI_LOG_UNSOL 0x200 /* unsolicited event logs */ +#define QEDI_LOG_IO 0x400 /* scsi cmd, completion */ +#define QEDI_LOG_MQ 0x800 /* Multi Queue logs */ +#define QEDI_LOG_BSG 0x1000 /* BSG logs */ +#define QEDI_LOG_DEBUGFS 0x2000 /* debugFS logs */ +#define QEDI_LOG_LPORT 0x4000 /* lport logs */ +#define QEDI_LOG_ELS 0x8000 /* ELS logs */ +#define QEDI_LOG_NPIV 0x10000 /* NPIV logs */ +#define QEDI_LOG_SESS 0x20000 /* Connection setup, cleanup */ +#define QEDI_LOG_UIO 0x40000 /* iSCSI UIO logs */ +#define QEDI_LOG_TID 0x80000 /* FW TID context acquire, + * free + */ +#define QEDI_TRACK_TID 0x100000 /* Track TID state. To be + * enabled only at module load + * and not run-time. + */ +#define QEDI_TRACK_CMD_LIST 0x300000 /* Track active cmd list nodes, + * done with reference to TID, + * hence TRACK_TID also enabled. + */ +#define QEDI_LOG_NOTICE 0x40000000 /* Notice logs */ +#define QEDI_LOG_WARN 0x80000000 /* Warning logs */ + +/* Debug context structure */ +struct qedi_dbg_ctx { + unsigned int host_no; + struct pci_dev *pdev; +#ifdef CONFIG_DEBUG_FS + struct dentry *bdf_dentry; +#endif +}; + +#define QEDI_ERR(pdev, fmt, ...) \ + qedi_dbg_err(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__) +#define QEDI_WARN(pdev, fmt, ...) \ + qedi_dbg_warn(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__) +#define QEDI_NOTICE(pdev, fmt, ...) \ + qedi_dbg_notice(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__) +#define QEDI_INFO(pdev, level, fmt, ...) 
\ + qedi_dbg_info(pdev, __func__, __LINE__, level, fmt, \ + ## __VA_ARGS__) + +void qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + const char *fmt, ...); +void qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + const char *fmt, ...); +void qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + const char *fmt, ...); +void qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + u32 info, const char *fmt, ...); + +struct Scsi_Host; + +struct sysfs_bin_attrs { + char *name; + struct bin_attribute *attr; +}; + +int qedi_create_sysfs_attr(struct Scsi_Host *shost, + struct sysfs_bin_attrs *iter); +void qedi_remove_sysfs_attr(struct Scsi_Host *shost, + struct sysfs_bin_attrs *iter); + +/* DebugFS related code */ +struct qedi_list_of_funcs { + char *oper_str; + ssize_t (*oper_func)(struct qedi_dbg_ctx *qedi); +}; + +struct qedi_debugfs_ops { + char *name; + struct qedi_list_of_funcs *qedi_funcs; +}; + +#define qedi_dbg_fileops(drv, ops) \ +{ \ + .owner = THIS_MODULE, \ + .open = simple_open, \ + .read = drv##_dbg_##ops##_cmd_read, \ + .write = drv##_dbg_##ops##_cmd_write \ +} + +/* Used for debugfs sequential files */ +#define qedi_dbg_fileops_seq(drv, ops) \ +{ \ + .owner = THIS_MODULE, \ + .open = drv##_dbg_##ops##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +void qedi_dbg_host_init(struct qedi_dbg_ctx *qedi, + const struct qedi_debugfs_ops *dops, + const struct file_operations *fops); +void qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi); +void qedi_dbg_init(char *drv_name); +void qedi_dbg_exit(void); + +#endif /* _QEDI_DBG_H_ */ diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c new file mode 100644 index 000000000..8deb2001d --- /dev/null +++ b/drivers/scsi/qedi/qedi_debugfs.c @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. 
+ */ + +#include "qedi.h" +#include "qedi_dbg.h" + +#include +#include +#include + +int qedi_do_not_recover; +static struct dentry *qedi_dbg_root; + +void +qedi_dbg_host_init(struct qedi_dbg_ctx *qedi, + const struct qedi_debugfs_ops *dops, + const struct file_operations *fops) +{ + char host_dirname[32]; + + sprintf(host_dirname, "host%u", qedi->host_no); + qedi->bdf_dentry = debugfs_create_dir(host_dirname, qedi_dbg_root); + + while (dops) { + if (!(dops->name)) + break; + + debugfs_create_file(dops->name, 0600, qedi->bdf_dentry, qedi, + fops); + dops++; + fops++; + } +} + +void +qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi) +{ + debugfs_remove_recursive(qedi->bdf_dentry); + qedi->bdf_dentry = NULL; +} + +void +qedi_dbg_init(char *drv_name) +{ + qedi_dbg_root = debugfs_create_dir(drv_name, NULL); +} + +void +qedi_dbg_exit(void) +{ + debugfs_remove_recursive(qedi_dbg_root); + qedi_dbg_root = NULL; +} + +static ssize_t +qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg) +{ + if (!qedi_do_not_recover) + qedi_do_not_recover = 1; + + QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n", + qedi_do_not_recover); + return 0; +} + +static ssize_t +qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg) +{ + if (qedi_do_not_recover) + qedi_do_not_recover = 0; + + QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n", + qedi_do_not_recover); + return 0; +} + +static struct qedi_list_of_funcs qedi_dbg_do_not_recover_ops[] = { + { "enable", qedi_dbg_do_not_recover_enable }, + { "disable", qedi_dbg_do_not_recover_disable }, + { NULL, NULL } +}; + +const struct qedi_debugfs_ops qedi_debugfs_ops[] = { + { "gbl_ctx", NULL }, + { "do_not_recover", qedi_dbg_do_not_recover_ops}, + { "io_trace", NULL }, + { NULL, NULL } +}; + +static ssize_t +qedi_dbg_do_not_recover_cmd_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + size_t cnt = 0; + struct qedi_dbg_ctx *qedi_dbg = + (struct qedi_dbg_ctx *)filp->private_data; + struct qedi_list_of_funcs *lof = qedi_dbg_do_not_recover_ops; + + if (*ppos) + return 0; + + while (lof) { + if (!(lof->oper_str)) + break; + + if (!strncmp(lof->oper_str, buffer, strlen(lof->oper_str))) { + cnt = lof->oper_func(qedi_dbg); + break; + } + + lof++; + } + return (count - cnt); +} + +static ssize_t +qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + size_t cnt = 0; + + if (*ppos) + return 0; + + cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover); + cnt = min_t(int, count, cnt - *ppos); + *ppos += cnt; + return cnt; +} + +static int +qedi_gbl_ctx_show(struct seq_file *s, void *unused) +{ + struct qedi_fastpath *fp = NULL; + struct qed_sb_info *sb_info = NULL; + struct status_block *sb = NULL; + struct global_queue *que = NULL; + int id; + u16 prod_idx; + struct qedi_ctx *qedi = s->private; + unsigned long flags; + + seq_puts(s, " DUMP CQ CONTEXT:\n"); + + for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { + spin_lock_irqsave(&qedi->hba_lock, flags); + seq_printf(s, "=========FAST CQ PATH [%d] ==========\n", id); + fp = &qedi->fp_array[id]; + sb_info = fp->sb_info; + sb = sb_info->sb_virt; + prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] & + STATUS_BLOCK_PROD_INDEX_MASK); + seq_printf(s, "SB PROD IDX: %d\n", prod_idx); + que = qedi->global_queues[fp->sb_id]; + seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx); + seq_printf(s, "CQ complete host memory: %d\n", fp->sb_id); + seq_puts(s, "=========== END ==================\n\n\n"); + 
spin_unlock_irqrestore(&qedi->hba_lock, flags); + } + return 0; +} + +static int +qedi_dbg_gbl_ctx_open(struct inode *inode, struct file *file) +{ + struct qedi_dbg_ctx *qedi_dbg = inode->i_private; + struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx, + dbg_ctx); + + return single_open(file, qedi_gbl_ctx_show, qedi); +} + +static int +qedi_io_trace_show(struct seq_file *s, void *unused) +{ + int id, idx = 0; + struct qedi_ctx *qedi = s->private; + struct qedi_io_log *io_log; + unsigned long flags; + + seq_puts(s, " DUMP IO LOGS:\n"); + spin_lock_irqsave(&qedi->io_trace_lock, flags); + idx = qedi->io_trace_idx; + for (id = 0; id < QEDI_IO_TRACE_SIZE; id++) { + io_log = &qedi->io_trace_buf[idx]; + seq_printf(s, "iodir-%d:", io_log->direction); + seq_printf(s, "tid-0x%x:", io_log->task_id); + seq_printf(s, "cid-0x%x:", io_log->cid); + seq_printf(s, "lun-%d:", io_log->lun); + seq_printf(s, "op-0x%02x:", io_log->op); + seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0], + io_log->lba[1], io_log->lba[2], io_log->lba[3]); + seq_printf(s, "buflen-%d:", io_log->bufflen); + seq_printf(s, "sgcnt-%d:", io_log->sg_count); + seq_printf(s, "res-0x%08x:", io_log->result); + seq_printf(s, "jif-%lu:", io_log->jiffies); + seq_printf(s, "blk_req_cpu-%d:", io_log->blk_req_cpu); + seq_printf(s, "req_cpu-%d:", io_log->req_cpu); + seq_printf(s, "intr_cpu-%d:", io_log->intr_cpu); + seq_printf(s, "blk_rsp_cpu-%d\n", io_log->blk_rsp_cpu); + + idx++; + if (idx == QEDI_IO_TRACE_SIZE) + idx = 0; + } + spin_unlock_irqrestore(&qedi->io_trace_lock, flags); + return 0; +} + +static int +qedi_dbg_io_trace_open(struct inode *inode, struct file *file) +{ + struct qedi_dbg_ctx *qedi_dbg = inode->i_private; + struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx, + dbg_ctx); + + return single_open(file, qedi_io_trace_show, qedi); +} + +const struct file_operations qedi_dbg_fops[] = { + qedi_dbg_fileops_seq(qedi, gbl_ctx), + qedi_dbg_fileops(qedi, do_not_recover), + qedi_dbg_fileops_seq(qedi, io_trace), + { }, +}; diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c new file mode 100644 index 000000000..690173832 --- /dev/null +++ b/drivers/scsi/qedi/qedi_fw.c @@ -0,0 +1,2158 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. 
+ */ + +#include +#include +#include + +#include "qedi.h" +#include "qedi_iscsi.h" +#include "qedi_gbl.h" +#include "qedi_fw_iscsi.h" +#include "qedi_fw_scsi.h" + +static int send_iscsi_tmf(struct qedi_conn *qedi_conn, + struct iscsi_task *mtask, struct iscsi_task *ctask); + +void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd) +{ + struct scsi_cmnd *sc = cmd->scsi_cmd; + + if (cmd->io_tbl.sge_valid && sc) { + cmd->io_tbl.sge_valid = 0; + scsi_dma_unmap(sc); + } +} + +static void qedi_process_logout_resp(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn) +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_logout_rsp *resp_hdr; + struct iscsi_session *session = conn->session; + struct iscsi_logout_response_hdr *cqe_logout_response; + struct qedi_cmd *cmd; + + cmd = (struct qedi_cmd *)task->dd_data; + cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response; + spin_lock(&session->back_lock); + resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = cqe_logout_response->opcode; + resp_hdr->flags = cqe_logout_response->flags; + resp_hdr->hlength = 0; + + resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); + resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn); + resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn); + + resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time_2_wait); + resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time_2_retain); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, + "Freeing tid=0x%x for cid=0x%x\n", + cmd->task_id, qedi_conn->iscsi_conn_id); + + spin_lock(&qedi_conn->list_lock); + if (likely(cmd->io_cmd_in_list)) { + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + } else { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n", + cmd->task_id, qedi_conn->iscsi_conn_id, + &cmd->io_cmd); + } + spin_unlock(&qedi_conn->list_lock); + + cmd->state = RESPONSE_RECEIVED; + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0); + + spin_unlock(&session->back_lock); +} + +static void qedi_process_text_resp(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn) +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_task_context *task_ctx; + struct iscsi_text_rsp *resp_hdr_ptr; + struct iscsi_text_response_hdr *cqe_text_response; + struct qedi_cmd *cmd; + int pld_len; + + cmd = (struct qedi_cmd *)task->dd_data; + task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id); + + cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response; + spin_lock(&session->back_lock); + resp_hdr_ptr = (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr; + memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr)); + resp_hdr_ptr->opcode = cqe_text_response->opcode; + resp_hdr_ptr->flags = cqe_text_response->flags; + resp_hdr_ptr->hlength = 0; + + hton24(resp_hdr_ptr->dlength, + (cqe_text_response->hdr_second_dword & + ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK)); + + resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, + conn->session->age); + resp_hdr_ptr->ttt = cqe_text_response->ttt; + resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn); + resp_hdr_ptr->exp_cmdsn = 
cpu_to_be32(cqe_text_response->exp_cmd_sn); + resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn); + + pld_len = cqe_text_response->hdr_second_dword & + ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK; + qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len; + + memset(task_ctx, '\0', sizeof(*task_ctx)); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, + "Freeing tid=0x%x for cid=0x%x\n", + cmd->task_id, qedi_conn->iscsi_conn_id); + + spin_lock(&qedi_conn->list_lock); + if (likely(cmd->io_cmd_in_list)) { + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + } else { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n", + cmd->task_id, qedi_conn->iscsi_conn_id, + &cmd->io_cmd); + } + spin_unlock(&qedi_conn->list_lock); + + cmd->state = RESPONSE_RECEIVED; + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, + qedi_conn->gen_pdu.resp_buf, + (qedi_conn->gen_pdu.resp_wr_ptr - + qedi_conn->gen_pdu.resp_buf)); + spin_unlock(&session->back_lock); +} + +static void qedi_tmf_resp_work(struct work_struct *work) +{ + struct qedi_cmd *qedi_cmd = + container_of(work, struct qedi_cmd, tmf_work); + struct qedi_conn *qedi_conn = qedi_cmd->conn; + struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_tm_rsp *resp_hdr_ptr; + int rval = 0; + + resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf; + + rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true); + if (rval) + goto exit_tmf_resp; + + spin_lock(&session->back_lock); + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0); + spin_unlock(&session->back_lock); + +exit_tmf_resp: + kfree(resp_hdr_ptr); + + spin_lock(&qedi_conn->tmf_work_lock); + qedi_conn->fw_cleanup_works--; + spin_unlock(&qedi_conn->tmf_work_lock); +} + +static void qedi_process_tmf_resp(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn) + +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_tmf_response_hdr *cqe_tmp_response; + struct iscsi_tm_rsp *resp_hdr_ptr; + struct iscsi_tm *tmf_hdr; + struct qedi_cmd *qedi_cmd = NULL; + + cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response; + + qedi_cmd = task->dd_data; + qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_ATOMIC); + if (!qedi_cmd->tmf_resp_buf) { + QEDI_ERR(&qedi->dbg_ctx, + "Failed to allocate resp buf, cid=0x%x\n", + qedi_conn->iscsi_conn_id); + return; + } + + spin_lock(&session->back_lock); + resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf; + memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp)); + + /* Fill up the header */ + resp_hdr_ptr->opcode = cqe_tmp_response->opcode; + resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags; + resp_hdr_ptr->response = cqe_tmp_response->hdr_response; + resp_hdr_ptr->hlength = 0; + + hton24(resp_hdr_ptr->dlength, + (cqe_tmp_response->hdr_second_dword & + ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK)); + resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, + conn->session->age); + resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn); + resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_tmp_response->exp_cmd_sn); + resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn); + + tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr; + + 
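+	/*
+	 * For LUN and target resets the remaining outstanding I/O must be
+	 * cleaned up before the TMF response is handed to libiscsi, so that
+	 * path is deferred to the tmf_thread workqueue below; all other TMF
+	 * responses are completed inline.
+	 */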
spin_lock(&qedi_conn->list_lock); + if (likely(qedi_cmd->io_cmd_in_list)) { + qedi_cmd->io_cmd_in_list = false; + list_del_init(&qedi_cmd->io_cmd); + qedi_conn->active_cmd_count--; + } + spin_unlock(&qedi_conn->list_lock); + + spin_lock(&qedi_conn->tmf_work_lock); + switch (tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) { + case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: + case ISCSI_TM_FUNC_TARGET_WARM_RESET: + case ISCSI_TM_FUNC_TARGET_COLD_RESET: + if (qedi_conn->ep_disconnect_starting) { + /* Session is down so ep_disconnect will clean up */ + spin_unlock(&qedi_conn->tmf_work_lock); + goto unblock_sess; + } + + qedi_conn->fw_cleanup_works++; + spin_unlock(&qedi_conn->tmf_work_lock); + + INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work); + queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work); + goto unblock_sess; + } + spin_unlock(&qedi_conn->tmf_work_lock); + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0); + kfree(resp_hdr_ptr); + +unblock_sess: + spin_unlock(&session->back_lock); +} + +static void qedi_process_login_resp(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn) +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_task_context *task_ctx; + struct iscsi_login_rsp *resp_hdr_ptr; + struct iscsi_login_response_hdr *cqe_login_response; + struct qedi_cmd *cmd; + int pld_len; + + cmd = (struct qedi_cmd *)task->dd_data; + + cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response; + task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id); + + spin_lock(&session->back_lock); + resp_hdr_ptr = (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr; + memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp)); + resp_hdr_ptr->opcode = cqe_login_response->opcode; + resp_hdr_ptr->flags = cqe_login_response->flags_attr; + resp_hdr_ptr->hlength = 0; + + hton24(resp_hdr_ptr->dlength, + (cqe_login_response->hdr_second_dword & + ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK)); + resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, + conn->session->age); + resp_hdr_ptr->tsih = cqe_login_response->tsih; + resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn); + resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn); + resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn); + resp_hdr_ptr->status_class = cqe_login_response->status_class; + resp_hdr_ptr->status_detail = cqe_login_response->status_detail; + pld_len = cqe_login_response->hdr_second_dword & + ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK; + qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len; + + spin_lock(&qedi_conn->list_lock); + if (likely(cmd->io_cmd_in_list)) { + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + } + spin_unlock(&qedi_conn->list_lock); + + memset(task_ctx, '\0', sizeof(*task_ctx)); + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, + qedi_conn->gen_pdu.resp_buf, + (qedi_conn->gen_pdu.resp_wr_ptr - + qedi_conn->gen_pdu.resp_buf)); + + spin_unlock(&session->back_lock); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, + "Freeing tid=0x%x for cid=0x%x\n", + cmd->task_id, qedi_conn->iscsi_conn_id); + cmd->state = RESPONSE_RECEIVED; +} + +static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi, + struct iscsi_cqe_unsolicited *cqe, + char *ptr, int len) +{ + u16 idx = 0; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n", + len, 
qedi->bdq_prod_idx, + (qedi->bdq_prod_idx % qedi->rq_num_entries)); + + /* Obtain buffer address from rqe_opaque */ + idx = cqe->rqe_opaque; + if (idx > (QEDI_BDQ_NUM - 1)) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "wrong idx %d returned by FW, dropping the unsolicited pkt\n", + idx); + return; + } + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "unsol_cqe_type = %d\n", cqe->unsol_cqe_type); + switch (cqe->unsol_cqe_type) { + case ISCSI_CQE_UNSOLICITED_SINGLE: + case ISCSI_CQE_UNSOLICITED_FIRST: + if (len) + memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len); + break; + case ISCSI_CQE_UNSOLICITED_MIDDLE: + case ISCSI_CQE_UNSOLICITED_LAST: + break; + default: + break; + } +} + +static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi, + struct iscsi_cqe_unsolicited *cqe, + int count) +{ + u16 idx = 0; + struct scsi_bd *pbl; + + /* Obtain buffer address from rqe_opaque */ + idx = cqe->rqe_opaque; + if (idx > (QEDI_BDQ_NUM - 1)) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "wrong idx %d returned by FW, dropping the unsolicited pkt\n", + idx); + return; + } + + pbl = (struct scsi_bd *)qedi->bdq_pbl; + pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries); + pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma)); + pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma)); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n", + pbl, pbl->address.hi, pbl->address.lo, idx); + pbl->opaque.iscsi_opaque.reserved_zero[0] = 0; + pbl->opaque.iscsi_opaque.reserved_zero[1] = 0; + pbl->opaque.iscsi_opaque.reserved_zero[2] = 0; + pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx); + + /* Increment producer to let f/w know we've handled the frame */ + qedi->bdq_prod_idx += count; + + writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod); + readw(qedi->bdq_primary_prod); + + writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod); + readw(qedi->bdq_secondary_prod); +} + +static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi, + struct iscsi_cqe_unsolicited *cqe, + u32 pdu_len, u32 num_bdqs, + char *bdq_data) +{ + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "num_bdqs [%d]\n", num_bdqs); + + qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len); + qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1)); +} + +static int qedi_process_nopin_mesg(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn, u16 que_idx) +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_nop_in_hdr *cqe_nop_in; + struct iscsi_nopin *hdr; + struct qedi_cmd *cmd; + int tgt_async_nop = 0; + u32 lun[2]; + u32 pdu_len, num_bdqs; + char bdq_data[QEDI_BDQ_BUF_SIZE]; + unsigned long flags; + + spin_lock_bh(&session->back_lock); + cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in; + + pdu_len = cqe_nop_in->hdr_second_dword & + ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK; + num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE; + + hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr; + memset(hdr, 0, sizeof(struct iscsi_hdr)); + hdr->opcode = cqe_nop_in->opcode; + hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn); + hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn); + hdr->ttt = cpu_to_be32(cqe_nop_in->ttt); + + if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) { + spin_lock_irqsave(&qedi->hba_lock, flags); + 
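+		/*
+		 * Unsolicited NOP-In: copy any payload out of the BDQ buffer
+		 * and hand the buffer back to the firmware by re-posting it
+		 * and bumping the BDQ producer index.
+		 */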
qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited, + pdu_len, num_bdqs, bdq_data); + hdr->itt = RESERVED_ITT; + tgt_async_nop = 1; + spin_unlock_irqrestore(&qedi->hba_lock, flags); + goto done; + } + + /* Response to one of our nop-outs */ + if (task) { + cmd = task->dd_data; + hdr->flags = ISCSI_FLAG_CMD_FINAL; + hdr->itt = build_itt(cqe->cqe_solicited.itid, + conn->session->age); + lun[0] = 0xffffffff; + lun[1] = 0xffffffff; + memcpy(&hdr->lun, lun, sizeof(struct scsi_lun)); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, + "Freeing tid=0x%x for cid=0x%x\n", + cmd->task_id, qedi_conn->iscsi_conn_id); + cmd->state = RESPONSE_RECEIVED; + spin_lock(&qedi_conn->list_lock); + if (likely(cmd->io_cmd_in_list)) { + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + } + + spin_unlock(&qedi_conn->list_lock); + } + +done: + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len); + + spin_unlock_bh(&session->back_lock); + return tgt_async_nop; +} + +static void qedi_process_async_mesg(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn, + u16 que_idx) +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_async_msg_hdr *cqe_async_msg; + struct iscsi_async *resp_hdr; + u32 lun[2]; + u32 pdu_len, num_bdqs; + char bdq_data[QEDI_BDQ_BUF_SIZE]; + unsigned long flags; + + spin_lock_bh(&session->back_lock); + + cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg; + pdu_len = cqe_async_msg->hdr_second_dword & + ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK; + num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE; + + if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) { + spin_lock_irqsave(&qedi->hba_lock, flags); + qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited, + pdu_len, num_bdqs, bdq_data); + spin_unlock_irqrestore(&qedi->hba_lock, flags); + } + + resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = cqe_async_msg->opcode; + resp_hdr->flags = 0x80; + + lun[0] = cpu_to_be32(cqe_async_msg->lun.lo); + lun[1] = cpu_to_be32(cqe_async_msg->lun.hi); + memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun)); + resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn); + resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn); + + resp_hdr->async_event = cqe_async_msg->async_event; + resp_hdr->async_vcode = cqe_async_msg->async_vcode; + + resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv); + resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv); + resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv); + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data, + pdu_len); + + spin_unlock_bh(&session->back_lock); +} + +static void qedi_process_reject_mesg(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn, + uint16_t que_idx) +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_reject_hdr *cqe_reject; + struct iscsi_reject *hdr; + u32 pld_len, num_bdqs; + unsigned long flags; + + spin_lock_bh(&session->back_lock); + cqe_reject = &cqe->cqe_common.iscsi_hdr.reject; + pld_len = cqe_reject->hdr_second_dword & + ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK; + num_bdqs = pld_len / QEDI_BDQ_BUF_SIZE; + + if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) { + 
spin_lock_irqsave(&qedi->hba_lock, flags); + qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited, + pld_len, num_bdqs, conn->data); + spin_unlock_irqrestore(&qedi->hba_lock, flags); + } + hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr; + memset(hdr, 0, sizeof(struct iscsi_hdr)); + hdr->opcode = cqe_reject->opcode; + hdr->reason = cqe_reject->hdr_reason; + hdr->flags = cqe_reject->hdr_flags; + hton24(hdr->dlength, (cqe_reject->hdr_second_dword & + ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK)); + hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn); + hdr->statsn = cpu_to_be32(cqe_reject->stat_sn); + hdr->ffffffff = cpu_to_be32(0xffffffff); + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, + conn->data, pld_len); + spin_unlock_bh(&session->back_lock); +} + +static void qedi_scsi_completion(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct iscsi_conn *conn) +{ + struct scsi_cmnd *sc_cmd; + struct qedi_cmd *cmd = task->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_scsi_rsp *hdr; + struct iscsi_data_in_hdr *cqe_data_in; + int datalen = 0; + struct qedi_conn *qedi_conn; + u32 iscsi_cid; + u8 cqe_err_bits = 0; + + iscsi_cid = cqe->cqe_common.conn_id; + qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; + + cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in; + cqe_err_bits = + cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits; + + spin_lock_bh(&session->back_lock); + /* get the scsi command */ + sc_cmd = cmd->scsi_cmd; + + if (!sc_cmd) { + QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n"); + goto error; + } + + if (!iscsi_cmd(sc_cmd)->task) { + QEDI_WARN(&qedi->dbg_ctx, + "NULL task pointer, returned in another context.\n"); + goto error; + } + + if (!scsi_cmd_to_rq(sc_cmd)->q) { + QEDI_WARN(&qedi->dbg_ctx, + "request->q is NULL so request is not valid, sc_cmd=%p.\n", + sc_cmd); + goto error; + } + + qedi_iscsi_unmap_sg_list(cmd); + + hdr = (struct iscsi_scsi_rsp *)task->hdr; + hdr->opcode = cqe_data_in->opcode; + hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn); + hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); + hdr->response = cqe_data_in->reserved1; + hdr->cmd_status = cqe_data_in->status_rsvd; + hdr->flags = cqe_data_in->flags; + hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count); + + if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) { + datalen = cqe_data_in->reserved2 & + ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK; + memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen); + } + + /* If f/w reports data underrun err then set residual to IO transfer + * length, set Underrun flag and clear Overrun flag explicitly + */ + if (unlikely(cqe_err_bits && + GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n", + hdr->itt, cqe_data_in->flags, cmd->task_id, + qedi_conn->iscsi_conn_id, hdr->residual_count, + scsi_bufflen(sc_cmd)); + hdr->residual_count = cpu_to_be32(scsi_bufflen(sc_cmd)); + hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW; + hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW); + } + + spin_lock(&qedi_conn->list_lock); + if (likely(cmd->io_cmd_in_list)) { + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + } + spin_unlock(&qedi_conn->list_lock); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, + "Freeing tid=0x%x 
for cid=0x%x\n", + cmd->task_id, qedi_conn->iscsi_conn_id); + cmd->state = RESPONSE_RECEIVED; + if (qedi_io_tracing) + qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP); + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, + conn->data, datalen); +error: + spin_unlock_bh(&session->back_lock); +} + +static void qedi_mtask_completion(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *conn, uint16_t que_idx) +{ + struct iscsi_conn *iscsi_conn; + u32 hdr_opcode; + + hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte; + iscsi_conn = conn->cls_conn->dd_data; + + switch (hdr_opcode) { + case ISCSI_OPCODE_SCSI_RESPONSE: + case ISCSI_OPCODE_DATA_IN: + qedi_scsi_completion(qedi, cqe, task, iscsi_conn); + break; + case ISCSI_OPCODE_LOGIN_RESPONSE: + qedi_process_login_resp(qedi, cqe, task, conn); + break; + case ISCSI_OPCODE_TMF_RESPONSE: + qedi_process_tmf_resp(qedi, cqe, task, conn); + break; + case ISCSI_OPCODE_TEXT_RESPONSE: + qedi_process_text_resp(qedi, cqe, task, conn); + break; + case ISCSI_OPCODE_LOGOUT_RESPONSE: + qedi_process_logout_resp(qedi, cqe, task, conn); + break; + case ISCSI_OPCODE_NOP_IN: + qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx); + break; + default: + QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n"); + } +} + +static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi, + struct iscsi_cqe_solicited *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn) +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct qedi_cmd *cmd = task->dd_data; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL, + "itid=0x%x, cmd task id=0x%x\n", + cqe->itid, cmd->task_id); + + cmd->state = RESPONSE_RECEIVED; + + spin_lock_bh(&session->back_lock); + __iscsi_put_task(task); + spin_unlock_bh(&session->back_lock); +} + +static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, + struct iscsi_cqe_solicited *cqe, + struct iscsi_conn *conn) +{ + struct qedi_work_map *work, *work_tmp; + u32 proto_itt = cqe->itid; + int found = 0; + struct qedi_cmd *qedi_cmd = NULL; + u32 iscsi_cid; + struct qedi_conn *qedi_conn; + struct qedi_cmd *dbg_cmd; + struct iscsi_task *mtask, *task; + struct iscsi_tm *tmf_hdr = NULL; + + iscsi_cid = cqe->conn_id; + qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; + if (!qedi_conn) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "icid not found 0x%x\n", cqe->conn_id); + return; + } + + /* Based on this itt get the corresponding qedi_cmd */ + spin_lock_bh(&qedi_conn->tmf_work_lock); + list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list, + list) { + if (work->rtid == proto_itt) { + /* We found the command */ + qedi_cmd = work->qedi_cmd; + if (!qedi_cmd->list_tmf_work) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "TMF work not found, cqe->tid=0x%x, cid=0x%x\n", + proto_itt, qedi_conn->iscsi_conn_id); + WARN_ON(1); + } + found = 1; + mtask = qedi_cmd->task; + task = work->ctask; + tmf_hdr = (struct iscsi_tm *)mtask->hdr; + + list_del_init(&work->list); + kfree(work); + qedi_cmd->list_tmf_work = NULL; + } + } + spin_unlock_bh(&qedi_conn->tmf_work_lock); + + if (!found) + goto check_cleanup_reqs; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n", + proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id); + + spin_lock_bh(&conn->session->back_lock); + if (iscsi_task_is_completed(task)) { + QEDI_NOTICE(&qedi->dbg_ctx, + "IO task completed, tmf rtt=0x%x, cid=0x%x\n", + 
get_itt(tmf_hdr->rtt), qedi_conn->iscsi_conn_id); + goto unlock; + } + + dbg_cmd = task->dd_data; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n", + get_itt(tmf_hdr->rtt), get_itt(task->itt), dbg_cmd->task_id, + qedi_conn->iscsi_conn_id); + + spin_lock(&qedi_conn->list_lock); + if (likely(dbg_cmd->io_cmd_in_list)) { + dbg_cmd->io_cmd_in_list = false; + list_del_init(&dbg_cmd->io_cmd); + qedi_conn->active_cmd_count--; + } + spin_unlock(&qedi_conn->list_lock); + qedi_cmd->state = CLEANUP_RECV; +unlock: + spin_unlock_bh(&conn->session->back_lock); + wake_up_interruptible(&qedi_conn->wait_queue); + return; + +check_cleanup_reqs: + if (atomic_inc_return(&qedi_conn->cmd_cleanup_cmpl) == + qedi_conn->cmd_cleanup_req) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "Freeing tid=0x%x for cid=0x%x\n", + cqe->itid, qedi_conn->iscsi_conn_id); + wake_up(&qedi_conn->wait_queue); + } +} + +void qedi_fp_process_cqes(struct qedi_work *work) +{ + struct qedi_ctx *qedi = work->qedi; + union iscsi_cqe *cqe = &work->cqe; + struct iscsi_task *task = NULL; + struct iscsi_nopout *nopout_hdr; + struct qedi_conn *q_conn; + struct iscsi_conn *conn; + struct qedi_cmd *qedi_cmd; + u32 comp_type; + u32 iscsi_cid; + u32 hdr_opcode; + u16 que_idx = work->que_idx; + u8 cqe_err_bits = 0; + + comp_type = cqe->cqe_common.cqe_type; + hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte; + cqe_err_bits = + cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n", + cqe->cqe_common.conn_id, comp_type, hdr_opcode); + + if (comp_type >= MAX_ISCSI_CQES_TYPE) { + QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n"); + return; + } + + iscsi_cid = cqe->cqe_common.conn_id; + q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; + if (!q_conn) { + QEDI_WARN(&qedi->dbg_ctx, + "Session no longer exists for cid=0x%x!!\n", + iscsi_cid); + return; + } + + conn = q_conn->cls_conn->dd_data; + + if (unlikely(cqe_err_bits && + GET_FIELD(cqe_err_bits, + CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) { + iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); + return; + } + + switch (comp_type) { + case ISCSI_CQE_TYPE_SOLICITED: + case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE: + qedi_cmd = container_of(work, struct qedi_cmd, cqe_work); + task = qedi_cmd->task; + if (!task) { + QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n"); + return; + } + + /* Process NOPIN local completion */ + nopout_hdr = (struct iscsi_nopout *)task->hdr; + if ((nopout_hdr->itt == RESERVED_ITT) && + (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) { + qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited, + task, q_conn); + } else { + cqe->cqe_solicited.itid = + qedi_get_itt(cqe->cqe_solicited); + /* Process other solicited responses */ + qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx); + } + break; + case ISCSI_CQE_TYPE_UNSOLICITED: + switch (hdr_opcode) { + case ISCSI_OPCODE_NOP_IN: + qedi_process_nopin_mesg(qedi, cqe, task, q_conn, + que_idx); + break; + case ISCSI_OPCODE_ASYNC_MSG: + qedi_process_async_mesg(qedi, cqe, task, q_conn, + que_idx); + break; + case ISCSI_OPCODE_REJECT: + qedi_process_reject_mesg(qedi, cqe, task, q_conn, + que_idx); + break; + } + goto exit_fp_process; + case ISCSI_CQE_TYPE_DUMMY: + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CqE\n"); + goto exit_fp_process; + case ISCSI_CQE_TYPE_TASK_CLEANUP: + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n"); + qedi_process_cmd_cleanup_resp(qedi, 
&cqe->cqe_solicited, conn); + goto exit_fp_process; + default: + QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n"); + break; + } + +exit_fp_process: + return; +} + +static void qedi_ring_doorbell(struct qedi_conn *qedi_conn) +{ + qedi_conn->ep->db_data.sq_prod = qedi_conn->ep->fw_sq_prod_idx; + + /* wmb - Make sure fw idx is coherent */ + wmb(); + writel(*(u32 *)&qedi_conn->ep->db_data, qedi_conn->ep->p_doorbell); + + /* Make sure fw write idx is coherent, and include both memory barriers + * as a failsafe as for some architectures the call is the same but on + * others they are two different assembly operations. + */ + wmb(); + QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ, + "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n", + qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx, + qedi_conn->iscsi_conn_id); +} + +static u16 qedi_get_wqe_idx(struct qedi_conn *qedi_conn) +{ + struct qedi_endpoint *ep; + u16 rval; + + ep = qedi_conn->ep; + rval = ep->sq_prod_idx; + + /* Increament SQ index */ + ep->sq_prod_idx++; + ep->fw_sq_prod_idx++; + if (ep->sq_prod_idx == QEDI_SQ_SIZE) + ep->sq_prod_idx = 0; + + return rval; +} + +int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, + struct iscsi_task *task) +{ + struct iscsi_login_req_hdr login_req_pdu_header; + struct scsi_sgl_task_params tx_sgl_task_params; + struct scsi_sgl_task_params rx_sgl_task_params; + struct iscsi_task_params task_params; + struct iscsi_task_context *fw_task_ctx; + struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_login_req *login_hdr; + struct scsi_sge *resp_sge = NULL; + struct qedi_cmd *qedi_cmd; + struct qedi_endpoint *ep; + s16 tid = 0; + u16 sq_idx = 0; + int rval = 0; + + resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + qedi_cmd = (struct qedi_cmd *)task->dd_data; + ep = qedi_conn->ep; + login_hdr = (struct iscsi_login_req *)task->hdr; + + tid = qedi_get_task_idx(qedi); + if (tid == -1) + return -ENOMEM; + + fw_task_ctx = + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + tid); + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + + qedi_cmd->task_id = tid; + + memset(&task_params, 0, sizeof(task_params)); + memset(&login_req_pdu_header, 0, sizeof(login_req_pdu_header)); + memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); + memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); + /* Update header info */ + login_req_pdu_header.opcode = login_hdr->opcode; + login_req_pdu_header.version_min = login_hdr->min_version; + login_req_pdu_header.version_max = login_hdr->max_version; + login_req_pdu_header.flags_attr = login_hdr->flags; + login_req_pdu_header.isid_tabc = swab32p((u32 *)login_hdr->isid); + login_req_pdu_header.isid_d = swab16p((u16 *)&login_hdr->isid[4]); + + login_req_pdu_header.tsih = login_hdr->tsih; + login_req_pdu_header.hdr_second_dword = ntoh24(login_hdr->dlength); + + qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); + login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); + login_req_pdu_header.cid = qedi_conn->iscsi_conn_id; + login_req_pdu_header.cmd_sn = be32_to_cpu(login_hdr->cmdsn); + login_req_pdu_header.exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn); + login_req_pdu_header.exp_stat_sn = 0; + + /* Fill tx AHS and rx buffer */ + tx_sgl_task_params.sgl = + (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + tx_sgl_task_params.sgl_phys_addr.lo = + (u32)(qedi_conn->gen_pdu.req_dma_addr); + tx_sgl_task_params.sgl_phys_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); + tx_sgl_task_params.total_buffer_size = 
ntoh24(login_hdr->dlength); + tx_sgl_task_params.num_sges = 1; + + rx_sgl_task_params.sgl = + (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + rx_sgl_task_params.sgl_phys_addr.lo = + (u32)(qedi_conn->gen_pdu.resp_dma_addr); + rx_sgl_task_params.sgl_phys_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); + rx_sgl_task_params.total_buffer_size = resp_sge->sge_len; + rx_sgl_task_params.num_sges = 1; + + /* Fill fw input params */ + task_params.context = fw_task_ctx; + task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; + task_params.itid = tid; + task_params.cq_rss_number = 0; + task_params.tx_io_size = ntoh24(login_hdr->dlength); + task_params.rx_io_size = resp_sge->sge_len; + + sq_idx = qedi_get_wqe_idx(qedi_conn); + task_params.sqe = &ep->sq[sq_idx]; + + memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); + rval = init_initiator_login_request_task(&task_params, + &login_req_pdu_header, + &tx_sgl_task_params, + &rx_sgl_task_params); + if (rval) + return -1; + + spin_lock(&qedi_conn->list_lock); + list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); + qedi_cmd->io_cmd_in_list = true; + qedi_conn->active_cmd_count++; + spin_unlock(&qedi_conn->list_lock); + + qedi_ring_doorbell(qedi_conn); + return 0; +} + +int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn, + struct iscsi_task *task) +{ + struct iscsi_logout_req_hdr logout_pdu_header; + struct scsi_sgl_task_params tx_sgl_task_params; + struct scsi_sgl_task_params rx_sgl_task_params; + struct iscsi_task_params task_params; + struct iscsi_task_context *fw_task_ctx; + struct iscsi_logout *logout_hdr = NULL; + struct qedi_ctx *qedi = qedi_conn->qedi; + struct qedi_cmd *qedi_cmd; + struct qedi_endpoint *ep; + s16 tid = 0; + u16 sq_idx = 0; + int rval = 0; + + qedi_cmd = (struct qedi_cmd *)task->dd_data; + logout_hdr = (struct iscsi_logout *)task->hdr; + ep = qedi_conn->ep; + + tid = qedi_get_task_idx(qedi); + if (tid == -1) + return -ENOMEM; + + fw_task_ctx = + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + tid); + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + + qedi_cmd->task_id = tid; + + memset(&task_params, 0, sizeof(task_params)); + memset(&logout_pdu_header, 0, sizeof(logout_pdu_header)); + memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); + memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); + + /* Update header info */ + logout_pdu_header.opcode = logout_hdr->opcode; + logout_pdu_header.reason_code = 0x80 | logout_hdr->flags; + qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); + logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); + logout_pdu_header.exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn); + logout_pdu_header.cmd_sn = be32_to_cpu(logout_hdr->cmdsn); + logout_pdu_header.cid = qedi_conn->iscsi_conn_id; + + /* Fill fw input params */ + task_params.context = fw_task_ctx; + task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; + task_params.itid = tid; + task_params.cq_rss_number = 0; + task_params.tx_io_size = 0; + task_params.rx_io_size = 0; + + sq_idx = qedi_get_wqe_idx(qedi_conn); + task_params.sqe = &ep->sq[sq_idx]; + memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); + + rval = init_initiator_logout_request_task(&task_params, + &logout_pdu_header, + NULL, NULL); + if (rval) + return -1; + + spin_lock(&qedi_conn->list_lock); + list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); + qedi_cmd->io_cmd_in_list = true; + qedi_conn->active_cmd_count++; + spin_unlock(&qedi_conn->list_lock); + + qedi_ring_doorbell(qedi_conn); + 
return 0; +} + +int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, + struct iscsi_task *task, bool in_recovery) +{ + int rval; + struct iscsi_task *ctask; + struct qedi_cmd *cmd, *cmd_tmp; + struct iscsi_tm *tmf_hdr; + unsigned int lun = 0; + bool lun_reset = false; + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + + /* From recovery, task is NULL or from tmf resp valid task */ + if (task) { + tmf_hdr = (struct iscsi_tm *)task->hdr; + + if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == + ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) { + lun_reset = true; + lun = scsilun_to_int(&tmf_hdr->lun); + } + } + + qedi_conn->cmd_cleanup_req = 0; + atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n", + qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id, + in_recovery, lun_reset); + + if (lun_reset) + spin_lock_bh(&session->back_lock); + + spin_lock(&qedi_conn->list_lock); + + list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list, + io_cmd) { + ctask = cmd->task; + if (ctask == task) + continue; + + if (lun_reset) { + if (cmd->scsi_cmd && cmd->scsi_cmd->device) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n", + cmd->task_id, get_itt(ctask->itt), + cmd->scsi_cmd, cmd->scsi_cmd->device, + ctask->state, cmd->state, + qedi_conn->iscsi_conn_id); + if (cmd->scsi_cmd->device->lun != lun) + continue; + } + } + qedi_conn->cmd_cleanup_req++; + qedi_iscsi_cleanup_task(ctask, true); + + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + QEDI_WARN(&qedi->dbg_ctx, + "Deleted active cmd list node io_cmd=%p, cid=0x%x\n", + &cmd->io_cmd, qedi_conn->iscsi_conn_id); + } + + spin_unlock(&qedi_conn->list_lock); + + if (lun_reset) + spin_unlock_bh(&session->back_lock); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "cmd_cleanup_req=%d, cid=0x%x\n", + qedi_conn->cmd_cleanup_req, + qedi_conn->iscsi_conn_id); + + rval = wait_event_interruptible_timeout(qedi_conn->wait_queue, + (qedi_conn->cmd_cleanup_req == + atomic_read(&qedi_conn->cmd_cleanup_cmpl)) || + test_bit(QEDI_IN_RECOVERY, &qedi->flags), + 5 * HZ); + if (rval) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n", + qedi_conn->cmd_cleanup_req, + atomic_read(&qedi_conn->cmd_cleanup_cmpl), + qedi_conn->iscsi_conn_id); + + return 0; + } + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n", + qedi_conn->cmd_cleanup_req, + atomic_read(&qedi_conn->cmd_cleanup_cmpl), + qedi_conn->iscsi_conn_id); + + iscsi_host_for_each_session(qedi->shost, + qedi_mark_device_missing); + qedi_ops->common->drain(qedi->cdev); + + /* Enable IOs for all other sessions except current.*/ + if (!wait_event_interruptible_timeout(qedi_conn->wait_queue, + (qedi_conn->cmd_cleanup_req == + atomic_read(&qedi_conn->cmd_cleanup_cmpl)) || + test_bit(QEDI_IN_RECOVERY, &qedi->flags), + 5 * HZ)) { + iscsi_host_for_each_session(qedi->shost, + qedi_mark_device_available); + return -1; + } + + iscsi_host_for_each_session(qedi->shost, + qedi_mark_device_available); + + return 0; +} + +void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, + struct iscsi_task *task) +{ + struct qedi_endpoint *qedi_ep; + int rval; + + qedi_ep = qedi_conn->ep; + 
qedi_conn->cmd_cleanup_req = 0; + atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0); + + if (!qedi_ep) { + QEDI_WARN(&qedi->dbg_ctx, + "Cannot proceed, ep already disconnected, cid=0x%x\n", + qedi_conn->iscsi_conn_id); + return; + } + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n", + qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep); + + qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle); + + rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true); + if (rval) { + QEDI_ERR(&qedi->dbg_ctx, + "fatal error, need hard reset, cid=0x%x\n", + qedi_conn->iscsi_conn_id); + WARN_ON(1); + } +} + +static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn, + struct iscsi_task *task, + struct qedi_cmd *qedi_cmd, + struct qedi_work_map *list_work) +{ + struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data; + int wait; + + wait = wait_event_interruptible_timeout(qedi_conn->wait_queue, + ((qedi_cmd->state == + CLEANUP_RECV) || + ((qedi_cmd->type == TYPEIO) && + (cmd->state == + RESPONSE_RECEIVED))), + 5 * HZ); + if (!wait) { + qedi_cmd->state = CLEANUP_WAIT_FAILED; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "Cleanup timedout tid=0x%x, issue connection recovery, cid=0x%x\n", + cmd->task_id, qedi_conn->iscsi_conn_id); + + return -1; + } + return 0; +} + +static void qedi_abort_work(struct work_struct *work) +{ + struct qedi_cmd *qedi_cmd = + container_of(work, struct qedi_cmd, tmf_work); + struct qedi_conn *qedi_conn = qedi_cmd->conn; + struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct qedi_work_map *list_work = NULL; + struct iscsi_task *mtask; + struct qedi_cmd *cmd; + struct iscsi_task *ctask; + struct iscsi_tm *tmf_hdr; + s16 rval = 0; + + mtask = qedi_cmd->task; + tmf_hdr = (struct iscsi_tm *)mtask->hdr; + + spin_lock_bh(&conn->session->back_lock); + ctask = iscsi_itt_to_ctask(conn, tmf_hdr->rtt); + if (!ctask) { + spin_unlock_bh(&conn->session->back_lock); + QEDI_ERR(&qedi->dbg_ctx, "Invalid RTT. Letting abort timeout.\n"); + goto clear_cleanup; + } + + if (iscsi_task_is_completed(ctask)) { + spin_unlock_bh(&conn->session->back_lock); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Task already completed\n"); + /* + * We have to still send the TMF because libiscsi needs the + * response to avoid a timeout. 
+ */ + goto send_tmf; + } + spin_unlock_bh(&conn->session->back_lock); + + cmd = (struct qedi_cmd *)ctask->dd_data; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n", + get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id, + qedi_conn->iscsi_conn_id); + + if (qedi_do_not_recover) { + QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n", + qedi_do_not_recover); + goto clear_cleanup; + } + + list_work = kzalloc(sizeof(*list_work), GFP_NOIO); + if (!list_work) { + QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n"); + goto clear_cleanup; + } + + qedi_cmd->type = TYPEIO; + qedi_cmd->state = CLEANUP_WAIT; + list_work->qedi_cmd = qedi_cmd; + list_work->rtid = cmd->task_id; + list_work->state = QEDI_WORK_SCHEDULED; + list_work->ctask = ctask; + qedi_cmd->list_tmf_work = list_work; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n", + list_work->ptr_tmf_work, list_work, qedi_conn->iscsi_conn_id, + tmf_hdr->flags); + + spin_lock_bh(&qedi_conn->tmf_work_lock); + list_add_tail(&list_work->list, &qedi_conn->tmf_work_list); + spin_unlock_bh(&qedi_conn->tmf_work_lock); + + qedi_iscsi_cleanup_task(ctask, false); + + rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd, + list_work); + if (rval == -1) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "FW cleanup got escalated, cid=0x%x\n", + qedi_conn->iscsi_conn_id); + goto ldel_exit; + } + +send_tmf: + send_iscsi_tmf(qedi_conn, qedi_cmd->task, ctask); + goto clear_cleanup; + +ldel_exit: + spin_lock_bh(&qedi_conn->tmf_work_lock); + if (qedi_cmd->list_tmf_work) { + list_del_init(&list_work->list); + qedi_cmd->list_tmf_work = NULL; + kfree(list_work); + } + spin_unlock_bh(&qedi_conn->tmf_work_lock); + + spin_lock(&qedi_conn->list_lock); + if (likely(cmd->io_cmd_in_list)) { + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + } + spin_unlock(&qedi_conn->list_lock); + +clear_cleanup: + spin_lock(&qedi_conn->tmf_work_lock); + qedi_conn->fw_cleanup_works--; + spin_unlock(&qedi_conn->tmf_work_lock); +} + +static int send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask, + struct iscsi_task *ctask) +{ + struct iscsi_tmf_request_hdr tmf_pdu_header; + struct iscsi_task_params task_params; + struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_task_context *fw_task_ctx; + struct iscsi_tm *tmf_hdr; + struct qedi_cmd *qedi_cmd; + struct qedi_cmd *cmd; + struct qedi_endpoint *ep; + u32 scsi_lun[2]; + s16 tid = 0; + u16 sq_idx = 0; + + tmf_hdr = (struct iscsi_tm *)mtask->hdr; + qedi_cmd = (struct qedi_cmd *)mtask->dd_data; + ep = qedi_conn->ep; + if (!ep) + return -ENODEV; + + tid = qedi_get_task_idx(qedi); + if (tid == -1) + return -ENOMEM; + + fw_task_ctx = + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + tid); + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + + qedi_cmd->task_id = tid; + + memset(&task_params, 0, sizeof(task_params)); + memset(&tmf_pdu_header, 0, sizeof(tmf_pdu_header)); + + /* Update header info */ + qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd); + tmf_pdu_header.itt = qedi_set_itt(tid, get_itt(mtask->itt)); + tmf_pdu_header.cmd_sn = be32_to_cpu(tmf_hdr->cmdsn); + + memcpy(scsi_lun, &tmf_hdr->lun, sizeof(struct scsi_lun)); + tmf_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]); + tmf_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]); + + if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == + ISCSI_TM_FUNC_ABORT_TASK) { + cmd 
= (struct qedi_cmd *)ctask->dd_data; + tmf_pdu_header.rtt = + qedi_set_itt(cmd->task_id, + get_itt(tmf_hdr->rtt)); + } else { + tmf_pdu_header.rtt = ISCSI_RESERVED_TAG; + } + + tmf_pdu_header.opcode = tmf_hdr->opcode; + tmf_pdu_header.function = tmf_hdr->flags; + tmf_pdu_header.hdr_second_dword = ntoh24(tmf_hdr->dlength); + tmf_pdu_header.ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn); + + /* Fill fw input params */ + task_params.context = fw_task_ctx; + task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; + task_params.itid = tid; + task_params.cq_rss_number = 0; + task_params.tx_io_size = 0; + task_params.rx_io_size = 0; + + sq_idx = qedi_get_wqe_idx(qedi_conn); + task_params.sqe = &ep->sq[sq_idx]; + + memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); + init_initiator_tmf_request_task(&task_params, &tmf_pdu_header); + + spin_lock(&qedi_conn->list_lock); + list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); + qedi_cmd->io_cmd_in_list = true; + qedi_conn->active_cmd_count++; + spin_unlock(&qedi_conn->list_lock); + + qedi_ring_doorbell(qedi_conn); + return 0; +} + +int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask) +{ + struct iscsi_tm *tmf_hdr = (struct iscsi_tm *)mtask->hdr; + struct qedi_cmd *qedi_cmd = mtask->dd_data; + struct qedi_ctx *qedi = qedi_conn->qedi; + int rc = 0; + + switch (tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) { + case ISCSI_TM_FUNC_ABORT_TASK: + spin_lock(&qedi_conn->tmf_work_lock); + qedi_conn->fw_cleanup_works++; + spin_unlock(&qedi_conn->tmf_work_lock); + + INIT_WORK(&qedi_cmd->tmf_work, qedi_abort_work); + queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work); + break; + case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: + case ISCSI_TM_FUNC_TARGET_WARM_RESET: + case ISCSI_TM_FUNC_TARGET_COLD_RESET: + rc = send_iscsi_tmf(qedi_conn, mtask, NULL); + break; + default: + QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n", + qedi_conn->iscsi_conn_id); + return -EINVAL; + } + + return rc; +} + +int qedi_send_iscsi_text(struct qedi_conn *qedi_conn, + struct iscsi_task *task) +{ + struct iscsi_text_request_hdr text_request_pdu_header; + struct scsi_sgl_task_params tx_sgl_task_params; + struct scsi_sgl_task_params rx_sgl_task_params; + struct iscsi_task_params task_params; + struct iscsi_task_context *fw_task_ctx; + struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_text *text_hdr; + struct scsi_sge *req_sge = NULL; + struct scsi_sge *resp_sge = NULL; + struct qedi_cmd *qedi_cmd; + struct qedi_endpoint *ep; + s16 tid = 0; + u16 sq_idx = 0; + int rval = 0; + + req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + qedi_cmd = (struct qedi_cmd *)task->dd_data; + text_hdr = (struct iscsi_text *)task->hdr; + ep = qedi_conn->ep; + + tid = qedi_get_task_idx(qedi); + if (tid == -1) + return -ENOMEM; + + fw_task_ctx = + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + tid); + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + + qedi_cmd->task_id = tid; + + memset(&task_params, 0, sizeof(task_params)); + memset(&text_request_pdu_header, 0, sizeof(text_request_pdu_header)); + memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); + memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); + + /* Update header info */ + text_request_pdu_header.opcode = text_hdr->opcode; + text_request_pdu_header.flags_attr = text_hdr->flags; + + qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); + text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); + 
text_request_pdu_header.ttt = text_hdr->ttt; + text_request_pdu_header.cmd_sn = be32_to_cpu(text_hdr->cmdsn); + text_request_pdu_header.exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn); + text_request_pdu_header.hdr_second_dword = ntoh24(text_hdr->dlength); + + /* Fill tx AHS and rx buffer */ + tx_sgl_task_params.sgl = + (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + tx_sgl_task_params.sgl_phys_addr.lo = + (u32)(qedi_conn->gen_pdu.req_dma_addr); + tx_sgl_task_params.sgl_phys_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); + tx_sgl_task_params.total_buffer_size = req_sge->sge_len; + tx_sgl_task_params.num_sges = 1; + + rx_sgl_task_params.sgl = + (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + rx_sgl_task_params.sgl_phys_addr.lo = + (u32)(qedi_conn->gen_pdu.resp_dma_addr); + rx_sgl_task_params.sgl_phys_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); + rx_sgl_task_params.total_buffer_size = resp_sge->sge_len; + rx_sgl_task_params.num_sges = 1; + + /* Fill fw input params */ + task_params.context = fw_task_ctx; + task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; + task_params.itid = tid; + task_params.cq_rss_number = 0; + task_params.tx_io_size = ntoh24(text_hdr->dlength); + task_params.rx_io_size = resp_sge->sge_len; + + sq_idx = qedi_get_wqe_idx(qedi_conn); + task_params.sqe = &ep->sq[sq_idx]; + + memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); + rval = init_initiator_text_request_task(&task_params, + &text_request_pdu_header, + &tx_sgl_task_params, + &rx_sgl_task_params); + if (rval) + return -1; + + spin_lock(&qedi_conn->list_lock); + list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); + qedi_cmd->io_cmd_in_list = true; + qedi_conn->active_cmd_count++; + spin_unlock(&qedi_conn->list_lock); + + qedi_ring_doorbell(qedi_conn); + return 0; +} + +int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn, + struct iscsi_task *task, + char *datap, int data_len, int unsol) +{ + struct iscsi_nop_out_hdr nop_out_pdu_header; + struct scsi_sgl_task_params tx_sgl_task_params; + struct scsi_sgl_task_params rx_sgl_task_params; + struct iscsi_task_params task_params; + struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_task_context *fw_task_ctx; + struct iscsi_nopout *nopout_hdr; + struct scsi_sge *resp_sge = NULL; + struct qedi_cmd *qedi_cmd; + struct qedi_endpoint *ep; + u32 scsi_lun[2]; + s16 tid = 0; + u16 sq_idx = 0; + int rval = 0; + + resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + qedi_cmd = (struct qedi_cmd *)task->dd_data; + nopout_hdr = (struct iscsi_nopout *)task->hdr; + ep = qedi_conn->ep; + + tid = qedi_get_task_idx(qedi); + if (tid == -1) + return -ENOMEM; + + fw_task_ctx = + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + tid); + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + + qedi_cmd->task_id = tid; + + memset(&task_params, 0, sizeof(task_params)); + memset(&nop_out_pdu_header, 0, sizeof(nop_out_pdu_header)); + memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); + memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); + + /* Update header info */ + nop_out_pdu_header.opcode = nopout_hdr->opcode; + SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1); + SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0); + + memcpy(scsi_lun, &nopout_hdr->lun, sizeof(struct scsi_lun)); + nop_out_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]); + nop_out_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]); + nop_out_pdu_header.cmd_sn = 
be32_to_cpu(nopout_hdr->cmdsn); + nop_out_pdu_header.exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn); + + qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); + + if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) { + nop_out_pdu_header.itt = be32_to_cpu(nopout_hdr->itt); + nop_out_pdu_header.ttt = be32_to_cpu(nopout_hdr->ttt); + } else { + nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); + nop_out_pdu_header.ttt = ISCSI_TTT_ALL_ONES; + + spin_lock(&qedi_conn->list_lock); + list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); + qedi_cmd->io_cmd_in_list = true; + qedi_conn->active_cmd_count++; + spin_unlock(&qedi_conn->list_lock); + } + + /* Fill tx AHS and rx buffer */ + if (data_len) { + tx_sgl_task_params.sgl = + (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + tx_sgl_task_params.sgl_phys_addr.lo = + (u32)(qedi_conn->gen_pdu.req_dma_addr); + tx_sgl_task_params.sgl_phys_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); + tx_sgl_task_params.total_buffer_size = data_len; + tx_sgl_task_params.num_sges = 1; + + rx_sgl_task_params.sgl = + (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + rx_sgl_task_params.sgl_phys_addr.lo = + (u32)(qedi_conn->gen_pdu.resp_dma_addr); + rx_sgl_task_params.sgl_phys_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); + rx_sgl_task_params.total_buffer_size = resp_sge->sge_len; + rx_sgl_task_params.num_sges = 1; + } + + /* Fill fw input params */ + task_params.context = fw_task_ctx; + task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; + task_params.itid = tid; + task_params.cq_rss_number = 0; + task_params.tx_io_size = data_len; + task_params.rx_io_size = resp_sge->sge_len; + + sq_idx = qedi_get_wqe_idx(qedi_conn); + task_params.sqe = &ep->sq[sq_idx]; + + memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); + rval = init_initiator_nop_out_task(&task_params, + &nop_out_pdu_header, + &tx_sgl_task_params, + &rx_sgl_task_params); + if (rval) + return -1; + + qedi_ring_doorbell(qedi_conn); + return 0; +} + +static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len, + int bd_index) +{ + struct scsi_sge *bd = cmd->io_tbl.sge_tbl; + int frag_size, sg_frags; + + sg_frags = 0; + + while (sg_len) { + if (addr % QEDI_PAGE_SIZE) + frag_size = + (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE)); + else + frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 : + (sg_len % QEDI_BD_SPLIT_SZ); + + if (frag_size == 0) + frag_size = QEDI_BD_SPLIT_SZ; + + bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff); + bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32); + bd[bd_index + sg_frags].sge_len = (u16)frag_size; + QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO, + "split sge %d: addr=%llx, len=%x", + (bd_index + sg_frags), addr, frag_size); + + addr += (u64)frag_size; + sg_frags++; + sg_len -= frag_size; + } + return sg_frags; +} + +static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd) +{ + struct scsi_cmnd *sc = cmd->scsi_cmd; + struct scsi_sge *bd = cmd->io_tbl.sge_tbl; + struct scatterlist *sg; + int byte_count = 0; + int bd_count = 0; + int sg_count; + int sg_len; + int sg_frags; + u64 addr, end_addr; + int i; + + WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD); + + sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc), + scsi_sg_count(sc), sc->sc_data_direction); + + /* + * New condition to send single SGE as cached-SGL. + * Single SGE with length less than 64K. 
+ */ + sg = scsi_sglist(sc); + if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) { + sg_len = sg_dma_len(sg); + addr = (u64)sg_dma_address(sg); + + bd[bd_count].sge_addr.lo = (addr & 0xffffffff); + bd[bd_count].sge_addr.hi = (addr >> 32); + bd[bd_count].sge_len = (u16)sg_len; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, + "single-cached-sgl: bd_count:%d addr=%llx, len=%x", + sg_count, addr, sg_len); + + return ++bd_count; + } + + scsi_for_each_sg(sc, sg, sg_count, i) { + sg_len = sg_dma_len(sg); + addr = (u64)sg_dma_address(sg); + end_addr = (addr + sg_len); + + /* + * first sg elem in the 'list', + * check if end addr is page-aligned. + */ + if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE)) + cmd->use_slowpath = true; + + /* + * last sg elem in the 'list', + * check if start addr is page-aligned. + */ + else if ((i == (sg_count - 1)) && + (sg_count > 1) && (addr % QEDI_PAGE_SIZE)) + cmd->use_slowpath = true; + + /* + * middle sg elements in list, + * check if start and end addr is page-aligned + */ + else if ((i != 0) && (i != (sg_count - 1)) && + ((addr % QEDI_PAGE_SIZE) || + (end_addr % QEDI_PAGE_SIZE))) + cmd->use_slowpath = true; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x", + i, sg_len); + + if (sg_len > QEDI_BD_SPLIT_SZ) { + sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count); + } else { + sg_frags = 1; + bd[bd_count].sge_addr.lo = addr & 0xffffffff; + bd[bd_count].sge_addr.hi = addr >> 32; + bd[bd_count].sge_len = sg_len; + } + byte_count += sg_len; + bd_count += sg_frags; + } + + if (byte_count != scsi_bufflen(sc)) + QEDI_ERR(&qedi->dbg_ctx, + "byte_count = %d != scsi_bufflen = %d\n", byte_count, + scsi_bufflen(sc)); + else + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n", + byte_count); + + WARN_ON(byte_count != scsi_bufflen(sc)); + + return bd_count; +} + +static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd) +{ + int bd_count; + struct scsi_cmnd *sc = cmd->scsi_cmd; + + if (scsi_sg_count(sc)) { + bd_count = qedi_map_scsi_sg(cmd->conn->qedi, cmd); + if (bd_count == 0) + return; + } else { + struct scsi_sge *bd = cmd->io_tbl.sge_tbl; + + bd[0].sge_addr.lo = 0; + bd[0].sge_addr.hi = 0; + bd[0].sge_len = 0; + bd_count = 0; + } + cmd->io_tbl.sge_valid = bd_count; +} + +static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp) +{ + u32 dword; + int lpcnt; + u8 *srcp; + + lpcnt = sc->cmd_len / sizeof(dword); + srcp = (u8 *)sc->cmnd; + while (lpcnt--) { + memcpy(&dword, (const void *)srcp, 4); + *dstp = cpu_to_be32(dword); + srcp += 4; + dstp++; + } + if (sc->cmd_len & 0x3) { + dword = (u32)srcp[0] | ((u32)srcp[1] << 8); + *dstp = cpu_to_be32(dword); + } +} + +void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task, + u16 tid, int8_t direction) +{ + struct qedi_io_log *io_log; + struct iscsi_conn *conn = task->conn; + struct qedi_conn *qedi_conn = conn->dd_data; + struct scsi_cmnd *sc_cmd = task->sc; + unsigned long flags; + + spin_lock_irqsave(&qedi->io_trace_lock, flags); + + io_log = &qedi->io_trace_buf[qedi->io_trace_idx]; + io_log->direction = direction; + io_log->task_id = tid; + io_log->cid = qedi_conn->iscsi_conn_id; + io_log->lun = sc_cmd->device->lun; + io_log->op = sc_cmd->cmnd[0]; + io_log->lba[0] = sc_cmd->cmnd[2]; + io_log->lba[1] = sc_cmd->cmnd[3]; + io_log->lba[2] = sc_cmd->cmnd[4]; + io_log->lba[3] = sc_cmd->cmnd[5]; + io_log->bufflen = scsi_bufflen(sc_cmd); + io_log->sg_count = scsi_sg_count(sc_cmd); + io_log->fast_sgs = qedi->fast_sgls; + io_log->cached_sgs = qedi->cached_sgls; + 
io_log->slow_sgs = qedi->slow_sgls; + io_log->cached_sge = qedi->use_cached_sge; + io_log->slow_sge = qedi->use_slow_sge; + io_log->fast_sge = qedi->use_fast_sge; + io_log->result = sc_cmd->result; + io_log->jiffies = jiffies; + io_log->blk_req_cpu = smp_processor_id(); + + if (direction == QEDI_IO_TRACE_REQ) { + /* For requests we only care about the submission CPU */ + io_log->req_cpu = smp_processor_id() % qedi->num_queues; + io_log->intr_cpu = 0; + io_log->blk_rsp_cpu = 0; + } else if (direction == QEDI_IO_TRACE_RSP) { + io_log->req_cpu = smp_processor_id() % qedi->num_queues; + io_log->intr_cpu = qedi->intr_cpu; + io_log->blk_rsp_cpu = smp_processor_id(); + } + + qedi->io_trace_idx++; + if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE) + qedi->io_trace_idx = 0; + + qedi->use_cached_sge = false; + qedi->use_slow_sge = false; + qedi->use_fast_sge = false; + + spin_unlock_irqrestore(&qedi->io_trace_lock, flags); +} + +int qedi_iscsi_send_ioreq(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + struct iscsi_session *session = conn->session; + struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); + struct qedi_ctx *qedi = iscsi_host_priv(shost); + struct qedi_conn *qedi_conn = conn->dd_data; + struct qedi_cmd *cmd = task->dd_data; + struct scsi_cmnd *sc = task->sc; + struct iscsi_cmd_hdr cmd_pdu_header; + struct scsi_sgl_task_params tx_sgl_task_params; + struct scsi_sgl_task_params rx_sgl_task_params; + struct scsi_sgl_task_params *prx_sgl = NULL; + struct scsi_sgl_task_params *ptx_sgl = NULL; + struct iscsi_task_params task_params; + struct iscsi_conn_params conn_params; + struct scsi_initiator_cmd_params cmd_params; + struct iscsi_task_context *fw_task_ctx; + struct iscsi_cls_conn *cls_conn; + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; + enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE; + struct qedi_endpoint *ep; + u32 scsi_lun[2]; + s16 tid = 0; + u16 sq_idx = 0; + u16 cq_idx; + int rval = 0; + + ep = qedi_conn->ep; + cls_conn = qedi_conn->cls_conn; + conn = cls_conn->dd_data; + + qedi_iscsi_map_sg_list(cmd); + int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun); + + tid = qedi_get_task_idx(qedi); + if (tid == -1) + return -ENOMEM; + + fw_task_ctx = + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + tid); + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + + cmd->task_id = tid; + + memset(&task_params, 0, sizeof(task_params)); + memset(&cmd_pdu_header, 0, sizeof(cmd_pdu_header)); + memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); + memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); + memset(&conn_params, 0, sizeof(conn_params)); + memset(&cmd_params, 0, sizeof(cmd_params)); + + cq_idx = smp_processor_id() % qedi->num_queues; + /* Update header info */ + SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR, + ISCSI_ATTR_SIMPLE); + if (hdr->cdb[0] != TEST_UNIT_READY) { + if (sc->sc_data_direction == DMA_TO_DEVICE) { + SET_FIELD(cmd_pdu_header.flags_attr, + ISCSI_CMD_HDR_WRITE, 1); + task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE; + } else { + SET_FIELD(cmd_pdu_header.flags_attr, + ISCSI_CMD_HDR_READ, 1); + task_type = ISCSI_TASK_TYPE_INITIATOR_READ; + } + } + + cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]); + cmd_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]); + + qedi_update_itt_map(qedi, tid, task->itt, cmd); + cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); + cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length); + 
cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength); + cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn); + cmd_pdu_header.hdr_first_byte = hdr->opcode; + qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb); + + /* Fill tx AHS and rx buffer */ + if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) { + tx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl; + tx_sgl_task_params.sgl_phys_addr.lo = + (u32)(cmd->io_tbl.sge_tbl_dma); + tx_sgl_task_params.sgl_phys_addr.hi = + (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); + tx_sgl_task_params.total_buffer_size = scsi_bufflen(sc); + tx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid; + if (cmd->use_slowpath) + tx_sgl_task_params.small_mid_sge = true; + } else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) { + rx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl; + rx_sgl_task_params.sgl_phys_addr.lo = + (u32)(cmd->io_tbl.sge_tbl_dma); + rx_sgl_task_params.sgl_phys_addr.hi = + (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); + rx_sgl_task_params.total_buffer_size = scsi_bufflen(sc); + rx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid; + } + + /* Add conn param */ + conn_params.first_burst_length = conn->session->first_burst; + conn_params.max_send_pdu_length = conn->max_xmit_dlength; + conn_params.max_burst_length = conn->session->max_burst; + if (conn->session->initial_r2t_en) + conn_params.initial_r2t = true; + if (conn->session->imm_data_en) + conn_params.immediate_data = true; + + /* Add cmd params */ + cmd_params.sense_data_buffer_phys_addr.lo = (u32)cmd->sense_buffer_dma; + cmd_params.sense_data_buffer_phys_addr.hi = + (u32)((u64)cmd->sense_buffer_dma >> 32); + /* Fill fw input params */ + task_params.context = fw_task_ctx; + task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; + task_params.itid = tid; + task_params.cq_rss_number = cq_idx; + if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) + task_params.tx_io_size = scsi_bufflen(sc); + else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) + task_params.rx_io_size = scsi_bufflen(sc); + + sq_idx = qedi_get_wqe_idx(qedi_conn); + task_params.sqe = &ep->sq[sq_idx]; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, + "%s: %s-SGL: sg_len=0x%x num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x\n", + (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ? + "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ? + "Single" : (cmd->use_slowpath ? 
"SLOW" : "FAST"), + (u16)cmd->io_tbl.sge_valid, scsi_bufflen(sc), + (u32)(cmd->io_tbl.sge_tbl_dma), + (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32)); + + memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); + + if (task_params.tx_io_size != 0) + ptx_sgl = &tx_sgl_task_params; + if (task_params.rx_io_size != 0) + prx_sgl = &rx_sgl_task_params; + + rval = init_initiator_rw_iscsi_task(&task_params, &conn_params, + &cmd_params, &cmd_pdu_header, + ptx_sgl, prx_sgl, + NULL); + if (rval) + return -1; + + spin_lock(&qedi_conn->list_lock); + list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list); + cmd->io_cmd_in_list = true; + qedi_conn->active_cmd_count++; + spin_unlock(&qedi_conn->list_lock); + + qedi_ring_doorbell(qedi_conn); + return 0; +} + +int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted) +{ + struct iscsi_task_params task_params; + struct qedi_endpoint *ep; + struct iscsi_conn *conn = task->conn; + struct qedi_conn *qedi_conn = conn->dd_data; + struct qedi_cmd *cmd = task->dd_data; + u16 sq_idx = 0; + int rval = 0; + + QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n", + cmd->task_id, get_itt(task->itt), task->state, + cmd->state, qedi_conn->iscsi_conn_id); + + memset(&task_params, 0, sizeof(task_params)); + ep = qedi_conn->ep; + + sq_idx = qedi_get_wqe_idx(qedi_conn); + + task_params.sqe = &ep->sq[sq_idx]; + memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); + task_params.itid = cmd->task_id; + + rval = init_cleanup_task(&task_params); + if (rval) + return rval; + + qedi_ring_doorbell(qedi_conn); + return 0; +} diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c new file mode 100644 index 000000000..642556a1c --- /dev/null +++ b/drivers/scsi/qedi/qedi_fw_api.c @@ -0,0 +1,802 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + */ + +#include +#include +#include "qedi_hsi.h" +#include + +#include "qedi_fw_iscsi.h" +#include "qedi_fw_scsi.h" + +#define SCSI_NUM_SGES_IN_CACHE 0x4 + +static bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge) +{ + return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge); +} + +static +void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params, + struct scsi_cached_sges *ctx_data_desc, + struct scsi_sgl_task_params *sgl_task_params) +{ + u8 sge_index; + u8 num_sges; + u32 val; + + num_sges = (sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE) ? 
+ SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges; + + /* sgl params */ + val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo); + ctx_sgl_params->sgl_addr.lo = val; + val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi); + ctx_sgl_params->sgl_addr.hi = val; + val = cpu_to_le32(sgl_task_params->total_buffer_size); + ctx_sgl_params->sgl_total_length = val; + ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges); + + for (sge_index = 0; sge_index < num_sges; sge_index++) { + val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo); + ctx_data_desc->sge[sge_index].sge_addr.lo = val; + val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi); + ctx_data_desc->sge[sge_index].sge_addr.hi = val; + val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len); + ctx_data_desc->sge[sge_index].sge_len = val; + } +} + +static u32 calc_rw_task_size(struct iscsi_task_params *task_params, + enum iscsi_task_type task_type, + struct scsi_sgl_task_params *sgl_task_params, + struct scsi_dif_task_params *dif_task_params) +{ + u32 io_size; + + if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE || + task_type == ISCSI_TASK_TYPE_TARGET_READ) + io_size = task_params->tx_io_size; + else + io_size = task_params->rx_io_size; + + if (!io_size) + return 0; + + if (!dif_task_params) + return io_size; + + return !dif_task_params->dif_on_network ? + io_size : sgl_task_params->total_buffer_size; +} + +static void +init_dif_context_flags(struct iscsi_dif_flags *ctx_dif_flags, + struct scsi_dif_task_params *dif_task_params) +{ + if (!dif_task_params) + return; + + SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG, + dif_task_params->dif_block_size_log); + SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_DIF_TO_PEER, + dif_task_params->dif_on_network ? 1 : 0); + SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_HOST_INTERFACE, + dif_task_params->dif_on_host ? 
1 : 0); +} + +static void init_sqe(struct iscsi_task_params *task_params, + struct scsi_sgl_task_params *sgl_task_params, + struct scsi_dif_task_params *dif_task_params, + struct iscsi_common_hdr *pdu_header, + struct scsi_initiator_cmd_params *cmd_params, + enum iscsi_task_type task_type, + bool is_cleanup) +{ + if (!task_params->sqe) + return; + + memset(task_params->sqe, 0, sizeof(*task_params->sqe)); + task_params->sqe->task_id = cpu_to_le16(task_params->itid); + if (is_cleanup) { + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, + ISCSI_WQE_TYPE_TASK_CLEANUP); + return; + } + + switch (task_type) { + case ISCSI_TASK_TYPE_INITIATOR_WRITE: + { + u32 buf_size = 0; + u32 num_sges = 0; + + init_dif_context_flags(&task_params->sqe->prot_flags, + dif_task_params); + + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, + ISCSI_WQE_TYPE_NORMAL); + + if (task_params->tx_io_size) { + buf_size = calc_rw_task_size(task_params, task_type, + sgl_task_params, + dif_task_params); + + if (scsi_is_slow_sgl(sgl_task_params->num_sges, + sgl_task_params->small_mid_sge)) + num_sges = ISCSI_WQE_NUM_SGES_SLOWIO; + else + num_sges = min(sgl_task_params->num_sges, + (u16)SCSI_NUM_SGES_SLOW_SGL_THR); + } + + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, + num_sges); + SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN, + buf_size); + + if (GET_FIELD(pdu_header->hdr_second_dword, + ISCSI_CMD_HDR_TOTAL_AHS_LEN)) + SET_FIELD(task_params->sqe->contlen_cdbsize, + ISCSI_WQE_CDB_SIZE, + cmd_params->extended_cdb_sge.sge_len); + } + break; + case ISCSI_TASK_TYPE_INITIATOR_READ: + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, + ISCSI_WQE_TYPE_NORMAL); + + if (GET_FIELD(pdu_header->hdr_second_dword, + ISCSI_CMD_HDR_TOTAL_AHS_LEN)) + SET_FIELD(task_params->sqe->contlen_cdbsize, + ISCSI_WQE_CDB_SIZE, + cmd_params->extended_cdb_sge.sge_len); + break; + case ISCSI_TASK_TYPE_LOGIN_RESPONSE: + case ISCSI_TASK_TYPE_MIDPATH: + { + bool advance_statsn = true; + + if (task_type == ISCSI_TASK_TYPE_LOGIN_RESPONSE) + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, + ISCSI_WQE_TYPE_LOGIN); + else + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, + ISCSI_WQE_TYPE_MIDDLE_PATH); + + if (task_type == ISCSI_TASK_TYPE_MIDPATH) { + u8 opcode = GET_FIELD(pdu_header->hdr_first_byte, + ISCSI_COMMON_HDR_OPCODE); + + if (opcode != ISCSI_OPCODE_TEXT_RESPONSE && + (opcode != ISCSI_OPCODE_NOP_IN || + pdu_header->itt == ISCSI_TTT_ALL_ONES)) + advance_statsn = false; + } + + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE, + advance_statsn ? 
1 : 0); + + if (task_params->tx_io_size) { + SET_FIELD(task_params->sqe->contlen_cdbsize, + ISCSI_WQE_CONT_LEN, task_params->tx_io_size); + + if (scsi_is_slow_sgl(sgl_task_params->num_sges, + sgl_task_params->small_mid_sge)) + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, + ISCSI_WQE_NUM_SGES_SLOWIO); + else + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, + min(sgl_task_params->num_sges, + (u16)SCSI_NUM_SGES_SLOW_SGL_THR)); + } + } + break; + default: + break; + } +} + +static void init_default_iscsi_task(struct iscsi_task_params *task_params, + struct data_hdr *pdu_header, + enum iscsi_task_type task_type) +{ + struct iscsi_task_context *context; + u32 val; + u16 index; + u8 val_byte; + + context = task_params->context; + val_byte = context->mstorm_ag_context.cdu_validation; + memset(context, 0, sizeof(*context)); + context->mstorm_ag_context.cdu_validation = val_byte; + + for (index = 0; index < + ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data); + index++) { + val = cpu_to_le32(pdu_header->data[index]); + context->ystorm_st_context.pdu_hdr.data.data[index] = val; + } + + context->mstorm_st_context.task_type = task_type; + context->mstorm_ag_context.task_cid = + cpu_to_le16(task_params->conn_icid); + + SET_FIELD(context->ustorm_ag_context.flags1, + USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); + + context->ustorm_st_context.task_type = task_type; + context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number; + context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid); +} + +static +void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc, + struct scsi_initiator_cmd_params *cmd) +{ + union iscsi_task_hdr *ctx_pdu_hdr = &ystc->pdu_hdr; + u32 val; + + if (!cmd->extended_cdb_sge.sge_len) + return; + + SET_FIELD(ctx_pdu_hdr->ext_cdb_cmd.hdr_second_dword, + ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE, + cmd->extended_cdb_sge.sge_len); + val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.lo); + ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.lo = val; + val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.hi); + ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.hi = val; + val = cpu_to_le32(cmd->extended_cdb_sge.sge_len); + ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_len = val; +} + +static +void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt, + struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt, + u32 remaining_recv_len, u32 expected_data_transfer_len, + u8 num_sges, bool tx_dif_conn_err_en) +{ + u32 val; + + ustorm_st_cxt->rem_rcv_len = cpu_to_le32(remaining_recv_len); + ustorm_ag_cxt->exp_data_acked = cpu_to_le32(expected_data_transfer_len); + val = cpu_to_le32(expected_data_transfer_len); + ustorm_st_cxt->exp_data_transfer_len = val; + SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges); + SET_FIELD(ustorm_ag_cxt->flags2, + USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN, + tx_dif_conn_err_en ? 
1 : 0); +} + +static +void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context, + struct iscsi_conn_params *conn_params, + enum iscsi_task_type task_type, + u32 task_size, + u32 exp_data_transfer_len, + u8 total_ahs_length) +{ + u32 max_unsolicited_data = 0, val; + + if (total_ahs_length && + (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE || + task_type == ISCSI_TASK_TYPE_INITIATOR_READ)) + SET_FIELD(context->ustorm_st_context.flags2, + USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST, 1); + + switch (task_type) { + case ISCSI_TASK_TYPE_INITIATOR_WRITE: + if (!conn_params->initial_r2t) + max_unsolicited_data = conn_params->first_burst_length; + else if (conn_params->immediate_data) + max_unsolicited_data = + min(conn_params->first_burst_length, + conn_params->max_send_pdu_length); + + context->ustorm_ag_context.exp_data_acked = + cpu_to_le32(total_ahs_length == 0 ? + min(exp_data_transfer_len, + max_unsolicited_data) : + ((u32)(total_ahs_length + + ISCSI_AHS_CNTL_SIZE))); + break; + case ISCSI_TASK_TYPE_TARGET_READ: + val = cpu_to_le32(exp_data_transfer_len); + context->ustorm_ag_context.exp_data_acked = val; + break; + case ISCSI_TASK_TYPE_INITIATOR_READ: + context->ustorm_ag_context.exp_data_acked = + cpu_to_le32((total_ahs_length == 0 ? 0 : + total_ahs_length + + ISCSI_AHS_CNTL_SIZE)); + break; + case ISCSI_TASK_TYPE_TARGET_WRITE: + val = cpu_to_le32(task_size); + context->ustorm_ag_context.exp_cont_len = val; + break; + default: + break; + } +} + +static +void init_rtdif_task_context(struct rdif_task_context *rdif_context, + struct tdif_task_context *tdif_context, + struct scsi_dif_task_params *dif_task_params, + enum iscsi_task_type task_type) +{ + u32 val; + + if (!dif_task_params->dif_on_network || !dif_task_params->dif_on_host) + return; + + if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE || + task_type == ISCSI_TASK_TYPE_INITIATOR_READ) { + rdif_context->app_tag_value = + cpu_to_le16(dif_task_params->application_tag); + rdif_context->partial_crc_value = cpu_to_le16(0xffff); + val = cpu_to_le32(dif_task_params->initial_ref_tag); + rdif_context->initial_ref_tag = val; + rdif_context->app_tag_mask = + cpu_to_le16(dif_task_params->application_tag_mask); + SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED, + dif_task_params->crc_seed ? 1 : 0); + SET_FIELD(rdif_context->flags0, + RDIF_TASK_CONTEXT_HOST_GUARD_TYPE, + dif_task_params->host_guard_type); + SET_FIELD(rdif_context->flags0, + RDIF_TASK_CONTEXT_PROTECTION_TYPE, + dif_task_params->protection_type); + SET_FIELD(rdif_context->flags0, + RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1); + SET_FIELD(rdif_context->flags0, + RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST, + dif_task_params->keep_ref_tag_const ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_VALIDATE_APP_TAG, + (dif_task_params->validate_app_tag && + dif_task_params->dif_on_network) ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_VALIDATE_GUARD, + (dif_task_params->validate_guard && + dif_task_params->dif_on_network) ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_VALIDATE_REF_TAG, + (dif_task_params->validate_ref_tag && + dif_task_params->dif_on_network) ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_HOST_INTERFACE, + dif_task_params->dif_on_host ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_NETWORK_INTERFACE, + dif_task_params->dif_on_network ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_FORWARD_GUARD, + dif_task_params->forward_guard ? 
1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_FORWARD_APP_TAG, + dif_task_params->forward_app_tag ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_FORWARD_REF_TAG, + dif_task_params->forward_ref_tag ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK, + dif_task_params->forward_app_tag_with_mask ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK, + dif_task_params->forward_ref_tag_with_mask ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_INTERVAL_SIZE, + dif_task_params->dif_block_size_log - 9); + SET_FIELD(rdif_context->state, + RDIF_TASK_CONTEXT_REF_TAG_MASK, + dif_task_params->ref_tag_mask); + SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG, + dif_task_params->ignore_app_tag); + } + + if (task_type == ISCSI_TASK_TYPE_TARGET_READ || + task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) { + tdif_context->app_tag_value = + cpu_to_le16(dif_task_params->application_tag); + tdif_context->partial_crc_value_b = + cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000); + tdif_context->partial_crc_value_a = + cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000); + SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_CRC_SEED, + dif_task_params->crc_seed ? 1 : 0); + + SET_FIELD(tdif_context->flags0, + TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP, + dif_task_params->tx_dif_conn_err_en ? 1 : 0); + SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD, + dif_task_params->forward_guard ? 1 : 0); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_FORWARD_APP_TAG, + dif_task_params->forward_app_tag ? 1 : 0); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_FORWARD_REF_TAG, + dif_task_params->forward_ref_tag ? 1 : 0); + SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE, + dif_task_params->dif_block_size_log - 9); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_HOST_INTERFACE, + dif_task_params->dif_on_host ? 1 : 0); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_NETWORK_INTERFACE, + dif_task_params->dif_on_network ? 1 : 0); + val = cpu_to_le32(dif_task_params->initial_ref_tag); + tdif_context->initial_ref_tag = val; + tdif_context->app_tag_mask = + cpu_to_le16(dif_task_params->application_tag_mask); + SET_FIELD(tdif_context->flags0, + TDIF_TASK_CONTEXT_HOST_GUARD_TYPE, + dif_task_params->host_guard_type); + SET_FIELD(tdif_context->flags0, + TDIF_TASK_CONTEXT_PROTECTION_TYPE, + dif_task_params->protection_type); + SET_FIELD(tdif_context->flags0, + TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, + dif_task_params->initial_ref_tag_is_valid ? 1 : 0); + SET_FIELD(tdif_context->flags0, + TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST, + dif_task_params->keep_ref_tag_const ? 1 : 0); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_VALIDATE_GUARD, + (dif_task_params->validate_guard && + dif_task_params->dif_on_host) ? 1 : 0); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_VALIDATE_APP_TAG, + (dif_task_params->validate_app_tag && + dif_task_params->dif_on_host) ? 1 : 0); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_VALIDATE_REF_TAG, + (dif_task_params->validate_ref_tag && + dif_task_params->dif_on_host) ? 1 : 0); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK, + dif_task_params->forward_app_tag_with_mask ? 1 : 0); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK, + dif_task_params->forward_ref_tag_with_mask ? 
1 : 0); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_REF_TAG_MASK, + dif_task_params->ref_tag_mask); + SET_FIELD(tdif_context->flags0, + TDIF_TASK_CONTEXT_IGNORE_APP_TAG, + dif_task_params->ignore_app_tag ? 1 : 0); + } +} + +static void set_local_completion_context(struct iscsi_task_context *context) +{ + SET_FIELD(context->ystorm_st_context.state.flags, + YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1); + SET_FIELD(context->ustorm_st_context.flags, + USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1); +} + +static int init_rw_iscsi_task(struct iscsi_task_params *task_params, + enum iscsi_task_type task_type, + struct iscsi_conn_params *conn_params, + struct iscsi_common_hdr *pdu_header, + struct scsi_sgl_task_params *sgl_task_params, + struct scsi_initiator_cmd_params *cmd_params, + struct scsi_dif_task_params *dif_task_params) +{ + u32 exp_data_transfer_len = conn_params->max_burst_length; + struct iscsi_task_context *cxt; + bool slow_io = false; + u32 task_size, val; + u8 num_sges = 0; + + task_size = calc_rw_task_size(task_params, task_type, sgl_task_params, + dif_task_params); + + init_default_iscsi_task(task_params, (struct data_hdr *)pdu_header, + task_type); + + cxt = task_params->context; + + + if (task_type == ISCSI_TASK_TYPE_TARGET_READ) { + set_local_completion_context(cxt); + } else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) { + val = cpu_to_le32(task_size + + ((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset); + cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val; + cxt->mstorm_st_context.expected_itt = + cpu_to_le32(pdu_header->itt); + } else { + val = cpu_to_le32(task_size); + cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = + val; + init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context, + cmd_params); + val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo); + cxt->mstorm_st_context.sense_db.lo = val; + + val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi); + cxt->mstorm_st_context.sense_db.hi = val; + } + + if (task_params->tx_io_size) { + init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags, + dif_task_params); + init_dif_context_flags(&cxt->ustorm_st_context.dif_flags, + dif_task_params); + init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, + &cxt->ystorm_st_context.state.data_desc, + sgl_task_params); + + slow_io = scsi_is_slow_sgl(sgl_task_params->num_sges, + sgl_task_params->small_mid_sge); + + num_sges = !slow_io ? min_t(u16, sgl_task_params->num_sges, + (u16)SCSI_NUM_SGES_SLOW_SGL_THR) : + ISCSI_WQE_NUM_SGES_SLOWIO; + + if (slow_io) { + SET_FIELD(cxt->ystorm_st_context.state.flags, + YSTORM_ISCSI_TASK_STATE_SLOW_IO, 1); + } + } else if (task_params->rx_io_size) { + init_dif_context_flags(&cxt->mstorm_st_context.dif_flags, + dif_task_params); + init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params, + &cxt->mstorm_st_context.data_desc, + sgl_task_params); + num_sges = !scsi_is_slow_sgl(sgl_task_params->num_sges, + sgl_task_params->small_mid_sge) ? + min_t(u16, sgl_task_params->num_sges, + (u16)SCSI_NUM_SGES_SLOW_SGL_THR) : + ISCSI_WQE_NUM_SGES_SLOWIO; + cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size); + } + + if (exp_data_transfer_len > task_size || + task_type != ISCSI_TASK_TYPE_TARGET_WRITE) + exp_data_transfer_len = task_size; + + init_ustorm_task_contexts(&task_params->context->ustorm_st_context, + &task_params->context->ustorm_ag_context, + task_size, exp_data_transfer_len, num_sges, + dif_task_params ? 
+ dif_task_params->tx_dif_conn_err_en : false); + + set_rw_exp_data_acked_and_cont_len(task_params->context, conn_params, + task_type, task_size, + exp_data_transfer_len, + GET_FIELD(pdu_header->hdr_second_dword, + ISCSI_CMD_HDR_TOTAL_AHS_LEN)); + + if (dif_task_params) + init_rtdif_task_context(&task_params->context->rdif_context, + &task_params->context->tdif_context, + dif_task_params, task_type); + + init_sqe(task_params, sgl_task_params, dif_task_params, pdu_header, + cmd_params, task_type, false); + + return 0; +} + +int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params, + struct iscsi_conn_params *conn_params, + struct scsi_initiator_cmd_params *cmd_params, + struct iscsi_cmd_hdr *cmd_header, + struct scsi_sgl_task_params *tx_sgl_params, + struct scsi_sgl_task_params *rx_sgl_params, + struct scsi_dif_task_params *dif_task_params) +{ + if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_WRITE)) + return init_rw_iscsi_task(task_params, + ISCSI_TASK_TYPE_INITIATOR_WRITE, + conn_params, + (struct iscsi_common_hdr *)cmd_header, + tx_sgl_params, cmd_params, + dif_task_params); + else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ) || + (task_params->rx_io_size == 0 && task_params->tx_io_size == 0)) + return init_rw_iscsi_task(task_params, + ISCSI_TASK_TYPE_INITIATOR_READ, + conn_params, + (struct iscsi_common_hdr *)cmd_header, + rx_sgl_params, cmd_params, + dif_task_params); + else + return -1; +} + +int init_initiator_login_request_task(struct iscsi_task_params *task_params, + struct iscsi_login_req_hdr *login_header, + struct scsi_sgl_task_params *tx_params, + struct scsi_sgl_task_params *rx_params) +{ + struct iscsi_task_context *cxt; + + cxt = task_params->context; + + init_default_iscsi_task(task_params, + (struct data_hdr *)login_header, + ISCSI_TASK_TYPE_MIDPATH); + + init_ustorm_task_contexts(&cxt->ustorm_st_context, + &cxt->ustorm_ag_context, + task_params->rx_io_size ? + rx_params->total_buffer_size : 0, + task_params->tx_io_size ? + tx_params->total_buffer_size : 0, 0, + 0); + + if (task_params->tx_io_size) + init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, + &cxt->ystorm_st_context.state.data_desc, + tx_params); + + if (task_params->rx_io_size) + init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params, + &cxt->mstorm_st_context.data_desc, + rx_params); + + cxt->mstorm_st_context.rem_task_size = + cpu_to_le32(task_params->rx_io_size ? + rx_params->total_buffer_size : 0); + + init_sqe(task_params, tx_params, NULL, + (struct iscsi_common_hdr *)login_header, NULL, + ISCSI_TASK_TYPE_MIDPATH, false); + + return 0; +} + +int init_initiator_nop_out_task(struct iscsi_task_params *task_params, + struct iscsi_nop_out_hdr *nop_out_pdu_header, + struct scsi_sgl_task_params *tx_sgl_task_params, + struct scsi_sgl_task_params *rx_sgl_task_params) +{ + struct iscsi_task_context *cxt; + + cxt = task_params->context; + + init_default_iscsi_task(task_params, + (struct data_hdr *)nop_out_pdu_header, + ISCSI_TASK_TYPE_MIDPATH); + + if (nop_out_pdu_header->itt == ISCSI_ITT_ALL_ONES) + set_local_completion_context(task_params->context); + + if (task_params->tx_io_size) + init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, + &cxt->ystorm_st_context.state.data_desc, + tx_sgl_task_params); + + if (task_params->rx_io_size) + init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params, + &cxt->mstorm_st_context.data_desc, + rx_sgl_task_params); + + init_ustorm_task_contexts(&cxt->ustorm_st_context, + &cxt->ustorm_ag_context, + task_params->rx_io_size ? 
+ rx_sgl_task_params->total_buffer_size : 0, + task_params->tx_io_size ? + tx_sgl_task_params->total_buffer_size : 0, + 0, 0); + + cxt->mstorm_st_context.rem_task_size = + cpu_to_le32(task_params->rx_io_size ? + rx_sgl_task_params->total_buffer_size : + 0); + + init_sqe(task_params, tx_sgl_task_params, NULL, + (struct iscsi_common_hdr *)nop_out_pdu_header, NULL, + ISCSI_TASK_TYPE_MIDPATH, false); + + return 0; +} + +int init_initiator_logout_request_task(struct iscsi_task_params *task_params, + struct iscsi_logout_req_hdr *logout_hdr, + struct scsi_sgl_task_params *tx_params, + struct scsi_sgl_task_params *rx_params) +{ + struct iscsi_task_context *cxt; + + cxt = task_params->context; + + init_default_iscsi_task(task_params, + (struct data_hdr *)logout_hdr, + ISCSI_TASK_TYPE_MIDPATH); + + if (task_params->tx_io_size) + init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, + &cxt->ystorm_st_context.state.data_desc, + tx_params); + + if (task_params->rx_io_size) + init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params, + &cxt->mstorm_st_context.data_desc, + rx_params); + + init_ustorm_task_contexts(&cxt->ustorm_st_context, + &cxt->ustorm_ag_context, + task_params->rx_io_size ? + rx_params->total_buffer_size : 0, + task_params->tx_io_size ? + tx_params->total_buffer_size : 0, + 0, 0); + + cxt->mstorm_st_context.rem_task_size = + cpu_to_le32(task_params->rx_io_size ? + rx_params->total_buffer_size : 0); + + init_sqe(task_params, tx_params, NULL, + (struct iscsi_common_hdr *)logout_hdr, NULL, + ISCSI_TASK_TYPE_MIDPATH, false); + + return 0; +} + +int init_initiator_tmf_request_task(struct iscsi_task_params *task_params, + struct iscsi_tmf_request_hdr *tmf_header) +{ + init_default_iscsi_task(task_params, (struct data_hdr *)tmf_header, + ISCSI_TASK_TYPE_MIDPATH); + + init_sqe(task_params, NULL, NULL, + (struct iscsi_common_hdr *)tmf_header, NULL, + ISCSI_TASK_TYPE_MIDPATH, false); + + return 0; +} + +int init_initiator_text_request_task(struct iscsi_task_params *task_params, + struct iscsi_text_request_hdr *text_header, + struct scsi_sgl_task_params *tx_params, + struct scsi_sgl_task_params *rx_params) +{ + struct iscsi_task_context *cxt; + + cxt = task_params->context; + + init_default_iscsi_task(task_params, + (struct data_hdr *)text_header, + ISCSI_TASK_TYPE_MIDPATH); + + if (task_params->tx_io_size) + init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, + &cxt->ystorm_st_context.state.data_desc, + tx_params); + + if (task_params->rx_io_size) + init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params, + &cxt->mstorm_st_context.data_desc, + rx_params); + + cxt->mstorm_st_context.rem_task_size = + cpu_to_le32(task_params->rx_io_size ? + rx_params->total_buffer_size : 0); + + init_ustorm_task_contexts(&cxt->ustorm_st_context, + &cxt->ustorm_ag_context, + task_params->rx_io_size ? + rx_params->total_buffer_size : 0, + task_params->tx_io_size ? 
+				  tx_params->total_buffer_size : 0, 0, 0);
+
+	init_sqe(task_params, tx_params, NULL,
+		 (struct iscsi_common_hdr *)text_header, NULL,
+		 ISCSI_TASK_TYPE_MIDPATH, false);
+
+	return 0;
+}
+
+int init_cleanup_task(struct iscsi_task_params *task_params)
+{
+	init_sqe(task_params, NULL, NULL, NULL, NULL, ISCSI_TASK_TYPE_MIDPATH,
+		 true);
+	return 0;
+}
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
new file mode 100644
index 000000000..df2d471a7
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ */
+
+#ifndef _QEDI_FW_ISCSI_H_
+#define _QEDI_FW_ISCSI_H_
+
+#include "qedi_fw_scsi.h"
+
+struct iscsi_task_params {
+	struct iscsi_task_context *context;
+	struct iscsi_wqe *sqe;
+	u32 tx_io_size;
+	u32 rx_io_size;
+	u16 conn_icid;
+	u16 itid;
+	u8 cq_rss_number;
+};
+
+struct iscsi_conn_params {
+	u32 first_burst_length;
+	u32 max_send_pdu_length;
+	u32 max_burst_length;
+	bool initial_r2t;
+	bool immediate_data;
+};
+
+/* @brief init_initiator_rw_iscsi_task - initializes iSCSI Initiator Read or
+ * Write task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param conn_params - Connection Parameters
+ * @param cmd_params - command specific parameters
+ * @param cmd_pdu_header - PDU Header Parameters
+ * @param tx_sgl_params - Pointer to Tx SGL task params
+ * @param rx_sgl_params - Pointer to Rx SGL task params
+ * @param dif_task_params - Pointer to DIF parameters struct
+ */
+int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
+				 struct iscsi_conn_params *conn_params,
+				 struct scsi_initiator_cmd_params *cmd_params,
+				 struct iscsi_cmd_hdr *cmd_pdu_header,
+				 struct scsi_sgl_task_params *tx_sgl_params,
+				 struct scsi_sgl_task_params *rx_sgl_params,
+				 struct scsi_dif_task_params *dif_task_params);
+
+/* @brief init_initiator_login_request_task - initializes iSCSI Initiator Login
+ * Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param login_header - PDU Header Parameters
+ * @param tx_params - Pointer to Tx SGL task params
+ * @param rx_params - Pointer to Rx SGL task params
+ */
+int init_initiator_login_request_task(struct iscsi_task_params *task_params,
+				      struct iscsi_login_req_hdr *login_header,
+				      struct scsi_sgl_task_params *tx_params,
+				      struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_initiator_nop_out_task - initializes iSCSI Initiator NOP Out
+ * task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param nop_out_pdu_header - PDU Header Parameters
+ * @param tx_sgl_params - Pointer to Tx SGL task params
+ * @param rx_sgl_params - Pointer to Rx SGL task params
+ */
+int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
+				struct iscsi_nop_out_hdr *nop_out_pdu_header,
+				struct scsi_sgl_task_params *tx_sgl_params,
+				struct scsi_sgl_task_params *rx_sgl_params);
+
+/* @brief init_initiator_logout_request_task - initializes iSCSI Initiator
+ * Logout Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param logout_hdr - PDU Header Parameters
+ * @param tx_params - Pointer to Tx SGL task params
+ * @param rx_params - Pointer to Rx SGL task params
+ */
+int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
+				       struct iscsi_logout_req_hdr *logout_hdr,
+				       struct scsi_sgl_task_params *tx_params,
+				       struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_initiator_tmf_request_task - initializes iSCSI Initiator TMF
+ * task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param tmf_header - PDU Header Parameters
+ */
+int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
+				    struct iscsi_tmf_request_hdr *tmf_header);
+
+/* @brief init_initiator_text_request_task - initializes iSCSI Initiator Text
+ * Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param text_header - PDU Header Parameters
+ * @param tx_params - Pointer to Tx SGL task params
+ * @param rx_params - Pointer to Rx SGL task params
+ */
+int init_initiator_text_request_task(struct iscsi_task_params *task_params,
+				     struct iscsi_text_request_hdr *text_header,
+				     struct scsi_sgl_task_params *tx_params,
+				     struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_cleanup_task - initializes a Cleanup task (SQE)
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_cleanup_task(struct iscsi_task_params *task_params);
+#endif
diff --git a/drivers/scsi/qedi/qedi_fw_scsi.h b/drivers/scsi/qedi/qedi_fw_scsi.h
new file mode 100644
index 000000000..2524f9f3c
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_scsi.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ */
+
+#ifndef _QEDI_FW_SCSI_H_
+#define _QEDI_FW_SCSI_H_
+
+#include
+#include
+#include "qedi_hsi.h"
+#include
+
+struct scsi_sgl_task_params {
+	struct scsi_sge *sgl;
+	struct regpair sgl_phys_addr;
+	u32 total_buffer_size;
+	u16 num_sges;
+	bool small_mid_sge;
+};
+
+struct scsi_dif_task_params {
+	u32 initial_ref_tag;
+	bool initial_ref_tag_is_valid;
+	u16 application_tag;
+	u16 application_tag_mask;
+	u16 dif_block_size_log;
+	bool dif_on_network;
+	bool dif_on_host;
+	u8 host_guard_type;
+	u8 protection_type;
+	u8 ref_tag_mask;
+	bool crc_seed;
+	bool tx_dif_conn_err_en;
+	bool ignore_app_tag;
+	bool keep_ref_tag_const;
+	bool validate_guard;
+	bool validate_app_tag;
+	bool validate_ref_tag;
+	bool forward_guard;
+	bool forward_app_tag;
+	bool forward_ref_tag;
+	bool forward_app_tag_with_mask;
+	bool forward_ref_tag_with_mask;
+};
+
+struct scsi_initiator_cmd_params {
+	struct scsi_sge extended_cdb_sge;
+	struct regpair sense_data_buffer_phys_addr;
+};
+#endif
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
new file mode 100644
index 000000000..772218445
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ */ + +#ifndef _QEDI_GBL_H_ +#define _QEDI_GBL_H_ + +#include "qedi_iscsi.h" + +#ifdef CONFIG_DEBUG_FS +extern int qedi_do_not_recover; +#else +#define qedi_do_not_recover (0) +#endif + +extern uint qedi_io_tracing; + +extern const struct scsi_host_template qedi_host_template; +extern struct iscsi_transport qedi_iscsi_transport; +extern const struct qed_iscsi_ops *qedi_ops; +extern const struct qedi_debugfs_ops qedi_debugfs_ops[]; +extern const struct file_operations qedi_dbg_fops[]; +extern const struct attribute_group *qedi_shost_groups[]; + +int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep); +void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep); + +int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, + struct iscsi_task *task); +int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn, + struct iscsi_task *task); +int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask); +int qedi_send_iscsi_text(struct qedi_conn *qedi_conn, + struct iscsi_task *task); +int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn, + struct iscsi_task *task, + char *datap, int data_len, int unsol); +int qedi_iscsi_send_ioreq(struct iscsi_task *task); +int qedi_get_task_idx(struct qedi_ctx *qedi); +void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx); +int qedi_iscsi_cleanup_task(struct iscsi_task *task, + bool mark_cmd_node_deleted); +void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd); +void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, + struct qedi_cmd *qedi_cmd); +void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt); +void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid); +void qedi_process_iscsi_error(struct qedi_endpoint *ep, + struct iscsi_eqe_data *data); +void qedi_start_conn_recovery(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn); +struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid); +void qedi_process_tcp_error(struct qedi_endpoint *ep, + struct iscsi_eqe_data *data); +void qedi_mark_device_missing(struct iscsi_cls_session *cls_session); +void qedi_mark_device_available(struct iscsi_cls_session *cls_session); +void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu); +int qedi_recover_all_conns(struct qedi_ctx *qedi); +void qedi_fp_process_cqes(struct qedi_work *work); +int qedi_cleanup_all_io(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn, + struct iscsi_task *task, bool in_recovery); +void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task, + u16 tid, int8_t direction); +int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id); +u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl); +void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id); +void qedi_clearsq(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn, + struct iscsi_task *task); + +#endif diff --git a/drivers/scsi/qedi/qedi_hsi.h b/drivers/scsi/qedi/qedi_hsi.h new file mode 100644 index 000000000..d82ab99ac --- /dev/null +++ b/drivers/scsi/qedi/qedi_hsi.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. 
+ */ +#ifndef __QEDI_HSI__ +#define __QEDI_HSI__ +/* + * Add include to common target + */ +#include + +/* + * Add include to common storage target + */ +#include + +/* + * Add include to common TCP target + */ +#include + +/* + * Add include to common iSCSI target for both eCore and protocol driver + */ +#include + +/* + * iSCSI CMDQ element + */ +struct iscsi_cmdqe { + __le16 conn_id; + u8 invalid_command; + u8 cmd_hdr_type; + __le32 reserved1[2]; + __le32 cmd_payload[13]; +}; + +/* + * iSCSI CMD header type + */ +enum iscsi_cmd_hdr_type { + ISCSI_CMD_HDR_TYPE_BHS_ONLY /* iSCSI BHS with no expected AHS */, + ISCSI_CMD_HDR_TYPE_BHS_W_AHS /* iSCSI BHS with expected AHS */, + ISCSI_CMD_HDR_TYPE_AHS /* iSCSI AHS */, + MAX_ISCSI_CMD_HDR_TYPE +}; + +#endif /* __QEDI_HSI__ */ diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c new file mode 100644 index 000000000..6ed8ef976 --- /dev/null +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -0,0 +1,1710 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + */ + +#include +#include +#include +#include +#include + +#include "qedi.h" +#include "qedi_iscsi.h" +#include "qedi_gbl.h" + +int qedi_recover_all_conns(struct qedi_ctx *qedi) +{ + struct qedi_conn *qedi_conn; + int i; + + for (i = 0; i < qedi->max_active_conns; i++) { + qedi_conn = qedi_get_conn_from_id(qedi, i); + if (!qedi_conn) + continue; + + qedi_start_conn_recovery(qedi, qedi_conn); + } + + return SUCCESS; +} + +static int qedi_eh_host_reset(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *shost = cmd->device->host; + struct qedi_ctx *qedi; + + qedi = iscsi_host_priv(shost); + + return qedi_recover_all_conns(qedi); +} + +const struct scsi_host_template qedi_host_template = { + .module = THIS_MODULE, + .name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver", + .proc_name = QEDI_MODULE_NAME, + .queuecommand = iscsi_queuecommand, + .eh_timed_out = iscsi_eh_cmd_timed_out, + .eh_abort_handler = iscsi_eh_abort, + .eh_device_reset_handler = iscsi_eh_device_reset, + .eh_target_reset_handler = iscsi_eh_recover_target, + .eh_host_reset_handler = qedi_eh_host_reset, + .target_alloc = iscsi_target_alloc, + .change_queue_depth = scsi_change_queue_depth, + .can_queue = QEDI_MAX_ISCSI_TASK, + .this_id = -1, + .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD, + .max_sectors = 0xffff, + .dma_boundary = QEDI_HW_DMA_BOUNDARY, + .cmd_per_lun = 128, + .shost_groups = qedi_shost_groups, + .cmd_size = sizeof(struct iscsi_cmd), +}; + +static void qedi_conn_free_login_resources(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn) +{ + if (qedi_conn->gen_pdu.resp_bd_tbl) { + dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, + qedi_conn->gen_pdu.resp_bd_tbl, + qedi_conn->gen_pdu.resp_bd_dma); + qedi_conn->gen_pdu.resp_bd_tbl = NULL; + } + + if (qedi_conn->gen_pdu.req_bd_tbl) { + dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, + qedi_conn->gen_pdu.req_bd_tbl, + qedi_conn->gen_pdu.req_bd_dma); + qedi_conn->gen_pdu.req_bd_tbl = NULL; + } + + if (qedi_conn->gen_pdu.resp_buf) { + dma_free_coherent(&qedi->pdev->dev, + ISCSI_DEF_MAX_RECV_SEG_LEN, + qedi_conn->gen_pdu.resp_buf, + qedi_conn->gen_pdu.resp_dma_addr); + qedi_conn->gen_pdu.resp_buf = NULL; + } + + if (qedi_conn->gen_pdu.req_buf) { + dma_free_coherent(&qedi->pdev->dev, + ISCSI_DEF_MAX_RECV_SEG_LEN, + qedi_conn->gen_pdu.req_buf, + qedi_conn->gen_pdu.req_dma_addr); + qedi_conn->gen_pdu.req_buf = NULL; + } +} + +static int qedi_conn_alloc_login_resources(struct qedi_ctx *qedi, + struct 
qedi_conn *qedi_conn) +{ + qedi_conn->gen_pdu.req_buf = + dma_alloc_coherent(&qedi->pdev->dev, + ISCSI_DEF_MAX_RECV_SEG_LEN, + &qedi_conn->gen_pdu.req_dma_addr, + GFP_KERNEL); + if (!qedi_conn->gen_pdu.req_buf) + goto login_req_buf_failure; + + qedi_conn->gen_pdu.req_buf_size = 0; + qedi_conn->gen_pdu.req_wr_ptr = qedi_conn->gen_pdu.req_buf; + + qedi_conn->gen_pdu.resp_buf = + dma_alloc_coherent(&qedi->pdev->dev, + ISCSI_DEF_MAX_RECV_SEG_LEN, + &qedi_conn->gen_pdu.resp_dma_addr, + GFP_KERNEL); + if (!qedi_conn->gen_pdu.resp_buf) + goto login_resp_buf_failure; + + qedi_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN; + qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf; + + qedi_conn->gen_pdu.req_bd_tbl = + dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, + &qedi_conn->gen_pdu.req_bd_dma, GFP_KERNEL); + if (!qedi_conn->gen_pdu.req_bd_tbl) + goto login_req_bd_tbl_failure; + + qedi_conn->gen_pdu.resp_bd_tbl = + dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, + &qedi_conn->gen_pdu.resp_bd_dma, + GFP_KERNEL); + if (!qedi_conn->gen_pdu.resp_bd_tbl) + goto login_resp_bd_tbl_failure; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SESS, + "Allocation successful, cid=0x%x\n", + qedi_conn->iscsi_conn_id); + return 0; + +login_resp_bd_tbl_failure: + dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, + qedi_conn->gen_pdu.req_bd_tbl, + qedi_conn->gen_pdu.req_bd_dma); + qedi_conn->gen_pdu.req_bd_tbl = NULL; + +login_req_bd_tbl_failure: + dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, + qedi_conn->gen_pdu.resp_buf, + qedi_conn->gen_pdu.resp_dma_addr); + qedi_conn->gen_pdu.resp_buf = NULL; +login_resp_buf_failure: + dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, + qedi_conn->gen_pdu.req_buf, + qedi_conn->gen_pdu.req_dma_addr); + qedi_conn->gen_pdu.req_buf = NULL; +login_req_buf_failure: + iscsi_conn_printk(KERN_ERR, qedi_conn->cls_conn->dd_data, + "login resource alloc failed!!\n"); + return -ENOMEM; +} + +static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi, + struct iscsi_session *session) +{ + int i; + + for (i = 0; i < session->cmds_max; i++) { + struct iscsi_task *task = session->cmds[i]; + struct qedi_cmd *cmd = task->dd_data; + + if (cmd->io_tbl.sge_tbl) + dma_free_coherent(&qedi->pdev->dev, + QEDI_ISCSI_MAX_BDS_PER_CMD * + sizeof(struct scsi_sge), + cmd->io_tbl.sge_tbl, + cmd->io_tbl.sge_tbl_dma); + + if (cmd->sense_buffer) + dma_free_coherent(&qedi->pdev->dev, + SCSI_SENSE_BUFFERSIZE, + cmd->sense_buffer, + cmd->sense_buffer_dma); + } +} + +static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session, + struct qedi_cmd *cmd) +{ + struct qedi_io_bdt *io = &cmd->io_tbl; + struct scsi_sge *sge; + + io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev, + QEDI_ISCSI_MAX_BDS_PER_CMD * + sizeof(*sge), + &io->sge_tbl_dma, GFP_KERNEL); + if (!io->sge_tbl) { + iscsi_session_printk(KERN_ERR, session, + "Could not allocate BD table.\n"); + return -ENOMEM; + } + + io->sge_valid = 0; + return 0; +} + +static int qedi_setup_cmd_pool(struct qedi_ctx *qedi, + struct iscsi_session *session) +{ + int i; + + for (i = 0; i < session->cmds_max; i++) { + struct iscsi_task *task = session->cmds[i]; + struct qedi_cmd *cmd = task->dd_data; + + task->hdr = &cmd->hdr; + task->hdr_max = sizeof(struct iscsi_hdr); + + if (qedi_alloc_sget(qedi, session, cmd)) + goto free_sgets; + + cmd->sense_buffer = dma_alloc_coherent(&qedi->pdev->dev, + SCSI_SENSE_BUFFERSIZE, + &cmd->sense_buffer_dma, + GFP_KERNEL); + if (!cmd->sense_buffer) + goto free_sgets; + } + + 
return 0; + +free_sgets: + qedi_destroy_cmd_pool(qedi, session); + return -ENOMEM; +} + +static struct iscsi_cls_session * +qedi_session_create(struct iscsi_endpoint *ep, u16 cmds_max, + u16 qdepth, uint32_t initial_cmdsn) +{ + struct Scsi_Host *shost; + struct iscsi_cls_session *cls_session; + struct qedi_ctx *qedi; + struct qedi_endpoint *qedi_ep; + + if (!ep) + return NULL; + + qedi_ep = ep->dd_data; + shost = qedi_ep->qedi->shost; + qedi = iscsi_host_priv(shost); + + if (cmds_max > qedi->max_sqes) + cmds_max = qedi->max_sqes; + else if (cmds_max < QEDI_SQ_WQES_MIN) + cmds_max = QEDI_SQ_WQES_MIN; + + cls_session = iscsi_session_setup(&qedi_iscsi_transport, shost, + cmds_max, 0, sizeof(struct qedi_cmd), + initial_cmdsn, ISCSI_MAX_TARGET); + if (!cls_session) { + QEDI_ERR(&qedi->dbg_ctx, + "Failed to setup session for ep=%p\n", qedi_ep); + return NULL; + } + + if (qedi_setup_cmd_pool(qedi, cls_session->dd_data)) { + QEDI_ERR(&qedi->dbg_ctx, + "Failed to setup cmd pool for ep=%p\n", qedi_ep); + goto session_teardown; + } + + return cls_session; + +session_teardown: + iscsi_session_teardown(cls_session); + return NULL; +} + +static void qedi_session_destroy(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *session = cls_session->dd_data; + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct qedi_ctx *qedi = iscsi_host_priv(shost); + + qedi_destroy_cmd_pool(qedi, session); + iscsi_session_teardown(cls_session); +} + +static struct iscsi_cls_conn * +qedi_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid) +{ + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct qedi_ctx *qedi = iscsi_host_priv(shost); + struct iscsi_cls_conn *cls_conn; + struct qedi_conn *qedi_conn; + struct iscsi_conn *conn; + + cls_conn = iscsi_conn_setup(cls_session, sizeof(*qedi_conn), + cid); + if (!cls_conn) { + QEDI_ERR(&qedi->dbg_ctx, + "conn_new: iscsi conn setup failed, cid=0x%x, cls_sess=%p!\n", + cid, cls_session); + return NULL; + } + + conn = cls_conn->dd_data; + qedi_conn = conn->dd_data; + qedi_conn->cls_conn = cls_conn; + qedi_conn->qedi = qedi; + qedi_conn->ep = NULL; + qedi_conn->active_cmd_count = 0; + INIT_LIST_HEAD(&qedi_conn->active_cmd_list); + spin_lock_init(&qedi_conn->list_lock); + + if (qedi_conn_alloc_login_resources(qedi, qedi_conn)) { + iscsi_conn_printk(KERN_ALERT, conn, + "conn_new: login resc alloc failed, cid=0x%x, cls_sess=%p!!\n", + cid, cls_session); + goto free_conn; + } + + return cls_conn; + +free_conn: + iscsi_conn_teardown(cls_conn); + return NULL; +} + +void qedi_mark_device_missing(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *session = cls_session->dd_data; + struct qedi_conn *qedi_conn = session->leadconn->dd_data; + + spin_lock_bh(&session->frwd_lock); + set_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags); + spin_unlock_bh(&session->frwd_lock); +} + +void qedi_mark_device_available(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *session = cls_session->dd_data; + struct qedi_conn *qedi_conn = session->leadconn->dd_data; + + spin_lock_bh(&session->frwd_lock); + clear_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags); + spin_unlock_bh(&session->frwd_lock); +} + +static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn) +{ + u32 iscsi_cid = qedi_conn->iscsi_conn_id; + + if (qedi->cid_que.conn_cid_tbl[iscsi_cid]) { + iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data, + "conn bind - entry #%d not free\n", + iscsi_cid); + return -EBUSY; + } + + 
qedi->cid_que.conn_cid_tbl[iscsi_cid] = qedi_conn; + return 0; +} + +struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid) +{ + if (!qedi->cid_que.conn_cid_tbl) { + QEDI_ERR(&qedi->dbg_ctx, "missing conn<->cid table\n"); + return NULL; + + } else if (iscsi_cid >= qedi->max_active_conns) { + QEDI_ERR(&qedi->dbg_ctx, "wrong cid #%d\n", iscsi_cid); + return NULL; + } + return qedi->cid_que.conn_cid_tbl[iscsi_cid]; +} + +static int qedi_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, + u64 transport_fd, int is_leading) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct qedi_conn *qedi_conn = conn->dd_data; + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct qedi_ctx *qedi = iscsi_host_priv(shost); + struct qedi_endpoint *qedi_ep; + struct iscsi_endpoint *ep; + int rc = 0; + + ep = iscsi_lookup_endpoint(transport_fd); + if (!ep) + return -EINVAL; + + qedi_ep = ep->dd_data; + if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) || + (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) { + rc = -EINVAL; + goto put_ep; + } + + if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { + rc = -EINVAL; + goto put_ep; + } + + + qedi_ep->conn = qedi_conn; + qedi_conn->ep = qedi_ep; + qedi_conn->iscsi_ep = ep; + qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid; + qedi_conn->fw_cid = qedi_ep->fw_cid; + qedi_conn->cmd_cleanup_req = 0; + atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0); + + if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) { + rc = -EINVAL; + goto put_ep; + } + + + spin_lock_init(&qedi_conn->tmf_work_lock); + INIT_LIST_HEAD(&qedi_conn->tmf_work_list); + init_waitqueue_head(&qedi_conn->wait_queue); +put_ep: + iscsi_put_endpoint(ep); + return rc; +} + +static int qedi_iscsi_update_conn(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn) +{ + struct qed_iscsi_params_update *conn_info; + struct iscsi_cls_conn *cls_conn = qedi_conn->cls_conn; + struct iscsi_conn *conn = cls_conn->dd_data; + struct qedi_endpoint *qedi_ep; + int rval; + + qedi_ep = qedi_conn->ep; + + conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL); + if (!conn_info) { + QEDI_ERR(&qedi->dbg_ctx, "memory alloc failed\n"); + return -ENOMEM; + } + + conn_info->update_flag = 0; + + if (conn->hdrdgst_en) + SET_FIELD(conn_info->update_flag, + ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, true); + if (conn->datadgst_en) + SET_FIELD(conn_info->update_flag, + ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, true); + if (conn->session->initial_r2t_en) + SET_FIELD(conn_info->update_flag, + ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T, + true); + if (conn->session->imm_data_en) + SET_FIELD(conn_info->update_flag, + ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA, + true); + + conn_info->max_seq_size = conn->session->max_burst; + conn_info->max_recv_pdu_length = conn->max_recv_dlength; + conn_info->max_send_pdu_length = conn->max_xmit_dlength; + conn_info->first_seq_length = conn->session->first_burst; + conn_info->exp_stat_sn = conn->exp_statsn; + + rval = qedi_ops->update_conn(qedi->cdev, qedi_ep->handle, + conn_info); + if (rval) { + rval = -ENXIO; + QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n"); + } + + kfree(conn_info); + return rval; +} + +static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en) +{ + u16 mss = 0; + u16 hdrs = TCP_HDR_LEN; + + if (is_ipv6) + hdrs += IPV6_HDR_LEN; + else + hdrs += IPV4_HDR_LEN; + + mss = pmtu - hdrs; + + if (!mss) + mss = DEF_MSS; + + return mss; +} + +static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep) +{ + 
struct qed_iscsi_params_offload *conn_info; + struct qedi_ctx *qedi = qedi_ep->qedi; + int rval; + int i; + + conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL); + if (!conn_info) { + QEDI_ERR(&qedi->dbg_ctx, + "Failed to allocate memory ep=%p\n", qedi_ep); + return -ENOMEM; + } + + ether_addr_copy(conn_info->src.mac, qedi_ep->src_mac); + ether_addr_copy(conn_info->dst.mac, qedi_ep->dst_mac); + + conn_info->src.ip[0] = ntohl(qedi_ep->src_addr[0]); + conn_info->dst.ip[0] = ntohl(qedi_ep->dst_addr[0]); + + if (qedi_ep->ip_type == TCP_IPV4) { + conn_info->ip_version = 0; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "After ntohl: src_addr=%pI4, dst_addr=%pI4\n", + qedi_ep->src_addr, qedi_ep->dst_addr); + } else { + for (i = 1; i < 4; i++) { + conn_info->src.ip[i] = ntohl(qedi_ep->src_addr[i]); + conn_info->dst.ip[i] = ntohl(qedi_ep->dst_addr[i]); + } + + conn_info->ip_version = 1; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "After ntohl: src_addr=%pI6, dst_addr=%pI6\n", + qedi_ep->src_addr, qedi_ep->dst_addr); + } + + conn_info->src.port = qedi_ep->src_port; + conn_info->dst.port = qedi_ep->dst_port; + + conn_info->layer_code = ISCSI_SLOW_PATH_LAYER_CODE; + conn_info->sq_pbl_addr = qedi_ep->sq_pbl_dma; + conn_info->vlan_id = qedi_ep->vlan_id; + + SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_TS_EN, 1); + SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_EN, 1); + SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_CNT_EN, 1); + SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_KA_EN, 1); + + conn_info->default_cq = (qedi_ep->fw_cid % qedi->num_queues); + + conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT; + conn_info->dup_ack_theshold = 3; + conn_info->rcv_wnd = 65535; + + conn_info->ss_thresh = 65535; + conn_info->srtt = 300; + conn_info->rtt_var = 150; + conn_info->flow_label = 0; + conn_info->ka_timeout = DEF_KA_TIMEOUT; + conn_info->ka_interval = DEF_KA_INTERVAL; + conn_info->max_rt_time = DEF_MAX_RT_TIME; + conn_info->ttl = DEF_TTL; + conn_info->tos_or_tc = DEF_TOS; + conn_info->remote_port = qedi_ep->dst_port; + conn_info->local_port = qedi_ep->src_port; + + conn_info->mss = qedi_calc_mss(qedi_ep->pmtu, + (qedi_ep->ip_type == TCP_IPV6), + 1, (qedi_ep->vlan_id != 0)); + + conn_info->cwnd = DEF_MAX_CWND * conn_info->mss; + conn_info->rcv_wnd_scale = 4; + conn_info->da_timeout_value = 200; + conn_info->ack_frequency = 2; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Default cq index [%d], mss [%d]\n", + conn_info->default_cq, conn_info->mss); + + /* Prepare the doorbell parameters */ + qedi_ep->db_data.agg_flags = 0; + qedi_ep->db_data.params = 0; + SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_DEST, DB_DEST_XCM); + SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_AGG_CMD, + DB_AGG_CMD_MAX); + SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_AGG_VAL_SEL, + DQ_XCM_ISCSI_SQ_PROD_CMD); + SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_BYPASS_EN, 1); + + /* Register doorbell with doorbell recovery mechanism */ + rval = qedi_ops->common->db_recovery_add(qedi->cdev, + qedi_ep->p_doorbell, + &qedi_ep->db_data, + DB_REC_WIDTH_32B, + DB_REC_KERNEL); + if (rval) { + kfree(conn_info); + return rval; + } + + rval = qedi_ops->offload_conn(qedi->cdev, qedi_ep->handle, conn_info); + if (rval) { + /* delete doorbell from doorbell recovery mechanism */ + rval = qedi_ops->common->db_recovery_del(qedi->cdev, + qedi_ep->p_doorbell, + &qedi_ep->db_data); + + QEDI_ERR(&qedi->dbg_ctx, "offload_conn returned %d, ep=%p\n", + rval, qedi_ep); + } + + kfree(conn_info); + return rval; +} + 
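+/*
+ * Reviewer note (illustrative only, not part of the upstream driver code):
+ * a worked example of the MSS that qedi_calc_mss() above yields, assuming
+ * the conventional header sizes behind TCP_HDR_LEN (20 bytes), IPV4_HDR_LEN
+ * (20 bytes) and IPV6_HDR_LEN (40 bytes) and a standard 1500-byte path MTU:
+ *
+ *   IPv4: mss = 1500 - (20 + 20) = 1460
+ *   IPv6: mss = 1500 - (20 + 40) = 1440
+ *
+ * qedi_iscsi_offload_conn() then sizes the initial congestion window from
+ * that value (conn_info->cwnd = DEF_MAX_CWND * conn_info->mss) and spreads
+ * connections across completion queues with a simple modulo of the firmware
+ * CID (conn_info->default_cq = qedi_ep->fw_cid % qedi->num_queues).
+ */
+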
+static int qedi_conn_start(struct iscsi_cls_conn *cls_conn) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct qedi_conn *qedi_conn = conn->dd_data; + struct qedi_ctx *qedi; + int rval; + + qedi = qedi_conn->qedi; + + rval = qedi_iscsi_update_conn(qedi, qedi_conn); + if (rval) { + iscsi_conn_printk(KERN_ALERT, conn, + "conn_start: FW offload conn failed.\n"); + rval = -EINVAL; + goto start_err; + } + + spin_lock(&qedi_conn->tmf_work_lock); + qedi_conn->fw_cleanup_works = 0; + qedi_conn->ep_disconnect_starting = false; + spin_unlock(&qedi_conn->tmf_work_lock); + + qedi_conn->abrt_conn = 0; + + rval = iscsi_conn_start(cls_conn); + if (rval) { + iscsi_conn_printk(KERN_ALERT, conn, + "iscsi_conn_start: FW offload conn failed!!\n"); + } + +start_err: + return rval; +} + +static void qedi_conn_destroy(struct iscsi_cls_conn *cls_conn) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct qedi_conn *qedi_conn = conn->dd_data; + struct Scsi_Host *shost; + struct qedi_ctx *qedi; + + shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn)); + qedi = iscsi_host_priv(shost); + + qedi_conn_free_login_resources(qedi, qedi_conn); + iscsi_conn_teardown(cls_conn); +} + +static int qedi_ep_get_param(struct iscsi_endpoint *ep, + enum iscsi_param param, char *buf) +{ + struct qedi_endpoint *qedi_ep = ep->dd_data; + int len; + + if (!qedi_ep) + return -ENOTCONN; + + switch (param) { + case ISCSI_PARAM_CONN_PORT: + len = sprintf(buf, "%hu\n", qedi_ep->dst_port); + break; + case ISCSI_PARAM_CONN_ADDRESS: + if (qedi_ep->ip_type == TCP_IPV4) + len = sprintf(buf, "%pI4\n", qedi_ep->dst_addr); + else + len = sprintf(buf, "%pI6\n", qedi_ep->dst_addr); + break; + default: + return -ENOTCONN; + } + + return len; +} + +static int qedi_host_get_param(struct Scsi_Host *shost, + enum iscsi_host_param param, char *buf) +{ + struct qedi_ctx *qedi; + int len; + + qedi = iscsi_host_priv(shost); + + switch (param) { + case ISCSI_HOST_PARAM_HWADDRESS: + len = sysfs_format_mac(buf, qedi->mac, 6); + break; + case ISCSI_HOST_PARAM_NETDEV_NAME: + len = sprintf(buf, "host%d\n", shost->host_no); + break; + case ISCSI_HOST_PARAM_IPADDRESS: + if (qedi->ip_type == TCP_IPV4) + len = sprintf(buf, "%pI4\n", qedi->src_ip); + else + len = sprintf(buf, "%pI6\n", qedi->src_ip); + break; + default: + return iscsi_host_get_param(shost, param, buf); + } + + return len; +} + +static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn, + struct iscsi_stats *stats) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct qed_iscsi_stats iscsi_stats; + struct Scsi_Host *shost; + struct qedi_ctx *qedi; + + shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn)); + qedi = iscsi_host_priv(shost); + qedi_ops->get_stats(qedi->cdev, &iscsi_stats); + + conn->txdata_octets = iscsi_stats.iscsi_tx_bytes_cnt; + conn->rxdata_octets = iscsi_stats.iscsi_rx_bytes_cnt; + conn->dataout_pdus_cnt = (uint32_t)iscsi_stats.iscsi_tx_data_pdu_cnt; + conn->datain_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_data_pdu_cnt; + conn->r2t_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_r2t_pdu_cnt; + + stats->txdata_octets = conn->txdata_octets; + stats->rxdata_octets = conn->rxdata_octets; + stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; + stats->dataout_pdus = conn->dataout_pdus_cnt; + stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; + stats->datain_pdus = conn->datain_pdus_cnt; + stats->r2t_pdus = conn->r2t_pdus_cnt; + stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; + stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; + stats->digest_err = 0; + stats->timeout_err = 0; 
+ strcpy(stats->custom[0].desc, "eh_abort_cnt"); + stats->custom[0].value = conn->eh_abort_cnt; + stats->custom_length = 1; +} + +static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn) +{ + struct scsi_sge *bd_tbl; + + bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + + bd_tbl->sge_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); + bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr; + bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr - + qedi_conn->gen_pdu.req_buf; + bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + bd_tbl->sge_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); + bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr; + bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN; +} + +static int qedi_iscsi_send_generic_request(struct iscsi_task *task) +{ + struct qedi_cmd *cmd = task->dd_data; + struct qedi_conn *qedi_conn = cmd->conn; + char *buf; + int data_len; + int rc = 0; + + qedi_iscsi_prep_generic_pdu_bd(qedi_conn); + switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { + case ISCSI_OP_LOGIN: + qedi_send_iscsi_login(qedi_conn, task); + break; + case ISCSI_OP_NOOP_OUT: + data_len = qedi_conn->gen_pdu.req_buf_size; + buf = qedi_conn->gen_pdu.req_buf; + if (data_len) + rc = qedi_send_iscsi_nopout(qedi_conn, task, + buf, data_len, 1); + else + rc = qedi_send_iscsi_nopout(qedi_conn, task, + NULL, 0, 1); + break; + case ISCSI_OP_LOGOUT: + rc = qedi_send_iscsi_logout(qedi_conn, task); + break; + case ISCSI_OP_SCSI_TMFUNC: + rc = qedi_send_iscsi_tmf(qedi_conn, task); + break; + case ISCSI_OP_TEXT: + rc = qedi_send_iscsi_text(qedi_conn, task); + break; + default: + iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data, + "unsupported op 0x%x\n", task->hdr->opcode); + } + + return rc; +} + +static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) +{ + struct qedi_conn *qedi_conn = conn->dd_data; + struct qedi_cmd *cmd = task->dd_data; + + memset(qedi_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN); + + qedi_conn->gen_pdu.req_buf_size = task->data_count; + + if (task->data_count) { + memcpy(qedi_conn->gen_pdu.req_buf, task->data, + task->data_count); + qedi_conn->gen_pdu.req_wr_ptr = + qedi_conn->gen_pdu.req_buf + task->data_count; + } + + cmd->conn = conn->dd_data; + return qedi_iscsi_send_generic_request(task); +} + +static int qedi_task_xmit(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + struct qedi_conn *qedi_conn = conn->dd_data; + struct qedi_cmd *cmd = task->dd_data; + struct scsi_cmnd *sc = task->sc; + + /* Clear now so in cleanup_task we know it didn't make it */ + cmd->scsi_cmd = NULL; + cmd->task_id = U16_MAX; + + if (test_bit(QEDI_IN_SHUTDOWN, &qedi_conn->qedi->flags)) + return -ENODEV; + + if (test_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags)) + return -EACCES; + + cmd->state = 0; + cmd->task = NULL; + cmd->use_slowpath = false; + cmd->conn = qedi_conn; + cmd->task = task; + cmd->io_cmd_in_list = false; + INIT_LIST_HEAD(&cmd->io_cmd); + + if (!sc) + return qedi_mtask_xmit(conn, task); + + cmd->scsi_cmd = sc; + return qedi_iscsi_send_ioreq(task); +} + +static void qedi_offload_work(struct work_struct *work) +{ + struct qedi_endpoint *qedi_ep = + container_of(work, struct qedi_endpoint, offload_work); + struct qedi_ctx *qedi; + int wait_delay = 5 * HZ; + int ret; + + qedi = qedi_ep->qedi; + + ret = qedi_iscsi_offload_conn(qedi_ep); + if (ret) { + QEDI_ERR(&qedi->dbg_ctx, + "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n", + qedi_ep->iscsi_cid, 
qedi_ep, ret); + qedi_ep->state = EP_STATE_OFLDCONN_FAILED; + return; + } + + ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait, + (qedi_ep->state == + EP_STATE_OFLDCONN_COMPL), + wait_delay); + if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) { + qedi_ep->state = EP_STATE_OFLDCONN_FAILED; + QEDI_ERR(&qedi->dbg_ctx, + "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n", + qedi_ep->iscsi_cid, qedi_ep); + } +} + +static struct iscsi_endpoint * +qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, + int non_blocking) +{ + struct qedi_ctx *qedi; + struct iscsi_endpoint *ep; + struct qedi_endpoint *qedi_ep; + struct sockaddr_in *addr; + struct sockaddr_in6 *addr6; + struct iscsi_path path_req; + u32 msg_type = ISCSI_KEVENT_IF_DOWN; + u32 iscsi_cid = QEDI_CID_RESERVED; + u16 len = 0; + char *buf = NULL; + int ret, tmp; + + if (!shost) { + ret = -ENXIO; + QEDI_ERR(NULL, "shost is NULL\n"); + return ERR_PTR(ret); + } + + if (qedi_do_not_recover) { + ret = -ENOMEM; + return ERR_PTR(ret); + } + + qedi = iscsi_host_priv(shost); + + if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) || + test_bit(QEDI_IN_RECOVERY, &qedi->flags)) { + ret = -ENOMEM; + return ERR_PTR(ret); + } + + if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) { + QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n"); + return ERR_PTR(-ENXIO); + } + + ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint)); + if (!ep) { + QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n"); + ret = -ENOMEM; + return ERR_PTR(ret); + } + qedi_ep = ep->dd_data; + memset(qedi_ep, 0, sizeof(struct qedi_endpoint)); + INIT_WORK(&qedi_ep->offload_work, qedi_offload_work); + qedi_ep->state = EP_STATE_IDLE; + qedi_ep->iscsi_cid = (u32)-1; + qedi_ep->qedi = qedi; + + if (dst_addr->sa_family == AF_INET) { + addr = (struct sockaddr_in *)dst_addr; + memcpy(qedi_ep->dst_addr, &addr->sin_addr.s_addr, + sizeof(struct in_addr)); + qedi_ep->dst_port = ntohs(addr->sin_port); + qedi_ep->ip_type = TCP_IPV4; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "dst_addr=%pI4, dst_port=%u\n", + qedi_ep->dst_addr, qedi_ep->dst_port); + } else if (dst_addr->sa_family == AF_INET6) { + addr6 = (struct sockaddr_in6 *)dst_addr; + memcpy(qedi_ep->dst_addr, &addr6->sin6_addr, + sizeof(struct in6_addr)); + qedi_ep->dst_port = ntohs(addr6->sin6_port); + qedi_ep->ip_type = TCP_IPV6; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "dst_addr=%pI6, dst_port=%u\n", + qedi_ep->dst_addr, qedi_ep->dst_port); + } else { + QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n"); + } + + ret = qedi_alloc_sq(qedi, qedi_ep); + if (ret) + goto ep_conn_exit; + + ret = qedi_ops->acquire_conn(qedi->cdev, &qedi_ep->handle, + &qedi_ep->fw_cid, &qedi_ep->p_doorbell); + + if (ret) { + QEDI_ERR(&qedi->dbg_ctx, "Could not acquire connection\n"); + ret = -ENXIO; + goto ep_free_sq; + } + + iscsi_cid = qedi_ep->handle; + qedi_ep->iscsi_cid = iscsi_cid; + + init_waitqueue_head(&qedi_ep->ofld_wait); + init_waitqueue_head(&qedi_ep->tcp_ofld_wait); + qedi_ep->state = EP_STATE_OFLDCONN_START; + qedi->ep_tbl[iscsi_cid] = qedi_ep; + + buf = (char *)&path_req; + len = sizeof(path_req); + memset(&path_req, 0, len); + + msg_type = ISCSI_KEVENT_PATH_REQ; + path_req.handle = (u64)qedi_ep->iscsi_cid; + path_req.pmtu = qedi->ll2_mtu; + qedi_ep->pmtu = qedi->ll2_mtu; + if (qedi_ep->ip_type == TCP_IPV4) { + memcpy(&path_req.dst.v4_addr, &qedi_ep->dst_addr, + sizeof(struct in_addr)); + path_req.ip_addr_len = 4; + } else { + memcpy(&path_req.dst.v6_addr, &qedi_ep->dst_addr, + sizeof(struct in6_addr)); + path_req.ip_addr_len = 
16; + } + + ret = iscsi_offload_mesg(shost, &qedi_iscsi_transport, msg_type, buf, + len); + if (ret) { + QEDI_ERR(&qedi->dbg_ctx, + "iscsi_offload_mesg() failed for cid=0x%x ret=%d\n", + iscsi_cid, ret); + goto ep_rel_conn; + } + + atomic_inc(&qedi->num_offloads); + return ep; + +ep_rel_conn: + qedi->ep_tbl[iscsi_cid] = NULL; + tmp = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle); + if (tmp) + QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n", + tmp); +ep_free_sq: + qedi_free_sq(qedi, qedi_ep); +ep_conn_exit: + iscsi_destroy_endpoint(ep); + return ERR_PTR(ret); +} + +static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) +{ + struct qedi_endpoint *qedi_ep; + int ret = 0; + + if (qedi_do_not_recover) + return 1; + + qedi_ep = ep->dd_data; + if (qedi_ep->state == EP_STATE_IDLE || + qedi_ep->state == EP_STATE_OFLDCONN_NONE || + qedi_ep->state == EP_STATE_OFLDCONN_FAILED) + return -1; + + if (qedi_ep->state == EP_STATE_OFLDCONN_COMPL) + ret = 1; + + ret = wait_event_interruptible_timeout(qedi_ep->ofld_wait, + QEDI_OFLD_WAIT_STATE(qedi_ep), + msecs_to_jiffies(timeout_ms)); + + if (qedi_ep->state == EP_STATE_OFLDCONN_FAILED) + ret = -1; + + if (ret > 0) + return 1; + else if (!ret) + return 0; + else + return ret; +} + +static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn) +{ + struct qedi_cmd *cmd, *cmd_tmp; + + spin_lock(&qedi_conn->list_lock); + list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list, + io_cmd) { + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + } + spin_unlock(&qedi_conn->list_lock); +} + +static void qedi_ep_disconnect(struct iscsi_endpoint *ep) +{ + struct qedi_endpoint *qedi_ep; + struct qedi_conn *qedi_conn = NULL; + struct qedi_ctx *qedi; + int ret = 0; + int wait_delay; + int abrt_conn = 0; + + wait_delay = 60 * HZ + DEF_MAX_RT_TIME; + qedi_ep = ep->dd_data; + qedi = qedi_ep->qedi; + + flush_work(&qedi_ep->offload_work); + + if (qedi_ep->state == EP_STATE_OFLDCONN_START) + goto ep_exit_recover; + + if (qedi_ep->conn) { + qedi_conn = qedi_ep->conn; + abrt_conn = qedi_conn->abrt_conn; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "cid=0x%x qedi_ep=%p waiting for %d tmfs\n", + qedi_ep->iscsi_cid, qedi_ep, + qedi_conn->fw_cleanup_works); + + spin_lock(&qedi_conn->tmf_work_lock); + qedi_conn->ep_disconnect_starting = true; + while (qedi_conn->fw_cleanup_works > 0) { + spin_unlock(&qedi_conn->tmf_work_lock); + msleep(1000); + spin_lock(&qedi_conn->tmf_work_lock); + } + spin_unlock(&qedi_conn->tmf_work_lock); + + if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) { + if (qedi_do_not_recover) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Do not recover cid=0x%x\n", + qedi_ep->iscsi_cid); + goto ep_exit_recover; + } + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Reset recovery cid=0x%x, qedi_ep=%p, state=0x%x\n", + qedi_ep->iscsi_cid, qedi_ep, qedi_ep->state); + qedi_cleanup_active_cmd_list(qedi_conn); + goto ep_release_conn; + } + } + + if (qedi_do_not_recover) + goto ep_exit_recover; + + switch (qedi_ep->state) { + case EP_STATE_OFLDCONN_START: + case EP_STATE_OFLDCONN_NONE: + goto ep_release_conn; + case EP_STATE_OFLDCONN_FAILED: + break; + case EP_STATE_OFLDCONN_COMPL: + if (unlikely(!qedi_conn)) + break; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Active cmd count=%d, abrt_conn=%d, ep state=0x%x, cid=0x%x, qedi_conn=%p\n", + qedi_conn->active_cmd_count, abrt_conn, + qedi_ep->state, + qedi_ep->iscsi_cid, + qedi_ep->conn + ); + + if (!qedi_conn->active_cmd_count) + abrt_conn = 0; + else + abrt_conn = 1; + + if 
(abrt_conn) + qedi_clearsq(qedi, qedi_conn, NULL); + break; + default: + break; + } + + if (!abrt_conn) + wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer; + + qedi_ep->state = EP_STATE_DISCONN_START; + + if (test_bit(QEDI_IN_SHUTDOWN, &qedi->flags) || + test_bit(QEDI_IN_RECOVERY, &qedi->flags)) + goto ep_release_conn; + + /* Delete doorbell from doorbell recovery mechanism */ + ret = qedi_ops->common->db_recovery_del(qedi->cdev, + qedi_ep->p_doorbell, + &qedi_ep->db_data); + + ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn); + if (ret) { + QEDI_WARN(&qedi->dbg_ctx, + "destroy_conn failed returned %d\n", ret); + } else { + ret = wait_event_interruptible_timeout( + qedi_ep->tcp_ofld_wait, + (qedi_ep->state != + EP_STATE_DISCONN_START), + wait_delay); + if ((ret <= 0) || (qedi_ep->state == EP_STATE_DISCONN_START)) { + QEDI_WARN(&qedi->dbg_ctx, + "Destroy conn timedout or interrupted, ret=%d, delay=%d, cid=0x%x\n", + ret, wait_delay, qedi_ep->iscsi_cid); + } + } + +ep_release_conn: + ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle); + if (ret) + QEDI_WARN(&qedi->dbg_ctx, + "release_conn returned %d, cid=0x%x\n", + ret, qedi_ep->iscsi_cid); +ep_exit_recover: + qedi_ep->state = EP_STATE_IDLE; + qedi->ep_tbl[qedi_ep->iscsi_cid] = NULL; + qedi->cid_que.conn_cid_tbl[qedi_ep->iscsi_cid] = NULL; + qedi_free_id(&qedi->lcl_port_tbl, qedi_ep->src_port); + qedi_free_sq(qedi, qedi_ep); + + if (qedi_conn) + qedi_conn->ep = NULL; + + qedi_ep->conn = NULL; + qedi_ep->qedi = NULL; + atomic_dec(&qedi->num_offloads); + + iscsi_destroy_endpoint(ep); +} + +static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid) +{ + struct qed_dev *cdev = qedi->cdev; + struct qedi_uio_dev *udev; + struct qedi_uio_ctrl *uctrl; + struct sk_buff *skb; + u32 len; + int rc = 0; + + udev = qedi->udev; + if (!udev) { + QEDI_ERR(&qedi->dbg_ctx, "udev is NULL.\n"); + return -EINVAL; + } + + uctrl = (struct qedi_uio_ctrl *)udev->uctrl; + if (!uctrl) { + QEDI_ERR(&qedi->dbg_ctx, "uctlr is NULL.\n"); + return -EINVAL; + } + + len = uctrl->host_tx_pkt_len; + if (!len) { + QEDI_ERR(&qedi->dbg_ctx, "Invalid len %u\n", len); + return -EINVAL; + } + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) { + QEDI_ERR(&qedi->dbg_ctx, "alloc_skb failed\n"); + return -EINVAL; + } + + skb_put(skb, len); + memcpy(skb->data, udev->tx_pkt, len); + skb->ip_summed = CHECKSUM_NONE; + + if (vlanid) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid); + + rc = qedi_ops->ll2->start_xmit(cdev, skb, 0); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n", + rc); + kfree_skb(skb); + } + + uctrl->host_tx_pkt_len = 0; + uctrl->hw_tx_cons++; + + return rc; +} + +static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) +{ + struct qedi_ctx *qedi; + struct qedi_endpoint *qedi_ep; + int ret = 0; + u32 iscsi_cid; + u16 port_id = 0; + + if (!shost) { + ret = -ENXIO; + QEDI_ERR(NULL, "shost is NULL\n"); + return ret; + } + + if (strcmp(shost->hostt->proc_name, "qedi")) { + ret = -ENXIO; + QEDI_ERR(NULL, "shost %s is invalid\n", + shost->hostt->proc_name); + return ret; + } + + qedi = iscsi_host_priv(shost); + if (path_data->handle == QEDI_PATH_HANDLE) { + ret = qedi_data_avail(qedi, path_data->vlan_id); + goto set_path_exit; + } + + iscsi_cid = (u32)path_data->handle; + if (iscsi_cid >= qedi->max_active_conns) { + ret = -EINVAL; + goto set_path_exit; + } + qedi_ep = qedi->ep_tbl[iscsi_cid]; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, 
qedi_ep); + if (!qedi_ep) { + ret = -EINVAL; + goto set_path_exit; + } + + if (!is_valid_ether_addr(&path_data->mac_addr[0])) { + QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); + qedi_ep->state = EP_STATE_OFLDCONN_NONE; + ret = -EIO; + goto set_path_exit; + } + + ether_addr_copy(&qedi_ep->src_mac[0], &qedi->mac[0]); + ether_addr_copy(&qedi_ep->dst_mac[0], &path_data->mac_addr[0]); + + qedi_ep->vlan_id = path_data->vlan_id; + if (path_data->pmtu < DEF_PATH_MTU) { + qedi_ep->pmtu = qedi->ll2_mtu; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "MTU cannot be %u, using default MTU %u\n", + path_data->pmtu, qedi_ep->pmtu); + } + + if (path_data->pmtu != qedi->ll2_mtu) { + if (path_data->pmtu > JUMBO_MTU) { + ret = -EINVAL; + QEDI_ERR(NULL, "Invalid MTU %u\n", path_data->pmtu); + goto set_path_exit; + } + + qedi_reset_host_mtu(qedi, path_data->pmtu); + qedi_ep->pmtu = qedi->ll2_mtu; + } + + port_id = qedi_ep->src_port; + if (port_id >= QEDI_LOCAL_PORT_MIN && + port_id < QEDI_LOCAL_PORT_MAX) { + if (qedi_alloc_id(&qedi->lcl_port_tbl, port_id)) + port_id = 0; + } else { + port_id = 0; + } + + if (!port_id) { + port_id = qedi_alloc_new_id(&qedi->lcl_port_tbl); + if (port_id == QEDI_LOCAL_PORT_INVALID) { + QEDI_ERR(&qedi->dbg_ctx, + "Failed to allocate port id for iscsi_cid=0x%x\n", + iscsi_cid); + ret = -ENOMEM; + goto set_path_exit; + } + } + + qedi_ep->src_port = port_id; + + if (qedi_ep->ip_type == TCP_IPV4) { + memcpy(&qedi_ep->src_addr[0], &path_data->src.v4_addr, + sizeof(struct in_addr)); + memcpy(&qedi->src_ip[0], &path_data->src.v4_addr, + sizeof(struct in_addr)); + qedi->ip_type = TCP_IPV4; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "src addr:port=%pI4:%u, dst addr:port=%pI4:%u\n", + qedi_ep->src_addr, qedi_ep->src_port, + qedi_ep->dst_addr, qedi_ep->dst_port); + } else { + memcpy(&qedi_ep->src_addr[0], &path_data->src.v6_addr, + sizeof(struct in6_addr)); + memcpy(&qedi->src_ip[0], &path_data->src.v6_addr, + sizeof(struct in6_addr)); + qedi->ip_type = TCP_IPV6; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "src addr:port=%pI6:%u, dst addr:port=%pI6:%u\n", + qedi_ep->src_addr, qedi_ep->src_port, + qedi_ep->dst_addr, qedi_ep->dst_port); + } + + queue_work(qedi->offload_thread, &qedi_ep->offload_work); + + ret = 0; + +set_path_exit: + return ret; +} + +static umode_t qedi_attr_is_visible(int param_type, int param) +{ + switch (param_type) { + case ISCSI_HOST_PARAM: + switch (param) { + case ISCSI_HOST_PARAM_NETDEV_NAME: + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + return 0444; + default: + return 0; + } + case ISCSI_PARAM: + switch (param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + case ISCSI_PARAM_HDRDGST_EN: + case ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_PERSISTENT_ADDRESS: + case ISCSI_PARAM_PERSISTENT_PORT: + case ISCSI_PARAM_PING_TMO: + case ISCSI_PARAM_RECV_TMO: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_MAX_R2T: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_FIRST_BURST: + case ISCSI_PARAM_MAX_BURST: + case ISCSI_PARAM_PDU_INORDER_EN: + case ISCSI_PARAM_DATASEQ_INORDER_EN: + case ISCSI_PARAM_ERL: + case ISCSI_PARAM_TARGET_NAME: + case ISCSI_PARAM_TPGT: + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + case ISCSI_PARAM_USERNAME_IN: + case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_FAST_ABORT: + case ISCSI_PARAM_ABORT_TMO: + case ISCSI_PARAM_LU_RESET_TMO: + case ISCSI_PARAM_TGT_RESET_TMO: + case 
ISCSI_PARAM_IFACE_NAME: + case ISCSI_PARAM_INITIATOR_NAME: + case ISCSI_PARAM_BOOT_ROOT: + case ISCSI_PARAM_BOOT_NIC: + case ISCSI_PARAM_BOOT_TARGET: + return 0444; + default: + return 0; + } + } + + return 0; +} + +static void qedi_cleanup_task(struct iscsi_task *task) +{ + struct qedi_cmd *cmd; + + if (task->state == ISCSI_TASK_PENDING) { + QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n", + refcount_read(&task->refcount)); + return; + } + + if (task->sc) + qedi_iscsi_unmap_sg_list(task->dd_data); + + cmd = task->dd_data; + if (cmd->task_id != U16_MAX) + qedi_clear_task_idx(iscsi_host_priv(task->conn->session->host), + cmd->task_id); + + cmd->task_id = U16_MAX; + cmd->scsi_cmd = NULL; +} + +struct iscsi_transport qedi_iscsi_transport = { + .owner = THIS_MODULE, + .name = QEDI_MODULE_NAME, + .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST | + CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO, + .create_session = qedi_session_create, + .destroy_session = qedi_session_destroy, + .create_conn = qedi_conn_create, + .bind_conn = qedi_conn_bind, + .unbind_conn = iscsi_conn_unbind, + .start_conn = qedi_conn_start, + .stop_conn = iscsi_conn_stop, + .destroy_conn = qedi_conn_destroy, + .set_param = iscsi_set_param, + .get_ep_param = qedi_ep_get_param, + .get_conn_param = iscsi_conn_get_param, + .get_session_param = iscsi_session_get_param, + .get_host_param = qedi_host_get_param, + .send_pdu = iscsi_conn_send_pdu, + .get_stats = qedi_conn_get_stats, + .xmit_task = qedi_task_xmit, + .cleanup_task = qedi_cleanup_task, + .session_recovery_timedout = iscsi_session_recovery_timedout, + .ep_connect = qedi_ep_connect, + .ep_poll = qedi_ep_poll, + .ep_disconnect = qedi_ep_disconnect, + .set_path = qedi_set_path, + .attr_is_visible = qedi_attr_is_visible, +}; + +void qedi_start_conn_recovery(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn) +{ + struct iscsi_cls_session *cls_sess; + struct iscsi_cls_conn *cls_conn; + struct iscsi_conn *conn; + + cls_conn = qedi_conn->cls_conn; + conn = cls_conn->dd_data; + cls_sess = iscsi_conn_to_session(cls_conn); + + if (iscsi_is_session_online(cls_sess)) { + qedi_conn->abrt_conn = 1; + QEDI_ERR(&qedi->dbg_ctx, + "Failing connection, state=0x%x, cid=0x%x\n", + conn->session->state, qedi_conn->iscsi_conn_id); + iscsi_conn_failure(qedi_conn->cls_conn->dd_data, + ISCSI_ERR_CONN_FAILED); + } +} + +static const struct { + enum iscsi_error_types error_code; + char *err_string; +} qedi_iscsi_error[] = { + { ISCSI_STATUS_NONE, + "tcp_error none" + }, + { ISCSI_CONN_ERROR_TASK_CID_MISMATCH, + "task cid mismatch" + }, + { ISCSI_CONN_ERROR_TASK_NOT_VALID, + "invalid task" + }, + { ISCSI_CONN_ERROR_RQ_RING_IS_FULL, + "rq ring full" + }, + { ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL, + "cmdq ring full" + }, + { ISCSI_CONN_ERROR_HQE_CACHING_FAILED, + "sge caching failed" + }, + { ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR, + "hdr digest error" + }, + { ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR, + "local cmpl error" + }, + { ISCSI_CONN_ERROR_DATA_OVERRUN, + "invalid task" + }, + { ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR, + "out of sge error" + }, + { ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR, + "tcp ip fragment error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN, + "AHS len protocol error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE, + "itt out of range error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE, + "data seg more than pdu size" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE, + "invalid opcode" + }, + { 
ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE, + "invalid opcode before update" + }, + { ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL, + "unexpected opcode" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA, + "r2t carries no data" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN, + "data sn error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT, + "data TTT error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT, + "r2t TTT error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET, + "buffer offset error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO, + "buffer offset ooo" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN, + "data seg len 0" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0, + "data xer len error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1, + "data xer len1 error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2, + "data xer len2 error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN, + "protocol lun error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO, + "f bit zero error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN, + "exp stat sn error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO, + "dsl not zero error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL, + "invalid dsl" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG, + "data seg len too big" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT, + "outstanding r2t count error" + }, + { ISCSI_CONN_ERROR_SENSE_DATA_LENGTH, + "sense datalen error" + }, +}; + +static char *qedi_get_iscsi_error(enum iscsi_error_types err_code) +{ + int i; + char *msg = NULL; + + for (i = 0; i < ARRAY_SIZE(qedi_iscsi_error); i++) { + if (qedi_iscsi_error[i].error_code == err_code) { + msg = qedi_iscsi_error[i].err_string; + break; + } + } + return msg; +} + +void qedi_process_iscsi_error(struct qedi_endpoint *ep, + struct iscsi_eqe_data *data) +{ + struct qedi_conn *qedi_conn; + struct qedi_ctx *qedi; + char warn_notice[] = "iscsi_warning"; + char error_notice[] = "iscsi_error"; + char unknown_msg[] = "Unknown error"; + char *message; + int need_recovery = 0; + u32 err_mask = 0; + char *msg; + + if (!ep) + return; + + qedi_conn = ep->conn; + if (!qedi_conn) + return; + + qedi = ep->qedi; + + QEDI_ERR(&qedi->dbg_ctx, "async event iscsi error:0x%x\n", + data->error_code); + + if (err_mask) { + need_recovery = 0; + message = warn_notice; + } else { + need_recovery = 1; + message = error_notice; + } + + msg = qedi_get_iscsi_error(data->error_code); + if (!msg) { + need_recovery = 0; + msg = unknown_msg; + } + + iscsi_conn_printk(KERN_ALERT, + qedi_conn->cls_conn->dd_data, + "qedi: %s - %s\n", message, msg); + + if (need_recovery) + qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn); +} + +void qedi_process_tcp_error(struct qedi_endpoint *ep, + struct iscsi_eqe_data *data) +{ + struct qedi_conn *qedi_conn; + + if (!ep) + return; + + qedi_conn = ep->conn; + if (!qedi_conn) + return; + + QEDI_ERR(&ep->qedi->dbg_ctx, "async event TCP error:0x%x\n", + data->error_code); + + qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn); +} diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h new file mode 100644 index 000000000..9b9f2e44f --- /dev/null +++ b/drivers/scsi/qedi/qedi_iscsi.h @@ -0,0 +1,239 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. 
+ */ + +#ifndef _QEDI_ISCSI_H_ +#define _QEDI_ISCSI_H_ + +#include +#include +#include "qedi.h" + +#define ISCSI_MAX_SESS_PER_HBA 4096 + +#define DEF_KA_TIMEOUT 7200000 +#define DEF_KA_INTERVAL 10000 +#define DEF_KA_MAX_PROBE_COUNT 10 +#define DEF_TOS 0 +#define DEF_TTL 0xfe +#define DEF_SND_SEQ_SCALE 0 +#define DEF_RCV_BUF 0xffff +#define DEF_SND_BUF 0xffff +#define DEF_SEED 0 +#define DEF_MAX_RT_TIME 8000 +#define DEF_MAX_DA_COUNT 2 +#define DEF_SWS_TIMER 1000 +#define DEF_MAX_CWND 2 +#define DEF_PATH_MTU 1500 +#define DEF_MSS 1460 +#define DEF_LL2_MTU 1560 +#define JUMBO_MTU 9000 + +#define MIN_MTU 576 /* rfc 793 */ +#define IPV4_HDR_LEN 20 +#define IPV6_HDR_LEN 40 +#define TCP_HDR_LEN 20 +#define TCP_OPTION_LEN 12 +#define VLAN_LEN 4 + +enum { + EP_STATE_IDLE = 0x0, + EP_STATE_ACQRCONN_START = 0x1, + EP_STATE_ACQRCONN_COMPL = 0x2, + EP_STATE_OFLDCONN_START = 0x4, + EP_STATE_OFLDCONN_COMPL = 0x8, + EP_STATE_DISCONN_START = 0x10, + EP_STATE_DISCONN_COMPL = 0x20, + EP_STATE_CLEANUP_START = 0x40, + EP_STATE_CLEANUP_CMPL = 0x80, + EP_STATE_TCP_FIN_RCVD = 0x100, + EP_STATE_TCP_RST_RCVD = 0x200, + EP_STATE_LOGOUT_SENT = 0x400, + EP_STATE_LOGOUT_RESP_RCVD = 0x800, + EP_STATE_CLEANUP_FAILED = 0x1000, + EP_STATE_OFLDCONN_FAILED = 0x2000, + EP_STATE_CONNECT_FAILED = 0x4000, + EP_STATE_DISCONN_TIMEDOUT = 0x8000, + EP_STATE_OFLDCONN_NONE = 0x10000, +}; + +struct qedi_conn; + +struct qedi_endpoint { + struct qedi_ctx *qedi; + u32 dst_addr[4]; + u32 src_addr[4]; + u16 src_port; + u16 dst_port; + u16 vlan_id; + u16 pmtu; + u8 src_mac[ETH_ALEN]; + u8 dst_mac[ETH_ALEN]; + u8 ip_type; + int state; + wait_queue_head_t ofld_wait; + wait_queue_head_t tcp_ofld_wait; + u32 iscsi_cid; + /* identifier of the connection from qed */ + u32 handle; + u32 fw_cid; + void __iomem *p_doorbell; + struct iscsi_db_data db_data; + + /* Send queue management */ + struct iscsi_wqe *sq; + dma_addr_t sq_dma; + + u16 sq_prod_idx; + u16 fw_sq_prod_idx; + u16 sq_con_idx; + u32 sq_mem_size; + + void *sq_pbl; + dma_addr_t sq_pbl_dma; + u32 sq_pbl_size; + struct qedi_conn *conn; + struct work_struct offload_work; +}; + +#define QEDI_SQ_WQES_MIN 16 + +struct qedi_io_bdt { + struct scsi_sge *sge_tbl; + dma_addr_t sge_tbl_dma; + u16 sge_valid; +}; + +/** + * struct generic_pdu_resc - login pdu resource structure + * + * @req_buf: driver buffer used to stage payload associated with + * the login request + * @req_dma_addr: dma address for iscsi login request payload buffer + * @req_buf_size: actual login request payload length + * @req_wr_ptr: pointer into login request buffer when next data is + * to be written + * @resp_hdr: iscsi header where iscsi login response header is to + * be recreated + * @resp_buf: buffer to stage login response payload + * @resp_dma_addr: login response payload buffer dma address + * @resp_buf_size: login response paylod length + * @resp_wr_ptr: pointer into login response buffer when next data is + * to be written + * @req_bd_tbl: iscsi login request payload BD table + * @req_bd_dma: login request BD table dma address + * @resp_bd_tbl: iscsi login response payload BD table + * @resp_bd_dma: login request BD table dma address + * + * following structure defines buffer info for generic pdus such as iSCSI Login, + * Logout and NOP + */ +struct generic_pdu_resc { + char *req_buf; + dma_addr_t req_dma_addr; + u32 req_buf_size; + char *req_wr_ptr; + struct iscsi_hdr resp_hdr; + char *resp_buf; + dma_addr_t resp_dma_addr; + u32 resp_buf_size; + char *resp_wr_ptr; + char *req_bd_tbl; + dma_addr_t req_bd_dma; + char 
*resp_bd_tbl; + dma_addr_t resp_bd_dma; +}; + +struct qedi_conn { + struct iscsi_cls_conn *cls_conn; + struct qedi_ctx *qedi; + struct qedi_endpoint *ep; + struct iscsi_endpoint *iscsi_ep; + struct list_head active_cmd_list; + spinlock_t list_lock; /* internal conn lock */ + u32 active_cmd_count; + u32 cmd_cleanup_req; + atomic_t cmd_cleanup_cmpl; + + u32 iscsi_conn_id; + int itt; + int abrt_conn; +#define QEDI_CID_RESERVED 0x5AFF + u32 fw_cid; + /* + * Buffer for login negotiation process + */ + struct generic_pdu_resc gen_pdu; + + struct list_head tmf_work_list; + wait_queue_head_t wait_queue; + spinlock_t tmf_work_lock; /* tmf work lock */ + bool ep_disconnect_starting; + int fw_cleanup_works; +}; + +struct qedi_cmd { + struct list_head io_cmd; + bool io_cmd_in_list; + struct iscsi_hdr hdr; + struct qedi_conn *conn; + struct scsi_cmnd *scsi_cmd; + struct scatterlist *sg; + struct qedi_io_bdt io_tbl; + struct iscsi_task_context request; + unsigned char *sense_buffer; + dma_addr_t sense_buffer_dma; + u16 task_id; + + /* field populated for tmf work queue */ + struct iscsi_task *task; + struct work_struct tmf_work; + int state; +#define CLEANUP_WAIT 1 +#define CLEANUP_RECV 2 +#define CLEANUP_WAIT_FAILED 3 +#define CLEANUP_NOT_REQUIRED 4 +#define LUN_RESET_RESPONSE_RECEIVED 5 +#define RESPONSE_RECEIVED 6 + + int type; +#define TYPEIO 1 +#define TYPERESET 2 + + struct qedi_work_map *list_tmf_work; + /* slowpath management */ + bool use_slowpath; + + struct iscsi_tm_rsp *tmf_resp_buf; + struct qedi_work cqe_work; +}; + +struct qedi_work_map { + struct list_head list; + struct qedi_cmd *qedi_cmd; + struct iscsi_task *ctask; + int rtid; + + int state; +#define QEDI_WORK_QUEUED 1 +#define QEDI_WORK_SCHEDULED 2 +#define QEDI_WORK_EXIT 3 + + struct work_struct *ptr_tmf_work; +}; + +struct qedi_boot_target { + char ip_addr[64]; + char iscsi_name[255]; + u32 ipv6_en; +}; + +#define qedi_set_itt(task_id, itt) ((u32)(((task_id) & 0xffff) | ((itt) << 16))) +#define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16) + +#define QEDI_OFLD_WAIT_STATE(q) ((q)->state == EP_STATE_OFLDCONN_FAILED || \ + (q)->state == EP_STATE_OFLDCONN_COMPL) + +#endif /* _QEDI_ISCSI_H_ */ diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c new file mode 100644 index 000000000..cd0180b1f --- /dev/null +++ b/drivers/scsi/qedi/qedi_main.c @@ -0,0 +1,2964 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "qedi.h" +#include "qedi_gbl.h" +#include "qedi_iscsi.h" + +static uint qedi_qed_debug; +module_param(qedi_qed_debug, uint, 0644); +MODULE_PARM_DESC(qedi_qed_debug, " QED debug level 0 (default)"); + +static uint qedi_fw_debug; +module_param(qedi_fw_debug, uint, 0644); +MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0(default) to 3"); + +uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM; +module_param(qedi_dbg_log, uint, 0644); +MODULE_PARM_DESC(qedi_dbg_log, " Default debug level"); + +uint qedi_io_tracing; +module_param(qedi_io_tracing, uint, 0644); +MODULE_PARM_DESC(qedi_io_tracing, + " Enable logging of SCSI requests/completions into trace buffer. 
(default off)."); + +static uint qedi_ll2_buf_size = 0x400; +module_param(qedi_ll2_buf_size, uint, 0644); +MODULE_PARM_DESC(qedi_ll2_buf_size, + "parameter to set ping packet size, default - 0x400, Jumbo packets - 0x2400."); + +static uint qedi_flags_override; +module_param(qedi_flags_override, uint, 0644); +MODULE_PARM_DESC(qedi_flags_override, "Disable/Enable MFW error flags bits action."); + +const struct qed_iscsi_ops *qedi_ops; +static struct scsi_transport_template *qedi_scsi_transport; +static struct pci_driver qedi_pci_driver; +static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu); +static LIST_HEAD(qedi_udev_list); +/* Static function declaration */ +static int qedi_alloc_global_queues(struct qedi_ctx *qedi); +static void qedi_free_global_queues(struct qedi_ctx *qedi); +static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid); +static void qedi_reset_uio_rings(struct qedi_uio_dev *udev); +static void qedi_ll2_free_skbs(struct qedi_ctx *qedi); +static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi); +static void qedi_recovery_handler(struct work_struct *work); +static void qedi_schedule_hw_err_handler(void *dev, + enum qed_hw_err_type err_type); +static int qedi_suspend(struct pci_dev *pdev, pm_message_t state); + +static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle) +{ + struct qedi_ctx *qedi; + struct qedi_endpoint *qedi_ep; + struct iscsi_eqe_data *data; + int rval = 0; + + if (!context || !fw_handle) { + QEDI_ERR(NULL, "Recv event with ctx NULL\n"); + return -EINVAL; + } + + qedi = (struct qedi_ctx *)context; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle); + + data = (struct iscsi_eqe_data *)fw_handle; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n", + data->icid, data->conn_id, data->error_code, + data->error_pdu_opcode_reserved); + + qedi_ep = qedi->ep_tbl[data->icid]; + + if (!qedi_ep) { + QEDI_WARN(&qedi->dbg_ctx, + "Cannot process event, ep already disconnected, cid=0x%x\n", + data->icid); + WARN_ON(1); + return -ENODEV; + } + + switch (fw_event_code) { + case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE: + if (qedi_ep->state == EP_STATE_OFLDCONN_START) + qedi_ep->state = EP_STATE_OFLDCONN_COMPL; + + wake_up_interruptible(&qedi_ep->tcp_ofld_wait); + break; + case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE: + qedi_ep->state = EP_STATE_DISCONN_COMPL; + wake_up_interruptible(&qedi_ep->tcp_ofld_wait); + break; + case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR: + qedi_process_iscsi_error(qedi_ep, data); + break; + case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD: + case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD: + case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME: + case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT: + case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT: + case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2: + case ISCSI_EVENT_TYPE_TCP_CONN_ERROR: + qedi_process_tcp_error(qedi_ep, data); + break; + default: + QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n", + fw_event_code); + } + + return rval; +} + +static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode) +{ + struct qedi_uio_dev *udev = uinfo->priv; + struct qedi_ctx *qedi = udev->qedi; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (udev->uio_dev != -1) + return -EBUSY; + + rtnl_lock(); + udev->uio_dev = iminor(inode); + qedi_reset_uio_rings(udev); + set_bit(UIO_DEV_OPENED, &qedi->flags); + rtnl_unlock(); + + return 0; +} + +static int qedi_uio_close(struct uio_info *uinfo, 
struct inode *inode) +{ + struct qedi_uio_dev *udev = uinfo->priv; + struct qedi_ctx *qedi = udev->qedi; + + udev->uio_dev = -1; + clear_bit(UIO_DEV_OPENED, &qedi->flags); + qedi_ll2_free_skbs(qedi); + return 0; +} + +static void __qedi_free_uio_rings(struct qedi_uio_dev *udev) +{ + if (udev->uctrl) { + free_page((unsigned long)udev->uctrl); + udev->uctrl = NULL; + } + + if (udev->ll2_ring) { + free_page((unsigned long)udev->ll2_ring); + udev->ll2_ring = NULL; + } + + if (udev->ll2_buf) { + free_pages((unsigned long)udev->ll2_buf, 2); + udev->ll2_buf = NULL; + } +} + +static void __qedi_free_uio(struct qedi_uio_dev *udev) +{ + uio_unregister_device(&udev->qedi_uinfo); + + __qedi_free_uio_rings(udev); + + pci_dev_put(udev->pdev); + kfree(udev); +} + +static void qedi_free_uio(struct qedi_uio_dev *udev) +{ + if (!udev) + return; + + list_del_init(&udev->list); + __qedi_free_uio(udev); +} + +static void qedi_reset_uio_rings(struct qedi_uio_dev *udev) +{ + struct qedi_ctx *qedi = NULL; + struct qedi_uio_ctrl *uctrl = NULL; + + qedi = udev->qedi; + uctrl = udev->uctrl; + + spin_lock_bh(&qedi->ll2_lock); + uctrl->host_rx_cons = 0; + uctrl->hw_rx_prod = 0; + uctrl->hw_rx_bd_prod = 0; + uctrl->host_rx_bd_cons = 0; + + memset(udev->ll2_ring, 0, udev->ll2_ring_size); + memset(udev->ll2_buf, 0, udev->ll2_buf_size); + spin_unlock_bh(&qedi->ll2_lock); +} + +static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev) +{ + int rc = 0; + + if (udev->ll2_ring || udev->ll2_buf) + return rc; + + /* Memory for control area. */ + udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL); + if (!udev->uctrl) + return -ENOMEM; + + /* Allocating memory for LL2 ring */ + udev->ll2_ring_size = QEDI_PAGE_SIZE; + udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP); + if (!udev->ll2_ring) { + rc = -ENOMEM; + goto exit_alloc_ring; + } + + /* Allocating memory for Tx/Rx pkt buffer */ + udev->ll2_buf_size = TX_RX_RING * qedi_ll2_buf_size; + udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size); + udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP | + __GFP_ZERO, 2); + if (!udev->ll2_buf) { + rc = -ENOMEM; + goto exit_alloc_buf; + } + return rc; + +exit_alloc_buf: + free_page((unsigned long)udev->ll2_ring); + udev->ll2_ring = NULL; +exit_alloc_ring: + return rc; +} + +static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) +{ + struct qedi_uio_dev *udev = NULL; + int rc = 0; + + list_for_each_entry(udev, &qedi_udev_list, list) { + if (udev->pdev == qedi->pdev) { + udev->qedi = qedi; + if (__qedi_alloc_uio_rings(udev)) { + udev->qedi = NULL; + return -ENOMEM; + } + qedi->udev = udev; + return 0; + } + } + + udev = kzalloc(sizeof(*udev), GFP_KERNEL); + if (!udev) + goto err_udev; + + udev->uio_dev = -1; + + udev->qedi = qedi; + udev->pdev = qedi->pdev; + + rc = __qedi_alloc_uio_rings(udev); + if (rc) + goto err_uctrl; + + list_add(&udev->list, &qedi_udev_list); + + pci_dev_get(udev->pdev); + qedi->udev = udev; + + udev->tx_pkt = udev->ll2_buf; + udev->rx_pkt = udev->ll2_buf + qedi_ll2_buf_size; + return 0; + + err_uctrl: + kfree(udev); + err_udev: + return -ENOMEM; +} + +static int qedi_init_uio(struct qedi_ctx *qedi) +{ + struct qedi_uio_dev *udev = qedi->udev; + struct uio_info *uinfo; + int ret = 0; + + if (!udev) + return -ENOMEM; + + uinfo = &udev->qedi_uinfo; + + uinfo->mem[0].addr = (unsigned long)udev->uctrl; + uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl); + uinfo->mem[0].memtype = UIO_MEM_LOGICAL; + + uinfo->mem[1].addr = (unsigned long)udev->ll2_ring; + uinfo->mem[1].size = 
udev->ll2_ring_size; + uinfo->mem[1].memtype = UIO_MEM_LOGICAL; + + uinfo->mem[2].addr = (unsigned long)udev->ll2_buf; + uinfo->mem[2].size = udev->ll2_buf_size; + uinfo->mem[2].memtype = UIO_MEM_LOGICAL; + + uinfo->name = "qedi_uio"; + uinfo->version = QEDI_MODULE_VERSION; + uinfo->irq = UIO_IRQ_CUSTOM; + + uinfo->open = qedi_uio_open; + uinfo->release = qedi_uio_close; + + if (udev->uio_dev == -1) { + if (!uinfo->priv) { + uinfo->priv = udev; + + ret = uio_register_device(&udev->pdev->dev, uinfo); + if (ret) { + QEDI_ERR(&qedi->dbg_ctx, + "UIO registration failed\n"); + } + } + } + + return ret; +} + +static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi, + struct qed_sb_info *sb_info, u16 sb_id) +{ + struct status_block *sb_virt; + dma_addr_t sb_phys; + int ret; + + sb_virt = dma_alloc_coherent(&qedi->pdev->dev, + sizeof(struct status_block), &sb_phys, + GFP_KERNEL); + if (!sb_virt) { + QEDI_ERR(&qedi->dbg_ctx, + "Status block allocation failed for id = %d.\n", + sb_id); + return -ENOMEM; + } + + ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys, + sb_id, QED_SB_TYPE_STORAGE); + if (ret) { + QEDI_ERR(&qedi->dbg_ctx, + "Status block initialization failed for id = %d.\n", + sb_id); + return ret; + } + + return 0; +} + +static void qedi_free_sb(struct qedi_ctx *qedi) +{ + struct qed_sb_info *sb_info; + int id; + + for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { + sb_info = &qedi->sb_array[id]; + if (sb_info->sb_virt) + dma_free_coherent(&qedi->pdev->dev, + sizeof(*sb_info->sb_virt), + (void *)sb_info->sb_virt, + sb_info->sb_phys); + } +} + +static void qedi_free_fp(struct qedi_ctx *qedi) +{ + kfree(qedi->fp_array); + kfree(qedi->sb_array); +} + +static void qedi_destroy_fp(struct qedi_ctx *qedi) +{ + qedi_free_sb(qedi); + qedi_free_fp(qedi); +} + +static int qedi_alloc_fp(struct qedi_ctx *qedi) +{ + int ret = 0; + + qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi), + sizeof(struct qedi_fastpath), GFP_KERNEL); + if (!qedi->fp_array) { + QEDI_ERR(&qedi->dbg_ctx, + "fastpath fp array allocation failed.\n"); + return -ENOMEM; + } + + qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi), + sizeof(struct qed_sb_info), GFP_KERNEL); + if (!qedi->sb_array) { + QEDI_ERR(&qedi->dbg_ctx, + "fastpath sb array allocation failed.\n"); + ret = -ENOMEM; + goto free_fp; + } + + return ret; + +free_fp: + qedi_free_fp(qedi); + return ret; +} + +static void qedi_int_fp(struct qedi_ctx *qedi) +{ + struct qedi_fastpath *fp; + int id; + + memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) * + sizeof(*qedi->fp_array)); + memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) * + sizeof(*qedi->sb_array)); + + for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { + fp = &qedi->fp_array[id]; + fp->sb_info = &qedi->sb_array[id]; + fp->sb_id = id; + fp->qedi = qedi; + snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", + "qedi", id); + + /* fp_array[i] ---- irq cookie + * So init data which is needed in int ctx + */ + } +} + +static int qedi_prepare_fp(struct qedi_ctx *qedi) +{ + struct qedi_fastpath *fp; + int id, ret = 0; + + ret = qedi_alloc_fp(qedi); + if (ret) + goto err; + + qedi_int_fp(qedi); + + for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { + fp = &qedi->fp_array[id]; + ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id); + if (ret) { + QEDI_ERR(&qedi->dbg_ctx, + "SB allocation and initialization failed.\n"); + ret = -EIO; + goto err_init; + } + } + + return 0; + +err_init: + qedi_free_sb(qedi); + qedi_free_fp(qedi); +err: + return ret; +} + +static int qedi_setup_cid_que(struct qedi_ctx 
*qedi) +{ + int i; + + qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns, + sizeof(u32), GFP_KERNEL); + if (!qedi->cid_que.cid_que_base) + return -ENOMEM; + + qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns, + sizeof(struct qedi_conn *), + GFP_KERNEL); + if (!qedi->cid_que.conn_cid_tbl) { + kfree(qedi->cid_que.cid_que_base); + qedi->cid_que.cid_que_base = NULL; + return -ENOMEM; + } + + qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base; + qedi->cid_que.cid_q_prod_idx = 0; + qedi->cid_que.cid_q_cons_idx = 0; + qedi->cid_que.cid_q_max_idx = qedi->max_active_conns; + qedi->cid_que.cid_free_cnt = qedi->max_active_conns; + + for (i = 0; i < qedi->max_active_conns; i++) { + qedi->cid_que.cid_que[i] = i; + qedi->cid_que.conn_cid_tbl[i] = NULL; + } + + return 0; +} + +static void qedi_release_cid_que(struct qedi_ctx *qedi) +{ + kfree(qedi->cid_que.cid_que_base); + qedi->cid_que.cid_que_base = NULL; + + kfree(qedi->cid_que.conn_cid_tbl); + qedi->cid_que.conn_cid_tbl = NULL; +} + +static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size, + u16 start_id, u16 next) +{ + id_tbl->start = start_id; + id_tbl->max = size; + id_tbl->next = next; + spin_lock_init(&id_tbl->lock); + id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL); + if (!id_tbl->table) + return -ENOMEM; + + return 0; +} + +static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl) +{ + kfree(id_tbl->table); + id_tbl->table = NULL; +} + +int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id) +{ + int ret = -1; + + id -= id_tbl->start; + if (id >= id_tbl->max) + return ret; + + spin_lock(&id_tbl->lock); + if (!test_bit(id, id_tbl->table)) { + set_bit(id, id_tbl->table); + ret = 0; + } + spin_unlock(&id_tbl->lock); + return ret; +} + +u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl) +{ + u16 id; + + spin_lock(&id_tbl->lock); + id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next); + if (id >= id_tbl->max) { + id = QEDI_LOCAL_PORT_INVALID; + if (id_tbl->next != 0) { + id = find_first_zero_bit(id_tbl->table, id_tbl->next); + if (id >= id_tbl->next) + id = QEDI_LOCAL_PORT_INVALID; + } + } + + if (id < id_tbl->max) { + set_bit(id, id_tbl->table); + id_tbl->next = (id + 1) & (id_tbl->max - 1); + id += id_tbl->start; + } + + spin_unlock(&id_tbl->lock); + + return id; +} + +void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id) +{ + if (id == QEDI_LOCAL_PORT_INVALID) + return; + + id -= id_tbl->start; + if (id >= id_tbl->max) + return; + + clear_bit(id, id_tbl->table); +} + +static void qedi_cm_free_mem(struct qedi_ctx *qedi) +{ + kfree(qedi->ep_tbl); + qedi->ep_tbl = NULL; + qedi_free_id_tbl(&qedi->lcl_port_tbl); +} + +static int qedi_cm_alloc_mem(struct qedi_ctx *qedi) +{ + u16 port_id; + + qedi->ep_tbl = kzalloc((qedi->max_active_conns * + sizeof(struct qedi_endpoint *)), GFP_KERNEL); + if (!qedi->ep_tbl) + return -ENOMEM; + port_id = get_random_u32_below(QEDI_LOCAL_PORT_RANGE); + if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE, + QEDI_LOCAL_PORT_MIN, port_id)) { + qedi_cm_free_mem(qedi); + return -ENOMEM; + } + + return 0; +} + +static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct qedi_ctx *qedi = NULL; + + shost = iscsi_host_alloc(&qedi_host_template, + sizeof(struct qedi_ctx), 0); + if (!shost) { + QEDI_ERR(NULL, "Could not allocate shost\n"); + goto exit_setup_shost; + } + + shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA - 1; + shost->max_channel = 0; + shost->max_lun = ~0; + 
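The local-port table set up by qedi_cm_alloc_mem() above is a small next-fit bitmap allocator: qedi_alloc_new_id() searches from a rotating 'next' position to the end of the bitmap, wraps back to the beginning, and advances 'next' past the identifier it hands out. A self-contained sketch of that strategy (plain C with a bool array standing in for the kernel bitmap and find_next_zero_bit(); table size and base value are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define TBL_SIZE	64	/* stands in for QEDI_LOCAL_PORT_RANGE */
#define ID_INVALID	0xffffu	/* stands in for QEDI_LOCAL_PORT_INVALID */

static bool used[TBL_SIZE];
static unsigned int next_hint;	/* id_tbl->next in the driver */

/* Next-fit search: scan [next_hint, end), then wrap and scan [0, next_hint). */
static unsigned int alloc_new_id(unsigned int base)
{
	unsigned int i, id = ID_INVALID;

	for (i = next_hint; i < TBL_SIZE; i++)
		if (!used[i]) {
			id = i;
			break;
		}
	if (id == ID_INVALID)
		for (i = 0; i < next_hint; i++)
			if (!used[i]) {
				id = i;
				break;
			}

	if (id != ID_INVALID) {
		used[id] = true;
		/* the driver does (id + 1) & (max - 1), relying on a
		 * power-of-two table size; modulo is used here for clarity */
		next_hint = (id + 1) % TBL_SIZE;
		id += base;	/* id_tbl->start, i.e. QEDI_LOCAL_PORT_MIN */
	}
	return id;
}

int main(void)
{
	unsigned int base = 1024;	/* illustrative base, not the driver's */

	printf("%u %u %u\n", alloc_new_id(base), alloc_new_id(base),
	       alloc_new_id(base));
	return 0;
}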
shost->max_cmd_len = 16; + shost->transportt = qedi_scsi_transport; + + qedi = iscsi_host_priv(shost); + memset(qedi, 0, sizeof(*qedi)); + qedi->shost = shost; + qedi->dbg_ctx.host_no = shost->host_no; + qedi->pdev = pdev; + qedi->dbg_ctx.pdev = pdev; + qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA; + qedi->max_sqes = QEDI_SQ_SIZE; + + shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi); + + pci_set_drvdata(pdev, qedi); + +exit_setup_shost: + return qedi; +} + +static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2) +{ + struct qedi_ctx *qedi = (struct qedi_ctx *)cookie; + struct skb_work_list *work; + struct ethhdr *eh; + + if (!qedi) { + QEDI_ERR(NULL, "qedi is NULL\n"); + return -1; + } + + if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO, + "UIO DEV is not opened\n"); + kfree_skb(skb); + return 0; + } + + eh = (struct ethhdr *)skb->data; + /* Undo VLAN encapsulation */ + if (eh->h_proto == htons(ETH_P_8021Q)) { + memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); + eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN); + skb_reset_mac_header(skb); + } + + /* Filter out non FIP/FCoE frames here to free them faster */ + if (eh->h_proto != htons(ETH_P_ARP) && + eh->h_proto != htons(ETH_P_IP) && + eh->h_proto != htons(ETH_P_IPV6)) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2, + "Dropping frame ethertype [0x%x] len [0x%x].\n", + eh->h_proto, skb->len); + kfree_skb(skb); + return 0; + } + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2, + "Allowed frame ethertype [0x%x] len [0x%x].\n", + eh->h_proto, skb->len); + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + QEDI_WARN(&qedi->dbg_ctx, + "Could not allocate work so dropping frame.\n"); + kfree_skb(skb); + return 0; + } + + INIT_LIST_HEAD(&work->list); + work->skb = skb; + + if (skb_vlan_tag_present(skb)) + work->vlan_id = skb_vlan_tag_get(skb); + + if (work->vlan_id) + __vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id); + + spin_lock_bh(&qedi->ll2_lock); + list_add_tail(&work->list, &qedi->ll2_skb_list); + spin_unlock_bh(&qedi->ll2_lock); + + wake_up_process(qedi->ll2_recv_thread); + + return 0; +} + +/* map this skb to iscsiuio mmaped region */ +static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb, + u16 vlan_id) +{ + struct qedi_uio_dev *udev = NULL; + struct qedi_uio_ctrl *uctrl = NULL; + struct qedi_rx_bd rxbd; + struct qedi_rx_bd *p_rxbd; + u32 rx_bd_prod; + void *pkt; + int len = 0; + u32 prod; + + if (!qedi) { + QEDI_ERR(NULL, "qedi is NULL\n"); + return -1; + } + + udev = qedi->udev; + uctrl = udev->uctrl; + + ++uctrl->hw_rx_prod_cnt; + prod = (uctrl->hw_rx_prod + 1) % RX_RING; + + pkt = udev->rx_pkt + (prod * qedi_ll2_buf_size); + len = min_t(u32, skb->len, (u32)qedi_ll2_buf_size); + memcpy(pkt, skb->data, len); + + memset(&rxbd, 0, sizeof(rxbd)); + rxbd.rx_pkt_index = prod; + rxbd.rx_pkt_len = len; + rxbd.vlan_id = vlan_id; + + uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD; + rx_bd_prod = uctrl->hw_rx_bd_prod; + p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring; + p_rxbd += rx_bd_prod; + + memcpy(p_rxbd, &rxbd, sizeof(rxbd)); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2, + "hw_rx_prod [%d] prod [%d] hw_rx_bd_prod [%d] rx_pkt_idx [%d] rx_len [%d].\n", + uctrl->hw_rx_prod, prod, uctrl->hw_rx_bd_prod, + rxbd.rx_pkt_index, rxbd.rx_pkt_len); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2, + "host_rx_cons [%d] hw_rx_bd_cons [%d].\n", + uctrl->host_rx_cons, uctrl->host_rx_bd_cons); + + uctrl->hw_rx_prod = prod; + + /* notify the iscsiuio about 
new packet */ + uio_event_notify(&udev->qedi_uinfo); + + return 0; +} + +static void qedi_ll2_free_skbs(struct qedi_ctx *qedi) +{ + struct skb_work_list *work, *work_tmp; + + spin_lock_bh(&qedi->ll2_lock); + list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) { + list_del(&work->list); + kfree_skb(work->skb); + kfree(work); + } + spin_unlock_bh(&qedi->ll2_lock); +} + +static int qedi_ll2_recv_thread(void *arg) +{ + struct qedi_ctx *qedi = (struct qedi_ctx *)arg; + struct skb_work_list *work, *work_tmp; + + set_user_nice(current, -20); + + while (!kthread_should_stop()) { + spin_lock_bh(&qedi->ll2_lock); + list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, + list) { + list_del(&work->list); + qedi_ll2_process_skb(qedi, work->skb, work->vlan_id); + kfree_skb(work->skb); + kfree(work); + } + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_bh(&qedi->ll2_lock); + schedule(); + } + + __set_current_state(TASK_RUNNING); + return 0; +} + +static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi) +{ + u8 num_sq_pages; + u32 log_page_size; + int rval = 0; + + + num_sq_pages = (MAX_OUTSTANDING_TASKS_PER_CON * 8) / QEDI_PAGE_SIZE; + + qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Number of CQ count is %d\n", qedi->num_queues); + + memset(&qedi->pf_params.iscsi_pf_params, 0, + sizeof(qedi->pf_params.iscsi_pf_params)); + + qedi->p_cpuq = dma_alloc_coherent(&qedi->pdev->dev, + qedi->num_queues * sizeof(struct qedi_glbl_q_params), + &qedi->hw_p_cpuq, GFP_KERNEL); + if (!qedi->p_cpuq) { + QEDI_ERR(&qedi->dbg_ctx, "dma_alloc_coherent fail\n"); + rval = -1; + goto err_alloc_mem; + } + + rval = qedi_alloc_global_queues(qedi); + if (rval) { + QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n"); + rval = -1; + goto err_alloc_mem; + } + + qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA; + qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK; + qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10; + qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages; + qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages; + qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages; + qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues; + qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug; + qedi->pf_params.iscsi_pf_params.two_msl_timer = QED_TWO_MSL_TIMER_DFLT; + qedi->pf_params.iscsi_pf_params.tx_sws_timer = QED_TX_SWS_TIMER_DFLT; + qedi->pf_params.iscsi_pf_params.max_fin_rt = 2; + + for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) { + if ((1 << log_page_size) == QEDI_PAGE_SIZE) + break; + } + qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size; + + qedi->pf_params.iscsi_pf_params.glbl_q_params_addr = + (u64)qedi->hw_p_cpuq; + + /* RQ BDQ initializations. 
+ * rq_num_entries: suggested value for Initiator is 16 (4KB RQ) + * rqe_log_size: 8 for 256B RQE + */ + qedi->pf_params.iscsi_pf_params.rqe_log_size = 8; + /* BDQ address and size */ + qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] = + qedi->bdq_pbl_list_dma; + qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] = + qedi->bdq_pbl_list_num_entries; + qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE; + + /* cq_num_entries: num_tasks + rq_num_entries */ + qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048; + + qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX; + qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1; + +err_alloc_mem: + return rval; +} + +/* Free DMA coherent memory for array of queue pointers we pass to qed */ +static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi) +{ + size_t size = 0; + + if (qedi->p_cpuq) { + size = qedi->num_queues * sizeof(struct qedi_glbl_q_params); + dma_free_coherent(&qedi->pdev->dev, size, qedi->p_cpuq, + qedi->hw_p_cpuq); + } + + qedi_free_global_queues(qedi); + + kfree(qedi->global_queues); +} + +static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block, + struct qedi_boot_target *tgt, u8 index) +{ + u32 ipv6_en; + + ipv6_en = !!(block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_IPV6_ENABLED); + + snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s", + block->target[index].target_name.byte); + + tgt->ipv6_en = ipv6_en; + + if (ipv6_en) + snprintf(tgt->ip_addr, IPV6_LEN, "%pI6\n", + block->target[index].ipv6_addr.byte); + else + snprintf(tgt->ip_addr, IPV4_LEN, "%pI4\n", + block->target[index].ipv4_addr.byte); +} + +static int qedi_find_boot_info(struct qedi_ctx *qedi, + struct qed_mfw_tlv_iscsi *iscsi, + struct nvm_iscsi_block *block) +{ + struct qedi_boot_target *pri_tgt = NULL, *sec_tgt = NULL; + u32 pri_ctrl_flags = 0, sec_ctrl_flags = 0, found = 0; + struct iscsi_cls_session *cls_sess; + struct iscsi_cls_conn *cls_conn; + struct qedi_conn *qedi_conn; + struct iscsi_session *sess; + struct iscsi_conn *conn; + char ep_ip_addr[64]; + int i, ret = 0; + + pri_ctrl_flags = !!(block->target[0].ctrl_flags & + NVM_ISCSI_CFG_TARGET_ENABLED); + if (pri_ctrl_flags) { + pri_tgt = kzalloc(sizeof(*pri_tgt), GFP_KERNEL); + if (!pri_tgt) + return -1; + qedi_get_boot_tgt_info(block, pri_tgt, 0); + } + + sec_ctrl_flags = !!(block->target[1].ctrl_flags & + NVM_ISCSI_CFG_TARGET_ENABLED); + if (sec_ctrl_flags) { + sec_tgt = kzalloc(sizeof(*sec_tgt), GFP_KERNEL); + if (!sec_tgt) { + ret = -1; + goto free_tgt; + } + qedi_get_boot_tgt_info(block, sec_tgt, 1); + } + + for (i = 0; i < qedi->max_active_conns; i++) { + qedi_conn = qedi_get_conn_from_id(qedi, i); + if (!qedi_conn) + continue; + + if (qedi_conn->ep->ip_type == TCP_IPV4) + snprintf(ep_ip_addr, IPV4_LEN, "%pI4\n", + qedi_conn->ep->dst_addr); + else + snprintf(ep_ip_addr, IPV6_LEN, "%pI6\n", + qedi_conn->ep->dst_addr); + + cls_conn = qedi_conn->cls_conn; + conn = cls_conn->dd_data; + cls_sess = iscsi_conn_to_session(cls_conn); + sess = cls_sess->dd_data; + + if (!iscsi_is_session_online(cls_sess)) + continue; + + if (!sess->targetname) + continue; + + if (pri_ctrl_flags) { + if (!strcmp(pri_tgt->iscsi_name, sess->targetname) && + !strcmp(pri_tgt->ip_addr, ep_ip_addr)) { + found = 1; + break; + } + } + + if (sec_ctrl_flags) { + if (!strcmp(sec_tgt->iscsi_name, sess->targetname) && + !strcmp(sec_tgt->ip_addr, ep_ip_addr)) { + found = 1; + break; + } + } + } + + if (found) { + if (conn->hdrdgst_en) { + iscsi->header_digest_set = true; + 
iscsi->header_digest = 1; + } + + if (conn->datadgst_en) { + iscsi->data_digest_set = true; + iscsi->data_digest = 1; + } + iscsi->boot_taget_portal_set = true; + iscsi->boot_taget_portal = sess->tpgt; + + } else { + ret = -1; + } + + if (sec_ctrl_flags) + kfree(sec_tgt); +free_tgt: + if (pri_ctrl_flags) + kfree(pri_tgt); + + return ret; +} + +static void qedi_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data) +{ + struct qedi_ctx *qedi; + + if (!dev) { + QEDI_INFO(NULL, QEDI_LOG_EVT, + "dev is NULL so ignoring get_generic_tlv_data request.\n"); + return; + } + qedi = (struct qedi_ctx *)dev; + + memset(data, 0, sizeof(struct qed_generic_tlvs)); + ether_addr_copy(data->mac[0], qedi->mac); +} + +/* + * Protocol TLV handler + */ +static void qedi_get_protocol_tlv_data(void *dev, void *data) +{ + struct qed_mfw_tlv_iscsi *iscsi = data; + struct qed_iscsi_stats *fw_iscsi_stats; + struct nvm_iscsi_block *block = NULL; + u32 chap_en = 0, mchap_en = 0; + struct qedi_ctx *qedi = dev; + int rval = 0; + + fw_iscsi_stats = kmalloc(sizeof(*fw_iscsi_stats), GFP_KERNEL); + if (!fw_iscsi_stats) { + QEDI_ERR(&qedi->dbg_ctx, + "Could not allocate memory for fw_iscsi_stats.\n"); + goto exit_get_data; + } + + mutex_lock(&qedi->stats_lock); + /* Query firmware for offload stats */ + qedi_ops->get_stats(qedi->cdev, fw_iscsi_stats); + mutex_unlock(&qedi->stats_lock); + + iscsi->rx_frames_set = true; + iscsi->rx_frames = fw_iscsi_stats->iscsi_rx_packet_cnt; + iscsi->rx_bytes_set = true; + iscsi->rx_bytes = fw_iscsi_stats->iscsi_rx_bytes_cnt; + iscsi->tx_frames_set = true; + iscsi->tx_frames = fw_iscsi_stats->iscsi_tx_packet_cnt; + iscsi->tx_bytes_set = true; + iscsi->tx_bytes = fw_iscsi_stats->iscsi_tx_bytes_cnt; + iscsi->frame_size_set = true; + iscsi->frame_size = qedi->ll2_mtu; + block = qedi_get_nvram_block(qedi); + if (block) { + chap_en = !!(block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_CHAP_ENABLED); + mchap_en = !!(block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED); + + iscsi->auth_method_set = (chap_en || mchap_en) ? 
true : false; + iscsi->auth_method = 1; + if (chap_en) + iscsi->auth_method = 2; + if (mchap_en) + iscsi->auth_method = 3; + + iscsi->tx_desc_size_set = true; + iscsi->tx_desc_size = QEDI_SQ_SIZE; + iscsi->rx_desc_size_set = true; + iscsi->rx_desc_size = QEDI_CQ_SIZE; + + /* tpgt, hdr digest, data digest */ + rval = qedi_find_boot_info(qedi, iscsi, block); + if (rval) + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Boot target not set"); + } + + kfree(fw_iscsi_stats); +exit_get_data: + return; +} + +void qedi_schedule_hw_err_handler(void *dev, + enum qed_hw_err_type err_type) +{ + struct qedi_ctx *qedi = (struct qedi_ctx *)dev; + unsigned long override_flags = qedi_flags_override; + + if (override_flags && test_bit(QEDI_ERR_OVERRIDE_EN, &override_flags)) + qedi->qedi_err_flags = qedi_flags_override; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "HW error handler scheduled, err=%d err_flags=0x%x\n", + err_type, qedi->qedi_err_flags); + + switch (err_type) { + case QED_HW_ERR_FAN_FAIL: + schedule_delayed_work(&qedi->board_disable_work, 0); + break; + case QED_HW_ERR_MFW_RESP_FAIL: + case QED_HW_ERR_HW_ATTN: + case QED_HW_ERR_DMAE_FAIL: + case QED_HW_ERR_RAMROD_FAIL: + case QED_HW_ERR_FW_ASSERT: + /* Prevent HW attentions from being reasserted */ + if (test_bit(QEDI_ERR_ATTN_CLR_EN, &qedi->qedi_err_flags)) + qedi_ops->common->attn_clr_enable(qedi->cdev, true); + + if (err_type == QED_HW_ERR_RAMROD_FAIL && + test_bit(QEDI_ERR_IS_RECOVERABLE, &qedi->qedi_err_flags)) + qedi_ops->common->recovery_process(qedi->cdev); + + break; + default: + break; + } +} + +static void qedi_schedule_recovery_handler(void *dev) +{ + struct qedi_ctx *qedi = dev; + + QEDI_ERR(&qedi->dbg_ctx, "Recovery handler scheduled.\n"); + + if (test_and_set_bit(QEDI_IN_RECOVERY, &qedi->flags)) + return; + + atomic_set(&qedi->link_state, QEDI_LINK_DOWN); + + schedule_delayed_work(&qedi->recovery_work, 0); +} + +static void qedi_set_conn_recovery(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *session = cls_session->dd_data; + struct iscsi_conn *conn = session->leadconn; + struct qedi_conn *qedi_conn = conn->dd_data; + + qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn); +} + +static void qedi_link_update(void *dev, struct qed_link_output *link) +{ + struct qedi_ctx *qedi = (struct qedi_ctx *)dev; + + if (link->link_up) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n"); + atomic_set(&qedi->link_state, QEDI_LINK_UP); + } else { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Link Down event.\n"); + atomic_set(&qedi->link_state, QEDI_LINK_DOWN); + iscsi_host_for_each_session(qedi->shost, qedi_set_conn_recovery); + } +} + +static struct qed_iscsi_cb_ops qedi_cb_ops = { + { + .link_update = qedi_link_update, + .schedule_recovery_handler = qedi_schedule_recovery_handler, + .schedule_hw_err_handler = qedi_schedule_hw_err_handler, + .get_protocol_tlv_data = qedi_get_protocol_tlv_data, + .get_generic_tlv_data = qedi_get_generic_tlv_data, + } +}; + +static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe, + u16 que_idx, struct qedi_percpu_s *p) +{ + struct qedi_work *qedi_work; + struct qedi_conn *q_conn; + struct qedi_cmd *qedi_cmd; + u32 iscsi_cid; + int rc = 0; + + iscsi_cid = cqe->cqe_common.conn_id; + q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; + if (!q_conn) { + QEDI_WARN(&qedi->dbg_ctx, + "Session no longer exists for cid=0x%x!!\n", + iscsi_cid); + return -1; + } + + switch (cqe->cqe_common.cqe_type) { + case ISCSI_CQE_TYPE_SOLICITED: + case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE: + 
qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid); + if (!qedi_cmd) { + rc = -1; + break; + } + INIT_LIST_HEAD(&qedi_cmd->cqe_work.list); + qedi_cmd->cqe_work.qedi = qedi; + memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe)); + qedi_cmd->cqe_work.que_idx = que_idx; + qedi_cmd->cqe_work.is_solicited = true; + list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list); + break; + case ISCSI_CQE_TYPE_UNSOLICITED: + case ISCSI_CQE_TYPE_DUMMY: + case ISCSI_CQE_TYPE_TASK_CLEANUP: + qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC); + if (!qedi_work) { + rc = -1; + break; + } + INIT_LIST_HEAD(&qedi_work->list); + qedi_work->qedi = qedi; + memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe)); + qedi_work->que_idx = que_idx; + qedi_work->is_solicited = false; + list_add_tail(&qedi_work->list, &p->work_list); + break; + default: + rc = -1; + QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n"); + } + return rc; +} + +static bool qedi_process_completions(struct qedi_fastpath *fp) +{ + struct qedi_ctx *qedi = fp->qedi; + struct qed_sb_info *sb_info = fp->sb_info; + struct status_block *sb = sb_info->sb_virt; + struct qedi_percpu_s *p = NULL; + struct global_queue *que; + u16 prod_idx; + unsigned long flags; + union iscsi_cqe *cqe; + int cpu; + int ret; + + /* Get the current firmware producer index */ + prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX]; + + if (prod_idx >= QEDI_CQ_SIZE) + prod_idx = prod_idx % QEDI_CQ_SIZE; + + que = qedi->global_queues[fp->sb_id]; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, + "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n", + que, prod_idx, que->cq_cons_idx, fp->sb_id); + + qedi->intr_cpu = fp->sb_id; + cpu = smp_processor_id(); + p = &per_cpu(qedi_percpu, cpu); + + if (unlikely(!p->iothread)) + WARN_ON(1); + + spin_lock_irqsave(&p->p_work_lock, flags); + while (que->cq_cons_idx != prod_idx) { + cqe = &que->cq[que->cq_cons_idx]; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, + "cqe=%p prod_idx=%d cons_idx=%d.\n", + cqe, prod_idx, que->cq_cons_idx); + + ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p); + if (ret) + QEDI_WARN(&qedi->dbg_ctx, + "Dropping CQE 0x%x for cid=0x%x.\n", + que->cq_cons_idx, cqe->cqe_common.conn_id); + + que->cq_cons_idx++; + if (que->cq_cons_idx == QEDI_CQ_SIZE) + que->cq_cons_idx = 0; + } + wake_up_process(p->iothread); + spin_unlock_irqrestore(&p->p_work_lock, flags); + + return true; +} + +static bool qedi_fp_has_work(struct qedi_fastpath *fp) +{ + struct qedi_ctx *qedi = fp->qedi; + struct global_queue *que; + struct qed_sb_info *sb_info = fp->sb_info; + struct status_block *sb = sb_info->sb_virt; + u16 prod_idx; + + barrier(); + + /* Get the current firmware producer index */ + prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX]; + + /* Get the pointer to the global CQ this completion is on */ + que = qedi->global_queues[fp->sb_id]; + + /* prod idx wrap around uint16 */ + if (prod_idx >= QEDI_CQ_SIZE) + prod_idx = prod_idx % QEDI_CQ_SIZE; + + return (que->cq_cons_idx != prod_idx); +} + +/* MSI-X fastpath handler code */ +static irqreturn_t qedi_msix_handler(int irq, void *dev_id) +{ + struct qedi_fastpath *fp = dev_id; + struct qedi_ctx *qedi = fp->qedi; + bool wake_io_thread = true; + + qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); + +process_again: + wake_io_thread = qedi_process_completions(fp); + if (wake_io_thread) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, + "process already running\n"); + } + + if (!qedi_fp_has_work(fp)) + qed_sb_update_sb_idx(fp->sb_info); + + /* Check for more work */ + rmb(); + + if 
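qedi_process_completions() above drains a completion queue by advancing the driver's consumer index toward the firmware producer index read from the status block, with both indices wrapping at QEDI_CQ_SIZE. A minimal ring-drain sketch of that pattern (illustrative only; a plain array replaces the DMA-mapped CQ and the per-CPU work lists):

#include <stdio.h>

#define CQ_SIZE	8	/* stands in for QEDI_CQ_SIZE */

struct ring {
	int entries[CQ_SIZE];
	unsigned int cons_idx;	/* que->cq_cons_idx in the driver */
};

/* Consume entries until the consumer index catches up with the producer
 * index reported by the hardware; both wrap at CQ_SIZE. */
static void drain(struct ring *r, unsigned int prod_idx)
{
	prod_idx %= CQ_SIZE;	/* the driver clamps prod_idx the same way */

	while (r->cons_idx != prod_idx) {
		printf("consume %d at idx %u\n",
		       r->entries[r->cons_idx], r->cons_idx);

		if (++r->cons_idx == CQ_SIZE)
			r->cons_idx = 0;
	}
}

int main(void)
{
	struct ring r = { .entries = { 10, 11, 12, 13, 14, 15, 16, 17 } };

	drain(&r, 3);	/* consumes idx 0..2 */
	drain(&r, 1);	/* consumes idx 3..7, wraps, then idx 0 */
	return 0;
}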
(!qedi_fp_has_work(fp)) + qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); + else + goto process_again; + + return IRQ_HANDLED; +} + +/* simd handler for MSI/INTa */ +static void qedi_simd_int_handler(void *cookie) +{ + /* Cookie is qedi_ctx struct */ + struct qedi_ctx *qedi = (struct qedi_ctx *)cookie; + + QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi); +} + +#define QEDI_SIMD_HANDLER_NUM 0 +static void qedi_sync_free_irqs(struct qedi_ctx *qedi) +{ + int i; + u16 idx; + + if (qedi->int_info.msix_cnt) { + for (i = 0; i < qedi->int_info.used_cnt; i++) { + idx = i * qedi->dev_info.common.num_hwfns + + qedi_ops->common->get_affin_hwfn_idx(qedi->cdev); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Freeing IRQ #%d vector_idx=%d.\n", i, idx); + + synchronize_irq(qedi->int_info.msix[idx].vector); + irq_set_affinity_hint(qedi->int_info.msix[idx].vector, + NULL); + free_irq(qedi->int_info.msix[idx].vector, + &qedi->fp_array[i]); + } + } else { + qedi_ops->common->simd_handler_clean(qedi->cdev, + QEDI_SIMD_HANDLER_NUM); + } + + qedi->int_info.used_cnt = 0; + qedi_ops->common->set_fp_int(qedi->cdev, 0); +} + +static int qedi_request_msix_irq(struct qedi_ctx *qedi) +{ + int i, rc, cpu; + u16 idx; + + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < qedi->msix_count; i++) { + idx = i * qedi->dev_info.common.num_hwfns + + qedi_ops->common->get_affin_hwfn_idx(qedi->cdev); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n", + qedi->dev_info.common.num_hwfns, + qedi_ops->common->get_affin_hwfn_idx(qedi->cdev)); + + rc = request_irq(qedi->int_info.msix[idx].vector, + qedi_msix_handler, 0, "qedi", + &qedi->fp_array[i]); + if (rc) { + QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n"); + qedi_sync_free_irqs(qedi); + return rc; + } + qedi->int_info.used_cnt++; + rc = irq_set_affinity_hint(qedi->int_info.msix[idx].vector, + get_cpu_mask(cpu)); + cpu = cpumask_next(cpu, cpu_online_mask); + } + + return 0; +} + +static int qedi_setup_int(struct qedi_ctx *qedi) +{ + int rc = 0; + + rc = qedi_ops->common->set_fp_int(qedi->cdev, qedi->num_queues); + if (rc < 0) + goto exit_setup_int; + + qedi->msix_count = rc; + + rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info); + if (rc) + goto exit_setup_int; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, + "Number of msix_cnt = 0x%x num of cpus = 0x%x\n", + qedi->int_info.msix_cnt, num_online_cpus()); + + if (qedi->int_info.msix_cnt) { + rc = qedi_request_msix_irq(qedi); + goto exit_setup_int; + } else { + qedi_ops->common->simd_handler_config(qedi->cdev, &qedi, + QEDI_SIMD_HANDLER_NUM, + qedi_simd_int_handler); + qedi->int_info.used_cnt = 1; + } + +exit_setup_int: + return rc; +} + +static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi) +{ + if (qedi->iscsi_image) + dma_free_coherent(&qedi->pdev->dev, + sizeof(struct qedi_nvm_iscsi_image), + qedi->iscsi_image, qedi->nvm_buf_dma); +} + +static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) +{ + qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev, + sizeof(struct qedi_nvm_iscsi_image), + &qedi->nvm_buf_dma, GFP_KERNEL); + if (!qedi->iscsi_image) { + QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); + return -ENOMEM; + } + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image, + qedi->nvm_buf_dma); + + return 0; +} + +static void qedi_free_bdq(struct qedi_ctx *qedi) +{ + int i; + + if (qedi->bdq_pbl_list) + dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, + qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma); + + if 
(qedi->bdq_pbl) + dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size, + qedi->bdq_pbl, qedi->bdq_pbl_dma); + + for (i = 0; i < QEDI_BDQ_NUM; i++) { + if (qedi->bdq[i].buf_addr) { + dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE, + qedi->bdq[i].buf_addr, + qedi->bdq[i].buf_dma); + } + } +} + +static void qedi_free_global_queues(struct qedi_ctx *qedi) +{ + int i; + struct global_queue **gl = qedi->global_queues; + + for (i = 0; i < qedi->num_queues; i++) { + if (!gl[i]) + continue; + + if (gl[i]->cq) + dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size, + gl[i]->cq, gl[i]->cq_dma); + if (gl[i]->cq_pbl) + dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size, + gl[i]->cq_pbl, gl[i]->cq_pbl_dma); + + kfree(gl[i]); + } + qedi_free_bdq(qedi); + qedi_free_nvm_iscsi_cfg(qedi); +} + +static int qedi_alloc_bdq(struct qedi_ctx *qedi) +{ + int i; + struct scsi_bd *pbl; + u64 *list; + + /* Alloc dma memory for BDQ buffers */ + for (i = 0; i < QEDI_BDQ_NUM; i++) { + qedi->bdq[i].buf_addr = + dma_alloc_coherent(&qedi->pdev->dev, + QEDI_BDQ_BUF_SIZE, + &qedi->bdq[i].buf_dma, + GFP_KERNEL); + if (!qedi->bdq[i].buf_addr) { + QEDI_ERR(&qedi->dbg_ctx, + "Could not allocate BDQ buffer %d.\n", i); + return -ENOMEM; + } + } + + /* Alloc dma memory for BDQ page buffer list */ + qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd); + qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, QEDI_PAGE_SIZE); + qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n", + qedi->rq_num_entries); + + qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev, + qedi->bdq_pbl_mem_size, + &qedi->bdq_pbl_dma, GFP_KERNEL); + if (!qedi->bdq_pbl) { + QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n"); + return -ENOMEM; + } + + /* + * Populate BDQ PBL with physical and virtual address of individual + * BDQ buffers + */ + pbl = (struct scsi_bd *)qedi->bdq_pbl; + for (i = 0; i < QEDI_BDQ_NUM; i++) { + pbl->address.hi = + cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma)); + pbl->address.lo = + cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma)); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n", + pbl, pbl->address.hi, pbl->address.lo, i); + pbl->opaque.iscsi_opaque.reserved_zero[0] = 0; + pbl->opaque.iscsi_opaque.reserved_zero[1] = 0; + pbl->opaque.iscsi_opaque.reserved_zero[2] = 0; + pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i); + pbl++; + } + + /* Allocate list of PBL pages */ + qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev, + QEDI_PAGE_SIZE, + &qedi->bdq_pbl_list_dma, + GFP_KERNEL); + if (!qedi->bdq_pbl_list) { + QEDI_ERR(&qedi->dbg_ctx, + "Could not allocate list of PBL pages.\n"); + return -ENOMEM; + } + + /* + * Now populate PBL list with pages that contain pointers to the + * individual buffers. + */ + qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / + QEDI_PAGE_SIZE; + list = (u64 *)qedi->bdq_pbl_list; + for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) { + *list = qedi->bdq_pbl_dma; + list++; + } + + return 0; +} + +static int qedi_alloc_global_queues(struct qedi_ctx *qedi) +{ + u32 *list; + int i; + int status; + u32 *pbl; + dma_addr_t page; + int num_pages; + + /* + * Number of global queues (CQ / RQ). 
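The PBL population loop in qedi_alloc_bdq() above stores each BDQ buffer's DMA address as a pair of 32-bit words (QEDI_U64_LO()/QEDI_U64_HI(), converted to little endian); the CQ and SQ page lists built later in this file use the same split. A short standalone sketch of that split (host-endian for brevity; the driver additionally applies cpu_to_le32()):

#include <stdint.h>
#include <stdio.h>

/* One page-buffer-list (PBL) entry: the firmware consumes 64-bit bus
 * addresses as a lo/hi pair of 32-bit words. */
struct pbl_entry {
	uint32_t lo;
	uint32_t hi;
};

static struct pbl_entry pbl_from_addr(uint64_t dma_addr)
{
	struct pbl_entry e = {
		.lo = (uint32_t)dma_addr,		/* QEDI_U64_LO() */
		.hi = (uint32_t)(dma_addr >> 32),	/* QEDI_U64_HI() */
	};

	return e;
}

int main(void)
{
	uint64_t page = 0x0000001234567000ULL;	/* example bus address */
	int i;

	for (i = 0; i < 4; i++, page += 4096) {
		struct pbl_entry e = pbl_from_addr(page);

		printf("page %d: lo=0x%08x hi=0x%08x\n", i,
		       (unsigned int)e.lo, (unsigned int)e.hi);
	}
	return 0;
}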
This should + * be <= number of available MSIX vectors for the PF + */ + if (!qedi->num_queues) { + QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n"); + return -ENOMEM; + } + + /* Make sure we allocated the PBL that will contain the physical + * addresses of our queues + */ + if (!qedi->p_cpuq) { + status = -EINVAL; + goto mem_alloc_failure; + } + + qedi->global_queues = kzalloc((sizeof(struct global_queue *) * + qedi->num_queues), GFP_KERNEL); + if (!qedi->global_queues) { + QEDI_ERR(&qedi->dbg_ctx, + "Unable to allocate global queues array ptr memory\n"); + return -ENOMEM; + } + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, + "qedi->global_queues=%p.\n", qedi->global_queues); + + /* Allocate DMA coherent buffers for BDQ */ + status = qedi_alloc_bdq(qedi); + if (status) + goto mem_alloc_failure; + + /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */ + status = qedi_alloc_nvm_iscsi_cfg(qedi); + if (status) + goto mem_alloc_failure; + + /* Allocate a CQ and an associated PBL for each MSI-X + * vector. + */ + for (i = 0; i < qedi->num_queues; i++) { + qedi->global_queues[i] = + kzalloc(sizeof(*qedi->global_queues[0]), + GFP_KERNEL); + if (!qedi->global_queues[i]) { + QEDI_ERR(&qedi->dbg_ctx, + "Unable to allocation global queue %d.\n", i); + status = -ENOMEM; + goto mem_alloc_failure; + } + + qedi->global_queues[i]->cq_mem_size = + (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe); + qedi->global_queues[i]->cq_mem_size = + (qedi->global_queues[i]->cq_mem_size + + (QEDI_PAGE_SIZE - 1)); + + qedi->global_queues[i]->cq_pbl_size = + (qedi->global_queues[i]->cq_mem_size / + QEDI_PAGE_SIZE) * sizeof(void *); + qedi->global_queues[i]->cq_pbl_size = + (qedi->global_queues[i]->cq_pbl_size + + (QEDI_PAGE_SIZE - 1)); + + qedi->global_queues[i]->cq = dma_alloc_coherent(&qedi->pdev->dev, + qedi->global_queues[i]->cq_mem_size, + &qedi->global_queues[i]->cq_dma, + GFP_KERNEL); + + if (!qedi->global_queues[i]->cq) { + QEDI_WARN(&qedi->dbg_ctx, + "Could not allocate cq.\n"); + status = -ENOMEM; + goto mem_alloc_failure; + } + qedi->global_queues[i]->cq_pbl = dma_alloc_coherent(&qedi->pdev->dev, + qedi->global_queues[i]->cq_pbl_size, + &qedi->global_queues[i]->cq_pbl_dma, + GFP_KERNEL); + + if (!qedi->global_queues[i]->cq_pbl) { + QEDI_WARN(&qedi->dbg_ctx, + "Could not allocate cq PBL.\n"); + status = -ENOMEM; + goto mem_alloc_failure; + } + + /* Create PBL */ + num_pages = qedi->global_queues[i]->cq_mem_size / + QEDI_PAGE_SIZE; + page = qedi->global_queues[i]->cq_dma; + pbl = (u32 *)qedi->global_queues[i]->cq_pbl; + + while (num_pages--) { + *pbl = (u32)page; + pbl++; + *pbl = (u32)((u64)page >> 32); + pbl++; + page += QEDI_PAGE_SIZE; + } + } + + list = (u32 *)qedi->p_cpuq; + + /* + * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer, + * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points + * to the physical address which contains an array of pointers to the + * physical addresses of the specific queue pages. 
+ */ + for (i = 0; i < qedi->num_queues; i++) { + *list = (u32)qedi->global_queues[i]->cq_pbl_dma; + list++; + *list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32); + list++; + + *list = (u32)0; + list++; + *list = (u32)((u64)0 >> 32); + list++; + } + + return 0; + +mem_alloc_failure: + qedi_free_global_queues(qedi); + return status; +} + +int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep) +{ + int rval = 0; + u32 *pbl; + dma_addr_t page; + int num_pages; + + if (!ep) + return -EIO; + + /* Calculate appropriate queue and PBL sizes */ + ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe); + ep->sq_mem_size += QEDI_PAGE_SIZE - 1; + + ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *); + ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE; + + ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size, + &ep->sq_dma, GFP_KERNEL); + if (!ep->sq) { + QEDI_WARN(&qedi->dbg_ctx, + "Could not allocate send queue.\n"); + rval = -ENOMEM; + goto out; + } + ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size, + &ep->sq_pbl_dma, GFP_KERNEL); + if (!ep->sq_pbl) { + QEDI_WARN(&qedi->dbg_ctx, + "Could not allocate send queue PBL.\n"); + rval = -ENOMEM; + goto out_free_sq; + } + + /* Create PBL */ + num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE; + page = ep->sq_dma; + pbl = (u32 *)ep->sq_pbl; + + while (num_pages--) { + *pbl = (u32)page; + pbl++; + *pbl = (u32)((u64)page >> 32); + pbl++; + page += QEDI_PAGE_SIZE; + } + + return rval; + +out_free_sq: + dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq, + ep->sq_dma); +out: + return rval; +} + +void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep) +{ + if (ep->sq_pbl) + dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl, + ep->sq_pbl_dma); + if (ep->sq) + dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq, + ep->sq_dma); +} + +int qedi_get_task_idx(struct qedi_ctx *qedi) +{ + s16 tmp_idx; + +again: + tmp_idx = find_first_zero_bit(qedi->task_idx_map, + MAX_ISCSI_TASK_ENTRIES); + + if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) { + QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n"); + tmp_idx = -1; + goto err_idx; + } + + if (test_and_set_bit(tmp_idx, qedi->task_idx_map)) + goto again; + +err_idx: + return tmp_idx; +} + +void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx) +{ + if (!test_and_clear_bit(idx, qedi->task_idx_map)) + QEDI_ERR(&qedi->dbg_ctx, + "FW task context, already cleared, tid=0x%x\n", idx); +} + +void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, + struct qedi_cmd *cmd) +{ + qedi->itt_map[tid].itt = proto_itt; + qedi->itt_map[tid].p_cmd = cmd; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "update itt map tid=0x%x, with proto itt=0x%x\n", tid, + qedi->itt_map[tid].itt); +} + +void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid) +{ + u16 i; + + for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) { + if (qedi->itt_map[i].itt == itt) { + *tid = i; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "Ref itt=0x%x, found at tid=0x%x\n", + itt, *tid); + return; + } + } + + WARN_ON(1); +} + +void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt) +{ + *proto_itt = qedi->itt_map[tid].itt; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "Get itt map tid [0x%x with proto itt[0x%x]", + tid, *proto_itt); +} + +struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid) +{ + struct qedi_cmd *cmd = NULL; + + if (tid >= MAX_ISCSI_TASK_ENTRIES) + return NULL; + + cmd = qedi->itt_map[tid].p_cmd; + if 
(cmd->task_id != tid) + return NULL; + + qedi->itt_map[tid].p_cmd = NULL; + + return cmd; +} + +static int qedi_alloc_itt(struct qedi_ctx *qedi) +{ + qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES, + sizeof(struct qedi_itt_map), GFP_KERNEL); + if (!qedi->itt_map) { + QEDI_ERR(&qedi->dbg_ctx, + "Unable to allocate itt map array memory\n"); + return -ENOMEM; + } + return 0; +} + +static void qedi_free_itt(struct qedi_ctx *qedi) +{ + kfree(qedi->itt_map); +} + +static struct qed_ll2_cb_ops qedi_ll2_cb_ops = { + .rx_cb = qedi_ll2_rx, + .tx_cb = NULL, +}; + +static int qedi_percpu_io_thread(void *arg) +{ + struct qedi_percpu_s *p = arg; + struct qedi_work *work, *tmp; + unsigned long flags; + LIST_HEAD(work_list); + + set_user_nice(current, -20); + + while (!kthread_should_stop()) { + spin_lock_irqsave(&p->p_work_lock, flags); + while (!list_empty(&p->work_list)) { + list_splice_init(&p->work_list, &work_list); + spin_unlock_irqrestore(&p->p_work_lock, flags); + + list_for_each_entry_safe(work, tmp, &work_list, list) { + list_del_init(&work->list); + qedi_fp_process_cqes(work); + if (!work->is_solicited) + kfree(work); + } + cond_resched(); + spin_lock_irqsave(&p->p_work_lock, flags); + } + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&p->p_work_lock, flags); + schedule(); + } + __set_current_state(TASK_RUNNING); + + return 0; +} + +static int qedi_cpu_online(unsigned int cpu) +{ + struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu); + struct task_struct *thread; + + thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p, + cpu_to_node(cpu), + "qedi_thread/%d", cpu); + if (IS_ERR(thread)) + return PTR_ERR(thread); + + kthread_bind(thread, cpu); + p->iothread = thread; + wake_up_process(thread); + return 0; +} + +static int qedi_cpu_offline(unsigned int cpu) +{ + struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu); + struct qedi_work *work, *tmp; + struct task_struct *thread; + unsigned long flags; + + spin_lock_irqsave(&p->p_work_lock, flags); + thread = p->iothread; + p->iothread = NULL; + + list_for_each_entry_safe(work, tmp, &p->work_list, list) { + list_del_init(&work->list); + qedi_fp_process_cqes(work); + if (!work->is_solicited) + kfree(work); + } + + spin_unlock_irqrestore(&p->p_work_lock, flags); + if (thread) + kthread_stop(thread); + return 0; +} + +void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu) +{ + struct qed_ll2_params params; + + qedi_recover_all_conns(qedi); + + qedi_ops->ll2->stop(qedi->cdev); + qedi_ll2_free_skbs(qedi); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n", + qedi->ll2_mtu, mtu); + memset(&params, 0, sizeof(params)); + qedi->ll2_mtu = mtu; + params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN; + params.drop_ttl0_packets = 0; + params.rx_vlan_stripping = 1; + ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac); + qedi_ops->ll2->start(qedi->cdev, &params); +} + +/* + * qedi_get_nvram_block: - Scan through the iSCSI NVRAM block (while accounting + * for gaps) for the matching absolute-pf-id of the QEDI device.
+ */ +static struct nvm_iscsi_block * +qedi_get_nvram_block(struct qedi_ctx *qedi) +{ + int i; + u8 pf; + u32 flags; + struct nvm_iscsi_block *block; + + pf = qedi->dev_info.common.abs_pf_id; + block = &qedi->iscsi_image->iscsi_cfg.block[0]; + for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) { + flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >> + NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET; + if (flags & (NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY | + NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED) && + (pf == (block->id & NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK) + >> NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET)) + return block; + } + return NULL; +} + +static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf) +{ + struct qedi_ctx *qedi = data; + struct nvm_iscsi_initiator *initiator; + int rc = 1; + u32 ipv6_en, dhcp_en, ip_len; + struct nvm_iscsi_block *block; + char *fmt, *ip, *sub, *gw; + + block = qedi_get_nvram_block(qedi); + if (!block) + return 0; + + initiator = &block->initiator; + ipv6_en = block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_IPV6_ENABLED; + dhcp_en = block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED; + /* Static IP assignments. */ + fmt = ipv6_en ? "%pI6\n" : "%pI4\n"; + ip = ipv6_en ? initiator->ipv6.addr.byte : initiator->ipv4.addr.byte; + ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN; + sub = ipv6_en ? initiator->ipv6.subnet_mask.byte : + initiator->ipv4.subnet_mask.byte; + gw = ipv6_en ? initiator->ipv6.gateway.byte : + initiator->ipv4.gateway.byte; + /* DHCP IP adjustments. */ + fmt = dhcp_en ? "%s\n" : fmt; + if (dhcp_en) { + ip = ipv6_en ? "0::0" : "0.0.0.0"; + sub = ip; + gw = ip; + ip_len = ipv6_en ? 5 : 8; + } + + switch (type) { + case ISCSI_BOOT_ETH_IP_ADDR: + rc = snprintf(buf, ip_len, fmt, ip); + break; + case ISCSI_BOOT_ETH_SUBNET_MASK: + rc = snprintf(buf, ip_len, fmt, sub); + break; + case ISCSI_BOOT_ETH_GATEWAY: + rc = snprintf(buf, ip_len, fmt, gw); + break; + case ISCSI_BOOT_ETH_FLAGS: + rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT); + break; + case ISCSI_BOOT_ETH_INDEX: + rc = snprintf(buf, 3, "0\n"); + break; + case ISCSI_BOOT_ETH_MAC: + rc = sysfs_format_mac(buf, qedi->mac, ETH_ALEN); + break; + case ISCSI_BOOT_ETH_VLAN: + rc = snprintf(buf, 12, "%d\n", + GET_FIELD2(initiator->generic_cont0, + NVM_ISCSI_CFG_INITIATOR_VLAN)); + break; + case ISCSI_BOOT_ETH_ORIGIN: + if (dhcp_en) + rc = snprintf(buf, 3, "3\n"); + break; + default: + rc = 0; + break; + } + + return rc; +} + +static umode_t qedi_eth_get_attr_visibility(void *data, int type) +{ + int rc = 1; + + switch (type) { + case ISCSI_BOOT_ETH_FLAGS: + case ISCSI_BOOT_ETH_MAC: + case ISCSI_BOOT_ETH_INDEX: + case ISCSI_BOOT_ETH_IP_ADDR: + case ISCSI_BOOT_ETH_SUBNET_MASK: + case ISCSI_BOOT_ETH_GATEWAY: + case ISCSI_BOOT_ETH_ORIGIN: + case ISCSI_BOOT_ETH_VLAN: + rc = 0444; + break; + default: + rc = 0; + break; + } + return rc; +} + +static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf) +{ + struct qedi_ctx *qedi = data; + struct nvm_iscsi_initiator *initiator; + int rc; + struct nvm_iscsi_block *block; + + block = qedi_get_nvram_block(qedi); + if (!block) + return 0; + + initiator = &block->initiator; + + switch (type) { + case ISCSI_BOOT_INI_INITIATOR_NAME: + rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, + initiator->initiator_name.byte); + break; + default: + rc = 0; + break; + } + return rc; +} + +static umode_t qedi_ini_get_attr_visibility(void *data, int type) +{ + int rc; + + switch (type) { + case 
ISCSI_BOOT_INI_INITIATOR_NAME: + rc = 0444; + break; + default: + rc = 0; + break; + } + return rc; +} + +static ssize_t +qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, + char *buf, enum qedi_nvm_tgts idx) +{ + int rc = 1; + u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len; + struct nvm_iscsi_block *block; + char *chap_name, *chap_secret; + char *mchap_name, *mchap_secret; + + block = qedi_get_nvram_block(qedi); + if (!block) + goto exit_show_tgt_info; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT, + "Port:%d, tgt_idx:%d\n", + GET_FIELD2(block->id, NVM_ISCSI_CFG_BLK_MAPPED_PF_ID), idx); + + ctrl_flags = block->target[idx].ctrl_flags & + NVM_ISCSI_CFG_TARGET_ENABLED; + + if (!ctrl_flags) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT, + "Target disabled\n"); + goto exit_show_tgt_info; + } + + ipv6_en = block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_IPV6_ENABLED; + ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN; + chap_en = block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_CHAP_ENABLED; + chap_name = chap_en ? block->initiator.chap_name.byte : NULL; + chap_secret = chap_en ? block->initiator.chap_password.byte : NULL; + + mchap_en = block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED; + mchap_name = mchap_en ? block->target[idx].chap_name.byte : NULL; + mchap_secret = mchap_en ? block->target[idx].chap_password.byte : NULL; + + switch (type) { + case ISCSI_BOOT_TGT_NAME: + rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, + block->target[idx].target_name.byte); + break; + case ISCSI_BOOT_TGT_IP_ADDR: + if (ipv6_en) + rc = snprintf(buf, ip_len, "%pI6\n", + block->target[idx].ipv6_addr.byte); + else + rc = snprintf(buf, ip_len, "%pI4\n", + block->target[idx].ipv4_addr.byte); + break; + case ISCSI_BOOT_TGT_PORT: + rc = snprintf(buf, 12, "%d\n", + GET_FIELD2(block->target[idx].generic_cont0, + NVM_ISCSI_CFG_TARGET_TCP_PORT)); + break; + case ISCSI_BOOT_TGT_LUN: + rc = snprintf(buf, 22, "%.*d\n", + block->target[idx].lun.value[1], + block->target[idx].lun.value[0]); + break; + case ISCSI_BOOT_TGT_CHAP_NAME: + rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, + chap_name); + break; + case ISCSI_BOOT_TGT_CHAP_SECRET: + rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, + chap_secret); + break; + case ISCSI_BOOT_TGT_REV_CHAP_NAME: + rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, + mchap_name); + break; + case ISCSI_BOOT_TGT_REV_CHAP_SECRET: + rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, + mchap_secret); + break; + case ISCSI_BOOT_TGT_FLAGS: + rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT); + break; + case ISCSI_BOOT_TGT_NIC_ASSOC: + rc = snprintf(buf, 3, "0\n"); + break; + default: + rc = 0; + break; + } + +exit_show_tgt_info: + return rc; +} + +static ssize_t qedi_show_boot_tgt_pri_info(void *data, int type, char *buf) +{ + struct qedi_ctx *qedi = data; + + return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_PRI); +} + +static ssize_t qedi_show_boot_tgt_sec_info(void *data, int type, char *buf) +{ + struct qedi_ctx *qedi = data; + + return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_SEC); +} + +static umode_t qedi_tgt_get_attr_visibility(void *data, int type) +{ + int rc; + + switch (type) { + case ISCSI_BOOT_TGT_NAME: + case ISCSI_BOOT_TGT_IP_ADDR: + case ISCSI_BOOT_TGT_PORT: + case ISCSI_BOOT_TGT_LUN: + case ISCSI_BOOT_TGT_CHAP_NAME: + case ISCSI_BOOT_TGT_CHAP_SECRET: + case ISCSI_BOOT_TGT_REV_CHAP_NAME: + case ISCSI_BOOT_TGT_REV_CHAP_SECRET: + case ISCSI_BOOT_TGT_NIC_ASSOC: + case 
ISCSI_BOOT_TGT_FLAGS: + rc = 0444; + break; + default: + rc = 0; + break; + } + return rc; +} + +static void qedi_boot_release(void *data) +{ + struct qedi_ctx *qedi = data; + + scsi_host_put(qedi->shost); +} + +static int qedi_get_boot_info(struct qedi_ctx *qedi) +{ + int ret = 1; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Get NVM iSCSI CFG image\n"); + ret = qedi_ops->common->nvm_get_image(qedi->cdev, + QED_NVM_IMAGE_ISCSI_CFG, + (char *)qedi->iscsi_image, + sizeof(struct qedi_nvm_iscsi_image)); + if (ret) + QEDI_ERR(&qedi->dbg_ctx, + "Could not get NVM image. ret = %d\n", ret); + + return ret; +} + +static int qedi_setup_boot_info(struct qedi_ctx *qedi) +{ + struct iscsi_boot_kobj *boot_kobj; + + if (qedi_get_boot_info(qedi)) + return -EPERM; + + qedi->boot_kset = iscsi_boot_create_host_kset(qedi->shost->host_no); + if (!qedi->boot_kset) + goto kset_free; + + if (!scsi_host_get(qedi->shost)) + goto kset_free; + + boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 0, qedi, + qedi_show_boot_tgt_pri_info, + qedi_tgt_get_attr_visibility, + qedi_boot_release); + if (!boot_kobj) + goto put_host; + + if (!scsi_host_get(qedi->shost)) + goto kset_free; + + boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 1, qedi, + qedi_show_boot_tgt_sec_info, + qedi_tgt_get_attr_visibility, + qedi_boot_release); + if (!boot_kobj) + goto put_host; + + if (!scsi_host_get(qedi->shost)) + goto kset_free; + + boot_kobj = iscsi_boot_create_initiator(qedi->boot_kset, 0, qedi, + qedi_show_boot_ini_info, + qedi_ini_get_attr_visibility, + qedi_boot_release); + if (!boot_kobj) + goto put_host; + + if (!scsi_host_get(qedi->shost)) + goto kset_free; + + boot_kobj = iscsi_boot_create_ethernet(qedi->boot_kset, 0, qedi, + qedi_show_boot_eth_info, + qedi_eth_get_attr_visibility, + qedi_boot_release); + if (!boot_kobj) + goto put_host; + + return 0; + +put_host: + scsi_host_put(qedi->shost); +kset_free: + iscsi_boot_destroy_kset(qedi->boot_kset); + return -ENOMEM; +} + +static pci_ers_result_t qedi_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct qedi_ctx *qedi = pci_get_drvdata(pdev); + + QEDI_ERR(&qedi->dbg_ctx, "%s: PCI error detected [%d]\n", + __func__, state); + + if (test_and_set_bit(QEDI_IN_RECOVERY, &qedi->flags)) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Recovery already in progress.\n"); + return PCI_ERS_RESULT_NONE; + } + + qedi_ops->common->recovery_process(qedi->cdev); + + return PCI_ERS_RESULT_CAN_RECOVER; +} + +static void __qedi_remove(struct pci_dev *pdev, int mode) +{ + struct qedi_ctx *qedi = pci_get_drvdata(pdev); + int rval; + u16 retry = 10; + + if (mode == QEDI_MODE_NORMAL) + iscsi_host_remove(qedi->shost, false); + else if (mode == QEDI_MODE_SHUTDOWN) + iscsi_host_remove(qedi->shost, true); + + if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) { + if (qedi->tmf_thread) { + destroy_workqueue(qedi->tmf_thread); + qedi->tmf_thread = NULL; + } + + if (qedi->offload_thread) { + destroy_workqueue(qedi->offload_thread); + qedi->offload_thread = NULL; + } + } + +#ifdef CONFIG_DEBUG_FS + qedi_dbg_host_exit(&qedi->dbg_ctx); +#endif + if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) + qedi_ops->common->set_power_state(qedi->cdev, PCI_D0); + + qedi_sync_free_irqs(qedi); + + if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) { + while (retry--) { + rval = qedi_ops->stop(qedi->cdev); + if (rval < 0) + msleep(1000); + else + break; + } + qedi_ops->ll2->stop(qedi->cdev); + } + + cancel_delayed_work_sync(&qedi->recovery_work); + 
cancel_delayed_work_sync(&qedi->board_disable_work); + + qedi_free_iscsi_pf_param(qedi); + + rval = qedi_ops->common->update_drv_state(qedi->cdev, false); + if (rval) + QEDI_ERR(&qedi->dbg_ctx, "Failed to send drv state to MFW\n"); + + if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) { + qedi_ops->common->slowpath_stop(qedi->cdev); + qedi_ops->common->remove(qedi->cdev); + } + + qedi_destroy_fp(qedi); + + if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) { + qedi_release_cid_que(qedi); + qedi_cm_free_mem(qedi); + qedi_free_uio(qedi->udev); + qedi_free_itt(qedi); + + if (qedi->ll2_recv_thread) { + kthread_stop(qedi->ll2_recv_thread); + qedi->ll2_recv_thread = NULL; + } + qedi_ll2_free_skbs(qedi); + + if (qedi->boot_kset) + iscsi_boot_destroy_kset(qedi->boot_kset); + + iscsi_host_free(qedi->shost); + } +} + +static void qedi_board_disable_work(struct work_struct *work) +{ + struct qedi_ctx *qedi = + container_of(work, struct qedi_ctx, + board_disable_work.work); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Fan failure, Unloading firmware context.\n"); + + if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags)) + return; + + __qedi_remove(qedi->pdev, QEDI_MODE_NORMAL); +} + +static void qedi_shutdown(struct pci_dev *pdev) +{ + struct qedi_ctx *qedi = pci_get_drvdata(pdev); + + QEDI_ERR(&qedi->dbg_ctx, "%s: Shutdown qedi\n", __func__); + if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags)) + return; + __qedi_remove(pdev, QEDI_MODE_SHUTDOWN); +} + +static int qedi_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct qedi_ctx *qedi; + + if (!pdev) { + QEDI_ERR(NULL, "pdev is NULL.\n"); + return -ENODEV; + } + + qedi = pci_get_drvdata(pdev); + + QEDI_ERR(&qedi->dbg_ctx, "%s: Device does not support suspend operation\n", __func__); + + return -EPERM; +} + +static int __qedi_probe(struct pci_dev *pdev, int mode) +{ + struct qedi_ctx *qedi; + struct qed_ll2_params params; + u8 dp_level = 0; + bool is_vf = false; + char host_buf[16]; + struct qed_link_params link_params; + struct qed_slowpath_params sp_params; + struct qed_probe_params qed_params; + void *task_start, *task_end; + int rc; + u16 retry = 10; + + if (mode != QEDI_MODE_RECOVERY) { + qedi = qedi_host_alloc(pdev); + if (!qedi) { + rc = -ENOMEM; + goto exit_probe; + } + } else { + qedi = pci_get_drvdata(pdev); + } + +retry_probe: + if (mode == QEDI_MODE_RECOVERY) + msleep(2000); + + memset(&qed_params, 0, sizeof(qed_params)); + qed_params.protocol = QED_PROTOCOL_ISCSI; + qed_params.dp_module = qedi_qed_debug; + qed_params.dp_level = dp_level; + qed_params.is_vf = is_vf; + qedi->cdev = qedi_ops->common->probe(pdev, &qed_params); + if (!qedi->cdev) { + if (mode == QEDI_MODE_RECOVERY && retry) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Retry %d initialize hardware\n", retry); + retry--; + goto retry_probe; + } + + rc = -ENODEV; + QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n"); + goto free_host; + } + + set_bit(QEDI_ERR_ATTN_CLR_EN, &qedi->qedi_err_flags); + set_bit(QEDI_ERR_IS_RECOVERABLE, &qedi->qedi_err_flags); + atomic_set(&qedi->link_state, QEDI_LINK_DOWN); + + rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info); + if (rc) + goto free_host; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n", + qedi->dev_info.common.num_hwfns, + qedi_ops->common->get_affin_hwfn_idx(qedi->cdev)); + + rc = qedi_set_iscsi_pf_param(qedi); + if (rc) { + rc = -ENOMEM; + QEDI_ERR(&qedi->dbg_ctx, + "Set iSCSI pf param fail\n"); + goto free_host; + } + + 
qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params); + + rc = qedi_prepare_fp(qedi); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath.\n"); + goto free_pf_params; + } + + /* Start the Slowpath-process */ + memset(&sp_params, 0, sizeof(struct qed_slowpath_params)); + sp_params.int_mode = QED_INT_MODE_MSIX; + sp_params.drv_major = QEDI_DRIVER_MAJOR_VER; + sp_params.drv_minor = QEDI_DRIVER_MINOR_VER; + sp_params.drv_rev = QEDI_DRIVER_REV_VER; + sp_params.drv_eng = QEDI_DRIVER_ENG_VER; + strscpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE); + rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n"); + goto stop_hw; + } + + /* update_pf_params needs to be called before and after slowpath + * start + */ + qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params); + + rc = qedi_setup_int(qedi); + if (rc) + goto stop_iscsi_func; + + qedi_ops->common->set_power_state(qedi->cdev, PCI_D0); + + /* Learn information crucial for qedi to progress */ + rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info); + if (rc) + goto stop_iscsi_func; + + /* Record BDQ producer doorbell addresses */ + qedi->bdq_primary_prod = qedi->dev_info.primary_dbq_rq_addr; + qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, + "BDQ primary_prod=%p secondary_prod=%p.\n", + qedi->bdq_primary_prod, + qedi->bdq_secondary_prod); + + /* + * We need to write the number of BDs in the BDQ we've preallocated so + * the f/w will do a prefetch and we'll get an unsolicited CQE when a + * packet arrives. + */ + qedi->bdq_prod_idx = QEDI_BDQ_NUM; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, + "Writing %d to primary and secondary BDQ doorbell registers.\n", + qedi->bdq_prod_idx); + writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod); + readw(qedi->bdq_primary_prod); + writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod); + readw(qedi->bdq_secondary_prod); + + ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n", + qedi->mac); + + snprintf(host_buf, sizeof(host_buf), "host_%d", qedi->shost->host_no); + qedi_ops->common->set_name(qedi->cdev, host_buf); + + qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi); + + memset(&params, 0, sizeof(params)); + params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN; + qedi->ll2_mtu = DEF_PATH_MTU; + params.drop_ttl0_packets = 0; + params.rx_vlan_stripping = 1; + ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac); + + if (mode != QEDI_MODE_RECOVERY) { + /* set up rx path */ + INIT_LIST_HEAD(&qedi->ll2_skb_list); + spin_lock_init(&qedi->ll2_lock); + /* start qedi context */ + spin_lock_init(&qedi->hba_lock); + spin_lock_init(&qedi->task_idx_lock); + mutex_init(&qedi->stats_lock); + } + qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi); + qedi_ops->ll2->start(qedi->cdev, &params); + + if (mode != QEDI_MODE_RECOVERY) { + qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread, + (void *)qedi, + "qedi_ll2_thread"); + } + + rc = qedi_ops->start(qedi->cdev, &qedi->tasks, + qedi, qedi_iscsi_event_cb); + if (rc) { + rc = -ENODEV; + QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n"); + goto stop_slowpath; + } + + task_start = qedi_get_task_mem(&qedi->tasks, 0); + task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, + "Task context start=%p, end=%p block_size=%u.\n", + task_start,
task_end, qedi->tasks.size); + + memset(&link_params, 0, sizeof(link_params)); + link_params.link_up = true; + rc = qedi_ops->common->set_link(qedi->cdev, &link_params); + if (rc) { + QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n"); + atomic_set(&qedi->link_state, QEDI_LINK_DOWN); + } + +#ifdef CONFIG_DEBUG_FS + qedi_dbg_host_init(&qedi->dbg_ctx, qedi_debugfs_ops, + qedi_dbg_fops); +#endif + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n", + QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION, + FW_REVISION_VERSION, FW_ENGINEERING_VERSION); + + if (mode == QEDI_MODE_NORMAL) { + if (iscsi_host_add(qedi->shost, &pdev->dev)) { + QEDI_ERR(&qedi->dbg_ctx, + "Could not add iscsi host\n"); + rc = -ENOMEM; + goto remove_host; + } + + /* Allocate uio buffers */ + rc = qedi_alloc_uio_rings(qedi); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, + "UIO alloc ring failed err=%d\n", rc); + goto remove_host; + } + + rc = qedi_init_uio(qedi); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, + "UIO init failed, err=%d\n", rc); + goto free_uio; + } + + /* host the array on iscsi_conn */ + rc = qedi_setup_cid_que(qedi); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, + "Could not setup cid que\n"); + goto free_uio; + } + + rc = qedi_cm_alloc_mem(qedi); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, + "Could not alloc cm memory\n"); + goto free_cid_que; + } + + rc = qedi_alloc_itt(qedi); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, + "Could not alloc itt memory\n"); + goto free_cid_que; + } + + sprintf(host_buf, "host_%d", qedi->shost->host_no); + qedi->tmf_thread = create_singlethread_workqueue(host_buf); + if (!qedi->tmf_thread) { + QEDI_ERR(&qedi->dbg_ctx, + "Unable to start tmf thread!\n"); + rc = -ENODEV; + goto free_cid_que; + } + + sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no); + qedi->offload_thread = create_workqueue(host_buf); + if (!qedi->offload_thread) { + QEDI_ERR(&qedi->dbg_ctx, + "Unable to start offload thread!\n"); + rc = -ENODEV; + goto free_tmf_thread; + } + + INIT_DELAYED_WORK(&qedi->recovery_work, qedi_recovery_handler); + INIT_DELAYED_WORK(&qedi->board_disable_work, + qedi_board_disable_work); + + /* F/w needs 1st task context memory entry for performance */ + set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map); + atomic_set(&qedi->num_offloads, 0); + + if (qedi_setup_boot_info(qedi)) + QEDI_ERR(&qedi->dbg_ctx, + "No iSCSI boot target configured\n"); + + rc = qedi_ops->common->update_drv_state(qedi->cdev, true); + if (rc) + QEDI_ERR(&qedi->dbg_ctx, + "Failed to send drv state to MFW\n"); + + } + + return 0; + +free_tmf_thread: + destroy_workqueue(qedi->tmf_thread); +free_cid_que: + qedi_release_cid_que(qedi); +free_uio: + qedi_free_uio(qedi->udev); +remove_host: +#ifdef CONFIG_DEBUG_FS + qedi_dbg_host_exit(&qedi->dbg_ctx); +#endif + iscsi_host_remove(qedi->shost, false); +stop_iscsi_func: + qedi_ops->stop(qedi->cdev); +stop_slowpath: + qedi_ops->common->slowpath_stop(qedi->cdev); +stop_hw: + qedi_ops->common->remove(qedi->cdev); +free_pf_params: + qedi_free_iscsi_pf_param(qedi); +free_host: + iscsi_host_free(qedi->shost); +exit_probe: + return rc; +} + +static void qedi_mark_conn_recovery(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *session = cls_session->dd_data; + struct iscsi_conn *conn = session->leadconn; + struct qedi_conn *qedi_conn = conn->dd_data; + + iscsi_conn_failure(qedi_conn->cls_conn->dd_data, ISCSI_ERR_CONN_FAILED); +} + +static void qedi_recovery_handler(struct work_struct *work) +{ + struct qedi_ctx *qedi = + 
container_of(work, struct qedi_ctx, recovery_work.work); + + iscsi_host_for_each_session(qedi->shost, qedi_mark_conn_recovery); + + /* Call common_ops->recovery_prolog to allow the MFW to quiesce + * any PCI transactions. + */ + qedi_ops->common->recovery_prolog(qedi->cdev); + + __qedi_remove(qedi->pdev, QEDI_MODE_RECOVERY); + __qedi_probe(qedi->pdev, QEDI_MODE_RECOVERY); + clear_bit(QEDI_IN_RECOVERY, &qedi->flags); +} + +static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + return __qedi_probe(pdev, QEDI_MODE_NORMAL); +} + +static void qedi_remove(struct pci_dev *pdev) +{ + __qedi_remove(pdev, QEDI_MODE_NORMAL); +} + +static struct pci_device_id qedi_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) }, + { 0 }, +}; +MODULE_DEVICE_TABLE(pci, qedi_pci_tbl); + +static enum cpuhp_state qedi_cpuhp_state; + +static struct pci_error_handlers qedi_err_handler = { + .error_detected = qedi_io_error_detected, +}; + +static struct pci_driver qedi_pci_driver = { + .name = QEDI_MODULE_NAME, + .id_table = qedi_pci_tbl, + .probe = qedi_probe, + .remove = qedi_remove, + .shutdown = qedi_shutdown, + .err_handler = &qedi_err_handler, + .suspend = qedi_suspend, +}; + +static int __init qedi_init(void) +{ + struct qedi_percpu_s *p; + int cpu, rc = 0; + + qedi_ops = qed_get_iscsi_ops(); + if (!qedi_ops) { + QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n"); + return -EINVAL; + } + +#ifdef CONFIG_DEBUG_FS + qedi_dbg_init("qedi"); +#endif + + qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport); + if (!qedi_scsi_transport) { + QEDI_ERR(NULL, "Could not register qedi transport"); + rc = -ENOMEM; + goto exit_qedi_init_1; + } + + for_each_possible_cpu(cpu) { + p = &per_cpu(qedi_percpu, cpu); + INIT_LIST_HEAD(&p->work_list); + spin_lock_init(&p->p_work_lock); + p->iothread = NULL; + } + + rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/qedi:online", + qedi_cpu_online, qedi_cpu_offline); + if (rc < 0) + goto exit_qedi_init_2; + qedi_cpuhp_state = rc; + + rc = pci_register_driver(&qedi_pci_driver); + if (rc) { + QEDI_ERR(NULL, "Failed to register driver\n"); + goto exit_qedi_hp; + } + + return 0; + +exit_qedi_hp: + cpuhp_remove_state(qedi_cpuhp_state); +exit_qedi_init_2: + iscsi_unregister_transport(&qedi_iscsi_transport); +exit_qedi_init_1: +#ifdef CONFIG_DEBUG_FS + qedi_dbg_exit(); +#endif + qed_put_iscsi_ops(); + return rc; +} + +static void __exit qedi_cleanup(void) +{ + pci_unregister_driver(&qedi_pci_driver); + cpuhp_remove_state(qedi_cpuhp_state); + iscsi_unregister_transport(&qedi_iscsi_transport); + +#ifdef CONFIG_DEBUG_FS + qedi_dbg_exit(); +#endif + qed_put_iscsi_ops(); +} + +MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("QLogic Corporation"); +MODULE_VERSION(QEDI_MODULE_VERSION); +module_init(qedi_init); +module_exit(qedi_cleanup); diff --git a/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h new file mode 100644 index 000000000..760864e43 --- /dev/null +++ b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. 
+ */ + +#ifndef NVM_ISCSI_CFG_H +#define NVM_ISCSI_CFG_H + +#define NUM_OF_ISCSI_TARGET_PER_PF 4 /* Defined as per the + * ISCSI IBFT constraint + */ +#define NUM_OF_ISCSI_PF_SUPPORTED 4 /* One PF per Port - + * assuming 4 port card + */ + +#define NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN 256 + +union nvm_iscsi_dhcp_vendor_id { + u32 value[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN / 4]; + u8 byte[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN]; +}; + +#define NVM_ISCSI_IPV4_ADDR_BYTE_LEN 4 +union nvm_iscsi_ipv4_addr { + u32 addr; + u8 byte[NVM_ISCSI_IPV4_ADDR_BYTE_LEN]; +}; + +#define NVM_ISCSI_IPV6_ADDR_BYTE_LEN 16 +union nvm_iscsi_ipv6_addr { + u32 addr[4]; + u8 byte[NVM_ISCSI_IPV6_ADDR_BYTE_LEN]; +}; + +struct nvm_iscsi_initiator_ipv4 { + union nvm_iscsi_ipv4_addr addr; /* 0x0 */ + union nvm_iscsi_ipv4_addr subnet_mask; /* 0x4 */ + union nvm_iscsi_ipv4_addr gateway; /* 0x8 */ + union nvm_iscsi_ipv4_addr primary_dns; /* 0xC */ + union nvm_iscsi_ipv4_addr secondary_dns; /* 0x10 */ + union nvm_iscsi_ipv4_addr dhcp_addr; /* 0x14 */ + + union nvm_iscsi_ipv4_addr isns_server; /* 0x18 */ + union nvm_iscsi_ipv4_addr slp_server; /* 0x1C */ + union nvm_iscsi_ipv4_addr primay_radius_server; /* 0x20 */ + union nvm_iscsi_ipv4_addr secondary_radius_server; /* 0x24 */ + + union nvm_iscsi_ipv4_addr rsvd[4]; /* 0x28 */ +}; + +struct nvm_iscsi_initiator_ipv6 { + union nvm_iscsi_ipv6_addr addr; /* 0x0 */ + union nvm_iscsi_ipv6_addr subnet_mask; /* 0x10 */ + union nvm_iscsi_ipv6_addr gateway; /* 0x20 */ + union nvm_iscsi_ipv6_addr primary_dns; /* 0x30 */ + union nvm_iscsi_ipv6_addr secondary_dns; /* 0x40 */ + union nvm_iscsi_ipv6_addr dhcp_addr; /* 0x50 */ + + union nvm_iscsi_ipv6_addr isns_server; /* 0x60 */ + union nvm_iscsi_ipv6_addr slp_server; /* 0x70 */ + union nvm_iscsi_ipv6_addr primay_radius_server; /* 0x80 */ + union nvm_iscsi_ipv6_addr secondary_radius_server; /* 0x90 */ + + union nvm_iscsi_ipv6_addr rsvd[3]; /* 0xA0 */ + + u32 config; /* 0xD0 */ +#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_MASK 0x000000FF +#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_OFFSET 0 + + u32 rsvd_1[3]; +}; + +#define NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN 256 +union nvm_iscsi_name { + u32 value[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN / 4]; + u8 byte[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN]; +}; + +#define NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN 256 +union nvm_iscsi_chap_name { + u32 value[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN / 4]; + u8 byte[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN]; +}; + +#define NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN 16 /* md5 need per RFC1996 + * is 16 octets + */ +union nvm_iscsi_chap_password { + u32 value[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN / 4]; + u8 byte[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN]; +}; + +union nvm_iscsi_lun { + u8 byte[8]; + u32 value[2]; +}; + +struct nvm_iscsi_generic { + u32 ctrl_flags; /* 0x0 */ +#define NVM_ISCSI_CFG_GEN_CHAP_ENABLED BIT(0) +#define NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED BIT(1) +#define NVM_ISCSI_CFG_GEN_DHCP_ISCSI_CONFIG_ENABLED BIT(2) +#define NVM_ISCSI_CFG_GEN_IPV6_ENABLED BIT(3) +#define NVM_ISCSI_CFG_GEN_IPV4_FALLBACK_ENABLED BIT(4) +#define NVM_ISCSI_CFG_GEN_ISNS_WORLD_LOGIN BIT(5) +#define NVM_ISCSI_CFG_GEN_ISNS_SELECTIVE_LOGIN BIT(6) +#define NVM_ISCSI_CFG_GEN_ADDR_REDIRECT_ENABLED BIT(7) +#define NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED BIT(8) + + u32 timeout; /* 0x4 */ +#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_MASK 0x0000FFFF +#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_OFFSET 0 +#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_MASK 0xFFFF0000 +#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_OFFSET 16 + + union 
nvm_iscsi_dhcp_vendor_id dhcp_vendor_id; /* 0x8 */ + u32 rsvd[62]; /* 0x108 */ +}; + +struct nvm_iscsi_initiator { + struct nvm_iscsi_initiator_ipv4 ipv4; /* 0x0 */ + struct nvm_iscsi_initiator_ipv6 ipv6; /* 0x38 */ + + union nvm_iscsi_name initiator_name; /* 0x118 */ + union nvm_iscsi_chap_name chap_name; /* 0x218 */ + union nvm_iscsi_chap_password chap_password; /* 0x318 */ + + u32 generic_cont0; /* 0x398 */ +#define NVM_ISCSI_CFG_INITIATOR_VLAN_MASK 0x0000FFFF +#define NVM_ISCSI_CFG_INITIATOR_VLAN_OFFSET 0 +#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_MASK 0x00030000 +#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_OFFSET 16 +#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4 1 +#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_6 2 +#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4_AND_6 3 + + u32 ctrl_flags; +#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_PRIORITY_V6 BIT(0) +#define NVM_ISCSI_CFG_INITIATOR_VLAN_ENABLED BIT(1) + + u32 rsvd[116]; /* 0x32C */ +}; + +struct nvm_iscsi_target { + u32 ctrl_flags; /* 0x0 */ +#define NVM_ISCSI_CFG_TARGET_ENABLED BIT(0) +#define NVM_ISCSI_CFG_BOOT_TIME_LOGIN_STATUS BIT(1) + + u32 generic_cont0; /* 0x4 */ +#define NVM_ISCSI_CFG_TARGET_TCP_PORT_MASK 0x0000FFFF +#define NVM_ISCSI_CFG_TARGET_TCP_PORT_OFFSET 0 + + u32 ip_ver; +#define NVM_ISCSI_CFG_IPv4 4 +#define NVM_ISCSI_CFG_IPv6 6 + + u32 rsvd_1[7]; /* 0x24 */ + union nvm_iscsi_ipv4_addr ipv4_addr; /* 0x28 */ + union nvm_iscsi_ipv6_addr ipv6_addr; /* 0x2C */ + union nvm_iscsi_lun lun; /* 0x3C */ + + union nvm_iscsi_name target_name; /* 0x44 */ + union nvm_iscsi_chap_name chap_name; /* 0x144 */ + union nvm_iscsi_chap_password chap_password; /* 0x244 */ + + u32 rsvd_2[107]; /* 0x2C4 */ +}; + +struct nvm_iscsi_block { + u32 id; /* 0x0 */ +#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK 0x0000000F +#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET 0 +#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK 0x00000FF0 +#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET 4 +#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY BIT(0) +#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED BIT(1) + + u32 rsvd_1[5]; /* 0x4 */ + + struct nvm_iscsi_generic generic; /* 0x18 */ + struct nvm_iscsi_initiator initiator; /* 0x218 */ + struct nvm_iscsi_target target[NUM_OF_ISCSI_TARGET_PER_PF]; + /* 0x718 */ + + u32 rsvd_2[58]; /* 0x1718 */ + /* total size - 0x1800 - 6K block */ +}; + +struct nvm_iscsi_cfg { + u32 id; /* 0x0 */ +#define NVM_ISCSI_CFG_BLK_VERSION_MINOR_MASK 0x000000FF +#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR_MASK 0x0000FF00 +#define NVM_ISCSI_CFG_BLK_SIGNATURE_MASK 0xFFFF0000 +#define NVM_ISCSI_CFG_BLK_SIGNATURE 0x49430000 /* IC - Iscsi + * Config + */ + +#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR 0 +#define NVM_ISCSI_CFG_BLK_VERSION_MINOR 10 +#define NVM_ISCSI_CFG_BLK_VERSION ((NVM_ISCSI_CFG_BLK_VERSION_MAJOR << 8) | \ + NVM_ISCSI_CFG_BLK_VERSION_MINOR) + + struct nvm_iscsi_block block[NUM_OF_ISCSI_PF_SUPPORTED]; /* 0x4 */ +}; + +#endif diff --git a/drivers/scsi/qedi/qedi_sysfs.c b/drivers/scsi/qedi/qedi_sysfs.c new file mode 100644 index 000000000..b00a7e08e --- /dev/null +++ b/drivers/scsi/qedi/qedi_sysfs.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. 
+ */ + +#include "qedi.h" +#include "qedi_gbl.h" +#include "qedi_iscsi.h" +#include "qedi_dbg.h" + +static inline struct qedi_ctx *qedi_dev_to_hba(struct device *dev) +{ + struct Scsi_Host *shost = class_to_shost(dev); + + return iscsi_host_priv(shost); +} + +static ssize_t port_state_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct qedi_ctx *qedi = qedi_dev_to_hba(dev); + + if (atomic_read(&qedi->link_state) == QEDI_LINK_UP) + return sprintf(buf, "Online\n"); + else + return sprintf(buf, "Linkdown\n"); +} + +static ssize_t speed_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qedi_ctx *qedi = qedi_dev_to_hba(dev); + struct qed_link_output if_link; + + qedi_ops->common->get_link(qedi->cdev, &if_link); + + return sprintf(buf, "%d Gbit\n", if_link.speed / 1000); +} + +static DEVICE_ATTR_RO(port_state); +static DEVICE_ATTR_RO(speed); + +static struct attribute *qedi_shost_attrs[] = { + &dev_attr_port_state.attr, + &dev_attr_speed.attr, + NULL +}; + +static const struct attribute_group qedi_shost_attr_group = { + .attrs = qedi_shost_attrs +}; + +const struct attribute_group *qedi_shost_groups[] = { + &qedi_shost_attr_group, + NULL +}; diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h new file mode 100644 index 000000000..0ac1055bd --- /dev/null +++ b/drivers/scsi/qedi/qedi_version.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + */ + +#define QEDI_MODULE_VERSION "8.37.0.20" +#define QEDI_DRIVER_MAJOR_VER 8 +#define QEDI_DRIVER_MINOR_VER 37 +#define QEDI_DRIVER_REV_VER 0 +#define QEDI_DRIVER_ENG_VER 20 diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c new file mode 100644 index 000000000..6e5e89aaa --- /dev/null +++ b/drivers/scsi/qla1280.c @@ -0,0 +1,4388 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/****************************************************************************** +* QLOGIC LINUX SOFTWARE +* +* QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver +* Copyright (C) 2000 Qlogic Corporation (www.qlogic.com) +* Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc. +* Copyright (C) 2003-2004 Christoph Hellwig +* +******************************************************************************/ +#define QLA1280_VERSION "3.27.1" +/***************************************************************************** + Revision History: + Rev 3.27.1, February 8, 2010, Michael Reed + - Retain firmware image for error recovery. + Rev 3.27, February 10, 2009, Michael Reed + - General code cleanup. + - Improve error recovery. 
+ Rev 3.26, January 16, 2006 Jes Sorensen + - Ditch all < 2.6 support + Rev 3.25.1, February 10, 2005 Christoph Hellwig + - use pci_map_single to map non-S/G requests + - remove qla1280_proc_info + Rev 3.25, September 28, 2004, Christoph Hellwig + - add support for ISP1020/1040 + - don't include "scsi.h" anymore for 2.6.x + Rev 3.24.4 June 7, 2004 Christoph Hellwig + - restructure firmware loading, cleanup initialization code + - prepare support for ISP1020/1040 chips + Rev 3.24.3 January 19, 2004, Jes Sorensen + - Handle PCI DMA mask settings correctly + - Correct order of error handling in probe_one, free_irq should not + be called if request_irq failed + Rev 3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez + - Big endian fixes (James) + - Remove bogus IOCB content on zero data transfer commands (Andrew) + Rev 3.24.1 January 5, 2004, Jes Sorensen + - Initialize completion queue to avoid OOPS on probe + - Handle interrupts during mailbox testing + Rev 3.24 November 17, 2003, Christoph Hellwig + - use struct list_head for completion queue + - avoid old Scsi_FOO typedefs + - cleanup 2.4 compat glue a bit + - use headers on 2.6 instead of "scsi.h" + - make initialization for memory mapped vs port I/O more similar + - remove broken pci config space manipulation + - kill more cruft + - this is an almost perfect 2.6 scsi driver now! ;) + Rev 3.23.39 December 17, 2003, Jes Sorensen + - Delete completion queue from srb if mailbox command failed to + to avoid qla1280_done completeting qla1280_error_action's + obsolete context + - Reduce arguments for qla1280_done + Rev 3.23.38 October 18, 2003, Christoph Hellwig + - Convert to new-style hotplugable driver for 2.6 + - Fix missing scsi_unregister/scsi_host_put on HBA removal + - Kill some more cruft + Rev 3.23.37 October 1, 2003, Jes Sorensen + - Make MMIO depend on CONFIG_X86_VISWS instead of yet another + random CONFIG option + - Clean up locking in probe path + Rev 3.23.36 October 1, 2003, Christoph Hellwig + - queuecommand only ever receives new commands - clear flags + - Reintegrate lost fixes from Linux 2.5 + Rev 3.23.35 August 14, 2003, Jes Sorensen + - Build against 2.6 + Rev 3.23.34 July 23, 2003, Jes Sorensen + - Remove pointless TRUE/FALSE macros + - Clean up vchan handling + Rev 3.23.33 July 3, 2003, Jes Sorensen + - Don't define register access macros before define determining MMIO. + This just happened to work out on ia64 but not elsewhere. 
+ - Don't try and read from the card while it is in reset as + it won't respond and causes an MCA + Rev 3.23.32 June 23, 2003, Jes Sorensen + - Basic support for boot time arguments + Rev 3.23.31 June 8, 2003, Jes Sorensen + - Reduce boot time messages + Rev 3.23.30 June 6, 2003, Jes Sorensen + - Do not enable sync/wide/ppr before it has been determined + that the target device actually supports it + - Enable DMA arbitration for multi channel controllers + Rev 3.23.29 June 3, 2003, Jes Sorensen + - Port to 2.5.69 + Rev 3.23.28 June 3, 2003, Jes Sorensen + - Eliminate duplicate marker commands on bus resets + - Handle outstanding commands appropriately on bus/device resets + Rev 3.23.27 May 28, 2003, Jes Sorensen + - Remove bogus input queue code, let the Linux SCSI layer do the work + - Clean up NVRAM handling, only read it once from the card + - Add a number of missing default nvram parameters + Rev 3.23.26 Beta May 28, 2003, Jes Sorensen + - Use completion queue for mailbox commands instead of busy wait + Rev 3.23.25 Beta May 27, 2003, James Bottomley + - Migrate to use new error handling code + Rev 3.23.24 Beta May 21, 2003, James Bottomley + - Big endian support + - Cleanup data direction code + Rev 3.23.23 Beta May 12, 2003, Jes Sorensen + - Switch to using MMIO instead of PIO + Rev 3.23.22 Beta April 15, 2003, Jes Sorensen + - Fix PCI parity problem with 12160 during reset. + Rev 3.23.21 Beta April 14, 2003, Jes Sorensen + - Use pci_map_page()/pci_unmap_page() instead of map_single version. + Rev 3.23.20 Beta April 9, 2003, Jes Sorensen + - Remove < 2.4.x support + - Introduce HOST_LOCK to make the spin lock changes portable. + - Remove a bunch of idiotic and unnecessary typedef's + - Kill all leftovers of target-mode support which never worked anyway + Rev 3.23.19 Beta April 11, 2002, Linus Torvalds + - Do qla1280_pci_config() before calling request_irq() and + request_region() + - Use pci_dma_hi32() to handle upper word of DMA addresses instead + of large shifts + - Hand correct arguments to free_irq() in case of failure + Rev 3.23.18 Beta April 11, 2002, Jes Sorensen + - Run source through Lindent and clean up the output + Rev 3.23.17 Beta April 11, 2002, Jes Sorensen + - Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32 + Rev 3.23.16 Beta March 19, 2002, Jes Sorensen + - Rely on mailbox commands generating interrupts - do not + run qla1280_isr() from ql1280_mailbox_command() + - Remove device_reg_t + - Integrate ql12160_set_target_parameters() with 1280 version + - Make qla1280_setup() non static + - Do not call qla1280_check_for_dead_scsi_bus() on every I/O request + sent to the card - this command pauses the firmware!!! + Rev 3.23.15 Beta March 19, 2002, Jes Sorensen + - Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions + - Remove a pile of pointless and confusing (srb_t **) and + (scsi_lu_t *) typecasts + - Explicit mark that we do not use the new error handling (for now) + - Remove scsi_qla_host_t and use 'struct' instead + - Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled, + pci_64bit_slot flags which weren't used for anything anyway + - Grab host->host_lock while calling qla1280_isr() from abort() + - Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we + do not need to save/restore flags in the interrupt handler + - Enable interrupts early (before any mailbox access) in preparation + for cleaning up the mailbox handling + Rev 3.23.14 Beta March 14, 2002, Jes Sorensen + - Further cleanups. 
Remove all trace of QL_DEBUG_LEVEL_x and replace + it with proper use of dprintk(). + - Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take + a debug level argument to determine if data is to be printed + - Add KERN_* info to printk() + Rev 3.23.13 Beta March 14, 2002, Jes Sorensen + - Significant cosmetic cleanups + - Change debug code to use dprintk() and remove #if mess + Rev 3.23.12 Beta March 13, 2002, Jes Sorensen + - More cosmetic cleanups, fix places treating return as function + - use cpu_relax() in qla1280_debounce_register() + Rev 3.23.11 Beta March 13, 2002, Jes Sorensen + - Make it compile under 2.5.5 + Rev 3.23.10 Beta October 1, 2001, Jes Sorensen + - Do no typecast short * to long * in QL1280BoardTbl, this + broke miserably on big endian boxes + Rev 3.23.9 Beta September 30, 2001, Jes Sorensen + - Remove pre 2.2 hack for checking for reentrance in interrupt handler + - Make data types used to receive from SCSI_{BUS,TCN,LUN}_32 + unsigned int to match the types from struct scsi_cmnd + Rev 3.23.8 Beta September 29, 2001, Jes Sorensen + - Remove bogus timer_t typedef from qla1280.h + - Remove obsolete pre 2.2 PCI setup code, use proper #define's + for PCI_ values, call pci_set_master() + - Fix memleak of qla1280_buffer on module unload + - Only compile module parsing code #ifdef MODULE - should be + changed to use individual MODULE_PARM's later + - Remove dummy_buffer that was never modified nor printed + - ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove + #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls + - Remove \r from print statements, this is Linux, not DOS + - Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK} + dummy macros + - Remove C++ compile hack in header file as Linux driver are not + supposed to be compiled as C++ + - Kill MS_64BITS macro as it makes the code more readable + - Remove unnecessary flags.in_interrupts bit + Rev 3.23.7 Beta August 20, 2001, Jes Sorensen + - Dont' check for set flags on q->q_flag one by one in qla1280_next() + - Check whether the interrupt was generated by the QLA1280 before + doing any processing + - qla1280_status_entry(): Only zero out part of sense_buffer that + is not being copied into + - Remove more superflouous typecasts + - qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy() + Rev 3.23.6 Beta August 20, 2001, Tony Luck, Intel + - Don't walk the entire list in qla1280_putq_t() just to directly + grab the pointer to the last element afterwards + Rev 3.23.5 Beta August 9, 2001, Jes Sorensen + - Don't use IRQF_DISABLED, it's use is deprecated for this kinda driver + Rev 3.23.4 Beta August 8, 2001, Jes Sorensen + - Set dev->max_sectors to 1024 + Rev 3.23.3 Beta August 6, 2001, Jes Sorensen + - Provide compat macros for pci_enable_device(), pci_find_subsys() + and scsi_set_pci_device() + - Call scsi_set_pci_device() for all devices + - Reduce size of kernel version dependent device probe code + - Move duplicate probe/init code to separate function + - Handle error if qla1280_mem_alloc() fails + - Kill OFFSET() macro and use Linux's PCI definitions instead + - Kill private structure defining PCI config space (struct config_reg) + - Only allocate I/O port region if not in MMIO mode + - Remove duplicate (unused) sanity check of sife of srb_t + Rev 3.23.2 Beta August 6, 2001, Jes Sorensen + - Change home-brew memset() implementations to use memset() + - Remove all references to COMTRACE() - accessing a PC's COM2 serial + port directly is not legal under Linux. 
+ Rev 3.23.1 Beta April 24, 2001, Jes Sorensen + - Remove pre 2.2 kernel support + - clean up 64 bit DMA setting to use 2.4 API (provide backwards compat) + - Fix MMIO access to use readl/writel instead of directly + dereferencing pointers + - Nuke MSDOS debugging code + - Change true/false data types to int from uint8_t + - Use int for counters instead of uint8_t etc. + - Clean up size & byte order conversion macro usage + Rev 3.23 Beta January 11, 2001 BN Qlogic + - Added check of device_id when handling non + QLA12160s during detect(). + Rev 3.22 Beta January 5, 2001 BN Qlogic + - Changed queue_task() to schedule_task() + for kernels 2.4.0 and higher. + Note: 2.4.0-testxx kernels released prior to + the actual 2.4.0 kernel release on January 2001 + will get compile/link errors with schedule_task(). + Please update your kernel to released 2.4.0 level, + or comment lines in this file flagged with 3.22 + to resolve compile/link error of schedule_task(). + - Added -DCONFIG_SMP in addition to -D__SMP__ + in Makefile for 2.4.0 builds of driver as module. + Rev 3.21 Beta January 4, 2001 BN Qlogic + - Changed criteria of 64/32 Bit mode of HBA + operation according to BITS_PER_LONG rather + than HBA's NVRAM setting of >4Gig memory bit; + so that the HBA auto-configures without the need + to setup each system individually. + Rev 3.20 Beta December 5, 2000 BN Qlogic + - Added priority handling to IA-64 onboard SCSI + ISP12160 chip for kernels greater than 2.3.18. + - Added irqrestore for qla1280_intr_handler. + - Enabled /proc/scsi/qla1280 interface. + - Clear /proc/scsi/qla1280 counters in detect(). + Rev 3.19 Beta October 13, 2000 BN Qlogic + - Declare driver_template for new kernel + (2.4.0 and greater) scsi initialization scheme. + - Update /proc/scsi entry for 2.3.18 kernels and + above as qla1280 + Rev 3.18 Beta October 10, 2000 BN Qlogic + - Changed scan order of adapters to map + the QLA12160 followed by the QLA1280. + Rev 3.17 Beta September 18, 2000 BN Qlogic + - Removed warnings for 32 bit 2.4.x compiles + - Corrected declared size for request and response + DMA addresses that are kept in each ha + Rev. 3.16 Beta August 25, 2000 BN Qlogic + - Corrected 64 bit addressing issue on IA-64 + where the upper 32 bits were not properly + passed to the RISC engine. + Rev. 3.15 Beta August 22, 2000 BN Qlogic + - Modified qla1280_setup_chip to properly load + ISP firmware for greater that 4 Gig memory on IA-64 + Rev. 3.14 Beta August 16, 2000 BN Qlogic + - Added setting of dma_mask to full 64 bit + if flags.enable_64bit_addressing is set in NVRAM + Rev. 3.13 Beta August 16, 2000 BN Qlogic + - Use new PCI DMA mapping APIs for 2.4.x kernel + Rev. 3.12 July 18, 2000 Redhat & BN Qlogic + - Added check of pci_enable_device to detect() for 2.3.x + - Use pci_resource_start() instead of + pdev->resource[0].start in detect() for 2.3.x + - Updated driver version + Rev. 3.11 July 14, 2000 BN Qlogic + - Updated SCSI Firmware to following versions: + qla1x80: 8.13.08 + qla1x160: 10.04.08 + - Updated driver version to 3.11 + Rev. 3.10 June 23, 2000 BN Qlogic + - Added filtering of AMI SubSys Vendor ID devices + Rev. 3.9 + - DEBUG_QLA1280 undefined and new version BN Qlogic + Rev. 3.08b May 9, 2000 MD Dell + - Added logic to check against AMI subsystem vendor ID + Rev. 3.08 May 4, 2000 DG Qlogic + - Added logic to check for PCI subsystem ID. + Rev. 3.07 Apr 24, 2000 DG & BN Qlogic + - Updated SCSI Firmware to following versions: + qla12160: 10.01.19 + qla1280: 8.09.00 + Rev. 
3.06 Apr 12, 2000 DG & BN Qlogic + - Internal revision; not released + Rev. 3.05 Mar 28, 2000 DG & BN Qlogic + - Edit correction for virt_to_bus and PROC. + Rev. 3.04 Mar 28, 2000 DG & BN Qlogic + - Merge changes from ia64 port. + Rev. 3.03 Mar 28, 2000 BN Qlogic + - Increase version to reflect new code drop with compile fix + of issue with inclusion of linux/spinlock for 2.3 kernels + Rev. 3.02 Mar 15, 2000 BN Qlogic + - Merge qla1280_proc_info from 2.10 code base + Rev. 3.01 Feb 10, 2000 BN Qlogic + - Corrected code to compile on a 2.2.x kernel. + Rev. 3.00 Jan 17, 2000 DG Qlogic + - Added 64-bit support. + Rev. 2.07 Nov 9, 1999 DG Qlogic + - Added new routine to set target parameters for ISP12160. + Rev. 2.06 Sept 10, 1999 DG Qlogic + - Added support for ISP12160 Ultra 3 chip. + Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont + - Modified code to remove errors generated when compiling with + Cygnus IA64 Compiler. + - Changed conversion of pointers to unsigned longs instead of integers. + - Changed type of I/O port variables from uint32_t to unsigned long. + - Modified OFFSET macro to work with 64-bit as well as 32-bit. + - Changed sprintf and printk format specifiers for pointers to %p. + - Changed some int to long type casts where needed in sprintf & printk. + - Added l modifiers to sprintf and printk format specifiers for longs. + - Removed unused local variables. + Rev. 1.20 June 8, 1999 DG, Qlogic + Changes to support RedHat release 6.0 (kernel 2.2.5). + - Added SCSI exclusive access lock (io_request_lock) when accessing + the adapter. + - Added changes for the new LINUX interface template. Some new error + handling routines have been added to the template, but for now we + will use the old ones. + - Initial Beta Release. +*****************************************************************************/ + + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + + +/* + * Compile time Options: + * 0 - Disable and 1 - Enable + */ +#define DEBUG_QLA1280_INTR 0 +#define DEBUG_PRINT_NVRAM 0 +#define DEBUG_QLA1280 0 + +#define MEMORY_MAPPED_IO 1 + +#include "qla1280.h" + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +#define QLA_64BIT_PTR 1 +#endif + +#define NVRAM_DELAY() udelay(500) /* 2 microseconds */ + +#define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020) +#define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \ + ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240) +#define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \ + ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160) + + +static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *); +static void qla1280_remove_one(struct pci_dev *); + +/* + * QLogic Driver Support Function Prototypes. + */ +static void qla1280_done(struct scsi_qla_host *); +static int qla1280_get_token(char *); +static int qla1280_setup(char *s) __init; + +/* + * QLogic ISP1280 Hardware Support Function Prototypes. 
+ */ +static int qla1280_load_firmware(struct scsi_qla_host *); +static int qla1280_init_rings(struct scsi_qla_host *); +static int qla1280_nvram_config(struct scsi_qla_host *); +static int qla1280_mailbox_command(struct scsi_qla_host *, + uint8_t, uint16_t *); +static int qla1280_bus_reset(struct scsi_qla_host *, int); +static int qla1280_device_reset(struct scsi_qla_host *, int, int); +static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int); +static int qla1280_abort_isp(struct scsi_qla_host *); +#ifdef QLA_64BIT_PTR +static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *); +#else +static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *); +#endif +static void qla1280_nv_write(struct scsi_qla_host *, uint16_t); +static void qla1280_poll(struct scsi_qla_host *); +static void qla1280_reset_adapter(struct scsi_qla_host *); +static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8); +static void qla1280_isp_cmd(struct scsi_qla_host *); +static void qla1280_isr(struct scsi_qla_host *, struct list_head *); +static void qla1280_rst_aen(struct scsi_qla_host *); +static void qla1280_status_entry(struct scsi_qla_host *, struct response *, + struct list_head *); +static void qla1280_error_entry(struct scsi_qla_host *, struct response *, + struct list_head *); +static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t); +static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t); +static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *); +static request_t *qla1280_req_pkt(struct scsi_qla_host *); +static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *, + unsigned int); +static void qla1280_get_target_parameters(struct scsi_qla_host *, + struct scsi_device *); +static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int); + + +static struct qla_driver_setup driver_setup; + +/* + * convert scsi data direction to request_t control flags + */ +static inline uint16_t +qla1280_data_direction(struct scsi_cmnd *cmnd) +{ + switch(cmnd->sc_data_direction) { + case DMA_FROM_DEVICE: + return BIT_5; + case DMA_TO_DEVICE: + return BIT_6; + case DMA_BIDIRECTIONAL: + return BIT_5 | BIT_6; + /* + * We could BUG() on default here if one of the four cases aren't + * met, but then again if we receive something like that from the + * SCSI layer we have more serious problems. This shuts up GCC. 
+ */ + case DMA_NONE: + default: + return 0; + } +} + +#if DEBUG_QLA1280 +static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd); +static void __qla1280_dump_buffer(char *, int); +#endif + + +/* + * insmod needs to find the variable and make it point to something + */ +#ifdef MODULE +static char *qla1280; + +/* insmod qla1280 options=verbose" */ +module_param(qla1280, charp, 0); +#else +__setup("qla1280=", qla1280_setup); +#endif + + +#define CMD_CDBLEN(Cmnd) Cmnd->cmd_len +#define CMD_CDBP(Cmnd) Cmnd->cmnd +#define CMD_SNSP(Cmnd) Cmnd->sense_buffer +#define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE +#define CMD_RESULT(Cmnd) Cmnd->result +#define CMD_HANDLE(Cmnd) Cmnd->host_scribble + +#define CMD_HOST(Cmnd) Cmnd->device->host +#define SCSI_BUS_32(Cmnd) Cmnd->device->channel +#define SCSI_TCN_32(Cmnd) Cmnd->device->id +#define SCSI_LUN_32(Cmnd) Cmnd->device->lun + + +/*****************************************/ +/* ISP Boards supported by this driver */ +/*****************************************/ + +struct qla_boards { + char *name; /* Board ID String */ + int numPorts; /* Number of SCSI ports */ + int fw_index; /* index into qla1280_fw_tbl for firmware */ +}; + +/* NOTE: the last argument in each entry is used to index ql1280_board_tbl */ +static struct pci_device_id qla1280_pci_tbl[] = { + {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, + {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2}, + {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3}, + {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4}, + {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl); + +static DEFINE_MUTEX(qla1280_firmware_mutex); + +struct qla_fw { + char *fwname; + const struct firmware *fw; +}; + +#define QL_NUM_FW_IMAGES 3 + +static struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = { + {"qlogic/1040.bin", NULL}, /* image 0 */ + {"qlogic/1280.bin", NULL}, /* image 1 */ + {"qlogic/12160.bin", NULL}, /* image 2 */ +}; + +/* NOTE: Order of boards in this table must match order in qla1280_pci_tbl */ +static struct qla_boards ql1280_board_tbl[] = { + {.name = "QLA12160", .numPorts = 2, .fw_index = 2}, + {.name = "QLA1040" , .numPorts = 1, .fw_index = 0}, + {.name = "QLA1080" , .numPorts = 1, .fw_index = 1}, + {.name = "QLA1240" , .numPorts = 2, .fw_index = 1}, + {.name = "QLA1280" , .numPorts = 2, .fw_index = 1}, + {.name = "QLA10160", .numPorts = 1, .fw_index = 2}, + {.name = " ", .numPorts = 0, .fw_index = -1}, +}; + +static int qla1280_verbose = 1; + +#if DEBUG_QLA1280 +static int ql_debug_level = 1; +#define dprintk(level, format, a...) \ + do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0) +#define qla1280_dump_buffer(level, buf, size) \ + if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size) +#define qla1280_print_scsi_cmd(level, cmd) \ + if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd) +#else +#define ql_debug_level 0 +#define dprintk(level, format, a...) 
do{}while(0) +#define qla1280_dump_buffer(a, b, c) do{}while(0) +#define qla1280_print_scsi_cmd(a, b) do{}while(0) +#endif + +#define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x); +#define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x); +#define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x); +#define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x); + + +static int qla1280_read_nvram(struct scsi_qla_host *ha) +{ + uint16_t *wptr; + uint8_t chksum; + int cnt, i; + struct nvram *nv; + + ENTER("qla1280_read_nvram"); + + if (driver_setup.no_nvram) + return 1; + + printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no); + + wptr = (uint16_t *)&ha->nvram; + nv = &ha->nvram; + chksum = 0; + for (cnt = 0; cnt < 3; cnt++) { + *wptr = qla1280_get_nvram_word(ha, cnt); + chksum += *wptr & 0xff; + chksum += (*wptr >> 8) & 0xff; + wptr++; + } + + if (nv->id0 != 'I' || nv->id1 != 'S' || + nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) { + dprintk(2, "Invalid nvram ID or version!\n"); + chksum = 1; + } else { + for (; cnt < sizeof(struct nvram); cnt++) { + *wptr = qla1280_get_nvram_word(ha, cnt); + chksum += *wptr & 0xff; + chksum += (*wptr >> 8) & 0xff; + wptr++; + } + } + + dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x" + " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3, + nv->version); + + + if (chksum) { + if (!driver_setup.no_nvram) + printk(KERN_WARNING "scsi(%ld): Unable to identify or " + "validate NVRAM checksum, using default " + "settings\n", ha->host_no); + ha->nvram_valid = 0; + } else + ha->nvram_valid = 1; + + /* The firmware interface is, um, interesting, in that the + * actual firmware image on the chip is little endian, thus, + * the process of taking that image to the CPU would end up + * little endian. However, the firmware interface requires it + * to be read a word (two bytes) at a time. + * + * The net result of this would be that the word (and + * doubleword) quantities in the firmware would be correct, but + * the bytes would be pairwise reversed. Since most of the + * firmware quantities are, in fact, bytes, we do an extra + * le16_to_cpu() in the firmware read routine. + * + * The upshot of all this is that the bytes in the firmware + * are in the correct places, but the 16 and 32 bit quantities + * are still in little endian format. We fix that up below by + * doing extra reverses on them */ + nv->isp_parameter = cpu_to_le16(nv->isp_parameter); + nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w); + for(i = 0; i < MAX_BUSES; i++) { + nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout); + nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth); + } + dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n"); + LEAVE("qla1280_read_nvram"); + + return chksum; +} + +/************************************************************************** + * qla1280_info + * Return a string describing the driver. 
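+ * Note: the description is built in a static buffer, so each call
+ * overwrites the result of the previous one.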
+ **************************************************************************/ +static const char * +qla1280_info(struct Scsi_Host *host) +{ + static char qla1280_scsi_name_buffer[125]; + char *bp; + struct scsi_qla_host *ha; + struct qla_boards *bdp; + + bp = &qla1280_scsi_name_buffer[0]; + ha = (struct scsi_qla_host *)host->hostdata; + bdp = &ql1280_board_tbl[ha->devnum]; + memset(bp, 0, sizeof(qla1280_scsi_name_buffer)); + + sprintf (bp, + "QLogic %s PCI to SCSI Host Adapter\n" + " Firmware version: %2d.%02d.%02d, Driver version %s", + &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3, + QLA1280_VERSION); + return bp; +} + +/************************************************************************** + * qla1280_queuecommand + * Queue a command to the controller. + * + * Note: + * The mid-level driver tries to ensures that queuecommand never gets invoked + * concurrently with itself or the interrupt handler (although the + * interrupt handler may call this routine as part of request-completion + * handling). Unfortunately, it sometimes calls the scheduler in interrupt + * context which is a big NO! NO!. + **************************************************************************/ +static int qla1280_queuecommand_lck(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; + struct srb *sp = scsi_cmd_priv(cmd); + int status; + + sp->cmd = cmd; + sp->flags = 0; + sp->wait = NULL; + CMD_HANDLE(cmd) = (unsigned char *)NULL; + + qla1280_print_scsi_cmd(5, cmd); + +#ifdef QLA_64BIT_PTR + /* + * Using 64 bit commands if the PCI bridge doesn't support it is a + * bit wasteful, however this should really only happen if one's + * PCI controller is completely broken, like the BCM1250. For + * sane hardware this is not an issue. 
+ */ + status = qla1280_64bit_start_scsi(ha, sp); +#else + status = qla1280_32bit_start_scsi(ha, sp); +#endif + return status; +} + +static DEF_SCSI_QCMD(qla1280_queuecommand) + +enum action { + ABORT_COMMAND, + DEVICE_RESET, + BUS_RESET, + ADAPTER_RESET, +}; + + +static void qla1280_mailbox_timeout(struct timer_list *t) +{ + struct scsi_qla_host *ha = from_timer(ha, t, mailbox_timer); + struct device_reg __iomem *reg; + reg = ha->iobase; + + ha->mailbox_out[0] = RD_REG_WORD(®->mailbox0); + printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, " + "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0], + RD_REG_WORD(®->ictrl), RD_REG_WORD(®->istatus)); + complete(ha->mailbox_wait); +} + +static int +_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp, + struct completion *wait) +{ + int status = FAILED; + struct scsi_cmnd *cmd = sp->cmd; + + spin_unlock_irq(ha->host->host_lock); + wait_for_completion_timeout(wait, 4*HZ); + spin_lock_irq(ha->host->host_lock); + sp->wait = NULL; + if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) { + status = SUCCESS; + scsi_done(cmd); + } + return status; +} + +static int +qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp) +{ + DECLARE_COMPLETION_ONSTACK(wait); + + sp->wait = &wait; + return _qla1280_wait_for_single_command(ha, sp, &wait); +} + +static int +qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target) +{ + int cnt; + int status; + struct srb *sp; + struct scsi_cmnd *cmd; + + status = SUCCESS; + + /* + * Wait for all commands with the designated bus/target + * to be completed by the firmware + */ + for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { + sp = ha->outstanding_cmds[cnt]; + if (sp) { + cmd = sp->cmd; + + if (bus >= 0 && SCSI_BUS_32(cmd) != bus) + continue; + if (target >= 0 && SCSI_TCN_32(cmd) != target) + continue; + + status = qla1280_wait_for_single_command(ha, sp); + if (status == FAILED) + break; + } + } + return status; +} + +/************************************************************************** + * qla1280_error_action + * The function will attempt to perform a specified error action and + * wait for the results (or time out). + * + * Input: + * cmd = Linux SCSI command packet of the command that cause the + * bus reset. + * action = error action to take (see action_t) + * + * Returns: + * SUCCESS or FAILED + * + **************************************************************************/ +static int +qla1280_error_action(struct scsi_cmnd *cmd, enum action action) +{ + struct scsi_qla_host *ha; + int bus, target, lun; + struct srb *sp; + int i, found; + int result=FAILED; + int wait_for_bus=-1; + int wait_for_target = -1; + DECLARE_COMPLETION_ONSTACK(wait); + + ENTER("qla1280_error_action"); + + ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata); + sp = scsi_cmd_priv(cmd); + bus = SCSI_BUS_32(cmd); + target = SCSI_TCN_32(cmd); + lun = SCSI_LUN_32(cmd); + + dprintk(4, "error_action %i, istatus 0x%04x\n", action, + RD_REG_WORD(&ha->iobase->istatus)); + + dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n", + RD_REG_WORD(&ha->iobase->host_cmd), + RD_REG_WORD(&ha->iobase->ictrl), jiffies); + + if (qla1280_verbose) + printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, " + "Handle=0x%p, action=0x%x\n", + ha->host_no, cmd, CMD_HANDLE(cmd), action); + + /* + * Check to see if we have the command in the outstanding_cmds[] + * array. If not then it must have completed before this error + * action was initiated. 
If the error_action isn't ABORT_COMMAND + * then the driver must proceed with the requested action. + */ + found = -1; + for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) { + if (sp == ha->outstanding_cmds[i]) { + found = i; + sp->wait = &wait; /* we'll wait for it to complete */ + break; + } + } + + if (found < 0) { /* driver doesn't have command */ + result = SUCCESS; + if (qla1280_verbose) { + printk(KERN_INFO + "scsi(%ld:%d:%d:%d): specified command has " + "already completed.\n", ha->host_no, bus, + target, lun); + } + } + + switch (action) { + + case ABORT_COMMAND: + dprintk(1, "qla1280: RISC aborting command\n"); + /* + * The abort might fail due to race when the host_lock + * is released to issue the abort. As such, we + * don't bother to check the return status. + */ + if (found >= 0) + qla1280_abort_command(ha, sp, found); + break; + + case DEVICE_RESET: + if (qla1280_verbose) + printk(KERN_INFO + "scsi(%ld:%d:%d:%d): Queueing device reset " + "command.\n", ha->host_no, bus, target, lun); + if (qla1280_device_reset(ha, bus, target) == 0) { + /* issued device reset, set wait conditions */ + wait_for_bus = bus; + wait_for_target = target; + } + break; + + case BUS_RESET: + if (qla1280_verbose) + printk(KERN_INFO "qla1280(%ld:%d): Issued bus " + "reset.\n", ha->host_no, bus); + if (qla1280_bus_reset(ha, bus) == 0) { + /* issued bus reset, set wait conditions */ + wait_for_bus = bus; + } + break; + + case ADAPTER_RESET: + default: + if (qla1280_verbose) { + printk(KERN_INFO + "scsi(%ld): Issued ADAPTER RESET\n", + ha->host_no); + printk(KERN_INFO "scsi(%ld): I/O processing will " + "continue automatically\n", ha->host_no); + } + ha->flags.reset_active = 1; + + if (qla1280_abort_isp(ha) != 0) { /* it's dead */ + result = FAILED; + } + + ha->flags.reset_active = 0; + } + + /* + * At this point, the host_lock has been released and retaken + * by the issuance of the mailbox command. + * Wait for the command passed in by the mid-layer if it + * was found by the driver. It might have been returned + * between eh recovery steps, hence the check of the "found" + * variable. + */ + + if (found >= 0) + result = _qla1280_wait_for_single_command(ha, sp, &wait); + + if (action == ABORT_COMMAND && result != SUCCESS) { + printk(KERN_WARNING + "scsi(%li:%i:%i:%i): " + "Unable to abort command!\n", + ha->host_no, bus, target, lun); + } + + /* + * If the command passed in by the mid-layer has been + * returned by the board, then wait for any additional + * commands which are supposed to complete based upon + * the error action. + * + * All commands are unconditionally returned during a + * call to qla1280_abort_isp(), ADAPTER_RESET. No need + * to wait for them. + */ + if (result == SUCCESS && wait_for_bus >= 0) { + result = qla1280_wait_for_pending_commands(ha, + wait_for_bus, wait_for_target); + } + + dprintk(1, "RESET returning %d\n", result); + + LEAVE("qla1280_error_action"); + return result; +} + +/************************************************************************** + * qla1280_abort + * Abort the specified SCSI command(s). 
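+ * Entered from the midlayer error handler; takes the host lock and
+ * delegates the actual work to qla1280_error_action().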
+ **************************************************************************/ +static int +qla1280_eh_abort(struct scsi_cmnd * cmd) +{ + int rc; + + spin_lock_irq(cmd->device->host->host_lock); + rc = qla1280_error_action(cmd, ABORT_COMMAND); + spin_unlock_irq(cmd->device->host->host_lock); + + return rc; +} + +/************************************************************************** + * qla1280_device_reset + * Reset the specified SCSI device + **************************************************************************/ +static int +qla1280_eh_device_reset(struct scsi_cmnd *cmd) +{ + int rc; + + spin_lock_irq(cmd->device->host->host_lock); + rc = qla1280_error_action(cmd, DEVICE_RESET); + spin_unlock_irq(cmd->device->host->host_lock); + + return rc; +} + +/************************************************************************** + * qla1280_bus_reset + * Reset the specified bus. + **************************************************************************/ +static int +qla1280_eh_bus_reset(struct scsi_cmnd *cmd) +{ + int rc; + + spin_lock_irq(cmd->device->host->host_lock); + rc = qla1280_error_action(cmd, BUS_RESET); + spin_unlock_irq(cmd->device->host->host_lock); + + return rc; +} + +/************************************************************************** + * qla1280_adapter_reset + * Reset the specified adapter (both channels) + **************************************************************************/ +static int +qla1280_eh_adapter_reset(struct scsi_cmnd *cmd) +{ + int rc; + + spin_lock_irq(cmd->device->host->host_lock); + rc = qla1280_error_action(cmd, ADAPTER_RESET); + spin_unlock_irq(cmd->device->host->host_lock); + + return rc; +} + +static int +qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int geom[]) +{ + int heads, sectors, cylinders; + + heads = 64; + sectors = 32; + cylinders = (unsigned long)capacity / (heads * sectors); + if (cylinders > 1024) { + heads = 255; + sectors = 63; + cylinders = (unsigned long)capacity / (heads * sectors); + /* if (cylinders > 1023) + cylinders = 1023; */ + } + + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + + return 0; +} + + +/* disable risc and host interrupts */ +static inline void +qla1280_disable_intrs(struct scsi_qla_host *ha) +{ + WRT_REG_WORD(&ha->iobase->ictrl, 0); + RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */ +} + +/* enable risc and host interrupts */ +static inline void +qla1280_enable_intrs(struct scsi_qla_host *ha) +{ + WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC)); + RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */ +} + +/************************************************************************** + * qla1280_intr_handler + * Handles the H/W interrupt + **************************************************************************/ +static irqreturn_t +qla1280_intr_handler(int irq, void *dev_id) +{ + struct scsi_qla_host *ha; + struct device_reg __iomem *reg; + u16 data; + int handled = 0; + + ENTER_INTR ("qla1280_intr_handler"); + ha = (struct scsi_qla_host *)dev_id; + + spin_lock(ha->host->host_lock); + + ha->isr_count++; + reg = ha->iobase; + + qla1280_disable_intrs(ha); + + data = qla1280_debounce_register(®->istatus); + /* Check for pending interrupts. 
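+ * RISC_INT in the debounced istatus register means the RISC has raised
+ * an interrupt; qla1280_isr() services it and queues any finished
+ * commands on the adapter's done list.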
*/ + if (data & RISC_INT) { + qla1280_isr(ha, &ha->done_q); + handled = 1; + } + if (!list_empty(&ha->done_q)) + qla1280_done(ha); + + spin_unlock(ha->host->host_lock); + + qla1280_enable_intrs(ha); + + LEAVE_INTR("qla1280_intr_handler"); + return IRQ_RETVAL(handled); +} + + +static int +qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target) +{ + uint8_t mr; + uint16_t mb[MAILBOX_REGISTER_COUNT]; + struct nvram *nv; + int status, lun; + + nv = &ha->nvram; + + mr = BIT_3 | BIT_2 | BIT_1 | BIT_0; + + /* Set Target Parameters. */ + mb[0] = MBC_SET_TARGET_PARAMETERS; + mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8); + mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8; + mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9; + mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10; + mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11; + mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12; + mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13; + mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14; + mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15; + + if (IS_ISP1x160(ha)) { + mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5; + mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8); + mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) | + nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width; + mr |= BIT_6; + } else { + mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8); + } + mb[3] |= nv->bus[bus].target[target].sync_period; + + status = qla1280_mailbox_command(ha, mr, mb); + + /* Set Device Queue Parameters. */ + for (lun = 0; lun < MAX_LUNS; lun++) { + mb[0] = MBC_SET_DEVICE_QUEUE; + mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8); + mb[1] |= lun; + mb[2] = nv->bus[bus].max_queue_depth; + mb[3] = nv->bus[bus].target[target].execution_throttle; + status |= qla1280_mailbox_command(ha, 0x0f, mb); + } + + if (status) + printk(KERN_WARNING "scsi(%ld:%i:%i): " + "qla1280_set_target_parameters() failed\n", + ha->host_no, bus, target); + return status; +} + + +/************************************************************************** + * qla1280_slave_configure + * + * Description: + * Determines the queue depth for a given device. There are two ways + * a queue depth can be obtained for a tagged queueing device. One + * way is the default queue depth which is determined by whether + * If it is defined, then it is used + * as the default queue depth. Otherwise, we use either 4 or 8 as the + * default queue depth (dependent on the number of hardware SCBs). 
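+ * In this driver the depth actually comes from the per-bus NVRAM
+ * high-water mark when tag queuing is enabled for the target, and a
+ * small fixed default otherwise.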
+ **************************************************************************/ +static int +qla1280_slave_configure(struct scsi_device *device) +{ + struct scsi_qla_host *ha; + int default_depth = 3; + int bus = device->channel; + int target = device->id; + int status = 0; + struct nvram *nv; + unsigned long flags; + + ha = (struct scsi_qla_host *)device->host->hostdata; + nv = &ha->nvram; + + if (qla1280_check_for_dead_scsi_bus(ha, bus)) + return 1; + + if (device->tagged_supported && + (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) { + scsi_change_queue_depth(device, ha->bus_settings[bus].hiwat); + } else { + scsi_change_queue_depth(device, default_depth); + } + + nv->bus[bus].target[target].parameter.enable_sync = device->sdtr; + nv->bus[bus].target[target].parameter.enable_wide = device->wdtr; + nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr; + + if (driver_setup.no_sync || + (driver_setup.sync_mask && + (~driver_setup.sync_mask & (1 << target)))) + nv->bus[bus].target[target].parameter.enable_sync = 0; + if (driver_setup.no_wide || + (driver_setup.wide_mask && + (~driver_setup.wide_mask & (1 << target)))) + nv->bus[bus].target[target].parameter.enable_wide = 0; + if (IS_ISP1x160(ha)) { + if (driver_setup.no_ppr || + (driver_setup.ppr_mask && + (~driver_setup.ppr_mask & (1 << target)))) + nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0; + } + + spin_lock_irqsave(ha->host->host_lock, flags); + if (nv->bus[bus].target[target].parameter.enable_sync) + status = qla1280_set_target_parameters(ha, bus, target); + qla1280_get_target_parameters(ha, device); + spin_unlock_irqrestore(ha->host->host_lock, flags); + return status; +} + + +/* + * qla1280_done + * Process completed commands. + * + * Input: + * ha = adapter block pointer. + */ +static void +qla1280_done(struct scsi_qla_host *ha) +{ + struct srb *sp; + struct list_head *done_q; + int bus, target; + struct scsi_cmnd *cmd; + + ENTER("qla1280_done"); + + done_q = &ha->done_q; + + while (!list_empty(done_q)) { + sp = list_entry(done_q->next, struct srb, list); + + list_del(&sp->list); + + cmd = sp->cmd; + bus = SCSI_BUS_32(cmd); + target = SCSI_TCN_32(cmd); + + switch ((CMD_RESULT(cmd) >> 16)) { + case DID_RESET: + /* Issue marker command. 
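+ * The marker IOCB resynchronizes the firmware with this target after
+ * the reset, before any new commands are sent to it.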
*/ + if (!ha->flags.abort_isp_active) + qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); + break; + case DID_ABORT: + sp->flags &= ~SRB_ABORT_PENDING; + sp->flags |= SRB_ABORTED; + break; + default: + break; + } + + /* Release memory used for this I/O */ + scsi_dma_unmap(cmd); + + /* Call the mid-level driver interrupt handler */ + ha->actthreads--; + + if (sp->wait == NULL) + scsi_done(cmd); + else + complete(sp->wait); + } + LEAVE("qla1280_done"); +} + +/* + * Translates a ISP error to a Linux SCSI error + */ +static int +qla1280_return_status(struct response * sts, struct scsi_cmnd *cp) +{ + int host_status = DID_ERROR; + uint16_t comp_status = le16_to_cpu(sts->comp_status); + uint16_t state_flags = le16_to_cpu(sts->state_flags); + uint32_t residual_length = le32_to_cpu(sts->residual_length); + uint16_t scsi_status = le16_to_cpu(sts->scsi_status); +#if DEBUG_QLA1280_INTR + static char *reason[] = { + "DID_OK", + "DID_NO_CONNECT", + "DID_BUS_BUSY", + "DID_TIME_OUT", + "DID_BAD_TARGET", + "DID_ABORT", + "DID_PARITY", + "DID_ERROR", + "DID_RESET", + "DID_BAD_INTR" + }; +#endif /* DEBUG_QLA1280_INTR */ + + ENTER("qla1280_return_status"); + +#if DEBUG_QLA1280_INTR + /* + dprintk(1, "qla1280_return_status: compl status = 0x%04x\n", + comp_status); + */ +#endif + + switch (comp_status) { + case CS_COMPLETE: + host_status = DID_OK; + break; + + case CS_INCOMPLETE: + if (!(state_flags & SF_GOT_BUS)) + host_status = DID_NO_CONNECT; + else if (!(state_flags & SF_GOT_TARGET)) + host_status = DID_BAD_TARGET; + else if (!(state_flags & SF_SENT_CDB)) + host_status = DID_ERROR; + else if (!(state_flags & SF_TRANSFERRED_DATA)) + host_status = DID_ERROR; + else if (!(state_flags & SF_GOT_STATUS)) + host_status = DID_ERROR; + else if (!(state_flags & SF_GOT_SENSE)) + host_status = DID_ERROR; + break; + + case CS_RESET: + host_status = DID_RESET; + break; + + case CS_ABORTED: + host_status = DID_ABORT; + break; + + case CS_TIMEOUT: + host_status = DID_TIME_OUT; + break; + + case CS_DATA_OVERRUN: + dprintk(2, "Data overrun 0x%x\n", residual_length); + dprintk(2, "qla1280_return_status: response packet data\n"); + qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE); + host_status = DID_ERROR; + break; + + case CS_DATA_UNDERRUN: + if ((scsi_bufflen(cp) - residual_length) < + cp->underflow) { + printk(KERN_WARNING + "scsi: Underflow detected - retrying " + "command.\n"); + host_status = DID_ERROR; + } else { + scsi_set_resid(cp, residual_length); + host_status = DID_OK; + } + break; + + default: + host_status = DID_ERROR; + break; + } + +#if DEBUG_QLA1280_INTR + dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n", + reason[host_status], scsi_status); +#endif + + LEAVE("qla1280_return_status"); + + return (scsi_status & 0xff) | (host_status << 16); +} + +/****************************************************************************/ +/* QLogic ISP1280 Hardware Support Functions. */ +/****************************************************************************/ + +/* + * qla1280_initialize_adapter + * Initialize board. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = success + */ +static int +qla1280_initialize_adapter(struct scsi_qla_host *ha) +{ + struct device_reg __iomem *reg; + int status; + int bus; + unsigned long flags; + + ENTER("qla1280_initialize_adapter"); + + /* Clear adapter flags. 
*/ + ha->flags.online = 0; + ha->flags.disable_host_adapter = 0; + ha->flags.reset_active = 0; + ha->flags.abort_isp_active = 0; + + /* TODO: implement support for the 1040 nvram format */ + if (IS_ISP1040(ha)) + driver_setup.no_nvram = 1; + + dprintk(1, "Configure PCI space for adapter...\n"); + + reg = ha->iobase; + + /* Insure mailbox registers are free. */ + WRT_REG_WORD(®->semaphore, 0); + WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT); + WRT_REG_WORD(®->host_cmd, HC_CLR_HOST_INT); + RD_REG_WORD(®->host_cmd); + + if (qla1280_read_nvram(ha)) { + dprintk(2, "qla1280_initialize_adapter: failed to read " + "NVRAM\n"); + } + + /* + * It's necessary to grab the spin here as qla1280_mailbox_command + * needs to be able to drop the lock unconditionally to wait + * for completion. + */ + spin_lock_irqsave(ha->host->host_lock, flags); + + status = qla1280_load_firmware(ha); + if (status) { + printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n", + ha->host_no); + goto out; + } + + /* Setup adapter based on NVRAM parameters. */ + dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no); + qla1280_nvram_config(ha); + + if (ha->flags.disable_host_adapter) { + status = 1; + goto out; + } + + status = qla1280_init_rings(ha); + if (status) + goto out; + + /* Issue SCSI reset, if we can't reset twice then bus is dead */ + for (bus = 0; bus < ha->ports; bus++) { + if (!ha->bus_settings[bus].disable_scsi_reset && + qla1280_bus_reset(ha, bus) && + qla1280_bus_reset(ha, bus)) + ha->bus_settings[bus].scsi_bus_dead = 1; + } + + ha->flags.online = 1; + out: + spin_unlock_irqrestore(ha->host->host_lock, flags); + + if (status) + dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n"); + + LEAVE("qla1280_initialize_adapter"); + return status; +} + +/* + * qla1280_request_firmware + * Acquire firmware for chip. Retain in memory + * for error recovery. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * Pointer to firmware image or an error code + * cast to pointer via ERR_PTR(). + */ +static const struct firmware * +qla1280_request_firmware(struct scsi_qla_host *ha) +{ + const struct firmware *fw; + int err; + int index; + char *fwname; + + spin_unlock_irq(ha->host->host_lock); + mutex_lock(&qla1280_firmware_mutex); + + index = ql1280_board_tbl[ha->devnum].fw_index; + fw = qla1280_fw_tbl[index].fw; + if (fw) + goto out; + + fwname = qla1280_fw_tbl[index].fwname; + err = request_firmware(&fw, fwname, &ha->pdev->dev); + + if (err) { + printk(KERN_ERR "Failed to load image \"%s\" err %d\n", + fwname, err); + fw = ERR_PTR(err); + goto unlock; + } + if ((fw->size % 2) || (fw->size < 6)) { + printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n", + fw->size, fwname); + release_firmware(fw); + fw = ERR_PTR(-EINVAL); + goto unlock; + } + + qla1280_fw_tbl[index].fw = fw; + + out: + ha->fwver1 = fw->data[0]; + ha->fwver2 = fw->data[1]; + ha->fwver3 = fw->data[2]; + unlock: + mutex_unlock(&qla1280_firmware_mutex); + spin_lock_irq(ha->host->host_lock); + return fw; +} + +/* + * Chip diagnostics + * Test chip for proper operation. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = success. + */ +static int +qla1280_chip_diag(struct scsi_qla_host *ha) +{ + uint16_t mb[MAILBOX_REGISTER_COUNT]; + struct device_reg __iomem *reg = ha->iobase; + int status = 0; + int cnt; + uint16_t data; + dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", ®->id_l); + + dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no); + + /* Soft reset chip and wait for it to finish. 
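+ * The chip clears ISP_RESET once the soft reset is done, so the loop
+ * below simply polls ictrl until the bit drops.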
*/ + WRT_REG_WORD(®->ictrl, ISP_RESET); + + /* + * We can't do a traditional PCI write flush here by reading + * back the register. The card will not respond once the reset + * is in action and we end up with a machine check exception + * instead. Nothing to do but wait and hope for the best. + * A portable pci_write_flush(pdev) call would be very useful here. + */ + udelay(20); + data = qla1280_debounce_register(®->ictrl); + /* + * Yet another QLogic gem ;-( + */ + for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) { + udelay(5); + data = RD_REG_WORD(®->ictrl); + } + + if (!cnt) + goto fail; + + /* Reset register cleared by chip reset. */ + dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n"); + + WRT_REG_WORD(®->cfg_1, 0); + + /* Reset RISC and disable BIOS which + allows RISC to execute out of RAM. */ + WRT_REG_WORD(®->host_cmd, HC_RESET_RISC | + HC_RELEASE_RISC | HC_DISABLE_BIOS); + + RD_REG_WORD(®->id_l); /* Flush PCI write */ + data = qla1280_debounce_register(®->mailbox0); + + /* + * I *LOVE* this code! + */ + for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) { + udelay(5); + data = RD_REG_WORD(®->mailbox0); + } + + if (!cnt) + goto fail; + + /* Check product ID of chip */ + dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n"); + + if (RD_REG_WORD(®->mailbox1) != PROD_ID_1 || + (RD_REG_WORD(®->mailbox2) != PROD_ID_2 && + RD_REG_WORD(®->mailbox2) != PROD_ID_2a) || + RD_REG_WORD(®->mailbox3) != PROD_ID_3 || + RD_REG_WORD(®->mailbox4) != PROD_ID_4) { + printk(KERN_INFO "qla1280: Wrong product ID = " + "0x%x,0x%x,0x%x,0x%x\n", + RD_REG_WORD(®->mailbox1), + RD_REG_WORD(®->mailbox2), + RD_REG_WORD(®->mailbox3), + RD_REG_WORD(®->mailbox4)); + goto fail; + } + + /* + * Enable ints early!!! + */ + qla1280_enable_intrs(ha); + + dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n"); + /* Wrap Incoming Mailboxes Test. */ + mb[0] = MBC_MAILBOX_REGISTER_TEST; + mb[1] = 0xAAAA; + mb[2] = 0x5555; + mb[3] = 0xAA55; + mb[4] = 0x55AA; + mb[5] = 0xA5A5; + mb[6] = 0x5A5A; + mb[7] = 0x2525; + + status = qla1280_mailbox_command(ha, 0xff, mb); + if (status) + goto fail; + + if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 || + mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A || + mb[7] != 0x2525) { + printk(KERN_INFO "qla1280: Failed mbox check\n"); + goto fail; + } + + dprintk(3, "qla1280_chip_diag: exiting normally\n"); + return 0; + fail: + dprintk(2, "qla1280_chip_diag: **** FAILED ****\n"); + return status; +} + +static int +qla1280_load_firmware_pio(struct scsi_qla_host *ha) +{ + /* enter with host_lock acquired */ + + const struct firmware *fw; + const __le16 *fw_data; + uint16_t risc_address, risc_code_size; + uint16_t mb[MAILBOX_REGISTER_COUNT], i; + int err = 0; + + fw = qla1280_request_firmware(ha); + if (IS_ERR(fw)) + return PTR_ERR(fw); + + fw_data = (const __le16 *)&fw->data[0]; + ha->fwstart = __le16_to_cpu(fw_data[2]); + + /* Load RISC code. 
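+ * One MBC_WRITE_RAM_WORD mailbox command per 16-bit firmware word;
+ * slow, but this PIO path is only used for the ISP1040.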
*/ + risc_address = ha->fwstart; + fw_data = (const __le16 *)&fw->data[6]; + risc_code_size = (fw->size - 6) / 2; + + for (i = 0; i < risc_code_size; i++) { + mb[0] = MBC_WRITE_RAM_WORD; + mb[1] = risc_address + i; + mb[2] = __le16_to_cpu(fw_data[i]); + + err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb); + if (err) { + printk(KERN_ERR "scsi(%li): Failed to load firmware\n", + ha->host_no); + break; + } + } + + return err; +} + +#ifdef QLA_64BIT_PTR +#define LOAD_CMD MBC_LOAD_RAM_A64_ROM +#define DUMP_CMD MBC_DUMP_RAM_A64_ROM +#define CMD_ARGS (BIT_7 | BIT_6 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0) +#else +#define LOAD_CMD MBC_LOAD_RAM +#define DUMP_CMD MBC_DUMP_RAM +#define CMD_ARGS (BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0) +#endif + +#define DUMP_IT_BACK 0 /* for debug of RISC loading */ +static int +qla1280_load_firmware_dma(struct scsi_qla_host *ha) +{ + /* enter with host_lock acquired */ + const struct firmware *fw; + const __le16 *fw_data; + uint16_t risc_address, risc_code_size; + uint16_t mb[MAILBOX_REGISTER_COUNT], cnt; + int err = 0, num, i; +#if DUMP_IT_BACK + uint8_t *sp, *tbuf; + dma_addr_t p_tbuf; + + tbuf = dma_alloc_coherent(&ha->pdev->dev, 8000, &p_tbuf, GFP_KERNEL); + if (!tbuf) + return -ENOMEM; +#endif + + fw = qla1280_request_firmware(ha); + if (IS_ERR(fw)) + return PTR_ERR(fw); + + fw_data = (const __le16 *)&fw->data[0]; + ha->fwstart = __le16_to_cpu(fw_data[2]); + + /* Load RISC code. */ + risc_address = ha->fwstart; + fw_data = (const __le16 *)&fw->data[6]; + risc_code_size = (fw->size - 6) / 2; + + dprintk(1, "%s: DMA RISC code (%i) words\n", + __func__, risc_code_size); + + num = 0; + while (risc_code_size > 0) { + int warn __attribute__((unused)) = 0; + + cnt = 2000 >> 1; + + if (cnt > risc_code_size) + cnt = risc_code_size; + + dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p)," + "%d,%d(0x%x)\n", + fw_data, cnt, num, risc_address); + for(i = 0; i < cnt; i++) + ((__le16 *)ha->request_ring)[i] = fw_data[i]; + + mb[0] = LOAD_CMD; + mb[1] = risc_address; + mb[4] = cnt; + mb[3] = ha->request_dma & 0xffff; + mb[2] = (ha->request_dma >> 16) & 0xffff; + mb[7] = upper_32_bits(ha->request_dma) & 0xffff; + mb[6] = upper_32_bits(ha->request_dma) >> 16; + dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n", + __func__, mb[0], + (void *)(long)ha->request_dma, + mb[6], mb[7], mb[2], mb[3]); + err = qla1280_mailbox_command(ha, CMD_ARGS, mb); + if (err) { + printk(KERN_ERR "scsi(%li): Failed to load partial " + "segment of f\n", ha->host_no); + goto out; + } + +#if DUMP_IT_BACK + mb[0] = DUMP_CMD; + mb[1] = risc_address; + mb[4] = cnt; + mb[3] = p_tbuf & 0xffff; + mb[2] = (p_tbuf >> 16) & 0xffff; + mb[7] = upper_32_bits(p_tbuf) & 0xffff; + mb[6] = upper_32_bits(p_tbuf) >> 16; + + err = qla1280_mailbox_command(ha, CMD_ARGS, mb); + if (err) { + printk(KERN_ERR + "Failed to dump partial segment of f/w\n"); + goto out; + } + sp = (uint8_t *)ha->request_ring; + for (i = 0; i < (cnt << 1); i++) { + if (tbuf[i] != sp[i] && warn++ < 10) { + printk(KERN_ERR "%s: FW compare error @ " + "byte(0x%x) loop#=%x\n", + __func__, i, num); + printk(KERN_ERR "%s: FWbyte=%x " + "FWfromChip=%x\n", + __func__, sp[i], tbuf[i]); + /*break; */ + } + } +#endif + risc_address += cnt; + risc_code_size = risc_code_size - cnt; + fw_data = fw_data + cnt; + num++; + } + + out: +#if DUMP_IT_BACK + dma_free_coherent(&ha->pdev->dev, 8000, tbuf, p_tbuf); +#endif + return err; +} + +static int +qla1280_start_firmware(struct scsi_qla_host *ha) +{ + uint16_t mb[MAILBOX_REGISTER_COUNT]; + int err; + 
+ dprintk(1, "%s: Verifying checksum of loaded RISC code.\n", + __func__); + + /* Verify checksum of loaded RISC code. */ + mb[0] = MBC_VERIFY_CHECKSUM; + /* mb[1] = ql12_risc_code_addr01; */ + mb[1] = ha->fwstart; + err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); + if (err) { + printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no); + return err; + } + + /* Start firmware execution. */ + dprintk(1, "%s: start firmware running.\n", __func__); + mb[0] = MBC_EXECUTE_FIRMWARE; + mb[1] = ha->fwstart; + err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); + if (err) { + printk(KERN_ERR "scsi(%li): Failed to start firmware\n", + ha->host_no); + } + + return err; +} + +static int +qla1280_load_firmware(struct scsi_qla_host *ha) +{ + /* enter with host_lock taken */ + int err; + + err = qla1280_chip_diag(ha); + if (err) + goto out; + if (IS_ISP1040(ha)) + err = qla1280_load_firmware_pio(ha); + else + err = qla1280_load_firmware_dma(ha); + if (err) + goto out; + err = qla1280_start_firmware(ha); + out: + return err; +} + +/* + * Initialize rings + * + * Input: + * ha = adapter block pointer. + * ha->request_ring = request ring virtual address + * ha->response_ring = response ring virtual address + * ha->request_dma = request ring physical address + * ha->response_dma = response ring physical address + * + * Returns: + * 0 = success. + */ +static int +qla1280_init_rings(struct scsi_qla_host *ha) +{ + uint16_t mb[MAILBOX_REGISTER_COUNT]; + int status = 0; + + ENTER("qla1280_init_rings"); + + /* Clear outstanding commands array. */ + memset(ha->outstanding_cmds, 0, + sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS); + + /* Initialize request queue. */ + ha->request_ring_ptr = ha->request_ring; + ha->req_ring_index = 0; + ha->req_q_cnt = REQUEST_ENTRY_CNT; + /* mb[0] = MBC_INIT_REQUEST_QUEUE; */ + mb[0] = MBC_INIT_REQUEST_QUEUE_A64; + mb[1] = REQUEST_ENTRY_CNT; + mb[3] = ha->request_dma & 0xffff; + mb[2] = (ha->request_dma >> 16) & 0xffff; + mb[4] = 0; + mb[7] = upper_32_bits(ha->request_dma) & 0xffff; + mb[6] = upper_32_bits(ha->request_dma) >> 16; + if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 | + BIT_3 | BIT_2 | BIT_1 | BIT_0, + &mb[0]))) { + /* Initialize response queue. 
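+ * Use the same A64 mailbox variant as the request queue so the upper
+ * 32 bits of the ring address can go in mb[6]/mb[7].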
*/ + ha->response_ring_ptr = ha->response_ring; + ha->rsp_ring_index = 0; + /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */ + mb[0] = MBC_INIT_RESPONSE_QUEUE_A64; + mb[1] = RESPONSE_ENTRY_CNT; + mb[3] = ha->response_dma & 0xffff; + mb[2] = (ha->response_dma >> 16) & 0xffff; + mb[5] = 0; + mb[7] = upper_32_bits(ha->response_dma) & 0xffff; + mb[6] = upper_32_bits(ha->response_dma) >> 16; + status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 | + BIT_3 | BIT_2 | BIT_1 | BIT_0, + &mb[0]); + } + + if (status) + dprintk(2, "qla1280_init_rings: **** FAILED ****\n"); + + LEAVE("qla1280_init_rings"); + return status; +} + +static void +qla1280_print_settings(struct nvram *nv) +{ + dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n", + nv->bus[0].config_1.initiator_id); + dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n", + nv->bus[1].config_1.initiator_id); + + dprintk(1, "qla1280 : bus reset delay[0]=%d\n", + nv->bus[0].bus_reset_delay); + dprintk(1, "qla1280 : bus reset delay[1]=%d\n", + nv->bus[1].bus_reset_delay); + + dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count); + dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay); + dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count); + dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay); + + dprintk(1, "qla1280 : async data setup time[0]=%d\n", + nv->bus[0].config_2.async_data_setup_time); + dprintk(1, "qla1280 : async data setup time[1]=%d\n", + nv->bus[1].config_2.async_data_setup_time); + + dprintk(1, "qla1280 : req/ack active negation[0]=%d\n", + nv->bus[0].config_2.req_ack_active_negation); + dprintk(1, "qla1280 : req/ack active negation[1]=%d\n", + nv->bus[1].config_2.req_ack_active_negation); + + dprintk(1, "qla1280 : data line active negation[0]=%d\n", + nv->bus[0].config_2.data_line_active_negation); + dprintk(1, "qla1280 : data line active negation[1]=%d\n", + nv->bus[1].config_2.data_line_active_negation); + + dprintk(1, "qla1280 : disable loading risc code=%d\n", + nv->cntr_flags_1.disable_loading_risc_code); + + dprintk(1, "qla1280 : enable 64bit addressing=%d\n", + nv->cntr_flags_1.enable_64bit_addressing); + + dprintk(1, "qla1280 : selection timeout limit[0]=%d\n", + nv->bus[0].selection_timeout); + dprintk(1, "qla1280 : selection timeout limit[1]=%d\n", + nv->bus[1].selection_timeout); + + dprintk(1, "qla1280 : max queue depth[0]=%d\n", + nv->bus[0].max_queue_depth); + dprintk(1, "qla1280 : max queue depth[1]=%d\n", + nv->bus[1].max_queue_depth); +} + +static void +qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target) +{ + struct nvram *nv = &ha->nvram; + + nv->bus[bus].target[target].parameter.renegotiate_on_error = 1; + nv->bus[bus].target[target].parameter.auto_request_sense = 1; + nv->bus[bus].target[target].parameter.tag_queuing = 1; + nv->bus[bus].target[target].parameter.enable_sync = 1; +#if 1 /* Some SCSI Processors do not seem to like this */ + nv->bus[bus].target[target].parameter.enable_wide = 1; +#endif + nv->bus[bus].target[target].execution_throttle = + nv->bus[bus].max_queue_depth - 1; + nv->bus[bus].target[target].parameter.parity_checking = 1; + nv->bus[bus].target[target].parameter.disconnect_allowed = 1; + + if (IS_ISP1x160(ha)) { + nv->bus[bus].target[target].flags.flags1x160.device_enable = 1; + nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e; + nv->bus[bus].target[target].sync_period = 9; + nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1; + nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2; 
+ nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1; + } else { + nv->bus[bus].target[target].flags.flags1x80.device_enable = 1; + nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12; + nv->bus[bus].target[target].sync_period = 10; + } +} + +static void +qla1280_set_defaults(struct scsi_qla_host *ha) +{ + struct nvram *nv = &ha->nvram; + int bus, target; + + dprintk(1, "Using defaults for NVRAM: \n"); + memset(nv, 0, sizeof(struct nvram)); + + /* nv->cntr_flags_1.disable_loading_risc_code = 1; */ + nv->firmware_feature.f.enable_fast_posting = 1; + nv->firmware_feature.f.disable_synchronous_backoff = 1; + nv->termination.scsi_bus_0_control = 3; + nv->termination.scsi_bus_1_control = 3; + nv->termination.auto_term_support = 1; + + /* + * Set default FIFO magic - What appropriate values would be here + * is unknown. This is what I have found testing with 12160s. + * + * Now, I would love the magic decoder ring for this one, the + * header file provided by QLogic seems to be bogus or incomplete + * at best. + */ + nv->isp_config.burst_enable = 1; + if (IS_ISP1040(ha)) + nv->isp_config.fifo_threshold |= 3; + else + nv->isp_config.fifo_threshold |= 4; + + if (IS_ISP1x160(ha)) + nv->isp_parameter = 0x01; /* fast memory enable */ + + for (bus = 0; bus < MAX_BUSES; bus++) { + nv->bus[bus].config_1.initiator_id = 7; + nv->bus[bus].config_2.req_ack_active_negation = 1; + nv->bus[bus].config_2.data_line_active_negation = 1; + nv->bus[bus].selection_timeout = 250; + nv->bus[bus].max_queue_depth = 32; + + if (IS_ISP1040(ha)) { + nv->bus[bus].bus_reset_delay = 3; + nv->bus[bus].config_2.async_data_setup_time = 6; + nv->bus[bus].retry_delay = 1; + } else { + nv->bus[bus].bus_reset_delay = 5; + nv->bus[bus].config_2.async_data_setup_time = 8; + } + + for (target = 0; target < MAX_TARGETS; target++) + qla1280_set_target_defaults(ha, bus, target); + } +} + +static int +qla1280_config_target(struct scsi_qla_host *ha, int bus, int target) +{ + struct nvram *nv = &ha->nvram; + uint16_t mb[MAILBOX_REGISTER_COUNT]; + int status, lun; + uint16_t flag; + + /* Set Target Parameters. */ + mb[0] = MBC_SET_TARGET_PARAMETERS; + mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8); + + /* + * Do not enable sync and ppr for the initial INQUIRY run. We + * enable this later if we determine the target actually + * supports it. + */ + mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE + | TP_WIDE | TP_PARITY | TP_DISCONNECT); + + if (IS_ISP1x160(ha)) + mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8; + else + mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8; + mb[3] |= nv->bus[bus].target[target].sync_period; + status = qla1280_mailbox_command(ha, 0x0f, mb); + + /* Save Tag queuing enable flag. */ + flag = (BIT_0 << target); + if (nv->bus[bus].target[target].parameter.tag_queuing) + ha->bus_settings[bus].qtag_enables |= flag; + + /* Save Device enable flag. */ + if (IS_ISP1x160(ha)) { + if (nv->bus[bus].target[target].flags.flags1x160.device_enable) + ha->bus_settings[bus].device_enables |= flag; + ha->bus_settings[bus].lun_disables |= 0; + } else { + if (nv->bus[bus].target[target].flags.flags1x80.device_enable) + ha->bus_settings[bus].device_enables |= flag; + /* Save LUN disable flag. */ + if (nv->bus[bus].target[target].flags.flags1x80.lun_disable) + ha->bus_settings[bus].lun_disables |= flag; + } + + /* Set Device Queue Parameters. */ + for (lun = 0; lun < MAX_LUNS; lun++) { + mb[0] = MBC_SET_DEVICE_QUEUE; + mb[1] = (uint16_t)((bus ? 
target | BIT_7 : target) << 8); + mb[1] |= lun; + mb[2] = nv->bus[bus].max_queue_depth; + mb[3] = nv->bus[bus].target[target].execution_throttle; + status |= qla1280_mailbox_command(ha, 0x0f, mb); + } + + return status; +} + +static int +qla1280_config_bus(struct scsi_qla_host *ha, int bus) +{ + struct nvram *nv = &ha->nvram; + uint16_t mb[MAILBOX_REGISTER_COUNT]; + int target, status; + + /* SCSI Reset Disable. */ + ha->bus_settings[bus].disable_scsi_reset = + nv->bus[bus].config_1.scsi_reset_disable; + + /* Initiator ID. */ + ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id; + mb[0] = MBC_SET_INITIATOR_ID; + mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 : + ha->bus_settings[bus].id; + status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); + + /* Reset Delay. */ + ha->bus_settings[bus].bus_reset_delay = + nv->bus[bus].bus_reset_delay; + + /* Command queue depth per device. */ + ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1; + + /* Set target parameters. */ + for (target = 0; target < MAX_TARGETS; target++) + status |= qla1280_config_target(ha, bus, target); + + return status; +} + +static int +qla1280_nvram_config(struct scsi_qla_host *ha) +{ + struct device_reg __iomem *reg = ha->iobase; + struct nvram *nv = &ha->nvram; + int bus, target, status = 0; + uint16_t mb[MAILBOX_REGISTER_COUNT]; + + ENTER("qla1280_nvram_config"); + + if (ha->nvram_valid) { + /* Always force AUTO sense for LINUX SCSI */ + for (bus = 0; bus < MAX_BUSES; bus++) + for (target = 0; target < MAX_TARGETS; target++) { + nv->bus[bus].target[target].parameter. + auto_request_sense = 1; + } + } else { + qla1280_set_defaults(ha); + } + + qla1280_print_settings(nv); + + /* Disable RISC load of firmware. */ + ha->flags.disable_risc_code_load = + nv->cntr_flags_1.disable_loading_risc_code; + + if (IS_ISP1040(ha)) { + uint16_t hwrev, cfg1, cdma_conf; + + hwrev = RD_REG_WORD(®->cfg_0) & ISP_CFG0_HWMSK; + + cfg1 = RD_REG_WORD(®->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6); + cdma_conf = RD_REG_WORD(®->cdma_cfg); + + /* Busted fifo, says mjacob. */ + if (hwrev != ISP_CFG0_1040A) + cfg1 |= nv->isp_config.fifo_threshold << 4; + + cfg1 |= nv->isp_config.burst_enable << 2; + WRT_REG_WORD(®->cfg_1, cfg1); + + WRT_REG_WORD(®->cdma_cfg, cdma_conf | CDMA_CONF_BENAB); + WRT_REG_WORD(®->ddma_cfg, cdma_conf | DDMA_CONF_BENAB); + } else { + uint16_t cfg1, term; + + /* Set ISP hardware DMA burst */ + cfg1 = nv->isp_config.fifo_threshold << 4; + cfg1 |= nv->isp_config.burst_enable << 2; + /* Enable DMA arbitration on dual channel controllers */ + if (ha->ports > 1) + cfg1 |= BIT_13; + WRT_REG_WORD(®->cfg_1, cfg1); + + /* Set SCSI termination. */ + WRT_REG_WORD(®->gpio_enable, + BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0); + term = nv->termination.scsi_bus_1_control; + term |= nv->termination.scsi_bus_0_control << 2; + term |= nv->termination.auto_term_support << 7; + RD_REG_WORD(®->id_l); /* Flush PCI write */ + WRT_REG_WORD(®->gpio_data, term); + } + RD_REG_WORD(®->id_l); /* Flush PCI write */ + + /* ISP parameter word. */ + mb[0] = MBC_SET_SYSTEM_PARAMETER; + mb[1] = nv->isp_parameter; + status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); + + if (IS_ISP1x40(ha)) { + /* clock rate - for qla1240 and older, only */ + mb[0] = MBC_SET_CLOCK_RATE; + mb[1] = 40; + status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); + } + + /* Firmware feature word. 
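+ * Hand the NVRAM feature bits (fast posting, LVD bus transition
+ * reporting, synchronous backoff disable) to the firmware.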
*/ + mb[0] = MBC_SET_FIRMWARE_FEATURES; + mb[1] = nv->firmware_feature.f.enable_fast_posting; + mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1; + mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5; + status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); + + /* Retry count and delay. */ + mb[0] = MBC_SET_RETRY_COUNT; + mb[1] = nv->bus[0].retry_count; + mb[2] = nv->bus[0].retry_delay; + mb[6] = nv->bus[1].retry_count; + mb[7] = nv->bus[1].retry_delay; + status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 | + BIT_1 | BIT_0, &mb[0]); + + /* ASYNC data setup time. */ + mb[0] = MBC_SET_ASYNC_DATA_SETUP; + mb[1] = nv->bus[0].config_2.async_data_setup_time; + mb[2] = nv->bus[1].config_2.async_data_setup_time; + status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); + + /* Active negation states. */ + mb[0] = MBC_SET_ACTIVE_NEGATION; + mb[1] = 0; + if (nv->bus[0].config_2.req_ack_active_negation) + mb[1] |= BIT_5; + if (nv->bus[0].config_2.data_line_active_negation) + mb[1] |= BIT_4; + mb[2] = 0; + if (nv->bus[1].config_2.req_ack_active_negation) + mb[2] |= BIT_5; + if (nv->bus[1].config_2.data_line_active_negation) + mb[2] |= BIT_4; + status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb); + + mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY; + mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */ + status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); + + /* thingy */ + mb[0] = MBC_SET_PCI_CONTROL; + mb[1] = BIT_1; /* Data DMA Channel Burst Enable */ + mb[2] = BIT_1; /* Command DMA Channel Burst Enable */ + status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb); + + mb[0] = MBC_SET_TAG_AGE_LIMIT; + mb[1] = 8; + status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); + + /* Selection timeout. */ + mb[0] = MBC_SET_SELECTION_TIMEOUT; + mb[1] = nv->bus[0].selection_timeout; + mb[2] = nv->bus[1].selection_timeout; + status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb); + + for (bus = 0; bus < ha->ports; bus++) + status |= qla1280_config_bus(ha, bus); + + if (status) + dprintk(2, "qla1280_nvram_config: **** FAILED ****\n"); + + LEAVE("qla1280_nvram_config"); + return status; +} + +/* + * Get NVRAM data word + * Calculates word position in NVRAM and calls request routine to + * get the word from NVRAM. + * + * Input: + * ha = adapter block pointer. + * address = NVRAM word address. + * + * Returns: + * data word. + */ +static uint16_t +qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address) +{ + uint32_t nv_cmd; + uint16_t data; + + nv_cmd = address << 16; + nv_cmd |= NV_READ_OP; + + data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd)); + + dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = " + "0x%x", data); + + return data; +} + +/* + * NVRAM request + * Sends read command to NVRAM and gets data from NVRAM. + * + * Input: + * ha = adapter block pointer. + * nv_cmd = Bit 26 = start bit + * Bit 25, 24 = opcode + * Bit 23-16 = address + * Bit 15-0 = write data + * + * Returns: + * data word. + */ +static uint16_t +qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd) +{ + struct device_reg __iomem *reg = ha->iobase; + int cnt; + uint16_t data = 0; + uint16_t reg_data; + + /* Send command to NVRAM. */ + + nv_cmd <<= 5; + for (cnt = 0; cnt < 11; cnt++) { + if (nv_cmd & BIT_31) + qla1280_nv_write(ha, NV_DATA_OUT); + else + qla1280_nv_write(ha, 0); + nv_cmd <<= 1; + } + + /* Read data from NVRAM. 
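+ * Clock out 16 bits, most significant bit first; each NV_CLOCK pulse
+ * shifts one more bit in on NV_DATA_IN.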
*/ + + for (cnt = 0; cnt < 16; cnt++) { + WRT_REG_WORD(®->nvram, (NV_SELECT | NV_CLOCK)); + RD_REG_WORD(®->id_l); /* Flush PCI write */ + NVRAM_DELAY(); + data <<= 1; + reg_data = RD_REG_WORD(®->nvram); + if (reg_data & NV_DATA_IN) + data |= BIT_0; + WRT_REG_WORD(®->nvram, NV_SELECT); + RD_REG_WORD(®->id_l); /* Flush PCI write */ + NVRAM_DELAY(); + } + + /* Deselect chip. */ + + WRT_REG_WORD(®->nvram, NV_DESELECT); + RD_REG_WORD(®->id_l); /* Flush PCI write */ + NVRAM_DELAY(); + + return data; +} + +static void +qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data) +{ + struct device_reg __iomem *reg = ha->iobase; + + WRT_REG_WORD(®->nvram, data | NV_SELECT); + RD_REG_WORD(®->id_l); /* Flush PCI write */ + NVRAM_DELAY(); + WRT_REG_WORD(®->nvram, data | NV_SELECT | NV_CLOCK); + RD_REG_WORD(®->id_l); /* Flush PCI write */ + NVRAM_DELAY(); + WRT_REG_WORD(®->nvram, data | NV_SELECT); + RD_REG_WORD(®->id_l); /* Flush PCI write */ + NVRAM_DELAY(); +} + +/* + * Mailbox Command + * Issue mailbox command and waits for completion. + * + * Input: + * ha = adapter block pointer. + * mr = mailbox registers to load. + * mb = data pointer for mailbox registers. + * + * Output: + * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data. + * + * Returns: + * 0 = success + */ +static int +qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) +{ + struct device_reg __iomem *reg = ha->iobase; + int status = 0; + int cnt; + uint16_t *optr, *iptr; + uint16_t __iomem *mptr; + DECLARE_COMPLETION_ONSTACK(wait); + + ENTER("qla1280_mailbox_command"); + + if (ha->mailbox_wait) { + printk(KERN_ERR "Warning mailbox wait already in use!\n"); + } + ha->mailbox_wait = &wait; + + /* + * We really should start out by verifying that the mailbox is + * available before starting sending the command data + */ + /* Load mailbox registers. */ + mptr = (uint16_t __iomem *) ®->mailbox0; + iptr = mb; + for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) { + if (mr & BIT_0) { + WRT_REG_WORD(mptr, (*iptr)); + } + + mr >>= 1; + mptr++; + iptr++; + } + + /* Issue set host interrupt command. */ + + /* set up a timer just in case we're really jammed */ + timer_setup(&ha->mailbox_timer, qla1280_mailbox_timeout, 0); + mod_timer(&ha->mailbox_timer, jiffies + 20 * HZ); + + spin_unlock_irq(ha->host->host_lock); + WRT_REG_WORD(®->host_cmd, HC_SET_HOST_INT); + qla1280_debounce_register(®->istatus); + + wait_for_completion(&wait); + del_timer_sync(&ha->mailbox_timer); + + spin_lock_irq(ha->host->host_lock); + + ha->mailbox_wait = NULL; + + /* Check for mailbox command timeout. */ + if (ha->mailbox_out[0] != MBS_CMD_CMP) { + printk(KERN_WARNING "qla1280_mailbox_command: Command failed, " + "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = " + "0x%04x\n", + mb[0], ha->mailbox_out[0], RD_REG_WORD(®->istatus)); + printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n", + RD_REG_WORD(®->mailbox0), RD_REG_WORD(®->mailbox1), + RD_REG_WORD(®->mailbox2), RD_REG_WORD(®->mailbox3)); + printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n", + RD_REG_WORD(®->mailbox4), RD_REG_WORD(®->mailbox5), + RD_REG_WORD(®->mailbox6), RD_REG_WORD(®->mailbox7)); + status = 1; + } + + /* Load return mailbox registers. 
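+ * Copy the reply the ISR captured in mailbox_out[] back into the
+ * caller's mb[] array.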
*/ + optr = mb; + iptr = (uint16_t *) &ha->mailbox_out[0]; + mr = MAILBOX_REGISTER_COUNT; + memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t)); + + if (ha->flags.reset_marker) + qla1280_rst_aen(ha); + + if (status) + dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = " + "0x%x ****\n", mb[0]); + + LEAVE("qla1280_mailbox_command"); + return status; +} + +/* + * qla1280_poll + * Polls ISP for interrupts. + * + * Input: + * ha = adapter block pointer. + */ +static void +qla1280_poll(struct scsi_qla_host *ha) +{ + struct device_reg __iomem *reg = ha->iobase; + uint16_t data; + LIST_HEAD(done_q); + + /* ENTER("qla1280_poll"); */ + + /* Check for pending interrupts. */ + data = RD_REG_WORD(®->istatus); + if (data & RISC_INT) + qla1280_isr(ha, &done_q); + + if (!ha->mailbox_wait) { + if (ha->flags.reset_marker) + qla1280_rst_aen(ha); + } + + if (!list_empty(&done_q)) + qla1280_done(ha); + + /* LEAVE("qla1280_poll"); */ +} + +/* + * qla1280_bus_reset + * Issue SCSI bus reset. + * + * Input: + * ha = adapter block pointer. + * bus = SCSI bus number. + * + * Returns: + * 0 = success + */ +static int +qla1280_bus_reset(struct scsi_qla_host *ha, int bus) +{ + uint16_t mb[MAILBOX_REGISTER_COUNT]; + uint16_t reset_delay; + int status; + + dprintk(3, "qla1280_bus_reset: entered\n"); + + if (qla1280_verbose) + printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n", + ha->host_no, bus); + + reset_delay = ha->bus_settings[bus].bus_reset_delay; + mb[0] = MBC_BUS_RESET; + mb[1] = reset_delay; + mb[2] = (uint16_t) bus; + status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); + + if (status) { + if (ha->bus_settings[bus].failed_reset_count > 2) + ha->bus_settings[bus].scsi_bus_dead = 1; + ha->bus_settings[bus].failed_reset_count++; + } else { + spin_unlock_irq(ha->host->host_lock); + ssleep(reset_delay); + spin_lock_irq(ha->host->host_lock); + + ha->bus_settings[bus].scsi_bus_dead = 0; + ha->bus_settings[bus].failed_reset_count = 0; + ha->bus_settings[bus].reset_marker = 0; + /* Issue marker command. */ + qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL); + } + + /* + * We should probably call qla1280_set_target_parameters() + * here as well for all devices on the bus. + */ + + if (status) + dprintk(2, "qla1280_bus_reset: **** FAILED ****\n"); + else + dprintk(3, "qla1280_bus_reset: exiting normally\n"); + + return status; +} + +/* + * qla1280_device_reset + * Issue bus device reset message to the target. + * + * Input: + * ha = adapter block pointer. + * bus = SCSI BUS number. + * target = SCSI ID. + * + * Returns: + * 0 = success + */ +static int +qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target) +{ + uint16_t mb[MAILBOX_REGISTER_COUNT]; + int status; + + ENTER("qla1280_device_reset"); + + mb[0] = MBC_ABORT_TARGET; + mb[1] = (bus ? (target | BIT_7) : target) << 8; + mb[2] = 1; + status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); + + /* Issue marker command. */ + qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); + + if (status) + dprintk(2, "qla1280_device_reset: **** FAILED ****\n"); + + LEAVE("qla1280_device_reset"); + return status; +} + +/* + * qla1280_abort_command + * Abort command aborts a specified IOCB. + * + * Input: + * ha = adapter block pointer. + * sp = SB structure pointer. 
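+ * handle = index of the command in outstanding_cmds[].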
+ * + * Returns: + * 0 = success + */ +static int +qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle) +{ + uint16_t mb[MAILBOX_REGISTER_COUNT]; + unsigned int bus, target, lun; + int status; + + ENTER("qla1280_abort_command"); + + bus = SCSI_BUS_32(sp->cmd); + target = SCSI_TCN_32(sp->cmd); + lun = SCSI_LUN_32(sp->cmd); + + sp->flags |= SRB_ABORT_PENDING; + + mb[0] = MBC_ABORT_COMMAND; + mb[1] = (bus ? target | BIT_7 : target) << 8 | lun; + mb[2] = handle >> 16; + mb[3] = handle & 0xffff; + status = qla1280_mailbox_command(ha, 0x0f, &mb[0]); + + if (status) { + dprintk(2, "qla1280_abort_command: **** FAILED ****\n"); + sp->flags &= ~SRB_ABORT_PENDING; + } + + + LEAVE("qla1280_abort_command"); + return status; +} + +/* + * qla1280_reset_adapter + * Reset adapter. + * + * Input: + * ha = adapter block pointer. + */ +static void +qla1280_reset_adapter(struct scsi_qla_host *ha) +{ + struct device_reg __iomem *reg = ha->iobase; + + ENTER("qla1280_reset_adapter"); + + /* Disable ISP chip */ + ha->flags.online = 0; + WRT_REG_WORD(®->ictrl, ISP_RESET); + WRT_REG_WORD(®->host_cmd, + HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS); + RD_REG_WORD(®->id_l); /* Flush PCI write */ + + LEAVE("qla1280_reset_adapter"); +} + +/* + * Issue marker command. + * Function issues marker IOCB. + * + * Input: + * ha = adapter block pointer. + * bus = SCSI BUS number + * id = SCSI ID + * lun = SCSI LUN + * type = marker modifier + */ +static void +qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type) +{ + struct mrk_entry *pkt; + + ENTER("qla1280_marker"); + + /* Get request packet. */ + if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) { + pkt->entry_type = MARKER_TYPE; + pkt->lun = (uint8_t) lun; + pkt->target = (uint8_t) (bus ? (id | BIT_7) : id); + pkt->modifier = type; + pkt->entry_status = 0; + + /* Issue command to ISP */ + qla1280_isp_cmd(ha); + } + + LEAVE("qla1280_marker"); +} + + +/* + * qla1280_64bit_start_scsi + * The start SCSI is responsible for building request packets on + * request ring and modifying ISP input pointer. + * + * Input: + * ha = adapter block pointer. + * sp = SB structure pointer. + * + * Returns: + * 0 = success, was able to issue command. + */ +#ifdef QLA_64BIT_PTR +static int +qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) +{ + struct device_reg __iomem *reg = ha->iobase; + struct scsi_cmnd *cmd = sp->cmd; + cmd_a64_entry_t *pkt; + __le32 *dword_ptr; + dma_addr_t dma_handle; + int status = 0; + int cnt; + int req_cnt; + int seg_cnt; + u8 dir; + + ENTER("qla1280_64bit_start_scsi:"); + + /* Calculate number of entries and segments required. */ + req_cnt = 1; + seg_cnt = scsi_dma_map(cmd); + if (seg_cnt > 0) { + if (seg_cnt > 2) { + req_cnt += (seg_cnt - 2) / 5; + if ((seg_cnt - 2) % 5) + req_cnt++; + } + } else if (seg_cnt < 0) { + status = 1; + goto out; + } + + if ((req_cnt + 2) >= ha->req_q_cnt) { + /* Calculate number of free request entries. */ + cnt = RD_REG_WORD(®->mailbox4); + if (ha->req_ring_index < cnt) + ha->req_q_cnt = cnt - ha->req_ring_index; + else + ha->req_q_cnt = + REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt); + } + + dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n", + ha->req_q_cnt, seg_cnt); + + /* If room for request in request ring. 
*/ + if ((req_cnt + 2) >= ha->req_q_cnt) { + status = SCSI_MLQUEUE_HOST_BUSY; + dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt=" + "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, + req_cnt); + goto out; + } + + /* Check for room in outstanding command list. */ + for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS && + ha->outstanding_cmds[cnt] != NULL; cnt++); + + if (cnt >= MAX_OUTSTANDING_COMMANDS) { + status = SCSI_MLQUEUE_HOST_BUSY; + dprintk(2, "qla1280_start_scsi: NO ROOM IN " + "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt); + goto out; + } + + ha->outstanding_cmds[cnt] = sp; + ha->req_q_cnt -= req_cnt; + CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1); + + dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp, + cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd)); + dprintk(2, " bus %i, target %i, lun %i\n", + SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); + qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE); + + /* + * Build command packet. + */ + pkt = (cmd_a64_entry_t *) ha->request_ring_ptr; + + pkt->entry_type = COMMAND_A64_TYPE; + pkt->entry_count = (uint8_t) req_cnt; + pkt->sys_define = (uint8_t) ha->req_ring_index; + pkt->entry_status = 0; + pkt->handle = cpu_to_le32(cnt); + + /* Zero out remaining portion of packet. */ + memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); + + /* Set ISP command timeout. */ + pkt->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ); + + /* Set device target ID and LUN */ + pkt->lun = SCSI_LUN_32(cmd); + pkt->target = SCSI_BUS_32(cmd) ? + (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd); + + /* Enable simple tag queuing if device supports it. */ + if (cmd->device->simple_tags) + pkt->control_flags |= cpu_to_le16(BIT_3); + + /* Load SCSI command packet. */ + pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd)); + memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd)); + /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */ + + /* Set transfer direction. */ + dir = qla1280_data_direction(cmd); + pkt->control_flags |= cpu_to_le16(dir); + + /* Set total data segment count. */ + pkt->dseg_count = cpu_to_le16(seg_cnt); + + /* + * Load data segments. + */ + if (seg_cnt) { /* If data transfer. */ + struct scatterlist *sg, *s; + int remseg = seg_cnt; + + sg = scsi_sglist(cmd); + + /* Setup packet address segment pointer. */ + dword_ptr = (u32 *)&pkt->dseg_0_address; + + /* Load command entry data segments. */ + for_each_sg(sg, s, seg_cnt, cnt) { + if (cnt == 2) + break; + + dma_handle = sg_dma_address(s); + *dword_ptr++ = + cpu_to_le32(lower_32_bits(dma_handle)); + *dword_ptr++ = + cpu_to_le32(upper_32_bits(dma_handle)); + *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); + dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", + cpu_to_le32(upper_32_bits(dma_handle)), + cpu_to_le32(lower_32_bits(dma_handle)), + cpu_to_le32(sg_dma_len(sg_next(s)))); + remseg--; + } + dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " + "command packet data - b %i, t %i, l %i \n", + SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), + SCSI_LUN_32(cmd)); + qla1280_dump_buffer(5, (char *)pkt, + REQUEST_ENTRY_SIZE); + + /* + * Build continuation packets. + */ + dprintk(3, "S/G Building Continuation...seg_cnt=0x%x " + "remains\n", seg_cnt); + + while (remseg > 0) { + /* Update sg start */ + sg = s; + /* Adjust ring index. 
*/ + ha->req_ring_index++; + if (ha->req_ring_index == REQUEST_ENTRY_CNT) { + ha->req_ring_index = 0; + ha->request_ring_ptr = + ha->request_ring; + } else + ha->request_ring_ptr++; + + pkt = (cmd_a64_entry_t *)ha->request_ring_ptr; + + /* Zero out packet. */ + memset(pkt, 0, REQUEST_ENTRY_SIZE); + + /* Load packet defaults. */ + ((struct cont_a64_entry *) pkt)->entry_type = + CONTINUE_A64_TYPE; + ((struct cont_a64_entry *) pkt)->entry_count = 1; + ((struct cont_a64_entry *) pkt)->sys_define = + (uint8_t)ha->req_ring_index; + /* Setup packet address segment pointer. */ + dword_ptr = + (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address; + + /* Load continuation entry data segments. */ + for_each_sg(sg, s, remseg, cnt) { + if (cnt == 5) + break; + dma_handle = sg_dma_address(s); + *dword_ptr++ = + cpu_to_le32(lower_32_bits(dma_handle)); + *dword_ptr++ = + cpu_to_le32(upper_32_bits(dma_handle)); + *dword_ptr++ = + cpu_to_le32(sg_dma_len(s)); + dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n", + cpu_to_le32(upper_32_bits(dma_handle)), + cpu_to_le32(lower_32_bits(dma_handle)), + cpu_to_le32(sg_dma_len(s))); + } + remseg -= cnt; + dprintk(5, "qla1280_64bit_start_scsi: " + "continuation packet data - b %i, t " + "%i, l %i \n", SCSI_BUS_32(cmd), + SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); + qla1280_dump_buffer(5, (char *)pkt, + REQUEST_ENTRY_SIZE); + } + } else { /* No data transfer */ + dprintk(5, "qla1280_64bit_start_scsi: No data, command " + "packet data - b %i, t %i, l %i \n", + SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); + qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE); + } + /* Adjust ring index. */ + ha->req_ring_index++; + if (ha->req_ring_index == REQUEST_ENTRY_CNT) { + ha->req_ring_index = 0; + ha->request_ring_ptr = ha->request_ring; + } else + ha->request_ring_ptr++; + + /* Set chip new ring index. */ + dprintk(2, + "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n"); + sp->flags |= SRB_SENT; + ha->actthreads++; + WRT_REG_WORD(®->mailbox4, ha->req_ring_index); + + out: + if (status) + dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n"); + else + dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n"); + + return status; +} +#else /* !QLA_64BIT_PTR */ + +/* + * qla1280_32bit_start_scsi + * The start SCSI is responsible for building request packets on + * request ring and modifying ISP input pointer. + * + * The Qlogic firmware interface allows every queue slot to have a SCSI + * command and up to 4 scatter/gather (SG) entries. If we need more + * than 4 SG entries, then continuation entries are used that can + * hold another 7 entries each. The start routine determines if there + * is eought empty slots then build the combination of requests to + * fulfill the OS request. + * + * Input: + * ha = adapter block pointer. + * sp = SCSI Request Block structure pointer. + * + * Returns: + * 0 = success, was able to issue command. + */ +static int +qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) +{ + struct device_reg __iomem *reg = ha->iobase; + struct scsi_cmnd *cmd = sp->cmd; + struct cmd_entry *pkt; + __le32 *dword_ptr; + int status = 0; + int cnt; + int req_cnt; + int seg_cnt; + u8 dir; + + ENTER("qla1280_32bit_start_scsi"); + + dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp, + cmd->cmnd[0]); + + /* Calculate number of entries and segments required. 
*/ + req_cnt = 1; + seg_cnt = scsi_dma_map(cmd); + if (seg_cnt) { + /* + * if greater than four sg entries then we need to allocate + * continuation entries + */ + if (seg_cnt > 4) { + req_cnt += (seg_cnt - 4) / 7; + if ((seg_cnt - 4) % 7) + req_cnt++; + } + dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n", + cmd, seg_cnt, req_cnt); + } else if (seg_cnt < 0) { + status = 1; + goto out; + } + + if ((req_cnt + 2) >= ha->req_q_cnt) { + /* Calculate number of free request entries. */ + cnt = RD_REG_WORD(®->mailbox4); + if (ha->req_ring_index < cnt) + ha->req_q_cnt = cnt - ha->req_ring_index; + else + ha->req_q_cnt = + REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt); + } + + dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n", + ha->req_q_cnt, seg_cnt); + /* If room for request in request ring. */ + if ((req_cnt + 2) >= ha->req_q_cnt) { + status = SCSI_MLQUEUE_HOST_BUSY; + dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, " + "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index, + ha->req_q_cnt, req_cnt); + goto out; + } + + /* Check for empty slot in outstanding command list. */ + for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS && + ha->outstanding_cmds[cnt]; cnt++); + + if (cnt >= MAX_OUTSTANDING_COMMANDS) { + status = SCSI_MLQUEUE_HOST_BUSY; + dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING " + "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt); + goto out; + } + + CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1); + ha->outstanding_cmds[cnt] = sp; + ha->req_q_cnt -= req_cnt; + + /* + * Build command packet. + */ + pkt = (struct cmd_entry *) ha->request_ring_ptr; + + pkt->entry_type = COMMAND_TYPE; + pkt->entry_count = (uint8_t) req_cnt; + pkt->sys_define = (uint8_t) ha->req_ring_index; + pkt->entry_status = 0; + pkt->handle = cpu_to_le32(cnt); + + /* Zero out remaining portion of packet. */ + memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); + + /* Set ISP command timeout. */ + pkt->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ); + + /* Set device target ID and LUN */ + pkt->lun = SCSI_LUN_32(cmd); + pkt->target = SCSI_BUS_32(cmd) ? + (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd); + + /* Enable simple tag queuing if device supports it. */ + if (cmd->device->simple_tags) + pkt->control_flags |= cpu_to_le16(BIT_3); + + /* Load SCSI command packet. */ + pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd)); + memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd)); + + /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */ + /* Set transfer direction. */ + dir = qla1280_data_direction(cmd); + pkt->control_flags |= cpu_to_le16(dir); + + /* Set total data segment count. */ + pkt->dseg_count = cpu_to_le16(seg_cnt); + + /* + * Load data segments. + */ + if (seg_cnt) { + struct scatterlist *sg, *s; + int remseg = seg_cnt; + + sg = scsi_sglist(cmd); + + /* Setup packet address segment pointer. */ + dword_ptr = &pkt->dseg_0_address; + + dprintk(3, "Building S/G data segments..\n"); + qla1280_dump_buffer(1, (char *)sg, 4 * 16); + + /* Load command entry data segments. */ + for_each_sg(sg, s, seg_cnt, cnt) { + if (cnt == 4) + break; + *dword_ptr++ = + cpu_to_le32(lower_32_bits(sg_dma_address(s))); + *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); + dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n", + (lower_32_bits(sg_dma_address(s))), + (sg_dma_len(s))); + remseg--; + } + /* + * Build continuation packets. 
+ */ + dprintk(3, "S/G Building Continuation" + "...seg_cnt=0x%x remains\n", seg_cnt); + while (remseg > 0) { + /* Continue from end point */ + sg = s; + /* Adjust ring index. */ + ha->req_ring_index++; + if (ha->req_ring_index == REQUEST_ENTRY_CNT) { + ha->req_ring_index = 0; + ha->request_ring_ptr = + ha->request_ring; + } else + ha->request_ring_ptr++; + + pkt = (struct cmd_entry *)ha->request_ring_ptr; + + /* Zero out packet. */ + memset(pkt, 0, REQUEST_ENTRY_SIZE); + + /* Load packet defaults. */ + ((struct cont_entry *) pkt)-> + entry_type = CONTINUE_TYPE; + ((struct cont_entry *) pkt)->entry_count = 1; + + ((struct cont_entry *) pkt)->sys_define = + (uint8_t) ha->req_ring_index; + + /* Setup packet address segment pointer. */ + dword_ptr = + &((struct cont_entry *) pkt)->dseg_0_address; + + /* Load continuation entry data segments. */ + for_each_sg(sg, s, remseg, cnt) { + if (cnt == 7) + break; + *dword_ptr++ = + cpu_to_le32(lower_32_bits(sg_dma_address(s))); + *dword_ptr++ = + cpu_to_le32(sg_dma_len(s)); + dprintk(1, + "S/G Segment Cont. phys_addr=0x%x, " + "len=0x%x\n", + cpu_to_le32(lower_32_bits(sg_dma_address(s))), + cpu_to_le32(sg_dma_len(s))); + } + remseg -= cnt; + dprintk(5, "qla1280_32bit_start_scsi: " + "continuation packet data - " + "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd), + SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); + qla1280_dump_buffer(5, (char *)pkt, + REQUEST_ENTRY_SIZE); + } + } else { /* No data transfer at all */ + dprintk(5, "qla1280_32bit_start_scsi: No data, command " + "packet data - \n"); + qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE); + } + dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n"); + qla1280_dump_buffer(5, (char *)ha->request_ring_ptr, + REQUEST_ENTRY_SIZE); + + /* Adjust ring index. */ + ha->req_ring_index++; + if (ha->req_ring_index == REQUEST_ENTRY_CNT) { + ha->req_ring_index = 0; + ha->request_ring_ptr = ha->request_ring; + } else + ha->request_ring_ptr++; + + /* Set chip new ring index. */ + dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC " + "for pending command\n"); + sp->flags |= SRB_SENT; + ha->actthreads++; + WRT_REG_WORD(®->mailbox4, ha->req_ring_index); + +out: + if (status) + dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n"); + + LEAVE("qla1280_32bit_start_scsi"); + + return status; +} +#endif + +/* + * qla1280_req_pkt + * Function is responsible for locking ring and + * getting a zeroed out request packet. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = failed to get slot. + */ +static request_t * +qla1280_req_pkt(struct scsi_qla_host *ha) +{ + struct device_reg __iomem *reg = ha->iobase; + request_t *pkt = NULL; + int cnt; + uint32_t timer; + + ENTER("qla1280_req_pkt"); + + /* + * This can be called from interrupt context, damn it!!! + */ + /* Wait for 30 seconds for slot. */ + for (timer = 15000000; timer; timer--) { + if (ha->req_q_cnt > 0) { + /* Calculate number of free request entries. */ + cnt = RD_REG_WORD(®->mailbox4); + if (ha->req_ring_index < cnt) + ha->req_q_cnt = cnt - ha->req_ring_index; + else + ha->req_q_cnt = + REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt); + } + + /* Found empty request ring slot? */ + if (ha->req_q_cnt > 0) { + ha->req_q_cnt--; + pkt = ha->request_ring_ptr; + + /* Zero out packet. */ + memset(pkt, 0, REQUEST_ENTRY_SIZE); + + /* + * How can this be right when we have a ring + * size of 512??? + */ + /* Set system defined field. */ + pkt->sys_define = (uint8_t) ha->req_ring_index; + + /* Set entry count. 
*/ + pkt->entry_count = 1; + + break; + } + + udelay(2); /* 10 */ + + /* Check for pending interrupts. */ + qla1280_poll(ha); + } + + if (!pkt) + dprintk(2, "qla1280_req_pkt: **** FAILED ****\n"); + else + dprintk(3, "qla1280_req_pkt: exiting normally\n"); + + return pkt; +} + +/* + * qla1280_isp_cmd + * Function is responsible for modifying ISP input pointer. + * Releases ring lock. + * + * Input: + * ha = adapter block pointer. + */ +static void +qla1280_isp_cmd(struct scsi_qla_host *ha) +{ + struct device_reg __iomem *reg = ha->iobase; + + ENTER("qla1280_isp_cmd"); + + dprintk(5, "qla1280_isp_cmd: IOCB data:\n"); + qla1280_dump_buffer(5, (char *)ha->request_ring_ptr, + REQUEST_ENTRY_SIZE); + + /* Adjust ring index. */ + ha->req_ring_index++; + if (ha->req_ring_index == REQUEST_ENTRY_CNT) { + ha->req_ring_index = 0; + ha->request_ring_ptr = ha->request_ring; + } else + ha->request_ring_ptr++; + + /* + * Update request index to mailbox4 (Request Queue In). + */ + WRT_REG_WORD(®->mailbox4, ha->req_ring_index); + + LEAVE("qla1280_isp_cmd"); +} + +/****************************************************************************/ +/* Interrupt Service Routine. */ +/****************************************************************************/ + +/**************************************************************************** + * qla1280_isr + * Calls I/O done on command completion. + * + * Input: + * ha = adapter block pointer. + * done_q = done queue. + ****************************************************************************/ +static void +qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q) +{ + struct device_reg __iomem *reg = ha->iobase; + struct response *pkt; + struct srb *sp = NULL; + uint16_t mailbox[MAILBOX_REGISTER_COUNT]; + uint16_t *wptr; + uint32_t index; + u16 istatus; + + ENTER("qla1280_isr"); + + istatus = RD_REG_WORD(®->istatus); + if (!(istatus & (RISC_INT | PCI_INT))) + return; + + /* Save mailbox register 5 */ + mailbox[5] = RD_REG_WORD(®->mailbox5); + + /* Check for mailbox interrupt. */ + + mailbox[0] = RD_REG_WORD_dmasync(®->semaphore); + + if (mailbox[0] & BIT_0) { + /* Get mailbox data. */ + /* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */ + + wptr = &mailbox[0]; + *wptr++ = RD_REG_WORD(®->mailbox0); + *wptr++ = RD_REG_WORD(®->mailbox1); + *wptr = RD_REG_WORD(®->mailbox2); + if (mailbox[0] != MBA_SCSI_COMPLETION) { + wptr++; + *wptr++ = RD_REG_WORD(®->mailbox3); + *wptr++ = RD_REG_WORD(®->mailbox4); + wptr++; + *wptr++ = RD_REG_WORD(®->mailbox6); + *wptr = RD_REG_WORD(®->mailbox7); + } + + /* Release mailbox registers. */ + + WRT_REG_WORD(®->semaphore, 0); + WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT); + + dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x", + mailbox[0]); + + /* Handle asynchronous event */ + switch (mailbox[0]) { + case MBA_SCSI_COMPLETION: /* Response completion */ + dprintk(5, "qla1280_isr: mailbox SCSI response " + "completion\n"); + + if (ha->flags.online) { + /* Get outstanding command index. */ + index = mailbox[2] << 16 | mailbox[1]; + + /* Validate handle. */ + if (index < MAX_OUTSTANDING_COMMANDS) + sp = ha->outstanding_cmds[index]; + else + sp = NULL; + + if (sp) { + /* Free outstanding command slot. */ + ha->outstanding_cmds[index] = NULL; + + /* Save ISP completion status */ + CMD_RESULT(sp->cmd) = 0; + CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE; + + /* Place block on done queue */ + list_add_tail(&sp->list, done_q); + } else { + /* + * If we get here we have a real problem! 
+ */ + printk(KERN_WARNING + "qla1280: ISP invalid handle\n"); + } + } + break; + + case MBA_BUS_RESET: /* SCSI Bus Reset */ + ha->flags.reset_marker = 1; + index = mailbox[6] & BIT_0; + ha->bus_settings[index].reset_marker = 1; + + printk(KERN_DEBUG "qla1280_isr(): index %i " + "asynchronous BUS_RESET\n", index); + break; + + case MBA_SYSTEM_ERR: /* System Error */ + printk(KERN_WARNING + "qla1280: ISP System Error - mbx1=%xh, mbx2=" + "%xh, mbx3=%xh\n", mailbox[1], mailbox[2], + mailbox[3]); + break; + + case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ + printk(KERN_WARNING + "qla1280: ISP Request Transfer Error\n"); + break; + + case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ + printk(KERN_WARNING + "qla1280: ISP Response Transfer Error\n"); + break; + + case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ + dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n"); + break; + + case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */ + dprintk(2, + "qla1280_isr: asynchronous TIMEOUT_RESET\n"); + break; + + case MBA_DEVICE_RESET: /* Bus Device Reset */ + printk(KERN_INFO "qla1280_isr(): asynchronous " + "BUS_DEVICE_RESET\n"); + + ha->flags.reset_marker = 1; + index = mailbox[6] & BIT_0; + ha->bus_settings[index].reset_marker = 1; + break; + + case MBA_BUS_MODE_CHANGE: + dprintk(2, + "qla1280_isr: asynchronous BUS_MODE_CHANGE\n"); + break; + + default: + /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */ + if (mailbox[0] < MBA_ASYNC_EVENT) { + wptr = &mailbox[0]; + memcpy((uint16_t *) ha->mailbox_out, wptr, + MAILBOX_REGISTER_COUNT * + sizeof(uint16_t)); + + if(ha->mailbox_wait != NULL) + complete(ha->mailbox_wait); + } + break; + } + } else { + WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT); + } + + /* + * We will receive interrupts during mailbox testing prior to + * the card being marked online, hence the double check. + */ + if (!(ha->flags.online && !ha->mailbox_wait)) { + dprintk(2, "qla1280_isr: Response pointer Error\n"); + goto out; + } + + if (mailbox[5] >= RESPONSE_ENTRY_CNT) + goto out; + + while (ha->rsp_ring_index != mailbox[5]) { + pkt = ha->response_ring_ptr; + + dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]" + " = 0x%x\n", ha->rsp_ring_index, mailbox[5]); + dprintk(5,"qla1280_isr: response packet data\n"); + qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE); + + if (pkt->entry_type == STATUS_TYPE) { + if ((le16_to_cpu(pkt->scsi_status) & 0xff) + || pkt->comp_status || pkt->entry_status) { + dprintk(2, "qla1280_isr: ha->rsp_ring_index = " + "0x%x mailbox[5] = 0x%x, comp_status " + "= 0x%x, scsi_status = 0x%x\n", + ha->rsp_ring_index, mailbox[5], + le16_to_cpu(pkt->comp_status), + le16_to_cpu(pkt->scsi_status)); + } + } else { + dprintk(2, "qla1280_isr: ha->rsp_ring_index = " + "0x%x, mailbox[5] = 0x%x\n", + ha->rsp_ring_index, mailbox[5]); + dprintk(2, "qla1280_isr: response packet data\n"); + qla1280_dump_buffer(2, (char *)pkt, + RESPONSE_ENTRY_SIZE); + } + + if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) { + dprintk(2, "status: Cmd %p, handle %i\n", + ha->outstanding_cmds[pkt->handle]->cmd, + pkt->handle); + if (pkt->entry_type == STATUS_TYPE) + qla1280_status_entry(ha, pkt, done_q); + else + qla1280_error_entry(ha, pkt, done_q); + /* Adjust ring index. 
*/ + ha->rsp_ring_index++; + if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) { + ha->rsp_ring_index = 0; + ha->response_ring_ptr = ha->response_ring; + } else + ha->response_ring_ptr++; + WRT_REG_WORD(®->mailbox5, ha->rsp_ring_index); + } + } + + out: + LEAVE("qla1280_isr"); +} + +/* + * qla1280_rst_aen + * Processes asynchronous reset. + * + * Input: + * ha = adapter block pointer. + */ +static void +qla1280_rst_aen(struct scsi_qla_host *ha) +{ + uint8_t bus; + + ENTER("qla1280_rst_aen"); + + if (ha->flags.online && !ha->flags.reset_active && + !ha->flags.abort_isp_active) { + ha->flags.reset_active = 1; + while (ha->flags.reset_marker) { + /* Issue marker command. */ + ha->flags.reset_marker = 0; + for (bus = 0; bus < ha->ports && + !ha->flags.reset_marker; bus++) { + if (ha->bus_settings[bus].reset_marker) { + ha->bus_settings[bus].reset_marker = 0; + qla1280_marker(ha, bus, 0, 0, + MK_SYNC_ALL); + } + } + } + } + + LEAVE("qla1280_rst_aen"); +} + + +/* + * qla1280_status_entry + * Processes received ISP status entry. + * + * Input: + * ha = adapter block pointer. + * pkt = entry pointer. + * done_q = done queue. + */ +static void +qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt, + struct list_head *done_q) +{ + int sense_sz; + struct srb *sp; + struct scsi_cmnd *cmd; + uint32_t handle = le32_to_cpu(pkt->handle); + uint16_t scsi_status = le16_to_cpu(pkt->scsi_status); + uint16_t comp_status = le16_to_cpu(pkt->comp_status); + + ENTER("qla1280_status_entry"); + + /* Validate handle. */ + if (handle < MAX_OUTSTANDING_COMMANDS) + sp = ha->outstanding_cmds[handle]; + else + sp = NULL; + + if (!sp) { + printk(KERN_WARNING "qla1280: Status Entry invalid handle\n"); + goto out; + } + + /* Free outstanding command slot. */ + ha->outstanding_cmds[handle] = NULL; + + cmd = sp->cmd; + + if (comp_status || scsi_status) { + dprintk(3, "scsi: comp_status = 0x%x, scsi_status = " + "0x%x, handle = 0x%x\n", comp_status, + scsi_status, handle); + } + + /* Target busy or queue full */ + if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL || + (scsi_status & 0xFF) == SAM_STAT_BUSY) { + CMD_RESULT(cmd) = scsi_status & 0xff; + } else { + + /* Save ISP completion status */ + CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd); + + if (scsi_status & SAM_STAT_CHECK_CONDITION) { + if (comp_status != CS_ARS_FAILED) { + uint16_t req_sense_length = + le16_to_cpu(pkt->req_sense_length); + if (req_sense_length < CMD_SNSLEN(cmd)) + sense_sz = req_sense_length; + else + /* + * scsi_cmnd->sense_buffer is + * 64 bytes, why only copy 63? + * This looks wrong! /Jes + */ + sense_sz = CMD_SNSLEN(cmd) - 1; + + memcpy(cmd->sense_buffer, + &pkt->req_sense_data, sense_sz); + } else + sense_sz = 0; + memset(cmd->sense_buffer + sense_sz, 0, + SCSI_SENSE_BUFFERSIZE - sense_sz); + + dprintk(2, "qla1280_status_entry: Check " + "condition Sense data, b %i, t %i, " + "l %i\n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), + SCSI_LUN_32(cmd)); + if (sense_sz) + qla1280_dump_buffer(2, + (char *)cmd->sense_buffer, + sense_sz); + } + } + + CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE; + + /* Place command on done queue. */ + list_add_tail(&sp->list, done_q); + out: + LEAVE("qla1280_status_entry"); +} + +/* + * qla1280_error_entry + * Processes error entry. + * + * Input: + * ha = adapter block pointer. + * pkt = entry pointer. + * done_q = done queue. 
+ */ +static void +qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt, + struct list_head *done_q) +{ + struct srb *sp; + uint32_t handle = le32_to_cpu(pkt->handle); + + ENTER("qla1280_error_entry"); + + if (pkt->entry_status & BIT_3) + dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n"); + else if (pkt->entry_status & BIT_2) + dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n"); + else if (pkt->entry_status & BIT_1) + dprintk(2, "qla1280_error_entry: FULL flag error\n"); + else + dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n"); + + /* Validate handle. */ + if (handle < MAX_OUTSTANDING_COMMANDS) + sp = ha->outstanding_cmds[handle]; + else + sp = NULL; + + if (sp) { + /* Free outstanding command slot. */ + ha->outstanding_cmds[handle] = NULL; + + /* Bad payload or header */ + if (pkt->entry_status & (BIT_3 + BIT_2)) { + /* Bad payload or header, set error status. */ + /* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */ + CMD_RESULT(sp->cmd) = DID_ERROR << 16; + } else if (pkt->entry_status & BIT_1) { /* FULL flag */ + CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16; + } else { + /* Set error status. */ + CMD_RESULT(sp->cmd) = DID_ERROR << 16; + } + + CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE; + + /* Place command on done queue. */ + list_add_tail(&sp->list, done_q); + } +#ifdef QLA_64BIT_PTR + else if (pkt->entry_type == COMMAND_A64_TYPE) { + printk(KERN_WARNING "!qla1280: Error Entry invalid handle"); + } +#endif + + LEAVE("qla1280_error_entry"); +} + +/* + * qla1280_abort_isp + * Resets ISP and aborts all outstanding commands. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = success + */ +static int +qla1280_abort_isp(struct scsi_qla_host *ha) +{ + struct device_reg __iomem *reg = ha->iobase; + struct srb *sp; + int status = 0; + int cnt; + int bus; + + ENTER("qla1280_abort_isp"); + + if (ha->flags.abort_isp_active || !ha->flags.online) + goto out; + + ha->flags.abort_isp_active = 1; + + /* Disable ISP interrupts. */ + qla1280_disable_intrs(ha); + WRT_REG_WORD(®->host_cmd, HC_PAUSE_RISC); + RD_REG_WORD(®->id_l); + + printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n", + ha->host_no); + /* Dequeue all commands in outstanding command list. */ + for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { + struct scsi_cmnd *cmd; + sp = ha->outstanding_cmds[cnt]; + if (sp) { + cmd = sp->cmd; + CMD_RESULT(cmd) = DID_RESET << 16; + CMD_HANDLE(cmd) = COMPLETED_HANDLE; + ha->outstanding_cmds[cnt] = NULL; + list_add_tail(&sp->list, &ha->done_q); + } + } + + qla1280_done(ha); + + status = qla1280_load_firmware(ha); + if (status) + goto out; + + /* Setup adapter based on NVRAM parameters. */ + qla1280_nvram_config (ha); + + status = qla1280_init_rings(ha); + if (status) + goto out; + + /* Issue SCSI reset. */ + for (bus = 0; bus < ha->ports; bus++) + qla1280_bus_reset(ha, bus); + + ha->flags.abort_isp_active = 0; + out: + if (status) { + printk(KERN_WARNING + "qla1280: ISP error recovery failed, board disabled"); + qla1280_reset_adapter(ha); + dprintk(2, "qla1280_abort_isp: **** FAILED ****\n"); + } + + LEAVE("qla1280_abort_isp"); + return status; +} + + +/* + * qla1280_debounce_register + * Debounce register. + * + * Input: + * port = register address. + * + * Returns: + * register value. 
+ */ +static u16 +qla1280_debounce_register(volatile u16 __iomem * addr) +{ + volatile u16 ret; + volatile u16 ret2; + + ret = RD_REG_WORD(addr); + ret2 = RD_REG_WORD(addr); + + if (ret == ret2) + return ret; + + do { + cpu_relax(); + ret = RD_REG_WORD(addr); + ret2 = RD_REG_WORD(addr); + } while (ret != ret2); + + return ret; +} + + +/************************************************************************ + * qla1280_check_for_dead_scsi_bus * + * * + * This routine checks for a dead SCSI bus * + ************************************************************************/ +#define SET_SXP_BANK 0x0100 +#define SCSI_PHASE_INVALID 0x87FF +static int +qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus) +{ + uint16_t config_reg, scsi_control; + struct device_reg __iomem *reg = ha->iobase; + + if (ha->bus_settings[bus].scsi_bus_dead) { + WRT_REG_WORD(®->host_cmd, HC_PAUSE_RISC); + config_reg = RD_REG_WORD(®->cfg_1); + WRT_REG_WORD(®->cfg_1, SET_SXP_BANK); + scsi_control = RD_REG_WORD(®->scsiControlPins); + WRT_REG_WORD(®->cfg_1, config_reg); + WRT_REG_WORD(®->host_cmd, HC_RELEASE_RISC); + + if (scsi_control == SCSI_PHASE_INVALID) { + ha->bus_settings[bus].scsi_bus_dead = 1; + return 1; /* bus is dead */ + } else { + ha->bus_settings[bus].scsi_bus_dead = 0; + ha->bus_settings[bus].failed_reset_count = 0; + } + } + return 0; /* bus is not dead */ +} + +static void +qla1280_get_target_parameters(struct scsi_qla_host *ha, + struct scsi_device *device) +{ + uint16_t mb[MAILBOX_REGISTER_COUNT]; + int bus, target, lun; + + bus = device->channel; + target = device->id; + lun = device->lun; + + + mb[0] = MBC_GET_TARGET_PARAMETERS; + mb[1] = (uint16_t) (bus ? target | BIT_7 : target); + mb[1] <<= 8; + qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0, + &mb[0]); + + printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun); + + if (mb[3] != 0) { + printk(KERN_CONT " Sync: period %d, offset %d", + (mb[3] & 0xff), (mb[3] >> 8)); + if (mb[2] & BIT_13) + printk(KERN_CONT ", Wide"); + if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2) + printk(KERN_CONT ", DT"); + } else + printk(KERN_CONT " Async"); + + if (device->simple_tags) + printk(KERN_CONT ", Tagged queuing: depth %d", device->queue_depth); + printk(KERN_CONT "\n"); +} + + +#if DEBUG_QLA1280 +static void +__qla1280_dump_buffer(char *b, int size) +{ + int cnt; + u8 c; + + printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah " + "Bh Ch Dh Eh Fh\n"); + printk(KERN_DEBUG "---------------------------------------------" + "------------------\n"); + + for (cnt = 0; cnt < size;) { + c = *b++; + + printk("0x%02x", c); + cnt++; + if (!(cnt % 16)) + printk("\n"); + else + printk(" "); + } + if (cnt % 16) + printk("\n"); +} + +/************************************************************************** + * ql1280_print_scsi_cmd + * + **************************************************************************/ +static void +__qla1280_print_scsi_cmd(struct scsi_cmnd *cmd) +{ + struct scsi_qla_host *ha; + struct Scsi_Host *host = CMD_HOST(cmd); + struct srb *sp; + /* struct scatterlist *sg; */ + + int i; + ha = (struct scsi_qla_host *)host->hostdata; + + sp = scsi_cmd_priv(cmd); + printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd)); + printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n", + SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd), + CMD_CDBLEN(cmd)); + printk(" CDB = "); + for (i = 0; i < cmd->cmd_len; i++) { + printk("0x%02x ", cmd->cmnd[i]); + } + printk(" seg_cnt =%d\n", 
scsi_sg_count(cmd)); + printk(" request buffer=0x%p, request buffer len=0x%x\n", + scsi_sglist(cmd), scsi_bufflen(cmd)); + /* if (cmd->use_sg) + { + sg = (struct scatterlist *) cmd->request_buffer; + printk(" SG buffer: \n"); + qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist))); + } */ + printk(" tag=%d, transfersize=0x%x \n", + scsi_cmd_to_rq(cmd)->tag, cmd->transfersize); + printk(" underflow size = 0x%x, direction=0x%x\n", + cmd->underflow, cmd->sc_data_direction); +} + +/************************************************************************** + * ql1280_dump_device + * + **************************************************************************/ +static void +ql1280_dump_device(struct scsi_qla_host *ha) +{ + + struct scsi_cmnd *cp; + struct srb *sp; + int i; + + printk(KERN_DEBUG "Outstanding Commands on controller:\n"); + + for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) { + if ((sp = ha->outstanding_cmds[i]) == NULL) + continue; + if ((cp = sp->cmd) == NULL) + continue; + qla1280_print_scsi_cmd(1, cp); + } +} +#endif + + +enum tokens { + TOKEN_NVRAM, + TOKEN_SYNC, + TOKEN_WIDE, + TOKEN_PPR, + TOKEN_VERBOSE, + TOKEN_DEBUG, +}; + +struct setup_tokens { + char *token; + int val; +}; + +static struct setup_tokens setup_token[] __initdata = +{ + { "nvram", TOKEN_NVRAM }, + { "sync", TOKEN_SYNC }, + { "wide", TOKEN_WIDE }, + { "ppr", TOKEN_PPR }, + { "verbose", TOKEN_VERBOSE }, + { "debug", TOKEN_DEBUG }, +}; + + +/************************************************************************** + * qla1280_setup + * + * Handle boot parameters. This really needs to be changed so one + * can specify per adapter parameters. + **************************************************************************/ +static int __init +qla1280_setup(char *s) +{ + char *cp, *ptr; + unsigned long val; + + cp = s; + + while (cp && (ptr = strchr(cp, ':'))) { + ptr++; + if (!strcmp(ptr, "yes")) { + val = 0x10000; + ptr += 3; + } else if (!strcmp(ptr, "no")) { + val = 0; + ptr += 2; + } else + val = simple_strtoul(ptr, &ptr, 0); + + switch (qla1280_get_token(cp)) { + case TOKEN_NVRAM: + if (!val) + driver_setup.no_nvram = 1; + break; + case TOKEN_SYNC: + if (!val) + driver_setup.no_sync = 1; + else if (val != 0x10000) + driver_setup.sync_mask = val; + break; + case TOKEN_WIDE: + if (!val) + driver_setup.no_wide = 1; + else if (val != 0x10000) + driver_setup.wide_mask = val; + break; + case TOKEN_PPR: + if (!val) + driver_setup.no_ppr = 1; + else if (val != 0x10000) + driver_setup.ppr_mask = val; + break; + case TOKEN_VERBOSE: + qla1280_verbose = val; + break; + default: + printk(KERN_INFO "qla1280: unknown boot option %s\n", + cp); + } + + cp = strchr(ptr, ';'); + if (cp) + cp++; + else { + break; + } + } + return 1; +} + + +static int __init +qla1280_get_token(char *str) +{ + char *sep; + long ret = -1; + int i; + + sep = strchr(str, ':'); + + if (sep) { + for (i = 0; i < ARRAY_SIZE(setup_token); i++) { + if (!strncmp(setup_token[i].token, str, (sep - str))) { + ret = setup_token[i].val; + break; + } + } + } + + return ret; +} + + +static const struct scsi_host_template qla1280_driver_template = { + .module = THIS_MODULE, + .proc_name = "qla1280", + .name = "Qlogic ISP 1280/12160", + .info = qla1280_info, + .slave_configure = qla1280_slave_configure, + .queuecommand = qla1280_queuecommand, + .eh_abort_handler = qla1280_eh_abort, + .eh_device_reset_handler= qla1280_eh_device_reset, + .eh_bus_reset_handler = qla1280_eh_bus_reset, + .eh_host_reset_handler = qla1280_eh_adapter_reset, + 
.bios_param = qla1280_biosparam, + .can_queue = MAX_OUTSTANDING_COMMANDS, + .this_id = -1, + .sg_tablesize = SG_ALL, + .cmd_size = sizeof(struct srb), +}; + + +static int +qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int devnum = id->driver_data; + struct qla_boards *bdp = &ql1280_board_tbl[devnum]; + struct Scsi_Host *host; + struct scsi_qla_host *ha; + int error = -ENODEV; + + /* Bypass all AMI SUBSYS VENDOR IDs */ + if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) { + printk(KERN_INFO + "qla1280: Skipping AMI SubSys Vendor ID Chip\n"); + goto error; + } + + printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n", + bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn)); + + if (pci_enable_device(pdev)) { + printk(KERN_WARNING + "qla1280: Failed to enabled pci device, aborting.\n"); + goto error; + } + + pci_set_master(pdev); + + error = -ENOMEM; + host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha)); + if (!host) { + printk(KERN_WARNING + "qla1280: Failed to register host, aborting.\n"); + goto error_disable_device; + } + + ha = (struct scsi_qla_host *)host->hostdata; + memset(ha, 0, sizeof(struct scsi_qla_host)); + + ha->pdev = pdev; + ha->devnum = devnum; /* specifies microcode load address */ + +#ifdef QLA_64BIT_PTR + if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) { + if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) { + printk(KERN_WARNING "scsi(%li): Unable to set a " + "suitable DMA mask - aborting\n", ha->host_no); + error = -ENODEV; + goto error_put_host; + } + } else + dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n", + ha->host_no); +#else + if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) { + printk(KERN_WARNING "scsi(%li): Unable to set a " + "suitable DMA mask - aborting\n", ha->host_no); + error = -ENODEV; + goto error_put_host; + } +#endif + + ha->request_ring = dma_alloc_coherent(&ha->pdev->dev, + ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)), + &ha->request_dma, GFP_KERNEL); + if (!ha->request_ring) { + printk(KERN_INFO "qla1280: Failed to get request memory\n"); + goto error_put_host; + } + + ha->response_ring = dma_alloc_coherent(&ha->pdev->dev, + ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)), + &ha->response_dma, GFP_KERNEL); + if (!ha->response_ring) { + printk(KERN_INFO "qla1280: Failed to get response memory\n"); + goto error_free_request_ring; + } + + ha->ports = bdp->numPorts; + + ha->host = host; + ha->host_no = host->host_no; + + host->irq = pdev->irq; + host->max_channel = bdp->numPorts - 1; + host->max_lun = MAX_LUNS - 1; + host->max_id = MAX_TARGETS; + host->max_sectors = 1024; + host->unique_id = host->host_no; + + error = -ENODEV; + +#if MEMORY_MAPPED_IO + ha->mmpbase = pci_ioremap_bar(ha->pdev, 1); + if (!ha->mmpbase) { + printk(KERN_INFO "qla1280: Unable to map I/O memory\n"); + goto error_free_response_ring; + } + + host->base = (unsigned long)ha->mmpbase; + ha->iobase = (struct device_reg __iomem *)ha->mmpbase; +#else + host->io_port = pci_resource_start(ha->pdev, 0); + if (!request_region(host->io_port, 0xff, "qla1280")) { + printk(KERN_INFO "qla1280: Failed to reserve i/o region " + "0x%04lx-0x%04lx - already in use\n", + host->io_port, host->io_port + 0xff); + goto error_free_response_ring; + } + + ha->iobase = (struct device_reg *)host->io_port; +#endif + + INIT_LIST_HEAD(&ha->done_q); + + /* Disable ISP interrupts. 
*/ + qla1280_disable_intrs(ha); + + if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED, + "qla1280", ha)) { + printk("qla1280 : Failed to reserve interrupt %d already " + "in use\n", pdev->irq); + goto error_release_region; + } + + /* load the F/W, read paramaters, and init the H/W */ + if (qla1280_initialize_adapter(ha)) { + printk(KERN_INFO "qla1x160: Failed to initialize adapter\n"); + goto error_free_irq; + } + + /* set our host ID (need to do something about our two IDs) */ + host->this_id = ha->bus_settings[0].id; + + pci_set_drvdata(pdev, host); + + error = scsi_add_host(host, &pdev->dev); + if (error) + goto error_disable_adapter; + scsi_scan_host(host); + + return 0; + + error_disable_adapter: + qla1280_disable_intrs(ha); + error_free_irq: + free_irq(pdev->irq, ha); + error_release_region: +#if MEMORY_MAPPED_IO + iounmap(ha->mmpbase); +#else + release_region(host->io_port, 0xff); +#endif + error_free_response_ring: + dma_free_coherent(&ha->pdev->dev, + ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)), + ha->response_ring, ha->response_dma); + error_free_request_ring: + dma_free_coherent(&ha->pdev->dev, + ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)), + ha->request_ring, ha->request_dma); + error_put_host: + scsi_host_put(host); + error_disable_device: + pci_disable_device(pdev); + error: + return error; +} + + +static void +qla1280_remove_one(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; + + scsi_remove_host(host); + + qla1280_disable_intrs(ha); + + free_irq(pdev->irq, ha); + +#if MEMORY_MAPPED_IO + iounmap(ha->mmpbase); +#else + release_region(host->io_port, 0xff); +#endif + + dma_free_coherent(&ha->pdev->dev, + ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))), + ha->request_ring, ha->request_dma); + dma_free_coherent(&ha->pdev->dev, + ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))), + ha->response_ring, ha->response_dma); + + pci_disable_device(pdev); + + scsi_host_put(host); +} + +static struct pci_driver qla1280_pci_driver = { + .name = "qla1280", + .id_table = qla1280_pci_tbl, + .probe = qla1280_probe_one, + .remove = qla1280_remove_one, +}; + +static int __init +qla1280_init(void) +{ +#ifdef MODULE + /* + * If we are called as a module, the qla1280 pointer may not be null + * and it would point to our bootup string, just like on the lilo + * command line. IF not NULL, then process this config string with + * qla1280_setup + * + * Boot time Options + * To add options at boot time add a line to your lilo.conf file like: + * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}" + * which will result in the first four devices on the first two + * controllers being set to a tagged queue depth of 32. 
+ */ + if (qla1280) + qla1280_setup(qla1280); +#endif + + return pci_register_driver(&qla1280_pci_driver); +} + +static void __exit +qla1280_exit(void) +{ + int i; + + pci_unregister_driver(&qla1280_pci_driver); + /* release any allocated firmware images */ + for (i = 0; i < QL_NUM_FW_IMAGES; i++) { + release_firmware(qla1280_fw_tbl[i].fw); + qla1280_fw_tbl[i].fw = NULL; + } +} + +module_init(qla1280_init); +module_exit(qla1280_exit); + +MODULE_AUTHOR("Qlogic & Jes Sorensen"); +MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("qlogic/1040.bin"); +MODULE_FIRMWARE("qlogic/1280.bin"); +MODULE_FIRMWARE("qlogic/12160.bin"); +MODULE_VERSION(QLA1280_VERSION); diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h new file mode 100644 index 000000000..d309e2ca1 --- /dev/null +++ b/drivers/scsi/qla1280.h @@ -0,0 +1,1071 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/****************************************************************************** +* QLOGIC LINUX SOFTWARE +* +* QLogic ISP1280 (Ultra2) /12160 (Ultra3) SCSI driver +* Copyright (C) 2000 Qlogic Corporation +* (www.qlogic.com) +* +******************************************************************************/ + +#ifndef _QLA1280_H +#define _QLA1280_H + +/* + * Data bit definitions. + */ +#define BIT_0 0x1 +#define BIT_1 0x2 +#define BIT_2 0x4 +#define BIT_3 0x8 +#define BIT_4 0x10 +#define BIT_5 0x20 +#define BIT_6 0x40 +#define BIT_7 0x80 +#define BIT_8 0x100 +#define BIT_9 0x200 +#define BIT_10 0x400 +#define BIT_11 0x800 +#define BIT_12 0x1000 +#define BIT_13 0x2000 +#define BIT_14 0x4000 +#define BIT_15 0x8000 +#define BIT_16 0x10000 +#define BIT_17 0x20000 +#define BIT_18 0x40000 +#define BIT_19 0x80000 +#define BIT_20 0x100000 +#define BIT_21 0x200000 +#define BIT_22 0x400000 +#define BIT_23 0x800000 +#define BIT_24 0x1000000 +#define BIT_25 0x2000000 +#define BIT_26 0x4000000 +#define BIT_27 0x8000000 +#define BIT_28 0x10000000 +#define BIT_29 0x20000000 +#define BIT_30 0x40000000 +#define BIT_31 0x80000000 + +#if MEMORY_MAPPED_IO +#define RD_REG_WORD(addr) readw_relaxed(addr) +#define RD_REG_WORD_dmasync(addr) readw(addr) +#define WRT_REG_WORD(addr, data) writew(data, addr) +#else /* MEMORY_MAPPED_IO */ +#define RD_REG_WORD(addr) inw((unsigned long)addr) +#define RD_REG_WORD_dmasync(addr) RD_REG_WORD(addr) +#define WRT_REG_WORD(addr, data) outw(data, (unsigned long)addr) +#endif /* MEMORY_MAPPED_IO */ + +/* + * Host adapter default definitions. + */ +#define MAX_BUSES 2 /* 2 */ +#define MAX_B_BITS 1 + +#define MAX_TARGETS 16 /* 16 */ +#define MAX_T_BITS 4 /* 4 */ + +#define MAX_LUNS 8 /* 32 */ +#define MAX_L_BITS 3 /* 5 */ + +/* + * Watchdog time quantum + */ +#define QLA1280_WDG_TIME_QUANTUM 5 /* In seconds */ + +/* Command retry count (0-65535) */ +#define COMMAND_RETRY_COUNT 255 + +/* Maximum outstanding commands in ISP queues */ +#define MAX_OUTSTANDING_COMMANDS 512 +#define COMPLETED_HANDLE ((unsigned char *) \ + (MAX_OUTSTANDING_COMMANDS + 2)) + +/* ISP request and response entry counts (37-65535) */ +#define REQUEST_ENTRY_CNT 255 /* Number of request entries. */ +#define RESPONSE_ENTRY_CNT 63 /* Number of response entries. */ + +/* + * SCSI Request Block structure (sp) that occurs after each struct scsi_cmnd. 
+ */ +struct srb { + struct list_head list; /* (8/16) LU queue */ + struct scsi_cmnd *cmd; /* (4/8) SCSI command block */ + /* NOTE: the sp->cmd will be NULL when this completion is + * called, so you should know the scsi_cmnd when using this */ + struct completion *wait; + dma_addr_t saved_dma_handle; /* for unmap of single transfers */ + uint8_t flags; /* (1) Status flags. */ + uint8_t dir; /* direction of transfer */ +}; + +/* + * SRB flag definitions + */ +#define SRB_TIMEOUT (1 << 0) /* Command timed out */ +#define SRB_SENT (1 << 1) /* Command sent to ISP */ +#define SRB_ABORT_PENDING (1 << 2) /* Command abort sent to device */ +#define SRB_ABORTED (1 << 3) /* Command aborted command already */ + +/* + * ISP I/O Register Set structure definitions. + */ +struct device_reg { + uint16_t id_l; /* ID low */ + uint16_t id_h; /* ID high */ + uint16_t cfg_0; /* Configuration 0 */ +#define ISP_CFG0_HWMSK 0x000f /* Hardware revision mask */ +#define ISP_CFG0_1020 BIT_0 /* ISP1020 */ +#define ISP_CFG0_1020A BIT_1 /* ISP1020A */ +#define ISP_CFG0_1040 BIT_2 /* ISP1040 */ +#define ISP_CFG0_1040A BIT_3 /* ISP1040A */ +#define ISP_CFG0_1040B BIT_4 /* ISP1040B */ +#define ISP_CFG0_1040C BIT_5 /* ISP1040C */ + uint16_t cfg_1; /* Configuration 1 */ +#define ISP_CFG1_F128 BIT_6 /* 128-byte FIFO threshold */ +#define ISP_CFG1_F64 BIT_4|BIT_5 /* 128-byte FIFO threshold */ +#define ISP_CFG1_F32 BIT_5 /* 128-byte FIFO threshold */ +#define ISP_CFG1_F16 BIT_4 /* 128-byte FIFO threshold */ +#define ISP_CFG1_BENAB BIT_2 /* Global Bus burst enable */ +#define ISP_CFG1_SXP BIT_0 /* SXP register select */ + uint16_t ictrl; /* Interface control */ +#define ISP_RESET BIT_0 /* ISP soft reset */ +#define ISP_EN_INT BIT_1 /* ISP enable interrupts. */ +#define ISP_EN_RISC BIT_2 /* ISP enable RISC interrupts. */ +#define ISP_FLASH_ENABLE BIT_8 /* Flash BIOS Read/Write enable */ +#define ISP_FLASH_UPPER BIT_9 /* Flash upper bank select */ + uint16_t istatus; /* Interface status */ +#define PCI_64BIT_SLOT BIT_14 /* PCI 64-bit slot indicator. */ +#define RISC_INT BIT_2 /* RISC interrupt */ +#define PCI_INT BIT_1 /* PCI interrupt */ + uint16_t semaphore; /* Semaphore */ + uint16_t nvram; /* NVRAM register. 
*/ +#define NV_DESELECT 0 +#define NV_CLOCK BIT_0 +#define NV_SELECT BIT_1 +#define NV_DATA_OUT BIT_2 +#define NV_DATA_IN BIT_3 + uint16_t flash_data; /* Flash BIOS data */ + uint16_t flash_address; /* Flash BIOS address */ + + uint16_t unused_1[0x06]; + + /* cdma_* and ddma_* are 1040 only */ + uint16_t cdma_cfg; +#define CDMA_CONF_SENAB BIT_3 /* SXP to DMA Data enable */ +#define CDMA_CONF_RIRQ BIT_2 /* RISC interrupt enable */ +#define CDMA_CONF_BENAB BIT_1 /* Bus burst enable */ +#define CDMA_CONF_DIR BIT_0 /* DMA direction (0=fifo->host 1=host->fifo) */ + uint16_t cdma_ctrl; + uint16_t cdma_status; + uint16_t cdma_fifo_status; + uint16_t cdma_count; + uint16_t cdma_reserved; + uint16_t cdma_address_count_0; + uint16_t cdma_address_count_1; + uint16_t cdma_address_count_2; + uint16_t cdma_address_count_3; + + uint16_t unused_2[0x06]; + + uint16_t ddma_cfg; +#define DDMA_CONF_SENAB BIT_3 /* SXP to DMA Data enable */ +#define DDMA_CONF_RIRQ BIT_2 /* RISC interrupt enable */ +#define DDMA_CONF_BENAB BIT_1 /* Bus burst enable */ +#define DDMA_CONF_DIR BIT_0 /* DMA direction (0=fifo->host 1=host->fifo) */ + uint16_t ddma_ctrl; + uint16_t ddma_status; + uint16_t ddma_fifo_status; + uint16_t ddma_xfer_count_low; + uint16_t ddma_xfer_count_high; + uint16_t ddma_addr_count_0; + uint16_t ddma_addr_count_1; + uint16_t ddma_addr_count_2; + uint16_t ddma_addr_count_3; + + uint16_t unused_3[0x0e]; + + uint16_t mailbox0; /* Mailbox 0 */ + uint16_t mailbox1; /* Mailbox 1 */ + uint16_t mailbox2; /* Mailbox 2 */ + uint16_t mailbox3; /* Mailbox 3 */ + uint16_t mailbox4; /* Mailbox 4 */ + uint16_t mailbox5; /* Mailbox 5 */ + uint16_t mailbox6; /* Mailbox 6 */ + uint16_t mailbox7; /* Mailbox 7 */ + + uint16_t unused_4[0x20];/* 0x80-0xbf Gap */ + + uint16_t host_cmd; /* Host command and control */ +#define HOST_INT BIT_7 /* host interrupt bit */ +#define BIOS_ENABLE BIT_0 + + uint16_t unused_5[0x5]; /* 0xc2-0xcb Gap */ + + uint16_t gpio_data; + uint16_t gpio_enable; + + uint16_t unused_6[0x11]; /* d0-f0 */ + uint16_t scsiControlPins; /* f2 */ +}; + +#define MAILBOX_REGISTER_COUNT 8 + +/* + * ISP product identification definitions in mailboxes after reset. + */ +#define PROD_ID_1 0x4953 +#define PROD_ID_2 0x0000 +#define PROD_ID_2a 0x5020 +#define PROD_ID_3 0x2020 +#define PROD_ID_4 0x1 + +/* + * ISP host command and control register command definitions + */ +#define HC_RESET_RISC 0x1000 /* Reset RISC */ +#define HC_PAUSE_RISC 0x2000 /* Pause RISC */ +#define HC_RELEASE_RISC 0x3000 /* Release RISC from reset. */ +#define HC_SET_HOST_INT 0x5000 /* Set host interrupt */ +#define HC_CLR_HOST_INT 0x6000 /* Clear HOST interrupt */ +#define HC_CLR_RISC_INT 0x7000 /* Clear RISC interrupt */ +#define HC_DISABLE_BIOS 0x9000 /* Disable BIOS. */ + +/* + * ISP mailbox Self-Test status codes + */ +#define MBS_FRM_ALIVE 0 /* Firmware Alive. */ +#define MBS_CHKSUM_ERR 1 /* Checksum Error. */ +#define MBS_SHADOW_LD_ERR 2 /* Shadow Load Error. */ +#define MBS_BUSY 4 /* Busy. */ + +/* + * ISP mailbox command complete status codes + */ +#define MBS_CMD_CMP 0x4000 /* Command Complete. */ +#define MBS_INV_CMD 0x4001 /* Invalid Command. */ +#define MBS_HOST_INF_ERR 0x4002 /* Host Interface Error. */ +#define MBS_TEST_FAILED 0x4003 /* Test Failed. */ +#define MBS_CMD_ERR 0x4005 /* Command Error. */ +#define MBS_CMD_PARAM_ERR 0x4006 /* Command Parameter Error. */ + +/* + * ISP mailbox asynchronous event status codes + */ +#define MBA_ASYNC_EVENT 0x8000 /* Asynchronous event. */ +#define MBA_BUS_RESET 0x8001 /* SCSI Bus Reset. 
*/ +#define MBA_SYSTEM_ERR 0x8002 /* System Error. */ +#define MBA_REQ_TRANSFER_ERR 0x8003 /* Request Transfer Error. */ +#define MBA_RSP_TRANSFER_ERR 0x8004 /* Response Transfer Error. */ +#define MBA_WAKEUP_THRES 0x8005 /* Request Queue Wake-up. */ +#define MBA_TIMEOUT_RESET 0x8006 /* Execution Timeout Reset. */ +#define MBA_DEVICE_RESET 0x8007 /* Bus Device Reset. */ +#define MBA_BUS_MODE_CHANGE 0x800E /* SCSI bus mode transition. */ +#define MBA_SCSI_COMPLETION 0x8020 /* Completion response. */ + +/* + * ISP mailbox commands + */ +#define MBC_NOP 0 /* No Operation */ +#define MBC_LOAD_RAM 1 /* Load RAM */ +#define MBC_EXECUTE_FIRMWARE 2 /* Execute firmware */ +#define MBC_DUMP_RAM 3 /* Dump RAM contents */ +#define MBC_WRITE_RAM_WORD 4 /* Write ram word */ +#define MBC_READ_RAM_WORD 5 /* Read ram word */ +#define MBC_MAILBOX_REGISTER_TEST 6 /* Wrap incoming mailboxes */ +#define MBC_VERIFY_CHECKSUM 7 /* Verify checksum */ +#define MBC_ABOUT_FIRMWARE 8 /* Get firmware revision */ +#define MBC_LOAD_RAM_A64_ROM 9 /* Load RAM 64bit ROM version */ +#define MBC_DUMP_RAM_A64_ROM 0x0a /* Dump RAM 64bit ROM version */ +#define MBC_INIT_REQUEST_QUEUE 0x10 /* Initialize request queue */ +#define MBC_INIT_RESPONSE_QUEUE 0x11 /* Initialize response queue */ +#define MBC_EXECUTE_IOCB 0x12 /* Execute IOCB command */ +#define MBC_ABORT_COMMAND 0x15 /* Abort IOCB command */ +#define MBC_ABORT_DEVICE 0x16 /* Abort device (ID/LUN) */ +#define MBC_ABORT_TARGET 0x17 /* Abort target (ID) */ +#define MBC_BUS_RESET 0x18 /* SCSI bus reset */ +#define MBC_GET_RETRY_COUNT 0x22 /* Get retry count and delay */ +#define MBC_GET_TARGET_PARAMETERS 0x28 /* Get target parameters */ +#define MBC_SET_INITIATOR_ID 0x30 /* Set initiator SCSI ID */ +#define MBC_SET_SELECTION_TIMEOUT 0x31 /* Set selection timeout */ +#define MBC_SET_RETRY_COUNT 0x32 /* Set retry count and delay */ +#define MBC_SET_TAG_AGE_LIMIT 0x33 /* Set tag age limit */ +#define MBC_SET_CLOCK_RATE 0x34 /* Set clock rate */ +#define MBC_SET_ACTIVE_NEGATION 0x35 /* Set active negation state */ +#define MBC_SET_ASYNC_DATA_SETUP 0x36 /* Set async data setup time */ +#define MBC_SET_PCI_CONTROL 0x37 /* Set BUS control parameters */ +#define MBC_SET_TARGET_PARAMETERS 0x38 /* Set target parameters */ +#define MBC_SET_DEVICE_QUEUE 0x39 /* Set device queue parameters */ +#define MBC_SET_RESET_DELAY_PARAMETERS 0x3A /* Set reset delay parameters */ +#define MBC_SET_SYSTEM_PARAMETER 0x45 /* Set system parameter word */ +#define MBC_SET_FIRMWARE_FEATURES 0x4A /* Set firmware feature word */ +#define MBC_INIT_REQUEST_QUEUE_A64 0x52 /* Initialize request queue A64 */ +#define MBC_INIT_RESPONSE_QUEUE_A64 0x53 /* Initialize response q A64 */ +#define MBC_ENABLE_TARGET_MODE 0x55 /* Enable target mode */ +#define MBC_SET_DATA_OVERRUN_RECOVERY 0x5A /* Set data overrun recovery mode */ + +/* + * ISP Get/Set Target Parameters mailbox command control flags. + */ +#define TP_PPR BIT_5 /* PPR */ +#define TP_RENEGOTIATE BIT_8 /* Renegotiate on error. */ +#define TP_STOP_QUEUE BIT_9 /* Stop que on check condition */ +#define TP_AUTO_REQUEST_SENSE BIT_10 /* Automatic request sense. */ +#define TP_TAGGED_QUEUE BIT_11 /* Tagged queuing. */ +#define TP_SYNC BIT_12 /* Synchronous data transfers. */ +#define TP_WIDE BIT_13 /* Wide data transfers. */ +#define TP_PARITY BIT_14 /* Parity checking. */ +#define TP_DISCONNECT BIT_15 /* Disconnect privilege. */ + +/* + * NVRAM Command values. 
+ */ +#define NV_START_BIT BIT_2 +#define NV_WRITE_OP (BIT_26 | BIT_24) +#define NV_READ_OP (BIT_26 | BIT_25) +#define NV_ERASE_OP (BIT_26 | BIT_25 | BIT_24) +#define NV_MASK_OP (BIT_26 | BIT_25 | BIT_24) +#define NV_DELAY_COUNT 10 + +/* + * QLogic ISP1280/ISP12160 NVRAM structure definition. + */ +struct nvram { + uint8_t id0; /* 0 */ + uint8_t id1; /* 1 */ + uint8_t id2; /* 2 */ + uint8_t id3; /* 3 */ + uint8_t version; /* 4 */ + + struct { + uint8_t bios_configuration_mode:2; + uint8_t bios_disable:1; + uint8_t selectable_scsi_boot_enable:1; + uint8_t cd_rom_boot_enable:1; + uint8_t disable_loading_risc_code:1; + uint8_t enable_64bit_addressing:1; + uint8_t unused_7:1; + } cntr_flags_1; /* 5 */ + + struct { + uint8_t boot_lun_number:5; + uint8_t scsi_bus_number:1; + uint8_t unused_6:1; + uint8_t unused_7:1; + } cntr_flags_2l; /* 7 */ + + struct { + uint8_t boot_target_number:4; + uint8_t unused_12:1; + uint8_t unused_13:1; + uint8_t unused_14:1; + uint8_t unused_15:1; + } cntr_flags_2h; /* 8 */ + + uint16_t unused_8; /* 8, 9 */ + uint16_t unused_10; /* 10, 11 */ + uint16_t unused_12; /* 12, 13 */ + uint16_t unused_14; /* 14, 15 */ + + struct { + uint8_t reserved:2; + uint8_t burst_enable:1; + uint8_t reserved_1:1; + uint8_t fifo_threshold:4; + } isp_config; /* 16 */ + + /* Termination + * 0 = Disable, 1 = high only, 3 = Auto term + */ + struct { + uint8_t scsi_bus_1_control:2; + uint8_t scsi_bus_0_control:2; + uint8_t unused_0:1; + uint8_t unused_1:1; + uint8_t unused_2:1; + uint8_t auto_term_support:1; + } termination; /* 17 */ + + uint16_t isp_parameter; /* 18, 19 */ + + union { + uint16_t w; + struct { + uint16_t enable_fast_posting:1; + uint16_t report_lvd_bus_transition:1; + uint16_t unused_2:1; + uint16_t unused_3:1; + uint16_t disable_iosbs_with_bus_reset_status:1; + uint16_t disable_synchronous_backoff:1; + uint16_t unused_6:1; + uint16_t synchronous_backoff_reporting:1; + uint16_t disable_reselection_fairness:1; + uint16_t unused_9:1; + uint16_t unused_10:1; + uint16_t unused_11:1; + uint16_t unused_12:1; + uint16_t unused_13:1; + uint16_t unused_14:1; + uint16_t unused_15:1; + } f; + } firmware_feature; /* 20, 21 */ + + uint16_t unused_22; /* 22, 23 */ + + struct { + struct { + uint8_t initiator_id:4; + uint8_t scsi_reset_disable:1; + uint8_t scsi_bus_size:1; + uint8_t scsi_bus_type:1; + uint8_t unused_7:1; + } config_1; /* 24 */ + + uint8_t bus_reset_delay; /* 25 */ + uint8_t retry_count; /* 26 */ + uint8_t retry_delay; /* 27 */ + + struct { + uint8_t async_data_setup_time:4; + uint8_t req_ack_active_negation:1; + uint8_t data_line_active_negation:1; + uint8_t unused_6:1; + uint8_t unused_7:1; + } config_2; /* 28 */ + + uint8_t unused_29; /* 29 */ + + uint16_t selection_timeout; /* 30, 31 */ + uint16_t max_queue_depth; /* 32, 33 */ + + uint16_t unused_34; /* 34, 35 */ + uint16_t unused_36; /* 36, 37 */ + uint16_t unused_38; /* 38, 39 */ + + struct { + struct { + uint8_t renegotiate_on_error:1; + uint8_t stop_queue_on_check:1; + uint8_t auto_request_sense:1; + uint8_t tag_queuing:1; + uint8_t enable_sync:1; + uint8_t enable_wide:1; + uint8_t parity_checking:1; + uint8_t disconnect_allowed:1; + } parameter; /* 40 */ + + uint8_t execution_throttle; /* 41 */ + uint8_t sync_period; /* 42 */ + + union { /* 43 */ + uint8_t flags_43; + struct { + uint8_t sync_offset:4; + uint8_t device_enable:1; + uint8_t lun_disable:1; + uint8_t unused_6:1; + uint8_t unused_7:1; + } flags1x80; + struct { + uint8_t sync_offset:5; + uint8_t device_enable:1; + uint8_t unused_6:1; + uint8_t unused_7:1; 
+ } flags1x160; + } flags; + union { /* PPR flags for the 1x160 controllers */ + uint8_t unused_44; + struct { + uint8_t ppr_options:4; + uint8_t ppr_bus_width:2; + uint8_t unused_8:1; + uint8_t enable_ppr:1; + } flags; /* 44 */ + } ppr_1x160; + uint8_t unused_45; /* 45 */ + } target[MAX_TARGETS]; + } bus[MAX_BUSES]; + + uint16_t unused_248; /* 248, 249 */ + + uint16_t subsystem_id[2]; /* 250, 251, 252, 253 */ + + union { /* 254 */ + uint8_t unused_254; + uint8_t system_id_pointer; + } sysid_1x160; + + uint8_t chksum; /* 255 */ +}; + +/* + * ISP queue - command entry structure definition. + */ +#define MAX_CMDSZ 12 /* SCSI maximum CDB size. */ +struct cmd_entry { + uint8_t entry_type; /* Entry type. */ +#define COMMAND_TYPE 1 /* Command entry */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + __le32 handle; /* System handle. */ + uint8_t lun; /* SCSI LUN */ + uint8_t target; /* SCSI ID */ + __le16 cdb_len; /* SCSI command length. */ + __le16 control_flags; /* Control flags. */ + __le16 reserved; + __le16 timeout; /* Command timeout. */ + __le16 dseg_count; /* Data segment count. */ + uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */ + __le32 dseg_0_address; /* Data segment 0 address. */ + __le32 dseg_0_length; /* Data segment 0 length. */ + __le32 dseg_1_address; /* Data segment 1 address. */ + __le32 dseg_1_length; /* Data segment 1 length. */ + __le32 dseg_2_address; /* Data segment 2 address. */ + __le32 dseg_2_length; /* Data segment 2 length. */ + __le32 dseg_3_address; /* Data segment 3 address. */ + __le32 dseg_3_length; /* Data segment 3 length. */ +}; + +/* + * ISP queue - continuation entry structure definition. + */ +struct cont_entry { + uint8_t entry_type; /* Entry type. */ +#define CONTINUE_TYPE 2 /* Continuation entry. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + __le32 reserved; /* Reserved */ + __le32 dseg_0_address; /* Data segment 0 address. */ + __le32 dseg_0_length; /* Data segment 0 length. */ + __le32 dseg_1_address; /* Data segment 1 address. */ + __le32 dseg_1_length; /* Data segment 1 length. */ + __le32 dseg_2_address; /* Data segment 2 address. */ + __le32 dseg_2_length; /* Data segment 2 length. */ + __le32 dseg_3_address; /* Data segment 3 address. */ + __le32 dseg_3_length; /* Data segment 3 length. */ + __le32 dseg_4_address; /* Data segment 4 address. */ + __le32 dseg_4_length; /* Data segment 4 length. */ + __le32 dseg_5_address; /* Data segment 5 address. */ + __le32 dseg_5_length; /* Data segment 5 length. */ + __le32 dseg_6_address; /* Data segment 6 address. */ + __le32 dseg_6_length; /* Data segment 6 length. */ +}; + +/* + * ISP queue - status entry structure definition. + */ +struct response { + uint8_t entry_type; /* Entry type. */ +#define STATUS_TYPE 3 /* Status entry. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ +#define RF_CONT BIT_0 /* Continuation. */ +#define RF_FULL BIT_1 /* Full */ +#define RF_BAD_HEADER BIT_2 /* Bad header. */ +#define RF_BAD_PAYLOAD BIT_3 /* Bad payload. */ + __le32 handle; /* System handle. */ + __le16 scsi_status; /* SCSI status. */ + __le16 comp_status; /* Completion status. */ + __le16 state_flags; /* State flags. */ +#define SF_TRANSFER_CMPL BIT_14 /* Transfer Complete. 
*/ +#define SF_GOT_SENSE BIT_13 /* Got Sense */ +#define SF_GOT_STATUS BIT_12 /* Got Status */ +#define SF_TRANSFERRED_DATA BIT_11 /* Transferred data */ +#define SF_SENT_CDB BIT_10 /* Send CDB */ +#define SF_GOT_TARGET BIT_9 /* */ +#define SF_GOT_BUS BIT_8 /* */ + __le16 status_flags; /* Status flags. */ + __le16 time; /* Time. */ + __le16 req_sense_length;/* Request sense data length. */ + __le32 residual_length; /* Residual transfer length. */ + __le16 reserved[4]; + uint8_t req_sense_data[32]; /* Request sense data. */ +}; + +/* + * ISP queue - marker entry structure definition. + */ +struct mrk_entry { + uint8_t entry_type; /* Entry type. */ +#define MARKER_TYPE 4 /* Marker entry. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + __le32 reserved; + uint8_t lun; /* SCSI LUN */ + uint8_t target; /* SCSI ID */ + uint8_t modifier; /* Modifier (7-0). */ +#define MK_SYNC_ID_LUN 0 /* Synchronize ID/LUN */ +#define MK_SYNC_ID 1 /* Synchronize ID */ +#define MK_SYNC_ALL 2 /* Synchronize all ID/LUN */ + uint8_t reserved_1[53]; +}; + +/* + * ISP queue - extended command entry structure definition. + * + * Unused by the driver! + */ +struct ecmd_entry { + uint8_t entry_type; /* Entry type. */ +#define EXTENDED_CMD_TYPE 5 /* Extended command entry. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System handle. */ + uint8_t lun; /* SCSI LUN */ + uint8_t target; /* SCSI ID */ + __le16 cdb_len; /* SCSI command length. */ + __le16 control_flags; /* Control flags. */ + __le16 reserved; + __le16 timeout; /* Command timeout. */ + __le16 dseg_count; /* Data segment count. */ + uint8_t scsi_cdb[88]; /* SCSI command words. */ +}; + +/* + * ISP queue - 64-Bit addressing, command entry structure definition. + */ +typedef struct { + uint8_t entry_type; /* Entry type. */ +#define COMMAND_A64_TYPE 9 /* Command A64 entry */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + __le32 handle; /* System handle. */ + uint8_t lun; /* SCSI LUN */ + uint8_t target; /* SCSI ID */ + __le16 cdb_len; /* SCSI command length. */ + __le16 control_flags; /* Control flags. */ + __le16 reserved; + __le16 timeout; /* Command timeout. */ + __le16 dseg_count; /* Data segment count. */ + uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */ + __le32 reserved_1[2]; /* unused */ + __le32 dseg_0_address[2]; /* Data segment 0 address. */ + __le32 dseg_0_length; /* Data segment 0 length. */ + __le32 dseg_1_address[2]; /* Data segment 1 address. */ + __le32 dseg_1_length; /* Data segment 1 length. */ +} cmd_a64_entry_t, request_t; + +/* + * ISP queue - 64-Bit addressing, continuation entry structure definition. + */ +struct cont_a64_entry { + uint8_t entry_type; /* Entry type. */ +#define CONTINUE_A64_TYPE 0xA /* Continuation A64 entry. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + __le32 dseg_0_address[2]; /* Data segment 0 address. */ + __le32 dseg_0_length; /* Data segment 0 length. */ + __le32 dseg_1_address[2]; /* Data segment 1 address. */ + __le32 dseg_1_length; /* Data segment 1 length. */ + __le32 dseg_2_address[2]; /* Data segment 2 address. */ + __le32 dseg_2_length; /* Data segment 2 length. */ + __le32 dseg_3_address[2]; /* Data segment 3 address. 
*/ + __le32 dseg_3_length; /* Data segment 3 length. */ + __le32 dseg_4_address[2]; /* Data segment 4 address. */ + __le32 dseg_4_length; /* Data segment 4 length. */ +}; + +/* + * ISP queue - enable LUN entry structure definition. + */ +struct elun_entry { + uint8_t entry_type; /* Entry type. */ +#define ENABLE_LUN_TYPE 0xB /* Enable LUN entry. */ + uint8_t entry_count; /* Entry count. */ + uint8_t reserved_1; + uint8_t entry_status; /* Entry Status not used. */ + __le32 reserved_2; + __le16 lun; /* Bit 15 is bus number. */ + __le16 reserved_4; + __le32 option_flags; + uint8_t status; + uint8_t reserved_5; + uint8_t command_count; /* Number of ATIOs allocated. */ + uint8_t immed_notify_count; /* Number of Immediate Notify */ + /* entries allocated. */ + uint8_t group_6_length; /* SCSI CDB length for group 6 */ + /* commands (2-26). */ + uint8_t group_7_length; /* SCSI CDB length for group 7 */ + /* commands (2-26). */ + __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */ + __le16 reserved_6[20]; +}; + +/* + * ISP queue - modify LUN entry structure definition. + * + * Unused by the driver! + */ +struct modify_lun_entry { + uint8_t entry_type; /* Entry type. */ +#define MODIFY_LUN_TYPE 0xC /* Modify LUN entry. */ + uint8_t entry_count; /* Entry count. */ + uint8_t reserved_1; + uint8_t entry_status; /* Entry Status. */ + __le32 reserved_2; + uint8_t lun; /* SCSI LUN */ + uint8_t reserved_3; + uint8_t operators; + uint8_t reserved_4; + __le32 option_flags; + uint8_t status; + uint8_t reserved_5; + uint8_t command_count; /* Number of ATIOs allocated. */ + uint8_t immed_notify_count; /* Number of Immediate Notify */ + /* entries allocated. */ + __le16 reserved_6; + __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */ + __le16 reserved_7[20]; +}; + +/* + * ISP queue - immediate notify entry structure definition. + */ +struct notify_entry { + uint8_t entry_type; /* Entry type. */ +#define IMMED_NOTIFY_TYPE 0xD /* Immediate notify entry. */ + uint8_t entry_count; /* Entry count. */ + uint8_t reserved_1; + uint8_t entry_status; /* Entry Status. */ + __le32 reserved_2; + uint8_t lun; + uint8_t initiator_id; + uint8_t reserved_3; + uint8_t target_id; + __le32 option_flags; + uint8_t status; + uint8_t reserved_4; + uint8_t tag_value; /* Received queue tag message value */ + uint8_t tag_type; /* Received queue tag message type */ + /* entries allocated. */ + __le16 seq_id; + uint8_t scsi_msg[8]; /* SCSI message not handled by ISP */ + __le16 reserved_5[8]; + uint8_t sense_data[18]; +}; + +/* + * ISP queue - notify acknowledge entry structure definition. + */ +struct nack_entry { + uint8_t entry_type; /* Entry type. */ +#define NOTIFY_ACK_TYPE 0xE /* Notify acknowledge entry. */ + uint8_t entry_count; /* Entry count. */ + uint8_t reserved_1; + uint8_t entry_status; /* Entry Status. */ + __le32 reserved_2; + uint8_t lun; + uint8_t initiator_id; + uint8_t reserved_3; + uint8_t target_id; + __le32 option_flags; + uint8_t status; + uint8_t event; + __le16 seq_id; + __le16 reserved_4[22]; +}; + +/* + * ISP queue - Accept Target I/O (ATIO) entry structure definition. + */ +struct atio_entry { + uint8_t entry_type; /* Entry type. */ +#define ACCEPT_TGT_IO_TYPE 6 /* Accept target I/O entry. */ + uint8_t entry_count; /* Entry count. */ + uint8_t reserved_1; + uint8_t entry_status; /* Entry Status. 
*/ + __le32 reserved_2; + uint8_t lun; + uint8_t initiator_id; + uint8_t cdb_len; + uint8_t target_id; + __le32 option_flags; + uint8_t status; + uint8_t scsi_status; + uint8_t tag_value; /* Received queue tag message value */ + uint8_t tag_type; /* Received queue tag message type */ + uint8_t cdb[26]; + uint8_t sense_data[18]; +}; + +/* + * ISP queue - Continue Target I/O (CTIO) entry structure definition. + */ +struct ctio_entry { + uint8_t entry_type; /* Entry type. */ +#define CONTINUE_TGT_IO_TYPE 7 /* CTIO entry */ + uint8_t entry_count; /* Entry count. */ + uint8_t reserved_1; + uint8_t entry_status; /* Entry Status. */ + __le32 reserved_2; + uint8_t lun; /* SCSI LUN */ + uint8_t initiator_id; + uint8_t reserved_3; + uint8_t target_id; + __le32 option_flags; + uint8_t status; + uint8_t scsi_status; + uint8_t tag_value; /* Received queue tag message value */ + uint8_t tag_type; /* Received queue tag message type */ + __le32 transfer_length; + __le32 residual; + __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */ + __le16 dseg_count; /* Data segment count. */ + __le32 dseg_0_address; /* Data segment 0 address. */ + __le32 dseg_0_length; /* Data segment 0 length. */ + __le32 dseg_1_address; /* Data segment 1 address. */ + __le32 dseg_1_length; /* Data segment 1 length. */ + __le32 dseg_2_address; /* Data segment 2 address. */ + __le32 dseg_2_length; /* Data segment 2 length. */ + __le32 dseg_3_address; /* Data segment 3 address. */ + __le32 dseg_3_length; /* Data segment 3 length. */ +}; + +/* + * ISP queue - CTIO returned entry structure definition. + */ +struct ctio_ret_entry { + uint8_t entry_type; /* Entry type. */ +#define CTIO_RET_TYPE 7 /* CTIO return entry */ + uint8_t entry_count; /* Entry count. */ + uint8_t reserved_1; + uint8_t entry_status; /* Entry Status. */ + __le32 reserved_2; + uint8_t lun; /* SCSI LUN */ + uint8_t initiator_id; + uint8_t reserved_3; + uint8_t target_id; + __le32 option_flags; + uint8_t status; + uint8_t scsi_status; + uint8_t tag_value; /* Received queue tag message value */ + uint8_t tag_type; /* Received queue tag message type */ + __le32 transfer_length; + __le32 residual; + __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */ + __le16 dseg_count; /* Data segment count. */ + __le32 dseg_0_address; /* Data segment 0 address. */ + __le32 dseg_0_length; /* Data segment 0 length. */ + __le32 dseg_1_address; /* Data segment 1 address. */ + __le16 dseg_1_length; /* Data segment 1 length. */ + uint8_t sense_data[18]; +}; + +/* + * ISP queue - CTIO A64 entry structure definition. + */ +struct ctio_a64_entry { + uint8_t entry_type; /* Entry type. */ +#define CTIO_A64_TYPE 0xF /* CTIO A64 entry */ + uint8_t entry_count; /* Entry count. */ + uint8_t reserved_1; + uint8_t entry_status; /* Entry Status. */ + __le32 reserved_2; + uint8_t lun; /* SCSI LUN */ + uint8_t initiator_id; + uint8_t reserved_3; + uint8_t target_id; + __le32 option_flags; + uint8_t status; + uint8_t scsi_status; + uint8_t tag_value; /* Received queue tag message value */ + uint8_t tag_type; /* Received queue tag message type */ + __le32 transfer_length; + __le32 residual; + __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */ + __le16 dseg_count; /* Data segment count. */ + __le32 reserved_4[2]; + __le32 dseg_0_address[2];/* Data segment 0 address. */ + __le32 dseg_0_length; /* Data segment 0 length. */ + __le32 dseg_1_address[2];/* Data segment 1 address. */ + __le32 dseg_1_length; /* Data segment 1 length. */ +}; + +/* + * ISP queue - CTIO returned entry structure definition. 
+ */ +struct ctio_a64_ret_entry { + uint8_t entry_type; /* Entry type. */ +#define CTIO_A64_RET_TYPE 0xF /* CTIO A64 returned entry */ + uint8_t entry_count; /* Entry count. */ + uint8_t reserved_1; + uint8_t entry_status; /* Entry Status. */ + __le32 reserved_2; + uint8_t lun; /* SCSI LUN */ + uint8_t initiator_id; + uint8_t reserved_3; + uint8_t target_id; + __le32 option_flags; + uint8_t status; + uint8_t scsi_status; + uint8_t tag_value; /* Received queue tag message value */ + uint8_t tag_type; /* Received queue tag message type */ + __le32 transfer_length; + __le32 residual; + __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */ + __le16 dseg_count; /* Data segment count. */ + __le16 reserved_4[7]; + uint8_t sense_data[18]; +}; + +/* + * ISP request and response queue entry sizes + */ +#define RESPONSE_ENTRY_SIZE (sizeof(struct response)) +#define REQUEST_ENTRY_SIZE (sizeof(request_t)) + +/* + * ISP status entry - completion status definitions. + */ +#define CS_COMPLETE 0x0 /* No errors */ +#define CS_INCOMPLETE 0x1 /* Incomplete transfer of cmd. */ +#define CS_DMA 0x2 /* A DMA direction error. */ +#define CS_TRANSPORT 0x3 /* Transport error. */ +#define CS_RESET 0x4 /* SCSI bus reset occurred */ +#define CS_ABORTED 0x5 /* System aborted command. */ +#define CS_TIMEOUT 0x6 /* Timeout error. */ +#define CS_DATA_OVERRUN 0x7 /* Data overrun. */ +#define CS_COMMAND_OVERRUN 0x8 /* Command Overrun. */ +#define CS_STATUS_OVERRUN 0x9 /* Status Overrun. */ +#define CS_BAD_MSG 0xA /* Bad msg after status phase. */ +#define CS_NO_MSG_OUT 0xB /* No msg out after selection. */ +#define CS_EXTENDED_ID 0xC /* Extended ID failed. */ +#define CS_IDE_MSG 0xD /* Target rejected IDE msg. */ +#define CS_ABORT_MSG 0xE /* Target rejected abort msg. */ +#define CS_REJECT_MSG 0xF /* Target rejected reject msg. */ +#define CS_NOP_MSG 0x10 /* Target rejected NOP msg. */ +#define CS_PARITY_MSG 0x11 /* Target rejected parity msg. */ +#define CS_DEV_RESET_MSG 0x12 /* Target rejected dev rst msg. */ +#define CS_ID_MSG 0x13 /* Target rejected ID msg. */ +#define CS_FREE 0x14 /* Unexpected bus free. */ +#define CS_DATA_UNDERRUN 0x15 /* Data Underrun. */ +#define CS_TRANACTION_1 0x18 /* Transaction error 1 */ +#define CS_TRANACTION_2 0x19 /* Transaction error 2 */ +#define CS_TRANACTION_3 0x1a /* Transaction error 3 */ +#define CS_INV_ENTRY_TYPE 0x1b /* Invalid entry type */ +#define CS_DEV_QUEUE_FULL 0x1c /* Device queue full */ +#define CS_PHASED_SKIPPED 0x1d /* SCSI phase skipped */ +#define CS_ARS_FAILED 0x1e /* ARS failed */ +#define CS_LVD_BUS_ERROR 0x21 /* LVD bus error */ +#define CS_BAD_PAYLOAD 0x80 /* Driver defined */ +#define CS_UNKNOWN 0x81 /* Driver defined */ +#define CS_RETRY 0x82 /* Driver defined */ + +/* + * ISP target entries - Option flags bit definitions. 
+ */ +#define OF_ENABLE_TAG BIT_1 /* Tagged queue action enable */ +#define OF_DATA_IN BIT_6 /* Data in to initiator */ + /* (data from target to initiator) */ +#define OF_DATA_OUT BIT_7 /* Data out from initiator */ + /* (data from initiator to target) */ +#define OF_NO_DATA (BIT_7 | BIT_6) +#define OF_DISC_DISABLED BIT_15 /* Disconnects disabled */ +#define OF_DISABLE_SDP BIT_24 /* Disable sending save data ptr */ +#define OF_SEND_RDP BIT_26 /* Send restore data pointers msg */ +#define OF_FORCE_DISC BIT_30 /* Disconnects mandatory */ +#define OF_SSTS BIT_31 /* Send SCSI status */ + + +/* + * BUS parameters/settings structure - UNUSED + */ +struct bus_param { + uint8_t id; /* Host adapter SCSI id */ + uint8_t bus_reset_delay; /* SCSI bus reset delay. */ + uint8_t failed_reset_count; /* number of time reset failed */ + uint8_t unused; + uint16_t device_enables; /* Device enable bits. */ + uint16_t lun_disables; /* LUN disable bits. */ + uint16_t qtag_enables; /* Tag queue enables. */ + uint16_t hiwat; /* High water mark per device. */ + uint8_t reset_marker:1; + uint8_t disable_scsi_reset:1; + uint8_t scsi_bus_dead:1; /* SCSI Bus is Dead, when 5 back to back resets failed */ +}; + + +struct qla_driver_setup { + uint32_t no_sync:1; + uint32_t no_wide:1; + uint32_t no_ppr:1; + uint32_t no_nvram:1; + uint16_t sync_mask; + uint16_t wide_mask; + uint16_t ppr_mask; +}; + + +/* + * Linux Host Adapter structure + */ +struct scsi_qla_host { + /* Linux adapter configuration data */ + struct Scsi_Host *host; /* pointer to host data */ + struct scsi_qla_host *next; + struct device_reg __iomem *iobase; /* Base Memory-mapped I/O address */ + + unsigned char __iomem *mmpbase; /* memory mapped address */ + unsigned long host_no; + struct pci_dev *pdev; + uint8_t devnum; + uint8_t revision; + uint8_t ports; + + unsigned long actthreads; + unsigned long isr_count; /* Interrupt count */ + unsigned long spurious_int; + + /* Outstandings ISP commands. */ + struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS]; + + /* BUS configuration data */ + struct bus_param bus_settings[MAX_BUSES]; + + /* Received ISP mailbox data. */ + volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT]; + + dma_addr_t request_dma; /* Physical Address */ + request_t *request_ring; /* Base virtual address */ + request_t *request_ring_ptr; /* Current address. */ + uint16_t req_ring_index; /* Current index. */ + uint16_t req_q_cnt; /* Number of available entries. */ + + dma_addr_t response_dma; /* Physical address. */ + struct response *response_ring; /* Base virtual address */ + struct response *response_ring_ptr; /* Current address. */ + uint16_t rsp_ring_index; /* Current index. 
*/ + + struct list_head done_q; /* Done queue */ + + struct completion *mailbox_wait; + struct timer_list mailbox_timer; + + volatile struct { + uint32_t online:1; /* 0 */ + uint32_t reset_marker:1; /* 1 */ + uint32_t disable_host_adapter:1; /* 2 */ + uint32_t reset_active:1; /* 3 */ + uint32_t abort_isp_active:1; /* 4 */ + uint32_t disable_risc_code_load:1; /* 5 */ + } flags; + + struct nvram nvram; + int nvram_valid; + + /* Firmware Info */ + unsigned short fwstart; /* start address for F/W */ + unsigned char fwver1; /* F/W version first char */ + unsigned char fwver2; /* F/W version second char */ + unsigned char fwver3; /* F/W version third char */ +}; + +#endif /* _QLA1280_H */ diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig new file mode 100644 index 000000000..a584708d3 --- /dev/null +++ b/drivers/scsi/qla2xxx/Kconfig @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: GPL-2.0-only +config SCSI_QLA_FC + tristate "QLogic QLA2XXX Fibre Channel Support" + depends on PCI && HAS_IOPORT && SCSI + depends on SCSI_FC_ATTRS + depends on NVME_FC || !NVME_FC + select FW_LOADER + select BTREE + help + This qla2xxx driver supports all QLogic Fibre Channel + PCI and PCIe host adapters. + + By default, firmware for the ISP parts will be loaded + via the Firmware Loader interface. + + ISP Firmware Filename + ---------- ----------------- + 21xx ql2100_fw.bin + 22xx ql2200_fw.bin + 2300, 2312, 6312 ql2300_fw.bin + 2322, 6322 ql2322_fw.bin + 24xx, 54xx ql2400_fw.bin + 25xx ql2500_fw.bin + + Upon request, the driver caches the firmware image until + the driver is unloaded. + + Firmware images can be retrieved from: + + http://ldriver.qlogic.com/firmware/ + + They are also included in the linux-firmware tree as well. + +config TCM_QLA2XXX + tristate "TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs" + depends on SCSI_QLA_FC && TARGET_CORE + depends on LIBFC + select BTREE + default n + help + Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs + +if TCM_QLA2XXX +config TCM_QLA2XXX_DEBUG + bool "TCM_QLA2XXX fabric module DEBUG mode for QLogic 24xx+ series target mode HBAs" + default n + help + Say Y here to enable the TCM_QLA2XXX fabric module DEBUG for QLogic 24xx+ series target mode HBAs + This will include code to enable the SCSI command jammer +endif diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile new file mode 100644 index 000000000..cbc1303e7 --- /dev/null +++ b/drivers/scsi/qla2xxx/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ + qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ + qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o \ + qla_edif.o + +obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o +obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c new file mode 100644 index 000000000..44449c70a --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -0,0 +1,3384 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#include "qla_def.h" +#include "qla_target.h" + +#include +#include +#include +#include + +static int qla24xx_vport_disable(struct fc_vport *, bool); + +/* SYSFS attributes --------------------------------------------------------- */ + +static ssize_t +qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj, + struct 
bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + + if (!(ha->fw_dump_reading || ha->mctp_dump_reading || + ha->mpi_fw_dump_reading)) + return 0; + + mutex_lock(&ha->optrom_mutex); + if (IS_P3P_TYPE(ha)) { + if (off < ha->md_template_size) { + rval = memory_read_from_buffer(buf, count, + &off, ha->md_tmplt_hdr, ha->md_template_size); + } else { + off -= ha->md_template_size; + rval = memory_read_from_buffer(buf, count, + &off, ha->md_dump, ha->md_dump_size); + } + } else if (ha->mctp_dumped && ha->mctp_dump_reading) { + rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump, + MCTP_DUMP_SIZE); + } else if (ha->mpi_fw_dumped && ha->mpi_fw_dump_reading) { + rval = memory_read_from_buffer(buf, count, &off, + ha->mpi_fw_dump, + ha->mpi_fw_dump_len); + } else if (ha->fw_dump_reading) { + rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump, + ha->fw_dump_len); + } else { + rval = 0; + } + mutex_unlock(&ha->optrom_mutex); + return rval; +} + +static ssize_t +qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + int reading; + + if (off != 0) + return (0); + + reading = simple_strtol(buf, NULL, 10); + switch (reading) { + case 0: + if (!ha->fw_dump_reading) + break; + + ql_log(ql_log_info, vha, 0x705d, + "Firmware dump cleared on (%ld).\n", vha->host_no); + + if (IS_P3P_TYPE(ha)) { + qla82xx_md_free(vha); + qla82xx_md_prep(vha); + } + ha->fw_dump_reading = 0; + ha->fw_dumped = false; + break; + case 1: + if (ha->fw_dumped && !ha->fw_dump_reading) { + ha->fw_dump_reading = 1; + + ql_log(ql_log_info, vha, 0x705e, + "Raw firmware dump ready for read on (%ld).\n", + vha->host_no); + } + break; + case 2: + qla2x00_alloc_fw_dump(vha); + break; + case 3: + if (IS_QLA82XX(ha)) { + qla82xx_idc_lock(ha); + qla82xx_set_reset_owner(vha); + qla82xx_idc_unlock(ha); + } else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + qla82xx_set_reset_owner(vha); + qla8044_idc_unlock(ha); + } else { + qla2x00_system_error(vha); + } + break; + case 4: + if (IS_P3P_TYPE(ha)) { + if (ha->md_tmplt_hdr) + ql_dbg(ql_dbg_user, vha, 0x705b, + "MiniDump supported with this firmware.\n"); + else + ql_dbg(ql_dbg_user, vha, 0x709d, + "MiniDump not supported with this firmware.\n"); + } + break; + case 5: + if (IS_P3P_TYPE(ha)) + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + case 6: + if (!ha->mctp_dump_reading) + break; + ql_log(ql_log_info, vha, 0x70c1, + "MCTP dump cleared on (%ld).\n", vha->host_no); + ha->mctp_dump_reading = 0; + ha->mctp_dumped = 0; + break; + case 7: + if (ha->mctp_dumped && !ha->mctp_dump_reading) { + ha->mctp_dump_reading = 1; + ql_log(ql_log_info, vha, 0x70c2, + "Raw mctp dump ready for read on (%ld).\n", + vha->host_no); + } + break; + case 8: + if (!ha->mpi_fw_dump_reading) + break; + ql_log(ql_log_info, vha, 0x70e7, + "MPI firmware dump cleared on (%ld).\n", vha->host_no); + ha->mpi_fw_dump_reading = 0; + ha->mpi_fw_dumped = 0; + break; + case 9: + if (ha->mpi_fw_dumped && !ha->mpi_fw_dump_reading) { + ha->mpi_fw_dump_reading = 1; + ql_log(ql_log_info, vha, 0x70e8, + "Raw MPI firmware dump ready for read on (%ld).\n", + vha->host_no); + } + break; + case 10: + if (IS_QLA27XX(ha) || 
IS_QLA28XX(ha)) { + ql_log(ql_log_info, vha, 0x70e9, + "Issuing MPI firmware dump on host#%ld.\n", + vha->host_no); + ha->isp_ops->mpi_fw_dump(vha, 0); + } + break; + } + return count; +} + +static struct bin_attribute sysfs_fw_dump_attr = { + .attr = { + .name = "fw_dump", + .mode = S_IRUSR | S_IWUSR, + }, + .size = 0, + .read = qla2x00_sysfs_read_fw_dump, + .write = qla2x00_sysfs_write_fw_dump, +}; + +static ssize_t +qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + uint32_t faddr; + struct active_regions active_regions = { }; + + if (!capable(CAP_SYS_ADMIN)) + return 0; + + mutex_lock(&ha->optrom_mutex); + if (qla2x00_chip_is_down(vha)) { + mutex_unlock(&ha->optrom_mutex); + return -EAGAIN; + } + + if (!IS_NOCACHE_VPD_TYPE(ha)) { + mutex_unlock(&ha->optrom_mutex); + goto skip; + } + + faddr = ha->flt_region_nvram; + if (IS_QLA28XX(ha)) { + qla28xx_get_aux_images(vha, &active_regions); + if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) + faddr = ha->flt_region_nvram_sec; + } + ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size); + + mutex_unlock(&ha->optrom_mutex); + +skip: + return memory_read_from_buffer(buf, count, &off, ha->nvram, + ha->nvram_size); +} + +static ssize_t +qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + uint16_t cnt; + + if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size || + !ha->isp_ops->write_nvram) + return -EINVAL; + + /* Checksum NVRAM. */ + if (IS_FWI2_CAPABLE(ha)) { + __le32 *iter = (__force __le32 *)buf; + uint32_t chksum; + + chksum = 0; + for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++) + chksum += le32_to_cpu(*iter); + chksum = ~chksum + 1; + *iter = cpu_to_le32(chksum); + } else { + uint8_t *iter; + uint8_t chksum; + + iter = (uint8_t *)buf; + chksum = 0; + for (cnt = 0; cnt < count - 1; cnt++) + chksum += *iter++; + chksum = ~chksum + 1; + *iter = chksum; + } + + if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x705f, + "HBA not online, failing NVRAM update.\n"); + return -EAGAIN; + } + + mutex_lock(&ha->optrom_mutex); + if (qla2x00_chip_is_down(vha)) { + mutex_unlock(&ha->optrom_mutex); + return -EAGAIN; + } + + /* Write NVRAM. */ + ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count); + ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base, + count); + mutex_unlock(&ha->optrom_mutex); + + ql_dbg(ql_dbg_user, vha, 0x7060, + "Setting ISP_ABORT_NEEDED\n"); + /* NVRAM settings take effect immediately. 
*/ + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + qla2x00_wait_for_chip_reset(vha); + + return count; +} + +static struct bin_attribute sysfs_nvram_attr = { + .attr = { + .name = "nvram", + .mode = S_IRUSR | S_IWUSR, + }, + .size = 512, + .read = qla2x00_sysfs_read_nvram, + .write = qla2x00_sysfs_write_nvram, +}; + +static ssize_t +qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + ssize_t rval = 0; + + mutex_lock(&ha->optrom_mutex); + + if (ha->optrom_state != QLA_SREADING) + goto out; + + rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, + ha->optrom_region_size); + +out: + mutex_unlock(&ha->optrom_mutex); + + return rval; +} + +static ssize_t +qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + + mutex_lock(&ha->optrom_mutex); + + if (ha->optrom_state != QLA_SWRITING) { + mutex_unlock(&ha->optrom_mutex); + return -EINVAL; + } + if (off > ha->optrom_region_size) { + mutex_unlock(&ha->optrom_mutex); + return -ERANGE; + } + if (off + count > ha->optrom_region_size) + count = ha->optrom_region_size - off; + + memcpy(&ha->optrom_buffer[off], buf, count); + mutex_unlock(&ha->optrom_mutex); + + return count; +} + +static struct bin_attribute sysfs_optrom_attr = { + .attr = { + .name = "optrom", + .mode = S_IRUSR | S_IWUSR, + }, + .size = 0, + .read = qla2x00_sysfs_read_optrom, + .write = qla2x00_sysfs_write_optrom, +}; + +static ssize_t +qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + uint32_t start = 0; + uint32_t size = ha->optrom_size; + int val, valid; + ssize_t rval = count; + + if (off) + return -EINVAL; + + if (unlikely(pci_channel_offline(ha->pdev))) + return -EAGAIN; + + if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) + return -EINVAL; + if (start > ha->optrom_size) + return -EINVAL; + if (size > ha->optrom_size - start) + size = ha->optrom_size - start; + + mutex_lock(&ha->optrom_mutex); + if (qla2x00_chip_is_down(vha)) { + mutex_unlock(&ha->optrom_mutex); + return -EAGAIN; + } + switch (val) { + case 0: + if (ha->optrom_state != QLA_SREADING && + ha->optrom_state != QLA_SWRITING) { + rval = -EINVAL; + goto out; + } + ha->optrom_state = QLA_SWAITING; + + ql_dbg(ql_dbg_user, vha, 0x7061, + "Freeing flash region allocation -- 0x%x bytes.\n", + ha->optrom_region_size); + + vfree(ha->optrom_buffer); + ha->optrom_buffer = NULL; + break; + case 1: + if (ha->optrom_state != QLA_SWAITING) { + rval = -EINVAL; + goto out; + } + + ha->optrom_region_start = start; + ha->optrom_region_size = size; + + ha->optrom_state = QLA_SREADING; + ha->optrom_buffer = vzalloc(ha->optrom_region_size); + if (ha->optrom_buffer == NULL) { + ql_log(ql_log_warn, vha, 0x7062, + "Unable to allocate memory for optrom retrieval " + "(%x).\n", ha->optrom_region_size); + + ha->optrom_state = QLA_SWAITING; + rval = -ENOMEM; + goto out; + } + + if (qla2x00_wait_for_hba_online(vha) != 
QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x7063, + "HBA not online, failing NVRAM update.\n"); + rval = -EAGAIN; + goto out; + } + + ql_dbg(ql_dbg_user, vha, 0x7064, + "Reading flash region -- 0x%x/0x%x.\n", + ha->optrom_region_start, ha->optrom_region_size); + + ha->isp_ops->read_optrom(vha, ha->optrom_buffer, + ha->optrom_region_start, ha->optrom_region_size); + break; + case 2: + if (ha->optrom_state != QLA_SWAITING) { + rval = -EINVAL; + goto out; + } + + /* + * We need to be more restrictive on which FLASH regions are + * allowed to be updated via user-space. Regions accessible + * via this method include: + * + * ISP21xx/ISP22xx/ISP23xx type boards: + * + * 0x000000 -> 0x020000 -- Boot code. + * + * ISP2322/ISP24xx type boards: + * + * 0x000000 -> 0x07ffff -- Boot code. + * 0x080000 -> 0x0fffff -- Firmware. + * + * ISP25xx type boards: + * + * 0x000000 -> 0x07ffff -- Boot code. + * 0x080000 -> 0x0fffff -- Firmware. + * 0x120000 -> 0x12ffff -- VPD and HBA parameters. + * + * > ISP25xx type boards: + * + * None -- should go through BSG. + */ + valid = 0; + if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) + valid = 1; + else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) + valid = 1; + if (!valid) { + ql_log(ql_log_warn, vha, 0x7065, + "Invalid start region 0x%x/0x%x.\n", start, size); + rval = -EINVAL; + goto out; + } + + ha->optrom_region_start = start; + ha->optrom_region_size = size; + + ha->optrom_state = QLA_SWRITING; + ha->optrom_buffer = vzalloc(ha->optrom_region_size); + if (ha->optrom_buffer == NULL) { + ql_log(ql_log_warn, vha, 0x7066, + "Unable to allocate memory for optrom update " + "(%x)\n", ha->optrom_region_size); + + ha->optrom_state = QLA_SWAITING; + rval = -ENOMEM; + goto out; + } + + ql_dbg(ql_dbg_user, vha, 0x7067, + "Staging flash region write -- 0x%x/0x%x.\n", + ha->optrom_region_start, ha->optrom_region_size); + + break; + case 3: + if (ha->optrom_state != QLA_SWRITING) { + rval = -EINVAL; + goto out; + } + + if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x7068, + "HBA not online, failing flash update.\n"); + rval = -EAGAIN; + goto out; + } + + ql_dbg(ql_dbg_user, vha, 0x7069, + "Writing flash region -- 0x%x/0x%x.\n", + ha->optrom_region_start, ha->optrom_region_size); + + rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer, + ha->optrom_region_start, ha->optrom_region_size); + if (rval) + rval = -EIO; + break; + default: + rval = -EINVAL; + } + +out: + mutex_unlock(&ha->optrom_mutex); + return rval; +} + +static struct bin_attribute sysfs_optrom_ctl_attr = { + .attr = { + .name = "optrom_ctl", + .mode = S_IWUSR, + }, + .size = 0, + .write = qla2x00_sysfs_write_optrom_ctl, +}; + +static ssize_t +qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + uint32_t faddr; + struct active_regions active_regions = { }; + + if (unlikely(pci_channel_offline(ha->pdev))) + return -EAGAIN; + + if (!capable(CAP_SYS_ADMIN)) + return -EINVAL; + + if (!IS_NOCACHE_VPD_TYPE(ha)) + goto skip; + + faddr = ha->flt_region_vpd << 2; + + if (IS_QLA28XX(ha)) { + qla28xx_get_aux_images(vha, &active_regions); + if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) + faddr = ha->flt_region_vpd_sec << 2; + + ql_dbg(ql_dbg_init, vha, 0x7070, + "Loading %s nvram image.\n", + active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE 
? + "primary" : "secondary"); + } + + mutex_lock(&ha->optrom_mutex); + if (qla2x00_chip_is_down(vha)) { + mutex_unlock(&ha->optrom_mutex); + return -EAGAIN; + } + + ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size); + mutex_unlock(&ha->optrom_mutex); + + ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size); +skip: + return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size); +} + +static ssize_t +qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + uint8_t *tmp_data; + + if (unlikely(pci_channel_offline(ha->pdev))) + return 0; + + if (qla2x00_chip_is_down(vha)) + return 0; + + if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size || + !ha->isp_ops->write_nvram) + return 0; + + if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x706a, + "HBA not online, failing VPD update.\n"); + return -EAGAIN; + } + + mutex_lock(&ha->optrom_mutex); + if (qla2x00_chip_is_down(vha)) { + mutex_unlock(&ha->optrom_mutex); + return -EAGAIN; + } + + /* Write NVRAM. */ + ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count); + ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count); + + /* Update flash version information for 4Gb & above. */ + if (!IS_FWI2_CAPABLE(ha)) { + mutex_unlock(&ha->optrom_mutex); + return -EINVAL; + } + + tmp_data = vmalloc(256); + if (!tmp_data) { + mutex_unlock(&ha->optrom_mutex); + ql_log(ql_log_warn, vha, 0x706b, + "Unable to allocate memory for VPD information update.\n"); + return -ENOMEM; + } + ha->isp_ops->get_flash_version(vha, tmp_data); + vfree(tmp_data); + + mutex_unlock(&ha->optrom_mutex); + + return count; +} + +static struct bin_attribute sysfs_vpd_attr = { + .attr = { + .name = "vpd", + .mode = S_IRUSR | S_IWUSR, + }, + .size = 0, + .read = qla2x00_sysfs_read_vpd, + .write = qla2x00_sysfs_write_vpd, +}; + +static ssize_t +qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + int rval; + + if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE) + return 0; + + mutex_lock(&vha->hw->optrom_mutex); + if (qla2x00_chip_is_down(vha)) { + mutex_unlock(&vha->hw->optrom_mutex); + return 0; + } + + rval = qla2x00_read_sfp_dev(vha, buf, count); + mutex_unlock(&vha->hw->optrom_mutex); + + if (rval) + return -EIO; + + return count; +} + +static struct bin_attribute sysfs_sfp_attr = { + .attr = { + .name = "sfp", + .mode = S_IRUSR | S_IWUSR, + }, + .size = SFP_DEV_SIZE, + .read = qla2x00_sysfs_read_sfp, +}; + +static ssize_t +qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + int type; + uint32_t idc_control; + uint8_t *tmp_data = NULL; + + if (off != 0) + return -EINVAL; + + type = simple_strtol(buf, NULL, 10); + switch (type) { + case 0x2025c: + ql_log(ql_log_info, vha, 0x706e, + "Issuing ISP reset.\n"); + + if (vha->hw->flags.port_isolated) { + ql_log(ql_log_info, vha, 0x706e, + "Port is isolated, 
returning.\n"); + return -EINVAL; + } + + scsi_block_requests(vha->host); + if (IS_QLA82XX(ha)) { + ha->flags.isp82xx_no_md_cap = 1; + qla82xx_idc_lock(ha); + qla82xx_set_reset_owner(vha); + qla82xx_idc_unlock(ha); + } else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + idc_control = qla8044_rd_reg(ha, + QLA8044_IDC_DRV_CTRL); + qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, + (idc_control | GRACEFUL_RESET_BIT1)); + qla82xx_set_reset_owner(vha); + qla8044_idc_unlock(ha); + } else { + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + qla2x00_wait_for_chip_reset(vha); + scsi_unblock_requests(vha->host); + break; + case 0x2025d: + if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return -EPERM; + + ql_log(ql_log_info, vha, 0x706f, + "Issuing MPI reset.\n"); + + if (IS_QLA83XX(ha)) { + uint32_t idc_control; + + qla83xx_idc_lock(vha, 0); + __qla83xx_get_idc_control(vha, &idc_control); + idc_control |= QLA83XX_IDC_GRACEFUL_RESET; + __qla83xx_set_idc_control(vha, idc_control); + qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, + QLA8XXX_DEV_NEED_RESET); + qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP); + qla83xx_idc_unlock(vha, 0); + break; + } else { + /* Make sure FC side is not in reset */ + WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) != + QLA_SUCCESS); + + /* Issue MPI reset */ + scsi_block_requests(vha->host); + if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0x7070, + "MPI reset failed.\n"); + scsi_unblock_requests(vha->host); + break; + } + break; + case 0x2025e: + if (!IS_P3P_TYPE(ha) || vha != base_vha) { + ql_log(ql_log_info, vha, 0x7071, + "FCoE ctx reset not supported.\n"); + return -EPERM; + } + + ql_log(ql_log_info, vha, 0x7072, + "Issuing FCoE ctx reset.\n"); + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + qla2x00_wait_for_fcoe_ctx_reset(vha); + break; + case 0x2025f: + if (!IS_QLA8031(ha)) + return -EPERM; + ql_log(ql_log_info, vha, 0x70bc, + "Disabling Reset by IDC control\n"); + qla83xx_idc_lock(vha, 0); + __qla83xx_get_idc_control(vha, &idc_control); + idc_control |= QLA83XX_IDC_RESET_DISABLED; + __qla83xx_set_idc_control(vha, idc_control); + qla83xx_idc_unlock(vha, 0); + break; + case 0x20260: + if (!IS_QLA8031(ha)) + return -EPERM; + ql_log(ql_log_info, vha, 0x70bd, + "Enabling Reset by IDC control\n"); + qla83xx_idc_lock(vha, 0); + __qla83xx_get_idc_control(vha, &idc_control); + idc_control &= ~QLA83XX_IDC_RESET_DISABLED; + __qla83xx_set_idc_control(vha, idc_control); + qla83xx_idc_unlock(vha, 0); + break; + case 0x20261: + ql_dbg(ql_dbg_user, vha, 0x70e0, + "Updating cache versions without reset "); + + tmp_data = vmalloc(256); + if (!tmp_data) { + ql_log(ql_log_warn, vha, 0x70e1, + "Unable to allocate memory for VPD information update.\n"); + return -ENOMEM; + } + ha->isp_ops->get_flash_version(vha, tmp_data); + vfree(tmp_data); + break; + } + return count; +} + +static struct bin_attribute sysfs_reset_attr = { + .attr = { + .name = "reset", + .mode = S_IWUSR, + }, + .size = 0, + .write = qla2x00_sysfs_write_reset, +}; + +static ssize_t +qla2x00_issue_logo(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + int type; + port_id_t did; + + if (!capable(CAP_SYS_ADMIN)) + return 0; + + if (unlikely(pci_channel_offline(vha->hw->pdev))) + return 0; + + if (qla2x00_chip_is_down(vha)) + return 0; + + type = 
simple_strtol(buf, NULL, 10); + + did.b.domain = (type & 0x00ff0000) >> 16; + did.b.area = (type & 0x0000ff00) >> 8; + did.b.al_pa = (type & 0x000000ff); + + ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n", + did.b.domain, did.b.area, did.b.al_pa); + + ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type); + + qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did); + return count; +} + +static struct bin_attribute sysfs_issue_logo_attr = { + .attr = { + .name = "issue_logo", + .mode = S_IWUSR, + }, + .size = 0, + .write = qla2x00_issue_logo, +}; + +static ssize_t +qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + int rval; + uint16_t actual_size; + + if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE) + return 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + return 0; + mutex_lock(&vha->hw->optrom_mutex); + if (qla2x00_chip_is_down(vha)) { + mutex_unlock(&vha->hw->optrom_mutex); + return 0; + } + + if (ha->xgmac_data) + goto do_read; + + ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, + &ha->xgmac_data_dma, GFP_KERNEL); + if (!ha->xgmac_data) { + mutex_unlock(&vha->hw->optrom_mutex); + ql_log(ql_log_warn, vha, 0x7076, + "Unable to allocate memory for XGMAC read-data.\n"); + return 0; + } + +do_read: + actual_size = 0; + memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE); + + rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma, + XGMAC_DATA_SIZE, &actual_size); + + mutex_unlock(&vha->hw->optrom_mutex); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x7077, + "Unable to read XGMAC data (%x).\n", rval); + count = 0; + } + + count = actual_size > count ? 
count : actual_size; + memcpy(buf, ha->xgmac_data, count); + + return count; +} + +static struct bin_attribute sysfs_xgmac_stats_attr = { + .attr = { + .name = "xgmac_stats", + .mode = S_IRUSR, + }, + .size = 0, + .read = qla2x00_sysfs_read_xgmac_stats, +}; + +static ssize_t +qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + int rval; + + if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE) + return 0; + + mutex_lock(&vha->hw->optrom_mutex); + if (ha->dcbx_tlv) + goto do_read; + if (qla2x00_chip_is_down(vha)) { + mutex_unlock(&vha->hw->optrom_mutex); + return 0; + } + + ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, + &ha->dcbx_tlv_dma, GFP_KERNEL); + if (!ha->dcbx_tlv) { + mutex_unlock(&vha->hw->optrom_mutex); + ql_log(ql_log_warn, vha, 0x7078, + "Unable to allocate memory for DCBX TLV read-data.\n"); + return -ENOMEM; + } + +do_read: + memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE); + + rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, + DCBX_TLV_DATA_SIZE); + + mutex_unlock(&vha->hw->optrom_mutex); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x7079, + "Unable to read DCBX TLV (%x).\n", rval); + return -EIO; + } + + memcpy(buf, ha->dcbx_tlv, count); + + return count; +} + +static struct bin_attribute sysfs_dcbx_tlv_attr = { + .attr = { + .name = "dcbx_tlv", + .mode = S_IRUSR, + }, + .size = 0, + .read = qla2x00_sysfs_read_dcbx_tlv, +}; + +static struct sysfs_entry { + char *name; + struct bin_attribute *attr; + int type; +} bin_file_entries[] = { + { "fw_dump", &sysfs_fw_dump_attr, }, + { "nvram", &sysfs_nvram_attr, }, + { "optrom", &sysfs_optrom_attr, }, + { "optrom_ctl", &sysfs_optrom_ctl_attr, }, + { "vpd", &sysfs_vpd_attr, 1 }, + { "sfp", &sysfs_sfp_attr, 1 }, + { "reset", &sysfs_reset_attr, }, + { "issue_logo", &sysfs_issue_logo_attr, }, + { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 }, + { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 }, + { NULL }, +}; + +void +qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha) +{ + struct Scsi_Host *host = vha->host; + struct sysfs_entry *iter; + int ret; + + for (iter = bin_file_entries; iter->name; iter++) { + if (iter->type && !IS_FWI2_CAPABLE(vha->hw)) + continue; + if (iter->type == 2 && !IS_QLA25XX(vha->hw)) + continue; + if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw))) + continue; + + ret = sysfs_create_bin_file(&host->shost_gendev.kobj, + iter->attr); + if (ret) + ql_log(ql_log_warn, vha, 0x00f3, + "Unable to create sysfs %s binary attribute (%d).\n", + iter->name, ret); + else + ql_dbg(ql_dbg_init, vha, 0x00f4, + "Successfully created sysfs %s binary attribute.\n", + iter->name); + } +} + +void +qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon) +{ + struct Scsi_Host *host = vha->host; + struct sysfs_entry *iter; + struct qla_hw_data *ha = vha->hw; + + for (iter = bin_file_entries; iter->name; iter++) { + if (iter->type && !IS_FWI2_CAPABLE(ha)) + continue; + if (iter->type == 2 && !IS_QLA25XX(ha)) + continue; + if (iter->type == 3 && !(IS_CNA_CAPABLE(ha))) + continue; + + sysfs_remove_bin_file(&host->shost_gendev.kobj, + iter->attr); + } + + if (stop_beacon && ha->beacon_blink_led == 1) + ha->isp_ops->beacon_off(vha); +} + +/* Scsi_Host attributes. 
*/ + +static ssize_t +qla2x00_driver_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str); +} + +static ssize_t +qla2x00_fw_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + char fw_str[128]; + + return scnprintf(buf, PAGE_SIZE, "%s\n", + ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str))); +} + +static ssize_t +qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + uint32_t sn; + + if (IS_QLAFX00(vha->hw)) { + return scnprintf(buf, PAGE_SIZE, "%s\n", + vha->hw->mr.serial_num); + } else if (IS_FWI2_CAPABLE(ha)) { + qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1); + return strlen(strcat(buf, "\n")); + } + + sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1; + return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000, + sn % 100000); +} + +static ssize_t +qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device); +} + +static ssize_t +qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + if (IS_QLAFX00(vha->hw)) + return scnprintf(buf, PAGE_SIZE, "%s\n", + vha->hw->mr.hw_version); + + return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n", + ha->product_id[0], ha->product_id[1], ha->product_id[2], + ha->product_id[3]); +} + +static ssize_t +qla2x00_model_name_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number); +} + +static ssize_t +qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc); +} + +static ssize_t +qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + char pci_info[30]; + + return scnprintf(buf, PAGE_SIZE, "%s\n", + vha->hw->isp_ops->pci_info_str(vha, pci_info, + sizeof(pci_info))); +} + +static ssize_t +qla2x00_link_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + int len = 0; + + if (atomic_read(&vha->loop_state) == LOOP_DOWN || + atomic_read(&vha->loop_state) == LOOP_DEAD || + vha->device_flags & DFLG_NO_CABLE) + len = scnprintf(buf, PAGE_SIZE, "Link Down\n"); + else if (atomic_read(&vha->loop_state) != LOOP_READY || + qla2x00_chip_is_down(vha)) + len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n"); + else { + len = scnprintf(buf, PAGE_SIZE, "Link Up - "); + + switch (ha->current_topology) { + case ISP_CFG_NL: + len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n"); + break; + case ISP_CFG_FL: + len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n"); + break; + case ISP_CFG_N: + len += scnprintf(buf + len, PAGE_SIZE-len, + "N_Port to N_Port\n"); + break; + case ISP_CFG_F: + len += 
scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n"); + break; + default: + len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n"); + break; + } + } + return len; +} + +static ssize_t +qla2x00_zio_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int len = 0; + + switch (vha->hw->zio_mode) { + case QLA_ZIO_MODE_6: + len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n"); + break; + case QLA_ZIO_DISABLED: + len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); + break; + } + return len; +} + +static ssize_t +qla2x00_zio_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + int val = 0; + uint16_t zio_mode; + + if (!IS_ZIO_SUPPORTED(ha)) + return -ENOTSUPP; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + if (val) + zio_mode = QLA_ZIO_MODE_6; + else + zio_mode = QLA_ZIO_DISABLED; + + /* Update per-hba values and queue a reset. */ + if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) { + ha->zio_mode = zio_mode; + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + } + return strlen(buf); +} + +static ssize_t +qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100); +} + +static ssize_t +qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int val = 0; + uint16_t zio_timer; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + if (val > 25500 || val < 100) + return -ERANGE; + + zio_timer = (uint16_t)(val / 100); + vha->hw->zio_timer = zio_timer; + + return strlen(buf); +} + +static ssize_t +qla_zio_threshold_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + return scnprintf(buf, PAGE_SIZE, "%d exchanges\n", + vha->hw->last_zio_threshold); +} + +static ssize_t +qla_zio_threshold_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int val = 0; + + if (vha->hw->zio_mode != QLA_ZIO_MODE_6) + return -EINVAL; + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + if (val < 0 || val > 256) + return -ERANGE; + + atomic_set(&vha->hw->zio_threshold, val); + return strlen(buf); +} + +static ssize_t +qla2x00_beacon_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int len = 0; + + if (vha->hw->beacon_blink_led) + len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n"); + else + len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); + return len; +} + +static ssize_t +qla2x00_beacon_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + int val = 0; + int rval; + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) + return -EPERM; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + mutex_lock(&vha->hw->optrom_mutex); + if (qla2x00_chip_is_down(vha)) { + mutex_unlock(&vha->hw->optrom_mutex); + ql_log(ql_log_warn, vha, 0x707a, + "Abort ISP active -- ignoring beacon request.\n"); + return -EBUSY; + } 
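+	/*
+	 * val is taken from a decimal sysfs write: non-zero asks the
+	 * firmware to start blinking the beacon LED, zero turns it off.
+	 * From user space this is typically something like
+	 *     echo 1 > /sys/class/scsi_host/hostN/beacon
+	 * (the exact sysfs path is assumed from the host attributes this
+	 * driver registers; substitute the adapter's host number for N).
+	 */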
+ + if (val) + rval = ha->isp_ops->beacon_on(vha); + else + rval = ha->isp_ops->beacon_off(vha); + + if (rval != QLA_SUCCESS) + count = 0; + + mutex_unlock(&vha->hw->optrom_mutex); + + return count; +} + +static ssize_t +qla2x00_beacon_config_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + uint16_t led[3] = { 0 }; + + if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return -EPERM; + + if (ql26xx_led_config(vha, 0, led)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%#04hx %#04hx %#04hx\n", + led[0], led[1], led[2]); +} + +static ssize_t +qla2x00_beacon_config_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + uint16_t options = BIT_0; + uint16_t led[3] = { 0 }; + uint16_t word[4]; + int n; + + if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return -EPERM; + + n = sscanf(buf, "%hx %hx %hx %hx", word+0, word+1, word+2, word+3); + if (n == 4) { + if (word[0] == 3) { + options |= BIT_3|BIT_2|BIT_1; + led[0] = word[1]; + led[1] = word[2]; + led[2] = word[3]; + goto write; + } + return -EINVAL; + } + + if (n == 2) { + /* check led index */ + if (word[0] == 0) { + options |= BIT_2; + led[0] = word[1]; + goto write; + } + if (word[0] == 1) { + options |= BIT_3; + led[1] = word[1]; + goto write; + } + if (word[0] == 2) { + options |= BIT_1; + led[2] = word[1]; + goto write; + } + return -EINVAL; + } + + return -EINVAL; + +write: + if (ql26xx_led_config(vha, options, led)) + return -EFAULT; + + return count; +} + +static ssize_t +qla2x00_optrom_bios_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1], + ha->bios_revision[0]); +} + +static ssize_t +qla2x00_optrom_efi_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1], + ha->efi_revision[0]); +} + +static ssize_t +qla2x00_optrom_fcode_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1], + ha->fcode_revision[0]); +} + +static ssize_t +qla2x00_optrom_fw_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n", + ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], + ha->fw_revision[3]); +} + +static ssize_t +qla2x00_optrom_gold_fw_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n", + ha->gold_fw_version[0], ha->gold_fw_version[1], + ha->gold_fw_version[2], ha->gold_fw_version[3]); +} + +static 
ssize_t +qla2x00_total_isp_aborts_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + return scnprintf(buf, PAGE_SIZE, "%d\n", + vha->qla_stats.total_isp_aborts); +} + +static ssize_t +qla24xx_84xx_fw_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int rval = QLA_SUCCESS; + uint16_t status[2] = { 0 }; + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA84XX(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + if (!ha->cs84xx->op_fw_version) { + rval = qla84xx_verify_chip(vha, status); + + if (!rval && !status[0]) + return scnprintf(buf, PAGE_SIZE, "%u\n", + (uint32_t)ha->cs84xx->op_fw_version); + } + + return scnprintf(buf, PAGE_SIZE, "\n"); +} + +static ssize_t +qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", + ha->serdes_version[0], ha->serdes_version[1], + ha->serdes_version[2]); +} + +static ssize_t +qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", + ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2], + ha->mpi_capabilities); +} + +static ssize_t +qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", + ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]); +} + +static ssize_t +qla2x00_flash_block_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); +} + +static ssize_t +qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + if (!IS_CNA_CAPABLE(vha->hw)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id); +} + +static ssize_t +qla2x00_vn_port_mac_address_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + if (!IS_CNA_CAPABLE(vha->hw)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac); +} + +static ssize_t +qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap); +} + +static ssize_t +qla2x00_thermal_temp_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + uint16_t temp = 0; + int rc; + + 
mutex_lock(&vha->hw->optrom_mutex); + if (qla2x00_chip_is_down(vha)) { + mutex_unlock(&vha->hw->optrom_mutex); + ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n"); + goto done; + } + + if (vha->hw->flags.eeh_busy) { + mutex_unlock(&vha->hw->optrom_mutex); + ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n"); + goto done; + } + + rc = qla2x00_get_thermal_temp(vha, &temp); + mutex_unlock(&vha->hw->optrom_mutex); + if (rc == QLA_SUCCESS) + return scnprintf(buf, PAGE_SIZE, "%d\n", temp); + +done: + return scnprintf(buf, PAGE_SIZE, "\n"); +} + +static ssize_t +qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int rval = QLA_FUNCTION_FAILED; + uint16_t state[6]; + uint32_t pstate; + + if (IS_QLAFX00(vha->hw)) { + pstate = qlafx00_fw_state_show(dev, attr, buf); + return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate); + } + + mutex_lock(&vha->hw->optrom_mutex); + if (qla2x00_chip_is_down(vha)) { + mutex_unlock(&vha->hw->optrom_mutex); + ql_log(ql_log_warn, vha, 0x707c, + "ISP reset active.\n"); + goto out; + } else if (vha->hw->flags.eeh_busy) { + mutex_unlock(&vha->hw->optrom_mutex); + goto out; + } + + rval = qla2x00_get_firmware_state(vha, state); + mutex_unlock(&vha->hw->optrom_mutex); +out: + if (rval != QLA_SUCCESS) { + memset(state, -1, sizeof(state)); + rval = qla2x00_get_firmware_state(vha, state); + } + + return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + state[0], state[1], state[2], state[3], state[4], state[5]); +} + +static ssize_t +qla2x00_diag_requests_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + if (!IS_BIDI_CAPABLE(vha->hw)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count); +} + +static ssize_t +qla2x00_diag_megabytes_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + if (!IS_BIDI_CAPABLE(vha->hw)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%llu\n", + vha->bidi_stats.transfer_bytes >> 20); +} + +static ssize_t +qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + uint32_t size; + + if (!ha->fw_dumped) + size = 0; + else if (IS_P3P_TYPE(ha)) + size = ha->md_template_size + ha->md_dump_size; + else + size = ha->fw_dump_len; + + return scnprintf(buf, PAGE_SIZE, "%d\n", size); +} + +static ssize_t +qla2x00_allow_cna_fw_dump_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + if (!IS_P3P_TYPE(vha->hw)) + return scnprintf(buf, PAGE_SIZE, "\n"); + else + return scnprintf(buf, PAGE_SIZE, "%s\n", + vha->hw->allow_cna_fw_dump ? 
"true" : "false"); +} + +static ssize_t +qla2x00_allow_cna_fw_dump_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int val = 0; + + if (!IS_P3P_TYPE(vha->hw)) + return -EINVAL; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + vha->hw->allow_cna_fw_dump = val != 0; + + return strlen(buf); +} + +static ssize_t +qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", + ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]); +} + +static ssize_t +qla2x00_min_supported_speed_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%s\n", + ha->min_supported_speed == 6 ? "64Gps" : + ha->min_supported_speed == 5 ? "32Gps" : + ha->min_supported_speed == 4 ? "16Gps" : + ha->min_supported_speed == 3 ? "8Gps" : + ha->min_supported_speed == 2 ? "4Gps" : + ha->min_supported_speed != 0 ? "unknown" : ""); +} + +static ssize_t +qla2x00_max_supported_speed_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%s\n", + ha->max_supported_speed == 2 ? "64Gps" : + ha->max_supported_speed == 1 ? "32Gps" : + ha->max_supported_speed == 0 ? "16Gps" : "unknown"); +} + +static ssize_t +qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev)); + ulong type, speed; + int oldspeed, rval; + int mode = QLA_SET_DATA_RATE_LR; + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) { + ql_log(ql_log_warn, vha, 0x70d8, + "Speed setting not supported \n"); + return -EINVAL; + } + + rval = kstrtol(buf, 10, &type); + if (rval) + return rval; + speed = type; + if (type == 40 || type == 80 || type == 160 || + type == 320) { + ql_dbg(ql_dbg_user, vha, 0x70d9, + "Setting will be affected after a loss of sync\n"); + type = type/10; + mode = QLA_SET_DATA_RATE_NOLR; + } + + oldspeed = ha->set_data_rate; + + switch (type) { + case 0: + ha->set_data_rate = PORT_SPEED_AUTO; + break; + case 4: + ha->set_data_rate = PORT_SPEED_4GB; + break; + case 8: + ha->set_data_rate = PORT_SPEED_8GB; + break; + case 16: + ha->set_data_rate = PORT_SPEED_16GB; + break; + case 32: + ha->set_data_rate = PORT_SPEED_32GB; + break; + default: + ql_log(ql_log_warn, vha, 0x1199, + "Unrecognized speed setting:%lx. 
Setting Autoneg\n", + speed); + ha->set_data_rate = PORT_SPEED_AUTO; + } + + if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate)) + return -EINVAL; + + ql_log(ql_log_info, vha, 0x70da, + "Setting speed to %lx Gbps \n", type); + + rval = qla2x00_set_data_rate(vha, mode); + if (rval != QLA_SUCCESS) + return -EIO; + + return strlen(buf); +} + +static const struct { + u16 rate; + char *str; +} port_speed_str[] = { + { PORT_SPEED_4GB, "4" }, + { PORT_SPEED_8GB, "8" }, + { PORT_SPEED_16GB, "16" }, + { PORT_SPEED_32GB, "32" }, + { PORT_SPEED_64GB, "64" }, + { PORT_SPEED_10GB, "10" }, +}; + +static ssize_t +qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + ssize_t rval; + u16 i; + char *speed = "Unknown"; + + rval = qla2x00_get_data_rate(vha); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x70db, + "Unable to get port speed rval:%zd\n", rval); + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(port_speed_str); i++) { + if (port_speed_str[i].rate != ha->link_data_rate) + continue; + speed = port_speed_str[i].str; + break; + } + + return scnprintf(buf, PAGE_SIZE, "%s\n", speed); +} + +static ssize_t +qla2x00_mpi_pause_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int rval = 0; + + if (sscanf(buf, "%d", &rval) != 1) + return -EINVAL; + + ql_log(ql_log_warn, vha, 0x7089, "Pausing MPI...\n"); + + rval = qla83xx_wr_reg(vha, 0x002012d4, 0x30000001); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x708a, "Unable to pause MPI.\n"); + count = 0; + } + + return count; +} + +static DEVICE_ATTR(mpi_pause, S_IWUSR, NULL, qla2x00_mpi_pause_store); + +/* ----- */ + +static ssize_t +qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int len = 0; + + len += scnprintf(buf + len, PAGE_SIZE-len, + "Supported options: enabled | disabled | dual | exclusive\n"); + + /* --- */ + len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: "); + + switch (vha->qlini_mode) { + case QLA2XXX_INI_MODE_EXCLUSIVE: + len += scnprintf(buf + len, PAGE_SIZE-len, + QLA2XXX_INI_MODE_STR_EXCLUSIVE); + break; + case QLA2XXX_INI_MODE_DISABLED: + len += scnprintf(buf + len, PAGE_SIZE-len, + QLA2XXX_INI_MODE_STR_DISABLED); + break; + case QLA2XXX_INI_MODE_ENABLED: + len += scnprintf(buf + len, PAGE_SIZE-len, + QLA2XXX_INI_MODE_STR_ENABLED); + break; + case QLA2XXX_INI_MODE_DUAL: + len += scnprintf(buf + len, PAGE_SIZE-len, + QLA2XXX_INI_MODE_STR_DUAL); + break; + } + len += scnprintf(buf + len, PAGE_SIZE-len, "\n"); + + return len; +} + +static char *mode_to_str[] = { + "exclusive", + "disabled", + "enabled", + "dual", +}; + +#define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT) +static void qla_set_ini_mode(scsi_qla_host_t *vha, int op) +{ + enum { + NO_ACTION, + MODE_CHANGE_ACCEPT, + MODE_CHANGE_NO_ACTION, + TARGET_STILL_ACTIVE, + }; + int action = NO_ACTION; + int set_mode = 0; + u8 eo_toggle = 0; /* exchange offload flipped */ + + switch (vha->qlini_mode) { + case QLA2XXX_INI_MODE_DISABLED: + switch (op) { + case QLA2XXX_INI_MODE_DISABLED: + if (qla_tgt_mode_enabled(vha)) { + if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != + vha->hw->flags.exchoffld_enabled) + eo_toggle = 1; + if (((vha->ql2xexchoffld != + vha->u_ql2xexchoffld) && + 
NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || + eo_toggle) { + /* + * The number of exchange to be offload + * was tweaked or offload option was + * flipped + */ + action = MODE_CHANGE_ACCEPT; + } else { + action = MODE_CHANGE_NO_ACTION; + } + } else { + action = MODE_CHANGE_NO_ACTION; + } + break; + case QLA2XXX_INI_MODE_EXCLUSIVE: + if (qla_tgt_mode_enabled(vha)) { + if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != + vha->hw->flags.exchoffld_enabled) + eo_toggle = 1; + if (((vha->ql2xexchoffld != + vha->u_ql2xexchoffld) && + NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || + eo_toggle) { + /* + * The number of exchange to be offload + * was tweaked or offload option was + * flipped + */ + action = MODE_CHANGE_ACCEPT; + } else { + action = MODE_CHANGE_NO_ACTION; + } + } else { + action = MODE_CHANGE_ACCEPT; + } + break; + case QLA2XXX_INI_MODE_DUAL: + action = MODE_CHANGE_ACCEPT; + /* active_mode is target only, reset it to dual */ + if (qla_tgt_mode_enabled(vha)) { + set_mode = 1; + action = MODE_CHANGE_ACCEPT; + } else { + action = MODE_CHANGE_NO_ACTION; + } + break; + + case QLA2XXX_INI_MODE_ENABLED: + if (qla_tgt_mode_enabled(vha)) + action = TARGET_STILL_ACTIVE; + else { + action = MODE_CHANGE_ACCEPT; + set_mode = 1; + } + break; + } + break; + + case QLA2XXX_INI_MODE_EXCLUSIVE: + switch (op) { + case QLA2XXX_INI_MODE_EXCLUSIVE: + if (qla_tgt_mode_enabled(vha)) { + if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != + vha->hw->flags.exchoffld_enabled) + eo_toggle = 1; + if (((vha->ql2xexchoffld != + vha->u_ql2xexchoffld) && + NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || + eo_toggle) + /* + * The number of exchange to be offload + * was tweaked or offload option was + * flipped + */ + action = MODE_CHANGE_ACCEPT; + else + action = NO_ACTION; + } else + action = NO_ACTION; + + break; + + case QLA2XXX_INI_MODE_DISABLED: + if (qla_tgt_mode_enabled(vha)) { + if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != + vha->hw->flags.exchoffld_enabled) + eo_toggle = 1; + if (((vha->ql2xexchoffld != + vha->u_ql2xexchoffld) && + NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || + eo_toggle) + action = MODE_CHANGE_ACCEPT; + else + action = MODE_CHANGE_NO_ACTION; + } else + action = MODE_CHANGE_NO_ACTION; + break; + + case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */ + if (qla_tgt_mode_enabled(vha)) { + action = MODE_CHANGE_ACCEPT; + set_mode = 1; + } else + action = MODE_CHANGE_ACCEPT; + break; + + case QLA2XXX_INI_MODE_ENABLED: + if (qla_tgt_mode_enabled(vha)) + action = TARGET_STILL_ACTIVE; + else { + if (vha->hw->flags.fw_started) + action = MODE_CHANGE_NO_ACTION; + else + action = MODE_CHANGE_ACCEPT; + } + break; + } + break; + + case QLA2XXX_INI_MODE_ENABLED: + switch (op) { + case QLA2XXX_INI_MODE_ENABLED: + if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) != + vha->hw->flags.exchoffld_enabled) + eo_toggle = 1; + if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) && + NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) || + eo_toggle) + action = MODE_CHANGE_ACCEPT; + else + action = NO_ACTION; + break; + case QLA2XXX_INI_MODE_DUAL: + case QLA2XXX_INI_MODE_DISABLED: + action = MODE_CHANGE_ACCEPT; + break; + default: + action = MODE_CHANGE_NO_ACTION; + break; + } + break; + + case QLA2XXX_INI_MODE_DUAL: + switch (op) { + case QLA2XXX_INI_MODE_DUAL: + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) { + if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld + + vha->u_ql2xiniexchg) != + vha->hw->flags.exchoffld_enabled) + eo_toggle = 1; + + if ((((vha->ql2xexchoffld + + vha->ql2xiniexchg) != + (vha->u_ql2xiniexchg + + 
vha->u_ql2xexchoffld)) && + NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg + + vha->u_ql2xexchoffld)) || eo_toggle) + action = MODE_CHANGE_ACCEPT; + else + action = NO_ACTION; + } else { + if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld + + vha->u_ql2xiniexchg) != + vha->hw->flags.exchoffld_enabled) + eo_toggle = 1; + + if ((((vha->ql2xexchoffld + vha->ql2xiniexchg) + != (vha->u_ql2xiniexchg + + vha->u_ql2xexchoffld)) && + NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg + + vha->u_ql2xexchoffld)) || eo_toggle) + action = MODE_CHANGE_NO_ACTION; + else + action = NO_ACTION; + } + break; + + case QLA2XXX_INI_MODE_DISABLED: + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) { + /* turning off initiator mode */ + set_mode = 1; + action = MODE_CHANGE_ACCEPT; + } else { + action = MODE_CHANGE_NO_ACTION; + } + break; + + case QLA2XXX_INI_MODE_EXCLUSIVE: + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) { + set_mode = 1; + action = MODE_CHANGE_ACCEPT; + } else { + action = MODE_CHANGE_ACCEPT; + } + break; + + case QLA2XXX_INI_MODE_ENABLED: + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) { + action = TARGET_STILL_ACTIVE; + } else { + action = MODE_CHANGE_ACCEPT; + } + } + break; + } + + switch (action) { + case MODE_CHANGE_ACCEPT: + ql_log(ql_log_warn, vha, 0xffff, + "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n", + mode_to_str[vha->qlini_mode], mode_to_str[op], + vha->ql2xexchoffld, vha->u_ql2xexchoffld, + vha->ql2xiniexchg, vha->u_ql2xiniexchg); + + vha->qlini_mode = op; + vha->ql2xexchoffld = vha->u_ql2xexchoffld; + vha->ql2xiniexchg = vha->u_ql2xiniexchg; + if (set_mode) + qlt_set_mode(vha); + vha->flags.online = 1; + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + + case MODE_CHANGE_NO_ACTION: + ql_log(ql_log_warn, vha, 0xffff, + "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n", + mode_to_str[vha->qlini_mode], mode_to_str[op], + vha->ql2xexchoffld, vha->u_ql2xexchoffld, + vha->ql2xiniexchg, vha->u_ql2xiniexchg); + vha->qlini_mode = op; + vha->ql2xexchoffld = vha->u_ql2xexchoffld; + vha->ql2xiniexchg = vha->u_ql2xiniexchg; + break; + + case TARGET_STILL_ACTIVE: + ql_log(ql_log_warn, vha, 0xffff, + "Target Mode is active. Unable to change Mode.\n"); + break; + + case NO_ACTION: + default: + ql_log(ql_log_warn, vha, 0xffff, + "Mode unchange. No action taken. 
%d|%d pct %d|%d.\n", + vha->qlini_mode, op, + vha->ql2xexchoffld, vha->u_ql2xexchoffld); + break; + } +} + +static ssize_t +qlini_mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int ini; + + if (!buf) + return -EINVAL; + + if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf, + strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0) + ini = QLA2XXX_INI_MODE_EXCLUSIVE; + else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf, + strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0) + ini = QLA2XXX_INI_MODE_DISABLED; + else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf, + strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0) + ini = QLA2XXX_INI_MODE_ENABLED; + else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf, + strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0) + ini = QLA2XXX_INI_MODE_DUAL; + else + return -EINVAL; + + qla_set_ini_mode(vha, ini); + return strlen(buf); +} + +static ssize_t +ql2xexchoffld_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int len = 0; + + len += scnprintf(buf + len, PAGE_SIZE-len, + "target exchange: new %d : current: %d\n\n", + vha->u_ql2xexchoffld, vha->ql2xexchoffld); + + len += scnprintf(buf + len, PAGE_SIZE-len, + "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n", + vha->host_no); + + return len; +} + +static ssize_t +ql2xexchoffld_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int val = 0; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + if (val > FW_MAX_EXCHANGES_CNT) + val = FW_MAX_EXCHANGES_CNT; + else if (val < 0) + val = 0; + + vha->u_ql2xexchoffld = val; + return strlen(buf); +} + +static ssize_t +ql2xiniexchg_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int len = 0; + + len += scnprintf(buf + len, PAGE_SIZE-len, + "target exchange: new %d : current: %d\n\n", + vha->u_ql2xiniexchg, vha->ql2xiniexchg); + + len += scnprintf(buf + len, PAGE_SIZE-len, + "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n", + vha->host_no); + + return len; +} + +static ssize_t +ql2xiniexchg_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int val = 0; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + if (val > FW_MAX_EXCHANGES_CNT) + val = FW_MAX_EXCHANGES_CNT; + else if (val < 0) + val = 0; + + vha->u_ql2xiniexchg = val; + return strlen(buf); +} + +static ssize_t +qla2x00_dif_bundle_statistics_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + return scnprintf(buf, PAGE_SIZE, + "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n", + ha->dif_bundle_crossed_pages, ha->dif_bundle_reads, + ha->dif_bundle_writes, ha->dif_bundle_kallocs, + ha->dif_bundle_dma_allocs, ha->pool.unusable.count); +} + +static ssize_t +qla2x00_fw_attr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return scnprintf(buf, 
PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%llx\n", + (uint64_t)ha->fw_attributes_ext[1] << 48 | + (uint64_t)ha->fw_attributes_ext[0] << 32 | + (uint64_t)ha->fw_attributes_h << 16 | + (uint64_t)ha->fw_attributes); +} + +static ssize_t +qla2x00_port_no_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no); +} + +static ssize_t +qla2x00_dport_diagnostics_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && + !IS_QLA28XX(vha->hw)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + if (!*vha->dport_data) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n", + vha->dport_data[0], vha->dport_data[1], + vha->dport_data[2], vha->dport_data[3]); +} +static DEVICE_ATTR(dport_diagnostics, 0444, + qla2x00_dport_diagnostics_show, NULL); + +static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL); +static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); +static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); +static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL); +static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL); +static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL); +static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL); +static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL); +static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL); +static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store); +static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show, + qla2x00_zio_timer_store); +static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show, + qla2x00_beacon_store); +static DEVICE_ATTR(beacon_config, 0644, qla2x00_beacon_config_show, + qla2x00_beacon_config_store); +static DEVICE_ATTR(optrom_bios_version, S_IRUGO, + qla2x00_optrom_bios_version_show, NULL); +static DEVICE_ATTR(optrom_efi_version, S_IRUGO, + qla2x00_optrom_efi_version_show, NULL); +static DEVICE_ATTR(optrom_fcode_version, S_IRUGO, + qla2x00_optrom_fcode_version_show, NULL); +static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show, + NULL); +static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO, + qla2x00_optrom_gold_fw_version_show, NULL); +static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show, + NULL); +static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show, + NULL); +static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL); +static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL); +static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL); +static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show, + NULL); +static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL); +static DEVICE_ATTR(vn_port_mac_address, S_IRUGO, + qla2x00_vn_port_mac_address_show, NULL); +static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL); +static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL); +static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL); +static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL); +static DEVICE_ATTR(diag_megabytes, S_IRUGO, 
qla2x00_diag_megabytes_show, NULL); +static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL); +static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR, + qla2x00_allow_cna_fw_dump_show, + qla2x00_allow_cna_fw_dump_store); +static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL); +static DEVICE_ATTR(min_supported_speed, 0444, + qla2x00_min_supported_speed_show, NULL); +static DEVICE_ATTR(max_supported_speed, 0444, + qla2x00_max_supported_speed_show, NULL); +static DEVICE_ATTR(zio_threshold, 0644, + qla_zio_threshold_show, + qla_zio_threshold_store); +static DEVICE_ATTR_RW(qlini_mode); +static DEVICE_ATTR_RW(ql2xexchoffld); +static DEVICE_ATTR_RW(ql2xiniexchg); +static DEVICE_ATTR(dif_bundle_statistics, 0444, + qla2x00_dif_bundle_statistics_show, NULL); +static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show, + qla2x00_port_speed_store); +static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL); +static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL); + +static struct attribute *qla2x00_host_attrs[] = { + &dev_attr_driver_version.attr, + &dev_attr_fw_version.attr, + &dev_attr_serial_num.attr, + &dev_attr_isp_name.attr, + &dev_attr_isp_id.attr, + &dev_attr_model_name.attr, + &dev_attr_model_desc.attr, + &dev_attr_pci_info.attr, + &dev_attr_link_state.attr, + &dev_attr_zio.attr, + &dev_attr_zio_timer.attr, + &dev_attr_beacon.attr, + &dev_attr_beacon_config.attr, + &dev_attr_optrom_bios_version.attr, + &dev_attr_optrom_efi_version.attr, + &dev_attr_optrom_fcode_version.attr, + &dev_attr_optrom_fw_version.attr, + &dev_attr_84xx_fw_version.attr, + &dev_attr_total_isp_aborts.attr, + &dev_attr_serdes_version.attr, + &dev_attr_mpi_version.attr, + &dev_attr_phy_version.attr, + &dev_attr_flash_block_size.attr, + &dev_attr_vlan_id.attr, + &dev_attr_vn_port_mac_address.attr, + &dev_attr_fabric_param.attr, + &dev_attr_fw_state.attr, + &dev_attr_optrom_gold_fw_version.attr, + &dev_attr_thermal_temp.attr, + &dev_attr_diag_requests.attr, + &dev_attr_diag_megabytes.attr, + &dev_attr_fw_dump_size.attr, + &dev_attr_allow_cna_fw_dump.attr, + &dev_attr_pep_version.attr, + &dev_attr_min_supported_speed.attr, + &dev_attr_max_supported_speed.attr, + &dev_attr_zio_threshold.attr, + &dev_attr_dif_bundle_statistics.attr, + &dev_attr_port_speed.attr, + &dev_attr_port_no.attr, + &dev_attr_fw_attr.attr, + &dev_attr_dport_diagnostics.attr, + &dev_attr_mpi_pause.attr, + &dev_attr_qlini_mode.attr, + &dev_attr_ql2xiniexchg.attr, + &dev_attr_ql2xexchoffld.attr, + NULL, +}; + +static umode_t qla_host_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + if (ql2x_ini_mode != QLA2XXX_INI_MODE_DUAL && + (attr == &dev_attr_qlini_mode.attr || + attr == &dev_attr_ql2xiniexchg.attr || + attr == &dev_attr_ql2xexchoffld.attr)) + return 0; + return attr->mode; +} + +static const struct attribute_group qla2x00_host_attr_group = { + .is_visible = qla_host_attr_is_visible, + .attrs = qla2x00_host_attrs +}; + +const struct attribute_group *qla2x00_host_groups[] = { + &qla2x00_host_attr_group, + NULL +}; + +/* Host attributes. 
*/ + +static void +qla2x00_get_host_port_id(struct Scsi_Host *shost) +{ + scsi_qla_host_t *vha = shost_priv(shost); + + fc_host_port_id(shost) = vha->d_id.b.domain << 16 | + vha->d_id.b.area << 8 | vha->d_id.b.al_pa; +} + +static void +qla2x00_get_host_speed(struct Scsi_Host *shost) +{ + scsi_qla_host_t *vha = shost_priv(shost); + u32 speed; + + if (IS_QLAFX00(vha->hw)) { + qlafx00_get_host_speed(shost); + return; + } + + switch (vha->hw->link_data_rate) { + case PORT_SPEED_1GB: + speed = FC_PORTSPEED_1GBIT; + break; + case PORT_SPEED_2GB: + speed = FC_PORTSPEED_2GBIT; + break; + case PORT_SPEED_4GB: + speed = FC_PORTSPEED_4GBIT; + break; + case PORT_SPEED_8GB: + speed = FC_PORTSPEED_8GBIT; + break; + case PORT_SPEED_10GB: + speed = FC_PORTSPEED_10GBIT; + break; + case PORT_SPEED_16GB: + speed = FC_PORTSPEED_16GBIT; + break; + case PORT_SPEED_32GB: + speed = FC_PORTSPEED_32GBIT; + break; + case PORT_SPEED_64GB: + speed = FC_PORTSPEED_64GBIT; + break; + default: + speed = FC_PORTSPEED_UNKNOWN; + break; + } + + fc_host_speed(shost) = speed; +} + +static void +qla2x00_get_host_port_type(struct Scsi_Host *shost) +{ + scsi_qla_host_t *vha = shost_priv(shost); + uint32_t port_type; + + if (vha->vp_idx) { + fc_host_port_type(shost) = FC_PORTTYPE_NPIV; + return; + } + switch (vha->hw->current_topology) { + case ISP_CFG_NL: + port_type = FC_PORTTYPE_LPORT; + break; + case ISP_CFG_FL: + port_type = FC_PORTTYPE_NLPORT; + break; + case ISP_CFG_N: + port_type = FC_PORTTYPE_PTP; + break; + case ISP_CFG_F: + port_type = FC_PORTTYPE_NPORT; + break; + default: + port_type = FC_PORTTYPE_UNKNOWN; + break; + } + + fc_host_port_type(shost) = port_type; +} + +static void +qla2x00_get_starget_node_name(struct scsi_target *starget) +{ + struct Scsi_Host *host = dev_to_shost(starget->dev.parent); + scsi_qla_host_t *vha = shost_priv(host); + fc_port_t *fcport; + u64 node_name = 0; + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->rport && + starget->id == fcport->rport->scsi_target_id) { + node_name = wwn_to_u64(fcport->node_name); + break; + } + } + + fc_starget_node_name(starget) = node_name; +} + +static void +qla2x00_get_starget_port_name(struct scsi_target *starget) +{ + struct Scsi_Host *host = dev_to_shost(starget->dev.parent); + scsi_qla_host_t *vha = shost_priv(host); + fc_port_t *fcport; + u64 port_name = 0; + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->rport && + starget->id == fcport->rport->scsi_target_id) { + port_name = wwn_to_u64(fcport->port_name); + break; + } + } + + fc_starget_port_name(starget) = port_name; +} + +static void +qla2x00_get_starget_port_id(struct scsi_target *starget) +{ + struct Scsi_Host *host = dev_to_shost(starget->dev.parent); + scsi_qla_host_t *vha = shost_priv(host); + fc_port_t *fcport; + uint32_t port_id = ~0U; + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->rport && + starget->id == fcport->rport->scsi_target_id) { + port_id = fcport->d_id.b.domain << 16 | + fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; + break; + } + } + + fc_starget_port_id(starget) = port_id; +} + +static inline void +qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) +{ + fc_port_t *fcport = *(fc_port_t **)rport->dd_data; + + rport->dev_loss_tmo = timeout ? 
timeout : 1; + + if (IS_ENABLED(CONFIG_NVME_FC) && fcport && fcport->nvme_remote_port) + nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, + rport->dev_loss_tmo); +} + +static void +qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) +{ + struct Scsi_Host *host = rport_to_shost(rport); + fc_port_t *fcport = *(fc_port_t **)rport->dd_data; + unsigned long flags; + + if (!fcport) + return; + + ql_dbg(ql_dbg_async, fcport->vha, 0x5101, + DBG_FCPORT_PRFMT(fcport, "dev_loss_tmo expiry, rport_state=%d", + rport->port_state)); + + /* + * Now that the rport has been deleted, set the fcport state to + * FCS_DEVICE_DEAD, if the fcport is still lost. + */ + if (fcport->scan_state != QLA_FCPORT_FOUND) + qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD); + + /* + * Transport has effectively 'deleted' the rport, clear + * all local references. + */ + spin_lock_irqsave(host->host_lock, flags); + /* Confirm port has not reappeared before clearing pointers. */ + if (rport->port_state != FC_PORTSTATE_ONLINE) { + fcport->rport = NULL; + *((fc_port_t **)rport->dd_data) = NULL; + } + spin_unlock_irqrestore(host->host_lock, flags); + + if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) + return; + + if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { + qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); + return; + } +} + +static void +qla2x00_terminate_rport_io(struct fc_rport *rport) +{ + fc_port_t *fcport = *(fc_port_t **)rport->dd_data; + scsi_qla_host_t *vha; + + if (!fcport) + return; + + if (test_bit(UNLOADING, &fcport->vha->dpc_flags)) + return; + + if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) + return; + vha = fcport->vha; + + if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { + qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); + qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, + 0, WAIT_TARGET); + return; + } + /* + * At this point all fcport's software-states are cleared. Perform any + * final cleanup of firmware resources (PCBs and XCBs). + * + * Attempt to cleanup only lost devices. + */ + if (fcport->loop_id != FC_NO_LOOP_ID) { + if (IS_FWI2_CAPABLE(fcport->vha->hw) && + fcport->scan_state != QLA_FCPORT_FOUND) { + if (fcport->loop_id != FC_NO_LOOP_ID) + fcport->logout_on_delete = 1; + + if (!EDIF_NEGOTIATION_PENDING(fcport)) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x911e, + "%s %d schedule session deletion\n", __func__, + __LINE__); + qlt_schedule_sess_for_deletion(fcport); + } + } else if (!IS_FWI2_CAPABLE(fcport->vha->hw)) { + qla2x00_port_logout(fcport->vha, fcport); + } + } + + /* check for any straggling io left behind */ + if (qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, 0, WAIT_TARGET)) { + ql_log(ql_log_warn, vha, 0x300b, + "IO not return. Resetting. 
\n"); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + qla2x00_wait_for_chip_reset(vha); + } +} + +static int +qla2x00_issue_lip(struct Scsi_Host *shost) +{ + scsi_qla_host_t *vha = shost_priv(shost); + + if (IS_QLAFX00(vha->hw)) + return 0; + + if (vha->hw->flags.port_isolated) + return 0; + + qla2x00_loop_reset(vha); + return 0; +} + +static struct fc_host_statistics * +qla2x00_get_fc_host_stats(struct Scsi_Host *shost) +{ + scsi_qla_host_t *vha = shost_priv(shost); + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + int rval; + struct link_statistics *stats; + dma_addr_t stats_dma; + struct fc_host_statistics *p = &vha->fc_host_stat; + struct qla_qpair *qpair; + int i; + u64 ib = 0, ob = 0, ir = 0, or = 0; + + memset(p, -1, sizeof(*p)); + + if (IS_QLAFX00(vha->hw)) + goto done; + + if (test_bit(UNLOADING, &vha->dpc_flags)) + goto done; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto done; + + if (qla2x00_chip_is_down(vha)) + goto done; + + stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, + GFP_KERNEL); + if (!stats) { + ql_log(ql_log_warn, vha, 0x707d, + "Failed to allocate memory for stats.\n"); + goto done; + } + + rval = QLA_FUNCTION_FAILED; + if (IS_FWI2_CAPABLE(ha)) { + rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0); + } else if (atomic_read(&base_vha->loop_state) == LOOP_READY && + !ha->dpc_active) { + /* Must be in a 'READY' state for statistics retrieval. */ + rval = qla2x00_get_link_status(base_vha, base_vha->loop_id, + stats, stats_dma); + } + + if (rval != QLA_SUCCESS) + goto done_free; + + /* --- */ + for (i = 0; i < vha->hw->max_qpairs; i++) { + qpair = vha->hw->queue_pair_map[i]; + if (!qpair) + continue; + ir += qpair->counters.input_requests; + or += qpair->counters.output_requests; + ib += qpair->counters.input_bytes; + ob += qpair->counters.output_bytes; + } + ir += ha->base_qpair->counters.input_requests; + or += ha->base_qpair->counters.output_requests; + ib += ha->base_qpair->counters.input_bytes; + ob += ha->base_qpair->counters.output_bytes; + + ir += vha->qla_stats.input_requests; + or += vha->qla_stats.output_requests; + ib += vha->qla_stats.input_bytes; + ob += vha->qla_stats.output_bytes; + /* --- */ + + p->link_failure_count = le32_to_cpu(stats->link_fail_cnt); + p->loss_of_sync_count = le32_to_cpu(stats->loss_sync_cnt); + p->loss_of_signal_count = le32_to_cpu(stats->loss_sig_cnt); + p->prim_seq_protocol_err_count = le32_to_cpu(stats->prim_seq_err_cnt); + p->invalid_tx_word_count = le32_to_cpu(stats->inval_xmit_word_cnt); + p->invalid_crc_count = le32_to_cpu(stats->inval_crc_cnt); + if (IS_FWI2_CAPABLE(ha)) { + p->lip_count = le32_to_cpu(stats->lip_cnt); + p->tx_frames = le32_to_cpu(stats->tx_frames); + p->rx_frames = le32_to_cpu(stats->rx_frames); + p->dumped_frames = le32_to_cpu(stats->discarded_frames); + p->nos_count = le32_to_cpu(stats->nos_rcvd); + p->error_frames = + le32_to_cpu(stats->dropped_frames) + + le32_to_cpu(stats->discarded_frames); + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + p->rx_words = le64_to_cpu(stats->fpm_recv_word_cnt); + p->tx_words = le64_to_cpu(stats->fpm_xmit_word_cnt); + } else { + p->rx_words = ib >> 2; + p->tx_words = ob >> 2; + } + } + + p->fcp_control_requests = vha->qla_stats.control_requests; + p->fcp_input_requests = ir; + p->fcp_output_requests = or; + p->fcp_input_megabytes = ib >> 20; + p->fcp_output_megabytes = ob >> 20; + p->seconds_since_last_reset = + get_jiffies_64() - 
vha->qla_stats.jiffies_at_last_reset; + do_div(p->seconds_since_last_reset, HZ); + +done_free: + dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics), + stats, stats_dma); +done: + return p; +} + +static void +qla2x00_reset_host_stats(struct Scsi_Host *shost) +{ + scsi_qla_host_t *vha = shost_priv(shost); + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + struct link_statistics *stats; + dma_addr_t stats_dma; + int i; + struct qla_qpair *qpair; + + memset(&vha->qla_stats, 0, sizeof(vha->qla_stats)); + memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat)); + for (i = 0; i < vha->hw->max_qpairs; i++) { + qpair = vha->hw->queue_pair_map[i]; + if (!qpair) + continue; + memset(&qpair->counters, 0, sizeof(qpair->counters)); + } + memset(&ha->base_qpair->counters, 0, sizeof(qpair->counters)); + + vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); + + if (IS_FWI2_CAPABLE(ha)) { + int rval; + + stats = dma_alloc_coherent(&ha->pdev->dev, + sizeof(*stats), &stats_dma, GFP_KERNEL); + if (!stats) { + ql_log(ql_log_warn, vha, 0x70d7, + "Failed to allocate memory for stats.\n"); + return; + } + + /* reset firmware statistics */ + rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0); + if (rval != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0x70de, + "Resetting ISP statistics failed: rval = %d\n", + rval); + + dma_free_coherent(&ha->pdev->dev, sizeof(*stats), + stats, stats_dma); + } +} + +static void +qla2x00_get_host_symbolic_name(struct Scsi_Host *shost) +{ + scsi_qla_host_t *vha = shost_priv(shost); + + qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost), + sizeof(fc_host_symbolic_name(shost))); +} + +static void +qla2x00_set_host_system_hostname(struct Scsi_Host *shost) +{ + scsi_qla_host_t *vha = shost_priv(shost); + + set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); +} + +static void +qla2x00_get_host_fabric_name(struct Scsi_Host *shost) +{ + scsi_qla_host_t *vha = shost_priv(shost); + static const uint8_t node_name[WWN_SIZE] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF + }; + u64 fabric_name = wwn_to_u64(node_name); + + if (vha->device_flags & SWITCH_FOUND) + fabric_name = wwn_to_u64(vha->fabric_node_name); + + fc_host_fabric_name(shost) = fabric_name; +} + +static void +qla2x00_get_host_port_state(struct Scsi_Host *shost) +{ + scsi_qla_host_t *vha = shost_priv(shost); + struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev); + + if (!base_vha->flags.online) { + fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; + return; + } + + switch (atomic_read(&base_vha->loop_state)) { + case LOOP_UPDATE: + fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS; + break; + case LOOP_DOWN: + if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags)) + fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS; + else + fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; + break; + case LOOP_DEAD: + fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; + break; + case LOOP_READY: + fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + break; + default: + fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; + break; + } +} + +static int +qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) +{ + int ret = 0; + uint8_t qos = 0; + scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); + scsi_qla_host_t *vha = NULL; + struct qla_hw_data *ha = base_vha->hw; + int cnt; + struct req_que *req = ha->req_q_map[0]; + struct qla_qpair *qpair; + + ret = qla24xx_vport_create_req_sanity_check(fc_vport); + if (ret) { + ql_log(ql_log_warn, 
vha, 0x707e, + "Vport sanity check failed, status %x\n", ret); + return (ret); + } + + vha = qla24xx_create_vhost(fc_vport); + if (vha == NULL) { + ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n"); + return FC_VPORT_FAILED; + } + if (disable) { + atomic_set(&vha->vp_state, VP_OFFLINE); + fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); + } else + atomic_set(&vha->vp_state, VP_FAILED); + + /* ready to create vport */ + ql_log(ql_log_info, vha, 0x7080, + "VP entry id %d assigned.\n", vha->vp_idx); + + /* initialized vport states */ + atomic_set(&vha->loop_state, LOOP_DOWN); + vha->vp_err_state = VP_ERR_PORTDWN; + vha->vp_prev_err_state = VP_ERR_UNKWN; + /* Check if physical ha port is Up */ + if (atomic_read(&base_vha->loop_state) == LOOP_DOWN || + atomic_read(&base_vha->loop_state) == LOOP_DEAD) { + /* Don't retry or attempt login of this virtual port */ + ql_dbg(ql_dbg_user, vha, 0x7081, + "Vport loop state is not UP.\n"); + atomic_set(&vha->loop_state, LOOP_DEAD); + if (!disable) + fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); + } + + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { + if (ha->fw_attributes & BIT_4) { + int prot = 0, guard; + + vha->flags.difdix_supported = 1; + ql_dbg(ql_dbg_user, vha, 0x7082, + "Registered for DIF/DIX type 1 and 3 protection.\n"); + scsi_host_set_prot(vha->host, + prot | SHOST_DIF_TYPE1_PROTECTION + | SHOST_DIF_TYPE2_PROTECTION + | SHOST_DIF_TYPE3_PROTECTION + | SHOST_DIX_TYPE1_PROTECTION + | SHOST_DIX_TYPE2_PROTECTION + | SHOST_DIX_TYPE3_PROTECTION); + + guard = SHOST_DIX_GUARD_CRC; + + if (IS_PI_IPGUARD_CAPABLE(ha) && + (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) + guard |= SHOST_DIX_GUARD_IP; + + scsi_host_set_guard(vha->host, guard); + } else + vha->flags.difdix_supported = 0; + } + + if (scsi_add_host_with_dma(vha->host, &fc_vport->dev, + &ha->pdev->dev)) { + ql_dbg(ql_dbg_user, vha, 0x7083, + "scsi_add_host failure for VP[%d].\n", vha->vp_idx); + goto vport_create_failed_2; + } + + /* initialize attributes */ + fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; + fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); + fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); + fc_host_supported_classes(vha->host) = + fc_host_supported_classes(base_vha->host); + fc_host_supported_speeds(vha->host) = + fc_host_supported_speeds(base_vha->host); + + qlt_vport_create(vha, ha); + qla24xx_vport_disable(fc_vport, disable); + + if (!ql2xmqsupport || !ha->npiv_info) + goto vport_queue; + + /* Create a request queue in QoS mode for the vport */ + for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) { + if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0 + && memcmp(ha->npiv_info[cnt].node_name, vha->node_name, + 8) == 0) { + qos = ha->npiv_info[cnt].q_qos; + break; + } + } + + if (qos) { + qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true); + if (!qpair) + ql_log(ql_log_warn, vha, 0x7084, + "Can't create qpair for VP[%d]\n", + vha->vp_idx); + else { + ql_dbg(ql_dbg_multiq, vha, 0xc001, + "Queue pair: %d Qos: %d) created for VP[%d]\n", + qpair->id, qos, vha->vp_idx); + ql_dbg(ql_dbg_user, vha, 0x7085, + "Queue Pair: %d Qos: %d) created for VP[%d]\n", + qpair->id, qos, vha->vp_idx); + req = qpair->req; + vha->qpair = qpair; + } + } + +vport_queue: + vha->req = req; + return 0; + +vport_create_failed_2: + qla24xx_disable_vp(vha); + qla24xx_deallocate_vp_id(vha); + scsi_host_put(vha->host); + return FC_VPORT_FAILED; +} + +static int +qla24xx_vport_delete(struct fc_vport *fc_vport) +{ + scsi_qla_host_t *vha = 
fc_vport->dd_data; + struct qla_hw_data *ha = vha->hw; + uint16_t id = vha->vp_idx; + + set_bit(VPORT_DELETE, &vha->dpc_flags); + + while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags)) + msleep(1000); + + + qla24xx_disable_vp(vha); + qla2x00_wait_for_sess_deletion(vha); + + qla_nvme_delete(vha); + qla_enode_stop(vha); + qla_edb_stop(vha); + + vha->flags.delete_progress = 1; + + qlt_remove_target(ha, vha); + + fc_remove_host(vha->host); + + scsi_remove_host(vha->host); + + /* Allow timer to run to drain queued items, when removing vp */ + qla24xx_deallocate_vp_id(vha); + + if (vha->timer_active) { + qla2x00_vp_stop_timer(vha); + ql_dbg(ql_dbg_user, vha, 0x7086, + "Timer for the VP[%d] has stopped\n", vha->vp_idx); + } + + qla2x00_free_fcports(vha); + + mutex_lock(&ha->vport_lock); + ha->cur_vport_count--; + clear_bit(vha->vp_idx, ha->vp_idx_map); + mutex_unlock(&ha->vport_lock); + + dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, + vha->gnl.ldma); + + vha->gnl.l = NULL; + + vfree(vha->scan.l); + + if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { + if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0x7087, + "Queue Pair delete failed.\n"); + } + + ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id); + scsi_host_put(vha->host); + return 0; +} + +static int +qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable) +{ + scsi_qla_host_t *vha = fc_vport->dd_data; + + if (disable) + qla24xx_disable_vp(vha); + else + qla24xx_enable_vp(vha); + + return 0; +} + +struct fc_function_template qla2xxx_transport_functions = { + + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_speeds = 1, + + .get_host_port_id = qla2x00_get_host_port_id, + .show_host_port_id = 1, + .get_host_speed = qla2x00_get_host_speed, + .show_host_speed = 1, + .get_host_port_type = qla2x00_get_host_port_type, + .show_host_port_type = 1, + .get_host_symbolic_name = qla2x00_get_host_symbolic_name, + .show_host_symbolic_name = 1, + .set_host_system_hostname = qla2x00_set_host_system_hostname, + .show_host_system_hostname = 1, + .get_host_fabric_name = qla2x00_get_host_fabric_name, + .show_host_fabric_name = 1, + .get_host_port_state = qla2x00_get_host_port_state, + .show_host_port_state = 1, + + .dd_fcrport_size = sizeof(struct fc_port *), + .show_rport_supported_classes = 1, + + .get_starget_node_name = qla2x00_get_starget_node_name, + .show_starget_node_name = 1, + .get_starget_port_name = qla2x00_get_starget_port_name, + .show_starget_port_name = 1, + .get_starget_port_id = qla2x00_get_starget_port_id, + .show_starget_port_id = 1, + + .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + + .issue_fc_host_lip = qla2x00_issue_lip, + .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, + .terminate_rport_io = qla2x00_terminate_rport_io, + .get_fc_host_stats = qla2x00_get_fc_host_stats, + .reset_fc_host_stats = qla2x00_reset_host_stats, + + .vport_create = qla24xx_vport_create, + .vport_disable = qla24xx_vport_disable, + .vport_delete = qla24xx_vport_delete, + .bsg_request = qla24xx_bsg_request, + .bsg_timeout = qla24xx_bsg_timeout, +}; + +struct fc_function_template qla2xxx_transport_vport_functions = { + + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + + .get_host_port_id = qla2x00_get_host_port_id, + .show_host_port_id = 1, + .get_host_speed = qla2x00_get_host_speed, + .show_host_speed = 1, + .get_host_port_type = 
qla2x00_get_host_port_type, + .show_host_port_type = 1, + .get_host_symbolic_name = qla2x00_get_host_symbolic_name, + .show_host_symbolic_name = 1, + .set_host_system_hostname = qla2x00_set_host_system_hostname, + .show_host_system_hostname = 1, + .get_host_fabric_name = qla2x00_get_host_fabric_name, + .show_host_fabric_name = 1, + .get_host_port_state = qla2x00_get_host_port_state, + .show_host_port_state = 1, + + .dd_fcrport_size = sizeof(struct fc_port *), + .show_rport_supported_classes = 1, + + .get_starget_node_name = qla2x00_get_starget_node_name, + .show_starget_node_name = 1, + .get_starget_port_name = qla2x00_get_starget_port_name, + .show_starget_port_name = 1, + .get_starget_port_id = qla2x00_get_starget_port_id, + .show_starget_port_id = 1, + + .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + + .issue_fc_host_lip = qla2x00_issue_lip, + .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, + .terminate_rport_io = qla2x00_terminate_rport_io, + .get_fc_host_stats = qla2x00_get_fc_host_stats, + .reset_fc_host_stats = qla2x00_reset_host_stats, + + .bsg_request = qla24xx_bsg_request, + .bsg_timeout = qla24xx_bsg_timeout, +}; + +static uint +qla2x00_get_host_supported_speeds(scsi_qla_host_t *vha, uint speeds) +{ + uint supported_speeds = FC_PORTSPEED_UNKNOWN; + + if (speeds & FDMI_PORT_SPEED_64GB) + supported_speeds |= FC_PORTSPEED_64GBIT; + if (speeds & FDMI_PORT_SPEED_32GB) + supported_speeds |= FC_PORTSPEED_32GBIT; + if (speeds & FDMI_PORT_SPEED_16GB) + supported_speeds |= FC_PORTSPEED_16GBIT; + if (speeds & FDMI_PORT_SPEED_8GB) + supported_speeds |= FC_PORTSPEED_8GBIT; + if (speeds & FDMI_PORT_SPEED_4GB) + supported_speeds |= FC_PORTSPEED_4GBIT; + if (speeds & FDMI_PORT_SPEED_2GB) + supported_speeds |= FC_PORTSPEED_2GBIT; + if (speeds & FDMI_PORT_SPEED_1GB) + supported_speeds |= FC_PORTSPEED_1GBIT; + + return supported_speeds; +} + +void +qla2x00_init_host_attr(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + u32 speeds = 0, fdmi_speed = 0; + + fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; + fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); + fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); + fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ? 
+ (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3; + fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; + fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; + + fdmi_speed = qla25xx_fdmi_port_speed_capability(ha); + speeds = qla2x00_get_host_supported_speeds(vha, fdmi_speed); + + fc_host_supported_speeds(vha->host) = speeds; +} diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c new file mode 100644 index 000000000..19bb64bdd --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -0,0 +1,3170 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#include "qla_def.h" +#include "qla_gbl.h" + +#include <linux/kthread.h> +#include <linux/vmalloc.h> +#include <linux/delay.h> +#include <linux/bsg-lib.h> + +static void qla2xxx_free_fcport_work(struct work_struct *work) +{ + struct fc_port *fcport = container_of(work, typeof(*fcport), + free_work); + + qla2x00_free_fcport(fcport); +} + +/* BSG support for ELS/CT pass through */ +void qla2x00_bsg_job_done(srb_t *sp, int res) +{ + struct bsg_job *bsg_job = sp->u.bsg_job; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + + ql_dbg(ql_dbg_user, sp->vha, 0x7009, + "%s: sp hdl %x, result=%x bsg ptr %p\n", + __func__, sp->handle, res, bsg_job); + + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + + bsg_reply->result = res; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); +} + +void qla2x00_bsg_sp_free(srb_t *sp) +{ + struct qla_hw_data *ha = sp->vha->hw; + struct bsg_job *bsg_job = sp->u.bsg_job; + struct fc_bsg_request *bsg_request = bsg_job->request; + struct qla_mt_iocb_rqst_fx00 *piocb_rqst; + + if (sp->type == SRB_FXIOCB_BCMD) { + piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) + &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; + + if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) + dma_unmap_sg(&ha->pdev->dev, + bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + + if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) + dma_unmap_sg(&ha->pdev->dev, + bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + } else { + + if (sp->remap.remapped) { + dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf, + sp->remap.rsp.dma); + dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf, + sp->remap.req.dma); + } else { + dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + + dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + } + } + + if (sp->type == SRB_CT_CMD || + sp->type == SRB_FXIOCB_BCMD || + sp->type == SRB_ELS_CMD_HST) { + INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work); + queue_work(ha->wq, &sp->fcport->free_work); + } + + qla2x00_rel_sp(sp); +} + +int +qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha, + struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag) +{ + int i, ret, num_valid; + uint8_t *bcode; + struct qla_fcp_prio_entry *pri_entry; + uint32_t *bcode_val_ptr, bcode_val; + + ret = 1; + num_valid = 0; + bcode = (uint8_t *)pri_cfg; + bcode_val_ptr = (uint32_t *)pri_cfg; + bcode_val = (uint32_t)(*bcode_val_ptr); + + if (bcode_val == 0xFFFFFFFF) { + /* No FCP Priority config data in flash */ + ql_dbg(ql_dbg_user, vha, 0x7051, + "No FCP Priority config data.\n"); + return 0; + } + + if (memcmp(bcode, "HQOS", 4)) { + /* Invalid FCP priority data header*/ + ql_dbg(ql_dbg_user, vha, 0x7052, + "Invalid FCP Priority data header. 
bcode=0x%x.\n", + bcode_val); + return 0; + } + if (flag != 1) + return ret; + + pri_entry = &pri_cfg->entry[0]; + for (i = 0; i < pri_cfg->num_entries; i++) { + if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID) + num_valid++; + pri_entry++; + } + + if (num_valid == 0) { + /* No valid FCP priority data entries */ + ql_dbg(ql_dbg_user, vha, 0x7053, + "No valid FCP Priority data entries.\n"); + ret = 0; + } else { + /* FCP priority data is valid */ + ql_dbg(ql_dbg_user, vha, 0x7054, + "Valid FCP priority data. num entries = %d.\n", + num_valid); + } + + return ret; +} + +static int +qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int ret = 0; + uint32_t len; + uint32_t oper; + + if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) { + ret = -EINVAL; + goto exit_fcp_prio_cfg; + } + + /* Get the sub command */ + oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; + + /* Only set config is allowed if config memory is not allocated */ + if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) { + ret = -EINVAL; + goto exit_fcp_prio_cfg; + } + switch (oper) { + case QLFC_FCP_PRIO_DISABLE: + if (ha->flags.fcp_prio_enabled) { + ha->flags.fcp_prio_enabled = 0; + ha->fcp_prio_cfg->attributes &= + ~FCP_PRIO_ATTR_ENABLE; + qla24xx_update_all_fcp_prio(vha); + bsg_reply->result = DID_OK; + } else { + ret = -EINVAL; + bsg_reply->result = (DID_ERROR << 16); + goto exit_fcp_prio_cfg; + } + break; + + case QLFC_FCP_PRIO_ENABLE: + if (!ha->flags.fcp_prio_enabled) { + if (ha->fcp_prio_cfg) { + ha->flags.fcp_prio_enabled = 1; + ha->fcp_prio_cfg->attributes |= + FCP_PRIO_ATTR_ENABLE; + qla24xx_update_all_fcp_prio(vha); + bsg_reply->result = DID_OK; + } else { + ret = -EINVAL; + bsg_reply->result = (DID_ERROR << 16); + goto exit_fcp_prio_cfg; + } + } + break; + + case QLFC_FCP_PRIO_GET_CONFIG: + len = bsg_job->reply_payload.payload_len; + if (!len || len > FCP_PRIO_CFG_SIZE) { + ret = -EINVAL; + bsg_reply->result = (DID_ERROR << 16); + goto exit_fcp_prio_cfg; + } + + bsg_reply->result = DID_OK; + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer( + bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg, + len); + + break; + + case QLFC_FCP_PRIO_SET_CONFIG: + len = bsg_job->request_payload.payload_len; + if (!len || len > FCP_PRIO_CFG_SIZE) { + bsg_reply->result = (DID_ERROR << 16); + ret = -EINVAL; + goto exit_fcp_prio_cfg; + } + + if (!ha->fcp_prio_cfg) { + ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); + if (!ha->fcp_prio_cfg) { + ql_log(ql_log_warn, vha, 0x7050, + "Unable to allocate memory for fcp prio " + "config data (%x).\n", FCP_PRIO_CFG_SIZE); + bsg_reply->result = (DID_ERROR << 16); + ret = -ENOMEM; + goto exit_fcp_prio_cfg; + } + } + + memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE); + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg, + FCP_PRIO_CFG_SIZE); + + /* validate fcp priority data */ + + if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) { + bsg_reply->result = (DID_ERROR << 16); + ret = -EINVAL; + /* If buffer was invalidatic int + * fcp_prio_cfg is of no use + */ + vfree(ha->fcp_prio_cfg); + ha->fcp_prio_cfg = NULL; + goto exit_fcp_prio_cfg; + } + + ha->flags.fcp_prio_enabled = 0; + if (ha->fcp_prio_cfg->attributes & 
FCP_PRIO_ATTR_ENABLE) + ha->flags.fcp_prio_enabled = 1; + qla24xx_update_all_fcp_prio(vha); + bsg_reply->result = DID_OK; + break; + default: + ret = -EINVAL; + break; + } +exit_fcp_prio_cfg: + if (!ret) + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return ret; +} + +static int +qla2x00_process_els(struct bsg_job *bsg_job) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_rport *rport; + fc_port_t *fcport = NULL; + struct Scsi_Host *host; + scsi_qla_host_t *vha; + struct qla_hw_data *ha; + srb_t *sp; + const char *type; + int req_sg_cnt, rsp_sg_cnt; + int rval = (DID_ERROR << 16); + uint32_t els_cmd = 0; + int qla_port_allocated = 0; + + if (bsg_request->msgcode == FC_BSG_RPT_ELS) { + rport = fc_bsg_to_rport(bsg_job); + if (!rport) { + rval = -ENOMEM; + goto done; + } + fcport = *(fc_port_t **) rport->dd_data; + host = rport_to_shost(rport); + vha = shost_priv(host); + ha = vha->hw; + type = "FC_BSG_RPT_ELS"; + } else { + host = fc_bsg_to_shost(bsg_job); + vha = shost_priv(host); + ha = vha->hw; + type = "FC_BSG_HST_ELS_NOLOGIN"; + els_cmd = bsg_request->rqst_data.h_els.command_code; + if (els_cmd == ELS_AUTH_ELS) + return qla_edif_process_els(vha, bsg_job); + } + + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n"); + rval = -EIO; + goto done; + } + + /* pass through is supported only for ISP 4Gb or higher */ + if (!IS_FWI2_CAPABLE(ha)) { + ql_dbg(ql_dbg_user, vha, 0x7001, + "ELS passthru not supported for ISP23xx based adapters.\n"); + rval = -EPERM; + goto done; + } + + /* Multiple SG's are not supported for ELS requests */ + if (bsg_job->request_payload.sg_cnt > 1 || + bsg_job->reply_payload.sg_cnt > 1) { + ql_dbg(ql_dbg_user, vha, 0x7002, + "Multiple SG's are not supported for ELS requests, " + "request_sg_cnt=%x reply_sg_cnt=%x.\n", + bsg_job->request_payload.sg_cnt, + bsg_job->reply_payload.sg_cnt); + rval = -EPERM; + goto done; + } + + /* ELS request for rport */ + if (bsg_request->msgcode == FC_BSG_RPT_ELS) { + /* make sure the rport is logged in, + * if not perform fabric login + */ + if (atomic_read(&fcport->state) != FCS_ONLINE) { + ql_dbg(ql_dbg_user, vha, 0x7003, + "Port %06X is not online for ELS passthru.\n", + fcport->d_id.b24); + rval = -EIO; + goto done; + } + } else { + /* Allocate a dummy fcport structure, since functions + * preparing the IOCB and mailbox command retrieves port + * specific information from fcport structure. For Host based + * ELS commands there will be no fcport structure allocated + */ + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (!fcport) { + rval = -ENOMEM; + goto done; + } + + qla_port_allocated = 1; + /* Initialize all required fields of fcport */ + fcport->vha = vha; + fcport->d_id.b.al_pa = + bsg_request->rqst_data.h_els.port_id[0]; + fcport->d_id.b.area = + bsg_request->rqst_data.h_els.port_id[1]; + fcport->d_id.b.domain = + bsg_request->rqst_data.h_els.port_id[2]; + fcport->loop_id = + (fcport->d_id.b.al_pa == 0xFD) ? 
+ NPH_FABRIC_CONTROLLER : NPH_F_PORT; + } + + req_sg_cnt = + dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + if (!req_sg_cnt) { + dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + rval = -ENOMEM; + goto done_free_fcport; + } + + rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + if (!rsp_sg_cnt) { + dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + rval = -ENOMEM; + goto done_free_fcport; + } + + if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || + (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { + ql_log(ql_log_warn, vha, 0x7008, + "dma mapping resulted in different sg counts, " + "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x " + "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt, + req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); + rval = -EAGAIN; + goto done_unmap_sg; + } + + /* Alloc SRB structure */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + rval = -ENOMEM; + goto done_unmap_sg; + } + + sp->type = + (bsg_request->msgcode == FC_BSG_RPT_ELS ? + SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST); + sp->name = + (bsg_request->msgcode == FC_BSG_RPT_ELS ? + "bsg_els_rpt" : "bsg_els_hst"); + sp->u.bsg_job = bsg_job; + sp->free = qla2x00_bsg_sp_free; + sp->done = qla2x00_bsg_job_done; + + ql_dbg(ql_dbg_user, vha, 0x700a, + "bsg rqst type: %s els type: %x - loop-id=%x " + "portid=%-2x%02x%02x.\n", type, + bsg_request->rqst_data.h_els.command_code, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x700e, + "qla2x00_start_sp failed = %d\n", rval); + qla2x00_rel_sp(sp); + rval = -EIO; + goto done_unmap_sg; + } + return rval; + +done_unmap_sg: + dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + goto done_free_fcport; + +done_free_fcport: + if (qla_port_allocated) + qla2x00_free_fcport(fcport); +done: + return rval; +} + +static inline uint16_t +qla24xx_calc_ct_iocbs(uint16_t dsds) +{ + uint16_t iocbs; + + iocbs = 1; + if (dsds > 2) { + iocbs += (dsds - 2) / 5; + if ((dsds - 2) % 5) + iocbs++; + } + return iocbs; +} + +static int +qla2x00_process_ct(struct bsg_job *bsg_job) +{ + srb_t *sp; + struct fc_bsg_request *bsg_request = bsg_job->request; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = (DID_ERROR << 16); + int req_sg_cnt, rsp_sg_cnt; + uint16_t loop_id; + struct fc_port *fcport; + char *type = "FC_BSG_HST_CT"; + + req_sg_cnt = + dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + if (!req_sg_cnt) { + ql_log(ql_log_warn, vha, 0x700f, + "dma_map_sg return %d for request\n", req_sg_cnt); + rval = -ENOMEM; + goto done; + } + + rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + if (!rsp_sg_cnt) { + ql_log(ql_log_warn, vha, 0x7010, + "dma_map_sg return %d for reply\n", rsp_sg_cnt); + rval = -ENOMEM; + goto done; + } + + if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) 
|| + (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { + ql_log(ql_log_warn, vha, 0x7011, + "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " + "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, + req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); + rval = -EAGAIN; + goto done_unmap_sg; + } + + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x7012, + "Host is not online.\n"); + rval = -EIO; + goto done_unmap_sg; + } + + loop_id = + (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000) + >> 24; + switch (loop_id) { + case 0xFC: + loop_id = NPH_SNS; + break; + case 0xFA: + loop_id = vha->mgmt_svr_loop_id; + break; + default: + ql_dbg(ql_dbg_user, vha, 0x7013, + "Unknown loop id: %x.\n", loop_id); + rval = -EINVAL; + goto done_unmap_sg; + } + + /* Allocate a dummy fcport structure, since functions preparing the + * IOCB and mailbox command retrieves port specific information + * from fcport structure. For Host based ELS commands there will be + * no fcport structure allocated + */ + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (!fcport) { + ql_log(ql_log_warn, vha, 0x7014, + "Failed to allocate fcport.\n"); + rval = -ENOMEM; + goto done_unmap_sg; + } + + /* Initialize all required fields of fcport */ + fcport->vha = vha; + fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0]; + fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1]; + fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2]; + fcport->loop_id = loop_id; + + /* Alloc SRB structure */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + ql_log(ql_log_warn, vha, 0x7015, + "qla2x00_get_sp failed.\n"); + rval = -ENOMEM; + goto done_free_fcport; + } + + sp->type = SRB_CT_CMD; + sp->name = "bsg_ct"; + sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt); + sp->u.bsg_job = bsg_job; + sp->free = qla2x00_bsg_sp_free; + sp->done = qla2x00_bsg_job_done; + + ql_dbg(ql_dbg_user, vha, 0x7016, + "bsg rqst type: %s else type: %x - " + "loop-id=%x portid=%02x%02x%02x.\n", type, + (bsg_request->rqst_data.h_ct.preamble_word2 >> 16), + fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x7017, + "qla2x00_start_sp failed=%d.\n", rval); + qla2x00_rel_sp(sp); + rval = -EIO; + goto done_free_fcport; + } + return rval; + +done_free_fcport: + qla2x00_free_fcport(fcport); +done_unmap_sg: + dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); +done: + return rval; +} + +/* Disable loopback mode */ +static inline int +qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, + int wait, int wait2) +{ + int ret = 0; + int rval = 0; + uint16_t new_config[4]; + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha)) + goto done_reset_internal; + + memset(new_config, 0 , sizeof(new_config)); + if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == + ENABLE_INTERNAL_LOOPBACK || + (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == + ENABLE_EXTERNAL_LOOPBACK) { + new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK; + ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n", + (new_config[0] & INTERNAL_LOOPBACK_MASK)); + memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ; + + ha->notify_dcbx_comp = wait; + ha->notify_lb_portup_comp = wait2; + + ret 
= qla81xx_set_port_config(vha, new_config); + if (ret != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x7025, + "Set port config failed.\n"); + ha->notify_dcbx_comp = 0; + ha->notify_lb_portup_comp = 0; + rval = -EINVAL; + goto done_reset_internal; + } + + /* Wait for DCBX complete event */ + if (wait && !wait_for_completion_timeout(&ha->dcbx_comp, + (DCBX_COMP_TIMEOUT * HZ))) { + ql_dbg(ql_dbg_user, vha, 0x7026, + "DCBX completion not received.\n"); + ha->notify_dcbx_comp = 0; + ha->notify_lb_portup_comp = 0; + rval = -EINVAL; + goto done_reset_internal; + } else + ql_dbg(ql_dbg_user, vha, 0x7027, + "DCBX completion received.\n"); + + if (wait2 && + !wait_for_completion_timeout(&ha->lb_portup_comp, + (LB_PORTUP_COMP_TIMEOUT * HZ))) { + ql_dbg(ql_dbg_user, vha, 0x70c5, + "Port up completion not received.\n"); + ha->notify_lb_portup_comp = 0; + rval = -EINVAL; + goto done_reset_internal; + } else + ql_dbg(ql_dbg_user, vha, 0x70c6, + "Port up completion received.\n"); + + ha->notify_dcbx_comp = 0; + ha->notify_lb_portup_comp = 0; + } +done_reset_internal: + return rval; +} + +/* + * Set the port configuration to enable the internal or external loopback + * depending on the loopback mode. + */ +static inline int +qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, + uint16_t *new_config, uint16_t mode) +{ + int ret = 0; + int rval = 0; + unsigned long rem_tmo = 0, current_tmo = 0; + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha)) + goto done_set_internal; + + if (mode == INTERNAL_LOOPBACK) + new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1); + else if (mode == EXTERNAL_LOOPBACK) + new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1); + ql_dbg(ql_dbg_user, vha, 0x70be, + "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK)); + + memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3); + + ha->notify_dcbx_comp = 1; + ret = qla81xx_set_port_config(vha, new_config); + if (ret != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x7021, + "set port config failed.\n"); + ha->notify_dcbx_comp = 0; + rval = -EINVAL; + goto done_set_internal; + } + + /* Wait for DCBX complete event */ + current_tmo = DCBX_COMP_TIMEOUT * HZ; + while (1) { + rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp, + current_tmo); + if (!ha->idc_extend_tmo || rem_tmo) { + ha->idc_extend_tmo = 0; + break; + } + current_tmo = ha->idc_extend_tmo * HZ; + ha->idc_extend_tmo = 0; + } + + if (!rem_tmo) { + ql_dbg(ql_dbg_user, vha, 0x7022, + "DCBX completion not received.\n"); + ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0); + /* + * If the reset of the loopback mode doesn't work take a FCoE + * dump and reset the chip. 
+ */ + if (ret) { + qla2xxx_dump_fw(vha); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + } + rval = -EINVAL; + } else { + if (ha->flags.idc_compl_status) { + ql_dbg(ql_dbg_user, vha, 0x70c3, + "Bad status in IDC Completion AEN\n"); + rval = -EINVAL; + ha->flags.idc_compl_status = 0; + } else + ql_dbg(ql_dbg_user, vha, 0x7023, + "DCBX completion received.\n"); + } + + ha->notify_dcbx_comp = 0; + ha->idc_extend_tmo = 0; + +done_set_internal: + return rval; +} + +static int +qla2x00_process_loopback(struct bsg_job *bsg_job) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval; + uint8_t command_sent; + char *type; + struct msg_echo_lb elreq; + uint16_t response[MAILBOX_REGISTER_COUNT]; + uint16_t config[4], new_config[4]; + uint8_t *fw_sts_ptr; + void *req_data = NULL; + dma_addr_t req_data_dma; + uint32_t req_data_len; + uint8_t *rsp_data = NULL; + dma_addr_t rsp_data_dma; + uint32_t rsp_data_len; + + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n"); + return -EIO; + } + + memset(&elreq, 0, sizeof(elreq)); + + elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev, + bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, + DMA_TO_DEVICE); + + if (!elreq.req_sg_cnt) { + ql_log(ql_log_warn, vha, 0x701a, + "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt); + return -ENOMEM; + } + + elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, + bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, + DMA_FROM_DEVICE); + + if (!elreq.rsp_sg_cnt) { + ql_log(ql_log_warn, vha, 0x701b, + "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt); + rval = -ENOMEM; + goto done_unmap_req_sg; + } + + if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) || + (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { + ql_log(ql_log_warn, vha, 0x701c, + "dma mapping resulted in different sg counts, " + "request_sg_cnt: %x dma_request_sg_cnt: %x " + "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n", + bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt, + bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt); + rval = -EAGAIN; + goto done_unmap_sg; + } + req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; + req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len, + &req_data_dma, GFP_KERNEL); + if (!req_data) { + ql_log(ql_log_warn, vha, 0x701d, + "dma alloc failed for req_data.\n"); + rval = -ENOMEM; + goto done_unmap_sg; + } + + rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len, + &rsp_data_dma, GFP_KERNEL); + if (!rsp_data) { + ql_log(ql_log_warn, vha, 0x7004, + "dma alloc failed for rsp_data.\n"); + rval = -ENOMEM; + goto done_free_dma_req; + } + + /* Copy the request buffer in req_data now */ + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, req_data, req_data_len); + + elreq.send_dma = req_data_dma; + elreq.rcv_dma = rsp_data_dma; + elreq.transfer_size = req_data_len; + + elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; + elreq.iteration_count = + bsg_request->rqst_data.h_vendor.vendor_cmd[2]; + + if (atomic_read(&vha->loop_state) == LOOP_READY && + ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) || + ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) && + get_unaligned_le32(req_data) == ELS_OPCODE_BYTE && + req_data_len == MAX_ELS_FRAME_PAYLOAD && + elreq.options == 
EXTERNAL_LOOPBACK))) { + type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; + ql_dbg(ql_dbg_user, vha, 0x701e, + "BSG request type: %s.\n", type); + command_sent = INT_DEF_LB_ECHO_CMD; + rval = qla2x00_echo_test(vha, &elreq, response); + } else { + if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) { + memset(config, 0, sizeof(config)); + memset(new_config, 0, sizeof(new_config)); + + if (qla81xx_get_port_config(vha, config)) { + ql_log(ql_log_warn, vha, 0x701f, + "Get port config failed.\n"); + rval = -EPERM; + goto done_free_dma_rsp; + } + + if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) { + ql_dbg(ql_dbg_user, vha, 0x70c4, + "Loopback operation already in " + "progress.\n"); + rval = -EAGAIN; + goto done_free_dma_rsp; + } + + ql_dbg(ql_dbg_user, vha, 0x70c0, + "elreq.options=%04x\n", elreq.options); + + if (elreq.options == EXTERNAL_LOOPBACK) + if (IS_QLA8031(ha) || IS_QLA8044(ha)) + rval = qla81xx_set_loopback_mode(vha, + config, new_config, elreq.options); + else + rval = qla81xx_reset_loopback_mode(vha, + config, 1, 0); + else + rval = qla81xx_set_loopback_mode(vha, config, + new_config, elreq.options); + + if (rval) { + rval = -EPERM; + goto done_free_dma_rsp; + } + + type = "FC_BSG_HST_VENDOR_LOOPBACK"; + ql_dbg(ql_dbg_user, vha, 0x7028, + "BSG request type: %s.\n", type); + + command_sent = INT_DEF_LB_LOOPBACK_CMD; + rval = qla2x00_loopback_test(vha, &elreq, response); + + if (response[0] == MBS_COMMAND_ERROR && + response[1] == MBS_LB_RESET) { + ql_log(ql_log_warn, vha, 0x7029, + "MBX command error, Aborting ISP.\n"); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + qla2x00_wait_for_chip_reset(vha); + /* Also reset the MPI */ + if (IS_QLA81XX(ha)) { + if (qla81xx_restart_mpi_firmware(vha) != + QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x702a, + "MPI reset failed.\n"); + } + } + + rval = -EIO; + goto done_free_dma_rsp; + } + + if (new_config[0]) { + int ret; + + /* Revert back to original port config + * Also clear internal loopback + */ + ret = qla81xx_reset_loopback_mode(vha, + new_config, 0, 1); + if (ret) { + /* + * If the reset of the loopback mode + * doesn't work take FCoE dump and then + * reset the chip. 
+ */ + qla2xxx_dump_fw(vha); + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + } + + } + + } else { + type = "FC_BSG_HST_VENDOR_LOOPBACK"; + ql_dbg(ql_dbg_user, vha, 0x702b, + "BSG request type: %s.\n", type); + command_sent = INT_DEF_LB_LOOPBACK_CMD; + rval = qla2x00_loopback_test(vha, &elreq, response); + } + } + + if (rval) { + ql_log(ql_log_warn, vha, 0x702c, + "Vendor request %s failed.\n", type); + + rval = 0; + bsg_reply->result = (DID_ERROR << 16); + bsg_reply->reply_payload_rcv_len = 0; + } else { + ql_dbg(ql_dbg_user, vha, 0x702d, + "Vendor request %s completed.\n", type); + bsg_reply->result = (DID_OK << 16); + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, rsp_data, + rsp_data_len); + } + + bsg_job->reply_len = sizeof(struct fc_bsg_reply) + + sizeof(response) + sizeof(uint8_t); + fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply); + memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response, + sizeof(response)); + fw_sts_ptr += sizeof(response); + *fw_sts_ptr = command_sent; + +done_free_dma_rsp: + dma_free_coherent(&ha->pdev->dev, rsp_data_len, + rsp_data, rsp_data_dma); +done_free_dma_req: + dma_free_coherent(&ha->pdev->dev, req_data_len, + req_data, req_data_dma); +done_unmap_sg: + dma_unmap_sg(&ha->pdev->dev, + bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); +done_unmap_req_sg: + dma_unmap_sg(&ha->pdev->dev, + bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + if (!rval) + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rval; +} + +static int +qla84xx_reset(struct bsg_job *bsg_job) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + uint32_t flag; + + if (!IS_QLA84XX(ha)) { + ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n"); + return -EINVAL; + } + + flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; + + rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW); + + if (rval) { + ql_log(ql_log_warn, vha, 0x7030, + "Vendor request 84xx reset failed.\n"); + rval = (DID_ERROR << 16); + + } else { + ql_dbg(ql_dbg_user, vha, 0x7031, + "Vendor request 84xx reset completed.\n"); + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + } + + return rval; +} + +static int +qla84xx_updatefw(struct bsg_job *bsg_job) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + struct verify_chip_entry_84xx *mn = NULL; + dma_addr_t mn_dma, fw_dma; + void *fw_buf = NULL; + int rval = 0; + uint32_t sg_cnt; + uint32_t data_len; + uint16_t options; + uint32_t flag; + uint32_t fw_ver; + + if (!IS_QLA84XX(ha)) { + ql_dbg(ql_dbg_user, vha, 0x7032, + "Not 84xx, exiting.\n"); + return -EINVAL; + } + + sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + if (!sg_cnt) { + ql_log(ql_log_warn, vha, 0x7033, + "dma_map_sg returned %d for request.\n", sg_cnt); + return -ENOMEM; + } + + if (sg_cnt != bsg_job->request_payload.sg_cnt) { + ql_log(ql_log_warn, vha, 0x7034, + "DMA mapping resulted in different sg counts, " + 
"request_sg_cnt: %x dma_request_sg_cnt: %x.\n", + bsg_job->request_payload.sg_cnt, sg_cnt); + rval = -EAGAIN; + goto done_unmap_sg; + } + + data_len = bsg_job->request_payload.payload_len; + fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len, + &fw_dma, GFP_KERNEL); + if (!fw_buf) { + ql_log(ql_log_warn, vha, 0x7035, + "DMA alloc failed for fw_buf.\n"); + rval = -ENOMEM; + goto done_unmap_sg; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, fw_buf, data_len); + + mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); + if (!mn) { + ql_log(ql_log_warn, vha, 0x7036, + "DMA alloc failed for fw buffer.\n"); + rval = -ENOMEM; + goto done_free_fw_buf; + } + + flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; + fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2); + + mn->entry_type = VERIFY_CHIP_IOCB_TYPE; + mn->entry_count = 1; + + options = VCO_FORCE_UPDATE | VCO_END_OF_DATA; + if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD) + options |= VCO_DIAG_FW; + + mn->options = cpu_to_le16(options); + mn->fw_ver = cpu_to_le32(fw_ver); + mn->fw_size = cpu_to_le32(data_len); + mn->fw_seq_size = cpu_to_le32(data_len); + put_unaligned_le64(fw_dma, &mn->dsd.address); + mn->dsd.length = cpu_to_le32(data_len); + mn->data_seg_cnt = cpu_to_le16(1); + + rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); + + if (rval) { + ql_log(ql_log_warn, vha, 0x7037, + "Vendor request 84xx updatefw failed.\n"); + + rval = (DID_ERROR << 16); + } else { + ql_dbg(ql_dbg_user, vha, 0x7038, + "Vendor request 84xx updatefw completed.\n"); + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->result = DID_OK; + } + + dma_pool_free(ha->s_dma_pool, mn, mn_dma); + +done_free_fw_buf: + dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma); + +done_unmap_sg: + dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + + if (!rval) + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rval; +} + +static int +qla84xx_mgmt_cmd(struct bsg_job *bsg_job) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + struct access_chip_84xx *mn = NULL; + dma_addr_t mn_dma, mgmt_dma; + void *mgmt_b = NULL; + int rval = 0; + struct qla_bsg_a84_mgmt *ql84_mgmt; + uint32_t sg_cnt; + uint32_t data_len = 0; + uint32_t dma_direction = DMA_NONE; + + if (!IS_QLA84XX(ha)) { + ql_log(ql_log_warn, vha, 0x703a, + "Not 84xx, exiting.\n"); + return -EINVAL; + } + + mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); + if (!mn) { + ql_log(ql_log_warn, vha, 0x703c, + "DMA alloc failed for fw buffer.\n"); + return -ENOMEM; + } + + mn->entry_type = ACCESS_CHIP_IOCB_TYPE; + mn->entry_count = 1; + ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request); + switch (ql84_mgmt->mgmt.cmd) { + case QLA84_MGMT_READ_MEM: + case QLA84_MGMT_GET_INFO: + sg_cnt = dma_map_sg(&ha->pdev->dev, + bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + if (!sg_cnt) { + ql_log(ql_log_warn, vha, 0x703d, + "dma_map_sg returned %d for reply.\n", sg_cnt); + rval = -ENOMEM; + goto exit_mgmt; + } + + dma_direction = DMA_FROM_DEVICE; + + if (sg_cnt != bsg_job->reply_payload.sg_cnt) { + ql_log(ql_log_warn, vha, 0x703e, + "DMA mapping resulted in different sg counts, " + "reply_sg_cnt: %x dma_reply_sg_cnt: 
%x.\n", + bsg_job->reply_payload.sg_cnt, sg_cnt); + rval = -EAGAIN; + goto done_unmap_sg; + } + + data_len = bsg_job->reply_payload.payload_len; + + mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, + &mgmt_dma, GFP_KERNEL); + if (!mgmt_b) { + ql_log(ql_log_warn, vha, 0x703f, + "DMA alloc failed for mgmt_b.\n"); + rval = -ENOMEM; + goto done_unmap_sg; + } + + if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) { + mn->options = cpu_to_le16(ACO_DUMP_MEMORY); + mn->parameter1 = + cpu_to_le32( + ql84_mgmt->mgmt.mgmtp.u.mem.start_addr); + + } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) { + mn->options = cpu_to_le16(ACO_REQUEST_INFO); + mn->parameter1 = + cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type); + + mn->parameter2 = + cpu_to_le32( + ql84_mgmt->mgmt.mgmtp.u.info.context); + } + break; + + case QLA84_MGMT_WRITE_MEM: + sg_cnt = dma_map_sg(&ha->pdev->dev, + bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + + if (!sg_cnt) { + ql_log(ql_log_warn, vha, 0x7040, + "dma_map_sg returned %d.\n", sg_cnt); + rval = -ENOMEM; + goto exit_mgmt; + } + + dma_direction = DMA_TO_DEVICE; + + if (sg_cnt != bsg_job->request_payload.sg_cnt) { + ql_log(ql_log_warn, vha, 0x7041, + "DMA mapping resulted in different sg counts, " + "request_sg_cnt: %x dma_request_sg_cnt: %x.\n", + bsg_job->request_payload.sg_cnt, sg_cnt); + rval = -EAGAIN; + goto done_unmap_sg; + } + + data_len = bsg_job->request_payload.payload_len; + mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, + &mgmt_dma, GFP_KERNEL); + if (!mgmt_b) { + ql_log(ql_log_warn, vha, 0x7042, + "DMA alloc failed for mgmt_b.\n"); + rval = -ENOMEM; + goto done_unmap_sg; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, mgmt_b, data_len); + + mn->options = cpu_to_le16(ACO_LOAD_MEMORY); + mn->parameter1 = + cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr); + break; + + case QLA84_MGMT_CHNG_CONFIG: + mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM); + mn->parameter1 = + cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id); + + mn->parameter2 = + cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0); + + mn->parameter3 = + cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1); + break; + + default: + rval = -EIO; + goto exit_mgmt; + } + + if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) { + mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len); + mn->dseg_count = cpu_to_le16(1); + put_unaligned_le64(mgmt_dma, &mn->dsd.address); + mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len); + } + + rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0); + + if (rval) { + ql_log(ql_log_warn, vha, 0x7043, + "Vendor request 84xx mgmt failed.\n"); + + rval = (DID_ERROR << 16); + + } else { + ql_dbg(ql_dbg_user, vha, 0x7044, + "Vendor request 84xx mgmt completed.\n"); + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->result = DID_OK; + + if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) || + (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) { + bsg_reply->reply_payload_rcv_len = + bsg_job->reply_payload.payload_len; + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, mgmt_b, + data_len); + } + } + +done_unmap_sg: + if (mgmt_b) + dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma); + + if (dma_direction == DMA_TO_DEVICE) + dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + else if (dma_direction == DMA_FROM_DEVICE) + dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, + 
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + +exit_mgmt: + dma_pool_free(ha->s_dma_pool, mn, mn_dma); + + if (!rval) + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rval; +} + +static int +qla24xx_iidma(struct bsg_job *bsg_job) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + int rval = 0; + struct qla_port_param *port_param = NULL; + fc_port_t *fcport = NULL; + int found = 0; + uint16_t mb[MAILBOX_REGISTER_COUNT]; + uint8_t *rsp_ptr = NULL; + + if (!IS_IIDMA_CAPABLE(vha->hw)) { + ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n"); + return -EINVAL; + } + + port_param = (void *)bsg_request + sizeof(struct fc_bsg_request); + if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) { + ql_log(ql_log_warn, vha, 0x7048, + "Invalid destination type.\n"); + return -EINVAL; + } + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->port_type != FCT_TARGET) + continue; + + if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn, + fcport->port_name, sizeof(fcport->port_name))) + continue; + + found = 1; + break; + } + + if (!found) { + ql_log(ql_log_warn, vha, 0x7049, + "Failed to find port.\n"); + return -EINVAL; + } + + if (atomic_read(&fcport->state) != FCS_ONLINE) { + ql_log(ql_log_warn, vha, 0x704a, + "Port is not online.\n"); + return -EINVAL; + } + + if (fcport->flags & FCF_LOGIN_NEEDED) { + ql_log(ql_log_warn, vha, 0x704b, + "Remote port not logged in flags = 0x%x.\n", fcport->flags); + return -EINVAL; + } + + if (port_param->mode) + rval = qla2x00_set_idma_speed(vha, fcport->loop_id, + port_param->speed, mb); + else + rval = qla2x00_get_idma_speed(vha, fcport->loop_id, + &port_param->speed, mb); + + if (rval) { + ql_log(ql_log_warn, vha, 0x704c, + "iiDMA cmd failed for %8phN -- " + "%04x %x %04x %04x.\n", fcport->port_name, + rval, fcport->fp_speed, mb[0], mb[1]); + rval = (DID_ERROR << 16); + } else { + if (!port_param->mode) { + bsg_job->reply_len = sizeof(struct fc_bsg_reply) + + sizeof(struct qla_port_param); + + rsp_ptr = ((uint8_t *)bsg_reply) + + sizeof(struct fc_bsg_reply); + + memcpy(rsp_ptr, port_param, + sizeof(struct qla_port_param)); + } + + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + } + + return rval; +} + +static int +qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha, + uint8_t is_update) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + uint32_t start = 0; + int valid = 0; + struct qla_hw_data *ha = vha->hw; + + if (unlikely(pci_channel_offline(ha->pdev))) + return -EINVAL; + + start = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; + if (start > ha->optrom_size) { + ql_log(ql_log_warn, vha, 0x7055, + "start %d > optrom_size %d.\n", start, ha->optrom_size); + return -EINVAL; + } + + if (ha->optrom_state != QLA_SWAITING) { + ql_log(ql_log_info, vha, 0x7056, + "optrom_state %d.\n", ha->optrom_state); + return -EBUSY; + } + + ha->optrom_region_start = start; + ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update); + if (is_update) { + if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) + valid = 1; + else if (start == (ha->flt_region_boot * 4) || + start == (ha->flt_region_fw * 4)) + valid = 1; + else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || + IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) + valid = 1; + if 
(!valid) { + ql_log(ql_log_warn, vha, 0x7058, + "Invalid start region 0x%x/0x%x.\n", start, + bsg_job->request_payload.payload_len); + return -EINVAL; + } + + ha->optrom_region_size = start + + bsg_job->request_payload.payload_len > ha->optrom_size ? + ha->optrom_size - start : + bsg_job->request_payload.payload_len; + ha->optrom_state = QLA_SWRITING; + } else { + ha->optrom_region_size = start + + bsg_job->reply_payload.payload_len > ha->optrom_size ? + ha->optrom_size - start : + bsg_job->reply_payload.payload_len; + ha->optrom_state = QLA_SREADING; + } + + ha->optrom_buffer = vzalloc(ha->optrom_region_size); + if (!ha->optrom_buffer) { + ql_log(ql_log_warn, vha, 0x7059, + "Read: Unable to allocate memory for optrom retrieval " + "(%x)\n", ha->optrom_region_size); + + ha->optrom_state = QLA_SWAITING; + return -ENOMEM; + } + + return 0; +} + +static int +qla2x00_read_optrom(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + + if (ha->flags.nic_core_reset_hdlr_active) + return -EBUSY; + + mutex_lock(&ha->optrom_mutex); + rval = qla2x00_optrom_setup(bsg_job, vha, 0); + if (rval) { + mutex_unlock(&ha->optrom_mutex); + return rval; + } + + ha->isp_ops->read_optrom(vha, ha->optrom_buffer, + ha->optrom_region_start, ha->optrom_region_size); + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, ha->optrom_buffer, + ha->optrom_region_size); + + bsg_reply->reply_payload_rcv_len = ha->optrom_region_size; + bsg_reply->result = DID_OK; + vfree(ha->optrom_buffer); + ha->optrom_buffer = NULL; + ha->optrom_state = QLA_SWAITING; + mutex_unlock(&ha->optrom_mutex); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rval; +} + +static int +qla2x00_update_optrom(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + + mutex_lock(&ha->optrom_mutex); + rval = qla2x00_optrom_setup(bsg_job, vha, 1); + if (rval) { + mutex_unlock(&ha->optrom_mutex); + return rval; + } + + /* Set the isp82xx_no_md_cap not to capture minidump */ + ha->flags.isp82xx_no_md_cap = 1; + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, ha->optrom_buffer, + ha->optrom_region_size); + + rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer, + ha->optrom_region_start, ha->optrom_region_size); + + if (rval) { + bsg_reply->result = -EINVAL; + rval = -EINVAL; + } else { + bsg_reply->result = DID_OK; + } + vfree(ha->optrom_buffer); + ha->optrom_buffer = NULL; + ha->optrom_state = QLA_SWAITING; + mutex_unlock(&ha->optrom_mutex); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rval; +} + +static int +qla2x00_update_fru_versions(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + uint8_t bsg[DMA_POOL_SIZE]; + struct qla_image_version_list *list = (void *)bsg; + struct qla_image_version *image; + uint32_t count; + dma_addr_t sfp_dma; + void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); + + if (!sfp) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + 
EXT_STATUS_NO_MEMORY; + goto done; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, list, sizeof(bsg)); + + image = list->version; + count = list->count; + while (count--) { + memcpy(sfp, &image->field_info, sizeof(image->field_info)); + rval = qla2x00_write_sfp(vha, sfp_dma, sfp, + image->field_address.device, image->field_address.offset, + sizeof(image->field_info), image->field_address.option); + if (rval) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_MAILBOX; + goto dealloc; + } + image++; + } + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + +dealloc: + dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); + +done: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return 0; +} + +static int +qla2x00_read_fru_status(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + uint8_t bsg[DMA_POOL_SIZE]; + struct qla_status_reg *sr = (void *)bsg; + dma_addr_t sfp_dma; + uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); + + if (!sfp) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_NO_MEMORY; + goto done; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); + + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, + sr->field_address.device, sr->field_address.offset, + sizeof(sr->status_reg), sr->field_address.option); + sr->status_reg = *sfp; + + if (rval) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_MAILBOX; + goto dealloc; + } + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr)); + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + +dealloc: + dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); + +done: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->reply_payload_rcv_len = sizeof(*sr); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return 0; +} + +static int +qla2x00_write_fru_status(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + uint8_t bsg[DMA_POOL_SIZE]; + struct qla_status_reg *sr = (void *)bsg; + dma_addr_t sfp_dma; + uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); + + if (!sfp) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_NO_MEMORY; + goto done; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); + + *sfp = sr->status_reg; + rval = qla2x00_write_sfp(vha, sfp_dma, sfp, + sr->field_address.device, sr->field_address.offset, + sizeof(sr->status_reg), sr->field_address.option); + + if (rval) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_MAILBOX; + goto dealloc; + } + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + +dealloc: + dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); + +done: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return 0; +} + +static 
int +qla2x00_write_i2c(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + uint8_t bsg[DMA_POOL_SIZE]; + struct qla_i2c_access *i2c = (void *)bsg; + dma_addr_t sfp_dma; + uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); + + if (!sfp) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_NO_MEMORY; + goto done; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c)); + + memcpy(sfp, i2c->buffer, i2c->length); + rval = qla2x00_write_sfp(vha, sfp_dma, sfp, + i2c->device, i2c->offset, i2c->length, i2c->option); + + if (rval) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_MAILBOX; + goto dealloc; + } + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + +dealloc: + dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); + +done: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return 0; +} + +static int +qla2x00_read_i2c(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + uint8_t bsg[DMA_POOL_SIZE]; + struct qla_i2c_access *i2c = (void *)bsg; + dma_addr_t sfp_dma; + uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); + + if (!sfp) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_NO_MEMORY; + goto done; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c)); + + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, + i2c->device, i2c->offset, i2c->length, i2c->option); + + if (rval) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_MAILBOX; + goto dealloc; + } + + memcpy(i2c->buffer, sfp, i2c->length); + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c)); + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + +dealloc: + dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); + +done: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->reply_payload_rcv_len = sizeof(*i2c); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return 0; +} + +static int +qla24xx_process_bidir_cmd(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + uint32_t rval = EXT_STATUS_OK; + uint16_t req_sg_cnt = 0; + uint16_t rsp_sg_cnt = 0; + uint16_t nextlid = 0; + uint32_t tot_dsds; + srb_t *sp = NULL; + uint32_t req_data_len; + uint32_t rsp_data_len; + + /* Check the type of the adapter */ + if (!IS_BIDI_CAPABLE(ha)) { + ql_log(ql_log_warn, vha, 0x70a0, + "This adapter is not supported\n"); + rval = EXT_STATUS_NOT_SUPPORTED; + goto done; + } + + if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || + test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || + test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { + rval = EXT_STATUS_BUSY; + goto done; + } + + /* Check if host is online */ + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x70a1, + "Host is not online\n"); + rval = 
EXT_STATUS_DEVICE_OFFLINE; + goto done; + } + + /* Check if cable is plugged in or not */ + if (vha->device_flags & DFLG_NO_CABLE) { + ql_log(ql_log_warn, vha, 0x70a2, + "Cable is unplugged...\n"); + rval = EXT_STATUS_INVALID_CFG; + goto done; + } + + /* Check if the switch is connected or not */ + if (ha->current_topology != ISP_CFG_F) { + ql_log(ql_log_warn, vha, 0x70a3, + "Host is not connected to the switch\n"); + rval = EXT_STATUS_INVALID_CFG; + goto done; + } + + /* Check if operating mode is P2P */ + if (ha->operating_mode != P2P) { + ql_log(ql_log_warn, vha, 0x70a4, + "Host operating mode is not P2p\n"); + rval = EXT_STATUS_INVALID_CFG; + goto done; + } + + mutex_lock(&ha->selflogin_lock); + if (vha->self_login_loop_id == 0) { + /* Initialize all required fields of fcport */ + vha->bidir_fcport.vha = vha; + vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa; + vha->bidir_fcport.d_id.b.area = vha->d_id.b.area; + vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain; + vha->bidir_fcport.loop_id = vha->loop_id; + + if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) { + ql_log(ql_log_warn, vha, 0x70a7, + "Failed to login port %06X for bidirectional IOCB\n", + vha->bidir_fcport.d_id.b24); + mutex_unlock(&ha->selflogin_lock); + rval = EXT_STATUS_MAILBOX; + goto done; + } + vha->self_login_loop_id = nextlid - 1; + + } + /* Assign the self login loop id to fcport */ + mutex_unlock(&ha->selflogin_lock); + + vha->bidir_fcport.loop_id = vha->self_login_loop_id; + + req_sg_cnt = dma_map_sg(&ha->pdev->dev, + bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, + DMA_TO_DEVICE); + + if (!req_sg_cnt) { + rval = EXT_STATUS_NO_MEMORY; + goto done; + } + + rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, + bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, + DMA_FROM_DEVICE); + + if (!rsp_sg_cnt) { + rval = EXT_STATUS_NO_MEMORY; + goto done_unmap_req_sg; + } + + if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || + (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { + ql_dbg(ql_dbg_user, vha, 0x70a9, + "Dma mapping resulted in different sg counts " + "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: " + "%x dma_reply_sg_cnt: %x]\n", + bsg_job->request_payload.sg_cnt, req_sg_cnt, + bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); + rval = EXT_STATUS_NO_MEMORY; + goto done_unmap_sg; + } + + req_data_len = bsg_job->request_payload.payload_len; + rsp_data_len = bsg_job->reply_payload.payload_len; + + if (req_data_len != rsp_data_len) { + rval = EXT_STATUS_BUSY; + ql_log(ql_log_warn, vha, 0x70aa, + "req_data_len != rsp_data_len\n"); + goto done_unmap_sg; + } + + /* Alloc SRB structure */ + sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL); + if (!sp) { + ql_dbg(ql_dbg_user, vha, 0x70ac, + "Alloc SRB structure failed\n"); + rval = EXT_STATUS_NO_MEMORY; + goto done_unmap_sg; + } + + /*Populate srb->ctx with bidir ctx*/ + sp->u.bsg_job = bsg_job; + sp->free = qla2x00_bsg_sp_free; + sp->type = SRB_BIDI_CMD; + sp->done = qla2x00_bsg_job_done; + + /* Add the read and write sg count */ + tot_dsds = rsp_sg_cnt + req_sg_cnt; + + rval = qla2x00_start_bidir(sp, vha, tot_dsds); + if (rval != EXT_STATUS_OK) + goto done_free_srb; + /* the bsg request will be completed in the interrupt handler */ + return rval; + +done_free_srb: + mempool_free(sp, ha->srb_mempool); +done_unmap_sg: + dma_unmap_sg(&ha->pdev->dev, + bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); +done_unmap_req_sg: + dma_unmap_sg(&ha->pdev->dev, + 
bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); +done: + + /* Return an error vendor specific response + * and complete the bsg request + */ + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->reply_payload_rcv_len = 0; + bsg_reply->result = (DID_OK) << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + /* Always return success, vendor rsp carries correct status */ + return 0; +} + +static int +qlafx00_mgmt_cmd(struct bsg_job *bsg_job) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = (DID_ERROR << 16); + struct qla_mt_iocb_rqst_fx00 *piocb_rqst; + srb_t *sp; + int req_sg_cnt = 0, rsp_sg_cnt = 0; + struct fc_port *fcport; + char *type = "FC_BSG_HST_FX_MGMT"; + + /* Copy the IOCB specific information */ + piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) + &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; + + /* Dump the vendor information */ + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf, + piocb_rqst, sizeof(*piocb_rqst)); + + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x70d0, + "Host is not online.\n"); + rval = -EIO; + goto done; + } + + if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) { + req_sg_cnt = dma_map_sg(&ha->pdev->dev, + bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + if (!req_sg_cnt) { + ql_log(ql_log_warn, vha, 0x70c7, + "dma_map_sg return %d for request\n", req_sg_cnt); + rval = -ENOMEM; + goto done; + } + } + + if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) { + rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, + bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + if (!rsp_sg_cnt) { + ql_log(ql_log_warn, vha, 0x70c8, + "dma_map_sg return %d for reply\n", rsp_sg_cnt); + rval = -ENOMEM; + goto done_unmap_req_sg; + } + } + + ql_dbg(ql_dbg_user, vha, 0x70c9, + "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " + "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, + req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); + + /* Allocate a dummy fcport structure, since functions preparing the + * IOCB and mailbox command retrieves port specific information + * from fcport structure. 
For Host based ELS commands there will be + * no fcport structure allocated + */ + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (!fcport) { + ql_log(ql_log_warn, vha, 0x70ca, + "Failed to allocate fcport.\n"); + rval = -ENOMEM; + goto done_unmap_rsp_sg; + } + + /* Alloc SRB structure */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + ql_log(ql_log_warn, vha, 0x70cb, + "qla2x00_get_sp failed.\n"); + rval = -ENOMEM; + goto done_free_fcport; + } + + /* Initialize all required fields of fcport */ + fcport->vha = vha; + fcport->loop_id = le32_to_cpu(piocb_rqst->dataword); + + sp->type = SRB_FXIOCB_BCMD; + sp->name = "bsg_fx_mgmt"; + sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt); + sp->u.bsg_job = bsg_job; + sp->free = qla2x00_bsg_sp_free; + sp->done = qla2x00_bsg_job_done; + + ql_dbg(ql_dbg_user, vha, 0x70cc, + "bsg rqst type: %s fx_mgmt_type: %x id=%x\n", + type, piocb_rqst->func_type, fcport->loop_id); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x70cd, + "qla2x00_start_sp failed=%d.\n", rval); + mempool_free(sp, ha->srb_mempool); + rval = -EIO; + goto done_free_fcport; + } + return rval; + +done_free_fcport: + qla2x00_free_fcport(fcport); + +done_unmap_rsp_sg: + if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) + dma_unmap_sg(&ha->pdev->dev, + bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); +done_unmap_req_sg: + if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) + dma_unmap_sg(&ha->pdev->dev, + bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + +done: + return rval; +} + +static int +qla26xx_serdes_op(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + int rval = 0; + struct qla_serdes_reg sr; + + memset(&sr, 0, sizeof(sr)); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &sr, sizeof(sr)); + + switch (sr.cmd) { + case INT_SC_SERDES_WRITE_REG: + rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val); + bsg_reply->reply_payload_rcv_len = 0; + break; + case INT_SC_SERDES_READ_REG: + rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val); + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); + bsg_reply->reply_payload_rcv_len = sizeof(sr); + break; + default: + ql_dbg(ql_dbg_user, vha, 0x708c, + "Unknown serdes cmd %x.\n", sr.cmd); + rval = -EINVAL; + break; + } + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + rval ? 
EXT_STATUS_MAILBOX : 0; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return 0; +} + +static int +qla8044_serdes_op(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + int rval = 0; + struct qla_serdes_reg_ex sr; + + memset(&sr, 0, sizeof(sr)); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &sr, sizeof(sr)); + + switch (sr.cmd) { + case INT_SC_SERDES_WRITE_REG: + rval = qla8044_write_serdes_word(vha, sr.addr, sr.val); + bsg_reply->reply_payload_rcv_len = 0; + break; + case INT_SC_SERDES_READ_REG: + rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val); + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); + bsg_reply->reply_payload_rcv_len = sizeof(sr); + break; + default: + ql_dbg(ql_dbg_user, vha, 0x7020, + "Unknown serdes cmd %x.\n", sr.cmd); + rval = -EINVAL; + break; + } + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + rval ? EXT_STATUS_MAILBOX : 0; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return 0; +} + +static int +qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + struct qla_flash_update_caps cap; + + if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha)) + return -EPERM; + + memset(&cap, 0, sizeof(cap)); + cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 | + (uint64_t)ha->fw_attributes_ext[0] << 32 | + (uint64_t)ha->fw_attributes_h << 16 | + (uint64_t)ha->fw_attributes; + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap)); + bsg_reply->reply_payload_rcv_len = sizeof(cap); + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_OK; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return 0; +} + +static int +qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + uint64_t online_fw_attr = 0; + struct qla_flash_update_caps cap; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return -EPERM; + + memset(&cap, 0, sizeof(cap)); + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &cap, sizeof(cap)); + + online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 | + (uint64_t)ha->fw_attributes_ext[0] << 32 | + (uint64_t)ha->fw_attributes_h << 16 | + (uint64_t)ha->fw_attributes; + + if (online_fw_attr != cap.capabilities) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_INVALID_PARAM; + return -EINVAL; + } + + if (cap.outage_duration < MAX_LOOP_TIMEOUT) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_INVALID_PARAM; + return -EINVAL; + } + + bsg_reply->reply_payload_rcv_len = 0; + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_OK; + + bsg_job->reply_len = sizeof(struct 
fc_bsg_reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return 0; +} + +static int +qla27xx_get_bbcr_data(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + struct qla_bbcr_data bbcr; + uint16_t loop_id, topo, sw_cap; + uint8_t domain, area, al_pa, state; + int rval; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return -EPERM; + + memset(&bbcr, 0, sizeof(bbcr)); + + if (vha->flags.bbcr_enable) + bbcr.status = QLA_BBCR_STATUS_ENABLED; + else + bbcr.status = QLA_BBCR_STATUS_DISABLED; + + if (bbcr.status == QLA_BBCR_STATUS_ENABLED) { + rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa, + &area, &domain, &topo, &sw_cap); + if (rval != QLA_SUCCESS) { + bbcr.status = QLA_BBCR_STATUS_UNKNOWN; + bbcr.state = QLA_BBCR_STATE_OFFLINE; + bbcr.mbx1 = loop_id; + goto done; + } + + state = (vha->bbcr >> 12) & 0x1; + + if (state) { + bbcr.state = QLA_BBCR_STATE_OFFLINE; + bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT; + } else { + bbcr.state = QLA_BBCR_STATE_ONLINE; + bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf; + } + + bbcr.configured_bbscn = vha->bbcr & 0xf; + } + +done: + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr)); + bsg_reply->reply_payload_rcv_len = sizeof(bbcr); + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return 0; +} + +static int +qla2x00_get_priv_stats(struct bsg_job *bsg_job) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + struct link_statistics *stats = NULL; + dma_addr_t stats_dma; + int rval; + uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd; + uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0; + + if (test_bit(UNLOADING, &vha->dpc_flags)) + return -ENODEV; + + if (unlikely(pci_channel_offline(ha->pdev))) + return -ENODEV; + + if (qla2x00_reset_active(vha)) + return -EBUSY; + + if (!IS_FWI2_CAPABLE(ha)) + return -EPERM; + + stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, + GFP_KERNEL); + if (!stats) { + ql_log(ql_log_warn, vha, 0x70e2, + "Failed to allocate memory for stats.\n"); + return -ENOMEM; + } + + rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options); + + if (rval == QLA_SUCCESS) { + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5, + stats, sizeof(*stats)); + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats)); + } + + bsg_reply->reply_payload_rcv_len = sizeof(*stats); + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + rval ? 
EXT_STATUS_MAILBOX : EXT_STATUS_OK; + + bsg_job->reply_len = sizeof(*bsg_reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + dma_free_coherent(&ha->pdev->dev, sizeof(*stats), + stats, stats_dma); + + return 0; +} + +static int +qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + int rval; + struct qla_dport_diag *dd; + + if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && + !IS_QLA28XX(vha->hw)) + return -EPERM; + + dd = kmalloc(sizeof(*dd), GFP_KERNEL); + if (!dd) { + ql_log(ql_log_warn, vha, 0x70db, + "Failed to allocate memory for dport.\n"); + return -ENOMEM; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, dd, sizeof(*dd)); + + rval = qla26xx_dport_diagnostics( + vha, dd->buf, sizeof(dd->buf), dd->options); + if (rval == QLA_SUCCESS) { + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd)); + } + + bsg_reply->reply_payload_rcv_len = sizeof(*dd); + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK; + + bsg_job->reply_len = sizeof(*bsg_reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + kfree(dd); + + return 0; +} + +static int +qla2x00_do_dport_diagnostics_v2(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + int rval; + struct qla_dport_diag_v2 *dd; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + uint16_t options; + + if (!IS_DPORT_CAPABLE(vha->hw)) + return -EPERM; + + dd = kzalloc(sizeof(*dd), GFP_KERNEL); + if (!dd) + return -ENOMEM; + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, dd, sizeof(*dd)); + + options = dd->options; + + /* Check dport Test in progress */ + if (options == QLA_GET_DPORT_RESULT_V2 && + vha->dport_status & DPORT_DIAG_IN_PROGRESS) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_DPORT_DIAG_IN_PROCESS; + goto dportcomplete; + } + + /* Check chip reset in progress and start/restart requests arrive */ + if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS && + (options == QLA_START_DPORT_TEST_V2 || + options == QLA_RESTART_DPORT_TEST_V2)) { + vha->dport_status &= ~DPORT_DIAG_CHIP_RESET_IN_PROGRESS; + } + + /* Check chip reset in progress and get result request arrive */ + if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS && + options == QLA_GET_DPORT_RESULT_V2) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_DPORT_DIAG_NOT_RUNNING; + goto dportcomplete; + } + + rval = qla26xx_dport_diagnostics_v2(vha, dd, mcp); + + if (rval == QLA_SUCCESS) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_OK; + if (options == QLA_START_DPORT_TEST_V2 || + options == QLA_RESTART_DPORT_TEST_V2) { + dd->mbx1 = mcp->mb[0]; + dd->mbx2 = mcp->mb[1]; + vha->dport_status |= DPORT_DIAG_IN_PROGRESS; + } else if (options == QLA_GET_DPORT_RESULT_V2) { + dd->mbx1 = le16_to_cpu(vha->dport_data[1]); + dd->mbx2 = le16_to_cpu(vha->dport_data[2]); + } + } else { + dd->mbx1 = mcp->mb[0]; + dd->mbx2 = mcp->mb[1]; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_DPORT_DIAG_ERR; + } + +dportcomplete: + 
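+ /*
+  * Common completion path: return the (possibly updated)
+  * qla_dport_diag_v2 buffer, including mbx1/mbx2 and any test
+  * result data, along with the vendor status set above.
+  */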
sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd)); + + bsg_reply->reply_payload_rcv_len = sizeof(*dd); + bsg_job->reply_len = sizeof(*bsg_reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + kfree(dd); + + return 0; +} + +static int +qla2x00_get_flash_image_status(struct bsg_job *bsg_job) +{ + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct qla_hw_data *ha = vha->hw; + struct qla_active_regions regions = { }; + struct active_regions active_regions = { }; + + qla27xx_get_active_image(vha, &active_regions); + regions.global_image = active_regions.global; + + if (IS_QLA27XX(ha)) + regions.nvme_params = QLA27XX_PRIMARY_IMAGE; + + if (IS_QLA28XX(ha)) { + qla28xx_get_aux_images(vha, &active_regions); + regions.board_config = active_regions.aux.board_config; + regions.vpd_nvram = active_regions.aux.vpd_nvram; + regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1; + regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3; + regions.nvme_params = active_regions.aux.nvme_params; + } + + ql_dbg(ql_dbg_user, vha, 0x70e1, + "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u NVME_PARAMS=%u\n", + __func__, vha->host_no, regions.global_image, + regions.board_config, regions.vpd_nvram, + regions.npiv_config_0_1, regions.npiv_config_2_3, regions.nvme_params); + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, ®ions, sizeof(regions)); + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + bsg_reply->reply_payload_rcv_len = sizeof(regions); + bsg_reply->result = DID_OK << 16; + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return 0; +} + +static int +qla2x00_manage_host_stats(struct bsg_job *bsg_job) +{ + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct ql_vnd_mng_host_stats_param *req_data; + struct ql_vnd_mng_host_stats_resp rsp_data; + u32 req_data_len; + int ret = 0; + + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n"); + return -EIO; + } + + req_data_len = bsg_job->request_payload.payload_len; + + if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) { + ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); + return -EIO; + } + + req_data = kzalloc(sizeof(*req_data), GFP_KERNEL); + if (!req_data) { + ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); + return -ENOMEM; + } + + /* Copy the request buffer in req_data */ + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, req_data, + req_data_len); + + switch (req_data->action) { + case QLA_STOP: + ret = qla2xxx_stop_stats(vha->host, req_data->stat_type); + break; + case QLA_START: + ret = qla2xxx_start_stats(vha->host, req_data->stat_type); + break; + case QLA_CLEAR: + ret = qla2xxx_reset_stats(vha->host, req_data->stat_type); + break; + default: + ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n"); + ret = -EIO; + break; + } + + kfree(req_data); + + /* Prepare response */ + rsp_data.status = ret; + bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp); + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + bsg_reply->reply_payload_rcv_len = + 
sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + &rsp_data, + sizeof(struct ql_vnd_mng_host_stats_resp)); + + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return ret; +} + +static int +qla2x00_get_host_stats(struct bsg_job *bsg_job) +{ + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct ql_vnd_stats_param *req_data; + struct ql_vnd_host_stats_resp rsp_data; + u32 req_data_len; + int ret = 0; + u64 ini_entry_count = 0; + u64 entry_count = 0; + u64 tgt_num = 0; + u64 tmp_stat_type = 0; + u64 response_len = 0; + void *data; + + req_data_len = bsg_job->request_payload.payload_len; + + if (req_data_len != sizeof(struct ql_vnd_stats_param)) { + ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); + return -EIO; + } + + req_data = kzalloc(sizeof(*req_data), GFP_KERNEL); + if (!req_data) { + ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); + return -ENOMEM; + } + + /* Copy the request buffer in req_data */ + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, req_data, req_data_len); + + /* Copy stat type to work on it */ + tmp_stat_type = req_data->stat_type; + + if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) { + /* Num of tgts connected to this host */ + tgt_num = qla2x00_get_num_tgts(vha); + /* unset BIT_17 */ + tmp_stat_type &= ~(1 << 17); + } + + /* Total ini stats */ + ini_entry_count = qla2x00_count_set_bits(tmp_stat_type); + + /* Total number of entries */ + entry_count = ini_entry_count + tgt_num; + + response_len = sizeof(struct ql_vnd_host_stats_resp) + + (sizeof(struct ql_vnd_stat_entry) * entry_count); + + if (response_len > bsg_job->reply_payload.payload_len) { + rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL; + bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp); + + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &rsp_data, + sizeof(struct ql_vnd_mng_host_stats_resp)); + + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + goto host_stat_out; + } + + data = kzalloc(response_len, GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto host_stat_out; + } + + ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type, + data, response_len); + + rsp_data.status = EXT_STATUS_OK; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + data, response_len); + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + kfree(data); +host_stat_out: + kfree(req_data); + return ret; +} + +static struct fc_rport * +qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num) +{ + fc_port_t *fcport = NULL; + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->rport->number == tgt_num) + return fcport->rport; + } + return NULL; +} + +static int +qla2x00_get_tgt_stats(struct bsg_job *bsg_job) +{ + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct ql_vnd_tgt_stats_param *req_data; + u32 req_data_len; + int ret = 0; + u64 response_len = 0; + 
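+ /*
+  * Statistics are returned for a single target (tgt_id) per request,
+  * so the reply buffer is sized for the response header plus exactly
+  * one ql_vnd_stat_entry.
+  */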
struct ql_vnd_tgt_stats_resp *data = NULL; + struct fc_rport *rport = NULL; + + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n"); + return -EIO; + } + + req_data_len = bsg_job->request_payload.payload_len; + + if (req_data_len != sizeof(struct ql_vnd_stat_entry)) { + ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); + return -EIO; + } + + req_data = kzalloc(sizeof(*req_data), GFP_KERNEL); + if (!req_data) { + ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); + return -ENOMEM; + } + + /* Copy the request buffer in req_data */ + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, + req_data, req_data_len); + + response_len = sizeof(struct ql_vnd_tgt_stats_resp) + + sizeof(struct ql_vnd_stat_entry); + + /* structure + size for one entry */ + data = kzalloc(response_len, GFP_KERNEL); + if (!data) { + kfree(req_data); + return -ENOMEM; + } + + if (response_len > bsg_job->reply_payload.payload_len) { + data->status = EXT_STATUS_BUFFER_TOO_SMALL; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL; + bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp); + + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, data, + sizeof(struct ql_vnd_tgt_stats_resp)); + + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + goto tgt_stat_out; + } + + rport = qla2xxx_find_rport(vha, req_data->tgt_id); + if (!rport) { + ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id); + ret = EXT_STATUS_INVALID_PARAM; + data->status = EXT_STATUS_INVALID_PARAM; + goto reply; + } + + ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type, + rport, (void *)data, response_len); + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; +reply: + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, data, + response_len); + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + +tgt_stat_out: + kfree(data); + kfree(req_data); + + return ret; +} + +static int +qla2x00_manage_host_port(struct bsg_job *bsg_job) +{ + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct ql_vnd_mng_host_port_param *req_data; + struct ql_vnd_mng_host_port_resp rsp_data; + u32 req_data_len; + int ret = 0; + + req_data_len = bsg_job->request_payload.payload_len; + + if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) { + ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); + return -EIO; + } + + req_data = kzalloc(sizeof(*req_data), GFP_KERNEL); + if (!req_data) { + ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); + return -ENOMEM; + } + + /* Copy the request buffer in req_data */ + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, req_data, req_data_len); + + switch (req_data->action) { + case QLA_ENABLE: + ret = qla2xxx_enable_port(vha->host); + break; + case QLA_DISABLE: + ret = qla2xxx_disable_port(vha->host); + break; + default: + ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n"); + ret = -EIO; + break; + } + + kfree(req_data); + + /* Prepare response */ + rsp_data.status = ret; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 
EXT_STATUS_OK; + bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp); + + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &rsp_data, + sizeof(struct ql_vnd_mng_host_port_resp)); + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return ret; +} + +static int +qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + + ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n", + __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]); + + switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) { + case QL_VND_LOOPBACK: + return qla2x00_process_loopback(bsg_job); + + case QL_VND_A84_RESET: + return qla84xx_reset(bsg_job); + + case QL_VND_A84_UPDATE_FW: + return qla84xx_updatefw(bsg_job); + + case QL_VND_A84_MGMT_CMD: + return qla84xx_mgmt_cmd(bsg_job); + + case QL_VND_IIDMA: + return qla24xx_iidma(bsg_job); + + case QL_VND_FCP_PRIO_CFG_CMD: + return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job); + + case QL_VND_READ_FLASH: + return qla2x00_read_optrom(bsg_job); + + case QL_VND_UPDATE_FLASH: + return qla2x00_update_optrom(bsg_job); + + case QL_VND_SET_FRU_VERSION: + return qla2x00_update_fru_versions(bsg_job); + + case QL_VND_READ_FRU_STATUS: + return qla2x00_read_fru_status(bsg_job); + + case QL_VND_WRITE_FRU_STATUS: + return qla2x00_write_fru_status(bsg_job); + + case QL_VND_WRITE_I2C: + return qla2x00_write_i2c(bsg_job); + + case QL_VND_READ_I2C: + return qla2x00_read_i2c(bsg_job); + + case QL_VND_DIAG_IO_CMD: + return qla24xx_process_bidir_cmd(bsg_job); + + case QL_VND_FX00_MGMT_CMD: + return qlafx00_mgmt_cmd(bsg_job); + + case QL_VND_SERDES_OP: + return qla26xx_serdes_op(bsg_job); + + case QL_VND_SERDES_OP_EX: + return qla8044_serdes_op(bsg_job); + + case QL_VND_GET_FLASH_UPDATE_CAPS: + return qla27xx_get_flash_upd_cap(bsg_job); + + case QL_VND_SET_FLASH_UPDATE_CAPS: + return qla27xx_set_flash_upd_cap(bsg_job); + + case QL_VND_GET_BBCR_DATA: + return qla27xx_get_bbcr_data(bsg_job); + + case QL_VND_GET_PRIV_STATS: + case QL_VND_GET_PRIV_STATS_EX: + return qla2x00_get_priv_stats(bsg_job); + + case QL_VND_DPORT_DIAGNOSTICS: + return qla2x00_do_dport_diagnostics(bsg_job); + + case QL_VND_DPORT_DIAGNOSTICS_V2: + return qla2x00_do_dport_diagnostics_v2(bsg_job); + + case QL_VND_EDIF_MGMT: + return qla_edif_app_mgmt(bsg_job); + + case QL_VND_SS_GET_FLASH_IMAGE_STATUS: + return qla2x00_get_flash_image_status(bsg_job); + + case QL_VND_MANAGE_HOST_STATS: + return qla2x00_manage_host_stats(bsg_job); + + case QL_VND_GET_HOST_STATS: + return qla2x00_get_host_stats(bsg_job); + + case QL_VND_GET_TGT_STATS: + return qla2x00_get_tgt_stats(bsg_job); + + case QL_VND_MANAGE_HOST_PORT: + return qla2x00_manage_host_port(bsg_job); + + case QL_VND_MBX_PASSTHRU: + return qla2x00_mailbox_passthru(bsg_job); + + default: + return -ENOSYS; + } +} + +int +qla24xx_bsg_request(struct bsg_job *bsg_job) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + int ret = -EINVAL; + struct fc_rport *rport; + struct Scsi_Host *host; + scsi_qla_host_t *vha; + + /* In case no data transferred. 
*/ + bsg_reply->reply_payload_rcv_len = 0; + + if (bsg_request->msgcode == FC_BSG_RPT_ELS) { + rport = fc_bsg_to_rport(bsg_job); + if (!rport) + return ret; + host = rport_to_shost(rport); + vha = shost_priv(host); + } else { + host = fc_bsg_to_shost(bsg_job); + vha = shost_priv(host); + } + + /* Disable port will bring down the chip, allow enable command */ + if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT || + bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS) + goto skip_chip_chk; + + if (vha->hw->flags.port_isolated) { + bsg_reply->result = DID_ERROR; + /* operation not permitted */ + return -EPERM; + } + + if (qla2x00_chip_is_down(vha)) { + ql_dbg(ql_dbg_user, vha, 0x709f, + "BSG: ISP abort active/needed -- cmd=%d.\n", + bsg_request->msgcode); + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + return -EBUSY; + } + + if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) { + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + return -EIO; + } + +skip_chip_chk: + ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000, + "Entered %s msgcode=0x%x. bsg ptr %px\n", + __func__, bsg_request->msgcode, bsg_job); + + switch (bsg_request->msgcode) { + case FC_BSG_RPT_ELS: + case FC_BSG_HST_ELS_NOLOGIN: + ret = qla2x00_process_els(bsg_job); + break; + case FC_BSG_HST_CT: + ret = qla2x00_process_ct(bsg_job); + break; + case FC_BSG_HST_VENDOR: + ret = qla2x00_process_vendor_specific(vha, bsg_job); + break; + case FC_BSG_HST_ADD_RPORT: + case FC_BSG_HST_DEL_RPORT: + case FC_BSG_RPT_CT: + default: + ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n"); + break; + } + + ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000, + "%s done with return %x\n", __func__, ret); + + return ret; +} + +int +qla24xx_bsg_timeout(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); + struct qla_hw_data *ha = vha->hw; + srb_t *sp; + int cnt, que; + unsigned long flags; + struct req_que *req; + + ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. 
bsg ptr %p.\n", + __func__, bsg_job); + + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x9007, + "PCI/Register disconnect.\n"); + qla_pci_set_eeh_busy(vha); + } + + /* find the bsg job from the active list of commands */ + spin_lock_irqsave(&ha->hardware_lock, flags); + for (que = 0; que < ha->max_req_queues; que++) { + req = ha->req_q_map[que]; + if (!req) + continue; + + for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { + sp = req->outstanding_cmds[cnt]; + if (sp && + (sp->type == SRB_CT_CMD || + sp->type == SRB_ELS_CMD_HST || + sp->type == SRB_ELS_CMD_HST_NOLOGIN || + sp->type == SRB_FXIOCB_BCMD) && + sp->u.bsg_job == bsg_job) { + req->outstanding_cmds[cnt] = NULL; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) { + ql_log(ql_log_warn, vha, 0x7089, + "mbx abort_command failed.\n"); + bsg_reply->result = -EIO; + } else { + ql_dbg(ql_dbg_user, vha, 0x708a, + "mbx abort_command success.\n"); + bsg_reply->result = 0; + } + spin_lock_irqsave(&ha->hardware_lock, flags); + goto done; + + } + } + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n"); + bsg_reply->result = -ENXIO; + return 0; + +done: + spin_unlock_irqrestore(&ha->hardware_lock, flags); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + return 0; +} + +int qla2x00_mailbox_passthru(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); + int ret = -EINVAL; + int ptsize = sizeof(struct qla_mbx_passthru); + struct qla_mbx_passthru *req_data = NULL; + uint32_t req_data_len; + + req_data_len = bsg_job->request_payload.payload_len; + if (req_data_len != ptsize) { + ql_log(ql_log_warn, vha, 0xf0a3, "req_data_len invalid.\n"); + return -EIO; + } + req_data = kzalloc(ptsize, GFP_KERNEL); + if (!req_data) { + ql_log(ql_log_warn, vha, 0xf0a4, + "req_data memory allocation failure.\n"); + return -ENOMEM; + } + + /* Copy the request buffer in req_data */ + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, req_data, ptsize); + ret = qla_mailbox_passthru(vha, req_data->mbx_in, req_data->mbx_out); + + /* Copy the req_data in request buffer */ + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, req_data, ptsize); + + bsg_reply->reply_payload_rcv_len = ptsize; + if (ret == QLA_SUCCESS) + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + else + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_ERR; + + bsg_job->reply_len = sizeof(*bsg_job->reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); + + kfree(req_data); + + return ret; +} diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h new file mode 100644 index 000000000..d38dab0a0 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_bsg.h @@ -0,0 +1,323 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#ifndef __QLA_BSG_H +#define __QLA_BSG_H + +/* BSG Vendor specific commands */ +#define QL_VND_LOOPBACK 0x01 +#define QL_VND_A84_RESET 0x02 +#define QL_VND_A84_UPDATE_FW 0x03 +#define QL_VND_A84_MGMT_CMD 0x04 +#define QL_VND_IIDMA 0x05 +#define QL_VND_FCP_PRIO_CFG_CMD 0x06 +#define QL_VND_READ_FLASH 0x07 +#define QL_VND_UPDATE_FLASH 0x08 +#define QL_VND_SET_FRU_VERSION 0x0B 
+#define QL_VND_READ_FRU_STATUS 0x0C +#define QL_VND_WRITE_FRU_STATUS 0x0D +#define QL_VND_DIAG_IO_CMD 0x0A +#define QL_VND_WRITE_I2C 0x10 +#define QL_VND_READ_I2C 0x11 +#define QL_VND_FX00_MGMT_CMD 0x12 +#define QL_VND_SERDES_OP 0x13 +#define QL_VND_SERDES_OP_EX 0x14 +#define QL_VND_GET_FLASH_UPDATE_CAPS 0x15 +#define QL_VND_SET_FLASH_UPDATE_CAPS 0x16 +#define QL_VND_GET_BBCR_DATA 0x17 +#define QL_VND_GET_PRIV_STATS 0x18 +#define QL_VND_DPORT_DIAGNOSTICS 0x19 +#define QL_VND_GET_PRIV_STATS_EX 0x1A +#define QL_VND_SS_GET_FLASH_IMAGE_STATUS 0x1E +#define QL_VND_EDIF_MGMT 0X1F +#define QL_VND_MANAGE_HOST_STATS 0x23 +#define QL_VND_GET_HOST_STATS 0x24 +#define QL_VND_GET_TGT_STATS 0x25 +#define QL_VND_MANAGE_HOST_PORT 0x26 +#define QL_VND_MBX_PASSTHRU 0x2B +#define QL_VND_DPORT_DIAGNOSTICS_V2 0x2C + +/* BSG Vendor specific subcode returns */ +#define EXT_STATUS_OK 0 +#define EXT_STATUS_ERR 1 +#define EXT_STATUS_BUSY 2 +#define EXT_STATUS_INVALID_PARAM 6 +#define EXT_STATUS_DATA_OVERRUN 7 +#define EXT_STATUS_DATA_UNDERRUN 8 +#define EXT_STATUS_MAILBOX 11 +#define EXT_STATUS_BUFFER_TOO_SMALL 16 +#define EXT_STATUS_NO_MEMORY 17 +#define EXT_STATUS_DEVICE_OFFLINE 22 + +/* + * To support bidirectional iocb + * BSG Vendor specific returns + */ +#define EXT_STATUS_NOT_SUPPORTED 27 +#define EXT_STATUS_INVALID_CFG 28 +#define EXT_STATUS_DMA_ERR 29 +#define EXT_STATUS_TIMEOUT 30 +#define EXT_STATUS_THREAD_FAILED 31 +#define EXT_STATUS_DATA_CMP_FAILED 32 +#define EXT_STATUS_DPORT_DIAG_ERR 40 +#define EXT_STATUS_DPORT_DIAG_IN_PROCESS 41 +#define EXT_STATUS_DPORT_DIAG_NOT_RUNNING 42 + +/* BSG definations for interpreting CommandSent field */ +#define INT_DEF_LB_LOOPBACK_CMD 0 +#define INT_DEF_LB_ECHO_CMD 1 + +/* Loopback related definations */ +#define INTERNAL_LOOPBACK 0xF1 +#define EXTERNAL_LOOPBACK 0xF2 +#define ENABLE_INTERNAL_LOOPBACK 0x02 +#define ENABLE_EXTERNAL_LOOPBACK 0x04 +#define INTERNAL_LOOPBACK_MASK 0x000E +#define MAX_ELS_FRAME_PAYLOAD 252 +#define ELS_OPCODE_BYTE 0x10 + +/* BSG Vendor specific definations */ +#define A84_ISSUE_WRITE_TYPE_CMD 0 +#define A84_ISSUE_READ_TYPE_CMD 1 +#define A84_CLEANUP_CMD 2 +#define A84_ISSUE_RESET_OP_FW 3 +#define A84_ISSUE_RESET_DIAG_FW 4 +#define A84_ISSUE_UPDATE_OPFW_CMD 5 +#define A84_ISSUE_UPDATE_DIAGFW_CMD 6 + +struct qla84_mgmt_param { + union { + struct { + uint32_t start_addr; + } mem; /* for QLA84_MGMT_READ/WRITE_MEM */ + struct { + uint32_t id; +#define QLA84_MGMT_CONFIG_ID_UIF 1 +#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2 +#define QLA84_MGMT_CONFIG_ID_PAUSE 3 +#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4 + + uint32_t param0; + uint32_t param1; + } config; /* for QLA84_MGMT_CHNG_CONFIG */ + + struct { + uint32_t type; +#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */ +#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */ +#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */ +#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */ +#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */ +#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */ +#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */ + + uint32_t context; +/* +* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA +*/ +#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0 +#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1 +#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2 +#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3 +#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4 +#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5 +#define 
IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6 +#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7 +#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8 +#define IC_LOG_DATA_LOG_ID_DCX_LOG 9 + +/* +* context definitions for QLA84_MGMT_INFO_PORT_STAT +*/ +#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0 +#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1 +#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2 +#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3 +#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4 +#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5 + + +/* +* context definitions for QLA84_MGMT_INFO_LIF_STAT +*/ +#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0 +#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1 +#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2 +#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3 +#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6 + + } info; /* for QLA84_MGMT_GET_INFO */ + } u; +}; + +struct qla84_msg_mgmt { + uint16_t cmd; +#define QLA84_MGMT_READ_MEM 0x00 +#define QLA84_MGMT_WRITE_MEM 0x01 +#define QLA84_MGMT_CHNG_CONFIG 0x02 +#define QLA84_MGMT_GET_INFO 0x03 + uint16_t rsrvd; + struct qla84_mgmt_param mgmtp;/* parameters for cmd */ + uint32_t len; /* bytes in payload following this struct */ + uint8_t payload[]; /* payload for cmd */ +}; + +struct qla_bsg_a84_mgmt { + struct qla84_msg_mgmt mgmt; +} __attribute__ ((packed)); + +struct qla_scsi_addr { + uint16_t bus; + uint16_t target; +} __attribute__ ((packed)); + +struct qla_ext_dest_addr { + union { + uint8_t wwnn[8]; + uint8_t wwpn[8]; + uint8_t id[4]; + struct qla_scsi_addr scsi_addr; + } dest_addr; + uint16_t dest_type; +#define EXT_DEF_TYPE_WWPN 2 + uint16_t lun; + uint16_t padding[2]; +} __attribute__ ((packed)); + +struct qla_port_param { + struct qla_ext_dest_addr fc_scsi_addr; + uint16_t mode; + uint16_t speed; +} __attribute__ ((packed)); + +struct qla_mbx_passthru { + uint16_t reserved1[2]; + uint16_t mbx_in[32]; + uint16_t mbx_out[32]; + uint32_t reserved2[16]; +} __packed; + +/* FRU VPD */ + +#define MAX_FRU_SIZE 36 + +struct qla_field_address { + uint16_t offset; + uint16_t device; + uint16_t option; +} __packed; + +struct qla_field_info { + uint8_t version[MAX_FRU_SIZE]; +} __packed; + +struct qla_image_version { + struct qla_field_address field_address; + struct qla_field_info field_info; +} __packed; + +struct qla_image_version_list { + uint32_t count; + struct qla_image_version version[]; +} __packed; + +struct qla_status_reg { + struct qla_field_address field_address; + uint8_t status_reg; + uint8_t reserved[7]; +} __packed; + +struct qla_i2c_access { + uint16_t device; + uint16_t offset; + uint16_t option; + uint16_t length; + uint8_t buffer[0x40]; +} __packed; + +/* 26xx serdes register interface */ + +/* serdes reg commands */ +#define INT_SC_SERDES_READ_REG 1 +#define INT_SC_SERDES_WRITE_REG 2 + +struct qla_serdes_reg { + uint16_t cmd; + uint16_t addr; + uint16_t val; +} __packed; + +struct qla_serdes_reg_ex { + uint16_t cmd; + uint32_t addr; + uint32_t val; +} __packed; + +struct qla_flash_update_caps { + uint64_t capabilities; + uint32_t outage_duration; + uint8_t reserved[20]; +} __packed; + +/* BB_CR Status */ +#define QLA_BBCR_STATUS_DISABLED 0 +#define QLA_BBCR_STATUS_ENABLED 1 +#define QLA_BBCR_STATUS_UNKNOWN 2 + +/* BB_CR State */ +#define QLA_BBCR_STATE_OFFLINE 0 +#define QLA_BBCR_STATE_ONLINE 1 + +/* BB_CR Offline Reason Code */ +#define QLA_BBCR_REASON_PORT_SPEED 1 +#define QLA_BBCR_REASON_PEER_PORT 2 +#define QLA_BBCR_REASON_SWITCH 3 +#define 
QLA_BBCR_REASON_LOGIN_REJECT 4 + +struct qla_bbcr_data { + uint8_t status; /* 1 - enabled, 0 - Disabled */ + uint8_t state; /* 1 - online, 0 - offline */ + uint8_t configured_bbscn; /* 0-15 */ + uint8_t negotiated_bbscn; /* 0-15 */ + uint8_t offline_reason_code; + uint16_t mbx1; /* Port state */ + uint8_t reserved[9]; +} __packed; + +struct qla_dport_diag { + uint16_t options; + uint32_t buf[16]; + uint8_t unused[62]; +} __packed; + +#define QLA_GET_DPORT_RESULT_V2 0 /* Get Result */ +#define QLA_RESTART_DPORT_TEST_V2 1 /* Restart test */ +#define QLA_START_DPORT_TEST_V2 2 /* Start test */ +struct qla_dport_diag_v2 { + uint16_t options; + uint16_t mbx1; + uint16_t mbx2; + uint8_t unused[58]; + uint8_t buf[1024]; /* Test Result */ +} __packed; + +/* D_Port options */ +#define QLA_DPORT_RESULT 0x0 +#define QLA_DPORT_START 0x2 + +/* active images in flash */ +struct qla_active_regions { + uint8_t global_image; + uint8_t board_config; + uint8_t vpd_nvram; + uint8_t npiv_config_0_1; + uint8_t npiv_config_2_3; + uint8_t nvme_params; + uint8_t reserved[31]; +} __packed; + +#include "qla_edif_bsg.h" + +#endif diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c new file mode 100644 index 000000000..691ef827a --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -0,0 +1,2800 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ + +/* + * Table for showing the current message id in use for particular level + * Change this table for addition of log/debug messages. + * ---------------------------------------------------------------------- + * | Level | Last Value Used | Holes | + * ---------------------------------------------------------------------- + * | Module Init and Probe | 0x0199 | | + * | Mailbox commands | 0x1206 | 0x11a5-0x11ff | + * | Device Discovery | 0x2134 | 0x2112-0x2115 | + * | | | 0x2127-0x2128 | + * | Queue Command and IO tracing | 0x3074 | 0x300b | + * | | | 0x3027-0x3028 | + * | | | 0x303d-0x3041 | + * | | | 0x302e,0x3033 | + * | | | 0x3036,0x3038 | + * | | | 0x303a | + * | DPC Thread | 0x4023 | 0x4002,0x4013 | + * | Async Events | 0x509c | | + * | Timer Routines | 0x6012 | | + * | User Space Interactions | 0x70e3 | 0x7018,0x702e | + * | | | 0x7020,0x7024 | + * | | | 0x7039,0x7045 | + * | | | 0x7073-0x7075 | + * | | | 0x70a5-0x70a6 | + * | | | 0x70a8,0x70ab | + * | | | 0x70ad-0x70ae | + * | | | 0x70d0-0x70d6 | + * | | | 0x70d7-0x70db | + * | Task Management | 0x8042 | 0x8000 | + * | | | 0x8019 | + * | | | 0x8025,0x8026 | + * | | | 0x8031,0x8032 | + * | | | 0x8039,0x803c | + * | AER/EEH | 0x9011 | | + * | Virtual Port | 0xa007 | | + * | ISP82XX Specific | 0xb157 | 0xb002,0xb024 | + * | | | 0xb09e,0xb0ae | + * | | | 0xb0c3,0xb0c6 | + * | | | 0xb0e0-0xb0ef | + * | | | 0xb085,0xb0dc | + * | | | 0xb107,0xb108 | + * | | | 0xb111,0xb11e | + * | | | 0xb12c,0xb12d | + * | | | 0xb13a,0xb142 | + * | | | 0xb13c-0xb140 | + * | | | 0xb149 | + * | MultiQ | 0xc010 | | + * | Misc | 0xd303 | 0xd031-0xd0ff | + * | | | 0xd101-0xd1fe | + * | | | 0xd214-0xd2fe | + * | Target Mode | 0xe081 | | + * | Target Mode Management | 0xf09b | 0xf002 | + * | | | 0xf046-0xf049 | + * | Target Mode Task Management | 0x1000d | | + * ---------------------------------------------------------------------- + */ + +#include "qla_def.h" + +#include +#define CREATE_TRACE_POINTS +#include + +static uint32_t ql_dbg_offset = 0x800; + +static inline void +qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump 
*fw_dump) +{ + fw_dump->fw_major_version = htonl(ha->fw_major_version); + fw_dump->fw_minor_version = htonl(ha->fw_minor_version); + fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version); + fw_dump->fw_attributes = htonl(ha->fw_attributes); + + fw_dump->vendor = htonl(ha->pdev->vendor); + fw_dump->device = htonl(ha->pdev->device); + fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor); + fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device); +} + +static inline void * +qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr) +{ + struct req_que *req = ha->req_q_map[0]; + struct rsp_que *rsp = ha->rsp_q_map[0]; + /* Request queue. */ + memcpy(ptr, req->ring, req->length * + sizeof(request_t)); + + /* Response queue. */ + ptr += req->length * sizeof(request_t); + memcpy(ptr, rsp->ring, rsp->length * + sizeof(response_t)); + + return ptr + (rsp->length * sizeof(response_t)); +} + +int +qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, + uint32_t ram_dwords, void **nxt) +{ + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + dma_addr_t dump_dma = ha->gid_list_dma; + uint32_t *chunk = (uint32_t *)ha->gid_list; + uint32_t dwords = qla2x00_gid_list_size(ha) / 4; + uint32_t stat; + ulong i, j, timer = 6000000; + int rval = QLA_FUNCTION_FAILED; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + + if (qla_pci_disconnected(vha, reg)) + return rval; + + for (i = 0; i < ram_dwords; i += dwords, addr += dwords) { + if (i + dwords > ram_dwords) + dwords = ram_dwords - i; + + wrt_reg_word(®->mailbox0, MBC_LOAD_DUMP_MPI_RAM); + wrt_reg_word(®->mailbox1, LSW(addr)); + wrt_reg_word(®->mailbox8, MSW(addr)); + + wrt_reg_word(®->mailbox2, MSW(LSD(dump_dma))); + wrt_reg_word(®->mailbox3, LSW(LSD(dump_dma))); + wrt_reg_word(®->mailbox6, MSW(MSD(dump_dma))); + wrt_reg_word(®->mailbox7, LSW(MSD(dump_dma))); + + wrt_reg_word(®->mailbox4, MSW(dwords)); + wrt_reg_word(®->mailbox5, LSW(dwords)); + + wrt_reg_word(®->mailbox9, 0); + wrt_reg_dword(®->hccr, HCCRX_SET_HOST_INT); + + ha->flags.mbox_int = 0; + while (timer--) { + udelay(5); + + if (qla_pci_disconnected(vha, reg)) + return rval; + + stat = rd_reg_dword(®->host_status); + /* Check for pending interrupts. */ + if (!(stat & HSRX_RISC_INT)) + continue; + + stat &= 0xff; + if (stat != 0x1 && stat != 0x2 && + stat != 0x10 && stat != 0x11) { + + /* Clear this intr; it wasn't a mailbox intr */ + wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); + rd_reg_dword(®->hccr); + continue; + } + + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + rval = rd_reg_word(®->mailbox0) & MBS_MASK; + wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); + rd_reg_dword(®->hccr); + break; + } + ha->flags.mbox_int = 1; + *nxt = ram + i; + + if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { + /* no interrupt, timed out*/ + return rval; + } + if (rval) { + /* error completion status */ + return rval; + } + for (j = 0; j < dwords; j++) { + ram[i + j] = + (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ? 
+ chunk[j] : swab32(chunk[j]); + } + } + + *nxt = ram + i; + return QLA_SUCCESS; +} + +int +qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram, + uint32_t ram_dwords, void **nxt) +{ + int rval = QLA_FUNCTION_FAILED; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + dma_addr_t dump_dma = ha->gid_list_dma; + uint32_t *chunk = (uint32_t *)ha->gid_list; + uint32_t dwords = qla2x00_gid_list_size(ha) / 4; + uint32_t stat; + ulong i, j, timer = 6000000; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + + if (qla_pci_disconnected(vha, reg)) + return rval; + + for (i = 0; i < ram_dwords; i += dwords, addr += dwords) { + if (i + dwords > ram_dwords) + dwords = ram_dwords - i; + + wrt_reg_word(®->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED); + wrt_reg_word(®->mailbox1, LSW(addr)); + wrt_reg_word(®->mailbox8, MSW(addr)); + wrt_reg_word(®->mailbox10, 0); + + wrt_reg_word(®->mailbox2, MSW(LSD(dump_dma))); + wrt_reg_word(®->mailbox3, LSW(LSD(dump_dma))); + wrt_reg_word(®->mailbox6, MSW(MSD(dump_dma))); + wrt_reg_word(®->mailbox7, LSW(MSD(dump_dma))); + + wrt_reg_word(®->mailbox4, MSW(dwords)); + wrt_reg_word(®->mailbox5, LSW(dwords)); + wrt_reg_dword(®->hccr, HCCRX_SET_HOST_INT); + + ha->flags.mbox_int = 0; + while (timer--) { + udelay(5); + if (qla_pci_disconnected(vha, reg)) + return rval; + + stat = rd_reg_dword(®->host_status); + /* Check for pending interrupts. */ + if (!(stat & HSRX_RISC_INT)) + continue; + + stat &= 0xff; + if (stat != 0x1 && stat != 0x2 && + stat != 0x10 && stat != 0x11) { + wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); + rd_reg_dword(®->hccr); + continue; + } + + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + rval = rd_reg_word(®->mailbox0) & MBS_MASK; + wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); + rd_reg_dword(®->hccr); + break; + } + ha->flags.mbox_int = 1; + *nxt = ram + i; + + if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { + /* no interrupt, timed out*/ + return rval; + } + if (rval) { + /* error completion status */ + return rval; + } + for (j = 0; j < dwords; j++) { + ram[i + j] = (__force __be32) + ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) ? + chunk[j] : swab32(chunk[j])); + } + } + + *nxt = ram + i; + return QLA_SUCCESS; +} + +static int +qla24xx_dump_memory(struct qla_hw_data *ha, __be32 *code_ram, + uint32_t cram_size, void **nxt) +{ + int rval; + + /* Code RAM. */ + rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt); + if (rval != QLA_SUCCESS) + return rval; + + set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags); + + /* External Memory. 
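Dumped with MBC_DUMP_RISC_RAM_EXTENDED from RISC address 0x100000 for (fw_memory_size - 0x100000 + 1) dwords.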
*/ + rval = qla24xx_dump_ram(ha, 0x100000, *nxt, + ha->fw_memory_size - 0x100000 + 1, nxt); + if (rval == QLA_SUCCESS) + set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags); + + return rval; +} + +static __be32 * +qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase, + uint32_t count, __be32 *buf) +{ + __le32 __iomem *dmp_reg; + + wrt_reg_dword(®->iobase_addr, iobase); + dmp_reg = ®->iobase_window; + for ( ; count--; dmp_reg++) + *buf++ = htonl(rd_reg_dword(dmp_reg)); + + return buf; +} + +void +qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha) +{ + wrt_reg_dword(®->hccr, HCCRX_SET_RISC_PAUSE); + + /* 100 usec delay is sufficient enough for hardware to pause RISC */ + udelay(100); + if (rd_reg_dword(®->host_status) & HSRX_RISC_PAUSED) + set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags); +} + +int +qla24xx_soft_reset(struct qla_hw_data *ha) +{ + int rval = QLA_SUCCESS; + uint32_t cnt; + uint16_t wd; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + /* + * Reset RISC. The delay is dependent on system architecture. + * Driver can proceed with the reset sequence after waiting + * for a timeout period. + */ + wrt_reg_dword(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); + for (cnt = 0; cnt < 30000; cnt++) { + if ((rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) + break; + + udelay(10); + } + if (!(rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE)) + set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); + + wrt_reg_dword(®->ctrl_status, + CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); + pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); + + udelay(100); + + /* Wait for soft-reset to complete. */ + for (cnt = 0; cnt < 30000; cnt++) { + if ((rd_reg_dword(®->ctrl_status) & + CSRX_ISP_SOFT_RESET) == 0) + break; + + udelay(10); + } + if (!(rd_reg_dword(®->ctrl_status) & CSRX_ISP_SOFT_RESET)) + set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags); + + wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_RESET); + rd_reg_dword(®->hccr); /* PCI Posting. */ + + for (cnt = 10000; rd_reg_word(®->mailbox0) != 0 && + rval == QLA_SUCCESS; cnt--) { + if (cnt) + udelay(10); + else + rval = QLA_FUNCTION_TIMEOUT; + } + if (rval == QLA_SUCCESS) + set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); + + return rval; +} + +static int +qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram, + uint32_t ram_words, void **nxt) +{ + int rval; + uint32_t cnt, stat, timer, words, idx; + uint16_t mb0; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + dma_addr_t dump_dma = ha->gid_list_dma; + __le16 *dump = (__force __le16 *)ha->gid_list; + + rval = QLA_SUCCESS; + mb0 = 0; + + WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED); + clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + + words = qla2x00_gid_list_size(ha) / 2; + for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS; + cnt += words, addr += words) { + if (cnt + words > ram_words) + words = ram_words - cnt; + + WRT_MAILBOX_REG(ha, reg, 1, LSW(addr)); + WRT_MAILBOX_REG(ha, reg, 8, MSW(addr)); + + WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma)); + WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma)); + WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma))); + WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma))); + + WRT_MAILBOX_REG(ha, reg, 4, words); + wrt_reg_word(®->hccr, HCCR_SET_HOST_INT); + + for (timer = 6000000; timer; timer--) { + /* Check for pending interrupts. 
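A host status of 0x1, 0x2, 0x10 or 0x11 signals completion of the DUMP RISC RAM EXTENDED mailbox command; any other RISC interrupt is cleared and ignored.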
*/ + stat = rd_reg_dword(®->u.isp2300.host_status); + if (stat & HSR_RISC_INT) { + stat &= 0xff; + + if (stat == 0x1 || stat == 0x2) { + set_bit(MBX_INTERRUPT, + &ha->mbx_cmd_flags); + + mb0 = RD_MAILBOX_REG(ha, reg, 0); + + /* Release mailbox registers. */ + wrt_reg_word(®->semaphore, 0); + wrt_reg_word(®->hccr, + HCCR_CLR_RISC_INT); + rd_reg_word(®->hccr); + break; + } else if (stat == 0x10 || stat == 0x11) { + set_bit(MBX_INTERRUPT, + &ha->mbx_cmd_flags); + + mb0 = RD_MAILBOX_REG(ha, reg, 0); + + wrt_reg_word(®->hccr, + HCCR_CLR_RISC_INT); + rd_reg_word(®->hccr); + break; + } + + /* clear this intr; it wasn't a mailbox intr */ + wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); + rd_reg_word(®->hccr); + } + udelay(5); + } + + if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { + rval = mb0 & MBS_MASK; + for (idx = 0; idx < words; idx++) + ram[cnt + idx] = + cpu_to_be16(le16_to_cpu(dump[idx])); + } else { + rval = QLA_FUNCTION_FAILED; + } + } + + *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL; + return rval; +} + +static inline void +qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count, + __be16 *buf) +{ + __le16 __iomem *dmp_reg = ®->u.isp2300.fb_cmd; + + for ( ; count--; dmp_reg++) + *buf++ = htons(rd_reg_word(dmp_reg)); +} + +static inline void * +qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr) +{ + if (!ha->eft) + return ptr; + + memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size)); + return ptr + ntohl(ha->fw_dump->eft_size); +} + +static inline void * +qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) +{ + uint32_t cnt; + __be32 *iter_reg; + struct qla2xxx_fce_chain *fcec = ptr; + + if (!ha->fce) + return ptr; + + *last_chain = &fcec->type; + fcec->type = htonl(DUMP_CHAIN_FCE); + fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) + + fce_calc_size(ha->fce_bufs)); + fcec->size = htonl(fce_calc_size(ha->fce_bufs)); + fcec->addr_l = htonl(LSD(ha->fce_dma)); + fcec->addr_h = htonl(MSD(ha->fce_dma)); + + iter_reg = fcec->eregs; + for (cnt = 0; cnt < 8; cnt++) + *iter_reg++ = htonl(ha->fce_mb[cnt]); + + memcpy(iter_reg, ha->fce, ntohl(fcec->size)); + + return (char *)iter_reg + ntohl(fcec->size); +} + +static inline void * +qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) +{ + struct qla2xxx_offld_chain *c = ptr; + + if (!ha->exlogin_buf) + return ptr; + + *last_chain = &c->type; + + c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN); + c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) + + ha->exlogin_size); + c->size = cpu_to_be32(ha->exlogin_size); + c->addr = cpu_to_be64(ha->exlogin_buf_dma); + + ptr += sizeof(struct qla2xxx_offld_chain); + memcpy(ptr, ha->exlogin_buf, ha->exlogin_size); + + return (char *)ptr + be32_to_cpu(c->size); +} + +static inline void * +qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) +{ + struct qla2xxx_offld_chain *c = ptr; + + if (!ha->exchoffld_buf) + return ptr; + + *last_chain = &c->type; + + c->type = cpu_to_be32(DUMP_CHAIN_EXCHG); + c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) + + ha->exchoffld_size); + c->size = cpu_to_be32(ha->exchoffld_size); + c->addr = cpu_to_be64(ha->exchoffld_buf_dma); + + ptr += sizeof(struct qla2xxx_offld_chain); + memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size); + + return (char *)ptr + be32_to_cpu(c->size); +} + +static inline void * +qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, + __be32 **last_chain) +{ + struct qla2xxx_mqueue_chain *q; + struct qla2xxx_mqueue_header *qh; + 
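+ /*
+  * Each queue is written to the dump as a qla2xxx_mqueue_chain entry
+  * followed by a qla2xxx_mqueue_header and the raw ring contents.
+  */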
uint32_t num_queues; + int que; + struct { + int length; + void *ring; + } aq, *aqp; + + if (!ha->tgt.atio_ring) + return ptr; + + num_queues = 1; + aqp = &aq; + aqp->length = ha->tgt.atio_q_length; + aqp->ring = ha->tgt.atio_ring; + + for (que = 0; que < num_queues; que++) { + /* aqp = ha->atio_q_map[que]; */ + q = ptr; + *last_chain = &q->type; + q->type = htonl(DUMP_CHAIN_QUEUE); + q->chain_size = htonl( + sizeof(struct qla2xxx_mqueue_chain) + + sizeof(struct qla2xxx_mqueue_header) + + (aqp->length * sizeof(request_t))); + ptr += sizeof(struct qla2xxx_mqueue_chain); + + /* Add header. */ + qh = ptr; + qh->queue = htonl(TYPE_ATIO_QUEUE); + qh->number = htonl(que); + qh->size = htonl(aqp->length * sizeof(request_t)); + ptr += sizeof(struct qla2xxx_mqueue_header); + + /* Add data. */ + memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t)); + + ptr += aqp->length * sizeof(request_t); + } + + return ptr; +} + +static inline void * +qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) +{ + struct qla2xxx_mqueue_chain *q; + struct qla2xxx_mqueue_header *qh; + struct req_que *req; + struct rsp_que *rsp; + int que; + + if (!ha->mqenable) + return ptr; + + /* Request queues */ + for (que = 1; que < ha->max_req_queues; que++) { + req = ha->req_q_map[que]; + if (!req) + break; + + /* Add chain. */ + q = ptr; + *last_chain = &q->type; + q->type = htonl(DUMP_CHAIN_QUEUE); + q->chain_size = htonl( + sizeof(struct qla2xxx_mqueue_chain) + + sizeof(struct qla2xxx_mqueue_header) + + (req->length * sizeof(request_t))); + ptr += sizeof(struct qla2xxx_mqueue_chain); + + /* Add header. */ + qh = ptr; + qh->queue = htonl(TYPE_REQUEST_QUEUE); + qh->number = htonl(que); + qh->size = htonl(req->length * sizeof(request_t)); + ptr += sizeof(struct qla2xxx_mqueue_header); + + /* Add data. */ + memcpy(ptr, req->ring, req->length * sizeof(request_t)); + ptr += req->length * sizeof(request_t); + } + + /* Response queues */ + for (que = 1; que < ha->max_rsp_queues; que++) { + rsp = ha->rsp_q_map[que]; + if (!rsp) + break; + + /* Add chain. */ + q = ptr; + *last_chain = &q->type; + q->type = htonl(DUMP_CHAIN_QUEUE); + q->chain_size = htonl( + sizeof(struct qla2xxx_mqueue_chain) + + sizeof(struct qla2xxx_mqueue_header) + + (rsp->length * sizeof(response_t))); + ptr += sizeof(struct qla2xxx_mqueue_chain); + + /* Add header. */ + qh = ptr; + qh->queue = htonl(TYPE_RESPONSE_QUEUE); + qh->number = htonl(que); + qh->size = htonl(rsp->length * sizeof(response_t)); + ptr += sizeof(struct qla2xxx_mqueue_header); + + /* Add data. */ + memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t)); + ptr += rsp->length * sizeof(response_t); + } + + return ptr; +} + +static inline void * +qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) +{ + uint32_t cnt, que_idx; + uint8_t que_cnt; + struct qla2xxx_mq_chain *mq = ptr; + device_reg_t *reg; + + if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) + return ptr; + + mq = ptr; + *last_chain = &mq->type; + mq->type = htonl(DUMP_CHAIN_MQ); + mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain)); + + que_cnt = ha->max_req_queues > ha->max_rsp_queues ? 
+ ha->max_req_queues : ha->max_rsp_queues; + mq->count = htonl(que_cnt); + for (cnt = 0; cnt < que_cnt; cnt++) { + reg = ISP_QUE_REG(ha, cnt); + que_idx = cnt * 4; + mq->qregs[que_idx] = + htonl(rd_reg_dword(®->isp25mq.req_q_in)); + mq->qregs[que_idx+1] = + htonl(rd_reg_dword(®->isp25mq.req_q_out)); + mq->qregs[que_idx+2] = + htonl(rd_reg_dword(®->isp25mq.rsp_q_in)); + mq->qregs[que_idx+3] = + htonl(rd_reg_dword(®->isp25mq.rsp_q_out)); + } + + return ptr + sizeof(struct qla2xxx_mq_chain); +} + +void +qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) +{ + struct qla_hw_data *ha = vha->hw; + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xd000, + "Failed to dump firmware (%x), dump status flags (0x%lx).\n", + rval, ha->fw_dump_cap_flags); + ha->fw_dumped = false; + } else { + ql_log(ql_log_info, vha, 0xd001, + "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n", + vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags); + ha->fw_dumped = true; + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); + } +} + +void qla2xxx_dump_fw(scsi_qla_host_t *vha) +{ + unsigned long flags; + + spin_lock_irqsave(&vha->hw->hardware_lock, flags); + vha->hw->isp_ops->fw_dump(vha); + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); +} + +/** + * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. + * @vha: HA context + */ +void +qla2300_fw_dump(scsi_qla_host_t *vha) +{ + int rval; + uint32_t cnt; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + __le16 __iomem *dmp_reg; + struct qla2300_fw_dump *fw; + void *nxt; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + + lockdep_assert_held(&ha->hardware_lock); + + if (!ha->fw_dump) { + ql_log(ql_log_warn, vha, 0xd002, + "No buffer available for dump.\n"); + return; + } + + if (ha->fw_dumped) { + ql_log(ql_log_warn, vha, 0xd003, + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", + ha->fw_dump); + return; + } + fw = &ha->fw_dump->isp.isp23; + qla2xxx_prep_dump(ha, ha->fw_dump); + + rval = QLA_SUCCESS; + fw->hccr = htons(rd_reg_word(®->hccr)); + + /* Pause RISC. */ + wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); + if (IS_QLA2300(ha)) { + for (cnt = 30000; + (rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0 && + rval == QLA_SUCCESS; cnt--) { + if (cnt) + udelay(100); + else + rval = QLA_FUNCTION_TIMEOUT; + } + } else { + rd_reg_word(®->hccr); /* PCI Posting. 
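The read back flushes the posted HCCR_PAUSE_RISC write before the delay.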
*/ + udelay(10); + } + + if (rval == QLA_SUCCESS) { + dmp_reg = ®->flash_address; + for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++) + fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg)); + + dmp_reg = ®->u.isp2300.req_q_in; + for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_host_reg); + cnt++, dmp_reg++) + fw->risc_host_reg[cnt] = htons(rd_reg_word(dmp_reg)); + + dmp_reg = ®->u.isp2300.mailbox0; + for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); + cnt++, dmp_reg++) + fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg)); + + wrt_reg_word(®->ctrl_status, 0x40); + qla2xxx_read_window(reg, 32, fw->resp_dma_reg); + + wrt_reg_word(®->ctrl_status, 0x50); + qla2xxx_read_window(reg, 48, fw->dma_reg); + + wrt_reg_word(®->ctrl_status, 0x00); + dmp_reg = ®->risc_hw; + for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); + cnt++, dmp_reg++) + fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg)); + + wrt_reg_word(®->pcr, 0x2000); + qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); + + wrt_reg_word(®->pcr, 0x2200); + qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); + + wrt_reg_word(®->pcr, 0x2400); + qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); + + wrt_reg_word(®->pcr, 0x2600); + qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); + + wrt_reg_word(®->pcr, 0x2800); + qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); + + wrt_reg_word(®->pcr, 0x2A00); + qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); + + wrt_reg_word(®->pcr, 0x2C00); + qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); + + wrt_reg_word(®->pcr, 0x2E00); + qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); + + wrt_reg_word(®->ctrl_status, 0x10); + qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg); + + wrt_reg_word(®->ctrl_status, 0x20); + qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); + + wrt_reg_word(®->ctrl_status, 0x30); + qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); + + /* Reset RISC. */ + wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); + for (cnt = 0; cnt < 30000; cnt++) { + if ((rd_reg_word(®->ctrl_status) & + CSR_ISP_SOFT_RESET) == 0) + break; + + udelay(10); + } + } + + if (!IS_QLA2300(ha)) { + for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 && + rval == QLA_SUCCESS; cnt--) { + if (cnt) + udelay(100); + else + rval = QLA_FUNCTION_TIMEOUT; + } + } + + /* Get RISC SRAM. */ + if (rval == QLA_SUCCESS) + rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram, + ARRAY_SIZE(fw->risc_ram), &nxt); + + /* Get stack SRAM. */ + if (rval == QLA_SUCCESS) + rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram, + ARRAY_SIZE(fw->stack_ram), &nxt); + + /* Get data SRAM. */ + if (rval == QLA_SUCCESS) + rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram, + ha->fw_memory_size - 0x11000 + 1, &nxt); + + if (rval == QLA_SUCCESS) + qla2xxx_copy_queues(ha, nxt); + + qla2xxx_dump_post_process(base_vha, rval); +} + +/** + * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware. 
+ * @vha: HA context + */ +void +qla2100_fw_dump(scsi_qla_host_t *vha) +{ + int rval; + uint32_t cnt, timer; + uint16_t risc_address = 0; + uint16_t mb0 = 0, mb2 = 0; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + __le16 __iomem *dmp_reg; + struct qla2100_fw_dump *fw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + + lockdep_assert_held(&ha->hardware_lock); + + if (!ha->fw_dump) { + ql_log(ql_log_warn, vha, 0xd004, + "No buffer available for dump.\n"); + return; + } + + if (ha->fw_dumped) { + ql_log(ql_log_warn, vha, 0xd005, + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", + ha->fw_dump); + return; + } + fw = &ha->fw_dump->isp.isp21; + qla2xxx_prep_dump(ha, ha->fw_dump); + + rval = QLA_SUCCESS; + fw->hccr = htons(rd_reg_word(®->hccr)); + + /* Pause RISC. */ + wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); + for (cnt = 30000; (rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0 && + rval == QLA_SUCCESS; cnt--) { + if (cnt) + udelay(100); + else + rval = QLA_FUNCTION_TIMEOUT; + } + if (rval == QLA_SUCCESS) { + dmp_reg = ®->flash_address; + for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++) + fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg)); + + dmp_reg = ®->u.isp2100.mailbox0; + for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) { + if (cnt == 8) + dmp_reg = ®->u_end.isp2200.mailbox8; + + fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg)); + } + + dmp_reg = ®->u.isp2100.unused_2[0]; + for (cnt = 0; cnt < ARRAY_SIZE(fw->dma_reg); cnt++, dmp_reg++) + fw->dma_reg[cnt] = htons(rd_reg_word(dmp_reg)); + + wrt_reg_word(®->ctrl_status, 0x00); + dmp_reg = ®->risc_hw; + for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++) + fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg)); + + wrt_reg_word(®->pcr, 0x2000); + qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); + + wrt_reg_word(®->pcr, 0x2100); + qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); + + wrt_reg_word(®->pcr, 0x2200); + qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); + + wrt_reg_word(®->pcr, 0x2300); + qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); + + wrt_reg_word(®->pcr, 0x2400); + qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); + + wrt_reg_word(®->pcr, 0x2500); + qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); + + wrt_reg_word(®->pcr, 0x2600); + qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); + + wrt_reg_word(®->pcr, 0x2700); + qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); + + wrt_reg_word(®->ctrl_status, 0x10); + qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg); + + wrt_reg_word(®->ctrl_status, 0x20); + qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); + + wrt_reg_word(®->ctrl_status, 0x30); + qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); + + /* Reset the ISP. */ + wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); + } + + for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 && + rval == QLA_SUCCESS; cnt--) { + if (cnt) + udelay(100); + else + rval = QLA_FUNCTION_TIMEOUT; + } + + /* Pause RISC. */ + if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) && + (rd_reg_word(®->mctr) & (BIT_1 | BIT_0)) != 0))) { + + wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); + for (cnt = 30000; + (rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0 && + rval == QLA_SUCCESS; cnt--) { + if (cnt) + udelay(100); + else + rval = QLA_FUNCTION_TIMEOUT; + } + if (rval == QLA_SUCCESS) { + /* Set memory configuration and timing. */ + if (IS_QLA2100(ha)) + wrt_reg_word(®->mctr, 0xf1); + else + wrt_reg_word(®->mctr, 0xf2); + rd_reg_word(®->mctr); /* PCI Posting. 
*/ + + /* Release RISC. */ + wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); + } + } + + if (rval == QLA_SUCCESS) { + /* Get RISC SRAM. */ + risc_address = 0x1000; + WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD); + clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + } + for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_ram) && rval == QLA_SUCCESS; + cnt++, risc_address++) { + WRT_MAILBOX_REG(ha, reg, 1, risc_address); + wrt_reg_word(®->hccr, HCCR_SET_HOST_INT); + + for (timer = 6000000; timer != 0; timer--) { + /* Check for pending interrupts. */ + if (rd_reg_word(®->istatus) & ISR_RISC_INT) { + if (rd_reg_word(®->semaphore) & BIT_0) { + set_bit(MBX_INTERRUPT, + &ha->mbx_cmd_flags); + + mb0 = RD_MAILBOX_REG(ha, reg, 0); + mb2 = RD_MAILBOX_REG(ha, reg, 2); + + wrt_reg_word(®->semaphore, 0); + wrt_reg_word(®->hccr, + HCCR_CLR_RISC_INT); + rd_reg_word(®->hccr); + break; + } + wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); + rd_reg_word(®->hccr); + } + udelay(5); + } + + if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { + rval = mb0 & MBS_MASK; + fw->risc_ram[cnt] = htons(mb2); + } else { + rval = QLA_FUNCTION_FAILED; + } + } + + if (rval == QLA_SUCCESS) + qla2xxx_copy_queues(ha, &fw->queue_dump[0]); + + qla2xxx_dump_post_process(base_vha, rval); +} + +void +qla24xx_fw_dump(scsi_qla_host_t *vha) +{ + int rval; + uint32_t cnt; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + __le32 __iomem *dmp_reg; + __be32 *iter_reg; + __le16 __iomem *mbx_reg; + struct qla24xx_fw_dump *fw; + void *nxt; + void *nxt_chain; + __be32 *last_chain = NULL; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + + lockdep_assert_held(&ha->hardware_lock); + + if (IS_P3P_TYPE(ha)) + return; + + ha->fw_dump_cap_flags = 0; + + if (!ha->fw_dump) { + ql_log(ql_log_warn, vha, 0xd006, + "No buffer available for dump.\n"); + return; + } + + if (ha->fw_dumped) { + ql_log(ql_log_warn, vha, 0xd007, + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", + ha->fw_dump); + return; + } + QLA_FW_STOPPED(ha); + fw = &ha->fw_dump->isp.isp24; + qla2xxx_prep_dump(ha, ha->fw_dump); + + fw->host_status = htonl(rd_reg_dword(®->host_status)); + + /* + * Pause RISC. No need to track timeout, as resetting the chip + * is the right approach incase of pause timeout + */ + qla24xx_pause_risc(reg, ha); + + /* Host interface registers. */ + dmp_reg = ®->flash_addr; + for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) + fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg)); + + /* Disable interrupts. */ + wrt_reg_dword(®->ictrl, 0); + rd_reg_dword(®->ictrl); + + /* Shadow registers. */ + wrt_reg_dword(®->iobase_addr, 0x0F70); + rd_reg_dword(®->iobase_addr); + wrt_reg_dword(®->iobase_select, 0xB0000000); + fw->shadow_reg[0] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0100000); + fw->shadow_reg[1] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0200000); + fw->shadow_reg[2] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0300000); + fw->shadow_reg[3] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0400000); + fw->shadow_reg[4] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0500000); + fw->shadow_reg[5] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0600000); + fw->shadow_reg[6] = htonl(rd_reg_dword(®->iobase_sdata)); + + /* Mailbox registers. 
*/ + mbx_reg = ®->mailbox0; + for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) + fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); + + /* Transfer sequence registers. */ + iter_reg = fw->xseq_gp_reg; + iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); + qla24xx_read_window(reg, 0xBF70, 16, iter_reg); + + qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg); + qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); + + /* Receive sequence registers. */ + iter_reg = fw->rseq_gp_reg; + iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); + qla24xx_read_window(reg, 0xFF70, 16, iter_reg); + + qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg); + qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); + qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); + + /* Command DMA registers. */ + qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); + + /* Queues. */ + iter_reg = fw->req0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->resp0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->req1_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + /* Transmit DMA registers. */ + iter_reg = fw->xmt0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); + qla24xx_read_window(reg, 0x7610, 16, iter_reg); + + iter_reg = fw->xmt1_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); + qla24xx_read_window(reg, 0x7630, 16, iter_reg); + + iter_reg = fw->xmt2_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); + qla24xx_read_window(reg, 0x7650, 16, iter_reg); + + iter_reg = fw->xmt3_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); + qla24xx_read_window(reg, 0x7670, 16, iter_reg); + + iter_reg = fw->xmt4_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); + qla24xx_read_window(reg, 0x7690, 16, iter_reg); + + qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); + + /* Receive DMA registers. */ + iter_reg = fw->rcvt0_data_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); + qla24xx_read_window(reg, 0x7710, 16, iter_reg); + + iter_reg = fw->rcvt1_data_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); + qla24xx_read_window(reg, 0x7730, 16, iter_reg); + + /* RISC registers. 
*/ + iter_reg = fw->risc_gp_reg; + iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); + qla24xx_read_window(reg, 0x0F70, 16, iter_reg); + + /* Local memory controller registers. */ + iter_reg = fw->lmc_reg; + iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); + qla24xx_read_window(reg, 0x3060, 16, iter_reg); + + /* Fibre Protocol Module registers. */ + iter_reg = fw->fpm_hdw_reg; + iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); + qla24xx_read_window(reg, 0x40B0, 16, iter_reg); + + /* Frame Buffer registers. */ + iter_reg = fw->fb_hdw_reg; + iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); + qla24xx_read_window(reg, 0x61B0, 16, iter_reg); + + rval = qla24xx_soft_reset(ha); + if (rval != QLA_SUCCESS) + goto qla24xx_fw_dump_failed_0; + + rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), + &nxt); + if (rval != QLA_SUCCESS) + goto qla24xx_fw_dump_failed_0; + + nxt = qla2xxx_copy_queues(ha, nxt); + + qla24xx_copy_eft(ha, nxt); + + nxt_chain = (void *)ha->fw_dump + ha->chain_offset; + nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); + if (last_chain) { + ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); + *last_chain |= htonl(DUMP_CHAIN_LAST); + } + + /* Adjust valid length. 
*/ + ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); + +qla24xx_fw_dump_failed_0: + qla2xxx_dump_post_process(base_vha, rval); +} + +void +qla25xx_fw_dump(scsi_qla_host_t *vha) +{ + int rval; + uint32_t cnt; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + __le32 __iomem *dmp_reg; + __be32 *iter_reg; + __le16 __iomem *mbx_reg; + struct qla25xx_fw_dump *fw; + void *nxt, *nxt_chain; + __be32 *last_chain = NULL; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + + lockdep_assert_held(&ha->hardware_lock); + + ha->fw_dump_cap_flags = 0; + + if (!ha->fw_dump) { + ql_log(ql_log_warn, vha, 0xd008, + "No buffer available for dump.\n"); + return; + } + + if (ha->fw_dumped) { + ql_log(ql_log_warn, vha, 0xd009, + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", + ha->fw_dump); + return; + } + QLA_FW_STOPPED(ha); + fw = &ha->fw_dump->isp.isp25; + qla2xxx_prep_dump(ha, ha->fw_dump); + ha->fw_dump->version = htonl(2); + + fw->host_status = htonl(rd_reg_dword(®->host_status)); + + /* + * Pause RISC. No need to track timeout, as resetting the chip + * is the right approach incase of pause timeout + */ + qla24xx_pause_risc(reg, ha); + + /* Host/Risc registers. */ + iter_reg = fw->host_risc_reg; + iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); + qla24xx_read_window(reg, 0x7010, 16, iter_reg); + + /* PCIe registers. */ + wrt_reg_dword(®->iobase_addr, 0x7C00); + rd_reg_dword(®->iobase_addr); + wrt_reg_dword(®->iobase_window, 0x01); + dmp_reg = ®->iobase_c4; + fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg)); + dmp_reg++; + fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg)); + dmp_reg++; + fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg)); + fw->pcie_regs[3] = htonl(rd_reg_dword(®->iobase_window)); + + wrt_reg_dword(®->iobase_window, 0x00); + rd_reg_dword(®->iobase_window); + + /* Host interface registers. */ + dmp_reg = ®->flash_addr; + for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) + fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg)); + + /* Disable interrupts. */ + wrt_reg_dword(®->ictrl, 0); + rd_reg_dword(®->ictrl); + + /* Shadow registers. */ + wrt_reg_dword(®->iobase_addr, 0x0F70); + rd_reg_dword(®->iobase_addr); + wrt_reg_dword(®->iobase_select, 0xB0000000); + fw->shadow_reg[0] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0100000); + fw->shadow_reg[1] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0200000); + fw->shadow_reg[2] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0300000); + fw->shadow_reg[3] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0400000); + fw->shadow_reg[4] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0500000); + fw->shadow_reg[5] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0600000); + fw->shadow_reg[6] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0700000); + fw->shadow_reg[7] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0800000); + fw->shadow_reg[8] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0900000); + fw->shadow_reg[9] = htonl(rd_reg_dword(®->iobase_sdata)); + + wrt_reg_dword(®->iobase_select, 0xB0A00000); + fw->shadow_reg[10] = htonl(rd_reg_dword(®->iobase_sdata)); + + /* RISC I/O register. 
*/ + wrt_reg_dword(®->iobase_addr, 0x0010); + fw->risc_io_reg = htonl(rd_reg_dword(®->iobase_window)); + + /* Mailbox registers. */ + mbx_reg = ®->mailbox0; + for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) + fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); + + /* Transfer sequence registers. */ + iter_reg = fw->xseq_gp_reg; + iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); + qla24xx_read_window(reg, 0xBF70, 16, iter_reg); + + iter_reg = fw->xseq_0_reg; + iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); + qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); + + qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); + + /* Receive sequence registers. */ + iter_reg = fw->rseq_gp_reg; + iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); + qla24xx_read_window(reg, 0xFF70, 16, iter_reg); + + iter_reg = fw->rseq_0_reg; + iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); + qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); + + qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); + qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); + + /* Auxiliary sequence registers. */ + iter_reg = fw->aseq_gp_reg; + iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); + qla24xx_read_window(reg, 0xB070, 16, iter_reg); + + iter_reg = fw->aseq_0_reg; + iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); + qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); + + qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); + qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); + + /* Command DMA registers. */ + qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); + + /* Queues. */ + iter_reg = fw->req0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->resp0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->req1_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + /* Transmit DMA registers. 
*/ + iter_reg = fw->xmt0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); + qla24xx_read_window(reg, 0x7610, 16, iter_reg); + + iter_reg = fw->xmt1_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); + qla24xx_read_window(reg, 0x7630, 16, iter_reg); + + iter_reg = fw->xmt2_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); + qla24xx_read_window(reg, 0x7650, 16, iter_reg); + + iter_reg = fw->xmt3_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); + qla24xx_read_window(reg, 0x7670, 16, iter_reg); + + iter_reg = fw->xmt4_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); + qla24xx_read_window(reg, 0x7690, 16, iter_reg); + + qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); + + /* Receive DMA registers. */ + iter_reg = fw->rcvt0_data_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); + qla24xx_read_window(reg, 0x7710, 16, iter_reg); + + iter_reg = fw->rcvt1_data_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); + qla24xx_read_window(reg, 0x7730, 16, iter_reg); + + /* RISC registers. */ + iter_reg = fw->risc_gp_reg; + iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); + qla24xx_read_window(reg, 0x0F70, 16, iter_reg); + + /* Local memory controller registers. */ + iter_reg = fw->lmc_reg; + iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); + qla24xx_read_window(reg, 0x3070, 16, iter_reg); + + /* Fibre Protocol Module registers. */ + iter_reg = fw->fpm_hdw_reg; + iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); + qla24xx_read_window(reg, 0x40B0, 16, iter_reg); + + /* Frame Buffer registers. 
*/ + iter_reg = fw->fb_hdw_reg; + iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); + qla24xx_read_window(reg, 0x6F00, 16, iter_reg); + + /* Multi queue registers */ + nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, + &last_chain); + + rval = qla24xx_soft_reset(ha); + if (rval != QLA_SUCCESS) + goto qla25xx_fw_dump_failed_0; + + rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), + &nxt); + if (rval != QLA_SUCCESS) + goto qla25xx_fw_dump_failed_0; + + nxt = qla2xxx_copy_queues(ha, nxt); + + qla24xx_copy_eft(ha, nxt); + + /* Chain entries -- started with MQ. */ + nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); + nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); + nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); + nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain); + if (last_chain) { + ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); + *last_chain |= htonl(DUMP_CHAIN_LAST); + } + + /* Adjust valid length. */ + ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); + +qla25xx_fw_dump_failed_0: + qla2xxx_dump_post_process(base_vha, rval); +} + +void +qla81xx_fw_dump(scsi_qla_host_t *vha) +{ + int rval; + uint32_t cnt; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + __le32 __iomem *dmp_reg; + __be32 *iter_reg; + __le16 __iomem *mbx_reg; + struct qla81xx_fw_dump *fw; + void *nxt, *nxt_chain; + __be32 *last_chain = NULL; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + + lockdep_assert_held(&ha->hardware_lock); + + ha->fw_dump_cap_flags = 0; + + if (!ha->fw_dump) { + ql_log(ql_log_warn, vha, 0xd00a, + "No buffer available for dump.\n"); + return; + } + + if (ha->fw_dumped) { + ql_log(ql_log_warn, vha, 0xd00b, + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", + ha->fw_dump); + return; + } + fw = &ha->fw_dump->isp.isp81; + qla2xxx_prep_dump(ha, ha->fw_dump); + + fw->host_status = htonl(rd_reg_dword(®->host_status)); + + /* + * Pause RISC. No need to track timeout, as resetting the chip + * is the right approach incase of pause timeout + */ + qla24xx_pause_risc(reg, ha); + + /* Host/Risc registers. */ + iter_reg = fw->host_risc_reg; + iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); + qla24xx_read_window(reg, 0x7010, 16, iter_reg); + + /* PCIe registers. */ + wrt_reg_dword(®->iobase_addr, 0x7C00); + rd_reg_dword(®->iobase_addr); + wrt_reg_dword(®->iobase_window, 0x01); + dmp_reg = ®->iobase_c4; + fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg)); + dmp_reg++; + fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg)); + dmp_reg++; + fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg)); + fw->pcie_regs[3] = htonl(rd_reg_dword(®->iobase_window)); + + wrt_reg_dword(®->iobase_window, 0x00); + rd_reg_dword(®->iobase_window); + + /* Host interface registers. 
*/
+	dmp_reg = &reg->flash_addr;
+	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
+
+	/* Disable interrupts. */
+	wrt_reg_dword(&reg->ictrl, 0);
+	rd_reg_dword(&reg->ictrl);
+
+	/* Shadow registers. */
+	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+	rd_reg_dword(&reg->iobase_addr);
+	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0700000);
+	fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0800000);
+	fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0900000);
+	fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
+	fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	/* RISC I/O register. */
+	wrt_reg_dword(&reg->iobase_addr, 0x0010);
+	fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
+
+	/* Mailbox registers. */
+	mbx_reg = &reg->mailbox0;
+	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
+
+	/* Transfer sequence registers. */
+	iter_reg = fw->xseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
+
+	iter_reg = fw->xseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
+
+	/* Receive sequence registers. */
+	iter_reg = fw->rseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
+
+	iter_reg = fw->rseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
+	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
+
+	/* Auxiliary sequence registers.
*/ + iter_reg = fw->aseq_gp_reg; + iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); + qla24xx_read_window(reg, 0xB070, 16, iter_reg); + + iter_reg = fw->aseq_0_reg; + iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); + qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); + + qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); + qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); + + /* Command DMA registers. */ + qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); + + /* Queues. */ + iter_reg = fw->req0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->resp0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->req1_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + /* Transmit DMA registers. */ + iter_reg = fw->xmt0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); + qla24xx_read_window(reg, 0x7610, 16, iter_reg); + + iter_reg = fw->xmt1_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); + qla24xx_read_window(reg, 0x7630, 16, iter_reg); + + iter_reg = fw->xmt2_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); + qla24xx_read_window(reg, 0x7650, 16, iter_reg); + + iter_reg = fw->xmt3_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); + qla24xx_read_window(reg, 0x7670, 16, iter_reg); + + iter_reg = fw->xmt4_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); + qla24xx_read_window(reg, 0x7690, 16, iter_reg); + + qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); + + /* Receive DMA registers. */ + iter_reg = fw->rcvt0_data_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); + qla24xx_read_window(reg, 0x7710, 16, iter_reg); + + iter_reg = fw->rcvt1_data_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); + qla24xx_read_window(reg, 0x7730, 16, iter_reg); + + /* RISC registers. */ + iter_reg = fw->risc_gp_reg; + iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); + qla24xx_read_window(reg, 0x0F70, 16, iter_reg); + + /* Local memory controller registers. 
*/ + iter_reg = fw->lmc_reg; + iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); + qla24xx_read_window(reg, 0x3070, 16, iter_reg); + + /* Fibre Protocol Module registers. */ + iter_reg = fw->fpm_hdw_reg; + iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg); + qla24xx_read_window(reg, 0x40D0, 16, iter_reg); + + /* Frame Buffer registers. */ + iter_reg = fw->fb_hdw_reg; + iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg); + qla24xx_read_window(reg, 0x6F00, 16, iter_reg); + + /* Multi queue registers */ + nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, + &last_chain); + + rval = qla24xx_soft_reset(ha); + if (rval != QLA_SUCCESS) + goto qla81xx_fw_dump_failed_0; + + rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), + &nxt); + if (rval != QLA_SUCCESS) + goto qla81xx_fw_dump_failed_0; + + nxt = qla2xxx_copy_queues(ha, nxt); + + qla24xx_copy_eft(ha, nxt); + + /* Chain entries -- started with MQ. */ + nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); + nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); + nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); + nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain); + nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain); + if (last_chain) { + ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); + *last_chain |= htonl(DUMP_CHAIN_LAST); + } + + /* Adjust valid length. 
*/
+	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
+qla81xx_fw_dump_failed_0:
+	qla2xxx_dump_post_process(base_vha, rval);
+}
+
+void
+qla83xx_fw_dump(scsi_qla_host_t *vha)
+{
+	int rval;
+	uint32_t cnt;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	__le32 __iomem *dmp_reg;
+	__be32 *iter_reg;
+	__le16 __iomem *mbx_reg;
+	struct qla83xx_fw_dump *fw;
+	void *nxt, *nxt_chain;
+	__be32 *last_chain = NULL;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+	lockdep_assert_held(&ha->hardware_lock);
+
+	ha->fw_dump_cap_flags = 0;
+
+	if (!ha->fw_dump) {
+		ql_log(ql_log_warn, vha, 0xd00c,
+		    "No buffer available for dump!!!\n");
+		return;
+	}
+
+	if (ha->fw_dumped) {
+		ql_log(ql_log_warn, vha, 0xd00d,
+		    "Firmware has been previously dumped (%p) -- ignoring "
+		    "request...\n", ha->fw_dump);
+		return;
+	}
+	QLA_FW_STOPPED(ha);
+	fw = &ha->fw_dump->isp.isp83;
+	qla2xxx_prep_dump(ha, ha->fw_dump);
+
+	fw->host_status = htonl(rd_reg_dword(&reg->host_status));
+
+	/*
+	 * Pause RISC. No need to track timeout, as resetting the chip
+	 * is the right approach in case of pause timeout
+	 */
+	qla24xx_pause_risc(reg, ha);
+
+	wrt_reg_dword(&reg->iobase_addr, 0x6000);
+	dmp_reg = &reg->iobase_window;
+	rd_reg_dword(dmp_reg);
+	wrt_reg_dword(dmp_reg, 0);
+
+	dmp_reg = &reg->unused_4_1[0];
+	rd_reg_dword(dmp_reg);
+	wrt_reg_dword(dmp_reg, 0);
+
+	wrt_reg_dword(&reg->iobase_addr, 0x6010);
+	dmp_reg = &reg->unused_4_1[2];
+	rd_reg_dword(dmp_reg);
+	wrt_reg_dword(dmp_reg, 0);
+
+	/* select PCR and disable ecc checking and correction */
+	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+	rd_reg_dword(&reg->iobase_addr);
+	wrt_reg_dword(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
+
+	/* Host/Risc registers. */
+	iter_reg = fw->host_risc_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7040, 16, iter_reg);
+
+	/* PCIe registers. */
+	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+	rd_reg_dword(&reg->iobase_addr);
+	wrt_reg_dword(&reg->iobase_window, 0x01);
+	dmp_reg = &reg->iobase_c4;
+	fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
+	dmp_reg++;
+	fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
+	dmp_reg++;
+	fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
+	fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
+
+	wrt_reg_dword(&reg->iobase_window, 0x00);
+	rd_reg_dword(&reg->iobase_window);
+
+	/* Host interface registers. */
+	dmp_reg = &reg->flash_addr;
+	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
+
+	/* Disable interrupts. */
+	wrt_reg_dword(&reg->ictrl, 0);
+	rd_reg_dword(&reg->ictrl);
+
+	/* Shadow registers. */
+	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+	rd_reg_dword(&reg->iobase_addr);
+	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0700000);
+	fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0800000);
+	fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0900000);
+	fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
+	fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
+
+	/* RISC I/O register. */
+	wrt_reg_dword(&reg->iobase_addr, 0x0010);
+	fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
+
+	/* Mailbox registers. */
+	mbx_reg = &reg->mailbox0;
+	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
+
+	/* Transfer sequence registers. */
+	iter_reg = fw->xseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
+
+	iter_reg = fw->xseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
+
+	qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
+
+	/* Receive sequence registers.
*/ + iter_reg = fw->rseq_gp_reg; + iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); + qla24xx_read_window(reg, 0xFF70, 16, iter_reg); + + iter_reg = fw->rseq_0_reg; + iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); + qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); + + qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); + qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); + qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg); + + /* Auxiliary sequence registers. */ + iter_reg = fw->aseq_gp_reg; + iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg); + qla24xx_read_window(reg, 0xB170, 16, iter_reg); + + iter_reg = fw->aseq_0_reg; + iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); + qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); + + qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); + qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); + qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg); + + /* Command DMA registers. */ + iter_reg = fw->cmd_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg); + qla24xx_read_window(reg, 0x71F0, 16, iter_reg); + + /* Queues. 
*/ + iter_reg = fw->req0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->resp0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->req1_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + /* Transmit DMA registers. */ + iter_reg = fw->xmt0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); + qla24xx_read_window(reg, 0x7610, 16, iter_reg); + + iter_reg = fw->xmt1_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); + qla24xx_read_window(reg, 0x7630, 16, iter_reg); + + iter_reg = fw->xmt2_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); + qla24xx_read_window(reg, 0x7650, 16, iter_reg); + + iter_reg = fw->xmt3_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); + qla24xx_read_window(reg, 0x7670, 16, iter_reg); + + iter_reg = fw->xmt4_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); + qla24xx_read_window(reg, 0x7690, 16, iter_reg); + + qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); + + /* Receive DMA registers. */ + iter_reg = fw->rcvt0_data_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); + qla24xx_read_window(reg, 0x7710, 16, iter_reg); + + iter_reg = fw->rcvt1_data_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); + qla24xx_read_window(reg, 0x7730, 16, iter_reg); + + /* RISC registers. */ + iter_reg = fw->risc_gp_reg; + iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); + qla24xx_read_window(reg, 0x0F70, 16, iter_reg); + + /* Local memory controller registers. */ + iter_reg = fw->lmc_reg; + iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); + qla24xx_read_window(reg, 0x3070, 16, iter_reg); + + /* Fibre Protocol Module registers. 
*/ + iter_reg = fw->fpm_hdw_reg; + iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg); + qla24xx_read_window(reg, 0x40F0, 16, iter_reg); + + /* RQ0 Array registers. */ + iter_reg = fw->rq0_array_reg; + iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg); + qla24xx_read_window(reg, 0x5CF0, 16, iter_reg); + + /* RQ1 Array registers. */ + iter_reg = fw->rq1_array_reg; + iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg); + qla24xx_read_window(reg, 0x5DF0, 16, iter_reg); + + /* RP0 Array registers. 
*/ + iter_reg = fw->rp0_array_reg; + iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg); + qla24xx_read_window(reg, 0x5EF0, 16, iter_reg); + + /* RP1 Array registers. */ + iter_reg = fw->rp1_array_reg; + iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg); + qla24xx_read_window(reg, 0x5FF0, 16, iter_reg); + + iter_reg = fw->at0_array_reg; + iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg); + qla24xx_read_window(reg, 0x70F0, 16, iter_reg); + + /* I/O Queue Control registers. */ + qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg); + + /* Frame Buffer registers. 
*/ + iter_reg = fw->fb_hdw_reg; + iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg); + iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg); + qla24xx_read_window(reg, 0x6F00, 16, iter_reg); + + /* Multi queue registers */ + nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, + &last_chain); + + rval = qla24xx_soft_reset(ha); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xd00e, + "SOFT RESET FAILED, forcing continuation of dump!!!\n"); + rval = QLA_SUCCESS; + + ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n"); + + wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET); + rd_reg_dword(®->hccr); + + wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE); + rd_reg_dword(®->hccr); + + wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_RESET); + rd_reg_dword(®->hccr); + + for (cnt = 30000; cnt && (rd_reg_word(®->mailbox0)); cnt--) + udelay(5); + + if (!cnt) { + nxt = fw->code_ram; + nxt += sizeof(fw->code_ram); + nxt += (ha->fw_memory_size - 0x100000 + 1); + goto copy_queue; + } else { + set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); + ql_log(ql_log_warn, vha, 0xd010, + "bigger hammer success?\n"); + } + } + + rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), + &nxt); + if (rval != QLA_SUCCESS) + goto qla83xx_fw_dump_failed_0; + +copy_queue: + nxt = qla2xxx_copy_queues(ha, nxt); + + qla24xx_copy_eft(ha, nxt); + + /* Chain entries -- started with MQ. */ + nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); + nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); + nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); + nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain); + nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain); + if (last_chain) { + ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); + *last_chain |= htonl(DUMP_CHAIN_LAST); + } + + /* Adjust valid length. 
*/ + ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); + +qla83xx_fw_dump_failed_0: + qla2xxx_dump_post_process(base_vha, rval); +} + +/****************************************************************************/ +/* Driver Debug Functions. */ +/****************************************************************************/ + +/* Write the debug message prefix into @pbuf. */ +static void ql_dbg_prefix(char *pbuf, int pbuf_size, struct pci_dev *pdev, + const scsi_qla_host_t *vha, uint msg_id) +{ + if (vha) { + const struct pci_dev *pdev = vha->hw->pdev; + + /* []-:: */ + snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR, + dev_name(&(pdev->dev)), msg_id, vha->host_no); + } else if (pdev) { + snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR, + dev_name(&pdev->dev), msg_id); + } else { + /* []-: : */ + snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR, + "0000:00:00.0", msg_id); + } +} + +/* + * This function is for formatting and logging debug information. + * It is to be used when vha is available. It formats the message + * and logs it to the messages file. + * parameters: + * level: The level of the debug messages to be printed. + * If ql2xextended_error_logging value is correctly set, + * this message will appear in the messages file. + * vha: Pointer to the scsi_qla_host_t. + * id: This is a unique identifier for the level. It identifies the + * part of the code from where the message originated. + * msg: The message to be displayed. + */ +void +ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char pbuf[64]; + + ql_ktrace(1, level, pbuf, NULL, vha, id, fmt); + + if (!ql_mask_match(level)) + return; + + if (!pbuf[0]) /* set by ql_ktrace */ + ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + pr_warn("%s%pV", pbuf, &vaf); + + va_end(va); + +} + +/* + * This function is for formatting and logging debug information. + * It is to be used when vha is not available and pci is available, + * i.e., before host allocation. It formats the message and logs it + * to the messages file. + * parameters: + * level: The level of the debug messages to be printed. + * If ql2xextended_error_logging value is correctly set, + * this message will appear in the messages file. + * pdev: Pointer to the struct pci_dev. + * id: This is a unique id for the level. It identifies the part + * of the code from where the message originated. + * msg: The message to be displayed. + */ +void +ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char pbuf[128]; + + if (pdev == NULL) + return; + + ql_ktrace(1, level, pbuf, pdev, NULL, id, fmt); + + if (!ql_mask_match(level)) + return; + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!pbuf[0]) /* set by ql_ktrace */ + ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL, + id + ql_dbg_offset); + pr_warn("%s%pV", pbuf, &vaf); + + va_end(va); +} + +/* + * This function is for formatting and logging log messages. + * It is to be used when vha is available. It formats the message + * and logs it to the messages file. All the messages will be logged + * irrespective of value of ql2xextended_error_logging. + * parameters: + * level: The level of the log messages to be printed in the + * messages file. + * vha: Pointer to the scsi_qla_host_t + * id: This is a unique id for the level. 
It identifies the + part of the code from where the message originated. + * msg: The message to be displayed. + */ +void +ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char pbuf[128]; + + if (level > ql_errlev) + return; + + ql_ktrace(0, level, pbuf, NULL, vha, id, fmt); + + if (!pbuf[0]) /* set by ql_ktrace */ + ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + switch (level) { + case ql_log_fatal: /* FATAL LOG */ + pr_crit("%s%pV", pbuf, &vaf); + break; + case ql_log_warn: + pr_err("%s%pV", pbuf, &vaf); + break; + case ql_log_info: + pr_warn("%s%pV", pbuf, &vaf); + break; + default: + pr_info("%s%pV", pbuf, &vaf); + break; + } + + va_end(va); +} + +/* + * This function is for formatting and logging log messages. + * It is to be used when vha is not available and pci is available, + * i.e., before host allocation. It formats the message and logs + * it to the messages file. All the messages are logged irrespective + * of the value of ql2xextended_error_logging. + * parameters: + * level: The level of the log messages to be printed in the + * messages file. + * pdev: Pointer to the struct pci_dev. + * id: This is a unique id for the level. It identifies the + * part of the code from where the message originated. + * msg: The message to be displayed. + */ +void +ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char pbuf[128]; + + if (pdev == NULL) + return; + if (level > ql_errlev) + return; + + ql_ktrace(0, level, pbuf, pdev, NULL, id, fmt); + + if (!pbuf[0]) /* set by ql_ktrace */ + ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL, id); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + switch (level) { + case ql_log_fatal: /* FATAL LOG */ + pr_crit("%s%pV", pbuf, &vaf); + break; + case ql_log_warn: + pr_err("%s%pV", pbuf, &vaf); + break; + case ql_log_info: + pr_warn("%s%pV", pbuf, &vaf); + break; + default: + pr_info("%s%pV", pbuf, &vaf); + break; + } + + va_end(va); +} + +void +ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id) +{ + int i; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; + struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; + __le16 __iomem *mbx_reg; + + if (!ql_mask_match(level)) + return; + + if (IS_P3P_TYPE(ha)) + mbx_reg = &reg82->mailbox_in[0]; + else if (IS_FWI2_CAPABLE(ha)) + mbx_reg = &reg24->mailbox0; + else + mbx_reg = MAILBOX_REG(ha, reg, 0); + + ql_dbg(level, vha, id, "Mailbox registers:\n"); + for (i = 0; i < 6; i++, mbx_reg++) + ql_dbg(level, vha, id, + "mbox[%d] %#04x\n", i, rd_reg_word(mbx_reg)); +} + +void +ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf, + uint size) +{ + uint cnt; + + if (!ql_mask_match(level)) + return; + + ql_dbg(level, vha, id, + "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size); + ql_dbg(level, vha, id, + "----- -----------------------------------------------\n"); + for (cnt = 0; cnt < size; cnt += 16) { + ql_dbg(level, vha, id, "%04x: ", cnt); + print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, + buf + cnt, min(16U, size - cnt), false); + } +} + +/* + * This function is for formatting and logging log messages. + * It is to be used when vha is available. It formats the message + * and logs it to the messages file.
All the messages will be logged + * irrespective of value of ql2xextended_error_logging. + * parameters: + * level: The level of the log messages to be printed in the + * messages file. + * vha: Pointer to the scsi_qla_host_t + * id: This is a unique id for the level. It identifies the + * part of the code from where the message originated. + * msg: The message to be displayed. + */ +void +ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id, + const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char pbuf[128]; + + if (level > ql_errlev) + return; + + ql_ktrace(0, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt); + + if (!pbuf[0]) /* set by ql_ktrace */ + ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, + qpair ? qpair->vha : NULL, id); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + switch (level) { + case ql_log_fatal: /* FATAL LOG */ + pr_crit("%s%pV", pbuf, &vaf); + break; + case ql_log_warn: + pr_err("%s%pV", pbuf, &vaf); + break; + case ql_log_info: + pr_warn("%s%pV", pbuf, &vaf); + break; + default: + pr_info("%s%pV", pbuf, &vaf); + break; + } + + va_end(va); +} + +/* + * This function is for formatting and logging debug information. + * It is to be used when vha is available. It formats the message + * and logs it to the messages file. + * parameters: + * level: The level of the debug messages to be printed. + * If ql2xextended_error_logging value is correctly set, + * this message will appear in the messages file. + * vha: Pointer to the scsi_qla_host_t. + * id: This is a unique identifier for the level. It identifies the + * part of the code from where the message originated. + * msg: The message to be displayed. + */ +void +ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id, + const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char pbuf[128]; + + ql_ktrace(1, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt); + + if (!ql_mask_match(level)) + return; + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!pbuf[0]) /* set by ql_ktrace */ + ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, + qpair ? 
qpair->vha : NULL, id + ql_dbg_offset); + + pr_warn("%s%pV", pbuf, &vaf); + + va_end(va); + +} diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h new file mode 100644 index 000000000..54f0a4122 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -0,0 +1,431 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ + +#include "qla_def.h" + +/* + * Firmware Dump structure definition + */ + +struct qla2300_fw_dump { + __be16 hccr; + __be16 pbiu_reg[8]; + __be16 risc_host_reg[8]; + __be16 mailbox_reg[32]; + __be16 resp_dma_reg[32]; + __be16 dma_reg[48]; + __be16 risc_hdw_reg[16]; + __be16 risc_gp0_reg[16]; + __be16 risc_gp1_reg[16]; + __be16 risc_gp2_reg[16]; + __be16 risc_gp3_reg[16]; + __be16 risc_gp4_reg[16]; + __be16 risc_gp5_reg[16]; + __be16 risc_gp6_reg[16]; + __be16 risc_gp7_reg[16]; + __be16 frame_buf_hdw_reg[64]; + __be16 fpm_b0_reg[64]; + __be16 fpm_b1_reg[64]; + __be16 risc_ram[0xf800]; + __be16 stack_ram[0x1000]; + __be16 data_ram[1]; +}; + +struct qla2100_fw_dump { + __be16 hccr; + __be16 pbiu_reg[8]; + __be16 mailbox_reg[32]; + __be16 dma_reg[48]; + __be16 risc_hdw_reg[16]; + __be16 risc_gp0_reg[16]; + __be16 risc_gp1_reg[16]; + __be16 risc_gp2_reg[16]; + __be16 risc_gp3_reg[16]; + __be16 risc_gp4_reg[16]; + __be16 risc_gp5_reg[16]; + __be16 risc_gp6_reg[16]; + __be16 risc_gp7_reg[16]; + __be16 frame_buf_hdw_reg[16]; + __be16 fpm_b0_reg[64]; + __be16 fpm_b1_reg[64]; + __be16 risc_ram[0xf000]; + u8 queue_dump[]; +}; + +struct qla24xx_fw_dump { + __be32 host_status; + __be32 host_reg[32]; + __be32 shadow_reg[7]; + __be16 mailbox_reg[32]; + __be32 xseq_gp_reg[128]; + __be32 xseq_0_reg[16]; + __be32 xseq_1_reg[16]; + __be32 rseq_gp_reg[128]; + __be32 rseq_0_reg[16]; + __be32 rseq_1_reg[16]; + __be32 rseq_2_reg[16]; + __be32 cmd_dma_reg[16]; + __be32 req0_dma_reg[15]; + __be32 resp0_dma_reg[15]; + __be32 req1_dma_reg[15]; + __be32 xmt0_dma_reg[32]; + __be32 xmt1_dma_reg[32]; + __be32 xmt2_dma_reg[32]; + __be32 xmt3_dma_reg[32]; + __be32 xmt4_dma_reg[32]; + __be32 xmt_data_dma_reg[16]; + __be32 rcvt0_data_dma_reg[32]; + __be32 rcvt1_data_dma_reg[32]; + __be32 risc_gp_reg[128]; + __be32 lmc_reg[112]; + __be32 fpm_hdw_reg[192]; + __be32 fb_hdw_reg[176]; + __be32 code_ram[0x2000]; + __be32 ext_mem[1]; +}; + +struct qla25xx_fw_dump { + __be32 host_status; + __be32 host_risc_reg[32]; + __be32 pcie_regs[4]; + __be32 host_reg[32]; + __be32 shadow_reg[11]; + __be32 risc_io_reg; + __be16 mailbox_reg[32]; + __be32 xseq_gp_reg[128]; + __be32 xseq_0_reg[48]; + __be32 xseq_1_reg[16]; + __be32 rseq_gp_reg[128]; + __be32 rseq_0_reg[32]; + __be32 rseq_1_reg[16]; + __be32 rseq_2_reg[16]; + __be32 aseq_gp_reg[128]; + __be32 aseq_0_reg[32]; + __be32 aseq_1_reg[16]; + __be32 aseq_2_reg[16]; + __be32 cmd_dma_reg[16]; + __be32 req0_dma_reg[15]; + __be32 resp0_dma_reg[15]; + __be32 req1_dma_reg[15]; + __be32 xmt0_dma_reg[32]; + __be32 xmt1_dma_reg[32]; + __be32 xmt2_dma_reg[32]; + __be32 xmt3_dma_reg[32]; + __be32 xmt4_dma_reg[32]; + __be32 xmt_data_dma_reg[16]; + __be32 rcvt0_data_dma_reg[32]; + __be32 rcvt1_data_dma_reg[32]; + __be32 risc_gp_reg[128]; + __be32 lmc_reg[128]; + __be32 fpm_hdw_reg[192]; + __be32 fb_hdw_reg[192]; + __be32 code_ram[0x2000]; + __be32 ext_mem[1]; +}; + +struct qla81xx_fw_dump { + __be32 host_status; + __be32 host_risc_reg[32]; + __be32 pcie_regs[4]; + __be32 host_reg[32]; + __be32 shadow_reg[11]; + __be32 risc_io_reg; + __be16 mailbox_reg[32]; + __be32 xseq_gp_reg[128]; 
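/*
 * Illustrative usage sketch for the logging helpers defined in qla_dbg.c
 * above; this is an annotation only, and the message ids used here are
 * made-up placeholders rather than ids taken from the driver.
 *
 *	// Gated by the ql2xextended_error_logging mask (ql_mask_match()):
 *	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x00ff,
 *	    "NVRAM configuration read.\n");
 *
 *	// Always considered, subject only to ql_errlev; ql_log_warn is
 *	// emitted at KERN_ERR severity per the switch in ql_log():
 *	ql_log(ql_log_warn, vha, 0x00fe, "Login failed, loop id %#x.\n",
 *	    loop_id);
 */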
+ __be32 xseq_0_reg[48]; + __be32 xseq_1_reg[16]; + __be32 rseq_gp_reg[128]; + __be32 rseq_0_reg[32]; + __be32 rseq_1_reg[16]; + __be32 rseq_2_reg[16]; + __be32 aseq_gp_reg[128]; + __be32 aseq_0_reg[32]; + __be32 aseq_1_reg[16]; + __be32 aseq_2_reg[16]; + __be32 cmd_dma_reg[16]; + __be32 req0_dma_reg[15]; + __be32 resp0_dma_reg[15]; + __be32 req1_dma_reg[15]; + __be32 xmt0_dma_reg[32]; + __be32 xmt1_dma_reg[32]; + __be32 xmt2_dma_reg[32]; + __be32 xmt3_dma_reg[32]; + __be32 xmt4_dma_reg[32]; + __be32 xmt_data_dma_reg[16]; + __be32 rcvt0_data_dma_reg[32]; + __be32 rcvt1_data_dma_reg[32]; + __be32 risc_gp_reg[128]; + __be32 lmc_reg[128]; + __be32 fpm_hdw_reg[224]; + __be32 fb_hdw_reg[208]; + __be32 code_ram[0x2000]; + __be32 ext_mem[1]; +}; + +struct qla83xx_fw_dump { + __be32 host_status; + __be32 host_risc_reg[48]; + __be32 pcie_regs[4]; + __be32 host_reg[32]; + __be32 shadow_reg[11]; + __be32 risc_io_reg; + __be16 mailbox_reg[32]; + __be32 xseq_gp_reg[256]; + __be32 xseq_0_reg[48]; + __be32 xseq_1_reg[16]; + __be32 xseq_2_reg[16]; + __be32 rseq_gp_reg[256]; + __be32 rseq_0_reg[32]; + __be32 rseq_1_reg[16]; + __be32 rseq_2_reg[16]; + __be32 rseq_3_reg[16]; + __be32 aseq_gp_reg[256]; + __be32 aseq_0_reg[32]; + __be32 aseq_1_reg[16]; + __be32 aseq_2_reg[16]; + __be32 aseq_3_reg[16]; + __be32 cmd_dma_reg[64]; + __be32 req0_dma_reg[15]; + __be32 resp0_dma_reg[15]; + __be32 req1_dma_reg[15]; + __be32 xmt0_dma_reg[32]; + __be32 xmt1_dma_reg[32]; + __be32 xmt2_dma_reg[32]; + __be32 xmt3_dma_reg[32]; + __be32 xmt4_dma_reg[32]; + __be32 xmt_data_dma_reg[16]; + __be32 rcvt0_data_dma_reg[32]; + __be32 rcvt1_data_dma_reg[32]; + __be32 risc_gp_reg[128]; + __be32 lmc_reg[128]; + __be32 fpm_hdw_reg[256]; + __be32 rq0_array_reg[256]; + __be32 rq1_array_reg[256]; + __be32 rp0_array_reg[256]; + __be32 rp1_array_reg[256]; + __be32 queue_control_reg[16]; + __be32 fb_hdw_reg[432]; + __be32 at0_array_reg[128]; + __be32 code_ram[0x2400]; + __be32 ext_mem[1]; +}; + +#define EFT_NUM_BUFFERS 4 +#define EFT_BYTES_PER_BUFFER 0x4000 +#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS)) + +#define FCE_NUM_BUFFERS 64 +#define FCE_BYTES_PER_BUFFER 0x400 +#define FCE_SIZE ((FCE_BYTES_PER_BUFFER) * (FCE_NUM_BUFFERS)) +#define fce_calc_size(b) ((FCE_BYTES_PER_BUFFER) * (b)) + +struct qla2xxx_fce_chain { + __be32 type; + __be32 chain_size; + + __be32 size; + __be32 addr_l; + __be32 addr_h; + __be32 eregs[8]; +}; + +/* used by exchange off load and extended login offload */ +struct qla2xxx_offld_chain { + __be32 type; + __be32 chain_size; + + __be32 size; + __be32 reserved; + __be64 addr; +}; + +struct qla2xxx_mq_chain { + __be32 type; + __be32 chain_size; + + __be32 count; + __be32 qregs[4 * QLA_MQ_SIZE]; +}; + +struct qla2xxx_mqueue_header { + __be32 queue; +#define TYPE_REQUEST_QUEUE 0x1 +#define TYPE_RESPONSE_QUEUE 0x2 +#define TYPE_ATIO_QUEUE 0x3 + __be32 number; + __be32 size; +}; + +struct qla2xxx_mqueue_chain { + __be32 type; + __be32 chain_size; +}; + +#define DUMP_CHAIN_VARIANT 0x80000000 +#define DUMP_CHAIN_FCE 0x7FFFFAF0 +#define DUMP_CHAIN_MQ 0x7FFFFAF1 +#define DUMP_CHAIN_QUEUE 0x7FFFFAF2 +#define DUMP_CHAIN_EXLOGIN 0x7FFFFAF3 +#define DUMP_CHAIN_EXCHG 0x7FFFFAF4 +#define DUMP_CHAIN_LAST 0x80000000 + +struct qla2xxx_fw_dump { + uint8_t signature[4]; + __be32 version; + + __be32 fw_major_version; + __be32 fw_minor_version; + __be32 fw_subminor_version; + __be32 fw_attributes; + + __be32 vendor; + __be32 device; + __be32 subsystem_vendor; + __be32 subsystem_device; + + __be32 fixed_size; + __be32 
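/*
 * Worked numbers for the EFT/FCE sizing macros above (annotation only):
 * EFT_SIZE = EFT_BYTES_PER_BUFFER * EFT_NUM_BUFFERS = 0x4000 * 4 = 0x10000
 * bytes (64 KiB), and FCE_SIZE = FCE_BYTES_PER_BUFFER * FCE_NUM_BUFFERS =
 * 0x400 * 64 = 0x10000 bytes (64 KiB).  fce_calc_size() only scales by the
 * buffer count, so a compile-time sanity check could read, for example:
 *
 *	BUILD_BUG_ON(fce_calc_size(FCE_NUM_BUFFERS) != FCE_SIZE);
 */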
mem_size; + __be32 req_q_size; + __be32 rsp_q_size; + + __be32 eft_size; + __be32 eft_addr_l; + __be32 eft_addr_h; + + __be32 header_size; + + union { + struct qla2100_fw_dump isp21; + struct qla2300_fw_dump isp23; + struct qla24xx_fw_dump isp24; + struct qla25xx_fw_dump isp25; + struct qla81xx_fw_dump isp81; + struct qla83xx_fw_dump isp83; + } isp; +}; + +#define QL_MSGHDR "qla2xxx" +#define QL_DBG_DEFAULT1_MASK 0x1e600000 + +#define ql_log_fatal 0 /* display fatal errors */ +#define ql_log_warn 1 /* display critical errors */ +#define ql_log_info 2 /* display all recovered errors */ +#define ql_log_all 3 /* This value is only used by ql_errlev. + * No messages will use this value. + * This should be always highest value + * as compared to other log levels. + */ + +extern uint ql_errlev; + +void __attribute__((format (printf, 4, 5))) +ql_dbg(uint, scsi_qla_host_t *vha, uint, const char *fmt, ...); +void __attribute__((format (printf, 4, 5))) +ql_dbg_pci(uint, struct pci_dev *pdev, uint, const char *fmt, ...); +void __attribute__((format (printf, 4, 5))) +ql_dbg_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...); + + +void __attribute__((format (printf, 4, 5))) +ql_log(uint, scsi_qla_host_t *vha, uint, const char *fmt, ...); +void __attribute__((format (printf, 4, 5))) +ql_log_pci(uint, struct pci_dev *pdev, uint, const char *fmt, ...); + +void __attribute__((format (printf, 4, 5))) +ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...); + +/* Debug Levels */ +/* The 0x40000000 is the max value any debug level can have + * as ql2xextended_error_logging is of type signed int + */ +#define ql_dbg_init 0x40000000 /* Init Debug */ +#define ql_dbg_mbx 0x20000000 /* MBX Debug */ +#define ql_dbg_disc 0x10000000 /* Device Discovery Debug */ +#define ql_dbg_io 0x08000000 /* IO Tracing Debug */ +#define ql_dbg_dpc 0x04000000 /* DPC Thead Debug */ +#define ql_dbg_async 0x02000000 /* Async events Debug */ +#define ql_dbg_timer 0x01000000 /* Timer Debug */ +#define ql_dbg_user 0x00800000 /* User Space Interations Debug */ +#define ql_dbg_taskm 0x00400000 /* Task Management Debug */ +#define ql_dbg_aer 0x00200000 /* AER/EEH Debug */ +#define ql_dbg_multiq 0x00100000 /* MultiQ Debug */ +#define ql_dbg_p3p 0x00080000 /* P3P specific Debug */ +#define ql_dbg_vport 0x00040000 /* Virtual Port Debug */ +#define ql_dbg_buffer 0x00020000 /* For dumping the buffer/regs */ +#define ql_dbg_misc 0x00010000 /* For dumping everything that is not + * not covered by upper categories + */ +#define ql_dbg_verbose 0x00008000 /* More verbosity for each level + * This is to be used with other levels where + * more verbosity is required. It might not + * be applicable to all the levels. 
+ */ +#define ql_dbg_tgt 0x00004000 /* Target mode */ +#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */ +#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */ +#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */ +#define ql_dbg_edif 0x00000400 /* edif and purex debug */ +#define ql_dbg_unsol 0x00000100 /* Unsolicited path debug */ + +extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *, + uint32_t, void **); +extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, __be32 *, + uint32_t, void **); +extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *, + struct qla_hw_data *); +extern int qla24xx_soft_reset(struct qla_hw_data *); + +static inline int +ql_mask_match(uint level) +{ + if (ql2xextended_error_logging == 1) + ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; + + return level && ((level & ql2xextended_error_logging) == level); +} + +static inline int +ql_mask_match_ext(uint level, int *log_tunable) +{ + if (*log_tunable == 1) + *log_tunable = QL_DBG_DEFAULT1_MASK; + + return (level & *log_tunable) == level; +} + +/* Assumes local variable pbuf and pbuf_ready present. */ +#define ql_ktrace(dbg_msg, level, pbuf, pdev, vha, id, fmt) do { \ + struct va_format _vaf; \ + va_list _va; \ + u32 dbg_off = dbg_msg ? ql_dbg_offset : 0; \ + \ + pbuf[0] = 0; \ + if (!trace_ql_dbg_log_enabled()) \ + break; \ + \ + if (dbg_msg && !ql_mask_match_ext(level, \ + &ql2xextended_error_logging_ktrace)) \ + break; \ + \ + ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, vha, id + dbg_off); \ + \ + va_start(_va, fmt); \ + _vaf.fmt = fmt; \ + _vaf.va = &_va; \ + \ + trace_ql_dbg_log(pbuf, &_vaf); \ + \ + va_end(_va); \ +} while (0) + +#define QLA_ENABLE_KERNEL_TRACING + +#ifdef QLA_ENABLE_KERNEL_TRACING +#define QLA_TRACE_ENABLE(_tr) \ + trace_array_set_clr_event(_tr, "qla", NULL, true) +#else /* QLA_ENABLE_KERNEL_TRACING */ +#define QLA_TRACE_ENABLE(_tr) +#endif /* QLA_ENABLE_KERNEL_TRACING */ diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h new file mode 100644 index 000000000..deb642607 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -0,0 +1,5563 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#ifndef __QLA_DEF_H +#define __QLA_DEF_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#define QLA_DFS_DEFINE_DENTRY(_debugfs_file_name) \ + struct dentry *dfs_##_debugfs_file_name +#define QLA_DFS_ROOT_DEFINE_DENTRY(_debugfs_file_name) \ + struct dentry *qla_dfs_##_debugfs_file_name + +/* Big endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */ +typedef struct { + uint8_t domain; + uint8_t area; + uint8_t al_pa; +} be_id_t; + +/* Little endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */ +typedef struct { + uint8_t al_pa; + uint8_t area; + uint8_t domain; +} le_id_t; + +/* + * 24 bit port ID type definition. + */ +typedef union { + uint32_t b24 : 24; + struct { +#ifdef __BIG_ENDIAN + uint8_t domain; + uint8_t area; + uint8_t al_pa; +#elif defined(__LITTLE_ENDIAN) + uint8_t al_pa; + uint8_t area; + uint8_t domain; +#else +#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!" 
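/*
 * Worked example for the 24-bit port ID overlay defined here (annotation
 * only, assuming a little-endian host).  Initializing
 *
 *	port_id_t p = { .b = { .al_pa = 0x03, .area = 0x02,
 *			       .domain = 0x01, .rsvd_1 = 0 } };
 *
 * stores the bytes 03 02 01 00, so the overlaid bitfield reads back as
 * p.b24 == 0x010203 (domain:area:al_pa); INVALID_PORT_ID below is 0xFFFFFF
 * because it sets all 24 bits.
 */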
+#endif + uint8_t rsvd_1; + } b; +} port_id_t; +#define INVALID_PORT_ID 0xFFFFFF + +#include "qla_bsg.h" +#include "qla_dsd.h" +#include "qla_nx.h" +#include "qla_nx2.h" +#include "qla_nvme.h" +#define QLA2XXX_DRIVER_NAME "qla2xxx" +#define QLA2XXX_APIDEV "ql2xapidev" +#define QLA2XXX_MANUFACTURER "Marvell Semiconductor, Inc." + +/* + * We have MAILBOX_REGISTER_COUNT sized arrays in a few places, + * but that's fine as we don't look at the last 24 ones for + * ISP2100 HBAs. + */ +#define MAILBOX_REGISTER_COUNT_2100 8 +#define MAILBOX_REGISTER_COUNT_2200 24 +#define MAILBOX_REGISTER_COUNT 32 + +#define QLA2200A_RISC_ROM_VER 4 +#define FPM_2300 6 +#define FPM_2310 7 + +#include "qla_settings.h" + +#define MODE_DUAL (MODE_TARGET | MODE_INITIATOR) + +/* + * Data bit definitions + */ +#define BIT_0 0x1 +#define BIT_1 0x2 +#define BIT_2 0x4 +#define BIT_3 0x8 +#define BIT_4 0x10 +#define BIT_5 0x20 +#define BIT_6 0x40 +#define BIT_7 0x80 +#define BIT_8 0x100 +#define BIT_9 0x200 +#define BIT_10 0x400 +#define BIT_11 0x800 +#define BIT_12 0x1000 +#define BIT_13 0x2000 +#define BIT_14 0x4000 +#define BIT_15 0x8000 +#define BIT_16 0x10000 +#define BIT_17 0x20000 +#define BIT_18 0x40000 +#define BIT_19 0x80000 +#define BIT_20 0x100000 +#define BIT_21 0x200000 +#define BIT_22 0x400000 +#define BIT_23 0x800000 +#define BIT_24 0x1000000 +#define BIT_25 0x2000000 +#define BIT_26 0x4000000 +#define BIT_27 0x8000000 +#define BIT_28 0x10000000 +#define BIT_29 0x20000000 +#define BIT_30 0x40000000 +#define BIT_31 0x80000000 + +#define LSB(x) ((uint8_t)(x)) +#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8)) + +#define LSW(x) ((uint16_t)(x)) +#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16)) + +#define LSD(x) ((uint32_t)((uint64_t)(x))) +#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) + +static inline uint32_t make_handle(uint16_t x, uint16_t y) +{ + return ((uint32_t)x << 16) | y; +} + +/* + * I/O register +*/ + +static inline u8 rd_reg_byte(const volatile u8 __iomem *addr) +{ + return readb(addr); +} + +static inline u16 rd_reg_word(const volatile __le16 __iomem *addr) +{ + return readw(addr); +} + +static inline u32 rd_reg_dword(const volatile __le32 __iomem *addr) +{ + return readl(addr); +} + +static inline u8 rd_reg_byte_relaxed(const volatile u8 __iomem *addr) +{ + return readb_relaxed(addr); +} + +static inline u16 rd_reg_word_relaxed(const volatile __le16 __iomem *addr) +{ + return readw_relaxed(addr); +} + +static inline u32 rd_reg_dword_relaxed(const volatile __le32 __iomem *addr) +{ + return readl_relaxed(addr); +} + +static inline void wrt_reg_byte(volatile u8 __iomem *addr, u8 data) +{ + return writeb(data, addr); +} + +static inline void wrt_reg_word(volatile __le16 __iomem *addr, u16 data) +{ + return writew(data, addr); +} + +static inline void wrt_reg_dword(volatile __le32 __iomem *addr, u32 data) +{ + return writel(data, addr); +} + +/* + * ISP83XX specific remote register addresses + */ +#define QLA83XX_LED_PORT0 0x00201320 +#define QLA83XX_LED_PORT1 0x00201328 +#define QLA83XX_IDC_DEV_STATE 0x22102384 +#define QLA83XX_IDC_MAJOR_VERSION 0x22102380 +#define QLA83XX_IDC_MINOR_VERSION 0x22102398 +#define QLA83XX_IDC_DRV_PRESENCE 0x22102388 +#define QLA83XX_IDC_DRIVER_ACK 0x2210238c +#define QLA83XX_IDC_CONTROL 0x22102390 +#define QLA83XX_IDC_AUDIT 0x22102394 +#define QLA83XX_IDC_LOCK_RECOVERY 0x2210239c +#define QLA83XX_DRIVER_LOCKID 0x22102104 +#define QLA83XX_DRIVER_LOCK 0x8111c028 +#define QLA83XX_DRIVER_UNLOCK 0x8111c02c +#define QLA83XX_FLASH_LOCKID 0x22102100 +#define 
QLA83XX_FLASH_LOCK 0x8111c010 +#define QLA83XX_FLASH_UNLOCK 0x8111c014 +#define QLA83XX_DEV_PARTINFO1 0x221023e0 +#define QLA83XX_DEV_PARTINFO2 0x221023e4 +#define QLA83XX_FW_HEARTBEAT 0x221020b0 +#define QLA83XX_PEG_HALT_STATUS1 0x221020a8 +#define QLA83XX_PEG_HALT_STATUS2 0x221020ac + +/* 83XX: Macros defining 8200 AEN Reason codes */ +#define IDC_DEVICE_STATE_CHANGE BIT_0 +#define IDC_PEG_HALT_STATUS_CHANGE BIT_1 +#define IDC_NIC_FW_REPORTED_FAILURE BIT_2 +#define IDC_HEARTBEAT_FAILURE BIT_3 + +/* 83XX: Macros defining 8200 AEN Error-levels */ +#define ERR_LEVEL_NON_FATAL 0x1 +#define ERR_LEVEL_RECOVERABLE_FATAL 0x2 +#define ERR_LEVEL_UNRECOVERABLE_FATAL 0x4 + +/* 83XX: Macros for IDC Version */ +#define QLA83XX_SUPP_IDC_MAJOR_VERSION 0x01 +#define QLA83XX_SUPP_IDC_MINOR_VERSION 0x0 + +/* 83XX: Macros for scheduling dpc tasks */ +#define QLA83XX_NIC_CORE_RESET 0x1 +#define QLA83XX_IDC_STATE_HANDLER 0x2 +#define QLA83XX_NIC_CORE_UNRECOVERABLE 0x3 + +/* 83XX: Macros for defining IDC-Control bits */ +#define QLA83XX_IDC_RESET_DISABLED BIT_0 +#define QLA83XX_IDC_GRACEFUL_RESET BIT_1 + +/* 83XX: Macros for different timeouts */ +#define QLA83XX_IDC_INITIALIZATION_TIMEOUT 30 +#define QLA83XX_IDC_RESET_ACK_TIMEOUT 10 +#define QLA83XX_MAX_LOCK_RECOVERY_WAIT (2 * HZ) + +/* 83XX: Macros for defining class in DEV-Partition Info register */ +#define QLA83XX_CLASS_TYPE_NONE 0x0 +#define QLA83XX_CLASS_TYPE_NIC 0x1 +#define QLA83XX_CLASS_TYPE_FCOE 0x2 +#define QLA83XX_CLASS_TYPE_ISCSI 0x3 + +/* 83XX: Macros for IDC Lock-Recovery stages */ +#define IDC_LOCK_RECOVERY_STAGE1 0x1 /* Stage1: Intent for + * lock-recovery + */ +#define IDC_LOCK_RECOVERY_STAGE2 0x2 /* Stage2: Perform lock-recovery */ + +/* 83XX: Macros for IDC Audit type */ +#define IDC_AUDIT_TIMESTAMP 0x0 /* IDC-AUDIT: Record timestamp of + * dev-state change to NEED-RESET + * or NEED-QUIESCENT + */ +#define IDC_AUDIT_COMPLETION 0x1 /* IDC-AUDIT: Record duration of + * reset-recovery completion is + * second + */ +/* ISP2031: Values for laser on/off */ +#define PORT_0_2031 0x00201340 +#define PORT_1_2031 0x00201350 +#define LASER_ON_2031 0x01800100 +#define LASER_OFF_2031 0x01800180 + +/* + * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an + * 133Mhz slot. + */ +#define RD_REG_WORD_PIO(addr) (inw((unsigned long)addr)) +#define WRT_REG_WORD_PIO(addr, data) (outw(data, (unsigned long)addr)) + +/* + * Fibre Channel device definitions. + */ +#define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */ +#define MAX_FIBRE_DEVICES_2100 512 +#define MAX_FIBRE_DEVICES_2400 2048 +#define MAX_FIBRE_DEVICES_LOOP 128 +#define MAX_FIBRE_DEVICES_MAX MAX_FIBRE_DEVICES_2400 +#define LOOPID_MAP_SIZE (ha->max_fibre_devices) +#define MAX_FIBRE_LUNS 0xFFFF +#define MAX_HOST_COUNT 16 + +/* + * Host adapter default definitions. + */ +#define MAX_BUSES 1 /* We only have one bus today */ +#define MIN_LUNS 8 +#define MAX_LUNS MAX_FIBRE_LUNS +#define MAX_CMDS_PER_LUN 255 + +/* + * Fibre Channel device definitions. + */ +#define SNS_LAST_LOOP_ID_2100 0xfe +#define SNS_LAST_LOOP_ID_2300 0x7ff + +#define LAST_LOCAL_LOOP_ID 0x7d +#define SNS_FL_PORT 0x7e +#define FABRIC_CONTROLLER 0x7f +#define SIMPLE_NAME_SERVER 0x80 +#define SNS_FIRST_LOOP_ID 0x81 +#define MANAGEMENT_SERVER 0xfe +#define BROADCAST 0xff + +/* + * There is no correspondence between an N-PORT id and an AL_PA. Therefore the + * valid range of an N-PORT id is 0 through 0x7ef. 
+ */ +#define NPH_LAST_HANDLE 0x7ee +#define NPH_MGMT_SERVER 0x7ef /* FFFFEF */ +#define NPH_SNS 0x7fc /* FFFFFC */ +#define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */ +#define NPH_F_PORT 0x7fe /* FFFFFE */ +#define NPH_IP_BROADCAST 0x7ff /* FFFFFF */ + +#define NPH_SNS_LID(ha) (IS_FWI2_CAPABLE(ha) ? NPH_SNS : SIMPLE_NAME_SERVER) + +#define MAX_CMDSZ 16 /* SCSI maximum CDB size. */ +#include "qla_fw.h" + +struct name_list_extended { + struct get_name_list_extended *l; + dma_addr_t ldma; + struct list_head fcports; + u32 size; + u8 sent; +}; + +struct qla_nvme_fc_rjt { + struct fcnvme_ls_rjt *c; + dma_addr_t cdma; + u16 size; +}; + +struct els_reject { + struct fc_els_ls_rjt *c; + dma_addr_t cdma; + u16 size; +}; + +/* + * Timeout timer counts in seconds + */ +#define PORT_RETRY_TIME 1 +#define LOOP_DOWN_TIMEOUT 60 +#define LOOP_DOWN_TIME 255 /* 240 */ +#define LOOP_DOWN_RESET (LOOP_DOWN_TIME - 30) + +#define DEFAULT_OUTSTANDING_COMMANDS 4096 +#define MIN_OUTSTANDING_COMMANDS 128 + +/* ISP request and response entry counts (37-65535) */ +#define REQUEST_ENTRY_CNT_2100 128 /* Number of request entries. */ +#define REQUEST_ENTRY_CNT_2200 2048 /* Number of request entries. */ +#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */ +#define REQUEST_ENTRY_CNT_83XX 8192 /* Number of request entries. */ +#define RESPONSE_ENTRY_CNT_83XX 4096 /* Number of response entries.*/ +#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ +#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ +#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ +#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */ +#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/ +#define FW_DEF_EXCHANGES_CNT 2048 +#define FW_MAX_EXCHANGES_CNT (32 * 1024) +#define REDUCE_EXCHANGES_CNT (8 * 1024) + +#define SET_DID_STATUS(stat_var, status) (stat_var = status << 16) + +struct req_que; +struct qla_tgt_sess; + +struct qla_buf_dsc { + u16 tag; +#define TAG_FREED 0xffff + void *buf; + dma_addr_t buf_dma; +}; + +/* + * SCSI Request Block + */ +struct srb_cmd { + struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ + uint32_t request_sense_length; + uint32_t fw_sense_length; + uint8_t *request_sense_ptr; + struct crc_context *crc_ctx; + struct ct6_dsd ct6_ctx; + struct qla_buf_dsc buf_dsc; +}; + +/* + * SRB flag definitions + */ +#define SRB_DMA_VALID BIT_0 /* Command sent to ISP */ +#define SRB_GOT_BUF BIT_1 +#define SRB_FCP_CMND_DMA_VALID BIT_12 /* DIF: DSD List valid */ +#define SRB_CRC_CTX_DMA_VALID BIT_2 /* DIF: context DMA valid */ +#define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */ +#define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */ +#define SRB_WAKEUP_ON_COMP BIT_6 +#define SRB_DIF_BUNDL_DMA_VALID BIT_7 /* DIF: DMA list valid */ +#define SRB_EDIF_CLEANUP_DELETE BIT_9 + +/* To identify if a srb is of T10-CRC type. 
@sp => srb_t pointer */ +#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID) +#define ISP_REG16_DISCONNECT 0xFFFF + +static inline le_id_t be_id_to_le(be_id_t id) +{ + le_id_t res; + + res.domain = id.domain; + res.area = id.area; + res.al_pa = id.al_pa; + + return res; +} + +static inline be_id_t le_id_to_be(le_id_t id) +{ + be_id_t res; + + res.domain = id.domain; + res.area = id.area; + res.al_pa = id.al_pa; + + return res; +} + +static inline port_id_t be_to_port_id(be_id_t id) +{ + port_id_t res; + + res.b.domain = id.domain; + res.b.area = id.area; + res.b.al_pa = id.al_pa; + res.b.rsvd_1 = 0; + + return res; +} + +static inline be_id_t port_id_to_be_id(port_id_t port_id) +{ + be_id_t res; + + res.domain = port_id.b.domain; + res.area = port_id.b.area; + res.al_pa = port_id.b.al_pa; + + return res; +} + +struct tmf_arg { + struct list_head tmf_elem; + struct qla_qpair *qpair; + struct fc_port *fcport; + struct scsi_qla_host *vha; + u64 lun; + u32 flags; + uint8_t modifier; +}; + +struct els_logo_payload { + uint8_t opcode; + uint8_t rsvd[3]; + uint8_t s_id[3]; + uint8_t rsvd1[1]; + uint8_t wwpn[WWN_SIZE]; +}; + +struct els_plogi_payload { + uint8_t opcode; + uint8_t rsvd[3]; + __be32 data[112 / 4]; +}; + +struct ct_arg { + void *iocb; + u16 nport_handle; + dma_addr_t req_dma; + dma_addr_t rsp_dma; + u32 req_size; + u32 rsp_size; + u32 req_allocated_size; + u32 rsp_allocated_size; + void *req; + void *rsp; + port_id_t id; +}; + +struct qla_nvme_lsrjt_pt_arg { + struct fc_port *fcport; + u8 opcode; + u8 vp_idx; + u8 reason; + u8 explanation; + __le16 nport_handle; + u16 control_flags; + __le16 ox_id; + __le32 xchg_address; + u32 tx_byte_count, rx_byte_count; + dma_addr_t tx_addr, rx_addr; +}; + +/* + * SRB extensions. + */ +struct srb_iocb { + union { + struct { + uint16_t flags; +#define SRB_LOGIN_RETRIED BIT_0 +#define SRB_LOGIN_COND_PLOGI BIT_1 +#define SRB_LOGIN_SKIP_PRLI BIT_2 +#define SRB_LOGIN_NVME_PRLI BIT_3 +#define SRB_LOGIN_PRLI_ONLY BIT_4 +#define SRB_LOGIN_FCSP BIT_5 + uint16_t data[2]; + u32 iop[2]; + } logio; + struct { +#define ELS_DCMD_TIMEOUT 20 +#define ELS_DCMD_LOGO 0x5 + uint32_t flags; + uint32_t els_cmd; + struct completion comp; + struct els_logo_payload *els_logo_pyld; + dma_addr_t els_logo_pyld_dma; + } els_logo; + struct els_plogi { +#define ELS_DCMD_PLOGI 0x3 + uint32_t flags; + uint32_t els_cmd; + struct completion comp; + struct els_plogi_payload *els_plogi_pyld; + struct els_plogi_payload *els_resp_pyld; + u32 tx_size; + u32 rx_size; + dma_addr_t els_plogi_pyld_dma; + dma_addr_t els_resp_pyld_dma; + __le32 fw_status[3]; + __le16 comp_status; + __le16 len; + } els_plogi; + struct { + /* + * Values for flags field below are as + * defined in tsk_mgmt_entry struct + * for control_flags field in qla_fw.h. 
+ */ + uint64_t lun; + uint32_t flags; + uint32_t data; + struct completion comp; + __le16 comp_status; + + uint8_t modifier; + uint8_t vp_index; + uint16_t loop_id; + } tmf; + struct { +#define SRB_FXDISC_REQ_DMA_VALID BIT_0 +#define SRB_FXDISC_RESP_DMA_VALID BIT_1 +#define SRB_FXDISC_REQ_DWRD_VALID BIT_2 +#define SRB_FXDISC_RSP_DWRD_VALID BIT_3 +#define FXDISC_TIMEOUT 20 + uint8_t flags; + uint32_t req_len; + uint32_t rsp_len; + void *req_addr; + void *rsp_addr; + dma_addr_t req_dma_handle; + dma_addr_t rsp_dma_handle; + __le32 adapter_id; + __le32 adapter_id_hi; + __le16 req_func_type; + __le32 req_data; + __le32 req_data_extra; + __le32 result; + __le32 seq_number; + __le16 fw_flags; + struct completion fxiocb_comp; + __le32 reserved_0; + uint8_t reserved_1; + } fxiocb; + struct { + uint32_t cmd_hndl; + __le16 comp_status; + __le16 req_que_no; + struct completion comp; + } abt; + struct ct_arg ctarg; +#define MAX_IOCB_MB_REG 28 +#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t)) + struct { + u16 in_mb[MAX_IOCB_MB_REG]; /* from FW */ + u16 out_mb[MAX_IOCB_MB_REG]; /* to FW */ + void *out, *in; + dma_addr_t out_dma, in_dma; + struct completion comp; + int rc; + } mbx; + struct { + struct imm_ntfy_from_isp *ntfy; + } nack; + struct { + __le16 comp_status; + __le16 rsp_pyld_len; + uint8_t aen_op; + void *desc; + + /* These are only used with ls4 requests */ + __le32 cmd_len; + __le32 rsp_len; + dma_addr_t cmd_dma; + dma_addr_t rsp_dma; + enum nvmefc_fcp_datadir dir; + uint32_t dl; + uint32_t timeout_sec; + __le32 exchange_address; + __le16 nport_handle; + __le16 ox_id; + struct list_head entry; + } nvme; + struct { + u16 cmd; + u16 vp_index; + } ctrlvp; + struct { + struct edif_sa_ctl *sa_ctl; + struct qla_sa_update_frame sa_frame; + } sa_update; + } u; + + struct timer_list timer; + void (*timeout)(void *); +}; + +/* Values for srb_ctx type */ +#define SRB_LOGIN_CMD 1 +#define SRB_LOGOUT_CMD 2 +#define SRB_ELS_CMD_RPT 3 +#define SRB_ELS_CMD_HST 4 +#define SRB_CT_CMD 5 +#define SRB_ADISC_CMD 6 +#define SRB_TM_CMD 7 +#define SRB_SCSI_CMD 8 +#define SRB_BIDI_CMD 9 +#define SRB_FXIOCB_DCMD 10 +#define SRB_FXIOCB_BCMD 11 +#define SRB_ABT_CMD 12 +#define SRB_ELS_DCMD 13 +#define SRB_MB_IOCB 14 +#define SRB_CT_PTHRU_CMD 15 +#define SRB_NACK_PLOGI 16 +#define SRB_NACK_PRLI 17 +#define SRB_NACK_LOGO 18 +#define SRB_NVME_CMD 19 +#define SRB_NVME_LS 20 +#define SRB_PRLI_CMD 21 +#define SRB_CTRL_VP 22 +#define SRB_PRLO_CMD 23 +#define SRB_SA_UPDATE 25 +#define SRB_ELS_CMD_HST_NOLOGIN 26 +#define SRB_SA_REPLACE 27 +#define SRB_MARKER 28 + +struct qla_els_pt_arg { + u8 els_opcode; + u8 vp_idx; + __le16 nport_handle; + u16 control_flags, ox_id; + __le32 rx_xchg_address; + port_id_t did, sid; + u32 tx_len, tx_byte_count, rx_len, rx_byte_count; + dma_addr_t tx_addr, rx_addr; + +}; + +enum { + TYPE_SRB, + TYPE_TGT_CMD, + TYPE_TGT_TMCMD, /* task management */ +}; + +struct iocb_resource { + u8 res_type; + u8 exch_cnt; + u16 iocb_cnt; +}; + +struct bsg_cmd { + struct bsg_job *bsg_job; + union { + struct qla_els_pt_arg els_arg; + } u; +}; + +typedef struct srb { + /* + * Do not move cmd_type field, it needs to + * line up with qla_tgt_cmd->cmd_type + */ + uint8_t cmd_type; + uint8_t pad[3]; + struct iocb_resource iores; + struct kref cmd_kref; /* need to migrate ref_count over to this */ + void *priv; + struct fc_port *fcport; + struct scsi_qla_host *vha; + unsigned int start_timer:1; + unsigned int abort:1; + unsigned int aborted:1; + unsigned int completed:1; + unsigned int unsol_rsp:1; + + 
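/*
 * Annotation: a minimal sketch of the completion contract implied by the
 * srb fields declared in this structure.  A submission path fills in
 * ->type, ->fcport and the matching member of the union below, and the
 * completion path reports status through the ->done() callback, e.g.
 *
 *	sp->done(sp, DID_OK << 16);
 *
 * where the accepted encodings (SCSI result, NVMe status or QLA_* value)
 * are documented in the comment on ->done() further down.  The allocation
 * and queuing helpers are defined elsewhere in the driver and are not
 * shown here.
 */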
uint32_t handle; + uint16_t flags; + uint16_t type; + const char *name; + int iocbs; + struct qla_qpair *qpair; + struct srb *cmd_sp; + struct list_head elem; + u32 gen1; /* scratch */ + u32 gen2; /* scratch */ + int rc; + int retry_count; + struct completion *comp; + union { + struct srb_iocb iocb_cmd; + struct bsg_job *bsg_job; + struct srb_cmd scmd; + struct bsg_cmd bsg_cmd; + } u; + struct { + bool remapped; + struct { + dma_addr_t dma; + void *buf; + uint len; + } req; + struct { + dma_addr_t dma; + void *buf; + uint len; + } rsp; + } remap; + /* + * Report completion status @res and call sp_put(@sp). @res is + * an NVMe status code, a SCSI result (e.g. DID_OK << 16) or a + * QLA_* status value. + */ + void (*done)(struct srb *sp, int res); + /* Stop the timer and free @sp. Only used by the FCP code. */ + void (*free)(struct srb *sp); + /* + * Call nvme_private->fd->done() and free @sp. Only used by the NVMe + * code. + */ + void (*put_fn)(struct kref *kref); + + /* + * Report completion for asynchronous commands. + */ + void (*async_done)(struct srb *sp, int res); +} srb_t; + +#define GET_CMD_SP(sp) (sp->u.scmd.cmd) + +#define GET_CMD_SENSE_LEN(sp) \ + (sp->u.scmd.request_sense_length) +#define SET_CMD_SENSE_LEN(sp, len) \ + (sp->u.scmd.request_sense_length = len) +#define GET_CMD_SENSE_PTR(sp) \ + (sp->u.scmd.request_sense_ptr) +#define SET_CMD_SENSE_PTR(sp, ptr) \ + (sp->u.scmd.request_sense_ptr = ptr) +#define GET_FW_SENSE_LEN(sp) \ + (sp->u.scmd.fw_sense_length) +#define SET_FW_SENSE_LEN(sp, len) \ + (sp->u.scmd.fw_sense_length = len) + +struct msg_echo_lb { + dma_addr_t send_dma; + dma_addr_t rcv_dma; + uint16_t req_sg_cnt; + uint16_t rsp_sg_cnt; + uint16_t options; + uint32_t transfer_size; + uint32_t iteration_count; +}; + +/* + * ISP I/O Register Set structure definitions. + */ +struct device_reg_2xxx { + __le16 flash_address; /* Flash BIOS address */ + __le16 flash_data; /* Flash BIOS data */ + __le16 unused_1[1]; /* Gap */ + __le16 ctrl_status; /* Control/Status */ +#define CSR_FLASH_64K_BANK BIT_3 /* Flash upper 64K bank select */ +#define CSR_FLASH_ENABLE BIT_1 /* Flash BIOS Read/Write enable */ +#define CSR_ISP_SOFT_RESET BIT_0 /* ISP soft reset */ + + __le16 ictrl; /* Interrupt control */ +#define ICR_EN_INT BIT_15 /* ISP enable interrupts. */ +#define ICR_EN_RISC BIT_3 /* ISP enable RISC interrupts. */ + + __le16 istatus; /* Interrupt status */ +#define ISR_RISC_INT BIT_3 /* RISC interrupt */ + + __le16 semaphore; /* Semaphore */ + __le16 nvram; /* NVRAM register. 
*/ +#define NVR_DESELECT 0 +#define NVR_BUSY BIT_15 +#define NVR_WRT_ENABLE BIT_14 /* Write enable */ +#define NVR_PR_ENABLE BIT_13 /* Protection register enable */ +#define NVR_DATA_IN BIT_3 +#define NVR_DATA_OUT BIT_2 +#define NVR_SELECT BIT_1 +#define NVR_CLOCK BIT_0 + +#define NVR_WAIT_CNT 20000 + + union { + struct { + __le16 mailbox0; + __le16 mailbox1; + __le16 mailbox2; + __le16 mailbox3; + __le16 mailbox4; + __le16 mailbox5; + __le16 mailbox6; + __le16 mailbox7; + __le16 unused_2[59]; /* Gap */ + } __attribute__((packed)) isp2100; + struct { + /* Request Queue */ + __le16 req_q_in; /* In-Pointer */ + __le16 req_q_out; /* Out-Pointer */ + /* Response Queue */ + __le16 rsp_q_in; /* In-Pointer */ + __le16 rsp_q_out; /* Out-Pointer */ + + /* RISC to Host Status */ + __le32 host_status; +#define HSR_RISC_INT BIT_15 /* RISC interrupt */ +#define HSR_RISC_PAUSED BIT_8 /* RISC Paused */ + + /* Host to Host Semaphore */ + __le16 host_semaphore; + __le16 unused_3[17]; /* Gap */ + __le16 mailbox0; + __le16 mailbox1; + __le16 mailbox2; + __le16 mailbox3; + __le16 mailbox4; + __le16 mailbox5; + __le16 mailbox6; + __le16 mailbox7; + __le16 mailbox8; + __le16 mailbox9; + __le16 mailbox10; + __le16 mailbox11; + __le16 mailbox12; + __le16 mailbox13; + __le16 mailbox14; + __le16 mailbox15; + __le16 mailbox16; + __le16 mailbox17; + __le16 mailbox18; + __le16 mailbox19; + __le16 mailbox20; + __le16 mailbox21; + __le16 mailbox22; + __le16 mailbox23; + __le16 mailbox24; + __le16 mailbox25; + __le16 mailbox26; + __le16 mailbox27; + __le16 mailbox28; + __le16 mailbox29; + __le16 mailbox30; + __le16 mailbox31; + __le16 fb_cmd; + __le16 unused_4[10]; /* Gap */ + } __attribute__((packed)) isp2300; + } u; + + __le16 fpm_diag_config; + __le16 unused_5[0x4]; /* Gap */ + __le16 risc_hw; + __le16 unused_5_1; /* Gap */ + __le16 pcr; /* Processor Control Register. */ + __le16 unused_6[0x5]; /* Gap */ + __le16 mctr; /* Memory Configuration and Timing. */ + __le16 unused_7[0x3]; /* Gap */ + __le16 fb_cmd_2100; /* Unused on 23XX */ + __le16 unused_8[0x3]; /* Gap */ + __le16 hccr; /* Host command & control register. */ +#define HCCR_HOST_INT BIT_7 /* Host interrupt bit */ +#define HCCR_RISC_PAUSE BIT_5 /* Pause mode bit */ + /* HCCR commands */ +#define HCCR_RESET_RISC 0x1000 /* Reset RISC */ +#define HCCR_PAUSE_RISC 0x2000 /* Pause RISC */ +#define HCCR_RELEASE_RISC 0x3000 /* Release RISC from reset. */ +#define HCCR_SET_HOST_INT 0x5000 /* Set host interrupt */ +#define HCCR_CLR_HOST_INT 0x6000 /* Clear HOST interrupt */ +#define HCCR_CLR_RISC_INT 0x7000 /* Clear RISC interrupt */ +#define HCCR_DISABLE_PARITY_PAUSE 0x4001 /* Disable parity error RISC pause. */ +#define HCCR_ENABLE_PARITY 0xA000 /* Enable PARITY interrupt */ + + __le16 unused_9[5]; /* Gap */ + __le16 gpiod; /* GPIO Data register. */ + __le16 gpioe; /* GPIO Enable register. 
*/ +#define GPIO_LED_MASK 0x00C0 +#define GPIO_LED_GREEN_OFF_AMBER_OFF 0x0000 +#define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040 +#define GPIO_LED_GREEN_OFF_AMBER_ON 0x0080 +#define GPIO_LED_GREEN_ON_AMBER_ON 0x00C0 +#define GPIO_LED_ALL_OFF 0x0000 +#define GPIO_LED_RED_ON_OTHER_OFF 0x0001 /* isp2322 */ +#define GPIO_LED_RGA_ON 0x00C1 /* isp2322: red green amber */ + + union { + struct { + __le16 unused_10[8]; /* Gap */ + __le16 mailbox8; + __le16 mailbox9; + __le16 mailbox10; + __le16 mailbox11; + __le16 mailbox12; + __le16 mailbox13; + __le16 mailbox14; + __le16 mailbox15; + __le16 mailbox16; + __le16 mailbox17; + __le16 mailbox18; + __le16 mailbox19; + __le16 mailbox20; + __le16 mailbox21; + __le16 mailbox22; + __le16 mailbox23; /* Also probe reg. */ + } __attribute__((packed)) isp2200; + } u_end; +}; + +struct device_reg_25xxmq { + __le32 req_q_in; + __le32 req_q_out; + __le32 rsp_q_in; + __le32 rsp_q_out; + __le32 atio_q_in; + __le32 atio_q_out; +}; + + +struct device_reg_fx00 { + __le32 mailbox0; /* 00 */ + __le32 mailbox1; /* 04 */ + __le32 mailbox2; /* 08 */ + __le32 mailbox3; /* 0C */ + __le32 mailbox4; /* 10 */ + __le32 mailbox5; /* 14 */ + __le32 mailbox6; /* 18 */ + __le32 mailbox7; /* 1C */ + __le32 mailbox8; /* 20 */ + __le32 mailbox9; /* 24 */ + __le32 mailbox10; /* 28 */ + __le32 mailbox11; + __le32 mailbox12; + __le32 mailbox13; + __le32 mailbox14; + __le32 mailbox15; + __le32 mailbox16; + __le32 mailbox17; + __le32 mailbox18; + __le32 mailbox19; + __le32 mailbox20; + __le32 mailbox21; + __le32 mailbox22; + __le32 mailbox23; + __le32 mailbox24; + __le32 mailbox25; + __le32 mailbox26; + __le32 mailbox27; + __le32 mailbox28; + __le32 mailbox29; + __le32 mailbox30; + __le32 mailbox31; + __le32 aenmailbox0; + __le32 aenmailbox1; + __le32 aenmailbox2; + __le32 aenmailbox3; + __le32 aenmailbox4; + __le32 aenmailbox5; + __le32 aenmailbox6; + __le32 aenmailbox7; + /* Request Queue. */ + __le32 req_q_in; /* A0 - Request Queue In-Pointer */ + __le32 req_q_out; /* A4 - Request Queue Out-Pointer */ + /* Response Queue. */ + __le32 rsp_q_in; /* A8 - Response Queue In-Pointer */ + __le32 rsp_q_out; /* AC - Response Queue Out-Pointer */ + /* Init values shadowed on FW Up Event */ + __le32 initval0; /* B0 */ + __le32 initval1; /* B4 */ + __le32 initval2; /* B8 */ + __le32 initval3; /* BC */ + __le32 initval4; /* C0 */ + __le32 initval5; /* C4 */ + __le32 initval6; /* C8 */ + __le32 initval7; /* CC */ + __le32 fwheartbeat; /* D0 */ + __le32 pseudoaen; /* D4 */ +}; + + + +typedef union { + struct device_reg_2xxx isp; + struct device_reg_24xx isp24; + struct device_reg_25xxmq isp25mq; + struct device_reg_82xx isp82; + struct device_reg_fx00 ispfx00; +} __iomem device_reg_t; + +#define ISP_REQ_Q_IN(ha, reg) \ + (IS_QLA2100(ha) || IS_QLA2200(ha) ? \ + &(reg)->u.isp2100.mailbox4 : \ + &(reg)->u.isp2300.req_q_in) +#define ISP_REQ_Q_OUT(ha, reg) \ + (IS_QLA2100(ha) || IS_QLA2200(ha) ? \ + &(reg)->u.isp2100.mailbox4 : \ + &(reg)->u.isp2300.req_q_out) +#define ISP_RSP_Q_IN(ha, reg) \ + (IS_QLA2100(ha) || IS_QLA2200(ha) ? \ + &(reg)->u.isp2100.mailbox5 : \ + &(reg)->u.isp2300.rsp_q_in) +#define ISP_RSP_Q_OUT(ha, reg) \ + (IS_QLA2100(ha) || IS_QLA2200(ha) ? \ + &(reg)->u.isp2100.mailbox5 : \ + &(reg)->u.isp2300.rsp_q_out) + +#define ISP_ATIO_Q_IN(vha) (vha->hw->tgt.atio_q_in) +#define ISP_ATIO_Q_OUT(vha) (vha->hw->tgt.atio_q_out) + +#define MAILBOX_REG(ha, reg, num) \ + (IS_QLA2100(ha) || IS_QLA2200(ha) ? \ + (num < 8 ? 
\ + &(reg)->u.isp2100.mailbox0 + (num) : \ + &(reg)->u_end.isp2200.mailbox8 + (num) - 8) : \ + &(reg)->u.isp2300.mailbox0 + (num)) +#define RD_MAILBOX_REG(ha, reg, num) \ + rd_reg_word(MAILBOX_REG(ha, reg, num)) +#define WRT_MAILBOX_REG(ha, reg, num, data) \ + wrt_reg_word(MAILBOX_REG(ha, reg, num), data) + +#define FB_CMD_REG(ha, reg) \ + (IS_QLA2100(ha) || IS_QLA2200(ha) ? \ + &(reg)->fb_cmd_2100 : \ + &(reg)->u.isp2300.fb_cmd) +#define RD_FB_CMD_REG(ha, reg) \ + rd_reg_word(FB_CMD_REG(ha, reg)) +#define WRT_FB_CMD_REG(ha, reg, data) \ + wrt_reg_word(FB_CMD_REG(ha, reg), data) + +typedef struct { + uint32_t out_mb; /* outbound from driver */ + uint32_t in_mb; /* Incoming from RISC */ + uint16_t mb[MAILBOX_REGISTER_COUNT]; + long buf_size; + void *bufp; + uint32_t tov; + uint8_t flags; +#define MBX_DMA_IN BIT_0 +#define MBX_DMA_OUT BIT_1 +#define IOCTL_CMD BIT_2 +} mbx_cmd_t; + +struct mbx_cmd_32 { + uint32_t out_mb; /* outbound from driver */ + uint32_t in_mb; /* Incoming from RISC */ + uint32_t mb[MAILBOX_REGISTER_COUNT]; + long buf_size; + void *bufp; + uint32_t tov; + uint8_t flags; +#define MBX_DMA_IN BIT_0 +#define MBX_DMA_OUT BIT_1 +#define IOCTL_CMD BIT_2 +}; + + +#define MBX_TOV_SECONDS 30 + +/* + * ISP product identification definitions in mailboxes after reset. + */ +#define PROD_ID_1 0x4953 +#define PROD_ID_2 0x0000 +#define PROD_ID_2a 0x5020 +#define PROD_ID_3 0x2020 + +/* + * ISP mailbox Self-Test status codes + */ +#define MBS_FRM_ALIVE 0 /* Firmware Alive. */ +#define MBS_CHKSUM_ERR 1 /* Checksum Error. */ +#define MBS_BUSY 4 /* Busy. */ + +/* + * ISP mailbox command complete status codes + */ +#define MBS_COMMAND_COMPLETE 0x4000 +#define MBS_INVALID_COMMAND 0x4001 +#define MBS_HOST_INTERFACE_ERROR 0x4002 +#define MBS_TEST_FAILED 0x4003 +#define MBS_COMMAND_ERROR 0x4005 +#define MBS_COMMAND_PARAMETER_ERROR 0x4006 +#define MBS_PORT_ID_USED 0x4007 +#define MBS_LOOP_ID_USED 0x4008 +#define MBS_ALL_IDS_IN_USE 0x4009 +#define MBS_NOT_LOGGED_IN 0x400A +#define MBS_LINK_DOWN_ERROR 0x400B +#define MBS_DIAG_ECHO_TEST_ERROR 0x400C + +static inline bool qla2xxx_is_valid_mbs(unsigned int mbs) +{ + return MBS_COMMAND_COMPLETE <= mbs && mbs <= MBS_DIAG_ECHO_TEST_ERROR; +} + +/* + * ISP mailbox asynchronous event status codes + */ +#define MBA_ASYNC_EVENT 0x8000 /* Asynchronous event. */ +#define MBA_RESET 0x8001 /* Reset Detected. */ +#define MBA_SYSTEM_ERR 0x8002 /* System Error. */ +#define MBA_REQ_TRANSFER_ERR 0x8003 /* Request Transfer Error. */ +#define MBA_RSP_TRANSFER_ERR 0x8004 /* Response Transfer Error. */ +#define MBA_WAKEUP_THRES 0x8005 /* Request Queue Wake-up. */ +#define MBA_LIP_OCCURRED 0x8010 /* Loop Initialization Procedure */ + /* occurred. */ +#define MBA_LOOP_UP 0x8011 /* FC Loop UP. */ +#define MBA_LOOP_DOWN 0x8012 /* FC Loop Down. */ +#define MBA_LIP_RESET 0x8013 /* LIP reset occurred. */ +#define MBA_PORT_UPDATE 0x8014 /* Port Database update. */ +#define MBA_RSCN_UPDATE 0x8015 /* Register State Chg Notification. */ +#define MBA_LIP_F8 0x8016 /* Received a LIP F8. */ +#define MBA_LOOP_INIT_ERR 0x8017 /* Loop Initialization Error. */ +#define MBA_FABRIC_AUTH_REQ 0x801b /* Fabric Authentication Required. */ +#define MBA_CONGN_NOTI_RECV 0x801e /* Congestion Notification Received */ +#define MBA_SCSI_COMPLETION 0x8020 /* SCSI Command Complete. */ +#define MBA_CTIO_COMPLETION 0x8021 /* CTIO Complete. */ +#define MBA_IP_COMPLETION 0x8022 /* IP Transmit Command Complete. */ +#define MBA_IP_RECEIVE 0x8023 /* IP Received. 
*/ +#define MBA_IP_BROADCAST 0x8024 /* IP Broadcast Received. */ +#define MBA_IP_LOW_WATER_MARK 0x8025 /* IP Low Water Mark reached. */ +#define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */ +#define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */ + /* used. */ +#define MBA_TRACE_NOTIFICATION 0x8028 /* Trace/Diagnostic notification. */ +#define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */ +#define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */ +#define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. */ +#define MBA_CMPLT_3_16BIT 0x8033 /* Completion 3 16bit IOSB. */ +#define MBA_CMPLT_4_16BIT 0x8034 /* Completion 4 16bit IOSB. */ +#define MBA_CMPLT_5_16BIT 0x8035 /* Completion 5 16bit IOSB. */ +#define MBA_CHG_IN_CONNECTION 0x8036 /* Change in connection mode. */ +#define MBA_RIO_RESPONSE 0x8040 /* RIO response queue update. */ +#define MBA_ZIO_RESPONSE 0x8040 /* ZIO response queue update. */ +#define MBA_CMPLT_2_32BIT 0x8042 /* Completion 2 32bit IOSB. */ +#define MBA_BYPASS_NOTIFICATION 0x8043 /* Auto bypass notification. */ +#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */ +#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */ +#define MBA_FW_NOT_STARTED 0x8050 /* Firmware not started */ +#define MBA_FW_STARTING 0x8051 /* Firmware starting */ +#define MBA_FW_RESTART_CMPLT 0x8060 /* Firmware restart complete */ +#define MBA_INIT_REQUIRED 0x8061 /* Initialization required */ +#define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */ +#define MBA_TEMPERATURE_ALERT 0x8070 /* Temperature Alert */ +#define MBA_DPORT_DIAGNOSTICS 0x8080 /* D-port Diagnostics */ +#define MBA_TRANS_INSERT 0x8130 /* Transceiver Insertion */ +#define MBA_TRANS_REMOVE 0x8131 /* Transceiver Removal */ +#define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */ +#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change + Notification */ +#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */ +#define MBA_FW_RESET_FCT 0x8502 /* Firmware reset factory defaults */ +#define MBA_FW_INIT_INPROGRESS 0x8500 /* Firmware boot in progress */ +/* 83XX FCoE specific */ +#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */ + +/* Interrupt type codes */ +#define INTR_ROM_MB_SUCCESS 0x1 +#define INTR_ROM_MB_FAILED 0x2 +#define INTR_MB_SUCCESS 0x10 +#define INTR_MB_FAILED 0x11 +#define INTR_ASYNC_EVENT 0x12 +#define INTR_RSP_QUE_UPDATE 0x13 +#define INTR_RSP_QUE_UPDATE_83XX 0x14 +#define INTR_ATIO_QUE_UPDATE 0x1C +#define INTR_ATIO_RSP_QUE_UPDATE 0x1D +#define INTR_ATIO_QUE_UPDATE_27XX 0x1E + +/* ISP mailbox loopback echo diagnostic error code */ +#define MBS_LB_RESET 0x17 + +/* AEN mailbox Port Diagnostics test */ +#define AEN_START_DIAG_TEST 0x0 /* start the diagnostics */ +#define AEN_DONE_DIAG_TEST_WITH_NOERR 0x1 /* Done with no errors */ +#define AEN_DONE_DIAG_TEST_WITH_ERR 0x2 /* Done with error.*/ + +/* + * Firmware options 1, 2, 3. 
+ */ +#define FO1_AE_ON_LIPF8 BIT_0 +#define FO1_AE_ALL_LIP_RESET BIT_1 +#define FO1_CTIO_RETRY BIT_3 +#define FO1_DISABLE_LIP_F7_SW BIT_4 +#define FO1_DISABLE_100MS_LOS_WAIT BIT_5 +#define FO1_DISABLE_GPIO6_7 BIT_6 /* LED bits */ +#define FO1_AE_ON_LOOP_INIT_ERR BIT_7 +#define FO1_SET_EMPHASIS_SWING BIT_8 +#define FO1_AE_AUTO_BYPASS BIT_9 +#define FO1_ENABLE_PURE_IOCB BIT_10 +#define FO1_AE_PLOGI_RJT BIT_11 +#define FO1_ENABLE_ABORT_SEQUENCE BIT_12 +#define FO1_AE_QUEUE_FULL BIT_13 + +#define FO2_ENABLE_ATIO_TYPE_3 BIT_0 +#define FO2_REV_LOOPBACK BIT_1 + +#define FO3_ENABLE_EMERG_IOCB BIT_0 +#define FO3_AE_RND_ERROR BIT_1 + +/* 24XX additional firmware options */ +#define ADD_FO_COUNT 3 +#define ADD_FO1_DISABLE_GPIO_LED_CTRL BIT_6 /* LED bits */ +#define ADD_FO1_ENABLE_PUREX_IOCB BIT_10 + +#define ADD_FO2_ENABLE_SEL_CLS2 BIT_5 + +#define ADD_FO3_NO_ABT_ON_LINK_DOWN BIT_14 + +/* + * ISP mailbox commands + */ +#define MBC_LOAD_RAM 1 /* Load RAM. */ +#define MBC_EXECUTE_FIRMWARE 2 /* Execute firmware. */ +#define MBC_READ_RAM_WORD 5 /* Read RAM word. */ +#define MBC_MAILBOX_REGISTER_TEST 6 /* Wrap incoming mailboxes */ +#define MBC_VERIFY_CHECKSUM 7 /* Verify checksum. */ +#define MBC_GET_FIRMWARE_VERSION 8 /* Get firmware revision. */ +#define MBC_LOAD_RISC_RAM 9 /* Load RAM command. */ +#define MBC_DUMP_RISC_RAM 0xa /* Dump RAM command. */ +#define MBC_SECURE_FLASH_UPDATE 0xa /* Secure Flash Update(28xx) */ +#define MBC_LOAD_RISC_RAM_EXTENDED 0xb /* Load RAM extended. */ +#define MBC_DUMP_RISC_RAM_EXTENDED 0xc /* Dump RAM extended. */ +#define MBC_WRITE_RAM_WORD_EXTENDED 0xd /* Write RAM word extended */ +#define MBC_READ_RAM_EXTENDED 0xf /* Read RAM extended. */ +#define MBC_IOCB_COMMAND 0x12 /* Execute IOCB command. */ +#define MBC_STOP_FIRMWARE 0x14 /* Stop firmware. */ +#define MBC_ABORT_COMMAND 0x15 /* Abort IOCB command. */ +#define MBC_ABORT_DEVICE 0x16 /* Abort device (ID/LUN). */ +#define MBC_ABORT_TARGET 0x17 /* Abort target (ID). */ +#define MBC_RESET 0x18 /* Reset. */ +#define MBC_GET_ADAPTER_LOOP_ID 0x20 /* Get loop id of ISP2200. */ +#define MBC_GET_SET_ZIO_THRESHOLD 0x21 /* Get/SET ZIO THRESHOLD. */ +#define MBC_GET_RETRY_COUNT 0x22 /* Get f/w retry cnt/delay. */ +#define MBC_DISABLE_VI 0x24 /* Disable VI operation. */ +#define MBC_ENABLE_VI 0x25 /* Enable VI operation. */ +#define MBC_GET_FIRMWARE_OPTION 0x28 /* Get Firmware Options. */ +#define MBC_GET_MEM_OFFLOAD_CNTRL_STAT 0x34 /* Memory Offload ctrl/Stat*/ +#define MBC_SET_FIRMWARE_OPTION 0x38 /* Set Firmware Options. */ +#define MBC_SET_GET_FC_LED_CONFIG 0x3b /* Set/Get FC LED config */ +#define MBC_LOOP_PORT_BYPASS 0x40 /* Loop Port Bypass. */ +#define MBC_LOOP_PORT_ENABLE 0x41 /* Loop Port Enable. */ +#define MBC_GET_RESOURCE_COUNTS 0x42 /* Get Resource Counts. */ +#define MBC_NON_PARTICIPATE 0x43 /* Non-Participating Mode. */ +#define MBC_DIAGNOSTIC_ECHO 0x44 /* Diagnostic echo. */ +#define MBC_DIAGNOSTIC_LOOP_BACK 0x45 /* Diagnostic loop back. */ +#define MBC_ONLINE_SELF_TEST 0x46 /* Online self-test. 
*/ +#define MBC_ENHANCED_GET_PORT_DATABASE 0x47 /* Get port database + login */ +#define MBC_CONFIGURE_VF 0x4b /* Configure VFs */ +#define MBC_RESET_LINK_STATUS 0x52 /* Reset Link Error Status */ +#define MBC_IOCB_COMMAND_A64 0x54 /* Execute IOCB command (64) */ +#define MBC_PORT_LOGOUT 0x56 /* Port Logout request */ +#define MBC_SEND_RNID_ELS 0x57 /* Send RNID ELS request */ +#define MBC_SET_RNID_PARAMS 0x59 /* Set RNID parameters */ +#define MBC_GET_RNID_PARAMS 0x5a /* Get RNID parameters */ +#define MBC_DATA_RATE 0x5d /* Data Rate */ +#define MBC_INITIALIZE_FIRMWARE 0x60 /* Initialize firmware */ +#define MBC_INITIATE_LIP 0x62 /* Initiate Loop */ + /* Initialization Procedure */ +#define MBC_GET_FC_AL_POSITION_MAP 0x63 /* Get FC_AL Position Map. */ +#define MBC_GET_PORT_DATABASE 0x64 /* Get Port Database. */ +#define MBC_CLEAR_ACA 0x65 /* Clear ACA. */ +#define MBC_TARGET_RESET 0x66 /* Target Reset. */ +#define MBC_CLEAR_TASK_SET 0x67 /* Clear Task Set. */ +#define MBC_ABORT_TASK_SET 0x68 /* Abort Task Set. */ +#define MBC_GET_FIRMWARE_STATE 0x69 /* Get firmware state. */ +#define MBC_GET_PORT_NAME 0x6a /* Get port name. */ +#define MBC_GET_LINK_STATUS 0x6b /* Get port link status. */ +#define MBC_LIP_RESET 0x6c /* LIP reset. */ +#define MBC_SEND_SNS_COMMAND 0x6e /* Send Simple Name Server */ + /* commandd. */ +#define MBC_LOGIN_FABRIC_PORT 0x6f /* Login fabric port. */ +#define MBC_SEND_CHANGE_REQUEST 0x70 /* Send Change Request. */ +#define MBC_LOGOUT_FABRIC_PORT 0x71 /* Logout fabric port. */ +#define MBC_LIP_FULL_LOGIN 0x72 /* Full login LIP. */ +#define MBC_LOGIN_LOOP_PORT 0x74 /* Login Loop Port. */ +#define MBC_PORT_NODE_NAME_LIST 0x75 /* Get port/node name list. */ +#define MBC_INITIALIZE_RECEIVE_QUEUE 0x77 /* Initialize receive queue */ +#define MBC_UNLOAD_IP 0x79 /* Shutdown IP */ +#define MBC_GET_ID_LIST 0x7C /* Get Port ID list. */ +#define MBC_SEND_LFA_COMMAND 0x7D /* Send Loop Fabric Address */ +#define MBC_LUN_RESET 0x7E /* Send LUN reset */ + +/* + * all the Mt. Rainier mailbox command codes that clash with FC/FCoE ones + * should be defined with MBC_MR_* + */ +#define MBC_MR_DRV_SHUTDOWN 0x6A + +/* + * ISP24xx mailbox commands + */ +#define MBC_WRITE_SERDES 0x3 /* Write serdes word. */ +#define MBC_READ_SERDES 0x4 /* Read serdes word. */ +#define MBC_LOAD_DUMP_MPI_RAM 0x5 /* Load/Dump MPI RAM. */ +#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */ +#define MBC_GET_IOCB_STATUS 0x12 /* Get IOCB status command. */ +#define MBC_PORT_PARAMS 0x1A /* Port iDMA Parameters. */ +#define MBC_GET_TIMEOUT_PARAMS 0x22 /* Get FW timeouts. */ +#define MBC_TRACE_CONTROL 0x27 /* Trace control command. */ +#define MBC_GEN_SYSTEM_ERROR 0x2a /* Generate System Error. */ +#define MBC_WRITE_SFP 0x30 /* Write SFP Data. */ +#define MBC_READ_SFP 0x31 /* Read SFP Data. */ +#define MBC_SET_TIMEOUT_PARAMS 0x32 /* Set FW timeouts. */ +#define MBC_DPORT_DIAGNOSTICS 0x47 /* D-Port Diagnostics */ +#define MBC_MID_INITIALIZE_FIRMWARE 0x48 /* MID Initialize firmware. */ +#define MBC_MID_GET_VP_DATABASE 0x49 /* MID Get VP Database. */ +#define MBC_MID_GET_VP_ENTRY 0x4a /* MID Get VP Entry. */ +#define MBC_HOST_MEMORY_COPY 0x53 /* Host Memory Copy. */ +#define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */ +#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */ +#define MBC_LINK_INITIALIZATION 0x72 /* Do link initialization. */ +#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. 
*/ +#define MBC_PORT_RESET 0x120 /* Port Reset */ +#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */ +#define MBC_GET_PORT_CONFIG 0x123 /* Get port configuration */ + +/* + * ISP81xx mailbox commands + */ +#define MBC_WRITE_MPI_REGISTER 0x01 /* Write MPI Register. */ + +/* + * ISP8044 mailbox commands + */ +#define MBC_SET_GET_ETH_SERDES_REG 0x150 +#define HCS_WRITE_SERDES 0x3 +#define HCS_READ_SERDES 0x4 + +/* Firmware return data sizes */ +#define FCAL_MAP_SIZE 128 + +/* Mailbox bit definitions for out_mb and in_mb */ +#define MBX_31 BIT_31 +#define MBX_30 BIT_30 +#define MBX_29 BIT_29 +#define MBX_28 BIT_28 +#define MBX_27 BIT_27 +#define MBX_26 BIT_26 +#define MBX_25 BIT_25 +#define MBX_24 BIT_24 +#define MBX_23 BIT_23 +#define MBX_22 BIT_22 +#define MBX_21 BIT_21 +#define MBX_20 BIT_20 +#define MBX_19 BIT_19 +#define MBX_18 BIT_18 +#define MBX_17 BIT_17 +#define MBX_16 BIT_16 +#define MBX_15 BIT_15 +#define MBX_14 BIT_14 +#define MBX_13 BIT_13 +#define MBX_12 BIT_12 +#define MBX_11 BIT_11 +#define MBX_10 BIT_10 +#define MBX_9 BIT_9 +#define MBX_8 BIT_8 +#define MBX_7 BIT_7 +#define MBX_6 BIT_6 +#define MBX_5 BIT_5 +#define MBX_4 BIT_4 +#define MBX_3 BIT_3 +#define MBX_2 BIT_2 +#define MBX_1 BIT_1 +#define MBX_0 BIT_0 + +#define RNID_TYPE_ELS_CMD 0x5 +#define RNID_TYPE_PORT_LOGIN 0x7 +#define RNID_BUFFER_CREDITS 0x8 +#define RNID_TYPE_SET_VERSION 0x9 +#define RNID_TYPE_ASIC_TEMP 0xC + +#define ELS_CMD_MAP_SIZE 32 + +/* + * Firmware state codes from get firmware state mailbox command + */ +#define FSTATE_CONFIG_WAIT 0 +#define FSTATE_WAIT_AL_PA 1 +#define FSTATE_WAIT_LOGIN 2 +#define FSTATE_READY 3 +#define FSTATE_LOSS_OF_SYNC 4 +#define FSTATE_ERROR 5 +#define FSTATE_REINIT 6 +#define FSTATE_NON_PART 7 + +#define FSTATE_CONFIG_CORRECT 0 +#define FSTATE_P2P_RCV_LIP 1 +#define FSTATE_P2P_CHOOSE_LOOP 2 +#define FSTATE_P2P_RCV_UNIDEN_LIP 3 +#define FSTATE_FATAL_ERROR 4 +#define FSTATE_LOOP_BACK_CONN 5 + +#define QLA27XX_IMG_STATUS_VER_MAJOR 0x01 +#define QLA27XX_IMG_STATUS_VER_MINOR 0x00 +#define QLA27XX_IMG_STATUS_SIGN 0xFACEFADE +#define QLA28XX_IMG_STATUS_SIGN 0xFACEFADF +#define QLA28XX_AUX_IMG_STATUS_SIGN 0xFACEFAED +#define QLA27XX_DEFAULT_IMAGE 0 +#define QLA27XX_PRIMARY_IMAGE 1 +#define QLA27XX_SECONDARY_IMAGE 2 + +/* + * Port Database structure definition + * Little endian except where noted. 
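/*
 * Editor's sketch, not part of the patch: the MBX_n masks describe which
 * mailbox registers a command loads (out_mb) and which it returns (in_mb).
 * For MBC_GET_FIRMWARE_STATE only mb0 goes out and mb1 carries back one of
 * the FSTATE_* codes above.  issue_mbx() is a hypothetical wrapper around
 * the driver's mailbox path.
 */
static bool example_fw_ready(void)
{
        uint16_t mb[2] = { MBC_GET_FIRMWARE_STATE, 0 };

        if (issue_mbx(mb, MBX_0 /* out */, MBX_1 | MBX_0 /* in */))
                return false;

        return mb[1] == FSTATE_READY;   /* loop/fabric login complete */
}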
+ */ +#define PORT_DATABASE_SIZE 128 /* bytes */ +typedef struct { + uint8_t options; + uint8_t control; + uint8_t master_state; + uint8_t slave_state; + uint8_t reserved[2]; + uint8_t hard_address; + uint8_t reserved_1; + uint8_t port_id[4]; + uint8_t node_name[WWN_SIZE]; + uint8_t port_name[WWN_SIZE]; + __le16 execution_throttle; + uint16_t execution_count; + uint8_t reset_count; + uint8_t reserved_2; + uint16_t resource_allocation; + uint16_t current_allocation; + uint16_t queue_head; + uint16_t queue_tail; + uint16_t transmit_execution_list_next; + uint16_t transmit_execution_list_previous; + uint16_t common_features; + uint16_t total_concurrent_sequences; + uint16_t RO_by_information_category; + uint8_t recipient; + uint8_t initiator; + uint16_t receive_data_size; + uint16_t concurrent_sequences; + uint16_t open_sequences_per_exchange; + uint16_t lun_abort_flags; + uint16_t lun_stop_flags; + uint16_t stop_queue_head; + uint16_t stop_queue_tail; + uint16_t port_retry_timer; + uint16_t next_sequence_id; + uint16_t frame_count; + uint16_t PRLI_payload_length; + uint8_t prli_svc_param_word_0[2]; /* Big endian */ + /* Bits 15-0 of word 0 */ + uint8_t prli_svc_param_word_3[2]; /* Big endian */ + /* Bits 15-0 of word 3 */ + uint16_t loop_id; + uint16_t extended_lun_info_list_pointer; + uint16_t extended_lun_stop_list_pointer; +} port_database_t; + +/* + * Port database slave/master states + */ +#define PD_STATE_DISCOVERY 0 +#define PD_STATE_WAIT_DISCOVERY_ACK 1 +#define PD_STATE_PORT_LOGIN 2 +#define PD_STATE_WAIT_PORT_LOGIN_ACK 3 +#define PD_STATE_PROCESS_LOGIN 4 +#define PD_STATE_WAIT_PROCESS_LOGIN_ACK 5 +#define PD_STATE_PORT_LOGGED_IN 6 +#define PD_STATE_PORT_UNAVAILABLE 7 +#define PD_STATE_PROCESS_LOGOUT 8 +#define PD_STATE_WAIT_PROCESS_LOGOUT_ACK 9 +#define PD_STATE_PORT_LOGOUT 10 +#define PD_STATE_WAIT_PORT_LOGOUT_ACK 11 + + +#define QLA_ZIO_MODE_6 (BIT_2 | BIT_1) +#define QLA_ZIO_DISABLED 0 +#define QLA_ZIO_DEFAULT_TIMER 2 + +/* + * ISP Initialization Control Block. + * Little endian except where noted. + */ +#define ICB_VERSION 1 +typedef struct { + uint8_t version; + uint8_t reserved_1; + + /* + * LSB BIT 0 = Enable Hard Loop Id + * LSB BIT 1 = Enable Fairness + * LSB BIT 2 = Enable Full-Duplex + * LSB BIT 3 = Enable Fast Posting + * LSB BIT 4 = Enable Target Mode + * LSB BIT 5 = Disable Initiator Mode + * LSB BIT 6 = Enable ADISC + * LSB BIT 7 = Enable Target Inquiry Data + * + * MSB BIT 0 = Enable PDBC Notify + * MSB BIT 1 = Non Participating LIP + * MSB BIT 2 = Descending Loop ID Search + * MSB BIT 3 = Acquire Loop ID in LIPA + * MSB BIT 4 = Stop PortQ on Full Status + * MSB BIT 5 = Full Login after LIP + * MSB BIT 6 = Node Name Option + * MSB BIT 7 = Ext IFWCB enable bit + */ + uint8_t firmware_options[2]; + + __le16 frame_payload_size; + __le16 max_iocb_allocation; + __le16 execution_throttle; + uint8_t retry_count; + uint8_t retry_delay; /* unused */ + uint8_t port_name[WWN_SIZE]; /* Big endian. */ + uint16_t hard_address; + uint8_t inquiry_data; + uint8_t login_timeout; + uint8_t node_name[WWN_SIZE]; /* Big endian. 
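/*
 * Editor's sketch, not part of the patch: a minimal consumer of the
 * port_database_t returned by MBC_GET_PORT_DATABASE.  The structure is
 * little endian except the WWN fields, so __le16 members such as
 * execution_throttle need le16_to_cpu() while port_name can be copied as-is.
 */
static bool example_port_logged_in(const port_database_t *pd,
                                   uint8_t wwpn[WWN_SIZE])
{
        if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
            pd->slave_state != PD_STATE_PORT_LOGGED_IN)
                return false;                   /* login not complete yet */

        memcpy(wwpn, pd->port_name, WWN_SIZE);  /* WWNs stay big endian */
        return true;
}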
*/ + + __le16 request_q_outpointer; + __le16 response_q_inpointer; + __le16 request_q_length; + __le16 response_q_length; + __le64 request_q_address __packed; + __le64 response_q_address __packed; + + __le16 lun_enables; + uint8_t command_resource_count; + uint8_t immediate_notify_resource_count; + __le16 timeout; + uint8_t reserved_2[2]; + + /* + * LSB BIT 0 = Timer Operation mode bit 0 + * LSB BIT 1 = Timer Operation mode bit 1 + * LSB BIT 2 = Timer Operation mode bit 2 + * LSB BIT 3 = Timer Operation mode bit 3 + * LSB BIT 4 = Init Config Mode bit 0 + * LSB BIT 5 = Init Config Mode bit 1 + * LSB BIT 6 = Init Config Mode bit 2 + * LSB BIT 7 = Enable Non part on LIHA failure + * + * MSB BIT 0 = Enable class 2 + * MSB BIT 1 = Enable ACK0 + * MSB BIT 2 = + * MSB BIT 3 = + * MSB BIT 4 = FC Tape Enable + * MSB BIT 5 = Enable FC Confirm + * MSB BIT 6 = Enable command queuing in target mode + * MSB BIT 7 = No Logo On Link Down + */ + uint8_t add_firmware_options[2]; + + uint8_t response_accumulation_timer; + uint8_t interrupt_delay_timer; + + /* + * LSB BIT 0 = Enable Read xfr_rdy + * LSB BIT 1 = Soft ID only + * LSB BIT 2 = + * LSB BIT 3 = + * LSB BIT 4 = FCP RSP Payload [0] + * LSB BIT 5 = FCP RSP Payload [1] / Sbus enable - 2200 + * LSB BIT 6 = Enable Out-of-Order frame handling + * LSB BIT 7 = Disable Automatic PLOGI on Local Loop + * + * MSB BIT 0 = Sbus enable - 2300 + * MSB BIT 1 = + * MSB BIT 2 = + * MSB BIT 3 = + * MSB BIT 4 = LED mode + * MSB BIT 5 = enable 50 ohm termination + * MSB BIT 6 = Data Rate (2300 only) + * MSB BIT 7 = Data Rate (2300 only) + */ + uint8_t special_options[2]; + + uint8_t reserved_3[26]; +} init_cb_t; + +/* Special Features Control Block */ +struct init_sf_cb { + uint8_t format; + uint8_t reserved0; + /* + * BIT 15-14 = Reserved + * BIT_13 = SAN Congestion Management (1 - Enabled, 0 - Disabled) + * BIT_12 = Remote Write Optimization (1 - Enabled, 0 - Disabled) + * BIT 11-0 = Reserved + */ + __le16 flags; + uint8_t reserved1[32]; + uint16_t discard_OHRB_timeout_value; + uint16_t remote_write_opt_queue_num; + uint8_t reserved2[40]; + uint8_t scm_related_parameter[16]; + uint8_t reserved3[32]; +}; + +/* + * Get Link Status mailbox command return buffer. + */ +#define GLSO_SEND_RPS BIT_0 +#define GLSO_USE_DID BIT_3 + +struct link_statistics { + __le32 link_fail_cnt; + __le32 loss_sync_cnt; + __le32 loss_sig_cnt; + __le32 prim_seq_err_cnt; + __le32 inval_xmit_word_cnt; + __le32 inval_crc_cnt; + __le32 lip_cnt; + __le32 link_up_cnt; + __le32 link_down_loop_init_tmo; + __le32 link_down_los; + __le32 link_down_loss_rcv_clk; + uint32_t reserved0[5]; + __le32 port_cfg_chg; + uint32_t reserved1[11]; + __le32 rsp_q_full; + __le32 atio_q_full; + __le32 drop_ae; + __le32 els_proto_err; + __le32 reserved2; + __le32 tx_frames; + __le32 rx_frames; + __le32 discarded_frames; + __le32 dropped_frames; + uint32_t reserved3; + __le32 nos_rcvd; + uint32_t reserved4[4]; + __le32 tx_prjt; + __le32 rcv_exfail; + __le32 rcv_abts; + __le32 seq_frm_miss; + __le32 corr_err; + __le32 mb_rqst; + __le32 nport_full; + __le32 eofa; + uint32_t reserved5; + __le64 fpm_recv_word_cnt; + __le64 fpm_disc_word_cnt; + __le64 fpm_xmit_word_cnt; + uint32_t reserved6[70]; +}; + +/* + * NVRAM Command values. + */ +#define NV_START_BIT BIT_2 +#define NV_WRITE_OP (BIT_26+BIT_24) +#define NV_READ_OP (BIT_26+BIT_25) +#define NV_ERASE_OP (BIT_26+BIT_25+BIT_24) +#define NV_MASK_OP (BIT_26+BIT_25+BIT_24) +#define NV_DELAY_COUNT 10 + +/* + * QLogic ISP2100, ISP2200 and ISP2300 NVRAM structure definition. 
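/*
 * Editor's sketch, not part of the patch: the LSB/MSB comment blocks above
 * map directly onto the two bytes of firmware_options[] and
 * add_firmware_options[].  Example: enable Fairness (LSB bit 1) and Full
 * Login after LIP (MSB bit 5) in an init_cb_t before it is handed to
 * MBC_INITIALIZE_FIRMWARE; the 2048-byte frame payload is only an example
 * value.
 */
static void example_tune_icb(init_cb_t *icb)
{
        icb->firmware_options[0] |= BIT_1;      /* LSB: Enable Fairness */
        icb->firmware_options[1] |= BIT_5;      /* MSB: Full Login after LIP */
        icb->frame_payload_size = cpu_to_le16(2048);
}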
+ */ +typedef struct { + /* + * NVRAM header + */ + uint8_t id[4]; + uint8_t nvram_version; + uint8_t reserved_0; + + /* + * NVRAM RISC parameter block + */ + uint8_t parameter_block_version; + uint8_t reserved_1; + + /* + * LSB BIT 0 = Enable Hard Loop Id + * LSB BIT 1 = Enable Fairness + * LSB BIT 2 = Enable Full-Duplex + * LSB BIT 3 = Enable Fast Posting + * LSB BIT 4 = Enable Target Mode + * LSB BIT 5 = Disable Initiator Mode + * LSB BIT 6 = Enable ADISC + * LSB BIT 7 = Enable Target Inquiry Data + * + * MSB BIT 0 = Enable PDBC Notify + * MSB BIT 1 = Non Participating LIP + * MSB BIT 2 = Descending Loop ID Search + * MSB BIT 3 = Acquire Loop ID in LIPA + * MSB BIT 4 = Stop PortQ on Full Status + * MSB BIT 5 = Full Login after LIP + * MSB BIT 6 = Node Name Option + * MSB BIT 7 = Ext IFWCB enable bit + */ + uint8_t firmware_options[2]; + + __le16 frame_payload_size; + __le16 max_iocb_allocation; + __le16 execution_throttle; + uint8_t retry_count; + uint8_t retry_delay; /* unused */ + uint8_t port_name[WWN_SIZE]; /* Big endian. */ + uint16_t hard_address; + uint8_t inquiry_data; + uint8_t login_timeout; + uint8_t node_name[WWN_SIZE]; /* Big endian. */ + + /* + * LSB BIT 0 = Timer Operation mode bit 0 + * LSB BIT 1 = Timer Operation mode bit 1 + * LSB BIT 2 = Timer Operation mode bit 2 + * LSB BIT 3 = Timer Operation mode bit 3 + * LSB BIT 4 = Init Config Mode bit 0 + * LSB BIT 5 = Init Config Mode bit 1 + * LSB BIT 6 = Init Config Mode bit 2 + * LSB BIT 7 = Enable Non part on LIHA failure + * + * MSB BIT 0 = Enable class 2 + * MSB BIT 1 = Enable ACK0 + * MSB BIT 2 = + * MSB BIT 3 = + * MSB BIT 4 = FC Tape Enable + * MSB BIT 5 = Enable FC Confirm + * MSB BIT 6 = Enable command queuing in target mode + * MSB BIT 7 = No Logo On Link Down + */ + uint8_t add_firmware_options[2]; + + uint8_t response_accumulation_timer; + uint8_t interrupt_delay_timer; + + /* + * LSB BIT 0 = Enable Read xfr_rdy + * LSB BIT 1 = Soft ID only + * LSB BIT 2 = + * LSB BIT 3 = + * LSB BIT 4 = FCP RSP Payload [0] + * LSB BIT 5 = FCP RSP Payload [1] / Sbus enable - 2200 + * LSB BIT 6 = Enable Out-of-Order frame handling + * LSB BIT 7 = Disable Automatic PLOGI on Local Loop + * + * MSB BIT 0 = Sbus enable - 2300 + * MSB BIT 1 = + * MSB BIT 2 = + * MSB BIT 3 = + * MSB BIT 4 = LED mode + * MSB BIT 5 = enable 50 ohm termination + * MSB BIT 6 = Data Rate (2300 only) + * MSB BIT 7 = Data Rate (2300 only) + */ + uint8_t special_options[2]; + + /* Reserved for expanded RISC parameter block */ + uint8_t reserved_2[22]; + + /* + * LSB BIT 0 = Tx Sensitivity 1G bit 0 + * LSB BIT 1 = Tx Sensitivity 1G bit 1 + * LSB BIT 2 = Tx Sensitivity 1G bit 2 + * LSB BIT 3 = Tx Sensitivity 1G bit 3 + * LSB BIT 4 = Rx Sensitivity 1G bit 0 + * LSB BIT 5 = Rx Sensitivity 1G bit 1 + * LSB BIT 6 = Rx Sensitivity 1G bit 2 + * LSB BIT 7 = Rx Sensitivity 1G bit 3 + * + * MSB BIT 0 = Tx Sensitivity 2G bit 0 + * MSB BIT 1 = Tx Sensitivity 2G bit 1 + * MSB BIT 2 = Tx Sensitivity 2G bit 2 + * MSB BIT 3 = Tx Sensitivity 2G bit 3 + * MSB BIT 4 = Rx Sensitivity 2G bit 0 + * MSB BIT 5 = Rx Sensitivity 2G bit 1 + * MSB BIT 6 = Rx Sensitivity 2G bit 2 + * MSB BIT 7 = Rx Sensitivity 2G bit 3 + * + * LSB BIT 0 = Output Swing 1G bit 0 + * LSB BIT 1 = Output Swing 1G bit 1 + * LSB BIT 2 = Output Swing 1G bit 2 + * LSB BIT 3 = Output Emphasis 1G bit 0 + * LSB BIT 4 = Output Emphasis 1G bit 1 + * LSB BIT 5 = Output Swing 2G bit 0 + * LSB BIT 6 = Output Swing 2G bit 1 + * LSB BIT 7 = Output Swing 2G bit 2 + * + * MSB BIT 0 = Output Emphasis 2G bit 0 + * MSB BIT 1 = 
Output Emphasis 2G bit 1 + * MSB BIT 2 = Output Enable + * MSB BIT 3 = + * MSB BIT 4 = + * MSB BIT 5 = + * MSB BIT 6 = + * MSB BIT 7 = + */ + uint8_t seriallink_options[4]; + + /* + * NVRAM host parameter block + * + * LSB BIT 0 = Enable spinup delay + * LSB BIT 1 = Disable BIOS + * LSB BIT 2 = Enable Memory Map BIOS + * LSB BIT 3 = Enable Selectable Boot + * LSB BIT 4 = Disable RISC code load + * LSB BIT 5 = Set cache line size 1 + * LSB BIT 6 = PCI Parity Disable + * LSB BIT 7 = Enable extended logging + * + * MSB BIT 0 = Enable 64bit addressing + * MSB BIT 1 = Enable lip reset + * MSB BIT 2 = Enable lip full login + * MSB BIT 3 = Enable target reset + * MSB BIT 4 = Enable database storage + * MSB BIT 5 = Enable cache flush read + * MSB BIT 6 = Enable database load + * MSB BIT 7 = Enable alternate WWN + */ + uint8_t host_p[2]; + + uint8_t boot_node_name[WWN_SIZE]; + uint8_t boot_lun_number; + uint8_t reset_delay; + uint8_t port_down_retry_count; + uint8_t boot_id_number; + __le16 max_luns_per_target; + uint8_t fcode_boot_port_name[WWN_SIZE]; + uint8_t alternate_port_name[WWN_SIZE]; + uint8_t alternate_node_name[WWN_SIZE]; + + /* + * BIT 0 = Selective Login + * BIT 1 = Alt-Boot Enable + * BIT 2 = + * BIT 3 = Boot Order List + * BIT 4 = + * BIT 5 = Selective LUN + * BIT 6 = + * BIT 7 = unused + */ + uint8_t efi_parameters; + + uint8_t link_down_timeout; + + uint8_t adapter_id[16]; + + uint8_t alt1_boot_node_name[WWN_SIZE]; + uint16_t alt1_boot_lun_number; + uint8_t alt2_boot_node_name[WWN_SIZE]; + uint16_t alt2_boot_lun_number; + uint8_t alt3_boot_node_name[WWN_SIZE]; + uint16_t alt3_boot_lun_number; + uint8_t alt4_boot_node_name[WWN_SIZE]; + uint16_t alt4_boot_lun_number; + uint8_t alt5_boot_node_name[WWN_SIZE]; + uint16_t alt5_boot_lun_number; + uint8_t alt6_boot_node_name[WWN_SIZE]; + uint16_t alt6_boot_lun_number; + uint8_t alt7_boot_node_name[WWN_SIZE]; + uint16_t alt7_boot_lun_number; + + uint8_t reserved_3[2]; + + /* Offset 200-215 : Model Number */ + uint8_t model_number[16]; + + /* OEM related items */ + uint8_t oem_specific[16]; + + /* + * NVRAM Adapter Features offset 232-239 + * + * LSB BIT 0 = External GBIC + * LSB BIT 1 = Risc RAM parity + * LSB BIT 2 = Buffer Plus Module + * LSB BIT 3 = Multi Chip Adapter + * LSB BIT 4 = Internal connector + * LSB BIT 5 = + * LSB BIT 6 = + * LSB BIT 7 = + * + * MSB BIT 0 = + * MSB BIT 1 = + * MSB BIT 2 = + * MSB BIT 3 = + * MSB BIT 4 = + * MSB BIT 5 = + * MSB BIT 6 = + * MSB BIT 7 = + */ + uint8_t adapter_features[2]; + + uint8_t reserved_4[16]; + + /* Subsystem vendor ID for ISP2200 */ + uint16_t subsystem_vendor_id_2200; + + /* Subsystem device ID for ISP2200 */ + uint16_t subsystem_device_id_2200; + + uint8_t reserved_5; + uint8_t checksum; +} nvram_t; + +/* + * ISP queue - response queue entry definition. + */ +typedef struct { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System defined handle */ + uint8_t data[52]; + uint32_t signature; +#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */ +} response_t; + +/* + * ISP queue - ATIO queue entry definition. + */ +struct atio { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. 
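/*
 * Editor's sketch, not part of the patch: the trailing checksum byte of
 * nvram_t is conventionally chosen so the 8-bit sum over the whole structure
 * is zero, and the header id[] carries the "ISP " signature.  This mirrors
 * the usual validation convention; the real check lives in the driver's .c
 * files, so treat the details here as an assumption.
 */
static bool example_nvram_valid(const nvram_t *nv)
{
        const uint8_t *p = (const uint8_t *)nv;
        uint8_t sum = 0;
        size_t i;

        for (i = 0; i < sizeof(*nv); i++)
                sum += p[i];                    /* byte sum must wrap to zero */

        return sum == 0 && nv->id[0] == 'I';    /* header starts with "ISP " */
}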
*/ + __le16 attr_n_length; + uint8_t data[56]; + uint32_t signature; +#define ATIO_PROCESSED 0xDEADDEAD /* Signature */ +}; + +typedef union { + __le16 extended; + struct { + uint8_t reserved; + uint8_t standard; + } id; +} target_id_t; + +#define SET_TARGET_ID(ha, to, from) \ +do { \ + if (HAS_EXTENDED_IDS(ha)) \ + to.extended = cpu_to_le16(from); \ + else \ + to.id.standard = (uint8_t)from; \ +} while (0) + +/* + * ISP queue - command entry structure definition. + */ +#define COMMAND_TYPE 0x11 /* Command entry */ +typedef struct { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System handle. */ + target_id_t target; /* SCSI ID */ + __le16 lun; /* SCSI LUN */ + __le16 control_flags; /* Control flags. */ +#define CF_WRITE BIT_6 +#define CF_READ BIT_5 +#define CF_SIMPLE_TAG BIT_3 +#define CF_ORDERED_TAG BIT_2 +#define CF_HEAD_TAG BIT_1 + uint16_t reserved_1; + __le16 timeout; /* Command timeout. */ + __le16 dseg_count; /* Data segment count. */ + uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */ + __le32 byte_count; /* Total byte count. */ + union { + struct dsd32 dsd32[3]; + struct dsd64 dsd64[2]; + }; +} cmd_entry_t; + +/* + * ISP queue - 64-Bit addressing, command entry structure definition. + */ +#define COMMAND_A64_TYPE 0x19 /* Command A64 entry */ +typedef struct { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System handle. */ + target_id_t target; /* SCSI ID */ + __le16 lun; /* SCSI LUN */ + __le16 control_flags; /* Control flags. */ + uint16_t reserved_1; + __le16 timeout; /* Command timeout. */ + __le16 dseg_count; /* Data segment count. */ + uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */ + uint32_t byte_count; /* Total byte count. */ + struct dsd64 dsd[2]; +} cmd_a64_entry_t, request_t; + +/* + * ISP queue - continuation entry structure definition. + */ +#define CONTINUE_TYPE 0x02 /* Continuation entry. */ +typedef struct { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t reserved; + struct dsd32 dsd[7]; +} cont_entry_t; + +/* + * ISP queue - 64-Bit addressing, continuation entry structure definition. + */ +#define CONTINUE_A64_TYPE 0x0A /* Continuation A64 entry. */ +typedef struct { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. 
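/*
 * Editor's sketch, not part of the patch: filling the routing fields of a
 * 32-bit command IOCB.  HAS_EXTENDED_IDS() and MAX_CMDSZ come from elsewhere
 * in this header; "ha" stands for the usual qla_hw_data pointer.
 */
static void example_fill_cmd(struct qla_hw_data *ha, cmd_entry_t *cmd,
                             uint16_t tgt, uint16_t lun, uint16_t secs)
{
        cmd->entry_type = COMMAND_TYPE;
        cmd->entry_count = 1;
        SET_TARGET_ID(ha, cmd->target, tgt);    /* extended vs. standard ID */
        cmd->lun = cpu_to_le16(lun);
        cmd->control_flags = cpu_to_le16(CF_READ | CF_SIMPLE_TAG);
        cmd->timeout = cpu_to_le16(secs);
}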
*/ + struct dsd64 dsd[5]; +} cont_a64_entry_t; + +#define PO_MODE_DIF_INSERT 0 +#define PO_MODE_DIF_REMOVE 1 +#define PO_MODE_DIF_PASS 2 +#define PO_MODE_DIF_REPLACE 3 +#define PO_MODE_DIF_TCP_CKSUM 6 +#define PO_ENABLE_INCR_GUARD_SEED BIT_3 +#define PO_DISABLE_GUARD_CHECK BIT_4 +#define PO_DISABLE_INCR_REF_TAG BIT_5 +#define PO_DIS_HEADER_MODE BIT_7 +#define PO_ENABLE_DIF_BUNDLING BIT_8 +#define PO_DIS_FRAME_MODE BIT_9 +#define PO_DIS_VALD_APP_ESC BIT_10 /* Dis validation for escape tag/ffffh */ +#define PO_DIS_VALD_APP_REF_ESC BIT_11 + +#define PO_DIS_APP_TAG_REPL BIT_12 /* disable APP Tag replacement */ +#define PO_DIS_REF_TAG_REPL BIT_13 +#define PO_DIS_APP_TAG_VALD BIT_14 /* disable APP Tag validation */ +#define PO_DIS_REF_TAG_VALD BIT_15 + +/* + * ISP queue - 64-Bit addressing, continuation crc entry structure definition. + */ +struct crc_context { + uint32_t handle; /* System handle. */ + __le32 ref_tag; + __le16 app_tag; + uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/ + uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/ + __le16 guard_seed; /* Initial Guard Seed */ + __le16 prot_opts; /* Requested Data Protection Mode */ + __le16 blk_size; /* Data size in bytes */ + __le16 runt_blk_guard; /* Guard value for runt block (tape + * only) */ + __le32 byte_count; /* Total byte count/ total data + * transfer count */ + union { + struct { + uint32_t reserved_1; + uint16_t reserved_2; + uint16_t reserved_3; + uint32_t reserved_4; + struct dsd64 data_dsd[1]; + uint32_t reserved_5[2]; + uint32_t reserved_6; + } nobundling; + struct { + __le32 dif_byte_count; /* Total DIF byte + * count */ + uint16_t reserved_1; + __le16 dseg_count; /* Data segment count */ + uint32_t reserved_2; + struct dsd64 data_dsd[1]; + struct dsd64 dif_dsd; + } bundling; + } u; + + struct fcp_cmnd fcp_cmnd; + dma_addr_t crc_ctx_dma; + /* List of DMA context transfers */ + struct list_head dsd_list; + + /* List of DIF Bundling context DMA address */ + struct list_head ldif_dsd_list; + u8 no_ldif_dsd; + + struct list_head ldif_dma_hndl_list; + u32 dif_bundl_len; + u8 no_dif_bundl; + /* This structure should not exceed 512 bytes */ +}; + +#define CRC_CONTEXT_LEN_FW (offsetof(struct crc_context, fcp_cmnd.lun)) +#define CRC_CONTEXT_FCPCMND_OFF (offsetof(struct crc_context, fcp_cmnd.lun)) + +/* + * ISP queue - status entry structure definition. + */ +#define STATUS_TYPE 0x03 /* Status entry. */ +typedef struct { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System handle. */ + __le16 scsi_status; /* SCSI status. */ + __le16 comp_status; /* Completion status. */ + __le16 state_flags; /* State flags. */ + __le16 status_flags; /* Status flags. */ + __le16 rsp_info_len; /* Response Info Length. */ + __le16 req_sense_length; /* Request sense data length. */ + __le32 residual_length; /* Residual transfer length. */ + uint8_t rsp_info[8]; /* FCP response information. */ + uint8_t req_sense_data[32]; /* Request sense data. */ +} sts_entry_t; + +/* + * Status entry entry status + */ +#define RF_RQ_DMA_ERROR BIT_6 /* Request Queue DMA error. */ +#define RF_INV_E_ORDER BIT_5 /* Invalid entry order. */ +#define RF_INV_E_COUNT BIT_4 /* Invalid entry count. */ +#define RF_INV_E_PARAM BIT_3 /* Invalid entry parameter. */ +#define RF_INV_E_TYPE BIT_2 /* Invalid entry type. 
*/ +#define RF_BUSY BIT_1 /* Busy */ +#define RF_MASK (RF_RQ_DMA_ERROR | RF_INV_E_ORDER | RF_INV_E_COUNT | \ + RF_INV_E_PARAM | RF_INV_E_TYPE | RF_BUSY) +#define RF_MASK_24XX (RF_INV_E_ORDER | RF_INV_E_COUNT | RF_INV_E_PARAM | \ + RF_INV_E_TYPE) + +/* + * Status entry SCSI status bit definitions. + */ +#define SS_MASK 0xfff /* Reserved bits BIT_12-BIT_15*/ +#define SS_RESIDUAL_UNDER BIT_11 +#define SS_RESIDUAL_OVER BIT_10 +#define SS_SENSE_LEN_VALID BIT_9 +#define SS_RESPONSE_INFO_LEN_VALID BIT_8 +#define SS_SCSI_STATUS_BYTE 0xff + +#define SS_RESERVE_CONFLICT (BIT_4 | BIT_3) +#define SS_BUSY_CONDITION BIT_3 +#define SS_CONDITION_MET BIT_2 +#define SS_CHECK_CONDITION BIT_1 + +/* + * Status entry completion status + */ +#define CS_COMPLETE 0x0 /* No errors */ +#define CS_INCOMPLETE 0x1 /* Incomplete transfer of cmd. */ +#define CS_DMA 0x2 /* A DMA direction error. */ +#define CS_TRANSPORT 0x3 /* Transport error. */ +#define CS_RESET 0x4 /* SCSI bus reset occurred */ +#define CS_ABORTED 0x5 /* System aborted command. */ +#define CS_TIMEOUT 0x6 /* Timeout error. */ +#define CS_DATA_OVERRUN 0x7 /* Data overrun. */ +#define CS_DIF_ERROR 0xC /* DIF error detected */ + +#define CS_DATA_UNDERRUN 0x15 /* Data Underrun. */ +#define CS_QUEUE_FULL 0x1C /* Queue Full. */ +#define CS_PORT_UNAVAILABLE 0x28 /* Port unavailable */ + /* (selection timeout) */ +#define CS_PORT_LOGGED_OUT 0x29 /* Port Logged Out */ +#define CS_PORT_CONFIG_CHG 0x2A /* Port Configuration Changed */ +#define CS_PORT_BUSY 0x2B /* Port Busy */ +#define CS_COMPLETE_CHKCOND 0x30 /* Error? */ +#define CS_IOCB_ERROR 0x31 /* Generic error for IOCB request + failure */ +#define CS_REJECT_RECEIVED 0x4E /* Reject received */ +#define CS_EDIF_AUTH_ERROR 0x63 /* decrypt error */ +#define CS_EDIF_PAD_LEN_ERROR 0x65 /* pad > frame size, not 4byte align */ +#define CS_EDIF_INV_REQ 0x66 /* invalid request */ +#define CS_EDIF_SPI_ERROR 0x67 /* rx frame unable to locate sa */ +#define CS_EDIF_HDR_ERROR 0x69 /* data frame != expected len */ +#define CS_BAD_PAYLOAD 0x80 /* Driver defined */ +#define CS_UNKNOWN 0x81 /* Driver defined */ +#define CS_RETRY 0x82 /* Driver defined */ +#define CS_LOOP_DOWN_ABORT 0x83 /* Driver defined */ + +#define CS_BIDIR_RD_OVERRUN 0x700 +#define CS_BIDIR_RD_WR_OVERRUN 0x707 +#define CS_BIDIR_RD_OVERRUN_WR_UNDERRUN 0x715 +#define CS_BIDIR_RD_UNDERRUN 0x1500 +#define CS_BIDIR_RD_UNDERRUN_WR_OVERRUN 0x1507 +#define CS_BIDIR_RD_WR_UNDERRUN 0x1515 +#define CS_BIDIR_DMA 0x200 +/* + * Status entry status flags + */ +#define SF_ABTS_TERMINATED BIT_10 +#define SF_LOGOUT_SENT BIT_13 + +/* + * ISP queue - status continuation entry structure definition. + */ +#define STATUS_CONT_TYPE 0x10 /* Status continuation entry. */ +typedef struct { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint8_t data[60]; /* data */ +} sts_cont_entry_t; + +/* + * ISP queue - RIO Type 1 status entry (32 bit I/O entry handles) + * structure definition. + */ +#define STATUS_TYPE_21 0x21 /* Status entry. */ +typedef struct { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t handle_count; /* Handle count. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle[15]; /* System handles. */ +} sts21_entry_t; + +/* + * ISP queue - RIO Type 2 status entry (16 bit I/O entry handles) + * structure definition. + */ +#define STATUS_TYPE_22 0x22 /* Status entry. 
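/*
 * Editor's sketch, not part of the patch: minimal classification of a
 * completed status IOCB.  comp_status/scsi_status are __le16, so they are
 * converted before the CS_* and SS_* masks are applied.
 */
static int example_classify_status(const sts_entry_t *sts)
{
        uint16_t cs = le16_to_cpu(sts->comp_status);
        uint16_t ss = le16_to_cpu(sts->scsi_status);

        if (cs != CS_COMPLETE)
                return -EIO;                    /* transport-level failure */

        if ((ss & SS_CHECK_CONDITION) && (ss & SS_SENSE_LEN_VALID))
                return le16_to_cpu(sts->req_sense_length); /* sense bytes */

        return 0;                               /* clean completion */
}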
*/ +typedef struct { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t handle_count; /* Handle count. */ + uint8_t entry_status; /* Entry Status. */ + uint16_t handle[30]; /* System handles. */ +} sts22_entry_t; + +/* + * ISP queue - marker entry structure definition. + */ +#define MARKER_TYPE 0x04 /* Marker entry. */ +typedef struct { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t handle_count; /* Handle count. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t sys_define_2; /* System defined. */ + target_id_t target; /* SCSI ID */ + uint8_t modifier; /* Modifier (7-0). */ +#define MK_SYNC_ID_LUN 0 /* Synchronize ID/LUN */ +#define MK_SYNC_ID 1 /* Synchronize ID */ +#define MK_SYNC_ALL 2 /* Synchronize all ID/LUN */ +#define MK_SYNC_LIP 3 /* Synchronize all ID/LUN, */ + /* clear port changed, */ + /* use sequence number. */ + uint8_t reserved_1; + __le16 sequence_number; /* Sequence number of event */ + __le16 lun; /* SCSI LUN */ + uint8_t reserved_2[48]; +} mrk_entry_t; + +/* + * ISP queue - Management Server entry structure definition. + */ +#define MS_IOCB_TYPE 0x29 /* Management Server IOCB entry */ +typedef struct { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t handle_count; /* Handle count. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle1; /* System handle. */ + target_id_t loop_id; + __le16 status; + __le16 control_flags; /* Control flags. */ + uint16_t reserved2; + __le16 timeout; + __le16 cmd_dsd_count; + __le16 total_dsd_count; + uint8_t type; + uint8_t r_ctl; + __le16 rx_id; + uint16_t reserved3; + uint32_t handle2; + __le32 rsp_bytecount; + __le32 req_bytecount; + struct dsd64 req_dsd; + struct dsd64 rsp_dsd; +} ms_iocb_entry_t; + +#define SCM_EDC_ACC_RECEIVED BIT_6 +#define SCM_RDF_ACC_RECEIVED BIT_7 + +/* + * ISP queue - Mailbox Command entry structure definition. + */ +#define MBX_IOCB_TYPE 0x39 +struct mbx_entry { + uint8_t entry_type; + uint8_t entry_count; + uint8_t sys_define1; + /* Use sys_define1 for source type */ +#define SOURCE_SCSI 0x00 +#define SOURCE_IP 0x01 +#define SOURCE_VI 0x02 +#define SOURCE_SCTP 0x03 +#define SOURCE_MP 0x04 +#define SOURCE_MPIOCTL 0x05 +#define SOURCE_ASYNC_IOCB 0x07 + + uint8_t entry_status; + + uint32_t handle; + target_id_t loop_id; + + __le16 status; + __le16 state_flags; + __le16 status_flags; + + uint32_t sys_define2[2]; + + __le16 mb0; + __le16 mb1; + __le16 mb2; + __le16 mb3; + __le16 mb6; + __le16 mb7; + __le16 mb9; + __le16 mb10; + uint32_t reserved_2[2]; + uint8_t node_name[WWN_SIZE]; + uint8_t port_name[WWN_SIZE]; +}; + +#ifndef IMMED_NOTIFY_TYPE +#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */ +/* + * ISP queue - immediate notify entry structure definition. + * This is sent by the ISP to the Target driver. + * This IOCB would have report of events sent by the + * initiator, that needs to be handled by the target + * driver immediately. + */ +struct imm_ntfy_from_isp { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + union { + struct { + __le32 sys_define_2; /* System defined. 
*/ + target_id_t target; + __le16 lun; + uint8_t target_id; + uint8_t reserved_1; + __le16 status_modifier; + __le16 status; + __le16 task_flags; + __le16 seq_id; + __le16 srr_rx_id; + __le32 srr_rel_offs; + __le16 srr_ui; +#define SRR_IU_DATA_IN 0x1 +#define SRR_IU_DATA_OUT 0x5 +#define SRR_IU_STATUS 0x7 + __le16 srr_ox_id; + uint8_t reserved_2[28]; + } isp2x; + struct { + uint32_t reserved; + __le16 nport_handle; + uint16_t reserved_2; + __le16 flags; +#define NOTIFY24XX_FLAGS_FCSP BIT_5 +#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1 +#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0 + __le16 srr_rx_id; + __le16 status; + uint8_t status_subcode; + uint8_t fw_handle; + __le32 exchange_address; + __le32 srr_rel_offs; + __le16 srr_ui; + __le16 srr_ox_id; + union { + struct { + uint8_t node_name[8]; + } plogi; /* PLOGI/ADISC/PDISC */ + struct { + /* PRLI word 3 bit 0-15 */ + __le16 wd3_lo; + uint8_t resv0[6]; + } prli; + struct { + uint8_t port_id[3]; + uint8_t resv1; + __le16 nport_handle; + uint16_t resv2; + } req_els; + } u; + uint8_t port_name[8]; + uint8_t resv3[3]; + uint8_t vp_index; + uint32_t reserved_5; + uint8_t port_id[3]; + uint8_t reserved_6; + } isp24; + } u; + uint16_t reserved_7; + __le16 ox_id; +} __packed; +#endif + +/* + * ISP request and response queue entry sizes + */ +#define RESPONSE_ENTRY_SIZE (sizeof(response_t)) +#define REQUEST_ENTRY_SIZE (sizeof(request_t)) + + + +/* + * Switch info gathering structure. + */ +typedef struct { + port_id_t d_id; + uint8_t node_name[WWN_SIZE]; + uint8_t port_name[WWN_SIZE]; + uint8_t fabric_port_name[WWN_SIZE]; + uint16_t fp_speed; + uint8_t fc4_type; + uint8_t fc4_features; +} sw_info_t; + +/* FCP-4 types */ +#define FC4_TYPE_FCP_SCSI 0x08 +#define FC4_TYPE_NVME 0x28 +#define FC4_TYPE_OTHER 0x0 +#define FC4_TYPE_UNKNOWN 0xff + +/* mailbox command 4G & above */ +struct mbx_24xx_entry { + uint8_t entry_type; + uint8_t entry_count; + uint8_t sys_define1; + uint8_t entry_status; + uint32_t handle; + uint16_t mb[28]; +}; + +#define IOCB_SIZE 64 + +/* + * Fibre channel port type. + */ +typedef enum { + FCT_UNKNOWN, + FCT_BROADCAST = 0x01, + FCT_INITIATOR = 0x02, + FCT_TARGET = 0x04, + FCT_NVME_INITIATOR = 0x10, + FCT_NVME_TARGET = 0x20, + FCT_NVME_DISCOVERY = 0x40, + FCT_NVME = 0xf0, +} fc_port_type_t; + +enum qla_sess_deletion { + QLA_SESS_DELETION_NONE = 0, + QLA_SESS_DELETION_IN_PROGRESS, + QLA_SESS_DELETED, +}; + +enum qlt_plogi_link_t { + QLT_PLOGI_LINK_SAME_WWN, + QLT_PLOGI_LINK_CONFLICT, + QLT_PLOGI_LINK_MAX +}; + +struct qlt_plogi_ack_t { + struct list_head list; + struct imm_ntfy_from_isp iocb; + port_id_t id; + int ref_count; + void *fcport; +}; + +struct ct_sns_desc { + struct ct_sns_pkt *ct_sns; + dma_addr_t ct_sns_dma; +}; + +enum discovery_state { + DSC_DELETED, + DSC_GNL, + DSC_LOGIN_PEND, + DSC_LOGIN_FAILED, + DSC_GPDB, + DSC_UPD_FCPORT, + DSC_LOGIN_COMPLETE, + DSC_ADISC, + DSC_DELETE_PEND, + DSC_LOGIN_AUTH_PEND, +}; + +enum login_state { /* FW control Target side */ + DSC_LS_LLIOCB_SENT = 2, + DSC_LS_PLOGI_PEND, + DSC_LS_PLOGI_COMP, + DSC_LS_PRLI_PEND, + DSC_LS_PRLI_COMP, + DSC_LS_PORT_UNAVAIL, + DSC_LS_PRLO_PEND = 9, + DSC_LS_LOGO_PEND, +}; + +enum rscn_addr_format { + RSCN_PORT_ADDR, + RSCN_AREA_ADDR, + RSCN_DOM_ADDR, + RSCN_FAB_ADDR, +}; + +/* + * Fibre channel port structure. 
+ */ +typedef struct fc_port { + struct list_head list; + struct scsi_qla_host *vha; + struct list_head unsol_ctx_head; + + unsigned int conf_compl_supported:1; + unsigned int deleted:2; + unsigned int free_pending:1; + unsigned int local:1; + unsigned int logout_on_delete:1; + unsigned int logo_ack_needed:1; + unsigned int keep_nport_handle:1; + unsigned int send_els_logo:1; + unsigned int login_pause:1; + unsigned int login_succ:1; + unsigned int query:1; + unsigned int id_changed:1; + unsigned int scan_needed:1; + unsigned int n2n_flag:1; + unsigned int explicit_logout:1; + unsigned int prli_pend_timer:1; + unsigned int do_prli_nvme:1; + + uint8_t nvme_flag; + uint8_t node_name[WWN_SIZE]; + uint8_t port_name[WWN_SIZE]; + port_id_t d_id; + uint16_t loop_id; + uint16_t old_loop_id; + + struct completion nvme_del_done; + uint32_t nvme_prli_service_param; +#define NVME_PRLI_SP_PI_CTRL BIT_9 +#define NVME_PRLI_SP_SLER BIT_8 +#define NVME_PRLI_SP_CONF BIT_7 +#define NVME_PRLI_SP_INITIATOR BIT_5 +#define NVME_PRLI_SP_TARGET BIT_4 +#define NVME_PRLI_SP_DISCOVERY BIT_3 +#define NVME_PRLI_SP_FIRST_BURST BIT_0 + + uint32_t nvme_first_burst_size; +#define NVME_FLAG_REGISTERED 4 +#define NVME_FLAG_DELETING 2 +#define NVME_FLAG_RESETTING 1 + + struct fc_port *conflict; + unsigned char logout_completed; + int generation; + + struct se_session *se_sess; + struct list_head sess_cmd_list; + spinlock_t sess_cmd_lock; + struct kref sess_kref; + struct qla_tgt *tgt; + unsigned long expires; + struct list_head del_list_entry; + struct work_struct free_work; + struct work_struct reg_work; + uint64_t jiffies_at_registration; + unsigned long prli_expired; + struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; + + uint16_t tgt_id; + uint16_t old_tgt_id; + uint16_t sec_since_registration; + + uint8_t fcp_prio; + + uint8_t fabric_port_name[WWN_SIZE]; + uint16_t fp_speed; + + fc_port_type_t port_type; + + atomic_t state; + uint32_t flags; + + int login_retry; + + struct fc_rport *rport; + u32 supported_classes; + + uint8_t fc4_type; + uint8_t fc4_features; + uint8_t scan_state; + + unsigned long last_queue_full; + unsigned long last_ramp_up; + + uint16_t port_id; + + struct nvme_fc_remote_port *nvme_remote_port; + + unsigned long retry_delay_timestamp; + struct qla_tgt_sess *tgt_session; + struct ct_sns_desc ct_desc; + enum discovery_state disc_state; + atomic_t shadow_disc_state; + enum discovery_state next_disc_state; + enum login_state fw_login_state; + unsigned long dm_login_expire; + unsigned long plogi_nack_done_deadline; + + u32 login_gen, last_login_gen; + u32 rscn_gen, last_rscn_gen; + u32 chip_reset; + struct list_head gnl_entry; + struct work_struct del_work; + u8 iocb[IOCB_SIZE]; + u8 current_login_state; + u8 last_login_state; + u16 n2n_link_reset_cnt; + u16 n2n_chip_reset; + + struct dentry *dfs_rport_dir; + + u64 tgt_short_link_down_cnt; + u64 tgt_link_down_time; + u64 dev_loss_tmo; + /* + * EDIF parameters for encryption. 
+ */ + struct { + uint32_t enable:1; /* device is edif enabled/req'd */ + uint32_t app_stop:2; + uint32_t aes_gmac:1; + uint32_t app_sess_online:1; + uint32_t tx_sa_set:1; + uint32_t rx_sa_set:1; + uint32_t tx_sa_pending:1; + uint32_t rx_sa_pending:1; + uint32_t tx_rekey_cnt; + uint32_t rx_rekey_cnt; + uint64_t tx_bytes; + uint64_t rx_bytes; + uint8_t sess_down_acked; + uint8_t auth_state; + uint16_t authok:1; + uint16_t rekey_cnt; + struct list_head edif_indx_list; + spinlock_t indx_list_lock; + + struct list_head tx_sa_list; + struct list_head rx_sa_list; + spinlock_t sa_list_lock; + } edif; +} fc_port_t; + +enum { + FC4_PRIORITY_NVME = 1, + FC4_PRIORITY_FCP = 2, +}; + +#define QLA_FCPORT_SCAN 1 +#define QLA_FCPORT_FOUND 2 + +struct event_arg { + fc_port_t *fcport; + srb_t *sp; + port_id_t id; + u16 data[2], rc; + u8 port_name[WWN_SIZE]; + u32 iop[2]; +}; + +#include "qla_mr.h" + +/* + * Fibre channel port/lun states. + */ +enum { + FCS_UNKNOWN, + FCS_UNCONFIGURED, + FCS_DEVICE_DEAD, + FCS_DEVICE_LOST, + FCS_ONLINE, +}; + +extern const char *const port_state_str[5]; + +static const char *const port_dstate_str[] = { + [DSC_DELETED] = "DELETED", + [DSC_GNL] = "GNL", + [DSC_LOGIN_PEND] = "LOGIN_PEND", + [DSC_LOGIN_FAILED] = "LOGIN_FAILED", + [DSC_GPDB] = "GPDB", + [DSC_UPD_FCPORT] = "UPD_FCPORT", + [DSC_LOGIN_COMPLETE] = "LOGIN_COMPLETE", + [DSC_ADISC] = "ADISC", + [DSC_DELETE_PEND] = "DELETE_PEND", + [DSC_LOGIN_AUTH_PEND] = "LOGIN_AUTH_PEND", +}; + +/* + * FC port flags. + */ +#define FCF_FABRIC_DEVICE BIT_0 +#define FCF_LOGIN_NEEDED BIT_1 +#define FCF_FCP2_DEVICE BIT_2 +#define FCF_ASYNC_SENT BIT_3 +#define FCF_CONF_COMP_SUPPORTED BIT_4 +#define FCF_ASYNC_ACTIVE BIT_5 +#define FCF_FCSP_DEVICE BIT_6 +#define FCF_EDIF_DELETE BIT_7 + +/* No loop ID flag. */ +#define FC_NO_LOOP_ID 0x1000 + +/* + * FC-CT interface + * + * NOTE: All structures are big-endian in form. 
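/*
 * Editor's sketch, not part of the patch: the FCF_* bits live in
 * fc_port->flags, while disc_state indexes port_dstate_str[] for logging.
 * pr_info() and the %8phC WWN format are ordinary kernel printk facilities,
 * used here only for illustration.
 */
static void example_dump_fcport(const fc_port_t *fcport)
{
        pr_info("%8phC %s%sstate=%s\n",
                fcport->port_name,
                (fcport->flags & FCF_FABRIC_DEVICE) ? "fabric " : "",
                (fcport->flags & FCF_FCP2_DEVICE) ? "fcp2 " : "",
                port_dstate_str[fcport->disc_state]);
}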
+ */ + +#define CT_REJECT_RESPONSE 0x8001 +#define CT_ACCEPT_RESPONSE 0x8002 +#define CT_REASON_INVALID_COMMAND_CODE 0x01 +#define CT_REASON_CANNOT_PERFORM 0x09 +#define CT_REASON_COMMAND_UNSUPPORTED 0x0b +#define CT_EXPL_ALREADY_REGISTERED 0x10 +#define CT_EXPL_HBA_ATTR_NOT_REGISTERED 0x11 +#define CT_EXPL_MULTIPLE_HBA_ATTR 0x12 +#define CT_EXPL_INVALID_HBA_BLOCK_LENGTH 0x13 +#define CT_EXPL_MISSING_REQ_HBA_ATTR 0x14 +#define CT_EXPL_PORT_NOT_REGISTERED_ 0x15 +#define CT_EXPL_MISSING_HBA_ID_PORT_LIST 0x16 +#define CT_EXPL_HBA_NOT_REGISTERED 0x17 +#define CT_EXPL_PORT_ATTR_NOT_REGISTERED 0x20 +#define CT_EXPL_PORT_NOT_REGISTERED 0x21 +#define CT_EXPL_MULTIPLE_PORT_ATTR 0x22 +#define CT_EXPL_INVALID_PORT_BLOCK_LENGTH 0x23 + +#define NS_N_PORT_TYPE 0x01 +#define NS_NL_PORT_TYPE 0x02 +#define NS_NX_PORT_TYPE 0x7F + +#define GA_NXT_CMD 0x100 +#define GA_NXT_REQ_SIZE (16 + 4) +#define GA_NXT_RSP_SIZE (16 + 620) + +#define GPN_FT_CMD 0x172 +#define GPN_FT_REQ_SIZE (16 + 4) +#define GNN_FT_CMD 0x173 +#define GNN_FT_REQ_SIZE (16 + 4) + +#define GID_PT_CMD 0x1A1 +#define GID_PT_REQ_SIZE (16 + 4) + +#define GPN_ID_CMD 0x112 +#define GPN_ID_REQ_SIZE (16 + 4) +#define GPN_ID_RSP_SIZE (16 + 8) + +#define GNN_ID_CMD 0x113 +#define GNN_ID_REQ_SIZE (16 + 4) +#define GNN_ID_RSP_SIZE (16 + 8) + +#define GFT_ID_CMD 0x117 +#define GFT_ID_REQ_SIZE (16 + 4) +#define GFT_ID_RSP_SIZE (16 + 32) + +#define GID_PN_CMD 0x121 +#define GID_PN_REQ_SIZE (16 + 8) +#define GID_PN_RSP_SIZE (16 + 4) + +#define RFT_ID_CMD 0x217 +#define RFT_ID_REQ_SIZE (16 + 4 + 32) +#define RFT_ID_RSP_SIZE 16 + +#define RFF_ID_CMD 0x21F +#define RFF_ID_REQ_SIZE (16 + 4 + 2 + 1 + 1) +#define RFF_ID_RSP_SIZE 16 + +#define RNN_ID_CMD 0x213 +#define RNN_ID_REQ_SIZE (16 + 4 + 8) +#define RNN_ID_RSP_SIZE 16 + +#define RSNN_NN_CMD 0x239 +#define RSNN_NN_REQ_SIZE (16 + 8 + 1 + 255) +#define RSNN_NN_RSP_SIZE 16 + +#define GFPN_ID_CMD 0x11C +#define GFPN_ID_REQ_SIZE (16 + 4) +#define GFPN_ID_RSP_SIZE (16 + 8) + +#define GPSC_CMD 0x127 +#define GPSC_REQ_SIZE (16 + 8) +#define GPSC_RSP_SIZE (16 + 2 + 2) + +#define GFF_ID_CMD 0x011F +#define GFF_ID_REQ_SIZE (16 + 4) +#define GFF_ID_RSP_SIZE (16 + 128) + +/* + * FDMI HBA attribute types. 
+ */ +#define FDMI1_HBA_ATTR_COUNT 10 +#define FDMI2_HBA_ATTR_COUNT 17 + +#define FDMI_HBA_NODE_NAME 0x1 +#define FDMI_HBA_MANUFACTURER 0x2 +#define FDMI_HBA_SERIAL_NUMBER 0x3 +#define FDMI_HBA_MODEL 0x4 +#define FDMI_HBA_MODEL_DESCRIPTION 0x5 +#define FDMI_HBA_HARDWARE_VERSION 0x6 +#define FDMI_HBA_DRIVER_VERSION 0x7 +#define FDMI_HBA_OPTION_ROM_VERSION 0x8 +#define FDMI_HBA_FIRMWARE_VERSION 0x9 +#define FDMI_HBA_OS_NAME_AND_VERSION 0xa +#define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH 0xb + +#define FDMI_HBA_NODE_SYMBOLIC_NAME 0xc +#define FDMI_HBA_VENDOR_SPECIFIC_INFO 0xd +#define FDMI_HBA_NUM_PORTS 0xe +#define FDMI_HBA_FABRIC_NAME 0xf +#define FDMI_HBA_BOOT_BIOS_NAME 0x10 +#define FDMI_HBA_VENDOR_IDENTIFIER 0xe0 + +struct ct_fdmi_hba_attr { + __be16 type; + __be16 len; + union { + uint8_t node_name[WWN_SIZE]; + uint8_t manufacturer[64]; + uint8_t serial_num[32]; + uint8_t model[16+1]; + uint8_t model_desc[80]; + uint8_t hw_version[32]; + uint8_t driver_version[32]; + uint8_t orom_version[16]; + uint8_t fw_version[32]; + uint8_t os_version[128]; + __be32 max_ct_len; + + uint8_t sym_name[256]; + __be32 vendor_specific_info; + __be32 num_ports; + uint8_t fabric_name[WWN_SIZE]; + uint8_t bios_name[32]; + uint8_t vendor_identifier[8]; + } a; +}; + +struct ct_fdmi1_hba_attributes { + __be32 count; + struct ct_fdmi_hba_attr entry[FDMI1_HBA_ATTR_COUNT]; +}; + +struct ct_fdmi2_hba_attributes { + __be32 count; + struct ct_fdmi_hba_attr entry[FDMI2_HBA_ATTR_COUNT]; +}; + +/* + * FDMI Port attribute types. + */ +#define FDMI1_PORT_ATTR_COUNT 6 +#define FDMI2_PORT_ATTR_COUNT 16 +#define FDMI2_SMARTSAN_PORT_ATTR_COUNT 23 + +#define FDMI_PORT_FC4_TYPES 0x1 +#define FDMI_PORT_SUPPORT_SPEED 0x2 +#define FDMI_PORT_CURRENT_SPEED 0x3 +#define FDMI_PORT_MAX_FRAME_SIZE 0x4 +#define FDMI_PORT_OS_DEVICE_NAME 0x5 +#define FDMI_PORT_HOST_NAME 0x6 + +#define FDMI_PORT_NODE_NAME 0x7 +#define FDMI_PORT_NAME 0x8 +#define FDMI_PORT_SYM_NAME 0x9 +#define FDMI_PORT_TYPE 0xa +#define FDMI_PORT_SUPP_COS 0xb +#define FDMI_PORT_FABRIC_NAME 0xc +#define FDMI_PORT_FC4_TYPE 0xd +#define FDMI_PORT_STATE 0x101 +#define FDMI_PORT_COUNT 0x102 +#define FDMI_PORT_IDENTIFIER 0x103 + +#define FDMI_SMARTSAN_SERVICE 0xF100 +#define FDMI_SMARTSAN_GUID 0xF101 +#define FDMI_SMARTSAN_VERSION 0xF102 +#define FDMI_SMARTSAN_PROD_NAME 0xF103 +#define FDMI_SMARTSAN_PORT_INFO 0xF104 +#define FDMI_SMARTSAN_QOS_SUPPORT 0xF105 +#define FDMI_SMARTSAN_SECURITY_SUPPORT 0xF106 + +#define FDMI_PORT_SPEED_1GB 0x1 +#define FDMI_PORT_SPEED_2GB 0x2 +#define FDMI_PORT_SPEED_10GB 0x4 +#define FDMI_PORT_SPEED_4GB 0x8 +#define FDMI_PORT_SPEED_8GB 0x10 +#define FDMI_PORT_SPEED_16GB 0x20 +#define FDMI_PORT_SPEED_32GB 0x40 +#define FDMI_PORT_SPEED_20GB 0x80 +#define FDMI_PORT_SPEED_40GB 0x100 +#define FDMI_PORT_SPEED_128GB 0x200 +#define FDMI_PORT_SPEED_64GB 0x400 +#define FDMI_PORT_SPEED_256GB 0x800 +#define FDMI_PORT_SPEED_UNKNOWN 0x8000 + +#define FC_CLASS_2 0x04 +#define FC_CLASS_3 0x08 +#define FC_CLASS_2_3 0x0C + +struct ct_fdmi_port_attr { + __be16 type; + __be16 len; + union { + uint8_t fc4_types[32]; + __be32 sup_speed; + __be32 cur_speed; + __be32 max_frame_size; + uint8_t os_dev_name[32]; + uint8_t host_name[256]; + + uint8_t node_name[WWN_SIZE]; + uint8_t port_name[WWN_SIZE]; + uint8_t port_sym_name[128]; + __be32 port_type; + __be32 port_supported_cos; + uint8_t fabric_name[WWN_SIZE]; + uint8_t port_fc4_type[32]; + __be32 port_state; + __be32 num_ports; + __be32 port_id; + + uint8_t smartsan_service[24]; + uint8_t smartsan_guid[16]; + uint8_t 
smartsan_version[24]; + uint8_t smartsan_prod_name[16]; + __be32 smartsan_port_info; + __be32 smartsan_qos_support; + __be32 smartsan_security_support; + } a; +}; + +struct ct_fdmi1_port_attributes { + __be32 count; + struct ct_fdmi_port_attr entry[FDMI1_PORT_ATTR_COUNT]; +}; + +struct ct_fdmi2_port_attributes { + __be32 count; + struct ct_fdmi_port_attr entry[FDMI2_PORT_ATTR_COUNT]; +}; + +#define FDMI_ATTR_TYPELEN(obj) \ + (sizeof((obj)->type) + sizeof((obj)->len)) + +#define FDMI_ATTR_ALIGNMENT(len) \ + (4 - ((len) & 3)) + +/* FDMI register call options */ +#define CALLOPT_FDMI1 0 +#define CALLOPT_FDMI2 1 +#define CALLOPT_FDMI2_SMARTSAN 2 + +/* FDMI definitions. */ +#define GRHL_CMD 0x100 +#define GHAT_CMD 0x101 +#define GRPL_CMD 0x102 +#define GPAT_CMD 0x110 + +#define RHBA_CMD 0x200 +#define RHBA_RSP_SIZE 16 + +#define RHAT_CMD 0x201 + +#define RPRT_CMD 0x210 +#define RPRT_RSP_SIZE 24 + +#define RPA_CMD 0x211 +#define RPA_RSP_SIZE 16 +#define SMARTSAN_RPA_RSP_SIZE 24 + +#define DHBA_CMD 0x300 +#define DHBA_REQ_SIZE (16 + 8) +#define DHBA_RSP_SIZE 16 + +#define DHAT_CMD 0x301 +#define DPRT_CMD 0x310 +#define DPA_CMD 0x311 + +/* CT command header -- request/response common fields */ +struct ct_cmd_hdr { + uint8_t revision; + uint8_t in_id[3]; + uint8_t gs_type; + uint8_t gs_subtype; + uint8_t options; + uint8_t reserved; +}; + +/* CT command request */ +struct ct_sns_req { + struct ct_cmd_hdr header; + __be16 command; + __be16 max_rsp_size; + uint8_t fragment_id; + uint8_t reserved[3]; + + union { + /* GA_NXT, GPN_ID, GNN_ID, GFT_ID, GFPN_ID */ + struct { + uint8_t reserved; + be_id_t port_id; + } port_id; + + struct { + uint8_t reserved; + uint8_t domain; + uint8_t area; + uint8_t port_type; + } gpn_ft; + + struct { + uint8_t port_type; + uint8_t domain; + uint8_t area; + uint8_t reserved; + } gid_pt; + + struct { + uint8_t reserved; + be_id_t port_id; + uint8_t fc4_types[32]; + } rft_id; + + struct { + uint8_t reserved; + be_id_t port_id; + uint16_t reserved2; + uint8_t fc4_feature; + uint8_t fc4_type; + } rff_id; + + struct { + uint8_t reserved; + be_id_t port_id; + uint8_t node_name[8]; + } rnn_id; + + struct { + uint8_t node_name[8]; + uint8_t name_len; + uint8_t sym_node_name[255]; + } rsnn_nn; + + struct { + uint8_t hba_identifier[8]; + } ghat; + + struct { + uint8_t hba_identifier[8]; + __be32 entry_count; + uint8_t port_name[8]; + struct ct_fdmi2_hba_attributes attrs; + } rhba; + + struct { + uint8_t hba_identifier[8]; + struct ct_fdmi1_hba_attributes attrs; + } rhat; + + struct { + uint8_t port_name[8]; + struct ct_fdmi2_port_attributes attrs; + } rpa; + + struct { + uint8_t hba_identifier[8]; + uint8_t port_name[8]; + struct ct_fdmi2_port_attributes attrs; + } rprt; + + struct { + uint8_t port_name[8]; + } dhba; + + struct { + uint8_t port_name[8]; + } dhat; + + struct { + uint8_t port_name[8]; + } dprt; + + struct { + uint8_t port_name[8]; + } dpa; + + struct { + uint8_t port_name[8]; + } gpsc; + + struct { + uint8_t reserved; + uint8_t port_id[3]; + } gff_id; + + struct { + uint8_t port_name[8]; + } gid_pn; + } req; +}; + +/* CT command response header */ +struct ct_rsp_hdr { + struct ct_cmd_hdr header; + __be16 response; + uint16_t residual; + uint8_t fragment_id; + uint8_t reason_code; + uint8_t explanation_code; + uint8_t vendor_unique; +}; + +struct ct_sns_gid_pt_data { + uint8_t control_byte; + be_id_t port_id; +}; + +/* It's the same for both GPN_FT and GNN_FT */ +struct ct_sns_gpnft_rsp { + struct { + struct ct_cmd_hdr header; + uint16_t response; + uint16_t 
residual; + uint8_t fragment_id; + uint8_t reason_code; + uint8_t explanation_code; + uint8_t vendor_unique; + }; + /* Assume the largest number of targets for the union */ + DECLARE_FLEX_ARRAY(struct ct_sns_gpn_ft_data { + u8 control_byte; + u8 port_id[3]; + u32 reserved; + u8 port_name[8]; + }, entries); +}; + +/* CT command response */ +struct ct_sns_rsp { + struct ct_rsp_hdr header; + + union { + struct { + uint8_t port_type; + be_id_t port_id; + uint8_t port_name[8]; + uint8_t sym_port_name_len; + uint8_t sym_port_name[255]; + uint8_t node_name[8]; + uint8_t sym_node_name_len; + uint8_t sym_node_name[255]; + uint8_t init_proc_assoc[8]; + uint8_t node_ip_addr[16]; + uint8_t class_of_service[4]; + uint8_t fc4_types[32]; + uint8_t ip_address[16]; + uint8_t fabric_port_name[8]; + uint8_t reserved; + uint8_t hard_address[3]; + } ga_nxt; + + struct { + /* Assume the largest number of targets for the union */ + struct ct_sns_gid_pt_data + entries[MAX_FIBRE_DEVICES_MAX]; + } gid_pt; + + struct { + uint8_t port_name[8]; + } gpn_id; + + struct { + uint8_t node_name[8]; + } gnn_id; + + struct { + uint8_t fc4_types[32]; + } gft_id; + + struct { + uint32_t entry_count; + uint8_t port_name[8]; + struct ct_fdmi1_hba_attributes attrs; + } ghat; + + struct { + uint8_t port_name[8]; + } gfpn_id; + + struct { + __be16 speeds; + __be16 speed; + } gpsc; + +#define GFF_FCP_SCSI_OFFSET 7 +#define GFF_NVME_OFFSET 23 /* type = 28h */ + struct { + uint8_t fc4_features[128]; +#define FC4_FF_TARGET BIT_0 +#define FC4_FF_INITIATOR BIT_1 + } gff_id; + struct { + uint8_t reserved; + uint8_t port_id[3]; + } gid_pn; + } rsp; +}; + +struct ct_sns_pkt { + union { + struct ct_sns_req req; + struct ct_sns_rsp rsp; + } p; +}; + +struct ct_sns_gpnft_pkt { + union { + struct ct_sns_req req; + struct ct_sns_gpnft_rsp rsp; + } p; +}; + +enum scan_flags_t { + SF_SCANNING = BIT_0, + SF_QUEUED = BIT_1, +}; + +enum fc4type_t { + FS_FC4TYPE_FCP = BIT_0, + FS_FC4TYPE_NVME = BIT_1, + FS_FCP_IS_N2N = BIT_7, +}; + +struct fab_scan_rp { + port_id_t id; + enum fc4type_t fc4type; + u8 port_name[8]; + u8 node_name[8]; +}; + +struct fab_scan { + struct fab_scan_rp *l; + u32 size; + u16 scan_retry; +#define MAX_SCAN_RETRIES 5 + enum scan_flags_t scan_flags; + struct delayed_work scan_work; +}; + +/* + * SNS command structures -- for 2200 compatibility. + */ +#define RFT_ID_SNS_SCMD_LEN 22 +#define RFT_ID_SNS_CMD_SIZE 60 +#define RFT_ID_SNS_DATA_SIZE 16 + +#define RNN_ID_SNS_SCMD_LEN 10 +#define RNN_ID_SNS_CMD_SIZE 36 +#define RNN_ID_SNS_DATA_SIZE 16 + +#define GA_NXT_SNS_SCMD_LEN 6 +#define GA_NXT_SNS_CMD_SIZE 28 +#define GA_NXT_SNS_DATA_SIZE (620 + 16) + +#define GID_PT_SNS_SCMD_LEN 6 +#define GID_PT_SNS_CMD_SIZE 28 +/* + * Assume MAX_FIBRE_DEVICES_2100 as these defines are only used with older + * adapters. 
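/*
 * Editor's sketch, not part of the patch: preparing a GPN_ID name-server
 * query in a ct_sns_pkt.  Only fields defined in this header are touched;
 * the gs_type/gs_subtype values are assumed from the usual FC-GS directory/
 * name-server encoding, and how the packet is DMA-mapped and submitted
 * (MS IOCB vs. SNS mailbox path) is driver code outside this file.
 * be_id_t comes from earlier in the header.
 */
static void example_prep_gpn_id(struct ct_sns_pkt *p, be_id_t id)
{
        memset(p, 0, sizeof(*p));
        p->p.req.header.revision = 0x01;
        p->p.req.header.gs_type = 0xFC;         /* directory service (assumed) */
        p->p.req.header.gs_subtype = 0x02;      /* name server (assumed) */
        p->p.req.command = cpu_to_be16(GPN_ID_CMD);
        p->p.req.max_rsp_size = cpu_to_be16((GPN_ID_RSP_SIZE - 16) / 4);
        p->p.req.req.port_id.port_id = id;      /* response: p.rsp.gpn_id.port_name */
}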
+ */ +#define GID_PT_SNS_DATA_SIZE (MAX_FIBRE_DEVICES_2100 * 4 + 16) + +#define GPN_ID_SNS_SCMD_LEN 6 +#define GPN_ID_SNS_CMD_SIZE 28 +#define GPN_ID_SNS_DATA_SIZE (8 + 16) + +#define GNN_ID_SNS_SCMD_LEN 6 +#define GNN_ID_SNS_CMD_SIZE 28 +#define GNN_ID_SNS_DATA_SIZE (8 + 16) + +struct sns_cmd_pkt { + union { + struct { + __le16 buffer_length; + __le16 reserved_1; + __le64 buffer_address __packed; + __le16 subcommand_length; + __le16 reserved_2; + __le16 subcommand; + __le16 size; + uint32_t reserved_3; + uint8_t param[36]; + } cmd; + + uint8_t rft_data[RFT_ID_SNS_DATA_SIZE]; + uint8_t rnn_data[RNN_ID_SNS_DATA_SIZE]; + uint8_t gan_data[GA_NXT_SNS_DATA_SIZE]; + uint8_t gid_data[GID_PT_SNS_DATA_SIZE]; + uint8_t gpn_data[GPN_ID_SNS_DATA_SIZE]; + uint8_t gnn_data[GNN_ID_SNS_DATA_SIZE]; + } p; +}; + +struct fw_blob { + char *name; + uint32_t segs[4]; + const struct firmware *fw; +}; + +/* Return data from MBC_GET_ID_LIST call. */ +struct gid_list_info { + uint8_t al_pa; + uint8_t area; + uint8_t domain; + uint8_t loop_id_2100; /* ISP2100/ISP2200 -- 4 bytes. */ + __le16 loop_id; /* ISP23XX -- 6 bytes. */ + uint16_t reserved_1; /* ISP24XX -- 8 bytes. */ +}; + +/* NPIV */ +typedef struct vport_info { + uint8_t port_name[WWN_SIZE]; + uint8_t node_name[WWN_SIZE]; + int vp_id; + uint16_t loop_id; + unsigned long host_no; + uint8_t port_id[3]; + int loop_state; +} vport_info_t; + +typedef struct vport_params { + uint8_t port_name[WWN_SIZE]; + uint8_t node_name[WWN_SIZE]; + uint32_t options; +#define VP_OPTS_RETRY_ENABLE BIT_0 +#define VP_OPTS_VP_DISABLE BIT_1 +} vport_params_t; + +/* NPIV - return codes of VP create and modify */ +#define VP_RET_CODE_OK 0 +#define VP_RET_CODE_FATAL 1 +#define VP_RET_CODE_WRONG_ID 2 +#define VP_RET_CODE_WWPN 3 +#define VP_RET_CODE_RESOURCES 4 +#define VP_RET_CODE_NO_MEM 5 +#define VP_RET_CODE_NOT_FOUND 6 + +struct qla_hw_data; +struct rsp_que; +/* + * ISP operations + */ +struct isp_operations { + + int (*pci_config) (struct scsi_qla_host *); + int (*reset_chip)(struct scsi_qla_host *); + int (*chip_diag) (struct scsi_qla_host *); + void (*config_rings) (struct scsi_qla_host *); + int (*reset_adapter)(struct scsi_qla_host *); + int (*nvram_config) (struct scsi_qla_host *); + void (*update_fw_options) (struct scsi_qla_host *); + int (*load_risc) (struct scsi_qla_host *, uint32_t *); + + char * (*pci_info_str)(struct scsi_qla_host *, char *, size_t); + char * (*fw_version_str)(struct scsi_qla_host *, char *, size_t); + + irq_handler_t intr_handler; + void (*enable_intrs) (struct qla_hw_data *); + void (*disable_intrs) (struct qla_hw_data *); + + int (*abort_command) (srb_t *); + int (*target_reset) (struct fc_port *, uint64_t, int); + int (*lun_reset) (struct fc_port *, uint64_t, int); + int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, + uint8_t, uint8_t, uint16_t *, uint8_t); + int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t, + uint8_t, uint8_t); + + uint16_t (*calc_req_entries) (uint16_t); + void (*build_iocbs) (srb_t *, cmd_entry_t *, uint16_t); + void *(*prep_ms_iocb) (struct scsi_qla_host *, struct ct_arg *); + void *(*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t, + uint32_t); + + uint8_t *(*read_nvram)(struct scsi_qla_host *, void *, + uint32_t, uint32_t); + int (*write_nvram)(struct scsi_qla_host *, void *, uint32_t, + uint32_t); + + void (*fw_dump)(struct scsi_qla_host *vha); + void (*mpi_fw_dump)(struct scsi_qla_host *, int); + + /* Context: task, might sleep */ + int (*beacon_on) (struct scsi_qla_host *); + int 
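/*
 * Editor's sketch, not part of the patch: MBC_GET_ID_LIST returns packed
 * gid_list_info records whose stride depends on the ISP generation (4, 6 or
 * 8 bytes, per the field comments above), so a walk advances by a
 * caller-supplied entry size rather than sizeof().
 */
static uint16_t example_count_domain(void *gid_list, int entries,
                                     size_t entry_size, uint8_t domain)
{
        struct gid_list_info *gid = gid_list;
        uint16_t hits = 0;
        int i;

        for (i = 0; i < entries; i++) {
                if (gid->domain == domain)
                        hits++;
                gid = (void *)gid + entry_size; /* stride varies by ISP */
        }
        return hits;
}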
(*beacon_off) (struct scsi_qla_host *); + + void (*beacon_blink) (struct scsi_qla_host *); + + void *(*read_optrom)(struct scsi_qla_host *, void *, + uint32_t, uint32_t); + int (*write_optrom)(struct scsi_qla_host *, void *, uint32_t, + uint32_t); + + int (*get_flash_version) (struct scsi_qla_host *, void *); + int (*start_scsi) (srb_t *); + int (*start_scsi_mq) (srb_t *); + + /* Context: task, might sleep */ + int (*abort_isp) (struct scsi_qla_host *); + + int (*iospace_config)(struct qla_hw_data *); + int (*initialize_adapter)(struct scsi_qla_host *); +}; + +/* MSI-X Support *************************************************************/ + +#define QLA_MSIX_CHIP_REV_24XX 3 +#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7) +#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1) + +#define QLA_BASE_VECTORS 2 /* default + RSP */ +#define QLA_MSIX_RSP_Q 0x01 +#define QLA_ATIO_VECTOR 0x02 +#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03 +#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS 0x04 + +#define QLA_MIDX_DEFAULT 0 +#define QLA_MIDX_RSP_Q 1 +#define QLA_PCI_MSIX_CONTROL 0xa2 +#define QLA_83XX_PCI_MSIX_CONTROL 0x92 + +struct scsi_qla_host; + + +#define QLA83XX_RSPQ_MSIX_ENTRY_NUMBER 1 /* refer to qla83xx_msix_entries */ + +struct qla_msix_entry { + int have_irq; + int in_use; + uint32_t vector; + uint32_t vector_base0; + uint16_t entry; + char name[30]; + void *handle; + int cpuid; +}; + +#define WATCH_INTERVAL 1 /* number of seconds */ + +/* Work events. */ +enum qla_work_type { + QLA_EVT_AEN, + QLA_EVT_IDC_ACK, + QLA_EVT_ASYNC_LOGIN, + QLA_EVT_ASYNC_LOGOUT, + QLA_EVT_ASYNC_ADISC, + QLA_EVT_UEVENT, + QLA_EVT_AENFX, + QLA_EVT_UNMAP, + QLA_EVT_NEW_SESS, + QLA_EVT_GPDB, + QLA_EVT_PRLI, + QLA_EVT_GPSC, + QLA_EVT_GNL, + QLA_EVT_NACK, + QLA_EVT_RELOGIN, + QLA_EVT_ASYNC_PRLO, + QLA_EVT_ASYNC_PRLO_DONE, + QLA_EVT_GPNFT, + QLA_EVT_GPNFT_DONE, + QLA_EVT_GNNFT_DONE, + QLA_EVT_GFPNID, + QLA_EVT_SP_RETRY, + QLA_EVT_IIDMA, + QLA_EVT_ELS_PLOGI, + QLA_EVT_SA_REPLACE, +}; + + +struct qla_work_evt { + struct list_head list; + enum qla_work_type type; + u32 flags; +#define QLA_EVT_FLAG_FREE 0x1 + + union { + struct { + enum fc_host_event_code code; + u32 data; + } aen; + struct { +#define QLA_IDC_ACK_REGS 7 + uint16_t mb[QLA_IDC_ACK_REGS]; + } idc_ack; + struct { + struct fc_port *fcport; +#define QLA_LOGIO_LOGIN_RETRIED BIT_0 + u16 data[2]; + } logio; + struct { + u32 code; +#define QLA_UEVENT_CODE_FW_DUMP 0 + } uevent; + struct { + uint32_t evtcode; + uint32_t mbx[8]; + uint32_t count; + } aenfx; + struct { + srb_t *sp; + } iosb; + struct { + port_id_t id; + u8 port_name[8]; + u8 node_name[8]; + void *pla; + u8 fc4_type; + } new_sess; + struct { /*Get PDB, Get Speed, update fcport, gnl */ + fc_port_t *fcport; + u8 opt; + } fcport; + struct { + fc_port_t *fcport; + u8 iocb[IOCB_SIZE]; + int type; + } nack; + struct { + u8 fc4_type; + srb_t *sp; + } gpnft; + struct { + struct edif_sa_ctl *sa_ctl; + fc_port_t *fcport; + uint16_t nport_handle; + } sa_update; + } u; +}; + +struct qla_chip_state_84xx { + struct list_head list; + struct kref kref; + + void *bus; + spinlock_t access_lock; + struct mutex fw_update_mutex; + uint32_t fw_update; + uint32_t op_fw_version; + uint32_t op_fw_size; + uint32_t op_fw_seq_size; + uint32_t diag_fw_version; + uint32_t gold_fw_version; +}; + +struct qla_dif_statistics { + uint64_t dif_input_bytes; + uint64_t dif_output_bytes; + uint64_t dif_input_requests; + uint64_t dif_output_requests; + uint32_t dif_guard_err; + uint32_t dif_ref_tag_err; + uint32_t dif_app_tag_err; +}; + 
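/*
 * Editor's sketch, not part of the patch: how a deferred AEN gets packaged
 * into a struct qla_work_evt.  alloc_work()/post_work() are hypothetical
 * stand-ins for the driver's work-queue helpers; FCH_EVT_LIP is one of the
 * fc_host event codes from scsi_transport_fc.h.
 */
static int example_post_aen(struct scsi_qla_host *vha,
                            enum fc_host_event_code code, u32 data)
{
        struct qla_work_evt *e = alloc_work(vha, QLA_EVT_AEN);

        if (!e)
                return -ENOMEM;

        e->u.aen.code = code;   /* e.g. FCH_EVT_LIP */
        e->u.aen.data = data;
        return post_work(vha, e);
}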
+struct qla_statistics { + uint32_t total_isp_aborts; + uint64_t input_bytes; + uint64_t output_bytes; + uint64_t input_requests; + uint64_t output_requests; + uint32_t control_requests; + + uint64_t jiffies_at_last_reset; + uint32_t stat_max_pend_cmds; + uint32_t stat_max_qfull_cmds_alloc; + uint32_t stat_max_qfull_cmds_dropped; + + struct qla_dif_statistics qla_dif_stats; +}; + +struct bidi_statistics { + unsigned long long io_count; + unsigned long long transfer_bytes; +}; + +struct qla_tc_param { + struct scsi_qla_host *vha; + uint32_t blk_sz; + uint32_t bufflen; + struct scatterlist *sg; + struct scatterlist *prot_sg; + struct crc_context *ctx; + uint8_t *ctx_dsd_alloced; +}; + +/* Multi queue support */ +#define MBC_INITIALIZE_MULTIQ 0x1f +#define QLA_QUE_PAGE 0X1000 +#define QLA_MQ_SIZE 32 +#define QLA_MAX_QUEUES 256 +#define ISP_QUE_REG(ha, id) \ + ((ha->mqenable || IS_QLA83XX(ha) || \ + IS_QLA27XX(ha) || IS_QLA28XX(ha)) ? \ + ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\ + ((void __iomem *)ha->iobase)) +#define QLA_REQ_QUE_ID(tag) \ + ((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0) +#define QLA_DEFAULT_QUE_QOS 5 +#define QLA_PRECONFIG_VPORTS 32 +#define QLA_MAX_VPORTS_QLA24XX 128 +#define QLA_MAX_VPORTS_QLA25XX 256 + +struct qla_tgt_counters { + uint64_t qla_core_sbt_cmd; + uint64_t core_qla_que_buf; + uint64_t qla_core_ret_ctio; + uint64_t core_qla_snd_status; + uint64_t qla_core_ret_sta_ctio; + uint64_t core_qla_free_cmd; + uint64_t num_q_full_sent; + uint64_t num_alloc_iocb_failed; + uint64_t num_term_xchg_sent; +}; + +struct qla_counters { + uint64_t input_bytes; + uint64_t input_requests; + uint64_t output_bytes; + uint64_t output_requests; + +}; + +struct qla_qpair; + +/* Response queue data structure */ +struct rsp_que { + dma_addr_t dma; + response_t *ring; + response_t *ring_ptr; + __le32 __iomem *rsp_q_in; /* FWI2-capable only. */ + __le32 __iomem *rsp_q_out; + uint16_t ring_index; + uint16_t out_ptr; + uint16_t *in_ptr; /* queue shadow in index */ + uint16_t length; + uint16_t options; + uint16_t rid; + uint16_t id; + uint16_t vp_idx; + struct qla_hw_data *hw; + struct qla_msix_entry *msix; + struct req_que *req; + srb_t *status_srb; /* status continuation entry */ + struct qla_qpair *qpair; + + dma_addr_t dma_fx00; + response_t *ring_fx00; + uint16_t length_fx00; + uint8_t rsp_pkt[REQUEST_ENTRY_SIZE]; +}; + +/* Request queue data structure */ +struct req_que { + dma_addr_t dma; + request_t *ring; + request_t *ring_ptr; + __le32 __iomem *req_q_in; /* FWI2-capable only. 
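/*
 * Editor's sketch, not part of the patch: the usual way such a response
 * ring is consumed -- advance ring_ptr/ring_index, wrap at ->length, and
 * treat an entry as live only while its signature is not
 * RESPONSE_PROCESSED.  A real handler also writes ring_index back to
 * ->rsp_q_out and marks the entry processed afterwards.
 */
static response_t *example_next_rsp(struct rsp_que *rsp)
{
        response_t *pkt = rsp->ring_ptr;

        if (pkt->signature == RESPONSE_PROCESSED)
                return NULL;                    /* nothing new from firmware */

        if (++rsp->ring_index == rsp->length) {
                rsp->ring_index = 0;
                rsp->ring_ptr = rsp->ring;      /* wrap to start of ring */
        } else {
                rsp->ring_ptr++;
        }
        return pkt;
}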
*/ + __le32 __iomem *req_q_out; + uint16_t ring_index; + uint16_t in_ptr; + uint16_t *out_ptr; /* queue shadow out index */ + uint16_t cnt; + uint16_t length; + uint16_t options; + uint16_t rid; + uint16_t id; + uint16_t qos; + uint16_t vp_idx; + struct rsp_que *rsp; + srb_t **outstanding_cmds; + uint32_t current_outstanding_cmd; + uint16_t num_outstanding_cmds; + int max_q_depth; + + dma_addr_t dma_fx00; + request_t *ring_fx00; + uint16_t length_fx00; + uint8_t req_pkt[REQUEST_ENTRY_SIZE]; +}; + +struct qla_fw_resources { + u16 iocbs_total; + u16 iocbs_limit; + u16 iocbs_qp_limit; + u16 iocbs_used; + u16 exch_total; + u16 exch_limit; + u16 exch_used; + u16 pad; +}; + +struct qla_fw_res { + u16 iocb_total; + u16 iocb_limit; + atomic_t iocb_used; + + u16 exch_total; + u16 exch_limit; + atomic_t exch_used; +}; + +#define QLA_IOCB_PCT_LIMIT 95 + +struct qla_buf_pool { + u16 num_bufs; + u16 num_active; + u16 max_used; + u16 num_alloc; + u16 prev_max; + u16 pad; + uint32_t take_snapshot:1; + unsigned long *buf_map; + void **buf_array; + dma_addr_t *dma_array; +}; + +/*Queue pair data structure */ +struct qla_qpair { + spinlock_t qp_lock; + atomic_t ref_count; + uint32_t lun_cnt; + /* + * For qpair 0, qp_lock_ptr will point at hardware_lock due to + * legacy code. For other Qpair(s), it will point at qp_lock. + */ + spinlock_t *qp_lock_ptr; + struct scsi_qla_host *vha; + u32 chip_reset; + + /* distill these fields down to 'online=0/1' + * ha->flags.eeh_busy + * ha->flags.pci_channel_io_perm_failure + * base_vha->loop_state + */ + uint32_t online:1; + /* move vha->flags.difdix_supported here */ + uint32_t difdix_supported:1; + uint32_t delete_in_progress:1; + uint32_t fw_started:1; + uint32_t enable_class_2:1; + uint32_t enable_explicit_conf:1; + uint32_t use_shadow_reg:1; + uint32_t rcv_intr:1; + + uint16_t id; /* qp number used with FW */ + uint16_t vp_idx; /* vport ID */ + + uint16_t dsd_inuse; + uint16_t dsd_avail; + struct list_head dsd_list; +#define NUM_DSD_CHAIN 4096 + + mempool_t *srb_mempool; + + struct pci_dev *pdev; + void (*reqq_start_iocbs)(struct qla_qpair *); + + /* to do: New driver: move queues to here instead of pointers */ + struct req_que *req; + struct rsp_que *rsp; + struct atio_que *atio; + struct qla_msix_entry *msix; /* point to &ha->msix_entries[x] */ + struct qla_hw_data *hw; + struct work_struct q_work; + struct qla_counters counters; + + struct list_head qp_list_elem; /* vha->qp_list */ + struct list_head hints_list; + + uint16_t retry_term_cnt; + __le32 retry_term_exchg_addr; + uint64_t retry_term_jiff; + struct qla_tgt_counters tgt_counters; + uint16_t cpuid; + bool cpu_mapped; + struct qla_fw_resources fwres ____cacheline_aligned; + struct qla_buf_pool buf_pool; + u32 cmd_cnt; + u32 cmd_completion_cnt; + u32 prev_completion_cnt; +}; + +/* Place holder for FW buffer parameters */ +struct qlfc_fw { + void *fw_buf; + dma_addr_t fw_dma; + uint32_t len; +}; + +struct rdp_req_payload { + uint32_t els_request; + uint32_t desc_list_len; + + /* NPIV descriptor */ + struct { + uint32_t desc_tag; + uint32_t desc_len; + uint8_t reserved; + uint8_t nport_id[3]; + } npiv_desc; +}; + +struct rdp_rsp_payload { + struct { + __be32 cmd; + __be32 len; + } hdr; + + /* LS Request Info descriptor */ + struct { + __be32 desc_tag; + __be32 desc_len; + __be32 req_payload_word_0; + } ls_req_info_desc; + + /* LS Request Info descriptor */ + struct { + __be32 desc_tag; + __be32 desc_len; + __be32 req_payload_word_0; + } ls_req_info_desc2; + + /* SFP diagnostic param descriptor */ + struct { 
+ __be32 desc_tag; + __be32 desc_len; + __be16 temperature; + __be16 vcc; + __be16 tx_bias; + __be16 tx_power; + __be16 rx_power; + __be16 sfp_flags; + } sfp_diag_desc; + + /* Port Speed Descriptor */ + struct { + __be32 desc_tag; + __be32 desc_len; + __be16 speed_capab; + __be16 operating_speed; + } port_speed_desc; + + /* Link Error Status Descriptor */ + struct { + __be32 desc_tag; + __be32 desc_len; + __be32 link_fail_cnt; + __be32 loss_sync_cnt; + __be32 loss_sig_cnt; + __be32 prim_seq_err_cnt; + __be32 inval_xmit_word_cnt; + __be32 inval_crc_cnt; + uint8_t pn_port_phy_type; + uint8_t reserved[3]; + } ls_err_desc; + + /* Port name description with diag param */ + struct { + __be32 desc_tag; + __be32 desc_len; + uint8_t WWNN[WWN_SIZE]; + uint8_t WWPN[WWN_SIZE]; + } port_name_diag_desc; + + /* Port Name desc for Direct attached Fx_Port or Nx_Port */ + struct { + __be32 desc_tag; + __be32 desc_len; + uint8_t WWNN[WWN_SIZE]; + uint8_t WWPN[WWN_SIZE]; + } port_name_direct_desc; + + /* Buffer Credit descriptor */ + struct { + __be32 desc_tag; + __be32 desc_len; + __be32 fcport_b2b; + __be32 attached_fcport_b2b; + __be32 fcport_rtt; + } buffer_credit_desc; + + /* Optical Element Data Descriptor */ + struct { + __be32 desc_tag; + __be32 desc_len; + __be16 high_alarm; + __be16 low_alarm; + __be16 high_warn; + __be16 low_warn; + __be32 element_flags; + } optical_elmt_desc[5]; + + /* Optical Product Data Descriptor */ + struct { + __be32 desc_tag; + __be32 desc_len; + uint8_t vendor_name[16]; + uint8_t part_number[16]; + uint8_t serial_number[16]; + uint8_t revision[4]; + uint8_t date[8]; + } optical_prod_desc; +}; + +#define RDP_DESC_LEN(obj) \ + (sizeof(obj) - sizeof((obj).desc_tag) - sizeof((obj).desc_len)) + +#define RDP_PORT_SPEED_1GB BIT_15 +#define RDP_PORT_SPEED_2GB BIT_14 +#define RDP_PORT_SPEED_4GB BIT_13 +#define RDP_PORT_SPEED_10GB BIT_12 +#define RDP_PORT_SPEED_8GB BIT_11 +#define RDP_PORT_SPEED_16GB BIT_10 +#define RDP_PORT_SPEED_32GB BIT_9 +#define RDP_PORT_SPEED_64GB BIT_8 +#define RDP_PORT_SPEED_UNKNOWN BIT_0 + +struct scsi_qlt_host { + void *target_lport_ptr; + struct mutex tgt_mutex; + struct mutex tgt_host_action_mutex; + struct qla_tgt *qla_tgt; +}; + +struct qlt_hw_data { + /* Protected by hw lock */ + uint32_t node_name_set:1; + + dma_addr_t atio_dma; /* Physical address. */ + struct atio *atio_ring; /* Base virtual address */ + struct atio *atio_ring_ptr; /* Current address. */ + uint16_t atio_ring_index; /* Current index. 
*/ + uint16_t atio_q_length; + __le32 __iomem *atio_q_in; + __le32 __iomem *atio_q_out; + + const struct qla_tgt_func_tmpl *tgt_ops; + + int saved_set; + __le16 saved_exchange_count; + __le32 saved_firmware_options_1; + __le32 saved_firmware_options_2; + __le32 saved_firmware_options_3; + uint8_t saved_firmware_options[2]; + uint8_t saved_add_firmware_options[2]; + + uint8_t tgt_node_name[WWN_SIZE]; + + struct dentry *dfs_tgt_sess; + struct dentry *dfs_tgt_port_database; + struct dentry *dfs_naqp; + + struct list_head q_full_list; + uint32_t num_pend_cmds; + uint32_t num_qfull_cmds_alloc; + uint32_t num_qfull_cmds_dropped; + spinlock_t q_full_lock; + uint32_t leak_exchg_thresh_hold; + spinlock_t sess_lock; + int num_act_qpairs; +#define DEFAULT_NAQP 2 + spinlock_t atio_lock ____cacheline_aligned; +}; + +#define MAX_QFULL_CMDS_ALLOC 8192 +#define Q_FULL_THRESH_HOLD_PERCENT 90 +#define Q_FULL_THRESH_HOLD(ha) \ + ((ha->cur_fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT) + +#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */ + +struct qla_hw_data_stat { + u32 num_fw_dump; + u32 num_mpi_reset; +}; + +/* refer to pcie_do_recovery reference */ +typedef enum { + QLA_PCI_RESUME, + QLA_PCI_ERR_DETECTED, + QLA_PCI_MMIO_ENABLED, + QLA_PCI_SLOT_RESET, +} pci_error_state_t; +/* + * Qlogic host adapter specific data structure. +*/ +struct qla_hw_data { + struct pci_dev *pdev; + /* SRB cache. */ +#define SRB_MIN_REQ 128 + mempool_t *srb_mempool; + u8 port_name[WWN_SIZE]; + + volatile struct { + uint32_t mbox_int :1; + uint32_t mbox_busy :1; + uint32_t disable_risc_code_load :1; + uint32_t enable_64bit_addressing :1; + uint32_t enable_lip_reset :1; + uint32_t enable_target_reset :1; + uint32_t enable_lip_full_login :1; + uint32_t enable_led_scheme :1; + + uint32_t msi_enabled :1; + uint32_t msix_enabled :1; + uint32_t disable_serdes :1; + uint32_t gpsc_supported :1; + uint32_t npiv_supported :1; + uint32_t pci_channel_io_perm_failure :1; + uint32_t fce_enabled :1; + uint32_t fac_supported :1; + + uint32_t chip_reset_done :1; + uint32_t running_gold_fw :1; + uint32_t eeh_busy :1; + uint32_t disable_msix_handshake :1; + uint32_t fcp_prio_enabled :1; + uint32_t isp82xx_fw_hung:1; + uint32_t nic_core_hung:1; + + uint32_t quiesce_owner:1; + uint32_t nic_core_reset_hdlr_active:1; + uint32_t nic_core_reset_owner:1; + uint32_t isp82xx_no_md_cap:1; + uint32_t host_shutting_down:1; + uint32_t idc_compl_status:1; + uint32_t mr_reset_hdlr_active:1; + uint32_t mr_intr_valid:1; + + uint32_t dport_enabled:1; + uint32_t fawwpn_enabled:1; + uint32_t exlogins_enabled:1; + uint32_t exchoffld_enabled:1; + + uint32_t lip_ae:1; + uint32_t n2n_ae:1; + uint32_t fw_started:1; + uint32_t fw_init_done:1; + + uint32_t lr_detected:1; + + uint32_t rida_fmt2:1; + uint32_t purge_mbox:1; + uint32_t n2n_bigger:1; + uint32_t secure_adapter:1; + uint32_t secure_fw:1; + /* Supported by Adapter */ + uint32_t scm_supported_a:1; + /* Supported by Firmware */ + uint32_t scm_supported_f:1; + /* Enabled in Driver */ + uint32_t scm_enabled:1; + uint32_t edif_hw:1; + uint32_t edif_enabled:1; + uint32_t n2n_fw_acc_sec:1; + uint32_t plogi_template_valid:1; + uint32_t port_isolated:1; + uint32_t eeh_flush:2; +#define EEH_FLUSH_RDY 1 +#define EEH_FLUSH_DONE 2 + } flags; + + uint16_t max_exchg; + uint16_t lr_distance; /* 32G & above */ +#define LR_DISTANCE_5K 1 +#define LR_DISTANCE_10K 0 + + /* This spinlock is used to protect "io transactions", you must + * acquire it before doing any IO to the card, eg with RD_REG*() and + * WRT_REG*() for the 
duration of your entire commandtransaction. + * + * This spinlock is of lower priority than the io request lock. + */ + + spinlock_t hardware_lock ____cacheline_aligned; + int bars; + int mem_only; + device_reg_t *iobase; /* Base I/O address */ + resource_size_t pio_address; + +#define MIN_IOBASE_LEN 0x100 + dma_addr_t bar0_hdl; + + void __iomem *cregbase; + dma_addr_t bar2_hdl; +#define BAR0_LEN_FX00 (1024 * 1024) +#define BAR2_LEN_FX00 (128 * 1024) + + uint32_t rqstq_intr_code; + uint32_t mbx_intr_code; + uint32_t req_que_len; + uint32_t rsp_que_len; + uint32_t req_que_off; + uint32_t rsp_que_off; + unsigned long eeh_jif; + + /* Multi queue data structs */ + device_reg_t *mqiobase; + device_reg_t *msixbase; + uint16_t msix_count; + uint8_t mqenable; + struct req_que **req_q_map; + struct rsp_que **rsp_q_map; + struct qla_qpair **queue_pair_map; + struct qla_qpair **qp_cpu_map; + unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; + unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; + unsigned long qpair_qid_map[(QLA_MAX_QUEUES / 8) + / sizeof(unsigned long)]; + uint8_t max_req_queues; + uint8_t max_rsp_queues; + uint8_t max_qpairs; + uint8_t num_qpairs; + struct qla_qpair *base_qpair; + struct qla_npiv_entry *npiv_info; + uint16_t nvram_npiv_size; + + uint16_t switch_cap; +#define FLOGI_SEQ_DEL BIT_8 +#define FLOGI_MID_SUPPORT BIT_10 +#define FLOGI_VSAN_SUPPORT BIT_12 +#define FLOGI_SP_SUPPORT BIT_13 + + uint8_t port_no; /* Physical port of adapter */ + uint8_t exch_starvation; + + /* Timeout timers. */ + uint8_t loop_down_abort_time; /* port down timer */ + atomic_t loop_down_timer; /* loop down timer */ + uint8_t link_down_timeout; /* link down timeout */ + uint16_t max_loop_id; + uint16_t max_fibre_devices; /* Maximum number of targets */ + + uint16_t fb_rev; + uint16_t min_external_loopid; /* First external loop Id */ + +#define PORT_SPEED_UNKNOWN 0xFFFF +#define PORT_SPEED_1GB 0x00 +#define PORT_SPEED_2GB 0x01 +#define PORT_SPEED_AUTO 0x02 +#define PORT_SPEED_4GB 0x03 +#define PORT_SPEED_8GB 0x04 +#define PORT_SPEED_16GB 0x05 +#define PORT_SPEED_32GB 0x06 +#define PORT_SPEED_64GB 0x07 +#define PORT_SPEED_10GB 0x13 + uint16_t link_data_rate; /* F/W operating speed */ + uint16_t set_data_rate; /* Set by user */ + + uint8_t current_topology; + uint8_t prev_topology; +#define ISP_CFG_NL 1 +#define ISP_CFG_N 2 +#define ISP_CFG_FL 4 +#define ISP_CFG_F 8 + + uint8_t operating_mode; /* F/W operating mode */ +#define LOOP 0 +#define P2P 1 +#define LOOP_P2P 2 +#define P2P_LOOP 3 + uint8_t interrupts_on; + uint32_t isp_abort_cnt; +#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532 +#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432 +#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001 +#define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031 +#define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031 +#define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071 +#define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271 +#define PCI_DEVICE_ID_QLOGIC_ISP2261 0x2261 +#define PCI_DEVICE_ID_QLOGIC_ISP2061 0x2061 +#define PCI_DEVICE_ID_QLOGIC_ISP2081 0x2081 +#define PCI_DEVICE_ID_QLOGIC_ISP2089 0x2089 +#define PCI_DEVICE_ID_QLOGIC_ISP2281 0x2281 +#define PCI_DEVICE_ID_QLOGIC_ISP2289 0x2289 + + uint32_t isp_type; +#define DT_ISP2100 BIT_0 +#define DT_ISP2200 BIT_1 +#define DT_ISP2300 BIT_2 +#define DT_ISP2312 BIT_3 +#define DT_ISP2322 BIT_4 +#define DT_ISP6312 BIT_5 +#define DT_ISP6322 BIT_6 +#define DT_ISP2422 BIT_7 +#define DT_ISP2432 BIT_8 +#define DT_ISP5422 BIT_9 +#define DT_ISP5432 BIT_10 +#define DT_ISP2532 BIT_11 +#define 
DT_ISP8432 BIT_12 +#define DT_ISP8001 BIT_13 +#define DT_ISP8021 BIT_14 +#define DT_ISP2031 BIT_15 +#define DT_ISP8031 BIT_16 +#define DT_ISPFX00 BIT_17 +#define DT_ISP8044 BIT_18 +#define DT_ISP2071 BIT_19 +#define DT_ISP2271 BIT_20 +#define DT_ISP2261 BIT_21 +#define DT_ISP2061 BIT_22 +#define DT_ISP2081 BIT_23 +#define DT_ISP2089 BIT_24 +#define DT_ISP2281 BIT_25 +#define DT_ISP2289 BIT_26 +#define DT_ISP_LAST (DT_ISP2289 << 1) + + uint32_t device_type; +#define DT_T10_PI BIT_25 +#define DT_IIDMA BIT_26 +#define DT_FWI2 BIT_27 +#define DT_ZIO_SUPPORTED BIT_28 +#define DT_OEM_001 BIT_29 +#define DT_ISP2200A BIT_30 +#define DT_EXTENDED_IDS BIT_31 + +#define DT_MASK(ha) ((ha)->isp_type & (DT_ISP_LAST - 1)) +#define IS_QLA2100(ha) (DT_MASK(ha) & DT_ISP2100) +#define IS_QLA2200(ha) (DT_MASK(ha) & DT_ISP2200) +#define IS_QLA2300(ha) (DT_MASK(ha) & DT_ISP2300) +#define IS_QLA2312(ha) (DT_MASK(ha) & DT_ISP2312) +#define IS_QLA2322(ha) (DT_MASK(ha) & DT_ISP2322) +#define IS_QLA6312(ha) (DT_MASK(ha) & DT_ISP6312) +#define IS_QLA6322(ha) (DT_MASK(ha) & DT_ISP6322) +#define IS_QLA2422(ha) (DT_MASK(ha) & DT_ISP2422) +#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432) +#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422) +#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432) +#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532) +#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432) +#define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001) +#define IS_QLA81XX(ha) (IS_QLA8001(ha)) +#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021) +#define IS_QLA8044(ha) (DT_MASK(ha) & DT_ISP8044) +#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031) +#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031) +#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00) +#define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071) +#define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271) +#define IS_QLA2261(ha) (DT_MASK(ha) & DT_ISP2261) +#define IS_QLA2081(ha) (DT_MASK(ha) & DT_ISP2081) +#define IS_QLA2281(ha) (DT_MASK(ha) & DT_ISP2281) + +#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ + IS_QLA6312(ha) || IS_QLA6322(ha)) +#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha)) +#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha)) +#define IS_QLA25XX(ha) (IS_QLA2532(ha)) +#define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha)) +#define IS_QLA84XX(ha) (IS_QLA8432(ha)) +#define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha) || IS_QLA2261(ha)) +#define IS_QLA28XX(ha) (IS_QLA2081(ha) || IS_QLA2281(ha)) +#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ + IS_QLA84XX(ha)) +#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \ + IS_QLA8031(ha) || IS_QLA8044(ha)) +#define IS_P3P_TYPE(ha) (IS_QLA82XX(ha) || IS_QLA8044(ha)) +#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \ + IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ + IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ + IS_QLA8044(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) +#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ + IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled) +#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ + IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ + IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) + +#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI) +#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA) 
+#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2) +#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED) +#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001) +#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS) +#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED) +#define IS_MQUE_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) +#define IS_BIDI_CAPABLE(ha) \ + (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) +/* Bit 21 of fw_attributes decides the MCTP capabilities */ +#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \ + ((ha)->fw_attributes_ext[0] & BIT_0)) +#define QLA_ABTS_FW_ENABLED(_ha) ((_ha)->fw_attributes_ext[0] & BIT_14) +#define QLA_SRB_NVME_LS(_sp) ((_sp)->type == SRB_NVME_LS) +#define QLA_SRB_NVME_CMD(_sp) ((_sp)->type == SRB_NVME_CMD) +#define QLA_NVME_IOS(_sp) (QLA_SRB_NVME_CMD(_sp) || QLA_SRB_NVME_LS(_sp)) +#define QLA_LS_ABTS_WAIT_ENABLED(_sp) \ + (QLA_SRB_NVME_LS(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw)) +#define QLA_CMD_ABTS_WAIT_ENABLED(_sp) \ + (QLA_SRB_NVME_CMD(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw)) +#define QLA_ABTS_WAIT_ENABLED(_sp) \ + (QLA_NVME_IOS(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw)) + +#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) +#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) +#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0) +#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) +#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \ + (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) +#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) +#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) +#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) +#define IS_FAWWN_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) +#define IS_EXCHG_OFFLD_CAPABLE(ha) \ + (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define IS_EXLOGIN_OFFLD_CAPABLE(ha) \ + (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ + IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\ + IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) + +#define IS_ZIO_THRESHOLD_CAPABLE(ha) \ + ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&\ + (ha->zio_mode == QLA_ZIO_MODE_6)) + + /* HBA serial number */ + uint8_t serial0; + uint8_t serial1; + uint8_t serial2; + + /* NVRAM configuration data */ +#define MAX_NVRAM_SIZE 4096 +#define VPD_OFFSET (MAX_NVRAM_SIZE / 2) + uint16_t nvram_size; + uint16_t nvram_base; + void *nvram; + uint16_t vpd_size; + uint16_t vpd_base; + void *vpd; + + uint16_t loop_reset_delay; + uint8_t retry_count; + uint8_t login_timeout; + uint16_t r_a_tov; + int port_down_retry_count; + uint8_t mbx_count; + uint8_t aen_mbx_count; + atomic_t num_pend_mbx_stage1; + atomic_t num_pend_mbx_stage2; + uint16_t frame_payload_size; + + uint32_t login_retry_count; + /* SNS command interfaces. */ + ms_iocb_entry_t *ms_iocb; + dma_addr_t ms_iocb_dma; + struct ct_sns_pkt *ct_sns; + dma_addr_t ct_sns_dma; + /* SNS command interfaces for 2200. 
*/ + struct sns_cmd_pkt *sns_cmd; + dma_addr_t sns_cmd_dma; + +#define SFP_DEV_SIZE 512 +#define SFP_BLOCK_SIZE 64 +#define SFP_RTDI_LEN SFP_BLOCK_SIZE + + void *sfp_data; + dma_addr_t sfp_data_dma; + + struct qla_flt_header *flt; + dma_addr_t flt_dma; + +#define XGMAC_DATA_SIZE 4096 + void *xgmac_data; + dma_addr_t xgmac_data_dma; + +#define DCBX_TLV_DATA_SIZE 4096 + void *dcbx_tlv; + dma_addr_t dcbx_tlv_dma; + + struct task_struct *dpc_thread; + uint8_t dpc_active; /* DPC routine is active */ + + dma_addr_t gid_list_dma; + struct gid_list_info *gid_list; + int gid_list_info_size; + + /* Small DMA pool allocations -- maximum 256 bytes in length. */ +#define DMA_POOL_SIZE 256 + struct dma_pool *s_dma_pool; + + dma_addr_t init_cb_dma; + init_cb_t *init_cb; + int init_cb_size; + dma_addr_t ex_init_cb_dma; + struct ex_init_cb_81xx *ex_init_cb; + dma_addr_t sf_init_cb_dma; + struct init_sf_cb *sf_init_cb; + + void *scm_fpin_els_buff; + uint64_t scm_fpin_els_buff_size; + bool scm_fpin_valid; + bool scm_fpin_payload_size; + + void *async_pd; + dma_addr_t async_pd_dma; + +#define ENABLE_EXTENDED_LOGIN BIT_7 + + /* Extended Logins */ + void *exlogin_buf; + dma_addr_t exlogin_buf_dma; + uint32_t exlogin_size; + +#define ENABLE_EXCHANGE_OFFLD BIT_2 + + /* Exchange Offload */ + void *exchoffld_buf; + dma_addr_t exchoffld_buf_dma; + int exchoffld_size; + int exchoffld_count; + + /* n2n */ + struct fc_els_flogi plogi_els_payld; + + void *swl; + + /* These are used by mailbox operations. */ + uint16_t mailbox_out[MAILBOX_REGISTER_COUNT]; + uint32_t mailbox_out32[MAILBOX_REGISTER_COUNT]; + uint32_t aenmb[AEN_MAILBOX_REGISTER_COUNT_FX00]; + + mbx_cmd_t *mcp; + struct mbx_cmd_32 *mcp32; + + unsigned long mbx_cmd_flags; +#define MBX_INTERRUPT 1 +#define MBX_INTR_WAIT 2 +#define MBX_UPDATE_FLASH_ACTIVE 3 + + struct mutex vport_lock; /* Virtual port synchronization */ + spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */ + struct mutex mq_lock; /* multi-queue synchronization */ + struct completion mbx_cmd_comp; /* Serialize mbx access */ + struct completion mbx_intr_comp; /* Used for completion notification */ + struct completion dcbx_comp; /* For set port config notification */ + struct completion lb_portup_comp; /* Used to wait for link up during + * loopback */ +#define DCBX_COMP_TIMEOUT 20 +#define LB_PORTUP_COMP_TIMEOUT 10 + + int notify_dcbx_comp; + int notify_lb_portup_comp; + struct mutex selflogin_lock; + + /* Basic firmware related information. 
*/ + uint16_t fw_major_version; + uint16_t fw_minor_version; + uint16_t fw_subminor_version; + uint16_t fw_attributes; + uint16_t fw_attributes_h; +#define FW_ATTR_H_NVME_FBURST BIT_1 +#define FW_ATTR_H_NVME BIT_10 +#define FW_ATTR_H_NVME_UPDATED BIT_14 + + /* About firmware SCM support */ +#define FW_ATTR_EXT0_SCM_SUPPORTED BIT_12 + /* Brocade fabric attached */ +#define FW_ATTR_EXT0_SCM_BROCADE 0x00001000 + /* Cisco fabric attached */ +#define FW_ATTR_EXT0_SCM_CISCO 0x00002000 +#define FW_ATTR_EXT0_NVME2 BIT_13 +#define FW_ATTR_EXT0_EDIF BIT_5 + uint16_t fw_attributes_ext[2]; + uint32_t fw_memory_size; + uint32_t fw_transfer_size; + uint32_t fw_srisc_address; +#define RISC_START_ADDRESS_2100 0x1000 +#define RISC_START_ADDRESS_2300 0x800 +#define RISC_START_ADDRESS_2400 0x100000 + + uint16_t orig_fw_tgt_xcb_count; + uint16_t cur_fw_tgt_xcb_count; + uint16_t orig_fw_xcb_count; + uint16_t cur_fw_xcb_count; + uint16_t orig_fw_iocb_count; + uint16_t cur_fw_iocb_count; + uint16_t fw_max_fcf_count; + + uint32_t fw_shared_ram_start; + uint32_t fw_shared_ram_end; + uint32_t fw_ddr_ram_start; + uint32_t fw_ddr_ram_end; + + uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */ + uint8_t fw_seriallink_options[4]; + __le16 fw_seriallink_options24[4]; + + uint8_t serdes_version[3]; + uint8_t mpi_version[3]; + uint32_t mpi_capabilities; + uint8_t phy_version[3]; + uint8_t pep_version[3]; + + /* Firmware dump template */ + struct fwdt { + void *template; + ulong length; + ulong dump_size; + } fwdt[2]; + struct qla2xxx_fw_dump *fw_dump; + uint32_t fw_dump_len; + u32 fw_dump_alloc_len; + bool fw_dumped; + unsigned long fw_dump_cap_flags; +#define RISC_PAUSE_CMPL 0 +#define DMA_SHUTDOWN_CMPL 1 +#define ISP_RESET_CMPL 2 +#define RISC_RDY_AFT_RESET 3 +#define RISC_SRAM_DUMP_CMPL 4 +#define RISC_EXT_MEM_DUMP_CMPL 5 +#define ISP_MBX_RDY 6 +#define ISP_SOFT_RESET_CMPL 7 + int fw_dump_reading; + void *mpi_fw_dump; + u32 mpi_fw_dump_len; + unsigned int mpi_fw_dump_reading:1; + unsigned int mpi_fw_dumped:1; + int prev_minidump_failed; + dma_addr_t eft_dma; + void *eft; +/* Current size of mctp dump is 0x086064 bytes */ +#define MCTP_DUMP_SIZE 0x086064 + dma_addr_t mctp_dump_dma; + void *mctp_dump; + int mctp_dumped; + int mctp_dump_reading; + uint32_t chain_offset; + struct dentry *dfs_dir; + struct dentry *dfs_fce; + struct dentry *dfs_tgt_counters; + struct dentry *dfs_fw_resource_cnt; + + dma_addr_t fce_dma; + void *fce; + uint32_t fce_bufs; + uint16_t fce_mb[8]; + uint64_t fce_wr, fce_rd; + struct mutex fce_mutex; + + uint32_t pci_attr; + uint16_t chip_revision; + + uint16_t product_id[4]; + + uint8_t model_number[16+1]; + char model_desc[80]; + uint8_t adapter_id[16+1]; + + /* Option ROM information. */ + char *optrom_buffer; + uint32_t optrom_size; + int optrom_state; +#define QLA_SWAITING 0 +#define QLA_SREADING 1 +#define QLA_SWRITING 2 + uint32_t optrom_region_start; + uint32_t optrom_region_size; + struct mutex optrom_mutex; + +/* PCI expansion ROM image information. */ +#define ROM_CODE_TYPE_BIOS 0 +#define ROM_CODE_TYPE_FCODE 1 +#define ROM_CODE_TYPE_EFI 3 + uint8_t bios_revision[2]; + uint8_t efi_revision[2]; + uint8_t fcode_revision[16]; + uint32_t fw_revision[4]; + + uint32_t gold_fw_version[4]; + + /* Offsets for flash/nvram access (set to ~0 if not used). 
*/ + uint32_t flash_conf_off; + uint32_t flash_data_off; + uint32_t nvram_conf_off; + uint32_t nvram_data_off; + + uint32_t fdt_wrt_disable; + uint32_t fdt_wrt_enable; + uint32_t fdt_erase_cmd; + uint32_t fdt_block_size; + uint32_t fdt_unprotect_sec_cmd; + uint32_t fdt_protect_sec_cmd; + uint32_t fdt_wrt_sts_reg_cmd; + + struct { + uint32_t flt_region_flt; + uint32_t flt_region_fdt; + uint32_t flt_region_boot; + uint32_t flt_region_boot_sec; + uint32_t flt_region_fw; + uint32_t flt_region_fw_sec; + uint32_t flt_region_vpd_nvram; + uint32_t flt_region_vpd_nvram_sec; + uint32_t flt_region_vpd; + uint32_t flt_region_vpd_sec; + uint32_t flt_region_nvram; + uint32_t flt_region_nvram_sec; + uint32_t flt_region_npiv_conf; + uint32_t flt_region_gold_fw; + uint32_t flt_region_fcp_prio; + uint32_t flt_region_bootload; + uint32_t flt_region_img_status_pri; + uint32_t flt_region_img_status_sec; + uint32_t flt_region_aux_img_status_pri; + uint32_t flt_region_aux_img_status_sec; + }; + uint8_t active_image; + uint8_t active_tmf; +#define MAX_ACTIVE_TMF 8 + + /* Needed for BEACON */ + uint16_t beacon_blink_led; + uint8_t beacon_color_state; +#define QLA_LED_GRN_ON 0x01 +#define QLA_LED_YLW_ON 0x02 +#define QLA_LED_ABR_ON 0x04 +#define QLA_LED_ALL_ON 0x07 /* yellow, green, amber. */ + /* ISP2322: red, green, amber. */ + uint16_t zio_mode; + uint16_t zio_timer; + + struct qla_msix_entry *msix_entries; + + struct list_head tmf_pending; + struct list_head tmf_active; + struct list_head vp_list; /* list of VP */ + unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) / + sizeof(unsigned long)]; + uint16_t num_vhosts; /* number of vports created */ + uint16_t num_vsans; /* number of vsan created */ + uint16_t max_npiv_vports; /* 63 or 125 per topoloty */ + int cur_vport_count; + + struct qla_chip_state_84xx *cs84xx; + struct isp_operations *isp_ops; + struct workqueue_struct *wq; + struct work_struct heartbeat_work; + struct qlfc_fw fw_buf; + unsigned long last_heartbeat_run_jiffies; + + /* FCP_CMND priority support */ + struct qla_fcp_prio_cfg *fcp_prio_cfg; + + struct dma_pool *dl_dma_pool; +#define DSD_LIST_DMA_POOL_SIZE 512 + + struct dma_pool *fcp_cmnd_dma_pool; + mempool_t *ctx_mempool; +#define FCP_CMND_DMA_POOL_SIZE 512 + + void __iomem *nx_pcibase; /* Base I/O address */ + void __iomem *nxdb_rd_ptr; /* Doorbell read pointer */ + void __iomem *nxdb_wr_ptr; /* Door bell write pointer */ + + uint32_t crb_win; + uint32_t curr_window; + uint32_t ddr_mn_window; + unsigned long mn_win_crb; + unsigned long ms_win_crb; + int qdr_sn_window; + uint32_t fcoe_dev_init_timeout; + uint32_t fcoe_reset_timeout; + rwlock_t hw_lock; + uint16_t portnum; /* port number */ + int link_width; + struct fw_blob *hablob; + struct qla82xx_legacy_intr_set nx_legacy_intr; + + uint8_t fw_type; + uint32_t file_prd_off; /* File firmware product offset */ + + uint32_t md_template_size; + void *md_tmplt_hdr; + dma_addr_t md_tmplt_hdr_dma; + void *md_dump; + uint32_t md_dump_size; + + void *loop_id_map; + + /* QLA83XX IDC specific fields */ + uint32_t idc_audit_ts; + uint32_t idc_extend_tmo; + + /* DPC low-priority workqueue */ + struct workqueue_struct *dpc_lp_wq; + struct work_struct idc_aen; + /* DPC high-priority workqueue */ + struct workqueue_struct *dpc_hp_wq; + struct work_struct nic_core_reset; + struct work_struct idc_state_handler; + struct work_struct nic_core_unrecoverable; + struct work_struct board_disable; + + struct mr_data_fx00 mr; + uint32_t chip_reset; + + struct qlt_hw_data tgt; + int allow_cna_fw_dump; + uint32_t 
fw_ability_mask; + uint16_t min_supported_speed; + uint16_t max_supported_speed; + + /* DMA pool for the DIF bundling buffers */ + struct dma_pool *dif_bundl_pool; + #define DIF_BUNDLING_DMA_POOL_SIZE 1024 + struct { + struct { + struct list_head head; + uint count; + } good; + struct { + struct list_head head; + uint count; + } unusable; + } pool; + + unsigned long long dif_bundle_crossed_pages; + unsigned long long dif_bundle_reads; + unsigned long long dif_bundle_writes; + unsigned long long dif_bundle_kallocs; + unsigned long long dif_bundle_dma_allocs; + + atomic_t nvme_active_aen_cnt; + uint16_t nvme_last_rptd_aen; /* Last recorded aen count */ + + uint8_t fc4_type_priority; + + atomic_t zio_threshold; + uint16_t last_zio_threshold; + +#define DEFAULT_ZIO_THRESHOLD 5 + + struct qla_hw_data_stat stat; + pci_error_state_t pci_error_state; + struct dma_pool *purex_dma_pool; + struct btree_head32 host_map; + +#define EDIF_NUM_SA_INDEX 512 +#define EDIF_TX_SA_INDEX_BASE EDIF_NUM_SA_INDEX + void *edif_rx_sa_id_map; + void *edif_tx_sa_id_map; + spinlock_t sadb_fp_lock; + + struct list_head sadb_tx_index_list; + struct list_head sadb_rx_index_list; + spinlock_t sadb_lock; /* protects list */ + struct els_reject elsrej; + u8 edif_post_stop_cnt_down; + struct qla_vp_map *vp_map; + struct qla_nvme_fc_rjt lsrjt; + struct qla_fw_res fwres ____cacheline_aligned; +}; + +#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES)) + +struct active_regions { + uint8_t global; + struct { + uint8_t board_config; + uint8_t vpd_nvram; + uint8_t npiv_config_0_1; + uint8_t npiv_config_2_3; + uint8_t nvme_params; + } aux; +}; + +#define FW_ABILITY_MAX_SPEED_MASK 0xFUL +#define FW_ABILITY_MAX_SPEED_16G 0x0 +#define FW_ABILITY_MAX_SPEED_32G 0x1 +#define FW_ABILITY_MAX_SPEED(ha) \ + (ha->fw_ability_mask & FW_ABILITY_MAX_SPEED_MASK) + +#define QLA_GET_DATA_RATE 0 +#define QLA_SET_DATA_RATE_NOLR 1 +#define QLA_SET_DATA_RATE_LR 2 /* Set speed and initiate LR */ + +#define QLA_DEFAULT_PAYLOAD_SIZE 64 +/* + * This item might be allocated with a size > sizeof(struct purex_item). + * The "size" variable gives the size of the payload (which + * is variable) starting at "iocb". + */ +struct purex_item { + void *purls_context; + struct list_head list; + struct scsi_qla_host *vha; + void (*process_item)(struct scsi_qla_host *vha, + struct purex_item *pkt); + atomic_t in_use; + uint16_t size; + struct { + uint8_t iocb[64]; + } iocb; +}; + +#include "qla_edif.h" + +#define SCM_FLAG_RDF_REJECT 0x00 +#define SCM_FLAG_RDF_COMPLETED 0x01 + +#define QLA_CON_PRIMITIVE_RECEIVED 0x1 +#define QLA_CONGESTION_ARB_WARNING 0x1 +#define QLA_CONGESTION_ARB_ALARM 0X2 + +/* + * Qlogic scsi host structure + */ +typedef struct scsi_qla_host { + struct list_head list; + struct list_head vp_fcports; /* list of fcports */ + struct list_head work_list; + spinlock_t work_lock; + struct work_struct iocb_work; + + /* Commonly used flags and state information. 
*/ + struct Scsi_Host *host; + unsigned long host_no; + uint8_t host_str[16]; + + volatile struct { + uint32_t init_done :1; + uint32_t online :1; + uint32_t reset_active :1; + + uint32_t management_server_logged_in :1; + uint32_t process_response_queue :1; + uint32_t difdix_supported:1; + uint32_t delete_progress:1; + + uint32_t fw_tgt_reported:1; + uint32_t bbcr_enable:1; + uint32_t qpairs_available:1; + uint32_t qpairs_req_created:1; + uint32_t qpairs_rsp_created:1; + uint32_t nvme_enabled:1; + uint32_t nvme_first_burst:1; + uint32_t nvme2_enabled:1; + } flags; + + atomic_t loop_state; +#define LOOP_TIMEOUT 1 +#define LOOP_DOWN 2 +#define LOOP_UP 3 +#define LOOP_UPDATE 4 +#define LOOP_READY 5 +#define LOOP_DEAD 6 + + unsigned long buf_expired; + unsigned long relogin_jif; + unsigned long dpc_flags; +#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */ +#define RESET_ACTIVE 1 +#define ISP_ABORT_NEEDED 2 /* Initiate ISP abort. */ +#define ABORT_ISP_ACTIVE 3 /* ISP abort in progress. */ +#define LOOP_RESYNC_NEEDED 4 /* Device Resync needed. */ +#define LOOP_RESYNC_ACTIVE 5 +#define LOCAL_LOOP_UPDATE 6 /* Perform a local loop update. */ +#define RSCN_UPDATE 7 /* Perform an RSCN update. */ +#define RELOGIN_NEEDED 8 +#define REGISTER_FC4_NEEDED 9 /* SNS FC4 registration required. */ +#define ISP_ABORT_RETRY 10 /* ISP aborted. */ +#define BEACON_BLINK_NEEDED 11 +#define REGISTER_FDMI_NEEDED 12 +#define VP_DPC_NEEDED 14 /* wake up for VP dpc handling */ +#define UNLOADING 15 +#define NPIV_CONFIG_NEEDED 16 +#define ISP_UNRECOVERABLE 17 +#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ +#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ +#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ +#define N2N_LINK_RESET 21 +#define PORT_UPDATE_NEEDED 22 +#define FX00_RESET_RECOVERY 23 +#define FX00_TARGET_SCAN 24 +#define FX00_CRITEMP_RECOVERY 25 +#define FX00_HOST_INFO_RESEND 26 +#define QPAIR_ONLINE_CHECK_NEEDED 27 +#define DO_EEH_RECOVERY 28 +#define DETECT_SFP_CHANGE 29 +#define N2N_LOGIN_NEEDED 30 +#define IOCB_WORK_ACTIVE 31 +#define SET_ZIO_THRESHOLD_NEEDED 32 +#define ISP_ABORT_TO_ROM 33 +#define VPORT_DELETE 34 + +#define PROCESS_PUREX_IOCB 63 + + unsigned long pci_flags; +#define PFLG_DISCONNECTED 0 /* PCI device removed */ +#define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */ +#define PFLG_DRIVER_PROBING 2 /* PCI driver .probe */ + + uint32_t device_flags; +#define SWITCH_FOUND BIT_0 +#define DFLG_NO_CABLE BIT_1 +#define DFLG_DEV_FAILED BIT_5 + + /* ISP configuration data. */ + uint16_t loop_id; /* Host adapter loop id */ + uint16_t self_login_loop_id; /* host adapter loop id + * get it on self login + */ + fc_port_t bidir_fcport; /* fcport used for bidir cmnds + * no need of allocating it for + * each command + */ + + port_id_t d_id; /* Host adapter port id */ + uint8_t marker_needed; + uint16_t mgmt_svr_loop_id; + + + + /* Timeout timers. 
*/ + uint8_t loop_down_abort_time; /* port down timer */ + atomic_t loop_down_timer; /* loop down timer */ + uint8_t link_down_timeout; /* link down timeout */ + + uint32_t timer_active; + struct timer_list timer; + + uint8_t node_name[WWN_SIZE]; + uint8_t port_name[WWN_SIZE]; + uint8_t fabric_node_name[WWN_SIZE]; + uint8_t fabric_port_name[WWN_SIZE]; + + struct nvme_fc_local_port *nvme_local_port; + struct completion nvme_del_done; + + uint16_t fcoe_vlan_id; + uint16_t fcoe_fcf_idx; + uint8_t fcoe_vn_port_mac[6]; + + /* list of commands waiting on workqueue */ + struct list_head qla_cmd_list; + struct list_head unknown_atio_list; + spinlock_t cmd_list_lock; + struct delayed_work unknown_atio_work; + + /* Counter to detect races between ELS and RSCN events */ + atomic_t generation_tick; + /* Time when global fcport update has been scheduled */ + int total_fcport_update_gen; + /* List of pending LOGOs, protected by tgt_mutex */ + struct list_head logo_list; + /* List of pending PLOGI acks, protected by hw lock */ + struct list_head plogi_ack_list; + + struct list_head qp_list; + + uint32_t vp_abort_cnt; + + struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ + uint16_t vp_idx; /* vport ID */ + struct qla_qpair *qpair; /* base qpair */ + + unsigned long vp_flags; +#define VP_IDX_ACQUIRED 0 /* bit no 0 */ +#define VP_CREATE_NEEDED 1 +#define VP_BIND_NEEDED 2 +#define VP_DELETE_NEEDED 3 +#define VP_SCR_NEEDED 4 /* State Change Request registration */ +#define VP_CONFIG_OK 5 /* Flag to cfg VP, if FW is ready */ + atomic_t vp_state; +#define VP_OFFLINE 0 +#define VP_ACTIVE 1 +#define VP_FAILED 2 +// #define VP_DISABLE 3 + uint16_t vp_err_state; + uint16_t vp_prev_err_state; +#define VP_ERR_UNKWN 0 +#define VP_ERR_PORTDWN 1 +#define VP_ERR_FAB_UNSUPPORTED 2 +#define VP_ERR_FAB_NORESOURCES 3 +#define VP_ERR_FAB_LOGOUT 4 +#define VP_ERR_ADAP_NORESOURCES 5 + struct qla_hw_data *hw; + struct scsi_qlt_host vha_tgt; + struct req_que *req; + int fw_heartbeat_counter; + int seconds_since_last_heartbeat; + struct fc_host_statistics fc_host_stat; + struct qla_statistics qla_stats; + struct bidi_statistics bidi_stats; + atomic_t vref_count; + struct qla8044_reset_template reset_tmplt; + uint16_t bbcr; + + uint16_t u_ql2xexchoffld; + uint16_t u_ql2xiniexchg; + uint16_t qlini_mode; + uint16_t ql2xexchoffld; + uint16_t ql2xiniexchg; + + struct dentry *dfs_rport_root; + + struct purex_list { + struct list_head head; + spinlock_t lock; + } purex_list; + struct purex_item default_item; + + struct name_list_extended gnl; + /* Count of active session/fcport */ + int fcport_count; + wait_queue_head_t fcport_waitQ; + wait_queue_head_t vref_waitq; + uint8_t min_supported_speed; + uint8_t n2n_node_name[WWN_SIZE]; + uint8_t n2n_port_name[WWN_SIZE]; + uint16_t n2n_id; + __le16 dport_data[4]; + struct fab_scan scan; + uint8_t scm_fabric_connection_flags; + + unsigned int irq_offset; + + u64 hw_err_cnt; + u64 interface_err_cnt; + u64 cmd_timeout_cnt; + u64 reset_cmd_err_cnt; + u64 link_down_time; + u64 short_link_down_cnt; + struct edif_dbell e_dbell; + struct pur_core pur_cinfo; + +#define DPORT_DIAG_IN_PROGRESS BIT_0 +#define DPORT_DIAG_CHIP_RESET_IN_PROGRESS BIT_1 + uint16_t dport_status; +} scsi_qla_host_t; + +struct qla27xx_image_status { + uint8_t image_status_mask; + __le16 generation; + uint8_t ver_major; + uint8_t ver_minor; + uint8_t bitmap; /* 28xx only */ + uint8_t reserved[2]; + __le32 checksum; + __le32 signature; +} __packed; + +/* 28xx aux image status bimap values */ +#define 
QLA28XX_AUX_IMG_BOARD_CONFIG BIT_0 +#define QLA28XX_AUX_IMG_VPD_NVRAM BIT_1 +#define QLA28XX_AUX_IMG_NPIV_CONFIG_0_1 BIT_2 +#define QLA28XX_AUX_IMG_NPIV_CONFIG_2_3 BIT_3 +#define QLA28XX_AUX_IMG_NVME_PARAMS BIT_4 + +#define SET_VP_IDX 1 +#define SET_AL_PA 2 +#define RESET_VP_IDX 3 +#define RESET_AL_PA 4 +struct qla_vp_map { + uint8_t idx; + scsi_qla_host_t *vha; +}; + +struct qla2_sgx { + dma_addr_t dma_addr; /* OUT */ + uint32_t dma_len; /* OUT */ + + uint32_t tot_bytes; /* IN */ + struct scatterlist *cur_sg; /* IN */ + + /* for book keeping, bzero on initial invocation */ + uint32_t bytes_consumed; + uint32_t num_bytes; + uint32_t tot_partial; + + /* for debugging */ + uint32_t num_sg; + srb_t *sp; +}; + +#define QLA_FW_STARTED(_ha) { \ + int i; \ + _ha->flags.fw_started = 1; \ + _ha->base_qpair->fw_started = 1; \ + for (i = 0; i < _ha->max_qpairs; i++) { \ + if (_ha->queue_pair_map[i]) \ + _ha->queue_pair_map[i]->fw_started = 1; \ + } \ +} + +#define QLA_FW_STOPPED(_ha) { \ + int i; \ + _ha->flags.fw_started = 0; \ + _ha->base_qpair->fw_started = 0; \ + for (i = 0; i < _ha->max_qpairs; i++) { \ + if (_ha->queue_pair_map[i]) \ + _ha->queue_pair_map[i]->fw_started = 0; \ + } \ +} + + +#define SFUB_CHECKSUM_SIZE 4 + +struct secure_flash_update_block { + uint32_t block_info; + uint32_t signature_lo; + uint32_t signature_hi; + uint32_t signature_upper[0x3e]; +}; + +struct secure_flash_update_block_pk { + uint32_t block_info; + uint32_t signature_lo; + uint32_t signature_hi; + uint32_t signature_upper[0x3e]; + uint32_t public_key[0x41]; +}; + +/* + * Macros to help code, maintain, etc. + */ +#define LOOP_TRANSITION(ha) \ + (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \ + test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \ + atomic_read(&ha->loop_state) == LOOP_DOWN) + +#define STATE_TRANSITION(ha) \ + (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \ + test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) + +static inline bool qla_vha_mark_busy(scsi_qla_host_t *vha) +{ + atomic_inc(&vha->vref_count); + mb(); + if (vha->flags.delete_progress) { + atomic_dec(&vha->vref_count); + wake_up(&vha->vref_waitq); + return true; + } + return false; +} + +#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \ + atomic_dec(&__vha->vref_count); \ + wake_up(&__vha->vref_waitq); \ +} while (0) \ + +#define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \ + atomic_inc(&__qpair->ref_count); \ + mb(); \ + if (__qpair->delete_in_progress) { \ + atomic_dec(&__qpair->ref_count); \ + __bail = 1; \ + } else { \ + __bail = 0; \ + } \ +} while (0) + +#define QLA_QPAIR_MARK_NOT_BUSY(__qpair) \ + atomic_dec(&__qpair->ref_count) + +#define QLA_ENA_CONF(_ha) {\ + int i;\ + _ha->base_qpair->enable_explicit_conf = 1; \ + for (i = 0; i < _ha->max_qpairs; i++) { \ + if (_ha->queue_pair_map[i]) \ + _ha->queue_pair_map[i]->enable_explicit_conf = 1; \ + } \ +} + +#define QLA_DIS_CONF(_ha) {\ + int i;\ + _ha->base_qpair->enable_explicit_conf = 0; \ + for (i = 0; i < _ha->max_qpairs; i++) { \ + if (_ha->queue_pair_map[i]) \ + _ha->queue_pair_map[i]->enable_explicit_conf = 0; \ + } \ +} + +/* + * qla2x00 local function return status codes + */ +#define MBS_MASK 0x3fff + +#define QLA_SUCCESS (MBS_COMMAND_COMPLETE & MBS_MASK) +#define QLA_INVALID_COMMAND (MBS_INVALID_COMMAND & MBS_MASK) +#define QLA_INTERFACE_ERROR (MBS_HOST_INTERFACE_ERROR & MBS_MASK) +#define QLA_TEST_FAILED (MBS_TEST_FAILED & MBS_MASK) +#define QLA_COMMAND_ERROR (MBS_COMMAND_ERROR & MBS_MASK) +#define QLA_PARAMETER_ERROR (MBS_COMMAND_PARAMETER_ERROR & MBS_MASK) +#define QLA_PORT_ID_USED 
(MBS_PORT_ID_USED & MBS_MASK) +#define QLA_LOOP_ID_USED (MBS_LOOP_ID_USED & MBS_MASK) +#define QLA_ALL_IDS_IN_USE (MBS_ALL_IDS_IN_USE & MBS_MASK) +#define QLA_NOT_LOGGED_IN (MBS_NOT_LOGGED_IN & MBS_MASK) + +#define QLA_FUNCTION_TIMEOUT 0x100 +#define QLA_FUNCTION_PARAMETER_ERROR 0x101 +#define QLA_FUNCTION_FAILED 0x102 +#define QLA_MEMORY_ALLOC_FAILED 0x103 +#define QLA_LOCK_TIMEOUT 0x104 +#define QLA_ABORTED 0x105 +#define QLA_SUSPENDED 0x106 +#define QLA_BUSY 0x107 +#define QLA_ALREADY_REGISTERED 0x109 +#define QLA_OS_TIMER_EXPIRED 0x10a +#define QLA_ERR_NO_QPAIR 0x10b +#define QLA_ERR_NOT_FOUND 0x10c +#define QLA_ERR_FROM_FW 0x10d + +#define NVRAM_DELAY() udelay(10) + +/* + * Flash support definitions + */ +#define OPTROM_SIZE_2300 0x20000 +#define OPTROM_SIZE_2322 0x100000 +#define OPTROM_SIZE_24XX 0x100000 +#define OPTROM_SIZE_25XX 0x200000 +#define OPTROM_SIZE_81XX 0x400000 +#define OPTROM_SIZE_82XX 0x800000 +#define OPTROM_SIZE_83XX 0x1000000 +#define OPTROM_SIZE_28XX 0x2000000 + +#define OPTROM_BURST_SIZE 0x1000 +#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4) + +#define QLA_DSDS_PER_IOCB 37 + +#define QLA_SG_ALL 1024 + +enum nexus_wait_type { + WAIT_HOST = 0, + WAIT_TARGET, + WAIT_LUN, +}; + +#define INVALID_EDIF_SA_INDEX 0xffff +#define RX_DELETE_NO_EDIF_SA_INDEX 0xfffe + +#define QLA_SKIP_HANDLE QLA_TGT_SKIP_HANDLE + +/* edif hash element */ +struct edif_list_entry { + uint16_t handle; /* nport_handle */ + uint32_t update_sa_index; + uint32_t delete_sa_index; + uint32_t count; /* counter for filtering sa_index */ +#define EDIF_ENTRY_FLAGS_CLEANUP 0x01 /* this index is being cleaned up */ + uint32_t flags; /* used by sadb cleanup code */ + fc_port_t *fcport; /* needed by rx delay timer function */ + struct timer_list timer; /* rx delay timer */ + struct list_head next; +}; + +#define EDIF_TX_INDX_BASE 512 +#define EDIF_RX_INDX_BASE 0 +#define EDIF_RX_DELETE_FILTER_COUNT 3 /* delay queuing rx delete until this many */ + +/* entry in the sa_index free pool */ + +struct sa_index_pair { + uint16_t sa_index; + uint32_t spi; +}; + +/* edif sa_index data structure */ +struct edif_sa_index_entry { + struct sa_index_pair sa_pair[2]; + fc_port_t *fcport; + uint16_t handle; + struct list_head next; +}; + +/* Refer to SNIA SFF 8247 */ +struct sff_8247_a0 { + u8 txid; /* transceiver id */ + u8 ext_txid; + u8 connector; + /* compliance code */ + u8 eth_infi_cc3; /* ethernet, inifiband */ + u8 sonet_cc4[2]; + u8 eth_cc6; + /* link length */ +#define FC_LL_VL BIT_7 /* very long */ +#define FC_LL_S BIT_6 /* Short */ +#define FC_LL_I BIT_5 /* Intermidiate*/ +#define FC_LL_L BIT_4 /* Long */ +#define FC_LL_M BIT_3 /* Medium */ +#define FC_LL_SA BIT_2 /* ShortWave laser */ +#define FC_LL_LC BIT_1 /* LongWave laser */ +#define FC_LL_EL BIT_0 /* Electrical inter enclosure */ + u8 fc_ll_cc7; + /* FC technology */ +#define FC_TEC_EL BIT_7 /* Electrical inter enclosure */ +#define FC_TEC_SN BIT_6 /* short wave w/o OFC */ +#define FC_TEC_SL BIT_5 /* short wave with OFC */ +#define FC_TEC_LL BIT_4 /* Longwave Laser */ +#define FC_TEC_ACT BIT_3 /* Active cable */ +#define FC_TEC_PAS BIT_2 /* Passive cable */ + u8 fc_tec_cc8; + /* Transmission Media */ +#define FC_MED_TW BIT_7 /* Twin Ax */ +#define FC_MED_TP BIT_6 /* Twited Pair */ +#define FC_MED_MI BIT_5 /* Min Coax */ +#define FC_MED_TV BIT_4 /* Video Coax */ +#define FC_MED_M6 BIT_3 /* Multimode, 62.5um */ +#define FC_MED_M5 BIT_2 /* Multimode, 50um */ +#define FC_MED_SM BIT_0 /* Single Mode */ + u8 fc_med_cc9; + /* speed FC_SP_12: 12*100M = 1200 
MB/s */ +#define FC_SP_12 BIT_7 +#define FC_SP_8 BIT_6 +#define FC_SP_16 BIT_5 +#define FC_SP_4 BIT_4 +#define FC_SP_32 BIT_3 +#define FC_SP_2 BIT_2 +#define FC_SP_1 BIT_0 + u8 fc_sp_cc10; + u8 encode; + u8 bitrate; + u8 rate_id; + u8 length_km; /* offset 14/eh */ + u8 length_100m; + u8 length_50um_10m; + u8 length_62um_10m; + u8 length_om4_10m; + u8 length_om3_10m; +#define SFF_VEN_NAME_LEN 16 + u8 vendor_name[SFF_VEN_NAME_LEN]; /* offset 20/14h */ + u8 tx_compat; + u8 vendor_oui[3]; +#define SFF_PART_NAME_LEN 16 + u8 vendor_pn[SFF_PART_NAME_LEN]; /* part number */ + u8 vendor_rev[4]; + u8 wavelength[2]; + u8 resv; + u8 cc_base; + u8 options[2]; /* offset 64 */ + u8 br_max; + u8 br_min; + u8 vendor_sn[16]; + u8 date_code[8]; + u8 diag; + u8 enh_options; + u8 sff_revision; + u8 cc_ext; + u8 vendor_specific[32]; + u8 resv2[128]; +}; + +/* BPM -- Buffer Plus Management support. */ +#define IS_BPM_CAPABLE(ha) \ + (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ + IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define IS_BPM_RANGE_CAPABLE(ha) \ + (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define IS_BPM_ENABLED(vha) \ + (ql2xautodetectsfp && !vha->vp_idx && IS_BPM_CAPABLE(vha->hw)) + +#define FLASH_SEMAPHORE_REGISTER_ADDR 0x00101016 + +#define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \ + (IS_QLA27XX(_ha) || IS_QLA28XX(_ha) || IS_QLA83XX(_ha))) + +#define SAVE_TOPO(_ha) { \ + if (_ha->current_topology) \ + _ha->prev_topology = _ha->current_topology; \ +} + +#define N2N_TOPO(ha) \ + ((ha->prev_topology == ISP_CFG_N && !ha->current_topology) || \ + ha->current_topology == ISP_CFG_N || \ + !ha->current_topology) + +#define QLA_N2N_WAIT_TIME 5 /* 2 * ra_tov(n2n) + 1 */ + +#define NVME_TYPE(fcport) \ + (fcport->fc4_type & FS_FC4TYPE_NVME) \ + +#define FCP_TYPE(fcport) \ + (fcport->fc4_type & FS_FC4TYPE_FCP) \ + +#define NVME_ONLY_TARGET(fcport) \ + (NVME_TYPE(fcport) && !FCP_TYPE(fcport)) \ + +#define NVME_FCP_TARGET(fcport) \ + (FCP_TYPE(fcport) && NVME_TYPE(fcport)) \ + +#define NVME_PRIORITY(ha, fcport) \ + (NVME_FCP_TARGET(fcport) && \ + (ha->fc4_type_priority == FC4_PRIORITY_NVME)) + +#define NVME_TARGET(ha, fcport) \ + (fcport->do_prli_nvme || \ + NVME_ONLY_TARGET(fcport)) \ + +#define PRLI_PHASE(_cls) \ + ((_cls == DSC_LS_PRLI_PEND) || (_cls == DSC_LS_PRLI_COMP)) + +enum ql_vnd_host_stat_action { + QLA_STOP = 0, + QLA_START, + QLA_CLEAR, +}; + +struct ql_vnd_mng_host_stats_param { + u32 stat_type; + enum ql_vnd_host_stat_action action; +} __packed; + +struct ql_vnd_mng_host_stats_resp { + u32 status; +} __packed; + +struct ql_vnd_stats_param { + u32 stat_type; +} __packed; + +struct ql_vnd_tgt_stats_param { + s32 tgt_id; + u32 stat_type; +} __packed; + +enum ql_vnd_host_port_action { + QLA_ENABLE = 0, + QLA_DISABLE, +}; + +struct ql_vnd_mng_host_port_param { + enum ql_vnd_host_port_action action; +} __packed; + +struct ql_vnd_mng_host_port_resp { + u32 status; +} __packed; + +struct ql_vnd_stat_entry { + u32 stat_type; /* Failure type */ + u32 tgt_num; /* Target Num */ + u64 cnt; /* Counter value */ +} __packed; + +struct ql_vnd_stats { + u64 entry_count; /* Num of entries */ + u64 rservd; + struct ql_vnd_stat_entry entry[]; /* Place holder of entries */ +} __packed; + +struct ql_vnd_host_stats_resp { + u32 status; + struct ql_vnd_stats stats; +} __packed; + +struct ql_vnd_tgt_stats_resp { + u32 status; + struct ql_vnd_stats stats; +} __packed; + +#include "qla_target.h" +#include "qla_gbl.h" +#include "qla_dbg.h" +#include "qla_inline.h" + +#define 
IS_SESSION_DELETED(_fcport) (_fcport->disc_state == DSC_DELETE_PEND || \ + _fcport->disc_state == DSC_DELETED) + +#define DBG_FCPORT_PRFMT(_fp, _fmt, _args...) \ + "%s: %8phC: " _fmt " (state=%d disc_state=%d scan_state=%d loopid=0x%x deleted=%d flags=0x%x)\n", \ + __func__, _fp->port_name, ##_args, atomic_read(&_fp->state), \ + _fp->disc_state, _fp->scan_state, _fp->loop_id, _fp->deleted, \ + _fp->flags + +#define TMF_NOT_READY(_fcport) \ + (!_fcport || IS_SESSION_DELETED(_fcport) || atomic_read(&_fcport->state) != FCS_ONLINE || \ + !_fcport->vha->hw->flags.fw_started) + +#endif diff --git a/drivers/scsi/qla2xxx/qla_devtbl.h b/drivers/scsi/qla2xxx/qla_devtbl.h new file mode 100644 index 000000000..ffb9694be --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_devtbl.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#define QLA_MODEL_NAMES 0x5C + +/* + * Adapter model names and descriptions. + */ +static char *qla2x00_model_name[QLA_MODEL_NAMES*2] = { + "QLA2340", "133MHz PCI-X to 2Gb FC, Single Channel", /* 0x100 */ + "QLA2342", "133MHz PCI-X to 2Gb FC, Dual Channel", /* 0x101 */ + "QLA2344", "133MHz PCI-X to 2Gb FC, Quad Channel", /* 0x102 */ + "QCP2342", "cPCI to 2Gb FC, Dual Channel", /* 0x103 */ + "QSB2340", "SBUS to 2Gb FC, Single Channel", /* 0x104 */ + "QSB2342", "SBUS to 2Gb FC, Dual Channel", /* 0x105 */ + "QLA2310", "Sun 66MHz PCI-X to 2Gb FC, Single Channel", /* 0x106 */ + "QLA2332", "Sun 66MHz PCI-X to 2Gb FC, Single Channel", /* 0x107 */ + "QCP2332", "Sun cPCI to 2Gb FC, Dual Channel", /* 0x108 */ + "QCP2340", "cPCI to 2Gb FC, Single Channel", /* 0x109 */ + "QLA2342", "Sun 133MHz PCI-X to 2Gb FC, Dual Channel", /* 0x10a */ + "QCP2342", "Sun - cPCI to 2Gb FC, Dual Channel", /* 0x10b */ + "QLA2350", "133MHz PCI-X to 2Gb FC, Single Channel", /* 0x10c */ + "QLA2352", "133MHz PCI-X to 2Gb FC, Dual Channel", /* 0x10d */ + "QLA2352", "Sun 133MHz PCI-X to 2Gb FC, Dual Channel", /* 0x10e */ + " ", " ", /* 0x10f */ + " ", " ", /* 0x110 */ + " ", " ", /* 0x111 */ + " ", " ", /* 0x112 */ + " ", " ", /* 0x113 */ + " ", " ", /* 0x114 */ + "QLA2360", "133MHz PCI-X to 2Gb FC, Single Channel", /* 0x115 */ + "QLA2362", "133MHz PCI-X to 2Gb FC, Dual Channel", /* 0x116 */ + "QLE2360", "PCI-Express to 2Gb FC, Single Channel", /* 0x117 */ + "QLE2362", "PCI-Express to 2Gb FC, Dual Channel", /* 0x118 */ + "QLA200", "133MHz PCI-X to 2Gb FC Optical", /* 0x119 */ + " ", " ", /* 0x11a */ + " ", " ", /* 0x11b */ + "QLA200P", "133MHz PCI-X to 2Gb FC SFP", /* 0x11c */ + " ", " ", /* 0x11d */ + " ", " ", /* 0x11e */ + " ", " ", /* 0x11f */ + " ", " ", /* 0x120 */ + " ", " ", /* 0x121 */ + " ", " ", /* 0x122 */ + " ", " ", /* 0x123 */ + " ", " ", /* 0x124 */ + " ", " ", /* 0x125 */ + " ", " ", /* 0x126 */ + " ", " ", /* 0x127 */ + " ", " ", /* 0x128 */ + " ", " ", /* 0x129 */ + " ", " ", /* 0x12a */ + " ", " ", /* 0x12b */ + " ", " ", /* 0x12c */ + " ", " ", /* 0x12d */ + " ", " ", /* 0x12e */ + "QLA210", "133MHz PCI-X to 2Gb FC, Single Channel", /* 0x12f */ + "EMC 250", "133MHz PCI-X to 2Gb FC, Single Channel", /* 0x130 */ + "HP A7538A", "HP 1p2g PCI-X to 2Gb FC, Single Channel", /* 0x131 */ + "QLA210", "Sun 133MHz PCI-X to 2Gb FC, Single Channel", /* 0x132 */ + "QLA2460", "PCI-X 2.0 to 4Gb FC, Single Channel", /* 0x133 */ + "QLA2462", "PCI-X 2.0 to 4Gb FC, Dual Channel", /* 0x134 */ + "QMC2462", "IBM eServer BC 4Gb FC Expansion Card", /* 0x135 */ + "QMC2462S", "IBM eServer BC 4Gb FC Expansion Card SFF", /* 0x136 */ + "QLE2460", "PCI-Express to 4Gb FC, Single Channel", /* 0x137 */ + "QLE2462", 
"PCI-Express to 4Gb FC, Dual Channel", /* 0x138 */ + "QME2462", "Dell BS PCI-Express to 4Gb FC, Dual Channel", /* 0x139 */ + " ", " ", /* 0x13a */ + " ", " ", /* 0x13b */ + " ", " ", /* 0x13c */ + "QEM2462", "Sun Server I/O Module 4Gb FC, Dual Channel", /* 0x13d */ + "QLE210", "PCI-Express to 2Gb FC, Single Channel", /* 0x13e */ + "QLE220", "PCI-Express to 4Gb FC, Single Channel", /* 0x13f */ + "QLA2460", "Sun PCI-X 2.0 to 4Gb FC, Single Channel", /* 0x140 */ + "QLA2462", "Sun PCI-X 2.0 to 4Gb FC, Dual Channel", /* 0x141 */ + "QLE2460", "Sun PCI-Express to 2Gb FC, Single Channel", /* 0x142 */ + "QLE2462", "Sun PCI-Express to 4Gb FC, Single Channel", /* 0x143 */ + "QEM2462", "Server I/O Module 4Gb FC, Dual Channel", /* 0x144 */ + "QLE2440", "PCI-Express to 4Gb FC, Single Channel", /* 0x145 */ + "QLE2464", "PCI-Express to 4Gb FC, Quad Channel", /* 0x146 */ + "QLA2440", "PCI-X 2.0 to 4Gb FC, Single Channel", /* 0x147 */ + "HP AE369A", "PCI-X 2.0 to 4Gb FC, Dual Channel", /* 0x148 */ + "QLA2340", "Sun 133MHz PCI-X to 2Gb FC, Single Channel", /* 0x149 */ + " ", " ", /* 0x14a */ + " ", " ", /* 0x14b */ + "QMC2432M", "IBM eServer BC 4Gb FC Expansion Card CFFE", /* 0x14c */ + "QMC2422M", "IBM eServer BC 4Gb FC Expansion Card CFFX", /* 0x14d */ + "QLE220", "Sun PCI-Express to 4Gb FC, Single Channel", /* 0x14e */ + " ", " ", /* 0x14f */ + " ", " ", /* 0x150 */ + " ", " ", /* 0x151 */ + "QME2462", "PCI-Express to 4Gb FC, Dual Channel Mezz HBA", /* 0x152 */ + "QMH2462", "PCI-Express to 4Gb FC, Dual Channel Mezz HBA", /* 0x153 */ + " ", " ", /* 0x154 */ + "QLE220", "PCI-Express to 4Gb FC, Single Channel", /* 0x155 */ + "QLE220", "PCI-Express to 4Gb FC, Single Channel", /* 0x156 */ + " ", " ", /* 0x157 */ + " ", " ", /* 0x158 */ + " ", " ", /* 0x159 */ + " ", " ", /* 0x15a */ + "QME2472", "Dell BS PCI-Express to 4Gb FC, Dual Channel", /* 0x15b */ +}; diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c new file mode 100644 index 000000000..a7a364760 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -0,0 +1,780 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#include "qla_def.h" + +#include +#include + +static struct dentry *qla2x00_dfs_root; +static atomic_t qla2x00_dfs_root_count; + +#define QLA_DFS_RPORT_DEVLOSS_TMO 1 + +static int +qla_dfs_rport_get(struct fc_port *fp, int attr_id, u64 *val) +{ + switch (attr_id) { + case QLA_DFS_RPORT_DEVLOSS_TMO: + /* Only supported for FC-NVMe devices that are registered. */ + if (!(fp->nvme_flag & NVME_FLAG_REGISTERED)) + return -EIO; + *val = fp->nvme_remote_port->dev_loss_tmo; + break; + default: + return -EINVAL; + } + return 0; +} + +static int +qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val) +{ + switch (attr_id) { + case QLA_DFS_RPORT_DEVLOSS_TMO: + /* Only supported for FC-NVMe devices that are registered. 
*/ + if (!(fp->nvme_flag & NVME_FLAG_REGISTERED)) + return -EIO; +#if (IS_ENABLED(CONFIG_NVME_FC)) + return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port, + val); +#else /* CONFIG_NVME_FC */ + return -EINVAL; +#endif /* CONFIG_NVME_FC */ + default: + return -EINVAL; + } + return 0; +} + +#define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr) \ +static int qla_dfs_rport_##_attr##_get(void *data, u64 *val) \ +{ \ + struct fc_port *fp = data; \ + return qla_dfs_rport_get(fp, _attr_id, val); \ +} \ +static int qla_dfs_rport_##_attr##_set(void *data, u64 val) \ +{ \ + struct fc_port *fp = data; \ + return qla_dfs_rport_set(fp, _attr_id, val); \ +} \ +DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops, \ + qla_dfs_rport_##_attr##_get, \ + qla_dfs_rport_##_attr##_set, "%llu\n") + +/* + * Wrapper for getting fc_port fields. + * + * _attr : Attribute name. + * _get_val : Accessor macro to retrieve the value. + */ +#define DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val) \ +static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val) \ +{ \ + struct fc_port *fp = data; \ + *val = _get_val; \ + return 0; \ +} \ +DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops, \ + qla_dfs_rport_field_##_attr##_get, \ + NULL, "%llu\n") + +#define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val) \ + DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val) + +#define DEFINE_QLA_DFS_RPORT_FIELD(_attr) \ + DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, fp->_attr) + +DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo); + +DEFINE_QLA_DFS_RPORT_FIELD(disc_state); +DEFINE_QLA_DFS_RPORT_FIELD(scan_state); +DEFINE_QLA_DFS_RPORT_FIELD(fw_login_state); +DEFINE_QLA_DFS_RPORT_FIELD(login_pause); +DEFINE_QLA_DFS_RPORT_FIELD(flags); +DEFINE_QLA_DFS_RPORT_FIELD(nvme_flag); +DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen); +DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen); +DEFINE_QLA_DFS_RPORT_FIELD(login_gen); +DEFINE_QLA_DFS_RPORT_FIELD(loop_id); +DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24); +DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref)); + +void +qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp) +{ + char wwn[32]; + +#define QLA_CREATE_RPORT_FIELD_ATTR(_attr) \ + debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir, \ + fp, &qla_dfs_rport_field_##_attr##_fops) + + if (!vha->dfs_rport_root || fp->dfs_rport_dir) + return; + + sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name)); + fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root); + if (IS_ERR(fp->dfs_rport_dir)) + return; + if (NVME_TARGET(vha->hw, fp)) + debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir, + fp, &qla_dfs_rport_dev_loss_tmo_fops); + + QLA_CREATE_RPORT_FIELD_ATTR(disc_state); + QLA_CREATE_RPORT_FIELD_ATTR(scan_state); + QLA_CREATE_RPORT_FIELD_ATTR(fw_login_state); + QLA_CREATE_RPORT_FIELD_ATTR(login_pause); + QLA_CREATE_RPORT_FIELD_ATTR(flags); + QLA_CREATE_RPORT_FIELD_ATTR(nvme_flag); + QLA_CREATE_RPORT_FIELD_ATTR(last_rscn_gen); + QLA_CREATE_RPORT_FIELD_ATTR(rscn_gen); + QLA_CREATE_RPORT_FIELD_ATTR(login_gen); + QLA_CREATE_RPORT_FIELD_ATTR(loop_id); + QLA_CREATE_RPORT_FIELD_ATTR(port_id); + QLA_CREATE_RPORT_FIELD_ATTR(sess_kref); +} + +void +qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp) +{ + if (!vha->dfs_rport_root || !fp->dfs_rport_dir) + return; + debugfs_remove_recursive(fp->dfs_rport_dir); + fp->dfs_rport_dir = NULL; +} + +static int +qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused) +{ + scsi_qla_host_t *vha = s->private; + struct qla_hw_data *ha = 
vha->hw; + unsigned long flags; + struct fc_port *sess = NULL; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + + seq_printf(s, "%s\n", vha->host_str); + if (tgt) { + seq_puts(s, "Port ID Port Name Handle\n"); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + list_for_each_entry(sess, &vha->vp_fcports, list) + seq_printf(s, "%02x:%02x:%02x %8phC %d\n", + sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa, sess->port_name, + sess->loop_id); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_sess); + +static int +qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused) +{ + scsi_qla_host_t *vha = s->private; + struct qla_hw_data *ha = vha->hw; + struct gid_list_info *gid_list; + dma_addr_t gid_list_dma; + fc_port_t fc_port; + char *id_iter; + int rc, i; + uint16_t entries, loop_id; + + seq_printf(s, "%s\n", vha->host_str); + gid_list = dma_alloc_coherent(&ha->pdev->dev, + qla2x00_gid_list_size(ha), + &gid_list_dma, GFP_KERNEL); + if (!gid_list) { + ql_dbg(ql_dbg_user, vha, 0x7018, + "DMA allocation failed for %u\n", + qla2x00_gid_list_size(ha)); + return 0; + } + + rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, + &entries); + if (rc != QLA_SUCCESS) + goto out_free_id_list; + + id_iter = (char *)gid_list; + + seq_puts(s, "Port Name Port ID Loop ID\n"); + + for (i = 0; i < entries; i++) { + struct gid_list_info *gid = + (struct gid_list_info *)id_iter; + loop_id = le16_to_cpu(gid->loop_id); + memset(&fc_port, 0, sizeof(fc_port_t)); + + fc_port.loop_id = loop_id; + + rc = qla24xx_gpdb_wait(vha, &fc_port, 0); + seq_printf(s, "%8phC %02x%02x%02x %d\n", + fc_port.port_name, fc_port.d_id.b.domain, + fc_port.d_id.b.area, fc_port.d_id.b.al_pa, + fc_port.loop_id); + id_iter += ha->gid_list_info_size; + } +out_free_id_list: + dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), + gid_list, gid_list_dma); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_port_database); + +static int +qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) +{ + struct scsi_qla_host *vha = s->private; + uint16_t mb[MAX_IOCB_MB_REG]; + int rc; + struct qla_hw_data *ha = vha->hw; + u16 iocbs_used, i, exch_used; + + rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG); + if (rc != QLA_SUCCESS) { + seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]); + } else { + seq_puts(s, "FW Resource count\n\n"); + seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]); + seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]); + seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]); + seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]); + seq_printf(s, "Current IOCB count[%d]\n", mb[7]); + seq_printf(s, "Original IOCB count[%d]\n", mb[10]); + seq_printf(s, "MAX VP count[%d]\n", mb[11]); + seq_printf(s, "MAX FCF count[%d]\n", mb[12]); + seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n", + mb[20]); + seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n", + mb[21]); + seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n", + mb[22]); + seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n", + mb[23]); + } + + if (ql2xenforce_iocb_limit) { + /* lock is not require. It's an estimate. 
*/ + iocbs_used = ha->base_qpair->fwres.iocbs_used; + exch_used = ha->base_qpair->fwres.exch_used; + for (i = 0; i < ha->max_qpairs; i++) { + if (ha->queue_pair_map[i]) { + iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used; + exch_used += ha->queue_pair_map[i]->fwres.exch_used; + } + } + + seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n", + iocbs_used, ha->base_qpair->fwres.iocbs_limit); + + seq_printf(s, "estimate exchange used[%d] high water limit [%d] n", + exch_used, ha->base_qpair->fwres.exch_limit); + + if (ql2xenforce_iocb_limit == 2) { + iocbs_used = atomic_read(&ha->fwres.iocb_used); + exch_used = atomic_read(&ha->fwres.exch_used); + seq_printf(s, " estimate iocb2 used [%d] high water limit [%d]\n", + iocbs_used, ha->fwres.iocb_limit); + + seq_printf(s, " estimate exchange2 used[%d] high water limit [%d] \n", + exch_used, ha->fwres.exch_limit); + } + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(qla_dfs_fw_resource_cnt); + +static int +qla_dfs_tgt_counters_show(struct seq_file *s, void *unused) +{ + struct scsi_qla_host *vha = s->private; + struct qla_qpair *qpair = vha->hw->base_qpair; + uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio, + core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd, + num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent; + u16 i; + fc_port_t *fcport = NULL; + + if (qla2x00_chip_is_down(vha)) + return 0; + + qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd; + core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf; + qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio; + core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status; + qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio; + core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd; + num_q_full_sent = qpair->tgt_counters.num_q_full_sent; + num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed; + num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent; + + for (i = 0; i < vha->hw->max_qpairs; i++) { + qpair = vha->hw->queue_pair_map[i]; + if (!qpair) + continue; + qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd; + core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf; + qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio; + core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status; + qla_core_ret_sta_ctio += + qpair->tgt_counters.qla_core_ret_sta_ctio; + core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd; + num_q_full_sent += qpair->tgt_counters.num_q_full_sent; + num_alloc_iocb_failed += + qpair->tgt_counters.num_alloc_iocb_failed; + num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent; + } + + seq_puts(s, "Target Counters\n"); + seq_printf(s, "qla_core_sbt_cmd = %lld\n", + qla_core_sbt_cmd); + seq_printf(s, "qla_core_ret_sta_ctio = %lld\n", + qla_core_ret_sta_ctio); + seq_printf(s, "qla_core_ret_ctio = %lld\n", + qla_core_ret_ctio); + seq_printf(s, "core_qla_que_buf = %lld\n", + core_qla_que_buf); + seq_printf(s, "core_qla_snd_status = %lld\n", + core_qla_snd_status); + seq_printf(s, "core_qla_free_cmd = %lld\n", + core_qla_free_cmd); + seq_printf(s, "num alloc iocb failed = %lld\n", + num_alloc_iocb_failed); + seq_printf(s, "num term exchange sent = %lld\n", + num_term_xchg_sent); + seq_printf(s, "num Q full sent = %lld\n", + num_q_full_sent); + + /* DIF stats */ + seq_printf(s, "DIF Inp Bytes = %lld\n", + vha->qla_stats.qla_dif_stats.dif_input_bytes); + seq_printf(s, "DIF Outp Bytes = %lld\n", + vha->qla_stats.qla_dif_stats.dif_output_bytes); + 
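	/*
	 * Annotation added in review -- not part of the upstream 6.6.15 source:
	 * the DIF statistics in this block are read from the per-host
	 * vha->qla_stats.qla_dif_stats structure rather than from the
	 * per-queue-pair tgt_counters aggregated above; likewise the
	 * "Initiator Error Counters" printed further down come from fields
	 * on the scsi_qla_host itself.
	 */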
seq_printf(s, "DIF Inp Req = %lld\n", + vha->qla_stats.qla_dif_stats.dif_input_requests); + seq_printf(s, "DIF Outp Req = %lld\n", + vha->qla_stats.qla_dif_stats.dif_output_requests); + seq_printf(s, "DIF Guard err = %d\n", + vha->qla_stats.qla_dif_stats.dif_guard_err); + seq_printf(s, "DIF Ref tag err = %d\n", + vha->qla_stats.qla_dif_stats.dif_ref_tag_err); + seq_printf(s, "DIF App tag err = %d\n", + vha->qla_stats.qla_dif_stats.dif_app_tag_err); + + seq_puts(s, "\n"); + seq_puts(s, "Initiator Error Counters\n"); + seq_printf(s, "HW Error Count = %14lld\n", + vha->hw_err_cnt); + seq_printf(s, "Link Down Count = %14lld\n", + vha->short_link_down_cnt); + seq_printf(s, "Interface Err Count = %14lld\n", + vha->interface_err_cnt); + seq_printf(s, "Cmd Timeout Count = %14lld\n", + vha->cmd_timeout_cnt); + seq_printf(s, "Reset Count = %14lld\n", + vha->reset_cmd_err_cnt); + seq_puts(s, "\n"); + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (!fcport->rport) + continue; + + seq_printf(s, "Target Num = %7d Link Down Count = %14lld\n", + fcport->rport->number, fcport->tgt_short_link_down_cnt); + } + seq_puts(s, "\n"); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(qla_dfs_tgt_counters); + +static int +qla2x00_dfs_fce_show(struct seq_file *s, void *unused) +{ + scsi_qla_host_t *vha = s->private; + uint32_t cnt; + uint32_t *fce; + uint64_t fce_start; + struct qla_hw_data *ha = vha->hw; + + mutex_lock(&ha->fce_mutex); + + seq_puts(s, "FCE Trace Buffer\n"); + seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr); + seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); + seq_puts(s, "FCE Enable Registers\n"); + seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", + ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4], + ha->fce_mb[5], ha->fce_mb[6]); + + fce = (uint32_t *) ha->fce; + fce_start = (unsigned long long) ha->fce_dma; + for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) { + if (cnt % 8 == 0) + seq_printf(s, "\n%llx: ", + (unsigned long long)((cnt * 4) + fce_start)); + else + seq_putc(s, ' '); + seq_printf(s, "%08x", *fce++); + } + + seq_puts(s, "\nEnd\n"); + + mutex_unlock(&ha->fce_mutex); + + return 0; +} + +static int +qla2x00_dfs_fce_open(struct inode *inode, struct file *file) +{ + scsi_qla_host_t *vha = inode->i_private; + struct qla_hw_data *ha = vha->hw; + int rval; + + if (!ha->flags.fce_enabled) + goto out; + + mutex_lock(&ha->fce_mutex); + + /* Pause tracing to flush FCE buffers. */ + rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd); + if (rval) + ql_dbg(ql_dbg_user, vha, 0x705c, + "DebugFS: Unable to disable FCE (%d).\n", rval); + + ha->flags.fce_enabled = 0; + + mutex_unlock(&ha->fce_mutex); +out: + return single_open(file, qla2x00_dfs_fce_show, vha); +} + +static int +qla2x00_dfs_fce_release(struct inode *inode, struct file *file) +{ + scsi_qla_host_t *vha = inode->i_private; + struct qla_hw_data *ha = vha->hw; + int rval; + + if (ha->flags.fce_enabled) + goto out; + + mutex_lock(&ha->fce_mutex); + + /* Re-enable FCE tracing. 
*/ + ha->flags.fce_enabled = 1; + memset(ha->fce, 0, fce_calc_size(ha->fce_bufs)); + rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs, + ha->fce_mb, &ha->fce_bufs); + if (rval) { + ql_dbg(ql_dbg_user, vha, 0x700d, + "DebugFS: Unable to reinitialize FCE (%d).\n", rval); + ha->flags.fce_enabled = 0; + } + + mutex_unlock(&ha->fce_mutex); +out: + return single_release(inode, file); +} + +static const struct file_operations dfs_fce_ops = { + .open = qla2x00_dfs_fce_open, + .read = seq_read, + .llseek = seq_lseek, + .release = qla2x00_dfs_fce_release, +}; + +static int +qla_dfs_naqp_show(struct seq_file *s, void *unused) +{ + struct scsi_qla_host *vha = s->private; + struct qla_hw_data *ha = vha->hw; + + seq_printf(s, "%d\n", ha->tgt.num_act_qpairs); + return 0; +} + +/* + * Helper macros for setting up debugfs entries. + * _name: The name of the debugfs entry + * _ctx_struct: The context that was passed when creating the debugfs file + * + * QLA_DFS_SETUP_RD could be used when there is only a show function. + * - show function take the name qla_dfs__show + * + * QLA_DFS_SETUP_RW could be used when there are both show and write functions. + * - show function take the name qla_dfs__show + * - write function take the name qla_dfs__write + * + * To have a new debugfs entry, do: + * 1. Create a "struct dentry *" in the appropriate structure in the format + * dfs_ + * 2. Setup debugfs entries using QLA_DFS_SETUP_RD / QLA_DFS_SETUP_RW + * 3. Create debugfs file in qla2x00_dfs_setup() using QLA_DFS_CREATE_FILE + * or QLA_DFS_ROOT_CREATE_FILE + * 4. Remove debugfs file in qla2x00_dfs_remove() using QLA_DFS_REMOVE_FILE + * or QLA_DFS_ROOT_REMOVE_FILE + * + * Example for creating "TEST" sysfs file: + * 1. struct qla_hw_data { ... struct dentry *dfs_TEST; } + * 2. QLA_DFS_SETUP_RD(TEST, scsi_qla_host_t); + * 3. In qla2x00_dfs_setup(): + * QLA_DFS_CREATE_FILE(ha, TEST, 0600, ha->dfs_dir, vha); + * 4. 
In qla2x00_dfs_remove(): + * QLA_DFS_REMOVE_FILE(ha, TEST); + */ +#define QLA_DFS_SETUP_RD(_name, _ctx_struct) \ +static int \ +qla_dfs_##_name##_open(struct inode *inode, struct file *file) \ +{ \ + _ctx_struct *__ctx = inode->i_private; \ + \ + return single_open(file, qla_dfs_##_name##_show, __ctx); \ +} \ + \ +static const struct file_operations qla_dfs_##_name##_ops = { \ + .open = qla_dfs_##_name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +}; + +#define QLA_DFS_SETUP_RW(_name, _ctx_struct) \ +static int \ +qla_dfs_##_name##_open(struct inode *inode, struct file *file) \ +{ \ + _ctx_struct *__ctx = inode->i_private; \ + \ + return single_open(file, qla_dfs_##_name##_show, __ctx); \ +} \ + \ +static const struct file_operations qla_dfs_##_name##_ops = { \ + .open = qla_dfs_##_name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + .write = qla_dfs_##_name##_write, \ +}; + +#define QLA_DFS_ROOT_CREATE_FILE(_name, _perm, _ctx) \ + do { \ + if (!qla_dfs_##_name) \ + qla_dfs_##_name = debugfs_create_file(#_name, \ + _perm, qla2x00_dfs_root, _ctx, \ + &qla_dfs_##_name##_ops); \ + } while (0) + +#define QLA_DFS_ROOT_REMOVE_FILE(_name) \ + do { \ + if (qla_dfs_##_name) { \ + debugfs_remove(qla_dfs_##_name); \ + qla_dfs_##_name = NULL; \ + } \ + } while (0) + +#define QLA_DFS_CREATE_FILE(_struct, _name, _perm, _parent, _ctx) \ + do { \ + (_struct)->dfs_##_name = debugfs_create_file(#_name, \ + _perm, _parent, _ctx, \ + &qla_dfs_##_name##_ops) \ + } while (0) + +#define QLA_DFS_REMOVE_FILE(_struct, _name) \ + do { \ + if ((_struct)->dfs_##_name) { \ + debugfs_remove((_struct)->dfs_##_name); \ + (_struct)->dfs_##_name = NULL; \ + } \ + } while (0) + +static int +qla_dfs_naqp_open(struct inode *inode, struct file *file) +{ + struct scsi_qla_host *vha = inode->i_private; + + return single_open(file, qla_dfs_naqp_show, vha); +} + +static ssize_t +qla_dfs_naqp_write(struct file *file, const char __user *buffer, + size_t count, loff_t *pos) +{ + struct seq_file *s = file->private_data; + struct scsi_qla_host *vha = s->private; + struct qla_hw_data *ha = vha->hw; + char *buf; + int rc = 0; + unsigned long num_act_qp; + + if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) { + pr_err("host%ld: this adapter does not support Multi Q.", + vha->host_no); + return -EINVAL; + } + + if (!vha->flags.qpairs_available) { + pr_err("host%ld: Driver is not setup with Multi Q.", + vha->host_no); + return -EINVAL; + } + buf = memdup_user_nul(buffer, count); + if (IS_ERR(buf)) { + pr_err("host%ld: fail to copy user buffer.", + vha->host_no); + return PTR_ERR(buf); + } + + num_act_qp = simple_strtoul(buf, NULL, 0); + + if (num_act_qp >= vha->hw->max_qpairs) { + pr_err("User set invalid number of qpairs %lu. 
Max = %d", + num_act_qp, vha->hw->max_qpairs); + rc = -EINVAL; + goto out_free; + } + + if (num_act_qp != ha->tgt.num_act_qpairs) { + ha->tgt.num_act_qpairs = num_act_qp; + qlt_clr_qp_table(vha); + } + rc = count; +out_free: + kfree(buf); + return rc; +} + +static const struct file_operations dfs_naqp_ops = { + .open = qla_dfs_naqp_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = qla_dfs_naqp_write, +}; + + +int +qla2x00_dfs_setup(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + goto out; + if (!ha->fce) + goto out; + + if (qla2x00_dfs_root) + goto create_dir; + + atomic_set(&qla2x00_dfs_root_count, 0); + qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL); + +create_dir: + if (ha->dfs_dir) + goto create_nodes; + + mutex_init(&ha->fce_mutex); + ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root); + + atomic_inc(&qla2x00_dfs_root_count); + +create_nodes: + ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count", + S_IRUSR, ha->dfs_dir, vha, &qla_dfs_fw_resource_cnt_fops); + + ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR, + ha->dfs_dir, vha, &qla_dfs_tgt_counters_fops); + + ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database", + S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_port_database_fops); + + ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, + &dfs_fce_ops); + + ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess", + S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_sess_fops); + + if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) { + ha->tgt.dfs_naqp = debugfs_create_file("naqp", + 0400, ha->dfs_dir, vha, &dfs_naqp_ops); + if (IS_ERR(ha->tgt.dfs_naqp)) { + ql_log(ql_log_warn, vha, 0xd011, + "Unable to create debugFS naqp node.\n"); + goto out; + } + } + vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir); + if (IS_ERR(vha->dfs_rport_root)) { + ql_log(ql_log_warn, vha, 0xd012, + "Unable to create debugFS rports node.\n"); + goto out; + } +out: + return 0; +} + +int +qla2x00_dfs_remove(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (ha->tgt.dfs_naqp) { + debugfs_remove(ha->tgt.dfs_naqp); + ha->tgt.dfs_naqp = NULL; + } + + if (ha->tgt.dfs_tgt_sess) { + debugfs_remove(ha->tgt.dfs_tgt_sess); + ha->tgt.dfs_tgt_sess = NULL; + } + + if (ha->tgt.dfs_tgt_port_database) { + debugfs_remove(ha->tgt.dfs_tgt_port_database); + ha->tgt.dfs_tgt_port_database = NULL; + } + + if (ha->dfs_fw_resource_cnt) { + debugfs_remove(ha->dfs_fw_resource_cnt); + ha->dfs_fw_resource_cnt = NULL; + } + + if (ha->dfs_tgt_counters) { + debugfs_remove(ha->dfs_tgt_counters); + ha->dfs_tgt_counters = NULL; + } + + if (ha->dfs_fce) { + debugfs_remove(ha->dfs_fce); + ha->dfs_fce = NULL; + } + + if (vha->dfs_rport_root) { + debugfs_remove_recursive(vha->dfs_rport_root); + vha->dfs_rport_root = NULL; + } + + if (ha->dfs_dir) { + debugfs_remove(ha->dfs_dir); + ha->dfs_dir = NULL; + atomic_dec(&qla2x00_dfs_root_count); + } + + if (atomic_read(&qla2x00_dfs_root_count) == 0 && + qla2x00_dfs_root) { + debugfs_remove(qla2x00_dfs_root); + qla2x00_dfs_root = NULL; + } + + return 0; +} diff --git a/drivers/scsi/qla2xxx/qla_dsd.h b/drivers/scsi/qla2xxx/qla_dsd.h new file mode 100644 index 000000000..20788054b --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_dsd.h @@ -0,0 +1,32 @@ +#ifndef _QLA_DSD_H_ +#define _QLA_DSD_H_ + +#include + +/* 32-bit data segment 
descriptor (8 bytes) */ +struct dsd32 { + __le32 address; + __le32 length; +}; + +static inline void append_dsd32(struct dsd32 **dsd, struct scatterlist *sg) +{ + put_unaligned_le32(sg_dma_address(sg), &(*dsd)->address); + put_unaligned_le32(sg_dma_len(sg), &(*dsd)->length); + (*dsd)++; +} + +/* 64-bit data segment descriptor (12 bytes) */ +struct dsd64 { + __le64 address; + __le32 length; +} __packed; + +static inline void append_dsd64(struct dsd64 **dsd, struct scatterlist *sg) +{ + put_unaligned_le64(sg_dma_address(sg), &(*dsd)->address); + put_unaligned_le32(sg_dma_len(sg), &(*dsd)->length); + (*dsd)++; +} + +#endif diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c new file mode 100644 index 000000000..26e6b3e3a --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_edif.c @@ -0,0 +1,3714 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Marvell Fibre Channel HBA Driver + * Copyright (c) 2021 Marvell + */ +#include "qla_def.h" +#include "qla_edif.h" + +#include +#include +#include +#include + +static struct edif_sa_index_entry *qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle, + struct list_head *sa_list); +static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport, + struct qla_sa_update_frame *sa_frame); +static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle, + uint16_t sa_index); +static int qla_pur_get_pending(scsi_qla_host_t *, fc_port_t *, struct bsg_job *); + +struct edb_node { + struct list_head list; + uint32_t ntype; + union { + port_id_t plogi_did; + uint32_t async; + port_id_t els_sid; + struct edif_sa_update_aen sa_aen; + } u; +}; + +static struct els_sub_cmd { + uint16_t cmd; + const char *str; +} sc_str[] = { + {SEND_ELS, "send ELS"}, + {SEND_ELS_REPLY, "send ELS Reply"}, + {PULL_ELS, "retrieve ELS"}, +}; + +const char *sc_to_str(uint16_t cmd) +{ + int i; + struct els_sub_cmd *e; + + for (i = 0; i < ARRAY_SIZE(sc_str); i++) { + e = sc_str + i; + if (cmd == e->cmd) + return e->str; + } + return "unknown"; +} + +static struct edb_node *qla_edb_getnext(scsi_qla_host_t *vha) +{ + unsigned long flags; + struct edb_node *edbnode = NULL; + + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + + /* db nodes are fifo - no qualifications done */ + if (!list_empty(&vha->e_dbell.head)) { + edbnode = list_first_entry(&vha->e_dbell.head, + struct edb_node, list); + list_del_init(&edbnode->list); + } + + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + return edbnode; +} + +static void qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node) +{ + list_del_init(&node->list); + kfree(node); +} + +static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport, + uint16_t handle) +{ + struct edif_list_entry *entry; + struct edif_list_entry *tentry; + struct list_head *indx_list = &fcport->edif.edif_indx_list; + + list_for_each_entry_safe(entry, tentry, indx_list, next) { + if (entry->handle == handle) + return entry; + } + return NULL; +} + +/* timeout called when no traffic and delayed rx sa_index delete */ +static void qla2x00_sa_replace_iocb_timeout(struct timer_list *t) +{ + struct edif_list_entry *edif_entry = from_timer(edif_entry, t, timer); + fc_port_t *fcport = edif_entry->fcport; + struct scsi_qla_host *vha = fcport->vha; + struct edif_sa_ctl *sa_ctl; + uint16_t nport_handle; + unsigned long flags = 0; + + ql_dbg(ql_dbg_edif, vha, 0x3069, + "%s: nport_handle 0x%x, SA REPL Delay Timeout, %8phC portid=%06x\n", + __func__, edif_entry->handle, fcport->port_name, fcport->d_id.b24); + + /* + * if 
delete_sa_index is valid then no one has serviced this + * delayed delete + */ + spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); + + /* + * delete_sa_index is invalidated when we find the new sa_index in + * the incoming data stream. If it is not invalidated then we are + * still looking for the new sa_index because there is no I/O and we + * need to just force the rx delete and move on. Otherwise + * we could get another rekey which will result in an error 66. + */ + if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) { + uint16_t delete_sa_index = edif_entry->delete_sa_index; + + edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX; + nport_handle = edif_entry->handle; + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + + sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, + delete_sa_index, 0); + + if (sa_ctl) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: sa_ctl: %p, delete index %d, update index: %d, lid: 0x%x\n", + __func__, sa_ctl, delete_sa_index, edif_entry->update_sa_index, + nport_handle); + + sa_ctl->flags = EDIF_SA_CTL_FLG_DEL; + set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state); + qla_post_sa_replace_work(fcport->vha, fcport, + nport_handle, sa_ctl); + + } else { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: sa_ctl not found for delete_sa_index: %d\n", + __func__, edif_entry->delete_sa_index); + } + } else { + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + } +} + +/* + * create a new list entry for this nport handle and + * add an sa_update index to the list - called for sa_update + */ +static int qla_edif_list_add_sa_update_index(fc_port_t *fcport, + uint16_t sa_index, uint16_t handle) +{ + struct edif_list_entry *entry; + unsigned long flags = 0; + + /* if the entry exists, then just update the sa_index */ + entry = qla_edif_list_find_sa_index(fcport, handle); + if (entry) { + entry->update_sa_index = sa_index; + entry->count = 0; + return 0; + } + + /* + * This is the normal path - there should be no existing entry + * when update is called. 
The exception is at startup + * when update is called for the first two sa_indexes + * followed by a delete of the first sa_index + */ + entry = kzalloc((sizeof(struct edif_list_entry)), GFP_ATOMIC); + if (!entry) + return -ENOMEM; + + INIT_LIST_HEAD(&entry->next); + entry->handle = handle; + entry->update_sa_index = sa_index; + entry->delete_sa_index = INVALID_EDIF_SA_INDEX; + entry->count = 0; + entry->flags = 0; + timer_setup(&entry->timer, qla2x00_sa_replace_iocb_timeout, 0); + spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); + list_add_tail(&entry->next, &fcport->edif.edif_indx_list); + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + return 0; +} + +/* remove an entry from the list */ +static void qla_edif_list_delete_sa_index(fc_port_t *fcport, struct edif_list_entry *entry) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); + list_del(&entry->next); + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); +} + +int qla_post_sa_replace_work(struct scsi_qla_host *vha, + fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_SA_REPLACE); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.sa_update.fcport = fcport; + e->u.sa_update.sa_ctl = sa_ctl; + e->u.sa_update.nport_handle = nport_handle; + fcport->flags |= FCF_ASYNC_ACTIVE; + return qla2x00_post_work(vha, e); +} + +static void +qla_edif_sa_ctl_init(scsi_qla_host_t *vha, struct fc_port *fcport) +{ + ql_dbg(ql_dbg_edif, vha, 0x2058, + "Init SA_CTL List for fcport - nn %8phN pn %8phN portid=%06x.\n", + fcport->node_name, fcport->port_name, fcport->d_id.b24); + + fcport->edif.tx_rekey_cnt = 0; + fcport->edif.rx_rekey_cnt = 0; + + fcport->edif.tx_bytes = 0; + fcport->edif.rx_bytes = 0; +} + +static int qla_bsg_check(scsi_qla_host_t *vha, struct bsg_job *bsg_job, +fc_port_t *fcport) +{ + struct extra_auth_els *p; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct qla_bsg_auth_els_request *req = + (struct qla_bsg_auth_els_request *)bsg_job->request; + + if (!vha->hw->flags.edif_enabled) { + ql_dbg(ql_dbg_edif, vha, 0x9105, + "%s edif not enabled\n", __func__); + goto done; + } + if (DBELL_INACTIVE(vha)) { + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s doorbell not enabled\n", __func__); + goto done; + } + + p = &req->e; + + /* Get response */ + if (p->sub_cmd == PULL_ELS) { + struct qla_bsg_auth_els_reply *rpl = + (struct qla_bsg_auth_els_reply *)bsg_job->reply; + + qla_pur_get_pending(vha, fcport, bsg_job); + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s %s %8phN sid=%x. xchg %x, nb=%xh bsg ptr %p\n", + __func__, sc_to_str(p->sub_cmd), fcport->port_name, + fcport->d_id.b24, rpl->rx_xchg_address, + rpl->r.reply_payload_rcv_len, bsg_job); + + goto done; + } + return 0; + +done: + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return -EIO; +} + +fc_port_t * +qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id) +{ + fc_port_t *f, *tf; + + f = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { + if (f->d_id.b24 == id->b24) + return f; + } + return NULL; +} + +/** + * qla_edif_app_check(): check for valid application id. 
+ * @vha: host adapter pointer + * @appid: application id + * Return: false = fail, true = pass + */ +static bool +qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid) +{ + /* check that the app is allow/known to the driver */ + + if (appid.app_vid != EDIF_APP_ID) { + ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)", + __func__, appid.app_vid); + return false; + } + + if (appid.version != EDIF_VERSION1) { + ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app version is not ok (%x)", + __func__, appid.version); + return false; + } + + return true; +} + +static void +qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl, + int index) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&fcport->edif.sa_list_lock, flags); + list_del(&sa_ctl->next); + spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags); + if (index >= 512) + fcport->edif.tx_rekey_cnt--; + else + fcport->edif.rx_rekey_cnt--; + kfree(sa_ctl); +} + +/* return an index to the freepool */ +static void qla_edif_add_sa_index_to_freepool(fc_port_t *fcport, int dir, + uint16_t sa_index) +{ + void *sa_id_map; + struct scsi_qla_host *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags = 0; + u16 lsa_index = sa_index; + + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063, + "%s: entry\n", __func__); + + if (dir) { + sa_id_map = ha->edif_tx_sa_id_map; + lsa_index -= EDIF_TX_SA_INDEX_BASE; + } else { + sa_id_map = ha->edif_rx_sa_id_map; + } + + spin_lock_irqsave(&ha->sadb_fp_lock, flags); + clear_bit(lsa_index, sa_id_map); + spin_unlock_irqrestore(&ha->sadb_fp_lock, flags); + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: index %d added to free pool\n", __func__, sa_index); +} + +static void __qla2x00_release_all_sadb(struct scsi_qla_host *vha, + struct fc_port *fcport, struct edif_sa_index_entry *entry, + int pdir) +{ + struct edif_list_entry *edif_entry; + struct edif_sa_ctl *sa_ctl; + int i, dir; + int key_cnt = 0; + + for (i = 0; i < 2; i++) { + if (entry->sa_pair[i].sa_index == INVALID_EDIF_SA_INDEX) + continue; + + if (fcport->loop_id != entry->handle) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: ** WARNING %d** entry handle: 0x%x, lid: 0x%x, sa_index: %d\n", + __func__, i, entry->handle, fcport->loop_id, + entry->sa_pair[i].sa_index); + } + + /* release the sa_ctl */ + sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, + entry->sa_pair[i].sa_index, pdir); + if (sa_ctl && + qla_edif_find_sa_ctl_by_index(fcport, sa_ctl->index, pdir)) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index); + qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index); + } else { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__, sa_ctl); + } + + /* Release the index */ + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: freeing sa_index %d, nph: 0x%x\n", + __func__, entry->sa_pair[i].sa_index, entry->handle); + + dir = (entry->sa_pair[i].sa_index < + EDIF_TX_SA_INDEX_BASE) ? 
0 : 1; + qla_edif_add_sa_index_to_freepool(fcport, dir, + entry->sa_pair[i].sa_index); + + /* Delete timer on RX */ + if (pdir != SAU_FLG_TX) { + edif_entry = + qla_edif_list_find_sa_index(fcport, entry->handle); + if (edif_entry) { + ql_dbg(ql_dbg_edif, vha, 0x5033, + "%s: remove edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n", + __func__, edif_entry, edif_entry->update_sa_index, + edif_entry->delete_sa_index); + qla_edif_list_delete_sa_index(fcport, edif_entry); + /* + * valid delete_sa_index indicates there is a rx + * delayed delete queued + */ + if (edif_entry->delete_sa_index != + INVALID_EDIF_SA_INDEX) { + timer_shutdown(&edif_entry->timer); + + /* build and send the aen */ + fcport->edif.rx_sa_set = 1; + fcport->edif.rx_sa_pending = 0; + qla_edb_eventcreate(vha, + VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + QL_VND_SA_STAT_SUCCESS, + QL_VND_RX_SA_KEY, fcport); + } + ql_dbg(ql_dbg_edif, vha, 0x5033, + "%s: release edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n", + __func__, edif_entry, edif_entry->update_sa_index, + edif_entry->delete_sa_index); + + kfree(edif_entry); + } + } + key_cnt++; + } + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: %d %s keys released\n", + __func__, key_cnt, pdir ? "tx" : "rx"); +} + +/* find an release all outstanding sadb sa_indicies */ +void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport) +{ + struct edif_sa_index_entry *entry, *tmp; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063, + "%s: Starting...\n", __func__); + + spin_lock_irqsave(&ha->sadb_lock, flags); + + list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) { + if (entry->fcport == fcport) { + list_del(&entry->next); + spin_unlock_irqrestore(&ha->sadb_lock, flags); + __qla2x00_release_all_sadb(vha, fcport, entry, 0); + kfree(entry); + spin_lock_irqsave(&ha->sadb_lock, flags); + break; + } + } + + list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) { + if (entry->fcport == fcport) { + list_del(&entry->next); + spin_unlock_irqrestore(&ha->sadb_lock, flags); + + __qla2x00_release_all_sadb(vha, fcport, entry, SAU_FLG_TX); + + kfree(entry); + spin_lock_irqsave(&ha->sadb_lock, flags); + break; + } + } + spin_unlock_irqrestore(&ha->sadb_lock, flags); +} + +/** + * qla_delete_n2n_sess_and_wait: search for N2N session, tear it down and + * wait for tear down to complete. In N2N topology, there is only one + * session being active in tracking the remote device. + * @vha: host adapter pointer + * return code: 0 - found the session and completed the tear down. + * 1 - timeout occurred. Caller to use link bounce to reset. + */ +static int qla_delete_n2n_sess_and_wait(scsi_qla_host_t *vha) +{ + struct fc_port *fcport; + int rc = -EIO; + ulong expire = jiffies + 23 * HZ; + + if (!N2N_TOPO(vha->hw)) + return 0; + + fcport = NULL; + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (!fcport->n2n_flag) + continue; + + ql_dbg(ql_dbg_disc, fcport->vha, 0x2016, + "%s reset sess at app start \n", __func__); + + qla_edif_sa_ctl_init(vha, fcport); + qlt_schedule_sess_for_deletion(fcport); + + while (time_before_eq(jiffies, expire)) { + if (fcport->disc_state != DSC_DELETE_PEND) { + rc = 0; + break; + } + msleep(1); + } + + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; + } + + return rc; +} + +/** + * qla_edif_app_start: application has announce its present + * @vha: host adapter pointer + * @bsg_job: user request + * + * Set/activate doorbell. 
Reset current sessions and re-login with + * secure flag. + */ +static int +qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + int32_t rval = 0; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct app_start appstart; + struct app_start_reply appreply; + struct fc_port *fcport, *tf; + + ql_log(ql_log_info, vha, 0x1313, + "EDIF application registration with driver, FC device connections will be re-established.\n"); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &appstart, + sizeof(struct app_start)); + + ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n", + __func__, appstart.app_info.app_vid, appstart.app_start_flags); + + if (DBELL_INACTIVE(vha)) { + /* mark doorbell as active since an app is now present */ + vha->e_dbell.db_flags |= EDB_ACTIVE; + } else { + goto out; + } + + if (N2N_TOPO(vha->hw)) { + list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) + fcport->n2n_link_reset_cnt = 0; + + if (vha->hw->flags.n2n_fw_acc_sec) { + bool link_bounce = false; + /* + * While authentication app was not running, remote device + * could still try to login with this local port. Let's + * reset the session, reconnect and re-authenticate. + */ + if (qla_delete_n2n_sess_and_wait(vha)) + link_bounce = true; + + /* bounce the link to start login */ + if (!vha->hw->flags.n2n_bigger || link_bounce) { + set_bit(N2N_LINK_RESET, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + } else { + qla2x00_wait_for_hba_online(vha); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + qla2x00_wait_for_hba_online(vha); + } + } else { + list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { + ql_dbg(ql_dbg_edif, vha, 0x2058, + "FCSP - nn %8phN pn %8phN portid=%06x.\n", + fcport->node_name, fcport->port_name, + fcport->d_id.b24); + ql_dbg(ql_dbg_edif, vha, 0xf084, + "%s: se_sess %p / sess %p from port %8phC " + "loop_id %#04x s_id %06x logout %d " + "keep %d els_logo %d disc state %d auth state %d" + "stop state %d\n", + __func__, fcport->se_sess, fcport, + fcport->port_name, fcport->loop_id, + fcport->d_id.b24, fcport->logout_on_delete, + fcport->keep_nport_handle, fcport->send_els_logo, + fcport->disc_state, fcport->edif.auth_state, + fcport->edif.app_stop); + + if (atomic_read(&vha->loop_state) == LOOP_DOWN) + break; + + fcport->login_retry = vha->hw->login_retry_count; + + fcport->edif.app_stop = 0; + fcport->edif.app_sess_online = 0; + + if (fcport->scan_state != QLA_FCPORT_FOUND) + continue; + + if (fcport->port_type == FCT_UNKNOWN && + !fcport->fc4_features) + rval = qla24xx_async_gffid(vha, fcport, true); + + if (!rval && !(fcport->fc4_features & FC4_FF_TARGET || + fcport->port_type & (FCT_TARGET|FCT_NVME_TARGET))) + continue; + + rval = 0; + + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s wwpn %8phC calling qla_edif_reset_auth_wait\n", + __func__, fcport->port_name); + qlt_schedule_sess_for_deletion(fcport); + qla_edif_sa_ctl_init(vha, fcport); + } + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + } + + if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) { + /* mark as active since an app is now present */ + vha->pur_cinfo.enode_flags = ENODE_ACTIVE; + } else { + ql_dbg(ql_dbg_edif, vha, 0x911f, "%s enode already active\n", + __func__); + } + +out: + appreply.host_support_edif = vha->hw->flags.edif_enabled; + appreply.edif_enode_active = vha->pur_cinfo.enode_flags; + appreply.edif_edb_active = vha->e_dbell.db_flags; + appreply.version = EDIF_VERSION1; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); 
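	/*
	 * Annotation added in review -- not part of the upstream 6.6.15 source:
	 * the lines below finish the bsg reply in the pattern used throughout
	 * this file: SET_DID_STATUS() records the SCSI-level result and
	 * sg_copy_from_buffer() copies the vendor reply structure (here
	 * struct app_start_reply) back into the reply payload scatterlist.
	 */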
+ + SET_DID_STATUS(bsg_reply->result, DID_OK); + + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + &appreply, + sizeof(struct app_start_reply)); + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s app start completed with 0x%x\n", + __func__, rval); + + return rval; +} + +/** + * qla_edif_app_stop - app has announced it's exiting. + * @vha: host adapter pointer + * @bsg_job: user space command pointer + * + * Free any in flight messages, clear all doorbell events + * to application. Reject any message relate to security. + */ +static int +qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + struct app_stop appstop; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct fc_port *fcport, *tf; + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &appstop, + sizeof(struct app_stop)); + + ql_dbg(ql_dbg_edif, vha, 0x911d, "%s Stopping APP: app_vid=%x\n", + __func__, appstop.app_info.app_vid); + + /* Call db stop and enode stop functions */ + + /* if we leave this running short waits are operational < 16 secs */ + qla_enode_stop(vha); /* stop enode */ + qla_edb_stop(vha); /* stop db */ + + list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { + if (!(fcport->flags & FCF_FCSP_DEVICE)) + continue; + + if (fcport->flags & FCF_FCSP_DEVICE) { + ql_dbg(ql_dbg_edif, vha, 0xf084, + "%s: sess %p from port %8phC lid %#04x s_id %06x logout %d keep %d els_logo %d\n", + __func__, fcport, + fcport->port_name, fcport->loop_id, fcport->d_id.b24, + fcport->logout_on_delete, fcport->keep_nport_handle, + fcport->send_els_logo); + + if (atomic_read(&vha->loop_state) == LOOP_DOWN) + break; + + fcport->edif.app_stop = 1; + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s wwpn %8phC calling qla_edif_reset_auth_wait\n", + __func__, fcport->port_name); + + fcport->send_els_logo = 1; + qlt_schedule_sess_for_deletion(fcport); + } + } + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + SET_DID_STATUS(bsg_reply->result, DID_OK); + + /* no return interface to app - it assumes we cleaned up ok */ + + return 0; +} + +static int +qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport, + struct app_plogi_reply *appplogireply) +{ + int ret = 0; + + if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n", + __func__, fcport->port_name, fcport->edif.tx_sa_set, + fcport->edif.rx_sa_set); + appplogireply->prli_status = 0; + ret = 1; + } else { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s wwpn %8phC Both SA(s) updated.\n", __func__, + fcport->port_name); + fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0; + fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0; + appplogireply->prli_status = 1; + } + return ret; +} + +/** + * qla_edif_app_authok - authentication by app succeeded. 
Driver can proceed + * with prli + * @vha: host adapter pointer + * @bsg_job: user request + */ +static int +qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + struct auth_complete_cmd appplogiok; + struct app_plogi_reply appplogireply = {0}; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + fc_port_t *fcport = NULL; + port_id_t portid = {0}; + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &appplogiok, + sizeof(struct auth_complete_cmd)); + + /* silent unaligned access warning */ + portid.b.domain = appplogiok.u.d_id.b.domain; + portid.b.area = appplogiok.u.d_id.b.area; + portid.b.al_pa = appplogiok.u.d_id.b.al_pa; + + appplogireply.version = EDIF_VERSION1; + switch (appplogiok.type) { + case PL_TYPE_WWPN: + fcport = qla2x00_find_fcport_by_wwpn(vha, + appplogiok.u.wwpn, 0); + if (!fcport) + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s wwpn lookup failed: %8phC\n", + __func__, appplogiok.u.wwpn); + break; + case PL_TYPE_DID: + fcport = qla2x00_find_fcport_by_pid(vha, &portid); + if (!fcport) + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s d_id lookup failed: %x\n", __func__, + portid.b24); + break; + default: + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s undefined type: %x\n", __func__, + appplogiok.type); + break; + } + + if (!fcport) { + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto errstate_exit; + } + + /* + * if port is online then this is a REKEY operation + * Only do sa update checking + */ + if (atomic_read(&fcport->state) == FCS_ONLINE) { + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s Skipping PRLI complete based on rekey\n", __func__); + appplogireply.prli_status = 1; + SET_DID_STATUS(bsg_reply->result, DID_OK); + qla_edif_app_chk_sa_update(vha, fcport, &appplogireply); + goto errstate_exit; + } + + /* make sure in AUTH_PENDING or else reject */ + if (fcport->disc_state != DSC_LOGIN_AUTH_PEND) { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s wwpn %8phC is not in auth pending state (%x)\n", + __func__, fcport->port_name, fcport->disc_state); + SET_DID_STATUS(bsg_reply->result, DID_OK); + appplogireply.prli_status = 0; + goto errstate_exit; + } + + SET_DID_STATUS(bsg_reply->result, DID_OK); + appplogireply.prli_status = 1; + fcport->edif.authok = 1; + if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n", + __func__, fcport->port_name, fcport->edif.tx_sa_set, + fcport->edif.rx_sa_set); + SET_DID_STATUS(bsg_reply->result, DID_OK); + appplogireply.prli_status = 0; + goto errstate_exit; + + } else { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s wwpn %8phC Both SA(s) updated.\n", __func__, + fcport->port_name); + fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0; + fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0; + } + + if (qla_ini_mode_enabled(vha)) { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s AUTH complete - RESUME with prli for wwpn %8phC\n", + __func__, fcport->port_name); + qla24xx_post_prli_work(vha, fcport); + } + +errstate_exit: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + &appplogireply, + sizeof(struct app_plogi_reply)); + + return 0; +} + +/** + * qla_edif_app_authfail - authentication by app has failed. Driver is given + * notice to tear down current session. 
+ * @vha: host adapter pointer + * @bsg_job: user request + */ +static int +qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + int32_t rval = 0; + struct auth_complete_cmd appplogifail; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + fc_port_t *fcport = NULL; + port_id_t portid = {0}; + + ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app auth fail\n", __func__); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &appplogifail, + sizeof(struct auth_complete_cmd)); + + /* silent unaligned access warning */ + portid.b.domain = appplogifail.u.d_id.b.domain; + portid.b.area = appplogifail.u.d_id.b.area; + portid.b.al_pa = appplogifail.u.d_id.b.al_pa; + + /* + * TODO: edif: app has failed this plogi. Inform driver to + * take any action (if any). + */ + switch (appplogifail.type) { + case PL_TYPE_WWPN: + fcport = qla2x00_find_fcport_by_wwpn(vha, + appplogifail.u.wwpn, 0); + SET_DID_STATUS(bsg_reply->result, DID_OK); + break; + case PL_TYPE_DID: + fcport = qla2x00_find_fcport_by_pid(vha, &portid); + if (!fcport) + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s d_id lookup failed: %x\n", __func__, + portid.b24); + SET_DID_STATUS(bsg_reply->result, DID_OK); + break; + default: + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s undefined type: %x\n", __func__, + appplogifail.type); + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + rval = -1; + break; + } + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s fcport is 0x%p\n", __func__, fcport); + + if (fcport) { + /* set/reset edif values and flags */ + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s reset the auth process - %8phC, loopid=%x portid=%06x.\n", + __func__, fcport->port_name, fcport->loop_id, fcport->d_id.b24); + + if (qla_ini_mode_enabled(fcport->vha)) { + fcport->send_els_logo = 1; + qlt_schedule_sess_for_deletion(fcport); + } + } + + return rval; +} + +/** + * qla_edif_app_getfcinfo - app would like to read session info (wwpn, nportid, + * [initiator|target] mode. It can specific session with specific nport id or + * all sessions. 
+ * @vha: host adapter pointer + * @bsg_job: user request pointer + */ +static int +qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + int32_t rval = 0; + int32_t pcnt = 0; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct app_pinfo_req app_req; + struct app_pinfo_reply *app_reply; + port_id_t tdid; + + ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app get fcinfo\n", __func__); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &app_req, + sizeof(struct app_pinfo_req)); + + app_reply = kzalloc((sizeof(struct app_pinfo_reply) + + sizeof(struct app_pinfo) * app_req.num_ports), GFP_KERNEL); + + if (!app_reply) { + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + rval = -1; + } else { + struct fc_port *fcport = NULL, *tf; + + app_reply->version = EDIF_VERSION1; + + list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { + if (!(fcport->flags & FCF_FCSP_DEVICE)) + continue; + + tdid.b.domain = app_req.remote_pid.domain; + tdid.b.area = app_req.remote_pid.area; + tdid.b.al_pa = app_req.remote_pid.al_pa; + + ql_dbg(ql_dbg_edif, vha, 0x2058, + "APP request entry - portid=%06x.\n", tdid.b24); + + /* Ran out of space */ + if (pcnt >= app_req.num_ports) + break; + + if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24) + continue; + + if (!N2N_TOPO(vha->hw)) { + if (fcport->scan_state != QLA_FCPORT_FOUND) + continue; + + if (fcport->port_type == FCT_UNKNOWN && + !fcport->fc4_features) + rval = qla24xx_async_gffid(vha, fcport, + true); + + if (!rval && + !(fcport->fc4_features & FC4_FF_TARGET || + fcport->port_type & + (FCT_TARGET | FCT_NVME_TARGET))) + continue; + } + + rval = 0; + + app_reply->ports[pcnt].version = EDIF_VERSION1; + app_reply->ports[pcnt].remote_type = + VND_CMD_RTYPE_UNKNOWN; + if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET)) + app_reply->ports[pcnt].remote_type |= + VND_CMD_RTYPE_TARGET; + if (fcport->port_type & (FCT_NVME_INITIATOR | FCT_INITIATOR)) + app_reply->ports[pcnt].remote_type |= + VND_CMD_RTYPE_INITIATOR; + + app_reply->ports[pcnt].remote_pid = fcport->d_id; + + ql_dbg(ql_dbg_edif, vha, 0x2058, + "Found FC_SP fcport - nn %8phN pn %8phN pcnt %d portid=%06x secure %d.\n", + fcport->node_name, fcport->port_name, pcnt, + fcport->d_id.b24, fcport->flags & FCF_FCSP_DEVICE); + + switch (fcport->edif.auth_state) { + case VND_CMD_AUTH_STATE_ELS_RCVD: + if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) { + fcport->edif.auth_state = VND_CMD_AUTH_STATE_NEEDED; + app_reply->ports[pcnt].auth_state = + VND_CMD_AUTH_STATE_NEEDED; + } else { + app_reply->ports[pcnt].auth_state = + VND_CMD_AUTH_STATE_ELS_RCVD; + } + break; + default: + app_reply->ports[pcnt].auth_state = fcport->edif.auth_state; + break; + } + + memcpy(app_reply->ports[pcnt].remote_wwpn, + fcport->port_name, 8); + + app_reply->ports[pcnt].remote_state = + (atomic_read(&fcport->state) == + FCS_ONLINE ? 
1 : 0); + + pcnt++; + + if (tdid.b24 != 0) + break; + } + app_reply->port_count = pcnt; + SET_DID_STATUS(bsg_reply->result, DID_OK); + } + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + app_reply, + sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * pcnt); + + kfree(app_reply); + + return rval; +} + +/** + * qla_edif_app_getstats - app would like to read various statistics info + * @vha: host adapter pointer + * @bsg_job: user request + */ +static int32_t +qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + int32_t rval = 0; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + uint32_t size; + + struct app_sinfo_req app_req; + struct app_stats_reply *app_reply; + uint32_t pcnt = 0; + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &app_req, + sizeof(struct app_sinfo_req)); + if (app_req.num_ports == 0) { + ql_dbg(ql_dbg_async, vha, 0x911d, + "%s app did not indicate number of ports to return\n", + __func__); + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + rval = -1; + } + + size = sizeof(struct app_stats_reply) + + (sizeof(struct app_sinfo) * app_req.num_ports); + + app_reply = kzalloc(size, GFP_KERNEL); + if (!app_reply) { + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + rval = -1; + } else { + struct fc_port *fcport = NULL, *tf; + + app_reply->version = EDIF_VERSION1; + + list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { + if (fcport->edif.enable) { + if (pcnt > app_req.num_ports) + break; + + app_reply->elem[pcnt].rekey_count = + fcport->edif.rekey_cnt; + app_reply->elem[pcnt].tx_bytes = + fcport->edif.tx_bytes; + app_reply->elem[pcnt].rx_bytes = + fcport->edif.rx_bytes; + + memcpy(app_reply->elem[pcnt].remote_wwpn, + fcport->port_name, 8); + + pcnt++; + } + } + app_reply->elem_count = pcnt; + SET_DID_STATUS(bsg_reply->result, DID_OK); + } + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, app_reply, + sizeof(struct app_stats_reply) + (sizeof(struct app_sinfo) * pcnt)); + + kfree(app_reply); + + return rval; +} + +static int32_t +qla_edif_ack(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + struct fc_port *fcport; + struct aen_complete_cmd ack; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &ack, sizeof(ack)); + + ql_dbg(ql_dbg_edif, vha, 0x70cf, + "%s: %06x event_code %x\n", + __func__, ack.port_id.b24, ack.event_code); + + fcport = qla2x00_find_fcport_by_pid(vha, &ack.port_id); + SET_DID_STATUS(bsg_reply->result, DID_OK); + + if (!fcport) { + ql_dbg(ql_dbg_edif, vha, 0x70cf, + "%s: unable to find fcport %06x \n", + __func__, ack.port_id.b24); + return 0; + } + + switch (ack.event_code) { + case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN: + fcport->edif.sess_down_acked = 1; + break; + default: + break; + } + return 0; +} + +static int qla_edif_consume_dbell(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + u32 sg_skip, reply_payload_len; + bool keep; + struct edb_node *dbnode = NULL; + struct edif_app_dbell ap; + int dat_size = 0; + + sg_skip = 0; + reply_payload_len = bsg_job->reply_payload.payload_len; + + while ((reply_payload_len - sg_skip) >= sizeof(struct edb_node)) { + dbnode = qla_edb_getnext(vha); 
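		/*
		 * Annotation added in review -- not part of the upstream 6.6.15
		 * source: qla_edb_getnext() pops the oldest doorbell event off
		 * vha->e_dbell.head under db_lock and returns NULL once the
		 * queue is drained, which ends this loop.  Each node is
		 * repackaged as a struct edif_app_dbell, copied into the bsg
		 * reply payload at offset sg_skip via sg_copy_buffer() (for
		 * recognized event types), and then freed.
		 */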
+ if (dbnode) { + keep = true; + dat_size = 0; + ap.event_code = dbnode->ntype; + switch (dbnode->ntype) { + case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN: + case VND_CMD_AUTH_STATE_NEEDED: + ap.port_id = dbnode->u.plogi_did; + dat_size += sizeof(ap.port_id); + break; + case VND_CMD_AUTH_STATE_ELS_RCVD: + ap.port_id = dbnode->u.els_sid; + dat_size += sizeof(ap.port_id); + break; + case VND_CMD_AUTH_STATE_SAUPDATE_COMPL: + ap.port_id = dbnode->u.sa_aen.port_id; + memcpy(&ap.event_data, &dbnode->u, + sizeof(struct edif_sa_update_aen)); + dat_size += sizeof(struct edif_sa_update_aen); + break; + default: + keep = false; + ql_log(ql_log_warn, vha, 0x09102, + "%s unknown DB type=%d %p\n", + __func__, dbnode->ntype, dbnode); + break; + } + ap.event_data_size = dat_size; + /* 8 = sizeof(ap.event_code + ap.event_data_size) */ + dat_size += 8; + if (keep) + sg_skip += sg_copy_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + &ap, dat_size, sg_skip, false); + + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s Doorbell consumed : type=%d %p\n", + __func__, dbnode->ntype, dbnode); + + kfree(dbnode); + } else { + break; + } + } + + SET_DID_STATUS(bsg_reply->result, DID_OK); + bsg_reply->reply_payload_rcv_len = sg_skip; + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + + return 0; +} + +static void __qla_edif_dbell_bsg_done(scsi_qla_host_t *vha, struct bsg_job *bsg_job, + u32 delay) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + + /* small sleep for doorbell events to accumulate */ + if (delay) + msleep(delay); + + qla_edif_consume_dbell(vha, bsg_job); + + bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); +} + +static void qla_edif_dbell_bsg_done(scsi_qla_host_t *vha) +{ + unsigned long flags; + struct bsg_job *prev_bsg_job = NULL; + + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + if (vha->e_dbell.dbell_bsg_job) { + prev_bsg_job = vha->e_dbell.dbell_bsg_job; + vha->e_dbell.dbell_bsg_job = NULL; + } + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + if (prev_bsg_job) + __qla_edif_dbell_bsg_done(vha, prev_bsg_job, 0); +} + +static int +qla_edif_dbell_bsg(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + unsigned long flags; + bool return_bsg = false; + + /* flush previous dbell bsg */ + qla_edif_dbell_bsg_done(vha); + + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + if (list_empty(&vha->e_dbell.head) && DBELL_ACTIVE(vha)) { + /* + * when the next db event happens, bsg_job will return. + * Otherwise, timer will return it. 
+ */ + vha->e_dbell.dbell_bsg_job = bsg_job; + vha->e_dbell.bsg_expire = jiffies + 10 * HZ; + } else { + return_bsg = true; + } + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + if (return_bsg) + __qla_edif_dbell_bsg_done(vha, bsg_job, 1); + + return 0; +} + +int32_t +qla_edif_app_mgmt(struct bsg_job *bsg_job) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct app_id appcheck; + bool done = true; + int32_t rval = 0; + uint32_t vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; + u32 level = ql_dbg_edif; + + /* doorbell is high traffic */ + if (vnd_sc == QL_VND_SC_READ_DBELL) + level = 0; + + ql_dbg(level, vha, 0x911d, "%s vnd subcmd=%x\n", + __func__, vnd_sc); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &appcheck, + sizeof(struct app_id)); + + if (!vha->hw->flags.edif_enabled || + test_bit(VPORT_DELETE, &vha->dpc_flags)) { + ql_dbg(level, vha, 0x911d, + "%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n", + __func__, bsg_job, vha->dpc_flags); + + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + } + + if (!qla_edif_app_check(vha, appcheck)) { + ql_dbg(level, vha, 0x911d, + "%s app checked failed.\n", + __func__); + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + } + + switch (vnd_sc) { + case QL_VND_SC_SA_UPDATE: + done = false; + rval = qla24xx_sadb_update(bsg_job); + break; + case QL_VND_SC_APP_START: + rval = qla_edif_app_start(vha, bsg_job); + break; + case QL_VND_SC_APP_STOP: + rval = qla_edif_app_stop(vha, bsg_job); + break; + case QL_VND_SC_AUTH_OK: + rval = qla_edif_app_authok(vha, bsg_job); + break; + case QL_VND_SC_AUTH_FAIL: + rval = qla_edif_app_authfail(vha, bsg_job); + break; + case QL_VND_SC_GET_FCINFO: + rval = qla_edif_app_getfcinfo(vha, bsg_job); + break; + case QL_VND_SC_GET_STATS: + rval = qla_edif_app_getstats(vha, bsg_job); + break; + case QL_VND_SC_AEN_COMPLETE: + rval = qla_edif_ack(vha, bsg_job); + break; + case QL_VND_SC_READ_DBELL: + rval = qla_edif_dbell_bsg(vha, bsg_job); + done = false; + break; + default: + ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n", + __func__, + bsg_request->rqst_data.h_vendor.vendor_cmd[1]); + rval = EXT_STATUS_INVALID_PARAM; + done = false; + break; + } + +done: + if (done) { + ql_dbg(level, vha, 0x7009, + "%s: %d bsg ptr done %p\n", __func__, __LINE__, bsg_job); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + } + + return rval; +} + +static struct edif_sa_ctl * +qla_edif_add_sa_ctl(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame, + int dir) +{ + struct edif_sa_ctl *sa_ctl; + struct qla_sa_update_frame *sap; + int index = sa_frame->fast_sa_index; + unsigned long flags = 0; + + sa_ctl = kzalloc(sizeof(*sa_ctl), GFP_KERNEL); + if (!sa_ctl) { + /* couldn't get space */ + ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, + "unable to allocate SA CTL\n"); + return NULL; + } + + /* + * need to allocate sa_index here and save it + * in both sa_ctl->index and sa_frame->fast_sa_index; + * If alloc fails then delete sa_ctl and return NULL + */ + INIT_LIST_HEAD(&sa_ctl->next); + sap = &sa_ctl->sa_frame; + *sap = *sa_frame; + sa_ctl->index = index; + sa_ctl->fcport = fcport; + sa_ctl->flags = 0; + sa_ctl->state = 0L; + ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, + "%s: Added sa_ctl %p, index 
%d, state 0x%lx\n", + __func__, sa_ctl, sa_ctl->index, sa_ctl->state); + spin_lock_irqsave(&fcport->edif.sa_list_lock, flags); + if (dir == SAU_FLG_TX) + list_add_tail(&sa_ctl->next, &fcport->edif.tx_sa_list); + else + list_add_tail(&sa_ctl->next, &fcport->edif.rx_sa_list); + spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags); + + return sa_ctl; +} + +void +qla_edif_flush_sa_ctl_lists(fc_port_t *fcport) +{ + struct edif_sa_ctl *sa_ctl, *tsa_ctl; + unsigned long flags = 0; + + spin_lock_irqsave(&fcport->edif.sa_list_lock, flags); + + list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.tx_sa_list, + next) { + list_del(&sa_ctl->next); + kfree(sa_ctl); + } + + list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.rx_sa_list, + next) { + list_del(&sa_ctl->next); + kfree(sa_ctl); + } + + spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags); +} + +struct edif_sa_ctl * +qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, int index, int dir) +{ + struct edif_sa_ctl *sa_ctl, *tsa_ctl; + struct list_head *sa_list; + + if (dir == SAU_FLG_TX) + sa_list = &fcport->edif.tx_sa_list; + else + sa_list = &fcport->edif.rx_sa_list; + + list_for_each_entry_safe(sa_ctl, tsa_ctl, sa_list, next) { + if (test_bit(EDIF_SA_CTL_USED, &sa_ctl->state) && + sa_ctl->index == index) + return sa_ctl; + } + return NULL; +} + +/* add the sa to the correct list */ +static int +qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport, + struct qla_sa_update_frame *sa_frame) +{ + struct edif_sa_ctl *sa_ctl = NULL; + int dir; + uint16_t sa_index; + + dir = (sa_frame->flags & SAU_FLG_TX); + + /* map the spi to an sa_index */ + sa_index = qla_edif_sadb_get_sa_index(fcport, sa_frame); + if (sa_index == RX_DELETE_NO_EDIF_SA_INDEX) { + /* process rx delete */ + ql_dbg(ql_dbg_edif, fcport->vha, 0x3063, + "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n", + __func__, fcport->loop_id, sa_frame->spi); + + /* build and send the aen */ + fcport->edif.rx_sa_set = 1; + fcport->edif.rx_sa_pending = 0; + qla_edb_eventcreate(fcport->vha, + VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + QL_VND_SA_STAT_SUCCESS, + QL_VND_RX_SA_KEY, fcport); + + /* force a return of good bsg status; */ + return RX_DELETE_NO_EDIF_SA_INDEX; + } else if (sa_index == INVALID_EDIF_SA_INDEX) { + ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, + "%s: Failed to get sa_index for spi 0x%x, dir: %d\n", + __func__, sa_frame->spi, dir); + return INVALID_EDIF_SA_INDEX; + } + + ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, + "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n", + __func__, sa_index, sa_frame->spi, dir, fcport->loop_id); + + /* This is a local copy of sa_frame. 
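+	 * The fast_sa_index saved below travels with the caller's copy of the
+	 * frame and is picked up when the SA_UPDATE IOCB is built.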
*/ + sa_frame->fast_sa_index = sa_index; + /* create the sa_ctl */ + sa_ctl = qla_edif_add_sa_ctl(fcport, sa_frame, dir); + if (!sa_ctl) { + ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, + "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n", + __func__, sa_frame->spi, dir, sa_index); + return -1; + } + + set_bit(EDIF_SA_CTL_USED, &sa_ctl->state); + + if (dir == SAU_FLG_TX) + fcport->edif.tx_rekey_cnt++; + else + fcport->edif.rx_rekey_cnt++; + + ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, + "%s: Found sa_ctl %p, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n", + __func__, sa_ctl, sa_ctl->index, sa_ctl->state, + fcport->edif.tx_rekey_cnt, + fcport->edif.rx_rekey_cnt, fcport->loop_id); + + return 0; +} + +#define QLA_SA_UPDATE_FLAGS_RX_KEY 0x0 +#define QLA_SA_UPDATE_FLAGS_TX_KEY 0x2 +#define EDIF_MSLEEP_INTERVAL 100 +#define EDIF_RETRY_COUNT 50 + +int +qla24xx_sadb_update(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + fc_port_t *fcport = NULL; + srb_t *sp = NULL; + struct edif_list_entry *edif_entry = NULL; + int found = 0; + int rval = 0; + int result = 0, cnt; + struct qla_sa_update_frame sa_frame; + struct srb_iocb *iocb_cmd; + port_id_t portid; + + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d, + "%s entered, vha: 0x%p\n", __func__, vha); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &sa_frame, + sizeof(struct qla_sa_update_frame)); + + /* Check if host is online */ + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x70a1, "Host is not online\n"); + rval = -EIO; + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + } + + if (DBELL_INACTIVE(vha)) { + ql_log(ql_log_warn, vha, 0x70a1, "App not started\n"); + rval = -EIO; + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + } + + /* silent unaligned access warning */ + portid.b.domain = sa_frame.port_id.b.domain; + portid.b.area = sa_frame.port_id.b.area; + portid.b.al_pa = sa_frame.port_id.b.al_pa; + + fcport = qla2x00_find_fcport_by_pid(vha, &portid); + if (fcport) { + found = 1; + if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY) + fcport->edif.tx_bytes = 0; + if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_RX_KEY) + fcport->edif.rx_bytes = 0; + } + + if (!found) { + ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n", + sa_frame.port_id.b24); + rval = -EINVAL; + SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT); + goto done; + } + + /* make sure the nport_handle is valid */ + if (fcport->loop_id == FC_NO_LOOP_ID) { + ql_dbg(ql_dbg_edif, vha, 0x70e1, + "%s: %8phN lid=FC_NO_LOOP_ID, spi: 0x%x, DS %d, returning NO_CONNECT\n", + __func__, fcport->port_name, sa_frame.spi, + fcport->disc_state); + rval = -EINVAL; + SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT); + goto done; + } + + /* allocate and queue an sa_ctl */ + result = qla24xx_check_sadb_avail_slot(bsg_job, fcport, &sa_frame); + + /* failure of bsg */ + if (result == INVALID_EDIF_SA_INDEX) { + ql_dbg(ql_dbg_edif, vha, 0x70e1, + "%s: %8phN, skipping update.\n", + __func__, fcport->port_name); + rval = -EINVAL; + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + + /* rx delete failure */ + } else if (result == RX_DELETE_NO_EDIF_SA_INDEX) { + ql_dbg(ql_dbg_edif, vha, 0x70e1, + "%s: %8phN, skipping rx delete.\n", + __func__, fcport->port_name); + SET_DID_STATUS(bsg_reply->result, DID_OK); + goto done; + } + + ql_dbg(ql_dbg_edif, vha, 0x70e1, + "%s: 
%8phN, sa_index in sa_frame: %d flags %xh\n", + __func__, fcport->port_name, sa_frame.fast_sa_index, + sa_frame.flags); + + /* looking for rx index and delete */ + if (((sa_frame.flags & SAU_FLG_TX) == 0) && + (sa_frame.flags & SAU_FLG_INV)) { + uint16_t nport_handle = fcport->loop_id; + uint16_t sa_index = sa_frame.fast_sa_index; + + /* + * make sure we have an existing rx key, otherwise just process + * this as a straight delete just like TX + * This is NOT a normal case, it indicates an error recovery or key cleanup + * by the ipsec code above us. + */ + edif_entry = qla_edif_list_find_sa_index(fcport, fcport->loop_id); + if (!edif_entry) { + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing delete for sa_index 0x%x\n", + __func__, fcport->loop_id, sa_index); + goto force_rx_delete; + } + + /* + * if we have a forced delete for rx, remove the sa_index from the edif list + * and proceed with normal delete. The rx delay timer should not be running + */ + if ((sa_frame.flags & SAU_FLG_FORCE_DELETE) == SAU_FLG_FORCE_DELETE) { + qla_edif_list_delete_sa_index(fcport, edif_entry); + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, forcing DELETE\n", + __func__, fcport->loop_id, sa_index); + kfree(edif_entry); + goto force_rx_delete; + } + + /* + * delayed rx delete + * + * if delete_sa_index is not invalid then there is already + * a delayed index in progress, return bsg bad status + */ + if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) { + struct edif_sa_ctl *sa_ctl; + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: delete for lid 0x%x, delete_sa_index %d is pending\n", + __func__, edif_entry->handle, edif_entry->delete_sa_index); + + /* free up the sa_ctl that was allocated with the sa_index */ + sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, sa_index, + (sa_frame.flags & SAU_FLG_TX)); + if (sa_ctl) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: freeing sa_ctl for index %d\n", + __func__, sa_ctl->index); + qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index); + } + + /* release the sa_index */ + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: freeing sa_index %d, nph: 0x%x\n", + __func__, sa_index, nport_handle); + qla_edif_sadb_delete_sa_index(fcport, nport_handle, sa_index); + + rval = -EINVAL; + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + } + + fcport->edif.rekey_cnt++; + + /* configure and start the rx delay timer */ + edif_entry->fcport = fcport; + edif_entry->timer.expires = jiffies + RX_DELAY_DELETE_TIMEOUT * HZ; + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n", + __func__, edif_entry, sa_index, nport_handle); + + /* + * Start the timer when we queue the delayed rx delete. + * This is an activity timer that goes off if we have not + * received packets with the new sa_index + */ + add_timer(&edif_entry->timer); + + /* + * sa_delete for rx key with an active rx key including this one + * add the delete rx sa index to the hash so we can look for it + * in the rsp queue. Do this after making any changes to the + * edif_entry as part of the rx delete. + */ + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: delete sa_index %d, lid 0x%x to edif_list. 
bsg done ptr %p\n", + __func__, sa_index, nport_handle, bsg_job); + + edif_entry->delete_sa_index = sa_index; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->result = DID_OK << 16; + + goto done; + + /* + * rx index and update + * add the index to the list and continue with normal update + */ + } else if (((sa_frame.flags & SAU_FLG_TX) == 0) && + ((sa_frame.flags & SAU_FLG_INV) == 0)) { + /* sa_update for rx key */ + uint32_t nport_handle = fcport->loop_id; + uint16_t sa_index = sa_frame.fast_sa_index; + int result; + + /* + * add the update rx sa index to the hash so we can look for it + * in the rsp queue and continue normally + */ + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: adding update sa_index %d, lid 0x%x to edif_list\n", + __func__, sa_index, nport_handle); + + result = qla_edif_list_add_sa_update_index(fcport, sa_index, + nport_handle); + if (result) { + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n", + __func__, sa_index, nport_handle); + } + } + if (sa_frame.flags & SAU_FLG_GMAC_MODE) + fcport->edif.aes_gmac = 1; + else + fcport->edif.aes_gmac = 0; + +force_rx_delete: + /* + * sa_update for both rx and tx keys, sa_delete for tx key + * immediately process the request + */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + rval = -ENOMEM; + SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); + goto done; + } + + sp->type = SRB_SA_UPDATE; + sp->name = "bsg_sa_update"; + sp->u.bsg_job = bsg_job; + /* sp->free = qla2x00_bsg_sp_free; */ + sp->free = qla2x00_rel_sp; + sp->done = qla2x00_bsg_job_done; + iocb_cmd = &sp->u.iocb_cmd; + iocb_cmd->u.sa_update.sa_frame = sa_frame; + cnt = 0; +retry: + rval = qla2x00_start_sp(sp); + switch (rval) { + case QLA_SUCCESS: + break; + case EAGAIN: + msleep(EDIF_MSLEEP_INTERVAL); + cnt++; + if (cnt < EDIF_RETRY_COUNT) + goto retry; + + fallthrough; + default: + ql_log(ql_dbg_edif, vha, 0x70e3, + "%s qla2x00_start_sp failed=%d.\n", + __func__, rval); + + qla2x00_rel_sp(sp); + rval = -EIO; + SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); + goto done; + } + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: %s sent, hdl=%x, portid=%06x.\n", + __func__, sp->name, sp->handle, fcport->d_id.b24); + + fcport->edif.rekey_cnt++; + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + SET_DID_STATUS(bsg_reply->result, DID_OK); + + return 0; + +/* + * send back error status + */ +done: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s:status: FAIL, result: 0x%x, bsg ptr done %p\n", + __func__, bsg_reply->result, bsg_job); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return 0; +} + +static void +qla_enode_free(scsi_qla_host_t *vha, struct enode *node) +{ + node->ntype = N_UNDEF; + kfree(node); +} + +/** + * qla_enode_init - initialize enode structs & lock + * @vha: host adapter pointer + * + * should only be called when driver attaching + */ +void +qla_enode_init(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + char name[32]; + + if (vha->pur_cinfo.enode_flags == ENODE_ACTIVE) { + /* list still active - error */ + ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode still active\n", + __func__); + return; + } + + /* initialize lock which protects pur_core & init list */ + spin_lock_init(&vha->pur_cinfo.pur_lock); + INIT_LIST_HEAD(&vha->pur_cinfo.head); + + snprintf(name, sizeof(name), "%s_%d_purex", QLA2XXX_DRIVER_NAME, + ha->pdev->device); +} + +/** + * qla_enode_stop - stop and clear and enode data 
+ * @vha: host adapter pointer + * + * called when app notified it is exiting + */ +void +qla_enode_stop(scsi_qla_host_t *vha) +{ + unsigned long flags; + struct enode *node, *q; + + if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) { + /* doorbell list not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s enode not active\n", __func__); + return; + } + + /* grab lock so list doesn't move */ + spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); + + vha->pur_cinfo.enode_flags &= ~ENODE_ACTIVE; /* mark it not active */ + + /* hopefully this is a null list at this point */ + list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) { + ql_dbg(ql_dbg_edif, vha, 0x910f, + "%s freeing enode type=%x, cnt=%x\n", __func__, node->ntype, + node->dinfo.nodecnt); + list_del_init(&node->list); + qla_enode_free(vha, node); + } + spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); +} + +static void qla_enode_clear(scsi_qla_host_t *vha, port_id_t portid) +{ + unsigned long flags; + struct enode *e, *tmp; + struct purexevent *purex; + LIST_HEAD(enode_list); + + if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) { + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s enode not active\n", __func__); + return; + } + spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); + list_for_each_entry_safe(e, tmp, &vha->pur_cinfo.head, list) { + purex = &e->u.purexinfo; + if (purex->pur_info.pur_sid.b24 == portid.b24) { + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s free ELS sid=%06x. xchg %x, nb=%xh\n", + __func__, portid.b24, + purex->pur_info.pur_rx_xchg_address, + purex->pur_info.pur_bytes_rcvd); + + list_del_init(&e->list); + list_add_tail(&e->list, &enode_list); + } + } + spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); + + list_for_each_entry_safe(e, tmp, &enode_list, list) { + list_del_init(&e->list); + qla_enode_free(vha, e); + } +} + +/* + * allocate enode struct and populate buffer + * returns: enode pointer with buffers + * NULL on error + */ +static struct enode * +qla_enode_alloc(scsi_qla_host_t *vha, uint32_t ntype) +{ + struct enode *node; + struct purexevent *purex; + + node = kzalloc(RX_ELS_SIZE, GFP_ATOMIC); + if (!node) + return NULL; + + purex = &node->u.purexinfo; + purex->msgp = (u8 *)(node + 1); + purex->msgp_len = ELS_MAX_PAYLOAD; + + node->ntype = ntype; + INIT_LIST_HEAD(&node->list); + return node; +} + +static void +qla_enode_add(scsi_qla_host_t *vha, struct enode *ptr) +{ + unsigned long flags; + + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x9109, + "%s add enode for type=%x, cnt=%x\n", + __func__, ptr->ntype, ptr->dinfo.nodecnt); + + spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); + list_add_tail(&ptr->list, &vha->pur_cinfo.head); + spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); + + return; +} + +static struct enode * +qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2) +{ + struct enode *node_rtn = NULL; + struct enode *list_node, *q; + unsigned long flags; + uint32_t sid; + struct purexevent *purex; + + /* secure the list from moving under us */ + spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); + + list_for_each_entry_safe(list_node, q, &vha->pur_cinfo.head, list) { + + /* node type determines what p1 and p2 are */ + purex = &list_node->u.purexinfo; + sid = p1; + + if (purex->pur_info.pur_sid.b24 == sid) { + /* found it and its complete */ + node_rtn = list_node; + list_del(&list_node->list); + break; + } + } + + spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); + + return node_rtn; +} + +/** + * qla_pur_get_pending - read/return 
authentication message sent + * from remote port + * @vha: host adapter pointer + * @fcport: session pointer + * @bsg_job: user request where the message is copy to. + */ +static int +qla_pur_get_pending(scsi_qla_host_t *vha, fc_port_t *fcport, + struct bsg_job *bsg_job) +{ + struct enode *ptr; + struct purexevent *purex; + struct qla_bsg_auth_els_reply *rpl = + (struct qla_bsg_auth_els_reply *)bsg_job->reply; + + bsg_job->reply_len = sizeof(*rpl); + + ptr = qla_enode_find(vha, N_PUREX, fcport->d_id.b24, PUR_GET); + if (!ptr) { + ql_dbg(ql_dbg_edif, vha, 0x9111, + "%s no enode data found for %8phN sid=%06x\n", + __func__, fcport->port_name, fcport->d_id.b24); + SET_DID_STATUS(rpl->r.result, DID_IMM_RETRY); + return -EIO; + } + + /* + * enode is now off the linked list and is ours to deal with + */ + purex = &ptr->u.purexinfo; + + /* Copy info back to caller */ + rpl->rx_xchg_address = purex->pur_info.pur_rx_xchg_address; + + SET_DID_STATUS(rpl->r.result, DID_OK); + rpl->r.reply_payload_rcv_len = + sg_pcopy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, purex->msgp, + purex->pur_info.pur_bytes_rcvd, 0); + + /* data copy / passback completed - destroy enode */ + qla_enode_free(vha, ptr); + + return 0; +} + +/* it is assume qpair lock is held */ +static int +qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp, + struct qla_els_pt_arg *a) +{ + struct els_entry_24xx *els_iocb; + + els_iocb = __qla2x00_alloc_iocbs(qp, NULL); + if (!els_iocb) { + ql_log(ql_log_warn, vha, 0x700c, + "qla2x00_alloc_iocbs failed.\n"); + return QLA_FUNCTION_FAILED; + } + + qla_els_pt_iocb(vha, els_iocb, a); + + ql_dbg(ql_dbg_edif, vha, 0x0183, + "Sending ELS reject ox_id %04x s:%06x -> d:%06x\n", + a->ox_id, a->sid.b24, a->did.b24); + ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185, + vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c)); + /* flush iocb to mem before notifying hw doorbell */ + wmb(); + qla2x00_start_iocbs(vha, qp->req); + return 0; +} + +void +qla_edb_init(scsi_qla_host_t *vha) +{ + if (DBELL_ACTIVE(vha)) { + /* list already init'd - error */ + ql_dbg(ql_dbg_edif, vha, 0x09102, + "edif db already initialized, cannot reinit\n"); + return; + } + + /* initialize lock which protects doorbell & init list */ + spin_lock_init(&vha->e_dbell.db_lock); + INIT_LIST_HEAD(&vha->e_dbell.head); +} + +static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid) +{ + unsigned long flags; + struct edb_node *e, *tmp; + port_id_t sid; + LIST_HEAD(edb_list); + + if (DBELL_INACTIVE(vha)) { + /* doorbell list not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s doorbell not enabled\n", __func__); + return; + } + + /* grab lock so list doesn't move */ + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + list_for_each_entry_safe(e, tmp, &vha->e_dbell.head, list) { + switch (e->ntype) { + case VND_CMD_AUTH_STATE_NEEDED: + case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN: + sid = e->u.plogi_did; + break; + case VND_CMD_AUTH_STATE_ELS_RCVD: + sid = e->u.els_sid; + break; + case VND_CMD_AUTH_STATE_SAUPDATE_COMPL: + /* app wants to see this */ + continue; + default: + ql_log(ql_log_warn, vha, 0x09102, + "%s unknown node type: %x\n", __func__, e->ntype); + sid.b24 = 0; + break; + } + if (sid.b24 == portid.b24) { + ql_dbg(ql_dbg_edif, vha, 0x910f, + "%s free doorbell event : node type = %x %p\n", + __func__, e->ntype, e); + list_del_init(&e->list); + list_add_tail(&e->list, &edb_list); + } + } + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + list_for_each_entry_safe(e, tmp, 
&edb_list, list) + qla_edb_node_free(vha, e); +} + +/* function called when app is stopping */ + +void +qla_edb_stop(scsi_qla_host_t *vha) +{ + unsigned long flags; + struct edb_node *node, *q; + + if (DBELL_INACTIVE(vha)) { + /* doorbell list not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s doorbell not enabled\n", __func__); + return; + } + + /* grab lock so list doesn't move */ + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + + vha->e_dbell.db_flags &= ~EDB_ACTIVE; /* mark it not active */ + /* hopefully this is a null list at this point */ + list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) { + ql_dbg(ql_dbg_edif, vha, 0x910f, + "%s freeing edb_node type=%x\n", + __func__, node->ntype); + qla_edb_node_free(vha, node); + } + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + qla_edif_dbell_bsg_done(vha); +} + +static struct edb_node * +qla_edb_node_alloc(scsi_qla_host_t *vha, uint32_t ntype) +{ + struct edb_node *node; + + node = kzalloc(sizeof(*node), GFP_ATOMIC); + if (!node) { + /* couldn't get space */ + ql_dbg(ql_dbg_edif, vha, 0x9100, + "edb node unable to be allocated\n"); + return NULL; + } + + node->ntype = ntype; + INIT_LIST_HEAD(&node->list); + return node; +} + +/* adds a already allocated enode to the linked list */ +static bool +qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr) +{ + unsigned long flags; + + if (DBELL_INACTIVE(vha)) { + /* doorbell list not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s doorbell not enabled\n", __func__); + return false; + } + + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + list_add_tail(&ptr->list, &vha->e_dbell.head); + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + return true; +} + +/* adds event to doorbell list */ +void +qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype, + uint32_t data, uint32_t data2, fc_port_t *sfcport) +{ + struct edb_node *edbnode; + fc_port_t *fcport = sfcport; + port_id_t id; + + if (!vha->hw->flags.edif_enabled) { + /* edif not enabled */ + return; + } + + if (DBELL_INACTIVE(vha)) { + if (fcport) + fcport->edif.auth_state = dbtype; + /* doorbell list not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s doorbell not enabled (type=%d\n", __func__, dbtype); + return; + } + + edbnode = qla_edb_node_alloc(vha, dbtype); + if (!edbnode) { + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s unable to alloc db node\n", __func__); + return; + } + + if (!fcport) { + id.b.domain = (data >> 16) & 0xff; + id.b.area = (data >> 8) & 0xff; + id.b.al_pa = data & 0xff; + ql_dbg(ql_dbg_edif, vha, 0x09222, + "%s: Arrived s_id: %06x\n", __func__, + id.b24); + fcport = qla2x00_find_fcport_by_pid(vha, &id); + if (!fcport) { + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s can't find fcport for sid= 0x%x - ignoring\n", + __func__, id.b24); + kfree(edbnode); + return; + } + } + + /* populate the edb node */ + switch (dbtype) { + case VND_CMD_AUTH_STATE_NEEDED: + case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN: + edbnode->u.plogi_did.b24 = fcport->d_id.b24; + break; + case VND_CMD_AUTH_STATE_ELS_RCVD: + edbnode->u.els_sid.b24 = fcport->d_id.b24; + break; + case VND_CMD_AUTH_STATE_SAUPDATE_COMPL: + edbnode->u.sa_aen.port_id = fcport->d_id; + edbnode->u.sa_aen.status = data; + edbnode->u.sa_aen.key_type = data2; + edbnode->u.sa_aen.version = EDIF_VERSION1; + break; + default: + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s unknown type: %x\n", __func__, dbtype); + kfree(edbnode); + edbnode = NULL; + break; + } + + if (edbnode) { + if (!qla_edb_node_add(vha, edbnode)) { + ql_dbg(ql_dbg_edif, vha, 
0x09102, + "%s unable to add dbnode\n", __func__); + kfree(edbnode); + return; + } + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode); + qla_edif_dbell_bsg_done(vha); + if (fcport) + fcport->edif.auth_state = dbtype; + } +} + +void +qla_edif_timer(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) { + if (DBELL_INACTIVE(vha) && + ha->edif_post_stop_cnt_down) { + ha->edif_post_stop_cnt_down--; + + /* + * turn off auto 'Plogi Acc + secure=1' feature + * Set Add FW option[3] + * BIT_15, if. + */ + if (ha->edif_post_stop_cnt_down == 0) { + ql_dbg(ql_dbg_async, vha, 0x911d, + "%s chip reset to turn off PLOGI ACC + secure\n", + __func__); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + } + } else { + ha->edif_post_stop_cnt_down = 60; + } + } + + if (vha->e_dbell.dbell_bsg_job && time_after_eq(jiffies, vha->e_dbell.bsg_expire)) + qla_edif_dbell_bsg_done(vha); +} + +static void qla_noop_sp_done(srb_t *sp, int res) +{ + sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +} + +/* + * Called from work queue + * build and send the sa_update iocb to delete an rx sa_index + */ +int +qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e) +{ + srb_t *sp; + fc_port_t *fcport = NULL; + struct srb_iocb *iocb_cmd = NULL; + int rval = QLA_SUCCESS; + struct edif_sa_ctl *sa_ctl = e->u.sa_update.sa_ctl; + uint16_t nport_handle = e->u.sa_update.nport_handle; + + ql_dbg(ql_dbg_edif, vha, 0x70e6, + "%s: starting, sa_ctl: %p\n", __func__, sa_ctl); + + if (!sa_ctl) { + ql_dbg(ql_dbg_edif, vha, 0x70e6, + "sa_ctl allocation failed\n"); + rval = -ENOMEM; + return rval; + } + + fcport = sa_ctl->fcport; + + /* Alloc SRB structure */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + ql_dbg(ql_dbg_edif, vha, 0x70e6, + "SRB allocation failed\n"); + rval = -ENOMEM; + goto done; + } + + fcport->flags |= FCF_ASYNC_SENT; + iocb_cmd = &sp->u.iocb_cmd; + iocb_cmd->u.sa_update.sa_ctl = sa_ctl; + + ql_dbg(ql_dbg_edif, vha, 0x3073, + "Enter: SA REPL portid=%06x, sa_ctl %p, index %x, nport_handle: 0x%x\n", + fcport->d_id.b24, sa_ctl, sa_ctl->index, nport_handle); + /* + * if this is a sadb cleanup delete, mark it so the isr can + * take the correct action + */ + if (sa_ctl->flags & EDIF_SA_CTL_FLG_CLEANUP_DEL) { + /* mark this srb as a cleanup delete */ + sp->flags |= SRB_EDIF_CLEANUP_DELETE; + ql_dbg(ql_dbg_edif, vha, 0x70e6, + "%s: sp 0x%p flagged as cleanup delete\n", __func__, sp); + } + + sp->type = SRB_SA_REPLACE; + sp->name = "SA_REPLACE"; + sp->fcport = fcport; + sp->free = qla2x00_rel_sp; + sp->done = qla_noop_sp_done; + + rval = qla2x00_start_sp(sp); + + if (rval != QLA_SUCCESS) { + goto done_free_sp; + } + + return rval; +done_free_sp: + kref_put(&sp->cmd_kref, qla2x00_sp_release); + fcport->flags &= ~FCF_ASYNC_SENT; +done: + fcport->flags &= ~FCF_ASYNC_ACTIVE; + return rval; +} + +void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb) +{ + int itr = 0; + struct scsi_qla_host *vha = sp->vha; + struct qla_sa_update_frame *sa_frame = + &sp->u.iocb_cmd.u.sa_update.sa_frame; + u8 flags = 0; + + switch (sa_frame->flags & (SAU_FLG_INV | SAU_FLG_TX)) { + case 0: + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n", + __func__, vha, sa_frame->fast_sa_index); + break; + case 1: + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: EDIF SA DELETE RX IOCB 
vha: 0x%p index: %d\n", + __func__, vha, sa_frame->fast_sa_index); + flags |= SA_FLAG_INVALIDATE; + break; + case 2: + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n", + __func__, vha, sa_frame->fast_sa_index); + flags |= SA_FLAG_TX; + break; + case 3: + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n", + __func__, vha, sa_frame->fast_sa_index); + flags |= SA_FLAG_TX | SA_FLAG_INVALIDATE; + break; + } + + sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE; + sa_update_iocb->entry_count = 1; + sa_update_iocb->sys_define = 0; + sa_update_iocb->entry_status = 0; + sa_update_iocb->handle = sp->handle; + sa_update_iocb->u.nport_handle = cpu_to_le16(sp->fcport->loop_id); + sa_update_iocb->vp_index = sp->fcport->vha->vp_idx; + sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; + sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area; + sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain; + + sa_update_iocb->flags = flags; + sa_update_iocb->salt = cpu_to_le32(sa_frame->salt); + sa_update_iocb->spi = cpu_to_le32(sa_frame->spi); + sa_update_iocb->sa_index = cpu_to_le16(sa_frame->fast_sa_index); + + sa_update_iocb->sa_control |= SA_CNTL_ENC_FCSP; + if (sp->fcport->edif.aes_gmac) + sa_update_iocb->sa_control |= SA_CNTL_AES_GMAC; + + if (sa_frame->flags & SAU_FLG_KEY256) { + sa_update_iocb->sa_control |= SA_CNTL_KEY256; + for (itr = 0; itr < 32; itr++) + sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr]; + } else { + sa_update_iocb->sa_control |= SA_CNTL_KEY128; + for (itr = 0; itr < 16; itr++) + sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr]; + } + + ql_dbg(ql_dbg_edif, vha, 0x921d, + "%s SAU Port ID = %02x%02x%02x, flags=%xh, index=%u, ctl=%xh, SPI 0x%x flags 0x%x hdl=%x gmac %d\n", + __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1], + sa_update_iocb->port_id[0], sa_update_iocb->flags, sa_update_iocb->sa_index, + sa_update_iocb->sa_control, sa_update_iocb->spi, sa_frame->flags, sp->handle, + sp->fcport->edif.aes_gmac); + + if (sa_frame->flags & SAU_FLG_TX) + sp->fcport->edif.tx_sa_pending = 1; + else + sp->fcport->edif.rx_sa_pending = 1; + + sp->fcport->vha->qla_stats.control_requests++; +} + +void +qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb) +{ + struct scsi_qla_host *vha = sp->vha; + struct srb_iocb *srb_iocb = &sp->u.iocb_cmd; + struct edif_sa_ctl *sa_ctl = srb_iocb->u.sa_update.sa_ctl; + uint16_t nport_handle = sp->fcport->loop_id; + + sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE; + sa_update_iocb->entry_count = 1; + sa_update_iocb->sys_define = 0; + sa_update_iocb->entry_status = 0; + sa_update_iocb->handle = sp->handle; + + sa_update_iocb->u.nport_handle = cpu_to_le16(nport_handle); + + sa_update_iocb->vp_index = sp->fcport->vha->vp_idx; + sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; + sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area; + sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain; + + /* Invalidate the index. 
salt, spi, control & key are ignore */ + sa_update_iocb->flags = SA_FLAG_INVALIDATE; + sa_update_iocb->salt = 0; + sa_update_iocb->spi = 0; + sa_update_iocb->sa_index = cpu_to_le16(sa_ctl->index); + sa_update_iocb->sa_control = 0; + + ql_dbg(ql_dbg_edif, vha, 0x921d, + "%s SAU DELETE RX Port ID = %02x:%02x:%02x, lid %d flags=%xh, index=%u, hdl=%x\n", + __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1], + sa_update_iocb->port_id[0], nport_handle, sa_update_iocb->flags, + sa_update_iocb->sa_index, sp->handle); + + sp->fcport->vha->qla_stats.control_requests++; +} + +void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp) +{ + struct purex_entry_24xx *p = *pkt; + struct enode *ptr; + int sid; + u16 totlen; + struct purexevent *purex; + struct scsi_qla_host *host = NULL; + int rc; + struct fc_port *fcport; + struct qla_els_pt_arg a; + be_id_t beid; + + memset(&a, 0, sizeof(a)); + + a.els_opcode = ELS_AUTH_ELS; + a.nport_handle = p->nport_handle; + a.rx_xchg_address = p->rx_xchg_addr; + a.did.b.domain = p->s_id[2]; + a.did.b.area = p->s_id[1]; + a.did.b.al_pa = p->s_id[0]; + a.tx_byte_count = a.tx_len = sizeof(struct fc_els_ls_rjt); + a.tx_addr = vha->hw->elsrej.cdma; + a.vp_idx = vha->vp_idx; + a.control_flags = EPD_ELS_RJT; + a.ox_id = le16_to_cpu(p->ox_id); + + sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16); + + totlen = (le16_to_cpu(p->frame_size) & 0x0fff) - PURX_ELS_HEADER_SIZE; + if (le16_to_cpu(p->status_flags) & 0x8000) { + totlen = le16_to_cpu(p->trunc_frame_size); + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + __qla_consume_iocb(vha, pkt, rsp); + return; + } + + if (totlen > ELS_MAX_PAYLOAD) { + ql_dbg(ql_dbg_edif, vha, 0x0910d, + "%s WARNING: verbose ELS frame received (totlen=%x)\n", + __func__, totlen); + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + __qla_consume_iocb(vha, pkt, rsp); + return; + } + + if (!vha->hw->flags.edif_enabled) { + /* edif support not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x910e, "%s edif not enabled\n", + __func__); + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + __qla_consume_iocb(vha, pkt, rsp); + return; + } + + ptr = qla_enode_alloc(vha, N_PUREX); + if (!ptr) { + ql_dbg(ql_dbg_edif, vha, 0x09109, + "WARNING: enode alloc failed for sid=%x\n", + sid); + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + __qla_consume_iocb(vha, pkt, rsp); + return; + } + + purex = &ptr->u.purexinfo; + purex->pur_info.pur_sid = a.did; + purex->pur_info.pur_bytes_rcvd = totlen; + purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr); + purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle); + purex->pur_info.pur_did.b.domain = p->d_id[2]; + purex->pur_info.pur_did.b.area = p->d_id[1]; + purex->pur_info.pur_did.b.al_pa = p->d_id[0]; + purex->pur_info.vp_idx = p->vp_idx; + + a.sid = purex->pur_info.pur_did; + + rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp, + purex->msgp_len); + if (rc) { + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + qla_enode_free(vha, ptr); + return; + } + beid.al_pa = purex->pur_info.pur_did.b.al_pa; + beid.area = purex->pur_info.pur_did.b.area; + beid.domain = purex->pur_info.pur_did.b.domain; + host = qla_find_host_by_d_id(vha, beid); + if (!host) { + ql_log(ql_log_fatal, vha, 0x508b, + "%s Drop ELS due to unable to find host %06x\n", + __func__, purex->pur_info.pur_did.b24); + + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + qla_enode_free(vha, ptr); + return; + } + + fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid); + + if (DBELL_INACTIVE(vha)) { + 
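+		/*
+		 * The authentication application has not opened the doorbell,
+		 * so there is no one to hand this AUTH ELS to; reject it and
+		 * drop the enode.
+		 */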
ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n", + __func__, host->e_dbell.db_flags, + fcport ? fcport->d_id.b24 : 0); + + qla_els_reject_iocb(host, (*rsp)->qpair, &a); + qla_enode_free(host, ptr); + return; + } + + if (fcport && EDIF_SESSION_DOWN(fcport)) { + ql_dbg(ql_dbg_edif, host, 0x13b6, + "%s terminate exchange. Send logo to 0x%x\n", + __func__, a.did.b24); + + a.tx_byte_count = a.tx_len = 0; + a.tx_addr = 0; + a.control_flags = EPD_RX_XCHG; /* EPD_RX_XCHG = terminate cmd */ + qla_els_reject_iocb(host, (*rsp)->qpair, &a); + qla_enode_free(host, ptr); + /* send logo to let remote port knows to tear down session */ + fcport->send_els_logo = 1; + qlt_schedule_sess_for_deletion(fcport); + return; + } + + /* add the local enode to the list */ + qla_enode_add(host, ptr); + + ql_dbg(ql_dbg_edif, host, 0x0910c, + "%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n", + __func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24, + purex->pur_info.pur_did.b24, purex->pur_info.pur_rx_xchg_address); + + qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL); +} + +static uint16_t qla_edif_get_sa_index_from_freepool(fc_port_t *fcport, int dir) +{ + struct scsi_qla_host *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + void *sa_id_map; + unsigned long flags = 0; + u16 sa_index; + + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063, + "%s: entry\n", __func__); + + if (dir) + sa_id_map = ha->edif_tx_sa_id_map; + else + sa_id_map = ha->edif_rx_sa_id_map; + + spin_lock_irqsave(&ha->sadb_fp_lock, flags); + sa_index = find_first_zero_bit(sa_id_map, EDIF_NUM_SA_INDEX); + if (sa_index >= EDIF_NUM_SA_INDEX) { + spin_unlock_irqrestore(&ha->sadb_fp_lock, flags); + return INVALID_EDIF_SA_INDEX; + } + set_bit(sa_index, sa_id_map); + spin_unlock_irqrestore(&ha->sadb_fp_lock, flags); + + if (dir) + sa_index += EDIF_TX_SA_INDEX_BASE; + + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: index retrieved from free pool %d\n", __func__, sa_index); + + return sa_index; +} + +/* find an sadb entry for an nport_handle */ +static struct edif_sa_index_entry * +qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle, + struct list_head *sa_list) +{ + struct edif_sa_index_entry *entry; + struct edif_sa_index_entry *tentry; + struct list_head *indx_list = sa_list; + + list_for_each_entry_safe(entry, tentry, indx_list, next) { + if (entry->handle == nport_handle) + return entry; + } + return NULL; +} + +/* remove an sa_index from the nport_handle and return it to the free pool */ +static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle, + uint16_t sa_index) +{ + struct edif_sa_index_entry *entry; + struct list_head *sa_list; + int dir = (sa_index < EDIF_TX_SA_INDEX_BASE) ? 0 : 1; + int slot = 0; + int free_slot_count = 0; + scsi_qla_host_t *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags = 0; + + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: entry\n", __func__); + + if (dir) + sa_list = &ha->sadb_tx_index_list; + else + sa_list = &ha->sadb_rx_index_list; + + entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list); + if (!entry) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: no entry found for nport_handle 0x%x\n", + __func__, nport_handle); + return -1; + } + + spin_lock_irqsave(&ha->sadb_lock, flags); + /* + * each tx/rx direction has up to 2 sa indexes/slots. 1 slot for in flight traffic + * the other is use at re-key time. 
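+	 * Once both slots read INVALID_EDIF_SA_INDEX the nport_handle entry
+	 * itself is unlinked and freed below.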
+ */ + for (slot = 0; slot < 2; slot++) { + if (entry->sa_pair[slot].sa_index == sa_index) { + entry->sa_pair[slot].sa_index = INVALID_EDIF_SA_INDEX; + entry->sa_pair[slot].spi = 0; + free_slot_count++; + qla_edif_add_sa_index_to_freepool(fcport, dir, sa_index); + } else if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) { + free_slot_count++; + } + } + + if (free_slot_count == 2) { + list_del(&entry->next); + kfree(entry); + } + spin_unlock_irqrestore(&ha->sadb_lock, flags); + + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: sa_index %d removed, free_slot_count: %d\n", + __func__, sa_index, free_slot_count); + + return 0; +} + +void +qla28xx_sa_update_iocb_entry(scsi_qla_host_t *v, struct req_que *req, + struct sa_update_28xx *pkt) +{ + const char *func = "SA_UPDATE_RESPONSE_IOCB"; + srb_t *sp; + struct edif_sa_ctl *sa_ctl; + int old_sa_deleted = 1; + uint16_t nport_handle; + struct scsi_qla_host *vha; + + sp = qla2x00_get_sp_from_handle(v, func, req, pkt); + + if (!sp) { + ql_dbg(ql_dbg_edif, v, 0x3063, + "%s: no sp found for pkt\n", __func__); + return; + } + /* use sp->vha due to npiv */ + vha = sp->vha; + + switch (pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) { + case 0: + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n", + __func__, vha, pkt->sa_index); + break; + case 1: + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n", + __func__, vha, pkt->sa_index); + break; + case 2: + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n", + __func__, vha, pkt->sa_index); + break; + case 3: + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n", + __func__, vha, pkt->sa_index); + break; + } + + /* + * dig the nport handle out of the iocb, fcport->loop_id can not be trusted + * to be correct during cleanup sa_update iocbs. + */ + nport_handle = sp->fcport->loop_id; + + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: %8phN comp status=%x old_sa_info=%x new_sa_info=%x lid %d, index=0x%x pkt_flags %xh hdl=%x\n", + __func__, sp->fcport->port_name, pkt->u.comp_sts, pkt->old_sa_info, pkt->new_sa_info, + nport_handle, pkt->sa_index, pkt->flags, sp->handle); + + /* if rx delete, remove the timer */ + if ((pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) == SA_FLAG_INVALIDATE) { + struct edif_list_entry *edif_entry; + + sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + + edif_entry = qla_edif_list_find_sa_index(sp->fcport, nport_handle); + if (edif_entry) { + ql_dbg(ql_dbg_edif, vha, 0x5033, + "%s: removing edif_entry %p, new sa_index: 0x%x\n", + __func__, edif_entry, pkt->sa_index); + qla_edif_list_delete_sa_index(sp->fcport, edif_entry); + timer_shutdown(&edif_entry->timer); + + ql_dbg(ql_dbg_edif, vha, 0x5033, + "%s: releasing edif_entry %p, new sa_index: 0x%x\n", + __func__, edif_entry, pkt->sa_index); + + kfree(edif_entry); + } + } + + /* + * if this is a delete for either tx or rx, make sure it succeeded. + * The new_sa_info field should be 0xffff on success + */ + if (pkt->flags & SA_FLAG_INVALIDATE) + old_sa_deleted = (le16_to_cpu(pkt->new_sa_info) == 0xffff) ? 
1 : 0; + + /* Process update and delete the same way */ + + /* If this is an sadb cleanup delete, bypass sending events to IPSEC */ + if (sp->flags & SRB_EDIF_CLEANUP_DELETE) { + sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: nph 0x%x, sa_index %d removed from fw\n", + __func__, sp->fcport->loop_id, pkt->sa_index); + + } else if ((pkt->entry_status == 0) && (pkt->u.comp_sts == 0) && + old_sa_deleted) { + /* + * Note: Wa are only keeping track of latest SA, + * so we know when we can start enableing encryption per I/O. + * If all SA's get deleted, let FW reject the IOCB. + + * TODO: edif: don't set enabled here I think + * TODO: edif: prli complete is where it should be set + */ + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063, + "SA(%x)updated for s_id %02x%02x%02x\n", + pkt->new_sa_info, + pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]); + sp->fcport->edif.enable = 1; + if (pkt->flags & SA_FLAG_TX) { + sp->fcport->edif.tx_sa_set = 1; + sp->fcport->edif.tx_sa_pending = 0; + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + QL_VND_SA_STAT_SUCCESS, + QL_VND_TX_SA_KEY, sp->fcport); + } else { + sp->fcport->edif.rx_sa_set = 1; + sp->fcport->edif.rx_sa_pending = 0; + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + QL_VND_SA_STAT_SUCCESS, + QL_VND_RX_SA_KEY, sp->fcport); + } + } else { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: %8phN SA update FAILED: sa_index: %d, new_sa_info %d, %02x%02x%02x\n", + __func__, sp->fcport->port_name, pkt->sa_index, pkt->new_sa_info, + pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]); + + if (pkt->flags & SA_FLAG_TX) + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED, + QL_VND_TX_SA_KEY, sp->fcport); + else + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED, + QL_VND_RX_SA_KEY, sp->fcport); + } + + /* for delete, release sa_ctl, sa_index */ + if (pkt->flags & SA_FLAG_INVALIDATE) { + /* release the sa_ctl */ + sa_ctl = qla_edif_find_sa_ctl_by_index(sp->fcport, + le16_to_cpu(pkt->sa_index), (pkt->flags & SA_FLAG_TX)); + if (sa_ctl && + qla_edif_find_sa_ctl_by_index(sp->fcport, sa_ctl->index, + (pkt->flags & SA_FLAG_TX)) != NULL) { + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063, + "%s: freeing sa_ctl for index %d\n", + __func__, sa_ctl->index); + qla_edif_free_sa_ctl(sp->fcport, sa_ctl, sa_ctl->index); + } else { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: sa_ctl NOT freed, sa_ctl: %p\n", + __func__, sa_ctl); + } + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: freeing sa_index %d, nph: 0x%x\n", + __func__, le16_to_cpu(pkt->sa_index), nport_handle); + qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle, + le16_to_cpu(pkt->sa_index)); + /* + * check for a failed sa_update and remove + * the sadb entry. + */ + } else if (pkt->u.comp_sts) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: freeing sa_index %d, nph: 0x%x\n", + __func__, pkt->sa_index, nport_handle); + qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle, + le16_to_cpu(pkt->sa_index)); + switch (le16_to_cpu(pkt->u.comp_sts)) { + case CS_PORT_EDIF_UNAVAIL: + case CS_PORT_EDIF_LOGOUT: + qlt_schedule_sess_for_deletion(sp->fcport); + break; + default: + break; + } + } + + sp->done(sp, 0); +} + +/** + * qla28xx_start_scsi_edif() - Send a SCSI type 6 command to the ISP + * @sp: command to send to the ISP + * + * Return: non-zero if a failure occurred, else zero. 
+ */ +int +qla28xx_start_scsi_edif(srb_t *sp) +{ + int nseg; + unsigned long flags; + struct scsi_cmnd *cmd; + uint32_t *clr_ptr; + uint32_t index, i; + uint32_t handle; + uint16_t cnt; + int16_t req_cnt; + uint16_t tot_dsds; + __be32 *fcp_dl; + uint8_t additional_cdb_len; + struct ct6_dsd *ctx; + struct scsi_qla_host *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + struct cmd_type_6 *cmd_pkt; + struct dsd64 *cur_dsd; + uint8_t avail_dsds = 0; + struct scatterlist *sg; + struct req_que *req = sp->qpair->req; + spinlock_t *lock = sp->qpair->qp_lock_ptr; + + /* Setup device pointers. */ + cmd = GET_CMD_SP(sp); + + /* So we know we haven't pci_map'ed anything yet */ + tot_dsds = 0; + + /* Send marker if required */ + if (vha->marker_needed != 0) { + if (qla2x00_marker(vha, sp->qpair, 0, 0, MK_SYNC_ALL) != + QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x300c, + "qla2x00_marker failed for cmd=%p.\n", cmd); + return QLA_FUNCTION_FAILED; + } + vha->marker_needed = 0; + } + + /* Acquire ring specific lock */ + spin_lock_irqsave(lock, flags); + + /* Check for room in outstanding command list. */ + handle = req->current_outstanding_cmd; + for (index = 1; index < req->num_outstanding_cmds; index++) { + handle++; + if (handle == req->num_outstanding_cmds) + handle = 1; + if (!req->outstanding_cmds[handle]) + break; + } + if (index == req->num_outstanding_cmds) + goto queuing_error; + + /* Map the sg table so we have an accurate count of sg entries needed */ + if (scsi_sg_count(cmd)) { + nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), + scsi_sg_count(cmd), cmd->sc_data_direction); + if (unlikely(!nseg)) + goto queuing_error; + } else { + nseg = 0; + } + + tot_dsds = nseg; + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + + sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; + sp->iores.exch_cnt = 1; + sp->iores.iocb_cnt = req_cnt; + if (qla_get_fw_resources(sp->qpair, &sp->iores)) + goto queuing_error; + + if (req->cnt < (req_cnt + 2)) { + cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : + rd_reg_dword(req->req_q_out); + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - + (req->ring_index - cnt); + if (req->cnt < (req_cnt + 2)) + goto queuing_error; + } + + if (qla_get_buf(vha, sp->qpair, &sp->u.scmd.buf_dsc)) { + ql_log(ql_log_fatal, vha, 0x3011, + "Failed to allocate buf for fcp_cmnd for cmd=%p.\n", cmd); + goto queuing_error; + } + + sp->flags |= SRB_GOT_BUF; + ctx = &sp->u.scmd.ct6_ctx; + ctx->fcp_cmnd = sp->u.scmd.buf_dsc.buf; + ctx->fcp_cmnd_dma = sp->u.scmd.buf_dsc.buf_dma; + + if (cmd->cmd_len > 16) { + additional_cdb_len = cmd->cmd_len - 16; + if ((cmd->cmd_len % 4) != 0) { + /* + * SCSI command bigger than 16 bytes must be + * multiple of 4 + */ + ql_log(ql_log_warn, vha, 0x3012, + "scsi cmd len %d not multiple of 4 for cmd=%p.\n", + cmd->cmd_len, cmd); + goto queuing_error_fcp_cmnd; + } + ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; + } else { + additional_cdb_len = 0; + ctx->fcp_cmnd_len = 12 + 16 + 4; + } + + cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; + cmd_pkt->handle = make_handle(req->id, handle); + + /* + * Zero out remaining portion of packet. + * tagged queuing modifier -- default is TSK_SIMPLE (0). 
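+	 * The first 8 bytes (entry header and the handle written above) are
+	 * skipped by the memset below.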
+ */ + clr_ptr = (uint32_t *)cmd_pkt + 2; + memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); + cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); + + /* No data transfer */ + if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { + cmd_pkt->byte_count = cpu_to_le32(0); + goto no_dsds; + } + + /* Set transfer direction */ + if (cmd->sc_data_direction == DMA_TO_DEVICE) { + cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); + vha->qla_stats.output_bytes += scsi_bufflen(cmd); + vha->qla_stats.output_requests++; + sp->fcport->edif.tx_bytes += scsi_bufflen(cmd); + } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { + cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); + vha->qla_stats.input_bytes += scsi_bufflen(cmd); + vha->qla_stats.input_requests++; + sp->fcport->edif.rx_bytes += scsi_bufflen(cmd); + } + + cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF); + cmd_pkt->control_flags &= ~(cpu_to_le16(CF_NEW_SA)); + + /* One DSD is available in the Command Type 6 IOCB */ + avail_dsds = 1; + cur_dsd = &cmd_pkt->fcp_dsd; + + /* Load data segments */ + scsi_for_each_sg(cmd, sg, tot_dsds, i) { + dma_addr_t sle_dma; + cont_a64_entry_t *cont_pkt; + + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Five DSDs are available in the Continuation + * Type 1 IOCB. + */ + cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req); + cur_dsd = cont_pkt->dsd; + avail_dsds = 5; + } + + sle_dma = sg_dma_address(sg); + put_unaligned_le64(sle_dma, &cur_dsd->address); + cur_dsd->length = cpu_to_le32(sg_dma_len(sg)); + cur_dsd++; + avail_dsds--; + } + +no_dsds: + /* Set NPORT-ID and LUN number*/ + cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); + cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; + cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; + cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; + cmd_pkt->vp_index = sp->vha->vp_idx; + + cmd_pkt->entry_type = COMMAND_TYPE_6; + + /* Set total data segment count. */ + cmd_pkt->entry_count = (uint8_t)req_cnt; + + int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + + /* build FCP_CMND IU */ + int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun); + ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; + + if (cmd->sc_data_direction == DMA_TO_DEVICE) + ctx->fcp_cmnd->additional_cdb_len |= 1; + else if (cmd->sc_data_direction == DMA_FROM_DEVICE) + ctx->fcp_cmnd->additional_cdb_len |= 2; + + /* Populate the FCP_PRIO. */ + if (ha->flags.fcp_prio_enabled) + ctx->fcp_cmnd->task_attribute |= + sp->fcport->fcp_prio << 3; + + memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); + + fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 + + additional_cdb_len); + *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); + + cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); + put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address); + + cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); + /* Set total data segment count. */ + cmd_pkt->entry_count = (uint8_t)req_cnt; + cmd_pkt->entry_status = 0; + + /* Build command packet. */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + cmd->host_scribble = (unsigned char *)(unsigned long)handle; + req->cnt -= req_cnt; + + /* Adjust ring index. */ + wmb(); + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else { + req->ring_ptr++; + } + + sp->qpair->cmd_cnt++; + /* Set chip new ring index. 
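+	 * The req_q_in write below hands the queued entries to the firmware.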
*/ + wrt_reg_dword(req->req_q_in, req->ring_index); + + spin_unlock_irqrestore(lock, flags); + + return QLA_SUCCESS; + +queuing_error_fcp_cmnd: +queuing_error: + if (tot_dsds) + scsi_dma_unmap(cmd); + + qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc); + qla_put_fw_resources(sp->qpair, &sp->iores); + spin_unlock_irqrestore(lock, flags); + + return QLA_FUNCTION_FAILED; +} + +/********************************************** + * edif update/delete sa_index list functions * + **********************************************/ + +/* clear the edif_indx_list for this port */ +void qla_edif_list_del(fc_port_t *fcport) +{ + struct edif_list_entry *indx_lst; + struct edif_list_entry *tindx_lst; + struct list_head *indx_list = &fcport->edif.edif_indx_list; + unsigned long flags = 0; + + spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); + list_for_each_entry_safe(indx_lst, tindx_lst, indx_list, next) { + list_del(&indx_lst->next); + kfree(indx_lst); + } + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); +} + +/****************** + * SADB functions * + ******************/ + +/* allocate/retrieve an sa_index for a given spi */ +static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport, + struct qla_sa_update_frame *sa_frame) +{ + struct edif_sa_index_entry *entry; + struct list_head *sa_list; + uint16_t sa_index; + int dir = sa_frame->flags & SAU_FLG_TX; + int slot = 0; + int free_slot = -1; + scsi_qla_host_t *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags = 0; + uint16_t nport_handle = fcport->loop_id; + + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: entry fc_port: %p, nport_handle: 0x%x\n", + __func__, fcport, nport_handle); + + if (dir) + sa_list = &ha->sadb_tx_index_list; + else + sa_list = &ha->sadb_rx_index_list; + + entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list); + if (!entry) { + if ((sa_frame->flags & (SAU_FLG_TX | SAU_FLG_INV)) == SAU_FLG_INV) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: rx delete request with no entry\n", __func__); + return RX_DELETE_NO_EDIF_SA_INDEX; + } + + /* if there is no entry for this nport, add one */ + entry = kzalloc((sizeof(struct edif_sa_index_entry)), GFP_ATOMIC); + if (!entry) + return INVALID_EDIF_SA_INDEX; + + sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir); + if (sa_index == INVALID_EDIF_SA_INDEX) { + kfree(entry); + return INVALID_EDIF_SA_INDEX; + } + + INIT_LIST_HEAD(&entry->next); + entry->handle = nport_handle; + entry->fcport = fcport; + entry->sa_pair[0].spi = sa_frame->spi; + entry->sa_pair[0].sa_index = sa_index; + entry->sa_pair[1].spi = 0; + entry->sa_pair[1].sa_index = INVALID_EDIF_SA_INDEX; + spin_lock_irqsave(&ha->sadb_lock, flags); + list_add_tail(&entry->next, sa_list); + spin_unlock_irqrestore(&ha->sadb_lock, flags); + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: Created new sadb entry for nport_handle 0x%x, spi 0x%x, returning sa_index %d\n", + __func__, nport_handle, sa_frame->spi, sa_index); + + return sa_index; + } + + spin_lock_irqsave(&ha->sadb_lock, flags); + + /* see if we already have an entry for this spi */ + for (slot = 0; slot < 2; slot++) { + if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) { + free_slot = slot; + } else { + if (entry->sa_pair[slot].spi == sa_frame->spi) { + spin_unlock_irqrestore(&ha->sadb_lock, flags); + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: sadb slot %d entry for lid 0x%x, spi 0x%x found, sa_index %d\n", + __func__, slot, entry->handle, sa_frame->spi, + entry->sa_pair[slot].sa_index); + return entry->sa_pair[slot].sa_index; + } + } + } 
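+	/* no existing slot matches this SPI; fall through to the free pool */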
+ spin_unlock_irqrestore(&ha->sadb_lock, flags); + + /* both slots are used */ + if (free_slot == -1) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: WARNING: No free slots in sadb for nport_handle 0x%x, spi: 0x%x\n", + __func__, entry->handle, sa_frame->spi); + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: Slot 0 spi: 0x%x sa_index: %d, Slot 1 spi: 0x%x sa_index: %d\n", + __func__, entry->sa_pair[0].spi, entry->sa_pair[0].sa_index, + entry->sa_pair[1].spi, entry->sa_pair[1].sa_index); + + return INVALID_EDIF_SA_INDEX; + } + + /* there is at least one free slot, use it */ + sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir); + if (sa_index == INVALID_EDIF_SA_INDEX) { + ql_dbg(ql_dbg_edif, fcport->vha, 0x3063, + "%s: empty freepool!!\n", __func__); + return INVALID_EDIF_SA_INDEX; + } + + spin_lock_irqsave(&ha->sadb_lock, flags); + entry->sa_pair[free_slot].spi = sa_frame->spi; + entry->sa_pair[free_slot].sa_index = sa_index; + spin_unlock_irqrestore(&ha->sadb_lock, flags); + ql_dbg(ql_dbg_edif, fcport->vha, 0x3063, + "%s: sadb slot %d entry for nport_handle 0x%x, spi 0x%x added, returning sa_index %d\n", + __func__, free_slot, entry->handle, sa_frame->spi, sa_index); + + return sa_index; +} + +/* release any sadb entries -- only done at teardown */ +void qla_edif_sadb_release(struct qla_hw_data *ha) +{ + struct edif_sa_index_entry *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) { + list_del(&entry->next); + kfree(entry); + } + + list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) { + list_del(&entry->next); + kfree(entry); + } +} + +/************************** + * sadb freepool functions + **************************/ + +/* build the rx and tx sa_index free pools -- only done at fcport init */ +int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha) +{ + ha->edif_tx_sa_id_map = + kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL); + + if (!ha->edif_tx_sa_id_map) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x0009, + "Unable to allocate memory for sadb tx.\n"); + return -ENOMEM; + } + + ha->edif_rx_sa_id_map = + kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL); + if (!ha->edif_rx_sa_id_map) { + kfree(ha->edif_tx_sa_id_map); + ha->edif_tx_sa_id_map = NULL; + ql_log_pci(ql_log_fatal, ha->pdev, 0x0009, + "Unable to allocate memory for sadb rx.\n"); + return -ENOMEM; + } + return 0; +} + +/* release the free pool - only done during fcport teardown */ +void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha) +{ + kfree(ha->edif_tx_sa_id_map); + ha->edif_tx_sa_id_map = NULL; + kfree(ha->edif_rx_sa_id_map); + ha->edif_rx_sa_id_map = NULL; +} + +static void __chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, + fc_port_t *fcport, uint32_t handle, uint16_t sa_index) +{ + struct edif_list_entry *edif_entry; + struct edif_sa_ctl *sa_ctl; + uint16_t delete_sa_index = INVALID_EDIF_SA_INDEX; + unsigned long flags = 0; + uint16_t nport_handle = fcport->loop_id; + uint16_t cached_nport_handle; + + spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); + edif_entry = qla_edif_list_find_sa_index(fcport, nport_handle); + if (!edif_entry) { + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + return; /* no pending delete for this handle */ + } + + /* + * check for no pending delete for this index or iocb does not + * match rx sa_index + */ + if (edif_entry->delete_sa_index == INVALID_EDIF_SA_INDEX || + edif_entry->update_sa_index != sa_index) { + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + 
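+		/* either no delete is armed or this I/O did not use the new sa_index */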
return; + } + + /* + * wait until we have seen at least EDIF_DELAY_COUNT transfers before + * queueing RX delete + */ + if (edif_entry->count++ < EDIF_RX_DELETE_FILTER_COUNT) { + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + return; + } + + ql_dbg(ql_dbg_edif, vha, 0x5033, + "%s: invalidating delete_sa_index, update_sa_index: 0x%x sa_index: 0x%x, delete_sa_index: 0x%x\n", + __func__, edif_entry->update_sa_index, sa_index, edif_entry->delete_sa_index); + + delete_sa_index = edif_entry->delete_sa_index; + edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX; + cached_nport_handle = edif_entry->handle; + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + + /* sanity check on the nport handle */ + if (nport_handle != cached_nport_handle) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: POST SA DELETE nport_handle mismatch: lid: 0x%x, edif_entry nph: 0x%x\n", + __func__, nport_handle, cached_nport_handle); + } + + /* find the sa_ctl for the delete and schedule the delete */ + sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, delete_sa_index, 0); + if (sa_ctl) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: POST SA DELETE sa_ctl: %p, index recvd %d\n", + __func__, sa_ctl, sa_index); + ql_dbg(ql_dbg_edif, vha, 0x3063, + "delete index %d, update index: %d, nport handle: 0x%x, handle: 0x%x\n", + delete_sa_index, + edif_entry->update_sa_index, nport_handle, handle); + + sa_ctl->flags = EDIF_SA_CTL_FLG_DEL; + set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state); + qla_post_sa_replace_work(fcport->vha, fcport, + nport_handle, sa_ctl); + } else { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: POST SA DELETE sa_ctl not found for delete_sa_index: %d\n", + __func__, delete_sa_index); + } +} + +void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, + srb_t *sp, struct sts_entry_24xx *sts24) +{ + fc_port_t *fcport = sp->fcport; + /* sa_index used by this iocb */ + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + uint32_t handle; + + handle = (uint32_t)LSW(sts24->handle); + + /* find out if this status iosb is for a scsi read */ + if (cmd->sc_data_direction != DMA_FROM_DEVICE) + return; + + return __chk_edif_rx_sa_delete_pending(vha, fcport, handle, + le16_to_cpu(sts24->edif_sa_index)); +} + +void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport, + struct ctio7_from_24xx *pkt) +{ + __chk_edif_rx_sa_delete_pending(vha, fcport, + pkt->handle, le16_to_cpu(pkt->edif_sa_index)); +} + +static void qla_parse_auth_els_ctl(struct srb *sp) +{ + struct qla_els_pt_arg *a = &sp->u.bsg_cmd.u.els_arg; + struct bsg_job *bsg_job = sp->u.bsg_cmd.bsg_job; + struct fc_bsg_request *request = bsg_job->request; + struct qla_bsg_auth_els_request *p = + (struct qla_bsg_auth_els_request *)bsg_job->request; + + a->tx_len = a->tx_byte_count = sp->remap.req.len; + a->tx_addr = sp->remap.req.dma; + a->rx_len = a->rx_byte_count = sp->remap.rsp.len; + a->rx_addr = sp->remap.rsp.dma; + + if (p->e.sub_cmd == SEND_ELS_REPLY) { + a->control_flags = p->e.extra_control_flags << 13; + a->rx_xchg_address = cpu_to_le32(p->e.extra_rx_xchg_address); + if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_ACC) + a->els_opcode = ELS_LS_ACC; + else if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_RJT) + a->els_opcode = ELS_LS_RJT; + } + a->did = sp->fcport->d_id; + a->els_opcode = request->rqst_data.h_els.command_code; + a->nport_handle = cpu_to_le16(sp->fcport->loop_id); + a->vp_idx = sp->vha->vp_idx; +} + +int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + struct fc_bsg_request *bsg_request = 
bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + fc_port_t *fcport = NULL; + struct qla_hw_data *ha = vha->hw; + srb_t *sp; + int rval = (DID_ERROR << 16), cnt; + port_id_t d_id; + struct qla_bsg_auth_els_request *p = + (struct qla_bsg_auth_els_request *)bsg_job->request; + struct qla_bsg_auth_els_reply *rpl = + (struct qla_bsg_auth_els_reply *)bsg_job->reply; + + rpl->version = EDIF_VERSION1; + + d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2]; + d_id.b.area = bsg_request->rqst_data.h_els.port_id[1]; + d_id.b.domain = bsg_request->rqst_data.h_els.port_id[0]; + + /* find matching d_id in fcport list */ + fcport = qla2x00_find_fcport_by_pid(vha, &d_id); + if (!fcport) { + ql_dbg(ql_dbg_edif, vha, 0x911a, + "%s fcport not find online portid=%06x.\n", + __func__, d_id.b24); + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + return -EIO; + } + + if (qla_bsg_check(vha, bsg_job, fcport)) + return 0; + + if (EDIF_SESS_DELETE(fcport)) { + ql_dbg(ql_dbg_edif, vha, 0x910d, + "%s ELS code %x, no loop id.\n", __func__, + bsg_request->rqst_data.r_els.els_code); + SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET); + return -ENXIO; + } + + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n"); + SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET); + rval = -EIO; + goto done; + } + + /* pass through is supported only for ISP 4Gb or higher */ + if (!IS_FWI2_CAPABLE(ha)) { + ql_dbg(ql_dbg_user, vha, 0x7001, + "ELS passthru not supported for ISP23xx based adapters.\n"); + SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET); + rval = -EPERM; + goto done; + } + + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + ql_dbg(ql_dbg_user, vha, 0x7004, + "Failed get sp pid=%06x\n", fcport->d_id.b24); + rval = -ENOMEM; + SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); + goto done; + } + + sp->remap.req.len = bsg_job->request_payload.payload_len; + sp->remap.req.buf = dma_pool_alloc(ha->purex_dma_pool, + GFP_KERNEL, &sp->remap.req.dma); + if (!sp->remap.req.buf) { + ql_dbg(ql_dbg_user, vha, 0x7005, + "Failed allocate request dma len=%x\n", + bsg_job->request_payload.payload_len); + rval = -ENOMEM; + SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); + goto done_free_sp; + } + + sp->remap.rsp.len = bsg_job->reply_payload.payload_len; + sp->remap.rsp.buf = dma_pool_alloc(ha->purex_dma_pool, + GFP_KERNEL, &sp->remap.rsp.dma); + if (!sp->remap.rsp.buf) { + ql_dbg(ql_dbg_user, vha, 0x7006, + "Failed allocate response dma len=%x\n", + bsg_job->reply_payload.payload_len); + rval = -ENOMEM; + SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); + goto done_free_remap_req; + } + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, sp->remap.req.buf, + sp->remap.req.len); + sp->remap.remapped = true; + + sp->type = SRB_ELS_CMD_HST_NOLOGIN; + sp->name = "SPCN_BSG_HST_NOLOGIN"; + sp->u.bsg_cmd.bsg_job = bsg_job; + qla_parse_auth_els_ctl(sp); + + sp->free = qla2x00_bsg_sp_free; + sp->done = qla2x00_bsg_job_done; + + cnt = 0; +retry: + rval = qla2x00_start_sp(sp); + switch (rval) { + case QLA_SUCCESS: + ql_dbg(ql_dbg_edif, vha, 0x700a, + "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n", + __func__, sc_to_str(p->e.sub_cmd), fcport->port_name, + p->e.extra_rx_xchg_address, p->e.extra_control_flags, + sp->handle, sp->remap.req.len, bsg_job); + break; + case EAGAIN: + msleep(EDIF_MSLEEP_INTERVAL); + cnt++; + if (cnt < EDIF_RETRY_COUNT) + goto retry; + fallthrough; + default: + ql_log(ql_log_warn, vha, 0x700e, + "%s 
qla2x00_start_sp failed = %d\n", __func__, rval); + SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); + rval = -EIO; + goto done_free_remap_rsp; + } + return rval; + +done_free_remap_rsp: + dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf, + sp->remap.rsp.dma); +done_free_remap_req: + dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf, + sp->remap.req.dma); +done_free_sp: + qla2x00_rel_sp(sp); + +done: + return rval; +} + +void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess) +{ + u16 cnt = 0; + + if (sess->edif.app_sess_online && DBELL_ACTIVE(vha)) { + ql_dbg(ql_dbg_disc, vha, 0xf09c, + "%s: sess %8phN send port_offline event\n", + __func__, sess->port_name); + sess->edif.app_sess_online = 0; + sess->edif.sess_down_acked = 0; + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN, + sess->d_id.b24, 0, sess); + qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24); + + while (!READ_ONCE(sess->edif.sess_down_acked) && + !test_bit(VPORT_DELETE, &vha->dpc_flags)) { + msleep(100); + cnt++; + if (cnt > 100) + break; + } + sess->edif.sess_down_acked = 0; + ql_dbg(ql_dbg_disc, vha, 0xf09c, + "%s: sess %8phN port_offline event completed\n", + __func__, sess->port_name); + } +} + +void qla_edif_clear_appdata(struct scsi_qla_host *vha, struct fc_port *fcport) +{ + if (!(fcport->flags & FCF_FCSP_DEVICE)) + return; + + qla_edb_clear(vha, fcport->d_id); + qla_enode_clear(vha, fcport->d_id); +} diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h new file mode 100644 index 000000000..aa566cdb7 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_edif.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Marvell Fibre Channel HBA Driver + * Copyright (c) 2021 Marvell + */ +#ifndef __QLA_EDIF_H +#define __QLA_EDIF_H + +struct qla_scsi_host; +#define EDIF_APP_ID 0x73730001 + +#define EDIF_MAX_INDEX 2048 +struct edif_sa_ctl { + struct list_head next; + uint16_t del_index; + uint16_t index; + uint16_t slot; + uint16_t flags; +#define EDIF_SA_CTL_FLG_REPL BIT_0 +#define EDIF_SA_CTL_FLG_DEL BIT_1 +#define EDIF_SA_CTL_FLG_CLEANUP_DEL BIT_4 + // Invalidate Index bit and mirrors QLA_SA_UPDATE_FLAGS_DELETE + unsigned long state; +#define EDIF_SA_CTL_USED 1 /* Active Sa update */ +#define EDIF_SA_CTL_PEND 2 /* Waiting for slot */ +#define EDIF_SA_CTL_REPL 3 /* Active Replace and Delete */ +#define EDIF_SA_CTL_DEL 4 /* Delete Pending */ + struct fc_port *fcport; + struct bsg_job *bsg_job; + struct qla_sa_update_frame sa_frame; +}; + +enum enode_flags_t { + ENODE_ACTIVE = 0x1, +}; + +struct pur_core { + enum enode_flags_t enode_flags; + spinlock_t pur_lock; + struct list_head head; +}; + +enum db_flags_t { + EDB_ACTIVE = BIT_0, +}; + +#define DBELL_ACTIVE(_v) (_v->e_dbell.db_flags & EDB_ACTIVE) +#define DBELL_INACTIVE(_v) (!(_v->e_dbell.db_flags & EDB_ACTIVE)) + +struct edif_dbell { + enum db_flags_t db_flags; + spinlock_t db_lock; + struct list_head head; + struct bsg_job *dbell_bsg_job; + unsigned long bsg_expire; +}; + +#define SA_UPDATE_IOCB_TYPE 0x71 /* Security Association Update IOCB entry */ +struct sa_update_28xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System Defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* IOCB System handle. */ + + union { + __le16 nport_handle; /* in: N_PORT handle. 
*/ + __le16 comp_sts; /* out: completion status */ +#define CS_PORT_EDIF_UNAVAIL 0x28 +#define CS_PORT_EDIF_LOGOUT 0x29 +#define CS_PORT_EDIF_SUPP_NOT_RDY 0x64 +#define CS_PORT_EDIF_INV_REQ 0x66 + } u; + uint8_t vp_index; + uint8_t reserved_1; + uint8_t port_id[3]; + uint8_t flags; +#define SA_FLAG_INVALIDATE BIT_0 +#define SA_FLAG_TX BIT_1 // 1=tx, 0=rx + + uint8_t sa_key[32]; /* 256 bit key */ + __le32 salt; + __le32 spi; + uint8_t sa_control; +#define SA_CNTL_ENC_FCSP (1 << 3) +#define SA_CNTL_ENC_OPD (2 << 3) +#define SA_CNTL_ENC_MSK (3 << 3) // mask bits 4,3 +#define SA_CNTL_AES_GMAC (1 << 2) +#define SA_CNTL_KEY256 (2 << 0) +#define SA_CNTL_KEY128 0 + + uint8_t reserved_2; + __le16 sa_index; // reserve: bit 11-15 + __le16 old_sa_info; + __le16 new_sa_info; +}; + +#define NUM_ENTRIES 256 +#define PUR_GET 1 + +struct dinfo { + int nodecnt; + int lstate; +}; + +struct pur_ninfo { + port_id_t pur_sid; + port_id_t pur_did; + uint8_t vp_idx; + short pur_bytes_rcvd; + unsigned short pur_nphdl; + unsigned int pur_rx_xchg_address; +}; + +struct purexevent { + struct pur_ninfo pur_info; + unsigned char *msgp; + u32 msgp_len; +}; + +#define N_UNDEF 0 +#define N_PUREX 1 +struct enode { + struct list_head list; + struct dinfo dinfo; + uint32_t ntype; + union { + struct purexevent purexinfo; + } u; +}; + +#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES)) + +#define EDIF_SESSION_DOWN(_s) \ + (qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \ + _s->disc_state == DSC_DELETED || \ + !_s->edif.app_sess_online)) + +#define EDIF_NEGOTIATION_PENDING(_fcport) \ + (DBELL_ACTIVE(_fcport->vha) && \ + (_fcport->disc_state == DSC_LOGIN_AUTH_PEND)) + +#define EDIF_SESS_DELETE(_s) \ + (qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \ + _s->disc_state == DSC_DELETED)) + +#define EDIF_CAP(_ha) (ql2xsecenable && IS_QLA28XX(_ha)) + +#endif /* __QLA_EDIF_H */ diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h new file mode 100644 index 000000000..514c265ba --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h @@ -0,0 +1,271 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Marvell Fibre Channel HBA Driver + * Copyright (C) 2018- Marvell + * + */ +#ifndef __QLA_EDIF_BSG_H +#define __QLA_EDIF_BSG_H + +#define EDIF_VERSION1 1 + +/* BSG Vendor specific commands */ +#define ELS_MAX_PAYLOAD 2112 +#ifndef WWN_SIZE +#define WWN_SIZE 8 +#endif +#define VND_CMD_APP_RESERVED_SIZE 28 +#define VND_CMD_PAD_SIZE 3 +enum auth_els_sub_cmd { + SEND_ELS = 0, + SEND_ELS_REPLY, + PULL_ELS, +}; + +struct extra_auth_els { + enum auth_els_sub_cmd sub_cmd; + uint32_t extra_rx_xchg_address; + uint8_t extra_control_flags; +#define BSG_CTL_FLAG_INIT 0 +#define BSG_CTL_FLAG_LS_ACC 1 +#define BSG_CTL_FLAG_LS_RJT 2 +#define BSG_CTL_FLAG_TRM 3 + uint8_t version; + uint8_t pad[2]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +struct qla_bsg_auth_els_request { + struct fc_bsg_request r; + struct extra_auth_els e; +}; + +struct qla_bsg_auth_els_reply { + struct fc_bsg_reply r; + uint32_t rx_xchg_address; + uint8_t version; + uint8_t pad[VND_CMD_PAD_SIZE]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +}; + +struct app_id { + int app_vid; + uint8_t version; + uint8_t pad[VND_CMD_PAD_SIZE]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +struct app_start_reply { + uint32_t host_support_edif; + uint32_t edif_enode_active; + uint32_t edif_edb_active; + uint8_t version; + uint8_t 
pad[VND_CMD_PAD_SIZE]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +struct app_start { + struct app_id app_info; + uint8_t app_start_flags; + uint8_t version; + uint8_t pad[2]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +struct app_stop { + struct app_id app_info; + uint8_t version; + uint8_t pad[VND_CMD_PAD_SIZE]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +struct app_plogi_reply { + uint32_t prli_status; + uint8_t version; + uint8_t pad[VND_CMD_PAD_SIZE]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +struct app_pinfo_req { + struct app_id app_info; + uint8_t num_ports; + struct { +#ifdef __BIG_ENDIAN + uint8_t domain; + uint8_t area; + uint8_t al_pa; +#elif defined(__LITTLE_ENDIAN) + uint8_t al_pa; + uint8_t area; + uint8_t domain; +#else +#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!" +#endif + uint8_t rsvd_1; + } remote_pid; + uint8_t version; + uint8_t pad[VND_CMD_PAD_SIZE]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +struct app_pinfo { + port_id_t remote_pid; + uint8_t remote_wwpn[WWN_SIZE]; + uint8_t remote_type; +#define VND_CMD_RTYPE_UNKNOWN 0 +#define VND_CMD_RTYPE_TARGET 1 +#define VND_CMD_RTYPE_INITIATOR 2 + uint8_t remote_state; + uint8_t auth_state; + uint8_t version; + uint8_t pad[VND_CMD_PAD_SIZE]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +/* AUTH States */ +#define VND_CMD_AUTH_STATE_UNDEF 0 +#define VND_CMD_AUTH_STATE_SESSION_SHUTDOWN 1 +#define VND_CMD_AUTH_STATE_NEEDED 2 +#define VND_CMD_AUTH_STATE_ELS_RCVD 3 +#define VND_CMD_AUTH_STATE_SAUPDATE_COMPL 4 + +struct app_pinfo_reply { + uint8_t port_count; + uint8_t version; + uint8_t pad[VND_CMD_PAD_SIZE]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; + struct app_pinfo ports[]; +} __packed; + +struct app_sinfo_req { + struct app_id app_info; + uint8_t num_ports; + uint8_t version; + uint8_t pad[VND_CMD_PAD_SIZE]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +struct app_sinfo { + uint8_t remote_wwpn[WWN_SIZE]; + int64_t rekey_count; + uint8_t rekey_mode; + int64_t tx_bytes; + int64_t rx_bytes; +} __packed; + +struct app_stats_reply { + uint8_t elem_count; + uint8_t version; + uint8_t pad[VND_CMD_PAD_SIZE]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; + struct app_sinfo elem[]; +} __packed; + +struct qla_sa_update_frame { + struct app_id app_info; + uint16_t flags; +#define SAU_FLG_INV 0x01 /* delete key */ +#define SAU_FLG_TX 0x02 /* 1=tx, 0 = rx */ +#define SAU_FLG_FORCE_DELETE 0x08 +#define SAU_FLG_GMAC_MODE 0x20 /* + * GMAC mode is cleartext for the IO + * (i.e. NULL encryption) + */ +#define SAU_FLG_KEY128 0x40 +#define SAU_FLG_KEY256 0x80 + uint16_t fast_sa_index:10, + reserved:6; + uint32_t salt; + uint32_t spi; + uint8_t sa_key[32]; + uint8_t node_name[WWN_SIZE]; + uint8_t port_name[WWN_SIZE]; + port_id_t port_id; + uint8_t version; + uint8_t pad[VND_CMD_PAD_SIZE]; + uint8_t reserved2[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +#define QL_VND_SC_UNDEF 0 +#define QL_VND_SC_SA_UPDATE 1 +#define QL_VND_SC_APP_START 2 +#define QL_VND_SC_APP_STOP 3 +#define QL_VND_SC_AUTH_OK 4 +#define QL_VND_SC_AUTH_FAIL 5 +#define QL_VND_SC_REKEY_CONFIG 6 +#define QL_VND_SC_GET_FCINFO 7 +#define QL_VND_SC_GET_STATS 8 +#define QL_VND_SC_AEN_COMPLETE 9 +#define QL_VND_SC_READ_DBELL 10 + +/* + * bsg caller to provide empty buffer for doorbell events. 
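+ * Each event returned in the data-in buffer is expected to be a
+ * struct edif_app_dbell record as defined further below (editor's
+ * note inferred from that structure's "rtn data" comment, not part
+ * of the original text).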
+ * + * sg_io_v4.din_xferp = empty buffer for door bell events + * sg_io_v4.dout_xferp = struct edif_read_dbell *buf + */ +struct edif_read_dbell { + struct app_id app_info; + uint8_t version; + uint8_t pad[VND_CMD_PAD_SIZE]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +}; + + +/* Application interface data structure for rtn data */ +#define EXT_DEF_EVENT_DATA_SIZE 64 +struct edif_app_dbell { + uint32_t event_code; + uint32_t event_data_size; + union { + port_id_t port_id; + uint8_t event_data[EXT_DEF_EVENT_DATA_SIZE]; + }; +} __packed; + +struct edif_sa_update_aen { + port_id_t port_id; + uint32_t key_type; /* Tx (1) or RX (2) */ + uint32_t status; /* 0 succes, 1 failed, 2 timeout , 3 error */ + uint8_t version; + uint8_t pad[VND_CMD_PAD_SIZE]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +#define QL_VND_SA_STAT_SUCCESS 0 +#define QL_VND_SA_STAT_FAILED 1 +#define QL_VND_SA_STAT_TIMEOUT 2 +#define QL_VND_SA_STAT_ERROR 3 + +#define QL_VND_RX_SA_KEY 1 +#define QL_VND_TX_SA_KEY 2 + +/* App defines for plogi auth'd ok and plogi auth bad requests */ +struct auth_complete_cmd { + struct app_id app_info; +#define PL_TYPE_WWPN 1 +#define PL_TYPE_DID 2 + uint32_t type; + union { + uint8_t wwpn[WWN_SIZE]; + port_id_t d_id; + } u; + uint8_t version; + uint8_t pad[VND_CMD_PAD_SIZE]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +struct aen_complete_cmd { + struct app_id app_info; + port_id_t port_id; + uint32_t event_code; + uint8_t version; + uint8_t pad[VND_CMD_PAD_SIZE]; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +#define RX_DELAY_DELETE_TIMEOUT 20 + +#define FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN 1 + +#endif /* QLA_EDIF_BSG_H */ diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h new file mode 100644 index 000000000..f307beed9 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -0,0 +1,2287 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#ifndef __QLA_FW_H +#define __QLA_FW_H + +#include +#include + +#include "qla_dsd.h" + +#define MBS_CHECKSUM_ERROR 0x4010 +#define MBS_INVALID_PRODUCT_KEY 0x4020 + +/* + * Firmware Options. + */ +#define FO1_ENABLE_PUREX BIT_10 +#define FO1_DISABLE_LED_CTRL BIT_6 +#define FO1_ENABLE_8016 BIT_0 +#define FO2_ENABLE_SEL_CLASS2 BIT_5 +#define FO3_NO_ABTS_ON_LINKDOWN BIT_14 +#define FO3_HOLD_STS_IOCB BIT_12 + +/* + * Port Database structure definition for ISP 24xx. + */ +#define PDO_FORCE_ADISC BIT_1 +#define PDO_FORCE_PLOGI BIT_0 + +struct buffer_credit_24xx { + u32 parameter[28]; +}; + +#define PORT_DATABASE_24XX_SIZE 64 +struct port_database_24xx { + uint16_t flags; +#define PDF_TASK_RETRY_ID BIT_14 +#define PDF_FC_TAPE BIT_7 +#define PDF_ACK0_CAPABLE BIT_6 +#define PDF_FCP2_CONF BIT_5 +#define PDF_CLASS_2 BIT_4 +#define PDF_HARD_ADDR BIT_1 + + /* + * for NVMe, the login_state field has been + * split into nibbles. + * The lower nibble is for FCP. + * The upper nibble is for NVMe. + */ + uint8_t current_login_state; + uint8_t last_login_state; +#define PDS_PLOGI_PENDING 0x03 +#define PDS_PLOGI_COMPLETE 0x04 +#define PDS_PRLI_PENDING 0x05 +#define PDS_PRLI_COMPLETE 0x06 +#define PDS_PORT_UNAVAILABLE 0x07 +#define PDS_PRLO_PENDING 0x09 +#define PDS_LOGO_PENDING 0x11 +#define PDS_PRLI2_PENDING 0x12 + + uint8_t hard_address[3]; + uint8_t reserved_1; + + uint8_t port_id[3]; + uint8_t sequence_id; + + uint16_t port_timer; + + uint16_t nport_handle; /* N_PORT handle. 
*/ + + uint16_t receive_data_size; + uint16_t reserved_2; + + uint8_t prli_svc_param_word_0[2]; /* Big endian */ + /* Bits 15-0 of word 0 */ + uint8_t prli_svc_param_word_3[2]; /* Big endian */ + /* Bits 15-0 of word 3 */ + + uint8_t port_name[WWN_SIZE]; + uint8_t node_name[WWN_SIZE]; + + uint8_t reserved_3[2]; + uint16_t nvme_first_burst_size; + uint16_t prli_nvme_svc_param_word_0; /* Bits 15-0 of word 0 */ + uint16_t prli_nvme_svc_param_word_3; /* Bits 15-0 of word 3 */ + uint8_t secure_login; + uint8_t reserved_4[14]; +}; + +/* + * MB 75h returns a list of DB entries similar to port_database_24xx(64B). + * However, in this case it returns 1st 40 bytes. + */ +struct get_name_list_extended { + __le16 flags; + u8 current_login_state; + u8 last_login_state; + u8 hard_address[3]; + u8 reserved_1; + u8 port_id[3]; + u8 sequence_id; + __le16 port_timer; + __le16 nport_handle; /* N_PORT handle. */ + __le16 receive_data_size; + __le16 reserved_2; + + /* PRLI SVC Param are Big endian */ + u8 prli_svc_param_word_0[2]; /* Bits 15-0 of word 0 */ + u8 prli_svc_param_word_3[2]; /* Bits 15-0 of word 3 */ + u8 port_name[WWN_SIZE]; + u8 node_name[WWN_SIZE]; +}; + +/* MB 75h: This is the short version of the database */ +struct get_name_list { + u8 port_node_name[WWN_SIZE]; /* B7 most sig, B0 least sig */ + __le16 nport_handle; + u8 reserved; +}; + +struct vp_database_24xx { + uint16_t vp_status; + uint8_t options; + uint8_t id; + uint8_t port_name[WWN_SIZE]; + uint8_t node_name[WWN_SIZE]; + uint16_t port_id_low; + uint16_t port_id_high; +}; + +struct nvram_24xx { + /* NVRAM header. */ + uint8_t id[4]; + __le16 nvram_version; + uint16_t reserved_0; + + /* Firmware Initialization Control Block. */ + __le16 version; + uint16_t reserved_1; + __le16 frame_payload_size; + __le16 execution_throttle; + __le16 exchange_count; + __le16 hard_address; + + uint8_t port_name[WWN_SIZE]; + uint8_t node_name[WWN_SIZE]; + + __le16 login_retry_count; + __le16 link_down_on_nos; + __le16 interrupt_delay_timer; + __le16 login_timeout; + + __le32 firmware_options_1; + __le32 firmware_options_2; + __le32 firmware_options_3; + + /* Offset 56. */ + + /* + * BIT 0 = Control Enable + * BIT 1-15 = + * + * BIT 0-7 = Reserved + * BIT 8-10 = Output Swing 1G + * BIT 11-13 = Output Emphasis 1G + * BIT 14-15 = Reserved + * + * BIT 0-7 = Reserved + * BIT 8-10 = Output Swing 2G + * BIT 11-13 = Output Emphasis 2G + * BIT 14-15 = Reserved + * + * BIT 0-7 = Reserved + * BIT 8-10 = Output Swing 4G + * BIT 11-13 = Output Emphasis 4G + * BIT 14-15 = Reserved + */ + __le16 seriallink_options[4]; + + uint16_t reserved_2[16]; + + /* Offset 96. */ + uint16_t reserved_3[16]; + + /* PCIe table entries. */ + uint16_t reserved_4[16]; + + /* Offset 160. */ + uint16_t reserved_5[16]; + + /* Offset 192. */ + uint16_t reserved_6[16]; + + /* Offset 224. 
*/ + uint16_t reserved_7[16]; + + /* + * BIT 0 = Enable spinup delay + * BIT 1 = Disable BIOS + * BIT 2 = Enable Memory Map BIOS + * BIT 3 = Enable Selectable Boot + * BIT 4 = Disable RISC code load + * BIT 5 = Disable Serdes + * BIT 6 = + * BIT 7 = + * + * BIT 8 = + * BIT 9 = + * BIT 10 = Enable lip full login + * BIT 11 = Enable target reset + * BIT 12 = + * BIT 13 = + * BIT 14 = + * BIT 15 = Enable alternate WWN + * + * BIT 16-31 = + */ + __le32 host_p; + + uint8_t alternate_port_name[WWN_SIZE]; + uint8_t alternate_node_name[WWN_SIZE]; + + uint8_t boot_port_name[WWN_SIZE]; + __le16 boot_lun_number; + uint16_t reserved_8; + + uint8_t alt1_boot_port_name[WWN_SIZE]; + __le16 alt1_boot_lun_number; + uint16_t reserved_9; + + uint8_t alt2_boot_port_name[WWN_SIZE]; + __le16 alt2_boot_lun_number; + uint16_t reserved_10; + + uint8_t alt3_boot_port_name[WWN_SIZE]; + __le16 alt3_boot_lun_number; + uint16_t reserved_11; + + /* + * BIT 0 = Selective Login + * BIT 1 = Alt-Boot Enable + * BIT 2 = Reserved + * BIT 3 = Boot Order List + * BIT 4 = Reserved + * BIT 5 = Selective LUN + * BIT 6 = Reserved + * BIT 7-31 = + */ + __le32 efi_parameters; + + uint8_t reset_delay; + uint8_t reserved_12; + uint16_t reserved_13; + + __le16 boot_id_number; + uint16_t reserved_14; + + __le16 max_luns_per_target; + uint16_t reserved_15; + + __le16 port_down_retry_count; + __le16 link_down_timeout; + + /* FCode parameters. */ + __le16 fcode_parameter; + + uint16_t reserved_16[3]; + + /* Offset 352. */ + uint8_t prev_drv_ver_major; + uint8_t prev_drv_ver_submajob; + uint8_t prev_drv_ver_minor; + uint8_t prev_drv_ver_subminor; + + __le16 prev_bios_ver_major; + __le16 prev_bios_ver_minor; + + __le16 prev_efi_ver_major; + __le16 prev_efi_ver_minor; + + __le16 prev_fw_ver_major; + uint8_t prev_fw_ver_minor; + uint8_t prev_fw_ver_subminor; + + uint16_t reserved_17[8]; + + /* Offset 384. */ + uint16_t reserved_18[16]; + + /* Offset 416. */ + uint16_t reserved_19[16]; + + /* Offset 448. */ + uint16_t reserved_20[16]; + + /* Offset 480. */ + uint8_t model_name[16]; + + uint16_t reserved_21[2]; + + /* Offset 500. */ + /* HW Parameter Block. */ + uint16_t pcie_table_sig; + uint16_t pcie_table_offset; + + uint16_t subsystem_vendor_id; + uint16_t subsystem_device_id; + + __le32 checksum; +}; + +/* + * ISP Initialization Control Block. + * Little endian except where noted. + */ +#define ICB_VERSION 1 +struct init_cb_24xx { + __le16 version; + uint16_t reserved_1; + + __le16 frame_payload_size; + __le16 execution_throttle; + __le16 exchange_count; + + __le16 hard_address; + + uint8_t port_name[WWN_SIZE]; /* Big endian. */ + uint8_t node_name[WWN_SIZE]; /* Big endian. */ + + __le16 response_q_inpointer; + __le16 request_q_outpointer; + + __le16 login_retry_count; + + __le16 prio_request_q_outpointer; + + __le16 response_q_length; + __le16 request_q_length; + + __le16 link_down_on_nos; /* Milliseconds. */ + + __le16 prio_request_q_length; + + __le64 request_q_address __packed; + __le64 response_q_address __packed; + __le64 prio_request_q_address __packed; + + __le16 msix; + __le16 msix_atio; + uint8_t reserved_2[4]; + + __le16 atio_q_inpointer; + __le16 atio_q_length; + __le64 atio_q_address __packed; + + __le16 interrupt_delay_timer; /* 100us increments. 
*/ + __le16 login_timeout; + + /* + * BIT 0 = Enable Hard Loop Id + * BIT 1 = Enable Fairness + * BIT 2 = Enable Full-Duplex + * BIT 3 = Reserved + * BIT 4 = Enable Target Mode + * BIT 5 = Disable Initiator Mode + * BIT 6 = Acquire FA-WWN + * BIT 7 = Enable D-port Diagnostics + * + * BIT 8 = Reserved + * BIT 9 = Non Participating LIP + * BIT 10 = Descending Loop ID Search + * BIT 11 = Acquire Loop ID in LIPA + * BIT 12 = Reserved + * BIT 13 = Full Login after LIP + * BIT 14 = Node Name Option + * BIT 15-31 = Reserved + */ + __le32 firmware_options_1; + + /* + * BIT 0 = Operation Mode bit 0 + * BIT 1 = Operation Mode bit 1 + * BIT 2 = Operation Mode bit 2 + * BIT 3 = Operation Mode bit 3 + * BIT 4 = Connection Options bit 0 + * BIT 5 = Connection Options bit 1 + * BIT 6 = Connection Options bit 2 + * BIT 7 = Enable Non part on LIHA failure + * + * BIT 8 = Enable Class 2 + * BIT 9 = Enable ACK0 + * BIT 10 = Reserved + * BIT 11 = Enable FC-SP Security + * BIT 12 = FC Tape Enable + * BIT 13 = Reserved + * BIT 14 = Enable Target PRLI Control + * BIT 15-31 = Reserved + */ + __le32 firmware_options_2; + + /* + * BIT 0 = Reserved + * BIT 1 = Soft ID only + * BIT 2 = Reserved + * BIT 3 = Reserved + * BIT 4 = FCP RSP Payload bit 0 + * BIT 5 = FCP RSP Payload bit 1 + * BIT 6 = Enable Receive Out-of-Order data frame handling + * BIT 7 = Disable Automatic PLOGI on Local Loop + * + * BIT 8 = Reserved + * BIT 9 = Enable Out-of-Order FCP_XFER_RDY relative offset handling + * BIT 10 = Reserved + * BIT 11 = Reserved + * BIT 12 = Reserved + * BIT 13 = Data Rate bit 0 + * BIT 14 = Data Rate bit 1 + * BIT 15 = Data Rate bit 2 + * BIT 16 = Enable 75 ohm Termination Select + * BIT 17-28 = Reserved + * BIT 29 = Enable response queue 0 in index shadowing + * BIT 30 = Enable request queue 0 out index shadowing + * BIT 31 = Reserved + */ + __le32 firmware_options_3; + __le16 qos; + __le16 rid; + uint8_t reserved_3[20]; +}; + +/* + * ISP queue - command entry structure definition. + */ +#define COMMAND_BIDIRECTIONAL 0x75 +struct cmd_bidir { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined */ + uint8_t entry_status; /* Entry status. */ + + uint32_t handle; /* System handle. */ + + __le16 nport_handle; /* N_PORT handle. */ + + __le16 timeout; /* Command timeout. */ + + __le16 wr_dseg_count; /* Write Data segment count. */ + __le16 rd_dseg_count; /* Read Data segment count. */ + + struct scsi_lun lun; /* FCP LUN (BE). */ + + __le16 control_flags; /* Control flags. */ +#define BD_WRAP_BACK BIT_3 +#define BD_READ_DATA BIT_1 +#define BD_WRITE_DATA BIT_0 + + __le16 fcp_cmnd_dseg_len; /* Data segment length. */ + __le64 fcp_cmnd_dseg_address __packed;/* Data segment address. */ + + uint16_t reserved[2]; /* Reserved */ + + __le32 rd_byte_count; /* Total Byte count Read. */ + __le32 wr_byte_count; /* Total Byte count write. */ + + uint8_t port_id[3]; /* PortID of destination port.*/ + uint8_t vp_index; + + struct dsd64 fcp_dsd; +}; + +#define COMMAND_TYPE_6 0x48 /* Command Type 6 entry */ +struct cmd_type_6 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + + __le16 nport_handle; /* N_PORT handle. */ + __le16 timeout; /* Command timeout. */ + + __le16 dseg_count; /* Data segment count. */ + + __le16 fcp_rsp_dsd_len; /* FCP_RSP DSD length. */ + + struct scsi_lun lun; /* FCP LUN (BE). 
*/ + + __le16 control_flags; /* Control flags. */ +#define CF_NEW_SA BIT_12 +#define CF_EN_EDIF BIT_9 +#define CF_ADDITIONAL_PARAM_BLK BIT_8 +#define CF_DIF_SEG_DESCR_ENABLE BIT_3 +#define CF_DATA_SEG_DESCR_ENABLE BIT_2 +#define CF_READ_DATA BIT_1 +#define CF_WRITE_DATA BIT_0 + + __le16 fcp_cmnd_dseg_len; /* Data segment length. */ + /* Data segment address. */ + __le64 fcp_cmnd_dseg_address __packed; + /* Data segment address. */ + __le64 fcp_rsp_dseg_address __packed; + + __le32 byte_count; /* Total byte count. */ + + uint8_t port_id[3]; /* PortID of destination port. */ + uint8_t vp_index; + + struct dsd64 fcp_dsd; +}; + +#define COMMAND_TYPE_7 0x18 /* Command Type 7 entry */ +struct cmd_type_7 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + + __le16 nport_handle; /* N_PORT handle. */ + __le16 timeout; /* Command timeout. */ +#define FW_MAX_TIMEOUT 0x1999 + + __le16 dseg_count; /* Data segment count. */ + uint16_t reserved_1; + + struct scsi_lun lun; /* FCP LUN (BE). */ + + __le16 task_mgmt_flags; /* Task management flags. */ +#define TMF_CLEAR_ACA BIT_14 +#define TMF_TARGET_RESET BIT_13 +#define TMF_LUN_RESET BIT_12 +#define TMF_CLEAR_TASK_SET BIT_10 +#define TMF_ABORT_TASK_SET BIT_9 +#define TMF_DSD_LIST_ENABLE BIT_2 +#define TMF_READ_DATA BIT_1 +#define TMF_WRITE_DATA BIT_0 + + uint8_t task; +#define TSK_SIMPLE 0 +#define TSK_HEAD_OF_QUEUE 1 +#define TSK_ORDERED 2 +#define TSK_ACA 4 +#define TSK_UNTAGGED 5 + + uint8_t crn; + + uint8_t fcp_cdb[MAX_CMDSZ]; /* SCSI command words. */ + __le32 byte_count; /* Total byte count. */ + + uint8_t port_id[3]; /* PortID of destination port. */ + uint8_t vp_index; + + struct dsd64 dsd; +}; + +#define COMMAND_TYPE_CRC_2 0x6A /* Command Type CRC_2 (Type 6) + * (T10-DIF) */ +struct cmd_type_crc_2 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + + __le16 nport_handle; /* N_PORT handle. */ + __le16 timeout; /* Command timeout. */ + + __le16 dseg_count; /* Data segment count. */ + + __le16 fcp_rsp_dseg_len; /* FCP_RSP DSD length. */ + + struct scsi_lun lun; /* FCP LUN (BE). */ + + __le16 control_flags; /* Control flags. */ + + __le16 fcp_cmnd_dseg_len; /* Data segment length. */ + __le64 fcp_cmnd_dseg_address __packed; + /* Data segment address. */ + __le64 fcp_rsp_dseg_address __packed; + + __le32 byte_count; /* Total byte count. */ + + uint8_t port_id[3]; /* PortID of destination port. */ + uint8_t vp_index; + + __le64 crc_context_address __packed; /* Data segment address. */ + __le16 crc_context_len; /* Data segment length. */ + uint16_t reserved_1; /* MUST be set to 0. */ +}; + + +/* + * ISP queue - status entry structure definition. + */ +#define STATUS_TYPE 0x03 /* Status entry. */ +struct sts_entry_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + + __le16 comp_status; /* Completion status. */ + __le16 ox_id; /* OX_ID used by the firmware. */ + + __le32 residual_len; /* FW calc residual transfer length. 
*/ + + union { + __le16 reserved_1; + __le16 nvme_rsp_pyld_len; + __le16 edif_sa_index; /* edif sa_index used for initiator read data */ + }; + + __le16 state_flags; /* State flags. */ +#define SF_TRANSFERRED_DATA BIT_11 +#define SF_NVME_ERSP BIT_6 +#define SF_FCP_RSP_DMA BIT_0 + + __le16 status_qualifier; + __le16 scsi_status; /* SCSI status. */ +#define SS_CONFIRMATION_REQ BIT_12 + + __le32 rsp_residual_count; /* FCP RSP residual count. */ + + __le32 sense_len; /* FCP SENSE length. */ + + union { + struct { + __le32 rsp_data_len; /* FCP response data length */ + uint8_t data[28]; /* FCP rsp/sense information */ + }; + struct nvme_fc_ersp_iu nvme_ersp; + uint8_t nvme_ersp_data[32]; + }; + + /* + * If DIF Error is set in comp_status, these additional fields are + * defined: + * + * !!! NOTE: Firmware sends expected/actual DIF data in big endian + * format; but all of the "data" field gets swab32-d in the beginning + * of qla2x00_status_entry(). + * + * &data[10] : uint8_t report_runt_bg[2]; - computed guard + * &data[12] : uint8_t actual_dif[8]; - DIF Data received + * &data[20] : uint8_t expected_dif[8]; - DIF Data computed + */ +}; + + +/* + * Status entry completion status + */ +#define CS_DATA_REASSEMBLY_ERROR 0x11 /* Data Reassembly Error.. */ +#define CS_ABTS_BY_TARGET 0x13 /* Target send ABTS to abort IOCB. */ +#define CS_FW_RESOURCE 0x2C /* Firmware Resource Unavailable. */ +#define CS_TASK_MGMT_OVERRUN 0x30 /* Task management overrun (8+). */ +#define CS_ABORT_BY_TARGET 0x47 /* Abort By Target. */ + +/* + * ISP queue - marker entry structure definition. + */ +#define MARKER_TYPE 0x04 /* Marker entry. */ +struct mrk_entry_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t handle_count; /* Handle count. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + + __le16 nport_handle; /* N_PORT handle. */ + + uint8_t modifier; /* Modifier (7-0). */ +#define MK_SYNC_ID_LUN 0 /* Synchronize ID/LUN */ +#define MK_SYNC_ID 1 /* Synchronize ID */ +#define MK_SYNC_ALL 2 /* Synchronize all ID/LUN */ + uint8_t reserved_1; + + uint8_t reserved_2; + uint8_t vp_index; + + uint16_t reserved_3; + + uint8_t lun[8]; /* FCP LUN (BE). */ + uint8_t reserved_4[40]; +}; + +/* + * ISP queue - CT Pass-Through entry structure definition. + */ +#define CT_IOCB_TYPE 0x29 /* CT Pass-Through IOCB entry */ +struct ct_entry_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System Defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + + __le16 comp_status; /* Completion status. */ + + __le16 nport_handle; /* N_PORT handle. */ + + __le16 cmd_dsd_count; + + uint8_t vp_index; + uint8_t reserved_1; + + __le16 timeout; /* Command timeout. */ + uint16_t reserved_2; + + __le16 rsp_dsd_count; + + uint8_t reserved_3[10]; + + __le32 rsp_byte_count; + __le32 cmd_byte_count; + + struct dsd64 dsd[2]; +}; + +#define PURX_ELS_HEADER_SIZE 0x18 + +/* + * ISP queue - PUREX IOCB entry structure definition + */ +#define PUREX_IOCB_TYPE 0x51 /* CT Pass Through IOCB entry */ +struct purex_entry_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. 
*/ + + __le16 reserved1; + uint8_t vp_idx; + uint8_t reserved2; + + __le16 status_flags; + __le16 nport_handle; + + __le16 frame_size; + __le16 trunc_frame_size; + + __le32 rx_xchg_addr; + + uint8_t d_id[3]; + uint8_t r_ctl; + + uint8_t s_id[3]; + uint8_t cs_ctl; + + uint8_t f_ctl[3]; + uint8_t type; + + __le16 seq_cnt; + uint8_t df_ctl; + uint8_t seq_id; + + __le16 rx_id; + __le16 ox_id; + __le32 param; + + uint8_t els_frame_payload[20]; +}; + +/* + * ISP queue - ELS Pass-Through entry structure definition. + */ +#define ELS_IOCB_TYPE 0x53 /* ELS Pass-Through IOCB entry */ +struct els_entry_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System Defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + + __le16 comp_status; /* response only */ + __le16 nport_handle; + + __le16 tx_dsd_count; + + uint8_t vp_index; + uint8_t sof_type; +#define EST_SOFI3 (1 << 4) +#define EST_SOFI2 (3 << 4) + + __le32 rx_xchg_address; /* Receive exchange address. */ + __le16 rx_dsd_count; + + uint8_t opcode; + uint8_t reserved_2; + + uint8_t d_id[3]; + uint8_t s_id[3]; + + __le16 control_flags; /* Control flags. */ +#define ECF_PAYLOAD_DESCR_MASK (BIT_15|BIT_14|BIT_13) +#define EPD_ELS_COMMAND (0 << 13) +#define EPD_ELS_ACC (1 << 13) +#define EPD_ELS_RJT (2 << 13) +#define EPD_RX_XCHG (3 << 13) /* terminate exchange */ +#define ECF_CLR_PASSTHRU_PEND BIT_12 +#define ECF_INCL_FRAME_HDR BIT_11 +#define ECF_SEC_LOGIN BIT_3 + + union { + struct { + __le32 rx_byte_count; + __le32 tx_byte_count; + + __le64 tx_address __packed; /* DSD 0 address. */ + __le32 tx_len; /* DSD 0 length. */ + + __le64 rx_address __packed; /* DSD 1 address. */ + __le32 rx_len; /* DSD 1 length. */ + }; + struct { + __le32 total_byte_count; + __le32 error_subcode_1; + __le32 error_subcode_2; + __le32 error_subcode_3; + }; + }; +}; + +struct els_sts_entry_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System Defined. */ + uint8_t entry_status; /* Entry Status. */ + + __le32 handle; /* System handle. */ + + __le16 comp_status; + + __le16 nport_handle; /* N_PORT handle. */ + + __le16 reserved_1; + + uint8_t vp_index; + uint8_t sof_type; + + __le32 rx_xchg_address; /* Receive exchange address. */ + __le16 reserved_2; + + uint8_t opcode; + uint8_t reserved_3; + + uint8_t d_id[3]; + uint8_t s_id[3]; + + __le16 control_flags; /* Control flags. */ + __le32 total_byte_count; + __le32 error_subcode_1; + __le32 error_subcode_2; + __le32 error_subcode_3; + + __le32 reserved_4[4]; +}; +/* + * ISP queue - Mailbox Command entry structure definition. + */ +#define MBX_IOCB_TYPE 0x39 +struct mbx_entry_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t handle_count; /* Handle count. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + + uint16_t mbx[28]; +}; + + +#define LOGINOUT_PORT_IOCB_TYPE 0x52 /* Login/Logout Port entry. */ +struct logio_entry_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + + __le16 comp_status; /* Completion status. */ +#define CS_LOGIO_ERROR 0x31 /* Login/Logout IOCB error. */ + + __le16 nport_handle; /* N_PORT handle. */ + + __le16 control_flags; /* Control flags. */ + /* Modifiers. 
*/ +#define LCF_INCLUDE_SNS BIT_10 /* Include SNS (FFFFFC) during LOGO. */ +#define LCF_FCP2_OVERRIDE BIT_9 /* Set/Reset word 3 of PRLI. */ +#define LCF_CLASS_2 BIT_8 /* Enable class 2 during PLOGI. */ +#define LCF_FREE_NPORT BIT_7 /* Release NPORT handle after LOGO. */ +#define LCF_COMMON_FEAT BIT_7 /* PLOGI - Set Common Features Field */ +#define LCF_EXPL_LOGO BIT_6 /* Perform an explicit LOGO. */ +#define LCF_NVME_PRLI BIT_6 /* Perform NVME FC4 PRLI */ +#define LCF_SKIP_PRLI BIT_5 /* Skip PRLI after PLOGI. */ +#define LCF_IMPL_LOGO_ALL BIT_5 /* Implicit LOGO to all ports. */ +#define LCF_COND_PLOGI BIT_4 /* PLOGI only if not logged-in. */ +#define LCF_IMPL_LOGO BIT_4 /* Perform an implicit LOGO. */ +#define LCF_IMPL_PRLO BIT_4 /* Perform an implicit PRLO. */ + /* Commands. */ +#define LCF_COMMAND_PLOGI 0x00 /* PLOGI. */ +#define LCF_COMMAND_PRLI 0x01 /* PRLI. */ +#define LCF_COMMAND_PDISC 0x02 /* PDISC. */ +#define LCF_COMMAND_ADISC 0x03 /* ADISC. */ +#define LCF_COMMAND_LOGO 0x08 /* LOGO. */ +#define LCF_COMMAND_PRLO 0x09 /* PRLO. */ +#define LCF_COMMAND_TPRLO 0x0A /* TPRLO. */ + + uint8_t vp_index; + uint8_t reserved_1; + + uint8_t port_id[3]; /* PortID of destination port. */ + + uint8_t rsp_size; /* Response size in 32bit words. */ + + __le32 io_parameter[11]; /* General I/O parameters. */ +#define LIO_COMM_FEAT_FCSP BIT_21 +#define LIO_COMM_FEAT_CIO BIT_31 +#define LSC_SCODE_NOLINK 0x01 +#define LSC_SCODE_NOIOCB 0x02 +#define LSC_SCODE_NOXCB 0x03 +#define LSC_SCODE_CMD_FAILED 0x04 +#define LSC_SCODE_NOFABRIC 0x05 +#define LSC_SCODE_FW_NOT_READY 0x07 +#define LSC_SCODE_NOT_LOGGED_IN 0x09 +#define LSC_SCODE_NOPCB 0x0A + +#define LSC_SCODE_ELS_REJECT 0x18 +#define LSC_SCODE_CMD_PARAM_ERR 0x19 +#define LSC_SCODE_PORTID_USED 0x1A +#define LSC_SCODE_NPORT_USED 0x1B +#define LSC_SCODE_NONPORT 0x1C +#define LSC_SCODE_LOGGED_IN 0x1D +#define LSC_SCODE_NOFLOGI_ACC 0x1F +}; + +#define TSK_MGMT_IOCB_TYPE 0x14 +struct tsk_mgmt_entry { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t handle_count; /* Handle count. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + + __le16 nport_handle; /* N_PORT handle. */ + + uint16_t reserved_1; + + __le16 delay; /* Activity delay in seconds. */ + + __le16 timeout; /* Command timeout. */ + + struct scsi_lun lun; /* FCP LUN (BE). */ + + __le32 control_flags; /* Control Flags. */ +#define TCF_NOTMCMD_TO_TARGET BIT_31 +#define TCF_LUN_RESET BIT_4 +#define TCF_ABORT_TASK_SET BIT_3 +#define TCF_CLEAR_TASK_SET BIT_2 +#define TCF_TARGET_RESET BIT_1 +#define TCF_CLEAR_ACA BIT_0 + + uint8_t reserved_2[20]; + + uint8_t port_id[3]; /* PortID of destination port. */ + uint8_t vp_index; + + uint8_t reserved_3[12]; +}; + +#define ABORT_IOCB_TYPE 0x33 +struct abort_entry_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t handle_count; /* Handle count. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + + union { + __le16 nport_handle; /* N_PORT handle. */ + __le16 comp_status; /* Completion status. */ + }; + + __le16 options; /* Options. */ +#define AOF_NO_ABTS BIT_0 /* Do not send any ABTS. */ +#define AOF_NO_RRQ BIT_1 /* Do not send RRQ. */ +#define AOF_ABTS_TIMEOUT BIT_2 /* Disable logout on ABTS timeout. */ +#define AOF_ABTS_RTY_CNT BIT_3 /* Use driver specified retry count. */ +#define AOF_RSP_TIMEOUT BIT_4 /* Use specified response timeout. */ + + + uint32_t handle_to_abort; /* System handle to abort. 
*/ + + __le16 req_que_no; + uint8_t reserved_1[30]; + + uint8_t port_id[3]; /* PortID of destination port. */ + uint8_t vp_index; + u8 reserved_2[4]; + union { + struct { + __le16 abts_rty_cnt; + __le16 rsp_timeout; + } drv; + struct { + u8 ba_rjt_vendorUnique; + u8 ba_rjt_reasonCodeExpl; + u8 ba_rjt_reasonCode; + u8 reserved_3; + } fw; + }; + u8 reserved_4[4]; +}; + +#define ABTS_RCV_TYPE 0x54 +#define ABTS_RSP_TYPE 0x55 +struct abts_entry_24xx { + uint8_t entry_type; + uint8_t entry_count; + uint8_t handle_count; + uint8_t entry_status; + + __le32 handle; /* type 0x55 only */ + + __le16 comp_status; /* type 0x55 only */ + __le16 nport_handle; /* type 0x54 only */ + + __le16 control_flags; /* type 0x55 only */ + uint8_t vp_idx; + uint8_t sof_type; /* sof_type is upper nibble */ + + __le32 rx_xch_addr; + + uint8_t d_id[3]; + uint8_t r_ctl; + + uint8_t s_id[3]; + uint8_t cs_ctl; + + uint8_t f_ctl[3]; + uint8_t type; + + __le16 seq_cnt; + uint8_t df_ctl; + uint8_t seq_id; + + __le16 rx_id; + __le16 ox_id; + + __le32 param; + + union { + struct { + __le32 subcode3; + __le32 rsvd; + __le32 subcode1; + __le32 subcode2; + } error; + struct { + __le16 rsrvd1; + uint8_t last_seq_id; + uint8_t seq_id_valid; + __le16 aborted_rx_id; + __le16 aborted_ox_id; + __le16 high_seq_cnt; + __le16 low_seq_cnt; + } ba_acc; + struct { + uint8_t vendor_unique; + uint8_t explanation; + uint8_t reason; + } ba_rjt; + } payload; + + __le32 rx_xch_addr_to_abort; +} __packed; + +/* ABTS payload explanation values */ +#define BA_RJT_EXP_NO_ADDITIONAL 0 +#define BA_RJT_EXP_INV_OX_RX_ID 3 +#define BA_RJT_EXP_SEQ_ABORTED 5 + +/* ABTS payload reason values */ +#define BA_RJT_RSN_INV_CMD_CODE 1 +#define BA_RJT_RSN_LOGICAL_ERROR 3 +#define BA_RJT_RSN_LOGICAL_BUSY 5 +#define BA_RJT_RSN_PROTOCOL_ERROR 7 +#define BA_RJT_RSN_UNABLE_TO_PERFORM 9 +#define BA_RJT_RSN_VENDOR_SPECIFIC 0xff + +/* FC_F values */ +#define FC_TYPE_BLD 0x000 /* Basic link data */ +#define FC_F_CTL_RSP_CNTXT 0x800000 /* Responder of exchange */ +#define FC_F_CTL_LAST_SEQ 0x100000 /* Last sequence */ +#define FC_F_CTL_END_SEQ 0x80000 /* Last sequence */ +#define FC_F_CTL_SEQ_INIT 0x010000 /* Sequence initiative */ +#define FC_ROUTING_BLD 0x80 /* Basic link data frame */ +#define FC_R_CTL_BLD_BA_ACC 0x04 /* BA_ACC (basic accept) */ + +/* + * ISP I/O Register Set structure definitions. + */ +struct device_reg_24xx { + __le32 flash_addr; /* Flash/NVRAM BIOS address. */ +#define FARX_DATA_FLAG BIT_31 +#define FARX_ACCESS_FLASH_CONF 0x7FFD0000 +#define FARX_ACCESS_FLASH_DATA 0x7FF00000 +#define FARX_ACCESS_NVRAM_CONF 0x7FFF0000 +#define FARX_ACCESS_NVRAM_DATA 0x7FFE0000 + +#define FA_NVRAM_FUNC0_ADDR 0x80 +#define FA_NVRAM_FUNC1_ADDR 0x180 + +#define FA_NVRAM_VPD_SIZE 0x200 +#define FA_NVRAM_VPD0_ADDR 0x00 +#define FA_NVRAM_VPD1_ADDR 0x100 + +#define FA_BOOT_CODE_ADDR 0x00000 + /* + * RISC code begins at offset 512KB + * within flash. Consisting of two + * contiguous RISC code segments. 
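+	 *
+	 * Editor's note, inferred rather than stated in the original: the
+	 * FA_* values below are 32-bit word offsets into flash, which is
+	 * how "512KB" maps to FA_RISC_CODE_ADDR 0x20000
+	 * (0x20000 words * 4 bytes = 0x80000 bytes = 512KB).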
+ */ +#define FA_RISC_CODE_ADDR 0x20000 +#define FA_RISC_CODE_SEGMENTS 2 + +#define FA_FLASH_DESCR_ADDR_24 0x11000 +#define FA_FLASH_LAYOUT_ADDR_24 0x11400 +#define FA_NPIV_CONF0_ADDR_24 0x16000 +#define FA_NPIV_CONF1_ADDR_24 0x17000 + +#define FA_FW_AREA_ADDR 0x40000 +#define FA_VPD_NVRAM_ADDR 0x48000 +#define FA_FEATURE_ADDR 0x4C000 +#define FA_FLASH_DESCR_ADDR 0x50000 +#define FA_FLASH_LAYOUT_ADDR 0x50400 +#define FA_HW_EVENT0_ADDR 0x54000 +#define FA_HW_EVENT1_ADDR 0x54400 +#define FA_HW_EVENT_SIZE 0x200 +#define FA_HW_EVENT_ENTRY_SIZE 4 +#define FA_NPIV_CONF0_ADDR 0x5C000 +#define FA_NPIV_CONF1_ADDR 0x5D000 +#define FA_FCP_PRIO0_ADDR 0x10000 +#define FA_FCP_PRIO1_ADDR 0x12000 + +/* + * Flash Error Log Event Codes. + */ +#define HW_EVENT_RESET_ERR 0xF00B +#define HW_EVENT_ISP_ERR 0xF020 +#define HW_EVENT_PARITY_ERR 0xF022 +#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023 +#define HW_EVENT_FLASH_FW_ERR 0xF024 + + __le32 flash_data; /* Flash/NVRAM BIOS data. */ + + __le32 ctrl_status; /* Control/Status. */ +#define CSRX_FLASH_ACCESS_ERROR BIT_18 /* Flash/NVRAM Access Error. */ +#define CSRX_DMA_ACTIVE BIT_17 /* DMA Active status. */ +#define CSRX_DMA_SHUTDOWN BIT_16 /* DMA Shutdown control status. */ +#define CSRX_FUNCTION BIT_15 /* Function number. */ + /* PCI-X Bus Mode. */ +#define CSRX_PCIX_BUS_MODE_MASK (BIT_11|BIT_10|BIT_9|BIT_8) +#define PBM_PCI_33MHZ (0 << 8) +#define PBM_PCIX_M1_66MHZ (1 << 8) +#define PBM_PCIX_M1_100MHZ (2 << 8) +#define PBM_PCIX_M1_133MHZ (3 << 8) +#define PBM_PCIX_M2_66MHZ (5 << 8) +#define PBM_PCIX_M2_100MHZ (6 << 8) +#define PBM_PCIX_M2_133MHZ (7 << 8) +#define PBM_PCI_66MHZ (8 << 8) + /* Max Write Burst byte count. */ +#define CSRX_MAX_WRT_BURST_MASK (BIT_5|BIT_4) +#define MWB_512_BYTES (0 << 4) +#define MWB_1024_BYTES (1 << 4) +#define MWB_2048_BYTES (2 << 4) +#define MWB_4096_BYTES (3 << 4) + +#define CSRX_64BIT_SLOT BIT_2 /* PCI 64-Bit Bus Slot. */ +#define CSRX_FLASH_ENABLE BIT_1 /* Flash BIOS Read/Write enable. */ +#define CSRX_ISP_SOFT_RESET BIT_0 /* ISP soft reset. */ + + __le32 ictrl; /* Interrupt control. */ +#define ICRX_EN_RISC_INT BIT_3 /* Enable RISC interrupts on PCI. */ + + __le32 istatus; /* Interrupt status. */ +#define ISRX_RISC_INT BIT_3 /* RISC interrupt. */ + + __le32 unused_1[2]; /* Gap. */ + + /* Request Queue. */ + __le32 req_q_in; /* In-Pointer. */ + __le32 req_q_out; /* Out-Pointer. */ + /* Response Queue. */ + __le32 rsp_q_in; /* In-Pointer. */ + __le32 rsp_q_out; /* Out-Pointer. */ + /* Priority Request Queue. */ + __le32 preq_q_in; /* In-Pointer. */ + __le32 preq_q_out; /* Out-Pointer. */ + + __le32 unused_2[2]; /* Gap. */ + + /* ATIO Queue. */ + __le32 atio_q_in; /* In-Pointer. */ + __le32 atio_q_out; /* Out-Pointer. */ + + __le32 host_status; +#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */ +#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */ + + __le32 hccr; /* Host command & control register. */ + /* HCCR statuses. */ +#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */ +#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */ + /* HCCR commands. */ + /* NOOP. */ +#define HCCRX_NOOP 0x00000000 + /* Set RISC Reset. */ +#define HCCRX_SET_RISC_RESET 0x10000000 + /* Clear RISC Reset. */ +#define HCCRX_CLR_RISC_RESET 0x20000000 + /* Set RISC Pause. */ +#define HCCRX_SET_RISC_PAUSE 0x30000000 + /* Releases RISC Pause. */ +#define HCCRX_REL_RISC_PAUSE 0x40000000 + /* Set HOST to RISC interrupt. */ +#define HCCRX_SET_HOST_INT 0x50000000 + /* Clear HOST to RISC interrupt. 
*/ +#define HCCRX_CLR_HOST_INT 0x60000000 + /* Clear RISC to PCI interrupt. */ +#define HCCRX_CLR_RISC_INT 0xA0000000 + + __le32 gpiod; /* GPIO Data register. */ + + /* LED update mask. */ +#define GPDX_LED_UPDATE_MASK (BIT_20|BIT_19|BIT_18) + /* Data update mask. */ +#define GPDX_DATA_UPDATE_MASK (BIT_17|BIT_16) + /* Data update mask. */ +#define GPDX_DATA_UPDATE_2_MASK (BIT_28|BIT_27|BIT_26|BIT_17|BIT_16) + /* LED control mask. */ +#define GPDX_LED_COLOR_MASK (BIT_4|BIT_3|BIT_2) + /* LED bit values. Color names as + * referenced in fw spec. + */ +#define GPDX_LED_YELLOW_ON BIT_2 +#define GPDX_LED_GREEN_ON BIT_3 +#define GPDX_LED_AMBER_ON BIT_4 + /* Data in/out. */ +#define GPDX_DATA_INOUT (BIT_1|BIT_0) + + __le32 gpioe; /* GPIO Enable register. */ + /* Enable update mask. */ +#define GPEX_ENABLE_UPDATE_MASK (BIT_17|BIT_16) + /* Enable update mask. */ +#define GPEX_ENABLE_UPDATE_2_MASK (BIT_28|BIT_27|BIT_26|BIT_17|BIT_16) + /* Enable. */ +#define GPEX_ENABLE (BIT_1|BIT_0) + + __le32 iobase_addr; /* I/O Bus Base Address register. */ + + __le32 unused_3[10]; /* Gap. */ + + __le16 mailbox0; + __le16 mailbox1; + __le16 mailbox2; + __le16 mailbox3; + __le16 mailbox4; + __le16 mailbox5; + __le16 mailbox6; + __le16 mailbox7; + __le16 mailbox8; + __le16 mailbox9; + __le16 mailbox10; + __le16 mailbox11; + __le16 mailbox12; + __le16 mailbox13; + __le16 mailbox14; + __le16 mailbox15; + __le16 mailbox16; + __le16 mailbox17; + __le16 mailbox18; + __le16 mailbox19; + __le16 mailbox20; + __le16 mailbox21; + __le16 mailbox22; + __le16 mailbox23; + __le16 mailbox24; + __le16 mailbox25; + __le16 mailbox26; + __le16 mailbox27; + __le16 mailbox28; + __le16 mailbox29; + __le16 mailbox30; + __le16 mailbox31; + + __le32 iobase_window; + __le32 iobase_c4; + __le32 iobase_c8; + __le32 unused_4_1[6]; /* Gap. */ + __le32 iobase_q; + __le32 unused_5[2]; /* Gap. */ + __le32 iobase_select; + __le32 unused_6[2]; /* Gap. */ + __le32 iobase_sdata; +}; +/* RISC-RISC semaphore register PCI offet */ +#define RISC_REGISTER_BASE_OFFSET 0x7010 +#define RISC_REGISTER_WINDOW_OFFSET 0x6 + +/* RISC-RISC semaphore/flag register (risc address 0x7016) */ + +#define RISC_SEMAPHORE 0x1UL +#define RISC_SEMAPHORE_WE (RISC_SEMAPHORE << 16) +#define RISC_SEMAPHORE_CLR (RISC_SEMAPHORE_WE | 0x0UL) +#define RISC_SEMAPHORE_SET (RISC_SEMAPHORE_WE | RISC_SEMAPHORE) + +#define RISC_SEMAPHORE_FORCE 0x8000UL +#define RISC_SEMAPHORE_FORCE_WE (RISC_SEMAPHORE_FORCE << 16) +#define RISC_SEMAPHORE_FORCE_CLR (RISC_SEMAPHORE_FORCE_WE | 0x0UL) +#define RISC_SEMAPHORE_FORCE_SET \ + (RISC_SEMAPHORE_FORCE_WE | RISC_SEMAPHORE_FORCE) + +/* RISC semaphore timeouts (ms) */ +#define TIMEOUT_SEMAPHORE 2500 +#define TIMEOUT_SEMAPHORE_FORCE 2000 +#define TIMEOUT_TOTAL_ELAPSED 4500 + +/* Trace Control *************************************************************/ + +#define TC_AEN_DISABLE 0 + +#define TC_EFT_ENABLE 4 +#define TC_EFT_DISABLE 5 + +#define TC_FCE_ENABLE 8 +#define TC_FCE_OPTIONS 0 +#define TC_FCE_DEFAULT_RX_SIZE 2112 +#define TC_FCE_DEFAULT_TX_SIZE 2112 +#define TC_FCE_DISABLE 9 +#define TC_FCE_DISABLE_TRACE BIT_0 + +/* MID Support ***************************************************************/ + +#define MIN_MULTI_ID_FABRIC 64 /* Must be power-of-2. */ +#define MAX_MULTI_ID_FABRIC 256 /* ... 
*/ + +struct mid_conf_entry_24xx { + uint16_t reserved_1; + + /* + * BIT 0 = Enable Hard Loop Id + * BIT 1 = Acquire Loop ID in LIPA + * BIT 2 = ID not Acquired + * BIT 3 = Enable VP + * BIT 4 = Enable Initiator Mode + * BIT 5 = Disable Target Mode + * BIT 6-7 = Reserved + */ + uint8_t options; + + uint8_t hard_address; + + uint8_t port_name[WWN_SIZE]; + uint8_t node_name[WWN_SIZE]; +}; + +struct mid_init_cb_24xx { + struct init_cb_24xx init_cb; + + __le16 count; + __le16 options; + + struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC]; +}; + + +struct mid_db_entry_24xx { + uint16_t status; +#define MDBS_NON_PARTIC BIT_3 +#define MDBS_ID_ACQUIRED BIT_1 +#define MDBS_ENABLED BIT_0 + + uint8_t options; + uint8_t hard_address; + + uint8_t port_name[WWN_SIZE]; + uint8_t node_name[WWN_SIZE]; + + uint8_t port_id[3]; + uint8_t reserved_1; +}; + +/* + * Virtual Port Control IOCB + */ +#define VP_CTRL_IOCB_TYPE 0x30 /* Virtual Port Control entry. */ +struct vp_ctrl_entry_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + + __le16 vp_idx_failed; + + __le16 comp_status; /* Completion status. */ +#define CS_VCE_IOCB_ERROR 0x01 /* Error processing IOCB */ +#define CS_VCE_ACQ_ID_ERROR 0x02 /* Error while acquireing ID. */ +#define CS_VCE_BUSY 0x05 /* Firmware not ready to accept cmd. */ + + __le16 command; +#define VCE_COMMAND_ENABLE_VPS 0x00 /* Enable VPs. */ +#define VCE_COMMAND_DISABLE_VPS 0x08 /* Disable VPs. */ +#define VCE_COMMAND_DISABLE_VPS_REINIT 0x09 /* Disable VPs and reinit link. */ +#define VCE_COMMAND_DISABLE_VPS_LOGO 0x0a /* Disable VPs and LOGO ports. */ +#define VCE_COMMAND_DISABLE_VPS_LOGO_ALL 0x0b /* Disable VPs and LOGO ports. */ + + __le16 vp_count; + + uint8_t vp_idx_map[16]; + __le16 flags; + __le16 id; + uint16_t reserved_4; + __le16 hopct; + uint8_t reserved_5[24]; +}; + +/* + * Modify Virtual Port Configuration IOCB + */ +#define VP_CONFIG_IOCB_TYPE 0x31 /* Virtual Port Config entry. */ +struct vp_config_entry_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t handle_count; + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + + __le16 flags; +#define CS_VF_BIND_VPORTS_TO_VF BIT_0 +#define CS_VF_SET_QOS_OF_VPORTS BIT_1 +#define CS_VF_SET_HOPS_OF_VPORTS BIT_2 + + __le16 comp_status; /* Completion status. */ +#define CS_VCT_STS_ERROR 0x01 /* Specified VPs were not disabled. */ +#define CS_VCT_CNT_ERROR 0x02 /* Invalid VP count. */ +#define CS_VCT_ERROR 0x03 /* Unknown error. */ +#define CS_VCT_IDX_ERROR 0x02 /* Invalid VP index. */ +#define CS_VCT_BUSY 0x05 /* Firmware not ready to accept cmd. */ + + uint8_t command; +#define VCT_COMMAND_MOD_VPS 0x00 /* Modify VP configurations. */ +#define VCT_COMMAND_MOD_ENABLE_VPS 0x01 /* Modify configuration & enable VPs. */ + + uint8_t vp_count; + + uint8_t vp_index1; + uint8_t vp_index2; + + uint8_t options_idx1; + uint8_t hard_address_idx1; + uint16_t reserved_vp1; + uint8_t port_name_idx1[WWN_SIZE]; + uint8_t node_name_idx1[WWN_SIZE]; + + uint8_t options_idx2; + uint8_t hard_address_idx2; + uint16_t reserved_vp2; + uint8_t port_name_idx2[WWN_SIZE]; + uint8_t node_name_idx2[WWN_SIZE]; + __le16 id; + uint16_t reserved_4; + __le16 hopct; + uint8_t reserved_5[2]; +}; + +#define VP_RPT_ID_IOCB_TYPE 0x32 /* Report ID Acquisition entry. 
*/ +enum VP_STATUS { + VP_STAT_COMPL, + VP_STAT_FAIL, + VP_STAT_ID_CHG, + VP_STAT_SNS_TO, /* timeout */ + VP_STAT_SNS_RJT, + VP_STAT_SCR_TO, /* timeout */ + VP_STAT_SCR_RJT, +}; + +enum VP_FLAGS { + VP_FLAGS_CON_FLOOP = 1, + VP_FLAGS_CON_P2P = 2, + VP_FLAGS_CON_FABRIC = 3, + VP_FLAGS_NAME_VALID = BIT_5, +}; + +struct vp_rpt_id_entry_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + __le32 resv1; + uint8_t vp_acquired; + uint8_t vp_setup; + uint8_t vp_idx; /* Format 0=reserved */ + uint8_t vp_status; /* Format 0=reserved */ + + uint8_t port_id[3]; + uint8_t format; + union { + struct _f0 { + /* format 0 loop */ + uint8_t vp_idx_map[16]; + uint8_t reserved_4[32]; + } f0; + struct _f1 { + /* format 1 fabric */ + uint8_t vpstat1_subcode; /* vp_status=1 subcode */ + uint8_t flags; +#define TOPO_MASK 0xE +#define TOPO_FL 0x2 +#define TOPO_N2N 0x4 +#define TOPO_F 0x6 + + uint16_t fip_flags; + uint8_t rsv2[12]; + + uint8_t ls_rjt_vendor; + uint8_t ls_rjt_explanation; + uint8_t ls_rjt_reason; + uint8_t rsv3[5]; + + uint8_t port_name[8]; + uint8_t node_name[8]; + uint16_t bbcr; + uint8_t reserved_5[6]; + } f1; + struct _f2 { /* format 2: N2N direct connect */ + uint8_t vpstat1_subcode; + uint8_t flags; + uint16_t fip_flags; + uint8_t rsv2[12]; + + uint8_t ls_rjt_vendor; + uint8_t ls_rjt_explanation; + uint8_t ls_rjt_reason; + uint8_t rsv3[5]; + + uint8_t port_name[8]; + uint8_t node_name[8]; + uint16_t bbcr; + uint8_t reserved_5[2]; + uint8_t remote_nport_id[4]; + } f2; + } u; +}; + +#define VF_EVFP_IOCB_TYPE 0x26 /* Exchange Virtual Fabric Parameters entry. */ +struct vf_evfp_entry_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + __le16 comp_status; /* Completion status. */ + __le16 timeout; /* timeout */ + __le16 adim_tagging_mode; + + __le16 vfport_id; + uint32_t exch_addr; + + __le16 nport_handle; /* N_PORT handle. */ + __le16 control_flags; + uint32_t io_parameter_0; + uint32_t io_parameter_1; + __le64 tx_address __packed; /* Data segment 0 address. */ + uint32_t tx_len; /* Data segment 0 length. */ + __le64 rx_address __packed; /* Data segment 1 address. */ + uint32_t rx_len; /* Data segment 1 length. 
*/ +}; + +/* END MID Support ***********************************************************/ + +/* Flash Description Table ***************************************************/ + +struct qla_fdt_layout { + uint8_t sig[4]; + __le16 version; + __le16 len; + __le16 checksum; + uint8_t unused1[2]; + uint8_t model[16]; + __le16 man_id; + __le16 id; + uint8_t flags; + uint8_t erase_cmd; + uint8_t alt_erase_cmd; + uint8_t wrt_enable_cmd; + uint8_t wrt_enable_bits; + uint8_t wrt_sts_reg_cmd; + uint8_t unprotect_sec_cmd; + uint8_t read_man_id_cmd; + __le32 block_size; + __le32 alt_block_size; + __le32 flash_size; + __le32 wrt_enable_data; + uint8_t read_id_addr_len; + uint8_t wrt_disable_bits; + uint8_t read_dev_id_len; + uint8_t chip_erase_cmd; + __le16 read_timeout; + uint8_t protect_sec_cmd; + uint8_t unused2[65]; +}; + +/* Flash Layout Table ********************************************************/ + +struct qla_flt_location { + uint8_t sig[4]; + __le16 start_lo; + __le16 start_hi; + uint8_t version; + uint8_t unused[5]; + __le16 checksum; +}; + +#define FLT_REG_FW 0x01 +#define FLT_REG_BOOT_CODE 0x07 +#define FLT_REG_VPD_0 0x14 +#define FLT_REG_NVRAM_0 0x15 +#define FLT_REG_VPD_1 0x16 +#define FLT_REG_NVRAM_1 0x17 +#define FLT_REG_VPD_2 0xD4 +#define FLT_REG_NVRAM_2 0xD5 +#define FLT_REG_VPD_3 0xD6 +#define FLT_REG_NVRAM_3 0xD7 +#define FLT_REG_FDT 0x1a +#define FLT_REG_FLT 0x1c +#define FLT_REG_HW_EVENT_0 0x1d +#define FLT_REG_HW_EVENT_1 0x1f +#define FLT_REG_NPIV_CONF_0 0x29 +#define FLT_REG_NPIV_CONF_1 0x2a +#define FLT_REG_GOLD_FW 0x2f +#define FLT_REG_FCP_PRIO_0 0x87 +#define FLT_REG_FCP_PRIO_1 0x88 +#define FLT_REG_CNA_FW 0x97 +#define FLT_REG_BOOT_CODE_8044 0xA2 +#define FLT_REG_FCOE_FW 0xA4 +#define FLT_REG_FCOE_NVRAM_0 0xAA +#define FLT_REG_FCOE_NVRAM_1 0xAC + +/* 27xx */ +#define FLT_REG_IMG_PRI_27XX 0x95 +#define FLT_REG_IMG_SEC_27XX 0x96 +#define FLT_REG_FW_SEC_27XX 0x02 +#define FLT_REG_BOOTLOAD_SEC_27XX 0x9 +#define FLT_REG_VPD_SEC_27XX_0 0x50 +#define FLT_REG_VPD_SEC_27XX_1 0x52 +#define FLT_REG_VPD_SEC_27XX_2 0xD8 +#define FLT_REG_VPD_SEC_27XX_3 0xDA +#define FLT_REG_NVME_PARAMS_27XX 0x21 + +/* 28xx */ +#define FLT_REG_AUX_IMG_PRI_28XX 0x125 +#define FLT_REG_AUX_IMG_SEC_28XX 0x126 +#define FLT_REG_VPD_SEC_28XX_0 0x10C +#define FLT_REG_VPD_SEC_28XX_1 0x10E +#define FLT_REG_VPD_SEC_28XX_2 0x110 +#define FLT_REG_VPD_SEC_28XX_3 0x112 +#define FLT_REG_NVRAM_SEC_28XX_0 0x10D +#define FLT_REG_NVRAM_SEC_28XX_1 0x10F +#define FLT_REG_NVRAM_SEC_28XX_2 0x111 +#define FLT_REG_NVRAM_SEC_28XX_3 0x113 +#define FLT_REG_MPI_PRI_28XX 0xD3 +#define FLT_REG_MPI_SEC_28XX 0xF0 +#define FLT_REG_PEP_PRI_28XX 0xD1 +#define FLT_REG_PEP_SEC_28XX 0xF1 +#define FLT_REG_NVME_PARAMS_PRI_28XX 0x14E +#define FLT_REG_NVME_PARAMS_SEC_28XX 0x179 + +struct qla_flt_region { + __le16 code; + uint8_t attribute; + uint8_t reserved; + __le32 size; + __le32 start; + __le32 end; +}; + +struct qla_flt_header { + __le16 version; + __le16 length; + __le16 checksum; + __le16 unused; + struct qla_flt_region region[]; +}; + +#define FLT_REGION_SIZE 16 +#define FLT_MAX_REGIONS 0xFF +#define FLT_REGIONS_SIZE (FLT_REGION_SIZE * FLT_MAX_REGIONS) + +/* Flash NPIV Configuration Table ********************************************/ + +struct qla_npiv_header { + uint8_t sig[2]; + __le16 version; + __le16 entries; + __le16 unused[4]; + __le16 checksum; +}; + +struct qla_npiv_entry { + __le16 flags; + __le16 vf_id; + uint8_t q_qos; + uint8_t f_qos; + __le16 unused1; + uint8_t port_name[WWN_SIZE]; + uint8_t node_name[WWN_SIZE]; +}; + +/* 84XX 
Support **************************************************************/ + +#define MBA_ISP84XX_ALERT 0x800f /* Alert Notification. */ +#define A84_PANIC_RECOVERY 0x1 +#define A84_OP_LOGIN_COMPLETE 0x2 +#define A84_DIAG_LOGIN_COMPLETE 0x3 +#define A84_GOLD_LOGIN_COMPLETE 0x4 + +#define MBC_ISP84XX_RESET 0x3a /* Reset. */ + +#define FSTATE_REMOTE_FC_DOWN BIT_0 +#define FSTATE_NSL_LINK_DOWN BIT_1 +#define FSTATE_IS_DIAG_FW BIT_2 +#define FSTATE_LOGGED_IN BIT_3 +#define FSTATE_WAITING_FOR_VERIFY BIT_4 + +#define VERIFY_CHIP_IOCB_TYPE 0x1B +struct verify_chip_entry_84xx { + uint8_t entry_type; + uint8_t entry_count; + uint8_t sys_defined; + uint8_t entry_status; + + uint32_t handle; + + __le16 options; +#define VCO_DONT_UPDATE_FW BIT_0 +#define VCO_FORCE_UPDATE BIT_1 +#define VCO_DONT_RESET_UPDATE BIT_2 +#define VCO_DIAG_FW BIT_3 +#define VCO_END_OF_DATA BIT_14 +#define VCO_ENABLE_DSD BIT_15 + + __le16 reserved_1; + + __le16 data_seg_cnt; + __le16 reserved_2[3]; + + __le32 fw_ver; + __le32 exchange_address; + + __le32 reserved_3[3]; + __le32 fw_size; + __le32 fw_seq_size; + __le32 relative_offset; + + struct dsd64 dsd; +}; + +struct verify_chip_rsp_84xx { + uint8_t entry_type; + uint8_t entry_count; + uint8_t sys_defined; + uint8_t entry_status; + + uint32_t handle; + + __le16 comp_status; +#define CS_VCS_CHIP_FAILURE 0x3 +#define CS_VCS_BAD_EXCHANGE 0x8 +#define CS_VCS_SEQ_COMPLETEi 0x40 + + __le16 failure_code; +#define VFC_CHECKSUM_ERROR 0x1 +#define VFC_INVALID_LEN 0x2 +#define VFC_ALREADY_IN_PROGRESS 0x8 + + __le16 reserved_1[4]; + + __le32 fw_ver; + __le32 exchange_address; + + __le32 reserved_2[6]; +}; + +#define ACCESS_CHIP_IOCB_TYPE 0x2B +struct access_chip_84xx { + uint8_t entry_type; + uint8_t entry_count; + uint8_t sys_defined; + uint8_t entry_status; + + uint32_t handle; + + __le16 options; +#define ACO_DUMP_MEMORY 0x0 +#define ACO_LOAD_MEMORY 0x1 +#define ACO_CHANGE_CONFIG_PARAM 0x2 +#define ACO_REQUEST_INFO 0x3 + + __le16 reserved1; + + __le16 dseg_count; + __le16 reserved2[3]; + + __le32 parameter1; + __le32 parameter2; + __le32 parameter3; + + __le32 reserved3[3]; + __le32 total_byte_cnt; + __le32 reserved4; + + struct dsd64 dsd; +}; + +struct access_chip_rsp_84xx { + uint8_t entry_type; + uint8_t entry_count; + uint8_t sys_defined; + uint8_t entry_status; + + uint32_t handle; + + __le16 comp_status; + __le16 failure_code; + __le32 residual_count; + + __le32 reserved[12]; +}; + +/* 81XX Support **************************************************************/ + +#define MBA_DCBX_START 0x8016 +#define MBA_DCBX_COMPLETE 0x8030 +#define MBA_FCF_CONF_ERR 0x8031 +#define MBA_DCBX_PARAM_UPDATE 0x8032 +#define MBA_IDC_COMPLETE 0x8100 +#define MBA_IDC_NOTIFY 0x8101 +#define MBA_IDC_TIME_EXT 0x8102 + +#define MBC_IDC_ACK 0x101 +#define MBC_RESTART_MPI_FW 0x3d +#define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. 
*/ +#define MBC_GET_XGMAC_STATS 0x7a +#define MBC_GET_DCBX_PARAMS 0x51 + +/* + * ISP83xx mailbox commands + */ +#define MBC_WRITE_REMOTE_REG 0x0001 /* Write remote register */ +#define MBC_READ_REMOTE_REG 0x0009 /* Read remote register */ +#define MBC_RESTART_NIC_FIRMWARE 0x003d /* Restart NIC firmware */ +#define MBC_SET_ACCESS_CONTROL 0x003e /* Access control command */ + +/* Flash access control option field bit definitions */ +#define FAC_OPT_FORCE_SEMAPHORE BIT_15 +#define FAC_OPT_REQUESTOR_ID BIT_14 +#define FAC_OPT_CMD_SUBCODE 0xff + +/* Flash access control command subcodes */ +#define FAC_OPT_CMD_WRITE_PROTECT 0x00 +#define FAC_OPT_CMD_WRITE_ENABLE 0x01 +#define FAC_OPT_CMD_ERASE_SECTOR 0x02 +#define FAC_OPT_CMD_LOCK_SEMAPHORE 0x03 +#define FAC_OPT_CMD_UNLOCK_SEMAPHORE 0x04 +#define FAC_OPT_CMD_GET_SECTOR_SIZE 0x05 + +/* enhanced features bit definitions */ +#define NEF_LR_DIST_ENABLE BIT_0 + +/* LR Distance bit positions */ +#define LR_DIST_NV_POS 2 +#define LR_DIST_NV_MASK 0xf +#define LR_DIST_FW_POS 12 + +/* FAC semaphore defines */ +#define FAC_SEMAPHORE_UNLOCK 0 +#define FAC_SEMAPHORE_LOCK 1 + +struct nvram_81xx { + /* NVRAM header. */ + uint8_t id[4]; + __le16 nvram_version; + __le16 reserved_0; + + /* Firmware Initialization Control Block. */ + __le16 version; + __le16 reserved_1; + __le16 frame_payload_size; + __le16 execution_throttle; + __le16 exchange_count; + __le16 reserved_2; + + uint8_t port_name[WWN_SIZE]; + uint8_t node_name[WWN_SIZE]; + + __le16 login_retry_count; + __le16 reserved_3; + __le16 interrupt_delay_timer; + __le16 login_timeout; + + __le32 firmware_options_1; + __le32 firmware_options_2; + __le32 firmware_options_3; + + __le16 reserved_4[4]; + + /* Offset 64. */ + uint8_t enode_mac[6]; + __le16 reserved_5[5]; + + /* Offset 80. */ + __le16 reserved_6[24]; + + /* Offset 128. */ + __le16 ex_version; + uint8_t prio_fcf_matching_flags; + uint8_t reserved_6_1[3]; + __le16 pri_fcf_vlan_id; + uint8_t pri_fcf_fabric_name[8]; + __le16 reserved_6_2[7]; + uint8_t spma_mac_addr[6]; + __le16 reserved_6_3[14]; + + /* Offset 192. 
*/ + uint8_t min_supported_speed; + uint8_t reserved_7_0; + __le16 reserved_7[31]; + + /* + * BIT 0 = Enable spinup delay + * BIT 1 = Disable BIOS + * BIT 2 = Enable Memory Map BIOS + * BIT 3 = Enable Selectable Boot + * BIT 4 = Disable RISC code load + * BIT 5 = Disable Serdes + * BIT 6 = Opt boot mode + * BIT 7 = Interrupt enable + * + * BIT 8 = EV Control enable + * BIT 9 = Enable lip reset + * BIT 10 = Enable lip full login + * BIT 11 = Enable target reset + * BIT 12 = Stop firmware + * BIT 13 = Enable nodename option + * BIT 14 = Default WWPN valid + * BIT 15 = Enable alternate WWN + * + * BIT 16 = CLP LUN string + * BIT 17 = CLP Target string + * BIT 18 = CLP BIOS enable string + * BIT 19 = CLP Serdes string + * BIT 20 = CLP WWPN string + * BIT 21 = CLP WWNN string + * BIT 22 = + * BIT 23 = + * BIT 24 = Keep WWPN + * BIT 25 = Temp WWPN + * BIT 26-31 = + */ + __le32 host_p; + + uint8_t alternate_port_name[WWN_SIZE]; + uint8_t alternate_node_name[WWN_SIZE]; + + uint8_t boot_port_name[WWN_SIZE]; + __le16 boot_lun_number; + __le16 reserved_8; + + uint8_t alt1_boot_port_name[WWN_SIZE]; + __le16 alt1_boot_lun_number; + __le16 reserved_9; + + uint8_t alt2_boot_port_name[WWN_SIZE]; + __le16 alt2_boot_lun_number; + __le16 reserved_10; + + uint8_t alt3_boot_port_name[WWN_SIZE]; + __le16 alt3_boot_lun_number; + __le16 reserved_11; + + /* + * BIT 0 = Selective Login + * BIT 1 = Alt-Boot Enable + * BIT 2 = Reserved + * BIT 3 = Boot Order List + * BIT 4 = Reserved + * BIT 5 = Selective LUN + * BIT 6 = Reserved + * BIT 7-31 = + */ + __le32 efi_parameters; + + uint8_t reset_delay; + uint8_t reserved_12; + __le16 reserved_13; + + __le16 boot_id_number; + __le16 reserved_14; + + __le16 max_luns_per_target; + __le16 reserved_15; + + __le16 port_down_retry_count; + __le16 link_down_timeout; + + /* FCode parameters. */ + __le16 fcode_parameter; + + __le16 reserved_16[3]; + + /* Offset 352. */ + uint8_t reserved_17[4]; + __le16 reserved_18[5]; + uint8_t reserved_19[2]; + __le16 reserved_20[8]; + + /* Offset 384. */ + uint8_t reserved_21[16]; + __le16 reserved_22[3]; + + /* Offset 406 (0x196) Enhanced Features + * BIT 0 = Extended BB credits for LR + * BIT 1 = Virtual Fabric Enable + * BIT 2-5 = Distance Support if BIT 0 is on + * BIT 6 = Prefer FCP + * BIT 7 = SCM Disabled if BIT is set (1) + * BIT 8-15 = Unused + */ + uint16_t enhanced_features; + + uint16_t reserved_24[4]; + + /* Offset 416. */ + __le16 reserved_25[32]; + + /* Offset 480. */ + uint8_t model_name[16]; + + /* Offset 496. */ + __le16 feature_mask_l; + __le16 feature_mask_h; + __le16 reserved_26[2]; + + __le16 subsystem_vendor_id; + __le16 subsystem_device_id; + + __le32 checksum; +}; + +/* + * ISP Initialization Control Block. + * Little endian except where noted. + */ +#define ICB_VERSION 1 +struct init_cb_81xx { + __le16 version; + __le16 reserved_1; + + __le16 frame_payload_size; + __le16 execution_throttle; + __le16 exchange_count; + + __le16 reserved_2; + + uint8_t port_name[WWN_SIZE]; /* Big endian. */ + uint8_t node_name[WWN_SIZE]; /* Big endian. 
*/ + + __le16 response_q_inpointer; + __le16 request_q_outpointer; + + __le16 login_retry_count; + + __le16 prio_request_q_outpointer; + + __le16 response_q_length; + __le16 request_q_length; + + __le16 reserved_3; + + __le16 prio_request_q_length; + + __le64 request_q_address __packed; + __le64 response_q_address __packed; + __le64 prio_request_q_address __packed; + + uint8_t reserved_4[8]; + + __le16 atio_q_inpointer; + __le16 atio_q_length; + __le64 atio_q_address __packed; + + __le16 interrupt_delay_timer; /* 100us increments. */ + __le16 login_timeout; + + /* + * BIT 0-3 = Reserved + * BIT 4 = Enable Target Mode + * BIT 5 = Disable Initiator Mode + * BIT 6 = Reserved + * BIT 7 = Reserved + * + * BIT 8-13 = Reserved + * BIT 14 = Node Name Option + * BIT 15-31 = Reserved + */ + __le32 firmware_options_1; + + /* + * BIT 0 = Operation Mode bit 0 + * BIT 1 = Operation Mode bit 1 + * BIT 2 = Operation Mode bit 2 + * BIT 3 = Operation Mode bit 3 + * BIT 4-7 = Reserved + * + * BIT 8 = Enable Class 2 + * BIT 9 = Enable ACK0 + * BIT 10 = Reserved + * BIT 11 = Enable FC-SP Security + * BIT 12 = FC Tape Enable + * BIT 13 = Reserved + * BIT 14 = Enable Target PRLI Control + * BIT 15-31 = Reserved + */ + __le32 firmware_options_2; + + /* + * BIT 0-3 = Reserved + * BIT 4 = FCP RSP Payload bit 0 + * BIT 5 = FCP RSP Payload bit 1 + * BIT 6 = Enable Receive Out-of-Order data frame handling + * BIT 7 = Reserved + * + * BIT 8 = Reserved + * BIT 9 = Enable Out-of-Order FCP_XFER_RDY relative offset handling + * BIT 10-16 = Reserved + * BIT 17 = Enable multiple FCFs + * BIT 18-20 = MAC addressing mode + * BIT 21-25 = Ethernet data rate + * BIT 26 = Enable ethernet header rx IOCB for ATIO q + * BIT 27 = Enable ethernet header rx IOCB for response q + * BIT 28 = SPMA selection bit 0 + * BIT 28 = SPMA selection bit 1 + * BIT 30-31 = Reserved + */ + __le32 firmware_options_3; + + uint8_t reserved_5[8]; + + uint8_t enode_mac[6]; + + uint8_t reserved_6[10]; +}; + +struct mid_init_cb_81xx { + struct init_cb_81xx init_cb; + + uint16_t count; + uint16_t options; + + struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC]; +}; + +struct ex_init_cb_81xx { + uint16_t ex_version; + uint8_t prio_fcf_matching_flags; + uint8_t reserved_1[3]; + uint16_t pri_fcf_vlan_id; + uint8_t pri_fcf_fabric_name[8]; + uint16_t reserved_2[7]; + uint8_t spma_mac_addr[6]; + uint16_t reserved_3[14]; +}; + +#define FARX_ACCESS_FLASH_CONF_81XX 0x7FFD0000 +#define FARX_ACCESS_FLASH_DATA_81XX 0x7F800000 +#define FARX_ACCESS_FLASH_CONF_28XX 0x7FFD0000 +#define FARX_ACCESS_FLASH_DATA_28XX 0x7F7D0000 + +/* FCP priority config defines *************************************/ +/* operations */ +#define QLFC_FCP_PRIO_DISABLE 0x0 +#define QLFC_FCP_PRIO_ENABLE 0x1 +#define QLFC_FCP_PRIO_GET_CONFIG 0x2 +#define QLFC_FCP_PRIO_SET_CONFIG 0x3 + +struct qla_fcp_prio_entry { + uint16_t flags; /* Describes parameter(s) in FCP */ + /* priority entry that are valid */ +#define FCP_PRIO_ENTRY_VALID 0x1 +#define FCP_PRIO_ENTRY_TAG_VALID 0x2 +#define FCP_PRIO_ENTRY_SPID_VALID 0x4 +#define FCP_PRIO_ENTRY_DPID_VALID 0x8 +#define FCP_PRIO_ENTRY_LUNB_VALID 0x10 +#define FCP_PRIO_ENTRY_LUNE_VALID 0x20 +#define FCP_PRIO_ENTRY_SWWN_VALID 0x40 +#define FCP_PRIO_ENTRY_DWWN_VALID 0x80 + uint8_t tag; /* Priority value */ + uint8_t reserved; /* Reserved for future use */ + uint32_t src_pid; /* Src port id. high order byte */ + /* unused; -1 (wild card) */ + uint32_t dst_pid; /* Src port id. 
high order byte */ + /* unused; -1 (wild card) */ + uint16_t lun_beg; /* 1st lun num of lun range. */ + /* -1 (wild card) */ + uint16_t lun_end; /* 2nd lun num of lun range. */ + /* -1 (wild card) */ + uint8_t src_wwpn[8]; /* Source WWPN: -1 (wild card) */ + uint8_t dst_wwpn[8]; /* Destination WWPN: -1 (wild card) */ +}; + +struct qla_fcp_prio_cfg { + uint8_t signature[4]; /* "HQOS" signature of config data */ + uint16_t version; /* 1: Initial version */ + uint16_t length; /* config data size in num bytes */ + uint16_t checksum; /* config data bytes checksum */ + uint16_t num_entries; /* Number of entries */ + uint16_t size_of_entry; /* Size of each entry in num bytes */ + uint8_t attributes; /* enable/disable, persistence */ +#define FCP_PRIO_ATTR_DISABLE 0x0 +#define FCP_PRIO_ATTR_ENABLE 0x1 +#define FCP_PRIO_ATTR_PERSIST 0x2 + uint8_t reserved; /* Reserved for future use */ +#define FCP_PRIO_CFG_HDR_SIZE offsetof(struct qla_fcp_prio_cfg, entry) + struct qla_fcp_prio_entry entry[1023]; /* fcp priority entries */ + uint8_t reserved2[16]; +}; + +#define FCP_PRIO_CFG_SIZE (32*1024) /* fcp prio data per port*/ + +/* 25XX Support ****************************************************/ +#define FA_FCP_PRIO0_ADDR_25 0x3C000 +#define FA_FCP_PRIO1_ADDR_25 0x3E000 + +/* 81XX Flash locations -- occupies second 2MB region. */ +#define FA_BOOT_CODE_ADDR_81 0x80000 +#define FA_RISC_CODE_ADDR_81 0xA0000 +#define FA_FW_AREA_ADDR_81 0xC0000 +#define FA_VPD_NVRAM_ADDR_81 0xD0000 +#define FA_VPD0_ADDR_81 0xD0000 +#define FA_VPD1_ADDR_81 0xD0400 +#define FA_NVRAM0_ADDR_81 0xD0080 +#define FA_NVRAM1_ADDR_81 0xD0180 +#define FA_FEATURE_ADDR_81 0xD4000 +#define FA_FLASH_DESCR_ADDR_81 0xD8000 +#define FA_FLASH_LAYOUT_ADDR_81 0xD8400 +#define FA_HW_EVENT0_ADDR_81 0xDC000 +#define FA_HW_EVENT1_ADDR_81 0xDC400 +#define FA_NPIV_CONF0_ADDR_81 0xD1000 +#define FA_NPIV_CONF1_ADDR_81 0xD2000 + +/* 83XX Flash locations -- occupies second 8MB region. */ +#define FA_FLASH_LAYOUT_ADDR_83 (0x3F1000/4) +#define FA_FLASH_LAYOUT_ADDR_28 (0x11000/4) + +#define NVRAM_DUAL_FCP_NVME_FLAG_OFFSET 0x196 + +#endif diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h new file mode 100644 index 000000000..09cb94136 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -0,0 +1,1021 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#ifndef __QLA_GBL_H +#define __QLA_GBL_H + +#include <linux/interrupt.h> + +/* + * Global Function Prototypes in qla_init.c source file.
+ */ +extern int qla2x00_initialize_adapter(scsi_qla_host_t *); +extern int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport); + +extern int qla2100_pci_config(struct scsi_qla_host *); +extern int qla2300_pci_config(struct scsi_qla_host *); +extern int qla24xx_pci_config(scsi_qla_host_t *); +extern int qla25xx_pci_config(scsi_qla_host_t *); +extern int qla2x00_reset_chip(struct scsi_qla_host *); +extern int qla24xx_reset_chip(struct scsi_qla_host *); +extern int qla2x00_chip_diag(struct scsi_qla_host *); +extern int qla24xx_chip_diag(struct scsi_qla_host *); +extern void qla2x00_config_rings(struct scsi_qla_host *); +extern void qla24xx_config_rings(struct scsi_qla_host *); +extern int qla2x00_reset_adapter(struct scsi_qla_host *); +extern int qla24xx_reset_adapter(struct scsi_qla_host *); +extern int qla2x00_nvram_config(struct scsi_qla_host *); +extern int qla24xx_nvram_config(struct scsi_qla_host *); +extern int qla81xx_nvram_config(struct scsi_qla_host *); +extern void qla2x00_update_fw_options(struct scsi_qla_host *); +extern void qla24xx_update_fw_options(scsi_qla_host_t *); + +extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *); +extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *); +extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *); + +extern int qla2x00_perform_loop_resync(scsi_qla_host_t *); +extern int qla2x00_loop_resync(scsi_qla_host_t *); +extern void qla2x00_clear_loop_id(fc_port_t *fcport); + +extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *); +extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *); + +extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t); +extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool); +extern void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, + struct els_plogi *els_plogi); + +extern int qla2x00_abort_isp(scsi_qla_host_t *); +extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *); +extern void qla2x00_quiesce_io(scsi_qla_host_t *); + +extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *); +void qla_register_fcport_fn(struct work_struct *); +extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *); +extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *); + +extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *); + +extern void qla84xx_put_chip(struct scsi_qla_host *); + +extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *, + uint16_t *); +extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *); +extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *); +extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *, + uint16_t *); +extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint64_t, uint32_t); +struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *, + enum qla_work_type); +extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *); +int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e); +extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *); +extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *); +extern int qla24xx_async_abort_cmd(srb_t *, bool); + +extern void qla2x00_set_fcport_state(fc_port_t *fcport, int state); +extern fc_port_t * +qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t ); + +extern int __qla83xx_set_idc_control(scsi_qla_host_t *, uint32_t); +extern int __qla83xx_get_idc_control(scsi_qla_host_t *, uint32_t *); +extern void qla83xx_idc_audit(scsi_qla_host_t *, int); 
+extern int qla83xx_nic_core_reset(scsi_qla_host_t *); +extern void qla83xx_reset_ownership(scsi_qla_host_t *); +extern int qla2xxx_mctp_dump(scsi_qla_host_t *); + +extern int +qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *); +extern int qla2x00_init_rings(scsi_qla_host_t *); +extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *, + int, int, bool); +extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *); +void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea); +void qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, + struct event_arg *ea); +void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, + struct event_arg *ea); +int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8); +int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *); +int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, + struct imm_ntfy_from_isp *, int); +int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*, + void *, u8); +int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *); +int qla24xx_detect_sfp(scsi_qla_host_t *); +int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8); + +extern void qla28xx_get_aux_images(struct scsi_qla_host *, + struct active_regions *); +extern void qla27xx_get_active_image(struct scsi_qla_host *, + struct active_regions *); + +void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *, + uint16_t *); +extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *, + uint16_t *); +extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *, + fc_port_t *, uint16_t *); +int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport); +void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport); +int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *); +void qla_rscn_replay(fc_port_t *fcport); +void qla24xx_free_purex_item(struct purex_item *item); +extern bool qla24xx_risc_firmware_invalid(uint32_t *); +void qla_init_iocb_limit(scsi_qla_host_t *); + +void qla_edif_list_del(fc_port_t *fcport); +void qla_edif_sadb_release(struct qla_hw_data *ha); +int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha); +void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha); +void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, + srb_t *sp, struct sts_entry_24xx *sts24); +void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport, + struct ctio7_from_24xx *ctio); +void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport); +int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsgjob); +void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess); +void qla_edif_clear_appdata(struct scsi_qla_host *vha, + struct fc_port *fcport); +const char *sc_to_str(uint16_t cmd); +void qla_adjust_iocb_limit(scsi_qla_host_t *vha); + +/* + * Global Data in qla_os.c source file. 
+ */ +extern char qla2x00_version_str[]; + +extern struct kmem_cache *srb_cachep; +extern struct kmem_cache *qla_tgt_plogi_cachep; + +extern int ql2xlogintimeout; +extern int qlport_down_retry; +extern int ql2xplogiabsentdevice; +extern int ql2xloginretrycount; +extern int ql2xfdmienable; +extern int ql2xrdpenable; +extern int ql2xsmartsan; +extern int ql2xallocfwdump; +extern int ql2xextended_error_logging; +extern int ql2xextended_error_logging_ktrace; +extern int ql2xiidmaenable; +extern int ql2xmqsupport; +extern int ql2xfwloadbin; +extern int ql2xetsenable; +extern int ql2xshiftctondsd; +extern int ql2xdbwr; +extern int ql2xasynctmfenable; +extern int ql2xgffidenable; +extern int ql2xenabledif; +extern int ql2xenablehba_err_chk; +extern int ql2xdontresethba; +extern uint64_t ql2xmaxlun; +extern int ql2xmdcapmask; +extern int ql2xmdenable; +extern int ql2xexlogins; +extern int ql2xexchoffld; +extern int ql2xiniexchg; +extern int ql2xfwholdabts; +extern int ql2xmvasynctoatio; +extern int ql2xuctrlirq; +extern int ql2xnvmeenable; +extern int ql2xautodetectsfp; +extern int ql2xenablemsix; +extern int qla2xuseresexchforels; +extern int ql2xdifbundlinginternalbuffers; +extern int ql2xfulldump_on_mpifail; +extern int ql2xsecenable; +extern int ql2xenforce_iocb_limit; +extern int ql2xabts_wait_nvme; +extern u32 ql2xnvme_queues; +extern int ql2xfc2target; + +extern int qla2x00_loop_reset(scsi_qla_host_t *); +extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); +extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum + fc_host_event_code, u32); +extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *); +extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *, + uint16_t *); +extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *, + uint16_t *); +extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *, + uint16_t *); +extern int qla2x00_set_exlogins_buffer(struct scsi_qla_host *); +extern void qla2x00_free_exlogin_buffer(struct qla_hw_data *); +extern int qla2x00_set_exchoffld_buffer(struct scsi_qla_host *); +extern void qla2x00_free_exchoffld_buffer(struct qla_hw_data *); + +extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *); + +extern struct scsi_qla_host *qla2x00_create_host(const struct scsi_host_template *, + struct qla_hw_data *); +extern void qla2x00_relogin(struct scsi_qla_host *); +extern void qla2x00_do_work(struct scsi_qla_host *); +extern void qla2x00_free_fcports(struct scsi_qla_host *); +extern void qla2x00_free_fcport(fc_port_t *); + +extern void qla83xx_schedule_work(scsi_qla_host_t *, int); +extern void qla83xx_service_idc_aen(struct work_struct *); +extern void qla83xx_nic_core_unrecoverable_work(struct work_struct *); +extern void qla83xx_idc_state_handler_work(struct work_struct *); +extern void qla83xx_nic_core_reset_work(struct work_struct *); + +extern void qla83xx_idc_lock(scsi_qla_host_t *, uint16_t); +extern void qla83xx_idc_unlock(scsi_qla_host_t *, uint16_t); +extern int qla83xx_idc_state_handler(scsi_qla_host_t *); +extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha); +extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha); +extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha); +extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha); + +extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); +extern void qla2x00_disable_board_on_pci_error(struct work_struct *); +extern void qla2x00_sp_compl(srb_t *sp, int); +extern void 
qla2xxx_qpair_sp_free_dma(srb_t *sp); +extern void qla2xxx_qpair_sp_compl(srb_t *sp, int); +extern void qla24xx_sched_upd_fcport(fc_port_t *); +int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); +int qla24xx_post_relogin_work(struct scsi_qla_host *vha); +void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *); +void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, + struct purex_item *pkt); +void qla_pci_set_eeh_busy(struct scsi_qla_host *); +void qla_schedule_eeh_work(struct scsi_qla_host *); +struct edif_sa_ctl *qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, + int index, int dir); + +/* + * Global Functions in qla_mid.c source file. + */ +extern void qla_update_vp_map(struct scsi_qla_host *, int); +extern struct scsi_host_template qla2xxx_driver_template; +extern struct scsi_transport_template *qla2xxx_transport_vport_template; +extern void qla2x00_timer(struct timer_list *); +extern void qla2x00_start_timer(scsi_qla_host_t *, unsigned long); +extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *); +extern int qla24xx_disable_vp (scsi_qla_host_t *); +extern int qla24xx_enable_vp (scsi_qla_host_t *); +extern int qla24xx_control_vp(scsi_qla_host_t *, int ); +extern int qla24xx_modify_vp_config(scsi_qla_host_t *); +extern int qla2x00_send_change_request(scsi_qla_host_t *, uint16_t, uint16_t); +extern void qla2x00_vp_stop_timer(scsi_qla_host_t *); +extern int qla24xx_configure_vhba (scsi_qla_host_t *); +extern void qla24xx_report_id_acquisition(scsi_qla_host_t *, + struct vp_rpt_id_entry_24xx *); +extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *); +extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *); +extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *); + +extern void qla2x00_sp_free_dma(srb_t *sp); + +extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int); +extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *); +extern int qla24xx_async_abort_cmd(srb_t *, bool); + +extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *); + +extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *); +extern int qla2x00_wait_for_chip_reset(scsi_qla_host_t *); +extern int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *); + +extern void qla2xxx_wake_dpc(struct scsi_qla_host *); +extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *); +extern void qla2x00_async_event(scsi_qla_host_t *, struct rsp_que *, + uint16_t *); +extern int qla2x00_vp_abort_isp(scsi_qla_host_t *); +void qla_adjust_buf(struct scsi_qla_host *); + +/* + * Global Function Prototypes in qla_iocb.c source file. 
+ */ +void qla_els_pt_iocb(struct scsi_qla_host *vha, + struct els_entry_24xx *pkt, struct qla_els_pt_arg *a); +cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, + struct req_que *que); +extern uint16_t qla2x00_calc_iocbs_32(uint16_t); +extern uint16_t qla2x00_calc_iocbs_64(uint16_t); +extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t); +extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t); +extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, + uint16_t, struct req_que *); +extern uint32_t qla2xxx_get_next_handle(struct req_que *req); +extern int qla2x00_start_scsi(srb_t *sp); +extern int qla24xx_start_scsi(srb_t *sp); +int qla2x00_marker(struct scsi_qla_host *, struct qla_qpair *, + uint16_t, uint64_t, uint8_t); +extern int qla2x00_start_sp(srb_t *); +extern int qla24xx_dif_start_scsi(srb_t *); +extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t); +extern int qla2xxx_dif_start_scsi_mq(srb_t *); +extern void qla2x00_init_async_sp(srb_t *sp, unsigned long tmo, + void (*done)(struct srb *, int)); +extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *); + +extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *); +extern void *__qla2x00_alloc_iocbs(struct qla_qpair *, srb_t *); +extern int qla2x00_issue_marker(scsi_qla_host_t *, int); +extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *, + struct dsd64 *, uint16_t, struct qla_tc_param *); +extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *, + struct dsd64 *, uint16_t, struct qla_tc_param *); +extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, + struct dsd64 *, uint16_t, struct qla_tgt_cmd *); +extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *); +extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *); +extern int qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, + struct qla_work_evt *e); +void qla2x00_sp_release(struct kref *kref); +void qla2x00_els_dcmd2_iocb_timeout(void *data); + +/* + * Global Function Prototypes in qla_mbx.c source file. 
+ */ +extern int +qla2x00_load_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t); + +extern int +qla2x00_dump_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t); + +extern int +qla2x00_execute_fw(scsi_qla_host_t *, uint32_t); + +extern int +qla2x00_get_fw_version(scsi_qla_host_t *); + +extern int +qla2x00_get_fw_options(scsi_qla_host_t *, uint16_t *); + +extern int +qla2x00_set_fw_options(scsi_qla_host_t *, uint16_t *); + +extern int +qla2x00_mbx_reg_test(scsi_qla_host_t *); + +extern int +qla2x00_verify_checksum(scsi_qla_host_t *, uint32_t); + +extern int +qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); + +extern int +qla2x00_abort_command(srb_t *); + +extern int +qla2x00_abort_target(struct fc_port *, uint64_t, int); + +extern int +qla2x00_lun_reset(struct fc_port *, uint64_t, int); + +extern int +qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, + uint8_t *, uint16_t *, uint16_t *); + +extern int +qla2x00_get_retry_cnt(scsi_qla_host_t *, uint8_t *, uint8_t *, uint16_t *); + +extern int +qla2x00_init_firmware(scsi_qla_host_t *, uint16_t); + +extern int +qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t); + +extern int +qla24xx_get_port_database(scsi_qla_host_t *, u16, struct port_database_24xx *); + +extern int +qla2x00_get_firmware_state(scsi_qla_host_t *, uint16_t *); + +extern int +qla2x00_get_port_name(scsi_qla_host_t *, uint16_t, uint8_t *, uint8_t); + +extern int +qla24xx_link_initialize(scsi_qla_host_t *); + +extern int +qla2x00_lip_reset(scsi_qla_host_t *); + +extern int +qla2x00_send_sns(scsi_qla_host_t *, dma_addr_t, uint16_t, size_t); + +extern int +qla2x00_login_fabric(scsi_qla_host_t *, uint16_t, uint8_t, uint8_t, uint8_t, + uint16_t *, uint8_t); +extern int +qla24xx_login_fabric(scsi_qla_host_t *, uint16_t, uint8_t, uint8_t, uint8_t, + uint16_t *, uint8_t); + +extern int +qla2x00_login_local_device(scsi_qla_host_t *, fc_port_t *, uint16_t *, + uint8_t); + +extern int +qla2x00_fabric_logout(scsi_qla_host_t *, uint16_t, uint8_t, uint8_t, uint8_t); + +extern int +qla24xx_fabric_logout(scsi_qla_host_t *, uint16_t, uint8_t, uint8_t, uint8_t); + +extern int +qla2x00_full_login_lip(scsi_qla_host_t *ha); + +extern int +qla2x00_get_id_list(scsi_qla_host_t *, void *, dma_addr_t, uint16_t *); + +extern int +qla2x00_get_resource_cnts(scsi_qla_host_t *); + +extern int +qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map, + u8 *num_entries); + +extern int +qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *, + dma_addr_t); + +extern int +qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, + dma_addr_t, uint16_t); + +extern int qla24xx_abort_command(srb_t *); +extern int qla24xx_async_abort_command(srb_t *); +extern int +qla24xx_abort_target(struct fc_port *, uint64_t, int); +extern int +qla24xx_lun_reset(struct fc_port *, uint64_t, int); +extern int +qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *, unsigned int, + uint64_t, enum nexus_wait_type); +extern int +qla2x00_system_error(scsi_qla_host_t *); + +extern int +qla2x00_write_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t); +extern int +qla2x00_read_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t *); + +extern int +qla8044_write_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t); +extern int +qla8044_read_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t *); + +extern int +qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t); + +extern int +qla2x00_stop_firmware(scsi_qla_host_t 
*); + +extern int +qla2x00_enable_eft_trace(scsi_qla_host_t *, dma_addr_t, uint16_t); +extern int +qla2x00_disable_eft_trace(scsi_qla_host_t *); + +extern int +qla2x00_enable_fce_trace(scsi_qla_host_t *, dma_addr_t, uint16_t , uint16_t *, + uint32_t *); + +extern int +qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); + +extern int +qla82xx_set_driver_version(scsi_qla_host_t *, char *); + +extern int +qla25xx_set_driver_version(scsi_qla_host_t *, char *); + +extern int +qla25xx_set_els_cmds_supported(scsi_qla_host_t *); + +extern int +qla24xx_get_buffer_credits(scsi_qla_host_t *, struct buffer_credit_24xx *, + dma_addr_t); + +extern int +qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, + uint16_t, uint16_t, uint16_t, uint16_t); + +extern int +qla2x00_write_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, + uint16_t, uint16_t, uint16_t, uint16_t); + +extern int +qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *); + +extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *); + +extern int qla81xx_idc_ack(scsi_qla_host_t *, uint16_t *); + +extern int +qla81xx_fac_get_sector_size(scsi_qla_host_t *, uint32_t *); + +extern int +qla81xx_fac_do_write_enable(scsi_qla_host_t *, int); + +extern int +qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t); + +extern int qla81xx_fac_semaphore_access(scsi_qla_host_t *, int); + +extern int +qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *); + +extern int +qla2x00_get_dcbx_params(scsi_qla_host_t *, dma_addr_t, uint16_t); + +extern int +qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *); + +extern int +qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t); + +extern int +qla81xx_write_mpi_register(scsi_qla_host_t *, uint16_t *); +extern int qla2x00_get_data_rate(scsi_qla_host_t *); +extern int qla24xx_set_fcp_prio(scsi_qla_host_t *, uint16_t, uint16_t, + uint16_t *); +extern int +qla81xx_get_port_config(scsi_qla_host_t *, uint16_t *); + +extern int +qla81xx_set_port_config(scsi_qla_host_t *, uint16_t *); + +extern int +qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *); + +extern int +qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t); + +extern int +qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint); + +extern int +qla26xx_dport_diagnostics_v2(scsi_qla_host_t *, + struct qla_dport_diag_v2 *, mbx_cmd_t *); + +int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *); +int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8); +int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t, + uint16_t *); +int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *, + struct port_database_24xx *); +int qla24xx_get_port_login_templ(scsi_qla_host_t *, dma_addr_t, + void *, uint16_t); + +extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *); +extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t); +int qla24xx_res_count_wait(struct scsi_qla_host *, uint16_t *, int); + +extern int qla28xx_secure_flash_update(scsi_qla_host_t *, uint16_t, uint16_t, + uint32_t, dma_addr_t, uint32_t); + +extern int qla2xxx_read_remote_register(scsi_qla_host_t *, uint32_t, + uint32_t *); +extern int qla2xxx_write_remote_register(scsi_qla_host_t *, uint32_t, + uint32_t); +void qla_no_op_mb(struct scsi_qla_host *vha); + +/* + * Global Function Prototypes in qla_isr.c source file. 
+ */ +extern irqreturn_t qla2100_intr_handler(int, void *); +extern irqreturn_t qla2300_intr_handler(int, void *); +extern irqreturn_t qla24xx_intr_handler(int, void *); +extern void qla2x00_process_response_queue(struct rsp_que *); +extern void +qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *); +extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *); +extern void qla2x00_free_irqs(scsi_qla_host_t *); + +extern int qla2x00_get_data_rate(scsi_qla_host_t *); +extern const char *qla2x00_get_link_speed_str(struct qla_hw_data *, uint16_t); +extern srb_t * +qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *, + void *); +extern void +qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *, + uint32_t); +extern irqreturn_t +qla2xxx_msix_rsp_q(int irq, void *dev_id); +extern irqreturn_t +qla2xxx_msix_rsp_q_hs(int irq, void *dev_id); +fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t); +fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8); +fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8); +void qla24xx_queue_purex_item(scsi_qla_host_t *, struct purex_item *, + void (*process_item)(struct scsi_qla_host *, + struct purex_item *)); +void __qla_consume_iocb(struct scsi_qla_host *, void **, struct rsp_que **); +void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp); + +/* + * Global Function Prototypes in qla_sup.c source file. + */ +extern int qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *, + uint32_t, uint32_t); +extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, void *, uint32_t, + uint32_t); +extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, void *, uint32_t, + uint32_t); +extern int qla2x00_write_nvram_data(scsi_qla_host_t *, void *, uint32_t, + uint32_t); +extern int qla24xx_write_nvram_data(scsi_qla_host_t *, void *, uint32_t, + uint32_t); +extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, void *, uint32_t, + uint32_t); +extern int qla25xx_write_nvram_data(scsi_qla_host_t *, void *, uint32_t, + uint32_t); + +extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t); +bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *, uint32_t); +bool qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *, uint16_t); + +extern int qla2x00_beacon_on(struct scsi_qla_host *); +extern int qla2x00_beacon_off(struct scsi_qla_host *); +extern void qla2x00_beacon_blink(struct scsi_qla_host *); +extern int qla24xx_beacon_on(struct scsi_qla_host *); +extern int qla24xx_beacon_off(struct scsi_qla_host *); +extern void qla24xx_beacon_blink(struct scsi_qla_host *); +extern void qla83xx_beacon_blink(struct scsi_qla_host *); +extern int qla82xx_beacon_on(struct scsi_qla_host *); +extern int qla82xx_beacon_off(struct scsi_qla_host *); +extern int qla83xx_wr_reg(scsi_qla_host_t *, uint32_t, uint32_t); +extern int qla83xx_rd_reg(scsi_qla_host_t *, uint32_t, uint32_t *); +extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *); +extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t, + uint32_t, uint16_t *); + +extern void *qla2x00_read_optrom_data(struct scsi_qla_host *, void *, + uint32_t, uint32_t); +extern int qla2x00_write_optrom_data(struct scsi_qla_host *, void *, + uint32_t, uint32_t); +extern void *qla24xx_read_optrom_data(struct scsi_qla_host *, void *, + uint32_t, uint32_t); +extern int qla24xx_write_optrom_data(struct scsi_qla_host *, void *, + uint32_t, uint32_t); +extern void 
*qla25xx_read_optrom_data(struct scsi_qla_host *, void *, + uint32_t, uint32_t); +extern void *qla8044_read_optrom_data(struct scsi_qla_host *, + void *, uint32_t, uint32_t); +extern void qla8044_watchdog(struct scsi_qla_host *vha); + +extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *); +extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *); +extern int qla82xx_get_flash_version(scsi_qla_host_t *, void *); + +extern int qla2xxx_get_flash_info(scsi_qla_host_t *); +extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t); + +extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *); +extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *); +extern int qla2x00_mailbox_passthru(struct bsg_job *bsg_job); +int qla2x00_sys_ld_info(struct bsg_job *bsg_job); +int __qla_copy_purex_to_buffer(struct scsi_qla_host *, void **, + struct rsp_que **, u8 *, u32); +struct purex_item *qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha, + void **pkt, struct rsp_que **rsp, bool is_purls, bool byte_order); +int qla_mailbox_passthru(scsi_qla_host_t *vha, uint16_t *mbx_in, + uint16_t *mbx_out); + +/* + * Global Function Prototypes in qla_dbg.c source file. + */ +void qla2xxx_dump_fw(scsi_qla_host_t *vha); +void qla2100_fw_dump(scsi_qla_host_t *vha); +void qla2300_fw_dump(scsi_qla_host_t *vha); +void qla24xx_fw_dump(scsi_qla_host_t *vha); +void qla25xx_fw_dump(scsi_qla_host_t *vha); +void qla81xx_fw_dump(scsi_qla_host_t *vha); +void qla82xx_fw_dump(scsi_qla_host_t *vha); +void qla8044_fw_dump(scsi_qla_host_t *vha); + +void qla27xx_fwdump(scsi_qla_host_t *vha); +extern void qla27xx_mpi_fwdump(scsi_qla_host_t *, int); +extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *, void *); +extern int qla27xx_fwdt_template_valid(void *); +extern ulong qla27xx_fwdt_template_size(void *); + +extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int); +extern void ql_dump_regs(uint, scsi_qla_host_t *, uint); +extern void ql_dump_buffer(uint, scsi_qla_host_t *, uint, const void *, uint); +/* + * Global Function Prototypes in qla_gs.c source file. 
+ */ +extern void *qla2x00_prep_ms_iocb(scsi_qla_host_t *, struct ct_arg *); +extern void *qla24xx_prep_ms_iocb(scsi_qla_host_t *, struct ct_arg *); +extern int qla2x00_ga_nxt(scsi_qla_host_t *, fc_port_t *); +extern int qla2x00_gid_pt(scsi_qla_host_t *, sw_info_t *); +extern int qla2x00_gpn_id(scsi_qla_host_t *, sw_info_t *); +extern int qla2x00_gnn_id(scsi_qla_host_t *, sw_info_t *); +extern void qla2x00_gff_id(scsi_qla_host_t *, sw_info_t *); +extern int qla2x00_rft_id(scsi_qla_host_t *); +extern int qla2x00_rff_id(scsi_qla_host_t *, u8); +extern int qla2x00_rnn_id(scsi_qla_host_t *); +extern int qla2x00_rsnn_nn(scsi_qla_host_t *); +extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t); +extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t); +extern int qla2x00_fdmi_register(scsi_qla_host_t *); +extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *); +extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *); +extern size_t qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t); +extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *, + struct ct_sns_rsp *, const char *); +extern void qla2x00_async_iocb_timeout(void *data); + +int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *); +int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *); +void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *); +int qla2x00_mgmt_svr_login(scsi_qla_host_t *); +int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool); +int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *); +void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *); +void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *); +int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *); +int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *); +void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *); +void qla24xx_sp_unmap(scsi_qla_host_t *, srb_t *); +void qla_scan_work_fn(struct work_struct *); +uint qla25xx_fdmi_port_speed_capability(struct qla_hw_data *); +uint qla25xx_fdmi_port_speed_currently(struct qla_hw_data *); + +/* + * Global Function Prototypes in qla_attr.c source file. + */ +struct device_attribute; +extern const struct attribute_group *qla2x00_host_groups[]; +struct fc_function_template; +extern struct fc_function_template qla2xxx_transport_functions; +extern struct fc_function_template qla2xxx_transport_vport_functions; +extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); +extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool); +extern void qla2x00_init_host_attr(scsi_qla_host_t *); +extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); +extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *); +extern int qla2x00_echo_test(scsi_qla_host_t *, + struct msg_echo_lb *, uint16_t *); +extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *); +extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *, + struct qla_fcp_prio_cfg *, uint8_t); +/* + * Global Function Prototypes in qla_dfs.c source file. 
+ */ +extern int qla2x00_dfs_setup(scsi_qla_host_t *); +extern int qla2x00_dfs_remove(scsi_qla_host_t *); + +/* Globa function prototypes for multi-q */ +extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *, + struct qla_msix_entry *, int); +extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); +extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); +extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, + uint16_t, int, uint8_t, bool); +extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, + uint16_t, struct qla_qpair *, bool); + +extern void qla2x00_init_response_q_entries(struct rsp_que *); +extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *); +extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *); +extern int qla25xx_delete_queues(struct scsi_qla_host *); + +/* qlafx00 related functions */ +extern int qlafx00_pci_config(struct scsi_qla_host *); +extern int qlafx00_initialize_adapter(struct scsi_qla_host *); +extern int qlafx00_soft_reset(scsi_qla_host_t *); +extern int qlafx00_chip_diag(scsi_qla_host_t *); +extern void qlafx00_config_rings(struct scsi_qla_host *); +extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *, size_t); +extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *, size_t); +extern irqreturn_t qlafx00_intr_handler(int, void *); +extern void qlafx00_enable_intrs(struct qla_hw_data *); +extern void qlafx00_disable_intrs(struct qla_hw_data *); +extern int qlafx00_abort_target(fc_port_t *, uint64_t, int); +extern int qlafx00_lun_reset(fc_port_t *, uint64_t, int); +extern int qlafx00_start_scsi(srb_t *); +extern int qlafx00_abort_isp(scsi_qla_host_t *); +extern int qlafx00_iospace_config(struct qla_hw_data *); +extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t); +extern int qlafx00_driver_shutdown(scsi_qla_host_t *, int); +extern int qlafx00_fw_ready(scsi_qla_host_t *); +extern int qlafx00_configure_devices(scsi_qla_host_t *); +extern int qlafx00_reset_initialize(scsi_qla_host_t *); +extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint16_t); +extern void qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *); +extern int qlafx00_post_aenfx_work(struct scsi_qla_host *, uint32_t, + uint32_t *, int); +extern uint32_t qlafx00_fw_state_show(struct device *, + struct device_attribute *, char *); +extern void qlafx00_get_host_speed(struct Scsi_Host *); +extern void qlafx00_init_response_q_entries(struct rsp_que *); + +extern void qlafx00_tm_iocb(srb_t *, struct tsk_mgmt_entry_fx00 *); +extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *); +extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *); +extern void qlafx00_timer_routine(scsi_qla_host_t *); +extern int qlafx00_rescan_isp(scsi_qla_host_t *); + +/* qla82xx related functions */ + +/* PCI related functions */ +extern int qla82xx_pci_config(struct scsi_qla_host *); +extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int); +extern int qla82xx_pci_region_offset(struct pci_dev *, int); +extern int qla82xx_iospace_config(struct qla_hw_data *); + +/* Initialization related functions */ +extern int qla82xx_reset_chip(struct scsi_qla_host *); +extern void qla82xx_config_rings(struct scsi_qla_host *); +extern void qla82xx_watchdog(scsi_qla_host_t *); +extern int qla82xx_start_firmware(scsi_qla_host_t *); + +/* Firmware and flash related functions */ +extern int 
qla82xx_load_risc(scsi_qla_host_t *, uint32_t *); +extern void *qla82xx_read_optrom_data(struct scsi_qla_host *, void *, + uint32_t, uint32_t); +extern int qla82xx_write_optrom_data(struct scsi_qla_host *, void *, + uint32_t, uint32_t); + +/* Mailbox related functions */ +extern int qla82xx_abort_isp(scsi_qla_host_t *); +extern int qla82xx_restart_isp(scsi_qla_host_t *); + +/* IOCB related functions */ +extern int qla82xx_start_scsi(srb_t *); +extern void qla2x00_sp_free(srb_t *sp); +extern void qla2x00_sp_timeout(struct timer_list *); +extern void qla2x00_bsg_job_done(srb_t *sp, int); +extern void qla2x00_bsg_sp_free(srb_t *sp); +extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *); + +/* Interrupt related */ +extern irqreturn_t qla82xx_intr_handler(int, void *); +extern irqreturn_t qla82xx_msix_default(int, void *); +extern irqreturn_t qla82xx_msix_rsp_q(int, void *); +extern void qla82xx_enable_intrs(struct qla_hw_data *); +extern void qla82xx_disable_intrs(struct qla_hw_data *); +extern void qla82xx_poll(int, void *); +extern void qla82xx_init_flags(struct qla_hw_data *); + +/* ISP 8021 hardware related */ +extern void qla82xx_set_drv_active(scsi_qla_host_t *); +extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32); +extern int qla82xx_rd_32(struct qla_hw_data *, ulong); + +/* ISP 8021 IDC */ +extern void qla82xx_clear_drv_active(struct qla_hw_data *); +extern uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *, uint32_t); +extern int qla82xx_idc_lock(struct qla_hw_data *); +extern void qla82xx_idc_unlock(struct qla_hw_data *); +extern int qla82xx_device_state_handler(scsi_qla_host_t *); +extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *); +extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *); + +extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, size_t, + const char *); +extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *); +extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *); +extern void qla82xx_start_iocbs(scsi_qla_host_t *); +extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *); +extern int qla82xx_check_md_needed(scsi_qla_host_t *); +extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *); +extern int qla81xx_set_led_config(scsi_qla_host_t *, uint16_t *); +extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *); +extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int); +extern const char *qdev_state(uint32_t); +extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *); +extern int qla82xx_read_temperature(scsi_qla_host_t *); +extern int qla8044_read_temperature(scsi_qla_host_t *); +extern int qla2x00_read_sfp_dev(struct scsi_qla_host *, char *, int); +extern int ql26xx_led_config(scsi_qla_host_t *, uint16_t, uint16_t *); + +/* BSG related functions */ +extern int qla24xx_bsg_request(struct bsg_job *); +extern int qla24xx_bsg_timeout(struct bsg_job *); +extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t); +extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *, + dma_addr_t, size_t, uint32_t); +extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t, + uint16_t *, uint16_t *); +extern int qla24xx_sadb_update(struct bsg_job *bsg_job); +extern int qla_post_sa_replace_work(struct scsi_qla_host *vha, + fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl); + +/* 83xx related functions */ +void qla83xx_fw_dump(scsi_qla_host_t *vha); + +/* Minidump related functions */ +extern int qla82xx_md_get_template_size(scsi_qla_host_t *); +extern int qla82xx_md_get_template(scsi_qla_host_t 
*); +extern int qla82xx_md_alloc(scsi_qla_host_t *); +extern void qla82xx_md_free(scsi_qla_host_t *); +extern int qla82xx_md_collect(scsi_qla_host_t *); +extern void qla82xx_md_prep(scsi_qla_host_t *); +extern void qla82xx_set_reset_owner(scsi_qla_host_t *); +extern int qla82xx_validate_template_chksum(scsi_qla_host_t *vha); + +/* Function declarations for ISP8044 */ +extern int qla8044_idc_lock(struct qla_hw_data *ha); +extern void qla8044_idc_unlock(struct qla_hw_data *ha); +extern uint32_t qla8044_rd_reg(struct qla_hw_data *ha, ulong addr); +extern void qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val); +extern void qla8044_read_reset_template(struct scsi_qla_host *ha); +extern void qla8044_set_idc_dontreset(struct scsi_qla_host *ha); +extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg); +extern void qla8044_wr_direct(struct scsi_qla_host *vha, + const uint32_t crb_reg, const uint32_t value); +extern int qla8044_device_state_handler(struct scsi_qla_host *vha); +extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha); +extern void qla8044_clear_drv_active(struct qla_hw_data *); +void qla8044_get_minidump(struct scsi_qla_host *vha); +int qla8044_collect_md_data(struct scsi_qla_host *vha); +extern int qla8044_md_get_template(scsi_qla_host_t *); +extern int qla8044_write_optrom_data(struct scsi_qla_host *, void *, + uint32_t, uint32_t); +extern irqreturn_t qla8044_intr_handler(int, void *); +extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t); +extern int qla8044_abort_isp(scsi_qla_host_t *); +extern int qla8044_check_fw_alive(struct scsi_qla_host *); +extern int qla_get_exlogin_status(scsi_qla_host_t *, uint16_t *, + uint16_t *); +extern int qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr); +extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *); +extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *); +extern void qlt_handle_abts_recv(struct scsi_qla_host *, struct rsp_que *, + response_t *); + +struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha, be_id_t d_id); +int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, + struct imm_ntfy_from_isp *, int); +void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *); +void qlt_plogi_ack_link(struct scsi_qla_host *, struct qlt_plogi_ack_t *, + struct fc_port *, enum qlt_plogi_link_t); +void qlt_plogi_ack_unref(struct scsi_qla_host *, struct qlt_plogi_ack_t *); +extern void qlt_schedule_sess_for_deletion(struct fc_port *); +extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *, + uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **); +void qla24xx_delete_sess_fn(struct work_struct *); +void qlt_unknown_atio_work_fn(struct work_struct *); +void qla_update_host_map(struct scsi_qla_host *, port_id_t); +void qla_remove_hostmap(struct qla_hw_data *ha); +void qlt_clr_qp_table(struct scsi_qla_host *vha); +void qlt_set_mode(struct scsi_qla_host *); +int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode); +extern void qla24xx_process_purex_list(struct purex_list *); +extern void qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp); +extern void qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp); +extern void qla_wait_nvme_release_cmd_kref(srb_t *sp); +extern void qla_nvme_abort_set_option + (struct abort_entry_24xx *abt, srb_t *sp); +extern void qla_nvme_abort_process_comp_status + (struct abort_entry_24xx *abt, srb_t *sp); +struct 
scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha, + uint16_t vp_idx); + +/* nvme.c */ +void qla_nvme_unregister_remote_port(struct fc_port *fcport); + +/* qla_edif.c */ +fc_port_t *qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id); +void qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype, uint32_t data, uint32_t data2, + fc_port_t *fcport); +void qla_edb_stop(scsi_qla_host_t *vha); +int32_t qla_edif_app_mgmt(struct bsg_job *bsg_job); +void qla_enode_init(scsi_qla_host_t *vha); +void qla_enode_stop(scsi_qla_host_t *vha); +void qla_edif_flush_sa_ctl_lists(fc_port_t *fcport); +void qla_edb_init(scsi_qla_host_t *vha); +void qla_edif_timer(scsi_qla_host_t *vha); +int qla28xx_start_scsi_edif(srb_t *sp); +void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb); +void qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb); +void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp); +void qla28xx_sa_update_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct sa_update_28xx *pkt); +void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea); + +#define QLA2XX_HW_ERROR BIT_0 +#define QLA2XX_SHT_LNK_DWN BIT_1 +#define QLA2XX_INT_ERR BIT_2 +#define QLA2XX_CMD_TIMEOUT BIT_3 +#define QLA2XX_RESET_CMD_ERR BIT_4 +#define QLA2XX_TGT_SHT_LNK_DOWN BIT_17 + +#define QLA2XX_MAX_LINK_DOWN_TIME 100 + +int qla2xxx_start_stats(struct Scsi_Host *shost, u32 flags); +int qla2xxx_stop_stats(struct Scsi_Host *shost, u32 flags); +int qla2xxx_reset_stats(struct Scsi_Host *shost, u32 flags); + +int qla2xxx_get_ini_stats(struct Scsi_Host *shost, u32 flags, void *data, u64 size); +int qla2xxx_get_tgt_stats(struct Scsi_Host *shost, u32 flags, + struct fc_rport *rport, void *data, u64 size); +int qla2xxx_disable_port(struct Scsi_Host *shost); +int qla2xxx_enable_port(struct Scsi_Host *shost); + +uint64_t qla2x00_get_num_tgts(scsi_qla_host_t *vha); +uint64_t qla2x00_count_set_bits(u32 num); +int qla_create_buf_pool(struct scsi_qla_host *, struct qla_qpair *); +void qla_free_buf_pool(struct qla_qpair *); +int qla_get_buf(struct scsi_qla_host *, struct qla_qpair *, struct qla_buf_dsc *); +void qla_put_buf(struct qla_qpair *, struct qla_buf_dsc *); +#endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c new file mode 100644 index 000000000..1cf9d200d --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -0,0 +1,4022 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#include "qla_def.h" +#include "qla_target.h" +#include + +static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *); +static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *); +static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *); +static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *); +static int qla2x00_sns_rft_id(scsi_qla_host_t *); +static int qla2x00_sns_rnn_id(scsi_qla_host_t *); +static int qla_async_rftid(scsi_qla_host_t *, port_id_t *); +static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8); +static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*); +static int qla_async_rsnn_nn(scsi_qla_host_t *); + + + +/** + * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query. + * @vha: HA context + * @arg: CT arguments + * + * Returns a pointer to the @vha's ms_iocb. 
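// Editor's illustrative sketch -- not part of the upstream 6.6.15 patch.
// qla2x00_prep_ct_req() below fills a standard FC-CT preamble for a name
// server query: revision 0x01, GS type 0xFC (directory service), GS subtype
// 0x02 (name server), a big-endian command code, and the maximum response
// size counted in 4-byte words after the 16-byte CT header.  Stand-alone
// byte-level illustration (hypothetical helper, layout per FC-GS CT_IU):
#include <stdint.h>
#include <string.h>

void sketch_prep_ct_preamble(uint8_t buf[16], uint16_t cmd, uint16_t rsp_size)
{
	uint16_t max_words = (uint16_t)((rsp_size - 16) / 4);

	memset(buf, 0, 16);
	buf[0]  = 0x01;                 // revision
	buf[4]  = 0xFC;                 // GS type: directory service
	buf[5]  = 0x02;                 // GS subtype: name server
	buf[8]  = cmd >> 8;             // command code, big-endian
	buf[9]  = cmd & 0xff;
	buf[10] = max_words >> 8;       // max/residual size, big-endian
	buf[11] = max_words & 0xff;
}
// (end of editor's sketch)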
+ */ +void * +qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg) +{ + struct qla_hw_data *ha = vha->hw; + ms_iocb_entry_t *ms_pkt; + + ms_pkt = (ms_iocb_entry_t *)arg->iocb; + memset(ms_pkt, 0, sizeof(ms_iocb_entry_t)); + + ms_pkt->entry_type = MS_IOCB_TYPE; + ms_pkt->entry_count = 1; + SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER); + ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG); + ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); + ms_pkt->cmd_dsd_count = cpu_to_le16(1); + ms_pkt->total_dsd_count = cpu_to_le16(2); + ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size); + ms_pkt->req_bytecount = cpu_to_le32(arg->req_size); + + put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address); + ms_pkt->req_dsd.length = ms_pkt->req_bytecount; + + put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address); + ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount; + + vha->qla_stats.control_requests++; + + return (ms_pkt); +} + +/** + * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query. + * @vha: HA context + * @arg: CT arguments + * + * Returns a pointer to the @ha's ms_iocb. + */ +void * +qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg) +{ + struct qla_hw_data *ha = vha->hw; + struct ct_entry_24xx *ct_pkt; + + ct_pkt = (struct ct_entry_24xx *)arg->iocb; + memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); + + ct_pkt->entry_type = CT_IOCB_TYPE; + ct_pkt->entry_count = 1; + ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle); + ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); + ct_pkt->cmd_dsd_count = cpu_to_le16(1); + ct_pkt->rsp_dsd_count = cpu_to_le16(1); + ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size); + ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size); + + put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address); + ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count; + + put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address); + ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count; + ct_pkt->vp_index = vha->vp_idx; + + vha->qla_stats.control_requests++; + + return (ct_pkt); +} + +/** + * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query. + * @p: CT request buffer + * @cmd: GS command + * @rsp_size: response size in bytes + * + * Returns a pointer to the intitialized @ct_req. + */ +static inline struct ct_sns_req * +qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size) +{ + memset(p, 0, sizeof(struct ct_sns_pkt)); + + p->p.req.header.revision = 0x01; + p->p.req.header.gs_type = 0xFC; + p->p.req.header.gs_subtype = 0x02; + p->p.req.command = cpu_to_be16(cmd); + p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); + + return &p->p.req; +} + +int +qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, + struct ct_sns_rsp *ct_rsp, const char *routine) +{ + int rval; + uint16_t comp_status; + struct qla_hw_data *ha = vha->hw; + bool lid_is_sns = false; + + rval = QLA_FUNCTION_FAILED; + if (ms_pkt->entry_status != 0) { + ql_dbg(ql_dbg_disc, vha, 0x2031, + "%s failed, error status (%x) on port_id: %02x%02x%02x.\n", + routine, ms_pkt->entry_status, vha->d_id.b.domain, + vha->d_id.b.area, vha->d_id.b.al_pa); + } else { + if (IS_FWI2_CAPABLE(ha)) + comp_status = le16_to_cpu( + ((struct ct_entry_24xx *)ms_pkt)->comp_status); + else + comp_status = le16_to_cpu(ms_pkt->status); + switch (comp_status) { + case CS_COMPLETE: + case CS_DATA_UNDERRUN: + case CS_DATA_OVERRUN: /* Overrun? 
*/ + if (ct_rsp->header.response != + cpu_to_be16(CT_ACCEPT_RESPONSE)) { + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077, + "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n", + routine, vha->d_id.b.domain, + vha->d_id.b.area, vha->d_id.b.al_pa, + comp_status, ct_rsp->header.response); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, + 0x2078, ct_rsp, + offsetof(typeof(*ct_rsp), rsp)); + rval = QLA_INVALID_COMMAND; + } else + rval = QLA_SUCCESS; + break; + case CS_PORT_LOGGED_OUT: + if (IS_FWI2_CAPABLE(ha)) { + if (le16_to_cpu(ms_pkt->loop_id.extended) == + NPH_SNS) + lid_is_sns = true; + } else { + if (le16_to_cpu(ms_pkt->loop_id.extended) == + SIMPLE_NAME_SERVER) + lid_is_sns = true; + } + if (lid_is_sns) { + ql_dbg(ql_dbg_async, vha, 0x502b, + "%s failed, Name server has logged out", + routine); + rval = QLA_NOT_LOGGED_IN; + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + } + break; + case CS_TIMEOUT: + rval = QLA_FUNCTION_TIMEOUT; + fallthrough; + default: + ql_dbg(ql_dbg_disc, vha, 0x2033, + "%s failed, completion status (%x) on port_id: " + "%02x%02x%02x.\n", routine, comp_status, + vha->d_id.b.domain, vha->d_id.b.area, + vha->d_id.b.al_pa); + break; + } + } + return rval; +} + +/** + * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command. + * @vha: HA context + * @fcport: fcport entry to updated + * + * Returns 0 on success. + */ +int +qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + int rval; + + ms_iocb_entry_t *ms_pkt; + struct ct_sns_req *ct_req; + struct ct_sns_rsp *ct_rsp; + struct qla_hw_data *ha = vha->hw; + struct ct_arg arg; + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) + return qla2x00_sns_ga_nxt(vha, fcport); + + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GA_NXT_REQ_SIZE; + arg.rsp_size = GA_NXT_RSP_SIZE; + arg.nport_handle = NPH_SNS; + + /* Issue GA_NXT */ + /* Prepare common MS IOCB */ + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); + + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD, + GA_NXT_RSP_SIZE); + ct_rsp = &ha->ct_sns->p.rsp; + + /* Prepare CT arguments -- port_id */ + ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id); + + /* Execute MS IOCB */ + rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, + sizeof(ms_iocb_entry_t)); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x2062, + "GA_NXT issue IOCB failed (%d).\n", rval); + } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") != + QLA_SUCCESS) { + rval = QLA_FUNCTION_FAILED; + } else { + /* Populate fc_port_t entry. */ + fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id); + + memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name, + WWN_SIZE); + memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name, + WWN_SIZE); + + fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ? 
+ FS_FC4TYPE_FCP : FC4_TYPE_OTHER; + + if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE && + ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) + fcport->d_id.b.domain = 0xf0; + + ql_dbg(ql_dbg_disc, vha, 0x2063, + "GA_NXT entry - nn %8phN pn %8phN " + "port_id=%02x%02x%02x.\n", + fcport->node_name, fcport->port_name, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa); + } + + return (rval); +} + +static inline int +qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha) +{ + return vha->hw->max_fibre_devices * 4 + 16; +} + +/** + * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command. + * @vha: HA context + * @list: switch info entries to populate + * + * NOTE: Non-Nx_Ports are not requested. + * + * Returns 0 on success. + */ +int +qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) +{ + int rval; + uint16_t i; + + ms_iocb_entry_t *ms_pkt; + struct ct_sns_req *ct_req; + struct ct_sns_rsp *ct_rsp; + + struct ct_sns_gid_pt_data *gid_data; + struct qla_hw_data *ha = vha->hw; + uint16_t gid_pt_rsp_size; + struct ct_arg arg; + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) + return qla2x00_sns_gid_pt(vha, list); + + gid_data = NULL; + gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha); + + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GID_PT_REQ_SIZE; + arg.rsp_size = gid_pt_rsp_size; + arg.nport_handle = NPH_SNS; + + /* Issue GID_PT */ + /* Prepare common MS IOCB */ + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); + + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size); + ct_rsp = &ha->ct_sns->p.rsp; + + /* Prepare CT arguments -- port_type */ + ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE; + + /* Execute MS IOCB */ + rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, + sizeof(ms_iocb_entry_t)); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x2055, + "GID_PT issue IOCB failed (%d).\n", rval); + } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") != + QLA_SUCCESS) { + rval = QLA_FUNCTION_FAILED; + } else { + /* Set port IDs in switch info list. */ + for (i = 0; i < ha->max_fibre_devices; i++) { + gid_data = &ct_rsp->rsp.gid_pt.entries[i]; + list[i].d_id = be_to_port_id(gid_data->port_id); + memset(list[i].fabric_port_name, 0, WWN_SIZE); + list[i].fp_speed = PORT_SPEED_UNKNOWN; + + /* Last one exit. */ + if (gid_data->control_byte & BIT_7) { + list[i].d_id.b.rsvd_1 = gid_data->control_byte; + break; + } + } + + /* + * If we've used all available slots, then the switch is + * reporting back more devices than we can handle with this + * single call. Return a failed status, and let GA_NXT handle + * the overload. + */ + if (i == ha->max_fibre_devices) + rval = QLA_FUNCTION_FAILED; + } + + return (rval); +} + +/** + * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query. + * @vha: HA context + * @list: switch info entries to populate + * + * Returns 0 on success. 
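// Editor's illustrative sketch -- not part of the upstream 6.6.15 patch.
// qla2x00_gid_pt() above sizes the GID_PT response as a 16-byte CT header
// plus one 4-byte entry per supported device (control byte + 3-byte port
// ID) and stops at the entry whose control byte has bit 7 set; if no such
// entry is found, the caller falls back to GA_NXT.  Hypothetical stand-alone
// parser over such a payload:
#include <stdint.h>
#include <stdio.h>

#define SKETCH_GID_PT_LAST 0x80u        // BIT_7 of the control byte

int sketch_parse_gid_pt(const uint8_t *rsp, unsigned int max_devices)
{
	const uint8_t *entry = rsp + 16;        // skip the CT header

	for (unsigned int i = 0; i < max_devices; i++, entry += 4) {
		uint32_t port_id = ((uint32_t)entry[1] << 16) |
				   ((uint32_t)entry[2] << 8) | entry[3];

		printf("entry %u: port_id %06x\n", i, port_id);
		if (entry[0] & SKETCH_GID_PT_LAST)
			return 0;               // last entry reached
	}
	return -1;      // list overflowed: more devices than slots
}
// (end of editor's sketch)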
+ */ +int +qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) +{ + int rval = QLA_SUCCESS; + uint16_t i; + + ms_iocb_entry_t *ms_pkt; + struct ct_sns_req *ct_req; + struct ct_sns_rsp *ct_rsp; + struct qla_hw_data *ha = vha->hw; + struct ct_arg arg; + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) + return qla2x00_sns_gpn_id(vha, list); + + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GPN_ID_REQ_SIZE; + arg.rsp_size = GPN_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + + for (i = 0; i < ha->max_fibre_devices; i++) { + /* Issue GPN_ID */ + /* Prepare common MS IOCB */ + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); + + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD, + GPN_ID_RSP_SIZE); + ct_rsp = &ha->ct_sns->p.rsp; + + /* Prepare CT arguments -- port_id */ + ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); + + /* Execute MS IOCB */ + rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, + sizeof(ms_iocb_entry_t)); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x2056, + "GPN_ID issue IOCB failed (%d).\n", rval); + break; + } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, + "GPN_ID") != QLA_SUCCESS) { + rval = QLA_FUNCTION_FAILED; + break; + } else { + /* Save portname */ + memcpy(list[i].port_name, + ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); + } + + /* Last device exit. */ + if (list[i].d_id.b.rsvd_1 != 0) + break; + } + + return (rval); +} + +/** + * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query. + * @vha: HA context + * @list: switch info entries to populate + * + * Returns 0 on success. + */ +int +qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) +{ + int rval = QLA_SUCCESS; + uint16_t i; + struct qla_hw_data *ha = vha->hw; + ms_iocb_entry_t *ms_pkt; + struct ct_sns_req *ct_req; + struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) + return qla2x00_sns_gnn_id(vha, list); + + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GNN_ID_REQ_SIZE; + arg.rsp_size = GNN_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + + for (i = 0; i < ha->max_fibre_devices; i++) { + /* Issue GNN_ID */ + /* Prepare common MS IOCB */ + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); + + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD, + GNN_ID_RSP_SIZE); + ct_rsp = &ha->ct_sns->p.rsp; + + /* Prepare CT arguments -- port_id */ + ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); + + /* Execute MS IOCB */ + rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, + sizeof(ms_iocb_entry_t)); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x2057, + "GNN_ID issue IOCB failed (%d).\n", rval); + break; + } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, + "GNN_ID") != QLA_SUCCESS) { + rval = QLA_FUNCTION_FAILED; + break; + } else { + /* Save nodename */ + memcpy(list[i].node_name, + ct_rsp->rsp.gnn_id.node_name, WWN_SIZE); + + ql_dbg(ql_dbg_disc, vha, 0x2058, + "GID_PT entry - nn %8phN pn %8phN " + "portid=%02x%02x%02x.\n", + list[i].node_name, list[i].port_name, + list[i].d_id.b.domain, list[i].d_id.b.area, + list[i].d_id.b.al_pa); + } + + /* Last device exit. 
*/ + if (list[i].d_id.b.rsvd_1 != 0) + break; + } + + return (rval); +} + +static void qla2x00_async_sns_sp_done(srb_t *sp, int rc) +{ + struct scsi_qla_host *vha = sp->vha; + struct ct_sns_pkt *ct_sns; + struct qla_work_evt *e; + + sp->rc = rc; + if (rc == QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0x204f, + "Async done-%s exiting normally.\n", + sp->name); + } else if (rc == QLA_FUNCTION_TIMEOUT) { + ql_dbg(ql_dbg_disc, vha, 0x204f, + "Async done-%s timeout\n", sp->name); + } else { + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; + memset(ct_sns, 0, sizeof(*ct_sns)); + sp->retry_count++; + if (sp->retry_count > 3) + goto err; + + ql_dbg(ql_dbg_disc, vha, 0x204f, + "Async done-%s fail rc %x. Retry count %d\n", + sp->name, rc, sp->retry_count); + + e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY); + if (!e) + goto err2; + + e->u.iosb.sp = sp; + qla2x00_post_work(vha, e); + return; + } + +err: + e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); +err2: + if (!e) { + /* please ignore kernel warning. otherwise, we have mem leak. */ + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.req_allocated_size, + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + return; + } + + e->u.iosb.sp = sp; + qla2x00_post_work(vha, e); +} + +/** + * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA. + * @vha: HA context + * + * Returns 0 on success. + */ +int +qla2x00_rft_id(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) + return qla2x00_sns_rft_id(vha); + + return qla_async_rftid(vha, &vha->d_id); +} + +static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) +{ + int rval = QLA_MEMORY_ALLOC_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + struct ct_sns_pkt *ct_sns; + + if (!vha->flags.online) + goto done; + + /* ref: INIT */ + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "rft_id"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_sns_sp_done); + + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, + GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); + if (!sp->u.iocb_cmd.u.ctarg.req) { + ql_log(ql_log_warn, vha, 0xd041, + "%s: Failed to allocate ct_sns request.\n", + __func__); + goto done_free_sp; + } + + sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, + GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); + if (!sp->u.iocb_cmd.u.ctarg.rsp) { + ql_log(ql_log_warn, vha, 0xd042, + "%s: Failed to allocate ct_sns request.\n", + __func__); + goto done_free_sp; + } + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; + memset(ct_sns, 0, sizeof(*ct_sns)); + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; + + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE); + + /* Prepare CT arguments -- port_id, FC-4 types */ + ct_req->req.rft_id.port_id = 
port_id_to_be_id(vha->d_id); + ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */ + + if (vha->flags.nvme_enabled && qla_ini_mode_enabled(vha)) + ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */ + + sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s - hdl=%x portid %06x.\n", + sp->name, sp->handle, d_id->b24); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0x2043, + "RFT_ID issue IOCB failed (%d).\n", rval); + goto done_free_sp; + } + return rval; +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + return rval; +} + +/** + * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA. + * @vha: HA context + * @type: not used + * + * Returns 0 on success. + */ +int +qla2x00_rff_id(scsi_qla_host_t *vha, u8 type) +{ + struct qla_hw_data *ha = vha->hw; + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) { + ql_dbg(ql_dbg_disc, vha, 0x2046, + "RFF_ID call not supported on ISP2100/ISP2200.\n"); + return (QLA_SUCCESS); + } + + return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type); +} + +static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, + u8 fc4feature, u8 fc4type) +{ + int rval = QLA_MEMORY_ALLOC_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + struct ct_sns_pkt *ct_sns; + + /* ref: INIT */ + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "rff_id"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_sns_sp_done); + + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, + GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); + if (!sp->u.iocb_cmd.u.ctarg.req) { + ql_log(ql_log_warn, vha, 0xd041, + "%s: Failed to allocate ct_sns request.\n", + __func__); + goto done_free_sp; + } + + sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, + GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); + if (!sp->u.iocb_cmd.u.ctarg.rsp) { + ql_log(ql_log_warn, vha, 0xd042, + "%s: Failed to allocate ct_sns request.\n", + __func__); + goto done_free_sp; + } + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; + memset(ct_sns, 0, sizeof(*ct_sns)); + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; + + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE); + + /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ + ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id); + ct_req->req.rff_id.fc4_feature = fc4feature; + ct_req->req.rff_id.fc4_type = fc4type; /* SCSI-FCP or FC-NVMe */ + + sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s - hdl=%x portid %06x feature %x type %x.\n", + sp->name, sp->handle, d_id->b24, fc4feature, fc4type); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0x2047, + "RFF_ID issue IOCB failed (%d).\n", rval); + goto done_free_sp; + } + + return rval; + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + return rval; +} + +/** + 
* qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA. + * @vha: HA context + * + * Returns 0 on success. + */ +int +qla2x00_rnn_id(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) + return qla2x00_sns_rnn_id(vha); + + return qla_async_rnnid(vha, &vha->d_id, vha->node_name); +} + +static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, + u8 *node_name) +{ + int rval = QLA_MEMORY_ALLOC_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + struct ct_sns_pkt *ct_sns; + + /* ref: INIT */ + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "rnid"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_sns_sp_done); + + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, + GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); + if (!sp->u.iocb_cmd.u.ctarg.req) { + ql_log(ql_log_warn, vha, 0xd041, + "%s: Failed to allocate ct_sns request.\n", + __func__); + goto done_free_sp; + } + + sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, + GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); + if (!sp->u.iocb_cmd.u.ctarg.rsp) { + ql_log(ql_log_warn, vha, 0xd042, + "%s: Failed to allocate ct_sns request.\n", + __func__); + goto done_free_sp; + } + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; + memset(ct_sns, 0, sizeof(*ct_sns)); + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; + + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE); + + /* Prepare CT arguments -- port_id, node_name */ + ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id); + memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE); + + sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s - hdl=%x portid %06x\n", + sp->name, sp->handle, d_id->b24); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0x204d, + "RNN_ID issue IOCB failed (%d).\n", rval); + goto done_free_sp; + } + + return rval; + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + return rval; +} + +size_t +qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size) +{ + struct qla_hw_data *ha = vha->hw; + + if (IS_QLAFX00(ha)) + return scnprintf(snn, size, "%s FW:v%s DVR:v%s", + ha->model_number, ha->mr.fw_version, qla2x00_version_str); + + return scnprintf(snn, size, "%s FW:v%d.%02d.%02d DVR:v%s", + ha->model_number, ha->fw_major_version, ha->fw_minor_version, + ha->fw_subminor_version, qla2x00_version_str); +} + +/** + * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA. + * @vha: HA context + * + * Returns 0 on success. 
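// Editor's illustrative sketch -- not part of the upstream 6.6.15 patch.
// qla2x00_get_sym_node_name() above formats the symbolic node name as
// "<model> FW:v<maj>.<min>.<sub> DVR:v<driver>" with scnprintf(), whose
// return value is the number of characters actually stored; RSNN_NN later
// registers exactly that many bytes.  Userspace approximation of the same
// bounded formatting (function and parameter names here are hypothetical):
#include <stdio.h>
#include <stddef.h>

size_t sketch_sym_node_name(char *dst, size_t size, const char *model,
			    int fw_major, int fw_minor, int fw_sub,
			    const char *drv_version)
{
	int n;

	if (!size)
		return 0;
	n = snprintf(dst, size, "%s FW:v%d.%02d.%02d DVR:v%s",
		     model, fw_major, fw_minor, fw_sub, drv_version);
	if (n < 0)
		return 0;
	// scnprintf() semantics: never report more than what was stored
	return (size_t)n >= size ? size - 1 : (size_t)n;
}
// (end of editor's sketch)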
+ */ +int +qla2x00_rsnn_nn(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) { + ql_dbg(ql_dbg_disc, vha, 0x2050, + "RSNN_ID call unsupported on ISP2100/ISP2200.\n"); + return (QLA_SUCCESS); + } + + return qla_async_rsnn_nn(vha); +} + +static int qla_async_rsnn_nn(scsi_qla_host_t *vha) +{ + int rval = QLA_MEMORY_ALLOC_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + struct ct_sns_pkt *ct_sns; + + /* ref: INIT */ + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "rsnn_nn"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_sns_sp_done); + + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, + GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); + if (!sp->u.iocb_cmd.u.ctarg.req) { + ql_log(ql_log_warn, vha, 0xd041, + "%s: Failed to allocate ct_sns request.\n", + __func__); + goto done_free_sp; + } + + sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, + GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); + if (!sp->u.iocb_cmd.u.ctarg.rsp) { + ql_log(ql_log_warn, vha, 0xd042, + "%s: Failed to allocate ct_sns request.\n", + __func__); + goto done_free_sp; + } + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; + memset(ct_sns, 0, sizeof(*ct_sns)); + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; + + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE); + + /* Prepare CT arguments -- node_name, symbolic node_name, size */ + memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE); + + /* Prepare the Symbolic Node Name */ + qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name, + sizeof(ct_req->req.rsnn_nn.sym_node_name)); + ct_req->req.rsnn_nn.name_len = + (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name); + + + sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len; + sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s - hdl=%x.\n", + sp->name, sp->handle); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0x2043, + "RFT_ID issue IOCB failed (%d).\n", rval); + goto done_free_sp; + } + + return rval; + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + return rval; +} + +/** + * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query. + * @vha: HA context + * @cmd: GS command + * @scmd_len: Subcommand length + * @data_size: response size in bytes + * + * Returns a pointer to the @ha's sns_cmd. + */ +static inline struct sns_cmd_pkt * +qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len, + uint16_t data_size) +{ + uint16_t wc; + struct sns_cmd_pkt *sns_cmd; + struct qla_hw_data *ha = vha->hw; + + sns_cmd = ha->sns_cmd; + memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt)); + wc = data_size / 2; /* Size in 16bit words. */ + sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc); + put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address); + sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len); + sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd); + wc = (data_size - 16) / 4; /* Size in 32bit words. 
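// Editor's illustrative sketch -- not part of the upstream 6.6.15 patch.
// The legacy "execute SNS" packet being built here describes its response
// buffer twice: buffer_length is the whole buffer expressed in 16-bit
// words, while the trailing size field is the payload after the 16-byte
// header expressed in 32-bit words.  Stand-alone form of that arithmetic:
#include <stdint.h>

void sketch_sns_word_counts(uint16_t data_size, uint16_t *buf_words16,
			    uint16_t *payload_words32)
{
	*buf_words16     = data_size / 2;        // whole buffer, 16-bit words
	*payload_words32 = (data_size - 16) / 4; // payload only, 32-bit words
}
// (end of editor's sketch)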
*/ + sns_cmd->p.cmd.size = cpu_to_le16(wc); + + vha->qla_stats.control_requests++; + + return (sns_cmd); +} + +/** + * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command. + * @vha: HA context + * @fcport: fcport entry to updated + * + * This command uses the old Exectute SNS Command mailbox routine. + * + * Returns 0 on success. + */ +static int +qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + int rval = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + struct sns_cmd_pkt *sns_cmd; + + /* Issue GA_NXT. */ + /* Prepare SNS command request. */ + sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN, + GA_NXT_SNS_DATA_SIZE); + + /* Prepare SNS command arguments -- port_id. */ + sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa; + sns_cmd->p.cmd.param[1] = fcport->d_id.b.area; + sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain; + + /* Execute SNS command. */ + rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2, + sizeof(struct sns_cmd_pkt)); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x205f, + "GA_NXT Send SNS failed (%d).\n", rval); + } else if (sns_cmd->p.gan_data[8] != 0x80 || + sns_cmd->p.gan_data[9] != 0x02) { + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084, + "GA_NXT failed, rejected request ga_nxt_rsp:\n"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074, + sns_cmd->p.gan_data, 16); + rval = QLA_FUNCTION_FAILED; + } else { + /* Populate fc_port_t entry. */ + fcport->d_id.b.domain = sns_cmd->p.gan_data[17]; + fcport->d_id.b.area = sns_cmd->p.gan_data[18]; + fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19]; + + memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE); + memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE); + + if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE && + sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE) + fcport->d_id.b.domain = 0xf0; + + ql_dbg(ql_dbg_disc, vha, 0x2061, + "GA_NXT entry - nn %8phN pn %8phN " + "port_id=%02x%02x%02x.\n", + fcport->node_name, fcport->port_name, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa); + } + + return (rval); +} + +/** + * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command. + * @vha: HA context + * @list: switch info entries to populate + * + * This command uses the old Exectute SNS Command mailbox routine. + * + * NOTE: Non-Nx_Ports are not requested. + * + * Returns 0 on success. + */ +static int +qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + uint16_t i; + uint8_t *entry; + struct sns_cmd_pkt *sns_cmd; + uint16_t gid_pt_sns_data_size; + + gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha); + + /* Issue GID_PT. */ + /* Prepare SNS command request. */ + sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN, + gid_pt_sns_data_size); + + /* Prepare SNS command arguments -- port_type. */ + sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE; + + /* Execute SNS command. 
*/ + rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2, + sizeof(struct sns_cmd_pkt)); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x206d, + "GID_PT Send SNS failed (%d).\n", rval); + } else if (sns_cmd->p.gid_data[8] != 0x80 || + sns_cmd->p.gid_data[9] != 0x02) { + ql_dbg(ql_dbg_disc, vha, 0x202f, + "GID_PT failed, rejected request, gid_rsp:\n"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081, + sns_cmd->p.gid_data, 16); + rval = QLA_FUNCTION_FAILED; + } else { + /* Set port IDs in switch info list. */ + for (i = 0; i < ha->max_fibre_devices; i++) { + entry = &sns_cmd->p.gid_data[(i * 4) + 16]; + list[i].d_id.b.domain = entry[1]; + list[i].d_id.b.area = entry[2]; + list[i].d_id.b.al_pa = entry[3]; + + /* Last one exit. */ + if (entry[0] & BIT_7) { + list[i].d_id.b.rsvd_1 = entry[0]; + break; + } + } + + /* + * If we've used all available slots, then the switch is + * reporting back more devices that we can handle with this + * single call. Return a failed status, and let GA_NXT handle + * the overload. + */ + if (i == ha->max_fibre_devices) + rval = QLA_FUNCTION_FAILED; + } + + return (rval); +} + +/** + * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query. + * @vha: HA context + * @list: switch info entries to populate + * + * This command uses the old Exectute SNS Command mailbox routine. + * + * Returns 0 on success. + */ +static int +qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) +{ + int rval = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + uint16_t i; + struct sns_cmd_pkt *sns_cmd; + + for (i = 0; i < ha->max_fibre_devices; i++) { + /* Issue GPN_ID */ + /* Prepare SNS command request. */ + sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD, + GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE); + + /* Prepare SNS command arguments -- port_id. */ + sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa; + sns_cmd->p.cmd.param[1] = list[i].d_id.b.area; + sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain; + + /* Execute SNS command. */ + rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, + GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x2032, + "GPN_ID Send SNS failed (%d).\n", rval); + } else if (sns_cmd->p.gpn_data[8] != 0x80 || + sns_cmd->p.gpn_data[9] != 0x02) { + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e, + "GPN_ID failed, rejected request, gpn_rsp:\n"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f, + sns_cmd->p.gpn_data, 16); + rval = QLA_FUNCTION_FAILED; + } else { + /* Save portname */ + memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16], + WWN_SIZE); + } + + /* Last device exit. */ + if (list[i].d_id.b.rsvd_1 != 0) + break; + } + + return (rval); +} + +/** + * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query. + * @vha: HA context + * @list: switch info entries to populate + * + * This command uses the old Exectute SNS Command mailbox routine. + * + * Returns 0 on success. + */ +static int +qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) +{ + int rval = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + uint16_t i; + struct sns_cmd_pkt *sns_cmd; + + for (i = 0; i < ha->max_fibre_devices; i++) { + /* Issue GNN_ID */ + /* Prepare SNS command request. */ + sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD, + GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE); + + /* Prepare SNS command arguments -- port_id. 
*/ + sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa; + sns_cmd->p.cmd.param[1] = list[i].d_id.b.area; + sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain; + + /* Execute SNS command. */ + rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, + GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x203f, + "GNN_ID Send SNS failed (%d).\n", rval); + } else if (sns_cmd->p.gnn_data[8] != 0x80 || + sns_cmd->p.gnn_data[9] != 0x02) { + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082, + "GNN_ID failed, rejected request, gnn_rsp:\n"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a, + sns_cmd->p.gnn_data, 16); + rval = QLA_FUNCTION_FAILED; + } else { + /* Save nodename */ + memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16], + WWN_SIZE); + + ql_dbg(ql_dbg_disc, vha, 0x206e, + "GID_PT entry - nn %8phN pn %8phN " + "port_id=%02x%02x%02x.\n", + list[i].node_name, list[i].port_name, + list[i].d_id.b.domain, list[i].d_id.b.area, + list[i].d_id.b.al_pa); + } + + /* Last device exit. */ + if (list[i].d_id.b.rsvd_1 != 0) + break; + } + + return (rval); +} + +/** + * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA. + * @vha: HA context + * + * This command uses the old Exectute SNS Command mailbox routine. + * + * Returns 0 on success. + */ +static int +qla2x00_sns_rft_id(scsi_qla_host_t *vha) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + struct sns_cmd_pkt *sns_cmd; + + /* Issue RFT_ID. */ + /* Prepare SNS command request. */ + sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN, + RFT_ID_SNS_DATA_SIZE); + + /* Prepare SNS command arguments -- port_id, FC-4 types */ + sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa; + sns_cmd->p.cmd.param[1] = vha->d_id.b.area; + sns_cmd->p.cmd.param[2] = vha->d_id.b.domain; + + sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */ + + /* Execute SNS command. */ + rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2, + sizeof(struct sns_cmd_pkt)); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x2060, + "RFT_ID Send SNS failed (%d).\n", rval); + } else if (sns_cmd->p.rft_data[8] != 0x80 || + sns_cmd->p.rft_data[9] != 0x02) { + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083, + "RFT_ID failed, rejected request rft_rsp:\n"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080, + sns_cmd->p.rft_data, 16); + rval = QLA_FUNCTION_FAILED; + } else { + ql_dbg(ql_dbg_disc, vha, 0x2073, + "RFT_ID exiting normally.\n"); + } + + return (rval); +} + +/** + * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA. + * @vha: HA context + * + * This command uses the old Exectute SNS Command mailbox routine. + * + * Returns 0 on success. + */ +static int +qla2x00_sns_rnn_id(scsi_qla_host_t *vha) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + struct sns_cmd_pkt *sns_cmd; + + /* Issue RNN_ID. */ + /* Prepare SNS command request. */ + sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN, + RNN_ID_SNS_DATA_SIZE); + + /* Prepare SNS command arguments -- port_id, nodename. 
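// Editor's illustrative sketch -- not part of the upstream 6.6.15 patch.
// The RNN_ID parameter block filled in just below stores the 24-bit port ID
// low byte first (AL_PA, area, domain) and the 8-byte node name in reversed
// byte order.  Hypothetical stand-alone packers showing the same layout:
#include <stdint.h>

void sketch_pack_sns_port_id(uint8_t *param, uint32_t port_id)
{
	param[0] = port_id & 0xff;              // AL_PA
	param[1] = (port_id >> 8) & 0xff;       // area
	param[2] = (port_id >> 16) & 0xff;      // domain
}

void sketch_pack_sns_node_name(uint8_t *param, const uint8_t wwn[8])
{
	for (int i = 0; i < 8; i++)
		param[4 + i] = wwn[7 - i];      // byte-reversed WWN
}
// (end of editor's sketch)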
*/ + sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa; + sns_cmd->p.cmd.param[1] = vha->d_id.b.area; + sns_cmd->p.cmd.param[2] = vha->d_id.b.domain; + + sns_cmd->p.cmd.param[4] = vha->node_name[7]; + sns_cmd->p.cmd.param[5] = vha->node_name[6]; + sns_cmd->p.cmd.param[6] = vha->node_name[5]; + sns_cmd->p.cmd.param[7] = vha->node_name[4]; + sns_cmd->p.cmd.param[8] = vha->node_name[3]; + sns_cmd->p.cmd.param[9] = vha->node_name[2]; + sns_cmd->p.cmd.param[10] = vha->node_name[1]; + sns_cmd->p.cmd.param[11] = vha->node_name[0]; + + /* Execute SNS command. */ + rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2, + sizeof(struct sns_cmd_pkt)); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x204a, + "RNN_ID Send SNS failed (%d).\n", rval); + } else if (sns_cmd->p.rnn_data[8] != 0x80 || + sns_cmd->p.rnn_data[9] != 0x02) { + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b, + "RNN_ID failed, rejected request, rnn_rsp:\n"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c, + sns_cmd->p.rnn_data, 16); + rval = QLA_FUNCTION_FAILED; + } else { + ql_dbg(ql_dbg_disc, vha, 0x204c, + "RNN_ID exiting normally.\n"); + } + + return (rval); +} + +/** + * qla2x00_mgmt_svr_login() - Login to fabric Management Service. + * @vha: HA context + * + * Returns 0 on success. + */ +int +qla2x00_mgmt_svr_login(scsi_qla_host_t *vha) +{ + int ret, rval; + uint16_t mb[MAILBOX_REGISTER_COUNT]; + struct qla_hw_data *ha = vha->hw; + + ret = QLA_SUCCESS; + if (vha->flags.management_server_logged_in) + return ret; + + rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, + 0xfa, mb, BIT_1); + if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) { + if (rval == QLA_MEMORY_ALLOC_FAILED) + ql_dbg(ql_dbg_disc, vha, 0x2085, + "Failed management_server login: loopid=%x " + "rval=%d\n", vha->mgmt_svr_loop_id, rval); + else + ql_dbg(ql_dbg_disc, vha, 0x2024, + "Failed management_server login: loopid=%x " + "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n", + vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], + mb[7]); + ret = QLA_FUNCTION_FAILED; + } else + vha->flags.management_server_logged_in = 1; + + return ret; +} + +/** + * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query. + * @vha: HA context + * @req_size: request size in bytes + * @rsp_size: response size in bytes + * + * Returns a pointer to the @ha's ms_iocb. + */ +void * +qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size, + uint32_t rsp_size) +{ + ms_iocb_entry_t *ms_pkt; + struct qla_hw_data *ha = vha->hw; + + ms_pkt = ha->ms_iocb; + memset(ms_pkt, 0, sizeof(ms_iocb_entry_t)); + + ms_pkt->entry_type = MS_IOCB_TYPE; + ms_pkt->entry_count = 1; + SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id); + ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG); + ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); + ms_pkt->cmd_dsd_count = cpu_to_le16(1); + ms_pkt->total_dsd_count = cpu_to_le16(2); + ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); + ms_pkt->req_bytecount = cpu_to_le32(req_size); + + put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address); + ms_pkt->req_dsd.length = ms_pkt->req_bytecount; + + put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address); + ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount; + + return ms_pkt; +} + +/** + * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query. 
+ * @vha: HA context + * @req_size: request size in bytes + * @rsp_size: response size in bytes + * + * Returns a pointer to the @ha's ms_iocb. + */ +void * +qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size, + uint32_t rsp_size) +{ + struct ct_entry_24xx *ct_pkt; + struct qla_hw_data *ha = vha->hw; + + ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; + memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); + + ct_pkt->entry_type = CT_IOCB_TYPE; + ct_pkt->entry_count = 1; + ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id); + ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); + ct_pkt->cmd_dsd_count = cpu_to_le16(1); + ct_pkt->rsp_dsd_count = cpu_to_le16(1); + ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); + ct_pkt->cmd_byte_count = cpu_to_le32(req_size); + + put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address); + ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count; + + put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address); + ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count; + ct_pkt->vp_index = vha->vp_idx; + + return ct_pkt; +} + +static void +qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size) +{ + struct qla_hw_data *ha = vha->hw; + ms_iocb_entry_t *ms_pkt = ha->ms_iocb; + struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; + + if (IS_FWI2_CAPABLE(ha)) { + ct_pkt->cmd_byte_count = cpu_to_le32(req_size); + ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count; + } else { + ms_pkt->req_bytecount = cpu_to_le32(req_size); + ms_pkt->req_dsd.length = ms_pkt->req_bytecount; + } +} + +/** + * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for SNS query. + * @p: CT request buffer + * @cmd: GS command + * @rsp_size: response size in bytes + * + * Returns a pointer to the intitialized @ct_req. 
+ */ +static inline struct ct_sns_req * +qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd, + uint16_t rsp_size) +{ + memset(p, 0, sizeof(struct ct_sns_pkt)); + + p->p.req.header.revision = 0x01; + p->p.req.header.gs_type = 0xFA; + p->p.req.header.gs_subtype = 0x10; + p->p.req.command = cpu_to_be16(cmd); + p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); + + return &p->p.req; +} + +uint +qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha) +{ + uint speeds = 0; + + if (IS_CNA_CAPABLE(ha)) + return FDMI_PORT_SPEED_10GB; + if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { + if (ha->max_supported_speed == 2) { + if (ha->min_supported_speed <= 6) + speeds |= FDMI_PORT_SPEED_64GB; + } + if (ha->max_supported_speed == 2 || + ha->max_supported_speed == 1) { + if (ha->min_supported_speed <= 5) + speeds |= FDMI_PORT_SPEED_32GB; + } + if (ha->max_supported_speed == 2 || + ha->max_supported_speed == 1 || + ha->max_supported_speed == 0) { + if (ha->min_supported_speed <= 4) + speeds |= FDMI_PORT_SPEED_16GB; + } + if (ha->max_supported_speed == 1 || + ha->max_supported_speed == 0) { + if (ha->min_supported_speed <= 3) + speeds |= FDMI_PORT_SPEED_8GB; + } + if (ha->max_supported_speed == 0) { + if (ha->min_supported_speed <= 2) + speeds |= FDMI_PORT_SPEED_4GB; + } + return speeds; + } + if (IS_QLA2031(ha)) { + if ((ha->pdev->subsystem_vendor == 0x103C) && + ((ha->pdev->subsystem_device == 0x8002) || + (ha->pdev->subsystem_device == 0x8086))) { + speeds = FDMI_PORT_SPEED_16GB; + } else { + speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB| + FDMI_PORT_SPEED_4GB; + } + return speeds; + } + if (IS_QLA25XX(ha) || IS_QLAFX00(ha)) + return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB| + FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB; + if (IS_QLA24XX_TYPE(ha)) + return FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_2GB| + FDMI_PORT_SPEED_1GB; + if (IS_QLA23XX(ha)) + return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB; + return FDMI_PORT_SPEED_1GB; +} + +uint +qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha) +{ + switch (ha->link_data_rate) { + case PORT_SPEED_1GB: + return FDMI_PORT_SPEED_1GB; + case PORT_SPEED_2GB: + return FDMI_PORT_SPEED_2GB; + case PORT_SPEED_4GB: + return FDMI_PORT_SPEED_4GB; + case PORT_SPEED_8GB: + return FDMI_PORT_SPEED_8GB; + case PORT_SPEED_10GB: + return FDMI_PORT_SPEED_10GB; + case PORT_SPEED_16GB: + return FDMI_PORT_SPEED_16GB; + case PORT_SPEED_32GB: + return FDMI_PORT_SPEED_32GB; + case PORT_SPEED_64GB: + return FDMI_PORT_SPEED_64GB; + default: + return FDMI_PORT_SPEED_UNKNOWN; + } +} + +/** + * qla2x00_hba_attributes() - perform HBA attributes registration + * @vha: HA context + * @entries: number of entries to use + * @callopt: Option to issue extended or standard FDMI + * command parameter + * + * Returns 0 on success. + */ +static unsigned long +qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries, + unsigned int callopt) +{ + struct qla_hw_data *ha = vha->hw; + struct new_utsname *p_sysid = utsname(); + struct ct_fdmi_hba_attr *eiter; + uint16_t alen; + unsigned long size = 0; + + /* Nodename. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME); + memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name)); + alen = sizeof(eiter->a.node_name); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20a0, + "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name)); + /* Manufacturer. 
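// Editor's illustrative sketch -- not part of the upstream 6.6.15 patch.
// qla25xx_fdmi_port_speed_currently() above is a one-to-one lookup from the
// firmware's link rate code to a single FDMI "current speed" bit.  The bit
// values below are placeholders for illustration only, not the driver's
// FDMI_PORT_SPEED_* definitions:
#include <stdint.h>

enum sketch_fdmi_speed {        // hypothetical bit assignments
	SKETCH_SPEED_UNKNOWN = 0,
	SKETCH_SPEED_1GB  = 1u << 0,
	SKETCH_SPEED_2GB  = 1u << 1,
	SKETCH_SPEED_4GB  = 1u << 2,
	SKETCH_SPEED_8GB  = 1u << 3,
	SKETCH_SPEED_10GB = 1u << 4,
	SKETCH_SPEED_16GB = 1u << 5,
	SKETCH_SPEED_32GB = 1u << 6,
	SKETCH_SPEED_64GB = 1u << 7,
};

uint32_t sketch_current_speed_bit(unsigned int gbps)
{
	switch (gbps) {
	case 1:  return SKETCH_SPEED_1GB;
	case 2:  return SKETCH_SPEED_2GB;
	case 4:  return SKETCH_SPEED_4GB;
	case 8:  return SKETCH_SPEED_8GB;
	case 10: return SKETCH_SPEED_10GB;
	case 16: return SKETCH_SPEED_16GB;
	case 32: return SKETCH_SPEED_32GB;
	case 64: return SKETCH_SPEED_64GB;
	default: return SKETCH_SPEED_UNKNOWN;
	}
}
// (end of editor's sketch)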
*/ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER); + alen = scnprintf( + eiter->a.manufacturer, sizeof(eiter->a.manufacturer), + "%s", QLA2XXX_MANUFACTURER); + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20a1, + "MANUFACTURER = %s.\n", eiter->a.manufacturer); + /* Serial number. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER); + alen = 0; + if (IS_FWI2_CAPABLE(ha)) { + alen = qla2xxx_get_vpd_field(vha, "SN", + eiter->a.serial_num, sizeof(eiter->a.serial_num)); + } + if (!alen) { + uint32_t sn = ((ha->serial0 & 0x1f) << 16) | + (ha->serial2 << 8) | ha->serial1; + alen = scnprintf( + eiter->a.serial_num, sizeof(eiter->a.serial_num), + "%c%05d", 'A' + sn / 100000, sn % 100000); + } + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20a2, + "SERIAL NUMBER = %s.\n", eiter->a.serial_num); + /* Model name. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_MODEL); + alen = scnprintf( + eiter->a.model, sizeof(eiter->a.model), + "%s", ha->model_number); + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20a3, + "MODEL NAME = %s.\n", eiter->a.model); + /* Model description. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION); + alen = scnprintf( + eiter->a.model_desc, sizeof(eiter->a.model_desc), + "%s", ha->model_desc); + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20a4, + "MODEL DESCRIPTION = %s.\n", eiter->a.model_desc); + /* Hardware version. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION); + alen = 0; + if (IS_FWI2_CAPABLE(ha)) { + if (!alen) { + alen = qla2xxx_get_vpd_field(vha, "MN", + eiter->a.hw_version, sizeof(eiter->a.hw_version)); + } + if (!alen) { + alen = qla2xxx_get_vpd_field(vha, "EC", + eiter->a.hw_version, sizeof(eiter->a.hw_version)); + } + } + if (!alen) { + alen = scnprintf( + eiter->a.hw_version, sizeof(eiter->a.hw_version), + "HW:%s", ha->adapter_id); + } + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20a5, + "HARDWARE VERSION = %s.\n", eiter->a.hw_version); + /* Driver version. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION); + alen = scnprintf( + eiter->a.driver_version, sizeof(eiter->a.driver_version), + "%s", qla2x00_version_str); + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20a6, + "DRIVER VERSION = %s.\n", eiter->a.driver_version); + /* Option ROM version. 
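// Editor's illustrative sketch -- not part of the upstream 6.6.15 patch.
// Every FDMI attribute assembled here follows the same pattern: a 2-byte
// type, a 2-byte length covering the 4-byte type/length header plus the
// value padded out to a 4-byte boundary, then the value itself; the running
// 'size' advances by that length.  Generic stand-alone packer for a
// string-valued attribute (simplified padding, big-endian header fields):
#include <stdint.h>
#include <string.h>

size_t sketch_append_fdmi_string_attr(uint8_t *buf, uint16_t type,
				       const char *val)
{
	size_t vlen = strlen(val);
	size_t padded = (vlen + 3) & ~(size_t)3;   // pad value to 4 bytes
	uint16_t alen = (uint16_t)(padded + 4);    // + type + length fields

	buf[0] = type >> 8;                        // attribute type
	buf[1] = type & 0xff;
	buf[2] = alen >> 8;                        // attribute length
	buf[3] = alen & 0xff;
	memset(buf + 4, 0, padded);
	memcpy(buf + 4, val, vlen);
	return alen;                               // caller adds this to 'size'
}
// (end of editor's sketch)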
*/ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION); + alen = scnprintf( + eiter->a.orom_version, sizeof(eiter->a.orom_version), + "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]); + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + + ql_dbg(ql_dbg_disc, vha, 0x20a7, + "OPTROM VERSION = %d.%02d.\n", + eiter->a.orom_version[1], eiter->a.orom_version[0]); + /* Firmware version */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION); + ha->isp_ops->fw_version_str(vha, eiter->a.fw_version, + sizeof(eiter->a.fw_version)); + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20a8, + "FIRMWARE VERSION = %s.\n", eiter->a.fw_version); + /* OS Name and Version */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION); + alen = 0; + if (p_sysid) { + alen = scnprintf( + eiter->a.os_version, sizeof(eiter->a.os_version), + "%s %s %s", + p_sysid->sysname, p_sysid->release, p_sysid->machine); + } + if (!alen) { + alen = scnprintf( + eiter->a.os_version, sizeof(eiter->a.os_version), + "%s %s", + "Linux", fc_host_system_hostname(vha->host)); + } + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20a9, + "OS VERSION = %s.\n", eiter->a.os_version); + if (callopt == CALLOPT_FDMI1) + goto done; + /* MAX CT Payload Length */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH); + eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size >> 2); + + alen = sizeof(eiter->a.max_ct_len); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20aa, + "CT PAYLOAD LENGTH = 0x%x.\n", be32_to_cpu(eiter->a.max_ct_len)); + /* Node Symbolic Name */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME); + alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name, + sizeof(eiter->a.sym_name)); + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20ab, + "SYMBOLIC NAME = %s.\n", eiter->a.sym_name); + /* Vendor Specific information */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_SPECIFIC_INFO); + eiter->a.vendor_specific_info = cpu_to_be32(PCI_VENDOR_ID_QLOGIC); + alen = sizeof(eiter->a.vendor_specific_info); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20ac, + "VENDOR SPECIFIC INFO = 0x%x.\n", + be32_to_cpu(eiter->a.vendor_specific_info)); + /* Num Ports */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS); + eiter->a.num_ports = cpu_to_be32(1); + alen = sizeof(eiter->a.num_ports); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20ad, + "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports)); + /* Fabric Name */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME); + memcpy(eiter->a.fabric_name, vha->fabric_node_name, + sizeof(eiter->a.fabric_name)); + alen = sizeof(eiter->a.fabric_name); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20ae, + "FABRIC NAME = %016llx.\n", 
wwn_to_u64(eiter->a.fabric_name)); + /* BIOS Version */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME); + alen = scnprintf( + eiter->a.bios_name, sizeof(eiter->a.bios_name), + "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]); + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20af, + "BIOS NAME = %s\n", eiter->a.bios_name); + /* Vendor Identifier */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_IDENTIFIER); + alen = scnprintf( + eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier), + "%s", "QLGC"); + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20b0, + "VENDOR IDENTIFIER = %s.\n", eiter->a.vendor_identifier); +done: + return size; +} + +/** + * qla2x00_port_attributes() - perform Port attributes registration + * @vha: HA context + * @entries: number of entries to use + * @callopt: Option to issue extended or standard FDMI + * command parameter + * + * Returns 0 on success. + */ +static unsigned long +qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, + unsigned int callopt) +{ + struct qla_hw_data *ha = vha->hw; + struct new_utsname *p_sysid = utsname(); + char *hostname = p_sysid ? + p_sysid->nodename : fc_host_system_hostname(vha->host); + struct ct_fdmi_port_attr *eiter; + uint16_t alen; + unsigned long size = 0; + + /* FC4 types. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES); + eiter->a.fc4_types[0] = 0x00; + eiter->a.fc4_types[1] = 0x00; + eiter->a.fc4_types[2] = 0x01; + eiter->a.fc4_types[3] = 0x00; + alen = sizeof(eiter->a.fc4_types); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20c0, + "FC4 TYPES = %016llx.\n", *(uint64_t *)eiter->a.fc4_types); + if (vha->flags.nvme_enabled) { + eiter->a.fc4_types[6] = 1; /* NVMe type 28h */ + ql_dbg(ql_dbg_disc, vha, 0x211f, + "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n", + eiter->a.fc4_types[6]); + } + /* Supported speed. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); + eiter->a.sup_speed = cpu_to_be32( + qla25xx_fdmi_port_speed_capability(ha)); + alen = sizeof(eiter->a.sup_speed); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20c1, + "SUPPORTED SPEED = %x.\n", be32_to_cpu(eiter->a.sup_speed)); + /* Current speed. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED); + eiter->a.cur_speed = cpu_to_be32( + qla25xx_fdmi_port_speed_currently(ha)); + alen = sizeof(eiter->a.cur_speed); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20c2, + "CURRENT SPEED = %x.\n", be32_to_cpu(eiter->a.cur_speed)); + /* Max frame size. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE); + eiter->a.max_frame_size = cpu_to_be32(ha->frame_payload_size); + alen = sizeof(eiter->a.max_frame_size); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20c3, + "MAX FRAME SIZE = %x.\n", be32_to_cpu(eiter->a.max_frame_size)); + /* OS device name. 
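Typically rendered as "<driver>:host<N>", i.e. the driver name plus the SCSI host number (descriptive note; exact string depends on QLA2XXX_DRIVER_NAME).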
*/ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME); + alen = scnprintf( + eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name), + "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no); + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20c4, + "OS DEVICE NAME = %s.\n", eiter->a.os_dev_name); + /* Hostname. */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME); + if (!*hostname || !strncmp(hostname, "(none)", 6)) + hostname = "Linux-default"; + alen = scnprintf( + eiter->a.host_name, sizeof(eiter->a.host_name), + "%s", hostname); + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20c5, + "HOSTNAME = %s.\n", eiter->a.host_name); + + if (callopt == CALLOPT_FDMI1) + goto done; + + /* Node Name */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME); + memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name)); + alen = sizeof(eiter->a.node_name); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20c6, + "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name)); + + /* Port Name */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_NAME); + memcpy(eiter->a.port_name, vha->port_name, sizeof(eiter->a.port_name)); + alen = sizeof(eiter->a.port_name); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20c7, + "PORTNAME = %016llx.\n", wwn_to_u64(eiter->a.port_name)); + + /* Port Symbolic Name */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME); + alen = qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name, + sizeof(eiter->a.port_sym_name)); + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20c8, + "PORT SYMBOLIC NAME = %s\n", eiter->a.port_sym_name); + + /* Port Type */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_TYPE); + eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE); + alen = sizeof(eiter->a.port_type); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20c9, + "PORT TYPE = %x.\n", be32_to_cpu(eiter->a.port_type)); + + /* Supported Class of Service */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS); + eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3); + alen = sizeof(eiter->a.port_supported_cos); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20ca, + "SUPPORTED COS = %08x\n", be32_to_cpu(eiter->a.port_supported_cos)); + + /* Port Fabric Name */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME); + memcpy(eiter->a.fabric_name, vha->fabric_node_name, + sizeof(eiter->a.fabric_name)); + alen = sizeof(eiter->a.fabric_name); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20cb, + "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name)); + + /* FC4_type */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE); + eiter->a.port_fc4_type[0] = 0x00; + eiter->a.port_fc4_type[1] = 0x00; + eiter->a.port_fc4_type[2] = 0x01; + eiter->a.port_fc4_type[3] = 0x00; + alen = 
sizeof(eiter->a.port_fc4_type); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20cc, + "PORT ACTIVE FC4 TYPE = %016llx.\n", + *(uint64_t *)eiter->a.port_fc4_type); + + /* Port State */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_STATE); + eiter->a.port_state = cpu_to_be32(2); + alen = sizeof(eiter->a.port_state); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20cd, + "PORT_STATE = %x.\n", be32_to_cpu(eiter->a.port_state)); + + /* Number of Ports */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_COUNT); + eiter->a.num_ports = cpu_to_be32(1); + alen = sizeof(eiter->a.num_ports); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20ce, + "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports)); + + /* Port Identifier */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_PORT_IDENTIFIER); + eiter->a.port_id = cpu_to_be32(vha->d_id.b24); + alen = sizeof(eiter->a.port_id); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20cf, + "PORT ID = %x.\n", be32_to_cpu(eiter->a.port_id)); + + if (callopt == CALLOPT_FDMI2 || !ql2xsmartsan) + goto done; + + /* Smart SAN Service Category (Populate Smart SAN Initiator)*/ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_SMARTSAN_SERVICE); + alen = scnprintf( + eiter->a.smartsan_service, sizeof(eiter->a.smartsan_service), + "%s", "Smart SAN Initiator"); + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20d0, + "SMARTSAN SERVICE CATEGORY = %s.\n", eiter->a.smartsan_service); + + /* Smart SAN GUID (NWWN+PWWN) */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_SMARTSAN_GUID); + memcpy(eiter->a.smartsan_guid, vha->node_name, WWN_SIZE); + memcpy(eiter->a.smartsan_guid + WWN_SIZE, vha->port_name, WWN_SIZE); + alen = sizeof(eiter->a.smartsan_guid); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20d1, + "Smart SAN GUID = %016llx-%016llx\n", + wwn_to_u64(eiter->a.smartsan_guid), + wwn_to_u64(eiter->a.smartsan_guid + WWN_SIZE)); + + /* Smart SAN Version (populate "Smart SAN Version 1.0") */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_SMARTSAN_VERSION); + alen = scnprintf( + eiter->a.smartsan_version, sizeof(eiter->a.smartsan_version), + "%s", "Smart SAN Version 2.0"); + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20d2, + "SMARTSAN VERSION = %s\n", eiter->a.smartsan_version); + + /* Smart SAN Product Name (Specify Adapter Model No) */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_SMARTSAN_PROD_NAME); + alen = scnprintf(eiter->a.smartsan_prod_name, + sizeof(eiter->a.smartsan_prod_name), + "ISP%04x", ha->pdev->device); + alen += FDMI_ATTR_ALIGNMENT(alen); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20d3, + "SMARTSAN PRODUCT NAME = %s\n", eiter->a.smartsan_prod_name); + + /* Smart SAN Port Info (specify: 1=Physical, 2=NPIV, 3=SRIOV) */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_SMARTSAN_PORT_INFO); + eiter->a.smartsan_port_info = cpu_to_be32(vha->vp_idx ? 
2 : 1); + alen = sizeof(eiter->a.smartsan_port_info); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20d4, + "SMARTSAN PORT INFO = %x\n", eiter->a.smartsan_port_info); + + /* Smart SAN Security Support */ + eiter = entries + size; + eiter->type = cpu_to_be16(FDMI_SMARTSAN_SECURITY_SUPPORT); + eiter->a.smartsan_security_support = cpu_to_be32(1); + alen = sizeof(eiter->a.smartsan_security_support); + alen += FDMI_ATTR_TYPELEN(eiter); + eiter->len = cpu_to_be16(alen); + size += alen; + ql_dbg(ql_dbg_disc, vha, 0x20d6, + "SMARTSAN SECURITY SUPPORT = %d\n", + be32_to_cpu(eiter->a.smartsan_security_support)); + +done: + return size; +} + +/** + * qla2x00_fdmi_rhba() - perform RHBA FDMI registration + * @vha: HA context + * @callopt: Option to issue FDMI registration + * + * Returns 0 on success. + */ +static int +qla2x00_fdmi_rhba(scsi_qla_host_t *vha, unsigned int callopt) +{ + struct qla_hw_data *ha = vha->hw; + unsigned long size = 0; + unsigned int rval, count; + ms_iocb_entry_t *ms_pkt; + struct ct_sns_req *ct_req; + struct ct_sns_rsp *ct_rsp; + void *entries; + + count = callopt != CALLOPT_FDMI1 ? + FDMI2_HBA_ATTR_COUNT : FDMI1_HBA_ATTR_COUNT; + + size = RHBA_RSP_SIZE; + + ql_dbg(ql_dbg_disc, vha, 0x20e0, + "RHBA (callopt=%x count=%u size=%lu).\n", callopt, count, size); + + /* Request size adjusted after CT preparation */ + ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size); + + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, size); + ct_rsp = &ha->ct_sns->p.rsp; + + /* Prepare FDMI command entries */ + memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, + sizeof(ct_req->req.rhba.hba_identifier)); + size += sizeof(ct_req->req.rhba.hba_identifier); + + ct_req->req.rhba.entry_count = cpu_to_be32(1); + size += sizeof(ct_req->req.rhba.entry_count); + + memcpy(ct_req->req.rhba.port_name, vha->port_name, + sizeof(ct_req->req.rhba.port_name)); + size += sizeof(ct_req->req.rhba.port_name); + + /* Attribute count */ + ct_req->req.rhba.attrs.count = cpu_to_be32(count); + size += sizeof(ct_req->req.rhba.attrs.count); + + /* Attribute block */ + entries = &ct_req->req.rhba.attrs.entry; + + size += qla2x00_hba_attributes(vha, entries, callopt); + + /* Update MS request size. 
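The additional 16 bytes account for the CT_IU preamble that precedes the FDMI payload (editor's note, inferred from the prep routine sizing below).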
*/ + qla2x00_update_ms_fdmi_iocb(vha, size + 16); + + ql_dbg(ql_dbg_disc, vha, 0x20e1, + "RHBA %016llx %016llx.\n", + wwn_to_u64(ct_req->req.rhba.hba_identifier), + wwn_to_u64(ct_req->req.rhba.port_name)); + + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20e2, + entries, size); + + /* Execute MS IOCB */ + rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, + sizeof(*ha->ms_iocb)); + if (rval) { + ql_dbg(ql_dbg_disc, vha, 0x20e3, + "RHBA iocb failed (%d).\n", rval); + return rval; + } + + rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA"); + if (rval) { + if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && + ct_rsp->header.explanation_code == + CT_EXPL_ALREADY_REGISTERED) { + ql_dbg(ql_dbg_disc, vha, 0x20e4, + "RHBA already registered.\n"); + return QLA_ALREADY_REGISTERED; + } + + ql_dbg(ql_dbg_disc, vha, 0x20e5, + "RHBA failed, CT Reason %#x, CT Explanation %#x\n", + ct_rsp->header.reason_code, + ct_rsp->header.explanation_code); + return rval; + } + + ql_dbg(ql_dbg_disc, vha, 0x20e6, "RHBA exiting normally.\n"); + return rval; +} + + +static int +qla2x00_fdmi_dhba(scsi_qla_host_t *vha) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + ms_iocb_entry_t *ms_pkt; + struct ct_sns_req *ct_req; + struct ct_sns_rsp *ct_rsp; + /* Issue RPA */ + /* Prepare common MS IOCB */ + ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE, + DHBA_RSP_SIZE); + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE); + ct_rsp = &ha->ct_sns->p.rsp; + /* Prepare FDMI command arguments -- portname. */ + memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE); + ql_dbg(ql_dbg_disc, vha, 0x2036, + "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name); + /* Execute MS IOCB */ + rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, + sizeof(ms_iocb_entry_t)); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x2037, + "DHBA issue IOCB failed (%d).\n", rval); + } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") != + QLA_SUCCESS) { + rval = QLA_FUNCTION_FAILED; + } else { + ql_dbg(ql_dbg_disc, vha, 0x2038, + "DHBA exiting normally.\n"); + } + return rval; +} + +/** + * qla2x00_fdmi_rprt() - perform RPRT registration + * @vha: HA context + * @callopt: Option to issue extended or standard FDMI + * command parameter + * + * Returns 0 on success. + */ +static int +qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt) +{ + struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev); + struct qla_hw_data *ha = vha->hw; + ulong size = 0; + uint rval, count; + ms_iocb_entry_t *ms_pkt; + struct ct_sns_req *ct_req; + struct ct_sns_rsp *ct_rsp; + void *entries; + count = callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ? + FDMI2_SMARTSAN_PORT_ATTR_COUNT : + callopt != CALLOPT_FDMI1 ? 
+ FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT; + + size = RPRT_RSP_SIZE; + ql_dbg(ql_dbg_disc, vha, 0x20e8, + "RPRT (callopt=%x count=%u size=%lu).\n", callopt, count, size); + /* Request size adjusted after CT preparation */ + ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size); + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPRT_CMD, size); + ct_rsp = &ha->ct_sns->p.rsp; + /* Prepare FDMI command entries */ + memcpy(ct_req->req.rprt.hba_identifier, base_vha->port_name, + sizeof(ct_req->req.rprt.hba_identifier)); + size += sizeof(ct_req->req.rprt.hba_identifier); + memcpy(ct_req->req.rprt.port_name, vha->port_name, + sizeof(ct_req->req.rprt.port_name)); + size += sizeof(ct_req->req.rprt.port_name); + /* Attribute count */ + ct_req->req.rprt.attrs.count = cpu_to_be32(count); + size += sizeof(ct_req->req.rprt.attrs.count); + /* Attribute block */ + entries = ct_req->req.rprt.attrs.entry; + size += qla2x00_port_attributes(vha, entries, callopt); + /* Update MS request size. */ + qla2x00_update_ms_fdmi_iocb(vha, size + 16); + ql_dbg(ql_dbg_disc, vha, 0x20e9, + "RPRT %016llx %016llx.\n", + wwn_to_u64(ct_req->req.rprt.port_name), + wwn_to_u64(ct_req->req.rprt.port_name)); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ea, + entries, size); + /* Execute MS IOCB */ + rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, + sizeof(*ha->ms_iocb)); + if (rval) { + ql_dbg(ql_dbg_disc, vha, 0x20eb, + "RPRT iocb failed (%d).\n", rval); + return rval; + } + rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPRT"); + if (rval) { + if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && + ct_rsp->header.explanation_code == + CT_EXPL_ALREADY_REGISTERED) { + ql_dbg(ql_dbg_disc, vha, 0x20ec, + "RPRT already registered.\n"); + return QLA_ALREADY_REGISTERED; + } + + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "RPRT failed, CT Reason code: %#x, CT Explanation %#x\n", + ct_rsp->header.reason_code, + ct_rsp->header.explanation_code); + return rval; + } + ql_dbg(ql_dbg_disc, vha, 0x20ee, "RPRT exiting normally.\n"); + return rval; +} + +/** + * qla2x00_fdmi_rpa() - perform RPA registration + * @vha: HA context + * @callopt: Option to issue FDMI registration + * + * Returns 0 on success. + */ +static int +qla2x00_fdmi_rpa(scsi_qla_host_t *vha, uint callopt) +{ + struct qla_hw_data *ha = vha->hw; + ulong size = 0; + uint rval, count; + ms_iocb_entry_t *ms_pkt; + struct ct_sns_req *ct_req; + struct ct_sns_rsp *ct_rsp; + void *entries; + + count = + callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ? + FDMI2_SMARTSAN_PORT_ATTR_COUNT : + callopt != CALLOPT_FDMI1 ? + FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT; + + size = + callopt != CALLOPT_FDMI1 ? + SMARTSAN_RPA_RSP_SIZE : RPA_RSP_SIZE; + + ql_dbg(ql_dbg_disc, vha, 0x20f0, + "RPA (callopt=%x count=%u size=%lu).\n", callopt, count, size); + + /* Request size adjusted after CT preparation */ + ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size); + + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, size); + ct_rsp = &ha->ct_sns->p.rsp; + + /* Prepare FDMI command entries. */ + memcpy(ct_req->req.rpa.port_name, vha->port_name, + sizeof(ct_req->req.rpa.port_name)); + size += sizeof(ct_req->req.rpa.port_name); + + /* Attribute count */ + ct_req->req.rpa.attrs.count = cpu_to_be32(count); + size += sizeof(ct_req->req.rpa.attrs.count); + + /* Attribute block */ + entries = ct_req->req.rpa.attrs.entry; + + size += qla2x00_port_attributes(vha, entries, callopt); + + /* Update MS request size. 
*/ + qla2x00_update_ms_fdmi_iocb(vha, size + 16); + + ql_dbg(ql_dbg_disc, vha, 0x20f1, + "RPA %016llx.\n", wwn_to_u64(ct_req->req.rpa.port_name)); + + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20f2, + entries, size); + + /* Execute MS IOCB */ + rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, + sizeof(*ha->ms_iocb)); + if (rval) { + ql_dbg(ql_dbg_disc, vha, 0x20f3, + "RPA iocb failed (%d).\n", rval); + return rval; + } + + rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA"); + if (rval) { + if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && + ct_rsp->header.explanation_code == + CT_EXPL_ALREADY_REGISTERED) { + ql_dbg(ql_dbg_disc, vha, 0x20f4, + "RPA already registered.\n"); + return QLA_ALREADY_REGISTERED; + } + + ql_dbg(ql_dbg_disc, vha, 0x20f5, + "RPA failed, CT Reason code: %#x, CT Explanation %#x\n", + ct_rsp->header.reason_code, + ct_rsp->header.explanation_code); + return rval; + } + + ql_dbg(ql_dbg_disc, vha, 0x20f6, "RPA exiting normally.\n"); + return rval; +} + +/** + * qla2x00_fdmi_register() - + * @vha: HA context + * + * Returns 0 on success. + */ +int +qla2x00_fdmi_register(scsi_qla_host_t *vha) +{ + int rval = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + if (IS_QLA2100(ha) || IS_QLA2200(ha) || + IS_QLAFX00(ha)) + return rval; + + rval = qla2x00_mgmt_svr_login(vha); + if (rval) + return rval; + + /* For npiv/vport send rprt only */ + if (vha->vp_idx) { + if (ql2xsmartsan) + rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2_SMARTSAN); + if (rval || !ql2xsmartsan) + rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2); + if (rval) + rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI1); + + return rval; + } + + /* Try fdmi2 first, if fails then try fdmi1 */ + rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2); + if (rval) { + if (rval != QLA_ALREADY_REGISTERED) + goto try_fdmi; + + rval = qla2x00_fdmi_dhba(vha); + if (rval) + goto try_fdmi; + + rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2); + if (rval) + goto try_fdmi; + } + + if (ql2xsmartsan) + rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2_SMARTSAN); + if (rval || !ql2xsmartsan) + rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2); + if (rval) + goto try_fdmi; + + return rval; + +try_fdmi: + rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1); + if (rval) { + if (rval != QLA_ALREADY_REGISTERED) + return rval; + + rval = qla2x00_fdmi_dhba(vha); + if (rval) + return rval; + + rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1); + if (rval) + return rval; + } + + rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI1); + + return rval; +} + +/** + * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query. + * @vha: HA context + * @list: switch info entries to populate + * + * Returns 0 on success. 
+ */ +int +qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) +{ + int rval = QLA_SUCCESS; + uint16_t i; + struct qla_hw_data *ha = vha->hw; + ms_iocb_entry_t *ms_pkt; + struct ct_sns_req *ct_req; + struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; + + if (!IS_IIDMA_CAPABLE(ha)) + return QLA_FUNCTION_FAILED; + + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GFPN_ID_REQ_SIZE; + arg.rsp_size = GFPN_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + + for (i = 0; i < ha->max_fibre_devices; i++) { + /* Issue GFPN_ID */ + /* Prepare common MS IOCB */ + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); + + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD, + GFPN_ID_RSP_SIZE); + ct_rsp = &ha->ct_sns->p.rsp; + + /* Prepare CT arguments -- port_id */ + ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); + + /* Execute MS IOCB */ + rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, + sizeof(ms_iocb_entry_t)); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x2023, + "GFPN_ID issue IOCB failed (%d).\n", rval); + break; + } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, + "GFPN_ID") != QLA_SUCCESS) { + rval = QLA_FUNCTION_FAILED; + break; + } else { + /* Save fabric portname */ + memcpy(list[i].fabric_port_name, + ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE); + } + + /* Last device exit. */ + if (list[i].d_id.b.rsvd_1 != 0) + break; + } + + return (rval); +} + + +static inline struct ct_sns_req * +qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd, + uint16_t rsp_size) +{ + memset(p, 0, sizeof(struct ct_sns_pkt)); + + p->p.req.header.revision = 0x01; + p->p.req.header.gs_type = 0xFA; + p->p.req.header.gs_subtype = 0x01; + p->p.req.command = cpu_to_be16(cmd); + p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); + + return &p->p.req; +} + +static uint16_t +qla2x00_port_speed_capability(uint16_t speed) +{ + switch (speed) { + case BIT_15: + return PORT_SPEED_1GB; + case BIT_14: + return PORT_SPEED_2GB; + case BIT_13: + return PORT_SPEED_4GB; + case BIT_12: + return PORT_SPEED_10GB; + case BIT_11: + return PORT_SPEED_8GB; + case BIT_10: + return PORT_SPEED_16GB; + case BIT_8: + return PORT_SPEED_32GB; + case BIT_7: + return PORT_SPEED_64GB; + default: + return PORT_SPEED_UNKNOWN; + } +} + +/** + * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query. + * @vha: HA context + * @list: switch info entries to populate + * + * Returns 0 on success. 
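+ * QLA_FUNCTION_FAILED is returned when iiDMA or GPSC support is not available.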
+ */ +int +qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) +{ + int rval; + uint16_t i; + struct qla_hw_data *ha = vha->hw; + ms_iocb_entry_t *ms_pkt; + struct ct_sns_req *ct_req; + struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; + + if (!IS_IIDMA_CAPABLE(ha)) + return QLA_FUNCTION_FAILED; + if (!ha->flags.gpsc_supported) + return QLA_FUNCTION_FAILED; + + rval = qla2x00_mgmt_svr_login(vha); + if (rval) + return rval; + + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GPSC_REQ_SIZE; + arg.rsp_size = GPSC_RSP_SIZE; + arg.nport_handle = vha->mgmt_svr_loop_id; + + for (i = 0; i < ha->max_fibre_devices; i++) { + /* Issue GPSC */ + /* Prepare common MS IOCB */ + ms_pkt = qla24xx_prep_ms_iocb(vha, &arg); + + /* Prepare CT request */ + ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD, + GPSC_RSP_SIZE); + ct_rsp = &ha->ct_sns->p.rsp; + + /* Prepare CT arguments -- port_name */ + memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name, + WWN_SIZE); + + /* Execute MS IOCB */ + rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, + sizeof(ms_iocb_entry_t)); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x2059, + "GPSC issue IOCB failed (%d).\n", rval); + } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, + "GPSC")) != QLA_SUCCESS) { + /* FM command unsupported? */ + if (rval == QLA_INVALID_COMMAND && + (ct_rsp->header.reason_code == + CT_REASON_INVALID_COMMAND_CODE || + ct_rsp->header.reason_code == + CT_REASON_COMMAND_UNSUPPORTED)) { + ql_dbg(ql_dbg_disc, vha, 0x205a, + "GPSC command unsupported, disabling " + "query.\n"); + ha->flags.gpsc_supported = 0; + rval = QLA_FUNCTION_FAILED; + break; + } + rval = QLA_FUNCTION_FAILED; + } else { + list[i].fp_speed = qla2x00_port_speed_capability( + be16_to_cpu(ct_rsp->rsp.gpsc.speed)); + ql_dbg(ql_dbg_disc, vha, 0x205b, + "GPSC ext entry - fpn " + "%8phN speeds=%04x speed=%04x.\n", + list[i].fabric_port_name, + be16_to_cpu(ct_rsp->rsp.gpsc.speeds), + be16_to_cpu(ct_rsp->rsp.gpsc.speed)); + } + + /* Last device exit. */ + if (list[i].d_id.b.rsvd_1 != 0) + break; + } + + return (rval); +} + +/** + * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
+ * + * @vha: HA context + * @list: switch info entries to populate + * + */ +void +qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) +{ + int rval; + uint16_t i; + + ms_iocb_entry_t *ms_pkt; + struct ct_sns_req *ct_req; + struct ct_sns_rsp *ct_rsp; + struct qla_hw_data *ha = vha->hw; + uint8_t fcp_scsi_features = 0, nvme_features = 0; + struct ct_arg arg; + + for (i = 0; i < ha->max_fibre_devices; i++) { + /* Set default FC4 Type as UNKNOWN so the default is to + * Process this port */ + list[i].fc4_type = 0; + + /* Do not attempt GFF_ID if we are not FWI_2 capable */ + if (!IS_FWI2_CAPABLE(ha)) + continue; + + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GFF_ID_REQ_SIZE; + arg.rsp_size = GFF_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + + /* Prepare common MS IOCB */ + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); + + /* Prepare CT request */ + ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD, + GFF_ID_RSP_SIZE); + ct_rsp = &ha->ct_sns->p.rsp; + + /* Prepare CT arguments -- port_id */ + ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); + + /* Execute MS IOCB */ + rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, + sizeof(ms_iocb_entry_t)); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0x205c, + "GFF_ID issue IOCB failed (%d).\n", rval); + } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, + "GFF_ID") != QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0x205d, + "GFF_ID IOCB status had a failure status code.\n"); + } else { + fcp_scsi_features = + ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; + fcp_scsi_features &= 0x0f; + + if (fcp_scsi_features) { + list[i].fc4_type = FS_FC4TYPE_FCP; + list[i].fc4_features = fcp_scsi_features; + } + + nvme_features = + ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; + nvme_features &= 0xf; + + if (nvme_features) { + list[i].fc4_type |= FS_FC4TYPE_NVME; + list[i].fc4_features = nvme_features; + } + } + + /* Last device exit. */ + if (list[i].d_id.b.rsvd_1 != 0) + break; + } +} + +int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_GPSC); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + return qla2x00_post_work(vha, e); +} + +void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea) +{ + struct fc_port *fcport = ea->fcport; + + ql_dbg(ql_dbg_disc, vha, 0x20d8, + "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen, + ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id); + + if (fcport->disc_state == DSC_DELETE_PEND) + return; + + /* We will figure-out what happen after AUTH completes */ + if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) + return; + + if (ea->sp->gen2 != fcport->login_gen) { + /* target side must have changed it. 
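A newer login generation means another login completed after this command was issued, so the stale result is ignored.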
*/ + ql_dbg(ql_dbg_disc, vha, 0x20d3, + "%s %8phC generation changed\n", + __func__, fcport->port_name); + return; + } else if (ea->sp->gen1 != fcport->rscn_gen) { + return; + } + + qla_post_iidma_work(vha, fcport); +} + +static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res) +{ + struct scsi_qla_host *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport = sp->fcport; + struct ct_sns_rsp *ct_rsp; + struct event_arg ea; + + ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; + + ql_dbg(ql_dbg_disc, vha, 0x2053, + "Async done-%s res %x, WWPN %8phC \n", + sp->name, res, fcport->port_name); + + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + + if (res == QLA_FUNCTION_TIMEOUT) + goto done; + + if (res == (DID_ERROR << 16)) { + /* entry status error */ + goto done; + } else if (res) { + if ((ct_rsp->header.reason_code == + CT_REASON_INVALID_COMMAND_CODE) || + (ct_rsp->header.reason_code == + CT_REASON_COMMAND_UNSUPPORTED)) { + ql_dbg(ql_dbg_disc, vha, 0x2019, + "GPSC command unsupported, disabling query.\n"); + ha->flags.gpsc_supported = 0; + goto done; + } + } else { + fcport->fp_speed = qla2x00_port_speed_capability( + be16_to_cpu(ct_rsp->rsp.gpsc.speed)); + + ql_dbg(ql_dbg_disc, vha, 0x2054, + "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n", + sp->name, fcport->fabric_port_name, + be16_to_cpu(ct_rsp->rsp.gpsc.speeds), + be16_to_cpu(ct_rsp->rsp.gpsc.speed)); + } + memset(&ea, 0, sizeof(ea)); + ea.rc = res; + ea.fcport = fcport; + ea.sp = sp; + qla24xx_handle_gpsc_event(vha, &ea); + +done: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +} + +int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + int rval = QLA_FUNCTION_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) + return rval; + + /* ref: INIT */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "gpsc"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla24xx_async_gpsc_sp_done); + + /* CT_IU preamble */ + ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD, + GPSC_RSP_SIZE); + + /* GPSC req */ + memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name, + WWN_SIZE); + + sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id; + + ql_dbg(ql_dbg_disc, vha, 0x205e, + "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n", + sp->name, fcport->port_name, sp->handle, + fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + return rval; + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + return rval; +} + +void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp) +{ + struct srb_iocb *c = &sp->u.iocb_cmd; + + switch (sp->type) { + case SRB_ELS_DCMD: + qla2x00_els_dcmd2_free(vha, &c->u.els_plogi); + break; + case SRB_CT_PTHRU_CMD: + default: + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.req_allocated_size, + 
sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + break; + } + + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +} + +void qla24xx_async_gffid_sp_done(srb_t *sp, int res) +{ + struct scsi_qla_host *vha = sp->vha; + fc_port_t *fcport = sp->fcport; + struct ct_sns_rsp *ct_rsp; + uint8_t fc4_scsi_feat; + uint8_t fc4_nvme_feat; + + ql_dbg(ql_dbg_disc, vha, 0x2133, + "Async done-%s res %x ID %x. %8phC\n", + sp->name, res, fcport->d_id.b24, fcport->port_name); + + ct_rsp = sp->u.iocb_cmd.u.ctarg.rsp; + fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; + fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; + sp->rc = res; + + /* + * FC-GS-7, 5.2.3.12 FC-4 Features - format + * The format of the FC-4 Features object, as defined by the FC-4, + * Shall be an array of 4-bit values, one for each type code value + */ + if (!res) { + if (fc4_scsi_feat & 0xf) { + /* w1 b00:03 */ + fcport->fc4_type = FS_FC4TYPE_FCP; + fcport->fc4_features = fc4_scsi_feat & 0xf; + } + + if (fc4_nvme_feat & 0xf) { + /* w5 [00:03]/28h */ + fcport->fc4_type |= FS_FC4TYPE_NVME; + fcport->fc4_features = fc4_nvme_feat & 0xf; + } + } + + if (sp->flags & SRB_WAKEUP_ON_COMP) { + complete(sp->comp); + } else { + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.req_allocated_size, + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + /* we should not be here */ + dump_stack(); + } +} + +/* Get FC4 Feature with Nport ID. 
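The GFF_ID response carries one 4-bit feature field per FC-4 type; the completion handler copies the FCP and NVMe nibbles into fcport->fc4_features.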
*/ +int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool wait) +{ + int rval = QLA_FUNCTION_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + DECLARE_COMPLETION_ONSTACK(comp); + + /* this routine does not have handling for no wait */ + if (!vha->flags.online || !wait) + return rval; + + /* ref: INIT */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + return rval; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "gffid"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla24xx_async_gffid_sp_done); + sp->comp = ∁ + sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout; + + if (wait) + sp->flags = SRB_WAKEUP_ON_COMP; + + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.req_allocated_size, + &sp->u.iocb_cmd.u.ctarg.req_dma, + GFP_KERNEL); + if (!sp->u.iocb_cmd.u.ctarg.req) { + ql_log(ql_log_warn, vha, 0xd041, + "%s: Failed to allocate ct_sns request.\n", + __func__); + goto done_free_sp; + } + + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); + sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, + &sp->u.iocb_cmd.u.ctarg.rsp_dma, + GFP_KERNEL); + if (!sp->u.iocb_cmd.u.ctarg.rsp) { + ql_log(ql_log_warn, vha, 0xd041, + "%s: Failed to allocate ct_sns response.\n", + __func__); + goto done_free_sp; + } + + /* CT_IU preamble */ + ct_req = qla2x00_prep_ct_req(sp->u.iocb_cmd.u.ctarg.req, GFF_ID_CMD, GFF_ID_RSP_SIZE); + + ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain; + ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area; + ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa; + + sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; + + rval = qla2x00_start_sp(sp); + + if (rval != QLA_SUCCESS) { + rval = QLA_FUNCTION_FAILED; + goto done_free_sp; + } else { + ql_dbg(ql_dbg_disc, vha, 0x3074, + "Async-%s hdl=%x portid %06x\n", + sp->name, sp->handle, fcport->d_id.b24); + } + + wait_for_completion(sp->comp); + rval = sp->rc; + +done_free_sp: + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.req_allocated_size, + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + return rval; +} + +/* GPN_FT + GNN_FT*/ +static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn) +{ + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *vp; + unsigned long flags; + u64 twwn; + int rc = 0; + + if (!ha->num_vhosts) + return 0; + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) { + twwn = wwn_to_u64(vp->port_name); + if (wwn == twwn) { + rc = 1; + break; + } + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + + return rc; +} + +void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) +{ + fc_port_t *fcport; + u32 i, rc; + bool found; + struct fab_scan_rp *rp, *trp; + unsigned long flags; + u8 recheck = 0; + u16 dup = 0, dup_cnt = 0; + + ql_dbg(ql_dbg_disc + 
ql_dbg_verbose, vha, 0xffff, + "%s enter\n", __func__); + + if (sp->gen1 != vha->hw->base_qpair->chip_reset) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s scan stop due to chip reset %x/%x\n", + sp->name, sp->gen1, vha->hw->base_qpair->chip_reset); + goto out; + } + + rc = sp->rc; + if (rc) { + vha->scan.scan_retry++; + if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + goto out; + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: Fabric scan failed for %d retries.\n", + __func__, vha->scan.scan_retry); + /* + * Unable to scan any rports. logout loop below + * will unregister all sessions. + */ + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) { + fcport->scan_state = QLA_FCPORT_SCAN; + if (fcport->loop_id == FC_NO_LOOP_ID) + fcport->logout_on_delete = 0; + else + fcport->logout_on_delete = 1; + } + } + goto login_logout; + } + } + vha->scan.scan_retry = 0; + + list_for_each_entry(fcport, &vha->vp_fcports, list) + fcport->scan_state = QLA_FCPORT_SCAN; + + for (i = 0; i < vha->hw->max_fibre_devices; i++) { + u64 wwn; + int k; + + rp = &vha->scan.l[i]; + found = false; + + wwn = wwn_to_u64(rp->port_name); + if (wwn == 0) + continue; + + /* Remove duplicate NPORT ID entries from switch data base */ + for (k = i + 1; k < vha->hw->max_fibre_devices; k++) { + trp = &vha->scan.l[k]; + if (rp->id.b24 == trp->id.b24) { + dup = 1; + dup_cnt++; + ql_dbg(ql_dbg_disc + ql_dbg_verbose, + vha, 0xffff, + "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n", + rp->id.b24, rp->port_name, trp->port_name); + memset(trp, 0, sizeof(*trp)); + } + } + + if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE)) + continue; + + /* Bypass reserved domain fields. */ + if ((rp->id.b.domain & 0xf0) == 0xf0) + continue; + + /* Bypass virtual ports of the same host. */ + if (qla2x00_is_a_vp(vha, wwn)) + continue; + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE)) + continue; + fcport->scan_state = QLA_FCPORT_FOUND; + fcport->last_rscn_gen = fcport->rscn_gen; + fcport->fc4_type = rp->fc4type; + found = true; + + if (fcport->scan_needed) { + if (NVME_PRIORITY(vha->hw, fcport)) + fcport->do_prli_nvme = 1; + else + fcport->do_prli_nvme = 0; + } + + /* + * If device was not a fabric device before. + */ + if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { + qla2x00_clear_loop_id(fcport); + fcport->flags |= FCF_FABRIC_DEVICE; + } else if (fcport->d_id.b24 != rp->id.b24 || + (fcport->scan_needed && + fcport->port_type != FCT_INITIATOR && + fcport->port_type != FCT_NVME_INITIATOR)) { + qlt_schedule_sess_for_deletion(fcport); + } + fcport->d_id.b24 = rp->id.b24; + fcport->scan_needed = 0; + break; + } + + if (!found) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post new sess\n", + __func__, __LINE__, rp->port_name); + qla24xx_post_newsess_work(vha, &rp->id, rp->port_name, + rp->node_name, NULL, rp->fc4type); + } + } + + if (dup) { + ql_log(ql_log_warn, vha, 0xffff, + "Detected %d duplicate NPORT ID(s) from switch data base\n", + dup_cnt); + } + +login_logout: + /* + * Logout all previous fabric dev marked lost, except FCP2 devices. 
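+ * Ports still marked QLA_FCPORT_SCAN were not reported by this scan and are scheduled for deletion; found ports are run through the normal login handling below.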
+ */ + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { + fcport->scan_needed = 0; + continue; + } + + if (fcport->scan_state != QLA_FCPORT_FOUND) { + bool do_delete = false; + + if (fcport->scan_needed && + fcport->disc_state == DSC_LOGIN_PEND) { + /* Cable got disconnected after we sent + * a login. Do delete to prevent timeout. + */ + fcport->logout_on_delete = 1; + do_delete = true; + } + + fcport->scan_needed = 0; + if (((qla_dual_mode_enabled(vha) || + qla_ini_mode_enabled(vha)) && + atomic_read(&fcport->state) == FCS_ONLINE) || + do_delete) { + if (fcport->loop_id != FC_NO_LOOP_ID) { + if (fcport->flags & FCF_FCP2_DEVICE) + continue; + + ql_log(ql_log_warn, vha, 0x20f0, + "%s %d %8phC post del sess\n", + __func__, __LINE__, + fcport->port_name); + + fcport->tgt_link_down_time = 0; + qlt_schedule_sess_for_deletion(fcport); + continue; + } + } + } else { + if (fcport->scan_needed || + fcport->disc_state != DSC_LOGIN_COMPLETE) { + if (fcport->login_retry == 0) { + fcport->login_retry = + vha->hw->login_retry_count; + ql_dbg(ql_dbg_disc, vha, 0x20a3, + "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", + fcport->port_name, fcport->loop_id, + fcport->login_retry); + } + fcport->scan_needed = 0; + qla24xx_fcport_handle_login(vha, fcport); + } + } + } + + recheck = 1; +out: + qla24xx_sp_unmap(vha, sp); + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + spin_unlock_irqrestore(&vha->work_lock, flags); + + if (recheck) { + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->scan_needed) { + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + break; + } + } + } +} + +static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha, + srb_t *sp, int cmd) +{ + struct qla_work_evt *e; + + if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE) + return QLA_PARAMETER_ERROR; + + e = qla2x00_alloc_work(vha, cmd); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.iosb.sp = sp; + + return qla2x00_post_work(vha, e); +} + +static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha, + srb_t *sp, int cmd) +{ + struct qla_work_evt *e; + + if (cmd != QLA_EVT_GPNFT) + return QLA_PARAMETER_ERROR; + + e = qla2x00_alloc_work(vha, cmd); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.gpnft.fc4_type = FC4_TYPE_NVME; + e->u.gpnft.sp = sp; + + return qla2x00_post_work(vha, e); +} + +static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha, + struct srb *sp) +{ + struct qla_hw_data *ha = vha->hw; + int num_fibre_dev = ha->max_fibre_devices; + struct ct_sns_req *ct_req = + (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; + struct ct_sns_gpnft_rsp *ct_rsp = + (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; + struct ct_sns_gpn_ft_data *d; + struct fab_scan_rp *rp; + u16 cmd = be16_to_cpu(ct_req->command); + u8 fc4_type = sp->gen2; + int i, j, k; + port_id_t id; + u8 found; + u64 wwn; + + j = 0; + for (i = 0; i < num_fibre_dev; i++) { + d = &ct_rsp->entries[i]; + + id.b.rsvd_1 = 0; + id.b.domain = d->port_id[0]; + id.b.area = d->port_id[1]; + id.b.al_pa = d->port_id[2]; + wwn = wwn_to_u64(d->port_name); + + if (id.b24 == 0 || wwn == 0) + continue; + + if (fc4_type == FC4_TYPE_FCP_SCSI) { + if (cmd == GPN_FT_CMD) { + rp = &vha->scan.l[j]; + rp->id = id; + memcpy(rp->port_name, d->port_name, 8); + j++; + rp->fc4type = FS_FC4TYPE_FCP; + } else { + for (k = 0; k < num_fibre_dev; k++) { + rp = &vha->scan.l[k]; + if (id.b24 == rp->id.b24) 
{ + memcpy(rp->node_name, + d->port_name, 8); + break; + } + } + } + } else { + /* Search if the fibre device supports FC4_TYPE_NVME */ + if (cmd == GPN_FT_CMD) { + found = 0; + + for (k = 0; k < num_fibre_dev; k++) { + rp = &vha->scan.l[k]; + if (!memcmp(rp->port_name, + d->port_name, 8)) { + /* + * Supports FC-NVMe & FCP + */ + rp->fc4type |= FS_FC4TYPE_NVME; + found = 1; + break; + } + } + + /* We found new FC-NVMe only port */ + if (!found) { + for (k = 0; k < num_fibre_dev; k++) { + rp = &vha->scan.l[k]; + if (wwn_to_u64(rp->port_name)) { + continue; + } else { + rp->id = id; + memcpy(rp->port_name, + d->port_name, 8); + rp->fc4type = + FS_FC4TYPE_NVME; + break; + } + } + } + } else { + for (k = 0; k < num_fibre_dev; k++) { + rp = &vha->scan.l[k]; + if (id.b24 == rp->id.b24) { + memcpy(rp->node_name, + d->port_name, 8); + break; + } + } + } + } + } +} + +static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) +{ + struct scsi_qla_host *vha = sp->vha; + struct ct_sns_req *ct_req = + (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; + u16 cmd = be16_to_cpu(ct_req->command); + u8 fc4_type = sp->gen2; + unsigned long flags; + int rc; + + /* gen2 field is holding the fc4type */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x FC4Type %x\n", + sp->name, res, sp->gen2); + + sp->rc = res; + if (res) { + unsigned long flags; + const char *name = sp->name; + + if (res == QLA_OS_TIMER_EXPIRED) { + /* switch is ignoring all commands. + * This might be a zone disable behavior. + * This means we hit 64s timeout. + * 22s GPNFT + 44s Abort = 64s + */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: Switch Zone check please .\n", + name); + qla2x00_mark_all_devices_lost(vha); + } + + /* + * We are in an Interrupt context, queue up this + * sp for GNNFT_DONE work. This will allow all + * the resource to get freed up. 
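+ * The work item ends up in qla24xx_sp_unmap(), which frees the DMA buffers and drops the SRB reference.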
+ */ + rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, + QLA_EVT_GNNFT_DONE); + if (rc) { + /* Cleanup here to prevent memory leak */ + qla24xx_sp_unmap(vha, sp); + + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + vha->scan.scan_retry++; + spin_unlock_irqrestore(&vha->work_lock, flags); + + if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s rescan failed on all retries.\n", + name); + } + } + return; + } + + qla2x00_find_free_fcp_nvme_slot(vha, sp); + + if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled && + cmd == GNN_FT_CMD) { + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + spin_unlock_irqrestore(&vha->work_lock, flags); + + sp->rc = res; + rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT); + if (rc) { + qla24xx_sp_unmap(vha, sp); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + } + return; + } + + if (cmd == GPN_FT_CMD) { + rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, + QLA_EVT_GPNFT_DONE); + } else { + rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, + QLA_EVT_GNNFT_DONE); + } + + if (rc) { + qla24xx_sp_unmap(vha, sp); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + return; + } +} + +/* + * Get WWNN list for fc4_type + * + * It is assumed the same SRB is re-used from GPNFT to avoid + * mem free & re-alloc + */ +static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, + u8 fc4_type) +{ + int rval = QLA_FUNCTION_FAILED; + struct ct_sns_req *ct_req; + struct ct_sns_pkt *ct_sns; + unsigned long flags; + + if (!vha->flags.online) { + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + spin_unlock_irqrestore(&vha->work_lock, flags); + goto done_free_sp; + } + + if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: req %p rsp %p are not setup\n", + __func__, sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.rsp); + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + spin_unlock_irqrestore(&vha->work_lock, flags); + WARN_ON(1); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + goto done_free_sp; + } + + ql_dbg(ql_dbg_disc, vha, 0xfffff, + "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n", + __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size, + sp->u.iocb_cmd.u.ctarg.req_size); + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "gnnft"; + sp->gen1 = vha->hw->base_qpair->chip_reset; + sp->gen2 = fc4_type; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_gpnft_gnnft_sp_done); + + memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); + memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); + + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; + /* CT_IU preamble */ + ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, + sp->u.iocb_cmd.u.ctarg.rsp_size); + + /* GPN_FT req */ + ct_req->req.gpn_ft.port_type = fc4_type; + + sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s hdl=%x FC4Type %x.\n", sp->name, + sp->handle, ct_req->req.gpn_ft.port_type); 
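+ /* Kick off the CT pass-through; qla2x00_async_gpnft_gnnft_sp_done() runs on completion. */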
+ + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + goto done_free_sp; + } + + return rval; + +done_free_sp: + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.req_allocated_size, + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + if (vha->scan.scan_flags == 0) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: schedule\n", __func__); + vha->scan.scan_flags |= SF_QUEUED; + schedule_delayed_work(&vha->scan.scan_work, 5); + } + spin_unlock_irqrestore(&vha->work_lock, flags); + + + return rval; +} /* GNNFT */ + +void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp) +{ + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + "%s enter\n", __func__); + qla24xx_async_gnnft(vha, sp, sp->gen2); +} + +/* Get WWPN list for certain fc4_type */ +int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) +{ + int rval = QLA_FUNCTION_FAILED; + struct ct_sns_req *ct_req; + struct ct_sns_pkt *ct_sns; + u32 rspsz; + unsigned long flags; + + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + "%s enter\n", __func__); + + if (!vha->flags.online) + return rval; + + spin_lock_irqsave(&vha->work_lock, flags); + if (vha->scan.scan_flags & SF_SCANNING) { + spin_unlock_irqrestore(&vha->work_lock, flags); + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + "%s: scan active\n", __func__); + return rval; + } + vha->scan.scan_flags |= SF_SCANNING; + spin_unlock_irqrestore(&vha->work_lock, flags); + + if (fc4_type == FC4_TYPE_FCP_SCSI) { + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + "%s: Performing FCP Scan\n", __func__); + + if (sp) { + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + } + + /* ref: INIT */ + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); + if (!sp) { + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + spin_unlock_irqrestore(&vha->work_lock, flags); + return rval; + } + + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + &sp->u.iocb_cmd.u.ctarg.req_dma, + GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); + if (!sp->u.iocb_cmd.u.ctarg.req) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate ct_sns request.\n"); + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + spin_unlock_irqrestore(&vha->work_lock, flags); + qla2x00_rel_sp(sp); + return rval; + } + sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE; + + rspsz = sizeof(struct ct_sns_gpnft_rsp) + + vha->hw->max_fibre_devices * + sizeof(struct ct_sns_gpn_ft_data); + + sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, + rspsz, + &sp->u.iocb_cmd.u.ctarg.rsp_dma, + GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz; + if (!sp->u.iocb_cmd.u.ctarg.rsp) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate ct_sns request.\n"); + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + spin_unlock_irqrestore(&vha->work_lock, flags); + dma_free_coherent(&vha->hw->pdev->dev, + 
sp->u.iocb_cmd.u.ctarg.req_allocated_size, + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + /* ref: INIT */ + qla2x00_rel_sp(sp); + return rval; + } + sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz; + + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + "%s scan list size %d\n", __func__, vha->scan.size); + + memset(vha->scan.l, 0, vha->scan.size); + } else if (!sp) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "NVME scan did not provide SP\n"); + return rval; + } + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "gpnft"; + sp->gen1 = vha->hw->base_qpair->chip_reset; + sp->gen2 = fc4_type; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_gpnft_gnnft_sp_done); + + rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size; + memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); + memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); + + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; + /* CT_IU preamble */ + ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz); + + /* GPN_FT req */ + ct_req->req.gpn_ft.port_type = fc4_type; + + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s hdl=%x FC4Type %x.\n", sp->name, + sp->handle, ct_req->req.gpn_ft.port_type); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + goto done_free_sp; + } + + return rval; + +done_free_sp: + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.req_allocated_size, + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + if (vha->scan.scan_flags == 0) { + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + "%s: Scan scheduled.\n", __func__); + vha->scan.scan_flags |= SF_QUEUED; + schedule_delayed_work(&vha->scan.scan_work, 5); + } + spin_unlock_irqrestore(&vha->work_lock, flags); + + + return rval; +} + +void qla_scan_work_fn(struct work_struct *work) +{ + struct fab_scan *s = container_of(to_delayed_work(work), + struct fab_scan, scan_work); + struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host, + scan); + unsigned long flags; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: schedule loop resync\n", __func__); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_QUEUED; + spin_unlock_irqrestore(&vha->work_lock, flags); +} + +/* GPFN_ID */ +void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) +{ + fc_port_t *fcport = ea->fcport; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2, + fcport->rscn_gen, ea->sp->gen1, vha->fcport_count); + + if (fcport->disc_state == DSC_DELETE_PEND) + return; + + if (ea->sp->gen2 != fcport->login_gen) { + /* target side must have changed it. 
*/ + ql_dbg(ql_dbg_disc, vha, 0x20d3, + "%s %8phC generation changed\n", + __func__, fcport->port_name); + return; + } else if (ea->sp->gen1 != fcport->rscn_gen) { + return; + } + + qla24xx_post_gpsc_work(vha, fcport); +} + +static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res) +{ + struct scsi_qla_host *vha = sp->vha; + fc_port_t *fcport = sp->fcport; + u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name; + struct event_arg ea; + u64 wwn; + + wwn = wwn_to_u64(fpn); + if (wwn) + memcpy(fcport->fabric_port_name, fpn, WWN_SIZE); + + memset(&ea, 0, sizeof(ea)); + ea.fcport = fcport; + ea.sp = sp; + ea.rc = res; + + ql_dbg(ql_dbg_disc, vha, 0x204f, + "Async done-%s res %x, WWPN %8phC %8phC\n", + sp->name, res, fcport->port_name, fcport->fabric_port_name); + + qla24xx_handle_gfpnid_event(vha, &ea); + + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +} + +int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + int rval = QLA_FUNCTION_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) + return rval; + + /* ref: INIT */ + sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); + if (!sp) + goto done; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "gfpnid"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_gfpnid_sp_done); + + /* CT_IU preamble */ + ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD, + GFPN_ID_RSP_SIZE); + + /* GFPN_ID req */ + ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id); + + + /* req & rsp use the same buffer */ + sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n", + sp->name, fcport->port_name, + sp->handle, fcport->loop_id, fcport->d_id.b24); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + return rval; + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + return rval; +} + +int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + int ls; + + ls = atomic_read(&vha->loop_state); + if (((ls != LOOP_READY) && (ls != LOOP_UP)) || + test_bit(UNLOADING, &vha->dpc_flags)) + return 0; + + e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + return qla2x00_post_work(vha, e); +} diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c new file mode 100644 index 000000000..a314cfc5b --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -0,0 +1,10029 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#include "qla_def.h" +#include "qla_gbl.h" + +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> + +#include "qla_devtbl.h" + +#ifdef CONFIG_SPARC +#include <asm/prom.h> +#endif + +#include "qla_target.h" + +/* +* QLogic ISP2x00 Hardware Support Function Prototypes. 
+*/ +static int qla2x00_isp_firmware(scsi_qla_host_t *); +static int qla2x00_setup_chip(scsi_qla_host_t *); +static int qla2x00_fw_ready(scsi_qla_host_t *); +static int qla2x00_configure_hba(scsi_qla_host_t *); +static int qla2x00_configure_loop(scsi_qla_host_t *); +static int qla2x00_configure_local_loop(scsi_qla_host_t *); +static int qla2x00_configure_fabric(scsi_qla_host_t *); +static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *); +static int qla2x00_restart_isp(scsi_qla_host_t *); + +static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); +static int qla84xx_init_chip(scsi_qla_host_t *); +static int qla25xx_init_queues(struct qla_hw_data *); +static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, + struct event_arg *ea); +static void qla24xx_handle_prli_done_event(struct scsi_qla_host *, + struct event_arg *); +static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *); + +/* SRB Extensions ---------------------------------------------------------- */ + +void +qla2x00_sp_timeout(struct timer_list *t) +{ + srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer); + struct srb_iocb *iocb; + scsi_qla_host_t *vha = sp->vha; + + WARN_ON(irqs_disabled()); + iocb = &sp->u.iocb_cmd; + iocb->timeout(sp); + + /* ref: TMR */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + + if (vha && qla2x00_isp_reg_stat(vha->hw)) { + ql_log(ql_log_info, vha, 0x9008, + "PCI/Register disconnect.\n"); + qla_pci_set_eeh_busy(vha); + } +} + +void qla2x00_sp_free(srb_t *sp) +{ + struct srb_iocb *iocb = &sp->u.iocb_cmd; + + del_timer(&iocb->timer); + qla2x00_rel_sp(sp); +} + +void qla2xxx_rel_done_warning(srb_t *sp, int res) +{ + WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp); +} + +void qla2xxx_rel_free_warning(srb_t *sp) +{ + WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp); +} + +/* Asynchronous Login/Logout Routines -------------------------------------- */ + +unsigned long +qla2x00_get_async_timeout(struct scsi_qla_host *vha) +{ + unsigned long tmo; + struct qla_hw_data *ha = vha->hw; + + /* Firmware should use switch negotiated r_a_tov for timeout. */ + tmo = ha->r_a_tov / 10 * 2; + if (IS_QLAFX00(ha)) { + tmo = FX00_DEF_RATOV * 2; + } else if (!IS_FWI2_CAPABLE(ha)) { + /* + * Except for earlier ISPs where the timeout is seeded from the + * initialization control block. 
+ */ + tmo = ha->login_timeout; + } + return tmo; +} + +static void qla24xx_abort_iocb_timeout(void *data) +{ + srb_t *sp = data; + struct srb_iocb *abt = &sp->u.iocb_cmd; + struct qla_qpair *qpair = sp->qpair; + u32 handle; + unsigned long flags; + int sp_found = 0, cmdsp_found = 0; + + if (sp->cmd_sp) + ql_dbg(ql_dbg_async, sp->vha, 0x507c, + "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n", + sp->cmd_sp->handle, sp->cmd_sp->type, + sp->handle, sp->type); + else + ql_dbg(ql_dbg_async, sp->vha, 0x507c, + "Abort timeout 2 - hdl=%x, type=%x\n", + sp->handle, sp->type); + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) { + if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] == + sp->cmd_sp)) { + qpair->req->outstanding_cmds[handle] = NULL; + cmdsp_found = 1; + qla_put_fw_resources(qpair, &sp->cmd_sp->iores); + } + + /* removing the abort */ + if (qpair->req->outstanding_cmds[handle] == sp) { + qpair->req->outstanding_cmds[handle] = NULL; + sp_found = 1; + qla_put_fw_resources(qpair, &sp->iores); + break; + } + } + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + if (cmdsp_found && sp->cmd_sp) { + /* + * This done function should take care of + * original command ref: INIT + */ + sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED); + } + + if (sp_found) { + abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT); + sp->done(sp, QLA_OS_TIMER_EXPIRED); + } +} + +static void qla24xx_abort_sp_done(srb_t *sp, int res) +{ + struct srb_iocb *abt = &sp->u.iocb_cmd; + srb_t *orig_sp = sp->cmd_sp; + + if (orig_sp) + qla_wait_nvme_release_cmd_kref(orig_sp); + + if (sp->flags & SRB_WAKEUP_ON_COMP) + complete(&abt->u.abt.comp); + else + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +} + +int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) +{ + scsi_qla_host_t *vha = cmd_sp->vha; + struct srb_iocb *abt_iocb; + srb_t *sp; + int rval = QLA_FUNCTION_FAILED; + + /* ref: INIT for ABTS command */ + sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport, + GFP_ATOMIC); + if (!sp) + return QLA_MEMORY_ALLOC_FAILED; + + qla_vha_mark_busy(vha); + abt_iocb = &sp->u.iocb_cmd; + sp->type = SRB_ABT_CMD; + sp->name = "abort"; + sp->qpair = cmd_sp->qpair; + sp->cmd_sp = cmd_sp; + if (wait) + sp->flags = SRB_WAKEUP_ON_COMP; + + init_completion(&abt_iocb->u.abt.comp); + /* FW can send 2 x ABTS's timeout/20s */ + qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done); + sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout; + + abt_iocb->u.abt.cmd_hndl = cmd_sp->handle; + abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id); + + ql_dbg(ql_dbg_async, vha, 0x507c, + "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle, + cmd_sp->type); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + return rval; + } + + if (wait) { + wait_for_completion(&abt_iocb->u.abt.comp); + rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? 
+ QLA_SUCCESS : QLA_ERR_FROM_FW; + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + } + + return rval; +} + +void +qla2x00_async_iocb_timeout(void *data) +{ + srb_t *sp = data; + fc_port_t *fcport = sp->fcport; + struct srb_iocb *lio = &sp->u.iocb_cmd; + int rc, h; + unsigned long flags; + + if (fcport) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, + "Async-%s timeout - hdl=%x portid=%06x %8phC.\n", + sp->name, sp->handle, fcport->d_id.b24, fcport->port_name); + + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + } else { + pr_info("Async-%s timeout - hdl=%x.\n", + sp->name, sp->handle); + } + + switch (sp->type) { + case SRB_LOGIN_CMD: + rc = qla24xx_async_abort_cmd(sp, false); + if (rc) { + /* Retry as needed. */ + lio->u.logio.data[0] = MBS_COMMAND_ERROR; + lio->u.logio.data[1] = + lio->u.logio.flags & SRB_LOGIN_RETRIED ? + QLA_LOGIO_LOGIN_RETRIED : 0; + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; + h++) { + if (sp->qpair->req->outstanding_cmds[h] == + sp) { + sp->qpair->req->outstanding_cmds[h] = + NULL; + break; + } + } + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + sp->done(sp, QLA_FUNCTION_TIMEOUT); + } + break; + case SRB_LOGOUT_CMD: + case SRB_CT_PTHRU_CMD: + case SRB_MB_IOCB: + case SRB_NACK_PLOGI: + case SRB_NACK_PRLI: + case SRB_NACK_LOGO: + case SRB_CTRL_VP: + default: + rc = qla24xx_async_abort_cmd(sp, false); + if (rc) { + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; + h++) { + if (sp->qpair->req->outstanding_cmds[h] == + sp) { + sp->qpair->req->outstanding_cmds[h] = + NULL; + break; + } + } + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + sp->done(sp, QLA_FUNCTION_TIMEOUT); + } + break; + } +} + +static void qla2x00_async_login_sp_done(srb_t *sp, int res) +{ + struct scsi_qla_host *vha = sp->vha; + struct srb_iocb *lio = &sp->u.iocb_cmd; + struct event_arg ea; + + ql_dbg(ql_dbg_disc, vha, 0x20dd, + "%s %8phC res %d \n", __func__, sp->fcport->port_name, res); + + sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + + if (!test_bit(UNLOADING, &vha->dpc_flags)) { + memset(&ea, 0, sizeof(ea)); + ea.fcport = sp->fcport; + ea.data[0] = lio->u.logio.data[0]; + ea.data[1] = lio->u.logio.data[1]; + ea.iop[0] = lio->u.logio.iop[0]; + ea.iop[1] = lio->u.logio.iop[1]; + ea.sp = sp; + if (res) + ea.data[0] = MBS_COMMAND_ERROR; + qla24xx_handle_plogi_done_event(vha, &ea); + } + + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +} + +int +qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, + uint16_t *data) +{ + srb_t *sp; + struct srb_iocb *lio; + int rval = QLA_FUNCTION_FAILED; + + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) || + fcport->loop_id == FC_NO_LOOP_ID) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC - not sending command.\n", + __func__, fcport->port_name); + return rval; + } + + /* ref: INIT */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); + fcport->flags |= FCF_ASYNC_SENT; + fcport->logout_completed = 0; + + sp->type = SRB_LOGIN_CMD; + sp->name = "login"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_login_sp_done); + + lio = &sp->u.iocb_cmd; + if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) { + lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY; + } else { + if 
(vha->hw->flags.edif_enabled && + DBELL_ACTIVE(vha)) { + lio->u.logio.flags |= + (SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI); + } else { + lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; + } + } + + if (NVME_TARGET(vha->hw, fcport)) + lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; + + rval = qla2x00_start_sp(sp); + + ql_dbg(ql_dbg_disc, vha, 0x2072, + "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n", + fcport->port_name, sp->handle, fcport->loop_id, + fcport->d_id.b24, fcport->login_retry, + lio->u.logio.flags & SRB_LOGIN_FCSP ? "FCSP" : ""); + + if (rval != QLA_SUCCESS) { + fcport->flags |= FCF_LOGIN_NEEDED; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + goto done_free_sp; + } + + return rval; + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + fcport->flags &= ~FCF_ASYNC_SENT; +done: + fcport->flags &= ~FCF_ASYNC_ACTIVE; + + /* + * async login failed. Could be due to iocb/exchange resource + * being low. Set state DELETED for re-login process to start again. + */ + qla2x00_set_fcport_disc_state(fcport, DSC_DELETED); + return rval; +} + +static void qla2x00_async_logout_sp_done(srb_t *sp, int res) +{ + sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + sp->fcport->login_gen++; + qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +} + +int +qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + srb_t *sp; + int rval = QLA_FUNCTION_FAILED; + + fcport->flags |= FCF_ASYNC_SENT; + /* ref: INIT */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_LOGOUT_CMD; + sp->name = "logout"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_logout_sp_done), + + ql_dbg(ql_dbg_disc, vha, 0x2070, + "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n", + sp->handle, fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa, + fcport->port_name, fcport->explicit_logout); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + return rval; + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + return rval; +} + +void +qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport, + uint16_t *data) +{ + fcport->flags &= ~FCF_ASYNC_ACTIVE; + /* Don't re-login in target mode */ + if (!fcport->tgt_session) + qla2x00_mark_device_lost(vha, fcport, 1); + qlt_logo_completion_handler(fcport, data[0]); +} + +static void qla2x00_async_prlo_sp_done(srb_t *sp, int res) +{ + struct srb_iocb *lio = &sp->u.iocb_cmd; + struct scsi_qla_host *vha = sp->vha; + + sp->fcport->flags &= ~FCF_ASYNC_ACTIVE; + if (!test_bit(UNLOADING, &vha->dpc_flags)) + qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport, + lio->u.logio.data); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +} + +int +qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + srb_t *sp; + int rval; + + rval = QLA_FUNCTION_FAILED; + /* ref: INIT */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_PRLO_CMD; + sp->name = "prlo"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_prlo_sp_done); + + ql_dbg(ql_dbg_disc, vha, 0x2070, + "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n", + sp->handle, fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, 
fcport->d_id.b.al_pa); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + return rval; + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + fcport->flags &= ~FCF_ASYNC_ACTIVE; + return rval; +} + +static +void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea) +{ + struct fc_port *fcport = ea->fcport; + unsigned long flags; + + ql_dbg(ql_dbg_disc, vha, 0x20d2, + "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2, + fcport->rscn_gen, ea->sp->gen1, fcport->loop_id); + + WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", + ea->data[0]); + + if (ea->data[0] != MBS_COMMAND_COMPLETE) { + ql_dbg(ql_dbg_disc, vha, 0x2066, + "%s %8phC: adisc fail: post delete\n", + __func__, ea->fcport->port_name); + + spin_lock_irqsave(&vha->work_lock, flags); + /* deleted = 0 & logout_on_delete = force fw cleanup */ + if (fcport->deleted == QLA_SESS_DELETED) + fcport->deleted = 0; + + fcport->logout_on_delete = 1; + spin_unlock_irqrestore(&vha->work_lock, flags); + + qlt_schedule_sess_for_deletion(ea->fcport); + return; + } + + if (ea->fcport->disc_state == DSC_DELETE_PEND) + return; + + if (ea->sp->gen2 != ea->fcport->login_gen) { + /* target side must have changed it. */ + ql_dbg(ql_dbg_disc, vha, 0x20d3, + "%s %8phC generation changed\n", + __func__, ea->fcport->port_name); + return; + } else if (ea->sp->gen1 != ea->fcport->rscn_gen) { + qla_rscn_replay(fcport); + qlt_schedule_sess_for_deletion(fcport); + return; + } + + __qla24xx_handle_gpdb_event(vha, ea); +} + +static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + fcport->flags |= FCF_ASYNC_ACTIVE; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); + return qla2x00_post_work(vha, e); +} + +static void qla2x00_async_adisc_sp_done(srb_t *sp, int res) +{ + struct scsi_qla_host *vha = sp->vha; + struct event_arg ea; + struct srb_iocb *lio = &sp->u.iocb_cmd; + + ql_dbg(ql_dbg_disc, vha, 0x2066, + "Async done-%s res %x %8phC\n", + sp->name, res, sp->fcport->port_name); + + sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + + memset(&ea, 0, sizeof(ea)); + ea.rc = res; + ea.data[0] = lio->u.logio.data[0]; + ea.data[1] = lio->u.logio.data[1]; + ea.iop[0] = lio->u.logio.iop[0]; + ea.iop[1] = lio->u.logio.iop[1]; + ea.fcport = sp->fcport; + ea.sp = sp; + if (res) + ea.data[0] = MBS_COMMAND_ERROR; + + qla24xx_handle_adisc_event(vha, &ea); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +} + +int +qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, + uint16_t *data) +{ + srb_t *sp; + struct srb_iocb *lio; + int rval = QLA_FUNCTION_FAILED; + + if (IS_SESSION_DELETED(fcport)) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC is being delete - not sending command.\n", + __func__, fcport->port_name); + fcport->flags &= ~FCF_ASYNC_ACTIVE; + return rval; + } + + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) + return rval; + + fcport->flags |= FCF_ASYNC_SENT; + /* ref: INIT */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_ADISC_CMD; + sp->name = "adisc"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + qla2x00_init_async_sp(sp, 
qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_adisc_sp_done); + + if (data[1] & QLA_LOGIO_LOGIN_RETRIED) { + lio = &sp->u.iocb_cmd; + lio->u.logio.flags |= SRB_LOGIN_RETRIED; + } + + ql_dbg(ql_dbg_disc, vha, 0x206f, + "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n", + sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + return rval; + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + qla2x00_post_async_adisc_work(vha, fcport, data); + return rval; +} + +static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id) +{ + struct qla_hw_data *ha = vha->hw; + + if (IS_FWI2_CAPABLE(ha)) + return loop_id > NPH_LAST_HANDLE; + + return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) || + loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST; +} + +/** + * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID + * @vha: adapter state pointer. + * @dev: port structure pointer. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + unsigned long flags = 0; + + rval = QLA_SUCCESS; + + spin_lock_irqsave(&ha->vport_slock, flags); + + dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE); + if (dev->loop_id >= LOOPID_MAP_SIZE || + qla2x00_is_reserved_id(vha, dev->loop_id)) { + dev->loop_id = FC_NO_LOOP_ID; + rval = QLA_FUNCTION_FAILED; + } else { + set_bit(dev->loop_id, ha->loop_id_map); + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + + if (rval == QLA_SUCCESS) + ql_dbg(ql_dbg_disc, dev->vha, 0x2086, + "Assigning new loopid=%x, portid=%x.\n", + dev->loop_id, dev->d_id.b24); + else + ql_log(ql_log_warn, dev->vha, 0x2087, + "No loop_id's available, portid=%x.\n", + dev->d_id.b24); + + return rval; +} + +void qla2x00_clear_loop_id(fc_port_t *fcport) +{ + struct qla_hw_data *ha = fcport->vha->hw; + + if (fcport->loop_id == FC_NO_LOOP_ID || + qla2x00_is_reserved_id(fcport->vha, fcport->loop_id)) + return; + + clear_bit(fcport->loop_id, ha->loop_id_map); + fcport->loop_id = FC_NO_LOOP_ID; +} + +static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, + struct event_arg *ea) +{ + fc_port_t *fcport, *conflict_fcport; + struct get_name_list_extended *e; + u16 i, n, found = 0, loop_id; + port_id_t id; + u64 wwn; + u16 data[2]; + u8 current_login_state, nvme_cls; + + fcport = ea->fcport; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, ea->rc, + fcport->login_gen, fcport->last_login_gen, + fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable); + + if (fcport->disc_state == DSC_DELETE_PEND) + return; + + if (ea->rc) { /* rval */ + if (fcport->login_retry == 0) { + ql_dbg(ql_dbg_disc, vha, 0x20de, + "GNL failed Port login retry %8phN, retry cnt=%d.\n", + fcport->port_name, fcport->login_retry); + } + return; + } + + if (fcport->last_rscn_gen != fcport->rscn_gen) { + qla_rscn_replay(fcport); + qlt_schedule_sess_for_deletion(fcport); + return; + } else if (fcport->last_login_gen != fcport->login_gen) { + ql_dbg(ql_dbg_disc, vha, 0x20e0, + "%s %8phC login gen changed\n", + __func__, fcport->port_name); + 
set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + return; + } + + n = ea->data[0] / sizeof(struct get_name_list_extended); + + ql_dbg(ql_dbg_disc, vha, 0x20e1, + "%s %d %8phC n %d %02x%02x%02x lid %d \n", + __func__, __LINE__, fcport->port_name, n, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, fcport->loop_id); + + for (i = 0; i < n; i++) { + e = &vha->gnl.l[i]; + wwn = wwn_to_u64(e->port_name); + id.b.domain = e->port_id[2]; + id.b.area = e->port_id[1]; + id.b.al_pa = e->port_id[0]; + id.b.rsvd_1 = 0; + + if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE)) + continue; + + if (IS_SW_RESV_ADDR(id)) + continue; + + found = 1; + + loop_id = le16_to_cpu(e->nport_handle); + loop_id = (loop_id & 0x7fff); + nvme_cls = e->current_login_state >> 4; + current_login_state = e->current_login_state & 0xf; + + if (PRLI_PHASE(nvme_cls)) { + current_login_state = nvme_cls; + fcport->fc4_type &= ~FS_FC4TYPE_FCP; + fcport->fc4_type |= FS_FC4TYPE_NVME; + } else if (PRLI_PHASE(current_login_state)) { + fcport->fc4_type |= FS_FC4TYPE_FCP; + fcport->fc4_type &= ~FS_FC4TYPE_NVME; + } + + ql_dbg(ql_dbg_disc, vha, 0x20e2, + "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n", + __func__, fcport->port_name, + e->current_login_state, fcport->fw_login_state, + fcport->fc4_type, id.b24, fcport->d_id.b24, + loop_id, fcport->loop_id); + + switch (fcport->disc_state) { + case DSC_DELETE_PEND: + case DSC_DELETED: + break; + default: + if ((id.b24 != fcport->d_id.b24 && + fcport->d_id.b24 && + fcport->loop_id != FC_NO_LOOP_ID) || + (fcport->loop_id != FC_NO_LOOP_ID && + fcport->loop_id != loop_id)) { + ql_dbg(ql_dbg_disc, vha, 0x20e3, + "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); + if (fcport->n2n_flag) + fcport->d_id.b24 = 0; + qlt_schedule_sess_for_deletion(fcport); + return; + } + break; + } + + fcport->loop_id = loop_id; + if (fcport->n2n_flag) + fcport->d_id.b24 = id.b24; + + wwn = wwn_to_u64(fcport->port_name); + qlt_find_sess_invalidate_other(vha, wwn, + id, loop_id, &conflict_fcport); + + if (conflict_fcport) { + /* + * Another share fcport share the same loop_id & + * nport id. Conflict fcport needs to finish + * cleanup before this fcport can proceed to login. + */ + conflict_fcport->conflict = fcport; + fcport->login_pause = 1; + } + + switch (vha->hw->current_topology) { + default: + switch (current_login_state) { + case DSC_LS_PRLI_COMP: + ql_dbg(ql_dbg_disc, + vha, 0x20e4, "%s %d %8phC post gpdb\n", + __func__, __LINE__, fcport->port_name); + + if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) + fcport->port_type = FCT_INITIATOR; + else + fcport->port_type = FCT_TARGET; + data[0] = data[1] = 0; + qla2x00_post_async_adisc_work(vha, fcport, + data); + break; + case DSC_LS_PLOGI_COMP: + if (vha->hw->flags.edif_enabled) { + /* check to see if App support Secure */ + qla24xx_post_gpdb_work(vha, fcport, 0); + break; + } + fallthrough; + case DSC_LS_PORT_UNAVAIL: + default: + if (fcport->loop_id == FC_NO_LOOP_ID) { + qla2x00_find_new_loop_id(vha, fcport); + fcport->fw_login_state = + DSC_LS_PORT_UNAVAIL; + } + ql_dbg(ql_dbg_disc, vha, 0x20e5, + "%s %d %8phC\n", __func__, __LINE__, + fcport->port_name); + qla24xx_fcport_handle_login(vha, fcport); + break; + } + break; + case ISP_CFG_N: + fcport->fw_login_state = current_login_state; + fcport->d_id = id; + switch (current_login_state) { + case DSC_LS_PRLI_PEND: + /* + * In the middle of PRLI. Let it finish. + * Allow relogin code to recheck state again + * with GNL. 
Push disc_state back to DELETED + * so GNL can go out again + */ + qla2x00_set_fcport_disc_state(fcport, + DSC_DELETED); + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; + case DSC_LS_PRLI_COMP: + if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) + fcport->port_type = FCT_INITIATOR; + else + fcport->port_type = FCT_TARGET; + + data[0] = data[1] = 0; + qla2x00_post_async_adisc_work(vha, fcport, + data); + break; + case DSC_LS_PLOGI_COMP: + if (vha->hw->flags.edif_enabled && + DBELL_ACTIVE(vha)) { + /* check to see if App support secure or not */ + qla24xx_post_gpdb_work(vha, fcport, 0); + break; + } + if (fcport_is_bigger(fcport)) { + /* local adapter is smaller */ + if (fcport->loop_id != FC_NO_LOOP_ID) + qla2x00_clear_loop_id(fcport); + + fcport->loop_id = loop_id; + qla24xx_fcport_handle_login(vha, + fcport); + break; + } + fallthrough; + default: + if (fcport_is_smaller(fcport)) { + /* local adapter is bigger */ + if (fcport->loop_id != FC_NO_LOOP_ID) + qla2x00_clear_loop_id(fcport); + + fcport->loop_id = loop_id; + qla24xx_fcport_handle_login(vha, + fcport); + } + break; + } + break; + } /* switch (ha->current_topology) */ + } + + if (!found) { + switch (vha->hw->current_topology) { + case ISP_CFG_F: + case ISP_CFG_FL: + for (i = 0; i < n; i++) { + e = &vha->gnl.l[i]; + id.b.domain = e->port_id[0]; + id.b.area = e->port_id[1]; + id.b.al_pa = e->port_id[2]; + id.b.rsvd_1 = 0; + loop_id = le16_to_cpu(e->nport_handle); + + if (fcport->d_id.b24 == id.b24) { + conflict_fcport = + qla2x00_find_fcport_by_wwpn(vha, + e->port_name, 0); + if (conflict_fcport) { + ql_dbg(ql_dbg_disc + ql_dbg_verbose, + vha, 0x20e5, + "%s %d %8phC post del sess\n", + __func__, __LINE__, + conflict_fcport->port_name); + qlt_schedule_sess_for_deletion + (conflict_fcport); + } + } + /* + * FW already picked this loop id for + * another fcport + */ + if (fcport->loop_id == loop_id) + fcport->loop_id = FC_NO_LOOP_ID; + } + qla24xx_fcport_handle_login(vha, fcport); + break; + case ISP_CFG_N: + qla2x00_set_fcport_disc_state(fcport, DSC_DELETED); + if (time_after_eq(jiffies, fcport->dm_login_expire)) { + if (fcport->n2n_link_reset_cnt < 2) { + fcport->n2n_link_reset_cnt++; + /* + * remote port is not sending PLOGI. + * Reset link to kick start his state + * machine + */ + set_bit(N2N_LINK_RESET, + &vha->dpc_flags); + } else { + if (fcport->n2n_chip_reset < 1) { + ql_log(ql_log_info, vha, 0x705d, + "Chip reset to bring laser down"); + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + fcport->n2n_chip_reset++; + } else { + ql_log(ql_log_info, vha, 0x705d, + "Remote port %8ph is not coming back\n", + fcport->port_name); + fcport->scan_state = 0; + } + } + qla2xxx_wake_dpc(vha); + } else { + /* + * report port suppose to do PLOGI. Give him + * more time. FW will catch it. 
+ */ + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + } + break; + case ISP_CFG_NL: + qla24xx_fcport_handle_login(vha, fcport); + break; + default: + break; + } + } +} /* gnl_event */ + +static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) +{ + struct scsi_qla_host *vha = sp->vha; + unsigned long flags; + struct fc_port *fcport = NULL, *tf; + u16 i, n = 0, loop_id; + struct event_arg ea; + struct get_name_list_extended *e; + u64 wwn; + struct list_head h; + bool found = false; + + ql_dbg(ql_dbg_disc, vha, 0x20e7, + "Async done-%s res %x mb[1]=%x mb[2]=%x \n", + sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1], + sp->u.iocb_cmd.u.mbx.in_mb[2]); + + + sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); + memset(&ea, 0, sizeof(ea)); + ea.sp = sp; + ea.rc = res; + + if (sp->u.iocb_cmd.u.mbx.in_mb[1] >= + sizeof(struct get_name_list_extended)) { + n = sp->u.iocb_cmd.u.mbx.in_mb[1] / + sizeof(struct get_name_list_extended); + ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */ + } + + for (i = 0; i < n; i++) { + e = &vha->gnl.l[i]; + loop_id = le16_to_cpu(e->nport_handle); + /* mask out reserve bit */ + loop_id = (loop_id & 0x7fff); + set_bit(loop_id, vha->hw->loop_id_map); + wwn = wwn_to_u64(e->port_name); + + ql_dbg(ql_dbg_disc, vha, 0x20e8, + "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n", + __func__, &wwn, e->port_id[2], e->port_id[1], + e->port_id[0], e->current_login_state, e->last_login_state, + (loop_id & 0x7fff)); + } + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + + INIT_LIST_HEAD(&h); + fcport = tf = NULL; + if (!list_empty(&vha->gnl.fcports)) + list_splice_init(&vha->gnl.fcports, &h); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + list_del_init(&fcport->gnl_entry); + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + ea.fcport = fcport; + + qla24xx_handle_gnl_done_event(vha, &ea); + } + + /* create new fcport if fw has knowledge of new sessions */ + for (i = 0; i < n; i++) { + port_id_t id; + u64 wwnn; + + e = &vha->gnl.l[i]; + wwn = wwn_to_u64(e->port_name); + + found = false; + list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { + if (!memcmp((u8 *)&wwn, fcport->port_name, + WWN_SIZE)) { + found = true; + break; + } + } + + id.b.domain = e->port_id[2]; + id.b.area = e->port_id[1]; + id.b.al_pa = e->port_id[0]; + id.b.rsvd_1 = 0; + + if (!found && wwn && !IS_SW_RESV_ADDR(id)) { + ql_dbg(ql_dbg_disc, vha, 0x2065, + "%s %d %8phC %06x post new sess\n", + __func__, __LINE__, (u8 *)&wwn, id.b24); + wwnn = wwn_to_u64(e->node_name); + qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn, + (u8 *)&wwnn, NULL, 0); + } + } + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + vha->gnl.sent = 0; + if (!list_empty(&vha->gnl.fcports)) { + /* retrigger gnl */ + list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports, + gnl_entry) { + list_del_init(&fcport->gnl_entry); + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS) + break; + } + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +} + +int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + srb_t *sp; + int rval = QLA_FUNCTION_FAILED; + unsigned long flags; + u16 *mb; + + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) + goto done; + + ql_dbg(ql_dbg_disc, vha, 
0x20d9, + "Async-gnlist WWPN %8phC \n", fcport->port_name); + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport->flags |= FCF_ASYNC_SENT; + qla2x00_set_fcport_disc_state(fcport, DSC_GNL); + fcport->last_rscn_gen = fcport->rscn_gen; + fcport->last_login_gen = fcport->login_gen; + + list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); + if (vha->gnl.sent) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + return QLA_SUCCESS; + } + vha->gnl.sent = 1; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + /* ref: INIT */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_MB_IOCB; + sp->name = "gnlist"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla24xx_async_gnl_sp_done); + + mb = sp->u.iocb_cmd.u.mbx.out_mb; + mb[0] = MBC_PORT_NODE_NAME_LIST; + mb[1] = BIT_2 | BIT_3; + mb[2] = MSW(vha->gnl.ldma); + mb[3] = LSW(vha->gnl.ldma); + mb[6] = MSW(MSD(vha->gnl.ldma)); + mb[7] = LSW(MSD(vha->gnl.ldma)); + mb[8] = vha->gnl.size; + mb[9] = vha->vp_idx; + + ql_dbg(ql_dbg_disc, vha, 0x20da, + "Async-%s - OUT WWPN %8phC hndl %x\n", + sp->name, fcport->port_name, sp->handle); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + return rval; + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + fcport->flags &= ~(FCF_ASYNC_SENT); +done: + fcport->flags &= ~(FCF_ASYNC_ACTIVE); + return rval; +} + +int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_GNL); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + fcport->flags |= FCF_ASYNC_ACTIVE; + return qla2x00_post_work(vha, e); +} + +static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res) +{ + struct scsi_qla_host *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport = sp->fcport; + u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb; + struct event_arg ea; + + ql_dbg(ql_dbg_disc, vha, 0x20db, + "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n", + sp->name, res, fcport->port_name, mb[1], mb[2]); + + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + + if (res == QLA_FUNCTION_TIMEOUT) + goto done; + + memset(&ea, 0, sizeof(ea)); + ea.fcport = fcport; + ea.sp = sp; + + qla24xx_handle_gpdb_event(vha, &ea); + +done: + dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, + sp->u.iocb_cmd.u.mbx.in_dma); + + kref_put(&sp->cmd_kref, qla2x00_sp_release); +} + +int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + + if (vha->host->active_mode == MODE_TARGET) + return QLA_FUNCTION_FAILED; + + e = qla2x00_alloc_work(vha, QLA_EVT_PRLI); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + + return qla2x00_post_work(vha, e); +} + +static void qla2x00_async_prli_sp_done(srb_t *sp, int res) +{ + struct scsi_qla_host *vha = sp->vha; + struct srb_iocb *lio = &sp->u.iocb_cmd; + struct event_arg ea; + + ql_dbg(ql_dbg_disc, vha, 0x2129, + "%s %8phC res %x\n", __func__, + sp->fcport->port_name, res); + + sp->fcport->flags &= ~FCF_ASYNC_SENT; + + if (!test_bit(UNLOADING, &vha->dpc_flags)) { + memset(&ea, 0, sizeof(ea)); + ea.fcport = sp->fcport; + ea.data[0] = lio->u.logio.data[0]; + ea.data[1] = lio->u.logio.data[1]; + ea.iop[0] = lio->u.logio.iop[0]; + ea.iop[1] = lio->u.logio.iop[1]; + ea.sp = sp; + if (res == QLA_OS_TIMER_EXPIRED) + ea.data[0] = 
QLA_OS_TIMER_EXPIRED; + else if (res) + ea.data[0] = MBS_COMMAND_ERROR; + + qla24xx_handle_prli_done_event(vha, &ea); + } + + kref_put(&sp->cmd_kref, qla2x00_sp_release); +} + +int +qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + srb_t *sp; + struct srb_iocb *lio; + int rval = QLA_FUNCTION_FAILED; + + if (!vha->flags.online) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n", + __func__, __LINE__, fcport->port_name); + return rval; + } + + if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND || + fcport->fw_login_state == DSC_LS_PRLI_PEND) && + qla_dual_mode_enabled(vha)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n", + __func__, __LINE__, fcport->port_name); + return rval; + } + + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + return rval; + + fcport->flags |= FCF_ASYNC_SENT; + fcport->logout_completed = 0; + + sp->type = SRB_PRLI_CMD; + sp->name = "prli"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_prli_sp_done); + + lio = &sp->u.iocb_cmd; + lio->u.logio.flags = 0; + + if (NVME_TARGET(vha->hw, fcport)) + lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI; + + ql_dbg(ql_dbg_disc, vha, 0x211b, + "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n", + fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24, + fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority, + NVME_TARGET(vha->hw, fcport) ? "nvme" : "fcp"); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + fcport->flags |= FCF_LOGIN_NEEDED; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + goto done_free_sp; + } + + return rval; + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + +int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_GPDB); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + e->u.fcport.opt = opt; + fcport->flags |= FCF_ASYNC_ACTIVE; + return qla2x00_post_work(vha, e); +} + +int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) +{ + srb_t *sp; + struct srb_iocb *mbx; + int rval = QLA_FUNCTION_FAILED; + u16 *mb; + dma_addr_t pd_dma; + struct port_database_24xx *pd; + struct qla_hw_data *ha = vha->hw; + + if (IS_SESSION_DELETED(fcport)) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC is being delete - not sending command.\n", + __func__, fcport->port_name); + fcport->flags &= ~FCF_ASYNC_ACTIVE; + return rval; + } + + if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC online %d flags %x - not sending command.\n", + __func__, fcport->port_name, vha->flags.online, fcport->flags); + goto done; + } + + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + qla2x00_set_fcport_disc_state(fcport, DSC_GPDB); + + fcport->flags |= FCF_ASYNC_SENT; + sp->type = SRB_MB_IOCB; + sp->name = "gpdb"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla24xx_async_gpdb_sp_done); + + pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); + if (pd == NULL) { + ql_log(ql_log_warn, vha, 0xd043, + "Failed to allocate port database structure.\n"); + goto done_free_sp; + } + + mb = sp->u.iocb_cmd.u.mbx.out_mb; + mb[0] = MBC_GET_PORT_DATABASE; + mb[1] = fcport->loop_id; + mb[2] = MSW(pd_dma); + mb[3] = LSW(pd_dma); + mb[6] = 
MSW(MSD(pd_dma)); + mb[7] = LSW(MSD(pd_dma)); + mb[9] = vha->vp_idx; + mb[10] = opt; + + mbx = &sp->u.iocb_cmd; + mbx->u.mbx.in = (void *)pd; + mbx->u.mbx.in_dma = pd_dma; + + ql_dbg(ql_dbg_disc, vha, 0x20dc, + "Async-%s %8phC hndl %x opt %x\n", + sp->name, fcport->port_name, sp->handle, opt); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + return rval; + +done_free_sp: + if (pd) + dma_pool_free(ha->s_dma_pool, pd, pd_dma); + + kref_put(&sp->cmd_kref, qla2x00_sp_release); + fcport->flags &= ~FCF_ASYNC_SENT; +done: + fcport->flags &= ~FCF_ASYNC_ACTIVE; + qla24xx_post_gpdb_work(vha, fcport, opt); + return rval; +} + +static +void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) +{ + unsigned long flags; + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + ea->fcport->login_gen++; + ea->fcport->logout_on_delete = 1; + + if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) { + vha->fcport_count++; + ea->fcport->login_succ = 1; + + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + qla24xx_sched_upd_fcport(ea->fcport); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + } else if (ea->fcport->login_succ) { + /* + * We have an existing session. A late RSCN delivery + * must have triggered the session to be re-validate. + * Session is still valid. + */ + ql_dbg(ql_dbg_disc, vha, 0x20d6, + "%s %d %8phC session revalidate success\n", + __func__, __LINE__, ea->fcport->port_name); + qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE); + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); +} + +static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport, + struct port_database_24xx *pd) +{ + int rc = 0; + + if (pd->secure_login) { + ql_dbg(ql_dbg_disc, vha, 0x104d, + "Secure Login established on %8phC\n", + fcport->port_name); + fcport->flags |= FCF_FCSP_DEVICE; + } else { + ql_dbg(ql_dbg_disc, vha, 0x104d, + "non-Secure Login %8phC", + fcport->port_name); + fcport->flags &= ~FCF_FCSP_DEVICE; + } + if (vha->hw->flags.edif_enabled) { + if (fcport->flags & FCF_FCSP_DEVICE) { + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND); + /* Start edif prli timer & ring doorbell for app */ + fcport->edif.rx_sa_set = 0; + fcport->edif.tx_sa_set = 0; + fcport->edif.rx_sa_pending = 0; + fcport->edif.tx_sa_pending = 0; + + qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, + fcport->d_id.b24); + + if (DBELL_ACTIVE(vha)) { + ql_dbg(ql_dbg_disc, vha, 0x20ef, + "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n", + __func__, __LINE__, fcport->port_name); + fcport->edif.app_sess_online = 1; + + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, + fcport->d_id.b24, 0, fcport); + } + + rc = 1; + } else if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { + ql_dbg(ql_dbg_disc, vha, 0x2117, + "%s %d %8phC post prli\n", + __func__, __LINE__, fcport->port_name); + qla24xx_post_prli_work(vha, fcport); + rc = 1; + } + } + return rc; +} + +static +void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) +{ + fc_port_t *fcport = ea->fcport; + struct port_database_24xx *pd; + struct srb *sp = ea->sp; + uint8_t ls; + + pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; + + fcport->flags &= ~FCF_ASYNC_SENT; + + ql_dbg(ql_dbg_disc, vha, 0x20d2, + "%s %8phC DS %d LS %x fc4_type %x rc %x\n", __func__, + fcport->port_name, fcport->disc_state, pd->current_login_state, + fcport->fc4_type, ea->rc); + + if (fcport->disc_state == DSC_DELETE_PEND) { + ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d 
%8phC\n", + __func__, __LINE__, fcport->port_name); + return; + } + + if (NVME_TARGET(vha->hw, fcport)) + ls = pd->current_login_state >> 4; + else + ls = pd->current_login_state & 0xf; + + if (ea->sp->gen2 != fcport->login_gen) { + /* target side must have changed it. */ + + ql_dbg(ql_dbg_disc, vha, 0x20d3, + "%s %8phC generation changed\n", + __func__, fcport->port_name); + return; + } else if (ea->sp->gen1 != fcport->rscn_gen) { + qla_rscn_replay(fcport); + qlt_schedule_sess_for_deletion(fcport); + ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n", + __func__, __LINE__, fcport->port_name, ls); + return; + } + + switch (ls) { + case PDS_PRLI_COMPLETE: + __qla24xx_parse_gpdb(vha, fcport, pd); + break; + case PDS_PLOGI_COMPLETE: + if (qla_chk_secure_login(vha, fcport, pd)) { + ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n", + __func__, __LINE__, fcport->port_name, ls); + return; + } + fallthrough; + case PDS_PLOGI_PENDING: + case PDS_PRLI_PENDING: + case PDS_PRLI2_PENDING: + /* Set discovery state back to GNL to Relogin attempt */ + if (qla_dual_mode_enabled(vha) || + qla_ini_mode_enabled(vha)) { + qla2x00_set_fcport_disc_state(fcport, DSC_GNL); + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + } + ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n", + __func__, __LINE__, fcport->port_name, ls); + return; + case PDS_LOGO_PENDING: + case PDS_PORT_UNAVAILABLE: + default: + ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); + qlt_schedule_sess_for_deletion(fcport); + return; + } + __qla24xx_handle_gpdb_event(vha, ea); +} /* gpdb event */ + +static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + u8 login = 0; + int rc; + + ql_dbg(ql_dbg_disc, vha, 0x307b, + "%s %8phC DS %d LS %d lid %d retries=%d\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, fcport->loop_id, fcport->login_retry); + + if (qla_tgt_mode_enabled(vha)) + return; + + if (qla_dual_mode_enabled(vha)) { + if (N2N_TOPO(vha->hw)) { + u64 mywwn, wwn; + + mywwn = wwn_to_u64(vha->port_name); + wwn = wwn_to_u64(fcport->port_name); + if (mywwn > wwn) + login = 1; + else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP) + && time_after_eq(jiffies, + fcport->plogi_nack_done_deadline)) + login = 1; + } else { + login = 1; + } + } else { + /* initiator mode */ + login = 1; + } + + if (login && fcport->login_retry) { + fcport->login_retry--; + if (fcport->loop_id == FC_NO_LOOP_ID) { + fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + rc = qla2x00_find_new_loop_id(vha, fcport); + if (rc) { + ql_dbg(ql_dbg_disc, vha, 0x20e6, + "%s %d %8phC post del sess - out of loopid\n", + __func__, __LINE__, fcport->port_name); + fcport->scan_state = 0; + qlt_schedule_sess_for_deletion(fcport); + return; + } + } + ql_dbg(ql_dbg_disc, vha, 0x20bf, + "%s %d %8phC post login\n", + __func__, __LINE__, fcport->port_name); + qla2x00_post_async_login_work(vha, fcport, NULL); + } +} + +int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + u16 data[2]; + u16 sec; + + ql_dbg(ql_dbg_disc, vha, 0x20d8, + "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d fc4type %x\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, fcport->login_pause, fcport->flags, + fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen, + fcport->login_gen, fcport->loop_id, fcport->scan_state, + fcport->fc4_type); + + if (fcport->scan_state != QLA_FCPORT_FOUND || + fcport->disc_state == 
DSC_DELETE_PEND) + return 0; + + if ((fcport->loop_id != FC_NO_LOOP_ID) && + qla_dual_mode_enabled(vha) && + ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || + (fcport->fw_login_state == DSC_LS_PRLI_PEND))) + return 0; + + if (fcport->fw_login_state == DSC_LS_PLOGI_COMP && + !N2N_TOPO(vha->hw)) { + if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) { + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + return 0; + } + } + + /* Target won't initiate port login if fabric is present */ + if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw)) + return 0; + + if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) { + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + return 0; + } + + switch (fcport->disc_state) { + case DSC_DELETED: + switch (vha->hw->current_topology) { + case ISP_CFG_N: + if (fcport_is_smaller(fcport)) { + /* this adapter is bigger */ + if (fcport->login_retry) { + if (fcport->loop_id == FC_NO_LOOP_ID) { + qla2x00_find_new_loop_id(vha, + fcport); + fcport->fw_login_state = + DSC_LS_PORT_UNAVAIL; + } + fcport->login_retry--; + qla_post_els_plogi_work(vha, fcport); + } else { + ql_log(ql_log_info, vha, 0x705d, + "Unable to reach remote port %8phC", + fcport->port_name); + } + } else { + qla24xx_post_gnl_work(vha, fcport); + } + break; + default: + if (fcport->loop_id == FC_NO_LOOP_ID) { + ql_dbg(ql_dbg_disc, vha, 0x20bd, + "%s %d %8phC post gnl\n", + __func__, __LINE__, fcport->port_name); + qla24xx_post_gnl_work(vha, fcport); + } else { + qla_chk_n2n_b4_login(vha, fcport); + } + break; + } + break; + + case DSC_GNL: + switch (vha->hw->current_topology) { + case ISP_CFG_N: + if ((fcport->current_login_state & 0xf) == 0x6) { + ql_dbg(ql_dbg_disc, vha, 0x2118, + "%s %d %8phC post GPDB work\n", + __func__, __LINE__, fcport->port_name); + fcport->chip_reset = + vha->hw->base_qpair->chip_reset; + qla24xx_post_gpdb_work(vha, fcport, 0); + } else { + ql_dbg(ql_dbg_disc, vha, 0x2118, + "%s %d %8phC post %s PRLI\n", + __func__, __LINE__, fcport->port_name, + NVME_TARGET(vha->hw, fcport) ? "NVME" : + "FC"); + qla24xx_post_prli_work(vha, fcport); + } + break; + default: + if (fcport->login_pause) { + ql_dbg(ql_dbg_disc, vha, 0x20d8, + "%s %d %8phC exit\n", + __func__, __LINE__, + fcport->port_name); + fcport->last_rscn_gen = fcport->rscn_gen; + fcport->last_login_gen = fcport->login_gen; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; + } + qla_chk_n2n_b4_login(vha, fcport); + break; + } + break; + + case DSC_LOGIN_FAILED: + if (N2N_TOPO(vha->hw)) + qla_chk_n2n_b4_login(vha, fcport); + else + qlt_schedule_sess_for_deletion(fcport); + break; + + case DSC_LOGIN_COMPLETE: + /* recheck login state */ + data[0] = data[1] = 0; + qla2x00_post_async_adisc_work(vha, fcport, data); + break; + + case DSC_LOGIN_PEND: + if (vha->hw->flags.edif_enabled) + break; + + if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { + ql_dbg(ql_dbg_disc, vha, 0x2118, + "%s %d %8phC post %s PRLI\n", + __func__, __LINE__, fcport->port_name, + NVME_TARGET(vha->hw, fcport) ? 
"NVME" : "FC"); + qla24xx_post_prli_work(vha, fcport); + } + break; + + case DSC_UPD_FCPORT: + sec = jiffies_to_msecs(jiffies - + fcport->jiffies_at_registration)/1000; + if (fcport->sec_since_registration < sec && sec && + !(sec % 60)) { + fcport->sec_since_registration = sec; + ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + "%s %8phC - Slow Rport registration(%d Sec)\n", + __func__, fcport->port_name, sec); + } + + if (fcport->next_disc_state != DSC_DELETE_PEND) + fcport->next_disc_state = DSC_ADISC; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; + + default: + break; + } + + return 0; +} + +int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id, + u8 *port_name, u8 *node_name, void *pla, u8 fc4_type) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.new_sess.id = *id; + e->u.new_sess.pla = pla; + e->u.new_sess.fc4_type = fc4_type; + memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE); + if (node_name) + memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE); + + return qla2x00_post_work(vha, e); +} + +void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) +{ + fc_port_t *fcport; + unsigned long flags; + + switch (ea->id.b.rsvd_1) { + case RSCN_PORT_ADDR: + fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1); + if (fcport) { + if (ql2xfc2target && + fcport->flags & FCF_FCP2_DEVICE && + atomic_read(&fcport->state) == FCS_ONLINE) { + ql_dbg(ql_dbg_disc, vha, 0x2115, + "Delaying session delete for FCP2 portid=%06x %8phC ", + fcport->d_id.b24, fcport->port_name); + return; + } + + if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) { + /* + * On ipsec start by remote port, Target port + * may use RSCN to trigger initiator to + * relogin. If driver is already in the + * process of a relogin, then ignore the RSCN + * and allow the current relogin to continue. + * This reduces thrashing of the connection. + */ + if (atomic_read(&fcport->state) == FCS_ONLINE) { + /* + * If state = online, then set scan_needed=1 to do relogin. 
+ * Otherwise we're already in the middle of a relogin + */ + fcport->scan_needed = 1; + fcport->rscn_gen++; + } + } else { + fcport->scan_needed = 1; + fcport->rscn_gen++; + } + } + break; + case RSCN_AREA_ADDR: + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->flags & FCF_FCP2_DEVICE && + atomic_read(&fcport->state) == FCS_ONLINE) + continue; + + if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) { + fcport->scan_needed = 1; + fcport->rscn_gen++; + } + } + break; + case RSCN_DOM_ADDR: + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->flags & FCF_FCP2_DEVICE && + atomic_read(&fcport->state) == FCS_ONLINE) + continue; + + if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) { + fcport->scan_needed = 1; + fcport->rscn_gen++; + } + } + break; + case RSCN_FAB_ADDR: + default: + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->flags & FCF_FCP2_DEVICE && + atomic_read(&fcport->state) == FCS_ONLINE) + continue; + + fcport->scan_needed = 1; + fcport->rscn_gen++; + } + break; + } + + spin_lock_irqsave(&vha->work_lock, flags); + if (vha->scan.scan_flags == 0) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__); + vha->scan.scan_flags |= SF_QUEUED; + schedule_delayed_work(&vha->scan.scan_work, 5); + } + spin_unlock_irqrestore(&vha->work_lock, flags); +} + +void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, + struct event_arg *ea) +{ + fc_port_t *fcport = ea->fcport; + + if (test_bit(UNLOADING, &vha->dpc_flags)) + return; + + ql_dbg(ql_dbg_disc, vha, 0x2102, + "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, fcport->login_pause, + fcport->deleted, fcport->conflict, + fcport->last_rscn_gen, fcport->rscn_gen, + fcport->last_login_gen, fcport->login_gen, + fcport->flags); + + if (fcport->last_rscn_gen != fcport->rscn_gen) { + ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n", + __func__, __LINE__, fcport->port_name); + qla24xx_post_gnl_work(vha, fcport); + return; + } + + qla24xx_fcport_handle_login(vha, fcport); +} + +void qla_handle_els_plogi_done(scsi_qla_host_t *vha, + struct event_arg *ea) +{ + if (N2N_TOPO(vha->hw) && fcport_is_smaller(ea->fcport) && + vha->hw->flags.edif_enabled) { + /* check to see if App support Secure */ + qla24xx_post_gpdb_work(vha, ea->fcport, 0); + return; + } + + /* for pure Target Mode, PRLI will not be initiated */ + if (vha->host->active_mode == MODE_TARGET) + return; + + ql_dbg(ql_dbg_disc, vha, 0x2118, + "%s %d %8phC post PRLI\n", + __func__, __LINE__, ea->fcport->port_name); + qla24xx_post_prli_work(vha, ea->fcport); +} + +/* + * RSCN(s) came in for this fcport, but the RSCN(s) was not able + * to be consumed by the fcport + */ +void qla_rscn_replay(fc_port_t *fcport) +{ + struct event_arg ea; + + switch (fcport->disc_state) { + case DSC_DELETE_PEND: + return; + default: + break; + } + + if (fcport->scan_needed) { + memset(&ea, 0, sizeof(ea)); + ea.id = fcport->d_id; + ea.id.b.rsvd_1 = RSCN_PORT_ADDR; + qla2x00_handle_rscn(fcport->vha, &ea); + } +} + +static void +qla2x00_tmf_iocb_timeout(void *data) +{ + srb_t *sp = data; + struct srb_iocb *tmf = &sp->u.iocb_cmd; + int rc, h; + unsigned long flags; + + if (sp->type == SRB_MARKER) + rc = QLA_FUNCTION_FAILED; + else + rc = qla24xx_async_abort_cmd(sp, false); + + if (rc) { + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { + if 
(sp->qpair->req->outstanding_cmds[h] == sp) { + sp->qpair->req->outstanding_cmds[h] = NULL; + qla_put_fw_resources(sp->qpair, &sp->iores); + break; + } + } + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT); + tmf->u.tmf.data = QLA_FUNCTION_FAILED; + complete(&tmf->u.tmf.comp); + } +} + +static void qla_marker_sp_done(srb_t *sp, int res) +{ + struct srb_iocb *tmf = &sp->u.iocb_cmd; + + if (res != QLA_SUCCESS) + ql_dbg(ql_dbg_taskm, sp->vha, 0x8004, + "Async-marker fail hdl=%x portid=%06x ctrl=%x lun=%lld qp=%d.\n", + sp->handle, sp->fcport->d_id.b24, sp->u.iocb_cmd.u.tmf.flags, + sp->u.iocb_cmd.u.tmf.lun, sp->qpair->id); + + sp->u.iocb_cmd.u.tmf.data = res; + complete(&tmf->u.tmf.comp); +} + +#define START_SP_W_RETRIES(_sp, _rval, _chip_gen, _login_gen) \ +{\ + int cnt = 5; \ + do { \ + if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\ + _rval = EINVAL; \ + break; \ + } \ + _rval = qla2x00_start_sp(_sp); \ + if (_rval == EAGAIN) \ + msleep(1); \ + else \ + break; \ + cnt--; \ + } while (cnt); \ +} + +/** + * qla26xx_marker: send marker IOCB and wait for the completion of it. + * @arg: pointer to argument list. + * It is assume caller will provide an fcport pointer and modifier + */ +static int +qla26xx_marker(struct tmf_arg *arg) +{ + struct scsi_qla_host *vha = arg->vha; + struct srb_iocb *tm_iocb; + srb_t *sp; + int rval = QLA_FUNCTION_FAILED; + fc_port_t *fcport = arg->fcport; + u32 chip_gen, login_gen; + + if (TMF_NOT_READY(arg->fcport)) { + ql_dbg(ql_dbg_taskm, vha, 0x8039, + "FC port not ready for marker loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n", + fcport->loop_id, fcport->d_id.b24, + arg->modifier, arg->lun, arg->qpair->id); + return QLA_SUSPENDED; + } + + chip_gen = vha->hw->chip_reset; + login_gen = fcport->login_gen; + + /* ref: INIT */ + sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_MARKER; + sp->name = "marker"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), qla_marker_sp_done); + sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout; + + tm_iocb = &sp->u.iocb_cmd; + init_completion(&tm_iocb->u.tmf.comp); + tm_iocb->u.tmf.modifier = arg->modifier; + tm_iocb->u.tmf.lun = arg->lun; + tm_iocb->u.tmf.loop_id = fcport->loop_id; + tm_iocb->u.tmf.vp_index = vha->vp_idx; + + START_SP_W_RETRIES(sp, rval, chip_gen, login_gen); + + ql_dbg(ql_dbg_taskm, vha, 0x8006, + "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n", + sp->handle, fcport->loop_id, fcport->d_id.b24, + arg->modifier, arg->lun, sp->qpair->id, rval); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x8031, + "Marker IOCB send failure (%x).\n", rval); + goto done_free_sp; + } + + wait_for_completion(&tm_iocb->u.tmf.comp); + rval = tm_iocb->u.tmf.data; + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x8019, + "Marker failed hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n", + sp->handle, fcport->loop_id, fcport->d_id.b24, + arg->modifier, arg->lun, sp->qpair->id, rval); + } + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + return rval; +} + +static void qla2x00_tmf_sp_done(srb_t *sp, int res) +{ + struct srb_iocb *tmf = &sp->u.iocb_cmd; + + if (res) + tmf->u.tmf.data = res; + complete(&tmf->u.tmf.comp); +} + +static int qla_tmf_wait(struct tmf_arg *arg) +{ + /* there are only 2 types of error handling that reaches here, lun or target reset */ + if 
(arg->flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET | TCF_CLEAR_TASK_SET)) + return qla2x00_eh_wait_for_pending_commands(arg->vha, + arg->fcport->d_id.b24, arg->lun, WAIT_LUN); + else + return qla2x00_eh_wait_for_pending_commands(arg->vha, + arg->fcport->d_id.b24, arg->lun, WAIT_TARGET); +} + +static int +__qla2x00_async_tm_cmd(struct tmf_arg *arg) +{ + struct scsi_qla_host *vha = arg->vha; + struct srb_iocb *tm_iocb; + srb_t *sp; + int rval = QLA_FUNCTION_FAILED; + fc_port_t *fcport = arg->fcport; + u32 chip_gen, login_gen; + u64 jif; + + if (TMF_NOT_READY(arg->fcport)) { + ql_dbg(ql_dbg_taskm, vha, 0x8032, + "FC port not ready for TM command loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n", + fcport->loop_id, fcport->d_id.b24, + arg->modifier, arg->lun, arg->qpair->id); + return QLA_SUSPENDED; + } + + chip_gen = vha->hw->chip_reset; + login_gen = fcport->login_gen; + + /* ref: INIT */ + sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL); + if (!sp) + goto done; + + qla_vha_mark_busy(vha); + sp->type = SRB_TM_CMD; + sp->name = "tmf"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), + qla2x00_tmf_sp_done); + sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout; + + tm_iocb = &sp->u.iocb_cmd; + init_completion(&tm_iocb->u.tmf.comp); + tm_iocb->u.tmf.flags = arg->flags; + tm_iocb->u.tmf.lun = arg->lun; + + START_SP_W_RETRIES(sp, rval, chip_gen, login_gen); + + ql_dbg(ql_dbg_taskm, vha, 0x802f, + "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n", + sp->handle, fcport->loop_id, fcport->d_id.b24, + arg->flags, arg->lun, sp->qpair->id, rval); + + if (rval != QLA_SUCCESS) + goto done_free_sp; + wait_for_completion(&tm_iocb->u.tmf.comp); + + rval = tm_iocb->u.tmf.data; + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x8030, + "TM IOCB failed (%x).\n", rval); + } + + if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) { + jif = jiffies; + if (qla_tmf_wait(arg)) { + ql_log(ql_log_info, vha, 0x803e, + "Waited %u ms Nexus=%ld:%06x:%llu.\n", + jiffies_to_msecs(jiffies - jif), vha->host_no, + fcport->d_id.b24, arg->lun); + } + + if (chip_gen == vha->hw->chip_reset && login_gen == fcport->login_gen) { + rval = qla26xx_marker(arg); + } else { + ql_log(ql_log_info, vha, 0x803e, + "Skip Marker due to disruption. Nexus=%ld:%06x:%llu.\n", + vha->host_no, fcport->d_id.b24, arg->lun); + rval = QLA_FUNCTION_FAILED; + } + } + if (tm_iocb->u.tmf.data) + rval = tm_iocb->u.tmf.data; + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + return rval; +} + +static void qla_put_tmf(struct tmf_arg *arg) +{ + struct scsi_qla_host *vha = arg->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + ha->active_tmf--; + list_del(&arg->tmf_elem); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); +} + +static +int qla_get_tmf(struct tmf_arg *arg) +{ + struct scsi_qla_host *vha = arg->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + fc_port_t *fcport = arg->fcport; + int rc = 0; + struct tmf_arg *t; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + list_for_each_entry(t, &ha->tmf_active, tmf_elem) { + if (t->fcport == arg->fcport && t->lun == arg->lun) { + /* reject duplicate TMF */ + ql_log(ql_log_warn, vha, 0x802c, + "found duplicate TMF. 
Nexus=%ld:%06x:%llu.\n", + vha->host_no, fcport->d_id.b24, arg->lun); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return -EINVAL; + } + } + + list_add_tail(&arg->tmf_elem, &ha->tmf_pending); + while (ha->active_tmf >= MAX_ACTIVE_TMF) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + msleep(1); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (TMF_NOT_READY(fcport)) { + ql_log(ql_log_warn, vha, 0x802c, + "Unable to acquire TM resource due to disruption.\n"); + rc = EIO; + break; + } + if (ha->active_tmf < MAX_ACTIVE_TMF && + list_is_first(&arg->tmf_elem, &ha->tmf_pending)) + break; + } + + list_del(&arg->tmf_elem); + + if (!rc) { + ha->active_tmf++; + list_add_tail(&arg->tmf_elem, &ha->tmf_active); + } + + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + return rc; +} + +int +qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun, + uint32_t tag) +{ + struct scsi_qla_host *vha = fcport->vha; + struct tmf_arg a; + int rval = QLA_SUCCESS; + + if (TMF_NOT_READY(fcport)) + return QLA_SUSPENDED; + + a.vha = fcport->vha; + a.fcport = fcport; + a.lun = lun; + a.flags = flags; + INIT_LIST_HEAD(&a.tmf_elem); + + if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) { + a.modifier = MK_SYNC_ID_LUN; + } else { + a.modifier = MK_SYNC_ID; + } + + if (qla_get_tmf(&a)) + return QLA_FUNCTION_FAILED; + + a.qpair = vha->hw->base_qpair; + rval = __qla2x00_async_tm_cmd(&a); + + qla_put_tmf(&a); + return rval; +} + +int +qla24xx_async_abort_command(srb_t *sp) +{ + unsigned long flags = 0; + + uint32_t handle; + fc_port_t *fcport = sp->fcport; + struct qla_qpair *qpair = sp->qpair; + struct scsi_qla_host *vha = fcport->vha; + struct req_que *req = qpair->req; + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + for (handle = 1; handle < req->num_outstanding_cmds; handle++) { + if (req->outstanding_cmds[handle] == sp) + break; + } + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + if (handle == req->num_outstanding_cmds) { + /* Command not found. */ + return QLA_ERR_NOT_FOUND; + } + if (sp->type == SRB_FXIOCB_DCMD) + return qlafx00_fx_disc(vha, &vha->hw->mr.fcport, + FXDISC_ABORT_IOCTL); + + return qla24xx_async_abort_cmd(sp, true); +} + +static void +qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) +{ + struct srb *sp; + WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", + ea->data[0]); + + switch (ea->data[0]) { + case MBS_COMMAND_COMPLETE: + ql_dbg(ql_dbg_disc, vha, 0x2118, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, ea->fcport->port_name); + + ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; + ea->fcport->logout_on_delete = 1; + ea->fcport->nvme_prli_service_param = ea->iop[0]; + if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST) + ea->fcport->nvme_first_burst_size = + (ea->iop[1] & 0xffff) * 512; + else + ea->fcport->nvme_first_burst_size = 0; + qla24xx_post_gpdb_work(vha, ea->fcport, 0); + break; + default: + sp = ea->sp; + ql_dbg(ql_dbg_disc, vha, 0x2118, + "%s %d %8phC priority %s, fc4type %x prev try %s\n", + __func__, __LINE__, ea->fcport->port_name, + vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ? + "FCP" : "NVMe", ea->fcport->fc4_type, + (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) ? 
+ "NVME" : "FCP"); + + if (NVME_FCP_TARGET(ea->fcport)) { + if (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) + ea->fcport->do_prli_nvme = 0; + else + ea->fcport->do_prli_nvme = 1; + } else { + ea->fcport->do_prli_nvme = 0; + } + + if (N2N_TOPO(vha->hw)) { + if (ea->fcport->n2n_link_reset_cnt == + vha->hw->login_retry_count && + ea->fcport->flags & FCF_FCSP_DEVICE) { + /* remote authentication app just started */ + ea->fcport->n2n_link_reset_cnt = 0; + } + + if (ea->fcport->n2n_link_reset_cnt < + vha->hw->login_retry_count) { + ea->fcport->n2n_link_reset_cnt++; + vha->relogin_jif = jiffies + 2 * HZ; + /* + * PRLI failed. Reset link to kick start + * state machine + */ + set_bit(N2N_LINK_RESET, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } else { + ql_log(ql_log_warn, vha, 0x2119, + "%s %d %8phC Unable to reconnect\n", + __func__, __LINE__, + ea->fcport->port_name); + } + } else { + /* + * switch connect. login failed. Take connection down + * and allow relogin to retrigger + */ + ea->fcport->flags &= ~FCF_ASYNC_SENT; + ea->fcport->keep_nport_handle = 0; + ea->fcport->logout_on_delete = 1; + qlt_schedule_sess_for_deletion(ea->fcport); + } + break; + } +} + +void +qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) +{ + port_id_t cid; /* conflict Nport id */ + u16 lid; + struct fc_port *conflict_fcport; + unsigned long flags; + struct fc_port *fcport = ea->fcport; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen, + ea->sp->gen1, fcport->rscn_gen, + ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]); + + if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || + (fcport->fw_login_state == DSC_LS_PRLI_PEND)) { + ql_dbg(ql_dbg_disc, vha, 0x20ea, + "%s %d %8phC Remote is trying to login\n", + __func__, __LINE__, fcport->port_name); + return; + } + + if ((fcport->disc_state == DSC_DELETE_PEND) || + (fcport->disc_state == DSC_DELETED)) { + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + return; + } + + if (ea->sp->gen2 != fcport->login_gen) { + /* target side must have changed it. */ + ql_dbg(ql_dbg_disc, vha, 0x20d3, + "%s %8phC generation changed\n", + __func__, fcport->port_name); + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + return; + } else if (ea->sp->gen1 != fcport->rscn_gen) { + ql_dbg(ql_dbg_disc, vha, 0x20d3, + "%s %8phC RSCN generation changed\n", + __func__, fcport->port_name); + qla_rscn_replay(fcport); + qlt_schedule_sess_for_deletion(fcport); + return; + } + + WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", + ea->data[0]); + + switch (ea->data[0]) { + case MBS_COMMAND_COMPLETE: + /* + * Driver must validate login state - If PRLI not complete, + * force a relogin attempt via implicit LOGO, PLOGI, and PRLI + * requests. 
+ */ + if (vha->hw->flags.edif_enabled) { + set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; + ea->fcport->logout_on_delete = 1; + ea->fcport->send_els_logo = 0; + ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + qla24xx_post_gpdb_work(vha, ea->fcport, 0); + } else { + if (NVME_TARGET(vha->hw, fcport)) { + ql_dbg(ql_dbg_disc, vha, 0x2117, + "%s %d %8phC post prli\n", + __func__, __LINE__, fcport->port_name); + qla24xx_post_prli_work(vha, fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0x20ea, + "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n", + __func__, __LINE__, fcport->port_name, + fcport->loop_id, fcport->d_id.b24); + + set_bit(fcport->loop_id, vha->hw->loop_id_map); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport->chip_reset = vha->hw->base_qpair->chip_reset; + fcport->logout_on_delete = 1; + fcport->send_els_logo = 0; + fcport->fw_login_state = DSC_LS_PRLI_COMP; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + qla24xx_post_gpdb_work(vha, fcport, 0); + } + } + break; + case MBS_COMMAND_ERROR: + ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n", + __func__, __LINE__, ea->fcport->port_name, ea->data[1]); + + qlt_schedule_sess_for_deletion(ea->fcport); + break; + case MBS_LOOP_ID_USED: + /* data[1] = IO PARAM 1 = nport ID */ + cid.b.domain = (ea->iop[1] >> 16) & 0xff; + cid.b.area = (ea->iop[1] >> 8) & 0xff; + cid.b.al_pa = ea->iop[1] & 0xff; + cid.b.rsvd_1 = 0; + + ql_dbg(ql_dbg_disc, vha, 0x20ec, + "%s %d %8phC lid %#x in use with pid %06x post gnl\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->loop_id, cid.b24); + + set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); + ea->fcport->loop_id = FC_NO_LOOP_ID; + qla24xx_post_gnl_work(vha, ea->fcport); + break; + case MBS_PORT_ID_USED: + lid = ea->iop[1] & 0xffff; + qlt_find_sess_invalidate_other(vha, + wwn_to_u64(ea->fcport->port_name), + ea->fcport->d_id, lid, &conflict_fcport); + + if (conflict_fcport) { + /* + * Another fcport share the same loop_id/nport id. + * Conflict fcport needs to finish cleanup before this + * fcport can proceed to login. + */ + conflict_fcport->conflict = ea->fcport; + ea->fcport->login_pause = 1; + + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "%s %d %8phC NPortId %06x inuse with loopid 0x%x.\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->d_id.b24, lid); + } else { + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->d_id.b24, lid); + + qla2x00_clear_loop_id(ea->fcport); + set_bit(lid, vha->hw->loop_id_map); + ea->fcport->loop_id = lid; + ea->fcport->keep_nport_handle = 0; + ea->fcport->logout_on_delete = 1; + qlt_schedule_sess_for_deletion(ea->fcport); + } + break; + } + return; +} + +/****************************************************************************/ +/* QLogic ISP2x00 Hardware Support Functions. 
*/ +/****************************************************************************/ + +static int +qla83xx_nic_core_fw_load(scsi_qla_host_t *vha) +{ + int rval = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + uint32_t idc_major_ver, idc_minor_ver; + uint16_t config[4]; + + qla83xx_idc_lock(vha, 0); + + /* SV: TODO: Assign initialization timeout from + * flash-info / other param + */ + ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT; + ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT; + + /* Set our fcoe function presence */ + if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) { + ql_dbg(ql_dbg_p3p, vha, 0xb077, + "Error while setting DRV-Presence.\n"); + rval = QLA_FUNCTION_FAILED; + goto exit; + } + + /* Decide the reset ownership */ + qla83xx_reset_ownership(vha); + + /* + * On first protocol driver load: + * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery + * register. + * Others: Check compatibility with current IDC Major version. + */ + qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver); + if (ha->flags.nic_core_reset_owner) { + /* Set IDC Major version */ + idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION; + qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver); + + /* Clearing IDC-Lock-Recovery register */ + qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0); + } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) { + /* + * Clear further IDC participation if we are not compatible with + * the current IDC Major Version. + */ + ql_log(ql_log_warn, vha, 0xb07d, + "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n", + idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION); + __qla83xx_clear_drv_presence(vha); + rval = QLA_FUNCTION_FAILED; + goto exit; + } + /* Each function sets its supported Minor version. */ + qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver); + idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2)); + qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver); + + if (ha->flags.nic_core_reset_owner) { + memset(config, 0, sizeof(config)); + if (!qla81xx_get_port_config(vha, config)) + qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, + QLA8XXX_DEV_READY); + } + + rval = qla83xx_idc_state_handler(vha); + +exit: + qla83xx_idc_unlock(vha, 0); + + return rval; +} + +/* +* qla2x00_initialize_adapter +* Initialize board. +* +* Input: +* ha = adapter block pointer. +* +* Returns: +* 0 = success +*/ +int +qla2x00_initialize_adapter(scsi_qla_host_t *vha) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + memset(&vha->qla_stats, 0, sizeof(vha->qla_stats)); + memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat)); + + /* Clear adapter flags. 
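+ *
+ * For orientation, the initialization below proceeds roughly as
+ * follows (simplified sketch, error handling and hardware-specific
+ * branches omitted; see the actual code for the full sequence):
+ *
+ *     ha->isp_ops->pci_config(vha);
+ *     ha->isp_ops->reset_chip(vha);
+ *     qla2xxx_get_flash_info(vha);
+ *     ha->isp_ops->nvram_config(vha);
+ *     if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
+ *             ha->isp_ops->chip_diag(vha);
+ *             qla2x00_setup_chip(vha);
+ *     }
+ *     qla2x00_init_rings(vha);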
*/ + vha->flags.online = 0; + ha->flags.chip_reset_done = 0; + vha->flags.reset_active = 0; + ha->flags.pci_channel_io_perm_failure = 0; + ha->flags.eeh_busy = 0; + vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + atomic_set(&vha->loop_state, LOOP_DOWN); + vha->device_flags = DFLG_NO_CABLE; + vha->dpc_flags = 0; + vha->flags.management_server_logged_in = 0; + vha->marker_needed = 0; + ha->isp_abort_cnt = 0; + ha->beacon_blink_led = 0; + + set_bit(0, ha->req_qid_map); + set_bit(0, ha->rsp_qid_map); + + ql_dbg(ql_dbg_init, vha, 0x0040, + "Configuring PCI space...\n"); + rval = ha->isp_ops->pci_config(vha); + if (rval) { + ql_log(ql_log_warn, vha, 0x0044, + "Unable to configure PCI space.\n"); + return (rval); + } + + ha->isp_ops->reset_chip(vha); + + /* Check for secure flash support */ + if (IS_QLA28XX(ha)) { + if (rd_reg_word(®->mailbox12) & BIT_0) + ha->flags.secure_adapter = 1; + ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n", + (ha->flags.secure_adapter) ? "Yes" : "No"); + } + + + rval = qla2xxx_get_flash_info(vha); + if (rval) { + ql_log(ql_log_fatal, vha, 0x004f, + "Unable to validate FLASH data.\n"); + return rval; + } + + if (IS_QLA8044(ha)) { + qla8044_read_reset_template(vha); + + /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0. + * If DONRESET_BIT0 is set, drivers should not set dev_state + * to NEED_RESET. But if NEED_RESET is set, drivers should + * should honor the reset. */ + if (ql2xdontresethba == 1) + qla8044_set_idc_dontreset(vha); + } + + ha->isp_ops->get_flash_version(vha, req->ring); + ql_dbg(ql_dbg_init, vha, 0x0061, + "Configure NVRAM parameters...\n"); + + /* Let priority default to FCP, can be overridden by nvram_config */ + ha->fc4_type_priority = FC4_PRIORITY_FCP; + + ha->isp_ops->nvram_config(vha); + + if (ha->fc4_type_priority != FC4_PRIORITY_FCP && + ha->fc4_type_priority != FC4_PRIORITY_NVME) + ha->fc4_type_priority = FC4_PRIORITY_FCP; + + ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n", + ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe"); + + if (ha->flags.disable_serdes) { + /* Mask HBA via NVRAM settings? */ + ql_log(ql_log_info, vha, 0x0077, + "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name); + return QLA_FUNCTION_FAILED; + } + + ql_dbg(ql_dbg_init, vha, 0x0078, + "Verifying loaded RISC code...\n"); + + /* If smartsan enabled then require fdmi and rdp enabled */ + if (ql2xsmartsan) { + ql2xfdmienable = 1; + ql2xrdpenable = 1; + } + + if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) { + rval = ha->isp_ops->chip_diag(vha); + if (rval) + return (rval); + rval = qla2x00_setup_chip(vha); + if (rval) + return (rval); + } + + if (IS_QLA84XX(ha)) { + ha->cs84xx = qla84xx_get_chip(vha); + if (!ha->cs84xx) { + ql_log(ql_log_warn, vha, 0x00d0, + "Unable to configure ISP84XX.\n"); + return QLA_FUNCTION_FAILED; + } + } + + if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) + rval = qla2x00_init_rings(vha); + + /* No point in continuing if firmware initialization failed. */ + if (rval != QLA_SUCCESS) + return rval; + + ha->flags.chip_reset_done = 1; + + if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { + /* Issue verify 84xx FW IOCB to complete 84xx initialization */ + rval = qla84xx_init_chip(vha); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x00d4, + "Unable to initialize ISP84XX.\n"); + qla84xx_put_chip(vha); + } + } + + /* Load the NIC Core f/w if we are the first protocol driver. 
*/ + if (IS_QLA8031(ha)) { + rval = qla83xx_nic_core_fw_load(vha); + if (rval) + ql_log(ql_log_warn, vha, 0x0124, + "Error in initializing NIC Core f/w.\n"); + } + + if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) + qla24xx_read_fcp_prio_cfg(vha); + + if (IS_P3P_TYPE(ha)) + qla82xx_set_driver_version(vha, QLA2XXX_VERSION); + else + qla25xx_set_driver_version(vha, QLA2XXX_VERSION); + + return (rval); +} + +/** + * qla2100_pci_config() - Setup ISP21xx PCI configuration registers. + * @vha: HA context + * + * Returns 0 on success. + */ +int +qla2100_pci_config(scsi_qla_host_t *vha) +{ + uint16_t w; + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + pci_set_master(ha->pdev); + pci_try_set_mwi(ha->pdev); + + pci_read_config_word(ha->pdev, PCI_COMMAND, &w); + w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); + pci_write_config_word(ha->pdev, PCI_COMMAND, w); + + pci_disable_rom(ha->pdev); + + /* Get PCI bus information. */ + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->pci_attr = rd_reg_word(®->ctrl_status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_SUCCESS; +} + +/** + * qla2300_pci_config() - Setup ISP23xx PCI configuration registers. + * @vha: HA context + * + * Returns 0 on success. + */ +int +qla2300_pci_config(scsi_qla_host_t *vha) +{ + uint16_t w; + unsigned long flags = 0; + uint32_t cnt; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + pci_set_master(ha->pdev); + pci_try_set_mwi(ha->pdev); + + pci_read_config_word(ha->pdev, PCI_COMMAND, &w); + w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); + + if (IS_QLA2322(ha) || IS_QLA6322(ha)) + w &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(ha->pdev, PCI_COMMAND, w); + + /* + * If this is a 2300 card and not 2312, reset the + * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately, + * the 2310 also reports itself as a 2300 so we need to get the + * fb revision level -- a 6 indicates it really is a 2300 and + * not a 2310. + */ + if (IS_QLA2300(ha)) { + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Pause RISC. */ + wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); + for (cnt = 0; cnt < 30000; cnt++) { + if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) != 0) + break; + + udelay(10); + } + + /* Select FPM registers. */ + wrt_reg_word(®->ctrl_status, 0x20); + rd_reg_word(®->ctrl_status); + + /* Get the fb rev level */ + ha->fb_rev = RD_FB_CMD_REG(ha, reg); + + if (ha->fb_rev == FPM_2300) + pci_clear_mwi(ha->pdev); + + /* Deselect FPM registers. */ + wrt_reg_word(®->ctrl_status, 0x0); + rd_reg_word(®->ctrl_status); + + /* Release RISC module. */ + wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); + for (cnt = 0; cnt < 30000; cnt++) { + if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0) + break; + + udelay(10); + } + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } + + pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); + + pci_disable_rom(ha->pdev); + + /* Get PCI bus information. */ + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->pci_attr = rd_reg_word(®->ctrl_status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_SUCCESS; +} + +/** + * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers. + * @vha: HA context + * + * Returns 0 on success. 
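+ *
+ * Like the other *_pci_config() helpers in this file, this performs a
+ * read-modify-write of the PCI command register. A minimal sketch of
+ * that pattern (illustrative only, not the exact code):
+ *
+ *     u16 w;
+ *
+ *     pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+ *     w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
+ *     w &= ~PCI_COMMAND_INTX_DISABLE;
+ *     pci_write_config_word(ha->pdev, PCI_COMMAND, w);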
+ */ +int +qla24xx_pci_config(scsi_qla_host_t *vha) +{ + uint16_t w; + unsigned long flags = 0; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + pci_set_master(ha->pdev); + pci_try_set_mwi(ha->pdev); + + pci_read_config_word(ha->pdev, PCI_COMMAND, &w); + w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); + w &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(ha->pdev, PCI_COMMAND, w); + + pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); + + /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */ + if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX)) + pcix_set_mmrbc(ha->pdev, 2048); + + /* PCIe -- adjust Maximum Read Request Size (2048). */ + if (pci_is_pcie(ha->pdev)) + pcie_set_readrq(ha->pdev, 4096); + + pci_disable_rom(ha->pdev); + + ha->chip_revision = ha->pdev->revision; + + /* Get PCI bus information. */ + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->pci_attr = rd_reg_dword(®->ctrl_status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_SUCCESS; +} + +/** + * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers. + * @vha: HA context + * + * Returns 0 on success. + */ +int +qla25xx_pci_config(scsi_qla_host_t *vha) +{ + uint16_t w; + struct qla_hw_data *ha = vha->hw; + + pci_set_master(ha->pdev); + pci_try_set_mwi(ha->pdev); + + pci_read_config_word(ha->pdev, PCI_COMMAND, &w); + w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); + w &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(ha->pdev, PCI_COMMAND, w); + + /* PCIe -- adjust Maximum Read Request Size (2048). */ + if (pci_is_pcie(ha->pdev)) + pcie_set_readrq(ha->pdev, 4096); + + pci_disable_rom(ha->pdev); + + ha->chip_revision = ha->pdev->revision; + + return QLA_SUCCESS; +} + +/** + * qla2x00_isp_firmware() - Choose firmware image. + * @vha: HA context + * + * Returns 0 on success. + */ +static int +qla2x00_isp_firmware(scsi_qla_host_t *vha) +{ + int rval; + uint16_t loop_id, topo, sw_cap; + uint8_t domain, area, al_pa; + struct qla_hw_data *ha = vha->hw; + + /* Assume loading risc code */ + rval = QLA_FUNCTION_FAILED; + + if (ha->flags.disable_risc_code_load) { + ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n"); + + /* Verify checksum of loaded RISC code. */ + rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address); + if (rval == QLA_SUCCESS) { + /* And, verify we are not in ROM code. */ + rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa, + &area, &domain, &topo, &sw_cap); + } + } + + if (rval) + ql_dbg(ql_dbg_init, vha, 0x007a, + "**** Load RISC code ****.\n"); + + return (rval); +} + +/** + * qla2x00_reset_chip() - Reset ISP chip. + * @vha: HA context + * + * Returns 0 on success. + */ +int +qla2x00_reset_chip(scsi_qla_host_t *vha) +{ + unsigned long flags = 0; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + uint32_t cnt; + uint16_t cmd; + int rval = QLA_FUNCTION_FAILED; + + if (unlikely(pci_channel_offline(ha->pdev))) + return rval; + + ha->isp_ops->disable_intrs(ha); + + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Turn off master enable */ + cmd = 0; + pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd); + cmd &= ~PCI_COMMAND_MASTER; + pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); + + if (!IS_QLA2100(ha)) { + /* Pause RISC. 
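+ *
+ * Write-then-poll: HCCR_PAUSE_RISC is written to HCCR and, on
+ * ISP2200/ISP2300, HCCR_RISC_PAUSE is polled for up to ~3 s
+ * (30000 x 100 us). A simplified sketch of the pattern
+ * (illustrative, not the exact code):
+ *
+ *     wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
+ *     for (cnt = 0; cnt < 30000; cnt++) {
+ *             if (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE)
+ *                     break;
+ *             udelay(100);
+ *     }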
*/ + wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); + if (IS_QLA2200(ha) || IS_QLA2300(ha)) { + for (cnt = 0; cnt < 30000; cnt++) { + if ((rd_reg_word(®->hccr) & + HCCR_RISC_PAUSE) != 0) + break; + udelay(100); + } + } else { + rd_reg_word(®->hccr); /* PCI Posting. */ + udelay(10); + } + + /* Select FPM registers. */ + wrt_reg_word(®->ctrl_status, 0x20); + rd_reg_word(®->ctrl_status); /* PCI Posting. */ + + /* FPM Soft Reset. */ + wrt_reg_word(®->fpm_diag_config, 0x100); + rd_reg_word(®->fpm_diag_config); /* PCI Posting. */ + + /* Toggle Fpm Reset. */ + if (!IS_QLA2200(ha)) { + wrt_reg_word(®->fpm_diag_config, 0x0); + rd_reg_word(®->fpm_diag_config); /* PCI Posting. */ + } + + /* Select frame buffer registers. */ + wrt_reg_word(®->ctrl_status, 0x10); + rd_reg_word(®->ctrl_status); /* PCI Posting. */ + + /* Reset frame buffer FIFOs. */ + if (IS_QLA2200(ha)) { + WRT_FB_CMD_REG(ha, reg, 0xa000); + RD_FB_CMD_REG(ha, reg); /* PCI Posting. */ + } else { + WRT_FB_CMD_REG(ha, reg, 0x00fc); + + /* Read back fb_cmd until zero or 3 seconds max */ + for (cnt = 0; cnt < 3000; cnt++) { + if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0) + break; + udelay(100); + } + } + + /* Select RISC module registers. */ + wrt_reg_word(®->ctrl_status, 0); + rd_reg_word(®->ctrl_status); /* PCI Posting. */ + + /* Reset RISC processor. */ + wrt_reg_word(®->hccr, HCCR_RESET_RISC); + rd_reg_word(®->hccr); /* PCI Posting. */ + + /* Release RISC processor. */ + wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); + rd_reg_word(®->hccr); /* PCI Posting. */ + } + + wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); + wrt_reg_word(®->hccr, HCCR_CLR_HOST_INT); + + /* Reset ISP chip. */ + wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); + + /* Wait for RISC to recover from reset. */ + if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { + /* + * It is necessary to for a delay here since the card doesn't + * respond to PCI reads during a reset. On some architectures + * this will result in an MCA. + */ + udelay(20); + for (cnt = 30000; cnt; cnt--) { + if ((rd_reg_word(®->ctrl_status) & + CSR_ISP_SOFT_RESET) == 0) + break; + udelay(100); + } + } else + udelay(10); + + /* Reset RISC processor. */ + wrt_reg_word(®->hccr, HCCR_RESET_RISC); + + wrt_reg_word(®->semaphore, 0); + + /* Release RISC processor. */ + wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); + rd_reg_word(®->hccr); /* PCI Posting. */ + + if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { + for (cnt = 0; cnt < 30000; cnt++) { + if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY) + break; + + udelay(100); + } + } else + udelay(100); + + /* Turn on master enable */ + cmd |= PCI_COMMAND_MASTER; + pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); + + /* Disable RISC pause on FPM parity error. */ + if (!IS_QLA2100(ha)) { + wrt_reg_word(®->hccr, HCCR_DISABLE_PARITY_PAUSE); + rd_reg_word(®->hccr); /* PCI Posting. */ + } + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_SUCCESS; +} + +/** + * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC. + * @vha: HA context + * + * Returns 0 on success. 
+ */ +static int +qla81xx_reset_mpi(scsi_qla_host_t *vha) +{ + uint16_t mb[4] = {0x1010, 0, 1, 0}; + + if (!IS_QLA81XX(vha->hw)) + return QLA_SUCCESS; + + return qla81xx_write_mpi_register(vha, mb); +} + +static int +qla_chk_risc_recovery(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + __le16 __iomem *mbptr = ®->mailbox0; + int i; + u16 mb[32]; + int rc = QLA_SUCCESS; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return rc; + + /* this check is only valid after RISC reset */ + mb[0] = rd_reg_word(mbptr); + mbptr++; + if (mb[0] == 0xf) { + rc = QLA_FUNCTION_FAILED; + + for (i = 1; i < 32; i++) { + mb[i] = rd_reg_word(mbptr); + mbptr++; + } + + ql_log(ql_log_warn, vha, 0x1015, + "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", + mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]); + ql_log(ql_log_warn, vha, 0x1015, + "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", + mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14], + mb[15]); + ql_log(ql_log_warn, vha, 0x1015, + "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", + mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22], + mb[23]); + ql_log(ql_log_warn, vha, 0x1015, + "RISC reset failed. mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", + mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30], + mb[31]); + } + return rc; +} + +/** + * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC. + * @vha: HA context + * + * Returns 0 on success. + */ +static inline int +qla24xx_reset_risc(scsi_qla_host_t *vha) +{ + unsigned long flags = 0; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + uint32_t cnt; + uint16_t wd; + static int abts_cnt; /* ISP abort retry counts */ + int rval = QLA_SUCCESS; + int print = 1; + + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Reset RISC. */ + wrt_reg_dword(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); + for (cnt = 0; cnt < 30000; cnt++) { + if ((rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) + break; + + udelay(10); + } + + if (!(rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE)) + set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e, + "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n", + rd_reg_dword(®->hccr), + rd_reg_dword(®->ctrl_status), + (rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE)); + + wrt_reg_dword(®->ctrl_status, + CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); + pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); + + udelay(100); + + /* Wait for firmware to complete NVRAM accesses. */ + rd_reg_word(®->mailbox0); + for (cnt = 10000; rd_reg_word(®->mailbox0) != 0 && + rval == QLA_SUCCESS; cnt--) { + barrier(); + if (cnt) + udelay(5); + else + rval = QLA_FUNCTION_TIMEOUT; + } + + if (rval == QLA_SUCCESS) + set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags); + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f, + "HCCR: 0x%x, MailBox0 Status 0x%x\n", + rd_reg_dword(®->hccr), + rd_reg_word(®->mailbox0)); + + /* Wait for soft-reset to complete. 
*/ + rd_reg_dword(®->ctrl_status); + for (cnt = 0; cnt < 60; cnt++) { + barrier(); + if ((rd_reg_dword(®->ctrl_status) & + CSRX_ISP_SOFT_RESET) == 0) + break; + + udelay(5); + } + if (!(rd_reg_dword(®->ctrl_status) & CSRX_ISP_SOFT_RESET)) + set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags); + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d, + "HCCR: 0x%x, Soft Reset status: 0x%x\n", + rd_reg_dword(®->hccr), + rd_reg_dword(®->ctrl_status)); + + /* If required, do an MPI FW reset now */ + if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) { + if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) { + if (++abts_cnt < 5) { + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + set_bit(MPI_RESET_NEEDED, &vha->dpc_flags); + } else { + /* + * We exhausted the ISP abort retries. We have to + * set the board offline. + */ + abts_cnt = 0; + vha->flags.online = 0; + } + } + } + + wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET); + rd_reg_dword(®->hccr); + + wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE); + rd_reg_dword(®->hccr); + + wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_RESET); + mdelay(10); + rd_reg_dword(®->hccr); + + wd = rd_reg_word(®->mailbox0); + for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) { + barrier(); + if (cnt) { + mdelay(1); + if (print && qla_chk_risc_recovery(vha)) + print = 0; + + wd = rd_reg_word(®->mailbox0); + } else { + rval = QLA_FUNCTION_TIMEOUT; + + ql_log(ql_log_warn, vha, 0x015e, + "RISC reset timeout\n"); + } + } + + if (rval == QLA_SUCCESS) + set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e, + "Host Risc 0x%x, mailbox0 0x%x\n", + rd_reg_dword(®->hccr), + rd_reg_word(®->mailbox0)); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f, + "Driver in %s mode\n", + IS_NOPOLLING_TYPE(ha) ? 
"Interrupt" : "Polling"); + + if (IS_NOPOLLING_TYPE(ha)) + ha->isp_ops->enable_intrs(ha); + + return rval; +} + +static void +qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data) +{ + struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; + + wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); + *data = rd_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET); +} + +static void +qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data) +{ + struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; + + wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); + wrt_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data); +} + +static void +qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha) +{ + uint32_t wd32 = 0; + uint delta_msec = 100; + uint elapsed_msec = 0; + uint timeout_msec; + ulong n; + + if (vha->hw->pdev->subsystem_device != 0x0175 && + vha->hw->pdev->subsystem_device != 0x0240) + return; + + wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE); + udelay(100); + +attempt: + timeout_msec = TIMEOUT_SEMAPHORE; + n = timeout_msec / delta_msec; + while (n--) { + qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET); + qla25xx_read_risc_sema_reg(vha, &wd32); + if (wd32 & RISC_SEMAPHORE) + break; + msleep(delta_msec); + elapsed_msec += delta_msec; + if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED) + goto force; + } + + if (!(wd32 & RISC_SEMAPHORE)) + goto force; + + if (!(wd32 & RISC_SEMAPHORE_FORCE)) + goto acquired; + + qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR); + timeout_msec = TIMEOUT_SEMAPHORE_FORCE; + n = timeout_msec / delta_msec; + while (n--) { + qla25xx_read_risc_sema_reg(vha, &wd32); + if (!(wd32 & RISC_SEMAPHORE_FORCE)) + break; + msleep(delta_msec); + elapsed_msec += delta_msec; + if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED) + goto force; + } + + if (wd32 & RISC_SEMAPHORE_FORCE) + qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR); + + goto attempt; + +force: + qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET); + +acquired: + return; +} + +/** + * qla24xx_reset_chip() - Reset ISP24xx chip. + * @vha: HA context + * + * Returns 0 on success. + */ +int +qla24xx_reset_chip(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + int rval = QLA_FUNCTION_FAILED; + + if (pci_channel_offline(ha->pdev) && + ha->flags.pci_channel_io_perm_failure) { + return rval; + } + + ha->isp_ops->disable_intrs(ha); + + qla25xx_manipulate_risc_semaphore(vha); + + /* Perform RISC reset. */ + rval = qla24xx_reset_risc(vha); + + return rval; +} + +/** + * qla2x00_chip_diag() - Test chip for proper operation. + * @vha: HA context + * + * Returns 0 on success. + */ +int +qla2x00_chip_diag(scsi_qla_host_t *vha) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + unsigned long flags = 0; + uint16_t data; + uint32_t cnt; + uint16_t mb[5]; + struct req_que *req = ha->req_q_map[0]; + + /* Assume a failed state */ + rval = QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n", + ®->flash_address); + + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Reset ISP chip. */ + wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); + + /* + * We need to have a delay here since the card will not respond while + * in reset causing an MCA on some architectures. 
+ */ + udelay(20); + data = qla2x00_debounce_register(®->ctrl_status); + for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) { + udelay(5); + data = rd_reg_word(®->ctrl_status); + barrier(); + } + + if (!cnt) + goto chip_diag_failed; + + ql_dbg(ql_dbg_init, vha, 0x007c, + "Reset register cleared by chip reset.\n"); + + /* Reset RISC processor. */ + wrt_reg_word(®->hccr, HCCR_RESET_RISC); + wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); + + /* Workaround for QLA2312 PCI parity error */ + if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { + data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0)); + for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) { + udelay(5); + data = RD_MAILBOX_REG(ha, reg, 0); + barrier(); + } + } else + udelay(10); + + if (!cnt) + goto chip_diag_failed; + + /* Check product ID of chip */ + ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n"); + + mb[1] = RD_MAILBOX_REG(ha, reg, 1); + mb[2] = RD_MAILBOX_REG(ha, reg, 2); + mb[3] = RD_MAILBOX_REG(ha, reg, 3); + mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4)); + if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) || + mb[3] != PROD_ID_3) { + ql_log(ql_log_warn, vha, 0x0062, + "Wrong product ID = 0x%x,0x%x,0x%x.\n", + mb[1], mb[2], mb[3]); + + goto chip_diag_failed; + } + ha->product_id[0] = mb[1]; + ha->product_id[1] = mb[2]; + ha->product_id[2] = mb[3]; + ha->product_id[3] = mb[4]; + + /* Adjust fw RISC transfer size */ + if (req->length > 1024) + ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; + else + ha->fw_transfer_size = REQUEST_ENTRY_SIZE * + req->length; + + if (IS_QLA2200(ha) && + RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { + /* Limit firmware transfer size with a 2200A */ + ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n"); + + ha->device_type |= DT_ISP2200A; + ha->fw_transfer_size = 128; + } + + /* Wrap Incoming Mailboxes Test. */ + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n"); + rval = qla2x00_mbx_reg_test(vha); + if (rval) + ql_log(ql_log_warn, vha, 0x0080, + "Failed mailbox send register test.\n"); + else + /* Flag a successful rval */ + rval = QLA_SUCCESS; + spin_lock_irqsave(&ha->hardware_lock, flags); + +chip_diag_failed: + if (rval) + ql_log(ql_log_info, vha, 0x0081, + "Chip diagnostics **** FAILED ****.\n"); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return (rval); +} + +/** + * qla24xx_chip_diag() - Test ISP24xx for proper operation. + * @vha: HA context + * + * Returns 0 on success. + */ +int +qla24xx_chip_diag(scsi_qla_host_t *vha) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + + if (IS_P3P_TYPE(ha)) + return QLA_SUCCESS; + + ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; + + rval = qla2x00_mbx_reg_test(vha); + if (rval) { + ql_log(ql_log_warn, vha, 0x0082, + "Failed mailbox send register test.\n"); + } else { + /* Flag a successful rval */ + rval = QLA_SUCCESS; + } + + return rval; +} + +static void +qla2x00_init_fce_trace(scsi_qla_host_t *vha) +{ + int rval; + dma_addr_t tc_dma; + void *tc; + struct qla_hw_data *ha = vha->hw; + + if (!IS_FWI2_CAPABLE(ha)) + return; + + if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return; + + if (ha->fce) { + ql_dbg(ql_dbg_init, vha, 0x00bd, + "%s: FCE Mem is already allocated.\n", + __func__); + return; + } + + /* Allocate memory for Fibre Channel Event Buffer. 
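+ *
+ * The buffer is a coherent DMA allocation of FCE_SIZE bytes; if the
+ * firmware then refuses to enable FCE tracing, the allocation is
+ * rolled back. Simplified sketch of the pattern (illustrative, not
+ * the exact code):
+ *
+ *     tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
+ *                             GFP_KERNEL);
+ *     if (tc && qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
+ *                                        ha->fce_mb, &ha->fce_bufs))
+ *             dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);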
*/ + tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, + GFP_KERNEL); + if (!tc) { + ql_log(ql_log_warn, vha, 0x00be, + "Unable to allocate (%d KB) for FCE.\n", + FCE_SIZE / 1024); + return; + } + + rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, + ha->fce_mb, &ha->fce_bufs); + if (rval) { + ql_log(ql_log_warn, vha, 0x00bf, + "Unable to initialize FCE (%d).\n", rval); + dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma); + return; + } + + ql_dbg(ql_dbg_init, vha, 0x00c0, + "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024); + + ha->flags.fce_enabled = 1; + ha->fce_dma = tc_dma; + ha->fce = tc; +} + +static void +qla2x00_init_eft_trace(scsi_qla_host_t *vha) +{ + int rval; + dma_addr_t tc_dma; + void *tc; + struct qla_hw_data *ha = vha->hw; + + if (!IS_FWI2_CAPABLE(ha)) + return; + + if (ha->eft) { + ql_dbg(ql_dbg_init, vha, 0x00bd, + "%s: EFT Mem is already allocated.\n", + __func__); + return; + } + + /* Allocate memory for Extended Trace Buffer. */ + tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, + GFP_KERNEL); + if (!tc) { + ql_log(ql_log_warn, vha, 0x00c1, + "Unable to allocate (%d KB) for EFT.\n", + EFT_SIZE / 1024); + return; + } + + rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); + if (rval) { + ql_log(ql_log_warn, vha, 0x00c2, + "Unable to initialize EFT (%d).\n", rval); + dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma); + return; + } + + ql_dbg(ql_dbg_init, vha, 0x00c3, + "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024); + + ha->eft_dma = tc_dma; + ha->eft = tc; +} + +static void +qla2x00_alloc_offload_mem(scsi_qla_host_t *vha) +{ + qla2x00_init_fce_trace(vha); + qla2x00_init_eft_trace(vha); +} + +void +qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) +{ + uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size, + eft_size, fce_size, mq_size; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + struct rsp_que *rsp = ha->rsp_q_map[0]; + struct qla2xxx_fw_dump *fw_dump; + + if (ha->fw_dump) { + ql_dbg(ql_dbg_init, vha, 0x00bd, + "Firmware dump already allocated.\n"); + return; + } + + ha->fw_dumped = 0; + ha->fw_dump_cap_flags = 0; + dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0; + req_q_size = rsp_q_size = 0; + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) { + fixed_size = sizeof(struct qla2100_fw_dump); + } else if (IS_QLA23XX(ha)) { + fixed_size = offsetof(struct qla2300_fw_dump, data_ram); + mem_size = (ha->fw_memory_size - 0x11000 + 1) * + sizeof(uint16_t); + } else if (IS_FWI2_CAPABLE(ha)) { + if (IS_QLA83XX(ha)) + fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); + else if (IS_QLA81XX(ha)) + fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); + else if (IS_QLA25XX(ha)) + fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem); + else + fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem); + + mem_size = (ha->fw_memory_size - 0x100000 + 1) * + sizeof(uint32_t); + if (ha->mqenable) { + if (!IS_QLA83XX(ha)) + mq_size = sizeof(struct qla2xxx_mq_chain); + /* + * Allocate maximum buffer size for all queues - Q0. + * Resizing must be done at end-of-dump processing. 
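+ *
+ * That is, space is reserved for every ring except queue 0 at its
+ * full configured length:
+ *
+ *     mq_size += (max_req_queues - 1) * req->length * sizeof(request_t)
+ *              + (max_rsp_queues - 1) * rsp->length * sizeof(response_t);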
+ */ + mq_size += (ha->max_req_queues - 1) * + (req->length * sizeof(request_t)); + mq_size += (ha->max_rsp_queues - 1) * + (rsp->length * sizeof(response_t)); + } + if (ha->tgt.atio_ring) + mq_size += ha->tgt.atio_q_length * sizeof(request_t); + + qla2x00_init_fce_trace(vha); + if (ha->fce) + fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; + qla2x00_init_eft_trace(vha); + if (ha->eft) + eft_size = EFT_SIZE; + } + + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + struct fwdt *fwdt = ha->fwdt; + uint j; + + for (j = 0; j < 2; j++, fwdt++) { + if (!fwdt->template) { + ql_dbg(ql_dbg_init, vha, 0x00ba, + "-> fwdt%u no template\n", j); + continue; + } + ql_dbg(ql_dbg_init, vha, 0x00fa, + "-> fwdt%u calculating fwdump size...\n", j); + fwdt->dump_size = qla27xx_fwdt_calculate_dump_size( + vha, fwdt->template); + ql_dbg(ql_dbg_init, vha, 0x00fa, + "-> fwdt%u calculated fwdump size = %#lx bytes\n", + j, fwdt->dump_size); + dump_size += fwdt->dump_size; + } + /* Add space for spare MPI fw dump. */ + dump_size += ha->fwdt[1].dump_size; + } else { + req_q_size = req->length * sizeof(request_t); + rsp_q_size = rsp->length * sizeof(response_t); + dump_size = offsetof(struct qla2xxx_fw_dump, isp); + dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + + eft_size; + ha->chain_offset = dump_size; + dump_size += mq_size + fce_size; + if (ha->exchoffld_buf) + dump_size += sizeof(struct qla2xxx_offld_chain) + + ha->exchoffld_size; + if (ha->exlogin_buf) + dump_size += sizeof(struct qla2xxx_offld_chain) + + ha->exlogin_size; + } + + if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) { + + ql_dbg(ql_dbg_init, vha, 0x00c5, + "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n", + __func__, dump_size, ha->fw_dump_len, + ha->fw_dump_alloc_len); + + fw_dump = vmalloc(dump_size); + if (!fw_dump) { + ql_log(ql_log_warn, vha, 0x00c4, + "Unable to allocate (%d KB) for firmware dump.\n", + dump_size / 1024); + } else { + mutex_lock(&ha->optrom_mutex); + if (ha->fw_dumped) { + memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len); + vfree(ha->fw_dump); + ha->fw_dump = fw_dump; + ha->fw_dump_alloc_len = dump_size; + ql_dbg(ql_dbg_init, vha, 0x00c5, + "Re-Allocated (%d KB) and save firmware dump.\n", + dump_size / 1024); + } else { + vfree(ha->fw_dump); + ha->fw_dump = fw_dump; + + ha->fw_dump_len = ha->fw_dump_alloc_len = + dump_size; + ql_dbg(ql_dbg_init, vha, 0x00c5, + "Allocated (%d KB) for firmware dump.\n", + dump_size / 1024); + + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + ha->mpi_fw_dump = (char *)fw_dump + + ha->fwdt[1].dump_size; + mutex_unlock(&ha->optrom_mutex); + return; + } + + ha->fw_dump->signature[0] = 'Q'; + ha->fw_dump->signature[1] = 'L'; + ha->fw_dump->signature[2] = 'G'; + ha->fw_dump->signature[3] = 'C'; + ha->fw_dump->version = htonl(1); + + ha->fw_dump->fixed_size = htonl(fixed_size); + ha->fw_dump->mem_size = htonl(mem_size); + ha->fw_dump->req_q_size = htonl(req_q_size); + ha->fw_dump->rsp_q_size = htonl(rsp_q_size); + + ha->fw_dump->eft_size = htonl(eft_size); + ha->fw_dump->eft_addr_l = + htonl(LSD(ha->eft_dma)); + ha->fw_dump->eft_addr_h = + htonl(MSD(ha->eft_dma)); + + ha->fw_dump->header_size = + htonl(offsetof + (struct qla2xxx_fw_dump, isp)); + } + mutex_unlock(&ha->optrom_mutex); + } + } +} + +static int +qla81xx_mpi_sync(scsi_qla_host_t *vha) +{ +#define MPS_MASK 0xe0 + int rval; + uint16_t dc; + uint32_t dw; + + if (!IS_QLA81XX(vha->hw)) + return QLA_SUCCESS; + + rval = qla2x00_write_ram_word(vha, 0x7c00, 1); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x0105, 
+ "Unable to acquire semaphore.\n"); + goto done; + } + + pci_read_config_word(vha->hw->pdev, 0x54, &dc); + rval = qla2x00_read_ram_word(vha, 0x7a15, &dw); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n"); + goto done_release; + } + + dc &= MPS_MASK; + if (dc == (dw & MPS_MASK)) + goto done_release; + + dw &= ~MPS_MASK; + dw |= dc; + rval = qla2x00_write_ram_word(vha, 0x7a15, dw); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n"); + } + +done_release: + rval = qla2x00_write_ram_word(vha, 0x7c00, 0); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x006d, + "Unable to release semaphore.\n"); + } + +done: + return rval; +} + +int +qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req) +{ + /* Don't try to reallocate the array */ + if (req->outstanding_cmds) + return QLA_SUCCESS; + + if (!IS_FWI2_CAPABLE(ha)) + req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS; + else { + if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count) + req->num_outstanding_cmds = ha->cur_fw_xcb_count; + else + req->num_outstanding_cmds = ha->cur_fw_iocb_count; + } + + req->outstanding_cmds = kcalloc(req->num_outstanding_cmds, + sizeof(srb_t *), + GFP_KERNEL); + + if (!req->outstanding_cmds) { + /* + * Try to allocate a minimal size just so we can get through + * initialization. + */ + req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS; + req->outstanding_cmds = kcalloc(req->num_outstanding_cmds, + sizeof(srb_t *), + GFP_KERNEL); + + if (!req->outstanding_cmds) { + ql_log(ql_log_fatal, NULL, 0x0126, + "Failed to allocate memory for " + "outstanding_cmds for req_que %p.\n", req); + req->num_outstanding_cmds = 0; + return QLA_FUNCTION_FAILED; + } + } + + return QLA_SUCCESS; +} + +#define PRINT_FIELD(_field, _flag, _str) { \ + if (a0->_field & _flag) {\ + if (p) {\ + strcat(ptr, "|");\ + ptr++;\ + leftover--;\ + } \ + len = snprintf(ptr, leftover, "%s", _str); \ + p = 1;\ + leftover -= len;\ + ptr += len; \ + } \ +} + +static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha) +{ +#define STR_LEN 64 + struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data; + u8 str[STR_LEN], *ptr, p; + int leftover, len; + + memset(str, 0, STR_LEN); + snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name); + ql_dbg(ql_dbg_init, vha, 0x015a, + "SFP MFG Name: %s\n", str); + + memset(str, 0, STR_LEN); + snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn); + ql_dbg(ql_dbg_init, vha, 0x015c, + "SFP Part Name: %s\n", str); + + /* media */ + memset(str, 0, STR_LEN); + ptr = str; + leftover = STR_LEN; + p = len = 0; + PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX"); + PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair"); + PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax"); + PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax"); + PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um"); + PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um"); + PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode"); + ql_dbg(ql_dbg_init, vha, 0x0160, + "SFP Media: %s\n", str); + + /* link length */ + memset(str, 0, STR_LEN); + ptr = str; + leftover = STR_LEN; + p = len = 0; + PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long"); + PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short"); + PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate"); + PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long"); + PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium"); + ql_dbg(ql_dbg_init, vha, 0x0196, + "SFP Link Length: %s\n", str); + + memset(str, 0, STR_LEN); + ptr = str; + leftover = STR_LEN; + p = len = 0; + 
PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)"); + PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)"); + PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)"); + PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)"); + PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)"); + ql_dbg(ql_dbg_init, vha, 0x016e, + "SFP FC Link Tech: %s\n", str); + + if (a0->length_km) + ql_dbg(ql_dbg_init, vha, 0x016f, + "SFP Distant: %d km\n", a0->length_km); + if (a0->length_100m) + ql_dbg(ql_dbg_init, vha, 0x0170, + "SFP Distant: %d m\n", a0->length_100m*100); + if (a0->length_50um_10m) + ql_dbg(ql_dbg_init, vha, 0x0189, + "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10); + if (a0->length_62um_10m) + ql_dbg(ql_dbg_init, vha, 0x018a, + "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10); + if (a0->length_om4_10m) + ql_dbg(ql_dbg_init, vha, 0x0194, + "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10); + if (a0->length_om3_10m) + ql_dbg(ql_dbg_init, vha, 0x0195, + "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10); +} + + +/** + * qla24xx_detect_sfp() + * + * @vha: adapter state pointer. + * + * @return + * 0 -- Configure firmware to use short-range settings -- normal + * buffer-to-buffer credits. + * + * 1 -- Configure firmware to use long-range settings -- extra + * buffer-to-buffer credits should be allocated with + * ha->lr_distance containing distance settings from NVRAM or SFP + * (if supported). + */ +int +qla24xx_detect_sfp(scsi_qla_host_t *vha) +{ + int rc, used_nvram; + struct sff_8247_a0 *a; + struct qla_hw_data *ha = vha->hw; + struct nvram_81xx *nv = ha->nvram; +#define LR_DISTANCE_UNKNOWN 2 + static const char * const types[] = { "Short", "Long" }; + static const char * const lengths[] = { "(10km)", "(5km)", "" }; + u8 ll = 0; + + /* Seed with NVRAM settings. */ + used_nvram = 0; + ha->flags.lr_detected = 0; + if (IS_BPM_RANGE_CAPABLE(ha) && + (nv->enhanced_features & NEF_LR_DIST_ENABLE)) { + used_nvram = 1; + ha->flags.lr_detected = 1; + ha->lr_distance = + (nv->enhanced_features >> LR_DIST_NV_POS) + & LR_DIST_NV_MASK; + } + + if (!IS_BPM_ENABLED(vha)) + goto out; + /* Determine SR/LR capabilities of SFP/Transceiver. */ + rc = qla2x00_read_sfp_dev(vha, NULL, 0); + if (rc) + goto out; + + used_nvram = 0; + a = (struct sff_8247_a0 *)vha->hw->sfp_data; + qla2xxx_print_sfp_info(vha); + + ha->flags.lr_detected = 0; + ll = a->fc_ll_cc7; + if (ll & FC_LL_VL || ll & FC_LL_L) { + /* Long range, track length. */ + ha->flags.lr_detected = 1; + + if (a->length_km > 5 || a->length_100m > 50) + ha->lr_distance = LR_DISTANCE_10K; + else + ha->lr_distance = LR_DISTANCE_5K; + } + +out: + ql_dbg(ql_dbg_async, vha, 0x507b, + "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n", + types[ha->flags.lr_detected], + ha->flags.lr_detected ? 
lengths[ha->lr_distance] : + lengths[LR_DISTANCE_UNKNOWN], + used_nvram, ll, ha->flags.lr_detected, ha->lr_distance); + return ha->flags.lr_detected; +} + +static void __qla_adjust_iocb_limit(struct qla_qpair *qpair) +{ + u8 num_qps; + u16 limit; + struct qla_hw_data *ha = qpair->vha->hw; + + num_qps = ha->num_qpairs + 1; + limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100; + + qpair->fwres.iocbs_total = ha->orig_fw_iocb_count; + qpair->fwres.iocbs_limit = limit; + qpair->fwres.iocbs_qp_limit = limit / num_qps; + + qpair->fwres.exch_total = ha->orig_fw_xcb_count; + qpair->fwres.exch_limit = (ha->orig_fw_xcb_count * + QLA_IOCB_PCT_LIMIT) / 100; +} + +void qla_init_iocb_limit(scsi_qla_host_t *vha) +{ + u8 i; + struct qla_hw_data *ha = vha->hw; + + __qla_adjust_iocb_limit(ha->base_qpair); + ha->base_qpair->fwres.iocbs_used = 0; + ha->base_qpair->fwres.exch_used = 0; + + for (i = 0; i < ha->max_qpairs; i++) { + if (ha->queue_pair_map[i]) { + __qla_adjust_iocb_limit(ha->queue_pair_map[i]); + ha->queue_pair_map[i]->fwres.iocbs_used = 0; + ha->queue_pair_map[i]->fwres.exch_used = 0; + } + } + + ha->fwres.iocb_total = ha->orig_fw_iocb_count; + ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100; + ha->fwres.exch_total = ha->orig_fw_xcb_count; + ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100; + + atomic_set(&ha->fwres.iocb_used, 0); + atomic_set(&ha->fwres.exch_used, 0); +} + +void qla_adjust_iocb_limit(scsi_qla_host_t *vha) +{ + u8 i; + struct qla_hw_data *ha = vha->hw; + + __qla_adjust_iocb_limit(ha->base_qpair); + + for (i = 0; i < ha->max_qpairs; i++) { + if (ha->queue_pair_map[i]) + __qla_adjust_iocb_limit(ha->queue_pair_map[i]); + } +} + +/** + * qla2x00_setup_chip() - Load and start RISC firmware. + * @vha: HA context + * + * Returns 0 on success. + */ +static int +qla2x00_setup_chip(scsi_qla_host_t *vha) +{ + int rval; + uint32_t srisc_address = 0; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + unsigned long flags; + uint16_t fw_major_version; + int done_once = 0; + + if (IS_P3P_TYPE(ha)) { + rval = ha->isp_ops->load_risc(vha, &srisc_address); + if (rval == QLA_SUCCESS) { + qla2x00_stop_firmware(vha); + goto enable_82xx_npiv; + } else + goto failed; + } + + if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { + /* Disable SRAM, Instruction RAM and GP RAM parity. */ + spin_lock_irqsave(&ha->hardware_lock, flags); + wrt_reg_word(®->hccr, (HCCR_ENABLE_PARITY + 0x0)); + rd_reg_word(®->hccr); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } + + qla81xx_mpi_sync(vha); + +execute_fw_with_lr: + /* Load firmware sequences */ + rval = ha->isp_ops->load_risc(vha, &srisc_address); + if (rval == QLA_SUCCESS) { + ql_dbg(ql_dbg_init, vha, 0x00c9, + "Verifying Checksum of loaded RISC code.\n"); + + rval = qla2x00_verify_checksum(vha, srisc_address); + if (rval == QLA_SUCCESS) { + /* Start firmware execution. */ + ql_dbg(ql_dbg_init, vha, 0x00ca, + "Starting firmware.\n"); + + if (ql2xexlogins) + ha->flags.exlogins_enabled = 1; + + if (qla_is_exch_offld_enabled(vha)) + ha->flags.exchoffld_enabled = 1; + + rval = qla2x00_execute_fw(vha, srisc_address); + /* Retrieve firmware information. */ + if (rval == QLA_SUCCESS) { + /* Enable BPM support? */ + if (!done_once++ && qla24xx_detect_sfp(vha)) { + ql_dbg(ql_dbg_init, vha, 0x00ca, + "Re-starting firmware -- BPM.\n"); + /* Best-effort - re-init. 
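+ *
+ * A long-range SFP was just detected on the first pass, so the chip
+ * is reset and the firmware reloaded once more so the long-range
+ * (extra buffer-to-buffer credit) settings take effect; the
+ * done_once counter keeps this from looping:
+ *
+ *     if (!done_once++ && qla24xx_detect_sfp(vha)) {
+ *             ha->isp_ops->reset_chip(vha);
+ *             ha->isp_ops->chip_diag(vha);
+ *             goto execute_fw_with_lr;
+ *     }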
*/ + ha->isp_ops->reset_chip(vha); + ha->isp_ops->chip_diag(vha); + goto execute_fw_with_lr; + } + + if (IS_ZIO_THRESHOLD_CAPABLE(ha)) + qla27xx_set_zio_threshold(vha, + ha->last_zio_threshold); + + rval = qla2x00_set_exlogins_buffer(vha); + if (rval != QLA_SUCCESS) + goto failed; + + rval = qla2x00_set_exchoffld_buffer(vha); + if (rval != QLA_SUCCESS) + goto failed; + +enable_82xx_npiv: + fw_major_version = ha->fw_major_version; + if (IS_P3P_TYPE(ha)) + qla82xx_check_md_needed(vha); + else + rval = qla2x00_get_fw_version(vha); + if (rval != QLA_SUCCESS) + goto failed; + ha->flags.npiv_supported = 0; + if (IS_QLA2XXX_MIDTYPE(ha) && + (ha->fw_attributes & BIT_2)) { + ha->flags.npiv_supported = 1; + if ((!ha->max_npiv_vports) || + ((ha->max_npiv_vports + 1) % + MIN_MULTI_ID_FABRIC)) + ha->max_npiv_vports = + MIN_MULTI_ID_FABRIC - 1; + } + qla2x00_get_resource_cnts(vha); + qla_init_iocb_limit(vha); + + /* + * Allocate the array of outstanding commands + * now that we know the firmware resources. + */ + rval = qla2x00_alloc_outstanding_cmds(ha, + vha->req); + if (rval != QLA_SUCCESS) + goto failed; + + if (!fw_major_version && !(IS_P3P_TYPE(ha))) + qla2x00_alloc_offload_mem(vha); + + if (ql2xallocfwdump && !(IS_P3P_TYPE(ha))) + qla2x00_alloc_fw_dump(vha); + + } else { + goto failed; + } + } else { + ql_log(ql_log_fatal, vha, 0x00cd, + "ISP Firmware failed checksum.\n"); + goto failed; + } + + /* Enable PUREX PASSTHRU */ + if (ql2xrdpenable || ha->flags.scm_supported_f || + ha->flags.edif_enabled) + qla25xx_set_els_cmds_supported(vha); + } else + goto failed; + + if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { + /* Enable proper parity. */ + spin_lock_irqsave(&ha->hardware_lock, flags); + if (IS_QLA2300(ha)) + /* SRAM parity */ + wrt_reg_word(®->hccr, HCCR_ENABLE_PARITY + 0x1); + else + /* SRAM, Instruction RAM and GP RAM parity */ + wrt_reg_word(®->hccr, HCCR_ENABLE_PARITY + 0x7); + rd_reg_word(®->hccr); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } + + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + ha->flags.fac_supported = 1; + else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { + uint32_t size; + + rval = qla81xx_fac_get_sector_size(vha, &size); + if (rval == QLA_SUCCESS) { + ha->flags.fac_supported = 1; + ha->fdt_block_size = size << 2; + } else { + ql_log(ql_log_warn, vha, 0x00ce, + "Unsupported FAC firmware (%d.%02d.%02d).\n", + ha->fw_major_version, ha->fw_minor_version, + ha->fw_subminor_version); + + if (IS_QLA83XX(ha)) { + ha->flags.fac_supported = 0; + rval = QLA_SUCCESS; + } + } + } +failed: + if (rval) { + ql_log(ql_log_fatal, vha, 0x00cf, + "Setup chip ****FAILED****.\n"); + } + + return (rval); +} + +/** + * qla2x00_init_response_q_entries() - Initializes response queue entries. + * @rsp: response queue + * + * Beginning of request ring has initialization control block already built + * by nvram config routine. + * + * Returns 0 on success. + */ +void +qla2x00_init_response_q_entries(struct rsp_que *rsp) +{ + uint16_t cnt; + response_t *pkt; + + rsp->ring_ptr = rsp->ring; + rsp->ring_index = 0; + rsp->status_srb = NULL; + pkt = rsp->ring_ptr; + for (cnt = 0; cnt < rsp->length; cnt++) { + pkt->signature = RESPONSE_PROCESSED; + pkt++; + } +} + +/** + * qla2x00_update_fw_options() - Read and process firmware options. + * @vha: HA context + * + * Returns 0 on success. 
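+ *
+ * Worked example of the 1G serial-link packing below (illustrative
+ * register values only): with swing = 0x3, emphasis = 0x2,
+ * tx_sens = 0x4 and rx_sens = 0x5 on an ISP2300/2312-class part,
+ *
+ *	fw_options[10] = (0x2 << 14) | (0x3 << 8) | (0x4 << 4) | 0x5
+ *	               = 0x8345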
+ */ +void +qla2x00_update_fw_options(scsi_qla_host_t *vha) +{ + uint16_t swing, emphasis, tx_sens, rx_sens; + struct qla_hw_data *ha = vha->hw; + + memset(ha->fw_options, 0, sizeof(ha->fw_options)); + qla2x00_get_fw_options(vha, ha->fw_options); + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) + return; + + /* Serial Link options. */ + ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115, + "Serial link options.\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109, + ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options)); + + ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; + if (ha->fw_seriallink_options[3] & BIT_2) { + ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING; + + /* 1G settings */ + swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0); + emphasis = (ha->fw_seriallink_options[2] & + (BIT_4 | BIT_3)) >> 3; + tx_sens = ha->fw_seriallink_options[0] & + (BIT_3 | BIT_2 | BIT_1 | BIT_0); + rx_sens = (ha->fw_seriallink_options[0] & + (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4; + ha->fw_options[10] = (emphasis << 14) | (swing << 8); + if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { + if (rx_sens == 0x0) + rx_sens = 0x3; + ha->fw_options[10] |= (tx_sens << 4) | rx_sens; + } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) + ha->fw_options[10] |= BIT_5 | + ((rx_sens & (BIT_1 | BIT_0)) << 2) | + (tx_sens & (BIT_1 | BIT_0)); + + /* 2G settings */ + swing = (ha->fw_seriallink_options[2] & + (BIT_7 | BIT_6 | BIT_5)) >> 5; + emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0); + tx_sens = ha->fw_seriallink_options[1] & + (BIT_3 | BIT_2 | BIT_1 | BIT_0); + rx_sens = (ha->fw_seriallink_options[1] & + (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4; + ha->fw_options[11] = (emphasis << 14) | (swing << 8); + if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { + if (rx_sens == 0x0) + rx_sens = 0x3; + ha->fw_options[11] |= (tx_sens << 4) | rx_sens; + } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) + ha->fw_options[11] |= BIT_5 | + ((rx_sens & (BIT_1 | BIT_0)) << 2) | + (tx_sens & (BIT_1 | BIT_0)); + } + + /* FCP2 options. */ + /* Return command IOCBs without waiting for an ABTS to complete. */ + ha->fw_options[3] |= BIT_13; + + /* LED scheme. */ + if (ha->flags.enable_led_scheme) + ha->fw_options[2] |= BIT_12; + + /* Detect ISP6312. */ + if (IS_QLA6312(ha)) + ha->fw_options[2] |= BIT_13; + + /* Set Retry FLOGI in case of P2P connection */ + if (ha->operating_mode == P2P) { + ha->fw_options[2] |= BIT_3; + ql_dbg(ql_dbg_disc, vha, 0x2100, + "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n", + __func__, ha->fw_options[2]); + } + + /* Update firmware options. */ + qla2x00_set_fw_options(vha, ha->fw_options); +} + +void +qla24xx_update_fw_options(scsi_qla_host_t *vha) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + + if (IS_P3P_TYPE(ha)) + return; + + /* Hold status IOCBs until ABTS response received. 
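+	 * (fw_options[3] BIT_12, controlled by the ql2xfwholdabts switch
+	 * checked just below; when set, the firmware defers the command
+	 * status IOCB until the ABTS response has arrived.)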
*/ + if (ql2xfwholdabts) + ha->fw_options[3] |= BIT_12; + + /* Set Retry FLOGI in case of P2P connection */ + if (ha->operating_mode == P2P) { + ha->fw_options[2] |= BIT_3; + ql_dbg(ql_dbg_disc, vha, 0x2101, + "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n", + __func__, ha->fw_options[2]); + } + + /* Move PUREX, ABTS RX & RIDA to ATIOQ */ + if (ql2xmvasynctoatio && !ha->flags.edif_enabled && + (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) { + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) + ha->fw_options[2] |= BIT_11; + else + ha->fw_options[2] &= ~BIT_11; + } + + if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { + /* + * Tell FW to track each exchange to prevent + * driver from using stale exchange. + */ + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) + ha->fw_options[2] |= BIT_4; + else + ha->fw_options[2] &= ~(BIT_4); + + /* Reserve 1/2 of emergency exchanges for ELS.*/ + if (qla2xuseresexchforels) + ha->fw_options[2] |= BIT_8; + else + ha->fw_options[2] &= ~BIT_8; + + /* + * N2N: set Secure=1 for PLOGI ACC and + * fw shal not send PRLI after PLOGI Acc + */ + if (ha->flags.edif_enabled && + DBELL_ACTIVE(vha)) { + ha->fw_options[3] |= BIT_15; + ha->flags.n2n_fw_acc_sec = 1; + } else { + ha->fw_options[3] &= ~BIT_15; + ha->flags.n2n_fw_acc_sec = 0; + } + } + + if (ql2xrdpenable || ha->flags.scm_supported_f || + ha->flags.edif_enabled) + ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB; + + /* Enable Async 8130/8131 events -- transceiver insertion/removal */ + if (IS_BPM_RANGE_CAPABLE(ha)) + ha->fw_options[3] |= BIT_10; + + ql_dbg(ql_dbg_init, vha, 0x00e8, + "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", + __func__, ha->fw_options[1], ha->fw_options[2], + ha->fw_options[3], vha->host->active_mode); + + if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3]) + qla2x00_set_fw_options(vha, ha->fw_options); + + /* Update Serial Link options. */ + if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) + return; + + rval = qla2x00_set_serdes_params(vha, + le16_to_cpu(ha->fw_seriallink_options24[1]), + le16_to_cpu(ha->fw_seriallink_options24[2]), + le16_to_cpu(ha->fw_seriallink_options24[3])); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x0104, + "Unable to update Serial Link options (%x).\n", rval); + } +} + +void +qla2x00_config_rings(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + struct req_que *req = ha->req_q_map[0]; + struct rsp_que *rsp = ha->rsp_q_map[0]; + + /* Setup ring parameters in initialization control block. */ + ha->init_cb->request_q_outpointer = cpu_to_le16(0); + ha->init_cb->response_q_inpointer = cpu_to_le16(0); + ha->init_cb->request_q_length = cpu_to_le16(req->length); + ha->init_cb->response_q_length = cpu_to_le16(rsp->length); + put_unaligned_le64(req->dma, &ha->init_cb->request_q_address); + put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address); + + wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0); + wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0); + wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0); + wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0); + rd_reg_word(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. 
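+	 * Reading ISP_RSP_Q_OUT back forces the four posted queue-pointer
+	 * writes above to reach the adapter before this routine returns
+	 * (the usual read-to-flush rule for posted PCI writes).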
*/ +} + +void +qla24xx_config_rings(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + device_reg_t *reg = ISP_QUE_REG(ha, 0); + struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; + struct qla_msix_entry *msix; + struct init_cb_24xx *icb; + uint16_t rid = 0; + struct req_que *req = ha->req_q_map[0]; + struct rsp_que *rsp = ha->rsp_q_map[0]; + + /* Setup ring parameters in initialization control block. */ + icb = (struct init_cb_24xx *)ha->init_cb; + icb->request_q_outpointer = cpu_to_le16(0); + icb->response_q_inpointer = cpu_to_le16(0); + icb->request_q_length = cpu_to_le16(req->length); + icb->response_q_length = cpu_to_le16(rsp->length); + put_unaligned_le64(req->dma, &icb->request_q_address); + put_unaligned_le64(rsp->dma, &icb->response_q_address); + + /* Setup ATIO queue dma pointers for target mode */ + icb->atio_q_inpointer = cpu_to_le16(0); + icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length); + put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address); + + if (IS_SHADOW_REG_CAPABLE(ha)) + icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29); + + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { + icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS); + icb->rid = cpu_to_le16(rid); + if (ha->flags.msix_enabled) { + msix = &ha->msix_entries[1]; + ql_dbg(ql_dbg_init, vha, 0x0019, + "Registering vector 0x%x for base que.\n", + msix->entry); + icb->msix = cpu_to_le16(msix->entry); + } + /* Use alternate PCI bus number */ + if (MSB(rid)) + icb->firmware_options_2 |= cpu_to_le32(BIT_19); + /* Use alternate PCI devfn */ + if (LSB(rid)) + icb->firmware_options_2 |= cpu_to_le32(BIT_18); + + /* Use Disable MSIX Handshake mode for capable adapters */ + if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) && + (ha->flags.msix_enabled)) { + icb->firmware_options_2 &= cpu_to_le32(~BIT_22); + ha->flags.disable_msix_handshake = 1; + ql_dbg(ql_dbg_init, vha, 0x00fe, + "MSIX Handshake Disable Mode turned on.\n"); + } else { + icb->firmware_options_2 |= cpu_to_le32(BIT_22); + } + icb->firmware_options_2 |= cpu_to_le32(BIT_23); + + wrt_reg_dword(®->isp25mq.req_q_in, 0); + wrt_reg_dword(®->isp25mq.req_q_out, 0); + wrt_reg_dword(®->isp25mq.rsp_q_in, 0); + wrt_reg_dword(®->isp25mq.rsp_q_out, 0); + } else { + wrt_reg_dword(®->isp24.req_q_in, 0); + wrt_reg_dword(®->isp24.req_q_out, 0); + wrt_reg_dword(®->isp24.rsp_q_in, 0); + wrt_reg_dword(®->isp24.rsp_q_out, 0); + } + + qlt_24xx_config_rings(vha); + + /* If the user has configured the speed, set it here */ + if (ha->set_data_rate) { + ql_dbg(ql_dbg_init, vha, 0x00fd, + "Speed set by user : %s Gbps \n", + qla2x00_get_link_speed_str(ha, ha->set_data_rate)); + icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13); + } + + /* PCI posting */ + rd_reg_word(&ioreg->hccr); +} + +/** + * qla2x00_init_rings() - Initializes firmware. + * @vha: HA context + * + * Beginning of request ring has initialization control block already built + * by nvram config routine. + * + * Returns 0 on success. + */ +int +qla2x00_init_rings(scsi_qla_host_t *vha) +{ + int rval; + unsigned long flags = 0; + int cnt, que; + struct qla_hw_data *ha = vha->hw; + struct req_que *req; + struct rsp_que *rsp; + struct mid_init_cb_24xx *mid_init_cb = + (struct mid_init_cb_24xx *) ha->init_cb; + + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Clear outstanding commands array. 
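+	 * For each request queue: the shadow OUT pointer that sits just
+	 * past the last ring entry is zeroed, every outstanding_cmds[]
+	 * slot from index 1 up is cleared (index 0 is left untouched and
+	 * the free-slot scan starts at 1), and the ring is rewound with
+	 * its full entry count marked free.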
*/ + for (que = 0; que < ha->max_req_queues; que++) { + req = ha->req_q_map[que]; + if (!req || !test_bit(que, ha->req_qid_map)) + continue; + req->out_ptr = (uint16_t *)(req->ring + req->length); + *req->out_ptr = 0; + for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) + req->outstanding_cmds[cnt] = NULL; + + req->current_outstanding_cmd = 1; + + /* Initialize firmware. */ + req->ring_ptr = req->ring; + req->ring_index = 0; + req->cnt = req->length; + } + + for (que = 0; que < ha->max_rsp_queues; que++) { + rsp = ha->rsp_q_map[que]; + if (!rsp || !test_bit(que, ha->rsp_qid_map)) + continue; + rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length); + *rsp->in_ptr = 0; + /* Initialize response queue entries */ + if (IS_QLAFX00(ha)) + qlafx00_init_response_q_entries(rsp); + else + qla2x00_init_response_q_entries(rsp); + } + + ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; + ha->tgt.atio_ring_index = 0; + /* Initialize ATIO queue entries */ + qlt_init_atio_q_entries(vha); + + ha->isp_ops->config_rings(vha); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (IS_QLAFX00(ha)) { + rval = qlafx00_init_firmware(vha, ha->init_cb_size); + goto next_check; + } + + /* Update any ISP specific firmware options before initialization. */ + ha->isp_ops->update_fw_options(vha); + + ql_dbg(ql_dbg_init, vha, 0x00d1, + "Issue init firmware FW opt 1-3= %08x %08x %08x.\n", + le32_to_cpu(mid_init_cb->init_cb.firmware_options_1), + le32_to_cpu(mid_init_cb->init_cb.firmware_options_2), + le32_to_cpu(mid_init_cb->init_cb.firmware_options_3)); + + if (ha->flags.npiv_supported) { + if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha)) + ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; + mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); + } + + if (IS_FWI2_CAPABLE(ha)) { + mid_init_cb->options = cpu_to_le16(BIT_1); + mid_init_cb->init_cb.execution_throttle = + cpu_to_le16(ha->cur_fw_xcb_count); + ha->flags.dport_enabled = + (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) & + BIT_7) != 0; + ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n", + (ha->flags.dport_enabled) ? "enabled" : "disabled"); + /* FA-WWPN Status */ + ha->flags.fawwpn_enabled = + (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) & + BIT_6) != 0; + ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n", + (ha->flags.fawwpn_enabled) ? "enabled" : "disabled"); + /* Init_cb will be reused for other command(s). Save a backup copy of port_name */ + memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE); + } + + /* ELS pass through payload is limit by frame size. */ + if (ha->flags.edif_enabled) + mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD); + + QLA_FW_STARTED(ha); + rval = qla2x00_init_firmware(vha, ha->init_cb_size); +next_check: + if (rval) { + QLA_FW_STOPPED(ha); + ql_log(ql_log_fatal, vha, 0x00d2, + "Init Firmware **** FAILED ****.\n"); + } else { + ql_dbg(ql_dbg_init, vha, 0x00d3, + "Init Firmware -- success.\n"); + vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0; + } + + return (rval); +} + +/** + * qla2x00_fw_ready() - Waits for firmware ready. + * @vha: HA context + * + * Returns 0 on success. 
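+ *
+ * Timing sketch (illustrative numbers): with retry_count = 3 and
+ * login_timeout = 20 the firmware-ready wait is 3 * 20 + 5 = 65
+ * seconds, while the cable-unplugged cutoff (mtime) stays at the
+ * 20 second minimum (30 seconds on P3P adapters); both deadlines are
+ * extended by however long an optional ISP84xx verify step takes.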
+ */ +static int +qla2x00_fw_ready(scsi_qla_host_t *vha) +{ + int rval; + unsigned long wtime, mtime, cs84xx_time; + uint16_t min_wait; /* Minimum wait time if loop is down */ + uint16_t wait_time; /* Wait time if loop is coming ready */ + uint16_t state[6]; + struct qla_hw_data *ha = vha->hw; + + if (IS_QLAFX00(vha->hw)) + return qlafx00_fw_ready(vha); + + /* Time to wait for loop down */ + if (IS_P3P_TYPE(ha)) + min_wait = 30; + else + min_wait = 20; + + /* + * Firmware should take at most one RATOV to login, plus 5 seconds for + * our own processing. + */ + if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) { + wait_time = min_wait; + } + + /* Min wait time if loop down */ + mtime = jiffies + (min_wait * HZ); + + /* wait time before firmware ready */ + wtime = jiffies + (wait_time * HZ); + + /* Wait for ISP to finish LIP */ + if (!vha->flags.init_done) + ql_log(ql_log_info, vha, 0x801e, + "Waiting for LIP to complete.\n"); + + do { + memset(state, -1, sizeof(state)); + rval = qla2x00_get_firmware_state(vha, state); + if (rval == QLA_SUCCESS) { + if (state[0] < FSTATE_LOSS_OF_SYNC) { + vha->device_flags &= ~DFLG_NO_CABLE; + } + if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { + ql_dbg(ql_dbg_taskm, vha, 0x801f, + "fw_state=%x 84xx=%x.\n", state[0], + state[2]); + if ((state[2] & FSTATE_LOGGED_IN) && + (state[2] & FSTATE_WAITING_FOR_VERIFY)) { + ql_dbg(ql_dbg_taskm, vha, 0x8028, + "Sending verify iocb.\n"); + + cs84xx_time = jiffies; + rval = qla84xx_init_chip(vha); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, + vha, 0x8007, + "Init chip failed.\n"); + break; + } + + /* Add time taken to initialize. */ + cs84xx_time = jiffies - cs84xx_time; + wtime += cs84xx_time; + mtime += cs84xx_time; + ql_dbg(ql_dbg_taskm, vha, 0x8008, + "Increasing wait time by %ld. " + "New time %ld.\n", cs84xx_time, + wtime); + } + } else if (state[0] == FSTATE_READY) { + ql_dbg(ql_dbg_taskm, vha, 0x8037, + "F/W Ready - OK.\n"); + + qla2x00_get_retry_cnt(vha, &ha->retry_count, + &ha->login_timeout, &ha->r_a_tov); + + rval = QLA_SUCCESS; + break; + } + + rval = QLA_FUNCTION_FAILED; + + if (atomic_read(&vha->loop_down_timer) && + state[0] != FSTATE_READY) { + /* Loop down. Timeout on min_wait for states + * other than Wait for Login. + */ + if (time_after_eq(jiffies, mtime)) { + ql_log(ql_log_info, vha, 0x8038, + "Cable is unplugged...\n"); + + vha->device_flags |= DFLG_NO_CABLE; + break; + } + } + } else { + /* Mailbox cmd failed. Timeout on min_wait. */ + if (time_after_eq(jiffies, mtime) || + ha->flags.isp82xx_fw_hung) + break; + } + + if (time_after_eq(jiffies, wtime)) + break; + + /* Delay for a while */ + msleep(500); + } while (1); + + ql_dbg(ql_dbg_taskm, vha, 0x803a, + "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0], + state[1], state[2], state[3], state[4], state[5], jiffies); + + if (rval && !(vha->device_flags & DFLG_NO_CABLE)) { + ql_log(ql_log_warn, vha, 0x803b, + "Firmware ready **** FAILED ****.\n"); + } + + return (rval); +} + +/* +* qla2x00_configure_hba +* Setup adapter context. +* +* Input: +* ha = adapter state pointer. +* +* Returns: +* 0 = success +* +* Context: +* Kernel context. +*/ +static int +qla2x00_configure_hba(scsi_qla_host_t *vha) +{ + int rval; + uint16_t loop_id; + uint16_t topo; + uint16_t sw_cap; + uint8_t al_pa; + uint8_t area; + uint8_t domain; + char connect_type[22]; + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + port_id_t id; + unsigned long flags; + + /* Get host addresses. 
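+	 * qla2x00_get_adapter_id() also returns the topology code used by
+	 * the switch statement below: 0 = NL, 1 = FL, 2 = N
+	 * (point-to-point), 3 = F (fabric); 4 means the topology is not
+	 * known yet and the caller is asked to retry.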
*/ + rval = qla2x00_get_adapter_id(vha, + &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); + if (rval != QLA_SUCCESS) { + if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || + IS_CNA_CAPABLE(ha) || + (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { + ql_dbg(ql_dbg_disc, vha, 0x2008, + "Loop is in a transition state.\n"); + } else { + ql_log(ql_log_warn, vha, 0x2009, + "Unable to get host loop ID.\n"); + if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) && + (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) { + ql_log(ql_log_warn, vha, 0x1151, + "Doing link init.\n"); + if (qla24xx_link_initialize(vha) == QLA_SUCCESS) + return rval; + } + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + } + return (rval); + } + + if (topo == 4) { + ql_log(ql_log_info, vha, 0x200a, + "Cannot get topology - retrying.\n"); + return (QLA_FUNCTION_FAILED); + } + + vha->loop_id = loop_id; + + /* initialize */ + ha->min_external_loopid = SNS_FIRST_LOOP_ID; + ha->operating_mode = LOOP; + + switch (topo) { + case 0: + ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n"); + ha->switch_cap = 0; + ha->current_topology = ISP_CFG_NL; + strcpy(connect_type, "(Loop)"); + break; + + case 1: + ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n"); + ha->switch_cap = sw_cap; + ha->current_topology = ISP_CFG_FL; + strcpy(connect_type, "(FL_Port)"); + break; + + case 2: + ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n"); + ha->switch_cap = 0; + ha->operating_mode = P2P; + ha->current_topology = ISP_CFG_N; + strcpy(connect_type, "(N_Port-to-N_Port)"); + break; + + case 3: + ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n"); + ha->switch_cap = sw_cap; + ha->operating_mode = P2P; + ha->current_topology = ISP_CFG_F; + strcpy(connect_type, "(F_Port)"); + break; + + default: + ql_dbg(ql_dbg_disc, vha, 0x200f, + "HBA in unknown topology %x, using NL.\n", topo); + ha->switch_cap = 0; + ha->current_topology = ISP_CFG_NL; + strcpy(connect_type, "(Loop)"); + break; + } + + /* Save Host port and loop ID. 
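+	 * The 24-bit port ID is reassembled from domain/area/AL_PA and
+	 * written into the host map under hardware_lock; note the N2N
+	 * exceptions -- with EDIF enabled the update is skipped for
+	 * topo 2 altogether, otherwise only when n2n_bigger is set.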
*/ + /* byte order - Big Endian */ + id.b.domain = domain; + id.b.area = area; + id.b.al_pa = al_pa; + id.b.rsvd_1 = 0; + spin_lock_irqsave(&ha->hardware_lock, flags); + if (vha->hw->flags.edif_enabled) { + if (topo != 2) + qla_update_host_map(vha, id); + } else if (!(topo == 2 && ha->flags.n2n_bigger)) + qla_update_host_map(vha, id); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (!vha->flags.init_done) + ql_log(ql_log_info, vha, 0x2010, + "Topology - %s, Host Loop address 0x%x.\n", + connect_type, vha->loop_id); + + return(rval); +} + +inline void +qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, + const char *def) +{ + char *st, *en; + uint16_t index; + uint64_t zero[2] = { 0 }; + struct qla_hw_data *ha = vha->hw; + int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && + !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha); + + if (len > sizeof(zero)) + len = sizeof(zero); + if (memcmp(model, &zero, len) != 0) { + memcpy(ha->model_number, model, len); + st = en = ha->model_number; + en += len - 1; + while (en > st) { + if (*en != 0x20 && *en != 0x00) + break; + *en-- = '\0'; + } + + index = (ha->pdev->subsystem_device & 0xff); + if (use_tbl && + ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && + index < QLA_MODEL_NAMES) + strscpy(ha->model_desc, + qla2x00_model_name[index * 2 + 1], + sizeof(ha->model_desc)); + } else { + index = (ha->pdev->subsystem_device & 0xff); + if (use_tbl && + ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && + index < QLA_MODEL_NAMES) { + strscpy(ha->model_number, + qla2x00_model_name[index * 2], + sizeof(ha->model_number)); + strscpy(ha->model_desc, + qla2x00_model_name[index * 2 + 1], + sizeof(ha->model_desc)); + } else { + strscpy(ha->model_number, def, + sizeof(ha->model_number)); + } + } + if (IS_FWI2_CAPABLE(ha)) + qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc, + sizeof(ha->model_desc)); +} + +/* On sparc systems, obtain port and node WWN from firmware + * properties. + */ +static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv) +{ +#ifdef CONFIG_SPARC + struct qla_hw_data *ha = vha->hw; + struct pci_dev *pdev = ha->pdev; + struct device_node *dp = pci_device_to_OF_node(pdev); + const u8 *val; + int len; + + val = of_get_property(dp, "port-wwn", &len); + if (val && len >= WWN_SIZE) + memcpy(nv->port_name, val, WWN_SIZE); + + val = of_get_property(dp, "node-wwn", &len); + if (val && len >= WWN_SIZE) + memcpy(nv->node_name, val, WWN_SIZE); +#endif +} + +/* +* NVRAM configuration for ISP 2xxx +* +* Input: +* ha = adapter block pointer. +* +* Output: +* initialization control block in response_ring +* host adapters parameters in host adapter block +* +* Returns: +* 0 = success. +*/ +int +qla2x00_nvram_config(scsi_qla_host_t *vha) +{ + int rval; + uint8_t chksum = 0; + uint16_t cnt; + uint8_t *dptr1, *dptr2; + struct qla_hw_data *ha = vha->hw; + init_cb_t *icb = ha->init_cb; + nvram_t *nv = ha->nvram; + uint8_t *ptr = ha->nvram; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + rval = QLA_SUCCESS; + + /* Determine NVRAM starting address. */ + ha->nvram_size = sizeof(*nv); + ha->nvram_base = 0; + if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) + if ((rd_reg_word(®->ctrl_status) >> 14) == 1) + ha->nvram_base = 0x80; + + /* Get NVRAM data and calculate checksum. 
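+	 * The 8-bit additive checksum over the whole NVRAM image must
+	 * wrap to zero; a non-zero sum, a missing "ISP " signature or a
+	 * version below 1 takes the "reset to defaults" path that
+	 * follows.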
*/ + ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size); + for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) + chksum += *ptr++; + + ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f, + "Contents of NVRAM.\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110, + nv, ha->nvram_size); + + /* Bad NVRAM data, set defaults parameters. */ + if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) || + nv->nvram_version < 1) { + /* Reset NVRAM data. */ + ql_log(ql_log_warn, vha, 0x0064, + "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n", + chksum, nv->id, nv->nvram_version); + ql_log(ql_log_warn, vha, 0x0065, + "Falling back to " + "functioning (yet invalid -- WWPN) defaults.\n"); + + /* + * Set default initialization control block. + */ + memset(nv, 0, ha->nvram_size); + nv->parameter_block_version = ICB_VERSION; + + if (IS_QLA23XX(ha)) { + nv->firmware_options[0] = BIT_2 | BIT_1; + nv->firmware_options[1] = BIT_7 | BIT_5; + nv->add_firmware_options[0] = BIT_5; + nv->add_firmware_options[1] = BIT_5 | BIT_4; + nv->frame_payload_size = cpu_to_le16(2048); + nv->special_options[1] = BIT_7; + } else if (IS_QLA2200(ha)) { + nv->firmware_options[0] = BIT_2 | BIT_1; + nv->firmware_options[1] = BIT_7 | BIT_5; + nv->add_firmware_options[0] = BIT_5; + nv->add_firmware_options[1] = BIT_5 | BIT_4; + nv->frame_payload_size = cpu_to_le16(1024); + } else if (IS_QLA2100(ha)) { + nv->firmware_options[0] = BIT_3 | BIT_1; + nv->firmware_options[1] = BIT_5; + nv->frame_payload_size = cpu_to_le16(1024); + } + + nv->max_iocb_allocation = cpu_to_le16(256); + nv->execution_throttle = cpu_to_le16(16); + nv->retry_count = 8; + nv->retry_delay = 1; + + nv->port_name[0] = 33; + nv->port_name[3] = 224; + nv->port_name[4] = 139; + + qla2xxx_nvram_wwn_from_ofw(vha, nv); + + nv->login_timeout = 4; + + /* + * Set default host adapter parameters + */ + nv->host_p[1] = BIT_2; + nv->reset_delay = 5; + nv->port_down_retry_count = 8; + nv->max_luns_per_target = cpu_to_le16(8); + nv->link_down_timeout = 60; + + rval = 1; + } + + /* Reset Initialization control block */ + memset(icb, 0, ha->init_cb_size); + + /* + * Setup driver NVRAM options. + */ + nv->firmware_options[0] |= (BIT_6 | BIT_1); + nv->firmware_options[0] &= ~(BIT_5 | BIT_4); + nv->firmware_options[1] |= (BIT_5 | BIT_0); + nv->firmware_options[1] &= ~BIT_4; + + if (IS_QLA23XX(ha)) { + nv->firmware_options[0] |= BIT_2; + nv->firmware_options[0] &= ~BIT_3; + nv->special_options[0] &= ~BIT_6; + nv->add_firmware_options[1] |= BIT_5 | BIT_4; + + if (IS_QLA2300(ha)) { + if (ha->fb_rev == FPM_2310) { + strcpy(ha->model_number, "QLA2310"); + } else { + strcpy(ha->model_number, "QLA2300"); + } + } else { + qla2x00_set_model_info(vha, nv->model_number, + sizeof(nv->model_number), "QLA23xx"); + } + } else if (IS_QLA2200(ha)) { + nv->firmware_options[0] |= BIT_2; + /* + * 'Point-to-point preferred, else loop' is not a safe + * connection mode setting. + */ + if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) == + (BIT_5 | BIT_4)) { + /* Force 'loop preferred, else point-to-point'. */ + nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4); + nv->add_firmware_options[0] |= BIT_5; + } + strcpy(ha->model_number, "QLA22xx"); + } else /*if (IS_QLA2100(ha))*/ { + strcpy(ha->model_number, "QLA2100"); + } + + /* + * Copy over NVRAM RISC parameter block to initialization control block. 
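+	 *
+	 * The copy runs in two spans sized by pointer arithmetic over the
+	 * ICB layout -- version up to request_q_outpointer, then
+	 * add_firmware_options up to reserved_3 -- skipping the
+	 * queue-pointer fields in between, which are programmed later by
+	 * qla2x00_config_rings().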
+ */ + dptr1 = (uint8_t *)icb; + dptr2 = (uint8_t *)&nv->parameter_block_version; + cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version; + while (cnt--) + *dptr1++ = *dptr2++; + + /* Copy 2nd half. */ + dptr1 = (uint8_t *)icb->add_firmware_options; + cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options; + while (cnt--) + *dptr1++ = *dptr2++; + ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); + /* Use alternate WWN? */ + if (nv->host_p[1] & BIT_7) { + memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); + memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); + } + + /* Prepare nodename */ + if ((icb->firmware_options[1] & BIT_6) == 0) { + /* + * Firmware will apply the following mask if the nodename was + * not provided. + */ + memcpy(icb->node_name, icb->port_name, WWN_SIZE); + icb->node_name[0] &= 0xF0; + } + + /* + * Set host adapter parameters. + */ + + /* + * BIT_7 in the host-parameters section allows for modification to + * internal driver logging. + */ + if (nv->host_p[0] & BIT_7) + ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; + ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); + /* Always load RISC code on non ISP2[12]00 chips. */ + if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) + ha->flags.disable_risc_code_load = 0; + ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0); + ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0); + ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0); + ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0; + ha->flags.disable_serdes = 0; + + ha->operating_mode = + (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4; + + memcpy(ha->fw_seriallink_options, nv->seriallink_options, + sizeof(ha->fw_seriallink_options)); + + /* save HBA serial number */ + ha->serial0 = icb->port_name[5]; + ha->serial1 = icb->port_name[6]; + ha->serial2 = icb->port_name[7]; + memcpy(vha->node_name, icb->node_name, WWN_SIZE); + memcpy(vha->port_name, icb->port_name, WWN_SIZE); + + icb->execution_throttle = cpu_to_le16(0xFFFF); + + ha->retry_count = nv->retry_count; + + /* Set minimum login_timeout to 4 seconds. */ + if (nv->login_timeout != ql2xlogintimeout) + nv->login_timeout = ql2xlogintimeout; + if (nv->login_timeout < 4) + nv->login_timeout = 4; + ha->login_timeout = nv->login_timeout; + + /* Set minimum RATOV to 100 tenths of a second. */ + ha->r_a_tov = 100; + + ha->loop_reset_delay = nv->reset_delay; + + /* Link Down Timeout = 0: + * + * When Port Down timer expires we will start returning + * I/O's to OS with "DID_NO_CONNECT". + * + * Link Down Timeout != 0: + * + * The driver waits for the link to come up after link down + * before returning I/Os to OS with "DID_NO_CONNECT". + */ + if (nv->link_down_timeout == 0) { + ha->loop_down_abort_time = + (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); + } else { + ha->link_down_timeout = nv->link_down_timeout; + ha->loop_down_abort_time = + (LOOP_DOWN_TIME - ha->link_down_timeout); + } + + /* + * Need enough time to try and get the port back. 
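+	 *
+	 * Net effect of the assignments below: port_down_retry_count
+	 * comes from NVRAM unless qlport_down_retry overrides it, and
+	 * login_retry_count starts from the NVRAM retry_count but is
+	 * replaced by port_down_retry_count when that value still matches
+	 * NVRAM and exceeds 3, or when it is simply larger; a non-zero
+	 * ql2xloginretrycount wins over everything.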
+ */ + ha->port_down_retry_count = nv->port_down_retry_count; + if (qlport_down_retry) + ha->port_down_retry_count = qlport_down_retry; + /* Set login_retry_count */ + ha->login_retry_count = nv->retry_count; + if (ha->port_down_retry_count == nv->port_down_retry_count && + ha->port_down_retry_count > 3) + ha->login_retry_count = ha->port_down_retry_count; + else if (ha->port_down_retry_count > (int)ha->login_retry_count) + ha->login_retry_count = ha->port_down_retry_count; + if (ql2xloginretrycount) + ha->login_retry_count = ql2xloginretrycount; + + icb->lun_enables = cpu_to_le16(0); + icb->command_resource_count = 0; + icb->immediate_notify_resource_count = 0; + icb->timeout = cpu_to_le16(0); + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) { + /* Enable RIO */ + icb->firmware_options[0] &= ~BIT_3; + icb->add_firmware_options[0] &= + ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); + icb->add_firmware_options[0] |= BIT_2; + icb->response_accumulation_timer = 3; + icb->interrupt_delay_timer = 5; + + vha->flags.process_response_queue = 1; + } else { + /* Enable ZIO. */ + if (!vha->flags.init_done) { + ha->zio_mode = icb->add_firmware_options[0] & + (BIT_3 | BIT_2 | BIT_1 | BIT_0); + ha->zio_timer = icb->interrupt_delay_timer ? + icb->interrupt_delay_timer : 2; + } + icb->add_firmware_options[0] &= + ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); + vha->flags.process_response_queue = 0; + if (ha->zio_mode != QLA_ZIO_DISABLED) { + ha->zio_mode = QLA_ZIO_MODE_6; + + ql_log(ql_log_info, vha, 0x0068, + "ZIO mode %d enabled; timer delay (%d us).\n", + ha->zio_mode, ha->zio_timer * 100); + + icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode; + icb->interrupt_delay_timer = (uint8_t)ha->zio_timer; + vha->flags.process_response_queue = 1; + } + } + + if (rval) { + ql_log(ql_log_warn, vha, 0x0069, + "NVRAM configuration failed.\n"); + } + return (rval); +} + +void qla2x00_set_fcport_state(fc_port_t *fcport, int state) +{ + int old_state; + + old_state = atomic_read(&fcport->state); + atomic_set(&fcport->state, state); + + /* Don't print state transitions during initial allocation of fcport */ + if (old_state && old_state != state) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x207d, + "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n", + fcport->port_name, port_state_str[old_state], + port_state_str[state], fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + } +} + +/** + * qla2x00_alloc_fcport() - Allocate a generic fcport. + * @vha: HA context + * @flags: allocation flags + * + * Returns a pointer to the allocated fcport, or NULL, if none available. + */ +fc_port_t * +qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) +{ + fc_port_t *fcport; + + fcport = kzalloc(sizeof(fc_port_t), flags); + if (!fcport) + return NULL; + + fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, + flags); + if (!fcport->ct_desc.ct_sns) { + ql_log(ql_log_warn, vha, 0xd049, + "Failed to allocate ct_sns request.\n"); + kfree(fcport); + return NULL; + } + + /* Setup fcport template structure. 
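+	 * Fresh sessions start out unconfigured: no loop ID, unknown port
+	 * type and speed, disc_state DELETED, logout_on_delete set, and
+	 * the login retry budget seeded from ha->login_retry_count;
+	 * discovery fills in the real identity later.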
*/ + fcport->vha = vha; + fcport->port_type = FCT_UNKNOWN; + fcport->loop_id = FC_NO_LOOP_ID; + qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); + fcport->supported_classes = FC_COS_UNSPECIFIED; + fcport->fp_speed = PORT_SPEED_UNKNOWN; + + fcport->disc_state = DSC_DELETED; + fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + fcport->deleted = QLA_SESS_DELETED; + fcport->login_retry = vha->hw->login_retry_count; + fcport->chip_reset = vha->hw->base_qpair->chip_reset; + fcport->logout_on_delete = 1; + fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; + fcport->tgt_short_link_down_cnt = 0; + fcport->dev_loss_tmo = 0; + + if (!fcport->ct_desc.ct_sns) { + ql_log(ql_log_warn, vha, 0xd049, + "Failed to allocate ct_sns request.\n"); + kfree(fcport); + return NULL; + } + + INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); + INIT_WORK(&fcport->free_work, qlt_free_session_done); + INIT_WORK(&fcport->reg_work, qla_register_fcport_fn); + INIT_LIST_HEAD(&fcport->gnl_entry); + INIT_LIST_HEAD(&fcport->list); + INIT_LIST_HEAD(&fcport->unsol_ctx_head); + + INIT_LIST_HEAD(&fcport->sess_cmd_list); + spin_lock_init(&fcport->sess_cmd_lock); + + spin_lock_init(&fcport->edif.sa_list_lock); + INIT_LIST_HEAD(&fcport->edif.tx_sa_list); + INIT_LIST_HEAD(&fcport->edif.rx_sa_list); + + spin_lock_init(&fcport->edif.indx_list_lock); + INIT_LIST_HEAD(&fcport->edif.edif_indx_list); + + return fcport; +} + +void +qla2x00_free_fcport(fc_port_t *fcport) +{ + if (fcport->ct_desc.ct_sns) { + dma_free_coherent(&fcport->vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns, + fcport->ct_desc.ct_sns_dma); + + fcport->ct_desc.ct_sns = NULL; + } + + qla_edif_flush_sa_ctl_lists(fcport); + list_del(&fcport->list); + qla2x00_clear_loop_id(fcport); + + qla_edif_list_del(fcport); + + kfree(fcport); +} + +static void qla_get_login_template(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + int rval; + u32 *bp, sz; + __be32 *q; + + memset(ha->init_cb, 0, ha->init_cb_size); + sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size); + rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma, + ha->init_cb, sz); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_init, vha, 0x00d1, + "PLOGI ELS param read fail.\n"); + return; + } + q = (__be32 *)&ha->plogi_els_payld.fl_csp; + + bp = (uint32_t *)ha->init_cb; + cpu_to_be32_array(q, bp, sz / 4); + ha->flags.plogi_template_valid = 1; +} + +/* + * qla2x00_configure_loop + * Updates Fibre Channel Device Database with what is actually on loop. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = success. + * 1 = error. + * 2 = database was full and device was not configured. + */ +static int +qla2x00_configure_loop(scsi_qla_host_t *vha) +{ + int rval; + unsigned long flags, save_flags; + struct qla_hw_data *ha = vha->hw; + + rval = QLA_SUCCESS; + + /* Get Initiator ID */ + if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) { + rval = qla2x00_configure_hba(vha); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0x2013, + "Unable to configure HBA.\n"); + return (rval); + } + } + + save_flags = flags = vha->dpc_flags; + ql_dbg(ql_dbg_disc, vha, 0x2014, + "Configure loop -- dpc flags = 0x%lx.\n", flags); + + /* + * If we have both an RSCN and PORT UPDATE pending then handle them + * both at the same time. 
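+	 *
+	 * Rough decision table for the flag juggling below: on F/FL ports
+	 * a pending LOCAL_LOOP_UPDATE is folded into an RSCN-style fabric
+	 * scan, on NL/N ports it is the other way round, and if the host
+	 * is offline or an ISP abort is in progress both scans are
+	 * forced.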
+ */ + clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + clear_bit(RSCN_UPDATE, &vha->dpc_flags); + + qla2x00_get_data_rate(vha); + qla_get_login_template(vha); + + /* Determine what we need to do */ + if ((ha->current_topology == ISP_CFG_FL || + ha->current_topology == ISP_CFG_F) && + (test_bit(LOCAL_LOOP_UPDATE, &flags))) { + + set_bit(RSCN_UPDATE, &flags); + clear_bit(LOCAL_LOOP_UPDATE, &flags); + + } else if (ha->current_topology == ISP_CFG_NL || + ha->current_topology == ISP_CFG_N) { + clear_bit(RSCN_UPDATE, &flags); + set_bit(LOCAL_LOOP_UPDATE, &flags); + } else if (!vha->flags.online || + (test_bit(ABORT_ISP_ACTIVE, &flags))) { + set_bit(RSCN_UPDATE, &flags); + set_bit(LOCAL_LOOP_UPDATE, &flags); + } + + if (test_bit(LOCAL_LOOP_UPDATE, &flags)) { + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { + ql_dbg(ql_dbg_disc, vha, 0x2015, + "Loop resync needed, failing.\n"); + rval = QLA_FUNCTION_FAILED; + } else + rval = qla2x00_configure_local_loop(vha); + } + + if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { + if (LOOP_TRANSITION(vha)) { + ql_dbg(ql_dbg_disc, vha, 0x2099, + "Needs RSCN update and loop transition.\n"); + rval = QLA_FUNCTION_FAILED; + } + else + rval = qla2x00_configure_fabric(vha); + } + + if (rval == QLA_SUCCESS) { + if (atomic_read(&vha->loop_down_timer) || + test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { + rval = QLA_FUNCTION_FAILED; + } else { + atomic_set(&vha->loop_state, LOOP_READY); + ql_dbg(ql_dbg_disc, vha, 0x2069, + "LOOP READY.\n"); + ha->flags.fw_init_done = 1; + + /* + * use link up to wake up app to get ready for + * authentication. + */ + if (ha->flags.edif_enabled && DBELL_INACTIVE(vha)) + qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, + ha->link_data_rate); + + /* + * Process any ATIO queue entries that came in + * while we weren't online. + */ + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) { + spin_lock_irqsave(&ha->tgt.atio_lock, flags); + qlt_24xx_process_atio_queue(vha, 0); + spin_unlock_irqrestore(&ha->tgt.atio_lock, + flags); + } + } + } + + if (rval) { + ql_dbg(ql_dbg_disc, vha, 0x206a, + "%s *** FAILED ***.\n", __func__); + } else { + ql_dbg(ql_dbg_disc, vha, 0x206b, + "%s: exiting normally. 
local port wwpn %8phN id %06x)\n", + __func__, vha->port_name, vha->d_id.b24); + } + + /* Restore state if a resync event occurred during processing */ + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { + if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + if (test_bit(RSCN_UPDATE, &save_flags)) { + set_bit(RSCN_UPDATE, &vha->dpc_flags); + } + } + + return (rval); +} + +static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha) +{ + unsigned long flags; + fc_port_t *fcport; + + ql_dbg(ql_dbg_disc, vha, 0x206a, "%s %d.\n", __func__, __LINE__); + + if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->n2n_flag) { + qla24xx_fcport_handle_login(vha, fcport); + return QLA_SUCCESS; + } + } + + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_retry++; + spin_unlock_irqrestore(&vha->work_lock, flags); + + if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + } + return QLA_FUNCTION_FAILED; +} + +static void +qla_reinitialize_link(scsi_qla_host_t *vha) +{ + int rval; + + atomic_set(&vha->loop_state, LOOP_DOWN); + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + rval = qla2x00_full_login_lip(vha); + if (rval == QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n"); + } else { + ql_dbg(ql_dbg_disc, vha, 0xd051, + "Link reinitialization failed (%d)\n", rval); + } +} + +/* + * qla2x00_configure_local_loop + * Updates Fibre Channel Device Database with local loop devices. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = success. + */ +static int +qla2x00_configure_local_loop(scsi_qla_host_t *vha) +{ + int rval, rval2; + int found; + fc_port_t *fcport, *new_fcport; + uint16_t index; + uint16_t entries; + struct gid_list_info *gid; + uint16_t loop_id; + uint8_t domain, area, al_pa; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + + /* Inititae N2N login. */ + if (N2N_TOPO(ha)) + return qla2x00_configure_n2n_loop(vha); + + new_fcport = NULL; + entries = MAX_FIBRE_DEVICES_LOOP; + + /* Get list of logged in devices. */ + memset(ha->gid_list, 0, qla2x00_gid_list_size(ha)); + rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, + &entries); + if (rval != QLA_SUCCESS) + goto err; + + ql_dbg(ql_dbg_disc, vha, 0x2011, + "Entries in ID list (%d).\n", entries); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075, + ha->gid_list, entries * sizeof(*ha->gid_list)); + + if (entries == 0) { + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_retry++; + spin_unlock_irqrestore(&vha->work_lock, flags); + + if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { + u8 loop_map_entries = 0; + int rc; + + rc = qla2x00_get_fcal_position_map(vha, NULL, + &loop_map_entries); + if (rc == QLA_SUCCESS && loop_map_entries > 1) { + /* + * There are devices that are still not logged + * in. Reinitialize to give them a chance. + */ + qla_reinitialize_link(vha); + return QLA_FUNCTION_FAILED; + } + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + } + } else { + vha->scan.scan_retry = 0; + } + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + fcport->scan_state = QLA_FCPORT_SCAN; + } + + /* Allocate temporary fcport for any new fcports discovered. 
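+	 * Scratch-entry pattern: one spare fc_port_t is kept on hand;
+	 * when a scanned device has no match in vp_fcports the spare is
+	 * linked in as-is and a new spare is allocated for the next
+	 * iteration, and whatever spare is left over is freed once the
+	 * scan finishes.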
*/ + new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (new_fcport == NULL) { + ql_log(ql_log_warn, vha, 0x2012, + "Memory allocation failed for fcport.\n"); + rval = QLA_MEMORY_ALLOC_FAILED; + goto err; + } + new_fcport->flags &= ~FCF_FABRIC_DEVICE; + + /* Add devices to port list. */ + gid = ha->gid_list; + for (index = 0; index < entries; index++) { + domain = gid->domain; + area = gid->area; + al_pa = gid->al_pa; + if (IS_QLA2100(ha) || IS_QLA2200(ha)) + loop_id = gid->loop_id_2100; + else + loop_id = le16_to_cpu(gid->loop_id); + gid = (void *)gid + ha->gid_list_info_size; + + /* Bypass reserved domain fields. */ + if ((domain & 0xf0) == 0xf0) + continue; + + /* Bypass if not same domain and area of adapter. */ + if (area && domain && ((area != vha->d_id.b.area) || + (domain != vha->d_id.b.domain)) && + (ha->current_topology == ISP_CFG_NL)) + continue; + + + /* Bypass invalid local loop ID. */ + if (loop_id > LAST_LOCAL_LOOP_ID) + continue; + + memset(new_fcport->port_name, 0, WWN_SIZE); + + /* Fill in member data. */ + new_fcport->d_id.b.domain = domain; + new_fcport->d_id.b.area = area; + new_fcport->d_id.b.al_pa = al_pa; + new_fcport->loop_id = loop_id; + new_fcport->scan_state = QLA_FCPORT_FOUND; + + rval2 = qla2x00_get_port_database(vha, new_fcport, 0); + if (rval2 != QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0x2097, + "Failed to retrieve fcport information " + "-- get_port_database=%x, loop_id=0x%04x.\n", + rval2, new_fcport->loop_id); + /* Skip retry if N2N */ + if (ha->current_topology != ISP_CFG_N) { + ql_dbg(ql_dbg_disc, vha, 0x2105, + "Scheduling resync.\n"); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + continue; + } + } + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + /* Check for matching device in port list. */ + found = 0; + fcport = NULL; + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (memcmp(new_fcport->port_name, fcport->port_name, + WWN_SIZE)) + continue; + + fcport->flags &= ~FCF_FABRIC_DEVICE; + fcport->loop_id = new_fcport->loop_id; + fcport->port_type = new_fcport->port_type; + fcport->d_id.b24 = new_fcport->d_id.b24; + memcpy(fcport->node_name, new_fcport->node_name, + WWN_SIZE); + fcport->scan_state = QLA_FCPORT_FOUND; + if (fcport->login_retry == 0) { + fcport->login_retry = vha->hw->login_retry_count; + ql_dbg(ql_dbg_disc, vha, 0x2135, + "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", + fcport->port_name, fcport->loop_id, + fcport->login_retry); + } + found++; + break; + } + + if (!found) { + /* New device, add to fcports list. */ + list_add_tail(&new_fcport->list, &vha->vp_fcports); + + /* Allocate a new replacement fcport. */ + fcport = new_fcport; + + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + + if (new_fcport == NULL) { + ql_log(ql_log_warn, vha, 0xd031, + "Failed to allocate memory for fcport.\n"); + rval = QLA_MEMORY_ALLOC_FAILED; + goto err; + } + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + new_fcport->flags &= ~FCF_FABRIC_DEVICE; + } + + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + /* Base iIDMA settings on HBA port speed. 
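+		 * There is no fabric name server to query for a per-port
+		 * speed on the local loop, so fp_speed is simply seeded
+		 * with the HBA link rate; qla2x00_iidma_fcport() later
+		 * ignores ports whose speed is unknown or above the link
+		 * rate.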
*/ + fcport->fp_speed = ha->link_data_rate; + } + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + break; + + if (fcport->scan_state == QLA_FCPORT_SCAN) { + if ((qla_dual_mode_enabled(vha) || + qla_ini_mode_enabled(vha)) && + atomic_read(&fcport->state) == FCS_ONLINE) { + qla2x00_mark_device_lost(vha, fcport, + ql2xplogiabsentdevice); + if (fcport->loop_id != FC_NO_LOOP_ID && + (fcport->flags & FCF_FCP2_DEVICE) == 0 && + fcport->port_type != FCT_INITIATOR && + fcport->port_type != FCT_BROADCAST) { + ql_dbg(ql_dbg_disc, vha, 0x20f0, + "%s %d %8phC post del sess\n", + __func__, __LINE__, + fcport->port_name); + + qlt_schedule_sess_for_deletion(fcport); + continue; + } + } + } + + if (fcport->scan_state == QLA_FCPORT_FOUND) + qla24xx_fcport_handle_login(vha, fcport); + } + + qla2x00_free_fcport(new_fcport); + + return rval; + +err: + ql_dbg(ql_dbg_disc, vha, 0x2098, + "Configure local loop error exit: rval=%x.\n", rval); + return rval; +} + +static void +qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + int rval; + uint16_t mb[MAILBOX_REGISTER_COUNT]; + struct qla_hw_data *ha = vha->hw; + + if (!IS_IIDMA_CAPABLE(ha)) + return; + + if (atomic_read(&fcport->state) != FCS_ONLINE) + return; + + if (fcport->fp_speed == PORT_SPEED_UNKNOWN || + fcport->fp_speed > ha->link_data_rate || + !ha->flags.gpsc_supported) + return; + + rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, + mb); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0x2004, + "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n", + fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]); + } else { + ql_dbg(ql_dbg_disc, vha, 0x2005, + "iIDMA adjusted to %s GB/s (%X) on %8phN.\n", + qla2x00_get_link_speed_str(ha, fcport->fp_speed), + fcport->fp_speed, fcport->port_name); + } +} + +void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + qla2x00_iidma_fcport(vha, fcport); + qla24xx_update_fcport_fcp_prio(vha, fcport); +} + +int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + return qla2x00_post_work(vha, e); +} + +/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/ +static void +qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + struct fc_rport_identifiers rport_ids; + struct fc_rport *rport; + unsigned long flags; + + if (atomic_read(&fcport->state) == FCS_ONLINE) + return; + + rport_ids.node_name = wwn_to_u64(fcport->node_name); + rport_ids.port_name = wwn_to_u64(fcport->port_name); + rport_ids.port_id = fcport->d_id.b.domain << 16 | + fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; + rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; + fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids); + if (!rport) { + ql_log(ql_log_warn, vha, 0x2006, + "Unable to allocate fc remote port.\n"); + return; + } + + spin_lock_irqsave(fcport->vha->host->host_lock, flags); + *((fc_port_t **)rport->dd_data) = fcport; + spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); + fcport->dev_loss_tmo = rport->dev_loss_tmo; + + rport->supported_classes = fcport->supported_classes; + + rport_ids.roles = FC_PORT_ROLE_UNKNOWN; + if (fcport->port_type == FCT_INITIATOR) + rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; + if (fcport->port_type == FCT_TARGET) + rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET; + if (fcport->port_type & 
FCT_NVME_INITIATOR) + rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR; + if (fcport->port_type & FCT_NVME_TARGET) + rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET; + if (fcport->port_type & FCT_NVME_DISCOVERY) + rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY; + + fc_remote_port_rolechg(rport, rport_ids.roles); + + ql_dbg(ql_dbg_disc, vha, 0x20ee, + "%s: %8phN. rport %ld:0:%d (%p) is %s mode\n", + __func__, fcport->port_name, vha->host_no, + rport->scsi_target_id, rport, + (fcport->port_type == FCT_TARGET) ? "tgt" : + ((fcport->port_type & FCT_NVME) ? "nvme" : "ini")); +} + +/* + * qla2x00_update_fcport + * Updates device on list. + * + * Input: + * ha = adapter block pointer. + * fcport = port structure pointer. + * + * Return: + * 0 - Success + * BIT_0 - error + * + * Context: + * Kernel context. + */ +void +qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + unsigned long flags; + + if (IS_SW_RESV_ADDR(fcport->d_id)) + return; + + ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n", + __func__, fcport->port_name); + + qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT); + fcport->login_retry = vha->hw->login_retry_count; + fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + + spin_lock_irqsave(&vha->work_lock, flags); + fcport->deleted = 0; + spin_unlock_irqrestore(&vha->work_lock, flags); + + if (vha->hw->current_topology == ISP_CFG_NL) + fcport->logout_on_delete = 0; + else + fcport->logout_on_delete = 1; + fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0; + + if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) { + fcport->tgt_short_link_down_cnt++; + fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; + } + + switch (vha->hw->current_topology) { + case ISP_CFG_N: + case ISP_CFG_NL: + fcport->keep_nport_handle = 1; + break; + default: + break; + } + + qla2x00_iidma_fcport(vha, fcport); + + qla2x00_dfs_create_rport(vha, fcport); + + qla24xx_update_fcport_fcp_prio(vha, fcport); + + switch (vha->host->active_mode) { + case MODE_INITIATOR: + qla2x00_reg_remote_port(vha, fcport); + break; + case MODE_TARGET: + if (!vha->vha_tgt.qla_tgt->tgt_stop && + !vha->vha_tgt.qla_tgt->tgt_stopped) + qlt_fc_port_added(vha, fcport); + break; + case MODE_DUAL: + qla2x00_reg_remote_port(vha, fcport); + if (!vha->vha_tgt.qla_tgt->tgt_stop && + !vha->vha_tgt.qla_tgt->tgt_stopped) + qlt_fc_port_added(vha, fcport); + break; + default: + break; + } + + if (NVME_TARGET(vha->hw, fcport)) + qla_nvme_register_remote(vha, fcport); + + qla2x00_set_fcport_state(fcport, FCS_ONLINE); + + if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) { + if (fcport->id_changed) { + fcport->id_changed = 0; + ql_dbg(ql_dbg_disc, vha, 0x20d7, + "%s %d %8phC post gfpnid fcp_cnt %d\n", + __func__, __LINE__, fcport->port_name, + vha->fcport_count); + qla24xx_post_gfpnid_work(vha, fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0x20d7, + "%s %d %8phC post gpsc fcp_cnt %d\n", + __func__, __LINE__, fcport->port_name, + vha->fcport_count); + qla24xx_post_gpsc_work(vha, fcport); + } + } + + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); +} + +void qla_register_fcport_fn(struct work_struct *work) +{ + fc_port_t *fcport = container_of(work, struct fc_port, reg_work); + u32 rscn_gen = fcport->rscn_gen; + u16 data[2]; + + if (IS_SW_RESV_ADDR(fcport->d_id)) + return; + + qla2x00_update_fcport(fcport->vha, fcport); + + ql_dbg(ql_dbg_disc, fcport->vha, 0x911e, + "%s rscn gen %d/%d next DS %d\n", __func__, + rscn_gen, fcport->rscn_gen, fcport->next_disc_state); + + if (rscn_gen != fcport->rscn_gen) { 
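+		/*
+		 * The rscn_gen snapshot taken before the (potentially
+		 * sleeping) registration catches RSCNs that raced with it:
+		 * depending on next_disc_state the session is either
+		 * scheduled for deletion or re-verified with an ADISC.
+		 */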
+ /* RSCN(s) came in while registration */ + switch (fcport->next_disc_state) { + case DSC_DELETE_PEND: + qlt_schedule_sess_for_deletion(fcport); + break; + case DSC_ADISC: + data[0] = data[1] = 0; + qla2x00_post_async_adisc_work(fcport->vha, fcport, + data); + break; + default: + break; + } + } +} + +/* + * qla2x00_configure_fabric + * Setup SNS devices with loop ID's. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = success. + * BIT_0 = error + */ +static int +qla2x00_configure_fabric(scsi_qla_host_t *vha) +{ + int rval; + fc_port_t *fcport; + uint16_t mb[MAILBOX_REGISTER_COUNT]; + uint16_t loop_id; + struct qla_hw_data *ha = vha->hw; + int discovery_gen; + + /* If FL port exists, then SNS is present */ + if (IS_FWI2_CAPABLE(ha)) + loop_id = NPH_F_PORT; + else + loop_id = SNS_FL_PORT; + rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0x20a0, + "MBX_GET_PORT_NAME failed, No FL Port.\n"); + + vha->device_flags &= ~SWITCH_FOUND; + return (QLA_SUCCESS); + } + vha->device_flags |= SWITCH_FOUND; + + rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0); + if (rval != QLA_SUCCESS) + ql_dbg(ql_dbg_disc, vha, 0x20ff, + "Failed to get Fabric Port Name\n"); + + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { + rval = qla2x00_send_change_request(vha, 0x3, 0); + if (rval != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0x121, + "Failed to enable receiving of RSCN requests: 0x%x.\n", + rval); + } + + do { + qla2x00_mgmt_svr_login(vha); + + /* Ensure we are logged into the SNS. */ + loop_id = NPH_SNS_LID(ha); + rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, + 0xfc, mb, BIT_1|BIT_0); + if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) { + ql_dbg(ql_dbg_disc, vha, 0x20a1, + "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n", + loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + return rval; + } + + /* FDMI support. */ + if (ql2xfdmienable && + test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags)) + qla2x00_fdmi_register(vha); + + if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) { + if (qla2x00_rft_id(vha)) { + /* EMPTY */ + ql_dbg(ql_dbg_disc, vha, 0x20a2, + "Register FC-4 TYPE failed.\n"); + if (test_bit(LOOP_RESYNC_NEEDED, + &vha->dpc_flags)) + break; + } + if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) { + /* EMPTY */ + ql_dbg(ql_dbg_disc, vha, 0x209a, + "Register FC-4 Features failed.\n"); + if (test_bit(LOOP_RESYNC_NEEDED, + &vha->dpc_flags)) + break; + } + if (vha->flags.nvme_enabled) { + if (qla2x00_rff_id(vha, FC_TYPE_NVME)) { + ql_dbg(ql_dbg_disc, vha, 0x2049, + "Register NVME FC Type Features failed.\n"); + } + } + if (qla2x00_rnn_id(vha)) { + /* EMPTY */ + ql_dbg(ql_dbg_disc, vha, 0x2104, + "Register Node Name failed.\n"); + if (test_bit(LOOP_RESYNC_NEEDED, + &vha->dpc_flags)) + break; + } else if (qla2x00_rsnn_nn(vha)) { + /* EMPTY */ + ql_dbg(ql_dbg_disc, vha, 0x209b, + "Register Symbolic Node Name failed.\n"); + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + break; + } + } + + + /* Mark the time right before querying FW for connected ports. + * This process is long, asynchronous and by the time it's done, + * collected information might not be accurate anymore. E.g. + * disconnected port might have re-connected and a brand new + * session has been created. In this case session's generation + * will be newer than discovery_gen. 
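+	 * Practical consequence: any session whose generation is newer
+	 * than the discovery_gen captured here was created after the scan
+	 * began, so the (possibly stale) scan results must not be used to
+	 * tear it down.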
*/ + qlt_do_generation_tick(vha, &discovery_gen); + + if (USE_ASYNC_SCAN(ha)) { + rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI, + NULL); + if (rval) + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + } else { + list_for_each_entry(fcport, &vha->vp_fcports, list) + fcport->scan_state = QLA_FCPORT_SCAN; + + rval = qla2x00_find_all_fabric_devs(vha); + } + if (rval != QLA_SUCCESS) + break; + } while (0); + + if (!vha->nvme_local_port && vha->flags.nvme_enabled) + qla_nvme_register_hba(vha); + + if (rval) + ql_dbg(ql_dbg_disc, vha, 0x2068, + "Configure fabric error exit rval=%d.\n", rval); + + return (rval); +} + +/* + * qla2x00_find_all_fabric_devs + * + * Input: + * ha = adapter block pointer. + * dev = database device entry pointer. + * + * Returns: + * 0 = success. + * + * Context: + * Kernel context. + */ +static int +qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) +{ + int rval; + uint16_t loop_id; + fc_port_t *fcport, *new_fcport; + int found; + + sw_info_t *swl; + int swl_idx; + int first_dev, last_dev; + port_id_t wrap = {}, nxt_d_id; + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + unsigned long flags; + + rval = QLA_SUCCESS; + + /* Try GID_PT to get device list, else GAN. */ + if (!ha->swl) + ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t), + GFP_KERNEL); + swl = ha->swl; + if (!swl) { + /*EMPTY*/ + ql_dbg(ql_dbg_disc, vha, 0x209c, + "GID_PT allocations failed, fallback on GA_NXT.\n"); + } else { + memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t)); + if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { + swl = NULL; + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + return rval; + } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) { + swl = NULL; + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + return rval; + } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) { + swl = NULL; + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + return rval; + } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) { + swl = NULL; + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + return rval; + } + + /* If other queries succeeded probe for FC-4 type */ + if (swl) { + qla2x00_gff_id(vha, swl); + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + return rval; + } + } + swl_idx = 0; + + /* Allocate temporary fcport for any new fcports discovered. */ + new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (new_fcport == NULL) { + ql_log(ql_log_warn, vha, 0x209d, + "Failed to allocate memory for fcport.\n"); + return (QLA_MEMORY_ALLOC_FAILED); + } + new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); + /* Set start port ID scan at adapter ID. */ + first_dev = 1; + last_dev = 0; + + /* Starting free loop ID. 
*/ + loop_id = ha->min_external_loopid; + for (; loop_id <= ha->max_loop_id; loop_id++) { + if (qla2x00_is_reserved_id(vha, loop_id)) + continue; + + if (ha->current_topology == ISP_CFG_FL && + (atomic_read(&vha->loop_down_timer) || + LOOP_TRANSITION(vha))) { + atomic_set(&vha->loop_down_timer, 0); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + break; + } + + if (swl != NULL) { + if (last_dev) { + wrap.b24 = new_fcport->d_id.b24; + } else { + new_fcport->d_id.b24 = swl[swl_idx].d_id.b24; + memcpy(new_fcport->node_name, + swl[swl_idx].node_name, WWN_SIZE); + memcpy(new_fcport->port_name, + swl[swl_idx].port_name, WWN_SIZE); + memcpy(new_fcport->fabric_port_name, + swl[swl_idx].fabric_port_name, WWN_SIZE); + new_fcport->fp_speed = swl[swl_idx].fp_speed; + new_fcport->fc4_type = swl[swl_idx].fc4_type; + + new_fcport->nvme_flag = 0; + if (vha->flags.nvme_enabled && + swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) { + ql_log(ql_log_info, vha, 0x2131, + "FOUND: NVME port %8phC as FC Type 28h\n", + new_fcport->port_name); + } + + if (swl[swl_idx].d_id.b.rsvd_1 != 0) { + last_dev = 1; + } + swl_idx++; + } + } else { + /* Send GA_NXT to the switch */ + rval = qla2x00_ga_nxt(vha, new_fcport); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x209e, + "SNS scan failed -- assuming " + "zero-entry result.\n"); + rval = QLA_SUCCESS; + break; + } + } + + /* If wrap on switch device list, exit. */ + if (first_dev) { + wrap.b24 = new_fcport->d_id.b24; + first_dev = 0; + } else if (new_fcport->d_id.b24 == wrap.b24) { + ql_dbg(ql_dbg_disc, vha, 0x209f, + "Device wrap (%02x%02x%02x).\n", + new_fcport->d_id.b.domain, + new_fcport->d_id.b.area, + new_fcport->d_id.b.al_pa); + break; + } + + /* Bypass if same physical adapter. */ + if (new_fcport->d_id.b24 == base_vha->d_id.b24) + continue; + + /* Bypass virtual ports of the same host. */ + if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24)) + continue; + + /* Bypass if same domain and area of adapter. */ + if (((new_fcport->d_id.b24 & 0xffff00) == + (vha->d_id.b24 & 0xffff00)) && ha->current_topology == + ISP_CFG_FL) + continue; + + /* Bypass reserved domain fields. */ + if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0) + continue; + + /* Bypass ports whose FCP-4 type is not FCP_SCSI */ + if (ql2xgffidenable && + (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) && + new_fcport->fc4_type != 0)) + continue; + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + + /* Locate matching device in database. */ + found = 0; + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (memcmp(new_fcport->port_name, fcport->port_name, + WWN_SIZE)) + continue; + + fcport->scan_state = QLA_FCPORT_FOUND; + + found++; + + /* Update port state. */ + memcpy(fcport->fabric_port_name, + new_fcport->fabric_port_name, WWN_SIZE); + fcport->fp_speed = new_fcport->fp_speed; + + /* + * If address the same and state FCS_ONLINE + * (or in target mode), nothing changed. + */ + if (fcport->d_id.b24 == new_fcport->d_id.b24 && + (atomic_read(&fcport->state) == FCS_ONLINE || + (vha->host->active_mode == MODE_TARGET))) { + break; + } + + if (fcport->login_retry == 0) + fcport->login_retry = + vha->hw->login_retry_count; + /* + * If device was not a fabric device before. 
+ */ + if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { + fcport->d_id.b24 = new_fcport->d_id.b24; + qla2x00_clear_loop_id(fcport); + fcport->flags |= (FCF_FABRIC_DEVICE | + FCF_LOGIN_NEEDED); + break; + } + + /* + * Port ID changed or device was marked to be updated; + * Log it out if still logged in and mark it for + * relogin later. + */ + if (qla_tgt_mode_enabled(base_vha)) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080, + "port changed FC ID, %8phC" + " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n", + fcport->port_name, + fcport->d_id.b.domain, + fcport->d_id.b.area, + fcport->d_id.b.al_pa, + fcport->loop_id, + new_fcport->d_id.b.domain, + new_fcport->d_id.b.area, + new_fcport->d_id.b.al_pa); + fcport->d_id.b24 = new_fcport->d_id.b24; + break; + } + + fcport->d_id.b24 = new_fcport->d_id.b24; + fcport->flags |= FCF_LOGIN_NEEDED; + break; + } + + if (found && NVME_TARGET(vha->hw, fcport)) { + if (fcport->disc_state == DSC_DELETE_PEND) { + qla2x00_set_fcport_disc_state(fcport, DSC_GNL); + vha->fcport_count--; + fcport->login_succ = 0; + } + } + + if (found) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + continue; + } + /* If device was not in our fcports list, then add it. */ + new_fcport->scan_state = QLA_FCPORT_FOUND; + list_add_tail(&new_fcport->list, &vha->vp_fcports); + + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + + /* Allocate a new replacement fcport. */ + nxt_d_id.b24 = new_fcport->d_id.b24; + new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (new_fcport == NULL) { + ql_log(ql_log_warn, vha, 0xd032, + "Memory allocation failed for fcport.\n"); + return (QLA_MEMORY_ALLOC_FAILED); + } + new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); + new_fcport->d_id.b24 = nxt_d_id.b24; + } + + qla2x00_free_fcport(new_fcport); + + /* + * Logout all previous fabric dev marked lost, except FCP2 devices. + */ + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + break; + + if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) + continue; + + if (fcport->scan_state == QLA_FCPORT_SCAN) { + if ((qla_dual_mode_enabled(vha) || + qla_ini_mode_enabled(vha)) && + atomic_read(&fcport->state) == FCS_ONLINE) { + qla2x00_mark_device_lost(vha, fcport, + ql2xplogiabsentdevice); + if (fcport->loop_id != FC_NO_LOOP_ID && + (fcport->flags & FCF_FCP2_DEVICE) == 0 && + fcport->port_type != FCT_INITIATOR && + fcport->port_type != FCT_BROADCAST) { + ql_dbg(ql_dbg_disc, vha, 0x20f0, + "%s %d %8phC post del sess\n", + __func__, __LINE__, + fcport->port_name); + qlt_schedule_sess_for_deletion(fcport); + continue; + } + } + } + + if (fcport->scan_state == QLA_FCPORT_FOUND && + (fcport->flags & FCF_LOGIN_NEEDED) != 0) + qla24xx_fcport_handle_login(vha, fcport); + } + return (rval); +} + +/* FW does not set aside Loop id for MGMT Server/FFFFFAh */ +int +qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha) +{ + int loop_id = FC_NO_LOOP_ID; + int lid = NPH_MGMT_SERVER - vha->vp_idx; + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + + if (vha->vp_idx == 0) { + set_bit(NPH_MGMT_SERVER, ha->loop_id_map); + return NPH_MGMT_SERVER; + } + + /* pick id from high and work down to low */ + spin_lock_irqsave(&ha->vport_slock, flags); + for (; lid > 0; lid--) { + if (!test_bit(lid, vha->hw->loop_id_map)) { + set_bit(lid, vha->hw->loop_id_map); + loop_id = lid; + break; + } + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + + return loop_id; +} + +/* + * qla2x00_fabric_login + * Issue fabric login command. 
+ * + * Input: + * ha = adapter block pointer. + * device = pointer to FC device type structure. + * + * Returns: + * 0 - Login successfully + * 1 - Login failed + * 2 - Initiator device + * 3 - Fatal error + */ +int +qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, + uint16_t *next_loopid) +{ + int rval; + int retry; + uint16_t tmp_loopid; + uint16_t mb[MAILBOX_REGISTER_COUNT]; + struct qla_hw_data *ha = vha->hw; + + retry = 0; + tmp_loopid = 0; + + for (;;) { + ql_dbg(ql_dbg_disc, vha, 0x2000, + "Trying Fabric Login w/loop id 0x%04x for port " + "%02x%02x%02x.\n", + fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + + /* Login fcport on switch. */ + rval = ha->isp_ops->fabric_login(vha, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, mb, BIT_0); + if (rval != QLA_SUCCESS) { + return rval; + } + if (mb[0] == MBS_PORT_ID_USED) { + /* + * Device has another loop ID. The firmware team + * recommends the driver perform an implicit login with + * the specified ID again. The ID we just used is save + * here so we return with an ID that can be tried by + * the next login. + */ + retry++; + tmp_loopid = fcport->loop_id; + fcport->loop_id = mb[1]; + + ql_dbg(ql_dbg_disc, vha, 0x2001, + "Fabric Login: port in use - next loop " + "id=0x%04x, port id= %02x%02x%02x.\n", + fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + + } else if (mb[0] == MBS_COMMAND_COMPLETE) { + /* + * Login succeeded. + */ + if (retry) { + /* A retry occurred before. */ + *next_loopid = tmp_loopid; + } else { + /* + * No retry occurred before. Just increment the + * ID value for next login. + */ + *next_loopid = (fcport->loop_id + 1); + } + + if (mb[1] & BIT_0) { + fcport->port_type = FCT_INITIATOR; + } else { + fcport->port_type = FCT_TARGET; + if (mb[1] & BIT_1) { + fcport->flags |= FCF_FCP2_DEVICE; + } + } + + if (mb[10] & BIT_0) + fcport->supported_classes |= FC_COS_CLASS2; + if (mb[10] & BIT_1) + fcport->supported_classes |= FC_COS_CLASS3; + + if (IS_FWI2_CAPABLE(ha)) { + if (mb[10] & BIT_7) + fcport->flags |= + FCF_CONF_COMP_SUPPORTED; + } + + rval = QLA_SUCCESS; + break; + } else if (mb[0] == MBS_LOOP_ID_USED) { + /* + * Loop ID already used, try next loop ID. + */ + fcport->loop_id++; + rval = qla2x00_find_new_loop_id(vha, fcport); + if (rval != QLA_SUCCESS) { + /* Ran out of loop IDs to use */ + break; + } + } else if (mb[0] == MBS_COMMAND_ERROR) { + /* + * Firmware possibly timed out during login. If NO + * retries are left to do then the device is declared + * dead. + */ + *next_loopid = fcport->loop_id; + ha->isp_ops->fabric_logout(vha, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa); + qla2x00_mark_device_lost(vha, fcport, 1); + + rval = 1; + break; + } else { + /* + * unrecoverable / not handled error + */ + ql_dbg(ql_dbg_disc, vha, 0x2002, + "Failed=%x port_id=%02x%02x%02x loop_id=%x " + "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa, + fcport->loop_id, jiffies); + + *next_loopid = fcport->loop_id; + ha->isp_ops->fabric_logout(vha, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa); + qla2x00_clear_loop_id(fcport); + fcport->login_retry = 0; + + rval = 3; + break; + } + } + + return (rval); +} + +/* + * qla2x00_local_device_login + * Issue local device login command. + * + * Input: + * ha = adapter block pointer. + * loop_id = loop id of device to login to. 
+ * + * Returns (Where's the #define!!!!): + * 0 - Login successfully + * 1 - Login failed + * 3 - Fatal error + */ +int +qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + int rval; + uint16_t mb[MAILBOX_REGISTER_COUNT]; + + memset(mb, 0, sizeof(mb)); + rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0); + if (rval == QLA_SUCCESS) { + /* Interrogate mailbox registers for any errors */ + if (mb[0] == MBS_COMMAND_ERROR) + rval = 1; + else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR) + /* device not in PCB table */ + rval = 3; + } + + return (rval); +} + +/* + * qla2x00_loop_resync + * Resync with fibre channel devices. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = success + */ +int +qla2x00_loop_resync(scsi_qla_host_t *vha) +{ + int rval = QLA_SUCCESS; + uint32_t wait_time; + + clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); + if (vha->flags.online) { + if (!(rval = qla2x00_fw_ready(vha))) { + /* Wait at most MAX_TARGET RSCNs for a stable link. */ + wait_time = 256; + do { + if (!IS_QLAFX00(vha->hw)) { + /* + * Issue a marker after FW becomes + * ready. + */ + qla2x00_marker(vha, vha->hw->base_qpair, + 0, 0, MK_SYNC_ALL); + vha->marker_needed = 0; + } + + /* Remap devices on Loop. */ + clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + + if (IS_QLAFX00(vha->hw)) + qlafx00_configure_devices(vha); + else + qla2x00_configure_loop(vha); + + wait_time--; + } while (!atomic_read(&vha->loop_down_timer) && + !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) + && wait_time && (test_bit(LOOP_RESYNC_NEEDED, + &vha->dpc_flags))); + } + } + + if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) + return (QLA_FUNCTION_FAILED); + + if (rval) + ql_dbg(ql_dbg_disc, vha, 0x206c, + "%s *** FAILED ***.\n", __func__); + + return (rval); +} + +/* +* qla2x00_perform_loop_resync +* Description: This function will set the appropriate flags and call +* qla2x00_loop_resync. 
If successful loop will be resynced +* Arguments : scsi_qla_host_t pointer +* returm : Success or Failure +*/ + +int qla2x00_perform_loop_resync(scsi_qla_host_t *ha) +{ + int32_t rval = 0; + + if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) { + /*Configure the flags so that resync happens properly*/ + atomic_set(&ha->loop_down_timer, 0); + if (!(ha->device_flags & DFLG_NO_CABLE)) { + atomic_set(&ha->loop_state, LOOP_UP); + set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); + set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); + + rval = qla2x00_loop_resync(ha); + } else + atomic_set(&ha->loop_state, LOOP_DEAD); + + clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); + } + + return rval; +} + +/* Assumes idc_lock always held on entry */ +void +qla83xx_reset_ownership(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t drv_presence, drv_presence_mask; + uint32_t dev_part_info1, dev_part_info2, class_type; + uint32_t class_type_mask = 0x3; + uint16_t fcoe_other_function = 0xffff, i; + + if (IS_QLA8044(ha)) { + drv_presence = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_ACTIVE_INDEX); + dev_part_info1 = qla8044_rd_direct(vha, + QLA8044_CRB_DEV_PART_INFO_INDEX); + dev_part_info2 = qla8044_rd_direct(vha, + QLA8044_CRB_DEV_PART_INFO2); + } else { + qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); + qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1); + qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2); + } + for (i = 0; i < 8; i++) { + class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask); + if ((class_type == QLA83XX_CLASS_TYPE_FCOE) && + (i != ha->portnum)) { + fcoe_other_function = i; + break; + } + } + if (fcoe_other_function == 0xffff) { + for (i = 0; i < 8; i++) { + class_type = ((dev_part_info2 >> (i * 4)) & + class_type_mask); + if ((class_type == QLA83XX_CLASS_TYPE_FCOE) && + ((i + 8) != ha->portnum)) { + fcoe_other_function = i + 8; + break; + } + } + } + /* + * Prepare drv-presence mask based on fcoe functions present. + * However consider only valid physical fcoe function numbers (0-15). + */ + drv_presence_mask = ~((1 << (ha->portnum)) | + ((fcoe_other_function == 0xffff) ? + 0 : (1 << (fcoe_other_function)))); + + /* We are the reset owner iff: + * - No other protocol drivers present. + * - This is the lowest among fcoe functions. 
*/ + if (!(drv_presence & drv_presence_mask) && + (ha->portnum < fcoe_other_function)) { + ql_dbg(ql_dbg_p3p, vha, 0xb07f, + "This host is Reset owner.\n"); + ha->flags.nic_core_reset_owner = 1; + } +} + +static int +__qla83xx_set_drv_ack(scsi_qla_host_t *vha) +{ + int rval = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + uint32_t drv_ack; + + rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); + if (rval == QLA_SUCCESS) { + drv_ack |= (1 << ha->portnum); + rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack); + } + + return rval; +} + +static int +__qla83xx_clear_drv_ack(scsi_qla_host_t *vha) +{ + int rval = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + uint32_t drv_ack; + + rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); + if (rval == QLA_SUCCESS) { + drv_ack &= ~(1 << ha->portnum); + rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack); + } + + return rval; +} + +/* Assumes idc-lock always held on entry */ +void +qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t idc_audit_reg = 0, duration_secs = 0; + + switch (audit_type) { + case IDC_AUDIT_TIMESTAMP: + ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000); + idc_audit_reg = (ha->portnum) | + (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8); + qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg); + break; + + case IDC_AUDIT_COMPLETION: + duration_secs = ((jiffies_to_msecs(jiffies) - + jiffies_to_msecs(ha->idc_audit_ts)) / 1000); + idc_audit_reg = (ha->portnum) | + (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8); + qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg); + break; + + default: + ql_log(ql_log_warn, vha, 0xb078, + "Invalid audit type specified.\n"); + break; + } +} + +/* Assumes idc_lock always held on entry */ +static int +qla83xx_initiating_reset(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t idc_control, dev_state; + + __qla83xx_get_idc_control(vha, &idc_control); + if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) { + ql_log(ql_log_info, vha, 0xb080, + "NIC Core reset has been disabled. idc-control=0x%x\n", + idc_control); + return QLA_FUNCTION_FAILED; + } + + /* Set NEED-RESET iff in READY state and we are the reset-owner */ + qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state); + if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) { + qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, + QLA8XXX_DEV_NEED_RESET); + ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n"); + qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP); + } else { + ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", + qdev_state(dev_state)); + + /* SV: XXX: Is timeout required here? 
*/ + /* Wait for IDC state change READY -> NEED_RESET */ + while (dev_state == QLA8XXX_DEV_READY) { + qla83xx_idc_unlock(vha, 0); + msleep(200); + qla83xx_idc_lock(vha, 0); + qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state); + } + } + + /* Send IDC ack by writing to drv-ack register */ + __qla83xx_set_drv_ack(vha); + + return QLA_SUCCESS; +} + +int +__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control) +{ + return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control); +} + +int +__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control) +{ + return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control); +} + +static int +qla83xx_check_driver_presence(scsi_qla_host_t *vha) +{ + uint32_t drv_presence = 0; + struct qla_hw_data *ha = vha->hw; + + qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); + if (drv_presence & (1 << ha->portnum)) + return QLA_SUCCESS; + else + return QLA_TEST_FAILED; +} + +int +qla83xx_nic_core_reset(scsi_qla_host_t *vha) +{ + int rval = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_p3p, vha, 0xb058, + "Entered %s().\n", __func__); + + if (vha->device_flags & DFLG_DEV_FAILED) { + ql_log(ql_log_warn, vha, 0xb059, + "Device in unrecoverable FAILED state.\n"); + return QLA_FUNCTION_FAILED; + } + + qla83xx_idc_lock(vha, 0); + + if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xb05a, + "Function=0x%x has been removed from IDC participation.\n", + ha->portnum); + rval = QLA_FUNCTION_FAILED; + goto exit; + } + + qla83xx_reset_ownership(vha); + + rval = qla83xx_initiating_reset(vha); + + /* + * Perform reset if we are the reset-owner, + * else wait till IDC state changes to READY/FAILED. + */ + if (rval == QLA_SUCCESS) { + rval = qla83xx_idc_state_handler(vha); + + if (rval == QLA_SUCCESS) + ha->flags.nic_core_hung = 0; + __qla83xx_clear_drv_ack(vha); + } + +exit: + qla83xx_idc_unlock(vha, 0); + + ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__); + + return rval; +} + +int +qla2xxx_mctp_dump(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + int rval = QLA_FUNCTION_FAILED; + + if (!IS_MCTP_CAPABLE(ha)) { + /* This message can be removed from the final version */ + ql_log(ql_log_info, vha, 0x506d, + "This board is not MCTP capable\n"); + return rval; + } + + if (!ha->mctp_dump) { + ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev, + MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL); + + if (!ha->mctp_dump) { + ql_log(ql_log_warn, vha, 0x506e, + "Failed to allocate memory for mctp dump\n"); + return rval; + } + } + +#define MCTP_DUMP_STR_ADDR 0x00000000 + rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma, + MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x506f, + "Failed to capture mctp dump\n"); + } else { + ql_log(ql_log_info, vha, 0x5070, + "Mctp dump capture for host (%ld/%p).\n", + vha->host_no, ha->mctp_dump); + ha->mctp_dumped = 1; + } + + if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) { + ha->flags.nic_core_reset_hdlr_active = 1; + rval = qla83xx_restart_nic_firmware(vha); + if (rval) + /* NIC Core reset failed. 
*/ + ql_log(ql_log_warn, vha, 0x5071, + "Failed to restart nic firmware\n"); + else + ql_dbg(ql_dbg_p3p, vha, 0xb084, + "Restarted NIC firmware successfully.\n"); + ha->flags.nic_core_reset_hdlr_active = 0; + } + + return rval; + +} + +/* +* qla2x00_quiesce_io +* Description: This function will block the new I/Os +* Its not aborting any I/Os as context +* is not destroyed during quiescence +* Arguments: scsi_qla_host_t +* return : void +*/ +void +qla2x00_quiesce_io(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *vp, *tvp; + unsigned long flags; + + ql_dbg(ql_dbg_dpc, vha, 0x401d, + "Quiescing I/O - ha=%p.\n", ha); + + atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); + if (atomic_read(&vha->loop_state) != LOOP_DOWN) { + atomic_set(&vha->loop_state, LOOP_DOWN); + qla2x00_mark_all_devices_lost(vha); + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + + qla2x00_mark_all_devices_lost(vp); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + } else { + if (!atomic_read(&vha->loop_down_timer)) + atomic_set(&vha->loop_down_timer, + LOOP_DOWN_TIME); + } + /* Wait for pending cmds to complete */ + WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) + != QLA_SUCCESS); +} + +void +qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *vp, *tvp; + unsigned long flags; + fc_port_t *fcport; + u16 i; + + /* For ISP82XX, driver waits for completion of the commands. + * online flag should be set. + */ + if (!(IS_P3P_TYPE(ha))) + vha->flags.online = 0; + ha->flags.chip_reset_done = 0; + clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + vha->qla_stats.total_isp_aborts++; + + ql_log(ql_log_info, vha, 0x00af, + "Performing ISP error recovery - ha=%p.\n", ha); + + ha->flags.purge_mbox = 1; + /* For ISP82XX, reset_chip is just disabling interrupts. + * Driver waits for the completion of the commands. + * the interrupts need to be enabled. 
+ */ + if (!(IS_P3P_TYPE(ha))) + ha->isp_ops->reset_chip(vha); + + ha->link_data_rate = PORT_SPEED_UNKNOWN; + SAVE_TOPO(ha); + ha->flags.rida_fmt2 = 0; + ha->flags.n2n_ae = 0; + ha->flags.lip_ae = 0; + ha->current_topology = 0; + QLA_FW_STOPPED(ha); + ha->flags.fw_init_done = 0; + ha->chip_reset++; + ha->base_qpair->chip_reset = ha->chip_reset; + ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0; + ha->base_qpair->prev_completion_cnt = 0; + for (i = 0; i < ha->max_qpairs; i++) { + if (ha->queue_pair_map[i]) { + ha->queue_pair_map[i]->chip_reset = + ha->base_qpair->chip_reset; + ha->queue_pair_map[i]->cmd_cnt = + ha->queue_pair_map[i]->cmd_completion_cnt = 0; + ha->base_qpair->prev_completion_cnt = 0; + } + } + + /* purge MBox commands */ + spin_lock_irqsave(&ha->hardware_lock, flags); + if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) { + clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); + complete(&ha->mbx_intr_comp); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + i = 0; + while (atomic_read(&ha->num_pend_mbx_stage2) || + atomic_read(&ha->num_pend_mbx_stage1)) { + msleep(20); + i++; + if (i > 50) + break; + } + ha->flags.purge_mbox = 0; + + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + if (atomic_read(&vha->loop_state) != LOOP_DOWN) { + atomic_set(&vha->loop_state, LOOP_DOWN); + qla2x00_mark_all_devices_lost(vha); + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + + qla2x00_mark_all_devices_lost(vp); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + } else { + if (!atomic_read(&vha->loop_down_timer)) + atomic_set(&vha->loop_down_timer, + LOOP_DOWN_TIME); + } + + /* Clear all async request states across all VPs. */ + list_for_each_entry(fcport, &vha->vp_fcports, list) { + fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + fcport->scan_state = 0; + } + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + + list_for_each_entry(fcport, &vp->vp_fcports, list) + fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + + /* Make sure for ISP 82XX IO DMA is complete */ + if (IS_P3P_TYPE(ha)) { + qla82xx_chip_reset_cleanup(vha); + ql_log(ql_log_info, vha, 0x00b4, + "Done chip reset cleanup.\n"); + + /* Done waiting for pending commands. Reset online flag */ + vha->flags.online = 0; + } + + /* Requeue all commands in outstanding command list. */ + qla2x00_abort_all_cmds(vha, DID_RESET << 16); + /* memory barrier */ + wmb(); +} + +/* +* qla2x00_abort_isp +* Resets ISP and aborts all outstanding commands. +* +* Input: +* ha = adapter block pointer. 
+* +* Returns: +* 0 = success +*/ +int +qla2x00_abort_isp(scsi_qla_host_t *vha) +{ + int rval; + uint8_t status = 0; + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *vp, *tvp; + struct req_que *req = ha->req_q_map[0]; + unsigned long flags; + + if (vha->flags.online) { + qla2x00_abort_isp_cleanup(vha); + + vha->dport_status |= DPORT_DIAG_CHIP_RESET_IN_PROGRESS; + vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS; + + if (vha->hw->flags.port_isolated) + return status; + + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x803f, + "ISP Abort - ISP reg disconnect, exiting.\n"); + return status; + } + + if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) { + ha->flags.chip_reset_done = 1; + vha->flags.online = 1; + status = 0; + clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); + return status; + } + + if (IS_QLA8031(ha)) { + ql_dbg(ql_dbg_p3p, vha, 0xb05c, + "Clearing fcoe driver presence.\n"); + if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS) + ql_dbg(ql_dbg_p3p, vha, 0xb073, + "Error while clearing DRV-Presence.\n"); + } + + if (unlikely(pci_channel_offline(ha->pdev) && + ha->flags.pci_channel_io_perm_failure)) { + clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); + status = 0; + return status; + } + + switch (vha->qlini_mode) { + case QLA2XXX_INI_MODE_DISABLED: + if (!qla_tgt_mode_enabled(vha)) + return 0; + break; + case QLA2XXX_INI_MODE_DUAL: + if (!qla_dual_mode_enabled(vha) && + !qla_ini_mode_enabled(vha)) + return 0; + break; + case QLA2XXX_INI_MODE_ENABLED: + default: + break; + } + + ha->isp_ops->get_flash_version(vha, req->ring); + + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x803f, + "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n"); + return status; + } + ha->isp_ops->nvram_config(vha); + + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x803f, + "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n"); + return status; + } + if (!qla2x00_restart_isp(vha)) { + clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); + + if (!atomic_read(&vha->loop_down_timer)) { + /* + * Issue marker command only when we are going + * to start the I/O . + */ + vha->marker_needed = 1; + } + + vha->flags.online = 1; + + ha->isp_ops->enable_intrs(ha); + + ha->isp_abort_cnt = 0; + clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); + + if (IS_QLA81XX(ha) || IS_QLA8031(ha)) + qla2x00_get_fw_version(vha); + if (ha->fce) { + ha->flags.fce_enabled = 1; + memset(ha->fce, 0, + fce_calc_size(ha->fce_bufs)); + rval = qla2x00_enable_fce_trace(vha, + ha->fce_dma, ha->fce_bufs, ha->fce_mb, + &ha->fce_bufs); + if (rval) { + ql_log(ql_log_warn, vha, 0x8033, + "Unable to reinitialize FCE " + "(%d).\n", rval); + ha->flags.fce_enabled = 0; + } + } + + if (ha->eft) { + memset(ha->eft, 0, EFT_SIZE); + rval = qla2x00_enable_eft_trace(vha, + ha->eft_dma, EFT_NUM_BUFFERS); + if (rval) { + ql_log(ql_log_warn, vha, 0x8034, + "Unable to reinitialize EFT " + "(%d).\n", rval); + } + } + } else { /* failed the ISP abort */ + vha->flags.online = 1; + if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { + if (ha->isp_abort_cnt == 0) { + ql_log(ql_log_fatal, vha, 0x8035, + "ISP error recover failed - " + "board disabled.\n"); + /* + * The next call disables the board + * completely. 
+ */ + qla2x00_abort_isp_cleanup(vha); + vha->flags.online = 0; + clear_bit(ISP_ABORT_RETRY, + &vha->dpc_flags); + status = 0; + } else { /* schedule another ISP abort */ + ha->isp_abort_cnt--; + ql_dbg(ql_dbg_taskm, vha, 0x8020, + "ISP abort - retry remaining %d.\n", + ha->isp_abort_cnt); + status = 1; + } + } else { + ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; + ql_dbg(ql_dbg_taskm, vha, 0x8021, + "ISP error recovery - retrying (%d) " + "more times.\n", ha->isp_abort_cnt); + set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); + status = 1; + } + } + + } + + if (vha->hw->flags.port_isolated) { + qla2x00_abort_isp_cleanup(vha); + return status; + } + + if (!status) { + ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__); + qla2x00_configure_hba(vha); + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { + if (vp->vp_idx) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + + qla2x00_vp_abort_isp(vp); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + + if (IS_QLA8031(ha)) { + ql_dbg(ql_dbg_p3p, vha, 0xb05d, + "Setting back fcoe driver presence.\n"); + if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS) + ql_dbg(ql_dbg_p3p, vha, 0xb074, + "Error while setting DRV-Presence.\n"); + } + } else { + ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n", + __func__); + } + + return(status); +} + +/* +* qla2x00_restart_isp +* restarts the ISP after a reset +* +* Input: +* ha = adapter block pointer. +* +* Returns: +* 0 = success +*/ +static int +qla2x00_restart_isp(scsi_qla_host_t *vha) +{ + int status; + struct qla_hw_data *ha = vha->hw; + + /* If firmware needs to be loaded */ + if (qla2x00_isp_firmware(vha)) { + vha->flags.online = 0; + status = ha->isp_ops->chip_diag(vha); + if (status) + return status; + status = qla2x00_setup_chip(vha); + if (status) + return status; + } + + status = qla2x00_init_rings(vha); + if (status) + return status; + + clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); + ha->flags.chip_reset_done = 1; + + /* Initialize the queues in use */ + qla25xx_init_queues(ha); + + status = qla2x00_fw_ready(vha); + if (status) { + /* if no cable then assume it's good */ + return vha->device_flags & DFLG_NO_CABLE ? 0 : status; + } + + /* Issue a marker after FW becomes ready. */ + qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + + return 0; +} + +static int +qla25xx_init_queues(struct qla_hw_data *ha) +{ + struct rsp_que *rsp = NULL; + struct req_que *req = NULL; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + int ret = -1; + int i; + + for (i = 1; i < ha->max_rsp_queues; i++) { + rsp = ha->rsp_q_map[i]; + if (rsp && test_bit(i, ha->rsp_qid_map)) { + rsp->options &= ~BIT_0; + ret = qla25xx_init_rsp_que(base_vha, rsp); + if (ret != QLA_SUCCESS) + ql_dbg(ql_dbg_init, base_vha, 0x00ff, + "%s Rsp que: %d init failed.\n", + __func__, rsp->id); + else + ql_dbg(ql_dbg_init, base_vha, 0x0100, + "%s Rsp que: %d inited.\n", + __func__, rsp->id); + } + } + for (i = 1; i < ha->max_req_queues; i++) { + req = ha->req_q_map[i]; + if (req && test_bit(i, ha->req_qid_map)) { + /* Clear outstanding commands array. 
*/
+			req->options &= ~BIT_0;
+			ret = qla25xx_init_req_que(base_vha, req);
+			if (ret != QLA_SUCCESS)
+				ql_dbg(ql_dbg_init, base_vha, 0x0101,
+				    "%s Req que: %d init failed.\n",
+				    __func__, req->id);
+			else
+				ql_dbg(ql_dbg_init, base_vha, 0x0102,
+				    "%s Req que: %d inited.\n",
+				    __func__, req->id);
+		}
+	}
+	return ret;
+}
+
+/*
+* qla2x00_reset_adapter
+*	Reset adapter.
+*
+* Input:
+*	ha = adapter block pointer.
+*/
+int
+qla2x00_reset_adapter(scsi_qla_host_t *vha)
+{
+	unsigned long flags = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	vha->flags.online = 0;
+	ha->isp_ops->disable_intrs(ha);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+	rd_reg_word(&reg->hccr);		/* PCI Posting. */
+	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
+	rd_reg_word(&reg->hccr);		/* PCI Posting. */
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_SUCCESS;
+}
+
+int
+qla24xx_reset_adapter(scsi_qla_host_t *vha)
+{
+	unsigned long flags = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	if (IS_P3P_TYPE(ha))
+		return QLA_SUCCESS;
+
+	vha->flags.online = 0;
+	ha->isp_ops->disable_intrs(ha);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
+	rd_reg_dword(&reg->hccr);
+	wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+	rd_reg_dword(&reg->hccr);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	if (IS_NOPOLLING_TYPE(ha))
+		ha->isp_ops->enable_intrs(ha);
+
+	return QLA_SUCCESS;
+}
+
+/* On sparc systems, obtain port and node WWN from firmware
+ * properties.
+ */
+static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
+					struct nvram_24xx *nv)
+{
+#ifdef CONFIG_SPARC
+	struct qla_hw_data *ha = vha->hw;
+	struct pci_dev *pdev = ha->pdev;
+	struct device_node *dp = pci_device_to_OF_node(pdev);
+	const u8 *val;
+	int len;
+
+	val = of_get_property(dp, "port-wwn", &len);
+	if (val && len >= WWN_SIZE)
+		memcpy(nv->port_name, val, WWN_SIZE);
+
+	val = of_get_property(dp, "node-wwn", &len);
+	if (val && len >= WWN_SIZE)
+		memcpy(nv->node_name, val, WWN_SIZE);
+#endif
+}
+
+int
+qla24xx_nvram_config(scsi_qla_host_t *vha)
+{
+	int rval;
+	struct init_cb_24xx *icb;
+	struct nvram_24xx *nv;
+	__le32 *dptr;
+	uint8_t *dptr1, *dptr2;
+	uint32_t chksum;
+	uint16_t cnt;
+	struct qla_hw_data *ha = vha->hw;
+
+	rval = QLA_SUCCESS;
+	icb = (struct init_cb_24xx *)ha->init_cb;
+	nv = ha->nvram;
+
+	/* Determine NVRAM starting address. */
+	if (ha->port_no == 0) {
+		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
+		ha->vpd_base = FA_NVRAM_VPD0_ADDR;
+	} else {
+		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
+		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
+	}
+
+	ha->nvram_size = sizeof(*nv);
+	ha->vpd_size = FA_NVRAM_VPD_SIZE;
+
+	/* Get VPD data into cache */
+	ha->vpd = ha->nvram + VPD_OFFSET;
+	ha->isp_ops->read_nvram(vha, ha->vpd,
+	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
+
+	/* Get NVRAM data into cache and calculate checksum. */
+	dptr = (__force __le32 *)nv;
+	ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
+	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
+		chksum += le32_to_cpu(*dptr);
+
+	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
+	    "Contents of NVRAM\n");
+	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
+	    nv, ha->nvram_size);
+
+	/* Bad NVRAM data, set defaults parameters.
*/ + if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) || + le16_to_cpu(nv->nvram_version) < ICB_VERSION) { + /* Reset NVRAM data. */ + ql_log(ql_log_warn, vha, 0x006b, + "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n", + chksum, nv->id, nv->nvram_version); + ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv)); + ql_log(ql_log_warn, vha, 0x006c, + "Falling back to functioning (yet invalid -- WWPN) " + "defaults.\n"); + + /* + * Set default initialization control block. + */ + memset(nv, 0, ha->nvram_size); + nv->nvram_version = cpu_to_le16(ICB_VERSION); + nv->version = cpu_to_le16(ICB_VERSION); + nv->frame_payload_size = cpu_to_le16(2048); + nv->execution_throttle = cpu_to_le16(0xFFFF); + nv->exchange_count = cpu_to_le16(0); + nv->hard_address = cpu_to_le16(124); + nv->port_name[0] = 0x21; + nv->port_name[1] = 0x00 + ha->port_no + 1; + nv->port_name[2] = 0x00; + nv->port_name[3] = 0xe0; + nv->port_name[4] = 0x8b; + nv->port_name[5] = 0x1c; + nv->port_name[6] = 0x55; + nv->port_name[7] = 0x86; + nv->node_name[0] = 0x20; + nv->node_name[1] = 0x00; + nv->node_name[2] = 0x00; + nv->node_name[3] = 0xe0; + nv->node_name[4] = 0x8b; + nv->node_name[5] = 0x1c; + nv->node_name[6] = 0x55; + nv->node_name[7] = 0x86; + qla24xx_nvram_wwn_from_ofw(vha, nv); + nv->login_retry_count = cpu_to_le16(8); + nv->interrupt_delay_timer = cpu_to_le16(0); + nv->login_timeout = cpu_to_le16(0); + nv->firmware_options_1 = + cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); + nv->firmware_options_2 = cpu_to_le32(2 << 4); + nv->firmware_options_2 |= cpu_to_le32(BIT_12); + nv->firmware_options_3 = cpu_to_le32(2 << 13); + nv->host_p = cpu_to_le32(BIT_11|BIT_10); + nv->efi_parameters = cpu_to_le32(0); + nv->reset_delay = 5; + nv->max_luns_per_target = cpu_to_le16(128); + nv->port_down_retry_count = cpu_to_le16(30); + nv->link_down_timeout = cpu_to_le16(30); + + rval = 1; + } + + if (qla_tgt_mode_enabled(vha)) { + /* Don't enable full login after initial LIP */ + nv->firmware_options_1 &= cpu_to_le32(~BIT_13); + /* Don't enable LIP full login for initiator */ + nv->host_p &= cpu_to_le32(~BIT_10); + } + + qlt_24xx_config_nvram_stage1(vha, nv); + + /* Reset Initialization control block */ + memset(icb, 0, ha->init_cb_size); + + /* Copy 1st segment. */ + dptr1 = (uint8_t *)icb; + dptr2 = (uint8_t *)&nv->version; + cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; + while (cnt--) + *dptr1++ = *dptr2++; + + icb->login_retry_count = nv->login_retry_count; + icb->link_down_on_nos = nv->link_down_on_nos; + + /* Copy 2nd segment. */ + dptr1 = (uint8_t *)&icb->interrupt_delay_timer; + dptr2 = (uint8_t *)&nv->interrupt_delay_timer; + cnt = (uint8_t *)&icb->reserved_3 - + (uint8_t *)&icb->interrupt_delay_timer; + while (cnt--) + *dptr1++ = *dptr2++; + ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); + /* + * Setup driver NVRAM options. + */ + qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), + "QLA2462"); + + qlt_24xx_config_nvram_stage2(vha, icb); + + if (nv->host_p & cpu_to_le32(BIT_15)) { + /* Use alternate WWN? */ + memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); + memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); + } + + /* Prepare nodename */ + if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) { + /* + * Firmware will apply the following mask if the nodename was + * not provided. + */ + memcpy(icb->node_name, icb->port_name, WWN_SIZE); + icb->node_name[0] &= 0xF0; + } + + /* Set host adapter parameters. 
*/ + ha->flags.disable_risc_code_load = 0; + ha->flags.enable_lip_reset = 0; + ha->flags.enable_lip_full_login = + le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0; + ha->flags.enable_target_reset = + le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0; + ha->flags.enable_led_scheme = 0; + ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0; + + ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & + (BIT_6 | BIT_5 | BIT_4)) >> 4; + + memcpy(ha->fw_seriallink_options24, nv->seriallink_options, + sizeof(ha->fw_seriallink_options24)); + + /* save HBA serial number */ + ha->serial0 = icb->port_name[5]; + ha->serial1 = icb->port_name[6]; + ha->serial2 = icb->port_name[7]; + memcpy(vha->node_name, icb->node_name, WWN_SIZE); + memcpy(vha->port_name, icb->port_name, WWN_SIZE); + + icb->execution_throttle = cpu_to_le16(0xFFFF); + + ha->retry_count = le16_to_cpu(nv->login_retry_count); + + /* Set minimum login_timeout to 4 seconds. */ + if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) + nv->login_timeout = cpu_to_le16(ql2xlogintimeout); + if (le16_to_cpu(nv->login_timeout) < 4) + nv->login_timeout = cpu_to_le16(4); + ha->login_timeout = le16_to_cpu(nv->login_timeout); + + /* Set minimum RATOV to 100 tenths of a second. */ + ha->r_a_tov = 100; + + ha->loop_reset_delay = nv->reset_delay; + + /* Link Down Timeout = 0: + * + * When Port Down timer expires we will start returning + * I/O's to OS with "DID_NO_CONNECT". + * + * Link Down Timeout != 0: + * + * The driver waits for the link to come up after link down + * before returning I/Os to OS with "DID_NO_CONNECT". + */ + if (le16_to_cpu(nv->link_down_timeout) == 0) { + ha->loop_down_abort_time = + (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); + } else { + ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); + ha->loop_down_abort_time = + (LOOP_DOWN_TIME - ha->link_down_timeout); + } + + /* Need enough time to try and get the port back. */ + ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); + if (qlport_down_retry) + ha->port_down_retry_count = qlport_down_retry; + + /* Set login_retry_count */ + ha->login_retry_count = le16_to_cpu(nv->login_retry_count); + if (ha->port_down_retry_count == + le16_to_cpu(nv->port_down_retry_count) && + ha->port_down_retry_count > 3) + ha->login_retry_count = ha->port_down_retry_count; + else if (ha->port_down_retry_count > (int)ha->login_retry_count) + ha->login_retry_count = ha->port_down_retry_count; + if (ql2xloginretrycount) + ha->login_retry_count = ql2xloginretrycount; + + /* N2N: driver will initiate Login instead of FW */ + icb->firmware_options_3 |= cpu_to_le32(BIT_8); + + /* Enable ZIO. */ + if (!vha->flags.init_done) { + ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & + (BIT_3 | BIT_2 | BIT_1 | BIT_0); + ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
+ le16_to_cpu(icb->interrupt_delay_timer) : 2; + } + icb->firmware_options_2 &= cpu_to_le32( + ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); + if (ha->zio_mode != QLA_ZIO_DISABLED) { + ha->zio_mode = QLA_ZIO_MODE_6; + + ql_log(ql_log_info, vha, 0x006f, + "ZIO mode %d enabled; timer delay (%d us).\n", + ha->zio_mode, ha->zio_timer * 100); + + icb->firmware_options_2 |= cpu_to_le32( + (uint32_t)ha->zio_mode); + icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); + } + + if (rval) { + ql_log(ql_log_warn, vha, 0x0070, + "NVRAM configuration failed.\n"); + } + return (rval); +} + +static void +qla27xx_print_image(struct scsi_qla_host *vha, char *name, + struct qla27xx_image_status *image_status) +{ + ql_dbg(ql_dbg_init, vha, 0x018b, + "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n", + name, "status", + image_status->image_status_mask, + le16_to_cpu(image_status->generation), + image_status->ver_major, + image_status->ver_minor, + image_status->bitmap, + le32_to_cpu(image_status->checksum), + le32_to_cpu(image_status->signature)); +} + +static bool +qla28xx_check_aux_image_status_signature( + struct qla27xx_image_status *image_status) +{ + ulong signature = le32_to_cpu(image_status->signature); + + return signature != QLA28XX_AUX_IMG_STATUS_SIGN; +} + +static bool +qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status) +{ + ulong signature = le32_to_cpu(image_status->signature); + + return + signature != QLA27XX_IMG_STATUS_SIGN && + signature != QLA28XX_IMG_STATUS_SIGN; +} + +static ulong +qla27xx_image_status_checksum(struct qla27xx_image_status *image_status) +{ + __le32 *p = (__force __le32 *)image_status; + uint n = sizeof(*image_status) / sizeof(*p); + uint32_t sum = 0; + + for ( ; n--; p++) + sum += le32_to_cpup(p); + + return sum; +} + +static inline uint +qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask) +{ + return aux->bitmap & bitmask ? 
+ QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE; +} + +static void +qla28xx_component_status( + struct active_regions *active_regions, struct qla27xx_image_status *aux) +{ + active_regions->aux.board_config = + qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG); + + active_regions->aux.vpd_nvram = + qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM); + + active_regions->aux.npiv_config_0_1 = + qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1); + + active_regions->aux.npiv_config_2_3 = + qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3); + + active_regions->aux.nvme_params = + qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NVME_PARAMS); +} + +static int +qla27xx_compare_image_generation( + struct qla27xx_image_status *pri_image_status, + struct qla27xx_image_status *sec_image_status) +{ + /* calculate generation delta as uint16 (this accounts for wrap) */ + int16_t delta = + le16_to_cpu(pri_image_status->generation) - + le16_to_cpu(sec_image_status->generation); + + ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta); + + return delta; +} + +void +qla28xx_get_aux_images( + struct scsi_qla_host *vha, struct active_regions *active_regions) +{ + struct qla_hw_data *ha = vha->hw; + struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status; + bool valid_pri_image = false, valid_sec_image = false; + bool active_pri_image = false, active_sec_image = false; + + if (!ha->flt_region_aux_img_status_pri) { + ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n"); + goto check_sec_image; + } + + qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status, + ha->flt_region_aux_img_status_pri, + sizeof(pri_aux_image_status) >> 2); + qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status); + + if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) { + ql_dbg(ql_dbg_init, vha, 0x018b, + "Primary aux image signature (%#x) not valid\n", + le32_to_cpu(pri_aux_image_status.signature)); + goto check_sec_image; + } + + if (qla27xx_image_status_checksum(&pri_aux_image_status)) { + ql_dbg(ql_dbg_init, vha, 0x018c, + "Primary aux image checksum failed\n"); + goto check_sec_image; + } + + valid_pri_image = true; + + if (pri_aux_image_status.image_status_mask & 1) { + ql_dbg(ql_dbg_init, vha, 0x018d, + "Primary aux image is active\n"); + active_pri_image = true; + } + +check_sec_image: + if (!ha->flt_region_aux_img_status_sec) { + ql_dbg(ql_dbg_init, vha, 0x018a, + "Secondary aux image not addressed\n"); + goto check_valid_image; + } + + qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status, + ha->flt_region_aux_img_status_sec, + sizeof(sec_aux_image_status) >> 2); + qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status); + + if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) { + ql_dbg(ql_dbg_init, vha, 0x018b, + "Secondary aux image signature (%#x) not valid\n", + le32_to_cpu(sec_aux_image_status.signature)); + goto check_valid_image; + } + + if (qla27xx_image_status_checksum(&sec_aux_image_status)) { + ql_dbg(ql_dbg_init, vha, 0x018c, + "Secondary aux image checksum failed\n"); + goto check_valid_image; + } + + valid_sec_image = true; + + if (sec_aux_image_status.image_status_mask & 1) { + ql_dbg(ql_dbg_init, vha, 0x018d, + "Secondary aux image is active\n"); + active_sec_image = true; + } + +check_valid_image: + if (valid_pri_image && active_pri_image && + valid_sec_image && active_sec_image) { + if 
(qla27xx_compare_image_generation(&pri_aux_image_status, + &sec_aux_image_status) >= 0) { + qla28xx_component_status(active_regions, + &pri_aux_image_status); + } else { + qla28xx_component_status(active_regions, + &sec_aux_image_status); + } + } else if (valid_pri_image && active_pri_image) { + qla28xx_component_status(active_regions, &pri_aux_image_status); + } else if (valid_sec_image && active_sec_image) { + qla28xx_component_status(active_regions, &sec_aux_image_status); + } + + ql_dbg(ql_dbg_init, vha, 0x018f, + "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u, NVME=%u\n", + active_regions->aux.board_config, + active_regions->aux.vpd_nvram, + active_regions->aux.npiv_config_0_1, + active_regions->aux.npiv_config_2_3, + active_regions->aux.nvme_params); +} + +void +qla27xx_get_active_image(struct scsi_qla_host *vha, + struct active_regions *active_regions) +{ + struct qla_hw_data *ha = vha->hw; + struct qla27xx_image_status pri_image_status, sec_image_status; + bool valid_pri_image = false, valid_sec_image = false; + bool active_pri_image = false, active_sec_image = false; + + if (!ha->flt_region_img_status_pri) { + ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n"); + goto check_sec_image; + } + + if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status, + ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) != + QLA_SUCCESS) { + WARN_ON_ONCE(true); + goto check_sec_image; + } + qla27xx_print_image(vha, "Primary image", &pri_image_status); + + if (qla27xx_check_image_status_signature(&pri_image_status)) { + ql_dbg(ql_dbg_init, vha, 0x018b, + "Primary image signature (%#x) not valid\n", + le32_to_cpu(pri_image_status.signature)); + goto check_sec_image; + } + + if (qla27xx_image_status_checksum(&pri_image_status)) { + ql_dbg(ql_dbg_init, vha, 0x018c, + "Primary image checksum failed\n"); + goto check_sec_image; + } + + valid_pri_image = true; + + if (pri_image_status.image_status_mask & 1) { + ql_dbg(ql_dbg_init, vha, 0x018d, + "Primary image is active\n"); + active_pri_image = true; + } + +check_sec_image: + if (!ha->flt_region_img_status_sec) { + ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n"); + goto check_valid_image; + } + + qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status), + ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2); + qla27xx_print_image(vha, "Secondary image", &sec_image_status); + + if (qla27xx_check_image_status_signature(&sec_image_status)) { + ql_dbg(ql_dbg_init, vha, 0x018b, + "Secondary image signature (%#x) not valid\n", + le32_to_cpu(sec_image_status.signature)); + goto check_valid_image; + } + + if (qla27xx_image_status_checksum(&sec_image_status)) { + ql_dbg(ql_dbg_init, vha, 0x018c, + "Secondary image checksum failed\n"); + goto check_valid_image; + } + + valid_sec_image = true; + + if (sec_image_status.image_status_mask & 1) { + ql_dbg(ql_dbg_init, vha, 0x018d, + "Secondary image is active\n"); + active_sec_image = true; + } + +check_valid_image: + if (valid_pri_image && active_pri_image) + active_regions->global = QLA27XX_PRIMARY_IMAGE; + + if (valid_sec_image && active_sec_image) { + if (!active_regions->global || + qla27xx_compare_image_generation( + &pri_image_status, &sec_image_status) < 0) { + active_regions->global = QLA27XX_SECONDARY_IMAGE; + } + } + + ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n", + active_regions->global == QLA27XX_DEFAULT_IMAGE ? + "default (boot/fw)" : + active_regions->global == QLA27XX_PRIMARY_IMAGE ? 
+ "primary" : + active_regions->global == QLA27XX_SECONDARY_IMAGE ? + "secondary" : "invalid", + active_regions->global); +} + +bool qla24xx_risc_firmware_invalid(uint32_t *dword) +{ + return + !(dword[4] | dword[5] | dword[6] | dword[7]) || + !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]); +} + +static int +qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, + uint32_t faddr) +{ + int rval; + uint templates, segments, fragment; + ulong i; + uint j; + ulong dlen; + uint32_t *dcode; + uint32_t risc_addr, risc_size, risc_attr = 0; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + struct fwdt *fwdt = ha->fwdt; + + ql_dbg(ql_dbg_init, vha, 0x008b, + "FW: Loading firmware from flash (%x).\n", faddr); + + dcode = (uint32_t *)req->ring; + qla24xx_read_flash_data(vha, dcode, faddr, 8); + if (qla24xx_risc_firmware_invalid(dcode)) { + ql_log(ql_log_fatal, vha, 0x008c, + "Unable to verify the integrity of flash firmware " + "image.\n"); + ql_log(ql_log_fatal, vha, 0x008d, + "Firmware data: %08x %08x %08x %08x.\n", + dcode[0], dcode[1], dcode[2], dcode[3]); + + return QLA_FUNCTION_FAILED; + } + + dcode = (uint32_t *)req->ring; + *srisc_addr = 0; + segments = FA_RISC_CODE_SEGMENTS; + for (j = 0; j < segments; j++) { + ql_dbg(ql_dbg_init, vha, 0x008d, + "-> Loading segment %u...\n", j); + qla24xx_read_flash_data(vha, dcode, faddr, 10); + risc_addr = be32_to_cpu((__force __be32)dcode[2]); + risc_size = be32_to_cpu((__force __be32)dcode[3]); + if (!*srisc_addr) { + *srisc_addr = risc_addr; + risc_attr = be32_to_cpu((__force __be32)dcode[9]); + } + + dlen = ha->fw_transfer_size >> 2; + for (fragment = 0; risc_size; fragment++) { + if (dlen > risc_size) + dlen = risc_size; + + ql_dbg(ql_dbg_init, vha, 0x008e, + "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n", + fragment, risc_addr, faddr, dlen); + qla24xx_read_flash_data(vha, dcode, faddr, dlen); + for (i = 0; i < dlen; i++) + dcode[i] = swab32(dcode[i]); + + rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); + if (rval) { + ql_log(ql_log_fatal, vha, 0x008f, + "-> Failed load firmware fragment %u.\n", + fragment); + return QLA_FUNCTION_FAILED; + } + + faddr += dlen; + risc_addr += dlen; + risc_size -= dlen; + } + } + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return QLA_SUCCESS; + + templates = (risc_attr & BIT_9) ? 
2 : 1; + ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates); + for (j = 0; j < templates; j++, fwdt++) { + vfree(fwdt->template); + fwdt->template = NULL; + fwdt->length = 0; + + dcode = (uint32_t *)req->ring; + qla24xx_read_flash_data(vha, dcode, faddr, 7); + risc_size = be32_to_cpu((__force __be32)dcode[2]); + ql_dbg(ql_dbg_init, vha, 0x0161, + "-> fwdt%u template array at %#x (%#x dwords)\n", + j, faddr, risc_size); + if (!risc_size || !~risc_size) { + ql_dbg(ql_dbg_init, vha, 0x0162, + "-> fwdt%u failed to read array\n", j); + goto failed; + } + + /* skip header and ignore checksum */ + faddr += 7; + risc_size -= 8; + + ql_dbg(ql_dbg_init, vha, 0x0163, + "-> fwdt%u template allocate template %#x words...\n", + j, risc_size); + fwdt->template = vmalloc_array(risc_size, sizeof(*dcode)); + if (!fwdt->template) { + ql_log(ql_log_warn, vha, 0x0164, + "-> fwdt%u failed allocate template.\n", j); + goto failed; + } + + dcode = fwdt->template; + qla24xx_read_flash_data(vha, dcode, faddr, risc_size); + + if (!qla27xx_fwdt_template_valid(dcode)) { + ql_log(ql_log_warn, vha, 0x0165, + "-> fwdt%u failed template validate\n", j); + goto failed; + } + + dlen = qla27xx_fwdt_template_size(dcode); + ql_dbg(ql_dbg_init, vha, 0x0166, + "-> fwdt%u template size %#lx bytes (%#lx words)\n", + j, dlen, dlen / sizeof(*dcode)); + if (dlen > risc_size * sizeof(*dcode)) { + ql_log(ql_log_warn, vha, 0x0167, + "-> fwdt%u template exceeds array (%-lu bytes)\n", + j, dlen - risc_size * sizeof(*dcode)); + goto failed; + } + + fwdt->length = dlen; + ql_dbg(ql_dbg_init, vha, 0x0168, + "-> fwdt%u loaded template ok\n", j); + + faddr += risc_size + 1; + } + + return QLA_SUCCESS; + +failed: + vfree(fwdt->template); + fwdt->template = NULL; + fwdt->length = 0; + + return QLA_SUCCESS; +} + +#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/" + +int +qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) +{ + int rval; + int i, fragment; + uint16_t *wcode; + __be16 *fwcode; + uint32_t risc_addr, risc_size, fwclen, wlen, *seg; + struct fw_blob *blob; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + + /* Load firmware blob. */ + blob = qla2x00_request_firmware(vha); + if (!blob) { + ql_log(ql_log_info, vha, 0x0083, + "Firmware image unavailable.\n"); + ql_log(ql_log_info, vha, 0x0084, + "Firmware images can be retrieved from: "QLA_FW_URL ".\n"); + return QLA_FUNCTION_FAILED; + } + + rval = QLA_SUCCESS; + + wcode = (uint16_t *)req->ring; + *srisc_addr = 0; + fwcode = (__force __be16 *)blob->fw->data; + fwclen = 0; + + /* Validate firmware image by checking version. */ + if (blob->fw->size < 8 * sizeof(uint16_t)) { + ql_log(ql_log_fatal, vha, 0x0085, + "Unable to verify integrity of firmware image (%zd).\n", + blob->fw->size); + goto fail_fw_integrity; + } + for (i = 0; i < 4; i++) + wcode[i] = be16_to_cpu(fwcode[i + 4]); + if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff && + wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 && + wcode[2] == 0 && wcode[3] == 0)) { + ql_log(ql_log_fatal, vha, 0x0086, + "Unable to verify integrity of firmware image.\n"); + ql_log(ql_log_fatal, vha, 0x0087, + "Firmware data: %04x %04x %04x %04x.\n", + wcode[0], wcode[1], wcode[2], wcode[3]); + goto fail_fw_integrity; + } + + seg = blob->segs; + while (*seg && rval == QLA_SUCCESS) { + risc_addr = *seg; + *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr; + risc_size = be16_to_cpu(fwcode[3]); + + /* Validate firmware image size. 
*/ + fwclen += risc_size * sizeof(uint16_t); + if (blob->fw->size < fwclen) { + ql_log(ql_log_fatal, vha, 0x0088, + "Unable to verify integrity of firmware image " + "(%zd).\n", blob->fw->size); + goto fail_fw_integrity; + } + + fragment = 0; + while (risc_size > 0 && rval == QLA_SUCCESS) { + wlen = (uint16_t)(ha->fw_transfer_size >> 1); + if (wlen > risc_size) + wlen = risc_size; + ql_dbg(ql_dbg_init, vha, 0x0089, + "Loading risc segment@ risc addr %x number of " + "words 0x%x.\n", risc_addr, wlen); + + for (i = 0; i < wlen; i++) + wcode[i] = swab16((__force u32)fwcode[i]); + + rval = qla2x00_load_ram(vha, req->dma, risc_addr, + wlen); + if (rval) { + ql_log(ql_log_fatal, vha, 0x008a, + "Failed to load segment %d of firmware.\n", + fragment); + break; + } + + fwcode += wlen; + risc_addr += wlen; + risc_size -= wlen; + fragment++; + } + + /* Next segment. */ + seg++; + } + return rval; + +fail_fw_integrity: + return QLA_FUNCTION_FAILED; +} + +static int +qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) +{ + int rval; + uint templates, segments, fragment; + uint32_t *dcode; + ulong dlen; + uint32_t risc_addr, risc_size, risc_attr = 0; + ulong i; + uint j; + struct fw_blob *blob; + __be32 *fwcode; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + struct fwdt *fwdt = ha->fwdt; + + ql_dbg(ql_dbg_init, vha, 0x0090, + "-> FW: Loading via request-firmware.\n"); + + blob = qla2x00_request_firmware(vha); + if (!blob) { + ql_log(ql_log_warn, vha, 0x0092, + "-> Firmware file not found.\n"); + + return QLA_FUNCTION_FAILED; + } + + fwcode = (__force __be32 *)blob->fw->data; + dcode = (__force uint32_t *)fwcode; + if (qla24xx_risc_firmware_invalid(dcode)) { + ql_log(ql_log_fatal, vha, 0x0093, + "Unable to verify integrity of firmware image (%zd).\n", + blob->fw->size); + ql_log(ql_log_fatal, vha, 0x0095, + "Firmware data: %08x %08x %08x %08x.\n", + dcode[0], dcode[1], dcode[2], dcode[3]); + return QLA_FUNCTION_FAILED; + } + + dcode = (uint32_t *)req->ring; + *srisc_addr = 0; + segments = FA_RISC_CODE_SEGMENTS; + for (j = 0; j < segments; j++) { + ql_dbg(ql_dbg_init, vha, 0x0096, + "-> Loading segment %u...\n", j); + risc_addr = be32_to_cpu(fwcode[2]); + risc_size = be32_to_cpu(fwcode[3]); + + if (!*srisc_addr) { + *srisc_addr = risc_addr; + risc_attr = be32_to_cpu(fwcode[9]); + } + + dlen = ha->fw_transfer_size >> 2; + for (fragment = 0; risc_size; fragment++) { + if (dlen > risc_size) + dlen = risc_size; + + ql_dbg(ql_dbg_init, vha, 0x0097, + "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n", + fragment, risc_addr, + (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data), + dlen); + + for (i = 0; i < dlen; i++) + dcode[i] = swab32((__force u32)fwcode[i]); + + rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); + if (rval) { + ql_log(ql_log_fatal, vha, 0x0098, + "-> Failed load firmware fragment %u.\n", + fragment); + return QLA_FUNCTION_FAILED; + } + + fwcode += dlen; + risc_addr += dlen; + risc_size -= dlen; + } + } + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return QLA_SUCCESS; + + templates = (risc_attr & BIT_9) ? 
2 : 1; + ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates); + for (j = 0; j < templates; j++, fwdt++) { + vfree(fwdt->template); + fwdt->template = NULL; + fwdt->length = 0; + + risc_size = be32_to_cpu(fwcode[2]); + ql_dbg(ql_dbg_init, vha, 0x0171, + "-> fwdt%u template array at %#x (%#x dwords)\n", + j, (uint32_t)((void *)fwcode - (void *)blob->fw->data), + risc_size); + if (!risc_size || !~risc_size) { + ql_dbg(ql_dbg_init, vha, 0x0172, + "-> fwdt%u failed to read array\n", j); + goto failed; + } + + /* skip header and ignore checksum */ + fwcode += 7; + risc_size -= 8; + + ql_dbg(ql_dbg_init, vha, 0x0173, + "-> fwdt%u template allocate template %#x words...\n", + j, risc_size); + fwdt->template = vmalloc_array(risc_size, sizeof(*dcode)); + if (!fwdt->template) { + ql_log(ql_log_warn, vha, 0x0174, + "-> fwdt%u failed allocate template.\n", j); + goto failed; + } + + dcode = fwdt->template; + for (i = 0; i < risc_size; i++) + dcode[i] = (__force u32)fwcode[i]; + + if (!qla27xx_fwdt_template_valid(dcode)) { + ql_log(ql_log_warn, vha, 0x0175, + "-> fwdt%u failed template validate\n", j); + goto failed; + } + + dlen = qla27xx_fwdt_template_size(dcode); + ql_dbg(ql_dbg_init, vha, 0x0176, + "-> fwdt%u template size %#lx bytes (%#lx words)\n", + j, dlen, dlen / sizeof(*dcode)); + if (dlen > risc_size * sizeof(*dcode)) { + ql_log(ql_log_warn, vha, 0x0177, + "-> fwdt%u template exceeds array (%-lu bytes)\n", + j, dlen - risc_size * sizeof(*dcode)); + goto failed; + } + + fwdt->length = dlen; + ql_dbg(ql_dbg_init, vha, 0x0178, + "-> fwdt%u loaded template ok\n", j); + + fwcode += risc_size + 1; + } + + return QLA_SUCCESS; + +failed: + vfree(fwdt->template); + fwdt->template = NULL; + fwdt->length = 0; + + return QLA_SUCCESS; +} + +int +qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) +{ + int rval; + + if (ql2xfwloadbin == 1) + return qla81xx_load_risc(vha, srisc_addr); + + /* + * FW Load priority: + * 1) Firmware via request-firmware interface (.bin file). + * 2) Firmware residing in flash. + */ + rval = qla24xx_load_risc_blob(vha, srisc_addr); + if (rval == QLA_SUCCESS) + return rval; + + return qla24xx_load_risc_flash(vha, srisc_addr, + vha->hw->flt_region_fw); +} + +int +qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + struct active_regions active_regions = { }; + + if (ql2xfwloadbin == 2) + goto try_blob_fw; + + /* FW Load priority: + * 1) Firmware residing in flash. + * 2) Firmware via request-firmware interface (.bin file). + * 3) Golden-Firmware residing in flash -- (limited operation). 
+ */ + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + goto try_primary_fw; + + qla27xx_get_active_image(vha, &active_regions); + + if (active_regions.global != QLA27XX_SECONDARY_IMAGE) + goto try_primary_fw; + + ql_dbg(ql_dbg_init, vha, 0x008b, + "Loading secondary firmware image.\n"); + rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec); + if (!rval) + return rval; + +try_primary_fw: + ql_dbg(ql_dbg_init, vha, 0x008b, + "Loading primary firmware image.\n"); + rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); + if (!rval) + return rval; + +try_blob_fw: + rval = qla24xx_load_risc_blob(vha, srisc_addr); + if (!rval || !ha->flt_region_gold_fw) + return rval; + + ql_log(ql_log_info, vha, 0x0099, + "Attempting to fallback to golden firmware.\n"); + rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); + if (rval) + return rval; + + ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n"); + ha->flags.running_gold_fw = 1; + return rval; +} + +void +qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) +{ + int ret, retries; + struct qla_hw_data *ha = vha->hw; + + if (ha->flags.pci_channel_io_perm_failure) + return; + if (!IS_FWI2_CAPABLE(ha)) + return; + if (!ha->fw_major_version) + return; + if (!ha->flags.fw_started) + return; + + ret = qla2x00_stop_firmware(vha); + for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && + ret != QLA_INVALID_COMMAND && retries ; retries--) { + ha->isp_ops->reset_chip(vha); + if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) + continue; + if (qla2x00_setup_chip(vha) != QLA_SUCCESS) + continue; + ql_log(ql_log_info, vha, 0x8015, + "Attempting retry of stop-firmware command.\n"); + ret = qla2x00_stop_firmware(vha); + } + + QLA_FW_STOPPED(ha); + ha->flags.fw_init_done = 0; +} + +int +qla24xx_configure_vhba(scsi_qla_host_t *vha) +{ + int rval = QLA_SUCCESS; + int rval2; + uint16_t mb[MAILBOX_REGISTER_COUNT]; + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + + if (!vha->vp_idx) + return -EINVAL; + + rval = qla2x00_fw_ready(base_vha); + + if (rval == QLA_SUCCESS) { + clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); + qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); + } + + vha->flags.management_server_logged_in = 0; + + /* Login to SNS first */ + rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, + BIT_1); + if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) { + if (rval2 == QLA_MEMORY_ALLOC_FAILED) + ql_dbg(ql_dbg_init, vha, 0x0120, + "Failed SNS login: loop_id=%x, rval2=%d\n", + NPH_SNS, rval2); + else + ql_dbg(ql_dbg_init, vha, 0x0103, + "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " + "mb[2]=%x mb[6]=%x mb[7]=%x.\n", + NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]); + return (QLA_FUNCTION_FAILED); + } + + atomic_set(&vha->loop_down_timer, 0); + atomic_set(&vha->loop_state, LOOP_UP); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + rval = qla2x00_loop_resync(base_vha); + + return rval; +} + +/* 84XX Support **************************************************************/ + +static LIST_HEAD(qla_cs84xx_list); +static DEFINE_MUTEX(qla_cs84xx_mutex); + +static struct qla_chip_state_84xx * +qla84xx_get_chip(struct scsi_qla_host *vha) +{ + struct qla_chip_state_84xx *cs84xx; + struct qla_hw_data *ha = vha->hw; + + mutex_lock(&qla_cs84xx_mutex); + + /* Find any shared 84xx chip. 
*/ + list_for_each_entry(cs84xx, &qla_cs84xx_list, list) { + if (cs84xx->bus == ha->pdev->bus) { + kref_get(&cs84xx->kref); + goto done; + } + } + + cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL); + if (!cs84xx) + goto done; + + kref_init(&cs84xx->kref); + spin_lock_init(&cs84xx->access_lock); + mutex_init(&cs84xx->fw_update_mutex); + cs84xx->bus = ha->pdev->bus; + + list_add_tail(&cs84xx->list, &qla_cs84xx_list); +done: + mutex_unlock(&qla_cs84xx_mutex); + return cs84xx; +} + +static void +__qla84xx_chip_release(struct kref *kref) +{ + struct qla_chip_state_84xx *cs84xx = + container_of(kref, struct qla_chip_state_84xx, kref); + + mutex_lock(&qla_cs84xx_mutex); + list_del(&cs84xx->list); + mutex_unlock(&qla_cs84xx_mutex); + kfree(cs84xx); +} + +void +qla84xx_put_chip(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (ha->cs84xx) + kref_put(&ha->cs84xx->kref, __qla84xx_chip_release); +} + +static int +qla84xx_init_chip(scsi_qla_host_t *vha) +{ + int rval; + uint16_t status[2]; + struct qla_hw_data *ha = vha->hw; + + mutex_lock(&ha->cs84xx->fw_update_mutex); + + rval = qla84xx_verify_chip(vha, status); + + mutex_unlock(&ha->cs84xx->fw_update_mutex); + + return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED : + QLA_SUCCESS; +} + +/* 81XX Support **************************************************************/ + +int +qla81xx_nvram_config(scsi_qla_host_t *vha) +{ + int rval; + struct init_cb_81xx *icb; + struct nvram_81xx *nv; + __le32 *dptr; + uint8_t *dptr1, *dptr2; + uint32_t chksum; + uint16_t cnt; + struct qla_hw_data *ha = vha->hw; + uint32_t faddr; + struct active_regions active_regions = { }; + + rval = QLA_SUCCESS; + icb = (struct init_cb_81xx *)ha->init_cb; + nv = ha->nvram; + + /* Determine NVRAM starting address. */ + ha->nvram_size = sizeof(*nv); + ha->vpd_size = FA_NVRAM_VPD_SIZE; + if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) + ha->vpd_size = FA_VPD_SIZE_82XX; + + if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) + qla28xx_get_aux_images(vha, &active_regions); + + /* Get VPD data into cache */ + ha->vpd = ha->nvram + VPD_OFFSET; + + faddr = ha->flt_region_vpd; + if (IS_QLA28XX(ha)) { + if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) + faddr = ha->flt_region_vpd_sec; + ql_dbg(ql_dbg_init, vha, 0x0110, + "Loading %s nvram image.\n", + active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? + "primary" : "secondary"); + } + ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size); + + /* Get NVRAM data into cache and calculate checksum. */ + faddr = ha->flt_region_nvram; + if (IS_QLA28XX(ha)) { + if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) + faddr = ha->flt_region_nvram_sec; + } + ql_dbg(ql_dbg_init, vha, 0x0110, + "Loading %s nvram image.\n", + active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? + "primary" : "secondary"); + ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size); + + dptr = (__force __le32 *)nv; + for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) + chksum += le32_to_cpu(*dptr); + + ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111, + "Contents of NVRAM:\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112, + nv, ha->nvram_size); + + /* Bad NVRAM data, set defaults parameters. */ + if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) || + le16_to_cpu(nv->nvram_version) < ICB_VERSION) { + /* Reset NVRAM data. 
*/ + ql_log(ql_log_info, vha, 0x0073, + "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n", + chksum, nv->id, le16_to_cpu(nv->nvram_version)); + ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv)); + ql_log(ql_log_info, vha, 0x0074, + "Falling back to functioning (yet invalid -- WWPN) " + "defaults.\n"); + + /* + * Set default initialization control block. + */ + memset(nv, 0, ha->nvram_size); + nv->nvram_version = cpu_to_le16(ICB_VERSION); + nv->version = cpu_to_le16(ICB_VERSION); + nv->frame_payload_size = cpu_to_le16(2048); + nv->execution_throttle = cpu_to_le16(0xFFFF); + nv->exchange_count = cpu_to_le16(0); + nv->port_name[0] = 0x21; + nv->port_name[1] = 0x00 + ha->port_no + 1; + nv->port_name[2] = 0x00; + nv->port_name[3] = 0xe0; + nv->port_name[4] = 0x8b; + nv->port_name[5] = 0x1c; + nv->port_name[6] = 0x55; + nv->port_name[7] = 0x86; + nv->node_name[0] = 0x20; + nv->node_name[1] = 0x00; + nv->node_name[2] = 0x00; + nv->node_name[3] = 0xe0; + nv->node_name[4] = 0x8b; + nv->node_name[5] = 0x1c; + nv->node_name[6] = 0x55; + nv->node_name[7] = 0x86; + nv->login_retry_count = cpu_to_le16(8); + nv->interrupt_delay_timer = cpu_to_le16(0); + nv->login_timeout = cpu_to_le16(0); + nv->firmware_options_1 = + cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); + nv->firmware_options_2 = cpu_to_le32(2 << 4); + nv->firmware_options_2 |= cpu_to_le32(BIT_12); + nv->firmware_options_3 = cpu_to_le32(2 << 13); + nv->host_p = cpu_to_le32(BIT_11|BIT_10); + nv->efi_parameters = cpu_to_le32(0); + nv->reset_delay = 5; + nv->max_luns_per_target = cpu_to_le16(128); + nv->port_down_retry_count = cpu_to_le16(30); + nv->link_down_timeout = cpu_to_le16(180); + nv->enode_mac[0] = 0x00; + nv->enode_mac[1] = 0xC0; + nv->enode_mac[2] = 0xDD; + nv->enode_mac[3] = 0x04; + nv->enode_mac[4] = 0x05; + nv->enode_mac[5] = 0x06 + ha->port_no + 1; + + rval = 1; + } + + if (IS_T10_PI_CAPABLE(ha)) + nv->frame_payload_size &= cpu_to_le16(~7); + + qlt_81xx_config_nvram_stage1(vha, nv); + + /* Reset Initialization control block */ + memset(icb, 0, ha->init_cb_size); + + /* Copy 1st segment. */ + dptr1 = (uint8_t *)icb; + dptr2 = (uint8_t *)&nv->version; + cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; + while (cnt--) + *dptr1++ = *dptr2++; + + icb->login_retry_count = nv->login_retry_count; + + /* Copy 2nd segment. */ + dptr1 = (uint8_t *)&icb->interrupt_delay_timer; + dptr2 = (uint8_t *)&nv->interrupt_delay_timer; + cnt = (uint8_t *)&icb->reserved_5 - + (uint8_t *)&icb->interrupt_delay_timer; + while (cnt--) + *dptr1++ = *dptr2++; + + memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac)); + /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */ + if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) { + icb->enode_mac[0] = 0x00; + icb->enode_mac[1] = 0xC0; + icb->enode_mac[2] = 0xDD; + icb->enode_mac[3] = 0x04; + icb->enode_mac[4] = 0x05; + icb->enode_mac[5] = 0x06 + ha->port_no + 1; + } + + /* Use extended-initialization control block. */ + memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); + ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); + /* + * Setup driver NVRAM options. + */ + qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), + "QLE8XXX"); + + qlt_81xx_config_nvram_stage2(vha, icb); + + /* Use alternate WWN? 
*/ + if (nv->host_p & cpu_to_le32(BIT_15)) { + memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); + memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); + } + + /* Prepare nodename */ + if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) { + /* + * Firmware will apply the following mask if the nodename was + * not provided. + */ + memcpy(icb->node_name, icb->port_name, WWN_SIZE); + icb->node_name[0] &= 0xF0; + } + + if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { + if ((nv->enhanced_features & BIT_7) == 0) + ha->flags.scm_supported_a = 1; + } + + /* Set host adapter parameters. */ + ha->flags.disable_risc_code_load = 0; + ha->flags.enable_lip_reset = 0; + ha->flags.enable_lip_full_login = + le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0; + ha->flags.enable_target_reset = + le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0; + ha->flags.enable_led_scheme = 0; + ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0; + + ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & + (BIT_6 | BIT_5 | BIT_4)) >> 4; + + /* save HBA serial number */ + ha->serial0 = icb->port_name[5]; + ha->serial1 = icb->port_name[6]; + ha->serial2 = icb->port_name[7]; + memcpy(vha->node_name, icb->node_name, WWN_SIZE); + memcpy(vha->port_name, icb->port_name, WWN_SIZE); + + icb->execution_throttle = cpu_to_le16(0xFFFF); + + ha->retry_count = le16_to_cpu(nv->login_retry_count); + + /* Set minimum login_timeout to 4 seconds. */ + if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) + nv->login_timeout = cpu_to_le16(ql2xlogintimeout); + if (le16_to_cpu(nv->login_timeout) < 4) + nv->login_timeout = cpu_to_le16(4); + ha->login_timeout = le16_to_cpu(nv->login_timeout); + + /* Set minimum RATOV to 100 tenths of a second. */ + ha->r_a_tov = 100; + + ha->loop_reset_delay = nv->reset_delay; + + /* Link Down Timeout = 0: + * + * When Port Down timer expires we will start returning + * I/O's to OS with "DID_NO_CONNECT". + * + * Link Down Timeout != 0: + * + * The driver waits for the link to come up after link down + * before returning I/Os to OS with "DID_NO_CONNECT". + */ + if (le16_to_cpu(nv->link_down_timeout) == 0) { + ha->loop_down_abort_time = + (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); + } else { + ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); + ha->loop_down_abort_time = + (LOOP_DOWN_TIME - ha->link_down_timeout); + } + + /* Need enough time to try and get the port back. */ + ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); + if (qlport_down_retry) + ha->port_down_retry_count = qlport_down_retry; + + /* Set login_retry_count */ + ha->login_retry_count = le16_to_cpu(nv->login_retry_count); + if (ha->port_down_retry_count == + le16_to_cpu(nv->port_down_retry_count) && + ha->port_down_retry_count > 3) + ha->login_retry_count = ha->port_down_retry_count; + else if (ha->port_down_retry_count > (int)ha->login_retry_count) + ha->login_retry_count = ha->port_down_retry_count; + if (ql2xloginretrycount) + ha->login_retry_count = ql2xloginretrycount; + + /* if not running MSI-X we need handshaking on interrupts */ + if (!vha->hw->flags.msix_enabled && + (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) + icb->firmware_options_2 |= cpu_to_le32(BIT_22); + + /* Enable ZIO. */ + if (!vha->flags.init_done) { + ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & + (BIT_3 | BIT_2 | BIT_1 | BIT_0); + ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
+ le16_to_cpu(icb->interrupt_delay_timer) : 2; + } + icb->firmware_options_2 &= cpu_to_le32( + ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); + vha->flags.process_response_queue = 0; + if (ha->zio_mode != QLA_ZIO_DISABLED) { + ha->zio_mode = QLA_ZIO_MODE_6; + + ql_log(ql_log_info, vha, 0x0075, + "ZIO mode %d enabled; timer delay (%d us).\n", + ha->zio_mode, + ha->zio_timer * 100); + + icb->firmware_options_2 |= cpu_to_le32( + (uint32_t)ha->zio_mode); + icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); + vha->flags.process_response_queue = 1; + } + + /* enable RIDA Format2 */ + icb->firmware_options_3 |= cpu_to_le32(BIT_0); + + /* N2N: driver will initiate Login instead of FW */ + icb->firmware_options_3 |= cpu_to_le32(BIT_8); + + /* Determine NVMe/FCP priority for target ports */ + ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha); + + if (rval) { + ql_log(ql_log_warn, vha, 0x0076, + "NVRAM configuration failed.\n"); + } + return (rval); +} + +int +qla82xx_restart_isp(scsi_qla_host_t *vha) +{ + int status, rval; + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *vp, *tvp; + unsigned long flags; + + status = qla2x00_init_rings(vha); + if (!status) { + clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); + ha->flags.chip_reset_done = 1; + + status = qla2x00_fw_ready(vha); + if (!status) { + /* Issue a marker after FW becomes ready. */ + qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); + vha->flags.online = 1; + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + } + + /* if no cable then assume it's good */ + if ((vha->device_flags & DFLG_NO_CABLE)) + status = 0; + } + + if (!status) { + clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); + + if (!atomic_read(&vha->loop_down_timer)) { + /* + * Issue marker command only when we are going + * to start the I/O . + */ + vha->marker_needed = 1; + } + + ha->isp_ops->enable_intrs(ha); + + ha->isp_abort_cnt = 0; + clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); + + /* Update the firmware version */ + status = qla82xx_check_md_needed(vha); + + if (ha->fce) { + ha->flags.fce_enabled = 1; + memset(ha->fce, 0, + fce_calc_size(ha->fce_bufs)); + rval = qla2x00_enable_fce_trace(vha, + ha->fce_dma, ha->fce_bufs, ha->fce_mb, + &ha->fce_bufs); + if (rval) { + ql_log(ql_log_warn, vha, 0x8001, + "Unable to reinitialize FCE (%d).\n", + rval); + ha->flags.fce_enabled = 0; + } + } + + if (ha->eft) { + memset(ha->eft, 0, EFT_SIZE); + rval = qla2x00_enable_eft_trace(vha, + ha->eft_dma, EFT_NUM_BUFFERS); + if (rval) { + ql_log(ql_log_warn, vha, 0x8010, + "Unable to reinitialize EFT (%d).\n", + rval); + } + } + } + + if (!status) { + ql_dbg(ql_dbg_taskm, vha, 0x8011, + "qla82xx_restart_isp succeeded.\n"); + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { + if (vp->vp_idx) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + + qla2x00_vp_abort_isp(vp); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + + } else { + ql_log(ql_log_warn, vha, 0x8016, + "qla82xx_restart_isp **** FAILED ****.\n"); + } + + return status; +} + +/* + * qla24xx_get_fcp_prio + * Gets the fcp cmd priority value for the logged in port. + * Looks for a match of the port descriptors within + * each of the fcp prio config entries. If a match is found, + * the tag (priority) value is returned. + * + * Input: + * vha = scsi host structure pointer. + * fcport = port structure pointer. 
+ * + * Return: + * non-zero (if found) + * -1 (if not found) + * + * Context: + * Kernel context + */ +static int +qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + int i, entries; + uint8_t pid_match, wwn_match; + int priority; + uint32_t pid1, pid2; + uint64_t wwn1, wwn2; + struct qla_fcp_prio_entry *pri_entry; + struct qla_hw_data *ha = vha->hw; + + if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled) + return -1; + + priority = -1; + entries = ha->fcp_prio_cfg->num_entries; + pri_entry = &ha->fcp_prio_cfg->entry[0]; + + for (i = 0; i < entries; i++) { + pid_match = wwn_match = 0; + + if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) { + pri_entry++; + continue; + } + + /* check source pid for a match */ + if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) { + pid1 = pri_entry->src_pid & INVALID_PORT_ID; + pid2 = vha->d_id.b24 & INVALID_PORT_ID; + if (pid1 == INVALID_PORT_ID) + pid_match++; + else if (pid1 == pid2) + pid_match++; + } + + /* check destination pid for a match */ + if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) { + pid1 = pri_entry->dst_pid & INVALID_PORT_ID; + pid2 = fcport->d_id.b24 & INVALID_PORT_ID; + if (pid1 == INVALID_PORT_ID) + pid_match++; + else if (pid1 == pid2) + pid_match++; + } + + /* check source WWN for a match */ + if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) { + wwn1 = wwn_to_u64(vha->port_name); + wwn2 = wwn_to_u64(pri_entry->src_wwpn); + if (wwn2 == (uint64_t)-1) + wwn_match++; + else if (wwn1 == wwn2) + wwn_match++; + } + + /* check destination WWN for a match */ + if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) { + wwn1 = wwn_to_u64(fcport->port_name); + wwn2 = wwn_to_u64(pri_entry->dst_wwpn); + if (wwn2 == (uint64_t)-1) + wwn_match++; + else if (wwn1 == wwn2) + wwn_match++; + } + + if (pid_match == 2 || wwn_match == 2) { + /* Found a matching entry */ + if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID) + priority = pri_entry->tag; + break; + } + + pri_entry++; + } + + return priority; +} + +/* + * qla24xx_update_fcport_fcp_prio + * Activates fcp priority for the logged in fc port + * + * Input: + * vha = scsi host structure pointer. + * fcp = port structure pointer. + * + * Return: + * QLA_SUCCESS or QLA_FUNCTION_FAILED + * + * Context: + * Kernel context. + */ +int +qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + int ret; + int priority; + uint16_t mb[5]; + + if (fcport->port_type != FCT_TARGET || + fcport->loop_id == FC_NO_LOOP_ID) + return QLA_FUNCTION_FAILED; + + priority = qla24xx_get_fcp_prio(vha, fcport); + if (priority < 0) + return QLA_FUNCTION_FAILED; + + if (IS_P3P_TYPE(vha->hw)) { + fcport->fcp_prio = priority & 0xf; + return QLA_SUCCESS; + } + + ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb); + if (ret == QLA_SUCCESS) { + if (fcport->fcp_prio != priority) + ql_dbg(ql_dbg_user, vha, 0x709e, + "Updated FCP_CMND priority - value=%d loop_id=%d " + "port_id=%02x%02x%02x.\n", priority, + fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + fcport->fcp_prio = priority & 0xf; + } else + ql_dbg(ql_dbg_user, vha, 0x704f, + "Unable to update FCP_CMND priority - ret=0x%x for " + "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa); + return ret; +} + +/* + * qla24xx_update_all_fcp_prio + * Activates fcp priority for all the logged in ports + * + * Input: + * ha = adapter block pointer. 
+ * + * Return: + * QLA_SUCCESS or QLA_FUNCTION_FAILED + * + * Context: + * Kernel context. + */ +int +qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha) +{ + int ret; + fc_port_t *fcport; + + ret = QLA_FUNCTION_FAILED; + /* We need to set priority for all logged in ports */ + list_for_each_entry(fcport, &vha->vp_fcports, list) + ret = qla24xx_update_fcport_fcp_prio(vha, fcport); + + return ret; +} + +struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, + int vp_idx, bool startqp) +{ + int rsp_id = 0; + int req_id = 0; + int i; + struct qla_hw_data *ha = vha->hw; + uint16_t qpair_id = 0; + struct qla_qpair *qpair = NULL; + struct qla_msix_entry *msix; + + if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) { + ql_log(ql_log_warn, vha, 0x00181, + "FW/Driver is not multi-queue capable.\n"); + return NULL; + } + + if (ql2xmqsupport || ql2xnvmeenable) { + qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); + if (qpair == NULL) { + ql_log(ql_log_warn, vha, 0x0182, + "Failed to allocate memory for queue pair.\n"); + return NULL; + } + + qpair->hw = vha->hw; + qpair->vha = vha; + qpair->qp_lock_ptr = &qpair->qp_lock; + spin_lock_init(&qpair->qp_lock); + qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0; + + /* Assign available que pair id */ + mutex_lock(&ha->mq_lock); + qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); + if (ha->num_qpairs >= ha->max_qpairs) { + mutex_unlock(&ha->mq_lock); + ql_log(ql_log_warn, vha, 0x0183, + "No resources to create additional q pair.\n"); + goto fail_qid_map; + } + ha->num_qpairs++; + set_bit(qpair_id, ha->qpair_qid_map); + ha->queue_pair_map[qpair_id] = qpair; + qpair->id = qpair_id; + qpair->vp_idx = vp_idx; + qpair->fw_started = ha->flags.fw_started; + INIT_LIST_HEAD(&qpair->hints_list); + INIT_LIST_HEAD(&qpair->dsd_list); + qpair->chip_reset = ha->base_qpair->chip_reset; + qpair->enable_class_2 = ha->base_qpair->enable_class_2; + qpair->enable_explicit_conf = + ha->base_qpair->enable_explicit_conf; + + for (i = 0; i < ha->msix_count; i++) { + msix = &ha->msix_entries[i]; + if (msix->in_use) + continue; + qpair->msix = msix; + ql_dbg(ql_dbg_multiq, vha, 0xc00f, + "Vector %x selected for qpair\n", msix->vector); + break; + } + if (!qpair->msix) { + ql_log(ql_log_warn, vha, 0x0184, + "Out of MSI-X vectors!.\n"); + goto fail_msix; + } + + qpair->msix->in_use = 1; + list_add_tail(&qpair->qp_list_elem, &vha->qp_list); + qpair->pdev = ha->pdev; + if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) + qpair->reqq_start_iocbs = qla_83xx_start_iocbs; + + mutex_unlock(&ha->mq_lock); + + /* Create response queue first */ + rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp); + if (!rsp_id) { + ql_log(ql_log_warn, vha, 0x0185, + "Failed to create response queue.\n"); + goto fail_rsp; + } + + qpair->rsp = ha->rsp_q_map[rsp_id]; + + /* Create request queue */ + req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos, + startqp); + if (!req_id) { + ql_log(ql_log_warn, vha, 0x0186, + "Failed to create request queue.\n"); + goto fail_req; + } + + qpair->req = ha->req_q_map[req_id]; + qpair->rsp->req = qpair->req; + qpair->rsp->qpair = qpair; + + if (!qpair->cpu_mapped) + qla_cpu_update(qpair, raw_smp_processor_id()); + + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { + if (ha->fw_attributes & BIT_4) + qpair->difdix_supported = 1; + } + + qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); + if (!qpair->srb_mempool) { + ql_log(ql_log_warn, vha, 0xd036, + "Failed to create srb 
mempool for qpair %d\n", + qpair->id); + goto fail_mempool; + } + + if (qla_create_buf_pool(vha, qpair)) { + ql_log(ql_log_warn, vha, 0xd036, + "Failed to initialize buf pool for qpair %d\n", + qpair->id); + goto fail_bufpool; + } + + /* Mark as online */ + qpair->online = 1; + + if (!vha->flags.qpairs_available) + vha->flags.qpairs_available = 1; + + ql_dbg(ql_dbg_multiq, vha, 0xc00d, + "Request/Response queue pair created, id %d\n", + qpair->id); + ql_dbg(ql_dbg_init, vha, 0x0187, + "Request/Response queue pair created, id %d\n", + qpair->id); + } + return qpair; + +fail_bufpool: + mempool_destroy(qpair->srb_mempool); +fail_mempool: + qla25xx_delete_req_que(vha, qpair->req); +fail_req: + qla25xx_delete_rsp_que(vha, qpair->rsp); +fail_rsp: + mutex_lock(&ha->mq_lock); + qpair->msix->in_use = 0; + list_del(&qpair->qp_list_elem); + if (list_empty(&vha->qp_list)) + vha->flags.qpairs_available = 0; +fail_msix: + ha->queue_pair_map[qpair_id] = NULL; + clear_bit(qpair_id, ha->qpair_qid_map); + ha->num_qpairs--; + mutex_unlock(&ha->mq_lock); +fail_qid_map: + kfree(qpair); + return NULL; +} + +int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) +{ + int ret = QLA_FUNCTION_FAILED; + struct qla_hw_data *ha = qpair->hw; + + qpair->delete_in_progress = 1; + + qla_free_buf_pool(qpair); + + ret = qla25xx_delete_req_que(vha, qpair->req); + if (ret != QLA_SUCCESS) + goto fail; + + ret = qla25xx_delete_rsp_que(vha, qpair->rsp); + if (ret != QLA_SUCCESS) + goto fail; + + if (!list_empty(&qpair->dsd_list)) { + struct dsd_dma *dsd_ptr, *tdsd_ptr; + + /* clean up allocated prev pool */ + list_for_each_entry_safe(dsd_ptr, tdsd_ptr, + &qpair->dsd_list, list) { + dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, + dsd_ptr->dsd_list_dma); + list_del(&dsd_ptr->list); + kfree(dsd_ptr); + } + } + + mutex_lock(&ha->mq_lock); + ha->queue_pair_map[qpair->id] = NULL; + clear_bit(qpair->id, ha->qpair_qid_map); + ha->num_qpairs--; + list_del(&qpair->qp_list_elem); + if (list_empty(&vha->qp_list)) { + vha->flags.qpairs_available = 0; + vha->flags.qpairs_req_created = 0; + vha->flags.qpairs_rsp_created = 0; + } + mempool_destroy(qpair->srb_mempool); + kfree(qpair); + mutex_unlock(&ha->mq_lock); + + return QLA_SUCCESS; +fail: + return ret; +} + +uint64_t +qla2x00_count_set_bits(uint32_t num) +{ + /* Brian Kernighan's Algorithm */ + u64 count = 0; + + while (num) { + num &= (num - 1); + count++; + } + return count; +} + +uint64_t +qla2x00_get_num_tgts(scsi_qla_host_t *vha) +{ + fc_port_t *f, *tf; + u64 count = 0; + + f = NULL; + tf = NULL; + + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { + if (f->port_type != FCT_TARGET) + continue; + count++; + } + return count; +} + +int qla2xxx_reset_stats(struct Scsi_Host *host, u32 flags) +{ + scsi_qla_host_t *vha = shost_priv(host); + fc_port_t *fcport = NULL; + unsigned long int_flags; + + if (flags & QLA2XX_HW_ERROR) + vha->hw_err_cnt = 0; + if (flags & QLA2XX_SHT_LNK_DWN) + vha->short_link_down_cnt = 0; + if (flags & QLA2XX_INT_ERR) + vha->interface_err_cnt = 0; + if (flags & QLA2XX_CMD_TIMEOUT) + vha->cmd_timeout_cnt = 0; + if (flags & QLA2XX_RESET_CMD_ERR) + vha->reset_cmd_err_cnt = 0; + if (flags & QLA2XX_TGT_SHT_LNK_DOWN) { + spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags); + list_for_each_entry(fcport, &vha->vp_fcports, list) { + fcport->tgt_short_link_down_cnt = 0; + fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags); + } + vha->link_down_time = 
QLA2XX_MAX_LINK_DOWN_TIME; + return 0; +} + +int qla2xxx_start_stats(struct Scsi_Host *host, u32 flags) +{ + return qla2xxx_reset_stats(host, flags); +} + +int qla2xxx_stop_stats(struct Scsi_Host *host, u32 flags) +{ + return qla2xxx_reset_stats(host, flags); +} + +int qla2xxx_get_ini_stats(struct Scsi_Host *host, u32 flags, + void *data, u64 size) +{ + scsi_qla_host_t *vha = shost_priv(host); + struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data; + struct ql_vnd_stats *rsp_data = &resp->stats; + u64 ini_entry_count = 0; + u64 i = 0; + u64 entry_count = 0; + u64 num_tgt = 0; + u32 tmp_stat_type = 0; + fc_port_t *fcport = NULL; + unsigned long int_flags; + + /* Copy stat type to work on it */ + tmp_stat_type = flags; + + if (tmp_stat_type & BIT_17) { + num_tgt = qla2x00_get_num_tgts(vha); + /* unset BIT_17 */ + tmp_stat_type &= ~(1 << 17); + } + ini_entry_count = qla2x00_count_set_bits(tmp_stat_type); + + entry_count = ini_entry_count + num_tgt; + + rsp_data->entry_count = entry_count; + + i = 0; + if (flags & QLA2XX_HW_ERROR) { + rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR; + rsp_data->entry[i].tgt_num = 0x0; + rsp_data->entry[i].cnt = vha->hw_err_cnt; + i++; + } + + if (flags & QLA2XX_SHT_LNK_DWN) { + rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN; + rsp_data->entry[i].tgt_num = 0x0; + rsp_data->entry[i].cnt = vha->short_link_down_cnt; + i++; + } + + if (flags & QLA2XX_INT_ERR) { + rsp_data->entry[i].stat_type = QLA2XX_INT_ERR; + rsp_data->entry[i].tgt_num = 0x0; + rsp_data->entry[i].cnt = vha->interface_err_cnt; + i++; + } + + if (flags & QLA2XX_CMD_TIMEOUT) { + rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT; + rsp_data->entry[i].tgt_num = 0x0; + rsp_data->entry[i].cnt = vha->cmd_timeout_cnt; + i++; + } + + if (flags & QLA2XX_RESET_CMD_ERR) { + rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR; + rsp_data->entry[i].tgt_num = 0x0; + rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt; + i++; + } + + /* i will continue from previous loop, as target + * entries are after initiator + */ + if (flags & QLA2XX_TGT_SHT_LNK_DOWN) { + spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags); + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->port_type != FCT_TARGET) + continue; + if (!fcport->rport) + continue; + rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN; + rsp_data->entry[i].tgt_num = fcport->rport->number; + rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt; + i++; + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags); + } + resp->status = EXT_STATUS_OK; + + return 0; +} + +int qla2xxx_get_tgt_stats(struct Scsi_Host *host, u32 flags, + struct fc_rport *rport, void *data, u64 size) +{ + struct ql_vnd_tgt_stats_resp *tgt_data = data; + fc_port_t *fcport = *(fc_port_t **)rport->dd_data; + + tgt_data->status = 0; + tgt_data->stats.entry_count = 1; + tgt_data->stats.entry[0].stat_type = flags; + tgt_data->stats.entry[0].tgt_num = rport->number; + tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt; + + return 0; +} + +int qla2xxx_disable_port(struct Scsi_Host *host) +{ + scsi_qla_host_t *vha = shost_priv(host); + + vha->hw->flags.port_isolated = 1; + + if (qla2x00_isp_reg_stat(vha->hw)) { + ql_log(ql_log_info, vha, 0x9006, + "PCI/Register disconnect, exiting.\n"); + qla_pci_set_eeh_busy(vha); + return FAILED; + } + if (qla2x00_chip_is_down(vha)) + return 0; + + if (vha->flags.online) { + qla2x00_abort_isp_cleanup(vha); + qla2x00_wait_for_sess_deletion(vha); + } + + return 0; +} + +int qla2xxx_enable_port(struct 
Scsi_Host *host) +{ + scsi_qla_host_t *vha = shost_priv(host); + + if (qla2x00_isp_reg_stat(vha->hw)) { + ql_log(ql_log_info, vha, 0x9001, + "PCI/Register disconnect, exiting.\n"); + qla_pci_set_eeh_busy(vha); + return FAILED; + } + + vha->hw->flags.port_isolated = 0; + /* Set the flag to 1, so that isp_abort can proceed */ + vha->flags.online = 1; + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + + return 0; +} diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h new file mode 100644 index 000000000..a4a56ab0b --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -0,0 +1,633 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ + +#include "qla_target.h" +/** + * qla24xx_calc_iocbs() - Determine number of Command Type 3 and + * Continuation Type 1 IOCBs to allocate. + * + * @vha: HA context + * @dsds: number of data segment descriptors needed + * + * Returns the number of IOCB entries needed to store @dsds. + */ +static inline uint16_t +qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds) +{ + uint16_t iocbs; + + iocbs = 1; + if (dsds > 1) { + iocbs += (dsds - 1) / 5; + if ((dsds - 1) % 5) + iocbs++; + } + return iocbs; +} + +/* + * qla2x00_debounce_register + * Debounce register. + * + * Input: + * port = register address. + * + * Returns: + * register value. + */ +static __inline__ uint16_t +qla2x00_debounce_register(volatile __le16 __iomem *addr) +{ + volatile uint16_t first; + volatile uint16_t second; + + do { + first = rd_reg_word(addr); + barrier(); + cpu_relax(); + second = rd_reg_word(addr); + } while (first != second); + + return (first); +} + +static inline void +qla2x00_poll(struct rsp_que *rsp) +{ + struct qla_hw_data *ha = rsp->hw; + + if (IS_P3P_TYPE(ha)) + qla82xx_poll(0, rsp); + else + ha->isp_ops->intr_handler(0, rsp); +} + +static inline uint8_t * +host_to_fcp_swap(uint8_t *fcp, uint32_t bsize) +{ + uint32_t *ifcp = (uint32_t *) fcp; + uint32_t *ofcp = (uint32_t *) fcp; + uint32_t iter = bsize >> 2; + + for (; iter ; iter--) + *ofcp++ = swab32(*ifcp++); + + return fcp; +} + +static inline void +host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize) +{ + uint32_t *isrc = (uint32_t *) src; + __le32 *odest = (__le32 *) dst; + uint32_t iter = bsize >> 2; + + for ( ; iter--; isrc++) + *odest++ = cpu_to_le32(*isrc); +} + +static inline void +qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx) +{ + struct dsd_dma *dsd, *tdsd; + + /* clean up allocated prev pool */ + list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) { + dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr, + dsd->dsd_list_dma); + list_del(&dsd->list); + kfree(dsd); + } + INIT_LIST_HEAD(&ctx->dsd_list); +} + +static inline void +qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state) +{ + int old_val; + uint8_t shiftbits, mask; + uint8_t port_dstate_str_sz; + + /* This will have to change when the max no. of states > 16 */ + shiftbits = 4; + mask = (1 << shiftbits) - 1; + + port_dstate_str_sz = sizeof(port_dstate_str) / sizeof(char *); + fcport->disc_state = state; + while (1) { + old_val = atomic_read(&fcport->shadow_disc_state); + if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state, + old_val, (old_val << shiftbits) | state)) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x2134, + "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n", + fcport->port_name, (old_val & mask) < port_dstate_str_sz ? 
+ port_dstate_str[old_val & mask] : "Unknown", + port_dstate_str[state], fcport->d_id.b24); + return; + } + } +} + +static inline int +qla2x00_hba_err_chk_enabled(srb_t *sp) +{ + /* + * Uncomment when corresponding SCSI changes are done. + * + if (!sp->cmd->prot_chk) + return 0; + * + */ + switch (scsi_get_prot_op(GET_CMD_SP(sp))) { + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + if (ql2xenablehba_err_chk >= 1) + return 1; + break; + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + if (ql2xenablehba_err_chk >= 2) + return 1; + break; + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + return 1; + } + return 0; +} + +static inline int +qla2x00_reset_active(scsi_qla_host_t *vha) +{ + scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev); + + /* Test appropriate base-vha and vha flags. */ + return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) || + test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || + test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || + test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || + test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); +} + +static inline int +qla2x00_chip_is_down(scsi_qla_host_t *vha) +{ + return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started); +} + +static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha, + struct qla_qpair *qpair, fc_port_t *fcport) +{ + memset(sp, 0, sizeof(*sp)); + sp->fcport = fcport; + sp->iocbs = 1; + sp->vha = vha; + sp->qpair = qpair; + sp->cmd_type = TYPE_SRB; + /* ref : INIT - normal flow */ + kref_init(&sp->cmd_kref); + INIT_LIST_HEAD(&sp->elem); +} + +static inline srb_t * +qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair, + fc_port_t *fcport, gfp_t flag) +{ + srb_t *sp = NULL; + uint8_t bail; + + QLA_QPAIR_MARK_BUSY(qpair, bail); + if (unlikely(bail)) + return NULL; + + sp = mempool_alloc(qpair->srb_mempool, flag); + if (sp) + qla2xxx_init_sp(sp, vha, qpair, fcport); + else + QLA_QPAIR_MARK_NOT_BUSY(qpair); + return sp; +} + +void qla2xxx_rel_done_warning(srb_t *sp, int res); +void qla2xxx_rel_free_warning(srb_t *sp); + +static inline void +qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp) +{ + sp->qpair = NULL; + sp->done = qla2xxx_rel_done_warning; + sp->free = qla2xxx_rel_free_warning; + mempool_free(sp, qpair->srb_mempool); + QLA_QPAIR_MARK_NOT_BUSY(qpair); +} + +static inline srb_t * +qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag) +{ + srb_t *sp = NULL; + struct qla_qpair *qpair; + + if (unlikely(qla_vha_mark_busy(vha))) + return NULL; + + qpair = vha->hw->base_qpair; + sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag); + if (!sp) + goto done; + + sp->vha = vha; +done: + if (!sp) + QLA_VHA_MARK_NOT_BUSY(vha); + return sp; +} + +static inline void +qla2x00_rel_sp(srb_t *sp) +{ + QLA_VHA_MARK_NOT_BUSY(sp->vha); + qla2xxx_rel_qpair_sp(sp->qpair, sp); +} + +static inline int +qla2x00_gid_list_size(struct qla_hw_data *ha) +{ + if (IS_QLAFX00(ha)) + return sizeof(uint32_t) * 32; + else + return sizeof(struct gid_list_info) * ha->max_fibre_devices; +} + +static inline void +qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status) +{ + if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && + (status & MBX_INTERRUPT) && ha->flags.mbox_int) { + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); + complete(&ha->mbx_intr_comp); + } +} + +static inline void +qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t sts_qual) +{ + u8 scope; + u16 qual; +#define SQ_SCOPE_MASK 0xc000 /* SAM-6 rev5 
5.3.2 */ +#define SQ_SCOPE_SHIFT 14 +#define SQ_QUAL_MASK 0x3fff + +#define SQ_MAX_WAIT_SEC 60 /* Max I/O hold off time in seconds. */ +#define SQ_MAX_WAIT_TIME (SQ_MAX_WAIT_SEC * 10) /* in 100ms. */ + + if (!sts_qual) /* Common case. */ + return; + + scope = (sts_qual & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT; + /* Handle only scope 1 or 2, which is for I-T nexus. */ + if (scope != 1 && scope != 2) + return; + + /* Skip processing, if retry delay timer is already in effect. */ + if (fcport->retry_delay_timestamp && + time_before(jiffies, fcport->retry_delay_timestamp)) + return; + + qual = sts_qual & SQ_QUAL_MASK; + if (qual < 1 || qual > 0x3fef) + return; + qual = min(qual, (u16)SQ_MAX_WAIT_TIME); + + /* qual is expressed in 100ms increments. */ + fcport->retry_delay_timestamp = jiffies + (qual * HZ / 10); + + ql_log(ql_log_warn, fcport->vha, 0x5101, + "%8phC: I/O throttling requested (status qualifier = %04xh), holding off I/Os for %ums.\n", + fcport->port_name, sts_qual, qual * 100); +} + +static inline bool +qla_is_exch_offld_enabled(struct scsi_qla_host *vha) +{ + if (qla_ini_mode_enabled(vha) && + (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT)) + return true; + else if (qla_tgt_mode_enabled(vha) && + (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT)) + return true; + else if (qla_dual_mode_enabled(vha) && + ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT)) + return true; + else + return false; +} + +static inline void +qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid) +{ + qpair->cpuid = cpuid; + + if (!list_empty(&qpair->hints_list)) { + struct qla_qpair_hint *h; + + list_for_each_entry(h, &qpair->hints_list, hint_elem) + h->cpuid = qpair->cpuid; + } +} + +static inline struct qla_qpair_hint * +qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair) +{ + struct qla_qpair_hint *h; + u16 i; + + for (i = 0; i < tgt->ha->max_qpairs + 1; i++) { + h = &tgt->qphints[i]; + if (h->qpair == qpair) + return h; + } + + return NULL; +} + +static inline void +qla_83xx_start_iocbs(struct qla_qpair *qpair) +{ + struct req_que *req = qpair->req; + + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else + req->ring_ptr++; + + wrt_reg_dword(req->req_q_in, req->ring_index); +} + +static inline int +qla2xxx_get_fc4_priority(struct scsi_qla_host *vha) +{ + uint32_t data; + + data = + ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET]; + + + return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME; +} + +enum { + RESOURCE_NONE, + RESOURCE_IOCB = BIT_0, + RESOURCE_EXCH = BIT_1, /* exchange */ + RESOURCE_FORCE = BIT_2, + RESOURCE_HA = BIT_3, +}; + +static inline int +qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores) +{ + u16 iocbs_used, i; + u16 exch_used; + struct qla_hw_data *ha = qp->hw; + + if (!ql2xenforce_iocb_limit) { + iores->res_type = RESOURCE_NONE; + return 0; + } + if (iores->res_type & RESOURCE_FORCE) + goto force; + + if ((iores->iocb_cnt + qp->fwres.iocbs_used) >= qp->fwres.iocbs_qp_limit) { + /* no need to acquire qpair lock. 
It's just rough calculation */ + iocbs_used = ha->base_qpair->fwres.iocbs_used; + for (i = 0; i < ha->max_qpairs; i++) { + if (ha->queue_pair_map[i]) + iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used; + } + + if ((iores->iocb_cnt + iocbs_used) >= qp->fwres.iocbs_limit) { + iores->res_type = RESOURCE_NONE; + return -ENOSPC; + } + } + + if (iores->res_type & RESOURCE_EXCH) { + exch_used = ha->base_qpair->fwres.exch_used; + for (i = 0; i < ha->max_qpairs; i++) { + if (ha->queue_pair_map[i]) + exch_used += ha->queue_pair_map[i]->fwres.exch_used; + } + + if ((exch_used + iores->exch_cnt) >= qp->fwres.exch_limit) { + iores->res_type = RESOURCE_NONE; + return -ENOSPC; + } + } + + if (ql2xenforce_iocb_limit == 2) { + if ((iores->iocb_cnt + atomic_read(&ha->fwres.iocb_used)) >= + ha->fwres.iocb_limit) { + iores->res_type = RESOURCE_NONE; + return -ENOSPC; + } + + if (iores->res_type & RESOURCE_EXCH) { + if ((iores->exch_cnt + atomic_read(&ha->fwres.exch_used)) >= + ha->fwres.exch_limit) { + iores->res_type = RESOURCE_NONE; + return -ENOSPC; + } + } + } + +force: + qp->fwres.iocbs_used += iores->iocb_cnt; + qp->fwres.exch_used += iores->exch_cnt; + if (ql2xenforce_iocb_limit == 2) { + atomic_add(iores->iocb_cnt, &ha->fwres.iocb_used); + atomic_add(iores->exch_cnt, &ha->fwres.exch_used); + iores->res_type |= RESOURCE_HA; + } + return 0; +} + +/* + * decrement to zero. This routine will not decrement below zero + * @v: pointer of type atomic_t + * @amount: amount to decrement from v + */ +static void qla_atomic_dtz(atomic_t *v, int amount) +{ + int c, old, dec; + + c = atomic_read(v); + for (;;) { + dec = c - amount; + if (unlikely(dec < 0)) + dec = 0; + + old = atomic_cmpxchg((v), c, dec); + if (likely(old == c)) + break; + c = old; + } +} + +static inline void +qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores) +{ + struct qla_hw_data *ha = qp->hw; + + if (iores->res_type & RESOURCE_HA) { + if (iores->res_type & RESOURCE_IOCB) + qla_atomic_dtz(&ha->fwres.iocb_used, iores->iocb_cnt); + + if (iores->res_type & RESOURCE_EXCH) + qla_atomic_dtz(&ha->fwres.exch_used, iores->exch_cnt); + } + + if (iores->res_type & RESOURCE_IOCB) { + if (qp->fwres.iocbs_used >= iores->iocb_cnt) { + qp->fwres.iocbs_used -= iores->iocb_cnt; + } else { + /* should not happen */ + qp->fwres.iocbs_used = 0; + } + } + + if (iores->res_type & RESOURCE_EXCH) { + if (qp->fwres.exch_used >= iores->exch_cnt) { + qp->fwres.exch_used -= iores->exch_cnt; + } else { + /* should not happen */ + qp->fwres.exch_used = 0; + } + } + iores->res_type = RESOURCE_NONE; +} + +#define ISP_REG_DISCONNECT 0xffffffffU +/************************************************************************** + * qla2x00_isp_reg_stat + * + * Description: + * Read the host status register of ISP before aborting the command. + * + * Input: + * ha = pointer to host adapter structure. + * + * + * Returns: + * Either true or false. + * + * Note: Return true if there is register disconnect. 
+ **************************************************************************/ +static inline +uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) +{ + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; + + if (IS_P3P_TYPE(ha)) + return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT); + else + return ((rd_reg_dword(&reg->host_status)) == + ISP_REG_DISCONNECT); +} + +static inline +bool qla_pci_disconnected(struct scsi_qla_host *vha, + struct device_reg_24xx __iomem *reg) +{ + uint32_t stat; + bool ret = false; + + stat = rd_reg_dword(&reg->host_status); + if (stat == 0xffffffff) { + ql_log(ql_log_info, vha, 0x8041, + "detected PCI disconnect.\n"); + qla_schedule_eeh_work(vha); + ret = true; + } + return ret; +} + +static inline bool +fcport_is_smaller(fc_port_t *fcport) +{ + if (wwn_to_u64(fcport->port_name) < + wwn_to_u64(fcport->vha->port_name)) + return true; + else + return false; +} + +static inline bool +fcport_is_bigger(fc_port_t *fcport) +{ + return !fcport_is_smaller(fcport); +} + +static inline struct qla_qpair * +qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair) +{ + int cpuid = raw_smp_processor_id(); + + if (qpair->cpuid != cpuid && + ha->qp_cpu_map[cpuid]) { + qpair = ha->qp_cpu_map[cpuid]; + } + return qpair; +} + +static inline void +qla_mapq_init_qp_cpu_map(struct qla_hw_data *ha, + struct qla_msix_entry *msix, + struct qla_qpair *qpair) +{ + const struct cpumask *mask; + unsigned int cpu; + + if (!ha->qp_cpu_map) + return; + mask = pci_irq_get_affinity(ha->pdev, msix->vector_base0); + if (!mask) + return; + qpair->cpuid = cpumask_first(mask); + for_each_cpu(cpu, mask) { + ha->qp_cpu_map[cpu] = qpair; + } + msix->cpuid = qpair->cpuid; + qpair->cpu_mapped = true; +} + +static inline void +qla_mapq_free_qp_cpu_map(struct qla_hw_data *ha) +{ + if (ha->qp_cpu_map) { + kfree(ha->qp_cpu_map); + ha->qp_cpu_map = NULL; + } +} + +static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha) +{ + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + if (!ha->qp_cpu_map) { + ha->qp_cpu_map = kcalloc(NR_CPUS, sizeof(struct qla_qpair *), + GFP_KERNEL); + if (!ha->qp_cpu_map) { + ql_log(ql_log_fatal, vha, 0x0180, + "Unable to allocate memory for qp_cpu_map ptrs.\n"); + return -1; + } + } + return 0; +} diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c new file mode 100644 index 000000000..df90169f8 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -0,0 +1,4471 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#include "qla_def.h" +#include "qla_target.h" + +#include <linux/blkdev.h> +#include <linux/delay.h> + +#include <scsi/scsi_tcq.h> + +static int qla_start_scsi_type6(srb_t *sp); +/** + * qla2x00_get_cmd_direction() - Determine control_flag data direction. + * @sp: SCSI command + * + * Returns the proper CF_* direction based on CDB.
+ */ +static inline uint16_t +qla2x00_get_cmd_direction(srb_t *sp) +{ + uint16_t cflags; + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct scsi_qla_host *vha = sp->vha; + + cflags = 0; + + /* Set transfer direction */ + if (cmd->sc_data_direction == DMA_TO_DEVICE) { + cflags = CF_WRITE; + vha->qla_stats.output_bytes += scsi_bufflen(cmd); + vha->qla_stats.output_requests++; + } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { + cflags = CF_READ; + vha->qla_stats.input_bytes += scsi_bufflen(cmd); + vha->qla_stats.input_requests++; + } + return (cflags); +} + +/** + * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and + * Continuation Type 0 IOCBs to allocate. + * + * @dsds: number of data segment descriptors needed + * + * Returns the number of IOCB entries needed to store @dsds. + */ +uint16_t +qla2x00_calc_iocbs_32(uint16_t dsds) +{ + uint16_t iocbs; + + iocbs = 1; + if (dsds > 3) { + iocbs += (dsds - 3) / 7; + if ((dsds - 3) % 7) + iocbs++; + } + return (iocbs); +} + +/** + * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and + * Continuation Type 1 IOCBs to allocate. + * + * @dsds: number of data segment descriptors needed + * + * Returns the number of IOCB entries needed to store @dsds. + */ +uint16_t +qla2x00_calc_iocbs_64(uint16_t dsds) +{ + uint16_t iocbs; + + iocbs = 1; + if (dsds > 2) { + iocbs += (dsds - 2) / 5; + if ((dsds - 2) % 5) + iocbs++; + } + return (iocbs); +} + +/** + * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB. + * @vha: HA context + * + * Returns a pointer to the Continuation Type 0 IOCB packet. + */ +static inline cont_entry_t * +qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha) +{ + cont_entry_t *cont_pkt; + struct req_que *req = vha->req; + /* Adjust ring index. */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else { + req->ring_ptr++; + } + + cont_pkt = (cont_entry_t *)req->ring_ptr; + + /* Load packet defaults. */ + put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type); + + return (cont_pkt); +} + +/** + * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB. + * @vha: HA context + * @req: request queue + * + * Returns a pointer to the continuation type 1 IOCB packet. + */ +cont_a64_entry_t * +qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req) +{ + cont_a64_entry_t *cont_pkt; + + /* Adjust ring index. */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else { + req->ring_ptr++; + } + + cont_pkt = (cont_a64_entry_t *)req->ring_ptr; + + /* Load packet defaults. */ + put_unaligned_le32(IS_QLAFX00(vha->hw) ? 
CONTINUE_A64_TYPE_FX00 : + CONTINUE_A64_TYPE, &cont_pkt->entry_type); + + return (cont_pkt); +} + +inline int +qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) +{ + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + + /* We always use DIFF Bundling for best performance */ + *fw_prot_opts = 0; + + /* Translate SCSI opcode to a protection opcode */ + switch (scsi_get_prot_op(cmd)) { + case SCSI_PROT_READ_STRIP: + *fw_prot_opts |= PO_MODE_DIF_REMOVE; + break; + case SCSI_PROT_WRITE_INSERT: + *fw_prot_opts |= PO_MODE_DIF_INSERT; + break; + case SCSI_PROT_READ_INSERT: + *fw_prot_opts |= PO_MODE_DIF_INSERT; + break; + case SCSI_PROT_WRITE_STRIP: + *fw_prot_opts |= PO_MODE_DIF_REMOVE; + break; + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM) + *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM; + else + *fw_prot_opts |= PO_MODE_DIF_PASS; + break; + default: /* Normal Request */ + *fw_prot_opts |= PO_MODE_DIF_PASS; + break; + } + + if (!(cmd->prot_flags & SCSI_PROT_GUARD_CHECK)) + *fw_prot_opts |= PO_DISABLE_GUARD_CHECK; + + return scsi_prot_sg_count(cmd); +} + +/* + * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit + * capable IOCB types. + * + * @sp: SRB command to process + * @cmd_pkt: Command type 2 IOCB + * @tot_dsds: Total number of segments to transfer + */ +void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, + uint16_t tot_dsds) +{ + uint16_t avail_dsds; + struct dsd32 *cur_dsd; + scsi_qla_host_t *vha; + struct scsi_cmnd *cmd; + struct scatterlist *sg; + int i; + + cmd = GET_CMD_SP(sp); + + /* Update entry type to indicate Command Type 2 IOCB */ + put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type); + + /* No data transfer */ + if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { + cmd_pkt->byte_count = cpu_to_le32(0); + return; + } + + vha = sp->vha; + cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); + + /* Three DSDs are available in the Command Type 2 IOCB */ + avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32); + cur_dsd = cmd_pkt->dsd32; + + /* Load data segments */ + scsi_for_each_sg(cmd, sg, tot_dsds, i) { + cont_entry_t *cont_pkt; + + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Seven DSDs are available in the Continuation + * Type 0 IOCB. + */ + cont_pkt = qla2x00_prep_cont_type0_iocb(vha); + cur_dsd = cont_pkt->dsd; + avail_dsds = ARRAY_SIZE(cont_pkt->dsd); + } + + append_dsd32(&cur_dsd, sg); + avail_dsds--; + } +} + +/** + * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit + * capable IOCB types. 
+ * + * @sp: SRB command to process + * @cmd_pkt: Command type 3 IOCB + * @tot_dsds: Total number of segments to transfer + */ +void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, + uint16_t tot_dsds) +{ + uint16_t avail_dsds; + struct dsd64 *cur_dsd; + scsi_qla_host_t *vha; + struct scsi_cmnd *cmd; + struct scatterlist *sg; + int i; + + cmd = GET_CMD_SP(sp); + + /* Update entry type to indicate Command Type 3 IOCB */ + put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type); + + /* No data transfer */ + if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { + cmd_pkt->byte_count = cpu_to_le32(0); + return; + } + + vha = sp->vha; + cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); + + /* Two DSDs are available in the Command Type 3 IOCB */ + avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64); + cur_dsd = cmd_pkt->dsd64; + + /* Load data segments */ + scsi_for_each_sg(cmd, sg, tot_dsds, i) { + cont_a64_entry_t *cont_pkt; + + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Five DSDs are available in the Continuation + * Type 1 IOCB. + */ + cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); + cur_dsd = cont_pkt->dsd; + avail_dsds = ARRAY_SIZE(cont_pkt->dsd); + } + + append_dsd64(&cur_dsd, sg); + avail_dsds--; + } +} + +/* + * Find the first handle that is not in use, starting from + * req->current_outstanding_cmd + 1. The caller must hold the lock that is + * associated with @req. + */ +uint32_t qla2xxx_get_next_handle(struct req_que *req) +{ + uint32_t index, handle = req->current_outstanding_cmd; + + for (index = 1; index < req->num_outstanding_cmds; index++) { + handle++; + if (handle == req->num_outstanding_cmds) + handle = 1; + if (!req->outstanding_cmds[handle]) + return handle; + } + + return 0; +} + +/** + * qla2x00_start_scsi() - Send a SCSI command to the ISP + * @sp: command to send to the ISP + * + * Returns non-zero if a failure occurred, else zero. + */ +int +qla2x00_start_scsi(srb_t *sp) +{ + int nseg; + unsigned long flags; + scsi_qla_host_t *vha; + struct scsi_cmnd *cmd; + uint32_t *clr_ptr; + uint32_t handle; + cmd_entry_t *cmd_pkt; + uint16_t cnt; + uint16_t req_cnt; + uint16_t tot_dsds; + struct device_reg_2xxx __iomem *reg; + struct qla_hw_data *ha; + struct req_que *req; + struct rsp_que *rsp; + + /* Setup device pointers. */ + vha = sp->vha; + ha = vha->hw; + reg = &ha->iobase->isp; + cmd = GET_CMD_SP(sp); + req = ha->req_q_map[0]; + rsp = ha->rsp_q_map[0]; + /* So we know we haven't pci_map'ed anything yet */ + tot_dsds = 0; + + /* Send marker if required */ + if (vha->marker_needed != 0) { + if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != + QLA_SUCCESS) { + return (QLA_FUNCTION_FAILED); + } + vha->marker_needed = 0; + } + + /* Acquire ring specific lock */ + spin_lock_irqsave(&ha->hardware_lock, flags); + + handle = qla2xxx_get_next_handle(req); + if (handle == 0) + goto queuing_error; + + /* Map the sg table so we have an accurate count of sg entries needed */ + if (scsi_sg_count(cmd)) { + nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), + scsi_sg_count(cmd), cmd->sc_data_direction); + if (unlikely(!nseg)) + goto queuing_error; + } else + nseg = 0; + + tot_dsds = nseg; + + /* Calculate the number of request entries needed. 
 */
+	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
+	if (req->cnt < (req_cnt + 2)) {
+		cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length -
+			    (req->ring_index - cnt);
+		/* If still no head room then bail out */
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+	}
+
+	/* Build command packet */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	req->cnt -= req_cnt;
+
+	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
+	cmd_pkt->handle = handle;
+	/* Zero out remaining portion of packet. */
+	clr_ptr = (uint32_t *)cmd_pkt + 2;
+	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+	/* Set target ID and LUN number*/
+	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
+	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
+	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
+
+	/* Load SCSI command packet. */
+	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
+	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+	/* Build IOCB segments */
+	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
+
+	/* Set total data segment count. */
+	cmd_pkt->entry_count = (uint8_t)req_cnt;
+	wmb();
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else
+		req->ring_ptr++;
+
+	sp->flags |= SRB_DMA_VALID;
+
+	/* Set chip new ring index. */
+	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
+	rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
+
+	/* Manage unprocessed RIO/ZIO commands in response queue. */
+	if (vha->flags.process_response_queue &&
+	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+		qla2x00_process_response_queue(rsp);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return (QLA_SUCCESS);
+
+queuing_error:
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return (QLA_FUNCTION_FAILED);
+}
+
+/**
+ * qla2x00_start_iocbs() - Execute the IOCB command
+ * @vha: HA context
+ * @req: request queue
+ */
+void
+qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
+{
+	struct qla_hw_data *ha = vha->hw;
+	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
+
+	if (IS_P3P_TYPE(ha)) {
+		qla82xx_start_iocbs(vha);
+	} else {
+		/* Adjust ring index. */
+		req->ring_index++;
+		if (req->ring_index == req->length) {
+			req->ring_index = 0;
+			req->ring_ptr = req->ring;
+		} else
+			req->ring_ptr++;
+
+		/* Set chip new ring index. */
+		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+			wrt_reg_dword(req->req_q_in, req->ring_index);
+		} else if (IS_QLA83XX(ha)) {
+			wrt_reg_dword(req->req_q_in, req->ring_index);
+			rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
+		} else if (IS_QLAFX00(ha)) {
+			wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
+			rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
+			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
+		} else if (IS_FWI2_CAPABLE(ha)) {
+			wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
+			rd_reg_dword_relaxed(&reg->isp24.req_q_in);
+		} else {
+			wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
+				req->ring_index);
+			rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
+		}
+	}
+}
+
+/**
+ * __qla2x00_marker() - Send a marker IOCB to the firmware.
+ * @vha: HA context + * @qpair: queue pair pointer + * @loop_id: loop ID + * @lun: LUN + * @type: marker modifier + * + * Can be called from both normal and interrupt context. + * + * Returns non-zero if a failure occurred, else zero. + */ +static int +__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair, + uint16_t loop_id, uint64_t lun, uint8_t type) +{ + mrk_entry_t *mrk; + struct mrk_entry_24xx *mrk24 = NULL; + struct req_que *req = qpair->req; + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + + mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL); + if (mrk == NULL) { + ql_log(ql_log_warn, base_vha, 0x3026, + "Failed to allocate Marker IOCB.\n"); + + return (QLA_FUNCTION_FAILED); + } + + mrk24 = (struct mrk_entry_24xx *)mrk; + + mrk->entry_type = MARKER_TYPE; + mrk->modifier = type; + if (type != MK_SYNC_ALL) { + if (IS_FWI2_CAPABLE(ha)) { + mrk24->nport_handle = cpu_to_le16(loop_id); + int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun); + host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); + mrk24->vp_index = vha->vp_idx; + } else { + SET_TARGET_ID(ha, mrk->target, loop_id); + mrk->lun = cpu_to_le16((uint16_t)lun); + } + } + + if (IS_FWI2_CAPABLE(ha)) + mrk24->handle = QLA_SKIP_HANDLE; + + wmb(); + + qla2x00_start_iocbs(vha, req); + + return (QLA_SUCCESS); +} + +int +qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair, + uint16_t loop_id, uint64_t lun, uint8_t type) +{ + int ret; + unsigned long flags = 0; + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + ret = __qla2x00_marker(vha, qpair, loop_id, lun, type); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + return (ret); +} + +/* + * qla2x00_issue_marker + * + * Issue marker + * Caller CAN have hardware lock held as specified by ha_locked parameter. + * Might release it, then reaquire. + */ +int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked) +{ + if (ha_locked) { + if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0, + MK_SYNC_ALL) != QLA_SUCCESS) + return QLA_FUNCTION_FAILED; + } else { + if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0, + MK_SYNC_ALL) != QLA_SUCCESS) + return QLA_FUNCTION_FAILED; + } + vha->marker_needed = 0; + + return QLA_SUCCESS; +} + +static inline int +qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, + uint16_t tot_dsds) +{ + struct dsd64 *cur_dsd = NULL, *next_dsd; + struct scsi_cmnd *cmd; + struct scatterlist *cur_seg; + uint8_t avail_dsds; + uint8_t first_iocb = 1; + uint32_t dsd_list_len; + struct dsd_dma *dsd_ptr; + struct ct6_dsd *ctx; + struct qla_qpair *qpair = sp->qpair; + + cmd = GET_CMD_SP(sp); + + /* Update entry type to indicate Command Type 3 IOCB */ + put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type); + + /* No data transfer */ + if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE || + tot_dsds == 0) { + cmd_pkt->byte_count = cpu_to_le32(0); + return 0; + } + + /* Set transfer direction */ + if (cmd->sc_data_direction == DMA_TO_DEVICE) { + cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); + qpair->counters.output_bytes += scsi_bufflen(cmd); + qpair->counters.output_requests++; + } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { + cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); + qpair->counters.input_bytes += scsi_bufflen(cmd); + qpair->counters.input_requests++; + } + + cur_seg = scsi_sglist(cmd); + ctx = &sp->u.scmd.ct6_ctx; + + while (tot_dsds) { + avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ? 
+ QLA_DSDS_PER_IOCB : tot_dsds; + tot_dsds -= avail_dsds; + dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE; + + dsd_ptr = list_first_entry(&qpair->dsd_list, struct dsd_dma, list); + next_dsd = dsd_ptr->dsd_addr; + list_del(&dsd_ptr->list); + qpair->dsd_avail--; + list_add_tail(&dsd_ptr->list, &ctx->dsd_list); + ctx->dsd_use_cnt++; + qpair->dsd_inuse++; + + if (first_iocb) { + first_iocb = 0; + put_unaligned_le64(dsd_ptr->dsd_list_dma, + &cmd_pkt->fcp_dsd.address); + cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len); + } else { + put_unaligned_le64(dsd_ptr->dsd_list_dma, + &cur_dsd->address); + cur_dsd->length = cpu_to_le32(dsd_list_len); + cur_dsd++; + } + cur_dsd = next_dsd; + while (avail_dsds) { + append_dsd64(&cur_dsd, cur_seg); + cur_seg = sg_next(cur_seg); + avail_dsds--; + } + } + + /* Null termination */ + cur_dsd->address = 0; + cur_dsd->length = 0; + cur_dsd++; + cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); + return 0; +} + +/* + * qla24xx_calc_dsd_lists() - Determine number of DSD list required + * for Command Type 6. + * + * @dsds: number of data segment descriptors needed + * + * Returns the number of dsd list needed to store @dsds. + */ +static inline uint16_t +qla24xx_calc_dsd_lists(uint16_t dsds) +{ + uint16_t dsd_lists = 0; + + dsd_lists = (dsds/QLA_DSDS_PER_IOCB); + if (dsds % QLA_DSDS_PER_IOCB) + dsd_lists++; + return dsd_lists; +} + + +/** + * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7 + * IOCB types. + * + * @sp: SRB command to process + * @cmd_pkt: Command type 3 IOCB + * @tot_dsds: Total number of segments to transfer + * @req: pointer to request queue + */ +inline void +qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, + uint16_t tot_dsds, struct req_que *req) +{ + uint16_t avail_dsds; + struct dsd64 *cur_dsd; + scsi_qla_host_t *vha; + struct scsi_cmnd *cmd; + struct scatterlist *sg; + int i; + struct qla_qpair *qpair = sp->qpair; + + cmd = GET_CMD_SP(sp); + + /* Update entry type to indicate Command Type 3 IOCB */ + put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type); + + /* No data transfer */ + if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { + cmd_pkt->byte_count = cpu_to_le32(0); + return; + } + + vha = sp->vha; + + /* Set transfer direction */ + if (cmd->sc_data_direction == DMA_TO_DEVICE) { + cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA); + qpair->counters.output_bytes += scsi_bufflen(cmd); + qpair->counters.output_requests++; + } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { + cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA); + qpair->counters.input_bytes += scsi_bufflen(cmd); + qpair->counters.input_requests++; + } + + /* One DSD is available in the Command Type 3 IOCB */ + avail_dsds = 1; + cur_dsd = &cmd_pkt->dsd; + + /* Load data segments */ + + scsi_for_each_sg(cmd, sg, tot_dsds, i) { + cont_a64_entry_t *cont_pkt; + + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Five DSDs are available in the Continuation + * Type 1 IOCB. 
+ */ + cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req); + cur_dsd = cont_pkt->dsd; + avail_dsds = ARRAY_SIZE(cont_pkt->dsd); + } + + append_dsd64(&cur_dsd, sg); + avail_dsds--; + } +} + +struct fw_dif_context { + __le32 ref_tag; + __le16 app_tag; + uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/ + uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/ +}; + +/* + * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command + * + */ +static inline void +qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, + unsigned int protcnt) +{ + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + + pkt->ref_tag = cpu_to_le32(scsi_prot_ref_tag(cmd)); + + if (cmd->prot_flags & SCSI_PROT_REF_CHECK && + qla2x00_hba_err_chk_enabled(sp)) { + pkt->ref_tag_mask[0] = 0xff; + pkt->ref_tag_mask[1] = 0xff; + pkt->ref_tag_mask[2] = 0xff; + pkt->ref_tag_mask[3] = 0xff; + } + + pkt->app_tag = cpu_to_le16(0); + pkt->app_tag_mask[0] = 0x0; + pkt->app_tag_mask[1] = 0x0; +} + +int +qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, + uint32_t *partial) +{ + struct scatterlist *sg; + uint32_t cumulative_partial, sg_len; + dma_addr_t sg_dma_addr; + + if (sgx->num_bytes == sgx->tot_bytes) + return 0; + + sg = sgx->cur_sg; + cumulative_partial = sgx->tot_partial; + + sg_dma_addr = sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed; + + if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) { + sgx->dma_len = (blk_sz - cumulative_partial); + sgx->tot_partial = 0; + sgx->num_bytes += blk_sz; + *partial = 0; + } else { + sgx->dma_len = sg_len - sgx->bytes_consumed; + sgx->tot_partial += sgx->dma_len; + *partial = 1; + } + + sgx->bytes_consumed += sgx->dma_len; + + if (sg_len == sgx->bytes_consumed) { + sg = sg_next(sg); + sgx->num_sg++; + sgx->cur_sg = sg; + sgx->bytes_consumed = 0; + } + + return 1; +} + +int +qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, + struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) +{ + void *next_dsd; + uint8_t avail_dsds = 0; + uint32_t dsd_list_len; + struct dsd_dma *dsd_ptr; + struct scatterlist *sg_prot; + struct dsd64 *cur_dsd = dsd; + uint16_t used_dsds = tot_dsds; + uint32_t prot_int; /* protection interval */ + uint32_t partial; + struct qla2_sgx sgx; + dma_addr_t sle_dma; + uint32_t sle_dma_len, tot_prot_dma_len = 0; + struct scsi_cmnd *cmd; + + memset(&sgx, 0, sizeof(struct qla2_sgx)); + if (sp) { + cmd = GET_CMD_SP(sp); + prot_int = scsi_prot_interval(cmd); + + sgx.tot_bytes = scsi_bufflen(cmd); + sgx.cur_sg = scsi_sglist(cmd); + sgx.sp = sp; + + sg_prot = scsi_prot_sglist(cmd); + } else if (tc) { + prot_int = tc->blk_sz; + sgx.tot_bytes = tc->bufflen; + sgx.cur_sg = tc->sg; + sg_prot = tc->prot_sg; + } else { + BUG(); + return 1; + } + + while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { + + sle_dma = sgx.dma_addr; + sle_dma_len = sgx.dma_len; +alloc_and_fill: + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
+ QLA_DSDS_PER_IOCB : used_dsds; + dsd_list_len = (avail_dsds + 1) * 12; + used_dsds -= avail_dsds; + + /* allocate tracking DS */ + dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); + if (!dsd_ptr) + return 1; + + /* allocate new list */ + dsd_ptr->dsd_addr = next_dsd = + dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, + &dsd_ptr->dsd_list_dma); + + if (!next_dsd) { + /* + * Need to cleanup only this dsd_ptr, rest + * will be done by sp_free_dma() + */ + kfree(dsd_ptr); + return 1; + } + + if (sp) { + list_add_tail(&dsd_ptr->list, + &sp->u.scmd.crc_ctx->dsd_list); + + sp->flags |= SRB_CRC_CTX_DSD_VALID; + } else { + list_add_tail(&dsd_ptr->list, + &(tc->ctx->dsd_list)); + *tc->ctx_dsd_alloced = 1; + } + + + /* add new list to cmd iocb or last list */ + put_unaligned_le64(dsd_ptr->dsd_list_dma, + &cur_dsd->address); + cur_dsd->length = cpu_to_le32(dsd_list_len); + cur_dsd = next_dsd; + } + put_unaligned_le64(sle_dma, &cur_dsd->address); + cur_dsd->length = cpu_to_le32(sle_dma_len); + cur_dsd++; + avail_dsds--; + + if (partial == 0) { + /* Got a full protection interval */ + sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len; + sle_dma_len = 8; + + tot_prot_dma_len += sle_dma_len; + if (tot_prot_dma_len == sg_dma_len(sg_prot)) { + tot_prot_dma_len = 0; + sg_prot = sg_next(sg_prot); + } + + partial = 1; /* So as to not re-enter this block */ + goto alloc_and_fill; + } + } + /* Null termination */ + cur_dsd->address = 0; + cur_dsd->length = 0; + cur_dsd++; + return 0; +} + +int +qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, + struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) +{ + void *next_dsd; + uint8_t avail_dsds = 0; + uint32_t dsd_list_len; + struct dsd_dma *dsd_ptr; + struct scatterlist *sg, *sgl; + struct dsd64 *cur_dsd = dsd; + int i; + uint16_t used_dsds = tot_dsds; + struct scsi_cmnd *cmd; + + if (sp) { + cmd = GET_CMD_SP(sp); + sgl = scsi_sglist(cmd); + } else if (tc) { + sgl = tc->sg; + } else { + BUG(); + return 1; + } + + + for_each_sg(sgl, sg, tot_dsds, i) { + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
+ QLA_DSDS_PER_IOCB : used_dsds; + dsd_list_len = (avail_dsds + 1) * 12; + used_dsds -= avail_dsds; + + /* allocate tracking DS */ + dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); + if (!dsd_ptr) + return 1; + + /* allocate new list */ + dsd_ptr->dsd_addr = next_dsd = + dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, + &dsd_ptr->dsd_list_dma); + + if (!next_dsd) { + /* + * Need to cleanup only this dsd_ptr, rest + * will be done by sp_free_dma() + */ + kfree(dsd_ptr); + return 1; + } + + if (sp) { + list_add_tail(&dsd_ptr->list, + &sp->u.scmd.crc_ctx->dsd_list); + + sp->flags |= SRB_CRC_CTX_DSD_VALID; + } else { + list_add_tail(&dsd_ptr->list, + &(tc->ctx->dsd_list)); + *tc->ctx_dsd_alloced = 1; + } + + /* add new list to cmd iocb or last list */ + put_unaligned_le64(dsd_ptr->dsd_list_dma, + &cur_dsd->address); + cur_dsd->length = cpu_to_le32(dsd_list_len); + cur_dsd = next_dsd; + } + append_dsd64(&cur_dsd, sg); + avail_dsds--; + + } + /* Null termination */ + cur_dsd->address = 0; + cur_dsd->length = 0; + cur_dsd++; + return 0; +} + +int +qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, + struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) +{ + struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd; + struct scatterlist *sg, *sgl; + struct crc_context *difctx = NULL; + struct scsi_qla_host *vha; + uint dsd_list_len; + uint avail_dsds = 0; + uint used_dsds = tot_dsds; + bool dif_local_dma_alloc = false; + bool direction_to_device = false; + int i; + + if (sp) { + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + + sgl = scsi_prot_sglist(cmd); + vha = sp->vha; + difctx = sp->u.scmd.crc_ctx; + direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE; + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, + "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n", + __func__, cmd, difctx, sp); + } else if (tc) { + vha = tc->vha; + sgl = tc->prot_sg; + difctx = tc->ctx; + direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE; + } else { + BUG(); + return 1; + } + + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, + "%s: enter (write=%u)\n", __func__, direction_to_device); + + /* if initiator doing write or target doing read */ + if (direction_to_device) { + for_each_sg(sgl, sg, tot_dsds, i) { + u64 sle_phys = sg_phys(sg); + + /* If SGE addr + len flips bits in upper 32-bits */ + if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) { + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022, + "%s: page boundary crossing (phys=%llx len=%x)\n", + __func__, sle_phys, sg->length); + + if (difctx) { + ha->dif_bundle_crossed_pages++; + dif_local_dma_alloc = true; + } else { + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, + vha, 0xe022, + "%s: difctx pointer is NULL\n", + __func__); + } + break; + } + } + ha->dif_bundle_writes++; + } else { + ha->dif_bundle_reads++; + } + + if (ql2xdifbundlinginternalbuffers) + dif_local_dma_alloc = direction_to_device; + + if (dif_local_dma_alloc) { + u32 track_difbundl_buf = 0; + u32 ldma_sg_len = 0; + u8 ldma_needed = 1; + + difctx->no_dif_bundl = 0; + difctx->dif_bundl_len = 0; + + /* Track DSD buffers */ + INIT_LIST_HEAD(&difctx->ldif_dsd_list); + /* Track local DMA buffers */ + INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list); + + for_each_sg(sgl, sg, tot_dsds, i) { + u32 sglen = sg_dma_len(sg); + + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023, + "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n", + __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len, + difctx->dif_bundl_len, ldma_needed); + + while (sglen) { + u32 xfrlen = 0; + + 
if (ldma_needed) { + /* + * Allocate list item to store + * the DMA buffers + */ + dsd_ptr = kzalloc(sizeof(*dsd_ptr), + GFP_ATOMIC); + if (!dsd_ptr) { + ql_dbg(ql_dbg_tgt, vha, 0xe024, + "%s: failed alloc dsd_ptr\n", + __func__); + return 1; + } + ha->dif_bundle_kallocs++; + + /* allocate dma buffer */ + dsd_ptr->dsd_addr = dma_pool_alloc + (ha->dif_bundl_pool, GFP_ATOMIC, + &dsd_ptr->dsd_list_dma); + if (!dsd_ptr->dsd_addr) { + ql_dbg(ql_dbg_tgt, vha, 0xe024, + "%s: failed alloc ->dsd_ptr\n", + __func__); + /* + * need to cleanup only this + * dsd_ptr rest will be done + * by sp_free_dma() + */ + kfree(dsd_ptr); + ha->dif_bundle_kallocs--; + return 1; + } + ha->dif_bundle_dma_allocs++; + ldma_needed = 0; + difctx->no_dif_bundl++; + list_add_tail(&dsd_ptr->list, + &difctx->ldif_dma_hndl_list); + } + + /* xfrlen is min of dma pool size and sglen */ + xfrlen = (sglen > + (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ? + DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len : + sglen; + + /* replace with local allocated dma buffer */ + sg_pcopy_to_buffer(sgl, sg_nents(sgl), + dsd_ptr->dsd_addr + ldma_sg_len, xfrlen, + difctx->dif_bundl_len); + difctx->dif_bundl_len += xfrlen; + sglen -= xfrlen; + ldma_sg_len += xfrlen; + if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE || + sg_is_last(sg)) { + ldma_needed = 1; + ldma_sg_len = 0; + } + } + } + + track_difbundl_buf = used_dsds = difctx->no_dif_bundl; + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025, + "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n", + difctx->dif_bundl_len, difctx->no_dif_bundl, + track_difbundl_buf); + + if (sp) + sp->flags |= SRB_DIF_BUNDL_DMA_VALID; + else + tc->prot_flags = DIF_BUNDL_DMA_VALID; + + list_for_each_entry_safe(dif_dsd, nxt_dsd, + &difctx->ldif_dma_hndl_list, list) { + u32 sglen = (difctx->dif_bundl_len > + DIF_BUNDLING_DMA_POOL_SIZE) ? + DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len; + + BUG_ON(track_difbundl_buf == 0); + + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, + 0xe024, + "%s: adding continuation iocb's\n", + __func__); + avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
+ QLA_DSDS_PER_IOCB : used_dsds; + dsd_list_len = (avail_dsds + 1) * 12; + used_dsds -= avail_dsds; + + /* allocate tracking DS */ + dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); + if (!dsd_ptr) { + ql_dbg(ql_dbg_tgt, vha, 0xe026, + "%s: failed alloc dsd_ptr\n", + __func__); + return 1; + } + ha->dif_bundle_kallocs++; + + difctx->no_ldif_dsd++; + /* allocate new list */ + dsd_ptr->dsd_addr = + dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, + &dsd_ptr->dsd_list_dma); + if (!dsd_ptr->dsd_addr) { + ql_dbg(ql_dbg_tgt, vha, 0xe026, + "%s: failed alloc ->dsd_addr\n", + __func__); + /* + * need to cleanup only this dsd_ptr + * rest will be done by sp_free_dma() + */ + kfree(dsd_ptr); + ha->dif_bundle_kallocs--; + return 1; + } + ha->dif_bundle_dma_allocs++; + + if (sp) { + list_add_tail(&dsd_ptr->list, + &difctx->ldif_dsd_list); + sp->flags |= SRB_CRC_CTX_DSD_VALID; + } else { + list_add_tail(&dsd_ptr->list, + &difctx->ldif_dsd_list); + tc->ctx_dsd_alloced = 1; + } + + /* add new list to cmd iocb or last list */ + put_unaligned_le64(dsd_ptr->dsd_list_dma, + &cur_dsd->address); + cur_dsd->length = cpu_to_le32(dsd_list_len); + cur_dsd = dsd_ptr->dsd_addr; + } + put_unaligned_le64(dif_dsd->dsd_list_dma, + &cur_dsd->address); + cur_dsd->length = cpu_to_le32(sglen); + cur_dsd++; + avail_dsds--; + difctx->dif_bundl_len -= sglen; + track_difbundl_buf--; + } + + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026, + "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__, + difctx->no_ldif_dsd, difctx->no_dif_bundl); + } else { + for_each_sg(sgl, sg, tot_dsds, i) { + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? + QLA_DSDS_PER_IOCB : used_dsds; + dsd_list_len = (avail_dsds + 1) * 12; + used_dsds -= avail_dsds; + + /* allocate tracking DS */ + dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); + if (!dsd_ptr) { + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, + vha, 0xe027, + "%s: failed alloc dsd_dma...\n", + __func__); + return 1; + } + + /* allocate new list */ + dsd_ptr->dsd_addr = + dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, + &dsd_ptr->dsd_list_dma); + if (!dsd_ptr->dsd_addr) { + /* need to cleanup only this dsd_ptr */ + /* rest will be done by sp_free_dma() */ + kfree(dsd_ptr); + return 1; + } + + if (sp) { + list_add_tail(&dsd_ptr->list, + &difctx->dsd_list); + sp->flags |= SRB_CRC_CTX_DSD_VALID; + } else { + list_add_tail(&dsd_ptr->list, + &difctx->dsd_list); + tc->ctx_dsd_alloced = 1; + } + + /* add new list to cmd iocb or last list */ + put_unaligned_le64(dsd_ptr->dsd_list_dma, + &cur_dsd->address); + cur_dsd->length = cpu_to_le32(dsd_list_len); + cur_dsd = dsd_ptr->dsd_addr; + } + append_dsd64(&cur_dsd, sg); + avail_dsds--; + } + } + /* Null termination */ + cur_dsd->address = 0; + cur_dsd->length = 0; + cur_dsd++; + return 0; +} + +/** + * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command + * Type 6 IOCB types. 
+ * + * @sp: SRB command to process + * @cmd_pkt: Command type 3 IOCB + * @tot_dsds: Total number of segments to transfer + * @tot_prot_dsds: Total number of segments with protection information + * @fw_prot_opts: Protection options to be passed to firmware + */ +static inline int +qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, + uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts) +{ + struct dsd64 *cur_dsd; + __be32 *fcp_dl; + scsi_qla_host_t *vha; + struct scsi_cmnd *cmd; + uint32_t total_bytes = 0; + uint32_t data_bytes; + uint32_t dif_bytes; + uint8_t bundling = 1; + uint16_t blk_size; + struct crc_context *crc_ctx_pkt = NULL; + struct qla_hw_data *ha; + uint8_t additional_fcpcdb_len; + uint16_t fcp_cmnd_len; + struct fcp_cmnd *fcp_cmnd; + dma_addr_t crc_ctx_dma; + + cmd = GET_CMD_SP(sp); + + /* Update entry type to indicate Command Type CRC_2 IOCB */ + put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type); + + vha = sp->vha; + ha = vha->hw; + + /* No data transfer */ + data_bytes = scsi_bufflen(cmd); + if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { + cmd_pkt->byte_count = cpu_to_le32(0); + return QLA_SUCCESS; + } + + cmd_pkt->vp_index = sp->vha->vp_idx; + + /* Set transfer direction */ + if (cmd->sc_data_direction == DMA_TO_DEVICE) { + cmd_pkt->control_flags = + cpu_to_le16(CF_WRITE_DATA); + } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { + cmd_pkt->control_flags = + cpu_to_le16(CF_READ_DATA); + } + + if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || + (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) || + (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) || + (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT)) + bundling = 0; + + /* Allocate CRC context from global pool */ + crc_ctx_pkt = sp->u.scmd.crc_ctx = + dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); + + if (!crc_ctx_pkt) + goto crc_queuing_error; + + crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; + + sp->flags |= SRB_CRC_CTX_DMA_VALID; + + /* Set handle */ + crc_ctx_pkt->handle = cmd_pkt->handle; + + INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); + + qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *) + &crc_ctx_pkt->ref_tag, tot_prot_dsds); + + put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address); + cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW); + + /* Determine SCSI command length -- align to 4 byte boundary */ + if (cmd->cmd_len > 16) { + additional_fcpcdb_len = cmd->cmd_len - 16; + if ((cmd->cmd_len % 4) != 0) { + /* SCSI cmd > 16 bytes must be multiple of 4 */ + goto crc_queuing_error; + } + fcp_cmnd_len = 12 + cmd->cmd_len + 4; + } else { + additional_fcpcdb_len = 0; + fcp_cmnd_len = 12 + 16 + 4; + } + + fcp_cmnd = &crc_ctx_pkt->fcp_cmnd; + + fcp_cmnd->additional_cdb_len = additional_fcpcdb_len; + if (cmd->sc_data_direction == DMA_TO_DEVICE) + fcp_cmnd->additional_cdb_len |= 1; + else if (cmd->sc_data_direction == DMA_FROM_DEVICE) + fcp_cmnd->additional_cdb_len |= 2; + + int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun); + memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); + cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); + put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF, + &cmd_pkt->fcp_cmnd_dseg_address); + fcp_cmnd->task_management = 0; + fcp_cmnd->task_attribute = TSK_SIMPLE; + + cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ + + /* Compute dif len and adjust data len to incude protection */ + dif_bytes = 0; + blk_size = cmd->device->sector_size; + dif_bytes = (data_bytes / blk_size) * 8; + 
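+	/*
+	 * Editor's annotation (not part of the upstream file): T10 DIF adds
+	 * one 8-byte protection tuple per logical block, hence dif_bytes =
+	 * (data_bytes / blk_size) * 8 above; e.g. a 256 KB transfer on
+	 * 512-byte sectors carries 512 * 8 = 4096 bytes of protection data.
+	 * The switch below adds dif_bytes to the Fibre Channel wire byte
+	 * count (total_bytes) only for the operations where protection data
+	 * actually crosses the fabric (READ_STRIP, WRITE_INSERT and the PASS
+	 * cases); for READ_INSERT/WRITE_STRIP the HBA adds or removes it on
+	 * the host side instead.
+	 */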
+ switch (scsi_get_prot_op(GET_CMD_SP(sp))) { + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + total_bytes = data_bytes; + data_bytes += dif_bytes; + break; + + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + total_bytes = data_bytes + dif_bytes; + break; + default: + BUG(); + } + + if (!qla2x00_hba_err_chk_enabled(sp)) + fw_prot_opts |= 0x10; /* Disable Guard tag checking */ + /* HBA error checking enabled */ + else if (IS_PI_UNINIT_CAPABLE(ha)) { + if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1) + || (scsi_get_prot_type(GET_CMD_SP(sp)) == + SCSI_PROT_DIF_TYPE2)) + fw_prot_opts |= BIT_10; + else if (scsi_get_prot_type(GET_CMD_SP(sp)) == + SCSI_PROT_DIF_TYPE3) + fw_prot_opts |= BIT_11; + } + + if (!bundling) { + cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0]; + } else { + /* + * Configure Bundling if we need to fetch interlaving + * protection PCI accesses + */ + fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; + crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); + crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds - + tot_prot_dsds); + cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0]; + } + + /* Finish the common fields of CRC pkt */ + crc_ctx_pkt->blk_size = cpu_to_le16(blk_size); + crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); + crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); + crc_ctx_pkt->guard_seed = cpu_to_le16(0); + /* Fibre channel byte count */ + cmd_pkt->byte_count = cpu_to_le32(total_bytes); + fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 + + additional_fcpcdb_len); + *fcp_dl = htonl(total_bytes); + + if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { + cmd_pkt->byte_count = cpu_to_le32(0); + return QLA_SUCCESS; + } + /* Walks data segments */ + + cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); + + if (!bundling && tot_prot_dsds) { + if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, + cur_dsd, tot_dsds, NULL)) + goto crc_queuing_error; + } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, + (tot_dsds - tot_prot_dsds), NULL)) + goto crc_queuing_error; + + if (bundling && tot_prot_dsds) { + /* Walks dif segments */ + cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); + cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd; + if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, + tot_prot_dsds, NULL)) + goto crc_queuing_error; + } + return QLA_SUCCESS; + +crc_queuing_error: + /* Cleanup will be performed by the caller */ + + return QLA_FUNCTION_FAILED; +} + +/** + * qla24xx_start_scsi() - Send a SCSI command to the ISP + * @sp: command to send to the ISP + * + * Returns non-zero if a failure occurred, else zero. + */ +int +qla24xx_start_scsi(srb_t *sp) +{ + int nseg; + unsigned long flags; + uint32_t *clr_ptr; + uint32_t handle; + struct cmd_type_7 *cmd_pkt; + uint16_t cnt; + uint16_t req_cnt; + uint16_t tot_dsds; + struct req_que *req = NULL; + struct rsp_que *rsp; + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct scsi_qla_host *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + + if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE)) + return qla28xx_start_scsi_edif(sp); + + /* Setup device pointers. 
*/ + req = vha->req; + rsp = req->rsp; + + /* So we know we haven't pci_map'ed anything yet */ + tot_dsds = 0; + + /* Send marker if required */ + if (vha->marker_needed != 0) { + if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != + QLA_SUCCESS) + return QLA_FUNCTION_FAILED; + vha->marker_needed = 0; + } + + /* Acquire ring specific lock */ + spin_lock_irqsave(&ha->hardware_lock, flags); + + handle = qla2xxx_get_next_handle(req); + if (handle == 0) + goto queuing_error; + + /* Map the sg table so we have an accurate count of sg entries needed */ + if (scsi_sg_count(cmd)) { + nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), + scsi_sg_count(cmd), cmd->sc_data_direction); + if (unlikely(!nseg)) + goto queuing_error; + } else + nseg = 0; + + tot_dsds = nseg; + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + + sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; + sp->iores.exch_cnt = 1; + sp->iores.iocb_cnt = req_cnt; + if (qla_get_fw_resources(sp->qpair, &sp->iores)) + goto queuing_error; + + if (req->cnt < (req_cnt + 2)) { + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = rd_reg_dword_relaxed(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } + + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - + (req->ring_index - cnt); + if (req->cnt < (req_cnt + 2)) + goto queuing_error; + } + + /* Build command packet. */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + cmd->host_scribble = (unsigned char *)(unsigned long)handle; + req->cnt -= req_cnt; + + cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; + cmd_pkt->handle = make_handle(req->id, handle); + + /* Zero out remaining portion of packet. */ + /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ + clr_ptr = (uint32_t *)cmd_pkt + 2; + memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); + cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); + + /* Set NPORT-ID and LUN number*/ + cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); + cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; + cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; + cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; + cmd_pkt->vp_index = sp->vha->vp_idx; + + int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + + cmd_pkt->task = TSK_SIMPLE; + + /* Load SCSI command packet. */ + memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); + host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); + + cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); + + /* Build IOCB segments */ + qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); + + /* Set total data segment count. */ + cmd_pkt->entry_count = (uint8_t)req_cnt; + wmb(); + /* Adjust ring index. */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else + req->ring_ptr++; + + sp->qpair->cmd_cnt++; + sp->flags |= SRB_DMA_VALID; + + /* Set chip new ring index. */ + wrt_reg_dword(req->req_q_in, req->ring_index); + + /* Manage unprocessed RIO/ZIO commands in response queue. 
*/ + if (vha->flags.process_response_queue && + rsp->ring_ptr->signature != RESPONSE_PROCESSED) + qla24xx_process_response_queue(vha, rsp); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return QLA_SUCCESS; + +queuing_error: + if (tot_dsds) + scsi_dma_unmap(cmd); + + qla_put_fw_resources(sp->qpair, &sp->iores); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_FUNCTION_FAILED; +} + +/** + * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP + * @sp: command to send to the ISP + * + * Returns non-zero if a failure occurred, else zero. + */ +int +qla24xx_dif_start_scsi(srb_t *sp) +{ + int nseg; + unsigned long flags; + uint32_t *clr_ptr; + uint32_t handle; + uint16_t cnt; + uint16_t req_cnt = 0; + uint16_t tot_dsds; + uint16_t tot_prot_dsds; + uint16_t fw_prot_opts = 0; + struct req_que *req = NULL; + struct rsp_que *rsp = NULL; + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct scsi_qla_host *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + struct cmd_type_crc_2 *cmd_pkt; + uint32_t status = 0; + +#define QDSS_GOT_Q_SPACE BIT_0 + + /* Only process protection or >16 cdb in this routine */ + if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { + if (cmd->cmd_len <= 16) + return qla24xx_start_scsi(sp); + else + return qla_start_scsi_type6(sp); + } + + /* Setup device pointers. */ + req = vha->req; + rsp = req->rsp; + + /* So we know we haven't pci_map'ed anything yet */ + tot_dsds = 0; + + /* Send marker if required */ + if (vha->marker_needed != 0) { + if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != + QLA_SUCCESS) + return QLA_FUNCTION_FAILED; + vha->marker_needed = 0; + } + + /* Acquire ring specific lock */ + spin_lock_irqsave(&ha->hardware_lock, flags); + + handle = qla2xxx_get_next_handle(req); + if (handle == 0) + goto queuing_error; + + /* Compute number of required data segments */ + /* Map the sg table so we have an accurate count of sg entries needed */ + if (scsi_sg_count(cmd)) { + nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), + scsi_sg_count(cmd), cmd->sc_data_direction); + if (unlikely(!nseg)) + goto queuing_error; + else + sp->flags |= SRB_DMA_VALID; + + if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || + (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { + struct qla2_sgx sgx; + uint32_t partial; + + memset(&sgx, 0, sizeof(struct qla2_sgx)); + sgx.tot_bytes = scsi_bufflen(cmd); + sgx.cur_sg = scsi_sglist(cmd); + sgx.sp = sp; + + nseg = 0; + while (qla24xx_get_one_block_sg( + cmd->device->sector_size, &sgx, &partial)) + nseg++; + } + } else + nseg = 0; + + /* number of required data segments */ + tot_dsds = nseg; + + /* Compute number of required protection segments */ + if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) { + nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), + scsi_prot_sg_count(cmd), cmd->sc_data_direction); + if (unlikely(!nseg)) + goto queuing_error; + else + sp->flags |= SRB_CRC_PROT_DMA_VALID; + + if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || + (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { + nseg = scsi_bufflen(cmd) / cmd->device->sector_size; + } + } else { + nseg = 0; + } + + req_cnt = 1; + /* Total Data and protection sg segment(s) */ + tot_prot_dsds = nseg; + tot_dsds += nseg; + + sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; + sp->iores.exch_cnt = 1; + sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + if (qla_get_fw_resources(sp->qpair, &sp->iores)) + goto queuing_error; + + if (req->cnt < (req_cnt + 2)) { + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = 
*req->out_ptr; + } else { + cnt = rd_reg_dword_relaxed(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - + (req->ring_index - cnt); + if (req->cnt < (req_cnt + 2)) + goto queuing_error; + } + + status |= QDSS_GOT_Q_SPACE; + + /* Build header part of command packet (excluding the OPCODE). */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + cmd->host_scribble = (unsigned char *)(unsigned long)handle; + req->cnt -= req_cnt; + + /* Fill-in common area */ + cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; + cmd_pkt->handle = make_handle(req->id, handle); + + clr_ptr = (uint32_t *)cmd_pkt + 2; + memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); + + /* Set NPORT-ID and LUN number*/ + cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); + cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; + cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; + cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; + + int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + + /* Total Data and protection segment(s) */ + cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); + + /* Build IOCB segments and adjust for data protection segments */ + if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) + req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != + QLA_SUCCESS) + goto queuing_error; + + cmd_pkt->entry_count = (uint8_t)req_cnt; + /* Specify response queue number where completion should happen */ + cmd_pkt->entry_status = (uint8_t) rsp->id; + cmd_pkt->timeout = cpu_to_le16(0); + wmb(); + + /* Adjust ring index. */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else + req->ring_ptr++; + + sp->qpair->cmd_cnt++; + /* Set chip new ring index. */ + wrt_reg_dword(req->req_q_in, req->ring_index); + + /* Manage unprocessed RIO/ZIO commands in response queue. */ + if (vha->flags.process_response_queue && + rsp->ring_ptr->signature != RESPONSE_PROCESSED) + qla24xx_process_response_queue(vha, rsp); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_SUCCESS; + +queuing_error: + if (status & QDSS_GOT_Q_SPACE) { + req->outstanding_cmds[handle] = NULL; + req->cnt += req_cnt; + } + /* Cleanup will be performed by the caller (queuecommand) */ + + qla_put_fw_resources(sp->qpair, &sp->iores); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_FUNCTION_FAILED; +} + +/** + * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP + * @sp: command to send to the ISP + * + * Returns non-zero if a failure occurred, else zero. 
+ */ +static int +qla2xxx_start_scsi_mq(srb_t *sp) +{ + int nseg; + unsigned long flags; + uint32_t *clr_ptr; + uint32_t handle; + struct cmd_type_7 *cmd_pkt; + uint16_t cnt; + uint16_t req_cnt; + uint16_t tot_dsds; + struct req_que *req = NULL; + struct rsp_que *rsp; + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct scsi_qla_host *vha = sp->fcport->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_qpair *qpair = sp->qpair; + + if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE)) + return qla28xx_start_scsi_edif(sp); + + /* Acquire qpair specific lock */ + spin_lock_irqsave(&qpair->qp_lock, flags); + + /* Setup qpair pointers */ + req = qpair->req; + rsp = qpair->rsp; + + /* So we know we haven't pci_map'ed anything yet */ + tot_dsds = 0; + + /* Send marker if required */ + if (vha->marker_needed != 0) { + if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != + QLA_SUCCESS) { + spin_unlock_irqrestore(&qpair->qp_lock, flags); + return QLA_FUNCTION_FAILED; + } + vha->marker_needed = 0; + } + + handle = qla2xxx_get_next_handle(req); + if (handle == 0) + goto queuing_error; + + /* Map the sg table so we have an accurate count of sg entries needed */ + if (scsi_sg_count(cmd)) { + nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), + scsi_sg_count(cmd), cmd->sc_data_direction); + if (unlikely(!nseg)) + goto queuing_error; + } else + nseg = 0; + + tot_dsds = nseg; + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + + sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; + sp->iores.exch_cnt = 1; + sp->iores.iocb_cnt = req_cnt; + if (qla_get_fw_resources(sp->qpair, &sp->iores)) + goto queuing_error; + + if (req->cnt < (req_cnt + 2)) { + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = rd_reg_dword_relaxed(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } + + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - + (req->ring_index - cnt); + if (req->cnt < (req_cnt + 2)) + goto queuing_error; + } + + /* Build command packet. */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + cmd->host_scribble = (unsigned char *)(unsigned long)handle; + req->cnt -= req_cnt; + + cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; + cmd_pkt->handle = make_handle(req->id, handle); + + /* Zero out remaining portion of packet. */ + /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ + clr_ptr = (uint32_t *)cmd_pkt + 2; + memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); + cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); + + /* Set NPORT-ID and LUN number*/ + cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); + cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; + cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; + cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; + cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + + int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + + cmd_pkt->task = TSK_SIMPLE; + + /* Load SCSI command packet. */ + memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); + host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); + + cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); + + /* Build IOCB segments */ + qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); + + /* Set total data segment count. */ + cmd_pkt->entry_count = (uint8_t)req_cnt; + wmb(); + /* Adjust ring index. 
*/ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else + req->ring_ptr++; + + sp->qpair->cmd_cnt++; + sp->flags |= SRB_DMA_VALID; + + /* Set chip new ring index. */ + wrt_reg_dword(req->req_q_in, req->ring_index); + + /* Manage unprocessed RIO/ZIO commands in response queue. */ + if (vha->flags.process_response_queue && + rsp->ring_ptr->signature != RESPONSE_PROCESSED) + qla24xx_process_response_queue(vha, rsp); + + spin_unlock_irqrestore(&qpair->qp_lock, flags); + return QLA_SUCCESS; + +queuing_error: + if (tot_dsds) + scsi_dma_unmap(cmd); + + qla_put_fw_resources(sp->qpair, &sp->iores); + spin_unlock_irqrestore(&qpair->qp_lock, flags); + + return QLA_FUNCTION_FAILED; +} + + +/** + * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP + * @sp: command to send to the ISP + * + * Returns non-zero if a failure occurred, else zero. + */ +int +qla2xxx_dif_start_scsi_mq(srb_t *sp) +{ + int nseg; + unsigned long flags; + uint32_t *clr_ptr; + uint32_t handle; + uint16_t cnt; + uint16_t req_cnt = 0; + uint16_t tot_dsds; + uint16_t tot_prot_dsds; + uint16_t fw_prot_opts = 0; + struct req_que *req = NULL; + struct rsp_que *rsp = NULL; + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct scsi_qla_host *vha = sp->fcport->vha; + struct qla_hw_data *ha = vha->hw; + struct cmd_type_crc_2 *cmd_pkt; + uint32_t status = 0; + struct qla_qpair *qpair = sp->qpair; + +#define QDSS_GOT_Q_SPACE BIT_0 + + /* Check for host side state */ + if (!qpair->online) { + cmd->result = DID_NO_CONNECT << 16; + return QLA_INTERFACE_ERROR; + } + + if (!qpair->difdix_supported && + scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { + cmd->result = DID_NO_CONNECT << 16; + return QLA_INTERFACE_ERROR; + } + + /* Only process protection or >16 cdb in this routine */ + if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { + if (cmd->cmd_len <= 16) + return qla2xxx_start_scsi_mq(sp); + else + return qla_start_scsi_type6(sp); + } + + spin_lock_irqsave(&qpair->qp_lock, flags); + + /* Setup qpair pointers */ + rsp = qpair->rsp; + req = qpair->req; + + /* So we know we haven't pci_map'ed anything yet */ + tot_dsds = 0; + + /* Send marker if required */ + if (vha->marker_needed != 0) { + if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != + QLA_SUCCESS) { + spin_unlock_irqrestore(&qpair->qp_lock, flags); + return QLA_FUNCTION_FAILED; + } + vha->marker_needed = 0; + } + + handle = qla2xxx_get_next_handle(req); + if (handle == 0) + goto queuing_error; + + /* Compute number of required data segments */ + /* Map the sg table so we have an accurate count of sg entries needed */ + if (scsi_sg_count(cmd)) { + nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), + scsi_sg_count(cmd), cmd->sc_data_direction); + if (unlikely(!nseg)) + goto queuing_error; + else + sp->flags |= SRB_DMA_VALID; + + if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || + (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { + struct qla2_sgx sgx; + uint32_t partial; + + memset(&sgx, 0, sizeof(struct qla2_sgx)); + sgx.tot_bytes = scsi_bufflen(cmd); + sgx.cur_sg = scsi_sglist(cmd); + sgx.sp = sp; + + nseg = 0; + while (qla24xx_get_one_block_sg( + cmd->device->sector_size, &sgx, &partial)) + nseg++; + } + } else + nseg = 0; + + /* number of required data segments */ + tot_dsds = nseg; + + /* Compute number of required protection segments */ + if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) { + nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), + scsi_prot_sg_count(cmd), cmd->sc_data_direction); + 
if (unlikely(!nseg)) + goto queuing_error; + else + sp->flags |= SRB_CRC_PROT_DMA_VALID; + + if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || + (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { + nseg = scsi_bufflen(cmd) / cmd->device->sector_size; + } + } else { + nseg = 0; + } + + req_cnt = 1; + /* Total Data and protection sg segment(s) */ + tot_prot_dsds = nseg; + tot_dsds += nseg; + + sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; + sp->iores.exch_cnt = 1; + sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + if (qla_get_fw_resources(sp->qpair, &sp->iores)) + goto queuing_error; + + if (req->cnt < (req_cnt + 2)) { + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = rd_reg_dword_relaxed(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } + + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - + (req->ring_index - cnt); + if (req->cnt < (req_cnt + 2)) + goto queuing_error; + } + + status |= QDSS_GOT_Q_SPACE; + + /* Build header part of command packet (excluding the OPCODE). */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + cmd->host_scribble = (unsigned char *)(unsigned long)handle; + req->cnt -= req_cnt; + + /* Fill-in common area */ + cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; + cmd_pkt->handle = make_handle(req->id, handle); + + clr_ptr = (uint32_t *)cmd_pkt + 2; + memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); + + /* Set NPORT-ID and LUN number*/ + cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); + cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; + cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; + cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; + + int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + + /* Total Data and protection segment(s) */ + cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); + + /* Build IOCB segments and adjust for data protection segments */ + if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) + req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != + QLA_SUCCESS) + goto queuing_error; + + cmd_pkt->entry_count = (uint8_t)req_cnt; + cmd_pkt->timeout = cpu_to_le16(0); + wmb(); + + /* Adjust ring index. */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else + req->ring_ptr++; + + sp->qpair->cmd_cnt++; + /* Set chip new ring index. */ + wrt_reg_dword(req->req_q_in, req->ring_index); + + /* Manage unprocessed RIO/ZIO commands in response queue. */ + if (vha->flags.process_response_queue && + rsp->ring_ptr->signature != RESPONSE_PROCESSED) + qla24xx_process_response_queue(vha, rsp); + + spin_unlock_irqrestore(&qpair->qp_lock, flags); + + return QLA_SUCCESS; + +queuing_error: + if (status & QDSS_GOT_Q_SPACE) { + req->outstanding_cmds[handle] = NULL; + req->cnt += req_cnt; + } + /* Cleanup will be performed by the caller (queuecommand) */ + + qla_put_fw_resources(sp->qpair, &sp->iores); + spin_unlock_irqrestore(&qpair->qp_lock, flags); + + return QLA_FUNCTION_FAILED; +} + +/* Generic Control-SRB manipulation functions. */ + +/* hardware_lock assumed to be held. 
 */
+
+void *
+__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
+{
+	scsi_qla_host_t *vha = qpair->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = qpair->req;
+	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
+	uint32_t handle;
+	request_t *pkt;
+	uint16_t cnt, req_cnt;
+
+	pkt = NULL;
+	req_cnt = 1;
+	handle = 0;
+
+	if (sp && (sp->type != SRB_SCSI_CMD)) {
+		/* Adjust entry-counts as needed. */
+		req_cnt = sp->iocbs;
+	}
+
+	/* Check for room on request queue. */
+	if (req->cnt < req_cnt + 2) {
+		if (qpair->use_shadow_reg)
+			cnt = *req->out_ptr;
+		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+		    IS_QLA28XX(ha))
+			cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
+		else if (IS_P3P_TYPE(ha))
+			cnt = rd_reg_dword(reg->isp82.req_q_out);
+		else if (IS_FWI2_CAPABLE(ha))
+			cnt = rd_reg_dword(&reg->isp24.req_q_out);
+		else if (IS_QLAFX00(ha))
+			cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
+		else
+			cnt = qla2x00_debounce_register(
+			    ISP_REQ_Q_OUT(ha, &reg->isp));
+
+		if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
+			qla_schedule_eeh_work(vha);
+			return NULL;
+		}
+
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length -
+			    (req->ring_index - cnt);
+	}
+	if (req->cnt < req_cnt + 2)
+		goto queuing_error;
+
+	if (sp) {
+		handle = qla2xxx_get_next_handle(req);
+		if (handle == 0) {
+			ql_log(ql_log_warn, vha, 0x700b,
+			    "No room on outstanding cmd array.\n");
+			goto queuing_error;
+		}
+
+		/* Prep command array. */
+		req->current_outstanding_cmd = handle;
+		req->outstanding_cmds[handle] = sp;
+		sp->handle = handle;
+	}
+
+	/* Prep packet */
+	req->cnt -= req_cnt;
+	pkt = req->ring_ptr;
+	memset(pkt, 0, REQUEST_ENTRY_SIZE);
+	if (IS_QLAFX00(ha)) {
+		wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
+		wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
+	} else {
+		pkt->entry_count = req_cnt;
+		pkt->handle = handle;
+	}
+
+	return pkt;
+
+queuing_error:
+	qpair->tgt_counters.num_alloc_iocb_failed++;
+	return pkt;
+}
+
+void *
+qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
+{
+	scsi_qla_host_t *vha = qpair->vha;
+
+	if (qla2x00_reset_active(vha))
+		return NULL;
+
+	return __qla2x00_alloc_iocbs(qpair, sp);
+}
+
+void *
+qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
+{
+	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
+}
+
+static void
+qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+	logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
+	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
+		logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
+		if (sp->vha->flags.nvme_first_burst)
+			logio->io_parameter[0] =
+			    cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
+		if (sp->vha->flags.nvme2_enabled) {
+			/* Set service parameter BIT_7 for NVME CONF support */
+			logio->io_parameter[0] |=
+			    cpu_to_le32(NVME_PRLI_SP_CONF);
+			/* Set service parameter BIT_8 for SLER support */
+			logio->io_parameter[0] |=
+			    cpu_to_le32(NVME_PRLI_SP_SLER);
+			/* Set service parameter BIT_9 for PI control support */
+			logio->io_parameter[0] |=
+			    cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
+		}
+	}
+
+	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+	logio->port_id[1] = sp->fcport->d_id.b.area;
+	logio->port_id[2] = sp->fcport->d_id.b.domain;
+	logio->vp_index = sp->vha->vp_idx;
+}
+
+static void
+qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+	struct
srb_iocb *lio = &sp->u.iocb_cmd; + + logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; + logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); + + if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) { + logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); + } else { + logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); + if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI) + logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI); + if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI) + logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); + if (lio->u.logio.flags & SRB_LOGIN_FCSP) { + logio->control_flags |= + cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI); + logio->io_parameter[0] = + cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO); + } + } + logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); + logio->port_id[0] = sp->fcport->d_id.b.al_pa; + logio->port_id[1] = sp->fcport->d_id.b.area; + logio->port_id[2] = sp->fcport->d_id.b.domain; + logio->vp_index = sp->vha->vp_idx; +} + +static void +qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) +{ + struct qla_hw_data *ha = sp->vha->hw; + struct srb_iocb *lio = &sp->u.iocb_cmd; + uint16_t opts; + + mbx->entry_type = MBX_IOCB_TYPE; + SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); + mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT); + opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0; + opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0; + if (HAS_EXTENDED_IDS(ha)) { + mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); + mbx->mb10 = cpu_to_le16(opts); + } else { + mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts); + } + mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); + mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | + sp->fcport->d_id.b.al_pa); + mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); +} + +static void +qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) +{ + u16 control_flags = LCF_COMMAND_LOGO; + logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; + + if (sp->fcport->explicit_logout) { + control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT; + } else { + control_flags |= LCF_IMPL_LOGO; + + if (!sp->fcport->keep_nport_handle) + control_flags |= LCF_FREE_NPORT; + } + + logio->control_flags = cpu_to_le16(control_flags); + logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); + logio->port_id[0] = sp->fcport->d_id.b.al_pa; + logio->port_id[1] = sp->fcport->d_id.b.area; + logio->port_id[2] = sp->fcport->d_id.b.domain; + logio->vp_index = sp->vha->vp_idx; +} + +static void +qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) +{ + struct qla_hw_data *ha = sp->vha->hw; + + mbx->entry_type = MBX_IOCB_TYPE; + SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); + mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT); + mbx->mb1 = HAS_EXTENDED_IDS(ha) ? + cpu_to_le16(sp->fcport->loop_id) : + cpu_to_le16(sp->fcport->loop_id << 8); + mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); + mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | + sp->fcport->d_id.b.al_pa); + mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); + /* Implicit: mbx->mbx10 = 0. 
*/ +} + +static void +qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) +{ + logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; + logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); + logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); + logio->vp_index = sp->vha->vp_idx; +} + +static void +qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) +{ + struct qla_hw_data *ha = sp->vha->hw; + + mbx->entry_type = MBX_IOCB_TYPE; + SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); + mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE); + if (HAS_EXTENDED_IDS(ha)) { + mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); + mbx->mb10 = cpu_to_le16(BIT_0); + } else { + mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0); + } + mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma)); + mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); + mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); + mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); + mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); +} + +static void +qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) +{ + uint32_t flags; + uint64_t lun; + struct fc_port *fcport = sp->fcport; + scsi_qla_host_t *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + struct srb_iocb *iocb = &sp->u.iocb_cmd; + struct req_que *req = sp->qpair->req; + + flags = iocb->u.tmf.flags; + lun = iocb->u.tmf.lun; + + tsk->entry_type = TSK_MGMT_IOCB_TYPE; + tsk->entry_count = 1; + tsk->handle = make_handle(req->id, tsk->handle); + tsk->nport_handle = cpu_to_le16(fcport->loop_id); + tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); + tsk->control_flags = cpu_to_le32(flags); + tsk->port_id[0] = fcport->d_id.b.al_pa; + tsk->port_id[1] = fcport->d_id.b.area; + tsk->port_id[2] = fcport->d_id.b.domain; + tsk->vp_index = fcport->vha->vp_idx; + + if (flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET| + TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) { + int_to_scsilun(lun, &tsk->lun); + host_to_fcp_swap((uint8_t *)&tsk->lun, + sizeof(tsk->lun)); + } +} + +static void +qla2x00_async_done(struct srb *sp, int res) +{ + if (del_timer(&sp->u.iocb_cmd.timer)) { + /* + * Successfully cancelled the timeout handler + * ref: TMR + */ + if (kref_put(&sp->cmd_kref, qla2x00_sp_release)) + return; + } + sp->async_done(sp, res); +} + +void +qla2x00_sp_release(struct kref *kref) +{ + struct srb *sp = container_of(kref, struct srb, cmd_kref); + + sp->free(sp); +} + +void +qla2x00_init_async_sp(srb_t *sp, unsigned long tmo, + void (*done)(struct srb *sp, int res)) +{ + timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0); + sp->done = qla2x00_async_done; + sp->async_done = done; + sp->free = qla2x00_sp_free; + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; + sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; + if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD) + init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); + sp->start_timer = 1; +} + +static void qla2x00_els_dcmd_sp_free(srb_t *sp) +{ + struct srb_iocb *elsio = &sp->u.iocb_cmd; + + kfree(sp->fcport); + + if (elsio->u.els_logo.els_logo_pyld) + dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE, + elsio->u.els_logo.els_logo_pyld, + elsio->u.els_logo.els_logo_pyld_dma); + + del_timer(&elsio->timer); + qla2x00_rel_sp(sp); +} + +static void +qla2x00_els_dcmd_iocb_timeout(void *data) +{ + srb_t *sp = data; + fc_port_t *fcport = sp->fcport; + struct scsi_qla_host *vha = sp->vha; + struct srb_iocb *lio = &sp->u.iocb_cmd; + unsigned long flags = 0; + int res, h; + + ql_dbg(ql_dbg_io, vha, 0x3069, + "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", + 
sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa); + + /* Abort the exchange */ + res = qla24xx_async_abort_cmd(sp, false); + if (res) { + ql_dbg(ql_dbg_io, vha, 0x3070, + "mbx abort_command failed.\n"); + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { + if (sp->qpair->req->outstanding_cmds[h] == sp) { + sp->qpair->req->outstanding_cmds[h] = NULL; + break; + } + } + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + complete(&lio->u.els_logo.comp); + } else { + ql_dbg(ql_dbg_io, vha, 0x3071, + "mbx abort_command success.\n"); + } +} + +static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res) +{ + fc_port_t *fcport = sp->fcport; + struct srb_iocb *lio = &sp->u.iocb_cmd; + struct scsi_qla_host *vha = sp->vha; + + ql_dbg(ql_dbg_io, vha, 0x3072, + "%s hdl=%x, portid=%02x%02x%02x done\n", + sp->name, sp->handle, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + + complete(&lio->u.els_logo.comp); +} + +int +qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, + port_id_t remote_did) +{ + srb_t *sp; + fc_port_t *fcport = NULL; + struct srb_iocb *elsio = NULL; + struct qla_hw_data *ha = vha->hw; + struct els_logo_payload logo_pyld; + int rval = QLA_SUCCESS; + + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (!fcport) { + ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n"); + return -ENOMEM; + } + + /* Alloc SRB structure + * ref: INIT + */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + kfree(fcport); + ql_log(ql_log_info, vha, 0x70e6, + "SRB allocation failed\n"); + return -ENOMEM; + } + + elsio = &sp->u.iocb_cmd; + fcport->loop_id = 0xFFFF; + fcport->d_id.b.domain = remote_did.b.domain; + fcport->d_id.b.area = remote_did.b.area; + fcport->d_id.b.al_pa = remote_did.b.al_pa; + + ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n", + fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); + + sp->type = SRB_ELS_DCMD; + sp->name = "ELS_DCMD"; + sp->fcport = fcport; + qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT, + qla2x00_els_dcmd_sp_done); + sp->free = qla2x00_els_dcmd_sp_free; + sp->u.iocb_cmd.timeout = qla2x00_els_dcmd_iocb_timeout; + init_completion(&sp->u.iocb_cmd.u.els_logo.comp); + + elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev, + DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma, + GFP_KERNEL); + + if (!elsio->u.els_logo.els_logo_pyld) { + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + return QLA_FUNCTION_FAILED; + } + + memset(&logo_pyld, 0, sizeof(struct els_logo_payload)); + + elsio->u.els_logo.els_cmd = els_opcode; + logo_pyld.opcode = els_opcode; + logo_pyld.s_id[0] = vha->d_id.b.al_pa; + logo_pyld.s_id[1] = vha->d_id.b.area; + logo_pyld.s_id[2] = vha->d_id.b.domain; + host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t)); + memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE); + + memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld, + sizeof(struct els_logo_payload)); + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a, + elsio->u.els_logo.els_logo_pyld, + sizeof(*elsio->u.els_logo.els_logo_pyld)); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + return QLA_FUNCTION_FAILED; + } + + ql_dbg(ql_dbg_io, vha, 0x3074, + "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n", + sp->name, sp->handle, fcport->loop_id, 
fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + + wait_for_completion(&elsio->u.els_logo.comp); + + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + return rval; +} + +static void +qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) +{ + scsi_qla_host_t *vha = sp->vha; + struct srb_iocb *elsio = &sp->u.iocb_cmd; + + els_iocb->entry_type = ELS_IOCB_TYPE; + els_iocb->entry_count = 1; + els_iocb->sys_define = 0; + els_iocb->entry_status = 0; + els_iocb->handle = sp->handle; + els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); + els_iocb->tx_dsd_count = cpu_to_le16(1); + els_iocb->vp_index = vha->vp_idx; + els_iocb->sof_type = EST_SOFI3; + els_iocb->rx_dsd_count = 0; + els_iocb->opcode = elsio->u.els_logo.els_cmd; + + els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; + els_iocb->d_id[1] = sp->fcport->d_id.b.area; + els_iocb->d_id[2] = sp->fcport->d_id.b.domain; + /* For SID the byte order is different than DID */ + els_iocb->s_id[1] = vha->d_id.b.al_pa; + els_iocb->s_id[2] = vha->d_id.b.area; + els_iocb->s_id[0] = vha->d_id.b.domain; + + if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) { + if (vha->hw->flags.edif_enabled) + els_iocb->control_flags = cpu_to_le16(ECF_SEC_LOGIN); + else + els_iocb->control_flags = 0; + els_iocb->tx_byte_count = els_iocb->tx_len = + cpu_to_le32(sizeof(struct els_plogi_payload)); + put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma, + &els_iocb->tx_address); + els_iocb->rx_dsd_count = cpu_to_le16(1); + els_iocb->rx_byte_count = els_iocb->rx_len = + cpu_to_le32(sizeof(struct els_plogi_payload)); + put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma, + &els_iocb->rx_address); + + ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, + "PLOGI ELS IOCB:\n"); + ql_dump_buffer(ql_log_info, vha, 0x0109, + (uint8_t *)els_iocb, + sizeof(*els_iocb)); + } else { + els_iocb->tx_byte_count = + cpu_to_le32(sizeof(struct els_logo_payload)); + put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma, + &els_iocb->tx_address); + els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload)); + + els_iocb->rx_byte_count = 0; + els_iocb->rx_address = 0; + els_iocb->rx_len = 0; + ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076, + "LOGO ELS IOCB:"); + ql_dump_buffer(ql_log_info, vha, 0x010b, + els_iocb, + sizeof(*els_iocb)); + } + + sp->vha->qla_stats.control_requests++; +} + +void +qla2x00_els_dcmd2_iocb_timeout(void *data) +{ + srb_t *sp = data; + fc_port_t *fcport = sp->fcport; + struct scsi_qla_host *vha = sp->vha; + unsigned long flags = 0; + int res, h; + + ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069, + "%s hdl=%x ELS Timeout, %8phC portid=%06x\n", + sp->name, sp->handle, fcport->port_name, fcport->d_id.b24); + + /* Abort the exchange */ + res = qla24xx_async_abort_cmd(sp, false); + ql_dbg(ql_dbg_io, vha, 0x3070, + "mbx abort_command %s\n", + (res == QLA_SUCCESS) ? 
"successful" : "failed"); + if (res) { + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { + if (sp->qpair->req->outstanding_cmds[h] == sp) { + sp->qpair->req->outstanding_cmds[h] = NULL; + break; + } + } + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + sp->done(sp, QLA_FUNCTION_TIMEOUT); + } +} + +void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi) +{ + if (els_plogi->els_plogi_pyld) + dma_free_coherent(&vha->hw->pdev->dev, + els_plogi->tx_size, + els_plogi->els_plogi_pyld, + els_plogi->els_plogi_pyld_dma); + + if (els_plogi->els_resp_pyld) + dma_free_coherent(&vha->hw->pdev->dev, + els_plogi->rx_size, + els_plogi->els_resp_pyld, + els_plogi->els_resp_pyld_dma); +} + +static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) +{ + fc_port_t *fcport = sp->fcport; + struct srb_iocb *lio = &sp->u.iocb_cmd; + struct scsi_qla_host *vha = sp->vha; + struct event_arg ea; + struct qla_work_evt *e; + struct fc_port *conflict_fcport; + port_id_t cid; /* conflict Nport id */ + const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; + u16 lid; + + ql_dbg(ql_dbg_disc, vha, 0x3072, + "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n", + sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name); + + fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); + /* For edif, set logout on delete to ensure any residual key from FW is flushed.*/ + fcport->logout_on_delete = 1; + fcport->chip_reset = vha->hw->base_qpair->chip_reset; + + if (sp->flags & SRB_WAKEUP_ON_COMP) + complete(&lio->u.els_plogi.comp); + else { + switch (le32_to_cpu(fw_status[0])) { + case CS_DATA_UNDERRUN: + case CS_COMPLETE: + memset(&ea, 0, sizeof(ea)); + ea.fcport = fcport; + ea.rc = res; + qla_handle_els_plogi_done(vha, &ea); + break; + + case CS_IOCB_ERROR: + switch (le32_to_cpu(fw_status[1])) { + case LSC_SCODE_PORTID_USED: + lid = le32_to_cpu(fw_status[2]) & 0xffff; + qlt_find_sess_invalidate_other(vha, + wwn_to_u64(fcport->port_name), + fcport->d_id, lid, &conflict_fcport); + if (conflict_fcport) { + /* + * Another fcport shares the same + * loop_id & nport id; conflict + * fcport needs to finish cleanup + * before this fcport can proceed + * to login. + */ + conflict_fcport->conflict = fcport; + fcport->login_pause = 1; + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "%s %d %8phC pid %06x inuse with lid %#x.\n", + __func__, __LINE__, + fcport->port_name, + fcport->d_id.b24, lid); + } else { + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "%s %d %8phC pid %06x inuse with lid %#x sched del\n", + __func__, __LINE__, + fcport->port_name, + fcport->d_id.b24, lid); + qla2x00_clear_loop_id(fcport); + set_bit(lid, vha->hw->loop_id_map); + fcport->loop_id = lid; + fcport->keep_nport_handle = 0; + qlt_schedule_sess_for_deletion(fcport); + } + break; + + case LSC_SCODE_NPORT_USED: + cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16) + & 0xff; + cid.b.area = (le32_to_cpu(fw_status[2]) >> 8) + & 0xff; + cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff; + cid.b.rsvd_1 = 0; + + ql_dbg(ql_dbg_disc, vha, 0x20ec, + "%s %d %8phC lid %#x in use with pid %06x post gnl\n", + __func__, __LINE__, fcport->port_name, + fcport->loop_id, cid.b24); + set_bit(fcport->loop_id, + vha->hw->loop_id_map); + fcport->loop_id = FC_NO_LOOP_ID; + qla24xx_post_gnl_work(vha, fcport); + break; + + case LSC_SCODE_NOXCB: + vha->hw->exch_starvation++; + if (vha->hw->exch_starvation > 5) { + ql_log(ql_log_warn, vha, 0xd046, + "Exchange starvation. 
Resetting RISC\n"); + vha->hw->exch_starvation = 0; + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + break; + } + fallthrough; + default: + ql_dbg(ql_dbg_disc, vha, 0x20eb, + "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n", + __func__, sp->fcport->port_name, + fw_status[0], fw_status[1], fw_status[2]); + + fcport->flags &= ~FCF_ASYNC_SENT; + qlt_schedule_sess_for_deletion(fcport); + break; + } + break; + + default: + ql_dbg(ql_dbg_disc, vha, 0x20eb, + "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n", + __func__, sp->fcport->port_name, + fw_status[0], fw_status[1], fw_status[2]); + + sp->fcport->flags &= ~FCF_ASYNC_SENT; + qlt_schedule_sess_for_deletion(fcport); + break; + } + + e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); + if (!e) { + struct srb_iocb *elsio = &sp->u.iocb_cmd; + + qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + return; + } + e->u.iosb.sp = sp; + qla2x00_post_work(vha, e); + } +} + +int +qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, + fc_port_t *fcport, bool wait) +{ + srb_t *sp; + struct srb_iocb *elsio = NULL; + struct qla_hw_data *ha = vha->hw; + int rval = QLA_SUCCESS; + void *ptr, *resp_ptr; + + /* Alloc SRB structure + * ref: INIT + */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + ql_log(ql_log_info, vha, 0x70e6, + "SRB allocation failed\n"); + fcport->flags &= ~FCF_ASYNC_ACTIVE; + return -ENOMEM; + } + + fcport->flags |= FCF_ASYNC_SENT; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); + elsio = &sp->u.iocb_cmd; + ql_dbg(ql_dbg_io, vha, 0x3073, + "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24); + + if (wait) + sp->flags = SRB_WAKEUP_ON_COMP; + + sp->type = SRB_ELS_DCMD; + sp->name = "ELS_DCMD"; + sp->fcport = fcport; + qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT + 2, + qla2x00_els_dcmd2_sp_done); + sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout; + + elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE; + + ptr = elsio->u.els_plogi.els_plogi_pyld = + dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size, + &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL); + + if (!elsio->u.els_plogi.els_plogi_pyld) { + rval = QLA_FUNCTION_FAILED; + goto out; + } + + resp_ptr = elsio->u.els_plogi.els_resp_pyld = + dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size, + &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL); + + if (!elsio->u.els_plogi.els_resp_pyld) { + rval = QLA_FUNCTION_FAILED; + goto out; + } + + ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr); + + memset(ptr, 0, sizeof(struct els_plogi_payload)); + memset(resp_ptr, 0, sizeof(struct els_plogi_payload)); + memcpy(elsio->u.els_plogi.els_plogi_pyld->data, + (void *)&ha->plogi_els_payld + offsetof(struct fc_els_flogi, fl_csp), + sizeof(ha->plogi_els_payld) - offsetof(struct fc_els_flogi, fl_csp)); + + elsio->u.els_plogi.els_cmd = els_opcode; + elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode; + + if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) { + struct fc_els_flogi *p = ptr; + + p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC); + } + + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109, + (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, + sizeof(*elsio->u.els_plogi.els_plogi_pyld)); + + init_completion(&elsio->u.els_plogi.comp); + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + rval = QLA_FUNCTION_FAILED; + } else { + 
ql_dbg(ql_dbg_disc, vha, 0x3074, + "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n", + sp->name, sp->handle, fcport->loop_id, + fcport->d_id.b24, vha->d_id.b24); + } + + if (wait) { + wait_for_completion(&elsio->u.els_plogi.comp); + + if (elsio->u.els_plogi.comp_status != CS_COMPLETE) + rval = QLA_FUNCTION_FAILED; + } else { + goto done; + } + +out: + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + return rval; +} + +/* it is assume qpair lock is held */ +void qla_els_pt_iocb(struct scsi_qla_host *vha, + struct els_entry_24xx *els_iocb, + struct qla_els_pt_arg *a) +{ + els_iocb->entry_type = ELS_IOCB_TYPE; + els_iocb->entry_count = 1; + els_iocb->sys_define = 0; + els_iocb->entry_status = 0; + els_iocb->handle = QLA_SKIP_HANDLE; + els_iocb->nport_handle = a->nport_handle; + els_iocb->rx_xchg_address = a->rx_xchg_address; + els_iocb->tx_dsd_count = cpu_to_le16(1); + els_iocb->vp_index = a->vp_idx; + els_iocb->sof_type = EST_SOFI3; + els_iocb->rx_dsd_count = cpu_to_le16(0); + els_iocb->opcode = a->els_opcode; + + els_iocb->d_id[0] = a->did.b.al_pa; + els_iocb->d_id[1] = a->did.b.area; + els_iocb->d_id[2] = a->did.b.domain; + /* For SID the byte order is different than DID */ + els_iocb->s_id[1] = vha->d_id.b.al_pa; + els_iocb->s_id[2] = vha->d_id.b.area; + els_iocb->s_id[0] = vha->d_id.b.domain; + + els_iocb->control_flags = cpu_to_le16(a->control_flags); + + els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count); + els_iocb->tx_len = cpu_to_le32(a->tx_len); + put_unaligned_le64(a->tx_addr, &els_iocb->tx_address); + + els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count); + els_iocb->rx_len = cpu_to_le32(a->rx_len); + put_unaligned_le64(a->rx_addr, &els_iocb->rx_address); +} + +static void +qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) +{ + struct bsg_job *bsg_job = sp->u.bsg_job; + struct fc_bsg_request *bsg_request = bsg_job->request; + + els_iocb->entry_type = ELS_IOCB_TYPE; + els_iocb->entry_count = 1; + els_iocb->sys_define = 0; + els_iocb->entry_status = 0; + els_iocb->handle = sp->handle; + els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); + els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); + els_iocb->vp_index = sp->vha->vp_idx; + els_iocb->sof_type = EST_SOFI3; + els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); + + els_iocb->opcode = + sp->type == SRB_ELS_CMD_RPT ? 
+ bsg_request->rqst_data.r_els.els_code : + bsg_request->rqst_data.h_els.command_code; + els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; + els_iocb->d_id[1] = sp->fcport->d_id.b.area; + els_iocb->d_id[2] = sp->fcport->d_id.b.domain; + els_iocb->control_flags = 0; + els_iocb->rx_byte_count = + cpu_to_le32(bsg_job->reply_payload.payload_len); + els_iocb->tx_byte_count = + cpu_to_le32(bsg_job->request_payload.payload_len); + + put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), + &els_iocb->tx_address); + els_iocb->tx_len = cpu_to_le32(sg_dma_len + (bsg_job->request_payload.sg_list)); + + put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), + &els_iocb->rx_address); + els_iocb->rx_len = cpu_to_le32(sg_dma_len + (bsg_job->reply_payload.sg_list)); + + sp->vha->qla_stats.control_requests++; +} + +static void +qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) +{ + uint16_t avail_dsds; + struct dsd64 *cur_dsd; + struct scatterlist *sg; + int index; + uint16_t tot_dsds; + scsi_qla_host_t *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + struct bsg_job *bsg_job = sp->u.bsg_job; + int entry_count = 1; + + memset(ct_iocb, 0, sizeof(ms_iocb_entry_t)); + ct_iocb->entry_type = CT_IOCB_TYPE; + ct_iocb->entry_status = 0; + ct_iocb->handle1 = sp->handle; + SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id); + ct_iocb->status = cpu_to_le16(0); + ct_iocb->control_flags = cpu_to_le16(0); + ct_iocb->timeout = 0; + ct_iocb->cmd_dsd_count = + cpu_to_le16(bsg_job->request_payload.sg_cnt); + ct_iocb->total_dsd_count = + cpu_to_le16(bsg_job->request_payload.sg_cnt + 1); + ct_iocb->req_bytecount = + cpu_to_le32(bsg_job->request_payload.payload_len); + ct_iocb->rsp_bytecount = + cpu_to_le32(bsg_job->reply_payload.payload_len); + + put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), + &ct_iocb->req_dsd.address); + ct_iocb->req_dsd.length = ct_iocb->req_bytecount; + + put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), + &ct_iocb->rsp_dsd.address); + ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount; + + avail_dsds = 1; + cur_dsd = &ct_iocb->rsp_dsd; + index = 0; + tot_dsds = bsg_job->reply_payload.sg_cnt; + + for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { + cont_a64_entry_t *cont_pkt; + + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Five DSDs are available in the Cont. + * Type 1 IOCB. 
+ */ + cont_pkt = qla2x00_prep_cont_type1_iocb(vha, + vha->hw->req_q_map[0]); + cur_dsd = cont_pkt->dsd; + avail_dsds = 5; + entry_count++; + } + + append_dsd64(&cur_dsd, sg); + avail_dsds--; + } + ct_iocb->entry_count = entry_count; + + sp->vha->qla_stats.control_requests++; +} + +static void +qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) +{ + uint16_t avail_dsds; + struct dsd64 *cur_dsd; + struct scatterlist *sg; + int index; + uint16_t cmd_dsds, rsp_dsds; + scsi_qla_host_t *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + struct bsg_job *bsg_job = sp->u.bsg_job; + int entry_count = 1; + cont_a64_entry_t *cont_pkt = NULL; + + ct_iocb->entry_type = CT_IOCB_TYPE; + ct_iocb->entry_status = 0; + ct_iocb->sys_define = 0; + ct_iocb->handle = sp->handle; + + ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); + ct_iocb->vp_index = sp->vha->vp_idx; + ct_iocb->comp_status = cpu_to_le16(0); + + cmd_dsds = bsg_job->request_payload.sg_cnt; + rsp_dsds = bsg_job->reply_payload.sg_cnt; + + ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds); + ct_iocb->timeout = 0; + ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds); + ct_iocb->cmd_byte_count = + cpu_to_le32(bsg_job->request_payload.payload_len); + + avail_dsds = 2; + cur_dsd = ct_iocb->dsd; + index = 0; + + for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) { + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Five DSDs are available in the Cont. + * Type 1 IOCB. + */ + cont_pkt = qla2x00_prep_cont_type1_iocb( + vha, ha->req_q_map[0]); + cur_dsd = cont_pkt->dsd; + avail_dsds = 5; + entry_count++; + } + + append_dsd64(&cur_dsd, sg); + avail_dsds--; + } + + index = 0; + + for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) { + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Five DSDs are available in the Cont. + * Type 1 IOCB. + */ + cont_pkt = qla2x00_prep_cont_type1_iocb(vha, + ha->req_q_map[0]); + cur_dsd = cont_pkt->dsd; + avail_dsds = 5; + entry_count++; + } + + append_dsd64(&cur_dsd, sg); + avail_dsds--; + } + ct_iocb->entry_count = entry_count; +} + +/* + * qla82xx_start_scsi() - Send a SCSI command to the ISP + * @sp: command to send to the ISP + * + * Returns non-zero if a failure occurred, else zero. + */ +int +qla82xx_start_scsi(srb_t *sp) +{ + int nseg; + unsigned long flags; + struct scsi_cmnd *cmd; + uint32_t *clr_ptr; + uint32_t handle; + uint16_t cnt; + uint16_t req_cnt; + uint16_t tot_dsds; + struct device_reg_82xx __iomem *reg; + uint32_t dbval; + __be32 *fcp_dl; + uint8_t additional_cdb_len; + struct ct6_dsd *ctx; + struct scsi_qla_host *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = NULL; + struct rsp_que *rsp = NULL; + struct qla_qpair *qpair = sp->qpair; + + /* Setup device pointers. 
*/
+	reg = &ha->iobase->isp82;
+	cmd = GET_CMD_SP(sp);
+	req = vha->req;
+	rsp = ha->rsp_q_map[0];
+
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
+
+	dbval = 0x04 | (ha->portnum << 5);
+
+	/* Send marker if required */
+	if (vha->marker_needed != 0) {
+		if (qla2x00_marker(vha, ha->base_qpair,
+		    0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x300c,
+			    "qla2x00_marker failed for cmd=%p.\n", cmd);
+			return QLA_FUNCTION_FAILED;
+		}
+		vha->marker_needed = 0;
+	}
+
+	/* Acquire ring specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
+		goto queuing_error;
+
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+		    scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+	} else
+		nseg = 0;
+
+	tot_dsds = nseg;
+
+	if (tot_dsds > ql2xshiftctondsd) {
+		struct cmd_type_6 *cmd_pkt;
+		uint16_t more_dsd_lists = 0;
+		struct dsd_dma *dsd_ptr;
+		uint16_t i;
+
+		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
+		if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) {
+			ql_dbg(ql_dbg_io, vha, 0x300d,
+			    "Num of DSD list %d is than %d for cmd=%p.\n",
+			    more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN,
+			    cmd);
+			goto queuing_error;
+		}
+
+		if (more_dsd_lists <= qpair->dsd_avail)
+			goto sufficient_dsds;
+		else
+			more_dsd_lists -= qpair->dsd_avail;
+
+		for (i = 0; i < more_dsd_lists; i++) {
+			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+			if (!dsd_ptr) {
+				ql_log(ql_log_fatal, vha, 0x300e,
+				    "Failed to allocate memory for dsd_dma "
+				    "for cmd=%p.\n", cmd);
+				goto queuing_error;
+			}
+
+			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+			if (!dsd_ptr->dsd_addr) {
+				kfree(dsd_ptr);
+				ql_log(ql_log_fatal, vha, 0x300f,
+				    "Failed to allocate memory for dsd_addr "
+				    "for cmd=%p.\n", cmd);
+				goto queuing_error;
+			}
+			list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
+			qpair->dsd_avail++;
+		}
+
+sufficient_dsds:
+		req_cnt = 1;
+
+		if (req->cnt < (req_cnt + 2)) {
+			cnt = (uint16_t)rd_reg_dword_relaxed(
+			    &reg->req_q_out[0]);
+			if (req->ring_index < cnt)
+				req->cnt = cnt - req->ring_index;
+			else
+				req->cnt = req->length -
+				    (req->ring_index - cnt);
+			if (req->cnt < (req_cnt + 2))
+				goto queuing_error;
+		}
+
+		ctx = &sp->u.scmd.ct6_ctx;
+
+		memset(ctx, 0, sizeof(struct ct6_dsd));
+		ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
+		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+		if (!ctx->fcp_cmnd) {
+			ql_log(ql_log_fatal, vha, 0x3011,
+			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+			goto queuing_error;
+		}
+
+		/* Initialize the DSD list and dma handle */
+		INIT_LIST_HEAD(&ctx->dsd_list);
+		ctx->dsd_use_cnt = 0;
+
+		if (cmd->cmd_len > 16) {
+			additional_cdb_len = cmd->cmd_len - 16;
+			if ((cmd->cmd_len % 4) != 0) {
+				/* SCSI command bigger than 16 bytes must be
+				 * multiple of 4
+				 */
+				ql_log(ql_log_warn, vha, 0x3012,
+				    "scsi cmd len %d not multiple of 4 "
+				    "for cmd=%p.\n", cmd->cmd_len, cmd);
+				goto queuing_error_fcp_cmnd;
+			}
+			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+		} else {
+			additional_cdb_len = 0;
+			ctx->fcp_cmnd_len = 12 + 16 + 4;
+		}
+
+		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+		cmd_pkt->handle = make_handle(req->id, handle);
+
+		/* Zero out remaining portion of packet. */
+		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+		clr_ptr = (uint32_t *)cmd_pkt + 2;
+		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+		/* Set NPORT-ID and LUN number*/
+		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+		cmd_pkt->vp_index = sp->vha->vp_idx;
+
+		/* Build IOCB segments */
+		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
+			goto queuing_error_fcp_cmnd;
+
+		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+		/* build FCP_CMND IU */
+		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
+		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+		if (cmd->sc_data_direction == DMA_TO_DEVICE)
+			ctx->fcp_cmnd->additional_cdb_len |= 1;
+		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+			ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+		/* Populate the FCP_PRIO. */
+		if (ha->flags.fcp_prio_enabled)
+			ctx->fcp_cmnd->task_attribute |=
+			    sp->fcport->fcp_prio << 3;
+
+		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+		fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
+		    additional_cdb_len);
+		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+		put_unaligned_le64(ctx->fcp_cmnd_dma,
+		    &cmd_pkt->fcp_cmnd_dseg_address);
+
+		sp->flags |= SRB_FCP_CMND_DMA_VALID;
+		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+		/* Set total data segment count. */
+		cmd_pkt->entry_count = (uint8_t)req_cnt;
+		/* Specify response queue number where
+		 * completion should happen
+		 */
+		cmd_pkt->entry_status = (uint8_t) rsp->id;
+	} else {
+		struct cmd_type_7 *cmd_pkt;
+
+		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+		if (req->cnt < (req_cnt + 2)) {
+			cnt = (uint16_t)rd_reg_dword_relaxed(
+			    &reg->req_q_out[0]);
+			if (req->ring_index < cnt)
+				req->cnt = cnt - req->ring_index;
+			else
+				req->cnt = req->length -
+				    (req->ring_index - cnt);
+		}
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+
+		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+		cmd_pkt->handle = make_handle(req->id, handle);
+
+		/* Zero out remaining portion of packet. */
+		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
+		clr_ptr = (uint32_t *)cmd_pkt + 2;
+		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+		/* Set NPORT-ID and LUN number*/
+		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+		cmd_pkt->vp_index = sp->vha->vp_idx;
+
+		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
+		    sizeof(cmd_pkt->lun));
+
+		/* Populate the FCP_PRIO. */
+		if (ha->flags.fcp_prio_enabled)
+			cmd_pkt->task |= sp->fcport->fcp_prio << 3;
+
+		/* Load SCSI command packet. */
+		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+		/* Build IOCB segments */
+		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
+
+		/* Set total data segment count. */
+		cmd_pkt->entry_count = (uint8_t)req_cnt;
+		/* Specify response queue number where
+		 * completion should happen.
+		 */
+		cmd_pkt->entry_status = (uint8_t) rsp->id;
+
+	}
+	/* Build command packet.
*/ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + cmd->host_scribble = (unsigned char *)(unsigned long)handle; + req->cnt -= req_cnt; + wmb(); + + /* Adjust ring index. */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else + req->ring_ptr++; + + sp->flags |= SRB_DMA_VALID; + + /* Set chip new ring index. */ + /* write, read and verify logic */ + dbval = dbval | (req->id << 8) | (req->ring_index << 16); + if (ql2xdbwr) + qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval); + else { + wrt_reg_dword(ha->nxdb_wr_ptr, dbval); + wmb(); + while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) { + wrt_reg_dword(ha->nxdb_wr_ptr, dbval); + wmb(); + } + } + + /* Manage unprocessed RIO/ZIO commands in response queue. */ + if (vha->flags.process_response_queue && + rsp->ring_ptr->signature != RESPONSE_PROCESSED) + qla24xx_process_response_queue(vha, rsp); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return QLA_SUCCESS; + +queuing_error_fcp_cmnd: + dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); +queuing_error: + if (tot_dsds) + scsi_dma_unmap(cmd); + + if (sp->u.scmd.crc_ctx) { + mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool); + sp->u.scmd.crc_ctx = NULL; + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_FUNCTION_FAILED; +} + +static void +qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) +{ + struct srb_iocb *aio = &sp->u.iocb_cmd; + scsi_qla_host_t *vha = sp->vha; + struct req_que *req = sp->qpair->req; + srb_t *orig_sp = sp->cmd_sp; + + memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); + abt_iocb->entry_type = ABORT_IOCB_TYPE; + abt_iocb->entry_count = 1; + abt_iocb->handle = make_handle(req->id, sp->handle); + if (sp->fcport) { + abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); + abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; + abt_iocb->port_id[1] = sp->fcport->d_id.b.area; + abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; + } + abt_iocb->handle_to_abort = + make_handle(le16_to_cpu(aio->u.abt.req_que_no), + aio->u.abt.cmd_hndl); + abt_iocb->vp_index = vha->vp_idx; + abt_iocb->req_que_no = aio->u.abt.req_que_no; + + /* need to pass original sp */ + if (orig_sp) + qla_nvme_abort_set_option(abt_iocb, orig_sp); + + /* Send the command to the firmware */ + wmb(); +} + +static void +qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx) +{ + int i, sz; + + mbx->entry_type = MBX_IOCB_TYPE; + mbx->handle = sp->handle; + sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb)); + + for (i = 0; i < sz; i++) + mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i]; +} + +static void +qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt) +{ + sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt; + qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg); + ct_pkt->handle = sp->handle; +} + +static void qla2x00_send_notify_ack_iocb(srb_t *sp, + struct nack_to_isp *nack) +{ + struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy; + + nack->entry_type = NOTIFY_ACK_TYPE; + nack->entry_count = 1; + nack->ox_id = ntfy->ox_id; + + nack->u.isp24.handle = sp->handle; + nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; + if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { + nack->u.isp24.flags = ntfy->u.isp24.flags & + cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); + } + nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; + nack->u.isp24.status = ntfy->u.isp24.status; + 
nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; + nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; + nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; + nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; + nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; + nack->u.isp24.srr_flags = 0; + nack->u.isp24.srr_reject_code = 0; + nack->u.isp24.srr_reject_code_expl = 0; + nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; + + if (ntfy->u.isp24.status_subcode == ELS_PLOGI && + (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP) && + sp->vha->hw->flags.edif_enabled) { + ql_dbg(ql_dbg_disc, sp->vha, 0x3074, + "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %06x\n", + sp->name, sp->handle, sp->fcport->loop_id, + sp->fcport->d_id.b24); + nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP); + } +} + +/* + * Build NVME LS request + */ +static void +qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt) +{ + struct srb_iocb *nvme; + + nvme = &sp->u.iocb_cmd; + cmd_pkt->entry_type = PT_LS4_REQUEST; + cmd_pkt->entry_count = 1; + cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec); + cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + + if (sp->unsol_rsp) { + cmd_pkt->control_flags = + cpu_to_le16(CF_LS4_RESPONDER << CF_LS4_SHIFT); + cmd_pkt->nport_handle = nvme->u.nvme.nport_handle; + cmd_pkt->exchange_address = nvme->u.nvme.exchange_address; + } else { + cmd_pkt->control_flags = + cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT); + cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); + cmd_pkt->rx_dseg_count = cpu_to_le16(1); + cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len; + cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len; + put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address); + } + + cmd_pkt->tx_dseg_count = cpu_to_le16(1); + cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len; + cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len; + put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address); +} + +static void +qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce) +{ + int map, pos; + + vce->entry_type = VP_CTRL_IOCB_TYPE; + vce->handle = sp->handle; + vce->entry_count = 1; + vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd); + vce->vp_count = cpu_to_le16(1); + + /* + * index map in firmware starts with 1; decrement index + * this is ok as we never use index 0 + */ + map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8; + pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7; + vce->vp_idx_map[map] |= 1 << pos; +} + +static void +qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio) +{ + logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; + logio->control_flags = + cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO); + + logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); + logio->port_id[0] = sp->fcport->d_id.b.al_pa; + logio->port_id[1] = sp->fcport->d_id.b.area; + logio->port_id[2] = sp->fcport->d_id.b.domain; + logio->vp_index = sp->fcport->vha->vp_idx; +} + +static int qla_get_iocbs_resource(struct srb *sp) +{ + bool get_exch; + bool push_it_through = false; + + if (!ql2xenforce_iocb_limit) { + sp->iores.res_type = RESOURCE_NONE; + return 0; + } + sp->iores.res_type = RESOURCE_NONE; + + switch (sp->type) { + case SRB_TM_CMD: + case SRB_PRLI_CMD: + case SRB_ADISC_CMD: + push_it_through = true; + fallthrough; + case SRB_LOGIN_CMD: + case SRB_ELS_CMD_RPT: + case SRB_ELS_CMD_HST: + case SRB_ELS_CMD_HST_NOLOGIN: + case SRB_CT_CMD: + case SRB_NVME_LS: + case SRB_ELS_DCMD: + get_exch = true; + break; + + case SRB_FXIOCB_DCMD: + 
case SRB_FXIOCB_BCMD: + sp->iores.res_type = RESOURCE_NONE; + return 0; + + case SRB_SA_UPDATE: + case SRB_SA_REPLACE: + case SRB_MB_IOCB: + case SRB_ABT_CMD: + case SRB_NACK_PLOGI: + case SRB_NACK_PRLI: + case SRB_NACK_LOGO: + case SRB_LOGOUT_CMD: + case SRB_CTRL_VP: + case SRB_MARKER: + default: + push_it_through = true; + get_exch = false; + } + + sp->iores.res_type |= RESOURCE_IOCB; + sp->iores.iocb_cnt = 1; + if (get_exch) { + sp->iores.res_type |= RESOURCE_EXCH; + sp->iores.exch_cnt = 1; + } + if (push_it_through) + sp->iores.res_type |= RESOURCE_FORCE; + + return qla_get_fw_resources(sp->qpair, &sp->iores); +} + +static void +qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk) +{ + mrk->entry_type = MARKER_TYPE; + mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier; + mrk->handle = make_handle(sp->qpair->req->id, sp->handle); + if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) { + mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id); + int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun); + host_to_fcp_swap(mrk->lun, sizeof(mrk->lun)); + mrk->vp_index = sp->u.iocb_cmd.u.tmf.vp_index; + } +} + +int +qla2x00_start_sp(srb_t *sp) +{ + int rval = QLA_SUCCESS; + scsi_qla_host_t *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_qpair *qp = sp->qpair; + void *pkt; + unsigned long flags; + + if (vha->hw->flags.eeh_busy) + return -EIO; + + spin_lock_irqsave(qp->qp_lock_ptr, flags); + rval = qla_get_iocbs_resource(sp); + if (rval) { + spin_unlock_irqrestore(qp->qp_lock_ptr, flags); + return -EAGAIN; + } + + pkt = __qla2x00_alloc_iocbs(sp->qpair, sp); + if (!pkt) { + rval = -EAGAIN; + ql_log(ql_log_warn, vha, 0x700c, + "qla2x00_alloc_iocbs failed.\n"); + goto done; + } + + switch (sp->type) { + case SRB_LOGIN_CMD: + IS_FWI2_CAPABLE(ha) ? + qla24xx_login_iocb(sp, pkt) : + qla2x00_login_iocb(sp, pkt); + break; + case SRB_PRLI_CMD: + qla24xx_prli_iocb(sp, pkt); + break; + case SRB_LOGOUT_CMD: + IS_FWI2_CAPABLE(ha) ? + qla24xx_logout_iocb(sp, pkt) : + qla2x00_logout_iocb(sp, pkt); + break; + case SRB_ELS_CMD_RPT: + case SRB_ELS_CMD_HST: + qla24xx_els_iocb(sp, pkt); + break; + case SRB_ELS_CMD_HST_NOLOGIN: + qla_els_pt_iocb(sp->vha, pkt, &sp->u.bsg_cmd.u.els_arg); + ((struct els_entry_24xx *)pkt)->handle = sp->handle; + break; + case SRB_CT_CMD: + IS_FWI2_CAPABLE(ha) ? + qla24xx_ct_iocb(sp, pkt) : + qla2x00_ct_iocb(sp, pkt); + break; + case SRB_ADISC_CMD: + IS_FWI2_CAPABLE(ha) ? + qla24xx_adisc_iocb(sp, pkt) : + qla2x00_adisc_iocb(sp, pkt); + break; + case SRB_TM_CMD: + IS_QLAFX00(ha) ? + qlafx00_tm_iocb(sp, pkt) : + qla24xx_tm_iocb(sp, pkt); + break; + case SRB_FXIOCB_DCMD: + case SRB_FXIOCB_BCMD: + qlafx00_fxdisc_iocb(sp, pkt); + break; + case SRB_NVME_LS: + qla_nvme_ls(sp, pkt); + break; + case SRB_ABT_CMD: + IS_QLAFX00(ha) ? 
+ qlafx00_abort_iocb(sp, pkt) : + qla24xx_abort_iocb(sp, pkt); + break; + case SRB_ELS_DCMD: + qla24xx_els_logo_iocb(sp, pkt); + break; + case SRB_CT_PTHRU_CMD: + qla2x00_ctpthru_cmd_iocb(sp, pkt); + break; + case SRB_MB_IOCB: + qla2x00_mb_iocb(sp, pkt); + break; + case SRB_NACK_PLOGI: + case SRB_NACK_PRLI: + case SRB_NACK_LOGO: + qla2x00_send_notify_ack_iocb(sp, pkt); + break; + case SRB_CTRL_VP: + qla25xx_ctrlvp_iocb(sp, pkt); + break; + case SRB_PRLO_CMD: + qla24xx_prlo_iocb(sp, pkt); + break; + case SRB_SA_UPDATE: + qla24xx_sa_update_iocb(sp, pkt); + break; + case SRB_SA_REPLACE: + qla24xx_sa_replace_iocb(sp, pkt); + break; + case SRB_MARKER: + qla_marker_iocb(sp, pkt); + break; + default: + break; + } + + if (sp->start_timer) { + /* ref: TMR timer ref + * this code should be just before start_iocbs function + * This will make sure that caller function don't to do + * kref_put even on failure + */ + kref_get(&sp->cmd_kref); + add_timer(&sp->u.iocb_cmd.timer); + } + + wmb(); + qla2x00_start_iocbs(vha, qp->req); +done: + if (rval) + qla_put_fw_resources(sp->qpair, &sp->iores); + spin_unlock_irqrestore(qp->qp_lock_ptr, flags); + return rval; +} + +static void +qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, + struct cmd_bidir *cmd_pkt, uint32_t tot_dsds) +{ + uint16_t avail_dsds; + struct dsd64 *cur_dsd; + uint32_t req_data_len = 0; + uint32_t rsp_data_len = 0; + struct scatterlist *sg; + int index; + int entry_count = 1; + struct bsg_job *bsg_job = sp->u.bsg_job; + + /*Update entry type to indicate bidir command */ + put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type); + + /* Set the transfer direction, in this set both flags + * Also set the BD_WRAP_BACK flag, firmware will take care + * assigning DID=SID for outgoing pkts. + */ + cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); + cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); + cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA | + BD_WRAP_BACK); + + req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; + cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len); + cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len); + cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2); + + vha->bidi_stats.transfer_bytes += req_data_len; + vha->bidi_stats.io_count++; + + vha->qla_stats.output_bytes += req_data_len; + vha->qla_stats.output_requests++; + + /* Only one dsd is available for bidirectional IOCB, remaining dsds + * are bundled in continuation iocb + */ + avail_dsds = 1; + cur_dsd = &cmd_pkt->fcp_dsd; + + index = 0; + + for_each_sg(bsg_job->request_payload.sg_list, sg, + bsg_job->request_payload.sg_cnt, index) { + cont_a64_entry_t *cont_pkt; + + /* Allocate additional continuation packets */ + if (avail_dsds == 0) { + /* Continuation type 1 IOCB can accomodate + * 5 DSDS + */ + cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); + cur_dsd = cont_pkt->dsd; + avail_dsds = 5; + entry_count++; + } + append_dsd64(&cur_dsd, sg); + avail_dsds--; + } + /* For read request DSD will always goes to continuation IOCB + * and follow the write DSD. If there is room on the current IOCB + * then it is added to that IOCB else new continuation IOCB is + * allocated. 
+ */ + for_each_sg(bsg_job->reply_payload.sg_list, sg, + bsg_job->reply_payload.sg_cnt, index) { + cont_a64_entry_t *cont_pkt; + + /* Allocate additional continuation packets */ + if (avail_dsds == 0) { + /* Continuation type 1 IOCB can accomodate + * 5 DSDS + */ + cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); + cur_dsd = cont_pkt->dsd; + avail_dsds = 5; + entry_count++; + } + append_dsd64(&cur_dsd, sg); + avail_dsds--; + } + /* This value should be same as number of IOCB required for this cmd */ + cmd_pkt->entry_count = entry_count; +} + +int +qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) +{ + + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + uint32_t handle; + uint16_t req_cnt; + uint16_t cnt; + uint32_t *clr_ptr; + struct cmd_bidir *cmd_pkt = NULL; + struct rsp_que *rsp; + struct req_que *req; + int rval = EXT_STATUS_OK; + + rval = QLA_SUCCESS; + + rsp = ha->rsp_q_map[0]; + req = vha->req; + + /* Send marker if required */ + if (vha->marker_needed != 0) { + if (qla2x00_marker(vha, ha->base_qpair, + 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) + return EXT_STATUS_MAILBOX; + vha->marker_needed = 0; + } + + /* Acquire ring specific lock */ + spin_lock_irqsave(&ha->hardware_lock, flags); + + handle = qla2xxx_get_next_handle(req); + if (handle == 0) { + rval = EXT_STATUS_BUSY; + goto queuing_error; + } + + /* Calculate number of IOCB required */ + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + + /* Check for room on request queue. */ + if (req->cnt < req_cnt + 2) { + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = rd_reg_dword_relaxed(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } + + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - + (req->ring_index - cnt); + } + if (req->cnt < req_cnt + 2) { + rval = EXT_STATUS_BUSY; + goto queuing_error; + } + + cmd_pkt = (struct cmd_bidir *)req->ring_ptr; + cmd_pkt->handle = make_handle(req->id, handle); + + /* Zero out remaining portion of packet. */ + /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ + clr_ptr = (uint32_t *)cmd_pkt + 2; + memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); + + /* Set NPORT-ID (of vha)*/ + cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id); + cmd_pkt->port_id[0] = vha->d_id.b.al_pa; + cmd_pkt->port_id[1] = vha->d_id.b.area; + cmd_pkt->port_id[2] = vha->d_id.b.domain; + + qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds); + cmd_pkt->entry_status = (uint8_t) rsp->id; + /* Build command packet. */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + req->cnt -= req_cnt; + + /* Send the command to the firmware */ + wmb(); + qla2x00_start_iocbs(vha, req); +queuing_error: + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return rval; +} + +/** + * qla_start_scsi_type6() - Send a SCSI command to the ISP + * @sp: command to send to the ISP + * + * Returns non-zero if a failure occurred, else zero. 
+ */ +static int +qla_start_scsi_type6(srb_t *sp) +{ + int nseg; + unsigned long flags; + uint32_t *clr_ptr; + uint32_t handle; + struct cmd_type_6 *cmd_pkt; + uint16_t cnt; + uint16_t req_cnt; + uint16_t tot_dsds; + struct req_que *req = NULL; + struct rsp_que *rsp; + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct scsi_qla_host *vha = sp->fcport->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_qpair *qpair = sp->qpair; + uint16_t more_dsd_lists = 0; + struct dsd_dma *dsd_ptr; + uint16_t i; + __be32 *fcp_dl; + uint8_t additional_cdb_len; + struct ct6_dsd *ctx; + + /* Acquire qpair specific lock */ + spin_lock_irqsave(&qpair->qp_lock, flags); + + /* Setup qpair pointers */ + req = qpair->req; + rsp = qpair->rsp; + + /* So we know we haven't pci_map'ed anything yet */ + tot_dsds = 0; + + /* Send marker if required */ + if (vha->marker_needed != 0) { + if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { + spin_unlock_irqrestore(&qpair->qp_lock, flags); + return QLA_FUNCTION_FAILED; + } + vha->marker_needed = 0; + } + + handle = qla2xxx_get_next_handle(req); + if (handle == 0) + goto queuing_error; + + /* Map the sg table so we have an accurate count of sg entries needed */ + if (scsi_sg_count(cmd)) { + nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), + scsi_sg_count(cmd), cmd->sc_data_direction); + if (unlikely(!nseg)) + goto queuing_error; + } else { + nseg = 0; + } + + tot_dsds = nseg; + + /* eventhough driver only need 1 T6 IOCB, FW still convert DSD to Continueation IOCB */ + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + + sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; + sp->iores.exch_cnt = 1; + sp->iores.iocb_cnt = req_cnt; + + if (qla_get_fw_resources(sp->qpair, &sp->iores)) + goto queuing_error; + + more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds); + if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) { + ql_dbg(ql_dbg_io, vha, 0x3028, + "Num of DSD list %d is than %d for cmd=%p.\n", + more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN, cmd); + goto queuing_error; + } + + if (more_dsd_lists <= qpair->dsd_avail) + goto sufficient_dsds; + else + more_dsd_lists -= qpair->dsd_avail; + + for (i = 0; i < more_dsd_lists; i++) { + dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); + if (!dsd_ptr) { + ql_log(ql_log_fatal, vha, 0x3029, + "Failed to allocate memory for dsd_dma for cmd=%p.\n", cmd); + goto queuing_error; + } + INIT_LIST_HEAD(&dsd_ptr->list); + + dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, + GFP_ATOMIC, &dsd_ptr->dsd_list_dma); + if (!dsd_ptr->dsd_addr) { + kfree(dsd_ptr); + ql_log(ql_log_fatal, vha, 0x302a, + "Failed to allocate memory for dsd_addr for cmd=%p.\n", cmd); + goto queuing_error; + } + list_add_tail(&dsd_ptr->list, &qpair->dsd_list); + qpair->dsd_avail++; + } + +sufficient_dsds: + req_cnt = 1; + + if (req->cnt < (req_cnt + 2)) { + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = (uint16_t)rd_reg_dword_relaxed(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } + + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - (req->ring_index - cnt); + if (req->cnt < (req_cnt + 2)) + goto queuing_error; + } + + ctx = &sp->u.scmd.ct6_ctx; + + memset(ctx, 0, sizeof(struct ct6_dsd)); + ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, + GFP_ATOMIC, &ctx->fcp_cmnd_dma); + if (!ctx->fcp_cmnd) { + ql_log(ql_log_fatal, vha, 0x3031, + "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); + goto queuing_error; + } + + /* 
Initialize the DSD list and dma handle */ + INIT_LIST_HEAD(&ctx->dsd_list); + ctx->dsd_use_cnt = 0; + + if (cmd->cmd_len > 16) { + additional_cdb_len = cmd->cmd_len - 16; + if (cmd->cmd_len % 4 || + cmd->cmd_len > QLA_CDB_BUF_SIZE) { + /* + * SCSI command bigger than 16 bytes must be + * multiple of 4 or too big. + */ + ql_log(ql_log_warn, vha, 0x3033, + "scsi cmd len %d not multiple of 4 for cmd=%p.\n", + cmd->cmd_len, cmd); + goto queuing_error_fcp_cmnd; + } + ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; + } else { + additional_cdb_len = 0; + ctx->fcp_cmnd_len = 12 + 16 + 4; + } + + /* Build command packet. */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + cmd->host_scribble = (unsigned char *)(unsigned long)handle; + req->cnt -= req_cnt; + + cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; + cmd_pkt->handle = make_handle(req->id, handle); + + /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ + clr_ptr = (uint32_t *)cmd_pkt + 2; + memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); + cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); + + /* Set NPORT-ID and LUN number */ + cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); + cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; + cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; + cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; + cmd_pkt->vp_index = sp->vha->vp_idx; + + /* Build IOCB segments */ + qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds); + + int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + + /* build FCP_CMND IU */ + int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun); + ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; + + if (cmd->sc_data_direction == DMA_TO_DEVICE) + ctx->fcp_cmnd->additional_cdb_len |= 1; + else if (cmd->sc_data_direction == DMA_FROM_DEVICE) + ctx->fcp_cmnd->additional_cdb_len |= 2; + + /* Populate the FCP_PRIO. */ + if (ha->flags.fcp_prio_enabled) + ctx->fcp_cmnd->task_attribute |= + sp->fcport->fcp_prio << 3; + + memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); + + fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 + + additional_cdb_len); + *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); + + cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); + put_unaligned_le64(ctx->fcp_cmnd_dma, + &cmd_pkt->fcp_cmnd_dseg_address); + + sp->flags |= SRB_FCP_CMND_DMA_VALID; + cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); + /* Set total data segment count. */ + cmd_pkt->entry_count = (uint8_t)req_cnt; + + wmb(); + /* Adjust ring index. */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else { + req->ring_ptr++; + } + + sp->qpair->cmd_cnt++; + sp->flags |= SRB_DMA_VALID; + + /* Set chip new ring index. */ + wrt_reg_dword(req->req_q_in, req->ring_index); + + /* Manage unprocessed RIO/ZIO commands in response queue. 
*/ + if (vha->flags.process_response_queue && + rsp->ring_ptr->signature != RESPONSE_PROCESSED) + qla24xx_process_response_queue(vha, rsp); + + spin_unlock_irqrestore(&qpair->qp_lock, flags); + + return QLA_SUCCESS; + +queuing_error_fcp_cmnd: + dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); + +queuing_error: + if (tot_dsds) + scsi_dma_unmap(cmd); + + qla_put_fw_resources(sp->qpair, &sp->iores); + + if (sp->u.scmd.crc_ctx) { + mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool); + sp->u.scmd.crc_ctx = NULL; + } + + spin_unlock_irqrestore(&qpair->qp_lock, flags); + + return QLA_FUNCTION_FAILED; +} diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c new file mode 100644 index 000000000..d48007e18 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -0,0 +1,4820 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#include "qla_def.h" +#include "qla_target.h" +#include "qla_gbl.h" + +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/cpu.h> +#include <linux/t10-pi.h> +#include <scsi/scsi_tcq.h> +#include <scsi/scsi_bsg_fc.h> +#include <scsi/scsi_eh.h> +#include <scsi/fc/fc_fs.h> +#include <linux/nvme-fc-driver.h> + +static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); +static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); +static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); +static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, + sts_entry_t *); +static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha, + struct purex_item *item); +static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha, + uint16_t size); +static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, + void *pkt); +static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, + void **pkt, struct rsp_que **rsp); + +static void +qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item) +{ + void *pkt = &item->iocb; + uint16_t pkt_size = item->size; + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d, + "%s: Enter\n", __func__); + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e, + "-------- ELS REQ -------\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f, + pkt, pkt_size); + + fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt, 0); +} + +const char *const port_state_str[] = { + [FCS_UNKNOWN] = "Unknown", + [FCS_UNCONFIGURED] = "UNCONFIGURED", + [FCS_DEVICE_DEAD] = "DEAD", + [FCS_DEVICE_LOST] = "LOST", + [FCS_ONLINE] = "ONLINE" +}; + +#define SFP_DISABLE_LASER_INITIATED 0x15 /* Sub code of 8070 AEN */ +#define SFP_ENABLE_LASER_INITIATED 0x16 /* Sub code of 8070 AEN */ + +static inline void display_Laser_info(scsi_qla_host_t *vha, + u16 mb1, u16 mb2, u16 mb3) { + + if (mb1 == SFP_DISABLE_LASER_INITIATED) + ql_log(ql_log_warn, vha, 0xf0a2, + "SFP temperature (%d C) reached/exceeded the threshold (%d C). Laser is disabled.\n", + mb3, mb2); + if (mb1 == SFP_ENABLE_LASER_INITIATED) + ql_log(ql_log_warn, vha, 0xf0a3, + "SFP temperature (%d C) reached normal operating level. 
Laser is enabled.\n", + mb3); +} + +static void +qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt) +{ + struct abts_entry_24xx *abts = + (struct abts_entry_24xx *)&pkt->iocb; + struct qla_hw_data *ha = vha->hw; + struct els_entry_24xx *rsp_els; + struct abts_entry_24xx *abts_rsp; + dma_addr_t dma; + uint32_t fctl; + int rval; + + ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__); + + ql_log(ql_log_warn, vha, 0x0287, + "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n", + abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id, + abts->seq_id, abts->seq_cnt); + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, + "-------- ABTS RCV -------\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, + (uint8_t *)abts, sizeof(*abts)); + + rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma, + GFP_KERNEL); + if (!rsp_els) { + ql_log(ql_log_warn, vha, 0x0287, + "Failed allocate dma buffer ABTS/ELS RSP.\n"); + return; + } + + /* terminate exchange */ + rsp_els->entry_type = ELS_IOCB_TYPE; + rsp_els->entry_count = 1; + rsp_els->nport_handle = cpu_to_le16(~0); + rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort; + rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG); + ql_dbg(ql_dbg_init, vha, 0x0283, + "Sending ELS Response to terminate exchange %#x...\n", + abts->rx_xch_addr_to_abort); + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, + "-------- ELS RSP -------\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, + (uint8_t *)rsp_els, sizeof(*rsp_els)); + rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0); + if (rval) { + ql_log(ql_log_warn, vha, 0x0288, + "%s: iocb failed to execute -> %x\n", __func__, rval); + } else if (rsp_els->comp_status) { + ql_log(ql_log_warn, vha, 0x0289, + "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", + __func__, rsp_els->comp_status, + rsp_els->error_subcode_1, rsp_els->error_subcode_2); + } else { + ql_dbg(ql_dbg_init, vha, 0x028a, + "%s: abort exchange done.\n", __func__); + } + + /* send ABTS response */ + abts_rsp = (void *)rsp_els; + memset(abts_rsp, 0, sizeof(*abts_rsp)); + abts_rsp->entry_type = ABTS_RSP_TYPE; + abts_rsp->entry_count = 1; + abts_rsp->nport_handle = abts->nport_handle; + abts_rsp->vp_idx = abts->vp_idx; + abts_rsp->sof_type = abts->sof_type & 0xf0; + abts_rsp->rx_xch_addr = abts->rx_xch_addr; + abts_rsp->d_id[0] = abts->s_id[0]; + abts_rsp->d_id[1] = abts->s_id[1]; + abts_rsp->d_id[2] = abts->s_id[2]; + abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC; + abts_rsp->s_id[0] = abts->d_id[0]; + abts_rsp->s_id[1] = abts->d_id[1]; + abts_rsp->s_id[2] = abts->d_id[2]; + abts_rsp->cs_ctl = abts->cs_ctl; + /* include flipping bit23 in fctl */ + fctl = ~(abts->f_ctl[2] | 0x7F) << 16 | + FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT; + abts_rsp->f_ctl[0] = fctl >> 0 & 0xff; + abts_rsp->f_ctl[1] = fctl >> 8 & 0xff; + abts_rsp->f_ctl[2] = fctl >> 16 & 0xff; + abts_rsp->type = FC_TYPE_BLD; + abts_rsp->rx_id = abts->rx_id; + abts_rsp->ox_id = abts->ox_id; + abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id; + abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id; + abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0); + abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort; + ql_dbg(ql_dbg_init, vha, 0x028b, + "Sending BA ACC response to ABTS %#x...\n", + abts->rx_xch_addr_to_abort); + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, + "-------- ELS RSP -------\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, + 
(uint8_t *)abts_rsp, sizeof(*abts_rsp)); + rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0); + if (rval) { + ql_log(ql_log_warn, vha, 0x028c, + "%s: iocb failed to execute -> %x\n", __func__, rval); + } else if (abts_rsp->comp_status) { + ql_log(ql_log_warn, vha, 0x028d, + "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", + __func__, abts_rsp->comp_status, + abts_rsp->payload.error.subcode1, + abts_rsp->payload.error.subcode2); + } else { + ql_dbg(ql_dbg_init, vha, 0x028ea, + "%s: done.\n", __func__); + } + + dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma); +} + +/** + * __qla_consume_iocb - this routine is used to tell fw driver has processed + * or consumed the head IOCB along with the continuation IOCB's from the + * provided respond queue. + * @vha: host adapter pointer + * @pkt: pointer to current packet. On return, this pointer shall move + * to the next packet. + * @rsp: respond queue pointer. + * + * it is assumed pkt is the head iocb, not the continuation iocbk + */ +void __qla_consume_iocb(struct scsi_qla_host *vha, + void **pkt, struct rsp_que **rsp) +{ + struct rsp_que *rsp_q = *rsp; + response_t *new_pkt; + uint16_t entry_count_remaining; + struct purex_entry_24xx *purex = *pkt; + + entry_count_remaining = purex->entry_count; + while (entry_count_remaining > 0) { + new_pkt = rsp_q->ring_ptr; + *pkt = new_pkt; + + rsp_q->ring_index++; + if (rsp_q->ring_index == rsp_q->length) { + rsp_q->ring_index = 0; + rsp_q->ring_ptr = rsp_q->ring; + } else { + rsp_q->ring_ptr++; + } + + new_pkt->signature = RESPONSE_PROCESSED; + /* flush signature */ + wmb(); + --entry_count_remaining; + } +} + +/** + * __qla_copy_purex_to_buffer - extract ELS payload from Purex IOCB + * and save to provided buffer + * @vha: host adapter pointer + * @pkt: pointer Purex IOCB + * @rsp: respond queue + * @buf: extracted ELS payload copy here + * @buf_len: buffer length + */ +int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, + void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len) +{ + struct purex_entry_24xx *purex = *pkt; + struct rsp_que *rsp_q = *rsp; + sts_cont_entry_t *new_pkt; + uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0; + uint16_t buffer_copy_offset = 0; + uint16_t entry_count_remaining; + u16 tpad; + + entry_count_remaining = purex->entry_count; + total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) + - PURX_ELS_HEADER_SIZE; + + /* + * end of payload may not end in 4bytes boundary. Need to + * round up / pad for room to swap, before saving data + */ + tpad = roundup(total_bytes, 4); + + if (buf_len < tpad) { + ql_dbg(ql_dbg_async, vha, 0x5084, + "%s buffer is too small %d < %d\n", + __func__, buf_len, tpad); + __qla_consume_iocb(vha, pkt, rsp); + return -EIO; + } + + pending_bytes = total_bytes = tpad; + no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ? 
+ sizeof(purex->els_frame_payload) : pending_bytes; + + memcpy(buf, &purex->els_frame_payload[0], no_bytes); + buffer_copy_offset += no_bytes; + pending_bytes -= no_bytes; + --entry_count_remaining; + + ((response_t *)purex)->signature = RESPONSE_PROCESSED; + /* flush signature */ + wmb(); + + do { + while ((total_bytes > 0) && (entry_count_remaining > 0)) { + new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr; + *pkt = new_pkt; + + if (new_pkt->entry_type != STATUS_CONT_TYPE) { + ql_log(ql_log_warn, vha, 0x507a, + "Unexpected IOCB type, partial data 0x%x\n", + buffer_copy_offset); + break; + } + + rsp_q->ring_index++; + if (rsp_q->ring_index == rsp_q->length) { + rsp_q->ring_index = 0; + rsp_q->ring_ptr = rsp_q->ring; + } else { + rsp_q->ring_ptr++; + } + no_bytes = (pending_bytes > sizeof(new_pkt->data)) ? + sizeof(new_pkt->data) : pending_bytes; + if ((buffer_copy_offset + no_bytes) <= total_bytes) { + memcpy((buf + buffer_copy_offset), new_pkt->data, + no_bytes); + buffer_copy_offset += no_bytes; + pending_bytes -= no_bytes; + --entry_count_remaining; + } else { + ql_log(ql_log_warn, vha, 0x5044, + "Attempt to copy more that we got, optimizing..%x\n", + buffer_copy_offset); + memcpy((buf + buffer_copy_offset), new_pkt->data, + total_bytes - buffer_copy_offset); + } + + ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED; + /* flush signature */ + wmb(); + } + + if (pending_bytes != 0 || entry_count_remaining != 0) { + ql_log(ql_log_fatal, vha, 0x508b, + "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n", + total_bytes, entry_count_remaining); + return -EIO; + } + } while (entry_count_remaining > 0); + + be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2); + + return 0; +} + +/** + * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. + * @irq: interrupt number + * @dev_id: SCSI driver HA context + * + * Called by system whenever the host adapter generates an interrupt. + * + * Returns handled flag. + */ +irqreturn_t +qla2100_intr_handler(int irq, void *dev_id) +{ + scsi_qla_host_t *vha; + struct qla_hw_data *ha; + struct device_reg_2xxx __iomem *reg; + int status; + unsigned long iter; + uint16_t hccr; + uint16_t mb[8]; + struct rsp_que *rsp; + unsigned long flags; + + rsp = (struct rsp_que *) dev_id; + if (!rsp) { + ql_log(ql_log_info, NULL, 0x505d, + "%s: NULL response queue pointer.\n", __func__); + return (IRQ_NONE); + } + + ha = rsp->hw; + reg = &ha->iobase->isp; + status = 0; + + spin_lock_irqsave(&ha->hardware_lock, flags); + vha = pci_get_drvdata(ha->pdev); + for (iter = 50; iter--; ) { + hccr = rd_reg_word(&reg->hccr); + if (qla2x00_check_reg16_for_disconnect(vha, hccr)) + break; + if (hccr & HCCR_RISC_PAUSE) { + if (pci_channel_offline(ha->pdev)) + break; + + /* + * Issue a "HARD" reset in order for the RISC interrupt + * bit to be cleared. Schedule a big hammer to get + * out of the RISC PAUSED state. + */ + wrt_reg_word(&reg->hccr, HCCR_RESET_RISC); + rd_reg_word(&reg->hccr); + + ha->isp_ops->fw_dump(vha); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + } else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0) + break; + + if (rd_reg_word(&reg->semaphore) & BIT_0) { + wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT); + rd_reg_word(&reg->hccr); + + /* Get mailbox data. 
*/ + mb[0] = RD_MAILBOX_REG(ha, reg, 0); + if (mb[0] > 0x3fff && mb[0] < 0x8000) { + qla2x00_mbx_completion(vha, mb[0]); + status |= MBX_INTERRUPT; + } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { + mb[1] = RD_MAILBOX_REG(ha, reg, 1); + mb[2] = RD_MAILBOX_REG(ha, reg, 2); + mb[3] = RD_MAILBOX_REG(ha, reg, 3); + qla2x00_async_event(vha, rsp, mb); + } else { + /*EMPTY*/ + ql_dbg(ql_dbg_async, vha, 0x5025, + "Unrecognized interrupt type (%d).\n", + mb[0]); + } + /* Release mailbox registers. */ + wrt_reg_word(&reg->semaphore, 0); + rd_reg_word(&reg->semaphore); + } else { + qla2x00_process_response_queue(rsp); + + wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT); + rd_reg_word(&reg->hccr); + } + } + qla2x00_handle_mbx_completion(ha, status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return (IRQ_HANDLED); +} + +bool +qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) +{ + /* Check for PCI disconnection */ + if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) { + if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && + !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) && + !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) { + qla_schedule_eeh_work(vha); + } + return true; + } else + return false; +} + +bool +qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg) +{ + return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg); +} + +/** + * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. + * @irq: interrupt number + * @dev_id: SCSI driver HA context + * + * Called by system whenever the host adapter generates an interrupt. + * + * Returns handled flag. + */ +irqreturn_t +qla2300_intr_handler(int irq, void *dev_id) +{ + scsi_qla_host_t *vha; + struct device_reg_2xxx __iomem *reg; + int status; + unsigned long iter; + uint32_t stat; + uint16_t hccr; + uint16_t mb[8]; + struct rsp_que *rsp; + struct qla_hw_data *ha; + unsigned long flags; + + rsp = (struct rsp_que *) dev_id; + if (!rsp) { + ql_log(ql_log_info, NULL, 0x5058, + "%s: NULL response queue pointer.\n", __func__); + return (IRQ_NONE); + } + + ha = rsp->hw; + reg = &ha->iobase->isp; + status = 0; + + spin_lock_irqsave(&ha->hardware_lock, flags); + vha = pci_get_drvdata(ha->pdev); + for (iter = 50; iter--; ) { + stat = rd_reg_dword(&reg->u.isp2300.host_status); + if (qla2x00_check_reg32_for_disconnect(vha, stat)) + break; + if (stat & HSR_RISC_PAUSED) { + if (unlikely(pci_channel_offline(ha->pdev))) + break; + + hccr = rd_reg_word(&reg->hccr); + + if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) + ql_log(ql_log_warn, vha, 0x5026, + "Parity error -- HCCR=%x, Dumping " + "firmware.\n", hccr); + else + ql_log(ql_log_warn, vha, 0x5027, + "RISC paused -- HCCR=%x, Dumping " + "firmware.\n", hccr); + + /* + * Issue a "HARD" reset in order for the RISC + * interrupt bit to be cleared. Schedule a big + * hammer to get out of the RISC PAUSED state. + */ + wrt_reg_word(&reg->hccr, HCCR_RESET_RISC); + rd_reg_word(&reg->hccr); + + ha->isp_ops->fw_dump(vha); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + } else if ((stat & HSR_RISC_INT) == 0) + break; + + switch (stat & 0xff) { + case 0x1: + case 0x2: + case 0x10: + case 0x11: + qla2x00_mbx_completion(vha, MSW(stat)); + status |= MBX_INTERRUPT; + + /* Release mailbox registers. 
*/ + wrt_reg_word(&reg->semaphore, 0); + break; + case 0x12: + mb[0] = MSW(stat); + mb[1] = RD_MAILBOX_REG(ha, reg, 1); + mb[2] = RD_MAILBOX_REG(ha, reg, 2); + mb[3] = RD_MAILBOX_REG(ha, reg, 3); + qla2x00_async_event(vha, rsp, mb); + break; + case 0x13: + qla2x00_process_response_queue(rsp); + break; + case 0x15: + mb[0] = MBA_CMPLT_1_16BIT; + mb[1] = MSW(stat); + qla2x00_async_event(vha, rsp, mb); + break; + case 0x16: + mb[0] = MBA_SCSI_COMPLETION; + mb[1] = MSW(stat); + mb[2] = RD_MAILBOX_REG(ha, reg, 2); + qla2x00_async_event(vha, rsp, mb); + break; + default: + ql_dbg(ql_dbg_async, vha, 0x5028, + "Unrecognized interrupt type (%d).\n", stat & 0xff); + break; + } + wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT); + rd_reg_word_relaxed(&reg->hccr); + } + qla2x00_handle_mbx_completion(ha, status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return (IRQ_HANDLED); +} + +/** + * qla2x00_mbx_completion() - Process mailbox command completions. + * @vha: SCSI driver HA context + * @mb0: Mailbox0 register + */ +static void +qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) +{ + uint16_t cnt; + uint32_t mboxes; + __le16 __iomem *wptr; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + /* Read all mbox registers? */ + WARN_ON_ONCE(ha->mbx_count > 32); + mboxes = (1ULL << ha->mbx_count) - 1; + if (!ha->mcp) + ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); + else + mboxes = ha->mcp->in_mb; + + /* Load return mailbox registers. */ + ha->flags.mbox_int = 1; + ha->mailbox_out[0] = mb0; + mboxes >>= 1; + wptr = MAILBOX_REG(ha, reg, 1); + + for (cnt = 1; cnt < ha->mbx_count; cnt++) { + if (IS_QLA2200(ha) && cnt == 8) + wptr = MAILBOX_REG(ha, reg, 8); + if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) + ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); + else if (mboxes & BIT_0) + ha->mailbox_out[cnt] = rd_reg_word(wptr); + + wptr++; + mboxes >>= 1; + } +} + +static void +qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) +{ + static char *event[] = + { "Complete", "Request Notification", "Time Extension" }; + int rval; + struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; + struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82; + __le16 __iomem *wptr; + uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; + + /* Seed data -- mailbox1 -> mailbox7. */ + if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) + wptr = &reg24->mailbox1; + else if (IS_QLA8044(vha->hw)) + wptr = &reg82->mailbox_out[1]; + else + return; + + for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) + mb[cnt] = rd_reg_word(wptr); + + ql_dbg(ql_dbg_async, vha, 0x5021, + "Inter-Driver Communication %s -- " + "%04x %04x %04x %04x %04x %04x %04x.\n", + event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], + mb[4], mb[5], mb[6]); + switch (aen) { + /* Handle IDC Error completion case. */ + case MBA_IDC_COMPLETE: + if (mb[1] >> 15) { + vha->hw->flags.idc_compl_status = 1; + if (vha->hw->notify_dcbx_comp && !vha->vp_idx) + complete(&vha->hw->dcbx_comp); + } + break; + + case MBA_IDC_NOTIFY: + /* Acknowledgement needed? [Notify && non-zero timeout]. 
*/ + timeout = (descr >> 8) & 0xf; + ql_dbg(ql_dbg_async, vha, 0x5022, + "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", + vha->host_no, event[aen & 0xff], timeout); + + if (!timeout) + return; + rval = qla2x00_post_idc_ack_work(vha, mb); + if (rval != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0x5023, + "IDC failed to post ACK.\n"); + break; + case MBA_IDC_TIME_EXT: + vha->hw->idc_extend_tmo = descr; + ql_dbg(ql_dbg_async, vha, 0x5087, + "%lu Inter-Driver Communication %s -- " + "Extend timeout by=%d.\n", + vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo); + break; + } +} + +#define LS_UNKNOWN 2 +const char * +qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) +{ + static const char *const link_speeds[] = { + "1", "2", "?", "4", "8", "16", "32", "64", "10" + }; +#define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1) + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) + return link_speeds[0]; + else if (speed == 0x13) + return link_speeds[QLA_LAST_SPEED]; + else if (speed < QLA_LAST_SPEED) + return link_speeds[speed]; + else + return link_speeds[LS_UNKNOWN]; +} + +static void +qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) +{ + struct qla_hw_data *ha = vha->hw; + + /* + * 8200 AEN Interpretation: + * mb[0] = AEN code + * mb[1] = AEN Reason code + * mb[2] = LSW of Peg-Halt Status-1 Register + * mb[6] = MSW of Peg-Halt Status-1 Register + * mb[3] = LSW of Peg-Halt Status-2 register + * mb[7] = MSW of Peg-Halt Status-2 register + * mb[4] = IDC Device-State Register value + * mb[5] = IDC Driver-Presence Register value + */ + ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: " + "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", + mb[0], mb[1], mb[2], mb[6]); + ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x " + "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x " + "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]); + + if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE | + IDC_HEARTBEAT_FAILURE)) { + ha->flags.nic_core_hung = 1; + ql_log(ql_log_warn, vha, 0x5060, + "83XX: F/W Error Reported: Check if reset required.\n"); + + if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) { + uint32_t protocol_engine_id, fw_err_code, err_level; + + /* + * IDC_PEG_HALT_STATUS_CHANGE interpretation: + * - PEG-Halt Status-1 Register: + * (LSW = mb[2], MSW = mb[6]) + * Bits 0-7 = protocol-engine ID + * Bits 8-28 = f/w error code + * Bits 29-31 = Error-level + * Error-level 0x1 = Non-Fatal error + * Error-level 0x2 = Recoverable Fatal error + * Error-level 0x4 = UnRecoverable Fatal error + * - PEG-Halt Status-2 Register: + * (LSW = mb[3], MSW = mb[7]) + */ + protocol_engine_id = (mb[2] & 0xff); + fw_err_code = (((mb[2] & 0xff00) >> 8) | + ((mb[6] & 0x1fff) << 8)); + err_level = ((mb[6] & 0xe000) >> 13); + ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 " + "Register: protocol_engine_id=0x%x " + "fw_err_code=0x%x err_level=0x%x.\n", + protocol_engine_id, fw_err_code, err_level); + ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 " + "Register: 0x%x%x.\n", mb[7], mb[3]); + if (err_level == ERR_LEVEL_NON_FATAL) { + ql_log(ql_log_warn, vha, 0x5063, + "Not a fatal error, f/w has recovered itself.\n"); + } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { + ql_log(ql_log_fatal, vha, 0x5064, + "Recoverable Fatal error: Chip reset " + "required.\n"); + qla83xx_schedule_work(vha, + QLA83XX_NIC_CORE_RESET); + } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { + ql_log(ql_log_fatal, vha, 0x5065, + 
"Unrecoverable Fatal error: Set FAILED " + "state, reboot required.\n"); + qla83xx_schedule_work(vha, + QLA83XX_NIC_CORE_UNRECOVERABLE); + } + } + + if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { + uint16_t peg_fw_state, nw_interface_link_up; + uint16_t nw_interface_signal_detect, sfp_status; + uint16_t htbt_counter, htbt_monitor_enable; + uint16_t sfp_additional_info, sfp_multirate; + uint16_t sfp_tx_fault, link_speed, dcbx_status; + + /* + * IDC_NIC_FW_REPORTED_FAILURE interpretation: + * - PEG-to-FC Status Register: + * (LSW = mb[2], MSW = mb[6]) + * Bits 0-7 = Peg-Firmware state + * Bit 8 = N/W Interface Link-up + * Bit 9 = N/W Interface signal detected + * Bits 10-11 = SFP Status + * SFP Status 0x0 = SFP+ transceiver not expected + * SFP Status 0x1 = SFP+ transceiver not present + * SFP Status 0x2 = SFP+ transceiver invalid + * SFP Status 0x3 = SFP+ transceiver present and + * valid + * Bits 12-14 = Heartbeat Counter + * Bit 15 = Heartbeat Monitor Enable + * Bits 16-17 = SFP Additional Info + * SFP info 0x0 = Unregocnized transceiver for + * Ethernet + * SFP info 0x1 = SFP+ brand validation failed + * SFP info 0x2 = SFP+ speed validation failed + * SFP info 0x3 = SFP+ access error + * Bit 18 = SFP Multirate + * Bit 19 = SFP Tx Fault + * Bits 20-22 = Link Speed + * Bits 23-27 = Reserved + * Bits 28-30 = DCBX Status + * DCBX Status 0x0 = DCBX Disabled + * DCBX Status 0x1 = DCBX Enabled + * DCBX Status 0x2 = DCBX Exchange error + * Bit 31 = Reserved + */ + peg_fw_state = (mb[2] & 0x00ff); + nw_interface_link_up = ((mb[2] & 0x0100) >> 8); + nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); + sfp_status = ((mb[2] & 0x0c00) >> 10); + htbt_counter = ((mb[2] & 0x7000) >> 12); + htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); + sfp_additional_info = (mb[6] & 0x0003); + sfp_multirate = ((mb[6] & 0x0004) >> 2); + sfp_tx_fault = ((mb[6] & 0x0008) >> 3); + link_speed = ((mb[6] & 0x0070) >> 4); + dcbx_status = ((mb[6] & 0x7000) >> 12); + + ql_log(ql_log_warn, vha, 0x5066, + "Peg-to-Fc Status Register:\n" + "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " + "nw_interface_signal_detect=0x%x" + "\nsfp_statis=0x%x.\n ", peg_fw_state, + nw_interface_link_up, nw_interface_signal_detect, + sfp_status); + ql_log(ql_log_warn, vha, 0x5067, + "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " + "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ", + htbt_counter, htbt_monitor_enable, + sfp_additional_info, sfp_multirate); + ql_log(ql_log_warn, vha, 0x5068, + "sfp_tx_fault=0x%x, link_state=0x%x, " + "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, + dcbx_status); + + qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); + } + + if (mb[1] & IDC_HEARTBEAT_FAILURE) { + ql_log(ql_log_warn, vha, 0x5069, + "Heartbeat Failure encountered, chip reset " + "required.\n"); + + qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); + } + } + + if (mb[1] & IDC_DEVICE_STATE_CHANGE) { + ql_log(ql_log_info, vha, 0x506a, + "IDC Device-State changed = 0x%x.\n", mb[4]); + if (ha->flags.nic_core_reset_owner) + return; + qla83xx_schedule_work(vha, MBA_IDC_AEN); + } +} + +/** + * qla27xx_copy_multiple_pkt() - Copy over purex/purls packets that can + * span over multiple IOCBs. 
+ * @vha: SCSI driver HA context + * @pkt: ELS packet + * @rsp: Response queue + * @is_purls: True, for Unsolicited Received FC-NVMe LS rsp IOCB + * false, for Unsolicited Received ELS IOCB + * @byte_order: True, to change the byte ordering of iocb payload + */ +struct purex_item * +qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha, void **pkt, + struct rsp_que **rsp, bool is_purls, + bool byte_order) +{ + struct purex_entry_24xx *purex = NULL; + struct pt_ls4_rx_unsol *purls = NULL; + struct rsp_que *rsp_q = *rsp; + sts_cont_entry_t *new_pkt; + uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0; + uint16_t buffer_copy_offset = 0, payload_size = 0; + uint16_t entry_count, entry_count_remaining; + struct purex_item *item; + void *iocb_pkt = NULL; + + if (is_purls) { + purls = *pkt; + total_bytes = (le16_to_cpu(purls->frame_size) & 0x0FFF) - + PURX_ELS_HEADER_SIZE; + entry_count = entry_count_remaining = purls->entry_count; + payload_size = sizeof(purls->payload); + } else { + purex = *pkt; + total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) - + PURX_ELS_HEADER_SIZE; + entry_count = entry_count_remaining = purex->entry_count; + payload_size = sizeof(purex->els_frame_payload); + } + + pending_bytes = total_bytes; + no_bytes = (pending_bytes > payload_size) ? payload_size : + pending_bytes; + ql_dbg(ql_dbg_async, vha, 0x509a, + "%s LS, frame_size 0x%x, entry count %d\n", + (is_purls ? "PURLS" : "FPIN"), total_bytes, entry_count); + + item = qla24xx_alloc_purex_item(vha, total_bytes); + if (!item) + return item; + + iocb_pkt = &item->iocb; + + if (is_purls) + memcpy(iocb_pkt, &purls->payload[0], no_bytes); + else + memcpy(iocb_pkt, &purex->els_frame_payload[0], no_bytes); + buffer_copy_offset += no_bytes; + pending_bytes -= no_bytes; + --entry_count_remaining; + + if (is_purls) + ((response_t *)purls)->signature = RESPONSE_PROCESSED; + else + ((response_t *)purex)->signature = RESPONSE_PROCESSED; + wmb(); + + do { + while ((total_bytes > 0) && (entry_count_remaining > 0)) { + if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) { + ql_dbg(ql_dbg_async, vha, 0x5084, + "Ran out of IOCBs, partial data 0x%x\n", + buffer_copy_offset); + cpu_relax(); + continue; + } + + new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr; + *pkt = new_pkt; + + if (new_pkt->entry_type != STATUS_CONT_TYPE) { + ql_log(ql_log_warn, vha, 0x507a, + "Unexpected IOCB type, partial data 0x%x\n", + buffer_copy_offset); + break; + } + + rsp_q->ring_index++; + if (rsp_q->ring_index == rsp_q->length) { + rsp_q->ring_index = 0; + rsp_q->ring_ptr = rsp_q->ring; + } else { + rsp_q->ring_ptr++; + } + no_bytes = (pending_bytes > sizeof(new_pkt->data)) ? 
+ sizeof(new_pkt->data) : pending_bytes; + if ((buffer_copy_offset + no_bytes) <= total_bytes) { + memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset), + new_pkt->data, no_bytes); + buffer_copy_offset += no_bytes; + pending_bytes -= no_bytes; + --entry_count_remaining; + } else { + ql_log(ql_log_warn, vha, 0x5044, + "Attempt to copy more that we got, optimizing..%x\n", + buffer_copy_offset); + memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset), + new_pkt->data, + total_bytes - buffer_copy_offset); + } + + ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED; + wmb(); + } + + if (pending_bytes != 0 || entry_count_remaining != 0) { + ql_log(ql_log_fatal, vha, 0x508b, + "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n", + total_bytes, entry_count_remaining); + qla24xx_free_purex_item(item); + return NULL; + } + } while (entry_count_remaining > 0); + + if (byte_order) + host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes); + + return item; +} + +int +qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) +{ + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *vp; + uint32_t vp_did; + unsigned long flags; + int ret = 0; + + if (!ha->num_vhosts) + return ret; + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) { + vp_did = vp->d_id.b24; + if (vp_did == rscn_entry) { + ret = 1; + break; + } + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + + return ret; +} + +fc_port_t * +qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) +{ + fc_port_t *f, *tf; + + f = tf = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) + if (f->loop_id == loop_id) + return f; + return NULL; +} + +fc_port_t * +qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted) +{ + fc_port_t *f, *tf; + + f = tf = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { + if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) { + if (incl_deleted) + return f; + else if (f->deleted == 0) + return f; + } + } + return NULL; +} + +fc_port_t * +qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id, + u8 incl_deleted) +{ + fc_port_t *f, *tf; + + f = tf = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { + if (f->d_id.b24 == id->b24) { + if (incl_deleted) + return f; + else if (f->deleted == 0) + return f; + } + } + return NULL; +} + +/* Shall be called only on supported adapters. */ +static void +qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) +{ + struct qla_hw_data *ha = vha->hw; + bool reset_isp_needed = false; + + ql_log(ql_log_warn, vha, 0x02f0, + "MPI Heartbeat stop. MPI reset is%s needed. " + "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n", + mb[1] & BIT_8 ? "" : " not", + mb[0], mb[1], mb[2], mb[3]); + + if ((mb[1] & BIT_8) == 0) + return; + + ql_log(ql_log_warn, vha, 0x02f1, + "MPI Heartbeat stop. 
FW dump needed\n"); + + if (ql2xfulldump_on_mpifail) { + ha->isp_ops->fw_dump(vha); + reset_isp_needed = true; + } + + ha->isp_ops->mpi_fw_dump(vha, 1); + + if (reset_isp_needed) { + vha->hw->flags.fw_init_done = 0; + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } +} + +static struct purex_item * +qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size) +{ + struct purex_item *item = NULL; + uint8_t item_hdr_size = sizeof(*item); + + if (size > QLA_DEFAULT_PAYLOAD_SIZE) { + item = kzalloc(item_hdr_size + + (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC); + } else { + if (atomic_inc_return(&vha->default_item.in_use) == 1) { + item = &vha->default_item; + goto initialize_purex_header; + } else { + item = kzalloc(item_hdr_size, GFP_ATOMIC); + } + } + if (!item) { + ql_log(ql_log_warn, vha, 0x5092, + ">> Failed allocate purex list item.\n"); + + return NULL; + } + +initialize_purex_header: + item->vha = vha; + item->size = size; + return item; +} + +void +qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt, + void (*process_item)(struct scsi_qla_host *vha, + struct purex_item *pkt)) +{ + struct purex_list *list = &vha->purex_list; + ulong flags; + + pkt->process_item = process_item; + + spin_lock_irqsave(&list->lock, flags); + list_add_tail(&pkt->list, &list->head); + spin_unlock_irqrestore(&list->lock, flags); + + set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags); +} + +/** + * qla24xx_copy_std_pkt() - Copy over purex ELS which is + * contained in a single IOCB. + * purex packet. + * @vha: SCSI driver HA context + * @pkt: ELS packet + */ +static struct purex_item +*qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt) +{ + struct purex_item *item; + + item = qla24xx_alloc_purex_item(vha, + QLA_DEFAULT_PAYLOAD_SIZE); + if (!item) + return item; + + memcpy(&item->iocb, pkt, sizeof(item->iocb)); + return item; +} + +/** + * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can + * span over multiple IOCBs. + * @vha: SCSI driver HA context + * @pkt: ELS packet + * @rsp: Response queue + */ +static struct purex_item * +qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt, + struct rsp_que **rsp) +{ + struct purex_entry_24xx *purex = *pkt; + struct rsp_que *rsp_q = *rsp; + sts_cont_entry_t *new_pkt; + uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0; + uint16_t buffer_copy_offset = 0; + uint16_t entry_count, entry_count_remaining; + struct purex_item *item; + void *fpin_pkt = NULL; + + total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) + - PURX_ELS_HEADER_SIZE; + pending_bytes = total_bytes; + entry_count = entry_count_remaining = purex->entry_count; + no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ? 
+ sizeof(purex->els_frame_payload) : pending_bytes; + ql_log(ql_log_info, vha, 0x509a, + "FPIN ELS, frame_size 0x%x, entry count %d\n", + total_bytes, entry_count); + + item = qla24xx_alloc_purex_item(vha, total_bytes); + if (!item) + return item; + + fpin_pkt = &item->iocb; + + memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes); + buffer_copy_offset += no_bytes; + pending_bytes -= no_bytes; + --entry_count_remaining; + + ((response_t *)purex)->signature = RESPONSE_PROCESSED; + wmb(); + + do { + while ((total_bytes > 0) && (entry_count_remaining > 0)) { + if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) { + ql_dbg(ql_dbg_async, vha, 0x5084, + "Ran out of IOCBs, partial data 0x%x\n", + buffer_copy_offset); + cpu_relax(); + continue; + } + + new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr; + *pkt = new_pkt; + + if (new_pkt->entry_type != STATUS_CONT_TYPE) { + ql_log(ql_log_warn, vha, 0x507a, + "Unexpected IOCB type, partial data 0x%x\n", + buffer_copy_offset); + break; + } + + rsp_q->ring_index++; + if (rsp_q->ring_index == rsp_q->length) { + rsp_q->ring_index = 0; + rsp_q->ring_ptr = rsp_q->ring; + } else { + rsp_q->ring_ptr++; + } + no_bytes = (pending_bytes > sizeof(new_pkt->data)) ? + sizeof(new_pkt->data) : pending_bytes; + if ((buffer_copy_offset + no_bytes) <= total_bytes) { + memcpy(((uint8_t *)fpin_pkt + + buffer_copy_offset), new_pkt->data, + no_bytes); + buffer_copy_offset += no_bytes; + pending_bytes -= no_bytes; + --entry_count_remaining; + } else { + ql_log(ql_log_warn, vha, 0x5044, + "Attempt to copy more that we got, optimizing..%x\n", + buffer_copy_offset); + memcpy(((uint8_t *)fpin_pkt + + buffer_copy_offset), new_pkt->data, + total_bytes - buffer_copy_offset); + } + + ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED; + wmb(); + } + + if (pending_bytes != 0 || entry_count_remaining != 0) { + ql_log(ql_log_fatal, vha, 0x508b, + "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n", + total_bytes, entry_count_remaining); + qla24xx_free_purex_item(item); + return NULL; + } + } while (entry_count_remaining > 0); + host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes); + return item; +} + +/** + * qla2x00_async_event() - Process aynchronous events. + * @vha: SCSI driver HA context + * @rsp: response queue + * @mb: Mailbox registers (0 - 3) + */ +void +qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) +{ + uint16_t handle_cnt; + uint16_t cnt, mbx; + uint32_t handles[5]; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; + struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; + uint32_t rscn_entry, host_pid; + unsigned long flags; + fc_port_t *fcport = NULL; + + if (!vha->hw->flags.fw_started) { + ql_log(ql_log_warn, vha, 0x50ff, + "Dropping AEN - %04x %04x %04x %04x.\n", + mb[0], mb[1], mb[2], mb[3]); + return; + } + + /* Setup to process RIO completion. 
*/ + handle_cnt = 0; + if (IS_CNA_CAPABLE(ha)) + goto skip_rio; + switch (mb[0]) { + case MBA_SCSI_COMPLETION: + handles[0] = make_handle(mb[2], mb[1]); + handle_cnt = 1; + break; + case MBA_CMPLT_1_16BIT: + handles[0] = mb[1]; + handle_cnt = 1; + mb[0] = MBA_SCSI_COMPLETION; + break; + case MBA_CMPLT_2_16BIT: + handles[0] = mb[1]; + handles[1] = mb[2]; + handle_cnt = 2; + mb[0] = MBA_SCSI_COMPLETION; + break; + case MBA_CMPLT_3_16BIT: + handles[0] = mb[1]; + handles[1] = mb[2]; + handles[2] = mb[3]; + handle_cnt = 3; + mb[0] = MBA_SCSI_COMPLETION; + break; + case MBA_CMPLT_4_16BIT: + handles[0] = mb[1]; + handles[1] = mb[2]; + handles[2] = mb[3]; + handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); + handle_cnt = 4; + mb[0] = MBA_SCSI_COMPLETION; + break; + case MBA_CMPLT_5_16BIT: + handles[0] = mb[1]; + handles[1] = mb[2]; + handles[2] = mb[3]; + handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); + handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); + handle_cnt = 5; + mb[0] = MBA_SCSI_COMPLETION; + break; + case MBA_CMPLT_2_32BIT: + handles[0] = make_handle(mb[2], mb[1]); + handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7), + RD_MAILBOX_REG(ha, reg, 6)); + handle_cnt = 2; + mb[0] = MBA_SCSI_COMPLETION; + break; + default: + break; + } +skip_rio: + switch (mb[0]) { + case MBA_SCSI_COMPLETION: /* Fast Post */ + if (!vha->flags.online) + break; + + for (cnt = 0; cnt < handle_cnt; cnt++) + qla2x00_process_completed_request(vha, rsp->req, + handles[cnt]); + break; + + case MBA_RESET: /* Reset */ + ql_dbg(ql_dbg_async, vha, 0x5002, + "Asynchronous RESET.\n"); + + set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); + break; + + case MBA_SYSTEM_ERR: /* System Error */ + mbx = 0; + + vha->hw_err_cnt++; + + if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || + IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + u16 m[4]; + + m[0] = rd_reg_word(®24->mailbox4); + m[1] = rd_reg_word(®24->mailbox5); + m[2] = rd_reg_word(®24->mailbox6); + mbx = m[3] = rd_reg_word(®24->mailbox7); + + ql_log(ql_log_warn, vha, 0x5003, + "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n", + mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]); + } else + ql_log(ql_log_warn, vha, 0x5003, + "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ", + mb[1], mb[2], mb[3]); + + if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) && + rd_reg_word(®24->mailbox7) & BIT_8) + ha->isp_ops->mpi_fw_dump(vha, 1); + ha->isp_ops->fw_dump(vha); + ha->flags.fw_init_done = 0; + QLA_FW_STOPPED(ha); + + if (IS_FWI2_CAPABLE(ha)) { + if (mb[1] == 0 && mb[2] == 0) { + ql_log(ql_log_fatal, vha, 0x5004, + "Unrecoverable Hardware Error: adapter " + "marked OFFLINE!\n"); + vha->flags.online = 0; + vha->device_flags |= DFLG_DEV_FAILED; + } else { + /* Check to see if MPI timeout occurred */ + if ((mbx & MBX_3) && (ha->port_no == 0)) + set_bit(MPI_RESET_NEEDED, + &vha->dpc_flags); + + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + } + } else if (mb[1] == 0) { + ql_log(ql_log_fatal, vha, 0x5005, + "Unrecoverable Hardware Error: adapter marked " + "OFFLINE!\n"); + vha->flags.online = 0; + vha->device_flags |= DFLG_DEV_FAILED; + } else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + + case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ + ql_log(ql_log_warn, vha, 0x5006, + "ISP Request Transfer Error (%x).\n", mb[1]); + + vha->hw_err_cnt++; + + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + + case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ + ql_log(ql_log_warn, vha, 0x5007, + "ISP Response Transfer Error (%x).\n", mb[1]); + + vha->hw_err_cnt++; + 
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + + case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ + ql_dbg(ql_dbg_async, vha, 0x5008, + "Asynchronous WAKEUP_THRES (%x).\n", mb[1]); + break; + + case MBA_LOOP_INIT_ERR: + ql_log(ql_log_warn, vha, 0x5090, + "LOOP INIT ERROR (%x).\n", mb[1]); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + + case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ + ha->flags.lip_ae = 1; + + ql_dbg(ql_dbg_async, vha, 0x5009, + "LIP occurred (%x).\n", mb[1]); + + if (atomic_read(&vha->loop_state) != LOOP_DOWN) { + atomic_set(&vha->loop_state, LOOP_DOWN); + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + qla2x00_mark_all_devices_lost(vha); + } + + if (vha->vp_idx) { + atomic_set(&vha->vp_state, VP_FAILED); + fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); + } + + set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); + set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); + + vha->flags.management_server_logged_in = 0; + qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); + break; + + case MBA_LOOP_UP: /* Loop Up Event */ + if (IS_QLA2100(ha) || IS_QLA2200(ha)) + ha->link_data_rate = PORT_SPEED_1GB; + else + ha->link_data_rate = mb[1]; + + ql_log(ql_log_info, vha, 0x500a, + "LOOP UP detected (%s Gbps).\n", + qla2x00_get_link_speed_str(ha, ha->link_data_rate)); + + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + if (mb[2] & BIT_0) + ql_log(ql_log_info, vha, 0x11a0, + "FEC=enabled (link up).\n"); + } + + vha->flags.management_server_logged_in = 0; + qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); + + if (vha->link_down_time < vha->hw->port_down_retry_count) { + vha->short_link_down_cnt++; + vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; + } + + break; + + case MBA_LOOP_DOWN: /* Loop Down Event */ + SAVE_TOPO(ha); + ha->flags.lip_ae = 0; + ha->current_topology = 0; + vha->link_down_time = 0; + + mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) + ? rd_reg_word(®24->mailbox4) : 0; + mbx = (IS_P3P_TYPE(ha)) ? 
rd_reg_word(®82->mailbox_out[4]) + : mbx; + ql_log(ql_log_info, vha, 0x500b, + "LOOP DOWN detected (%x %x %x %x).\n", + mb[1], mb[2], mb[3], mbx); + + if (atomic_read(&vha->loop_state) != LOOP_DOWN) { + atomic_set(&vha->loop_state, LOOP_DOWN); + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + /* + * In case of loop down, restore WWPN from + * NVRAM in case of FA-WWPN capable ISP + * Restore for Physical Port only + */ + if (!vha->vp_idx) { + if (ha->flags.fawwpn_enabled && + (ha->current_topology == ISP_CFG_F)) { + memcpy(vha->port_name, ha->port_name, WWN_SIZE); + fc_host_port_name(vha->host) = + wwn_to_u64(vha->port_name); + ql_dbg(ql_dbg_init + ql_dbg_verbose, + vha, 0x00d8, "LOOP DOWN detected," + "restore WWPN %016llx\n", + wwn_to_u64(vha->port_name)); + } + + clear_bit(VP_CONFIG_OK, &vha->vp_flags); + } + + vha->device_flags |= DFLG_NO_CABLE; + qla2x00_mark_all_devices_lost(vha); + } + + if (vha->vp_idx) { + atomic_set(&vha->vp_state, VP_FAILED); + fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); + } + + vha->flags.management_server_logged_in = 0; + ha->link_data_rate = PORT_SPEED_UNKNOWN; + qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); + break; + + case MBA_LIP_RESET: /* LIP reset occurred */ + ql_dbg(ql_dbg_async, vha, 0x500c, + "LIP reset occurred (%x).\n", mb[1]); + + if (atomic_read(&vha->loop_state) != LOOP_DOWN) { + atomic_set(&vha->loop_state, LOOP_DOWN); + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + qla2x00_mark_all_devices_lost(vha); + } + + if (vha->vp_idx) { + atomic_set(&vha->vp_state, VP_FAILED); + fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); + } + + set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); + + ha->operating_mode = LOOP; + vha->flags.management_server_logged_in = 0; + qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); + break; + + /* case MBA_DCBX_COMPLETE: */ + case MBA_POINT_TO_POINT: /* Point-to-Point */ + ha->flags.lip_ae = 0; + + if (IS_QLA2100(ha)) + break; + + if (IS_CNA_CAPABLE(ha)) { + ql_dbg(ql_dbg_async, vha, 0x500d, + "DCBX Completed -- %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); + if (ha->notify_dcbx_comp && !vha->vp_idx) + complete(&ha->dcbx_comp); + + } else + ql_dbg(ql_dbg_async, vha, 0x500e, + "Asynchronous P2P MODE received.\n"); + + /* + * Until there's a transition from loop down to loop up, treat + * this as loop down only. 
+ */ + if (atomic_read(&vha->loop_state) != LOOP_DOWN) { + atomic_set(&vha->loop_state, LOOP_DOWN); + if (!atomic_read(&vha->loop_down_timer)) + atomic_set(&vha->loop_down_timer, + LOOP_DOWN_TIME); + if (!N2N_TOPO(ha)) + qla2x00_mark_all_devices_lost(vha); + } + + if (vha->vp_idx) { + atomic_set(&vha->vp_state, VP_FAILED); + fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); + } + + if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) + set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); + + set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); + set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); + + vha->flags.management_server_logged_in = 0; + break; + + case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ + if (IS_QLA2100(ha)) + break; + + ql_dbg(ql_dbg_async, vha, 0x500f, + "Configuration change detected: value=%x.\n", mb[1]); + + if (atomic_read(&vha->loop_state) != LOOP_DOWN) { + atomic_set(&vha->loop_state, LOOP_DOWN); + if (!atomic_read(&vha->loop_down_timer)) + atomic_set(&vha->loop_down_timer, + LOOP_DOWN_TIME); + qla2x00_mark_all_devices_lost(vha); + } + + if (vha->vp_idx) { + atomic_set(&vha->vp_state, VP_FAILED); + fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); + } + + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + break; + + case MBA_PORT_UPDATE: /* Port database update */ + /* + * Handle only global and vn-port update events + * + * Relevant inputs: + * mb[1] = N_Port handle of changed port + * OR 0xffff for global event + * mb[2] = New login state + * 7 = Port logged out + * mb[3] = LSB is vp_idx, 0xff = all vps + * + * Skip processing if: + * Event is global, vp_idx is NOT all vps, + * vp_idx does not match + * Event is not global, vp_idx does not match + */ + if (IS_QLA2XXX_MIDTYPE(ha) && + ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || + (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) + break; + + if (mb[2] == 0x7) { + ql_dbg(ql_dbg_async, vha, 0x5010, + "Port %s %04x %04x %04x.\n", + mb[1] == 0xffff ? "unavailable" : "logout", + mb[1], mb[2], mb[3]); + + if (mb[1] == 0xffff) + goto global_port_update; + + if (mb[1] == NPH_SNS_LID(ha)) { + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + break; + } + + /* use handle_cnt for loop id/nport handle */ + if (IS_FWI2_CAPABLE(ha)) + handle_cnt = NPH_SNS; + else + handle_cnt = SIMPLE_NAME_SERVER; + if (mb[1] == handle_cnt) { + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + break; + } + + /* Port logout */ + fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); + if (!fcport) + break; + if (atomic_read(&fcport->state) != FCS_ONLINE) + break; + ql_dbg(ql_dbg_async, vha, 0x508a, + "Marking port lost loopid=%04x portid=%06x.\n", + fcport->loop_id, fcport->d_id.b24); + if (qla_ini_mode_enabled(vha)) { + fcport->logout_on_delete = 0; + qlt_schedule_sess_for_deletion(fcport); + } + break; + +global_port_update: + if (atomic_read(&vha->loop_state) != LOOP_DOWN) { + atomic_set(&vha->loop_state, LOOP_DOWN); + atomic_set(&vha->loop_down_timer, + LOOP_DOWN_TIME); + vha->device_flags |= DFLG_NO_CABLE; + qla2x00_mark_all_devices_lost(vha); + } + + if (vha->vp_idx) { + atomic_set(&vha->vp_state, VP_FAILED); + fc_vport_set_state(vha->fc_vport, + FC_VPORT_FAILED); + qla2x00_mark_all_devices_lost(vha); + } + + vha->flags.management_server_logged_in = 0; + ha->link_data_rate = PORT_SPEED_UNKNOWN; + break; + } + + /* + * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET + * event etc. 
earlier indicating loop is down) then process + * it. Otherwise ignore it and Wait for RSCN to come in. + */ + atomic_set(&vha->loop_down_timer, 0); + if (atomic_read(&vha->loop_state) != LOOP_DOWN && + !ha->flags.n2n_ae && + atomic_read(&vha->loop_state) != LOOP_DEAD) { + ql_dbg(ql_dbg_async, vha, 0x5011, + "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", + mb[1], mb[2], mb[3]); + break; + } + + ql_dbg(ql_dbg_async, vha, 0x5012, + "Port database changed %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); + + /* + * Mark all devices as missing so we will login again. + */ + atomic_set(&vha->loop_state, LOOP_UP); + vha->scan.scan_retry = 0; + + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(VP_CONFIG_OK, &vha->vp_flags); + break; + + case MBA_RSCN_UPDATE: /* State Change Registration */ + /* Check if the Vport has issued a SCR */ + if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) + break; + /* Only handle SCNs for our Vport index. */ + if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) + break; + + ql_log(ql_log_warn, vha, 0x5013, + "RSCN database changed -- %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); + + rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; + host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) + | vha->d_id.b.al_pa; + if (rscn_entry == host_pid) { + ql_dbg(ql_dbg_async, vha, 0x5014, + "Ignoring RSCN update to local host " + "port ID (%06x).\n", host_pid); + break; + } + + /* Ignore reserved bits from RSCN-payload. */ + rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; + + /* Skip RSCNs for virtual ports on the same physical port */ + if (qla2x00_is_a_vp_did(vha, rscn_entry)) + break; + + atomic_set(&vha->loop_down_timer, 0); + vha->flags.management_server_logged_in = 0; + { + struct event_arg ea; + + memset(&ea, 0, sizeof(ea)); + ea.id.b24 = rscn_entry; + ea.id.b.rsvd_1 = rscn_entry >> 24; + qla2x00_handle_rscn(vha, &ea); + qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); + } + break; + case MBA_CONGN_NOTI_RECV: + if (!ha->flags.scm_enabled || + mb[1] != QLA_CON_PRIMITIVE_RECEIVED) + break; + + if (mb[2] == QLA_CONGESTION_ARB_WARNING) { + ql_dbg(ql_dbg_async, vha, 0x509b, + "Congestion Warning %04x %04x.\n", mb[1], mb[2]); + } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) { + ql_log(ql_log_warn, vha, 0x509b, + "Congestion Alarm %04x %04x.\n", mb[1], mb[2]); + } + break; + /* case MBA_RIO_RESPONSE: */ + case MBA_ZIO_RESPONSE: + ql_dbg(ql_dbg_async, vha, 0x5015, + "[R|Z]IO update completion.\n"); + + if (IS_FWI2_CAPABLE(ha)) + qla24xx_process_response_queue(vha, rsp); + else + qla2x00_process_response_queue(rsp); + break; + + case MBA_DISCARD_RND_FRAME: + ql_dbg(ql_dbg_async, vha, 0x5016, + "Discard RND Frame -- %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); + vha->interface_err_cnt++; + break; + + case MBA_TRACE_NOTIFICATION: + ql_dbg(ql_dbg_async, vha, 0x5017, + "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); + break; + + case MBA_ISP84XX_ALERT: + ql_dbg(ql_dbg_async, vha, 0x5018, + "ISP84XX Alert Notification -- %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); + + spin_lock_irqsave(&ha->cs84xx->access_lock, flags); + switch (mb[1]) { + case A84_PANIC_RECOVERY: + ql_log(ql_log_info, vha, 0x5019, + "Alert 84XX: panic recovery %04x %04x.\n", + mb[2], mb[3]); + break; + case A84_OP_LOGIN_COMPLETE: + ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; + ql_log(ql_log_info, vha, 0x501a, + "Alert 84XX: firmware version %x.\n", + ha->cs84xx->op_fw_version); + break; + case A84_DIAG_LOGIN_COMPLETE: + 
ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; + ql_log(ql_log_info, vha, 0x501b, + "Alert 84XX: diagnostic firmware version %x.\n", + ha->cs84xx->diag_fw_version); + break; + case A84_GOLD_LOGIN_COMPLETE: + ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; + ha->cs84xx->fw_update = 1; + ql_log(ql_log_info, vha, 0x501c, + "Alert 84XX: gold firmware version %x.\n", + ha->cs84xx->gold_fw_version); + break; + default: + ql_log(ql_log_warn, vha, 0x501d, + "Alert 84xx: Invalid Alert %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); + } + spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); + break; + case MBA_DCBX_START: + ql_dbg(ql_dbg_async, vha, 0x501e, + "DCBX Started -- %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); + break; + case MBA_DCBX_PARAM_UPDATE: + ql_dbg(ql_dbg_async, vha, 0x501f, + "DCBX Parameters Updated -- %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); + break; + case MBA_FCF_CONF_ERR: + ql_dbg(ql_dbg_async, vha, 0x5020, + "FCF Configuration Error -- %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); + break; + case MBA_IDC_NOTIFY: + if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { + mb[4] = rd_reg_word(®24->mailbox4); + if (((mb[2] & 0x7fff) == MBC_PORT_RESET || + (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && + (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { + set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); + /* + * Extend loop down timer since port is active. + */ + if (atomic_read(&vha->loop_state) == LOOP_DOWN) + atomic_set(&vha->loop_down_timer, + LOOP_DOWN_TIME); + qla2xxx_wake_dpc(vha); + } + } + fallthrough; + case MBA_IDC_COMPLETE: + if (ha->notify_lb_portup_comp && !vha->vp_idx) + complete(&ha->lb_portup_comp); + fallthrough; + case MBA_IDC_TIME_EXT: + if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || + IS_QLA8044(ha)) + qla81xx_idc_event(vha, mb[0], mb[1]); + break; + + case MBA_IDC_AEN: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + vha->hw_err_cnt++; + qla27xx_handle_8200_aen(vha, mb); + } else if (IS_QLA83XX(ha)) { + mb[4] = rd_reg_word(®24->mailbox4); + mb[5] = rd_reg_word(®24->mailbox5); + mb[6] = rd_reg_word(®24->mailbox6); + mb[7] = rd_reg_word(®24->mailbox7); + qla83xx_handle_8200_aen(vha, mb); + } else { + ql_dbg(ql_dbg_async, vha, 0x5052, + "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n", + mb[0], mb[1], mb[2], mb[3]); + } + break; + + case MBA_DPORT_DIAGNOSTICS: + if ((mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_NOERR || + (mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_ERR) + vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS; + ql_dbg(ql_dbg_async, vha, 0x5052, + "D-Port Diagnostics: %04x %04x %04x %04x\n", + mb[0], mb[1], mb[2], mb[3]); + memcpy(vha->dport_data, mb, sizeof(vha->dport_data)); + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + static char *results[] = { + "start", "done(pass)", "done(error)", "undefined" }; + static char *types[] = { + "none", "dynamic", "static", "other" }; + uint result = mb[1] >> 0 & 0x3; + uint type = mb[1] >> 6 & 0x3; + uint sw = mb[1] >> 15 & 0x1; + ql_dbg(ql_dbg_async, vha, 0x5052, + "D-Port Diagnostics: result=%s type=%s [sw=%u]\n", + results[result], types[type], sw); + if (result == 2) { + static char *reasons[] = { + "reserved", "unexpected reject", + "unexpected phase", "retry exceeded", + "timed out", "not supported", + "user stopped" }; + uint reason = mb[2] >> 0 & 0xf; + uint phase = mb[2] >> 12 & 0xf; + ql_dbg(ql_dbg_async, vha, 0x5052, + "D-Port Diagnostics: reason=%s phase=%u \n", + reason < 7 ? 
reasons[reason] : "other", + phase >> 1); + } + } + break; + + case MBA_TEMPERATURE_ALERT: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + display_Laser_info(vha, mb[1], mb[2], mb[3]); + ql_dbg(ql_dbg_async, vha, 0x505e, + "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]); + break; + + case MBA_TRANS_INSERT: + ql_dbg(ql_dbg_async, vha, 0x5091, + "Transceiver Insertion: %04x\n", mb[1]); + set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags); + break; + + case MBA_TRANS_REMOVE: + ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n"); + break; + + default: + ql_dbg(ql_dbg_async, vha, 0x5057, + "Unknown AEN:%04x %04x %04x %04x\n", + mb[0], mb[1], mb[2], mb[3]); + } + + qlt_async_event(mb[0], vha, mb); + + if (!vha->vp_idx && ha->num_vhosts) + qla2x00_alert_all_vps(rsp, mb); +} + +/** + * qla2x00_process_completed_request() - Process a Fast Post response. + * @vha: SCSI driver HA context + * @req: request queue + * @index: SRB index + */ +void +qla2x00_process_completed_request(struct scsi_qla_host *vha, + struct req_que *req, uint32_t index) +{ + srb_t *sp; + struct qla_hw_data *ha = vha->hw; + + /* Validate handle. */ + if (index >= req->num_outstanding_cmds) { + ql_log(ql_log_warn, vha, 0x3014, + "Invalid SCSI command index (%x).\n", index); + + if (IS_P3P_TYPE(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + return; + } + + sp = req->outstanding_cmds[index]; + if (sp) { + /* Free outstanding command slot. */ + req->outstanding_cmds[index] = NULL; + + /* Save ISP completion status */ + sp->done(sp, DID_OK << 16); + } else { + ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); + + if (IS_P3P_TYPE(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + } +} + +static srb_t * +qla_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, + struct req_que *req, void *iocb, u16 *ret_index) +{ + struct qla_hw_data *ha = vha->hw; + sts_entry_t *pkt = iocb; + srb_t *sp; + uint16_t index; + + if (pkt->handle == QLA_SKIP_HANDLE) + return NULL; + + index = LSW(pkt->handle); + if (index >= req->num_outstanding_cmds) { + ql_log(ql_log_warn, vha, 0x5031, + "%s: Invalid command index (%x) type %8ph.\n", + func, index, iocb); + if (IS_P3P_TYPE(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + return NULL; + } + sp = req->outstanding_cmds[index]; + if (!sp) { + ql_log(ql_log_warn, vha, 0x5032, + "%s: Invalid completion handle (%x) -- timed-out.\n", + func, index); + return NULL; + } + if (sp->handle != index) { + ql_log(ql_log_warn, vha, 0x5033, + "%s: SRB handle (%x) mismatch %x.\n", func, + sp->handle, index); + return NULL; + } + + *ret_index = index; + qla_put_fw_resources(sp->qpair, &sp->iores); + return sp; +} + +srb_t * +qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, + struct req_que *req, void *iocb) +{ + uint16_t index; + srb_t *sp; + + sp = qla_get_sp_from_handle(vha, func, req, iocb, &index); + if (sp) + req->outstanding_cmds[index] = NULL; + + return sp; +} + +static void +qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct mbx_entry *mbx) +{ + const char func[] = "MBX-IOCB"; + const char *type; + fc_port_t *fcport; + srb_t *sp; + struct srb_iocb *lio; + uint16_t *data; + uint16_t status; + + sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); + if (!sp) + return; + + lio = &sp->u.iocb_cmd; + type = sp->name; + fcport = sp->fcport; + data = lio->u.logio.data; + + data[0] 
= MBS_COMMAND_ERROR; + data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? + QLA_LOGIO_LOGIN_RETRIED : 0; + if (mbx->entry_status) { + ql_dbg(ql_dbg_async, vha, 0x5043, + "Async-%s error entry - hdl=%x portid=%02x%02x%02x " + "entry-status=%x status=%x state-flag=%x " + "status-flags=%x.\n", type, sp->handle, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, mbx->entry_status, + le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), + le16_to_cpu(mbx->status_flags)); + + ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, + mbx, sizeof(*mbx)); + + goto logio_done; + } + + status = le16_to_cpu(mbx->status); + if (status == 0x30 && sp->type == SRB_LOGIN_CMD && + le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) + status = 0; + if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { + ql_dbg(ql_dbg_async, vha, 0x5045, + "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", + type, sp->handle, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa, + le16_to_cpu(mbx->mb1)); + + data[0] = MBS_COMMAND_COMPLETE; + if (sp->type == SRB_LOGIN_CMD) { + fcport->port_type = FCT_TARGET; + if (le16_to_cpu(mbx->mb1) & BIT_0) + fcport->port_type = FCT_INITIATOR; + else if (le16_to_cpu(mbx->mb1) & BIT_1) + fcport->flags |= FCF_FCP2_DEVICE; + } + goto logio_done; + } + + data[0] = le16_to_cpu(mbx->mb0); + switch (data[0]) { + case MBS_PORT_ID_USED: + data[1] = le16_to_cpu(mbx->mb1); + break; + case MBS_LOOP_ID_USED: + break; + default: + data[0] = MBS_COMMAND_ERROR; + break; + } + + ql_log(ql_log_warn, vha, 0x5046, + "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " + "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, + fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, + status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), + le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), + le16_to_cpu(mbx->mb7)); + +logio_done: + sp->done(sp, 0); +} + +static void +qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct mbx_24xx_entry *pkt) +{ + const char func[] = "MBX-IOCB2"; + struct qla_hw_data *ha = vha->hw; + srb_t *sp; + struct srb_iocb *si; + u16 sz, i; + int res; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + if (sp->type == SRB_SCSI_CMD || + sp->type == SRB_NVME_CMD || + sp->type == SRB_TM_CMD) { + ql_log(ql_log_warn, vha, 0x509d, + "Inconsistent event entry type %d\n", sp->type); + if (IS_P3P_TYPE(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + return; + } + + si = &sp->u.iocb_cmd; + sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); + + for (i = 0; i < sz; i++) + si->u.mbx.in_mb[i] = pkt->mb[i]; + + res = (si->u.mbx.in_mb[0] & MBS_MASK); + + sp->done(sp, res); +} + +static void +qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct nack_to_isp *pkt) +{ + const char func[] = "nack"; + srb_t *sp; + int res = 0; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS)) + res = QLA_FUNCTION_FAILED; + + sp->done(sp, res); +} + +static void +qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, + sts_entry_t *pkt, int iocb_type) +{ + const char func[] = "CT_IOCB"; + const char *type; + srb_t *sp; + struct bsg_job *bsg_job; + struct fc_bsg_reply *bsg_reply; + uint16_t comp_status; + int res = 0; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + switch 
(sp->type) { + case SRB_CT_CMD: + bsg_job = sp->u.bsg_job; + bsg_reply = bsg_job->reply; + + type = "ct pass-through"; + + comp_status = le16_to_cpu(pkt->comp_status); + + /* + * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT + * fc payload to the caller + */ + bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + + if (comp_status != CS_COMPLETE) { + if (comp_status == CS_DATA_UNDERRUN) { + res = DID_OK << 16; + bsg_reply->reply_payload_rcv_len = + le16_to_cpu(pkt->rsp_info_len); + + ql_log(ql_log_warn, vha, 0x5048, + "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n", + type, comp_status, + bsg_reply->reply_payload_rcv_len); + } else { + ql_log(ql_log_warn, vha, 0x5049, + "CT pass-through-%s error comp_status=0x%x.\n", + type, comp_status); + res = DID_ERROR << 16; + bsg_reply->reply_payload_rcv_len = 0; + } + ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, + pkt, sizeof(*pkt)); + } else { + res = DID_OK << 16; + bsg_reply->reply_payload_rcv_len = + bsg_job->reply_payload.payload_len; + bsg_job->reply_len = 0; + } + break; + case SRB_CT_PTHRU_CMD: + /* + * borrowing sts_entry_24xx.comp_status. + * same location as ct_entry_24xx.comp_status + */ + res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, + (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, + sp->name); + break; + } + + sp->done(sp, res); +} + +static void +qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req, + struct sts_entry_24xx *pkt, int iocb_type) +{ + struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt; + const char func[] = "ELS_CT_IOCB"; + const char *type; + srb_t *sp; + struct bsg_job *bsg_job; + struct fc_bsg_reply *bsg_reply; + uint16_t comp_status; + uint32_t fw_status[3]; + int res, logit = 1; + struct srb_iocb *els; + uint n; + scsi_qla_host_t *vha; + struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt; + + sp = qla2x00_get_sp_from_handle(v, func, req, pkt); + if (!sp) + return; + bsg_job = sp->u.bsg_job; + vha = sp->vha; + + type = NULL; + + comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); + fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1); + fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2); + + switch (sp->type) { + case SRB_ELS_CMD_RPT: + case SRB_ELS_CMD_HST: + type = "rpt hst"; + break; + case SRB_ELS_CMD_HST_NOLOGIN: + type = "els"; + { + struct els_entry_24xx *els = (void *)pkt; + struct qla_bsg_auth_els_request *p = + (struct qla_bsg_auth_els_request *)bsg_job->request; + + ql_dbg(ql_dbg_user, vha, 0x700f, + "%s %s. 
portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n", + __func__, sc_to_str(p->e.sub_cmd), + e->d_id[2], e->d_id[1], e->d_id[0], + comp_status, p->e.extra_rx_xchg_address, bsg_job); + + if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) { + if (sp->remap.remapped) { + n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + sp->remap.rsp.buf, + sp->remap.rsp.len); + ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e, + "%s: SG copied %x of %x\n", + __func__, n, sp->remap.rsp.len); + } else { + ql_dbg(ql_dbg_user, vha, 0x700f, + "%s: NOT REMAPPED (error)...!!!\n", + __func__); + } + } + } + break; + case SRB_CT_CMD: + type = "ct pass-through"; + break; + case SRB_ELS_DCMD: + type = "Driver ELS logo"; + if (iocb_type != ELS_IOCB_TYPE) { + ql_dbg(ql_dbg_user, vha, 0x5047, + "Completing %s: (%p) type=%d.\n", + type, sp, sp->type); + sp->done(sp, 0); + return; + } + break; + case SRB_CT_PTHRU_CMD: + /* borrowing sts_entry_24xx.comp_status. + same location as ct_entry_24xx.comp_status + */ + res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt, + (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, + sp->name); + sp->done(sp, res); + return; + default: + ql_dbg(ql_dbg_user, vha, 0x503e, + "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); + return; + } + + if (iocb_type == ELS_IOCB_TYPE) { + els = &sp->u.iocb_cmd; + els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]); + els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]); + els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]); + els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]); + if (comp_status == CS_COMPLETE) { + res = DID_OK << 16; + } else { + if (comp_status == CS_DATA_UNDERRUN) { + res = DID_OK << 16; + els->u.els_plogi.len = cpu_to_le16(le32_to_cpu( + ese->total_byte_count)); + + if (sp->remap.remapped && + ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) { + ql_dbg(ql_dbg_user, vha, 0x503f, + "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x", + __func__, e->s_id[0], e->s_id[2], e->s_id[1], + e->d_id[2], e->d_id[1], e->d_id[0]); + logit = 0; + } + + } else if (comp_status == CS_PORT_LOGGED_OUT) { + ql_dbg(ql_dbg_disc, vha, 0x911e, + "%s %d schedule session deletion\n", + __func__, __LINE__); + + els->u.els_plogi.len = 0; + res = DID_IMM_RETRY << 16; + qlt_schedule_sess_for_deletion(sp->fcport); + } else { + els->u.els_plogi.len = 0; + res = DID_ERROR << 16; + } + + if (sp->remap.remapped && + ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) { + if (logit) { + ql_dbg(ql_dbg_user, vha, 0x503f, + "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n", + type, sp->handle, comp_status); + + ql_dbg(ql_dbg_user, vha, 0x503f, + "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n", + fw_status[1], fw_status[2], + le32_to_cpu(((struct els_sts_entry_24xx *) + pkt)->total_byte_count), + e->s_id[0], e->s_id[2], e->s_id[1], + e->d_id[2], e->d_id[1], e->d_id[0]); + } + if (sp->fcport && sp->fcport->flags & FCF_FCSP_DEVICE && + sp->type == SRB_ELS_CMD_HST_NOLOGIN) { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s rcv reject. 
Sched delete\n", __func__); + qlt_schedule_sess_for_deletion(sp->fcport); + } + } else if (logit) { + ql_log(ql_log_info, vha, 0x503f, + "%s IOCB Done hdl=%x comp_status=0x%x\n", + type, sp->handle, comp_status); + ql_log(ql_log_info, vha, 0x503f, + "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n", + fw_status[1], fw_status[2], + le32_to_cpu(((struct els_sts_entry_24xx *) + pkt)->total_byte_count), + e->s_id[0], e->s_id[2], e->s_id[1], + e->d_id[2], e->d_id[1], e->d_id[0]); + } + } + goto els_ct_done; + } + + /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT + * fc payload to the caller + */ + bsg_job = sp->u.bsg_job; + bsg_reply = bsg_job->reply; + bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; + bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); + + if (comp_status != CS_COMPLETE) { + if (comp_status == CS_DATA_UNDERRUN) { + res = DID_OK << 16; + bsg_reply->reply_payload_rcv_len = + le32_to_cpu(ese->total_byte_count); + + ql_dbg(ql_dbg_user, vha, 0x503f, + "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " + "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", + type, sp->handle, comp_status, fw_status[1], fw_status[2], + le32_to_cpu(ese->total_byte_count)); + } else { + ql_dbg(ql_dbg_user, vha, 0x5040, + "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " + "error subcode 1=0x%x error subcode 2=0x%x.\n", + type, sp->handle, comp_status, + le32_to_cpu(ese->error_subcode_1), + le32_to_cpu(ese->error_subcode_2)); + res = DID_ERROR << 16; + bsg_reply->reply_payload_rcv_len = 0; + } + memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), + fw_status, sizeof(fw_status)); + ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, + pkt, sizeof(*pkt)); + } + else { + res = DID_OK << 16; + bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; + bsg_job->reply_len = 0; + } +els_ct_done: + + sp->done(sp, res); +} + +static void +qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, + struct logio_entry_24xx *logio) +{ + const char func[] = "LOGIO-IOCB"; + const char *type; + fc_port_t *fcport; + srb_t *sp; + struct srb_iocb *lio; + uint16_t *data; + uint32_t iop[2]; + int logit = 1; + + sp = qla2x00_get_sp_from_handle(vha, func, req, logio); + if (!sp) + return; + + lio = &sp->u.iocb_cmd; + type = sp->name; + fcport = sp->fcport; + data = lio->u.logio.data; + + data[0] = MBS_COMMAND_ERROR; + data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
+ QLA_LOGIO_LOGIN_RETRIED : 0; + if (logio->entry_status) { + ql_log(ql_log_warn, fcport->vha, 0x5034, + "Async-%s error entry - %8phC hdl=%x" + "portid=%02x%02x%02x entry-status=%x.\n", + type, fcport->port_name, sp->handle, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa, + logio->entry_status); + ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, + logio, sizeof(*logio)); + + goto logio_done; + } + + if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { + ql_dbg(ql_dbg_async, sp->vha, 0x5036, + "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n", + type, sp->handle, fcport->d_id.b24, fcport->port_name, + le32_to_cpu(logio->io_parameter[0])); + + vha->hw->exch_starvation = 0; + data[0] = MBS_COMMAND_COMPLETE; + + if (sp->type == SRB_PRLI_CMD) { + lio->u.logio.iop[0] = + le32_to_cpu(logio->io_parameter[0]); + lio->u.logio.iop[1] = + le32_to_cpu(logio->io_parameter[1]); + goto logio_done; + } + + if (sp->type != SRB_LOGIN_CMD) + goto logio_done; + + lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]); + if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP) + fcport->flags |= FCF_FCSP_DEVICE; + + iop[0] = le32_to_cpu(logio->io_parameter[0]); + if (iop[0] & BIT_4) { + fcport->port_type = FCT_TARGET; + if (iop[0] & BIT_8) + fcport->flags |= FCF_FCP2_DEVICE; + } else if (iop[0] & BIT_5) + fcport->port_type = FCT_INITIATOR; + + if (iop[0] & BIT_7) + fcport->flags |= FCF_CONF_COMP_SUPPORTED; + + if (logio->io_parameter[7] || logio->io_parameter[8]) + fcport->supported_classes |= FC_COS_CLASS2; + if (logio->io_parameter[9] || logio->io_parameter[10]) + fcport->supported_classes |= FC_COS_CLASS3; + + goto logio_done; + } + + iop[0] = le32_to_cpu(logio->io_parameter[0]); + iop[1] = le32_to_cpu(logio->io_parameter[1]); + lio->u.logio.iop[0] = iop[0]; + lio->u.logio.iop[1] = iop[1]; + switch (iop[0]) { + case LSC_SCODE_PORTID_USED: + data[0] = MBS_PORT_ID_USED; + data[1] = LSW(iop[1]); + logit = 0; + break; + case LSC_SCODE_NPORT_USED: + data[0] = MBS_LOOP_ID_USED; + logit = 0; + break; + case LSC_SCODE_CMD_FAILED: + if (iop[1] == 0x0606) { + /* + * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI, + * Target side acked. + */ + data[0] = MBS_COMMAND_COMPLETE; + goto logio_done; + } + data[0] = MBS_COMMAND_ERROR; + break; + case LSC_SCODE_NOXCB: + vha->hw->exch_starvation++; + if (vha->hw->exch_starvation > 5) { + ql_log(ql_log_warn, vha, 0xd046, + "Exchange starvation. 
Resetting RISC\n"); + + vha->hw->exch_starvation = 0; + + if (IS_P3P_TYPE(vha->hw)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + fallthrough; + default: + data[0] = MBS_COMMAND_ERROR; + break; + } + + if (logit) + ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: " + "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n", + type, sp->handle, fcport->d_id.b24, fcport->port_name, + le16_to_cpu(logio->comp_status), + le32_to_cpu(logio->io_parameter[0]), + le32_to_cpu(logio->io_parameter[1])); + else + ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: " + "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n", + type, sp->handle, fcport->d_id.b24, fcport->port_name, + le16_to_cpu(logio->comp_status), + le32_to_cpu(logio->io_parameter[0]), + le32_to_cpu(logio->io_parameter[1])); + +logio_done: + sp->done(sp, 0); +} + +static void +qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) +{ + const char func[] = "TMF-IOCB"; + const char *type; + fc_port_t *fcport; + srb_t *sp; + struct srb_iocb *iocb; + struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; + u16 comp_status; + + sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); + if (!sp) + return; + + comp_status = le16_to_cpu(sts->comp_status); + iocb = &sp->u.iocb_cmd; + type = sp->name; + fcport = sp->fcport; + iocb->u.tmf.data = QLA_SUCCESS; + + if (sts->entry_status) { + ql_log(ql_log_warn, fcport->vha, 0x5038, + "Async-%s error - hdl=%x entry-status(%x).\n", + type, sp->handle, sts->entry_status); + iocb->u.tmf.data = QLA_FUNCTION_FAILED; + } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { + ql_log(ql_log_warn, fcport->vha, 0x5039, + "Async-%s error - hdl=%x completion status(%x).\n", + type, sp->handle, comp_status); + iocb->u.tmf.data = QLA_FUNCTION_FAILED; + } else if ((le16_to_cpu(sts->scsi_status) & + SS_RESPONSE_INFO_LEN_VALID)) { + host_to_fcp_swap(sts->data, sizeof(sts->data)); + if (le32_to_cpu(sts->rsp_data_len) < 4) { + ql_log(ql_log_warn, fcport->vha, 0x503b, + "Async-%s error - hdl=%x not enough response(%d).\n", + type, sp->handle, sts->rsp_data_len); + } else if (sts->data[3]) { + ql_log(ql_log_warn, fcport->vha, 0x503c, + "Async-%s error - hdl=%x response(%x).\n", + type, sp->handle, sts->data[3]); + iocb->u.tmf.data = QLA_FUNCTION_FAILED; + } + } + + switch (comp_status) { + case CS_PORT_LOGGED_OUT: + case CS_PORT_CONFIG_CHG: + case CS_PORT_BUSY: + case CS_INCOMPLETE: + case CS_PORT_UNAVAILABLE: + case CS_RESET: + if (atomic_read(&fcport->state) == FCS_ONLINE) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, + "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n", + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, + port_state_str[FCS_ONLINE], + comp_status); + + qlt_schedule_sess_for_deletion(fcport); + } + break; + + default: + break; + } + + if (iocb->u.tmf.data != QLA_SUCCESS) + ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055, + sts, sizeof(*sts)); + + sp->done(sp, 0); +} + +static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + void *tsk, srb_t *sp) +{ + fc_port_t *fcport; + struct srb_iocb *iocb; + struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; + uint16_t state_flags; + struct nvmefc_fcp_req *fd; + uint16_t ret = QLA_SUCCESS; + __le16 comp_status = sts->comp_status; + int logit = 0; + + iocb = &sp->u.iocb_cmd; + fcport = sp->fcport; + iocb->u.nvme.comp_status = 
comp_status; + state_flags = le16_to_cpu(sts->state_flags); + fd = iocb->u.nvme.desc; + + if (unlikely(iocb->u.nvme.aen_op)) + atomic_dec(&sp->vha->hw->nvme_active_aen_cnt); + else + sp->qpair->cmd_completion_cnt++; + + if (unlikely(comp_status != CS_COMPLETE)) + logit = 1; + + fd->transferred_length = fd->payload_length - + le32_to_cpu(sts->residual_len); + + /* + * State flags: Bit 6 and 0. + * If 0 is set, we don't care about 6. + * both cases resp was dma'd to host buffer + * if both are 0, that is good path case. + * if six is set and 0 is clear, we need to + * copy resp data from status iocb to resp buffer. + */ + if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) { + iocb->u.nvme.rsp_pyld_len = 0; + } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) == + (SF_FCP_RSP_DMA | SF_NVME_ERSP)) { + /* Response already DMA'd to fd->rspaddr. */ + iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; + } else if ((state_flags & SF_FCP_RSP_DMA)) { + /* + * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this + * as an error. + */ + iocb->u.nvme.rsp_pyld_len = 0; + fd->transferred_length = 0; + ql_dbg(ql_dbg_io, fcport->vha, 0x307a, + "Unexpected values in NVMe_RSP IU.\n"); + logit = 1; + } else if (state_flags & SF_NVME_ERSP) { + uint32_t *inbuf, *outbuf; + uint16_t iter; + + inbuf = (uint32_t *)&sts->nvme_ersp_data; + outbuf = (uint32_t *)fd->rspaddr; + iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; + if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) > + sizeof(struct nvme_fc_ersp_iu))) { + if (ql_mask_match(ql_dbg_io)) { + WARN_ONCE(1, "Unexpected response payload length %u.\n", + iocb->u.nvme.rsp_pyld_len); + ql_log(ql_log_warn, fcport->vha, 0x5100, + "Unexpected response payload length %u.\n", + iocb->u.nvme.rsp_pyld_len); + } + iocb->u.nvme.rsp_pyld_len = + cpu_to_le16(sizeof(struct nvme_fc_ersp_iu)); + } + iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2; + for (; iter; iter--) + *outbuf++ = swab32(*inbuf++); + } + + if (state_flags & SF_NVME_ERSP) { + struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr; + u32 tgt_xfer_len; + + tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len); + if (fd->transferred_length != tgt_xfer_len) { + ql_log(ql_log_warn, fcport->vha, 0x3079, + "Dropped frame(s) detected (sent/rcvd=%u/%u).\n", + tgt_xfer_len, fd->transferred_length); + logit = 1; + } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) { + /* + * Do not log if this is just an underflow and there + * is no data loss. + */ + logit = 0; + } + } + + if (unlikely(logit)) + ql_dbg(ql_dbg_io, fcport->vha, 0x5060, + "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n", + sp->name, sp->handle, comp_status, + fd->transferred_length, le32_to_cpu(sts->residual_len), + sts->ox_id); + + /* + * If transport error then Failure (HBA rejects request) + * otherwise transport will handle. 
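 *
 * Aside: a minimal standalone sketch (plain C, hypothetical names, not
 * part of this patch) of the word-wise byte-swap copy performed earlier
 * in this completion path when the ERSP payload arrives inside the
 * status IOCB. __builtin_bswap32 stands in for the kernel's swab32():
 *
 *   #include <stddef.h>
 *   #include <stdint.h>
 *
 *   // Copy len bytes (assumed to be a multiple of 4) from src to dst,
 *   // byte-swapping each 32-bit word along the way.
 *   static void copy_swab32(uint32_t *dst, const uint32_t *src, size_t len)
 *   {
 *           size_t words = len >> 2;        // 4 bytes per word
 *
 *           while (words--)
 *                   *dst++ = __builtin_bswap32(*src++);
 *   }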
+ */ + switch (le16_to_cpu(comp_status)) { + case CS_COMPLETE: + break; + + case CS_RESET: + case CS_PORT_UNAVAILABLE: + case CS_PORT_LOGGED_OUT: + fcport->nvme_flag |= NVME_FLAG_RESETTING; + if (atomic_read(&fcport->state) == FCS_ONLINE) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, + "Port to be marked lost on fcport=%06x, current " + "port state= %s comp_status %x.\n", + fcport->d_id.b24, port_state_str[FCS_ONLINE], + comp_status); + + qlt_schedule_sess_for_deletion(fcport); + } + fallthrough; + case CS_ABORTED: + case CS_PORT_BUSY: + fd->transferred_length = 0; + iocb->u.nvme.rsp_pyld_len = 0; + ret = QLA_ABORTED; + break; + case CS_DATA_UNDERRUN: + break; + default: + ret = QLA_FUNCTION_FAILED; + break; + } + sp->done(sp, ret); +} + +static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req, + struct vp_ctrl_entry_24xx *vce) +{ + const char func[] = "CTRLVP-IOCB"; + srb_t *sp; + int rval = QLA_SUCCESS; + + sp = qla2x00_get_sp_from_handle(vha, func, req, vce); + if (!sp) + return; + + if (vce->entry_status != 0) { + ql_dbg(ql_dbg_vport, vha, 0x10c4, + "%s: Failed to complete IOCB -- error status (%x)\n", + sp->name, vce->entry_status); + rval = QLA_FUNCTION_FAILED; + } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) { + ql_dbg(ql_dbg_vport, vha, 0x10c5, + "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n", + sp->name, le16_to_cpu(vce->comp_status), + le16_to_cpu(vce->vp_idx_failed)); + rval = QLA_FUNCTION_FAILED; + } else { + ql_dbg(ql_dbg_vport, vha, 0x10c6, + "Done %s.\n", __func__); + } + + sp->rc = rval; + sp->done(sp, rval); +} + +/* Process a single response queue entry. */ +static void qla2x00_process_response_entry(struct scsi_qla_host *vha, + struct rsp_que *rsp, + sts_entry_t *pkt) +{ + sts21_entry_t *sts21_entry; + sts22_entry_t *sts22_entry; + uint16_t handle_cnt; + uint16_t cnt; + + switch (pkt->entry_type) { + case STATUS_TYPE: + qla2x00_status_entry(vha, rsp, pkt); + break; + case STATUS_TYPE_21: + sts21_entry = (sts21_entry_t *)pkt; + handle_cnt = sts21_entry->handle_count; + for (cnt = 0; cnt < handle_cnt; cnt++) + qla2x00_process_completed_request(vha, rsp->req, + sts21_entry->handle[cnt]); + break; + case STATUS_TYPE_22: + sts22_entry = (sts22_entry_t *)pkt; + handle_cnt = sts22_entry->handle_count; + for (cnt = 0; cnt < handle_cnt; cnt++) + qla2x00_process_completed_request(vha, rsp->req, + sts22_entry->handle[cnt]); + break; + case STATUS_CONT_TYPE: + qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); + break; + case MBX_IOCB_TYPE: + qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt); + break; + case CT_IOCB_TYPE: + qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); + break; + default: + /* Type Not Supported. */ + ql_log(ql_log_warn, vha, 0x504a, + "Received unknown response pkt type %x entry status=%x.\n", + pkt->entry_type, pkt->entry_status); + break; + } +} + +/** + * qla2x00_process_response_queue() - Process response queue entries. 
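 *
 * Aside: the loop below consumes one response ring entry at a time; a
 * standalone sketch of its wrap-around stepping (hypothetical names,
 * plain C, not part of this patch):
 *
 *   #include <stddef.h>
 *
 *   struct sketch_ring {
 *           size_t index;       // next entry to consume
 *           size_t length;      // total entries in the ring
 *   };
 *
 *   // Advance the consumer index by one entry, wrapping back to the
 *   // start of the ring once the last entry has been consumed.
 *   static size_t ring_advance(struct sketch_ring *r)
 *   {
 *           if (++r->index == r->length)
 *                   r->index = 0;
 *           return r->index;
 *   }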
+ * @rsp: response queue + */ +void +qla2x00_process_response_queue(struct rsp_que *rsp) +{ + struct scsi_qla_host *vha; + struct qla_hw_data *ha = rsp->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + sts_entry_t *pkt; + + vha = pci_get_drvdata(ha->pdev); + + if (!vha->flags.online) + return; + + while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { + pkt = (sts_entry_t *)rsp->ring_ptr; + + rsp->ring_index++; + if (rsp->ring_index == rsp->length) { + rsp->ring_index = 0; + rsp->ring_ptr = rsp->ring; + } else { + rsp->ring_ptr++; + } + + if (pkt->entry_status != 0) { + qla2x00_error_entry(vha, rsp, pkt); + ((response_t *)pkt)->signature = RESPONSE_PROCESSED; + wmb(); + continue; + } + + qla2x00_process_response_entry(vha, rsp, pkt); + ((response_t *)pkt)->signature = RESPONSE_PROCESSED; + wmb(); + } + + /* Adjust ring index */ + wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); +} + +static inline void +qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, + uint32_t sense_len, struct rsp_que *rsp, int res) +{ + struct scsi_qla_host *vha = sp->vha; + struct scsi_cmnd *cp = GET_CMD_SP(sp); + uint32_t track_sense_len; + + if (sense_len >= SCSI_SENSE_BUFFERSIZE) + sense_len = SCSI_SENSE_BUFFERSIZE; + + SET_CMD_SENSE_LEN(sp, sense_len); + SET_CMD_SENSE_PTR(sp, cp->sense_buffer); + track_sense_len = sense_len; + + if (sense_len > par_sense_len) + sense_len = par_sense_len; + + memcpy(cp->sense_buffer, sense_data, sense_len); + + SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); + track_sense_len -= sense_len; + SET_CMD_SENSE_LEN(sp, track_sense_len); + + if (track_sense_len != 0) { + rsp->status_srb = sp; + cp->result = res; + } + + if (sense_len) { + ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, + "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", + sp->vha->host_no, cp->device->id, cp->device->lun, + cp); + ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, + cp->sense_buffer, sense_len); + } +} + +struct scsi_dif_tuple { + __be16 guard; /* Checksum */ + __be16 app_tag; /* APPL identifier */ + __be32 ref_tag; /* Target LBA or indirect LBA */ +}; + +/* + * Checks the guard or meta-data for the type of error + * detected by the HBA. In case of errors, we set the + * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST + * to indicate to the kernel that the HBA detected error. 
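 *
 * Aside: a standalone sketch of the field-by-field comparison done below
 * (hypothetical names, host-order fields, plain C, not part of this
 * patch). The return value is the ASCQ that the resulting check
 * condition carries with ASC 0x10: 1 = guard, 2 = application tag,
 * 3 = reference tag, 0 = all protection fields match:
 *
 *   #include <stdint.h>
 *
 *   struct sketch_pi_tuple {            // 8-byte T10 protection tuple
 *           uint16_t guard;             // CRC over the data block
 *           uint16_t app_tag;           // application tag
 *           uint32_t ref_tag;           // reference tag (low LBA bits)
 *   };
 *
 *   static int pi_tuple_check(const struct sketch_pi_tuple *actual,
 *                             const struct sketch_pi_tuple *expected)
 *   {
 *           if (actual->guard != expected->guard)
 *                   return 1;           // guard checked first
 *           if (actual->ref_tag != expected->ref_tag)
 *                   return 3;
 *           if (actual->app_tag != expected->app_tag)
 *                   return 2;
 *           return 0;
 *   }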
+ */ +static inline int +qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) +{ + struct scsi_qla_host *vha = sp->vha; + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + uint8_t *ap = &sts24->data[12]; + uint8_t *ep = &sts24->data[20]; + uint32_t e_ref_tag, a_ref_tag; + uint16_t e_app_tag, a_app_tag; + uint16_t e_guard, a_guard; + + /* + * swab32 of the "data" field in the beginning of qla2x00_status_entry() + * would make guard field appear at offset 2 + */ + a_guard = get_unaligned_le16(ap + 2); + a_app_tag = get_unaligned_le16(ap + 0); + a_ref_tag = get_unaligned_le32(ap + 4); + e_guard = get_unaligned_le16(ep + 2); + e_app_tag = get_unaligned_le16(ep + 0); + e_ref_tag = get_unaligned_le32(ep + 4); + + ql_dbg(ql_dbg_io, vha, 0x3023, + "iocb(s) %p Returned STATUS.\n", sts24); + + ql_dbg(ql_dbg_io, vha, 0x3024, + "DIF ERROR in cmd 0x%x lba 0x%llx act ref" + " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" + " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", + cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, + a_app_tag, e_app_tag, a_guard, e_guard); + + /* + * Ignore sector if: + * For type 3: ref & app tag is all 'f's + * For type 0,1,2: app tag is all 'f's + */ + if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) && + (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 || + a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) { + uint32_t blocks_done, resid; + sector_t lba_s = scsi_get_lba(cmd); + + /* 2TB boundary case covered automatically with this */ + blocks_done = e_ref_tag - (uint32_t)lba_s + 1; + + resid = scsi_bufflen(cmd) - (blocks_done * + cmd->device->sector_size); + + scsi_set_resid(cmd, resid); + cmd->result = DID_OK << 16; + + /* Update protection tag */ + if (scsi_prot_sg_count(cmd)) { + uint32_t i, j = 0, k = 0, num_ent; + struct scatterlist *sg; + struct t10_pi_tuple *spt; + + /* Patch the corresponding protection tags */ + scsi_for_each_prot_sg(cmd, sg, + scsi_prot_sg_count(cmd), i) { + num_ent = sg_dma_len(sg) / 8; + if (k + num_ent < blocks_done) { + k += num_ent; + continue; + } + j = blocks_done - k - 1; + k = blocks_done; + break; + } + + if (k != blocks_done) { + ql_log(ql_log_warn, vha, 0x302f, + "unexpected tag values tag:lba=%x:%llx)\n", + e_ref_tag, (unsigned long long)lba_s); + return 1; + } + + spt = page_address(sg_page(sg)) + sg->offset; + spt += j; + + spt->app_tag = T10_PI_APP_ESCAPE; + if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) + spt->ref_tag = T10_PI_REF_ESCAPE; + } + + return 0; + } + + /* check guard */ + if (e_guard != a_guard) { + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); + set_host_byte(cmd, DID_ABORT); + return 1; + } + + /* check ref tag */ + if (e_ref_tag != a_ref_tag) { + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); + set_host_byte(cmd, DID_ABORT); + return 1; + } + + /* check appl tag */ + if (e_app_tag != a_app_tag) { + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); + set_host_byte(cmd, DID_ABORT); + return 1; + } + + return 1; +} + +static void +qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, + struct req_que *req, uint32_t index) +{ + struct qla_hw_data *ha = vha->hw; + srb_t *sp; + uint16_t comp_status; + uint16_t scsi_status; + uint16_t thread_id; + uint32_t rval = EXT_STATUS_OK; + struct bsg_job *bsg_job = NULL; + struct fc_bsg_request *bsg_request; + struct fc_bsg_reply *bsg_reply; + sts_entry_t *sts = pkt; + struct sts_entry_24xx *sts24 = pkt; + + /* Validate handle. 
*/ + if (index >= req->num_outstanding_cmds) { + ql_log(ql_log_warn, vha, 0x70af, + "Invalid SCSI completion handle 0x%x.\n", index); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + return; + } + + sp = req->outstanding_cmds[index]; + if (!sp) { + ql_log(ql_log_warn, vha, 0x70b0, + "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", + req->id, index); + + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + return; + } + + /* Free outstanding command slot. */ + req->outstanding_cmds[index] = NULL; + bsg_job = sp->u.bsg_job; + bsg_request = bsg_job->request; + bsg_reply = bsg_job->reply; + + if (IS_FWI2_CAPABLE(ha)) { + comp_status = le16_to_cpu(sts24->comp_status); + scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; + } else { + comp_status = le16_to_cpu(sts->comp_status); + scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; + } + + thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; + switch (comp_status) { + case CS_COMPLETE: + if (scsi_status == 0) { + bsg_reply->reply_payload_rcv_len = + bsg_job->reply_payload.payload_len; + vha->qla_stats.input_bytes += + bsg_reply->reply_payload_rcv_len; + vha->qla_stats.input_requests++; + rval = EXT_STATUS_OK; + } + goto done; + + case CS_DATA_OVERRUN: + ql_dbg(ql_dbg_user, vha, 0x70b1, + "Command completed with data overrun thread_id=%d\n", + thread_id); + rval = EXT_STATUS_DATA_OVERRUN; + break; + + case CS_DATA_UNDERRUN: + ql_dbg(ql_dbg_user, vha, 0x70b2, + "Command completed with data underrun thread_id=%d\n", + thread_id); + rval = EXT_STATUS_DATA_UNDERRUN; + break; + case CS_BIDIR_RD_OVERRUN: + ql_dbg(ql_dbg_user, vha, 0x70b3, + "Command completed with read data overrun thread_id=%d\n", + thread_id); + rval = EXT_STATUS_DATA_OVERRUN; + break; + + case CS_BIDIR_RD_WR_OVERRUN: + ql_dbg(ql_dbg_user, vha, 0x70b4, + "Command completed with read and write data overrun " + "thread_id=%d\n", thread_id); + rval = EXT_STATUS_DATA_OVERRUN; + break; + + case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: + ql_dbg(ql_dbg_user, vha, 0x70b5, + "Command completed with read data over and write data " + "underrun thread_id=%d\n", thread_id); + rval = EXT_STATUS_DATA_OVERRUN; + break; + + case CS_BIDIR_RD_UNDERRUN: + ql_dbg(ql_dbg_user, vha, 0x70b6, + "Command completed with read data underrun " + "thread_id=%d\n", thread_id); + rval = EXT_STATUS_DATA_UNDERRUN; + break; + + case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: + ql_dbg(ql_dbg_user, vha, 0x70b7, + "Command completed with read data under and write data " + "overrun thread_id=%d\n", thread_id); + rval = EXT_STATUS_DATA_UNDERRUN; + break; + + case CS_BIDIR_RD_WR_UNDERRUN: + ql_dbg(ql_dbg_user, vha, 0x70b8, + "Command completed with read and write data underrun " + "thread_id=%d\n", thread_id); + rval = EXT_STATUS_DATA_UNDERRUN; + break; + + case CS_BIDIR_DMA: + ql_dbg(ql_dbg_user, vha, 0x70b9, + "Command completed with data DMA error thread_id=%d\n", + thread_id); + rval = EXT_STATUS_DMA_ERR; + break; + + case CS_TIMEOUT: + ql_dbg(ql_dbg_user, vha, 0x70ba, + "Command completed with timeout thread_id=%d\n", + thread_id); + rval = EXT_STATUS_TIMEOUT; + break; + default: + ql_dbg(ql_dbg_user, vha, 0x70bb, + "Command completed with completion status=0x%x " + "thread_id=%d\n", comp_status, thread_id); + rval = EXT_STATUS_ERR; + break; + } + bsg_reply->reply_payload_rcv_len = 0; + +done: + /* Return the vendor specific reply to API */ + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + /* Always return DID_OK, bsg will send the vendor specific response + * in 
this case only */ + sp->done(sp, DID_OK << 16); + +} + +/** + * qla2x00_status_entry() - Process a Status IOCB entry. + * @vha: SCSI driver HA context + * @rsp: response queue + * @pkt: Entry pointer + */ +static void +qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) +{ + srb_t *sp; + fc_port_t *fcport; + struct scsi_cmnd *cp; + sts_entry_t *sts = pkt; + struct sts_entry_24xx *sts24 = pkt; + uint16_t comp_status; + uint16_t scsi_status; + uint16_t ox_id; + uint8_t lscsi_status; + int32_t resid; + uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, + fw_resid_len; + uint8_t *rsp_info, *sense_data; + struct qla_hw_data *ha = vha->hw; + uint32_t handle; + uint16_t que; + struct req_que *req; + int logit = 1; + int res = 0; + uint16_t state_flags = 0; + uint16_t sts_qual = 0; + + if (IS_FWI2_CAPABLE(ha)) { + comp_status = le16_to_cpu(sts24->comp_status); + scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; + state_flags = le16_to_cpu(sts24->state_flags); + } else { + comp_status = le16_to_cpu(sts->comp_status); + scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; + } + handle = (uint32_t) LSW(sts->handle); + que = MSW(sts->handle); + req = ha->req_q_map[que]; + + /* Check for invalid queue pointer */ + if (req == NULL || + que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { + ql_dbg(ql_dbg_io, vha, 0x3059, + "Invalid status handle (0x%x): Bad req pointer. req=%p, " + "que=%u.\n", sts->handle, req, que); + return; + } + + /* Validate handle. */ + if (handle < req->num_outstanding_cmds) { + sp = req->outstanding_cmds[handle]; + if (!sp) { + ql_dbg(ql_dbg_io, vha, 0x3075, + "%s(%ld): Already returned command for status handle (0x%x).\n", + __func__, vha->host_no, sts->handle); + return; + } + } else { + ql_dbg(ql_dbg_io, vha, 0x3017, + "Invalid status handle, out of range (0x%x).\n", + sts->handle); + + if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { + if (IS_P3P_TYPE(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + return; + } + qla_put_fw_resources(sp->qpair, &sp->iores); + + if (sp->cmd_type != TYPE_SRB) { + req->outstanding_cmds[handle] = NULL; + ql_dbg(ql_dbg_io, vha, 0x3015, + "Unknown sp->cmd_type %x %p).\n", + sp->cmd_type, sp); + return; + } + + /* NVME completion. */ + if (sp->type == SRB_NVME_CMD) { + req->outstanding_cmds[handle] = NULL; + qla24xx_nvme_iocb_entry(vha, req, pkt, sp); + return; + } + + if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { + qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); + return; + } + + /* Task Management completion. */ + if (sp->type == SRB_TM_CMD) { + qla24xx_tm_iocb_entry(vha, req, pkt); + return; + } + + /* Fast path completion. 
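 *
 * Aside: a standalone sketch of how the 32-bit status handle used in
 * this routine packs two values, the request-queue number in the upper
 * half and the outstanding-command index in the lower half (hypothetical
 * names, plain C, not part of this patch):
 *
 *   #include <stdint.h>
 *
 *   // Lower 16 bits: index into the queue's outstanding-command array.
 *   static inline uint16_t handle_to_index(uint32_t handle)
 *   {
 *           return (uint16_t)(handle & 0xffff);
 *   }
 *
 *   // Upper 16 bits: which request queue the command was issued on.
 *   static inline uint16_t handle_to_queue(uint32_t handle)
 *   {
 *           return (uint16_t)(handle >> 16);
 *   }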
*/ + qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24); + sp->qpair->cmd_completion_cnt++; + + if (comp_status == CS_COMPLETE && scsi_status == 0) { + qla2x00_process_completed_request(vha, req, handle); + + return; + } + + cp = GET_CMD_SP(sp); + if (cp == NULL) { + ql_dbg(ql_dbg_io, vha, 0x3018, + "Command already returned (0x%x/%p).\n", + sts->handle, sp); + + req->outstanding_cmds[handle] = NULL; + return; + } + + lscsi_status = scsi_status & STATUS_MASK; + + fcport = sp->fcport; + + ox_id = 0; + sense_len = par_sense_len = rsp_info_len = resid_len = + fw_resid_len = 0; + if (IS_FWI2_CAPABLE(ha)) { + if (scsi_status & SS_SENSE_LEN_VALID) + sense_len = le32_to_cpu(sts24->sense_len); + if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) + rsp_info_len = le32_to_cpu(sts24->rsp_data_len); + if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) + resid_len = le32_to_cpu(sts24->rsp_residual_count); + if (comp_status == CS_DATA_UNDERRUN) + fw_resid_len = le32_to_cpu(sts24->residual_len); + rsp_info = sts24->data; + sense_data = sts24->data; + host_to_fcp_swap(sts24->data, sizeof(sts24->data)); + ox_id = le16_to_cpu(sts24->ox_id); + par_sense_len = sizeof(sts24->data); + sts_qual = le16_to_cpu(sts24->status_qualifier); + } else { + if (scsi_status & SS_SENSE_LEN_VALID) + sense_len = le16_to_cpu(sts->req_sense_length); + if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) + rsp_info_len = le16_to_cpu(sts->rsp_info_len); + resid_len = le32_to_cpu(sts->residual_length); + rsp_info = sts->rsp_info; + sense_data = sts->req_sense_data; + par_sense_len = sizeof(sts->req_sense_data); + } + + /* Check for any FCP transport errors. */ + if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { + /* Sense data lies beyond any FCP RESPONSE data. */ + if (IS_FWI2_CAPABLE(ha)) { + sense_data += rsp_info_len; + par_sense_len -= rsp_info_len; + } + if (rsp_info_len > 3 && rsp_info[3]) { + ql_dbg(ql_dbg_io, fcport->vha, 0x3019, + "FCP I/O protocol failure (0x%x/0x%x).\n", + rsp_info_len, rsp_info[3]); + + res = DID_BUS_BUSY << 16; + goto out; + } + } + + /* Check for overrun. */ + if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && + scsi_status & SS_RESIDUAL_OVER) + comp_status = CS_DATA_OVERRUN; + + /* + * Check retry_delay_timer value if we receive a busy or + * queue full. + */ + if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL || + lscsi_status == SAM_STAT_BUSY)) + qla2x00_set_retry_delay_timestamp(fcport, sts_qual); + + /* + * Based on Host and scsi status generate status code for Linux + */ + switch (comp_status) { + case CS_COMPLETE: + case CS_QUEUE_FULL: + if (scsi_status == 0) { + res = DID_OK << 16; + break; + } + if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { + resid = resid_len; + scsi_set_resid(cp, resid); + + if (!lscsi_status && + ((unsigned)(scsi_bufflen(cp) - resid) < + cp->underflow)) { + ql_dbg(ql_dbg_io, fcport->vha, 0x301a, + "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", + resid, scsi_bufflen(cp)); + + res = DID_ERROR << 16; + break; + } + } + res = DID_OK << 16 | lscsi_status; + + if (lscsi_status == SAM_STAT_TASK_SET_FULL) { + ql_dbg(ql_dbg_io, fcport->vha, 0x301b, + "QUEUE FULL detected.\n"); + break; + } + logit = 0; + if (lscsi_status != SS_CHECK_CONDITION) + break; + + memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + if (!(scsi_status & SS_SENSE_LEN_VALID)) + break; + + qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, + rsp, res); + break; + + case CS_DATA_UNDERRUN: + /* Use F/W calculated residual length. */ + resid = IS_FWI2_CAPABLE(ha) ? 
fw_resid_len : resid_len; + scsi_set_resid(cp, resid); + if (scsi_status & SS_RESIDUAL_UNDER) { + if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { + ql_log(ql_log_warn, fcport->vha, 0x301d, + "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", + resid, scsi_bufflen(cp)); + + res = DID_ERROR << 16 | lscsi_status; + goto check_scsi_status; + } + + if (!lscsi_status && + ((unsigned)(scsi_bufflen(cp) - resid) < + cp->underflow)) { + ql_dbg(ql_dbg_io, fcport->vha, 0x301e, + "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", + resid, scsi_bufflen(cp)); + + res = DID_ERROR << 16; + break; + } + } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && + lscsi_status != SAM_STAT_BUSY) { + /* + * scsi status of task set and busy are considered to be + * task not completed. + */ + + ql_log(ql_log_warn, fcport->vha, 0x301f, + "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", + resid, scsi_bufflen(cp)); + + vha->interface_err_cnt++; + + res = DID_ERROR << 16 | lscsi_status; + goto check_scsi_status; + } else { + ql_dbg(ql_dbg_io, fcport->vha, 0x3030, + "scsi_status: 0x%x, lscsi_status: 0x%x\n", + scsi_status, lscsi_status); + } + + res = DID_OK << 16 | lscsi_status; + logit = 0; + +check_scsi_status: + /* + * Check to see if SCSI Status is non zero. If so report SCSI + * Status. + */ + if (lscsi_status != 0) { + if (lscsi_status == SAM_STAT_TASK_SET_FULL) { + ql_dbg(ql_dbg_io, fcport->vha, 0x3020, + "QUEUE FULL detected.\n"); + logit = 1; + break; + } + if (lscsi_status != SS_CHECK_CONDITION) + break; + + memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + if (!(scsi_status & SS_SENSE_LEN_VALID)) + break; + + qla2x00_handle_sense(sp, sense_data, par_sense_len, + sense_len, rsp, res); + } + break; + + case CS_PORT_LOGGED_OUT: + case CS_PORT_CONFIG_CHG: + case CS_PORT_BUSY: + case CS_INCOMPLETE: + case CS_PORT_UNAVAILABLE: + case CS_TIMEOUT: + case CS_RESET: + case CS_EDIF_INV_REQ: + + /* + * We are going to have the fc class block the rport + * while we try to recover so instruct the mid layer + * to requeue until the class decides how to handle this. 
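 *
 * Aside: a standalone sketch of the underrun sanity check applied in the
 * CS_DATA_UNDERRUN branch above (hypothetical names, plain C, not part
 * of this patch). The bytes actually moved are the request length minus
 * the reported residual; if that falls below the command's declared
 * underflow limit, the I/O is treated as an error rather than completed
 * short:
 *
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *
 *   // Assumes resid <= bufflen, as a sane residual report guarantees.
 *   static bool underrun_is_error(uint32_t bufflen, uint32_t resid,
 *                                 uint32_t underflow)
 *   {
 *           uint32_t transferred = bufflen - resid;
 *
 *           return transferred < underflow;
 *   }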
+ */ + res = DID_TRANSPORT_DISRUPTED << 16; + + if (comp_status == CS_TIMEOUT) { + if (IS_FWI2_CAPABLE(ha)) + break; + else if ((le16_to_cpu(sts->status_flags) & + SF_LOGOUT_SENT) == 0) + break; + } + + if (atomic_read(&fcport->state) == FCS_ONLINE) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, + "Port to be marked lost on fcport=%02x%02x%02x, current " + "port state= %s comp_status %x.\n", fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa, + port_state_str[FCS_ONLINE], + comp_status); + + qlt_schedule_sess_for_deletion(fcport); + } + + break; + + case CS_ABORTED: + res = DID_RESET << 16; + break; + + case CS_DIF_ERROR: + logit = qla2x00_handle_dif_error(sp, sts24); + res = cp->result; + break; + + case CS_TRANSPORT: + res = DID_ERROR << 16; + vha->hw_err_cnt++; + + if (!IS_PI_SPLIT_DET_CAPABLE(ha)) + break; + + if (state_flags & BIT_4) + scmd_printk(KERN_WARNING, cp, + "Unsupported device '%s' found.\n", + cp->device->vendor); + break; + + case CS_DMA: + ql_log(ql_log_info, fcport->vha, 0x3022, + "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", + comp_status, scsi_status, res, vha->host_no, + cp->device->id, cp->device->lun, fcport->d_id.b24, + ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len, + resid_len, fw_resid_len, sp, cp); + ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee, + pkt, sizeof(*sts24)); + res = DID_ERROR << 16; + vha->hw_err_cnt++; + break; + default: + res = DID_ERROR << 16; + break; + } + +out: + if (logit) + ql_dbg(ql_dbg_io, fcport->vha, 0x3022, + "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", + comp_status, scsi_status, res, vha->host_no, + cp->device->id, cp->device->lun, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, + cp->cmnd, scsi_bufflen(cp), rsp_info_len, + resid_len, fw_resid_len, sp, cp); + + if (rsp->status_srb == NULL) + sp->done(sp, res); + + /* for io's, clearing of outstanding_cmds[handle] means scsi_done was called */ + req->outstanding_cmds[handle] = NULL; +} + +/** + * qla2x00_status_cont_entry() - Process a Status Continuations entry. + * @rsp: response queue + * @pkt: Entry pointer + * + * Extended sense data. + */ +static void +qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) +{ + uint8_t sense_sz = 0; + struct qla_hw_data *ha = rsp->hw; + struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); + srb_t *sp = rsp->status_srb; + struct scsi_cmnd *cp; + uint32_t sense_len; + uint8_t *sense_ptr; + + if (!sp || !GET_CMD_SENSE_LEN(sp)) + return; + + sense_len = GET_CMD_SENSE_LEN(sp); + sense_ptr = GET_CMD_SENSE_PTR(sp); + + cp = GET_CMD_SP(sp); + if (cp == NULL) { + ql_log(ql_log_warn, vha, 0x3025, + "cmd is NULL: already returned to OS (sp=%p).\n", sp); + + rsp->status_srb = NULL; + return; + } + + if (sense_len > sizeof(pkt->data)) + sense_sz = sizeof(pkt->data); + else + sense_sz = sense_len; + + /* Move sense data. */ + if (IS_FWI2_CAPABLE(ha)) + host_to_fcp_swap(pkt->data, sizeof(pkt->data)); + memcpy(sense_ptr, pkt->data, sense_sz); + ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, + sense_ptr, sense_sz); + + sense_len -= sense_sz; + sense_ptr += sense_sz; + + SET_CMD_SENSE_PTR(sp, sense_ptr); + SET_CMD_SENSE_LEN(sp, sense_len); + + /* Place command on done queue. 
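 *
 * Aside: a standalone sketch of the chunked sense-data copy performed by
 * this routine, one continuation entry at a time (hypothetical names,
 * plain C, not part of this patch):
 *
 *   #include <stddef.h>
 *   #include <string.h>
 *
 *   struct sense_cursor {
 *           unsigned char *ptr;         // where the next chunk lands
 *           size_t         left;        // bytes still expected
 *   };
 *
 *   // Consume one continuation chunk of at most chunk_sz bytes and
 *   // return how many bytes are still outstanding (0 = complete).
 *   static size_t sense_copy_chunk(struct sense_cursor *c,
 *                                  const unsigned char *chunk,
 *                                  size_t chunk_sz)
 *   {
 *           size_t n = c->left < chunk_sz ? c->left : chunk_sz;
 *
 *           memcpy(c->ptr, chunk, n);
 *           c->ptr  += n;
 *           c->left -= n;
 *           return c->left;
 *   }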
*/ + if (sense_len == 0) { + rsp->status_srb = NULL; + sp->done(sp, cp->result); + } +} + +/** + * qla2x00_error_entry() - Process an error entry. + * @vha: SCSI driver HA context + * @rsp: response queue + * @pkt: Entry pointer + * return : 1=allow further error analysis. 0=no additional error analysis. + */ +static int +qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) +{ + srb_t *sp; + struct qla_hw_data *ha = vha->hw; + const char func[] = "ERROR-IOCB"; + uint16_t que = MSW(pkt->handle); + struct req_que *req = NULL; + int res = DID_ERROR << 16; + u16 index; + + ql_dbg(ql_dbg_async, vha, 0x502a, + "iocb type %xh with error status %xh, handle %xh, rspq id %d\n", + pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id); + + if (que >= ha->max_req_queues || !ha->req_q_map[que]) + goto fatal; + + req = ha->req_q_map[que]; + + if (pkt->entry_status & RF_BUSY) + res = DID_BUS_BUSY << 16; + + if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE) + return 0; + + switch (pkt->entry_type) { + case NOTIFY_ACK_TYPE: + case STATUS_CONT_TYPE: + case LOGINOUT_PORT_IOCB_TYPE: + case CT_IOCB_TYPE: + case ELS_IOCB_TYPE: + case ABORT_IOCB_TYPE: + case MBX_IOCB_TYPE: + default: + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (sp) { + sp->done(sp, res); + return 0; + } + break; + + case SA_UPDATE_IOCB_TYPE: + case ABTS_RESP_24XX: + case CTIO_TYPE7: + case CTIO_CRC2: + return 1; + case STATUS_TYPE: + sp = qla_get_sp_from_handle(vha, func, req, pkt, &index); + if (sp) { + sp->done(sp, res); + req->outstanding_cmds[index] = NULL; + return 0; + } + break; + } +fatal: + ql_log(ql_log_warn, vha, 0x5030, + "Error entry - invalid handle/queue (%04x).\n", que); + return 0; +} + +/** + * qla24xx_mbx_completion() - Process mailbox command completions. + * @vha: SCSI driver HA context + * @mb0: Mailbox0 register + */ +static void +qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) +{ + uint16_t cnt; + uint32_t mboxes; + __le16 __iomem *wptr; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + /* Read all mbox registers? */ + WARN_ON_ONCE(ha->mbx_count > 32); + mboxes = (1ULL << ha->mbx_count) - 1; + if (!ha->mcp) + ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); + else + mboxes = ha->mcp->in_mb; + + /* Load return mailbox registers. 
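 *
 * Aside: a standalone sketch of the bitmap-driven copy that follows,
 * where bit N of the mask selects mailbox register N (hypothetical
 * names, plain C, not part of this patch). Register 0 always carries
 * the completion status and is copied unconditionally:
 *
 *   #include <stdint.h>
 *
 *   static void collect_mailboxes(uint16_t *out, const uint16_t *regs,
 *                                 unsigned int count, uint32_t wanted)
 *   {
 *           unsigned int i;
 *
 *           out[0] = regs[0];           // mailbox 0 is always read
 *           wanted >>= 1;               // consume the bit for mailbox 0
 *           for (i = 1; i < count; i++) {
 *                   if (wanted & 1)
 *                           out[i] = regs[i];
 *                   wanted >>= 1;
 *           }
 *   }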
*/ + ha->flags.mbox_int = 1; + ha->mailbox_out[0] = mb0; + mboxes >>= 1; + wptr = ®->mailbox1; + + for (cnt = 1; cnt < ha->mbx_count; cnt++) { + if (mboxes & BIT_0) + ha->mailbox_out[cnt] = rd_reg_word(wptr); + + mboxes >>= 1; + wptr++; + } +} + +static void +qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct abort_entry_24xx *pkt) +{ + const char func[] = "ABT_IOCB"; + srb_t *sp; + srb_t *orig_sp = NULL; + struct srb_iocb *abt; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + abt = &sp->u.iocb_cmd; + abt->u.abt.comp_status = pkt->comp_status; + orig_sp = sp->cmd_sp; + /* Need to pass original sp */ + if (orig_sp) + qla_nvme_abort_process_comp_status(pkt, orig_sp); + + sp->done(sp, 0); +} + +void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha, + struct pt_ls4_request *pkt, struct req_que *req) +{ + srb_t *sp; + const char func[] = "LS4_IOCB"; + uint16_t comp_status; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + comp_status = le16_to_cpu(pkt->status); + sp->done(sp, comp_status); +} + +/** + * qla_chk_cont_iocb_avail - check for all continuation iocbs are available + * before iocb processing can start. + * @vha: host adapter pointer + * @rsp: respond queue + * @pkt: head iocb describing how many continuation iocb + * Return: 0 all iocbs has arrived, xx- all iocbs have not arrived. + */ +static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha, + struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in) +{ + int start_pkt_ring_index; + u32 iocb_cnt = 0; + int rc = 0; + + if (pkt->entry_count == 1) + return rc; + + /* ring_index was pre-increment. set it back to current pkt */ + if (rsp->ring_index == 0) + start_pkt_ring_index = rsp->length - 1; + else + start_pkt_ring_index = rsp->ring_index - 1; + + if (rsp_q_in < start_pkt_ring_index) + /* q in ptr is wrapped */ + iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in; + else + iocb_cnt = rsp_q_in - start_pkt_ring_index; + + if (iocb_cnt < pkt->entry_count) + rc = -EIO; + + ql_dbg(ql_dbg_init, vha, 0x5091, + "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n", + __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc); + + return rc; +} + +static void qla_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct mrk_entry_24xx *pkt) +{ + const char func[] = "MRK-IOCB"; + srb_t *sp; + int res = QLA_SUCCESS; + + if (!IS_FWI2_CAPABLE(vha->hw)) + return; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + if (pkt->entry_status) { + ql_dbg(ql_dbg_taskm, vha, 0x8025, "marker failure.\n"); + res = QLA_COMMAND_ERROR; + } + sp->u.iocb_cmd.u.tmf.data = res; + sp->done(sp, res); +} + +/** + * qla24xx_process_response_queue() - Process response queue entries. + * @vha: SCSI driver HA context + * @rsp: response queue + */ +void qla24xx_process_response_queue(struct scsi_qla_host *vha, + struct rsp_que *rsp) +{ + struct sts_entry_24xx *pkt; + struct qla_hw_data *ha = vha->hw; + struct purex_entry_24xx *purex_entry; + struct purex_item *pure_item; + struct pt_ls4_rx_unsol *p; + u16 rsp_in = 0, cur_ring_index; + int is_shadow_hba; + + if (!ha->flags.fw_started) + return; + + if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) { + rsp->qpair->rcv_intr = 1; + + if (!rsp->qpair->cpu_mapped) + qla_cpu_update(rsp->qpair, raw_smp_processor_id()); + } + +#define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in) \ + do { \ + _rsp_in = _is_shadow_hba ? 
*(_rsp)->in_ptr : \ + rd_reg_dword_relaxed((_rsp)->rsp_q_in); \ + } while (0) + + is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha); + + __update_rsp_in(is_shadow_hba, rsp, rsp_in); + + while (rsp->ring_index != rsp_in && + rsp->ring_ptr->signature != RESPONSE_PROCESSED) { + pkt = (struct sts_entry_24xx *)rsp->ring_ptr; + cur_ring_index = rsp->ring_index; + + rsp->ring_index++; + if (rsp->ring_index == rsp->length) { + rsp->ring_index = 0; + rsp->ring_ptr = rsp->ring; + } else { + rsp->ring_ptr++; + } + + if (pkt->entry_status != 0) { + if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt)) + goto process_err; + + ((response_t *)pkt)->signature = RESPONSE_PROCESSED; + wmb(); + continue; + } +process_err: + + switch (pkt->entry_type) { + case STATUS_TYPE: + qla2x00_status_entry(vha, rsp, pkt); + break; + case STATUS_CONT_TYPE: + qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); + break; + case VP_RPT_ID_IOCB_TYPE: + qla24xx_report_id_acquisition(vha, + (struct vp_rpt_id_entry_24xx *)pkt); + break; + case LOGINOUT_PORT_IOCB_TYPE: + qla24xx_logio_entry(vha, rsp->req, + (struct logio_entry_24xx *)pkt); + break; + case CT_IOCB_TYPE: + qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); + break; + case ELS_IOCB_TYPE: + qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); + break; + case ABTS_RECV_24XX: + if (qla_ini_mode_enabled(vha)) { + pure_item = qla24xx_copy_std_pkt(vha, pkt); + if (!pure_item) + break; + qla24xx_queue_purex_item(vha, pure_item, + qla24xx_process_abts); + break; + } + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { + /* ensure that the ATIO queue is empty */ + qlt_handle_abts_recv(vha, rsp, + (response_t *)pkt); + break; + } else { + qlt_24xx_process_atio_queue(vha, 1); + } + fallthrough; + case ABTS_RESP_24XX: + case CTIO_TYPE7: + case CTIO_CRC2: + qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt); + break; + case PT_LS4_REQUEST: + qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt, + rsp->req); + break; + case NOTIFY_ACK_TYPE: + if (pkt->handle == QLA_TGT_SKIP_HANDLE) + qlt_response_pkt_all_vps(vha, rsp, + (response_t *)pkt); + else + qla24xxx_nack_iocb_entry(vha, rsp->req, + (struct nack_to_isp *)pkt); + break; + case MARKER_TYPE: + qla_marker_iocb_entry(vha, rsp->req, (struct mrk_entry_24xx *)pkt); + break; + case ABORT_IOCB_TYPE: + qla24xx_abort_iocb_entry(vha, rsp->req, + (struct abort_entry_24xx *)pkt); + break; + case MBX_IOCB_TYPE: + qla24xx_mbx_iocb_entry(vha, rsp->req, + (struct mbx_24xx_entry *)pkt); + break; + case VP_CTRL_IOCB_TYPE: + qla_ctrlvp_completed(vha, rsp->req, + (struct vp_ctrl_entry_24xx *)pkt); + break; + case PUREX_IOCB_TYPE: + purex_entry = (void *)pkt; + switch (purex_entry->els_frame_payload[3]) { + case ELS_RDP: + pure_item = qla24xx_copy_std_pkt(vha, pkt); + if (!pure_item) + break; + qla24xx_queue_purex_item(vha, pure_item, + qla24xx_process_purex_rdp); + break; + case ELS_FPIN: + if (!vha->hw->flags.scm_enabled) { + ql_log(ql_log_warn, vha, 0x5094, + "SCM not active for this port\n"); + break; + } + pure_item = qla27xx_copy_fpin_pkt(vha, + (void **)&pkt, &rsp); + __update_rsp_in(is_shadow_hba, rsp, rsp_in); + if (!pure_item) + break; + qla24xx_queue_purex_item(vha, pure_item, + qla27xx_process_purex_fpin); + break; + + case ELS_AUTH_ELS: + if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) { + /* + * ring_ptr and ring_index were + * pre-incremented above. Reset them + * back to current. Wait for next + * interrupt with all IOCBs to arrive + * and re-process. 
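 *
 * Aside: the deferral decision comes from qla_chk_cont_iocb_avail()
 * above; a standalone sketch of its circular-distance computation
 * (hypothetical names, plain C, not part of this patch):
 *
 *   #include <stdint.h>
 *
 *   // Entries available in a ring of 'length' slots between the entry
 *   // at 'start' and the producer index 'in'. If this comes up short
 *   // of the head IOCB's entry_count, processing is deferred until the
 *   // remaining continuation IOCBs have arrived.
 *   static uint32_t ring_entries_available(uint32_t start, uint32_t in,
 *                                          uint32_t length)
 *   {
 *           return (in >= start) ? in - start : length - start + in;
 *   }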
+ */ + rsp->ring_ptr = (response_t *)pkt; + rsp->ring_index = cur_ring_index; + + ql_dbg(ql_dbg_init, vha, 0x5091, + "Defer processing ELS opcode %#x...\n", + purex_entry->els_frame_payload[3]); + return; + } + qla24xx_auth_els(vha, (void **)&pkt, &rsp); + break; + default: + ql_log(ql_log_warn, vha, 0x509c, + "Discarding ELS Request opcode 0x%x\n", + purex_entry->els_frame_payload[3]); + } + break; + case SA_UPDATE_IOCB_TYPE: + qla28xx_sa_update_iocb_entry(vha, rsp->req, + (struct sa_update_28xx *)pkt); + break; + case PT_LS4_UNSOL: + p = (void *)pkt; + if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) { + rsp->ring_ptr = (response_t *)pkt; + rsp->ring_index = cur_ring_index; + + ql_dbg(ql_dbg_init, vha, 0x2124, + "Defer processing UNSOL LS req opcode %#x...\n", + p->payload[0]); + return; + } + qla2xxx_process_purls_iocb((void **)&pkt, &rsp); + break; + default: + /* Type Not Supported. */ + ql_dbg(ql_dbg_async, vha, 0x5042, + "Received unknown response pkt type 0x%x entry status=%x.\n", + pkt->entry_type, pkt->entry_status); + break; + } + ((response_t *)pkt)->signature = RESPONSE_PROCESSED; + wmb(); + } + + /* Adjust ring index */ + if (IS_P3P_TYPE(ha)) { + struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; + + wrt_reg_dword(®->rsp_q_out[0], rsp->ring_index); + } else { + wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index); + } +} + +static void +qla2xxx_check_risc_status(scsi_qla_host_t *vha) +{ + int rval; + uint32_t cnt; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return; + + rval = QLA_SUCCESS; + wrt_reg_dword(®->iobase_addr, 0x7C00); + rd_reg_dword(®->iobase_addr); + wrt_reg_dword(®->iobase_window, 0x0001); + for (cnt = 10000; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && + rval == QLA_SUCCESS; cnt--) { + if (cnt) { + wrt_reg_dword(®->iobase_window, 0x0001); + udelay(10); + } else + rval = QLA_FUNCTION_TIMEOUT; + } + if (rval == QLA_SUCCESS) + goto next_test; + + rval = QLA_SUCCESS; + wrt_reg_dword(®->iobase_window, 0x0003); + for (cnt = 100; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && + rval == QLA_SUCCESS; cnt--) { + if (cnt) { + wrt_reg_dword(®->iobase_window, 0x0003); + udelay(10); + } else + rval = QLA_FUNCTION_TIMEOUT; + } + if (rval != QLA_SUCCESS) + goto done; + +next_test: + if (rd_reg_dword(®->iobase_c8) & BIT_3) + ql_log(ql_log_info, vha, 0x504c, + "Additional code -- 0x55AA.\n"); + +done: + wrt_reg_dword(®->iobase_window, 0x0000); + rd_reg_dword(®->iobase_window); +} + +/** + * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. + * @irq: interrupt number + * @dev_id: SCSI driver HA context + * + * Called by system whenever the host adapter generates an interrupt. + * + * Returns handled flag. 
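 *
 * Aside: a standalone sketch of how the handler below splits the 32-bit
 * host status word, with the interrupt source in the low byte and
 * mailbox register 0 in the high word (hypothetical names, plain C, not
 * part of this patch):
 *
 *   #include <stdint.h>
 *
 *   static inline uint8_t intr_type(uint32_t stat)
 *   {
 *           return (uint8_t)(stat & 0xff);
 *   }
 *
 *   static inline uint16_t intr_mb0(uint32_t stat)
 *   {
 *           return (uint16_t)(stat >> 16);
 *   }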
+ */ +irqreturn_t +qla24xx_intr_handler(int irq, void *dev_id) +{ + scsi_qla_host_t *vha; + struct qla_hw_data *ha; + struct device_reg_24xx __iomem *reg; + int status; + unsigned long iter; + uint32_t stat; + uint32_t hccr; + uint16_t mb[8]; + struct rsp_que *rsp; + unsigned long flags; + bool process_atio = false; + + rsp = (struct rsp_que *) dev_id; + if (!rsp) { + ql_log(ql_log_info, NULL, 0x5059, + "%s: NULL response queue pointer.\n", __func__); + return IRQ_NONE; + } + + ha = rsp->hw; + reg = &ha->iobase->isp24; + status = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + return IRQ_HANDLED; + + spin_lock_irqsave(&ha->hardware_lock, flags); + vha = pci_get_drvdata(ha->pdev); + for (iter = 50; iter--; ) { + stat = rd_reg_dword(®->host_status); + if (qla2x00_check_reg32_for_disconnect(vha, stat)) + break; + if (stat & HSRX_RISC_PAUSED) { + if (unlikely(pci_channel_offline(ha->pdev))) + break; + + hccr = rd_reg_dword(®->hccr); + + ql_log(ql_log_warn, vha, 0x504b, + "RISC paused -- HCCR=%x, Dumping firmware.\n", + hccr); + + qla2xxx_check_risc_status(vha); + + ha->isp_ops->fw_dump(vha); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + } else if ((stat & HSRX_RISC_INT) == 0) + break; + + switch (stat & 0xff) { + case INTR_ROM_MB_SUCCESS: + case INTR_ROM_MB_FAILED: + case INTR_MB_SUCCESS: + case INTR_MB_FAILED: + qla24xx_mbx_completion(vha, MSW(stat)); + status |= MBX_INTERRUPT; + + break; + case INTR_ASYNC_EVENT: + mb[0] = MSW(stat); + mb[1] = rd_reg_word(®->mailbox1); + mb[2] = rd_reg_word(®->mailbox2); + mb[3] = rd_reg_word(®->mailbox3); + qla2x00_async_event(vha, rsp, mb); + break; + case INTR_RSP_QUE_UPDATE: + case INTR_RSP_QUE_UPDATE_83XX: + qla24xx_process_response_queue(vha, rsp); + break; + case INTR_ATIO_QUE_UPDATE_27XX: + case INTR_ATIO_QUE_UPDATE: + process_atio = true; + break; + case INTR_ATIO_RSP_QUE_UPDATE: + process_atio = true; + qla24xx_process_response_queue(vha, rsp); + break; + default: + ql_dbg(ql_dbg_async, vha, 0x504f, + "Unrecognized interrupt type (%d).\n", stat * 0xff); + break; + } + wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); + rd_reg_dword_relaxed(®->hccr); + if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) + ndelay(3500); + } + qla2x00_handle_mbx_completion(ha, status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (process_atio) { + spin_lock_irqsave(&ha->tgt.atio_lock, flags); + qlt_24xx_process_atio_queue(vha, 0); + spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); + } + + return IRQ_HANDLED; +} + +static irqreturn_t +qla24xx_msix_rsp_q(int irq, void *dev_id) +{ + struct qla_hw_data *ha; + struct rsp_que *rsp; + struct device_reg_24xx __iomem *reg; + struct scsi_qla_host *vha; + unsigned long flags; + + rsp = (struct rsp_que *) dev_id; + if (!rsp) { + ql_log(ql_log_info, NULL, 0x505a, + "%s: NULL response queue pointer.\n", __func__); + return IRQ_NONE; + } + ha = rsp->hw; + reg = &ha->iobase->isp24; + + spin_lock_irqsave(&ha->hardware_lock, flags); + + vha = pci_get_drvdata(ha->pdev); + qla24xx_process_response_queue(vha, rsp); + if (!ha->flags.disable_msix_handshake) { + wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); + rd_reg_dword_relaxed(®->hccr); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return IRQ_HANDLED; +} + +static irqreturn_t +qla24xx_msix_default(int irq, void *dev_id) +{ + scsi_qla_host_t *vha; + struct qla_hw_data *ha; + struct rsp_que *rsp; + struct device_reg_24xx __iomem *reg; + int status; + uint32_t stat; + uint32_t hccr; + uint16_t mb[8]; + unsigned long flags; + bool process_atio = 
false; + + rsp = (struct rsp_que *) dev_id; + if (!rsp) { + ql_log(ql_log_info, NULL, 0x505c, + "%s: NULL response queue pointer.\n", __func__); + return IRQ_NONE; + } + ha = rsp->hw; + reg = &ha->iobase->isp24; + status = 0; + + spin_lock_irqsave(&ha->hardware_lock, flags); + vha = pci_get_drvdata(ha->pdev); + do { + stat = rd_reg_dword(®->host_status); + if (qla2x00_check_reg32_for_disconnect(vha, stat)) + break; + if (stat & HSRX_RISC_PAUSED) { + if (unlikely(pci_channel_offline(ha->pdev))) + break; + + hccr = rd_reg_dword(®->hccr); + + ql_log(ql_log_info, vha, 0x5050, + "RISC paused -- HCCR=%x, Dumping firmware.\n", + hccr); + + qla2xxx_check_risc_status(vha); + vha->hw_err_cnt++; + + ha->isp_ops->fw_dump(vha); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + } else if ((stat & HSRX_RISC_INT) == 0) + break; + + switch (stat & 0xff) { + case INTR_ROM_MB_SUCCESS: + case INTR_ROM_MB_FAILED: + case INTR_MB_SUCCESS: + case INTR_MB_FAILED: + qla24xx_mbx_completion(vha, MSW(stat)); + status |= MBX_INTERRUPT; + + break; + case INTR_ASYNC_EVENT: + mb[0] = MSW(stat); + mb[1] = rd_reg_word(®->mailbox1); + mb[2] = rd_reg_word(®->mailbox2); + mb[3] = rd_reg_word(®->mailbox3); + qla2x00_async_event(vha, rsp, mb); + break; + case INTR_RSP_QUE_UPDATE: + case INTR_RSP_QUE_UPDATE_83XX: + qla24xx_process_response_queue(vha, rsp); + break; + case INTR_ATIO_QUE_UPDATE_27XX: + case INTR_ATIO_QUE_UPDATE: + process_atio = true; + break; + case INTR_ATIO_RSP_QUE_UPDATE: + process_atio = true; + qla24xx_process_response_queue(vha, rsp); + break; + default: + ql_dbg(ql_dbg_async, vha, 0x5051, + "Unrecognized interrupt type (%d).\n", stat & 0xff); + break; + } + wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); + } while (0); + qla2x00_handle_mbx_completion(ha, status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (process_atio) { + spin_lock_irqsave(&ha->tgt.atio_lock, flags); + qlt_24xx_process_atio_queue(vha, 0); + spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); + } + + return IRQ_HANDLED; +} + +irqreturn_t +qla2xxx_msix_rsp_q(int irq, void *dev_id) +{ + struct qla_hw_data *ha; + struct qla_qpair *qpair; + + qpair = dev_id; + if (!qpair) { + ql_log(ql_log_info, NULL, 0x505b, + "%s: NULL response queue pointer.\n", __func__); + return IRQ_NONE; + } + ha = qpair->hw; + + queue_work(ha->wq, &qpair->q_work); + + return IRQ_HANDLED; +} + +irqreturn_t +qla2xxx_msix_rsp_q_hs(int irq, void *dev_id) +{ + struct qla_hw_data *ha; + struct qla_qpair *qpair; + struct device_reg_24xx __iomem *reg; + unsigned long flags; + + qpair = dev_id; + if (!qpair) { + ql_log(ql_log_info, NULL, 0x505b, + "%s: NULL response queue pointer.\n", __func__); + return IRQ_NONE; + } + ha = qpair->hw; + + reg = &ha->iobase->isp24; + spin_lock_irqsave(&ha->hardware_lock, flags); + wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + queue_work(ha->wq, &qpair->q_work); + + return IRQ_HANDLED; +} + +/* Interrupt handling helpers. 
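+ *
+ * msix_entries[] maps the driver's fixed MSI-X vector roles to their
+ * handlers: "default" services mailbox completions and asynchronous
+ * events, "rsp_q" services response queue updates, "atio_q" is the
+ * optional target-mode ATIO vector, and "qpair_multiq"/"qpair_multiq_hs"
+ * back the per-queue-pair vectors requested via qla25xx_request_irq().
+ * qla82xx_msix_entries[] is the reduced table used for ISP82xx (P3P)
+ * adapters.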
*/ + +struct qla_init_msix_entry { + const char *name; + irq_handler_t handler; +}; + +static const struct qla_init_msix_entry msix_entries[] = { + { "default", qla24xx_msix_default }, + { "rsp_q", qla24xx_msix_rsp_q }, + { "atio_q", qla83xx_msix_atio_q }, + { "qpair_multiq", qla2xxx_msix_rsp_q }, + { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs }, +}; + +static const struct qla_init_msix_entry qla82xx_msix_entries[] = { + { "qla2xxx (default)", qla82xx_msix_default }, + { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, +}; + +static int +qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) +{ + int i, ret; + struct qla_msix_entry *qentry; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + int min_vecs = QLA_BASE_VECTORS; + struct irq_affinity desc = { + .pre_vectors = QLA_BASE_VECTORS, + }; + + if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && + IS_ATIO_MSIX_CAPABLE(ha)) { + desc.pre_vectors++; + min_vecs++; + } + + if (USER_CTRL_IRQ(ha) || !ha->mqiobase) { + /* user wants to control IRQ setting for target mode */ + ret = pci_alloc_irq_vectors(ha->pdev, min_vecs, + min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)), + PCI_IRQ_MSIX); + } else + ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs, + min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)), + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, + &desc); + + if (ret < 0) { + ql_log(ql_log_fatal, vha, 0x00c7, + "MSI-X: Failed to enable support, " + "giving up -- %d/%d.\n", + ha->msix_count, ret); + goto msix_out; + } else if (ret < ha->msix_count) { + ql_log(ql_log_info, vha, 0x00c6, + "MSI-X: Using %d vectors\n", ret); + ha->msix_count = ret; + /* Recalculate queue values */ + if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) { + ha->max_req_queues = ha->msix_count - 1; + + /* ATIOQ needs 1 vector. That's 1 less QPair */ + if (QLA_TGT_MODE_ENABLED()) + ha->max_req_queues--; + + ha->max_rsp_queues = ha->max_req_queues; + + ha->max_qpairs = ha->max_req_queues - 1; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190, + "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); + } + } + vha->irq_offset = desc.pre_vectors; + ha->msix_entries = kcalloc(ha->msix_count, + sizeof(struct qla_msix_entry), + GFP_KERNEL); + if (!ha->msix_entries) { + ql_log(ql_log_fatal, vha, 0x00c8, + "Failed to allocate memory for ha->msix_entries.\n"); + ret = -ENOMEM; + goto free_irqs; + } + ha->flags.msix_enabled = 1; + + for (i = 0; i < ha->msix_count; i++) { + qentry = &ha->msix_entries[i]; + qentry->vector = pci_irq_vector(ha->pdev, i); + qentry->vector_base0 = i; + qentry->entry = i; + qentry->have_irq = 0; + qentry->in_use = 0; + qentry->handle = NULL; + } + + /* Enable MSI-X vectors for the base queue */ + for (i = 0; i < QLA_BASE_VECTORS; i++) { + qentry = &ha->msix_entries[i]; + qentry->handle = rsp; + rsp->msix = qentry; + scnprintf(qentry->name, sizeof(qentry->name), + "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name); + if (IS_P3P_TYPE(ha)) + ret = request_irq(qentry->vector, + qla82xx_msix_entries[i].handler, + 0, qla82xx_msix_entries[i].name, rsp); + else + ret = request_irq(qentry->vector, + msix_entries[i].handler, + 0, qentry->name, rsp); + if (ret) + goto msix_register_fail; + qentry->have_irq = 1; + qentry->in_use = 1; + } + + /* + * If target mode is enable, also request the vector for the ATIO + * queue. 
+ */ + if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && + IS_ATIO_MSIX_CAPABLE(ha)) { + qentry = &ha->msix_entries[QLA_ATIO_VECTOR]; + rsp->msix = qentry; + qentry->handle = rsp; + scnprintf(qentry->name, sizeof(qentry->name), + "qla2xxx%lu_%s", vha->host_no, + msix_entries[QLA_ATIO_VECTOR].name); + qentry->in_use = 1; + ret = request_irq(qentry->vector, + msix_entries[QLA_ATIO_VECTOR].handler, + 0, qentry->name, rsp); + qentry->have_irq = 1; + } + +msix_register_fail: + if (ret) { + ql_log(ql_log_fatal, vha, 0x00cb, + "MSI-X: unable to register handler -- %x/%d.\n", + qentry->vector, ret); + qla2x00_free_irqs(vha); + ha->mqenable = 0; + goto msix_out; + } + + /* Enable MSI-X vector for response queue update for queue 0 */ + if (IS_MQUE_CAPABLE(ha) && + (ha->msixbase && ha->mqiobase && ha->max_qpairs)) + ha->mqenable = 1; + else + ha->mqenable = 0; + + ql_dbg(ql_dbg_multiq, vha, 0xc005, + "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", + ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); + ql_dbg(ql_dbg_init, vha, 0x0055, + "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", + ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); + +msix_out: + return ret; + +free_irqs: + pci_free_irq_vectors(ha->pdev); + goto msix_out; +} + +int +qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) +{ + int ret = QLA_FUNCTION_FAILED; + device_reg_t *reg = ha->iobase; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + /* If possible, enable MSI-X. */ + if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) && + !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && + !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))) + goto skip_msi; + + if (ql2xenablemsix == 2) + goto skip_msix; + + if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && + (ha->pdev->subsystem_device == 0x7040 || + ha->pdev->subsystem_device == 0x7041 || + ha->pdev->subsystem_device == 0x1705)) { + ql_log(ql_log_warn, vha, 0x0034, + "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", + ha->pdev->subsystem_vendor, + ha->pdev->subsystem_device); + goto skip_msi; + } + + if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { + ql_log(ql_log_warn, vha, 0x0035, + "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", + ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); + goto skip_msix; + } + + ret = qla24xx_enable_msix(ha, rsp); + if (!ret) { + ql_dbg(ql_dbg_init, vha, 0x0036, + "MSI-X: Enabled (0x%X, 0x%X).\n", + ha->chip_revision, ha->fw_attributes); + goto clear_risc_ints; + } + +skip_msix: + + ql_log(ql_log_info, vha, 0x0037, + "Falling back-to MSI mode -- ret=%d.\n", ret); + + if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && + !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + goto skip_msi; + + ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI); + if (ret > 0) { + ql_dbg(ql_dbg_init, vha, 0x0038, + "MSI: Enabled.\n"); + ha->flags.msi_enabled = 1; + } else + ql_log(ql_log_warn, vha, 0x0039, + "Falling back-to INTa mode -- ret=%d.\n", ret); +skip_msi: + + /* Skip INTx on ISP82xx. */ + if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) + return QLA_FUNCTION_FAILED; + + ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, + ha->flags.msi_enabled ? 
0 : IRQF_SHARED, + QLA2XXX_DRIVER_NAME, rsp); + if (ret) { + ql_log(ql_log_warn, vha, 0x003a, + "Failed to reserve interrupt %d already in use.\n", + ha->pdev->irq); + goto fail; + } else if (!ha->flags.msi_enabled) { + ql_dbg(ql_dbg_init, vha, 0x0125, + "INTa mode: Enabled.\n"); + ha->flags.mr_intr_valid = 1; + /* Set max_qpair to 0, as MSI-X and MSI in not enabled */ + ha->max_qpairs = 0; + } + +clear_risc_ints: + if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) + goto fail; + + spin_lock_irq(&ha->hardware_lock); + wrt_reg_word(®->isp.semaphore, 0); + spin_unlock_irq(&ha->hardware_lock); + +fail: + return ret; +} + +void +qla2x00_free_irqs(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct rsp_que *rsp; + struct qla_msix_entry *qentry; + int i; + + /* + * We need to check that ha->rsp_q_map is valid in case we are called + * from a probe failure context. + */ + if (!ha->rsp_q_map || !ha->rsp_q_map[0]) + goto free_irqs; + rsp = ha->rsp_q_map[0]; + + if (ha->flags.msix_enabled) { + for (i = 0; i < ha->msix_count; i++) { + qentry = &ha->msix_entries[i]; + if (qentry->have_irq) { + irq_set_affinity_notifier(qentry->vector, NULL); + free_irq(pci_irq_vector(ha->pdev, i), qentry->handle); + } + } + kfree(ha->msix_entries); + ha->msix_entries = NULL; + ha->flags.msix_enabled = 0; + ql_dbg(ql_dbg_init, vha, 0x0042, + "Disabled MSI-X.\n"); + } else { + free_irq(pci_irq_vector(ha->pdev, 0), rsp); + } + +free_irqs: + pci_free_irq_vectors(ha->pdev); +} + +int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, + struct qla_msix_entry *msix, int vector_type) +{ + const struct qla_init_msix_entry *intr = &msix_entries[vector_type]; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + int ret; + + scnprintf(msix->name, sizeof(msix->name), + "qla2xxx%lu_qpair%d", vha->host_no, qpair->id); + ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair); + if (ret) { + ql_log(ql_log_fatal, vha, 0x00e6, + "MSI-X: Unable to register handler -- %x/%d.\n", + msix->vector, ret); + return ret; + } + msix->have_irq = 1; + msix->handle = qpair; + qla_mapq_init_qp_cpu_map(ha, msix, qpair); + return ret; +} diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c new file mode 100644 index 000000000..21ec32b4f --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -0,0 +1,7109 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#include "qla_def.h" +#include "qla_target.h" + +#include +#include + +#ifdef CONFIG_PPC +#define IS_PPCARCH true +#else +#define IS_PPCARCH false +#endif + +static struct mb_cmd_name { + uint16_t cmd; + const char *str; +} mb_str[] = { + {MBC_GET_PORT_DATABASE, "GPDB"}, + {MBC_GET_ID_LIST, "GIDList"}, + {MBC_GET_LINK_PRIV_STATS, "Stats"}, + {MBC_GET_RESOURCE_COUNTS, "ResCnt"}, +}; + +static const char *mb_to_str(uint16_t cmd) +{ + int i; + struct mb_cmd_name *e; + + for (i = 0; i < ARRAY_SIZE(mb_str); i++) { + e = mb_str + i; + if (cmd == e->cmd) + return e->str; + } + return "unknown"; +} + +static struct rom_cmd { + uint16_t cmd; +} rom_cmds[] = { + { MBC_LOAD_RAM }, + { MBC_EXECUTE_FIRMWARE }, + { MBC_READ_RAM_WORD }, + { MBC_MAILBOX_REGISTER_TEST }, + { MBC_VERIFY_CHECKSUM }, + { MBC_GET_FIRMWARE_VERSION }, + { MBC_LOAD_RISC_RAM }, + { MBC_DUMP_RISC_RAM }, + { MBC_LOAD_RISC_RAM_EXTENDED }, + { MBC_DUMP_RISC_RAM_EXTENDED }, + { MBC_WRITE_RAM_WORD_EXTENDED }, + { MBC_READ_RAM_EXTENDED }, + { MBC_GET_RESOURCE_COUNTS }, + { MBC_SET_FIRMWARE_OPTION }, 
+ { MBC_MID_INITIALIZE_FIRMWARE }, + { MBC_GET_FIRMWARE_STATE }, + { MBC_GET_MEM_OFFLOAD_CNTRL_STAT }, + { MBC_GET_RETRY_COUNT }, + { MBC_TRACE_CONTROL }, + { MBC_INITIALIZE_MULTIQ }, + { MBC_IOCB_COMMAND_A64 }, + { MBC_GET_ADAPTER_LOOP_ID }, + { MBC_READ_SFP }, + { MBC_SET_RNID_PARAMS }, + { MBC_GET_RNID_PARAMS }, + { MBC_GET_SET_ZIO_THRESHOLD }, +}; + +static int is_rom_cmd(uint16_t cmd) +{ + int i; + struct rom_cmd *wc; + + for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) { + wc = rom_cmds + i; + if (wc->cmd == cmd) + return 1; + } + + return 0; +} + +/* + * qla2x00_mailbox_command + * Issue mailbox command and waits for completion. + * + * Input: + * ha = adapter block pointer. + * mcp = driver internal mbx struct pointer. + * + * Output: + * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data. + * + * Returns: + * 0 : QLA_SUCCESS = cmd performed success + * 1 : QLA_FUNCTION_FAILED (error encountered) + * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered) + * + * Context: + * Kernel context. + */ +static int +qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) +{ + int rval, i; + unsigned long flags = 0; + device_reg_t *reg; + uint8_t abort_active, eeh_delay; + uint8_t io_lock_on; + uint16_t command = 0; + uint16_t *iptr; + __le16 __iomem *optr; + uint32_t cnt; + uint32_t mboxes; + unsigned long wait_time; + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + u32 chip_reset; + + + ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__); + + if (ha->pdev->error_state == pci_channel_io_perm_failure) { + ql_log(ql_log_warn, vha, 0x1001, + "PCI channel failed permanently, exiting.\n"); + return QLA_FUNCTION_TIMEOUT; + } + + if (vha->device_flags & DFLG_DEV_FAILED) { + ql_log(ql_log_warn, vha, 0x1002, + "Device in failed state, exiting.\n"); + return QLA_FUNCTION_TIMEOUT; + } + + /* if PCI error, then avoid mbx processing.*/ + if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) && + test_bit(UNLOADING, &base_vha->dpc_flags)) { + ql_log(ql_log_warn, vha, 0xd04e, + "PCI error, exiting.\n"); + return QLA_FUNCTION_TIMEOUT; + } + eeh_delay = 0; + reg = ha->iobase; + io_lock_on = base_vha->flags.init_done; + + rval = QLA_SUCCESS; + abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + chip_reset = ha->chip_reset; + + if (ha->flags.pci_channel_io_perm_failure) { + ql_log(ql_log_warn, vha, 0x1003, + "Perm failure on EEH timeout MBX, exiting.\n"); + return QLA_FUNCTION_TIMEOUT; + } + + if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) { + /* Setting Link-Down error */ + mcp->mb[0] = MBS_LINK_DOWN_ERROR; + ql_log(ql_log_warn, vha, 0x1004, + "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); + return QLA_FUNCTION_TIMEOUT; + } + + /* check if ISP abort is active and return cmd with timeout */ + if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || + test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || + test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) && + !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) { + ql_log(ql_log_info, vha, 0x1005, + "Cmd 0x%x aborted with timeout since ISP Abort is pending\n", + mcp->mb[0]); + return QLA_FUNCTION_TIMEOUT; + } + + atomic_inc(&ha->num_pend_mbx_stage1); + /* + * Wait for active mailbox commands to finish by waiting at most tov + * seconds. This is to serialize actual issuing of mailbox cmds during + * non ISP abort time. + */ + if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { + /* Timeout occurred. Return error. 
*/ + ql_log(ql_log_warn, vha, 0xd035, + "Cmd access timeout, cmd=0x%x, Exiting.\n", + mcp->mb[0]); + vha->hw_err_cnt++; + atomic_dec(&ha->num_pend_mbx_stage1); + return QLA_FUNCTION_TIMEOUT; + } + atomic_dec(&ha->num_pend_mbx_stage1); + if (ha->flags.purge_mbox || chip_reset != ha->chip_reset || + ha->flags.eeh_busy) { + ql_log(ql_log_warn, vha, 0xd035, + "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n", + ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]); + rval = QLA_ABORTED; + goto premature_exit; + } + + + /* Save mailbox command for debug */ + ha->mcp = mcp; + + ql_dbg(ql_dbg_mbx, vha, 0x1006, + "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); + + spin_lock_irqsave(&ha->hardware_lock, flags); + + if (ha->flags.purge_mbox || chip_reset != ha->chip_reset || + ha->flags.mbox_busy) { + rval = QLA_ABORTED; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + goto premature_exit; + } + ha->flags.mbox_busy = 1; + + /* Load mailbox registers. */ + if (IS_P3P_TYPE(ha)) + optr = ®->isp82.mailbox_in[0]; + else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) + optr = ®->isp24.mailbox0; + else + optr = MAILBOX_REG(ha, ®->isp, 0); + + iptr = mcp->mb; + command = mcp->mb[0]; + mboxes = mcp->out_mb; + + ql_dbg(ql_dbg_mbx, vha, 0x1111, + "Mailbox registers (OUT):\n"); + for (cnt = 0; cnt < ha->mbx_count; cnt++) { + if (IS_QLA2200(ha) && cnt == 8) + optr = MAILBOX_REG(ha, ®->isp, 8); + if (mboxes & BIT_0) { + ql_dbg(ql_dbg_mbx, vha, 0x1112, + "mbox[%d]<-0x%04x\n", cnt, *iptr); + wrt_reg_word(optr, *iptr); + } else { + wrt_reg_word(optr, 0); + } + + mboxes >>= 1; + optr++; + iptr++; + } + + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117, + "I/O Address = %p.\n", optr); + + /* Issue set host interrupt command to send cmd out. */ + ha->flags.mbox_int = 0; + clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + + /* Unlock mbx registers and wait for interrupt */ + ql_dbg(ql_dbg_mbx, vha, 0x100f, + "Going to unlock irq & waiting for interrupts. " + "jiffies=%lx.\n", jiffies); + + /* Wait for mbx cmd completion until timeout */ + atomic_inc(&ha->num_pend_mbx_stage2); + if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { + set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); + + if (IS_P3P_TYPE(ha)) + wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING); + else if (IS_FWI2_CAPABLE(ha)) + wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT); + else + wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + wait_time = jiffies; + if (!wait_for_completion_timeout(&ha->mbx_intr_comp, + mcp->tov * HZ)) { + ql_dbg(ql_dbg_mbx, vha, 0x117a, + "cmd=%x Timeout.\n", command); + spin_lock_irqsave(&ha->hardware_lock, flags); + clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (chip_reset != ha->chip_reset) { + eeh_delay = ha->flags.eeh_busy ? 1 : 0; + + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->flags.mbox_busy = 0; + spin_unlock_irqrestore(&ha->hardware_lock, + flags); + atomic_dec(&ha->num_pend_mbx_stage2); + rval = QLA_ABORTED; + goto premature_exit; + } + } else if (ha->flags.purge_mbox || + chip_reset != ha->chip_reset) { + eeh_delay = ha->flags.eeh_busy ? 
1 : 0; + + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->flags.mbox_busy = 0; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + atomic_dec(&ha->num_pend_mbx_stage2); + rval = QLA_ABORTED; + goto premature_exit; + } + + if (time_after(jiffies, wait_time + 5 * HZ)) + ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n", + command, jiffies_to_msecs(jiffies - wait_time)); + } else { + ql_dbg(ql_dbg_mbx, vha, 0x1011, + "Cmd=%x Polling Mode.\n", command); + + if (IS_P3P_TYPE(ha)) { + if (rd_reg_dword(®->isp82.hint) & + HINT_MBX_INT_PENDING) { + ha->flags.mbox_busy = 0; + spin_unlock_irqrestore(&ha->hardware_lock, + flags); + atomic_dec(&ha->num_pend_mbx_stage2); + ql_dbg(ql_dbg_mbx, vha, 0x1012, + "Pending mailbox timeout, exiting.\n"); + vha->hw_err_cnt++; + rval = QLA_FUNCTION_TIMEOUT; + goto premature_exit; + } + wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING); + } else if (IS_FWI2_CAPABLE(ha)) + wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT); + else + wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ + while (!ha->flags.mbox_int) { + if (ha->flags.purge_mbox || + chip_reset != ha->chip_reset) { + eeh_delay = ha->flags.eeh_busy ? 1 : 0; + + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->flags.mbox_busy = 0; + spin_unlock_irqrestore(&ha->hardware_lock, + flags); + atomic_dec(&ha->num_pend_mbx_stage2); + rval = QLA_ABORTED; + goto premature_exit; + } + + if (time_after(jiffies, wait_time)) + break; + + /* Check for pending interrupts. */ + qla2x00_poll(ha->rsp_q_map[0]); + + if (!ha->flags.mbox_int && + !(IS_QLA2200(ha) && + command == MBC_LOAD_RISC_RAM_EXTENDED)) + msleep(10); + } /* while */ + ql_dbg(ql_dbg_mbx, vha, 0x1013, + "Waited %d sec.\n", + (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); + } + atomic_dec(&ha->num_pend_mbx_stage2); + + /* Check whether we timed out */ + if (ha->flags.mbox_int) { + uint16_t *iptr2; + + ql_dbg(ql_dbg_mbx, vha, 0x1014, + "Cmd=%x completed.\n", command); + + /* Got interrupt. Clear the flag. */ + ha->flags.mbox_int = 0; + clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + + if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) { + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->flags.mbox_busy = 0; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* Setting Link-Down error */ + mcp->mb[0] = MBS_LINK_DOWN_ERROR; + ha->mcp = NULL; + rval = QLA_FUNCTION_FAILED; + ql_log(ql_log_warn, vha, 0xd048, + "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); + goto premature_exit; + } + + if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) { + ql_dbg(ql_dbg_mbx, vha, 0x11ff, + "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0], + MBS_COMMAND_COMPLETE); + rval = QLA_FUNCTION_FAILED; + } + + /* Load return mailbox registers. 
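+ * Only registers whose corresponding bit is set in mcp->in_mb are
+ * copied from ha->mailbox_out[] back into the caller's mcp->mb[].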
*/ + iptr2 = mcp->mb; + iptr = (uint16_t *)&ha->mailbox_out[0]; + mboxes = mcp->in_mb; + + ql_dbg(ql_dbg_mbx, vha, 0x1113, + "Mailbox registers (IN):\n"); + for (cnt = 0; cnt < ha->mbx_count; cnt++) { + if (mboxes & BIT_0) { + *iptr2 = *iptr; + ql_dbg(ql_dbg_mbx, vha, 0x1114, + "mbox[%d]->0x%04x\n", cnt, *iptr2); + } + + mboxes >>= 1; + iptr2++; + iptr++; + } + } else { + + uint16_t mb[8]; + uint32_t ictrl, host_status, hccr; + uint16_t w; + + if (IS_FWI2_CAPABLE(ha)) { + mb[0] = rd_reg_word(®->isp24.mailbox0); + mb[1] = rd_reg_word(®->isp24.mailbox1); + mb[2] = rd_reg_word(®->isp24.mailbox2); + mb[3] = rd_reg_word(®->isp24.mailbox3); + mb[7] = rd_reg_word(®->isp24.mailbox7); + ictrl = rd_reg_dword(®->isp24.ictrl); + host_status = rd_reg_dword(®->isp24.host_status); + hccr = rd_reg_dword(®->isp24.hccr); + + ql_log(ql_log_warn, vha, 0xd04c, + "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " + "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n", + command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3], + mb[7], host_status, hccr); + vha->hw_err_cnt++; + + } else { + mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0); + ictrl = rd_reg_word(®->isp.ictrl); + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, + "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " + "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]); + vha->hw_err_cnt++; + } + ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); + + /* Capture FW dump only, if PCI device active */ + if (!pci_channel_offline(vha->hw->pdev)) { + pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); + if (w == 0xffff || ictrl == 0xffffffff || + (chip_reset != ha->chip_reset)) { + /* This is special case if there is unload + * of driver happening and if PCI device go + * into bad state due to PCI error condition + * then only PCI ERR flag would be set. + * we will do premature exit for above case. + */ + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->flags.mbox_busy = 0; + spin_unlock_irqrestore(&ha->hardware_lock, + flags); + rval = QLA_FUNCTION_TIMEOUT; + goto premature_exit; + } + + /* Attempt to capture firmware dump for further + * anallysis of the current formware state. we do not + * need to do this if we are intentionally generating + * a dump + */ + if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) + qla2xxx_dump_fw(vha); + rval = QLA_FUNCTION_TIMEOUT; + } + } + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->flags.mbox_busy = 0; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* Clean up */ + ha->mcp = NULL; + + if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { + ql_dbg(ql_dbg_mbx, vha, 0x101a, + "Checking for additional resp interrupt.\n"); + + /* polling mode for non isp_abort commands. */ + qla2x00_poll(ha->rsp_q_map[0]); + } + + if (rval == QLA_FUNCTION_TIMEOUT && + mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) { + if (!io_lock_on || (mcp->flags & IOCTL_CMD) || + ha->flags.eeh_busy) { + /* not in dpc. schedule it for dpc to take over. */ + ql_dbg(ql_dbg_mbx, vha, 0x101b, + "Timeout, schedule isp_abort_needed.\n"); + + if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && + !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && + !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { + if (IS_QLA82XX(ha)) { + ql_dbg(ql_dbg_mbx, vha, 0x112a, + "disabling pause transmit on port " + "0 & 1.\n"); + qla82xx_wr_32(ha, + QLA82XX_CRB_NIU + 0x98, + CRB_NIU_XG_PAUSE_CTL_P0| + CRB_NIU_XG_PAUSE_CTL_P1); + } + ql_log(ql_log_info, base_vha, 0x101c, + "Mailbox cmd timeout occurred, cmd=0x%x, " + "mb[0]=0x%x, eeh_busy=0x%x. 
Scheduling ISP " + "abort.\n", command, mcp->mb[0], + ha->flags.eeh_busy); + vha->hw_err_cnt++; + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + } else if (current == ha->dpc_thread) { + /* call abort directly since we are in the DPC thread */ + ql_dbg(ql_dbg_mbx, vha, 0x101d, + "Timeout, calling abort_isp.\n"); + + if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && + !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && + !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { + if (IS_QLA82XX(ha)) { + ql_dbg(ql_dbg_mbx, vha, 0x112b, + "disabling pause transmit on port " + "0 & 1.\n"); + qla82xx_wr_32(ha, + QLA82XX_CRB_NIU + 0x98, + CRB_NIU_XG_PAUSE_CTL_P0| + CRB_NIU_XG_PAUSE_CTL_P1); + } + ql_log(ql_log_info, base_vha, 0x101e, + "Mailbox cmd timeout occurred, cmd=0x%x, " + "mb[0]=0x%x. Scheduling ISP abort ", + command, mcp->mb[0]); + vha->hw_err_cnt++; + set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); + clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + /* Allow next mbx cmd to come in. */ + complete(&ha->mbx_cmd_comp); + if (ha->isp_ops->abort_isp(vha) && + !ha->flags.eeh_busy) { + /* Failed. retry later. */ + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + } + clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); + ql_dbg(ql_dbg_mbx, vha, 0x101f, + "Finished abort_isp.\n"); + goto mbx_done; + } + } + } + +premature_exit: + /* Allow next mbx cmd to come in. */ + complete(&ha->mbx_cmd_comp); + +mbx_done: + if (rval == QLA_ABORTED) { + ql_log(ql_log_info, vha, 0xd035, + "Chip Reset in progress. Purging Mbox cmd=0x%x.\n", + mcp->mb[0]); + } else if (rval) { + if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) { + pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR, + dev_name(&ha->pdev->dev), 0x1020+0x800, + vha->host_no, rval); + mboxes = mcp->in_mb; + cnt = 4; + for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1) + if (mboxes & BIT_0) { + printk(" mb[%u]=%x", i, mcp->mb[i]); + cnt--; + } + pr_warn(" cmd=%x ****\n", command); + } + if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) { + ql_dbg(ql_dbg_mbx, vha, 0x1198, + "host_status=%#x intr_ctrl=%#x intr_status=%#x\n", + rd_reg_dword(®->isp24.host_status), + rd_reg_dword(®->isp24.ictrl), + rd_reg_dword(®->isp24.istatus)); + } else { + ql_dbg(ql_dbg_mbx, vha, 0x1206, + "ctrl_status=%#x ictrl=%#x istatus=%#x\n", + rd_reg_word(®->isp.ctrl_status), + rd_reg_word(®->isp.ictrl), + rd_reg_word(®->isp.istatus)); + } + } else { + ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); + } + + i = 500; + while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) { + /* + * The caller of this mailbox encounter pci error. + * Hold the thread until PCIE link reset complete to make + * sure caller does not unmap dma while recovery is + * in progress. 
+ */ + msleep(1); + i--; + } + return rval; +} + +int +qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, + uint32_t risc_code_size) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022, + "Entered %s.\n", __func__); + + if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { + mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; + mcp->mb[8] = MSW(risc_addr); + mcp->out_mb = MBX_8|MBX_0; + } else { + mcp->mb[0] = MBC_LOAD_RISC_RAM; + mcp->out_mb = MBX_0; + } + mcp->mb[1] = LSW(risc_addr); + mcp->mb[2] = MSW(req_dma); + mcp->mb[3] = LSW(req_dma); + mcp->mb[6] = MSW(MSD(req_dma)); + mcp->mb[7] = LSW(MSD(req_dma)); + mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; + if (IS_FWI2_CAPABLE(ha)) { + mcp->mb[4] = MSW(risc_code_size); + mcp->mb[5] = LSW(risc_code_size); + mcp->out_mb |= MBX_5|MBX_4; + } else { + mcp->mb[4] = LSW(risc_code_size); + mcp->out_mb |= MBX_4; + } + + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1023, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); + vha->hw_err_cnt++; + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024, + "Done %s.\n", __func__); + } + + return rval; +} + +#define NVME_ENABLE_FLAG BIT_3 +#define EDIF_HW_SUPPORT BIT_10 + +/* + * qla2x00_execute_fw + * Start adapter firmware. + * + * Input: + * ha = adapter block pointer. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + u8 semaphore = 0; +#define EXE_FW_FORCE_SEMAPHORE BIT_7 + u8 retry = 5; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025, + "Entered %s.\n", __func__); + +again: + mcp->mb[0] = MBC_EXECUTE_FIRMWARE; + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_0; + if (IS_FWI2_CAPABLE(ha)) { + mcp->mb[1] = MSW(risc_addr); + mcp->mb[2] = LSW(risc_addr); + mcp->mb[3] = 0; + mcp->mb[4] = 0; + mcp->mb[11] = 0; + + /* Enable BPM? 
*/ + if (ha->flags.lr_detected) { + mcp->mb[4] = BIT_0; + if (IS_BPM_RANGE_CAPABLE(ha)) + mcp->mb[4] |= + ha->lr_distance << LR_DIST_FW_POS; + } + + if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha))) + mcp->mb[4] |= NVME_ENABLE_FLAG; + + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + struct nvram_81xx *nv = ha->nvram; + /* set minimum speed if specified in nvram */ + if (nv->min_supported_speed >= 2 && + nv->min_supported_speed <= 5) { + mcp->mb[4] |= BIT_4; + mcp->mb[11] |= nv->min_supported_speed & 0xF; + mcp->out_mb |= MBX_11; + mcp->in_mb |= BIT_5; + vha->min_supported_speed = + nv->min_supported_speed; + } + + if (IS_PPCARCH) + mcp->mb[11] |= BIT_4; + } + + if (ha->flags.exlogins_enabled) + mcp->mb[4] |= ENABLE_EXTENDED_LOGIN; + + if (ha->flags.exchoffld_enabled) + mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD; + + if (semaphore) + mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE; + + mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11; + mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1; + } else { + mcp->mb[1] = LSW(risc_addr); + mcp->out_mb |= MBX_1; + if (IS_QLA2322(ha) || IS_QLA6322(ha)) { + mcp->mb[2] = 0; + mcp->out_mb |= MBX_2; + } + } + + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR && + mcp->mb[1] == 0x27 && retry) { + semaphore = 1; + retry--; + ql_dbg(ql_dbg_async, vha, 0x1026, + "Exe FW: force semaphore.\n"); + goto again; + } + + if (retry) { + retry--; + ql_dbg(ql_dbg_async, vha, 0x509d, + "Exe FW retry: mb[0]=%x retry[%d]\n", mcp->mb[0], retry); + goto again; + } + ql_dbg(ql_dbg_mbx, vha, 0x1026, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + vha->hw_err_cnt++; + return rval; + } + + if (!IS_FWI2_CAPABLE(ha)) + goto done; + + ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2]; + ql_dbg(ql_dbg_mbx, vha, 0x119a, + "fw_ability_mask=%x.\n", ha->fw_ability_mask); + ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]); + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1); + ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n", + ha->max_supported_speed == 0 ? "16Gps" : + ha->max_supported_speed == 1 ? "32Gps" : + ha->max_supported_speed == 2 ? "64Gps" : "unknown"); + if (vha->min_supported_speed) { + ha->min_supported_speed = mcp->mb[5] & + (BIT_0 | BIT_1 | BIT_2); + ql_dbg(ql_dbg_mbx, vha, 0x119c, + "min_supported_speed=%s.\n", + ha->min_supported_speed == 6 ? "64Gps" : + ha->min_supported_speed == 5 ? "32Gps" : + ha->min_supported_speed == 4 ? "16Gps" : + ha->min_supported_speed == 3 ? "8Gps" : + ha->min_supported_speed == 2 ? "4Gps" : "unknown"); + } + } + + if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) { + ha->flags.edif_hw = 1; + ql_log(ql_log_info, vha, 0xffff, + "%s: edif HW\n", __func__); + } + +done: + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028, + "Done %s.\n", __func__); + + return rval; +} + +/* + * qla_get_exlogin_status + * Get extended login status + * uses the memory offload control/status Mailbox + * + * Input: + * ha: adapter state pointer. + * fwopt: firmware options + * + * Returns: + * qla2x00 local function status + * + * Context: + * Kernel context. 
+ */ +#define FETCH_XLOGINS_STAT 0x8 +int +qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz, + uint16_t *ex_logins_cnt) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f, + "Entered %s\n", __func__); + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; + mcp->mb[1] = FETCH_XLOGINS_STAT; + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_10|MBX_4|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval); + } else { + *buf_sz = mcp->mb[4]; + *ex_logins_cnt = mcp->mb[10]; + + ql_log(ql_log_info, vha, 0x1190, + "buffer size 0x%x, exchange login count=%d\n", + mcp->mb[4], mcp->mb[10]); + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla_set_exlogin_mem_cfg + * set extended login memory configuration + * Mbx needs to be issues before init_cb is set + * + * Input: + * ha: adapter state pointer. + * buffer: buffer pointer + * phys_addr: physical address of buffer + * size: size of buffer + * TARGET_QUEUE_LOCK must be released + * ADAPTER_STATE_LOCK must be release + * + * Returns: + * qla2x00 local funxtion status code. + * + * Context: + * Kernel context. + */ +#define CONFIG_XLOGINS_MEM 0x9 +int +qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a, + "Entered %s.\n", __func__); + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; + mcp->mb[1] = CONFIG_XLOGINS_MEM; + mcp->mb[2] = MSW(phys_addr); + mcp->mb[3] = LSW(phys_addr); + mcp->mb[6] = MSW(MSD(phys_addr)); + mcp->mb[7] = LSW(MSD(phys_addr)); + mcp->mb[8] = MSW(ha->exlogin_size); + mcp->mb[9] = LSW(ha->exlogin_size); + mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_11|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x111b, + "EXlogin Failed=%x. MB0=%x MB11=%x\n", + rval, mcp->mb[0], mcp->mb[11]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla_get_exchoffld_status + * Get exchange offload status + * uses the memory offload control/status Mailbox + * + * Input: + * ha: adapter state pointer. + * fwopt: firmware options + * + * Returns: + * qla2x00 local function status + * + * Context: + * Kernel context. 
+ */ +#define FETCH_XCHOFFLD_STAT 0x2 +int +qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz, + uint16_t *ex_logins_cnt) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019, + "Entered %s\n", __func__); + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; + mcp->mb[1] = FETCH_XCHOFFLD_STAT; + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_10|MBX_4|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval); + } else { + *buf_sz = mcp->mb[4]; + *ex_logins_cnt = mcp->mb[10]; + + ql_log(ql_log_info, vha, 0x118e, + "buffer size 0x%x, exchange offload count=%d\n", + mcp->mb[4], mcp->mb[10]); + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla_set_exchoffld_mem_cfg + * Set exchange offload memory configuration + * Mbx needs to be issues before init_cb is set + * + * Input: + * ha: adapter state pointer. + * buffer: buffer pointer + * phys_addr: physical address of buffer + * size: size of buffer + * TARGET_QUEUE_LOCK must be released + * ADAPTER_STATE_LOCK must be release + * + * Returns: + * qla2x00 local funxtion status code. + * + * Context: + * Kernel context. + */ +#define CONFIG_XCHOFFLD_MEM 0x3 +int +qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157, + "Entered %s.\n", __func__); + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; + mcp->mb[1] = CONFIG_XCHOFFLD_MEM; + mcp->mb[2] = MSW(ha->exchoffld_buf_dma); + mcp->mb[3] = LSW(ha->exchoffld_buf_dma); + mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma)); + mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma)); + mcp->mb[8] = MSW(ha->exchoffld_size); + mcp->mb[9] = LSW(ha->exchoffld_size); + mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_11|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla2x00_get_fw_version + * Get firmware version. + * + * Input: + * ha: adapter state pointer. + * major: pointer for major number. + * minor: pointer for minor number. + * subminor: pointer for subminor number. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. 
+ */ +int +qla2x00_get_fw_version(scsi_qla_host_t *vha) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha)) + mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8; + if (IS_FWI2_CAPABLE(ha)) + mcp->in_mb |= MBX_17|MBX_16|MBX_15; + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + mcp->in_mb |= + MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18| + MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7; + + mcp->flags = 0; + mcp->tov = MBX_TOV_SECONDS; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) + goto failed; + + /* Return mailbox data. */ + ha->fw_major_version = mcp->mb[1]; + ha->fw_minor_version = mcp->mb[2]; + ha->fw_subminor_version = mcp->mb[3]; + ha->fw_attributes = mcp->mb[6]; + if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw)) + ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */ + else + ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4]; + + if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { + ha->mpi_version[0] = mcp->mb[10] & 0xff; + ha->mpi_version[1] = mcp->mb[11] >> 8; + ha->mpi_version[2] = mcp->mb[11] & 0xff; + ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13]; + ha->phy_version[0] = mcp->mb[8] & 0xff; + ha->phy_version[1] = mcp->mb[9] >> 8; + ha->phy_version[2] = mcp->mb[9] & 0xff; + } + + if (IS_FWI2_CAPABLE(ha)) { + ha->fw_attributes_h = mcp->mb[15]; + ha->fw_attributes_ext[0] = mcp->mb[16]; + ha->fw_attributes_ext[1] = mcp->mb[17]; + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139, + "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n", + __func__, mcp->mb[15], mcp->mb[6]); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f, + "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", + __func__, mcp->mb[17], mcp->mb[16]); + + if (ha->fw_attributes_h & 0x4) + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d, + "%s: Firmware supports Extended Login 0x%x\n", + __func__, ha->fw_attributes_h); + + if (ha->fw_attributes_h & 0x8) + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191, + "%s: Firmware supports Exchange Offload 0x%x\n", + __func__, ha->fw_attributes_h); + + /* + * FW supports nvme and driver load parameter requested nvme. + * BIT 26 of fw_attributes indicates NVMe support. 
+ */ + if ((ha->fw_attributes_h & + (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) && + ql2xnvmeenable) { + if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST) + vha->flags.nvme_first_burst = 1; + + vha->flags.nvme_enabled = 1; + ql_log(ql_log_info, vha, 0xd302, + "%s: FC-NVMe is Enabled (0x%x)\n", + __func__, ha->fw_attributes_h); + } + + /* BIT_13 of Extended FW Attributes informs about NVMe2 support */ + if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) { + ql_log(ql_log_info, vha, 0xd302, + "Firmware supports NVMe2 0x%x\n", + ha->fw_attributes_ext[0]); + vha->flags.nvme2_enabled = 1; + } + + if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable && + (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) { + ha->flags.edif_enabled = 1; + ql_log(ql_log_info, vha, 0xffff, + "%s: edif is enabled\n", __func__); + } + } + + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + ha->serdes_version[0] = mcp->mb[7] & 0xff; + ha->serdes_version[1] = mcp->mb[8] >> 8; + ha->serdes_version[2] = mcp->mb[8] & 0xff; + ha->mpi_version[0] = mcp->mb[10] & 0xff; + ha->mpi_version[1] = mcp->mb[11] >> 8; + ha->mpi_version[2] = mcp->mb[11] & 0xff; + ha->pep_version[0] = mcp->mb[13] & 0xff; + ha->pep_version[1] = mcp->mb[14] >> 8; + ha->pep_version[2] = mcp->mb[14] & 0xff; + ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18]; + ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20]; + ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22]; + ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24]; + if (IS_QLA28XX(ha)) { + if (mcp->mb[16] & BIT_10) + ha->flags.secure_fw = 1; + + ql_log(ql_log_info, vha, 0xffff, + "Secure Flash Update in FW: %s\n", + (ha->flags.secure_fw) ? "Supported" : + "Not Supported"); + } + + if (ha->flags.scm_supported_a && + (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) { + ha->flags.scm_supported_f = 1; + ha->sf_init_cb->flags |= cpu_to_le16(BIT_13); + } + ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n", + (ha->flags.scm_supported_f) ? "Supported" : + "Not Supported"); + + if (vha->flags.nvme2_enabled) { + /* set BIT_15 of special feature control block for SLER */ + ha->sf_init_cb->flags |= cpu_to_le16(BIT_15); + /* set BIT_14 of special feature control block for PI CTRL*/ + ha->sf_init_cb->flags |= cpu_to_le16(BIT_14); + } + } + +failed: + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval); + } else { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b, + "Done %s.\n", __func__); + } + return rval; +} + +/* + * qla2x00_get_fw_options + * Set firmware options. + * + * Input: + * ha = adapter block pointer. + * fwopt = pointer for firmware options. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval); + } else { + fwopts[0] = mcp->mb[0]; + fwopts[1] = mcp->mb[1]; + fwopts[2] = mcp->mb[2]; + fwopts[3] = mcp->mb[3]; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e, + "Done %s.\n", __func__); + } + + return rval; +} + + +/* + * qla2x00_set_fw_options + * Set firmware options. 
+ * + * Input: + * ha = adapter block pointer. + * fwopt = pointer for firmware options. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; + mcp->mb[1] = fwopts[1]; + mcp->mb[2] = fwopts[2]; + mcp->mb[3] = fwopts[3]; + mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + if (IS_FWI2_CAPABLE(vha->hw)) { + mcp->in_mb |= MBX_1; + mcp->mb[10] = fwopts[10]; + mcp->out_mb |= MBX_10; + } else { + mcp->mb[10] = fwopts[10]; + mcp->mb[11] = fwopts[11]; + mcp->mb[12] = 0; /* Undocumented, but used */ + mcp->out_mb |= MBX_12|MBX_11|MBX_10; + } + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + fwopts[0] = mcp->mb[0]; + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x1030, + "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]); + } else { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla2x00_mbx_reg_test + * Mailbox register wrap test. + * + * Input: + * ha = adapter block pointer. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_mbx_reg_test(scsi_qla_host_t *vha) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; + mcp->mb[1] = 0xAAAA; + mcp->mb[2] = 0x5555; + mcp->mb[3] = 0xAA55; + mcp->mb[4] = 0x55AA; + mcp->mb[5] = 0xA5A5; + mcp->mb[6] = 0x5A5A; + mcp->mb[7] = 0x2525; + mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval == QLA_SUCCESS) { + if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 || + mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA) + rval = QLA_FUNCTION_FAILED; + if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A || + mcp->mb[7] != 0x2525) + rval = QLA_FUNCTION_FAILED; + } + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); + vha->hw_err_cnt++; + } else { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla2x00_verify_checksum + * Verify firmware checksum. + * + * Input: + * ha = adapter block pointer. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. 
+ */ +int +qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_VERIFY_CHECKSUM; + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_0; + if (IS_FWI2_CAPABLE(vha->hw)) { + mcp->mb[1] = MSW(risc_addr); + mcp->mb[2] = LSW(risc_addr); + mcp->out_mb |= MBX_2|MBX_1; + mcp->in_mb |= MBX_2|MBX_1; + } else { + mcp->mb[1] = LSW(risc_addr); + mcp->out_mb |= MBX_1; + mcp->in_mb |= MBX_1; + } + + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1036, + "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ? + (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla2x00_issue_iocb + * Issue IOCB using mailbox command + * + * Input: + * ha = adapter state pointer. + * buffer = buffer pointer. + * phys_addr = physical address of buffer. + * size = size of buffer. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, + dma_addr_t phys_addr, size_t size, uint32_t tov) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!vha->hw->flags.fw_started) + return QLA_INVALID_COMMAND; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_IOCB_COMMAND_A64; + mcp->mb[1] = 0; + mcp->mb[2] = MSW(LSD(phys_addr)); + mcp->mb[3] = LSW(LSD(phys_addr)); + mcp->mb[6] = MSW(MSD(phys_addr)); + mcp->mb[7] = LSW(MSD(phys_addr)); + mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = tov; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval); + } else { + sts_entry_t *sts_entry = buffer; + + /* Mask reserved bits. */ + sts_entry->entry_status &= + IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK; + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a, + "Done %s (status=%x).\n", __func__, + sts_entry->entry_status); + } + + return rval; +} + +int +qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr, + size_t size) +{ + return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size, + MBX_TOV_SECONDS); +} + +/* + * qla2x00_abort_command + * Abort command aborts a specified IOCB. + * + * Input: + * ha = adapter block pointer. + * sp = SB structure pointer. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. 
+ */ +int +qla2x00_abort_command(srb_t *sp) +{ + unsigned long flags = 0; + int rval; + uint32_t handle = 0; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + fc_port_t *fcport = sp->fcport; + scsi_qla_host_t *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + struct req_que *req; + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b, + "Entered %s.\n", __func__); + + if (sp->qpair) + req = sp->qpair->req; + else + req = vha->req; + + spin_lock_irqsave(&ha->hardware_lock, flags); + for (handle = 1; handle < req->num_outstanding_cmds; handle++) { + if (req->outstanding_cmds[handle] == sp) + break; + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (handle == req->num_outstanding_cmds) { + /* command not found */ + return QLA_FUNCTION_FAILED; + } + + mcp->mb[0] = MBC_ABORT_COMMAND; + if (HAS_EXTENDED_IDS(ha)) + mcp->mb[1] = fcport->loop_id; + else + mcp->mb[1] = fcport->loop_id << 8; + mcp->mb[2] = (uint16_t)handle; + mcp->mb[3] = (uint16_t)(handle >> 16); + mcp->mb[6] = (uint16_t)cmd->device->lun; + mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag) +{ + int rval, rval2; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + scsi_qla_host_t *vha; + + vha = fcport->vha; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_ABORT_TARGET; + mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; + if (HAS_EXTENDED_IDS(vha->hw)) { + mcp->mb[1] = fcport->loop_id; + mcp->mb[10] = 0; + mcp->out_mb |= MBX_10; + } else { + mcp->mb[1] = fcport->loop_id << 8; + } + mcp->mb[2] = vha->hw->loop_reset_delay; + mcp->mb[9] = vha->vp_idx; + + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f, + "Failed=%x.\n", rval); + } + + /* Issue marker IOCB. */ + rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0, + MK_SYNC_ID); + if (rval2 != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1040, + "Failed to issue marker IOCB (%x).\n", rval2); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag) +{ + int rval, rval2; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + scsi_qla_host_t *vha; + + vha = fcport->vha; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_LUN_RESET; + mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; + if (HAS_EXTENDED_IDS(vha->hw)) + mcp->mb[1] = fcport->loop_id; + else + mcp->mb[1] = fcport->loop_id << 8; + mcp->mb[2] = (u32)l; + mcp->mb[3] = 0; + mcp->mb[9] = vha->vp_idx; + + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval); + } + + /* Issue marker IOCB. 
*/ + rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l, + MK_SYNC_ID_LUN); + if (rval2 != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1044, + "Failed to issue marker IOCB (%x).\n", rval2); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla2x00_get_adapter_id + * Get adapter ID and topology. + * + * Input: + * ha = adapter block pointer. + * id = pointer for loop ID. + * al_pa = pointer for AL_PA. + * area = pointer for area. + * domain = pointer for domain. + * top = pointer for topology. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, + uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; + mcp->mb[9] = vha->vp_idx; + mcp->out_mb = MBX_9|MBX_0; + mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + if (IS_CNA_CAPABLE(vha->hw)) + mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; + if (IS_FWI2_CAPABLE(vha->hw)) + mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16; + if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) + mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23; + + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (mcp->mb[0] == MBS_COMMAND_ERROR) + rval = QLA_COMMAND_ERROR; + else if (mcp->mb[0] == MBS_INVALID_COMMAND) + rval = QLA_INVALID_COMMAND; + + /* Return data. */ + *id = mcp->mb[1]; + *al_pa = LSB(mcp->mb[2]); + *area = MSB(mcp->mb[2]); + *domain = LSB(mcp->mb[3]); + *top = mcp->mb[6]; + *sw_cap = mcp->mb[7]; + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048, + "Done %s.\n", __func__); + + if (IS_CNA_CAPABLE(vha->hw)) { + vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; + vha->fcoe_fcf_idx = mcp->mb[10]; + vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8; + vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff; + vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8; + vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff; + vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8; + vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff; + } + /* If FA-WWN supported */ + if (IS_FAWWN_CAPABLE(vha->hw)) { + if (mcp->mb[7] & BIT_14) { + vha->port_name[0] = MSB(mcp->mb[16]); + vha->port_name[1] = LSB(mcp->mb[16]); + vha->port_name[2] = MSB(mcp->mb[17]); + vha->port_name[3] = LSB(mcp->mb[17]); + vha->port_name[4] = MSB(mcp->mb[18]); + vha->port_name[5] = LSB(mcp->mb[18]); + vha->port_name[6] = MSB(mcp->mb[19]); + vha->port_name[7] = LSB(mcp->mb[19]); + fc_host_port_name(vha->host) = + wwn_to_u64(vha->port_name); + ql_dbg(ql_dbg_mbx, vha, 0x10ca, + "FA-WWN acquired %016llx\n", + wwn_to_u64(vha->port_name)); + } + } + + if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) { + vha->bbcr = mcp->mb[15]; + if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) { + ql_log(ql_log_info, vha, 0x11a4, + "SCM: EDC ELS completed, flags 0x%x\n", + mcp->mb[21]); + } + if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) { + vha->hw->flags.scm_enabled = 1; + vha->scm_fabric_connection_flags |= + SCM_FLAG_RDF_COMPLETED; + ql_log(ql_log_info, vha, 0x11a5, + "SCM: RDF ELS completed, flags 0x%x\n", + mcp->mb[23]); + } + } + } + + return rval; +} + +/* + * 
qla2x00_get_retry_cnt + * Get current firmware login retry count and delay. + * + * Input: + * ha = adapter block pointer. + * retry_cnt = pointer to login retry count. + * tov = pointer to login timeout value. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov, + uint16_t *r_a_tov) +{ + int rval; + uint16_t ratov; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_GET_RETRY_COUNT; + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x104a, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + /* Convert returned data and check our values. */ + *r_a_tov = mcp->mb[3] / 2; + ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */ + if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) { + /* Update to the larger values */ + *retry_cnt = (uint8_t)mcp->mb[1]; + *tov = ratov; + } + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b, + "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov); + } + + return rval; +} + +/* + * qla2x00_init_firmware + * Initialize adapter firmware. + * + * Input: + * ha = adapter block pointer. + * dptr = Initialization control block pointer. + * size = size of initialization control block. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c, + "Entered %s.\n", __func__); + + if (IS_P3P_TYPE(ha) && ql2xdbwr) + qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, + (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16))); + + if (ha->flags.npiv_supported) + mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE; + else + mcp->mb[0] = MBC_INITIALIZE_FIRMWARE; + + mcp->mb[1] = 0; + mcp->mb[2] = MSW(ha->init_cb_dma); + mcp->mb[3] = LSW(ha->init_cb_dma); + mcp->mb[6] = MSW(MSD(ha->init_cb_dma)); + mcp->mb[7] = LSW(MSD(ha->init_cb_dma)); + mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + if (ha->ex_init_cb && ha->ex_init_cb->ex_version) { + mcp->mb[1] = BIT_0; + mcp->mb[10] = MSW(ha->ex_init_cb_dma); + mcp->mb[11] = LSW(ha->ex_init_cb_dma); + mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma)); + mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma)); + mcp->mb[14] = sizeof(*ha->ex_init_cb); + mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10; + } + + if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) { + mcp->mb[1] |= BIT_1; + mcp->mb[16] = MSW(ha->sf_init_cb_dma); + mcp->mb[17] = LSW(ha->sf_init_cb_dma); + mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma)); + mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma)); + mcp->mb[15] = sizeof(*ha->sf_init_cb); + mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15; + } + + /* 1 and 2 should normally be captured. */ + mcp->in_mb = MBX_2|MBX_1|MBX_0; + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) + /* mb3 is additional info about the installed SFP. 
*/ + mcp->in_mb |= MBX_3; + mcp->buf_size = size; + mcp->flags = MBX_DMA_OUT; + mcp->tov = MBX_TOV_SECONDS; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x104d, + "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n", + rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]); + if (ha->init_cb) { + ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, + 0x0104d, ha->init_cb, sizeof(*ha->init_cb)); + } + if (ha->ex_init_cb && ha->ex_init_cb->ex_version) { + ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, + 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb)); + } + } else { + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + if (mcp->mb[2] == 6 || mcp->mb[3] == 2) + ql_dbg(ql_dbg_mbx, vha, 0x119d, + "Invalid SFP/Validation Failed\n"); + } + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e, + "Done %s.\n", __func__); + } + + return rval; +} + + +/* + * qla2x00_get_port_database + * Issue normal/enhanced get port database mailbox command + * and copy device name as necessary. + * + * Input: + * ha = adapter state pointer. + * dev = structure pointer. + * opt = enhanced cmd option byte. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + port_database_t *pd; + struct port_database_24xx *pd24; + dma_addr_t pd_dma; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f, + "Entered %s.\n", __func__); + + pd24 = NULL; + pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); + if (pd == NULL) { + ql_log(ql_log_warn, vha, 0x1050, + "Failed to allocate port database structure.\n"); + fcport->query = 0; + return QLA_MEMORY_ALLOC_FAILED; + } + + mcp->mb[0] = MBC_GET_PORT_DATABASE; + if (opt != 0 && !IS_FWI2_CAPABLE(ha)) + mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE; + mcp->mb[2] = MSW(pd_dma); + mcp->mb[3] = LSW(pd_dma); + mcp->mb[6] = MSW(MSD(pd_dma)); + mcp->mb[7] = LSW(MSD(pd_dma)); + mcp->mb[9] = vha->vp_idx; + mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; + mcp->in_mb = MBX_0; + if (IS_FWI2_CAPABLE(ha)) { + mcp->mb[1] = fcport->loop_id; + mcp->mb[10] = opt; + mcp->out_mb |= MBX_10|MBX_1; + mcp->in_mb |= MBX_1; + } else if (HAS_EXTENDED_IDS(ha)) { + mcp->mb[1] = fcport->loop_id; + mcp->mb[10] = opt; + mcp->out_mb |= MBX_10|MBX_1; + } else { + mcp->mb[1] = fcport->loop_id << 8 | opt; + mcp->out_mb |= MBX_1; + } + mcp->buf_size = IS_FWI2_CAPABLE(ha) ? + PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE; + mcp->flags = MBX_DMA_IN; + mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) + goto gpd_error_out; + + if (IS_FWI2_CAPABLE(ha)) { + uint64_t zero = 0; + u8 current_login_state, last_login_state; + + pd24 = (struct port_database_24xx *) pd; + + /* Check for logged in state. */ + if (NVME_TARGET(ha, fcport)) { + current_login_state = pd24->current_login_state >> 4; + last_login_state = pd24->last_login_state >> 4; + } else { + current_login_state = pd24->current_login_state & 0xf; + last_login_state = pd24->last_login_state & 0xf; + } + fcport->current_login_state = pd24->current_login_state; + fcport->last_login_state = pd24->last_login_state; + + /* Check for logged in state. 
*/ + if (current_login_state != PDS_PRLI_COMPLETE && + last_login_state != PDS_PRLI_COMPLETE) { + ql_dbg(ql_dbg_mbx, vha, 0x119a, + "Unable to verify login-state (%x/%x) for loop_id %x.\n", + current_login_state, last_login_state, + fcport->loop_id); + rval = QLA_FUNCTION_FAILED; + + if (!fcport->query) + goto gpd_error_out; + } + + if (fcport->loop_id == FC_NO_LOOP_ID || + (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && + memcmp(fcport->port_name, pd24->port_name, 8))) { + /* We lost the device mid way. */ + rval = QLA_NOT_LOGGED_IN; + goto gpd_error_out; + } + + /* Names are little-endian. */ + memcpy(fcport->node_name, pd24->node_name, WWN_SIZE); + memcpy(fcport->port_name, pd24->port_name, WWN_SIZE); + + /* Get port_id of device. */ + fcport->d_id.b.domain = pd24->port_id[0]; + fcport->d_id.b.area = pd24->port_id[1]; + fcport->d_id.b.al_pa = pd24->port_id[2]; + fcport->d_id.b.rsvd_1 = 0; + + /* If not target must be initiator or unknown type. */ + if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0) + fcport->port_type = FCT_INITIATOR; + else + fcport->port_type = FCT_TARGET; + + /* Passback COS information. */ + fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ? + FC_COS_CLASS2 : FC_COS_CLASS3; + + if (pd24->prli_svc_param_word_3[0] & BIT_7) + fcport->flags |= FCF_CONF_COMP_SUPPORTED; + } else { + uint64_t zero = 0; + + /* Check for logged in state. */ + if (pd->master_state != PD_STATE_PORT_LOGGED_IN && + pd->slave_state != PD_STATE_PORT_LOGGED_IN) { + ql_dbg(ql_dbg_mbx, vha, 0x100a, + "Unable to verify login-state (%x/%x) - " + "portid=%02x%02x%02x.\n", pd->master_state, + pd->slave_state, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + rval = QLA_FUNCTION_FAILED; + goto gpd_error_out; + } + + if (fcport->loop_id == FC_NO_LOOP_ID || + (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && + memcmp(fcport->port_name, pd->port_name, 8))) { + /* We lost the device mid way. */ + rval = QLA_NOT_LOGGED_IN; + goto gpd_error_out; + } + + /* Names are little-endian. */ + memcpy(fcport->node_name, pd->node_name, WWN_SIZE); + memcpy(fcport->port_name, pd->port_name, WWN_SIZE); + + /* Get port_id of device. */ + fcport->d_id.b.domain = pd->port_id[0]; + fcport->d_id.b.area = pd->port_id[3]; + fcport->d_id.b.al_pa = pd->port_id[2]; + fcport->d_id.b.rsvd_1 = 0; + + /* If not target must be initiator or unknown type. */ + if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) + fcport->port_type = FCT_INITIATOR; + else + fcport->port_type = FCT_TARGET; + + /* Passback COS information. */ + fcport->supported_classes = (pd->options & BIT_4) ? 
+ FC_COS_CLASS2 : FC_COS_CLASS3; + } + +gpd_error_out: + dma_pool_free(ha->s_dma_pool, pd, pd_dma); + fcport->query = 0; + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1052, + "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, + mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle, + struct port_database_24xx *pdb) +{ + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + dma_addr_t pdb_dma; + int rval; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115, + "Entered %s.\n", __func__); + + memset(pdb, 0, sizeof(*pdb)); + + pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb, + sizeof(*pdb), DMA_FROM_DEVICE); + if (!pdb_dma) { + ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } + + mcp->mb[0] = MBC_GET_PORT_DATABASE; + mcp->mb[1] = nport_handle; + mcp->mb[2] = MSW(LSD(pdb_dma)); + mcp->mb[3] = LSW(LSD(pdb_dma)); + mcp->mb[6] = MSW(MSD(pdb_dma)); + mcp->mb[7] = LSW(MSD(pdb_dma)); + mcp->mb[9] = 0; + mcp->mb[10] = 0; + mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->buf_size = sizeof(*pdb); + mcp->flags = MBX_DMA_IN; + mcp->tov = vha->hw->login_timeout * 2; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x111a, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b, + "Done %s.\n", __func__); + } + + dma_unmap_single(&vha->hw->pdev->dev, pdb_dma, + sizeof(*pdb), DMA_FROM_DEVICE); + + return rval; +} + +/* + * qla2x00_get_firmware_state + * Get adapter firmware state. + * + * Input: + * ha = adapter block pointer. + * dptr = pointer for firmware state. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, + "Entered %s.\n", __func__); + + if (!ha->flags.fw_started) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_GET_FIRMWARE_STATE; + mcp->out_mb = MBX_0; + if (IS_FWI2_CAPABLE(vha->hw)) + mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + else + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + /* Return firmware states. */ + states[0] = mcp->mb[1]; + if (IS_FWI2_CAPABLE(vha->hw)) { + states[1] = mcp->mb[2]; + states[2] = mcp->mb[3]; /* SFP info */ + states[3] = mcp->mb[4]; + states[4] = mcp->mb[5]; + states[5] = mcp->mb[6]; /* DPORT status */ + } + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); + } else { + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + if (mcp->mb[2] == 6 || mcp->mb[3] == 2) + ql_dbg(ql_dbg_mbx, vha, 0x119e, + "Invalid SFP/Validation Failed\n"); + } + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla2x00_get_port_name + * Issue get port name mailbox command. + * Returned name is in big endian format. + * + * Input: + * ha = adapter block pointer. + * loop_id = loop ID of device. + * name = pointer for name. + * TARGET_QUEUE_LOCK must be released. 
+ * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, + uint8_t opt) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_GET_PORT_NAME; + mcp->mb[9] = vha->vp_idx; + mcp->out_mb = MBX_9|MBX_1|MBX_0; + if (HAS_EXTENDED_IDS(vha->hw)) { + mcp->mb[1] = loop_id; + mcp->mb[10] = opt; + mcp->out_mb |= MBX_10; + } else { + mcp->mb[1] = loop_id << 8 | opt; + } + + mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); + } else { + if (name != NULL) { + /* This function returns name in big endian. */ + name[0] = MSB(mcp->mb[2]); + name[1] = LSB(mcp->mb[2]); + name[2] = MSB(mcp->mb[3]); + name[3] = LSB(mcp->mb[3]); + name[4] = MSB(mcp->mb[6]); + name[5] = LSB(mcp->mb[6]); + name[6] = MSB(mcp->mb[7]); + name[7] = LSB(mcp->mb[7]); + } + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla24xx_link_initialization + * Issue link initialization mailbox command. + * + * Input: + * ha = adapter block pointer. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla24xx_link_initialize(scsi_qla_host_t *vha) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152, + "Entered %s.\n", __func__); + + if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_LINK_INITIALIZATION; + mcp->mb[1] = BIT_4; + if (vha->hw->operating_mode == LOOP) + mcp->mb[1] |= BIT_6; + else + mcp->mb[1] |= BIT_5; + mcp->mb[2] = 0; + mcp->mb[3] = 0; + mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla2x00_lip_reset + * Issue LIP reset mailbox command. + * + * Input: + * ha = adapter block pointer. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_lip_reset(scsi_qla_host_t *vha) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_disc, vha, 0x105a, + "Entered %s.\n", __func__); + + if (IS_CNA_CAPABLE(vha->hw)) { + /* Logout across all FCFs. 
*/ + mcp->mb[0] = MBC_LIP_FULL_LOGIN; + mcp->mb[1] = BIT_1; + mcp->mb[2] = 0; + mcp->out_mb = MBX_2|MBX_1|MBX_0; + } else if (IS_FWI2_CAPABLE(vha->hw)) { + mcp->mb[0] = MBC_LIP_FULL_LOGIN; + mcp->mb[1] = BIT_4; + mcp->mb[2] = 0; + mcp->mb[3] = vha->hw->loop_reset_delay; + mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; + } else { + mcp->mb[0] = MBC_LIP_RESET; + mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; + if (HAS_EXTENDED_IDS(vha->hw)) { + mcp->mb[1] = 0x00ff; + mcp->mb[10] = 0; + mcp->out_mb |= MBX_10; + } else { + mcp->mb[1] = 0xff00; + } + mcp->mb[2] = vha->hw->loop_reset_delay; + mcp->mb[3] = 0; + } + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); + } else { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla2x00_send_sns + * Send SNS command. + * + * Input: + * ha = adapter block pointer. + * sns = pointer for command. + * cmd_size = command size. + * buf_size = response/command size. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, + uint16_t cmd_size, size_t buf_size) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, + "Entered %s.\n", __func__); + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, + "Retry cnt=%d ratov=%d total tov=%d.\n", + vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); + + mcp->mb[0] = MBC_SEND_SNS_COMMAND; + mcp->mb[1] = cmd_size; + mcp->mb[2] = MSW(sns_phys_address); + mcp->mb[3] = LSW(sns_phys_address); + mcp->mb[6] = MSW(MSD(sns_phys_address)); + mcp->mb[7] = LSW(MSD(sns_phys_address)); + mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0|MBX_1; + mcp->buf_size = buf_size; + mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; + mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x105f, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); + } else { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, + uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) +{ + int rval; + + struct logio_entry_24xx *lg; + dma_addr_t lg_dma; + uint32_t iop[2]; + struct qla_hw_data *ha = vha->hw; + struct req_que *req; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, + "Entered %s.\n", __func__); + + if (vha->vp_idx && vha->qpair) + req = vha->qpair->req; + else + req = ha->req_q_map[0]; + + lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); + if (lg == NULL) { + ql_log(ql_log_warn, vha, 0x1062, + "Failed to allocate login IOCB.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } + + lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; + lg->entry_count = 1; + lg->handle = make_handle(req->id, lg->handle); + lg->nport_handle = cpu_to_le16(loop_id); + lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); + if (opt & BIT_0) + lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI); + if (opt & BIT_1) + lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); + lg->port_id[0] = al_pa; + lg->port_id[1] = area; 
+ lg->port_id[2] = domain; + lg->vp_index = vha->vp_idx; + rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, + (ha->r_a_tov / 10 * 2) + 2); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1063, + "Failed to issue login IOCB (%x).\n", rval); + } else if (lg->entry_status != 0) { + ql_dbg(ql_dbg_mbx, vha, 0x1064, + "Failed to complete IOCB -- error status (%x).\n", + lg->entry_status); + rval = QLA_FUNCTION_FAILED; + } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { + iop[0] = le32_to_cpu(lg->io_parameter[0]); + iop[1] = le32_to_cpu(lg->io_parameter[1]); + + ql_dbg(ql_dbg_mbx, vha, 0x1065, + "Failed to complete IOCB -- completion status (%x) " + "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), + iop[0], iop[1]); + + switch (iop[0]) { + case LSC_SCODE_PORTID_USED: + mb[0] = MBS_PORT_ID_USED; + mb[1] = LSW(iop[1]); + break; + case LSC_SCODE_NPORT_USED: + mb[0] = MBS_LOOP_ID_USED; + break; + case LSC_SCODE_NOLINK: + case LSC_SCODE_NOIOCB: + case LSC_SCODE_NOXCB: + case LSC_SCODE_CMD_FAILED: + case LSC_SCODE_NOFABRIC: + case LSC_SCODE_FW_NOT_READY: + case LSC_SCODE_NOT_LOGGED_IN: + case LSC_SCODE_NOPCB: + case LSC_SCODE_ELS_REJECT: + case LSC_SCODE_CMD_PARAM_ERR: + case LSC_SCODE_NONPORT: + case LSC_SCODE_LOGGED_IN: + case LSC_SCODE_NOFLOGI_ACC: + default: + mb[0] = MBS_COMMAND_ERROR; + break; + } + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, + "Done %s.\n", __func__); + + iop[0] = le32_to_cpu(lg->io_parameter[0]); + + mb[0] = MBS_COMMAND_COMPLETE; + mb[1] = 0; + if (iop[0] & BIT_4) { + if (iop[0] & BIT_8) + mb[1] |= BIT_1; + } else + mb[1] = BIT_0; + + /* Passback COS information. */ + mb[10] = 0; + if (lg->io_parameter[7] || lg->io_parameter[8]) + mb[10] |= BIT_0; /* Class 2. */ + if (lg->io_parameter[9] || lg->io_parameter[10]) + mb[10] |= BIT_1; /* Class 3. */ + if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) + mb[10] |= BIT_7; /* Confirmed Completion + * Allowed + */ + } + + dma_pool_free(ha->s_dma_pool, lg, lg_dma); + + return rval; +} + +/* + * qla2x00_login_fabric + * Issue login fabric port mailbox command. + * + * Input: + * ha = adapter block pointer. + * loop_id = device loop ID. + * domain = device domain. + * area = device area. + * al_pa = device AL_PA. + * status = pointer for return status. + * opt = command options. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, + uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; + mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; + if (HAS_EXTENDED_IDS(ha)) { + mcp->mb[1] = loop_id; + mcp->mb[10] = opt; + mcp->out_mb |= MBX_10; + } else { + mcp->mb[1] = (loop_id << 8) | opt; + } + mcp->mb[2] = domain; + mcp->mb[3] = area << 8 | al_pa; + + mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; + mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + /* Return mailbox statuses. */ + if (mb != NULL) { + mb[0] = mcp->mb[0]; + mb[1] = mcp->mb[1]; + mb[2] = mcp->mb[2]; + mb[6] = mcp->mb[6]; + mb[7] = mcp->mb[7]; + /* COS retrieved from Get-Port-Database mailbox command. 
*/ + mb[10] = 0; + } + + if (rval != QLA_SUCCESS) { + /* RLU tmp code: need to change main mailbox_command function to + * return ok even when the mailbox completion value is not + * SUCCESS. The caller needs to be responsible to interpret + * the return values of this mailbox command if we're not + * to change too much of the existing code. + */ + if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || + mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || + mcp->mb[0] == 0x4006) + rval = QLA_SUCCESS; + + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x1068, + "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", + rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); + } else { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla2x00_login_local_device + * Issue login loop port mailbox command. + * + * Input: + * ha = adapter block pointer. + * loop_id = device loop ID. + * opt = command options. + * + * Returns: + * Return status code. + * + * Context: + * Kernel context. + * + */ +int +qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, + uint16_t *mb_ret, uint8_t opt) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, + "Entered %s.\n", __func__); + + if (IS_FWI2_CAPABLE(ha)) + return qla24xx_login_fabric(vha, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, mb_ret, opt); + + mcp->mb[0] = MBC_LOGIN_LOOP_PORT; + if (HAS_EXTENDED_IDS(ha)) + mcp->mb[1] = fcport->loop_id; + else + mcp->mb[1] = fcport->loop_id << 8; + mcp->mb[2] = opt; + mcp->out_mb = MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; + mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + /* Return mailbox statuses. */ + if (mb_ret != NULL) { + mb_ret[0] = mcp->mb[0]; + mb_ret[1] = mcp->mb[1]; + mb_ret[6] = mcp->mb[6]; + mb_ret[7] = mcp->mb[7]; + } + + if (rval != QLA_SUCCESS) { + /* AV tmp code: need to change main mailbox_command function to + * return ok even when the mailbox completion value is not + * SUCCESS. The caller needs to be responsible to interpret + * the return values of this mailbox command if we're not + * to change too much of the existing code. 
+ */ + if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) + rval = QLA_SUCCESS; + + ql_dbg(ql_dbg_mbx, vha, 0x106b, + "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", + rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); + } else { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, + "Done %s.\n", __func__); + } + + return (rval); +} + +int +qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, + uint8_t area, uint8_t al_pa) +{ + int rval; + struct logio_entry_24xx *lg; + dma_addr_t lg_dma; + struct qla_hw_data *ha = vha->hw; + struct req_que *req; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, + "Entered %s.\n", __func__); + + lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); + if (lg == NULL) { + ql_log(ql_log_warn, vha, 0x106e, + "Failed to allocate logout IOCB.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } + + req = vha->req; + lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; + lg->entry_count = 1; + lg->handle = make_handle(req->id, lg->handle); + lg->nport_handle = cpu_to_le16(loop_id); + lg->control_flags = + cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| + LCF_FREE_NPORT); + lg->port_id[0] = al_pa; + lg->port_id[1] = area; + lg->port_id[2] = domain; + lg->vp_index = vha->vp_idx; + rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, + (ha->r_a_tov / 10 * 2) + 2); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x106f, + "Failed to issue logout IOCB (%x).\n", rval); + } else if (lg->entry_status != 0) { + ql_dbg(ql_dbg_mbx, vha, 0x1070, + "Failed to complete IOCB -- error status (%x).\n", + lg->entry_status); + rval = QLA_FUNCTION_FAILED; + } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { + ql_dbg(ql_dbg_mbx, vha, 0x1071, + "Failed to complete IOCB -- completion status (%x) " + "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), + le32_to_cpu(lg->io_parameter[0]), + le32_to_cpu(lg->io_parameter[1])); + } else { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, + "Done %s.\n", __func__); + } + + dma_pool_free(ha->s_dma_pool, lg, lg_dma); + + return rval; +} + +/* + * qla2x00_fabric_logout + * Issue logout fabric port mailbox command. + * + * Input: + * ha = adapter block pointer. + * loop_id = device loop ID. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, + uint8_t area, uint8_t al_pa) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; + mcp->out_mb = MBX_1|MBX_0; + if (HAS_EXTENDED_IDS(vha->hw)) { + mcp->mb[1] = loop_id; + mcp->mb[10] = 0; + mcp->out_mb |= MBX_10; + } else { + mcp->mb[1] = loop_id << 8; + } + + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x1074, + "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); + } else { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla2x00_full_login_lip + * Issue full login LIP mailbox command. + * + * Input: + * ha = adapter block pointer. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. 
+ */ +int +qla2x00_full_login_lip(scsi_qla_host_t *vha) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_LIP_FULL_LOGIN; + mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0; + mcp->mb[2] = 0; + mcp->mb[3] = 0; + mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); + } else { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla2x00_get_id_list + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, + uint16_t *entries) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, + "Entered %s.\n", __func__); + + if (id_list == NULL) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_GET_ID_LIST; + mcp->out_mb = MBX_0; + if (IS_FWI2_CAPABLE(vha->hw)) { + mcp->mb[2] = MSW(id_list_dma); + mcp->mb[3] = LSW(id_list_dma); + mcp->mb[6] = MSW(MSD(id_list_dma)); + mcp->mb[7] = LSW(MSD(id_list_dma)); + mcp->mb[8] = 0; + mcp->mb[9] = vha->vp_idx; + mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; + } else { + mcp->mb[1] = MSW(id_list_dma); + mcp->mb[2] = LSW(id_list_dma); + mcp->mb[3] = MSW(MSD(id_list_dma)); + mcp->mb[6] = LSW(MSD(id_list_dma)); + mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; + } + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); + } else { + *entries = mcp->mb[1]; + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla2x00_get_resource_cnts + * Get current firmware resource counts. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. 
+ */ +int +qla2x00_get_resource_cnts(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || + IS_QLA27XX(ha) || IS_QLA28XX(ha)) + mcp->in_mb |= MBX_12; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x107d, + "Failed mb[0]=%x.\n", mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, + "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " + "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], + mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], + mcp->mb[11], mcp->mb[12]); + + ha->orig_fw_tgt_xcb_count = mcp->mb[1]; + ha->cur_fw_tgt_xcb_count = mcp->mb[2]; + ha->cur_fw_xcb_count = mcp->mb[3]; + ha->orig_fw_xcb_count = mcp->mb[6]; + ha->cur_fw_iocb_count = mcp->mb[7]; + ha->orig_fw_iocb_count = mcp->mb[10]; + if (ha->flags.npiv_supported) + ha->max_npiv_vports = mcp->mb[11]; + if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) + ha->fw_max_fcf_count = mcp->mb[12]; + } + + return (rval); +} + +/* + * qla2x00_get_fcal_position_map + * Get FCAL (LILP) position map using mailbox command + * + * Input: + * ha = adapter state pointer. + * pos_map = buffer pointer (can be NULL). + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map, + u8 *num_entries) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + char *pmap; + dma_addr_t pmap_dma; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, + "Entered %s.\n", __func__); + + pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); + if (pmap == NULL) { + ql_log(ql_log_warn, vha, 0x1080, + "Memory alloc failed.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } + + mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; + mcp->mb[2] = MSW(pmap_dma); + mcp->mb[3] = LSW(pmap_dma); + mcp->mb[6] = MSW(MSD(pmap_dma)); + mcp->mb[7] = LSW(MSD(pmap_dma)); + mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->buf_size = FCAL_MAP_SIZE; + mcp->flags = MBX_DMA_IN; + mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval == QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, + "mb0/mb1=%x/%X FC/AL position map size (%x).\n", + mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); + ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, + pmap, pmap[0] + 1); + + if (pos_map) + memcpy(pos_map, pmap, FCAL_MAP_SIZE); + if (num_entries) + *num_entries = pmap[0]; + } + dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla2x00_get_link_status + * + * Input: + * ha = adapter block pointer. + * loop_id = device loop ID. + * ret_buf = pointer to link status return buffer. + * + * Returns: + * 0 = success. + * BIT_0 = mem alloc error. + * BIT_1 = mailbox error. 
+ */ +int +qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, + struct link_statistics *stats, dma_addr_t stats_dma) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + uint32_t *iter = (uint32_t *)stats; + ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_GET_LINK_STATUS; + mcp->mb[2] = MSW(LSD(stats_dma)); + mcp->mb[3] = LSW(LSD(stats_dma)); + mcp->mb[6] = MSW(MSD(stats_dma)); + mcp->mb[7] = LSW(MSD(stats_dma)); + mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; + mcp->in_mb = MBX_0; + if (IS_FWI2_CAPABLE(ha)) { + mcp->mb[1] = loop_id; + mcp->mb[4] = 0; + mcp->mb[10] = 0; + mcp->out_mb |= MBX_10|MBX_4|MBX_1; + mcp->in_mb |= MBX_1; + } else if (HAS_EXTENDED_IDS(ha)) { + mcp->mb[1] = loop_id; + mcp->mb[10] = 0; + mcp->out_mb |= MBX_10|MBX_1; + } else { + mcp->mb[1] = loop_id << 8; + mcp->out_mb |= MBX_1; + } + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = IOCTL_CMD; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval == QLA_SUCCESS) { + if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { + ql_dbg(ql_dbg_mbx, vha, 0x1085, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + rval = QLA_FUNCTION_FAILED; + } else { + /* Re-endianize - firmware data is le32. */ + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, + "Done %s.\n", __func__); + for ( ; dwords--; iter++) + le32_to_cpus(iter); + } + } else { + /* Failed. */ + ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval); + } + + return rval; +} + +int +qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, + dma_addr_t stats_dma, uint16_t options) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + uint32_t *iter = (uint32_t *)stats; + ushort dwords = sizeof(*stats)/sizeof(*iter); + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, + "Entered %s.\n", __func__); + + memset(&mc, 0, sizeof(mc)); + mc.mb[0] = MBC_GET_LINK_PRIV_STATS; + mc.mb[2] = MSW(LSD(stats_dma)); + mc.mb[3] = LSW(LSD(stats_dma)); + mc.mb[6] = MSW(MSD(stats_dma)); + mc.mb[7] = LSW(MSD(stats_dma)); + mc.mb[8] = dwords; + mc.mb[9] = vha->vp_idx; + mc.mb[10] = options; + + rval = qla24xx_send_mb_cmd(vha, &mc); + + if (rval == QLA_SUCCESS) { + if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { + ql_dbg(ql_dbg_mbx, vha, 0x1089, + "Failed mb[0]=%x.\n", mcp->mb[0]); + rval = QLA_FUNCTION_FAILED; + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, + "Done %s.\n", __func__); + /* Re-endianize - firmware data is le32. */ + for ( ; dwords--; iter++) + le32_to_cpus(iter); + } + } else { + /* Failed. 
*/ + ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval); + } + + return rval; +} + +int +qla24xx_abort_command(srb_t *sp) +{ + int rval; + unsigned long flags = 0; + + struct abort_entry_24xx *abt; + dma_addr_t abt_dma; + uint32_t handle; + fc_port_t *fcport = sp->fcport; + struct scsi_qla_host *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + struct req_que *req; + struct qla_qpair *qpair = sp->qpair; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, + "Entered %s.\n", __func__); + + if (sp->qpair) + req = sp->qpair->req; + else + return QLA_ERR_NO_QPAIR; + + if (ql2xasynctmfenable) + return qla24xx_async_abort_command(sp); + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + for (handle = 1; handle < req->num_outstanding_cmds; handle++) { + if (req->outstanding_cmds[handle] == sp) + break; + } + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + if (handle == req->num_outstanding_cmds) { + /* Command not found. */ + return QLA_ERR_NOT_FOUND; + } + + abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); + if (abt == NULL) { + ql_log(ql_log_warn, vha, 0x108d, + "Failed to allocate abort IOCB.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } + + abt->entry_type = ABORT_IOCB_TYPE; + abt->entry_count = 1; + abt->handle = make_handle(req->id, abt->handle); + abt->nport_handle = cpu_to_le16(fcport->loop_id); + abt->handle_to_abort = make_handle(req->id, handle); + abt->port_id[0] = fcport->d_id.b.al_pa; + abt->port_id[1] = fcport->d_id.b.area; + abt->port_id[2] = fcport->d_id.b.domain; + abt->vp_index = fcport->vha->vp_idx; + + abt->req_que_no = cpu_to_le16(req->id); + /* Need to pass original sp */ + qla_nvme_abort_set_option(abt, sp); + + rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x108e, + "Failed to issue IOCB (%x).\n", rval); + } else if (abt->entry_status != 0) { + ql_dbg(ql_dbg_mbx, vha, 0x108f, + "Failed to complete IOCB -- error status (%x).\n", + abt->entry_status); + rval = QLA_FUNCTION_FAILED; + } else if (abt->nport_handle != cpu_to_le16(0)) { + ql_dbg(ql_dbg_mbx, vha, 0x1090, + "Failed to complete IOCB -- completion status (%x).\n", + le16_to_cpu(abt->nport_handle)); + if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR)) + rval = QLA_FUNCTION_PARAMETER_ERROR; + else + rval = QLA_FUNCTION_FAILED; + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, + "Done %s.\n", __func__); + } + if (rval == QLA_SUCCESS) + qla_nvme_abort_process_comp_status(abt, sp); + + qla_wait_nvme_release_cmd_kref(sp); + + dma_pool_free(ha->s_dma_pool, abt, abt_dma); + + return rval; +} + +struct tsk_mgmt_cmd { + union { + struct tsk_mgmt_entry tsk; + struct sts_entry_24xx sts; + } p; +}; + +static int +__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, + uint64_t l, int tag) +{ + int rval, rval2; + struct tsk_mgmt_cmd *tsk; + struct sts_entry_24xx *sts; + dma_addr_t tsk_dma; + scsi_qla_host_t *vha; + struct qla_hw_data *ha; + struct req_que *req; + struct qla_qpair *qpair; + + vha = fcport->vha; + ha = vha->hw; + req = vha->req; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, + "Entered %s.\n", __func__); + + if (vha->vp_idx && vha->qpair) { + /* NPIV port */ + qpair = vha->qpair; + req = qpair->req; + } + + tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); + if (tsk == NULL) { + ql_log(ql_log_warn, vha, 0x1093, + "Failed to allocate task management IOCB.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } + + tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; + tsk->p.tsk.entry_count = 1; + 
tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle); + tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); + tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); + tsk->p.tsk.control_flags = cpu_to_le32(type); + tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; + tsk->p.tsk.port_id[1] = fcport->d_id.b.area; + tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; + tsk->p.tsk.vp_index = fcport->vha->vp_idx; + if (type == TCF_LUN_RESET) { + int_to_scsilun(l, &tsk->p.tsk.lun); + host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, + sizeof(tsk->p.tsk.lun)); + } + + sts = &tsk->p.sts; + rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1094, + "Failed to issue %s reset IOCB (%x).\n", name, rval); + } else if (sts->entry_status != 0) { + ql_dbg(ql_dbg_mbx, vha, 0x1095, + "Failed to complete IOCB -- error status (%x).\n", + sts->entry_status); + rval = QLA_FUNCTION_FAILED; + } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { + ql_dbg(ql_dbg_mbx, vha, 0x1096, + "Failed to complete IOCB -- completion status (%x).\n", + le16_to_cpu(sts->comp_status)); + rval = QLA_FUNCTION_FAILED; + } else if (le16_to_cpu(sts->scsi_status) & + SS_RESPONSE_INFO_LEN_VALID) { + if (le32_to_cpu(sts->rsp_data_len) < 4) { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, + "Ignoring inconsistent data length -- not enough " + "response info (%d).\n", + le32_to_cpu(sts->rsp_data_len)); + } else if (sts->data[3]) { + ql_dbg(ql_dbg_mbx, vha, 0x1098, + "Failed to complete IOCB -- response (%x).\n", + sts->data[3]); + rval = QLA_FUNCTION_FAILED; + } + } + + /* Issue marker IOCB. */ + rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l, + type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); + if (rval2 != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1099, + "Failed to issue marker IOCB (%x).\n", rval2); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, + "Done %s.\n", __func__); + } + + dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); + + return rval; +} + +int +qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) +{ + struct qla_hw_data *ha = fcport->vha->hw; + + if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) + return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); + + return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); +} + +int +qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) +{ + struct qla_hw_data *ha = fcport->vha->hw; + + if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) + return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); + + return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); +} + +int +qla2x00_system_error(scsi_qla_host_t *vha) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = 5; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && + !IS_QLA27XX(vha->hw) && 
!IS_QLA28XX(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_WRITE_SERDES; + mcp->mb[1] = addr; + if (IS_QLA2031(vha->hw)) + mcp->mb[2] = data & 0xff; + else + mcp->mb[2] = data; + + mcp->mb[3] = 0; + mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1183, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && + !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_READ_SERDES; + mcp->mb[1] = addr; + mcp->mb[3] = 0; + mcp->out_mb = MBX_3|MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (IS_QLA2031(vha->hw)) + *data = mcp->mb[1] & 0xff; + else + *data = mcp->mb[1]; + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1186, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA8044(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; + mcp->mb[1] = HCS_WRITE_SERDES; + mcp->mb[3] = LSW(addr); + mcp->mb[4] = MSW(addr); + mcp->mb[5] = LSW(data); + mcp->mb[6] = MSW(data); + mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x11a1, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA8044(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; + mcp->mb[1] = HCS_READ_SERDES; + mcp->mb[3] = LSW(addr); + mcp->mb[4] = MSW(addr); + mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + *data = mcp->mb[2] << 16 | mcp->mb[1]; + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x118a, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, + "Done %s.\n", __func__); + } + + return rval; +} + +/** + * qla2x00_set_serdes_params() - + * @vha: HA context + * @sw_em_1g: serial link options + * @sw_em_2g: serial link options + * @sw_em_4g: serial link options + * + * Returns + */ +int +qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, + uint16_t 
sw_em_2g, uint16_t sw_em_4g) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_SERDES_PARAMS; + mcp->mb[1] = BIT_0; + mcp->mb[2] = sw_em_1g | BIT_15; + mcp->mb[3] = sw_em_2g | BIT_15; + mcp->mb[4] = sw_em_4g | BIT_15; + mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x109f, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_stop_firmware(scsi_qla_host_t *vha) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_FWI2_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_STOP_FIRMWARE; + mcp->mb[1] = 0; + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = 5; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); + if (mcp->mb[0] == MBS_INVALID_COMMAND) + rval = QLA_INVALID_COMMAND; + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, + uint16_t buffers) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, + "Entered %s.\n", __func__); + + if (!IS_FWI2_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + if (unlikely(pci_channel_offline(vha->hw->pdev))) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_TRACE_CONTROL; + mcp->mb[1] = TC_EFT_ENABLE; + mcp->mb[2] = LSW(eft_dma); + mcp->mb[3] = MSW(eft_dma); + mcp->mb[4] = LSW(MSD(eft_dma)); + mcp->mb[5] = MSW(MSD(eft_dma)); + mcp->mb[6] = buffers; + mcp->mb[7] = TC_AEN_DISABLE; + mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10a5, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_disable_eft_trace(scsi_qla_host_t *vha) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, + "Entered %s.\n", __func__); + + if (!IS_FWI2_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + if (unlikely(pci_channel_offline(vha->hw->pdev))) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_TRACE_CONTROL; + mcp->mb[1] = TC_EFT_DISABLE; + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10a8, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, + uint16_t buffers, uint16_t *mb, uint32_t *dwords) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, + "Entered 
%s.\n", __func__); + + if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && + !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && + !IS_QLA28XX(vha->hw)) + return QLA_FUNCTION_FAILED; + + if (unlikely(pci_channel_offline(vha->hw->pdev))) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_TRACE_CONTROL; + mcp->mb[1] = TC_FCE_ENABLE; + mcp->mb[2] = LSW(fce_dma); + mcp->mb[3] = MSW(fce_dma); + mcp->mb[4] = LSW(MSD(fce_dma)); + mcp->mb[5] = MSW(MSD(fce_dma)); + mcp->mb[6] = buffers; + mcp->mb[7] = TC_AEN_DISABLE; + mcp->mb[8] = 0; + mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; + mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; + mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| + MBX_1|MBX_0; + mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10ab, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, + "Done %s.\n", __func__); + + if (mb) + memcpy(mb, mcp->mb, 8 * sizeof(*mb)); + if (dwords) + *dwords = buffers; + } + + return rval; +} + +int +qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, + "Entered %s.\n", __func__); + + if (!IS_FWI2_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + if (unlikely(pci_channel_offline(vha->hw->pdev))) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_TRACE_CONTROL; + mcp->mb[1] = TC_FCE_DISABLE; + mcp->mb[2] = TC_FCE_DISABLE_TRACE; + mcp->out_mb = MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| + MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10ae, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, + "Done %s.\n", __func__); + + if (wr) + *wr = (uint64_t) mcp->mb[5] << 48 | + (uint64_t) mcp->mb[4] << 32 | + (uint64_t) mcp->mb[3] << 16 | + (uint64_t) mcp->mb[2]; + if (rd) + *rd = (uint64_t) mcp->mb[9] << 48 | + (uint64_t) mcp->mb[8] << 32 | + (uint64_t) mcp->mb[7] << 16 | + (uint64_t) mcp->mb[6]; + } + + return rval; +} + +int +qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, + uint16_t *port_speed, uint16_t *mb) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0, + "Entered %s.\n", __func__); + + if (!IS_IIDMA_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_PORT_PARAMS; + mcp->mb[1] = loop_id; + mcp->mb[2] = mcp->mb[3] = 0; + mcp->mb[9] = vha->vp_idx; + mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + /* Return mailbox statuses. 
*/ + if (mb) { + mb[0] = mcp->mb[0]; + mb[1] = mcp->mb[1]; + mb[3] = mcp->mb[3]; + } + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, + "Done %s.\n", __func__); + if (port_speed) + *port_speed = mcp->mb[3]; + } + + return rval; +} + +int +qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, + uint16_t port_speed, uint16_t *mb) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, + "Entered %s.\n", __func__); + + if (!IS_IIDMA_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_PORT_PARAMS; + mcp->mb[1] = loop_id; + mcp->mb[2] = BIT_0; + mcp->mb[3] = port_speed & 0x3F; + mcp->mb[9] = vha->vp_idx; + mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + /* Return mailbox statuses. */ + if (mb) { + mb[0] = mcp->mb[0]; + mb[1] = mcp->mb[1]; + mb[3] = mcp->mb[3]; + } + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10b4, + "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, + "Done %s.\n", __func__); + } + + return rval; +} + +void +qla24xx_report_id_acquisition(scsi_qla_host_t *vha, + struct vp_rpt_id_entry_24xx *rptid_entry) +{ + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *vp = NULL; + unsigned long flags; + int found; + port_id_t id; + struct fc_port *fcport; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, + "Entered %s.\n", __func__); + + if (rptid_entry->entry_status != 0) + return; + + id.b.domain = rptid_entry->port_id[2]; + id.b.area = rptid_entry->port_id[1]; + id.b.al_pa = rptid_entry->port_id[0]; + id.b.rsvd_1 = 0; + ha->flags.n2n_ae = 0; + + if (rptid_entry->format == 0) { + /* loop */ + ql_dbg(ql_dbg_async, vha, 0x10b7, + "Format 0 : Number of VPs setup %d, number of " + "VPs acquired %d.\n", rptid_entry->vp_setup, + rptid_entry->vp_acquired); + ql_dbg(ql_dbg_async, vha, 0x10b8, + "Primary port id %02x%02x%02x.\n", + rptid_entry->port_id[2], rptid_entry->port_id[1], + rptid_entry->port_id[0]); + ha->current_topology = ISP_CFG_NL; + qla_update_host_map(vha, id); + + } else if (rptid_entry->format == 1) { + /* fabric */ + ql_dbg(ql_dbg_async, vha, 0x10b9, + "Format 1: VP[%d] enabled - status %d - with " + "port id %02x%02x%02x.\n", rptid_entry->vp_idx, + rptid_entry->vp_status, + rptid_entry->port_id[2], rptid_entry->port_id[1], + rptid_entry->port_id[0]); + ql_dbg(ql_dbg_async, vha, 0x5075, + "Format 1: Remote WWPN %8phC.\n", + rptid_entry->u.f1.port_name); + + ql_dbg(ql_dbg_async, vha, 0x5075, + "Format 1: WWPN %8phC.\n", + vha->port_name); + + switch (rptid_entry->u.f1.flags & TOPO_MASK) { + case TOPO_N2N: + ha->current_topology = ISP_CFG_N; + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + list_for_each_entry(fcport, &vha->vp_fcports, list) { + fcport->scan_state = QLA_FCPORT_SCAN; + fcport->n2n_flag = 0; + } + id.b24 = 0; + if (wwn_to_u64(vha->port_name) > + wwn_to_u64(rptid_entry->u.f1.port_name)) { + vha->d_id.b24 = 0; + vha->d_id.b.al_pa = 1; + ha->flags.n2n_bigger = 1; + + id.b.al_pa = 2; + ql_dbg(ql_dbg_async, vha, 0x5075, + "Format 1: assign local id %x remote id %x\n", + vha->d_id.b24, id.b24); + } else { + ql_dbg(ql_dbg_async, vha, 0x5075, + "Format 1: Remote login - Waiting for WWPN %8phC.\n", + rptid_entry->u.f1.port_name); + ha->flags.n2n_bigger = 0; + } + + fcport = qla2x00_find_fcport_by_wwpn(vha, + 
rptid_entry->u.f1.port_name, 1); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + + if (fcport) { + fcport->plogi_nack_done_deadline = jiffies + HZ; + fcport->dm_login_expire = jiffies + + QLA_N2N_WAIT_TIME * HZ; + fcport->scan_state = QLA_FCPORT_FOUND; + fcport->n2n_flag = 1; + fcport->keep_nport_handle = 1; + fcport->login_retry = vha->hw->login_retry_count; + fcport->fc4_type = FS_FC4TYPE_FCP; + if (vha->flags.nvme_enabled) + fcport->fc4_type |= FS_FC4TYPE_NVME; + + if (wwn_to_u64(vha->port_name) > + wwn_to_u64(fcport->port_name)) { + fcport->d_id = id; + } + + switch (fcport->disc_state) { + case DSC_DELETED: + set_bit(RELOGIN_NEEDED, + &vha->dpc_flags); + break; + case DSC_DELETE_PEND: + break; + default: + qlt_schedule_sess_for_deletion(fcport); + break; + } + } else { + qla24xx_post_newsess_work(vha, &id, + rptid_entry->u.f1.port_name, + rptid_entry->u.f1.node_name, + NULL, + FS_FCP_IS_N2N); + } + + /* if our portname is higher then initiate N2N login */ + + set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); + return; + case TOPO_FL: + ha->current_topology = ISP_CFG_FL; + break; + case TOPO_F: + ha->current_topology = ISP_CFG_F; + break; + default: + break; + } + + ha->flags.gpsc_supported = 1; + ha->current_topology = ISP_CFG_F; + /* buffer to buffer credit flag */ + vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; + + if (rptid_entry->vp_idx == 0) { + if (rptid_entry->vp_status == VP_STAT_COMPL) { + /* FA-WWN is only for physical port */ + if (qla_ini_mode_enabled(vha) && + ha->flags.fawwpn_enabled && + (rptid_entry->u.f1.flags & + BIT_6)) { + memcpy(vha->port_name, + rptid_entry->u.f1.port_name, + WWN_SIZE); + } + + qla_update_host_map(vha, id); + } + + set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); + set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); + } else { + if (rptid_entry->vp_status != VP_STAT_COMPL && + rptid_entry->vp_status != VP_STAT_ID_CHG) { + ql_dbg(ql_dbg_mbx, vha, 0x10ba, + "Could not acquire ID for VP[%d].\n", + rptid_entry->vp_idx); + return; + } + + found = 0; + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) { + if (rptid_entry->vp_idx == vp->vp_idx) { + found = 1; + break; + } + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + + if (!found) + return; + + qla_update_host_map(vp, id); + + /* + * Cannot configure here as we are still sitting on the + * response queue. Handle it in dpc context. + */ + set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); + set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); + set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); + } + set_bit(VP_DPC_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } else if (rptid_entry->format == 2) { + ql_dbg(ql_dbg_async, vha, 0x505f, + "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", + rptid_entry->port_id[2], rptid_entry->port_id[1], + rptid_entry->port_id[0]); + + ql_dbg(ql_dbg_async, vha, 0x5075, + "N2N: Remote WWPN %8phC.\n", + rptid_entry->u.f2.port_name); + + /* N2N. 
direct connect */ + ha->current_topology = ISP_CFG_N; + ha->flags.rida_fmt2 = 1; + vha->d_id.b.domain = rptid_entry->port_id[2]; + vha->d_id.b.area = rptid_entry->port_id[1]; + vha->d_id.b.al_pa = rptid_entry->port_id[0]; + + ha->flags.n2n_ae = 1; + spin_lock_irqsave(&ha->vport_slock, flags); + qla_update_vp_map(vha, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + fcport->scan_state = QLA_FCPORT_SCAN; + fcport->n2n_flag = 0; + } + + fcport = qla2x00_find_fcport_by_wwpn(vha, + rptid_entry->u.f2.port_name, 1); + + if (fcport) { + fcport->login_retry = vha->hw->login_retry_count; + fcport->plogi_nack_done_deadline = jiffies + HZ; + fcport->scan_state = QLA_FCPORT_FOUND; + fcport->keep_nport_handle = 1; + fcport->n2n_flag = 1; + fcport->d_id.b.domain = + rptid_entry->u.f2.remote_nport_id[2]; + fcport->d_id.b.area = + rptid_entry->u.f2.remote_nport_id[1]; + fcport->d_id.b.al_pa = + rptid_entry->u.f2.remote_nport_id[0]; + + /* + * For the case where remote port sending PRLO, FW + * sends up RIDA Format 2 as an indication of session + * loss. In other word, FW state change from PRLI + * complete back to PLOGI complete. Delete the + * session and let relogin drive the reconnect. + */ + if (atomic_read(&fcport->state) == FCS_ONLINE) + qlt_schedule_sess_for_deletion(fcport); + } + } +} + +/* + * qla24xx_modify_vp_config + * Change VP configuration for vha + * + * Input: + * vha = adapter block pointer. + * + * Returns: + * qla2xxx local function return status code. + * + * Context: + * Kernel context. + */ +int +qla24xx_modify_vp_config(scsi_qla_host_t *vha) +{ + int rval; + struct vp_config_entry_24xx *vpmod; + dma_addr_t vpmod_dma; + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + + /* This can be called by the parent */ + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, + "Entered %s.\n", __func__); + + vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); + if (!vpmod) { + ql_log(ql_log_warn, vha, 0x10bc, + "Failed to allocate modify VP IOCB.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } + + vpmod->entry_type = VP_CONFIG_IOCB_TYPE; + vpmod->entry_count = 1; + vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; + vpmod->vp_count = 1; + vpmod->vp_index1 = vha->vp_idx; + vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; + + qlt_modify_vp_config(vha, vpmod); + + memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); + memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); + vpmod->entry_count = 1; + + rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10bd, + "Failed to issue VP config IOCB (%x).\n", rval); + } else if (vpmod->comp_status != 0) { + ql_dbg(ql_dbg_mbx, vha, 0x10be, + "Failed to complete IOCB -- error status (%x).\n", + vpmod->comp_status); + rval = QLA_FUNCTION_FAILED; + } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) { + ql_dbg(ql_dbg_mbx, vha, 0x10bf, + "Failed to complete IOCB -- completion status (%x).\n", + le16_to_cpu(vpmod->comp_status)); + rval = QLA_FUNCTION_FAILED; + } else { + /* EMPTY */ + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, + "Done %s.\n", __func__); + fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); + } + dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); + + return rval; +} + +/* + * qla2x00_send_change_request + * Receive or disable RSCN request from fabric controller + * + * Input: + * ha = adapter block pointer + * format = registration format: + * 0 - 
Reserved + * 1 - Fabric detected registration + * 2 - N_port detected registration + * 3 - Full registration + * FF - clear registration + * vp_idx = Virtual port index + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel Context + */ + +int +qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, + uint16_t vp_idx) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; + mcp->mb[1] = format; + mcp->mb[9] = vp_idx; + mcp->out_mb = MBX_9|MBX_1|MBX_0; + mcp->in_mb = MBX_0|MBX_1; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval == QLA_SUCCESS) { + if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { + rval = BIT_1; + } + } else + rval = BIT_1; + + return rval; +} + +int +qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, + uint32_t size) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, + "Entered %s.\n", __func__); + + if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { + mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; + mcp->mb[8] = MSW(addr); + mcp->mb[10] = 0; + mcp->out_mb = MBX_10|MBX_8|MBX_0; + } else { + mcp->mb[0] = MBC_DUMP_RISC_RAM; + mcp->out_mb = MBX_0; + } + mcp->mb[1] = LSW(addr); + mcp->mb[2] = MSW(req_dma); + mcp->mb[3] = LSW(req_dma); + mcp->mb[6] = MSW(MSD(req_dma)); + mcp->mb[7] = LSW(MSD(req_dma)); + mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; + if (IS_FWI2_CAPABLE(vha->hw)) { + mcp->mb[4] = MSW(size); + mcp->mb[5] = LSW(size); + mcp->out_mb |= MBX_5|MBX_4; + } else { + mcp->mb[4] = LSW(size); + mcp->out_mb |= MBX_4; + } + + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1008, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, + "Done %s.\n", __func__); + } + + return rval; +} +/* 84XX Support **************************************************************/ + +struct cs84xx_mgmt_cmd { + union { + struct verify_chip_entry_84xx req; + struct verify_chip_rsp_84xx rsp; + } p; +}; + +int +qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) +{ + int rval, retry; + struct cs84xx_mgmt_cmd *mn; + dma_addr_t mn_dma; + uint16_t options; + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, + "Entered %s.\n", __func__); + + mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); + if (mn == NULL) { + return QLA_MEMORY_ALLOC_FAILED; + } + + /* Force Update? */ + options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; + /* Diagnostic firmware? */ + /* options |= MENLO_DIAG_FW; */ + /* We update the firmware with only one data sequence. 
*/ + options |= VCO_END_OF_DATA; + + do { + retry = 0; + memset(mn, 0, sizeof(*mn)); + mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; + mn->p.req.entry_count = 1; + mn->p.req.options = cpu_to_le16(options); + + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, + "Dump of Verify Request.\n"); + ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, + mn, sizeof(*mn)); + + rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10cb, + "Failed to issue verify IOCB (%x).\n", rval); + goto verify_done; + } + + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, + "Dump of Verify Response.\n"); + ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, + mn, sizeof(*mn)); + + status[0] = le16_to_cpu(mn->p.rsp.comp_status); + status[1] = status[0] == CS_VCS_CHIP_FAILURE ? + le16_to_cpu(mn->p.rsp.failure_code) : 0; + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, + "cs=%x fc=%x.\n", status[0], status[1]); + + if (status[0] != CS_COMPLETE) { + rval = QLA_FUNCTION_FAILED; + if (!(options & VCO_DONT_UPDATE_FW)) { + ql_dbg(ql_dbg_mbx, vha, 0x10cf, + "Firmware update failed. Retrying " + "without update firmware.\n"); + options |= VCO_DONT_UPDATE_FW; + options &= ~VCO_FORCE_UPDATE; + retry = 1; + } + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, + "Firmware updated to %x.\n", + le32_to_cpu(mn->p.rsp.fw_ver)); + + /* NOTE: we only update OP firmware. */ + spin_lock_irqsave(&ha->cs84xx->access_lock, flags); + ha->cs84xx->op_fw_version = + le32_to_cpu(mn->p.rsp.fw_ver); + spin_unlock_irqrestore(&ha->cs84xx->access_lock, + flags); + } + } while (retry); + +verify_done: + dma_pool_free(ha->s_dma_pool, mn, mn_dma); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10d1, + "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) +{ + int rval; + unsigned long flags; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + if (!ha->flags.fw_started) + return QLA_SUCCESS; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, + "Entered %s.\n", __func__); + + if (IS_SHADOW_REG_CAPABLE(ha)) + req->options |= BIT_13; + + mcp->mb[0] = MBC_INITIALIZE_MULTIQ; + mcp->mb[1] = req->options; + mcp->mb[2] = MSW(LSD(req->dma)); + mcp->mb[3] = LSW(LSD(req->dma)); + mcp->mb[6] = MSW(MSD(req->dma)); + mcp->mb[7] = LSW(MSD(req->dma)); + mcp->mb[5] = req->length; + if (req->rsp) + mcp->mb[10] = req->rsp->id; + mcp->mb[12] = req->qos; + mcp->mb[11] = req->vp_idx; + mcp->mb[13] = req->rid; + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) + mcp->mb[15] = 0; + + mcp->mb[4] = req->id; + /* que in ptr index */ + mcp->mb[8] = 0; + /* que out ptr index */ + mcp->mb[9] = *req->out_ptr = 0; + mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| + MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->flags = MBX_DMA_OUT; + mcp->tov = MBX_TOV_SECONDS * 2; + + if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) + mcp->in_mb |= MBX_1; + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + mcp->out_mb |= MBX_15; + /* debug q create issue in SR-IOV */ + mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; + } + + spin_lock_irqsave(&ha->hardware_lock, flags); + if (!(req->options & BIT_0)) { + wrt_reg_dword(req->req_q_in, 0); + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + wrt_reg_dword(req->req_q_out, 0); + } + 
spin_unlock_irqrestore(&ha->hardware_lock, flags); + + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10d4, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) +{ + int rval; + unsigned long flags; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + if (!ha->flags.fw_started) + return QLA_SUCCESS; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, + "Entered %s.\n", __func__); + + if (IS_SHADOW_REG_CAPABLE(ha)) + rsp->options |= BIT_13; + + mcp->mb[0] = MBC_INITIALIZE_MULTIQ; + mcp->mb[1] = rsp->options; + mcp->mb[2] = MSW(LSD(rsp->dma)); + mcp->mb[3] = LSW(LSD(rsp->dma)); + mcp->mb[6] = MSW(MSD(rsp->dma)); + mcp->mb[7] = LSW(MSD(rsp->dma)); + mcp->mb[5] = rsp->length; + mcp->mb[14] = rsp->msix->entry; + mcp->mb[13] = rsp->rid; + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) + mcp->mb[15] = 0; + + mcp->mb[4] = rsp->id; + /* que in ptr index */ + mcp->mb[8] = *rsp->in_ptr = 0; + /* que out ptr index */ + mcp->mb[9] = 0; + mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 + |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->flags = MBX_DMA_OUT; + mcp->tov = MBX_TOV_SECONDS * 2; + + if (IS_QLA81XX(ha)) { + mcp->out_mb |= MBX_12|MBX_11|MBX_10; + mcp->in_mb |= MBX_1; + } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; + mcp->in_mb |= MBX_1; + /* debug q create issue in SR-IOV */ + mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; + } + + spin_lock_irqsave(&ha->hardware_lock, flags); + if (!(rsp->options & BIT_0)) { + wrt_reg_dword(rsp->rsp_q_out, 0); + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + wrt_reg_dword(rsp->rsp_q_in, 0); + } + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10d7, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_IDC_ACK; + memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); + mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10da, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, + "Entered %s.\n", __func__); + + if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && + !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; + mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + 
ql_dbg(ql_dbg_mbx, vha, 0x10dd, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, + "Done %s.\n", __func__); + *sector_size = mcp->mb[1]; + } + + return rval; +} + +int +qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && + !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; + mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : + FAC_OPT_CMD_WRITE_PROTECT; + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10e0, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && + !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; + mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; + mcp->mb[2] = LSW(start); + mcp->mb[3] = MSW(start); + mcp->mb[4] = LSW(finish); + mcp->mb[5] = MSW(finish); + mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10e3, + "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", + rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock) +{ + int rval = QLA_SUCCESS; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return rval; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; + mcp->mb[1] = (lock ? 
FAC_OPT_CMD_LOCK_SEMAPHORE : + FAC_OPT_CMD_UNLOCK_SEMAPHORE); + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10e3, + "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", + rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) +{ + int rval = 0; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_RESTART_MPI_FW; + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_0|MBX_1; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10e6, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int i; + int len; + __le16 *str; + struct qla_hw_data *ha = vha->hw; + + if (!IS_P3P_TYPE(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, + "Entered %s.\n", __func__); + + str = (__force __le16 *)version; + len = strlen(version); + + mcp->mb[0] = MBC_SET_RNID_PARAMS; + mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; + mcp->out_mb = MBX_1|MBX_0; + for (i = 4; i < 16 && len; i++, str++, len -= 2) { + mcp->mb[i] = le16_to_cpup(str); + mcp->out_mb |= 1<<i; + } + for (; i < 16; i++) { + mcp->mb[i] = 0; + mcp->out_mb |= 1<<i; + } + + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x117c, + "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int len; + uint16_t dwlen; + uint8_t *str; + dma_addr_t str_dma; + struct qla_hw_data *ha = vha->hw; + + if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || + IS_P3P_TYPE(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, + "Entered %s.\n", __func__); + + str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); + if (!str) { + ql_log(ql_log_warn, vha, 0x117f, + "Failed to allocate driver version param.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } + + memcpy(str, "\x7\x3\x11\x0", 4); + dwlen = str[0]; + len = dwlen * 4 - 4; + memset(str + 4, 0, len); + if (len > strlen(version)) + len = strlen(version); + memcpy(str + 4, version, len); + + mcp->mb[0] = MBC_SET_RNID_PARAMS; + mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen; + mcp->mb[2] = MSW(LSD(str_dma)); + mcp->mb[3] = LSW(LSD(str_dma)); + mcp->mb[6] = MSW(MSD(str_dma)); + mcp->mb[7] = LSW(MSD(str_dma)); + mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1180, + "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, + "Done %s.\n", __func__); + } + +
dma_pool_free(ha->s_dma_pool, str, str_dma); + + return rval; +} + +int +qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, + void *buf, uint16_t bufsiz) +{ + int rval, i; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + uint32_t *bp; + + if (!IS_FWI2_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_GET_RNID_PARAMS; + mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8; + mcp->mb[2] = MSW(buf_dma); + mcp->mb[3] = LSW(buf_dma); + mcp->mb[6] = MSW(MSD(buf_dma)); + mcp->mb[7] = LSW(MSD(buf_dma)); + mcp->mb[8] = bufsiz/4; + mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x115a, + "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, + "Done %s.\n", __func__); + bp = (uint32_t *) buf; + for (i = 0; i < (bufsiz-4)/4; i++, bp++) + *bp = le32_to_cpu((__force __le32)*bp); + } + + return rval; +} + +#define PUREX_CMD_COUNT 4 +int +qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + uint8_t *els_cmd_map; + uint8_t active_cnt = 0; + dma_addr_t els_cmd_map_dma; + uint8_t cmd_opcode[PUREX_CMD_COUNT]; + uint8_t i, index, purex_bit; + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return QLA_SUCCESS; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197, + "Entered %s.\n", __func__); + + els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, + &els_cmd_map_dma, GFP_KERNEL); + if (!els_cmd_map) { + ql_log(ql_log_warn, vha, 0x7101, + "Failed to allocate RDP els command param.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } + + /* List of Purex ELS */ + if (ql2xrdpenable) { + cmd_opcode[active_cnt] = ELS_RDP; + active_cnt++; + } + if (ha->flags.scm_supported_f) { + cmd_opcode[active_cnt] = ELS_FPIN; + active_cnt++; + } + if (ha->flags.edif_enabled) { + cmd_opcode[active_cnt] = ELS_AUTH_ELS; + active_cnt++; + } + + for (i = 0; i < active_cnt; i++) { + index = cmd_opcode[i] / 8; + purex_bit = cmd_opcode[i] % 8; + els_cmd_map[index] |= 1 << purex_bit; + } + + mcp->mb[0] = MBC_SET_RNID_PARAMS; + mcp->mb[1] = RNID_TYPE_ELS_CMD << 8; + mcp->mb[2] = MSW(LSD(els_cmd_map_dma)); + mcp->mb[3] = LSW(LSD(els_cmd_map_dma)); + mcp->mb[6] = MSW(MSD(els_cmd_map_dma)); + mcp->mb[7] = LSW(MSD(els_cmd_map_dma)); + mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = MBX_DMA_OUT; + mcp->buf_size = ELS_CMD_MAP_SIZE; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x118d, + "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, + "Done %s.\n", __func__); + } + + dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, + els_cmd_map, els_cmd_map_dma); + + return rval; +} + +static int +qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_FWI2_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_GET_RNID_PARAMS; + mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; + mcp->out_mb = MBX_1|MBX_0; + 
mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + *temp = mcp->mb[1]; + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x115a, + "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, + uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, + "Entered %s.\n", __func__); + + if (!IS_FWI2_CAPABLE(ha)) + return QLA_FUNCTION_FAILED; + + if (len == 1) + opt |= BIT_0; + + mcp->mb[0] = MBC_READ_SFP; + mcp->mb[1] = dev; + mcp->mb[2] = MSW(LSD(sfp_dma)); + mcp->mb[3] = LSW(LSD(sfp_dma)); + mcp->mb[6] = MSW(MSD(sfp_dma)); + mcp->mb[7] = LSW(MSD(sfp_dma)); + mcp->mb[8] = len; + mcp->mb[9] = off; + mcp->mb[10] = opt; + mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (opt & BIT_0) + *sfp = mcp->mb[1]; + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10e9, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) { + /* sfp is not there */ + rval = QLA_INTERFACE_ERROR; + } + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, + uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, + "Entered %s.\n", __func__); + + if (!IS_FWI2_CAPABLE(ha)) + return QLA_FUNCTION_FAILED; + + if (len == 1) + opt |= BIT_0; + + if (opt & BIT_0) + len = *sfp; + + mcp->mb[0] = MBC_WRITE_SFP; + mcp->mb[1] = dev; + mcp->mb[2] = MSW(LSD(sfp_dma)); + mcp->mb[3] = LSW(LSD(sfp_dma)); + mcp->mb[6] = MSW(MSD(sfp_dma)); + mcp->mb[7] = LSW(MSD(sfp_dma)); + mcp->mb[8] = len; + mcp->mb[9] = off; + mcp->mb[10] = opt; + mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10ec, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, + uint16_t size_in_bytes, uint16_t *actual_size) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, + "Entered %s.\n", __func__); + + if (!IS_CNA_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_GET_XGMAC_STATS; + mcp->mb[2] = MSW(stats_dma); + mcp->mb[3] = LSW(stats_dma); + mcp->mb[6] = MSW(MSD(stats_dma)); + mcp->mb[7] = LSW(MSD(stats_dma)); + mcp->mb[8] = size_in_bytes >> 2; + mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10ef, + "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", + rval, 
mcp->mb[0], mcp->mb[1], mcp->mb[2]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, + "Done %s.\n", __func__); + + + *actual_size = mcp->mb[2] << 2; + } + + return rval; +} + +int +qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, + uint16_t size) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, + "Entered %s.\n", __func__); + + if (!IS_CNA_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_GET_DCBX_PARAMS; + mcp->mb[1] = 0; + mcp->mb[2] = MSW(tlv_dma); + mcp->mb[3] = LSW(tlv_dma); + mcp->mb[6] = MSW(MSD(tlv_dma)); + mcp->mb[7] = LSW(MSD(tlv_dma)); + mcp->mb[8] = size; + mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10f2, + "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", + rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, + "Entered %s.\n", __func__); + + if (!IS_FWI2_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_READ_RAM_EXTENDED; + mcp->mb[1] = LSW(risc_addr); + mcp->mb[8] = MSW(risc_addr); + mcp->out_mb = MBX_8|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10f5, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, + "Done %s.\n", __func__); + *data = mcp->mb[3] << 16 | mcp->mb[2]; + } + + return rval; +} + +int +qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, + uint16_t *mresp) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, + "Entered %s.\n", __func__); + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; + mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing + + /* transfer count */ + mcp->mb[10] = LSW(mreq->transfer_size); + mcp->mb[11] = MSW(mreq->transfer_size); + + /* send data address */ + mcp->mb[14] = LSW(mreq->send_dma); + mcp->mb[15] = MSW(mreq->send_dma); + mcp->mb[20] = LSW(MSD(mreq->send_dma)); + mcp->mb[21] = MSW(MSD(mreq->send_dma)); + + /* receive data address */ + mcp->mb[16] = LSW(mreq->rcv_dma); + mcp->mb[17] = MSW(mreq->rcv_dma); + mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); + mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); + + /* Iteration count */ + mcp->mb[18] = LSW(mreq->iteration_count); + mcp->mb[19] = MSW(mreq->iteration_count); + + mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| + MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; + if (IS_CNA_CAPABLE(vha->hw)) + mcp->out_mb |= MBX_2; + mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; + + mcp->buf_size = mreq->transfer_size; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; + + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10f8, + "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " + "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], + mcp->mb[3], 
mcp->mb[18], mcp->mb[19]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, + "Done %s.\n", __func__); + } + + /* Copy mailbox information */ + memcpy( mresp, mcp->mb, 64); + return rval; +} + +int +qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, + uint16_t *mresp) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, + "Entered %s.\n", __func__); + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; + /* BIT_6 specifies 64bit address */ + mcp->mb[1] = mreq->options | BIT_15 | BIT_6; + if (IS_CNA_CAPABLE(ha)) { + mcp->mb[2] = vha->fcoe_fcf_idx; + } + mcp->mb[16] = LSW(mreq->rcv_dma); + mcp->mb[17] = MSW(mreq->rcv_dma); + mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); + mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); + + mcp->mb[10] = LSW(mreq->transfer_size); + + mcp->mb[14] = LSW(mreq->send_dma); + mcp->mb[15] = MSW(mreq->send_dma); + mcp->mb[20] = LSW(MSD(mreq->send_dma)); + mcp->mb[21] = MSW(MSD(mreq->send_dma)); + + mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| + MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; + if (IS_CNA_CAPABLE(ha)) + mcp->out_mb |= MBX_2; + + mcp->in_mb = MBX_0; + if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || + IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) + mcp->in_mb |= MBX_1; + if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) + mcp->in_mb |= MBX_3; + + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; + mcp->buf_size = mreq->transfer_size; + + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10fb, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, + "Done %s.\n", __func__); + } + + /* Copy mailbox information */ + memcpy(mresp, mcp->mb, 64); + return rval; +} + +int +qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, + "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); + + mcp->mb[0] = MBC_ISP84XX_RESET; + mcp->mb[1] = enable_diagnostic; + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) + ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); + else + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, + "Done %s.\n", __func__); + + return rval; +} + +int +qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, + "Entered %s.\n", __func__); + + if (!IS_FWI2_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; + mcp->mb[1] = LSW(risc_addr); + mcp->mb[2] = LSW(data); + mcp->mb[3] = MSW(data); + mcp->mb[8] = MSW(risc_addr); + mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1101, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, + "Done %s.\n", __func__); + } + + return rval; +} + +int 
+qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) +{ + int rval; + uint32_t stat, timer; + uint16_t mb0 = 0; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + rval = QLA_SUCCESS; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, + "Entered %s.\n", __func__); + + clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + + /* Write the MBC data to the registers */ + wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER); + wrt_reg_word(&reg->mailbox1, mb[0]); + wrt_reg_word(&reg->mailbox2, mb[1]); + wrt_reg_word(&reg->mailbox3, mb[2]); + wrt_reg_word(&reg->mailbox4, mb[3]); + + wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT); + + /* Poll for MBC interrupt */ + for (timer = 6000000; timer; timer--) { + /* Check for pending interrupts. */ + stat = rd_reg_dword(&reg->host_status); + if (stat & HSRX_RISC_INT) { + stat &= 0xff; + + if (stat == 0x1 || stat == 0x2 || + stat == 0x10 || stat == 0x11) { + set_bit(MBX_INTERRUPT, + &ha->mbx_cmd_flags); + mb0 = rd_reg_word(&reg->mailbox0); + wrt_reg_dword(&reg->hccr, + HCCRX_CLR_RISC_INT); + rd_reg_dword(&reg->hccr); + break; + } + } + udelay(5); + } + + if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) + rval = mb0 & MBS_MASK; + else + rval = QLA_FUNCTION_FAILED; + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1104, + "Failed=%x mb[0]=%x.\n", rval, mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, + "Done %s.\n", __func__); + } + + return rval; +} + +/* Set the specified data rate */ +int +qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + uint16_t val; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, + "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate, + mode); + + if (!IS_FWI2_CAPABLE(ha)) + return QLA_FUNCTION_FAILED; + + memset(mcp, 0, sizeof(*mcp)); + switch (ha->set_data_rate) { + case PORT_SPEED_AUTO: + case PORT_SPEED_4GB: + case PORT_SPEED_8GB: + case PORT_SPEED_16GB: + case PORT_SPEED_32GB: + val = ha->set_data_rate; + break; + default: + ql_log(ql_log_warn, vha, 0x1199, + "Unrecognized speed setting:%d. 
Setting Autoneg\n", + ha->set_data_rate); + val = ha->set_data_rate = PORT_SPEED_AUTO; + break; + } + + mcp->mb[0] = MBC_DATA_RATE; + mcp->mb[1] = mode; + mcp->mb[2] = val; + + mcp->out_mb = MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) + mcp->in_mb |= MBX_4|MBX_3; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1107, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + if (mcp->mb[1] != 0x7) + ql_dbg(ql_dbg_mbx, vha, 0x1179, + "Speed set:0x%x\n", mcp->mb[1]); + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_get_data_rate(scsi_qla_host_t *vha) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, + "Entered %s.\n", __func__); + + if (!IS_FWI2_CAPABLE(ha)) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_DATA_RATE; + mcp->mb[1] = QLA_GET_DATA_RATE; + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) + mcp->in_mb |= MBX_4|MBX_3; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1107, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + if (mcp->mb[1] != 0x7) + ha->link_data_rate = mcp->mb[1]; + + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + if (mcp->mb[4] & BIT_0) + ql_log(ql_log_info, vha, 0x11a2, + "FEC=enabled (data rate).\n"); + } + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, + "Done %s.\n", __func__); + if (mcp->mb[1] != 0x7) + ha->link_data_rate = mcp->mb[1]; + } + + return rval; +} + +int +qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, + "Entered %s.\n", __func__); + + if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return QLA_FUNCTION_FAILED; + mcp->mb[0] = MBC_GET_PORT_CONFIG; + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x110a, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + /* Copy all bits to preserve original value */ + memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, + "Done %s.\n", __func__); + } + return rval; +} + +int +qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_SET_PORT_CONFIG; + /* Copy all bits to preserve original setting */ + memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); + mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x110d, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, + "Done %s.\n", __func__); + + return rval; +} + + +int +qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, + 
uint16_t *mb) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, + "Entered %s.\n", __func__); + + if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) + return QLA_FUNCTION_FAILED; + + mcp->mb[0] = MBC_PORT_PARAMS; + mcp->mb[1] = loop_id; + if (ha->flags.fcp_prio_enabled) + mcp->mb[2] = BIT_1; + else + mcp->mb[2] = BIT_2; + mcp->mb[4] = priority & 0xf; + mcp->mb[9] = vha->vp_idx; + mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (mb != NULL) { + mb[0] = mcp->mb[0]; + mb[1] = mcp->mb[1]; + mb[3] = mcp->mb[3]; + mb[4] = mcp->mb[4]; + } + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) +{ + int rval = QLA_FUNCTION_FAILED; + struct qla_hw_data *ha = vha->hw; + uint8_t byte; + + if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { + ql_dbg(ql_dbg_mbx, vha, 0x1150, + "Thermal not supported by this card.\n"); + return rval; + } + + if (IS_QLA25XX(ha)) { + if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && + ha->pdev->subsystem_device == 0x0175) { + rval = qla2x00_read_sfp(vha, 0, &byte, + 0x98, 0x1, 1, BIT_13|BIT_0); + *temp = byte; + return rval; + } + if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && + ha->pdev->subsystem_device == 0x338e) { + rval = qla2x00_read_sfp(vha, 0, &byte, + 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); + *temp = byte; + return rval; + } + ql_dbg(ql_dbg_mbx, vha, 0x10c9, + "Thermal not supported by this card.\n"); + return rval; + } + + if (IS_QLA82XX(ha)) { + *temp = qla82xx_read_temperature(vha); + rval = QLA_SUCCESS; + return rval; + } else if (IS_QLA8044(ha)) { + *temp = qla8044_read_temperature(vha); + rval = QLA_SUCCESS; + return rval; + } + + rval = qla2x00_read_asic_temperature(vha, temp); + return rval; +} + +int +qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, + "Entered %s.\n", __func__); + + if (!IS_FWI2_CAPABLE(ha)) + return QLA_FUNCTION_FAILED; + + memset(mcp, 0, sizeof(mbx_cmd_t)); + mcp->mb[0] = MBC_TOGGLE_INTERRUPT; + mcp->mb[1] = 1; + + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1016, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, + "Entered %s.\n", __func__); + + if (!IS_P3P_TYPE(ha)) + return QLA_FUNCTION_FAILED; + + memset(mcp, 0, sizeof(mbx_cmd_t)); + mcp->mb[0] = MBC_TOGGLE_INTERRUPT; + mcp->mb[1] = 0; + + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x100c, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + 
ql_dbg_verbose, vha, 0x100b, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla82xx_md_get_template_size(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval = QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, + "Entered %s.\n", __func__); + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[2] = LSW(RQST_TMPLT_SIZE); + mcp->mb[3] = MSW(RQST_TMPLT_SIZE); + + mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| + MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + + mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; + mcp->tov = MBX_TOV_SECONDS; + rval = qla2x00_mailbox_command(vha, mcp); + + /* Always copy back return mailbox values. */ + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1120, + "mailbox command FAILED=0x%x, subcode=%x.\n", + (mcp->mb[1] << 16) | mcp->mb[0], + (mcp->mb[3] << 16) | mcp->mb[2]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, + "Done %s.\n", __func__); + ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); + if (!ha->md_template_size) { + ql_dbg(ql_dbg_mbx, vha, 0x1122, + "Null template size obtained.\n"); + rval = QLA_FUNCTION_FAILED; + } + } + return rval; +} + +int +qla82xx_md_get_template(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval = QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, + "Entered %s.\n", __func__); + + ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, + ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); + if (!ha->md_tmplt_hdr) { + ql_log(ql_log_warn, vha, 0x1124, + "Unable to allocate memory for Minidump template.\n"); + return rval; + } + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[2] = LSW(RQST_TMPLT); + mcp->mb[3] = MSW(RQST_TMPLT); + mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); + mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); + mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); + mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); + mcp->mb[8] = LSW(ha->md_template_size); + mcp->mb[9] = MSW(ha->md_template_size); + + mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; + mcp->tov = MBX_TOV_SECONDS; + mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| + MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1125, + "mailbox command FAILED=0x%x, subcode=%x.\n", + ((mcp->mb[1] << 16) | mcp->mb[0]), + ((mcp->mb[3] << 16) | mcp->mb[2])); + } else + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, + "Done %s.\n", __func__); + return rval; +} + +int +qla8044_md_get_template(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval = QLA_FUNCTION_FAILED; + int offset = 0, size = MINIDUMP_SIZE_36K; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, + "Entered %s.\n", __func__); + + ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, + ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); + if (!ha->md_tmplt_hdr) { + ql_log(ql_log_warn, vha, 0xb11b, + "Unable to allocate memory for Minidump template.\n"); + return rval; + } + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + while (offset < 
ha->md_template_size) { + mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[2] = LSW(RQST_TMPLT); + mcp->mb[3] = MSW(RQST_TMPLT); + mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); + mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); + mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); + mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); + mcp->mb[8] = LSW(size); + mcp->mb[9] = MSW(size); + mcp->mb[10] = offset & 0x0000FFFF; + mcp->mb[11] = offset & 0xFFFF0000; + mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; + mcp->tov = MBX_TOV_SECONDS; + mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| + MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xb11c, + "mailbox command FAILED=0x%x, subcode=%x.\n", + ((mcp->mb[1] << 16) | mcp->mb[0]), + ((mcp->mb[3] << 16) | mcp->mb[2])); + return rval; + } else + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, + "Done %s.\n", __func__); + offset = offset + size; + } + return rval; +} + +int +qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, + "Entered %s.\n", __func__); + + memset(mcp, 0, sizeof(mbx_cmd_t)); + mcp->mb[0] = MBC_SET_LED_CONFIG; + mcp->mb[1] = led_cfg[0]; + mcp->mb[2] = led_cfg[1]; + if (IS_QLA8031(ha)) { + mcp->mb[3] = led_cfg[2]; + mcp->mb[4] = led_cfg[3]; + mcp->mb[5] = led_cfg[4]; + mcp->mb[6] = led_cfg[5]; + } + + mcp->out_mb = MBX_2|MBX_1|MBX_0; + if (IS_QLA8031(ha)) + mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1134, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, + "Entered %s.\n", __func__); + + memset(mcp, 0, sizeof(mbx_cmd_t)); + mcp->mb[0] = MBC_GET_LED_CONFIG; + + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + if (IS_QLA8031(ha)) + mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1137, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + led_cfg[0] = mcp->mb[1]; + led_cfg[1] = mcp->mb[2]; + if (IS_QLA8031(ha)) { + led_cfg[2] = mcp->mb[3]; + led_cfg[3] = mcp->mb[4]; + led_cfg[4] = mcp->mb[5]; + led_cfg[5] = mcp->mb[6]; + } + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_P3P_TYPE(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, + "Entered %s.\n", __func__); + + memset(mcp, 0, sizeof(mbx_cmd_t)); + mcp->mb[0] = MBC_SET_LED_CONFIG; 
+ if (enable) + mcp->mb[7] = 0xE; + else + mcp->mb[7] = 0xD; + + mcp->out_mb = MBX_7|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1128, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_WRITE_REMOTE_REG; + mcp->mb[1] = LSW(reg); + mcp->mb[2] = MSW(reg); + mcp->mb[3] = LSW(data); + mcp->mb[4] = MSW(data); + mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1131, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b, + "Implicit LOGO Unsupported.\n"); + return QLA_FUNCTION_FAILED; + } + + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c, + "Entering %s.\n", __func__); + + /* Perform Implicit LOGO. */ + mcp->mb[0] = MBC_PORT_LOGOUT; + mcp->mb[1] = fcport->loop_id; + mcp->mb[10] = BIT_15; + mcp->out_mb = MBX_10|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) + ql_dbg(ql_dbg_mbx, vha, 0x113d, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + else + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e, + "Done %s.\n", __func__); + + return rval; +} + +int +qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + unsigned long retry_max_time = jiffies + (2 * HZ); + + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); + +retry_rd_reg: + mcp->mb[0] = MBC_READ_REMOTE_REG; + mcp->mb[1] = LSW(reg); + mcp->mb[2] = MSW(reg); + mcp->out_mb = MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x114c, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); + } else { + *data = (mcp->mb[3] | (mcp->mb[4] << 16)); + if (*data == QLA8XXX_BAD_VALUE) { + /* + * During soft-reset CAMRAM register reads might + * return 0xbad0bad0. So retry for MAX of 2 sec + * while reading camram registers. + */ + if (time_after(jiffies, retry_max_time)) { + ql_dbg(ql_dbg_mbx, vha, 0x1141, + "Failure to read CAMRAM register. 
" + "data=0x%x.\n", *data); + return QLA_FUNCTION_FAILED; + } + msleep(100); + goto retry_rd_reg; + } + ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__); + } + + return rval; +} + +int +qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA83XX(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE; + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1144, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); + qla2xxx_dump_fw(vha); + } else { + ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); + } + + return rval; +} + +int +qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options, + uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + uint8_t subcode = (uint8_t)options; + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA8031(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_SET_ACCESS_CONTROL; + mcp->mb[1] = options; + mcp->out_mb = MBX_1|MBX_0; + if (subcode & BIT_2) { + mcp->mb[2] = LSW(start_addr); + mcp->mb[3] = MSW(start_addr); + mcp->mb[4] = LSW(end_addr); + mcp->mb[5] = MSW(end_addr); + mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2; + } + mcp->in_mb = MBX_2|MBX_1|MBX_0; + if (!(subcode & (BIT_2 | BIT_5))) + mcp->in_mb |= MBX_4|MBX_3; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1147, + "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", + rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], + mcp->mb[4]); + qla2xxx_dump_fw(vha); + } else { + if (subcode & BIT_5) + *sector_size = mcp->mb[1]; + else if (subcode & (BIT_6 | BIT_7)) { + ql_dbg(ql_dbg_mbx, vha, 0x1148, + "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]); + } else if (subcode & (BIT_3 | BIT_4)) { + ql_dbg(ql_dbg_mbx, vha, 0x1149, + "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]); + } + ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, + uint32_t size) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_MCTP_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; + mcp->mb[1] = LSW(addr); + mcp->mb[2] = MSW(req_dma); + mcp->mb[3] = LSW(req_dma); + mcp->mb[4] = MSW(size); + mcp->mb[5] = LSW(size); + mcp->mb[6] = MSW(MSD(req_dma)); + mcp->mb[7] = LSW(MSD(req_dma)); + mcp->mb[8] = MSW(addr); + /* Setting RAM ID to valid */ + /* For MCTP RAM ID is 0x40 */ + mcp->mb[10] = BIT_7 | 0x40; + + mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| + MBX_0; + + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x114e, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla26xx_dport_diagnostics(scsi_qla_host_t *vha, + void *dd_buf, 
uint size, uint options) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + dma_addr_t dd_dma; + + if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && + !IS_QLA28XX(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f, + "Entered %s.\n", __func__); + + dd_dma = dma_map_single(&vha->hw->pdev->dev, + dd_buf, size, DMA_FROM_DEVICE); + if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) { + ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } + + memset(dd_buf, 0, size); + + mcp->mb[0] = MBC_DPORT_DIAGNOSTICS; + mcp->mb[1] = options; + mcp->mb[2] = MSW(LSD(dd_dma)); + mcp->mb[3] = LSW(LSD(dd_dma)); + mcp->mb[6] = MSW(MSD(dd_dma)); + mcp->mb[7] = LSW(MSD(dd_dma)); + mcp->mb[8] = size; + mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->buf_size = size; + mcp->flags = MBX_DMA_IN; + mcp->tov = MBX_TOV_SECONDS * 4; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196, + "Done %s.\n", __func__); + } + + dma_unmap_single(&vha->hw->pdev->dev, dd_dma, + size, DMA_FROM_DEVICE); + + return rval; +} + +int +qla26xx_dport_diagnostics_v2(scsi_qla_host_t *vha, + struct qla_dport_diag_v2 *dd, mbx_cmd_t *mcp) +{ + int rval; + dma_addr_t dd_dma; + uint size = sizeof(dd->buf); + uint16_t options = dd->options; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f, + "Entered %s.\n", __func__); + + dd_dma = dma_map_single(&vha->hw->pdev->dev, + dd->buf, size, DMA_FROM_DEVICE); + if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) { + ql_log(ql_log_warn, vha, 0x1194, + "Failed to map dma buffer.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } + + memset(dd->buf, 0, size); + + mcp->mb[0] = MBC_DPORT_DIAGNOSTICS; + mcp->mb[1] = options; + mcp->mb[2] = MSW(LSD(dd_dma)); + mcp->mb[3] = LSW(LSD(dd_dma)); + mcp->mb[6] = MSW(MSD(dd_dma)); + mcp->mb[7] = LSW(MSD(dd_dma)); + mcp->mb[8] = size; + mcp->out_mb = MBX_8 | MBX_7 | MBX_6 | MBX_3 | MBX_2 | MBX_1 | MBX_0; + mcp->in_mb = MBX_3 | MBX_2 | MBX_1 | MBX_0; + mcp->buf_size = size; + mcp->flags = MBX_DMA_IN; + mcp->tov = MBX_TOV_SECONDS * 4; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196, + "Done %s.\n", __func__); + } + + dma_unmap_single(&vha->hw->pdev->dev, dd_dma, size, DMA_FROM_DEVICE); + + return rval; +} + +static void qla2x00_async_mb_sp_done(srb_t *sp, int res) +{ + sp->u.iocb_cmd.u.mbx.rc = res; + + complete(&sp->u.iocb_cmd.u.mbx.comp); + /* don't free sp here. Let the caller do the free */ +} + +/* + * This mailbox uses the iocb interface to send MB command. + * This allows non-critial (non chip setup) command to go + * out in parrallel. 
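+ *
+ * Rough usage sketch (illustrative only; qla24xx_res_count_wait() below is
+ * a real caller, and out_mb here stands for any caller-provided uint16_t
+ * buffer): zero a mbx_cmd_t, load the request registers into mb[], and the
+ * completion registers come back in the same array:
+ *
+ *	mbx_cmd_t mc;
+ *
+ *	memset(&mc, 0, sizeof(mc));
+ *	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
+ *	if (qla24xx_send_mb_cmd(vha, &mc) == QLA_SUCCESS)
+ *		memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);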
+ */ +int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) +{ + int rval = QLA_FUNCTION_FAILED; + srb_t *sp; + struct srb_iocb *c; + + if (!vha->hw->flags.fw_started) + goto done; + + /* ref: INIT */ + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); + if (!sp) + goto done; + + c = &sp->u.iocb_cmd; + init_completion(&c->u.mbx.comp); + + sp->type = SRB_MB_IOCB; + sp->name = mb_to_str(mcp->mb[0]); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_mb_sp_done); + + memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1018, + "%s: %s Failed submission. %x.\n", + __func__, sp->name, rval); + goto done_free_sp; + } + + ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n", + sp->name, sp->handle); + + wait_for_completion(&c->u.mbx.comp); + memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG); + + rval = c->u.mbx.rc; + switch (rval) { + case QLA_FUNCTION_TIMEOUT: + ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n", + __func__, sp->name, rval); + break; + case QLA_SUCCESS: + ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n", + __func__, sp->name); + break; + default: + ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n", + __func__, sp->name, rval); + break; + } + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + return rval; +} + +/* + * qla24xx_gpdb_wait + * NOTE: Do not call this routine from DPC thread + */ +int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) +{ + int rval = QLA_FUNCTION_FAILED; + dma_addr_t pd_dma; + struct port_database_24xx *pd; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + + if (!vha->hw->flags.fw_started) + goto done; + + pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); + if (pd == NULL) { + ql_log(ql_log_warn, vha, 0xd047, + "Failed to allocate port database structure.\n"); + goto done_free_sp; + } + + memset(&mc, 0, sizeof(mc)); + mc.mb[0] = MBC_GET_PORT_DATABASE; + mc.mb[1] = fcport->loop_id; + mc.mb[2] = MSW(pd_dma); + mc.mb[3] = LSW(pd_dma); + mc.mb[6] = MSW(MSD(pd_dma)); + mc.mb[7] = LSW(MSD(pd_dma)); + mc.mb[9] = vha->vp_idx; + mc.mb[10] = opt; + + rval = qla24xx_send_mb_cmd(vha, &mc); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1193, + "%s: %8phC fail\n", __func__, fcport->port_name); + goto done_free_sp; + } + + rval = __qla24xx_parse_gpdb(vha, fcport, pd); + + ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n", + __func__, fcport->port_name); + +done_free_sp: + if (pd) + dma_pool_free(ha->s_dma_pool, pd, pd_dma); +done: + return rval; +} + +int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, + struct port_database_24xx *pd) +{ + int rval = QLA_SUCCESS; + uint64_t zero = 0; + u8 current_login_state, last_login_state; + + if (NVME_TARGET(vha->hw, fcport)) { + current_login_state = pd->current_login_state >> 4; + last_login_state = pd->last_login_state >> 4; + } else { + current_login_state = pd->current_login_state & 0xf; + last_login_state = pd->last_login_state & 0xf; + } + + /* Check for logged in state. 
*/ + if (current_login_state != PDS_PRLI_COMPLETE) { + ql_dbg(ql_dbg_mbx, vha, 0x119a, + "Unable to verify login-state (%x/%x) for loop_id %x.\n", + current_login_state, last_login_state, fcport->loop_id); + rval = QLA_FUNCTION_FAILED; + goto gpd_error_out; + } + + if (fcport->loop_id == FC_NO_LOOP_ID || + (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && + memcmp(fcport->port_name, pd->port_name, 8))) { + /* We lost the device mid way. */ + rval = QLA_NOT_LOGGED_IN; + goto gpd_error_out; + } + + /* Names are little-endian. */ + memcpy(fcport->node_name, pd->node_name, WWN_SIZE); + memcpy(fcport->port_name, pd->port_name, WWN_SIZE); + + /* Get port_id of device. */ + fcport->d_id.b.domain = pd->port_id[0]; + fcport->d_id.b.area = pd->port_id[1]; + fcport->d_id.b.al_pa = pd->port_id[2]; + fcport->d_id.b.rsvd_1 = 0; + + ql_dbg(ql_dbg_disc, vha, 0x2062, + "%8phC SVC Param w3 %02x%02x", + fcport->port_name, + pd->prli_svc_param_word_3[1], + pd->prli_svc_param_word_3[0]); + + if (NVME_TARGET(vha->hw, fcport)) { + fcport->port_type = FCT_NVME; + if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0) + fcport->port_type |= FCT_NVME_INITIATOR; + if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) + fcport->port_type |= FCT_NVME_TARGET; + if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0) + fcport->port_type |= FCT_NVME_DISCOVERY; + } else { + /* If not target must be initiator or unknown type. */ + if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) + fcport->port_type = FCT_INITIATOR; + else + fcport->port_type = FCT_TARGET; + } + /* Passback COS information. */ + fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? + FC_COS_CLASS2 : FC_COS_CLASS3; + + if (pd->prli_svc_param_word_3[0] & BIT_7) { + fcport->flags |= FCF_CONF_COMP_SUPPORTED; + fcport->conf_compl_supported = 1; + } + +gpd_error_out: + return rval; +} + +/* + * qla24xx_gidlist__wait + * NOTE: don't call this routine from DPC thread. + */ +int qla24xx_gidlist_wait(struct scsi_qla_host *vha, + void *id_list, dma_addr_t id_list_dma, uint16_t *entries) +{ + int rval = QLA_FUNCTION_FAILED; + mbx_cmd_t mc; + + if (!vha->hw->flags.fw_started) + goto done; + + memset(&mc, 0, sizeof(mc)); + mc.mb[0] = MBC_GET_ID_LIST; + mc.mb[2] = MSW(id_list_dma); + mc.mb[3] = LSW(id_list_dma); + mc.mb[6] = MSW(MSD(id_list_dma)); + mc.mb[7] = LSW(MSD(id_list_dma)); + mc.mb[8] = 0; + mc.mb[9] = vha->vp_idx; + + rval = qla24xx_send_mb_cmd(vha, &mc); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x119b, + "%s: fail\n", __func__); + } else { + *entries = mc.mb[1]; + ql_dbg(ql_dbg_mbx, vha, 0x119c, + "%s: done\n", __func__); + } +done: + return rval; +} + +int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200, + "Entered %s\n", __func__); + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; + mcp->mb[1] = 1; + mcp->mb[2] = value; + mcp->out_mb = MBX_2 | MBX_1 | MBX_0; + mcp->in_mb = MBX_2 | MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + + ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n", + (rval != QLA_SUCCESS) ? 
"Failed" : "Done", rval); + + return rval; +} + +int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203, + "Entered %s\n", __func__); + + memset(mcp->mb, 0, sizeof(mcp->mb)); + mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; + mcp->mb[1] = 0; + mcp->out_mb = MBX_1 | MBX_0; + mcp->in_mb = MBX_2 | MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + if (rval == QLA_SUCCESS) + *value = mc.mb[2]; + + ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n", + (rval != QLA_SUCCESS) ? "Failed" : "Done", rval); + + return rval; +} + +int +qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count) +{ + struct qla_hw_data *ha = vha->hw; + uint16_t iter, addr, offset; + dma_addr_t phys_addr; + int rval, c; + u8 *sfp_data; + + memset(ha->sfp_data, 0, SFP_DEV_SIZE); + addr = 0xa0; + phys_addr = ha->sfp_data_dma; + sfp_data = ha->sfp_data; + offset = c = 0; + + for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) { + if (iter == 4) { + /* Skip to next device address. */ + addr = 0xa2; + offset = 0; + } + + rval = qla2x00_read_sfp(vha, phys_addr, sfp_data, + addr, offset, SFP_BLOCK_SIZE, BIT_1); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x706d, + "Unable to read SFP data (%x/%x/%x).\n", rval, + addr, offset); + + return rval; + } + + if (buf && (c < count)) { + u16 sz; + + if ((count - c) >= SFP_BLOCK_SIZE) + sz = SFP_BLOCK_SIZE; + else + sz = count - c; + + memcpy(buf, sfp_data, sz); + buf += SFP_BLOCK_SIZE; + c += sz; + } + phys_addr += SFP_BLOCK_SIZE; + sfp_data += SFP_BLOCK_SIZE; + offset += SFP_BLOCK_SIZE; + } + + return rval; +} + +int qla24xx_res_count_wait(struct scsi_qla_host *vha, + uint16_t *out_mb, int out_mb_sz) +{ + int rval = QLA_FUNCTION_FAILED; + mbx_cmd_t mc; + + if (!vha->hw->flags.fw_started) + goto done; + + memset(&mc, 0, sizeof(mc)); + mc.mb[0] = MBC_GET_RESOURCE_COUNTS; + + rval = qla24xx_send_mb_cmd(vha, &mc); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "%s: fail\n", __func__); + } else { + if (out_mb_sz <= SIZEOF_IOCB_MB_REG) + memcpy(out_mb, mc.mb, out_mb_sz); + else + memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG); + + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "%s: done\n", __func__); + } +done: + return rval; +} + +int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts, + uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr, + uint32_t sfub_len) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + mcp->mb[0] = MBC_SECURE_FLASH_UPDATE; + mcp->mb[1] = opts; + mcp->mb[2] = region; + mcp->mb[3] = MSW(len); + mcp->mb[4] = LSW(len); + mcp->mb[5] = MSW(sfub_dma_addr); + mcp->mb[6] = LSW(sfub_dma_addr); + mcp->mb[7] = MSW(MSD(sfub_dma_addr)); + mcp->mb[8] = LSW(MSD(sfub_dma_addr)); + mcp->mb[9] = sfub_len; + mcp->out_mb = + MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x", + __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1], + mcp->mb[2]); + } + + return rval; +} + +int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr, + uint32_t data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_WRITE_REMOTE_REG; + 
mcp->mb[1] = LSW(addr); + mcp->mb[2] = MSW(addr); + mcp->mb[3] = LSW(data); + mcp->mb[4] = MSW(data); + mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10e9, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, + "Done %s.\n", __func__); + } + + return rval; +} + +int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr, + uint32_t *data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_READ_REMOTE_REG; + mcp->mb[1] = LSW(addr); + mcp->mb[2] = MSW(addr); + mcp->out_mb = MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10e9, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, + "Done %s.\n", __func__); + } + + return rval; +} + +int +ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led) +{ + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval; + + if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n", + __func__, options); + + mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG; + mcp->mb[1] = options; + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + if (options & BIT_0) { + if (options & BIT_1) { + mcp->mb[2] = led[2]; + mcp->out_mb |= MBX_2; + } + if (options & BIT_2) { + mcp->mb[3] = led[0]; + mcp->out_mb |= MBX_3; + } + if (options & BIT_3) { + mcp->mb[4] = led[1]; + mcp->out_mb |= MBX_4; + } + } else { + mcp->in_mb |= MBX_4|MBX_3|MBX_2; + } + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval) { + ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n", + __func__, rval, mcp->mb[0], mcp->mb[1]); + return rval; + } + + if (options & BIT_0) { + ha->beacon_blink_led = 0; + ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__); + } else { + led[2] = mcp->mb[2]; + led[0] = mcp->mb[3]; + led[1] = mcp->mb[4]; + ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n", + __func__, led[0], led[1], led[2]); + } + + return rval; +} + +/** + * qla_no_op_mb(): This MB is used to check if FW is still alive and + * able to generate an interrupt. 
Otherwise, a timeout will trigger + * FW dump + reset + * @vha: host adapter pointer + * Return: None + */ +void qla_no_op_mb(struct scsi_qla_host *vha) +{ + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval; + + memset(&mc, 0, sizeof(mc)); + mcp->mb[0] = 0; // noop cmd= 0 + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = 5; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval) { + ql_dbg(ql_dbg_async, vha, 0x7071, + "Failed %s %x\n", __func__, rval); + } +} + +int qla_mailbox_passthru(scsi_qla_host_t *vha, + uint16_t *mbx_in, uint16_t *mbx_out) +{ + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval = -EINVAL; + + memset(&mc, 0, sizeof(mc)); + /* Receiving all 32 register's contents */ + memcpy(&mcp->mb, (char *)mbx_in, (32 * sizeof(uint16_t))); + + mcp->out_mb = 0xFFFFFFFF; + mcp->in_mb = 0xFFFFFFFF; + + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + mcp->bufp = NULL; + + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xf0a2, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xf0a3, "Done %s.\n", + __func__); + /* passing all 32 register's contents */ + memcpy(mbx_out, &mcp->mb, 32 * sizeof(uint16_t)); + } + + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c new file mode 100644 index 000000000..b67416951 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -0,0 +1,1292 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#include "qla_def.h" +#include "qla_gbl.h" +#include "qla_target.h" + +#include +#include +#include +#include + +#include +#include +#include + +void +qla2x00_vp_stop_timer(scsi_qla_host_t *vha) +{ + if (vha->vp_idx && vha->timer_active) { + del_timer_sync(&vha->timer); + vha->timer_active = 0; + } +} + +static uint32_t +qla24xx_allocate_vp_id(scsi_qla_host_t *vha) +{ + uint32_t vp_id; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + + /* Find an empty slot and assign an vp_id */ + mutex_lock(&ha->vport_lock); + vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1); + if (vp_id > ha->max_npiv_vports) { + ql_dbg(ql_dbg_vport, vha, 0xa000, + "vp_id %d is bigger than max-supported %d.\n", + vp_id, ha->max_npiv_vports); + mutex_unlock(&ha->vport_lock); + return vp_id; + } + + set_bit(vp_id, ha->vp_idx_map); + ha->num_vhosts++; + vha->vp_idx = vp_id; + + spin_lock_irqsave(&ha->vport_slock, flags); + list_add_tail(&vha->list, &ha->vp_list); + spin_unlock_irqrestore(&ha->vport_slock, flags); + + spin_lock_irqsave(&ha->hardware_lock, flags); + qla_update_vp_map(vha, SET_VP_IDX); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + mutex_unlock(&ha->vport_lock); + return vp_id; +} + +void +qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) +{ + uint16_t vp_id; + struct qla_hw_data *ha = vha->hw; + unsigned long flags = 0; + u32 i, bailout; + + mutex_lock(&ha->vport_lock); + /* + * Wait for all pending activities to finish before removing vport from + * the list. 
+ * Lock needs to be held for safe removal from the list (it + * ensures no active vp_list traversal while the vport is removed + * from the queue) + */ + bailout = 0; + for (i = 0; i < 500; i++) { + spin_lock_irqsave(&ha->vport_slock, flags); + if (atomic_read(&vha->vref_count) == 0) { + list_del(&vha->list); + qla_update_vp_map(vha, RESET_VP_IDX); + bailout = 1; + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + + if (bailout) + break; + else + msleep(20); + } + if (!bailout) { + ql_log(ql_log_info, vha, 0xfffa, + "vha->vref_count=%u timeout\n", vha->vref_count.counter); + spin_lock_irqsave(&ha->vport_slock, flags); + list_del(&vha->list); + qla_update_vp_map(vha, RESET_VP_IDX); + spin_unlock_irqrestore(&ha->vport_slock, flags); + } + + vp_id = vha->vp_idx; + ha->num_vhosts--; + clear_bit(vp_id, ha->vp_idx_map); + + mutex_unlock(&ha->vport_lock); +} + +static scsi_qla_host_t * +qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name) +{ + scsi_qla_host_t *vha; + struct scsi_qla_host *tvha; + unsigned long flags; + + spin_lock_irqsave(&ha->vport_slock, flags); + /* Locate matching device in database. */ + list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { + if (!memcmp(port_name, vha->port_name, WWN_SIZE)) { + spin_unlock_irqrestore(&ha->vport_slock, flags); + return vha; + } + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + return NULL; +} + +/* + * qla2x00_mark_vp_devices_dead + * Updates fcport state when device goes offline. + * + * Input: + * ha = adapter block pointer. + * fcport = port structure pointer. + * + * Return: + * None. + * + * Context: + */ +static void +qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) +{ + /* + * !!! NOTE !!! + * This function, if called in contexts other than vp create, disable + * or delete, please make sure this is synchronized with the + * delete thread. 
+ */ + fc_port_t *fcport; + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + ql_dbg(ql_dbg_vport, vha, 0xa001, + "Marking port dead, loop_id=0x%04x : %x.\n", + fcport->loop_id, fcport->vha->vp_idx); + + qla2x00_mark_device_lost(vha, fcport, 0); + qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); + } +} + +int +qla24xx_disable_vp(scsi_qla_host_t *vha) +{ + unsigned long flags; + int ret = QLA_SUCCESS; + fc_port_t *fcport; + + if (vha->hw->flags.edif_enabled) { + if (DBELL_ACTIVE(vha)) + qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE, + FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN); + /* delete sessions and flush sa_indexes */ + qla2x00_wait_for_sess_deletion(vha); + } + + if (vha->hw->flags.fw_started) + ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); + + atomic_set(&vha->loop_state, LOOP_DOWN); + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + list_for_each_entry(fcport, &vha->vp_fcports, list) + fcport->logout_on_delete = 0; + + if (!vha->hw->flags.edif_enabled) + qla2x00_wait_for_sess_deletion(vha); + + /* Remove port id from vp target map */ + spin_lock_irqsave(&vha->hw->hardware_lock, flags); + qla_update_vp_map(vha, RESET_AL_PA); + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); + + qla2x00_mark_vp_devices_dead(vha); + atomic_set(&vha->vp_state, VP_FAILED); + vha->flags.management_server_logged_in = 0; + if (ret == QLA_SUCCESS) { + fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED); + } else { + fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); + return -1; + } + return 0; +} + +int +qla24xx_enable_vp(scsi_qla_host_t *vha) +{ + int ret; + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + + /* Check if physical ha port is Up */ + if (atomic_read(&base_vha->loop_state) == LOOP_DOWN || + atomic_read(&base_vha->loop_state) == LOOP_DEAD || + !(ha->current_topology & ISP_CFG_F)) { + vha->vp_err_state = VP_ERR_PORTDWN; + fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN); + ql_dbg(ql_dbg_taskm, vha, 0x800b, + "%s skip enable. 
loop_state %x topo %x\n", + __func__, base_vha->loop_state.counter, + ha->current_topology); + + goto enable_failed; + } + + /* Initialize the new vport unless it is a persistent port */ + mutex_lock(&ha->vport_lock); + ret = qla24xx_modify_vp_config(vha); + mutex_unlock(&ha->vport_lock); + + if (ret != QLA_SUCCESS) { + fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); + goto enable_failed; + } + + ql_dbg(ql_dbg_taskm, vha, 0x801a, + "Virtual port with id: %d - Enabled.\n", vha->vp_idx); + return 0; + +enable_failed: + ql_dbg(ql_dbg_taskm, vha, 0x801b, + "Virtual port with id: %d - Disabled.\n", vha->vp_idx); + return 1; +} + +static void +qla24xx_configure_vp(scsi_qla_host_t *vha) +{ + struct fc_vport *fc_vport; + int ret; + + fc_vport = vha->fc_vport; + + ql_dbg(ql_dbg_vport, vha, 0xa002, + "%s: change request #3.\n", __func__); + ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx); + if (ret != QLA_SUCCESS) { + ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable " + "receiving of RSCN requests: 0x%x.\n", ret); + return; + } else { + /* Corresponds to SCR enabled */ + clear_bit(VP_SCR_NEEDED, &vha->vp_flags); + } + + vha->flags.online = 1; + if (qla24xx_configure_vhba(vha)) + return; + + atomic_set(&vha->vp_state, VP_ACTIVE); + fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); +} + +void +qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) +{ + scsi_qla_host_t *vha, *tvp; + struct qla_hw_data *ha = rsp->hw; + int i = 0; + unsigned long flags; + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) { + if (vha->vp_idx) { + if (test_bit(VPORT_DELETE, &vha->dpc_flags)) + continue; + + atomic_inc(&vha->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + + switch (mb[0]) { + case MBA_LIP_OCCURRED: + case MBA_LOOP_UP: + case MBA_LOOP_DOWN: + case MBA_LIP_RESET: + case MBA_POINT_TO_POINT: + case MBA_CHG_IN_CONNECTION: + ql_dbg(ql_dbg_async, vha, 0x5024, + "Async_event for VP[%d], mb=0x%x vha=%p.\n", + i, *mb, vha); + qla2x00_async_event(vha, rsp, mb); + break; + case MBA_PORT_UPDATE: + case MBA_RSCN_UPDATE: + if ((mb[3] & 0xff) == vha->vp_idx) { + ql_dbg(ql_dbg_async, vha, 0x5024, + "Async_event for VP[%d], mb=0x%x vha=%p\n", + i, *mb, vha); + qla2x00_async_event(vha, rsp, mb); + } + break; + } + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vha->vref_count); + wake_up(&vha->vref_waitq); + } + i++; + } + spin_unlock_irqrestore(&ha->vport_slock, flags); +} + +int +qla2x00_vp_abort_isp(scsi_qla_host_t *vha) +{ + fc_port_t *fcport; + + /* + * To exclusively reset vport, we need to log it out first. + * Note: This control_vp can fail if ISP reset is already + * issued, this is expected, as the vp would be already + * logged out due to ISP reset. + */ + if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { + qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); + list_for_each_entry(fcport, &vha->vp_fcports, list) + fcport->logout_on_delete = 0; + } + + /* + * Physical port will do most of the abort and recovery work. 
We can + * just treat it as a loop down + */ + if (atomic_read(&vha->loop_state) != LOOP_DOWN) { + atomic_set(&vha->loop_state, LOOP_DOWN); + qla2x00_mark_all_devices_lost(vha); + } else { + if (!atomic_read(&vha->loop_down_timer)) + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + } + + ql_dbg(ql_dbg_taskm, vha, 0x801d, + "Scheduling enable of Vport %d.\n", vha->vp_idx); + + return qla24xx_enable_vp(vha); +} + +static int +qla2x00_do_dpc_vp(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + + ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012, + "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags); + + /* Check if Fw is ready to configure VP first */ + if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) { + if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { + /* VP acquired. complete port configuration */ + ql_dbg(ql_dbg_dpc, vha, 0x4014, + "Configure VP scheduled.\n"); + qla24xx_configure_vp(vha); + ql_dbg(ql_dbg_dpc, vha, 0x4015, + "Configure VP end.\n"); + return 0; + } + } + + if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) { + if (atomic_read(&vha->loop_state) == LOOP_READY) { + qla24xx_process_purex_list(&vha->purex_list); + clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags); + } + } + + if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) && + !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && + atomic_read(&vha->loop_state) != LOOP_DOWN) { + + if (!vha->relogin_jif || + time_after_eq(jiffies, vha->relogin_jif)) { + vha->relogin_jif = jiffies + HZ; + clear_bit(RELOGIN_NEEDED, &vha->dpc_flags); + + ql_dbg(ql_dbg_dpc, vha, 0x4018, + "Relogin needed scheduled.\n"); + qla24xx_post_relogin_work(vha); + } + } + + if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && + (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) { + clear_bit(RESET_ACTIVE, &vha->dpc_flags); + } + + if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { + if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { + ql_dbg(ql_dbg_dpc, vha, 0x401a, + "Loop resync scheduled.\n"); + qla2x00_loop_resync(vha); + clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); + ql_dbg(ql_dbg_dpc, vha, 0x401b, + "Loop resync end.\n"); + } + } + + ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c, + "Exiting %s.\n", __func__); + return 0; +} + +void +qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *vp, *tvp; + unsigned long flags = 0; + + if (vha->vp_idx) + return; + if (list_empty(&ha->vp_list)) + return; + + clear_bit(VP_DPC_NEEDED, &vha->dpc_flags); + + if (!(ha->current_topology & ISP_CFG_F)) + return; + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { + if (vp->vp_idx) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + + qla2x00_do_dpc_vp(vp); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } + } + spin_unlock_irqrestore(&ha->vport_slock, flags); +} + +int +qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport) +{ + scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); + struct qla_hw_data *ha = base_vha->hw; + scsi_qla_host_t *vha; + uint8_t port_name[WWN_SIZE]; + + if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR) + return VPCERR_UNSUPPORTED; + + /* Check up the F/W and H/W support NPIV */ + if (!ha->flags.npiv_supported) + return VPCERR_UNSUPPORTED; + + /* Check up whether npiv supported switch presented */ + if (!(ha->switch_cap & FLOGI_MID_SUPPORT)) 
+ return VPCERR_NO_FABRIC_SUPP; + + /* Check up unique WWPN */ + u64_to_wwn(fc_vport->port_name, port_name); + if (!memcmp(port_name, base_vha->port_name, WWN_SIZE)) + return VPCERR_BAD_WWN; + vha = qla24xx_find_vhost_by_name(ha, port_name); + if (vha) + return VPCERR_BAD_WWN; + + /* Check up max-npiv-supports */ + if (ha->num_vhosts > ha->max_npiv_vports) { + ql_dbg(ql_dbg_vport, vha, 0xa004, + "num_vhosts %ud is bigger " + "than max_npiv_vports %ud.\n", + ha->num_vhosts, ha->max_npiv_vports); + return VPCERR_UNSUPPORTED; + } + return 0; +} + +scsi_qla_host_t * +qla24xx_create_vhost(struct fc_vport *fc_vport) +{ + scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); + struct qla_hw_data *ha = base_vha->hw; + scsi_qla_host_t *vha; + const struct scsi_host_template *sht = &qla2xxx_driver_template; + struct Scsi_Host *host; + + vha = qla2x00_create_host(sht, ha); + if (!vha) { + ql_log(ql_log_warn, vha, 0xa005, + "scsi_host_alloc() failed for vport.\n"); + return(NULL); + } + + host = vha->host; + fc_vport->dd_data = vha; + /* New host info */ + u64_to_wwn(fc_vport->node_name, vha->node_name); + u64_to_wwn(fc_vport->port_name, vha->port_name); + + vha->fc_vport = fc_vport; + vha->device_flags = 0; + vha->vp_idx = qla24xx_allocate_vp_id(vha); + if (vha->vp_idx > ha->max_npiv_vports) { + ql_dbg(ql_dbg_vport, vha, 0xa006, + "Couldn't allocate vp_id.\n"); + goto create_vhost_failed; + } + vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha); + + vha->dpc_flags = 0L; + ha->dpc_active = 0; + set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); + set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); + + /* + * To fix the issue of processing a parent's RSCN for the vport before + * its SCR is complete. + */ + set_bit(VP_SCR_NEEDED, &vha->vp_flags); + atomic_set(&vha->loop_state, LOOP_DOWN); + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + + qla2x00_start_timer(vha, WATCH_INTERVAL); + + vha->req = base_vha->req; + vha->flags.nvme_enabled = base_vha->flags.nvme_enabled; + host->can_queue = base_vha->req->length + 128; + host->cmd_per_lun = 3; + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) + host->max_cmd_len = 32; + else + host->max_cmd_len = MAX_CMDSZ; + host->max_channel = MAX_BUSES - 1; + host->max_lun = ql2xmaxlun; + host->unique_id = host->host_no; + host->max_id = ha->max_fibre_devices; + host->transportt = qla2xxx_transport_vport_template; + + ql_dbg(ql_dbg_vport, vha, 0xa007, + "Detect vport hba %ld at address = %p.\n", + vha->host_no, vha); + + vha->flags.init_done = 1; + + mutex_lock(&ha->vport_lock); + set_bit(vha->vp_idx, ha->vp_idx_map); + ha->cur_vport_count++; + mutex_unlock(&ha->vport_lock); + + return vha; + +create_vhost_failed: + return NULL; +} + +static void +qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req) +{ + struct qla_hw_data *ha = vha->hw; + uint16_t que_id = req->id; + + dma_free_coherent(&ha->pdev->dev, (req->length + 1) * + sizeof(request_t), req->ring, req->dma); + req->ring = NULL; + req->dma = 0; + if (que_id) { + ha->req_q_map[que_id] = NULL; + mutex_lock(&ha->vport_lock); + clear_bit(que_id, ha->req_qid_map); + mutex_unlock(&ha->vport_lock); + } + kfree(req->outstanding_cmds); + kfree(req); +} + +static void +qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) +{ + struct qla_hw_data *ha = vha->hw; + uint16_t que_id = rsp->id; + + if (rsp->msix && rsp->msix->have_irq) { + free_irq(rsp->msix->vector, rsp->msix->handle); + rsp->msix->have_irq = 0; + rsp->msix->in_use = 0; + rsp->msix->handle = NULL; + } + 
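+	/* Release the response ring DMA memory and give back the queue id. */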
dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) * + sizeof(response_t), rsp->ring, rsp->dma); + rsp->ring = NULL; + rsp->dma = 0; + if (que_id) { + ha->rsp_q_map[que_id] = NULL; + mutex_lock(&ha->vport_lock); + clear_bit(que_id, ha->rsp_qid_map); + mutex_unlock(&ha->vport_lock); + } + kfree(rsp); +} + +int +qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) +{ + int ret = QLA_SUCCESS; + + if (req && vha->flags.qpairs_req_created) { + req->options |= BIT_0; + ret = qla25xx_init_req_que(vha, req); + if (ret != QLA_SUCCESS) + return QLA_FUNCTION_FAILED; + + qla25xx_free_req_que(vha, req); + } + + return ret; +} + +int +qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) +{ + int ret = QLA_SUCCESS; + + if (rsp && vha->flags.qpairs_rsp_created) { + rsp->options |= BIT_0; + ret = qla25xx_init_rsp_que(vha, rsp); + if (ret != QLA_SUCCESS) + return QLA_FUNCTION_FAILED; + + qla25xx_free_rsp_que(vha, rsp); + } + + return ret; +} + +/* Delete all queues for a given vhost */ +int +qla25xx_delete_queues(struct scsi_qla_host *vha) +{ + int cnt, ret = 0; + struct req_que *req = NULL; + struct rsp_que *rsp = NULL; + struct qla_hw_data *ha = vha->hw; + struct qla_qpair *qpair, *tqpair; + + if (ql2xmqsupport || ql2xnvmeenable) { + list_for_each_entry_safe(qpair, tqpair, &vha->qp_list, + qp_list_elem) + qla2xxx_delete_qpair(vha, qpair); + } else { + /* Delete request queues */ + for (cnt = 1; cnt < ha->max_req_queues; cnt++) { + req = ha->req_q_map[cnt]; + if (req && test_bit(cnt, ha->req_qid_map)) { + ret = qla25xx_delete_req_que(vha, req); + if (ret != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x00ea, + "Couldn't delete req que %d.\n", + req->id); + return ret; + } + } + } + + /* Delete response queues */ + for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { + rsp = ha->rsp_q_map[cnt]; + if (rsp && test_bit(cnt, ha->rsp_qid_map)) { + ret = qla25xx_delete_rsp_que(vha, rsp); + if (ret != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x00eb, + "Couldn't delete rsp que %d.\n", + rsp->id); + return ret; + } + } + } + } + + return ret; +} + +int +qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, + uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp) +{ + int ret = 0; + struct req_que *req = NULL; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); + uint16_t que_id = 0; + device_reg_t *reg; + uint32_t cnt; + + req = kzalloc(sizeof(struct req_que), GFP_KERNEL); + if (req == NULL) { + ql_log(ql_log_fatal, base_vha, 0x00d9, + "Failed to allocate memory for request queue.\n"); + goto failed; + } + + req->length = REQUEST_ENTRY_CNT_24XX; + req->ring = dma_alloc_coherent(&ha->pdev->dev, + (req->length + 1) * sizeof(request_t), + &req->dma, GFP_KERNEL); + if (req->ring == NULL) { + ql_log(ql_log_fatal, base_vha, 0x00da, + "Failed to allocate memory for request_ring.\n"); + goto que_failed; + } + + ret = qla2x00_alloc_outstanding_cmds(ha, req); + if (ret != QLA_SUCCESS) + goto que_failed; + + mutex_lock(&ha->mq_lock); + que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); + if (que_id >= ha->max_req_queues) { + mutex_unlock(&ha->mq_lock); + ql_log(ql_log_warn, base_vha, 0x00db, + "No resources to create additional request queue.\n"); + goto que_failed; + } + set_bit(que_id, ha->req_qid_map); + ha->req_q_map[que_id] = req; + req->rid = rid; + req->vp_idx = vp_idx; + req->qos = qos; + + ql_dbg(ql_dbg_multiq, base_vha, 0xc002, + "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", + 
que_id, req->rid, req->vp_idx, req->qos); + ql_dbg(ql_dbg_init, base_vha, 0x00dc, + "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", + que_id, req->rid, req->vp_idx, req->qos); + if (rsp_que < 0) + req->rsp = NULL; + else + req->rsp = ha->rsp_q_map[rsp_que]; + /* Use alternate PCI bus number */ + if (MSB(req->rid)) + options |= BIT_4; + /* Use alternate PCI devfn */ + if (LSB(req->rid)) + options |= BIT_5; + req->options = options; + + ql_dbg(ql_dbg_multiq, base_vha, 0xc003, + "options=0x%x.\n", req->options); + ql_dbg(ql_dbg_init, base_vha, 0x00dd, + "options=0x%x.\n", req->options); + for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) + req->outstanding_cmds[cnt] = NULL; + req->current_outstanding_cmd = 1; + + req->ring_ptr = req->ring; + req->ring_index = 0; + req->cnt = req->length; + req->id = que_id; + reg = ISP_QUE_REG(ha, que_id); + req->req_q_in = ®->isp25mq.req_q_in; + req->req_q_out = ®->isp25mq.req_q_out; + req->max_q_depth = ha->req_q_map[0]->max_q_depth; + req->out_ptr = (uint16_t *)(req->ring + req->length); + mutex_unlock(&ha->mq_lock); + ql_dbg(ql_dbg_multiq, base_vha, 0xc004, + "ring_ptr=%p ring_index=%d, " + "cnt=%d id=%d max_q_depth=%d.\n", + req->ring_ptr, req->ring_index, + req->cnt, req->id, req->max_q_depth); + ql_dbg(ql_dbg_init, base_vha, 0x00de, + "ring_ptr=%p ring_index=%d, " + "cnt=%d id=%d max_q_depth=%d.\n", + req->ring_ptr, req->ring_index, req->cnt, + req->id, req->max_q_depth); + + if (startqp) { + ret = qla25xx_init_req_que(base_vha, req); + if (ret != QLA_SUCCESS) { + ql_log(ql_log_fatal, base_vha, 0x00df, + "%s failed.\n", __func__); + mutex_lock(&ha->mq_lock); + clear_bit(que_id, ha->req_qid_map); + mutex_unlock(&ha->mq_lock); + goto que_failed; + } + vha->flags.qpairs_req_created = 1; + } + + return req->id; + +que_failed: + qla25xx_free_req_que(base_vha, req); +failed: + return 0; +} + +static void qla_do_work(struct work_struct *work) +{ + unsigned long flags; + struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work); + struct scsi_qla_host *vha = qpair->vha; + + spin_lock_irqsave(&qpair->qp_lock, flags); + qla24xx_process_response_queue(vha, qpair->rsp); + spin_unlock_irqrestore(&qpair->qp_lock, flags); + +} + +/* create response queue */ +int +qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, + uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp) +{ + int ret = 0; + struct rsp_que *rsp = NULL; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); + uint16_t que_id = 0; + device_reg_t *reg; + + rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); + if (rsp == NULL) { + ql_log(ql_log_warn, base_vha, 0x0066, + "Failed to allocate memory for response queue.\n"); + goto failed; + } + + rsp->length = RESPONSE_ENTRY_CNT_MQ; + rsp->ring = dma_alloc_coherent(&ha->pdev->dev, + (rsp->length + 1) * sizeof(response_t), + &rsp->dma, GFP_KERNEL); + if (rsp->ring == NULL) { + ql_log(ql_log_warn, base_vha, 0x00e1, + "Failed to allocate memory for response ring.\n"); + goto que_failed; + } + + mutex_lock(&ha->mq_lock); + que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues); + if (que_id >= ha->max_rsp_queues) { + mutex_unlock(&ha->mq_lock); + ql_log(ql_log_warn, base_vha, 0x00e2, + "No resources to create additional request queue.\n"); + goto que_failed; + } + set_bit(que_id, ha->rsp_qid_map); + + rsp->msix = qpair->msix; + + ha->rsp_q_map[que_id] = rsp; + rsp->rid = rid; + rsp->vp_idx = vp_idx; + rsp->hw = ha; + ql_dbg(ql_dbg_init, base_vha, 
0x00e4, + "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n", + que_id, rsp->rid, rsp->vp_idx, rsp->hw); + /* Use alternate PCI bus number */ + if (MSB(rsp->rid)) + options |= BIT_4; + /* Use alternate PCI devfn */ + if (LSB(rsp->rid)) + options |= BIT_5; + /* Enable MSIX handshake mode on for uncapable adapters */ + if (!IS_MSIX_NACK_CAPABLE(ha)) + options |= BIT_6; + + /* Set option to indicate response queue creation */ + options |= BIT_1; + + rsp->options = options; + rsp->id = que_id; + reg = ISP_QUE_REG(ha, que_id); + rsp->rsp_q_in = ®->isp25mq.rsp_q_in; + rsp->rsp_q_out = ®->isp25mq.rsp_q_out; + rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length); + mutex_unlock(&ha->mq_lock); + ql_dbg(ql_dbg_multiq, base_vha, 0xc00b, + "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n", + rsp->options, rsp->id, rsp->rsp_q_in, + rsp->rsp_q_out); + ql_dbg(ql_dbg_init, base_vha, 0x00e5, + "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n", + rsp->options, rsp->id, rsp->rsp_q_in, + rsp->rsp_q_out); + + ret = qla25xx_request_irq(ha, qpair, qpair->msix, + ha->flags.disable_msix_handshake ? + QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS); + if (ret) + goto que_failed; + + if (startqp) { + ret = qla25xx_init_rsp_que(base_vha, rsp); + if (ret != QLA_SUCCESS) { + ql_log(ql_log_fatal, base_vha, 0x00e7, + "%s failed.\n", __func__); + mutex_lock(&ha->mq_lock); + clear_bit(que_id, ha->rsp_qid_map); + mutex_unlock(&ha->mq_lock); + goto que_failed; + } + vha->flags.qpairs_rsp_created = 1; + } + rsp->req = NULL; + + qla2x00_init_response_q_entries(rsp); + if (qpair->hw->wq) + INIT_WORK(&qpair->q_work, qla_do_work); + return rsp->id; + +que_failed: + qla25xx_free_rsp_que(base_vha, rsp); +failed: + return 0; +} + +static void qla_ctrlvp_sp_done(srb_t *sp, int res) +{ + if (sp->comp) + complete(sp->comp); + /* don't free sp here. Let the caller do the free */ +} + +/** + * qla24xx_control_vp() - Enable a virtual port for given host + * @vha: adapter block pointer + * @cmd: command type to be sent for enable virtual port + * + * Return: qla2xxx local function return status code. + */ +int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) +{ + int rval = QLA_MEMORY_ALLOC_FAILED; + struct qla_hw_data *ha = vha->hw; + int vp_index = vha->vp_idx; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + DECLARE_COMPLETION_ONSTACK(comp); + srb_t *sp; + + ql_dbg(ql_dbg_vport, vha, 0x10c1, + "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index); + + if (vp_index == 0 || vp_index >= ha->max_npiv_vports) + return QLA_PARAMETER_ERROR; + + /* ref: INIT */ + sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL); + if (!sp) + return rval; + + sp->type = SRB_CTRL_VP; + sp->name = "ctrl_vp"; + sp->comp = ∁ + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla_ctrlvp_sp_done); + sp->u.iocb_cmd.u.ctrlvp.cmd = cmd; + sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "%s: %s Failed submission. %x.\n", + __func__, sp->name, rval); + goto done; + } + + ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n", + sp->name, sp->handle); + + wait_for_completion(&comp); + sp->comp = NULL; + + rval = sp->rc; + switch (rval) { + case QLA_FUNCTION_TIMEOUT: + ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n", + __func__, sp->name, rval); + break; + case QLA_SUCCESS: + ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n", + __func__, sp->name); + break; + default: + ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. 
%x.\n", + __func__, sp->name, rval); + break; + } +done: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + return rval; +} + +struct scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha, uint16_t vp_idx) +{ + struct qla_hw_data *ha = vha->hw; + + if (vha->vp_idx == vp_idx) + return vha; + + BUG_ON(ha->vp_map == NULL); + if (likely(test_bit(vp_idx, ha->vp_idx_map))) + return ha->vp_map[vp_idx].vha; + + return NULL; +} + +/* vport_slock to be held by the caller */ +void +qla_update_vp_map(struct scsi_qla_host *vha, int cmd) +{ + void *slot; + u32 key; + int rc; + + if (!vha->hw->vp_map) + return; + + key = vha->d_id.b24; + + switch (cmd) { + case SET_VP_IDX: + vha->hw->vp_map[vha->vp_idx].vha = vha; + break; + case SET_AL_PA: + slot = btree_lookup32(&vha->hw->host_map, key); + if (!slot) { + ql_dbg(ql_dbg_disc, vha, 0xf018, + "Save vha in host_map %p %06x\n", vha, key); + rc = btree_insert32(&vha->hw->host_map, + key, vha, GFP_ATOMIC); + if (rc) + ql_log(ql_log_info, vha, 0xd03e, + "Unable to insert s_id into host_map: %06x\n", + key); + return; + } + ql_dbg(ql_dbg_disc, vha, 0xf019, + "replace existing vha in host_map %p %06x\n", vha, key); + btree_update32(&vha->hw->host_map, key, vha); + break; + case RESET_VP_IDX: + vha->hw->vp_map[vha->vp_idx].vha = NULL; + break; + case RESET_AL_PA: + ql_dbg(ql_dbg_disc, vha, 0xf01a, + "clear vha in host_map %p %06x\n", vha, key); + slot = btree_lookup32(&vha->hw->host_map, key); + if (slot) + btree_remove32(&vha->hw->host_map, key); + vha->d_id.b24 = 0; + break; + } +} + +void qla_update_host_map(struct scsi_qla_host *vha, port_id_t id) +{ + + if (!vha->d_id.b24) { + vha->d_id = id; + qla_update_vp_map(vha, SET_AL_PA); + } else if (vha->d_id.b24 != id.b24) { + qla_update_vp_map(vha, RESET_AL_PA); + vha->d_id = id; + qla_update_vp_map(vha, SET_AL_PA); + } +} + +int qla_create_buf_pool(struct scsi_qla_host *vha, struct qla_qpair *qp) +{ + int sz; + + qp->buf_pool.num_bufs = qp->req->length; + + sz = BITS_TO_LONGS(qp->req->length); + qp->buf_pool.buf_map = kcalloc(sz, sizeof(long), GFP_KERNEL); + if (!qp->buf_pool.buf_map) { + ql_log(ql_log_warn, vha, 0x0186, + "Failed to allocate buf_map(%zd).\n", sz * sizeof(unsigned long)); + return -ENOMEM; + } + sz = qp->req->length * sizeof(void *); + qp->buf_pool.buf_array = kcalloc(qp->req->length, sizeof(void *), GFP_KERNEL); + if (!qp->buf_pool.buf_array) { + ql_log(ql_log_warn, vha, 0x0186, + "Failed to allocate buf_array(%d).\n", sz); + kfree(qp->buf_pool.buf_map); + return -ENOMEM; + } + sz = qp->req->length * sizeof(dma_addr_t); + qp->buf_pool.dma_array = kcalloc(qp->req->length, sizeof(dma_addr_t), GFP_KERNEL); + if (!qp->buf_pool.dma_array) { + ql_log(ql_log_warn, vha, 0x0186, + "Failed to allocate dma_array(%d).\n", sz); + kfree(qp->buf_pool.buf_map); + kfree(qp->buf_pool.buf_array); + return -ENOMEM; + } + set_bit(0, qp->buf_pool.buf_map); + return 0; +} + +void qla_free_buf_pool(struct qla_qpair *qp) +{ + int i; + struct qla_hw_data *ha = qp->vha->hw; + + for (i = 0; i < qp->buf_pool.num_bufs; i++) { + if (qp->buf_pool.buf_array[i] && qp->buf_pool.dma_array[i]) + dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[i], + qp->buf_pool.dma_array[i]); + qp->buf_pool.buf_array[i] = NULL; + qp->buf_pool.dma_array[i] = 0; + } + + kfree(qp->buf_pool.dma_array); + kfree(qp->buf_pool.buf_array); + kfree(qp->buf_pool.buf_map); +} + +/* it is assume qp->qp_lock is held at this point */ +int qla_get_buf(struct scsi_qla_host *vha, struct qla_qpair *qp, struct qla_buf_dsc *dsc) +{ 
+ u16 tag, i = 0; + void *buf; + dma_addr_t buf_dma; + struct qla_hw_data *ha = vha->hw; + + dsc->tag = TAG_FREED; +again: + tag = find_first_zero_bit(qp->buf_pool.buf_map, qp->buf_pool.num_bufs); + if (tag >= qp->buf_pool.num_bufs) { + ql_dbg(ql_dbg_io, vha, 0x00e2, + "qp(%d) ran out of buf resource.\n", qp->id); + return -EIO; + } + if (tag == 0) { + set_bit(0, qp->buf_pool.buf_map); + i++; + if (i == 5) { + ql_dbg(ql_dbg_io, vha, 0x00e3, + "qp(%d) unable to get tag.\n", qp->id); + return -EIO; + } + goto again; + } + + if (!qp->buf_pool.buf_array[tag]) { + buf = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, GFP_ATOMIC, &buf_dma); + if (!buf) { + ql_log(ql_log_fatal, vha, 0x13b1, + "Failed to allocate buf.\n"); + return -ENOMEM; + } + + dsc->buf = qp->buf_pool.buf_array[tag] = buf; + dsc->buf_dma = qp->buf_pool.dma_array[tag] = buf_dma; + qp->buf_pool.num_alloc++; + } else { + dsc->buf = qp->buf_pool.buf_array[tag]; + dsc->buf_dma = qp->buf_pool.dma_array[tag]; + memset(dsc->buf, 0, FCP_CMND_DMA_POOL_SIZE); + } + + qp->buf_pool.num_active++; + if (qp->buf_pool.num_active > qp->buf_pool.max_used) + qp->buf_pool.max_used = qp->buf_pool.num_active; + + dsc->tag = tag; + set_bit(tag, qp->buf_pool.buf_map); + return 0; +} + +static void qla_trim_buf(struct qla_qpair *qp, u16 trim) +{ + int i, j; + struct qla_hw_data *ha = qp->vha->hw; + + if (!trim) + return; + + for (i = 0; i < trim; i++) { + j = qp->buf_pool.num_alloc - 1; + if (test_bit(j, qp->buf_pool.buf_map)) { + ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x300b, + "QP id(%d): trim active buf[%d]. Remain %d bufs\n", + qp->id, j, qp->buf_pool.num_alloc); + return; + } + + if (qp->buf_pool.buf_array[j]) { + dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[j], + qp->buf_pool.dma_array[j]); + qp->buf_pool.buf_array[j] = NULL; + qp->buf_pool.dma_array[j] = 0; + } + qp->buf_pool.num_alloc--; + if (!qp->buf_pool.num_alloc) + break; + } + ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x3010, + "QP id(%d): trimmed %d bufs. Remain %d bufs\n", + qp->id, trim, qp->buf_pool.num_alloc); +} + +static void __qla_adjust_buf(struct qla_qpair *qp) +{ + u32 trim; + + qp->buf_pool.take_snapshot = 0; + qp->buf_pool.prev_max = qp->buf_pool.max_used; + qp->buf_pool.max_used = qp->buf_pool.num_active; + + if (qp->buf_pool.prev_max > qp->buf_pool.max_used && + qp->buf_pool.num_alloc > qp->buf_pool.max_used) { + /* down trend */ + trim = qp->buf_pool.num_alloc - qp->buf_pool.max_used; + trim = (trim * 10) / 100; + trim = trim ? 
trim : 1; + qla_trim_buf(qp, trim); + } else if (!qp->buf_pool.prev_max && !qp->buf_pool.max_used) { + /* 2 periods of no io */ + qla_trim_buf(qp, qp->buf_pool.num_alloc); + } +} + +/* it is assume qp->qp_lock is held at this point */ +void qla_put_buf(struct qla_qpair *qp, struct qla_buf_dsc *dsc) +{ + if (dsc->tag == TAG_FREED) + return; + lockdep_assert_held(qp->qp_lock_ptr); + + clear_bit(dsc->tag, qp->buf_pool.buf_map); + qp->buf_pool.num_active--; + dsc->tag = TAG_FREED; + + if (qp->buf_pool.take_snapshot) + __qla_adjust_buf(qp); +} + +#define EXPIRE (60 * HZ) +void qla_adjust_buf(struct scsi_qla_host *vha) +{ + unsigned long flags; + int i; + struct qla_qpair *qp; + + if (vha->vp_idx) + return; + + if (!vha->buf_expired) { + vha->buf_expired = jiffies + EXPIRE; + return; + } + if (time_before(jiffies, vha->buf_expired)) + return; + + vha->buf_expired = jiffies + EXPIRE; + + for (i = 0; i < vha->hw->num_qpairs; i++) { + qp = vha->hw->queue_pair_map[i]; + if (!qp) + continue; + if (!qp->buf_pool.num_alloc) + continue; + + if (qp->buf_pool.take_snapshot) { + /* no io has gone through in the last EXPIRE period */ + spin_lock_irqsave(qp->qp_lock_ptr, flags); + __qla_adjust_buf(qp); + spin_unlock_irqrestore(qp->qp_lock_ptr, flags); + } else { + qp->buf_pool.take_snapshot = 1; + } + } +} diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c new file mode 100644 index 000000000..083f94e43 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -0,0 +1,3407 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#include "qla_def.h" +#include +#include +#include +#include +#include +#include +#include + + +/* QLAFX00 specific Mailbox implementation functions */ + +/* + * qlafx00_mailbox_command + * Issue mailbox command and waits for completion. + * + * Input: + * ha = adapter block pointer. + * mcp = driver internal mbx struct pointer. + * + * Output: + * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data. + * + * Returns: + * 0 : QLA_SUCCESS = cmd performed success + * 1 : QLA_FUNCTION_FAILED (error encountered) + * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered) + * + * Context: + * Kernel context. 
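+ *
+ * Minimal caller sketch (illustrative; it mirrors
+ * qlafx00_get_firmware_state() below): fill a struct mbx_cmd_32 with the
+ * request registers, the out_mb/in_mb bitmasks and a timeout, then read
+ * the completion registers back from mb[]:
+ *
+ *	struct mbx_cmd_32 mc = { 0 };
+ *
+ *	mc.mb[0] = MBC_GET_FIRMWARE_STATE;
+ *	mc.out_mb = MBX_0;
+ *	mc.in_mb = MBX_1|MBX_0;
+ *	mc.tov = MBX_TOV_SECONDS;
+ *	rval = qlafx00_mailbox_command(vha, &mc);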
+ */ +static int +qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp) + +{ + int rval; + unsigned long flags = 0; + device_reg_t *reg; + uint8_t abort_active; + uint8_t io_lock_on; + uint16_t command = 0; + uint32_t *iptr; + __le32 __iomem *optr; + uint32_t cnt; + uint32_t mboxes; + unsigned long wait_time; + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + + if (ha->pdev->error_state == pci_channel_io_perm_failure) { + ql_log(ql_log_warn, vha, 0x115c, + "PCI channel failed permanently, exiting.\n"); + return QLA_FUNCTION_TIMEOUT; + } + + if (vha->device_flags & DFLG_DEV_FAILED) { + ql_log(ql_log_warn, vha, 0x115f, + "Device in failed state, exiting.\n"); + return QLA_FUNCTION_TIMEOUT; + } + + reg = ha->iobase; + io_lock_on = base_vha->flags.init_done; + + rval = QLA_SUCCESS; + abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + + if (ha->flags.pci_channel_io_perm_failure) { + ql_log(ql_log_warn, vha, 0x1175, + "Perm failure on EEH timeout MBX, exiting.\n"); + return QLA_FUNCTION_TIMEOUT; + } + + if (ha->flags.isp82xx_fw_hung) { + /* Setting Link-Down error */ + mcp->mb[0] = MBS_LINK_DOWN_ERROR; + ql_log(ql_log_warn, vha, 0x1176, + "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); + rval = QLA_FUNCTION_FAILED; + goto premature_exit; + } + + /* + * Wait for active mailbox commands to finish by waiting at most tov + * seconds. This is to serialize actual issuing of mailbox cmds during + * non ISP abort time. + */ + if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { + /* Timeout occurred. Return error. */ + ql_log(ql_log_warn, vha, 0x1177, + "Cmd access timeout, cmd=0x%x, Exiting.\n", + mcp->mb[0]); + return QLA_FUNCTION_TIMEOUT; + } + + ha->flags.mbox_busy = 1; + /* Save mailbox command for debug */ + ha->mcp32 = mcp; + + ql_dbg(ql_dbg_mbx, vha, 0x1178, + "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); + + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Load mailbox registers. */ + optr = ®->ispfx00.mailbox0; + + iptr = mcp->mb; + command = mcp->mb[0]; + mboxes = mcp->out_mb; + + for (cnt = 0; cnt < ha->mbx_count; cnt++) { + if (mboxes & BIT_0) + wrt_reg_dword(optr, *iptr); + + mboxes >>= 1; + optr++; + iptr++; + } + + /* Issue set host interrupt command to send cmd out. */ + ha->flags.mbox_int = 0; + clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + + ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172, + (uint8_t *)mcp->mb, 16); + ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173, + ((uint8_t *)mcp->mb + 0x10), 16); + ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174, + ((uint8_t *)mcp->mb + 0x20), 8); + + /* Unlock mbx registers and wait for interrupt */ + ql_dbg(ql_dbg_mbx, vha, 0x1179, + "Going to unlock irq & waiting for interrupts. " + "jiffies=%lx.\n", jiffies); + + /* Wait for mbx cmd completion until timeout */ + if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { + set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); + + QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + WARN_ON_ONCE(wait_for_completion_timeout(&ha->mbx_intr_comp, + mcp->tov * HZ) != 0); + } else { + ql_dbg(ql_dbg_mbx, vha, 0x112c, + "Cmd=%x Polling Mode.\n", command); + + QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ + while (!ha->flags.mbox_int) { + if (time_after(jiffies, wait_time)) + break; + + /* Check for pending interrupts. 
*/ + qla2x00_poll(ha->rsp_q_map[0]); + + if (!ha->flags.mbox_int && + !(IS_QLA2200(ha) && + command == MBC_LOAD_RISC_RAM_EXTENDED)) + usleep_range(10000, 11000); + } /* while */ + ql_dbg(ql_dbg_mbx, vha, 0x112d, + "Waited %d sec.\n", + (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); + } + + /* Check whether we timed out */ + if (ha->flags.mbox_int) { + uint32_t *iptr2; + + ql_dbg(ql_dbg_mbx, vha, 0x112e, + "Cmd=%x completed.\n", command); + + /* Got interrupt. Clear the flag. */ + ha->flags.mbox_int = 0; + clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + + if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE) + rval = QLA_FUNCTION_FAILED; + + /* Load return mailbox registers. */ + iptr2 = mcp->mb; + iptr = (uint32_t *)&ha->mailbox_out32[0]; + mboxes = mcp->in_mb; + for (cnt = 0; cnt < ha->mbx_count; cnt++) { + if (mboxes & BIT_0) + *iptr2 = *iptr; + + mboxes >>= 1; + iptr2++; + iptr++; + } + } else { + + rval = QLA_FUNCTION_TIMEOUT; + } + + ha->flags.mbox_busy = 0; + + /* Clean up */ + ha->mcp32 = NULL; + + if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { + ql_dbg(ql_dbg_mbx, vha, 0x113a, + "checking for additional resp interrupt.\n"); + + /* polling mode for non isp_abort commands. */ + qla2x00_poll(ha->rsp_q_map[0]); + } + + if (rval == QLA_FUNCTION_TIMEOUT && + mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) { + if (!io_lock_on || (mcp->flags & IOCTL_CMD) || + ha->flags.eeh_busy) { + /* not in dpc. schedule it for dpc to take over. */ + ql_dbg(ql_dbg_mbx, vha, 0x115d, + "Timeout, schedule isp_abort_needed.\n"); + + if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && + !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && + !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { + + ql_log(ql_log_info, base_vha, 0x115e, + "Mailbox cmd timeout occurred, cmd=0x%x, " + "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP " + "abort.\n", command, mcp->mb[0], + ha->flags.eeh_busy); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + } else if (!abort_active) { + /* call abort directly since we are in the DPC thread */ + ql_dbg(ql_dbg_mbx, vha, 0x1160, + "Timeout, calling abort_isp.\n"); + + if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && + !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && + !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { + + ql_log(ql_log_info, base_vha, 0x1161, + "Mailbox cmd timeout occurred, cmd=0x%x, " + "mb[0]=0x%x. Scheduling ISP abort ", + command, mcp->mb[0]); + + set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); + clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + if (ha->isp_ops->abort_isp(vha)) { + /* Failed. retry later. */ + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + } + clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); + ql_dbg(ql_dbg_mbx, vha, 0x1162, + "Finished abort_isp.\n"); + } + } + } + +premature_exit: + /* Allow next mbx cmd to come in. */ + complete(&ha->mbx_cmd_comp); + + if (rval) { + ql_log(ql_log_warn, base_vha, 0x1163, + "**** Failed=%x mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n", + rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], + command); + } else { + ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qlafx00_driver_shutdown + * Indicate a driver shutdown to firmware. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * local function return status code. + * + * Context: + * Kernel context. 
+ */ +int +qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo) +{ + int rval; + struct mbx_cmd_32 mc; + struct mbx_cmd_32 *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_MR_DRV_SHUTDOWN; + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_0; + if (tmo) + mcp->tov = tmo; + else + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qlafx00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1167, + "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qlafx00_get_firmware_state + * Get adapter firmware state. + * + * Input: + * ha = adapter block pointer. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla7xxx local function return status code. + * + * Context: + * Kernel context. + */ +static int +qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states) +{ + int rval; + struct mbx_cmd_32 mc; + struct mbx_cmd_32 *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_GET_FIRMWARE_STATE; + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qlafx00_mailbox_command(vha, mcp); + + /* Return firmware states. */ + states[0] = mcp->mb[1]; + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x116a, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b, + "Done %s.\n", __func__); + } + return rval; +} + +/* + * qlafx00_init_firmware + * Initialize adapter firmware. + * + * Input: + * ha = adapter block pointer. + * dptr = Initialization control block pointer. + * size = size of initialization control block. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qlafx00 local function return status code. + * + * Context: + * Kernel context. 
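[Editor's note] qlafx00_init_firmware() below hands the init control block's DMA address to the firmware as two 32-bit mailbox words via MSD()/LSD(). Here is a minimal standalone model of that split; the helper names are invented for the sketch.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint32_t msd(uint64_t v) { return (uint32_t)(v >> 32); }		/* high dword */
static uint32_t lsd(uint64_t v) { return (uint32_t)(v & 0xffffffffu); }	/* low dword  */

int main(void)
{
	uint64_t dma = 0x0000001234abcd00ull;	/* example DMA address */

	printf("mb[2]=0x%08" PRIx32 " mb[3]=0x%08" PRIx32 "\n",
	       msd(dma), lsd(dma));
	return 0;
}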
+ */ +int +qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size) +{ + int rval; + struct mbx_cmd_32 mc; + struct mbx_cmd_32 *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_INITIALIZE_FIRMWARE; + + mcp->mb[1] = 0; + mcp->mb[2] = MSD(ha->init_cb_dma); + mcp->mb[3] = LSD(ha->init_cb_dma); + + mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->buf_size = size; + mcp->flags = MBX_DMA_OUT; + mcp->tov = MBX_TOV_SECONDS; + rval = qlafx00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x116d, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e, + "Done %s.\n", __func__); + } + return rval; +} + +/* + * qlafx00_mbx_reg_test + */ +static int +qlafx00_mbx_reg_test(scsi_qla_host_t *vha) +{ + int rval; + struct mbx_cmd_32 mc; + struct mbx_cmd_32 *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f, + "Entered %s.\n", __func__); + + + mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; + mcp->mb[1] = 0xAAAA; + mcp->mb[2] = 0x5555; + mcp->mb[3] = 0xAA55; + mcp->mb[4] = 0x55AA; + mcp->mb[5] = 0xA5A5; + mcp->mb[6] = 0x5A5A; + mcp->mb[7] = 0x2525; + mcp->mb[8] = 0xBBBB; + mcp->mb[9] = 0x6666; + mcp->mb[10] = 0xBB66; + mcp->mb[11] = 0x66BB; + mcp->mb[12] = 0xB6B6; + mcp->mb[13] = 0x6B6B; + mcp->mb[14] = 0x3636; + mcp->mb[15] = 0xCCCC; + + + mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| + MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| + MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->buf_size = 0; + mcp->flags = MBX_DMA_OUT; + mcp->tov = MBX_TOV_SECONDS; + rval = qlafx00_mailbox_command(vha, mcp); + if (rval == QLA_SUCCESS) { + if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 || + mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA) + rval = QLA_FUNCTION_FAILED; + if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A || + mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB) + rval = QLA_FUNCTION_FAILED; + if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 || + mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6) + rval = QLA_FUNCTION_FAILED; + if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 || + mcp->mb[31] != 0xCCCC) + rval = QLA_FUNCTION_FAILED; + } + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1170, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171, + "Done %s.\n", __func__); + } + return rval; +} + +/** + * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers. + * @vha: HA context + * + * Returns 0 on success. + */ +int +qlafx00_pci_config(scsi_qla_host_t *vha) +{ + uint16_t w; + struct qla_hw_data *ha = vha->hw; + + pci_set_master(ha->pdev); + pci_try_set_mwi(ha->pdev); + + pci_read_config_word(ha->pdev, PCI_COMMAND, &w); + w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); + w &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(ha->pdev, PCI_COMMAND, w); + + /* PCIe -- adjust Maximum Read Request Size (2048). */ + if (pci_is_pcie(ha->pdev)) + pcie_set_readrq(ha->pdev, 2048); + + ha->chip_revision = ha->pdev->revision; + + return QLA_SUCCESS; +} + +/** + * qlafx00_soc_cpu_reset() - Perform warm reset of iSA(CPUs being reset on SOC). 
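[Editor's note] The SOC reset sequence below stops the IDMA engines with read-modify-write updates that clear bit 12 of each control register. A trivial standalone model of that bit-clear step, for readers less used to the idiom:

#include <stdint.h>
#include <stdio.h>

/* Return val with the given bit cleared (read-modify-write pattern). */
static uint32_t clear_bit32(uint32_t val, unsigned int bit)
{
	return val & ~(1u << bit);
}

int main(void)
{
	uint32_t reg = 0xffffffffu;

	printf("0x%08x\n", (unsigned)clear_bit32(reg, 12));	/* 0xffffefff */
	return 0;
}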
+ * @vha: HA context + * + */ +static inline void +qlafx00_soc_cpu_reset(scsi_qla_host_t *vha) +{ + unsigned long flags = 0; + struct qla_hw_data *ha = vha->hw; + int i, core; + uint32_t cnt; + uint32_t reg_val; + + spin_lock_irqsave(&ha->hardware_lock, flags); + + QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0); + QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0); + + /* stop the XOR DMA engines */ + QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02); + QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02); + QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02); + QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02); + + /* stop the IDMA engines */ + reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840); + reg_val &= ~(1<<12); + QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val); + + reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844); + reg_val &= ~(1<<12); + QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val); + + reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848); + reg_val &= ~(1<<12); + QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val); + + reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C); + reg_val &= ~(1<<12); + QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val); + + for (i = 0; i < 100000; i++) { + if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 && + (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0) + break; + udelay(100); + } + + /* Set all 4 cores in reset */ + for (i = 0; i < 4; i++) { + QLAFX00_SET_HBA_SOC_REG(ha, + (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01)); + QLAFX00_SET_HBA_SOC_REG(ha, + (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101)); + } + + /* Reset all units in Fabric */ + QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101)); + + /* */ + QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1); + QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0); + + /* Set all 4 core Memory Power Down Registers */ + for (i = 0; i < 5; i++) { + QLAFX00_SET_HBA_SOC_REG(ha, + (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0)); + } + + /* Reset all interrupt control registers */ + for (i = 0; i < 115; i++) { + QLAFX00_SET_HBA_SOC_REG(ha, + (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0)); + } + + /* Reset Timers control registers. per core */ + for (core = 0; core < 4; core++) + for (i = 0; i < 8; i++) + QLAFX00_SET_HBA_SOC_REG(ha, + (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0)); + + /* Reset per core IRQ ack register */ + for (core = 0; core < 4; core++) + QLAFX00_SET_HBA_SOC_REG(ha, + (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF)); + + /* Set Fabric control and config to defaults */ + QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2)); + QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3)); + + /* Kick in Fabric units */ + QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0)); + + /* Kick in Core0 to start boot process */ + QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00)); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* Wait 10secs for soft-reset to complete. */ + for (cnt = 10; cnt; cnt--) { + msleep(1000); + barrier(); + } +} + +/** + * qlafx00_soft_reset() - Soft Reset ISPFx00. + * @vha: HA context + * + * Returns 0 on success. + */ +int +qlafx00_soft_reset(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + int rval = QLA_FUNCTION_FAILED; + + if (unlikely(pci_channel_offline(ha->pdev) && + ha->flags.pci_channel_io_perm_failure)) + return rval; + + ha->isp_ops->disable_intrs(ha); + qlafx00_soc_cpu_reset(vha); + + return QLA_SUCCESS; +} + +/** + * qlafx00_chip_diag() - Test ISPFx00 for proper operation. + * @vha: HA context + * + * Returns 0 on success. 
+ */ +int +qlafx00_chip_diag(scsi_qla_host_t *vha) +{ + int rval = 0; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + + ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; + + rval = qlafx00_mbx_reg_test(vha); + if (rval) { + ql_log(ql_log_warn, vha, 0x1165, + "Failed mailbox send register test\n"); + } else { + /* Flag a successful rval */ + rval = QLA_SUCCESS; + } + return rval; +} + +void +qlafx00_config_rings(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; + + wrt_reg_dword(®->req_q_in, 0); + wrt_reg_dword(®->req_q_out, 0); + + wrt_reg_dword(®->rsp_q_in, 0); + wrt_reg_dword(®->rsp_q_out, 0); + + /* PCI posting */ + rd_reg_dword(®->rsp_q_out); +} + +char * +qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) +{ + struct qla_hw_data *ha = vha->hw; + + if (pci_is_pcie(ha->pdev)) + strscpy(str, "PCIe iSA", str_len); + return str; +} + +char * +qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) +{ + struct qla_hw_data *ha = vha->hw; + + snprintf(str, size, "%s", ha->mr.fw_version); + return str; +} + +void +qlafx00_enable_intrs(struct qla_hw_data *ha) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->interrupts_on = 1; + QLAFX00_ENABLE_ICNTRL_REG(ha); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +void +qlafx00_disable_intrs(struct qla_hw_data *ha) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->interrupts_on = 0; + QLAFX00_DISABLE_ICNTRL_REG(ha); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +int +qlafx00_abort_target(fc_port_t *fcport, uint64_t l, int tag) +{ + return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); +} + +int +qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag) +{ + return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); +} + +int +qlafx00_iospace_config(struct qla_hw_data *ha) +{ + if (pci_request_selected_regions(ha->pdev, ha->bars, + QLA2XXX_DRIVER_NAME)) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x014e, + "Failed to reserve PIO/MMIO regions (%s), aborting.\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + + /* Use MMIO operations for all accesses. 
*/ + if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { + ql_log_pci(ql_log_warn, ha->pdev, 0x014f, + "Invalid pci I/O region size (%s).\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) { + ql_log_pci(ql_log_warn, ha->pdev, 0x0127, + "Invalid PCI mem BAR0 region size (%s), aborting\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + + ha->cregbase = + ioremap(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00); + if (!ha->cregbase) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x0128, + "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); + goto iospace_error_exit; + } + + if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) { + ql_log_pci(ql_log_warn, ha->pdev, 0x0129, + "region #2 not an MMIO resource (%s), aborting\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) { + ql_log_pci(ql_log_warn, ha->pdev, 0x012a, + "Invalid PCI mem BAR2 region size (%s), aborting\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + + ha->iobase = + ioremap(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00); + if (!ha->iobase) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x012b, + "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); + goto iospace_error_exit; + } + + /* Determine queue resources */ + ha->max_req_queues = ha->max_rsp_queues = 1; + + ql_log_pci(ql_log_info, ha->pdev, 0x012c, + "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n", + ha->bars, ha->cregbase, ha->iobase); + + return 0; + +iospace_error_exit: + return -ENOMEM; +} + +static void +qlafx00_save_queue_ptrs(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + struct rsp_que *rsp = ha->rsp_q_map[0]; + + req->length_fx00 = req->length; + req->ring_fx00 = req->ring; + req->dma_fx00 = req->dma; + + rsp->length_fx00 = rsp->length; + rsp->ring_fx00 = rsp->ring; + rsp->dma_fx00 = rsp->dma; + + ql_dbg(ql_dbg_init, vha, 0x012d, + "req: %p, ring_fx00: %p, length_fx00: 0x%x," + "req->dma_fx00: 0x%llx\n", req, req->ring_fx00, + req->length_fx00, (u64)req->dma_fx00); + + ql_dbg(ql_dbg_init, vha, 0x012e, + "rsp: %p, ring_fx00: %p, length_fx00: 0x%x," + "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00, + rsp->length_fx00, (u64)rsp->dma_fx00); +} + +static int +qlafx00_config_queues(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + struct rsp_que *rsp = ha->rsp_q_map[0]; + dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2); + + req->length = ha->req_que_len; + req->ring = (void __force *)ha->iobase + ha->req_que_off; + req->dma = bar2_hdl + ha->req_que_off; + if ((!req->ring) || (req->length == 0)) { + ql_log_pci(ql_log_info, ha->pdev, 0x012f, + "Unable to allocate memory for req_ring\n"); + return QLA_FUNCTION_FAILED; + } + + ql_dbg(ql_dbg_init, vha, 0x0130, + "req: %p req_ring pointer %p req len 0x%x " + "req off 0x%x\n, req->dma: 0x%llx", + req, req->ring, req->length, + ha->req_que_off, (u64)req->dma); + + rsp->length = ha->rsp_que_len; + rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off; + rsp->dma = bar2_hdl + ha->rsp_que_off; + if ((!rsp->ring) || (rsp->length == 0)) { + ql_log_pci(ql_log_info, ha->pdev, 0x0131, + "Unable to allocate memory for rsp_ring\n"); + return QLA_FUNCTION_FAILED; + } + + ql_dbg(ql_dbg_init, vha, 0x0132, + "rsp: %p rsp_ring pointer %p rsp len 0x%x " + "rsp off 0x%x, rsp->dma: 0x%llx\n", + rsp, rsp->ring, rsp->length, + ha->rsp_que_off, (u64)rsp->dma); + + return 
QLA_SUCCESS; +} + +static int +qlafx00_init_fw_ready(scsi_qla_host_t *vha) +{ + int rval = 0; + unsigned long wtime; + uint16_t wait_time; /* Wait time */ + struct qla_hw_data *ha = vha->hw; + struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; + uint32_t aenmbx, aenmbx7 = 0; + uint32_t pseudo_aen; + uint32_t state[5]; + bool done = false; + + /* 30 seconds wait - Adjust if required */ + wait_time = 30; + + pseudo_aen = rd_reg_dword(®->pseudoaen); + if (pseudo_aen == 1) { + aenmbx7 = rd_reg_dword(®->initval7); + ha->mbx_intr_code = MSW(aenmbx7); + ha->rqstq_intr_code = LSW(aenmbx7); + rval = qlafx00_driver_shutdown(vha, 10); + if (rval != QLA_SUCCESS) + qlafx00_soft_reset(vha); + } + + /* wait time before firmware ready */ + wtime = jiffies + (wait_time * HZ); + do { + aenmbx = rd_reg_dword(®->aenmailbox0); + barrier(); + ql_dbg(ql_dbg_mbx, vha, 0x0133, + "aenmbx: 0x%x\n", aenmbx); + + switch (aenmbx) { + case MBA_FW_NOT_STARTED: + case MBA_FW_STARTING: + break; + + case MBA_SYSTEM_ERR: + case MBA_REQ_TRANSFER_ERR: + case MBA_RSP_TRANSFER_ERR: + case MBA_FW_INIT_FAILURE: + qlafx00_soft_reset(vha); + break; + + case MBA_FW_RESTART_CMPLT: + /* Set the mbx and rqstq intr code */ + aenmbx7 = rd_reg_dword(®->aenmailbox7); + ha->mbx_intr_code = MSW(aenmbx7); + ha->rqstq_intr_code = LSW(aenmbx7); + ha->req_que_off = rd_reg_dword(®->aenmailbox1); + ha->rsp_que_off = rd_reg_dword(®->aenmailbox3); + ha->req_que_len = rd_reg_dword(®->aenmailbox5); + ha->rsp_que_len = rd_reg_dword(®->aenmailbox6); + wrt_reg_dword(®->aenmailbox0, 0); + rd_reg_dword_relaxed(®->aenmailbox0); + ql_dbg(ql_dbg_init, vha, 0x0134, + "f/w returned mbx_intr_code: 0x%x, " + "rqstq_intr_code: 0x%x\n", + ha->mbx_intr_code, ha->rqstq_intr_code); + QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); + rval = QLA_SUCCESS; + done = true; + break; + + default: + if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS) + break; + + /* If fw is apparently not ready. In order to continue, + * we might need to issue Mbox cmd, but the problem is + * that the DoorBell vector values that come with the + * 8060 AEN are most likely gone by now (and thus no + * bell would be rung on the fw side when mbox cmd is + * issued). We have to therefore grab the 8060 AEN + * shadow regs (filled in by FW when the last 8060 + * AEN was being posted). + * Do the following to determine what is needed in + * order to get the FW ready: + * 1. reload the 8060 AEN values from the shadow regs + * 2. clear int status to get rid of possible pending + * interrupts + * 3. issue Get FW State Mbox cmd to determine fw state + * Set the mbx and rqstq intr code from Shadow Regs + */ + aenmbx7 = rd_reg_dword(®->initval7); + ha->mbx_intr_code = MSW(aenmbx7); + ha->rqstq_intr_code = LSW(aenmbx7); + ha->req_que_off = rd_reg_dword(®->initval1); + ha->rsp_que_off = rd_reg_dword(®->initval3); + ha->req_que_len = rd_reg_dword(®->initval5); + ha->rsp_que_len = rd_reg_dword(®->initval6); + ql_dbg(ql_dbg_init, vha, 0x0135, + "f/w returned mbx_intr_code: 0x%x, " + "rqstq_intr_code: 0x%x\n", + ha->mbx_intr_code, ha->rqstq_intr_code); + QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); + + /* Get the FW state */ + rval = qlafx00_get_firmware_state(vha, state); + if (rval != QLA_SUCCESS) { + /* Retry if timer has not expired */ + break; + } + + if (state[0] == FSTATE_FX00_CONFIG_WAIT) { + /* Firmware is waiting to be + * initialized by driver + */ + rval = QLA_SUCCESS; + done = true; + break; + } + + /* Issue driver shutdown and wait until f/w recovers. 
+ * Driver should continue to poll until 8060 AEN is + * received indicating firmware recovery. + */ + ql_dbg(ql_dbg_init, vha, 0x0136, + "Sending Driver shutdown fw_state 0x%x\n", + state[0]); + + rval = qlafx00_driver_shutdown(vha, 10); + if (rval != QLA_SUCCESS) { + rval = QLA_FUNCTION_FAILED; + break; + } + msleep(500); + + wtime = jiffies + (wait_time * HZ); + break; + } + + if (!done) { + if (time_after_eq(jiffies, wtime)) { + ql_dbg(ql_dbg_init, vha, 0x0137, + "Init f/w failed: aen[7]: 0x%x\n", + rd_reg_dword(®->aenmailbox7)); + rval = QLA_FUNCTION_FAILED; + done = true; + break; + } + /* Delay for a while */ + msleep(500); + } + } while (!done); + + if (rval) + ql_dbg(ql_dbg_init, vha, 0x0138, + "%s **** FAILED ****.\n", __func__); + else + ql_dbg(ql_dbg_init, vha, 0x0139, + "%s **** SUCCESS ****.\n", __func__); + + return rval; +} + +/* + * qlafx00_fw_ready() - Waits for firmware ready. + * @ha: HA context + * + * Returns 0 on success. + */ +int +qlafx00_fw_ready(scsi_qla_host_t *vha) +{ + int rval; + unsigned long wtime; + uint16_t wait_time; /* Wait time if loop is coming ready */ + uint32_t state[5]; + + rval = QLA_SUCCESS; + + wait_time = 10; + + /* wait time before firmware ready */ + wtime = jiffies + (wait_time * HZ); + + /* Wait for ISP to finish init */ + if (!vha->flags.init_done) + ql_dbg(ql_dbg_init, vha, 0x013a, + "Waiting for init to complete...\n"); + + do { + rval = qlafx00_get_firmware_state(vha, state); + + if (rval == QLA_SUCCESS) { + if (state[0] == FSTATE_FX00_INITIALIZED) { + ql_dbg(ql_dbg_init, vha, 0x013b, + "fw_state=%x\n", state[0]); + rval = QLA_SUCCESS; + break; + } + } + rval = QLA_FUNCTION_FAILED; + + if (time_after_eq(jiffies, wtime)) + break; + + /* Delay for a while */ + msleep(500); + + ql_dbg(ql_dbg_init, vha, 0x013c, + "fw_state=%x curr time=%lx.\n", state[0], jiffies); + } while (1); + + + if (rval) + ql_dbg(ql_dbg_init, vha, 0x013d, + "Firmware ready **** FAILED ****.\n"); + else + ql_dbg(ql_dbg_init, vha, 0x013e, + "Firmware ready **** SUCCESS ****.\n"); + + return rval; +} + +static int +qlafx00_find_all_targets(scsi_qla_host_t *vha, + struct list_head *new_fcports) +{ + int rval; + uint16_t tgt_id; + fc_port_t *fcport, *new_fcport; + int found; + struct qla_hw_data *ha = vha->hw; + + rval = QLA_SUCCESS; + + if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags)) + return QLA_FUNCTION_FAILED; + + if ((atomic_read(&vha->loop_down_timer) || + STATE_TRANSITION(vha))) { + atomic_set(&vha->loop_down_timer, 0); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + return QLA_FUNCTION_FAILED; + } + + ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088, + "Listing Target bit map...\n"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha, 0x2089, + ha->gid_list, 32); + + /* Allocate temporary rmtport for any new rmtports discovered. */ + new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (new_fcport == NULL) + return QLA_MEMORY_ALLOC_FAILED; + + for_each_set_bit(tgt_id, (void *)ha->gid_list, + QLAFX00_TGT_NODE_LIST_SIZE) { + + /* Send get target node info */ + new_fcport->tgt_id = tgt_id; + rval = qlafx00_fx_disc(vha, new_fcport, + FXDISC_GET_TGT_NODE_INFO); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x208a, + "Target info scan failed -- assuming zero-entry " + "result...\n"); + continue; + } + + /* Locate matching device in database. 
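[Editor's note] The matching loop below compares 8-byte worldwide names with memcmp() over WWN_SIZE and logs them through wwn_to_u64(). The following self-contained sketch models that big-endian fold of a WWN into a 64-bit value; the helper name is made up and the driver's own wwn_to_u64() may be implemented differently.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define WWN_SIZE 8

/* Fold an 8-byte WWN into a u64, most significant byte first. */
static uint64_t wwn_to_u64_model(const uint8_t wwn[WWN_SIZE])
{
	uint64_t v = 0;

	for (int i = 0; i < WWN_SIZE; i++)
		v = (v << 8) | wwn[i];
	return v;
}

int main(void)
{
	uint8_t wwpn[WWN_SIZE] = { 0x21, 0x00, 0x00, 0x24, 0xff, 0x01, 0x02, 0x03 };

	printf("wwpn = %016" PRIx64 "\n", wwn_to_u64_model(wwpn));
	return 0;
}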
*/ + found = 0; + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (memcmp(new_fcport->port_name, + fcport->port_name, WWN_SIZE)) + continue; + + found++; + + /* + * If tgt_id is same and state FCS_ONLINE, nothing + * changed. + */ + if (fcport->tgt_id == new_fcport->tgt_id && + atomic_read(&fcport->state) == FCS_ONLINE) + break; + + /* + * Tgt ID changed or device was marked to be updated. + */ + ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b, + "TGT-ID Change(%s): Present tgt id: " + "0x%x state: 0x%x " + "wwnn = %llx wwpn = %llx.\n", + __func__, fcport->tgt_id, + atomic_read(&fcport->state), + (unsigned long long)wwn_to_u64(fcport->node_name), + (unsigned long long)wwn_to_u64(fcport->port_name)); + + ql_log(ql_log_info, vha, 0x208c, + "TGT-ID Announce(%s): Discovered tgt " + "id 0x%x wwnn = %llx " + "wwpn = %llx.\n", __func__, new_fcport->tgt_id, + (unsigned long long) + wwn_to_u64(new_fcport->node_name), + (unsigned long long) + wwn_to_u64(new_fcport->port_name)); + + if (atomic_read(&fcport->state) != FCS_ONLINE) { + fcport->old_tgt_id = fcport->tgt_id; + fcport->tgt_id = new_fcport->tgt_id; + ql_log(ql_log_info, vha, 0x208d, + "TGT-ID: New fcport Added: %p\n", fcport); + qla2x00_update_fcport(vha, fcport); + } else { + ql_log(ql_log_info, vha, 0x208e, + " Existing TGT-ID %x did not get " + " offline event from firmware.\n", + fcport->old_tgt_id); + qla2x00_mark_device_lost(vha, fcport, 0); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + qla2x00_free_fcport(new_fcport); + return rval; + } + break; + } + + if (found) + continue; + + /* If device was not in our fcports list, then add it. */ + list_add_tail(&new_fcport->list, new_fcports); + + /* Allocate a new replacement fcport. */ + new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (new_fcport == NULL) + return QLA_MEMORY_ALLOC_FAILED; + } + + qla2x00_free_fcport(new_fcport); + return rval; +} + +/* + * qlafx00_configure_all_targets + * Setup target devices with node ID's. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = success. + * BIT_0 = error + */ +static int +qlafx00_configure_all_targets(scsi_qla_host_t *vha) +{ + int rval; + fc_port_t *fcport, *rmptemp; + LIST_HEAD(new_fcports); + + rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport, + FXDISC_GET_TGT_NODE_LIST); + if (rval != QLA_SUCCESS) { + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + return rval; + } + + rval = qlafx00_find_all_targets(vha, &new_fcports); + if (rval != QLA_SUCCESS) { + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + return rval; + } + + /* + * Delete all previous devices marked lost. + */ + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + break; + + if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { + if (fcport->port_type != FCT_INITIATOR) + qla2x00_mark_device_lost(vha, fcport, 0); + } + } + + /* + * Add the new devices to our devices list. + */ + list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) { + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + break; + + qla2x00_update_fcport(vha, fcport); + list_move_tail(&fcport->list, &vha->vp_fcports); + ql_log(ql_log_info, vha, 0x208f, + "Attach new target id 0x%x wwnn = %llx " + "wwpn = %llx.\n", + fcport->tgt_id, + (unsigned long long)wwn_to_u64(fcport->node_name), + (unsigned long long)wwn_to_u64(fcport->port_name)); + } + + /* Free all new device structures not processed. 
*/ + list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) { + list_del(&fcport->list); + qla2x00_free_fcport(fcport); + } + + return rval; +} + +/* + * qlafx00_configure_devices + * Updates Fibre Channel Device Database with what is actually on loop. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = success. + * 1 = error. + * 2 = database was full and device was not configured. + */ +int +qlafx00_configure_devices(scsi_qla_host_t *vha) +{ + int rval; + unsigned long flags; + + rval = QLA_SUCCESS; + + flags = vha->dpc_flags; + + ql_dbg(ql_dbg_disc, vha, 0x2090, + "Configure devices -- dpc flags =0x%lx\n", flags); + + rval = qlafx00_configure_all_targets(vha); + + if (rval == QLA_SUCCESS) { + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { + rval = QLA_FUNCTION_FAILED; + } else { + atomic_set(&vha->loop_state, LOOP_READY); + ql_log(ql_log_info, vha, 0x2091, + "Device Ready\n"); + } + } + + if (rval) { + ql_dbg(ql_dbg_disc, vha, 0x2092, + "%s *** FAILED ***.\n", __func__); + } else { + ql_dbg(ql_dbg_disc, vha, 0x2093, + "%s: exiting normally.\n", __func__); + } + return rval; +} + +static void +qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp) +{ + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport; + + vha->flags.online = 0; + ha->mr.fw_hbt_en = 0; + + if (!critemp) { + ha->flags.chip_reset_done = 0; + clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + vha->qla_stats.total_isp_aborts++; + ql_log(ql_log_info, vha, 0x013f, + "Performing ISP error recovery - ha = %p.\n", ha); + ha->isp_ops->reset_chip(vha); + } + + if (atomic_read(&vha->loop_state) != LOOP_DOWN) { + atomic_set(&vha->loop_state, LOOP_DOWN); + atomic_set(&vha->loop_down_timer, + QLAFX00_LOOP_DOWN_TIME); + } else { + if (!atomic_read(&vha->loop_down_timer)) + atomic_set(&vha->loop_down_timer, + QLAFX00_LOOP_DOWN_TIME); + } + + /* Clear all async request states across all VPs. */ + list_for_each_entry(fcport, &vha->vp_fcports, list) { + fcport->flags = 0; + if (atomic_read(&fcport->state) == FCS_ONLINE) + qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); + } + + if (!ha->flags.eeh_busy) { + if (critemp) { + qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); + } else { + /* Requeue all commands in outstanding command list. */ + qla2x00_abort_all_cmds(vha, DID_RESET << 16); + } + } + + qla2x00_free_irqs(vha); + if (critemp) + set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags); + else + set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags); + + /* Clear the Interrupts */ + QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); + + ql_log(ql_log_info, vha, 0x0140, + "%s Done done - ha=%p.\n", __func__, ha); +} + +/** + * qlafx00_init_response_q_entries() - Initializes response queue entries. + * @rsp: response queue + * + * Beginning of request ring has initialization control block already built + * by nvram config routine. + * + * Returns 0 on success. 
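[Editor's note] qlafx00_init_response_q_entries() below stamps every ring slot with a "processed" signature so stale contents are never mistaken for new completions. A userspace model of the same idea follows; the structure layout and signature value are stand-ins, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define RING_LEN 8
#define PROCESSED_SIGNATURE 0xDEADDEADu	/* stand-in value for the model */

struct rsp_entry {
	uint32_t signature;
	uint32_t payload[15];
};

int main(void)
{
	static struct rsp_entry ring[RING_LEN];	/* zero-initialized */

	/* Mark every slot as already processed before enabling the queue. */
	for (int i = 0; i < RING_LEN; i++)
		ring[i].signature = PROCESSED_SIGNATURE;

	printf("slot 0 signature = 0x%08x\n", (unsigned)ring[0].signature);
	return 0;
}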
+ */ +void +qlafx00_init_response_q_entries(struct rsp_que *rsp) +{ + uint16_t cnt; + response_t *pkt; + + rsp->ring_ptr = rsp->ring; + rsp->ring_index = 0; + rsp->status_srb = NULL; + pkt = rsp->ring_ptr; + for (cnt = 0; cnt < rsp->length; cnt++) { + pkt->signature = RESPONSE_PROCESSED; + wrt_reg_dword((void __force __iomem *)&pkt->signature, + RESPONSE_PROCESSED); + pkt++; + } +} + +int +qlafx00_rescan_isp(scsi_qla_host_t *vha) +{ + uint32_t status = QLA_FUNCTION_FAILED; + struct qla_hw_data *ha = vha->hw; + struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; + uint32_t aenmbx7; + + qla2x00_request_irqs(ha, ha->rsp_q_map[0]); + + aenmbx7 = rd_reg_dword(®->aenmailbox7); + ha->mbx_intr_code = MSW(aenmbx7); + ha->rqstq_intr_code = LSW(aenmbx7); + ha->req_que_off = rd_reg_dword(®->aenmailbox1); + ha->rsp_que_off = rd_reg_dword(®->aenmailbox3); + ha->req_que_len = rd_reg_dword(®->aenmailbox5); + ha->rsp_que_len = rd_reg_dword(®->aenmailbox6); + + ql_dbg(ql_dbg_disc, vha, 0x2094, + "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x " + " Req que offset 0x%x Rsp que offset 0x%x\n", + ha->mbx_intr_code, ha->rqstq_intr_code, + ha->req_que_off, ha->rsp_que_len); + + /* Clear the Interrupts */ + QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); + + status = qla2x00_init_rings(vha); + if (!status) { + vha->flags.online = 1; + + /* if no cable then assume it's good */ + if ((vha->device_flags & DFLG_NO_CABLE)) + status = 0; + /* Register system information */ + if (qlafx00_fx_disc(vha, + &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO)) + ql_dbg(ql_dbg_disc, vha, 0x2095, + "failed to register host info\n"); + } + scsi_unblock_requests(vha->host); + return status; +} + +void +qlafx00_timer_routine(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t fw_heart_beat; + uint32_t aenmbx0; + struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; + uint32_t tempc; + + /* Check firmware health */ + if (ha->mr.fw_hbt_cnt) + ha->mr.fw_hbt_cnt--; + else { + if ((!ha->flags.mr_reset_hdlr_active) && + (!test_bit(UNLOADING, &vha->dpc_flags)) && + (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && + (ha->mr.fw_hbt_en)) { + fw_heart_beat = rd_reg_dword(®->fwheartbeat); + if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) { + ha->mr.old_fw_hbt_cnt = fw_heart_beat; + ha->mr.fw_hbt_miss_cnt = 0; + } else { + ha->mr.fw_hbt_miss_cnt++; + if (ha->mr.fw_hbt_miss_cnt == + QLAFX00_HEARTBEAT_MISS_CNT) { + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + ha->mr.fw_hbt_miss_cnt = 0; + } + } + } + ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; + } + + if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) { + /* Reset recovery to be performed in timer routine */ + aenmbx0 = rd_reg_dword(®->aenmailbox0); + if (ha->mr.fw_reset_timer_exp) { + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + ha->mr.fw_reset_timer_exp = 0; + } else if (aenmbx0 == MBA_FW_RESTART_CMPLT) { + /* Wake up DPC to rescan the targets */ + set_bit(FX00_TARGET_SCAN, &vha->dpc_flags); + clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; + } else if ((aenmbx0 == MBA_FW_STARTING) && + (!ha->mr.fw_hbt_en)) { + ha->mr.fw_hbt_en = 1; + } else if (!ha->mr.fw_reset_timer_tick) { + if (aenmbx0 == ha->mr.old_aenmbx0_state) + ha->mr.fw_reset_timer_exp = 1; + ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; + } else if (aenmbx0 == 0xFFFFFFFF) { + uint32_t data0, data1; + + data0 = QLAFX00_RD_REG(ha, + 
QLAFX00_BAR1_BASE_ADDR_REG); + data1 = QLAFX00_RD_REG(ha, + QLAFX00_PEX0_WIN0_BASE_ADDR_REG); + + data0 &= 0xffff0000; + data1 &= 0x0000ffff; + + QLAFX00_WR_REG(ha, + QLAFX00_PEX0_WIN0_BASE_ADDR_REG, + (data0 | data1)); + } else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) { + ha->mr.fw_reset_timer_tick = + QLAFX00_MAX_RESET_INTERVAL; + } else if (aenmbx0 == MBA_FW_RESET_FCT) { + ha->mr.fw_reset_timer_tick = + QLAFX00_MAX_RESET_INTERVAL; + } + if (ha->mr.old_aenmbx0_state != aenmbx0) { + ha->mr.old_aenmbx0_state = aenmbx0; + ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; + } + ha->mr.fw_reset_timer_tick--; + } + if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) { + /* + * Critical temperature recovery to be + * performed in timer routine + */ + if (ha->mr.fw_critemp_timer_tick == 0) { + tempc = QLAFX00_GET_TEMPERATURE(ha); + ql_dbg(ql_dbg_timer, vha, 0x6012, + "ISPFx00(%s): Critical temp timer, " + "current SOC temperature: %d\n", + __func__, tempc); + if (tempc < ha->mr.critical_temperature) { + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + clear_bit(FX00_CRITEMP_RECOVERY, + &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + ha->mr.fw_critemp_timer_tick = + QLAFX00_CRITEMP_INTERVAL; + } else { + ha->mr.fw_critemp_timer_tick--; + } + } + if (ha->mr.host_info_resend) { + /* + * Incomplete host info might be sent to firmware + * durinng system boot - info should be resend + */ + if (ha->mr.hinfo_resend_timer_tick == 0) { + ha->mr.host_info_resend = false; + set_bit(FX00_HOST_INFO_RESEND, &vha->dpc_flags); + ha->mr.hinfo_resend_timer_tick = + QLAFX00_HINFO_RESEND_INTERVAL; + qla2xxx_wake_dpc(vha); + } else { + ha->mr.hinfo_resend_timer_tick--; + } + } + +} + +/* + * qlfx00a_reset_initialize + * Re-initialize after a iSA device reset. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = success + */ +int +qlafx00_reset_initialize(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (vha->device_flags & DFLG_DEV_FAILED) { + ql_dbg(ql_dbg_init, vha, 0x0142, + "Device in failed state\n"); + return QLA_SUCCESS; + } + + ha->flags.mr_reset_hdlr_active = 1; + + if (vha->flags.online) { + scsi_block_requests(vha->host); + qlafx00_abort_isp_cleanup(vha, false); + } + + ql_log(ql_log_info, vha, 0x0143, + "(%s): succeeded.\n", __func__); + ha->flags.mr_reset_hdlr_active = 0; + return QLA_SUCCESS; +} + +/* + * qlafx00_abort_isp + * Resets ISP and aborts all outstanding commands. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = success + */ +int +qlafx00_abort_isp(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (vha->flags.online) { + if (unlikely(pci_channel_offline(ha->pdev) && + ha->flags.pci_channel_io_perm_failure)) { + clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); + return QLA_SUCCESS; + } + + scsi_block_requests(vha->host); + qlafx00_abort_isp_cleanup(vha, false); + } else { + scsi_block_requests(vha->host); + clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + vha->qla_stats.total_isp_aborts++; + ha->isp_ops->reset_chip(vha); + set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags); + /* Clear the Interrupts */ + QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); + } + + ql_log(ql_log_info, vha, 0x0145, + "(%s): succeeded.\n", __func__); + + return QLA_SUCCESS; +} + +static inline fc_port_t* +qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id) +{ + fc_port_t *fcport; + + /* Check for matching device in remote port list. 
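[Editor's note] The timer routine above detects a hung firmware by checking, on each tick, whether the heartbeat counter has advanced; after enough consecutive misses it schedules an ISP abort. A standalone model of that miss-counting scheme, with a stand-in threshold:

#include <stdint.h>
#include <stdio.h>

#define HEARTBEAT_MISS_LIMIT 3	/* stand-in for the driver's threshold */

struct hbt_state {
	uint32_t last_seen;
	unsigned int miss_cnt;
};

/* Returns 1 when recovery should be scheduled. */
static int heartbeat_tick(struct hbt_state *s, uint32_t counter_now)
{
	if (counter_now != s->last_seen) {
		s->last_seen = counter_now;
		s->miss_cnt = 0;
		return 0;
	}
	if (++s->miss_cnt >= HEARTBEAT_MISS_LIMIT) {
		s->miss_cnt = 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct hbt_state s = { 0, 0 };
	uint32_t samples[] = { 1, 2, 2, 2, 2 };	/* firmware counter readings */

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("tick %u -> recover=%d\n", i, heartbeat_tick(&s, samples[i]));
	return 0;
}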
*/ + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->tgt_id == tgt_id) { + ql_dbg(ql_dbg_async, vha, 0x5072, + "Matching fcport(%p) found with TGT-ID: 0x%x " + "and Remote TGT_ID: 0x%x\n", + fcport, fcport->tgt_id, tgt_id); + return fcport; + } + } + return NULL; +} + +static void +qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id) +{ + fc_port_t *fcport; + + ql_log(ql_log_info, vha, 0x5073, + "Detach TGT-ID: 0x%x\n", tgt_id); + + fcport = qlafx00_get_fcport(vha, tgt_id); + if (!fcport) + return; + + qla2x00_mark_device_lost(vha, fcport, 0); + + return; +} + +void +qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt) +{ + uint32_t aen_code, aen_data; + + aen_code = FCH_EVT_VENDOR_UNIQUE; + aen_data = evt->u.aenfx.evtcode; + + switch (evt->u.aenfx.evtcode) { + case QLAFX00_MBA_PORT_UPDATE: /* Port database update */ + if (evt->u.aenfx.mbx[1] == 0) { + if (evt->u.aenfx.mbx[2] == 1) { + if (!vha->flags.fw_tgt_reported) + vha->flags.fw_tgt_reported = 1; + atomic_set(&vha->loop_down_timer, 0); + atomic_set(&vha->loop_state, LOOP_UP); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } else if (evt->u.aenfx.mbx[2] == 2) { + qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]); + } + } else if (evt->u.aenfx.mbx[1] == 0xffff) { + if (evt->u.aenfx.mbx[2] == 1) { + if (!vha->flags.fw_tgt_reported) + vha->flags.fw_tgt_reported = 1; + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + } else if (evt->u.aenfx.mbx[2] == 2) { + vha->device_flags |= DFLG_NO_CABLE; + qla2x00_mark_all_devices_lost(vha); + } + } + break; + case QLAFX00_MBA_LINK_UP: + aen_code = FCH_EVT_LINKUP; + aen_data = 0; + break; + case QLAFX00_MBA_LINK_DOWN: + aen_code = FCH_EVT_LINKDOWN; + aen_data = 0; + break; + case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */ + ql_log(ql_log_info, vha, 0x5082, + "Process critical temperature event " + "aenmb[0]: %x\n", + evt->u.aenfx.evtcode); + scsi_block_requests(vha->host); + qlafx00_abort_isp_cleanup(vha, true); + scsi_unblock_requests(vha->host); + break; + } + + fc_host_post_event(vha->host, fc_get_event_number(), + aen_code, aen_data); +} + +static void +qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo) +{ + u64 port_name = 0, node_name = 0; + + port_name = (unsigned long long)wwn_to_u64(pinfo->port_name); + node_name = (unsigned long long)wwn_to_u64(pinfo->node_name); + + fc_host_node_name(vha->host) = node_name; + fc_host_port_name(vha->host) = port_name; + if (!pinfo->port_type) + vha->hw->current_topology = ISP_CFG_F; + if (pinfo->link_status == QLAFX00_LINK_STATUS_UP) + atomic_set(&vha->loop_state, LOOP_READY); + else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN) + atomic_set(&vha->loop_state, LOOP_DOWN); + vha->hw->link_data_rate = (uint16_t)pinfo->link_config; +} + +static void +qla2x00_fxdisc_iocb_timeout(void *data) +{ + srb_t *sp = data; + struct srb_iocb *lio = &sp->u.iocb_cmd; + + complete(&lio->u.fxiocb.fxiocb_comp); +} + +static void qla2x00_fxdisc_sp_done(srb_t *sp, int res) +{ + struct srb_iocb *lio = &sp->u.iocb_cmd; + + complete(&lio->u.fxiocb.fxiocb_comp); +} + +int +qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) +{ + srb_t *sp; + struct srb_iocb *fdisc; + int rval = QLA_FUNCTION_FAILED; + struct qla_hw_data *ha = vha->hw; + struct host_system_info *phost_info; + struct register_host_info *preg_hsi; + struct new_utsname *p_sysid = NULL; + + /* ref: INIT */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = 
SRB_FXIOCB_DCMD; + sp->name = "fxdisc"; + qla2x00_init_async_sp(sp, FXDISC_TIMEOUT, + qla2x00_fxdisc_sp_done); + sp->u.iocb_cmd.timeout = qla2x00_fxdisc_iocb_timeout; + + fdisc = &sp->u.iocb_cmd; + switch (fx_type) { + case FXDISC_GET_CONFIG_INFO: + fdisc->u.fxiocb.flags = + SRB_FXDISC_RESP_DMA_VALID; + fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data); + break; + case FXDISC_GET_PORT_INFO: + fdisc->u.fxiocb.flags = + SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID; + fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO; + fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->port_id); + break; + case FXDISC_GET_TGT_NODE_INFO: + fdisc->u.fxiocb.flags = + SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID; + fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO; + fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->tgt_id); + break; + case FXDISC_GET_TGT_NODE_LIST: + fdisc->u.fxiocb.flags = + SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID; + fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE; + break; + case FXDISC_REG_HOST_INFO: + fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID; + fdisc->u.fxiocb.req_len = sizeof(struct register_host_info); + p_sysid = utsname(); + if (!p_sysid) { + ql_log(ql_log_warn, vha, 0x303c, + "Not able to get the system information\n"); + goto done_free_sp; + } + break; + case FXDISC_ABORT_IOCTL: + default: + break; + } + + if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) { + fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev, + fdisc->u.fxiocb.req_len, + &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL); + if (!fdisc->u.fxiocb.req_addr) + goto done_free_sp; + + if (fx_type == FXDISC_REG_HOST_INFO) { + preg_hsi = (struct register_host_info *) + fdisc->u.fxiocb.req_addr; + phost_info = &preg_hsi->hsi; + memset(preg_hsi, 0, sizeof(struct register_host_info)); + phost_info->os_type = OS_TYPE_LINUX; + strscpy(phost_info->sysname, p_sysid->sysname, + sizeof(phost_info->sysname)); + strscpy(phost_info->nodename, p_sysid->nodename, + sizeof(phost_info->nodename)); + if (!strcmp(phost_info->nodename, "(none)")) + ha->mr.host_info_resend = true; + strscpy(phost_info->release, p_sysid->release, + sizeof(phost_info->release)); + strscpy(phost_info->version, p_sysid->version, + sizeof(phost_info->version)); + strscpy(phost_info->machine, p_sysid->machine, + sizeof(phost_info->machine)); + strscpy(phost_info->domainname, p_sysid->domainname, + sizeof(phost_info->domainname)); + strscpy(phost_info->hostdriver, QLA2XXX_VERSION, + sizeof(phost_info->hostdriver)); + preg_hsi->utc = (uint64_t)ktime_get_real_seconds(); + ql_dbg(ql_dbg_init, vha, 0x0149, + "ISP%04X: Host registration with firmware\n", + ha->pdev->device); + ql_dbg(ql_dbg_init, vha, 0x014a, + "os_type = '%d', sysname = '%s', nodname = '%s'\n", + phost_info->os_type, + phost_info->sysname, + phost_info->nodename); + ql_dbg(ql_dbg_init, vha, 0x014b, + "release = '%s', version = '%s'\n", + phost_info->release, + phost_info->version); + ql_dbg(ql_dbg_init, vha, 0x014c, + "machine = '%s' " + "domainname = '%s', hostdriver = '%s'\n", + phost_info->machine, + phost_info->domainname, + phost_info->hostdriver); + ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d, + phost_info, sizeof(*phost_info)); + } + } + + if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) { + fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev, + fdisc->u.fxiocb.rsp_len, + &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL); + if (!fdisc->u.fxiocb.rsp_addr) + goto done_unmap_req; + } + + fdisc->u.fxiocb.req_func_type = 
cpu_to_le16(fx_type); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_unmap_dma; + + wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp); + + if (fx_type == FXDISC_GET_CONFIG_INFO) { + struct config_info_data *pinfo = + (struct config_info_data *) fdisc->u.fxiocb.rsp_addr; + strscpy(vha->hw->model_number, pinfo->model_num, + ARRAY_SIZE(vha->hw->model_number)); + strscpy(vha->hw->model_desc, pinfo->model_description, + ARRAY_SIZE(vha->hw->model_desc)); + memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name, + sizeof(vha->hw->mr.symbolic_name)); + memcpy(&vha->hw->mr.serial_num, pinfo->serial_num, + sizeof(vha->hw->mr.serial_num)); + memcpy(&vha->hw->mr.hw_version, pinfo->hw_version, + sizeof(vha->hw->mr.hw_version)); + memcpy(&vha->hw->mr.fw_version, pinfo->fw_version, + sizeof(vha->hw->mr.fw_version)); + strim(vha->hw->mr.fw_version); + memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version, + sizeof(vha->hw->mr.uboot_version)); + memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num, + sizeof(vha->hw->mr.fru_serial_num)); + vha->hw->mr.critical_temperature = + (pinfo->nominal_temp_value) ? + pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD; + ha->mr.extended_io_enabled = (pinfo->enabled_capabilities & + QLAFX00_EXTENDED_IO_EN_MASK) != 0; + } else if (fx_type == FXDISC_GET_PORT_INFO) { + struct port_info_data *pinfo = + (struct port_info_data *) fdisc->u.fxiocb.rsp_addr; + memcpy(vha->node_name, pinfo->node_name, WWN_SIZE); + memcpy(vha->port_name, pinfo->port_name, WWN_SIZE); + vha->d_id.b.domain = pinfo->port_id[0]; + vha->d_id.b.area = pinfo->port_id[1]; + vha->d_id.b.al_pa = pinfo->port_id[2]; + qlafx00_update_host_attr(vha, pinfo); + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141, + pinfo, 16); + } else if (fx_type == FXDISC_GET_TGT_NODE_INFO) { + struct qlafx00_tgt_node_info *pinfo = + (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr; + memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE); + memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE); + fcport->port_type = FCT_TARGET; + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144, + pinfo, 16); + } else if (fx_type == FXDISC_GET_TGT_NODE_LIST) { + struct qlafx00_tgt_node_info *pinfo = + (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr; + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146, + pinfo, 16); + memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE); + } else if (fx_type == FXDISC_ABORT_IOCTL) + fdisc->u.fxiocb.result = + (fdisc->u.fxiocb.result == + cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ? + cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED); + + rval = le32_to_cpu(fdisc->u.fxiocb.result); + +done_unmap_dma: + if (fdisc->u.fxiocb.rsp_addr) + dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len, + fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle); + +done_unmap_req: + if (fdisc->u.fxiocb.req_addr) + dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len, + fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle); +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + return rval; +} + +/* + * qlafx00_initialize_adapter + * Initialize board. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = success + */ +int +qlafx00_initialize_adapter(scsi_qla_host_t *vha) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + uint32_t tempc; + + /* Clear adapter flags. 
*/ + vha->flags.online = 0; + ha->flags.chip_reset_done = 0; + vha->flags.reset_active = 0; + ha->flags.pci_channel_io_perm_failure = 0; + ha->flags.eeh_busy = 0; + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + atomic_set(&vha->loop_state, LOOP_DOWN); + vha->device_flags = DFLG_NO_CABLE; + vha->dpc_flags = 0; + vha->flags.management_server_logged_in = 0; + ha->isp_abort_cnt = 0; + ha->beacon_blink_led = 0; + + set_bit(0, ha->req_qid_map); + set_bit(0, ha->rsp_qid_map); + + ql_dbg(ql_dbg_init, vha, 0x0147, + "Configuring PCI space...\n"); + + rval = ha->isp_ops->pci_config(vha); + if (rval) { + ql_log(ql_log_warn, vha, 0x0148, + "Unable to configure PCI space.\n"); + return rval; + } + + rval = qlafx00_init_fw_ready(vha); + if (rval != QLA_SUCCESS) + return rval; + + qlafx00_save_queue_ptrs(vha); + + rval = qlafx00_config_queues(vha); + if (rval != QLA_SUCCESS) + return rval; + + /* + * Allocate the array of outstanding commands + * now that we know the firmware resources. + */ + rval = qla2x00_alloc_outstanding_cmds(ha, vha->req); + if (rval != QLA_SUCCESS) + return rval; + + rval = qla2x00_init_rings(vha); + ha->flags.chip_reset_done = 1; + + tempc = QLAFX00_GET_TEMPERATURE(ha); + ql_dbg(ql_dbg_init, vha, 0x0152, + "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n", + __func__, tempc); + + return rval; +} + +uint32_t +qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int rval = QLA_FUNCTION_FAILED; + uint32_t state[1]; + + if (qla2x00_reset_active(vha)) + ql_log(ql_log_warn, vha, 0x70ce, + "ISP reset active.\n"); + else if (!vha->hw->flags.eeh_busy) { + rval = qlafx00_get_firmware_state(vha, state); + } + if (rval != QLA_SUCCESS) + memset(state, -1, sizeof(state)); + + return state[0]; +} + +void +qlafx00_get_host_speed(struct Scsi_Host *shost) +{ + struct qla_hw_data *ha = ((struct scsi_qla_host *) + (shost_priv(shost)))->hw; + u32 speed = FC_PORTSPEED_UNKNOWN; + + switch (ha->link_data_rate) { + case QLAFX00_PORT_SPEED_2G: + speed = FC_PORTSPEED_2GBIT; + break; + case QLAFX00_PORT_SPEED_4G: + speed = FC_PORTSPEED_4GBIT; + break; + case QLAFX00_PORT_SPEED_8G: + speed = FC_PORTSPEED_8GBIT; + break; + case QLAFX00_PORT_SPEED_10G: + speed = FC_PORTSPEED_10GBIT; + break; + } + fc_host_speed(shost) = speed; +} + +/** QLAFX00 specific ISR implementation functions */ + +static inline void +qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, + uint32_t sense_len, struct rsp_que *rsp, int res) +{ + struct scsi_qla_host *vha = sp->vha; + struct scsi_cmnd *cp = GET_CMD_SP(sp); + uint32_t track_sense_len; + + SET_FW_SENSE_LEN(sp, sense_len); + + if (sense_len >= SCSI_SENSE_BUFFERSIZE) + sense_len = SCSI_SENSE_BUFFERSIZE; + + SET_CMD_SENSE_LEN(sp, sense_len); + SET_CMD_SENSE_PTR(sp, cp->sense_buffer); + track_sense_len = sense_len; + + if (sense_len > par_sense_len) + sense_len = par_sense_len; + + memcpy(cp->sense_buffer, sense_data, sense_len); + + SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len); + + SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); + track_sense_len -= sense_len; + SET_CMD_SENSE_LEN(sp, track_sense_len); + + ql_dbg(ql_dbg_io, vha, 0x304d, + "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n", + sense_len, par_sense_len, track_sense_len); + if (GET_FW_SENSE_LEN(sp) > 0) { + rsp->status_srb = sp; + cp->result = res; + } + + if (sense_len) { + ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039, + "Check condition Sense data, 
nexus%ld:%d:%llu cmd=%p.\n", + sp->vha->host_no, cp->device->id, cp->device->lun, + cp); + ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049, + cp->sense_buffer, sense_len); + } +} + +static void +qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp, + __le16 sstatus, __le16 cpstatus) +{ + struct srb_iocb *tmf; + + tmf = &sp->u.iocb_cmd; + if (cpstatus != cpu_to_le16((uint16_t)CS_COMPLETE) || + (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID))) + cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE); + tmf->u.tmf.comp_status = cpstatus; + sp->done(sp, 0); +} + +static void +qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct abort_iocb_entry_fx00 *pkt) +{ + const char func[] = "ABT_IOCB"; + srb_t *sp; + struct srb_iocb *abt; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + abt = &sp->u.iocb_cmd; + abt->u.abt.comp_status = pkt->tgt_id_sts; + sp->done(sp, 0); +} + +static void +qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct ioctl_iocb_entry_fx00 *pkt) +{ + const char func[] = "IOSB_IOCB"; + srb_t *sp; + struct bsg_job *bsg_job; + struct fc_bsg_reply *bsg_reply; + struct srb_iocb *iocb_job; + int res = 0; + struct qla_mt_iocb_rsp_fx00 fstatus; + uint8_t *fw_sts_ptr; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + if (sp->type == SRB_FXIOCB_DCMD) { + iocb_job = &sp->u.iocb_cmd; + iocb_job->u.fxiocb.seq_number = pkt->seq_no; + iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags; + iocb_job->u.fxiocb.result = pkt->status; + if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID) + iocb_job->u.fxiocb.req_data = + pkt->dataword_r; + } else { + bsg_job = sp->u.bsg_job; + bsg_reply = bsg_job->reply; + + memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00)); + + fstatus.reserved_1 = pkt->reserved_0; + fstatus.func_type = pkt->comp_func_num; + fstatus.ioctl_flags = pkt->fw_iotcl_flags; + fstatus.ioctl_data = pkt->dataword_r; + fstatus.adapid = pkt->adapid; + fstatus.reserved_2 = pkt->dataword_r_extra; + fstatus.res_count = pkt->residuallen; + fstatus.status = pkt->status; + fstatus.seq_number = pkt->seq_no; + memcpy(fstatus.reserved_3, + pkt->reserved_2, 20 * sizeof(uint8_t)); + + fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply); + + memcpy(fw_sts_ptr, &fstatus, sizeof(fstatus)); + bsg_job->reply_len = sizeof(struct fc_bsg_reply) + + sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t); + + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, + sp->vha, 0x5080, pkt, sizeof(*pkt)); + + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, + sp->vha, 0x5074, + fw_sts_ptr, sizeof(fstatus)); + + res = bsg_reply->result = DID_OK << 16; + bsg_reply->reply_payload_rcv_len = + bsg_job->reply_payload.payload_len; + } + sp->done(sp, res); +} + +/** + * qlafx00_status_entry() - Process a Status IOCB entry. 
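[Editor's note] qlafx00_status_entry() below unpacks the completion handle into a request-queue number and an outstanding-command index using MSW()/LSW(). A minimal model of that 16/16 split; the helper names here are lower-cased stand-ins.

#include <stdint.h>
#include <stdio.h>

static uint16_t lsw(uint32_t v) { return (uint16_t)(v & 0xffff); }	/* low 16 bits  */
static uint16_t msw(uint32_t v) { return (uint16_t)(v >> 16); }		/* high 16 bits */

int main(void)
{
	uint32_t hindex = (1u << 16) | 0x002a;	/* queue 1, command index 42 */

	printf("queue=%u handle=%u\n", msw(hindex), lsw(hindex));
	return 0;
}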
+ * @vha: SCSI driver HA context + * @rsp: response queue + * @pkt: Entry pointer + */ +static void +qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) +{ + srb_t *sp; + fc_port_t *fcport; + struct scsi_cmnd *cp; + struct sts_entry_fx00 *sts; + __le16 comp_status; + __le16 scsi_status; + __le16 lscsi_status; + int32_t resid; + uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, + fw_resid_len; + uint8_t *rsp_info = NULL, *sense_data = NULL; + struct qla_hw_data *ha = vha->hw; + uint32_t hindex, handle; + uint16_t que; + struct req_que *req; + int logit = 1; + int res = 0; + + sts = (struct sts_entry_fx00 *) pkt; + + comp_status = sts->comp_status; + scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK); + hindex = sts->handle; + handle = LSW(hindex); + + que = MSW(hindex); + req = ha->req_q_map[que]; + + /* Validate handle. */ + if (handle < req->num_outstanding_cmds) + sp = req->outstanding_cmds[handle]; + else + sp = NULL; + + if (sp == NULL) { + ql_dbg(ql_dbg_io, vha, 0x3034, + "Invalid status handle (0x%x).\n", handle); + + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + return; + } + + if (sp->type == SRB_TM_CMD) { + req->outstanding_cmds[handle] = NULL; + qlafx00_tm_iocb_entry(vha, req, pkt, sp, + scsi_status, comp_status); + return; + } + + /* Fast path completion. */ + if (comp_status == CS_COMPLETE && scsi_status == 0) { + qla2x00_process_completed_request(vha, req, handle); + return; + } + + req->outstanding_cmds[handle] = NULL; + cp = GET_CMD_SP(sp); + if (cp == NULL) { + ql_dbg(ql_dbg_io, vha, 0x3048, + "Command already returned (0x%x/%p).\n", + handle, sp); + + return; + } + + lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK); + + fcport = sp->fcport; + + sense_len = par_sense_len = rsp_info_len = resid_len = + fw_resid_len = 0; + if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)) + sense_len = sts->sense_len; + if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER + | (uint16_t)SS_RESIDUAL_OVER))) + resid_len = le32_to_cpu(sts->residual_len); + if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN)) + fw_resid_len = le32_to_cpu(sts->residual_len); + rsp_info = sense_data = sts->data; + par_sense_len = sizeof(sts->data); + + /* Check for overrun. 
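[Editor's note] Both the CS_COMPLETE and CS_DATA_UNDERRUN paths below flag a mid-layer underflow when the bytes actually transferred (buffer length minus residual) fall short of the command's underflow threshold. A small self-contained model of that check, with example numbers chosen for the sketch:

#include <stdio.h>

/* Returns 1 when fewer bytes arrived than the command's underflow requires. */
static int underflow_detected(unsigned int bufflen, unsigned int resid,
			      unsigned int underflow)
{
	return (bufflen - resid) < underflow;
}

int main(void)
{
	/* 4096-byte read; caller requires at least 3584 bytes transferred. */
	printf("%d\n", underflow_detected(4096, 1024, 3584));	/* 1: underflow */
	printf("%d\n", underflow_detected(4096, 256, 3584));	/* 0: enough    */
	return 0;
}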
*/ + if (comp_status == CS_COMPLETE && + scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER)) + comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN); + + /* + * Based on Host and scsi status generate status code for Linux + */ + switch (le16_to_cpu(comp_status)) { + case CS_COMPLETE: + case CS_QUEUE_FULL: + if (scsi_status == 0) { + res = DID_OK << 16; + break; + } + if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER + | (uint16_t)SS_RESIDUAL_OVER))) { + resid = resid_len; + scsi_set_resid(cp, resid); + + if (!lscsi_status && + ((unsigned)(scsi_bufflen(cp) - resid) < + cp->underflow)) { + ql_dbg(ql_dbg_io, fcport->vha, 0x3050, + "Mid-layer underflow " + "detected (0x%x of 0x%x bytes).\n", + resid, scsi_bufflen(cp)); + + res = DID_ERROR << 16; + break; + } + } + res = DID_OK << 16 | le16_to_cpu(lscsi_status); + + if (lscsi_status == + cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) { + ql_dbg(ql_dbg_io, fcport->vha, 0x3051, + "QUEUE FULL detected.\n"); + break; + } + logit = 0; + if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION)) + break; + + memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))) + break; + + qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len, + rsp, res); + break; + + case CS_DATA_UNDERRUN: + /* Use F/W calculated residual length. */ + if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) + resid = fw_resid_len; + else + resid = resid_len; + scsi_set_resid(cp, resid); + if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) { + if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) + && fw_resid_len != resid_len) { + ql_dbg(ql_dbg_io, fcport->vha, 0x3052, + "Dropped frame(s) detected " + "(0x%x of 0x%x bytes).\n", + resid, scsi_bufflen(cp)); + + res = DID_ERROR << 16 | + le16_to_cpu(lscsi_status); + goto check_scsi_status; + } + + if (!lscsi_status && + ((unsigned)(scsi_bufflen(cp) - resid) < + cp->underflow)) { + ql_dbg(ql_dbg_io, fcport->vha, 0x3053, + "Mid-layer underflow " + "detected (0x%x of 0x%x bytes, " + "cp->underflow: 0x%x).\n", + resid, scsi_bufflen(cp), cp->underflow); + + res = DID_ERROR << 16; + break; + } + } else if (lscsi_status != + cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) && + lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) { + /* + * scsi status of task set and busy are considered + * to be task not completed. + */ + + ql_dbg(ql_dbg_io, fcport->vha, 0x3054, + "Dropped frame(s) detected (0x%x " + "of 0x%x bytes).\n", resid, + scsi_bufflen(cp)); + + res = DID_ERROR << 16 | le16_to_cpu(lscsi_status); + goto check_scsi_status; + } else { + ql_dbg(ql_dbg_io, fcport->vha, 0x3055, + "scsi_status: 0x%x, lscsi_status: 0x%x\n", + scsi_status, lscsi_status); + } + + res = DID_OK << 16 | le16_to_cpu(lscsi_status); + logit = 0; + +check_scsi_status: + /* + * Check to see if SCSI Status is non zero. If so report SCSI + * Status. 
+ */ + if (lscsi_status != 0) { + if (lscsi_status == + cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) { + ql_dbg(ql_dbg_io, fcport->vha, 0x3056, + "QUEUE FULL detected.\n"); + logit = 1; + break; + } + if (lscsi_status != + cpu_to_le16((uint16_t)SS_CHECK_CONDITION)) + break; + + memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + if (!(scsi_status & + cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))) + break; + + qlafx00_handle_sense(sp, sense_data, par_sense_len, + sense_len, rsp, res); + } + break; + + case CS_PORT_LOGGED_OUT: + case CS_PORT_CONFIG_CHG: + case CS_PORT_BUSY: + case CS_INCOMPLETE: + case CS_PORT_UNAVAILABLE: + case CS_TIMEOUT: + case CS_RESET: + + /* + * We are going to have the fc class block the rport + * while we try to recover so instruct the mid layer + * to requeue until the class decides how to handle this. + */ + res = DID_TRANSPORT_DISRUPTED << 16; + + ql_dbg(ql_dbg_io, fcport->vha, 0x3057, + "Port down status: port-state=0x%x.\n", + atomic_read(&fcport->state)); + + if (atomic_read(&fcport->state) == FCS_ONLINE) + qla2x00_mark_device_lost(fcport->vha, fcport, 1); + break; + + case CS_ABORTED: + res = DID_RESET << 16; + break; + + default: + res = DID_ERROR << 16; + break; + } + + if (logit) + ql_dbg(ql_dbg_io, fcport->vha, 0x3058, + "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu " + "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x " + "rsp_info=%p resid=0x%x fw_resid=0x%x sense_len=0x%x, " + "par_sense_len=0x%x, rsp_info_len=0x%x\n", + comp_status, scsi_status, res, vha->host_no, + cp->device->id, cp->device->lun, fcport->tgt_id, + lscsi_status, cp->cmnd, scsi_bufflen(cp), + rsp_info, resid_len, fw_resid_len, sense_len, + par_sense_len, rsp_info_len); + + if (rsp->status_srb == NULL) + sp->done(sp, res); + else + WARN_ON_ONCE(true); +} + +/** + * qlafx00_status_cont_entry() - Process a Status Continuations entry. + * @rsp: response queue + * @pkt: Entry pointer + * + * Extended sense data. + */ +static void +qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) +{ + uint8_t sense_sz = 0; + struct qla_hw_data *ha = rsp->hw; + struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); + srb_t *sp = rsp->status_srb; + struct scsi_cmnd *cp; + uint32_t sense_len; + uint8_t *sense_ptr; + + if (!sp) { + ql_dbg(ql_dbg_io, vha, 0x3037, + "no SP, sp = %p\n", sp); + return; + } + + if (!GET_FW_SENSE_LEN(sp)) { + ql_dbg(ql_dbg_io, vha, 0x304b, + "no fw sense data, sp = %p\n", sp); + return; + } + cp = GET_CMD_SP(sp); + if (cp == NULL) { + ql_log(ql_log_warn, vha, 0x303b, + "cmd is NULL: already returned to OS (sp=%p).\n", sp); + + rsp->status_srb = NULL; + return; + } + + if (!GET_CMD_SENSE_LEN(sp)) { + ql_dbg(ql_dbg_io, vha, 0x304c, + "no sense data, sp = %p\n", sp); + } else { + sense_len = GET_CMD_SENSE_LEN(sp); + sense_ptr = GET_CMD_SENSE_PTR(sp); + ql_dbg(ql_dbg_io, vha, 0x304f, + "sp=%p sense_len=0x%x sense_ptr=%p.\n", + sp, sense_len, sense_ptr); + + if (sense_len > sizeof(pkt->data)) + sense_sz = sizeof(pkt->data); + else + sense_sz = sense_len; + + /* Move sense data. */ + ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e, + pkt, sizeof(*pkt)); + memcpy(sense_ptr, pkt->data, sense_sz); + ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a, + sense_ptr, sense_sz); + + sense_len -= sense_sz; + sense_ptr += sense_sz; + + SET_CMD_SENSE_PTR(sp, sense_ptr); + SET_CMD_SENSE_LEN(sp, sense_len); + } + sense_len = GET_FW_SENSE_LEN(sp); + sense_len = (sense_len > sizeof(pkt->data)) ? 
+ (sense_len - sizeof(pkt->data)) : 0; + SET_FW_SENSE_LEN(sp, sense_len); + + /* Place command on done queue. */ + if (sense_len == 0) { + rsp->status_srb = NULL; + sp->done(sp, cp->result); + } else { + WARN_ON_ONCE(true); + } +} + +/** + * qlafx00_multistatus_entry() - Process Multi response queue entries. + * @vha: SCSI driver HA context + * @rsp: response queue + * @pkt: received packet + */ +static void +qlafx00_multistatus_entry(struct scsi_qla_host *vha, + struct rsp_que *rsp, void *pkt) +{ + srb_t *sp; + struct multi_sts_entry_fx00 *stsmfx; + struct qla_hw_data *ha = vha->hw; + uint32_t handle, hindex, handle_count, i; + uint16_t que; + struct req_que *req; + __le32 *handle_ptr; + + stsmfx = (struct multi_sts_entry_fx00 *) pkt; + + handle_count = stsmfx->handle_count; + + if (handle_count > MAX_HANDLE_COUNT) { + ql_dbg(ql_dbg_io, vha, 0x3035, + "Invalid handle count (0x%x).\n", handle_count); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + return; + } + + handle_ptr = &stsmfx->handles[0]; + + for (i = 0; i < handle_count; i++) { + hindex = le32_to_cpu(*handle_ptr); + handle = LSW(hindex); + que = MSW(hindex); + req = ha->req_q_map[que]; + + /* Validate handle. */ + if (handle < req->num_outstanding_cmds) + sp = req->outstanding_cmds[handle]; + else + sp = NULL; + + if (sp == NULL) { + ql_dbg(ql_dbg_io, vha, 0x3044, + "Invalid status handle (0x%x).\n", handle); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + return; + } + qla2x00_process_completed_request(vha, req, handle); + handle_ptr++; + } +} + +/** + * qlafx00_error_entry() - Process an error entry. + * @vha: SCSI driver HA context + * @rsp: response queue + * @pkt: Entry pointer + */ +static void +qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, + struct sts_entry_fx00 *pkt) +{ + srb_t *sp; + struct qla_hw_data *ha = vha->hw; + const char func[] = "ERROR-IOCB"; + uint16_t que = 0; + struct req_que *req = NULL; + int res = DID_ERROR << 16; + + req = ha->req_q_map[que]; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (sp) { + sp->done(sp, res); + return; + } + + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); +} + +/** + * qlafx00_process_response_queue() - Process response queue entries. 
+ * @vha: SCSI driver HA context + * @rsp: response queue + */ +static void +qlafx00_process_response_queue(struct scsi_qla_host *vha, + struct rsp_que *rsp) +{ + struct sts_entry_fx00 *pkt; + response_t *lptr; + uint16_t lreq_q_in = 0; + uint16_t lreq_q_out = 0; + + lreq_q_in = rd_reg_dword(rsp->rsp_q_in); + lreq_q_out = rsp->ring_index; + + while (lreq_q_in != lreq_q_out) { + lptr = rsp->ring_ptr; + memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr, + sizeof(rsp->rsp_pkt)); + pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt; + + rsp->ring_index++; + lreq_q_out++; + if (rsp->ring_index == rsp->length) { + lreq_q_out = 0; + rsp->ring_index = 0; + rsp->ring_ptr = rsp->ring; + } else { + rsp->ring_ptr++; + } + + if (pkt->entry_status != 0 && + pkt->entry_type != IOCTL_IOSB_TYPE_FX00) { + ql_dbg(ql_dbg_async, vha, 0x507f, + "type of error status in response: 0x%x\n", + pkt->entry_status); + qlafx00_error_entry(vha, rsp, + (struct sts_entry_fx00 *)pkt); + continue; + } + + switch (pkt->entry_type) { + case STATUS_TYPE_FX00: + qlafx00_status_entry(vha, rsp, pkt); + break; + + case STATUS_CONT_TYPE_FX00: + qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); + break; + + case MULTI_STATUS_TYPE_FX00: + qlafx00_multistatus_entry(vha, rsp, pkt); + break; + + case ABORT_IOCB_TYPE_FX00: + qlafx00_abort_iocb_entry(vha, rsp->req, + (struct abort_iocb_entry_fx00 *)pkt); + break; + + case IOCTL_IOSB_TYPE_FX00: + qlafx00_ioctl_iosb_entry(vha, rsp->req, + (struct ioctl_iocb_entry_fx00 *)pkt); + break; + default: + /* Type Not Supported. */ + ql_dbg(ql_dbg_async, vha, 0x5081, + "Received unknown response pkt type %x " + "entry status=%x.\n", + pkt->entry_type, pkt->entry_status); + break; + } + } + + /* Adjust ring index */ + wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index); +} + +/** + * qlafx00_async_event() - Process asynchronous events. + * @vha: SCSI driver HA context + */ +static void +qlafx00_async_event(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct device_reg_fx00 __iomem *reg; + int data_size = 1; + + reg = &ha->iobase->ispfx00; + /* Setup to process RIO completion. 
*/ + switch (ha->aenmb[0]) { + case QLAFX00_MBA_SYSTEM_ERR: /* System Error */ + ql_log(ql_log_warn, vha, 0x5079, + "ISP System Error - mbx1=%x\n", ha->aenmb[0]); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + + case QLAFX00_MBA_SHUTDOWN_RQSTD: /* Shutdown requested */ + ql_dbg(ql_dbg_async, vha, 0x5076, + "Asynchronous FW shutdown requested.\n"); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + break; + + case QLAFX00_MBA_PORT_UPDATE: /* Port database update */ + ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1); + ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2); + ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3); + ql_dbg(ql_dbg_async, vha, 0x5077, + "Asynchronous port Update received " + "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n", + ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]); + data_size = 4; + break; + + case QLAFX00_MBA_TEMP_OVER: /* Over temperature event */ + ql_log(ql_log_info, vha, 0x5085, + "Asynchronous over temperature event received " + "aenmb[0]: %x\n", + ha->aenmb[0]); + break; + + case QLAFX00_MBA_TEMP_NORM: /* Normal temperature event */ + ql_log(ql_log_info, vha, 0x5086, + "Asynchronous normal temperature event received " + "aenmb[0]: %x\n", + ha->aenmb[0]); + break; + + case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */ + ql_log(ql_log_info, vha, 0x5083, + "Asynchronous critical temperature event received " + "aenmb[0]: %x\n", + ha->aenmb[0]); + break; + + default: + ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1); + ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2); + ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3); + ha->aenmb[4] = rd_reg_dword(&reg->aenmailbox4); + ha->aenmb[5] = rd_reg_dword(&reg->aenmailbox5); + ha->aenmb[6] = rd_reg_dword(&reg->aenmailbox6); + ha->aenmb[7] = rd_reg_dword(&reg->aenmailbox7); + ql_dbg(ql_dbg_async, vha, 0x5078, + "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n", + ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3], + ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]); + break; + } + qlafx00_post_aenfx_work(vha, ha->aenmb[0], + (uint32_t *)ha->aenmb, data_size); +} + +/** + * qlafx00_mbx_completion() - Process mailbox command completions. + * @vha: SCSI driver HA context + * @mb0: value to be written into mailbox register 0 + */ +static void +qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0) +{ + uint16_t cnt; + __le32 __iomem *wptr; + struct qla_hw_data *ha = vha->hw; + struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; + + if (!ha->mcp32) + ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n"); + + /* Load return mailbox registers. */ + ha->flags.mbox_int = 1; + ha->mailbox_out32[0] = mb0; + wptr = &reg->mailbox17; + + for (cnt = 1; cnt < ha->mbx_count; cnt++) { + ha->mailbox_out32[cnt] = rd_reg_dword(wptr); + wptr++; + } +} + +/** + * qlafx00_intr_handler() - Process interrupts for the ISPFX00. + * @irq: interrupt number + * @dev_id: SCSI driver HA context + * + * Called by system whenever the host adapter generates an interrupt. + * + * Returns handled flag. 
*/ +irqreturn_t +qlafx00_intr_handler(int irq, void *dev_id) +{ + scsi_qla_host_t *vha; + struct qla_hw_data *ha; + struct device_reg_fx00 __iomem *reg; + int status; + unsigned long iter; + uint32_t stat; + uint32_t mb[8]; + struct rsp_que *rsp; + unsigned long flags; + uint32_t clr_intr = 0; + uint32_t intr_stat = 0; + + rsp = (struct rsp_que *) dev_id; + if (!rsp) { + ql_log(ql_log_info, NULL, 0x507d, + "%s: NULL response queue pointer.\n", __func__); + return IRQ_NONE; + } + + ha = rsp->hw; + reg = &ha->iobase->ispfx00; + status = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + return IRQ_HANDLED; + + spin_lock_irqsave(&ha->hardware_lock, flags); + vha = pci_get_drvdata(ha->pdev); + for (iter = 50; iter--; clr_intr = 0) { + stat = QLAFX00_RD_INTR_REG(ha); + if (qla2x00_check_reg32_for_disconnect(vha, stat)) + break; + intr_stat = stat & QLAFX00_HST_INT_STS_BITS; + if (!intr_stat) + break; + + if (stat & QLAFX00_INTR_MB_CMPLT) { + mb[0] = rd_reg_dword(&reg->mailbox16); + qlafx00_mbx_completion(vha, mb[0]); + status |= MBX_INTERRUPT; + clr_intr |= QLAFX00_INTR_MB_CMPLT; + } + if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) { + ha->aenmb[0] = rd_reg_dword(&reg->aenmailbox0); + qlafx00_async_event(vha); + clr_intr |= QLAFX00_INTR_ASYNC_CMPLT; + } + if (intr_stat & QLAFX00_INTR_RSP_CMPLT) { + qlafx00_process_response_queue(vha, rsp); + clr_intr |= QLAFX00_INTR_RSP_CMPLT; + } + + QLAFX00_CLR_INTR_REG(ha, clr_intr); + QLAFX00_RD_INTR_REG(ha); + } + + qla2x00_handle_mbx_completion(ha, status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return IRQ_HANDLED; +} + +/** QLAFX00 specific IOCB implementation functions */ + +static inline cont_a64_entry_t * +qlafx00_prep_cont_type1_iocb(struct req_que *req, + cont_a64_entry_t *lcont_pkt) +{ + cont_a64_entry_t *cont_pkt; + + /* Adjust ring index. */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else { + req->ring_ptr++; + } + + cont_pkt = (cont_a64_entry_t *)req->ring_ptr; + + /* Load packet defaults. */ + lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00; + + return cont_pkt; +} + +static inline void +qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt, + uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt) +{ + uint16_t avail_dsds; + struct dsd64 *cur_dsd; + scsi_qla_host_t *vha; + struct scsi_cmnd *cmd; + struct scatterlist *sg; + int i, cont; + struct req_que *req; + cont_a64_entry_t lcont_pkt; + cont_a64_entry_t *cont_pkt; + + vha = sp->vha; + req = vha->req; + + cmd = GET_CMD_SP(sp); + cont = 0; + cont_pkt = NULL; + + /* Update entry type to indicate Command Type 3 IOCB */ + lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7; + + /* No data transfer */ + if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { + lcmd_pkt->byte_count = cpu_to_le32(0); + return; + } + + /* Set transfer direction */ + if (cmd->sc_data_direction == DMA_TO_DEVICE) { + lcmd_pkt->cntrl_flags = TMF_WRITE_DATA; + vha->qla_stats.output_bytes += scsi_bufflen(cmd); + } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { + lcmd_pkt->cntrl_flags = TMF_READ_DATA; + vha->qla_stats.input_bytes += scsi_bufflen(cmd); + } + + /* One DSD is available in the Command Type 3 IOCB */ + avail_dsds = 1; + cur_dsd = &lcmd_pkt->dsd; + + /* Load data segments */ + scsi_for_each_sg(cmd, sg, tot_dsds, i) { + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Five DSDs are available in the Continuation + * Type 1 IOCB. 
+ */ + memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE); + cont_pkt = + qlafx00_prep_cont_type1_iocb(req, &lcont_pkt); + cur_dsd = lcont_pkt.dsd; + avail_dsds = 5; + cont = 1; + } + + append_dsd64(&cur_dsd, sg); + avail_dsds--; + if (avail_dsds == 0 && cont == 1) { + cont = 0; + memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, + sizeof(lcont_pkt)); + } + + } + if (avail_dsds != 0 && cont == 1) { + memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, + sizeof(lcont_pkt)); + } +} + +/** + * qlafx00_start_scsi() - Send a SCSI command to the ISP + * @sp: command to send to the ISP + * + * Returns non-zero if a failure occurred, else zero. + */ +int +qlafx00_start_scsi(srb_t *sp) +{ + int nseg; + unsigned long flags; + uint32_t handle; + uint16_t cnt; + uint16_t req_cnt; + uint16_t tot_dsds; + struct req_que *req = NULL; + struct rsp_que *rsp = NULL; + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct scsi_qla_host *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + struct cmd_type_7_fx00 *cmd_pkt; + struct cmd_type_7_fx00 lcmd_pkt; + struct scsi_lun llun; + + /* Setup device pointers. */ + rsp = ha->rsp_q_map[0]; + req = vha->req; + + /* So we know we haven't pci_map'ed anything yet */ + tot_dsds = 0; + + /* Acquire ring specific lock */ + spin_lock_irqsave(&ha->hardware_lock, flags); + + handle = qla2xxx_get_next_handle(req); + if (handle == 0) + goto queuing_error; + + /* Map the sg table so we have an accurate count of sg entries needed */ + if (scsi_sg_count(cmd)) { + nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), + scsi_sg_count(cmd), cmd->sc_data_direction); + if (unlikely(!nseg)) + goto queuing_error; + } else + nseg = 0; + + tot_dsds = nseg; + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + if (req->cnt < (req_cnt + 2)) { + cnt = rd_reg_dword_relaxed(req->req_q_out); + + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - + (req->ring_index - cnt); + if (req->cnt < (req_cnt + 2)) + goto queuing_error; + } + + /* Build command packet. */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + cmd->host_scribble = (unsigned char *)(unsigned long)handle; + req->cnt -= req_cnt; + + cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr; + + memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE); + + lcmd_pkt.handle = make_handle(req->id, sp->handle); + lcmd_pkt.reserved_0 = 0; + lcmd_pkt.port_path_ctrl = 0; + lcmd_pkt.reserved_1 = 0; + lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds); + lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id); + + int_to_scsilun(cmd->device->lun, &llun); + host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun, + sizeof(lcmd_pkt.lun)); + + /* Load SCSI command packet. */ + host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb)); + lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); + + /* Build IOCB segments */ + qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt); + + /* Set total data segment count. */ + lcmd_pkt.entry_count = (uint8_t)req_cnt; + + /* Specify response queue number where completion should happen */ + lcmd_pkt.entry_status = (uint8_t) rsp->id; + + ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e, + cmd->cmnd, cmd->cmd_len); + ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032, + &lcmd_pkt, sizeof(lcmd_pkt)); + + memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE); + wmb(); + + /* Adjust ring index. 
*/ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else + req->ring_ptr++; + + sp->flags |= SRB_DMA_VALID; + + /* Set chip new ring index. */ + wrt_reg_dword(req->req_q_in, req->ring_index); + QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return QLA_SUCCESS; + +queuing_error: + if (tot_dsds) + scsi_dma_unmap(cmd); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_FUNCTION_FAILED; +} + +void +qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb) +{ + struct srb_iocb *fxio = &sp->u.iocb_cmd; + scsi_qla_host_t *vha = sp->vha; + struct req_que *req = vha->req; + struct tsk_mgmt_entry_fx00 tm_iocb; + struct scsi_lun llun; + + memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00)); + tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00; + tm_iocb.entry_count = 1; + tm_iocb.handle = make_handle(req->id, sp->handle); + tm_iocb.reserved_0 = 0; + tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id); + tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags); + if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) { + int_to_scsilun(fxio->u.tmf.lun, &llun); + host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun, + sizeof(struct scsi_lun)); + } + + memcpy(ptm_iocb, &tm_iocb, + sizeof(struct tsk_mgmt_entry_fx00)); + wmb(); +} + +void +qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb) +{ + struct srb_iocb *fxio = &sp->u.iocb_cmd; + scsi_qla_host_t *vha = sp->vha; + struct req_que *req = vha->req; + struct abort_iocb_entry_fx00 abt_iocb; + + memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00)); + abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00; + abt_iocb.entry_count = 1; + abt_iocb.handle = make_handle(req->id, sp->handle); + abt_iocb.abort_handle = make_handle(req->id, fxio->u.abt.cmd_hndl); + abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id); + abt_iocb.req_que_no = cpu_to_le16(req->id); + + memcpy(pabt_iocb, &abt_iocb, + sizeof(struct abort_iocb_entry_fx00)); + wmb(); +} + +void +qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) +{ + struct srb_iocb *fxio = &sp->u.iocb_cmd; + struct qla_mt_iocb_rqst_fx00 *piocb_rqst; + struct bsg_job *bsg_job; + struct fc_bsg_request *bsg_request; + struct fxdisc_entry_fx00 fx_iocb; + uint8_t entry_cnt = 1; + + memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00)); + fx_iocb.entry_type = FX00_IOCB_TYPE; + fx_iocb.handle = sp->handle; + fx_iocb.entry_count = entry_cnt; + + if (sp->type == SRB_FXIOCB_DCMD) { + fx_iocb.func_num = + sp->u.iocb_cmd.u.fxiocb.req_func_type; + fx_iocb.adapid = fxio->u.fxiocb.adapter_id; + fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi; + fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0; + fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1; + fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra; + + if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) { + fx_iocb.req_dsdcnt = cpu_to_le16(1); + fx_iocb.req_xfrcnt = + cpu_to_le16(fxio->u.fxiocb.req_len); + put_unaligned_le64(fxio->u.fxiocb.req_dma_handle, + &fx_iocb.dseg_rq[0].address); + fx_iocb.dseg_rq[0].length = + cpu_to_le32(fxio->u.fxiocb.req_len); + } + + if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) { + fx_iocb.rsp_dsdcnt = cpu_to_le16(1); + fx_iocb.rsp_xfrcnt = + cpu_to_le16(fxio->u.fxiocb.rsp_len); + put_unaligned_le64(fxio->u.fxiocb.rsp_dma_handle, + &fx_iocb.dseg_rsp[0].address); + fx_iocb.dseg_rsp[0].length = + cpu_to_le32(fxio->u.fxiocb.rsp_len); + } + + if 
(fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) { + fx_iocb.dataword = fxio->u.fxiocb.req_data; + } + fx_iocb.flags = fxio->u.fxiocb.flags; + } else { + struct scatterlist *sg; + + bsg_job = sp->u.bsg_job; + bsg_request = bsg_job->request; + piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) + &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; + + fx_iocb.func_num = piocb_rqst->func_type; + fx_iocb.adapid = piocb_rqst->adapid; + fx_iocb.adapid_hi = piocb_rqst->adapid_hi; + fx_iocb.reserved_0 = piocb_rqst->reserved_0; + fx_iocb.reserved_1 = piocb_rqst->reserved_1; + fx_iocb.dataword_extra = piocb_rqst->dataword_extra; + fx_iocb.dataword = piocb_rqst->dataword; + fx_iocb.req_xfrcnt = piocb_rqst->req_len; + fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len; + + if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) { + int avail_dsds, tot_dsds; + cont_a64_entry_t lcont_pkt; + cont_a64_entry_t *cont_pkt = NULL; + struct dsd64 *cur_dsd; + int index = 0, cont = 0; + + fx_iocb.req_dsdcnt = + cpu_to_le16(bsg_job->request_payload.sg_cnt); + tot_dsds = + bsg_job->request_payload.sg_cnt; + cur_dsd = &fx_iocb.dseg_rq[0]; + avail_dsds = 1; + for_each_sg(bsg_job->request_payload.sg_list, sg, + tot_dsds, index) { + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Five DSDs are available in the Cont. + * Type 1 IOCB. + */ + memset(&lcont_pkt, 0, + REQUEST_ENTRY_SIZE); + cont_pkt = + qlafx00_prep_cont_type1_iocb( + sp->vha->req, &lcont_pkt); + cur_dsd = lcont_pkt.dsd; + avail_dsds = 5; + cont = 1; + entry_cnt++; + } + + append_dsd64(&cur_dsd, sg); + avail_dsds--; + + if (avail_dsds == 0 && cont == 1) { + cont = 0; + memcpy_toio( + (void __iomem *)cont_pkt, + &lcont_pkt, REQUEST_ENTRY_SIZE); + ql_dump_buffer( + ql_dbg_user + ql_dbg_verbose, + sp->vha, 0x3042, + (uint8_t *)&lcont_pkt, + REQUEST_ENTRY_SIZE); + } + } + if (avail_dsds != 0 && cont == 1) { + memcpy_toio((void __iomem *)cont_pkt, + &lcont_pkt, REQUEST_ENTRY_SIZE); + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, + sp->vha, 0x3043, + (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); + } + } + + if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) { + int avail_dsds, tot_dsds; + cont_a64_entry_t lcont_pkt; + cont_a64_entry_t *cont_pkt = NULL; + struct dsd64 *cur_dsd; + int index = 0, cont = 0; + + fx_iocb.rsp_dsdcnt = + cpu_to_le16(bsg_job->reply_payload.sg_cnt); + tot_dsds = bsg_job->reply_payload.sg_cnt; + cur_dsd = &fx_iocb.dseg_rsp[0]; + avail_dsds = 1; + + for_each_sg(bsg_job->reply_payload.sg_list, sg, + tot_dsds, index) { + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Five DSDs are available in the Cont. + * Type 1 IOCB. 
+ */ + memset(&lcont_pkt, 0, + REQUEST_ENTRY_SIZE); + cont_pkt = + qlafx00_prep_cont_type1_iocb( + sp->vha->req, &lcont_pkt); + cur_dsd = lcont_pkt.dsd; + avail_dsds = 5; + cont = 1; + entry_cnt++; + } + + append_dsd64(&cur_dsd, sg); + avail_dsds--; + + if (avail_dsds == 0 && cont == 1) { + cont = 0; + memcpy_toio((void __iomem *)cont_pkt, + &lcont_pkt, + REQUEST_ENTRY_SIZE); + ql_dump_buffer( + ql_dbg_user + ql_dbg_verbose, + sp->vha, 0x3045, + (uint8_t *)&lcont_pkt, + REQUEST_ENTRY_SIZE); + } + } + if (avail_dsds != 0 && cont == 1) { + memcpy_toio((void __iomem *)cont_pkt, + &lcont_pkt, REQUEST_ENTRY_SIZE); + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, + sp->vha, 0x3046, + (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); + } + } + + if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID) + fx_iocb.dataword = piocb_rqst->dataword; + fx_iocb.flags = piocb_rqst->flags; + fx_iocb.entry_count = entry_cnt; + } + + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, + sp->vha, 0x3047, &fx_iocb, sizeof(fx_iocb)); + + memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, sizeof(fx_iocb)); + wmb(); +} diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h new file mode 100644 index 000000000..4f63aff33 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_mr.h @@ -0,0 +1,529 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#ifndef __QLA_MR_H +#define __QLA_MR_H + +#include "qla_dsd.h" + +/* + * The PCI VendorID and DeviceID for our board. + */ +#define PCI_DEVICE_ID_QLOGIC_ISPF001 0xF001 + +/* FX00 specific definitions */ + +#define FX00_COMMAND_TYPE_7 0x07 /* Command Type 7 entry for 7XXX */ +struct cmd_type_7_fx00 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + uint8_t reserved_0; + uint8_t port_path_ctrl; + uint16_t reserved_1; + + __le16 tgt_idx; /* Target Idx. */ + uint16_t timeout; /* Command timeout. */ + + __le16 dseg_count; /* Data segment count. */ + uint8_t scsi_rsp_dsd_len; + uint8_t reserved_2; + + struct scsi_lun lun; /* LUN (LE). */ + + uint8_t cntrl_flags; + + uint8_t task_mgmt_flags; /* Task management flags. */ + + uint8_t task; + + uint8_t crn; + + uint8_t fcp_cdb[MAX_CMDSZ]; /* SCSI command words. */ + __le32 byte_count; /* Total byte count. */ + + struct dsd64 dsd; +}; + +#define STATUS_TYPE_FX00 0x01 /* Status entry. */ +struct sts_entry_fx00 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + uint32_t reserved_3; /* System handle. */ + + __le16 comp_status; /* Completion status. */ + uint16_t reserved_0; /* OX_ID used by the firmware. */ + + __le32 residual_len; /* FW calc residual transfer length. */ + + uint16_t reserved_1; + uint16_t state_flags; /* State flags. */ + + uint16_t reserved_2; + __le16 scsi_status; /* SCSI status. */ + + uint32_t sense_len; /* FCP SENSE length. */ + uint8_t data[32]; /* FCP response/sense information. */ +}; + + +#define MAX_HANDLE_COUNT 15 +#define MULTI_STATUS_TYPE_FX00 0x0D + +struct multi_sts_entry_fx00 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. 
*/ + uint8_t handle_count; + uint8_t entry_status; + + __le32 handles[MAX_HANDLE_COUNT]; +}; + +#define TSK_MGMT_IOCB_TYPE_FX00 0x05 +struct tsk_mgmt_entry_fx00 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + + uint32_t reserved_0; + + __le16 tgt_id; /* Target Idx. */ + + uint16_t reserved_1; + uint16_t reserved_3; + uint16_t reserved_4; + + struct scsi_lun lun; /* LUN (LE). */ + + __le32 control_flags; /* Control Flags. */ + + uint8_t reserved_2[32]; +}; + + +#define ABORT_IOCB_TYPE_FX00 0x08 /* Abort IOCB status. */ +struct abort_iocb_entry_fx00 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + __le32 reserved_0; + + __le16 tgt_id_sts; /* Completion status. */ + __le16 options; + + uint32_t abort_handle; /* System handle. */ + __le32 reserved_2; + + __le16 req_que_no; + uint8_t reserved_1[38]; +}; + +#define IOCTL_IOSB_TYPE_FX00 0x0C +struct ioctl_iocb_entry_fx00 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + uint32_t reserved_0; /* System handle. */ + + uint16_t comp_func_num; + __le16 fw_iotcl_flags; + + __le32 dataword_r; /* Data word returned */ + uint32_t adapid; /* Adapter ID */ + uint32_t dataword_r_extra; + + __le32 seq_no; + uint8_t reserved_2[20]; + uint32_t residuallen; + __le32 status; +}; + +#define STATUS_CONT_TYPE_FX00 0x04 + +#define FX00_IOCB_TYPE 0x0B +struct fxdisc_entry_fx00 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System Defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + __le32 reserved_0; /* System handle. */ + + __le16 func_num; + __le16 req_xfrcnt; + __le16 req_dsdcnt; + __le16 rsp_xfrcnt; + __le16 rsp_dsdcnt; + uint8_t flags; + uint8_t reserved_1; + + /* + * Use array size 1 below to prevent that Coverity complains about + * the append_dsd64() calls for the two arrays below. 
+ */ + struct dsd64 dseg_rq[1]; + struct dsd64 dseg_rsp[1]; + + __le32 dataword; + __le32 adapid; + __le32 adapid_hi; + __le32 dataword_extra; +}; + +struct qlafx00_tgt_node_info { + uint8_t tgt_node_wwpn[WWN_SIZE]; + uint8_t tgt_node_wwnn[WWN_SIZE]; + uint32_t tgt_node_state; + uint8_t reserved[128]; + uint32_t reserved_1[8]; + uint64_t reserved_2[4]; +} __packed; + +#define QLAFX00_TGT_NODE_INFO sizeof(struct qlafx00_tgt_node_info) + +#define QLAFX00_LINK_STATUS_DOWN 0x10 +#define QLAFX00_LINK_STATUS_UP 0x11 + +#define QLAFX00_PORT_SPEED_2G 0x2 +#define QLAFX00_PORT_SPEED_4G 0x4 +#define QLAFX00_PORT_SPEED_8G 0x8 +#define QLAFX00_PORT_SPEED_10G 0xa +struct port_info_data { + uint8_t port_state; + uint8_t port_type; + uint16_t port_identifier; + uint32_t up_port_state; + uint8_t fw_ver_num[32]; + uint8_t portal_attrib; + uint16_t host_option; + uint8_t reset_delay; + uint8_t pdwn_retry_cnt; + uint16_t max_luns2tgt; + uint8_t risc_ver; + uint8_t pconn_option; + uint16_t risc_option; + uint16_t max_frame_len; + uint16_t max_iocb_alloc; + uint16_t exec_throttle; + uint8_t retry_cnt; + uint8_t retry_delay; + uint8_t port_name[8]; + uint8_t port_id[3]; + uint8_t link_status; + uint8_t plink_rate; + uint32_t link_config; + uint16_t adap_haddr; + uint8_t tgt_disc; + uint8_t log_tout; + uint8_t node_name[8]; + uint16_t erisc_opt1; + uint8_t resp_acc_tmr; + uint8_t intr_del_tmr; + uint8_t erisc_opt2; + uint8_t alt_port_name[8]; + uint8_t alt_node_name[8]; + uint8_t link_down_tout; + uint8_t conn_type; + uint8_t fc_fw_mode; + uint32_t uiReserved[48]; +} __packed; + +/* OS Type Designations */ +#define OS_TYPE_UNKNOWN 0 +#define OS_TYPE_LINUX 2 + +/* Linux Info */ +#define SYSNAME_LENGTH 128 +#define NODENAME_LENGTH 64 +#define RELEASE_LENGTH 64 +#define VERSION_LENGTH 64 +#define MACHINE_LENGTH 64 +#define DOMNAME_LENGTH 64 + +struct host_system_info { + uint32_t os_type; + char sysname[SYSNAME_LENGTH]; + char nodename[NODENAME_LENGTH]; + char release[RELEASE_LENGTH]; + char version[VERSION_LENGTH]; + char machine[MACHINE_LENGTH]; + char domainname[DOMNAME_LENGTH]; + char hostdriver[VERSION_LENGTH]; + uint32_t reserved[64]; +} __packed; + +struct register_host_info { + struct host_system_info hsi; /* host system info */ + uint64_t utc; /* UTC (system time) */ + uint32_t reserved[64]; /* future additions */ +} __packed; + + +#define QLAFX00_PORT_DATA_INFO (sizeof(struct port_info_data)) +#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32) + +struct config_info_data { + uint8_t model_num[16]; + uint8_t model_description[80]; + uint8_t reserved0[160]; + uint8_t symbolic_name[64]; + uint8_t serial_num[32]; + uint8_t hw_version[16]; + uint8_t fw_version[16]; + uint8_t uboot_version[16]; + uint8_t fru_serial_num[32]; + + uint8_t fc_port_count; + uint8_t iscsi_port_count; + uint8_t reserved1[2]; + + uint8_t mode; + uint8_t log_level; + uint8_t reserved2[2]; + + uint32_t log_size; + + uint8_t tgt_pres_mode; + uint8_t iqn_flags; + uint8_t lun_mapping; + + uint64_t adapter_id; + + uint32_t cluster_key_len; + uint8_t cluster_key[16]; + + uint64_t cluster_master_id; + uint64_t cluster_slave_id; + uint8_t cluster_flags; + uint32_t enabled_capabilities; + uint32_t nominal_temp_value; +} __packed; + +#define FXDISC_GET_CONFIG_INFO 0x01 +#define FXDISC_GET_PORT_INFO 0x02 +#define FXDISC_GET_TGT_NODE_INFO 0x80 +#define FXDISC_GET_TGT_NODE_LIST 0x81 +#define FXDISC_REG_HOST_INFO 0x99 +#define FXDISC_ABORT_IOCTL 0xff + +#define QLAFX00_HBA_ICNTRL_REG 0x20B08 +#define QLAFX00_ICR_ENB_MASK 0x80000000 +#define 
QLAFX00_ICR_DIS_MASK 0x7fffffff +#define QLAFX00_HST_RST_REG 0x18264 +#define QLAFX00_SOC_TEMP_REG 0x184C4 +#define QLAFX00_HST_TO_HBA_REG 0x20A04 +#define QLAFX00_HBA_TO_HOST_REG 0x21B70 +#define QLAFX00_HST_INT_STS_BITS 0x7 +#define QLAFX00_BAR1_BASE_ADDR_REG 0x40018 +#define QLAFX00_PEX0_WIN0_BASE_ADDR_REG 0x41824 + +#define QLAFX00_INTR_MB_CMPLT 0x1 +#define QLAFX00_INTR_RSP_CMPLT 0x2 +#define QLAFX00_INTR_ASYNC_CMPLT 0x4 + +#define QLAFX00_MBA_SYSTEM_ERR 0x8002 +#define QLAFX00_MBA_TEMP_OVER 0x8005 +#define QLAFX00_MBA_TEMP_NORM 0x8006 +#define QLAFX00_MBA_TEMP_CRIT 0x8007 +#define QLAFX00_MBA_LINK_UP 0x8011 +#define QLAFX00_MBA_LINK_DOWN 0x8012 +#define QLAFX00_MBA_PORT_UPDATE 0x8014 +#define QLAFX00_MBA_SHUTDOWN_RQSTD 0x8062 + +#define SOC_SW_RST_CONTROL_REG_CORE0 0x0020800 +#define SOC_FABRIC_RST_CONTROL_REG 0x0020840 +#define SOC_FABRIC_CONTROL_REG 0x0020200 +#define SOC_FABRIC_CONFIG_REG 0x0020204 +#define SOC_PWR_MANAGEMENT_PWR_DOWN_REG 0x001820C + +#define SOC_INTERRUPT_SOURCE_I_CONTROL_REG 0x0020B00 +#define SOC_CORE_TIMER_REG 0x0021850 +#define SOC_IRQ_ACK_REG 0x00218b4 + +#define CONTINUE_A64_TYPE_FX00 0x03 /* Continuation entry. */ + +#define QLAFX00_SET_HST_INTR(ha, value) \ + wrt_reg_dword((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \ + value) + +#define QLAFX00_CLR_HST_INTR(ha, value) \ + wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \ + ~value) + +#define QLAFX00_RD_INTR_REG(ha) \ + rd_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG) + +#define QLAFX00_CLR_INTR_REG(ha, value) \ + wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \ + ~value) + +#define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\ + wrt_reg_dword((ha)->cregbase + off, val) + +#define QLAFX00_GET_HBA_SOC_REG(ha, off)\ + rd_reg_dword((ha)->cregbase + off) + +#define QLAFX00_HBA_RST_REG(ha, val)\ + wrt_reg_dword((ha)->cregbase + QLAFX00_HST_RST_REG, val) + +#define QLAFX00_RD_ICNTRL_REG(ha) \ + rd_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG) + +#define QLAFX00_ENABLE_ICNTRL_REG(ha) \ + wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \ + (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \ + QLAFX00_ICR_ENB_MASK)) + +#define QLAFX00_DISABLE_ICNTRL_REG(ha) \ + wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \ + (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \ + QLAFX00_ICR_DIS_MASK)) + +#define QLAFX00_RD_REG(ha, off) \ + rd_reg_dword((ha)->cregbase + off) + +#define QLAFX00_WR_REG(ha, off, val) \ + wrt_reg_dword((ha)->cregbase + off, val) + +struct qla_mt_iocb_rqst_fx00 { + __le32 reserved_0; + + __le16 func_type; + uint8_t flags; + uint8_t reserved_1; + + __le32 dataword; + + __le32 adapid; + __le32 adapid_hi; + + __le32 dataword_extra; + + __le16 req_len; + __le16 reserved_2; + + __le16 rsp_len; + __le16 reserved_3; +}; + +struct qla_mt_iocb_rsp_fx00 { + uint32_t reserved_1; + + uint16_t func_type; + __le16 ioctl_flags; + + __le32 ioctl_data; + + uint32_t adapid; + uint32_t adapid_hi; + + uint32_t reserved_2; + __le32 seq_number; + + uint8_t reserved_3[20]; + + int32_t res_count; + + __le32 status; +}; + + +#define MAILBOX_REGISTER_COUNT_FX00 16 +#define AEN_MAILBOX_REGISTER_COUNT_FX00 8 +#define MAX_FIBRE_DEVICES_FX00 512 +#define MAX_LUNS_FX00 0x1024 +#define MAX_TARGETS_FX00 MAX_ISA_DEVICES +#define REQUEST_ENTRY_CNT_FX00 512 /* Number of request entries. 
*/ +#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/ + +/* + * Firmware state codes for QLAFX00 adapters + */ +#define FSTATE_FX00_CONFIG_WAIT 0x0000 /* Waiting for driver to issue + * Initialize FW Mbox cmd + */ +#define FSTATE_FX00_INITIALIZED 0x1000 /* FW has been initialized by + * the driver + */ + +#define FX00_DEF_RATOV 10 + +struct mr_data_fx00 { + uint8_t symbolic_name[64]; + uint8_t serial_num[32]; + uint8_t hw_version[16]; + uint8_t fw_version[16]; + uint8_t uboot_version[16]; + uint8_t fru_serial_num[32]; + fc_port_t fcport; /* fcport used for requests + * that are not linked + * to a particular target + */ + uint8_t fw_hbt_en; + uint8_t fw_hbt_cnt; + uint8_t fw_hbt_miss_cnt; + uint32_t old_fw_hbt_cnt; + uint16_t fw_reset_timer_tick; + uint8_t fw_reset_timer_exp; + uint16_t fw_critemp_timer_tick; + uint32_t old_aenmbx0_state; + uint32_t critical_temperature; + bool extended_io_enabled; + bool host_info_resend; + uint8_t hinfo_resend_timer_tick; +}; + +#define QLAFX00_EXTENDED_IO_EN_MASK 0x20 + +/* + * SoC Junction Temperature is stored in + * bits 9:1 of SoC Junction Temperature Register + * in a firmware specific format. + * To get the temperature in Celsius degrees + * the value from this bitfield should be converted + * using this formula: + * Temperature (degrees C) = ((3,153,000 - (10,000 * X)) / 13,825) + * where X is the bit field value + * this macro reads the register, extracts the bitfield value, + * performs the calculations and returns temperature in Celsius + */ +#define QLAFX00_GET_TEMPERATURE(ha) ((3153000 - (10000 * \ + ((QLAFX00_RD_REG(ha, QLAFX00_SOC_TEMP_REG) & 0x3FE) >> 1))) / 13825) + + +#define QLAFX00_LOOP_DOWN_TIME 615 /* 600 */ +#define QLAFX00_HEARTBEAT_INTERVAL 6 /* number of seconds */ +#define QLAFX00_HEARTBEAT_MISS_CNT 3 /* number of miss */ +#define QLAFX00_RESET_INTERVAL 120 /* number of seconds */ +#define QLAFX00_MAX_RESET_INTERVAL 600 /* number of seconds */ +#define QLAFX00_CRITEMP_INTERVAL 60 /* number of seconds */ +#define QLAFX00_HINFO_RESEND_INTERVAL 60 /* number of seconds */ + +#define QLAFX00_CRITEMP_THRSHLD 80 /* Celsius degrees */ + +/* Max concurrent IOs that can be queued */ +#define QLAFX00_MAX_CANQUEUE 1024 + +/* IOCTL IOCB abort success */ +#define QLAFX00_IOCTL_ICOB_ABORT_SUCCESS 0x68 + +#endif diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c new file mode 100644 index 000000000..a8ddf356e --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -0,0 +1,1326 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2017 QLogic Corporation + */ +#include "qla_nvme.h" +#include <linux/scatterlist.h> +#include <linux/delay.h> +#include <linux/nvme.h> +#include <linux/nvme-fc.h> +#include <linux/blk-mq-pci.h> +#include <linux/blk-mq.h> + +static struct nvme_fc_port_template qla_nvme_fc_transport; +static int qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, + struct qla_qpair *qp, + struct qla_nvme_lsrjt_pt_arg *a, + bool is_xchg_terminate); + +struct qla_nvme_unsol_ctx { + struct list_head elem; + struct scsi_qla_host *vha; + struct fc_port *fcport; + struct srb *sp; + struct nvmefc_ls_rsp lsrsp; + struct nvmefc_ls_rsp *fd_rsp; + struct work_struct lsrsp_work; + struct work_struct abort_work; + __le32 exchange_address; + __le16 nport_handle; + __le16 ox_id; + int comp_status; + spinlock_t cmd_lock; +}; + +int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) +{ + struct qla_nvme_rport *rport; + struct nvme_fc_port_info req; + int ret; + + if (!IS_ENABLED(CONFIG_NVME_FC)) + return 0; + + if 
(!vha->flags.nvme_enabled) { + ql_log(ql_log_info, vha, 0x2100, + "%s: Not registering target since Host NVME is not enabled\n", + __func__); + return 0; + } + + if (!vha->nvme_local_port && qla_nvme_register_hba(vha)) + return 0; + + if (!(fcport->nvme_prli_service_param & + (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) || + (fcport->nvme_flag & NVME_FLAG_REGISTERED)) + return 0; + + fcport->nvme_flag &= ~NVME_FLAG_RESETTING; + + memset(&req, 0, sizeof(struct nvme_fc_port_info)); + req.port_name = wwn_to_u64(fcport->port_name); + req.node_name = wwn_to_u64(fcport->node_name); + req.port_role = 0; + req.dev_loss_tmo = fcport->dev_loss_tmo; + + if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR) + req.port_role = FC_PORT_ROLE_NVME_INITIATOR; + + if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET) + req.port_role |= FC_PORT_ROLE_NVME_TARGET; + + if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY) + req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY; + + req.port_id = fcport->d_id.b24; + + ql_log(ql_log_info, vha, 0x2102, + "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n", + __func__, req.node_name, req.port_name, + req.port_id); + + ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req, + &fcport->nvme_remote_port); + if (ret) { + ql_log(ql_log_warn, vha, 0x212e, + "Failed to register remote port. Transport returned %d\n", + ret); + return ret; + } + + nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, + fcport->dev_loss_tmo); + + if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER) + ql_log(ql_log_info, vha, 0x212a, + "PortID:%06x Supports SLER\n", req.port_id); + + if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL) + ql_log(ql_log_info, vha, 0x212b, + "PortID:%06x Supports PI control\n", req.port_id); + + rport = fcport->nvme_remote_port->private; + rport->fcport = fcport; + + fcport->nvme_flag |= NVME_FLAG_REGISTERED; + return 0; +} + +/* Allocate a queue for NVMe traffic */ +static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, + unsigned int qidx, u16 qsize, void **handle) +{ + struct scsi_qla_host *vha; + struct qla_hw_data *ha; + struct qla_qpair *qpair; + + /* Map admin queue and 1st IO queue to index 0 */ + if (qidx) + qidx--; + + vha = (struct scsi_qla_host *)lport->private; + ha = vha->hw; + + ql_log(ql_log_info, vha, 0x2104, + "%s: handle %p, idx =%d, qsize %d\n", + __func__, handle, qidx, qsize); + + if (qidx > qla_nvme_fc_transport.max_hw_queues) { + ql_log(ql_log_warn, vha, 0x212f, + "%s: Illegal qidx=%d. 
Max=%d\n", + __func__, qidx, qla_nvme_fc_transport.max_hw_queues); + return -EINVAL; + } + + /* Use base qpair if max_qpairs is 0 */ + if (!ha->max_qpairs) { + qpair = ha->base_qpair; + } else { + if (ha->queue_pair_map[qidx]) { + *handle = ha->queue_pair_map[qidx]; + ql_log(ql_log_info, vha, 0x2121, + "Returning existing qpair of %p for idx=%x\n", + *handle, qidx); + return 0; + } + + qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true); + if (!qpair) { + ql_log(ql_log_warn, vha, 0x2122, + "Failed to allocate qpair\n"); + return -EINVAL; + } + qla_adjust_iocb_limit(vha); + } + *handle = qpair; + + return 0; +} + +static void qla_nvme_release_fcp_cmd_kref(struct kref *kref) +{ + struct srb *sp = container_of(kref, struct srb, cmd_kref); + struct nvme_private *priv = (struct nvme_private *)sp->priv; + struct nvmefc_fcp_req *fd; + struct srb_iocb *nvme; + unsigned long flags; + + if (!priv) + goto out; + + nvme = &sp->u.iocb_cmd; + fd = nvme->u.nvme.desc; + + spin_lock_irqsave(&priv->cmd_lock, flags); + priv->sp = NULL; + sp->priv = NULL; + if (priv->comp_status == QLA_SUCCESS) { + fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len); + fd->status = NVME_SC_SUCCESS; + } else { + fd->rcv_rsplen = 0; + fd->transferred_length = 0; + fd->status = NVME_SC_INTERNAL; + } + spin_unlock_irqrestore(&priv->cmd_lock, flags); + + fd->done(fd); +out: + qla2xxx_rel_qpair_sp(sp->qpair, sp); +} + +static void qla_nvme_release_ls_cmd_kref(struct kref *kref) +{ + struct srb *sp = container_of(kref, struct srb, cmd_kref); + struct nvme_private *priv = (struct nvme_private *)sp->priv; + struct nvmefc_ls_req *fd; + unsigned long flags; + + if (!priv) + goto out; + + spin_lock_irqsave(&priv->cmd_lock, flags); + priv->sp = NULL; + sp->priv = NULL; + spin_unlock_irqrestore(&priv->cmd_lock, flags); + + fd = priv->fd; + + fd->done(fd, priv->comp_status); +out: + qla2x00_rel_sp(sp); +} + +static void qla_nvme_ls_complete(struct work_struct *work) +{ + struct nvme_private *priv = + container_of(work, struct nvme_private, ls_work); + + kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref); +} + +static void qla_nvme_sp_ls_done(srb_t *sp, int res) +{ + struct nvme_private *priv = sp->priv; + + if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0)) + return; + + if (res) + res = -EINVAL; + + priv->comp_status = res; + INIT_WORK(&priv->ls_work, qla_nvme_ls_complete); + schedule_work(&priv->ls_work); +} + +static void qla_nvme_release_lsrsp_cmd_kref(struct kref *kref) +{ + struct srb *sp = container_of(kref, struct srb, cmd_kref); + struct qla_nvme_unsol_ctx *uctx = sp->priv; + struct nvmefc_ls_rsp *fd_rsp; + unsigned long flags; + + if (!uctx) { + qla2x00_rel_sp(sp); + return; + } + + spin_lock_irqsave(&uctx->cmd_lock, flags); + uctx->sp = NULL; + sp->priv = NULL; + spin_unlock_irqrestore(&uctx->cmd_lock, flags); + + fd_rsp = uctx->fd_rsp; + + list_del(&uctx->elem); + + fd_rsp->done(fd_rsp); + kfree(uctx); + qla2x00_rel_sp(sp); +} + +static void qla_nvme_lsrsp_complete(struct work_struct *work) +{ + struct qla_nvme_unsol_ctx *uctx = + container_of(work, struct qla_nvme_unsol_ctx, lsrsp_work); + + kref_put(&uctx->sp->cmd_kref, qla_nvme_release_lsrsp_cmd_kref); +} + +static void qla_nvme_sp_lsrsp_done(srb_t *sp, int res) +{ + struct qla_nvme_unsol_ctx *uctx = sp->priv; + + if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0)) + return; + + if (res) + res = -EINVAL; + + uctx->comp_status = res; + INIT_WORK(&uctx->lsrsp_work, qla_nvme_lsrsp_complete); + schedule_work(&uctx->lsrsp_work); +} + +/* it assumed that QPair lock is 
held. */ +static void qla_nvme_sp_done(srb_t *sp, int res) +{ + struct nvme_private *priv = sp->priv; + + priv->comp_status = res; + kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref); + + return; +} + +static void qla_nvme_abort_work(struct work_struct *work) +{ + struct nvme_private *priv = + container_of(work, struct nvme_private, abort_work); + srb_t *sp = priv->sp; + fc_port_t *fcport = sp->fcport; + struct qla_hw_data *ha = fcport->vha->hw; + int rval, abts_done_called = 1; + bool io_wait_for_abort_done; + uint32_t handle; + + ql_dbg(ql_dbg_io, fcport->vha, 0xffff, + "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n", + __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted); + + if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED) + goto out; + + if (ha->flags.host_shutting_down) { + ql_log(ql_log_info, sp->fcport->vha, 0xffff, + "%s Calling done on sp: %p, type: 0x%x\n", + __func__, sp, sp->type); + sp->done(sp, 0); + goto out; + } + + /* + * sp may not be valid after abort_command if return code is either + * SUCCESS or ERR_FROM_FW codes, so cache the value here. + */ + io_wait_for_abort_done = ql2xabts_wait_nvme && + QLA_ABTS_WAIT_ENABLED(sp); + handle = sp->handle; + + rval = ha->isp_ops->abort_command(sp); + + ql_dbg(ql_dbg_io, fcport->vha, 0x212b, + "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n", + __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted", + sp, handle, fcport, rval); + + /* + * If async tmf is enabled, the abort callback is called only on + * return codes QLA_SUCCESS and QLA_ERR_FROM_FW. + */ + if (ql2xasynctmfenable && + rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW) + abts_done_called = 0; + + /* + * Returned before decreasing kref so that I/O requests + * are waited until ABTS complete. This kref is decreased + * at qla24xx_abort_sp_done function. + */ + if (abts_done_called && io_wait_for_abort_done) + return; +out: + /* kref_get was done before work was schedule. 
*/ + kref_put(&sp->cmd_kref, sp->put_fn); +} + +static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport, + struct nvme_fc_remote_port *rport, + struct nvmefc_ls_rsp *fd_resp) +{ + struct qla_nvme_unsol_ctx *uctx = container_of(fd_resp, + struct qla_nvme_unsol_ctx, lsrsp); + struct qla_nvme_rport *qla_rport = rport->private; + fc_port_t *fcport = qla_rport->fcport; + struct scsi_qla_host *vha = uctx->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_nvme_lsrjt_pt_arg a; + struct srb_iocb *nvme; + srb_t *sp; + int rval = QLA_FUNCTION_FAILED; + uint8_t cnt = 0; + + if (!fcport || fcport->deleted) + goto out; + + if (!ha->flags.fw_started) + goto out; + + /* Alloc SRB structure */ + sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); + if (!sp) + goto out; + + sp->type = SRB_NVME_LS; + sp->name = "nvme_ls"; + sp->done = qla_nvme_sp_lsrsp_done; + sp->put_fn = qla_nvme_release_lsrsp_cmd_kref; + sp->priv = (void *)uctx; + sp->unsol_rsp = 1; + uctx->sp = sp; + spin_lock_init(&uctx->cmd_lock); + nvme = &sp->u.iocb_cmd; + uctx->fd_rsp = fd_resp; + nvme->u.nvme.desc = fd_resp; + nvme->u.nvme.dir = 0; + nvme->u.nvme.dl = 0; + nvme->u.nvme.timeout_sec = 0; + nvme->u.nvme.cmd_dma = fd_resp->rspdma; + nvme->u.nvme.cmd_len = cpu_to_le32(fd_resp->rsplen); + nvme->u.nvme.rsp_len = 0; + nvme->u.nvme.rsp_dma = 0; + nvme->u.nvme.exchange_address = uctx->exchange_address; + nvme->u.nvme.nport_handle = uctx->nport_handle; + nvme->u.nvme.ox_id = uctx->ox_id; + dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, + fd_resp->rsplen, DMA_TO_DEVICE); + + ql_dbg(ql_dbg_unsol, vha, 0x2122, + "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n", + fcport->d_id.b24, fcport->port_name, uctx->exchange_address, + uctx->ox_id, uctx->nport_handle); +retry: + rval = qla2x00_start_sp(sp); + switch (rval) { + case QLA_SUCCESS: + break; + case EAGAIN: + msleep(PURLS_MSLEEP_INTERVAL); + cnt++; + if (cnt < PURLS_RETRY_COUNT) + goto retry; + + fallthrough; + default: + ql_dbg(ql_log_warn, vha, 0x2123, + "Failed to xmit Unsol ls response = %d\n", rval); + rval = -EIO; + qla2x00_rel_sp(sp); + goto out; + } + + return 0; +out: + memset((void *)&a, 0, sizeof(a)); + a.vp_idx = vha->vp_idx; + a.nport_handle = uctx->nport_handle; + a.xchg_address = uctx->exchange_address; + qla_nvme_ls_reject_iocb(vha, ha->base_qpair, &a, true); + kfree(uctx); + return rval; +} + +static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport, + struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd) +{ + struct nvme_private *priv = fd->private; + unsigned long flags; + + spin_lock_irqsave(&priv->cmd_lock, flags); + if (!priv->sp) { + spin_unlock_irqrestore(&priv->cmd_lock, flags); + return; + } + + if (!kref_get_unless_zero(&priv->sp->cmd_kref)) { + spin_unlock_irqrestore(&priv->cmd_lock, flags); + return; + } + spin_unlock_irqrestore(&priv->cmd_lock, flags); + + INIT_WORK(&priv->abort_work, qla_nvme_abort_work); + schedule_work(&priv->abort_work); +} + +static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, + struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd) +{ + struct qla_nvme_rport *qla_rport = rport->private; + fc_port_t *fcport = qla_rport->fcport; + struct srb_iocb *nvme; + struct nvme_private *priv = fd->private; + struct scsi_qla_host *vha; + int rval = QLA_FUNCTION_FAILED; + struct qla_hw_data *ha; + srb_t *sp; + + if (!fcport || fcport->deleted) + return rval; + + vha = fcport->vha; + ha = vha->hw; + + if (!ha->flags.fw_started) + return rval; + + /* Alloc SRB structure */ + sp = 
qla2x00_get_sp(vha, fcport, GFP_ATOMIC); + if (!sp) + return rval; + + sp->type = SRB_NVME_LS; + sp->name = "nvme_ls"; + sp->done = qla_nvme_sp_ls_done; + sp->put_fn = qla_nvme_release_ls_cmd_kref; + sp->priv = priv; + priv->sp = sp; + kref_init(&sp->cmd_kref); + spin_lock_init(&priv->cmd_lock); + nvme = &sp->u.iocb_cmd; + priv->fd = fd; + nvme->u.nvme.desc = fd; + nvme->u.nvme.dir = 0; + nvme->u.nvme.dl = 0; + nvme->u.nvme.cmd_len = cpu_to_le32(fd->rqstlen); + nvme->u.nvme.rsp_len = cpu_to_le32(fd->rsplen); + nvme->u.nvme.rsp_dma = fd->rspdma; + nvme->u.nvme.timeout_sec = fd->timeout; + nvme->u.nvme.cmd_dma = fd->rqstdma; + dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, + fd->rqstlen, DMA_TO_DEVICE); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x700e, + "qla2x00_start_sp failed = %d\n", rval); + sp->priv = NULL; + priv->sp = NULL; + qla2x00_rel_sp(sp); + return rval; + } + + return rval; +} + +static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport, + struct nvme_fc_remote_port *rport, void *hw_queue_handle, + struct nvmefc_fcp_req *fd) +{ + struct nvme_private *priv = fd->private; + unsigned long flags; + + spin_lock_irqsave(&priv->cmd_lock, flags); + if (!priv->sp) { + spin_unlock_irqrestore(&priv->cmd_lock, flags); + return; + } + if (!kref_get_unless_zero(&priv->sp->cmd_kref)) { + spin_unlock_irqrestore(&priv->cmd_lock, flags); + return; + } + spin_unlock_irqrestore(&priv->cmd_lock, flags); + + INIT_WORK(&priv->abort_work, qla_nvme_abort_work); + schedule_work(&priv->abort_work); +} + +static inline int qla2x00_start_nvme_mq(srb_t *sp) +{ + unsigned long flags; + uint32_t *clr_ptr; + uint32_t handle; + struct cmd_nvme *cmd_pkt; + uint16_t cnt, i; + uint16_t req_cnt; + uint16_t tot_dsds; + uint16_t avail_dsds; + struct dsd64 *cur_dsd; + struct req_que *req = NULL; + struct rsp_que *rsp = NULL; + struct scsi_qla_host *vha = sp->fcport->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_qpair *qpair = sp->qpair; + struct srb_iocb *nvme = &sp->u.iocb_cmd; + struct scatterlist *sgl, *sg; + struct nvmefc_fcp_req *fd = nvme->u.nvme.desc; + struct nvme_fc_cmd_iu *cmd = fd->cmdaddr; + uint32_t rval = QLA_SUCCESS; + + /* Setup qpair pointers */ + req = qpair->req; + rsp = qpair->rsp; + tot_dsds = fd->sg_cnt; + + /* Acquire qpair specific lock */ + spin_lock_irqsave(&qpair->qp_lock, flags); + + handle = qla2xxx_get_next_handle(req); + if (handle == 0) { + rval = -EBUSY; + goto queuing_error; + } + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + + sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; + sp->iores.exch_cnt = 1; + sp->iores.iocb_cnt = req_cnt; + if (qla_get_fw_resources(sp->qpair, &sp->iores)) { + rval = -EBUSY; + goto queuing_error; + } + + if (req->cnt < (req_cnt + 2)) { + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = rd_reg_dword_relaxed(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) { + rval = -EBUSY; + goto queuing_error; + } + } + + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - (req->ring_index - cnt); + + if (req->cnt < (req_cnt + 2)){ + rval = -EBUSY; + goto queuing_error; + } + } + + if (unlikely(!fd->sqid)) { + if (cmd->sqe.common.opcode == nvme_admin_async_event) { + nvme->u.nvme.aen_op = 1; + atomic_inc(&ha->nvme_active_aen_cnt); + } + } + + /* Build command packet. 
*/ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + req->cnt -= req_cnt; + + cmd_pkt = (struct cmd_nvme *)req->ring_ptr; + cmd_pkt->handle = make_handle(req->id, handle); + + /* Zero out remaining portion of packet. */ + clr_ptr = (uint32_t *)cmd_pkt + 2; + memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); + + cmd_pkt->entry_status = 0; + + /* Update entry type to indicate Command NVME IOCB */ + cmd_pkt->entry_type = COMMAND_NVME; + + /* No data transfer how do we check buffer len == 0?? */ + if (fd->io_dir == NVMEFC_FCP_READ) { + cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); + qpair->counters.input_bytes += fd->payload_length; + qpair->counters.input_requests++; + } else if (fd->io_dir == NVMEFC_FCP_WRITE) { + cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); + if ((vha->flags.nvme_first_burst) && + (sp->fcport->nvme_prli_service_param & + NVME_PRLI_SP_FIRST_BURST)) { + if ((fd->payload_length <= + sp->fcport->nvme_first_burst_size) || + (sp->fcport->nvme_first_burst_size == 0)) + cmd_pkt->control_flags |= + cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE); + } + qpair->counters.output_bytes += fd->payload_length; + qpair->counters.output_requests++; + } else if (fd->io_dir == 0) { + cmd_pkt->control_flags = 0; + } + + if (sp->fcport->edif.enable && fd->io_dir != 0) + cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF); + + /* Set BIT_13 of control flags for Async event */ + if (vha->flags.nvme2_enabled && + cmd->sqe.common.opcode == nvme_admin_async_event) { + cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT); + } + + /* Set NPORT-ID */ + cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); + cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; + cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; + cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; + cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + + /* NVME RSP IU */ + cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen); + put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address); + + /* NVME CNMD IU */ + cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen); + cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma); + + cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); + cmd_pkt->byte_count = cpu_to_le32(fd->payload_length); + + /* One DSD is available in the Command Type NVME IOCB */ + avail_dsds = 1; + cur_dsd = &cmd_pkt->nvme_dsd; + sgl = fd->first_sgl; + + /* Load data segments */ + for_each_sg(sgl, sg, tot_dsds, i) { + cont_a64_entry_t *cont_pkt; + + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Five DSDs are available in the Continuation + * Type 1 IOCB. + */ + + /* Adjust ring index */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else { + req->ring_ptr++; + } + cont_pkt = (cont_a64_entry_t *)req->ring_ptr; + put_unaligned_le32(CONTINUE_A64_TYPE, + &cont_pkt->entry_type); + + cur_dsd = cont_pkt->dsd; + avail_dsds = ARRAY_SIZE(cont_pkt->dsd); + } + + append_dsd64(&cur_dsd, sg); + avail_dsds--; + } + + /* Set total entry count. */ + cmd_pkt->entry_count = (uint8_t)req_cnt; + wmb(); + + /* Adjust ring index. */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else { + req->ring_ptr++; + } + + /* ignore nvme async cmd due to long timeout */ + if (!nvme->u.nvme.aen_op) + sp->qpair->cmd_cnt++; + + /* Set chip new ring index. 
*/ + wrt_reg_dword(req->req_q_in, req->ring_index); + + if (vha->flags.process_response_queue && + rsp->ring_ptr->signature != RESPONSE_PROCESSED) + qla24xx_process_response_queue(vha, rsp); + +queuing_error: + if (rval) + qla_put_fw_resources(sp->qpair, &sp->iores); + spin_unlock_irqrestore(&qpair->qp_lock, flags); + + return rval; +} + +/* Post a command */ +static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, + struct nvme_fc_remote_port *rport, void *hw_queue_handle, + struct nvmefc_fcp_req *fd) +{ + fc_port_t *fcport; + struct srb_iocb *nvme; + struct scsi_qla_host *vha; + struct qla_hw_data *ha; + int rval; + srb_t *sp; + struct qla_qpair *qpair = hw_queue_handle; + struct nvme_private *priv = fd->private; + struct qla_nvme_rport *qla_rport = rport->private; + + if (!priv) { + /* nvme association has been torn down */ + return -ENODEV; + } + + fcport = qla_rport->fcport; + + if (unlikely(!qpair || !fcport || fcport->deleted)) + return -EBUSY; + + if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED)) + return -ENODEV; + + vha = fcport->vha; + ha = vha->hw; + + if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) + return -EBUSY; + + /* + * If we know the dev is going away while the transport is still sending + * IO's return busy back to stall the IO Q. This happens when the + * link goes away and fw hasn't notified us yet, but IO's are being + * returned. If the dev comes back quickly we won't exhaust the IO + * retry count at the core. + */ + if (fcport->nvme_flag & NVME_FLAG_RESETTING) + return -EBUSY; + + qpair = qla_mapq_nvme_select_qpair(ha, qpair); + + /* Alloc SRB structure */ + sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC); + if (!sp) + return -EBUSY; + + kref_init(&sp->cmd_kref); + spin_lock_init(&priv->cmd_lock); + sp->priv = priv; + priv->sp = sp; + sp->type = SRB_NVME_CMD; + sp->name = "nvme_cmd"; + sp->done = qla_nvme_sp_done; + sp->put_fn = qla_nvme_release_fcp_cmd_kref; + sp->qpair = qpair; + sp->vha = vha; + sp->cmd_sp = sp; + nvme = &sp->u.iocb_cmd; + nvme->u.nvme.desc = fd; + + rval = qla2x00_start_nvme_mq(sp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d, + "qla2x00_start_nvme_mq failed = %d\n", rval); + sp->priv = NULL; + priv->sp = NULL; + qla2xxx_rel_qpair_sp(sp->qpair, sp); + } + + return rval; +} + +static void qla_nvme_map_queues(struct nvme_fc_local_port *lport, + struct blk_mq_queue_map *map) +{ + struct scsi_qla_host *vha = lport->private; + + blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset); +} + +static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport) +{ + struct scsi_qla_host *vha = lport->private; + + ql_log(ql_log_info, vha, 0x210f, + "localport delete of %p completed.\n", vha->nvme_local_port); + vha->nvme_local_port = NULL; + complete(&vha->nvme_del_done); +} + +static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) +{ + fc_port_t *fcport; + struct qla_nvme_rport *qla_rport = rport->private; + + fcport = qla_rport->fcport; + fcport->nvme_remote_port = NULL; + fcport->nvme_flag &= ~NVME_FLAG_REGISTERED; + fcport->nvme_flag &= ~NVME_FLAG_DELETING; + ql_log(ql_log_info, fcport->vha, 0x2110, + "remoteport_delete of %p %8phN completed.\n", + fcport, fcport->port_name); + complete(&fcport->nvme_del_done); +} + +static struct nvme_fc_port_template qla_nvme_fc_transport = { + .localport_delete = qla_nvme_localport_delete, + .remoteport_delete = qla_nvme_remoteport_delete, + .create_queue = qla_nvme_alloc_queue, + .delete_queue = NULL, + .ls_req = qla_nvme_ls_req, + 
.ls_abort = qla_nvme_ls_abort, + .fcp_io = qla_nvme_post_cmd, + .fcp_abort = qla_nvme_fcp_abort, + .xmt_ls_rsp = qla_nvme_xmt_ls_rsp, + .map_queues = qla_nvme_map_queues, + .max_hw_queues = DEF_NVME_HW_QUEUES, + .max_sgl_segments = 1024, + .max_dif_sgl_segments = 64, + .dma_boundary = 0xFFFFFFFF, + .local_priv_sz = 8, + .remote_priv_sz = sizeof(struct qla_nvme_rport), + .lsrqst_priv_sz = sizeof(struct nvme_private), + .fcprqst_priv_sz = sizeof(struct nvme_private), +}; + +void qla_nvme_unregister_remote_port(struct fc_port *fcport) +{ + int ret; + + if (!IS_ENABLED(CONFIG_NVME_FC)) + return; + + ql_log(ql_log_warn, fcport->vha, 0x2112, + "%s: unregister remoteport on %p %8phN\n", + __func__, fcport, fcport->port_name); + + if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags)) + nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0); + + init_completion(&fcport->nvme_del_done); + ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port); + if (ret) + ql_log(ql_log_info, fcport->vha, 0x2114, + "%s: Failed to unregister nvme_remote_port (%d)\n", + __func__, ret); + wait_for_completion(&fcport->nvme_del_done); +} + +void qla_nvme_delete(struct scsi_qla_host *vha) +{ + int nv_ret; + + if (!IS_ENABLED(CONFIG_NVME_FC)) + return; + + if (vha->nvme_local_port) { + init_completion(&vha->nvme_del_done); + ql_log(ql_log_info, vha, 0x2116, + "unregister localport=%p\n", + vha->nvme_local_port); + nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port); + if (nv_ret) + ql_log(ql_log_info, vha, 0x2115, + "Unregister of localport failed\n"); + else + wait_for_completion(&vha->nvme_del_done); + } +} + +int qla_nvme_register_hba(struct scsi_qla_host *vha) +{ + struct nvme_fc_port_template *tmpl; + struct qla_hw_data *ha; + struct nvme_fc_port_info pinfo; + int ret = -EINVAL; + + if (!IS_ENABLED(CONFIG_NVME_FC)) + return ret; + + ha = vha->hw; + tmpl = &qla_nvme_fc_transport; + + if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) { + ql_log(ql_log_warn, vha, 0xfffd, + "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to:%d\n", + ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES); + ql2xnvme_queues = DEF_NVME_HW_QUEUES; + } else if (ql2xnvme_queues > (ha->max_qpairs - 1)) { + ql_log(ql_log_warn, vha, 0xfffd, + "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n", + ql2xnvme_queues, (ha->max_qpairs - 1), + (ha->max_qpairs - 1)); + ql2xnvme_queues = ((ha->max_qpairs - 1)); + } + + qla_nvme_fc_transport.max_hw_queues = + min((uint8_t)(ql2xnvme_queues), + (uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1)); + + ql_log(ql_log_info, vha, 0xfffb, + "Number of NVME queues used for this port: %d\n", + qla_nvme_fc_transport.max_hw_queues); + + pinfo.node_name = wwn_to_u64(vha->node_name); + pinfo.port_name = wwn_to_u64(vha->port_name); + pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR; + pinfo.port_id = vha->d_id.b24; + + mutex_lock(&ha->vport_lock); + /* + * Check again for nvme_local_port to see if any other thread raced + * with this one and finished registration. 
+ */ + if (!vha->nvme_local_port) { + ql_log(ql_log_info, vha, 0xffff, + "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n", + pinfo.node_name, pinfo.port_name, pinfo.port_id); + qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary; + + ret = nvme_fc_register_localport(&pinfo, tmpl, + get_device(&ha->pdev->dev), + &vha->nvme_local_port); + mutex_unlock(&ha->vport_lock); + } else { + mutex_unlock(&ha->vport_lock); + return 0; + } + if (ret) { + ql_log(ql_log_warn, vha, 0xffff, + "register_localport failed: ret=%x\n", ret); + } else { + vha->nvme_local_port->private = vha; + } + + return ret; +} + +void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp) +{ + struct qla_hw_data *ha; + + if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp))) + return; + + ha = orig_sp->fcport->vha->hw; + + WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0)); + /* Use Driver Specified Retry Count */ + abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT); + abt->drv.abts_rty_cnt = cpu_to_le16(2); + /* Use specified response timeout */ + abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT); + /* set it to 2 * r_a_tov in secs */ + abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10)); +} + +void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp) +{ + u16 comp_status; + struct scsi_qla_host *vha; + + if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp))) + return; + + vha = orig_sp->fcport->vha; + + comp_status = le16_to_cpu(abt->comp_status); + switch (comp_status) { + case CS_RESET: /* reset event aborted */ + case CS_ABORTED: /* IOCB was cleaned */ + /* N_Port handle is not currently logged in */ + case CS_TIMEOUT: + /* N_Port handle was logged out while waiting for ABTS to complete */ + case CS_PORT_UNAVAILABLE: + /* Firmware found that the port name changed */ + case CS_PORT_LOGGED_OUT: + /* BA_RJT was received for the ABTS */ + case CS_PORT_CONFIG_CHG: + ql_dbg(ql_dbg_async, vha, 0xf09d, + "Abort I/O IOCB completed with error, comp_status=%x\n", + comp_status); + break; + + /* BA_RJT was received for the ABTS */ + case CS_REJECT_RECEIVED: + ql_dbg(ql_dbg_async, vha, 0xf09e, + "BA_RJT was received for the ABTS rjt_vendorUnique = %u", + abt->fw.ba_rjt_vendorUnique); + ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e, + "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n", + abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode); + break; + + case CS_COMPLETE: + ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f, + "IOCB request is completed successfully comp_status=%x\n", + comp_status); + break; + + case CS_IOCB_ERROR: + ql_dbg(ql_dbg_async, vha, 0xf0a0, + "IOCB request is failed, comp_status=%x\n", comp_status); + break; + + default: + ql_dbg(ql_dbg_async, vha, 0xf0a1, + "Invalid Abort IO IOCB Completion Status %x\n", + comp_status); + break; + } +} + +inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp) +{ + if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp))) + return; + kref_put(&orig_sp->cmd_kref, orig_sp->put_fn); +} + +static void qla_nvme_fc_format_rjt(void *buf, u8 ls_cmd, u8 reason, + u8 explanation, u8 vendor) +{ + struct fcnvme_ls_rjt *rjt = buf; + + rjt->w0.ls_cmd = FCNVME_LSDESC_RQST; + rjt->desc_list_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)); + rjt->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST); + rjt->rqst.desc_len = + fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)); + rjt->rqst.w0.ls_cmd = ls_cmd; + rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT); + rjt->rjt.desc_len = 
fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt)); + rjt->rjt.reason_code = reason; + rjt->rjt.reason_explanation = explanation; + rjt->rjt.vendor = vendor; +} + +static void qla_nvme_lsrjt_pt_iocb(struct scsi_qla_host *vha, + struct pt_ls4_request *lsrjt_iocb, + struct qla_nvme_lsrjt_pt_arg *a) +{ + lsrjt_iocb->entry_type = PT_LS4_REQUEST; + lsrjt_iocb->entry_count = 1; + lsrjt_iocb->sys_define = 0; + lsrjt_iocb->entry_status = 0; + lsrjt_iocb->handle = QLA_SKIP_HANDLE; + lsrjt_iocb->nport_handle = a->nport_handle; + lsrjt_iocb->exchange_address = a->xchg_address; + lsrjt_iocb->vp_index = a->vp_idx; + + lsrjt_iocb->control_flags = cpu_to_le16(a->control_flags); + + put_unaligned_le64(a->tx_addr, &lsrjt_iocb->dsd[0].address); + lsrjt_iocb->dsd[0].length = cpu_to_le32(a->tx_byte_count); + lsrjt_iocb->tx_dseg_count = cpu_to_le16(1); + lsrjt_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count); + + put_unaligned_le64(a->rx_addr, &lsrjt_iocb->dsd[1].address); + lsrjt_iocb->dsd[1].length = 0; + lsrjt_iocb->rx_dseg_count = 0; + lsrjt_iocb->rx_byte_count = 0; +} + +static int +qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, struct qla_qpair *qp, + struct qla_nvme_lsrjt_pt_arg *a, bool is_xchg_terminate) +{ + struct pt_ls4_request *lsrjt_iocb; + + lsrjt_iocb = __qla2x00_alloc_iocbs(qp, NULL); + if (!lsrjt_iocb) { + ql_log(ql_log_warn, vha, 0x210e, + "qla2x00_alloc_iocbs failed.\n"); + return QLA_FUNCTION_FAILED; + } + + if (!is_xchg_terminate) { + qla_nvme_fc_format_rjt((void *)vha->hw->lsrjt.c, a->opcode, + a->reason, a->explanation, 0); + + a->tx_byte_count = sizeof(struct fcnvme_ls_rjt); + a->tx_addr = vha->hw->lsrjt.cdma; + a->control_flags = CF_LS4_RESPONDER << CF_LS4_SHIFT; + + ql_dbg(ql_dbg_unsol, vha, 0x211f, + "Sending nvme fc ls reject ox_id %04x op %04x\n", + a->ox_id, a->opcode); + ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x210f, + vha->hw->lsrjt.c, sizeof(*vha->hw->lsrjt.c)); + } else { + a->tx_byte_count = 0; + a->control_flags = CF_LS4_RESPONDER_TERM << CF_LS4_SHIFT; + ql_dbg(ql_dbg_unsol, vha, 0x2110, + "Terminate nvme ls xchg 0x%x\n", a->xchg_address); + } + + qla_nvme_lsrjt_pt_iocb(vha, lsrjt_iocb, a); + /* flush iocb to mem before notifying hw doorbell */ + wmb(); + qla2x00_start_iocbs(vha, qp->req); + return 0; +} + +/* + * qla2xxx_process_purls_pkt() - Pass-up Unsolicited + * Received FC-NVMe Link Service pkt to nvme_fc_rcv_ls_req(). + * LLDD need to provide memory for response buffer, which + * will be used to reference the exchange corresponding + * to the LS when issuing an ls response. LLDD will have to free + * response buffer in lport->ops->xmt_ls_rsp(). 
+ * + * @vha: SCSI qla host + * @item: ptr to purex_item + */ +static void +qla2xxx_process_purls_pkt(struct scsi_qla_host *vha, struct purex_item *item) +{ + struct qla_nvme_unsol_ctx *uctx = item->purls_context; + struct qla_nvme_lsrjt_pt_arg a; + int ret = 1; + +#if (IS_ENABLED(CONFIG_NVME_FC)) + ret = nvme_fc_rcv_ls_req(uctx->fcport->nvme_remote_port, &uctx->lsrsp, + &item->iocb, item->size); +#endif + if (ret) { + ql_dbg(ql_dbg_unsol, vha, 0x2125, "NVMe transport ls_req failed\n"); + memset((void *)&a, 0, sizeof(a)); + a.vp_idx = vha->vp_idx; + a.nport_handle = uctx->nport_handle; + a.xchg_address = uctx->exchange_address; + qla_nvme_ls_reject_iocb(vha, vha->hw->base_qpair, &a, true); + list_del(&uctx->elem); + kfree(uctx); + } +} + +static scsi_qla_host_t * +qla2xxx_get_vha_from_vp_idx(struct qla_hw_data *ha, uint16_t vp_index) +{ + scsi_qla_host_t *base_vha, *vha, *tvp; + unsigned long flags; + + base_vha = pci_get_drvdata(ha->pdev); + + if (!vp_index && !ha->num_vhosts) + return base_vha; + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) { + if (vha->vp_idx == vp_index) { + spin_unlock_irqrestore(&ha->vport_slock, flags); + return vha; + } + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + + return NULL; +} + +void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp) +{ + struct nvme_fc_remote_port *rport; + struct qla_nvme_rport *qla_rport; + struct qla_nvme_lsrjt_pt_arg a; + struct pt_ls4_rx_unsol *p = *pkt; + struct qla_nvme_unsol_ctx *uctx; + struct rsp_que *rsp_q = *rsp; + struct qla_hw_data *ha; + scsi_qla_host_t *vha; + fc_port_t *fcport = NULL; + struct purex_item *item; + port_id_t d_id = {0}; + port_id_t id = {0}; + u8 *opcode; + bool xmt_reject = false; + + ha = rsp_q->hw; + + vha = qla2xxx_get_vha_from_vp_idx(ha, p->vp_index); + if (!vha) { + ql_log(ql_log_warn, NULL, 0x2110, "Invalid vp index %d\n", p->vp_index); + WARN_ON_ONCE(1); + return; + } + + memset((void *)&a, 0, sizeof(a)); + opcode = (u8 *)&p->payload[0]; + a.opcode = opcode[3]; + a.vp_idx = p->vp_index; + a.nport_handle = p->nport_handle; + a.ox_id = p->ox_id; + a.xchg_address = p->exchange_address; + + id.b.domain = p->s_id.domain; + id.b.area = p->s_id.area; + id.b.al_pa = p->s_id.al_pa; + d_id.b.domain = p->d_id[2]; + d_id.b.area = p->d_id[1]; + d_id.b.al_pa = p->d_id[0]; + + fcport = qla2x00_find_fcport_by_nportid(vha, &id, 0); + if (!fcport) { + ql_dbg(ql_dbg_unsol, vha, 0x211e, + "Failed to find sid=%06x did=%06x\n", + id.b24, d_id.b24); + a.reason = FCNVME_RJT_RC_INV_ASSOC; + a.explanation = FCNVME_RJT_EXP_NONE; + xmt_reject = true; + goto out; + } + rport = fcport->nvme_remote_port; + qla_rport = rport->private; + + item = qla27xx_copy_multiple_pkt(vha, pkt, rsp, true, false); + if (!item) { + a.reason = FCNVME_RJT_RC_LOGIC; + a.explanation = FCNVME_RJT_EXP_NONE; + xmt_reject = true; + goto out; + } + + uctx = kzalloc(sizeof(*uctx), GFP_ATOMIC); + if (!uctx) { + ql_log(ql_log_info, vha, 0x2126, "Failed allocate memory\n"); + a.reason = FCNVME_RJT_RC_LOGIC; + a.explanation = FCNVME_RJT_EXP_NONE; + xmt_reject = true; + kfree(item); + goto out; + } + + uctx->vha = vha; + uctx->fcport = fcport; + uctx->exchange_address = p->exchange_address; + uctx->nport_handle = p->nport_handle; + uctx->ox_id = p->ox_id; + qla_rport->uctx = uctx; + INIT_LIST_HEAD(&uctx->elem); + list_add_tail(&uctx->elem, &fcport->unsol_ctx_head); + item->purls_context = (void *)uctx; + + ql_dbg(ql_dbg_unsol, vha, 0x2121, + "PURLS OP[%01x] size %d xchg addr 0x%x 
portid %06x\n", + item->iocb.iocb[3], item->size, uctx->exchange_address, + fcport->d_id.b24); + /* +48 0 1 2 3 4 5 6 7 8 9 A B C D E F + * ----- ----------------------------------------------- + * 0000: 00 00 00 05 28 00 00 00 07 00 00 00 08 00 00 00 + * 0010: ab ec 0f cc 00 00 8d 7d 05 00 00 00 10 00 00 00 + * 0020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + */ + ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x2120, + &item->iocb, item->size); + + qla24xx_queue_purex_item(vha, item, qla2xxx_process_purls_pkt); +out: + if (xmt_reject) { + qla_nvme_ls_reject_iocb(vha, (*rsp)->qpair, &a, false); + __qla_consume_iocb(vha, pkt, rsp); + } +} diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h new file mode 100644 index 000000000..a253ac551 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_nvme.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2017 QLogic Corporation + */ +#ifndef __QLA_NVME_H +#define __QLA_NVME_H + +#include +#include +#include + +#include "qla_def.h" +#include "qla_dsd.h" + +#define MIN_NVME_HW_QUEUES 1 +#define DEF_NVME_HW_QUEUES 8 + +#define NVME_ATIO_CMD_OFF 32 +#define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF) +#define Q2T_NVME_NUM_TAGS 2048 +#define QLA_MAX_FC_SEGMENTS 64 + +struct qla_nvme_unsol_ctx; +struct scsi_qla_host; +struct qla_hw_data; +struct req_que; +struct srb; + +struct nvme_private { + struct srb *sp; + struct nvmefc_ls_req *fd; + struct work_struct ls_work; + struct work_struct abort_work; + int comp_status; + spinlock_t cmd_lock; +}; + +struct qla_nvme_rport { + struct fc_port *fcport; + struct qla_nvme_unsol_ctx *uctx; +}; + +#define COMMAND_NVME 0x88 /* Command Type FC-NVMe IOCB */ +struct cmd_nvme { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + __le16 nport_handle; /* N_PORT handle. */ + __le16 timeout; /* Command timeout. */ + + __le16 dseg_count; /* Data segment count. */ + __le16 nvme_rsp_dsd_len; /* NVMe RSP DSD length */ + + uint64_t rsvd; + + __le16 control_flags; /* Control Flags */ +#define CF_ADMIN_ASYNC_EVENT BIT_13 +#define CF_NVME_FIRST_BURST_ENABLE BIT_11 +#define CF_DIF_SEG_DESCR_ENABLE BIT_3 +#define CF_DATA_SEG_DESCR_ENABLE BIT_2 +#define CF_READ_DATA BIT_1 +#define CF_WRITE_DATA BIT_0 + + __le16 nvme_cmnd_dseg_len; /* Data segment length. */ + __le64 nvme_cmnd_dseg_address __packed;/* Data segment address. */ + __le64 nvme_rsp_dseg_address __packed; /* Data segment address. */ + + __le32 byte_count; /* Total byte count. */ + + uint8_t port_id[3]; /* PortID of destination port. 
*/ + uint8_t vp_index; + + struct dsd64 nvme_dsd; +}; + +#define PURLS_MSLEEP_INTERVAL 1 +#define PURLS_RETRY_COUNT 5 + +#define PT_LS4_REQUEST 0x89 /* Link Service pass-through IOCB (request) */ +struct pt_ls4_request { + uint8_t entry_type; + uint8_t entry_count; + uint8_t sys_define; + uint8_t entry_status; + uint32_t handle; + __le16 status; + __le16 nport_handle; + __le16 tx_dseg_count; + uint8_t vp_index; + uint8_t rsvd; + __le16 timeout; + __le16 control_flags; +#define CF_LS4_SHIFT 13 +#define CF_LS4_ORIGINATOR 0 +#define CF_LS4_RESPONDER 1 +#define CF_LS4_RESPONDER_TERM 2 + + __le16 rx_dseg_count; + __le16 rsvd2; + __le32 exchange_address; + __le32 rsvd3; + __le32 rx_byte_count; + __le32 tx_byte_count; + struct dsd64 dsd[2]; +}; + +#define PT_LS4_UNSOL 0x56 /* pass-up unsolicited rec FC-NVMe request */ +struct pt_ls4_rx_unsol { + uint8_t entry_type; + uint8_t entry_count; + __le16 rsvd0; + __le16 rsvd1; + uint8_t vp_index; + uint8_t rsvd2; + __le16 rsvd3; + __le16 nport_handle; + __le16 frame_size; + __le16 rsvd4; + __le32 exchange_address; + uint8_t d_id[3]; + uint8_t r_ctl; + le_id_t s_id; + uint8_t cs_ctl; + uint8_t f_ctl[3]; + uint8_t type; + __le16 seq_cnt; + uint8_t df_ctl; + uint8_t seq_id; + __le16 rx_id; + __le16 ox_id; + __le32 desc0; +#define PT_LS4_PAYLOAD_OFFSET 0x2c +#define PT_LS4_FIRST_PACKET_LEN 20 + __le32 payload[5]; +}; + +/* + * Global functions prototype in qla_nvme.c source file. + */ +int qla_nvme_register_hba(struct scsi_qla_host *); +int qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *); +void qla_nvme_delete(struct scsi_qla_host *); +void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *, struct pt_ls4_request *, + struct req_que *); +void qla24xx_async_gffid_sp_done(struct srb *sp, int); +#endif diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c new file mode 100644 index 000000000..6dfb70edb --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -0,0 +1,4477 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#include "qla_def.h" +#include +#include +#include +#include +#include +#include + +#define MASK(n) ((1ULL<<(n))-1) +#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \ + ((addr >> 25) & 0x3ff)) +#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \ + ((addr >> 25) & 0x3ff)) +#define MS_WIN(addr) (addr & 0x0ffc0000) +#define QLA82XX_PCI_MN_2M (0) +#define QLA82XX_PCI_MS_2M (0x80000) +#define QLA82XX_PCI_OCM0_2M (0xc0000) +#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800) +#define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) +#define BLOCK_PROTECT_BITS 0x0F + +/* CRB window related */ +#define CRB_BLK(off) ((off >> 20) & 0x3f) +#define CRB_SUBBLK(off) ((off >> 16) & 0xf) +#define CRB_WINDOW_2M (0x130060) +#define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL) +#define CRB_HI(off) ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \ + ((off) & 0xf0000)) +#define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL) +#define CRB_INDIRECT_2M (0x1e0000UL) + +#define MAX_CRB_XFORM 60 +static unsigned long crb_addr_xform[MAX_CRB_XFORM]; +static int qla82xx_crb_table_initialized; + +#define qla82xx_crb_addr_transform(name) \ + (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ + QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20) + +const int MD_MIU_TEST_AGT_RDDATA[] = { + 0x410000A8, 0x410000AC, + 0x410000B8, 0x410000BC +}; + +static void qla82xx_crb_addr_transform_setup(void) +{ + qla82xx_crb_addr_transform(XDMA); + qla82xx_crb_addr_transform(TIMR); + 
qla82xx_crb_addr_transform(SRE); + qla82xx_crb_addr_transform(SQN3); + qla82xx_crb_addr_transform(SQN2); + qla82xx_crb_addr_transform(SQN1); + qla82xx_crb_addr_transform(SQN0); + qla82xx_crb_addr_transform(SQS3); + qla82xx_crb_addr_transform(SQS2); + qla82xx_crb_addr_transform(SQS1); + qla82xx_crb_addr_transform(SQS0); + qla82xx_crb_addr_transform(RPMX7); + qla82xx_crb_addr_transform(RPMX6); + qla82xx_crb_addr_transform(RPMX5); + qla82xx_crb_addr_transform(RPMX4); + qla82xx_crb_addr_transform(RPMX3); + qla82xx_crb_addr_transform(RPMX2); + qla82xx_crb_addr_transform(RPMX1); + qla82xx_crb_addr_transform(RPMX0); + qla82xx_crb_addr_transform(ROMUSB); + qla82xx_crb_addr_transform(SN); + qla82xx_crb_addr_transform(QMN); + qla82xx_crb_addr_transform(QMS); + qla82xx_crb_addr_transform(PGNI); + qla82xx_crb_addr_transform(PGND); + qla82xx_crb_addr_transform(PGN3); + qla82xx_crb_addr_transform(PGN2); + qla82xx_crb_addr_transform(PGN1); + qla82xx_crb_addr_transform(PGN0); + qla82xx_crb_addr_transform(PGSI); + qla82xx_crb_addr_transform(PGSD); + qla82xx_crb_addr_transform(PGS3); + qla82xx_crb_addr_transform(PGS2); + qla82xx_crb_addr_transform(PGS1); + qla82xx_crb_addr_transform(PGS0); + qla82xx_crb_addr_transform(PS); + qla82xx_crb_addr_transform(PH); + qla82xx_crb_addr_transform(NIU); + qla82xx_crb_addr_transform(I2Q); + qla82xx_crb_addr_transform(EG); + qla82xx_crb_addr_transform(MN); + qla82xx_crb_addr_transform(MS); + qla82xx_crb_addr_transform(CAS2); + qla82xx_crb_addr_transform(CAS1); + qla82xx_crb_addr_transform(CAS0); + qla82xx_crb_addr_transform(CAM); + qla82xx_crb_addr_transform(C2C1); + qla82xx_crb_addr_transform(C2C0); + qla82xx_crb_addr_transform(SMB); + qla82xx_crb_addr_transform(OCM0); + /* + * Used only in P3 just define it for P2 also. + */ + qla82xx_crb_addr_transform(I2C0); + + qla82xx_crb_table_initialized = 1; +} + +static struct crb_128M_2M_block_map crb_128M_2M_map[64] = { + {{{0, 0, 0, 0} } }, + {{{1, 0x0100000, 0x0102000, 0x120000}, + {1, 0x0110000, 0x0120000, 0x130000}, + {1, 0x0120000, 0x0122000, 0x124000}, + {1, 0x0130000, 0x0132000, 0x126000}, + {1, 0x0140000, 0x0142000, 0x128000}, + {1, 0x0150000, 0x0152000, 0x12a000}, + {1, 0x0160000, 0x0170000, 0x110000}, + {1, 0x0170000, 0x0172000, 0x12e000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x01e0000, 0x01e0800, 0x122000}, + {0, 0x0000000, 0x0000000, 0x000000} } } , + {{{1, 0x0200000, 0x0210000, 0x180000} } }, + {{{0, 0, 0, 0} } }, + {{{1, 0x0400000, 0x0401000, 0x169000} } }, + {{{1, 0x0500000, 0x0510000, 0x140000} } }, + {{{1, 0x0600000, 0x0610000, 0x1c0000} } }, + {{{1, 0x0700000, 0x0704000, 0x1b8000} } }, + {{{1, 0x0800000, 0x0802000, 0x170000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x08f0000, 0x08f2000, 0x172000} } }, + {{{1, 0x0900000, 0x0902000, 0x174000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 
0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x09f0000, 0x09f2000, 0x176000} } }, + {{{0, 0x0a00000, 0x0a02000, 0x178000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0af0000, 0x0af2000, 0x17a000} } }, + {{{0, 0x0b00000, 0x0b02000, 0x17c000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, + {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } }, + {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } }, + {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } }, + {{{1, 0x0f00000, 0x0f01000, 0x164000} } }, + {{{0, 0x1000000, 0x1004000, 0x1a8000} } }, + {{{1, 0x1100000, 0x1101000, 0x160000} } }, + {{{1, 0x1200000, 0x1201000, 0x161000} } }, + {{{1, 0x1300000, 0x1301000, 0x162000} } }, + {{{1, 0x1400000, 0x1401000, 0x163000} } }, + {{{1, 0x1500000, 0x1501000, 0x165000} } }, + {{{1, 0x1600000, 0x1601000, 0x166000} } }, + {{{0, 0, 0, 0} } }, + {{{0, 0, 0, 0} } }, + {{{0, 0, 0, 0} } }, + {{{0, 0, 0, 0} } }, + {{{0, 0, 0, 0} } }, + {{{0, 0, 0, 0} } }, + {{{1, 0x1d00000, 0x1d10000, 0x190000} } }, + {{{1, 0x1e00000, 0x1e01000, 0x16a000} } }, + {{{1, 0x1f00000, 0x1f10000, 0x150000} } }, + {{{0} } }, + {{{1, 0x2100000, 0x2102000, 0x120000}, + {1, 0x2110000, 0x2120000, 0x130000}, + {1, 0x2120000, 0x2122000, 0x124000}, + {1, 0x2130000, 0x2132000, 0x126000}, + {1, 0x2140000, 0x2142000, 0x128000}, + {1, 0x2150000, 0x2152000, 0x12a000}, + {1, 0x2160000, 0x2170000, 0x110000}, + {1, 0x2170000, 0x2172000, 0x12e000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000} } }, + {{{1, 0x2200000, 0x2204000, 0x1b0000} } }, + {{{0} } }, + {{{0} } }, + {{{0} } }, + {{{0} } }, + {{{0} } }, + {{{1, 0x2800000, 0x2804000, 0x1a4000} } }, + {{{1, 0x2900000, 0x2901000, 0x16b000} } }, + {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } }, + {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } }, + {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } }, + {{{1, 0x2d00000, 0x2d00400, 
0x1ad000} } }, + {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } }, + {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } }, + {{{1, 0x3000000, 0x3000400, 0x1adc00} } }, + {{{0, 0x3100000, 0x3104000, 0x1a8000} } }, + {{{1, 0x3200000, 0x3204000, 0x1d4000} } }, + {{{1, 0x3300000, 0x3304000, 0x1a0000} } }, + {{{0} } }, + {{{1, 0x3500000, 0x3500400, 0x1ac000} } }, + {{{1, 0x3600000, 0x3600400, 0x1ae000} } }, + {{{1, 0x3700000, 0x3700400, 0x1ae400} } }, + {{{1, 0x3800000, 0x3804000, 0x1d0000} } }, + {{{1, 0x3900000, 0x3904000, 0x1b4000} } }, + {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } }, + {{{0} } }, + {{{0} } }, + {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } }, + {{{1, 0x3e00000, 0x3e01000, 0x167000} } }, + {{{1, 0x3f00000, 0x3f01000, 0x168000} } } +}; + +/* + * top 12 bits of crb internal address (hub, agent) + */ +static unsigned qla82xx_crb_hub_agt[64] = { + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_PS, + QLA82XX_HW_CRB_HUB_AGT_ADR_MN, + QLA82XX_HW_CRB_HUB_AGT_ADR_MS, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_SRE, + QLA82XX_HW_CRB_HUB_AGT_ADR_NIU, + QLA82XX_HW_CRB_HUB_AGT_ADR_QMN, + QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0, + QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1, + QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2, + QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3, + QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q, + QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, + QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4, + QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGND, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI, + QLA82XX_HW_CRB_HUB_AGT_ADR_SN, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_EG, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_PS, + QLA82XX_HW_CRB_HUB_AGT_ADR_CAM, + 0, + 0, + 0, + 0, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7, + QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA, + QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q, + QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9, + QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_SMB, + QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0, + QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC, + 0, +}; + +/* Device states */ +static const char *const q_dev_state[] = { + [QLA8XXX_DEV_UNKNOWN] = "Unknown", + [QLA8XXX_DEV_COLD] = "Cold/Re-init", + [QLA8XXX_DEV_INITIALIZING] = "Initializing", + [QLA8XXX_DEV_READY] = "Ready", + [QLA8XXX_DEV_NEED_RESET] = "Need Reset", + [QLA8XXX_DEV_NEED_QUIESCENT] = "Need Quiescent", + [QLA8XXX_DEV_FAILED] = "Failed", + [QLA8XXX_DEV_QUIESCENT] = "Quiescent", +}; + +const char *qdev_state(uint32_t dev_state) +{ + return (dev_state < MAX_STATES) ? 
q_dev_state[dev_state] : "Unknown"; +} + +/* + * In: 'off_in' is offset from CRB space in 128M pci map + * Out: 'off_out' is 2M pci map addr + * side effect: lock crb window + */ +static void +qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in, + void __iomem **off_out) +{ + u32 win_read; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + ha->crb_win = CRB_HI(off_in); + writel(ha->crb_win, CRB_WINDOW_2M + ha->nx_pcibase); + + /* Read back value to make sure write has gone through before trying + * to use it. + */ + win_read = rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase); + if (win_read != ha->crb_win) { + ql_dbg(ql_dbg_p3p, vha, 0xb000, + "%s: Written crbwin (0x%x) " + "!= Read crbwin (0x%x), off=0x%lx.\n", + __func__, ha->crb_win, win_read, off_in); + } + *off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; +} + +static int +qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in, + void __iomem **off_out) +{ + struct crb_128M_2M_sub_block_map *m; + + if (off_in >= QLA82XX_CRB_MAX) + return -1; + + if (off_in >= QLA82XX_PCI_CAMQM && off_in < QLA82XX_PCI_CAMQM_2M_END) { + *off_out = (off_in - QLA82XX_PCI_CAMQM) + + QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase; + return 0; + } + + if (off_in < QLA82XX_PCI_CRBSPACE) + return -1; + + off_in -= QLA82XX_PCI_CRBSPACE; + + /* Try direct map */ + m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)]; + + if (m->valid && (m->start_128M <= off_in) && (m->end_128M > off_in)) { + *off_out = off_in + m->start_2M - m->start_128M + ha->nx_pcibase; + return 0; + } + /* Not in direct map, use crb window */ + *off_out = (void __iomem *)off_in; + return 1; +} + +#define CRB_WIN_LOCK_TIMEOUT 100000000 +static int qla82xx_crb_win_lock(struct qla_hw_data *ha) +{ + int done = 0, timeout = 0; + + while (!done) { + /* acquire semaphore3 from PCI HW block */ + done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK)); + if (done == 1) + break; + if (timeout >= CRB_WIN_LOCK_TIMEOUT) + return -1; + timeout++; + } + qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum); + return 0; +} + +int +qla82xx_wr_32(struct qla_hw_data *ha, ulong off_in, u32 data) +{ + void __iomem *off; + unsigned long flags = 0; + int rv; + + rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off); + + BUG_ON(rv == -1); + + if (rv == 1) { +#ifndef __CHECKER__ + write_lock_irqsave(&ha->hw_lock, flags); +#endif + qla82xx_crb_win_lock(ha); + qla82xx_pci_set_crbwindow_2M(ha, off_in, &off); + } + + writel(data, (void __iomem *)off); + + if (rv == 1) { + qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK)); +#ifndef __CHECKER__ + write_unlock_irqrestore(&ha->hw_lock, flags); +#endif + } + return 0; +} + +int +qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in) +{ + void __iomem *off; + unsigned long flags = 0; + int rv; + u32 data; + + rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off); + + BUG_ON(rv == -1); + + if (rv == 1) { +#ifndef __CHECKER__ + write_lock_irqsave(&ha->hw_lock, flags); +#endif + qla82xx_crb_win_lock(ha); + qla82xx_pci_set_crbwindow_2M(ha, off_in, &off); + } + data = rd_reg_dword(off); + + if (rv == 1) { + qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK)); +#ifndef __CHECKER__ + write_unlock_irqrestore(&ha->hw_lock, flags); +#endif + } + return data; +} + +/* + * Context: task, might sleep + */ +int qla82xx_idc_lock(struct qla_hw_data *ha) +{ + const int delay_ms = 100, timeout_ms = 2000; + int done, total = 0; + + might_sleep(); + + while (true) { + /* acquire semaphore5 from PCI HW block */ + done = qla82xx_rd_32(ha, 
QLA82XX_PCIE_REG(PCIE_SEM5_LOCK)); + if (done == 1) + break; + if (WARN_ON_ONCE(total >= timeout_ms)) + return -1; + + total += delay_ms; + msleep(delay_ms); + } + + return 0; +} + +void qla82xx_idc_unlock(struct qla_hw_data *ha) +{ + qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK)); +} + +/* + * check memory access boundary. + * used by test agent. support ddr access only for now + */ +static unsigned long +qla82xx_pci_mem_bound_check(struct qla_hw_data *ha, + unsigned long long addr, int size) +{ + if (!addr_in_range(addr, QLA82XX_ADDR_DDR_NET, + QLA82XX_ADDR_DDR_NET_MAX) || + !addr_in_range(addr + size - 1, QLA82XX_ADDR_DDR_NET, + QLA82XX_ADDR_DDR_NET_MAX) || + ((size != 1) && (size != 2) && (size != 4) && (size != 8))) + return 0; + else + return 1; +} + +static int qla82xx_pci_set_window_warning_count; + +static unsigned long +qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) +{ + int window; + u32 win_read; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET, + QLA82XX_ADDR_DDR_NET_MAX)) { + /* DDR network side */ + window = MN_WIN(addr); + ha->ddr_mn_window = window; + qla82xx_wr_32(ha, + ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window); + win_read = qla82xx_rd_32(ha, + ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); + if ((win_read << 17) != window) { + ql_dbg(ql_dbg_p3p, vha, 0xb003, + "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n", + __func__, window, win_read); + } + addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; + } else if (addr_in_range(addr, QLA82XX_ADDR_OCM0, + QLA82XX_ADDR_OCM0_MAX)) { + unsigned int temp1; + + if ((addr & 0x00ff800) == 0xff800) { + ql_log(ql_log_warn, vha, 0xb004, + "%s: QM access not handled.\n", __func__); + addr = -1UL; + } + window = OCM_WIN(addr); + ha->ddr_mn_window = window; + qla82xx_wr_32(ha, + ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window); + win_read = qla82xx_rd_32(ha, + ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); + temp1 = ((window & 0x1FF) << 7) | + ((window & 0x0FFFE0000) >> 17); + if (win_read != temp1) { + ql_log(ql_log_warn, vha, 0xb005, + "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n", + __func__, temp1, win_read); + } + addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; + + } else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET, + QLA82XX_P3_ADDR_QDR_NET_MAX)) { + /* QDR network side */ + window = MS_WIN(addr); + ha->qdr_sn_window = window; + qla82xx_wr_32(ha, + ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window); + win_read = qla82xx_rd_32(ha, + ha->ms_win_crb | QLA82XX_PCI_CRBSPACE); + if (win_read != window) { + ql_log(ql_log_warn, vha, 0xb006, + "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n", + __func__, window, win_read); + } + addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET; + } else { + /* + * peg gdb frequently accesses memory that doesn't exist, + * this limits the chit chat so debugging isn't slowed down. 
+ */ + if ((qla82xx_pci_set_window_warning_count++ < 8) || + (qla82xx_pci_set_window_warning_count%64 == 0)) { + ql_log(ql_log_warn, vha, 0xb007, + "%s: Warning:%s Unknown address range!.\n", + __func__, QLA2XXX_DRIVER_NAME); + } + addr = -1UL; + } + return addr; +} + +/* check if address is in the same windows as the previous access */ +static int qla82xx_pci_is_same_window(struct qla_hw_data *ha, + unsigned long long addr) +{ + int window; + unsigned long long qdr_max; + + qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX; + + /* DDR network side */ + if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET, + QLA82XX_ADDR_DDR_NET_MAX)) + BUG(); + else if (addr_in_range(addr, QLA82XX_ADDR_OCM0, + QLA82XX_ADDR_OCM0_MAX)) + return 1; + else if (addr_in_range(addr, QLA82XX_ADDR_OCM1, + QLA82XX_ADDR_OCM1_MAX)) + return 1; + else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) { + /* QDR network side */ + window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f; + if (ha->qdr_sn_window == window) + return 1; + } + return 0; +} + +static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha, + u64 off, void *data, int size) +{ + unsigned long flags; + void __iomem *addr = NULL; + int ret = 0; + u64 start; + uint8_t __iomem *mem_ptr = NULL; + unsigned long mem_base; + unsigned long mem_page; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + write_lock_irqsave(&ha->hw_lock, flags); + + /* + * If attempting to access unknown address or straddle hw windows, + * do not access. + */ + start = qla82xx_pci_set_window(ha, off); + if ((start == -1UL) || + (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { + write_unlock_irqrestore(&ha->hw_lock, flags); + ql_log(ql_log_fatal, vha, 0xb008, + "%s out of bound pci memory " + "access, offset is 0x%llx.\n", + QLA2XXX_DRIVER_NAME, off); + return -1; + } + + write_unlock_irqrestore(&ha->hw_lock, flags); + mem_base = pci_resource_start(ha->pdev, 0); + mem_page = start & PAGE_MASK; + /* Map two pages whenever user tries to access addresses in two + * consecutive pages. + */ + if (mem_page != ((start + size - 1) & PAGE_MASK)) + mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2); + else + mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); + if (mem_ptr == NULL) { + *(u8 *)data = 0; + return -1; + } + addr = mem_ptr; + addr += start & (PAGE_SIZE - 1); + write_lock_irqsave(&ha->hw_lock, flags); + + switch (size) { + case 1: + *(u8 *)data = readb(addr); + break; + case 2: + *(u16 *)data = readw(addr); + break; + case 4: + *(u32 *)data = readl(addr); + break; + case 8: + *(u64 *)data = readq(addr); + break; + default: + ret = -1; + break; + } + write_unlock_irqrestore(&ha->hw_lock, flags); + + if (mem_ptr) + iounmap(mem_ptr); + return ret; +} + +static int +qla82xx_pci_mem_write_direct(struct qla_hw_data *ha, + u64 off, void *data, int size) +{ + unsigned long flags; + void __iomem *addr = NULL; + int ret = 0; + u64 start; + uint8_t __iomem *mem_ptr = NULL; + unsigned long mem_base; + unsigned long mem_page; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + write_lock_irqsave(&ha->hw_lock, flags); + + /* + * If attempting to access unknown address or straddle hw windows, + * do not access. 
+ */ + start = qla82xx_pci_set_window(ha, off); + if ((start == -1UL) || + (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { + write_unlock_irqrestore(&ha->hw_lock, flags); + ql_log(ql_log_fatal, vha, 0xb009, + "%s out of bound memory " + "access, offset is 0x%llx.\n", + QLA2XXX_DRIVER_NAME, off); + return -1; + } + + write_unlock_irqrestore(&ha->hw_lock, flags); + mem_base = pci_resource_start(ha->pdev, 0); + mem_page = start & PAGE_MASK; + /* Map two pages whenever user tries to access addresses in two + * consecutive pages. + */ + if (mem_page != ((start + size - 1) & PAGE_MASK)) + mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2); + else + mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); + if (mem_ptr == NULL) + return -1; + + addr = mem_ptr; + addr += start & (PAGE_SIZE - 1); + write_lock_irqsave(&ha->hw_lock, flags); + + switch (size) { + case 1: + writeb(*(u8 *)data, addr); + break; + case 2: + writew(*(u16 *)data, addr); + break; + case 4: + writel(*(u32 *)data, addr); + break; + case 8: + writeq(*(u64 *)data, addr); + break; + default: + ret = -1; + break; + } + write_unlock_irqrestore(&ha->hw_lock, flags); + if (mem_ptr) + iounmap(mem_ptr); + return ret; +} + +#define MTU_FUDGE_FACTOR 100 +static unsigned long +qla82xx_decode_crb_addr(unsigned long addr) +{ + int i; + unsigned long base_addr, offset, pci_base; + + if (!qla82xx_crb_table_initialized) + qla82xx_crb_addr_transform_setup(); + + pci_base = ADDR_ERROR; + base_addr = addr & 0xfff00000; + offset = addr & 0x000fffff; + + for (i = 0; i < MAX_CRB_XFORM; i++) { + if (crb_addr_xform[i] == base_addr) { + pci_base = i << 20; + break; + } + } + if (pci_base == ADDR_ERROR) + return pci_base; + return pci_base + offset; +} + +static long rom_max_timeout = 100; +static long qla82xx_rom_lock_timeout = 100; + +static int +qla82xx_rom_lock(struct qla_hw_data *ha) +{ + int done = 0, timeout = 0; + uint32_t lock_owner = 0; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + while (!done) { + /* acquire semaphore2 from PCI HW block */ + done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); + if (done == 1) + break; + if (timeout >= qla82xx_rom_lock_timeout) { + lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); + ql_dbg(ql_dbg_p3p, vha, 0xb157, + "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d", + __func__, ha->portnum, lock_owner); + return -1; + } + timeout++; + } + qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ha->portnum); + return 0; +} + +static void +qla82xx_rom_unlock(struct qla_hw_data *ha) +{ + qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, 0xffffffff); + qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); +} + +static int +qla82xx_wait_rom_busy(struct qla_hw_data *ha) +{ + long timeout = 0; + long done = 0 ; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + while (done == 0) { + done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); + done &= 4; + timeout++; + if (timeout >= rom_max_timeout) { + ql_dbg(ql_dbg_p3p, vha, 0xb00a, + "%s: Timeout reached waiting for rom busy.\n", + QLA2XXX_DRIVER_NAME); + return -1; + } + } + return 0; +} + +static int +qla82xx_wait_rom_done(struct qla_hw_data *ha) +{ + long timeout = 0; + long done = 0 ; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + while (done == 0) { + done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); + done &= 2; + timeout++; + if (timeout >= rom_max_timeout) { + ql_dbg(ql_dbg_p3p, vha, 0xb00b, + "%s: Timeout reached waiting for rom done.\n", + QLA2XXX_DRIVER_NAME); + return -1; + } + } + return 0; +} + 
+static int +qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag) +{ + uint32_t off_value, rval = 0; + + wrt_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000); + + /* Read back value to make sure write has gone through */ + rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase); + off_value = (off & 0x0000FFFF); + + if (flag) + wrt_reg_dword(off_value + CRB_INDIRECT_2M + ha->nx_pcibase, + data); + else + rval = rd_reg_dword(off_value + CRB_INDIRECT_2M + + ha->nx_pcibase); + + return rval; +} + +static int +qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) +{ + /* Dword reads to flash. */ + qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (addr & 0xFFFF0000), 1); + *valp = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE + + (addr & 0x0000FFFF), 0, 0); + + return 0; +} + +static int +qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) +{ + int ret, loops = 0; + uint32_t lock_owner = 0; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { + udelay(100); + schedule(); + loops++; + } + if (loops >= 50000) { + lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); + ql_log(ql_log_fatal, vha, 0x00b9, + "Failed to acquire SEM2 lock, Lock Owner %u.\n", + lock_owner); + return -1; + } + ret = qla82xx_do_rom_fast_read(ha, addr, valp); + qla82xx_rom_unlock(ha); + return ret; +} + +static int +qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val) +{ + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR); + qla82xx_wait_rom_busy(ha); + if (qla82xx_wait_rom_done(ha)) { + ql_log(ql_log_warn, vha, 0xb00c, + "Error waiting for rom done.\n"); + return -1; + } + *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); + return 0; +} + +static int +qla82xx_flash_wait_write_finish(struct qla_hw_data *ha) +{ + uint32_t val = 0; + int i, ret; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); + for (i = 0; i < 50000; i++) { + ret = qla82xx_read_status_reg(ha, &val); + if (ret < 0 || (val & 1) == 0) + return ret; + udelay(10); + cond_resched(); + } + ql_log(ql_log_warn, vha, 0xb00d, + "Timeout reached waiting for write finish.\n"); + return -1; +} + +static int +qla82xx_flash_set_write_enable(struct qla_hw_data *ha) +{ + uint32_t val; + + qla82xx_wait_rom_busy(ha); + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN); + qla82xx_wait_rom_busy(ha); + if (qla82xx_wait_rom_done(ha)) + return -1; + if (qla82xx_read_status_reg(ha, &val) != 0) + return -1; + if ((val & 2) != 2) + return -1; + return 0; +} + +static int +qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) +{ + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + if (qla82xx_flash_set_write_enable(ha)) + return -1; + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val); + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1); + if (qla82xx_wait_rom_done(ha)) { + ql_log(ql_log_warn, vha, 0xb00e, + "Error waiting for rom done.\n"); + return -1; + } + return qla82xx_flash_wait_write_finish(ha); +} + +static int +qla82xx_write_disable_flash(struct qla_hw_data *ha) +{ + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI); + if (qla82xx_wait_rom_done(ha)) { + ql_log(ql_log_warn, vha, 0xb00f, + "Error waiting for rom done.\n"); + return -1; + } + return 0; +} + +static int 
+ql82xx_rom_lock_d(struct qla_hw_data *ha) +{ + int loops = 0; + uint32_t lock_owner = 0; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { + udelay(100); + cond_resched(); + loops++; + } + if (loops >= 50000) { + lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); + ql_log(ql_log_warn, vha, 0xb010, + "ROM lock failed, Lock Owner %u.\n", lock_owner); + return -1; + } + return 0; +} + +static int +qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, + uint32_t data) +{ + int ret = 0; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + ret = ql82xx_rom_lock_d(ha); + if (ret < 0) { + ql_log(ql_log_warn, vha, 0xb011, + "ROM lock failed.\n"); + return ret; + } + + ret = qla82xx_flash_set_write_enable(ha); + if (ret < 0) + goto done_write; + + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data); + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr); + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP); + qla82xx_wait_rom_busy(ha); + if (qla82xx_wait_rom_done(ha)) { + ql_log(ql_log_warn, vha, 0xb012, + "Error waiting for rom done.\n"); + ret = -1; + goto done_write; + } + + ret = qla82xx_flash_wait_write_finish(ha); + +done_write: + qla82xx_rom_unlock(ha); + return ret; +} + +/* This routine does CRB initialize sequence + * to put the ISP into operational state + */ +static int +qla82xx_pinit_from_rom(scsi_qla_host_t *vha) +{ + int addr, val; + int i ; + struct crb_addr_pair *buf; + unsigned long off; + unsigned offset, n; + struct qla_hw_data *ha = vha->hw; + + struct crb_addr_pair { + long addr; + long data; + }; + + /* Halt all the individual PEGs and other blocks of the ISP */ + qla82xx_rom_lock(ha); + + /* disable all I2Q */ + qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0); + + /* disable all niu interrupts */ + qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); + /* disable xge rx/tx */ + qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); + /* disable xg1 rx/tx */ + qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); + /* disable sideband mac */ + qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00); + /* disable ap0 mac */ + qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00); + /* disable ap1 mac */ + qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00); + + /* halt sre */ + val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); + qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1))); + + /* halt epg */ + qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1); + + /* halt timers */ + qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0); + + /* halt pegs */ + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1); + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); + msleep(20); + + /* big hammer */ + if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) + /* don't reset CAM block on reset */ + 
qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); + else + qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); + qla82xx_rom_unlock(ha); + + /* Read the signature value from the flash. + * Offset 0: Contain signature (0xcafecafe) + * Offset 4: Offset and number of addr/value pairs + * that present in CRB initialize sequence + */ + n = 0; + if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || + qla82xx_rom_fast_read(ha, 4, &n) != 0) { + ql_log(ql_log_fatal, vha, 0x006e, + "Error Reading crb_init area: n: %08x.\n", n); + return -1; + } + + /* Offset in flash = lower 16 bits + * Number of entries = upper 16 bits + */ + offset = n & 0xffffU; + n = (n >> 16) & 0xffffU; + + /* number of addr/value pair should not exceed 1024 entries */ + if (n >= 1024) { + ql_log(ql_log_fatal, vha, 0x0071, + "Card flash not initialized:n=0x%x.\n", n); + return -1; + } + + ql_log(ql_log_info, vha, 0x0072, + "%d CRB init values found in ROM.\n", n); + + buf = kmalloc_array(n, sizeof(struct crb_addr_pair), GFP_KERNEL); + if (buf == NULL) { + ql_log(ql_log_fatal, vha, 0x010c, + "Unable to allocate memory.\n"); + return -ENOMEM; + } + + for (i = 0; i < n; i++) { + if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 || + qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) { + kfree(buf); + return -1; + } + + buf[i].addr = addr; + buf[i].data = val; + } + + for (i = 0; i < n; i++) { + /* Translate internal CRB initialization + * address to PCI bus address + */ + off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) + + QLA82XX_PCI_CRBSPACE; + /* Not all CRB addr/value pair to be written, + * some of them are skipped + */ + + /* skipping cold reboot MAGIC */ + if (off == QLA82XX_CAM_RAM(0x1fc)) + continue; + + /* do not reset PCI */ + if (off == (ROMUSB_GLB + 0xbc)) + continue; + + /* skip core clock, so that firmware can increase the clock */ + if (off == (ROMUSB_GLB + 0xc8)) + continue; + + /* skip the function enable register */ + if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION)) + continue; + + if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2)) + continue; + + if ((off & 0x0ff00000) == QLA82XX_CRB_SMB) + continue; + + if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET) + continue; + + if (off == ADDR_ERROR) { + ql_log(ql_log_fatal, vha, 0x0116, + "Unknown addr: 0x%08lx.\n", buf[i].addr); + continue; + } + + qla82xx_wr_32(ha, off, buf[i].data); + + /* ISP requires much bigger delay to settle down, + * else crb_window returns 0xffffffff + */ + if (off == QLA82XX_ROMUSB_GLB_SW_RESET) + msleep(1000); + + /* ISP requires millisec delay between + * successive CRB register updation + */ + msleep(1); + } + + kfree(buf); + + /* Resetting the data and instruction cache */ + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e); + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8); + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8); + + /* Clear all protocol processing engines */ + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0); + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0); + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0); + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0); + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0); + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0); + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0); + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0); + return 0; +} + +static int +qla82xx_pci_mem_write_2M(struct qla_hw_data *ha, + u64 off, void *data, int size) +{ + int i, j, ret = 0, loop, sz[2], off0; + int scale, shift_amount, startword; + uint32_t temp; + 
uint64_t off8, mem_crb, tmpw, word[2] = {0, 0}; + + /* + * If not MN, go check for MS or invalid. + */ + if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) + mem_crb = QLA82XX_CRB_QDR_NET; + else { + mem_crb = QLA82XX_CRB_DDR_NET; + if (qla82xx_pci_mem_bound_check(ha, off, size) == 0) + return qla82xx_pci_mem_write_direct(ha, + off, data, size); + } + + off0 = off & 0x7; + sz[0] = (size < (8 - off0)) ? size : (8 - off0); + sz[1] = size - sz[0]; + + off8 = off & 0xfffffff0; + loop = (((off & 0xf) + size - 1) >> 4) + 1; + shift_amount = 4; + scale = 2; + startword = (off & 0xf)/8; + + for (i = 0; i < loop; i++) { + if (qla82xx_pci_mem_read_2M(ha, off8 + + (i << shift_amount), &word[i * scale], 8)) + return -1; + } + + switch (size) { + case 1: + tmpw = *((uint8_t *)data); + break; + case 2: + tmpw = *((uint16_t *)data); + break; + case 4: + tmpw = *((uint32_t *)data); + break; + case 8: + default: + tmpw = *((uint64_t *)data); + break; + } + + if (sz[0] == 8) { + word[startword] = tmpw; + } else { + word[startword] &= + ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8)); + word[startword] |= tmpw << (off0 * 8); + } + if (sz[1] != 0) { + word[startword+1] &= ~(~0ULL << (sz[1] * 8)); + word[startword+1] |= tmpw >> (sz[0] * 8); + } + + for (i = 0; i < loop; i++) { + temp = off8 + (i << shift_amount); + qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp); + temp = 0; + qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp); + temp = word[i * scale] & 0xffffffff; + qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp); + temp = (word[i * scale] >> 32) & 0xffffffff; + qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp); + temp = word[i*scale + 1] & 0xffffffff; + qla82xx_wr_32(ha, mem_crb + + MIU_TEST_AGT_WRDATA_UPPER_LO, temp); + temp = (word[i*scale + 1] >> 32) & 0xffffffff; + qla82xx_wr_32(ha, mem_crb + + MIU_TEST_AGT_WRDATA_UPPER_HI, temp); + + temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; + qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); + temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; + qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); + if ((temp & MIU_TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&ha->pdev->dev, + "failed to write through agent.\n"); + ret = -1; + break; + } + } + + return ret; +} + +static int +qla82xx_fw_load_from_flash(struct qla_hw_data *ha) +{ + int i; + long size = 0; + long flashaddr = ha->flt_region_bootload << 2; + long memaddr = BOOTLD_START; + u64 data; + u32 high, low; + + size = (IMAGE_START - BOOTLD_START) / 8; + + for (i = 0; i < size; i++) { + if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) || + (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) { + return -1; + } + data = ((u64)high << 32) | low ; + qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8); + flashaddr += 8; + memaddr += 8; + + if (i % 0x1000 == 0) + msleep(1); + } + udelay(100); + read_lock(&ha->hw_lock); + qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020); + qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); + read_unlock(&ha->hw_lock); + return 0; +} + +int +qla82xx_pci_mem_read_2M(struct qla_hw_data *ha, + u64 off, void *data, int size) +{ + int i, j = 0, k, start, end, loop, sz[2], off0[2]; + int shift_amount; + uint32_t temp; + uint64_t off8, val, mem_crb, word[2] = {0, 0}; + + /* + * If not MN, go check for MS or invalid. 
+ */ + + if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) + mem_crb = QLA82XX_CRB_QDR_NET; + else { + mem_crb = QLA82XX_CRB_DDR_NET; + if (qla82xx_pci_mem_bound_check(ha, off, size) == 0) + return qla82xx_pci_mem_read_direct(ha, + off, data, size); + } + + off8 = off & 0xfffffff0; + off0[0] = off & 0xf; + sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]); + shift_amount = 4; + loop = ((off0[0] + size - 1) >> shift_amount) + 1; + off0[1] = 0; + sz[1] = size - sz[0]; + + for (i = 0; i < loop; i++) { + temp = off8 + (i << shift_amount); + qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp); + temp = 0; + qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp); + temp = MIU_TA_CTL_ENABLE; + qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); + temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; + qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); + if ((temp & MIU_TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&ha->pdev->dev, + "failed to read through agent.\n"); + break; + } + + start = off0[i] >> 2; + end = (off0[i] + sz[i] - 1) >> 2; + for (k = start; k <= end; k++) { + temp = qla82xx_rd_32(ha, + mem_crb + MIU_TEST_AGT_RDDATA(k)); + word[i] |= ((uint64_t)temp << (32 * (k & 1))); + } + } + + if (j >= MAX_CTL_CHECK) + return -1; + + if ((off0[0] & 7) == 0) { + val = word[0]; + } else { + val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) | + ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8)); + } + + switch (size) { + case 1: + *(uint8_t *)data = val; + break; + case 2: + *(uint16_t *)data = val; + break; + case 4: + *(uint32_t *)data = val; + break; + case 8: + *(uint64_t *)data = val; + break; + } + return 0; +} + + +static struct qla82xx_uri_table_desc * +qla82xx_get_table_desc(const u8 *unirom, int section) +{ + uint32_t i; + struct qla82xx_uri_table_desc *directory = + (struct qla82xx_uri_table_desc *)&unirom[0]; + uint32_t offset; + uint32_t tab_type; + uint32_t entries = le32_to_cpu(directory->num_entries); + + for (i = 0; i < entries; i++) { + offset = le32_to_cpu(directory->findex) + + (i * le32_to_cpu(directory->entry_size)); + tab_type = get_unaligned_le32((u32 *)&unirom[offset] + 8); + + if (tab_type == section) + return (struct qla82xx_uri_table_desc *)&unirom[offset]; + } + + return NULL; +} + +static struct qla82xx_uri_data_desc * +qla82xx_get_data_desc(struct qla_hw_data *ha, + u32 section, u32 idx_offset) +{ + const u8 *unirom = ha->hablob->fw->data; + int idx = get_unaligned_le32((u32 *)&unirom[ha->file_prd_off] + + idx_offset); + struct qla82xx_uri_table_desc *tab_desc = NULL; + uint32_t offset; + + tab_desc = qla82xx_get_table_desc(unirom, section); + if (!tab_desc) + return NULL; + + offset = le32_to_cpu(tab_desc->findex) + + (le32_to_cpu(tab_desc->entry_size) * idx); + + return (struct qla82xx_uri_data_desc *)&unirom[offset]; +} + +static u8 * +qla82xx_get_bootld_offset(struct qla_hw_data *ha) +{ + u32 offset = BOOTLD_START; + struct qla82xx_uri_data_desc *uri_desc = NULL; + + if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { + uri_desc = qla82xx_get_data_desc(ha, + QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF); + if (uri_desc) + offset = le32_to_cpu(uri_desc->findex); + } + + return (u8 *)&ha->hablob->fw->data[offset]; +} + +static u32 qla82xx_get_fw_size(struct qla_hw_data *ha) +{ + struct qla82xx_uri_data_desc *uri_desc = NULL; + + if (ha->fw_type == 
QLA82XX_UNIFIED_ROMIMAGE) { + uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, + QLA82XX_URI_FIRMWARE_IDX_OFF); + if (uri_desc) + return le32_to_cpu(uri_desc->size); + } + + return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]); +} + +static u8 * +qla82xx_get_fw_offs(struct qla_hw_data *ha) +{ + u32 offset = IMAGE_START; + struct qla82xx_uri_data_desc *uri_desc = NULL; + + if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { + uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, + QLA82XX_URI_FIRMWARE_IDX_OFF); + if (uri_desc) + offset = le32_to_cpu(uri_desc->findex); + } + + return (u8 *)&ha->hablob->fw->data[offset]; +} + +/* PCI related functions */ +int qla82xx_pci_region_offset(struct pci_dev *pdev, int region) +{ + unsigned long val = 0; + u32 control; + + switch (region) { + case 0: + val = 0; + break; + case 1: + pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control); + val = control + QLA82XX_MSIX_TBL_SPACE; + break; + } + return val; +} + + +int +qla82xx_iospace_config(struct qla_hw_data *ha) +{ + uint32_t len = 0; + + if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x000c, + "Failed to reserver selected regions.\n"); + goto iospace_error_exit; + } + + /* Use MMIO operations for all accesses. */ + if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x000d, + "Region #0 not an MMIO resource, aborting.\n"); + goto iospace_error_exit; + } + + len = pci_resource_len(ha->pdev, 0); + ha->nx_pcibase = ioremap(pci_resource_start(ha->pdev, 0), len); + if (!ha->nx_pcibase) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x000e, + "Cannot remap pcibase MMIO, aborting.\n"); + goto iospace_error_exit; + } + + /* Mapping of IO base pointer */ + if (IS_QLA8044(ha)) { + ha->iobase = ha->nx_pcibase; + } else if (IS_QLA82XX(ha)) { + ha->iobase = ha->nx_pcibase + 0xbc000 + (ha->pdev->devfn << 11); + } + + if (!ql2xdbwr) { + ha->nxdb_wr_ptr = ioremap((pci_resource_start(ha->pdev, 4) + + (ha->pdev->devfn << 12)), 4); + if (!ha->nxdb_wr_ptr) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x000f, + "Cannot remap MMIO, aborting.\n"); + goto iospace_error_exit; + } + + /* Mapping of IO base pointer, + * door bell read and write pointer + */ + ha->nxdb_rd_ptr = ha->nx_pcibase + (512 * 1024) + + (ha->pdev->devfn * 8); + } else { + ha->nxdb_wr_ptr = (void __iomem *)(ha->pdev->devfn == 6 ? + QLA82XX_CAMRAM_DB1 : + QLA82XX_CAMRAM_DB2); + } + + ha->max_req_queues = ha->max_rsp_queues = 1; + ha->msix_count = ha->max_rsp_queues + 1; + ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006, + "nx_pci_base=%p iobase=%p " + "max_req_queues=%d msix_count=%d.\n", + ha->nx_pcibase, ha->iobase, + ha->max_req_queues, ha->msix_count); + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010, + "nx_pci_base=%p iobase=%p " + "max_req_queues=%d msix_count=%d.\n", + ha->nx_pcibase, ha->iobase, + ha->max_req_queues, ha->msix_count); + return 0; + +iospace_error_exit: + return -ENOMEM; +} + +/* GS related functions */ + +/* Initialization related functions */ + +/** + * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers. + * @vha: HA context + * + * Returns 0 on success. 
+ */
+int
+qla82xx_pci_config(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int ret;
+
+	pci_set_master(ha->pdev);
+	ret = pci_set_mwi(ha->pdev);
+	ha->chip_revision = ha->pdev->revision;
+	ql_dbg(ql_dbg_init, vha, 0x0043,
+	    "Chip revision:%d; pci_set_mwi() returned %d.\n",
+	    ha->chip_revision, ret);
+	return 0;
+}
+
+/**
+ * qla82xx_reset_chip() - Disable host interrupts in preparation for ISP82xx reset.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla82xx_reset_chip(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	ha->isp_ops->disable_intrs(ha);
+
+	return QLA_SUCCESS;
+}
+
+void qla82xx_config_rings(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+	struct init_cb_81xx *icb;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+
+	/* Setup ring parameters in initialization control block. */
+	icb = (struct init_cb_81xx *)ha->init_cb;
+	icb->request_q_outpointer = cpu_to_le16(0);
+	icb->response_q_inpointer = cpu_to_le16(0);
+	icb->request_q_length = cpu_to_le16(req->length);
+	icb->response_q_length = cpu_to_le16(rsp->length);
+	put_unaligned_le64(req->dma, &icb->request_q_address);
+	put_unaligned_le64(rsp->dma, &icb->response_q_address);
+
+	wrt_reg_dword(&reg->req_q_out[0], 0);
+	wrt_reg_dword(&reg->rsp_q_in[0], 0);
+	wrt_reg_dword(&reg->rsp_q_out[0], 0);
+}
+
+static int
+qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
+{
+	u64 *ptr64;
+	u32 i, flashaddr, size;
+	__le64 data;
+
+	size = (IMAGE_START - BOOTLD_START) / 8;
+
+	ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
+	flashaddr = BOOTLD_START;
+
+	for (i = 0; i < size; i++) {
+		data = cpu_to_le64(ptr64[i]);
+		if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
+			return -EIO;
+		flashaddr += 8;
+	}
+
+	flashaddr = FLASH_ADDR_START;
+	size = qla82xx_get_fw_size(ha) / 8;
+	ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
+
+	for (i = 0; i < size; i++) {
+		data = cpu_to_le64(ptr64[i]);
+
+		if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
+			return -EIO;
+		flashaddr += 8;
+	}
+	udelay(100);
+
+	/* Write a magic value to CAMRAM register
+	 * at a specified offset to indicate
+	 * that all data is written and
+	 * ready for firmware to initialize.
+	 */
+	qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);
+
+	read_lock(&ha->hw_lock);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
+	read_unlock(&ha->hw_lock);
+	return 0;
+}
+
+static int
+qla82xx_set_product_offset(struct qla_hw_data *ha)
+{
+	struct qla82xx_uri_table_desc *ptab_desc = NULL;
+	const uint8_t *unirom = ha->hablob->fw->data;
+	uint32_t i;
+	uint32_t entries;
+	uint32_t flags, file_chiprev, offset;
+	uint8_t chiprev = ha->chip_revision;
+	/* Hardcoding mn_present flag for P3P */
+	int mn_present = 0;
+	uint32_t flagbit;
+
+	ptab_desc = qla82xx_get_table_desc(unirom,
+	    QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
+	if (!ptab_desc)
+		return -1;
+
+	entries = le32_to_cpu(ptab_desc->num_entries);
+
+	for (i = 0; i < entries; i++) {
+		offset = le32_to_cpu(ptab_desc->findex) +
+		    (i * le32_to_cpu(ptab_desc->entry_size));
+		flags = le32_to_cpu(*((__le32 *)&unirom[offset] +
+		    QLA82XX_URI_FLAGS_OFF));
+		file_chiprev = le32_to_cpu(*((__le32 *)&unirom[offset] +
+		    QLA82XX_URI_CHIP_REV_OFF));
+
+		flagbit = mn_present ? 1 : 2;
+
+		if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
+			ha->file_prd_off = offset;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+static int
+qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
+{
+	uint32_t val;
+	uint32_t min_size;
+	struct qla_hw_data *ha = vha->hw;
+	const struct firmware *fw = ha->hablob->fw;
+
+	ha->fw_type = fw_type;
+
+	if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
+		if (qla82xx_set_product_offset(ha))
+			return -EINVAL;
+
+		min_size = QLA82XX_URI_FW_MIN_SIZE;
+	} else {
+		val = get_unaligned_le32(&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
+		if (val != QLA82XX_BDINFO_MAGIC)
+			return -EINVAL;
+
+		min_size = QLA82XX_FW_MIN_SIZE;
+	}
+
+	if (fw->size < min_size)
+		return -EINVAL;
+	return 0;
+}
+
+static int
+qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
+{
+	u32 val = 0;
+	int retries = 60;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	do {
+		read_lock(&ha->hw_lock);
+		val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
+		read_unlock(&ha->hw_lock);
+
+		switch (val) {
+		case PHAN_INITIALIZE_COMPLETE:
+		case PHAN_INITIALIZE_ACK:
+			return QLA_SUCCESS;
+		case PHAN_INITIALIZE_FAILED:
+			break;
+		default:
+			break;
+		}
+		ql_log(ql_log_info, vha, 0x00a8,
+		    "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n",
+		    val, retries);
+
+		msleep(500);
+
+	} while (--retries);
+
+	ql_log(ql_log_fatal, vha, 0x00a9,
+	    "Cmd Peg initialization failed: 0x%x.\n", val);
+
+	val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
+	read_lock(&ha->hw_lock);
+	qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+	read_unlock(&ha->hw_lock);
+	return QLA_FUNCTION_FAILED;
+}
+
+static int
+qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
+{
+	u32 val = 0;
+	int retries = 60;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	do {
+		read_lock(&ha->hw_lock);
+		val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
+		read_unlock(&ha->hw_lock);
+
+		switch (val) {
+		case PHAN_INITIALIZE_COMPLETE:
+		case PHAN_INITIALIZE_ACK:
+			return QLA_SUCCESS;
+		case PHAN_INITIALIZE_FAILED:
+			break;
+		default:
+			break;
+		}
+		ql_log(ql_log_info, vha, 0x00ab,
+		    "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n",
+		    val, retries);
+
+		msleep(500);
+
+	} while (--retries);
+
+	ql_log(ql_log_fatal, vha, 0x00ac,
+	    "Rcv Peg initialization failed: 0x%x.\n", val);
+	read_lock(&ha->hw_lock);
+	qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
+	read_unlock(&ha->hw_lock);
+	return QLA_FUNCTION_FAILED;
+}
+
+/* ISR related functions */
+static struct qla82xx_legacy_intr_set legacy_intr[] =
+	QLA82XX_LEGACY_INTR_CONFIG;
+
+/*
+ * qla82xx_mbx_completion() - Process mailbox command completions.
+ * @vha: SCSI driver HA context
+ * @mb0: Mailbox0 register
+ */
+void
+qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
+{
+	uint16_t cnt;
+	__le16 __iomem *wptr;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+
+	wptr = &reg->mailbox_out[1];
+
+	/* Load return mailbox registers. */
+	ha->flags.mbox_int = 1;
+	ha->mailbox_out[0] = mb0;
+
+	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
+		ha->mailbox_out[cnt] = rd_reg_word(wptr);
+		wptr++;
+	}
+
+	if (!ha->mcp)
+		ql_dbg(ql_dbg_async, vha, 0x5053,
+		    "MBX pointer ERROR.\n");
+}
+
+/**
+ * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
+ * @irq: interrupt number
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla82xx_intr_handler(int irq, void *dev_id)
+{
+	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha;
+	struct rsp_que *rsp;
+	struct device_reg_82xx __iomem *reg;
+	int status = 0, status1 = 0;
+	unsigned long flags;
+	unsigned long iter;
+	uint32_t stat = 0;
+	uint16_t mb[8];
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		ql_log(ql_log_info, NULL, 0xb053,
+		    "%s: NULL response queue pointer.\n", __func__);
+		return IRQ_NONE;
+	}
+	ha = rsp->hw;
+
+	if (!ha->flags.msi_enabled) {
+		status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
+		if (!(status & ha->nx_legacy_intr.int_vec_bit))
+			return IRQ_NONE;
+
+		status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
+		if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
+			return IRQ_NONE;
+	}
+
+	/* clear the interrupt */
+	qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+
+	/* read twice to ensure write is flushed */
+	qla82xx_rd_32(ha, ISR_INT_VECTOR);
+	qla82xx_rd_32(ha, ISR_INT_VECTOR);
+
+	reg = &ha->iobase->isp82;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
+	for (iter = 1; iter--; ) {
+
+		if (rd_reg_dword(&reg->host_int)) {
+			stat = rd_reg_dword(&reg->host_status);
+
+			switch (stat & 0xff) {
+			case 0x1:
+			case 0x2:
+			case 0x10:
+			case 0x11:
+				qla82xx_mbx_completion(vha, MSW(stat));
+				status |= MBX_INTERRUPT;
+				break;
+			case 0x12:
+				mb[0] = MSW(stat);
+				mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+				mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+				mb[3] = rd_reg_word(&reg->mailbox_out[3]);
+				qla2x00_async_event(vha, rsp, mb);
+				break;
+			case 0x13:
+				qla24xx_process_response_queue(vha, rsp);
+				break;
+			default:
+				ql_dbg(ql_dbg_async, vha, 0x5054,
+				    "Unrecognized interrupt type (%d).\n",
+				    stat & 0xff);
+				break;
+			}
+		}
+		wrt_reg_dword(&reg->host_int, 0);
+	}
+
+	qla2x00_handle_mbx_completion(ha, status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	if (!ha->flags.msi_enabled)
+		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla82xx_msix_default(int irq, void *dev_id)
+{
+	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha;
+	struct rsp_que *rsp;
+	struct device_reg_82xx __iomem *reg;
+	int status = 0;
+	unsigned long flags;
+	uint32_t stat = 0;
+	uint32_t host_int = 0;
+	uint16_t mb[8];
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		printk(KERN_INFO
+		    "%s(): NULL response queue pointer.\n", __func__);
+		return IRQ_NONE;
+	}
+	ha = rsp->hw;
+
+	reg = &ha->iobase->isp82;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
+	do {
+		host_int = rd_reg_dword(&reg->host_int);
+		if (qla2x00_check_reg32_for_disconnect(vha, host_int))
+			break;
+		if (host_int) {
+			stat = rd_reg_dword(&reg->host_status);
+
+			switch (stat & 0xff) {
+			case 0x1:
+			case 0x2:
+			case 0x10:
+			case 0x11:
+				qla82xx_mbx_completion(vha, MSW(stat));
+				status |= MBX_INTERRUPT;
+				break;
+			case 0x12:
+				mb[0] = MSW(stat);
+				mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+				mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+				mb[3] = rd_reg_word(&reg->mailbox_out[3]);
+				qla2x00_async_event(vha, rsp, mb);
+				break;
+			case 0x13:
+				qla24xx_process_response_queue(vha, rsp);
+				break;
+			default:
+				ql_dbg(ql_dbg_async, vha, 0x5041,
+				    "Unrecognized interrupt type (%d).\n",
+				    stat & 0xff);
+				break;
+			}
+		}
+		wrt_reg_dword(&reg->host_int, 0);
+	} while (0);
+
+	qla2x00_handle_mbx_completion(ha, status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla82xx_msix_rsp_q(int irq, void *dev_id)
+{
+	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha;
+	struct rsp_que *rsp;
+	struct device_reg_82xx __iomem *reg;
+	unsigned long flags;
+	uint32_t host_int = 0;
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		printk(KERN_INFO
+		    "%s(): NULL response queue pointer.\n", __func__);
+		return IRQ_NONE;
+	}
+
+	ha = rsp->hw;
+	reg = &ha->iobase->isp82;
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
+	host_int = rd_reg_dword(&reg->host_int);
+	if (qla2x00_check_reg32_for_disconnect(vha, host_int))
+		goto out;
+	qla24xx_process_response_queue(vha, rsp);
+	wrt_reg_dword(&reg->host_int, 0);
+out:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return IRQ_HANDLED;
+}
+
+void
+qla82xx_poll(int irq, void *dev_id)
+{
+	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha;
+	struct rsp_que *rsp;
+	struct device_reg_82xx __iomem *reg;
+	uint32_t stat;
+	uint32_t host_int = 0;
+	uint16_t mb[8];
+	unsigned long flags;
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		printk(KERN_INFO
+		    "%s(): NULL response queue pointer.\n", __func__);
+		return;
+	}
+	ha = rsp->hw;
+
+	reg = &ha->iobase->isp82;
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
+
+	host_int = rd_reg_dword(&reg->host_int);
+	if (qla2x00_check_reg32_for_disconnect(vha, host_int))
+		goto out;
+	if (host_int) {
+		stat = rd_reg_dword(&reg->host_status);
+		switch (stat & 0xff) {
+		case 0x1:
+		case 0x2:
+		case 0x10:
+		case 0x11:
+			qla82xx_mbx_completion(vha, MSW(stat));
+			break;
+		case 0x12:
+			mb[0] = MSW(stat);
+			mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+			mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+			mb[3] = rd_reg_word(&reg->mailbox_out[3]);
+			qla2x00_async_event(vha, rsp, mb);
+			break;
+		case 0x13:
+			qla24xx_process_response_queue(vha, rsp);
+			break;
+		default:
+			ql_dbg(ql_dbg_p3p, vha, 0xb013,
+			    "Unrecognized interrupt type (%d).\n",
+			    stat & 0xff);
+			break;
+		}
+		wrt_reg_dword(&reg->host_int, 0);
+	}
+out:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qla82xx_enable_intrs(struct qla_hw_data *ha)
+{
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	qla82xx_mbx_intr_enable(vha);
+	spin_lock_irq(&ha->hardware_lock);
+	if (IS_QLA8044(ha))
+		qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 0);
+	else
+		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+	spin_unlock_irq(&ha->hardware_lock);
+	ha->interrupts_on = 1;
+}
+
+void
+qla82xx_disable_intrs(struct qla_hw_data *ha)
+{
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	if (ha->interrupts_on)
+		qla82xx_mbx_intr_disable(vha);
+
+	spin_lock_irq(&ha->hardware_lock);
+	if (IS_QLA8044(ha))
+		qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1);
+	else
+		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
+	spin_unlock_irq(&ha->hardware_lock);
+	ha->interrupts_on = 0;
+}
+
+void qla82xx_init_flags(struct qla_hw_data *ha)
+{
+	struct qla82xx_legacy_intr_set *nx_legacy_intr;
+
+	/* ISP 8021 initializations */
+	rwlock_init(&ha->hw_lock);
+	ha->qdr_sn_window = -1;
+	ha->ddr_mn_window = -1;
+	ha->curr_window = 255;
+	ha->portnum = PCI_FUNC(ha->pdev->devfn);
+	nx_legacy_intr = &legacy_intr[ha->portnum];
+	ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
+	ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
+	ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
+	ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
+}
+
+static inline void
+qla82xx_set_idc_version(scsi_qla_host_t *vha)
+{
+	int idc_ver;
+	uint32_t drv_active;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+	if
(drv_active == (QLA82XX_DRV_ACTIVE << (ha->portnum * 4))) { + qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, + QLA82XX_IDC_VERSION); + ql_log(ql_log_info, vha, 0xb082, + "IDC version updated to %d\n", QLA82XX_IDC_VERSION); + } else { + idc_ver = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_IDC_VERSION); + if (idc_ver != QLA82XX_IDC_VERSION) + ql_log(ql_log_info, vha, 0xb083, + "qla2xxx driver IDC version %d is not compatible " + "with IDC version %d of the other drivers\n", + QLA82XX_IDC_VERSION, idc_ver); + } +} + +inline void +qla82xx_set_drv_active(scsi_qla_host_t *vha) +{ + uint32_t drv_active; + struct qla_hw_data *ha = vha->hw; + + drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + + /* If reset value is all FF's, initialize DRV_ACTIVE */ + if (drv_active == 0xffffffff) { + qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, + QLA82XX_DRV_NOT_ACTIVE); + drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + } + drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); + qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); +} + +inline void +qla82xx_clear_drv_active(struct qla_hw_data *ha) +{ + uint32_t drv_active; + + drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); + qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); +} + +static inline int +qla82xx_need_reset(struct qla_hw_data *ha) +{ + uint32_t drv_state; + int rval; + + if (ha->flags.nic_core_reset_owner) + return 1; + else { + drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); + rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); + return rval; + } +} + +static inline void +qla82xx_set_rst_ready(struct qla_hw_data *ha) +{ + uint32_t drv_state; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); + + /* If reset value is all FF's, initialize DRV_STATE */ + if (drv_state == 0xffffffff) { + qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY); + drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); + } + drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); + ql_dbg(ql_dbg_init, vha, 0x00bb, + "drv_state = 0x%08x.\n", drv_state); + qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); +} + +static inline void +qla82xx_clear_rst_ready(struct qla_hw_data *ha) +{ + uint32_t drv_state; + + drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); + drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); + qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); +} + +static inline void +qla82xx_set_qsnt_ready(struct qla_hw_data *ha) +{ + uint32_t qsnt_state; + + qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); + qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4)); + qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); +} + +void +qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t qsnt_state; + + qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); + qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4)); + qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); +} + +static int +qla82xx_load_fw(scsi_qla_host_t *vha) +{ + int rst; + struct fw_blob *blob; + struct qla_hw_data *ha = vha->hw; + + if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0x009f, + "Error during CRB initialization.\n"); + return QLA_FUNCTION_FAILED; + } + udelay(500); + + /* Bring QM and CAMRAM out of reset */ + rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET); + rst &= ~((1 << 28) | (1 << 24)); + qla82xx_wr_32(ha, 
QLA82XX_ROMUSB_GLB_SW_RESET, rst); + + /* + * FW Load priority: + * 1) Operational firmware residing in flash. + * 2) Firmware via request-firmware interface (.bin file). + */ + if (ql2xfwloadbin == 2) + goto try_blob_fw; + + ql_log(ql_log_info, vha, 0x00a0, + "Attempting to load firmware from flash.\n"); + + if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { + ql_log(ql_log_info, vha, 0x00a1, + "Firmware loaded successfully from flash.\n"); + return QLA_SUCCESS; + } else { + ql_log(ql_log_warn, vha, 0x0108, + "Firmware load from flash failed.\n"); + } + +try_blob_fw: + ql_log(ql_log_info, vha, 0x00a2, + "Attempting to load firmware from blob.\n"); + + /* Load firmware blob. */ + blob = ha->hablob = qla2x00_request_firmware(vha); + if (!blob) { + ql_log(ql_log_fatal, vha, 0x00a3, + "Firmware image not present.\n"); + goto fw_load_failed; + } + + /* Validating firmware blob */ + if (qla82xx_validate_firmware_blob(vha, + QLA82XX_FLASH_ROMIMAGE)) { + /* Fallback to URI format */ + if (qla82xx_validate_firmware_blob(vha, + QLA82XX_UNIFIED_ROMIMAGE)) { + ql_log(ql_log_fatal, vha, 0x00a4, + "No valid firmware image found.\n"); + return QLA_FUNCTION_FAILED; + } + } + + if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) { + ql_log(ql_log_info, vha, 0x00a5, + "Firmware loaded successfully from binary blob.\n"); + return QLA_SUCCESS; + } + + ql_log(ql_log_fatal, vha, 0x00a6, + "Firmware load failed for binary blob.\n"); + blob->fw = NULL; + blob = NULL; + +fw_load_failed: + return QLA_FUNCTION_FAILED; +} + +int +qla82xx_start_firmware(scsi_qla_host_t *vha) +{ + uint16_t lnk; + struct qla_hw_data *ha = vha->hw; + + /* scrub dma mask expansion register */ + qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE); + + /* Put both the PEG CMD and RCV PEG to default state + * of 0 before resetting the hardware + */ + qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0); + qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0); + + /* Overwrite stale initialization register values */ + qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0); + qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); + + if (qla82xx_load_fw(vha) != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0x00a7, + "Error trying to start fw.\n"); + return QLA_FUNCTION_FAILED; + } + + /* Handshake with the card before we register the devices. */ + if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0x00aa, + "Error during card handshake.\n"); + return QLA_FUNCTION_FAILED; + } + + /* Negotiated Link width */ + pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk); + ha->link_width = (lnk >> 4) & 0x3f; + + /* Synchronize with Receive peg */ + return qla82xx_check_rcvpeg_state(ha); +} + +static __le32 * +qla82xx_read_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr, + uint32_t length) +{ + uint32_t i; + uint32_t val; + struct qla_hw_data *ha = vha->hw; + + /* Dword reads to flash. 
*/ + for (i = 0; i < length/4; i++, faddr += 4) { + if (qla82xx_rom_fast_read(ha, faddr, &val)) { + ql_log(ql_log_warn, vha, 0x0106, + "Do ROM fast read failed.\n"); + goto done_read; + } + dwptr[i] = cpu_to_le32(val); + } +done_read: + return dwptr; +} + +static int +qla82xx_unprotect_flash(struct qla_hw_data *ha) +{ + int ret; + uint32_t val; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + ret = ql82xx_rom_lock_d(ha); + if (ret < 0) { + ql_log(ql_log_warn, vha, 0xb014, + "ROM Lock failed.\n"); + return ret; + } + + ret = qla82xx_read_status_reg(ha, &val); + if (ret < 0) + goto done_unprotect; + + val &= ~(BLOCK_PROTECT_BITS << 2); + ret = qla82xx_write_status_reg(ha, val); + if (ret < 0) { + val |= (BLOCK_PROTECT_BITS << 2); + qla82xx_write_status_reg(ha, val); + } + + if (qla82xx_write_disable_flash(ha) != 0) + ql_log(ql_log_warn, vha, 0xb015, + "Write disable failed.\n"); + +done_unprotect: + qla82xx_rom_unlock(ha); + return ret; +} + +static int +qla82xx_protect_flash(struct qla_hw_data *ha) +{ + int ret; + uint32_t val; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + ret = ql82xx_rom_lock_d(ha); + if (ret < 0) { + ql_log(ql_log_warn, vha, 0xb016, + "ROM Lock failed.\n"); + return ret; + } + + ret = qla82xx_read_status_reg(ha, &val); + if (ret < 0) + goto done_protect; + + val |= (BLOCK_PROTECT_BITS << 2); + /* LOCK all sectors */ + ret = qla82xx_write_status_reg(ha, val); + if (ret < 0) + ql_log(ql_log_warn, vha, 0xb017, + "Write status register failed.\n"); + + if (qla82xx_write_disable_flash(ha) != 0) + ql_log(ql_log_warn, vha, 0xb018, + "Write disable failed.\n"); +done_protect: + qla82xx_rom_unlock(ha); + return ret; +} + +static int +qla82xx_erase_sector(struct qla_hw_data *ha, int addr) +{ + int ret = 0; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + ret = ql82xx_rom_lock_d(ha); + if (ret < 0) { + ql_log(ql_log_warn, vha, 0xb019, + "ROM Lock failed.\n"); + return ret; + } + + qla82xx_flash_set_write_enable(ha); + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE); + + if (qla82xx_wait_rom_done(ha)) { + ql_log(ql_log_warn, vha, 0xb01a, + "Error waiting for rom done.\n"); + ret = -1; + goto done; + } + ret = qla82xx_flash_wait_write_finish(ha); +done: + qla82xx_rom_unlock(ha); + return ret; +} + +/* + * Address and length are byte address + */ +void * +qla82xx_read_optrom_data(struct scsi_qla_host *vha, void *buf, + uint32_t offset, uint32_t length) +{ + scsi_block_requests(vha->host); + qla82xx_read_flash_data(vha, buf, offset, length); + scsi_unblock_requests(vha->host); + return buf; +} + +static int +qla82xx_write_flash_data(struct scsi_qla_host *vha, __le32 *dwptr, + uint32_t faddr, uint32_t dwords) +{ + int ret; + uint32_t liter; + uint32_t rest_addr; + dma_addr_t optrom_dma; + void *optrom = NULL; + int page_mode = 0; + struct qla_hw_data *ha = vha->hw; + + ret = -1; + + /* Prepare burst-capable write on supported ISPs. 
*/ + if (page_mode && !(faddr & 0xfff) && + dwords > OPTROM_BURST_DWORDS) { + optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, + &optrom_dma, GFP_KERNEL); + if (!optrom) { + ql_log(ql_log_warn, vha, 0xb01b, + "Unable to allocate memory " + "for optrom burst write (%x KB).\n", + OPTROM_BURST_SIZE / 1024); + } + } + + rest_addr = ha->fdt_block_size - 1; + + ret = qla82xx_unprotect_flash(ha); + if (ret) { + ql_log(ql_log_warn, vha, 0xb01c, + "Unable to unprotect flash for update.\n"); + goto write_done; + } + + for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) { + /* Are we at the beginning of a sector? */ + if ((faddr & rest_addr) == 0) { + + ret = qla82xx_erase_sector(ha, faddr); + if (ret) { + ql_log(ql_log_warn, vha, 0xb01d, + "Unable to erase sector: address=%x.\n", + faddr); + break; + } + } + + /* Go with burst-write. */ + if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) { + /* Copy data to DMA'ble buffer. */ + memcpy(optrom, dwptr, OPTROM_BURST_SIZE); + + ret = qla2x00_load_ram(vha, optrom_dma, + (ha->flash_data_off | faddr), + OPTROM_BURST_DWORDS); + if (ret != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xb01e, + "Unable to burst-write optrom segment " + "(%x/%x/%llx).\n", ret, + (ha->flash_data_off | faddr), + (unsigned long long)optrom_dma); + ql_log(ql_log_warn, vha, 0xb01f, + "Reverting to slow-write.\n"); + + dma_free_coherent(&ha->pdev->dev, + OPTROM_BURST_SIZE, optrom, optrom_dma); + optrom = NULL; + } else { + liter += OPTROM_BURST_DWORDS - 1; + faddr += OPTROM_BURST_DWORDS - 1; + dwptr += OPTROM_BURST_DWORDS - 1; + continue; + } + } + + ret = qla82xx_write_flash_dword(ha, faddr, + le32_to_cpu(*dwptr)); + if (ret) { + ql_dbg(ql_dbg_p3p, vha, 0xb020, + "Unable to program flash address=%x data=%x.\n", + faddr, *dwptr); + break; + } + } + + ret = qla82xx_protect_flash(ha); + if (ret) + ql_log(ql_log_warn, vha, 0xb021, + "Unable to protect flash after update.\n"); +write_done: + if (optrom) + dma_free_coherent(&ha->pdev->dev, + OPTROM_BURST_SIZE, optrom, optrom_dma); + return ret; +} + +int +qla82xx_write_optrom_data(struct scsi_qla_host *vha, void *buf, + uint32_t offset, uint32_t length) +{ + int rval; + + /* Suspend HBA. */ + scsi_block_requests(vha->host); + rval = qla82xx_write_flash_data(vha, buf, offset, length >> 2); + scsi_unblock_requests(vha->host); + + /* Convert return ISP82xx to generic */ + if (rval) + rval = QLA_FUNCTION_FAILED; + else + rval = QLA_SUCCESS; + return rval; +} + +void +qla82xx_start_iocbs(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + uint32_t dbval; + + /* Adjust ring index. */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else + req->ring_ptr++; + + dbval = 0x04 | (ha->portnum << 5); + + dbval = dbval | (req->id << 8) | (req->ring_index << 16); + if (ql2xdbwr) + qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval); + else { + wrt_reg_dword(ha->nxdb_wr_ptr, dbval); + wmb(); + while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) { + wrt_reg_dword(ha->nxdb_wr_ptr, dbval); + wmb(); + } + } +} + +static void +qla82xx_rom_lock_recovery(struct qla_hw_data *ha) +{ + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + uint32_t lock_owner = 0; + + if (qla82xx_rom_lock(ha)) { + lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); + /* Someone else is holding the lock. 
*/ + ql_log(ql_log_info, vha, 0xb022, + "Resetting rom_lock, Lock Owner %u.\n", lock_owner); + } + /* + * Either we got the lock, or someone + * else died while holding it. + * In either case, unlock. + */ + qla82xx_rom_unlock(ha); +} + +/* + * qla82xx_device_bootstrap + * Initialize device, set DEV_READY, start fw + * + * Note: + * IDC lock must be held upon entry + * + * Return: + * Success : 0 + * Failed : 1 + */ +static int +qla82xx_device_bootstrap(scsi_qla_host_t *vha) +{ + int rval = QLA_SUCCESS; + int i; + uint32_t old_count, count; + struct qla_hw_data *ha = vha->hw; + int need_reset = 0; + + need_reset = qla82xx_need_reset(ha); + + if (need_reset) { + /* We are trying to perform a recovery here. */ + if (ha->flags.isp82xx_fw_hung) + qla82xx_rom_lock_recovery(ha); + } else { + old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); + for (i = 0; i < 10; i++) { + msleep(200); + count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); + if (count != old_count) { + rval = QLA_SUCCESS; + goto dev_ready; + } + } + qla82xx_rom_lock_recovery(ha); + } + + /* set to DEV_INITIALIZING */ + ql_log(ql_log_info, vha, 0x009e, + "HW State: INITIALIZING.\n"); + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_INITIALIZING); + + qla82xx_idc_unlock(ha); + rval = qla82xx_start_firmware(vha); + qla82xx_idc_lock(ha); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0x00ad, + "HW State: FAILED.\n"); + qla82xx_clear_drv_active(ha); + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED); + return rval; + } + +dev_ready: + ql_log(ql_log_info, vha, 0x00ae, + "HW State: READY.\n"); + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY); + + return QLA_SUCCESS; +} + +/* +* qla82xx_need_qsnt_handler +* Code to start quiescence sequence +* +* Note: +* IDC lock must be held upon entry +* +* Return: void +*/ + +static void +qla82xx_need_qsnt_handler(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t dev_state, drv_state, drv_active; + unsigned long reset_timeout; + + if (vha->flags.online) { + /*Block any further I/O and wait for pending cmnds to complete*/ + qla2x00_quiesce_io(vha); + } + + /* Set the quiescence ready bit */ + qla82xx_set_qsnt_ready(ha); + + /*wait for 30 secs for other functions to ack */ + reset_timeout = jiffies + (30 * HZ); + + drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); + drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + /* Its 2 that is written when qsnt is acked, moving one bit */ + drv_active = drv_active << 0x01; + + while (drv_state != drv_active) { + + if (time_after_eq(jiffies, reset_timeout)) { + /* quiescence timeout, other functions didn't ack + * changing the state to DEV_READY + */ + ql_log(ql_log_info, vha, 0xb023, + "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d " + "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME, + drv_active, drv_state); + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA8XXX_DEV_READY); + ql_log(ql_log_info, vha, 0xb025, + "HW State: DEV_READY.\n"); + qla82xx_idc_unlock(ha); + qla2x00_perform_loop_resync(vha); + qla82xx_idc_lock(ha); + + qla82xx_clear_qsnt_ready(vha); + return; + } + + qla82xx_idc_unlock(ha); + msleep(1000); + qla82xx_idc_lock(ha); + + drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); + drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + drv_active = drv_active << 0x01; + } + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + /* everyone acked so set the state to DEV_QUIESCENCE */ + if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { + ql_log(ql_log_info, vha, 0xb026, + "HW State: 
DEV_QUIESCENT.\n"); + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_QUIESCENT); + } +} + +/* +* qla82xx_wait_for_state_change +* Wait for device state to change from given current state +* +* Note: +* IDC lock must not be held upon entry +* +* Return: +* Changed device state. +*/ +uint32_t +qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t dev_state; + + do { + msleep(1000); + qla82xx_idc_lock(ha); + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + qla82xx_idc_unlock(ha); + } while (dev_state == curr_state); + + return dev_state; +} + +void +qla8xxx_dev_failed_handler(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + /* Disable the board */ + ql_log(ql_log_fatal, vha, 0x00b8, + "Disabling the board.\n"); + + if (IS_QLA82XX(ha)) { + qla82xx_clear_drv_active(ha); + qla82xx_idc_unlock(ha); + } else if (IS_QLA8044(ha)) { + qla8044_clear_drv_active(ha); + qla8044_idc_unlock(ha); + } + + /* Set DEV_FAILED flag to disable timer */ + vha->device_flags |= DFLG_DEV_FAILED; + qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); + qla2x00_mark_all_devices_lost(vha); + vha->flags.online = 0; + vha->flags.init_done = 0; +} + +/* + * qla82xx_need_reset_handler + * Code to start reset sequence + * + * Note: + * IDC lock must be held upon entry + * + * Return: + * Success : 0 + * Failed : 1 + */ +static void +qla82xx_need_reset_handler(scsi_qla_host_t *vha) +{ + uint32_t dev_state, drv_state, drv_active; + uint32_t active_mask = 0; + unsigned long reset_timeout; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + + if (vha->flags.online) { + qla82xx_idc_unlock(ha); + qla2x00_abort_isp_cleanup(vha); + ha->isp_ops->get_flash_version(vha, req->ring); + ha->isp_ops->nvram_config(vha); + qla82xx_idc_lock(ha); + } + + drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + if (!ha->flags.nic_core_reset_owner) { + ql_dbg(ql_dbg_p3p, vha, 0xb028, + "reset_acknowledged by 0x%x\n", ha->portnum); + qla82xx_set_rst_ready(ha); + } else { + active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); + drv_active &= active_mask; + ql_dbg(ql_dbg_p3p, vha, 0xb029, + "active_mask: 0x%08x\n", active_mask); + } + + /* wait for 10 seconds for reset ack from all functions */ + reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); + + drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); + drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + + ql_dbg(ql_dbg_p3p, vha, 0xb02a, + "drv_state: 0x%08x, drv_active: 0x%08x, " + "dev_state: 0x%08x, active_mask: 0x%08x\n", + drv_state, drv_active, dev_state, active_mask); + + while (drv_state != drv_active && + dev_state != QLA8XXX_DEV_INITIALIZING) { + if (time_after_eq(jiffies, reset_timeout)) { + ql_log(ql_log_warn, vha, 0x00b5, + "Reset timeout.\n"); + break; + } + qla82xx_idc_unlock(ha); + msleep(1000); + qla82xx_idc_lock(ha); + drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); + drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + if (ha->flags.nic_core_reset_owner) + drv_active &= active_mask; + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + } + + ql_dbg(ql_dbg_p3p, vha, 0xb02b, + "drv_state: 0x%08x, drv_active: 0x%08x, " + "dev_state: 0x%08x, active_mask: 0x%08x\n", + drv_state, drv_active, dev_state, active_mask); + + ql_log(ql_log_info, vha, 0x00b6, + "Device state is 0x%x = %s.\n", + dev_state, qdev_state(dev_state)); + + /* Force to DEV_COLD unless someone else is 
starting a reset */ + if (dev_state != QLA8XXX_DEV_INITIALIZING && + dev_state != QLA8XXX_DEV_COLD) { + ql_log(ql_log_info, vha, 0x00b7, + "HW State: COLD/RE-INIT.\n"); + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD); + qla82xx_set_rst_ready(ha); + if (ql2xmdenable) { + if (qla82xx_md_collect(vha)) + ql_log(ql_log_warn, vha, 0xb02c, + "Minidump not collected.\n"); + } else + ql_log(ql_log_warn, vha, 0xb04f, + "Minidump disabled.\n"); + } +} + +int +qla82xx_check_md_needed(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint16_t fw_major_version, fw_minor_version, fw_subminor_version; + int rval = QLA_SUCCESS; + + fw_major_version = ha->fw_major_version; + fw_minor_version = ha->fw_minor_version; + fw_subminor_version = ha->fw_subminor_version; + + rval = qla2x00_get_fw_version(vha); + if (rval != QLA_SUCCESS) + return rval; + + if (ql2xmdenable) { + if (!ha->fw_dumped) { + if ((fw_major_version != ha->fw_major_version || + fw_minor_version != ha->fw_minor_version || + fw_subminor_version != ha->fw_subminor_version) || + (ha->prev_minidump_failed)) { + ql_dbg(ql_dbg_p3p, vha, 0xb02d, + "Firmware version differs Previous version: %d:%d:%d - New version: %d:%d:%d, prev_minidump_failed: %d.\n", + fw_major_version, fw_minor_version, + fw_subminor_version, + ha->fw_major_version, + ha->fw_minor_version, + ha->fw_subminor_version, + ha->prev_minidump_failed); + /* Release MiniDump resources */ + qla82xx_md_free(vha); + /* ALlocate MiniDump resources */ + qla82xx_md_prep(vha); + } + } else + ql_log(ql_log_info, vha, 0xb02e, + "Firmware dump available to retrieve\n"); + } + return rval; +} + + +static int +qla82xx_check_fw_alive(scsi_qla_host_t *vha) +{ + uint32_t fw_heartbeat_counter; + int status = 0; + + fw_heartbeat_counter = qla82xx_rd_32(vha->hw, + QLA82XX_PEG_ALIVE_COUNTER); + /* all 0xff, assume AER/EEH in progress, ignore */ + if (fw_heartbeat_counter == 0xffffffff) { + ql_dbg(ql_dbg_timer, vha, 0x6003, + "FW heartbeat counter is 0xffffffff, " + "returning status=%d.\n", status); + return status; + } + if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { + vha->seconds_since_last_heartbeat++; + /* FW not alive after 2 seconds */ + if (vha->seconds_since_last_heartbeat == 2) { + vha->seconds_since_last_heartbeat = 0; + status = 1; + } + } else + vha->seconds_since_last_heartbeat = 0; + vha->fw_heartbeat_counter = fw_heartbeat_counter; + if (status) + ql_dbg(ql_dbg_timer, vha, 0x6004, + "Returning status=%d.\n", status); + return status; +} + +/* + * qla82xx_device_state_handler + * Main state handler + * + * Note: + * IDC lock must be held upon entry + * + * Return: + * Success : 0 + * Failed : 1 + */ +int +qla82xx_device_state_handler(scsi_qla_host_t *vha) +{ + uint32_t dev_state; + uint32_t old_dev_state; + int rval = QLA_SUCCESS; + unsigned long dev_init_timeout; + struct qla_hw_data *ha = vha->hw; + int loopcount = 0; + + qla82xx_idc_lock(ha); + if (!vha->flags.init_done) { + qla82xx_set_drv_active(vha); + qla82xx_set_idc_version(vha); + } + + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + old_dev_state = dev_state; + ql_log(ql_log_info, vha, 0x009b, + "Device state is 0x%x = %s.\n", + dev_state, qdev_state(dev_state)); + + /* wait for 30 seconds for device to go ready */ + dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); + + while (1) { + + if (time_after_eq(jiffies, dev_init_timeout)) { + ql_log(ql_log_fatal, vha, 0x009c, + "Device init failed.\n"); + rval = QLA_FUNCTION_FAILED; + break; + } + dev_state = qla82xx_rd_32(ha, 
QLA82XX_CRB_DEV_STATE); + if (old_dev_state != dev_state) { + loopcount = 0; + old_dev_state = dev_state; + } + if (loopcount < 5) { + ql_log(ql_log_info, vha, 0x009d, + "Device state is 0x%x = %s.\n", + dev_state, qdev_state(dev_state)); + } + + switch (dev_state) { + case QLA8XXX_DEV_READY: + ha->flags.nic_core_reset_owner = 0; + goto rel_lock; + case QLA8XXX_DEV_COLD: + rval = qla82xx_device_bootstrap(vha); + break; + case QLA8XXX_DEV_INITIALIZING: + qla82xx_idc_unlock(ha); + msleep(1000); + qla82xx_idc_lock(ha); + break; + case QLA8XXX_DEV_NEED_RESET: + if (!ql2xdontresethba) + qla82xx_need_reset_handler(vha); + else { + qla82xx_idc_unlock(ha); + msleep(1000); + qla82xx_idc_lock(ha); + } + dev_init_timeout = jiffies + + (ha->fcoe_dev_init_timeout * HZ); + break; + case QLA8XXX_DEV_NEED_QUIESCENT: + qla82xx_need_qsnt_handler(vha); + /* Reset timeout value after quiescence handler */ + dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout + * HZ); + break; + case QLA8XXX_DEV_QUIESCENT: + /* Owner will exit and other will wait for the state + * to get changed + */ + if (ha->flags.quiesce_owner) + goto rel_lock; + + qla82xx_idc_unlock(ha); + msleep(1000); + qla82xx_idc_lock(ha); + + /* Reset timeout value after quiescence handler */ + dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout + * HZ); + break; + case QLA8XXX_DEV_FAILED: + qla8xxx_dev_failed_handler(vha); + rval = QLA_FUNCTION_FAILED; + goto exit; + default: + qla82xx_idc_unlock(ha); + msleep(1000); + qla82xx_idc_lock(ha); + } + loopcount++; + } +rel_lock: + qla82xx_idc_unlock(ha); +exit: + return rval; +} + +static int qla82xx_check_temp(scsi_qla_host_t *vha) +{ + uint32_t temp, temp_state, temp_val; + struct qla_hw_data *ha = vha->hw; + + temp = qla82xx_rd_32(ha, CRB_TEMP_STATE); + temp_state = qla82xx_get_temp_state(temp); + temp_val = qla82xx_get_temp_val(temp); + + if (temp_state == QLA82XX_TEMP_PANIC) { + ql_log(ql_log_warn, vha, 0x600e, + "Device temperature %d degrees C exceeds " + " maximum allowed. Hardware has been shut down.\n", + temp_val); + return 1; + } else if (temp_state == QLA82XX_TEMP_WARN) { + ql_log(ql_log_warn, vha, 0x600f, + "Device temperature %d degrees C exceeds " + "operating range. 
Immediate action needed.\n", + temp_val); + } + return 0; +} + +int qla82xx_read_temperature(scsi_qla_host_t *vha) +{ + uint32_t temp; + + temp = qla82xx_rd_32(vha->hw, CRB_TEMP_STATE); + return qla82xx_get_temp_val(temp); +} + +void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (ha->flags.mbox_busy) { + ha->flags.mbox_int = 1; + ha->flags.mbox_busy = 0; + ql_log(ql_log_warn, vha, 0x6010, + "Doing premature completion of mbx command.\n"); + if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) + complete(&ha->mbx_intr_comp); + } +} + +void qla82xx_watchdog(scsi_qla_host_t *vha) +{ + uint32_t dev_state, halt_status; + struct qla_hw_data *ha = vha->hw; + + /* don't poll if reset is going on */ + if (!ha->flags.nic_core_reset_hdlr_active) { + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + if (qla82xx_check_temp(vha)) { + set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); + ha->flags.isp82xx_fw_hung = 1; + qla82xx_clear_pending_mbx(vha); + } else if (dev_state == QLA8XXX_DEV_NEED_RESET && + !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { + ql_log(ql_log_warn, vha, 0x6001, + "Adapter reset needed.\n"); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && + !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { + ql_log(ql_log_warn, vha, 0x6002, + "Quiescent needed.\n"); + set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); + } else if (dev_state == QLA8XXX_DEV_FAILED && + !test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) && + vha->flags.online == 1) { + ql_log(ql_log_warn, vha, 0xb055, + "Adapter state is failed. Offlining.\n"); + set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); + ha->flags.isp82xx_fw_hung = 1; + qla82xx_clear_pending_mbx(vha); + } else { + if (qla82xx_check_fw_alive(vha)) { + ql_dbg(ql_dbg_timer, vha, 0x6011, + "disabling pause transmit on port 0 & 1.\n"); + qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, + CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1); + halt_status = qla82xx_rd_32(ha, + QLA82XX_PEG_HALT_STATUS1); + ql_log(ql_log_info, vha, 0x6005, + "dumping hw/fw registers:.\n " + " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n " + " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n " + " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n " + " PEG_NET_4_PC: 0x%x.\n", halt_status, + qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2), + qla82xx_rd_32(ha, + QLA82XX_CRB_PEG_NET_0 + 0x3c), + qla82xx_rd_32(ha, + QLA82XX_CRB_PEG_NET_1 + 0x3c), + qla82xx_rd_32(ha, + QLA82XX_CRB_PEG_NET_2 + 0x3c), + qla82xx_rd_32(ha, + QLA82XX_CRB_PEG_NET_3 + 0x3c), + qla82xx_rd_32(ha, + QLA82XX_CRB_PEG_NET_4 + 0x3c)); + if (((halt_status & 0x1fffff00) >> 8) == 0x67) + ql_log(ql_log_warn, vha, 0xb052, + "Firmware aborted with " + "error code 0x00006700. 
Device is " + "being reset.\n"); + if (halt_status & HALT_STATUS_UNRECOVERABLE) { + set_bit(ISP_UNRECOVERABLE, + &vha->dpc_flags); + } else { + ql_log(ql_log_info, vha, 0x6006, + "Detect abort needed.\n"); + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + } + ha->flags.isp82xx_fw_hung = 1; + ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n"); + qla82xx_clear_pending_mbx(vha); + } + } + } +} + +int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) +{ + int rval = -1; + struct qla_hw_data *ha = vha->hw; + + if (IS_QLA82XX(ha)) + rval = qla82xx_device_state_handler(vha); + else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + /* Decide the reset ownership */ + qla83xx_reset_ownership(vha); + qla8044_idc_unlock(ha); + rval = qla8044_device_state_handler(vha); + } + return rval; +} + +void +qla82xx_set_reset_owner(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t dev_state = 0; + + if (IS_QLA82XX(ha)) + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + else if (IS_QLA8044(ha)) + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + + if (dev_state == QLA8XXX_DEV_READY) { + ql_log(ql_log_info, vha, 0xb02f, + "HW State: NEED RESET\n"); + if (IS_QLA82XX(ha)) { + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA8XXX_DEV_NEED_RESET); + ha->flags.nic_core_reset_owner = 1; + ql_dbg(ql_dbg_p3p, vha, 0xb030, + "reset_owner is 0x%x\n", ha->portnum); + } else if (IS_QLA8044(ha)) + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_NEED_RESET); + } else + ql_log(ql_log_info, vha, 0xb031, + "Device state is 0x%x = %s.\n", + dev_state, qdev_state(dev_state)); +} + +/* + * qla82xx_abort_isp + * Resets ISP and aborts all outstanding commands. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = success + */ +int +qla82xx_abort_isp(scsi_qla_host_t *vha) +{ + int rval = -1; + struct qla_hw_data *ha = vha->hw; + + if (vha->device_flags & DFLG_DEV_FAILED) { + ql_log(ql_log_warn, vha, 0x8024, + "Device in failed state, exiting.\n"); + return QLA_SUCCESS; + } + ha->flags.nic_core_reset_hdlr_active = 1; + + qla82xx_idc_lock(ha); + qla82xx_set_reset_owner(vha); + qla82xx_idc_unlock(ha); + + if (IS_QLA82XX(ha)) + rval = qla82xx_device_state_handler(vha); + else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + /* Decide the reset ownership */ + qla83xx_reset_ownership(vha); + qla8044_idc_unlock(ha); + rval = qla8044_device_state_handler(vha); + } + + qla82xx_idc_lock(ha); + qla82xx_clear_rst_ready(ha); + qla82xx_idc_unlock(ha); + + if (rval == QLA_SUCCESS) { + ha->flags.isp82xx_fw_hung = 0; + ha->flags.nic_core_reset_hdlr_active = 0; + qla82xx_restart_isp(vha); + } + + if (rval) { + vha->flags.online = 1; + if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { + if (ha->isp_abort_cnt == 0) { + ql_log(ql_log_warn, vha, 0x8027, + "ISP error recover failed - board " + "disabled.\n"); + /* + * The next call disables the board + * completely. 
+ */ + ha->isp_ops->reset_adapter(vha); + vha->flags.online = 0; + clear_bit(ISP_ABORT_RETRY, + &vha->dpc_flags); + rval = QLA_SUCCESS; + } else { /* schedule another ISP abort */ + ha->isp_abort_cnt--; + ql_log(ql_log_warn, vha, 0x8036, + "ISP abort - retry remaining %d.\n", + ha->isp_abort_cnt); + rval = QLA_FUNCTION_FAILED; + } + } else { + ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; + ql_dbg(ql_dbg_taskm, vha, 0x8029, + "ISP error recovery - retrying (%d) more times.\n", + ha->isp_abort_cnt); + set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); + rval = QLA_FUNCTION_FAILED; + } + } + return rval; +} + +/* + * qla82xx_fcoe_ctx_reset + * Perform a quick reset and aborts all outstanding commands. + * This will only perform an FCoE context reset and avoids a full blown + * chip reset. + * + * Input: + * ha = adapter block pointer. + * is_reset_path = flag for identifying the reset path. + * + * Returns: + * 0 = success + */ +int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha) +{ + int rval = QLA_FUNCTION_FAILED; + + if (vha->flags.online) { + /* Abort all outstanding commands, so as to be requeued later */ + qla2x00_abort_isp_cleanup(vha); + } + + /* Stop currently executing firmware. + * This will destroy existing FCoE context at the F/W end. + */ + qla2x00_try_to_stop_firmware(vha); + + /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */ + rval = qla82xx_restart_isp(vha); + + return rval; +} + +/* + * qla2x00_wait_for_fcoe_ctx_reset + * Wait till the FCoE context is reset. + * + * Note: + * Does context switching here. + * Release SPIN_LOCK (if any) before calling this routine. + * + * Return: + * Success (fcoe_ctx reset is done) : 0 + * Failed (fcoe_ctx reset not completed within max loop timout ) : 1 + */ +int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha) +{ + int status = QLA_FUNCTION_FAILED; + unsigned long wait_reset; + + wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ); + while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || + test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) + && time_before(jiffies, wait_reset)) { + + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ); + + if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) && + !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { + status = QLA_SUCCESS; + break; + } + } + ql_dbg(ql_dbg_p3p, vha, 0xb027, + "%s: status=%d.\n", __func__, status); + + return status; +} + +void +qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) +{ + int i, fw_state = 0; + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + + /* Check if 82XX firmware is alive or not + * We may have arrived here from NEED_RESET + * detection only + */ + if (!ha->flags.isp82xx_fw_hung) { + for (i = 0; i < 2; i++) { + msleep(1000); + if (IS_QLA82XX(ha)) + fw_state = qla82xx_check_fw_alive(vha); + else if (IS_QLA8044(ha)) + fw_state = qla8044_check_fw_alive(vha); + if (fw_state) { + ha->flags.isp82xx_fw_hung = 1; + qla82xx_clear_pending_mbx(vha); + break; + } + } + } + ql_dbg(ql_dbg_init, vha, 0x00b0, + "Entered %s fw_hung=%d.\n", + __func__, ha->flags.isp82xx_fw_hung); + + /* Abort all commands gracefully if fw NOT hung */ + if (!ha->flags.isp82xx_fw_hung) { + int cnt, que; + srb_t *sp; + struct req_que *req; + + spin_lock_irqsave(&ha->hardware_lock, flags); + for (que = 0; que < ha->max_req_queues; que++) { + req = ha->req_q_map[que]; + if (!req) + continue; + for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { + sp = req->outstanding_cmds[cnt]; + if (sp) { + if ((!sp->u.scmd.crc_ctx || + (sp->flags & + SRB_FCP_CMND_DMA_VALID)) && + 
!ha->flags.isp82xx_fw_hung) { + spin_unlock_irqrestore( + &ha->hardware_lock, flags); + if (ha->isp_ops->abort_command(sp)) { + ql_log(ql_log_info, vha, + 0x00b1, + "mbx abort failed.\n"); + } else { + ql_log(ql_log_info, vha, + 0x00b2, + "mbx abort success.\n"); + } + spin_lock_irqsave(&ha->hardware_lock, flags); + } + } + } + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* Wait for pending cmds (physical and virtual) to complete */ + if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, + WAIT_HOST) == QLA_SUCCESS) { + ql_dbg(ql_dbg_init, vha, 0x00b3, + "Done wait for " + "pending commands.\n"); + } else { + WARN_ON_ONCE(true); + } + } +} + +/* Minidump related functions */ +static int +qla82xx_minidump_process_control(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + struct qla82xx_md_entry_crb *crb_entry; + uint32_t read_value, opcode, poll_time; + uint32_t addr, index, crb_addr; + unsigned long wtime; + struct qla82xx_md_template_hdr *tmplt_hdr; + uint32_t rval = QLA_SUCCESS; + int i; + + tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; + crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr; + crb_addr = crb_entry->addr; + + for (i = 0; i < crb_entry->op_count; i++) { + opcode = crb_entry->crb_ctrl.opcode; + if (opcode & QLA82XX_DBG_OPCODE_WR) { + qla82xx_md_rw_32(ha, crb_addr, + crb_entry->value_1, 1); + opcode &= ~QLA82XX_DBG_OPCODE_WR; + } + + if (opcode & QLA82XX_DBG_OPCODE_RW) { + read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); + qla82xx_md_rw_32(ha, crb_addr, read_value, 1); + opcode &= ~QLA82XX_DBG_OPCODE_RW; + } + + if (opcode & QLA82XX_DBG_OPCODE_AND) { + read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); + read_value &= crb_entry->value_2; + opcode &= ~QLA82XX_DBG_OPCODE_AND; + if (opcode & QLA82XX_DBG_OPCODE_OR) { + read_value |= crb_entry->value_3; + opcode &= ~QLA82XX_DBG_OPCODE_OR; + } + qla82xx_md_rw_32(ha, crb_addr, read_value, 1); + } + + if (opcode & QLA82XX_DBG_OPCODE_OR) { + read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); + read_value |= crb_entry->value_3; + qla82xx_md_rw_32(ha, crb_addr, read_value, 1); + opcode &= ~QLA82XX_DBG_OPCODE_OR; + } + + if (opcode & QLA82XX_DBG_OPCODE_POLL) { + poll_time = crb_entry->crb_strd.poll_timeout; + wtime = jiffies + poll_time; + read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); + + do { + if ((read_value & crb_entry->value_2) + == crb_entry->value_1) + break; + else if (time_after_eq(jiffies, wtime)) { + /* capturing dump failed */ + rval = QLA_FUNCTION_FAILED; + break; + } else + read_value = qla82xx_md_rw_32(ha, + crb_addr, 0, 0); + } while (1); + opcode &= ~QLA82XX_DBG_OPCODE_POLL; + } + + if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else + addr = crb_addr; + + read_value = qla82xx_md_rw_32(ha, addr, 0, 0); + index = crb_entry->crb_ctrl.state_index_v; + tmplt_hdr->saved_state_array[index] = read_value; + opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE; + } + + if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else + addr = crb_addr; + + if (crb_entry->crb_ctrl.state_index_v) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = + tmplt_hdr->saved_state_array[index]; + } else + read_value = crb_entry->value_1; + + qla82xx_md_rw_32(ha, addr, read_value, 1); + opcode 
&= ~QLA82XX_DBG_OPCODE_WRSTATE; + } + + if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = tmplt_hdr->saved_state_array[index]; + read_value <<= crb_entry->crb_ctrl.shl; + read_value >>= crb_entry->crb_ctrl.shr; + if (crb_entry->value_2) + read_value &= crb_entry->value_2; + read_value |= crb_entry->value_3; + read_value += crb_entry->value_1; + tmplt_hdr->saved_state_array[index] = read_value; + opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE; + } + crb_addr += crb_entry->crb_strd.addr_stride; + } + return rval; +} + +static void +qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla82xx_md_entry_rdocm *ocm_hdr; + __le32 *data_ptr = *d_ptr; + + ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr; + r_addr = ocm_hdr->read_addr; + r_stride = ocm_hdr->read_addr_stride; + loop_cnt = ocm_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + r_value = rd_reg_dword(r_addr + ha->nx_pcibase); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } + *d_ptr = data_ptr; +} + +static void +qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; + struct qla82xx_md_entry_mux *mux_hdr; + __le32 *data_ptr = *d_ptr; + + mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr; + r_addr = mux_hdr->read_addr; + s_addr = mux_hdr->select_addr; + s_stride = mux_hdr->select_value_stride; + s_value = mux_hdr->select_value; + loop_cnt = mux_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla82xx_md_rw_32(ha, s_addr, s_value, 1); + r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); + *data_ptr++ = cpu_to_le32(s_value); + *data_ptr++ = cpu_to_le32(r_value); + s_value += s_stride; + } + *d_ptr = data_ptr; +} + +static void +qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla82xx_md_entry_crb *crb_hdr; + __le32 *data_ptr = *d_ptr; + + crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr; + r_addr = crb_hdr->addr; + r_stride = crb_hdr->crb_strd.addr_stride; + loop_cnt = crb_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_addr); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } + *d_ptr = data_ptr; +} + +static int +qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + unsigned long p_wait, w_time, p_mask; + uint32_t c_value_w, c_value_r; + struct qla82xx_md_entry_cache *cache_hdr; + int rval = QLA_FUNCTION_FAILED; + __le32 *data_ptr = *d_ptr; + + cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = cache_hdr->read_ctrl.read_addr_cnt; + p_wait = cache_hdr->cache_ctrl.poll_wait; + p_mask = cache_hdr->cache_ctrl.poll_mask; + + for (i = 0; i < loop_count; i++) { + qla82xx_md_rw_32(ha, 
t_r_addr, t_value, 1); + if (c_value_w) + qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); + + if (p_mask) { + w_time = jiffies + p_wait; + do { + c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0); + if ((c_value_r & p_mask) == 0) + break; + else if (time_after_eq(jiffies, w_time)) { + /* capturing dump failed */ + ql_dbg(ql_dbg_p3p, vha, 0xb032, + "c_value_r: 0x%x, poll_mask: 0x%lx, " + "w_time: 0x%lx\n", + c_value_r, p_mask, w_time); + return rval; + } + } while (1); + } + + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + r_value = qla82xx_md_rw_32(ha, addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_value); + addr += cache_hdr->read_ctrl.read_addr_stride; + } + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; + return QLA_SUCCESS; +} + +static void +qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + uint32_t c_value_w; + struct qla82xx_md_entry_cache *cache_hdr; + __le32 *data_ptr = *d_ptr; + + cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = cache_hdr->read_ctrl.read_addr_cnt; + + for (i = 0; i < loop_count; i++) { + qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); + qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + r_value = qla82xx_md_rw_32(ha, addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_value); + addr += cache_hdr->read_ctrl.read_addr_stride; + } + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; +} + +static void +qla82xx_minidump_process_queue(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t s_addr, r_addr; + uint32_t r_stride, r_value, r_cnt, qid = 0; + uint32_t i, k, loop_cnt; + struct qla82xx_md_entry_queue *q_hdr; + __le32 *data_ptr = *d_ptr; + + q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr; + s_addr = q_hdr->select_addr; + r_cnt = q_hdr->rd_strd.read_addr_cnt; + r_stride = q_hdr->rd_strd.read_addr_stride; + loop_cnt = q_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla82xx_md_rw_32(ha, s_addr, qid, 1); + r_addr = q_hdr->read_addr; + for (k = 0; k < r_cnt; k++) { + r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } + qid += q_hdr->q_strd.queue_id_stride; + } + *d_ptr = data_ptr; +} + +static void +qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_value; + uint32_t i, loop_cnt; + struct qla82xx_md_entry_rdrom *rom_hdr; + __le32 *data_ptr = *d_ptr; + + rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr; + r_addr = rom_hdr->read_addr; + loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t); + + for (i = 0; i < loop_cnt; i++) { + qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, + (r_addr & 0xFFFF0000), 1); + r_value = qla82xx_md_rw_32(ha, + MD_DIRECT_ROM_READ_BASE + + (r_addr & 0x0000FFFF), 0, 0); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += sizeof(uint32_t); + } + *d_ptr = data_ptr; +} + +static int +qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, __le32 
**d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_value, r_data; + uint32_t i, j, loop_cnt; + struct qla82xx_md_entry_rdmem *m_hdr; + unsigned long flags; + int rval = QLA_FUNCTION_FAILED; + __le32 *data_ptr = *d_ptr; + + m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr; + r_addr = m_hdr->read_addr; + loop_cnt = m_hdr->read_data_size/16; + + if (r_addr & 0xf) { + ql_log(ql_log_warn, vha, 0xb033, + "Read addr 0x%x not 16 bytes aligned\n", r_addr); + return rval; + } + + if (m_hdr->read_data_size % 16) { + ql_log(ql_log_warn, vha, 0xb034, + "Read data[0x%x] not multiple of 16 bytes\n", + m_hdr->read_data_size); + return rval; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb035, + "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", + __func__, r_addr, m_hdr->read_data_size, loop_cnt); + + write_lock_irqsave(&ha->hw_lock, flags); + for (i = 0; i < loop_cnt; i++) { + qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1); + r_value = 0; + qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1); + r_value = MIU_TA_CTL_ENABLE; + qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); + r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; + qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + r_value = qla82xx_md_rw_32(ha, + MD_MIU_TEST_AGT_CTRL, 0, 0); + if ((r_value & MIU_TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + printk_ratelimited(KERN_ERR + "failed to read through agent\n"); + write_unlock_irqrestore(&ha->hw_lock, flags); + return rval; + } + + for (j = 0; j < 4; j++) { + r_data = qla82xx_md_rw_32(ha, + MD_MIU_TEST_AGT_RDDATA[j], 0, 0); + *data_ptr++ = cpu_to_le32(r_data); + } + r_addr += 16; + } + write_unlock_irqrestore(&ha->hw_lock, flags); + *d_ptr = data_ptr; + return QLA_SUCCESS; +} + +int +qla82xx_validate_template_chksum(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint64_t chksum = 0; + uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr; + int count = ha->md_template_size/sizeof(uint32_t); + + while (count-- > 0) + chksum += *d_ptr++; + while (chksum >> 32) + chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32); + return ~chksum; +} + +static void +qla82xx_mark_entry_skipped(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, int index) +{ + entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; + ql_dbg(ql_dbg_p3p, vha, 0xb036, + "Skipping entry[%d]: " + "ETYPE[0x%x]-ELEVEL[0x%x]\n", + index, entry_hdr->entry_type, + entry_hdr->d_ctrl.entry_capture_mask); +} + +int +qla82xx_md_collect(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + int no_entry_hdr = 0; + qla82xx_md_entry_hdr_t *entry_hdr; + struct qla82xx_md_template_hdr *tmplt_hdr; + __le32 *data_ptr; + uint32_t total_data_size = 0, f_capture_mask, data_collected = 0; + int i = 0, rval = QLA_FUNCTION_FAILED; + + tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; + data_ptr = ha->md_dump; + + if (ha->fw_dumped) { + ql_log(ql_log_warn, vha, 0xb037, + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", ha->fw_dump); + goto md_failed; + } + + ha->fw_dumped = false; + + if (!ha->md_tmplt_hdr || !ha->md_dump) { + ql_log(ql_log_warn, vha, 0xb038, + "Memory not allocated for minidump capture\n"); + goto md_failed; + } + + if (ha->flags.isp82xx_no_md_cap) { + ql_log(ql_log_warn, vha, 0xb054, + "Forced reset from application, " + "ignore minidump capture\n"); + ha->flags.isp82xx_no_md_cap = 0; + goto md_failed; + } + + if (qla82xx_validate_template_chksum(vha)) { + ql_log(ql_log_info, 
vha, 0xb039, + "Template checksum validation error\n"); + goto md_failed; + } + + no_entry_hdr = tmplt_hdr->num_of_entries; + ql_dbg(ql_dbg_p3p, vha, 0xb03a, + "No of entry headers in Template: 0x%x\n", no_entry_hdr); + + ql_dbg(ql_dbg_p3p, vha, 0xb03b, + "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); + + f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF; + + /* Validate whether required debug level is set */ + if ((f_capture_mask & 0x3) != 0x3) { + ql_log(ql_log_warn, vha, 0xb03c, + "Minimum required capture mask[0x%x] level not set\n", + f_capture_mask); + goto md_failed; + } + tmplt_hdr->driver_capture_mask = ql2xmdcapmask; + + tmplt_hdr->driver_info[0] = vha->host_no; + tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) | + (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) | + QLA_DRIVER_BETA_VER; + + total_data_size = ha->md_dump_size; + + ql_dbg(ql_dbg_p3p, vha, 0xb03d, + "Total minidump data_size 0x%x to be captured\n", total_data_size); + + /* Check whether template obtained is valid */ + if (tmplt_hdr->entry_type != QLA82XX_TLHDR) { + ql_log(ql_log_warn, vha, 0xb04e, + "Bad template header entry type: 0x%x obtained\n", + tmplt_hdr->entry_type); + goto md_failed; + } + + entry_hdr = (qla82xx_md_entry_hdr_t *) + (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset); + + /* Walk through the entry headers */ + for (i = 0; i < no_entry_hdr; i++) { + + if (data_collected > total_data_size) { + ql_log(ql_log_warn, vha, 0xb03e, + "More MiniDump data collected: [0x%x]\n", + data_collected); + goto md_failed; + } + + if (!(entry_hdr->d_ctrl.entry_capture_mask & + ql2xmdcapmask)) { + entry_hdr->d_ctrl.driver_flags |= + QLA82XX_DBG_SKIPPED_FLAG; + ql_dbg(ql_dbg_p3p, vha, 0xb03f, + "Skipping entry[%d]: " + "ETYPE[0x%x]-ELEVEL[0x%x]\n", + i, entry_hdr->entry_type, + entry_hdr->d_ctrl.entry_capture_mask); + goto skip_nxt_entry; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb040, + "[%s]: data ptr[%d]: %p, entry_hdr: %p\n" + "entry_type: 0x%x, capture_mask: 0x%x\n", + __func__, i, data_ptr, entry_hdr, + entry_hdr->entry_type, + entry_hdr->d_ctrl.entry_capture_mask); + + ql_dbg(ql_dbg_p3p, vha, 0xb041, + "Data collected: [0x%x], Dump size left:[0x%x]\n", + data_collected, (ha->md_dump_size - data_collected)); + + /* Decode the entry type and take + * required action to capture debug data */ + switch (entry_hdr->entry_type) { + case QLA82XX_RDEND: + qla82xx_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA82XX_CNTRL: + rval = qla82xx_minidump_process_control(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla82xx_mark_entry_skipped(vha, entry_hdr, i); + goto md_failed; + } + break; + case QLA82XX_RDCRB: + qla82xx_minidump_process_rdcrb(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDMEM: + rval = qla82xx_minidump_process_rdmem(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla82xx_mark_entry_skipped(vha, entry_hdr, i); + goto md_failed; + } + break; + case QLA82XX_BOARD: + case QLA82XX_RDROM: + qla82xx_minidump_process_rdrom(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_L2DTG: + case QLA82XX_L2ITG: + case QLA82XX_L2DAT: + case QLA82XX_L2INS: + rval = qla82xx_minidump_process_l2tag(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla82xx_mark_entry_skipped(vha, entry_hdr, i); + goto md_failed; + } + break; + case QLA82XX_L1DAT: + case QLA82XX_L1INS: + qla82xx_minidump_process_l1cache(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDOCM: + qla82xx_minidump_process_rdocm(vha, + entry_hdr, 
&data_ptr); + break; + case QLA82XX_RDMUX: + qla82xx_minidump_process_rdmux(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_QUEUE: + qla82xx_minidump_process_queue(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDNOP: + default: + qla82xx_mark_entry_skipped(vha, entry_hdr, i); + break; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb042, + "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr); + + data_collected = (uint8_t *)data_ptr - + (uint8_t *)ha->md_dump; +skip_nxt_entry: + entry_hdr = (qla82xx_md_entry_hdr_t *) + (((uint8_t *)entry_hdr) + entry_hdr->entry_size); + } + + if (data_collected != total_data_size) { + ql_dbg(ql_dbg_p3p, vha, 0xb043, + "MiniDump data mismatch: Data collected: [0x%x]," + "total_data_size:[0x%x]\n", + data_collected, total_data_size); + goto md_failed; + } + + ql_log(ql_log_info, vha, 0xb044, + "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", + vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); + ha->fw_dumped = true; + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); + +md_failed: + return rval; +} + +int +qla82xx_md_alloc(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + int i, k; + struct qla82xx_md_template_hdr *tmplt_hdr; + + tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; + + if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) { + ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF; + ql_log(ql_log_info, vha, 0xb045, + "Forcing driver capture mask to firmware default capture mask: 0x%x.\n", + ql2xmdcapmask); + } + + for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) { + if (i & ql2xmdcapmask) + ha->md_dump_size += tmplt_hdr->capture_size_array[k]; + } + + if (ha->md_dump) { + ql_log(ql_log_warn, vha, 0xb046, + "Firmware dump previously allocated.\n"); + return 1; + } + + ha->md_dump = vmalloc(ha->md_dump_size); + if (ha->md_dump == NULL) { + ql_log(ql_log_warn, vha, 0xb047, + "Unable to allocate memory for Minidump size " + "(0x%x).\n", ha->md_dump_size); + return 1; + } + return 0; +} + +void +qla82xx_md_free(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + /* Release the template header allocated */ + if (ha->md_tmplt_hdr) { + ql_log(ql_log_info, vha, 0xb048, + "Free MiniDump template: %p, size (%d KB)\n", + ha->md_tmplt_hdr, ha->md_template_size / 1024); + dma_free_coherent(&ha->pdev->dev, ha->md_template_size, + ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); + ha->md_tmplt_hdr = NULL; + } + + /* Release the template data buffer allocated */ + if (ha->md_dump) { + ql_log(ql_log_info, vha, 0xb049, + "Free MiniDump memory: %p, size (%d KB)\n", + ha->md_dump, ha->md_dump_size / 1024); + vfree(ha->md_dump); + ha->md_dump_size = 0; + ha->md_dump = NULL; + } +} + +void +qla82xx_md_prep(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + int rval; + + /* Get Minidump template size */ + rval = qla82xx_md_get_template_size(vha); + if (rval == QLA_SUCCESS) { + ql_log(ql_log_info, vha, 0xb04a, + "MiniDump Template size obtained (%d KB)\n", + ha->md_template_size / 1024); + + /* Get Minidump template */ + if (IS_QLA8044(ha)) + rval = qla8044_md_get_template(vha); + else + rval = qla82xx_md_get_template(vha); + + if (rval == QLA_SUCCESS) { + ql_dbg(ql_dbg_p3p, vha, 0xb04b, + "MiniDump Template obtained\n"); + + /* Allocate memory for minidump */ + rval = qla82xx_md_alloc(vha); + if (rval == QLA_SUCCESS) + ql_log(ql_log_info, vha, 0xb04c, + "MiniDump memory allocated (%d KB)\n", + ha->md_dump_size / 1024); + else { + ql_log(ql_log_info, vha, 0xb04d, + "Free MiniDump template: 
%p, size: (%d KB)\n", + ha->md_tmplt_hdr, + ha->md_template_size / 1024); + dma_free_coherent(&ha->pdev->dev, + ha->md_template_size, + ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); + ha->md_tmplt_hdr = NULL; + } + + } + } +} + +int +qla82xx_beacon_on(struct scsi_qla_host *vha) +{ + + int rval; + struct qla_hw_data *ha = vha->hw; + + qla82xx_idc_lock(ha); + rval = qla82xx_mbx_beacon_ctl(vha, 1); + + if (rval) { + ql_log(ql_log_warn, vha, 0xb050, + "mbx set led config failed in %s\n", __func__); + goto exit; + } + ha->beacon_blink_led = 1; +exit: + qla82xx_idc_unlock(ha); + return rval; +} + +int +qla82xx_beacon_off(struct scsi_qla_host *vha) +{ + + int rval; + struct qla_hw_data *ha = vha->hw; + + qla82xx_idc_lock(ha); + rval = qla82xx_mbx_beacon_ctl(vha, 0); + + if (rval) { + ql_log(ql_log_warn, vha, 0xb051, + "mbx set led config failed in %s\n", __func__); + goto exit; + } + ha->beacon_blink_led = 0; +exit: + qla82xx_idc_unlock(ha); + return rval; +} + +void +qla82xx_fw_dump(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (!ha->allow_cna_fw_dump) + return; + + scsi_block_requests(vha->host); + ha->flags.isp82xx_no_md_cap = 1; + qla82xx_idc_lock(ha); + qla82xx_set_reset_owner(vha); + qla82xx_idc_unlock(ha); + qla2x00_wait_for_chip_reset(vha); + scsi_unblock_requests(vha->host); +} diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h new file mode 100644 index 000000000..5d1bdc15b --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -0,0 +1,1192 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#ifndef __QLA_NX_H +#define __QLA_NX_H + +#include + +/* + * Following are the states of the Phantom. Phantom will set them and + * Host will read to check if the fields are correct. 
+*/ +#define PHAN_INITIALIZE_FAILED 0xffff +#define PHAN_INITIALIZE_COMPLETE 0xff01 + +/* Host writes the following to notify that it has done the init-handshake */ +#define PHAN_INITIALIZE_ACK 0xf00f +#define PHAN_PEG_RCV_INITIALIZED 0xff01 + +/*CRB_RELATED*/ +#define QLA82XX_CRB_BASE QLA82XX_CAM_RAM(0x200) +#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X)) + +#define CRB_CMDPEG_STATE QLA82XX_REG(0x50) +#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) +#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54) +#define CRB_DMA_SHIFT QLA82XX_REG(0xcc) +#define CRB_TEMP_STATE QLA82XX_REG(0x1b4) +#define QLA82XX_DMA_SHIFT_VALUE 0x55555555 + +#define QLA82XX_HW_H0_CH_HUB_ADR 0x05 +#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E +#define QLA82XX_HW_H2_CH_HUB_ADR 0x03 +#define QLA82XX_HW_H3_CH_HUB_ADR 0x01 +#define QLA82XX_HW_H4_CH_HUB_ADR 0x06 +#define QLA82XX_HW_H5_CH_HUB_ADR 0x07 +#define QLA82XX_HW_H6_CH_HUB_ADR 0x08 + +/* Hub 0 */ +#define QLA82XX_HW_MN_CRB_AGT_ADR 0x15 +#define QLA82XX_HW_MS_CRB_AGT_ADR 0x25 + +/* Hub 1 */ +#define QLA82XX_HW_PS_CRB_AGT_ADR 0x73 +#define QLA82XX_HW_QMS_CRB_AGT_ADR 0x00 +#define QLA82XX_HW_RPMX3_CRB_AGT_ADR 0x0b +#define QLA82XX_HW_SQGS0_CRB_AGT_ADR 0x01 +#define QLA82XX_HW_SQGS1_CRB_AGT_ADR 0x02 +#define QLA82XX_HW_SQGS2_CRB_AGT_ADR 0x03 +#define QLA82XX_HW_SQGS3_CRB_AGT_ADR 0x04 +#define QLA82XX_HW_C2C0_CRB_AGT_ADR 0x58 +#define QLA82XX_HW_C2C1_CRB_AGT_ADR 0x59 +#define QLA82XX_HW_C2C2_CRB_AGT_ADR 0x5a +#define QLA82XX_HW_RPMX2_CRB_AGT_ADR 0x0a +#define QLA82XX_HW_RPMX4_CRB_AGT_ADR 0x0c +#define QLA82XX_HW_RPMX7_CRB_AGT_ADR 0x0f +#define QLA82XX_HW_RPMX9_CRB_AGT_ADR 0x12 +#define QLA82XX_HW_SMB_CRB_AGT_ADR 0x18 + +/* Hub 2 */ +#define QLA82XX_HW_NIU_CRB_AGT_ADR 0x31 +#define QLA82XX_HW_I2C0_CRB_AGT_ADR 0x19 +#define QLA82XX_HW_I2C1_CRB_AGT_ADR 0x29 + +#define QLA82XX_HW_SN_CRB_AGT_ADR 0x10 +#define QLA82XX_HW_I2Q_CRB_AGT_ADR 0x20 +#define QLA82XX_HW_LPC_CRB_AGT_ADR 0x22 +#define QLA82XX_HW_ROMUSB_CRB_AGT_ADR 0x21 +#define QLA82XX_HW_QM_CRB_AGT_ADR 0x66 +#define QLA82XX_HW_SQG0_CRB_AGT_ADR 0x60 +#define QLA82XX_HW_SQG1_CRB_AGT_ADR 0x61 +#define QLA82XX_HW_SQG2_CRB_AGT_ADR 0x62 +#define QLA82XX_HW_SQG3_CRB_AGT_ADR 0x63 +#define QLA82XX_HW_RPMX1_CRB_AGT_ADR 0x09 +#define QLA82XX_HW_RPMX5_CRB_AGT_ADR 0x0d +#define QLA82XX_HW_RPMX6_CRB_AGT_ADR 0x0e +#define QLA82XX_HW_RPMX8_CRB_AGT_ADR 0x11 + +/* Hub 3 */ +#define QLA82XX_HW_PH_CRB_AGT_ADR 0x1A +#define QLA82XX_HW_SRE_CRB_AGT_ADR 0x50 +#define QLA82XX_HW_EG_CRB_AGT_ADR 0x51 +#define QLA82XX_HW_RPMX0_CRB_AGT_ADR 0x08 + +/* Hub 4 */ +#define QLA82XX_HW_PEGN0_CRB_AGT_ADR 0x40 +#define QLA82XX_HW_PEGN1_CRB_AGT_ADR 0x41 +#define QLA82XX_HW_PEGN2_CRB_AGT_ADR 0x42 +#define QLA82XX_HW_PEGN3_CRB_AGT_ADR 0x43 +#define QLA82XX_HW_PEGNI_CRB_AGT_ADR 0x44 +#define QLA82XX_HW_PEGND_CRB_AGT_ADR 0x45 +#define QLA82XX_HW_PEGNC_CRB_AGT_ADR 0x46 +#define QLA82XX_HW_PEGR0_CRB_AGT_ADR 0x47 +#define QLA82XX_HW_PEGR1_CRB_AGT_ADR 0x48 +#define QLA82XX_HW_PEGR2_CRB_AGT_ADR 0x49 +#define QLA82XX_HW_PEGR3_CRB_AGT_ADR 0x4a +#define QLA82XX_HW_PEGN4_CRB_AGT_ADR 0x4b + +/* Hub 5 */ +#define QLA82XX_HW_PEGS0_CRB_AGT_ADR 0x40 +#define QLA82XX_HW_PEGS1_CRB_AGT_ADR 0x41 +#define QLA82XX_HW_PEGS2_CRB_AGT_ADR 0x42 +#define QLA82XX_HW_PEGS3_CRB_AGT_ADR 0x43 +#define QLA82XX_HW_PEGSI_CRB_AGT_ADR 0x44 +#define QLA82XX_HW_PEGSD_CRB_AGT_ADR 0x45 +#define QLA82XX_HW_PEGSC_CRB_AGT_ADR 0x46 + +/* Hub 6 */ +#define QLA82XX_HW_CAS0_CRB_AGT_ADR 0x46 +#define QLA82XX_HW_CAS1_CRB_AGT_ADR 0x47 +#define QLA82XX_HW_CAS2_CRB_AGT_ADR 0x48 +#define QLA82XX_HW_CAS3_CRB_AGT_ADR 0x49 +#define 
QLA82XX_HW_NCM_CRB_AGT_ADR 0x16 +#define QLA82XX_HW_TMR_CRB_AGT_ADR 0x17 +#define QLA82XX_HW_XDMA_CRB_AGT_ADR 0x05 +#define QLA82XX_HW_OCM0_CRB_AGT_ADR 0x06 +#define QLA82XX_HW_OCM1_CRB_AGT_ADR 0x07 + +/* This field defines PCI/X adr [25:20] of agents on the CRB */ +/* */ +#define QLA82XX_HW_PX_MAP_CRB_PH 0 +#define QLA82XX_HW_PX_MAP_CRB_PS 1 +#define QLA82XX_HW_PX_MAP_CRB_MN 2 +#define QLA82XX_HW_PX_MAP_CRB_MS 3 +#define QLA82XX_HW_PX_MAP_CRB_SRE 5 +#define QLA82XX_HW_PX_MAP_CRB_NIU 6 +#define QLA82XX_HW_PX_MAP_CRB_QMN 7 +#define QLA82XX_HW_PX_MAP_CRB_SQN0 8 +#define QLA82XX_HW_PX_MAP_CRB_SQN1 9 +#define QLA82XX_HW_PX_MAP_CRB_SQN2 10 +#define QLA82XX_HW_PX_MAP_CRB_SQN3 11 +#define QLA82XX_HW_PX_MAP_CRB_QMS 12 +#define QLA82XX_HW_PX_MAP_CRB_SQS0 13 +#define QLA82XX_HW_PX_MAP_CRB_SQS1 14 +#define QLA82XX_HW_PX_MAP_CRB_SQS2 15 +#define QLA82XX_HW_PX_MAP_CRB_SQS3 16 +#define QLA82XX_HW_PX_MAP_CRB_PGN0 17 +#define QLA82XX_HW_PX_MAP_CRB_PGN1 18 +#define QLA82XX_HW_PX_MAP_CRB_PGN2 19 +#define QLA82XX_HW_PX_MAP_CRB_PGN3 20 +#define QLA82XX_HW_PX_MAP_CRB_PGN4 QLA82XX_HW_PX_MAP_CRB_SQS2 +#define QLA82XX_HW_PX_MAP_CRB_PGND 21 +#define QLA82XX_HW_PX_MAP_CRB_PGNI 22 +#define QLA82XX_HW_PX_MAP_CRB_PGS0 23 +#define QLA82XX_HW_PX_MAP_CRB_PGS1 24 +#define QLA82XX_HW_PX_MAP_CRB_PGS2 25 +#define QLA82XX_HW_PX_MAP_CRB_PGS3 26 +#define QLA82XX_HW_PX_MAP_CRB_PGSD 27 +#define QLA82XX_HW_PX_MAP_CRB_PGSI 28 +#define QLA82XX_HW_PX_MAP_CRB_SN 29 +#define QLA82XX_HW_PX_MAP_CRB_EG 31 +#define QLA82XX_HW_PX_MAP_CRB_PH2 32 +#define QLA82XX_HW_PX_MAP_CRB_PS2 33 +#define QLA82XX_HW_PX_MAP_CRB_CAM 34 +#define QLA82XX_HW_PX_MAP_CRB_CAS0 35 +#define QLA82XX_HW_PX_MAP_CRB_CAS1 36 +#define QLA82XX_HW_PX_MAP_CRB_CAS2 37 +#define QLA82XX_HW_PX_MAP_CRB_C2C0 38 +#define QLA82XX_HW_PX_MAP_CRB_C2C1 39 +#define QLA82XX_HW_PX_MAP_CRB_TIMR 40 +#define QLA82XX_HW_PX_MAP_CRB_RPMX1 42 +#define QLA82XX_HW_PX_MAP_CRB_RPMX2 43 +#define QLA82XX_HW_PX_MAP_CRB_RPMX3 44 +#define QLA82XX_HW_PX_MAP_CRB_RPMX4 45 +#define QLA82XX_HW_PX_MAP_CRB_RPMX5 46 +#define QLA82XX_HW_PX_MAP_CRB_RPMX6 47 +#define QLA82XX_HW_PX_MAP_CRB_RPMX7 48 +#define QLA82XX_HW_PX_MAP_CRB_XDMA 49 +#define QLA82XX_HW_PX_MAP_CRB_I2Q 50 +#define QLA82XX_HW_PX_MAP_CRB_ROMUSB 51 +#define QLA82XX_HW_PX_MAP_CRB_CAS3 52 +#define QLA82XX_HW_PX_MAP_CRB_RPMX0 53 +#define QLA82XX_HW_PX_MAP_CRB_RPMX8 54 +#define QLA82XX_HW_PX_MAP_CRB_RPMX9 55 +#define QLA82XX_HW_PX_MAP_CRB_OCM0 56 +#define QLA82XX_HW_PX_MAP_CRB_OCM1 57 +#define QLA82XX_HW_PX_MAP_CRB_SMB 58 +#define QLA82XX_HW_PX_MAP_CRB_I2C0 59 +#define QLA82XX_HW_PX_MAP_CRB_I2C1 60 +#define QLA82XX_HW_PX_MAP_CRB_LPC 61 +#define QLA82XX_HW_PX_MAP_CRB_PGNC 62 +#define QLA82XX_HW_PX_MAP_CRB_PGR0 63 +#define QLA82XX_HW_PX_MAP_CRB_PGR1 4 +#define QLA82XX_HW_PX_MAP_CRB_PGR2 30 +#define QLA82XX_HW_PX_MAP_CRB_PGR3 41 + +/* This field defines CRB adr [31:20] of the agents */ +/* */ + +#define QLA82XX_HW_CRB_HUB_AGT_ADR_MN ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \ + QLA82XX_HW_MN_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PH ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PH_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_MS ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \ + QLA82XX_HW_MS_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PS_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SS_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX3_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMS 
((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_QMS_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQGS0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQGS1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQGS2_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQGS3_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_C2C0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_C2C1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX2_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX4_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX7_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX9_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SMB ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SMB_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_NIU ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \ + QLA82XX_HW_NIU_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \ + QLA82XX_HW_I2C0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \ + QLA82XX_HW_I2C1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SRE ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SRE_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_EG ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_EG_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMN ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_QM_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQG0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQG1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQG2_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQG3_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX5_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX6_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX8_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_CAS0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_CAS1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_CAS2_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_CAS3_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGNI_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGND ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + 
QLA82XX_HW_PEGND_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGN0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGN1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGN2_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGN3_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGN4_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGNC_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGR0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGR1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGR2_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGR3_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGSI_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSD ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGSD_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGS0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGS1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGS2_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGS3_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSC ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGSC_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAM ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_NCM_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_TMR_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_XDMA_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SN ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SN_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_I2Q_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_ROMUSB_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_OCM0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM1 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_OCM1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_LPC ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_LPC_CRB_AGT_ADR) + +#define ROMUSB_GLB (QLA82XX_CRB_ROMUSB + 0x00000) +#define QLA82XX_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c) +#define QLA82XX_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004) +#define QLA82XX_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008) +#define QLA82XX_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008) +#define QLA82XX_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c) +#define QLA82XX_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010) +#define QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014) +#define QLA82XX_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018) + +#define ROMUSB_ROM (QLA82XX_CRB_ROMUSB + 0x10000) +#define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) +#define QLA82XX_ROMUSB_GLB_CAS_RST 
(ROMUSB_GLB + 0x0038) + +#define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */ +#define QLA82XX_PCI_CRB_WINDOW(A) \ + (QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE) +#define QLA82XX_CRB_C2C_0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C0) +#define QLA82XX_CRB_C2C_1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C1) +#define QLA82XX_CRB_C2C_2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C2) +#define QLA82XX_CRB_CAM \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAM) +#define QLA82XX_CRB_CASPER \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS) +#define QLA82XX_CRB_CASPER_0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS0) +#define QLA82XX_CRB_CASPER_1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS1) +#define QLA82XX_CRB_CASPER_2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS2) +#define QLA82XX_CRB_DDR_MD \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MS) +#define QLA82XX_CRB_DDR_NET \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MN) +#define QLA82XX_CRB_EPG \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_EG) +#define QLA82XX_CRB_I2Q \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2Q) +#define QLA82XX_CRB_NIU \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_NIU) + +#define QLA82XX_CRB_PCIX_HOST \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH) +#define QLA82XX_CRB_PCIX_HOST2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH2) +#define QLA82XX_CRB_PCIX_MD \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS) +#define QLA82XX_CRB_PCIE \ + QLA82XX_CRB_PCIX_MD + +/* window 1 pcie slot */ +#define QLA82XX_CRB_PCIE2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS2) +#define QLA82XX_CRB_PEG_MD_0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS0) +#define QLA82XX_CRB_PEG_MD_1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS1) +#define QLA82XX_CRB_PEG_MD_2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS2) +#define QLA82XX_CRB_PEG_MD_3 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS3) +#define QLA82XX_CRB_PEG_MD_3 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS3) +#define QLA82XX_CRB_PEG_MD_D \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSD) +#define QLA82XX_CRB_PEG_MD_I \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSI) +#define QLA82XX_CRB_PEG_NET_0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN0) +#define QLA82XX_CRB_PEG_NET_1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN1) +#define QLA82XX_CRB_PEG_NET_2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN2) +#define QLA82XX_CRB_PEG_NET_3 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN3) +#define QLA82XX_CRB_PEG_NET_4 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN4) +#define QLA82XX_CRB_PEG_NET_D \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGND) +#define QLA82XX_CRB_PEG_NET_I \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGNI) +#define QLA82XX_CRB_PQM_MD \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMS) +#define QLA82XX_CRB_PQM_NET \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMN) +#define QLA82XX_CRB_QDR_MD \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SS) +#define QLA82XX_CRB_QDR_NET \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SN) +#define QLA82XX_CRB_ROMUSB \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_ROMUSB) +#define QLA82XX_CRB_RPMX_0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX0) +#define QLA82XX_CRB_RPMX_1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX1) +#define QLA82XX_CRB_RPMX_2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX2) +#define 
QLA82XX_CRB_RPMX_3 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX3) +#define QLA82XX_CRB_RPMX_4 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX4) +#define QLA82XX_CRB_RPMX_5 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX5) +#define QLA82XX_CRB_RPMX_6 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX6) +#define QLA82XX_CRB_RPMX_7 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX7) +#define QLA82XX_CRB_SQM_MD_0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS0) +#define QLA82XX_CRB_SQM_MD_1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS1) +#define QLA82XX_CRB_SQM_MD_2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS2) +#define QLA82XX_CRB_SQM_MD_3 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS3) +#define QLA82XX_CRB_SQM_NET_0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN0) +#define QLA82XX_CRB_SQM_NET_1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN1) +#define QLA82XX_CRB_SQM_NET_2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN2) +#define QLA82XX_CRB_SQM_NET_3 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN3) +#define QLA82XX_CRB_SRE \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SRE) +#define QLA82XX_CRB_TIMER \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_TIMR) +#define QLA82XX_CRB_XDMA \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_XDMA) +#define QLA82XX_CRB_I2C0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C0) +#define QLA82XX_CRB_I2C1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C1) +#define QLA82XX_CRB_OCM0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_OCM0) +#define QLA82XX_CRB_SMB \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SMB) +#define QLA82XX_CRB_MAX \ + QLA82XX_PCI_CRB_WINDOW(64) + +/* + * ====================== BASE ADDRESSES ON-CHIP ====================== + * Base addresses of major components on-chip. + * ====================== BASE ADDRESSES ON-CHIP ====================== + */ +#define QLA82XX_ADDR_DDR_NET (0x0000000000000000ULL) +#define QLA82XX_ADDR_DDR_NET_MAX (0x000000000fffffffULL) + +/* Imbus address bit used to indicate a host address. This bit is + * eliminated by the pcie bar and bar select before presentation + * over pcie. 
*/ +/* host memory via IMBUS */ +#define QLA82XX_P2_ADDR_PCIE (0x0000000800000000ULL) +#define QLA82XX_P3_ADDR_PCIE (0x0000008000000000ULL) +#define QLA82XX_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL) +#define QLA82XX_ADDR_OCM0 (0x0000000200000000ULL) +#define QLA82XX_ADDR_OCM0_MAX (0x00000002000fffffULL) +#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL) +#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL) +#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL) +#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) + +#define QLA82XX_PCI_CRBSPACE 0x06000000UL +#define QLA82XX_PCI_DIRECT_CRB 0x04400000UL +#define QLA82XX_PCI_CAMQM 0x04800000UL +#define QLA82XX_PCI_CAMQM_MAX 0x04ffffffUL +#define QLA82XX_PCI_DDR_NET 0x00000000UL +#define QLA82XX_PCI_QDR_NET 0x04000000UL +#define QLA82XX_PCI_QDR_NET_MAX 0x043fffffUL + +/* + * Register offsets for MN + */ +#define MIU_CONTROL (0x000) +#define MIU_TAG (0x004) +#define MIU_TEST_AGT_CTRL (0x090) +#define MIU_TEST_AGT_ADDR_LO (0x094) +#define MIU_TEST_AGT_ADDR_HI (0x098) +#define MIU_TEST_AGT_WRDATA_LO (0x0a0) +#define MIU_TEST_AGT_WRDATA_HI (0x0a4) +#define MIU_TEST_AGT_WRDATA(i) (0x0a0+(4*(i))) +#define MIU_TEST_AGT_RDDATA_LO (0x0a8) +#define MIU_TEST_AGT_RDDATA_HI (0x0ac) +#define MIU_TEST_AGT_RDDATA(i) (0x0a8+(4*(i))) +#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 +#define MIU_TEST_AGT_UPPER_ADDR(off) (0) + +/* MIU_TEST_AGT_CTRL flags. work for SIU as well */ +#define MIU_TA_CTL_START 1 +#define MIU_TA_CTL_ENABLE 2 +#define MIU_TA_CTL_WRITE 4 +#define MIU_TA_CTL_BUSY 8 + +/*CAM RAM */ +# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000) +# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg)) + +#define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24)) +#define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8)) +#define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac)) +#define QLA82XX_PEG_ALIVE_COUNTER (QLA82XX_CAM_RAM(0xb0)) + +#define QLA82XX_CAMRAM_DB1 (QLA82XX_CAM_RAM(0x1b8)) +#define QLA82XX_CAMRAM_DB2 (QLA82XX_CAM_RAM(0x1bc)) + +#define HALT_STATUS_UNRECOVERABLE 0x80000000 +#define HALT_STATUS_RECOVERABLE 0x40000000 + +/* Driver Coexistence Defines */ +#define QLA82XX_CRB_DRV_ACTIVE (QLA82XX_CAM_RAM(0x138)) +#define QLA82XX_CRB_DEV_STATE (QLA82XX_CAM_RAM(0x140)) +#define QLA82XX_CRB_DRV_STATE (QLA82XX_CAM_RAM(0x144)) +#define QLA82XX_CRB_DRV_SCRATCH (QLA82XX_CAM_RAM(0x148)) +#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c)) +#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174)) + +/* Every driver should use these Device State */ +enum { + QLA8XXX_DEV_UNKNOWN, + QLA8XXX_DEV_COLD, + QLA8XXX_DEV_INITIALIZING, + QLA8XXX_DEV_READY, + QLA8XXX_DEV_NEED_RESET, + QLA8XXX_DEV_NEED_QUIESCENT, + QLA8XXX_DEV_FAILED, + QLA8XXX_DEV_QUIESCENT, + MAX_STATES, /* Increment if new state added */ +}; + +#define QLA8XXX_BAD_VALUE 0xbad0bad0 + +#define QLA82XX_IDC_VERSION 1 +#define QLA82XX_ROM_DEV_INIT_TIMEOUT 30 +#define QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT 10 + +#define QLA82XX_ROM_LOCK_ID (QLA82XX_CAM_RAM(0x100)) +#define QLA82XX_CRB_WIN_LOCK_ID (QLA82XX_CAM_RAM(0x124)) +#define QLA82XX_FW_VERSION_MAJOR (QLA82XX_CAM_RAM(0x150)) +#define QLA82XX_FW_VERSION_MINOR (QLA82XX_CAM_RAM(0x154)) +#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158)) +#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg)) + +#define PCIE_SETUP_FUNCTION (0x12040) +#define PCIE_SETUP_FUNCTION2 (0x12048) + +#define QLA82XX_PCIX_PS_REG(reg) (QLA82XX_CRB_PCIX_MD + (reg)) +#define QLA82XX_PCIX_PS2_REG(reg) (QLA82XX_CRB_PCIE2 + (reg)) + +#define PCIE_SEM2_LOCK (0x1c010) 
/* Flash lock */ +#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */ +#define PCIE_SEM5_LOCK (0x1c028) /* Coexistence lock */ +#define PCIE_SEM5_UNLOCK (0x1c02c) /* Coexistence unlock */ +#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */ +#define PCIE_SEM7_UNLOCK (0x1c03c) /* crbwin unlock*/ + +/* Different drive state */ +#define QLA82XX_DRVST_NOT_RDY 0 +#define QLA82XX_DRVST_RST_RDY 1 +#define QLA82XX_DRVST_QSNT_RDY 2 + +/* Different drive active state */ +#define QLA82XX_DRV_NOT_ACTIVE 0 +#define QLA82XX_DRV_ACTIVE 1 + +/* + * The PCI VendorID and DeviceID for our board. + */ +#define PCI_DEVICE_ID_QLOGIC_ISP8021 0x8021 +#define PCI_DEVICE_ID_QLOGIC_ISP8044 0x8044 + +#define QLA82XX_MSIX_TBL_SPACE 8192 +#define QLA82XX_PCI_REG_MSIX_TBL 0x44 +#define QLA82XX_PCI_MSIX_CONTROL 0x40 + +struct crb_128M_2M_sub_block_map { + unsigned valid; + unsigned start_128M; + unsigned end_128M; + unsigned start_2M; +}; + +struct crb_128M_2M_block_map { + struct crb_128M_2M_sub_block_map sub_block[16]; +}; + +struct crb_addr_pair { + long addr; + long data; +}; + +#define ADDR_ERROR ((unsigned long) 0xffffffff) +#define MAX_CTL_CHECK 1000 + +/*************************************************************************** + * PCI related defines. + **************************************************************************/ + +/* + * Interrupt related defines. + */ +#define PCIX_TARGET_STATUS (0x10118) +#define PCIX_TARGET_STATUS_F1 (0x10160) +#define PCIX_TARGET_STATUS_F2 (0x10164) +#define PCIX_TARGET_STATUS_F3 (0x10168) +#define PCIX_TARGET_STATUS_F4 (0x10360) +#define PCIX_TARGET_STATUS_F5 (0x10364) +#define PCIX_TARGET_STATUS_F6 (0x10368) +#define PCIX_TARGET_STATUS_F7 (0x1036c) + +#define PCIX_TARGET_MASK (0x10128) +#define PCIX_TARGET_MASK_F1 (0x10170) +#define PCIX_TARGET_MASK_F2 (0x10174) +#define PCIX_TARGET_MASK_F3 (0x10178) +#define PCIX_TARGET_MASK_F4 (0x10370) +#define PCIX_TARGET_MASK_F5 (0x10374) +#define PCIX_TARGET_MASK_F6 (0x10378) +#define PCIX_TARGET_MASK_F7 (0x1037c) + +/* + * Message Signaled Interrupts + */ +#define PCIX_MSI_F0 (0x13000) +#define PCIX_MSI_F1 (0x13004) +#define PCIX_MSI_F2 (0x13008) +#define PCIX_MSI_F3 (0x1300c) +#define PCIX_MSI_F4 (0x13010) +#define PCIX_MSI_F5 (0x13014) +#define PCIX_MSI_F6 (0x13018) +#define PCIX_MSI_F7 (0x1301c) +#define PCIX_MSI_F(FUNC) (0x13000 + ((FUNC) * 4)) +#define PCIX_INT_VECTOR (0x10100) +#define PCIX_INT_MASK (0x10104) + +/* + * Interrupt state machine and other bits. 
+ */
+#define PCIE_MISCCFG_RC (0x1206c)
+
+#define ISR_INT_TARGET_STATUS \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_STATUS_F1 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_STATUS_F2 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_STATUS_F3 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_STATUS_F4 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_STATUS_F5 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_STATUS_F6 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_STATUS_F7 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+
+#define ISR_INT_TARGET_MASK \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_MASK_F1 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_MASK_F2 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_MASK_F3 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_MASK_F4 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_MASK_F5 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_MASK_F6 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_MASK_F7 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define ISR_INT_VECTOR \
+ (QLA82XX_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK \
+ (QLA82XX_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_STATE_REG \
+ (QLA82XX_PCIX_PS_REG(PCIE_MISCCFG_RC))
+
+#define ISR_MSI_INT_TRIGGER(FUNC) \
+ (QLA82XX_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+
+#define ISR_IS_LEGACY_INTR_IDLE(VAL) (((VAL) & 0x300) == 0)
+#define ISR_IS_LEGACY_INTR_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */ +#define PCIX_INT_VECTOR_BIT_F0 0x0080 +#define PCIX_INT_VECTOR_BIT_F1 0x0100 +#define PCIX_INT_VECTOR_BIT_F2 0x0200 +#define PCIX_INT_VECTOR_BIT_F3 0x0400 +#define PCIX_INT_VECTOR_BIT_F4 0x0800 +#define PCIX_INT_VECTOR_BIT_F5 0x1000 +#define PCIX_INT_VECTOR_BIT_F6 0x2000 +#define PCIX_INT_VECTOR_BIT_F7 0x4000 + +struct qla82xx_legacy_intr_set { + uint32_t int_vec_bit; + uint32_t tgt_status_reg; + uint32_t tgt_mask_reg; + uint32_t pci_int_reg; +}; + +#define QLA82XX_LEGACY_INTR_CONFIG \ +{ \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \ +} + +#define BRDCFG_START 0x4000 +#define BOOTLD_START 0x10000 +#define IMAGE_START 0x100000 +#define FLASH_ADDR_START 0x43000 + +/* Magic number to let user know flash is programmed */ +#define QLA82XX_BDINFO_MAGIC 0x12345678 +#define QLA82XX_FW_MAGIC_OFFSET (BRDCFG_START + 0x128) +#define FW_SIZE_OFFSET (0x3e840c) +#define QLA82XX_FW_MIN_SIZE 0x3fffff + +/* UNIFIED ROMIMAGE START */ +#define QLA82XX_URI_FW_MIN_SIZE 0xc8000 +#define QLA82XX_URI_DIR_SECT_PRODUCT_TBL 0x0 +#define QLA82XX_URI_DIR_SECT_BOOTLD 0x6 +#define QLA82XX_URI_DIR_SECT_FW 0x7 + +/* Offsets */ +#define QLA82XX_URI_CHIP_REV_OFF 10 +#define QLA82XX_URI_FLAGS_OFF 11 +#define QLA82XX_URI_BIOS_VERSION_OFF 12 +#define QLA82XX_URI_BOOTLD_IDX_OFF 27 +#define QLA82XX_URI_FIRMWARE_IDX_OFF 29 + +struct qla82xx_uri_table_desc{ + __le32 findex; + __le32 num_entries; + __le32 entry_size; + __le32 reserved[5]; +}; + +struct qla82xx_uri_data_desc{ + __le32 findex; + __le32 size; + __le32 reserved[5]; +}; + +/* UNIFIED ROMIMAGE END */ + +#define QLA82XX_UNIFIED_ROMIMAGE 3 +#define QLA82XX_FLASH_ROMIMAGE 4 +#define QLA82XX_UNKNOWN_ROMIMAGE 0xff + +#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0) +#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4) + +/* Request and response queue size */ +#define REQUEST_ENTRY_CNT_82XX 128 /* Number of request entries. */ +#define RESPONSE_ENTRY_CNT_82XX 128 /* Number of response entries.*/ + +/* + * ISP 8021 I/O Register Set structure definitions. 
+ */ +struct device_reg_82xx { + __le32 req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */ + __le32 rsp_q_in[64]; /* Response Queue In-Pointer. */ + __le32 rsp_q_out[64]; /* Response Queue Out-Pointer. */ + + __le16 mailbox_in[32]; /* Mailbox In registers */ + __le16 unused_1[32]; + __le32 hint; /* Host interrupt register */ +#define HINT_MBX_INT_PENDING BIT_0 + __le16 unused_2[62]; + __le16 mailbox_out[32]; /* Mailbox Out registers */ + __le32 unused_3[48]; + + __le32 host_status; /* host status */ +#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */ +#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */ + __le32 host_int; /* Interrupt status. */ +#define ISRX_NX_RISC_INT BIT_0 /* RISC interrupt. */ +}; + +struct fcp_cmnd { + struct scsi_lun lun; + uint8_t crn; + uint8_t task_attribute; + uint8_t task_management; + uint8_t additional_cdb_len; +#define QLA_CDB_BUF_SIZE 256 +#define QLA_FCP_DL_SIZE 4 + uint8_t cdb[QLA_CDB_BUF_SIZE + QLA_FCP_DL_SIZE]; /* 256 for CDB len and 4 for FCP_DL */ +}; + +struct dsd_dma { + struct list_head list; + dma_addr_t dsd_list_dma; + void *dsd_addr; +}; + +#define QLA_DSDS_PER_IOCB 37 +#define QLA_DSD_SIZE 12 +struct ct6_dsd { + uint16_t fcp_cmnd_len; + dma_addr_t fcp_cmnd_dma; + struct fcp_cmnd *fcp_cmnd; + int dsd_use_cnt; + struct list_head dsd_list; +}; + +#define MBC_TOGGLE_INTERRUPT 0x10 +#define MBC_SET_LED_CONFIG 0x125 /* FCoE specific LED control */ +#define MBC_GET_LED_CONFIG 0x126 /* FCoE specific LED control */ + +/* Flash offset */ +#define FLT_REG_BOOTLOAD_82XX 0x72 +#define FLT_REG_BOOT_CODE_82XX 0x78 +#define FLT_REG_FW_82XX 0x74 +#define FLT_REG_GOLD_FW_82XX 0x75 +#define FLT_REG_VPD_8XXX 0x81 + +#define FA_VPD_SIZE_82XX 0x400 + +#define FA_FLASH_LAYOUT_ADDR_82 0xFC400 + +/****************************************************************************** +* +* Definitions specific to M25P flash +* +******************************************************************************* +* Instructions +*/ +#define M25P_INSTR_WREN 0x06 +#define M25P_INSTR_WRDI 0x04 +#define M25P_INSTR_RDID 0x9f +#define M25P_INSTR_RDSR 0x05 +#define M25P_INSTR_WRSR 0x01 +#define M25P_INSTR_READ 0x03 +#define M25P_INSTR_FAST_READ 0x0b +#define M25P_INSTR_PP 0x02 +#define M25P_INSTR_SE 0xd8 +#define M25P_INSTR_BE 0xc7 +#define M25P_INSTR_DP 0xb9 +#define M25P_INSTR_RES 0xab + +/* Minidump related */ + +/* + * Version of the template + * 4 Bytes + * X.Major.Minor.RELEASE + */ +#define QLA82XX_MINIDUMP_VERSION 0x10101 + +/* + * Entry Type Defines + */ +#define QLA82XX_RDNOP 0 +#define QLA82XX_RDCRB 1 +#define QLA82XX_RDMUX 2 +#define QLA82XX_QUEUE 3 +#define QLA82XX_BOARD 4 +#define QLA82XX_RDSRE 5 +#define QLA82XX_RDOCM 6 +#define QLA82XX_CACHE 10 +#define QLA82XX_L1DAT 11 +#define QLA82XX_L1INS 12 +#define QLA82XX_L2DTG 21 +#define QLA82XX_L2ITG 22 +#define QLA82XX_L2DAT 23 +#define QLA82XX_L2INS 24 +#define QLA82XX_RDROM 71 +#define QLA82XX_RDMEM 72 +#define QLA82XX_CNTRL 98 +#define QLA82XX_TLHDR 99 +#define QLA82XX_RDEND 255 +#define QLA8044_POLLRD 35 +#define QLA8044_RDMUX2 36 +#define QLA8044_L1DTG 8 +#define QLA8044_L1ITG 9 +#define QLA8044_POLLRDMWR 37 + +/* + * Opcodes for Control Entries. + * These Flags are bit fields. 
+ */ +#define QLA82XX_DBG_OPCODE_WR 0x01 +#define QLA82XX_DBG_OPCODE_RW 0x02 +#define QLA82XX_DBG_OPCODE_AND 0x04 +#define QLA82XX_DBG_OPCODE_OR 0x08 +#define QLA82XX_DBG_OPCODE_POLL 0x10 +#define QLA82XX_DBG_OPCODE_RDSTATE 0x20 +#define QLA82XX_DBG_OPCODE_WRSTATE 0x40 +#define QLA82XX_DBG_OPCODE_MDSTATE 0x80 + +/* + * Template Header and Entry Header definitions start here. + */ + +/* + * Template Header + * Parts of the template header can be modified by the driver. + * These include the saved_state_array, capture_debug_level, driver_timestamp + */ + +#define QLA82XX_DBG_STATE_ARRAY_LEN 16 +#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8 +#define QLA82XX_DBG_RSVD_ARRAY_LEN 8 + +/* + * Driver Flags + */ +#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */ +#define QLA82XX_DEFAULT_CAP_MASK 0xFF /* default capture mask */ + +struct qla82xx_md_template_hdr { + uint32_t entry_type; + uint32_t first_entry_offset; + uint32_t size_of_template; + uint32_t capture_debug_level; + + uint32_t num_of_entries; + uint32_t version; + uint32_t driver_timestamp; + uint32_t template_checksum; + + uint32_t driver_capture_mask; + uint32_t driver_info[3]; + + uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN]; + uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN]; + + /* markers_array used to capture some special locations on board */ + uint32_t markers_array[QLA82XX_DBG_RSVD_ARRAY_LEN]; + uint32_t num_of_free_entries; /* For internal use */ + uint32_t free_entry_offset; /* For internal use */ + uint32_t total_table_size; /* For internal use */ + uint32_t bkup_table_offset; /* For internal use */ +} __packed; + +/* + * Entry Header: Common to All Entry Types + */ + +/* + * Driver Code is for driver to write some info about the entry. + * Currently not used. 
+ */ +typedef struct qla82xx_md_entry_hdr { + uint32_t entry_type; + uint32_t entry_size; + uint32_t entry_capture_size; + struct { + uint8_t entry_capture_mask; + uint8_t entry_code; + uint8_t driver_code; + uint8_t driver_flags; + } d_ctrl; +} __packed qla82xx_md_entry_hdr_t; + +/* + * Read CRB entry header + */ +struct qla82xx_md_entry_crb { + qla82xx_md_entry_hdr_t h; + uint32_t addr; + struct { + uint8_t addr_stride; + uint8_t state_index_a; + uint16_t poll_timeout; + } crb_strd; + + uint32_t data_size; + uint32_t op_count; + + struct { + uint8_t opcode; + uint8_t state_index_v; + uint8_t shl; + uint8_t shr; + } crb_ctrl; + + uint32_t value_1; + uint32_t value_2; + uint32_t value_3; +} __packed; + +/* + * Cache entry header + */ +struct qla82xx_md_entry_cache { + qla82xx_md_entry_hdr_t h; + + uint32_t tag_reg_addr; + struct { + uint16_t tag_value_stride; + uint16_t init_tag_value; + } addr_ctrl; + + uint32_t data_size; + uint32_t op_count; + + uint32_t control_addr; + struct { + uint16_t write_value; + uint8_t poll_mask; + uint8_t poll_wait; + } cache_ctrl; + + uint32_t read_addr; + struct { + uint8_t read_addr_stride; + uint8_t read_addr_cnt; + uint16_t rsvd_1; + } read_ctrl; +} __packed; + +/* + * Read OCM + */ +struct qla82xx_md_entry_rdocm { + qla82xx_md_entry_hdr_t h; + + uint32_t rsvd_0; + uint32_t rsvd_1; + uint32_t data_size; + uint32_t op_count; + + uint32_t rsvd_2; + uint32_t rsvd_3; + uint32_t read_addr; + uint32_t read_addr_stride; + uint32_t read_addr_cntrl; +} __packed; + +/* + * Read Memory + */ +struct qla82xx_md_entry_rdmem { + qla82xx_md_entry_hdr_t h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +} __packed; + +/* + * Read ROM + */ +struct qla82xx_md_entry_rdrom { + qla82xx_md_entry_hdr_t h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +} __packed; + +struct qla82xx_md_entry_mux { + qla82xx_md_entry_hdr_t h; + + uint32_t select_addr; + uint32_t rsvd_0; + uint32_t data_size; + uint32_t op_count; + + uint32_t select_value; + uint32_t select_value_stride; + uint32_t read_addr; + uint32_t rsvd_1; +} __packed; + +struct qla82xx_md_entry_queue { + qla82xx_md_entry_hdr_t h; + + uint32_t select_addr; + struct { + uint16_t queue_id_stride; + uint16_t rsvd_0; + } q_strd; + + uint32_t data_size; + uint32_t op_count; + uint32_t rsvd_1; + uint32_t rsvd_2; + + uint32_t read_addr; + struct { + uint8_t read_addr_stride; + uint8_t read_addr_cnt; + uint16_t rsvd_3; + } rd_strd; +} __packed; + +#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129 +#define RQST_TMPLT_SIZE 0x0 +#define RQST_TMPLT 0x1 +#define MD_DIRECT_ROM_WINDOW 0x42110030 +#define MD_DIRECT_ROM_READ_BASE 0x42150000 +#define MD_MIU_TEST_AGT_CTRL 0x41000090 +#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 +#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 + +extern const int MD_MIU_TEST_AGT_RDDATA[4]; + +#define CRB_NIU_XG_PAUSE_CTL_P0 0x1 +#define CRB_NIU_XG_PAUSE_CTL_P1 0x8 + +#define qla82xx_get_temp_val(x) ((x) >> 16) +#define qla82xx_get_temp_state(x) ((x) & 0xffff) +#define qla82xx_encode_temp(val, state) (((val) << 16) | (state)) + +/* + * Temperature control. + */ +enum { + QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */ + QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */ + QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. 
*/ +}; + +#define LEG_INTR_PTR_OFFSET 0x38C0 +#define LEG_INTR_TRIG_OFFSET 0x38C4 +#define LEG_INTR_MASK_OFFSET 0x38C8 +#endif diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c new file mode 100644 index 000000000..41ff6fbdb --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_nx2.c @@ -0,0 +1,4075 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ + +#include +#include + +#include "qla_def.h" +#include "qla_gbl.h" + +#define TIMEOUT_100_MS 100 + +static const uint32_t qla8044_reg_tbl[] = { + QLA8044_PEG_HALT_STATUS1, + QLA8044_PEG_HALT_STATUS2, + QLA8044_PEG_ALIVE_COUNTER, + QLA8044_CRB_DRV_ACTIVE, + QLA8044_CRB_DEV_STATE, + QLA8044_CRB_DRV_STATE, + QLA8044_CRB_DRV_SCRATCH, + QLA8044_CRB_DEV_PART_INFO1, + QLA8044_CRB_IDC_VER_MAJOR, + QLA8044_FW_VER_MAJOR, + QLA8044_FW_VER_MINOR, + QLA8044_FW_VER_SUB, + QLA8044_CMDPEG_STATE, + QLA8044_ASIC_TEMP, +}; + +/* 8044 Flash Read/Write functions */ +uint32_t +qla8044_rd_reg(struct qla_hw_data *ha, ulong addr) +{ + return readl((void __iomem *) (ha->nx_pcibase + addr)); +} + +void +qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val) +{ + writel(val, (void __iomem *)((ha)->nx_pcibase + addr)); +} + +int +qla8044_rd_direct(struct scsi_qla_host *vha, + const uint32_t crb_reg) +{ + struct qla_hw_data *ha = vha->hw; + + if (crb_reg < CRB_REG_INDEX_MAX) + return qla8044_rd_reg(ha, qla8044_reg_tbl[crb_reg]); + else + return QLA_FUNCTION_FAILED; +} + +void +qla8044_wr_direct(struct scsi_qla_host *vha, + const uint32_t crb_reg, + const uint32_t value) +{ + struct qla_hw_data *ha = vha->hw; + + if (crb_reg < CRB_REG_INDEX_MAX) + qla8044_wr_reg(ha, qla8044_reg_tbl[crb_reg], value); +} + +static int +qla8044_set_win_base(scsi_qla_host_t *vha, uint32_t addr) +{ + uint32_t val; + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + qla8044_wr_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum), addr); + val = qla8044_rd_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum)); + + if (val != addr) { + ql_log(ql_log_warn, vha, 0xb087, + "%s: Failed to set register window : " + "addr written 0x%x, read 0x%x!\n", + __func__, addr, val); + ret_val = QLA_FUNCTION_FAILED; + } + return ret_val; +} + +static int +qla8044_rd_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) +{ + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + ret_val = qla8044_set_win_base(vha, addr); + if (!ret_val) + *data = qla8044_rd_reg(ha, QLA8044_WILDCARD); + else + ql_log(ql_log_warn, vha, 0xb088, + "%s: failed read of addr 0x%x!\n", __func__, addr); + return ret_val; +} + +static int +qla8044_wr_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) +{ + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + ret_val = qla8044_set_win_base(vha, addr); + if (!ret_val) + qla8044_wr_reg(ha, QLA8044_WILDCARD, data); + else + ql_log(ql_log_warn, vha, 0xb089, + "%s: failed wrt to addr 0x%x, data 0x%x\n", + __func__, addr, data); + return ret_val; +} + +/* + * qla8044_read_write_crb_reg - Read from raddr and write value to waddr. 
+ * + * @ha : Pointer to adapter structure + * @raddr : CRB address to read from + * @waddr : CRB address to write to + * + */ +static void +qla8044_read_write_crb_reg(struct scsi_qla_host *vha, + uint32_t raddr, uint32_t waddr) +{ + uint32_t value; + + qla8044_rd_reg_indirect(vha, raddr, &value); + qla8044_wr_reg_indirect(vha, waddr, value); +} + +static int +qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1, + uint32_t mask) +{ + unsigned long timeout; + uint32_t temp = 0; + + /* jiffies after 100ms */ + timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); + do { + qla8044_rd_reg_indirect(vha, addr1, &temp); + if ((temp & mask) != 0) + break; + if (time_after_eq(jiffies, timeout)) { + ql_log(ql_log_warn, vha, 0xb151, + "Error in processing rdmdio entry\n"); + return -1; + } + } while (1); + + return 0; +} + +static uint32_t +qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha, + uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr) +{ + uint32_t temp; + int ret = 0; + + ret = qla8044_poll_wait_for_ready(vha, addr1, mask); + if (ret == -1) + return -1; + + temp = (0x40000000 | addr); + qla8044_wr_reg_indirect(vha, addr1, temp); + + ret = qla8044_poll_wait_for_ready(vha, addr1, mask); + if (ret == -1) + return 0; + + qla8044_rd_reg_indirect(vha, addr3, &ret); + + return ret; +} + + +static int +qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha, + uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask) +{ + unsigned long timeout; + uint32_t temp; + + /* jiffies after 100 msecs */ + timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); + do { + temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2); + if ((temp & 0x1) != 1) + break; + if (time_after_eq(jiffies, timeout)) { + ql_log(ql_log_warn, vha, 0xb152, + "Error in processing mdiobus idle\n"); + return -1; + } + } while (1); + + return 0; +} + +static int +qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1, + uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value) +{ + int ret = 0; + + ret = qla8044_poll_wait_for_ready(vha, addr1, mask); + if (ret == -1) + return -1; + + qla8044_wr_reg_indirect(vha, addr3, value); + qla8044_wr_reg_indirect(vha, addr1, addr); + + ret = qla8044_poll_wait_for_ready(vha, addr1, mask); + if (ret == -1) + return -1; + + return 0; +} +/* + * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask, + * Shift Left,Right/OR/XOR with values RMW header and write value to waddr. + * + * @vha : Pointer to adapter structure + * @raddr : CRB address to read from + * @waddr : CRB address to write to + * @p_rmw_hdr : header with shift/or/xor values. 
+ * + */ +static void +qla8044_rmw_crb_reg(struct scsi_qla_host *vha, + uint32_t raddr, uint32_t waddr, struct qla8044_rmw *p_rmw_hdr) +{ + uint32_t value; + + if (p_rmw_hdr->index_a) + value = vha->reset_tmplt.array[p_rmw_hdr->index_a]; + else + qla8044_rd_reg_indirect(vha, raddr, &value); + value &= p_rmw_hdr->test_mask; + value <<= p_rmw_hdr->shl; + value >>= p_rmw_hdr->shr; + value |= p_rmw_hdr->or_value; + value ^= p_rmw_hdr->xor_value; + qla8044_wr_reg_indirect(vha, waddr, value); + return; +} + +static inline void +qla8044_set_qsnt_ready(struct scsi_qla_host *vha) +{ + uint32_t qsnt_state; + struct qla_hw_data *ha = vha->hw; + + qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + qsnt_state |= (1 << ha->portnum); + qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state); + ql_log(ql_log_info, vha, 0xb08e, "%s(%ld): qsnt_state: 0x%08x\n", + __func__, vha->host_no, qsnt_state); +} + +void +qla8044_clear_qsnt_ready(struct scsi_qla_host *vha) +{ + uint32_t qsnt_state; + struct qla_hw_data *ha = vha->hw; + + qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + qsnt_state &= ~(1 << ha->portnum); + qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state); + ql_log(ql_log_info, vha, 0xb08f, "%s(%ld): qsnt_state: 0x%08x\n", + __func__, vha->host_no, qsnt_state); +} + +/** + * qla8044_lock_recovery - Recovers the idc_lock. + * @vha : Pointer to adapter structure + * + * Lock Recovery Register + * 5-2 Lock recovery owner: Function ID of driver doing lock recovery, + * valid if bits 1..0 are set by driver doing lock recovery. + * 1-0 1 - Driver intends to force unlock the IDC lock. + * 2 - Driver is moving forward to unlock the IDC lock. Driver clears + * this field after force unlocking the IDC lock. + * + * Lock Recovery process + * a. Read the IDC_LOCK_RECOVERY register. If the value in bits 1..0 is + * greater than 0, then wait for the other driver to unlock otherwise + * move to the next step. + * b. Indicate intent to force-unlock by writing 1h to the IDC_LOCK_RECOVERY + * register bits 1..0 and also set the function# in bits 5..2. + * c. Read the IDC_LOCK_RECOVERY register again after a delay of 200ms. + * Wait for the other driver to perform lock recovery if the function + * number in bits 5..2 has changed, otherwise move to the next step. + * d. Write a value of 2h to the IDC_LOCK_RECOVERY register bits 1..0 + * leaving your function# in bits 5..2. + * e. Force unlock using the DRIVER_UNLOCK register and immediately clear + * the IDC_LOCK_RECOVERY bits 5..0 by writing 0. 
+ **/ +static int +qla8044_lock_recovery(struct scsi_qla_host *vha) +{ + uint32_t lock = 0, lockid; + struct qla_hw_data *ha = vha->hw; + + lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY); + + /* Check for other Recovery in progress, go wait */ + if ((lockid & IDC_LOCK_RECOVERY_STATE_MASK) != 0) + return QLA_FUNCTION_FAILED; + + /* Intent to Recover */ + qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, + (ha->portnum << + IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | INTENT_TO_RECOVER); + msleep(200); + + /* Check Intent to Recover is advertised */ + lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY); + if ((lockid & IDC_LOCK_RECOVERY_OWNER_MASK) != (ha->portnum << + IDC_LOCK_RECOVERY_STATE_SHIFT_BITS)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_p3p, vha, 0xb08B, "%s:%d: IDC Lock recovery initiated\n" + , __func__, ha->portnum); + + /* Proceed to Recover */ + qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, + (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | + PROCEED_TO_RECOVER); + + /* Force Unlock() */ + qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, 0xFF); + qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK); + + /* Clear bits 0-5 in IDC_RECOVERY register*/ + qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, 0); + + /* Get lock() */ + lock = qla8044_rd_reg(ha, QLA8044_DRV_LOCK); + if (lock) { + lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); + lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->portnum; + qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lockid); + return QLA_SUCCESS; + } else + return QLA_FUNCTION_FAILED; +} + +int +qla8044_idc_lock(struct qla_hw_data *ha) +{ + uint32_t ret_val = QLA_SUCCESS, timeout = 0, status = 0; + uint32_t lock_id, lock_cnt, func_num, tmo_owner = 0, first_owner = 0; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + while (status == 0) { + /* acquire semaphore5 from PCI HW block */ + status = qla8044_rd_reg(ha, QLA8044_DRV_LOCK); + + if (status) { + /* Increment Counter (8-31) and update func_num (0-7) on + * getting a successful lock */ + lock_id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); + lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->portnum; + qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lock_id); + break; + } + + if (timeout == 0) + first_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); + + if (++timeout >= + (QLA8044_DRV_LOCK_TIMEOUT / QLA8044_DRV_LOCK_MSLEEP)) { + tmo_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); + func_num = tmo_owner & 0xFF; + lock_cnt = tmo_owner >> 8; + ql_log(ql_log_warn, vha, 0xb114, + "%s: Lock by func %d failed after 2s, lock held " + "by func %d, lock count %d, first_owner %d\n", + __func__, ha->portnum, func_num, lock_cnt, + (first_owner & 0xFF)); + if (first_owner != tmo_owner) { + /* Some other driver got lock, + * OR same driver got lock again (counter + * value changed), when we were waiting for + * lock. Retry for another 2 sec */ + ql_dbg(ql_dbg_p3p, vha, 0xb115, + "%s: %d: IDC lock failed\n", + __func__, ha->portnum); + timeout = 0; + } else { + /* Same driver holding lock > 2sec. 
+ * Force Recovery */ + if (qla8044_lock_recovery(vha) == QLA_SUCCESS) { + /* Recovered and got lock */ + ret_val = QLA_SUCCESS; + ql_dbg(ql_dbg_p3p, vha, 0xb116, + "%s:IDC lock Recovery by %d" + "successful...\n", __func__, + ha->portnum); + } + /* Recovery Failed, some other function + * has the lock, wait for 2secs + * and retry + */ + ql_dbg(ql_dbg_p3p, vha, 0xb08a, + "%s: IDC lock Recovery by %d " + "failed, Retrying timeout\n", __func__, + ha->portnum); + timeout = 0; + } + } + msleep(QLA8044_DRV_LOCK_MSLEEP); + } + return ret_val; +} + +void +qla8044_idc_unlock(struct qla_hw_data *ha) +{ + int id; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); + + if ((id & 0xFF) != ha->portnum) { + ql_log(ql_log_warn, vha, 0xb118, + "%s: IDC Unlock by %d failed, lock owner is %d!\n", + __func__, ha->portnum, (id & 0xFF)); + return; + } + + /* Keep lock counter value, update the ha->func_num to 0xFF */ + qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, (id | 0xFF)); + qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK); +} + +/* 8044 Flash Lock/Unlock functions */ +static int +qla8044_flash_lock(scsi_qla_host_t *vha) +{ + int lock_owner; + int timeout = 0; + uint32_t lock_status = 0; + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + while (lock_status == 0) { + lock_status = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK); + if (lock_status) + break; + + if (++timeout >= QLA8044_FLASH_LOCK_TIMEOUT / 20) { + lock_owner = qla8044_rd_reg(ha, + QLA8044_FLASH_LOCK_ID); + ql_log(ql_log_warn, vha, 0xb113, + "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d", + __func__, ha->portnum, lock_owner); + ret_val = QLA_FUNCTION_FAILED; + break; + } + msleep(20); + } + qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, ha->portnum); + return ret_val; +} + +static void +qla8044_flash_unlock(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + /* Reading FLASH_UNLOCK register unlocks the Flash */ + qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF); + qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK); +} + + +static +void qla8044_flash_lock_recovery(struct scsi_qla_host *vha) +{ + + if (qla8044_flash_lock(vha)) { + /* Someone else is holding the lock. */ + ql_log(ql_log_warn, vha, 0xb120, "Resetting flash_lock\n"); + } + + /* + * Either we got the lock, or someone + * else died while holding it. + * In either case, unlock. + */ + qla8044_flash_unlock(vha); +} + +/* + * Address and length are byte address + */ +static int +qla8044_read_flash_data(scsi_qla_host_t *vha, uint8_t *p_data, + uint32_t flash_addr, int u32_word_count) +{ + int i, ret_val = QLA_SUCCESS; + uint32_t u32_word; + + if (qla8044_flash_lock(vha) != QLA_SUCCESS) { + ret_val = QLA_FUNCTION_FAILED; + goto exit_lock_error; + } + + if (flash_addr & 0x03) { + ql_log(ql_log_warn, vha, 0xb117, + "%s: Illegal addr = 0x%x\n", __func__, flash_addr); + ret_val = QLA_FUNCTION_FAILED; + goto exit_flash_read; + } + + for (i = 0; i < u32_word_count; i++) { + if (qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW, + (flash_addr & 0xFFFF0000))) { + ql_log(ql_log_warn, vha, 0xb119, + "%s: failed to write addr 0x%x to " + "FLASH_DIRECT_WINDOW\n! 
", + __func__, flash_addr); + ret_val = QLA_FUNCTION_FAILED; + goto exit_flash_read; + } + + ret_val = qla8044_rd_reg_indirect(vha, + QLA8044_FLASH_DIRECT_DATA(flash_addr), + &u32_word); + if (ret_val != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xb08c, + "%s: failed to read addr 0x%x!\n", + __func__, flash_addr); + goto exit_flash_read; + } + + *(uint32_t *)p_data = u32_word; + p_data = p_data + 4; + flash_addr = flash_addr + 4; + } + +exit_flash_read: + qla8044_flash_unlock(vha); + +exit_lock_error: + return ret_val; +} + +/* + * Address and length are byte address + */ +void * +qla8044_read_optrom_data(struct scsi_qla_host *vha, void *buf, + uint32_t offset, uint32_t length) +{ + scsi_block_requests(vha->host); + if (qla8044_read_flash_data(vha, buf, offset, length / 4) + != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xb08d, + "%s: Failed to read from flash\n", + __func__); + } + scsi_unblock_requests(vha->host); + return buf; +} + +static inline int +qla8044_need_reset(struct scsi_qla_host *vha) +{ + uint32_t drv_state, drv_active; + int rval; + struct qla_hw_data *ha = vha->hw; + + drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); + drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + + rval = drv_state & (1 << ha->portnum); + + if (ha->flags.eeh_busy && drv_active) + rval = 1; + return rval; +} + +/* + * qla8044_write_list - Write the value (p_entry->arg2) to address specified + * by p_entry->arg1 for all entries in header with delay of p_hdr->delay between + * entries. + * + * @vha : Pointer to adapter structure + * @p_hdr : reset_entry header for WRITE_LIST opcode. + * + */ +static void +qla8044_write_list(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + struct qla8044_entry *p_entry; + uint32_t i; + + p_entry = (struct qla8044_entry *)((char *)p_hdr + + sizeof(struct qla8044_reset_entry_hdr)); + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2); + if (p_hdr->delay) + udelay((uint32_t)(p_hdr->delay)); + } +} + +/* + * qla8044_read_write_list - Read from address specified by p_entry->arg1, + * write value read to address specified by p_entry->arg2, for all entries in + * header with delay of p_hdr->delay between entries. + * + * @vha : Pointer to adapter structure + * @p_hdr : reset_entry header for READ_WRITE_LIST opcode. + * + */ +static void +qla8044_read_write_list(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + struct qla8044_entry *p_entry; + uint32_t i; + + p_entry = (struct qla8044_entry *)((char *)p_hdr + + sizeof(struct qla8044_reset_entry_hdr)); + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla8044_read_write_crb_reg(vha, p_entry->arg1, + p_entry->arg2); + if (p_hdr->delay) + udelay((uint32_t)(p_hdr->delay)); + } +} + +/* + * qla8044_poll_reg - Poll the given CRB addr for duration msecs till + * value read ANDed with test_mask is equal to test_result. + * + * @ha : Pointer to adapter structure + * @addr : CRB register address + * @duration : Poll for total of "duration" msecs + * @test_mask : Mask value read with "test_mask" + * @test_result : Compare (value&test_mask) with test_result. 
+ * + * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED + */ +static int +qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr, + int duration, uint32_t test_mask, uint32_t test_result) +{ + uint32_t value = 0; + int timeout_error; + uint8_t retries; + int ret_val = QLA_SUCCESS; + + ret_val = qla8044_rd_reg_indirect(vha, addr, &value); + if (ret_val == QLA_FUNCTION_FAILED) { + timeout_error = 1; + goto exit_poll_reg; + } + + /* poll every 1/10 of the total duration */ + retries = duration/10; + + do { + if ((value & test_mask) != test_result) { + timeout_error = 1; + msleep(duration/10); + ret_val = qla8044_rd_reg_indirect(vha, addr, &value); + if (ret_val == QLA_FUNCTION_FAILED) { + timeout_error = 1; + goto exit_poll_reg; + } + } else { + timeout_error = 0; + break; + } + } while (retries--); + +exit_poll_reg: + if (timeout_error) { + vha->reset_tmplt.seq_error++; + ql_log(ql_log_fatal, vha, 0xb090, + "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n", + __func__, value, test_mask, test_result); + } + + return timeout_error; +} + +/* + * qla8044_poll_list - For all entries in the POLL_LIST header, poll read CRB + * register specified by p_entry->arg1 and compare (value AND test_mask) with + * test_result to validate it. Wait for p_hdr->delay between processing entries. + * + * @ha : Pointer to adapter structure + * @p_hdr : reset_entry header for POLL_LIST opcode. + * + */ +static void +qla8044_poll_list(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + long delay; + struct qla8044_entry *p_entry; + struct qla8044_poll *p_poll; + uint32_t i; + uint32_t value; + + p_poll = (struct qla8044_poll *) + ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr)); + + /* Entries start after 8 byte qla8044_poll, poll header contains + * the test_mask, test_value. + */ + p_entry = (struct qla8044_entry *)((char *)p_poll + + sizeof(struct qla8044_poll)); + + delay = (long)p_hdr->delay; + + if (!delay) { + for (i = 0; i < p_hdr->count; i++, p_entry++) + qla8044_poll_reg(vha, p_entry->arg1, + delay, p_poll->test_mask, p_poll->test_value); + } else { + for (i = 0; i < p_hdr->count; i++, p_entry++) { + if (delay) { + if (qla8044_poll_reg(vha, + p_entry->arg1, delay, + p_poll->test_mask, + p_poll->test_value)) { + /*If + * (data_read&test_mask != test_value) + * read TIMEOUT_ADDR (arg1) and + * ADDR (arg2) registers + */ + qla8044_rd_reg_indirect(vha, + p_entry->arg1, &value); + qla8044_rd_reg_indirect(vha, + p_entry->arg2, &value); + } + } + } + } +} + +/* + * qla8044_poll_write_list - Write dr_value, ar_value to dr_addr/ar_addr, + * read ar_addr, if (value& test_mask != test_mask) re-read till timeout + * expires. + * + * @vha : Pointer to adapter structure + * @p_hdr : reset entry header for POLL_WRITE_LIST opcode. 
+ * + */ +static void +qla8044_poll_write_list(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + long delay; + struct qla8044_quad_entry *p_entry; + struct qla8044_poll *p_poll; + uint32_t i; + + p_poll = (struct qla8044_poll *)((char *)p_hdr + + sizeof(struct qla8044_reset_entry_hdr)); + + p_entry = (struct qla8044_quad_entry *)((char *)p_poll + + sizeof(struct qla8044_poll)); + + delay = (long)p_hdr->delay; + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla8044_wr_reg_indirect(vha, + p_entry->dr_addr, p_entry->dr_value); + qla8044_wr_reg_indirect(vha, + p_entry->ar_addr, p_entry->ar_value); + if (delay) { + if (qla8044_poll_reg(vha, + p_entry->ar_addr, delay, + p_poll->test_mask, + p_poll->test_value)) { + ql_dbg(ql_dbg_p3p, vha, 0xb091, + "%s: Timeout Error: poll list, ", + __func__); + ql_dbg(ql_dbg_p3p, vha, 0xb092, + "item_num %d, entry_num %d\n", i, + vha->reset_tmplt.seq_index); + } + } + } +} + +/* + * qla8044_read_modify_write - Read value from p_entry->arg1, modify the + * value, write value to p_entry->arg2. Process entries with p_hdr->delay + * between entries. + * + * @vha : Pointer to adapter structure + * @p_hdr : header with shift/or/xor values. + * + */ +static void +qla8044_read_modify_write(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + struct qla8044_entry *p_entry; + struct qla8044_rmw *p_rmw_hdr; + uint32_t i; + + p_rmw_hdr = (struct qla8044_rmw *)((char *)p_hdr + + sizeof(struct qla8044_reset_entry_hdr)); + + p_entry = (struct qla8044_entry *)((char *)p_rmw_hdr + + sizeof(struct qla8044_rmw)); + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla8044_rmw_crb_reg(vha, p_entry->arg1, + p_entry->arg2, p_rmw_hdr); + if (p_hdr->delay) + udelay((uint32_t)(p_hdr->delay)); + } +} + +/* + * qla8044_pause - Wait for p_hdr->delay msecs, called between processing + * two entries of a sequence. + * + * @vha : Pointer to adapter structure + * @p_hdr : Common reset entry header. + * + */ +static +void qla8044_pause(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + if (p_hdr->delay) + mdelay((uint32_t)((long)p_hdr->delay)); +} + +/* + * qla8044_template_end - Indicates end of reset sequence processing. + * + * @vha : Pointer to adapter structure + * @p_hdr : Common reset entry header. + * + */ +static void +qla8044_template_end(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + vha->reset_tmplt.template_end = 1; + + if (vha->reset_tmplt.seq_error == 0) { + ql_dbg(ql_dbg_p3p, vha, 0xb093, + "%s: Reset sequence completed SUCCESSFULLY.\n", __func__); + } else { + ql_log(ql_log_fatal, vha, 0xb094, + "%s: Reset sequence completed with some timeout " + "errors.\n", __func__); + } +} + +/* + * qla8044_poll_read_list - Write ar_value to ar_addr register, read ar_addr, + * if (value & test_mask != test_value) re-read till timeout value expires, + * read dr_addr register and assign to reset_tmplt.array. + * + * @vha : Pointer to adapter structure + * @p_hdr : Common reset entry header. 
+ * + */ +static void +qla8044_poll_read_list(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + long delay; + int index; + struct qla8044_quad_entry *p_entry; + struct qla8044_poll *p_poll; + uint32_t i; + uint32_t value; + + p_poll = (struct qla8044_poll *) + ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr)); + + p_entry = (struct qla8044_quad_entry *) + ((char *)p_poll + sizeof(struct qla8044_poll)); + + delay = (long)p_hdr->delay; + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla8044_wr_reg_indirect(vha, p_entry->ar_addr, + p_entry->ar_value); + if (delay) { + if (qla8044_poll_reg(vha, p_entry->ar_addr, delay, + p_poll->test_mask, p_poll->test_value)) { + ql_dbg(ql_dbg_p3p, vha, 0xb095, + "%s: Timeout Error: poll " + "list, ", __func__); + ql_dbg(ql_dbg_p3p, vha, 0xb096, + "Item_num %d, " + "entry_num %d\n", i, + vha->reset_tmplt.seq_index); + } else { + index = vha->reset_tmplt.array_index; + qla8044_rd_reg_indirect(vha, + p_entry->dr_addr, &value); + vha->reset_tmplt.array[index++] = value; + if (index == QLA8044_MAX_RESET_SEQ_ENTRIES) + vha->reset_tmplt.array_index = 1; + } + } + } +} + +/* + * qla8031_process_reset_template - Process all entries in reset template + * till entry with SEQ_END opcode, which indicates end of the reset template + * processing. Each entry has a Reset Entry header, entry opcode/command, with + * size of the entry, number of entries in sub-sequence and delay in microsecs + * or timeout in millisecs. + * + * @ha : Pointer to adapter structure + * @p_buff : Common reset entry header. + * + */ +static void +qla8044_process_reset_template(struct scsi_qla_host *vha, + char *p_buff) +{ + int index, entries; + struct qla8044_reset_entry_hdr *p_hdr; + char *p_entry = p_buff; + + vha->reset_tmplt.seq_end = 0; + vha->reset_tmplt.template_end = 0; + entries = vha->reset_tmplt.hdr->entries; + index = vha->reset_tmplt.seq_index; + + for (; (!vha->reset_tmplt.seq_end) && (index < entries); index++) { + p_hdr = (struct qla8044_reset_entry_hdr *)p_entry; + switch (p_hdr->cmd) { + case OPCODE_NOP: + break; + case OPCODE_WRITE_LIST: + qla8044_write_list(vha, p_hdr); + break; + case OPCODE_READ_WRITE_LIST: + qla8044_read_write_list(vha, p_hdr); + break; + case OPCODE_POLL_LIST: + qla8044_poll_list(vha, p_hdr); + break; + case OPCODE_POLL_WRITE_LIST: + qla8044_poll_write_list(vha, p_hdr); + break; + case OPCODE_READ_MODIFY_WRITE: + qla8044_read_modify_write(vha, p_hdr); + break; + case OPCODE_SEQ_PAUSE: + qla8044_pause(vha, p_hdr); + break; + case OPCODE_SEQ_END: + vha->reset_tmplt.seq_end = 1; + break; + case OPCODE_TMPL_END: + qla8044_template_end(vha, p_hdr); + break; + case OPCODE_POLL_READ_LIST: + qla8044_poll_read_list(vha, p_hdr); + break; + default: + ql_log(ql_log_fatal, vha, 0xb097, + "%s: Unknown command ==> 0x%04x on " + "entry = %d\n", __func__, p_hdr->cmd, index); + break; + } + /* + *Set pointer to next entry in the sequence. 
+ */ + p_entry += p_hdr->size; + } + vha->reset_tmplt.seq_index = index; +} + +static void +qla8044_process_init_seq(struct scsi_qla_host *vha) +{ + qla8044_process_reset_template(vha, + vha->reset_tmplt.init_offset); + if (vha->reset_tmplt.seq_end != 1) + ql_log(ql_log_fatal, vha, 0xb098, + "%s: Abrupt INIT Sub-Sequence end.\n", + __func__); +} + +static void +qla8044_process_stop_seq(struct scsi_qla_host *vha) +{ + vha->reset_tmplt.seq_index = 0; + qla8044_process_reset_template(vha, vha->reset_tmplt.stop_offset); + if (vha->reset_tmplt.seq_end != 1) + ql_log(ql_log_fatal, vha, 0xb099, + "%s: Abrupt STOP Sub-Sequence end.\n", __func__); +} + +static void +qla8044_process_start_seq(struct scsi_qla_host *vha) +{ + qla8044_process_reset_template(vha, vha->reset_tmplt.start_offset); + if (vha->reset_tmplt.template_end != 1) + ql_log(ql_log_fatal, vha, 0xb09a, + "%s: Abrupt START Sub-Sequence end.\n", + __func__); +} + +static int +qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha, + uint32_t flash_addr, uint8_t *p_data, int u32_word_count) +{ + uint32_t i; + uint32_t u32_word; + uint32_t flash_offset; + uint32_t addr = flash_addr; + int ret_val = QLA_SUCCESS; + + flash_offset = addr & (QLA8044_FLASH_SECTOR_SIZE - 1); + + if (addr & 0x3) { + ql_log(ql_log_fatal, vha, 0xb09b, "%s: Illegal addr = 0x%x\n", + __func__, addr); + ret_val = QLA_FUNCTION_FAILED; + goto exit_lockless_read; + } + + ret_val = qla8044_wr_reg_indirect(vha, + QLA8044_FLASH_DIRECT_WINDOW, (addr)); + + if (ret_val != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb09c, + "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", + __func__, addr); + goto exit_lockless_read; + } + + /* Check if data is spread across multiple sectors */ + if ((flash_offset + (u32_word_count * sizeof(uint32_t))) > + (QLA8044_FLASH_SECTOR_SIZE - 1)) { + /* Multi sector read */ + for (i = 0; i < u32_word_count; i++) { + ret_val = qla8044_rd_reg_indirect(vha, + QLA8044_FLASH_DIRECT_DATA(addr), &u32_word); + if (ret_val != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb09d, + "%s: failed to read addr 0x%x!\n", + __func__, addr); + goto exit_lockless_read; + } + *(uint32_t *)p_data = u32_word; + p_data = p_data + 4; + addr = addr + 4; + flash_offset = flash_offset + 4; + if (flash_offset > (QLA8044_FLASH_SECTOR_SIZE - 1)) { + /* This write is needed once for each sector */ + ret_val = qla8044_wr_reg_indirect(vha, + QLA8044_FLASH_DIRECT_WINDOW, (addr)); + if (ret_val != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb09f, + "%s: failed to write addr " + "0x%x to FLASH_DIRECT_WINDOW!\n", + __func__, addr); + goto exit_lockless_read; + } + flash_offset = 0; + } + } + } else { + /* Single sector read */ + for (i = 0; i < u32_word_count; i++) { + ret_val = qla8044_rd_reg_indirect(vha, + QLA8044_FLASH_DIRECT_DATA(addr), &u32_word); + if (ret_val != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb0a0, + "%s: failed to read addr 0x%x!\n", + __func__, addr); + goto exit_lockless_read; + } + *(uint32_t *)p_data = u32_word; + p_data = p_data + 4; + addr = addr + 4; + } + } + +exit_lockless_read: + return ret_val; +} + +/* + * qla8044_ms_mem_write_128b - Writes data to MS/off-chip memory + * + * @vha : Pointer to adapter structure + * addr : Flash address to write to + * data : Data to be written + * count : word_count to be written + * + * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED + */ +static int +qla8044_ms_mem_write_128b(struct scsi_qla_host *vha, + uint64_t addr, uint32_t *data, uint32_t count) +{ + int i, j, ret_val = QLA_SUCCESS; + uint32_t 
agt_ctrl; + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + + /* Only 128-bit aligned access */ + if (addr & 0xF) { + ret_val = QLA_FUNCTION_FAILED; + goto exit_ms_mem_write; + } + write_lock_irqsave(&ha->hw_lock, flags); + + /* Write address */ + ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, 0); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a1, + "%s: write to AGT_ADDR_HI failed!\n", __func__); + goto exit_ms_mem_write_unlock; + } + + for (i = 0; i < count; i++, addr += 16) { + if (!((addr_in_range(addr, QLA8044_ADDR_QDR_NET, + QLA8044_ADDR_QDR_NET_MAX)) || + (addr_in_range(addr, QLA8044_ADDR_DDR_NET, + QLA8044_ADDR_DDR_NET_MAX)))) { + ret_val = QLA_FUNCTION_FAILED; + goto exit_ms_mem_write_unlock; + } + + ret_val = qla8044_wr_reg_indirect(vha, + MD_MIU_TEST_AGT_ADDR_LO, addr); + + /* Write data */ + ret_val += qla8044_wr_reg_indirect(vha, + MD_MIU_TEST_AGT_WRDATA_LO, *data++); + ret_val += qla8044_wr_reg_indirect(vha, + MD_MIU_TEST_AGT_WRDATA_HI, *data++); + ret_val += qla8044_wr_reg_indirect(vha, + MD_MIU_TEST_AGT_WRDATA_ULO, *data++); + ret_val += qla8044_wr_reg_indirect(vha, + MD_MIU_TEST_AGT_WRDATA_UHI, *data++); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a2, + "%s: write to AGT_WRDATA failed!\n", + __func__); + goto exit_ms_mem_write_unlock; + } + + /* Check write status */ + ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, + MIU_TA_CTL_WRITE_ENABLE); + ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, + MIU_TA_CTL_WRITE_START); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a3, + "%s: write to AGT_CTRL failed!\n", __func__); + goto exit_ms_mem_write_unlock; + } + + for (j = 0; j < MAX_CTL_CHECK; j++) { + ret_val = qla8044_rd_reg_indirect(vha, + MD_MIU_TEST_AGT_CTRL, &agt_ctrl); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a4, + "%s: failed to read " + "MD_MIU_TEST_AGT_CTRL!\n", __func__); + goto exit_ms_mem_write_unlock; + } + if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0) + break; + } + + /* Status check failed */ + if (j >= MAX_CTL_CHECK) { + ql_log(ql_log_fatal, vha, 0xb0a5, + "%s: MS memory write failed!\n", + __func__); + ret_val = QLA_FUNCTION_FAILED; + goto exit_ms_mem_write_unlock; + } + } + +exit_ms_mem_write_unlock: + write_unlock_irqrestore(&ha->hw_lock, flags); + +exit_ms_mem_write: + return ret_val; +} + +static int +qla8044_copy_bootloader(struct scsi_qla_host *vha) +{ + uint8_t *p_cache; + uint32_t src, count, size; + uint64_t dest; + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + src = QLA8044_BOOTLOADER_FLASH_ADDR; + dest = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_ADDR); + size = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_SIZE); + + /* 128 bit alignment check */ + if (size & 0xF) + size = (size + 16) & ~0xF; + + /* 16 byte count */ + count = size/16; + + p_cache = vmalloc(size); + if (p_cache == NULL) { + ql_log(ql_log_fatal, vha, 0xb0a6, + "%s: Failed to allocate memory for " + "boot loader cache\n", __func__); + ret_val = QLA_FUNCTION_FAILED; + goto exit_copy_bootloader; + } + + ret_val = qla8044_lockless_flash_read_u32(vha, src, + p_cache, size/sizeof(uint32_t)); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a7, + "%s: Error reading F/W from flash!!!\n", __func__); + goto exit_copy_error; + } + ql_dbg(ql_dbg_p3p, vha, 0xb0a8, "%s: Read F/W from flash!\n", + __func__); + + /* 128 bit/16 byte write to MS memory */ + ret_val = qla8044_ms_mem_write_128b(vha, dest, + (uint32_t 
*)p_cache, count); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a9, + "%s: Error writing F/W to MS !!!\n", __func__); + goto exit_copy_error; + } + ql_dbg(ql_dbg_p3p, vha, 0xb0aa, + "%s: Wrote F/W (size %d) to MS !!!\n", + __func__, size); + +exit_copy_error: + vfree(p_cache); + +exit_copy_bootloader: + return ret_val; +} + +static int +qla8044_restart(struct scsi_qla_host *vha) +{ + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + qla8044_process_stop_seq(vha); + + /* Collect minidump */ + if (ql2xmdenable) + qla8044_get_minidump(vha); + else + ql_log(ql_log_fatal, vha, 0xb14c, + "Minidump disabled.\n"); + + qla8044_process_init_seq(vha); + + if (qla8044_copy_bootloader(vha)) { + ql_log(ql_log_fatal, vha, 0xb0ab, + "%s: Copy bootloader, firmware restart failed!\n", + __func__); + ret_val = QLA_FUNCTION_FAILED; + goto exit_restart; + } + + /* + * Loads F/W from flash + */ + qla8044_wr_reg(ha, QLA8044_FW_IMAGE_VALID, QLA8044_BOOT_FROM_FLASH); + + qla8044_process_start_seq(vha); + +exit_restart: + return ret_val; +} + +/* + * qla8044_check_cmd_peg_status - Check peg status to see if Peg is + * initialized. + * + * @ha : Pointer to adapter structure + * + * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED + */ +static int +qla8044_check_cmd_peg_status(struct scsi_qla_host *vha) +{ + uint32_t val, ret_val = QLA_FUNCTION_FAILED; + int retries = CRB_CMDPEG_CHECK_RETRY_COUNT; + struct qla_hw_data *ha = vha->hw; + + do { + val = qla8044_rd_reg(ha, QLA8044_CMDPEG_STATE); + if (val == PHAN_INITIALIZE_COMPLETE) { + ql_dbg(ql_dbg_p3p, vha, 0xb0ac, + "%s: Command Peg initialization " + "complete! state=0x%x\n", __func__, val); + ret_val = QLA_SUCCESS; + break; + } + msleep(CRB_CMDPEG_CHECK_DELAY); + } while (--retries); + + return ret_val; +} + +static int +qla8044_start_firmware(struct scsi_qla_host *vha) +{ + int ret_val = QLA_SUCCESS; + + if (qla8044_restart(vha)) { + ql_log(ql_log_fatal, vha, 0xb0ad, + "%s: Restart Error!!!, Need Reset!!!\n", + __func__); + ret_val = QLA_FUNCTION_FAILED; + goto exit_start_fw; + } else + ql_dbg(ql_dbg_p3p, vha, 0xb0af, + "%s: Restart done!\n", __func__); + + ret_val = qla8044_check_cmd_peg_status(vha); + if (ret_val) { + ql_log(ql_log_fatal, vha, 0xb0b0, + "%s: Peg not initialized!\n", __func__); + ret_val = QLA_FUNCTION_FAILED; + } + +exit_start_fw: + return ret_val; +} + +void +qla8044_clear_drv_active(struct qla_hw_data *ha) +{ + uint32_t drv_active; + struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); + + drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); + drv_active &= ~(1 << (ha->portnum)); + + ql_log(ql_log_info, vha, 0xb0b1, + "%s(%ld): drv_active: 0x%08x\n", + __func__, vha->host_no, drv_active); + + qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active); +} + +/* + * qla8044_device_bootstrap - Initialize device, set DEV_READY, start fw + * @ha: pointer to adapter structure + * + * Note: IDC lock must be held upon entry + **/ +static int +qla8044_device_bootstrap(struct scsi_qla_host *vha) +{ + int rval = QLA_FUNCTION_FAILED; + int i; + uint32_t old_count = 0, count = 0; + int need_reset = 0; + uint32_t idc_ctrl; + struct qla_hw_data *ha = vha->hw; + + need_reset = qla8044_need_reset(vha); + + if (!need_reset) { + old_count = qla8044_rd_direct(vha, + QLA8044_PEG_ALIVE_COUNTER_INDEX); + + for (i = 0; i < 10; i++) { + msleep(200); + + count = qla8044_rd_direct(vha, + QLA8044_PEG_ALIVE_COUNTER_INDEX); + if (count != old_count) { + rval = QLA_SUCCESS; + goto dev_ready; + } + } + 
qla8044_flash_lock_recovery(vha); + } else { + /* We are trying to perform a recovery here. */ + if (ha->flags.isp82xx_fw_hung) + qla8044_flash_lock_recovery(vha); + } + + /* set to DEV_INITIALIZING */ + ql_log(ql_log_info, vha, 0xb0b2, + "%s: HW State: INITIALIZING\n", __func__); + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_INITIALIZING); + + qla8044_idc_unlock(ha); + rval = qla8044_start_firmware(vha); + qla8044_idc_lock(ha); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_info, vha, 0xb0b3, + "%s: HW State: FAILED\n", __func__); + qla8044_clear_drv_active(ha); + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_FAILED); + return rval; + } + + /* For ISP8044, If IDC_CTRL GRACEFUL_RESET_BIT1 is set , reset it after + * device goes to INIT state. */ + idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); + if (idc_ctrl & GRACEFUL_RESET_BIT1) { + qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, + (idc_ctrl & ~GRACEFUL_RESET_BIT1)); + ha->fw_dumped = false; + } + +dev_ready: + ql_log(ql_log_info, vha, 0xb0b4, + "%s: HW State: READY\n", __func__); + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY); + + return rval; +} + +/*-------------------------Reset Sequence Functions-----------------------*/ +static void +qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha) +{ + u8 *phdr; + + if (!vha->reset_tmplt.buff) { + ql_log(ql_log_fatal, vha, 0xb0b5, + "%s: Error Invalid reset_seq_template\n", __func__); + return; + } + + phdr = vha->reset_tmplt.buff; + ql_dbg(ql_dbg_p3p, vha, 0xb0b6, + "Reset Template :\n\t0x%X 0x%X 0x%X 0x%X" + "0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n" + "\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n", + *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4), + *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8), + *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12), + *(phdr+13), *(phdr+14), *(phdr+15)); +} + +/* + * qla8044_reset_seq_checksum_test - Validate Reset Sequence template. + * + * @ha : Pointer to adapter structure + * + * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED + */ +static int +qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha) +{ + uint32_t sum = 0; + uint16_t *buff = (uint16_t *)vha->reset_tmplt.buff; + int u16_count = vha->reset_tmplt.hdr->size / sizeof(uint16_t); + + while (u16_count-- > 0) + sum += *buff++; + + while (sum >> 16) + sum = (sum & 0xFFFF) + (sum >> 16); + + /* checksum of 0 indicates a valid template */ + if (~sum) { + return QLA_SUCCESS; + } else { + ql_log(ql_log_fatal, vha, 0xb0b7, + "%s: Reset seq checksum failed\n", __func__); + return QLA_FUNCTION_FAILED; + } +} + +/* + * qla8044_read_reset_template - Read Reset Template from Flash, validate + * the template and store offsets of stop/start/init offsets in ha->reset_tmplt. 
+ * + * @ha : Pointer to adapter structure + */ +void +qla8044_read_reset_template(struct scsi_qla_host *vha) +{ + uint8_t *p_buff; + uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size; + + vha->reset_tmplt.seq_error = 0; + vha->reset_tmplt.buff = vmalloc(QLA8044_RESTART_TEMPLATE_SIZE); + if (vha->reset_tmplt.buff == NULL) { + ql_log(ql_log_fatal, vha, 0xb0b8, + "%s: Failed to allocate reset template resources\n", + __func__); + goto exit_read_reset_template; + } + + p_buff = vha->reset_tmplt.buff; + addr = QLA8044_RESET_TEMPLATE_ADDR; + + tmplt_hdr_def_size = + sizeof(struct qla8044_reset_template_hdr) / sizeof(uint32_t); + + ql_dbg(ql_dbg_p3p, vha, 0xb0b9, + "%s: Read template hdr size %d from Flash\n", + __func__, tmplt_hdr_def_size); + + /* Copy template header from flash */ + if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) { + ql_log(ql_log_fatal, vha, 0xb0ba, + "%s: Failed to read reset template\n", __func__); + goto exit_read_template_error; + } + + vha->reset_tmplt.hdr = + (struct qla8044_reset_template_hdr *) vha->reset_tmplt.buff; + + /* Validate the template header size and signature */ + tmplt_hdr_size = vha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t); + if ((tmplt_hdr_size != tmplt_hdr_def_size) || + (vha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) { + ql_log(ql_log_fatal, vha, 0xb0bb, + "%s: Template Header size invalid %d " + "tmplt_hdr_def_size %d!!!\n", __func__, + tmplt_hdr_size, tmplt_hdr_def_size); + goto exit_read_template_error; + } + + addr = QLA8044_RESET_TEMPLATE_ADDR + vha->reset_tmplt.hdr->hdr_size; + p_buff = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size; + tmplt_hdr_def_size = (vha->reset_tmplt.hdr->size - + vha->reset_tmplt.hdr->hdr_size)/sizeof(uint32_t); + + ql_dbg(ql_dbg_p3p, vha, 0xb0bc, + "%s: Read rest of the template size %d\n", + __func__, vha->reset_tmplt.hdr->size); + + /* Copy rest of the template */ + if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) { + ql_log(ql_log_fatal, vha, 0xb0bd, + "%s: Failed to read reset template\n", __func__); + goto exit_read_template_error; + } + + /* Integrity check */ + if (qla8044_reset_seq_checksum_test(vha)) { + ql_log(ql_log_fatal, vha, 0xb0be, + "%s: Reset Seq checksum failed!\n", __func__); + goto exit_read_template_error; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb0bf, + "%s: Reset Seq checksum passed! 
Get stop, " + "start and init seq offsets\n", __func__); + + /* Get STOP, START, INIT sequence offsets */ + vha->reset_tmplt.init_offset = vha->reset_tmplt.buff + + vha->reset_tmplt.hdr->init_seq_offset; + + vha->reset_tmplt.start_offset = vha->reset_tmplt.buff + + vha->reset_tmplt.hdr->start_seq_offset; + + vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff + + vha->reset_tmplt.hdr->hdr_size; + + qla8044_dump_reset_seq_hdr(vha); + + goto exit_read_reset_template; + +exit_read_template_error: + vfree(vha->reset_tmplt.buff); + +exit_read_reset_template: + return; +} + +void +qla8044_set_idc_dontreset(struct scsi_qla_host *vha) +{ + uint32_t idc_ctrl; + struct qla_hw_data *ha = vha->hw; + + idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); + idc_ctrl |= DONTRESET_BIT0; + ql_dbg(ql_dbg_p3p, vha, 0xb0c0, + "%s: idc_ctrl = %d\n", __func__, idc_ctrl); + qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl); +} + +static inline void +qla8044_set_rst_ready(struct scsi_qla_host *vha) +{ + uint32_t drv_state; + struct qla_hw_data *ha = vha->hw; + + drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + + /* For ISP8044, drv_active register has 1 bit per function, + * shift 1 by func_num to set a bit for the function.*/ + drv_state |= (1 << ha->portnum); + + ql_log(ql_log_info, vha, 0xb0c1, + "%s(%ld): drv_state: 0x%08x\n", + __func__, vha->host_no, drv_state); + qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state); +} + +/** + * qla8044_need_reset_handler - Code to start reset sequence + * @vha: pointer to adapter structure + * + * Note: IDC lock must be held upon entry + */ +static void +qla8044_need_reset_handler(struct scsi_qla_host *vha) +{ + uint32_t dev_state = 0, drv_state, drv_active; + unsigned long reset_timeout; + struct qla_hw_data *ha = vha->hw; + + ql_log(ql_log_fatal, vha, 0xb0c2, + "%s: Performing ISP error recovery\n", __func__); + + if (vha->flags.online) { + qla8044_idc_unlock(ha); + qla2x00_abort_isp_cleanup(vha); + ha->isp_ops->get_flash_version(vha, vha->req->ring); + ha->isp_ops->nvram_config(vha); + qla8044_idc_lock(ha); + } + + dev_state = qla8044_rd_direct(vha, + QLA8044_CRB_DEV_STATE_INDEX); + drv_state = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_STATE_INDEX); + drv_active = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_ACTIVE_INDEX); + + ql_log(ql_log_info, vha, 0xb0c5, + "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n", + __func__, vha->host_no, drv_state, drv_active, dev_state); + + qla8044_set_rst_ready(vha); + + /* wait for 10 seconds for reset ack from all functions */ + reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); + + do { + if (time_after_eq(jiffies, reset_timeout)) { + ql_log(ql_log_info, vha, 0xb0c4, + "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n", + __func__, ha->portnum, drv_state, drv_active); + break; + } + + qla8044_idc_unlock(ha); + msleep(1000); + qla8044_idc_lock(ha); + + dev_state = qla8044_rd_direct(vha, + QLA8044_CRB_DEV_STATE_INDEX); + drv_state = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_STATE_INDEX); + drv_active = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_ACTIVE_INDEX); + } while (((drv_state & drv_active) != drv_active) && + (dev_state == QLA8XXX_DEV_NEED_RESET)); + + /* Remove IDC participation of functions not acknowledging */ + if (drv_state != drv_active) { + ql_log(ql_log_info, vha, 0xb0c7, + "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n", + __func__, vha->host_no, ha->portnum, + (drv_active ^ drv_state)); + drv_active = drv_active & 
drv_state; + qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, + drv_active); + } else { + /* + * Reset owner should execute reset recovery, + * if all functions acknowledged + */ + if ((ha->flags.nic_core_reset_owner) && + (dev_state == QLA8XXX_DEV_NEED_RESET)) { + ha->flags.nic_core_reset_owner = 0; + qla8044_device_bootstrap(vha); + return; + } + } + + /* Exit if non active function */ + if (!(drv_active & (1 << ha->portnum))) { + ha->flags.nic_core_reset_owner = 0; + return; + } + + /* + * Execute Reset Recovery if Reset Owner or Function 7 + * is the only active function + */ + if (ha->flags.nic_core_reset_owner || + ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) { + ha->flags.nic_core_reset_owner = 0; + qla8044_device_bootstrap(vha); + } +} + +static void +qla8044_set_drv_active(struct scsi_qla_host *vha) +{ + uint32_t drv_active; + struct qla_hw_data *ha = vha->hw; + + drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); + + /* For ISP8044, drv_active register has 1 bit per function, + * shift 1 by func_num to set a bit for the function.*/ + drv_active |= (1 << ha->portnum); + + ql_log(ql_log_info, vha, 0xb0c8, + "%s(%ld): drv_active: 0x%08x\n", + __func__, vha->host_no, drv_active); + qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active); +} + +static int +qla8044_check_drv_active(struct scsi_qla_host *vha) +{ + uint32_t drv_active; + struct qla_hw_data *ha = vha->hw; + + drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); + if (drv_active & (1 << ha->portnum)) + return QLA_SUCCESS; + else + return QLA_TEST_FAILED; +} + +static void +qla8044_clear_idc_dontreset(struct scsi_qla_host *vha) +{ + uint32_t idc_ctrl; + struct qla_hw_data *ha = vha->hw; + + idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); + idc_ctrl &= ~DONTRESET_BIT0; + ql_log(ql_log_info, vha, 0xb0c9, + "%s: idc_ctrl = %d\n", __func__, + idc_ctrl); + qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl); +} + +static int +qla8044_set_idc_ver(struct scsi_qla_host *vha) +{ + int idc_ver; + uint32_t drv_active; + int rval = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); + if (drv_active == (1 << ha->portnum)) { + idc_ver = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_IDC_VERSION_INDEX); + idc_ver &= (~0xFF); + idc_ver |= QLA8044_IDC_VER_MAJ_VALUE; + qla8044_wr_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX, + idc_ver); + ql_log(ql_log_info, vha, 0xb0ca, + "%s: IDC version updated to %d\n", + __func__, idc_ver); + } else { + idc_ver = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_IDC_VERSION_INDEX); + idc_ver &= 0xFF; + if (QLA8044_IDC_VER_MAJ_VALUE != idc_ver) { + ql_log(ql_log_info, vha, 0xb0cb, + "%s: qla4xxx driver IDC version %d " + "is not compatible with IDC version %d " + "of other drivers!\n", + __func__, QLA8044_IDC_VER_MAJ_VALUE, + idc_ver); + rval = QLA_FUNCTION_FAILED; + goto exit_set_idc_ver; + } + } + + /* Update IDC_MINOR_VERSION */ + idc_ver = qla8044_rd_reg(ha, QLA8044_CRB_IDC_VER_MINOR); + idc_ver &= ~(0x03 << (ha->portnum * 2)); + idc_ver |= (QLA8044_IDC_VER_MIN_VALUE << (ha->portnum * 2)); + qla8044_wr_reg(ha, QLA8044_CRB_IDC_VER_MINOR, idc_ver); + +exit_set_idc_ver: + return rval; +} + +static int +qla8044_update_idc_reg(struct scsi_qla_host *vha) +{ + uint32_t drv_active; + int rval = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + if (vha->flags.init_done) + goto exit_update_idc_reg; + + qla8044_idc_lock(ha); + qla8044_set_drv_active(vha); + + drv_active = qla8044_rd_direct(vha, + 
QLA8044_CRB_DRV_ACTIVE_INDEX); + + /* If we are the first driver to load and + * ql2xdontresethba is not set, clear IDC_CTRL BIT0. */ + if ((drv_active == (1 << ha->portnum)) && !ql2xdontresethba) + qla8044_clear_idc_dontreset(vha); + + rval = qla8044_set_idc_ver(vha); + if (rval == QLA_FUNCTION_FAILED) + qla8044_clear_drv_active(ha); + qla8044_idc_unlock(ha); + +exit_update_idc_reg: + return rval; +} + +/** + * qla8044_need_qsnt_handler - Code to start qsnt + * @vha: pointer to adapter structure + */ +static void +qla8044_need_qsnt_handler(struct scsi_qla_host *vha) +{ + unsigned long qsnt_timeout; + uint32_t drv_state, drv_active, dev_state; + struct qla_hw_data *ha = vha->hw; + + if (vha->flags.online) + qla2x00_quiesce_io(vha); + else + return; + + qla8044_set_qsnt_ready(vha); + + /* Wait for 30 secs for all functions to ack qsnt mode */ + qsnt_timeout = jiffies + (QSNT_ACK_TOV * HZ); + drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); + + /* Shift drv_active by 1 to match drv_state. As quiescent ready bit + position is at bit 1 and drv active is at bit 0 */ + drv_active = drv_active << 1; + + while (drv_state != drv_active) { + if (time_after_eq(jiffies, qsnt_timeout)) { + /* Other functions did not ack, changing state to + * DEV_READY + */ + clear_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_READY); + qla8044_clear_qsnt_ready(vha); + ql_log(ql_log_info, vha, 0xb0cc, + "Timeout waiting for quiescent ack!!!\n"); + return; + } + qla8044_idc_unlock(ha); + msleep(1000); + qla8044_idc_lock(ha); + + drv_state = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_STATE_INDEX); + drv_active = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_ACTIVE_INDEX); + drv_active = drv_active << 1; + } + + /* All functions have Acked. Set quiescent state */ + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + + if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_QUIESCENT); + ql_log(ql_log_info, vha, 0xb0cd, + "%s: HW State: QUIESCENT\n", __func__); + } +} + +/* + * qla8044_device_state_handler - Adapter state machine + * @ha: pointer to host adapter structure. 
+ * + * Note: IDC lock must be UNLOCKED upon entry + **/ +int +qla8044_device_state_handler(struct scsi_qla_host *vha) +{ + uint32_t dev_state; + int rval = QLA_SUCCESS; + unsigned long dev_init_timeout; + struct qla_hw_data *ha = vha->hw; + + rval = qla8044_update_idc_reg(vha); + if (rval == QLA_FUNCTION_FAILED) + goto exit_error; + + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + ql_dbg(ql_dbg_p3p, vha, 0xb0ce, + "Device state is 0x%x = %s\n", + dev_state, qdev_state(dev_state)); + + /* wait for 30 seconds for device to go ready */ + dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); + + qla8044_idc_lock(ha); + + while (1) { + if (time_after_eq(jiffies, dev_init_timeout)) { + if (qla8044_check_drv_active(vha) == QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xb0cf, + "%s: Device Init Failed 0x%x = %s\n", + QLA2XXX_DRIVER_NAME, dev_state, + qdev_state(dev_state)); + qla8044_wr_direct(vha, + QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_FAILED); + } + } + + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + ql_log(ql_log_info, vha, 0xb0d0, + "Device state is 0x%x = %s\n", + dev_state, qdev_state(dev_state)); + + /* NOTE: Make sure idc unlocked upon exit of switch statement */ + switch (dev_state) { + case QLA8XXX_DEV_READY: + ha->flags.nic_core_reset_owner = 0; + goto exit; + case QLA8XXX_DEV_COLD: + rval = qla8044_device_bootstrap(vha); + break; + case QLA8XXX_DEV_INITIALIZING: + qla8044_idc_unlock(ha); + msleep(1000); + qla8044_idc_lock(ha); + break; + case QLA8XXX_DEV_NEED_RESET: + /* For ISP8044, if NEED_RESET is set by any driver, + * it should be honored, irrespective of IDC_CTRL + * DONTRESET_BIT0 */ + qla8044_need_reset_handler(vha); + break; + case QLA8XXX_DEV_NEED_QUIESCENT: + /* idc locked/unlocked in handler */ + qla8044_need_qsnt_handler(vha); + + /* Reset the init timeout after qsnt handler */ + dev_init_timeout = jiffies + + (ha->fcoe_reset_timeout * HZ); + break; + case QLA8XXX_DEV_QUIESCENT: + ql_log(ql_log_info, vha, 0xb0d1, + "HW State: QUIESCENT\n"); + + qla8044_idc_unlock(ha); + msleep(1000); + qla8044_idc_lock(ha); + + /* Reset the init timeout after qsnt handler */ + dev_init_timeout = jiffies + + (ha->fcoe_reset_timeout * HZ); + break; + case QLA8XXX_DEV_FAILED: + ha->flags.nic_core_reset_owner = 0; + qla8044_idc_unlock(ha); + qla8xxx_dev_failed_handler(vha); + rval = QLA_FUNCTION_FAILED; + qla8044_idc_lock(ha); + goto exit; + default: + qla8044_idc_unlock(ha); + qla8xxx_dev_failed_handler(vha); + rval = QLA_FUNCTION_FAILED; + qla8044_idc_lock(ha); + goto exit; + } + } +exit: + qla8044_idc_unlock(ha); + +exit_error: + return rval; +} + +/** + * qla8044_check_temp - Check the ISP82XX temperature. + * @vha: adapter block pointer. + * + * Note: The caller should not hold the idc lock. + */ +static int +qla8044_check_temp(struct scsi_qla_host *vha) +{ + uint32_t temp, temp_state, temp_val; + int status = QLA_SUCCESS; + + temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX); + temp_state = qla82xx_get_temp_state(temp); + temp_val = qla82xx_get_temp_val(temp); + + if (temp_state == QLA82XX_TEMP_PANIC) { + ql_log(ql_log_warn, vha, 0xb0d2, + "Device temperature %d degrees C" + " exceeds maximum allowed. Hardware has been shut" + " down\n", temp_val); + status = QLA_FUNCTION_FAILED; + return status; + } else if (temp_state == QLA82XX_TEMP_WARN) { + ql_log(ql_log_warn, vha, 0xb0d3, + "Device temperature %d" + " degrees C exceeds operating range." 
+ " Immediate action needed.\n", temp_val); + } + return 0; +} + +int qla8044_read_temperature(scsi_qla_host_t *vha) +{ + uint32_t temp; + + temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX); + return qla82xx_get_temp_val(temp); +} + +/** + * qla8044_check_fw_alive - Check firmware health + * @vha: Pointer to host adapter structure. + * + * Context: Interrupt + */ +int +qla8044_check_fw_alive(struct scsi_qla_host *vha) +{ + uint32_t fw_heartbeat_counter; + uint32_t halt_status1, halt_status2; + int status = QLA_SUCCESS; + + fw_heartbeat_counter = qla8044_rd_direct(vha, + QLA8044_PEG_ALIVE_COUNTER_INDEX); + + /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ + if (fw_heartbeat_counter == 0xffffffff) { + ql_dbg(ql_dbg_p3p, vha, 0xb0d4, + "scsi%ld: %s: Device in frozen " + "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", + vha->host_no, __func__); + return status; + } + + if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { + vha->seconds_since_last_heartbeat++; + /* FW not alive after 2 seconds */ + if (vha->seconds_since_last_heartbeat == 2) { + vha->seconds_since_last_heartbeat = 0; + halt_status1 = qla8044_rd_direct(vha, + QLA8044_PEG_HALT_STATUS1_INDEX); + halt_status2 = qla8044_rd_direct(vha, + QLA8044_PEG_HALT_STATUS2_INDEX); + + ql_log(ql_log_info, vha, 0xb0d5, + "scsi(%ld): %s, ISP8044 " + "Dumping hw/fw registers:\n" + " PEG_HALT_STATUS1: 0x%x, " + "PEG_HALT_STATUS2: 0x%x,\n", + vha->host_no, __func__, halt_status1, + halt_status2); + status = QLA_FUNCTION_FAILED; + } + } else + vha->seconds_since_last_heartbeat = 0; + + vha->fw_heartbeat_counter = fw_heartbeat_counter; + return status; +} + +void +qla8044_watchdog(struct scsi_qla_host *vha) +{ + uint32_t dev_state, halt_status; + int halt_status_unrecoverable = 0; + struct qla_hw_data *ha = vha->hw; + + /* don't poll if reset is going on or FW hang in quiescent state */ + if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || + test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) { + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + + if (qla8044_check_fw_alive(vha)) { + ha->flags.isp82xx_fw_hung = 1; + ql_log(ql_log_warn, vha, 0xb10a, + "Firmware hung.\n"); + qla82xx_clear_pending_mbx(vha); + } + + if (qla8044_check_temp(vha)) { + set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); + ha->flags.isp82xx_fw_hung = 1; + qla2xxx_wake_dpc(vha); + } else if (dev_state == QLA8XXX_DEV_NEED_RESET && + !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { + ql_log(ql_log_info, vha, 0xb0d6, + "%s: HW State: NEED RESET!\n", + __func__); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && + !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { + ql_log(ql_log_info, vha, 0xb0d7, + "%s: HW State: NEED QUIES detected!\n", + __func__); + set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } else { + /* Check firmware health */ + if (ha->flags.isp82xx_fw_hung) { + halt_status = qla8044_rd_direct(vha, + QLA8044_PEG_HALT_STATUS1_INDEX); + if (halt_status & + QLA8044_HALT_STATUS_FW_RESET) { + ql_log(ql_log_fatal, vha, + 0xb0d8, "%s: Firmware " + "error detected device " + "is being reset\n", + __func__); + } else if (halt_status & + QLA8044_HALT_STATUS_UNRECOVERABLE) { + halt_status_unrecoverable = 1; + } + + /* Since we cannot change dev_state in interrupt + * context, set appropriate DPC flag then wakeup + * DPC */ + if (halt_status_unrecoverable) { + set_bit(ISP_UNRECOVERABLE, + &vha->dpc_flags); + } else { + if 
(dev_state == + QLA8XXX_DEV_QUIESCENT) { + set_bit(FCOE_CTX_RESET_NEEDED, + &vha->dpc_flags); + ql_log(ql_log_info, vha, 0xb0d9, + "%s: FW CONTEXT Reset " + "needed!\n", __func__); + } else { + ql_log(ql_log_info, vha, + 0xb0da, "%s: " + "detect abort needed\n", + __func__); + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + } + } + qla2xxx_wake_dpc(vha); + } + } + + } +} + +static int +qla8044_minidump_process_control(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr) +{ + struct qla8044_minidump_entry_crb *crb_entry; + uint32_t read_value, opcode, poll_time, addr, index; + uint32_t crb_addr, rval = QLA_SUCCESS; + unsigned long wtime; + struct qla8044_minidump_template_hdr *tmplt_hdr; + int i; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_p3p, vha, 0xb0dd, "Entering fn: %s\n", __func__); + tmplt_hdr = (struct qla8044_minidump_template_hdr *) + ha->md_tmplt_hdr; + crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr; + + crb_addr = crb_entry->addr; + for (i = 0; i < crb_entry->op_count; i++) { + opcode = crb_entry->crb_ctrl.opcode; + + if (opcode & QLA82XX_DBG_OPCODE_WR) { + qla8044_wr_reg_indirect(vha, crb_addr, + crb_entry->value_1); + } + + if (opcode & QLA82XX_DBG_OPCODE_RW) { + qla8044_rd_reg_indirect(vha, crb_addr, &read_value); + qla8044_wr_reg_indirect(vha, crb_addr, read_value); + } + + if (opcode & QLA82XX_DBG_OPCODE_AND) { + qla8044_rd_reg_indirect(vha, crb_addr, &read_value); + read_value &= crb_entry->value_2; + if (opcode & QLA82XX_DBG_OPCODE_OR) { + read_value |= crb_entry->value_3; + opcode &= ~QLA82XX_DBG_OPCODE_OR; + } + qla8044_wr_reg_indirect(vha, crb_addr, read_value); + } + if (opcode & QLA82XX_DBG_OPCODE_OR) { + qla8044_rd_reg_indirect(vha, crb_addr, &read_value); + read_value |= crb_entry->value_3; + qla8044_wr_reg_indirect(vha, crb_addr, read_value); + } + if (opcode & QLA82XX_DBG_OPCODE_POLL) { + poll_time = crb_entry->crb_strd.poll_timeout; + wtime = jiffies + poll_time; + qla8044_rd_reg_indirect(vha, crb_addr, &read_value); + + do { + if ((read_value & crb_entry->value_2) == + crb_entry->value_1) { + break; + } else if (time_after_eq(jiffies, wtime)) { + /* capturing dump failed */ + rval = QLA_FUNCTION_FAILED; + break; + } else { + qla8044_rd_reg_indirect(vha, + crb_addr, &read_value); + } + } while (1); + } + + if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else { + addr = crb_addr; + } + + qla8044_rd_reg_indirect(vha, addr, &read_value); + index = crb_entry->crb_ctrl.state_index_v; + tmplt_hdr->saved_state_array[index] = read_value; + } + + if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else { + addr = crb_addr; + } + + if (crb_entry->crb_ctrl.state_index_v) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = + tmplt_hdr->saved_state_array[index]; + } else { + read_value = crb_entry->value_1; + } + + qla8044_wr_reg_indirect(vha, addr, read_value); + } + + if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = tmplt_hdr->saved_state_array[index]; + read_value <<= crb_entry->crb_ctrl.shl; + read_value >>= crb_entry->crb_ctrl.shr; + if (crb_entry->value_2) + read_value &= crb_entry->value_2; + read_value |= crb_entry->value_3; + read_value += crb_entry->value_1; + tmplt_hdr->saved_state_array[index] = 
read_value; + } + crb_addr += crb_entry->crb_strd.addr_stride; + } + return rval; +} + +static void +qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla8044_minidump_entry_crb *crb_hdr; + uint32_t *data_ptr = *d_ptr; + + ql_dbg(ql_dbg_p3p, vha, 0xb0de, "Entering fn: %s\n", __func__); + crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr; + r_addr = crb_hdr->addr; + r_stride = crb_hdr->crb_strd.addr_stride; + loop_cnt = crb_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla8044_rd_reg_indirect(vha, r_addr, &r_value); + *data_ptr++ = r_addr; + *data_ptr++ = r_value; + r_addr += r_stride; + } + *d_ptr = data_ptr; +} + +static int +qla8044_minidump_process_rdmem(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t r_addr, r_value, r_data; + uint32_t i, j, loop_cnt; + struct qla8044_minidump_entry_rdmem *m_hdr; + unsigned long flags; + uint32_t *data_ptr = *d_ptr; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_p3p, vha, 0xb0df, "Entering fn: %s\n", __func__); + m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr; + r_addr = m_hdr->read_addr; + loop_cnt = m_hdr->read_data_size/16; + + ql_dbg(ql_dbg_p3p, vha, 0xb0f0, + "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n", + __func__, r_addr, m_hdr->read_data_size); + + if (r_addr & 0xf) { + ql_dbg(ql_dbg_p3p, vha, 0xb0f1, + "[%s]: Read addr 0x%x not 16 bytes aligned\n", + __func__, r_addr); + return QLA_FUNCTION_FAILED; + } + + if (m_hdr->read_data_size % 16) { + ql_dbg(ql_dbg_p3p, vha, 0xb0f2, + "[%s]: Read data[0x%x] not multiple of 16 bytes\n", + __func__, m_hdr->read_data_size); + return QLA_FUNCTION_FAILED; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb0f3, + "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", + __func__, r_addr, m_hdr->read_data_size, loop_cnt); + + write_lock_irqsave(&ha->hw_lock, flags); + for (i = 0; i < loop_cnt; i++) { + qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, r_addr); + r_value = 0; + qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, r_value); + r_value = MIU_TA_CTL_ENABLE; + qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value); + r_value = MIU_TA_CTL_START_ENABLE; + qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, + &r_value); + if ((r_value & MIU_TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + write_unlock_irqrestore(&ha->hw_lock, flags); + return QLA_SUCCESS; + } + + for (j = 0; j < 4; j++) { + qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_RDDATA[j], + &r_data); + *data_ptr++ = r_data; + } + + r_addr += 16; + } + write_unlock_irqrestore(&ha->hw_lock, flags); + + ql_dbg(ql_dbg_p3p, vha, 0xb0f4, + "Leaving fn: %s datacount: 0x%x\n", + __func__, (loop_cnt * 16)); + + *d_ptr = data_ptr; + return QLA_SUCCESS; +} + +/* ISP83xx flash read for _RDROM _BOARD */ +static uint32_t +qla8044_minidump_process_rdrom(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t fl_addr, u32_count, rval; + struct qla8044_minidump_entry_rdrom *rom_hdr; + uint32_t *data_ptr = *d_ptr; + + rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr; + fl_addr = rom_hdr->read_addr; + u32_count = (rom_hdr->read_data_size)/sizeof(uint32_t); + + ql_dbg(ql_dbg_p3p, vha, 0xb0f5, "[%s]: fl_addr: 0x%x, count: 0x%x\n", + __func__, fl_addr, 
u32_count); + + rval = qla8044_lockless_flash_read_u32(vha, fl_addr, + (u8 *)(data_ptr), u32_count); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb0f6, + "%s: Flash Read Error,Count=%d\n", __func__, u32_count); + return QLA_FUNCTION_FAILED; + } else { + data_ptr += u32_count; + *d_ptr = data_ptr; + return QLA_SUCCESS; + } +} + +static void +qla8044_mark_entry_skipped(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, int index) +{ + entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; + + ql_log(ql_log_info, vha, 0xb0f7, + "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", + vha->host_no, index, entry_hdr->entry_type, + entry_hdr->d_ctrl.entry_capture_mask); +} + +static int +qla8044_minidump_process_l2tag(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + unsigned long p_wait, w_time, p_mask; + uint32_t c_value_w, c_value_r; + struct qla8044_minidump_entry_cache *cache_hdr; + int rval = QLA_FUNCTION_FAILED; + uint32_t *data_ptr = *d_ptr; + + ql_dbg(ql_dbg_p3p, vha, 0xb0f8, "Entering fn: %s\n", __func__); + cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr; + + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = cache_hdr->read_ctrl.read_addr_cnt; + p_wait = cache_hdr->cache_ctrl.poll_wait; + p_mask = cache_hdr->cache_ctrl.poll_mask; + + for (i = 0; i < loop_count; i++) { + qla8044_wr_reg_indirect(vha, t_r_addr, t_value); + if (c_value_w) + qla8044_wr_reg_indirect(vha, c_addr, c_value_w); + + if (p_mask) { + w_time = jiffies + p_wait; + do { + qla8044_rd_reg_indirect(vha, c_addr, + &c_value_r); + if ((c_value_r & p_mask) == 0) { + break; + } else if (time_after_eq(jiffies, w_time)) { + /* capturing dump failed */ + return rval; + } + } while (1); + } + + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + qla8044_rd_reg_indirect(vha, addr, &r_value); + *data_ptr++ = r_value; + addr += cache_hdr->read_ctrl.read_addr_stride; + } + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; + return QLA_SUCCESS; +} + +static void +qla8044_minidump_process_l1cache(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + uint32_t c_value_w; + struct qla8044_minidump_entry_cache *cache_hdr; + uint32_t *data_ptr = *d_ptr; + + cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr; + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = cache_hdr->read_ctrl.read_addr_cnt; + + for (i = 0; i < loop_count; i++) { + qla8044_wr_reg_indirect(vha, t_r_addr, t_value); + qla8044_wr_reg_indirect(vha, c_addr, c_value_w); + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + qla8044_rd_reg_indirect(vha, addr, &r_value); + *data_ptr++ = r_value; + addr += cache_hdr->read_ctrl.read_addr_stride; + } + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; +} + +static void +qla8044_minidump_process_rdocm(struct scsi_qla_host *vha, + struct 
qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla8044_minidump_entry_rdocm *ocm_hdr; + uint32_t *data_ptr = *d_ptr; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_p3p, vha, 0xb0f9, "Entering fn: %s\n", __func__); + + ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr; + r_addr = ocm_hdr->read_addr; + r_stride = ocm_hdr->read_addr_stride; + loop_cnt = ocm_hdr->op_count; + + ql_dbg(ql_dbg_p3p, vha, 0xb0fa, + "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n", + __func__, r_addr, r_stride, loop_cnt); + + for (i = 0; i < loop_cnt; i++) { + r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase)); + *data_ptr++ = r_value; + r_addr += r_stride; + } + ql_dbg(ql_dbg_p3p, vha, 0xb0fb, "Leaving fn: %s datacount: 0x%lx\n", + __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t))); + + *d_ptr = data_ptr; +} + +static void +qla8044_minidump_process_rdmux(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value = 0; + struct qla8044_minidump_entry_mux *mux_hdr; + uint32_t *data_ptr = *d_ptr; + + ql_dbg(ql_dbg_p3p, vha, 0xb0fc, "Entering fn: %s\n", __func__); + + mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr; + r_addr = mux_hdr->read_addr; + s_addr = mux_hdr->select_addr; + s_stride = mux_hdr->select_value_stride; + s_value = mux_hdr->select_value; + loop_cnt = mux_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla8044_wr_reg_indirect(vha, s_addr, s_value); + qla8044_rd_reg_indirect(vha, r_addr, &r_value); + *data_ptr++ = s_value; + *data_ptr++ = r_value; + s_value += s_stride; + } + *d_ptr = data_ptr; +} + +static void +qla8044_minidump_process_queue(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t s_addr, r_addr; + uint32_t r_stride, r_value, r_cnt, qid = 0; + uint32_t i, k, loop_cnt; + struct qla8044_minidump_entry_queue *q_hdr; + uint32_t *data_ptr = *d_ptr; + + ql_dbg(ql_dbg_p3p, vha, 0xb0fd, "Entering fn: %s\n", __func__); + q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr; + s_addr = q_hdr->select_addr; + r_cnt = q_hdr->rd_strd.read_addr_cnt; + r_stride = q_hdr->rd_strd.read_addr_stride; + loop_cnt = q_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla8044_wr_reg_indirect(vha, s_addr, qid); + r_addr = q_hdr->read_addr; + for (k = 0; k < r_cnt; k++) { + qla8044_rd_reg_indirect(vha, r_addr, &r_value); + *data_ptr++ = r_value; + r_addr += r_stride; + } + qid += q_hdr->q_strd.queue_id_stride; + } + *d_ptr = data_ptr; +} + +/* ISP83xx functions to process new minidump entries... 
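+ *
+ * These handlers share one poll idiom: read a register until the mask
+ * bits are set, sleeping ~1 ms per attempt, for at most poll_wait
+ * attempts before declaring a timeout.  A minimal sketch of that idiom
+ * (the helper name is illustrative, not part of the driver):
+ *
+ *	static int qla8044_poll_mask(struct scsi_qla_host *vha,
+ *		uint32_t addr, uint32_t mask, uint32_t tries)
+ *	{
+ *		uint32_t val;
+ *
+ *		while (tries--) {
+ *			qla8044_rd_reg_indirect(vha, addr, &val);
+ *			if (val & mask)
+ *				return QLA_SUCCESS;
+ *			usleep_range(1000, 1100);
+ *		}
+ *		return QLA_FUNCTION_FAILED;
+ *	}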
*/ +static uint32_t +qla8044_minidump_process_pollrd(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask; + uint16_t s_stride, i; + struct qla8044_minidump_entry_pollrd *pollrd_hdr; + uint32_t *data_ptr = *d_ptr; + + pollrd_hdr = (struct qla8044_minidump_entry_pollrd *) entry_hdr; + s_addr = pollrd_hdr->select_addr; + r_addr = pollrd_hdr->read_addr; + s_value = pollrd_hdr->select_value; + s_stride = pollrd_hdr->select_value_stride; + + poll_wait = pollrd_hdr->poll_wait; + poll_mask = pollrd_hdr->poll_mask; + + for (i = 0; i < pollrd_hdr->op_count; i++) { + qla8044_wr_reg_indirect(vha, s_addr, s_value); + poll_wait = pollrd_hdr->poll_wait; + while (1) { + qla8044_rd_reg_indirect(vha, s_addr, &r_value); + if ((r_value & poll_mask) != 0) { + break; + } else { + usleep_range(1000, 1100); + if (--poll_wait == 0) { + ql_log(ql_log_fatal, vha, 0xb0fe, + "%s: TIMEOUT\n", __func__); + goto error; + } + } + } + qla8044_rd_reg_indirect(vha, r_addr, &r_value); + *data_ptr++ = s_value; + *data_ptr++ = r_value; + + s_value += s_stride; + } + *d_ptr = data_ptr; + return QLA_SUCCESS; + +error: + return QLA_FUNCTION_FAILED; +} + +static void +qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t sel_val1, sel_val2, t_sel_val, data, i; + uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr; + struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr; + uint32_t *data_ptr = *d_ptr; + + rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *) entry_hdr; + sel_val1 = rdmux2_hdr->select_value_1; + sel_val2 = rdmux2_hdr->select_value_2; + sel_addr1 = rdmux2_hdr->select_addr_1; + sel_addr2 = rdmux2_hdr->select_addr_2; + sel_val_mask = rdmux2_hdr->select_value_mask; + read_addr = rdmux2_hdr->read_addr; + + for (i = 0; i < rdmux2_hdr->op_count; i++) { + qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1); + t_sel_val = sel_val1 & sel_val_mask; + *data_ptr++ = t_sel_val; + + qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val); + qla8044_rd_reg_indirect(vha, read_addr, &data); + + *data_ptr++ = data; + + qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2); + t_sel_val = sel_val2 & sel_val_mask; + *data_ptr++ = t_sel_val; + + qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val); + qla8044_rd_reg_indirect(vha, read_addr, &data); + + *data_ptr++ = data; + + sel_val1 += rdmux2_hdr->select_value_stride; + sel_val2 += rdmux2_hdr->select_value_stride; + } + + *d_ptr = data_ptr; +} + +static uint32_t +qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t poll_wait, poll_mask, r_value, data; + uint32_t addr_1, addr_2, value_1, value_2; + struct qla8044_minidump_entry_pollrdmwr *poll_hdr; + uint32_t *data_ptr = *d_ptr; + + poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *) entry_hdr; + addr_1 = poll_hdr->addr_1; + addr_2 = poll_hdr->addr_2; + value_1 = poll_hdr->value_1; + value_2 = poll_hdr->value_2; + poll_mask = poll_hdr->poll_mask; + + qla8044_wr_reg_indirect(vha, addr_1, value_1); + + poll_wait = poll_hdr->poll_wait; + while (1) { + qla8044_rd_reg_indirect(vha, addr_1, &r_value); + + if ((r_value & poll_mask) != 0) { + break; + } else { + usleep_range(1000, 1100); + if (--poll_wait == 0) { + ql_log(ql_log_fatal, vha, 0xb0ff, + "%s: TIMEOUT\n", __func__); + goto error; + } + } + } + + qla8044_rd_reg_indirect(vha, addr_2, &data); + data &= 
poll_hdr->modify_mask; + qla8044_wr_reg_indirect(vha, addr_2, data); + qla8044_wr_reg_indirect(vha, addr_1, value_2); + + poll_wait = poll_hdr->poll_wait; + while (1) { + qla8044_rd_reg_indirect(vha, addr_1, &r_value); + + if ((r_value & poll_mask) != 0) { + break; + } else { + usleep_range(1000, 1100); + if (--poll_wait == 0) { + ql_log(ql_log_fatal, vha, 0xb100, + "%s: TIMEOUT2\n", __func__); + goto error; + } + } + } + + *data_ptr++ = addr_2; + *data_ptr++ = data; + + *d_ptr = data_ptr; + + return QLA_SUCCESS; + +error: + return QLA_FUNCTION_FAILED; +} + +#define ISP8044_PEX_DMA_ENGINE_INDEX 8 +#define ISP8044_PEX_DMA_BASE_ADDRESS 0x77320000 +#define ISP8044_PEX_DMA_NUM_OFFSET 0x10000UL +#define ISP8044_PEX_DMA_CMD_ADDR_LOW 0x0 +#define ISP8044_PEX_DMA_CMD_ADDR_HIGH 0x04 +#define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL 0x08 + +#define ISP8044_PEX_DMA_READ_SIZE (16 * 1024) +#define ISP8044_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */ + +static int +qla8044_check_dma_engine_state(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + int rval = QLA_SUCCESS; + uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; + uint64_t dma_base_addr = 0; + struct qla8044_minidump_template_hdr *tmplt_hdr = NULL; + + tmplt_hdr = ha->md_tmplt_hdr; + dma_eng_num = + tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX]; + dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS + + (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET); + + /* Read the pex-dma's command-status-and-control register. */ + rval = qla8044_rd_reg_indirect(vha, + (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL), + &cmd_sts_and_cntrl); + if (rval) + return QLA_FUNCTION_FAILED; + + /* Check if requested pex-dma engine is available. */ + if (cmd_sts_and_cntrl & BIT_31) + return QLA_SUCCESS; + + return QLA_FUNCTION_FAILED; +} + +static int +qla8044_start_pex_dma(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr) +{ + struct qla_hw_data *ha = vha->hw; + int rval = QLA_SUCCESS, wait = 0; + uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; + uint64_t dma_base_addr = 0; + struct qla8044_minidump_template_hdr *tmplt_hdr = NULL; + + tmplt_hdr = ha->md_tmplt_hdr; + dma_eng_num = + tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX]; + dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS + + (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET); + + rval = qla8044_wr_reg_indirect(vha, + dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_LOW, + m_hdr->desc_card_addr); + if (rval) + goto error_exit; + + rval = qla8044_wr_reg_indirect(vha, + dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_HIGH, 0); + if (rval) + goto error_exit; + + rval = qla8044_wr_reg_indirect(vha, + dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL, + m_hdr->start_dma_cmd); + if (rval) + goto error_exit; + + /* Wait for dma operation to complete. 
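+ * The in-progress bit (BIT_1) of the command/status register is polled
+ * with udelay(10) between reads for at most ISP8044_PEX_DMA_MAX_WAIT
+ * (100 * 100 = 10000) iterations, i.e. roughly 10000 * 10 us = 100 ms,
+ * after which the caller falls back to the register-based rdmem read.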
*/ + for (wait = 0; wait < ISP8044_PEX_DMA_MAX_WAIT; wait++) { + rval = qla8044_rd_reg_indirect(vha, + (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL), + &cmd_sts_and_cntrl); + if (rval) + goto error_exit; + + if ((cmd_sts_and_cntrl & BIT_1) == 0) + break; + + udelay(10); + } + + /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */ + if (wait >= ISP8044_PEX_DMA_MAX_WAIT) { + rval = QLA_FUNCTION_FAILED; + goto error_exit; + } + +error_exit: + return rval; +} + +static int +qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + int rval = QLA_SUCCESS; + struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr = NULL; + uint32_t chunk_size, read_size; + uint8_t *data_ptr = (uint8_t *)*d_ptr; + void *rdmem_buffer = NULL; + dma_addr_t rdmem_dma; + struct qla8044_pex_dma_descriptor dma_desc; + + rval = qla8044_check_dma_engine_state(vha); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_p3p, vha, 0xb147, + "DMA engine not available. Fallback to rdmem-read.\n"); + return QLA_FUNCTION_FAILED; + } + + m_hdr = (void *)entry_hdr; + + rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, + ISP8044_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL); + if (!rdmem_buffer) { + ql_dbg(ql_dbg_p3p, vha, 0xb148, + "Unable to allocate rdmem dma buffer\n"); + return QLA_FUNCTION_FAILED; + } + + /* Prepare pex-dma descriptor to be written to MS memory. */ + /* dma-desc-cmd layout: + * 0-3: dma-desc-cmd 0-3 + * 4-7: pcid function number + * 8-15: dma-desc-cmd 8-15 + * dma_bus_addr: dma buffer address + * cmd.read_data_size: amount of data-chunk to be read. + */ + dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f); + dma_desc.cmd.dma_desc_cmd |= + ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4); + + dma_desc.dma_bus_addr = rdmem_dma; + dma_desc.cmd.read_data_size = chunk_size = ISP8044_PEX_DMA_READ_SIZE; + read_size = 0; + + /* + * Perform rdmem operation using pex-dma. + * Prepare dma in chunks of ISP8044_PEX_DMA_READ_SIZE. + */ + while (read_size < m_hdr->read_data_size) { + if (m_hdr->read_data_size - read_size < + ISP8044_PEX_DMA_READ_SIZE) { + chunk_size = (m_hdr->read_data_size - read_size); + dma_desc.cmd.read_data_size = chunk_size; + } + + dma_desc.src_addr = m_hdr->read_addr + read_size; + + /* Prepare: Write pex-dma descriptor to MS memory. */ + rval = qla8044_ms_mem_write_128b(vha, + m_hdr->desc_card_addr, (uint32_t *)&dma_desc, + (sizeof(struct qla8044_pex_dma_descriptor)/16)); + if (rval) { + ql_log(ql_log_warn, vha, 0xb14a, + "%s: Error writing rdmem-dma-init to MS !!!\n", + __func__); + goto error_exit; + } + ql_dbg(ql_dbg_p3p, vha, 0xb14b, + "%s: Dma-descriptor: Instruct for rdmem dma " + "(chunk_size 0x%x).\n", __func__, chunk_size); + + /* Execute: Start pex-dma operation. 
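+ * Each descriptor covers at most ISP8044_PEX_DMA_READ_SIZE (16 KiB);
+ * the final chunk shrinks to whatever remains of read_data_size.  A
+ * minimal sketch of that calculation (illustrative helper, not part of
+ * the driver):
+ *
+ *	static uint32_t pex_dma_chunk_size(uint32_t total, uint32_t done)
+ *	{
+ *		return min_t(uint32_t, total - done,
+ *			     ISP8044_PEX_DMA_READ_SIZE);
+ *	}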
*/ + rval = qla8044_start_pex_dma(vha, m_hdr); + if (rval) + goto error_exit; + + memcpy(data_ptr, rdmem_buffer, chunk_size); + data_ptr += chunk_size; + read_size += chunk_size; + } + + *d_ptr = (uint32_t *)data_ptr; + +error_exit: + if (rdmem_buffer) + dma_free_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE, + rdmem_buffer, rdmem_dma); + + return rval; +} + +static uint32_t +qla8044_minidump_process_rddfe(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + int loop_cnt; + uint32_t addr1, addr2, value, data, temp, wrVal; + uint8_t stride, stride2; + uint16_t count; + uint32_t poll, mask, modify_mask; + uint32_t wait_count = 0; + uint32_t *data_ptr = *d_ptr; + struct qla8044_minidump_entry_rddfe *rddfe; + + rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr; + + addr1 = rddfe->addr_1; + value = rddfe->value; + stride = rddfe->stride; + stride2 = rddfe->stride2; + count = rddfe->count; + + poll = rddfe->poll; + mask = rddfe->mask; + modify_mask = rddfe->modify_mask; + + addr2 = addr1 + stride; + + for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) { + qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value)); + + wait_count = 0; + while (wait_count < poll) { + qla8044_rd_reg_indirect(vha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll) { + ql_log(ql_log_warn, vha, 0xb153, + "%s: TIMEOUT\n", __func__); + goto error; + } else { + qla8044_rd_reg_indirect(vha, addr2, &temp); + temp = temp & modify_mask; + temp = (temp | ((loop_cnt << 16) | loop_cnt)); + wrVal = ((temp << 16) | temp); + + qla8044_wr_reg_indirect(vha, addr2, wrVal); + qla8044_wr_reg_indirect(vha, addr1, value); + + wait_count = 0; + while (wait_count < poll) { + qla8044_rd_reg_indirect(vha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + if (wait_count == poll) { + ql_log(ql_log_warn, vha, 0xb154, + "%s: TIMEOUT\n", __func__); + goto error; + } + + qla8044_wr_reg_indirect(vha, addr1, + ((0x40000000 | value) + stride2)); + wait_count = 0; + while (wait_count < poll) { + qla8044_rd_reg_indirect(vha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll) { + ql_log(ql_log_warn, vha, 0xb155, + "%s: TIMEOUT\n", __func__); + goto error; + } + + qla8044_rd_reg_indirect(vha, addr2, &data); + + *data_ptr++ = wrVal; + *data_ptr++ = data; + } + + } + + *d_ptr = data_ptr; + return QLA_SUCCESS; + +error: + return -1; + +} + +static uint32_t +qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + int ret = 0; + uint32_t addr1, addr2, value1, value2, data, selVal; + uint8_t stride1, stride2; + uint32_t addr3, addr4, addr5, addr6, addr7; + uint16_t count, loop_cnt; + uint32_t mask; + uint32_t *data_ptr = *d_ptr; + + struct qla8044_minidump_entry_rdmdio *rdmdio; + + rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr; + + addr1 = rdmdio->addr_1; + addr2 = rdmdio->addr_2; + value1 = rdmdio->value_1; + stride1 = rdmdio->stride_1; + stride2 = rdmdio->stride_2; + count = rdmdio->count; + + mask = rdmdio->mask; + value2 = rdmdio->value_2; + + addr3 = addr1 + stride1; + + for (loop_cnt = 0; loop_cnt < count; loop_cnt++) { + ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, + addr3, mask); + if (ret == -1) + goto error; + + addr4 = addr2 - stride1; + ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4, + value2); + if (ret == -1) + goto error; + + addr5 = addr2 - (2 * stride1); + ret = 
qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5, + value1); + if (ret == -1) + goto error; + + addr6 = addr2 - (3 * stride1); + ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, + addr6, 0x2); + if (ret == -1) + goto error; + + ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, + addr3, mask); + if (ret == -1) + goto error; + + addr7 = addr2 - (4 * stride1); + data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr7); + if (data == -1) + goto error; + + selVal = (value2 << 18) | (value1 << 2) | 2; + + stride2 = rdmdio->stride_2; + *data_ptr++ = selVal; + *data_ptr++ = data; + + value1 = value1 + stride2; + *d_ptr = data_ptr; + } + + return 0; + +error: + return -1; +} + +static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t addr1, addr2, value1, value2, poll, r_value; + uint32_t wait_count = 0; + struct qla8044_minidump_entry_pollwr *pollwr_hdr; + + pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr; + addr1 = pollwr_hdr->addr_1; + addr2 = pollwr_hdr->addr_2; + value1 = pollwr_hdr->value_1; + value2 = pollwr_hdr->value_2; + + poll = pollwr_hdr->poll; + + while (wait_count < poll) { + qla8044_rd_reg_indirect(vha, addr1, &r_value); + + if ((r_value & poll) != 0) + break; + wait_count++; + } + + if (wait_count == poll) { + ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__); + goto error; + } + + qla8044_wr_reg_indirect(vha, addr2, value2); + qla8044_wr_reg_indirect(vha, addr1, value1); + + wait_count = 0; + while (wait_count < poll) { + qla8044_rd_reg_indirect(vha, addr1, &r_value); + + if ((r_value & poll) != 0) + break; + wait_count++; + } + + return QLA_SUCCESS; + +error: + return -1; +} + +/* + * + * qla8044_collect_md_data - Retrieve firmware minidump data. 
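+ *
+ * Walks the minidump template: an entry is captured only when its
+ * capture mask intersects ql2xmdcapmask, otherwise it is flagged
+ * QLA82XX_DBG_SKIPPED_FLAG, and the walk advances by each entry's own
+ * size, as in the loop below:
+ *
+ *	entry_hdr = (struct qla8044_minidump_entry_hdr *)
+ *		((uint8_t *)entry_hdr + entry_hdr->entry_size);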
+ * @ha: pointer to adapter structure + **/ +int +qla8044_collect_md_data(struct scsi_qla_host *vha) +{ + int num_entry_hdr = 0; + struct qla8044_minidump_entry_hdr *entry_hdr; + struct qla8044_minidump_template_hdr *tmplt_hdr; + uint32_t *data_ptr; + uint32_t data_collected = 0, f_capture_mask; + int i, rval = QLA_FUNCTION_FAILED; + uint64_t now; + uint32_t timestamp, idc_control; + struct qla_hw_data *ha = vha->hw; + + if (!ha->md_dump) { + ql_log(ql_log_info, vha, 0xb101, + "%s(%ld) No buffer to dump\n", + __func__, vha->host_no); + return rval; + } + + if (ha->fw_dumped) { + ql_log(ql_log_warn, vha, 0xb10d, + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", ha->fw_dump); + goto md_failed; + } + + ha->fw_dumped = false; + + if (!ha->md_tmplt_hdr || !ha->md_dump) { + ql_log(ql_log_warn, vha, 0xb10e, + "Memory not allocated for minidump capture\n"); + goto md_failed; + } + + qla8044_idc_lock(ha); + idc_control = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); + if (idc_control & GRACEFUL_RESET_BIT1) { + ql_log(ql_log_warn, vha, 0xb112, + "Forced reset from application, " + "ignore minidump capture\n"); + qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, + (idc_control & ~GRACEFUL_RESET_BIT1)); + qla8044_idc_unlock(ha); + + goto md_failed; + } + qla8044_idc_unlock(ha); + + if (qla82xx_validate_template_chksum(vha)) { + ql_log(ql_log_info, vha, 0xb109, + "Template checksum validation error\n"); + goto md_failed; + } + + tmplt_hdr = (struct qla8044_minidump_template_hdr *) + ha->md_tmplt_hdr; + data_ptr = (uint32_t *)((uint8_t *)ha->md_dump); + num_entry_hdr = tmplt_hdr->num_of_entries; + + ql_dbg(ql_dbg_p3p, vha, 0xb11a, + "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); + + f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF; + + /* Validate whether required debug level is set */ + if ((f_capture_mask & 0x3) != 0x3) { + ql_log(ql_log_warn, vha, 0xb10f, + "Minimum required capture mask[0x%x] level not set\n", + f_capture_mask); + + } + tmplt_hdr->driver_capture_mask = ql2xmdcapmask; + ql_log(ql_log_info, vha, 0xb102, + "[%s]: starting data ptr: %p\n", + __func__, data_ptr); + ql_log(ql_log_info, vha, 0xb10b, + "[%s]: no of entry headers in Template: 0x%x\n", + __func__, num_entry_hdr); + ql_log(ql_log_info, vha, 0xb10c, + "[%s]: Total_data_size 0x%x, %d obtained\n", + __func__, ha->md_dump_size, ha->md_dump_size); + + /* Update current timestamp before taking dump */ + now = get_jiffies_64(); + timestamp = (u32)(jiffies_to_msecs(now) / 1000); + tmplt_hdr->driver_timestamp = timestamp; + + entry_hdr = (struct qla8044_minidump_entry_hdr *) + (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset); + tmplt_hdr->saved_state_array[QLA8044_SS_OCM_WNDREG_INDEX] = + tmplt_hdr->ocm_window_reg[ha->portnum]; + + /* Walk through the entry headers - validate/perform required action */ + for (i = 0; i < num_entry_hdr; i++) { + if (data_collected > ha->md_dump_size) { + ql_log(ql_log_info, vha, 0xb103, + "Data collected: [0x%x], " + "Total Dump size: [0x%x]\n", + data_collected, ha->md_dump_size); + return rval; + } + + if (!(entry_hdr->d_ctrl.entry_capture_mask & + ql2xmdcapmask)) { + entry_hdr->d_ctrl.driver_flags |= + QLA82XX_DBG_SKIPPED_FLAG; + goto skip_nxt_entry; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb104, + "Data collected: [0x%x], Dump size left:[0x%x]\n", + data_collected, + (ha->md_dump_size - data_collected)); + + /* Decode the entry type and take required action to capture + * debug data + */ + switch (entry_hdr->entry_type) { + case QLA82XX_RDEND: + 
qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA82XX_CNTRL: + rval = qla8044_minidump_process_control(vha, + entry_hdr); + if (rval != QLA_SUCCESS) { + qla8044_mark_entry_skipped(vha, entry_hdr, i); + goto md_failed; + } + break; + case QLA82XX_RDCRB: + qla8044_minidump_process_rdcrb(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDMEM: + rval = qla8044_minidump_pex_dma_read(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + rval = qla8044_minidump_process_rdmem(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla8044_mark_entry_skipped(vha, + entry_hdr, i); + goto md_failed; + } + } + break; + case QLA82XX_BOARD: + case QLA82XX_RDROM: + rval = qla8044_minidump_process_rdrom(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla8044_mark_entry_skipped(vha, + entry_hdr, i); + } + break; + case QLA82XX_L2DTG: + case QLA82XX_L2ITG: + case QLA82XX_L2DAT: + case QLA82XX_L2INS: + rval = qla8044_minidump_process_l2tag(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla8044_mark_entry_skipped(vha, entry_hdr, i); + goto md_failed; + } + break; + case QLA8044_L1DTG: + case QLA8044_L1ITG: + case QLA82XX_L1DAT: + case QLA82XX_L1INS: + qla8044_minidump_process_l1cache(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDOCM: + qla8044_minidump_process_rdocm(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDMUX: + qla8044_minidump_process_rdmux(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_QUEUE: + qla8044_minidump_process_queue(vha, + entry_hdr, &data_ptr); + break; + case QLA8044_POLLRD: + rval = qla8044_minidump_process_pollrd(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA8044_RDMUX2: + qla8044_minidump_process_rdmux2(vha, + entry_hdr, &data_ptr); + break; + case QLA8044_POLLRDMWR: + rval = qla8044_minidump_process_pollrdmwr(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA8044_RDDFE: + rval = qla8044_minidump_process_rddfe(vha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA8044_RDMDIO: + rval = qla8044_minidump_process_rdmdio(vha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA8044_POLLWR: + rval = qla8044_minidump_process_pollwr(vha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA82XX_RDNOP: + default: + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + } + + data_collected = (uint8_t *)data_ptr - + (uint8_t *)((uint8_t *)ha->md_dump); +skip_nxt_entry: + /* + * next entry in the template + */ + entry_hdr = (struct qla8044_minidump_entry_hdr *) + (((uint8_t *)entry_hdr) + entry_hdr->entry_size); + } + + if (data_collected != ha->md_dump_size) { + ql_log(ql_log_info, vha, 0xb105, + "Dump data mismatch: Data collected: " + "[0x%x], total_data_size:[0x%x]\n", + data_collected, ha->md_dump_size); + rval = QLA_FUNCTION_FAILED; + goto md_failed; + } + + ql_log(ql_log_info, vha, 0xb110, + "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", + vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); + ha->fw_dumped = true; + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); + + + ql_log(ql_log_info, vha, 0xb106, + "Leaving fn: %s Last entry: 0x%x\n", + __func__, i); +md_failed: + return rval; +} + +void +qla8044_get_minidump(struct 
scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (!qla8044_collect_md_data(vha)) { + ha->fw_dumped = true; + ha->prev_minidump_failed = 0; + } else { + ql_log(ql_log_fatal, vha, 0xb0db, + "%s: Unable to collect minidump\n", + __func__); + ha->prev_minidump_failed = 1; + } +} + +static int +qla8044_poll_flash_status_reg(struct scsi_qla_host *vha) +{ + uint32_t flash_status; + int retries = QLA8044_FLASH_READ_RETRY_COUNT; + int ret_val = QLA_SUCCESS; + + while (retries--) { + ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_STATUS, + &flash_status); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb13c, + "%s: Failed to read FLASH_STATUS reg.\n", + __func__); + break; + } + if ((flash_status & QLA8044_FLASH_STATUS_READY) == + QLA8044_FLASH_STATUS_READY) + break; + msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY); + } + + if (!retries) + ret_val = QLA_FUNCTION_FAILED; + + return ret_val; +} + +static int +qla8044_write_flash_status_reg(struct scsi_qla_host *vha, + uint32_t data) +{ + int ret_val = QLA_SUCCESS; + uint32_t cmd; + + cmd = vha->hw->fdt_wrt_sts_reg_cmd; + + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + QLA8044_FLASH_STATUS_WRITE_DEF_SIG | cmd); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb125, + "%s: Failed to write to FLASH_ADDR.\n", __func__); + goto exit_func; + } + + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, data); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb126, + "%s: Failed to write to FLASH_WRDATA.\n", __func__); + goto exit_func; + } + + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, + QLA8044_FLASH_SECOND_ERASE_MS_VAL); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb127, + "%s: Failed to write to FLASH_CONTROL.\n", __func__); + goto exit_func; + } + + ret_val = qla8044_poll_flash_status_reg(vha); + if (ret_val) + ql_log(ql_log_warn, vha, 0xb128, + "%s: Error polling flash status reg.\n", __func__); + +exit_func: + return ret_val; +} + +/* + * This function assumes that the flash lock is held. + */ +static int +qla8044_unprotect_flash(scsi_qla_host_t *vha) +{ + int ret_val; + struct qla_hw_data *ha = vha->hw; + + ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable); + if (ret_val) + ql_log(ql_log_warn, vha, 0xb139, + "%s: Write flash status failed.\n", __func__); + + return ret_val; +} + +/* + * This function assumes that the flash lock is held. 
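+ * It is the counterpart of qla8044_unprotect_flash() above; both go
+ * through qla8044_write_flash_status_reg(), differing only in the
+ * value written (ha->fdt_wrt_disable vs. ha->fdt_wrt_enable).  Typical
+ * call order, as used by qla8044_write_optrom_data() below:
+ *
+ *	qla8044_flash_lock(vha);
+ *	qla8044_unprotect_flash(vha);
+ *	... erase sectors, then program data ...
+ *	qla8044_protect_flash(vha);
+ *	qla8044_flash_unlock(vha);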
+ */ +static int +qla8044_protect_flash(scsi_qla_host_t *vha) +{ + int ret_val; + struct qla_hw_data *ha = vha->hw; + + ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable); + if (ret_val) + ql_log(ql_log_warn, vha, 0xb13b, + "%s: Write flash status failed.\n", __func__); + + return ret_val; +} + + +static int +qla8044_erase_flash_sector(struct scsi_qla_host *vha, + uint32_t sector_start_addr) +{ + uint32_t reversed_addr; + int ret_val = QLA_SUCCESS; + + ret_val = qla8044_poll_flash_status_reg(vha); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb12e, + "%s: Poll flash status after erase failed..\n", __func__); + } + + reversed_addr = (((sector_start_addr & 0xFF) << 16) | + (sector_start_addr & 0xFF00) | + ((sector_start_addr & 0xFF0000) >> 16)); + + ret_val = qla8044_wr_reg_indirect(vha, + QLA8044_FLASH_WRDATA, reversed_addr); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb12f, + "%s: Failed to write to FLASH_WRDATA.\n", __func__); + } + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + QLA8044_FLASH_ERASE_SIG | vha->hw->fdt_erase_cmd); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb130, + "%s: Failed to write to FLASH_ADDR.\n", __func__); + } + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, + QLA8044_FLASH_LAST_ERASE_MS_VAL); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb131, + "%s: Failed write to FLASH_CONTROL.\n", __func__); + } + ret_val = qla8044_poll_flash_status_reg(vha); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb132, + "%s: Poll flash status failed.\n", __func__); + } + + + return ret_val; +} + +/* + * qla8044_flash_write_u32 - Write data to flash + * + * @ha : Pointer to adapter structure + * addr : Flash address to write to + * p_data : Data to be written + * + * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED + * + * NOTE: Lock should be held on entry + */ +static int +qla8044_flash_write_u32(struct scsi_qla_host *vha, uint32_t addr, + uint32_t *p_data) +{ + int ret_val = QLA_SUCCESS; + + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + 0x00800000 | (addr >> 2)); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb134, + "%s: Failed write to FLASH_ADDR.\n", __func__); + goto exit_func; + } + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *p_data); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb135, + "%s: Failed write to FLASH_WRDATA.\n", __func__); + goto exit_func; + } + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 0x3D); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb136, + "%s: Failed write to FLASH_CONTROL.\n", __func__); + goto exit_func; + } + ret_val = qla8044_poll_flash_status_reg(vha); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb137, + "%s: Poll flash status failed.\n", __func__); + } + +exit_func: + return ret_val; +} + +static int +qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha, uint32_t *dwptr, + uint32_t faddr, uint32_t dwords) +{ + int ret = QLA_FUNCTION_FAILED; + uint32_t spi_val; + + if (dwords < QLA8044_MIN_OPTROM_BURST_DWORDS || + dwords > QLA8044_MAX_OPTROM_BURST_DWORDS) { + ql_dbg(ql_dbg_user, vha, 0xb123, + "Got unsupported dwords = 0x%x.\n", + dwords); + return QLA_FUNCTION_FAILED; + } + + qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, + spi_val | QLA8044_FLASH_SPI_CTL); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + QLA8044_FLASH_FIRST_TEMP_VAL); + + /* First DWORD write to FLASH_WRDATA */ + ret = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, + *dwptr++); + 
qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, + QLA8044_FLASH_FIRST_MS_PATTERN); + + ret = qla8044_poll_flash_status_reg(vha); + if (ret) { + ql_log(ql_log_warn, vha, 0xb124, + "%s: Failed.\n", __func__); + goto exit_func; + } + + dwords--; + + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + QLA8044_FLASH_SECOND_TEMP_VAL); + + + /* Second to N-1 DWORDS writes */ + while (dwords != 1) { + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, + QLA8044_FLASH_SECOND_MS_PATTERN); + ret = qla8044_poll_flash_status_reg(vha); + if (ret) { + ql_log(ql_log_warn, vha, 0xb129, + "%s: Failed.\n", __func__); + goto exit_func; + } + dwords--; + } + + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + QLA8044_FLASH_FIRST_TEMP_VAL | (faddr >> 2)); + + /* Last DWORD write */ + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, + QLA8044_FLASH_LAST_MS_PATTERN); + ret = qla8044_poll_flash_status_reg(vha); + if (ret) { + ql_log(ql_log_warn, vha, 0xb12a, + "%s: Failed.\n", __func__); + goto exit_func; + } + qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_STATUS, &spi_val); + + if ((spi_val & QLA8044_FLASH_SPI_CTL) == QLA8044_FLASH_SPI_CTL) { + ql_log(ql_log_warn, vha, 0xb12b, + "%s: Failed.\n", __func__); + spi_val = 0; + /* Operation failed, clear error bit. */ + qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, + &spi_val); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, + spi_val | QLA8044_FLASH_SPI_CTL); + } +exit_func: + return ret; +} + +static int +qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr, + uint32_t faddr, uint32_t dwords) +{ + int ret = QLA_FUNCTION_FAILED; + uint32_t liter; + + for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) { + ret = qla8044_flash_write_u32(vha, faddr, dwptr); + if (ret) { + ql_dbg(ql_dbg_p3p, vha, 0xb141, + "%s: flash address=%x data=%x.\n", __func__, + faddr, *dwptr); + break; + } + } + + return ret; +} + +int +qla8044_write_optrom_data(struct scsi_qla_host *vha, void *buf, + uint32_t offset, uint32_t length) +{ + int rval = QLA_FUNCTION_FAILED, i, burst_iter_count; + int dword_count, erase_sec_count; + uint32_t erase_offset; + uint8_t *p_cache, *p_src; + + erase_offset = offset; + + p_cache = kcalloc(length, sizeof(uint8_t), GFP_KERNEL); + if (!p_cache) + return QLA_FUNCTION_FAILED; + + memcpy(p_cache, buf, length); + p_src = p_cache; + dword_count = length / sizeof(uint32_t); + /* Since the offset and legth are sector aligned, it will be always + * multiple of burst_iter_count (64) + */ + burst_iter_count = dword_count / QLA8044_MAX_OPTROM_BURST_DWORDS; + erase_sec_count = length / QLA8044_SECTOR_SIZE; + + /* Suspend HBA. */ + scsi_block_requests(vha->host); + /* Lock and enable write for whole operation. */ + qla8044_flash_lock(vha); + qla8044_unprotect_flash(vha); + + /* Erasing the sectors */ + for (i = 0; i < erase_sec_count; i++) { + rval = qla8044_erase_flash_sector(vha, erase_offset); + ql_dbg(ql_dbg_user, vha, 0xb138, + "Done erase of sector=0x%x.\n", + erase_offset); + if (rval) { + ql_log(ql_log_warn, vha, 0xb121, + "Failed to erase the sector having address: " + "0x%x.\n", erase_offset); + goto out; + } + erase_offset += QLA8044_SECTOR_SIZE; + } + ql_dbg(ql_dbg_user, vha, 0xb13f, + "Got write for addr = 0x%x length=0x%x.\n", + offset, length); + + for (i = 0; i < burst_iter_count; i++) { + + /* Go with write. 
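+ * Each iteration programs one burst of QLA8044_MAX_OPTROM_BURST_DWORDS
+ * (0x100 / 4 = 64 dwords, i.e. 256 bytes) in flash buffer mode; if
+ * buffer mode fails, the same burst is retried one dword at a time via
+ * qla8044_write_flash_dword_mode().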
*/ + rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src, + offset, QLA8044_MAX_OPTROM_BURST_DWORDS); + if (rval) { + /* Buffer Mode failed skip to dword mode */ + ql_log(ql_log_warn, vha, 0xb122, + "Failed to write flash in buffer mode, " + "Reverting to slow-write.\n"); + rval = qla8044_write_flash_dword_mode(vha, + (uint32_t *)p_src, offset, + QLA8044_MAX_OPTROM_BURST_DWORDS); + } + p_src += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS; + offset += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS; + } + ql_dbg(ql_dbg_user, vha, 0xb133, + "Done writing.\n"); + +out: + qla8044_protect_flash(vha); + qla8044_flash_unlock(vha); + scsi_unblock_requests(vha->host); + kfree(p_cache); + + return rval; +} + +#define LEG_INT_PTR_B31 (1 << 31) +#define LEG_INT_PTR_B30 (1 << 30) +#define PF_BITS_MASK (0xF << 16) +/** + * qla8044_intr_handler() - Process interrupts for the ISP8044 + * @irq: interrupt number + * @dev_id: SCSI driver HA context + * + * Called by system whenever the host adapter generates an interrupt. + * + * Returns handled flag. + */ +irqreturn_t +qla8044_intr_handler(int irq, void *dev_id) +{ + scsi_qla_host_t *vha; + struct qla_hw_data *ha; + struct rsp_que *rsp; + struct device_reg_82xx __iomem *reg; + int status = 0; + unsigned long flags; + unsigned long iter; + uint32_t stat; + uint16_t mb[8]; + uint32_t leg_int_ptr = 0, pf_bit; + + rsp = (struct rsp_que *) dev_id; + if (!rsp) { + ql_log(ql_log_info, NULL, 0xb143, + "%s(): NULL response queue pointer\n", __func__); + return IRQ_NONE; + } + ha = rsp->hw; + vha = pci_get_drvdata(ha->pdev); + + if (unlikely(pci_channel_offline(ha->pdev))) + return IRQ_HANDLED; + + leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET); + + /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */ + if (!(leg_int_ptr & (LEG_INT_PTR_B31))) { + ql_dbg(ql_dbg_p3p, vha, 0xb144, + "%s: Legacy Interrupt Bit 31 not set, " + "spurious interrupt!\n", __func__); + return IRQ_NONE; + } + + pf_bit = ha->portnum << 16; + /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */ + if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) { + ql_dbg(ql_dbg_p3p, vha, 0xb145, + "%s: Incorrect function ID 0x%x in " + "legacy interrupt register, " + "ha->pf_bit = 0x%x\n", __func__, + (leg_int_ptr & (PF_BITS_MASK)), pf_bit); + return IRQ_NONE; + } + + /* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger + * Control register and poll till Legacy Interrupt Pointer register + * bit32 is 0. 
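+ *
+ * Layout of the legacy interrupt pointer register as used here: bit 31
+ * marks a valid legacy interrupt, bits [19..16] carry the PCIe
+ * function number (compared against ha->portnum << 16), and the loop
+ * below polls until LEG_INT_PTR_B30 clears or the PF bits no longer
+ * match this function.  Sketch of the checks (illustrative locals, not
+ * from the driver):
+ *
+ *	valid = leg_int_ptr & LEG_INT_PTR_B31;
+ *	mine  = (leg_int_ptr & PF_BITS_MASK) == (ha->portnum << 16);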
+ */ + qla8044_wr_reg(ha, LEG_INTR_TRIG_OFFSET, 0); + do { + leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET); + if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) + break; + } while (leg_int_ptr & (LEG_INT_PTR_B30)); + + reg = &ha->iobase->isp82; + spin_lock_irqsave(&ha->hardware_lock, flags); + for (iter = 1; iter--; ) { + + if (rd_reg_dword(®->host_int)) { + stat = rd_reg_dword(®->host_status); + if ((stat & HSRX_RISC_INT) == 0) + break; + + switch (stat & 0xff) { + case 0x1: + case 0x2: + case 0x10: + case 0x11: + qla82xx_mbx_completion(vha, MSW(stat)); + status |= MBX_INTERRUPT; + break; + case 0x12: + mb[0] = MSW(stat); + mb[1] = rd_reg_word(®->mailbox_out[1]); + mb[2] = rd_reg_word(®->mailbox_out[2]); + mb[3] = rd_reg_word(®->mailbox_out[3]); + qla2x00_async_event(vha, rsp, mb); + break; + case 0x13: + qla24xx_process_response_queue(vha, rsp); + break; + default: + ql_dbg(ql_dbg_p3p, vha, 0xb146, + "Unrecognized interrupt type " + "(%d).\n", stat & 0xff); + break; + } + } + wrt_reg_dword(®->host_int, 0); + } + + qla2x00_handle_mbx_completion(ha, status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return IRQ_HANDLED; +} + +static int +qla8044_idc_dontreset(struct qla_hw_data *ha) +{ + uint32_t idc_ctrl; + + idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); + return idc_ctrl & DONTRESET_BIT0; +} + +static void +qla8044_clear_rst_ready(scsi_qla_host_t *vha) +{ + uint32_t drv_state; + + drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + + /* + * For ISP8044, drv_active register has 1 bit per function, + * shift 1 by func_num to set a bit for the function. + * For ISP82xx, drv_active has 4 bits per function + */ + drv_state &= ~(1 << vha->hw->portnum); + + ql_dbg(ql_dbg_p3p, vha, 0xb13d, + "drv_state: 0x%08x\n", drv_state); + qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state); +} + +int +qla8044_abort_isp(scsi_qla_host_t *vha) +{ + int rval; + uint32_t dev_state; + struct qla_hw_data *ha = vha->hw; + + qla8044_idc_lock(ha); + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + + if (ql2xdontresethba) + qla8044_set_idc_dontreset(vha); + + /* If device_state is NEED_RESET, go ahead with + * Reset,irrespective of ql2xdontresethba. This is to allow a + * non-reset-owner to force a reset. Non-reset-owner sets + * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset + * and then forces a Reset by setting device_state to + * NEED_RESET. */ + if (dev_state == QLA8XXX_DEV_READY) { + /* If IDC_CTRL DONTRESETHBA_BIT0 is set don't do reset + * recovery */ + if (qla8044_idc_dontreset(ha) == DONTRESET_BIT0) { + ql_dbg(ql_dbg_p3p, vha, 0xb13e, + "Reset recovery disabled\n"); + rval = QLA_FUNCTION_FAILED; + goto exit_isp_reset; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb140, + "HW State: NEED RESET\n"); + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_NEED_RESET); + } + + /* For ISP8044, Reset owner is NIC, iSCSI or FCOE based on priority + * and which drivers are present. Unlike ISP82XX, the function setting + * NEED_RESET, may not be the Reset owner. 
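+ *
+ * The flow below is therefore: resolve reset ownership via
+ * qla83xx_reset_ownership(), run qla8044_device_state_handler() with
+ * the IDC lock dropped, clear this function's bit in DRV_STATE via
+ * qla8044_clear_rst_ready(), and on success restart the ISP through
+ * qla82xx_restart_isp().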
*/ + qla83xx_reset_ownership(vha); + + qla8044_idc_unlock(ha); + rval = qla8044_device_state_handler(vha); + qla8044_idc_lock(ha); + qla8044_clear_rst_ready(vha); + +exit_isp_reset: + qla8044_idc_unlock(ha); + if (rval == QLA_SUCCESS) { + ha->flags.isp82xx_fw_hung = 0; + ha->flags.nic_core_reset_hdlr_active = 0; + rval = qla82xx_restart_isp(vha); + } + + return rval; +} + +void +qla8044_fw_dump(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (!ha->allow_cna_fw_dump) + return; + + scsi_block_requests(vha->host); + ha->flags.isp82xx_no_md_cap = 1; + qla8044_idc_lock(ha); + qla82xx_set_reset_owner(vha); + qla8044_idc_unlock(ha); + qla2x00_wait_for_chip_reset(vha); + scsi_unblock_requests(vha->host); +} diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h new file mode 100644 index 000000000..2fc902a9f --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_nx2.h @@ -0,0 +1,579 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ + +#ifndef __QLA_NX2_H +#define __QLA_NX2_H + +#define QSNT_ACK_TOV 30 +#define INTENT_TO_RECOVER 0x01 +#define PROCEED_TO_RECOVER 0x02 +#define IDC_LOCK_RECOVERY_OWNER_MASK 0x3C +#define IDC_LOCK_RECOVERY_STATE_MASK 0x3 +#define IDC_LOCK_RECOVERY_STATE_SHIFT_BITS 2 + +#define QLA8044_DRV_LOCK_MSLEEP 200 +#define QLA8044_ADDR_DDR_NET (0x0000000000000000ULL) +#define QLA8044_ADDR_DDR_NET_MAX (0x000000000fffffffULL) + +#define MD_MIU_TEST_AGT_WRDATA_LO 0x410000A0 +#define MD_MIU_TEST_AGT_WRDATA_HI 0x410000A4 +#define MD_MIU_TEST_AGT_WRDATA_ULO 0x410000B0 +#define MD_MIU_TEST_AGT_WRDATA_UHI 0x410000B4 + +/* MIU_TEST_AGT_CTRL flags. work for SIU as well */ +#define MIU_TA_CTL_WRITE_ENABLE (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE) +#define MIU_TA_CTL_WRITE_START (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE | \ + MIU_TA_CTL_START) +#define MIU_TA_CTL_START_ENABLE (MIU_TA_CTL_START | MIU_TA_CTL_ENABLE) + +/* Imbus address bit used to indicate a host address. This bit is + * eliminated by the pcie bar and bar select before presentation + * over pcie. */ +/* host memory via IMBUS */ +#define QLA8044_P2_ADDR_PCIE (0x0000000800000000ULL) +#define QLA8044_P3_ADDR_PCIE (0x0000008000000000ULL) +#define QLA8044_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL) +#define QLA8044_ADDR_OCM0 (0x0000000200000000ULL) +#define QLA8044_ADDR_OCM0_MAX (0x00000002000fffffULL) +#define QLA8044_ADDR_OCM1 (0x0000000200400000ULL) +#define QLA8044_ADDR_OCM1_MAX (0x00000002004fffffULL) +#define QLA8044_ADDR_QDR_NET (0x0000000300000000ULL) +#define QLA8044_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL) +#define QLA8044_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) +#define QLA8044_ADDR_QDR_NET_MAX (0x0000000307ffffffULL) +#define QLA8044_PCI_CRBSPACE ((unsigned long)0x06000000) +#define QLA8044_PCI_DIRECT_CRB ((unsigned long)0x04400000) +#define QLA8044_PCI_CAMQM ((unsigned long)0x04800000) +#define QLA8044_PCI_CAMQM_MAX ((unsigned long)0x04ffffff) +#define QLA8044_PCI_DDR_NET ((unsigned long)0x00000000) +#define QLA8044_PCI_QDR_NET ((unsigned long)0x04000000) +#define QLA8044_PCI_QDR_NET_MAX ((unsigned long)0x043fffff) + +/* PCI Windowing for DDR regions. 
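+ * The addr_in_range() helper below classifies 64-bit chip addresses
+ * against the windows defined above, e.g. (illustrative usage):
+ *
+ *	addr_in_range(addr, QLA8044_ADDR_DDR_NET,
+ *		      QLA8044_ADDR_DDR_NET_MAX)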
*/ +static inline bool addr_in_range(u64 addr, u64 low, u64 high) +{ + return addr <= high && addr >= low; +} + +/* Indirectly Mapped Registers */ +#define QLA8044_FLASH_SPI_STATUS 0x2808E010 +#define QLA8044_FLASH_SPI_CONTROL 0x2808E014 +#define QLA8044_FLASH_STATUS 0x42100004 +#define QLA8044_FLASH_CONTROL 0x42110004 +#define QLA8044_FLASH_ADDR 0x42110008 +#define QLA8044_FLASH_WRDATA 0x4211000C +#define QLA8044_FLASH_RDDATA 0x42110018 +#define QLA8044_FLASH_DIRECT_WINDOW 0x42110030 +#define QLA8044_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA)) + +/* Flash access regs */ +#define QLA8044_FLASH_LOCK 0x3850 +#define QLA8044_FLASH_UNLOCK 0x3854 +#define QLA8044_FLASH_LOCK_ID 0x3500 + +/* Driver Lock regs */ +#define QLA8044_DRV_LOCK 0x3868 +#define QLA8044_DRV_UNLOCK 0x386C +#define QLA8044_DRV_LOCK_ID 0x3504 +#define QLA8044_DRV_LOCKRECOVERY 0x379C + +/* IDC version */ +#define QLA8044_IDC_VER_MAJ_VALUE 0x1 +#define QLA8044_IDC_VER_MIN_VALUE 0x0 + +/* IDC Registers : Driver Coexistence Defines */ +#define QLA8044_CRB_IDC_VER_MAJOR 0x3780 +#define QLA8044_CRB_IDC_VER_MINOR 0x3798 +#define QLA8044_IDC_DRV_AUDIT 0x3794 +#define QLA8044_SRE_SHIM_CONTROL 0x0D200284 +#define QLA8044_PORT0_RXB_PAUSE_THRS 0x0B2003A4 +#define QLA8044_PORT1_RXB_PAUSE_THRS 0x0B2013A4 +#define QLA8044_PORT0_RXB_TC_MAX_CELL 0x0B200388 +#define QLA8044_PORT1_RXB_TC_MAX_CELL 0x0B201388 +#define QLA8044_PORT0_RXB_TC_STATS 0x0B20039C +#define QLA8044_PORT1_RXB_TC_STATS 0x0B20139C +#define QLA8044_PORT2_IFB_PAUSE_THRS 0x0B200704 +#define QLA8044_PORT3_IFB_PAUSE_THRS 0x0B201704 + +/* set value to pause threshold value */ +#define QLA8044_SET_PAUSE_VAL 0x0 +#define QLA8044_SET_TC_MAX_CELL_VAL 0x03FF03FF +#define QLA8044_PEG_HALT_STATUS1 0x34A8 +#define QLA8044_PEG_HALT_STATUS2 0x34AC +#define QLA8044_PEG_ALIVE_COUNTER 0x34B0 /* FW_HEARTBEAT */ +#define QLA8044_FW_CAPABILITIES 0x3528 +#define QLA8044_CRB_DRV_ACTIVE 0x3788 /* IDC_DRV_PRESENCE */ +#define QLA8044_CRB_DEV_STATE 0x3784 /* IDC_DEV_STATE */ +#define QLA8044_CRB_DRV_STATE 0x378C /* IDC_DRV_ACK */ +#define QLA8044_CRB_DRV_SCRATCH 0x3548 +#define QLA8044_CRB_DEV_PART_INFO1 0x37E0 +#define QLA8044_CRB_DEV_PART_INFO2 0x37E4 +#define QLA8044_FW_VER_MAJOR 0x3550 +#define QLA8044_FW_VER_MINOR 0x3554 +#define QLA8044_FW_VER_SUB 0x3558 +#define QLA8044_NPAR_STATE 0x359C +#define QLA8044_FW_IMAGE_VALID 0x35FC +#define QLA8044_CMDPEG_STATE 0x3650 +#define QLA8044_ASIC_TEMP 0x37B4 +#define QLA8044_FW_API 0x356C +#define QLA8044_DRV_OP_MODE 0x3570 +#define QLA8044_CRB_WIN_BASE 0x3800 +#define QLA8044_CRB_WIN_FUNC(f) (QLA8044_CRB_WIN_BASE+((f)*4)) +#define QLA8044_SEM_LOCK_BASE 0x3840 +#define QLA8044_SEM_UNLOCK_BASE 0x3844 +#define QLA8044_SEM_LOCK_FUNC(f) (QLA8044_SEM_LOCK_BASE+((f)*8)) +#define QLA8044_SEM_UNLOCK_FUNC(f) (QLA8044_SEM_UNLOCK_BASE+((f)*8)) +#define QLA8044_LINK_STATE(f) (0x3698+((f) > 7 ? 
4 : 0)) +#define QLA8044_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4)) +#define QLA8044_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4)) +#define QLA8044_LINK_SPEED_FACTOR 10 +#define QLA8044_FUN7_ACTIVE_INDEX 0x80 + +/* FLASH API Defines */ +#define QLA8044_FLASH_MAX_WAIT_USEC 100 +#define QLA8044_FLASH_LOCK_TIMEOUT 10000 +#define QLA8044_FLASH_SECTOR_SIZE 65536 +#define QLA8044_DRV_LOCK_TIMEOUT 2000 +#define QLA8044_FLASH_SECTOR_ERASE_CMD 0xdeadbeef +#define QLA8044_FLASH_WRITE_CMD 0xdacdacda +#define QLA8044_FLASH_BUFFER_WRITE_CMD 0xcadcadca +#define QLA8044_FLASH_READ_RETRY_COUNT 2000 +#define QLA8044_FLASH_STATUS_READY 0x6 +#define QLA8044_FLASH_BUFFER_WRITE_MIN 2 +#define QLA8044_FLASH_BUFFER_WRITE_MAX 64 +#define QLA8044_FLASH_STATUS_REG_POLL_DELAY 1 +#define QLA8044_ERASE_MODE 1 +#define QLA8044_WRITE_MODE 2 +#define QLA8044_DWORD_WRITE_MODE 3 +#define QLA8044_GLOBAL_RESET 0x38CC +#define QLA8044_WILDCARD 0x38F0 +#define QLA8044_INFORMANT 0x38FC +#define QLA8044_HOST_MBX_CTRL 0x3038 +#define QLA8044_FW_MBX_CTRL 0x303C +#define QLA8044_BOOTLOADER_ADDR 0x355C +#define QLA8044_BOOTLOADER_SIZE 0x3560 +#define QLA8044_FW_IMAGE_ADDR 0x3564 +#define QLA8044_MBX_INTR_ENABLE 0x1000 +#define QLA8044_MBX_INTR_MASK 0x1200 + +/* IDC Control Register bit defines */ +#define DONTRESET_BIT0 0x1 +#define GRACEFUL_RESET_BIT1 0x2 + +/* ISP8044 PEG_HALT_STATUS1 bits */ +#define QLA8044_HALT_STATUS_INFORMATIONAL (0x1 << 29) +#define QLA8044_HALT_STATUS_FW_RESET (0x2 << 29) +#define QLA8044_HALT_STATUS_UNRECOVERABLE (0x4 << 29) + +/* Firmware image definitions */ +#define QLA8044_BOOTLOADER_FLASH_ADDR 0x10000 +#define QLA8044_BOOT_FROM_FLASH 0 +#define QLA8044_IDC_PARAM_ADDR 0x3e8020 + +/* FLASH related definitions */ +#define QLA8044_OPTROM_BURST_SIZE 0x100 +#define QLA8044_MAX_OPTROM_BURST_DWORDS (QLA8044_OPTROM_BURST_SIZE / 4) +#define QLA8044_MIN_OPTROM_BURST_DWORDS 2 +#define QLA8044_SECTOR_SIZE (64 * 1024) + +#define QLA8044_FLASH_SPI_CTL 0x4 +#define QLA8044_FLASH_FIRST_TEMP_VAL 0x00800000 +#define QLA8044_FLASH_SECOND_TEMP_VAL 0x00800001 +#define QLA8044_FLASH_FIRST_MS_PATTERN 0x43 +#define QLA8044_FLASH_SECOND_MS_PATTERN 0x7F +#define QLA8044_FLASH_LAST_MS_PATTERN 0x7D +#define QLA8044_FLASH_STATUS_WRITE_DEF_SIG 0xFD0100 +#define QLA8044_FLASH_SECOND_ERASE_MS_VAL 0x5 +#define QLA8044_FLASH_ERASE_SIG 0xFD0300 +#define QLA8044_FLASH_LAST_ERASE_MS_VAL 0x3D + +/* Reset template definitions */ +#define QLA8044_MAX_RESET_SEQ_ENTRIES 16 +#define QLA8044_RESTART_TEMPLATE_SIZE 0x2000 +#define QLA8044_RESET_TEMPLATE_ADDR 0x4F0000 +#define QLA8044_RESET_SEQ_VERSION 0x0101 + +/* Reset template entry opcodes */ +#define OPCODE_NOP 0x0000 +#define OPCODE_WRITE_LIST 0x0001 +#define OPCODE_READ_WRITE_LIST 0x0002 +#define OPCODE_POLL_LIST 0x0004 +#define OPCODE_POLL_WRITE_LIST 0x0008 +#define OPCODE_READ_MODIFY_WRITE 0x0010 +#define OPCODE_SEQ_PAUSE 0x0020 +#define OPCODE_SEQ_END 0x0040 +#define OPCODE_TMPL_END 0x0080 +#define OPCODE_POLL_READ_LIST 0x0100 + +/* Template Header */ +#define RESET_TMPLT_HDR_SIGNATURE 0xCAFE +#define QLA8044_IDC_DRV_CTRL 0x3790 +#define AF_8044_NO_FW_DUMP 27 /* 0x08000000 */ + +#define MINIDUMP_SIZE_36K 36864 + +struct qla8044_reset_template_hdr { + uint16_t version; + uint16_t signature; + uint16_t size; + uint16_t entries; + uint16_t hdr_size; + uint16_t checksum; + uint16_t init_seq_offset; + uint16_t start_seq_offset; +} __packed; + +/* Common Entry Header. 
*/ +struct qla8044_reset_entry_hdr { + uint16_t cmd; + uint16_t size; + uint16_t count; + uint16_t delay; +} __packed; + +/* Generic poll entry type. */ +struct qla8044_poll { + uint32_t test_mask; + uint32_t test_value; +} __packed; + +/* Read modify write entry type. */ +struct qla8044_rmw { + uint32_t test_mask; + uint32_t xor_value; + uint32_t or_value; + uint8_t shl; + uint8_t shr; + uint8_t index_a; + uint8_t rsvd; +} __packed; + +/* Generic Entry Item with 2 DWords. */ +struct qla8044_entry { + uint32_t arg1; + uint32_t arg2; +} __packed; + +/* Generic Entry Item with 4 DWords.*/ +struct qla8044_quad_entry { + uint32_t dr_addr; + uint32_t dr_value; + uint32_t ar_addr; + uint32_t ar_value; +} __packed; + +struct qla8044_reset_template { + int seq_index; + int seq_error; + int array_index; + uint32_t array[QLA8044_MAX_RESET_SEQ_ENTRIES]; + uint8_t *buff; + uint8_t *stop_offset; + uint8_t *start_offset; + uint8_t *init_offset; + struct qla8044_reset_template_hdr *hdr; + uint8_t seq_end; + uint8_t template_end; +}; + +/* Driver_code is for driver to write some info about the entry + * currently not used. + */ +struct qla8044_minidump_entry_hdr { + uint32_t entry_type; + uint32_t entry_size; + uint32_t entry_capture_size; + struct { + uint8_t entry_capture_mask; + uint8_t entry_code; + uint8_t driver_code; + uint8_t driver_flags; + } d_ctrl; +} __packed; + +/* Read CRB entry header */ +struct qla8044_minidump_entry_crb { + struct qla8044_minidump_entry_hdr h; + uint32_t addr; + struct { + uint8_t addr_stride; + uint8_t state_index_a; + uint16_t poll_timeout; + } crb_strd; + uint32_t data_size; + uint32_t op_count; + + struct { + uint8_t opcode; + uint8_t state_index_v; + uint8_t shl; + uint8_t shr; + } crb_ctrl; + + uint32_t value_1; + uint32_t value_2; + uint32_t value_3; +} __packed; + +struct qla8044_minidump_entry_cache { + struct qla8044_minidump_entry_hdr h; + uint32_t tag_reg_addr; + struct { + uint16_t tag_value_stride; + uint16_t init_tag_value; + } addr_ctrl; + uint32_t data_size; + uint32_t op_count; + uint32_t control_addr; + struct { + uint16_t write_value; + uint8_t poll_mask; + uint8_t poll_wait; + } cache_ctrl; + uint32_t read_addr; + struct { + uint8_t read_addr_stride; + uint8_t read_addr_cnt; + uint16_t rsvd_1; + } read_ctrl; +} __packed; + +/* Read OCM */ +struct qla8044_minidump_entry_rdocm { + struct qla8044_minidump_entry_hdr h; + uint32_t rsvd_0; + uint32_t rsvd_1; + uint32_t data_size; + uint32_t op_count; + uint32_t rsvd_2; + uint32_t rsvd_3; + uint32_t read_addr; + uint32_t read_addr_stride; +} __packed; + +/* Read Memory */ +struct qla8044_minidump_entry_rdmem { + struct qla8044_minidump_entry_hdr h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +}; + +/* Read Memory: For Pex-DMA */ +struct qla8044_minidump_entry_rdmem_pex_dma { + struct qla8044_minidump_entry_hdr h; + uint32_t desc_card_addr; + uint16_t dma_desc_cmd; + uint8_t rsvd[2]; + uint32_t start_dma_cmd; + uint8_t rsvd2[12]; + uint32_t read_addr; + uint32_t read_data_size; +} __packed; + +/* Read ROM */ +struct qla8044_minidump_entry_rdrom { + struct qla8044_minidump_entry_hdr h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +} __packed; + +/* Mux entry */ +struct qla8044_minidump_entry_mux { + struct qla8044_minidump_entry_hdr h; + uint32_t select_addr; + uint32_t rsvd_0; + uint32_t data_size; + uint32_t op_count; + uint32_t select_value; + uint32_t select_value_stride; + uint32_t read_addr; + uint32_t rsvd_1; +} __packed; + +/* Queue entry */ +struct 
qla8044_minidump_entry_queue { + struct qla8044_minidump_entry_hdr h; + uint32_t select_addr; + struct { + uint16_t queue_id_stride; + uint16_t rsvd_0; + } q_strd; + uint32_t data_size; + uint32_t op_count; + uint32_t rsvd_1; + uint32_t rsvd_2; + uint32_t read_addr; + struct { + uint8_t read_addr_stride; + uint8_t read_addr_cnt; + uint16_t rsvd_3; + } rd_strd; +} __packed; + +/* POLLRD Entry */ +struct qla8044_minidump_entry_pollrd { + struct qla8044_minidump_entry_hdr h; + uint32_t select_addr; + uint32_t read_addr; + uint32_t select_value; + uint16_t select_value_stride; + uint16_t op_count; + uint32_t poll_wait; + uint32_t poll_mask; + uint32_t data_size; + uint32_t rsvd_1; +} __packed; + +struct qla8044_minidump_entry_rddfe { + struct qla8044_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t value; + uint8_t stride; + uint8_t stride2; + uint16_t count; + uint32_t poll; + uint32_t mask; + uint32_t modify_mask; + uint32_t data_size; + uint32_t rsvd; + +} __packed; + +struct qla8044_minidump_entry_rdmdio { + struct qla8044_minidump_entry_hdr h; + + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint8_t stride_1; + uint8_t stride_2; + uint16_t count; + uint32_t poll; + uint32_t mask; + uint32_t value_2; + uint32_t data_size; + +} __packed; + +struct qla8044_minidump_entry_pollwr { + struct qla8044_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint32_t value_2; + uint32_t poll; + uint32_t mask; + uint32_t data_size; + uint32_t rsvd; + +} __packed; + +/* RDMUX2 Entry */ +struct qla8044_minidump_entry_rdmux2 { + struct qla8044_minidump_entry_hdr h; + uint32_t select_addr_1; + uint32_t select_addr_2; + uint32_t select_value_1; + uint32_t select_value_2; + uint32_t op_count; + uint32_t select_value_mask; + uint32_t read_addr; + uint8_t select_value_stride; + uint8_t data_size; + uint8_t rsvd[2]; +} __packed; + +/* POLLRDMWR Entry */ +struct qla8044_minidump_entry_pollrdmwr { + struct qla8044_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint32_t value_2; + uint32_t poll_wait; + uint32_t poll_mask; + uint32_t modify_mask; + uint32_t data_size; +} __packed; + +/* IDC additional information */ +struct qla8044_idc_information { + uint32_t request_desc; /* IDC request descriptor */ + uint32_t info1; /* IDC additional info */ + uint32_t info2; /* IDC additional info */ + uint32_t info3; /* IDC additional info */ +} __packed; + +enum qla_regs { + QLA8044_PEG_HALT_STATUS1_INDEX = 0, + QLA8044_PEG_HALT_STATUS2_INDEX, + QLA8044_PEG_ALIVE_COUNTER_INDEX, + QLA8044_CRB_DRV_ACTIVE_INDEX, + QLA8044_CRB_DEV_STATE_INDEX, + QLA8044_CRB_DRV_STATE_INDEX, + QLA8044_CRB_DRV_SCRATCH_INDEX, + QLA8044_CRB_DEV_PART_INFO_INDEX, + QLA8044_CRB_DRV_IDC_VERSION_INDEX, + QLA8044_FW_VERSION_MAJOR_INDEX, + QLA8044_FW_VERSION_MINOR_INDEX, + QLA8044_FW_VERSION_SUB_INDEX, + QLA8044_CRB_CMDPEG_STATE_INDEX, + QLA8044_CRB_TEMP_STATE_INDEX, +} __packed; + +#define CRB_REG_INDEX_MAX 14 +#define CRB_CMDPEG_CHECK_RETRY_COUNT 60 +#define CRB_CMDPEG_CHECK_DELAY 500 + +/* MiniDump Structures */ + +/* Driver_code is for driver to write some info about the entry + * currently not used. 
+ */ +#define QLA8044_SS_OCM_WNDREG_INDEX 3 +#define QLA8044_DBG_STATE_ARRAY_LEN 16 +#define QLA8044_DBG_CAP_SIZE_ARRAY_LEN 8 +#define QLA8044_DBG_RSVD_ARRAY_LEN 8 +#define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN 16 +#define QLA8044_SS_PCI_INDEX 0 +#define QLA8044_RDDFE 38 +#define QLA8044_RDMDIO 39 +#define QLA8044_POLLWR 40 + +struct qla8044_minidump_template_hdr { + uint32_t entry_type; + uint32_t first_entry_offset; + uint32_t size_of_template; + uint32_t capture_debug_level; + uint32_t num_of_entries; + uint32_t version; + uint32_t driver_timestamp; + uint32_t checksum; + + uint32_t driver_capture_mask; + uint32_t driver_info_word2; + uint32_t driver_info_word3; + uint32_t driver_info_word4; + + uint32_t saved_state_array[QLA8044_DBG_STATE_ARRAY_LEN]; + uint32_t capture_size_array[QLA8044_DBG_CAP_SIZE_ARRAY_LEN]; + uint32_t ocm_window_reg[QLA8044_DBG_OCM_WNDREG_ARRAY_LEN]; +}; + +struct qla8044_pex_dma_descriptor { + struct { + uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */ + uint8_t rsvd[2]; + uint16_t dma_desc_cmd; + } cmd; + uint64_t src_addr; + uint64_t dma_bus_addr; /*0-3: desc-cmd, 4-7: pci-func, 8-15: desc-cmd*/ + uint8_t rsvd[24]; +} __packed; + +#endif diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c new file mode 100644 index 000000000..c45eef743 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -0,0 +1,8379 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#include "qla_def.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "qla_target.h" + +/* + * Driver version + */ +char qla2x00_version_str[40]; + +static int apidev_major; + +/* + * SRB allocation cache + */ +struct kmem_cache *srb_cachep; + +static struct trace_array *qla_trc_array; + +int ql2xfulldump_on_mpifail; +module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ql2xfulldump_on_mpifail, + "Set this to take full dump on MPI hang."); + +int ql2xenforce_iocb_limit = 2; +module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ql2xenforce_iocb_limit, + "Enforce IOCB throttling, to avoid FW congestion. (default: 2) " + "1: track usage per queue, 2: track usage per adapter"); + +/* + * CT6 CTX allocation cache + */ +static struct kmem_cache *ctx_cachep; +/* + * error level for logging + */ +uint ql_errlev = 0x8001; + +int ql2xsecenable; +module_param(ql2xsecenable, int, S_IRUGO); +MODULE_PARM_DESC(ql2xsecenable, + "Enable/disable security. 0(Default) - Security disabled. 1 - Security enabled."); + +static int ql2xenableclass2; +module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR); +MODULE_PARM_DESC(ql2xenableclass2, + "Specify if Class 2 operations are supported from the very " + "beginning. Default is 0 - class 2 not supported."); + + +int ql2xlogintimeout = 20; +module_param(ql2xlogintimeout, int, S_IRUGO); +MODULE_PARM_DESC(ql2xlogintimeout, + "Login timeout value in seconds."); + +int qlport_down_retry; +module_param(qlport_down_retry, int, S_IRUGO); +MODULE_PARM_DESC(qlport_down_retry, + "Maximum number of command retries to a port that returns " + "a PORT-DOWN status."); + +int ql2xplogiabsentdevice; +module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xplogiabsentdevice, + "Option to enable PLOGI to devices that are not present after " + "a Fabric scan. 
This is needed for several broken switches. " + "Default is 0 - no PLOGI. 1 - perform PLOGI."); + +int ql2xloginretrycount; +module_param(ql2xloginretrycount, int, S_IRUGO); +MODULE_PARM_DESC(ql2xloginretrycount, + "Specify an alternate value for the NVRAM login retry count."); + +int ql2xallocfwdump = 1; +module_param(ql2xallocfwdump, int, S_IRUGO); +MODULE_PARM_DESC(ql2xallocfwdump, + "Option to enable allocation of memory for a firmware dump " + "during HBA initialization. Memory allocation requirements " + "vary by ISP type. Default is 1 - allocate memory."); + +int ql2xextended_error_logging; +module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR); +module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xextended_error_logging, + "Option to enable extended error logging,\n" + "\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n" + "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n" + "\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n" + "\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n" + "\t\t0x00800000 - User space. 0x00400000 - Task Management.\n" + "\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n" + "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n" + "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n" + "\t\t0x00008000 - Verbose. 0x00004000 - Target.\n" + "\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n" + "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n" + "\t\t0x1e400000 - Preferred value for capturing essential " + "debug information (equivalent to old " + "ql2xextended_error_logging=1).\n" + "\t\tDo LOGICAL OR of the value to enable more than one level"); + +int ql2xextended_error_logging_ktrace = 1; +module_param(ql2xextended_error_logging_ktrace, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xextended_error_logging_ktrace, + "Same BIT definition as ql2xextended_error_logging, but used to control logging to kernel trace buffer (default=1).\n"); + +int ql2xshiftctondsd = 6; +module_param(ql2xshiftctondsd, int, S_IRUGO); +MODULE_PARM_DESC(ql2xshiftctondsd, + "Set to control shifting of command type processing " + "based on total number of SG elements."); + +int ql2xfdmienable = 1; +module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR); +module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xfdmienable, + "Enables FDMI registrations. " + "0 - no FDMI registrations. " + "1 - provide FDMI registrations (default)."); + +#define MAX_Q_DEPTH 64 +static int ql2xmaxqdepth = MAX_Q_DEPTH; +module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xmaxqdepth, + "Maximum queue depth to set for each LUN. " + "Default is 64."); + +int ql2xenabledif = 2; +module_param(ql2xenabledif, int, S_IRUGO); +MODULE_PARM_DESC(ql2xenabledif, + " Enable T10-CRC-DIF:\n" + " Default is 2.\n" + " 0 -- No DIF Support\n" + " 1 -- Enable DIF for all types\n" + " 2 -- Enable DIF for all types, except Type 0.\n"); + +#if (IS_ENABLED(CONFIG_NVME_FC)) +int ql2xnvmeenable = 1; +#else +int ql2xnvmeenable; +#endif +module_param(ql2xnvmeenable, int, 0644); +MODULE_PARM_DESC(ql2xnvmeenable, + "Enables NVME support. " + "0 - no NVMe. 
Default is Y"); + +int ql2xenablehba_err_chk = 2; +module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xenablehba_err_chk, + " Enable T10-CRC-DIF Error isolation by HBA:\n" + " Default is 2.\n" + " 0 -- Error isolation disabled\n" + " 1 -- Error isolation enabled only for DIX Type 0\n" + " 2 -- Error isolation enabled for all Types\n"); + +int ql2xiidmaenable = 1; +module_param(ql2xiidmaenable, int, S_IRUGO); +MODULE_PARM_DESC(ql2xiidmaenable, + "Enables iIDMA settings " + "Default is 1 - perform iIDMA. 0 - no iIDMA."); + +int ql2xmqsupport = 1; +module_param(ql2xmqsupport, int, S_IRUGO); +MODULE_PARM_DESC(ql2xmqsupport, + "Enable on demand multiple queue pairs support " + "Default is 1 for supported. " + "Set it to 0 to turn off mq qpair support."); + +int ql2xfwloadbin; +module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR); +module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xfwloadbin, + "Option to specify location from which to load ISP firmware:.\n" + " 2 -- load firmware via the request_firmware() (hotplug).\n" + " interface.\n" + " 1 -- load firmware from flash.\n" + " 0 -- use default semantics.\n"); + +int ql2xetsenable; +module_param(ql2xetsenable, int, S_IRUGO); +MODULE_PARM_DESC(ql2xetsenable, + "Enables firmware ETS burst." + "Default is 0 - skip ETS enablement."); + +int ql2xdbwr = 1; +module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xdbwr, + "Option to specify scheme for request queue posting.\n" + " 0 -- Regular doorbell.\n" + " 1 -- CAMRAM doorbell (faster).\n"); + +int ql2xgffidenable; +module_param(ql2xgffidenable, int, S_IRUGO); +MODULE_PARM_DESC(ql2xgffidenable, + "Enables GFF_ID checks of port type. " + "Default is 0 - Do not use GFF_ID information."); + +int ql2xasynctmfenable = 1; +module_param(ql2xasynctmfenable, int, S_IRUGO); +MODULE_PARM_DESC(ql2xasynctmfenable, + "Enables issue of TM IOCBs asynchronously via IOCB mechanism" + "Default is 1 - Issue TM IOCBs via mailbox mechanism."); + +int ql2xdontresethba; +module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xdontresethba, + "Option to specify reset behaviour.\n" + " 0 (Default) -- Reset on failure.\n" + " 1 -- Do not reset on failure.\n"); + +uint64_t ql2xmaxlun = MAX_LUNS; +module_param(ql2xmaxlun, ullong, S_IRUGO); +MODULE_PARM_DESC(ql2xmaxlun, + "Defines the maximum LU number to register with the SCSI " + "midlayer. Default is 65535."); + +int ql2xmdcapmask = 0x1F; +module_param(ql2xmdcapmask, int, S_IRUGO); +MODULE_PARM_DESC(ql2xmdcapmask, + "Set the Minidump driver capture mask level. " + "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F."); + +int ql2xmdenable = 1; +module_param(ql2xmdenable, int, S_IRUGO); +MODULE_PARM_DESC(ql2xmdenable, + "Enable/disable MiniDump. " + "0 - MiniDump disabled. " + "1 (Default) - MiniDump enabled."); + +int ql2xexlogins; +module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xexlogins, + "Number of extended Logins. " + "0 (Default)- Disabled."); + +int ql2xexchoffld = 1024; +module_param(ql2xexchoffld, uint, 0644); +MODULE_PARM_DESC(ql2xexchoffld, + "Number of target exchanges."); + +int ql2xiniexchg = 1024; +module_param(ql2xiniexchg, uint, 0644); +MODULE_PARM_DESC(ql2xiniexchg, + "Number of initiator exchanges."); + +int ql2xfwholdabts; +module_param(ql2xfwholdabts, int, S_IRUGO); +MODULE_PARM_DESC(ql2xfwholdabts, + "Allow FW to hold status IOCB until ABTS rsp received. " + "0 (Default) Do not set fw option. 
" + "1 - Set fw option to hold ABTS."); + +int ql2xmvasynctoatio = 1; +module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xmvasynctoatio, + "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ" + "0 (Default). Do not move IOCBs" + "1 - Move IOCBs."); + +int ql2xautodetectsfp = 1; +module_param(ql2xautodetectsfp, int, 0444); +MODULE_PARM_DESC(ql2xautodetectsfp, + "Detect SFP range and set appropriate distance.\n" + "1 (Default): Enable\n"); + +int ql2xenablemsix = 1; +module_param(ql2xenablemsix, int, 0444); +MODULE_PARM_DESC(ql2xenablemsix, + "Set to enable MSI or MSI-X interrupt mechanism.\n" + " Default is 1, enable MSI-X interrupt mechanism.\n" + " 0 -- enable traditional pin-based mechanism.\n" + " 1 -- enable MSI-X interrupt mechanism.\n" + " 2 -- enable MSI interrupt mechanism.\n"); + +int qla2xuseresexchforels; +module_param(qla2xuseresexchforels, int, 0444); +MODULE_PARM_DESC(qla2xuseresexchforels, + "Reserve 1/2 of emergency exchanges for ELS.\n" + " 0 (default): disabled"); + +static int ql2xprotmask; +module_param(ql2xprotmask, int, 0644); +MODULE_PARM_DESC(ql2xprotmask, + "Override DIF/DIX protection capabilities mask\n" + "Default is 0 which sets protection mask based on " + "capabilities reported by HBA firmware.\n"); + +static int ql2xprotguard; +module_param(ql2xprotguard, int, 0644); +MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n" + " 0 -- Let HBA firmware decide\n" + " 1 -- Force T10 CRC\n" + " 2 -- Force IP checksum\n"); + +int ql2xdifbundlinginternalbuffers; +module_param(ql2xdifbundlinginternalbuffers, int, 0644); +MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers, + "Force using internal buffers for DIF information\n" + "0 (Default). Based on check.\n" + "1 Force using internal buffers\n"); + +int ql2xsmartsan; +module_param(ql2xsmartsan, int, 0444); +module_param_named(smartsan, ql2xsmartsan, int, 0444); +MODULE_PARM_DESC(ql2xsmartsan, + "Send SmartSAN Management Attributes for FDMI Registration." + " Default is 0 - No SmartSAN registration," + " 1 - Register SmartSAN Management Attributes."); + +int ql2xrdpenable; +module_param(ql2xrdpenable, int, 0444); +module_param_named(rdpenable, ql2xrdpenable, int, 0444); +MODULE_PARM_DESC(ql2xrdpenable, + "Enables RDP responses. " + "0 - no RDP responses (default). " + "1 - provide RDP responses."); +int ql2xabts_wait_nvme = 1; +module_param(ql2xabts_wait_nvme, int, 0444); +MODULE_PARM_DESC(ql2xabts_wait_nvme, + "To wait for ABTS response on I/O timeouts for NVMe. (default: 1)"); + + +static u32 ql2xdelay_before_pci_error_handling = 5; +module_param(ql2xdelay_before_pci_error_handling, uint, 0644); +MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling, + "Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n"); + +static void qla2x00_clear_drv_active(struct qla_hw_data *); +static void qla2x00_free_device(scsi_qla_host_t *); +static void qla2xxx_map_queues(struct Scsi_Host *shost); +static void qla2x00_destroy_deferred_work(struct qla_hw_data *); + +u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES; +module_param(ql2xnvme_queues, uint, S_IRUGO); +MODULE_PARM_DESC(ql2xnvme_queues, + "Number of NVMe Queues that can be configured.\n" + "Final value will be min(ql2xnvme_queues, num_cpus,num_chip_queues)\n" + "1 - Minimum number of queues supported\n" + "8 - Default value"); + +int ql2xfc2target = 1; +module_param(ql2xfc2target, int, 0444); +MODULE_PARM_DESC(qla2xfc2target, + "Enables FC2 Target support. " + "0 - FC2 Target support is disabled. 
" + "1 - FC2 Target support is enabled (default)."); + +static struct scsi_transport_template *qla2xxx_transport_template = NULL; +struct scsi_transport_template *qla2xxx_transport_vport_template = NULL; + +/* TODO Convert to inlines + * + * Timer routines + */ + +__inline__ void +qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval) +{ + timer_setup(&vha->timer, qla2x00_timer, 0); + vha->timer.expires = jiffies + interval * HZ; + add_timer(&vha->timer); + vha->timer_active = 1; +} + +static inline void +qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) +{ + /* Currently used for 82XX only. */ + if (vha->device_flags & DFLG_DEV_FAILED) { + ql_dbg(ql_dbg_timer, vha, 0x600d, + "Device in a failed state, returning.\n"); + return; + } + + mod_timer(&vha->timer, jiffies + interval * HZ); +} + +static __inline__ void +qla2x00_stop_timer(scsi_qla_host_t *vha) +{ + del_timer_sync(&vha->timer); + vha->timer_active = 0; +} + +static int qla2x00_do_dpc(void *data); + +static void qla2x00_rst_aen(scsi_qla_host_t *); + +static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t, + struct req_que **, struct rsp_que **); +static void qla2x00_free_fw_dump(struct qla_hw_data *); +static void qla2x00_mem_free(struct qla_hw_data *); +int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, + struct qla_qpair *qpair); + +/* -------------------------------------------------------------------------- */ +static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req, + struct rsp_que *rsp) +{ + struct qla_hw_data *ha = vha->hw; + + rsp->qpair = ha->base_qpair; + rsp->req = req; + ha->base_qpair->hw = ha; + ha->base_qpair->req = req; + ha->base_qpair->rsp = rsp; + ha->base_qpair->vha = vha; + ha->base_qpair->qp_lock_ptr = &ha->hardware_lock; + ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0; + ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q]; + ha->base_qpair->srb_mempool = ha->srb_mempool; + INIT_LIST_HEAD(&ha->base_qpair->hints_list); + INIT_LIST_HEAD(&ha->base_qpair->dsd_list); + ha->base_qpair->enable_class_2 = ql2xenableclass2; + /* init qpair to this cpu. Will adjust at run time. 
*/ + qla_cpu_update(rsp->qpair, raw_smp_processor_id()); + ha->base_qpair->pdev = ha->pdev; + + if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) + ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs; +} + +static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, + struct rsp_que *rsp) +{ + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *), + GFP_KERNEL); + if (!ha->req_q_map) { + ql_log(ql_log_fatal, vha, 0x003b, + "Unable to allocate memory for request queue ptrs.\n"); + goto fail_req_map; + } + + ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *), + GFP_KERNEL); + if (!ha->rsp_q_map) { + ql_log(ql_log_fatal, vha, 0x003c, + "Unable to allocate memory for response queue ptrs.\n"); + goto fail_rsp_map; + } + + ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); + if (ha->base_qpair == NULL) { + ql_log(ql_log_warn, vha, 0x00e0, + "Failed to allocate base queue pair memory.\n"); + goto fail_base_qpair; + } + + qla_init_base_qpair(vha, req, rsp); + + if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) { + ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *), + GFP_KERNEL); + if (!ha->queue_pair_map) { + ql_log(ql_log_fatal, vha, 0x0180, + "Unable to allocate memory for queue pair ptrs.\n"); + goto fail_qpair_map; + } + if (qla_mapq_alloc_qp_cpu_map(ha) != 0) { + kfree(ha->queue_pair_map); + ha->queue_pair_map = NULL; + goto fail_qpair_map; + } + } + + /* + * Make sure we record at least the request and response queue zero in + * case we need to free them if part of the probe fails. + */ + ha->rsp_q_map[0] = rsp; + ha->req_q_map[0] = req; + set_bit(0, ha->rsp_qid_map); + set_bit(0, ha->req_qid_map); + return 0; + +fail_qpair_map: + kfree(ha->base_qpair); + ha->base_qpair = NULL; +fail_base_qpair: + kfree(ha->rsp_q_map); + ha->rsp_q_map = NULL; +fail_rsp_map: + kfree(ha->req_q_map); + ha->req_q_map = NULL; +fail_req_map: + return -ENOMEM; +} + +static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) +{ + if (IS_QLAFX00(ha)) { + if (req && req->ring_fx00) + dma_free_coherent(&ha->pdev->dev, + (req->length_fx00 + 1) * sizeof(request_t), + req->ring_fx00, req->dma_fx00); + } else if (req && req->ring) + dma_free_coherent(&ha->pdev->dev, + (req->length + 1) * sizeof(request_t), + req->ring, req->dma); + + if (req) + kfree(req->outstanding_cmds); + + kfree(req); +} + +static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) +{ + if (IS_QLAFX00(ha)) { + if (rsp && rsp->ring_fx00) + dma_free_coherent(&ha->pdev->dev, + (rsp->length_fx00 + 1) * sizeof(request_t), + rsp->ring_fx00, rsp->dma_fx00); + } else if (rsp && rsp->ring) { + dma_free_coherent(&ha->pdev->dev, + (rsp->length + 1) * sizeof(response_t), + rsp->ring, rsp->dma); + } + kfree(rsp); +} + +static void qla2x00_free_queues(struct qla_hw_data *ha) +{ + struct req_que *req; + struct rsp_que *rsp; + int cnt; + unsigned long flags; + + if (ha->queue_pair_map) { + kfree(ha->queue_pair_map); + ha->queue_pair_map = NULL; + } + if (ha->base_qpair) { + kfree(ha->base_qpair); + ha->base_qpair = NULL; + } + + qla_mapq_free_qp_cpu_map(ha); + spin_lock_irqsave(&ha->hardware_lock, flags); + for (cnt = 0; cnt < ha->max_req_queues; cnt++) { + if (!test_bit(cnt, ha->req_qid_map)) + continue; + + req = ha->req_q_map[cnt]; + clear_bit(cnt, ha->req_qid_map); + ha->req_q_map[cnt] = NULL; + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + 
qla2x00_free_req_que(ha, req); + spin_lock_irqsave(&ha->hardware_lock, flags); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + kfree(ha->req_q_map); + ha->req_q_map = NULL; + + + spin_lock_irqsave(&ha->hardware_lock, flags); + for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { + if (!test_bit(cnt, ha->rsp_qid_map)) + continue; + + rsp = ha->rsp_q_map[cnt]; + clear_bit(cnt, ha->rsp_qid_map); + ha->rsp_q_map[cnt] = NULL; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + qla2x00_free_rsp_que(ha, rsp); + spin_lock_irqsave(&ha->hardware_lock, flags); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + kfree(ha->rsp_q_map); + ha->rsp_q_map = NULL; +} + +static char * +qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) +{ + struct qla_hw_data *ha = vha->hw; + static const char *const pci_bus_modes[] = { + "33", "66", "100", "133", + }; + uint16_t pci_bus; + + pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9; + if (pci_bus) { + snprintf(str, str_len, "PCI-X (%s MHz)", + pci_bus_modes[pci_bus]); + } else { + pci_bus = (ha->pci_attr & BIT_8) >> 8; + snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]); + } + + return str; +} + +static char * +qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) +{ + static const char *const pci_bus_modes[] = { + "33", "66", "100", "133", + }; + struct qla_hw_data *ha = vha->hw; + uint32_t pci_bus; + + if (pci_is_pcie(ha->pdev)) { + uint32_t lstat, lspeed, lwidth; + const char *speed_str; + + pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat); + lspeed = lstat & PCI_EXP_LNKCAP_SLS; + lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4; + + switch (lspeed) { + case 1: + speed_str = "2.5GT/s"; + break; + case 2: + speed_str = "5.0GT/s"; + break; + case 3: + speed_str = "8.0GT/s"; + break; + case 4: + speed_str = "16.0GT/s"; + break; + default: + speed_str = ""; + break; + } + snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth); + + return str; + } + + pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8; + if (pci_bus == 0 || pci_bus == 8) + snprintf(str, str_len, "PCI (%s MHz)", + pci_bus_modes[pci_bus >> 3]); + else + snprintf(str, str_len, "PCI-X Mode %d (%s MHz)", + pci_bus & 4 ? 
2 : 1, + pci_bus_modes[pci_bus & 3]); + + return str; +} + +static char * +qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) +{ + char un_str[10]; + struct qla_hw_data *ha = vha->hw; + + snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version, + ha->fw_minor_version, ha->fw_subminor_version); + + if (ha->fw_attributes & BIT_9) { + strcat(str, "FLX"); + return (str); + } + + switch (ha->fw_attributes & 0xFF) { + case 0x7: + strcat(str, "EF"); + break; + case 0x17: + strcat(str, "TP"); + break; + case 0x37: + strcat(str, "IP"); + break; + case 0x77: + strcat(str, "VI"); + break; + default: + sprintf(un_str, "(%x)", ha->fw_attributes); + strcat(str, un_str); + break; + } + if (ha->fw_attributes & 0x100) + strcat(str, "X"); + + return (str); +} + +static char * +qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) +{ + struct qla_hw_data *ha = vha->hw; + + snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version, + ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes); + return str; +} + +void qla2x00_sp_free_dma(srb_t *sp) +{ + struct qla_hw_data *ha = sp->vha->hw; + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + + if (sp->flags & SRB_DMA_VALID) { + scsi_dma_unmap(cmd); + sp->flags &= ~SRB_DMA_VALID; + } + + if (sp->flags & SRB_CRC_PROT_DMA_VALID) { + dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), + scsi_prot_sg_count(cmd), cmd->sc_data_direction); + sp->flags &= ~SRB_CRC_PROT_DMA_VALID; + } + + if (sp->flags & SRB_CRC_CTX_DSD_VALID) { + /* List assured to be having elements */ + qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx); + sp->flags &= ~SRB_CRC_CTX_DSD_VALID; + } + + if (sp->flags & SRB_CRC_CTX_DMA_VALID) { + struct crc_context *ctx0 = sp->u.scmd.crc_ctx; + + dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma); + sp->flags &= ~SRB_CRC_CTX_DMA_VALID; + } + + if (sp->flags & SRB_FCP_CMND_DMA_VALID) { + struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx; + + dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, + ctx1->fcp_cmnd_dma); + list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list); + sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt; + sp->qpair->dsd_avail += ctx1->dsd_use_cnt; + } + + if (sp->flags & SRB_GOT_BUF) + qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc); +} + +void qla2x00_sp_compl(srb_t *sp, int res) +{ + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct completion *comp = sp->comp; + + /* kref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + cmd->result = res; + sp->type = 0; + scsi_done(cmd); + if (comp) + complete(comp); +} + +void qla2xxx_qpair_sp_free_dma(srb_t *sp) +{ + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct qla_hw_data *ha = sp->fcport->vha->hw; + + if (sp->flags & SRB_DMA_VALID) { + scsi_dma_unmap(cmd); + sp->flags &= ~SRB_DMA_VALID; + } + + if (sp->flags & SRB_CRC_PROT_DMA_VALID) { + dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), + scsi_prot_sg_count(cmd), cmd->sc_data_direction); + sp->flags &= ~SRB_CRC_PROT_DMA_VALID; + } + + if (sp->flags & SRB_CRC_CTX_DSD_VALID) { + /* List assured to be having elements */ + qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx); + sp->flags &= ~SRB_CRC_CTX_DSD_VALID; + } + + if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) { + struct crc_context *difctx = sp->u.scmd.crc_ctx; + struct dsd_dma *dif_dsd, *nxt_dsd; + + list_for_each_entry_safe(dif_dsd, nxt_dsd, + &difctx->ldif_dma_hndl_list, list) { + list_del(&dif_dsd->list); + dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr, + dif_dsd->dsd_list_dma); + kfree(dif_dsd); + difctx->no_dif_bundl--; + } + + 
list_for_each_entry_safe(dif_dsd, nxt_dsd, + &difctx->ldif_dsd_list, list) { + list_del(&dif_dsd->list); + dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr, + dif_dsd->dsd_list_dma); + kfree(dif_dsd); + difctx->no_ldif_dsd--; + } + + if (difctx->no_ldif_dsd) { + ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022, + "%s: difctx->no_ldif_dsd=%x\n", + __func__, difctx->no_ldif_dsd); + } + + if (difctx->no_dif_bundl) { + ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022, + "%s: difctx->no_dif_bundl=%x\n", + __func__, difctx->no_dif_bundl); + } + sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID; + } + + if (sp->flags & SRB_FCP_CMND_DMA_VALID) { + struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx; + + dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, + ctx1->fcp_cmnd_dma); + list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list); + sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt; + sp->qpair->dsd_avail += ctx1->dsd_use_cnt; + sp->flags &= ~SRB_FCP_CMND_DMA_VALID; + } + + if (sp->flags & SRB_CRC_CTX_DMA_VALID) { + struct crc_context *ctx0 = sp->u.scmd.crc_ctx; + + dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma); + sp->flags &= ~SRB_CRC_CTX_DMA_VALID; + } + + if (sp->flags & SRB_GOT_BUF) + qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc); +} + +void qla2xxx_qpair_sp_compl(srb_t *sp, int res) +{ + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct completion *comp = sp->comp; + + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + cmd->result = res; + sp->type = 0; + scsi_done(cmd); + if (comp) + complete(comp); +} + +static int +qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) +{ + scsi_qla_host_t *vha = shost_priv(host); + fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; + struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + srb_t *sp; + int rval; + + if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) || + WARN_ON_ONCE(!rport)) { + cmd->result = DID_NO_CONNECT << 16; + goto qc24_fail_command; + } + + if (ha->mqenable) { + uint32_t tag; + uint16_t hwq; + struct qla_qpair *qpair = NULL; + + tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd)); + hwq = blk_mq_unique_tag_to_hwq(tag); + qpair = ha->queue_pair_map[hwq]; + + if (qpair) + return qla2xxx_mqueuecommand(host, cmd, qpair); + } + + if (ha->flags.eeh_busy) { + if (ha->flags.pci_channel_io_perm_failure) { + ql_dbg(ql_dbg_aer, vha, 0x9010, + "PCI Channel IO permanent failure, exiting " + "cmd=%p.\n", cmd); + cmd->result = DID_NO_CONNECT << 16; + } else { + ql_dbg(ql_dbg_aer, vha, 0x9011, + "EEH_Busy, Requeuing the cmd=%p.\n", cmd); + cmd->result = DID_REQUEUE << 16; + } + goto qc24_fail_command; + } + + rval = fc_remote_port_chkready(rport); + if (rval) { + cmd->result = rval; + ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003, + "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", + cmd, rval); + goto qc24_fail_command; + } + + if (!vha->flags.difdix_supported && + scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { + ql_dbg(ql_dbg_io, vha, 0x3004, + "DIF Cap not reg, fail DIF capable cmd's:%p.\n", + cmd); + cmd->result = DID_NO_CONNECT << 16; + goto qc24_fail_command; + } + + if (!fcport || fcport->deleted) { + cmd->result = DID_IMM_RETRY << 16; + goto qc24_fail_command; + } + + if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) { + if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || + atomic_read(&base_vha->loop_state) == LOOP_DEAD) { + ql_dbg(ql_dbg_io, vha, 0x3005, + "Returning DNC, 
fcport_state=%d loop_state=%d.\n", + atomic_read(&fcport->state), + atomic_read(&base_vha->loop_state)); + cmd->result = DID_NO_CONNECT << 16; + goto qc24_fail_command; + } + goto qc24_target_busy; + } + + /* + * Return target busy if we've received a non-zero retry_delay_timer + * in a FCP_RSP. + */ + if (fcport->retry_delay_timestamp == 0) { + /* retry delay not set */ + } else if (time_after(jiffies, fcport->retry_delay_timestamp)) + fcport->retry_delay_timestamp = 0; + else + goto qc24_target_busy; + + sp = scsi_cmd_priv(cmd); + /* ref: INIT */ + qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport); + + sp->u.scmd.cmd = cmd; + sp->type = SRB_SCSI_CMD; + sp->free = qla2x00_sp_free_dma; + sp->done = qla2x00_sp_compl; + + rval = ha->isp_ops->start_scsi(sp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013, + "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); + goto qc24_host_busy_free_sp; + } + + return 0; + +qc24_host_busy_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + +qc24_target_busy: + return SCSI_MLQUEUE_TARGET_BUSY; + +qc24_fail_command: + scsi_done(cmd); + + return 0; +} + +/* For MQ supported I/O */ +int +qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, + struct qla_qpair *qpair) +{ + scsi_qla_host_t *vha = shost_priv(host); + fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; + struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + srb_t *sp; + int rval; + + rval = rport ? fc_remote_port_chkready(rport) : (DID_NO_CONNECT << 16); + if (rval) { + cmd->result = rval; + ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076, + "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", + cmd, rval); + goto qc24_fail_command; + } + + if (!qpair->online) { + ql_dbg(ql_dbg_io, vha, 0x3077, + "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy); + cmd->result = DID_NO_CONNECT << 16; + goto qc24_fail_command; + } + + if (!fcport || fcport->deleted) { + cmd->result = DID_IMM_RETRY << 16; + goto qc24_fail_command; + } + + if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) { + if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || + atomic_read(&base_vha->loop_state) == LOOP_DEAD) { + ql_dbg(ql_dbg_io, vha, 0x3077, + "Returning DNC, fcport_state=%d loop_state=%d.\n", + atomic_read(&fcport->state), + atomic_read(&base_vha->loop_state)); + cmd->result = DID_NO_CONNECT << 16; + goto qc24_fail_command; + } + goto qc24_target_busy; + } + + /* + * Return target busy if we've received a non-zero retry_delay_timer + * in a FCP_RSP. 
+ */ + if (fcport->retry_delay_timestamp == 0) { + /* retry delay not set */ + } else if (time_after(jiffies, fcport->retry_delay_timestamp)) + fcport->retry_delay_timestamp = 0; + else + goto qc24_target_busy; + + sp = scsi_cmd_priv(cmd); + /* ref: INIT */ + qla2xxx_init_sp(sp, vha, qpair, fcport); + + sp->u.scmd.cmd = cmd; + sp->type = SRB_SCSI_CMD; + sp->free = qla2xxx_qpair_sp_free_dma; + sp->done = qla2xxx_qpair_sp_compl; + + rval = ha->isp_ops->start_scsi_mq(sp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078, + "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); + goto qc24_host_busy_free_sp; + } + + return 0; + +qc24_host_busy_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + +qc24_target_busy: + return SCSI_MLQUEUE_TARGET_BUSY; + +qc24_fail_command: + scsi_done(cmd); + + return 0; +} + +/* + * qla2x00_wait_for_hba_online + * Wait till the HBA is online after going through + * <= MAX_RETRIES_OF_ISP_ABORT or + * finally HBA is disabled ie marked offline + * + * Input: + * ha - pointer to host adapter structure + * + * Note: + * Does context switching-Release SPIN_LOCK + * (if any) before calling this routine. + * + * Return: + * Success (Adapter is online) : 0 + * Failed (Adapter is offline/disabled) : 1 + */ +int +qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) +{ + int return_status; + unsigned long wait_online; + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + + wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); + while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || + test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || + test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || + ha->dpc_active) && time_before(jiffies, wait_online)) { + + msleep(1000); + } + if (base_vha->flags.online) + return_status = QLA_SUCCESS; + else + return_status = QLA_FUNCTION_FAILED; + + return (return_status); +} + +static inline int test_fcport_count(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + int res; + /* Return 0 = sleep, x=wake */ + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + ql_dbg(ql_dbg_init, vha, 0x00ec, + "tgt %p, fcport_count=%d\n", + vha, vha->fcport_count); + res = (vha->fcport_count == 0); + if (res) { + struct fc_port *fcport; + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->deleted != QLA_SESS_DELETED) { + /* session(s) may not be fully logged in + * (ie fcport_count=0), but session + * deletion thread(s) may be inflight. + */ + + res = 0; + break; + } + } + } + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + return res; +} + +/* + * qla2x00_wait_for_sess_deletion can only be called from remove_one. + * it has dependency on UNLOADING flag to stop device discovery + */ +void +qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) +{ + u8 i; + + qla2x00_mark_all_devices_lost(vha); + + for (i = 0; i < 10; i++) { + if (wait_event_timeout(vha->fcport_waitQ, + test_fcport_count(vha), HZ) > 0) + break; + } + + flush_workqueue(vha->hw->wq); +} + +/* + * qla2x00_wait_for_hba_ready + * Wait till the HBA is ready before doing driver unload + * + * Input: + * ha - pointer to host adapter structure + * + * Note: + * Does context switching-Release SPIN_LOCK + * (if any) before calling this routine. 
+ * + */ +static void +qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + + while ((qla2x00_reset_active(vha) || ha->dpc_active || + ha->flags.mbox_busy) || + test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) || + test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) { + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + break; + msleep(1000); + } +} + +int +qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha) +{ + int return_status; + unsigned long wait_reset; + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + + wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ); + while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || + test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || + test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || + ha->dpc_active) && time_before(jiffies, wait_reset)) { + + msleep(1000); + + if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) && + ha->flags.chip_reset_done) + break; + } + if (ha->flags.chip_reset_done) + return_status = QLA_SUCCESS; + else + return_status = QLA_FUNCTION_FAILED; + + return return_status; +} + +/************************************************************************** +* qla2xxx_eh_abort +* +* Description: +* The abort function will abort the specified command. +* +* Input: +* cmd = Linux SCSI command packet to be aborted. +* +* Returns: +* Either SUCCESS or FAILED. +* +* Note: +* Only return FAILED if command not returned by firmware. +**************************************************************************/ +static int +qla2xxx_eh_abort(struct scsi_cmnd *cmd) +{ + scsi_qla_host_t *vha = shost_priv(cmd->device->host); + DECLARE_COMPLETION_ONSTACK(comp); + srb_t *sp; + int ret; + unsigned int id; + uint64_t lun; + int rval; + struct qla_hw_data *ha = vha->hw; + uint32_t ratov_j; + struct qla_qpair *qpair; + unsigned long flags; + int fast_fail_status = SUCCESS; + + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x8042, + "PCI/Register disconnect, exiting.\n"); + qla_pci_set_eeh_busy(vha); + return FAILED; + } + + /* Save any FAST_IO_FAIL value to return later if abort succeeds */ + ret = fc_block_scsi_eh(cmd); + if (ret != 0) + fast_fail_status = ret; + + sp = scsi_cmd_priv(cmd); + qpair = sp->qpair; + + vha->cmd_timeout_cnt++; + + if ((sp->fcport && sp->fcport->deleted) || !qpair) + return fast_fail_status != SUCCESS ? fast_fail_status : FAILED; + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + sp->comp = &comp; + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + + id = cmd->device->id; + lun = cmd->device->lun; + + ql_dbg(ql_dbg_taskm, vha, 0x8002, + "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n", + vha->host_no, id, lun, sp, cmd, sp->handle); + + /* + * Abort will release the original Command/sp from FW. Let the + * original command call scsi_done. In return, he will wakeup + * this sleeping thread. + */ + rval = ha->isp_ops->abort_command(sp); + + ql_dbg(ql_dbg_taskm, vha, 0x8003, + "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval); + + /* Wait for the command completion. 
*/ + ratov_j = ha->r_a_tov/10 * 4 * 1000; + ratov_j = msecs_to_jiffies(ratov_j); + switch (rval) { + case QLA_SUCCESS: + if (!wait_for_completion_timeout(&comp, ratov_j)) { + ql_dbg(ql_dbg_taskm, vha, 0xffff, + "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n", + __func__, ha->r_a_tov/10); + ret = FAILED; + } else { + ret = fast_fail_status; + } + break; + default: + ret = FAILED; + break; + } + + sp->comp = NULL; + + ql_log(ql_log_info, vha, 0x801c, + "Abort command issued nexus=%ld:%d:%llu -- %x.\n", + vha->host_no, id, lun, ret); + + return ret; +} + +#define ABORT_POLLING_PERIOD 1000 +#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD)) + +/* + * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED. + */ +static int +__qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t, + uint64_t l, enum nexus_wait_type type) +{ + int cnt, match, status; + unsigned long flags; + scsi_qla_host_t *vha = qpair->vha; + struct req_que *req = qpair->req; + srb_t *sp; + struct scsi_cmnd *cmd; + unsigned long wait_iter = ABORT_WAIT_ITER; + bool found; + struct qla_hw_data *ha = vha->hw; + + status = QLA_SUCCESS; + + while (wait_iter--) { + found = false; + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { + sp = req->outstanding_cmds[cnt]; + if (!sp) + continue; + if (sp->type != SRB_SCSI_CMD) + continue; + if (vha->vp_idx != sp->vha->vp_idx) + continue; + match = 0; + cmd = GET_CMD_SP(sp); + switch (type) { + case WAIT_HOST: + match = 1; + break; + case WAIT_TARGET: + if (sp->fcport) + match = sp->fcport->d_id.b24 == t; + else + match = 0; + break; + case WAIT_LUN: + if (sp->fcport) + match = (sp->fcport->d_id.b24 == t && + cmd->device->lun == l); + else + match = 0; + break; + } + if (!match) + continue; + + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + if (unlikely(pci_channel_offline(ha->pdev)) || + ha->flags.eeh_busy) { + ql_dbg(ql_dbg_taskm, vha, 0x8005, + "Return:eh_wait.\n"); + return status; + } + + /* + * SRB_SCSI_CMD is still in the outstanding_cmds array. + * it means scsi_done has not called. Wait for it to + * clear from outstanding_cmds. 
+ */ + msleep(ABORT_POLLING_PERIOD); + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + found = true; + } + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + if (!found) + break; + } + + if (wait_iter == -1) + status = QLA_FUNCTION_FAILED; + + return status; +} + +int +qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, + uint64_t l, enum nexus_wait_type type) +{ + struct qla_qpair *qpair; + struct qla_hw_data *ha = vha->hw; + int i, status = QLA_SUCCESS; + + status = __qla2x00_eh_wait_for_pending_commands(ha->base_qpair, t, l, + type); + for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) { + qpair = ha->queue_pair_map[i]; + if (!qpair) + continue; + status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l, + type); + } + return status; +} + +static char *reset_errors[] = { + "HBA not online", + "HBA not ready", + "Task management failed", + "Waiting for command completions", +}; + +static int +qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) +{ + struct scsi_device *sdev = cmd->device; + scsi_qla_host_t *vha = shost_priv(sdev->host); + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + fc_port_t *fcport = (struct fc_port *) sdev->hostdata; + struct qla_hw_data *ha = vha->hw; + int err; + + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x803e, + "PCI/Register disconnect, exiting.\n"); + qla_pci_set_eeh_busy(vha); + return FAILED; + } + + if (!fcport) { + return FAILED; + } + + err = fc_block_rport(rport); + if (err != 0) + return err; + + if (fcport->deleted) + return FAILED; + + ql_log(ql_log_info, vha, 0x8009, + "DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no, + sdev->id, sdev->lun, cmd); + + err = 0; + if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x800a, + "Wait for hba online failed for cmd=%p.\n", cmd); + goto eh_reset_failed; + } + err = 2; + if (ha->isp_ops->lun_reset(fcport, sdev->lun, 1) + != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x800c, + "do_reset failed for cmd=%p.\n", cmd); + goto eh_reset_failed; + } + err = 3; + if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24, + cmd->device->lun, + WAIT_LUN) != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x800d, + "wait for pending cmds failed for cmd=%p.\n", cmd); + goto eh_reset_failed; + } + + ql_log(ql_log_info, vha, 0x800e, + "DEVICE RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", + vha->host_no, sdev->id, sdev->lun, cmd); + + return SUCCESS; + +eh_reset_failed: + ql_log(ql_log_info, vha, 0x800f, + "DEVICE RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", + reset_errors[err], vha->host_no, sdev->id, sdev->lun, + cmd); + vha->reset_cmd_err_cnt++; + return FAILED; +} + +static int +qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) +{ + struct scsi_device *sdev = cmd->device; + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + scsi_qla_host_t *vha = shost_priv(rport_to_shost(rport)); + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport = *(fc_port_t **)rport->dd_data; + int err; + + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x803f, + "PCI/Register disconnect, exiting.\n"); + qla_pci_set_eeh_busy(vha); + return FAILED; + } + + if (!fcport) { + return FAILED; + } + + err = fc_block_rport(rport); + if (err != 0) + return err; + + if (fcport->deleted) + return FAILED; + + ql_log(ql_log_info, vha, 0x8009, + "TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no, + sdev->id, cmd); + + err = 0; + if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x800a, 
+ "Wait for hba online failed for cmd=%p.\n", cmd); + goto eh_reset_failed; + } + err = 2; + if (ha->isp_ops->target_reset(fcport, 0, 0) != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x800c, + "target_reset failed for cmd=%p.\n", cmd); + goto eh_reset_failed; + } + err = 3; + if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24, 0, + WAIT_TARGET) != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x800d, + "wait for pending cmds failed for cmd=%p.\n", cmd); + goto eh_reset_failed; + } + + ql_log(ql_log_info, vha, 0x800e, + "TARGET RESET SUCCEEDED nexus:%ld:%d cmd=%p.\n", + vha->host_no, sdev->id, cmd); + + return SUCCESS; + +eh_reset_failed: + ql_log(ql_log_info, vha, 0x800f, + "TARGET RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", + reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun, + cmd); + vha->reset_cmd_err_cnt++; + return FAILED; +} + +/************************************************************************** +* qla2xxx_eh_bus_reset +* +* Description: +* The bus reset function will reset the bus and abort any executing +* commands. +* +* Input: +* cmd = Linux SCSI command packet of the command that cause the +* bus reset. +* +* Returns: +* SUCCESS/FAILURE (defined as macro in scsi.h). +* +**************************************************************************/ +static int +qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) +{ + scsi_qla_host_t *vha = shost_priv(cmd->device->host); + int ret = FAILED; + unsigned int id; + uint64_t lun; + struct qla_hw_data *ha = vha->hw; + + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x8040, + "PCI/Register disconnect, exiting.\n"); + qla_pci_set_eeh_busy(vha); + return FAILED; + } + + id = cmd->device->id; + lun = cmd->device->lun; + + if (qla2x00_chip_is_down(vha)) + return ret; + + ql_log(ql_log_info, vha, 0x8012, + "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun); + + if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0x8013, + "Wait for hba online failed board disabled.\n"); + goto eh_bus_reset_done; + } + + if (qla2x00_loop_reset(vha) == QLA_SUCCESS) + ret = SUCCESS; + + if (ret == FAILED) + goto eh_bus_reset_done; + + /* Flush outstanding commands. */ + if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) != + QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x8014, + "Wait for pending commands failed.\n"); + ret = FAILED; + } + +eh_bus_reset_done: + ql_log(ql_log_warn, vha, 0x802b, + "BUS RESET %s nexus=%ld:%d:%llu.\n", + (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun); + + return ret; +} + +/************************************************************************** +* qla2xxx_eh_host_reset +* +* Description: +* The reset function will reset the Adapter. +* +* Input: +* cmd = Linux SCSI command packet of the command that cause the +* adapter reset. +* +* Returns: +* Either SUCCESS or FAILED. 
+* +* Note: +**************************************************************************/ +static int +qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) +{ + scsi_qla_host_t *vha = shost_priv(cmd->device->host); + struct qla_hw_data *ha = vha->hw; + int ret = FAILED; + unsigned int id; + uint64_t lun; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x8041, + "PCI/Register disconnect, exiting.\n"); + qla_pci_set_eeh_busy(vha); + return SUCCESS; + } + + id = cmd->device->id; + lun = cmd->device->lun; + + ql_log(ql_log_info, vha, 0x8018, + "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun); + + /* + * No point in issuing another reset if one is active. Also do not + * attempt a reset if we are updating flash. + */ + if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING) + goto eh_host_reset_lock; + + if (vha != base_vha) { + if (qla2x00_vp_abort_isp(vha)) + goto eh_host_reset_lock; + } else { + if (IS_P3P_TYPE(vha->hw)) { + if (!qla82xx_fcoe_ctx_reset(vha)) { + /* Ctx reset success */ + ret = SUCCESS; + goto eh_host_reset_lock; + } + /* fall thru if ctx reset failed */ + } + if (ha->wq) + flush_workqueue(ha->wq); + + set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + if (ha->isp_ops->abort_isp(base_vha)) { + clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + /* failed. schedule dpc to try */ + set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); + + if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x802a, + "wait for hba online failed.\n"); + goto eh_host_reset_lock; + } + } + clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + } + + /* Waiting for command to be returned to OS.*/ + if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) == + QLA_SUCCESS) + ret = SUCCESS; + +eh_host_reset_lock: + ql_log(ql_log_info, vha, 0x8017, + "ADAPTER RESET %s nexus=%ld:%d:%llu.\n", + (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun); + + return ret; +} + +/* +* qla2x00_loop_reset +* Issue loop reset. +* +* Input: +* ha = adapter block pointer. +* +* Returns: +* 0 = success +*/ +int +qla2x00_loop_reset(scsi_qla_host_t *vha) +{ + int ret; + struct qla_hw_data *ha = vha->hw; + + if (IS_QLAFX00(ha)) + return QLA_SUCCESS; + + if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { + atomic_set(&vha->loop_state, LOOP_DOWN); + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + qla2x00_mark_all_devices_lost(vha); + ret = qla2x00_full_login_lip(vha); + if (ret != QLA_SUCCESS) { + ql_dbg(ql_dbg_taskm, vha, 0x802d, + "full_login_lip=%d.\n", ret); + } + } + + if (ha->flags.enable_lip_reset) { + ret = qla2x00_lip_reset(vha); + if (ret != QLA_SUCCESS) + ql_dbg(ql_dbg_taskm, vha, 0x802e, + "lip_reset failed (%d).\n", ret); + } + + /* Issue marker command only when we are going to start the I/O */ + vha->marker_needed = 1; + + return QLA_SUCCESS; +} + +/* + * The caller must ensure that no completion interrupts will happen + * while this function is in progress. 
+ */ +static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res, + unsigned long *flags) + __releases(qp->qp_lock_ptr) + __acquires(qp->qp_lock_ptr) +{ + DECLARE_COMPLETION_ONSTACK(comp); + scsi_qla_host_t *vha = qp->vha; + struct qla_hw_data *ha = vha->hw; + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + int rval; + bool ret_cmd; + uint32_t ratov_j; + + lockdep_assert_held(qp->qp_lock_ptr); + + if (qla2x00_chip_is_down(vha)) { + sp->done(sp, res); + return; + } + + if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS || + (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy && + !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && + !qla2x00_isp_reg_stat(ha))) { + if (sp->comp) { + sp->done(sp, res); + return; + } + + sp->comp = &comp; + spin_unlock_irqrestore(qp->qp_lock_ptr, *flags); + + rval = ha->isp_ops->abort_command(sp); + /* Wait for command completion. */ + ret_cmd = false; + ratov_j = ha->r_a_tov/10 * 4 * 1000; + ratov_j = msecs_to_jiffies(ratov_j); + switch (rval) { + case QLA_SUCCESS: + if (wait_for_completion_timeout(&comp, ratov_j)) { + ql_dbg(ql_dbg_taskm, vha, 0xffff, + "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n", + __func__, ha->r_a_tov/10); + ret_cmd = true; + } + /* else FW returns SP to driver */ + break; + default: + ret_cmd = true; + break; + } + + spin_lock_irqsave(qp->qp_lock_ptr, *flags); + switch (sp->type) { + case SRB_SCSI_CMD: + if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd))) + sp->done(sp, res); + break; + default: + if (ret_cmd) + sp->done(sp, res); + break; + } + } else { + sp->done(sp, res); + } +} + +/* + * The caller must ensure that no completion interrupts will happen + * while this function is in progress. + */ +static void +__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) +{ + int cnt; + unsigned long flags; + srb_t *sp; + scsi_qla_host_t *vha = qp->vha; + struct qla_hw_data *ha = vha->hw; + struct req_que *req; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_tgt_cmd *cmd; + + if (!ha->req_q_map) + return; + spin_lock_irqsave(qp->qp_lock_ptr, flags); + req = qp->req; + for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { + sp = req->outstanding_cmds[cnt]; + if (sp) { + /* + * perform lockless completion during driver unload + */ + if (qla2x00_chip_is_down(vha)) { + req->outstanding_cmds[cnt] = NULL; + spin_unlock_irqrestore(qp->qp_lock_ptr, flags); + sp->done(sp, res); + spin_lock_irqsave(qp->qp_lock_ptr, flags); + continue; + } + + switch (sp->cmd_type) { + case TYPE_SRB: + qla2x00_abort_srb(qp, sp, res, &flags); + break; + case TYPE_TGT_CMD: + if (!vha->hw->tgt.tgt_ops || !tgt || + qla_ini_mode_enabled(vha)) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003, + "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n", + vha->dpc_flags); + continue; + } + cmd = (struct qla_tgt_cmd *)sp; + cmd->aborted = 1; + break; + case TYPE_TGT_TMCMD: + /* Skip task management functions. */ + break; + default: + break; + } + req->outstanding_cmds[cnt] = NULL; + } + } + spin_unlock_irqrestore(qp->qp_lock_ptr, flags); +} + +/* + * The caller must ensure that no completion interrupts will happen + * while this function is in progress. + */ +void +qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) +{ + int que; + struct qla_hw_data *ha = vha->hw; + + /* Continue only if initialization complete. 
*/ + if (!ha->base_qpair) + return; + __qla2x00_abort_all_cmds(ha->base_qpair, res); + + if (!ha->queue_pair_map) + return; + for (que = 0; que < ha->max_qpairs; que++) { + if (!ha->queue_pair_map[que]) + continue; + + __qla2x00_abort_all_cmds(ha->queue_pair_map[que], res); + } +} + +static int +qla2xxx_slave_alloc(struct scsi_device *sdev) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + + if (!rport || fc_remote_port_chkready(rport)) + return -ENXIO; + + sdev->hostdata = *(fc_port_t **)rport->dd_data; + + return 0; +} + +static int +qla2xxx_slave_configure(struct scsi_device *sdev) +{ + scsi_qla_host_t *vha = shost_priv(sdev->host); + struct req_que *req = vha->req; + + if (IS_T10_PI_CAPABLE(vha->hw)) + blk_queue_update_dma_alignment(sdev->request_queue, 0x7); + + scsi_change_queue_depth(sdev, req->max_q_depth); + return 0; +} + +static void +qla2xxx_slave_destroy(struct scsi_device *sdev) +{ + sdev->hostdata = NULL; +} + +/** + * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. + * @ha: HA context + * + * At exit, @ha's flags.enable_64bit_addressing is set to indicate the + * supported addressing method. + */ +static void +qla2x00_config_dma_addressing(struct qla_hw_data *ha) +{ + /* Assume a 32bit DMA mask. */ + ha->flags.enable_64bit_addressing = 0; + + if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { + /* Any upper-dword bits set? */ + if (MSD(dma_get_required_mask(&ha->pdev->dev)) && + !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { + /* Ok, a 64bit DMA mask is applicable. */ + ha->flags.enable_64bit_addressing = 1; + ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64; + ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64; + return; + } + } + + dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32)); + dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32)); +} + +static void +qla2x00_enable_intrs(struct qla_hw_data *ha) +{ + unsigned long flags = 0; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->interrupts_on = 1; + /* enable risc and host interrupts */ + wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC); + rd_reg_word(&reg->ictrl); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + +} + +static void +qla2x00_disable_intrs(struct qla_hw_data *ha) +{ + unsigned long flags = 0; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->interrupts_on = 0; + /* disable risc and host interrupts */ + wrt_reg_word(&reg->ictrl, 0); + rd_reg_word(&reg->ictrl); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +static void +qla24xx_enable_intrs(struct qla_hw_data *ha) +{ + unsigned long flags = 0; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->interrupts_on = 1; + wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT); + rd_reg_dword(&reg->ictrl); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +static void +qla24xx_disable_intrs(struct qla_hw_data *ha) +{ + unsigned long flags = 0; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + if (IS_NOPOLLING_TYPE(ha)) + return; + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->interrupts_on = 0; + wrt_reg_dword(&reg->ictrl, 0); + rd_reg_dword(&reg->ictrl); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +static int +qla2x00_iospace_config(struct qla_hw_data *ha) +{ + resource_size_t pio; + uint16_t msix; + + if (pci_request_selected_regions(ha->pdev, ha->bars, + QLA2XXX_DRIVER_NAME)) { + 
ql_log_pci(ql_log_fatal, ha->pdev, 0x0011, + "Failed to reserve PIO/MMIO regions (%s), aborting.\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + if (!(ha->bars & 1)) + goto skip_pio; + + /* We only need PIO for Flash operations on ISP2312 v2 chips. */ + pio = pci_resource_start(ha->pdev, 0); + if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { + if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { + ql_log_pci(ql_log_warn, ha->pdev, 0x0012, + "Invalid pci I/O region size (%s).\n", + pci_name(ha->pdev)); + pio = 0; + } + } else { + ql_log_pci(ql_log_warn, ha->pdev, 0x0013, + "Region #0 no a PIO resource (%s).\n", + pci_name(ha->pdev)); + pio = 0; + } + ha->pio_address = pio; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014, + "PIO address=%llu.\n", + (unsigned long long)ha->pio_address); + +skip_pio: + /* Use MMIO operations for all accesses. */ + if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x0015, + "Region #1 not an MMIO resource (%s), aborting.\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x0016, + "Invalid PCI mem region size (%s), aborting.\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + + ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); + if (!ha->iobase) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x0017, + "Cannot remap MMIO (%s), aborting.\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + + /* Determine queue resources */ + ha->max_req_queues = ha->max_rsp_queues = 1; + ha->msix_count = QLA_BASE_VECTORS; + + /* Check if FW supports MQ or not */ + if (!(ha->fw_attributes & BIT_6)) + goto mqiobase_exit; + + if (!ql2xmqsupport || !ql2xnvmeenable || + (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) + goto mqiobase_exit; + + ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), + pci_resource_len(ha->pdev, 3)); + if (ha->mqiobase) { + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018, + "MQIO Base=%p.\n", ha->mqiobase); + /* Read MSIX vector size of the board */ + pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); + ha->msix_count = msix + 1; + /* Max queues are bounded by available msix vectors */ + /* MB interrupt uses 1 vector */ + ha->max_req_queues = ha->msix_count - 1; + ha->max_rsp_queues = ha->max_req_queues; + /* Queue pairs is the max value minus the base queue pair */ + ha->max_qpairs = ha->max_rsp_queues - 1; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188, + "Max no of queues pairs: %d.\n", ha->max_qpairs); + + ql_log_pci(ql_log_info, ha->pdev, 0x001a, + "MSI-X vector count: %d.\n", ha->msix_count); + } else + ql_log_pci(ql_log_info, ha->pdev, 0x001b, + "BAR 3 not enabled.\n"); + +mqiobase_exit: + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c, + "MSIX Count: %d.\n", ha->msix_count); + return (0); + +iospace_error_exit: + return (-ENOMEM); +} + + +static int +qla83xx_iospace_config(struct qla_hw_data *ha) +{ + uint16_t msix; + + if (pci_request_selected_regions(ha->pdev, ha->bars, + QLA2XXX_DRIVER_NAME)) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x0117, + "Failed to reserve PIO/MMIO regions (%s), aborting.\n", + pci_name(ha->pdev)); + + goto iospace_error_exit; + } + + /* Use MMIO operations for all accesses. 
*/ + if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { + ql_log_pci(ql_log_warn, ha->pdev, 0x0118, + "Invalid pci I/O region size (%s).\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { + ql_log_pci(ql_log_warn, ha->pdev, 0x0119, + "Invalid PCI mem region size (%s), aborting\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + + ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN); + if (!ha->iobase) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x011a, + "Cannot remap MMIO (%s), aborting.\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + + /* 64bit PCI BAR - BAR2 will correspoond to region 4 */ + /* 83XX 26XX always use MQ type access for queues + * - mbar 2, a.k.a region 4 */ + ha->max_req_queues = ha->max_rsp_queues = 1; + ha->msix_count = QLA_BASE_VECTORS; + ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4), + pci_resource_len(ha->pdev, 4)); + + if (!ha->mqiobase) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x011d, + "BAR2/region4 not enabled\n"); + goto mqiobase_exit; + } + + ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2), + pci_resource_len(ha->pdev, 2)); + if (ha->msixbase) { + /* Read MSIX vector size of the board */ + pci_read_config_word(ha->pdev, + QLA_83XX_PCI_MSIX_CONTROL, &msix); + ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1; + /* + * By default, driver uses at least two msix vectors + * (default & rspq) + */ + if (ql2xmqsupport || ql2xnvmeenable) { + /* MB interrupt uses 1 vector */ + ha->max_req_queues = ha->msix_count - 1; + + /* ATIOQ needs 1 vector. That's 1 less QPair */ + if (QLA_TGT_MODE_ENABLED()) + ha->max_req_queues--; + + ha->max_rsp_queues = ha->max_req_queues; + + /* Queue pairs is the max value minus + * the base queue pair */ + ha->max_qpairs = ha->max_req_queues - 1; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3, + "Max no of queues pairs: %d.\n", ha->max_qpairs); + } + ql_log_pci(ql_log_info, ha->pdev, 0x011c, + "MSI-X vector count: %d.\n", ha->msix_count); + } else + ql_log_pci(ql_log_info, ha->pdev, 0x011e, + "BAR 1 not enabled.\n"); + +mqiobase_exit: + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f, + "MSIX Count: %d.\n", ha->msix_count); + return 0; + +iospace_error_exit: + return -ENOMEM; +} + +static struct isp_operations qla2100_isp_ops = { + .pci_config = qla2100_pci_config, + .reset_chip = qla2x00_reset_chip, + .chip_diag = qla2x00_chip_diag, + .config_rings = qla2x00_config_rings, + .reset_adapter = qla2x00_reset_adapter, + .nvram_config = qla2x00_nvram_config, + .update_fw_options = qla2x00_update_fw_options, + .load_risc = qla2x00_load_risc, + .pci_info_str = qla2x00_pci_info_str, + .fw_version_str = qla2x00_fw_version_str, + .intr_handler = qla2100_intr_handler, + .enable_intrs = qla2x00_enable_intrs, + .disable_intrs = qla2x00_disable_intrs, + .abort_command = qla2x00_abort_command, + .target_reset = qla2x00_abort_target, + .lun_reset = qla2x00_lun_reset, + .fabric_login = qla2x00_login_fabric, + .fabric_logout = qla2x00_fabric_logout, + .calc_req_entries = qla2x00_calc_iocbs_32, + .build_iocbs = qla2x00_build_scsi_iocbs_32, + .prep_ms_iocb = qla2x00_prep_ms_iocb, + .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, + .read_nvram = qla2x00_read_nvram_data, + .write_nvram = qla2x00_write_nvram_data, + .fw_dump = qla2100_fw_dump, + .beacon_on = NULL, + .beacon_off = NULL, + .beacon_blink = NULL, + .read_optrom = qla2x00_read_optrom_data, + .write_optrom = qla2x00_write_optrom_data, + .get_flash_version = qla2x00_get_flash_version, + .start_scsi 
= qla2x00_start_scsi, + .start_scsi_mq = NULL, + .abort_isp = qla2x00_abort_isp, + .iospace_config = qla2x00_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, +}; + +static struct isp_operations qla2300_isp_ops = { + .pci_config = qla2300_pci_config, + .reset_chip = qla2x00_reset_chip, + .chip_diag = qla2x00_chip_diag, + .config_rings = qla2x00_config_rings, + .reset_adapter = qla2x00_reset_adapter, + .nvram_config = qla2x00_nvram_config, + .update_fw_options = qla2x00_update_fw_options, + .load_risc = qla2x00_load_risc, + .pci_info_str = qla2x00_pci_info_str, + .fw_version_str = qla2x00_fw_version_str, + .intr_handler = qla2300_intr_handler, + .enable_intrs = qla2x00_enable_intrs, + .disable_intrs = qla2x00_disable_intrs, + .abort_command = qla2x00_abort_command, + .target_reset = qla2x00_abort_target, + .lun_reset = qla2x00_lun_reset, + .fabric_login = qla2x00_login_fabric, + .fabric_logout = qla2x00_fabric_logout, + .calc_req_entries = qla2x00_calc_iocbs_32, + .build_iocbs = qla2x00_build_scsi_iocbs_32, + .prep_ms_iocb = qla2x00_prep_ms_iocb, + .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, + .read_nvram = qla2x00_read_nvram_data, + .write_nvram = qla2x00_write_nvram_data, + .fw_dump = qla2300_fw_dump, + .beacon_on = qla2x00_beacon_on, + .beacon_off = qla2x00_beacon_off, + .beacon_blink = qla2x00_beacon_blink, + .read_optrom = qla2x00_read_optrom_data, + .write_optrom = qla2x00_write_optrom_data, + .get_flash_version = qla2x00_get_flash_version, + .start_scsi = qla2x00_start_scsi, + .start_scsi_mq = NULL, + .abort_isp = qla2x00_abort_isp, + .iospace_config = qla2x00_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, +}; + +static struct isp_operations qla24xx_isp_ops = { + .pci_config = qla24xx_pci_config, + .reset_chip = qla24xx_reset_chip, + .chip_diag = qla24xx_chip_diag, + .config_rings = qla24xx_config_rings, + .reset_adapter = qla24xx_reset_adapter, + .nvram_config = qla24xx_nvram_config, + .update_fw_options = qla24xx_update_fw_options, + .load_risc = qla24xx_load_risc, + .pci_info_str = qla24xx_pci_info_str, + .fw_version_str = qla24xx_fw_version_str, + .intr_handler = qla24xx_intr_handler, + .enable_intrs = qla24xx_enable_intrs, + .disable_intrs = qla24xx_disable_intrs, + .abort_command = qla24xx_abort_command, + .target_reset = qla24xx_abort_target, + .lun_reset = qla24xx_lun_reset, + .fabric_login = qla24xx_login_fabric, + .fabric_logout = qla24xx_fabric_logout, + .calc_req_entries = NULL, + .build_iocbs = NULL, + .prep_ms_iocb = qla24xx_prep_ms_iocb, + .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, + .read_nvram = qla24xx_read_nvram_data, + .write_nvram = qla24xx_write_nvram_data, + .fw_dump = qla24xx_fw_dump, + .beacon_on = qla24xx_beacon_on, + .beacon_off = qla24xx_beacon_off, + .beacon_blink = qla24xx_beacon_blink, + .read_optrom = qla24xx_read_optrom_data, + .write_optrom = qla24xx_write_optrom_data, + .get_flash_version = qla24xx_get_flash_version, + .start_scsi = qla24xx_start_scsi, + .start_scsi_mq = NULL, + .abort_isp = qla2x00_abort_isp, + .iospace_config = qla2x00_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, +}; + +static struct isp_operations qla25xx_isp_ops = { + .pci_config = qla25xx_pci_config, + .reset_chip = qla24xx_reset_chip, + .chip_diag = qla24xx_chip_diag, + .config_rings = qla24xx_config_rings, + .reset_adapter = qla24xx_reset_adapter, + .nvram_config = qla24xx_nvram_config, + .update_fw_options = qla24xx_update_fw_options, + .load_risc = qla24xx_load_risc, + .pci_info_str = 
qla24xx_pci_info_str, + .fw_version_str = qla24xx_fw_version_str, + .intr_handler = qla24xx_intr_handler, + .enable_intrs = qla24xx_enable_intrs, + .disable_intrs = qla24xx_disable_intrs, + .abort_command = qla24xx_abort_command, + .target_reset = qla24xx_abort_target, + .lun_reset = qla24xx_lun_reset, + .fabric_login = qla24xx_login_fabric, + .fabric_logout = qla24xx_fabric_logout, + .calc_req_entries = NULL, + .build_iocbs = NULL, + .prep_ms_iocb = qla24xx_prep_ms_iocb, + .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, + .read_nvram = qla25xx_read_nvram_data, + .write_nvram = qla25xx_write_nvram_data, + .fw_dump = qla25xx_fw_dump, + .beacon_on = qla24xx_beacon_on, + .beacon_off = qla24xx_beacon_off, + .beacon_blink = qla24xx_beacon_blink, + .read_optrom = qla25xx_read_optrom_data, + .write_optrom = qla24xx_write_optrom_data, + .get_flash_version = qla24xx_get_flash_version, + .start_scsi = qla24xx_dif_start_scsi, + .start_scsi_mq = qla2xxx_dif_start_scsi_mq, + .abort_isp = qla2x00_abort_isp, + .iospace_config = qla2x00_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, +}; + +static struct isp_operations qla81xx_isp_ops = { + .pci_config = qla25xx_pci_config, + .reset_chip = qla24xx_reset_chip, + .chip_diag = qla24xx_chip_diag, + .config_rings = qla24xx_config_rings, + .reset_adapter = qla24xx_reset_adapter, + .nvram_config = qla81xx_nvram_config, + .update_fw_options = qla24xx_update_fw_options, + .load_risc = qla81xx_load_risc, + .pci_info_str = qla24xx_pci_info_str, + .fw_version_str = qla24xx_fw_version_str, + .intr_handler = qla24xx_intr_handler, + .enable_intrs = qla24xx_enable_intrs, + .disable_intrs = qla24xx_disable_intrs, + .abort_command = qla24xx_abort_command, + .target_reset = qla24xx_abort_target, + .lun_reset = qla24xx_lun_reset, + .fabric_login = qla24xx_login_fabric, + .fabric_logout = qla24xx_fabric_logout, + .calc_req_entries = NULL, + .build_iocbs = NULL, + .prep_ms_iocb = qla24xx_prep_ms_iocb, + .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, + .read_nvram = NULL, + .write_nvram = NULL, + .fw_dump = qla81xx_fw_dump, + .beacon_on = qla24xx_beacon_on, + .beacon_off = qla24xx_beacon_off, + .beacon_blink = qla83xx_beacon_blink, + .read_optrom = qla25xx_read_optrom_data, + .write_optrom = qla24xx_write_optrom_data, + .get_flash_version = qla24xx_get_flash_version, + .start_scsi = qla24xx_dif_start_scsi, + .start_scsi_mq = qla2xxx_dif_start_scsi_mq, + .abort_isp = qla2x00_abort_isp, + .iospace_config = qla2x00_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, +}; + +static struct isp_operations qla82xx_isp_ops = { + .pci_config = qla82xx_pci_config, + .reset_chip = qla82xx_reset_chip, + .chip_diag = qla24xx_chip_diag, + .config_rings = qla82xx_config_rings, + .reset_adapter = qla24xx_reset_adapter, + .nvram_config = qla81xx_nvram_config, + .update_fw_options = qla24xx_update_fw_options, + .load_risc = qla82xx_load_risc, + .pci_info_str = qla24xx_pci_info_str, + .fw_version_str = qla24xx_fw_version_str, + .intr_handler = qla82xx_intr_handler, + .enable_intrs = qla82xx_enable_intrs, + .disable_intrs = qla82xx_disable_intrs, + .abort_command = qla24xx_abort_command, + .target_reset = qla24xx_abort_target, + .lun_reset = qla24xx_lun_reset, + .fabric_login = qla24xx_login_fabric, + .fabric_logout = qla24xx_fabric_logout, + .calc_req_entries = NULL, + .build_iocbs = NULL, + .prep_ms_iocb = qla24xx_prep_ms_iocb, + .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, + .read_nvram = qla24xx_read_nvram_data, + .write_nvram = qla24xx_write_nvram_data, 
+ .fw_dump = qla82xx_fw_dump, + .beacon_on = qla82xx_beacon_on, + .beacon_off = qla82xx_beacon_off, + .beacon_blink = NULL, + .read_optrom = qla82xx_read_optrom_data, + .write_optrom = qla82xx_write_optrom_data, + .get_flash_version = qla82xx_get_flash_version, + .start_scsi = qla82xx_start_scsi, + .start_scsi_mq = NULL, + .abort_isp = qla82xx_abort_isp, + .iospace_config = qla82xx_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, +}; + +static struct isp_operations qla8044_isp_ops = { + .pci_config = qla82xx_pci_config, + .reset_chip = qla82xx_reset_chip, + .chip_diag = qla24xx_chip_diag, + .config_rings = qla82xx_config_rings, + .reset_adapter = qla24xx_reset_adapter, + .nvram_config = qla81xx_nvram_config, + .update_fw_options = qla24xx_update_fw_options, + .load_risc = qla82xx_load_risc, + .pci_info_str = qla24xx_pci_info_str, + .fw_version_str = qla24xx_fw_version_str, + .intr_handler = qla8044_intr_handler, + .enable_intrs = qla82xx_enable_intrs, + .disable_intrs = qla82xx_disable_intrs, + .abort_command = qla24xx_abort_command, + .target_reset = qla24xx_abort_target, + .lun_reset = qla24xx_lun_reset, + .fabric_login = qla24xx_login_fabric, + .fabric_logout = qla24xx_fabric_logout, + .calc_req_entries = NULL, + .build_iocbs = NULL, + .prep_ms_iocb = qla24xx_prep_ms_iocb, + .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, + .read_nvram = NULL, + .write_nvram = NULL, + .fw_dump = qla8044_fw_dump, + .beacon_on = qla82xx_beacon_on, + .beacon_off = qla82xx_beacon_off, + .beacon_blink = NULL, + .read_optrom = qla8044_read_optrom_data, + .write_optrom = qla8044_write_optrom_data, + .get_flash_version = qla82xx_get_flash_version, + .start_scsi = qla82xx_start_scsi, + .start_scsi_mq = NULL, + .abort_isp = qla8044_abort_isp, + .iospace_config = qla82xx_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, +}; + +static struct isp_operations qla83xx_isp_ops = { + .pci_config = qla25xx_pci_config, + .reset_chip = qla24xx_reset_chip, + .chip_diag = qla24xx_chip_diag, + .config_rings = qla24xx_config_rings, + .reset_adapter = qla24xx_reset_adapter, + .nvram_config = qla81xx_nvram_config, + .update_fw_options = qla24xx_update_fw_options, + .load_risc = qla81xx_load_risc, + .pci_info_str = qla24xx_pci_info_str, + .fw_version_str = qla24xx_fw_version_str, + .intr_handler = qla24xx_intr_handler, + .enable_intrs = qla24xx_enable_intrs, + .disable_intrs = qla24xx_disable_intrs, + .abort_command = qla24xx_abort_command, + .target_reset = qla24xx_abort_target, + .lun_reset = qla24xx_lun_reset, + .fabric_login = qla24xx_login_fabric, + .fabric_logout = qla24xx_fabric_logout, + .calc_req_entries = NULL, + .build_iocbs = NULL, + .prep_ms_iocb = qla24xx_prep_ms_iocb, + .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, + .read_nvram = NULL, + .write_nvram = NULL, + .fw_dump = qla83xx_fw_dump, + .beacon_on = qla24xx_beacon_on, + .beacon_off = qla24xx_beacon_off, + .beacon_blink = qla83xx_beacon_blink, + .read_optrom = qla25xx_read_optrom_data, + .write_optrom = qla24xx_write_optrom_data, + .get_flash_version = qla24xx_get_flash_version, + .start_scsi = qla24xx_dif_start_scsi, + .start_scsi_mq = qla2xxx_dif_start_scsi_mq, + .abort_isp = qla2x00_abort_isp, + .iospace_config = qla83xx_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, +}; + +static struct isp_operations qlafx00_isp_ops = { + .pci_config = qlafx00_pci_config, + .reset_chip = qlafx00_soft_reset, + .chip_diag = qlafx00_chip_diag, + .config_rings = qlafx00_config_rings, + .reset_adapter = 
qlafx00_soft_reset, + .nvram_config = NULL, + .update_fw_options = NULL, + .load_risc = NULL, + .pci_info_str = qlafx00_pci_info_str, + .fw_version_str = qlafx00_fw_version_str, + .intr_handler = qlafx00_intr_handler, + .enable_intrs = qlafx00_enable_intrs, + .disable_intrs = qlafx00_disable_intrs, + .abort_command = qla24xx_async_abort_command, + .target_reset = qlafx00_abort_target, + .lun_reset = qlafx00_lun_reset, + .fabric_login = NULL, + .fabric_logout = NULL, + .calc_req_entries = NULL, + .build_iocbs = NULL, + .prep_ms_iocb = qla24xx_prep_ms_iocb, + .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, + .read_nvram = qla24xx_read_nvram_data, + .write_nvram = qla24xx_write_nvram_data, + .fw_dump = NULL, + .beacon_on = qla24xx_beacon_on, + .beacon_off = qla24xx_beacon_off, + .beacon_blink = NULL, + .read_optrom = qla24xx_read_optrom_data, + .write_optrom = qla24xx_write_optrom_data, + .get_flash_version = qla24xx_get_flash_version, + .start_scsi = qlafx00_start_scsi, + .start_scsi_mq = NULL, + .abort_isp = qlafx00_abort_isp, + .iospace_config = qlafx00_iospace_config, + .initialize_adapter = qlafx00_initialize_adapter, +}; + +static struct isp_operations qla27xx_isp_ops = { + .pci_config = qla25xx_pci_config, + .reset_chip = qla24xx_reset_chip, + .chip_diag = qla24xx_chip_diag, + .config_rings = qla24xx_config_rings, + .reset_adapter = qla24xx_reset_adapter, + .nvram_config = qla81xx_nvram_config, + .update_fw_options = qla24xx_update_fw_options, + .load_risc = qla81xx_load_risc, + .pci_info_str = qla24xx_pci_info_str, + .fw_version_str = qla24xx_fw_version_str, + .intr_handler = qla24xx_intr_handler, + .enable_intrs = qla24xx_enable_intrs, + .disable_intrs = qla24xx_disable_intrs, + .abort_command = qla24xx_abort_command, + .target_reset = qla24xx_abort_target, + .lun_reset = qla24xx_lun_reset, + .fabric_login = qla24xx_login_fabric, + .fabric_logout = qla24xx_fabric_logout, + .calc_req_entries = NULL, + .build_iocbs = NULL, + .prep_ms_iocb = qla24xx_prep_ms_iocb, + .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, + .read_nvram = NULL, + .write_nvram = NULL, + .fw_dump = qla27xx_fwdump, + .mpi_fw_dump = qla27xx_mpi_fwdump, + .beacon_on = qla24xx_beacon_on, + .beacon_off = qla24xx_beacon_off, + .beacon_blink = qla83xx_beacon_blink, + .read_optrom = qla25xx_read_optrom_data, + .write_optrom = qla24xx_write_optrom_data, + .get_flash_version = qla24xx_get_flash_version, + .start_scsi = qla24xx_dif_start_scsi, + .start_scsi_mq = qla2xxx_dif_start_scsi_mq, + .abort_isp = qla2x00_abort_isp, + .iospace_config = qla83xx_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, +}; + +static inline void +qla2x00_set_isp_flags(struct qla_hw_data *ha) +{ + ha->device_type = DT_EXTENDED_IDS; + switch (ha->pdev->device) { + case PCI_DEVICE_ID_QLOGIC_ISP2100: + ha->isp_type |= DT_ISP2100; + ha->device_type &= ~DT_EXTENDED_IDS; + ha->fw_srisc_address = RISC_START_ADDRESS_2100; + break; + case PCI_DEVICE_ID_QLOGIC_ISP2200: + ha->isp_type |= DT_ISP2200; + ha->device_type &= ~DT_EXTENDED_IDS; + ha->fw_srisc_address = RISC_START_ADDRESS_2100; + break; + case PCI_DEVICE_ID_QLOGIC_ISP2300: + ha->isp_type |= DT_ISP2300; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->fw_srisc_address = RISC_START_ADDRESS_2300; + break; + case PCI_DEVICE_ID_QLOGIC_ISP2312: + ha->isp_type |= DT_ISP2312; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->fw_srisc_address = RISC_START_ADDRESS_2300; + break; + case PCI_DEVICE_ID_QLOGIC_ISP2322: + ha->isp_type |= DT_ISP2322; + ha->device_type |= DT_ZIO_SUPPORTED; + if 
(ha->pdev->subsystem_vendor == 0x1028 && + ha->pdev->subsystem_device == 0x0170) + ha->device_type |= DT_OEM_001; + ha->fw_srisc_address = RISC_START_ADDRESS_2300; + break; + case PCI_DEVICE_ID_QLOGIC_ISP6312: + ha->isp_type |= DT_ISP6312; + ha->fw_srisc_address = RISC_START_ADDRESS_2300; + break; + case PCI_DEVICE_ID_QLOGIC_ISP6322: + ha->isp_type |= DT_ISP6322; + ha->fw_srisc_address = RISC_START_ADDRESS_2300; + break; + case PCI_DEVICE_ID_QLOGIC_ISP2422: + ha->isp_type |= DT_ISP2422; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + case PCI_DEVICE_ID_QLOGIC_ISP2432: + ha->isp_type |= DT_ISP2432; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + case PCI_DEVICE_ID_QLOGIC_ISP8432: + ha->isp_type |= DT_ISP8432; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + case PCI_DEVICE_ID_QLOGIC_ISP5422: + ha->isp_type |= DT_ISP5422; + ha->device_type |= DT_FWI2; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + case PCI_DEVICE_ID_QLOGIC_ISP5432: + ha->isp_type |= DT_ISP5432; + ha->device_type |= DT_FWI2; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + case PCI_DEVICE_ID_QLOGIC_ISP2532: + ha->isp_type |= DT_ISP2532; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + case PCI_DEVICE_ID_QLOGIC_ISP8001: + ha->isp_type |= DT_ISP8001; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + case PCI_DEVICE_ID_QLOGIC_ISP8021: + ha->isp_type |= DT_ISP8021; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + /* Initialize 82XX ISP flags */ + qla82xx_init_flags(ha); + break; + case PCI_DEVICE_ID_QLOGIC_ISP8044: + ha->isp_type |= DT_ISP8044; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + /* Initialize 82XX ISP flags */ + qla82xx_init_flags(ha); + break; + case PCI_DEVICE_ID_QLOGIC_ISP2031: + ha->isp_type |= DT_ISP2031; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->device_type |= DT_T10_PI; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + case PCI_DEVICE_ID_QLOGIC_ISP8031: + ha->isp_type |= DT_ISP8031; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->device_type |= DT_T10_PI; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + case PCI_DEVICE_ID_QLOGIC_ISPF001: + ha->isp_type |= DT_ISPFX00; + break; + case PCI_DEVICE_ID_QLOGIC_ISP2071: + ha->isp_type |= DT_ISP2071; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->device_type |= DT_T10_PI; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + case PCI_DEVICE_ID_QLOGIC_ISP2271: + ha->isp_type |= DT_ISP2271; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->device_type |= DT_T10_PI; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + case PCI_DEVICE_ID_QLOGIC_ISP2261: + ha->isp_type |= DT_ISP2261; + 
ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->device_type |= DT_T10_PI; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + case PCI_DEVICE_ID_QLOGIC_ISP2081: + case PCI_DEVICE_ID_QLOGIC_ISP2089: + ha->isp_type |= DT_ISP2081; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->device_type |= DT_T10_PI; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + case PCI_DEVICE_ID_QLOGIC_ISP2281: + case PCI_DEVICE_ID_QLOGIC_ISP2289: + ha->isp_type |= DT_ISP2281; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->device_type |= DT_T10_PI; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + } + + if (IS_QLA82XX(ha)) + ha->port_no = ha->portnum & 1; + else { + /* Get adapter physical port no from interrupt pin register. */ + pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no); + if (IS_QLA25XX(ha) || IS_QLA2031(ha) || + IS_QLA27XX(ha) || IS_QLA28XX(ha)) + ha->port_no--; + else + ha->port_no = !(ha->port_no & 1); + } + + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b, + "device_type=0x%x port=%d fw_srisc_address=0x%x.\n", + ha->device_type, ha->port_no, ha->fw_srisc_address); +} + +static void +qla2xxx_scan_start(struct Scsi_Host *shost) +{ + scsi_qla_host_t *vha = shost_priv(shost); + + if (vha->hw->flags.running_gold_fw) + return; + + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(RSCN_UPDATE, &vha->dpc_flags); + set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags); +} + +static int +qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + scsi_qla_host_t *vha = shost_priv(shost); + + if (test_bit(UNLOADING, &vha->dpc_flags)) + return 1; + if (!vha->host) + return 1; + if (time > vha->hw->loop_reset_delay * HZ) + return 1; + + return atomic_read(&vha->loop_state) == LOOP_READY; +} + +static void qla_heartbeat_work_fn(struct work_struct *work) +{ + struct qla_hw_data *ha = container_of(work, + struct qla_hw_data, heartbeat_work); + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + + if (!ha->flags.mbox_busy && base_vha->flags.init_done) + qla_no_op_mb(base_vha); +} + +static void qla2x00_iocb_work_fn(struct work_struct *work) +{ + struct scsi_qla_host *vha = container_of(work, + struct scsi_qla_host, iocb_work); + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + int i = 2; + unsigned long flags; + + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + return; + + while (!list_empty(&vha->work_list) && i > 0) { + qla2x00_do_work(vha); + i--; + } + + spin_lock_irqsave(&vha->work_lock, flags); + clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags); + spin_unlock_irqrestore(&vha->work_lock, flags); +} + +static void +qla_trace_init(void) +{ + qla_trc_array = trace_array_get_by_name("qla2xxx"); + if (!qla_trc_array) { + ql_log(ql_log_fatal, NULL, 0x0001, + "Unable to create qla2xxx trace instance, instance logging will be disabled.\n"); + return; + } + + QLA_TRACE_ENABLE(qla_trc_array); +} + +static void +qla_trace_uninit(void) +{ + if (!qla_trc_array) + return; + trace_array_put(qla_trc_array); +} + +/* + * PCI driver interface + */ +static int +qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int ret = -ENODEV; + struct Scsi_Host *host; + scsi_qla_host_t *base_vha = NULL; + struct qla_hw_data *ha; + char pci_info[30]; + char fw_str[30], wq_name[30]; + struct 
scsi_host_template *sht; + int bars, mem_only = 0; + uint16_t req_length = 0, rsp_length = 0; + struct req_que *req = NULL; + struct rsp_que *rsp = NULL; + int i; + + bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); + sht = &qla2xxx_driver_template; + if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) { + bars = pci_select_bars(pdev, IORESOURCE_MEM); + mem_only = 1; + ql_dbg_pci(ql_dbg_init, pdev, 0x0007, + "Mem only adapter.\n"); + } + ql_dbg_pci(ql_dbg_init, pdev, 0x0008, + "Bars=%d.\n", bars); + + if (mem_only) { + if (pci_enable_device_mem(pdev)) + return ret; + } else { + if (pci_enable_device(pdev)) + return ret; + } + + if (is_kdump_kernel()) { + ql2xmqsupport = 0; + ql2xallocfwdump = 0; + } + + ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); + if (!ha) { + ql_log_pci(ql_log_fatal, pdev, 0x0009, + "Unable to allocate memory for ha.\n"); + goto disable_device; + } + ql_dbg_pci(ql_dbg_init, pdev, 0x000a, + "Memory allocated for ha=%p.\n", ha); + ha->pdev = pdev; + INIT_LIST_HEAD(&ha->tgt.q_full_list); + spin_lock_init(&ha->tgt.q_full_lock); + spin_lock_init(&ha->tgt.sess_lock); + spin_lock_init(&ha->tgt.atio_lock); + + spin_lock_init(&ha->sadb_lock); + INIT_LIST_HEAD(&ha->sadb_tx_index_list); + INIT_LIST_HEAD(&ha->sadb_rx_index_list); + + spin_lock_init(&ha->sadb_fp_lock); + + if (qla_edif_sadb_build_free_pool(ha)) { + kfree(ha); + goto disable_device; + } + + atomic_set(&ha->nvme_active_aen_cnt, 0); + + /* Clear our data area */ + ha->bars = bars; + ha->mem_only = mem_only; + spin_lock_init(&ha->hardware_lock); + spin_lock_init(&ha->vport_slock); + mutex_init(&ha->selflogin_lock); + mutex_init(&ha->optrom_mutex); + + /* Set ISP-type information. */ + qla2x00_set_isp_flags(ha); + + /* Set EEH reset type to fundamental if required by hba */ + if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || + IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) + pdev->needs_freset = 1; + + ha->prev_topology = 0; + ha->init_cb_size = sizeof(init_cb_t); + ha->link_data_rate = PORT_SPEED_UNKNOWN; + ha->optrom_size = OPTROM_SIZE_2300; + ha->max_exchg = FW_MAX_EXCHANGES_CNT; + atomic_set(&ha->num_pend_mbx_stage1, 0); + atomic_set(&ha->num_pend_mbx_stage2, 0); + atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD); + ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD; + INIT_LIST_HEAD(&ha->tmf_pending); + INIT_LIST_HEAD(&ha->tmf_active); + + /* Assign ISP specific operations. 
*/ + if (IS_QLA2100(ha)) { + ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; + ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; + req_length = REQUEST_ENTRY_CNT_2100; + rsp_length = RESPONSE_ENTRY_CNT_2100; + ha->max_loop_id = SNS_LAST_LOOP_ID_2100; + ha->gid_list_info_size = 4; + ha->flash_conf_off = ~0; + ha->flash_data_off = ~0; + ha->nvram_conf_off = ~0; + ha->nvram_data_off = ~0; + ha->isp_ops = &qla2100_isp_ops; + } else if (IS_QLA2200(ha)) { + ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; + ha->mbx_count = MAILBOX_REGISTER_COUNT_2200; + req_length = REQUEST_ENTRY_CNT_2200; + rsp_length = RESPONSE_ENTRY_CNT_2100; + ha->max_loop_id = SNS_LAST_LOOP_ID_2100; + ha->gid_list_info_size = 4; + ha->flash_conf_off = ~0; + ha->flash_data_off = ~0; + ha->nvram_conf_off = ~0; + ha->nvram_data_off = ~0; + ha->isp_ops = &qla2100_isp_ops; + } else if (IS_QLA23XX(ha)) { + ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; + ha->mbx_count = MAILBOX_REGISTER_COUNT; + req_length = REQUEST_ENTRY_CNT_2200; + rsp_length = RESPONSE_ENTRY_CNT_2300; + ha->max_loop_id = SNS_LAST_LOOP_ID_2300; + ha->gid_list_info_size = 6; + if (IS_QLA2322(ha) || IS_QLA6322(ha)) + ha->optrom_size = OPTROM_SIZE_2322; + ha->flash_conf_off = ~0; + ha->flash_data_off = ~0; + ha->nvram_conf_off = ~0; + ha->nvram_data_off = ~0; + ha->isp_ops = &qla2300_isp_ops; + } else if (IS_QLA24XX_TYPE(ha)) { + ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; + ha->mbx_count = MAILBOX_REGISTER_COUNT; + req_length = REQUEST_ENTRY_CNT_24XX; + rsp_length = RESPONSE_ENTRY_CNT_2300; + ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; + ha->max_loop_id = SNS_LAST_LOOP_ID_2300; + ha->init_cb_size = sizeof(struct mid_init_cb_24xx); + ha->gid_list_info_size = 8; + ha->optrom_size = OPTROM_SIZE_24XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX; + ha->isp_ops = &qla24xx_isp_ops; + ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; + ha->flash_data_off = FARX_ACCESS_FLASH_DATA; + ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; + ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; + } else if (IS_QLA25XX(ha)) { + ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; + ha->mbx_count = MAILBOX_REGISTER_COUNT; + req_length = REQUEST_ENTRY_CNT_24XX; + rsp_length = RESPONSE_ENTRY_CNT_2300; + ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; + ha->max_loop_id = SNS_LAST_LOOP_ID_2300; + ha->init_cb_size = sizeof(struct mid_init_cb_24xx); + ha->gid_list_info_size = 8; + ha->optrom_size = OPTROM_SIZE_25XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; + ha->isp_ops = &qla25xx_isp_ops; + ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; + ha->flash_data_off = FARX_ACCESS_FLASH_DATA; + ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; + ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; + } else if (IS_QLA81XX(ha)) { + ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; + ha->mbx_count = MAILBOX_REGISTER_COUNT; + req_length = REQUEST_ENTRY_CNT_24XX; + rsp_length = RESPONSE_ENTRY_CNT_2300; + ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; + ha->max_loop_id = SNS_LAST_LOOP_ID_2300; + ha->init_cb_size = sizeof(struct mid_init_cb_81xx); + ha->gid_list_info_size = 8; + ha->optrom_size = OPTROM_SIZE_81XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; + ha->isp_ops = &qla81xx_isp_ops; + ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; + ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; + ha->nvram_conf_off = ~0; + ha->nvram_data_off = ~0; + } else if (IS_QLA82XX(ha)) { + ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; + ha->mbx_count = MAILBOX_REGISTER_COUNT; + req_length = REQUEST_ENTRY_CNT_82XX; + rsp_length = 
RESPONSE_ENTRY_CNT_82XX; + ha->max_loop_id = SNS_LAST_LOOP_ID_2300; + ha->init_cb_size = sizeof(struct mid_init_cb_81xx); + ha->gid_list_info_size = 8; + ha->optrom_size = OPTROM_SIZE_82XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; + ha->isp_ops = &qla82xx_isp_ops; + ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; + ha->flash_data_off = FARX_ACCESS_FLASH_DATA; + ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; + ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; + } else if (IS_QLA8044(ha)) { + ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; + ha->mbx_count = MAILBOX_REGISTER_COUNT; + req_length = REQUEST_ENTRY_CNT_82XX; + rsp_length = RESPONSE_ENTRY_CNT_82XX; + ha->max_loop_id = SNS_LAST_LOOP_ID_2300; + ha->init_cb_size = sizeof(struct mid_init_cb_81xx); + ha->gid_list_info_size = 8; + ha->optrom_size = OPTROM_SIZE_83XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; + ha->isp_ops = &qla8044_isp_ops; + ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; + ha->flash_data_off = FARX_ACCESS_FLASH_DATA; + ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; + ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; + } else if (IS_QLA83XX(ha)) { + ha->portnum = PCI_FUNC(ha->pdev->devfn); + ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; + ha->mbx_count = MAILBOX_REGISTER_COUNT; + req_length = REQUEST_ENTRY_CNT_83XX; + rsp_length = RESPONSE_ENTRY_CNT_83XX; + ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; + ha->max_loop_id = SNS_LAST_LOOP_ID_2300; + ha->init_cb_size = sizeof(struct mid_init_cb_81xx); + ha->gid_list_info_size = 8; + ha->optrom_size = OPTROM_SIZE_83XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; + ha->isp_ops = &qla83xx_isp_ops; + ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; + ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; + ha->nvram_conf_off = ~0; + ha->nvram_data_off = ~0; + } else if (IS_QLAFX00(ha)) { + ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00; + ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00; + ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00; + req_length = REQUEST_ENTRY_CNT_FX00; + rsp_length = RESPONSE_ENTRY_CNT_FX00; + ha->isp_ops = &qlafx00_isp_ops; + ha->port_down_retry_count = 30; /* default value */ + ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; + ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; + ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL; + ha->mr.fw_hbt_en = 1; + ha->mr.host_info_resend = false; + ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; + } else if (IS_QLA27XX(ha)) { + ha->portnum = PCI_FUNC(ha->pdev->devfn); + ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; + ha->mbx_count = MAILBOX_REGISTER_COUNT; + req_length = REQUEST_ENTRY_CNT_83XX; + rsp_length = RESPONSE_ENTRY_CNT_83XX; + ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; + ha->max_loop_id = SNS_LAST_LOOP_ID_2300; + ha->init_cb_size = sizeof(struct mid_init_cb_81xx); + ha->gid_list_info_size = 8; + ha->optrom_size = OPTROM_SIZE_83XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; + ha->isp_ops = &qla27xx_isp_ops; + ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; + ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; + ha->nvram_conf_off = ~0; + ha->nvram_data_off = ~0; + } else if (IS_QLA28XX(ha)) { + ha->portnum = PCI_FUNC(ha->pdev->devfn); + ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; + ha->mbx_count = MAILBOX_REGISTER_COUNT; + req_length = REQUEST_ENTRY_CNT_83XX; + rsp_length = RESPONSE_ENTRY_CNT_83XX; + ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; + ha->max_loop_id = SNS_LAST_LOOP_ID_2300; + ha->init_cb_size = sizeof(struct mid_init_cb_81xx); + ha->gid_list_info_size = 8; + 
ha->optrom_size = OPTROM_SIZE_28XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; + ha->isp_ops = &qla27xx_isp_ops; + ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX; + ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX; + ha->nvram_conf_off = ~0; + ha->nvram_data_off = ~0; + } + + ql_dbg_pci(ql_dbg_init, pdev, 0x001e, + "mbx_count=%d, req_length=%d, " + "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, " + "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, " + "max_fibre_devices=%d.\n", + ha->mbx_count, req_length, rsp_length, ha->max_loop_id, + ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, + ha->nvram_npiv_size, ha->max_fibre_devices); + ql_dbg_pci(ql_dbg_init, pdev, 0x001f, + "isp_ops=%p, flash_conf_off=%d, " + "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", + ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, + ha->nvram_conf_off, ha->nvram_data_off); + + /* Configure PCI I/O space */ + ret = ha->isp_ops->iospace_config(ha); + if (ret) + goto iospace_config_failed; + + ql_log_pci(ql_log_info, pdev, 0x001d, + "Found an ISP%04X irq %d iobase 0x%p.\n", + pdev->device, pdev->irq, ha->iobase); + mutex_init(&ha->vport_lock); + mutex_init(&ha->mq_lock); + init_completion(&ha->mbx_cmd_comp); + complete(&ha->mbx_cmd_comp); + init_completion(&ha->mbx_intr_comp); + init_completion(&ha->dcbx_comp); + init_completion(&ha->lb_portup_comp); + + set_bit(0, (unsigned long *) ha->vp_idx_map); + + qla2x00_config_dma_addressing(ha); + ql_dbg_pci(ql_dbg_init, pdev, 0x0020, + "64 Bit addressing is %s.\n", + ha->flags.enable_64bit_addressing ? "enable" : + "disable"); + ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); + if (ret) { + ql_log_pci(ql_log_fatal, pdev, 0x0031, + "Failed to allocate memory for adapter, aborting.\n"); + + goto probe_hw_failed; + } + + req->max_q_depth = MAX_Q_DEPTH; + if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) + req->max_q_depth = ql2xmaxqdepth; + + + base_vha = qla2x00_create_host(sht, ha); + if (!base_vha) { + ret = -ENOMEM; + goto probe_hw_failed; + } + + pci_set_drvdata(pdev, base_vha); + set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); + + host = base_vha->host; + base_vha->req = req; + if (IS_QLA2XXX_MIDTYPE(ha)) + base_vha->mgmt_svr_loop_id = + qla2x00_reserve_mgmt_server_loop_id(base_vha); + else + base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + + base_vha->vp_idx; + + /* Setup fcport template structure. 
*/ + ha->mr.fcport.vha = base_vha; + ha->mr.fcport.port_type = FCT_UNKNOWN; + ha->mr.fcport.loop_id = FC_NO_LOOP_ID; + qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED); + ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED; + ha->mr.fcport.scan_state = 1; + + qla2xxx_reset_stats(host, QLA2XX_HW_ERROR | QLA2XX_SHT_LNK_DWN | + QLA2XX_INT_ERR | QLA2XX_CMD_TIMEOUT | + QLA2XX_RESET_CMD_ERR | QLA2XX_TGT_SHT_LNK_DOWN); + + /* Set the SG table size based on ISP type */ + if (!IS_FWI2_CAPABLE(ha)) { + if (IS_QLA2100(ha)) + host->sg_tablesize = 32; + } else { + if (!IS_QLA82XX(ha)) + host->sg_tablesize = QLA_SG_ALL; + } + host->max_id = ha->max_fibre_devices; + host->cmd_per_lun = 3; + host->unique_id = host->host_no; + + if (ql2xenabledif && ql2xenabledif != 2) { + ql_log(ql_log_warn, base_vha, 0x302d, + "Invalid value for ql2xenabledif, resetting it to default (2)\n"); + ql2xenabledif = 2; + } + + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) + host->max_cmd_len = 32; + else + host->max_cmd_len = MAX_CMDSZ; + host->max_channel = MAX_BUSES - 1; + /* Older HBAs support only 16-bit LUNs */ + if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) && + ql2xmaxlun > 0xffff) + host->max_lun = 0xffff; + else + host->max_lun = ql2xmaxlun; + host->transportt = qla2xxx_transport_template; + sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); + + ql_dbg(ql_dbg_init, base_vha, 0x0033, + "max_id=%d this_id=%d " + "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d " + "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id, + host->this_id, host->cmd_per_lun, host->unique_id, + host->max_cmd_len, host->max_channel, host->max_lun, + host->transportt, sht->vendor_id); + + INIT_WORK(&ha->heartbeat_work, qla_heartbeat_work_fn); + + /* Set up the irqs */ + ret = qla2x00_request_irqs(ha, rsp); + if (ret) + goto probe_failed; + + /* Alloc arrays of request and response ring ptrs */ + ret = qla2x00_alloc_queues(ha, req, rsp); + if (ret) { + ql_log(ql_log_fatal, base_vha, 0x003d, + "Failed to allocate memory for queue pointers..." + "aborting.\n"); + ret = -ENODEV; + goto probe_failed; + } + + if (ha->mqenable) { + /* number of hardware queues supported by blk/scsi-mq*/ + host->nr_hw_queues = ha->max_qpairs; + + ql_dbg(ql_dbg_init, base_vha, 0x0192, + "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues); + } else { + if (ql2xnvmeenable) { + host->nr_hw_queues = ha->max_qpairs; + ql_dbg(ql_dbg_init, base_vha, 0x0194, + "FC-NVMe support is enabled, HW queues=%d\n", + host->nr_hw_queues); + } else { + ql_dbg(ql_dbg_init, base_vha, 0x0193, + "blk/scsi-mq disabled.\n"); + } + } + + qlt_probe_one_stage1(base_vha, ha); + + pci_save_state(pdev); + + /* Assign back pointers */ + rsp->req = req; + req->rsp = rsp; + + if (IS_QLAFX00(ha)) { + ha->rsp_q_map[0] = rsp; + ha->req_q_map[0] = req; + set_bit(0, ha->req_qid_map); + set_bit(0, ha->rsp_qid_map); + } + + /* FWI2-capable only. 
*/ + req->req_q_in = &ha->iobase->isp24.req_q_in; + req->req_q_out = &ha->iobase->isp24.req_q_out; + rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; + rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { + req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; + req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; + rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; + rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out; + } + + if (IS_QLAFX00(ha)) { + req->req_q_in = &ha->iobase->ispfx00.req_q_in; + req->req_q_out = &ha->iobase->ispfx00.req_q_out; + rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in; + rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out; + } + + if (IS_P3P_TYPE(ha)) { + req->req_q_out = &ha->iobase->isp82.req_q_out[0]; + rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0]; + rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; + } + + ql_dbg(ql_dbg_multiq, base_vha, 0xc009, + "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", + ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); + ql_dbg(ql_dbg_multiq, base_vha, 0xc00a, + "req->req_q_in=%p req->req_q_out=%p " + "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", + req->req_q_in, req->req_q_out, + rsp->rsp_q_in, rsp->rsp_q_out); + ql_dbg(ql_dbg_init, base_vha, 0x003e, + "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", + ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); + ql_dbg(ql_dbg_init, base_vha, 0x003f, + "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", + req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); + + ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0); + if (unlikely(!ha->wq)) { + ret = -ENOMEM; + goto probe_failed; + } + + if (ha->isp_ops->initialize_adapter(base_vha)) { + ql_log(ql_log_fatal, base_vha, 0x00d6, + "Failed to initialize adapter - Adapter flags %x.\n", + base_vha->device_flags); + + if (IS_QLA82XX(ha)) { + qla82xx_idc_lock(ha); + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA8XXX_DEV_FAILED); + qla82xx_idc_unlock(ha); + ql_log(ql_log_fatal, base_vha, 0x00d7, + "HW State: FAILED.\n"); + } else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + qla8044_wr_direct(base_vha, + QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_FAILED); + qla8044_idc_unlock(ha); + ql_log(ql_log_fatal, base_vha, 0x0150, + "HW State: FAILED.\n"); + } + + ret = -ENODEV; + goto probe_failed; + } + + if (IS_QLAFX00(ha)) + host->can_queue = QLAFX00_MAX_CANQUEUE; + else + host->can_queue = req->num_outstanding_cmds - 10; + + ql_dbg(ql_dbg_init, base_vha, 0x0032, + "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", + host->can_queue, base_vha->req, + base_vha->mgmt_svr_loop_id, host->sg_tablesize); + + /* Check if FW supports MQ or not for ISP25xx */ + if (IS_QLA25XX(ha) && !(ha->fw_attributes & BIT_6)) + ha->mqenable = 0; + + if (ha->mqenable) { + bool startit = false; + + if (QLA_TGT_MODE_ENABLED()) + startit = false; + + if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) + startit = true; + + /* Create start of day qpairs for Block MQ */ + for (i = 0; i < ha->max_qpairs; i++) + qla2xxx_create_qpair(base_vha, 5, 0, startit); + } + qla_init_iocb_limit(base_vha); + + if (ha->flags.running_gold_fw) + goto skip_dpc; + + /* + * Startup the kernel thread for this host adapter + */ + ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, + "%s_dpc", base_vha->host_str); + if (IS_ERR(ha->dpc_thread)) { + ql_log(ql_log_fatal, base_vha, 0x00ed, + "Failed to start DPC thread.\n"); + ret = PTR_ERR(ha->dpc_thread); + ha->dpc_thread = NULL; + goto 
probe_failed; + } + ql_dbg(ql_dbg_init, base_vha, 0x00ee, + "DPC thread started successfully.\n"); + + /* + * If we're not coming up in initiator mode, we might sit for + * a while without waking up the dpc thread, which leads to a + * stuck process warning. So just kick the dpc once here and + * let the kthread start (and go back to sleep in qla2x00_do_dpc). + */ + qla2xxx_wake_dpc(base_vha); + + INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); + + if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { + sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no); + ha->dpc_lp_wq = create_singlethread_workqueue(wq_name); + INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen); + + sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no); + ha->dpc_hp_wq = create_singlethread_workqueue(wq_name); + INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work); + INIT_WORK(&ha->idc_state_handler, + qla83xx_idc_state_handler_work); + INIT_WORK(&ha->nic_core_unrecoverable, + qla83xx_nic_core_unrecoverable_work); + } + +skip_dpc: + list_add_tail(&base_vha->list, &ha->vp_list); + base_vha->host->irq = ha->pdev->irq; + + /* Initialized the timer */ + qla2x00_start_timer(base_vha, WATCH_INTERVAL); + ql_dbg(ql_dbg_init, base_vha, 0x00ef, + "Started qla2x00_timer with " + "interval=%d.\n", WATCH_INTERVAL); + ql_dbg(ql_dbg_init, base_vha, 0x00f0, + "Detected hba at address=%p.\n", + ha); + + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { + if (ha->fw_attributes & BIT_4) { + int prot = 0, guard; + + base_vha->flags.difdix_supported = 1; + ql_dbg(ql_dbg_init, base_vha, 0x00f1, + "Registering for DIF/DIX type 1 and 3 protection.\n"); + if (ql2xprotmask) + scsi_host_set_prot(host, ql2xprotmask); + else + scsi_host_set_prot(host, + prot | SHOST_DIF_TYPE1_PROTECTION + | SHOST_DIF_TYPE2_PROTECTION + | SHOST_DIF_TYPE3_PROTECTION + | SHOST_DIX_TYPE1_PROTECTION + | SHOST_DIX_TYPE2_PROTECTION + | SHOST_DIX_TYPE3_PROTECTION); + + guard = SHOST_DIX_GUARD_CRC; + + if (IS_PI_IPGUARD_CAPABLE(ha) && + (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) + guard |= SHOST_DIX_GUARD_IP; + + if (ql2xprotguard) + scsi_host_set_guard(host, ql2xprotguard); + else + scsi_host_set_guard(host, guard); + } else + base_vha->flags.difdix_supported = 0; + } + + ha->isp_ops->enable_intrs(ha); + + if (IS_QLAFX00(ha)) { + ret = qlafx00_fx_disc(base_vha, + &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO); + host->sg_tablesize = (ha->mr.extended_io_enabled) ? 
+ QLA_SG_ALL : 128; + } + + ret = scsi_add_host(host, &pdev->dev); + if (ret) + goto probe_failed; + + base_vha->flags.init_done = 1; + base_vha->flags.online = 1; + ha->prev_minidump_failed = 0; + + ql_dbg(ql_dbg_init, base_vha, 0x00f2, + "Init done and hba is online.\n"); + + if (qla_ini_mode_enabled(base_vha) || + qla_dual_mode_enabled(base_vha)) + scsi_scan_host(host); + else + ql_log(ql_log_info, base_vha, 0x0122, + "skipping scsi_scan_host() for non-initiator port\n"); + + qla2x00_alloc_sysfs_attr(base_vha); + + if (IS_QLAFX00(ha)) { + ret = qlafx00_fx_disc(base_vha, + &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO); + + /* Register system information */ + ret = qlafx00_fx_disc(base_vha, + &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO); + } + + qla2x00_init_host_attr(base_vha); + + qla2x00_dfs_setup(base_vha); + + ql_log(ql_log_info, base_vha, 0x00fb, + "QLogic %s - %s.\n", ha->model_number, ha->model_desc); + ql_log(ql_log_info, base_vha, 0x00fc, + "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n", + pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info, + sizeof(pci_info)), + pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-', + base_vha->host_no, + ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str))); + + qlt_add_target(ha, base_vha); + + clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); + + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + return -ENODEV; + + return 0; + +probe_failed: + qla_enode_stop(base_vha); + qla_edb_stop(base_vha); + vfree(base_vha->scan.l); + if (base_vha->gnl.l) { + dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, + base_vha->gnl.l, base_vha->gnl.ldma); + base_vha->gnl.l = NULL; + } + + if (base_vha->timer_active) + qla2x00_stop_timer(base_vha); + base_vha->flags.online = 0; + if (ha->dpc_thread) { + struct task_struct *t = ha->dpc_thread; + + ha->dpc_thread = NULL; + kthread_stop(t); + } + + qla2x00_free_device(base_vha); + scsi_host_put(base_vha->host); + /* + * Need to NULL out local req/rsp after + * qla2x00_free_device => qla2x00_free_queues frees + * what these are pointing to. Or else we'll + * fall over below in qla2x00_free_req/rsp_que. + */ + req = NULL; + rsp = NULL; + +probe_hw_failed: + qla2x00_mem_free(ha); + qla2x00_free_req_que(ha, req); + qla2x00_free_rsp_que(ha, rsp); + qla2x00_clear_drv_active(ha); + +iospace_config_failed: + if (IS_P3P_TYPE(ha)) { + if (!ha->nx_pcibase) + iounmap((device_reg_t *)ha->nx_pcibase); + if (!ql2xdbwr) + iounmap((device_reg_t *)ha->nxdb_wr_ptr); + } else { + if (ha->iobase) + iounmap(ha->iobase); + if (ha->cregbase) + iounmap(ha->cregbase); + } + pci_release_selected_regions(ha->pdev, ha->bars); + kfree(ha); + +disable_device: + pci_disable_device(pdev); + return ret; +} + +static void __qla_set_remove_flag(scsi_qla_host_t *base_vha) +{ + scsi_qla_host_t *vp; + unsigned long flags; + struct qla_hw_data *ha; + + if (!base_vha) + return; + + ha = base_vha->hw; + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) + set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags); + + /* + * Indicate device removal to prevent future board_disable + * and wait until any pending board_disable has completed. 
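+ * Callers follow this with cancel_work_sync(&ha->board_disable) so that + * any board-disable work already queued has finished before teardown + * continues.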
+ */ + set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags); + spin_unlock_irqrestore(&ha->vport_slock, flags); +} + +static void +qla2x00_shutdown(struct pci_dev *pdev) +{ + scsi_qla_host_t *vha; + struct qla_hw_data *ha; + + vha = pci_get_drvdata(pdev); + ha = vha->hw; + + ql_log(ql_log_info, vha, 0xfffa, + "Adapter shutdown\n"); + + /* + * Prevent future board_disable and wait + * until any pending board_disable has completed. + */ + __qla_set_remove_flag(vha); + cancel_work_sync(&ha->board_disable); + + if (!atomic_read(&pdev->enable_cnt)) + return; + + /* Notify ISPFX00 firmware */ + if (IS_QLAFX00(ha)) + qlafx00_driver_shutdown(vha, 20); + + /* Turn-off FCE trace */ + if (ha->flags.fce_enabled) { + qla2x00_disable_fce_trace(vha, NULL, NULL); + ha->flags.fce_enabled = 0; + } + + /* Turn-off EFT trace */ + if (ha->eft) + qla2x00_disable_eft_trace(vha); + + if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { + if (ha->flags.fw_started) + qla2x00_abort_isp_cleanup(vha); + } else { + /* Stop currently executing firmware. */ + qla2x00_try_to_stop_firmware(vha); + } + + /* Disable timer */ + if (vha->timer_active) + qla2x00_stop_timer(vha); + + /* Turn adapter off line */ + vha->flags.online = 0; + + /* turn-off interrupts on the card */ + if (ha->interrupts_on) { + vha->flags.init_done = 0; + ha->isp_ops->disable_intrs(ha); + } + + qla2x00_free_irqs(vha); + + qla2x00_free_fw_dump(ha); + + pci_disable_device(pdev); + ql_log(ql_log_info, vha, 0xfffe, + "Adapter shutdown successfully.\n"); +} + +/* Deletes all the virtual ports for a given ha */ +static void +qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha) +{ + scsi_qla_host_t *vha; + unsigned long flags; + + mutex_lock(&ha->vport_lock); + while (ha->cur_vport_count) { + spin_lock_irqsave(&ha->vport_slock, flags); + + BUG_ON(base_vha->list.next == &ha->vp_list); + /* This assumes first entry in ha->vp_list is always base vha */ + vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list); + scsi_host_get(vha->host); + + spin_unlock_irqrestore(&ha->vport_slock, flags); + mutex_unlock(&ha->vport_lock); + + qla_nvme_delete(vha); + + fc_vport_terminate(vha->fc_vport); + scsi_host_put(vha->host); + + mutex_lock(&ha->vport_lock); + } + mutex_unlock(&ha->vport_lock); +} + +/* Stops all deferred work threads */ +static void +qla2x00_destroy_deferred_work(struct qla_hw_data *ha) +{ + /* Cancel all work and destroy DPC workqueues */ + if (ha->dpc_lp_wq) { + cancel_work_sync(&ha->idc_aen); + destroy_workqueue(ha->dpc_lp_wq); + ha->dpc_lp_wq = NULL; + } + + if (ha->dpc_hp_wq) { + cancel_work_sync(&ha->nic_core_reset); + cancel_work_sync(&ha->idc_state_handler); + cancel_work_sync(&ha->nic_core_unrecoverable); + destroy_workqueue(ha->dpc_hp_wq); + ha->dpc_hp_wq = NULL; + } + + /* Kill the kernel thread for this host */ + if (ha->dpc_thread) { + struct task_struct *t = ha->dpc_thread; + + /* + * qla2xxx_wake_dpc checks for ->dpc_thread + * so we need to zero it out. 
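	 * kthread_stop() below then blocks until the DPC thread has
	 * actually exited.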
+ */ + ha->dpc_thread = NULL; + kthread_stop(t); + } +} + +static void +qla2x00_unmap_iobases(struct qla_hw_data *ha) +{ + if (IS_QLA82XX(ha)) { + + iounmap((device_reg_t *)ha->nx_pcibase); + if (!ql2xdbwr) + iounmap((device_reg_t *)ha->nxdb_wr_ptr); + } else { + if (ha->iobase) + iounmap(ha->iobase); + + if (ha->cregbase) + iounmap(ha->cregbase); + + if (ha->mqiobase) + iounmap(ha->mqiobase); + + if (ha->msixbase) + iounmap(ha->msixbase); + } +} + +static void +qla2x00_clear_drv_active(struct qla_hw_data *ha) +{ + if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + qla8044_clear_drv_active(ha); + qla8044_idc_unlock(ha); + } else if (IS_QLA82XX(ha)) { + qla82xx_idc_lock(ha); + qla82xx_clear_drv_active(ha); + qla82xx_idc_unlock(ha); + } +} + +static void +qla2x00_remove_one(struct pci_dev *pdev) +{ + scsi_qla_host_t *base_vha; + struct qla_hw_data *ha; + + base_vha = pci_get_drvdata(pdev); + ha = base_vha->hw; + ql_log(ql_log_info, base_vha, 0xb079, + "Removing driver\n"); + __qla_set_remove_flag(base_vha); + cancel_work_sync(&ha->board_disable); + + /* + * If the PCI device is disabled then there was a PCI-disconnect and + * qla2x00_disable_board_on_pci_error has taken care of most of the + * resources. + */ + if (!atomic_read(&pdev->enable_cnt)) { + dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, + base_vha->gnl.l, base_vha->gnl.ldma); + base_vha->gnl.l = NULL; + scsi_host_put(base_vha->host); + kfree(ha); + pci_set_drvdata(pdev, NULL); + return; + } + qla2x00_wait_for_hba_ready(base_vha); + + /* + * if UNLOADING flag is already set, then continue unload, + * where it was set first. + */ + if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) + return; + + if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { + if (ha->flags.fw_started) + qla2x00_abort_isp_cleanup(base_vha); + } else if (!IS_QLAFX00(ha)) { + if (IS_QLA8031(ha)) { + ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, + "Clearing fcoe driver presence.\n"); + if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) + ql_dbg(ql_dbg_p3p, base_vha, 0xb079, + "Error while clearing DRV-Presence.\n"); + } + + qla2x00_try_to_stop_firmware(base_vha); + } + + qla2x00_wait_for_sess_deletion(base_vha); + + qla_nvme_delete(base_vha); + + dma_free_coherent(&ha->pdev->dev, + base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); + + base_vha->gnl.l = NULL; + qla_enode_stop(base_vha); + qla_edb_stop(base_vha); + + vfree(base_vha->scan.l); + + if (IS_QLAFX00(ha)) + qlafx00_driver_shutdown(base_vha, 20); + + qla2x00_delete_all_vps(ha, base_vha); + + qla2x00_dfs_remove(base_vha); + + qla84xx_put_chip(base_vha); + + /* Disable timer */ + if (base_vha->timer_active) + qla2x00_stop_timer(base_vha); + + base_vha->flags.online = 0; + + /* free DMA memory */ + if (ha->exlogin_buf) + qla2x00_free_exlogin_buffer(ha); + + /* free DMA memory */ + if (ha->exchoffld_buf) + qla2x00_free_exchoffld_buffer(ha); + + qla2x00_destroy_deferred_work(ha); + + qlt_remove_target(ha, base_vha); + + qla2x00_free_sysfs_attr(base_vha, true); + + fc_remove_host(base_vha->host); + + scsi_remove_host(base_vha->host); + + qla2x00_free_device(base_vha); + + qla2x00_clear_drv_active(ha); + + scsi_host_put(base_vha->host); + + qla2x00_unmap_iobases(ha); + + pci_release_selected_regions(ha->pdev, ha->bars); + kfree(ha); + + pci_disable_device(pdev); +} + +static inline void +qla24xx_free_purex_list(struct purex_list *list) +{ + struct purex_item *item, *next; + ulong flags; + + spin_lock_irqsave(&list->lock, flags); + list_for_each_entry_safe(item, next, 
&list->head, list) { + list_del(&item->list); + if (item == &item->vha->default_item) + continue; + kfree(item); + } + spin_unlock_irqrestore(&list->lock, flags); +} + +static void +qla2x00_free_device(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); + + /* Disable timer */ + if (vha->timer_active) + qla2x00_stop_timer(vha); + + qla25xx_delete_queues(vha); + vha->flags.online = 0; + + /* turn-off interrupts on the card */ + if (ha->interrupts_on) { + vha->flags.init_done = 0; + ha->isp_ops->disable_intrs(ha); + } + + qla2x00_free_fcports(vha); + + qla2x00_free_irqs(vha); + + /* Flush the work queue and remove it */ + if (ha->wq) { + destroy_workqueue(ha->wq); + ha->wq = NULL; + } + + + qla24xx_free_purex_list(&vha->purex_list); + + qla2x00_mem_free(ha); + + qla82xx_md_free(vha); + + qla_edif_sadb_release_free_pool(ha); + qla_edif_sadb_release(ha); + + qla2x00_free_queues(ha); +} + +void qla2x00_free_fcports(struct scsi_qla_host *vha) +{ + fc_port_t *fcport, *tfcport; + + list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) + qla2x00_free_fcport(fcport); +} + +static inline void +qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + int now; + + if (!fcport->rport) + return; + + if (fcport->rport) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x2109, + "%s %8phN. rport %p roles %x\n", + __func__, fcport->port_name, fcport->rport, + fcport->rport->roles); + fc_remote_port_delete(fcport->rport); + } + qlt_do_generation_tick(vha, &now); +} + +/* + * qla2x00_mark_device_lost Updates fcport state when device goes offline. + * + * Input: ha = adapter block pointer. fcport = port structure pointer. + * + * Return: None. + * + * Context: + */ +void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, + int do_login) +{ + if (IS_QLAFX00(vha->hw)) { + qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); + qla2x00_schedule_rport_del(vha, fcport); + return; + } + + if (atomic_read(&fcport->state) == FCS_ONLINE && + vha->vp_idx == fcport->vha->vp_idx) { + qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); + qla2x00_schedule_rport_del(vha, fcport); + } + + /* + * We may need to retry the login, so don't change the state of the + * port but do the retries. + */ + if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD) + qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); + + if (!do_login) + return; + + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); +} + +void +qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha) +{ + fc_port_t *fcport; + + ql_dbg(ql_dbg_disc, vha, 0x20f1, + "Mark all dev lost\n"); + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (ql2xfc2target && + fcport->loop_id != FC_NO_LOOP_ID && + (fcport->flags & FCF_FCP2_DEVICE) && + fcport->port_type == FCT_TARGET && + !qla2x00_reset_active(vha)) { + ql_dbg(ql_dbg_disc, vha, 0x211a, + "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC", + fcport->flags, fcport->port_type, + fcport->d_id.b24, fcport->port_name); + continue; + } + fcport->scan_state = 0; + qlt_schedule_sess_for_deletion(fcport); + } +} + +static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha) +{ + int i; + + if (IS_FWI2_CAPABLE(ha)) + return; + + for (i = 0; i < SNS_FIRST_LOOP_ID; i++) + set_bit(i, ha->loop_id_map); + set_bit(MANAGEMENT_SERVER, ha->loop_id_map); + set_bit(BROADCAST, ha->loop_id_map); +} + +/* +* qla2x00_mem_alloc +* Allocates adapter memory. +* +* Returns: +* 0 = success. +* !0 = failure. 
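*
* On success, *req and *rsp return newly allocated request/response queue
* descriptors with DMA-coherent rings attached and cross-linked.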
+*/ +static int +qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, + struct req_que **req, struct rsp_que **rsp) +{ + char name[16]; + int rc; + + if (QLA_TGT_MODE_ENABLED() || EDIF_CAP(ha)) { + ha->vp_map = kcalloc(MAX_MULTI_ID_FABRIC, sizeof(struct qla_vp_map), GFP_KERNEL); + if (!ha->vp_map) + goto fail; + } + + ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, + &ha->init_cb_dma, GFP_KERNEL); + if (!ha->init_cb) + goto fail_free_vp_map; + + rc = btree_init32(&ha->host_map); + if (rc) + goto fail_free_init_cb; + + if (qlt_mem_alloc(ha) < 0) + goto fail_free_btree; + + ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, + qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL); + if (!ha->gid_list) + goto fail_free_tgt_mem; + + ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); + if (!ha->srb_mempool) + goto fail_free_gid_list; + + if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) { + /* Allocate cache for CT6 Ctx. */ + if (!ctx_cachep) { + ctx_cachep = kmem_cache_create("qla2xxx_ctx", + sizeof(struct ct6_dsd), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!ctx_cachep) + goto fail_free_srb_mempool; + } + ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, + ctx_cachep); + if (!ha->ctx_mempool) + goto fail_free_srb_mempool; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021, + "ctx_cachep=%p ctx_mempool=%p.\n", + ctx_cachep, ha->ctx_mempool); + } + + /* Get memory for cached NVRAM */ + ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); + if (!ha->nvram) + goto fail_free_ctx_mempool; + + snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME, + ha->pdev->device); + ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, + DMA_POOL_SIZE, 8, 0); + if (!ha->s_dma_pool) + goto fail_free_nvram; + + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022, + "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", + ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); + + if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) { + ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, + DSD_LIST_DMA_POOL_SIZE, 8, 0); + if (!ha->dl_dma_pool) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x0023, + "Failed to allocate memory for dl_dma_pool.\n"); + goto fail_s_dma_pool; + } + + ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev, + FCP_CMND_DMA_POOL_SIZE, 8, 0); + if (!ha->fcp_cmnd_dma_pool) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x0024, + "Failed to allocate memory for fcp_cmnd_dma_pool.\n"); + goto fail_dl_dma_pool; + } + + if (ql2xenabledif) { + u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE; + struct dsd_dma *dsd, *nxt; + uint i; + /* Creata a DMA pool of buffers for DIF bundling */ + ha->dif_bundl_pool = dma_pool_create(name, + &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0); + if (!ha->dif_bundl_pool) { + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, + "%s: failed create dif_bundl_pool\n", + __func__); + goto fail_dif_bundl_dma_pool; + } + + INIT_LIST_HEAD(&ha->pool.good.head); + INIT_LIST_HEAD(&ha->pool.unusable.head); + ha->pool.good.count = 0; + ha->pool.unusable.count = 0; + for (i = 0; i < 128; i++) { + dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC); + if (!dsd) { + ql_dbg_pci(ql_dbg_init, ha->pdev, + 0xe0ee, "%s: failed alloc dsd\n", + __func__); + return -ENOMEM; + } + ha->dif_bundle_kallocs++; + + dsd->dsd_addr = dma_pool_alloc( + ha->dif_bundl_pool, GFP_ATOMIC, + &dsd->dsd_list_dma); + if (!dsd->dsd_addr) { + ql_dbg_pci(ql_dbg_init, ha->pdev, + 0xe0ee, + "%s: failed alloc ->dsd_addr\n", + __func__); + 
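				/*
				 * A descriptor whose pool allocation failed is
				 * dropped just below and the loop moves on.
				 * Each successful allocation is then screened
				 * for a 4GB DMA boundary crossing -- roughly,
				 * assuming MSD() extracts the upper 32 bits of
				 * the 64-bit DMA address:
				 *
				 *   crosses_4g = MSD(addr) !=
				 *       MSD(addr + DIF_BUNDLING_DMA_POOL_SIZE);
				 *
				 * Buffers that cross the boundary are parked on
				 * the unusable list so the pool can never hand
				 * them out again; the rest go on the good list
				 * and are returned to the pool once the loop
				 * completes.
				 */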
kfree(dsd); + ha->dif_bundle_kallocs--; + continue; + } + ha->dif_bundle_dma_allocs++; + + /* + * if DMA buffer crosses 4G boundary, + * put it on bad list + */ + if (MSD(dsd->dsd_list_dma) ^ + MSD(dsd->dsd_list_dma + bufsize)) { + list_add_tail(&dsd->list, + &ha->pool.unusable.head); + ha->pool.unusable.count++; + } else { + list_add_tail(&dsd->list, + &ha->pool.good.head); + ha->pool.good.count++; + } + } + + /* return the good ones back to the pool */ + list_for_each_entry_safe(dsd, nxt, + &ha->pool.good.head, list) { + list_del(&dsd->list); + dma_pool_free(ha->dif_bundl_pool, + dsd->dsd_addr, dsd->dsd_list_dma); + ha->dif_bundle_dma_allocs--; + kfree(dsd); + ha->dif_bundle_kallocs--; + } + + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, + "%s: dif dma pool (good=%u unusable=%u)\n", + __func__, ha->pool.good.count, + ha->pool.unusable.count); + } + + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025, + "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n", + ha->dl_dma_pool, ha->fcp_cmnd_dma_pool, + ha->dif_bundl_pool); + } + + /* Allocate memory for SNS commands */ + if (IS_QLA2100(ha) || IS_QLA2200(ha)) { + /* Get consistent memory allocated for SNS commands */ + ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); + if (!ha->sns_cmd) + goto fail_dma_pool; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026, + "sns_cmd: %p.\n", ha->sns_cmd); + } else { + /* Get consistent memory allocated for MS IOCB */ + ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, + &ha->ms_iocb_dma); + if (!ha->ms_iocb) + goto fail_dma_pool; + /* Get consistent memory allocated for CT SNS commands */ + ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); + if (!ha->ct_sns) + goto fail_free_ms_iocb; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027, + "ms_iocb=%p ct_sns=%p.\n", + ha->ms_iocb, ha->ct_sns); + } + + /* Allocate memory for request ring */ + *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); + if (!*req) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x0028, + "Failed to allocate memory for req.\n"); + goto fail_req; + } + (*req)->length = req_len; + (*req)->ring = dma_alloc_coherent(&ha->pdev->dev, + ((*req)->length + 1) * sizeof(request_t), + &(*req)->dma, GFP_KERNEL); + if (!(*req)->ring) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x0029, + "Failed to allocate memory for req_ring.\n"); + goto fail_req_ring; + } + /* Allocate memory for response ring */ + *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); + if (!*rsp) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x002a, + "Failed to allocate memory for rsp.\n"); + goto fail_rsp; + } + (*rsp)->hw = ha; + (*rsp)->length = rsp_len; + (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev, + ((*rsp)->length + 1) * sizeof(response_t), + &(*rsp)->dma, GFP_KERNEL); + if (!(*rsp)->ring) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x002b, + "Failed to allocate memory for rsp_ring.\n"); + goto fail_rsp_ring; + } + (*req)->rsp = *rsp; + (*rsp)->req = *req; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c, + "req=%p req->length=%d req->ring=%p rsp=%p " + "rsp->length=%d rsp->ring=%p.\n", + *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length, + (*rsp)->ring); + /* Allocate memory for NVRAM data for vports */ + if (ha->nvram_npiv_size) { + ha->npiv_info = kcalloc(ha->nvram_npiv_size, + sizeof(struct qla_npiv_entry), + GFP_KERNEL); + if (!ha->npiv_info) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x002d, + "Failed to allocate memory for npiv_info.\n"); + goto fail_npiv_info; + } + } else + 
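		/* nvram_npiv_size is zero: no vport NVRAM entries to cache. */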
ha->npiv_info = NULL; + + /* Get consistent memory allocated for EX-INIT-CB. */ + if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { + ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, + &ha->ex_init_cb_dma); + if (!ha->ex_init_cb) + goto fail_ex_init_cb; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e, + "ex_init_cb=%p.\n", ha->ex_init_cb); + } + + /* Get consistent memory allocated for Special Features-CB. */ + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + ha->sf_init_cb = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, + &ha->sf_init_cb_dma); + if (!ha->sf_init_cb) + goto fail_sf_init_cb; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199, + "sf_init_cb=%p.\n", ha->sf_init_cb); + } + + + /* Get consistent memory allocated for Async Port-Database. */ + if (!IS_FWI2_CAPABLE(ha)) { + ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, + &ha->async_pd_dma); + if (!ha->async_pd) + goto fail_async_pd; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f, + "async_pd=%p.\n", ha->async_pd); + } + + INIT_LIST_HEAD(&ha->vp_list); + + /* Allocate memory for our loop_id bitmap */ + ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE), + sizeof(long), + GFP_KERNEL); + if (!ha->loop_id_map) + goto fail_loop_id_map; + else { + qla2x00_set_reserved_loop_ids(ha); + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, + "loop_id_map=%p.\n", ha->loop_id_map); + } + + ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev, + SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL); + if (!ha->sfp_data) { + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, + "Unable to allocate memory for SFP read-data.\n"); + goto fail_sfp_data; + } + + ha->flt = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma, + GFP_KERNEL); + if (!ha->flt) { + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, + "Unable to allocate memory for FLT.\n"); + goto fail_flt_buffer; + } + + /* allocate the purex dma pool */ + ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev, + ELS_MAX_PAYLOAD, 8, 0); + + if (!ha->purex_dma_pool) { + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, + "Unable to allocate purex_dma_pool.\n"); + goto fail_flt; + } + + ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16; + ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev, + ha->elsrej.size, + &ha->elsrej.cdma, + GFP_KERNEL); + if (!ha->elsrej.c) { + ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff, + "Alloc failed for els reject cmd.\n"); + goto fail_elsrej; + } + ha->elsrej.c->er_cmd = ELS_LS_RJT; + ha->elsrej.c->er_reason = ELS_RJT_LOGIC; + ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA; + + ha->lsrjt.size = sizeof(struct fcnvme_ls_rjt); + ha->lsrjt.c = dma_alloc_coherent(&ha->pdev->dev, ha->lsrjt.size, + &ha->lsrjt.cdma, GFP_KERNEL); + if (!ha->lsrjt.c) { + ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff, + "Alloc failed for nvme fc reject cmd.\n"); + goto fail_lsrjt; + } + + return 0; + +fail_lsrjt: + dma_free_coherent(&ha->pdev->dev, ha->elsrej.size, + ha->elsrej.c, ha->elsrej.cdma); +fail_elsrej: + dma_pool_destroy(ha->purex_dma_pool); +fail_flt: + dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, + ha->flt, ha->flt_dma); + +fail_flt_buffer: + dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, + ha->sfp_data, ha->sfp_data_dma); +fail_sfp_data: + kfree(ha->loop_id_map); +fail_loop_id_map: + dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); +fail_async_pd: + dma_pool_free(ha->s_dma_pool, ha->sf_init_cb, ha->sf_init_cb_dma); +fail_sf_init_cb: + dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); +fail_ex_init_cb: + 
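	/*
	 * The labels below keep unwinding in reverse order of allocation;
	 * each one releases only what was successfully set up before the
	 * corresponding goto fired, ending in the fatal log and -ENOMEM.
	 */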
kfree(ha->npiv_info); +fail_npiv_info: + dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) * + sizeof(response_t), (*rsp)->ring, (*rsp)->dma); + (*rsp)->ring = NULL; + (*rsp)->dma = 0; +fail_rsp_ring: + kfree(*rsp); + *rsp = NULL; +fail_rsp: + dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) * + sizeof(request_t), (*req)->ring, (*req)->dma); + (*req)->ring = NULL; + (*req)->dma = 0; +fail_req_ring: + kfree(*req); + *req = NULL; +fail_req: + dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), + ha->ct_sns, ha->ct_sns_dma); + ha->ct_sns = NULL; + ha->ct_sns_dma = 0; +fail_free_ms_iocb: + dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); + ha->ms_iocb = NULL; + ha->ms_iocb_dma = 0; + + if (ha->sns_cmd) + dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), + ha->sns_cmd, ha->sns_cmd_dma); +fail_dma_pool: + if (ql2xenabledif) { + struct dsd_dma *dsd, *nxt; + + list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, + list) { + list_del(&dsd->list); + dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, + dsd->dsd_list_dma); + ha->dif_bundle_dma_allocs--; + kfree(dsd); + ha->dif_bundle_kallocs--; + ha->pool.unusable.count--; + } + dma_pool_destroy(ha->dif_bundl_pool); + ha->dif_bundl_pool = NULL; + } + +fail_dif_bundl_dma_pool: + if (IS_QLA82XX(ha) || ql2xenabledif) { + dma_pool_destroy(ha->fcp_cmnd_dma_pool); + ha->fcp_cmnd_dma_pool = NULL; + } +fail_dl_dma_pool: + if (IS_QLA82XX(ha) || ql2xenabledif) { + dma_pool_destroy(ha->dl_dma_pool); + ha->dl_dma_pool = NULL; + } +fail_s_dma_pool: + dma_pool_destroy(ha->s_dma_pool); + ha->s_dma_pool = NULL; +fail_free_nvram: + kfree(ha->nvram); + ha->nvram = NULL; +fail_free_ctx_mempool: + mempool_destroy(ha->ctx_mempool); + ha->ctx_mempool = NULL; +fail_free_srb_mempool: + mempool_destroy(ha->srb_mempool); + ha->srb_mempool = NULL; +fail_free_gid_list: + dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), + ha->gid_list, + ha->gid_list_dma); + ha->gid_list = NULL; + ha->gid_list_dma = 0; +fail_free_tgt_mem: + qlt_mem_free(ha); +fail_free_btree: + btree_destroy32(&ha->host_map); +fail_free_init_cb: + dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, + ha->init_cb_dma); + ha->init_cb = NULL; + ha->init_cb_dma = 0; +fail_free_vp_map: + kfree(ha->vp_map); +fail: + ql_log(ql_log_fatal, NULL, 0x0030, + "Memory allocation failure.\n"); + return -ENOMEM; +} + +int +qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha) +{ + int rval; + uint16_t size, max_cnt; + uint32_t temp; + struct qla_hw_data *ha = vha->hw; + + /* Return if we don't need to alloacate any extended logins */ + if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400) + return QLA_SUCCESS; + + if (!IS_EXLOGIN_OFFLD_CAPABLE(ha)) + return QLA_SUCCESS; + + ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins); + max_cnt = 0; + rval = qla_get_exlogin_status(vha, &size, &max_cnt); + if (rval != QLA_SUCCESS) { + ql_log_pci(ql_log_fatal, ha->pdev, 0xd029, + "Failed to get exlogin status.\n"); + return rval; + } + + temp = (ql2xexlogins > max_cnt) ? 
max_cnt : ql2xexlogins; + temp *= size; + + if (temp != ha->exlogin_size) { + qla2x00_free_exlogin_buffer(ha); + ha->exlogin_size = temp; + + ql_log(ql_log_info, vha, 0xd024, + "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n", + max_cnt, size, temp); + + ql_log(ql_log_info, vha, 0xd025, + "EXLOGIN: requested size=0x%x\n", ha->exlogin_size); + + /* Get consistent memory for extended logins */ + ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev, + ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL); + if (!ha->exlogin_buf) { + ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a, + "Failed to allocate memory for exlogin_buf_dma.\n"); + return -ENOMEM; + } + } + + /* Now configure the dma buffer */ + rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma); + if (rval) { + ql_log(ql_log_fatal, vha, 0xd033, + "Setup extended login buffer ****FAILED****.\n"); + qla2x00_free_exlogin_buffer(ha); + } + + return rval; +} + +/* +* qla2x00_free_exlogin_buffer +* +* Input: +* ha = adapter block pointer +*/ +void +qla2x00_free_exlogin_buffer(struct qla_hw_data *ha) +{ + if (ha->exlogin_buf) { + dma_free_coherent(&ha->pdev->dev, ha->exlogin_size, + ha->exlogin_buf, ha->exlogin_buf_dma); + ha->exlogin_buf = NULL; + ha->exlogin_size = 0; + } +} + +static void +qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt) +{ + u32 temp; + struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb; + *ret_cnt = FW_DEF_EXCHANGES_CNT; + + if (max_cnt > vha->hw->max_exchg) + max_cnt = vha->hw->max_exchg; + + if (qla_ini_mode_enabled(vha)) { + if (vha->ql2xiniexchg > max_cnt) + vha->ql2xiniexchg = max_cnt; + + if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT) + *ret_cnt = vha->ql2xiniexchg; + + } else if (qla_tgt_mode_enabled(vha)) { + if (vha->ql2xexchoffld > max_cnt) { + vha->ql2xexchoffld = max_cnt; + icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); + } + + if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT) + *ret_cnt = vha->ql2xexchoffld; + } else if (qla_dual_mode_enabled(vha)) { + temp = vha->ql2xiniexchg + vha->ql2xexchoffld; + if (temp > max_cnt) { + vha->ql2xiniexchg -= (temp - max_cnt)/2; + vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1); + temp = max_cnt; + icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); + } + + if (temp > FW_DEF_EXCHANGES_CNT) + *ret_cnt = temp; + } +} + +int +qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha) +{ + int rval; + u16 size, max_cnt; + u32 actual_cnt, totsz; + struct qla_hw_data *ha = vha->hw; + + if (!ha->flags.exchoffld_enabled) + return QLA_SUCCESS; + + if (!IS_EXCHG_OFFLD_CAPABLE(ha)) + return QLA_SUCCESS; + + max_cnt = 0; + rval = qla_get_exchoffld_status(vha, &size, &max_cnt); + if (rval != QLA_SUCCESS) { + ql_log_pci(ql_log_fatal, ha->pdev, 0xd012, + "Failed to get exlogin status.\n"); + return rval; + } + + qla2x00_number_of_exch(vha, &actual_cnt, max_cnt); + ql_log(ql_log_info, vha, 0xd014, + "Actual exchange offload count: %d.\n", actual_cnt); + + totsz = actual_cnt * size; + + if (totsz != ha->exchoffld_size) { + qla2x00_free_exchoffld_buffer(ha); + if (actual_cnt <= FW_DEF_EXCHANGES_CNT) { + ha->exchoffld_size = 0; + ha->flags.exchoffld_enabled = 0; + return QLA_SUCCESS; + } + + ha->exchoffld_size = totsz; + + ql_log(ql_log_info, vha, 0xd016, + "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n", + max_cnt, actual_cnt, size, totsz); + + ql_log(ql_log_info, vha, 0xd017, + "Exchange Buffers requested size = 0x%x\n", + ha->exchoffld_size); + + /* Get consistent memory for extended logins */ + ha->exchoffld_buf = 
dma_alloc_coherent(&ha->pdev->dev, + ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL); + if (!ha->exchoffld_buf) { + ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, + "Failed to allocate memory for Exchange Offload.\n"); + + if (ha->max_exchg > + (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) { + ha->max_exchg -= REDUCE_EXCHANGES_CNT; + } else if (ha->max_exchg > + (FW_DEF_EXCHANGES_CNT + 512)) { + ha->max_exchg -= 512; + } else { + ha->flags.exchoffld_enabled = 0; + ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, + "Disabling Exchange offload due to lack of memory\n"); + } + ha->exchoffld_size = 0; + + return -ENOMEM; + } + } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) { + /* pathological case */ + qla2x00_free_exchoffld_buffer(ha); + ha->exchoffld_size = 0; + ha->flags.exchoffld_enabled = 0; + ql_log(ql_log_info, vha, 0xd016, + "Exchange offload not enable: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n", + ha->exchoffld_size, actual_cnt, size, totsz); + return 0; + } + + /* Now configure the dma buffer */ + rval = qla_set_exchoffld_mem_cfg(vha); + if (rval) { + ql_log(ql_log_fatal, vha, 0xd02e, + "Setup exchange offload buffer ****FAILED****.\n"); + qla2x00_free_exchoffld_buffer(ha); + } else { + /* re-adjust number of target exchange */ + struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb; + + if (qla_ini_mode_enabled(vha)) + icb->exchange_count = 0; + else + icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); + } + + return rval; +} + +/* +* qla2x00_free_exchoffld_buffer +* +* Input: +* ha = adapter block pointer +*/ +void +qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha) +{ + if (ha->exchoffld_buf) { + dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size, + ha->exchoffld_buf, ha->exchoffld_buf_dma); + ha->exchoffld_buf = NULL; + ha->exchoffld_size = 0; + } +} + +/* +* qla2x00_free_fw_dump +* Frees fw dump stuff. +* +* Input: +* ha = adapter block pointer +*/ +static void +qla2x00_free_fw_dump(struct qla_hw_data *ha) +{ + struct fwdt *fwdt = ha->fwdt; + uint j; + + if (ha->fce) + dma_free_coherent(&ha->pdev->dev, + FCE_SIZE, ha->fce, ha->fce_dma); + + if (ha->eft) + dma_free_coherent(&ha->pdev->dev, + EFT_SIZE, ha->eft, ha->eft_dma); + + vfree(ha->fw_dump); + + ha->fce = NULL; + ha->fce_dma = 0; + ha->flags.fce_enabled = 0; + ha->eft = NULL; + ha->eft_dma = 0; + ha->fw_dumped = false; + ha->fw_dump_cap_flags = 0; + ha->fw_dump_reading = 0; + ha->fw_dump = NULL; + ha->fw_dump_len = 0; + + for (j = 0; j < 2; j++, fwdt++) { + vfree(fwdt->template); + fwdt->template = NULL; + fwdt->length = 0; + } +} + +/* +* qla2x00_mem_free +* Frees all adapter allocated memory. +* +* Input: +* ha = adapter block pointer. 
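*
* Tear-down roughly mirrors qla2x00_mem_alloc(): DMA buffers, pools and
* cached tables are released, largely in reverse order of allocation, and
* the corresponding pointers are cleared.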
+*/ +static void +qla2x00_mem_free(struct qla_hw_data *ha) +{ + qla2x00_free_fw_dump(ha); + + if (ha->mctp_dump) + dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump, + ha->mctp_dump_dma); + ha->mctp_dump = NULL; + + mempool_destroy(ha->srb_mempool); + ha->srb_mempool = NULL; + + if (ha->dcbx_tlv) + dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, + ha->dcbx_tlv, ha->dcbx_tlv_dma); + ha->dcbx_tlv = NULL; + + if (ha->xgmac_data) + dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, + ha->xgmac_data, ha->xgmac_data_dma); + ha->xgmac_data = NULL; + + if (ha->sns_cmd) + dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), + ha->sns_cmd, ha->sns_cmd_dma); + ha->sns_cmd = NULL; + ha->sns_cmd_dma = 0; + + if (ha->ct_sns) + dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), + ha->ct_sns, ha->ct_sns_dma); + ha->ct_sns = NULL; + ha->ct_sns_dma = 0; + + if (ha->sfp_data) + dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data, + ha->sfp_data_dma); + ha->sfp_data = NULL; + + if (ha->flt) + dma_free_coherent(&ha->pdev->dev, + sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, + ha->flt, ha->flt_dma); + ha->flt = NULL; + ha->flt_dma = 0; + + if (ha->ms_iocb) + dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); + ha->ms_iocb = NULL; + ha->ms_iocb_dma = 0; + + if (ha->sf_init_cb) + dma_pool_free(ha->s_dma_pool, + ha->sf_init_cb, ha->sf_init_cb_dma); + + if (ha->ex_init_cb) + dma_pool_free(ha->s_dma_pool, + ha->ex_init_cb, ha->ex_init_cb_dma); + ha->ex_init_cb = NULL; + ha->ex_init_cb_dma = 0; + + if (ha->async_pd) + dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); + ha->async_pd = NULL; + ha->async_pd_dma = 0; + + dma_pool_destroy(ha->s_dma_pool); + ha->s_dma_pool = NULL; + + if (ha->gid_list) + dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), + ha->gid_list, ha->gid_list_dma); + ha->gid_list = NULL; + ha->gid_list_dma = 0; + + if (ha->base_qpair && !list_empty(&ha->base_qpair->dsd_list)) { + struct dsd_dma *dsd_ptr, *tdsd_ptr; + + /* clean up allocated prev pool */ + list_for_each_entry_safe(dsd_ptr, tdsd_ptr, + &ha->base_qpair->dsd_list, list) { + dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, + dsd_ptr->dsd_list_dma); + list_del(&dsd_ptr->list); + kfree(dsd_ptr); + } + } + + dma_pool_destroy(ha->dl_dma_pool); + ha->dl_dma_pool = NULL; + + dma_pool_destroy(ha->fcp_cmnd_dma_pool); + ha->fcp_cmnd_dma_pool = NULL; + + mempool_destroy(ha->ctx_mempool); + ha->ctx_mempool = NULL; + + if (ql2xenabledif && ha->dif_bundl_pool) { + struct dsd_dma *dsd, *nxt; + + list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, + list) { + list_del(&dsd->list); + dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, + dsd->dsd_list_dma); + ha->dif_bundle_dma_allocs--; + kfree(dsd); + ha->dif_bundle_kallocs--; + ha->pool.unusable.count--; + } + list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) { + list_del(&dsd->list); + dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, + dsd->dsd_list_dma); + ha->dif_bundle_dma_allocs--; + kfree(dsd); + ha->dif_bundle_kallocs--; + } + } + + dma_pool_destroy(ha->dif_bundl_pool); + ha->dif_bundl_pool = NULL; + + qlt_mem_free(ha); + qla_remove_hostmap(ha); + + if (ha->init_cb) + dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, + ha->init_cb, ha->init_cb_dma); + + dma_pool_destroy(ha->purex_dma_pool); + ha->purex_dma_pool = NULL; + + if (ha->elsrej.c) { + dma_free_coherent(&ha->pdev->dev, ha->elsrej.size, + ha->elsrej.c, ha->elsrej.cdma); + ha->elsrej.c = NULL; + } + + if (ha->lsrjt.c) { + 
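		/*
		 * Release the NVMe-FC LS reject buffer that was set up at the
		 * end of qla2x00_mem_alloc().
		 */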
dma_free_coherent(&ha->pdev->dev, ha->lsrjt.size, ha->lsrjt.c, + ha->lsrjt.cdma); + ha->lsrjt.c = NULL; + } + + ha->init_cb = NULL; + ha->init_cb_dma = 0; + + vfree(ha->optrom_buffer); + ha->optrom_buffer = NULL; + kfree(ha->nvram); + ha->nvram = NULL; + kfree(ha->npiv_info); + ha->npiv_info = NULL; + kfree(ha->swl); + ha->swl = NULL; + kfree(ha->loop_id_map); + ha->sf_init_cb = NULL; + ha->sf_init_cb_dma = 0; + ha->loop_id_map = NULL; + + kfree(ha->vp_map); + ha->vp_map = NULL; +} + +struct scsi_qla_host *qla2x00_create_host(const struct scsi_host_template *sht, + struct qla_hw_data *ha) +{ + struct Scsi_Host *host; + struct scsi_qla_host *vha = NULL; + + host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); + if (!host) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x0107, + "Failed to allocate host from the scsi layer, aborting.\n"); + return NULL; + } + + /* Clear our data area */ + vha = shost_priv(host); + memset(vha, 0, sizeof(scsi_qla_host_t)); + + vha->host = host; + vha->host_no = host->host_no; + vha->hw = ha; + + vha->qlini_mode = ql2x_ini_mode; + vha->ql2xexchoffld = ql2xexchoffld; + vha->ql2xiniexchg = ql2xiniexchg; + + INIT_LIST_HEAD(&vha->vp_fcports); + INIT_LIST_HEAD(&vha->work_list); + INIT_LIST_HEAD(&vha->list); + INIT_LIST_HEAD(&vha->qla_cmd_list); + INIT_LIST_HEAD(&vha->logo_list); + INIT_LIST_HEAD(&vha->plogi_ack_list); + INIT_LIST_HEAD(&vha->qp_list); + INIT_LIST_HEAD(&vha->gnl.fcports); + INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn); + + INIT_LIST_HEAD(&vha->purex_list.head); + spin_lock_init(&vha->purex_list.lock); + + spin_lock_init(&vha->work_lock); + spin_lock_init(&vha->cmd_list_lock); + init_waitqueue_head(&vha->fcport_waitQ); + init_waitqueue_head(&vha->vref_waitq); + qla_enode_init(vha); + qla_edb_init(vha); + + + vha->gnl.size = sizeof(struct get_name_list_extended) * + (ha->max_loop_id + 1); + vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev, + vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL); + if (!vha->gnl.l) { + ql_log(ql_log_fatal, vha, 0xd04a, + "Alloc failed for name list.\n"); + scsi_host_put(vha->host); + return NULL; + } + + /* todo: what about ext login? 
*/ + vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp); + vha->scan.l = vmalloc(vha->scan.size); + if (!vha->scan.l) { + ql_log(ql_log_fatal, vha, 0xd04a, + "Alloc failed for scan database.\n"); + dma_free_coherent(&ha->pdev->dev, vha->gnl.size, + vha->gnl.l, vha->gnl.ldma); + vha->gnl.l = NULL; + scsi_host_put(vha->host); + return NULL; + } + INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn); + + snprintf(vha->host_str, sizeof(vha->host_str), "%s_%lu", + QLA2XXX_DRIVER_NAME, vha->host_no); + ql_dbg(ql_dbg_init, vha, 0x0041, + "Allocated the host=%p hw=%p vha=%p dev_name=%s", + vha->host, vha->hw, vha, + dev_name(&(ha->pdev->dev))); + + return vha; +} + +struct qla_work_evt * +qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) +{ + struct qla_work_evt *e; + + if (test_bit(UNLOADING, &vha->dpc_flags)) + return NULL; + + if (qla_vha_mark_busy(vha)) + return NULL; + + e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); + if (!e) { + QLA_VHA_MARK_NOT_BUSY(vha); + return NULL; + } + + INIT_LIST_HEAD(&e->list); + e->type = type; + e->flags = QLA_EVT_FLAG_FREE; + return e; +} + +int +qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) +{ + unsigned long flags; + bool q = false; + + spin_lock_irqsave(&vha->work_lock, flags); + list_add_tail(&e->list, &vha->work_list); + + if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags)) + q = true; + + spin_unlock_irqrestore(&vha->work_lock, flags); + + if (q) + queue_work(vha->hw->wq, &vha->iocb_work); + + return QLA_SUCCESS; +} + +int +qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, + u32 data) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_AEN); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.aen.code = code; + e->u.aen.data = data; + return qla2x00_post_work(vha, e); +} + +int +qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK); + if (!e) + return QLA_FUNCTION_FAILED; + + memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); + return qla2x00_post_work(vha, e); +} + +#define qla2x00_post_async_work(name, type) \ +int qla2x00_post_async_##name##_work( \ + struct scsi_qla_host *vha, \ + fc_port_t *fcport, uint16_t *data) \ +{ \ + struct qla_work_evt *e; \ + \ + e = qla2x00_alloc_work(vha, type); \ + if (!e) \ + return QLA_FUNCTION_FAILED; \ + \ + e->u.logio.fcport = fcport; \ + if (data) { \ + e->u.logio.data[0] = data[0]; \ + e->u.logio.data[1] = data[1]; \ + } \ + fcport->flags |= FCF_ASYNC_ACTIVE; \ + return qla2x00_post_work(vha, e); \ +} + +qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); +qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); +qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); +qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO); +qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE); + +int +qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.uevent.code = code; + return qla2x00_post_work(vha, e); +} + +static void +qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) +{ + char event_string[40]; + char *envp[] = { event_string, NULL }; + + switch (code) { + case QLA_UEVENT_CODE_FW_DUMP: + snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu", + vha->host_no); + break; + default: + /* do nothing */ + break; + } + kobject_uevent_env(&vha->hw->pdev->dev.kobj, 
KOBJ_CHANGE, envp); +} + +int +qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, + uint32_t *data, int cnt) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_AENFX); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.aenfx.evtcode = evtcode; + e->u.aenfx.count = cnt; + memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt); + return qla2x00_post_work(vha, e); +} + +void qla24xx_sched_upd_fcport(fc_port_t *fcport) +{ + unsigned long flags; + + if (IS_SW_RESV_ADDR(fcport->d_id)) + return; + + spin_lock_irqsave(&fcport->vha->work_lock, flags); + if (fcport->disc_state == DSC_UPD_FCPORT) { + spin_unlock_irqrestore(&fcport->vha->work_lock, flags); + return; + } + fcport->jiffies_at_registration = jiffies; + fcport->sec_since_registration = 0; + fcport->next_disc_state = DSC_DELETED; + qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT); + spin_unlock_irqrestore(&fcport->vha->work_lock, flags); + + queue_work(system_unbound_wq, &fcport->reg_work); +} + +static +void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) +{ + unsigned long flags; + fc_port_t *fcport = NULL, *tfcp; + struct qlt_plogi_ack_t *pla = + (struct qlt_plogi_ack_t *)e->u.new_sess.pla; + uint8_t free_fcport = 0; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC enter\n", + __func__, __LINE__, e->u.new_sess.port_name); + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1); + if (fcport) { + fcport->d_id = e->u.new_sess.id; + if (pla) { + fcport->fw_login_state = DSC_LS_PLOGI_PEND; + memcpy(fcport->node_name, + pla->iocb.u.isp24.u.plogi.node_name, + WWN_SIZE); + qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN); + /* we took an extra ref_count to prevent PLOGI ACK when + * fcport/sess has not been created. + */ + pla->ref_count--; + } + } else { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (fcport) { + fcport->d_id = e->u.new_sess.id; + fcport->flags |= FCF_FABRIC_DEVICE; + fcport->fw_login_state = DSC_LS_PLOGI_PEND; + fcport->tgt_short_link_down_cnt = 0; + + memcpy(fcport->port_name, e->u.new_sess.port_name, + WWN_SIZE); + + fcport->fc4_type = e->u.new_sess.fc4_type; + if (NVME_PRIORITY(vha->hw, fcport)) + fcport->do_prli_nvme = 1; + else + fcport->do_prli_nvme = 0; + + if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) { + fcport->dm_login_expire = jiffies + + QLA_N2N_WAIT_TIME * HZ; + fcport->fc4_type = FS_FC4TYPE_FCP; + fcport->n2n_flag = 1; + if (vha->flags.nvme_enabled) + fcport->fc4_type |= FS_FC4TYPE_NVME; + } + + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC mem alloc fail.\n", + __func__, e->u.new_sess.port_name); + + if (pla) { + list_del(&pla->list); + kmem_cache_free(qla_tgt_plogi_cachep, pla); + } + return; + } + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + /* search again to make sure no one else got ahead */ + tfcp = qla2x00_find_fcport_by_wwpn(vha, + e->u.new_sess.port_name, 1); + if (tfcp) { + /* should rarily happen */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC found existing fcport b4 add. 
DS %d LS %d\n", + __func__, tfcp->port_name, tfcp->disc_state, + tfcp->fw_login_state); + + free_fcport = 1; + } else { + list_add_tail(&fcport->list, &vha->vp_fcports); + + } + if (pla) { + qlt_plogi_ack_link(vha, pla, fcport, + QLT_PLOGI_LINK_SAME_WWN); + pla->ref_count--; + } + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + if (fcport) { + fcport->id_changed = 1; + fcport->scan_state = QLA_FCPORT_FOUND; + fcport->chip_reset = vha->hw->base_qpair->chip_reset; + memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE); + + if (pla) { + if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) { + u16 wd3_lo; + + fcport->fw_login_state = DSC_LS_PRLI_PEND; + fcport->local = 0; + fcport->loop_id = + le16_to_cpu( + pla->iocb.u.isp24.nport_handle); + fcport->fw_login_state = DSC_LS_PRLI_PEND; + wd3_lo = + le16_to_cpu( + pla->iocb.u.isp24.u.prli.wd3_lo); + + if (wd3_lo & BIT_7) + fcport->conf_compl_supported = 1; + + if ((wd3_lo & BIT_4) == 0) + fcport->port_type = FCT_INITIATOR; + else + fcport->port_type = FCT_TARGET; + } + qlt_plogi_ack_unref(vha, pla); + } else { + fc_port_t *dfcp = NULL; + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + tfcp = qla2x00_find_fcport_by_nportid(vha, + &e->u.new_sess.id, 1); + if (tfcp && (tfcp != fcport)) { + /* + * We have a conflict fcport with same NportID. + */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC found conflict b4 add. DS %d LS %d\n", + __func__, tfcp->port_name, tfcp->disc_state, + tfcp->fw_login_state); + + switch (tfcp->disc_state) { + case DSC_DELETED: + break; + case DSC_DELETE_PEND: + fcport->login_pause = 1; + tfcp->conflict = fcport; + break; + default: + fcport->login_pause = 1; + tfcp->conflict = fcport; + dfcp = tfcp; + break; + } + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + if (dfcp) + qlt_schedule_sess_for_deletion(tfcp); + + if (N2N_TOPO(vha->hw)) { + fcport->flags &= ~FCF_FABRIC_DEVICE; + fcport->keep_nport_handle = 1; + if (vha->flags.nvme_enabled) { + fcport->fc4_type = + (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP); + fcport->n2n_flag = 1; + } + fcport->fw_login_state = 0; + + schedule_delayed_work(&vha->scan.scan_work, 5); + } else { + qla24xx_fcport_handle_login(vha, fcport); + } + } + } + + if (free_fcport) { + qla2x00_free_fcport(fcport); + if (pla) { + list_del(&pla->list); + kmem_cache_free(qla_tgt_plogi_cachep, pla); + } + } +} + +static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e) +{ + struct srb *sp = e->u.iosb.sp; + int rval; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0x2043, + "%s: %s: Re-issue IOCB failed (%d).\n", + __func__, sp->name, rval); + qla24xx_sp_unmap(vha, sp); + } +} + +void +qla2x00_do_work(struct scsi_qla_host *vha) +{ + struct qla_work_evt *e, *tmp; + unsigned long flags; + LIST_HEAD(work); + int rc; + + spin_lock_irqsave(&vha->work_lock, flags); + list_splice_init(&vha->work_list, &work); + spin_unlock_irqrestore(&vha->work_lock, flags); + + list_for_each_entry_safe(e, tmp, &work, list) { + rc = QLA_SUCCESS; + switch (e->type) { + case QLA_EVT_AEN: + fc_host_post_event(vha->host, fc_get_event_number(), + e->u.aen.code, e->u.aen.data); + break; + case QLA_EVT_IDC_ACK: + qla81xx_idc_ack(vha, e->u.idc_ack.mb); + break; + case QLA_EVT_ASYNC_LOGIN: + qla2x00_async_login(vha, e->u.logio.fcport, + e->u.logio.data); + break; + case QLA_EVT_ASYNC_LOGOUT: + rc = qla2x00_async_logout(vha, e->u.logio.fcport); + break; + case QLA_EVT_ASYNC_ADISC: + qla2x00_async_adisc(vha, e->u.logio.fcport, + e->u.logio.data); + 
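			/*
			 * Of the cases in this switch only ASYNC_LOGOUT,
			 * ASYNC_PRLO and SA_REPLACE feed rc; an EAGAIN from
			 * them splices the unfinished events back onto the
			 * head of vha->work_list (see the check after the
			 * switch).
			 */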
break; + case QLA_EVT_UEVENT: + qla2x00_uevent_emit(vha, e->u.uevent.code); + break; + case QLA_EVT_AENFX: + qlafx00_process_aen(vha, e); + break; + case QLA_EVT_UNMAP: + qla24xx_sp_unmap(vha, e->u.iosb.sp); + break; + case QLA_EVT_RELOGIN: + qla2x00_relogin(vha); + break; + case QLA_EVT_NEW_SESS: + qla24xx_create_new_sess(vha, e); + break; + case QLA_EVT_GPDB: + qla24xx_async_gpdb(vha, e->u.fcport.fcport, + e->u.fcport.opt); + break; + case QLA_EVT_PRLI: + qla24xx_async_prli(vha, e->u.fcport.fcport); + break; + case QLA_EVT_GPSC: + qla24xx_async_gpsc(vha, e->u.fcport.fcport); + break; + case QLA_EVT_GNL: + qla24xx_async_gnl(vha, e->u.fcport.fcport); + break; + case QLA_EVT_NACK: + qla24xx_do_nack_work(vha, e); + break; + case QLA_EVT_ASYNC_PRLO: + rc = qla2x00_async_prlo(vha, e->u.logio.fcport); + break; + case QLA_EVT_ASYNC_PRLO_DONE: + qla2x00_async_prlo_done(vha, e->u.logio.fcport, + e->u.logio.data); + break; + case QLA_EVT_GPNFT: + qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type, + e->u.gpnft.sp); + break; + case QLA_EVT_GPNFT_DONE: + qla24xx_async_gpnft_done(vha, e->u.iosb.sp); + break; + case QLA_EVT_GNNFT_DONE: + qla24xx_async_gnnft_done(vha, e->u.iosb.sp); + break; + case QLA_EVT_GFPNID: + qla24xx_async_gfpnid(vha, e->u.fcport.fcport); + break; + case QLA_EVT_SP_RETRY: + qla_sp_retry(vha, e); + break; + case QLA_EVT_IIDMA: + qla_do_iidma_work(vha, e->u.fcport.fcport); + break; + case QLA_EVT_ELS_PLOGI: + qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, + e->u.fcport.fcport, false); + break; + case QLA_EVT_SA_REPLACE: + rc = qla24xx_issue_sa_replace_iocb(vha, e); + break; + } + + if (rc == EAGAIN) { + /* put 'work' at head of 'vha->work_list' */ + spin_lock_irqsave(&vha->work_lock, flags); + list_splice(&work, &vha->work_list); + spin_unlock_irqrestore(&vha->work_lock, flags); + break; + } + list_del_init(&e->list); + if (e->flags & QLA_EVT_FLAG_FREE) + kfree(e); + + /* For each work completed decrement vha ref count */ + QLA_VHA_MARK_NOT_BUSY(vha); + } +} + +int qla24xx_post_relogin_work(struct scsi_qla_host *vha) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN); + + if (!e) { + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + return QLA_FUNCTION_FAILED; + } + + return qla2x00_post_work(vha, e); +} + +/* Relogins all the fcports of a vport + * Context: dpc thread + */ +void qla2x00_relogin(struct scsi_qla_host *vha) +{ + fc_port_t *fcport; + int status, relogin_needed = 0; + struct event_arg ea; + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + /* + * If the port is not ONLINE then try to login + * to it if we haven't run out of retries. 
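		 * Anything other than a private FC-AL loop (ISP_CFG_NL) goes
		 * through qla24xx_handle_relogin_event(); the NL cases are
		 * retried directly below.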
+ */ + if (atomic_read(&fcport->state) != FCS_ONLINE && + fcport->login_retry) { + if (fcport->scan_state != QLA_FCPORT_FOUND || + fcport->disc_state == DSC_LOGIN_AUTH_PEND || + fcport->disc_state == DSC_LOGIN_COMPLETE) + continue; + + if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) || + fcport->disc_state == DSC_DELETE_PEND) { + relogin_needed = 1; + } else { + if (vha->hw->current_topology != ISP_CFG_NL) { + memset(&ea, 0, sizeof(ea)); + ea.fcport = fcport; + qla24xx_handle_relogin_event(vha, &ea); + } else if (vha->hw->current_topology == + ISP_CFG_NL && + IS_QLA2XXX_MIDTYPE(vha->hw)) { + (void)qla24xx_fcport_handle_login(vha, + fcport); + } else if (vha->hw->current_topology == + ISP_CFG_NL) { + fcport->login_retry--; + status = + qla2x00_local_device_login(vha, + fcport); + if (status == QLA_SUCCESS) { + fcport->old_loop_id = + fcport->loop_id; + ql_dbg(ql_dbg_disc, vha, 0x2003, + "Port login OK: logged in ID 0x%x.\n", + fcport->loop_id); + qla2x00_update_fcport + (vha, fcport); + } else if (status == 1) { + set_bit(RELOGIN_NEEDED, + &vha->dpc_flags); + /* retry the login again */ + ql_dbg(ql_dbg_disc, vha, 0x2007, + "Retrying %d login again loop_id 0x%x.\n", + fcport->login_retry, + fcport->loop_id); + } else { + fcport->login_retry = 0; + } + + if (fcport->login_retry == 0 && + status != QLA_SUCCESS) + qla2x00_clear_loop_id(fcport); + } + } + } + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + break; + } + + if (relogin_needed) + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + + ql_dbg(ql_dbg_disc, vha, 0x400e, + "Relogin end.\n"); +} + +/* Schedule work on any of the dpc-workqueues */ +void +qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code) +{ + struct qla_hw_data *ha = base_vha->hw; + + switch (work_code) { + case MBA_IDC_AEN: /* 0x8200 */ + if (ha->dpc_lp_wq) + queue_work(ha->dpc_lp_wq, &ha->idc_aen); + break; + + case QLA83XX_NIC_CORE_RESET: /* 0x1 */ + if (!ha->flags.nic_core_reset_hdlr_active) { + if (ha->dpc_hp_wq) + queue_work(ha->dpc_hp_wq, &ha->nic_core_reset); + } else + ql_dbg(ql_dbg_p3p, base_vha, 0xb05e, + "NIC Core reset is already active. 
Skip " + "scheduling it again.\n"); + break; + case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */ + if (ha->dpc_hp_wq) + queue_work(ha->dpc_hp_wq, &ha->idc_state_handler); + break; + case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */ + if (ha->dpc_hp_wq) + queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable); + break; + default: + ql_log(ql_log_warn, base_vha, 0xb05f, + "Unknown work-code=0x%x.\n", work_code); + } + + return; +} + +/* Work: Perform NIC Core Unrecoverable state handling */ +void +qla83xx_nic_core_unrecoverable_work(struct work_struct *work) +{ + struct qla_hw_data *ha = + container_of(work, struct qla_hw_data, nic_core_unrecoverable); + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + uint32_t dev_state = 0; + + qla83xx_idc_lock(base_vha, 0); + qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); + qla83xx_reset_ownership(base_vha); + if (ha->flags.nic_core_reset_owner) { + ha->flags.nic_core_reset_owner = 0; + qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, + QLA8XXX_DEV_FAILED); + ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n"); + qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); + } + qla83xx_idc_unlock(base_vha, 0); +} + +/* Work: Execute IDC state handler */ +void +qla83xx_idc_state_handler_work(struct work_struct *work) +{ + struct qla_hw_data *ha = + container_of(work, struct qla_hw_data, idc_state_handler); + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + uint32_t dev_state = 0; + + qla83xx_idc_lock(base_vha, 0); + qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); + if (dev_state == QLA8XXX_DEV_FAILED || + dev_state == QLA8XXX_DEV_NEED_QUIESCENT) + qla83xx_idc_state_handler(base_vha); + qla83xx_idc_unlock(base_vha, 0); +} + +static int +qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha) +{ + int rval = QLA_SUCCESS; + unsigned long heart_beat_wait = jiffies + (1 * HZ); + uint32_t heart_beat_counter1, heart_beat_counter2; + + do { + if (time_after(jiffies, heart_beat_wait)) { + ql_dbg(ql_dbg_p3p, base_vha, 0xb07c, + "Nic Core f/w is not alive.\n"); + rval = QLA_FUNCTION_FAILED; + break; + } + + qla83xx_idc_lock(base_vha, 0); + qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, + &heart_beat_counter1); + qla83xx_idc_unlock(base_vha, 0); + msleep(100); + qla83xx_idc_lock(base_vha, 0); + qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, + &heart_beat_counter2); + qla83xx_idc_unlock(base_vha, 0); + } while (heart_beat_counter1 == heart_beat_counter2); + + return rval; +} + +/* Work: Perform NIC Core Reset handling */ +void +qla83xx_nic_core_reset_work(struct work_struct *work) +{ + struct qla_hw_data *ha = + container_of(work, struct qla_hw_data, nic_core_reset); + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + uint32_t dev_state = 0; + + if (IS_QLA2031(ha)) { + if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS) + ql_log(ql_log_warn, base_vha, 0xb081, + "Failed to dump mctp\n"); + return; + } + + if (!ha->flags.nic_core_reset_hdlr_active) { + if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) { + qla83xx_idc_lock(base_vha, 0); + qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, + &dev_state); + qla83xx_idc_unlock(base_vha, 0); + if (dev_state != QLA8XXX_DEV_NEED_RESET) { + ql_dbg(ql_dbg_p3p, base_vha, 0xb07a, + "Nic Core f/w is alive.\n"); + return; + } + } + + ha->flags.nic_core_reset_hdlr_active = 1; + if (qla83xx_nic_core_reset(base_vha)) { + /* NIC Core reset failed. 
*/ + ql_dbg(ql_dbg_p3p, base_vha, 0xb061, + "NIC Core reset failed.\n"); + } + ha->flags.nic_core_reset_hdlr_active = 0; + } +} + +/* Work: Handle 8200 IDC aens */ +void +qla83xx_service_idc_aen(struct work_struct *work) +{ + struct qla_hw_data *ha = + container_of(work, struct qla_hw_data, idc_aen); + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + uint32_t dev_state, idc_control; + + qla83xx_idc_lock(base_vha, 0); + qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); + qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control); + qla83xx_idc_unlock(base_vha, 0); + if (dev_state == QLA8XXX_DEV_NEED_RESET) { + if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) { + ql_dbg(ql_dbg_p3p, base_vha, 0xb062, + "Application requested NIC Core Reset.\n"); + qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); + } else if (qla83xx_check_nic_core_fw_alive(base_vha) == + QLA_SUCCESS) { + ql_dbg(ql_dbg_p3p, base_vha, 0xb07b, + "Other protocol driver requested NIC Core Reset.\n"); + qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); + } + } else if (dev_state == QLA8XXX_DEV_FAILED || + dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { + qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); + } +} + +/* + * Control the frequency of IDC lock retries + */ +#define QLA83XX_WAIT_LOGIC_MS 100 + +static int +qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha) +{ + int rval; + uint32_t data; + uint32_t idc_lck_rcvry_stage_mask = 0x3; + uint32_t idc_lck_rcvry_owner_mask = 0x3c; + struct qla_hw_data *ha = base_vha->hw; + + ql_dbg(ql_dbg_p3p, base_vha, 0xb086, + "Trying force recovery of the IDC lock.\n"); + + rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data); + if (rval) + return rval; + + if ((data & idc_lck_rcvry_stage_mask) > 0) { + return QLA_SUCCESS; + } else { + data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2); + rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, + data); + if (rval) + return rval; + + msleep(200); + + rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, + &data); + if (rval) + return rval; + + if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) { + data &= (IDC_LOCK_RECOVERY_STAGE2 | + ~(idc_lck_rcvry_stage_mask)); + rval = qla83xx_wr_reg(base_vha, + QLA83XX_IDC_LOCK_RECOVERY, data); + if (rval) + return rval; + + /* Forcefully perform IDC UnLock */ + rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, + &data); + if (rval) + return rval; + /* Clear lock-id by setting 0xff */ + rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, + 0xff); + if (rval) + return rval; + /* Clear lock-recovery by setting 0x0 */ + rval = qla83xx_wr_reg(base_vha, + QLA83XX_IDC_LOCK_RECOVERY, 0x0); + if (rval) + return rval; + } else + return QLA_SUCCESS; + } + + return rval; +} + +static int +qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha) +{ + int rval = QLA_SUCCESS; + uint32_t o_drv_lockid, n_drv_lockid; + unsigned long lock_recovery_timeout; + + lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT; +retry_lockid: + rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid); + if (rval) + goto exit; + + /* MAX wait time before forcing IDC Lock recovery = 2 secs */ + if (time_after_eq(jiffies, lock_recovery_timeout)) { + if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS) + return QLA_SUCCESS; + else + return QLA_FUNCTION_FAILED; + } + + rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid); + if (rval) + goto exit; + + if (o_drv_lockid == n_drv_lockid) { + msleep(QLA83XX_WAIT_LOGIC_MS); 
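		/*
		 * The lock-id has not changed between the two reads, so the
		 * current owner is presumed still active; keep polling until
		 * either the lock-id changes hands or the 2 second window
		 * checked above expires and lock recovery is forced.
		 */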
+ goto retry_lockid; + } else + return QLA_SUCCESS; + +exit: + return rval; +} + +/* + * Context: task, can sleep + */ +void +qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) +{ + uint32_t data; + uint32_t lock_owner; + struct qla_hw_data *ha = base_vha->hw; + + might_sleep(); + + /* IDC-lock implementation using driver-lock/lock-id remote registers */ +retry_lock: + if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data) + == QLA_SUCCESS) { + if (data) { + /* Setting lock-id to our function-number */ + qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, + ha->portnum); + } else { + qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, + &lock_owner); + ql_dbg(ql_dbg_p3p, base_vha, 0xb063, + "Failed to acquire IDC lock, acquired by %d, " + "retrying...\n", lock_owner); + + /* Retry/Perform IDC-Lock recovery */ + if (qla83xx_idc_lock_recovery(base_vha) + == QLA_SUCCESS) { + msleep(QLA83XX_WAIT_LOGIC_MS); + goto retry_lock; + } else + ql_log(ql_log_warn, base_vha, 0xb075, + "IDC Lock recovery FAILED.\n"); + } + + } + + return; +} + +static bool +qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha, + struct purex_entry_24xx *purex) +{ + char fwstr[16]; + u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0]; + struct port_database_24xx *pdb; + + /* Domain Controller is always logged-out. */ + /* if RDP request is not from Domain Controller: */ + if (sid != 0xfffc01) + return false; + + ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid); + + pdb = kzalloc(sizeof(*pdb), GFP_KERNEL); + if (!pdb) { + ql_dbg(ql_dbg_init, vha, 0x0181, + "%s: Failed allocate pdb\n", __func__); + } else if (qla24xx_get_port_database(vha, + le16_to_cpu(purex->nport_handle), pdb)) { + ql_dbg(ql_dbg_init, vha, 0x0181, + "%s: Failed get pdb sid=%x\n", __func__, sid); + } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE && + pdb->current_login_state != PDS_PRLI_COMPLETE) { + ql_dbg(ql_dbg_init, vha, 0x0181, + "%s: Port not logged in sid=%#x\n", __func__, sid); + } else { + /* RDP request is from logged in port */ + kfree(pdb); + return false; + } + kfree(pdb); + + vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr)); + fwstr[strcspn(fwstr, " ")] = 0; + /* if FW version allows RDP response length upto 2048 bytes: */ + if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0) + return false; + + ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr); + + /* RDP response length is to be reduced to maximum 256 bytes */ + return true; +} + +/* + * Function Name: qla24xx_process_purex_iocb + * + * Description: + * Prepare a RDP response and send to Fabric switch + * + * PARAMETERS: + * vha: SCSI qla host + * purex: RDP request received by HBA + */ +void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, + struct purex_item *item) +{ + struct qla_hw_data *ha = vha->hw; + struct purex_entry_24xx *purex = + (struct purex_entry_24xx *)&item->iocb; + dma_addr_t rsp_els_dma; + dma_addr_t rsp_payload_dma; + dma_addr_t stat_dma; + dma_addr_t sfp_dma; + struct els_entry_24xx *rsp_els = NULL; + struct rdp_rsp_payload *rsp_payload = NULL; + struct link_statistics *stat = NULL; + uint8_t *sfp = NULL; + uint16_t sfp_flags = 0; + uint rsp_payload_length = sizeof(*rsp_payload); + int rval; + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180, + "%s: Enter\n", __func__); + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181, + "-------- ELS REQ -------\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182, + purex, sizeof(*purex)); + + if 
(qla25xx_rdp_rsp_reduce_size(vha, purex)) { + rsp_payload_length = + offsetof(typeof(*rsp_payload), optical_elmt_desc); + ql_dbg(ql_dbg_init, vha, 0x0181, + "Reducing RSP payload length to %u bytes...\n", + rsp_payload_length); + } + + rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), + &rsp_els_dma, GFP_KERNEL); + if (!rsp_els) { + ql_log(ql_log_warn, vha, 0x0183, + "Failed allocate dma buffer ELS RSP.\n"); + goto dealloc; + } + + rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload), + &rsp_payload_dma, GFP_KERNEL); + if (!rsp_payload) { + ql_log(ql_log_warn, vha, 0x0184, + "Failed allocate dma buffer ELS RSP payload.\n"); + goto dealloc; + } + + sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN, + &sfp_dma, GFP_KERNEL); + + stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat), + &stat_dma, GFP_KERNEL); + + /* Prepare Response IOCB */ + rsp_els->entry_type = ELS_IOCB_TYPE; + rsp_els->entry_count = 1; + rsp_els->sys_define = 0; + rsp_els->entry_status = 0; + rsp_els->handle = 0; + rsp_els->nport_handle = purex->nport_handle; + rsp_els->tx_dsd_count = cpu_to_le16(1); + rsp_els->vp_index = purex->vp_idx; + rsp_els->sof_type = EST_SOFI3; + rsp_els->rx_xchg_address = purex->rx_xchg_addr; + rsp_els->rx_dsd_count = 0; + rsp_els->opcode = purex->els_frame_payload[0]; + + rsp_els->d_id[0] = purex->s_id[0]; + rsp_els->d_id[1] = purex->s_id[1]; + rsp_els->d_id[2] = purex->s_id[2]; + + rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC); + rsp_els->rx_byte_count = 0; + rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length); + + put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address); + rsp_els->tx_len = rsp_els->tx_byte_count; + + rsp_els->rx_address = 0; + rsp_els->rx_len = 0; + + /* Prepare Response Payload */ + rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */ + rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) - + sizeof(rsp_payload->hdr)); + + /* Link service Request Info Descriptor */ + rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1); + rsp_payload->ls_req_info_desc.desc_len = + cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc)); + rsp_payload->ls_req_info_desc.req_payload_word_0 = + cpu_to_be32p((uint32_t *)purex->els_frame_payload); + + /* Link service Request Info Descriptor 2 */ + rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1); + rsp_payload->ls_req_info_desc2.desc_len = + cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2)); + rsp_payload->ls_req_info_desc2.req_payload_word_0 = + cpu_to_be32p((uint32_t *)purex->els_frame_payload); + + + rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000); + rsp_payload->sfp_diag_desc.desc_len = + cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc)); + + if (sfp) { + /* SFP Flags */ + memset(sfp, 0, SFP_RTDI_LEN); + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0); + if (!rval) { + /* SFP Flags bits 3-0: Port Tx Laser Type */ + if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5)) + sfp_flags |= BIT_0; /* short wave */ + else if (sfp[0] & BIT_1) + sfp_flags |= BIT_1; /* long wave 1310nm */ + else if (sfp[1] & BIT_4) + sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */ + } + + /* SFP Type */ + memset(sfp, 0, SFP_RTDI_LEN); + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0); + if (!rval) { + sfp_flags |= BIT_4; /* optical */ + if (sfp[0] == 0x3) + sfp_flags |= BIT_6; /* sfp+ */ + } + + rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags); + + /* SFP Diagnostics */ + memset(sfp, 0, SFP_RTDI_LEN); + rval = qla2x00_read_sfp(vha, 
sfp_dma, sfp, 0xa2, 0x60, 10, 0); + if (!rval) { + __be16 *trx = (__force __be16 *)sfp; /* already be16 */ + rsp_payload->sfp_diag_desc.temperature = trx[0]; + rsp_payload->sfp_diag_desc.vcc = trx[1]; + rsp_payload->sfp_diag_desc.tx_bias = trx[2]; + rsp_payload->sfp_diag_desc.tx_power = trx[3]; + rsp_payload->sfp_diag_desc.rx_power = trx[4]; + } + } + + /* Port Speed Descriptor */ + rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001); + rsp_payload->port_speed_desc.desc_len = + cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc)); + rsp_payload->port_speed_desc.speed_capab = cpu_to_be16( + qla25xx_fdmi_port_speed_capability(ha)); + rsp_payload->port_speed_desc.operating_speed = cpu_to_be16( + qla25xx_fdmi_port_speed_currently(ha)); + + /* Link Error Status Descriptor */ + rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002); + rsp_payload->ls_err_desc.desc_len = + cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc)); + + if (stat) { + rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0); + if (!rval) { + rsp_payload->ls_err_desc.link_fail_cnt = + cpu_to_be32(le32_to_cpu(stat->link_fail_cnt)); + rsp_payload->ls_err_desc.loss_sync_cnt = + cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt)); + rsp_payload->ls_err_desc.loss_sig_cnt = + cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt)); + rsp_payload->ls_err_desc.prim_seq_err_cnt = + cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt)); + rsp_payload->ls_err_desc.inval_xmit_word_cnt = + cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt)); + rsp_payload->ls_err_desc.inval_crc_cnt = + cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt)); + rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6; + } + } + + /* Portname Descriptor */ + rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003); + rsp_payload->port_name_diag_desc.desc_len = + cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc)); + memcpy(rsp_payload->port_name_diag_desc.WWNN, + vha->node_name, + sizeof(rsp_payload->port_name_diag_desc.WWNN)); + memcpy(rsp_payload->port_name_diag_desc.WWPN, + vha->port_name, + sizeof(rsp_payload->port_name_diag_desc.WWPN)); + + /* F-Port Portname Descriptor */ + rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003); + rsp_payload->port_name_direct_desc.desc_len = + cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc)); + memcpy(rsp_payload->port_name_direct_desc.WWNN, + vha->fabric_node_name, + sizeof(rsp_payload->port_name_direct_desc.WWNN)); + memcpy(rsp_payload->port_name_direct_desc.WWPN, + vha->fabric_port_name, + sizeof(rsp_payload->port_name_direct_desc.WWPN)); + + /* Bufer Credit Descriptor */ + rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006); + rsp_payload->buffer_credit_desc.desc_len = + cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc)); + rsp_payload->buffer_credit_desc.fcport_b2b = 0; + rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0); + rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0); + + if (ha->flags.plogi_template_valid) { + uint32_t tmp = + be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred); + rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp); + } + + if (rsp_payload_length < sizeof(*rsp_payload)) + goto send; + + /* Optical Element Descriptor, Temperature */ + rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007); + rsp_payload->optical_elmt_desc[0].desc_len = + cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); + /* Optical Element Descriptor, Voltage */ + rsp_payload->optical_elmt_desc[1].desc_tag = 
cpu_to_be32(0x10007); + rsp_payload->optical_elmt_desc[1].desc_len = + cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); + /* Optical Element Descriptor, Tx Bias Current */ + rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007); + rsp_payload->optical_elmt_desc[2].desc_len = + cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); + /* Optical Element Descriptor, Tx Power */ + rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007); + rsp_payload->optical_elmt_desc[3].desc_len = + cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); + /* Optical Element Descriptor, Rx Power */ + rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007); + rsp_payload->optical_elmt_desc[4].desc_len = + cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); + + if (sfp) { + memset(sfp, 0, SFP_RTDI_LEN); + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0); + if (!rval) { + __be16 *trx = (__force __be16 *)sfp; /* already be16 */ + + /* Optical Element Descriptor, Temperature */ + rsp_payload->optical_elmt_desc[0].high_alarm = trx[0]; + rsp_payload->optical_elmt_desc[0].low_alarm = trx[1]; + rsp_payload->optical_elmt_desc[0].high_warn = trx[2]; + rsp_payload->optical_elmt_desc[0].low_warn = trx[3]; + rsp_payload->optical_elmt_desc[0].element_flags = + cpu_to_be32(1 << 28); + + /* Optical Element Descriptor, Voltage */ + rsp_payload->optical_elmt_desc[1].high_alarm = trx[4]; + rsp_payload->optical_elmt_desc[1].low_alarm = trx[5]; + rsp_payload->optical_elmt_desc[1].high_warn = trx[6]; + rsp_payload->optical_elmt_desc[1].low_warn = trx[7]; + rsp_payload->optical_elmt_desc[1].element_flags = + cpu_to_be32(2 << 28); + + /* Optical Element Descriptor, Tx Bias Current */ + rsp_payload->optical_elmt_desc[2].high_alarm = trx[8]; + rsp_payload->optical_elmt_desc[2].low_alarm = trx[9]; + rsp_payload->optical_elmt_desc[2].high_warn = trx[10]; + rsp_payload->optical_elmt_desc[2].low_warn = trx[11]; + rsp_payload->optical_elmt_desc[2].element_flags = + cpu_to_be32(3 << 28); + + /* Optical Element Descriptor, Tx Power */ + rsp_payload->optical_elmt_desc[3].high_alarm = trx[12]; + rsp_payload->optical_elmt_desc[3].low_alarm = trx[13]; + rsp_payload->optical_elmt_desc[3].high_warn = trx[14]; + rsp_payload->optical_elmt_desc[3].low_warn = trx[15]; + rsp_payload->optical_elmt_desc[3].element_flags = + cpu_to_be32(4 << 28); + + /* Optical Element Descriptor, Rx Power */ + rsp_payload->optical_elmt_desc[4].high_alarm = trx[16]; + rsp_payload->optical_elmt_desc[4].low_alarm = trx[17]; + rsp_payload->optical_elmt_desc[4].high_warn = trx[18]; + rsp_payload->optical_elmt_desc[4].low_warn = trx[19]; + rsp_payload->optical_elmt_desc[4].element_flags = + cpu_to_be32(5 << 28); + } + + memset(sfp, 0, SFP_RTDI_LEN); + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0); + if (!rval) { + /* Temperature high/low alarm/warning */ + rsp_payload->optical_elmt_desc[0].element_flags |= + cpu_to_be32( + (sfp[0] >> 7 & 1) << 3 | + (sfp[0] >> 6 & 1) << 2 | + (sfp[4] >> 7 & 1) << 1 | + (sfp[4] >> 6 & 1) << 0); + + /* Voltage high/low alarm/warning */ + rsp_payload->optical_elmt_desc[1].element_flags |= + cpu_to_be32( + (sfp[0] >> 5 & 1) << 3 | + (sfp[0] >> 4 & 1) << 2 | + (sfp[4] >> 5 & 1) << 1 | + (sfp[4] >> 4 & 1) << 0); + + /* Tx Bias Current high/low alarm/warning */ + rsp_payload->optical_elmt_desc[2].element_flags |= + cpu_to_be32( + (sfp[0] >> 3 & 1) << 3 | + (sfp[0] >> 2 & 1) << 2 | + (sfp[4] >> 3 & 1) << 1 | + (sfp[4] >> 2 & 1) << 0); + + /* Tx Power high/low 
alarm/warning */ + rsp_payload->optical_elmt_desc[3].element_flags |= + cpu_to_be32( + (sfp[0] >> 1 & 1) << 3 | + (sfp[0] >> 0 & 1) << 2 | + (sfp[4] >> 1 & 1) << 1 | + (sfp[4] >> 0 & 1) << 0); + + /* Rx Power high/low alarm/warning */ + rsp_payload->optical_elmt_desc[4].element_flags |= + cpu_to_be32( + (sfp[1] >> 7 & 1) << 3 | + (sfp[1] >> 6 & 1) << 2 | + (sfp[5] >> 7 & 1) << 1 | + (sfp[5] >> 6 & 1) << 0); + } + } + + /* Optical Product Data Descriptor */ + rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008); + rsp_payload->optical_prod_desc.desc_len = + cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc)); + + if (sfp) { + memset(sfp, 0, SFP_RTDI_LEN); + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0); + if (!rval) { + memcpy(rsp_payload->optical_prod_desc.vendor_name, + sfp + 0, + sizeof(rsp_payload->optical_prod_desc.vendor_name)); + memcpy(rsp_payload->optical_prod_desc.part_number, + sfp + 20, + sizeof(rsp_payload->optical_prod_desc.part_number)); + memcpy(rsp_payload->optical_prod_desc.revision, + sfp + 36, + sizeof(rsp_payload->optical_prod_desc.revision)); + memcpy(rsp_payload->optical_prod_desc.serial_number, + sfp + 48, + sizeof(rsp_payload->optical_prod_desc.serial_number)); + } + + memset(sfp, 0, SFP_RTDI_LEN); + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0); + if (!rval) { + memcpy(rsp_payload->optical_prod_desc.date, + sfp + 0, + sizeof(rsp_payload->optical_prod_desc.date)); + } + } + +send: + ql_dbg(ql_dbg_init, vha, 0x0183, + "Sending ELS Response to RDP Request...\n"); + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184, + "-------- ELS RSP -------\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185, + rsp_els, sizeof(*rsp_els)); + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186, + "-------- ELS RSP PAYLOAD -------\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187, + rsp_payload, rsp_payload_length); + + rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0); + + if (rval) { + ql_log(ql_log_warn, vha, 0x0188, + "%s: iocb failed to execute -> %x\n", __func__, rval); + } else if (rsp_els->comp_status) { + ql_log(ql_log_warn, vha, 0x0189, + "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", + __func__, rsp_els->comp_status, + rsp_els->error_subcode_1, rsp_els->error_subcode_2); + } else { + ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__); + } + +dealloc: + if (stat) + dma_free_coherent(&ha->pdev->dev, sizeof(*stat), + stat, stat_dma); + if (sfp) + dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN, + sfp, sfp_dma); + if (rsp_payload) + dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload), + rsp_payload, rsp_payload_dma); + if (rsp_els) + dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), + rsp_els, rsp_els_dma); +} + +void +qla24xx_free_purex_item(struct purex_item *item) +{ + if (item == &item->vha->default_item) + memset(&item->vha->default_item, 0, sizeof(struct purex_item)); + else + kfree(item); +} + +void qla24xx_process_purex_list(struct purex_list *list) +{ + struct list_head head = LIST_HEAD_INIT(head); + struct purex_item *item, *next; + ulong flags; + + spin_lock_irqsave(&list->lock, flags); + list_splice_init(&list->head, &head); + spin_unlock_irqrestore(&list->lock, flags); + + list_for_each_entry_safe(item, next, &head, list) { + list_del(&item->list); + item->process_item(item->vha, item); + qla24xx_free_purex_item(item); + } +} + +/* + * Context: task, can sleep + */ +void +qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id) +{ +#if 0 + uint16_t 
options = (requester_id << 15) | BIT_7; +#endif + uint16_t retry; + uint32_t data; + struct qla_hw_data *ha = base_vha->hw; + + might_sleep(); + + /* IDC-unlock implementation using driver-unlock/lock-id + * remote registers + */ + retry = 0; +retry_unlock: + if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data) + == QLA_SUCCESS) { + if (data == ha->portnum) { + qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data); + /* Clearing lock-id by setting 0xff */ + qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff); + } else if (retry < 10) { + /* SV: XXX: IDC unlock retrying needed here? */ + + /* Retry for IDC-unlock */ + msleep(QLA83XX_WAIT_LOGIC_MS); + retry++; + ql_dbg(ql_dbg_p3p, base_vha, 0xb064, + "Failed to release IDC lock, retrying=%d\n", retry); + goto retry_unlock; + } + } else if (retry < 10) { + /* Retry for IDC-unlock */ + msleep(QLA83XX_WAIT_LOGIC_MS); + retry++; + ql_dbg(ql_dbg_p3p, base_vha, 0xb065, + "Failed to read drv-lockid, retrying=%d\n", retry); + goto retry_unlock; + } + + return; + +#if 0 + /* XXX: IDC-unlock implementation using access-control mbx */ + retry = 0; +retry_unlock2: + if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) { + if (retry < 10) { + /* Retry for IDC-unlock */ + msleep(QLA83XX_WAIT_LOGIC_MS); + retry++; + ql_dbg(ql_dbg_p3p, base_vha, 0xb066, + "Failed to release IDC lock, retrying=%d\n", retry); + goto retry_unlock2; + } + } + + return; +#endif +} + +int +__qla83xx_set_drv_presence(scsi_qla_host_t *vha) +{ + int rval = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + uint32_t drv_presence; + + rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); + if (rval == QLA_SUCCESS) { + drv_presence |= (1 << ha->portnum); + rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, + drv_presence); + } + + return rval; +} + +int +qla83xx_set_drv_presence(scsi_qla_host_t *vha) +{ + int rval = QLA_SUCCESS; + + qla83xx_idc_lock(vha, 0); + rval = __qla83xx_set_drv_presence(vha); + qla83xx_idc_unlock(vha, 0); + + return rval; +} + +int +__qla83xx_clear_drv_presence(scsi_qla_host_t *vha) +{ + int rval = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + uint32_t drv_presence; + + rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); + if (rval == QLA_SUCCESS) { + drv_presence &= ~(1 << ha->portnum); + rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, + drv_presence); + } + + return rval; +} + +int +qla83xx_clear_drv_presence(scsi_qla_host_t *vha) +{ + int rval = QLA_SUCCESS; + + qla83xx_idc_lock(vha, 0); + rval = __qla83xx_clear_drv_presence(vha); + qla83xx_idc_unlock(vha, 0); + + return rval; +} + +static void +qla83xx_need_reset_handler(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t drv_ack, drv_presence; + unsigned long ack_timeout; + + /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */ + ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); + while (1) { + qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); + qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); + if ((drv_ack & drv_presence) == drv_presence) + break; + + if (time_after_eq(jiffies, ack_timeout)) { + ql_log(ql_log_warn, vha, 0xb067, + "RESET ACK TIMEOUT! drv_presence=0x%x " + "drv_ack=0x%x\n", drv_presence, drv_ack); + /* + * The function(s) which did not ack in time are forced + * to withdraw any further participation in the IDC + * reset. 
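+ * Writing the ack mask back to DRV-PRESENCE below clears the presence bits of the non-acking functions.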
+ */ + if (drv_ack != drv_presence) + qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, + drv_ack); + break; + } + + qla83xx_idc_unlock(vha, 0); + msleep(1000); + qla83xx_idc_lock(vha, 0); + } + + qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD); + ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n"); +} + +static int +qla83xx_device_bootstrap(scsi_qla_host_t *vha) +{ + int rval = QLA_SUCCESS; + uint32_t idc_control; + + qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING); + ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n"); + + /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */ + __qla83xx_get_idc_control(vha, &idc_control); + idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET; + __qla83xx_set_idc_control(vha, 0); + + qla83xx_idc_unlock(vha, 0); + rval = qla83xx_restart_nic_firmware(vha); + qla83xx_idc_lock(vha, 0); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb06a, + "Failed to restart NIC f/w.\n"); + qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED); + ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n"); + } else { + ql_dbg(ql_dbg_p3p, vha, 0xb06c, + "Success in restarting nic f/w.\n"); + qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY); + ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n"); + } + + return rval; +} + +/* Assumes idc_lock always held on entry */ +int +qla83xx_idc_state_handler(scsi_qla_host_t *base_vha) +{ + struct qla_hw_data *ha = base_vha->hw; + int rval = QLA_SUCCESS; + unsigned long dev_init_timeout; + uint32_t dev_state; + + /* Wait for MAX-INIT-TIMEOUT for the device to go ready */ + dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); + + while (1) { + + if (time_after_eq(jiffies, dev_init_timeout)) { + ql_log(ql_log_warn, base_vha, 0xb06e, + "Initialization TIMEOUT!\n"); + /* Init timeout. Disable further NIC Core + * communication. 
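+ * The FAILED device state written below makes the state handler clean up and exit on its next pass.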
+ */ + qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, + QLA8XXX_DEV_FAILED); + ql_log(ql_log_info, base_vha, 0xb06f, + "HW State: FAILED.\n"); + } + + qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); + switch (dev_state) { + case QLA8XXX_DEV_READY: + if (ha->flags.nic_core_reset_owner) + qla83xx_idc_audit(base_vha, + IDC_AUDIT_COMPLETION); + ha->flags.nic_core_reset_owner = 0; + ql_dbg(ql_dbg_p3p, base_vha, 0xb070, + "Reset_owner reset by 0x%x.\n", + ha->portnum); + goto exit; + case QLA8XXX_DEV_COLD: + if (ha->flags.nic_core_reset_owner) + rval = qla83xx_device_bootstrap(base_vha); + else { + /* Wait for AEN to change device-state */ + qla83xx_idc_unlock(base_vha, 0); + msleep(1000); + qla83xx_idc_lock(base_vha, 0); + } + break; + case QLA8XXX_DEV_INITIALIZING: + /* Wait for AEN to change device-state */ + qla83xx_idc_unlock(base_vha, 0); + msleep(1000); + qla83xx_idc_lock(base_vha, 0); + break; + case QLA8XXX_DEV_NEED_RESET: + if (!ql2xdontresethba && ha->flags.nic_core_reset_owner) + qla83xx_need_reset_handler(base_vha); + else { + /* Wait for AEN to change device-state */ + qla83xx_idc_unlock(base_vha, 0); + msleep(1000); + qla83xx_idc_lock(base_vha, 0); + } + /* reset timeout value after need reset handler */ + dev_init_timeout = jiffies + + (ha->fcoe_dev_init_timeout * HZ); + break; + case QLA8XXX_DEV_NEED_QUIESCENT: + /* XXX: DEBUG for now */ + qla83xx_idc_unlock(base_vha, 0); + msleep(1000); + qla83xx_idc_lock(base_vha, 0); + break; + case QLA8XXX_DEV_QUIESCENT: + /* XXX: DEBUG for now */ + if (ha->flags.quiesce_owner) + goto exit; + + qla83xx_idc_unlock(base_vha, 0); + msleep(1000); + qla83xx_idc_lock(base_vha, 0); + dev_init_timeout = jiffies + + (ha->fcoe_dev_init_timeout * HZ); + break; + case QLA8XXX_DEV_FAILED: + if (ha->flags.nic_core_reset_owner) + qla83xx_idc_audit(base_vha, + IDC_AUDIT_COMPLETION); + ha->flags.nic_core_reset_owner = 0; + __qla83xx_clear_drv_presence(base_vha); + qla83xx_idc_unlock(base_vha, 0); + qla8xxx_dev_failed_handler(base_vha); + rval = QLA_FUNCTION_FAILED; + qla83xx_idc_lock(base_vha, 0); + goto exit; + case QLA8XXX_BAD_VALUE: + qla83xx_idc_unlock(base_vha, 0); + msleep(1000); + qla83xx_idc_lock(base_vha, 0); + break; + default: + ql_log(ql_log_warn, base_vha, 0xb071, + "Unknown Device State: %x.\n", dev_state); + qla83xx_idc_unlock(base_vha, 0); + qla8xxx_dev_failed_handler(base_vha); + rval = QLA_FUNCTION_FAILED; + qla83xx_idc_lock(base_vha, 0); + goto exit; + } + } + +exit: + return rval; +} + +void +qla2x00_disable_board_on_pci_error(struct work_struct *work) +{ + struct qla_hw_data *ha = container_of(work, struct qla_hw_data, + board_disable); + struct pci_dev *pdev = ha->pdev; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + + ql_log(ql_log_warn, base_vha, 0x015b, + "Disabling adapter.\n"); + + if (!atomic_read(&pdev->enable_cnt)) { + ql_log(ql_log_info, base_vha, 0xfffc, + "PCI device disabled, no action req for PCI error=%lx\n", + base_vha->pci_flags); + return; + } + + /* + * if UNLOADING flag is already set, then continue unload, + * where it was set first. 
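+ * test_and_set_bit() is atomic, so only one caller ever runs the teardown below.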
+ */ + if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) + return; + + qla2x00_wait_for_sess_deletion(base_vha); + + qla2x00_delete_all_vps(ha, base_vha); + + qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); + + qla2x00_dfs_remove(base_vha); + + qla84xx_put_chip(base_vha); + + if (base_vha->timer_active) + qla2x00_stop_timer(base_vha); + + base_vha->flags.online = 0; + + qla2x00_destroy_deferred_work(ha); + + /* + * Do not try to stop beacon blink as it will issue a mailbox + * command. + */ + qla2x00_free_sysfs_attr(base_vha, false); + + fc_remove_host(base_vha->host); + + scsi_remove_host(base_vha->host); + + base_vha->flags.init_done = 0; + qla25xx_delete_queues(base_vha); + qla2x00_free_fcports(base_vha); + qla2x00_free_irqs(base_vha); + qla2x00_mem_free(ha); + qla82xx_md_free(base_vha); + qla2x00_free_queues(ha); + + qla2x00_unmap_iobases(ha); + + pci_release_selected_regions(ha->pdev, ha->bars); + pci_disable_device(pdev); + + /* + * Let qla2x00_remove_one cleanup qla_hw_data on device removal. + */ +} + +/************************************************************************** +* qla2x00_do_dpc +* This kernel thread is a task that is schedule by the interrupt handler +* to perform the background processing for interrupts. +* +* Notes: +* This task always run in the context of a kernel thread. It +* is kick-off by the driver's detect code and starts up +* up one per adapter. It immediately goes to sleep and waits for +* some fibre event. When either the interrupt handler or +* the timer routine detects a event it will one of the task +* bits then wake us up. +**************************************************************************/ +static int +qla2x00_do_dpc(void *data) +{ + scsi_qla_host_t *base_vha; + struct qla_hw_data *ha; + uint32_t online; + struct qla_qpair *qpair; + + ha = (struct qla_hw_data *)data; + base_vha = pci_get_drvdata(ha->pdev); + + set_user_nice(current, MIN_NICE); + + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + ql_dbg(ql_dbg_dpc, base_vha, 0x4000, + "DPC handler sleeping.\n"); + + schedule(); + + if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags)) + qla_pci_set_eeh_busy(base_vha); + + if (!base_vha->flags.init_done || ha->flags.mbox_busy) + goto end_loop; + + if (ha->flags.eeh_busy) { + ql_dbg(ql_dbg_dpc, base_vha, 0x4003, + "eeh_busy=%d.\n", ha->flags.eeh_busy); + goto end_loop; + } + + ha->dpc_active = 1; + + ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001, + "DPC handler waking up, dpc_flags=0x%lx.\n", + base_vha->dpc_flags); + + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + break; + + if (IS_P3P_TYPE(ha)) { + if (IS_QLA8044(ha)) { + if (test_and_clear_bit(ISP_UNRECOVERABLE, + &base_vha->dpc_flags)) { + qla8044_idc_lock(ha); + qla8044_wr_direct(base_vha, + QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_FAILED); + qla8044_idc_unlock(ha); + ql_log(ql_log_info, base_vha, 0x4004, + "HW State: FAILED.\n"); + qla8044_device_state_handler(base_vha); + continue; + } + + } else { + if (test_and_clear_bit(ISP_UNRECOVERABLE, + &base_vha->dpc_flags)) { + qla82xx_idc_lock(ha); + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA8XXX_DEV_FAILED); + qla82xx_idc_unlock(ha); + ql_log(ql_log_info, base_vha, 0x0151, + "HW State: FAILED.\n"); + qla82xx_device_state_handler(base_vha); + continue; + } + } + + if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, + &base_vha->dpc_flags)) { + + ql_dbg(ql_dbg_dpc, base_vha, 0x4005, + "FCoE context reset scheduled.\n"); + if (!(test_and_set_bit(ABORT_ISP_ACTIVE, + 
&base_vha->dpc_flags))) { + if (qla82xx_fcoe_ctx_reset(base_vha)) { + /* FCoE-ctx reset failed. + * Escalate to chip-reset + */ + set_bit(ISP_ABORT_NEEDED, + &base_vha->dpc_flags); + } + clear_bit(ABORT_ISP_ACTIVE, + &base_vha->dpc_flags); + } + + ql_dbg(ql_dbg_dpc, base_vha, 0x4006, + "FCoE context reset end.\n"); + } + } else if (IS_QLAFX00(ha)) { + if (test_and_clear_bit(ISP_UNRECOVERABLE, + &base_vha->dpc_flags)) { + ql_dbg(ql_dbg_dpc, base_vha, 0x4020, + "Firmware Reset Recovery\n"); + if (qlafx00_reset_initialize(base_vha)) { + /* Failed. Abort isp later. */ + if (!test_bit(UNLOADING, + &base_vha->dpc_flags)) { + set_bit(ISP_UNRECOVERABLE, + &base_vha->dpc_flags); + ql_dbg(ql_dbg_dpc, base_vha, + 0x4021, + "Reset Recovery Failed\n"); + } + } + } + + if (test_and_clear_bit(FX00_TARGET_SCAN, + &base_vha->dpc_flags)) { + ql_dbg(ql_dbg_dpc, base_vha, 0x4022, + "ISPFx00 Target Scan scheduled\n"); + if (qlafx00_rescan_isp(base_vha)) { + if (!test_bit(UNLOADING, + &base_vha->dpc_flags)) + set_bit(ISP_UNRECOVERABLE, + &base_vha->dpc_flags); + ql_dbg(ql_dbg_dpc, base_vha, 0x401e, + "ISPFx00 Target Scan Failed\n"); + } + ql_dbg(ql_dbg_dpc, base_vha, 0x401f, + "ISPFx00 Target Scan End\n"); + } + if (test_and_clear_bit(FX00_HOST_INFO_RESEND, + &base_vha->dpc_flags)) { + ql_dbg(ql_dbg_dpc, base_vha, 0x4023, + "ISPFx00 Host Info resend scheduled\n"); + qlafx00_fx_disc(base_vha, + &base_vha->hw->mr.fcport, + FXDISC_REG_HOST_INFO); + } + } + + if (test_and_clear_bit(DETECT_SFP_CHANGE, + &base_vha->dpc_flags)) { + /* Semantic: + * - NO-OP -- await next ISP-ABORT. Preferred method + * to minimize disruptions that will occur + * when a forced chip-reset occurs. + * - Force -- ISP-ABORT scheduled. + */ + /* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */ + } + + if (test_and_clear_bit + (ISP_ABORT_NEEDED, &base_vha->dpc_flags) && + !test_bit(UNLOADING, &base_vha->dpc_flags)) { + bool do_reset = true; + + switch (base_vha->qlini_mode) { + case QLA2XXX_INI_MODE_ENABLED: + break; + case QLA2XXX_INI_MODE_DISABLED: + if (!qla_tgt_mode_enabled(base_vha) && + !ha->flags.fw_started) + do_reset = false; + break; + case QLA2XXX_INI_MODE_DUAL: + if (!qla_dual_mode_enabled(base_vha) && + !ha->flags.fw_started) + do_reset = false; + break; + default: + break; + } + + if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE, + &base_vha->dpc_flags))) { + base_vha->flags.online = 1; + ql_dbg(ql_dbg_dpc, base_vha, 0x4007, + "ISP abort scheduled.\n"); + if (ha->isp_ops->abort_isp(base_vha)) { + /* failed. 
retry later */ + set_bit(ISP_ABORT_NEEDED, + &base_vha->dpc_flags); + } + clear_bit(ABORT_ISP_ACTIVE, + &base_vha->dpc_flags); + ql_dbg(ql_dbg_dpc, base_vha, 0x4008, + "ISP abort end.\n"); + } + } + + if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) { + if (atomic_read(&base_vha->loop_state) == LOOP_READY) { + qla24xx_process_purex_list + (&base_vha->purex_list); + clear_bit(PROCESS_PUREX_IOCB, + &base_vha->dpc_flags); + } + } + + if (IS_QLAFX00(ha)) + goto loop_resync_check; + + if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { + ql_dbg(ql_dbg_dpc, base_vha, 0x4009, + "Quiescence mode scheduled.\n"); + if (IS_P3P_TYPE(ha)) { + if (IS_QLA82XX(ha)) + qla82xx_device_state_handler(base_vha); + if (IS_QLA8044(ha)) + qla8044_device_state_handler(base_vha); + clear_bit(ISP_QUIESCE_NEEDED, + &base_vha->dpc_flags); + if (!ha->flags.quiesce_owner) { + qla2x00_perform_loop_resync(base_vha); + if (IS_QLA82XX(ha)) { + qla82xx_idc_lock(ha); + qla82xx_clear_qsnt_ready( + base_vha); + qla82xx_idc_unlock(ha); + } else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + qla8044_clear_qsnt_ready( + base_vha); + qla8044_idc_unlock(ha); + } + } + } else { + clear_bit(ISP_QUIESCE_NEEDED, + &base_vha->dpc_flags); + qla2x00_quiesce_io(base_vha); + } + ql_dbg(ql_dbg_dpc, base_vha, 0x400a, + "Quiescence mode end.\n"); + } + + if (test_and_clear_bit(RESET_MARKER_NEEDED, + &base_vha->dpc_flags) && + (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { + + ql_dbg(ql_dbg_dpc, base_vha, 0x400b, + "Reset marker scheduled.\n"); + qla2x00_rst_aen(base_vha); + clear_bit(RESET_ACTIVE, &base_vha->dpc_flags); + ql_dbg(ql_dbg_dpc, base_vha, 0x400c, + "Reset marker end.\n"); + } + + /* Retry each device up to login retry count */ + if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) && + !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && + atomic_read(&base_vha->loop_state) != LOOP_DOWN) { + + if (!base_vha->relogin_jif || + time_after_eq(jiffies, base_vha->relogin_jif)) { + base_vha->relogin_jif = jiffies + HZ; + clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags); + + ql_dbg(ql_dbg_disc, base_vha, 0x400d, + "Relogin scheduled.\n"); + qla24xx_post_relogin_work(base_vha); + } + } +loop_resync_check: + if (!qla2x00_reset_active(base_vha) && + test_and_clear_bit(LOOP_RESYNC_NEEDED, + &base_vha->dpc_flags)) { + /* + * Allow abort_isp to complete before moving on to scanning. 
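+ * While a reset is active, LOOP_RESYNC_NEEDED stays set and the resync is retried on a later DPC pass.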
+ */ + ql_dbg(ql_dbg_dpc, base_vha, 0x400f, + "Loop resync scheduled.\n"); + + if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, + &base_vha->dpc_flags))) { + + qla2x00_loop_resync(base_vha); + + clear_bit(LOOP_RESYNC_ACTIVE, + &base_vha->dpc_flags); + } + + ql_dbg(ql_dbg_dpc, base_vha, 0x4010, + "Loop resync end.\n"); + } + + if (IS_QLAFX00(ha)) + goto intr_on_check; + + if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && + atomic_read(&base_vha->loop_state) == LOOP_READY) { + clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags); + qla2xxx_flash_npiv_conf(base_vha); + } + +intr_on_check: + if (!ha->interrupts_on) + ha->isp_ops->enable_intrs(ha); + + if (test_and_clear_bit(BEACON_BLINK_NEEDED, + &base_vha->dpc_flags)) { + if (ha->beacon_blink_led == 1) + ha->isp_ops->beacon_blink(base_vha); + } + + /* qpair online check */ + if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED, + &base_vha->dpc_flags)) { + if (ha->flags.eeh_busy || + ha->flags.pci_channel_io_perm_failure) + online = 0; + else + online = 1; + + mutex_lock(&ha->mq_lock); + list_for_each_entry(qpair, &base_vha->qp_list, + qp_list_elem) + qpair->online = online; + mutex_unlock(&ha->mq_lock); + } + + if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, + &base_vha->dpc_flags)) { + u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold; + + if (threshold > ha->orig_fw_xcb_count) + threshold = ha->orig_fw_xcb_count; + + ql_log(ql_log_info, base_vha, 0xffffff, + "SET ZIO Activity exchange threshold to %d.\n", + threshold); + if (qla27xx_set_zio_threshold(base_vha, threshold)) { + ql_log(ql_log_info, base_vha, 0xffffff, + "Unable to SET ZIO Activity exchange threshold to %d.\n", + threshold); + } + } + + if (!IS_QLAFX00(ha)) + qla2x00_do_dpc_all_vps(base_vha); + + if (test_and_clear_bit(N2N_LINK_RESET, + &base_vha->dpc_flags)) { + qla2x00_lip_reset(base_vha); + } + + ha->dpc_active = 0; +end_loop: + set_current_state(TASK_INTERRUPTIBLE); + } /* End of while(1) */ + __set_current_state(TASK_RUNNING); + + ql_dbg(ql_dbg_dpc, base_vha, 0x4011, + "DPC handler exiting.\n"); + + /* + * Make sure that nobody tries to wake us up again. + */ + ha->dpc_active = 0; + + /* Cleanup any residual CTX SRBs. */ + qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); + + return 0; +} + +void +qla2xxx_wake_dpc(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct task_struct *t = ha->dpc_thread; + + if (!test_bit(UNLOADING, &vha->dpc_flags) && t) + wake_up_process(t); +} + +/* +* qla2x00_rst_aen +* Processes asynchronous reset. +* +* Input: +* ha = adapter block pointer. +*/ +static void +qla2x00_rst_aen(scsi_qla_host_t *vha) +{ + if (vha->flags.online && !vha->flags.reset_active && + !atomic_read(&vha->loop_down_timer) && + !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) { + do { + clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); + + /* + * Issue marker command only when we are going to start + * the I/O. + */ + vha->marker_needed = 1; + } while (!atomic_read(&vha->loop_down_timer) && + (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags))); + } +} + +static bool qla_do_heartbeat(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + u32 cmpl_cnt; + u16 i; + bool do_heartbeat = false; + + /* + * Allow do_heartbeat only if we don’t have any active interrupts, + * but there are still IOs outstanding with firmware. 
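+ * A completion count that has not moved while commands remain outstanding is the trigger checked below.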
+ */ + cmpl_cnt = ha->base_qpair->cmd_completion_cnt; + if (cmpl_cnt == ha->base_qpair->prev_completion_cnt && + cmpl_cnt != ha->base_qpair->cmd_cnt) { + do_heartbeat = true; + goto skip; + } + ha->base_qpair->prev_completion_cnt = cmpl_cnt; + + for (i = 0; i < ha->max_qpairs; i++) { + if (ha->queue_pair_map[i]) { + cmpl_cnt = ha->queue_pair_map[i]->cmd_completion_cnt; + if (cmpl_cnt == ha->queue_pair_map[i]->prev_completion_cnt && + cmpl_cnt != ha->queue_pair_map[i]->cmd_cnt) { + do_heartbeat = true; + break; + } + ha->queue_pair_map[i]->prev_completion_cnt = cmpl_cnt; + } + } + +skip: + return do_heartbeat; +} + +static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started) +{ + struct qla_hw_data *ha = vha->hw; + + if (vha->vp_idx) + return; + + if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha)) + return; + + /* + * dpc thread cannot run if heartbeat is running at the same time. + * We also do not want to starve heartbeat task. Therefore, do + * heartbeat task at least once every 5 seconds. + */ + if (dpc_started && + time_before(jiffies, ha->last_heartbeat_run_jiffies + 5 * HZ)) + return; + + if (qla_do_heartbeat(vha)) { + ha->last_heartbeat_run_jiffies = jiffies; + queue_work(ha->wq, &ha->heartbeat_work); + } +} + +static void qla_wind_down_chip(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (!ha->flags.eeh_busy) + return; + if (ha->pci_error_state) + /* system is trying to recover */ + return; + + /* + * Current system is not handling PCIE error. At this point, this is + * best effort to wind down the adapter. + */ + if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) && + !ha->flags.eeh_flush) { + ql_log(ql_log_info, vha, 0x9009, + "PCI Error detected, attempting to reset hardware.\n"); + + ha->isp_ops->reset_chip(vha); + ha->isp_ops->disable_intrs(ha); + + ha->flags.eeh_flush = EEH_FLUSH_RDY; + ha->eeh_jif = jiffies; + + } else if (ha->flags.eeh_flush == EEH_FLUSH_RDY && + time_after_eq(jiffies, ha->eeh_jif + 5 * HZ)) { + pci_clear_master(ha->pdev); + + /* flush all command */ + qla2x00_abort_isp_cleanup(vha); + ha->flags.eeh_flush = EEH_FLUSH_DONE; + + ql_log(ql_log_info, vha, 0x900a, + "PCI Error handling complete, all IOs aborted.\n"); + } +} + +/************************************************************************** +* qla2x00_timer +* +* Description: +* One second timer +* +* Context: Interrupt +***************************************************************************/ +void +qla2x00_timer(struct timer_list *t) +{ + scsi_qla_host_t *vha = from_timer(vha, t, timer); + unsigned long cpu_flags = 0; + int start_dpc = 0; + int index; + srb_t *sp; + uint16_t w; + struct qla_hw_data *ha = vha->hw; + struct req_que *req; + unsigned long flags; + fc_port_t *fcport = NULL; + + if (ha->flags.eeh_busy) { + qla_wind_down_chip(vha); + + ql_dbg(ql_dbg_timer, vha, 0x6000, + "EEH = %d, restarting timer.\n", + ha->flags.eeh_busy); + qla2x00_restart_timer(vha, WATCH_INTERVAL); + return; + } + + /* + * Hardware read to raise pending EEH errors during mailbox waits. If + * the read returns -1 then disable the board. 
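+ * qla2x00_check_reg16_for_disconnect() below treats an all-ones read as a disconnected device.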
+ */ + if (!pci_channel_offline(ha->pdev)) { + pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); + qla2x00_check_reg16_for_disconnect(vha, w); + } + + /* Make sure qla82xx_watchdog is run only for physical port */ + if (!vha->vp_idx && IS_P3P_TYPE(ha)) { + if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) + start_dpc++; + if (IS_QLA82XX(ha)) + qla82xx_watchdog(vha); + else if (IS_QLA8044(ha)) + qla8044_watchdog(vha); + } + + if (!vha->vp_idx && IS_QLAFX00(ha)) + qlafx00_timer_routine(vha); + + if (vha->link_down_time < QLA2XX_MAX_LINK_DOWN_TIME) + vha->link_down_time++; + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->tgt_link_down_time < QLA2XX_MAX_LINK_DOWN_TIME) + fcport->tgt_link_down_time++; + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + /* Loop down handler. */ + if (atomic_read(&vha->loop_down_timer) > 0 && + !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && + !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags)) + && vha->flags.online) { + + if (atomic_read(&vha->loop_down_timer) == + vha->loop_down_abort_time) { + + ql_log(ql_log_info, vha, 0x6008, + "Loop down - aborting the queues before time expires.\n"); + + if (!IS_QLA2100(ha) && vha->link_down_timeout) + atomic_set(&vha->loop_state, LOOP_DEAD); + + /* + * Schedule an ISP abort to return any FCP2-device + * commands. + */ + /* NPIV - scan physical port only */ + if (!vha->vp_idx) { + spin_lock_irqsave(&ha->hardware_lock, + cpu_flags); + req = ha->req_q_map[0]; + for (index = 1; + index < req->num_outstanding_cmds; + index++) { + fc_port_t *sfcp; + + sp = req->outstanding_cmds[index]; + if (!sp) + continue; + if (sp->cmd_type != TYPE_SRB) + continue; + if (sp->type != SRB_SCSI_CMD) + continue; + sfcp = sp->fcport; + if (!(sfcp->flags & FCF_FCP2_DEVICE)) + continue; + + if (IS_QLA82XX(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, + &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + break; + } + spin_unlock_irqrestore(&ha->hardware_lock, + cpu_flags); + } + start_dpc++; + } + + /* if the loop has been down for 4 minutes, reinit adapter */ + if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { + if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) { + ql_log(ql_log_warn, vha, 0x6009, + "Loop down - aborting ISP.\n"); + + if (IS_QLA82XX(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, + &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + } + } + ql_dbg(ql_dbg_timer, vha, 0x600a, + "Loop down - seconds remaining %d.\n", + atomic_read(&vha->loop_down_timer)); + } + /* Check if beacon LED needs to be blinked for physical host only */ + if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { + /* There is no beacon_blink function for ISP82xx */ + if (!IS_P3P_TYPE(ha)) { + set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); + start_dpc++; + } + } + + /* check if edif running */ + if (vha->hw->flags.edif_enabled) + qla_edif_timer(vha); + + /* Process any deferred work. */ + if (!list_empty(&vha->work_list)) { + unsigned long flags; + bool q = false; + + spin_lock_irqsave(&vha->work_lock, flags); + if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags)) + q = true; + spin_unlock_irqrestore(&vha->work_lock, flags); + if (q) + queue_work(vha->hw->wq, &vha->iocb_work); + } + + /* + * FC-NVME + * see if the active AEN count has changed from what was last reported. 
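+ * If so, SET_ZIO_THRESHOLD_NEEDED is scheduled so the DPC thread pushes the new threshold to firmware.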
+ */ + index = atomic_read(&ha->nvme_active_aen_cnt); + if (!vha->vp_idx && + (index != ha->nvme_last_rptd_aen) && + ha->zio_mode == QLA_ZIO_MODE_6 && + !ha->flags.host_shutting_down) { + ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt); + ql_log(ql_log_info, vha, 0x3002, + "nvme: Sched: Set ZIO exchange threshold to %d.\n", + ha->nvme_last_rptd_aen); + set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); + start_dpc++; + } + + if (!vha->vp_idx && + atomic_read(&ha->zio_threshold) != ha->last_zio_threshold && + IS_ZIO_THRESHOLD_CAPABLE(ha)) { + ql_log(ql_log_info, vha, 0x3002, + "Sched: Set ZIO exchange threshold to %d.\n", + ha->last_zio_threshold); + ha->last_zio_threshold = atomic_read(&ha->zio_threshold); + set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); + start_dpc++; + } + qla_adjust_buf(vha); + + /* borrowing w to signify dpc will run */ + w = 0; + /* Schedule the DPC routine if needed */ + if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || + test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) || + start_dpc || + test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) || + test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) || + test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || + test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || + test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || + test_bit(RELOGIN_NEEDED, &vha->dpc_flags) || + test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) { + ql_dbg(ql_dbg_timer, vha, 0x600b, + "isp_abort_needed=%d loop_resync_needed=%d " + "start_dpc=%d reset_marker_needed=%d", + test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags), + test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags), + start_dpc, test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)); + ql_dbg(ql_dbg_timer, vha, 0x600c, + "beacon_blink_needed=%d isp_unrecoverable=%d " + "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " + "relogin_needed=%d, Process_purex_iocb=%d.\n", + test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), + test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), + test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), + test_bit(VP_DPC_NEEDED, &vha->dpc_flags), + test_bit(RELOGIN_NEEDED, &vha->dpc_flags), + test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)); + qla2xxx_wake_dpc(vha); + w = 1; + } + + qla_heart_beat(vha, w); + + qla2x00_restart_timer(vha, WATCH_INTERVAL); +} + +/* Firmware interface routines. 
*/ + +#define FW_ISP21XX 0 +#define FW_ISP22XX 1 +#define FW_ISP2300 2 +#define FW_ISP2322 3 +#define FW_ISP24XX 4 +#define FW_ISP25XX 5 +#define FW_ISP81XX 6 +#define FW_ISP82XX 7 +#define FW_ISP2031 8 +#define FW_ISP8031 9 +#define FW_ISP27XX 10 +#define FW_ISP28XX 11 + +#define FW_FILE_ISP21XX "ql2100_fw.bin" +#define FW_FILE_ISP22XX "ql2200_fw.bin" +#define FW_FILE_ISP2300 "ql2300_fw.bin" +#define FW_FILE_ISP2322 "ql2322_fw.bin" +#define FW_FILE_ISP24XX "ql2400_fw.bin" +#define FW_FILE_ISP25XX "ql2500_fw.bin" +#define FW_FILE_ISP81XX "ql8100_fw.bin" +#define FW_FILE_ISP82XX "ql8200_fw.bin" +#define FW_FILE_ISP2031 "ql2600_fw.bin" +#define FW_FILE_ISP8031 "ql8300_fw.bin" +#define FW_FILE_ISP27XX "ql2700_fw.bin" +#define FW_FILE_ISP28XX "ql2800_fw.bin" + + +static DEFINE_MUTEX(qla_fw_lock); + +static struct fw_blob qla_fw_blobs[] = { + { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, }, + { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, }, + { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, }, + { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, }, + { .name = FW_FILE_ISP24XX, }, + { .name = FW_FILE_ISP25XX, }, + { .name = FW_FILE_ISP81XX, }, + { .name = FW_FILE_ISP82XX, }, + { .name = FW_FILE_ISP2031, }, + { .name = FW_FILE_ISP8031, }, + { .name = FW_FILE_ISP27XX, }, + { .name = FW_FILE_ISP28XX, }, + { .name = NULL, }, +}; + +struct fw_blob * +qla2x00_request_firmware(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct fw_blob *blob; + + if (IS_QLA2100(ha)) { + blob = &qla_fw_blobs[FW_ISP21XX]; + } else if (IS_QLA2200(ha)) { + blob = &qla_fw_blobs[FW_ISP22XX]; + } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { + blob = &qla_fw_blobs[FW_ISP2300]; + } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) { + blob = &qla_fw_blobs[FW_ISP2322]; + } else if (IS_QLA24XX_TYPE(ha)) { + blob = &qla_fw_blobs[FW_ISP24XX]; + } else if (IS_QLA25XX(ha)) { + blob = &qla_fw_blobs[FW_ISP25XX]; + } else if (IS_QLA81XX(ha)) { + blob = &qla_fw_blobs[FW_ISP81XX]; + } else if (IS_QLA82XX(ha)) { + blob = &qla_fw_blobs[FW_ISP82XX]; + } else if (IS_QLA2031(ha)) { + blob = &qla_fw_blobs[FW_ISP2031]; + } else if (IS_QLA8031(ha)) { + blob = &qla_fw_blobs[FW_ISP8031]; + } else if (IS_QLA27XX(ha)) { + blob = &qla_fw_blobs[FW_ISP27XX]; + } else if (IS_QLA28XX(ha)) { + blob = &qla_fw_blobs[FW_ISP28XX]; + } else { + return NULL; + } + + if (!blob->name) + return NULL; + + mutex_lock(&qla_fw_lock); + if (blob->fw) + goto out; + + if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { + ql_log(ql_log_warn, vha, 0x0063, + "Failed to load firmware image (%s).\n", blob->name); + blob->fw = NULL; + blob = NULL; + } + +out: + mutex_unlock(&qla_fw_lock); + return blob; +} + +static void +qla2x00_release_firmware(void) +{ + struct fw_blob *blob; + + mutex_lock(&qla_fw_lock); + for (blob = qla_fw_blobs; blob->name; blob++) + release_firmware(blob->fw); + mutex_unlock(&qla_fw_lock); +} + +static void qla_pci_error_cleanup(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + struct qla_qpair *qpair = NULL; + struct scsi_qla_host *vp, *tvp; + fc_port_t *fcport; + int i; + unsigned long flags; + + ql_dbg(ql_dbg_aer, vha, 0x9000, + "%s\n", __func__); + ha->chip_reset++; + + ha->base_qpair->chip_reset = ha->chip_reset; + for (i = 0; i < ha->max_qpairs; i++) { + if (ha->queue_pair_map[i]) + ha->queue_pair_map[i]->chip_reset = + ha->base_qpair->chip_reset; + } + + /* + * purge mailbox might take a while. 
Slot Reset/chip reset + * will take care of the purge + */ + + mutex_lock(&ha->mq_lock); + ha->base_qpair->online = 0; + list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) + qpair->online = 0; + wmb(); + mutex_unlock(&ha->mq_lock); + + qla2x00_mark_all_devices_lost(vha); + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + qla2x00_mark_all_devices_lost(vp); + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + + /* Clear all async request states across all VPs. */ + list_for_each_entry(fcport, &vha->vp_fcports, list) + fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + list_for_each_entry(fcport, &vp->vp_fcports, list) + fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } + spin_unlock_irqrestore(&ha->vport_slock, flags); +} + + +static pci_ers_result_t +qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + scsi_qla_host_t *vha = pci_get_drvdata(pdev); + struct qla_hw_data *ha = vha->hw; + pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET; + + ql_log(ql_log_warn, vha, 0x9000, + "PCI error detected, state %x.\n", state); + ha->pci_error_state = QLA_PCI_ERR_DETECTED; + + if (!atomic_read(&pdev->enable_cnt)) { + ql_log(ql_log_info, vha, 0xffff, + "PCI device is disabled,state %x\n", state); + ret = PCI_ERS_RESULT_NEED_RESET; + goto out; + } + + switch (state) { + case pci_channel_io_normal: + qla_pci_set_eeh_busy(vha); + if (ql2xmqsupport || ql2xnvmeenable) { + set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + ret = PCI_ERS_RESULT_CAN_RECOVER; + break; + case pci_channel_io_frozen: + qla_pci_set_eeh_busy(vha); + ret = PCI_ERS_RESULT_NEED_RESET; + break; + case pci_channel_io_perm_failure: + ha->flags.pci_channel_io_perm_failure = 1; + qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); + if (ql2xmqsupport || ql2xnvmeenable) { + set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + ret = PCI_ERS_RESULT_DISCONNECT; + } +out: + ql_dbg(ql_dbg_aer, vha, 0x600d, + "PCI error detected returning [%x].\n", ret); + return ret; +} + +static pci_ers_result_t +qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) +{ + int risc_paused = 0; + uint32_t stat; + unsigned long flags; + scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); + struct qla_hw_data *ha = base_vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; + + ql_log(ql_log_warn, base_vha, 0x9000, + "mmio enabled\n"); + + ha->pci_error_state = QLA_PCI_MMIO_ENABLED; + + if (IS_QLA82XX(ha)) + return PCI_ERS_RESULT_RECOVERED; + + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, base_vha, 0x803f, + "During mmio enabled, PCI/Register disconnect still detected.\n"); + goto out; + } + + spin_lock_irqsave(&ha->hardware_lock, flags); + if (IS_QLA2100(ha) || IS_QLA2200(ha)){ + stat = rd_reg_word(®->hccr); + if (stat & HCCR_RISC_PAUSE) + risc_paused = 1; + } else if (IS_QLA23XX(ha)) { + stat = rd_reg_dword(®->u.isp2300.host_status); + if (stat & HSR_RISC_PAUSED) + risc_paused = 1; + } else if 
(IS_FWI2_CAPABLE(ha)) { + stat = rd_reg_dword(®24->host_status); + if (stat & HSRX_RISC_PAUSED) + risc_paused = 1; + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (risc_paused) { + ql_log(ql_log_info, base_vha, 0x9003, + "RISC paused -- mmio_enabled, Dumping firmware.\n"); + qla2xxx_dump_fw(base_vha); + } +out: + /* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */ + ql_dbg(ql_dbg_aer, base_vha, 0x600d, + "mmio enabled returning.\n"); + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t +qla2xxx_pci_slot_reset(struct pci_dev *pdev) +{ + pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; + scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); + struct qla_hw_data *ha = base_vha->hw; + int rc; + struct qla_qpair *qpair = NULL; + + ql_log(ql_log_warn, base_vha, 0x9004, + "Slot Reset.\n"); + + ha->pci_error_state = QLA_PCI_SLOT_RESET; + /* Workaround: qla2xxx driver which access hardware earlier + * needs error state to be pci_channel_io_online. + * Otherwise mailbox command timesout. + */ + pdev->error_state = pci_channel_io_normal; + + pci_restore_state(pdev); + + /* pci_restore_state() clears the saved_state flag of the device + * save restored state which resets saved_state flag + */ + pci_save_state(pdev); + + if (ha->mem_only) + rc = pci_enable_device_mem(pdev); + else + rc = pci_enable_device(pdev); + + if (rc) { + ql_log(ql_log_warn, base_vha, 0x9005, + "Can't re-enable PCI device after reset.\n"); + goto exit_slot_reset; + } + + + if (ha->isp_ops->pci_config(base_vha)) + goto exit_slot_reset; + + mutex_lock(&ha->mq_lock); + list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) + qpair->online = 1; + mutex_unlock(&ha->mq_lock); + + ha->flags.eeh_busy = 0; + base_vha->flags.online = 1; + set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + ha->isp_ops->abort_isp(base_vha); + clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + + if (qla2x00_isp_reg_stat(ha)) { + ha->flags.eeh_busy = 1; + qla_pci_error_cleanup(base_vha); + ql_log(ql_log_warn, base_vha, 0x9005, + "Device unable to recover from PCI error.\n"); + } else { + ret = PCI_ERS_RESULT_RECOVERED; + } + +exit_slot_reset: + ql_dbg(ql_dbg_aer, base_vha, 0x900e, + "Slot Reset returning %x.\n", ret); + + return ret; +} + +static void +qla2xxx_pci_resume(struct pci_dev *pdev) +{ + scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); + struct qla_hw_data *ha = base_vha->hw; + int ret; + + ql_log(ql_log_warn, base_vha, 0x900f, + "Pci Resume.\n"); + + + ret = qla2x00_wait_for_hba_online(base_vha); + if (ret != QLA_SUCCESS) { + ql_log(ql_log_fatal, base_vha, 0x9002, + "The device failed to resume I/O from slot/link_reset.\n"); + } + ha->pci_error_state = QLA_PCI_RESUME; + ql_dbg(ql_dbg_aer, base_vha, 0x600d, + "Pci Resume returning.\n"); +} + +void qla_pci_set_eeh_busy(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + bool do_cleanup = false; + unsigned long flags; + + if (ha->flags.eeh_busy) + return; + + spin_lock_irqsave(&base_vha->work_lock, flags); + if (!ha->flags.eeh_busy) { + ha->eeh_jif = jiffies; + ha->flags.eeh_flush = 0; + + ha->flags.eeh_busy = 1; + do_cleanup = true; + } + spin_unlock_irqrestore(&base_vha->work_lock, flags); + + if (do_cleanup) + qla_pci_error_cleanup(base_vha); +} + +/* + * this routine will schedule a task to pause IO from interrupt context + * if caller sees a PCIE error event (register read = 0xf's) + */ +void qla_schedule_eeh_work(struct scsi_qla_host *vha) +{ + struct 
qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + + if (ha->flags.eeh_busy) + return; + + set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags); + qla2xxx_wake_dpc(base_vha); +} + +static void +qla_pci_reset_prepare(struct pci_dev *pdev) +{ + scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); + struct qla_hw_data *ha = base_vha->hw; + struct qla_qpair *qpair; + + ql_log(ql_log_warn, base_vha, 0xffff, + "%s.\n", __func__); + + /* + * PCI FLR/function reset is about to reset the + * slot. Stop the chip to stop all DMA access. + * It is assumed that pci_reset_done will be called + * after FLR to resume Chip operation. + */ + ha->flags.eeh_busy = 1; + mutex_lock(&ha->mq_lock); + list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) + qpair->online = 0; + mutex_unlock(&ha->mq_lock); + + set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + qla2x00_abort_isp_cleanup(base_vha); + qla2x00_abort_all_cmds(base_vha, DID_RESET << 16); +} + +static void +qla_pci_reset_done(struct pci_dev *pdev) +{ + scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); + struct qla_hw_data *ha = base_vha->hw; + struct qla_qpair *qpair; + + ql_log(ql_log_warn, base_vha, 0xffff, + "%s.\n", __func__); + + /* + * FLR just completed by PCI layer. Resume adapter + */ + ha->flags.eeh_busy = 0; + mutex_lock(&ha->mq_lock); + list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) + qpair->online = 1; + mutex_unlock(&ha->mq_lock); + + base_vha->flags.online = 1; + ha->isp_ops->abort_isp(base_vha); + clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); +} + +static void qla2xxx_map_queues(struct Scsi_Host *shost) +{ + scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; + struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; + + if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase) + blk_mq_map_queues(qmap); + else + blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); +} + +struct scsi_host_template qla2xxx_driver_template = { + .module = THIS_MODULE, + .name = QLA2XXX_DRIVER_NAME, + .queuecommand = qla2xxx_queuecommand, + + .eh_timed_out = fc_eh_timed_out, + .eh_abort_handler = qla2xxx_eh_abort, + .eh_should_retry_cmd = fc_eh_should_retry_cmd, + .eh_device_reset_handler = qla2xxx_eh_device_reset, + .eh_target_reset_handler = qla2xxx_eh_target_reset, + .eh_bus_reset_handler = qla2xxx_eh_bus_reset, + .eh_host_reset_handler = qla2xxx_eh_host_reset, + + .slave_configure = qla2xxx_slave_configure, + + .slave_alloc = qla2xxx_slave_alloc, + .slave_destroy = qla2xxx_slave_destroy, + .scan_finished = qla2xxx_scan_finished, + .scan_start = qla2xxx_scan_start, + .change_queue_depth = scsi_change_queue_depth, + .map_queues = qla2xxx_map_queues, + .this_id = -1, + .cmd_per_lun = 3, + .sg_tablesize = SG_ALL, + + .max_sectors = 0xFFFF, + .shost_groups = qla2x00_host_groups, + + .supported_mode = MODE_INITIATOR, + .track_queue_depth = 1, + .cmd_size = sizeof(srb_t), +}; + +static const struct pci_error_handlers qla2xxx_err_handler = { + .error_detected = qla2xxx_pci_error_detected, + .mmio_enabled = qla2xxx_pci_mmio_enabled, + .slot_reset = qla2xxx_pci_slot_reset, + .resume = qla2xxx_pci_resume, + .reset_prepare = qla_pci_reset_prepare, + .reset_done = qla_pci_reset_done, +}; + +static struct pci_device_id qla2xxx_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 
PCI_DEVICE_ID_QLOGIC_ISP2312) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) }, + { 0 }, +}; +MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); + +static struct pci_driver qla2xxx_pci_driver = { + .name = QLA2XXX_DRIVER_NAME, + .driver = { + .owner = THIS_MODULE, + }, + .id_table = qla2xxx_pci_tbl, + .probe = qla2x00_probe_one, + .remove = qla2x00_remove_one, + .shutdown = qla2x00_shutdown, + .err_handler = &qla2xxx_err_handler, +}; + +static const struct file_operations apidev_fops = { + .owner = THIS_MODULE, + .llseek = noop_llseek, +}; + +/** + * qla2x00_module_init - Module initialization. 
+ **/ +static int __init +qla2x00_module_init(void) +{ + int ret = 0; + + BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64); + BUILD_BUG_ON(sizeof(cmd_entry_t) != 64); + BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64); + BUILD_BUG_ON(sizeof(cont_entry_t) != 64); + BUILD_BUG_ON(sizeof(init_cb_t) != 96); + BUILD_BUG_ON(sizeof(mrk_entry_t) != 64); + BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64); + BUILD_BUG_ON(sizeof(request_t) != 64); + BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64); + BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64); + BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64); + BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64); + BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64); + BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64); + BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64); + BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64); + BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64); + BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64); + BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64); + BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64); + BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2604); + BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424); + BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164); + BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260); + BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260); + BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16); + BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64); + BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256); + BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24); + BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256); + BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288); + BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216); + BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64); + BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64); + BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64); + BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64); + BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128); + BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128); + BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64); + BUILD_BUG_ON(sizeof(struct mbx_entry) != 64); + BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252); + BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64); + BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512); + BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512); + BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64); + BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64); + BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64); + BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634); + BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100); + BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976); + BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228); + BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52); + BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172); + BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524); + BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8); + BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12); + BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24); + BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420); + BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28); + BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32); + BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196); + BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE); + BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128); + BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8); + 
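/*
 * The run of BUILD_BUG_ON() checks around this point pins every IOCB and
 * flash/NVRAM template structure to its on-the-wire size (most request and
 * response entries are exactly 64 bytes), so a stray padding or field change
 * fails the build instead of corrupting DMA traffic.  A minimal standalone
 * sketch of the same idea using C11 _Static_assert; the struct below is
 * illustrative only, not one of the driver's layouts.
 */
#include <stdint.h>

struct demo_iocb_entry {
        uint8_t  entry_type;            /* IOCB type code */
        uint8_t  entry_count;           /* number of 64-byte slots used */
        uint8_t  sys_define;
        uint8_t  entry_status;
        uint32_t handle;                /* command handle */
        uint8_t  payload[56];           /* remainder of the fixed 64-byte slot */
};

/* Fails the build if padding ever changes the wire size. */
_Static_assert(sizeof(struct demo_iocb_entry) == 64,
               "IOCB entries must stay exactly 64 bytes");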
BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16); + BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24); + BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16); + BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336); + BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064); + BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64); + BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64); + BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64); + BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64); + BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52); + BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56); + BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64); + BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64); + BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64); + BUILD_BUG_ON(sizeof(sts21_entry_t) != 64); + BUILD_BUG_ON(sizeof(sts22_entry_t) != 64); + BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64); + BUILD_BUG_ON(sizeof(sts_entry_t) != 64); + BUILD_BUG_ON(sizeof(sw_info_t) != 32); + BUILD_BUG_ON(sizeof(target_id_t) != 2); + + qla_trace_init(); + + /* Allocate cache for SRBs. */ + srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (srb_cachep == NULL) { + ql_log(ql_log_fatal, NULL, 0x0001, + "Unable to allocate SRB cache...Failing load!.\n"); + return -ENOMEM; + } + + /* Initialize target kmem_cache and mem_pools */ + ret = qlt_init(); + if (ret < 0) { + goto destroy_cache; + } else if (ret > 0) { + /* + * If initiator mode is explictly disabled by qlt_init(), + * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from + * performing scsi_scan_target() during LOOP UP event. + */ + qla2xxx_transport_functions.disable_target_scan = 1; + qla2xxx_transport_vport_functions.disable_target_scan = 1; + } + + /* Derive version string. */ + strcpy(qla2x00_version_str, QLA2XXX_VERSION); + if (ql2xextended_error_logging) + strcat(qla2x00_version_str, "-debug"); + if (ql2xextended_error_logging == 1) + ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; + + qla2xxx_transport_template = + fc_attach_transport(&qla2xxx_transport_functions); + if (!qla2xxx_transport_template) { + ql_log(ql_log_fatal, NULL, 0x0002, + "fc_attach_transport failed...Failing load!.\n"); + ret = -ENODEV; + goto qlt_exit; + } + + apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops); + if (apidev_major < 0) { + ql_log(ql_log_fatal, NULL, 0x0003, + "Unable to register char device %s.\n", QLA2XXX_APIDEV); + } + + qla2xxx_transport_vport_template = + fc_attach_transport(&qla2xxx_transport_vport_functions); + if (!qla2xxx_transport_vport_template) { + ql_log(ql_log_fatal, NULL, 0x0004, + "fc_attach_transport vport failed...Failing load!.\n"); + ret = -ENODEV; + goto unreg_chrdev; + } + ql_log(ql_log_info, NULL, 0x0005, + "QLogic Fibre Channel HBA Driver: %s.\n", + qla2x00_version_str); + ret = pci_register_driver(&qla2xxx_pci_driver); + if (ret) { + ql_log(ql_log_fatal, NULL, 0x0006, + "pci_register_driver failed...ret=%d Failing load!.\n", + ret); + goto release_vport_transport; + } + return ret; + +release_vport_transport: + fc_release_transport(qla2xxx_transport_vport_template); + +unreg_chrdev: + if (apidev_major >= 0) + unregister_chrdev(apidev_major, QLA2XXX_APIDEV); + fc_release_transport(qla2xxx_transport_template); + +qlt_exit: + qlt_exit(); + +destroy_cache: + kmem_cache_destroy(srb_cachep); + + qla_trace_uninit(); + return ret; +} + +/** + * qla2x00_module_exit - Module cleanup. 
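/*
 * qla2x00_module_init() above unwinds failures in reverse order of setup
 * (vport transport -> char device -> transport -> target module -> SRB
 * cache -> trace), and qla2x00_module_exit() below releases essentially the
 * same resources in mirror order.  A compact standalone sketch of that
 * goto-unwind idiom; step_a/step_b/step_c and their undo helpers are
 * illustrative stand-ins, not driver functions.
 */
#include <stdio.h>

static int step_a(void) { return 0; }           /* e.g. allocate a cache   */
static int step_b(void) { return 0; }           /* e.g. attach a transport */
static int step_c(void) { return -1; }          /* e.g. register a driver  */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int demo_init(void)
{
        int ret;

        ret = step_a();
        if (ret)
                return ret;

        ret = step_b();
        if (ret)
                goto fail_a;            /* unwind only what succeeded */

        ret = step_c();
        if (ret)
                goto fail_b;            /* reverse order of acquisition */

        return 0;

fail_b:
        undo_b();
fail_a:
        undo_a();
        return ret;
}

int main(void)
{
        return demo_init() ? 1 : 0;
}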
+ **/ +static void __exit +qla2x00_module_exit(void) +{ + pci_unregister_driver(&qla2xxx_pci_driver); + qla2x00_release_firmware(); + kmem_cache_destroy(ctx_cachep); + fc_release_transport(qla2xxx_transport_vport_template); + if (apidev_major >= 0) + unregister_chrdev(apidev_major, QLA2XXX_APIDEV); + fc_release_transport(qla2xxx_transport_template); + qlt_exit(); + kmem_cache_destroy(srb_cachep); + qla_trace_uninit(); +} + +module_init(qla2x00_module_init); +module_exit(qla2x00_module_exit); + +MODULE_AUTHOR("QLogic Corporation"); +MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FW_FILE_ISP21XX); +MODULE_FIRMWARE(FW_FILE_ISP22XX); +MODULE_FIRMWARE(FW_FILE_ISP2300); +MODULE_FIRMWARE(FW_FILE_ISP2322); +MODULE_FIRMWARE(FW_FILE_ISP24XX); +MODULE_FIRMWARE(FW_FILE_ISP25XX); diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h new file mode 100644 index 000000000..a5f3000ae --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_settings.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#define MAX_RETRIES_OF_ISP_ABORT 5 + +/* Max time to wait for the loop to be in LOOP_READY state */ +#define MAX_LOOP_TIMEOUT (60 * 5) + +#include "qla_version.h" diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c new file mode 100644 index 000000000..c092a6b1c --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -0,0 +1,3640 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#include "qla_def.h" + +#include +#include +#include +#include + +/* + * NVRAM support routines + */ + +/** + * qla2x00_lock_nvram_access() - + * @ha: HA context + */ +static void +qla2x00_lock_nvram_access(struct qla_hw_data *ha) +{ + uint16_t data; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) { + data = rd_reg_word(®->nvram); + while (data & NVR_BUSY) { + udelay(100); + data = rd_reg_word(®->nvram); + } + + /* Lock resource */ + wrt_reg_word(®->u.isp2300.host_semaphore, 0x1); + rd_reg_word(®->u.isp2300.host_semaphore); + udelay(5); + data = rd_reg_word(®->u.isp2300.host_semaphore); + while ((data & BIT_0) == 0) { + /* Lock failed */ + udelay(100); + wrt_reg_word(®->u.isp2300.host_semaphore, 0x1); + rd_reg_word(®->u.isp2300.host_semaphore); + udelay(5); + data = rd_reg_word(®->u.isp2300.host_semaphore); + } + } +} + +/** + * qla2x00_unlock_nvram_access() - + * @ha: HA context + */ +static void +qla2x00_unlock_nvram_access(struct qla_hw_data *ha) +{ + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) { + wrt_reg_word(®->u.isp2300.host_semaphore, 0); + rd_reg_word(®->u.isp2300.host_semaphore); + } +} + +/** + * qla2x00_nv_write() - Prepare for NVRAM read/write operation. + * @ha: HA context + * @data: Serial interface selector + */ +static void +qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data) +{ + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + wrt_reg_word(®->nvram, data | NVR_SELECT | NVR_WRT_ENABLE); + rd_reg_word(®->nvram); /* PCI Posting. */ + NVRAM_DELAY(); + wrt_reg_word(®->nvram, data | NVR_SELECT | NVR_CLOCK | + NVR_WRT_ENABLE); + rd_reg_word(®->nvram); /* PCI Posting. 
*/ + NVRAM_DELAY(); + wrt_reg_word(®->nvram, data | NVR_SELECT | NVR_WRT_ENABLE); + rd_reg_word(®->nvram); /* PCI Posting. */ + NVRAM_DELAY(); +} + +/** + * qla2x00_nvram_request() - Sends read command to NVRAM and gets data from + * NVRAM. + * @ha: HA context + * @nv_cmd: NVRAM command + * + * Bit definitions for NVRAM command: + * + * Bit 26 = start bit + * Bit 25, 24 = opcode + * Bit 23-16 = address + * Bit 15-0 = write data + * + * Returns the word read from nvram @addr. + */ +static uint16_t +qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd) +{ + uint8_t cnt; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + uint16_t data = 0; + uint16_t reg_data; + + /* Send command to NVRAM. */ + nv_cmd <<= 5; + for (cnt = 0; cnt < 11; cnt++) { + if (nv_cmd & BIT_31) + qla2x00_nv_write(ha, NVR_DATA_OUT); + else + qla2x00_nv_write(ha, 0); + nv_cmd <<= 1; + } + + /* Read data from NVRAM. */ + for (cnt = 0; cnt < 16; cnt++) { + wrt_reg_word(®->nvram, NVR_SELECT | NVR_CLOCK); + rd_reg_word(®->nvram); /* PCI Posting. */ + NVRAM_DELAY(); + data <<= 1; + reg_data = rd_reg_word(®->nvram); + if (reg_data & NVR_DATA_IN) + data |= BIT_0; + wrt_reg_word(®->nvram, NVR_SELECT); + rd_reg_word(®->nvram); /* PCI Posting. */ + NVRAM_DELAY(); + } + + /* Deselect chip. */ + wrt_reg_word(®->nvram, NVR_DESELECT); + rd_reg_word(®->nvram); /* PCI Posting. */ + NVRAM_DELAY(); + + return data; +} + + +/** + * qla2x00_get_nvram_word() - Calculates word position in NVRAM and calls the + * request routine to get the word from NVRAM. + * @ha: HA context + * @addr: Address in NVRAM to read + * + * Returns the word read from nvram @addr. + */ +static uint16_t +qla2x00_get_nvram_word(struct qla_hw_data *ha, uint32_t addr) +{ + uint16_t data; + uint32_t nv_cmd; + + nv_cmd = addr << 16; + nv_cmd |= NV_READ_OP; + data = qla2x00_nvram_request(ha, nv_cmd); + + return (data); +} + +/** + * qla2x00_nv_deselect() - Deselect NVRAM operations. + * @ha: HA context + */ +static void +qla2x00_nv_deselect(struct qla_hw_data *ha) +{ + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + wrt_reg_word(®->nvram, NVR_DESELECT); + rd_reg_word(®->nvram); /* PCI Posting. */ + NVRAM_DELAY(); +} + +/** + * qla2x00_write_nvram_word() - Write NVRAM data. + * @ha: HA context + * @addr: Address in NVRAM to write + * @data: word to program + */ +static void +qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, __le16 data) +{ + int count; + uint16_t word; + uint32_t nv_cmd, wait_cnt; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + qla2x00_nv_write(ha, NVR_DATA_OUT); + qla2x00_nv_write(ha, 0); + qla2x00_nv_write(ha, 0); + + for (word = 0; word < 8; word++) + qla2x00_nv_write(ha, NVR_DATA_OUT); + + qla2x00_nv_deselect(ha); + + /* Write data */ + nv_cmd = (addr << 16) | NV_WRITE_OP; + nv_cmd |= (__force u16)data; + nv_cmd <<= 5; + for (count = 0; count < 27; count++) { + if (nv_cmd & BIT_31) + qla2x00_nv_write(ha, NVR_DATA_OUT); + else + qla2x00_nv_write(ha, 0); + + nv_cmd <<= 1; + } + + qla2x00_nv_deselect(ha); + + /* Wait for NVRAM to become ready */ + wrt_reg_word(®->nvram, NVR_SELECT); + rd_reg_word(®->nvram); /* PCI Posting. 
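/*
 * A worked example of the NVRAM command layout documented above (bit 26 =
 * start bit, bits 25-24 = opcode, bits 23-16 = address, bits 15-0 = write
 * data).  qla2x00_nvram_request() shifts the assembled command left by 5 so
 * bit 26 lands in bit 31, clocks the top 11 bits out MSB-first (start +
 * opcode + address), then clocks 16 data bits back in.  Standalone sketch;
 * the opcode is a plain parameter here because the actual NV_READ_OP /
 * NV_WRITE_OP encodings come from the driver headers, not from this excerpt.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t nv_make_cmd(uint32_t opcode, uint32_t addr, uint32_t wdata)
{
        return (1u << 26)                       /* start bit         */
             | ((opcode & 0x3u) << 24)          /* 2-bit opcode      */
             | ((addr & 0xffu) << 16)           /* 8-bit address     */
             | (wdata & 0xffffu);               /* 16-bit write data */
}

int main(void)
{
        /* Arbitrary example opcode/address, aligned so bit 26 -> bit 31. */
        uint32_t cmd = nv_make_cmd(0x2, 0x1a, 0) << 5;

        /* Clock out the 11 command bits exactly as the driver loop does. */
        for (int cnt = 0; cnt < 11; cnt++) {
                printf("%d", (cmd & (1u << 31)) ? 1 : 0);
                cmd <<= 1;
        }
        putchar('\n');
        return 0;
}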
*/ + wait_cnt = NVR_WAIT_CNT; + do { + if (!--wait_cnt) { + ql_dbg(ql_dbg_user, vha, 0x708d, + "NVRAM didn't go ready...\n"); + break; + } + NVRAM_DELAY(); + word = rd_reg_word(®->nvram); + } while ((word & NVR_DATA_IN) == 0); + + qla2x00_nv_deselect(ha); + + /* Disable writes */ + qla2x00_nv_write(ha, NVR_DATA_OUT); + for (count = 0; count < 10; count++) + qla2x00_nv_write(ha, 0); + + qla2x00_nv_deselect(ha); +} + +static int +qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr, + __le16 data, uint32_t tmo) +{ + int ret, count; + uint16_t word; + uint32_t nv_cmd; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + ret = QLA_SUCCESS; + + qla2x00_nv_write(ha, NVR_DATA_OUT); + qla2x00_nv_write(ha, 0); + qla2x00_nv_write(ha, 0); + + for (word = 0; word < 8; word++) + qla2x00_nv_write(ha, NVR_DATA_OUT); + + qla2x00_nv_deselect(ha); + + /* Write data */ + nv_cmd = (addr << 16) | NV_WRITE_OP; + nv_cmd |= (__force u16)data; + nv_cmd <<= 5; + for (count = 0; count < 27; count++) { + if (nv_cmd & BIT_31) + qla2x00_nv_write(ha, NVR_DATA_OUT); + else + qla2x00_nv_write(ha, 0); + + nv_cmd <<= 1; + } + + qla2x00_nv_deselect(ha); + + /* Wait for NVRAM to become ready */ + wrt_reg_word(®->nvram, NVR_SELECT); + rd_reg_word(®->nvram); /* PCI Posting. */ + do { + NVRAM_DELAY(); + word = rd_reg_word(®->nvram); + if (!--tmo) { + ret = QLA_FUNCTION_FAILED; + break; + } + } while ((word & NVR_DATA_IN) == 0); + + qla2x00_nv_deselect(ha); + + /* Disable writes */ + qla2x00_nv_write(ha, NVR_DATA_OUT); + for (count = 0; count < 10; count++) + qla2x00_nv_write(ha, 0); + + qla2x00_nv_deselect(ha); + + return ret; +} + +/** + * qla2x00_clear_nvram_protection() - + * @ha: HA context + */ +static int +qla2x00_clear_nvram_protection(struct qla_hw_data *ha) +{ + int ret, stat; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + uint32_t word, wait_cnt; + __le16 wprot, wprot_old; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + /* Clear NVRAM write protection. */ + ret = QLA_FUNCTION_FAILED; + + wprot_old = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base)); + stat = qla2x00_write_nvram_word_tmo(ha, ha->nvram_base, + cpu_to_le16(0x1234), 100000); + wprot = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base)); + if (stat != QLA_SUCCESS || wprot != cpu_to_le16(0x1234)) { + /* Write enable. */ + qla2x00_nv_write(ha, NVR_DATA_OUT); + qla2x00_nv_write(ha, 0); + qla2x00_nv_write(ha, 0); + for (word = 0; word < 8; word++) + qla2x00_nv_write(ha, NVR_DATA_OUT); + + qla2x00_nv_deselect(ha); + + /* Enable protection register. */ + qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT); + qla2x00_nv_write(ha, NVR_PR_ENABLE); + qla2x00_nv_write(ha, NVR_PR_ENABLE); + for (word = 0; word < 8; word++) + qla2x00_nv_write(ha, NVR_DATA_OUT | NVR_PR_ENABLE); + + qla2x00_nv_deselect(ha); + + /* Clear protection register (ffff is cleared). */ + qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT); + qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT); + qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT); + for (word = 0; word < 8; word++) + qla2x00_nv_write(ha, NVR_DATA_OUT | NVR_PR_ENABLE); + + qla2x00_nv_deselect(ha); + + /* Wait for NVRAM to become ready. */ + wrt_reg_word(®->nvram, NVR_SELECT); + rd_reg_word(®->nvram); /* PCI Posting. 
*/ + wait_cnt = NVR_WAIT_CNT; + do { + if (!--wait_cnt) { + ql_dbg(ql_dbg_user, vha, 0x708e, + "NVRAM didn't go ready...\n"); + break; + } + NVRAM_DELAY(); + word = rd_reg_word(®->nvram); + } while ((word & NVR_DATA_IN) == 0); + + if (wait_cnt) + ret = QLA_SUCCESS; + } else + qla2x00_write_nvram_word(ha, ha->nvram_base, wprot_old); + + return ret; +} + +static void +qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat) +{ + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + uint32_t word, wait_cnt; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + if (stat != QLA_SUCCESS) + return; + + /* Set NVRAM write protection. */ + /* Write enable. */ + qla2x00_nv_write(ha, NVR_DATA_OUT); + qla2x00_nv_write(ha, 0); + qla2x00_nv_write(ha, 0); + for (word = 0; word < 8; word++) + qla2x00_nv_write(ha, NVR_DATA_OUT); + + qla2x00_nv_deselect(ha); + + /* Enable protection register. */ + qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT); + qla2x00_nv_write(ha, NVR_PR_ENABLE); + qla2x00_nv_write(ha, NVR_PR_ENABLE); + for (word = 0; word < 8; word++) + qla2x00_nv_write(ha, NVR_DATA_OUT | NVR_PR_ENABLE); + + qla2x00_nv_deselect(ha); + + /* Enable protection register. */ + qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT); + qla2x00_nv_write(ha, NVR_PR_ENABLE); + qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT); + for (word = 0; word < 8; word++) + qla2x00_nv_write(ha, NVR_PR_ENABLE); + + qla2x00_nv_deselect(ha); + + /* Wait for NVRAM to become ready. */ + wrt_reg_word(®->nvram, NVR_SELECT); + rd_reg_word(®->nvram); /* PCI Posting. */ + wait_cnt = NVR_WAIT_CNT; + do { + if (!--wait_cnt) { + ql_dbg(ql_dbg_user, vha, 0x708f, + "NVRAM didn't go ready...\n"); + break; + } + NVRAM_DELAY(); + word = rd_reg_word(®->nvram); + } while ((word & NVR_DATA_IN) == 0); +} + + +/*****************************************************************************/ +/* Flash Manipulation Routines */ +/*****************************************************************************/ + +static inline uint32_t +flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr) +{ + return ha->flash_conf_off + faddr; +} + +static inline uint32_t +flash_data_addr(struct qla_hw_data *ha, uint32_t faddr) +{ + return ha->flash_data_off + faddr; +} + +static inline uint32_t +nvram_conf_addr(struct qla_hw_data *ha, uint32_t naddr) +{ + return ha->nvram_conf_off + naddr; +} + +static inline uint32_t +nvram_data_addr(struct qla_hw_data *ha, uint32_t naddr) +{ + return ha->nvram_data_off + naddr; +} + +static int +qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t *data) +{ + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + ulong cnt = 30000; + + wrt_reg_dword(®->flash_addr, addr & ~FARX_DATA_FLAG); + + while (cnt--) { + if (rd_reg_dword(®->flash_addr) & FARX_DATA_FLAG) { + *data = rd_reg_dword(®->flash_data); + return QLA_SUCCESS; + } + udelay(10); + cond_resched(); + } + + ql_log(ql_log_warn, pci_get_drvdata(ha->pdev), 0x7090, + "Flash read dword at %x timeout.\n", addr); + *data = 0xDEADDEAD; + return QLA_FUNCTION_TIMEOUT; +} + +int +qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, + uint32_t dwords) +{ + ulong i; + int ret = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + /* Dword reads to flash. 
*/ + faddr = flash_data_addr(ha, faddr); + for (i = 0; i < dwords; i++, faddr++, dwptr++) { + ret = qla24xx_read_flash_dword(ha, faddr, dwptr); + if (ret != QLA_SUCCESS) + break; + cpu_to_le32s(dwptr); + } + + return ret; +} + +static int +qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data) +{ + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + ulong cnt = 500000; + + wrt_reg_dword(®->flash_data, data); + wrt_reg_dword(®->flash_addr, addr | FARX_DATA_FLAG); + + while (cnt--) { + if (!(rd_reg_dword(®->flash_addr) & FARX_DATA_FLAG)) + return QLA_SUCCESS; + udelay(10); + cond_resched(); + } + + ql_log(ql_log_warn, pci_get_drvdata(ha->pdev), 0x7090, + "Flash write dword at %x timeout.\n", addr); + return QLA_FUNCTION_TIMEOUT; +} + +static void +qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id, + uint8_t *flash_id) +{ + uint32_t faddr, ids = 0; + + *man_id = *flash_id = 0; + + faddr = flash_conf_addr(ha, 0x03ab); + if (!qla24xx_read_flash_dword(ha, faddr, &ids)) { + *man_id = LSB(ids); + *flash_id = MSB(ids); + } + + /* Check if man_id and flash_id are valid. */ + if (ids != 0xDEADDEAD && (*man_id == 0 || *flash_id == 0)) { + /* Read information using 0x9f opcode + * Device ID, Mfg ID would be read in the format: + * + * Example: ATMEL 0x00 01 45 1F + * Extract MFG and Dev ID from last two bytes. + */ + faddr = flash_conf_addr(ha, 0x009f); + if (!qla24xx_read_flash_dword(ha, faddr, &ids)) { + *man_id = LSB(ids); + *flash_id = MSB(ids); + } + } +} + +static int +qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) +{ + const char *loc, *locations[] = { "DEF", "PCI" }; + uint32_t pcihdr, pcids; + uint16_t cnt, chksum; + __le16 *wptr; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + struct qla_flt_location *fltl = (void *)req->ring; + uint32_t *dcode = (uint32_t *)req->ring; + uint8_t *buf = (void *)req->ring, *bcode, last_image; + + /* + * FLT-location structure resides after the last PCI region. + */ + + /* Begin with sane defaults. */ + loc = locations[0]; + *start = 0; + if (IS_QLA24XX_TYPE(ha)) + *start = FA_FLASH_LAYOUT_ADDR_24; + else if (IS_QLA25XX(ha)) + *start = FA_FLASH_LAYOUT_ADDR; + else if (IS_QLA81XX(ha)) + *start = FA_FLASH_LAYOUT_ADDR_81; + else if (IS_P3P_TYPE(ha)) { + *start = FA_FLASH_LAYOUT_ADDR_82; + goto end; + } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + *start = FA_FLASH_LAYOUT_ADDR_83; + goto end; + } else if (IS_QLA28XX(ha)) { + *start = FA_FLASH_LAYOUT_ADDR_28; + goto end; + } + + /* Begin with first PCI expansion ROM header. */ + pcihdr = 0; + do { + /* Verify PCI expansion ROM header. */ + qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); + bcode = buf + (pcihdr % 4); + if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) + goto end; + + /* Locate PCI data structure. */ + pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); + qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); + bcode = buf + (pcihdr % 4); + + /* Validate signature of PCI data structure. */ + if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || + bcode[0x2] != 'I' || bcode[0x3] != 'R') + goto end; + + last_image = bcode[0x15] & BIT_7; + + /* Locate next PCI expansion ROM. */ + pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512; + } while (!last_image); + + /* Now verify FLT-location structure. 
*/ + qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2); + if (memcmp(fltl->sig, "QFLT", 4)) + goto end; + + wptr = (__force __le16 *)req->ring; + cnt = sizeof(*fltl) / sizeof(*wptr); + for (chksum = 0; cnt--; wptr++) + chksum += le16_to_cpu(*wptr); + if (chksum) { + ql_log(ql_log_fatal, vha, 0x0045, + "Inconsistent FLTL detected: checksum=0x%x.\n", chksum); + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e, + fltl, sizeof(*fltl)); + return QLA_FUNCTION_FAILED; + } + + /* Good data. Use specified location. */ + loc = locations[1]; + *start = (le16_to_cpu(fltl->start_hi) << 16 | + le16_to_cpu(fltl->start_lo)) >> 2; +end: + ql_dbg(ql_dbg_init, vha, 0x0046, + "FLTL[%s] = 0x%x.\n", + loc, *start); + return QLA_SUCCESS; +} + +static void +qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) +{ + const char *locations[] = { "DEF", "FLT" }, *loc = locations[1]; + const uint32_t def_fw[] = + { FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR_81 }; + const uint32_t def_boot[] = + { FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR_81 }; + const uint32_t def_vpd_nvram[] = + { FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR_81 }; + const uint32_t def_vpd0[] = + { 0, 0, FA_VPD0_ADDR_81 }; + const uint32_t def_vpd1[] = + { 0, 0, FA_VPD1_ADDR_81 }; + const uint32_t def_nvram0[] = + { 0, 0, FA_NVRAM0_ADDR_81 }; + const uint32_t def_nvram1[] = + { 0, 0, FA_NVRAM1_ADDR_81 }; + const uint32_t def_fdt[] = + { FA_FLASH_DESCR_ADDR_24, FA_FLASH_DESCR_ADDR, + FA_FLASH_DESCR_ADDR_81 }; + const uint32_t def_npiv_conf0[] = + { FA_NPIV_CONF0_ADDR_24, FA_NPIV_CONF0_ADDR, + FA_NPIV_CONF0_ADDR_81 }; + const uint32_t def_npiv_conf1[] = + { FA_NPIV_CONF1_ADDR_24, FA_NPIV_CONF1_ADDR, + FA_NPIV_CONF1_ADDR_81 }; + const uint32_t fcp_prio_cfg0[] = + { FA_FCP_PRIO0_ADDR, FA_FCP_PRIO0_ADDR_25, + 0 }; + const uint32_t fcp_prio_cfg1[] = + { FA_FCP_PRIO1_ADDR, FA_FCP_PRIO1_ADDR_25, + 0 }; + + struct qla_hw_data *ha = vha->hw; + uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0; + struct qla_flt_header *flt = ha->flt; + struct qla_flt_region *region = &flt->region[0]; + __le16 *wptr; + uint16_t cnt, chksum; + uint32_t start; + + /* Assign FCP prio region since older adapters may not have FLT, or + FCP prio region in it's FLT. + */ + ha->flt_region_fcp_prio = (ha->port_no == 0) ? + fcp_prio_cfg0[def] : fcp_prio_cfg1[def]; + + ha->flt_region_flt = flt_addr; + wptr = (__force __le16 *)ha->flt; + ha->isp_ops->read_optrom(vha, flt, flt_addr << 2, + (sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE)); + + if (le16_to_cpu(*wptr) == 0xffff) + goto no_flash_data; + if (flt->version != cpu_to_le16(1)) { + ql_log(ql_log_warn, vha, 0x0047, + "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n", + le16_to_cpu(flt->version), le16_to_cpu(flt->length), + le16_to_cpu(flt->checksum)); + goto no_flash_data; + } + + cnt = (sizeof(*flt) + le16_to_cpu(flt->length)) / sizeof(*wptr); + for (chksum = 0; cnt--; wptr++) + chksum += le16_to_cpu(*wptr); + if (chksum) { + ql_log(ql_log_fatal, vha, 0x0048, + "Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n", + le16_to_cpu(flt->version), le16_to_cpu(flt->length), + le16_to_cpu(flt->checksum)); + goto no_flash_data; + } + + cnt = le16_to_cpu(flt->length) / sizeof(*region); + for ( ; cnt; cnt--, region++) { + /* Store addresses as DWORD offsets. 
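/*
 * Both the FLT-location structure and the FLT header/region table above are
 * validated the same way: every 16-bit little-endian word of the image is
 * summed and the total must come out to zero, i.e. the stored checksum word
 * is the two's complement of the sum of all the other words.  Minimal
 * standalone sketch of that check over an illustrative 8-byte buffer.
 */
#include <stdint.h>
#include <stdio.h>

/* Sum 16-bit little-endian words; a valid image sums to 0 (mod 2^16). */
static uint16_t le16_sum(const uint8_t *buf, size_t len)
{
        uint16_t sum = 0;

        for (size_t i = 0; i + 1 < len; i += 2)
                sum += (uint16_t)(buf[i] | (buf[i + 1] << 8));
        return sum;
}

int main(void)
{
        uint8_t img[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0, 0 };
        uint16_t fix = (uint16_t)(0x10000 - le16_sum(img, 6));

        img[6] = fix & 0xff;            /* store checksum word (LE) */
        img[7] = fix >> 8;
        printf("checksum ok: %s\n", le16_sum(img, 8) == 0 ? "yes" : "no");
        return 0;
}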
*/ + start = le32_to_cpu(region->start) >> 2; + ql_dbg(ql_dbg_init, vha, 0x0049, + "FLT[%#x]: start=%#x end=%#x size=%#x.\n", + le16_to_cpu(region->code), start, + le32_to_cpu(region->end) >> 2, + le32_to_cpu(region->size) >> 2); + if (region->attribute) + ql_log(ql_dbg_init, vha, 0xffff, + "Region %x is secure\n", region->code); + + switch (le16_to_cpu(region->code)) { + case FLT_REG_FCOE_FW: + if (!IS_QLA8031(ha)) + break; + ha->flt_region_fw = start; + break; + case FLT_REG_FW: + if (IS_QLA8031(ha)) + break; + ha->flt_region_fw = start; + break; + case FLT_REG_BOOT_CODE: + ha->flt_region_boot = start; + break; + case FLT_REG_VPD_0: + if (IS_QLA8031(ha)) + break; + ha->flt_region_vpd_nvram = start; + if (IS_P3P_TYPE(ha)) + break; + if (ha->port_no == 0) + ha->flt_region_vpd = start; + break; + case FLT_REG_VPD_1: + if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) + break; + if (ha->port_no == 1) + ha->flt_region_vpd = start; + break; + case FLT_REG_VPD_2: + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + break; + if (ha->port_no == 2) + ha->flt_region_vpd = start; + break; + case FLT_REG_VPD_3: + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + break; + if (ha->port_no == 3) + ha->flt_region_vpd = start; + break; + case FLT_REG_NVRAM_0: + if (IS_QLA8031(ha)) + break; + if (ha->port_no == 0) + ha->flt_region_nvram = start; + break; + case FLT_REG_NVRAM_1: + if (IS_QLA8031(ha)) + break; + if (ha->port_no == 1) + ha->flt_region_nvram = start; + break; + case FLT_REG_NVRAM_2: + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + break; + if (ha->port_no == 2) + ha->flt_region_nvram = start; + break; + case FLT_REG_NVRAM_3: + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + break; + if (ha->port_no == 3) + ha->flt_region_nvram = start; + break; + case FLT_REG_FDT: + ha->flt_region_fdt = start; + break; + case FLT_REG_NPIV_CONF_0: + if (ha->port_no == 0) + ha->flt_region_npiv_conf = start; + break; + case FLT_REG_NPIV_CONF_1: + if (ha->port_no == 1) + ha->flt_region_npiv_conf = start; + break; + case FLT_REG_GOLD_FW: + ha->flt_region_gold_fw = start; + break; + case FLT_REG_FCP_PRIO_0: + if (ha->port_no == 0) + ha->flt_region_fcp_prio = start; + break; + case FLT_REG_FCP_PRIO_1: + if (ha->port_no == 1) + ha->flt_region_fcp_prio = start; + break; + case FLT_REG_BOOT_CODE_82XX: + ha->flt_region_boot = start; + break; + case FLT_REG_BOOT_CODE_8044: + if (IS_QLA8044(ha)) + ha->flt_region_boot = start; + break; + case FLT_REG_FW_82XX: + ha->flt_region_fw = start; + break; + case FLT_REG_CNA_FW: + if (IS_CNA_CAPABLE(ha)) + ha->flt_region_fw = start; + break; + case FLT_REG_GOLD_FW_82XX: + ha->flt_region_gold_fw = start; + break; + case FLT_REG_BOOTLOAD_82XX: + ha->flt_region_bootload = start; + break; + case FLT_REG_VPD_8XXX: + if (IS_CNA_CAPABLE(ha)) + ha->flt_region_vpd = start; + break; + case FLT_REG_FCOE_NVRAM_0: + if (!(IS_QLA8031(ha) || IS_QLA8044(ha))) + break; + if (ha->port_no == 0) + ha->flt_region_nvram = start; + break; + case FLT_REG_FCOE_NVRAM_1: + if (!(IS_QLA8031(ha) || IS_QLA8044(ha))) + break; + if (ha->port_no == 1) + ha->flt_region_nvram = start; + break; + case FLT_REG_IMG_PRI_27XX: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + ha->flt_region_img_status_pri = start; + break; + case FLT_REG_IMG_SEC_27XX: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + ha->flt_region_img_status_sec = start; + break; + case FLT_REG_FW_SEC_27XX: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + ha->flt_region_fw_sec = start; + break; + case FLT_REG_BOOTLOAD_SEC_27XX: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + ha->flt_region_boot_sec = start; + break; + 
case FLT_REG_AUX_IMG_PRI_28XX: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + ha->flt_region_aux_img_status_pri = start; + break; + case FLT_REG_AUX_IMG_SEC_28XX: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + ha->flt_region_aux_img_status_sec = start; + break; + case FLT_REG_NVRAM_SEC_28XX_0: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (ha->port_no == 0) + ha->flt_region_nvram_sec = start; + break; + case FLT_REG_NVRAM_SEC_28XX_1: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (ha->port_no == 1) + ha->flt_region_nvram_sec = start; + break; + case FLT_REG_NVRAM_SEC_28XX_2: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (ha->port_no == 2) + ha->flt_region_nvram_sec = start; + break; + case FLT_REG_NVRAM_SEC_28XX_3: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (ha->port_no == 3) + ha->flt_region_nvram_sec = start; + break; + case FLT_REG_VPD_SEC_27XX_0: + case FLT_REG_VPD_SEC_28XX_0: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + ha->flt_region_vpd_nvram_sec = start; + if (ha->port_no == 0) + ha->flt_region_vpd_sec = start; + } + break; + case FLT_REG_VPD_SEC_27XX_1: + case FLT_REG_VPD_SEC_28XX_1: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (ha->port_no == 1) + ha->flt_region_vpd_sec = start; + break; + case FLT_REG_VPD_SEC_27XX_2: + case FLT_REG_VPD_SEC_28XX_2: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (ha->port_no == 2) + ha->flt_region_vpd_sec = start; + break; + case FLT_REG_VPD_SEC_27XX_3: + case FLT_REG_VPD_SEC_28XX_3: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (ha->port_no == 3) + ha->flt_region_vpd_sec = start; + break; + } + } + goto done; + +no_flash_data: + /* Use hardcoded defaults. */ + loc = locations[0]; + ha->flt_region_fw = def_fw[def]; + ha->flt_region_boot = def_boot[def]; + ha->flt_region_vpd_nvram = def_vpd_nvram[def]; + ha->flt_region_vpd = (ha->port_no == 0) ? + def_vpd0[def] : def_vpd1[def]; + ha->flt_region_nvram = (ha->port_no == 0) ? + def_nvram0[def] : def_nvram1[def]; + ha->flt_region_fdt = def_fdt[def]; + ha->flt_region_npiv_conf = (ha->port_no == 0) ? 
+ def_npiv_conf0[def] : def_npiv_conf1[def]; +done: + ql_dbg(ql_dbg_init, vha, 0x004a, + "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x nvram=0x%x " + "fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n", + loc, ha->flt_region_boot, ha->flt_region_fw, + ha->flt_region_vpd_nvram, ha->flt_region_vpd, ha->flt_region_nvram, + ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf, + ha->flt_region_fcp_prio); +} + +static void +qla2xxx_get_fdt_info(scsi_qla_host_t *vha) +{ +#define FLASH_BLK_SIZE_4K 0x1000 +#define FLASH_BLK_SIZE_32K 0x8000 +#define FLASH_BLK_SIZE_64K 0x10000 + const char *loc, *locations[] = { "MID", "FDT" }; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + uint16_t cnt, chksum; + __le16 *wptr = (__force __le16 *)req->ring; + struct qla_fdt_layout *fdt = (struct qla_fdt_layout *)req->ring; + uint8_t man_id, flash_id; + uint16_t mid = 0, fid = 0; + + ha->isp_ops->read_optrom(vha, fdt, ha->flt_region_fdt << 2, + OPTROM_BURST_DWORDS); + if (le16_to_cpu(*wptr) == 0xffff) + goto no_flash_data; + if (memcmp(fdt->sig, "QLID", 4)) + goto no_flash_data; + + for (cnt = 0, chksum = 0; cnt < sizeof(*fdt) >> 1; cnt++, wptr++) + chksum += le16_to_cpu(*wptr); + if (chksum) { + ql_dbg(ql_dbg_init, vha, 0x004c, + "Inconsistent FDT detected:" + " checksum=0x%x id=%c version0x%x.\n", chksum, + fdt->sig[0], le16_to_cpu(fdt->version)); + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113, + fdt, sizeof(*fdt)); + goto no_flash_data; + } + + loc = locations[1]; + mid = le16_to_cpu(fdt->man_id); + fid = le16_to_cpu(fdt->id); + ha->fdt_wrt_disable = fdt->wrt_disable_bits; + ha->fdt_wrt_enable = fdt->wrt_enable_bits; + ha->fdt_wrt_sts_reg_cmd = fdt->wrt_sts_reg_cmd; + if (IS_QLA8044(ha)) + ha->fdt_erase_cmd = fdt->erase_cmd; + else + ha->fdt_erase_cmd = + flash_conf_addr(ha, 0x0300 | fdt->erase_cmd); + ha->fdt_block_size = le32_to_cpu(fdt->block_size); + if (fdt->unprotect_sec_cmd) { + ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 | + fdt->unprotect_sec_cmd); + ha->fdt_protect_sec_cmd = fdt->protect_sec_cmd ? + flash_conf_addr(ha, 0x0300 | fdt->protect_sec_cmd) : + flash_conf_addr(ha, 0x0336); + } + goto done; +no_flash_data: + loc = locations[0]; + if (IS_P3P_TYPE(ha)) { + ha->fdt_block_size = FLASH_BLK_SIZE_64K; + goto done; + } + qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id); + mid = man_id; + fid = flash_id; + ha->fdt_wrt_disable = 0x9c; + ha->fdt_erase_cmd = flash_conf_addr(ha, 0x03d8); + switch (man_id) { + case 0xbf: /* STT flash. */ + if (flash_id == 0x8e) + ha->fdt_block_size = FLASH_BLK_SIZE_64K; + else + ha->fdt_block_size = FLASH_BLK_SIZE_32K; + + if (flash_id == 0x80) + ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0352); + break; + case 0x13: /* ST M25P80. */ + ha->fdt_block_size = FLASH_BLK_SIZE_64K; + break; + case 0x1f: /* Atmel 26DF081A. */ + ha->fdt_block_size = FLASH_BLK_SIZE_4K; + ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0320); + ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0339); + ha->fdt_protect_sec_cmd = flash_conf_addr(ha, 0x0336); + break; + default: + /* Default to 64 kb sector size. 
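/*
 * When no valid FDT is present, the fallback above derives the erase command
 * and sector size from the flash part's manufacturer/device ID pair.  The
 * same selection, rendered as a small lookup table (IDs and sizes taken from
 * the switch above; purely illustrative -- the driver keeps the switch and
 * also adjusts the erase/unprotect commands).
 */
#include <stdint.h>
#include <stddef.h>

struct flash_part {
        uint8_t  man_id;
        uint8_t  flash_id;      /* 0 = any device from this manufacturer */
        uint32_t blk_size;      /* erase-sector size in bytes */
};

static const struct flash_part parts[] = {
        { 0xbf, 0x8e, 0x10000 },        /* 0xbf parts: 64 KB for id 0x8e */
        { 0xbf, 0x00, 0x8000  },        /* ... otherwise 32 KB sectors   */
        { 0x13, 0x00, 0x10000 },        /* ST M25P80: 64 KB sectors      */
        { 0x1f, 0x00, 0x1000  },        /* Atmel 26DF081A: 4 KB sectors  */
};

uint32_t lookup_blk_size(uint8_t man, uint8_t dev)
{
        for (size_t i = 0; i < sizeof(parts) / sizeof(parts[0]); i++)
                if (parts[i].man_id == man &&
                    (parts[i].flash_id == 0 || parts[i].flash_id == dev))
                        return parts[i].blk_size;
        return 0x10000;                 /* default: 64 KB sectors */
}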
*/ + ha->fdt_block_size = FLASH_BLK_SIZE_64K; + break; + } +done: + ql_dbg(ql_dbg_init, vha, 0x004d, + "FDT[%s]: (0x%x/0x%x) erase=0x%x " + "pr=%x wrtd=0x%x blk=0x%x.\n", + loc, mid, fid, + ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, + ha->fdt_wrt_disable, ha->fdt_block_size); + +} + +static void +qla2xxx_get_idc_param(scsi_qla_host_t *vha) +{ +#define QLA82XX_IDC_PARAM_ADDR 0x003e885c + __le32 *wptr; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + + if (!(IS_P3P_TYPE(ha))) + return; + + wptr = (__force __le32 *)req->ring; + ha->isp_ops->read_optrom(vha, req->ring, QLA82XX_IDC_PARAM_ADDR, 8); + + if (*wptr == cpu_to_le32(0xffffffff)) { + ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT; + ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT; + } else { + ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr); + wptr++; + ha->fcoe_reset_timeout = le32_to_cpu(*wptr); + } + ql_dbg(ql_dbg_init, vha, 0x004e, + "fcoe_dev_init_timeout=%d " + "fcoe_reset_timeout=%d.\n", ha->fcoe_dev_init_timeout, + ha->fcoe_reset_timeout); + return; +} + +int +qla2xxx_get_flash_info(scsi_qla_host_t *vha) +{ + int ret; + uint32_t flt_addr; + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && + !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return QLA_SUCCESS; + + ret = qla2xxx_find_flt_start(vha, &flt_addr); + if (ret != QLA_SUCCESS) + return ret; + + qla2xxx_get_flt_info(vha, flt_addr); + qla2xxx_get_fdt_info(vha); + qla2xxx_get_idc_param(vha); + + return QLA_SUCCESS; +} + +void +qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) +{ +#define NPIV_CONFIG_SIZE (16*1024) + void *data; + __le16 *wptr; + uint16_t cnt, chksum; + int i; + struct qla_npiv_header hdr; + struct qla_npiv_entry *entry; + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && + !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) + return; + + if (ha->flags.nic_core_reset_hdlr_active) + return; + + if (IS_QLA8044(ha)) + return; + + ha->isp_ops->read_optrom(vha, &hdr, ha->flt_region_npiv_conf << 2, + sizeof(struct qla_npiv_header)); + if (hdr.version == cpu_to_le16(0xffff)) + return; + if (hdr.version != cpu_to_le16(1)) { + ql_dbg(ql_dbg_user, vha, 0x7090, + "Unsupported NPIV-Config " + "detected: version=0x%x entries=0x%x checksum=0x%x.\n", + le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), + le16_to_cpu(hdr.checksum)); + return; + } + + data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL); + if (!data) { + ql_log(ql_log_warn, vha, 0x7091, + "Unable to allocate memory for data.\n"); + return; + } + + ha->isp_ops->read_optrom(vha, data, ha->flt_region_npiv_conf << 2, + NPIV_CONFIG_SIZE); + + cnt = (sizeof(hdr) + le16_to_cpu(hdr.entries) * sizeof(*entry)) >> 1; + for (wptr = data, chksum = 0; cnt--; wptr++) + chksum += le16_to_cpu(*wptr); + if (chksum) { + ql_dbg(ql_dbg_user, vha, 0x7092, + "Inconsistent NPIV-Config " + "detected: version=0x%x entries=0x%x checksum=0x%x.\n", + le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), + le16_to_cpu(hdr.checksum)); + goto done; + } + + entry = data + sizeof(struct qla_npiv_header); + cnt = le16_to_cpu(hdr.entries); + for (i = 0; cnt; cnt--, entry++, i++) { + uint16_t flags; + struct fc_vport_identifiers vid; + struct fc_vport *vport; + + memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry)); + + flags = le16_to_cpu(entry->flags); + if (flags == 0xffff) + continue; + if ((flags & BIT_0) == 0) + continue; + + memset(&vid, 0, sizeof(vid)); + vid.roles = FC_PORT_ROLE_FCP_INITIATOR; + 
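/*
 * The NPIV entries read from flash carry the port and node names as eight
 * raw bytes in wire (big-endian) order; the wwn_to_u64() calls just below
 * fold them into the host u64 values the FC transport's vport identifiers
 * expect.  Standalone sketch of an equivalent big-endian fold (the real
 * helper is provided by the SCSI FC transport headers).
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t wwn_bytes_to_u64(const uint8_t wwn[8])
{
        uint64_t v = 0;

        for (int i = 0; i < 8; i++)     /* MSB first, as stored in flash */
                v = (v << 8) | wwn[i];
        return v;
}

int main(void)
{
        const uint8_t wwpn[8] = { 0x21, 0x00, 0x00, 0xe0, 0x8b, 0x01, 0x02, 0x03 };

        printf("wwpn=%llx\n", (unsigned long long)wwn_bytes_to_u64(wwpn));
        return 0;
}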
vid.vport_type = FC_PORTTYPE_NPIV; + vid.disable = false; + vid.port_name = wwn_to_u64(entry->port_name); + vid.node_name = wwn_to_u64(entry->node_name); + + ql_dbg(ql_dbg_user, vha, 0x7093, + "NPIV[%02x]: wwpn=%llx wwnn=%llx vf_id=%#x Q_qos=%#x F_qos=%#x.\n", + cnt, vid.port_name, vid.node_name, + le16_to_cpu(entry->vf_id), + entry->q_qos, entry->f_qos); + + if (i < QLA_PRECONFIG_VPORTS) { + vport = fc_vport_create(vha->host, 0, &vid); + if (!vport) + ql_log(ql_log_warn, vha, 0x7094, + "NPIV-Config Failed to create vport [%02x]: wwpn=%llx wwnn=%llx.\n", + cnt, vid.port_name, vid.node_name); + } + } +done: + kfree(data); +} + +static int +qla24xx_unprotect_flash(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + if (ha->flags.fac_supported) + return qla81xx_fac_do_write_enable(vha, 1); + + /* Enable flash write. */ + wrt_reg_dword(®->ctrl_status, + rd_reg_dword(®->ctrl_status) | CSRX_FLASH_ENABLE); + rd_reg_dword(®->ctrl_status); /* PCI Posting. */ + + if (!ha->fdt_wrt_disable) + goto done; + + /* Disable flash write-protection, first clear SR protection bit */ + qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0); + /* Then write zero again to clear remaining SR bits.*/ + qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0); +done: + return QLA_SUCCESS; +} + +static int +qla24xx_protect_flash(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + ulong cnt = 300; + uint32_t faddr, dword; + + if (ha->flags.fac_supported) + return qla81xx_fac_do_write_enable(vha, 0); + + if (!ha->fdt_wrt_disable) + goto skip_wrt_protect; + + /* Enable flash write-protection and wait for completion. */ + faddr = flash_conf_addr(ha, 0x101); + qla24xx_write_flash_dword(ha, faddr, ha->fdt_wrt_disable); + faddr = flash_conf_addr(ha, 0x5); + while (cnt--) { + if (!qla24xx_read_flash_dword(ha, faddr, &dword)) { + if (!(dword & BIT_0)) + break; + } + udelay(10); + } + +skip_wrt_protect: + /* Disable flash write. 
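/*
 * The flash writer below (qla24xx_write_flash_data) only erases when it
 * crosses into a new sector, using power-of-two mask arithmetic: with
 * rest_addr = (sector size in dwords) - 1, a dword offset starts a sector
 * exactly when (faddr & rest_addr) == 0, and faddr & ~rest_addr is the
 * sector base handed to the erase command.  Minimal standalone illustration
 * assuming a 64 KB sector (0x4000 dwords).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t blk_dwords = 0x10000 >> 2;     /* 64 KB sector, in dwords */
        uint32_t rest_addr = blk_dwords - 1;    /* 0x3fff */
        uint32_t sec_mask = ~rest_addr;

        for (uint32_t faddr = 0x3ffe; faddr <= 0x4002; faddr++)
                printf("faddr %#x: %s sector %#x\n", faddr,
                       (faddr & rest_addr) ? "inside" : "starts",
                       faddr & sec_mask);
        return 0;
}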
*/ + wrt_reg_dword(®->ctrl_status, + rd_reg_dword(®->ctrl_status) & ~CSRX_FLASH_ENABLE); + + return QLA_SUCCESS; +} + +static int +qla24xx_erase_sector(scsi_qla_host_t *vha, uint32_t fdata) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t start, finish; + + if (ha->flags.fac_supported) { + start = fdata >> 2; + finish = start + (ha->fdt_block_size >> 2) - 1; + return qla81xx_fac_erase_sector(vha, flash_data_addr(ha, + start), flash_data_addr(ha, finish)); + } + + return qla24xx_write_flash_dword(ha, ha->fdt_erase_cmd, + (fdata & 0xff00) | ((fdata << 16) & 0xff0000) | + ((fdata >> 16) & 0xff)); +} + +static int +qla24xx_write_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr, + uint32_t dwords) +{ + int ret; + ulong liter; + ulong dburst = OPTROM_BURST_DWORDS; /* burst size in dwords */ + uint32_t sec_mask, rest_addr, fdata; + dma_addr_t optrom_dma; + void *optrom = NULL; + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + goto next; + + /* Allocate dma buffer for burst write */ + optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, + &optrom_dma, GFP_KERNEL); + if (!optrom) { + ql_log(ql_log_warn, vha, 0x7095, + "Failed allocate burst (%x bytes)\n", OPTROM_BURST_SIZE); + } + +next: + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Unprotect flash...\n"); + ret = qla24xx_unprotect_flash(vha); + if (ret) { + ql_log(ql_log_warn, vha, 0x7096, + "Failed to unprotect flash.\n"); + goto done; + } + + rest_addr = (ha->fdt_block_size >> 2) - 1; + sec_mask = ~rest_addr; + for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) { + fdata = (faddr & sec_mask) << 2; + + /* Are we at the beginning of a sector? */ + if (!(faddr & rest_addr)) { + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Erase sector %#x...\n", faddr); + + ret = qla24xx_erase_sector(vha, fdata); + if (ret) { + ql_dbg(ql_dbg_user, vha, 0x7007, + "Failed to erase sector %x.\n", faddr); + break; + } + } + + if (optrom) { + /* If smaller than a burst remaining */ + if (dwords - liter < dburst) + dburst = dwords - liter; + + /* Copy to dma buffer */ + memcpy(optrom, dwptr, dburst << 2); + + /* Burst write */ + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Write burst (%#lx dwords)...\n", dburst); + ret = qla2x00_load_ram(vha, optrom_dma, + flash_data_addr(ha, faddr), dburst); + if (!ret) { + liter += dburst - 1; + faddr += dburst - 1; + dwptr += dburst - 1; + continue; + } + + ql_log(ql_log_warn, vha, 0x7097, + "Failed burst-write at %x (%p/%#llx)....\n", + flash_data_addr(ha, faddr), optrom, + (u64)optrom_dma); + + dma_free_coherent(&ha->pdev->dev, + OPTROM_BURST_SIZE, optrom, optrom_dma); + optrom = NULL; + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + break; + ql_log(ql_log_warn, vha, 0x7098, + "Reverting to slow write...\n"); + } + + /* Slow write */ + ret = qla24xx_write_flash_dword(ha, + flash_data_addr(ha, faddr), le32_to_cpu(*dwptr)); + if (ret) { + ql_dbg(ql_dbg_user, vha, 0x7006, + "Failed slow write %x (%x)\n", faddr, *dwptr); + break; + } + } + + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Protect flash...\n"); + ret = qla24xx_protect_flash(vha); + if (ret) + ql_log(ql_log_warn, vha, 0x7099, + "Failed to protect flash\n"); +done: + if (optrom) + dma_free_coherent(&ha->pdev->dev, + OPTROM_BURST_SIZE, optrom, optrom_dma); + + return ret; +} + +uint8_t * +qla2x00_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, + uint32_t bytes) +{ + uint32_t i; + __le16 *wptr; + struct 
qla_hw_data *ha = vha->hw; + + /* Word reads to NVRAM via registers. */ + wptr = buf; + qla2x00_lock_nvram_access(ha); + for (i = 0; i < bytes >> 1; i++, naddr++) + wptr[i] = cpu_to_le16(qla2x00_get_nvram_word(ha, + naddr)); + qla2x00_unlock_nvram_access(ha); + + return buf; +} + +uint8_t * +qla24xx_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, + uint32_t bytes) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t *dwptr = buf; + uint32_t i; + + if (IS_P3P_TYPE(ha)) + return buf; + + /* Dword reads to flash. */ + naddr = nvram_data_addr(ha, naddr); + bytes >>= 2; + for (i = 0; i < bytes; i++, naddr++, dwptr++) { + if (qla24xx_read_flash_dword(ha, naddr, dwptr)) + break; + cpu_to_le32s(dwptr); + } + + return buf; +} + +int +qla2x00_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, + uint32_t bytes) +{ + int ret, stat; + uint32_t i; + uint16_t *wptr; + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + + ret = QLA_SUCCESS; + + spin_lock_irqsave(&ha->hardware_lock, flags); + qla2x00_lock_nvram_access(ha); + + /* Disable NVRAM write-protection. */ + stat = qla2x00_clear_nvram_protection(ha); + + wptr = (uint16_t *)buf; + for (i = 0; i < bytes >> 1; i++, naddr++) { + qla2x00_write_nvram_word(ha, naddr, + cpu_to_le16(*wptr)); + wptr++; + } + + /* Enable NVRAM write-protection. */ + qla2x00_set_nvram_protection(ha, stat); + + qla2x00_unlock_nvram_access(ha); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return ret; +} + +int +qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, + uint32_t bytes) +{ + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + __le32 *dwptr = buf; + uint32_t i; + int ret; + + ret = QLA_SUCCESS; + + if (IS_P3P_TYPE(ha)) + return ret; + + /* Enable flash write. */ + wrt_reg_dword(®->ctrl_status, + rd_reg_dword(®->ctrl_status) | CSRX_FLASH_ENABLE); + rd_reg_dword(®->ctrl_status); /* PCI Posting. */ + + /* Disable NVRAM write-protection. */ + qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0); + qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0); + + /* Dword writes to flash. */ + naddr = nvram_data_addr(ha, naddr); + bytes >>= 2; + for (i = 0; i < bytes; i++, naddr++, dwptr++) { + if (qla24xx_write_flash_dword(ha, naddr, le32_to_cpu(*dwptr))) { + ql_dbg(ql_dbg_user, vha, 0x709a, + "Unable to program nvram address=%x data=%x.\n", + naddr, *dwptr); + break; + } + } + + /* Enable NVRAM write-protection. */ + qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0x8c); + + /* Disable flash write. */ + wrt_reg_dword(®->ctrl_status, + rd_reg_dword(®->ctrl_status) & ~CSRX_FLASH_ENABLE); + rd_reg_dword(®->ctrl_status); /* PCI Posting. */ + + return ret; +} + +uint8_t * +qla25xx_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, + uint32_t bytes) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t *dwptr = buf; + uint32_t i; + + /* Dword reads to flash. 
*/ + naddr = flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr); + bytes >>= 2; + for (i = 0; i < bytes; i++, naddr++, dwptr++) { + if (qla24xx_read_flash_dword(ha, naddr, dwptr)) + break; + + cpu_to_le32s(dwptr); + } + + return buf; +} + +#define RMW_BUFFER_SIZE (64 * 1024) +int +qla25xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, + uint32_t bytes) +{ + struct qla_hw_data *ha = vha->hw; + uint8_t *dbuf = vmalloc(RMW_BUFFER_SIZE); + + if (!dbuf) + return QLA_MEMORY_ALLOC_FAILED; + ha->isp_ops->read_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2, + RMW_BUFFER_SIZE); + memcpy(dbuf + (naddr << 2), buf, bytes); + ha->isp_ops->write_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2, + RMW_BUFFER_SIZE); + vfree(dbuf); + + return QLA_SUCCESS; +} + +static inline void +qla2x00_flip_colors(struct qla_hw_data *ha, uint16_t *pflags) +{ + if (IS_QLA2322(ha)) { + /* Flip all colors. */ + if (ha->beacon_color_state == QLA_LED_ALL_ON) { + /* Turn off. */ + ha->beacon_color_state = 0; + *pflags = GPIO_LED_ALL_OFF; + } else { + /* Turn on. */ + ha->beacon_color_state = QLA_LED_ALL_ON; + *pflags = GPIO_LED_RGA_ON; + } + } else { + /* Flip green led only. */ + if (ha->beacon_color_state == QLA_LED_GRN_ON) { + /* Turn off. */ + ha->beacon_color_state = 0; + *pflags = GPIO_LED_GREEN_OFF_AMBER_OFF; + } else { + /* Turn on. */ + ha->beacon_color_state = QLA_LED_GRN_ON; + *pflags = GPIO_LED_GREEN_ON_AMBER_OFF; + } + } +} + +#define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r)) + +void +qla2x00_beacon_blink(struct scsi_qla_host *vha) +{ + uint16_t gpio_enable; + uint16_t gpio_data; + uint16_t led_color = 0; + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + if (IS_P3P_TYPE(ha)) + return; + + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Save the Original GPIOE. */ + if (ha->pio_address) { + gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe)); + gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod)); + } else { + gpio_enable = rd_reg_word(®->gpioe); + gpio_data = rd_reg_word(®->gpiod); + } + + /* Set the modified gpio_enable values */ + gpio_enable |= GPIO_LED_MASK; + + if (ha->pio_address) { + WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable); + } else { + wrt_reg_word(®->gpioe, gpio_enable); + rd_reg_word(®->gpioe); + } + + qla2x00_flip_colors(ha, &led_color); + + /* Clear out any previously set LED color. */ + gpio_data &= ~GPIO_LED_MASK; + + /* Set the new input LED color to GPIOD. */ + gpio_data |= led_color; + + /* Set the modified gpio_data values */ + if (ha->pio_address) { + WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data); + } else { + wrt_reg_word(®->gpiod, gpio_data); + rd_reg_word(®->gpiod); + } + + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +int +qla2x00_beacon_on(struct scsi_qla_host *vha) +{ + uint16_t gpio_enable; + uint16_t gpio_data; + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; + ha->fw_options[1] |= FO1_DISABLE_GPIO6_7; + + if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x709b, + "Unable to update fw options (beacon on).\n"); + return QLA_FUNCTION_FAILED; + } + + /* Turn off LEDs. 
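/*
 * The beacon routines above and below all follow the same read-modify-write
 * pattern on the GPIO enable/data registers: read the current value, OR in
 * the LED enable bits, clear only the LED colour field, then OR in the new
 * colour before writing back, so unrelated GPIO bits are preserved.
 * Standalone sketch of that field update; the mask and values here are
 * illustrative, not the hardware's.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_LED_MASK   0x00c0u         /* illustrative 2-bit colour field */

static uint16_t set_led_field(uint16_t gpiod, uint16_t colour)
{
        gpiod &= ~DEMO_LED_MASK;                /* clear old colour only  */
        gpiod |= colour & DEMO_LED_MASK;        /* set the new colour     */
        return gpiod;                           /* other GPIO bits intact */
}

int main(void)
{
        uint16_t gpiod = 0x1234;                /* pretend register value */

        printf("%#x -> %#x\n", gpiod, set_led_field(gpiod, 0x0080));
        return 0;
}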
*/ + spin_lock_irqsave(&ha->hardware_lock, flags); + if (ha->pio_address) { + gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe)); + gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod)); + } else { + gpio_enable = rd_reg_word(®->gpioe); + gpio_data = rd_reg_word(®->gpiod); + } + gpio_enable |= GPIO_LED_MASK; + + /* Set the modified gpio_enable values. */ + if (ha->pio_address) { + WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable); + } else { + wrt_reg_word(®->gpioe, gpio_enable); + rd_reg_word(®->gpioe); + } + + /* Clear out previously set LED colour. */ + gpio_data &= ~GPIO_LED_MASK; + if (ha->pio_address) { + WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data); + } else { + wrt_reg_word(®->gpiod, gpio_data); + rd_reg_word(®->gpiod); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* + * Let the per HBA timer kick off the blinking process based on + * the following flags. No need to do anything else now. + */ + ha->beacon_blink_led = 1; + ha->beacon_color_state = 0; + + return QLA_SUCCESS; +} + +int +qla2x00_beacon_off(struct scsi_qla_host *vha) +{ + int rval = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + ha->beacon_blink_led = 0; + + /* Set the on flag so when it gets flipped it will be off. */ + if (IS_QLA2322(ha)) + ha->beacon_color_state = QLA_LED_ALL_ON; + else + ha->beacon_color_state = QLA_LED_GRN_ON; + + ha->isp_ops->beacon_blink(vha); /* This turns green LED off */ + + ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; + ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7; + + rval = qla2x00_set_fw_options(vha, ha->fw_options); + if (rval != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0x709c, + "Unable to update fw options (beacon off).\n"); + return rval; +} + + +static inline void +qla24xx_flip_colors(struct qla_hw_data *ha, uint16_t *pflags) +{ + /* Flip all colors. */ + if (ha->beacon_color_state == QLA_LED_ALL_ON) { + /* Turn off. */ + ha->beacon_color_state = 0; + *pflags = 0; + } else { + /* Turn on. */ + ha->beacon_color_state = QLA_LED_ALL_ON; + *pflags = GPDX_LED_YELLOW_ON | GPDX_LED_AMBER_ON; + } +} + +void +qla24xx_beacon_blink(struct scsi_qla_host *vha) +{ + uint16_t led_color = 0; + uint32_t gpio_data; + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + /* Save the Original GPIOD. */ + spin_lock_irqsave(&ha->hardware_lock, flags); + gpio_data = rd_reg_dword(®->gpiod); + + /* Enable the gpio_data reg for update. */ + gpio_data |= GPDX_LED_UPDATE_MASK; + + wrt_reg_dword(®->gpiod, gpio_data); + gpio_data = rd_reg_dword(®->gpiod); + + /* Set the color bits. */ + qla24xx_flip_colors(ha, &led_color); + + /* Clear out any previously set LED color. */ + gpio_data &= ~GPDX_LED_COLOR_MASK; + + /* Set the new input LED color to GPIOD. */ + gpio_data |= led_color; + + /* Set the modified gpio_data values. 
+	wrt_reg_dword(&reg->gpiod, gpio_data);
+	gpio_data = rd_reg_dword(&reg->gpiod);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static uint32_t
+qla83xx_select_led_port(struct qla_hw_data *ha)
+{
+	uint32_t led_select_value = 0;
+
+	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+		goto out;
+
+	if (ha->port_no == 0)
+		led_select_value = QLA83XX_LED_PORT0;
+	else
+		led_select_value = QLA83XX_LED_PORT1;
+
+out:
+	return led_select_value;
+}
+
+void
+qla83xx_beacon_blink(struct scsi_qla_host *vha)
+{
+	uint32_t led_select_value;
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t led_cfg[6];
+	uint16_t orig_led_cfg[6];
+	uint32_t led_10_value, led_43_value;
+
+	if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha) &&
+	    !IS_QLA28XX(ha))
+		return;
+
+	if (!ha->beacon_blink_led)
+		return;
+
+	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+		qla2x00_write_ram_word(vha, 0x1003, 0x40000230);
+		qla2x00_write_ram_word(vha, 0x1004, 0x40000230);
+	} else if (IS_QLA2031(ha)) {
+		led_select_value = qla83xx_select_led_port(ha);
+
+		qla83xx_wr_reg(vha, led_select_value, 0x40000230);
+		qla83xx_wr_reg(vha, led_select_value + 4, 0x40000230);
+	} else if (IS_QLA8031(ha)) {
+		led_select_value = qla83xx_select_led_port(ha);
+
+		qla83xx_rd_reg(vha, led_select_value, &led_10_value);
+		qla83xx_rd_reg(vha, led_select_value + 0x10, &led_43_value);
+		qla83xx_wr_reg(vha, led_select_value, 0x01f44000);
+		msleep(500);
+		qla83xx_wr_reg(vha, led_select_value, 0x400001f4);
+		msleep(1000);
+		qla83xx_wr_reg(vha, led_select_value, led_10_value);
+		qla83xx_wr_reg(vha, led_select_value + 0x10, led_43_value);
+	} else if (IS_QLA81XX(ha)) {
+		int rval;
+
+		/* Save Current */
+		rval = qla81xx_get_led_config(vha, orig_led_cfg);
+		/* Do the blink */
+		if (rval == QLA_SUCCESS) {
+			if (IS_QLA81XX(ha)) {
+				led_cfg[0] = 0x4000;
+				led_cfg[1] = 0x2000;
+				led_cfg[2] = 0;
+				led_cfg[3] = 0;
+				led_cfg[4] = 0;
+				led_cfg[5] = 0;
+			} else {
+				led_cfg[0] = 0x4000;
+				led_cfg[1] = 0x4000;
+				led_cfg[2] = 0x4000;
+				led_cfg[3] = 0x2000;
+				led_cfg[4] = 0;
+				led_cfg[5] = 0x2000;
+			}
+			rval = qla81xx_set_led_config(vha, led_cfg);
+			msleep(1000);
+			if (IS_QLA81XX(ha)) {
+				led_cfg[0] = 0x4000;
+				led_cfg[1] = 0x2000;
+				led_cfg[2] = 0;
+			} else {
+				led_cfg[0] = 0x4000;
+				led_cfg[1] = 0x2000;
+				led_cfg[2] = 0x4000;
+				led_cfg[3] = 0x4000;
+				led_cfg[4] = 0;
+				led_cfg[5] = 0x2000;
+			}
+			rval = qla81xx_set_led_config(vha, led_cfg);
+		}
+		/* On exit, restore original (presumes no status change) */
+		qla81xx_set_led_config(vha, orig_led_cfg);
+	}
+}
+
+int
+qla24xx_beacon_on(struct scsi_qla_host *vha)
+{
+	uint32_t gpio_data;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	if (IS_P3P_TYPE(ha))
+		return QLA_SUCCESS;
+
+	if (IS_QLA8031(ha) || IS_QLA81XX(ha))
+		goto skip_gpio; /* let blink handle it */
+
+	if (ha->beacon_blink_led == 0) {
+		/* Enable firmware for update */
+		ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
+
+		if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS)
+			return QLA_FUNCTION_FAILED;
+
+		if (qla2x00_get_fw_options(vha, ha->fw_options) !=
+		    QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x7009,
+			    "Unable to update fw options (beacon on).\n");
+			return QLA_FUNCTION_FAILED;
+		}
+
+		if (IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+			goto skip_gpio;
+
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		gpio_data = rd_reg_dword(&reg->gpiod);
+
+		/* Enable the gpio_data reg for update. */
+		gpio_data |= GPDX_LED_UPDATE_MASK;
+		wrt_reg_dword(&reg->gpiod, gpio_data);
+		rd_reg_dword(&reg->gpiod);
+
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	}
+
+	/* So all colors blink together. */
+	ha->beacon_color_state = 0;
+
+skip_gpio:
+	/* Let the per HBA timer kick off the blinking process. */
+	ha->beacon_blink_led = 1;
+
+	return QLA_SUCCESS;
+}
+
+int
+qla24xx_beacon_off(struct scsi_qla_host *vha)
+{
+	uint32_t gpio_data;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	if (IS_P3P_TYPE(ha))
+		return QLA_SUCCESS;
+
+	if (!ha->flags.fw_started)
+		return QLA_SUCCESS;
+
+	ha->beacon_blink_led = 0;
+
+	if (IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+		goto set_fw_options;
+
+	if (IS_QLA8031(ha) || IS_QLA81XX(ha))
+		return QLA_SUCCESS;
+
+	ha->beacon_color_state = QLA_LED_ALL_ON;
+
+	ha->isp_ops->beacon_blink(vha);	/* Will flip to all off. */
+
+	/* Give control back to firmware. */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	gpio_data = rd_reg_dword(&reg->gpiod);
+
+	/* Disable the gpio_data reg for update. */
+	gpio_data &= ~GPDX_LED_UPDATE_MASK;
+	wrt_reg_dword(&reg->gpiod, gpio_data);
+	rd_reg_dword(&reg->gpiod);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+set_fw_options:
+	ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
+
+	if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x704d,
+		    "Unable to update fw options (beacon on).\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x704e,
+		    "Unable to update fw options (beacon on).\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	return QLA_SUCCESS;
+}
+
+
+/*
+ * Flash support routines
+ */
+
+/**
+ * qla2x00_flash_enable() - Setup flash for reading and writing.
+ * @ha: HA context
+ */
+static void
+qla2x00_flash_enable(struct qla_hw_data *ha)
+{
+	uint16_t data;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	data = rd_reg_word(&reg->ctrl_status);
+	data |= CSR_FLASH_ENABLE;
+	wrt_reg_word(&reg->ctrl_status, data);
+	rd_reg_word(&reg->ctrl_status);	/* PCI Posting. */
+}
+
+/**
+ * qla2x00_flash_disable() - Disable flash and allow RISC to run.
+ * @ha: HA context
+ */
+static void
+qla2x00_flash_disable(struct qla_hw_data *ha)
+{
+	uint16_t data;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	data = rd_reg_word(&reg->ctrl_status);
+	data &= ~(CSR_FLASH_ENABLE);
+	wrt_reg_word(&reg->ctrl_status, data);
+	rd_reg_word(&reg->ctrl_status);	/* PCI Posting. */
+}
+
+/**
+ * qla2x00_read_flash_byte() - Reads a byte from flash
+ * @ha: HA context
+ * @addr: Address in flash to read
+ *
+ * A word is read from the chip, but, only the lower byte is valid.
+ *
+ * Returns the byte read from flash @addr.
+ */
+static uint8_t
+qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
+{
+	uint16_t data;
+	uint16_t bank_select;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	bank_select = rd_reg_word(&reg->ctrl_status);
+
+	if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
+		/* Specify 64K address range: */
+		/* clear out Module Select and Flash Address bits [19:16]. */
+		bank_select &= ~0xf8;
+		bank_select |= addr >> 12 & 0xf0;
+		bank_select |= CSR_FLASH_64K_BANK;
+		wrt_reg_word(&reg->ctrl_status, bank_select);
+		rd_reg_word(&reg->ctrl_status);	/* PCI Posting. */
+
+		wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+		data = rd_reg_word(&reg->flash_data);
+
+		return (uint8_t)data;
+	}
+
+	/* Setup bit 16 of flash address. */
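+	/*
+	 * Editor's note: an illustrative aside, not driver code. The
+	 * flash_address register on these legacy parts is only 16 bits wide,
+	 * so bit 16 of the byte address is carried in the CSR_FLASH_64K_BANK
+	 * bit of ctrl_status and is only rewritten when the bank actually
+	 * changes.  Roughly:
+	 *
+	 *	byte address 0x0abcd: bank bit clear, flash_address = 0xabcd
+	 *	byte address 0x1abcd: bank bit set,   flash_address = 0xabcd
+	 */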
+	if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
+		bank_select |= CSR_FLASH_64K_BANK;
+		wrt_reg_word(&reg->ctrl_status, bank_select);
+		rd_reg_word(&reg->ctrl_status);	/* PCI Posting. */
+	} else if (((addr & BIT_16) == 0) &&
+	    (bank_select & CSR_FLASH_64K_BANK)) {
+		bank_select &= ~(CSR_FLASH_64K_BANK);
+		wrt_reg_word(&reg->ctrl_status, bank_select);
+		rd_reg_word(&reg->ctrl_status);	/* PCI Posting. */
+	}
+
+	/* Always perform IO mapped accesses to the FLASH registers. */
+	if (ha->pio_address) {
+		uint16_t data2;
+
+		WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
+		do {
+			data = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
+			barrier();
+			cpu_relax();
+			data2 = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
+		} while (data != data2);
+	} else {
+		wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+		data = qla2x00_debounce_register(&reg->flash_data);
+	}
+
+	return (uint8_t)data;
+}
+
+/**
+ * qla2x00_write_flash_byte() - Write a byte to flash
+ * @ha: HA context
+ * @addr: Address in flash to write
+ * @data: Data to write
+ */
+static void
+qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
+{
+	uint16_t bank_select;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	bank_select = rd_reg_word(&reg->ctrl_status);
+	if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
+		/* Specify 64K address range: */
+		/* clear out Module Select and Flash Address bits [19:16]. */
+		bank_select &= ~0xf8;
+		bank_select |= addr >> 12 & 0xf0;
+		bank_select |= CSR_FLASH_64K_BANK;
+		wrt_reg_word(&reg->ctrl_status, bank_select);
+		rd_reg_word(&reg->ctrl_status);	/* PCI Posting. */
+
+		wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+		rd_reg_word(&reg->ctrl_status);	/* PCI Posting. */
+		wrt_reg_word(&reg->flash_data, (uint16_t)data);
+		rd_reg_word(&reg->ctrl_status);	/* PCI Posting. */
+
+		return;
+	}
+
+	/* Setup bit 16 of flash address. */
+	if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
+		bank_select |= CSR_FLASH_64K_BANK;
+		wrt_reg_word(&reg->ctrl_status, bank_select);
+		rd_reg_word(&reg->ctrl_status);	/* PCI Posting. */
+	} else if (((addr & BIT_16) == 0) &&
+	    (bank_select & CSR_FLASH_64K_BANK)) {
+		bank_select &= ~(CSR_FLASH_64K_BANK);
+		wrt_reg_word(&reg->ctrl_status, bank_select);
+		rd_reg_word(&reg->ctrl_status);	/* PCI Posting. */
+	}
+
+	/* Always perform IO mapped accesses to the FLASH registers. */
+	if (ha->pio_address) {
+		WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
+		WRT_REG_WORD_PIO(PIO_REG(ha, flash_data), (uint16_t)data);
+	} else {
+		wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+		rd_reg_word(&reg->ctrl_status);	/* PCI Posting. */
+		wrt_reg_word(&reg->flash_data, (uint16_t)data);
+		rd_reg_word(&reg->ctrl_status);	/* PCI Posting. */
+	}
+}
+
+/**
+ * qla2x00_poll_flash() - Polls flash for completion.
+ * @ha: HA context
+ * @addr: Address in flash to poll
+ * @poll_data: Data to be polled
+ * @man_id: Flash manufacturer ID
+ * @flash_id: Flash ID
+ *
+ * This function polls the device until bit 7 of what is read matches data
+ * bit 7 or until data bit 5 becomes a 1.  If that happens, the flash ROM
+ * timed out (a fatal error).  The flash book recommends reading bit 7 again
+ * after reading bit 5 as a 1.
+ *
+ * Returns 0 on success, else non-zero.
+ */
+static int
+qla2x00_poll_flash(struct qla_hw_data *ha, uint32_t addr, uint8_t poll_data,
+    uint8_t man_id, uint8_t flash_id)
+{
+	int status;
+	uint8_t flash_data;
+	uint32_t cnt;
+
+	status = 1;
+
+	/* Wait for 30 seconds for command to finish.
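+	 *
+	 * Editor's note (illustrative, not part of the patch): the loop below
+	 * is classic DQ7 data polling.  While a program or erase is still in
+	 * progress the part returns the complement of the programmed data on
+	 * bit 7, so the operation is complete once bit 7 matches poll_data;
+	 * bit 5 (DQ5) going high means the part's internal timer expired, and
+	 * on parts that implement DQ5 the loop then samples bit 7 a couple
+	 * more times before giving up.  The time budget works out as roughly
+	 * 3,000,000 iterations * 10 us per pass = 30 seconds.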
*/ + poll_data &= BIT_7; + for (cnt = 3000000; cnt; cnt--) { + flash_data = qla2x00_read_flash_byte(ha, addr); + if ((flash_data & BIT_7) == poll_data) { + status = 0; + break; + } + + if (man_id != 0x40 && man_id != 0xda) { + if ((flash_data & BIT_5) && cnt > 2) + cnt = 2; + } + udelay(10); + barrier(); + cond_resched(); + } + return status; +} + +/** + * qla2x00_program_flash_address() - Programs a flash address + * @ha: HA context + * @addr: Address in flash to program + * @data: Data to be written in flash + * @man_id: Flash manufacturer ID + * @flash_id: Flash ID + * + * Returns 0 on success, else non-zero. + */ +static int +qla2x00_program_flash_address(struct qla_hw_data *ha, uint32_t addr, + uint8_t data, uint8_t man_id, uint8_t flash_id) +{ + /* Write Program Command Sequence. */ + if (IS_OEM_001(ha)) { + qla2x00_write_flash_byte(ha, 0xaaa, 0xaa); + qla2x00_write_flash_byte(ha, 0x555, 0x55); + qla2x00_write_flash_byte(ha, 0xaaa, 0xa0); + qla2x00_write_flash_byte(ha, addr, data); + } else { + if (man_id == 0xda && flash_id == 0xc1) { + qla2x00_write_flash_byte(ha, addr, data); + if (addr & 0x7e) + return 0; + } else { + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); + qla2x00_write_flash_byte(ha, 0x5555, 0xa0); + qla2x00_write_flash_byte(ha, addr, data); + } + } + + udelay(150); + + /* Wait for write to complete. */ + return qla2x00_poll_flash(ha, addr, data, man_id, flash_id); +} + +/** + * qla2x00_erase_flash() - Erase the flash. + * @ha: HA context + * @man_id: Flash manufacturer ID + * @flash_id: Flash ID + * + * Returns 0 on success, else non-zero. + */ +static int +qla2x00_erase_flash(struct qla_hw_data *ha, uint8_t man_id, uint8_t flash_id) +{ + /* Individual Sector Erase Command Sequence */ + if (IS_OEM_001(ha)) { + qla2x00_write_flash_byte(ha, 0xaaa, 0xaa); + qla2x00_write_flash_byte(ha, 0x555, 0x55); + qla2x00_write_flash_byte(ha, 0xaaa, 0x80); + qla2x00_write_flash_byte(ha, 0xaaa, 0xaa); + qla2x00_write_flash_byte(ha, 0x555, 0x55); + qla2x00_write_flash_byte(ha, 0xaaa, 0x10); + } else { + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); + qla2x00_write_flash_byte(ha, 0x5555, 0x80); + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); + qla2x00_write_flash_byte(ha, 0x5555, 0x10); + } + + udelay(150); + + /* Wait for erase to complete. */ + return qla2x00_poll_flash(ha, 0x00, 0x80, man_id, flash_id); +} + +/** + * qla2x00_erase_flash_sector() - Erase a flash sector. + * @ha: HA context + * @addr: Flash sector to erase + * @sec_mask: Sector address mask + * @man_id: Flash manufacturer ID + * @flash_id: Flash ID + * + * Returns 0 on success, else non-zero. + */ +static int +qla2x00_erase_flash_sector(struct qla_hw_data *ha, uint32_t addr, + uint32_t sec_mask, uint8_t man_id, uint8_t flash_id) +{ + /* Individual Sector Erase Command Sequence */ + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); + qla2x00_write_flash_byte(ha, 0x5555, 0x80); + qla2x00_write_flash_byte(ha, 0x5555, 0xaa); + qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); + if (man_id == 0x1f && flash_id == 0x13) + qla2x00_write_flash_byte(ha, addr & sec_mask, 0x10); + else + qla2x00_write_flash_byte(ha, addr & sec_mask, 0x30); + + udelay(150); + + /* Wait for erase to complete. */ + return qla2x00_poll_flash(ha, addr, 0x80, man_id, flash_id); +} + +/** + * qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip. 
+ * @ha: host adapter
+ * @man_id: Flash manufacturer ID
+ * @flash_id: Flash ID
+ */
+static void
+qla2x00_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
+    uint8_t *flash_id)
+{
+	qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+	qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+	qla2x00_write_flash_byte(ha, 0x5555, 0x90);
+	*man_id = qla2x00_read_flash_byte(ha, 0x0000);
+	*flash_id = qla2x00_read_flash_byte(ha, 0x0001);
+	qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+	qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+	qla2x00_write_flash_byte(ha, 0x5555, 0xf0);
+}
+
+static void
+qla2x00_read_flash_data(struct qla_hw_data *ha, uint8_t *tmp_buf,
+    uint32_t saddr, uint32_t length)
+{
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	uint32_t midpoint, ilength;
+	uint8_t data;
+
+	midpoint = length / 2;
+
+	wrt_reg_word(&reg->nvram, 0);
+	rd_reg_word(&reg->nvram);
+	for (ilength = 0; ilength < length; saddr++, ilength++, tmp_buf++) {
+		if (ilength == midpoint) {
+			wrt_reg_word(&reg->nvram, NVR_SELECT);
+			rd_reg_word(&reg->nvram);
+		}
+		data = qla2x00_read_flash_byte(ha, saddr);
+		if (saddr % 100)
+			udelay(10);
+		*tmp_buf = data;
+		cond_resched();
+	}
+}
+
+static inline void
+qla2x00_suspend_hba(struct scsi_qla_host *vha)
+{
+	int cnt;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	/* Suspend HBA. */
+	scsi_block_requests(vha->host);
+	ha->isp_ops->disable_intrs(ha);
+	set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+
+	/* Pause RISC. */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
+	rd_reg_word(&reg->hccr);
+	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
+		for (cnt = 0; cnt < 30000; cnt++) {
+			if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
+				break;
+			udelay(100);
+		}
+	} else {
+		udelay(10);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static inline void
+qla2x00_resume_hba(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Resume HBA. */
+	clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	qla2xxx_wake_dpc(vha);
+	qla2x00_wait_for_chip_reset(vha);
+	scsi_unblock_requests(vha->host);
+}
+
+void *
+qla2x00_read_optrom_data(struct scsi_qla_host *vha, void *buf,
+    uint32_t offset, uint32_t length)
+{
+	uint32_t addr, midpoint;
+	uint8_t *data;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	/* Suspend HBA. */
+	qla2x00_suspend_hba(vha);
+
+	/* Go with read. */
+	midpoint = ha->optrom_size / 2;
+
+	qla2x00_flash_enable(ha);
+	wrt_reg_word(&reg->nvram, 0);
+	rd_reg_word(&reg->nvram);	/* PCI Posting. */
+	for (addr = offset, data = buf; addr < length; addr++, data++) {
+		if (addr == midpoint) {
+			wrt_reg_word(&reg->nvram, NVR_SELECT);
+			rd_reg_word(&reg->nvram);	/* PCI Posting. */
+		}
+
+		*data = qla2x00_read_flash_byte(ha, addr);
+	}
+	qla2x00_flash_disable(ha);
+
+	/* Resume HBA. */
+	qla2x00_resume_hba(vha);
+
+	return buf;
+}
+
+int
+qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf,
+    uint32_t offset, uint32_t length)
+{
+
+	int rval;
+	uint8_t man_id, flash_id, sec_number, *data;
+	uint16_t wd;
+	uint32_t addr, liter, sec_mask, rest_addr;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	/* Suspend HBA. */
+	qla2x00_suspend_hba(vha);
+
+	rval = QLA_SUCCESS;
+	sec_number = 0;
+
+	/* Reset ISP chip. */
+	wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
+
+	/* Go with write. */
+	qla2x00_flash_enable(ha);
+	do {	/* Loop once to provide quick error exit */
+		/* Structure of flash memory based on manufacturer */
+		if (IS_OEM_001(ha)) {
+			/* OEM variant with special flash part. */
+			man_id = flash_id = 0;
+			rest_addr = 0xffff;
+			sec_mask = 0x10000;
+			goto update_flash;
+		}
+		qla2x00_get_flash_manufacturer(ha, &man_id, &flash_id);
+		switch (man_id) {
+		case 0x20: /* ST flash. */
+			if (flash_id == 0xd2 || flash_id == 0xe3) {
+				/*
+				 * ST m29w008at part - 64kb sector size with
+				 * 32kb,8kb,8kb,16kb sectors at memory address
+				 * 0xf0000.
+				 */
+				rest_addr = 0xffff;
+				sec_mask = 0x10000;
+				break;
+			}
+			/*
+			 * ST m29w010b part - 16kb sector size
+			 * Default to 16kb sectors
+			 */
+			rest_addr = 0x3fff;
+			sec_mask = 0x1c000;
+			break;
+		case 0x40: /* Mostel flash. */
+			/* Mostel v29c51001 part - 512 byte sector size. */
+			rest_addr = 0x1ff;
+			sec_mask = 0x1fe00;
+			break;
+		case 0xbf: /* SST flash. */
+			/* SST39sf10 part - 4kb sector size. */
+			rest_addr = 0xfff;
+			sec_mask = 0x1f000;
+			break;
+		case 0xda: /* Winbond flash. */
+			/* Winbond W29EE011 part - 256 byte sector size. */
+			rest_addr = 0x7f;
+			sec_mask = 0x1ff80;
+			break;
+		case 0xc2: /* Macronix flash. */
+			/* 64k sector size. */
+			if (flash_id == 0x38 || flash_id == 0x4f) {
+				rest_addr = 0xffff;
+				sec_mask = 0x10000;
+				break;
+			}
+			fallthrough;
+
+		case 0x1f: /* Atmel flash. */
+			/* 512k sector size. */
+			if (flash_id == 0x13) {
+				rest_addr = 0x7fffffff;
+				sec_mask = 0x80000000;
+				break;
+			}
+			fallthrough;
+
+		case 0x01: /* AMD flash. */
+			if (flash_id == 0x38 || flash_id == 0x40 ||
+			    flash_id == 0x4f) {
+				/* Am29LV081 part - 64kb sector size. */
+				/* Am29LV002BT part - 64kb sector size. */
+				rest_addr = 0xffff;
+				sec_mask = 0x10000;
+				break;
+			} else if (flash_id == 0x3e) {
+				/*
+				 * Am29LV008b part - 64kb sector size with
+				 * 32kb,8kb,8kb,16kb sector at memory address
+				 * h0xf0000.
+				 */
+				rest_addr = 0xffff;
+				sec_mask = 0x10000;
+				break;
+			} else if (flash_id == 0x20 || flash_id == 0x6e) {
+				/*
+				 * Am29LV010 part or AM29f010 - 16kb sector
+				 * size.
+				 */
+				rest_addr = 0x3fff;
+				sec_mask = 0x1c000;
+				break;
+			} else if (flash_id == 0x6d) {
+				/* Am29LV001 part - 8kb sector size. */
+				rest_addr = 0x1fff;
+				sec_mask = 0x1e000;
+				break;
+			}
+			fallthrough;
+		default:
+			/* Default to 16 kb sector size. */
+			rest_addr = 0x3fff;
+			sec_mask = 0x1c000;
+			break;
+		}
+
+update_flash:
+		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
+			if (qla2x00_erase_flash(ha, man_id, flash_id)) {
+				rval = QLA_FUNCTION_FAILED;
+				break;
+			}
+		}
+
+		for (addr = offset, liter = 0; liter < length; liter++,
+		    addr++) {
+			data = buf + liter;
+			/* Are we at the beginning of a sector? */
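+			/*
+			 * Editor's note: a worked example, not driver code.
+			 * rest_addr is the in-sector offset mask and sec_mask
+			 * keeps only the sector-number bits, so with the
+			 * 16 kb geometry (rest_addr 0x3fff, sec_mask 0x1c000):
+			 *
+			 *	addr 0x1c000: (addr & rest_addr) == 0, start
+			 *	of a sector, erase base (addr & sec_mask) =
+			 *	0x1c000
+			 *	addr 0x1c123: (addr & rest_addr) == 0x123,
+			 *	middle of that same sector, nothing to erase
+			 */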
+			if ((addr & rest_addr) == 0) {
+				if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
+					if (addr >= 0x10000UL) {
+						if (((addr >> 12) & 0xf0) &&
+						    ((man_id == 0x01 &&
+							flash_id == 0x3e) ||
+						     (man_id == 0x20 &&
+							 flash_id == 0xd2))) {
+							sec_number++;
+							if (sec_number == 1) {
+								rest_addr = 0x7fff;
+								sec_mask = 0x18000;
+							} else if (
+							    sec_number == 2 ||
+							    sec_number == 3) {
+								rest_addr = 0x1fff;
+								sec_mask = 0x1e000;
+							} else if (
+							    sec_number == 4) {
+								rest_addr = 0x3fff;
+								sec_mask = 0x1c000;
+							}
+						}
+					}
+				} else if (addr == ha->optrom_size / 2) {
+					wrt_reg_word(&reg->nvram, NVR_SELECT);
+					rd_reg_word(&reg->nvram);
+				}
+
+				if (flash_id == 0xda && man_id == 0xc1) {
+					qla2x00_write_flash_byte(ha, 0x5555,
+					    0xaa);
+					qla2x00_write_flash_byte(ha, 0x2aaa,
+					    0x55);
+					qla2x00_write_flash_byte(ha, 0x5555,
+					    0xa0);
+				} else if (!IS_QLA2322(ha) && !IS_QLA6322(ha)) {
+					/* Then erase it */
+					if (qla2x00_erase_flash_sector(ha,
+					    addr, sec_mask, man_id,
+					    flash_id)) {
+						rval = QLA_FUNCTION_FAILED;
+						break;
+					}
+					if (man_id == 0x01 && flash_id == 0x6d)
+						sec_number++;
+				}
+			}
+
+			if (man_id == 0x01 && flash_id == 0x6d) {
+				if (sec_number == 1 &&
+				    addr == (rest_addr - 1)) {
+					rest_addr = 0x0fff;
+					sec_mask = 0x1f000;
+				} else if (sec_number == 3 && (addr & 0x7ffe)) {
+					rest_addr = 0x3fff;
+					sec_mask = 0x1c000;
+				}
+			}
+
+			if (qla2x00_program_flash_address(ha, addr, *data,
+			    man_id, flash_id)) {
+				rval = QLA_FUNCTION_FAILED;
+				break;
+			}
+			cond_resched();
+		}
+	} while (0);
+	qla2x00_flash_disable(ha);
+
+	/* Resume HBA. */
+	qla2x00_resume_hba(vha);
+
+	return rval;
+}
+
+void *
+qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
+    uint32_t offset, uint32_t length)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Suspend HBA. */
+	scsi_block_requests(vha->host);
+	set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+
+	/* Go with read. */
+	qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2);
+
+	/* Resume HBA.
*/ + clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); + scsi_unblock_requests(vha->host); + + return buf; +} + +static int +qla28xx_extract_sfub_and_verify(struct scsi_qla_host *vha, __le32 *buf, + uint32_t len, uint32_t buf_size_without_sfub, uint8_t *sfub_buf) +{ + uint32_t check_sum = 0; + __le32 *p; + int i; + + p = buf + buf_size_without_sfub; + + /* Extract SFUB from end of file */ + memcpy(sfub_buf, (uint8_t *)p, + sizeof(struct secure_flash_update_block)); + + for (i = 0; i < (sizeof(struct secure_flash_update_block) >> 2); i++) + check_sum += le32_to_cpu(p[i]); + + check_sum = (~check_sum) + 1; + + if (check_sum != le32_to_cpu(p[i])) { + ql_log(ql_log_warn, vha, 0x7097, + "SFUB checksum failed, 0x%x, 0x%x\n", + check_sum, le32_to_cpu(p[i])); + return QLA_COMMAND_ERROR; + } + + return QLA_SUCCESS; +} + +static int +qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start, + struct qla_flt_region *region) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_flt_header *flt = ha->flt; + struct qla_flt_region *flt_reg = &flt->region[0]; + uint16_t cnt; + int rval = QLA_FUNCTION_FAILED; + + if (!ha->flt) + return QLA_FUNCTION_FAILED; + + cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region); + for (; cnt; cnt--, flt_reg++) { + if (le32_to_cpu(flt_reg->start) == start) { + memcpy((uint8_t *)region, flt_reg, + sizeof(struct qla_flt_region)); + rval = QLA_SUCCESS; + break; + } + } + + return rval; +} + +static int +qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, + uint32_t dwords) +{ + struct qla_hw_data *ha = vha->hw; + ulong liter; + ulong dburst = OPTROM_BURST_DWORDS; /* burst size in dwords */ + uint32_t sec_mask, rest_addr, fdata; + void *optrom = NULL; + dma_addr_t optrom_dma; + int rval, ret; + struct secure_flash_update_block *sfub; + dma_addr_t sfub_dma; + uint32_t offset = faddr << 2; + uint32_t buf_size_without_sfub = 0; + struct qla_flt_region region; + bool reset_to_rom = false; + uint32_t risc_size, risc_attr = 0; + __be32 *fw_array = NULL; + + /* Retrieve region info - must be a start address passed in */ + rval = qla28xx_get_flash_region(vha, offset, ®ion); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xffff, + "Invalid address %x - not a region start address\n", + offset); + goto done; + } + + /* Allocate dma buffer for burst write */ + optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, + &optrom_dma, GFP_KERNEL); + if (!optrom) { + ql_log(ql_log_warn, vha, 0x7095, + "Failed allocate burst (%x bytes)\n", OPTROM_BURST_SIZE); + rval = QLA_COMMAND_ERROR; + goto done; + } + + /* + * If adapter supports secure flash and region is secure + * extract secure flash update block (SFUB) and verify + */ + if (ha->flags.secure_adapter && region.attribute) { + + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "Region %x is secure\n", le16_to_cpu(region.code)); + + switch (le16_to_cpu(region.code)) { + case FLT_REG_FW: + case FLT_REG_FW_SEC_27XX: + case FLT_REG_MPI_PRI_28XX: + case FLT_REG_MPI_SEC_28XX: + fw_array = (__force __be32 *)dwptr; + + /* 1st fw array */ + risc_size = be32_to_cpu(fw_array[3]); + risc_attr = be32_to_cpu(fw_array[9]); + + buf_size_without_sfub = risc_size; + fw_array += risc_size; + + /* 2nd fw array */ + risc_size = be32_to_cpu(fw_array[3]); + + buf_size_without_sfub += risc_size; + fw_array += risc_size; + + /* 1st dump template */ + risc_size = be32_to_cpu(fw_array[2]); + + /* skip header and ignore checksum */ + buf_size_without_sfub += risc_size; + fw_array += risc_size; + 
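+			/*
+			 * Editor's note: an illustrative sketch, not driver
+			 * code.  The SFUB that qla28xx_extract_sfub_and_verify()
+			 * pulls off the tail of the image is protected by a
+			 * two's-complement sum over its dwords, with the
+			 * checksum dword stored immediately after the block:
+			 *
+			 *	u32 sum = 0;
+			 *	for (i = 0; i < SFUB_DWORDS; i++)
+			 *		sum += le32_to_cpu(sfub[i]);
+			 *	ok = (~sum + 1) == le32_to_cpu(sfub[SFUB_DWORDS]);
+			 *
+			 * where SFUB_DWORDS stands for
+			 * sizeof(struct secure_flash_update_block) >> 2.
+			 */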
+ if (risc_attr & BIT_9) { + /* 2nd dump template */ + risc_size = be32_to_cpu(fw_array[2]); + + /* skip header and ignore checksum */ + buf_size_without_sfub += risc_size; + fw_array += risc_size; + } + break; + + case FLT_REG_PEP_PRI_28XX: + case FLT_REG_PEP_SEC_28XX: + fw_array = (__force __be32 *)dwptr; + + /* 1st fw array */ + risc_size = be32_to_cpu(fw_array[3]); + risc_attr = be32_to_cpu(fw_array[9]); + + buf_size_without_sfub = risc_size; + fw_array += risc_size; + break; + + default: + ql_log(ql_log_warn + ql_dbg_verbose, vha, + 0xffff, "Secure region %x not supported\n", + le16_to_cpu(region.code)); + rval = QLA_COMMAND_ERROR; + goto done; + } + + sfub = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct secure_flash_update_block), &sfub_dma, + GFP_KERNEL); + if (!sfub) { + ql_log(ql_log_warn, vha, 0xffff, + "Unable to allocate memory for SFUB\n"); + rval = QLA_COMMAND_ERROR; + goto done; + } + + rval = qla28xx_extract_sfub_and_verify(vha, (__le32 *)dwptr, + dwords, buf_size_without_sfub, (uint8_t *)sfub); + + if (rval != QLA_SUCCESS) + goto done; + + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "SFUB extract and verify successful\n"); + } + + rest_addr = (ha->fdt_block_size >> 2) - 1; + sec_mask = ~rest_addr; + + /* Lock semaphore */ + rval = qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_LOCK); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xffff, + "Unable to lock flash semaphore."); + goto done; + } + + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Unprotect flash...\n"); + rval = qla24xx_unprotect_flash(vha); + if (rval) { + qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK); + ql_log(ql_log_warn, vha, 0x7096, "Failed unprotect flash\n"); + goto done; + } + + for (liter = 0; liter < dwords; liter++, faddr++) { + fdata = (faddr & sec_mask) << 2; + + /* If start of sector */ + if (!(faddr & rest_addr)) { + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Erase sector %#x...\n", faddr); + rval = qla24xx_erase_sector(vha, fdata); + if (rval) { + ql_dbg(ql_dbg_user, vha, 0x7007, + "Failed erase sector %#x\n", faddr); + goto write_protect; + } + } + } + + if (ha->flags.secure_adapter) { + /* + * If adapter supports secure flash but FW doesn't, + * disable write protect, release semaphore and reset + * chip to execute ROM code in order to update region securely + */ + if (!ha->flags.secure_fw) { + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "Disable Write and Release Semaphore."); + rval = qla24xx_protect_flash(vha); + if (rval != QLA_SUCCESS) { + qla81xx_fac_semaphore_access(vha, + FAC_SEMAPHORE_UNLOCK); + ql_log(ql_log_warn, vha, 0xffff, + "Unable to protect flash."); + goto done; + } + + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "Reset chip to ROM."); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + set_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + rval = qla2x00_wait_for_chip_reset(vha); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xffff, + "Unable to reset to ROM code."); + goto done; + } + reset_to_rom = true; + ha->flags.fac_supported = 0; + + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "Lock Semaphore"); + rval = qla2xxx_write_remote_register(vha, + FLASH_SEMAPHORE_REGISTER_ADDR, 0x00020002); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xffff, + "Unable to lock flash semaphore."); + goto done; + } + + /* Unprotect flash */ + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "Enable Write."); + rval = qla2x00_write_ram_word(vha, 0x7ffd0101, 0); + if (rval) { + 
ql_log(ql_log_warn, vha, 0x7096, + "Failed unprotect flash\n"); + goto done; + } + } + + /* If region is secure, send Secure Flash MB Cmd */ + if (region.attribute && buf_size_without_sfub) { + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "Sending Secure Flash MB Cmd\n"); + rval = qla28xx_secure_flash_update(vha, 0, + le16_to_cpu(region.code), + buf_size_without_sfub, sfub_dma, + sizeof(struct secure_flash_update_block) >> 2); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xffff, + "Secure Flash MB Cmd failed %x.", rval); + goto write_protect; + } + } + + } + + /* re-init flash offset */ + faddr = offset >> 2; + + for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) { + fdata = (faddr & sec_mask) << 2; + + /* If smaller than a burst remaining */ + if (dwords - liter < dburst) + dburst = dwords - liter; + + /* Copy to dma buffer */ + memcpy(optrom, dwptr, dburst << 2); + + /* Burst write */ + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Write burst (%#lx dwords)...\n", dburst); + rval = qla2x00_load_ram(vha, optrom_dma, + flash_data_addr(ha, faddr), dburst); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x7097, + "Failed burst write at %x (%p/%#llx)...\n", + flash_data_addr(ha, faddr), optrom, + (u64)optrom_dma); + break; + } + + liter += dburst - 1; + faddr += dburst - 1; + dwptr += dburst - 1; + } + +write_protect: + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Protect flash...\n"); + ret = qla24xx_protect_flash(vha); + if (ret) { + qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK); + ql_log(ql_log_warn, vha, 0x7099, + "Failed protect flash\n"); + rval = QLA_COMMAND_ERROR; + } + + if (reset_to_rom == true) { + /* Schedule DPC to restart the RISC */ + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + + ret = qla2x00_wait_for_hba_online(vha); + if (ret != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xffff, + "Adapter did not come out of reset\n"); + rval = QLA_COMMAND_ERROR; + } + } + +done: + if (optrom) + dma_free_coherent(&ha->pdev->dev, + OPTROM_BURST_SIZE, optrom, optrom_dma); + + return rval; +} + +int +qla24xx_write_optrom_data(struct scsi_qla_host *vha, void *buf, + uint32_t offset, uint32_t length) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + + /* Suspend HBA. */ + scsi_block_requests(vha->host); + set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); + + /* Go with write. 
*/ + if (IS_QLA28XX(ha)) + rval = qla28xx_write_flash_data(vha, buf, offset >> 2, + length >> 2); + else + rval = qla24xx_write_flash_data(vha, buf, offset >> 2, + length >> 2); + + clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); + scsi_unblock_requests(vha->host); + + return rval; +} + +void * +qla25xx_read_optrom_data(struct scsi_qla_host *vha, void *buf, + uint32_t offset, uint32_t length) +{ + int rval; + dma_addr_t optrom_dma; + void *optrom; + uint8_t *pbuf; + uint32_t faddr, left, burst; + struct qla_hw_data *ha = vha->hw; + + if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || + IS_QLA27XX(ha) || IS_QLA28XX(ha)) + goto try_fast; + if (offset & 0xfff) + goto slow_read; + if (length < OPTROM_BURST_SIZE) + goto slow_read; + +try_fast: + if (offset & 0xff) + goto slow_read; + optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, + &optrom_dma, GFP_KERNEL); + if (!optrom) { + ql_log(ql_log_warn, vha, 0x00cc, + "Unable to allocate memory for optrom burst read (%x KB).\n", + OPTROM_BURST_SIZE / 1024); + goto slow_read; + } + + pbuf = buf; + faddr = offset >> 2; + left = length >> 2; + burst = OPTROM_BURST_DWORDS; + while (left != 0) { + if (burst > left) + burst = left; + + rval = qla2x00_dump_ram(vha, optrom_dma, + flash_data_addr(ha, faddr), burst); + if (rval) { + ql_log(ql_log_warn, vha, 0x00f5, + "Unable to burst-read optrom segment (%x/%x/%llx).\n", + rval, flash_data_addr(ha, faddr), + (unsigned long long)optrom_dma); + ql_log(ql_log_warn, vha, 0x00f6, + "Reverting to slow-read.\n"); + + dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, + optrom, optrom_dma); + goto slow_read; + } + + memcpy(pbuf, optrom, burst * 4); + + left -= burst; + faddr += burst; + pbuf += burst * 4; + } + + dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, optrom, + optrom_dma); + + return buf; + +slow_read: + return qla24xx_read_optrom_data(vha, buf, offset, length); +} + +/** + * qla2x00_get_fcode_version() - Determine an FCODE image's version. + * @ha: HA context + * @pcids: Pointer to the FCODE PCI data structure + * + * The process of retrieving the FCODE version information is at best + * described as interesting. + * + * Within the first 100h bytes of the image an ASCII string is present + * which contains several pieces of information including the FCODE + * version. Unfortunately it seems the only reliable way to retrieve + * the version is by scanning for another sentinel within the string, + * the FCODE build date: + * + * ... 2.00.02 10/17/02 ... + * + * Returns QLA_SUCCESS on successful retrieval of version. + */ +static void +qla2x00_get_fcode_version(struct qla_hw_data *ha, uint32_t pcids) +{ + int ret = QLA_FUNCTION_FAILED; + uint32_t istart, iend, iter, vend; + uint8_t do_next, rbyte, *vbyte; + + memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); + + /* Skip the PCI data structure. */ + istart = pcids + + ((qla2x00_read_flash_byte(ha, pcids + 0x0B) << 8) | + qla2x00_read_flash_byte(ha, pcids + 0x0A)); + iend = istart + 0x100; + do { + /* Scan for the sentinel date string...eeewww. */ + do_next = 0; + iter = istart; + while ((iter < iend) && !do_next) { + iter++; + if (qla2x00_read_flash_byte(ha, iter) == '/') { + if (qla2x00_read_flash_byte(ha, iter + 2) == + '/') + do_next++; + else if (qla2x00_read_flash_byte(ha, + iter + 3) == '/') + do_next++; + } + } + if (!do_next) + break; + + /* Backtrack to previous ' ' (space). 
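+		 *
+		 * Editor's note, a worked example and not part of the patch:
+		 * with the sample string from the function comment,
+		 * "... 2.00.02 10/17/02 ...", the forward scan above stops on
+		 * the first '/' of the date (another '/' follows two or three
+		 * bytes on).  This backtrack then lands on the space between
+		 * "2.00.02" and the date, the next backtrack stops on the
+		 * space (or control byte) just before "2.00.02", and the
+		 * bytes in between, "2.00.02", are copied into
+		 * ha->fcode_revision.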
*/ + do_next = 0; + while ((iter > istart) && !do_next) { + iter--; + if (qla2x00_read_flash_byte(ha, iter) == ' ') + do_next++; + } + if (!do_next) + break; + + /* + * Mark end of version tag, and find previous ' ' (space) or + * string length (recent FCODE images -- major hack ahead!!!). + */ + vend = iter - 1; + do_next = 0; + while ((iter > istart) && !do_next) { + iter--; + rbyte = qla2x00_read_flash_byte(ha, iter); + if (rbyte == ' ' || rbyte == 0xd || rbyte == 0x10) + do_next++; + } + if (!do_next) + break; + + /* Mark beginning of version tag, and copy data. */ + iter++; + if ((vend - iter) && + ((vend - iter) < sizeof(ha->fcode_revision))) { + vbyte = ha->fcode_revision; + while (iter <= vend) { + *vbyte++ = qla2x00_read_flash_byte(ha, iter); + iter++; + } + ret = QLA_SUCCESS; + } + } while (0); + + if (ret != QLA_SUCCESS) + memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); +} + +int +qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf) +{ + int ret = QLA_SUCCESS; + uint8_t code_type, last_image; + uint32_t pcihdr, pcids; + uint8_t *dbyte; + uint16_t *dcode; + struct qla_hw_data *ha = vha->hw; + + if (!ha->pio_address || !mbuf) + return QLA_FUNCTION_FAILED; + + memset(ha->bios_revision, 0, sizeof(ha->bios_revision)); + memset(ha->efi_revision, 0, sizeof(ha->efi_revision)); + memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); + memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); + + qla2x00_flash_enable(ha); + + /* Begin with first PCI expansion ROM header. */ + pcihdr = 0; + last_image = 1; + do { + /* Verify PCI expansion ROM header. */ + if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 || + qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) { + /* No signature */ + ql_log(ql_log_fatal, vha, 0x0050, + "No matching ROM signature.\n"); + ret = QLA_FUNCTION_FAILED; + break; + } + + /* Locate PCI data structure. */ + pcids = pcihdr + + ((qla2x00_read_flash_byte(ha, pcihdr + 0x19) << 8) | + qla2x00_read_flash_byte(ha, pcihdr + 0x18)); + + /* Validate signature of PCI data structure. */ + if (qla2x00_read_flash_byte(ha, pcids) != 'P' || + qla2x00_read_flash_byte(ha, pcids + 0x1) != 'C' || + qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' || + qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') { + /* Incorrect header. */ + ql_log(ql_log_fatal, vha, 0x0051, + "PCI data struct not found pcir_adr=%x.\n", pcids); + ret = QLA_FUNCTION_FAILED; + break; + } + + /* Read version */ + code_type = qla2x00_read_flash_byte(ha, pcids + 0x14); + switch (code_type) { + case ROM_CODE_TYPE_BIOS: + /* Intel x86, PC-AT compatible. */ + ha->bios_revision[0] = + qla2x00_read_flash_byte(ha, pcids + 0x12); + ha->bios_revision[1] = + qla2x00_read_flash_byte(ha, pcids + 0x13); + ql_dbg(ql_dbg_init, vha, 0x0052, + "Read BIOS %d.%d.\n", + ha->bios_revision[1], ha->bios_revision[0]); + break; + case ROM_CODE_TYPE_FCODE: + /* Open Firmware standard for PCI (FCode). */ + /* Eeeewww... */ + qla2x00_get_fcode_version(ha, pcids); + break; + case ROM_CODE_TYPE_EFI: + /* Extensible Firmware Interface (EFI). */ + ha->efi_revision[0] = + qla2x00_read_flash_byte(ha, pcids + 0x12); + ha->efi_revision[1] = + qla2x00_read_flash_byte(ha, pcids + 0x13); + ql_dbg(ql_dbg_init, vha, 0x0053, + "Read EFI %d.%d.\n", + ha->efi_revision[1], ha->efi_revision[0]); + break; + default: + ql_log(ql_log_warn, vha, 0x0054, + "Unrecognized code type %x at pcids %x.\n", + code_type, pcids); + break; + } + + last_image = qla2x00_read_flash_byte(ha, pcids + 0x15) & BIT_7; + + /* Locate next PCI expansion ROM. 
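+		 *
+		 * Editor's note (informational, not driver code): the offsets
+		 * used by this and the other get_flash_version() variants
+		 * follow the standard PCI expansion ROM layout.  Bytes
+		 * 0x18/0x19 of the ROM header point at the "PCIR" data
+		 * structure; within PCIR, 0x12/0x13 holds the code revision,
+		 * 0x14 the code type, bit 7 of 0x15 the last-image flag, and
+		 * 0x10/0x11 the image length in 512-byte units, which is why
+		 * the next header is found at pcihdr += length * 512.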
*/ + pcihdr += ((qla2x00_read_flash_byte(ha, pcids + 0x11) << 8) | + qla2x00_read_flash_byte(ha, pcids + 0x10)) * 512; + } while (!last_image); + + if (IS_QLA2322(ha)) { + /* Read firmware image information. */ + memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); + dbyte = mbuf; + memset(dbyte, 0, 8); + dcode = (uint16_t *)dbyte; + + qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10, + 8); + ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010a, + "Dumping fw " + "ver from flash:.\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b, + dbyte, 32); + + if ((dcode[0] == 0xffff && dcode[1] == 0xffff && + dcode[2] == 0xffff && dcode[3] == 0xffff) || + (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && + dcode[3] == 0)) { + ql_log(ql_log_warn, vha, 0x0057, + "Unrecognized fw revision at %x.\n", + ha->flt_region_fw * 4); + } else { + /* values are in big endian */ + ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; + ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3]; + ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5]; + ql_dbg(ql_dbg_init, vha, 0x0058, + "FW Version: " + "%d.%d.%d.\n", ha->fw_revision[0], + ha->fw_revision[1], ha->fw_revision[2]); + } + } + + qla2x00_flash_disable(ha); + + return ret; +} + +int +qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) +{ + int ret = QLA_SUCCESS; + uint32_t pcihdr, pcids; + uint32_t *dcode = mbuf; + uint8_t *bcode = mbuf; + uint8_t code_type, last_image; + struct qla_hw_data *ha = vha->hw; + + if (!mbuf) + return QLA_FUNCTION_FAILED; + + memset(ha->bios_revision, 0, sizeof(ha->bios_revision)); + memset(ha->efi_revision, 0, sizeof(ha->efi_revision)); + memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); + memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); + + /* Begin with first PCI expansion ROM header. */ + pcihdr = ha->flt_region_boot << 2; + last_image = 1; + do { + /* Verify PCI expansion ROM header. */ + ha->isp_ops->read_optrom(vha, dcode, pcihdr, 0x20 * 4); + bcode = mbuf + (pcihdr % 4); + if (memcmp(bcode, "\x55\xaa", 2)) { + /* No signature */ + ql_log(ql_log_fatal, vha, 0x0154, + "No matching ROM signature.\n"); + ret = QLA_FUNCTION_FAILED; + break; + } + + /* Locate PCI data structure. */ + pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); + + ha->isp_ops->read_optrom(vha, dcode, pcids, 0x20 * 4); + bcode = mbuf + (pcihdr % 4); + + /* Validate signature of PCI data structure. */ + if (memcmp(bcode, "PCIR", 4)) { + /* Incorrect header. */ + ql_log(ql_log_fatal, vha, 0x0155, + "PCI data struct not found pcir_adr=%x.\n", pcids); + ret = QLA_FUNCTION_FAILED; + break; + } + + /* Read version */ + code_type = bcode[0x14]; + switch (code_type) { + case ROM_CODE_TYPE_BIOS: + /* Intel x86, PC-AT compatible. */ + ha->bios_revision[0] = bcode[0x12]; + ha->bios_revision[1] = bcode[0x13]; + ql_dbg(ql_dbg_init, vha, 0x0156, + "Read BIOS %d.%d.\n", + ha->bios_revision[1], ha->bios_revision[0]); + break; + case ROM_CODE_TYPE_FCODE: + /* Open Firmware standard for PCI (FCode). */ + ha->fcode_revision[0] = bcode[0x12]; + ha->fcode_revision[1] = bcode[0x13]; + ql_dbg(ql_dbg_init, vha, 0x0157, + "Read FCODE %d.%d.\n", + ha->fcode_revision[1], ha->fcode_revision[0]); + break; + case ROM_CODE_TYPE_EFI: + /* Extensible Firmware Interface (EFI). 
*/ + ha->efi_revision[0] = bcode[0x12]; + ha->efi_revision[1] = bcode[0x13]; + ql_dbg(ql_dbg_init, vha, 0x0158, + "Read EFI %d.%d.\n", + ha->efi_revision[1], ha->efi_revision[0]); + break; + default: + ql_log(ql_log_warn, vha, 0x0159, + "Unrecognized code type %x at pcids %x.\n", + code_type, pcids); + break; + } + + last_image = bcode[0x15] & BIT_7; + + /* Locate next PCI expansion ROM. */ + pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512; + } while (!last_image); + + /* Read firmware image information. */ + memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); + dcode = mbuf; + ha->isp_ops->read_optrom(vha, dcode, ha->flt_region_fw << 2, 0x20); + bcode = mbuf + (pcihdr % 4); + + /* Validate signature of PCI data structure. */ + if (bcode[0x0] == 0x3 && bcode[0x1] == 0x0 && + bcode[0x2] == 0x40 && bcode[0x3] == 0x40) { + ha->fw_revision[0] = bcode[0x4]; + ha->fw_revision[1] = bcode[0x5]; + ha->fw_revision[2] = bcode[0x6]; + ql_dbg(ql_dbg_init, vha, 0x0153, + "Firmware revision %d.%d.%d\n", + ha->fw_revision[0], ha->fw_revision[1], + ha->fw_revision[2]); + } + + return ret; +} + +int +qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) +{ + int ret = QLA_SUCCESS; + uint32_t pcihdr = 0, pcids = 0; + uint32_t *dcode = mbuf; + uint8_t *bcode = mbuf; + uint8_t code_type, last_image; + int i; + struct qla_hw_data *ha = vha->hw; + uint32_t faddr = 0; + struct active_regions active_regions = { }; + + if (IS_P3P_TYPE(ha)) + return ret; + + if (!mbuf) + return QLA_FUNCTION_FAILED; + + memset(ha->bios_revision, 0, sizeof(ha->bios_revision)); + memset(ha->efi_revision, 0, sizeof(ha->efi_revision)); + memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); + memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); + + pcihdr = ha->flt_region_boot << 2; + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + qla27xx_get_active_image(vha, &active_regions); + if (active_regions.global == QLA27XX_SECONDARY_IMAGE) { + pcihdr = ha->flt_region_boot_sec << 2; + } + } + + do { + /* Verify PCI expansion ROM header. */ + qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); + bcode = mbuf + (pcihdr % 4); + if (memcmp(bcode, "\x55\xaa", 2)) { + /* No signature */ + ql_log(ql_log_fatal, vha, 0x0059, + "No matching ROM signature.\n"); + ret = QLA_FUNCTION_FAILED; + break; + } + + /* Locate PCI data structure. */ + pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); + + qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); + bcode = mbuf + (pcihdr % 4); + + /* Validate signature of PCI data structure. */ + if (memcmp(bcode, "PCIR", 4)) { + /* Incorrect header. */ + ql_log(ql_log_fatal, vha, 0x005a, + "PCI data struct not found pcir_adr=%x.\n", pcids); + ql_dump_buffer(ql_dbg_init, vha, 0x0059, dcode, 32); + ret = QLA_FUNCTION_FAILED; + break; + } + + /* Read version */ + code_type = bcode[0x14]; + switch (code_type) { + case ROM_CODE_TYPE_BIOS: + /* Intel x86, PC-AT compatible. */ + ha->bios_revision[0] = bcode[0x12]; + ha->bios_revision[1] = bcode[0x13]; + ql_dbg(ql_dbg_init, vha, 0x005b, + "Read BIOS %d.%d.\n", + ha->bios_revision[1], ha->bios_revision[0]); + break; + case ROM_CODE_TYPE_FCODE: + /* Open Firmware standard for PCI (FCode). */ + ha->fcode_revision[0] = bcode[0x12]; + ha->fcode_revision[1] = bcode[0x13]; + ql_dbg(ql_dbg_init, vha, 0x005c, + "Read FCODE %d.%d.\n", + ha->fcode_revision[1], ha->fcode_revision[0]); + break; + case ROM_CODE_TYPE_EFI: + /* Extensible Firmware Interface (EFI). 
*/ + ha->efi_revision[0] = bcode[0x12]; + ha->efi_revision[1] = bcode[0x13]; + ql_dbg(ql_dbg_init, vha, 0x005d, + "Read EFI %d.%d.\n", + ha->efi_revision[1], ha->efi_revision[0]); + break; + default: + ql_log(ql_log_warn, vha, 0x005e, + "Unrecognized code type %x at pcids %x.\n", + code_type, pcids); + break; + } + + last_image = bcode[0x15] & BIT_7; + + /* Locate next PCI expansion ROM. */ + pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512; + } while (!last_image); + + /* Read firmware image information. */ + memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); + faddr = ha->flt_region_fw; + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + qla27xx_get_active_image(vha, &active_regions); + if (active_regions.global == QLA27XX_SECONDARY_IMAGE) + faddr = ha->flt_region_fw_sec; + } + + qla24xx_read_flash_data(vha, dcode, faddr, 8); + if (qla24xx_risc_firmware_invalid(dcode)) { + ql_log(ql_log_warn, vha, 0x005f, + "Unrecognized fw revision at %x.\n", + ha->flt_region_fw * 4); + ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32); + } else { + for (i = 0; i < 4; i++) + ha->fw_revision[i] = + be32_to_cpu((__force __be32)dcode[4+i]); + ql_dbg(ql_dbg_init, vha, 0x0060, + "Firmware revision (flash) %u.%u.%u (%x).\n", + ha->fw_revision[0], ha->fw_revision[1], + ha->fw_revision[2], ha->fw_revision[3]); + } + + /* Check for golden firmware and get version if available */ + if (!IS_QLA81XX(ha)) { + /* Golden firmware is not present in non 81XX adapters */ + return ret; + } + + memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version)); + faddr = ha->flt_region_gold_fw; + qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8); + if (qla24xx_risc_firmware_invalid(dcode)) { + ql_log(ql_log_warn, vha, 0x0056, + "Unrecognized golden fw at %#x.\n", faddr); + ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32); + return ret; + } + + for (i = 0; i < 4; i++) + ha->gold_fw_version[i] = + be32_to_cpu((__force __be32)dcode[4+i]); + + return ret; +} + +static int +qla2xxx_is_vpd_valid(uint8_t *pos, uint8_t *end) +{ + if (pos >= end || *pos != 0x82) + return 0; + + pos += 3 + pos[1]; + if (pos >= end || *pos != 0x90) + return 0; + + pos += 3 + pos[1]; + if (pos >= end || *pos != 0x78) + return 0; + + return 1; +} + +int +qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size) +{ + struct qla_hw_data *ha = vha->hw; + uint8_t *pos = ha->vpd; + uint8_t *end = pos + ha->vpd_size; + int len = 0; + + if (!IS_FWI2_CAPABLE(ha) || !qla2xxx_is_vpd_valid(pos, end)) + return 0; + + while (pos < end && *pos != 0x78) { + len = (*pos == 0x82) ? 
pos[1] : pos[2]; + + if (!strncmp(pos, key, strlen(key))) + break; + + if (*pos != 0x90 && *pos != 0x91) + pos += len; + + pos += 3; + } + + if (pos < end - len && *pos != 0x78) + return scnprintf(str, size, "%.*s", len, pos + 3); + + return 0; +} + +int +qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha) +{ + int len, max_len; + uint32_t fcp_prio_addr; + struct qla_hw_data *ha = vha->hw; + + if (!ha->fcp_prio_cfg) { + ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); + if (!ha->fcp_prio_cfg) { + ql_log(ql_log_warn, vha, 0x00d5, + "Unable to allocate memory for fcp priority data (%x).\n", + FCP_PRIO_CFG_SIZE); + return QLA_FUNCTION_FAILED; + } + } + memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE); + + fcp_prio_addr = ha->flt_region_fcp_prio; + + /* first read the fcp priority data header from flash */ + ha->isp_ops->read_optrom(vha, ha->fcp_prio_cfg, + fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE); + + if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0)) + goto fail; + + /* read remaining FCP CMD config data from flash */ + fcp_prio_addr += (FCP_PRIO_CFG_HDR_SIZE >> 2); + len = ha->fcp_prio_cfg->num_entries * sizeof(struct qla_fcp_prio_entry); + max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE; + + ha->isp_ops->read_optrom(vha, &ha->fcp_prio_cfg->entry[0], + fcp_prio_addr << 2, (len < max_len ? len : max_len)); + + /* revalidate the entire FCP priority config data, including entries */ + if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) + goto fail; + + ha->flags.fcp_prio_enabled = 1; + return QLA_SUCCESS; +fail: + vfree(ha->fcp_prio_cfg); + ha->fcp_prio_cfg = NULL; + return QLA_FUNCTION_FAILED; +} diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c new file mode 100644 index 000000000..2ef2dbac0 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -0,0 +1,7323 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx + * + * based on qla2x00t.c code: + * + * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin + * Copyright (C) 2004 - 2005 Leonid Stoljar + * Copyright (C) 2006 Nathaniel Clark + * Copyright (C) 2006 - 2010 ID7 Ltd. + * + * Forward port and refactoring to modern qla2xxx and target/configfs + * + * Copyright (C) 2010-2013 Nicholas A. Bellinger + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qla_def.h" +#include "qla_target.h" + +static int ql2xtgt_tape_enable; +module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xtgt_tape_enable, + "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER."); + +static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED; +module_param(qlini_mode, charp, S_IRUGO); +MODULE_PARM_DESC(qlini_mode, + "Determines when initiator mode will be enabled. Possible values: " + "\"exclusive\" - initiator mode will be enabled on load, " + "disabled on enabling target mode and then on disabling target mode " + "enabled back; " + "\"disabled\" - initiator mode will never be enabled; " + "\"dual\" - Initiator Modes will be enabled. Target Mode can be activated " + "when ready " + "\"enabled\" (default) - initiator mode will always stay enabled."); + +int ql2xuctrlirq = 1; +module_param(ql2xuctrlirq, int, 0644); +MODULE_PARM_DESC(ql2xuctrlirq, + "User to control IRQ placement via smp_affinity." + "Valid with qlini_mode=disabled." 
+ "1(default): enable"); + +int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; + +static int qla_sam_status = SAM_STAT_BUSY; +static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */ + +/* + * From scsi/fc/fc_fcp.h + */ +enum fcp_resp_rsp_codes { + FCP_TMF_CMPL = 0, + FCP_DATA_LEN_INVALID = 1, + FCP_CMND_FIELDS_INVALID = 2, + FCP_DATA_PARAM_MISMATCH = 3, + FCP_TMF_REJECTED = 4, + FCP_TMF_FAILED = 5, + FCP_TMF_INVALID_LUN = 9, +}; + +/* + * fc_pri_ta from scsi/fc/fc_fcp.h + */ +#define FCP_PTA_SIMPLE 0 /* simple task attribute */ +#define FCP_PTA_HEADQ 1 /* head of queue task attribute */ +#define FCP_PTA_ORDERED 2 /* ordered task attribute */ +#define FCP_PTA_ACA 4 /* auto. contingent allegiance */ +#define FCP_PTA_MASK 7 /* mask for task attribute field */ +#define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */ +#define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */ + +/* + * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which + * must be called under HW lock and could unlock/lock it inside. + * It isn't an issue, since in the current implementation on the time when + * those functions are called: + * + * - Either context is IRQ and only IRQ handler can modify HW data, + * including rings related fields, + * + * - Or access to target mode variables from struct qla_tgt doesn't + * cross those functions boundaries, except tgt_stop, which + * additionally protected by irq_cmd_count. + */ +/* Predefs for callbacks handed to qla2xxx LLD */ +static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha, + struct atio_from_isp *pkt, uint8_t); +static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp, + response_t *pkt); +static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, + int fn, void *iocb, int flags); +static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd + *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort); +static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, + struct atio_from_isp *atio, uint16_t status, int qfull); +static void qlt_disable_vha(struct scsi_qla_host *vha); +static void qlt_clear_tgt_db(struct qla_tgt *tgt); +static void qlt_send_notify_ack(struct qla_qpair *qpair, + struct imm_ntfy_from_isp *ntfy, + uint32_t add_flags, uint16_t resp_code, int resp_code_valid, + uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan); +static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *imm, int ha_locked); +static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha, + fc_port_t *fcport, bool local); +void qlt_unreg_sess(struct fc_port *sess); +static void qlt_24xx_handle_abts(struct scsi_qla_host *, + struct abts_recv_from_24xx *); +static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *, + uint16_t); +static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t); +static inline uint32_t qlt_make_handle(struct qla_qpair *); + +/* + * Global Variables + */ +static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; +struct kmem_cache *qla_tgt_plogi_cachep; +static mempool_t *qla_tgt_mgmt_cmd_mempool; +static struct workqueue_struct *qla_tgt_wq; +static DEFINE_MUTEX(qla_tgt_mutex); +static LIST_HEAD(qla_tgt_glist); + +static const char *prot_op_str(u32 prot_op) +{ + switch (prot_op) { + case TARGET_PROT_NORMAL: return "NORMAL"; + case TARGET_PROT_DIN_INSERT: return "DIN_INSERT"; + case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT"; + case TARGET_PROT_DIN_STRIP: return "DIN_STRIP"; + case TARGET_PROT_DOUT_STRIP: 
return "DOUT_STRIP"; + case TARGET_PROT_DIN_PASS: return "DIN_PASS"; + case TARGET_PROT_DOUT_PASS: return "DOUT_PASS"; + default: return "UNKNOWN"; + } +} + +/* This API intentionally takes dest as a parameter, rather than returning + * int value to avoid caller forgetting to issue wmb() after the store */ +void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) +{ + scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev); + *dest = atomic_inc_return(&base_vha->generation_tick); + /* memory barrier */ + wmb(); +} + +/* Might release hw lock, then reaquire!! */ +static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked) +{ + /* Send marker if required */ + if (unlikely(vha->marker_needed != 0)) { + int rc = qla2x00_issue_marker(vha, vha_locked); + + if (rc != QLA_SUCCESS) { + ql_dbg(ql_dbg_tgt, vha, 0xe03d, + "qla_target(%d): issue_marker() failed\n", + vha->vp_idx); + } + return rc; + } + return QLA_SUCCESS; +} + +struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha, + be_id_t d_id) +{ + struct scsi_qla_host *host; + uint32_t key; + + if (vha->d_id.b.area == d_id.area && + vha->d_id.b.domain == d_id.domain && + vha->d_id.b.al_pa == d_id.al_pa) + return vha; + + key = be_to_port_id(d_id).b24; + + host = btree_lookup32(&vha->hw->host_map, key); + if (!host) + ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005, + "Unable to find host %06x\n", key); + + return host; +} + +static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha) +{ + unsigned long flags; + + spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); + + vha->hw->tgt.num_pend_cmds++; + if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds) + vha->qla_stats.stat_max_pend_cmds = + vha->hw->tgt.num_pend_cmds; + spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); +} +static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha) +{ + unsigned long flags; + + spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); + vha->hw->tgt.num_pend_cmds--; + spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); +} + + +static void qlt_queue_unknown_atio(scsi_qla_host_t *vha, + struct atio_from_isp *atio, uint8_t ha_locked) +{ + struct qla_tgt_sess_op *u; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + unsigned long flags; + + if (tgt->tgt_stop) { + ql_dbg(ql_dbg_async, vha, 0x502c, + "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped", + vha->vp_idx); + goto out_term; + } + + u = kzalloc(sizeof(*u), GFP_ATOMIC); + if (u == NULL) + goto out_term; + + u->vha = vha; + memcpy(&u->atio, atio, sizeof(*atio)); + INIT_LIST_HEAD(&u->cmd_list); + + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_add_tail(&u->cmd_list, &vha->unknown_atio_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); + + schedule_delayed_work(&vha->unknown_atio_work, 1); + +out: + return; + +out_term: + qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0); + goto out; +} + +static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha, + uint8_t ha_locked) +{ + struct qla_tgt_sess_op *u, *t; + scsi_qla_host_t *host; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + unsigned long flags; + uint8_t queued = 0; + + list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) { + if (u->aborted) { + ql_dbg(ql_dbg_async, vha, 0x502e, + "Freeing unknown %s %p, because of Abort\n", + "ATIO_TYPE7", u); + qlt_send_term_exchange(vha->hw->base_qpair, NULL, + &u->atio, ha_locked, 0); + goto abort; + } + + host = qla_find_host_by_d_id(vha, 
u->atio.u.isp24.fcp_hdr.d_id); + if (host != NULL) { + ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f, + "Requeuing unknown ATIO_TYPE7 %p\n", u); + qlt_24xx_atio_pkt(host, &u->atio, ha_locked); + } else if (tgt->tgt_stop) { + ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a, + "Freeing unknown %s %p, because tgt is being stopped\n", + "ATIO_TYPE7", u); + qlt_send_term_exchange(vha->hw->base_qpair, NULL, + &u->atio, ha_locked, 0); + } else { + ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d, + "Reschedule u %p, vha %p, host %p\n", u, vha, host); + if (!queued) { + queued = 1; + schedule_delayed_work(&vha->unknown_atio_work, + 1); + } + continue; + } + +abort: + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_del(&u->cmd_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); + kfree(u); + } +} + +void qlt_unknown_atio_work_fn(struct work_struct *work) +{ + struct scsi_qla_host *vha = container_of(to_delayed_work(work), + struct scsi_qla_host, unknown_atio_work); + + qlt_try_to_dequeue_unknown_atios(vha, 0); +} + +static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, + struct atio_from_isp *atio, uint8_t ha_locked) +{ + ql_dbg(ql_dbg_tgt, vha, 0xe072, + "%s: qla_target(%d): type %x ox_id %04x\n", + __func__, vha->vp_idx, atio->u.raw.entry_type, + be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); + + switch (atio->u.raw.entry_type) { + case ATIO_TYPE7: + { + struct scsi_qla_host *host = qla_find_host_by_d_id(vha, + atio->u.isp24.fcp_hdr.d_id); + if (unlikely(NULL == host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe03e, + "qla_target(%d): Received ATIO_TYPE7 " + "with unknown d_id %x:%x:%x\n", vha->vp_idx, + atio->u.isp24.fcp_hdr.d_id.domain, + atio->u.isp24.fcp_hdr.d_id.area, + atio->u.isp24.fcp_hdr.d_id.al_pa); + + + qlt_queue_unknown_atio(vha, atio, ha_locked); + break; + } + if (unlikely(!list_empty(&vha->unknown_atio_list))) + qlt_try_to_dequeue_unknown_atios(vha, ha_locked); + + qlt_24xx_atio_pkt(host, atio, ha_locked); + break; + } + + case IMMED_NOTIFY_TYPE: + { + struct scsi_qla_host *host = vha; + struct imm_ntfy_from_isp *entry = + (struct imm_ntfy_from_isp *)atio; + + qlt_issue_marker(vha, ha_locked); + + if ((entry->u.isp24.vp_index != 0xFF) && + (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) { + host = qla_find_host_by_vp_idx(vha, + entry->u.isp24.vp_index); + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe03f, + "qla_target(%d): Received " + "ATIO (IMMED_NOTIFY_TYPE) " + "with unknown vp_index %d\n", + vha->vp_idx, entry->u.isp24.vp_index); + break; + } + } + qlt_24xx_atio_pkt(host, atio, ha_locked); + break; + } + + case VP_RPT_ID_IOCB_TYPE: + qla24xx_report_id_acquisition(vha, + (struct vp_rpt_id_entry_24xx *)atio); + break; + + case ABTS_RECV_24XX: + { + struct abts_recv_from_24xx *entry = + (struct abts_recv_from_24xx *)atio; + struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha, + entry->vp_index); + unsigned long flags; + + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe00a, + "qla_target(%d): Response pkt (ABTS_RECV_24XX) " + "received, with unknown vp_index %d\n", + vha->vp_idx, entry->vp_index); + break; + } + if (!ha_locked) + spin_lock_irqsave(&host->hw->hardware_lock, flags); + qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio); + if (!ha_locked) + spin_unlock_irqrestore(&host->hw->hardware_lock, flags); + break; + } + + /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */ + + default: + ql_dbg(ql_dbg_tgt, vha, 0xe040, + "qla_target(%d): Received unknown ATIO atio " + "type %x\n", vha->vp_idx, atio->u.raw.entry_type); + break; + } 
+ + return false; +} + +void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, + struct rsp_que *rsp, response_t *pkt) +{ + switch (pkt->entry_type) { + case CTIO_CRC2: + ql_dbg(ql_dbg_tgt, vha, 0xe073, + "qla_target(%d):%s: CRC2 Response pkt\n", + vha->vp_idx, __func__); + fallthrough; + case CTIO_TYPE7: + { + struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; + struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha, + entry->vp_index); + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe041, + "qla_target(%d): Response pkt (CTIO_TYPE7) " + "received, with unknown vp_index %d\n", + vha->vp_idx, entry->vp_index); + break; + } + qlt_response_pkt(host, rsp, pkt); + break; + } + + case IMMED_NOTIFY_TYPE: + { + struct scsi_qla_host *host; + struct imm_ntfy_from_isp *entry = + (struct imm_ntfy_from_isp *)pkt; + + host = qla_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe042, + "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) " + "received, with unknown vp_index %d\n", + vha->vp_idx, entry->u.isp24.vp_index); + break; + } + qlt_response_pkt(host, rsp, pkt); + break; + } + + case NOTIFY_ACK_TYPE: + { + struct scsi_qla_host *host = vha; + struct nack_to_isp *entry = (struct nack_to_isp *)pkt; + + if (0xFF != entry->u.isp24.vp_index) { + host = qla_find_host_by_vp_idx(vha, + entry->u.isp24.vp_index); + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe043, + "qla_target(%d): Response " + "pkt (NOTIFY_ACK_TYPE) " + "received, with unknown " + "vp_index %d\n", vha->vp_idx, + entry->u.isp24.vp_index); + break; + } + } + qlt_response_pkt(host, rsp, pkt); + break; + } + + case ABTS_RECV_24XX: + { + struct abts_recv_from_24xx *entry = + (struct abts_recv_from_24xx *)pkt; + struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha, + entry->vp_index); + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe044, + "qla_target(%d): Response pkt " + "(ABTS_RECV_24XX) received, with unknown " + "vp_index %d\n", vha->vp_idx, entry->vp_index); + break; + } + qlt_response_pkt(host, rsp, pkt); + break; + } + + case ABTS_RESP_24XX: + { + struct abts_resp_to_24xx *entry = + (struct abts_resp_to_24xx *)pkt; + struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha, + entry->vp_index); + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe045, + "qla_target(%d): Response pkt " + "(ABTS_RECV_24XX) received, with unknown " + "vp_index %d\n", vha->vp_idx, entry->vp_index); + break; + } + qlt_response_pkt(host, rsp, pkt); + break; + } + default: + qlt_response_pkt(vha, rsp, pkt); + break; + } + +} + +/* + * All qlt_plogi_ack_t operations are protected by hardware_lock + */ +static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport, + struct imm_ntfy_from_isp *ntfy, int type) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_NACK); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.nack.fcport = fcport; + e->u.nack.type = type; + memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp)); + return qla2x00_post_work(vha, e); +} + +static void qla2x00_async_nack_sp_done(srb_t *sp, int res) +{ + struct scsi_qla_host *vha = sp->vha; + unsigned long flags; + + ql_dbg(ql_dbg_disc, vha, 0x20f2, + "Async done-%s res %x %8phC type %d\n", + sp->name, res, sp->fcport->port_name, sp->type); + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + sp->fcport->flags &= ~FCF_ASYNC_SENT; + sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset; + + switch (sp->type) { + case SRB_NACK_PLOGI: + sp->fcport->login_gen++; + 
sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; + sp->fcport->logout_on_delete = 1; + sp->fcport->plogi_nack_done_deadline = jiffies + HZ; + sp->fcport->send_els_logo = 0; + + if (sp->fcport->flags & FCF_FCSP_DEVICE) { + ql_dbg(ql_dbg_edif, vha, 0x20ef, + "%s %8phC edif: PLOGI- AUTH WAIT\n", __func__, + sp->fcport->port_name); + qla2x00_set_fcport_disc_state(sp->fcport, + DSC_LOGIN_AUTH_PEND); + qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, + sp->fcport->d_id.b24); + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, sp->fcport->d_id.b24, + 0, sp->fcport); + } + break; + + case SRB_NACK_PRLI: + sp->fcport->fw_login_state = DSC_LS_PRLI_COMP; + sp->fcport->deleted = 0; + sp->fcport->send_els_logo = 0; + + if (!sp->fcport->login_succ && + !IS_SW_RESV_ADDR(sp->fcport->d_id)) { + sp->fcport->login_succ = 1; + + vha->fcport_count++; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + qla24xx_sched_upd_fcport(sp->fcport); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + } else { + sp->fcport->login_retry = 0; + qla2x00_set_fcport_disc_state(sp->fcport, + DSC_LOGIN_COMPLETE); + sp->fcport->deleted = 0; + sp->fcport->logout_on_delete = 1; + } + break; + + case SRB_NACK_LOGO: + sp->fcport->login_gen++; + sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE); + break; + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + kref_put(&sp->cmd_kref, qla2x00_sp_release); +} + +int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, + struct imm_ntfy_from_isp *ntfy, int type) +{ + int rval = QLA_FUNCTION_FAILED; + srb_t *sp; + char *c = NULL; + + fcport->flags |= FCF_ASYNC_SENT; + switch (type) { + case SRB_NACK_PLOGI: + fcport->fw_login_state = DSC_LS_PLOGI_PEND; + c = "PLOGI"; + if (vha->hw->flags.edif_enabled && + (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) + fcport->flags |= FCF_FCSP_DEVICE; + break; + case SRB_NACK_PRLI: + fcport->fw_login_state = DSC_LS_PRLI_PEND; + fcport->deleted = 0; + c = "PRLI"; + break; + case SRB_NACK_LOGO: + fcport->fw_login_state = DSC_LS_LOGO_PEND; + c = "LOGO"; + break; + } + + sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); + if (!sp) + goto done; + + sp->type = type; + sp->name = "nack"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_nack_sp_done); + + sp->u.iocb_cmd.u.nack.ntfy = ntfy; + + ql_dbg(ql_dbg_disc, vha, 0x20f4, + "Async-%s %8phC hndl %x %s\n", + sp->name, fcport->port_name, sp->handle, c); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + return rval; + +done_free_sp: + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + +void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) +{ + fc_port_t *t; + + switch (e->u.nack.type) { + case SRB_NACK_PRLI: + t = e->u.nack.fcport; + flush_work(&t->del_work); + flush_work(&t->free_work); + mutex_lock(&vha->vha_tgt.tgt_mutex); + t = qlt_create_sess(vha, e->u.nack.fcport, 0); + mutex_unlock(&vha->vha_tgt.tgt_mutex); + if (t) { + ql_log(ql_log_info, vha, 0xd034, + "%s create sess success %p", __func__, t); + /* create sess has an extra kref */ + vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport); + } + break; + } + qla24xx_async_notify_ack(vha, e->u.nack.fcport, + (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type); +} + +void qla24xx_delete_sess_fn(struct work_struct *work) +{ + fc_port_t *fcport = container_of(work, struct fc_port, del_work); + struct 
qla_hw_data *ha = NULL; + + if (!fcport || !fcport->vha || !fcport->vha->hw) + return; + + ha = fcport->vha->hw; + + if (fcport->se_sess) { + ha->tgt.tgt_ops->shutdown_sess(fcport); + ha->tgt.tgt_ops->put_sess(fcport); + } else { + qlt_unreg_sess(fcport); + } +} + +/* + * Called from qla2x00_reg_remote_port() + */ +void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct fc_port *sess = fcport; + unsigned long flags; + + if (!vha->hw->tgt.tgt_ops) + return; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (tgt->tgt_stop) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + if (fcport->disc_state == DSC_DELETE_PEND) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + if (!sess->se_sess) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + mutex_lock(&vha->vha_tgt.tgt_mutex); + sess = qlt_create_sess(vha, fcport, false); + mutex_unlock(&vha->vha_tgt.tgt_mutex); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + } else { + if (fcport->fw_login_state == DSC_LS_PRLI_COMP) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_disc, vha, 0x2107, + "%s: kref_get fail sess %8phC \n", + __func__, sess->port_name); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, + "qla_target(%u): %ssession for port %8phC " + "(loop ID %d) reappeared\n", vha->vp_idx, + sess->local ? "local " : "", sess->port_name, sess->loop_id); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, + "Reappeared sess %p\n", sess); + + ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, + fcport->loop_id, + (fcport->flags & FCF_CONF_COMP_SUPPORTED)); + } + + if (sess && sess->local) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, + "qla_target(%u): local session for " + "port %8phC (loop ID %d) became global\n", vha->vp_idx, + fcport->port_name, sess->loop_id); + sess->local = 0; + } + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + ha->tgt.tgt_ops->put_sess(sess); +} + +/* + * This is a zero-base ref-counting solution, since hardware_lock + * guarantees that ref_count is not modified concurrently. 
+ * Upon successful return content of iocb is undefined + */ +static struct qlt_plogi_ack_t * +qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id, + struct imm_ntfy_from_isp *iocb) +{ + struct qlt_plogi_ack_t *pla; + + lockdep_assert_held(&vha->hw->hardware_lock); + + list_for_each_entry(pla, &vha->plogi_ack_list, list) { + if (pla->id.b24 == id->b24) { + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d, + "%s %d %8phC Term INOT due to new INOT", + __func__, __LINE__, + pla->iocb.u.isp24.port_name); + qlt_send_term_imm_notif(vha, &pla->iocb, 1); + memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); + return pla; + } + } + + pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC); + if (!pla) { + ql_dbg(ql_dbg_async, vha, 0x5088, + "qla_target(%d): Allocation of plogi_ack failed\n", + vha->vp_idx); + return NULL; + } + + memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); + pla->id = *id; + list_add_tail(&pla->list, &vha->plogi_ack_list); + + return pla; +} + +void qlt_plogi_ack_unref(struct scsi_qla_host *vha, + struct qlt_plogi_ack_t *pla) +{ + struct imm_ntfy_from_isp *iocb = &pla->iocb; + port_id_t port_id; + uint16_t loop_id; + fc_port_t *fcport = pla->fcport; + + BUG_ON(!pla->ref_count); + pla->ref_count--; + + if (pla->ref_count) + return; + + ql_dbg(ql_dbg_disc, vha, 0x5089, + "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x" + " exch %#x ox_id %#x\n", iocb->u.isp24.port_name, + iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1], + iocb->u.isp24.port_id[0], + le16_to_cpu(iocb->u.isp24.nport_handle), + iocb->u.isp24.exchange_address, iocb->ox_id); + + port_id.b.domain = iocb->u.isp24.port_id[2]; + port_id.b.area = iocb->u.isp24.port_id[1]; + port_id.b.al_pa = iocb->u.isp24.port_id[0]; + port_id.b.rsvd_1 = 0; + + loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); + + fcport->loop_id = loop_id; + fcport->d_id = port_id; + if (iocb->u.isp24.status_subcode == ELS_PLOGI) + qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI); + else + qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI); + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla) + fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; + if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla) + fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; + } + + list_del(&pla->list); + kmem_cache_free(qla_tgt_plogi_cachep, pla); +} + +void +qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla, + struct fc_port *sess, enum qlt_plogi_link_t link) +{ + struct imm_ntfy_from_isp *iocb = &pla->iocb; + /* Inc ref_count first because link might already be pointing at pla */ + pla->ref_count++; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097, + "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC" + " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n", + sess, link, sess->port_name, + iocb->u.isp24.port_name, iocb->u.isp24.port_id[2], + iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], + pla->ref_count, pla, link); + + if (link == QLT_PLOGI_LINK_CONFLICT) { + switch (sess->disc_state) { + case DSC_DELETED: + case DSC_DELETE_PEND: + pla->ref_count--; + return; + default: + break; + } + } + + if (sess->plogi_link[link]) + qlt_plogi_ack_unref(vha, sess->plogi_link[link]); + + if (link == QLT_PLOGI_LINK_SAME_WWN) + pla->fcport = sess; + + sess->plogi_link[link] = pla; +} + +typedef struct { + /* These fields must be initialized by the caller */ + port_id_t id; + /* + * number of cmds dropped while we were waiting for + * initiator to ack LOGO 
initialize to 1 if LOGO is + * triggered by a command, otherwise, to 0 + */ + int cmd_count; + + /* These fields are used by callee */ + struct list_head list; +} qlt_port_logo_t; + +static void +qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo) +{ + qlt_port_logo_t *tmp; + int res; + + if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) { + res = 0; + goto out; + } + + mutex_lock(&vha->vha_tgt.tgt_mutex); + + list_for_each_entry(tmp, &vha->logo_list, list) { + if (tmp->id.b24 == logo->id.b24) { + tmp->cmd_count += logo->cmd_count; + mutex_unlock(&vha->vha_tgt.tgt_mutex); + return; + } + } + + list_add_tail(&logo->list, &vha->logo_list); + + mutex_unlock(&vha->vha_tgt.tgt_mutex); + + res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id); + + mutex_lock(&vha->vha_tgt.tgt_mutex); + list_del(&logo->list); + mutex_unlock(&vha->vha_tgt.tgt_mutex); + +out: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098, + "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n", + logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa, + logo->cmd_count, res); +} + +void qlt_free_session_done(struct work_struct *work) +{ + struct fc_port *sess = container_of(work, struct fc_port, + free_work); + struct qla_tgt *tgt = sess->tgt; + struct scsi_qla_host *vha = sess->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + bool logout_started = false; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + struct qlt_plogi_ack_t *own = + sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; + + ql_dbg(ql_dbg_disc, vha, 0xf084, + "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" + " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n", + __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, + sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa, + sess->logout_on_delete, sess->keep_nport_handle, + sess->send_els_logo); + + if (!IS_SW_RESV_ADDR(sess->d_id)) { + qla2x00_mark_device_lost(vha, sess, 0); + + if (sess->send_els_logo) { + qlt_port_logo_t logo; + + logo.id = sess->d_id; + logo.cmd_count = 0; + INIT_LIST_HEAD(&logo.list); + if (!own) + qlt_send_first_logo(vha, &logo); + sess->send_els_logo = 0; + } + + if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) { + int rc; + + if (!own || + (own->iocb.u.isp24.status_subcode == ELS_PLOGI)) { + sess->logout_completed = 0; + rc = qla2x00_post_async_logout_work(vha, sess, + NULL); + if (rc != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0xf085, + "Schedule logo failed sess %p rc %d\n", + sess, rc); + else + logout_started = true; + } else if (own && (own->iocb.u.isp24.status_subcode == + ELS_PRLI) && ha->flags.rida_fmt2) { + rc = qla2x00_post_async_prlo_work(vha, sess, + NULL); + if (rc != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0xf085, + "Schedule PRLO failed sess %p rc %d\n", + sess, rc); + else + logout_started = true; + } + } /* if sess->logout_on_delete */ + + if (sess->nvme_flag & NVME_FLAG_REGISTERED && + !(sess->nvme_flag & NVME_FLAG_DELETING)) { + sess->nvme_flag |= NVME_FLAG_DELETING; + qla_nvme_unregister_remote_port(sess); + } + + if (ha->flags.edif_enabled && + (!own || own->iocb.u.isp24.status_subcode == ELS_PLOGI)) { + sess->edif.authok = 0; + if (!ha->flags.host_shutting_down) { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s wwpn %8phC calling qla2x00_release_all_sadb\n", + __func__, sess->port_name); + qla2x00_release_all_sadb(vha, sess); + } else { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s bypassing release_all_sadb\n", + __func__); + } + + qla_edif_clear_appdata(vha, sess); + qla_edif_sess_down(vha, sess); + } + 
} + + /* + * Release the target session for FC Nexus from fabric module code. + */ + if (sess->se_sess != NULL) + ha->tgt.tgt_ops->free_session(sess); + + if (logout_started) { + bool traced = false; + u16 cnt = 0; + + while (!READ_ONCE(sess->logout_completed)) { + if (!traced) { + ql_dbg(ql_dbg_disc, vha, 0xf086, + "%s: waiting for sess %p logout\n", + __func__, sess); + traced = true; + } + msleep(100); + cnt++; + /* + * Driver timeout is set to 22 Sec, update count value to loop + * long enough for log-out to complete before advancing. Otherwise, + * straddling logout can interfere with re-login attempt. + */ + if (cnt > 230) + break; + } + + ql_dbg(ql_dbg_disc, vha, 0xf087, + "%s: sess %p logout completed\n", __func__, sess); + } + + if (sess->logo_ack_needed) { + sess->logo_ack_needed = 0; + qla24xx_async_notify_ack(vha, sess, + (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO); + } + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (sess->se_sess) { + sess->se_sess = NULL; + if (tgt && !IS_SW_RESV_ADDR(sess->d_id)) + tgt->sess_count--; + } + + qla2x00_set_fcport_disc_state(sess, DSC_DELETED); + sess->fw_login_state = DSC_LS_PORT_UNAVAIL; + + if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) { + vha->fcport_count--; + sess->login_succ = 0; + } + + qla2x00_clear_loop_id(sess); + + if (sess->conflict) { + sess->conflict->login_pause = 0; + sess->conflict = NULL; + if (!test_bit(UNLOADING, &vha->dpc_flags)) + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + } + + { + struct qlt_plogi_ack_t *con = + sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]; + struct imm_ntfy_from_isp *iocb; + + own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; + + if (con) { + iocb = &con->iocb; + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099, + "se_sess %p / sess %p port %8phC is gone," + " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n", + sess->se_sess, sess, sess->port_name, + own ? "releasing own PLOGI" : "no own PLOGI pending", + own ? own->ref_count : -1, + iocb->u.isp24.port_name, con->ref_count); + qlt_plogi_ack_unref(vha, con); + sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; + } else { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a, + "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n", + sess->se_sess, sess, sess->port_name, + own ? "releasing own PLOGI" : + "no own PLOGI pending", + own ? 
own->ref_count : -1); + } + + if (own) { + sess->fw_login_state = DSC_LS_PLOGI_PEND; + qlt_plogi_ack_unref(vha, own); + sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; + } + } + + sess->explicit_logout = 0; + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + qla2x00_dfs_remove_rport(vha, sess); + + spin_lock_irqsave(&vha->work_lock, flags); + sess->flags &= ~FCF_ASYNC_SENT; + sess->deleted = QLA_SESS_DELETED; + sess->free_pending = 0; + spin_unlock_irqrestore(&vha->work_lock, flags); + + ql_dbg(ql_dbg_disc, vha, 0xf001, + "Unregistration of sess %p %8phC finished fcp_cnt %d\n", + sess, sess->port_name, vha->fcport_count); + + if (tgt && (tgt->sess_count == 0)) + wake_up_all(&tgt->waitQ); + + if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) && + !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) && + (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) { + switch (vha->host->active_mode) { + case MODE_INITIATOR: + case MODE_DUAL: + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + break; + case MODE_TARGET: + default: + /* no-op */ + break; + } + } + + if (vha->fcport_count == 0) + wake_up_all(&vha->fcport_waitQ); +} + +/* ha->tgt.sess_lock supposed to be held on entry */ +void qlt_unreg_sess(struct fc_port *sess) +{ + struct scsi_qla_host *vha = sess->vha; + unsigned long flags; + + ql_dbg(ql_dbg_disc, sess->vha, 0x210a, + "%s sess %p for deletion %8phC\n", + __func__, sess, sess->port_name); + + spin_lock_irqsave(&sess->vha->work_lock, flags); + if (sess->free_pending) { + spin_unlock_irqrestore(&sess->vha->work_lock, flags); + return; + } + sess->free_pending = 1; + /* + * Use FCF_ASYNC_SENT flag to block other cmds used in sess + * management from being sent. + */ + sess->flags |= FCF_ASYNC_SENT; + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; + spin_unlock_irqrestore(&sess->vha->work_lock, flags); + + if (sess->se_sess) + vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); + + qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND); + sess->last_rscn_gen = sess->rscn_gen; + sess->last_login_gen = sess->login_gen; + + queue_work(sess->vha->hw->wq, &sess->free_work); +} +EXPORT_SYMBOL(qlt_unreg_sess); + +static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) +{ + struct qla_hw_data *ha = vha->hw; + struct fc_port *sess = NULL; + uint16_t loop_id; + int res = 0; + struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; + unsigned long flags; + + loop_id = le16_to_cpu(n->u.isp24.nport_handle); + if (loop_id == 0xFFFF) { + /* Global event */ + atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + qlt_clear_tgt_db(vha->vha_tgt.qla_tgt); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + } else { + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + } + + ql_dbg(ql_dbg_tgt, vha, 0xe000, + "Using sess for qla_tgt_reset: %p\n", sess); + if (!sess) { + res = -ESRCH; + return res; + } + + ql_dbg(ql_dbg_tgt, vha, 0xe047, + "scsi(%ld): resetting (session %p from port %8phC mcmd %x, " + "loop_id %d)\n", vha->host_no, sess, sess->port_name, + mcmd, loop_id); + + return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK); +} + +static void qla24xx_chk_fcp_state(struct fc_port *sess) +{ + if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) { + sess->logout_on_delete = 0; + sess->logo_ack_needed = 0; + sess->fw_login_state = DSC_LS_PORT_UNAVAIL; + 
} +} + +void qlt_schedule_sess_for_deletion(struct fc_port *sess) +{ + struct qla_tgt *tgt = sess->tgt; + unsigned long flags; + u16 sec; + + switch (sess->disc_state) { + case DSC_DELETE_PEND: + return; + case DSC_DELETED: + if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] && + !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) { + if (tgt && tgt->tgt_stop && tgt->sess_count == 0) + wake_up_all(&tgt->waitQ); + + if (sess->vha->fcport_count == 0) + wake_up_all(&sess->vha->fcport_waitQ); + return; + } + break; + case DSC_UPD_FCPORT: + /* + * This port is not done reporting to upper layer. + * let it finish + */ + sess->next_disc_state = DSC_DELETE_PEND; + sec = jiffies_to_msecs(jiffies - + sess->jiffies_at_registration)/1000; + if (sess->sec_since_registration < sec && sec && !(sec % 5)) { + sess->sec_since_registration = sec; + ql_dbg(ql_dbg_disc, sess->vha, 0xffff, + "%s %8phC : Slow Rport registration(%d Sec)\n", + __func__, sess->port_name, sec); + } + return; + default: + break; + } + + spin_lock_irqsave(&sess->vha->work_lock, flags); + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + spin_unlock_irqrestore(&sess->vha->work_lock, flags); + return; + } + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; + spin_unlock_irqrestore(&sess->vha->work_lock, flags); + + sess->prli_pend_timer = 0; + qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND); + + qla24xx_chk_fcp_state(sess); + + ql_dbg(ql_log_warn, sess->vha, 0xe001, + "Scheduling sess %p for deletion %8phC fc4_type %x\n", + sess, sess->port_name, sess->fc4_type); + + WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); +} + +static void qlt_clear_tgt_db(struct qla_tgt *tgt) +{ + struct fc_port *sess; + scsi_qla_host_t *vha = tgt->vha; + + list_for_each_entry(sess, &vha->vp_fcports, list) { + if (sess->se_sess) + qlt_schedule_sess_for_deletion(sess); + } + + /* At this point tgt could be already dead */ +} + +static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id, + uint16_t *loop_id) +{ + struct qla_hw_data *ha = vha->hw; + dma_addr_t gid_list_dma; + struct gid_list_info *gid_list, *gid; + int res, rc, i; + uint16_t entries; + + gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), + &gid_list_dma, GFP_KERNEL); + if (!gid_list) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044, + "qla_target(%d): DMA Alloc failed of %u\n", + vha->vp_idx, qla2x00_gid_list_size(ha)); + return -ENOMEM; + } + + /* Get list of logged in devices */ + rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries); + if (rc != QLA_SUCCESS) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, + "qla_target(%d): get_id_list() failed: %x\n", + vha->vp_idx, rc); + res = -EBUSY; + goto out_free_id_list; + } + + gid = gid_list; + res = -ENOENT; + for (i = 0; i < entries; i++) { + if (gid->al_pa == s_id.al_pa && + gid->area == s_id.area && + gid->domain == s_id.domain) { + *loop_id = le16_to_cpu(gid->loop_id); + res = 0; + break; + } + gid = (void *)gid + ha->gid_list_info_size; + } + +out_free_id_list: + dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), + gid_list, gid_list_dma); + return res; +} + +/* + * Adds an extra ref to allow to drop hw lock after adding sess to the list. + * Caller must put it. 
+ */ +static struct fc_port *qlt_create_sess( + struct scsi_qla_host *vha, + fc_port_t *fcport, + bool local) +{ + struct qla_hw_data *ha = vha->hw; + struct fc_port *sess = fcport; + unsigned long flags; + + if (vha->vha_tgt.qla_tgt->tgt_stop) + return NULL; + + if (fcport->se_sess) { + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_disc, vha, 0x20f6, + "%s: kref_get_unless_zero failed for %8phC\n", + __func__, sess->port_name); + return NULL; + } + return fcport; + } + sess->tgt = vha->vha_tgt.qla_tgt; + sess->local = local; + + /* + * Under normal circumstances we want to logout from firmware when + * session eventually ends and release corresponding nport handle. + * In the exception cases (e.g. when new PLOGI is waiting) corresponding + * code will adjust these flags as necessary. + */ + sess->logout_on_delete = 1; + sess->keep_nport_handle = 0; + sess->logout_completed = 0; + + if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, + &fcport->port_name[0], sess) < 0) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015, + "(%d) %8phC check_initiator_node_acl failed\n", + vha->vp_idx, fcport->port_name); + return NULL; + } else { + kref_init(&fcport->sess_kref); + /* + * Take an extra reference to ->sess_kref here to handle + * fc_port access across ->tgt.sess_lock reaquire. + */ + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_disc, vha, 0x20f7, + "%s: kref_get_unless_zero failed for %8phC\n", + __func__, sess->port_name); + return NULL; + } + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (!IS_SW_RESV_ADDR(sess->d_id)) + vha->vha_tgt.qla_tgt->sess_count++; + + qlt_do_generation_tick(vha, &sess->generation); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, + "Adding sess %p se_sess %p to tgt %p sess_count %d\n", + sess, sess->se_sess, vha->vha_tgt.qla_tgt, + vha->vha_tgt.qla_tgt->sess_count); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, + "qla_target(%d): %ssession for wwn %8phC (loop_id %d, " + "s_id %x:%x:%x, confirmed completion %ssupported) added\n", + vha->vp_idx, local ? "local " : "", fcport->port_name, + fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa, sess->conf_compl_supported ? 
"" : "not "); + + return sess; +} + +/* + * max_gen - specifies maximum session generation + * at which this deletion requestion is still valid + */ +void +qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) +{ + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct fc_port *sess = fcport; + unsigned long flags; + + if (!vha->hw->tgt.tgt_ops) + return; + + if (!tgt) + return; + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + if (tgt->tgt_stop) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + return; + } + if (!sess->se_sess) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + return; + } + + if (max_gen - sess->generation < 0) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092, + "Ignoring stale deletion request for se_sess %p / sess %p" + " for port %8phC, req_gen %d, sess_gen %d\n", + sess->se_sess, sess, sess->port_name, max_gen, + sess->generation); + return; + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); + + sess->local = 1; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + qlt_schedule_sess_for_deletion(sess); +} + +static inline int test_tgt_sess_count(struct qla_tgt *tgt) +{ + struct qla_hw_data *ha = tgt->ha; + unsigned long flags; + int res; + /* + * We need to protect against race, when tgt is freed before or + * inside wake_up() + */ + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, + "tgt %p, sess_count=%d\n", + tgt, tgt->sess_count); + res = (tgt->sess_count == 0); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + return res; +} + +/* Called by tcm_qla2xxx configfs code */ +int qlt_stop_phase1(struct qla_tgt *tgt) +{ + struct scsi_qla_host *vha = tgt->vha; + struct qla_hw_data *ha = tgt->ha; + unsigned long flags; + + mutex_lock(&ha->optrom_mutex); + mutex_lock(&qla_tgt_mutex); + + if (tgt->tgt_stop || tgt->tgt_stopped) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e, + "Already in tgt->tgt_stop or tgt_stopped state\n"); + mutex_unlock(&qla_tgt_mutex); + mutex_unlock(&ha->optrom_mutex); + return -EPERM; + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n", + vha->host_no, vha); + /* + * Mutex needed to sync with qla_tgt_fc_port_[added,deleted]. + * Lock is needed, because we still can get an incoming packet. 
+ */ + mutex_lock(&vha->vha_tgt.tgt_mutex); + tgt->tgt_stop = 1; + qlt_clear_tgt_db(tgt); + mutex_unlock(&vha->vha_tgt.tgt_mutex); + mutex_unlock(&qla_tgt_mutex); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009, + "Waiting for sess works (tgt %p)", tgt); + spin_lock_irqsave(&tgt->sess_work_lock, flags); + do { + spin_unlock_irqrestore(&tgt->sess_work_lock, flags); + flush_work(&tgt->sess_work); + spin_lock_irqsave(&tgt->sess_work_lock, flags); + } while (!list_empty(&tgt->sess_works_list)); + spin_unlock_irqrestore(&tgt->sess_work_lock, flags); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a, + "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count); + + wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ); + + /* Big hammer */ + if (!ha->flags.host_shutting_down && + (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))) + qlt_disable_vha(vha); + + /* Wait for sessions to clear out (just in case) */ + wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ); + mutex_unlock(&ha->optrom_mutex); + + return 0; +} +EXPORT_SYMBOL(qlt_stop_phase1); + +/* Called by tcm_qla2xxx configfs code */ +void qlt_stop_phase2(struct qla_tgt *tgt) +{ + scsi_qla_host_t *vha = tgt->vha; + + if (tgt->tgt_stopped) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f, + "Already in tgt->tgt_stopped state\n"); + dump_stack(); + return; + } + if (!tgt->tgt_stop) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b, + "%s: phase1 stop is not completed\n", __func__); + dump_stack(); + return; + } + + mutex_lock(&tgt->ha->optrom_mutex); + mutex_lock(&vha->vha_tgt.tgt_mutex); + tgt->tgt_stop = 0; + tgt->tgt_stopped = 1; + mutex_unlock(&vha->vha_tgt.tgt_mutex); + mutex_unlock(&tgt->ha->optrom_mutex); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n", + tgt); + + switch (vha->qlini_mode) { + case QLA2XXX_INI_MODE_EXCLUSIVE: + vha->flags.online = 1; + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + default: + break; + } +} +EXPORT_SYMBOL(qlt_stop_phase2); + +/* Called from qlt_remove_target() -> qla2x00_remove_one() */ +static void qlt_release(struct qla_tgt *tgt) +{ + scsi_qla_host_t *vha = tgt->vha; + void *node; + u64 key = 0; + u16 i; + struct qla_qpair_hint *h; + struct qla_hw_data *ha = vha->hw; + + if (!tgt->tgt_stop && !tgt->tgt_stopped) + qlt_stop_phase1(tgt); + + if (!tgt->tgt_stopped) + qlt_stop_phase2(tgt); + + for (i = 0; i < vha->hw->max_qpairs + 1; i++) { + unsigned long flags; + + h = &tgt->qphints[i]; + if (h->qpair) { + spin_lock_irqsave(h->qpair->qp_lock_ptr, flags); + list_del(&h->hint_elem); + spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags); + h->qpair = NULL; + } + } + kfree(tgt->qphints); + mutex_lock(&qla_tgt_mutex); + list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry); + mutex_unlock(&qla_tgt_mutex); + + btree_for_each_safe64(&tgt->lun_qpair_map, key, node) + btree_remove64(&tgt->lun_qpair_map, key); + + btree_destroy64(&tgt->lun_qpair_map); + + if (vha->vp_idx) + if (ha->tgt.tgt_ops && + ha->tgt.tgt_ops->remove_target && + vha->vha_tgt.target_lport_ptr) + ha->tgt.tgt_ops->remove_target(vha); + + vha->vha_tgt.qla_tgt = NULL; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d, + "Release of tgt %p finished\n", tgt); + + kfree(tgt); +} + +/* ha->hardware_lock supposed to be held on entry */ +static int qlt_sched_sess_work(struct qla_tgt *tgt, int type, + const void *param, unsigned int param_size) +{ + struct qla_tgt_sess_work_param *prm; + unsigned long flags; + + prm = kzalloc(sizeof(*prm), GFP_ATOMIC); + if (!prm) { + ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050, + "qla_target(%d): Unable to create 
session " + "work, command will be refused", 0); + return -ENOMEM; + } + + ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e, + "Scheduling work (type %d, prm %p)" + " to find session for param %p (size %d, tgt %p)\n", + type, prm, param, param_size, tgt); + + prm->type = type; + memcpy(&prm->tm_iocb, param, param_size); + + spin_lock_irqsave(&tgt->sess_work_lock, flags); + list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list); + spin_unlock_irqrestore(&tgt->sess_work_lock, flags); + + schedule_work(&tgt->sess_work); + + return 0; +} + +/* + * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire + */ +static void qlt_send_notify_ack(struct qla_qpair *qpair, + struct imm_ntfy_from_isp *ntfy, + uint32_t add_flags, uint16_t resp_code, int resp_code_valid, + uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan) +{ + struct scsi_qla_host *vha = qpair->vha; + struct qla_hw_data *ha = vha->hw; + request_t *pkt; + struct nack_to_isp *nack; + + if (!ha->flags.fw_started) + return; + + ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); + + pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL); + if (!pkt) { + ql_dbg(ql_dbg_tgt, vha, 0xe049, + "qla_target(%d): %s failed: unable to allocate " + "request packet\n", vha->vp_idx, __func__); + return; + } + + if (vha->vha_tgt.qla_tgt != NULL) + vha->vha_tgt.qla_tgt->notify_ack_expected++; + + pkt->entry_type = NOTIFY_ACK_TYPE; + pkt->entry_count = 1; + + nack = (struct nack_to_isp *)pkt; + nack->ox_id = ntfy->ox_id; + + nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE; + nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; + if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { + nack->u.isp24.flags = ntfy->u.isp24.flags & + cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); + } + nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; + nack->u.isp24.status = ntfy->u.isp24.status; + nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; + nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; + nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; + nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; + nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; + nack->u.isp24.srr_flags = cpu_to_le16(srr_flags); + nack->u.isp24.srr_reject_code = srr_reject_code; + nack->u.isp24.srr_reject_code_expl = srr_explan; + nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; + + /* TODO qualify this with EDIF enable */ + if (ntfy->u.isp24.status_subcode == ELS_PLOGI && + (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) { + nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP); + } + + ql_dbg(ql_dbg_tgt, vha, 0xe005, + "qla_target(%d): Sending 24xx Notify Ack %d\n", + vha->vp_idx, nack->u.isp24.status); + + /* Memory Barrier */ + wmb(); + qla2x00_start_iocbs(vha, qpair->req); +} + +static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd) +{ + struct scsi_qla_host *vha = mcmd->vha; + struct qla_hw_data *ha = vha->hw; + struct abts_resp_to_24xx *resp; + __le32 f_ctl; + uint32_t h; + uint8_t *p; + int rc; + struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts; + struct qla_qpair *qpair = mcmd->qpair; + + ql_dbg(ql_dbg_tgt, vha, 0xe006, + "Sending task mgmt ABTS response (ha=%p, status=%x)\n", + ha, mcmd->fc_tm_rsp); + + rc = qlt_check_reserve_free_req(qpair, 1); + if (rc) { + ql_dbg(ql_dbg_tgt, vha, 0xe04a, + "qla_target(%d): %s failed: unable to allocate request packet\n", + vha->vp_idx, __func__); + return -EAGAIN; + } + + resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr; + memset(resp, 
0, sizeof(*resp)); + + h = qlt_make_handle(qpair); + if (unlikely(h == QLA_TGT_NULL_HANDLE)) { + /* + * CTIO type 7 from the firmware doesn't provide a way to + * know the initiator's LOOP ID, hence we can't find + * the session and, so, the command. + */ + return -EAGAIN; + } else { + qpair->req->outstanding_cmds[h] = (srb_t *)mcmd; + } + + resp->handle = make_handle(qpair->req->id, h); + resp->entry_type = ABTS_RESP_24XX; + resp->entry_count = 1; + resp->nport_handle = abts->nport_handle; + resp->vp_index = vha->vp_idx; + resp->sof_type = abts->sof_type; + resp->exchange_address = abts->exchange_address; + resp->fcp_hdr_le = abts->fcp_hdr_le; + f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP | + F_CTL_LAST_SEQ | F_CTL_END_SEQ | + F_CTL_SEQ_INITIATIVE); + p = (uint8_t *)&f_ctl; + resp->fcp_hdr_le.f_ctl[0] = *p++; + resp->fcp_hdr_le.f_ctl[1] = *p++; + resp->fcp_hdr_le.f_ctl[2] = *p; + + resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id; + resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id; + + resp->exchange_addr_to_abort = abts->exchange_addr_to_abort; + if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) { + resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC; + resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID; + resp->payload.ba_acct.low_seq_cnt = 0x0000; + resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF); + resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id; + resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id; + } else { + resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT; + resp->payload.ba_rjt.reason_code = + BA_RJT_REASON_CODE_UNABLE_TO_PERFORM; + /* Other bytes are zero */ + } + + vha->vha_tgt.qla_tgt->abts_resp_expected++; + + /* Memory Barrier */ + wmb(); + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(vha, qpair->req); + + return rc; +} + +/* + * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reacquire + */ +static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair, + struct abts_recv_from_24xx *abts, uint32_t status, + bool ids_reversed) +{ + struct scsi_qla_host *vha = qpair->vha; + struct qla_hw_data *ha = vha->hw; + struct abts_resp_to_24xx *resp; + __le32 f_ctl; + uint8_t *p; + + ql_dbg(ql_dbg_tgt, vha, 0xe006, + "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n", + ha, abts, status); + + resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, + NULL); + if (!resp) { + ql_dbg(ql_dbg_tgt, vha, 0xe04a, + "qla_target(%d): %s failed: unable to allocate " + "request packet", vha->vp_idx, __func__); + return; + } + + resp->entry_type = ABTS_RESP_24XX; + resp->handle = QLA_TGT_SKIP_HANDLE; + resp->entry_count = 1; + resp->nport_handle = abts->nport_handle; + resp->vp_index = vha->vp_idx; + resp->sof_type = abts->sof_type; + resp->exchange_address = abts->exchange_address; + resp->fcp_hdr_le = abts->fcp_hdr_le; + f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP | + F_CTL_LAST_SEQ | F_CTL_END_SEQ | + F_CTL_SEQ_INITIATIVE); + p = (uint8_t *)&f_ctl; + resp->fcp_hdr_le.f_ctl[0] = *p++; + resp->fcp_hdr_le.f_ctl[1] = *p++; + resp->fcp_hdr_le.f_ctl[2] = *p; + if (ids_reversed) { + resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id; + resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id; + } else { + resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id; + resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id; + } + resp->exchange_addr_to_abort = abts->exchange_addr_to_abort; + if (status == FCP_TMF_CMPL) { + resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC; + resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID; + resp->payload.ba_acct.low_seq_cnt = 0x0000; + resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF); + resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id; + resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id; + } else { + resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT; + resp->payload.ba_rjt.reason_code = + BA_RJT_REASON_CODE_UNABLE_TO_PERFORM; + /* Other bytes are zero */ + } + + vha->vha_tgt.qla_tgt->abts_resp_expected++; + + /* Memory Barrier */ + wmb(); + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(vha, qpair->req); +} + +/* + * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire + */ +static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, + struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd) +{ + struct ctio7_to_24xx *ctio; + u16 tmp; + struct abts_recv_from_24xx *entry; + + ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL); + if (ctio == NULL) { + ql_dbg(ql_dbg_tgt, vha, 0xe04b, + "qla_target(%d): %s failed: unable to allocate " + "request packet\n", vha->vp_idx, __func__); + return; + } + + if (mcmd) + /* abts from remote port */ + entry = &mcmd->orig_iocb.abts; + else + /* abts from this driver. */ + entry = (struct abts_recv_from_24xx *)pkt; + + /* + * On entry this is the firmware's response to the ABTS response + * we generated, so the ID fields in it are reversed.
+ */ + + ctio->entry_type = CTIO_TYPE7; + ctio->entry_count = 1; + ctio->nport_handle = entry->nport_handle; + ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); + ctio->vp_index = vha->vp_idx; + ctio->exchange_addr = entry->exchange_addr_to_abort; + tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE); + + if (mcmd) { + ctio->initiator_id = entry->fcp_hdr_le.s_id; + + if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) + tmp |= (mcmd->abort_io_attr << 9); + else if (qpair->retry_term_cnt & 1) + tmp |= (0x4 << 9); + } else { + ctio->initiator_id = entry->fcp_hdr_le.d_id; + + if (qpair->retry_term_cnt & 1) + tmp |= (0x4 << 9); + } + ctio->u.status1.flags = cpu_to_le16(tmp); + ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id; + + ql_dbg(ql_dbg_tgt, vha, 0xe007, + "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n", + le16_to_cpu(ctio->u.status1.flags), + le16_to_cpu(ctio->u.status1.ox_id), + (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0); + + /* Memory Barrier */ + wmb(); + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(vha, qpair->req); + + if (mcmd) + qlt_build_abts_resp_iocb(mcmd); + else + qlt_24xx_send_abts_resp(qpair, + (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true); + +} + +/* drop cmds for the given lun + * XXX only looks for cmds on the port through which lun reset was recieved + * XXX does not go through the list of other port (which may have cmds + * for the same lun) + */ +static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id) +{ + struct qla_tgt_sess_op *op; + struct qla_tgt_cmd *cmd; + uint32_t key; + unsigned long flags; + + key = sid_to_key(s_id); + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { + uint32_t op_key; + u64 op_lun; + + op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + op_lun = scsilun_to_int( + (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); + if (op_key == key && op_lun == lun) + op->aborted = true; + } + + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { + uint32_t cmd_key; + u64 cmd_lun; + + cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); + cmd_lun = scsilun_to_int( + (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun); + if (cmd_key == key && cmd_lun == lun) + cmd->aborted = 1; + } + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); +} + +static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha, + uint64_t unpacked_lun) +{ + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_qpair_hint *h = NULL; + + if (vha->flags.qpairs_available) { + h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun); + if (!h) + h = &tgt->qphints[0]; + } else { + h = &tgt->qphints[0]; + } + + return h; +} + +static void qlt_do_tmr_work(struct work_struct *work) +{ + struct qla_tgt_mgmt_cmd *mcmd = + container_of(work, struct qla_tgt_mgmt_cmd, work); + struct qla_hw_data *ha = mcmd->vha->hw; + int rc; + uint32_t tag; + unsigned long flags; + + switch (mcmd->tmr_func) { + case QLA_TGT_ABTS: + tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort); + break; + default: + tag = 0; + break; + } + + rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun, + mcmd->tmr_func, tag); + + if (rc != 0) { + spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags); + switch (mcmd->tmr_func) { + case QLA_TGT_ABTS: + mcmd->fc_tm_rsp = FCP_TMF_REJECTED; + qlt_build_abts_resp_iocb(mcmd); + break; + case 
QLA_TGT_LUN_RESET: + case QLA_TGT_CLEAR_TS: + case QLA_TGT_ABORT_TS: + case QLA_TGT_CLEAR_ACA: + case QLA_TGT_TARGET_RESET: + qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio, + qla_sam_status); + break; + + case QLA_TGT_ABORT_ALL: + case QLA_TGT_NEXUS_LOSS_SESS: + case QLA_TGT_NEXUS_LOSS: + qlt_send_notify_ack(mcmd->qpair, + &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); + break; + } + spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags); + + ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052, + "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", + mcmd->vha->vp_idx, rc); + mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); + } +} + +/* ha->hardware_lock supposed to be held on entry */ +static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, + struct abts_recv_from_24xx *abts, struct fc_port *sess) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_mgmt_cmd *mcmd; + struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0]; + struct qla_tgt_cmd *abort_cmd; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, + "qla_target(%d): task abort (tag=%d)\n", + vha->vp_idx, abts->exchange_addr_to_abort); + + mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); + if (mcmd == NULL) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051, + "qla_target(%d): %s: Allocation of ABORT cmd failed", + vha->vp_idx, __func__); + return -ENOMEM; + } + memset(mcmd, 0, sizeof(*mcmd)); + mcmd->cmd_type = TYPE_TGT_TMCMD; + mcmd->sess = sess; + memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); + mcmd->reset_count = ha->base_qpair->chip_reset; + mcmd->tmr_func = QLA_TGT_ABTS; + mcmd->qpair = h->qpair; + mcmd->vha = vha; + + /* + * LUN is looked up by target-core internally based on the passed + * abts->exchange_addr_to_abort tag. + */ + mcmd->se_cmd.cpuid = h->cpuid; + + abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess, + le32_to_cpu(abts->exchange_addr_to_abort)); + if (!abort_cmd) { + mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); + return -EIO; + } + mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun; + + if (abort_cmd->qpair) { + mcmd->qpair = abort_cmd->qpair; + mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid; + mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr; + mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID; + } + + INIT_WORK(&mcmd->work, qlt_do_tmr_work); + queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work); + + return 0; +} + +/* + * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reacquire + */ +static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, + struct abts_recv_from_24xx *abts) +{ + struct qla_hw_data *ha = vha->hw; + struct fc_port *sess; + uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort); + be_id_t s_id; + int rc; + unsigned long flags; + + if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053, + "qla_target(%d): ABTS: Abort Sequence not " + "supported\n", vha->vp_idx); + qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, + false); + return; + } + + if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010, + "qla_target(%d): ABTS: Unknown Exchange " + "Address received\n", vha->vp_idx); + qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, + false); + return; + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011, + "qla_target(%d): task abort (s_id=%x:%x:%x, " + "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain, + abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag, + le32_to_cpu(abts->fcp_hdr_le.parameter)); + + s_id = le_id_to_be(abts->fcp_hdr_le.s_id); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); + if (!sess) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, + "qla_target(%d): task abort for non-existent session\n", + vha->vp_idx); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, + false); + return; + } + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + + if (sess->deleted) { + qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, + false); + return; + } + + rc = __qlt_24xx_handle_abts(vha, abts, sess); + if (rc != 0) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, + "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n", + vha->vp_idx, rc); + qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, + false); + return; + } +} + +/* + * ha->hardware_lock supposed to be held on entry.
Might drop it, then reaquire + */ +static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair, + struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code) +{ + struct scsi_qla_host *ha = mcmd->vha; + struct atio_from_isp *atio = &mcmd->orig_iocb.atio; + struct ctio7_to_24xx *ctio; + uint16_t temp; + + ql_dbg(ql_dbg_tgt, ha, 0xe008, + "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", + ha, atio, resp_code); + + + ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL); + if (ctio == NULL) { + ql_dbg(ql_dbg_tgt, ha, 0xe04c, + "qla_target(%d): %s failed: unable to allocate " + "request packet\n", ha->vp_idx, __func__); + return; + } + + ctio->entry_type = CTIO_TYPE7; + ctio->entry_count = 1; + ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id); + ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); + ctio->vp_index = ha->vp_idx; + ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); + ctio->exchange_addr = atio->u.isp24.exchange_addr; + temp = (atio->u.isp24.attr << 9)| + CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; + ctio->u.status1.flags = cpu_to_le16(temp); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + ctio->u.status1.ox_id = cpu_to_le16(temp); + ctio->u.status1.scsi_status = + cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); + ctio->u.status1.response_len = cpu_to_le16(8); + ctio->u.status1.sense_data[0] = resp_code; + + /* Memory Barrier */ + wmb(); + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(ha, qpair->req); +} + +void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) +{ + mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); +} +EXPORT_SYMBOL(qlt_free_mcmd); + +/* + * ha->hardware_lock supposed to be held on entry. Might drop it, then + * reacquire + */ +void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, + uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq) +{ + struct atio_from_isp *atio = &cmd->atio; + struct ctio7_to_24xx *ctio; + uint16_t temp; + struct scsi_qla_host *vha = cmd->vha; + + ql_dbg(ql_dbg_tgt_dif, vha, 0x3066, + "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, " + "sense_key=%02x, asc=%02x, ascq=%02x", + vha, atio, scsi_status, sense_key, asc, ascq); + + ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); + if (!ctio) { + ql_dbg(ql_dbg_async, vha, 0x3067, + "qla2x00t(%ld): %s failed: unable to allocate request packet", + vha->host_no, __func__); + goto out; + } + + ctio->entry_type = CTIO_TYPE7; + ctio->entry_count = 1; + ctio->handle = QLA_TGT_SKIP_HANDLE; + ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id); + ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); + ctio->vp_index = vha->vp_idx; + ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); + ctio->exchange_addr = atio->u.isp24.exchange_addr; + temp = (atio->u.isp24.attr << 9) | + CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; + ctio->u.status1.flags = cpu_to_le16(temp); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + ctio->u.status1.ox_id = cpu_to_le16(temp); + ctio->u.status1.scsi_status = + cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status); + ctio->u.status1.response_len = cpu_to_le16(18); + ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); + + if (ctio->u.status1.residual != 0) + ctio->u.status1.scsi_status |= + cpu_to_le16(SS_RESIDUAL_UNDER); + + /* Fixed format sense data. 
*/ + ctio->u.status1.sense_data[0] = 0x70; + ctio->u.status1.sense_data[2] = sense_key; + /* Additional sense length */ + ctio->u.status1.sense_data[7] = 0xa; + /* ASC and ASCQ */ + ctio->u.status1.sense_data[12] = asc; + ctio->u.status1.sense_data[13] = ascq; + + /* Memory Barrier */ + wmb(); + + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(vha, qpair->req); + +out: + return; +} + +/* callback from target fabric module code */ +void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) +{ + struct scsi_qla_host *vha = mcmd->sess->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + struct qla_qpair *qpair = mcmd->qpair; + bool free_mcmd = true; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013, + "TM response mcmd (%p) status %#x state %#x", + mcmd, mcmd->fc_tm_rsp, mcmd->flags); + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + + if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) { + /* + * Either the port is not online or this request was from + * previous life, just abort the processing. + */ + ql_dbg(ql_dbg_async, vha, 0xe100, + "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n", + vha->flags.online, qla2x00_reset_active(vha), + mcmd->reset_count, qpair->chip_reset); + ha->tgt.tgt_ops->free_mcmd(mcmd); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + return; + } + + if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) { + switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) { + case ELS_LOGO: + case ELS_PRLO: + case ELS_TPRLO: + ql_dbg(ql_dbg_disc, vha, 0x2106, + "TM response logo %8phC status %#x state %#x", + mcmd->sess->port_name, mcmd->fc_tm_rsp, + mcmd->flags); + qlt_schedule_sess_for_deletion(mcmd->sess); + break; + default: + qlt_send_notify_ack(vha->hw->base_qpair, + &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); + break; + } + } else { + if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) { + qlt_build_abts_resp_iocb(mcmd); + free_mcmd = false; + } else + qlt_24xx_send_task_mgmt_ctio(qpair, mcmd, + mcmd->fc_tm_rsp); + } + /* + * Make the callback for ->free_mcmd() to queue_work() and invoke + * target_put_sess_cmd() to drop cmd_kref to 1. The final + * target_put_sess_cmd() call will be made from TFO->check_stop_free() + * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd + * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() -> + * qlt_xmit_tm_rsp() returns here.. 
+ */ + if (free_mcmd) + ha->tgt.tgt_ops->free_mcmd(mcmd); + + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); +} +EXPORT_SYMBOL(qlt_xmit_tm_rsp); + +/* No locks */ +static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) +{ + struct qla_tgt_cmd *cmd = prm->cmd; + + BUG_ON(cmd->sg_cnt == 0); + + prm->sg = (struct scatterlist *)cmd->sg; + prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg, + cmd->sg_cnt, cmd->dma_data_direction); + if (unlikely(prm->seg_cnt == 0)) + goto out_err; + + prm->cmd->sg_mapped = 1; + + if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) { + /* + * If greater than four sg entries then we need to allocate + * the continuation entries + */ + if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX) + prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - + QLA_TGT_DATASEGS_PER_CMD_24XX, + QLA_TGT_DATASEGS_PER_CONT_24XX); + } else { + /* DIF */ + if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || + (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { + prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz); + prm->tot_dsds = prm->seg_cnt; + } else + prm->tot_dsds = prm->seg_cnt; + + if (cmd->prot_sg_cnt) { + prm->prot_sg = cmd->prot_sg; + prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, + cmd->prot_sg, cmd->prot_sg_cnt, + cmd->dma_data_direction); + if (unlikely(prm->prot_seg_cnt == 0)) + goto out_err; + + if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || + (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { + /* Dif Bundling not support here */ + prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen, + cmd->blk_sz); + prm->tot_dsds += prm->prot_seg_cnt; + } else + prm->tot_dsds += prm->prot_seg_cnt; + } + } + + return 0; + +out_err: + ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d, + "qla_target(%d): PCI mapping failed: sg_cnt=%d", + 0, prm->cmd->sg_cnt); + return -1; +} + +static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) +{ + struct qla_hw_data *ha; + struct qla_qpair *qpair; + + if (!cmd->sg_mapped) + return; + + qpair = cmd->qpair; + + dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt, + cmd->dma_data_direction); + cmd->sg_mapped = 0; + + if (cmd->prot_sg_cnt) + dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt, + cmd->dma_data_direction); + + if (!cmd->ctx) + return; + ha = vha->hw; + if (cmd->ctx_dsd_alloced) + qla2x00_clean_dsd_pool(ha, cmd->ctx); + + dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); +} + +static int qlt_check_reserve_free_req(struct qla_qpair *qpair, + uint32_t req_cnt) +{ + uint32_t cnt; + struct req_que *req = qpair->req; + + if (req->cnt < (req_cnt + 2)) { + cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr : + rd_reg_dword_relaxed(req->req_q_out)); + + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - (req->ring_index - cnt); + + if (unlikely(req->cnt < (req_cnt + 2))) + return -EAGAIN; + } + + req->cnt -= req_cnt; + + return 0; +} + +/* + * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire + */ +static inline void *qlt_get_req_pkt(struct req_que *req) +{ + /* Adjust ring index. 
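 * A stand-alone sketch of the same wrap-around advance used below, with
 * hypothetical names, illustration only:
 *
 *     struct ring_slot { unsigned char bytes[64]; };  // placeholder IOCB slot
 *
 *     struct ring {
 *             unsigned int index;       // slot most recently handed out
 *             unsigned int length;      // total number of slots
 *             struct ring_slot *slots;  // the ring itself
 *     };
 *
 *     // Advance to the next slot, wrapping back to the start of the ring,
 *     // and return a pointer to it (mirrors ring_index/ring_ptr below).
 *     static struct ring_slot *ring_next(struct ring *r)
 *     {
 *             r->index++;
 *             if (r->index == r->length)
 *                     r->index = 0;
 *             return &r->slots[r->index];
 *     }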
*/ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else { + req->ring_ptr++; + } + return (cont_entry_t *)req->ring_ptr; +} + +/* ha->hardware_lock supposed to be held on entry */ +static inline uint32_t qlt_make_handle(struct qla_qpair *qpair) +{ + uint32_t h; + int index; + uint8_t found = 0; + struct req_que *req = qpair->req; + + h = req->current_outstanding_cmd; + + for (index = 1; index < req->num_outstanding_cmds; index++) { + h++; + if (h == req->num_outstanding_cmds) + h = 1; + + if (h == QLA_TGT_SKIP_HANDLE) + continue; + + if (!req->outstanding_cmds[h]) { + found = 1; + break; + } + } + + if (found) { + req->current_outstanding_cmd = h; + } else { + ql_dbg(ql_dbg_io, qpair->vha, 0x305b, + "qla_target(%d): Ran out of empty cmd slots\n", + qpair->vha->vp_idx); + h = QLA_TGT_NULL_HANDLE; + } + + return h; +} + +/* ha->hardware_lock supposed to be held on entry */ +static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair, + struct qla_tgt_prm *prm) +{ + uint32_t h; + struct ctio7_to_24xx *pkt; + struct atio_from_isp *atio = &prm->cmd->atio; + uint16_t temp; + struct qla_tgt_cmd *cmd = prm->cmd; + + pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr; + prm->pkt = pkt; + memset(pkt, 0, sizeof(*pkt)); + + pkt->entry_type = CTIO_TYPE7; + pkt->entry_count = (uint8_t)prm->req_cnt; + pkt->vp_index = prm->cmd->vp_idx; + + h = qlt_make_handle(qpair); + if (unlikely(h == QLA_TGT_NULL_HANDLE)) { + /* + * CTIO type 7 from the firmware doesn't provide a way to + * know the initiator's LOOP ID, hence we can't find + * the session and, so, the command. + */ + return -EAGAIN; + } else + qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; + + pkt->handle = make_handle(qpair->req->id, h); + pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; + pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); + pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); + pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); + pkt->exchange_addr = atio->u.isp24.exchange_addr; + temp = atio->u.isp24.attr << 9; + pkt->u.status0.flags |= cpu_to_le16(temp); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + pkt->u.status0.ox_id = cpu_to_le16(temp); + pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); + + if (cmd->edif) { + if (cmd->dma_data_direction == DMA_TO_DEVICE) + prm->cmd->sess->edif.rx_bytes += cmd->bufflen; + if (cmd->dma_data_direction == DMA_FROM_DEVICE) + prm->cmd->sess->edif.tx_bytes += cmd->bufflen; + + pkt->u.status0.edif_flags |= EF_EN_EDIF; + } + + return 0; +} + +/* + * ha->hardware_lock supposed to be held on entry. We have already made sure + * that there is sufficient amount of request entries to not drop it. + */ +static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm) +{ + int cnt; + struct dsd64 *cur_dsd; + + /* Build continuation packets */ + while (prm->seg_cnt > 0) { + cont_a64_entry_t *cont_pkt64 = + (cont_a64_entry_t *)qlt_get_req_pkt( + prm->cmd->qpair->req); + + /* + * Make sure that from cont_pkt64 none of + * 64-bit specific fields used for 32-bit + * addressing. Cast to (cont_entry_t *) for + * that. 
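 * As context for the loops in this function, a stand-alone sketch of how
 * segments are split between the command IOCB and continuation IOCBs
 * (hypothetical types; the per-IOCB limits are taken as parameters rather
 * than the QLA_TGT_DATASEGS_* constants), illustration only:
 *
 *     #include <stdint.h>
 *
 *     struct dsd64 {                  // one 64-bit data segment descriptor
 *             uint64_t address;
 *             uint32_t length;
 *     };
 *
 *     struct seg { uint64_t dma_addr; uint32_t len; };
 *
 *     // Fill up to per_cmd descriptors in the command IOCB, then per_cont
 *     // per continuation IOCB; cont_dsd is a flat array of n_cont * per_cont
 *     // entries.  Returns how many continuation IOCBs were consumed.
 *     static unsigned int load_dsds(struct dsd64 *cmd_dsd,
 *                                   struct dsd64 *cont_dsd,
 *                                   const struct seg *sg, unsigned int nseg,
 *                                   unsigned int per_cmd, unsigned int per_cont)
 *     {
 *             unsigned int i = 0, conts = 0;
 *
 *             for (; i < nseg && i < per_cmd; i++) {
 *                     cmd_dsd[i].address = sg[i].dma_addr;
 *                     cmd_dsd[i].length  = sg[i].len;
 *             }
 *             while (i < nseg) {
 *                     unsigned int n;
 *
 *                     for (n = 0; i < nseg && n < per_cont; n++, i++) {
 *                             cont_dsd[conts * per_cont + n].address = sg[i].dma_addr;
 *                             cont_dsd[conts * per_cont + n].length  = sg[i].len;
 *                     }
 *                     conts++;
 *             }
 *             return conts;
 *     }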
+ */ + + memset(cont_pkt64, 0, sizeof(*cont_pkt64)); + + cont_pkt64->entry_count = 1; + cont_pkt64->sys_define = 0; + + cont_pkt64->entry_type = CONTINUE_A64_TYPE; + cur_dsd = cont_pkt64->dsd; + + /* Load continuation entry data segments */ + for (cnt = 0; + cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt; + cnt++, prm->seg_cnt--) { + append_dsd64(&cur_dsd, prm->sg); + prm->sg = sg_next(prm->sg); + } + } +} + +/* + * ha->hardware_lock supposed to be held on entry. We have already made sure + * that there is sufficient amount of request entries to not drop it. + */ +static void qlt_load_data_segments(struct qla_tgt_prm *prm) +{ + int cnt; + struct dsd64 *cur_dsd; + struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt; + + pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen); + + /* Setup packet address segment pointer */ + cur_dsd = &pkt24->u.status0.dsd; + + /* Set total data segment count */ + if (prm->seg_cnt) + pkt24->dseg_count = cpu_to_le16(prm->seg_cnt); + + if (prm->seg_cnt == 0) { + /* No data transfer */ + cur_dsd->address = 0; + cur_dsd->length = 0; + return; + } + + /* If scatter gather */ + + /* Load command entry data segments */ + for (cnt = 0; + (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt; + cnt++, prm->seg_cnt--) { + append_dsd64(&cur_dsd, prm->sg); + prm->sg = sg_next(prm->sg); + } + + qlt_load_cont_data_segments(prm); +} + +static inline int qlt_has_data(struct qla_tgt_cmd *cmd) +{ + return cmd->bufflen > 0; +} + +static void qlt_print_dif_err(struct qla_tgt_prm *prm) +{ + struct qla_tgt_cmd *cmd; + struct scsi_qla_host *vha; + + /* asc 0x10=dif error */ + if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) { + cmd = prm->cmd; + vha = cmd->vha; + /* ASCQ */ + switch (prm->sense_buffer[13]) { + case 1: + ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b, + "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + case 2: + ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c, + "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + case 3: + ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f, + "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + default: + ql_dbg(ql_dbg_tgt_dif, vha, 0xe010, + "BE detected Dif ERR: lba[%llx|%lld] len[%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + } + ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16); + } +} + +/* + * Called without ha->hardware_lock held + */ +static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, + struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status, + uint32_t *full_req_cnt) +{ + struct se_cmd *se_cmd = &cmd->se_cmd; + struct qla_qpair *qpair = cmd->qpair; + + prm->cmd = cmd; + prm->tgt = cmd->tgt; + prm->pkt = NULL; + prm->rq_result = scsi_status; + prm->sense_buffer = &cmd->sense_buffer[0]; + prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER; + prm->sg = NULL; + prm->seg_cnt = -1; + prm->req_cnt = 1; + prm->residual = 0; + prm->add_status_pkt = 0; + prm->prot_sg = NULL; + prm->prot_seg_cnt = 0; + prm->tot_dsds = 0; + + if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) { + if (qlt_pci_map_calc_cnt(prm) != 0) + return -EAGAIN; + } + + *full_req_cnt = prm->req_cnt; + 
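/*
 * full_req_cnt above starts from the IOCB count computed in
 * qlt_pci_map_calc_cnt(): one command IOCB plus enough continuation IOCBs
 * for the remaining segments.  A tiny stand-alone sketch of that arithmetic
 * (hypothetical helper, illustration only):
 *
 *     #include <stdint.h>
 *
 *     // Request-queue entries needed for nseg data segments, given the
 *     // per-command and per-continuation descriptor limits.
 *     static uint32_t req_entries_for(uint32_t nseg, uint32_t per_cmd,
 *                                     uint32_t per_cont)
 *     {
 *             uint32_t cnt = 1;                 // the command IOCB itself
 *
 *             if (nseg > per_cmd)               // DIV_ROUND_UP of the rest
 *                     cnt += (nseg - per_cmd + per_cont - 1) / per_cont;
 *             return cnt;
 *     }
 *
 * A status-only response keeps the count at 1, matching prm->req_cnt above.
 */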
+ if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + prm->residual = se_cmd->residual_count; + ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c, + "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", + prm->residual, se_cmd->tag, + se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, + cmd->bufflen, prm->rq_result); + prm->rq_result |= SS_RESIDUAL_UNDER; + } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + prm->residual = se_cmd->residual_count; + ql_dbg_qp(ql_dbg_io, qpair, 0x305d, + "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", + prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? + se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result); + prm->rq_result |= SS_RESIDUAL_OVER; + } + + if (xmit_type & QLA_TGT_XMIT_STATUS) { + /* + * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be + * ignored in *xmit_response() below + */ + if (qlt_has_data(cmd)) { + if (QLA_TGT_SENSE_VALID(prm->sense_buffer) || + (IS_FWI2_CAPABLE(cmd->vha->hw) && + (prm->rq_result != 0))) { + prm->add_status_pkt = 1; + (*full_req_cnt)++; + } + } + } + + return 0; +} + +static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd, + int sending_sense) +{ + if (cmd->qpair->enable_class_2) + return 0; + + if (sending_sense) + return cmd->conf_compl_supported; + else + return cmd->qpair->enable_explicit_conf && + cmd->conf_compl_supported; +} + +static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, + struct qla_tgt_prm *prm) +{ + prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len, + (uint32_t)sizeof(ctio->u.status1.sense_data)); + ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS); + if (qlt_need_explicit_conf(prm->cmd, 0)) { + ctio->u.status0.flags |= cpu_to_le16( + CTIO7_FLAGS_EXPLICIT_CONFORM | + CTIO7_FLAGS_CONFORM_REQ); + } + ctio->u.status0.residual = cpu_to_le32(prm->residual); + ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result); + if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) { + int i; + + if (qlt_need_explicit_conf(prm->cmd, 1)) { + if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) { + ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017, + "Skipping EXPLICIT_CONFORM and " + "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " + "non GOOD status\n"); + goto skip_explict_conf; + } + ctio->u.status1.flags |= cpu_to_le16( + CTIO7_FLAGS_EXPLICIT_CONFORM | + CTIO7_FLAGS_CONFORM_REQ); + } +skip_explict_conf: + ctio->u.status1.flags &= + ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); + ctio->u.status1.flags |= + cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); + ctio->u.status1.scsi_status |= + cpu_to_le16(SS_SENSE_LEN_VALID); + ctio->u.status1.sense_length = + cpu_to_le16(prm->sense_buffer_len); + for (i = 0; i < prm->sense_buffer_len/4; i++) { + uint32_t v; + + v = get_unaligned_be32( + &((uint32_t *)prm->sense_buffer)[i]); + put_unaligned_le32(v, + &((uint32_t *)ctio->u.status1.sense_data)[i]); + } + qlt_print_dif_err(prm); + + } else { + ctio->u.status1.flags &= + ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); + ctio->u.status1.flags |= + cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); + ctio->u.status1.sense_length = 0; + memset(ctio->u.status1.sense_data, 0, + sizeof(ctio->u.status1.sense_data)); + } + + /* Sense with len > 24, is it possible ??? 
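 * As an aside on the copy loop just above: the sense bytes are moved into
 * the IOCB 32 bits at a time, read big-endian and stored little-endian.
 * A stand-alone sketch of that word-wise conversion (hypothetical helper,
 * illustration only):
 *
 *     #include <stdint.h>
 *
 *     // Copy len bytes (a multiple of 4) from a big-endian buffer into a
 *     // little-endian one; each 4-byte group ends up byte-reversed.
 *     static void copy_be32_to_le32(uint8_t *dst, const uint8_t *src,
 *                                   unsigned int len)
 *     {
 *             unsigned int i;
 *
 *             for (i = 0; i + 4 <= len; i += 4) {
 *                     dst[i + 0] = src[i + 3];
 *                     dst[i + 1] = src[i + 2];
 *                     dst[i + 2] = src[i + 1];
 *                     dst[i + 3] = src[i + 0];
 *             }
 *     }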
*/ +} + +static inline int +qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) +{ + switch (se_cmd->prot_op) { + case TARGET_PROT_DOUT_INSERT: + case TARGET_PROT_DIN_STRIP: + if (ql2xenablehba_err_chk >= 1) + return 1; + break; + case TARGET_PROT_DOUT_PASS: + case TARGET_PROT_DIN_PASS: + if (ql2xenablehba_err_chk >= 2) + return 1; + break; + case TARGET_PROT_DIN_INSERT: + case TARGET_PROT_DOUT_STRIP: + return 1; + default: + break; + } + return 0; +} + +static inline int +qla_tgt_ref_mask_check(struct se_cmd *se_cmd) +{ + switch (se_cmd->prot_op) { + case TARGET_PROT_DIN_INSERT: + case TARGET_PROT_DOUT_INSERT: + case TARGET_PROT_DIN_STRIP: + case TARGET_PROT_DOUT_STRIP: + case TARGET_PROT_DIN_PASS: + case TARGET_PROT_DOUT_PASS: + return 1; + default: + return 0; + } + return 0; +} + +/* + * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command + */ +static void +qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx, + uint16_t *pfw_prot_opts) +{ + struct se_cmd *se_cmd = &cmd->se_cmd; + uint32_t lba = 0xffffffff & se_cmd->t_task_lba; + scsi_qla_host_t *vha = cmd->tgt->vha; + struct qla_hw_data *ha = vha->hw; + uint32_t t32 = 0; + + /* + * wait till Mode Sense/Select cmd, modepage Ah, subpage 2 + * have been immplemented by TCM, before AppTag is avail. + * Look for modesense_handlers[] + */ + ctx->app_tag = 0; + ctx->app_tag_mask[0] = 0x0; + ctx->app_tag_mask[1] = 0x0; + + if (IS_PI_UNINIT_CAPABLE(ha)) { + if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || + (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) + *pfw_prot_opts |= PO_DIS_VALD_APP_ESC; + else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) + *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; + } + + t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts); + + switch (se_cmd->prot_type) { + case TARGET_DIF_TYPE0_PROT: + /* + * No check for ql2xenablehba_err_chk, as it + * would be an I/O error if hba tag generation + * is not done. + */ + ctx->ref_tag = cpu_to_le32(lba); + /* enable ALL bytes of the ref tag */ + ctx->ref_tag_mask[0] = 0xff; + ctx->ref_tag_mask[1] = 0xff; + ctx->ref_tag_mask[2] = 0xff; + ctx->ref_tag_mask[3] = 0xff; + break; + case TARGET_DIF_TYPE1_PROT: + /* + * For TYPE 1 protection: 16 bit GUARD tag, 32 bit + * REF tag, and 16 bit app tag. 
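 * Each protected block carries an 8-byte protection-information tuple
 * (16-bit guard, 16-bit application tag, 32-bit reference tag, all stored
 * big-endian); for Type 1 the expected reference tag starts at the low
 * 32 bits of the LBA and increments by one per block, which is why the code
 * below seeds ref_tag from lba.  A stand-alone sketch (hypothetical names,
 * illustration only):
 *
 *     #include <stdint.h>
 *
 *     struct pi_tuple {            // per-block protection information
 *             uint16_t guard;      // CRC over the data block
 *             uint16_t app_tag;
 *             uint32_t ref_tag;
 *     };
 *
 *     // Type 1: reference tag for the n-th block of an I/O starting at lba.
 *     static uint32_t type1_ref_tag(uint64_t lba, uint32_t n)
 *     {
 *             return (uint32_t)(lba & 0xffffffff) + n;
 *     }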
+ */ + ctx->ref_tag = cpu_to_le32(lba); + if (!qla_tgt_ref_mask_check(se_cmd) || + !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { + *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; + break; + } + /* enable ALL bytes of the ref tag */ + ctx->ref_tag_mask[0] = 0xff; + ctx->ref_tag_mask[1] = 0xff; + ctx->ref_tag_mask[2] = 0xff; + ctx->ref_tag_mask[3] = 0xff; + break; + case TARGET_DIF_TYPE2_PROT: + /* + * For TYPE 2 protection: 16 bit GUARD + 32 bit REF + * tag has to match LBA in CDB + N + */ + ctx->ref_tag = cpu_to_le32(lba); + if (!qla_tgt_ref_mask_check(se_cmd) || + !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { + *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; + break; + } + /* enable ALL bytes of the ref tag */ + ctx->ref_tag_mask[0] = 0xff; + ctx->ref_tag_mask[1] = 0xff; + ctx->ref_tag_mask[2] = 0xff; + ctx->ref_tag_mask[3] = 0xff; + break; + case TARGET_DIF_TYPE3_PROT: + /* For TYPE 3 protection: 16 bit GUARD only */ + *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; + ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = + ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; + break; + } +} + +static inline int +qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) +{ + struct dsd64 *cur_dsd; + uint32_t transfer_length = 0; + uint32_t data_bytes; + uint32_t dif_bytes; + uint8_t bundling = 1; + struct crc_context *crc_ctx_pkt = NULL; + struct qla_hw_data *ha; + struct ctio_crc2_to_fw *pkt; + dma_addr_t crc_ctx_dma; + uint16_t fw_prot_opts = 0; + struct qla_tgt_cmd *cmd = prm->cmd; + struct se_cmd *se_cmd = &cmd->se_cmd; + uint32_t h; + struct atio_from_isp *atio = &prm->cmd->atio; + struct qla_tc_param tc; + uint16_t t16; + scsi_qla_host_t *vha = cmd->vha; + + ha = vha->hw; + + pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr; + prm->pkt = pkt; + memset(pkt, 0, sizeof(*pkt)); + + ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071, + "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", + cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op, + prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); + + if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) || + (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP)) + bundling = 0; + + /* Compute dif len and adjust data len to incude protection */ + data_bytes = cmd->bufflen; + dif_bytes = (data_bytes / cmd->blk_sz) * 8; + + switch (se_cmd->prot_op) { + case TARGET_PROT_DIN_INSERT: + case TARGET_PROT_DOUT_STRIP: + transfer_length = data_bytes; + if (cmd->prot_sg_cnt) + data_bytes += dif_bytes; + break; + case TARGET_PROT_DIN_STRIP: + case TARGET_PROT_DOUT_INSERT: + case TARGET_PROT_DIN_PASS: + case TARGET_PROT_DOUT_PASS: + transfer_length = data_bytes + dif_bytes; + break; + default: + BUG(); + break; + } + + if (!qlt_hba_err_chk_enabled(se_cmd)) + fw_prot_opts |= 0x10; /* Disable Guard tag checking */ + /* HBA error checking enabled */ + else if (IS_PI_UNINIT_CAPABLE(ha)) { + if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || + (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) + fw_prot_opts |= PO_DIS_VALD_APP_ESC; + else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) + fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; + } + + switch (se_cmd->prot_op) { + case TARGET_PROT_DIN_INSERT: + case TARGET_PROT_DOUT_INSERT: + fw_prot_opts |= PO_MODE_DIF_INSERT; + break; + case TARGET_PROT_DIN_STRIP: + case TARGET_PROT_DOUT_STRIP: + fw_prot_opts |= PO_MODE_DIF_REMOVE; + break; + case TARGET_PROT_DIN_PASS: + case TARGET_PROT_DOUT_PASS: + fw_prot_opts |= PO_MODE_DIF_PASS; + /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? 
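 * For reference, the T10 CRC mentioned here is the DIF guard tag, a CRC-16
 * over each data block with polynomial 0x8BB7, zero seed and no reflection;
 * some setups use an IP checksum as the guard instead, hence the conversion
 * question above.  A bitwise reference sketch, much slower than the
 * table/SIMD crc_t10dif() the kernel provides, illustration only:
 *
 *     #include <stddef.h>
 *     #include <stdint.h>
 *
 *     static uint16_t crc16_t10dif(const uint8_t *data, size_t len)
 *     {
 *             uint16_t crc = 0;
 *             size_t i;
 *             int bit;
 *
 *             for (i = 0; i < len; i++) {
 *                     crc ^= (uint16_t)data[i] << 8;
 *                     for (bit = 0; bit < 8; bit++)
 *                             crc = (crc & 0x8000) ?
 *                                   (uint16_t)((crc << 1) ^ 0x8BB7) :
 *                                   (uint16_t)(crc << 1);
 *             }
 *             return crc;
 *     }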
*/ + break; + default:/* Normal Request */ + fw_prot_opts |= PO_MODE_DIF_PASS; + break; + } + + /* ---- PKT ---- */ + /* Update entry type to indicate Command Type CRC_2 IOCB */ + pkt->entry_type = CTIO_CRC2; + pkt->entry_count = 1; + pkt->vp_index = cmd->vp_idx; + + h = qlt_make_handle(qpair); + if (unlikely(h == QLA_TGT_NULL_HANDLE)) { + /* + * CTIO type 7 from the firmware doesn't provide a way to + * know the initiator's LOOP ID, hence we can't find + * the session and, so, the command. + */ + return -EAGAIN; + } else + qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; + + pkt->handle = make_handle(qpair->req->id, h); + pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; + pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); + pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); + pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); + pkt->exchange_addr = atio->u.isp24.exchange_addr; + + /* silence compile warning */ + t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + pkt->ox_id = cpu_to_le16(t16); + + t16 = (atio->u.isp24.attr << 9); + pkt->flags |= cpu_to_le16(t16); + pkt->relative_offset = cpu_to_le32(prm->cmd->offset); + + /* Set transfer direction */ + if (cmd->dma_data_direction == DMA_TO_DEVICE) + pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN); + else if (cmd->dma_data_direction == DMA_FROM_DEVICE) + pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); + + pkt->dseg_count = cpu_to_le16(prm->tot_dsds); + /* Fibre channel byte count */ + pkt->transfer_length = cpu_to_le32(transfer_length); + + /* ----- CRC context -------- */ + + /* Allocate CRC context from global pool */ + crc_ctx_pkt = cmd->ctx = + dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); + + if (!crc_ctx_pkt) + goto crc_queuing_error; + + crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; + INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); + + /* Set handle */ + crc_ctx_pkt->handle = pkt->handle; + + qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts); + + put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address); + pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW); + + if (!bundling) { + cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0]; + } else { + /* + * Configure Bundling if we need to fetch interlaving + * protection PCI accesses + */ + fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; + crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); + crc_ctx_pkt->u.bundling.dseg_count = + cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); + cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0]; + } + + /* Finish the common fields of CRC pkt */ + crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz); + crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); + crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); + crc_ctx_pkt->guard_seed = cpu_to_le16(0); + + memset((uint8_t *)&tc, 0 , sizeof(tc)); + tc.vha = vha; + tc.blk_sz = cmd->blk_sz; + tc.bufflen = cmd->bufflen; + tc.sg = cmd->sg; + tc.prot_sg = cmd->prot_sg; + tc.ctx = crc_ctx_pkt; + tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced; + + /* Walks data segments */ + pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); + + if (!bundling && prm->prot_seg_cnt) { + if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, + prm->tot_dsds, &tc)) + goto crc_queuing_error; + } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, + (prm->tot_dsds - prm->prot_seg_cnt), &tc)) + goto crc_queuing_error; + + if (bundling && prm->prot_seg_cnt) { + /* Walks dif segments */ + pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA; + + cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd; + if (qla24xx_walk_and_build_prot_sglist(ha, NULL, 
cur_dsd, + prm->prot_seg_cnt, cmd)) + goto crc_queuing_error; + } + return QLA_SUCCESS; + +crc_queuing_error: + /* Cleanup will be performed by the caller */ + qpair->req->outstanding_cmds[h] = NULL; + + return QLA_FUNCTION_FAILED; +} + +/* + * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * + * QLA_TGT_XMIT_STATUS for >= 24xx silicon + */ +int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, + uint8_t scsi_status) +{ + struct scsi_qla_host *vha = cmd->vha; + struct qla_qpair *qpair = cmd->qpair; + struct ctio7_to_24xx *pkt; + struct qla_tgt_prm prm; + uint32_t full_req_cnt = 0; + unsigned long flags = 0; + int res; + + if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || + (cmd->sess && cmd->sess->deleted)) { + cmd->state = QLA_TGT_STATE_PROCESSED; + return 0; + } + + ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018, + "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n", + (xmit_type & QLA_TGT_XMIT_STATUS) ? + 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction, + &cmd->se_cmd, qpair->id); + + res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, + &full_req_cnt); + if (unlikely(res != 0)) { + return res; + } + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + + if (xmit_type == QLA_TGT_XMIT_STATUS) + qpair->tgt_counters.core_qla_snd_status++; + else + qpair->tgt_counters.core_qla_que_buf++; + + if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) { + /* + * Either the port is not online or this request was from + * previous life, just abort the processing. + */ + cmd->state = QLA_TGT_STATE_PROCESSED; + ql_dbg_qp(ql_dbg_async, qpair, 0xe101, + "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", + vha->flags.online, qla2x00_reset_active(vha), + cmd->reset_count, qpair->chip_reset); + res = 0; + goto out_unmap_unlock; + } + + /* Does F/W have an IOCBs for this request */ + res = qlt_check_reserve_free_req(qpair, full_req_cnt); + if (unlikely(res)) + goto out_unmap_unlock; + + if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA)) + res = qlt_build_ctio_crc2_pkt(qpair, &prm); + else + res = qlt_24xx_build_ctio_pkt(qpair, &prm); + if (unlikely(res != 0)) { + qpair->req->cnt += full_req_cnt; + goto out_unmap_unlock; + } + + pkt = (struct ctio7_to_24xx *)prm.pkt; + + if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) { + pkt->u.status0.flags |= + cpu_to_le16(CTIO7_FLAGS_DATA_IN | + CTIO7_FLAGS_STATUS_MODE_0); + + if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) + qlt_load_data_segments(&prm); + + if (prm.add_status_pkt == 0) { + if (xmit_type & QLA_TGT_XMIT_STATUS) { + pkt->u.status0.scsi_status = + cpu_to_le16(prm.rq_result); + if (!cmd->edif) + pkt->u.status0.residual = + cpu_to_le32(prm.residual); + + pkt->u.status0.flags |= cpu_to_le16( + CTIO7_FLAGS_SEND_STATUS); + if (qlt_need_explicit_conf(cmd, 0)) { + pkt->u.status0.flags |= + cpu_to_le16( + CTIO7_FLAGS_EXPLICIT_CONFORM | + CTIO7_FLAGS_CONFORM_REQ); + } + } + + } else { + /* + * We have already made sure that there is sufficient + * amount of request entries to not drop HW lock in + * req_pkt(). 
+ */ + struct ctio7_to_24xx *ctio = + (struct ctio7_to_24xx *)qlt_get_req_pkt( + qpair->req); + + ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e, + "Building additional status packet 0x%p.\n", + ctio); + + /* + * T10Dif: ctio_crc2_to_fw overlay ontop of + * ctio7_to_24xx + */ + memcpy(ctio, pkt, sizeof(*ctio)); + /* reset back to CTIO7 */ + ctio->entry_count = 1; + ctio->entry_type = CTIO_TYPE7; + ctio->dseg_count = 0; + ctio->u.status1.flags &= ~cpu_to_le16( + CTIO7_FLAGS_DATA_IN); + + /* Real finish is ctio_m1's finish */ + pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; + pkt->u.status0.flags |= cpu_to_le16( + CTIO7_FLAGS_DONT_RET_CTIO); + + /* qlt_24xx_init_ctio_to_isp will correct + * all neccessary fields that's part of CTIO7. + * There should be no residual of CTIO-CRC2 data. + */ + qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, + &prm); + } + } else + qlt_24xx_init_ctio_to_isp(pkt, &prm); + + + cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ + cmd->cmd_sent_to_fw = 1; + cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags); + + /* Memory Barrier */ + wmb(); + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(vha, qpair->req); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + return 0; + +out_unmap_unlock: + qlt_unmap_sg(vha, cmd); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + return res; +} +EXPORT_SYMBOL(qlt_xmit_response); + +int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) +{ + struct ctio7_to_24xx *pkt; + struct scsi_qla_host *vha = cmd->vha; + struct qla_tgt *tgt = cmd->tgt; + struct qla_tgt_prm prm; + unsigned long flags = 0; + int res = 0; + struct qla_qpair *qpair = cmd->qpair; + + memset(&prm, 0, sizeof(prm)); + prm.cmd = cmd; + prm.tgt = tgt; + prm.sg = NULL; + prm.req_cnt = 1; + + if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || + (cmd->sess && cmd->sess->deleted)) { + /* + * Either the port is not online or this request was from + * previous life, just abort the processing. 
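 * The "previous life" test is a generation check: each command records the
 * qpair's chip_reset value when it is created, and any later stage that sees
 * a different value drops the stale work instead of touching reinitialised
 * hardware.  A generic stand-alone sketch of the pattern, hypothetical
 * names, illustration only:
 *
 *     #include <stdbool.h>
 *     #include <stdint.h>
 *
 *     struct hw_queue { uint32_t reset_generation; bool fw_started; };
 *     struct io_cmd   { uint32_t reset_generation; };
 *
 *     // Record the generation the command was created under.
 *     static void cmd_init(struct io_cmd *cmd, const struct hw_queue *q)
 *     {
 *             cmd->reset_generation = q->reset_generation;
 *     }
 *
 *     // Stale if the firmware stopped or restarted since cmd was created.
 *     static bool cmd_is_stale(const struct io_cmd *cmd,
 *                              const struct hw_queue *q)
 *     {
 *             return !q->fw_started ||
 *                    cmd->reset_generation != q->reset_generation;
 *     }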
+ */ + cmd->aborted = 1; + cmd->write_data_transferred = 0; + cmd->state = QLA_TGT_STATE_DATA_IN; + vha->hw->tgt.tgt_ops->handle_data(cmd); + ql_dbg_qp(ql_dbg_async, qpair, 0xe102, + "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n", + vha->flags.online, qla2x00_reset_active(vha), + cmd->reset_count, qpair->chip_reset); + return 0; + } + + /* Calculate number of entries and segments required */ + if (qlt_pci_map_calc_cnt(&prm) != 0) + return -EAGAIN; + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + /* Does F/W have an IOCBs for this request */ + res = qlt_check_reserve_free_req(qpair, prm.req_cnt); + if (res != 0) + goto out_unlock_free_unmap; + if (cmd->se_cmd.prot_op) + res = qlt_build_ctio_crc2_pkt(qpair, &prm); + else + res = qlt_24xx_build_ctio_pkt(qpair, &prm); + + if (unlikely(res != 0)) { + qpair->req->cnt += prm.req_cnt; + goto out_unlock_free_unmap; + } + + pkt = (struct ctio7_to_24xx *)prm.pkt; + pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT | + CTIO7_FLAGS_STATUS_MODE_0); + + if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) + qlt_load_data_segments(&prm); + + cmd->state = QLA_TGT_STATE_NEED_DATA; + cmd->cmd_sent_to_fw = 1; + cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags); + + /* Memory Barrier */ + wmb(); + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(vha, qpair->req); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + return res; + +out_unlock_free_unmap: + qlt_unmap_sg(vha, cmd); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + return res; +} +EXPORT_SYMBOL(qlt_rdy_to_xfer); + + +/* + * it is assumed either hardware_lock or qpair lock is held. + */ +static void +qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, + struct ctio_crc_from_fw *sts) +{ + uint8_t *ap = &sts->actual_dif[0]; + uint8_t *ep = &sts->expected_dif[0]; + uint64_t lba = cmd->se_cmd.t_task_lba; + uint8_t scsi_status, sense_key, asc, ascq; + unsigned long flags; + struct scsi_qla_host *vha = cmd->vha; + + cmd->trc_flags |= TRC_DIF_ERR; + + cmd->a_guard = get_unaligned_be16(ap + 0); + cmd->a_app_tag = get_unaligned_be16(ap + 2); + cmd->a_ref_tag = get_unaligned_be32(ap + 4); + + cmd->e_guard = get_unaligned_be16(ep + 0); + cmd->e_app_tag = get_unaligned_be16(ep + 2); + cmd->e_ref_tag = get_unaligned_be32(ep + 4); + + ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, + "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); + + scsi_status = sense_key = asc = ascq = 0; + + /* check appl tag */ + if (cmd->e_app_tag != cmd->a_app_tag) { + ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d, + "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, + cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, + cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, + cmd->atio.u.isp24.fcp_hdr.ox_id); + + cmd->dif_err_code = DIF_ERR_APP; + scsi_status = SAM_STAT_CHECK_CONDITION; + sense_key = ABORTED_COMMAND; + asc = 0x10; + ascq = 0x2; + } + + /* check ref tag */ + if (cmd->e_ref_tag != cmd->a_ref_tag) { + ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e, + "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ", + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, + cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, + cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, + cmd->atio.u.isp24.fcp_hdr.ox_id); + + cmd->dif_err_code = DIF_ERR_REF; + scsi_status = 
SAM_STAT_CHECK_CONDITION; + sense_key = ABORTED_COMMAND; + asc = 0x10; + ascq = 0x3; + goto out; + } + + /* check guard */ + if (cmd->e_guard != cmd->a_guard) { + ql_dbg(ql_dbg_tgt_dif, vha, 0xe012, + "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, + cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, + cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, + cmd->atio.u.isp24.fcp_hdr.ox_id); + + cmd->dif_err_code = DIF_ERR_GRD; + scsi_status = SAM_STAT_CHECK_CONDITION; + sense_key = ABORTED_COMMAND; + asc = 0x10; + ascq = 0x1; + } +out: + switch (cmd->state) { + case QLA_TGT_STATE_NEED_DATA: + /* handle_data will load DIF error code */ + cmd->state = QLA_TGT_STATE_DATA_IN; + vha->hw->tgt.tgt_ops->handle_data(cmd); + break; + default: + spin_lock_irqsave(&cmd->cmd_lock, flags); + if (cmd->aborted) { + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + vha->hw->tgt.tgt_ops->free_cmd(cmd); + break; + } + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + + qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc, + ascq); + /* assume scsi status gets out on the wire. + * Will not wait for completion. + */ + vha->hw->tgt.tgt_ops->free_cmd(cmd); + break; + } +} + +/* If hardware_lock held on entry, might drop it, then reaquire */ +/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ +static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *ntfy) +{ + struct nack_to_isp *nack; + struct qla_hw_data *ha = vha->hw; + request_t *pkt; + int ret = 0; + + ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, + "Sending TERM ELS CTIO (ha=%p)\n", ha); + + pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); + if (pkt == NULL) { + ql_dbg(ql_dbg_tgt, vha, 0xe080, + "qla_target(%d): %s failed: unable to allocate " + "request packet\n", vha->vp_idx, __func__); + return -ENOMEM; + } + + pkt->entry_type = NOTIFY_ACK_TYPE; + pkt->entry_count = 1; + pkt->handle = QLA_TGT_SKIP_HANDLE; + + nack = (struct nack_to_isp *)pkt; + nack->ox_id = ntfy->ox_id; + + nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; + if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { + nack->u.isp24.flags = ntfy->u.isp24.flags & + cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); + } + + /* terminate */ + nack->u.isp24.flags |= + __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE); + + nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; + nack->u.isp24.status = ntfy->u.isp24.status; + nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; + nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; + nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; + nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; + nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; + nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; + + qla2x00_start_iocbs(vha, vha->req); + return ret; +} + +static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *imm, int ha_locked) +{ + int rc; + + WARN_ON_ONCE(!ha_locked); + rc = __qlt_send_term_imm_notif(vha, imm); + pr_debug("rc = %d\n", rc); +} + +/* + * If hardware_lock held on entry, might drop it, then reaquire + * This function sends the appropriate CTIO to ISP 2xxx or 24xx + */ +static int __qlt_send_term_exchange(struct qla_qpair *qpair, + struct qla_tgt_cmd *cmd, + struct atio_from_isp *atio) +{ + struct scsi_qla_host *vha = qpair->vha; + struct ctio7_to_24xx *ctio24; + struct qla_hw_data *ha = vha->hw; + request_t *pkt; + int ret 
= 0; + uint16_t temp; + + ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha); + + if (cmd) + vha = cmd->vha; + + pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL); + if (pkt == NULL) { + ql_dbg(ql_dbg_tgt, vha, 0xe050, + "qla_target(%d): %s failed: unable to allocate " + "request packet\n", vha->vp_idx, __func__); + return -ENOMEM; + } + + if (cmd != NULL) { + if (cmd->state < QLA_TGT_STATE_PROCESSED) { + ql_dbg(ql_dbg_tgt, vha, 0xe051, + "qla_target(%d): Terminating cmd %p with " + "incorrect state %d\n", vha->vp_idx, cmd, + cmd->state); + } else + ret = 1; + } + + qpair->tgt_counters.num_term_xchg_sent++; + pkt->entry_count = 1; + pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + + ctio24 = (struct ctio7_to_24xx *)pkt; + ctio24->entry_type = CTIO_TYPE7; + ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED); + ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); + ctio24->vp_index = vha->vp_idx; + ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); + ctio24->exchange_addr = atio->u.isp24.exchange_addr; + temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 | + CTIO7_FLAGS_TERMINATE; + ctio24->u.status1.flags = cpu_to_le16(temp); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + ctio24->u.status1.ox_id = cpu_to_le16(temp); + + /* Memory Barrier */ + wmb(); + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(vha, qpair->req); + return ret; +} + +static void qlt_send_term_exchange(struct qla_qpair *qpair, + struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, + int ul_abort) +{ + struct scsi_qla_host *vha; + unsigned long flags = 0; + int rc; + + /* why use different vha? NPIV */ + if (cmd) + vha = cmd->vha; + else + vha = qpair->vha; + + if (ha_locked) { + rc = __qlt_send_term_exchange(qpair, cmd, atio); + if (rc == -ENOMEM) + qlt_alloc_qfull_cmd(vha, atio, 0, 0); + goto done; + } + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + rc = __qlt_send_term_exchange(qpair, cmd, atio); + if (rc == -ENOMEM) + qlt_alloc_qfull_cmd(vha, atio, 0, 0); + +done: + if (cmd && !ul_abort && !cmd->aborted) { + if (cmd->sg_mapped) + qlt_unmap_sg(vha, cmd); + vha->hw->tgt.tgt_ops->free_cmd(cmd); + } + + if (!ha_locked) + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + return; +} + +static void qlt_init_term_exchange(struct scsi_qla_host *vha) +{ + struct list_head free_list; + struct qla_tgt_cmd *cmd, *tcmd; + + vha->hw->tgt.leak_exchg_thresh_hold = + (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT; + + cmd = tcmd = NULL; + if (!list_empty(&vha->hw->tgt.q_full_list)) { + INIT_LIST_HEAD(&free_list); + list_splice_init(&vha->hw->tgt.q_full_list, &free_list); + + list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { + list_del(&cmd->cmd_list); + /* This cmd was never sent to TCM. 
There is no need + * to schedule free or call free_cmd + */ + qlt_free_cmd(cmd); + vha->hw->tgt.num_qfull_cmds_alloc--; + } + } + vha->hw->tgt.num_qfull_cmds_dropped = 0; +} + +static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) +{ + uint32_t total_leaked; + + total_leaked = vha->hw->tgt.num_qfull_cmds_dropped; + + if (vha->hw->tgt.leak_exchg_thresh_hold && + (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) { + + ql_dbg(ql_dbg_tgt, vha, 0xe079, + "Chip reset due to exchange starvation: %d/%d.\n", + total_leaked, vha->hw->cur_fw_xcb_count); + + if (IS_P3P_TYPE(vha->hw)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + +} + +int qlt_abort_cmd(struct qla_tgt_cmd *cmd) +{ + struct qla_tgt *tgt = cmd->tgt; + struct scsi_qla_host *vha = tgt->vha; + struct se_cmd *se_cmd = &cmd->se_cmd; + unsigned long flags; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, + "qla_target(%d): terminating exchange for aborted cmd=%p " + "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, + se_cmd->tag); + + spin_lock_irqsave(&cmd->cmd_lock, flags); + if (cmd->aborted) { + if (cmd->sg_mapped) + qlt_unmap_sg(vha, cmd); + + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + /* + * It's normal to see 2 calls in this path: + * 1) XFER Rdy completion + CMD_T_ABORT + * 2) TCM TMR - drain_state_list + */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016, + "multiple abort. %p transport_state %x, t_state %x, " + "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state, + cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); + return -EIO; + } + cmd->aborted = 1; + cmd->trc_flags |= TRC_ABORT; + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + + qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1); + return 0; +} +EXPORT_SYMBOL(qlt_abort_cmd); + +void qlt_free_cmd(struct qla_tgt_cmd *cmd) +{ + struct fc_port *sess = cmd->sess; + + ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, + "%s: se_cmd[%p] ox_id %04x\n", + __func__, &cmd->se_cmd, + be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); + + BUG_ON(cmd->cmd_in_wq); + + if (!cmd->q_full) + qlt_decr_num_pend_cmds(cmd->vha); + + BUG_ON(cmd->sg_mapped); + cmd->jiffies_at_free = get_jiffies_64(); + + if (!sess || !sess->se_sess) { + WARN_ON(1); + return; + } + cmd->jiffies_at_free = get_jiffies_64(); + cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd); +} +EXPORT_SYMBOL(qlt_free_cmd); + +/* + * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire + */ +static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio, + struct qla_tgt_cmd *cmd, uint32_t status) +{ + int term = 0; + struct scsi_qla_host *vha = qpair->vha; + + if (cmd->se_cmd.prot_op) + ql_dbg(ql_dbg_tgt_dif, vha, 0xe013, + "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x] op %#x/%s", + cmd->lba, cmd->lba, + cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr, + cmd->se_cmd.prot_op, + prot_op_str(cmd->se_cmd.prot_op)); + + if (ctio != NULL) { + struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; + + term = !(c->flags & + cpu_to_le16(OF_TERM_EXCH)); + } else + term = 1; + + if (term) + qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0); + + return term; +} + + +/* ha->hardware_lock supposed to be held on entry */ +static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha, + struct rsp_que *rsp, uint32_t handle, void *ctio) +{ + void *cmd = NULL; + struct req_que *req; + int qid = GET_QID(handle); + uint32_t h = handle & ~QLA_TGT_HANDLE_MASK; + + if (unlikely(h == QLA_TGT_SKIP_HANDLE)) + return NULL; + + if (qid == rsp->req->id) { + req = rsp->req; + } else if (vha->hw->req_q_map[qid]) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a, + "qla_target(%d): CTIO completion with different QID %d handle %x\n", + vha->vp_idx, rsp->id, handle); + req = vha->hw->req_q_map[qid]; + } else { + return NULL; + } + + h &= QLA_CMD_HANDLE_MASK; + + if (h != QLA_TGT_NULL_HANDLE) { + if (unlikely(h >= req->num_outstanding_cmds)) { + ql_dbg(ql_dbg_tgt, vha, 0xe052, + "qla_target(%d): Wrong handle %x received\n", + vha->vp_idx, handle); + return NULL; + } + + cmd = req->outstanding_cmds[h]; + if (unlikely(cmd == NULL)) { + ql_dbg(ql_dbg_async, vha, 0xe053, + "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n", + vha->vp_idx, handle, req->id, rsp->id); + return NULL; + } + req->outstanding_cmds[h] = NULL; + } else if (ctio != NULL) { + /* We can't get loop ID from CTIO7 */ + ql_dbg(ql_dbg_tgt, vha, 0xe054, + "qla_target(%d): Wrong CTIO received: QLA24xx doesn't " + "support NULL handles\n", vha->vp_idx); + return NULL; + } + + return cmd; +} + +/* + * ha->hardware_lock supposed to be held on entry. 
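 * A note on qlt_ctio_to_cmd() just above: the completion handle packs the
 * request-queue id and the outstanding-command index into one 32-bit value,
 * plus marker bits such as CTIO_COMPLETION_HANDLE_MARK.  The 16/16 split in
 * this stand-alone sketch is an assumption made purely for illustration:
 *
 *     #include <stdint.h>
 *
 *     #define QID_SHIFT 16u        // assumed: high 16 bits hold the queue id
 *
 *     static uint32_t pack_handle(uint16_t qid, uint16_t index)
 *     {
 *             return ((uint32_t)qid << QID_SHIFT) | index;
 *     }
 *
 *     static uint16_t handle_qid(uint32_t h)   { return (uint16_t)(h >> QID_SHIFT); }
 *     static uint16_t handle_index(uint32_t h) { return (uint16_t)(h & 0xffffu); }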
Might drop it, then reaquire + */ +static void qlt_do_ctio_completion(struct scsi_qla_host *vha, + struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio) +{ + struct qla_hw_data *ha = vha->hw; + struct se_cmd *se_cmd; + struct qla_tgt_cmd *cmd; + struct qla_qpair *qpair = rsp->qpair; + + if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { + /* That could happen only in case of an error/reset/abort */ + if (status != CTIO_SUCCESS) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, + "Intermediate CTIO received" + " (status %x)\n", status); + } + return; + } + + cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio); + if (cmd == NULL) + return; + + if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) && + cmd->sess) { + qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess, + (struct ctio7_from_24xx *)ctio); + } + + se_cmd = &cmd->se_cmd; + cmd->cmd_sent_to_fw = 0; + + qlt_unmap_sg(vha, cmd); + + if (unlikely(status != CTIO_SUCCESS)) { + switch (status & 0xFFFF) { + case CTIO_INVALID_RX_ID: + if (printk_ratelimit()) + dev_info(&vha->hw->pdev->dev, + "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n", + vha->vp_idx, cmd->atio.u.isp24.attr, + ((cmd->ctio_flags >> 9) & 0xf), + cmd->ctio_flags); + + break; + case CTIO_LIP_RESET: + case CTIO_TARGET_RESET: + case CTIO_ABORTED: + /* driver request abort via Terminate exchange */ + case CTIO_TIMEOUT: + /* They are OK */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, + "qla_target(%d): CTIO with " + "status %#x received, state %x, se_cmd %p, " + "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " + "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, + status, cmd->state, se_cmd); + break; + + case CTIO_PORT_LOGGED_OUT: + case CTIO_PORT_UNAVAILABLE: + { + int logged_out = + (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, + "qla_target(%d): CTIO with %s status %x " + "received (state %x, se_cmd %p)\n", vha->vp_idx, + logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE", + status, cmd->state, se_cmd); + + if (logged_out && cmd->sess) { + /* + * Session is already logged out, but we need + * to notify initiator, who's not aware of this + */ + cmd->sess->send_els_logo = 1; + ql_dbg(ql_dbg_disc, vha, 0x20f8, + "%s %d %8phC post del sess\n", + __func__, __LINE__, cmd->sess->port_name); + + qlt_schedule_sess_for_deletion(cmd->sess); + } + break; + } + case CTIO_DIF_ERROR: { + struct ctio_crc_from_fw *crc = + (struct ctio_crc_from_fw *)ctio; + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, + "qla_target(%d): CTIO with DIF_ERROR status %x " + "received (state %x, ulp_cmd %p) actual_dif[0x%llx] " + "expect_dif[0x%llx]\n", + vha->vp_idx, status, cmd->state, se_cmd, + *((u64 *)&crc->actual_dif[0]), + *((u64 *)&crc->expected_dif[0])); + + qlt_handle_dif_error(qpair, cmd, ctio); + return; + } + + case CTIO_FAST_AUTH_ERR: + case CTIO_FAST_INCOMP_PAD_LEN: + case CTIO_FAST_INVALID_REQ: + case CTIO_FAST_SPI_ERR: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, + "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p\n", + vha->vp_idx, status, cmd->state, se_cmd); + break; + + default: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, + "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", + vha->vp_idx, status, cmd->state, se_cmd); + break; + } + + + /* "cmd->aborted" means + * cmd is already aborted/terminated, we don't + * need to terminate again. The exchange is already + * cleaned up/freed at FW level. Just cleanup at driver + * level. 
+ */ + if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && + (!cmd->aborted)) { + cmd->trc_flags |= TRC_CTIO_ERR; + if (qlt_term_ctio_exchange(qpair, ctio, cmd, status)) + return; + } + } + + if (cmd->state == QLA_TGT_STATE_PROCESSED) { + cmd->trc_flags |= TRC_CTIO_DONE; + } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { + cmd->state = QLA_TGT_STATE_DATA_IN; + + if (status == CTIO_SUCCESS) + cmd->write_data_transferred = 1; + + ha->tgt.tgt_ops->handle_data(cmd); + return; + } else if (cmd->aborted) { + cmd->trc_flags |= TRC_CTIO_ABORTED; + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, + "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); + } else { + cmd->trc_flags |= TRC_CTIO_STRANGE; + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, + "qla_target(%d): A command in state (%d) should " + "not return a CTIO complete\n", vha->vp_idx, cmd->state); + } + + if (unlikely(status != CTIO_SUCCESS) && + !cmd->aborted) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); + dump_stack(); + } + + ha->tgt.tgt_ops->free_cmd(cmd); +} + +static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, + uint8_t task_codes) +{ + int fcp_task_attr; + + switch (task_codes) { + case ATIO_SIMPLE_QUEUE: + fcp_task_attr = TCM_SIMPLE_TAG; + break; + case ATIO_HEAD_OF_QUEUE: + fcp_task_attr = TCM_HEAD_TAG; + break; + case ATIO_ORDERED_QUEUE: + fcp_task_attr = TCM_ORDERED_TAG; + break; + case ATIO_ACA_QUEUE: + fcp_task_attr = TCM_ACA_TAG; + break; + case ATIO_UNTAGGED: + fcp_task_attr = TCM_SIMPLE_TAG; + break; + default: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, + "qla_target: unknown task code %x, use ORDERED instead\n", + task_codes); + fcp_task_attr = TCM_ORDERED_TAG; + break; + } + + return fcp_task_attr; +} + +/* + * Process context for I/O path into tcm_qla2xxx code + */ +static void __qlt_do_work(struct qla_tgt_cmd *cmd) +{ + scsi_qla_host_t *vha = cmd->vha; + struct qla_hw_data *ha = vha->hw; + struct fc_port *sess = cmd->sess; + struct atio_from_isp *atio = &cmd->atio; + unsigned char *cdb; + unsigned long flags; + uint32_t data_length; + int ret, fcp_task_attr, data_dir, bidi = 0; + struct qla_qpair *qpair = cmd->qpair; + + cmd->cmd_in_wq = 0; + cmd->trc_flags |= TRC_DO_WORK; + + if (cmd->aborted) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082, + "cmd with tag %u is aborted\n", + cmd->atio.u.isp24.exchange_addr); + goto out_term; + } + + spin_lock_init(&cmd->cmd_lock); + cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; + cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr); + + if (atio->u.isp24.fcp_cmnd.rddata && + atio->u.isp24.fcp_cmnd.wrdata) { + bidi = 1; + data_dir = DMA_TO_DEVICE; + } else if (atio->u.isp24.fcp_cmnd.rddata) + data_dir = DMA_FROM_DEVICE; + else if (atio->u.isp24.fcp_cmnd.wrdata) + data_dir = DMA_TO_DEVICE; + else + data_dir = DMA_NONE; + + fcp_task_attr = qlt_get_fcp_task_attr(vha, + atio->u.isp24.fcp_cmnd.task_attr); + data_length = get_datalen_for_atio(atio); + + ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, + fcp_task_attr, data_dir, bidi); + if (ret != 0) + goto out_term; + /* + * Drop extra session reference from qlt_handle_cmd_for_atio(). + */ + ha->tgt.tgt_ops->put_sess(sess); + return; + +out_term: + ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd); + /* + * cmd has not sent to target yet, so pass NULL as the second + * argument to qlt_send_term_exchange() and free the memory here. 
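 * On the direction setup earlier in __qlt_do_work(): the FCP_CMND rddata and
 * wrdata bits decide the DMA direction, and when both are set the command is
 * treated as bidirectional with the write leg driving the DMA, exactly as
 * the if/else chain above does.  A stand-alone sketch, hypothetical names,
 * illustration only:
 *
 *     #include <stdbool.h>
 *
 *     enum xfer_dir { DIR_NONE, DIR_FROM_DEVICE, DIR_TO_DEVICE };
 *
 *     static enum xfer_dir fcp_data_dir(bool rddata, bool wrdata, bool *bidi)
 *     {
 *             *bidi = rddata && wrdata;
 *             if (wrdata)
 *                     return DIR_TO_DEVICE;   // write (or bidirectional)
 *             if (rddata)
 *                     return DIR_FROM_DEVICE; // read
 *             return DIR_NONE;                // no data phase
 *     }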
+ */ + cmd->trc_flags |= TRC_DO_WORK_ERR; + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0); + + qlt_decr_num_pend_cmds(vha); + cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + ha->tgt.tgt_ops->put_sess(sess); +} + +static void qlt_do_work(struct work_struct *work) +{ + struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + scsi_qla_host_t *vha = cmd->vha; + unsigned long flags; + + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_del(&cmd->cmd_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); + + __qlt_do_work(cmd); +} + +void qlt_clr_qp_table(struct scsi_qla_host *vha) +{ + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + void *node; + u64 key = 0; + + ql_log(ql_log_info, vha, 0x706c, + "User update Number of Active Qpairs %d\n", + ha->tgt.num_act_qpairs); + + spin_lock_irqsave(&ha->tgt.atio_lock, flags); + + btree_for_each_safe64(&tgt->lun_qpair_map, key, node) + btree_remove64(&tgt->lun_qpair_map, key); + + ha->base_qpair->lun_cnt = 0; + for (key = 0; key < ha->max_qpairs; key++) + if (ha->queue_pair_map[key]) + ha->queue_pair_map[key]->lun_cnt = 0; + + spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); +} + +static void qlt_assign_qpair(struct scsi_qla_host *vha, + struct qla_tgt_cmd *cmd) +{ + struct qla_qpair *qpair, *qp; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_qpair_hint *h; + + if (vha->flags.qpairs_available) { + h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun); + if (unlikely(!h)) { + /* spread lun to qpair ratio evently */ + int lcnt = 0, rc; + struct scsi_qla_host *base_vha = + pci_get_drvdata(vha->hw->pdev); + + qpair = vha->hw->base_qpair; + if (qpair->lun_cnt == 0) { + qpair->lun_cnt++; + h = qla_qpair_to_hint(tgt, qpair); + BUG_ON(!h); + rc = btree_insert64(&tgt->lun_qpair_map, + cmd->unpacked_lun, h, GFP_ATOMIC); + if (rc) { + qpair->lun_cnt--; + ql_log(ql_log_info, vha, 0xd037, + "Unable to insert lun %llx into lun_qpair_map\n", + cmd->unpacked_lun); + } + goto out; + } else { + lcnt = qpair->lun_cnt; + } + + h = NULL; + list_for_each_entry(qp, &base_vha->qp_list, + qp_list_elem) { + if (qp->lun_cnt == 0) { + qp->lun_cnt++; + h = qla_qpair_to_hint(tgt, qp); + BUG_ON(!h); + rc = btree_insert64(&tgt->lun_qpair_map, + cmd->unpacked_lun, h, GFP_ATOMIC); + if (rc) { + qp->lun_cnt--; + ql_log(ql_log_info, vha, 0xd038, + "Unable to insert lun %llx into lun_qpair_map\n", + cmd->unpacked_lun); + } + qpair = qp; + goto out; + } else { + if (qp->lun_cnt < lcnt) { + lcnt = qp->lun_cnt; + qpair = qp; + continue; + } + } + } + BUG_ON(!qpair); + qpair->lun_cnt++; + h = qla_qpair_to_hint(tgt, qpair); + BUG_ON(!h); + rc = btree_insert64(&tgt->lun_qpair_map, + cmd->unpacked_lun, h, GFP_ATOMIC); + if (rc) { + qpair->lun_cnt--; + ql_log(ql_log_info, vha, 0xd039, + "Unable to insert lun %llx into lun_qpair_map\n", + cmd->unpacked_lun); + } + } + } else { + h = &tgt->qphints[0]; + } +out: + cmd->qpair = h->qpair; + cmd->se_cmd.cpuid = h->cpuid; +} + +static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, + struct fc_port *sess, + struct atio_from_isp *atio) +{ + struct qla_tgt_cmd *cmd; + + cmd = vha->hw->tgt.tgt_ops->get_cmd(sess); + if (!cmd) + return NULL; + + cmd->cmd_type = TYPE_TGT_CMD; + memcpy(&cmd->atio, atio, sizeof(*atio)); + INIT_LIST_HEAD(&cmd->sess_cmd_list); + cmd->state = QLA_TGT_STATE_NEW; + cmd->tgt = vha->vha_tgt.qla_tgt; + 
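/*
 * qlt_assign_qpair() above spreads LUNs across queue pairs: a per-LUN hint
 * is cached in lun_qpair_map, and a LUN seen for the first time goes to the
 * queue currently serving the fewest LUNs.  A stand-alone sketch of that
 * selection (no hint cache, hypothetical names, illustration only):
 *
 *     #include <stddef.h>
 *
 *     struct queue { unsigned int lun_cnt; };
 *
 *     // Pick the queue serving the fewest LUNs and charge the new LUN to
 *     // it; ties go to the lowest-indexed queue.
 *     static struct queue *assign_least_loaded(struct queue *q, size_t nq)
 *     {
 *             struct queue *best = &q[0];
 *             size_t i;
 *
 *             for (i = 1; i < nq; i++)
 *                     if (q[i].lun_cnt < best->lun_cnt)
 *                             best = &q[i];
 *             best->lun_cnt++;
 *             return best;
 *     }
 */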
qlt_incr_num_pend_cmds(vha); + cmd->vha = vha; + cmd->sess = sess; + cmd->loop_id = sess->loop_id; + cmd->conf_compl_supported = sess->conf_compl_supported; + + cmd->trc_flags = 0; + cmd->jiffies_at_alloc = get_jiffies_64(); + + cmd->unpacked_lun = scsilun_to_int( + (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); + qlt_assign_qpair(vha, cmd); + cmd->reset_count = vha->hw->base_qpair->chip_reset; + cmd->vp_idx = vha->vp_idx; + cmd->edif = sess->edif.enable; + + return cmd; +} + +/* ha->hardware_lock supposed to be held on entry */ +static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, + struct atio_from_isp *atio) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct fc_port *sess; + struct qla_tgt_cmd *cmd; + unsigned long flags; + port_id_t id; + + if (unlikely(tgt->tgt_stop)) { + ql_dbg(ql_dbg_io, vha, 0x3061, + "New command while device %p is shutting down\n", tgt); + return -ENODEV; + } + + id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id); + if (IS_SW_RESV_ADDR(id)) + return -EBUSY; + + sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); + if (unlikely(!sess)) + return -EFAULT; + + /* Another WWN used to have our s_id. Our PLOGI scheduled its + * session deletion, but it's still in sess_del_work wq */ + if (sess->deleted) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002, + "New command while old session %p is being deleted\n", + sess); + return -EFAULT; + } + + /* + * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. + */ + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, + "%s: kref_get fail, %8phC oxid %x \n", + __func__, sess->port_name, + be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); + return -EFAULT; + } + + cmd = qlt_get_tag(vha, sess, atio); + if (!cmd) { + ql_dbg(ql_dbg_io, vha, 0x3062, + "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); + ha->tgt.tgt_ops->put_sess(sess); + return -EBUSY; + } + + cmd->cmd_in_wq = 1; + cmd->trc_flags |= TRC_NEW_CMD; + + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); + + INIT_WORK(&cmd->work, qlt_do_work); + if (vha->flags.qpairs_available) { + queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work); + } else if (ha->msix_count) { + if (cmd->atio.u.isp24.fcp_cmnd.rddata) + queue_work(qla_tgt_wq, &cmd->work); + else + queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, + &cmd->work); + } else { + queue_work(qla_tgt_wq, &cmd->work); + } + + return 0; +} + +/* ha->hardware_lock supposed to be held on entry */ +static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, + int fn, void *iocb, int flags) +{ + struct scsi_qla_host *vha = sess->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_mgmt_cmd *mcmd; + struct atio_from_isp *a = (struct atio_from_isp *)iocb; + struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0]; + + mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); + if (!mcmd) { + ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009, + "qla_target(%d): Allocation of management " + "command failed, some commands and their data could " + "leak\n", vha->vp_idx); + return -ENOMEM; + } + memset(mcmd, 0, sizeof(*mcmd)); + mcmd->sess = sess; + + if (iocb) { + memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, + sizeof(mcmd->orig_iocb.imm_ntfy)); + } + mcmd->tmr_func = fn; + mcmd->flags = flags; + mcmd->reset_count = ha->base_qpair->chip_reset; + mcmd->qpair = h->qpair; + mcmd->vha = vha; + mcmd->se_cmd.cpuid = h->cpuid; + 
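/*
 * The lun value stored just below (and cmd->unpacked_lun in qlt_get_tag()
 * above) is produced by scsilun_to_int(), which folds the 8-byte SAM LUN
 * field from the FCP_CMND into a 64-bit integer two bytes at a time.  A
 * simplified stand-alone sketch of that folding, illustration only (the
 * kernel helper is the authoritative version):
 *
 *     #include <stdint.h>
 *
 *     // Each 2-byte LUN level is read big-endian and shifted into the next
 *     // higher 16-bit position of the result.
 *     static uint64_t sam_lun_to_u64(const uint8_t lun[8])
 *     {
 *             uint64_t v = 0;
 *             int i;
 *
 *             for (i = 0; i < 8; i += 2)
 *                     v |= (((uint64_t)lun[i] << 8) | lun[i + 1]) << (i * 8);
 *             return v;
 *     }
 */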
mcmd->unpacked_lun = lun; + + switch (fn) { + case QLA_TGT_LUN_RESET: + case QLA_TGT_CLEAR_TS: + case QLA_TGT_ABORT_TS: + abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); + fallthrough; + case QLA_TGT_CLEAR_ACA: + h = qlt_find_qphint(vha, mcmd->unpacked_lun); + mcmd->qpair = h->qpair; + mcmd->se_cmd.cpuid = h->cpuid; + break; + + case QLA_TGT_TARGET_RESET: + case QLA_TGT_NEXUS_LOSS_SESS: + case QLA_TGT_NEXUS_LOSS: + case QLA_TGT_ABORT_ALL: + default: + /* no-op */ + break; + } + + INIT_WORK(&mcmd->work, qlt_do_tmr_work); + queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, + &mcmd->work); + + return 0; +} + +/* ha->hardware_lock supposed to be held on entry */ +static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) +{ + struct atio_from_isp *a = (struct atio_from_isp *)iocb; + struct qla_hw_data *ha = vha->hw; + struct fc_port *sess; + u64 unpacked_lun; + int fn; + unsigned long flags; + + fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, + a->u.isp24.fcp_hdr.s_id); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + unpacked_lun = + scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); + + if (sess == NULL || sess->deleted) + return -EFAULT; + + return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); +} + +/* ha->hardware_lock supposed to be held on entry */ +static int __qlt_abort_task(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *iocb, struct fc_port *sess) +{ + struct atio_from_isp *a = (struct atio_from_isp *)iocb; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_mgmt_cmd *mcmd; + u64 unpacked_lun; + int rc; + + mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); + if (mcmd == NULL) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f, + "qla_target(%d): %s: Allocation of ABORT cmd failed\n", + vha->vp_idx, __func__); + return -ENOMEM; + } + memset(mcmd, 0, sizeof(*mcmd)); + + mcmd->sess = sess; + memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, + sizeof(mcmd->orig_iocb.imm_ntfy)); + + unpacked_lun = + scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); + mcmd->reset_count = ha->base_qpair->chip_reset; + mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK; + mcmd->qpair = ha->base_qpair; + + rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func, + le16_to_cpu(iocb->u.isp2x.seq_id)); + if (rc != 0) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, + "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", + vha->vp_idx, rc); + mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); + return -EFAULT; + } + + return 0; +} + +/* ha->hardware_lock supposed to be held on entry */ +static int qlt_abort_task(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *iocb) +{ + struct qla_hw_data *ha = vha->hw; + struct fc_port *sess; + int loop_id; + unsigned long flags; + + loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + if (sess == NULL) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, + "qla_target(%d): task abort for unexisting " + "session\n", vha->vp_idx); + return qlt_sched_sess_work(vha->vha_tgt.qla_tgt, + QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); + } + + return __qlt_abort_task(vha, iocb, sess); +} + +void qlt_logo_completion_handler(fc_port_t *fcport, int rc) +{ + if (rc != MBS_COMMAND_COMPLETE) { + ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, + "%s: se_sess %p / sess %p from" + " port %8phC loop_id %#04x 
s_id %02x:%02x:%02x" + " LOGO failed: %#x\n", + __func__, + fcport->se_sess, + fcport, + fcport->port_name, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, rc); + } + + fcport->logout_completed = 1; +} + +/* +* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) +* +* Schedules sessions with matching port_id/loop_id but different wwn for +* deletion. Returns existing session with matching wwn if present. +* Null otherwise. +*/ +struct fc_port * +qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, + port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess) +{ + struct fc_port *sess = NULL, *other_sess; + uint64_t other_wwn; + + *conflict_sess = NULL; + + list_for_each_entry(other_sess, &vha->vp_fcports, list) { + + other_wwn = wwn_to_u64(other_sess->port_name); + + if (wwn == other_wwn) { + WARN_ON(sess); + sess = other_sess; + continue; + } + + /* find other sess with nport_id collision */ + if (port_id.b24 == other_sess->d_id.b24) { + if (loop_id != other_sess->loop_id) { + ql_dbg(ql_dbg_disc, vha, 0x1000c, + "Invalidating sess %p loop_id %d wwn %llx.\n", + other_sess, other_sess->loop_id, other_wwn); + + /* + * logout_on_delete is set by default, but another + * session that has the same s_id/loop_id combo + * might have cleared it when requested this session + * deletion, so don't touch it + */ + qlt_schedule_sess_for_deletion(other_sess); + } else { + /* + * Another wwn used to have our s_id/loop_id + * kill the session, but don't free the loop_id + */ + ql_dbg(ql_dbg_disc, vha, 0xf01b, + "Invalidating sess %p loop_id %d wwn %llx.\n", + other_sess, other_sess->loop_id, other_wwn); + + other_sess->keep_nport_handle = 1; + if (other_sess->disc_state != DSC_DELETED) + *conflict_sess = other_sess; + qlt_schedule_sess_for_deletion(other_sess); + } + continue; + } + + /* find other sess with nport handle collision */ + if ((loop_id == other_sess->loop_id) && + (loop_id != FC_NO_LOOP_ID)) { + ql_dbg(ql_dbg_disc, vha, 0x1000d, + "Invalidating sess %p loop_id %d wwn %llx.\n", + other_sess, other_sess->loop_id, other_wwn); + + /* Same loop_id but different s_id + * Ok to kill and logout */ + qlt_schedule_sess_for_deletion(other_sess); + } + } + + return sess; +} + +/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */ +static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) +{ + struct qla_tgt_sess_op *op; + struct qla_tgt_cmd *cmd; + uint32_t key; + int count = 0; + unsigned long flags; + + key = (((u32)s_id->b.domain << 16) | + ((u32)s_id->b.area << 8) | + ((u32)s_id->b.al_pa)); + + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { + uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + + if (op_key == key) { + op->aborted = true; + count++; + } + } + + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { + uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); + + if (cmd_key == key) { + cmd->aborted = 1; + count++; + } + } + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); + + return count; +} + +static int qlt_handle_login(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *iocb) +{ + struct fc_port *sess = NULL, *conflict_sess = NULL; + uint64_t wwn; + port_id_t port_id; + uint16_t loop_id, wd3_lo; + int res = 0; + struct qlt_plogi_ack_t *pla; + unsigned long flags; + + lockdep_assert_held(&vha->hw->hardware_lock); + + wwn = wwn_to_u64(iocb->u.isp24.port_name); + + port_id.b.domain 
= iocb->u.isp24.port_id[2]; + port_id.b.area = iocb->u.isp24.port_id[1]; + port_id.b.al_pa = iocb->u.isp24.port_id[0]; + port_id.b.rsvd_1 = 0; + + loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); + + /* Mark all stale commands sitting in qla_tgt_wq for deletion */ + abort_cmds_for_s_id(vha, &port_id); + + if (wwn) { + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + sess = qlt_find_sess_invalidate_other(vha, wwn, + port_id, loop_id, &conflict_sess); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ", + __func__, __LINE__, loop_id, port_id.b24); + qlt_send_term_imm_notif(vha, iocb, 1); + goto out; + } + + if (IS_SW_RESV_ADDR(port_id)) { + res = 1; + goto out; + } + + if (vha->hw->flags.edif_enabled && + !(vha->e_dbell.db_flags & EDB_ACTIVE) && + iocb->u.isp24.status_subcode == ELS_PLOGI && + !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d Term INOT due to app not available lid=%d, NportID %06X ", + __func__, __LINE__, loop_id, port_id.b24); + qlt_send_term_imm_notif(vha, iocb, 1); + goto out; + } + + if (vha->hw->flags.edif_enabled) { + if (DBELL_INACTIVE(vha)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d Term INOT due to app not started lid=%d, NportID %06X ", + __func__, __LINE__, loop_id, port_id.b24); + qlt_send_term_imm_notif(vha, iocb, 1); + goto out; + } else if (iocb->u.isp24.status_subcode == ELS_PLOGI && + !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d Term INOT due to unsecure lid=%d, NportID %06X ", + __func__, __LINE__, loop_id, port_id.b24); + qlt_send_term_imm_notif(vha, iocb, 1); + goto out; + } + } + + pla = qlt_plogi_ack_find_add(vha, &port_id, iocb); + if (!pla) { + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + "%s %d %8phC Term INOT due to mem alloc fail", + __func__, __LINE__, + iocb->u.isp24.port_name); + qlt_send_term_imm_notif(vha, iocb, 1); + goto out; + } + + if (conflict_sess) { + conflict_sess->login_gen++; + qlt_plogi_ack_link(vha, pla, conflict_sess, + QLT_PLOGI_LINK_CONFLICT); + } + + if (!sess) { + pla->ref_count++; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post new sess\n", + __func__, __LINE__, iocb->u.isp24.port_name); + if (iocb->u.isp24.status_subcode == ELS_PLOGI) + qla24xx_post_newsess_work(vha, &port_id, + iocb->u.isp24.port_name, + iocb->u.isp24.u.plogi.node_name, + pla, 0); + else + qla24xx_post_newsess_work(vha, &port_id, + iocb->u.isp24.port_name, NULL, + pla, 0); + + goto out; + } + + if (sess->disc_state == DSC_UPD_FCPORT) { + u16 sec; + + /* + * Remote port registration is still going on from + * previous login. Allow it to finish before we + * accept the new login. 
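+ * The immediate notify is terminated below, so the initiator is expected to retry the login.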
+ */ + sess->next_disc_state = DSC_DELETE_PEND; + sec = jiffies_to_msecs(jiffies - + sess->jiffies_at_registration) / 1000; + if (sess->sec_since_registration < sec && sec && + !(sec % 5)) { + sess->sec_since_registration = sec; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC - Slow Rport registration (%d Sec)\n", + __func__, sess->port_name, sec); + } + + if (!conflict_sess) { + list_del(&pla->list); + kmem_cache_free(qla_tgt_plogi_cachep, pla); + } + + qlt_send_term_imm_notif(vha, iocb, 1); + goto out; + } + + qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); + sess->d_id = port_id; + sess->login_gen++; + sess->loop_id = loop_id; + + if (iocb->u.isp24.status_subcode == ELS_PLOGI) { + /* remote port has assigned Port ID */ + if (N2N_TOPO(vha->hw) && fcport_is_bigger(sess)) + vha->d_id = sess->d_id; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC - send port online\n", + __func__, sess->port_name); + + qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, + sess->d_id.b24); + } + + if (iocb->u.isp24.status_subcode == ELS_PRLI) { + sess->fw_login_state = DSC_LS_PRLI_PEND; + sess->local = 0; + sess->loop_id = loop_id; + sess->d_id = port_id; + sess->fw_login_state = DSC_LS_PRLI_PEND; + wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); + + if (wd3_lo & BIT_7) + sess->conf_compl_supported = 1; + + if ((wd3_lo & BIT_4) == 0) + sess->port_type = FCT_INITIATOR; + else + sess->port_type = FCT_TARGET; + + } else + sess->fw_login_state = DSC_LS_PLOGI_PEND; + + + ql_dbg(ql_dbg_disc, vha, 0x20f9, + "%s %d %8phC DS %d\n", + __func__, __LINE__, sess->port_name, sess->disc_state); + + switch (sess->disc_state) { + case DSC_DELETED: + case DSC_LOGIN_PEND: + qlt_plogi_ack_unref(vha, pla); + break; + + default: + /* + * Under normal circumstances we want to release nport handle + * during LOGO process to avoid nport handle leaks inside FW. + * The exception is when LOGO is done while another PLOGI with + * the same nport handle is waiting as might be the case here. + * Note: there is always a possibily of a race where session + * deletion has already started for other reasons (e.g. ACL + * removal) and now PLOGI arrives: + * 1. if PLOGI arrived in FW after nport handle has been freed, + * FW must have assigned this PLOGI a new/same handle and we + * can proceed ACK'ing it as usual when session deletion + * completes. + * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT + * bit reached it, the handle has now been released. We'll + * get an error when we ACK this PLOGI. Nothing will be sent + * back to initiator. Initiator should eventually retry + * PLOGI and situation will correct itself. + */ + sess->keep_nport_handle = ((sess->loop_id == loop_id) && + (sess->d_id.b24 == port_id.b24)); + + ql_dbg(ql_dbg_disc, vha, 0x20f9, + "%s %d %8phC post del sess\n", + __func__, __LINE__, sess->port_name); + + + qlt_schedule_sess_for_deletion(sess); + break; + } +out: + return res; +} + +/* + * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire + */ +static int qlt_24xx_handle_els(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *iocb) +{ + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_hw_data *ha = vha->hw; + struct fc_port *sess = NULL, *conflict_sess = NULL; + uint64_t wwn; + port_id_t port_id; + uint16_t loop_id; + uint16_t wd3_lo; + int res = 0; + unsigned long flags; + + lockdep_assert_held(&ha->hardware_lock); + + wwn = wwn_to_u64(iocb->u.isp24.port_name); + + port_id.b.domain = iocb->u.isp24.port_id[2]; + port_id.b.area = iocb->u.isp24.port_id[1]; + port_id.b.al_pa = iocb->u.isp24.port_id[0]; + port_id.b.rsvd_1 = 0; + + loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); + + ql_dbg(ql_dbg_disc, vha, 0xf026, + "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n", + vha->vp_idx, iocb->u.isp24.port_id[2], + iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], + iocb->u.isp24.status_subcode, loop_id, + iocb->u.isp24.port_name); + + /* res = 1 means ack at the end of thread + * res = 0 means ack async/later. + */ + switch (iocb->u.isp24.status_subcode) { + case ELS_PLOGI: + res = qlt_handle_login(vha, iocb); + break; + + case ELS_PRLI: + if (N2N_TOPO(ha)) { + sess = qla2x00_find_fcport_by_wwpn(vha, + iocb->u.isp24.port_name, 1); + + if (vha->hw->flags.edif_enabled && sess && + (!(sess->flags & FCF_FCSP_DEVICE) || + !sess->edif.authok)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC Term PRLI due to unauthorize PRLI\n", + __func__, __LINE__, iocb->u.isp24.port_name); + qlt_send_term_imm_notif(vha, iocb, 1); + break; + } + + if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n", + __func__, __LINE__, + iocb->u.isp24.port_name); + qlt_send_term_imm_notif(vha, iocb, 1); + break; + } + + res = qlt_handle_login(vha, iocb); + break; + } + + if (IS_SW_RESV_ADDR(port_id)) { + res = 1; + break; + } + + wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); + + if (wwn) { + spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); + sess = qlt_find_sess_invalidate_other(vha, wwn, port_id, + loop_id, &conflict_sess); + spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); + } + + if (conflict_sess) { + switch (conflict_sess->disc_state) { + case DSC_DELETED: + case DSC_DELETE_PEND: + break; + default: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b, + "PRLI with conflicting sess %p port %8phC\n", + conflict_sess, conflict_sess->port_name); + conflict_sess->fw_login_state = + DSC_LS_PORT_UNAVAIL; + qlt_send_term_imm_notif(vha, iocb, 1); + res = 0; + break; + } + } + + if (sess != NULL) { + bool delete = false; + int sec; + + if (vha->hw->flags.edif_enabled && sess && + (!(sess->flags & FCF_FCSP_DEVICE) || + !sess->edif.authok)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC Term PRLI due to unauthorize prli\n", + __func__, __LINE__, iocb->u.isp24.port_name); + qlt_send_term_imm_notif(vha, iocb, 1); + break; + } + + spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); + switch (sess->fw_login_state) { + case DSC_LS_PLOGI_PEND: + case DSC_LS_PLOGI_COMP: + case DSC_LS_PRLI_COMP: + break; + default: + delete = true; + break; + } + + switch (sess->disc_state) { + case DSC_UPD_FCPORT: + spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, + flags); + + sec = jiffies_to_msecs(jiffies - + sess->jiffies_at_registration)/1000; + if (sess->sec_since_registration < sec && sec && + !(sec % 5)) { + sess->sec_since_registration = sec; + ql_dbg(ql_dbg_disc, sess->vha, 0xffff, + "%s %8phC : Slow Rport 
registration(%d Sec)\n", + __func__, sess->port_name, sec); + } + qlt_send_term_imm_notif(vha, iocb, 1); + return 0; + + case DSC_LOGIN_PEND: + case DSC_GPDB: + case DSC_LOGIN_COMPLETE: + case DSC_ADISC: + delete = false; + break; + default: + break; + } + + if (delete) { + spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, + flags); + /* + * Impatient initiator sent PRLI before last + * PLOGI could finish. Will force him to re-try, + * while last one finishes. + */ + ql_log(ql_log_warn, sess->vha, 0xf095, + "sess %p PRLI received, before plogi ack.\n", + sess); + qlt_send_term_imm_notif(vha, iocb, 1); + res = 0; + break; + } + + /* + * This shouldn't happen under normal circumstances, + * since we have deleted the old session during PLOGI + */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096, + "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n", + sess->loop_id, sess, iocb->u.isp24.nport_handle); + + sess->local = 0; + sess->loop_id = loop_id; + sess->d_id = port_id; + sess->fw_login_state = DSC_LS_PRLI_PEND; + + if (wd3_lo & BIT_7) + sess->conf_compl_supported = 1; + + if ((wd3_lo & BIT_4) == 0) + sess->port_type = FCT_INITIATOR; + else + sess->port_type = FCT_TARGET; + + spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); + } + res = 1; /* send notify ack */ + + /* Make session global (not used in fabric mode) */ + if (ha->current_topology != ISP_CFG_F) { + if (sess) { + ql_dbg(ql_dbg_disc, vha, 0x20fa, + "%s %d %8phC post nack\n", + __func__, __LINE__, sess->port_name); + qla24xx_post_nack_work(vha, sess, iocb, + SRB_NACK_PRLI); + res = 0; + } else { + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + } else { + if (sess) { + ql_dbg(ql_dbg_disc, vha, 0x20fb, + "%s %d %8phC post nack\n", + __func__, __LINE__, sess->port_name); + qla24xx_post_nack_work(vha, sess, iocb, + SRB_NACK_PRLI); + res = 0; + } + } + break; + + case ELS_TPRLO: + if (le16_to_cpu(iocb->u.isp24.flags) & + NOTIFY24XX_FLAGS_GLOBAL_TPRLO) { + loop_id = 0xFFFF; + qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS); + res = 1; + break; + } + fallthrough; + case ELS_LOGO: + case ELS_PRLO: + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + sess = qla2x00_find_fcport_by_loopid(vha, loop_id); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + if (sess) { + sess->login_gen++; + sess->fw_login_state = DSC_LS_LOGO_PEND; + sess->logo_ack_needed = 1; + memcpy(sess->iocb, iocb, IOCB_SIZE); + } + + res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); + + ql_dbg(ql_dbg_disc, vha, 0x20fc, + "%s: logo %llx res %d sess %p ", + __func__, wwn, res, sess); + if (res == 0) { + /* + * cmd went upper layer, look for qlt_xmit_tm_rsp() + * for LOGO_ACK & sess delete + */ + BUG_ON(!sess); + res = 0; + } else { + /* cmd did not go to upper layer. 
*/ + if (sess) { + qlt_schedule_sess_for_deletion(sess); + res = 0; + } + /* else logo will be ack */ + } + break; + case ELS_PDISC: + case ELS_ADISC: + { + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + + if (tgt->link_reinit_iocb_pending) { + qlt_send_notify_ack(ha->base_qpair, + &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); + tgt->link_reinit_iocb_pending = 0; + } + + sess = qla2x00_find_fcport_by_wwpn(vha, + iocb->u.isp24.port_name, 1); + if (sess) { + ql_dbg(ql_dbg_disc, vha, 0x20fd, + "sess %p lid %d|%d DS %d LS %d\n", + sess, sess->loop_id, loop_id, + sess->disc_state, sess->fw_login_state); + } + + res = 1; /* send notify ack */ + break; + } + + case ELS_FLOGI: /* should never happen */ + default: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, + "qla_target(%d): Unsupported ELS command %x " + "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); + res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); + break; + } + + ql_dbg(ql_dbg_disc, vha, 0xf026, + "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n", + vha->vp_idx, iocb->u.isp24.status_subcode, res); + + return res; +} + +/* + * ha->hardware_lock supposed to be held on entry. + * Might drop it, then reacquire. + */ +static void qlt_handle_imm_notify(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *iocb) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t add_flags = 0; + int send_notify_ack = 1; + uint16_t status; + + lockdep_assert_held(&ha->hardware_lock); + + status = le16_to_cpu(iocb->u.isp2x.status); + switch (status) { + case IMM_NTFY_LIP_RESET: + { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032, + "qla_target(%d): LIP reset (loop %#x), subcode %x\n", + vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), + iocb->u.isp24.status_subcode); + + if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) + send_notify_ack = 0; + break; + } + + case IMM_NTFY_LIP_LINK_REINIT: + { + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, + "qla_target(%d): LINK REINIT (loop %#x, " + "subcode %x)\n", vha->vp_idx, + le16_to_cpu(iocb->u.isp24.nport_handle), + iocb->u.isp24.status_subcode); + if (tgt->link_reinit_iocb_pending) { + qlt_send_notify_ack(ha->base_qpair, + &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); + } + memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); + tgt->link_reinit_iocb_pending = 1; + /* + * QLogic requires to wait after LINK REINIT for possible + * PDISC or ADISC ELS commands + */ + send_notify_ack = 0; + break; + } + + case IMM_NTFY_PORT_LOGOUT: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034, + "qla_target(%d): Port logout (loop " + "%#x, subcode %x)\n", vha->vp_idx, + le16_to_cpu(iocb->u.isp24.nport_handle), + iocb->u.isp24.status_subcode); + + if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0) + send_notify_ack = 0; + /* The sessions will be cleared in the callback, if needed */ + break; + + case IMM_NTFY_GLBL_TPRLO: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035, + "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status); + if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) + send_notify_ack = 0; + /* The sessions will be cleared in the callback, if needed */ + break; + + case IMM_NTFY_PORT_CONFIG: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036, + "qla_target(%d): Port config changed (%x)\n", vha->vp_idx, + status); + if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) + send_notify_ack = 0; + /* The sessions will be cleared in the callback, if needed */ + break; + + case IMM_NTFY_GLBL_LOGO: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a, + "qla_target(%d): Link failure detected\n", + vha->vp_idx); + /* I_T nexus loss */ + if 
(qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) + send_notify_ack = 0; + break; + + case IMM_NTFY_IOCB_OVERFLOW: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b, + "qla_target(%d): Cannot provide requested " + "capability (IOCB overflowed the immediate notify " + "resource count)\n", vha->vp_idx); + break; + + case IMM_NTFY_ABORT_TASK: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037, + "qla_target(%d): Abort Task (S %08x I %#x -> " + "L %#x)\n", vha->vp_idx, + le16_to_cpu(iocb->u.isp2x.seq_id), + GET_TARGET_ID(ha, (struct atio_from_isp *)iocb), + le16_to_cpu(iocb->u.isp2x.lun)); + if (qlt_abort_task(vha, iocb) == 0) + send_notify_ack = 0; + break; + + case IMM_NTFY_RESOURCE: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c, + "qla_target(%d): Out of resources, host %ld\n", + vha->vp_idx, vha->host_no); + break; + + case IMM_NTFY_MSG_RX: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038, + "qla_target(%d): Immediate notify task %x\n", + vha->vp_idx, iocb->u.isp2x.task_flags); + break; + + case IMM_NTFY_ELS: + if (qlt_24xx_handle_els(vha, iocb) == 0) + send_notify_ack = 0; + break; + default: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, + "qla_target(%d): Received unknown immediate " + "notify status %x\n", vha->vp_idx, status); + break; + } + + if (send_notify_ack) + qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0, + 0, 0); +} + +/* + * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire + * This function sends busy to ISP 2xxx or 24xx. + */ +static int __qlt_send_busy(struct qla_qpair *qpair, + struct atio_from_isp *atio, uint16_t status) +{ + struct scsi_qla_host *vha = qpair->vha; + struct ctio7_to_24xx *ctio24; + struct qla_hw_data *ha = vha->hw; + request_t *pkt; + struct fc_port *sess = NULL; + unsigned long flags; + u16 temp; + port_id_t id; + + id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + sess = qla2x00_find_fcport_by_nportid(vha, &id, 1); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + if (!sess) { + qlt_send_term_exchange(qpair, NULL, atio, 1, 0); + return 0; + } + /* Sending marker isn't necessary, since we called from ISR */ + + pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL); + if (!pkt) { + ql_dbg(ql_dbg_io, vha, 0x3063, + "qla_target(%d): %s failed: unable to allocate " + "request packet", vha->vp_idx, __func__); + return -ENOMEM; + } + + qpair->tgt_counters.num_q_full_sent++; + pkt->entry_count = 1; + pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + + ctio24 = (struct ctio7_to_24xx *)pkt; + ctio24->entry_type = CTIO_TYPE7; + ctio24->nport_handle = cpu_to_le16(sess->loop_id); + ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); + ctio24->vp_index = vha->vp_idx; + ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); + ctio24->exchange_addr = atio->u.isp24.exchange_addr; + temp = (atio->u.isp24.attr << 9) | + CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | + CTIO7_FLAGS_DONT_RET_CTIO; + ctio24->u.status1.flags = cpu_to_le16(temp); + /* + * CTIO from fw w/o se_cmd doesn't provide enough info to retry it, + * if the explicit conformation is used. 
+ */ + ctio24->u.status1.ox_id = + cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); + ctio24->u.status1.scsi_status = cpu_to_le16(status); + + ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); + + if (ctio24->u.status1.residual != 0) + ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER); + + /* Memory Barrier */ + wmb(); + if (qpair->reqq_start_iocbs) + qpair->reqq_start_iocbs(qpair); + else + qla2x00_start_iocbs(vha, qpair->req); + return 0; +} + +/* + * This routine is used to allocate a command for either a QFull condition + * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go + * out previously. + */ +static void +qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, + struct atio_from_isp *atio, uint16_t status, int qfull) +{ + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_hw_data *ha = vha->hw; + struct fc_port *sess; + struct qla_tgt_cmd *cmd; + unsigned long flags; + + if (unlikely(tgt->tgt_stop)) { + ql_dbg(ql_dbg_io, vha, 0x300a, + "New command while device %p is shutting down\n", tgt); + return; + } + + if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) { + vha->hw->tgt.num_qfull_cmds_dropped++; + if (vha->hw->tgt.num_qfull_cmds_dropped > + vha->qla_stats.stat_max_qfull_cmds_dropped) + vha->qla_stats.stat_max_qfull_cmds_dropped = + vha->hw->tgt.num_qfull_cmds_dropped; + + ql_dbg(ql_dbg_io, vha, 0x3068, + "qla_target(%d): %s: QFull CMD dropped[%d]\n", + vha->vp_idx, __func__, + vha->hw->tgt.num_qfull_cmds_dropped); + + qlt_chk_exch_leak_thresh_hold(vha); + return; + } + + sess = ha->tgt.tgt_ops->find_sess_by_s_id + (vha, atio->u.isp24.fcp_hdr.s_id); + if (!sess) + return; + + cmd = ha->tgt.tgt_ops->get_cmd(sess); + if (!cmd) { + ql_dbg(ql_dbg_io, vha, 0x3009, + "qla_target(%d): %s: Allocation of cmd failed\n", + vha->vp_idx, __func__); + + vha->hw->tgt.num_qfull_cmds_dropped++; + if (vha->hw->tgt.num_qfull_cmds_dropped > + vha->qla_stats.stat_max_qfull_cmds_dropped) + vha->qla_stats.stat_max_qfull_cmds_dropped = + vha->hw->tgt.num_qfull_cmds_dropped; + + qlt_chk_exch_leak_thresh_hold(vha); + return; + } + + qlt_incr_num_pend_cmds(vha); + INIT_LIST_HEAD(&cmd->cmd_list); + memcpy(&cmd->atio, atio, sizeof(*atio)); + + cmd->tgt = vha->vha_tgt.qla_tgt; + cmd->vha = vha; + cmd->reset_count = ha->base_qpair->chip_reset; + cmd->q_full = 1; + cmd->qpair = ha->base_qpair; + + if (qfull) { + cmd->q_full = 1; + /* NOTE: borrowing the state field to carry the status */ + cmd->state = status; + } else + cmd->term_exchg = 1; + + spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); + list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list); + + vha->hw->tgt.num_qfull_cmds_alloc++; + if (vha->hw->tgt.num_qfull_cmds_alloc > + vha->qla_stats.stat_max_qfull_cmds_alloc) + vha->qla_stats.stat_max_qfull_cmds_alloc = + vha->hw->tgt.num_qfull_cmds_alloc; + spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); +} + +int +qlt_free_qfull_cmds(struct qla_qpair *qpair) +{ + struct scsi_qla_host *vha = qpair->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + struct qla_tgt_cmd *cmd, *tcmd; + struct list_head free_list, q_full_list; + int rc = 0; + + if (list_empty(&ha->tgt.q_full_list)) + return 0; + + INIT_LIST_HEAD(&free_list); + INIT_LIST_HEAD(&q_full_list); + + spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); + if (list_empty(&ha->tgt.q_full_list)) { + spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); + return 0; + } + + list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list); + 
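/* process the detached q_full entries without holding q_full_lock; the qpair lock below covers the actual sends */ +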
spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) { + if (cmd->q_full) + /* cmd->state is a borrowed field to hold status */ + rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state); + else if (cmd->term_exchg) + rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio); + + if (rc == -ENOMEM) + break; + + if (cmd->q_full) + ql_dbg(ql_dbg_io, vha, 0x3006, + "%s: busy sent for ox_id[%04x]\n", __func__, + be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); + else if (cmd->term_exchg) + ql_dbg(ql_dbg_io, vha, 0x3007, + "%s: Term exchg sent for ox_id[%04x]\n", __func__, + be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); + else + ql_dbg(ql_dbg_io, vha, 0x3008, + "%s: Unexpected cmd in QFull list %p\n", __func__, + cmd); + + list_move_tail(&cmd->cmd_list, &free_list); + + /* piggy back on hardware_lock for protection */ + vha->hw->tgt.num_qfull_cmds_alloc--; + } + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + cmd = NULL; + + list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { + list_del(&cmd->cmd_list); + /* This cmd was never sent to TCM. There is no need + * to schedule free or call free_cmd + */ + qlt_free_cmd(cmd); + } + + if (!list_empty(&q_full_list)) { + spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); + list_splice(&q_full_list, &vha->hw->tgt.q_full_list); + spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); + } + + return rc; +} + +static void +qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio, + uint16_t status) +{ + int rc = 0; + struct scsi_qla_host *vha = qpair->vha; + + rc = __qlt_send_busy(qpair, atio, status); + if (rc == -ENOMEM) + qlt_alloc_qfull_cmd(vha, atio, status, 1); +} + +static int +qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair, + struct atio_from_isp *atio, uint8_t ha_locked) +{ + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + + if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) + return 0; + + if (!ha_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_send_busy(qpair, atio, qla_sam_status); + if (!ha_locked) + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return 1; +} + +/* ha->hardware_lock supposed to be held on entry */ +/* called via callback from qla2xxx */ +static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, + struct atio_from_isp *atio, uint8_t ha_locked) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + int rc; + unsigned long flags = 0; + + if (unlikely(tgt == NULL)) { + ql_dbg(ql_dbg_tgt, vha, 0x3064, + "ATIO pkt, but no tgt (ha %p)", ha); + return; + } + /* + * In tgt_stop mode we also should allow all requests to pass. + * Otherwise, some commands can stuck. 
+ */ + + tgt->atio_irq_cmd_count++; + + switch (atio->u.raw.entry_type) { + case ATIO_TYPE7: + if (unlikely(atio->u.isp24.exchange_addr == + cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) { + ql_dbg(ql_dbg_io, vha, 0x3065, + "qla_target(%d): ATIO_TYPE7 " + "received with UNKNOWN exchange address, " + "sending QUEUE_FULL\n", vha->vp_idx); + if (!ha_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_send_busy(ha->base_qpair, atio, qla_sam_status); + if (!ha_locked) + spin_unlock_irqrestore(&ha->hardware_lock, + flags); + break; + } + + if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { + rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair, + atio, ha_locked); + if (rc != 0) { + tgt->atio_irq_cmd_count--; + return; + } + rc = qlt_handle_cmd_for_atio(vha, atio); + } else { + rc = qlt_handle_task_mgmt(vha, atio); + } + if (unlikely(rc != 0)) { + if (!ha_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); + switch (rc) { + case -ENODEV: + ql_dbg(ql_dbg_tgt, vha, 0xe05f, + "qla_target: Unable to send command to target\n"); + break; + case -EBADF: + ql_dbg(ql_dbg_tgt, vha, 0xe05f, + "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); + qlt_send_term_exchange(ha->base_qpair, NULL, + atio, 1, 0); + break; + case -EBUSY: + ql_dbg(ql_dbg_tgt, vha, 0xe060, + "qla_target(%d): Unable to send command to target, sending BUSY status\n", + vha->vp_idx); + qlt_send_busy(ha->base_qpair, atio, + tc_sam_status); + break; + default: + ql_dbg(ql_dbg_tgt, vha, 0xe060, + "qla_target(%d): Unable to send command to target, sending BUSY status\n", + vha->vp_idx); + qlt_send_busy(ha->base_qpair, atio, + qla_sam_status); + break; + } + if (!ha_locked) + spin_unlock_irqrestore(&ha->hardware_lock, + flags); + } + break; + + case IMMED_NOTIFY_TYPE: + { + if (unlikely(atio->u.isp2x.entry_status != 0)) { + ql_dbg(ql_dbg_tgt, vha, 0xe05b, + "qla_target(%d): Received ATIO packet %x " + "with error status %x\n", vha->vp_idx, + atio->u.raw.entry_type, + atio->u.isp2x.entry_status); + break; + } + ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); + + if (!ha_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); + if (!ha_locked) + spin_unlock_irqrestore(&ha->hardware_lock, flags); + break; + } + + default: + ql_dbg(ql_dbg_tgt, vha, 0xe05c, + "qla_target(%d): Received unknown ATIO atio " + "type %x\n", vha->vp_idx, atio->u.raw.entry_type); + break; + } + + tgt->atio_irq_cmd_count--; +} + +/* + * qpair lock is assume to be held + * rc = 0 : send terminate & abts respond + * rc != 0: do not send term & abts respond + */ +static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha, + struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry) +{ + struct qla_hw_data *ha = vha->hw; + int rc = 0; + + /* + * Detect unresolved exchange. If the same ABTS is unable + * to terminate an existing command and the same ABTS loops + * between FW & Driver, then force FW dump. Under 1 jiff, + * we should see multiple loops. + */ + if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort && + qpair->retry_term_jiff == jiffies) { + /* found existing exchange */ + qpair->retry_term_cnt++; + if (qpair->retry_term_cnt >= 5) { + rc = -EIO; + qpair->retry_term_cnt = 0; + ql_log(ql_log_warn, vha, 0xffff, + "Unable to send ABTS Respond. 
Dumping firmware.\n"); + ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer, + vha, 0xffff, (uint8_t *)entry, sizeof(*entry)); + + if (qpair == ha->base_qpair) + ha->isp_ops->fw_dump(vha); + else + qla2xxx_dump_fw(vha); + + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + } else if (qpair->retry_term_jiff != jiffies) { + qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort; + qpair->retry_term_cnt = 0; + qpair->retry_term_jiff = jiffies; + } + + return rc; +} + + +static void qlt_handle_abts_completion(struct scsi_qla_host *vha, + struct rsp_que *rsp, response_t *pkt) +{ + struct abts_resp_from_24xx_fw *entry = + (struct abts_resp_from_24xx_fw *)pkt; + u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK; + struct qla_tgt_mgmt_cmd *mcmd; + struct qla_hw_data *ha = vha->hw; + + mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt); + if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) { + ql_dbg(ql_dbg_async, vha, 0xe064, + "qla_target(%d): ABTS Comp without mcmd\n", + vha->vp_idx); + return; + } + + if (mcmd) + vha = mcmd->vha; + vha->vha_tgt.qla_tgt->abts_resp_expected--; + + ql_dbg(ql_dbg_tgt, vha, 0xe038, + "ABTS_RESP_24XX: compl_status %x\n", + entry->compl_status); + + if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) { + if (le32_to_cpu(entry->error_subcode1) == 0x1E && + le32_to_cpu(entry->error_subcode2) == 0) { + if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) { + ha->tgt.tgt_ops->free_mcmd(mcmd); + return; + } + qlt_24xx_retry_term_exchange(vha, rsp->qpair, + pkt, mcmd); + } else { + ql_dbg(ql_dbg_tgt, vha, 0xe063, + "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)", + vha->vp_idx, entry->compl_status, + entry->error_subcode1, + entry->error_subcode2); + ha->tgt.tgt_ops->free_mcmd(mcmd); + } + } else if (mcmd) { + ha->tgt.tgt_ops->free_mcmd(mcmd); + } +} + +/* ha->hardware_lock supposed to be held on entry */ +/* called via callback from qla2xxx */ +static void qlt_response_pkt(struct scsi_qla_host *vha, + struct rsp_que *rsp, response_t *pkt) +{ + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + + if (unlikely(tgt == NULL)) { + ql_dbg(ql_dbg_tgt, vha, 0xe05d, + "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n", + vha->vp_idx, pkt->entry_type, vha->hw); + return; + } + + /* + * In tgt_stop mode we also should allow all requests to pass. + * Otherwise, some commands can stuck. 
+ */ + + switch (pkt->entry_type) { + case CTIO_CRC2: + case CTIO_TYPE7: + { + struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; + + qlt_do_ctio_completion(vha, rsp, entry->handle, + le16_to_cpu(entry->status)|(pkt->entry_status << 16), + entry); + break; + } + + case ACCEPT_TGT_IO_TYPE: + { + struct atio_from_isp *atio = (struct atio_from_isp *)pkt; + int rc; + + if (atio->u.isp2x.status != + cpu_to_le16(ATIO_CDB_VALID)) { + ql_dbg(ql_dbg_tgt, vha, 0xe05e, + "qla_target(%d): ATIO with error " + "status %x received\n", vha->vp_idx, + le16_to_cpu(atio->u.isp2x.status)); + break; + } + + rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1); + if (rc != 0) + return; + + rc = qlt_handle_cmd_for_atio(vha, atio); + if (unlikely(rc != 0)) { + switch (rc) { + case -ENODEV: + ql_dbg(ql_dbg_tgt, vha, 0xe05f, + "qla_target: Unable to send command to target\n"); + break; + case -EBADF: + ql_dbg(ql_dbg_tgt, vha, 0xe05f, + "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); + qlt_send_term_exchange(rsp->qpair, NULL, + atio, 1, 0); + break; + case -EBUSY: + ql_dbg(ql_dbg_tgt, vha, 0xe060, + "qla_target(%d): Unable to send command to target, sending BUSY status\n", + vha->vp_idx); + qlt_send_busy(rsp->qpair, atio, + tc_sam_status); + break; + default: + ql_dbg(ql_dbg_tgt, vha, 0xe060, + "qla_target(%d): Unable to send command to target, sending BUSY status\n", + vha->vp_idx); + qlt_send_busy(rsp->qpair, atio, + qla_sam_status); + break; + } + } + } + break; + + case CONTINUE_TGT_IO_TYPE: + { + struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; + + qlt_do_ctio_completion(vha, rsp, entry->handle, + le16_to_cpu(entry->status)|(pkt->entry_status << 16), + entry); + break; + } + + case CTIO_A64_TYPE: + { + struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; + + qlt_do_ctio_completion(vha, rsp, entry->handle, + le16_to_cpu(entry->status)|(pkt->entry_status << 16), + entry); + break; + } + + case IMMED_NOTIFY_TYPE: + ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); + qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); + break; + + case NOTIFY_ACK_TYPE: + if (tgt->notify_ack_expected > 0) { + struct nack_to_isp *entry = (struct nack_to_isp *)pkt; + + ql_dbg(ql_dbg_tgt, vha, 0xe036, + "NOTIFY_ACK seq %08x status %x\n", + le16_to_cpu(entry->u.isp2x.seq_id), + le16_to_cpu(entry->u.isp2x.status)); + tgt->notify_ack_expected--; + if (entry->u.isp2x.status != + cpu_to_le16(NOTIFY_ACK_SUCCESS)) { + ql_dbg(ql_dbg_tgt, vha, 0xe061, + "qla_target(%d): NOTIFY_ACK " + "failed %x\n", vha->vp_idx, + le16_to_cpu(entry->u.isp2x.status)); + } + } else { + ql_dbg(ql_dbg_tgt, vha, 0xe062, + "qla_target(%d): Unexpected NOTIFY_ACK received\n", + vha->vp_idx); + } + break; + + case ABTS_RECV_24XX: + ql_dbg(ql_dbg_tgt, vha, 0xe037, + "ABTS_RECV_24XX: instance %d\n", vha->vp_idx); + qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); + break; + + case ABTS_RESP_24XX: + if (tgt->abts_resp_expected > 0) { + qlt_handle_abts_completion(vha, rsp, pkt); + } else { + ql_dbg(ql_dbg_tgt, vha, 0xe064, + "qla_target(%d): Unexpected ABTS_RESP_24XX " + "received\n", vha->vp_idx); + } + break; + + default: + ql_dbg(ql_dbg_tgt, vha, 0xe065, + "qla_target(%d): Received unknown response pkt " + "type %x\n", vha->vp_idx, pkt->entry_type); + break; + } + +} + +/* + * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reacquire + */ +void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, + uint16_t *mailbox) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + int login_code; + + if (!tgt || tgt->tgt_stop || tgt->tgt_stopped) + return; + + if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) && + IS_QLA2100(ha)) + return; + /* + * In tgt_stop mode we also should allow all requests to pass. + * Otherwise, some commands can get stuck. + */ + + + switch (code) { + case MBA_RESET: /* Reset */ + case MBA_SYSTEM_ERR: /* System Error */ + case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ + case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a, + "qla_target(%d): System error async event %#x " + "occurred", vha->vp_idx, code); + break; + case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */ + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + + case MBA_LOOP_UP: + { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, + "qla_target(%d): Async LOOP_UP occurred " + "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, + mailbox[0], mailbox[1], mailbox[2], mailbox[3]); + if (tgt->link_reinit_iocb_pending) { + qlt_send_notify_ack(ha->base_qpair, + &tgt->link_reinit_iocb, + 0, 0, 0, 0, 0, 0); + tgt->link_reinit_iocb_pending = 0; + } + break; + } + + case MBA_LIP_OCCURRED: + case MBA_LOOP_DOWN: + case MBA_LIP_RESET: + case MBA_RSCN_UPDATE: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, + "qla_target(%d): Async event %#x occurred " + "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code, + mailbox[0], mailbox[1], mailbox[2], mailbox[3]); + break; + + case MBA_REJECTED_FCP_CMD: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017, + "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", + vha->vp_idx, + mailbox[0], mailbox[1], mailbox[2], mailbox[3]); + + if (mailbox[3] == 1) { + /* exchange starvation. */ + vha->hw->exch_starvation++; + if (vha->hw->exch_starvation > 5) { + ql_log(ql_log_warn, vha, 0xd03a, + "Exchange starvation. 
Resetting RISC\n"); + + vha->hw->exch_starvation = 0; + if (IS_P3P_TYPE(vha->hw)) + set_bit(FCOE_CTX_RESET_NEEDED, + &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + } + break; + + case MBA_PORT_UPDATE: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, + "qla_target(%d): Port update async event %#x " + "occurred: updating the ports database (m[0]=%x, m[1]=%x, " + "m[2]=%x, m[3]=%x)", vha->vp_idx, code, + mailbox[0], mailbox[1], mailbox[2], mailbox[3]); + + login_code = mailbox[2]; + if (login_code == 0x4) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, + "Async MB 2: Got PLOGI Complete\n"); + vha->hw->exch_starvation = 0; + } else if (login_code == 0x7) + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, + "Async MB 2: Port Logged Out\n"); + break; + default: + break; + } + +} + +static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, + uint16_t loop_id) +{ + fc_port_t *fcport, *tfcp, *del; + int rc; + unsigned long flags; + u8 newfcport = 0; + + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (!fcport) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, + "qla_target(%d): Allocation of tmp FC port failed", + vha->vp_idx); + return NULL; + } + + fcport->loop_id = loop_id; + + rc = qla24xx_gpdb_wait(vha, fcport, 0); + if (rc != QLA_SUCCESS) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, + "qla_target(%d): Failed to retrieve fcport " + "information -- get_port_database() returned %x " + "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); + kfree(fcport); + return NULL; + } + + del = NULL; + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1); + + if (tfcp) { + tfcp->d_id = fcport->d_id; + tfcp->port_type = fcport->port_type; + tfcp->supported_classes = fcport->supported_classes; + tfcp->flags |= fcport->flags; + tfcp->scan_state = QLA_FCPORT_FOUND; + + del = fcport; + fcport = tfcp; + } else { + if (vha->hw->current_topology == ISP_CFG_F) + fcport->flags |= FCF_FABRIC_DEVICE; + + list_add_tail(&fcport->list, &vha->vp_fcports); + if (!IS_SW_RESV_ADDR(fcport->d_id)) + vha->fcport_count++; + fcport->login_gen++; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); + fcport->login_succ = 1; + newfcport = 1; + } + + fcport->deleted = 0; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + switch (vha->host->active_mode) { + case MODE_INITIATOR: + case MODE_DUAL: + if (newfcport) { + if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { + qla24xx_sched_upd_fcport(fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0x20ff, + "%s %d %8phC post gpsc fcp_cnt %d\n", + __func__, __LINE__, fcport->port_name, vha->fcport_count); + qla24xx_post_gpsc_work(vha, fcport); + } + } + break; + + case MODE_TARGET: + default: + break; + } + if (del) + qla2x00_free_fcport(del); + + return fcport; +} + +/* Must be called under tgt_mutex */ +static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha, + be_id_t s_id) +{ + struct fc_port *sess = NULL; + fc_port_t *fcport = NULL; + int rc, global_resets; + uint16_t loop_id = 0; + + if (s_id.domain == 0xFF && s_id.area == 0xFC) { + /* + * This is Domain Controller, so it should be + * OK to drop SCSI commands from it. 
+ */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, + "Unable to find initiator with S_ID %x:%x:%x", + s_id.domain, s_id.area, s_id.al_pa); + return NULL; + } + + mutex_lock(&vha->vha_tgt.tgt_mutex); + +retry: + global_resets = + atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); + + rc = qla24xx_get_loop_id(vha, s_id, &loop_id); + if (rc != 0) { + mutex_unlock(&vha->vha_tgt.tgt_mutex); + + ql_log(ql_log_info, vha, 0xf071, + "qla_target(%d): Unable to find " + "initiator with S_ID %x:%x:%x", + vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa); + + if (rc == -ENOENT) { + qlt_port_logo_t logo; + + logo.id = be_to_port_id(s_id); + logo.cmd_count = 1; + qlt_send_first_logo(vha, &logo); + } + + return NULL; + } + + fcport = qlt_get_port_database(vha, loop_id); + if (!fcport) { + mutex_unlock(&vha->vha_tgt.tgt_mutex); + return NULL; + } + + if (global_resets != + atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, + "qla_target(%d): global reset during session discovery " + "(counter was %d, new %d), retrying", vha->vp_idx, + global_resets, + atomic_read(&vha->vha_tgt. + qla_tgt->tgt_global_resets_count)); + goto retry; + } + + sess = qlt_create_sess(vha, fcport, true); + + mutex_unlock(&vha->vha_tgt.tgt_mutex); + + return sess; +} + +static void qlt_abort_work(struct qla_tgt *tgt, + struct qla_tgt_sess_work_param *prm) +{ + struct scsi_qla_host *vha = tgt->vha; + struct qla_hw_data *ha = vha->hw; + struct fc_port *sess = NULL; + unsigned long flags = 0, flags2 = 0; + be_id_t s_id; + int rc; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags2); + + if (tgt->tgt_stop) + goto out_term2; + + s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id); + + sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); + if (!sess) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); + + sess = qlt_make_local_sess(vha, s_id); + /* sess has got an extra creation ref */ + + spin_lock_irqsave(&ha->tgt.sess_lock, flags2); + if (!sess) + goto out_term2; + } else { + if (sess->deleted) { + sess = NULL; + goto out_term2; + } + + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c, + "%s: kref_get fail %8phC \n", + __func__, sess->port_name); + sess = NULL; + goto out_term2; + } + } + + rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); + + ha->tgt.tgt_ops->put_sess(sess); + + if (rc != 0) + goto out_term; + return; + +out_term2: + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); + +out_term: + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts, + FCP_TMF_REJECTED, false); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +static void qlt_sess_work_fn(struct work_struct *work) +{ + struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work); + struct scsi_qla_host *vha = tgt->vha; + unsigned long flags; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt); + + spin_lock_irqsave(&tgt->sess_work_lock, flags); + while (!list_empty(&tgt->sess_works_list)) { + struct qla_tgt_sess_work_param *prm = list_entry( + tgt->sess_works_list.next, typeof(*prm), + sess_works_list_entry); + + /* + * This work can be scheduled on several CPUs at time, so we + * must delete the entry to eliminate double processing + */ + list_del(&prm->sess_works_list_entry); + + spin_unlock_irqrestore(&tgt->sess_work_lock, flags); + + switch (prm->type) { + case QLA_TGT_SESS_WORK_ABORT: + qlt_abort_work(tgt, prm); + break; + default: + BUG_ON(1); + 
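/* unknown session work type; treated as a driver bug */ +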
break; + } + + spin_lock_irqsave(&tgt->sess_work_lock, flags); + + kfree(prm); + } + spin_unlock_irqrestore(&tgt->sess_work_lock, flags); +} + +/* Must be called under tgt_host_action_mutex */ +int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) +{ + struct qla_tgt *tgt; + int rc, i; + struct qla_qpair_hint *h; + + if (!QLA_TGT_MODE_ENABLED()) + return 0; + + if (!IS_TGT_MODE_CAPABLE(ha)) { + ql_log(ql_log_warn, base_vha, 0xe070, + "This adapter does not support target mode.\n"); + return 0; + } + + ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, + "Registering target for host %ld(%p).\n", base_vha->host_no, ha); + + BUG_ON(base_vha->vha_tgt.qla_tgt != NULL); + + tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); + if (!tgt) { + ql_dbg(ql_dbg_tgt, base_vha, 0xe066, + "Unable to allocate struct qla_tgt\n"); + return -ENOMEM; + } + + tgt->qphints = kcalloc(ha->max_qpairs + 1, + sizeof(struct qla_qpair_hint), + GFP_KERNEL); + if (!tgt->qphints) { + kfree(tgt); + ql_log(ql_log_warn, base_vha, 0x0197, + "Unable to allocate qpair hints.\n"); + return -ENOMEM; + } + + qla2xxx_driver_template.supported_mode |= MODE_TARGET; + + rc = btree_init64(&tgt->lun_qpair_map); + if (rc) { + kfree(tgt->qphints); + kfree(tgt); + ql_log(ql_log_info, base_vha, 0x0198, + "Unable to initialize lun_qpair_map btree\n"); + return -EIO; + } + h = &tgt->qphints[0]; + h->qpair = ha->base_qpair; + INIT_LIST_HEAD(&h->hint_elem); + h->cpuid = ha->base_qpair->cpuid; + list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list); + + for (i = 0; i < ha->max_qpairs; i++) { + unsigned long flags; + + struct qla_qpair *qpair = ha->queue_pair_map[i]; + + h = &tgt->qphints[i + 1]; + INIT_LIST_HEAD(&h->hint_elem); + if (qpair) { + h->qpair = qpair; + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + list_add_tail(&h->hint_elem, &qpair->hints_list); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + h->cpuid = qpair->cpuid; + } + } + + tgt->ha = ha; + tgt->vha = base_vha; + init_waitqueue_head(&tgt->waitQ); + spin_lock_init(&tgt->sess_work_lock); + INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); + INIT_LIST_HEAD(&tgt->sess_works_list); + atomic_set(&tgt->tgt_global_resets_count, 0); + + base_vha->vha_tgt.qla_tgt = tgt; + + ql_dbg(ql_dbg_tgt, base_vha, 0xe067, + "qla_target(%d): using 64 Bit PCI addressing", + base_vha->vp_idx); + /* 3 is reserved */ + tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3); + + mutex_lock(&qla_tgt_mutex); + list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); + mutex_unlock(&qla_tgt_mutex); + + if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target) + ha->tgt.tgt_ops->add_target(base_vha); + + return 0; +} + +/* Must be called under tgt_host_action_mutex */ +int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) +{ + if (!vha->vha_tgt.qla_tgt) + return 0; + + if (vha->fc_vport) { + qlt_release(vha->vha_tgt.qla_tgt); + return 0; + } + + /* free left over qfull cmds */ + qlt_init_term_exchange(vha); + + ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", + vha->host_no, ha); + qlt_release(vha->vha_tgt.qla_tgt); + + return 0; +} + +void qla_remove_hostmap(struct qla_hw_data *ha) +{ + struct scsi_qla_host *node; + u32 key = 0; + + btree_for_each_safe32(&ha->host_map, key, node) + btree_remove32(&ha->host_map, key); + + btree_destroy32(&ha->host_map); +} + +static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, + unsigned char *b) +{ + pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name); + pr_debug("qla2xxx HW vha->port_name: 
%8phC\n", vha->port_name); + put_unaligned_be64(wwpn, b); + pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b); +} + +/** + * qlt_lport_register - register lport with external module + * + * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data + * @phys_wwpn: physical port WWPN + * @npiv_wwpn: NPIV WWPN + * @npiv_wwnn: NPIV WWNN + * @callback: lport initialization callback for tcm_qla2xxx code + */ +int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, + u64 npiv_wwpn, u64 npiv_wwnn, + int (*callback)(struct scsi_qla_host *, void *, u64, u64)) +{ + struct qla_tgt *tgt; + struct scsi_qla_host *vha; + struct qla_hw_data *ha; + struct Scsi_Host *host; + unsigned long flags; + int rc; + u8 b[WWN_SIZE]; + + mutex_lock(&qla_tgt_mutex); + list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) { + vha = tgt->vha; + ha = vha->hw; + + host = vha->host; + if (!host) + continue; + + if (!(host->hostt->supported_mode & MODE_TARGET)) + continue; + + if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED) + continue; + + spin_lock_irqsave(&ha->hardware_lock, flags); + if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) { + pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", + host->host_no); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + continue; + } + if (tgt->tgt_stop) { + pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n", + host->host_no); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + continue; + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (!scsi_host_get(host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe068, + "Unable to scsi_host_get() for" + " qla2xxx scsi_host\n"); + continue; + } + qlt_lport_dump(vha, phys_wwpn, b); + + if (memcmp(vha->port_name, b, WWN_SIZE)) { + scsi_host_put(host); + continue; + } + rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn); + if (rc != 0) + scsi_host_put(host); + + mutex_unlock(&qla_tgt_mutex); + return rc; + } + mutex_unlock(&qla_tgt_mutex); + + return -ENODEV; +} +EXPORT_SYMBOL(qlt_lport_register); + +/** + * qlt_lport_deregister - Degister lport + * + * @vha: Registered scsi_qla_host pointer + */ +void qlt_lport_deregister(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct Scsi_Host *sh = vha->host; + /* + * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data + */ + vha->vha_tgt.target_lport_ptr = NULL; + ha->tgt.tgt_ops = NULL; + /* + * Release the Scsi_Host reference for the underlying qla2xxx host + */ + scsi_host_put(sh); +} +EXPORT_SYMBOL(qlt_lport_deregister); + +/* Must be called under HW lock */ +void qlt_set_mode(struct scsi_qla_host *vha) +{ + switch (vha->qlini_mode) { + case QLA2XXX_INI_MODE_DISABLED: + case QLA2XXX_INI_MODE_EXCLUSIVE: + vha->host->active_mode = MODE_TARGET; + break; + case QLA2XXX_INI_MODE_ENABLED: + vha->host->active_mode = MODE_INITIATOR; + break; + case QLA2XXX_INI_MODE_DUAL: + vha->host->active_mode = MODE_DUAL; + break; + default: + break; + } +} + +/* Must be called under HW lock */ +static void qlt_clear_mode(struct scsi_qla_host *vha) +{ + switch (vha->qlini_mode) { + case QLA2XXX_INI_MODE_DISABLED: + vha->host->active_mode = MODE_UNKNOWN; + break; + case QLA2XXX_INI_MODE_EXCLUSIVE: + vha->host->active_mode = MODE_INITIATOR; + break; + case QLA2XXX_INI_MODE_ENABLED: + case QLA2XXX_INI_MODE_DUAL: + vha->host->active_mode = MODE_INITIATOR; + break; + default: + break; + } +} + +/* + * qla_tgt_enable_vha - NO LOCK HELD + * + * host_reset, bring up w/ Target Mode Enabled + */ +void +qlt_enable_vha(struct scsi_qla_host 
*vha) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + unsigned long flags; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + + if (!tgt) { + ql_dbg(ql_dbg_tgt, vha, 0xe069, + "Unable to locate qla_tgt pointer from" + " struct qla_hw_data\n"); + dump_stack(); + return; + } + if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED) + return; + + if (ha->tgt.num_act_qpairs > ha->max_qpairs) + ha->tgt.num_act_qpairs = ha->max_qpairs; + spin_lock_irqsave(&ha->hardware_lock, flags); + tgt->tgt_stopped = 0; + qlt_set_mode(vha); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + mutex_lock(&ha->optrom_mutex); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, + "%s.\n", __func__); + if (vha->vp_idx) { + qla24xx_disable_vp(vha); + qla24xx_enable_vp(vha); + } else { + set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); + qla2xxx_wake_dpc(base_vha); + WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) != + QLA_SUCCESS); + } + mutex_unlock(&ha->optrom_mutex); +} +EXPORT_SYMBOL(qlt_enable_vha); + +/* + * qla_tgt_disable_vha - NO LOCK HELD + * + * Disable Target Mode and reset the adapter + */ +static void qlt_disable_vha(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + unsigned long flags; + + if (!tgt) { + ql_dbg(ql_dbg_tgt, vha, 0xe06a, + "Unable to locate qla_tgt pointer from" + " struct qla_hw_data\n"); + dump_stack(); + return; + } + + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_clear_mode(vha); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + + /* + * We are expecting the offline state. + * QLA_FUNCTION_FAILED means that adapter is offline. + */ + if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) + ql_dbg(ql_dbg_tgt, vha, 0xe081, + "adapter is offline\n"); +} + +/* + * Called from qla_init.c:qla24xx_vport_create() contex to setup + * the target mode specific struct scsi_qla_host and struct qla_hw_data + * members. + */ +void +qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) +{ + vha->vha_tgt.qla_tgt = NULL; + + mutex_init(&vha->vha_tgt.tgt_mutex); + mutex_init(&vha->vha_tgt.tgt_host_action_mutex); + + INIT_LIST_HEAD(&vha->unknown_atio_list); + INIT_DELAYED_WORK(&vha->unknown_atio_work, qlt_unknown_atio_work_fn); + + qlt_clear_mode(vha); + + /* + * NOTE: Currently the value is kept the same for <24xx and + * >=24xx ISPs. If it is necessary to change it, + * the check should be added for specific ISPs, + * assigning the value appropriately. + */ + ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; + + qlt_add_target(ha, vha); +} + +u8 +qlt_rff_id(struct scsi_qla_host *vha) +{ + u8 fc4_feature = 0; + /* + * FC-4 Feature bit 0 indicates target functionality to the name server. + */ + if (qla_tgt_mode_enabled(vha)) { + fc4_feature = BIT_0; + } else if (qla_ini_mode_enabled(vha)) { + fc4_feature = BIT_1; + } else if (qla_dual_mode_enabled(vha)) + fc4_feature = BIT_0 | BIT_1; + + return fc4_feature; +} + +/* + * qlt_init_atio_q_entries() - Initializes ATIO queue entries. + * @ha: HA context + * + * Beginning of ATIO ring has initialization control block already built + * by nvram config routine. + * + * Returns 0 on success. 
+ */ +void +qlt_init_atio_q_entries(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint16_t cnt; + struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; + + if (qla_ini_mode_enabled(vha)) + return; + + for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { + pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED); + pkt++; + } + +} + +/* + * qlt_24xx_process_atio_queue() - Process ATIO queue entries. + * @ha: SCSI driver HA context + */ +void +qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) +{ + struct qla_hw_data *ha = vha->hw; + struct atio_from_isp *pkt; + int cnt, i; + + if (!ha->flags.fw_started) + return; + + while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || + fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) { + pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; + cnt = pkt->u.raw.entry_count; + + if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) { + /* + * This packet is corrupted. The header + payload + * can not be trusted. There is no point in passing + * it further up. + */ + ql_log(ql_log_warn, vha, 0xd03c, + "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", + &pkt->u.isp24.fcp_hdr.s_id, + be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), + pkt->u.isp24.exchange_addr, pkt); + + adjust_corrupted_atio(pkt); + qlt_send_term_exchange(ha->base_qpair, NULL, pkt, + ha_locked, 0); + } else { + qlt_24xx_atio_pkt_all_vps(vha, + (struct atio_from_isp *)pkt, ha_locked); + } + + for (i = 0; i < cnt; i++) { + ha->tgt.atio_ring_index++; + if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) { + ha->tgt.atio_ring_index = 0; + ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; + } else + ha->tgt.atio_ring_ptr++; + + pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED); + pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; + } + wmb(); + } + + /* Adjust ring index */ + wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); +} + +void +qlt_24xx_config_rings(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_msix_entry *msix = &ha->msix_entries[2]; + struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb; + + if (!QLA_TGT_MODE_ENABLED()) + return; + + wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0); + wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0); + rd_reg_dword(ISP_ATIO_Q_OUT(vha)); + + if (ha->flags.msix_enabled) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + icb->msix_atio = cpu_to_le16(msix->entry); + icb->firmware_options_2 &= cpu_to_le32(~BIT_26); + ql_dbg(ql_dbg_init, vha, 0xf072, + "Registering ICB vector 0x%x for atio que.\n", + msix->entry); + } + } else { + /* INTx|MSI */ + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + icb->msix_atio = 0; + icb->firmware_options_2 |= cpu_to_le32(BIT_26); + ql_dbg(ql_dbg_init, vha, 0xf072, + "%s: Use INTx for ATIOQ.\n", __func__); + } + } +} + +void +qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) +{ + struct qla_hw_data *ha = vha->hw; + u32 tmp; + + if (!QLA_TGT_MODE_ENABLED()) + return; + + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { + if (!ha->tgt.saved_set) { + /* We save only once */ + ha->tgt.saved_exchange_count = nv->exchange_count; + ha->tgt.saved_firmware_options_1 = + nv->firmware_options_1; + ha->tgt.saved_firmware_options_2 = + nv->firmware_options_2; + ha->tgt.saved_firmware_options_3 = + nv->firmware_options_3; + ha->tgt.saved_set = 1; + } + + if (qla_tgt_mode_enabled(vha)) + nv->exchange_count = cpu_to_le16(0xFFFF); + else /* dual */ + nv->exchange_count = 
cpu_to_le16(vha->ql2xexchoffld); + + /* Enable target mode */ + nv->firmware_options_1 |= cpu_to_le32(BIT_4); + + /* Disable ini mode, if requested */ + if (qla_tgt_mode_enabled(vha)) + nv->firmware_options_1 |= cpu_to_le32(BIT_5); + + /* Disable Full Login after LIP */ + nv->firmware_options_1 &= cpu_to_le32(~BIT_13); + /* Enable initial LIP */ + nv->firmware_options_1 &= cpu_to_le32(~BIT_9); + if (ql2xtgt_tape_enable) + /* Enable FC Tape support */ + nv->firmware_options_2 |= cpu_to_le32(BIT_12); + else + /* Disable FC Tape support */ + nv->firmware_options_2 &= cpu_to_le32(~BIT_12); + + /* Disable Full Login after LIP */ + nv->host_p &= cpu_to_le32(~BIT_10); + + /* + * clear BIT 15 explicitly as we have seen at least + * a couple of instances where this was set and this + * was causing the firmware to not be initialized. + */ + nv->firmware_options_1 &= cpu_to_le32(~BIT_15); + /* Enable target PRLI control */ + nv->firmware_options_2 |= cpu_to_le32(BIT_14); + + if (IS_QLA25XX(ha)) { + /* Change Loop-prefer to Pt-Pt */ + tmp = ~(BIT_4|BIT_5|BIT_6); + nv->firmware_options_2 &= cpu_to_le32(tmp); + tmp = P2P << 4; + nv->firmware_options_2 |= cpu_to_le32(tmp); + } + } else { + if (ha->tgt.saved_set) { + nv->exchange_count = ha->tgt.saved_exchange_count; + nv->firmware_options_1 = + ha->tgt.saved_firmware_options_1; + nv->firmware_options_2 = + ha->tgt.saved_firmware_options_2; + nv->firmware_options_3 = + ha->tgt.saved_firmware_options_3; + } + return; + } + + if (ha->base_qpair->enable_class_2) { + if (vha->flags.init_done) + fc_host_supported_classes(vha->host) = + FC_COS_CLASS2 | FC_COS_CLASS3; + + nv->firmware_options_2 |= cpu_to_le32(BIT_8); + } else { + if (vha->flags.init_done) + fc_host_supported_classes(vha->host) = FC_COS_CLASS3; + + nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); + } +} + +void +qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, + struct init_cb_24xx *icb) +{ + struct qla_hw_data *ha = vha->hw; + + if (!QLA_TGT_MODE_ENABLED()) + return; + + if (ha->tgt.node_name_set) { + memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); + icb->firmware_options_1 |= cpu_to_le32(BIT_14); + } +} + +void +qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) +{ + struct qla_hw_data *ha = vha->hw; + u32 tmp; + + if (!QLA_TGT_MODE_ENABLED()) + return; + + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { + if (!ha->tgt.saved_set) { + /* We save only once */ + ha->tgt.saved_exchange_count = nv->exchange_count; + ha->tgt.saved_firmware_options_1 = + nv->firmware_options_1; + ha->tgt.saved_firmware_options_2 = + nv->firmware_options_2; + ha->tgt.saved_firmware_options_3 = + nv->firmware_options_3; + ha->tgt.saved_set = 1; + } + + if (qla_tgt_mode_enabled(vha)) + nv->exchange_count = cpu_to_le16(0xFFFF); + else /* dual */ + nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld); + + /* Enable target mode */ + nv->firmware_options_1 |= cpu_to_le32(BIT_4); + + /* Disable ini mode, if requested */ + if (qla_tgt_mode_enabled(vha)) + nv->firmware_options_1 |= cpu_to_le32(BIT_5); + /* Disable Full Login after LIP */ + nv->firmware_options_1 &= cpu_to_le32(~BIT_13); + /* Enable initial LIP */ + nv->firmware_options_1 &= cpu_to_le32(~BIT_9); + /* + * clear BIT 15 explicitly as we have seen at + * least a couple of instances where this was set + * and this was causing the firmware to not be + * initialized. 
+ */ + nv->firmware_options_1 &= cpu_to_le32(~BIT_15); + if (ql2xtgt_tape_enable) + /* Enable FC tape support */ + nv->firmware_options_2 |= cpu_to_le32(BIT_12); + else + /* Disable FC tape support */ + nv->firmware_options_2 &= cpu_to_le32(~BIT_12); + + /* Disable Full Login after LIP */ + nv->host_p &= cpu_to_le32(~BIT_10); + /* Enable target PRLI control */ + nv->firmware_options_2 |= cpu_to_le32(BIT_14); + + /* Change Loop-prefer to Pt-Pt */ + tmp = ~(BIT_4|BIT_5|BIT_6); + nv->firmware_options_2 &= cpu_to_le32(tmp); + tmp = P2P << 4; + nv->firmware_options_2 |= cpu_to_le32(tmp); + } else { + if (ha->tgt.saved_set) { + nv->exchange_count = ha->tgt.saved_exchange_count; + nv->firmware_options_1 = + ha->tgt.saved_firmware_options_1; + nv->firmware_options_2 = + ha->tgt.saved_firmware_options_2; + nv->firmware_options_3 = + ha->tgt.saved_firmware_options_3; + } + return; + } + + if (ha->base_qpair->enable_class_2) { + if (vha->flags.init_done) + fc_host_supported_classes(vha->host) = + FC_COS_CLASS2 | FC_COS_CLASS3; + + nv->firmware_options_2 |= cpu_to_le32(BIT_8); + } else { + if (vha->flags.init_done) + fc_host_supported_classes(vha->host) = FC_COS_CLASS3; + + nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); + } +} + +void +qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, + struct init_cb_81xx *icb) +{ + struct qla_hw_data *ha = vha->hw; + + if (!QLA_TGT_MODE_ENABLED()) + return; + + if (ha->tgt.node_name_set) { + memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); + icb->firmware_options_1 |= cpu_to_le32(BIT_14); + } +} + +void +qlt_83xx_iospace_config(struct qla_hw_data *ha) +{ + if (!QLA_TGT_MODE_ENABLED()) + return; + + ha->msix_count += 1; /* For ATIO Q */ +} + + +void +qlt_modify_vp_config(struct scsi_qla_host *vha, + struct vp_config_entry_24xx *vpmod) +{ + /* enable target mode. Bit5 = 1 => disable */ + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) + vpmod->options_idx1 &= ~BIT_5; + + /* Disable ini mode, if requested. 
bit4 = 1 => disable */ + if (qla_tgt_mode_enabled(vha)) + vpmod->options_idx1 &= ~BIT_4; +} + +void +qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) +{ + mutex_init(&base_vha->vha_tgt.tgt_mutex); + if (!QLA_TGT_MODE_ENABLED()) + return; + + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; + ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; + } else { + ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in; + ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; + } + + mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); + + INIT_LIST_HEAD(&base_vha->unknown_atio_list); + INIT_DELAYED_WORK(&base_vha->unknown_atio_work, + qlt_unknown_atio_work_fn); + + qlt_clear_mode(base_vha); + + qla_update_vp_map(base_vha, SET_VP_IDX); +} + +irqreturn_t +qla83xx_msix_atio_q(int irq, void *dev_id) +{ + struct rsp_que *rsp; + scsi_qla_host_t *vha; + struct qla_hw_data *ha; + unsigned long flags; + + rsp = (struct rsp_que *) dev_id; + ha = rsp->hw; + vha = pci_get_drvdata(ha->pdev); + + spin_lock_irqsave(&ha->tgt.atio_lock, flags); + + qlt_24xx_process_atio_queue(vha, 0); + + spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); + + return IRQ_HANDLED; +} + +static void +qlt_handle_abts_recv_work(struct work_struct *work) +{ + struct qla_tgt_sess_op *op = container_of(work, + struct qla_tgt_sess_op, work); + scsi_qla_host_t *vha = op->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + + if (qla2x00_reset_active(vha) || + (op->chip_reset != ha->base_qpair->chip_reset)) + return; + + spin_lock_irqsave(&ha->tgt.atio_lock, flags); + qlt_24xx_process_atio_queue(vha, 0); + spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); + + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + kfree(op); +} + +void +qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp, + response_t *pkt) +{ + struct qla_tgt_sess_op *op; + + op = kzalloc(sizeof(*op), GFP_ATOMIC); + + if (!op) { + /* do not reach for ATIO queue here. This is best effort err + * recovery at this point. 
+ */ + qlt_response_pkt_all_vps(vha, rsp, pkt); + return; + } + + memcpy(&op->atio, pkt, sizeof(*pkt)); + op->vha = vha; + op->chip_reset = vha->hw->base_qpair->chip_reset; + op->rsp = rsp; + INIT_WORK(&op->work, qlt_handle_abts_recv_work); + queue_work(qla_tgt_wq, &op->work); + return; +} + +int +qlt_mem_alloc(struct qla_hw_data *ha) +{ + if (!QLA_TGT_MODE_ENABLED()) + return 0; + + ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, + (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), + &ha->tgt.atio_dma, GFP_KERNEL); + if (!ha->tgt.atio_ring) { + return -ENOMEM; + } + return 0; +} + +void +qlt_mem_free(struct qla_hw_data *ha) +{ + if (!QLA_TGT_MODE_ENABLED()) + return; + + if (ha->tgt.atio_ring) { + dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * + sizeof(struct atio_from_isp), ha->tgt.atio_ring, + ha->tgt.atio_dma); + } + ha->tgt.atio_ring = NULL; + ha->tgt.atio_dma = 0; +} + +static int __init qlt_parse_ini_mode(void) +{ + if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) + ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; + else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0) + ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; + else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) + ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; + else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0) + ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL; + else + return false; + + return true; +} + +int __init qlt_init(void) +{ + int ret; + + BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64); + BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64); + + if (!qlt_parse_ini_mode()) { + ql_log(ql_log_fatal, NULL, 0xe06b, + "qlt_parse_ini_mode() failed\n"); + return -EINVAL; + } + + if (!QLA_TGT_MODE_ENABLED()) + return 0; + + qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", + sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct + qla_tgt_mgmt_cmd), 0, NULL); + if (!qla_tgt_mgmt_cmd_cachep) { + ql_log(ql_log_fatal, NULL, 0xd04b, + "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); + return -ENOMEM; + } + + qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep", + sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t), + 0, NULL); + + if (!qla_tgt_plogi_cachep) { + ql_log(ql_log_fatal, NULL, 0xe06d, + "kmem_cache_create for qla_tgt_plogi_cachep failed\n"); + ret = -ENOMEM; + goto out_mgmt_cmd_cachep; + } + + qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, + mempool_free_slab, qla_tgt_mgmt_cmd_cachep); + if (!qla_tgt_mgmt_cmd_mempool) { + ql_log(ql_log_fatal, NULL, 0xe06e, + "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); + ret = -ENOMEM; + goto out_plogi_cachep; + } + + qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); + if (!qla_tgt_wq) { + ql_log(ql_log_fatal, NULL, 0xe06f, + "alloc_workqueue for qla_tgt_wq failed\n"); + ret = -ENOMEM; + goto out_cmd_mempool; + } + /* + * Return 1 to signal that initiator-mode is being disabled + */ + return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 
1 : 0; + +out_cmd_mempool: + mempool_destroy(qla_tgt_mgmt_cmd_mempool); +out_plogi_cachep: + kmem_cache_destroy(qla_tgt_plogi_cachep); +out_mgmt_cmd_cachep: + kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); + return ret; +} + +void qlt_exit(void) +{ + if (!QLA_TGT_MODE_ENABLED()) + return; + + destroy_workqueue(qla_tgt_wq); + mempool_destroy(qla_tgt_mgmt_cmd_mempool); + kmem_cache_destroy(qla_tgt_plogi_cachep); + kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); +} diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h new file mode 100644 index 000000000..354fca2e7 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -0,0 +1,1093 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin + * Copyright (C) 2004 - 2005 Leonid Stoljar + * Copyright (C) 2006 Nathaniel Clark + * Copyright (C) 2007 - 2010 ID7 Ltd. + * + * Forward port and refactoring to modern qla2xxx and target/configfs + * + * Copyright (C) 2010-2011 Nicholas A. Bellinger + * + * Additional file for the target driver support. + */ +/* + * This is the global def file that is useful for including from the + * target portion. + */ + +#ifndef __QLA_TARGET_H +#define __QLA_TARGET_H + +#include "qla_def.h" +#include "qla_dsd.h" + +/* + * Must be changed on any change in any initiator visible interfaces or + * data in the target add-on + */ +#define QLA2XXX_TARGET_MAGIC 269 + +/* + * Must be changed on any change in any target visible interfaces or + * data in the initiator + */ +#define QLA2XXX_INITIATOR_MAGIC 57222 + +#define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive" +#define QLA2XXX_INI_MODE_STR_DISABLED "disabled" +#define QLA2XXX_INI_MODE_STR_ENABLED "enabled" +#define QLA2XXX_INI_MODE_STR_DUAL "dual" + +#define QLA2XXX_INI_MODE_EXCLUSIVE 0 +#define QLA2XXX_INI_MODE_DISABLED 1 +#define QLA2XXX_INI_MODE_ENABLED 2 +#define QLA2XXX_INI_MODE_DUAL 3 + +#define QLA2XXX_COMMAND_COUNT_INIT 250 +#define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250 + +/* + * Used to mark which completion handles (for RIO Status's) are for CTIO's + * vs. regular (non-target) info. This is checked for in + * qla2x00_process_response_queue() to see if a handle coming back in a + * multi-complete should come to the tgt driver or be handled there by qla2xxx + */ +#define CTIO_COMPLETION_HANDLE_MARK BIT_29 +#if (CTIO_COMPLETION_HANDLE_MARK <= DEFAULT_OUTSTANDING_COMMANDS) +#error "CTIO_COMPLETION_HANDLE_MARK not larger than " + "DEFAULT_OUTSTANDING_COMMANDS" +#endif +#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK) + +/* Used to mark CTIO as intermediate */ +#define CTIO_INTERMEDIATE_HANDLE_MARK BIT_30 +#define QLA_TGT_NULL_HANDLE 0 + +#define QLA_TGT_HANDLE_MASK 0xF0000000 +#define QLA_QPID_HANDLE_MASK 0x00FF0000 /* qpair id mask */ +#define QLA_CMD_HANDLE_MASK 0x0000FFFF +#define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~QLA_TGT_HANDLE_MASK) + +#define QLA_QPID_HANDLE_SHIFT 16 +#define GET_QID(_h) ((_h & QLA_QPID_HANDLE_MASK) >> QLA_QPID_HANDLE_SHIFT) + + +#ifndef OF_SS_MODE_0 +/* + * ISP target entries - Flags bit definitions. 
+ */ +#define OF_SS_MODE_0 0 +#define OF_SS_MODE_1 1 +#define OF_SS_MODE_2 2 +#define OF_SS_MODE_3 3 + +#define OF_EXPL_CONF BIT_5 /* Explicit Confirmation Requested */ +#define OF_DATA_IN BIT_6 /* Data in to initiator */ + /* (data from target to initiator) */ +#define OF_DATA_OUT BIT_7 /* Data out from initiator */ + /* (data from initiator to target) */ +#define OF_NO_DATA (BIT_7 | BIT_6) +#define OF_INC_RC BIT_8 /* Increment command resource count */ +#define OF_FAST_POST BIT_9 /* Enable mailbox fast posting. */ +#define OF_CONF_REQ BIT_13 /* Confirmation Requested */ +#define OF_TERM_EXCH BIT_14 /* Terminate exchange */ +#define OF_SSTS BIT_15 /* Send SCSI status */ +#endif + +#ifndef QLA_TGT_DATASEGS_PER_CMD32 +#define QLA_TGT_DATASEGS_PER_CMD32 3 +#define QLA_TGT_DATASEGS_PER_CONT32 7 +#define QLA_TGT_MAX_SG32(ql) \ + (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD32 + \ + QLA_TGT_DATASEGS_PER_CONT32*((ql) - 1)) : 0) + +#define QLA_TGT_DATASEGS_PER_CMD64 2 +#define QLA_TGT_DATASEGS_PER_CONT64 5 +#define QLA_TGT_MAX_SG64(ql) \ + (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD64 + \ + QLA_TGT_DATASEGS_PER_CONT64*((ql) - 1)) : 0) +#endif + +#ifndef QLA_TGT_DATASEGS_PER_CMD_24XX +#define QLA_TGT_DATASEGS_PER_CMD_24XX 1 +#define QLA_TGT_DATASEGS_PER_CONT_24XX 5 +#define QLA_TGT_MAX_SG_24XX(ql) \ + (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \ + QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0)) +#endif + +#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \ + ? le16_to_cpu((iocb)->u.isp2x.target.extended) \ + : (uint16_t)(iocb)->u.isp2x.target.id.standard) + +#ifndef NOTIFY_ACK_TYPE +#define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. */ +/* + * ISP queue - notify acknowledge entry structure definition. + * This is sent to the ISP from the target driver. + */ +struct nack_to_isp { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + union { + struct { + __le32 sys_define_2; /* System defined. */ + target_id_t target; + uint8_t target_id; + uint8_t reserved_1; + __le16 flags; + __le16 resp_code; + __le16 status; + __le16 task_flags; + __le16 seq_id; + __le16 srr_rx_id; + __le32 srr_rel_offs; + __le16 srr_ui; + __le16 srr_flags; + __le16 srr_reject_code; + uint8_t srr_reject_vendor_uniq; + uint8_t srr_reject_code_expl; + uint8_t reserved_2[24]; + } isp2x; + struct { + uint32_t handle; + __le16 nport_handle; + uint16_t reserved_1; + __le16 flags; + __le16 srr_rx_id; + __le16 status; + uint8_t status_subcode; + uint8_t fw_handle; + __le32 exchange_address; + __le32 srr_rel_offs; + __le16 srr_ui; + __le16 srr_flags; + uint8_t reserved_4[19]; + uint8_t vp_index; + uint8_t srr_reject_vendor_uniq; + uint8_t srr_reject_code_expl; + uint8_t srr_reject_code; + uint8_t reserved_5[5]; + } isp24; + } u; + uint8_t reserved[2]; + __le16 ox_id; +} __packed; +#define NOTIFY_ACK_FLAGS_FCSP BIT_5 +#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3 +#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0 +#define NOTIFY_ACK_SRR_FLAGS_REJECT 1 + +#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9 + +#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL 0 +#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a + +#define NOTIFY_ACK_SUCCESS 0x01 +#endif + +#ifndef ACCEPT_TGT_IO_TYPE +#define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */ +#endif + +#ifndef CONTINUE_TGT_IO_TYPE +#define CONTINUE_TGT_IO_TYPE 0x17 +/* + * ISP queue - Continue Target I/O (CTIO) entry for status mode 0 structure. 
+ * This structure is sent to the ISP 2xxx from target driver. + */ +struct ctio_to_2xxx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System defined handle */ + target_id_t target; + __le16 rx_id; + __le16 flags; + __le16 status; + __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */ + __le16 dseg_count; /* Data segment count. */ + __le32 relative_offset; + __le32 residual; + __le16 reserved_1[3]; + __le16 scsi_status; + __le32 transfer_length; + struct dsd32 dsd[3]; +} __packed; +#define ATIO_PATH_INVALID 0x07 +#define ATIO_CANT_PROV_CAP 0x16 +#define ATIO_CDB_VALID 0x3D + +#define ATIO_EXEC_READ BIT_1 +#define ATIO_EXEC_WRITE BIT_0 +#endif + +#ifndef CTIO_A64_TYPE +#define CTIO_A64_TYPE 0x1F +#define CTIO_SUCCESS 0x01 +#define CTIO_ABORTED 0x02 +#define CTIO_INVALID_RX_ID 0x08 +#define CTIO_TIMEOUT 0x0B +#define CTIO_DIF_ERROR 0x0C /* DIF error detected */ +#define CTIO_LIP_RESET 0x0E +#define CTIO_TARGET_RESET 0x17 +#define CTIO_PORT_UNAVAILABLE 0x28 +#define CTIO_PORT_LOGGED_OUT 0x29 +#define CTIO_PORT_CONF_CHANGED 0x2A +#define CTIO_SRR_RECEIVED 0x45 +#define CTIO_FAST_AUTH_ERR 0x63 +#define CTIO_FAST_INCOMP_PAD_LEN 0x65 +#define CTIO_FAST_INVALID_REQ 0x66 +#define CTIO_FAST_SPI_ERR 0x67 +#endif + +#ifndef CTIO_RET_TYPE +#define CTIO_RET_TYPE 0x17 /* CTIO return entry */ +#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */ +#endif + +struct fcp_hdr { + uint8_t r_ctl; + be_id_t d_id; + uint8_t cs_ctl; + be_id_t s_id; + uint8_t type; + uint8_t f_ctl[3]; + uint8_t seq_id; + uint8_t df_ctl; + uint16_t seq_cnt; + __be16 ox_id; + uint16_t rx_id; + __le32 parameter; +}; + +struct fcp_hdr_le { + le_id_t d_id; + uint8_t r_ctl; + le_id_t s_id; + uint8_t cs_ctl; + uint8_t f_ctl[3]; + uint8_t type; + __le16 seq_cnt; + uint8_t df_ctl; + uint8_t seq_id; + __le16 rx_id; + __le16 ox_id; + __le32 parameter; +}; + +#define F_CTL_EXCH_CONTEXT_RESP BIT_23 +#define F_CTL_SEQ_CONTEXT_RESIP BIT_22 +#define F_CTL_LAST_SEQ BIT_20 +#define F_CTL_END_SEQ BIT_19 +#define F_CTL_SEQ_INITIATIVE BIT_16 + +#define R_CTL_BASIC_LINK_SERV 0x80 +#define R_CTL_B_ACC 0x4 +#define R_CTL_B_RJT 0x5 + +struct atio7_fcp_cmnd { + uint64_t lun; + uint8_t cmnd_ref; + uint8_t task_attr:3; + uint8_t reserved:5; + uint8_t task_mgmt_flags; +#define FCP_CMND_TASK_MGMT_CLEAR_ACA 6 +#define FCP_CMND_TASK_MGMT_TARGET_RESET 5 +#define FCP_CMND_TASK_MGMT_LU_RESET 4 +#define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET 2 +#define FCP_CMND_TASK_MGMT_ABORT_TASK_SET 1 + uint8_t wrdata:1; + uint8_t rddata:1; + uint8_t add_cdb_len:6; + uint8_t cdb[16]; + /* + * add_cdb is optional and can absent from struct atio7_fcp_cmnd. Size 4 + * only to make sizeof(struct atio7_fcp_cmnd) be as expected by + * BUILD_BUG_ON in qlt_init(). + */ + uint8_t add_cdb[4]; + /* __le32 data_length; */ +} __packed; + +/* + * ISP queue - Accept Target I/O (ATIO) type entry IOCB structure. + * This is sent from the ISP to the target driver. + */ +struct atio_from_isp { + union { + struct { + __le16 entry_hdr; + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + __le32 sys_define_2; /* System defined. 
*/ + target_id_t target; + __le16 rx_id; + __le16 flags; + __le16 status; + uint8_t command_ref; + uint8_t task_codes; + uint8_t task_flags; + uint8_t execution_codes; + uint8_t cdb[MAX_CMDSZ]; + __le32 data_length; + __le16 lun; + uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */ + __le16 reserved_32[6]; + __le16 ox_id; + } isp2x; + struct { + __le16 entry_hdr; + uint8_t fcp_cmnd_len_low; + uint8_t fcp_cmnd_len_high:4; + uint8_t attr:4; + __le32 exchange_addr; +#define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF + struct fcp_hdr fcp_hdr; + struct atio7_fcp_cmnd fcp_cmnd; + } isp24; + struct { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + __le16 attr_n_length; +#define FCP_CMD_LENGTH_MASK 0x0fff +#define FCP_CMD_LENGTH_MIN 0x38 + uint8_t data[56]; + __le32 signature; +#define ATIO_PROCESSED 0xDEADDEAD /* Signature */ + } raw; + } u; +} __packed; + +static inline int fcpcmd_is_corrupted(struct atio *atio) +{ + if (atio->entry_type == ATIO_TYPE7 && + ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) < + FCP_CMD_LENGTH_MIN)) + return 1; + else + return 0; +} + +/* adjust corrupted atio so we won't trip over the same entry again. */ +static inline void adjust_corrupted_atio(struct atio_from_isp *atio) +{ + atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN); + atio->u.isp24.fcp_cmnd.add_cdb_len = 0; +} + +static inline int get_datalen_for_atio(struct atio_from_isp *atio) +{ + int len = atio->u.isp24.fcp_cmnd.add_cdb_len; + + return get_unaligned_be32(&atio->u.isp24.fcp_cmnd.add_cdb[len * 4]); +} + +#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ + +/* + * ISP queue - Continue Target I/O (ATIO) type 7 entry (for 24xx) structure. + * This structure is sent to the ISP 24xx from the target driver. + */ + +struct ctio7_to_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System defined handle */ + __le16 nport_handle; +#define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF + __le16 timeout; + __le16 dseg_count; /* Data segment count. */ + uint8_t vp_index; + uint8_t add_flags; + le_id_t initiator_id; + uint8_t reserved; + __le32 exchange_addr; + union { + struct { + __le16 reserved1; + __le16 flags; + union { + __le32 residual; + struct { + uint8_t rsvd1; + uint8_t edif_flags; +#define EF_EN_EDIF BIT_0 +#define EF_NEW_SA BIT_1 + uint16_t rsvd2; + }; + }; + __le16 ox_id; + __le16 scsi_status; + __le32 relative_offset; + __le32 reserved2; + __le32 transfer_length; + __le32 reserved3; + struct dsd64 dsd; + } status0; + struct { + __le16 sense_length; + __le16 flags; + __le32 residual; + __le16 ox_id; + __le16 scsi_status; + __le16 response_len; + __le16 reserved; + uint8_t sense_data[24]; + } status1; + } u; +} __packed; + +/* + * ISP queue - CTIO type 7 from ISP 24xx to target driver + * returned entry structure. + */ +struct ctio7_from_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System defined handle */ + __le16 status; + __le16 timeout; + __le16 dseg_count; /* Data segment count. 
*/ + uint8_t vp_index; + uint8_t reserved1[5]; + __le32 exchange_address; + __le16 edif_sa_index; + __le16 flags; + __le32 residual; + __le16 ox_id; + __le16 reserved3; + __le32 relative_offset; + uint8_t reserved4[24]; +} __packed; + +/* CTIO7 flags values */ +#define CTIO7_FLAGS_SEND_STATUS BIT_15 +#define CTIO7_FLAGS_TERMINATE BIT_14 +#define CTIO7_FLAGS_CONFORM_REQ BIT_13 +#define CTIO7_FLAGS_DONT_RET_CTIO BIT_8 +#define CTIO7_FLAGS_STATUS_MODE_0 0 +#define CTIO7_FLAGS_STATUS_MODE_1 BIT_6 +#define CTIO7_FLAGS_STATUS_MODE_2 BIT_7 +#define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5 +#define CTIO7_FLAGS_CONFIRM_SATISF BIT_4 +#define CTIO7_FLAGS_DSD_PTR BIT_2 +#define CTIO7_FLAGS_DATA_IN BIT_1 /* data to initiator */ +#define CTIO7_FLAGS_DATA_OUT BIT_0 /* data from initiator */ + +#define ELS_PLOGI 0x3 +#define ELS_FLOGI 0x4 +#define ELS_LOGO 0x5 +#define ELS_PRLI 0x20 +#define ELS_PRLO 0x21 +#define ELS_TPRLO 0x24 +#define ELS_PDISC 0x50 +#define ELS_ADISC 0x52 + +/* + *CTIO Type CRC_2 IOCB + */ +struct ctio_crc2_to_fw { + uint8_t entry_type; /* Entry type. */ +#define CTIO_CRC2 0x7A + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + __le16 nport_handle; /* N_PORT handle. */ + __le16 timeout; /* Command timeout. */ + + __le16 dseg_count; /* Data segment count. */ + uint8_t vp_index; + uint8_t add_flags; /* additional flags */ +#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3 + + le_id_t initiator_id; /* initiator ID */ + uint8_t reserved1; + __le32 exchange_addr; /* rcv exchange address */ + __le16 reserved2; + __le16 flags; /* refer to CTIO7 flags values */ + __le32 residual; + __le16 ox_id; + __le16 scsi_status; + __le32 relative_offset; + __le32 reserved5; + __le32 transfer_length; /* total fc transfer length */ + __le32 reserved6; + __le64 crc_context_address __packed; /* Data segment address. */ + __le16 crc_context_len; /* Data segment length. */ + __le16 reserved_1; /* MUST be set to 0. */ +}; + +/* CTIO Type CRC_x Status IOCB */ +struct ctio_crc_from_fw { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + __le16 status; + __le16 timeout; /* Command timeout. */ + __le16 dseg_count; /* Data segment count. */ + __le32 reserved1; + __le16 state_flags; +#define CTIO_CRC_SF_DIF_CHOPPED BIT_4 + + __le32 exchange_address; /* rcv exchange address */ + __le16 reserved2; + __le16 flags; + __le32 resid_xfer_length; + __le16 ox_id; + uint8_t reserved3[12]; + __le16 runt_guard; /* reported runt blk guard */ + uint8_t actual_dif[8]; + uint8_t expected_dif[8]; +} __packed; + +/* + * ISP queue - ABTS received/response entries structure definition for 24xx. + */ +#define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */ +#define ABTS_RESP_24XX 0x55 /* ABTS responce (for 24xx) */ + +/* + * ISP queue - ABTS received IOCB entry structure definition for 24xx. + * The ABTS BLS received from the wire is sent to the + * target driver by the ISP 24xx. + * The IOCB is placed on the response queue. + */ +struct abts_recv_from_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. 
*/ + uint8_t reserved_1[6]; + __le16 nport_handle; + uint8_t reserved_2[2]; + uint8_t vp_index; + uint8_t reserved_3:4; + uint8_t sof_type:4; + __le32 exchange_address; + struct fcp_hdr_le fcp_hdr_le; + uint8_t reserved_4[16]; + __le32 exchange_addr_to_abort; +} __packed; + +#define ABTS_PARAM_ABORT_SEQ BIT_0 + +struct ba_acc_le { + __le16 reserved; + uint8_t seq_id_last; + uint8_t seq_id_valid; +#define SEQ_ID_VALID 0x80 +#define SEQ_ID_INVALID 0x00 + __le16 rx_id; + __le16 ox_id; + __le16 high_seq_cnt; + __le16 low_seq_cnt; +} __packed; + +struct ba_rjt_le { + uint8_t vendor_uniq; + uint8_t reason_expl; + uint8_t reason_code; +#define BA_RJT_REASON_CODE_INVALID_COMMAND 0x1 +#define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM 0x9 + uint8_t reserved; +} __packed; + +/* + * ISP queue - ABTS Response IOCB entry structure definition for 24xx. + * The ABTS response to the ABTS received is sent by the + * target driver to the ISP 24xx. + * The IOCB is placed on the request queue. + */ +struct abts_resp_to_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; + __le16 reserved_1; + __le16 nport_handle; + __le16 control_flags; +#define ABTS_CONTR_FLG_TERM_EXCHG BIT_0 + uint8_t vp_index; + uint8_t reserved_3:4; + uint8_t sof_type:4; + __le32 exchange_address; + struct fcp_hdr_le fcp_hdr_le; + union { + struct ba_acc_le ba_acct; + struct ba_rjt_le ba_rjt; + } __packed payload; + __le32 reserved_4; + __le32 exchange_addr_to_abort; +} __packed; + +/* + * ISP queue - ABTS Response IOCB from ISP24xx Firmware entry structure. + * The ABTS response with completion status to the ABTS response + * (sent by the target driver to the ISP 24xx) is sent by the + * ISP24xx firmware to the target driver. + * The IOCB is placed on the response queue. + */ +struct abts_resp_from_24xx_fw { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; + __le16 compl_status; +#define ABTS_RESP_COMPL_SUCCESS 0 +#define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31 + __le16 nport_handle; + __le16 reserved_1; + uint8_t reserved_2; + uint8_t reserved_3:4; + uint8_t sof_type:4; + __le32 exchange_address; + struct fcp_hdr_le fcp_hdr_le; + uint8_t reserved_4[8]; + __le32 error_subcode1; +#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E + __le32 error_subcode2; + __le32 exchange_addr_to_abort; +} __packed; + +/********************************************************************\ + * Type Definitions used by initiator & target halves +\********************************************************************/ + +struct qla_tgt_mgmt_cmd; +struct fc_port; +struct qla_tgt_cmd; + +/* + * This structure provides a template of function calls that the + * target driver (from within qla_target.c) can issue to the + * target module (tcm_qla2xxx). 
+ */ +struct qla_tgt_func_tmpl { + struct qla_tgt_cmd *(*find_cmd_by_tag)(struct fc_port *, uint64_t); + int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, + unsigned char *, uint32_t, int, int, int); + void (*handle_data)(struct qla_tgt_cmd *); + int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, u64, uint16_t, + uint32_t); + struct qla_tgt_cmd *(*get_cmd)(struct fc_port *); + void (*rel_cmd)(struct qla_tgt_cmd *); + void (*free_cmd)(struct qla_tgt_cmd *); + void (*free_mcmd)(struct qla_tgt_mgmt_cmd *); + void (*free_session)(struct fc_port *); + + int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *, + struct fc_port *); + void (*update_sess)(struct fc_port *, port_id_t, uint16_t, bool); + struct fc_port *(*find_sess_by_loop_id)(struct scsi_qla_host *, + const uint16_t); + struct fc_port *(*find_sess_by_s_id)(struct scsi_qla_host *, + const be_id_t); + void (*clear_nacl_from_fcport_map)(struct fc_port *); + void (*put_sess)(struct fc_port *); + void (*shutdown_sess)(struct fc_port *); + int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts); + int (*chk_dif_tags)(uint32_t tag); + void (*add_target)(struct scsi_qla_host *); + void (*remove_target)(struct scsi_qla_host *); +}; + +int qla2x00_wait_for_hba_online(struct scsi_qla_host *); + +#include + +#define QLA_TGT_TIMEOUT 10 /* in seconds */ + +#define QLA_TGT_MAX_HW_PENDING_TIME 60 /* in seconds */ + +/* Immediate notify status constants */ +#define IMM_NTFY_LIP_RESET 0x000E +#define IMM_NTFY_LIP_LINK_REINIT 0x000F +#define IMM_NTFY_IOCB_OVERFLOW 0x0016 +#define IMM_NTFY_ABORT_TASK 0x0020 +#define IMM_NTFY_PORT_LOGOUT 0x0029 +#define IMM_NTFY_PORT_CONFIG 0x002A +#define IMM_NTFY_GLBL_TPRLO 0x002D +#define IMM_NTFY_GLBL_LOGO 0x002E +#define IMM_NTFY_RESOURCE 0x0034 +#define IMM_NTFY_MSG_RX 0x0036 +#define IMM_NTFY_SRR 0x0045 +#define IMM_NTFY_ELS 0x0046 + +/* Immediate notify task flags */ +#define IMM_NTFY_TASK_MGMT_SHIFT 8 + +#define QLA_TGT_CLEAR_ACA 0x40 +#define QLA_TGT_TARGET_RESET 0x20 +#define QLA_TGT_LUN_RESET 0x10 +#define QLA_TGT_CLEAR_TS 0x04 +#define QLA_TGT_ABORT_TS 0x02 +#define QLA_TGT_ABORT_ALL_SESS 0xFFFF +#define QLA_TGT_ABORT_ALL 0xFFFE +#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD +#define QLA_TGT_NEXUS_LOSS 0xFFFC +#define QLA_TGT_ABTS 0xFFFB +#define QLA_TGT_2G_ABORT_TASK 0xFFFA + +/* Notify Acknowledge flags */ +#define NOTIFY_ACK_RES_COUNT BIT_8 +#define NOTIFY_ACK_CLEAR_LIP_RESET BIT_5 +#define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4 + +/* Command's states */ +#define QLA_TGT_STATE_NEW 0 /* New command + target processing */ +#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */ +#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */ +#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */ + +/* ATIO task_codes field */ +#define ATIO_SIMPLE_QUEUE 0 +#define ATIO_HEAD_OF_QUEUE 1 +#define ATIO_ORDERED_QUEUE 2 +#define ATIO_ACA_QUEUE 4 +#define ATIO_UNTAGGED 5 + +/* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */ +#define FC_TM_SUCCESS 0 +#define FC_TM_BAD_FCP_DATA 1 +#define FC_TM_BAD_CMD 2 +#define FC_TM_FCP_DATA_MISMATCH 3 +#define FC_TM_REJECT 4 +#define FC_TM_FAILED 5 + +#define QLA_TGT_SENSE_VALID(sense) ((sense != NULL) && \ + (((const uint8_t *)(sense))[0] & 0x70) == 0x70) + +struct qla_port_24xx_data { + uint8_t port_name[WWN_SIZE]; + uint16_t loop_id; + uint16_t reserved; +}; + +struct qla_qpair_hint { + struct list_head hint_elem; + struct qla_qpair *qpair; + u16 cpuid; + uint8_t cmd_cnt; +}; + +struct qla_tgt { + struct 
scsi_qla_host *vha; + struct qla_hw_data *ha; + struct btree_head64 lun_qpair_map; + struct qla_qpair_hint *qphints; + /* + * To sync between IRQ handlers and qlt_target_release(). Needed, + * because req_pkt() can drop/reaquire HW lock inside. Protected by + * HW lock. + */ + int atio_irq_cmd_count; + + int sg_tablesize; + + /* Target's flags, serialized by pha->hardware_lock */ + unsigned int link_reinit_iocb_pending:1; + + /* + * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex + * OR hardware_lock for reading. + */ + int tgt_stop; /* the target mode driver is being stopped */ + int tgt_stopped; /* the target mode driver has been stopped */ + + /* Count of sessions refering qla_tgt. Protected by hardware_lock. */ + int sess_count; + + spinlock_t sess_work_lock; + struct list_head sess_works_list; + struct work_struct sess_work; + + struct imm_ntfy_from_isp link_reinit_iocb; + wait_queue_head_t waitQ; + int notify_ack_expected; + int abts_resp_expected; + int modify_lun_expected; + atomic_t tgt_global_resets_count; + struct list_head tgt_list_entry; +}; + +struct qla_tgt_sess_op { + struct scsi_qla_host *vha; + uint32_t chip_reset; + struct atio_from_isp atio; + struct work_struct work; + struct list_head cmd_list; + bool aborted; + struct rsp_que *rsp; +}; + +enum trace_flags { + TRC_NEW_CMD = BIT_0, + TRC_DO_WORK = BIT_1, + TRC_DO_WORK_ERR = BIT_2, + TRC_XFR_RDY = BIT_3, + TRC_XMIT_DATA = BIT_4, + TRC_XMIT_STATUS = BIT_5, + TRC_SRR_RSP = BIT_6, + TRC_SRR_XRDY = BIT_7, + TRC_SRR_TERM = BIT_8, + TRC_SRR_CTIO = BIT_9, + TRC_FLUSH = BIT_10, + TRC_CTIO_ERR = BIT_11, + TRC_CTIO_DONE = BIT_12, + TRC_CTIO_ABORTED = BIT_13, + TRC_CTIO_STRANGE = BIT_14, + TRC_CMD_DONE = BIT_15, + TRC_CMD_CHK_STOP = BIT_16, + TRC_CMD_FREE = BIT_17, + TRC_DATA_IN = BIT_18, + TRC_ABORT = BIT_19, + TRC_DIF_ERR = BIT_20, +}; + +struct qla_tgt_cmd { + /* + * Do not move cmd_type field. it needs to line up with srb->cmd_type + */ + uint8_t cmd_type; + uint8_t pad[7]; + struct se_cmd se_cmd; + struct list_head sess_cmd_list; + struct fc_port *sess; + struct qla_qpair *qpair; + uint32_t reset_count; + int state; + struct work_struct work; + /* Sense buffer that will be mapped into outgoing status */ + unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; + + spinlock_t cmd_lock; + /* to save extra sess dereferences */ + unsigned int conf_compl_supported:1; + unsigned int sg_mapped:1; + unsigned int write_data_transferred:1; + unsigned int q_full:1; + unsigned int term_exchg:1; + unsigned int cmd_sent_to_fw:1; + unsigned int cmd_in_wq:1; + unsigned int edif:1; + + /* + * This variable may be set from outside the LIO and I/O completion + * callback functions. Do not declare this member variable as a + * bitfield to avoid a read-modify-write operation when this variable + * is set. 
+ */ + unsigned int aborted; + + struct scatterlist *sg; /* cmd data buffer SG vector */ + int sg_cnt; /* SG segments count */ + int bufflen; /* cmd buffer length */ + int offset; + u64 unpacked_lun; + enum dma_data_direction dma_data_direction; + + uint16_t ctio_flags; + uint16_t vp_idx; + uint16_t loop_id; /* to save extra sess dereferences */ + struct qla_tgt *tgt; /* to save extra sess dereferences */ + struct scsi_qla_host *vha; + struct list_head cmd_list; + + struct atio_from_isp atio; + + uint8_t ctx_dsd_alloced; + + /* T10-DIF */ +#define DIF_ERR_NONE 0 +#define DIF_ERR_GRD 1 +#define DIF_ERR_REF 2 +#define DIF_ERR_APP 3 + int8_t dif_err_code; + struct scatterlist *prot_sg; + uint32_t prot_sg_cnt; + uint32_t blk_sz, num_blks; + uint8_t scsi_status, sense_key, asc, ascq; + + struct crc_context *ctx; + const uint8_t *cdb; + uint64_t lba; + uint16_t a_guard, e_guard, a_app_tag, e_app_tag; + uint32_t a_ref_tag, e_ref_tag; +#define DIF_BUNDL_DMA_VALID 1 + uint16_t prot_flags; + + uint64_t jiffies_at_alloc; + uint64_t jiffies_at_free; + + enum trace_flags trc_flags; +}; + +struct qla_tgt_sess_work_param { + struct list_head sess_works_list_entry; + +#define QLA_TGT_SESS_WORK_ABORT 1 + int type; + + union { + struct abts_recv_from_24xx abts; + struct imm_ntfy_from_isp tm_iocb; + struct atio_from_isp tm_iocb2; + }; +}; + +struct qla_tgt_mgmt_cmd { + uint8_t cmd_type; + uint8_t pad[3]; + uint16_t tmr_func; + uint8_t fc_tm_rsp; + uint8_t abort_io_attr; + struct fc_port *sess; + struct qla_qpair *qpair; + struct scsi_qla_host *vha; + struct se_cmd se_cmd; + struct work_struct free_work; + unsigned int flags; +#define QLA24XX_MGMT_SEND_NACK BIT_0 +#define QLA24XX_MGMT_ABORT_IO_ATTR_VALID BIT_1 + uint32_t reset_count; + struct work_struct work; + uint64_t unpacked_lun; + union { + struct atio_from_isp atio; + struct imm_ntfy_from_isp imm_ntfy; + struct abts_recv_from_24xx abts; + } __packed orig_iocb; +}; + +struct qla_tgt_prm { + struct qla_tgt_cmd *cmd; + struct qla_tgt *tgt; + void *pkt; + struct scatterlist *sg; /* cmd data buffer SG vector */ + unsigned char *sense_buffer; + int seg_cnt; + int req_cnt; + uint16_t rq_result; + int sense_buffer_len; + int residual; + int add_status_pkt; + /* dif */ + struct scatterlist *prot_sg; + uint16_t prot_seg_cnt; + uint16_t tot_dsds; +}; + +/* Check for Switch reserved address */ +#define IS_SW_RESV_ADDR(_s_id) \ + ((_s_id.b.domain == 0xff) && ((_s_id.b.area & 0xf0) == 0xf0)) + +#define QLA_TGT_XMIT_DATA 1 +#define QLA_TGT_XMIT_STATUS 2 +#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) + + +extern struct qla_tgt_data qla_target; + +/* + * Function prototypes for qla_target.c logic used by qla2xxx LLD code. + */ +extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *); +extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *); +extern int qlt_lport_register(void *, u64, u64, u64, + int (*callback)(struct scsi_qla_host *, void *, u64, u64)); +extern void qlt_lport_deregister(struct scsi_qla_host *); +extern void qlt_unreg_sess(struct fc_port *); +extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); +extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int); +extern int __init qlt_init(void); +extern void qlt_exit(void); +extern void qlt_free_session_done(struct work_struct *); +/* + * This macro is used during early initializations when host->active_mode + * is not set. Right now, ha value is ignored. 
+ */ +#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED) + +extern int ql2x_ini_mode; + +static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha) +{ + return ha->host->active_mode == MODE_TARGET; +} + +static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha) +{ + return ha->host->active_mode == MODE_INITIATOR; +} + +static inline bool qla_dual_mode_enabled(struct scsi_qla_host *ha) +{ + return (ha->host->active_mode == MODE_DUAL); +} + +static inline uint32_t sid_to_key(const be_id_t s_id) +{ + return s_id.domain << 16 | + s_id.area << 8 | + s_id.al_pa; +} + +/* + * Exported symbols from qla_target.c LLD logic used by qla2xxx code.. + */ +extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, struct rsp_que *, + response_t *); +extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); +extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); +extern int qlt_abort_cmd(struct qla_tgt_cmd *); +extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); +extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); +extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); +extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *); +extern void qlt_enable_vha(struct scsi_qla_host *); +extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *); +extern u8 qlt_rff_id(struct scsi_qla_host *); +extern void qlt_init_atio_q_entries(struct scsi_qla_host *); +extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *, uint8_t); +extern void qlt_24xx_config_rings(struct scsi_qla_host *); +extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *, + struct nvram_24xx *); +extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *, + struct init_cb_24xx *); +extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *, + struct init_cb_81xx *); +extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *, + struct nvram_81xx *); +extern void qlt_modify_vp_config(struct scsi_qla_host *, + struct vp_config_entry_24xx *); +extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *); +extern int qlt_mem_alloc(struct qla_hw_data *); +extern void qlt_mem_free(struct qla_hw_data *); +extern int qlt_stop_phase1(struct qla_tgt *); +extern void qlt_stop_phase2(struct qla_tgt *); +extern irqreturn_t qla83xx_msix_atio_q(int, void *); +extern void qlt_83xx_iospace_config(struct qla_hw_data *); +extern int qlt_free_qfull_cmds(struct qla_qpair *); +extern void qlt_logo_completion_handler(fc_port_t *, int); +extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); + +void qlt_send_resp_ctio(struct qla_qpair *, struct qla_tgt_cmd *, uint8_t, + uint8_t, uint8_t, uint8_t); + +#endif /* __QLA_TARGET_H */ diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c new file mode 100644 index 000000000..b0a74b036 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -0,0 +1,1100 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +#include "qla_def.h" +#include "qla_tmpl.h" + +#define ISPREG(vha) (&(vha)->hw->iobase->isp24) +#define IOBAR(reg) offsetof(typeof(*(reg)), iobase_addr) +#define IOBASE(vha) IOBAR(ISPREG(vha)) +#define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL) + +static inline void +qla27xx_insert16(uint16_t value, void *buf, ulong *len) +{ + if (buf) { + buf += *len; + *(__le16 *)buf = cpu_to_le16(value); + } + *len += sizeof(value); +} + +static inline void 
+qla27xx_insert32(uint32_t value, void *buf, ulong *len) +{ + if (buf) { + buf += *len; + *(__le32 *)buf = cpu_to_le32(value); + } + *len += sizeof(value); +} + +static inline void +qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len) +{ + if (buf && mem && size) { + buf += *len; + memcpy(buf, mem, size); + } + *len += size; +} + +static inline void +qla27xx_read8(void __iomem *window, void *buf, ulong *len) +{ + uint8_t value = ~0; + + if (buf) { + value = rd_reg_byte(window); + } + qla27xx_insert32(value, buf, len); +} + +static inline void +qla27xx_read16(void __iomem *window, void *buf, ulong *len) +{ + uint16_t value = ~0; + + if (buf) { + value = rd_reg_word(window); + } + qla27xx_insert32(value, buf, len); +} + +static inline void +qla27xx_read32(void __iomem *window, void *buf, ulong *len) +{ + uint32_t value = ~0; + + if (buf) { + value = rd_reg_dword(window); + } + qla27xx_insert32(value, buf, len); +} + +static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *) +{ + return + (width == 1) ? qla27xx_read8 : + (width == 2) ? qla27xx_read16 : + qla27xx_read32; +} + +static inline void +qla27xx_read_reg(__iomem struct device_reg_24xx *reg, + uint offset, void *buf, ulong *len) +{ + void __iomem *window = (void __iomem *)reg + offset; + + qla27xx_read32(window, buf, len); +} + +static inline void +qla27xx_write_reg(__iomem struct device_reg_24xx *reg, + uint offset, uint32_t data, void *buf) +{ + if (buf) { + void __iomem *window = (void __iomem *)reg + offset; + + wrt_reg_dword(window, data); + } +} + +static inline void +qla27xx_read_window(__iomem struct device_reg_24xx *reg, + uint32_t addr, uint offset, uint count, uint width, void *buf, + ulong *len) +{ + void __iomem *window = (void __iomem *)reg + offset; + void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width); + + qla27xx_write_reg(reg, IOBAR(reg), addr, buf); + while (count--) { + qla27xx_insert32(addr, buf, len); + readn(window, buf, len); + window += width; + addr++; + } +} + +static inline void +qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf) +{ + if (buf) + ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY; +} + +static inline struct qla27xx_fwdt_entry * +qla27xx_next_entry(struct qla27xx_fwdt_entry *ent) +{ + return (void *)ent + le32_to_cpu(ent->hdr.size); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ql_dbg(ql_dbg_misc, vha, 0xd100, + "%s: nop [%lx]\n", __func__, *len); + qla27xx_skip_entry(ent, buf); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ql_dbg(ql_dbg_misc, vha, 0xd1ff, + "%s: end [%lx]\n", __func__, *len); + qla27xx_skip_entry(ent, buf); + + /* terminate */ + return NULL; +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong addr = le32_to_cpu(ent->t256.base_addr); + uint offset = ent->t256.pci_offset; + ulong count = le16_to_cpu(ent->t256.reg_count); + uint width = ent->t256.reg_width; + + ql_dbg(ql_dbg_misc, vha, 0xd200, + "%s: rdio t1 [%lx]\n", __func__, *len); + qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha, + struct 
qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong addr = le32_to_cpu(ent->t257.base_addr); + uint offset = ent->t257.pci_offset; + ulong data = le32_to_cpu(ent->t257.write_data); + + ql_dbg(ql_dbg_misc, vha, 0xd201, + "%s: wrio t1 [%lx]\n", __func__, *len); + qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf); + qla27xx_write_reg(ISPREG(vha), offset, data, buf); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + uint banksel = ent->t258.banksel_offset; + ulong bank = le32_to_cpu(ent->t258.bank); + ulong addr = le32_to_cpu(ent->t258.base_addr); + uint offset = ent->t258.pci_offset; + uint count = le16_to_cpu(ent->t258.reg_count); + uint width = ent->t258.reg_width; + + ql_dbg(ql_dbg_misc, vha, 0xd202, + "%s: rdio t2 [%lx]\n", __func__, *len); + qla27xx_write_reg(ISPREG(vha), banksel, bank, buf); + qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong addr = le32_to_cpu(ent->t259.base_addr); + uint banksel = ent->t259.banksel_offset; + ulong bank = le32_to_cpu(ent->t259.bank); + uint offset = ent->t259.pci_offset; + ulong data = le32_to_cpu(ent->t259.write_data); + + ql_dbg(ql_dbg_misc, vha, 0xd203, + "%s: wrio t2 [%lx]\n", __func__, *len); + qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf); + qla27xx_write_reg(ISPREG(vha), banksel, bank, buf); + qla27xx_write_reg(ISPREG(vha), offset, data, buf); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + uint offset = ent->t260.pci_offset; + + ql_dbg(ql_dbg_misc, vha, 0xd204, + "%s: rdpci [%lx]\n", __func__, *len); + qla27xx_insert32(offset, buf, len); + qla27xx_read_reg(ISPREG(vha), offset, buf, len); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + uint offset = ent->t261.pci_offset; + ulong data = le32_to_cpu(ent->t261.write_data); + + ql_dbg(ql_dbg_misc, vha, 0xd205, + "%s: wrpci [%lx]\n", __func__, *len); + qla27xx_write_reg(ISPREG(vha), offset, data, buf); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + uint area = ent->t262.ram_area; + ulong start = le32_to_cpu(ent->t262.start_addr); + ulong end = le32_to_cpu(ent->t262.end_addr); + ulong dwords; + int rc; + + ql_dbg(ql_dbg_misc, vha, 0xd206, + "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len); + + if (area == T262_RAM_AREA_CRITICAL_RAM) { + ; + } else if (area == T262_RAM_AREA_EXTERNAL_RAM) { + end = vha->hw->fw_memory_size; + if (buf) + ent->t262.end_addr = cpu_to_le32(end); + } else if (area == T262_RAM_AREA_SHARED_RAM) { + start = vha->hw->fw_shared_ram_start; + end = vha->hw->fw_shared_ram_end; + if (buf) { + ent->t262.start_addr = cpu_to_le32(start); + ent->t262.end_addr = cpu_to_le32(end); + } + } else if (area == T262_RAM_AREA_DDR_RAM) { + start = vha->hw->fw_ddr_ram_start; + end = vha->hw->fw_ddr_ram_end; + if (buf) { + ent->t262.start_addr = cpu_to_le32(start); + ent->t262.end_addr = 
cpu_to_le32(end); + } + } else if (area == T262_RAM_AREA_MISC) { + if (buf) { + ent->t262.start_addr = cpu_to_le32(start); + ent->t262.end_addr = cpu_to_le32(end); + } + } else { + ql_dbg(ql_dbg_misc, vha, 0xd022, + "%s: unknown area %x\n", __func__, area); + qla27xx_skip_entry(ent, buf); + goto done; + } + + if (end < start || start == 0 || end == 0) { + ql_dbg(ql_dbg_misc, vha, 0xd023, + "%s: unusable range (start=%lx end=%lx)\n", + __func__, start, end); + qla27xx_skip_entry(ent, buf); + goto done; + } + + dwords = end - start + 1; + if (buf) { + buf += *len; + rc = qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf); + if (rc != QLA_SUCCESS) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "%s: dump ram MB failed. Area %xh start %lxh end %lxh\n", + __func__, area, start, end); + return INVALID_ENTRY; + } + } + *len += dwords * sizeof(uint32_t); +done: + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + uint type = ent->t263.queue_type; + uint count = 0; + uint i; + uint length; + + ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd207, + "%s: getq(%x) [%lx]\n", __func__, type, *len); + if (type == T263_QUEUE_TYPE_REQ) { + for (i = 0; i < vha->hw->max_req_queues; i++) { + struct req_que *req = vha->hw->req_q_map[i]; + + if (req || !buf) { + length = req ? + req->length : REQUEST_ENTRY_CNT_24XX; + qla27xx_insert16(i, buf, len); + qla27xx_insert16(length, buf, len); + qla27xx_insertbuf(req ? req->ring : NULL, + length * sizeof(*req->ring), buf, len); + count++; + } + } + } else if (type == T263_QUEUE_TYPE_RSP) { + for (i = 0; i < vha->hw->max_rsp_queues; i++) { + struct rsp_que *rsp = vha->hw->rsp_q_map[i]; + + if (rsp || !buf) { + length = rsp ? + rsp->length : RESPONSE_ENTRY_CNT_MQ; + qla27xx_insert16(i, buf, len); + qla27xx_insert16(length, buf, len); + qla27xx_insertbuf(rsp ? 
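The t262 handler treats start_addr/end_addr as an inclusive dword range, so the space it reserves is (end - start + 1) dwords of 4 bytes each. A tiny worked example of that sizing arithmetic (the range values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Example range only; the real values come from the template entry
     * or from fw_memory_size / fw_shared_ram_* / fw_ddr_ram_*. */
    unsigned long start = 0x20000, end = 0x7ffff;
    unsigned long dwords = end - start + 1;
    unsigned long bytes = dwords * sizeof(uint32_t);

    printf("dwords = 0x%lx, bytes = 0x%lx\n", dwords, bytes);
    /* prints: dwords = 0x60000, bytes = 0x180000 (1.5 MiB) */
    return 0;
}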
rsp->ring : NULL, + length * sizeof(*rsp->ring), buf, len); + count++; + } + } + } else if (QLA_TGT_MODE_ENABLED() && + ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) { + struct qla_hw_data *ha = vha->hw; + struct atio *atr = ha->tgt.atio_ring; + + if (atr || !buf) { + length = ha->tgt.atio_q_length; + qla27xx_insert16(0, buf, len); + qla27xx_insert16(length, buf, len); + qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len); + count++; + } + } else { + ql_dbg(ql_dbg_misc, vha, 0xd026, + "%s: unknown queue %x\n", __func__, type); + qla27xx_skip_entry(ent, buf); + } + + if (buf) { + if (count) + ent->t263.num_queues = count; + else + qla27xx_skip_entry(ent, buf); + } + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ql_dbg(ql_dbg_misc, vha, 0xd208, + "%s: getfce [%lx]\n", __func__, *len); + if (vha->hw->fce) { + if (buf) { + ent->t264.fce_trace_size = FCE_SIZE; + ent->t264.write_pointer = vha->hw->fce_wr; + ent->t264.base_pointer = vha->hw->fce_dma; + ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0]; + ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2]; + ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3]; + ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4]; + ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5]; + ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6]; + } + qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len); + } else { + ql_dbg(ql_dbg_misc, vha, 0xd027, + "%s: missing fce\n", __func__); + qla27xx_skip_entry(ent, buf); + } + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd209, + "%s: pause risc [%lx]\n", __func__, *len); + if (buf) + qla24xx_pause_risc(ISPREG(vha), vha->hw); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ql_dbg(ql_dbg_misc, vha, 0xd20a, + "%s: reset risc [%lx]\n", __func__, *len); + if (buf) { + if (qla24xx_soft_reset(vha->hw) != QLA_SUCCESS) { + ql_dbg(ql_dbg_async, vha, 0x5001, + "%s: unable to soft reset\n", __func__); + return INVALID_ENTRY; + } + } + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + uint offset = ent->t267.pci_offset; + ulong data = le32_to_cpu(ent->t267.data); + + ql_dbg(ql_dbg_misc, vha, 0xd20b, + "%s: dis intr [%lx]\n", __func__, *len); + qla27xx_write_reg(ISPREG(vha), offset, data, buf); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ql_dbg(ql_dbg_misc, vha, 0xd20c, + "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len); + switch (ent->t268.buf_type) { + case T268_BUF_TYPE_EXTD_TRACE: + if (vha->hw->eft) { + if (buf) { + ent->t268.buf_size = EFT_SIZE; + ent->t268.start_addr = vha->hw->eft_dma; + } + qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len); + } else { + ql_dbg(ql_dbg_misc, vha, 0xd028, + "%s: missing eft\n", __func__); + qla27xx_skip_entry(ent, buf); + } + break; + case T268_BUF_TYPE_EXCH_BUFOFF: + if (vha->hw->exchoffld_buf) { + if (buf) { + ent->t268.buf_size = vha->hw->exchoffld_size; + 
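Each queue captured by the t263 handler is written as a self-describing record: a 16-bit queue id, a 16-bit entry count, then the raw ring contents. A hedged user-space sketch of stepping back over such records when reading a dump; the 64-byte entry size stands in for sizeof(*req->ring) and is an assumption, and a little-endian host is assumed to match the dump's byte order:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IOCB_SIZE 64    /* assumed ring entry size; adjust to the real sizeof(*req->ring) */

/* Walk t263-style queue records: [id:16][length:16][length * IOCB_SIZE bytes]... */
static void walk_queue_records(const uint8_t *p, size_t total, unsigned nqueues)
{
    size_t off = 0;

    while (nqueues-- && off + 4 <= total) {
        uint16_t id, length;

        memcpy(&id, p + off, 2);            /* little-endian host assumed */
        memcpy(&length, p + off + 2, 2);
        off += 4;
        printf("queue %u: %u entries (%u bytes of ring data)\n",
               id, length, (unsigned)length * IOCB_SIZE);
        off += (size_t)length * IOCB_SIZE;
    }
}

int main(void)
{
    /* One fake record: queue 0 with 2 entries of zeroed ring data. */
    uint8_t dump[4 + 2 * IOCB_SIZE] = { 0, 0, 2, 0 };

    walk_queue_records(dump, sizeof(dump), 1);
    return 0;
}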
ent->t268.start_addr = + vha->hw->exchoffld_buf_dma; + } + qla27xx_insertbuf(vha->hw->exchoffld_buf, + vha->hw->exchoffld_size, buf, len); + } else { + ql_dbg(ql_dbg_misc, vha, 0xd028, + "%s: missing exch offld\n", __func__); + qla27xx_skip_entry(ent, buf); + } + break; + case T268_BUF_TYPE_EXTD_LOGIN: + if (vha->hw->exlogin_buf) { + if (buf) { + ent->t268.buf_size = vha->hw->exlogin_size; + ent->t268.start_addr = + vha->hw->exlogin_buf_dma; + } + qla27xx_insertbuf(vha->hw->exlogin_buf, + vha->hw->exlogin_size, buf, len); + } else { + ql_dbg(ql_dbg_misc, vha, 0xd028, + "%s: missing ext login\n", __func__); + qla27xx_skip_entry(ent, buf); + } + break; + + case T268_BUF_TYPE_REQ_MIRROR: + case T268_BUF_TYPE_RSP_MIRROR: + /* + * Mirror pointers are not implemented in the + * driver, instead shadow pointers are used by + * the driver. Skip these entries. + */ + qla27xx_skip_entry(ent, buf); + break; + default: + ql_dbg(ql_dbg_async, vha, 0xd02b, + "%s: unknown buffer %x\n", __func__, ent->t268.buf_type); + qla27xx_skip_entry(ent, buf); + break; + } + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ql_dbg(ql_dbg_misc, vha, 0xd20d, + "%s: scratch [%lx]\n", __func__, *len); + qla27xx_insert32(0xaaaaaaaa, buf, len); + qla27xx_insert32(0xbbbbbbbb, buf, len); + qla27xx_insert32(0xcccccccc, buf, len); + qla27xx_insert32(0xdddddddd, buf, len); + qla27xx_insert32(*len + sizeof(uint32_t), buf, len); + if (buf) + ent->t269.scratch_size = 5 * sizeof(uint32_t); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong addr = le32_to_cpu(ent->t270.addr); + ulong dwords = le32_to_cpu(ent->t270.count); + + ql_dbg(ql_dbg_misc, vha, 0xd20e, + "%s: rdremreg [%lx]\n", __func__, *len); + qla27xx_write_reg(ISPREG(vha), IOBASE_ADDR, 0x40, buf); + while (dwords--) { + qla27xx_write_reg(ISPREG(vha), 0xc0, addr|0x80000000, buf); + qla27xx_insert32(addr, buf, len); + qla27xx_read_reg(ISPREG(vha), 0xc4, buf, len); + addr += sizeof(uint32_t); + } + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong addr = le32_to_cpu(ent->t271.addr); + ulong data = le32_to_cpu(ent->t271.data); + + ql_dbg(ql_dbg_misc, vha, 0xd20f, + "%s: wrremreg [%lx]\n", __func__, *len); + qla27xx_write_reg(ISPREG(vha), IOBASE(vha), 0x40, buf); + qla27xx_write_reg(ISPREG(vha), 0xc4, data, buf); + qla27xx_write_reg(ISPREG(vha), 0xc0, addr, buf); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong dwords = le32_to_cpu(ent->t272.count); + ulong start = le32_to_cpu(ent->t272.addr); + + ql_dbg(ql_dbg_misc, vha, 0xd210, + "%s: rdremram [%lx]\n", __func__, *len); + if (buf) { + ql_dbg(ql_dbg_misc, vha, 0xd02c, + "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords); + buf += *len; + qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf); + } + *len += dwords * sizeof(uint32_t); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong dwords = 
le32_to_cpu(ent->t273.count); + ulong addr = le32_to_cpu(ent->t273.addr); + uint32_t value; + + ql_dbg(ql_dbg_misc, vha, 0xd211, + "%s: pcicfg [%lx]\n", __func__, *len); + while (dwords--) { + value = ~0; + if (pci_read_config_dword(vha->hw->pdev, addr, &value)) + ql_dbg(ql_dbg_misc, vha, 0xd02d, + "%s: failed pcicfg read at %lx\n", __func__, addr); + qla27xx_insert32(addr, buf, len); + qla27xx_insert32(value, buf, len); + addr += sizeof(uint32_t); + } + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong type = ent->t274.queue_type; + uint count = 0; + uint i; + + ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd212, + "%s: getqsh(%lx) [%lx]\n", __func__, type, *len); + if (type == T274_QUEUE_TYPE_REQ_SHAD) { + for (i = 0; i < vha->hw->max_req_queues; i++) { + struct req_que *req = vha->hw->req_q_map[i]; + + if (req || !buf) { + qla27xx_insert16(i, buf, len); + qla27xx_insert16(1, buf, len); + qla27xx_insert32(req && req->out_ptr ? + *req->out_ptr : 0, buf, len); + count++; + } + } + } else if (type == T274_QUEUE_TYPE_RSP_SHAD) { + for (i = 0; i < vha->hw->max_rsp_queues; i++) { + struct rsp_que *rsp = vha->hw->rsp_q_map[i]; + + if (rsp || !buf) { + qla27xx_insert16(i, buf, len); + qla27xx_insert16(1, buf, len); + qla27xx_insert32(rsp && rsp->in_ptr ? + *rsp->in_ptr : 0, buf, len); + count++; + } + } + } else if (QLA_TGT_MODE_ENABLED() && + ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) { + struct qla_hw_data *ha = vha->hw; + struct atio *atr = ha->tgt.atio_ring_ptr; + + if (atr || !buf) { + qla27xx_insert16(0, buf, len); + qla27xx_insert16(1, buf, len); + qla27xx_insert32(ha->tgt.atio_q_in ? + readl(ha->tgt.atio_q_in) : 0, buf, len); + count++; + } + } else { + ql_dbg(ql_dbg_misc, vha, 0xd02f, + "%s: unknown queue %lx\n", __func__, type); + qla27xx_skip_entry(ent, buf); + } + + if (buf) { + if (count) + ent->t274.num_queues = count; + else + qla27xx_skip_entry(ent, buf); + } + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong offset = offsetof(typeof(*ent), t275.buffer); + ulong length = le32_to_cpu(ent->t275.length); + ulong size = le32_to_cpu(ent->hdr.size); + void *buffer = ent->t275.buffer; + + ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd213, + "%s: buffer(%lx) [%lx]\n", __func__, length, *len); + if (!length) { + ql_dbg(ql_dbg_misc, vha, 0xd020, + "%s: buffer zero length\n", __func__); + qla27xx_skip_entry(ent, buf); + goto done; + } + if (offset + length > size) { + length = size - offset; + ql_dbg(ql_dbg_misc, vha, 0xd030, + "%s: buffer overflow, truncate [%lx]\n", __func__, length); + ent->t275.length = cpu_to_le32(length); + } + + qla27xx_insertbuf(buffer, length, buf, len); +done: + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214, + "%s: cond [%lx]\n", __func__, *len); + + if (buf) { + ulong cond1 = le32_to_cpu(ent->t276.cond1); + ulong cond2 = le32_to_cpu(ent->t276.cond2); + uint type = vha->hw->pdev->device >> 4 & 0xf; + uint func = vha->hw->port_no & 0x3; + + if (type != cond1 || func != cond2) { + struct qla27xx_fwdt_template *tmp = buf; + + tmp->count--; + ent = qla27xx_next_entry(ent); + 
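The t275 entry carries its payload inside the template entry itself, so the claimed length has to be clamped against hdr.size minus the header offset before it is copied out. A standalone sketch of that clamp with simplified stand-in types (not the driver's structs):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct qla27xx_fwdt_entry / t275. */
struct sketch_entry {
    uint32_t type;
    uint32_t size;      /* total size of this entry, header included */
    uint32_t length;    /* claimed payload length */
    uint8_t buffer[];
};

static size_t clamp_t275_length(const struct sketch_entry *ent)
{
    size_t offset = offsetof(struct sketch_entry, buffer);
    size_t length = ent->length;

    if (offset + length > ent->size)
        length = ent->size - offset;    /* truncate, as t275 does */
    return length;
}

int main(void)
{
    struct sketch_entry ent = { .type = 275, .size = 16, .length = 100 };

    /* Only 16 - 12 = 4 bytes of payload actually fit. */
    printf("clamped length = %zu\n", clamp_t275_length(&ent));
    return 0;
}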
qla27xx_skip_entry(ent, buf); + } + } + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong cmd_addr = le32_to_cpu(ent->t277.cmd_addr); + ulong wr_cmd_data = le32_to_cpu(ent->t277.wr_cmd_data); + ulong data_addr = le32_to_cpu(ent->t277.data_addr); + + ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215, + "%s: rdpep [%lx]\n", __func__, *len); + qla27xx_insert32(wr_cmd_data, buf, len); + qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf); + qla27xx_read_reg(ISPREG(vha), data_addr, buf, len); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong cmd_addr = le32_to_cpu(ent->t278.cmd_addr); + ulong wr_cmd_data = le32_to_cpu(ent->t278.wr_cmd_data); + ulong data_addr = le32_to_cpu(ent->t278.data_addr); + ulong wr_data = le32_to_cpu(ent->t278.wr_data); + + ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216, + "%s: wrpep [%lx]\n", __func__, *len); + qla27xx_write_reg(ISPREG(vha), data_addr, wr_data, buf); + qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_other(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong type = le32_to_cpu(ent->hdr.type); + + ql_dbg(ql_dbg_misc, vha, 0xd2ff, + "%s: other %lx [%lx]\n", __func__, type, *len); + qla27xx_skip_entry(ent, buf); + + return qla27xx_next_entry(ent); +} + +static struct { + uint type; + typeof(qla27xx_fwdt_entry_other)(*call); +} qla27xx_fwdt_entry_call[] = { + { ENTRY_TYPE_NOP, qla27xx_fwdt_entry_t0 }, + { ENTRY_TYPE_TMP_END, qla27xx_fwdt_entry_t255 }, + { ENTRY_TYPE_RD_IOB_T1, qla27xx_fwdt_entry_t256 }, + { ENTRY_TYPE_WR_IOB_T1, qla27xx_fwdt_entry_t257 }, + { ENTRY_TYPE_RD_IOB_T2, qla27xx_fwdt_entry_t258 }, + { ENTRY_TYPE_WR_IOB_T2, qla27xx_fwdt_entry_t259 }, + { ENTRY_TYPE_RD_PCI, qla27xx_fwdt_entry_t260 }, + { ENTRY_TYPE_WR_PCI, qla27xx_fwdt_entry_t261 }, + { ENTRY_TYPE_RD_RAM, qla27xx_fwdt_entry_t262 }, + { ENTRY_TYPE_GET_QUEUE, qla27xx_fwdt_entry_t263 }, + { ENTRY_TYPE_GET_FCE, qla27xx_fwdt_entry_t264 }, + { ENTRY_TYPE_PSE_RISC, qla27xx_fwdt_entry_t265 }, + { ENTRY_TYPE_RST_RISC, qla27xx_fwdt_entry_t266 }, + { ENTRY_TYPE_DIS_INTR, qla27xx_fwdt_entry_t267 }, + { ENTRY_TYPE_GET_HBUF, qla27xx_fwdt_entry_t268 }, + { ENTRY_TYPE_SCRATCH, qla27xx_fwdt_entry_t269 }, + { ENTRY_TYPE_RDREMREG, qla27xx_fwdt_entry_t270 }, + { ENTRY_TYPE_WRREMREG, qla27xx_fwdt_entry_t271 }, + { ENTRY_TYPE_RDREMRAM, qla27xx_fwdt_entry_t272 }, + { ENTRY_TYPE_PCICFG, qla27xx_fwdt_entry_t273 }, + { ENTRY_TYPE_GET_SHADOW, qla27xx_fwdt_entry_t274 }, + { ENTRY_TYPE_WRITE_BUF, qla27xx_fwdt_entry_t275 }, + { ENTRY_TYPE_CONDITIONAL, qla27xx_fwdt_entry_t276 }, + { ENTRY_TYPE_RDPEPREG, qla27xx_fwdt_entry_t277 }, + { ENTRY_TYPE_WRPEPREG, qla27xx_fwdt_entry_t278 }, + { -1, qla27xx_fwdt_entry_other } +}; + +static inline +typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type)) +{ + typeof(*qla27xx_fwdt_entry_call) *list = qla27xx_fwdt_entry_call; + + while (list->type < type) + list++; + + if (list->type == type) + return list->call; + return qla27xx_fwdt_entry_other; +} + +static void +qla27xx_walk_template(struct scsi_qla_host *vha, + struct qla27xx_fwdt_template *tmp, void *buf, ulong *len) +{ + struct qla27xx_fwdt_entry *ent = (void *)tmp + 
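qla27xx_find_entry() depends on qla27xx_fwdt_entry_call[] being sorted by ascending type and terminated by a (uint)-1 sentinel: the scan stops at the first entry whose type is not smaller than the request, and anything without an exact match falls back to qla27xx_fwdt_entry_other(). A minimal standalone model of that lookup (toy names and types):

#include <stdio.h>

static const char *handle_nop(void)   { return "nop"; }
static const char *handle_end(void)   { return "end"; }
static const char *handle_rdram(void) { return "rdram"; }
static const char *handle_other(void) { return "other"; }

static struct {
    unsigned type;
    const char *(*call)(void);
} table[] = {
    { 0,   handle_nop },
    { 255, handle_end },
    { 262, handle_rdram },
    { (unsigned)-1, handle_other },  /* sentinel: catches every unknown type */
};

/* Same shape as qla27xx_find_entry(): linear scan over a sorted table. */
static const char *(*find_entry(unsigned type))(void)
{
    unsigned i = 0;

    while (table[i].type < type)
        i++;
    return table[i].type == type ? table[i].call : handle_other;
}

int main(void)
{
    printf("%s %s %s\n",
           find_entry(262)(),   /* rdram */
           find_entry(263)(),   /* other: not in this toy table */
           find_entry(255)());  /* end */
    return 0;
}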
+ le32_to_cpu(tmp->entry_offset); + ulong type; + + tmp->count = le32_to_cpu(tmp->entry_count); + ql_dbg(ql_dbg_misc, vha, 0xd01a, + "%s: entry count %u\n", __func__, tmp->count); + while (ent && tmp->count--) { + type = le32_to_cpu(ent->hdr.type); + ent = qla27xx_find_entry(type)(vha, ent, buf, len); + if (!ent) + break; + + if (ent == INVALID_ENTRY) { + *len = 0; + ql_dbg(ql_dbg_async, vha, 0xffff, + "Unable to capture FW dump"); + goto bailout; + } + } + + if (tmp->count) + ql_dbg(ql_dbg_misc, vha, 0xd018, + "%s: entry count residual=+%u\n", __func__, tmp->count); + + if (ent) + ql_dbg(ql_dbg_misc, vha, 0xd019, + "%s: missing end entry\n", __func__); + +bailout: + cpu_to_le32s(&tmp->count); /* endianize residual count */ +} + +static void +qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp) +{ + tmp->capture_timestamp = cpu_to_le32(jiffies); +} + +static void +qla27xx_driver_info(struct qla27xx_fwdt_template *tmp) +{ + uint8_t v[] = { 0, 0, 0, 0, 0, 0 }; + + WARN_ON_ONCE(sscanf(qla2x00_version_str, + "%hhu.%hhu.%hhu.%hhu", + v + 0, v + 1, v + 2, v + 3) != 4); + + tmp->driver_info[0] = cpu_to_le32( + v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]); + tmp->driver_info[1] = cpu_to_le32(v[5] << 8 | v[4]); + tmp->driver_info[2] = __constant_cpu_to_le32(0x12345678); +} + +static void +qla27xx_firmware_info(struct scsi_qla_host *vha, + struct qla27xx_fwdt_template *tmp) +{ + tmp->firmware_version[0] = cpu_to_le32(vha->hw->fw_major_version); + tmp->firmware_version[1] = cpu_to_le32(vha->hw->fw_minor_version); + tmp->firmware_version[2] = cpu_to_le32(vha->hw->fw_subminor_version); + tmp->firmware_version[3] = cpu_to_le32( + vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes); + tmp->firmware_version[4] = cpu_to_le32( + vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0]); +} + +static void +ql27xx_edit_template(struct scsi_qla_host *vha, + struct qla27xx_fwdt_template *tmp) +{ + qla27xx_time_stamp(tmp); + qla27xx_driver_info(tmp); + qla27xx_firmware_info(vha, tmp); +} + +static inline uint32_t +qla27xx_template_checksum(void *p, ulong size) +{ + __le32 *buf = p; + uint64_t sum = 0; + + size /= sizeof(*buf); + + for ( ; size--; buf++) + sum += le32_to_cpu(*buf); + + sum = (sum & 0xffffffff) + (sum >> 32); + + return ~sum; +} + +static inline int +qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp) +{ + return qla27xx_template_checksum(tmp, + le32_to_cpu(tmp->template_size)) == 0; +} + +static inline int +qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp) +{ + return le32_to_cpu(tmp->template_type) == TEMPLATE_TYPE_FWDUMP; +} + +static ulong +qla27xx_execute_fwdt_template(struct scsi_qla_host *vha, + struct qla27xx_fwdt_template *tmp, void *buf) +{ + ulong len = 0; + + if (qla27xx_fwdt_template_valid(tmp)) { + len = le32_to_cpu(tmp->template_size); + tmp = memcpy(buf, tmp, len); + ql27xx_edit_template(vha, tmp); + qla27xx_walk_template(vha, tmp, buf, &len); + } + + return len; +} + +ulong +qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p) +{ + struct qla27xx_fwdt_template *tmp = p; + ulong len = 0; + + if (qla27xx_fwdt_template_valid(tmp)) { + len = le32_to_cpu(tmp->template_size); + qla27xx_walk_template(vha, tmp, NULL, &len); + } + + return len; +} + +ulong +qla27xx_fwdt_template_size(void *p) +{ + struct qla27xx_fwdt_template *tmp = p; + + return le32_to_cpu(tmp->template_size); +} + +int +qla27xx_fwdt_template_valid(void *p) +{ + struct qla27xx_fwdt_template *tmp = p; + + if (!qla27xx_verify_template_header(tmp)) { + 
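qla27xx_template_checksum() is a 32-bit end-around-carry sum: all little-endian words are accumulated into 64 bits, the high half is folded back once, and the result is complemented, so a well-formed template verifies when the return value is zero. A standalone sketch of the same arithmetic on a little-endian host (the real code converts each word with le32_to_cpu):

#include <stdint.h>
#include <stdio.h>

static uint32_t template_checksum(const void *p, unsigned long size)
{
    const uint32_t *buf = p;
    uint64_t sum = 0;

    for (size /= sizeof(*buf); size--; buf++)
        sum += *buf;
    sum = (sum & 0xffffffff) + (sum >> 32);  /* fold the carries back in */
    return ~sum;
}

int main(void)
{
    /* Toy "template": word 3 plays the role of the checksum field. */
    uint32_t tmpl[8] = { 99, 0x20, 32, 0, 1, 2, 3, 4 };

    tmpl[3] = template_checksum(tmpl, sizeof(tmpl));
    printf("verifies: %s\n",
           template_checksum(tmpl, sizeof(tmpl)) == 0 ? "yes" : "no");
    return 0;
}

Storing the checksum computed with that field zeroed makes the full-file sum fold to 0xffffffff, so the complement comes out to zero and verification passes.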
ql_log(ql_log_warn, NULL, 0xd01c, + "%s: template type %x\n", __func__, + le32_to_cpu(tmp->template_type)); + return false; + } + + if (!qla27xx_verify_template_checksum(tmp)) { + ql_log(ql_log_warn, NULL, 0xd01d, + "%s: failed template checksum\n", __func__); + return false; + } + + return true; +} + +void +qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked) +{ + ulong flags = 0; + + if (!hardware_locked) + spin_lock_irqsave(&vha->hw->hardware_lock, flags); + if (!vha->hw->mpi_fw_dump) { + ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n"); + } else { + struct fwdt *fwdt = &vha->hw->fwdt[1]; + ulong len; + void *buf = vha->hw->mpi_fw_dump; + bool walk_template_only = false; + + if (vha->hw->mpi_fw_dumped) { + /* Use the spare area for any further dumps. */ + buf += fwdt->dump_size; + walk_template_only = true; + ql_log(ql_log_warn, vha, 0x02f4, + "-> MPI firmware already dumped -- dump saving to temporary buffer %p.\n", + buf); + } + + ql_log(ql_log_warn, vha, 0x02f5, "-> fwdt1 running...\n"); + if (!fwdt->template) { + ql_log(ql_log_warn, vha, 0x02f6, + "-> fwdt1 no template\n"); + goto bailout; + } + len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf); + if (len == 0) { + goto bailout; + } else if (len != fwdt->dump_size) { + ql_log(ql_log_warn, vha, 0x02f7, + "-> fwdt1 fwdump residual=%+ld\n", + fwdt->dump_size - len); + } + vha->hw->stat.num_mpi_reset++; + if (walk_template_only) + goto bailout; + + vha->hw->mpi_fw_dump_len = len; + vha->hw->mpi_fw_dumped = 1; + + ql_log(ql_log_warn, vha, 0x02f8, + "-> MPI firmware dump saved to buffer (%lu/%p)\n", + vha->host_no, vha->hw->mpi_fw_dump); + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); + } + +bailout: + if (!hardware_locked) + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); +} + +void +qla27xx_fwdump(scsi_qla_host_t *vha) +{ + lockdep_assert_held(&vha->hw->hardware_lock); + + if (!vha->hw->fw_dump) { + ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n"); + } else if (vha->hw->fw_dumped) { + ql_log(ql_log_warn, vha, 0xd01f, + "-> Firmware already dumped (%p) -- ignoring request\n", + vha->hw->fw_dump); + } else { + struct fwdt *fwdt = vha->hw->fwdt; + ulong len; + void *buf = vha->hw->fw_dump; + + ql_log(ql_log_warn, vha, 0xd011, "-> fwdt0 running...\n"); + if (!fwdt->template) { + ql_log(ql_log_warn, vha, 0xd012, + "-> fwdt0 no template\n"); + return; + } + len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf); + if (len == 0) { + return; + } else if (len != fwdt->dump_size) { + ql_log(ql_log_warn, vha, 0xd013, + "-> fwdt0 fwdump residual=%+ld\n", + fwdt->dump_size - len); + } + + vha->hw->fw_dump_len = len; + vha->hw->fw_dumped = true; + + ql_log(ql_log_warn, vha, 0xd015, + "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n", + vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags); + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); + } +} diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h new file mode 100644 index 000000000..6e0987edf --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_tmpl.h @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ + +#ifndef __QLA_DMP27_H__ +#define __QLA_DMP27_H__ + +#define IOBASE_ADDR offsetof(struct device_reg_24xx, iobase_addr) + +struct __packed qla27xx_fwdt_template { + __le32 template_type; + __le32 entry_offset; + __le32 template_size; + uint32_t count; /* borrow field for running/residual count */ + 
+ __le32 entry_count; + uint32_t template_version; + __le32 capture_timestamp; + uint32_t template_checksum; + + uint32_t reserved_2; + __le32 driver_info[3]; + + uint32_t saved_state[16]; + + uint32_t reserved_3[8]; + __le32 firmware_version[5]; +}; + +#define TEMPLATE_TYPE_FWDUMP 99 + +#define ENTRY_TYPE_NOP 0 +#define ENTRY_TYPE_TMP_END 255 +#define ENTRY_TYPE_RD_IOB_T1 256 +#define ENTRY_TYPE_WR_IOB_T1 257 +#define ENTRY_TYPE_RD_IOB_T2 258 +#define ENTRY_TYPE_WR_IOB_T2 259 +#define ENTRY_TYPE_RD_PCI 260 +#define ENTRY_TYPE_WR_PCI 261 +#define ENTRY_TYPE_RD_RAM 262 +#define ENTRY_TYPE_GET_QUEUE 263 +#define ENTRY_TYPE_GET_FCE 264 +#define ENTRY_TYPE_PSE_RISC 265 +#define ENTRY_TYPE_RST_RISC 266 +#define ENTRY_TYPE_DIS_INTR 267 +#define ENTRY_TYPE_GET_HBUF 268 +#define ENTRY_TYPE_SCRATCH 269 +#define ENTRY_TYPE_RDREMREG 270 +#define ENTRY_TYPE_WRREMREG 271 +#define ENTRY_TYPE_RDREMRAM 272 +#define ENTRY_TYPE_PCICFG 273 +#define ENTRY_TYPE_GET_SHADOW 274 +#define ENTRY_TYPE_WRITE_BUF 275 +#define ENTRY_TYPE_CONDITIONAL 276 +#define ENTRY_TYPE_RDPEPREG 277 +#define ENTRY_TYPE_WRPEPREG 278 + +#define CAPTURE_FLAG_PHYS_ONLY BIT_0 +#define CAPTURE_FLAG_PHYS_VIRT BIT_1 + +#define DRIVER_FLAG_SKIP_ENTRY BIT_7 + +struct __packed qla27xx_fwdt_entry { + struct __packed { + __le32 type; + __le32 size; + uint32_t reserved_1; + + uint8_t capture_flags; + uint8_t reserved_2[2]; + uint8_t driver_flags; + } hdr; + union __packed { + struct __packed { + } t0; + + struct __packed { + } t255; + + struct __packed { + __le32 base_addr; + uint8_t reg_width; + __le16 reg_count; + uint8_t pci_offset; + } t256; + + struct __packed { + __le32 base_addr; + __le32 write_data; + uint8_t pci_offset; + uint8_t reserved[3]; + } t257; + + struct __packed { + __le32 base_addr; + uint8_t reg_width; + __le16 reg_count; + uint8_t pci_offset; + uint8_t banksel_offset; + uint8_t reserved[3]; + __le32 bank; + } t258; + + struct __packed { + __le32 base_addr; + __le32 write_data; + uint8_t reserved[2]; + uint8_t pci_offset; + uint8_t banksel_offset; + __le32 bank; + } t259; + + struct __packed { + uint8_t pci_offset; + uint8_t reserved[3]; + } t260; + + struct __packed { + uint8_t pci_offset; + uint8_t reserved[3]; + __le32 write_data; + } t261; + + struct __packed { + uint8_t ram_area; + uint8_t reserved[3]; + __le32 start_addr; + __le32 end_addr; + } t262; + + struct __packed { + uint32_t num_queues; + uint8_t queue_type; + uint8_t reserved[3]; + } t263; + + struct __packed { + uint32_t fce_trace_size; + uint64_t write_pointer; + uint64_t base_pointer; + uint32_t fce_enable_mb0; + uint32_t fce_enable_mb2; + uint32_t fce_enable_mb3; + uint32_t fce_enable_mb4; + uint32_t fce_enable_mb5; + uint32_t fce_enable_mb6; + } t264; + + struct __packed { + } t265; + + struct __packed { + } t266; + + struct __packed { + uint8_t pci_offset; + uint8_t reserved[3]; + __le32 data; + } t267; + + struct __packed { + uint8_t buf_type; + uint8_t reserved[3]; + uint32_t buf_size; + uint64_t start_addr; + } t268; + + struct __packed { + uint32_t scratch_size; + } t269; + + struct __packed { + __le32 addr; + __le32 count; + } t270; + + struct __packed { + __le32 addr; + __le32 data; + } t271; + + struct __packed { + __le32 addr; + __le32 count; + } t272; + + struct __packed { + __le32 addr; + __le32 count; + } t273; + + struct __packed { + uint32_t num_queues; + uint8_t queue_type; + uint8_t reserved[3]; + } t274; + + struct __packed { + __le32 length; + uint8_t buffer[]; + } t275; + + struct __packed { + __le32 cond1; + __le32 cond2; + } t276; + + 
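The fixed header of every entry packs to 16 bytes (4 + 4 + 4 + 1 + 2 + 1), and hdr.size covers the header plus the type-specific payload, which is exactly the stride qla27xx_next_entry() advances by. A quick user-space check of that layout, using the GCC/Clang attribute spelling instead of the kernel's __packed macro:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* User-space mirror of the entry header above. */
struct __attribute__((packed)) sketch_hdr {
    uint32_t type;
    uint32_t size;
    uint32_t reserved_1;
    uint8_t  capture_flags;
    uint8_t  reserved_2[2];
    uint8_t  driver_flags;
};

int main(void)
{
    printf("sizeof(hdr) = %zu\n", sizeof(struct sketch_hdr));   /* 16 */
    printf("driver_flags at offset %zu\n",
           offsetof(struct sketch_hdr, driver_flags));          /* 15 */
    return 0;
}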
struct __packed { + __le32 cmd_addr; + __le32 wr_cmd_data; + __le32 data_addr; + } t277; + + struct __packed { + __le32 cmd_addr; + __le32 wr_cmd_data; + __le32 data_addr; + __le32 wr_data; + } t278; + }; +}; + +#define T262_RAM_AREA_CRITICAL_RAM 1 +#define T262_RAM_AREA_EXTERNAL_RAM 2 +#define T262_RAM_AREA_SHARED_RAM 3 +#define T262_RAM_AREA_DDR_RAM 4 +#define T262_RAM_AREA_MISC 5 + +#define T263_QUEUE_TYPE_REQ 1 +#define T263_QUEUE_TYPE_RSP 2 +#define T263_QUEUE_TYPE_ATIO 3 + +#define T268_BUF_TYPE_EXTD_TRACE 1 +#define T268_BUF_TYPE_EXCH_BUFOFF 2 +#define T268_BUF_TYPE_EXTD_LOGIN 3 +#define T268_BUF_TYPE_REQ_MIRROR 4 +#define T268_BUF_TYPE_RSP_MIRROR 5 + +#define T274_QUEUE_TYPE_REQ_SHAD 1 +#define T274_QUEUE_TYPE_RSP_SHAD 2 +#define T274_QUEUE_TYPE_ATIO_SHAD 3 + +#endif diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h new file mode 100644 index 000000000..d903563e9 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + */ +/* + * Driver version + */ +#define QLA2XXX_VERSION "10.02.09.100-k" + +#define QLA_DRIVER_MAJOR_VER 10 +#define QLA_DRIVER_MINOR_VER 2 +#define QLA_DRIVER_PATCH_VER 9 +#define QLA_DRIVER_BETA_VER 100 diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c new file mode 100644 index 000000000..68a0e6a2f --- /dev/null +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -0,0 +1,1938 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file contains tcm implementation using v4 configfs fabric infrastructure + * for QLogic target mode HBAs + * + * (c) Copyright 2010-2013 Datera, Inc. + * + * Author: Nicholas A. Bellinger + * + * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from + * the TCM_FC / Open-FCoE.org fabric module. + * + * Copyright (c) 2010 Cisco Systems, Inc + * + ****************************************************************************/ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qla_def.h" +#include "qla_target.h" +#include "tcm_qla2xxx.h" + +static struct workqueue_struct *tcm_qla2xxx_free_wq; + +/* + * Parse WWN. + * If strict, we require lower-case hex and colon separators to be sure + * the name is the same as what would be generated by ft_format_wwn() + * so the name and wwn are mapped one-to-one. 
+ */ +static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict) +{ + const char *cp; + char c; + u32 nibble; + u32 byte = 0; + u32 pos = 0; + u32 err; + + *wwn = 0; + for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) { + c = *cp; + if (c == '\n' && cp[1] == '\0') + continue; + if (strict && pos++ == 2 && byte++ < 7) { + pos = 0; + if (c == ':') + continue; + err = 1; + goto fail; + } + if (c == '\0') { + err = 2; + if (strict && byte != 8) + goto fail; + return cp - name; + } + err = 3; + if (isdigit(c)) + nibble = c - '0'; + else if (isxdigit(c) && (islower(c) || !strict)) + nibble = tolower(c) - 'a' + 10; + else + goto fail; + *wwn = (*wwn << 4) | nibble; + } + err = 4; +fail: + pr_debug("err %u len %zu pos %u byte %u\n", + err, cp - name, pos, byte); + return -1; +} + +static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn) +{ + u8 b[8]; + + put_unaligned_be64(wwn, b); + return snprintf(buf, len, + "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", + b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]); +} + +/* + * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn + */ +static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm) +{ + unsigned int i, j; + u8 wwn[8]; + + memset(wwn, 0, sizeof(wwn)); + + /* Validate and store the new name */ + for (i = 0, j = 0; i < 16; i++) { + int value; + + value = hex_to_bin(*ns++); + if (value >= 0) + j = (j << 4) | value; + else + return -EINVAL; + + if (i % 2) { + wwn[i/2] = j & 0xff; + j = 0; + } + } + + *nm = wwn_to_u64(wwn); + return 0; +} + +/* + * This parsing logic follows drivers/scsi/scsi_transport_fc.c: + * store_fc_host_vport_create() + */ +static int tcm_qla2xxx_npiv_parse_wwn( + const char *name, + size_t count, + u64 *wwpn, + u64 *wwnn) +{ + unsigned int cnt = count; + int rc; + + *wwpn = 0; + *wwnn = 0; + + /* count may include a LF at end of string */ + if (name[cnt-1] == '\n' || name[cnt-1] == 0) + cnt--; + + /* validate we have enough characters for WWPN */ + if ((cnt != (16+1+16)) || (name[16] != ':')) + return -EINVAL; + + rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn); + if (rc != 0) + return rc; + + rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn); + if (rc != 0) + return rc; + + return 0; +} + +static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + struct tcm_qla2xxx_lport *lport = tpg->lport; + + return lport->lport_naa_name; +} + +static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + return tpg->lport_tpgt; +} + +static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + + return tpg->tpg_attrib.generate_node_acls; +} + +static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + + return tpg->tpg_attrib.cache_dynamic_acls; +} + +static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + + return tpg->tpg_attrib.demo_mode_write_protect; +} + +static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + + return 
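tcm_qla2xxx_parse_wwn() in strict mode accepts only the exact shape emitted by tcm_qla2xxx_format_wwn(), eight lower-case hex byte pairs separated by colons, while the NPIV variant wants two bare 16-digit hex WWNs joined by a single colon. A simplified user-space parser for both shapes using sscanf instead of the driver's per-character validation; the sample WWN strings are illustrative only:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* "xx:xx:xx:xx:xx:xx:xx:xx" -> u64, the format written by format_wwn(). */
static int parse_colon_wwn(const char *s, uint64_t *wwn)
{
    unsigned char b[8];

    if (sscanf(s, "%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx",
               &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6], &b[7]) != 8)
        return -1;
    *wwn = 0;
    for (int i = 0; i < 8; i++)
        *wwn = (*wwn << 8) | b[i];
    return 0;
}

/* NPIV "wwpn:wwnn", each a bare 16-digit hex number. */
static int parse_npiv_wwn(const char *s, uint64_t *wwpn, uint64_t *wwnn)
{
    return sscanf(s, "%16" SCNx64 ":%16" SCNx64, wwpn, wwnn) == 2 ? 0 : -1;
}

int main(void)
{
    uint64_t wwn, wwpn, wwnn;

    if (!parse_colon_wwn("21:00:00:24:ff:31:21:be", &wwn))
        printf("wwn  = 0x%016" PRIx64 "\n", wwn);
    if (!parse_npiv_wwn("2100001b321a4594:2000001b321a4594", &wwpn, &wwnn))
        printf("wwpn = 0x%016" PRIx64 " wwnn = 0x%016" PRIx64 "\n",
               wwpn, wwnn);
    return 0;
}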
tpg->tpg_attrib.prod_mode_write_protect; +} + +static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + + return tpg->tpg_attrib.demo_mode_login_only; +} + +static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + + return tpg->tpg_attrib.fabric_prot_type; +} + +static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + + return tpg->lport_tpgt; +} + +static void tcm_qla2xxx_complete_mcmd(struct work_struct *work) +{ + struct qla_tgt_mgmt_cmd *mcmd = container_of(work, + struct qla_tgt_mgmt_cmd, free_work); + + transport_generic_free_cmd(&mcmd->se_cmd, 0); +} + +/* + * Called from qla_target_template->free_mcmd(), and will call + * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops + * release callback. qla_hw_data->hardware_lock is expected to be held + */ +static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) +{ + if (!mcmd) + return; + INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd); + queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work); +} + +static void tcm_qla2xxx_complete_free(struct work_struct *work) +{ + struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + unsigned long flags; + + cmd->cmd_in_wq = 0; + + WARN_ON(cmd->trc_flags & TRC_CMD_FREE); + + /* To do: protect all tgt_counters manipulations with proper locking. */ + cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++; + cmd->trc_flags |= TRC_CMD_FREE; + cmd->cmd_sent_to_fw = 0; + + spin_lock_irqsave(&cmd->sess->sess_cmd_lock, flags); + list_del_init(&cmd->sess_cmd_list); + spin_unlock_irqrestore(&cmd->sess->sess_cmd_lock, flags); + + transport_generic_free_cmd(&cmd->se_cmd, 0); +} + +static struct qla_tgt_cmd *tcm_qla2xxx_get_cmd(struct fc_port *sess) +{ + struct se_session *se_sess = sess->se_sess; + struct qla_tgt_cmd *cmd; + int tag, cpu; + + tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); + if (tag < 0) + return NULL; + + cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; + memset(cmd, 0, sizeof(struct qla_tgt_cmd)); + cmd->se_cmd.map_tag = tag; + cmd->se_cmd.map_cpu = cpu; + + return cmd; +} + +static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd) +{ + target_free_tag(cmd->sess->se_sess, &cmd->se_cmd); +} + +/* + * Called from qla_target_template->free_cmd(), and will call + * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops + * release callback. 
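tcm_qla2xxx_get_cmd() does no allocation on the I/O path: it pulls a free tag from the session's sbitmap tag pool and uses it to index the preallocated sess_cmd_map array, and tcm_qla2xxx_rel_cmd() simply returns the tag. A single-threaded user-space model of that tag-indexed pool, with a trivial LIFO free list standing in for the sbitmap (all names here are invented for the sketch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define POOL_SIZE 4

struct sketch_cmd {
    int tag;
    uint8_t cdb[16];
};

/* Preallocated command array plus a tiny free-tag stack; the driver uses
 * sbitmap_queue_get()/target_free_tag() for the same job, safely under
 * concurrency. */
static struct sketch_cmd cmd_pool[POOL_SIZE];
static int free_tags[POOL_SIZE] = { 0, 1, 2, 3 };
static int free_top = POOL_SIZE;

static struct sketch_cmd *get_cmd(void)
{
    int tag;

    if (!free_top)
        return NULL;            /* pool exhausted */
    tag = free_tags[--free_top];
    memset(&cmd_pool[tag], 0, sizeof(cmd_pool[tag]));
    cmd_pool[tag].tag = tag;
    return &cmd_pool[tag];
}

static void rel_cmd(struct sketch_cmd *cmd)
{
    free_tags[free_top++] = cmd->tag;
}

int main(void)
{
    struct sketch_cmd *a = get_cmd(), *b = get_cmd();

    printf("got tags %d and %d\n", a->tag, b->tag);
    rel_cmd(a);
    rel_cmd(b);
    return 0;
}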
qla_hw_data->hardware_lock is expected to be held + */ +static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) +{ + cmd->qpair->tgt_counters.core_qla_free_cmd++; + cmd->cmd_in_wq = 1; + + WARN_ON(cmd->trc_flags & TRC_CMD_DONE); + cmd->trc_flags |= TRC_CMD_DONE; + + INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); + queue_work(tcm_qla2xxx_free_wq, &cmd->work); +} + +/* + * Called from struct target_core_fabric_ops->check_stop_free() context + */ +static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd) +{ + struct qla_tgt_cmd *cmd; + + if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) { + cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); + cmd->trc_flags |= TRC_CMD_CHK_STOP; + } + + return target_put_sess_cmd(se_cmd); +} + +/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying + * fabric descriptor @se_cmd command to release + */ +static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd) +{ + struct qla_tgt_cmd *cmd; + + if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) { + struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd, + struct qla_tgt_mgmt_cmd, se_cmd); + qlt_free_mcmd(mcmd); + return; + } + cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); + + if (WARN_ON(cmd->cmd_sent_to_fw)) + return; + + qlt_free_cmd(cmd); +} + +static void tcm_qla2xxx_release_session(struct kref *kref) +{ + struct fc_port *sess = container_of(kref, + struct fc_port, sess_kref); + + qlt_unreg_sess(sess); +} + +static void tcm_qla2xxx_put_sess(struct fc_port *sess) +{ + if (!sess) + return; + + kref_put(&sess->sess_kref, tcm_qla2xxx_release_session); +} + +static void tcm_qla2xxx_close_session(struct se_session *se_sess) +{ + struct fc_port *sess = se_sess->fabric_sess_ptr; + + BUG_ON(!sess); + + target_stop_session(se_sess); + + sess->explicit_logout = 1; + tcm_qla2xxx_put_sess(sess); +} + +static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) +{ + struct qla_tgt_cmd *cmd = container_of(se_cmd, + struct qla_tgt_cmd, se_cmd); + + if (cmd->aborted) { + /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task + * can get ahead of this cmd. tcm_qla2xxx_aborted_task + * already kick start the free. + */ + pr_debug("write_pending aborted cmd[%p] refcount %d " + "transport_state %x, t_state %x, se_cmd_flags %x\n", + cmd, kref_read(&cmd->se_cmd.cmd_kref), + cmd->se_cmd.transport_state, + cmd->se_cmd.t_state, + cmd->se_cmd.se_cmd_flags); + transport_generic_request_failure(&cmd->se_cmd, + TCM_CHECK_CONDITION_ABORT_CMD); + return 0; + } + cmd->trc_flags |= TRC_XFR_RDY; + cmd->bufflen = se_cmd->data_length; + cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); + + cmd->sg_cnt = se_cmd->t_data_nents; + cmd->sg = se_cmd->t_data_sg; + + cmd->prot_sg_cnt = se_cmd->t_prot_nents; + cmd->prot_sg = se_cmd->t_prot_sg; + cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size; + se_cmd->pi_err = 0; + + /* + * qla_target.c:qlt_rdy_to_xfer() will call dma_map_sg() to setup + * the SGL mappings into PCIe memory for incoming FCP WRITE data. 
+ */ + return qlt_rdy_to_xfer(cmd); +} + +static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd) +{ + if (!(se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { + struct qla_tgt_cmd *cmd = container_of(se_cmd, + struct qla_tgt_cmd, se_cmd); + return cmd->state; + } + + return 0; +} + +/* + * Called from process context in qla_target.c:qlt_do_work() code + */ +static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, + unsigned char *cdb, uint32_t data_length, int fcp_task_attr, + int data_dir, int bidi) +{ + struct se_cmd *se_cmd = &cmd->se_cmd; + struct se_session *se_sess; + struct fc_port *sess; +#ifdef CONFIG_TCM_QLA2XXX_DEBUG + struct se_portal_group *se_tpg; + struct tcm_qla2xxx_tpg *tpg; +#endif + int rc, target_flags = TARGET_SCF_ACK_KREF; + unsigned long flags; + + if (bidi) + target_flags |= TARGET_SCF_BIDI_OP; + + if (se_cmd->cpuid != WORK_CPU_UNBOUND) + target_flags |= TARGET_SCF_USE_CPUID; + + sess = cmd->sess; + if (!sess) { + pr_err("Unable to locate struct fc_port from qla_tgt_cmd\n"); + return -EINVAL; + } + + se_sess = sess->se_sess; + if (!se_sess) { + pr_err("Unable to locate active struct se_session\n"); + return -EINVAL; + } + +#ifdef CONFIG_TCM_QLA2XXX_DEBUG + se_tpg = se_sess->se_tpg; + tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); + if (unlikely(tpg->tpg_attrib.jam_host)) { + /* return, and dont run target_submit_cmd,discarding command */ + return 0; + } +#endif + cmd->qpair->tgt_counters.qla_core_sbt_cmd++; + + spin_lock_irqsave(&sess->sess_cmd_lock, flags); + list_add_tail(&cmd->sess_cmd_list, &sess->sess_cmd_list); + spin_unlock_irqrestore(&sess->sess_cmd_lock, flags); + + rc = target_init_cmd(se_cmd, se_sess, &cmd->sense_buffer[0], + cmd->unpacked_lun, data_length, fcp_task_attr, + data_dir, target_flags); + if (rc) + return rc; + + if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0, + GFP_KERNEL)) + return 0; + + target_submit(se_cmd); + return 0; +} + +static void tcm_qla2xxx_handle_data_work(struct work_struct *work) +{ + struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + + /* + * Ensure that the complete FCP WRITE payload has been received. + * Otherwise return an exception via CHECK_CONDITION status. 
+ */ + cmd->cmd_in_wq = 0; + cmd->cmd_sent_to_fw = 0; + if (cmd->aborted) { + transport_generic_request_failure(&cmd->se_cmd, + TCM_CHECK_CONDITION_ABORT_CMD); + return; + } + + cmd->qpair->tgt_counters.qla_core_ret_ctio++; + if (!cmd->write_data_transferred) { + switch (cmd->dif_err_code) { + case DIF_ERR_GRD: + cmd->se_cmd.pi_err = + TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; + break; + case DIF_ERR_REF: + cmd->se_cmd.pi_err = + TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + break; + case DIF_ERR_APP: + cmd->se_cmd.pi_err = + TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; + break; + case DIF_ERR_NONE: + default: + break; + } + + if (cmd->se_cmd.pi_err) + transport_generic_request_failure(&cmd->se_cmd, + cmd->se_cmd.pi_err); + else + transport_generic_request_failure(&cmd->se_cmd, + TCM_CHECK_CONDITION_ABORT_CMD); + + return; + } + + return target_execute_cmd(&cmd->se_cmd); +} + +/* + * Called from qla_target.c:qlt_do_ctio_completion() + */ +static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) +{ + cmd->trc_flags |= TRC_DATA_IN; + cmd->cmd_in_wq = 1; + INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); + queue_work(tcm_qla2xxx_free_wq, &cmd->work); +} + +static int tcm_qla2xxx_chk_dif_tags(uint32_t tag) +{ + return 0; +} + +static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd, + uint16_t *pfw_prot_opts) +{ + struct se_cmd *se_cmd = &cmd->se_cmd; + + if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) + *pfw_prot_opts |= PO_DISABLE_GUARD_CHECK; + + if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)) + *pfw_prot_opts |= PO_DIS_APP_TAG_VALD; + + return 0; +} + +/* + * Called from qla_target.c:qlt_issue_task_mgmt() + */ +static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun, + uint16_t tmr_func, uint32_t tag) +{ + struct fc_port *sess = mcmd->sess; + struct se_cmd *se_cmd = &mcmd->se_cmd; + int transl_tmr_func = 0; + + switch (tmr_func) { + case QLA_TGT_ABTS: + pr_debug("%ld: ABTS received\n", sess->vha->host_no); + transl_tmr_func = TMR_ABORT_TASK; + break; + case QLA_TGT_2G_ABORT_TASK: + pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no); + transl_tmr_func = TMR_ABORT_TASK; + break; + case QLA_TGT_CLEAR_ACA: + pr_debug("%ld: CLEAR_ACA received\n", sess->vha->host_no); + transl_tmr_func = TMR_CLEAR_ACA; + break; + case QLA_TGT_TARGET_RESET: + pr_debug("%ld: TARGET_RESET received\n", sess->vha->host_no); + transl_tmr_func = TMR_TARGET_WARM_RESET; + break; + case QLA_TGT_LUN_RESET: + pr_debug("%ld: LUN_RESET received\n", sess->vha->host_no); + transl_tmr_func = TMR_LUN_RESET; + break; + case QLA_TGT_CLEAR_TS: + pr_debug("%ld: CLEAR_TS received\n", sess->vha->host_no); + transl_tmr_func = TMR_CLEAR_TASK_SET; + break; + case QLA_TGT_ABORT_TS: + pr_debug("%ld: ABORT_TS received\n", sess->vha->host_no); + transl_tmr_func = TMR_ABORT_TASK_SET; + break; + default: + pr_debug("%ld: Unknown task mgmt fn 0x%x\n", + sess->vha->host_no, tmr_func); + return -ENOSYS; + } + + return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd, + transl_tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF); +} + +static struct qla_tgt_cmd *tcm_qla2xxx_find_cmd_by_tag(struct fc_port *sess, + uint64_t tag) +{ + struct qla_tgt_cmd *cmd; + unsigned long flags; + + if (!sess->se_sess) + return NULL; + + spin_lock_irqsave(&sess->sess_cmd_lock, flags); + list_for_each_entry(cmd, &sess->sess_cmd_list, sess_cmd_list) { + if (cmd->se_cmd.tag == tag) + goto done; + } + cmd = NULL; +done: + spin_unlock_irqrestore(&sess->sess_cmd_lock, flags); + + return cmd; +} + +static int 
tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) +{ + struct qla_tgt_cmd *cmd = container_of(se_cmd, + struct qla_tgt_cmd, se_cmd); + + if (cmd->aborted) { + /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task + * can get ahead of this cmd. tcm_qla2xxx_aborted_task + * already kick start the free. + */ + pr_debug("queue_data_in aborted cmd[%p] refcount %d " + "transport_state %x, t_state %x, se_cmd_flags %x\n", + cmd, kref_read(&cmd->se_cmd.cmd_kref), + cmd->se_cmd.transport_state, + cmd->se_cmd.t_state, + cmd->se_cmd.se_cmd_flags); + return 0; + } + + cmd->trc_flags |= TRC_XMIT_DATA; + cmd->bufflen = se_cmd->data_length; + cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); + + cmd->sg_cnt = se_cmd->t_data_nents; + cmd->sg = se_cmd->t_data_sg; + cmd->offset = 0; + + cmd->prot_sg_cnt = se_cmd->t_prot_nents; + cmd->prot_sg = se_cmd->t_prot_sg; + cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size; + se_cmd->pi_err = 0; + + /* + * Now queue completed DATA_IN the qla2xxx LLD and response ring + */ + return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS, + se_cmd->scsi_status); +} + +static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) +{ + struct qla_tgt_cmd *cmd = container_of(se_cmd, + struct qla_tgt_cmd, se_cmd); + int xmit_type = QLA_TGT_XMIT_STATUS; + + if (cmd->aborted) { + /* + * Cmd can loop during Q-full. tcm_qla2xxx_aborted_task + * can get ahead of this cmd. tcm_qla2xxx_aborted_task + * already kick start the free. + */ + pr_debug( + "queue_data_in aborted cmd[%p] refcount %d transport_state %x, t_state %x, se_cmd_flags %x\n", + cmd, kref_read(&cmd->se_cmd.cmd_kref), + cmd->se_cmd.transport_state, cmd->se_cmd.t_state, + cmd->se_cmd.se_cmd_flags); + return 0; + } + cmd->bufflen = se_cmd->data_length; + cmd->sg = NULL; + cmd->sg_cnt = 0; + cmd->offset = 0; + cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); + cmd->trc_flags |= TRC_XMIT_STATUS; + + if (se_cmd->data_direction == DMA_FROM_DEVICE) { + /* + * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen + * for qla_tgt_xmit_response LLD code + */ + if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT; + se_cmd->residual_count = 0; + } + se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; + se_cmd->residual_count += se_cmd->data_length; + + cmd->bufflen = 0; + } + /* + * Now queue status response to qla2xxx LLD code and response ring + */ + return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); +} + +static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) +{ + struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; + struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd, + struct qla_tgt_mgmt_cmd, se_cmd); + + pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n", + mcmd, se_tmr->function, se_tmr->response); + /* + * Do translation between TCM TM response codes and + * QLA2xxx FC TM response codes. + */ + switch (se_tmr->response) { + case TMR_FUNCTION_COMPLETE: + mcmd->fc_tm_rsp = FC_TM_SUCCESS; + break; + case TMR_TASK_DOES_NOT_EXIST: + mcmd->fc_tm_rsp = FC_TM_BAD_CMD; + break; + case TMR_FUNCTION_REJECTED: + mcmd->fc_tm_rsp = FC_TM_REJECT; + break; + case TMR_LUN_DOES_NOT_EXIST: + default: + mcmd->fc_tm_rsp = FC_TM_FAILED; + break; + } + /* + * Queue the TM response to QLA2xxx LLD to build a + * CTIO response packet. 
+ */ + qlt_xmit_tm_rsp(mcmd); +} + +static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) +{ + struct qla_tgt_cmd *cmd; + unsigned long flags; + + if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) + return; + + cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); + + spin_lock_irqsave(&cmd->sess->sess_cmd_lock, flags); + list_del_init(&cmd->sess_cmd_list); + spin_unlock_irqrestore(&cmd->sess->sess_cmd_lock, flags); + + qlt_abort_cmd(cmd); +} + +static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, + struct tcm_qla2xxx_nacl *, struct fc_port *); +/* + * Expected to be called with struct qla_hw_data->tgt.sess_lock held + */ +static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess) +{ + struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; + struct se_portal_group *se_tpg = se_nacl->se_tpg; + struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; + struct tcm_qla2xxx_lport *lport = container_of(se_wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, + struct tcm_qla2xxx_nacl, se_node_acl); + void *node; + + pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id); + + node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id); + if (WARN_ON(node && (node != se_nacl))) { + /* + * The nacl no longer matches what we think it should be. + * Most likely a new dynamic acl has been added while + * someone dropped the hardware lock. It clearly is a + * bug elsewhere, but this bit can't make things worse. + */ + btree_insert32(&lport->lport_fcport_map, nacl->nport_id, + node, GFP_ATOMIC); + } + + pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", + se_nacl, nacl->nport_wwnn, nacl->nport_id); + /* + * Now clear the se_nacl and session pointers from our HW lport lookup + * table mapping for this initiator's fabric S_ID and LOOP_ID entries. + * + * This is done ahead of callbacks into tcm_qla2xxx_free_session() -> + * target_wait_for_sess_cmds() before the session waits for outstanding + * I/O to complete, to avoid a race between session shutdown execution + * and incoming ATIOs or TMRs picking up a stale se_node_act reference. 
+ */ + tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); +} + +static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess) +{ + target_stop_session(sess->se_sess); +} + +static int tcm_qla2xxx_init_nodeacl(struct se_node_acl *se_nacl, + const char *name) +{ + struct tcm_qla2xxx_nacl *nacl = + container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); + u64 wwnn; + + if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) + return -EINVAL; + + nacl->nport_wwnn = wwnn; + tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); + + return 0; +} + +/* Start items for tcm_qla2xxx_tpg_attrib_cit */ + +#define DEF_QLA_TPG_ATTRIB(name) \ + \ +static ssize_t tcm_qla2xxx_tpg_attrib_##name##_show( \ + struct config_item *item, char *page) \ +{ \ + struct se_portal_group *se_tpg = attrib_to_tpg(item); \ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ + struct tcm_qla2xxx_tpg, se_tpg); \ + \ + return sprintf(page, "%d\n", tpg->tpg_attrib.name); \ +} \ + \ +static ssize_t tcm_qla2xxx_tpg_attrib_##name##_store( \ + struct config_item *item, const char *page, size_t count) \ +{ \ + struct se_portal_group *se_tpg = attrib_to_tpg(item); \ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ + struct tcm_qla2xxx_tpg, se_tpg); \ + struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \ + unsigned long val; \ + int ret; \ + \ + ret = kstrtoul(page, 0, &val); \ + if (ret < 0) { \ + pr_err("kstrtoul() failed with" \ + " ret: %d\n", ret); \ + return -EINVAL; \ + } \ + \ + if ((val != 0) && (val != 1)) { \ + pr_err("Illegal boolean value %lu\n", val); \ + return -EINVAL; \ + } \ + \ + a->name = val; \ + \ + return count; \ +} \ +CONFIGFS_ATTR(tcm_qla2xxx_tpg_attrib_, name) + +DEF_QLA_TPG_ATTRIB(generate_node_acls); +DEF_QLA_TPG_ATTRIB(cache_dynamic_acls); +DEF_QLA_TPG_ATTRIB(demo_mode_write_protect); +DEF_QLA_TPG_ATTRIB(prod_mode_write_protect); +DEF_QLA_TPG_ATTRIB(demo_mode_login_only); +#ifdef CONFIG_TCM_QLA2XXX_DEBUG +DEF_QLA_TPG_ATTRIB(jam_host); +#endif + +static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { + &tcm_qla2xxx_tpg_attrib_attr_generate_node_acls, + &tcm_qla2xxx_tpg_attrib_attr_cache_dynamic_acls, + &tcm_qla2xxx_tpg_attrib_attr_demo_mode_write_protect, + &tcm_qla2xxx_tpg_attrib_attr_prod_mode_write_protect, + &tcm_qla2xxx_tpg_attrib_attr_demo_mode_login_only, +#ifdef CONFIG_TCM_QLA2XXX_DEBUG + &tcm_qla2xxx_tpg_attrib_attr_jam_host, +#endif + NULL, +}; + +/* End items for tcm_qla2xxx_tpg_attrib_cit */ + +static int tcm_qla2xxx_enable_tpg(struct se_portal_group *se_tpg, + bool enable) +{ + struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; + struct tcm_qla2xxx_lport *lport = container_of(se_wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct scsi_qla_host *vha = lport->qla_vha; + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + + if (enable) { + if (atomic_read(&tpg->lport_tpg_enabled)) + return -EEXIST; + + atomic_set(&tpg->lport_tpg_enabled, 1); + qlt_enable_vha(vha); + } else { + if (!atomic_read(&tpg->lport_tpg_enabled)) + return 0; + + atomic_set(&tpg->lport_tpg_enabled, 0); + qlt_stop_phase1(vha->vha_tgt.qla_tgt); + qlt_stop_phase2(vha->vha_tgt.qla_tgt); + } + + return 0; +} + +static ssize_t tcm_qla2xxx_tpg_dynamic_sessions_show(struct config_item *item, + char *page) +{ + return target_show_dynamic_sessions(to_tpg(item), page); +} + +static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct tcm_qla2xxx_tpg *tpg 
= container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + unsigned long val; + int ret = kstrtoul(page, 0, &val); + + if (ret) { + pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret); + return ret; + } + if (val != 0 && val != 1 && val != 3) { + pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val); + return -EINVAL; + } + tpg->tpg_attrib.fabric_prot_type = val; + + return count; +} + +static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item, + char *page) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + + return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type); +} + +CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions); +CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type); + +static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = { + &tcm_qla2xxx_tpg_attr_dynamic_sessions, + &tcm_qla2xxx_tpg_attr_fabric_prot_type, + NULL, +}; + +static struct se_portal_group *tcm_qla2xxx_make_tpg(struct se_wwn *wwn, + const char *name) +{ + struct tcm_qla2xxx_lport *lport = container_of(wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct tcm_qla2xxx_tpg *tpg; + unsigned long tpgt; + int ret; + + if (strstr(name, "tpgt_") != name) + return ERR_PTR(-EINVAL); + if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) + return ERR_PTR(-EINVAL); + + if ((tpgt != 1)) { + pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n"); + return ERR_PTR(-ENOSYS); + } + + tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL); + if (!tpg) { + pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n"); + return ERR_PTR(-ENOMEM); + } + tpg->lport = lport; + tpg->lport_tpgt = tpgt; + /* + * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic + * NodeACLs + */ + tpg->tpg_attrib.generate_node_acls = 1; + tpg->tpg_attrib.demo_mode_write_protect = 1; + tpg->tpg_attrib.cache_dynamic_acls = 1; + tpg->tpg_attrib.demo_mode_login_only = 1; + tpg->tpg_attrib.jam_host = 0; + + ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); + if (ret < 0) { + kfree(tpg); + return NULL; + } + + lport->tpg_1 = tpg; + + return &tpg->se_tpg; +} + +static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + struct tcm_qla2xxx_lport *lport = tpg->lport; + struct scsi_qla_host *vha = lport->qla_vha; + /* + * Call into qla2x_target.c LLD logic to shutdown the active + * FC Nexuses and disable target mode operation for this qla_hw_data + */ + if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop) + qlt_stop_phase1(vha->vha_tgt.qla_tgt); + + core_tpg_deregister(se_tpg); + /* + * Clear local TPG=1 pointer for non NPIV mode. 
+ */ + lport->tpg_1 = NULL; + kfree(tpg); +} + +static int tcm_qla2xxx_npiv_enable_tpg(struct se_portal_group *se_tpg, + bool enable) +{ + struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; + struct tcm_qla2xxx_lport *lport = container_of(se_wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct scsi_qla_host *vha = lport->qla_vha; + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + + if (enable) { + if (atomic_read(&tpg->lport_tpg_enabled)) + return -EEXIST; + + atomic_set(&tpg->lport_tpg_enabled, 1); + qlt_enable_vha(vha); + } else { + if (!atomic_read(&tpg->lport_tpg_enabled)) + return 0; + + atomic_set(&tpg->lport_tpg_enabled, 0); + qlt_stop_phase1(vha->vha_tgt.qla_tgt); + qlt_stop_phase2(vha->vha_tgt.qla_tgt); + } + + return 0; +} + +static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(struct se_wwn *wwn, + const char *name) +{ + struct tcm_qla2xxx_lport *lport = container_of(wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct tcm_qla2xxx_tpg *tpg; + unsigned long tpgt; + int ret; + + if (strstr(name, "tpgt_") != name) + return ERR_PTR(-EINVAL); + if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) + return ERR_PTR(-EINVAL); + + tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL); + if (!tpg) { + pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n"); + return ERR_PTR(-ENOMEM); + } + tpg->lport = lport; + tpg->lport_tpgt = tpgt; + + /* + * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic + * NodeACLs + */ + tpg->tpg_attrib.generate_node_acls = 1; + tpg->tpg_attrib.demo_mode_write_protect = 1; + tpg->tpg_attrib.cache_dynamic_acls = 1; + tpg->tpg_attrib.demo_mode_login_only = 1; + + ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); + if (ret < 0) { + kfree(tpg); + return NULL; + } + lport->tpg_1 = tpg; + return &tpg->se_tpg; +} + +/* + * Expected to be called with struct qla_hw_data->tgt.sess_lock held + */ +static struct fc_port *tcm_qla2xxx_find_sess_by_s_id(scsi_qla_host_t *vha, + const be_id_t s_id) +{ + struct tcm_qla2xxx_lport *lport; + struct se_node_acl *se_nacl; + struct tcm_qla2xxx_nacl *nacl; + u32 key; + + lport = vha->vha_tgt.target_lport_ptr; + if (!lport) { + pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); + dump_stack(); + return NULL; + } + + key = sid_to_key(s_id); + pr_debug("find_sess_by_s_id: 0x%06x\n", key); + + se_nacl = btree_lookup32(&lport->lport_fcport_map, key); + if (!se_nacl) { + pr_debug("Unable to locate s_id: 0x%06x\n", key); + return NULL; + } + pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n", + se_nacl, se_nacl->initiatorname); + + nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); + if (!nacl->fc_port) { + pr_err("Unable to locate struct fc_port\n"); + return NULL; + } + + return nacl->fc_port; +} + +/* + * Expected to be called with struct qla_hw_data->tgt.sess_lock held + */ +static void tcm_qla2xxx_set_sess_by_s_id( + struct tcm_qla2xxx_lport *lport, + struct se_node_acl *new_se_nacl, + struct tcm_qla2xxx_nacl *nacl, + struct se_session *se_sess, + struct fc_port *fc_port, + be_id_t s_id) +{ + u32 key; + void *slot; + int rc; + + key = sid_to_key(s_id); + pr_debug("set_sess_by_s_id: %06x\n", key); + + slot = btree_lookup32(&lport->lport_fcport_map, key); + if (!slot) { + if (new_se_nacl) { + pr_debug("Setting up new fc_port entry to new_se_nacl\n"); + nacl->nport_id = key; + rc = btree_insert32(&lport->lport_fcport_map, key, + new_se_nacl, GFP_ATOMIC); + if (rc) + printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n", + 
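find_sess_by_s_id() and set_sess_by_s_id() key lport->lport_fcport_map by a u32 built from the 24-bit FC port id. sid_to_key() itself is defined in tcm_qla2xxx.h, outside this hunk, so the packing below (domain in bits 23..16, area in 15..8, al_pa in 7..0) is an assumption that matches the 0x%06x formatting used in the debug messages:

#include <stdint.h>
#include <stdio.h>

/* 24-bit fibre channel port id, mirroring be_id_t's three bytes. */
struct sketch_be_id {
    uint8_t domain;
    uint8_t area;
    uint8_t al_pa;
};

/* Assumed equivalent of sid_to_key(): pack the three bytes into a u32
 * suitable for use as a btree key. */
static uint32_t sketch_sid_to_key(struct sketch_be_id s_id)
{
    return (uint32_t)s_id.domain << 16 |
           (uint32_t)s_id.area << 8 |
           s_id.al_pa;
}

int main(void)
{
    struct sketch_be_id s_id = { .domain = 0x01, .area = 0x02, .al_pa = 0x03 };

    printf("key = 0x%06x\n", sketch_sid_to_key(s_id));  /* 0x010203 */
    return 0;
}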
(int)key); + } else { + pr_debug("Wiping nonexisting fc_port entry\n"); + } + + fc_port->se_sess = se_sess; + nacl->fc_port = fc_port; + return; + } + + if (nacl->fc_port) { + if (new_se_nacl == NULL) { + pr_debug("Clearing existing nacl->fc_port and fc_port entry\n"); + btree_remove32(&lport->lport_fcport_map, key); + nacl->fc_port = NULL; + return; + } + pr_debug("Replacing existing nacl->fc_port and fc_port entry\n"); + btree_update32(&lport->lport_fcport_map, key, new_se_nacl); + fc_port->se_sess = se_sess; + nacl->fc_port = fc_port; + return; + } + + if (new_se_nacl == NULL) { + pr_debug("Clearing existing fc_port entry\n"); + btree_remove32(&lport->lport_fcport_map, key); + return; + } + + pr_debug("Replacing existing fc_port entry w/o active nacl->fc_port\n"); + btree_update32(&lport->lport_fcport_map, key, new_se_nacl); + fc_port->se_sess = se_sess; + nacl->fc_port = fc_port; + + pr_debug("Setup nacl->fc_port %p by s_id for se_nacl: %p, initiatorname: %s\n", + nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname); +} + +/* + * Expected to be called with struct qla_hw_data->tgt.sess_lock held + */ +static struct fc_port *tcm_qla2xxx_find_sess_by_loop_id( + scsi_qla_host_t *vha, + const uint16_t loop_id) +{ + struct tcm_qla2xxx_lport *lport; + struct se_node_acl *se_nacl; + struct tcm_qla2xxx_nacl *nacl; + struct tcm_qla2xxx_fc_loopid *fc_loopid; + + lport = vha->vha_tgt.target_lport_ptr; + if (!lport) { + pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); + dump_stack(); + return NULL; + } + + pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id); + + fc_loopid = lport->lport_loopid_map + loop_id; + se_nacl = fc_loopid->se_nacl; + if (!se_nacl) { + pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n", + loop_id); + return NULL; + } + + nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); + + if (!nacl->fc_port) { + pr_err("Unable to locate struct fc_port\n"); + return NULL; + } + + return nacl->fc_port; +} + +/* + * Expected to be called with struct qla_hw_data->tgt.sess_lock held + */ +static void tcm_qla2xxx_set_sess_by_loop_id( + struct tcm_qla2xxx_lport *lport, + struct se_node_acl *new_se_nacl, + struct tcm_qla2xxx_nacl *nacl, + struct se_session *se_sess, + struct fc_port *fc_port, + uint16_t loop_id) +{ + struct se_node_acl *saved_nacl; + struct tcm_qla2xxx_fc_loopid *fc_loopid; + + pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id); + + fc_loopid = &((struct tcm_qla2xxx_fc_loopid *) + lport->lport_loopid_map)[loop_id]; + + saved_nacl = fc_loopid->se_nacl; + if (!saved_nacl) { + pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n"); + fc_loopid->se_nacl = new_se_nacl; + if (fc_port->se_sess != se_sess) + fc_port->se_sess = se_sess; + if (nacl->fc_port != fc_port) + nacl->fc_port = fc_port; + return; + } + + if (nacl->fc_port) { + if (new_se_nacl == NULL) { + pr_debug("Clearing nacl->fc_port and fc_loopid->se_nacl\n"); + fc_loopid->se_nacl = NULL; + nacl->fc_port = NULL; + return; + } + + pr_debug("Replacing existing nacl->fc_port and fc_loopid->se_nacl\n"); + fc_loopid->se_nacl = new_se_nacl; + if (fc_port->se_sess != se_sess) + fc_port->se_sess = se_sess; + if (nacl->fc_port != fc_port) + nacl->fc_port = fc_port; + return; + } + + if (new_se_nacl == NULL) { + pr_debug("Clearing fc_loopid->se_nacl\n"); + fc_loopid->se_nacl = NULL; + return; + } + + pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->fc_port\n"); + fc_loopid->se_nacl = new_se_nacl; + if (fc_port->se_sess != se_sess) + fc_port->se_sess = 
se_sess; + if (nacl->fc_port != fc_port) + nacl->fc_port = fc_port; + + pr_debug("Setup nacl->fc_port %p by loop_id for se_nacl: %p, initiatorname: %s\n", + nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname); +} + +/* + * Should always be called with qla_hw_data->tgt.sess_lock held. + */ +static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, + struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess) +{ + struct se_session *se_sess = sess->se_sess; + + tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, + sess, port_id_to_be_id(sess->d_id)); + tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, + sess, sess->loop_id); +} + +static void tcm_qla2xxx_free_session(struct fc_port *sess) +{ + struct qla_tgt *tgt = sess->tgt; + struct qla_hw_data *ha = tgt->ha; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + struct se_session *se_sess; + struct tcm_qla2xxx_lport *lport; + + se_sess = sess->se_sess; + if (!se_sess) { + pr_err("struct fc_port->se_sess is NULL\n"); + dump_stack(); + return; + } + + lport = vha->vha_tgt.target_lport_ptr; + if (!lport) { + pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); + dump_stack(); + return; + } + target_wait_for_sess_cmds(se_sess); + + target_remove_session(se_sess); +} + +static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg, + struct se_session *se_sess, void *p) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + struct tcm_qla2xxx_lport *lport = tpg->lport; + struct qla_hw_data *ha = lport->qla_vha->hw; + struct se_node_acl *se_nacl = se_sess->se_node_acl; + struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, + struct tcm_qla2xxx_nacl, se_node_acl); + struct fc_port *qlat_sess = p; + uint16_t loop_id = qlat_sess->loop_id; + unsigned long flags; + + /* + * And now setup se_nacl and session pointers into HW lport internal + * mappings for fabric S_ID and LOOP_ID. + */ + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess, qlat_sess, + port_id_to_be_id(qlat_sess->d_id)); + tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, + se_sess, qlat_sess, loop_id); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + return 0; +} + +/* + * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl() + * to locate struct se_node_acl + */ +static int tcm_qla2xxx_check_initiator_node_acl( + scsi_qla_host_t *vha, + unsigned char *fc_wwpn, + struct fc_port *qlat_sess) +{ + struct qla_hw_data *ha = vha->hw; + struct tcm_qla2xxx_lport *lport; + struct tcm_qla2xxx_tpg *tpg; + struct se_session *se_sess; + unsigned char port_name[36]; + int num_tags = (ha->cur_fw_xcb_count) ? ha->cur_fw_xcb_count : + TCM_QLA2XXX_DEFAULT_TAGS; + + lport = vha->vha_tgt.target_lport_ptr; + if (!lport) { + pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); + dump_stack(); + return -EINVAL; + } + /* + * Locate the TPG=1 reference.. + */ + tpg = lport->tpg_1; + if (!tpg) { + pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n"); + return -EINVAL; + } + /* + * Format the FCP Initiator port_name into colon seperated values to + * match the format by tcm_qla2xxx explict ConfigFS NodeACLs. + */ + memset(&port_name, 0, 36); + snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn); + /* + * Locate our struct se_node_acl either from an explict NodeACL created + * via ConfigFS, or via running in TPG demo mode. 
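+	 * target_setup_session() allocates the se_session and its tag pool of
+	 * num_tags qla_tgt_cmd descriptors, then invokes
+	 * tcm_qla2xxx_session_cb() so the new session is wired into the lport
+	 * S_ID and loop ID lookup maps under ha->tgt.sess_lock.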
+ */ + se_sess = target_setup_session(&tpg->se_tpg, num_tags, + sizeof(struct qla_tgt_cmd), + TARGET_PROT_ALL, port_name, + qlat_sess, tcm_qla2xxx_session_cb); + if (IS_ERR(se_sess)) + return PTR_ERR(se_sess); + + return 0; +} + +static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id, + uint16_t loop_id, bool conf_compl_supported) +{ + struct qla_tgt *tgt = sess->tgt; + struct qla_hw_data *ha = tgt->ha; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr; + struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; + struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, + struct tcm_qla2xxx_nacl, se_node_acl); + u32 key; + + + if (sess->loop_id != loop_id || sess->d_id.b24 != s_id.b24) + pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n", + sess, sess->port_name, + sess->loop_id, loop_id, sess->d_id.b.domain, + sess->d_id.b.area, sess->d_id.b.al_pa, s_id.b.domain, + s_id.b.area, s_id.b.al_pa); + + if (sess->loop_id != loop_id) { + /* + * Because we can shuffle loop IDs around and we + * update different sessions non-atomically, we might + * have overwritten this session's old loop ID + * already, and we might end up overwriting some other + * session that will be updated later. So we have to + * be extra careful and we can't warn about those things... + */ + if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl) + lport->lport_loopid_map[sess->loop_id].se_nacl = NULL; + + lport->lport_loopid_map[loop_id].se_nacl = se_nacl; + + sess->loop_id = loop_id; + } + + if (sess->d_id.b24 != s_id.b24) { + key = (((u32) sess->d_id.b.domain << 16) | + ((u32) sess->d_id.b.area << 8) | + ((u32) sess->d_id.b.al_pa)); + + if (btree_lookup32(&lport->lport_fcport_map, key)) + WARN(btree_remove32(&lport->lport_fcport_map, key) != + se_nacl, "Found wrong se_nacl when updating s_id %x:%x:%x\n", + sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa); + else + WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n", + sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa); + + key = (((u32) s_id.b.domain << 16) | + ((u32) s_id.b.area << 8) | + ((u32) s_id.b.al_pa)); + + if (btree_lookup32(&lport->lport_fcport_map, key)) { + WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n", + s_id.b.domain, s_id.b.area, s_id.b.al_pa); + btree_update32(&lport->lport_fcport_map, key, se_nacl); + } else { + btree_insert32(&lport->lport_fcport_map, key, se_nacl, + GFP_ATOMIC); + } + + sess->d_id = s_id; + nacl->nport_id = key; + } + + sess->conf_compl_supported = conf_compl_supported; + +} + +/* + * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path. 
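+ * The qla2xxx LLD reaches these callbacks through ha->tgt.tgt_ops, which
+ * tcm_qla2xxx_lport_register_cb() points at tcm_qla2xxx_template below.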
+ */ +static const struct qla_tgt_func_tmpl tcm_qla2xxx_template = { + .find_cmd_by_tag = tcm_qla2xxx_find_cmd_by_tag, + .handle_cmd = tcm_qla2xxx_handle_cmd, + .handle_data = tcm_qla2xxx_handle_data, + .handle_tmr = tcm_qla2xxx_handle_tmr, + .get_cmd = tcm_qla2xxx_get_cmd, + .rel_cmd = tcm_qla2xxx_rel_cmd, + .free_cmd = tcm_qla2xxx_free_cmd, + .free_mcmd = tcm_qla2xxx_free_mcmd, + .free_session = tcm_qla2xxx_free_session, + .update_sess = tcm_qla2xxx_update_sess, + .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl, + .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id, + .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id, + .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, + .put_sess = tcm_qla2xxx_put_sess, + .shutdown_sess = tcm_qla2xxx_shutdown_sess, + .get_dif_tags = tcm_qla2xxx_dif_tags, + .chk_dif_tags = tcm_qla2xxx_chk_dif_tags, +}; + +static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport) +{ + int rc; + size_t map_sz; + + rc = btree_init32(&lport->lport_fcport_map); + if (rc) { + pr_err("Unable to initialize lport->lport_fcport_map btree\n"); + return rc; + } + + map_sz = array_size(65536, sizeof(struct tcm_qla2xxx_fc_loopid)); + + lport->lport_loopid_map = vzalloc(map_sz); + if (!lport->lport_loopid_map) { + pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n", map_sz); + btree_destroy32(&lport->lport_fcport_map); + return -ENOMEM; + } + pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n", map_sz); + return 0; +} + +static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha, + void *target_lport_ptr, + u64 npiv_wwpn, u64 npiv_wwnn) +{ + struct qla_hw_data *ha = vha->hw; + struct tcm_qla2xxx_lport *lport = + (struct tcm_qla2xxx_lport *)target_lport_ptr; + /* + * Setup tgt_ops, local pointer to vha and target_lport_ptr + */ + ha->tgt.tgt_ops = &tcm_qla2xxx_template; + vha->vha_tgt.target_lport_ptr = target_lport_ptr; + lport->qla_vha = vha; + + return 0; +} + +static struct se_wwn *tcm_qla2xxx_make_lport( + struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct tcm_qla2xxx_lport *lport; + u64 wwpn; + int ret = -ENODEV; + + if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0) + return ERR_PTR(-EINVAL); + + lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); + if (!lport) { + pr_err("Unable to allocate struct tcm_qla2xxx_lport\n"); + return ERR_PTR(-ENOMEM); + } + lport->lport_wwpn = wwpn; + tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN, + wwpn); + sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn); + + ret = tcm_qla2xxx_init_lport(lport); + if (ret != 0) + goto out; + + ret = qlt_lport_register(lport, wwpn, 0, 0, + tcm_qla2xxx_lport_register_cb); + if (ret != 0) + goto out_lport; + + return &lport->lport_wwn; +out_lport: + vfree(lport->lport_loopid_map); + btree_destroy32(&lport->lport_fcport_map); +out: + kfree(lport); + return ERR_PTR(ret); +} + +static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn) +{ + struct tcm_qla2xxx_lport *lport = container_of(wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct scsi_qla_host *vha = lport->qla_vha; + struct se_node_acl *node; + u32 key = 0; + + /* + * Call into qla2x_target.c LLD logic to complete the + * shutdown of struct qla_tgt after the call to + * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above.. 
+ */ + if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped) + qlt_stop_phase2(vha->vha_tgt.qla_tgt); + + qlt_lport_deregister(vha); + + vfree(lport->lport_loopid_map); + btree_for_each_safe32(&lport->lport_fcport_map, key, node) + btree_remove32(&lport->lport_fcport_map, key); + btree_destroy32(&lport->lport_fcport_map); + kfree(lport); +} + +static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha, + void *target_lport_ptr, + u64 npiv_wwpn, u64 npiv_wwnn) +{ + struct fc_vport *vport; + struct Scsi_Host *sh = base_vha->host; + struct scsi_qla_host *npiv_vha; + struct tcm_qla2xxx_lport *lport = + (struct tcm_qla2xxx_lport *)target_lport_ptr; + struct tcm_qla2xxx_lport *base_lport = + (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr; + struct fc_vport_identifiers vport_id; + + if (qla_ini_mode_enabled(base_vha)) { + pr_err("qla2xxx base_vha not enabled for target mode\n"); + return -EPERM; + } + + if (!base_lport || !base_lport->tpg_1 || + !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) { + pr_err("qla2xxx base_lport or tpg_1 not available\n"); + return -EPERM; + } + + memset(&vport_id, 0, sizeof(vport_id)); + vport_id.port_name = npiv_wwpn; + vport_id.node_name = npiv_wwnn; + vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; + vport_id.vport_type = FC_PORTTYPE_NPIV; + vport_id.disable = false; + + vport = fc_vport_create(sh, 0, &vport_id); + if (!vport) { + pr_err("fc_vport_create failed for qla2xxx_npiv\n"); + return -ENODEV; + } + /* + * Setup local pointer to NPIV vhba + target_lport_ptr + */ + npiv_vha = (struct scsi_qla_host *)vport->dd_data; + npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr; + lport->qla_vha = npiv_vha; + scsi_host_get(npiv_vha->host); + return 0; +} + + +static struct se_wwn *tcm_qla2xxx_npiv_make_lport( + struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct tcm_qla2xxx_lport *lport; + u64 phys_wwpn, npiv_wwpn, npiv_wwnn; + char *p, tmp[128]; + int ret; + + snprintf(tmp, 128, "%s", name); + + p = strchr(tmp, '@'); + if (!p) { + pr_err("Unable to locate NPIV '@' separator\n"); + return ERR_PTR(-EINVAL); + } + *p++ = '\0'; + + if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0) + return ERR_PTR(-EINVAL); + + if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1, + &npiv_wwpn, &npiv_wwnn) < 0) + return ERR_PTR(-EINVAL); + + lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); + if (!lport) { + pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n"); + return ERR_PTR(-ENOMEM); + } + lport->lport_npiv_wwpn = npiv_wwpn; + lport->lport_npiv_wwnn = npiv_wwnn; + sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn); + + ret = tcm_qla2xxx_init_lport(lport); + if (ret != 0) + goto out; + + ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn, + tcm_qla2xxx_lport_register_npiv_cb); + if (ret != 0) + goto out_lport; + + return &lport->lport_wwn; +out_lport: + vfree(lport->lport_loopid_map); + btree_destroy32(&lport->lport_fcport_map); +out: + kfree(lport); + return ERR_PTR(ret); +} + +static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn) +{ + struct tcm_qla2xxx_lport *lport = container_of(wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct scsi_qla_host *npiv_vha = lport->qla_vha; + struct qla_hw_data *ha = npiv_vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + + scsi_host_put(npiv_vha->host); + /* + * Notify libfc that we want to release the vha->fc_vport + */ + fc_vport_terminate(npiv_vha->fc_vport); + 
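+	/* Drop the reference held on the base physical port's Scsi_Host */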
scsi_host_put(base_vha->host); + kfree(lport); +} + + +static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item, + char *page) +{ + return sprintf(page, + "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on %s\n", + QLA2XXX_VERSION, utsname()->sysname, + utsname()->machine, utsname()->release); +} + +CONFIGFS_ATTR_RO(tcm_qla2xxx_wwn_, version); + +static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = { + &tcm_qla2xxx_wwn_attr_version, + NULL, +}; + +static const struct target_core_fabric_ops tcm_qla2xxx_ops = { + .module = THIS_MODULE, + .fabric_name = "qla2xxx", + .node_acl_size = sizeof(struct tcm_qla2xxx_nacl), + /* + * XXX: Limit assumes single page per scatter-gather-list entry. + * Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096 + */ + .max_data_sg_nents = 1200, + .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, + .tpg_get_tag = tcm_qla2xxx_get_tag, + .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, + .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, + .tpg_check_demo_mode_write_protect = + tcm_qla2xxx_check_demo_write_protect, + .tpg_check_prod_mode_write_protect = + tcm_qla2xxx_check_prod_write_protect, + .tpg_check_prot_fabric_only = tcm_qla2xxx_check_prot_fabric_only, + .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, + .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, + .check_stop_free = tcm_qla2xxx_check_stop_free, + .release_cmd = tcm_qla2xxx_release_cmd, + .close_session = tcm_qla2xxx_close_session, + .sess_get_initiator_sid = NULL, + .write_pending = tcm_qla2xxx_write_pending, + .get_cmd_state = tcm_qla2xxx_get_cmd_state, + .queue_data_in = tcm_qla2xxx_queue_data_in, + .queue_status = tcm_qla2xxx_queue_status, + .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, + .aborted_task = tcm_qla2xxx_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c + */ + .fabric_make_wwn = tcm_qla2xxx_make_lport, + .fabric_drop_wwn = tcm_qla2xxx_drop_lport, + .fabric_make_tpg = tcm_qla2xxx_make_tpg, + .fabric_enable_tpg = tcm_qla2xxx_enable_tpg, + .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, + .fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl, + + .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, + .tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs, + .tfc_tpg_attrib_attrs = tcm_qla2xxx_tpg_attrib_attrs, +}; + +static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { + .module = THIS_MODULE, + .fabric_name = "qla2xxx_npiv", + .node_acl_size = sizeof(struct tcm_qla2xxx_nacl), + .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, + .tpg_get_tag = tcm_qla2xxx_get_tag, + .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, + .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, + .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode, + .tpg_check_prod_mode_write_protect = + tcm_qla2xxx_check_prod_write_protect, + .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, + .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, + .check_stop_free = tcm_qla2xxx_check_stop_free, + .release_cmd = tcm_qla2xxx_release_cmd, + .close_session = tcm_qla2xxx_close_session, + .sess_get_initiator_sid = NULL, + .write_pending = tcm_qla2xxx_write_pending, + .get_cmd_state = tcm_qla2xxx_get_cmd_state, + .queue_data_in = tcm_qla2xxx_queue_data_in, + .queue_status = tcm_qla2xxx_queue_status, + .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, + .aborted_task = tcm_qla2xxx_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c + */ + 
.fabric_make_wwn = tcm_qla2xxx_npiv_make_lport, + .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport, + .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg, + .fabric_enable_tpg = tcm_qla2xxx_npiv_enable_tpg, + .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, + .fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl, + + .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, +}; + +static int tcm_qla2xxx_register_configfs(void) +{ + int ret; + + pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on %s\n", + QLA2XXX_VERSION, utsname()->sysname, + utsname()->machine, utsname()->release); + + ret = target_register_template(&tcm_qla2xxx_ops); + if (ret) + return ret; + + ret = target_register_template(&tcm_qla2xxx_npiv_ops); + if (ret) + goto out_fabric; + + tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free", + WQ_MEM_RECLAIM, 0); + if (!tcm_qla2xxx_free_wq) { + ret = -ENOMEM; + goto out_fabric_npiv; + } + + return 0; + +out_fabric_npiv: + target_unregister_template(&tcm_qla2xxx_npiv_ops); +out_fabric: + target_unregister_template(&tcm_qla2xxx_ops); + return ret; +} + +static void tcm_qla2xxx_deregister_configfs(void) +{ + destroy_workqueue(tcm_qla2xxx_free_wq); + + target_unregister_template(&tcm_qla2xxx_ops); + target_unregister_template(&tcm_qla2xxx_npiv_ops); +} + +static int __init tcm_qla2xxx_init(void) +{ + int ret; + + BUILD_BUG_ON(sizeof(struct abts_recv_from_24xx) != 64); + BUILD_BUG_ON(sizeof(struct abts_resp_from_24xx_fw) != 64); + BUILD_BUG_ON(sizeof(struct atio7_fcp_cmnd) != 32); + BUILD_BUG_ON(sizeof(struct atio_from_isp) != 64); + BUILD_BUG_ON(sizeof(struct ba_acc_le) != 12); + BUILD_BUG_ON(sizeof(struct ba_rjt_le) != 4); + BUILD_BUG_ON(sizeof(struct ctio7_from_24xx) != 64); + BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64); + BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64); + BUILD_BUG_ON(sizeof(struct ctio_crc_from_fw) != 64); + BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64); + BUILD_BUG_ON(sizeof(struct fcp_hdr) != 24); + BUILD_BUG_ON(sizeof(struct fcp_hdr_le) != 24); + BUILD_BUG_ON(sizeof(struct nack_to_isp) != 64); + + ret = tcm_qla2xxx_register_configfs(); + if (ret < 0) + return ret; + + return 0; +} + +static void __exit tcm_qla2xxx_exit(void) +{ + tcm_qla2xxx_deregister_configfs(); +} + +MODULE_DESCRIPTION("TCM QLA24XX+ series NPIV enabled fabric driver"); +MODULE_LICENSE("GPL"); +module_init(tcm_qla2xxx_init); +module_exit(tcm_qla2xxx_exit); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h new file mode 100644 index 000000000..147cf6c90 --- /dev/null +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include + +/* length of ASCII WWPNs including pad */ +#define TCM_QLA2XXX_NAMELEN 32 +/* + * Number of pre-allocated per-session tags, based upon the worst-case + * per port number of iocbs + */ +#define TCM_QLA2XXX_DEFAULT_TAGS 2088 + +#include "qla_target.h" + +struct tcm_qla2xxx_nacl { + struct se_node_acl se_node_acl; + + /* From libfc struct fc_rport->port_id */ + u32 nport_id; + /* Binary World Wide unique Node Name for remote FC Initiator Nport */ + u64 nport_wwnn; + /* ASCII formatted WWPN for FC Initiator Nport */ + char nport_name[TCM_QLA2XXX_NAMELEN]; + /* Pointer to fc_port */ + struct fc_port *fc_port; + /* Pointer to TCM FC nexus */ + struct se_session *nport_nexus; +}; + +struct tcm_qla2xxx_tpg_attrib { + int generate_node_acls; + int cache_dynamic_acls; + int demo_mode_write_protect; + int prod_mode_write_protect; + int demo_mode_login_only; + int fabric_prot_type; + int jam_host; +}; + 
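+/*
+ * Defaults for the attributes above are set at TPG creation time (see
+ * tcm_qla2xxx_npiv_make_tpg()): demo-mode NodeACL generation, demo-mode
+ * WRITE protect, cached dynamic ACLs and demo-mode login-only all start
+ * enabled, and each field is then exposed via the
+ * tcm_qla2xxx_tpg_attrib_cit configfs group referenced below.
+ */
+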
+struct tcm_qla2xxx_tpg { + /* FC lport target portal group tag for TCM */ + u16 lport_tpgt; + /* Atomic bit to determine TPG active status */ + atomic_t lport_tpg_enabled; + /* Pointer back to tcm_qla2xxx_lport */ + struct tcm_qla2xxx_lport *lport; + /* Used by tcm_qla2xxx_tpg_attrib_cit */ + struct tcm_qla2xxx_tpg_attrib tpg_attrib; + /* Returned by tcm_qla2xxx_make_tpg() */ + struct se_portal_group se_tpg; +}; + +struct tcm_qla2xxx_fc_loopid { + struct se_node_acl *se_nacl; +}; + +struct tcm_qla2xxx_lport { + /* Binary World Wide unique Port Name for FC Target Lport */ + u64 lport_wwpn; + /* Binary World Wide unique Port Name for FC NPIV Target Lport */ + u64 lport_npiv_wwpn; + /* Binary World Wide unique Node Name for FC NPIV Target Lport */ + u64 lport_npiv_wwnn; + /* ASCII formatted WWPN for FC Target Lport */ + char lport_name[TCM_QLA2XXX_NAMELEN]; + /* ASCII formatted naa WWPN for VPD page 83 etc */ + char lport_naa_name[TCM_QLA2XXX_NAMELEN]; + /* map for fc_port pointers in 24-bit FC Port ID space */ + struct btree_head32 lport_fcport_map; + /* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */ + struct tcm_qla2xxx_fc_loopid *lport_loopid_map; + /* Pointer to struct scsi_qla_host from qla2xxx LLD */ + struct scsi_qla_host *qla_vha; + /* Pointer to struct qla_tgt pointer */ + struct qla_tgt lport_qla_tgt; + /* Pointer to TPG=1 for non NPIV mode */ + struct tcm_qla2xxx_tpg *tpg_1; + /* Returned by tcm_qla2xxx_make_lport() */ + struct se_wwn lport_wwn; +}; diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig new file mode 100644 index 000000000..2fa249db6 --- /dev/null +++ b/drivers/scsi/qla4xxx/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +config SCSI_QLA_ISCSI + tristate "QLogic ISP4XXX and ISP82XX host adapter family support" + depends on PCI && SCSI && NET + select SCSI_ISCSI_ATTRS + select ISCSI_BOOT_SYSFS + help + This driver supports the QLogic 40xx (ISP4XXX), 8022 (ISP82XX) + and 8032 (ISP83XX) iSCSI host adapter family. 
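The tcm_qla2xxx_lport definition above keeps two parallel session lookup paths: lport_fcport_map, a 32-bit btree keyed by the 24-bit FC port ID packed as domain:area:al_pa (the same packing tcm_qla2xxx_update_sess() and sid_to_key() use), and lport_loopid_map, a vzalloc'ed array of 65536 tcm_qla2xxx_fc_loopid slots indexed directly by loop ID. The sketch below is a minimal user-space illustration of that key packing and direct indexing, not driver code; the names pack_port_id_key and demo_nacl, the sample WWPN string, and the toy pointer array are invented for the example, and the driver itself uses btree_insert32()/btree_lookup32() from lib/btree rather than a plain array for the port-ID map.

/* Minimal user-space sketch (not driver code): shows how a 24-bit FC
 * port ID collapses into the 32-bit key used for lport_fcport_map and
 * how a loop ID indexes a flat 65536-entry table like lport_loopid_map. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_nacl {
	const char *initiatorname;
};

/* Same packing as tcm_qla2xxx_update_sess(): domain:area:al_pa -> u32 key */
static uint32_t pack_port_id_key(uint8_t domain, uint8_t area, uint8_t al_pa)
{
	return ((uint32_t)domain << 16) | ((uint32_t)area << 8) | al_pa;
}

int main(void)
{
	/* stand-in for the vzalloc'ed lport_loopid_map[65536] */
	struct demo_nacl **loopid_map = calloc(65536, sizeof(*loopid_map));
	struct demo_nacl nacl = { .initiatorname = "21:00:00:24:ff:31:21:d0" };
	uint16_t loop_id = 0x0081;
	uint32_t key = pack_port_id_key(0x01, 0x02, 0xef);

	if (!loopid_map)
		return 1;

	/* analogue of tcm_qla2xxx_set_sess_by_loop_id() filling a slot */
	loopid_map[loop_id] = &nacl;

	printf("s_id 01:02:ef packs to key 0x%06x\n", key);	/* 0x0102ef */
	printf("loop_id 0x%04x -> %s\n", loop_id,
	       loopid_map[loop_id]->initiatorname);

	free(loopid_map);
	return 0;
}

A btree keyed on the packed port ID keeps the sparse 24-bit S_ID space compact, while the loop-ID side can afford a flat table because loop IDs are only 16 bits wide.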
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile new file mode 100644 index 000000000..1f8a9096c --- /dev/null +++ b/drivers/scsi/qla4xxx/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \ + ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o ql4_83xx.o + +obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o + diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c new file mode 100644 index 000000000..db41d90a5 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_83xx.c @@ -0,0 +1,1584 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ + +#include + +#include "ql4_def.h" +#include "ql4_version.h" +#include "ql4_glbl.h" +#include "ql4_dbg.h" +#include "ql4_inline.h" + +uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr) +{ + return readl((void __iomem *)(ha->nx_pcibase + addr)); +} + +void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val) +{ + writel(val, (void __iomem *)(ha->nx_pcibase + addr)); +} + +static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr) +{ + uint32_t val; + int ret_val = QLA_SUCCESS; + + qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr); + val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num)); + if (val != addr) { + ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n", + __func__, addr, val); + ret_val = QLA_ERROR; + } + + return ret_val; +} + +int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr, + uint32_t *data) +{ + int ret_val; + + ret_val = qla4_83xx_set_win_base(ha, addr); + + if (ret_val == QLA_SUCCESS) { + *data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD); + } else { + *data = 0xffffffff; + ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n", + __func__, addr); + } + + return ret_val; +} + +int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr, + uint32_t data) +{ + int ret_val; + + ret_val = qla4_83xx_set_win_base(ha, addr); + + if (ret_val == QLA_SUCCESS) + qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data); + else + ql4_printk(KERN_ERR, ha, "%s: failed wrt to addr 0x%x, data 0x%x\n", + __func__, addr, data); + + return ret_val; +} + +static int qla4_83xx_flash_lock(struct scsi_qla_host *ha) +{ + int lock_owner; + int timeout = 0; + uint32_t lock_status = 0; + int ret_val = QLA_SUCCESS; + + while (lock_status == 0) { + lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK); + if (lock_status) + break; + + if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) { + lock_owner = qla4_83xx_rd_reg(ha, + QLA83XX_FLASH_LOCK_ID); + ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n", + __func__, ha->func_num, lock_owner); + ret_val = QLA_ERROR; + break; + } + msleep(20); + } + + qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num); + return ret_val; +} + +static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha) +{ + /* Reading FLASH_UNLOCK register unlocks the Flash */ + qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF); + qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK); +} + +int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr, + uint8_t *p_data, int u32_word_count) +{ + int i; + uint32_t u32_word; + uint32_t addr = flash_addr; + int ret_val = QLA_SUCCESS; + + ret_val = qla4_83xx_flash_lock(ha); + if (ret_val == QLA_ERROR) + goto exit_lock_error; + + if (addr & 0x03) { + 
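+		/* Flash words are read 32 bits at a time, so reject any
+		 * address that is not 4-byte aligned. */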
ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n", + __func__, addr); + ret_val = QLA_ERROR; + goto exit_flash_read; + } + + for (i = 0; i < u32_word_count; i++) { + ret_val = qla4_83xx_wr_reg_indirect(ha, + QLA83XX_FLASH_DIRECT_WINDOW, + (addr & 0xFFFF0000)); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW\n!", + __func__, addr); + goto exit_flash_read; + } + + ret_val = qla4_83xx_rd_reg_indirect(ha, + QLA83XX_FLASH_DIRECT_DATA(addr), + &u32_word); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n", + __func__, addr); + goto exit_flash_read; + } + + *(__le32 *)p_data = le32_to_cpu(u32_word); + p_data = p_data + 4; + addr = addr + 4; + } + +exit_flash_read: + qla4_83xx_flash_unlock(ha); + +exit_lock_error: + return ret_val; +} + +int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha, + uint32_t flash_addr, uint8_t *p_data, + int u32_word_count) +{ + uint32_t i; + uint32_t u32_word; + uint32_t flash_offset; + uint32_t addr = flash_addr; + int ret_val = QLA_SUCCESS; + + flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1); + + if (addr & 0x3) { + ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n", + __func__, addr); + ret_val = QLA_ERROR; + goto exit_lockless_read; + } + + ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW, + addr); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", + __func__, addr); + goto exit_lockless_read; + } + + /* Check if data is spread across multiple sectors */ + if ((flash_offset + (u32_word_count * sizeof(uint32_t))) > + (QLA83XX_FLASH_SECTOR_SIZE - 1)) { + + /* Multi sector read */ + for (i = 0; i < u32_word_count; i++) { + ret_val = qla4_83xx_rd_reg_indirect(ha, + QLA83XX_FLASH_DIRECT_DATA(addr), + &u32_word); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n", + __func__, addr); + goto exit_lockless_read; + } + + *(__le32 *)p_data = le32_to_cpu(u32_word); + p_data = p_data + 4; + addr = addr + 4; + flash_offset = flash_offset + 4; + + if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) { + /* This write is needed once for each sector */ + ret_val = qla4_83xx_wr_reg_indirect(ha, + QLA83XX_FLASH_DIRECT_WINDOW, + addr); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", + __func__, addr); + goto exit_lockless_read; + } + flash_offset = 0; + } + } + } else { + /* Single sector read */ + for (i = 0; i < u32_word_count; i++) { + ret_val = qla4_83xx_rd_reg_indirect(ha, + QLA83XX_FLASH_DIRECT_DATA(addr), + &u32_word); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n", + __func__, addr); + goto exit_lockless_read; + } + + *(__le32 *)p_data = le32_to_cpu(u32_word); + p_data = p_data + 4; + addr = addr + 4; + } + } + +exit_lockless_read: + return ret_val; +} + +void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha) +{ + if (qla4_83xx_flash_lock(ha)) + ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__); + + /* + * We got the lock, or someone else is holding the lock + * since we are restting, forcefully unlock + */ + qla4_83xx_flash_unlock(ha); +} + +#define INTENT_TO_RECOVER 0x01 +#define PROCEED_TO_RECOVER 0x02 + +static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha) +{ + + uint32_t lock = 0, lockid; + int ret_val = QLA_ERROR; + + lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY); + + /* 
Check for other Recovery in progress, go wait */ + if ((lockid & 0x3) != 0) + goto exit_lock_recovery; + + /* Intent to Recover */ + ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, + (ha->func_num << 2) | INTENT_TO_RECOVER); + + msleep(200); + + /* Check Intent to Recover is advertised */ + lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY); + if ((lockid & 0x3C) != (ha->func_num << 2)) + goto exit_lock_recovery; + + ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n", + __func__, ha->func_num); + + /* Proceed to Recover */ + ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, + (ha->func_num << 2) | PROCEED_TO_RECOVER); + + /* Force Unlock */ + ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF); + ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK); + + /* Clear bits 0-5 in IDC_RECOVERY register*/ + ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0); + + /* Get lock */ + lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK); + if (lock) { + lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID); + lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num; + ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid); + ret_val = QLA_SUCCESS; + } + +exit_lock_recovery: + return ret_val; +} + +#define QLA83XX_DRV_LOCK_MSLEEP 200 + +int qla4_83xx_drv_lock(struct scsi_qla_host *ha) +{ + int timeout = 0; + uint32_t status = 0; + int ret_val = QLA_SUCCESS; + uint32_t first_owner = 0; + uint32_t tmo_owner = 0; + uint32_t lock_id; + uint32_t func_num; + uint32_t lock_cnt; + + while (status == 0) { + status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK); + if (status) { + /* Increment Counter (8-31) and update func_num (0-7) on + * getting a successful lock */ + lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID); + lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num; + qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id); + break; + } + + if (timeout == 0) + /* Save counter + ID of function holding the lock for + * first failure */ + first_owner = ha->isp_ops->rd_reg_direct(ha, + QLA83XX_DRV_LOCK_ID); + + if (++timeout >= + (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) { + tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID); + func_num = tmo_owner & 0xFF; + lock_cnt = tmo_owner >> 8; + ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n", + __func__, ha->func_num, func_num, lock_cnt, + (first_owner & 0xFF)); + + if (first_owner != tmo_owner) { + /* Some other driver got lock, OR same driver + * got lock again (counter value changed), when + * we were waiting for lock. + * Retry for another 2 sec */ + ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n", + __func__, ha->func_num); + timeout = 0; + } else { + /* Same driver holding lock > 2sec. 
+ * Force Recovery */ + ret_val = qla4_83xx_lock_recovery(ha); + if (ret_val == QLA_SUCCESS) { + /* Recovered and got lock */ + ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n", + __func__, ha->func_num); + break; + } + /* Recovery Failed, some other function + * has the lock, wait for 2secs and retry */ + ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n", + __func__, ha->func_num); + timeout = 0; + } + } + msleep(QLA83XX_DRV_LOCK_MSLEEP); + } + + return ret_val; +} + +void qla4_83xx_drv_unlock(struct scsi_qla_host *ha) +{ + int id; + + id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID); + + if ((id & 0xFF) != ha->func_num) { + ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n", + __func__, ha->func_num, (id & 0xFF)); + return; + } + + /* Keep lock counter value, update the ha->func_num to 0xFF */ + qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF)); + qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK); +} + +void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha) +{ + uint32_t idc_ctrl; + + idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); + idc_ctrl |= DONTRESET_BIT0; + qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl); + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__, + idc_ctrl)); +} + +void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha) +{ + uint32_t idc_ctrl; + + idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); + idc_ctrl &= ~DONTRESET_BIT0; + qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl); + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__, + idc_ctrl)); +} + +int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha) +{ + uint32_t idc_ctrl; + + idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); + return idc_ctrl & DONTRESET_BIT0; +} + +/*-------------------------IDC State Machine ---------------------*/ + +enum { + UNKNOWN_CLASS = 0, + NIC_CLASS, + FCOE_CLASS, + ISCSI_CLASS +}; + +struct device_info { + int func_num; + int device_type; + int port_num; +}; + +int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha) +{ + uint32_t drv_active; + uint32_t dev_part, dev_part1, dev_part2; + int i; + struct device_info device_map[16]; + int func_nibble; + int nibble; + int nic_present = 0; + int iscsi_present = 0; + int iscsi_func_low = 0; + + /* Use the dev_partition register to determine the PCI function number + * and then check drv_active register to see which driver is loaded */ + dev_part1 = qla4_83xx_rd_reg(ha, + ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]); + dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2); + drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]); + + /* Each function has 4 bits in dev_partition Info register, + * Lower 2 bits - device type, Upper 2 bits - physical port number */ + dev_part = dev_part1; + for (i = nibble = 0; i <= 15; i++, nibble++) { + func_nibble = dev_part & (0xF << (nibble * 4)); + func_nibble >>= (nibble * 4); + device_map[i].func_num = i; + device_map[i].device_type = func_nibble & 0x3; + device_map[i].port_num = func_nibble & 0xC; + + if (device_map[i].device_type == NIC_CLASS) { + if (drv_active & (1 << device_map[i].func_num)) { + nic_present++; + break; + } + } else if (device_map[i].device_type == ISCSI_CLASS) { + if (drv_active & (1 << device_map[i].func_num)) { + if (!iscsi_present || + iscsi_func_low > device_map[i].func_num) + iscsi_func_low = device_map[i].func_num; + + iscsi_present++; + } + } + + /* For function_num[8..15] get info from dev_part2 register */ + if (nibble == 7) 
{ + nibble = 0; + dev_part = dev_part2; + } + } + + /* NIC, iSCSI and FCOE are the Reset owners based on order, NIC gets + * precedence over iSCSI and FCOE and iSCSI over FCOE, based on drivers + * present. */ + if (!nic_present && (ha->func_num == iscsi_func_low)) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: can reset - NIC not present and lower iSCSI function is %d\n", + __func__, ha->func_num)); + return 1; + } + + return 0; +} + +/** + * qla4_83xx_need_reset_handler - Code to start reset sequence + * @ha: pointer to adapter structure + * + * Note: IDC lock must be held upon entry + **/ +void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha) +{ + uint32_t dev_state, drv_state, drv_active; + unsigned long reset_timeout, dev_init_timeout; + + ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n", + __func__); + + if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) { + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n", + __func__)); + qla4_8xxx_set_rst_ready(ha); + + /* Non-reset owners ACK Reset and wait for device INIT state + * as part of Reset Recovery by Reset Owner */ + dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); + + do { + if (time_after_eq(jiffies, dev_init_timeout)) { + ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n", + __func__); + break; + } + + ha->isp_ops->idc_unlock(ha); + msleep(1000); + ha->isp_ops->idc_lock(ha); + + dev_state = qla4_8xxx_rd_direct(ha, + QLA8XXX_CRB_DEV_STATE); + } while (dev_state == QLA8XXX_DEV_NEED_RESET); + } else { + qla4_8xxx_set_rst_ready(ha); + reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); + drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); + drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); + + ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n", + __func__, drv_state, drv_active); + + while (drv_state != drv_active) { + if (time_after_eq(jiffies, reset_timeout)) { + ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! 
drv_state: 0x%08x, drv_active: 0x%08x\n", + __func__, DRIVER_NAME, drv_state, + drv_active); + break; + } + + ha->isp_ops->idc_unlock(ha); + msleep(1000); + ha->isp_ops->idc_lock(ha); + + drv_state = qla4_8xxx_rd_direct(ha, + QLA8XXX_CRB_DRV_STATE); + drv_active = qla4_8xxx_rd_direct(ha, + QLA8XXX_CRB_DRV_ACTIVE); + } + + if (drv_state != drv_active) { + ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n", + __func__, (drv_active ^ drv_state)); + drv_active = drv_active & drv_state; + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, + drv_active); + } + + clear_bit(AF_8XXX_RST_OWNER, &ha->flags); + /* Start Reset Recovery */ + qla4_8xxx_device_bootstrap(ha); + } +} + +void qla4_83xx_get_idc_param(struct scsi_qla_host *ha) +{ + uint32_t idc_params, ret_val; + + ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR, + (uint8_t *)&idc_params, 1); + if (ret_val == QLA_SUCCESS) { + ha->nx_dev_init_timeout = idc_params & 0xFFFF; + ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF; + } else { + ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT; + ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT; + } + + DEBUG2(ql4_printk(KERN_DEBUG, ha, + "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n", + __func__, ha->nx_dev_init_timeout, + ha->nx_reset_timeout)); +} + +/*-------------------------Reset Sequence Functions-----------------------*/ + +static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha) +{ + uint8_t *phdr; + + if (!ha->reset_tmplt.buff) { + ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n", + __func__); + return; + } + + phdr = ha->reset_tmplt.buff; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n", + *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4), + *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8), + *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12), + *(phdr+13), *(phdr+14), *(phdr+15))); +} + +static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha) +{ + uint8_t *p_cache; + uint32_t src, count, size; + uint64_t dest; + int ret_val = QLA_SUCCESS; + + src = QLA83XX_BOOTLOADER_FLASH_ADDR; + dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR); + size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE); + + /* 128 bit alignment check */ + if (size & 0xF) + size = (size + 16) & ~0xF; + + /* 16 byte count */ + count = size/16; + + p_cache = vmalloc(size); + if (p_cache == NULL) { + ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n", + __func__); + ret_val = QLA_ERROR; + goto exit_copy_bootloader; + } + + ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache, + size / sizeof(uint32_t)); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n", + __func__); + goto exit_copy_error; + } + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n", + __func__)); + + /* 128 bit/16 byte write to MS memory */ + ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache, + count); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n", + __func__); + goto exit_copy_error; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n", + __func__, size)); + +exit_copy_error: + vfree(p_cache); + +exit_copy_bootloader: + return ret_val; +} + +static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha) +{ + uint32_t val, ret_val = QLA_ERROR; + int retries = 
CRB_CMDPEG_CHECK_RETRY_COUNT; + + do { + val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE); + if (val == PHAN_INITIALIZE_COMPLETE) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Command Peg initialization complete. State=0x%x\n", + __func__, val)); + ret_val = QLA_SUCCESS; + break; + } + msleep(CRB_CMDPEG_CHECK_DELAY); + } while (--retries); + + return ret_val; +} + +/** + * qla4_83xx_poll_reg - Poll the given CRB addr for duration msecs till + * value read ANDed with test_mask is equal to test_result. + * + * @ha : Pointer to adapter structure + * @addr : CRB register address + * @duration : Poll for total of "duration" msecs + * @test_mask : Mask value read with "test_mask" + * @test_result : Compare (value&test_mask) with test_result. + **/ +static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr, + int duration, uint32_t test_mask, + uint32_t test_result) +{ + uint32_t value; + uint8_t retries; + int ret_val = QLA_SUCCESS; + + ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value); + if (ret_val == QLA_ERROR) + goto exit_poll_reg; + + retries = duration / 10; + do { + if ((value & test_mask) != test_result) { + msleep(duration / 10); + ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value); + if (ret_val == QLA_ERROR) + goto exit_poll_reg; + + ret_val = QLA_ERROR; + } else { + ret_val = QLA_SUCCESS; + break; + } + } while (retries--); + +exit_poll_reg: + if (ret_val == QLA_ERROR) { + ha->reset_tmplt.seq_error++; + ql4_printk(KERN_ERR, ha, "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n", + __func__, value, test_mask, test_result); + } + + return ret_val; +} + +static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha) +{ + uint32_t sum = 0; + uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff; + int u16_count = ha->reset_tmplt.hdr->size / sizeof(uint16_t); + int ret_val; + + while (u16_count-- > 0) + sum += *buff++; + + while (sum >> 16) + sum = (sum & 0xFFFF) + (sum >> 16); + + /* checksum of 0 indicates a valid template */ + if (~sum) { + ret_val = QLA_SUCCESS; + } else { + ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n", + __func__); + ret_val = QLA_ERROR; + } + + return ret_val; +} + +/** + * qla4_83xx_read_reset_template - Read Reset Template from Flash + * @ha: Pointer to adapter structure + **/ +void qla4_83xx_read_reset_template(struct scsi_qla_host *ha) +{ + uint8_t *p_buff; + uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size; + uint32_t ret_val; + + ha->reset_tmplt.seq_error = 0; + ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE); + if (ha->reset_tmplt.buff == NULL) { + ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n", + __func__); + goto exit_read_reset_template; + } + + p_buff = ha->reset_tmplt.buff; + addr = QLA83XX_RESET_TEMPLATE_ADDR; + + tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) / + sizeof(uint32_t); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Read template hdr size %d from Flash\n", + __func__, tmplt_hdr_def_size)); + + /* Copy template header from flash */ + ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff, + tmplt_hdr_def_size); + if (ret_val != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n", + __func__); + goto exit_read_template_error; + } + + ha->reset_tmplt.hdr = + (struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff; + + /* Validate the template header size and signature */ + tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t); + if ((tmplt_hdr_size != tmplt_hdr_def_size) || + (ha->reset_tmplt.hdr->signature != 
RESET_TMPLT_HDR_SIGNATURE)) { + ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n", + __func__, tmplt_hdr_size, tmplt_hdr_def_size); + goto exit_read_template_error; + } + + addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size; + p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size; + tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size - + ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Read rest of the template size %d\n", + __func__, ha->reset_tmplt.hdr->size)); + + /* Copy rest of the template */ + ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff, + tmplt_hdr_def_size); + if (ret_val != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n", + __func__); + goto exit_read_template_error; + } + + /* Integrity check */ + if (qla4_83xx_reset_seq_checksum_test(ha)) { + ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n", + __func__); + goto exit_read_template_error; + } + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n", + __func__)); + + /* Get STOP, START, INIT sequence offsets */ + ha->reset_tmplt.init_offset = ha->reset_tmplt.buff + + ha->reset_tmplt.hdr->init_seq_offset; + ha->reset_tmplt.start_offset = ha->reset_tmplt.buff + + ha->reset_tmplt.hdr->start_seq_offset; + ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff + + ha->reset_tmplt.hdr->hdr_size; + qla4_83xx_dump_reset_seq_hdr(ha); + + goto exit_read_reset_template; + +exit_read_template_error: + vfree(ha->reset_tmplt.buff); + +exit_read_reset_template: + return; +} + +/** + * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr. + * + * @ha : Pointer to adapter structure + * @raddr : CRB address to read from + * @waddr : CRB address to write to + **/ +static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha, + uint32_t raddr, uint32_t waddr) +{ + uint32_t value; + + qla4_83xx_rd_reg_indirect(ha, raddr, &value); + qla4_83xx_wr_reg_indirect(ha, waddr, value); +} + +/** + * qla4_83xx_rmw_crb_reg - Read Modify Write crb register + * + * This function read value from raddr, AND with test_mask, + * Shift Left,Right/OR/XOR with values RMW header and write value to waddr. + * + * @ha : Pointer to adapter structure + * @raddr : CRB address to read from + * @waddr : CRB address to write to + * @p_rmw_hdr : header with shift/or/xor values. 
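+ *
+ * Example: with test_mask 0x000000FF, shl 8, shr 0, or_value 0x1 and
+ * xor_value 0, the low byte read from raddr is shifted into bits 15:8,
+ * OR-ed with 1, and the result is written to waddr.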
+ **/ +static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr, + uint32_t waddr, + struct qla4_83xx_rmw *p_rmw_hdr) +{ + uint32_t value; + + if (p_rmw_hdr->index_a) + value = ha->reset_tmplt.array[p_rmw_hdr->index_a]; + else + qla4_83xx_rd_reg_indirect(ha, raddr, &value); + + value &= p_rmw_hdr->test_mask; + value <<= p_rmw_hdr->shl; + value >>= p_rmw_hdr->shr; + value |= p_rmw_hdr->or_value; + value ^= p_rmw_hdr->xor_value; + + qla4_83xx_wr_reg_indirect(ha, waddr, value); + + return; +} + +static void qla4_83xx_write_list(struct scsi_qla_host *ha, + struct qla4_83xx_reset_entry_hdr *p_hdr) +{ + struct qla4_83xx_entry *p_entry; + uint32_t i; + + p_entry = (struct qla4_83xx_entry *) + ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2); + if (p_hdr->delay) + udelay((uint32_t)(p_hdr->delay)); + } +} + +static void qla4_83xx_read_write_list(struct scsi_qla_host *ha, + struct qla4_83xx_reset_entry_hdr *p_hdr) +{ + struct qla4_83xx_entry *p_entry; + uint32_t i; + + p_entry = (struct qla4_83xx_entry *) + ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2); + if (p_hdr->delay) + udelay((uint32_t)(p_hdr->delay)); + } +} + +static void qla4_83xx_poll_list(struct scsi_qla_host *ha, + struct qla4_83xx_reset_entry_hdr *p_hdr) +{ + long delay; + struct qla4_83xx_entry *p_entry; + struct qla4_83xx_poll *p_poll; + uint32_t i; + uint32_t value; + + p_poll = (struct qla4_83xx_poll *) + ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); + + /* Entries start after 8 byte qla4_83xx_poll, poll header contains + * the test_mask, test_value. 
*/ + p_entry = (struct qla4_83xx_entry *)((char *)p_poll + + sizeof(struct qla4_83xx_poll)); + + delay = (long)p_hdr->delay; + if (!delay) { + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla4_83xx_poll_reg(ha, p_entry->arg1, delay, + p_poll->test_mask, + p_poll->test_value); + } + } else { + for (i = 0; i < p_hdr->count; i++, p_entry++) { + if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay, + p_poll->test_mask, + p_poll->test_value)) { + qla4_83xx_rd_reg_indirect(ha, p_entry->arg1, + &value); + qla4_83xx_rd_reg_indirect(ha, p_entry->arg2, + &value); + } + } + } +} + +static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha, + struct qla4_83xx_reset_entry_hdr *p_hdr) +{ + long delay; + struct qla4_83xx_quad_entry *p_entry; + struct qla4_83xx_poll *p_poll; + uint32_t i; + + p_poll = (struct qla4_83xx_poll *) + ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); + p_entry = (struct qla4_83xx_quad_entry *) + ((char *)p_poll + sizeof(struct qla4_83xx_poll)); + delay = (long)p_hdr->delay; + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr, + p_entry->dr_value); + qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr, + p_entry->ar_value); + if (delay) { + if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay, + p_poll->test_mask, + p_poll->test_value)) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Timeout Error: poll list, item_num %d, entry_num %d\n", + __func__, i, + ha->reset_tmplt.seq_index)); + } + } + } +} + +static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha, + struct qla4_83xx_reset_entry_hdr *p_hdr) +{ + struct qla4_83xx_entry *p_entry; + struct qla4_83xx_rmw *p_rmw_hdr; + uint32_t i; + + p_rmw_hdr = (struct qla4_83xx_rmw *) + ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); + p_entry = (struct qla4_83xx_entry *) + ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw)); + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2, + p_rmw_hdr); + if (p_hdr->delay) + udelay((uint32_t)(p_hdr->delay)); + } +} + +static void qla4_83xx_pause(struct scsi_qla_host *ha, + struct qla4_83xx_reset_entry_hdr *p_hdr) +{ + if (p_hdr->delay) + mdelay((uint32_t)((long)p_hdr->delay)); +} + +static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha, + struct qla4_83xx_reset_entry_hdr *p_hdr) +{ + long delay; + int index; + struct qla4_83xx_quad_entry *p_entry; + struct qla4_83xx_poll *p_poll; + uint32_t i; + uint32_t value; + + p_poll = (struct qla4_83xx_poll *) + ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); + p_entry = (struct qla4_83xx_quad_entry *) + ((char *)p_poll + sizeof(struct qla4_83xx_poll)); + delay = (long)p_hdr->delay; + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr, + p_entry->ar_value); + if (delay) { + if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay, + p_poll->test_mask, + p_poll->test_value)) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n", + __func__, i, + ha->reset_tmplt.seq_index)); + } else { + index = ha->reset_tmplt.array_index; + qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr, + &value); + ha->reset_tmplt.array[index++] = value; + + if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES) + ha->reset_tmplt.array_index = 1; + } + } + } +} + +static void qla4_83xx_seq_end(struct scsi_qla_host *ha, + struct qla4_83xx_reset_entry_hdr *p_hdr) +{ + ha->reset_tmplt.seq_end = 1; +} + +static void qla4_83xx_template_end(struct scsi_qla_host *ha, + struct 
qla4_83xx_reset_entry_hdr *p_hdr) +{ + ha->reset_tmplt.template_end = 1; + + if (ha->reset_tmplt.seq_error == 0) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Reset sequence completed SUCCESSFULLY.\n", + __func__)); + } else { + ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n", + __func__); + } +} + +/** + * qla4_83xx_process_reset_template - Process reset template. + * + * Process all entries in reset template till entry with SEQ_END opcode, + * which indicates end of the reset template processing. Each entry has a + * Reset Entry header, entry opcode/command, with size of the entry, number + * of entries in sub-sequence and delay in microsecs or timeout in millisecs. + * + * @ha : Pointer to adapter structure + * @p_buff : Common reset entry header. + **/ +static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha, + char *p_buff) +{ + int index, entries; + struct qla4_83xx_reset_entry_hdr *p_hdr; + char *p_entry = p_buff; + + ha->reset_tmplt.seq_end = 0; + ha->reset_tmplt.template_end = 0; + entries = ha->reset_tmplt.hdr->entries; + index = ha->reset_tmplt.seq_index; + + for (; (!ha->reset_tmplt.seq_end) && (index < entries); index++) { + + p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry; + switch (p_hdr->cmd) { + case OPCODE_NOP: + break; + case OPCODE_WRITE_LIST: + qla4_83xx_write_list(ha, p_hdr); + break; + case OPCODE_READ_WRITE_LIST: + qla4_83xx_read_write_list(ha, p_hdr); + break; + case OPCODE_POLL_LIST: + qla4_83xx_poll_list(ha, p_hdr); + break; + case OPCODE_POLL_WRITE_LIST: + qla4_83xx_poll_write_list(ha, p_hdr); + break; + case OPCODE_READ_MODIFY_WRITE: + qla4_83xx_read_modify_write(ha, p_hdr); + break; + case OPCODE_SEQ_PAUSE: + qla4_83xx_pause(ha, p_hdr); + break; + case OPCODE_SEQ_END: + qla4_83xx_seq_end(ha, p_hdr); + break; + case OPCODE_TMPL_END: + qla4_83xx_template_end(ha, p_hdr); + break; + case OPCODE_POLL_READ_LIST: + qla4_83xx_poll_read_list(ha, p_hdr); + break; + default: + ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n", + __func__, p_hdr->cmd, index); + break; + } + + /* Set pointer to next entry in the sequence. */ + p_entry += p_hdr->size; + } + + ha->reset_tmplt.seq_index = index; +} + +static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha) +{ + ha->reset_tmplt.seq_index = 0; + qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset); + + if (ha->reset_tmplt.seq_end != 1) + ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n", + __func__); +} + +static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha) +{ + qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset); + + if (ha->reset_tmplt.template_end != 1) + ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n", + __func__); +} + +static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha) +{ + qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset); + + if (ha->reset_tmplt.seq_end != 1) + ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n", + __func__); +} + +static int qla4_83xx_restart(struct scsi_qla_host *ha) +{ + int ret_val = QLA_SUCCESS; + uint32_t idc_ctrl; + + qla4_83xx_process_stop_seq(ha); + + /* + * Collect minidump. 
+ * If IDC_CTRL BIT1 is set, clear it on going to INIT state and + * don't collect minidump + */ + idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); + if (idc_ctrl & GRACEFUL_RESET_BIT1) { + qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, + (idc_ctrl & ~GRACEFUL_RESET_BIT1)); + ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n", + __func__); + } else { + qla4_8xxx_get_minidump(ha); + } + + qla4_83xx_process_init_seq(ha); + + if (qla4_83xx_copy_bootloader(ha)) { + ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n", + __func__); + ret_val = QLA_ERROR; + goto exit_restart; + } + + qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH); + qla4_83xx_process_start_seq(ha); + +exit_restart: + return ret_val; +} + +int qla4_83xx_start_firmware(struct scsi_qla_host *ha) +{ + int ret_val = QLA_SUCCESS; + + ret_val = qla4_83xx_restart(ha); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__); + goto exit_start_fw; + } else { + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart done\n", + __func__)); + } + + ret_val = qla4_83xx_check_cmd_peg_status(ha); + if (ret_val == QLA_ERROR) + ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n", + __func__); + +exit_start_fw: + return ret_val; +} + +/*----------------------Interrupt Related functions ---------------------*/ + +static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha) +{ + if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) + qla4_8xxx_intr_disable(ha); +} + +static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha) +{ + uint32_t mb_int, ret; + + if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) { + ret = readl(&ha->qla4_83xx_reg->mbox_int); + mb_int = ret & ~INT_ENABLE_FW_MB; + writel(mb_int, &ha->qla4_83xx_reg->mbox_int); + writel(1, &ha->qla4_83xx_reg->leg_int_mask); + } +} + +void qla4_83xx_disable_intrs(struct scsi_qla_host *ha) +{ + qla4_83xx_disable_mbox_intrs(ha); + qla4_83xx_disable_iocb_intrs(ha); +} + +static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha) +{ + if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) { + qla4_8xxx_intr_enable(ha); + set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags); + } +} + +void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha) +{ + uint32_t mb_int; + + if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) { + mb_int = INT_ENABLE_FW_MB; + writel(mb_int, &ha->qla4_83xx_reg->mbox_int); + writel(0, &ha->qla4_83xx_reg->leg_int_mask); + set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags); + } +} + + +void qla4_83xx_enable_intrs(struct scsi_qla_host *ha) +{ + qla4_83xx_enable_mbox_intrs(ha); + qla4_83xx_enable_iocb_intrs(ha); +} + + +void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd, + int incount) +{ + int i; + + /* Load all mailbox registers, except mailbox 0. */ + for (i = 1; i < incount; i++) + writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]); + + writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]); + + /* Set Host Interrupt register to 1, to tell the firmware that + * a mailbox command is pending. 
Firmware after reading the + * mailbox command, clears the host interrupt register */ + writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr); +} + +void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount) +{ + int intr_status; + + intr_status = readl(&ha->qla4_83xx_reg->risc_intr); + if (intr_status) { + ha->mbox_status_count = outcount; + ha->isp_ops->interrupt_service_routine(ha, intr_status); + } +} + +/** + * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands. + * @ha: pointer to host adapter structure. + **/ +int qla4_83xx_isp_reset(struct scsi_qla_host *ha) +{ + int rval; + uint32_t dev_state; + + ha->isp_ops->idc_lock(ha); + dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); + + if (ql4xdontresethba) + qla4_83xx_set_idc_dontreset(ha); + + if (dev_state == QLA8XXX_DEV_READY) { + /* If IDC_CTRL DONTRESETHBA_BIT0 is set dont do reset + * recovery */ + if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) { + ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n", + __func__); + rval = QLA_ERROR; + goto exit_isp_reset; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n", + __func__)); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, + QLA8XXX_DEV_NEED_RESET); + + } else { + /* If device_state is NEED_RESET, go ahead with + * Reset,irrespective of ql4xdontresethba. This is to allow a + * non-reset-owner to force a reset. Non-reset-owner sets + * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset + * and then forces a Reset by setting device_state to + * NEED_RESET. */ + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: HW state already set to NEED_RESET\n", + __func__)); + } + + /* For ISP8324 and ISP8042, Reset owner is NIC, iSCSI or FCOE based on + * priority and which drivers are present. Unlike ISP8022, the function + * setting NEED_RESET, may not be the Reset owner. */ + if (qla4_83xx_can_perform_reset(ha)) + set_bit(AF_8XXX_RST_OWNER, &ha->flags); + + ha->isp_ops->idc_unlock(ha); + rval = qla4_8xxx_device_state_handler(ha); + + ha->isp_ops->idc_lock(ha); + qla4_8xxx_clear_rst_ready(ha); +exit_isp_reset: + ha->isp_ops->idc_unlock(ha); + + if (rval == QLA_SUCCESS) + clear_bit(AF_FW_RECOVERY, &ha->flags); + + return rval; +} + +static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha) +{ + u32 val = 0, val1 = 0; + int i; + + qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val); + DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val)); + + /* Port 0 Rx Buffer Pause Threshold Registers. */ + DEBUG2(ql4_printk(KERN_INFO, ha, + "Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:")); + for (i = 0; i < 8; i++) { + qla4_83xx_rd_reg_indirect(ha, + QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val); + DEBUG2(pr_info("0x%x ", val)); + } + + DEBUG2(pr_info("\n")); + + /* Port 1 Rx Buffer Pause Threshold Registers. */ + DEBUG2(ql4_printk(KERN_INFO, ha, + "Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:")); + for (i = 0; i < 8; i++) { + qla4_83xx_rd_reg_indirect(ha, + QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val); + DEBUG2(pr_info("0x%x ", val)); + } + + DEBUG2(pr_info("\n")); + + /* Port 0 RxB Traffic Class Max Cell Registers. */ + DEBUG2(ql4_printk(KERN_INFO, ha, + "Port 0 RxB Traffic Class Max Cell Registers[3..0]:")); + for (i = 0; i < 4; i++) { + qla4_83xx_rd_reg_indirect(ha, + QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val); + DEBUG2(pr_info("0x%x ", val)); + } + + DEBUG2(pr_info("\n")); + + /* Port 1 RxB Traffic Class Max Cell Registers. 
*/ + DEBUG2(ql4_printk(KERN_INFO, ha, + "Port 1 RxB Traffic Class Max Cell Registers[3..0]:")); + for (i = 0; i < 4; i++) { + qla4_83xx_rd_reg_indirect(ha, + QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val); + DEBUG2(pr_info("0x%x ", val)); + } + + DEBUG2(pr_info("\n")); + + /* Port 0 RxB Rx Traffic Class Stats. */ + DEBUG2(ql4_printk(KERN_INFO, ha, + "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]")); + for (i = 7; i >= 0; i--) { + qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val); + val &= ~(0x7 << 29); /* Reset bits 29 to 31 */ + qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, + (val | (i << 29))); + qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val); + DEBUG2(pr_info("0x%x ", val)); + } + + DEBUG2(pr_info("\n")); + + /* Port 1 RxB Rx Traffic Class Stats. */ + DEBUG2(ql4_printk(KERN_INFO, ha, + "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]")); + for (i = 7; i >= 0; i--) { + qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val); + val &= ~(0x7 << 29); /* Reset bits 29 to 31 */ + qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, + (val | (i << 29))); + qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val); + DEBUG2(pr_info("0x%x ", val)); + } + + DEBUG2(pr_info("\n")); + + qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS, &val); + qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS, &val1); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n", + val, val1)); +} + +static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha) +{ + int i; + + /* set SRE-Shim Control Register */ + qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, + QLA83XX_SET_PAUSE_VAL); + + for (i = 0; i < 8; i++) { + /* Port 0 Rx Buffer Pause Threshold Registers. */ + qla4_83xx_wr_reg_indirect(ha, + QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), + QLA83XX_SET_PAUSE_VAL); + /* Port 1 Rx Buffer Pause Threshold Registers. */ + qla4_83xx_wr_reg_indirect(ha, + QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), + QLA83XX_SET_PAUSE_VAL); + } + + for (i = 0; i < 4; i++) { + /* Port 0 RxB Traffic Class Max Cell Registers. */ + qla4_83xx_wr_reg_indirect(ha, + QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), + QLA83XX_SET_TC_MAX_CELL_VAL); + /* Port 1 RxB Traffic Class Max Cell Registers. */ + qla4_83xx_wr_reg_indirect(ha, + QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), + QLA83XX_SET_TC_MAX_CELL_VAL); + } + + qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS, + QLA83XX_SET_PAUSE_VAL); + qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS, + QLA83XX_SET_PAUSE_VAL); + + ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n"); +} + +/** + * qla4_83xx_eport_init - Initialize EPort. + * @ha: Pointer to host adapter structure. + * + * If EPort hardware is in reset state before disabling pause, there would be + * serious hardware wedging issues. To prevent this perform eport init everytime + * before disabling pause frames. 
+ **/ +static void qla4_83xx_eport_init(struct scsi_qla_host *ha) +{ + /* Clear the 8 registers */ + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0); + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0); + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0); + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0); + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0); + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0); + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0); + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0); + + /* Write any value to Reset Control register */ + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF); + + ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n"); +} + +void qla4_83xx_disable_pause(struct scsi_qla_host *ha) +{ + ha->isp_ops->idc_lock(ha); + /* Before disabling pause frames, ensure that eport is not in reset */ + qla4_83xx_eport_init(ha); + qla4_83xx_dump_pause_control_regs(ha); + __qla4_83xx_disable_pause(ha); + ha->isp_ops->idc_unlock(ha); +} + +/** + * qla4_83xx_is_detached - Check if we are marked invisible. + * @ha: Pointer to host adapter structure. + **/ +int qla4_83xx_is_detached(struct scsi_qla_host *ha) +{ + uint32_t drv_active; + + drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); + + if (test_bit(AF_INIT_DONE, &ha->flags) && + !(drv_active & (1 << ha->func_num))) { + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n", + __func__, drv_active)); + return QLA_SUCCESS; + } + + return QLA_ERROR; +} diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h new file mode 100644 index 000000000..f10167c71 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_83xx.h @@ -0,0 +1,353 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ + +#ifndef __QL483XX_H +#define __QL483XX_H + +/* Indirectly Mapped Registers */ +#define QLA83XX_FLASH_SPI_STATUS 0x2808E010 +#define QLA83XX_FLASH_SPI_CONTROL 0x2808E014 +#define QLA83XX_FLASH_STATUS 0x42100004 +#define QLA83XX_FLASH_CONTROL 0x42110004 +#define QLA83XX_FLASH_ADDR 0x42110008 +#define QLA83XX_FLASH_WRDATA 0x4211000C +#define QLA83XX_FLASH_RDDATA 0x42110018 +#define QLA83XX_FLASH_DIRECT_WINDOW 0x42110030 +#define QLA83XX_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA)) + +/* Directly Mapped Registers in 83xx register table */ + +/* Flash access regs */ +#define QLA83XX_FLASH_LOCK 0x3850 +#define QLA83XX_FLASH_UNLOCK 0x3854 +#define QLA83XX_FLASH_LOCK_ID 0x3500 + +/* Driver Lock regs */ +#define QLA83XX_DRV_LOCK 0x3868 +#define QLA83XX_DRV_UNLOCK 0x386C +#define QLA83XX_DRV_LOCK_ID 0x3504 +#define QLA83XX_DRV_LOCKRECOVERY 0x379C + +/* IDC version */ +#define QLA83XX_IDC_VER_MAJ_VALUE 0x1 +#define QLA83XX_IDC_VER_MIN_VALUE 0x0 + +/* IDC Registers : Driver Coexistence Defines */ +#define QLA83XX_CRB_IDC_VER_MAJOR 0x3780 +#define QLA83XX_CRB_IDC_VER_MINOR 0x3798 +#define QLA83XX_IDC_DRV_CTRL 0x3790 +#define QLA83XX_IDC_DRV_AUDIT 0x3794 +#define QLA83XX_SRE_SHIM_CONTROL 0x0D200284 +#define QLA83XX_PORT0_RXB_PAUSE_THRS 0x0B2003A4 +#define QLA83XX_PORT1_RXB_PAUSE_THRS 0x0B2013A4 +#define QLA83XX_PORT0_RXB_TC_MAX_CELL 0x0B200388 +#define QLA83XX_PORT1_RXB_TC_MAX_CELL 0x0B201388 +#define QLA83XX_PORT0_RXB_TC_STATS 0x0B20039C +#define QLA83XX_PORT1_RXB_TC_STATS 0x0B20139C +#define QLA83XX_PORT2_IFB_PAUSE_THRS 0x0B200704 +#define QLA83XX_PORT3_IFB_PAUSE_THRS 0x0B201704 + +/* set value to pause threshold value */ +#define QLA83XX_SET_PAUSE_VAL 
0x0 +#define QLA83XX_SET_TC_MAX_CELL_VAL 0x03FF03FF + +#define QLA83XX_RESET_CONTROL 0x28084E50 +#define QLA83XX_RESET_REG 0x28084E60 +#define QLA83XX_RESET_PORT0 0x28084E70 +#define QLA83XX_RESET_PORT1 0x28084E80 +#define QLA83XX_RESET_PORT2 0x28084E90 +#define QLA83XX_RESET_PORT3 0x28084EA0 +#define QLA83XX_RESET_SRE_SHIM 0x28084EB0 +#define QLA83XX_RESET_EPG_SHIM 0x28084EC0 +#define QLA83XX_RESET_ETHER_PCS 0x28084ED0 + +/* qla_83xx_reg_tbl registers */ +#define QLA83XX_PEG_HALT_STATUS1 0x34A8 +#define QLA83XX_PEG_HALT_STATUS2 0x34AC +#define QLA83XX_PEG_ALIVE_COUNTER 0x34B0 /* FW_HEARTBEAT */ +#define QLA83XX_FW_CAPABILITIES 0x3528 +#define QLA83XX_CRB_DRV_ACTIVE 0x3788 /* IDC_DRV_PRESENCE */ +#define QLA83XX_CRB_DEV_STATE 0x3784 /* IDC_DEV_STATE */ +#define QLA83XX_CRB_DRV_STATE 0x378C /* IDC_DRV_ACK */ +#define QLA83XX_CRB_DRV_SCRATCH 0x3548 +#define QLA83XX_CRB_DEV_PART_INFO1 0x37E0 +#define QLA83XX_CRB_DEV_PART_INFO2 0x37E4 + +#define QLA83XX_FW_VER_MAJOR 0x3550 +#define QLA83XX_FW_VER_MINOR 0x3554 +#define QLA83XX_FW_VER_SUB 0x3558 +#define QLA83XX_NPAR_STATE 0x359C +#define QLA83XX_FW_IMAGE_VALID 0x35FC +#define QLA83XX_CMDPEG_STATE 0x3650 +#define QLA83XX_ASIC_TEMP 0x37B4 +#define QLA83XX_FW_API 0x356C +#define QLA83XX_DRV_OP_MODE 0x3570 + +#define QLA83XX_CRB_WIN_BASE 0x3800 +#define QLA83XX_CRB_WIN_FUNC(f) (QLA83XX_CRB_WIN_BASE+((f)*4)) +#define QLA83XX_SEM_LOCK_BASE 0x3840 +#define QLA83XX_SEM_UNLOCK_BASE 0x3844 +#define QLA83XX_SEM_LOCK_FUNC(f) (QLA83XX_SEM_LOCK_BASE+((f)*8)) +#define QLA83XX_SEM_UNLOCK_FUNC(f) (QLA83XX_SEM_UNLOCK_BASE+((f)*8)) +#define QLA83XX_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0)) +#define QLA83XX_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4)) +#define QLA83XX_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4)) +#define QLA83XX_LINK_SPEED_FACTOR 10 + +/* FLASH API Defines */ +#define QLA83xx_FLASH_MAX_WAIT_USEC 100 +#define QLA83XX_FLASH_LOCK_TIMEOUT 10000 +#define QLA83XX_FLASH_SECTOR_SIZE 65536 +#define QLA83XX_DRV_LOCK_TIMEOUT 2000 +#define QLA83XX_FLASH_SECTOR_ERASE_CMD 0xdeadbeef +#define QLA83XX_FLASH_WRITE_CMD 0xdacdacda +#define QLA83XX_FLASH_BUFFER_WRITE_CMD 0xcadcadca +#define QLA83XX_FLASH_READ_RETRY_COUNT 2000 +#define QLA83XX_FLASH_STATUS_READY 0x6 +#define QLA83XX_FLASH_BUFFER_WRITE_MIN 2 +#define QLA83XX_FLASH_BUFFER_WRITE_MAX 64 +#define QLA83XX_FLASH_STATUS_REG_POLL_DELAY 1 +#define QLA83XX_ERASE_MODE 1 +#define QLA83XX_WRITE_MODE 2 +#define QLA83XX_DWORD_WRITE_MODE 3 + +#define QLA83XX_GLOBAL_RESET 0x38CC +#define QLA83XX_WILDCARD 0x38F0 +#define QLA83XX_INFORMANT 0x38FC +#define QLA83XX_HOST_MBX_CTRL 0x3038 +#define QLA83XX_FW_MBX_CTRL 0x303C +#define QLA83XX_BOOTLOADER_ADDR 0x355C +#define QLA83XX_BOOTLOADER_SIZE 0x3560 +#define QLA83XX_FW_IMAGE_ADDR 0x3564 +#define QLA83XX_MBX_INTR_ENABLE 0x1000 +#define QLA83XX_MBX_INTR_MASK 0x1200 + +/* IDC Control Register bit defines */ +#define DONTRESET_BIT0 0x1 +#define GRACEFUL_RESET_BIT1 0x2 + +#define QLA83XX_HALT_STATUS_INFORMATIONAL (0x1 << 29) +#define QLA83XX_HALT_STATUS_FW_RESET (0x2 << 29) +#define QLA83XX_HALT_STATUS_UNRECOVERABLE (0x4 << 29) + +/* Firmware image definitions */ +#define QLA83XX_BOOTLOADER_FLASH_ADDR 0x10000 +#define QLA83XX_BOOT_FROM_FLASH 0 + +#define QLA83XX_IDC_PARAM_ADDR 0x3e8020 +/* Reset template definitions */ +#define QLA83XX_MAX_RESET_SEQ_ENTRIES 16 +#define QLA83XX_RESTART_TEMPLATE_SIZE 0x2000 +#define QLA83XX_RESET_TEMPLATE_ADDR 0x4F0000 +#define QLA83XX_RESET_SEQ_VERSION 0x0101 + +/* Reset template entry opcodes */ +#define OPCODE_NOP 0x0000 +#define OPCODE_WRITE_LIST 
0x0001 +#define OPCODE_READ_WRITE_LIST 0x0002 +#define OPCODE_POLL_LIST 0x0004 +#define OPCODE_POLL_WRITE_LIST 0x0008 +#define OPCODE_READ_MODIFY_WRITE 0x0010 +#define OPCODE_SEQ_PAUSE 0x0020 +#define OPCODE_SEQ_END 0x0040 +#define OPCODE_TMPL_END 0x0080 +#define OPCODE_POLL_READ_LIST 0x0100 + +/* Template Header */ +#define RESET_TMPLT_HDR_SIGNATURE 0xCAFE +struct qla4_83xx_reset_template_hdr { + __le16 version; + __le16 signature; + __le16 size; + __le16 entries; + __le16 hdr_size; + __le16 checksum; + __le16 init_seq_offset; + __le16 start_seq_offset; +} __packed; + +/* Common Entry Header. */ +struct qla4_83xx_reset_entry_hdr { + __le16 cmd; + __le16 size; + __le16 count; + __le16 delay; +} __packed; + +/* Generic poll entry type. */ +struct qla4_83xx_poll { + __le32 test_mask; + __le32 test_value; +} __packed; + +/* Read modify write entry type. */ +struct qla4_83xx_rmw { + __le32 test_mask; + __le32 xor_value; + __le32 or_value; + uint8_t shl; + uint8_t shr; + uint8_t index_a; + uint8_t rsvd; +} __packed; + +/* Generic Entry Item with 2 DWords. */ +struct qla4_83xx_entry { + __le32 arg1; + __le32 arg2; +} __packed; + +/* Generic Entry Item with 4 DWords.*/ +struct qla4_83xx_quad_entry { + __le32 dr_addr; + __le32 dr_value; + __le32 ar_addr; + __le32 ar_value; +} __packed; + +struct qla4_83xx_reset_template { + int seq_index; + int seq_error; + int array_index; + uint32_t array[QLA83XX_MAX_RESET_SEQ_ENTRIES]; + uint8_t *buff; + uint8_t *stop_offset; + uint8_t *start_offset; + uint8_t *init_offset; + struct qla4_83xx_reset_template_hdr *hdr; + uint8_t seq_end; + uint8_t template_end; +}; + +/* POLLRD Entry */ +struct qla83xx_minidump_entry_pollrd { + struct qla8xxx_minidump_entry_hdr h; + uint32_t select_addr; + uint32_t read_addr; + uint32_t select_value; + uint16_t select_value_stride; + uint16_t op_count; + uint32_t poll_wait; + uint32_t poll_mask; + uint32_t data_size; + uint32_t rsvd_1; +}; + +struct qla8044_minidump_entry_rddfe { + struct qla8xxx_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t value; + uint8_t stride; + uint8_t stride2; + uint16_t count; + uint32_t poll; + uint32_t mask; + uint32_t modify_mask; + uint32_t data_size; + uint32_t rsvd; + +} __packed; + +struct qla8044_minidump_entry_rdmdio { + struct qla8xxx_minidump_entry_hdr h; + + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint8_t stride_1; + uint8_t stride_2; + uint16_t count; + uint32_t poll; + uint32_t mask; + uint32_t value_2; + uint32_t data_size; + +} __packed; + +struct qla8044_minidump_entry_pollwr { + struct qla8xxx_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint32_t value_2; + uint32_t poll; + uint32_t mask; + uint32_t data_size; + uint32_t rsvd; + +} __packed; + +/* RDMUX2 Entry */ +struct qla83xx_minidump_entry_rdmux2 { + struct qla8xxx_minidump_entry_hdr h; + uint32_t select_addr_1; + uint32_t select_addr_2; + uint32_t select_value_1; + uint32_t select_value_2; + uint32_t op_count; + uint32_t select_value_mask; + uint32_t read_addr; + uint8_t select_value_stride; + uint8_t data_size; + uint8_t rsvd[2]; +}; + +/* POLLRDMWR Entry */ +struct qla83xx_minidump_entry_pollrdmwr { + struct qla8xxx_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint32_t value_2; + uint32_t poll_wait; + uint32_t poll_mask; + uint32_t modify_mask; + uint32_t data_size; +}; + +/* IDC additional information */ +struct qla4_83xx_idc_information { + uint32_t request_desc; /* IDC request descriptor */ + uint32_t info1; /* IDC additional info 
*/ + uint32_t info2; /* IDC additional info */ + uint32_t info3; /* IDC additional info */ +}; + +#define QLA83XX_PEX_DMA_ENGINE_INDEX 8 +#define QLA83XX_PEX_DMA_BASE_ADDRESS 0x77320000 +#define QLA83XX_PEX_DMA_NUM_OFFSET 0x10000 +#define QLA83XX_PEX_DMA_CMD_ADDR_LOW 0x0 +#define QLA83XX_PEX_DMA_CMD_ADDR_HIGH 0x04 +#define QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL 0x08 + +#define QLA83XX_PEX_DMA_READ_SIZE (16 * 1024) +#define QLA83XX_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */ + +/* Read Memory: For Pex-DMA */ +struct qla4_83xx_minidump_entry_rdmem_pex_dma { + struct qla8xxx_minidump_entry_hdr h; + uint32_t desc_card_addr; + uint16_t dma_desc_cmd; + uint8_t rsvd[2]; + uint32_t start_dma_cmd; + uint8_t rsvd2[12]; + uint32_t read_addr; + uint32_t read_data_size; +}; + +struct qla4_83xx_pex_dma_descriptor { + struct { + uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */ + uint8_t rsvd[2]; + uint16_t dma_desc_cmd; + } cmd; + uint64_t src_addr; + uint64_t dma_bus_addr; /* 0-3: desc-cmd, 4-7: pci-func, + * 8-15: desc-cmd */ + uint8_t rsvd[24]; +} __packed; + +#endif diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c new file mode 100644 index 000000000..abfa6ef60 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_attr.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ + +#include "ql4_def.h" +#include "ql4_glbl.h" +#include "ql4_dbg.h" + +static ssize_t +qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj, + struct bin_attribute *ba, char *buf, loff_t off, + size_t count) +{ + struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, + struct device, kobj))); + + if (is_qla40XX(ha)) + return -EINVAL; + + if (!test_bit(AF_82XX_DUMP_READING, &ha->flags)) + return 0; + + return memory_read_from_buffer(buf, count, &off, ha->fw_dump, + ha->fw_dump_size); +} + +static ssize_t +qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj, + struct bin_attribute *ba, char *buf, loff_t off, + size_t count) +{ + struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, + struct device, kobj))); + uint32_t dev_state; + long reading; + int ret = 0; + + if (is_qla40XX(ha)) + return -EINVAL; + + if (off != 0) + return ret; + + buf[1] = 0; + ret = kstrtol(buf, 10, &reading); + if (ret) { + ql4_printk(KERN_ERR, ha, "%s: Invalid input. 
Return err %d\n", + __func__, ret); + return ret; + } + + switch (reading) { + case 0: + /* clear dump collection flags */ + if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) { + clear_bit(AF_82XX_FW_DUMPED, &ha->flags); + /* Reload minidump template */ + qla4xxx_alloc_fw_dump(ha); + DEBUG2(ql4_printk(KERN_INFO, ha, + "Firmware template reloaded\n")); + } + break; + case 1: + /* Set flag to read dump */ + if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) && + !test_bit(AF_82XX_DUMP_READING, &ha->flags)) { + set_bit(AF_82XX_DUMP_READING, &ha->flags); + DEBUG2(ql4_printk(KERN_INFO, ha, + "Raw firmware dump ready for read on (%ld).\n", + ha->host_no)); + } + break; + case 2: + /* Reset HBA and collect FW dump */ + ha->isp_ops->idc_lock(ha); + dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); + if (dev_state == QLA8XXX_DEV_READY) { + ql4_printk(KERN_INFO, ha, "%s: Setting Need reset\n", + __func__); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, + QLA8XXX_DEV_NEED_RESET); + if (is_qla8022(ha) || + ((is_qla8032(ha) || is_qla8042(ha)) && + qla4_83xx_can_perform_reset(ha))) { + set_bit(AF_8XXX_RST_OWNER, &ha->flags); + set_bit(AF_FW_RECOVERY, &ha->flags); + ql4_printk(KERN_INFO, ha, "%s: Reset owner is 0x%x\n", + __func__, ha->func_num); + } + } else + ql4_printk(KERN_INFO, ha, + "%s: Reset not performed as device state is 0x%x\n", + __func__, dev_state); + + ha->isp_ops->idc_unlock(ha); + break; + default: + /* do nothing */ + break; + } + + return count; +} + +static struct bin_attribute sysfs_fw_dump_attr = { + .attr = { + .name = "fw_dump", + .mode = S_IRUSR | S_IWUSR, + }, + .size = 0, + .read = qla4_8xxx_sysfs_read_fw_dump, + .write = qla4_8xxx_sysfs_write_fw_dump, +}; + +static struct sysfs_entry { + char *name; + struct bin_attribute *attr; +} bin_file_entries[] = { + { "fw_dump", &sysfs_fw_dump_attr }, + { NULL }, +}; + +void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha) +{ + struct Scsi_Host *host = ha->host; + struct sysfs_entry *iter; + int ret; + + for (iter = bin_file_entries; iter->name; iter++) { + ret = sysfs_create_bin_file(&host->shost_gendev.kobj, + iter->attr); + if (ret) + ql4_printk(KERN_ERR, ha, + "Unable to create sysfs %s binary attribute (%d).\n", + iter->name, ret); + } +} + +void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha) +{ + struct Scsi_Host *host = ha->host; + struct sysfs_entry *iter; + + for (iter = bin_file_entries; iter->name; iter++) + sysfs_remove_bin_file(&host->shost_gendev.kobj, + iter->attr); +} + +/* Scsi_Host attributes. 
*/ +static ssize_t +qla4xxx_fw_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + + if (is_qla80XX(ha)) + return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", + ha->fw_info.fw_major, ha->fw_info.fw_minor, + ha->fw_info.fw_patch, ha->fw_info.fw_build); + else + return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n", + ha->fw_info.fw_major, ha->fw_info.fw_minor, + ha->fw_info.fw_patch, ha->fw_info.fw_build); +} + +static ssize_t +qla4xxx_serial_num_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number); +} + +static ssize_t +qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fw_info.iscsi_major, + ha->fw_info.iscsi_minor); +} + +static ssize_t +qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n", + ha->fw_info.bootload_major, ha->fw_info.bootload_minor, + ha->fw_info.bootload_patch, ha->fw_info.bootload_build); +} + +static ssize_t +qla4xxx_board_id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + return snprintf(buf, PAGE_SIZE, "0x%08X\n", ha->board_id); +} + +static ssize_t +qla4xxx_fw_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + + qla4xxx_get_firmware_state(ha); + return snprintf(buf, PAGE_SIZE, "0x%08X%8X\n", ha->firmware_state, + ha->addl_fw_state); +} + +static ssize_t +qla4xxx_phy_port_cnt_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + + if (is_qla40XX(ha)) + return -ENOSYS; + + return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt); +} + +static ssize_t +qla4xxx_phy_port_num_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + + if (is_qla40XX(ha)) + return -ENOSYS; + + return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num); +} + +static ssize_t +qla4xxx_iscsi_func_cnt_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + + if (is_qla40XX(ha)) + return -ENOSYS; + + return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt); +} + +static ssize_t +qla4xxx_hba_model_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + + return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name); +} + +static ssize_t +qla4xxx_fw_timestamp_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + return snprintf(buf, PAGE_SIZE, "%s %s\n", ha->fw_info.fw_build_date, + ha->fw_info.fw_build_time); +} + +static ssize_t +qla4xxx_fw_build_user_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.fw_build_user); +} + 
+static ssize_t +qla4xxx_fw_ext_timestamp_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.extended_timestamp); +} + +static ssize_t +qla4xxx_fw_load_src_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + char *load_src = NULL; + + switch (ha->fw_info.fw_load_source) { + case 1: + load_src = "Flash Primary"; + break; + case 2: + load_src = "Flash Secondary"; + break; + case 3: + load_src = "Host Download"; + break; + } + + return snprintf(buf, PAGE_SIZE, "%s\n", load_src); +} + +static ssize_t +qla4xxx_fw_uptime_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + qla4xxx_about_firmware(ha); + return snprintf(buf, PAGE_SIZE, "%u.%u secs\n", ha->fw_uptime_secs, + ha->fw_uptime_msecs); +} + +static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL); +static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL); +static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL); +static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL); +static DEVICE_ATTR(board_id, S_IRUGO, qla4xxx_board_id_show, NULL); +static DEVICE_ATTR(fw_state, S_IRUGO, qla4xxx_fw_state_show, NULL); +static DEVICE_ATTR(phy_port_cnt, S_IRUGO, qla4xxx_phy_port_cnt_show, NULL); +static DEVICE_ATTR(phy_port_num, S_IRUGO, qla4xxx_phy_port_num_show, NULL); +static DEVICE_ATTR(iscsi_func_cnt, S_IRUGO, qla4xxx_iscsi_func_cnt_show, NULL); +static DEVICE_ATTR(hba_model, S_IRUGO, qla4xxx_hba_model_show, NULL); +static DEVICE_ATTR(fw_timestamp, S_IRUGO, qla4xxx_fw_timestamp_show, NULL); +static DEVICE_ATTR(fw_build_user, S_IRUGO, qla4xxx_fw_build_user_show, NULL); +static DEVICE_ATTR(fw_ext_timestamp, S_IRUGO, qla4xxx_fw_ext_timestamp_show, + NULL); +static DEVICE_ATTR(fw_load_src, S_IRUGO, qla4xxx_fw_load_src_show, NULL); +static DEVICE_ATTR(fw_uptime, S_IRUGO, qla4xxx_fw_uptime_show, NULL); + +static struct attribute *qla4xxx_host_attrs[] = { + &dev_attr_fw_version.attr, + &dev_attr_serial_num.attr, + &dev_attr_iscsi_version.attr, + &dev_attr_optrom_version.attr, + &dev_attr_board_id.attr, + &dev_attr_fw_state.attr, + &dev_attr_phy_port_cnt.attr, + &dev_attr_phy_port_num.attr, + &dev_attr_iscsi_func_cnt.attr, + &dev_attr_hba_model.attr, + &dev_attr_fw_timestamp.attr, + &dev_attr_fw_build_user.attr, + &dev_attr_fw_ext_timestamp.attr, + &dev_attr_fw_load_src.attr, + &dev_attr_fw_uptime.attr, + NULL, +}; + +static const struct attribute_group qla4xxx_host_attr_group = { + .attrs = qla4xxx_host_attrs +}; + +const struct attribute_group *qla4xxx_host_groups[] = { + &qla4xxx_host_attr_group, + NULL +}; diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c new file mode 100644 index 000000000..c447a9d59 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_bsg.c @@ -0,0 +1,872 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2011-2013 QLogic Corporation + */ + +#include "ql4_def.h" +#include "ql4_glbl.h" +#include "ql4_bsg.h" + +static int +qla4xxx_read_flash(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + struct iscsi_bsg_request *bsg_req = bsg_job->request; + uint32_t offset = 0; 
+ uint32_t length = 0; + dma_addr_t flash_dma; + uint8_t *flash = NULL; + int rval = -EINVAL; + + bsg_reply->reply_payload_rcv_len = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto leave; + + if (ql4xxx_reset_active(ha)) { + ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); + rval = -EBUSY; + goto leave; + } + + if (ha->flash_state != QLFLASH_WAITING) { + ql4_printk(KERN_ERR, ha, "%s: another flash operation " + "active\n", __func__); + rval = -EBUSY; + goto leave; + } + + ha->flash_state = QLFLASH_READING; + offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + length = bsg_job->reply_payload.payload_len; + + flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma, + GFP_KERNEL); + if (!flash) { + ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash " + "data\n", __func__); + rval = -ENOMEM; + goto leave; + } + + rval = qla4xxx_get_flash(ha, flash_dma, offset, length); + if (rval) { + ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__); + bsg_reply->result = DID_ERROR << 16; + rval = -EIO; + } else { + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + flash, length); + bsg_reply->result = DID_OK << 16; + } + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma); +leave: + ha->flash_state = QLFLASH_WAITING; + return rval; +} + +static int +qla4xxx_update_flash(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + struct iscsi_bsg_request *bsg_req = bsg_job->request; + uint32_t length = 0; + uint32_t offset = 0; + uint32_t options = 0; + dma_addr_t flash_dma; + uint8_t *flash = NULL; + int rval = -EINVAL; + + bsg_reply->reply_payload_rcv_len = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto leave; + + if (ql4xxx_reset_active(ha)) { + ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); + rval = -EBUSY; + goto leave; + } + + if (ha->flash_state != QLFLASH_WAITING) { + ql4_printk(KERN_ERR, ha, "%s: another flash operation " + "active\n", __func__); + rval = -EBUSY; + goto leave; + } + + ha->flash_state = QLFLASH_WRITING; + length = bsg_job->request_payload.payload_len; + offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + options = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; + + flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma, + GFP_KERNEL); + if (!flash) { + ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash " + "data\n", __func__); + rval = -ENOMEM; + goto leave; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, flash, length); + + rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options); + if (rval) { + ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__); + bsg_reply->result = DID_ERROR << 16; + rval = -EIO; + } else + bsg_reply->result = DID_OK << 16; + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma); +leave: + ha->flash_state = QLFLASH_WAITING; + return rval; +} + +static int +qla4xxx_get_acb_state(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + uint32_t status[MBOX_REG_COUNT]; 
+ uint32_t acb_idx; + uint32_t ip_idx; + int rval = -EINVAL; + + bsg_reply->reply_payload_rcv_len = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto leave; + + /* Only 4022 and above adapters are supported */ + if (is_qla4010(ha)) + goto leave; + + if (ql4xxx_reset_active(ha)) { + ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); + rval = -EBUSY; + goto leave; + } + + if (bsg_job->reply_payload.payload_len < sizeof(status)) { + ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n", + __func__, bsg_job->reply_payload.payload_len); + rval = -EINVAL; + goto leave; + } + + acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; + + rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status); + if (rval) { + ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n", + __func__); + bsg_reply->result = DID_ERROR << 16; + rval = -EIO; + } else { + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + status, sizeof(status)); + bsg_reply->result = DID_OK << 16; + } + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); +leave: + return rval; +} + +static int +qla4xxx_read_nvram(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + uint32_t offset = 0; + uint32_t len = 0; + uint32_t total_len = 0; + dma_addr_t nvram_dma; + uint8_t *nvram = NULL; + int rval = -EINVAL; + + bsg_reply->reply_payload_rcv_len = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto leave; + + /* Only 40xx adapters are supported */ + if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha))) + goto leave; + + if (ql4xxx_reset_active(ha)) { + ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); + rval = -EBUSY; + goto leave; + } + + offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + len = bsg_job->reply_payload.payload_len; + total_len = offset + len; + + /* total len should not be greater than max NVRAM size */ + if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) || + ((is_qla4022(ha) || is_qla4032(ha)) && + total_len > QL40X2_NVRAM_SIZE)) { + ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max" + " nvram size, offset=%d len=%d\n", + __func__, offset, len); + goto leave; + } + + nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma, + GFP_KERNEL); + if (!nvram) { + ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram " + "data\n", __func__); + rval = -ENOMEM; + goto leave; + } + + rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len); + if (rval) { + ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__); + bsg_reply->result = DID_ERROR << 16; + rval = -EIO; + } else { + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + nvram, len); + bsg_reply->result = DID_OK << 16; + } + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma); +leave: + return rval; +} + +static int +qla4xxx_update_nvram(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + uint32_t offset = 0; + uint32_t len = 0; + uint32_t total_len 
= 0; + dma_addr_t nvram_dma; + uint8_t *nvram = NULL; + int rval = -EINVAL; + + bsg_reply->reply_payload_rcv_len = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto leave; + + if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha))) + goto leave; + + if (ql4xxx_reset_active(ha)) { + ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); + rval = -EBUSY; + goto leave; + } + + offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + len = bsg_job->request_payload.payload_len; + total_len = offset + len; + + /* total len should not be greater than max NVRAM size */ + if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) || + ((is_qla4022(ha) || is_qla4032(ha)) && + total_len > QL40X2_NVRAM_SIZE)) { + ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max" + " nvram size, offset=%d len=%d\n", + __func__, offset, len); + goto leave; + } + + nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma, + GFP_KERNEL); + if (!nvram) { + ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash " + "data\n", __func__); + rval = -ENOMEM; + goto leave; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, nvram, len); + + rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len); + if (rval) { + ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__); + bsg_reply->result = DID_ERROR << 16; + rval = -EIO; + } else + bsg_reply->result = DID_OK << 16; + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma); +leave: + return rval; +} + +static int +qla4xxx_restore_defaults(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + uint32_t region = 0; + uint32_t field0 = 0; + uint32_t field1 = 0; + int rval = -EINVAL; + + bsg_reply->reply_payload_rcv_len = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto leave; + + if (is_qla4010(ha)) + goto leave; + + if (ql4xxx_reset_active(ha)) { + ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); + rval = -EBUSY; + goto leave; + } + + region = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; + field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3]; + + rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1); + if (rval) { + ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__); + bsg_reply->result = DID_ERROR << 16; + rval = -EIO; + } else + bsg_reply->result = DID_OK << 16; + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); +leave: + return rval; +} + +static int +qla4xxx_bsg_get_acb(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + uint32_t acb_type = 0; + uint32_t len = 0; + dma_addr_t acb_dma; + uint8_t *acb = NULL; + int rval = -EINVAL; + + bsg_reply->reply_payload_rcv_len = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto leave; + + /* Only 4022 and above adapters are supported */ + if (is_qla4010(ha)) + goto leave; + + if (ql4xxx_reset_active(ha)) { + ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); + rval = -EBUSY; + goto leave; + } + + acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + len = 
bsg_job->reply_payload.payload_len; + if (len < sizeof(struct addr_ctrl_blk)) { + ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n", + __func__, len); + rval = -EINVAL; + goto leave; + } + + acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL); + if (!acb) { + ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb " + "data\n", __func__); + rval = -ENOMEM; + goto leave; + } + + rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len); + if (rval) { + ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__); + bsg_reply->result = DID_ERROR << 16; + rval = -EIO; + } else { + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + acb, len); + bsg_reply->result = DID_OK << 16; + } + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma); +leave: + return rval; +} + +static void ql4xxx_execute_diag_cmd(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + uint8_t *rsp_ptr = NULL; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status = QLA_ERROR; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + + if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) { + ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n", + __func__); + bsg_reply->result = DID_ERROR << 16; + goto exit_diag_mem_test; + } + + bsg_reply->reply_payload_rcv_len = 0; + memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1], + sizeof(uint32_t) * MBOX_REG_COUNT); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n", + __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2], + mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6], + mbox_cmd[7])); + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0], + &mbox_sts[0]); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n", + __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2], + mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6], + mbox_sts[7])); + + if (status == QLA_SUCCESS) + bsg_reply->result = DID_OK << 16; + else + bsg_reply->result = DID_ERROR << 16; + + /* Send mbox_sts to application */ + bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts); + rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply); + memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts)); + +exit_diag_mem_test: + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: bsg_reply->result = x%x, status = %s\n", + __func__, bsg_reply->result, STATUS(status))); + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); +} + +static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha, + int wait_for_link) +{ + int status = QLA_SUCCESS; + + if (!wait_for_completion_timeout(&ha->idc_comp, (IDC_COMP_TOV * HZ))) { + ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification not received, Waiting for another %d timeout", + __func__, ha->idc_extend_tmo); + if (ha->idc_extend_tmo) { + if (!wait_for_completion_timeout(&ha->idc_comp, + (ha->idc_extend_tmo * HZ))) { + ha->notify_idc_comp = 0; + ha->notify_link_up_comp = 0; + ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received", + __func__); + status = QLA_ERROR; + goto exit_wait; + } else { + 
DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: IDC Complete notification received\n", + __func__)); + } + } + } else { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: IDC Complete notification received\n", + __func__)); + } + ha->notify_idc_comp = 0; + + if (wait_for_link) { + if (!wait_for_completion_timeout(&ha->link_up_comp, + (IDC_COMP_TOV * HZ))) { + ha->notify_link_up_comp = 0; + ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received", + __func__); + status = QLA_ERROR; + goto exit_wait; + } else { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: LINK UP notification received\n", + __func__)); + } + ha->notify_link_up_comp = 0; + } + +exit_wait: + return status; +} + +static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha, + uint32_t *mbox_cmd) +{ + uint32_t config = 0; + int status = QLA_SUCCESS; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + + status = qla4_83xx_get_port_config(ha, &config); + if (status != QLA_SUCCESS) + goto exit_pre_loopback_config; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Default port config=%08X\n", + __func__, config)); + + if ((config & ENABLE_INTERNAL_LOOPBACK) || + (config & ENABLE_EXTERNAL_LOOPBACK)) { + ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. Invalid request\n", + __func__); + goto exit_pre_loopback_config; + } + + if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK) + config |= ENABLE_INTERNAL_LOOPBACK; + + if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK) + config |= ENABLE_EXTERNAL_LOOPBACK; + + config &= ~ENABLE_DCBX; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: New port config=%08X\n", + __func__, config)); + + ha->notify_idc_comp = 1; + ha->notify_link_up_comp = 1; + + /* get the link state */ + qla4xxx_get_firmware_state(ha); + + status = qla4_83xx_set_port_config(ha, &config); + if (status != QLA_SUCCESS) { + ha->notify_idc_comp = 0; + ha->notify_link_up_comp = 0; + goto exit_pre_loopback_config; + } +exit_pre_loopback_config: + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__, + STATUS(status))); + return status; +} + +static int qla4_83xx_post_loopback_config(struct scsi_qla_host *ha, + uint32_t *mbox_cmd) +{ + int status = QLA_SUCCESS; + uint32_t config = 0; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + + status = qla4_83xx_get_port_config(ha, &config); + if (status != QLA_SUCCESS) + goto exit_post_loopback_config; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: port config=%08X\n", __func__, + config)); + + if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK) + config &= ~ENABLE_INTERNAL_LOOPBACK; + else if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK) + config &= ~ENABLE_EXTERNAL_LOOPBACK; + + config |= ENABLE_DCBX; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Restore default port config=%08X\n", __func__, + config)); + + ha->notify_idc_comp = 1; + if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) + ha->notify_link_up_comp = 1; + + status = qla4_83xx_set_port_config(ha, &config); + if (status != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "%s: Scheduling adapter reset\n", + __func__); + set_bit(DPC_RESET_HA, &ha->dpc_flags); + clear_bit(AF_LOOPBACK, &ha->flags); + goto exit_post_loopback_config; + } + +exit_post_loopback_config: + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__, + STATUS(status))); + return status; +} + +static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + 
struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + uint8_t *rsp_ptr = NULL; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int wait_for_link = 1; + int status = QLA_ERROR; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + + bsg_reply->reply_payload_rcv_len = 0; + + if (test_bit(AF_LOOPBACK, &ha->flags)) { + ql4_printk(KERN_INFO, ha, "%s: Loopback Diagnostics already in progress. Invalid Request\n", + __func__); + bsg_reply->result = DID_ERROR << 16; + goto exit_loopback_cmd; + } + + if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) { + ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n", + __func__); + bsg_reply->result = DID_ERROR << 16; + goto exit_loopback_cmd; + } + + memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1], + sizeof(uint32_t) * MBOX_REG_COUNT); + + if (is_qla8032(ha) || is_qla8042(ha)) { + status = qla4_83xx_pre_loopback_config(ha, mbox_cmd); + if (status != QLA_SUCCESS) { + bsg_reply->result = DID_ERROR << 16; + goto exit_loopback_cmd; + } + + status = qla4_83xx_wait_for_loopback_config_comp(ha, + wait_for_link); + if (status != QLA_SUCCESS) { + bsg_reply->result = DID_TIME_OUT << 16; + goto restore; + } + } + + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n", + __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2], + mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6], + mbox_cmd[7])); + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0], + &mbox_sts[0]); + + if (status == QLA_SUCCESS) + bsg_reply->result = DID_OK << 16; + else + bsg_reply->result = DID_ERROR << 16; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n", + __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2], + mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6], + mbox_sts[7])); + + /* Send mbox_sts to application */ + bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts); + rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply); + memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts)); +restore: + if (is_qla8032(ha) || is_qla8042(ha)) { + status = qla4_83xx_post_loopback_config(ha, mbox_cmd); + if (status != QLA_SUCCESS) { + bsg_reply->result = DID_ERROR << 16; + goto exit_loopback_cmd; + } + + /* for pre_loopback_config() wait for LINK UP only + * if PHY LINK is UP */ + if (!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP)) + wait_for_link = 0; + + status = qla4_83xx_wait_for_loopback_config_comp(ha, + wait_for_link); + if (status != QLA_SUCCESS) { + bsg_reply->result = DID_TIME_OUT << 16; + goto exit_loopback_cmd; + } + } +exit_loopback_cmd: + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: bsg_reply->result = x%x, status = %s\n", + __func__, bsg_reply->result, STATUS(status))); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); +} + +static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + uint32_t diag_cmd; + int rval = -EINVAL; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + + diag_cmd = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + if (diag_cmd == MBOX_CMD_DIAG_TEST) { + switch (bsg_req->rqst_data.h_vendor.vendor_cmd[2]) { + case QL_DIAG_CMD_TEST_DDR_SIZE: + case QL_DIAG_CMD_TEST_DDR_RW: + case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW: + case QL_DIAG_CMD_TEST_NVRAM: + case QL_DIAG_CMD_TEST_FLASH_ROM: + case 
QL_DIAG_CMD_TEST_DMA_XFER: + case QL_DIAG_CMD_SELF_DDR_RW: + case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW: + /* Execute diag test for adapter RAM/FLASH */ + ql4xxx_execute_diag_cmd(bsg_job); + /* Always return success as we want to sent bsg_reply + * to Application */ + rval = QLA_SUCCESS; + break; + + case QL_DIAG_CMD_TEST_INT_LOOPBACK: + case QL_DIAG_CMD_TEST_EXT_LOOPBACK: + /* Execute diag test for Network */ + qla4xxx_execute_diag_loopback_cmd(bsg_job); + /* Always return success as we want to sent bsg_reply + * to Application */ + rval = QLA_SUCCESS; + break; + default: + ql4_printk(KERN_ERR, ha, "%s: Invalid diag test: 0x%x\n", + __func__, + bsg_req->rqst_data.h_vendor.vendor_cmd[2]); + } + } else if ((diag_cmd == MBOX_CMD_SET_LED_CONFIG) || + (diag_cmd == MBOX_CMD_GET_LED_CONFIG)) { + ql4xxx_execute_diag_cmd(bsg_job); + rval = QLA_SUCCESS; + } else { + ql4_printk(KERN_ERR, ha, "%s: Invalid diag cmd: 0x%x\n", + __func__, diag_cmd); + } + + return rval; +} + +/** + * qla4xxx_process_vendor_specific - handle vendor specific bsg request + * @bsg_job: iscsi_bsg_job to handle + **/ +int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job) +{ + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + + switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) { + case QLISCSI_VND_READ_FLASH: + return qla4xxx_read_flash(bsg_job); + + case QLISCSI_VND_UPDATE_FLASH: + return qla4xxx_update_flash(bsg_job); + + case QLISCSI_VND_GET_ACB_STATE: + return qla4xxx_get_acb_state(bsg_job); + + case QLISCSI_VND_READ_NVRAM: + return qla4xxx_read_nvram(bsg_job); + + case QLISCSI_VND_UPDATE_NVRAM: + return qla4xxx_update_nvram(bsg_job); + + case QLISCSI_VND_RESTORE_DEFAULTS: + return qla4xxx_restore_defaults(bsg_job); + + case QLISCSI_VND_GET_ACB: + return qla4xxx_bsg_get_acb(bsg_job); + + case QLISCSI_VND_DIAG_TEST: + return qla4xxx_execute_diag_test(bsg_job); + + default: + ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: " + "0x%x\n", __func__, bsg_req->msgcode); + bsg_reply->result = (DID_ERROR << 16); + bsg_reply->reply_payload_rcv_len = 0; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return -ENOSYS; + } +} + +/** + * qla4xxx_bsg_request - handle bsg request from ISCSI transport + * @bsg_job: iscsi_bsg_job to handle + */ +int qla4xxx_bsg_request(struct bsg_job *bsg_job) +{ + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + + switch (bsg_req->msgcode) { + case ISCSI_BSG_HST_VENDOR: + return qla4xxx_process_vendor_specific(bsg_job); + + default: + ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n", + __func__, bsg_req->msgcode); + } + + return -ENOSYS; +} diff --git a/drivers/scsi/qla4xxx/ql4_bsg.h b/drivers/scsi/qla4xxx/ql4_bsg.h new file mode 100644 index 000000000..06db38561 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_bsg.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2011 QLogic Corporation + */ +#ifndef __QL4_BSG_H +#define __QL4_BSG_H + +/* BSG Vendor specific commands */ +#define QLISCSI_VND_READ_FLASH 1 +#define QLISCSI_VND_UPDATE_FLASH 2 +#define QLISCSI_VND_GET_ACB_STATE 3 +#define QLISCSI_VND_READ_NVRAM 4 +#define QLISCSI_VND_UPDATE_NVRAM 5 +#define QLISCSI_VND_RESTORE_DEFAULTS 6 +#define QLISCSI_VND_GET_ACB 7 +#define 
QLISCSI_VND_DIAG_TEST 8 + +/* QLISCSI_VND_DIAG_CMD sub code */ +#define QL_DIAG_CMD_TEST_DDR_SIZE 0x2 +#define QL_DIAG_CMD_TEST_DDR_RW 0x3 +#define QL_DIAG_CMD_TEST_ONCHIP_MEM_RW 0x4 +#define QL_DIAG_CMD_TEST_NVRAM 0x5 /* Only ISP4XXX */ +#define QL_DIAG_CMD_TEST_FLASH_ROM 0x6 +#define QL_DIAG_CMD_TEST_INT_LOOPBACK 0x7 +#define QL_DIAG_CMD_TEST_EXT_LOOPBACK 0x8 +#define QL_DIAG_CMD_TEST_DMA_XFER 0x9 /* Only ISP4XXX */ +#define QL_DIAG_CMD_SELF_DDR_RW 0xC +#define QL_DIAG_CMD_SELF_ONCHIP_MEM_RW 0xD + +#endif diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c new file mode 100644 index 000000000..f43e675c5 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_dbg.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2012 QLogic Corporation + */ + +#include "ql4_def.h" +#include "ql4_glbl.h" +#include "ql4_dbg.h" +#include "ql4_inline.h" + +void qla4xxx_dump_buffer(void *b, uint32_t size) +{ + uint32_t cnt; + uint8_t *c = b; + + printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh " + "Fh\n"); + printk("------------------------------------------------------------" + "--\n"); + for (cnt = 0; cnt < size; c++) { + printk("%02x", *c); + if (!(++cnt % 16)) + printk("\n"); + + else + printk(" "); + } + printk(KERN_INFO "\n"); +} + +void qla4xxx_dump_registers(struct scsi_qla_host *ha) +{ + uint8_t i; + + if (is_qla8022(ha)) { + for (i = 1; i < MBOX_REG_COUNT; i++) + printk(KERN_INFO "mailbox[%d] = 0x%08X\n", + i, readl(&ha->qla4_82xx_reg->mailbox_in[i])); + return; + } + + for (i = 0; i < MBOX_REG_COUNT; i++) { + printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, mailbox[i]), i, + readw(&ha->reg->mailbox[i])); + } + + printk(KERN_INFO "0x%02X flash_address = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, flash_address), + readw(&ha->reg->flash_address)); + printk(KERN_INFO "0x%02X flash_data = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, flash_data), + readw(&ha->reg->flash_data)); + printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, ctrl_status), + readw(&ha->reg->ctrl_status)); + + if (is_qla4010(ha)) { + printk(KERN_INFO "0x%02X nvram = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram), + readw(&ha->reg->u1.isp4010.nvram)); + } else if (is_qla4022(ha) | is_qla4032(ha)) { + printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u1.isp4022.intr_mask), + readw(&ha->reg->u1.isp4022.intr_mask)); + printk(KERN_INFO "0x%02X nvram = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram), + readw(&ha->reg->u1.isp4022.nvram)); + printk(KERN_INFO "0x%02X semaphore = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u1.isp4022.semaphore), + readw(&ha->reg->u1.isp4022.semaphore)); + } + printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, req_q_in), + readw(&ha->reg->req_q_in)); + printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, rsp_q_out), + readw(&ha->reg->rsp_q_out)); + + if (is_qla4010(ha)) { + printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4010.ext_hw_conf), + readw(&ha->reg->u2.isp4010.ext_hw_conf)); + printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_ctrl), + readw(&ha->reg->u2.isp4010.port_ctrl)); + printk(KERN_INFO "0x%02X port_status = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_status), + 
readw(&ha->reg->u2.isp4010.port_status)); + printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4010.req_q_out), + readw(&ha->reg->u2.isp4010.req_q_out)); + printk(KERN_INFO "0x%02X gp_out = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out), + readw(&ha->reg->u2.isp4010.gp_out)); + printk(KERN_INFO "0x%02X gp_in = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in), + readw(&ha->reg->u2.isp4010.gp_in)); + printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t) + offsetof(struct isp_reg, u2.isp4010.port_err_status), + readw(&ha->reg->u2.isp4010.port_err_status)); + } else if (is_qla4022(ha) | is_qla4032(ha)) { + printk(KERN_INFO "Page 0 Registers:\n"); + printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", (uint8_t) + offsetof(struct isp_reg, u2.isp4022.p0.ext_hw_conf), + readw(&ha->reg->u2.isp4022.p0.ext_hw_conf)); + printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n", (uint8_t) + offsetof(struct isp_reg, u2.isp4022.p0.port_ctrl), + readw(&ha->reg->u2.isp4022.p0.port_ctrl)); + printk(KERN_INFO "0x%02X port_status = 0x%08X\n", (uint8_t) + offsetof(struct isp_reg, u2.isp4022.p0.port_status), + readw(&ha->reg->u2.isp4022.p0.port_status)); + printk(KERN_INFO "0x%02X gp_out = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_out), + readw(&ha->reg->u2.isp4022.p0.gp_out)); + printk(KERN_INFO "0x%02X gp_in = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in), + readw(&ha->reg->u2.isp4022.p0.gp_in)); + printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t) + offsetof(struct isp_reg, u2.isp4022.p0.port_err_status), + readw(&ha->reg->u2.isp4022.p0.port_err_status)); + printk(KERN_INFO "Page 1 Registers:\n"); + writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT), + &ha->reg->ctrl_status); + printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4022.p1.req_q_out), + readw(&ha->reg->u2.isp4022.p1.req_q_out)); + writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT), + &ha->reg->ctrl_status); + } +} + +void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha) +{ + uint32_t halt_status1, halt_status2; + + halt_status1 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1); + halt_status2 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS2); + + if (is_qla8022(ha)) { + ql4_printk(KERN_INFO, ha, + "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n" + " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" + " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" + " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" + " PEG_NET_4_PC: 0x%x\n", ha->host_no, __func__, + ha->pdev->device, halt_status1, halt_status2, + qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c), + qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c), + qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c), + qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c), + qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c)); + } else if (is_qla8032(ha) || is_qla8042(ha)) { + ql4_printk(KERN_INFO, ha, + "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n" + " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n", + ha->host_no, __func__, ha->pdev->device, + halt_status1, halt_status2); + } +} diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h new file mode 100644 index 000000000..171c89165 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_dbg.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2012 QLogic Corporation + */ + +/* + * Driver debug 
definitions. + */ +/* #define QL_DEBUG */ /* DEBUG messages */ +/* #define QL_DEBUG_LEVEL_3 */ /* Output function tracing */ +/* #define QL_DEBUG_LEVEL_4 */ +/* #define QL_DEBUG_LEVEL_5 */ +/* #define QL_DEBUG_LEVEL_7 */ +/* #define QL_DEBUG_LEVEL_9 */ + +#define QL_DEBUG_LEVEL_2 /* ALways enable error messagess */ +#if defined(QL_DEBUG) +#define DEBUG(x) do {x;} while (0); +#else +#define DEBUG(x) do {} while (0); +#endif + +#if defined(QL_DEBUG_LEVEL_2) +#define DEBUG2(x) do {if(ql4xextended_error_logging == 2) x;} while (0); +#define DEBUG2_3(x) do {x;} while (0); +#else /* */ +#define DEBUG2(x) do {} while (0); +#endif /* */ + +#if defined(QL_DEBUG_LEVEL_3) +#define DEBUG3(x) do {if(ql4xextended_error_logging == 3) x;} while (0); +#else /* */ +#define DEBUG3(x) do {} while (0); +#if !defined(QL_DEBUG_LEVEL_2) +#define DEBUG2_3(x) do {} while (0); +#endif /* */ +#endif /* */ +#if defined(QL_DEBUG_LEVEL_4) +#define DEBUG4(x) do {x;} while (0); +#else /* */ +#define DEBUG4(x) do {} while (0); +#endif /* */ + +#if defined(QL_DEBUG_LEVEL_5) +#define DEBUG5(x) do {x;} while (0); +#else /* */ +#define DEBUG5(x) do {} while (0); +#endif /* */ + +#if defined(QL_DEBUG_LEVEL_7) +#define DEBUG7(x) do {x; } while (0) +#else /* */ +#define DEBUG7(x) do {} while (0) +#endif /* */ + +#if defined(QL_DEBUG_LEVEL_9) +#define DEBUG9(x) do {x;} while (0); +#else /* */ +#define DEBUG9(x) do {} while (0); +#endif /* */ diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h new file mode 100644 index 000000000..5e683ba49 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -0,0 +1,1084 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ + +#ifndef __QL4_DEF_H +#define __QL4_DEF_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ql4_dbg.h" +#include "ql4_nx.h" +#include "ql4_fw.h" +#include "ql4_nvram.h" +#include "ql4_83xx.h" + +#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010 +#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010 +#endif + +#ifndef PCI_DEVICE_ID_QLOGIC_ISP4022 +#define PCI_DEVICE_ID_QLOGIC_ISP4022 0x4022 +#endif + +#ifndef PCI_DEVICE_ID_QLOGIC_ISP4032 +#define PCI_DEVICE_ID_QLOGIC_ISP4032 0x4032 +#endif + +#ifndef PCI_DEVICE_ID_QLOGIC_ISP8022 +#define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022 +#endif + +#ifndef PCI_DEVICE_ID_QLOGIC_ISP8324 +#define PCI_DEVICE_ID_QLOGIC_ISP8324 0x8032 +#endif + +#ifndef PCI_DEVICE_ID_QLOGIC_ISP8042 +#define PCI_DEVICE_ID_QLOGIC_ISP8042 0x8042 +#endif + +#define ISP4XXX_PCI_FN_1 0x1 +#define ISP4XXX_PCI_FN_2 0x3 + +#define QLA_SUCCESS 0 +#define QLA_ERROR 1 +#define STATUS(status) status == QLA_ERROR ? 
"FAILED" : "SUCCEEDED" + +/* + * Data bit definitions + */ +#define BIT_0 0x1 +#define BIT_1 0x2 +#define BIT_2 0x4 +#define BIT_3 0x8 +#define BIT_4 0x10 +#define BIT_5 0x20 +#define BIT_6 0x40 +#define BIT_7 0x80 +#define BIT_8 0x100 +#define BIT_9 0x200 +#define BIT_10 0x400 +#define BIT_11 0x800 +#define BIT_12 0x1000 +#define BIT_13 0x2000 +#define BIT_14 0x4000 +#define BIT_15 0x8000 +#define BIT_16 0x10000 +#define BIT_17 0x20000 +#define BIT_18 0x40000 +#define BIT_19 0x80000 +#define BIT_20 0x100000 +#define BIT_21 0x200000 +#define BIT_22 0x400000 +#define BIT_23 0x800000 +#define BIT_24 0x1000000 +#define BIT_25 0x2000000 +#define BIT_26 0x4000000 +#define BIT_27 0x8000000 +#define BIT_28 0x10000000 +#define BIT_29 0x20000000 +#define BIT_30 0x40000000 +#define BIT_31 0x80000000 + +/** + * Macros to help code, maintain, etc. + **/ +#define ql4_printk(level, ha, format, arg...) \ + dev_printk(level , &((ha)->pdev->dev) , format , ## arg) + + +/* + * Host adapter default definitions + ***********************************/ +#define MAX_HBAS 16 +#define MAX_BUSES 1 +#define MAX_TARGETS MAX_DEV_DB_ENTRIES +#define MAX_LUNS 0xffff +#define MAX_AEN_ENTRIES MAX_DEV_DB_ENTRIES +#define MAX_DDB_ENTRIES MAX_DEV_DB_ENTRIES +#define MAX_PDU_ENTRIES 32 +#define INVALID_ENTRY 0xFFFF +#define MAX_CMDS_TO_RISC 1024 +#define MAX_SRBS MAX_CMDS_TO_RISC +#define MBOX_AEN_REG_COUNT 8 +#define MAX_INIT_RETRIES 5 + +/* + * Buffer sizes + */ +#define REQUEST_QUEUE_DEPTH MAX_CMDS_TO_RISC +#define RESPONSE_QUEUE_DEPTH 64 +#define QUEUE_SIZE 64 +#define DMA_BUFFER_SIZE 512 +#define IOCB_HIWAT_CUSHION 4 + +/* + * Misc + */ +#define MAC_ADDR_LEN 6 /* in bytes */ +#define IP_ADDR_LEN 4 /* in bytes */ +#define IPv6_ADDR_LEN 16 /* IPv6 address size */ +#define DRIVER_NAME "qla4xxx" + +#define MAX_LINKED_CMDS_PER_LUN 3 +#define MAX_REQS_SERVICED_PER_INTR 1 + +#define ISCSI_IPADDR_SIZE 4 /* IP address size */ +#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */ +#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */ + +#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */ + /* recovery timeout */ + +#define LSDW(x) ((u32)((u64)(x))) +#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16)) + +#define DEV_DB_NON_PERSISTENT 0 +#define DEV_DB_PERSISTENT 1 + +#define QL4_ISP_REG_DISCONNECT 0xffffffffU + +#define COPY_ISID(dst_isid, src_isid) { \ + int i, j; \ + for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \ + dst_isid[i++] = src_isid[j--]; \ +} + +#define SET_BITVAL(o, n, v) { \ + if (o) \ + n |= v; \ + else \ + n &= ~v; \ +} + +#define OP_STATE(o, f, p) { \ + p = (o & f) ? "enable" : "disable"; \ +} + +/* + * Retry & Timeout Values + */ +#define MBOX_TOV 60 +#define SOFT_RESET_TOV 30 +#define RESET_INTR_TOV 3 +#define SEMAPHORE_TOV 10 +#define ADAPTER_INIT_TOV 30 +#define ADAPTER_RESET_TOV 180 +#define EXTEND_CMD_TOV 60 +#define WAIT_CMD_TOV 5 +#define EH_WAIT_CMD_TOV 120 +#define FIRMWARE_UP_TOV 60 +#define RESET_FIRMWARE_TOV 30 +#define LOGOUT_TOV 10 +#define IOCB_TOV_MARGIN 10 +#define RELOGIN_TOV 18 +#define ISNS_DEREG_TOV 5 +#define HBA_ONLINE_TOV 30 +#define DISABLE_ACB_TOV 30 +#define IP_CONFIG_TOV 30 +#define LOGIN_TOV 12 +#define BOOT_LOGIN_RESP_TOV 60 + +#define MAX_RESET_HA_RETRIES 2 +#define FW_ALIVE_WAIT_TOV 3 +#define IDC_EXTEND_TOV 8 +#define IDC_COMP_TOV 5 +#define LINK_UP_COMP_TOV 30 + +/* + * Note: the data structure below does not have a struct iscsi_cmd member since + * the qla4xxx driver does not use libiscsi for SCSI I/O. 
+ */ +struct qla4xxx_cmd_priv { + struct srb *srb; +}; + +static inline struct qla4xxx_cmd_priv *qla4xxx_cmd_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +/* + * SCSI Request Block structure (srb) that is associated with each scsi_cmnd. + */ +struct srb { + struct list_head list; /* (8) */ + struct scsi_qla_host *ha; /* HA the SP is queued on */ + struct ddb_entry *ddb; + uint16_t flags; /* (1) Status flags. */ + +#define SRB_DMA_VALID BIT_3 /* DMA Buffer mapped. */ +#define SRB_GOT_SENSE BIT_4 /* sense data received. */ + uint8_t state; /* (1) Status flags. */ + +#define SRB_NO_QUEUE_STATE 0 /* Request is in between states */ +#define SRB_FREE_STATE 1 +#define SRB_ACTIVE_STATE 3 +#define SRB_ACTIVE_TIMEOUT_STATE 4 +#define SRB_SUSPENDED_STATE 7 /* Request in suspended state */ + + struct scsi_cmnd *cmd; /* (4) SCSI command block */ + dma_addr_t dma_handle; /* (4) for unmap of single transfers */ + struct kref srb_ref; /* reference count for this srb */ + uint8_t err_id; /* error id */ +#define SRB_ERR_PORT 1 /* Request failed because "port down" */ +#define SRB_ERR_LOOP 2 /* Request failed because "loop down" */ +#define SRB_ERR_DEVICE 3 /* Request failed because "device error" */ +#define SRB_ERR_OTHER 4 + + uint16_t reserved; + uint16_t iocb_tov; + uint16_t iocb_cnt; /* Number of used iocbs */ + uint16_t cc_stat; + + /* Used for extended sense / status continuation */ + uint8_t *req_sense_ptr; + uint16_t req_sense_len; + uint16_t reserved2; +}; + +/* Mailbox request block structure */ +struct mrb { + struct scsi_qla_host *ha; + struct mbox_cmd_iocb *mbox; + uint32_t mbox_cmd; + uint16_t iocb_cnt; /* Number of used iocbs */ + uint32_t pid; +}; + +/* + * Asynchronous Event Queue structure + */ +struct aen { + uint32_t mbox_sts[MBOX_AEN_REG_COUNT]; +}; + +struct ql4_aen_log { + int count; + struct aen entry[MAX_AEN_ENTRIES]; +}; + +/* + * Device Database (DDB) structure + */ +struct ddb_entry { + struct scsi_qla_host *ha; + struct iscsi_cls_session *sess; + struct iscsi_cls_conn *conn; + + uint16_t fw_ddb_index; /* DDB firmware index */ + uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */ + uint16_t ddb_type; +#define FLASH_DDB 0x01 + + struct dev_db_entry fw_ddb_entry; + int (*unblock_sess)(struct iscsi_cls_session *cls_session); + int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index, + struct ddb_entry *ddb_entry, uint32_t state); + + /* Driver Re-login */ + unsigned long flags; /* DDB Flags */ +#define DDB_CONN_CLOSE_FAILURE 0 /* 0x00000001 */ + + uint16_t default_relogin_timeout; /* Max time to wait for + * relogin to complete */ + atomic_t retry_relogin_timer; /* Min Time between relogins + * (4000 only) */ + atomic_t relogin_timer; /* Max Time to wait for + * relogin to complete */ + atomic_t relogin_retry_count; /* Num of times relogin has been + * retried */ + uint32_t default_time2wait; /* Default Min time between + * relogins (+aens) */ + uint16_t chap_tbl_idx; +}; + +struct qla_ddb_index { + struct list_head list; + uint16_t fw_ddb_idx; + uint16_t flash_ddb_idx; + struct dev_db_entry fw_ddb; + uint8_t flash_isid[6]; +}; + +#define DDB_IPADDR_LEN 64 + +struct ql4_tuple_ddb { + int port; + int tpgt; + char ip_addr[DDB_IPADDR_LEN]; + char iscsi_name[ISCSI_NAME_SIZE]; + uint16_t options; +#define DDB_OPT_IPV6 0x0e0e +#define DDB_OPT_IPV4 0x0f0f + uint8_t isid[6]; +}; + +/* + * DDB states. 
+ */ +#define DDB_STATE_DEAD 0 /* We can no longer talk to + * this device */ +#define DDB_STATE_ONLINE 1 /* Device ready to accept + * commands */ +#define DDB_STATE_MISSING 2 /* Device logged off, trying + * to re-login */ + +/* + * DDB flags. + */ +#define DF_RELOGIN 0 /* Relogin to device */ +#define DF_BOOT_TGT 1 /* Boot target entry */ +#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ +#define DF_FO_MASKED 3 +#define DF_DISABLE_RELOGIN 4 /* Disable relogin to device */ + +enum qla4_work_type { + QLA4_EVENT_AEN, + QLA4_EVENT_PING_STATUS, +}; + +struct qla4_work_evt { + struct list_head list; + enum qla4_work_type type; + union { + struct { + enum iscsi_host_event_code code; + uint32_t data_size; + uint8_t data[]; + } aen; + struct { + uint32_t status; + uint32_t pid; + uint32_t data_size; + uint8_t data[]; + } ping; + } u; +}; + +struct ql82xx_hw_data { + /* Offsets for flash/nvram access (set to ~0 if not used). */ + uint32_t flash_conf_off; + uint32_t flash_data_off; + + uint32_t fdt_wrt_disable; + uint32_t fdt_erase_cmd; + uint32_t fdt_block_size; + uint32_t fdt_unprotect_sec_cmd; + uint32_t fdt_protect_sec_cmd; + + uint32_t flt_region_flt; + uint32_t flt_region_fdt; + uint32_t flt_region_boot; + uint32_t flt_region_bootload; + uint32_t flt_region_fw; + + uint32_t flt_iscsi_param; + uint32_t flt_region_chap; + uint32_t flt_chap_size; + uint32_t flt_region_ddb; + uint32_t flt_ddb_size; +}; + +struct qla4_8xxx_legacy_intr_set { + uint32_t int_vec_bit; + uint32_t tgt_status_reg; + uint32_t tgt_mask_reg; + uint32_t pci_int_reg; +}; + +/* MSI-X Support */ +#define QLA_MSIX_ENTRIES 2 + +/* + * ISP Operations + */ +struct isp_operations { + int (*iospace_config) (struct scsi_qla_host *ha); + void (*pci_config) (struct scsi_qla_host *); + void (*disable_intrs) (struct scsi_qla_host *); + void (*enable_intrs) (struct scsi_qla_host *); + int (*start_firmware) (struct scsi_qla_host *); + int (*restart_firmware) (struct scsi_qla_host *); + irqreturn_t (*intr_handler) (int , void *); + void (*interrupt_service_routine) (struct scsi_qla_host *, uint32_t); + int (*need_reset) (struct scsi_qla_host *); + int (*reset_chip) (struct scsi_qla_host *); + int (*reset_firmware) (struct scsi_qla_host *); + void (*queue_iocb) (struct scsi_qla_host *); + void (*complete_iocb) (struct scsi_qla_host *); + uint16_t (*rd_shdw_req_q_out) (struct scsi_qla_host *); + uint16_t (*rd_shdw_rsp_q_in) (struct scsi_qla_host *); + int (*get_sys_info) (struct scsi_qla_host *); + uint32_t (*rd_reg_direct) (struct scsi_qla_host *, ulong); + void (*wr_reg_direct) (struct scsi_qla_host *, ulong, uint32_t); + int (*rd_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t *); + int (*wr_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t); + int (*idc_lock) (struct scsi_qla_host *); /* Context: task, can sleep */ + void (*idc_unlock) (struct scsi_qla_host *); + void (*rom_lock_recovery) (struct scsi_qla_host *); /* Context: task, can sleep */ + void (*queue_mailbox_command) (struct scsi_qla_host *, uint32_t *, int); + void (*process_mailbox_interrupt) (struct scsi_qla_host *, int); +}; + +struct ql4_mdump_size_table { + uint32_t size; + uint32_t size_cmask_02; + uint32_t size_cmask_04; + uint32_t size_cmask_08; + uint32_t size_cmask_10; + uint32_t size_cmask_FF; + uint32_t version; +}; + +/*qla4xxx ipaddress configuration details */ +struct ipaddress_config { + uint16_t ipv4_options; + uint16_t tcp_options; + uint16_t ipv4_vlan_tag; + uint8_t ipv4_addr_state; + uint8_t ip_address[IP_ADDR_LEN]; + 
uint8_t subnet_mask[IP_ADDR_LEN]; + uint8_t gateway[IP_ADDR_LEN]; + uint32_t ipv6_options; + uint32_t ipv6_addl_options; + uint8_t ipv6_link_local_state; + uint8_t ipv6_addr0_state; + uint8_t ipv6_addr1_state; + uint8_t ipv6_default_router_state; + uint16_t ipv6_vlan_tag; + struct in6_addr ipv6_link_local_addr; + struct in6_addr ipv6_addr0; + struct in6_addr ipv6_addr1; + struct in6_addr ipv6_default_router_addr; + uint16_t eth_mtu_size; + uint16_t ipv4_port; + uint16_t ipv6_port; + uint8_t control; + uint16_t ipv6_tcp_options; + uint8_t tcp_wsf; + uint8_t ipv6_tcp_wsf; + uint8_t ipv4_tos; + uint8_t ipv4_cache_id; + uint8_t ipv6_cache_id; + uint8_t ipv4_alt_cid_len; + uint8_t ipv4_alt_cid[11]; + uint8_t ipv4_vid_len; + uint8_t ipv4_vid[11]; + uint8_t ipv4_ttl; + uint16_t ipv6_flow_lbl; + uint8_t ipv6_traffic_class; + uint8_t ipv6_hop_limit; + uint32_t ipv6_nd_reach_time; + uint32_t ipv6_nd_rexmit_timer; + uint32_t ipv6_nd_stale_timeout; + uint8_t ipv6_dup_addr_detect_count; + uint32_t ipv6_gw_advrt_mtu; + uint16_t def_timeout; + uint8_t abort_timer; + uint16_t iscsi_options; + uint16_t iscsi_max_pdu_size; + uint16_t iscsi_first_burst_len; + uint16_t iscsi_max_outstnd_r2t; + uint16_t iscsi_max_burst_len; + uint8_t iscsi_name[224]; +}; + +#define QL4_CHAP_MAX_NAME_LEN 256 +#define QL4_CHAP_MAX_SECRET_LEN 100 +#define LOCAL_CHAP 0 +#define BIDI_CHAP 1 + +struct ql4_chap_format { + u8 intr_chap_name[QL4_CHAP_MAX_NAME_LEN]; + u8 intr_secret[QL4_CHAP_MAX_SECRET_LEN]; + u8 target_chap_name[QL4_CHAP_MAX_NAME_LEN]; + u8 target_secret[QL4_CHAP_MAX_SECRET_LEN]; + u16 intr_chap_name_length; + u16 intr_secret_length; + u16 target_chap_name_length; + u16 target_secret_length; +}; + +struct ip_address_format { + u8 ip_type; + u8 ip_address[16]; +}; + +struct ql4_conn_info { + u16 dest_port; + struct ip_address_format dest_ipaddr; + struct ql4_chap_format chap; +}; + +struct ql4_boot_session_info { + u8 target_name[224]; + struct ql4_conn_info conn_list[1]; +}; + +struct ql4_boot_tgt_info { + struct ql4_boot_session_info boot_pri_sess; + struct ql4_boot_session_info boot_sec_sess; +}; + +/* + * Linux Host Adapter structure + */ +struct scsi_qla_host { + /* Linux adapter configuration data */ + unsigned long flags; + +#define AF_ONLINE 0 /* 0x00000001 */ +#define AF_INIT_DONE 1 /* 0x00000002 */ +#define AF_MBOX_COMMAND 2 /* 0x00000004 */ +#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */ +#define AF_ST_DISCOVERY_IN_PROGRESS 4 /* 0x00000010 */ +#define AF_INTERRUPTS_ON 6 /* 0x00000040 */ +#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */ +#define AF_LINK_UP 8 /* 0x00000100 */ +#define AF_LOOPBACK 9 /* 0x00000200 */ +#define AF_IRQ_ATTACHED 10 /* 0x00000400 */ +#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */ +#define AF_HA_REMOVAL 12 /* 0x00001000 */ +#define AF_MBOX_COMMAND_NOPOLL 18 /* 0x00040000 */ +#define AF_FW_RECOVERY 19 /* 0x00080000 */ +#define AF_EEH_BUSY 20 /* 0x00100000 */ +#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */ +#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */ +#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */ +#define AF_8XXX_RST_OWNER 25 /* 0x02000000 */ +#define AF_82XX_DUMP_READING 26 /* 0x04000000 */ +#define AF_83XX_IOCB_INTR_ON 28 /* 0x10000000 */ +#define AF_83XX_MBOX_INTR_ON 29 /* 0x20000000 */ + + unsigned long dpc_flags; + +#define DPC_RESET_HA 1 /* 0x00000002 */ +#define DPC_RETRY_RESET_HA 2 /* 0x00000004 */ +#define DPC_RELOGIN_DEVICE 3 /* 0x00000008 */ +#define DPC_RESET_HA_FW_CONTEXT 4 /* 0x00000010 */ +#define DPC_RESET_HA_INTR 5 /* 0x00000020 */ +#define 
DPC_ISNS_RESTART 7 /* 0x00000080 */ +#define DPC_AEN 9 /* 0x00000200 */ +#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */ +#define DPC_LINK_CHANGED 18 /* 0x00040000 */ +#define DPC_RESET_ACTIVE 20 /* 0x00100000 */ +#define DPC_HA_UNRECOVERABLE 21 /* 0x00200000 ISP-82xx only*/ +#define DPC_HA_NEED_QUIESCENT 22 /* 0x00400000 ISP-82xx only*/ +#define DPC_POST_IDC_ACK 23 /* 0x00800000 */ +#define DPC_RESTORE_ACB 24 /* 0x01000000 */ +#define DPC_SYSFS_DDB_EXPORT 25 /* 0x02000000 */ + + struct Scsi_Host *host; /* pointer to host data */ + uint32_t tot_ddbs; + + uint16_t iocb_cnt; + uint16_t iocb_hiwat; + + /* SRB cache. */ +#define SRB_MIN_REQ 128 + mempool_t *srb_mempool; + + /* pci information */ + struct pci_dev *pdev; + + struct isp_reg __iomem *reg; /* Base I/O address */ + unsigned long pio_address; + unsigned long pio_length; +#define MIN_IOBASE_LEN 0x100 + + uint16_t req_q_count; + + unsigned long host_no; + + /* NVRAM registers */ + struct eeprom_data *nvram; + spinlock_t hardware_lock ____cacheline_aligned; + uint32_t eeprom_cmd_data; + + /* Counters for general statistics */ + uint64_t isr_count; + uint64_t adapter_error_count; + uint64_t device_error_count; + uint64_t total_io_count; + uint64_t total_mbytes_xferred; + uint64_t link_failure_count; + uint64_t invalid_crc_count; + uint32_t bytes_xfered; + uint32_t spurious_int_count; + uint32_t aborted_io_count; + uint32_t io_timeout_count; + uint32_t mailbox_timeout_count; + uint32_t seconds_since_last_intr; + uint32_t seconds_since_last_heartbeat; + uint32_t mac_index; + + /* Info Needed for Management App */ + /* --- From GetFwVersion --- */ + uint32_t firmware_version[2]; + uint32_t patch_number; + uint32_t build_number; + uint32_t board_id; + + /* --- From Init_FW --- */ + /* init_cb_t *init_cb; */ + uint16_t firmware_options; + uint8_t alias[32]; + uint8_t name_string[256]; + uint8_t heartbeat_interval; + + /* --- From FlashSysInfo --- */ + uint8_t my_mac[MAC_ADDR_LEN]; + uint8_t serial_number[16]; + uint16_t port_num; + /* --- From GetFwState --- */ + uint32_t firmware_state; + uint32_t addl_fw_state; + + /* Linux kernel thread */ + struct workqueue_struct *dpc_thread; + struct work_struct dpc_work; + + /* Linux timer thread */ + struct timer_list timer; + uint32_t timer_active; + + /* Recovery Timers */ + atomic_t check_relogin_timeouts; + uint32_t retry_reset_ha_cnt; + uint32_t isp_reset_timer; /* reset test timer */ + uint32_t nic_reset_timer; /* simulated nic reset test timer */ + int eh_start; + struct list_head free_srb_q; + uint16_t free_srb_q_count; + uint16_t num_srbs_allocated; + + /* DMA Memory Block */ + void *queues; + dma_addr_t queues_dma; + unsigned long queues_len; + +#define MEM_ALIGN_VALUE \ + ((max(REQUEST_QUEUE_DEPTH, RESPONSE_QUEUE_DEPTH)) * \ + sizeof(struct queue_entry)) + /* request and response queue variables */ + dma_addr_t request_dma; + struct queue_entry *request_ring; + struct queue_entry *request_ptr; + dma_addr_t response_dma; + struct queue_entry *response_ring; + struct queue_entry *response_ptr; + dma_addr_t shadow_regs_dma; + struct shadow_regs *shadow_regs; + uint16_t request_in; /* Current indexes. 
*/ + uint16_t request_out; + uint16_t response_in; + uint16_t response_out; + + /* aen queue variables */ + uint16_t aen_q_count; /* Number of available aen_q entries */ + uint16_t aen_in; /* Current indexes */ + uint16_t aen_out; + struct aen aen_q[MAX_AEN_ENTRIES]; + + struct ql4_aen_log aen_log;/* tracks all aens */ + + /* This mutex protects several threads to do mailbox commands + * concurrently. + */ + struct mutex mbox_sem; + + /* temporary mailbox status registers */ + volatile uint8_t mbox_status_count; + volatile uint32_t mbox_status[MBOX_REG_COUNT]; + + /* FW ddb index map */ + struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES]; + + /* Saved srb for status continuation entry processing */ + struct srb *status_srb; + + uint8_t acb_version; + + /* qla82xx specific fields */ + struct device_reg_82xx __iomem *qla4_82xx_reg; /* Base I/O address */ + unsigned long nx_pcibase; /* Base I/O address */ + uint8_t *nx_db_rd_ptr; /* Doorbell read pointer */ + unsigned long nx_db_wr_ptr; /* Door bell write pointer */ + unsigned long first_page_group_start; + unsigned long first_page_group_end; + + uint32_t crb_win; + uint32_t curr_window; + uint32_t ddr_mn_window; + unsigned long mn_win_crb; + unsigned long ms_win_crb; + int qdr_sn_window; + rwlock_t hw_lock; + uint16_t func_num; + int link_width; + + struct qla4_8xxx_legacy_intr_set nx_legacy_intr; + u32 nx_crb_mask; + + uint8_t revision_id; + uint32_t fw_heartbeat_counter; + + struct isp_operations *isp_ops; + struct ql82xx_hw_data hw; + + uint32_t nx_dev_init_timeout; + uint32_t nx_reset_timeout; + void *fw_dump; + uint32_t fw_dump_size; + uint32_t fw_dump_capture_mask; + void *fw_dump_tmplt_hdr; + uint32_t fw_dump_tmplt_size; + uint32_t fw_dump_skip_size; + + struct completion mbx_intr_comp; + + struct ipaddress_config ip_config; + struct iscsi_iface *iface_ipv4; + struct iscsi_iface *iface_ipv6_0; + struct iscsi_iface *iface_ipv6_1; + + /* --- From About Firmware --- */ + struct about_fw_info fw_info; + uint32_t fw_uptime_secs; /* seconds elapsed since fw bootup */ + uint32_t fw_uptime_msecs; /* milliseconds beyond elapsed seconds */ + uint16_t def_timeout; /* Default login timeout */ + + uint32_t flash_state; +#define QLFLASH_WAITING 0 +#define QLFLASH_READING 1 +#define QLFLASH_WRITING 2 + struct dma_pool *chap_dma_pool; + uint8_t *chap_list; /* CHAP table cache */ + struct mutex chap_sem; + +#define CHAP_DMA_BLOCK_SIZE 512 + struct workqueue_struct *task_wq; + unsigned long ddb_idx_map[MAX_DDB_ENTRIES / BITS_PER_LONG]; +#define SYSFS_FLAG_FW_SEL_BOOT 2 + struct iscsi_boot_kset *boot_kset; + struct ql4_boot_tgt_info boot_tgt; + uint16_t phy_port_num; + uint16_t phy_port_cnt; + uint16_t iscsi_pci_func_cnt; + uint8_t model_name[16]; + struct completion disable_acb_comp; + struct dma_pool *fw_ddb_dma_pool; +#define DDB_DMA_BLOCK_SIZE 512 + uint16_t pri_ddb_idx; + uint16_t sec_ddb_idx; + int is_reset; + uint16_t temperature; + + /* event work list */ + struct list_head work_list; + spinlock_t work_lock; + + /* mbox iocb */ +#define MAX_MRB 128 + struct mrb *active_mrb_array[MAX_MRB]; + uint32_t mrb_index; + + uint32_t *reg_tbl; + struct qla4_83xx_reset_template reset_tmplt; + struct device_reg_83xx __iomem *qla4_83xx_reg; /* Base I/O address + for ISP8324 and + and ISP8042 */ + uint32_t pf_bit; + struct qla4_83xx_idc_information idc_info; + struct addr_ctrl_blk *saved_acb; + int notify_idc_comp; + int notify_link_up_comp; + int idc_extend_tmo; + struct completion idc_comp; + struct completion link_up_comp; +}; + +struct ql4_task_data { + 
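	/*
	 * Per-task driver context for iSCSI PDU passthrough: the DMA-mapped
	 * request/response buffers used by the passthrough IOCB, the owning
	 * iscsi_task, the passthru_status returned on completion, and the
	 * work item used to hand the finished task back to the iSCSI
	 * transport.
	 */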
struct scsi_qla_host *ha; + uint8_t iocb_req_cnt; + dma_addr_t data_dma; + void *req_buffer; + dma_addr_t req_dma; + uint32_t req_len; + void *resp_buffer; + dma_addr_t resp_dma; + uint32_t resp_len; + struct iscsi_task *task; + struct passthru_status sts; + struct work_struct task_work; +}; + +struct qla_endpoint { + struct Scsi_Host *host; + struct sockaddr_storage dst_addr; +}; + +struct qla_conn { + struct qla_endpoint *qla_ep; +}; + +static inline int is_ipv4_enabled(struct scsi_qla_host *ha) +{ + return ((ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) != 0); +} + +static inline int is_ipv6_enabled(struct scsi_qla_host *ha) +{ + return ((ha->ip_config.ipv6_options & + IPV6_OPT_IPV6_PROTOCOL_ENABLE) != 0); +} + +static inline int is_qla4010(struct scsi_qla_host *ha) +{ + return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010; +} + +static inline int is_qla4022(struct scsi_qla_host *ha) +{ + return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022; +} + +static inline int is_qla4032(struct scsi_qla_host *ha) +{ + return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032; +} + +static inline int is_qla40XX(struct scsi_qla_host *ha) +{ + return is_qla4032(ha) || is_qla4022(ha) || is_qla4010(ha); +} + +static inline int is_qla8022(struct scsi_qla_host *ha) +{ + return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022; +} + +static inline int is_qla8032(struct scsi_qla_host *ha) +{ + return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324; +} + +static inline int is_qla8042(struct scsi_qla_host *ha) +{ + return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042; +} + +static inline int is_qla80XX(struct scsi_qla_host *ha) +{ + return is_qla8022(ha) || is_qla8032(ha) || is_qla8042(ha); +} + +static inline int is_aer_supported(struct scsi_qla_host *ha) +{ + return ((ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022) || + (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324) || + (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042)); +} + +static inline int adapter_up(struct scsi_qla_host *ha) +{ + return (test_bit(AF_ONLINE, &ha->flags) != 0) && + (test_bit(AF_LINK_UP, &ha->flags) != 0) && + (!test_bit(AF_LOOPBACK, &ha->flags)); +} + +static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost) +{ + return (struct scsi_qla_host *)iscsi_host_priv(shost); +} + +static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha) +{ + return (is_qla4010(ha) ? + &ha->reg->u1.isp4010.nvram : + &ha->reg->u1.isp4022.semaphore); +} + +static inline void __iomem* isp_nvram(struct scsi_qla_host *ha) +{ + return (is_qla4010(ha) ? + &ha->reg->u1.isp4010.nvram : + &ha->reg->u1.isp4022.nvram); +} + +static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha) +{ + return (is_qla4010(ha) ? + &ha->reg->u2.isp4010.ext_hw_conf : + &ha->reg->u2.isp4022.p0.ext_hw_conf); +} + +static inline void __iomem* isp_port_status(struct scsi_qla_host *ha) +{ + return (is_qla4010(ha) ? + &ha->reg->u2.isp4010.port_status : + &ha->reg->u2.isp4022.p0.port_status); +} + +static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha) +{ + return (is_qla4010(ha) ? + &ha->reg->u2.isp4010.port_ctrl : + &ha->reg->u2.isp4022.p0.port_ctrl); +} + +static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha) +{ + return (is_qla4010(ha) ? + &ha->reg->u2.isp4010.port_err_status : + &ha->reg->u2.isp4022.p0.port_err_status); +} + +static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha) +{ + return (is_qla4010(ha) ? 
+ &ha->reg->u2.isp4010.gp_out : + &ha->reg->u2.isp4022.p0.gp_out); +} + +static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha) +{ + return (is_qla4010(ha) ? + offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2 : + offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2); +} + +int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits); +void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask); +int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits); + +static inline int ql4xxx_lock_flash(struct scsi_qla_host *a) +{ + if (is_qla4010(a)) + return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK, + QL4010_FLASH_SEM_BITS); + else + return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK, + (QL4022_RESOURCE_BITS_BASE_CODE | + (a->mac_index)) << 13); +} + +static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a) +{ + if (is_qla4010(a)) + ql4xxx_sem_unlock(a, QL4010_FLASH_SEM_MASK); + else + ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK); +} + +static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a) +{ + if (is_qla4010(a)) + return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK, + QL4010_NVRAM_SEM_BITS); + else + return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK, + (QL4022_RESOURCE_BITS_BASE_CODE | + (a->mac_index)) << 10); +} + +static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a) +{ + if (is_qla4010(a)) + ql4xxx_sem_unlock(a, QL4010_NVRAM_SEM_MASK); + else + ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK); +} + +static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a) +{ + if (is_qla4010(a)) + return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK, + QL4010_DRVR_SEM_BITS); + else + return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK, + (QL4022_RESOURCE_BITS_BASE_CODE | + (a->mac_index)) << 1); +} + +static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a) +{ + if (is_qla4010(a)) + ql4xxx_sem_unlock(a, QL4010_DRVR_SEM_MASK); + else + ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK); +} + +static inline int ql4xxx_reset_active(struct scsi_qla_host *ha) +{ + return test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || + test_bit(DPC_RESET_HA, &ha->dpc_flags) || + test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) || + test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || + test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || + test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); + +} + +static inline int qla4_8xxx_rd_direct(struct scsi_qla_host *ha, + const uint32_t crb_reg) +{ + return ha->isp_ops->rd_reg_direct(ha, ha->reg_tbl[crb_reg]); +} + +static inline void qla4_8xxx_wr_direct(struct scsi_qla_host *ha, + const uint32_t crb_reg, + const uint32_t value) +{ + ha->isp_ops->wr_reg_direct(ha, ha->reg_tbl[crb_reg], value); +} + +/*---------------------------------------------------------------------------*/ + +/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */ + +#define INIT_ADAPTER 0 +#define RESET_ADAPTER 1 + +#define PRESERVE_DDB_LIST 0 +#define REBUILD_DDB_LIST 1 + +/* Defines for process_aen() */ +#define PROCESS_ALL_AENS 0 +#define FLUSH_DDB_CHANGED_AENS 1 + +/* Defines for udev events */ +#define QL4_UEVENT_CODE_FW_DUMP 0 + +#endif /*_QLA4XXX_H */ diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h new file mode 100644 index 000000000..860ec61b5 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_fw.h @@ -0,0 +1,1441 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ + +#ifndef _QLA4X_FW_H +#define _QLA4X_FW_H + + +#define 
MAX_PRST_DEV_DB_ENTRIES 64 +#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES +#define MAX_DEV_DB_ENTRIES 512 +#define MAX_DEV_DB_ENTRIES_40XX 256 + +/************************************************************************* + * + * ISP 4010 I/O Register Set Structure and Definitions + * + *************************************************************************/ + +struct port_ctrl_stat_regs { + __le32 ext_hw_conf; /* 0x50 R/W */ + __le32 rsrvd0; /* 0x54 */ + __le32 port_ctrl; /* 0x58 */ + __le32 port_status; /* 0x5c */ + __le32 rsrvd1[32]; /* 0x60-0xdf */ + __le32 gp_out; /* 0xe0 */ + __le32 gp_in; /* 0xe4 */ + __le32 rsrvd2[5]; /* 0xe8-0xfb */ + __le32 port_err_status; /* 0xfc */ +}; + +struct host_mem_cfg_regs { + __le32 rsrvd0[12]; /* 0x50-0x79 */ + __le32 req_q_out; /* 0x80 */ + __le32 rsrvd1[31]; /* 0x84-0xFF */ +}; + +/* + * ISP 82xx I/O Register Set structure definitions. + */ +struct device_reg_82xx { + __le32 req_q_out; /* 0x0000 (R): Request Queue out-Pointer. */ + __le32 reserve1[63]; /* Request Queue out-Pointer. (64 * 4) */ + __le32 rsp_q_in; /* 0x0100 (R/W): Response Queue In-Pointer. */ + __le32 reserve2[63]; /* Response Queue In-Pointer. */ + __le32 rsp_q_out; /* 0x0200 (R/W): Response Queue Out-Pointer. */ + __le32 reserve3[63]; /* Response Queue Out-Pointer. */ + + __le32 mailbox_in[8]; /* 0x0300 (R/W): Mail box In registers */ + __le32 reserve4[24]; + __le32 hint; /* 0x0380 (R/W): Host interrupt register */ +#define HINT_MBX_INT_PENDING BIT_0 + __le32 reserve5[31]; + __le32 mailbox_out[8]; /* 0x0400 (R): Mail box Out registers */ + __le32 reserve6[56]; + + __le32 host_status; /* Offset 0x500 (R): host status */ +#define HSRX_RISC_MB_INT BIT_0 /* RISC to Host Mailbox interrupt */ +#define HSRX_RISC_IOCB_INT BIT_1 /* RISC to Host IOCB interrupt */ + + __le32 host_int; /* Offset 0x0504 (R/W): Interrupt status. */ +#define ISRX_82XX_RISC_INT BIT_0 /* RISC interrupt. 
*/ +}; + +/* ISP 83xx I/O Register Set structure */ +struct device_reg_83xx { + __le32 mailbox_in[16]; /* 0x0000 */ + __le32 reserve1[496]; /* 0x0040 */ + __le32 mailbox_out[16]; /* 0x0800 */ + __le32 reserve2[496]; + __le32 mbox_int; /* 0x1000 */ + __le32 reserve3[63]; + __le32 req_q_out; /* 0x1100 */ + __le32 reserve4[63]; + + __le32 rsp_q_in; /* 0x1200 */ + __le32 reserve5[1919]; + + __le32 req_q_in; /* 0x3000 */ + __le32 reserve6[3]; + __le32 iocb_int_mask; /* 0x3010 */ + __le32 reserve7[3]; + __le32 rsp_q_out; /* 0x3020 */ + __le32 reserve8[3]; + __le32 anonymousbuff; /* 0x3030 */ + __le32 mb_int_mask; /* 0x3034 */ + + __le32 host_intr; /* 0x3038 - Host Interrupt Register */ + __le32 risc_intr; /* 0x303C - RISC Interrupt Register */ + __le32 reserve9[544]; + __le32 leg_int_ptr; /* 0x38C0 - Legacy Interrupt Pointer Register */ + __le32 leg_int_trig; /* 0x38C4 - Legacy Interrupt Trigger Control */ + __le32 leg_int_mask; /* 0x38C8 - Legacy Interrupt Mask Register */ +}; + +#define INT_ENABLE_FW_MB (1 << 2) +#define INT_MASK_FW_MB (1 << 2) + +/* remote register set (access via PCI memory read/write) */ +struct isp_reg { +#define MBOX_REG_COUNT 8 + __le32 mailbox[MBOX_REG_COUNT]; + + __le32 flash_address; /* 0x20 */ + __le32 flash_data; + __le32 ctrl_status; + + union { + struct { + __le32 nvram; + __le32 reserved1[2]; /* 0x30 */ + } __attribute__ ((packed)) isp4010; + struct { + __le32 intr_mask; + __le32 nvram; /* 0x30 */ + __le32 semaphore; + } __attribute__ ((packed)) isp4022; + } u1; + + __le32 req_q_in; /* SCSI Request Queue Producer Index */ + __le32 rsp_q_out; /* SCSI Completion Queue Consumer Index */ + + __le32 reserved2[4]; /* 0x40 */ + + union { + struct { + __le32 ext_hw_conf; /* 0x50 */ + __le32 flow_ctrl; + __le32 port_ctrl; + __le32 port_status; + + __le32 reserved3[8]; /* 0x60 */ + + __le32 req_q_out; /* 0x80 */ + + __le32 reserved4[23]; /* 0x84 */ + + __le32 gp_out; /* 0xe0 */ + __le32 gp_in; + + __le32 reserved5[5]; + + __le32 port_err_status; /* 0xfc */ + } __attribute__ ((packed)) isp4010; + struct { + union { + struct port_ctrl_stat_regs p0; + struct host_mem_cfg_regs p1; + }; + } __attribute__ ((packed)) isp4022; + } u2; +}; /* 256 x100 */ + + +/* Semaphore Defines for 4010 */ +#define QL4010_DRVR_SEM_BITS 0x00000030 +#define QL4010_GPIO_SEM_BITS 0x000000c0 +#define QL4010_SDRAM_SEM_BITS 0x00000300 +#define QL4010_PHY_SEM_BITS 0x00000c00 +#define QL4010_NVRAM_SEM_BITS 0x00003000 +#define QL4010_FLASH_SEM_BITS 0x0000c000 + +#define QL4010_DRVR_SEM_MASK 0x00300000 +#define QL4010_GPIO_SEM_MASK 0x00c00000 +#define QL4010_SDRAM_SEM_MASK 0x03000000 +#define QL4010_PHY_SEM_MASK 0x0c000000 +#define QL4010_NVRAM_SEM_MASK 0x30000000 +#define QL4010_FLASH_SEM_MASK 0xc0000000 + +/* Semaphore Defines for 4022 */ +#define QL4022_RESOURCE_MASK_BASE_CODE 0x7 +#define QL4022_RESOURCE_BITS_BASE_CODE 0x4 + + +#define QL4022_DRVR_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (1+16)) +#define QL4022_DDR_RAM_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (4+16)) +#define QL4022_PHY_GIO_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (7+16)) +#define QL4022_NVRAM_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (10+16)) +#define QL4022_FLASH_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (13+16)) + +/* nvram address for 4032 */ +#define NVRAM_PORT0_BOOT_MODE 0x03b1 +#define NVRAM_PORT0_BOOT_PRI_TGT 0x03b2 +#define NVRAM_PORT0_BOOT_SEC_TGT 0x03bb +#define NVRAM_PORT1_BOOT_MODE 0x07b1 +#define NVRAM_PORT1_BOOT_PRI_TGT 0x07b2 +#define NVRAM_PORT1_BOOT_SEC_TGT 0x07bb + + +/* Page # defines for 4022 */ 
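/*
 * The page numbers defined below choose which ISP4022 register page is
 * visible through the u2.isp4022 window of struct isp_reg above: the
 * driver writes the page number masked with set_rmask(CSR_SCSI_PAGE_SELECT)
 * into ctrl_status, as qla4xxx_dump_registers() in ql4_dbg.c does before
 * touching the host_mem_cfg_regs page.  A minimal sketch follows; the
 * helper name is illustrative and not part of the driver, and set_rmask(),
 * defined a few lines further down, simply mirrors the low 16 bits into
 * the upper write-mask half-word, e.g. set_rmask(CSR_SCSI_PAGE_SELECT)
 * == 0x00030003.
 */
static inline void ql4022_select_reg_page(struct isp_reg __iomem *reg,
					  uint32_t page)
{
	/* e.g. page == HOST_MEM_CFG_PAGE to expose p1 (host_mem_cfg_regs) */
	writel(page & set_rmask(CSR_SCSI_PAGE_SELECT), &reg->ctrl_status);
}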
+#define PORT_CTRL_STAT_PAGE 0 /* 4022 */ +#define HOST_MEM_CFG_PAGE 1 /* 4022 */ +#define LOCAL_RAM_CFG_PAGE 2 /* 4022 */ +#define PROT_STAT_PAGE 3 /* 4022 */ + +/* Register Mask - sets corresponding mask bits in the upper word */ +static inline uint32_t set_rmask(uint32_t val) +{ + return (val & 0xffff) | (val << 16); +} + + +static inline uint32_t clr_rmask(uint32_t val) +{ + return 0 | (val << 16); +} + +/* ctrl_status definitions */ +#define CSR_SCSI_PAGE_SELECT 0x00000003 +#define CSR_SCSI_INTR_ENABLE 0x00000004 /* 4010 */ +#define CSR_SCSI_RESET_INTR 0x00000008 +#define CSR_SCSI_COMPLETION_INTR 0x00000010 +#define CSR_SCSI_PROCESSOR_INTR 0x00000020 +#define CSR_INTR_RISC 0x00000040 +#define CSR_BOOT_ENABLE 0x00000080 +#define CSR_NET_PAGE_SELECT 0x00000300 /* 4010 */ +#define CSR_FUNC_NUM 0x00000700 /* 4022 */ +#define CSR_NET_RESET_INTR 0x00000800 /* 4010 */ +#define CSR_FORCE_SOFT_RESET 0x00002000 /* 4022 */ +#define CSR_FATAL_ERROR 0x00004000 +#define CSR_SOFT_RESET 0x00008000 +#define ISP_CONTROL_FN_MASK CSR_FUNC_NUM +#define ISP_CONTROL_FN0_SCSI 0x0500 +#define ISP_CONTROL_FN1_SCSI 0x0700 + +#define INTR_PENDING (CSR_SCSI_COMPLETION_INTR |\ + CSR_SCSI_PROCESSOR_INTR |\ + CSR_SCSI_RESET_INTR) + +/* ISP InterruptMask definitions */ +#define IMR_SCSI_INTR_ENABLE 0x00000004 /* 4022 */ + +/* ISP 4022 nvram definitions */ +#define NVR_WRITE_ENABLE 0x00000010 /* 4022 */ + +#define QL4010_NVRAM_SIZE 0x200 +#define QL40X2_NVRAM_SIZE 0x800 + +/* ISP port_status definitions */ + +/* ISP Semaphore definitions */ + +/* ISP General Purpose Output definitions */ +#define GPOR_TOPCAT_RESET 0x00000004 + +/* shadow registers (DMA'd from HA to system memory. read only) */ +struct shadow_regs { + /* SCSI Request Queue Consumer Index */ + __le32 req_q_out; /* 0 x0 R */ + + /* SCSI Completion Queue Producer Index */ + __le32 rsp_q_in; /* 4 x4 R */ +}; /* 8 x8 */ + + +/* External hardware configuration register */ +union external_hw_config_reg { + struct { + /* FIXME: Do we even need this? All values are + * referred to by 16 bit quantities. Platform and + * endianess issues. 
*/ + __le32 bReserved0:1; + __le32 bSDRAMProtectionMethod:2; + __le32 bSDRAMBanks:1; + __le32 bSDRAMChipWidth:1; + __le32 bSDRAMChipSize:2; + __le32 bParityDisable:1; + __le32 bExternalMemoryType:1; + __le32 bFlashBIOSWriteEnable:1; + __le32 bFlashUpperBankSelect:1; + __le32 bWriteBurst:2; + __le32 bReserved1:3; + __le32 bMask:16; + }; + uint32_t Asuint32_t; +}; + +/* 82XX Support start */ +/* 82xx Default FLT Addresses */ +#define FA_FLASH_LAYOUT_ADDR_82 0xFC400 +#define FA_FLASH_DESCR_ADDR_82 0xFC000 +#define FA_BOOT_LOAD_ADDR_82 0x04000 +#define FA_BOOT_CODE_ADDR_82 0x20000 +#define FA_RISC_CODE_ADDR_82 0x40000 +#define FA_GOLD_RISC_CODE_ADDR_82 0x80000 +#define FA_FLASH_ISCSI_CHAP 0x540000 +#define FA_FLASH_CHAP_SIZE 0xC0000 +#define FA_FLASH_ISCSI_DDB 0x420000 +#define FA_FLASH_DDB_SIZE 0x080000 + +/* Flash Description Table */ +struct qla_fdt_layout { + uint8_t sig[4]; + uint16_t version; + uint16_t len; + uint16_t checksum; + uint8_t unused1[2]; + uint8_t model[16]; + uint16_t man_id; + uint16_t id; + uint8_t flags; + uint8_t erase_cmd; + uint8_t alt_erase_cmd; + uint8_t wrt_enable_cmd; + uint8_t wrt_enable_bits; + uint8_t wrt_sts_reg_cmd; + uint8_t unprotect_sec_cmd; + uint8_t read_man_id_cmd; + uint32_t block_size; + uint32_t alt_block_size; + uint32_t flash_size; + uint32_t wrt_enable_data; + uint8_t read_id_addr_len; + uint8_t wrt_disable_bits; + uint8_t read_dev_id_len; + uint8_t chip_erase_cmd; + uint16_t read_timeout; + uint8_t protect_sec_cmd; + uint8_t unused2[65]; +}; + +/* Flash Layout Table */ + +struct qla_flt_location { + uint8_t sig[4]; + uint16_t start_lo; + uint16_t start_hi; + uint8_t version; + uint8_t unused[5]; + uint16_t checksum; +}; + +struct qla_flt_header { + uint16_t version; + uint16_t length; + uint16_t checksum; + uint16_t unused; +}; + +/* 82xx FLT Regions */ +#define FLT_REG_FDT 0x1a +#define FLT_REG_FLT 0x1c +#define FLT_REG_BOOTLOAD_82 0x72 +#define FLT_REG_FW_82 0x74 +#define FLT_REG_FW_82_1 0x97 +#define FLT_REG_GOLD_FW_82 0x75 +#define FLT_REG_BOOT_CODE_82 0x78 +#define FLT_REG_ISCSI_PARAM 0x65 +#define FLT_REG_ISCSI_CHAP 0x63 +#define FLT_REG_ISCSI_DDB 0x6A + +struct qla_flt_region { + uint32_t code; + uint32_t size; + uint32_t start; + uint32_t end; +}; + +/************************************************************************* + * + * Mailbox Commands Structures and Definitions + * + *************************************************************************/ + +/* Mailbox command definitions */ +#define MBOX_CMD_ABOUT_FW 0x0009 +#define MBOX_CMD_PING 0x000B +#define PING_IPV6_PROTOCOL_ENABLE 0x1 +#define PING_IPV6_LINKLOCAL_ADDR 0x4 +#define PING_IPV6_ADDR0 0x8 +#define PING_IPV6_ADDR1 0xC +#define MBOX_CMD_ENABLE_INTRS 0x0010 +#define INTR_DISABLE 0 +#define INTR_ENABLE 1 +#define MBOX_CMD_STOP_FW 0x0014 +#define MBOX_CMD_ABORT_TASK 0x0015 +#define MBOX_CMD_LUN_RESET 0x0016 +#define MBOX_CMD_TARGET_WARM_RESET 0x0017 +#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E +#define MBOX_CMD_GET_FW_STATUS 0x001F +#define MBOX_CMD_SET_ISNS_SERVICE 0x0021 +#define ISNS_DISABLE 0 +#define ISNS_ENABLE 1 +#define MBOX_CMD_COPY_FLASH 0x0024 +#define MBOX_CMD_WRITE_FLASH 0x0025 +#define MBOX_CMD_READ_FLASH 0x0026 +#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031 +#define MBOX_CMD_CONN_OPEN 0x0074 +#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056 +#define DDB_NOT_LOGGED_IN 0x09 +#define LOGOUT_OPTION_CLOSE_SESSION 0x0002 +#define LOGOUT_OPTION_RELOGIN 0x0004 +#define LOGOUT_OPTION_FREE_DDB 0x0008 +#define MBOX_CMD_SET_PARAM 0x0059 +#define SET_DRVR_VERSION 0x200 
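/*
 * A minimal usage sketch for the mailbox opcodes in this block, modelled
 * on the qla4xxx_mailbox_command() calls in ql4_bsg.c above; the function
 * name below is illustrative and not part of the driver.  The opcode is
 * placed in mailbox 0, all eight mailbox registers are handed to the
 * firmware, and on success the status registers carry the reply: for
 * MBOX_CMD_GET_FW_STATE (defined just below), mailbox 1 carries the
 * FW_STATE_* value and mailbox 3 the FW_ADDSTATE_* flags (see the
 * "Mailbox 1" / "Mailbox 3" comments further down).
 */
static int ql4_get_fw_state_sketch(struct scsi_qla_host *ha,
				   uint32_t *state, uint32_t *addl_state)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(mbox_cmd, 0, sizeof(mbox_cmd));
	memset(mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;

	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
				    &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS)
		return QLA_ERROR;

	*state = mbox_sts[1];		/* FW_STATE_* ("Mailbox 1" below) */
	*addl_state = mbox_sts[3];	/* FW_ADDSTATE_* ("Mailbox 3" below) */
	return QLA_SUCCESS;
}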
+#define MAX_DRVR_VER_LEN 24 +#define MBOX_CMD_EXECUTE_IOCB_A64 0x005A +#define MBOX_CMD_INITIALIZE_FIRMWARE 0x0060 +#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK 0x0061 +#define MBOX_CMD_REQUEST_DATABASE_ENTRY 0x0062 +#define MBOX_CMD_SET_DATABASE_ENTRY 0x0063 +#define MBOX_CMD_GET_DATABASE_ENTRY 0x0064 +#define DDB_DS_UNASSIGNED 0x00 +#define DDB_DS_NO_CONNECTION_ACTIVE 0x01 +#define DDB_DS_DISCOVERY 0x02 +#define DDB_DS_SESSION_ACTIVE 0x04 +#define DDB_DS_SESSION_FAILED 0x06 +#define DDB_DS_LOGIN_IN_PROCESS 0x07 +#define MBOX_CMD_GET_FW_STATE 0x0069 +#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A +#define MBOX_CMD_DIAG_TEST 0x0075 +#define MBOX_CMD_GET_SYS_INFO 0x0078 +#define MBOX_CMD_GET_NVRAM 0x0078 /* For 40xx */ +#define MBOX_CMD_SET_NVRAM 0x0079 /* For 40xx */ +#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087 +#define MBOX_CMD_SET_ACB 0x0088 +#define MBOX_CMD_GET_ACB 0x0089 +#define MBOX_CMD_DISABLE_ACB 0x008A +#define MBOX_CMD_GET_IPV6_NEIGHBOR_CACHE 0x008B +#define MBOX_CMD_GET_IPV6_DEST_CACHE 0x008C +#define MBOX_CMD_GET_IPV6_DEF_ROUTER_LIST 0x008D +#define MBOX_CMD_GET_IPV6_LCL_PREFIX_LIST 0x008E +#define MBOX_CMD_SET_IPV6_NEIGHBOR_CACHE 0x0090 +#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091 +#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092 +#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093 +#define MBOX_CMD_SET_PORT_CONFIG 0x0122 +#define MBOX_CMD_GET_PORT_CONFIG 0x0123 +#define MBOX_CMD_SET_LED_CONFIG 0x0125 +#define MBOX_CMD_GET_LED_CONFIG 0x0126 +#define MBOX_CMD_MINIDUMP 0x0129 + +/* Port Config */ +#define ENABLE_INTERNAL_LOOPBACK 0x04 +#define ENABLE_EXTERNAL_LOOPBACK 0x08 +#define ENABLE_DCBX 0x10 + +/* Minidump subcommand */ +#define MINIDUMP_GET_SIZE_SUBCOMMAND 0x00 +#define MINIDUMP_GET_TMPLT_SUBCOMMAND 0x01 + +/* Mailbox 1 */ +#define FW_STATE_READY 0x0000 +#define FW_STATE_CONFIG_WAIT 0x0001 +#define FW_STATE_WAIT_AUTOCONNECT 0x0002 +#define FW_STATE_ERROR 0x0004 +#define FW_STATE_CONFIGURING_IP 0x0008 + +/* Mailbox 3 */ +#define FW_ADDSTATE_OPTICAL_MEDIA 0x0001 +#define FW_ADDSTATE_DHCPv4_ENABLED 0x0002 +#define FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED 0x0004 +#define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED 0x0008 +#define FW_ADDSTATE_LINK_UP 0x0010 +#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020 +#define FW_ADDSTATE_LINK_SPEED_10MBPS 0x0100 +#define FW_ADDSTATE_LINK_SPEED_100MBPS 0x0200 +#define FW_ADDSTATE_LINK_SPEED_1GBPS 0x0400 +#define FW_ADDSTATE_LINK_SPEED_10GBPS 0x0800 + +#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B +#define IPV6_DEFAULT_DDB_ENTRY 0x0001 + +#define MBOX_CMD_CONN_OPEN_SESS_LOGIN 0x0074 +#define MBOX_CMD_GET_CRASH_RECORD 0x0076 /* 4010 only */ +#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077 + +#define MBOX_CMD_IDC_ACK 0x0101 +#define MBOX_CMD_IDC_TIME_EXTEND 0x0102 +#define MBOX_CMD_PORT_RESET 0x0120 +#define MBOX_CMD_SET_PORT_CONFIG 0x0122 + +/* Mailbox status definitions */ +#define MBOX_COMPLETION_STATUS 4 +#define MBOX_STS_BUSY 0x0007 +#define MBOX_STS_INTERMEDIATE_COMPLETION 0x1000 +#define MBOX_STS_COMMAND_COMPLETE 0x4000 +#define MBOX_STS_COMMAND_ERROR 0x4005 + +#define MBOX_ASYNC_EVENT_STATUS 8 +#define MBOX_ASTS_SYSTEM_ERROR 0x8002 +#define MBOX_ASTS_REQUEST_TRANSFER_ERROR 0x8003 +#define MBOX_ASTS_RESPONSE_TRANSFER_ERROR 0x8004 +#define MBOX_ASTS_PROTOCOL_STATISTIC_ALARM 0x8005 +#define MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED 0x8006 +#define MBOX_ASTS_LINK_UP 0x8010 +#define MBOX_ASTS_LINK_DOWN 0x8011 +#define MBOX_ASTS_DATABASE_CHANGED 0x8014 +#define MBOX_ASTS_UNSOLICITED_PDU_RECEIVED 0x8015 +#define MBOX_ASTS_SELF_TEST_FAILED 0x8016 +#define 
MBOX_ASTS_LOGIN_FAILED 0x8017 +#define MBOX_ASTS_DNS 0x8018 +#define MBOX_ASTS_HEARTBEAT 0x8019 +#define MBOX_ASTS_NVRAM_INVALID 0x801A +#define MBOX_ASTS_MAC_ADDRESS_CHANGED 0x801B +#define MBOX_ASTS_IP_ADDRESS_CHANGED 0x801C +#define MBOX_ASTS_DHCP_LEASE_EXPIRED 0x801D +#define MBOX_ASTS_DHCP_LEASE_ACQUIRED 0x801F +#define MBOX_ASTS_ISNS_UNSOLICITED_PDU_RECEIVED 0x8021 +#define MBOX_ASTS_DUPLICATE_IP 0x8025 +#define MBOX_ASTS_ARP_COMPLETE 0x8026 +#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027 +#define MBOX_ASTS_RESPONSE_QUEUE_FULL 0x8028 +#define MBOX_ASTS_IP_ADDR_STATE_CHANGED 0x8029 +#define MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED 0x802A +#define MBOX_ASTS_IPV6_LINK_MTU_CHANGE 0x802B +#define MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED 0x802C +#define MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED 0x802D +#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E +#define MBOX_ASTS_INITIALIZATION_FAILED 0x8031 +#define MBOX_ASTS_SYSTEM_WARNING_EVENT 0x8036 +#define MBOX_ASTS_IDC_COMPLETE 0x8100 +#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101 +#define MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION 0x8102 +#define MBOX_ASTS_DCBX_CONF_CHANGE 0x8110 +#define MBOX_ASTS_TXSCVR_INSERTED 0x8130 +#define MBOX_ASTS_TXSCVR_REMOVED 0x8131 + +#define ISNS_EVENT_DATA_RECEIVED 0x0000 +#define ISNS_EVENT_CONNECTION_OPENED 0x0001 +#define ISNS_EVENT_CONNECTION_FAILED 0x0002 +#define MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR 0x8022 +#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027 + +/* ACB Configuration Defines */ +#define ACB_CONFIG_DISABLE 0x00 +#define ACB_CONFIG_SET 0x01 + +/* ACB/IP Address State Defines */ +#define IP_ADDRSTATE_UNCONFIGURED 0 +#define IP_ADDRSTATE_INVALID 1 +#define IP_ADDRSTATE_ACQUIRING 2 +#define IP_ADDRSTATE_TENTATIVE 3 +#define IP_ADDRSTATE_DEPRICATED 4 +#define IP_ADDRSTATE_PREFERRED 5 +#define IP_ADDRSTATE_DISABLING 6 + +/* FLASH offsets */ +#define FLASH_SEGMENT_IFCB 0x04000000 + +#define FLASH_OPT_RMW_HOLD 0 +#define FLASH_OPT_RMW_INIT 1 +#define FLASH_OPT_COMMIT 2 +#define FLASH_OPT_RMW_COMMIT 3 + +/* generic defines to enable/disable params */ +#define QL4_PARAM_DISABLE 0 +#define QL4_PARAM_ENABLE 1 + +/*************************************************************************/ + +/* Host Adapter Initialization Control Block (from host) */ +struct addr_ctrl_blk { + uint8_t version; /* 00 */ +#define IFCB_VER_MIN 0x01 +#define IFCB_VER_MAX 0x02 + uint8_t control; /* 01 */ +#define CTRLOPT_NEW_CONN_DISABLE 0x0002 + + uint16_t fw_options; /* 02-03 */ +#define FWOPT_HEARTBEAT_ENABLE 0x1000 +#define FWOPT_SESSION_MODE 0x0040 +#define FWOPT_INITIATOR_MODE 0x0020 +#define FWOPT_TARGET_MODE 0x0010 +#define FWOPT_ENABLE_CRBDB 0x8000 + + uint16_t exec_throttle; /* 04-05 */ + uint8_t zio_count; /* 06 */ + uint8_t res0; /* 07 */ + uint16_t eth_mtu_size; /* 08-09 */ + uint16_t add_fw_options; /* 0A-0B */ +#define ADFWOPT_SERIALIZE_TASK_MGMT 0x0400 +#define ADFWOPT_AUTOCONN_DISABLE 0x0002 + + uint8_t hb_interval; /* 0C */ + uint8_t inst_num; /* 0D */ + uint16_t res1; /* 0E-0F */ + uint16_t rqq_consumer_idx; /* 10-11 */ + uint16_t compq_producer_idx; /* 12-13 */ + uint16_t rqq_len; /* 14-15 */ + uint16_t compq_len; /* 16-17 */ + uint32_t rqq_addr_lo; /* 18-1B */ + uint32_t rqq_addr_hi; /* 1C-1F */ + uint32_t compq_addr_lo; /* 20-23 */ + uint32_t compq_addr_hi; /* 24-27 */ + uint32_t shdwreg_addr_lo; /* 28-2B */ + uint32_t shdwreg_addr_hi; /* 2C-2F */ + + uint16_t iscsi_opts; /* 30-31 */ +#define ISCSIOPTS_HEADER_DIGEST_EN 0x2000 +#define ISCSIOPTS_DATA_DIGEST_EN 0x1000 +#define ISCSIOPTS_IMMEDIATE_DATA_EN 0x0800 +#define 
ISCSIOPTS_INITIAL_R2T_EN 0x0400 +#define ISCSIOPTS_DATA_SEQ_INORDER_EN 0x0200 +#define ISCSIOPTS_DATA_PDU_INORDER_EN 0x0100 +#define ISCSIOPTS_CHAP_AUTH_EN 0x0080 +#define ISCSIOPTS_SNACK_EN 0x0040 +#define ISCSIOPTS_DISCOVERY_LOGOUT_EN 0x0020 +#define ISCSIOPTS_BIDI_CHAP_EN 0x0010 +#define ISCSIOPTS_DISCOVERY_AUTH_EN 0x0008 +#define ISCSIOPTS_STRICT_LOGIN_COMP_EN 0x0004 +#define ISCSIOPTS_ERL 0x0003 + uint16_t ipv4_tcp_opts; /* 32-33 */ +#define TCPOPT_DELAYED_ACK_DISABLE 0x8000 +#define TCPOPT_DHCP_ENABLE 0x0200 +#define TCPOPT_DNS_SERVER_IP_EN 0x0100 +#define TCPOPT_SLP_DA_INFO_EN 0x0080 +#define TCPOPT_NAGLE_ALGO_DISABLE 0x0020 +#define TCPOPT_WINDOW_SCALE_DISABLE 0x0010 +#define TCPOPT_TIMER_SCALE 0x000E +#define TCPOPT_TIMESTAMP_ENABLE 0x0001 + uint16_t ipv4_ip_opts; /* 34-35 */ +#define IPOPT_IPV4_PROTOCOL_ENABLE 0x8000 +#define IPOPT_IPV4_TOS_EN 0x4000 +#define IPOPT_VLAN_TAGGING_ENABLE 0x2000 +#define IPOPT_GRAT_ARP_EN 0x1000 +#define IPOPT_ALT_CID_EN 0x0800 +#define IPOPT_REQ_VID_EN 0x0400 +#define IPOPT_USE_VID_EN 0x0200 +#define IPOPT_LEARN_IQN_EN 0x0100 +#define IPOPT_FRAGMENTATION_DISABLE 0x0010 +#define IPOPT_IN_FORWARD_EN 0x0008 +#define IPOPT_ARP_REDIRECT_EN 0x0004 + + uint16_t iscsi_max_pdu_size; /* 36-37 */ + uint8_t ipv4_tos; /* 38 */ + uint8_t ipv4_ttl; /* 39 */ + uint8_t acb_version; /* 3A */ +#define ACB_NOT_SUPPORTED 0x00 +#define ACB_SUPPORTED 0x02 /* Capable of ACB Version 2 + Features */ + + uint8_t res2; /* 3B */ + uint16_t def_timeout; /* 3C-3D */ + uint16_t iscsi_fburst_len; /* 3E-3F */ + uint16_t iscsi_def_time2wait; /* 40-41 */ + uint16_t iscsi_def_time2retain; /* 42-43 */ + uint16_t iscsi_max_outstnd_r2t; /* 44-45 */ + uint16_t conn_ka_timeout; /* 46-47 */ + uint16_t ipv4_port; /* 48-49 */ + uint16_t iscsi_max_burst_len; /* 4A-4B */ + uint32_t res5; /* 4C-4F */ + uint8_t ipv4_addr[4]; /* 50-53 */ + uint16_t ipv4_vlan_tag; /* 54-55 */ + uint8_t ipv4_addr_state; /* 56 */ + uint8_t ipv4_cacheid; /* 57 */ + uint8_t res6[8]; /* 58-5F */ + uint8_t ipv4_subnet[4]; /* 60-63 */ + uint8_t res7[12]; /* 64-6F */ + uint8_t ipv4_gw_addr[4]; /* 70-73 */ + uint8_t res8[0xc]; /* 74-7F */ + uint8_t pri_dns_srvr_ip[4];/* 80-83 */ + uint8_t sec_dns_srvr_ip[4];/* 84-87 */ + uint16_t min_eph_port; /* 88-89 */ + uint16_t max_eph_port; /* 8A-8B */ + uint8_t res9[4]; /* 8C-8F */ + uint8_t iscsi_alias[32];/* 90-AF */ + uint8_t res9_1[0x16]; /* B0-C5 */ + uint16_t tgt_portal_grp;/* C6-C7 */ + uint8_t abort_timer; /* C8 */ + uint8_t ipv4_tcp_wsf; /* C9 */ + uint8_t res10[6]; /* CA-CF */ + uint8_t ipv4_sec_ip_addr[4]; /* D0-D3 */ + uint8_t ipv4_dhcp_vid_len; /* D4 */ + uint8_t ipv4_dhcp_vid[11]; /* D5-DF */ + uint8_t res11[20]; /* E0-F3 */ + uint8_t ipv4_dhcp_alt_cid_len; /* F4 */ + uint8_t ipv4_dhcp_alt_cid[11]; /* F5-FF */ + uint8_t iscsi_name[224]; /* 100-1DF */ + uint8_t res12[32]; /* 1E0-1FF */ + uint32_t cookie; /* 200-203 */ + uint16_t ipv6_port; /* 204-205 */ + uint16_t ipv6_opts; /* 206-207 */ +#define IPV6_OPT_IPV6_PROTOCOL_ENABLE 0x8000 +#define IPV6_OPT_VLAN_TAGGING_ENABLE 0x2000 +#define IPV6_OPT_GRAT_NEIGHBOR_ADV_EN 0x1000 +#define IPV6_OPT_REDIRECT_EN 0x0004 + + uint16_t ipv6_addtl_opts; /* 208-209 */ +#define IPV6_ADDOPT_IGNORE_ICMP_ECHO_REQ 0x0040 +#define IPV6_ADDOPT_MLD_EN 0x0004 +#define IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE 0x0002 /* Pri ACB + Only */ +#define IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR 0x0001 + + uint16_t ipv6_tcp_opts; /* 20A-20B */ +#define IPV6_TCPOPT_DELAYED_ACK_DISABLE 0x8000 +#define IPV6_TCPOPT_NAGLE_ALGO_DISABLE 0x0020 +#define 
IPV6_TCPOPT_WINDOW_SCALE_DISABLE 0x0010 +#define IPV6_TCPOPT_TIMER_SCALE 0x000E +#define IPV6_TCPOPT_TIMESTAMP_EN 0x0001 + uint8_t ipv6_tcp_wsf; /* 20C */ + uint16_t ipv6_flow_lbl; /* 20D-20F */ + uint8_t ipv6_dflt_rtr_addr[16]; /* 210-21F */ + uint16_t ipv6_vlan_tag; /* 220-221 */ + uint8_t ipv6_lnk_lcl_addr_state;/* 222 */ + uint8_t ipv6_addr0_state; /* 223 */ + uint8_t ipv6_addr1_state; /* 224 */ + uint8_t ipv6_dflt_rtr_state; /* 225 */ +#define IPV6_RTRSTATE_UNKNOWN 0 +#define IPV6_RTRSTATE_MANUAL 1 +#define IPV6_RTRSTATE_ADVERTISED 3 +#define IPV6_RTRSTATE_STALE 4 + + uint8_t ipv6_traffic_class; /* 226 */ + uint8_t ipv6_hop_limit; /* 227 */ + uint8_t ipv6_if_id[8]; /* 228-22F */ + uint8_t ipv6_addr0[16]; /* 230-23F */ + uint8_t ipv6_addr1[16]; /* 240-24F */ + uint32_t ipv6_nd_reach_time; /* 250-253 */ + uint32_t ipv6_nd_rexmit_timer; /* 254-257 */ + uint32_t ipv6_nd_stale_timeout; /* 258-25B */ + uint8_t ipv6_dup_addr_detect_count; /* 25C */ + uint8_t ipv6_cache_id; /* 25D */ + uint8_t res13[18]; /* 25E-26F */ + uint32_t ipv6_gw_advrt_mtu; /* 270-273 */ + uint8_t res14[140]; /* 274-2FF */ +}; + +#define IP_ADDR_COUNT 4 /* Total 4 IP address supported in one interface + * One IPv4, one IPv6 link local and 2 IPv6 + */ + +#define IP_STATE_MASK 0x0F000000 +#define IP_STATE_SHIFT 24 + +struct init_fw_ctrl_blk { + struct addr_ctrl_blk pri; +/* struct addr_ctrl_blk sec;*/ +}; + +#define PRIMARI_ACB 0 +#define SECONDARY_ACB 1 + +struct addr_ctrl_blk_def { + uint8_t reserved1[1]; /* 00 */ + uint8_t control; /* 01 */ + uint8_t reserved2[11]; /* 02-0C */ + uint8_t inst_num; /* 0D */ + uint8_t reserved3[34]; /* 0E-2F */ + uint16_t iscsi_opts; /* 30-31 */ + uint16_t ipv4_tcp_opts; /* 32-33 */ + uint16_t ipv4_ip_opts; /* 34-35 */ + uint16_t iscsi_max_pdu_size; /* 36-37 */ + uint8_t ipv4_tos; /* 38 */ + uint8_t ipv4_ttl; /* 39 */ + uint8_t reserved4[2]; /* 3A-3B */ + uint16_t def_timeout; /* 3C-3D */ + uint16_t iscsi_fburst_len; /* 3E-3F */ + uint8_t reserved5[4]; /* 40-43 */ + uint16_t iscsi_max_outstnd_r2t; /* 44-45 */ + uint8_t reserved6[2]; /* 46-47 */ + uint16_t ipv4_port; /* 48-49 */ + uint16_t iscsi_max_burst_len; /* 4A-4B */ + uint8_t reserved7[4]; /* 4C-4F */ + uint8_t ipv4_addr[4]; /* 50-53 */ + uint16_t ipv4_vlan_tag; /* 54-55 */ + uint8_t ipv4_addr_state; /* 56 */ + uint8_t ipv4_cacheid; /* 57 */ + uint8_t reserved8[8]; /* 58-5F */ + uint8_t ipv4_subnet[4]; /* 60-63 */ + uint8_t reserved9[12]; /* 64-6F */ + uint8_t ipv4_gw_addr[4]; /* 70-73 */ + uint8_t reserved10[84]; /* 74-C7 */ + uint8_t abort_timer; /* C8 */ + uint8_t ipv4_tcp_wsf; /* C9 */ + uint8_t reserved11[10]; /* CA-D3 */ + uint8_t ipv4_dhcp_vid_len; /* D4 */ + uint8_t ipv4_dhcp_vid[11]; /* D5-DF */ + uint8_t reserved12[20]; /* E0-F3 */ + uint8_t ipv4_dhcp_alt_cid_len; /* F4 */ + uint8_t ipv4_dhcp_alt_cid[11]; /* F5-FF */ + uint8_t iscsi_name[224]; /* 100-1DF */ + uint8_t reserved13[32]; /* 1E0-1FF */ + uint32_t cookie; /* 200-203 */ + uint16_t ipv6_port; /* 204-205 */ + uint16_t ipv6_opts; /* 206-207 */ + uint16_t ipv6_addtl_opts; /* 208-209 */ + uint16_t ipv6_tcp_opts; /* 20A-20B */ + uint8_t ipv6_tcp_wsf; /* 20C */ + uint16_t ipv6_flow_lbl; /* 20D-20F */ + uint8_t ipv6_dflt_rtr_addr[16]; /* 210-21F */ + uint16_t ipv6_vlan_tag; /* 220-221 */ + uint8_t ipv6_lnk_lcl_addr_state; /* 222 */ + uint8_t ipv6_addr0_state; /* 223 */ + uint8_t ipv6_addr1_state; /* 224 */ + uint8_t ipv6_dflt_rtr_state; /* 225 */ + uint8_t ipv6_traffic_class; /* 226 */ + uint8_t ipv6_hop_limit; /* 227 */ + uint8_t ipv6_if_id[8]; /* 228-22F */ + uint8_t 
ipv6_addr0[16]; /* 230-23F */ + uint8_t ipv6_addr1[16]; /* 240-24F */ + uint32_t ipv6_nd_reach_time; /* 250-253 */ + uint32_t ipv6_nd_rexmit_timer; /* 254-257 */ + uint32_t ipv6_nd_stale_timeout; /* 258-25B */ + uint8_t ipv6_dup_addr_detect_count; /* 25C */ + uint8_t ipv6_cache_id; /* 25D */ + uint8_t reserved14[18]; /* 25E-26F */ + uint32_t ipv6_gw_advrt_mtu; /* 270-273 */ + uint8_t reserved15[140]; /* 274-2FF */ +}; + +/*************************************************************************/ + +#define MAX_CHAP_ENTRIES_40XX 128 +#define MAX_CHAP_ENTRIES_82XX 1024 +#define MAX_RESRV_CHAP_IDX 3 +#define FLASH_CHAP_OFFSET 0x06000000 + +struct ql4_chap_table { + uint16_t link; + uint8_t flags; + uint8_t secret_len; +#define MIN_CHAP_SECRET_LEN 12 +#define MAX_CHAP_SECRET_LEN 100 + uint8_t secret[MAX_CHAP_SECRET_LEN]; +#define MAX_CHAP_NAME_LEN 256 + uint8_t name[MAX_CHAP_NAME_LEN]; + uint16_t reserved; +#define CHAP_VALID_COOKIE 0x4092 +#define CHAP_INVALID_COOKIE 0xFFEE + uint16_t cookie; +}; + +struct dev_db_entry { + uint16_t options; /* 00-01 */ +#define DDB_OPT_DISC_SESSION 0x10 +#define DDB_OPT_TARGET 0x02 /* device is a target */ +#define DDB_OPT_IPV6_DEVICE 0x100 +#define DDB_OPT_AUTO_SENDTGTS_DISABLE 0x40 +#define DDB_OPT_IPV6_NULL_LINK_LOCAL 0x800 /* post connection */ +#define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL 0x800 /* pre connection */ + +#define OPT_IS_FW_ASSIGNED_IPV6 11 +#define OPT_IPV6_DEVICE 8 +#define OPT_AUTO_SENDTGTS_DISABLE 6 +#define OPT_DISC_SESSION 4 +#define OPT_ENTRY_STATE 3 + uint16_t exec_throttle; /* 02-03 */ + uint16_t exec_count; /* 04-05 */ + uint16_t res0; /* 06-07 */ + uint16_t iscsi_options; /* 08-09 */ +#define ISCSIOPT_HEADER_DIGEST_EN 13 +#define ISCSIOPT_DATA_DIGEST_EN 12 +#define ISCSIOPT_IMMEDIATE_DATA_EN 11 +#define ISCSIOPT_INITIAL_R2T_EN 10 +#define ISCSIOPT_DATA_SEQ_IN_ORDER 9 +#define ISCSIOPT_DATA_PDU_IN_ORDER 8 +#define ISCSIOPT_CHAP_AUTH_EN 7 +#define ISCSIOPT_SNACK_REQ_EN 6 +#define ISCSIOPT_DISCOVERY_LOGOUT_EN 5 +#define ISCSIOPT_BIDI_CHAP_EN 4 +#define ISCSIOPT_DISCOVERY_AUTH_OPTIONAL 3 +#define ISCSIOPT_ERL1 1 +#define ISCSIOPT_ERL0 0 + + uint16_t tcp_options; /* 0A-0B */ +#define TCPOPT_TIMESTAMP_STAT 6 +#define TCPOPT_NAGLE_DISABLE 5 +#define TCPOPT_WSF_DISABLE 4 +#define TCPOPT_TIMER_SCALE3 3 +#define TCPOPT_TIMER_SCALE2 2 +#define TCPOPT_TIMER_SCALE1 1 +#define TCPOPT_TIMESTAMP_EN 0 + + uint16_t ip_options; /* 0C-0D */ +#define IPOPT_FRAGMENT_DISABLE 4 + + uint16_t iscsi_max_rcv_data_seg_len; /* 0E-0F */ +#define BYTE_UNITS 512 + uint32_t res1; /* 10-13 */ + uint16_t iscsi_max_snd_data_seg_len; /* 14-15 */ + uint16_t iscsi_first_burst_len; /* 16-17 */ + uint16_t iscsi_def_time2wait; /* 18-19 */ + uint16_t iscsi_def_time2retain; /* 1A-1B */ + uint16_t iscsi_max_outsnd_r2t; /* 1C-1D */ + uint16_t ka_timeout; /* 1E-1F */ + uint8_t isid[6]; /* 20-25 big-endian, must be converted + * to little-endian */ + uint16_t tsid; /* 26-27 */ + uint16_t port; /* 28-29 */ + uint16_t iscsi_max_burst_len; /* 2A-2B */ + uint16_t def_timeout; /* 2C-2D */ + uint16_t res2; /* 2E-2F */ + uint8_t ip_addr[0x10]; /* 30-3F */ + uint8_t iscsi_alias[0x20]; /* 40-5F */ + uint8_t tgt_addr[0x20]; /* 60-7F */ + uint16_t mss; /* 80-81 */ + uint16_t res3; /* 82-83 */ + uint16_t lcl_port; /* 84-85 */ + uint8_t ipv4_tos; /* 86 */ + uint16_t ipv6_flow_lbl; /* 87-89 */ + uint8_t res4[0x36]; /* 8A-BF */ + uint8_t iscsi_name[0xE0]; /* C0-19F : xxzzy Make this a + * pointer to a string so we + * don't have to reserve so + * much RAM */ + uint8_t link_local_ipv6_addr[0x10]; 
/* 1A0-1AF */ + uint8_t res5[0x10]; /* 1B0-1BF */ +#define DDB_NO_LINK 0xFFFF +#define DDB_ISNS 0xFFFD + uint16_t ddb_link; /* 1C0-1C1 */ + uint16_t chap_tbl_idx; /* 1C2-1C3 */ + uint16_t tgt_portal_grp; /* 1C4-1C5 */ + uint8_t tcp_xmt_wsf; /* 1C6 */ + uint8_t tcp_rcv_wsf; /* 1C7 */ + uint32_t stat_sn; /* 1C8-1CB */ + uint32_t exp_stat_sn; /* 1CC-1CF */ + uint8_t res6[0x2b]; /* 1D0-1FB */ +#define DDB_VALID_COOKIE 0x9034 + uint16_t cookie; /* 1FC-1FD */ + uint16_t len; /* 1FE-1FF */ +}; + +/*************************************************************************/ + +/* Flash definitions */ + +#define FLASH_OFFSET_SYS_INFO 0x02000000 +#define FLASH_DEFAULTBLOCKSIZE 0x20000 +#define FLASH_EOF_OFFSET (FLASH_DEFAULTBLOCKSIZE-8) /* 4 bytes + * for EOF + * signature */ +#define FLASH_RAW_ACCESS_ADDR 0x8e000000 + +#define BOOT_PARAM_OFFSET_PORT0 0x3b0 +#define BOOT_PARAM_OFFSET_PORT1 0x7b0 + +#define FLASH_OFFSET_DB_INFO 0x05000000 +#define FLASH_OFFSET_DB_END (FLASH_OFFSET_DB_INFO + 0x7fff) + + +struct sys_info_phys_addr { + uint8_t address[6]; /* 00-05 */ + uint8_t filler[2]; /* 06-07 */ +}; + +struct flash_sys_info { + uint32_t cookie; /* 00-03 */ + uint32_t physAddrCount; /* 04-07 */ + struct sys_info_phys_addr physAddr[4]; /* 08-27 */ + uint8_t vendorId[128]; /* 28-A7 */ + uint8_t productId[128]; /* A8-127 */ + uint32_t serialNumber; /* 128-12B */ + + /* PCI Configuration values */ + uint32_t pciDeviceVendor; /* 12C-12F */ + uint32_t pciDeviceId; /* 130-133 */ + uint32_t pciSubsysVendor; /* 134-137 */ + uint32_t pciSubsysId; /* 138-13B */ + + /* This validates version 1. */ + uint32_t crumbs; /* 13C-13F */ + + uint32_t enterpriseNumber; /* 140-143 */ + + uint32_t mtu; /* 144-147 */ + uint32_t reserved0; /* 148-14b */ + uint32_t crumbs2; /* 14c-14f */ + uint8_t acSerialNumber[16]; /* 150-15f */ + uint32_t crumbs3; /* 160-16f */ + + /* Leave this last in the struct so it is declared invalid if + * any new items are added. + */ + uint32_t reserved1[39]; /* 170-1ff */ +}; /* 200 */ + +struct mbx_sys_info { + uint8_t board_id_str[16]; /* 0-f Keep board ID string first */ + /* in this structure for GUI. 
*/ + uint16_t board_id; /* 10-11 board ID code */ + uint16_t phys_port_cnt; /* 12-13 number of physical network ports */ + uint16_t port_num; /* 14-15 network port for this PCI function */ + /* (port 0 is first port) */ + uint8_t mac_addr[6]; /* 16-1b MAC address for this PCI function */ + uint32_t iscsi_pci_func_cnt; /* 1c-1f number of iSCSI PCI functions */ + uint32_t pci_func; /* 20-23 this PCI function */ + unsigned char serial_number[16]; /* 24-33 serial number string */ + uint8_t reserved[12]; /* 34-3f */ +}; + +struct about_fw_info { + uint16_t fw_major; /* 00 - 01 */ + uint16_t fw_minor; /* 02 - 03 */ + uint16_t fw_patch; /* 04 - 05 */ + uint16_t fw_build; /* 06 - 07 */ + uint8_t fw_build_date[16]; /* 08 - 17 ASCII String */ + uint8_t fw_build_time[16]; /* 18 - 27 ASCII String */ + uint8_t fw_build_user[16]; /* 28 - 37 ASCII String */ + uint16_t fw_load_source; /* 38 - 39 */ + /* 1 = Flash Primary, + 2 = Flash Secondary, + 3 = Host Download + */ + uint8_t reserved1[6]; /* 3A - 3F */ + uint16_t iscsi_major; /* 40 - 41 */ + uint16_t iscsi_minor; /* 42 - 43 */ + uint16_t bootload_major; /* 44 - 45 */ + uint16_t bootload_minor; /* 46 - 47 */ + uint16_t bootload_patch; /* 48 - 49 */ + uint16_t bootload_build; /* 4A - 4B */ + uint8_t extended_timestamp[180];/* 4C - FF */ +}; + +struct crash_record { + uint16_t fw_major_version; /* 00 - 01 */ + uint16_t fw_minor_version; /* 02 - 03 */ + uint16_t fw_patch_version; /* 04 - 05 */ + uint16_t fw_build_version; /* 06 - 07 */ + + uint8_t build_date[16]; /* 08 - 17 */ + uint8_t build_time[16]; /* 18 - 27 */ + uint8_t build_user[16]; /* 28 - 37 */ + uint8_t card_serial_num[16]; /* 38 - 47 */ + + uint32_t time_of_crash_in_secs; /* 48 - 4B */ + uint32_t time_of_crash_in_ms; /* 4C - 4F */ + + uint16_t out_RISC_sd_num_frames; /* 50 - 51 */ + uint16_t OAP_sd_num_words; /* 52 - 53 */ + uint16_t IAP_sd_num_frames; /* 54 - 55 */ + uint16_t in_RISC_sd_num_words; /* 56 - 57 */ + + uint8_t reserved1[28]; /* 58 - 7F */ + + uint8_t out_RISC_reg_dump[256]; /* 80 -17F */ + uint8_t in_RISC_reg_dump[256]; /*180 -27F */ + uint8_t in_out_RISC_stack_dump[]; /*280 - ??? 
*/ +}; + +struct conn_event_log_entry { +#define MAX_CONN_EVENT_LOG_ENTRIES 100 + uint32_t timestamp_sec; /* 00 - 03 seconds since boot */ + uint32_t timestamp_ms; /* 04 - 07 milliseconds since boot */ + uint16_t device_index; /* 08 - 09 */ + uint16_t fw_conn_state; /* 0A - 0B */ + uint8_t event_type; /* 0C - 0C */ + uint8_t error_code; /* 0D - 0D */ + uint16_t error_code_detail; /* 0E - 0F */ + uint8_t num_consecutive_events; /* 10 - 10 */ + uint8_t rsvd[3]; /* 11 - 13 */ +}; + +/************************************************************************* + * + * IOCB Commands Structures and Definitions + * + *************************************************************************/ +#define IOCB_MAX_CDB_LEN 16 /* Bytes in a CDB */ +#define IOCB_MAX_SENSEDATA_LEN 32 /* Bytes of sense data */ +#define IOCB_MAX_EXT_SENSEDATA_LEN 60 /* Bytes of extended sense data */ + +/* IOCB header structure */ +struct qla4_header { + uint8_t entryType; +#define ET_STATUS 0x03 +#define ET_MARKER 0x04 +#define ET_CONT_T1 0x0A +#define ET_STATUS_CONTINUATION 0x10 +#define ET_CMND_T3 0x19 +#define ET_PASSTHRU0 0x3A +#define ET_PASSTHRU_STATUS 0x3C +#define ET_MBOX_CMD 0x38 +#define ET_MBOX_STATUS 0x39 + + uint8_t entryStatus; + uint8_t systemDefined; +#define SD_ISCSI_PDU 0x01 + uint8_t entryCount; + + /* SystemDefined definition */ +}; + +/* Generic queue entry structure*/ +struct queue_entry { + uint8_t data[60]; + uint32_t signature; + +}; + +/* 64 bit addressing segment counts*/ + +#define COMMAND_SEG_A64 1 +#define CONTINUE_SEG_A64 5 + +/* 64 bit addressing segment definition*/ + +struct data_seg_a64 { + struct { + uint32_t addrLow; + uint32_t addrHigh; + + } base; + + uint32_t count; + +}; + +/* Command Type 3 entry structure*/ + +struct command_t3_entry { + struct qla4_header hdr; /* 00-03 */ + + uint32_t handle; /* 04-07 */ + uint16_t target; /* 08-09 */ + uint16_t connection_id; /* 0A-0B */ + + uint8_t control_flags; /* 0C */ + + /* data direction (bits 5-6) */ +#define CF_WRITE 0x20 +#define CF_READ 0x40 +#define CF_NO_DATA 0x00 + + /* task attributes (bits 2-0) */ +#define CF_HEAD_TAG 0x03 +#define CF_ORDERED_TAG 0x02 +#define CF_SIMPLE_TAG 0x01 + + /* STATE FLAGS FIELD IS A PLACE HOLDER. THE FW WILL SET BITS + * IN THIS FIELD AS THE COMMAND IS PROCESSED. WHEN THE IOCB IS + * CHANGED TO AN IOSB THIS FIELD WILL HAVE THE STATE FLAGS SET + * PROPERLY. + */ + uint8_t state_flags; /* 0D */ + uint8_t cmdRefNum; /* 0E */ + uint8_t reserved1; /* 0F */ + uint8_t cdb[IOCB_MAX_CDB_LEN]; /* 10-1F */ + struct scsi_lun lun; /* FCP LUN (BE). */ + uint32_t cmdSeqNum; /* 28-2B */ + uint16_t timeout; /* 2C-2D */ + uint16_t dataSegCnt; /* 2E-2F */ + uint32_t ttlByteCnt; /* 30-33 */ + struct data_seg_a64 dataseg[COMMAND_SEG_A64]; /* 34-3F */ + +}; + + +/* Continuation Type 1 entry structure*/ +struct continuation_t1_entry { + struct qla4_header hdr; + + struct data_seg_a64 dataseg[CONTINUE_SEG_A64]; + +}; + +/* Parameterize for 64 or 32 bits */ +#define COMMAND_SEG COMMAND_SEG_A64 +#define CONTINUE_SEG CONTINUE_SEG_A64 + +#define ET_COMMAND ET_CMND_T3 +#define ET_CONTINUE ET_CONT_T1 + +/* Marker entry structure*/ +struct qla4_marker_entry { + struct qla4_header hdr; /* 00-03 */ + + uint32_t system_defined; /* 04-07 */ + uint16_t target; /* 08-09 */ + uint16_t modifier; /* 0A-0B */ +#define MM_LUN_RESET 0 +#define MM_TGT_WARM_RESET 1 + + uint16_t flags; /* 0C-0D */ + uint16_t reserved1; /* 0E-0F */ + struct scsi_lun lun; /* FCP LUN (BE). 
*/ + uint64_t reserved2; /* 18-1F */ + uint64_t reserved3; /* 20-27 */ + uint64_t reserved4; /* 28-2F */ + uint64_t reserved5; /* 30-37 */ + uint64_t reserved6; /* 38-3F */ +}; + +/* Status entry structure*/ +struct status_entry { + struct qla4_header hdr; /* 00-03 */ + + uint32_t handle; /* 04-07 */ + + uint8_t scsiStatus; /* 08 */ + + uint8_t iscsiFlags; /* 09 */ +#define ISCSI_FLAG_RESIDUAL_UNDER 0x02 +#define ISCSI_FLAG_RESIDUAL_OVER 0x04 + + uint8_t iscsiResponse; /* 0A */ + + uint8_t completionStatus; /* 0B */ +#define SCS_COMPLETE 0x00 +#define SCS_INCOMPLETE 0x01 +#define SCS_RESET_OCCURRED 0x04 +#define SCS_ABORTED 0x05 +#define SCS_TIMEOUT 0x06 +#define SCS_DATA_OVERRUN 0x07 +#define SCS_DATA_UNDERRUN 0x15 +#define SCS_QUEUE_FULL 0x1C +#define SCS_DEVICE_UNAVAILABLE 0x28 +#define SCS_DEVICE_LOGGED_OUT 0x29 + + uint8_t reserved1; /* 0C */ + + /* state_flags MUST be at the same location as state_flags in + * the Command_T3/4_Entry */ + uint8_t state_flags; /* 0D */ + + uint16_t senseDataByteCnt; /* 0E-0F */ + uint32_t residualByteCnt; /* 10-13 */ + uint32_t bidiResidualByteCnt; /* 14-17 */ + uint32_t expSeqNum; /* 18-1B */ + uint32_t maxCmdSeqNum; /* 1C-1F */ + uint8_t senseData[IOCB_MAX_SENSEDATA_LEN]; /* 20-3F */ + +}; + +/* Status Continuation entry */ +struct status_cont_entry { + struct qla4_header hdr; /* 00-03 */ + uint8_t ext_sense_data[IOCB_MAX_EXT_SENSEDATA_LEN]; /* 04-63 */ +}; + +struct passthru0 { + struct qla4_header hdr; /* 00-03 */ + uint32_t handle; /* 04-07 */ + uint16_t target; /* 08-09 */ + uint16_t connection_id; /* 0A-0B */ +#define ISNS_DEFAULT_SERVER_CONN_ID ((uint16_t)0x8000) + + uint16_t control_flags; /* 0C-0D */ +#define PT_FLAG_ETHERNET_FRAME 0x8000 +#define PT_FLAG_ISNS_PDU 0x8000 +#define PT_FLAG_SEND_BUFFER 0x0200 +#define PT_FLAG_WAIT_4_RESPONSE 0x0100 +#define PT_FLAG_ISCSI_PDU 0x1000 + + uint16_t timeout; /* 0E-0F */ +#define PT_DEFAULT_TIMEOUT 30 /* seconds */ + + struct data_seg_a64 out_dsd; /* 10-1B */ + uint32_t res1; /* 1C-1F */ + struct data_seg_a64 in_dsd; /* 20-2B */ + uint8_t res2[20]; /* 2C-3F */ +}; + +struct passthru_status { + struct qla4_header hdr; /* 00-03 */ + uint32_t handle; /* 04-07 */ + uint16_t target; /* 08-09 */ + uint16_t connectionID; /* 0A-0B */ + + uint8_t completionStatus; /* 0C */ +#define PASSTHRU_STATUS_COMPLETE 0x01 + + uint8_t residualFlags; /* 0D */ + + uint16_t timeout; /* 0E-0F */ + uint16_t portNumber; /* 10-11 */ + uint8_t res1[10]; /* 12-1B */ + uint32_t outResidual; /* 1C-1F */ + uint8_t res2[12]; /* 20-2B */ + uint32_t inResidual; /* 2C-2F */ + uint8_t res4[16]; /* 30-3F */ +}; + +struct mbox_cmd_iocb { + struct qla4_header hdr; /* 00-03 */ + uint32_t handle; /* 04-07 */ + uint32_t in_mbox[8]; /* 08-25 */ + uint32_t res1[6]; /* 26-3F */ +}; + +struct mbox_status_iocb { + struct qla4_header hdr; /* 00-03 */ + uint32_t handle; /* 04-07 */ + uint32_t out_mbox[8]; /* 08-25 */ + uint32_t res1[6]; /* 26-3F */ +}; + +/* + * ISP queue - response queue entry definition. 
+ */ +struct response { + uint8_t data[60]; + uint32_t signature; +#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */ +}; + +struct ql_iscsi_stats { + uint64_t mac_tx_frames; /* 0000–0007 */ + uint64_t mac_tx_bytes; /* 0008–000F */ + uint64_t mac_tx_multicast_frames; /* 0010–0017 */ + uint64_t mac_tx_broadcast_frames; /* 0018–001F */ + uint64_t mac_tx_pause_frames; /* 0020–0027 */ + uint64_t mac_tx_control_frames; /* 0028–002F */ + uint64_t mac_tx_deferral; /* 0030–0037 */ + uint64_t mac_tx_excess_deferral; /* 0038–003F */ + uint64_t mac_tx_late_collision; /* 0040–0047 */ + uint64_t mac_tx_abort; /* 0048–004F */ + uint64_t mac_tx_single_collision; /* 0050–0057 */ + uint64_t mac_tx_multiple_collision; /* 0058–005F */ + uint64_t mac_tx_collision; /* 0060–0067 */ + uint64_t mac_tx_frames_dropped; /* 0068–006F */ + uint64_t mac_tx_jumbo_frames; /* 0070–0077 */ + uint64_t mac_rx_frames; /* 0078–007F */ + uint64_t mac_rx_bytes; /* 0080–0087 */ + uint64_t mac_rx_unknown_control_frames; /* 0088–008F */ + uint64_t mac_rx_pause_frames; /* 0090–0097 */ + uint64_t mac_rx_control_frames; /* 0098–009F */ + uint64_t mac_rx_dribble; /* 00A0–00A7 */ + uint64_t mac_rx_frame_length_error; /* 00A8–00AF */ + uint64_t mac_rx_jabber; /* 00B0–00B7 */ + uint64_t mac_rx_carrier_sense_error; /* 00B8–00BF */ + uint64_t mac_rx_frame_discarded; /* 00C0–00C7 */ + uint64_t mac_rx_frames_dropped; /* 00C8–00CF */ + uint64_t mac_crc_error; /* 00D0–00D7 */ + uint64_t mac_encoding_error; /* 00D8–00DF */ + uint64_t mac_rx_length_error_large; /* 00E0–00E7 */ + uint64_t mac_rx_length_error_small; /* 00E8–00EF */ + uint64_t mac_rx_multicast_frames; /* 00F0–00F7 */ + uint64_t mac_rx_broadcast_frames; /* 00F8–00FF */ + uint64_t ip_tx_packets; /* 0100–0107 */ + uint64_t ip_tx_bytes; /* 0108–010F */ + uint64_t ip_tx_fragments; /* 0110–0117 */ + uint64_t ip_rx_packets; /* 0118–011F */ + uint64_t ip_rx_bytes; /* 0120–0127 */ + uint64_t ip_rx_fragments; /* 0128–012F */ + uint64_t ip_datagram_reassembly; /* 0130–0137 */ + uint64_t ip_invalid_address_error; /* 0138–013F */ + uint64_t ip_error_packets; /* 0140–0147 */ + uint64_t ip_fragrx_overlap; /* 0148–014F */ + uint64_t ip_fragrx_outoforder; /* 0150–0157 */ + uint64_t ip_datagram_reassembly_timeout; /* 0158–015F */ + uint64_t ipv6_tx_packets; /* 0160–0167 */ + uint64_t ipv6_tx_bytes; /* 0168–016F */ + uint64_t ipv6_tx_fragments; /* 0170–0177 */ + uint64_t ipv6_rx_packets; /* 0178–017F */ + uint64_t ipv6_rx_bytes; /* 0180–0187 */ + uint64_t ipv6_rx_fragments; /* 0188–018F */ + uint64_t ipv6_datagram_reassembly; /* 0190–0197 */ + uint64_t ipv6_invalid_address_error; /* 0198–019F */ + uint64_t ipv6_error_packets; /* 01A0–01A7 */ + uint64_t ipv6_fragrx_overlap; /* 01A8–01AF */ + uint64_t ipv6_fragrx_outoforder; /* 01B0–01B7 */ + uint64_t ipv6_datagram_reassembly_timeout; /* 01B8–01BF */ + uint64_t tcp_tx_segments; /* 01C0–01C7 */ + uint64_t tcp_tx_bytes; /* 01C8–01CF */ + uint64_t tcp_rx_segments; /* 01D0–01D7 */ + uint64_t tcp_rx_byte; /* 01D8–01DF */ + uint64_t tcp_duplicate_ack_retx; /* 01E0–01E7 */ + uint64_t tcp_retx_timer_expired; /* 01E8–01EF */ + uint64_t tcp_rx_duplicate_ack; /* 01F0–01F7 */ + uint64_t tcp_rx_pure_ackr; /* 01F8–01FF */ + uint64_t tcp_tx_delayed_ack; /* 0200–0207 */ + uint64_t tcp_tx_pure_ack; /* 0208–020F */ + uint64_t tcp_rx_segment_error; /* 0210–0217 */ + uint64_t tcp_rx_segment_outoforder; /* 0218–021F */ + uint64_t tcp_rx_window_probe; /* 0220–0227 */ + uint64_t tcp_rx_window_update; /* 0228–022F */ + uint64_t tcp_tx_window_probe_persist; /* 0230–0237 */ + 
uint64_t ecc_error_correction; /* 0238–023F */ + uint64_t iscsi_pdu_tx; /* 0240-0247 */ + uint64_t iscsi_data_bytes_tx; /* 0248-024F */ + uint64_t iscsi_pdu_rx; /* 0250-0257 */ + uint64_t iscsi_data_bytes_rx; /* 0258-025F */ + uint64_t iscsi_io_completed; /* 0260-0267 */ + uint64_t iscsi_unexpected_io_rx; /* 0268-026F */ + uint64_t iscsi_format_error; /* 0270-0277 */ + uint64_t iscsi_hdr_digest_error; /* 0278-027F */ + uint64_t iscsi_data_digest_error; /* 0280-0287 */ + uint64_t iscsi_sequence_error; /* 0288-028F */ + uint32_t tx_cmd_pdu; /* 0290-0293 */ + uint32_t tx_resp_pdu; /* 0294-0297 */ + uint32_t rx_cmd_pdu; /* 0298-029B */ + uint32_t rx_resp_pdu; /* 029C-029F */ + + uint64_t tx_data_octets; /* 02A0-02A7 */ + uint64_t rx_data_octets; /* 02A8-02AF */ + + uint32_t hdr_digest_err; /* 02B0–02B3 */ + uint32_t data_digest_err; /* 02B4–02B7 */ + uint32_t conn_timeout_err; /* 02B8–02BB */ + uint32_t framing_err; /* 02BC–02BF */ + + uint32_t tx_nopout_pdus; /* 02C0–02C3 */ + uint32_t tx_scsi_cmd_pdus; /* 02C4–02C7 */ + uint32_t tx_tmf_cmd_pdus; /* 02C8–02CB */ + uint32_t tx_login_cmd_pdus; /* 02CC–02CF */ + uint32_t tx_text_cmd_pdus; /* 02D0–02D3 */ + uint32_t tx_scsi_write_pdus; /* 02D4–02D7 */ + uint32_t tx_logout_cmd_pdus; /* 02D8–02DB */ + uint32_t tx_snack_req_pdus; /* 02DC–02DF */ + + uint32_t rx_nopin_pdus; /* 02E0–02E3 */ + uint32_t rx_scsi_resp_pdus; /* 02E4–02E7 */ + uint32_t rx_tmf_resp_pdus; /* 02E8–02EB */ + uint32_t rx_login_resp_pdus; /* 02EC–02EF */ + uint32_t rx_text_resp_pdus; /* 02F0–02F3 */ + uint32_t rx_scsi_read_pdus; /* 02F4–02F7 */ + uint32_t rx_logout_resp_pdus; /* 02F8–02FB */ + + uint32_t rx_r2t_pdus; /* 02FC–02FF */ + uint32_t rx_async_pdus; /* 0300–0303 */ + uint32_t rx_reject_pdus; /* 0304–0307 */ + + uint8_t reserved2[264]; /* 0x0308 - 0x040F */ +}; + +#define QLA8XXX_DBG_STATE_ARRAY_LEN 16 +#define QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN 8 +#define QLA8XXX_DBG_RSVD_ARRAY_LEN 8 +#define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN 16 +#define QLA83XX_SS_OCM_WNDREG_INDEX 3 +#define QLA83XX_SS_PCI_INDEX 0 +#define QLA8022_TEMPLATE_CAP_OFFSET 172 +#define QLA83XX_TEMPLATE_CAP_OFFSET 268 +#define QLA80XX_TEMPLATE_RESERVED_BITS 16 + +struct qla4_8xxx_minidump_template_hdr { + uint32_t entry_type; + uint32_t first_entry_offset; + uint32_t size_of_template; + uint32_t capture_debug_level; + uint32_t num_of_entries; + uint32_t version; + uint32_t driver_timestamp; + uint32_t checksum; + + uint32_t driver_capture_mask; + uint32_t driver_info_word2; + uint32_t driver_info_word3; + uint32_t driver_info_word4; + + uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN]; + uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN]; + uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN]; + uint32_t capabilities[QLA80XX_TEMPLATE_RESERVED_BITS]; +}; + +#endif /* _QLA4X_FW_H */ diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h new file mode 100644 index 000000000..c08733815 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_glbl.h @@ -0,0 +1,291 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ + +#ifndef __QLA4x_GBL_H +#define __QLA4x_GBL_H + +struct iscsi_cls_conn; + +int qla4xxx_hw_reset(struct scsi_qla_host *ha); +int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a); +int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb); +int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset); +int qla4xxx_soft_reset(struct scsi_qla_host *ha); +irqreturn_t 
qla4xxx_intr_handler(int irq, void *dev_id); + +void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry); +void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen); + +int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha); +int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb); +int qla4xxx_reset_lun(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry, + uint64_t lun); +int qla4xxx_reset_target(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry); +int qla4xxx_get_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr, + uint32_t offset, uint32_t len); +int qla4xxx_get_firmware_status(struct scsi_qla_host *ha); +int qla4xxx_get_firmware_state(struct scsi_qla_host *ha); +int qla4xxx_initialize_fw_cb(struct scsi_qla_host *ha); + +/* FIXME: Goodness! this really wants a small struct to hold the + * parameters. On x86 the args will get passed on the stack! */ +int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha, + uint16_t fw_ddb_index, + struct dev_db_entry *fw_ddb_entry, + dma_addr_t fw_ddb_entry_dma, + uint32_t *num_valid_ddb_entries, + uint32_t *next_ddb_index, + uint32_t *fw_ddb_device_state, + uint32_t *conn_err_detail, + uint16_t *tcp_source_port_num, + uint16_t *connection_id); + +int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index, + dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts); +uint8_t qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, + uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma); +int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha, + uint16_t fw_ddb_index, + uint16_t connection_id, + uint16_t option); +int qla4xxx_disable_acb(struct scsi_qla_host *ha); +int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, + uint32_t *mbox_sts, dma_addr_t acb_dma); +int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma, + uint32_t acb_type, uint32_t len); +int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx, + uint32_t ip_idx, uint32_t *sts); +void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session); +u16 rd_nvram_word(struct scsi_qla_host *ha, int offset); +u8 rd_nvram_byte(struct scsi_qla_host *ha, int offset); +void qla4xxx_get_crash_record(struct scsi_qla_host *ha); +int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha); +int qla4xxx_about_firmware(struct scsi_qla_host *ha); +void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha, + uint32_t intr_status); +int qla4xxx_init_rings(struct scsi_qla_host *ha); +void qla4xxx_srb_compl(struct kref *ref); +struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha, + uint32_t index); +int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, + uint32_t state, uint32_t conn_error); +void qla4xxx_dump_buffer(void *b, uint32_t size); +int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry, uint64_t lun, uint16_t mrkr_mod); +int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr, + uint32_t offset, uint32_t length, uint32_t options); +int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, + uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts); +int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username, + char *password, int bidi, uint16_t *chap_index); +int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, + uint16_t idx, int bidi); + +void qla4xxx_queue_iocb(struct scsi_qla_host *ha); +void qla4xxx_complete_iocb(struct scsi_qla_host *ha); +int 
qla4xxx_get_sys_info(struct scsi_qla_host *ha); +int qla4xxx_iospace_config(struct scsi_qla_host *ha); +void qla4xxx_pci_config(struct scsi_qla_host *ha); +int qla4xxx_start_firmware(struct scsi_qla_host *ha); +irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id); +uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha); +uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha); +int qla4xxx_request_irqs(struct scsi_qla_host *ha); +void qla4xxx_free_irqs(struct scsi_qla_host *ha); +void qla4xxx_process_response_queue(struct scsi_qla_host *ha); +void qla4xxx_wake_dpc(struct scsi_qla_host *ha); +void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha); +void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha); +void qla4xxx_dump_registers(struct scsi_qla_host *ha); +uint8_t qla4xxx_update_local_ifcb(struct scsi_qla_host *ha, + uint32_t *mbox_cmd, + uint32_t *mbox_sts, + struct addr_ctrl_blk *init_fw_cb, + dma_addr_t init_fw_cb_dma); + +void qla4_8xxx_pci_config(struct scsi_qla_host *); +int qla4_8xxx_iospace_config(struct scsi_qla_host *ha); +int qla4_8xxx_load_risc(struct scsi_qla_host *); +irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id); +void qla4_82xx_queue_iocb(struct scsi_qla_host *ha); +void qla4_82xx_complete_iocb(struct scsi_qla_host *ha); + +void qla4_82xx_crb_win_unlock(struct scsi_qla_host *); +int qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *, ulong *); +void qla4_82xx_wr_32(struct scsi_qla_host *, ulong, u32); +uint32_t qla4_82xx_rd_32(struct scsi_qla_host *, ulong); +int qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *, u64, void *, int); +int qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha, u64, void *, int); +int qla4_82xx_isp_reset(struct scsi_qla_host *ha); +void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha, + uint32_t intr_status); +uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha); +uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha); +int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha); +void qla4_8xxx_watchdog(struct scsi_qla_host *ha); +int qla4_8xxx_stop_firmware(struct scsi_qla_host *ha); +int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha); +void qla4_82xx_enable_intrs(struct scsi_qla_host *ha); +void qla4_82xx_disable_intrs(struct scsi_qla_host *ha); +int qla4_8xxx_enable_msix(struct scsi_qla_host *ha); +irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id); +irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id); +irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id); +void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha); +void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha); +int qla4_82xx_idc_lock(struct scsi_qla_host *ha); +void qla4_82xx_idc_unlock(struct scsi_qla_host *ha); +int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha); +void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha); +void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha); +void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha); +int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index); +int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry, + struct iscsi_cls_conn *cls_conn, + uint32_t *mbx_sts); +int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry, int options); +int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index, + uint32_t *mbx_sts); +int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index); +int qla4xxx_send_passthru0(struct 
iscsi_task *task); +void qla4xxx_free_ddb_index(struct scsi_qla_host *ha); +int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index, + uint16_t stats_size, dma_addr_t stats_dma); +void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry); +void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry); +int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index); +int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, + char *password, uint16_t idx); +int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma, + uint32_t offset, uint32_t size); +int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma, + uint32_t offset, uint32_t size); +int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha, + uint32_t region, uint32_t field0, + uint32_t field1); +int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index); +void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session); +int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session); +int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session); +int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index, + struct ddb_entry *ddb_entry, uint32_t state); +int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index, + struct ddb_entry *ddb_entry, uint32_t state); +void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset); +int qla4xxx_post_aen_work(struct scsi_qla_host *ha, + enum iscsi_host_event_code aen_code, + uint32_t data_size, uint8_t *data); +int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options, + uint32_t payload_size, uint32_t pid, uint8_t *ipaddr); +int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha, + uint32_t status, uint32_t pid, + uint32_t data_size, uint8_t *data); +int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index); + +/* BSG Functions */ +int qla4xxx_bsg_request(struct bsg_job *bsg_job); +int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job); + +void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry); +int qla4xxx_get_minidump_template(struct scsi_qla_host *ha, + dma_addr_t phys_addr); +int qla4xxx_req_template_size(struct scsi_qla_host *ha); +void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha); +void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha); +void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha); +int qla4_82xx_try_start_fw(struct scsi_qla_host *ha); +int qla4_8xxx_need_reset(struct scsi_qla_host *ha); +int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data); +int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data); +void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha); +void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd, + int incount); +void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount); +void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd, + int incount); +void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int outcount); +void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha); +void qla4_83xx_disable_intrs(struct scsi_qla_host *ha); +void qla4_83xx_enable_intrs(struct scsi_qla_host *ha); +int qla4_83xx_start_firmware(struct scsi_qla_host *ha); +irqreturn_t qla4_83xx_intr_handler(int irq, void 
*dev_id); +void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha, + uint32_t intr_status); +int qla4_83xx_isp_reset(struct scsi_qla_host *ha); +void qla4_83xx_queue_iocb(struct scsi_qla_host *ha); +void qla4_83xx_complete_iocb(struct scsi_qla_host *ha); +uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr); +void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val); +int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr, + uint32_t *data); +int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr, + uint32_t data); +int qla4_83xx_drv_lock(struct scsi_qla_host *ha); +void qla4_83xx_drv_unlock(struct scsi_qla_host *ha); +void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha); +void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd, + int incount); +void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount); +void qla4_83xx_read_reset_template(struct scsi_qla_host *ha); +void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha); +int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha); +int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha, + uint32_t flash_addr, uint8_t *p_data, + int u32_word_count); +void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha); +void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha); +int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr, + uint8_t *p_data, int u32_word_count); +void qla4_83xx_get_idc_param(struct scsi_qla_host *ha); +void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha); +void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha); +int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha); +void qla4_8xxx_get_minidump(struct scsi_qla_host *ha); +int qla4_8xxx_intr_disable(struct scsi_qla_host *ha); +int qla4_8xxx_intr_enable(struct scsi_qla_host *ha); +int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param); +int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha); +int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha); +void qla4_83xx_disable_pause(struct scsi_qla_host *ha); +void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha); +int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha); +int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options, + dma_addr_t dma_addr); +int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username, + char *password, uint16_t chap_index); +int qla4xxx_disable_acb(struct scsi_qla_host *ha); +int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, + uint32_t *mbox_sts, dma_addr_t acb_dma); +int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma, + uint32_t acb_type, uint32_t len); +int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config); +int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, + uint64_t addr, uint32_t *data, uint32_t count); +uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state); +int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config); +int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config); +int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha); +int qla4_83xx_is_detached(struct scsi_qla_host *ha); +int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha); + +extern int ql4xextended_error_logging; +extern int ql4xdontresethba; +extern int ql4xenablemsix; +extern int ql4xmdcapmask; +extern int ql4xenablemd; + +extern const struct attribute_group *qla4xxx_host_groups[]; + +#endif /* _QLA4x_GBL_H */ diff --git 
a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c new file mode 100644 index 000000000..301bc09c8 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -0,0 +1,1265 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ + +#include +#include "ql4_def.h" +#include "ql4_glbl.h" +#include "ql4_dbg.h" +#include "ql4_inline.h" + +static void ql4xxx_set_mac_number(struct scsi_qla_host *ha) +{ + uint32_t value; + unsigned long flags; + + /* Get the function number */ + spin_lock_irqsave(&ha->hardware_lock, flags); + value = readw(&ha->reg->ctrl_status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + switch (value & ISP_CONTROL_FN_MASK) { + case ISP_CONTROL_FN0_SCSI: + ha->mac_index = 1; + break; + case ISP_CONTROL_FN1_SCSI: + ha->mac_index = 3; + break; + default: + DEBUG2(printk("scsi%ld: %s: Invalid function number, " + "ispControlStatus = 0x%x\n", ha->host_no, + __func__, value)); + break; + } + DEBUG2(printk("scsi%ld: %s: mac_index %d.\n", ha->host_no, __func__, + ha->mac_index)); +} + +/** + * qla4xxx_free_ddb - deallocate ddb + * @ha: pointer to host adapter structure. + * @ddb_entry: pointer to device database entry + * + * This routine marks a DDB entry INVALID + **/ +void qla4xxx_free_ddb(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry) +{ + /* Remove device pointer from index mapping arrays */ + ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = + (struct ddb_entry *) INVALID_ENTRY; + ha->tot_ddbs--; +} + +/** + * qla4xxx_init_response_q_entries() - Initializes response queue entries. + * @ha: HA context + * + * Beginning of request ring has initialization control block already built + * by nvram config routine. + **/ +static void qla4xxx_init_response_q_entries(struct scsi_qla_host *ha) +{ + uint16_t cnt; + struct response *pkt; + + pkt = (struct response *)ha->response_ptr; + for (cnt = 0; cnt < RESPONSE_QUEUE_DEPTH; cnt++) { + pkt->signature = RESPONSE_PROCESSED; + pkt++; + } +} + +/** + * qla4xxx_init_rings - initialize hw queues + * @ha: pointer to host adapter structure. + * + * This routine initializes the internal queues for the specified adapter. + * The QLA4010 requires us to restart the queues at index 0. + * The QLA4000 doesn't care, so just default to QLA4010's requirement. + **/ +int qla4xxx_init_rings(struct scsi_qla_host *ha) +{ + unsigned long flags = 0; + int i; + + /* Initialize request queue. */ + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->request_out = 0; + ha->request_in = 0; + ha->request_ptr = &ha->request_ring[ha->request_in]; + ha->req_q_count = REQUEST_QUEUE_DEPTH; + + /* Initialize response queue. */ + ha->response_in = 0; + ha->response_out = 0; + ha->response_ptr = &ha->response_ring[ha->response_out]; + + if (is_qla8022(ha)) { + writel(0, + (unsigned long __iomem *)&ha->qla4_82xx_reg->req_q_out); + writel(0, + (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_in); + writel(0, + (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_out); + } else if (is_qla8032(ha) || is_qla8042(ha)) { + writel(0, + (unsigned long __iomem *)&ha->qla4_83xx_reg->req_q_in); + writel(0, + (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_in); + writel(0, + (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_out); + } else { + /* + * Initialize DMA Shadow registers. 
The firmware is really + * supposed to take care of this, but on some uniprocessor + * systems, the shadow registers aren't cleared-- causing + * the interrupt_handler to think there are responses to be + * processed when there aren't. + */ + ha->shadow_regs->req_q_out = cpu_to_le32(0); + ha->shadow_regs->rsp_q_in = cpu_to_le32(0); + wmb(); + + writel(0, &ha->reg->req_q_in); + writel(0, &ha->reg->rsp_q_out); + readl(&ha->reg->rsp_q_out); + } + + qla4xxx_init_response_q_entries(ha); + + /* Initialize mailbox active array */ + for (i = 0; i < MAX_MRB; i++) + ha->active_mrb_array[i] = NULL; + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_SUCCESS; +} + +/** + * qla4xxx_get_sys_info - validate adapter MAC address(es) + * @ha: pointer to host adapter structure. + * + **/ +int qla4xxx_get_sys_info(struct scsi_qla_host *ha) +{ + struct flash_sys_info *sys_info; + dma_addr_t sys_info_dma; + int status = QLA_ERROR; + + sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), + &sys_info_dma, GFP_KERNEL); + if (sys_info == NULL) { + DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", + ha->host_no, __func__)); + + goto exit_get_sys_info_no_free; + } + + /* Get flash sys info */ + if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO, + sizeof(*sys_info)) != QLA_SUCCESS) { + DEBUG2(printk("scsi%ld: %s: get_flash FLASH_OFFSET_SYS_INFO " + "failed\n", ha->host_no, __func__)); + + goto exit_get_sys_info; + } + + /* Save M.A.C. address & serial_number */ + memcpy(ha->my_mac, &sys_info->physAddr[0].address[0], + min(sizeof(ha->my_mac), + sizeof(sys_info->physAddr[0].address))); + memcpy(ha->serial_number, &sys_info->acSerialNumber, + min(sizeof(ha->serial_number), + sizeof(sys_info->acSerialNumber))); + + status = QLA_SUCCESS; + +exit_get_sys_info: + dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info, + sys_info_dma); + +exit_get_sys_info_no_free: + return status; +} + +/** + * qla4xxx_init_local_data - initialize adapter specific local data + * @ha: pointer to host adapter structure. + * + **/ +static void qla4xxx_init_local_data(struct scsi_qla_host *ha) +{ + /* Initialize aen queue */ + ha->aen_q_count = MAX_AEN_ENTRIES; +} + +static uint8_t +qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha) +{ + uint8_t ipv4_wait = 0; + uint8_t ipv6_wait = 0; + int8_t ip_address[IPv6_ADDR_LEN] = {0} ; + + /* If both IPv4 & IPv6 are enabled, possibly only one + * IP address may be acquired, so check to see if we + * need to wait for another */ + if (is_ipv4_enabled(ha) && is_ipv6_enabled(ha)) { + if (((ha->addl_fw_state & FW_ADDSTATE_DHCPv4_ENABLED) != 0) && + ((ha->addl_fw_state & + FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED) == 0)) { + ipv4_wait = 1; + } + if (((ha->ip_config.ipv6_addl_options & + IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) && + ((ha->ip_config.ipv6_link_local_state == + IP_ADDRSTATE_ACQUIRING) || + (ha->ip_config.ipv6_addr0_state == + IP_ADDRSTATE_ACQUIRING) || + (ha->ip_config.ipv6_addr1_state == + IP_ADDRSTATE_ACQUIRING))) { + + ipv6_wait = 1; + + if ((ha->ip_config.ipv6_link_local_state == + IP_ADDRSTATE_PREFERRED) || + (ha->ip_config.ipv6_addr0_state == + IP_ADDRSTATE_PREFERRED) || + (ha->ip_config.ipv6_addr1_state == + IP_ADDRSTATE_PREFERRED)) { + DEBUG2(printk(KERN_INFO "scsi%ld: %s: " + "Preferred IP configured." + " Don't wait!\n", ha->host_no, + __func__)); + ipv6_wait = 0; + } + if (memcmp(&ha->ip_config.ipv6_default_router_addr, + ip_address, IPv6_ADDR_LEN) == 0) { + DEBUG2(printk(KERN_INFO "scsi%ld: %s: " + "No Router configured. 
" + "Don't wait!\n", ha->host_no, + __func__)); + ipv6_wait = 0; + } + if ((ha->ip_config.ipv6_default_router_state == + IPV6_RTRSTATE_MANUAL) && + (ha->ip_config.ipv6_link_local_state == + IP_ADDRSTATE_TENTATIVE) && + (memcmp(&ha->ip_config.ipv6_link_local_addr, + &ha->ip_config.ipv6_default_router_addr, 4) == + 0)) { + DEBUG2(printk("scsi%ld: %s: LinkLocal Router & " + "IP configured. Don't wait!\n", + ha->host_no, __func__)); + ipv6_wait = 0; + } + } + if (ipv4_wait || ipv6_wait) { + DEBUG2(printk("scsi%ld: %s: Wait for additional " + "IP(s) \"", ha->host_no, __func__)); + if (ipv4_wait) + DEBUG2(printk("IPv4 ")); + if (ha->ip_config.ipv6_link_local_state == + IP_ADDRSTATE_ACQUIRING) + DEBUG2(printk("IPv6LinkLocal ")); + if (ha->ip_config.ipv6_addr0_state == + IP_ADDRSTATE_ACQUIRING) + DEBUG2(printk("IPv6Addr0 ")); + if (ha->ip_config.ipv6_addr1_state == + IP_ADDRSTATE_ACQUIRING) + DEBUG2(printk("IPv6Addr1 ")); + DEBUG2(printk("\"\n")); + } + } + + return ipv4_wait|ipv6_wait; +} + +static int qla4_80xx_is_minidump_dma_capable(struct scsi_qla_host *ha, + struct qla4_8xxx_minidump_template_hdr *md_hdr) +{ + int offset = (is_qla8022(ha)) ? QLA8022_TEMPLATE_CAP_OFFSET : + QLA83XX_TEMPLATE_CAP_OFFSET; + int rval = 1; + uint32_t *cap_offset; + + cap_offset = (uint32_t *)((char *)md_hdr + offset); + + if (!(le32_to_cpu(*cap_offset) & BIT_0)) { + ql4_printk(KERN_INFO, ha, "PEX DMA Not supported %d\n", + *cap_offset); + rval = 0; + } + + return rval; +} + +/** + * qla4xxx_alloc_fw_dump - Allocate memory for minidump data. + * @ha: pointer to host adapter structure. + **/ +void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha) +{ + int status; + uint32_t capture_debug_level; + int hdr_entry_bit, k; + void *md_tmp; + dma_addr_t md_tmp_dma; + struct qla4_8xxx_minidump_template_hdr *md_hdr; + int dma_capable; + + if (ha->fw_dump) { + ql4_printk(KERN_WARNING, ha, + "Firmware dump previously allocated.\n"); + return; + } + + status = qla4xxx_req_template_size(ha); + if (status != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, + "scsi%ld: Failed to get template size\n", + ha->host_no); + return; + } + + clear_bit(AF_82XX_FW_DUMPED, &ha->flags); + + /* Allocate memory for saving the template */ + md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size, + &md_tmp_dma, GFP_KERNEL); + if (!md_tmp) { + ql4_printk(KERN_INFO, ha, + "scsi%ld: Failed to allocate DMA memory\n", + ha->host_no); + return; + } + + /* Request template */ + status = qla4xxx_get_minidump_template(ha, md_tmp_dma); + if (status != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, + "scsi%ld: Failed to get minidump template\n", + ha->host_no); + goto alloc_cleanup; + } + + md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp; + + dma_capable = qla4_80xx_is_minidump_dma_capable(ha, md_hdr); + + capture_debug_level = md_hdr->capture_debug_level; + + /* Get capture mask based on module loadtime setting. 
*/ + if ((ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) || + (ql4xmdcapmask == 0xFF && dma_capable)) { + ha->fw_dump_capture_mask = ql4xmdcapmask; + } else { + if (ql4xmdcapmask == 0xFF) + ql4_printk(KERN_INFO, ha, "Falling back to default capture mask, as PEX DMA is not supported\n"); + ha->fw_dump_capture_mask = capture_debug_level; + } + + md_hdr->driver_capture_mask = ha->fw_dump_capture_mask; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Minimum num of entries = %d\n", + md_hdr->num_of_entries)); + DEBUG2(ql4_printk(KERN_INFO, ha, "Dump template size = %d\n", + ha->fw_dump_tmplt_size)); + DEBUG2(ql4_printk(KERN_INFO, ha, "Selected Capture mask =0x%x\n", + ha->fw_dump_capture_mask)); + + /* Calculate fw_dump_size */ + for (hdr_entry_bit = 0x2, k = 1; (hdr_entry_bit & 0xFF); + hdr_entry_bit <<= 1, k++) { + if (hdr_entry_bit & ha->fw_dump_capture_mask) + ha->fw_dump_size += md_hdr->capture_size_array[k]; + } + + /* Total firmware dump size including command header */ + ha->fw_dump_size += ha->fw_dump_tmplt_size; + ha->fw_dump = vmalloc(ha->fw_dump_size); + if (!ha->fw_dump) + goto alloc_cleanup; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Minidump Template Size = 0x%x KB\n", + ha->fw_dump_tmplt_size)); + DEBUG2(ql4_printk(KERN_INFO, ha, + "Total Minidump size = 0x%x KB\n", ha->fw_dump_size)); + + memcpy(ha->fw_dump, md_tmp, ha->fw_dump_tmplt_size); + ha->fw_dump_tmplt_hdr = ha->fw_dump; + +alloc_cleanup: + dma_free_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size, + md_tmp, md_tmp_dma); +} + +static int qla4xxx_fw_ready(struct scsi_qla_host *ha) +{ + uint32_t timeout_count; + int ready = 0; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Waiting for Firmware Ready..\n")); + for (timeout_count = ADAPTER_INIT_TOV; timeout_count > 0; + timeout_count--) { + if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) + qla4xxx_get_dhcp_ip_address(ha); + + /* Get firmware state. */ + if (qla4xxx_get_firmware_state(ha) != QLA_SUCCESS) { + DEBUG2(printk("scsi%ld: %s: unable to get firmware " + "state\n", ha->host_no, __func__)); + break; + } + + if (ha->firmware_state & FW_STATE_ERROR) { + DEBUG2(printk("scsi%ld: %s: an unrecoverable error has" + " occurred\n", ha->host_no, __func__)); + break; + + } + if (ha->firmware_state & FW_STATE_CONFIG_WAIT) { + /* + * The firmware has not yet been issued an Initialize + * Firmware command, so issue it now. + */ + if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) + break; + + /* Go back and test for ready state - no wait. */ + continue; + } + + if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) { + DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:" + "AUTOCONNECT in progress\n", + ha->host_no, __func__)); + } + + if (ha->firmware_state & FW_STATE_CONFIGURING_IP) { + DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:" + " CONFIGURING IP\n", + ha->host_no, __func__)); + /* + * Check for link state after 15 secs and if link is + * still DOWN then, cable is unplugged. Ignore "DHCP + * in Progress/CONFIGURING IP" bit to check if firmware + * is in ready state or not after 15 secs. 
+ * This is applicable for both 2.x & 3.x firmware + */ + if (timeout_count <= (ADAPTER_INIT_TOV - 15)) { + if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) { + DEBUG2(printk(KERN_INFO "scsi%ld: %s:" + " LINK UP (Cable plugged)\n", + ha->host_no, __func__)); + } else if (ha->firmware_state & + (FW_STATE_CONFIGURING_IP | + FW_STATE_READY)) { + DEBUG2(printk(KERN_INFO "scsi%ld: %s: " + "LINK DOWN (Cable unplugged)\n", + ha->host_no, __func__)); + ha->firmware_state = FW_STATE_READY; + } + } + } + + if (ha->firmware_state == FW_STATE_READY) { + /* If DHCP IP Addr is available, retrieve it now. */ + if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, + &ha->dpc_flags)) + qla4xxx_get_dhcp_ip_address(ha); + + if (!qla4xxx_wait_for_ip_config(ha) || + timeout_count == 1) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "Firmware Ready..\n")); + /* The firmware is ready to process SCSI + commands. */ + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: %s: MEDIA TYPE" + " - %s\n", ha->host_no, + __func__, (ha->addl_fw_state & + FW_ADDSTATE_OPTICAL_MEDIA) + != 0 ? "OPTICAL" : "COPPER")); + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: %s: DHCPv4 STATE" + " Enabled %s\n", ha->host_no, + __func__, (ha->addl_fw_state & + FW_ADDSTATE_DHCPv4_ENABLED) != 0 ? + "YES" : "NO")); + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: %s: LINK %s\n", + ha->host_no, __func__, + (ha->addl_fw_state & + FW_ADDSTATE_LINK_UP) != 0 ? + "UP" : "DOWN")); + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: %s: iSNS Service " + "Started %s\n", + ha->host_no, __func__, + (ha->addl_fw_state & + FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ? + "YES" : "NO")); + + ready = 1; + break; + } + } + DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - " + "seconds expired= %d\n", ha->host_no, __func__, + ha->firmware_state, ha->addl_fw_state, + timeout_count)); + if (is_qla4032(ha) && + !(ha->addl_fw_state & FW_ADDSTATE_LINK_UP) && + (timeout_count < ADAPTER_INIT_TOV - 5)) { + break; + } + + msleep(1000); + } /* end of for */ + + if (timeout_count <= 0) + DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n", + ha->host_no, __func__)); + + if (ha->firmware_state & FW_STATE_CONFIGURING_IP) { + DEBUG2(printk("scsi%ld: %s: FW initialized, but is reporting " + "it's waiting to configure an IP address\n", + ha->host_no, __func__)); + ready = 1; + } else if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) { + DEBUG2(printk("scsi%ld: %s: FW initialized, but " + "auto-discovery still in process\n", + ha->host_no, __func__)); + ready = 1; + } + + return ready; +} + +/** + * qla4xxx_init_firmware - initializes the firmware. + * @ha: pointer to host adapter structure. + * + **/ +static int qla4xxx_init_firmware(struct scsi_qla_host *ha) +{ + int status = QLA_ERROR; + + if (is_aer_supported(ha) && + test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags)) + return status; + + /* For 82xx, stop firmware before initializing because if BIOS + * has previously initialized firmware, then driver's initialize + * firmware will fail. 
*/ + if (is_qla80XX(ha)) + qla4_8xxx_stop_firmware(ha); + + ql4_printk(KERN_INFO, ha, "Initializing firmware..\n"); + if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) { + DEBUG2(printk("scsi%ld: %s: Failed to initialize firmware " + "control block\n", ha->host_no, __func__)); + return status; + } + + if (!qla4xxx_fw_ready(ha)) + return status; + + if (is_qla80XX(ha) && !test_bit(AF_INIT_DONE, &ha->flags)) + qla4xxx_alloc_fw_dump(ha); + + return qla4xxx_get_firmware_status(ha); +} + +static void qla4xxx_set_model_info(struct scsi_qla_host *ha) +{ + uint16_t board_id_string[8]; + int i; + int size = sizeof(ha->nvram->isp4022.boardIdStr); + int offset = offsetof(struct eeprom_data, isp4022.boardIdStr) / 2; + + for (i = 0; i < (size / 2) ; i++) { + board_id_string[i] = rd_nvram_word(ha, offset); + offset += 1; + } + + memcpy(ha->model_name, board_id_string, size); +} + +static int qla4xxx_config_nvram(struct scsi_qla_host *ha) +{ + unsigned long flags; + union external_hw_config_reg extHwConfig; + + DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no, + __func__)); + if (ql4xxx_lock_flash(ha) != QLA_SUCCESS) + return QLA_ERROR; + if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) { + ql4xxx_unlock_flash(ha); + return QLA_ERROR; + } + + /* Get EEPRom Parameters from NVRAM and validate */ + ql4_printk(KERN_INFO, ha, "Configuring NVRAM ...\n"); + if (qla4xxx_is_nvram_configuration_valid(ha) == QLA_SUCCESS) { + spin_lock_irqsave(&ha->hardware_lock, flags); + extHwConfig.Asuint32_t = + rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha)); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } else { + ql4_printk(KERN_WARNING, ha, + "scsi%ld: %s: EEProm checksum invalid. " + "Please update your EEPROM\n", ha->host_no, + __func__); + + /* Attempt to set defaults */ + if (is_qla4010(ha)) + extHwConfig.Asuint32_t = 0x1912; + else if (is_qla4022(ha) | is_qla4032(ha)) + extHwConfig.Asuint32_t = 0x0023; + else + return QLA_ERROR; + } + + if (is_qla4022(ha) || is_qla4032(ha)) + qla4xxx_set_model_info(ha); + else + strcpy(ha->model_name, "QLA4010"); + + DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n", + ha->host_no, __func__, extHwConfig.Asuint32_t)); + + spin_lock_irqsave(&ha->hardware_lock, flags); + writel((0xFFFF << 16) | extHwConfig.Asuint32_t, isp_ext_hw_conf(ha)); + readl(isp_ext_hw_conf(ha)); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + ql4xxx_unlock_nvram(ha); + ql4xxx_unlock_flash(ha); + + return QLA_SUCCESS; +} + +/** + * qla4_8xxx_pci_config() - Setup ISP82xx PCI configuration registers. + * @ha: HA context + */ +void qla4_8xxx_pci_config(struct scsi_qla_host *ha) +{ + pci_set_master(ha->pdev); +} + +void qla4xxx_pci_config(struct scsi_qla_host *ha) +{ + uint16_t w; + int status; + + ql4_printk(KERN_INFO, ha, "Configuring PCI space...\n"); + + pci_set_master(ha->pdev); + status = pci_set_mwi(ha->pdev); + if (status) + ql4_printk(KERN_WARNING, ha, "Failed to set MWI\n"); + + /* + * We want to respect framework's setting of PCI configuration space + * command register and also want to make sure that all bits of + * interest to us are properly set in command register. 
+ */ + pci_read_config_word(ha->pdev, PCI_COMMAND, &w); + w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; + w &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(ha->pdev, PCI_COMMAND, w); +} + +static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha) +{ + int status = QLA_ERROR; + unsigned long max_wait_time; + unsigned long flags; + uint32_t mbox_status; + + ql4_printk(KERN_INFO, ha, "Starting firmware ...\n"); + + /* + * Start firmware from flash ROM + * + * WORKAROUND: Stuff a non-constant value that the firmware can + * use as a seed for a random number generator in MB7 prior to + * setting BOOT_ENABLE. Fixes problem where the TCP + * connections use the same TCP ports after each reboot, + * causing some connections to not get re-established. + */ + DEBUG(printk("scsi%d: %s: Start firmware from flash ROM\n", + ha->host_no, __func__)); + + spin_lock_irqsave(&ha->hardware_lock, flags); + writel(jiffies, &ha->reg->mailbox[7]); + if (is_qla4022(ha) | is_qla4032(ha)) + writel(set_rmask(NVR_WRITE_ENABLE), + &ha->reg->u1.isp4022.nvram); + + writel(2, &ha->reg->mailbox[6]); + readl(&ha->reg->mailbox[6]); + + writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* Wait for firmware to come UP. */ + DEBUG2(printk(KERN_INFO "scsi%ld: %s: Wait up to %d seconds for " + "boot firmware to complete...\n", + ha->host_no, __func__, FIRMWARE_UP_TOV)); + max_wait_time = jiffies + (FIRMWARE_UP_TOV * HZ); + do { + uint32_t ctrl_status; + + spin_lock_irqsave(&ha->hardware_lock, flags); + ctrl_status = readw(&ha->reg->ctrl_status); + mbox_status = readw(&ha->reg->mailbox[0]); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (ctrl_status & set_rmask(CSR_SCSI_PROCESSOR_INTR)) + break; + if (mbox_status == MBOX_STS_COMMAND_COMPLETE) + break; + + DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot " + "firmware to complete... ctrl_sts=0x%x, remaining=%ld\n", + ha->host_no, __func__, ctrl_status, max_wait_time)); + + msleep_interruptible(250); + } while (!time_after_eq(jiffies, max_wait_time)); + + if (mbox_status == MBOX_STS_COMMAND_COMPLETE) { + DEBUG(printk(KERN_INFO "scsi%ld: %s: Firmware has started\n", + ha->host_no, __func__)); + + spin_lock_irqsave(&ha->hardware_lock, flags); + writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), + &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + status = QLA_SUCCESS; + } else { + printk(KERN_INFO "scsi%ld: %s: Boot firmware failed " + "- mbox status 0x%x\n", ha->host_no, __func__, + mbox_status); + status = QLA_ERROR; + } + return status; +} + +int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a) +{ +#define QL4_LOCK_DRVR_WAIT 60 +#define QL4_LOCK_DRVR_SLEEP 1 + + int drvr_wait = QL4_LOCK_DRVR_WAIT; + while (drvr_wait) { + if (ql4xxx_lock_drvr(a) == 0) { + ssleep(QL4_LOCK_DRVR_SLEEP); + DEBUG2(printk("scsi%ld: %s: Waiting for " + "Global Init Semaphore(%d)...\n", + a->host_no, + __func__, drvr_wait)); + drvr_wait -= QL4_LOCK_DRVR_SLEEP; + } else { + DEBUG2(printk("scsi%ld: %s: Global Init Semaphore " + "acquired\n", a->host_no, __func__)); + return QLA_SUCCESS; + } + } + return QLA_ERROR; +} + +/** + * qla4xxx_start_firmware - starts qla4xxx firmware + * @ha: Pointer to host adapter structure. + * + * This routine performs the necessary steps to start the firmware for + * the QLA4010 adapter. 
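/*
 * Illustrative sketch, not part of the upstream driver: ql4xxx_lock_drvr_wait()
 * above keeps retrying a non-blocking attempt to take the global init
 * semaphore, sleeping one second between attempts until a 60 second budget
 * is spent.  The generic shape of that retry loop, with try_lock() standing
 * in for ql4xxx_lock_drvr(), is:
 */
#include <unistd.h>

static int lock_with_timeout(int (*try_lock)(void *ctx), void *ctx,
                             int wait_secs, int sleep_secs)
{
        while (wait_secs > 0) {
                if (try_lock(ctx))
                        return 1;       /* semaphore acquired */
                sleep(sleep_secs);      /* back off before the next attempt */
                wait_secs -= sleep_secs;
        }
        return 0;                       /* never acquired within the budget */
}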
+ **/ +int qla4xxx_start_firmware(struct scsi_qla_host *ha) +{ + unsigned long flags = 0; + uint32_t mbox_status; + int status = QLA_ERROR; + int soft_reset = 1; + int config_chip = 0; + + if (is_qla4022(ha) | is_qla4032(ha)) + ql4xxx_set_mac_number(ha); + + if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) + return QLA_ERROR; + + spin_lock_irqsave(&ha->hardware_lock, flags); + + DEBUG2(printk("scsi%ld: %s: port_ctrl = 0x%08X\n", ha->host_no, + __func__, readw(isp_port_ctrl(ha)))); + DEBUG(printk("scsi%ld: %s: port_status = 0x%08X\n", ha->host_no, + __func__, readw(isp_port_status(ha)))); + + /* Is Hardware already initialized? */ + if ((readw(isp_port_ctrl(ha)) & 0x8000) != 0) { + DEBUG(printk("scsi%ld: %s: Hardware has already been " + "initialized\n", ha->host_no, __func__)); + + /* Receive firmware boot acknowledgement */ + mbox_status = readw(&ha->reg->mailbox[0]); + + DEBUG2(printk("scsi%ld: %s: H/W Config complete - mbox[0]= " + "0x%x\n", ha->host_no, __func__, mbox_status)); + + /* Is firmware already booted? */ + if (mbox_status == 0) { + /* F/W not running, must be config by net driver */ + config_chip = 1; + soft_reset = 0; + } else { + writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), + &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); + writel(set_rmask(CSR_SCSI_COMPLETION_INTR), + &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) { + DEBUG2(printk("scsi%ld: %s: Get firmware " + "state -- state = 0x%x\n", + ha->host_no, + __func__, ha->firmware_state)); + /* F/W is running */ + if (ha->firmware_state & + FW_STATE_CONFIG_WAIT) { + DEBUG2(printk("scsi%ld: %s: Firmware " + "in known state -- " + "config and " + "boot, state = 0x%x\n", + ha->host_no, __func__, + ha->firmware_state)); + config_chip = 1; + soft_reset = 0; + } + } else { + DEBUG2(printk("scsi%ld: %s: Firmware in " + "unknown state -- resetting," + " state = " + "0x%x\n", ha->host_no, __func__, + ha->firmware_state)); + } + spin_lock_irqsave(&ha->hardware_lock, flags); + } + } else { + DEBUG(printk("scsi%ld: %s: H/W initialization hasn't been " + "started - resetting\n", ha->host_no, __func__)); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + DEBUG(printk("scsi%ld: %s: Flags soft_rest=%d, config= %d\n ", + ha->host_no, __func__, soft_reset, config_chip)); + if (soft_reset) { + DEBUG(printk("scsi%ld: %s: Issue Soft Reset\n", ha->host_no, + __func__)); + status = qla4xxx_soft_reset(ha); /* NOTE: acquires drvr + * lock again, but ok */ + if (status == QLA_ERROR) { + DEBUG(printk("scsi%d: %s: Soft Reset failed!\n", + ha->host_no, __func__)); + ql4xxx_unlock_drvr(ha); + return QLA_ERROR; + } + config_chip = 1; + + /* Reset clears the semaphore, so acquire again */ + if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) + return QLA_ERROR; + } + + if (config_chip) { + if ((status = qla4xxx_config_nvram(ha)) == QLA_SUCCESS) + status = qla4xxx_start_firmware_from_flash(ha); + } + + ql4xxx_unlock_drvr(ha); + if (status == QLA_SUCCESS) { + if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags)) + qla4xxx_get_crash_record(ha); + + qla4xxx_init_rings(ha); + } else { + DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n", + ha->host_no, __func__)); + } + return status; +} +/** + * qla4xxx_free_ddb_index - Free DDBs reserved by firmware + * @ha: pointer to adapter structure + * + * Since firmware is not running in autoconnect mode the DDB indices should + * be freed so that when login happens from user space there are 
free DDB + indices available. + **/ +void qla4xxx_free_ddb_index(struct scsi_qla_host *ha) +{ + int max_ddbs; + int ret; + uint32_t idx = 0, next_idx = 0; + uint32_t state = 0, conn_err = 0; + + max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : + MAX_DEV_DB_ENTRIES; + + for (idx = 0; idx < max_ddbs; idx = next_idx) { + ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL, + &next_idx, &state, &conn_err, + NULL, NULL); + if (ret == QLA_ERROR) { + next_idx++; + continue; + } + if (state == DDB_DS_NO_CONNECTION_ACTIVE || + state == DDB_DS_SESSION_FAILED) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "Freeing DDB index = 0x%x\n", idx)); + ret = qla4xxx_clear_ddb_entry(ha, idx); + if (ret == QLA_ERROR) + ql4_printk(KERN_ERR, ha, + "Unable to clear DDB index = " + "0x%x\n", idx); + } + if (next_idx == 0) + break; + } +} + +/** + * qla4xxx_initialize_adapter - initializes hba + * @ha: Pointer to host adapter structure. + * @is_reset: Is this init path or reset path + * + * This routine performs all of the steps necessary to initialize the adapter. + * + **/ +int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset) +{ + int status = QLA_ERROR; + + ha->eeprom_cmd_data = 0; + + ql4_printk(KERN_INFO, ha, "Configuring PCI space...\n"); + ha->isp_ops->pci_config(ha); + + ha->isp_ops->disable_intrs(ha); + + /* Initialize the Host adapter request/response queues and firmware */ + if (ha->isp_ops->start_firmware(ha) == QLA_ERROR) + goto exit_init_hba; + + /* + * For ISP83XX, mailbox and IOCB interrupts are enabled separately. + * Mailbox interrupts must be enabled prior to issuing any mailbox + * command in order to prevent the possibility of losing interrupts + * while switching from polling to interrupt mode. IOCB interrupts are + * enabled via isp_ops->enable_intrs. + */ + if (is_qla8032(ha) || is_qla8042(ha)) + qla4_83xx_enable_mbox_intrs(ha); + + if (qla4xxx_about_firmware(ha) == QLA_ERROR) + goto exit_init_hba; + + if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR) + goto exit_init_hba; + + qla4xxx_init_local_data(ha); + + status = qla4xxx_init_firmware(ha); + if (status == QLA_ERROR) + goto exit_init_hba; + + if (is_reset == RESET_ADAPTER) + qla4xxx_build_ddb_list(ha, is_reset); + + set_bit(AF_ONLINE, &ha->flags); + +exit_init_hba: + DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no, + status == QLA_ERROR ?
"FAILED" : "SUCCEEDED")); + return status; +} + +int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index, + struct ddb_entry *ddb_entry, uint32_t state) +{ + uint32_t old_fw_ddb_device_state; + int status = QLA_ERROR; + + old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: DDB - old state = 0x%x, new state = 0x%x for " + "index [%d]\n", __func__, + ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); + + ddb_entry->fw_ddb_device_state = state; + + switch (old_fw_ddb_device_state) { + case DDB_DS_LOGIN_IN_PROCESS: + switch (state) { + case DDB_DS_SESSION_ACTIVE: + case DDB_DS_DISCOVERY: + qla4xxx_update_session_conn_param(ha, ddb_entry); + ddb_entry->unblock_sess(ddb_entry->sess); + status = QLA_SUCCESS; + break; + case DDB_DS_SESSION_FAILED: + case DDB_DS_NO_CONNECTION_ACTIVE: + iscsi_conn_login_event(ddb_entry->conn, + ISCSI_CONN_STATE_FREE); + status = QLA_SUCCESS; + break; + } + break; + case DDB_DS_SESSION_ACTIVE: + case DDB_DS_DISCOVERY: + switch (state) { + case DDB_DS_SESSION_FAILED: + /* + * iscsi_session failure will cause userspace to + * stop the connection which in turn would block the + * iscsi_session and start relogin + */ + iscsi_session_failure(ddb_entry->sess->dd_data, + ISCSI_ERR_CONN_FAILED); + status = QLA_SUCCESS; + break; + case DDB_DS_NO_CONNECTION_ACTIVE: + clear_bit(fw_ddb_index, ha->ddb_idx_map); + status = QLA_SUCCESS; + break; + } + break; + case DDB_DS_SESSION_FAILED: + switch (state) { + case DDB_DS_SESSION_ACTIVE: + case DDB_DS_DISCOVERY: + ddb_entry->unblock_sess(ddb_entry->sess); + qla4xxx_update_session_conn_param(ha, ddb_entry); + status = QLA_SUCCESS; + break; + case DDB_DS_SESSION_FAILED: + iscsi_session_failure(ddb_entry->sess->dd_data, + ISCSI_ERR_CONN_FAILED); + status = QLA_SUCCESS; + break; + } + break; + default: + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n", + __func__)); + break; + } + return status; +} + +void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry) +{ + /* + * This triggers a relogin. After the relogin_timer + * expires, the relogin gets scheduled. We must wait a + * minimum amount of time since receiving an 0x8014 AEN + * with failed device_state or a logout response before + * we can issue another relogin. + * + * Firmware pads this timeout: (time2wait +1). + * Driver retry to login should be longer than F/W. + * Otherwise F/W will fail + * set_ddb() mbx cmd with 0x4005 since it still + * counting down its time2wait. 
+ */ + atomic_set(&ddb_entry->relogin_timer, 0); + atomic_set(&ddb_entry->retry_relogin_timer, + ddb_entry->default_time2wait + 4); + +} + +int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index, + struct ddb_entry *ddb_entry, uint32_t state) +{ + uint32_t old_fw_ddb_device_state; + int status = QLA_ERROR; + + old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: DDB - old state = 0x%x, new state = 0x%x for " + "index [%d]\n", __func__, + ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); + + ddb_entry->fw_ddb_device_state = state; + + switch (old_fw_ddb_device_state) { + case DDB_DS_LOGIN_IN_PROCESS: + case DDB_DS_NO_CONNECTION_ACTIVE: + switch (state) { + case DDB_DS_SESSION_ACTIVE: + ddb_entry->unblock_sess(ddb_entry->sess); + qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry); + status = QLA_SUCCESS; + break; + case DDB_DS_SESSION_FAILED: + iscsi_block_session(ddb_entry->sess); + if (!test_bit(DF_RELOGIN, &ddb_entry->flags)) + qla4xxx_arm_relogin_timer(ddb_entry); + status = QLA_SUCCESS; + break; + } + break; + case DDB_DS_SESSION_ACTIVE: + switch (state) { + case DDB_DS_SESSION_FAILED: + iscsi_block_session(ddb_entry->sess); + if (!test_bit(DF_RELOGIN, &ddb_entry->flags)) + qla4xxx_arm_relogin_timer(ddb_entry); + status = QLA_SUCCESS; + break; + } + break; + case DDB_DS_SESSION_FAILED: + switch (state) { + case DDB_DS_SESSION_ACTIVE: + ddb_entry->unblock_sess(ddb_entry->sess); + qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry); + status = QLA_SUCCESS; + break; + case DDB_DS_SESSION_FAILED: + if (!test_bit(DF_RELOGIN, &ddb_entry->flags)) + qla4xxx_arm_relogin_timer(ddb_entry); + status = QLA_SUCCESS; + break; + } + break; + default: + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n", + __func__)); + break; + } + return status; +} + +/** + * qla4xxx_process_ddb_changed - process ddb state change + * @ha: Pointer to host adapter structure. + * @fw_ddb_index: Firmware's device database index + * @state: Device state + * @conn_err: Unused + * + * This routine processes a Device Database Changed AEN Event. + **/ +int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, + uint32_t fw_ddb_index, + uint32_t state, uint32_t conn_err) +{ + struct ddb_entry *ddb_entry; + + /* check for out of range index */ + if (fw_ddb_index >= MAX_DDB_ENTRIES) + goto exit_ddb_event; + + /* Get the corresponding ddb entry */ + ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index); + /* Device does not currently exist in our database. */ + if (ddb_entry == NULL) { + ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n", + __func__, fw_ddb_index); + + if (state == DDB_DS_NO_CONNECTION_ACTIVE) + clear_bit(fw_ddb_index, ha->ddb_idx_map); + + goto exit_ddb_event; + } + + ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state); + +exit_ddb_event: + return QLA_ERROR; +} + +/** + * qla4xxx_login_flash_ddb - Login to target (DDB) + * @cls_session: Pointer to the session to login + * + * This routine logs in to the target.
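/*
 * Illustrative sketch, not part of the upstream driver: a worked example of
 * the relogin padding applied by qla4xxx_arm_relogin_timer() earlier in this
 * file.  If the target reports time2wait = 2 seconds, the firmware rejects
 * set_ddb for roughly time2wait + 1 = 3 seconds, so the driver schedules its
 * relogin retry for default_time2wait + 4 = 6 seconds and stays safely
 * behind the firmware's countdown.
 */
static inline int demo_relogin_retry_secs(int default_time2wait)
{
        return default_time2wait + 4;   /* mirrors the "+ 4" pad used above */
}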
+ * Issues setddb and conn open mbx + **/ +void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_dma; + uint32_t mbx_sts = 0; + int ret; + + sess = cls_session->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + + if (!test_bit(AF_LINK_UP, &ha->flags)) + return; + + if (ddb_entry->ddb_type != FLASH_DDB) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "Skipping login to non FLASH DB")); + goto exit_login; + } + + fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, + &fw_ddb_dma); + if (fw_ddb_entry == NULL) { + DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); + goto exit_login; + } + + if (ddb_entry->fw_ddb_index == INVALID_ENTRY) { + ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index); + if (ret == QLA_ERROR) + goto exit_login; + + ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry; + ha->tot_ddbs++; + } + + memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry, + sizeof(struct dev_db_entry)); + ddb_entry->sess->target_id = ddb_entry->fw_ddb_index; + + ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index, + fw_ddb_dma, &mbx_sts); + if (ret == QLA_ERROR) { + DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n")); + goto exit_login; + } + + ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS; + ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index); + if (ret == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__, + sess->targetname); + goto exit_login; + } + +exit_login: + if (fw_ddb_entry) + dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); +} + diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h new file mode 100644 index 000000000..9ced6b325 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_inline.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ + +/* + * + * qla4xxx_lookup_ddb_by_fw_index + * This routine locates a device handle given the firmware device + * database index. If device doesn't exist, returns NULL. + * + * Input: + * ha - Pointer to host adapter structure. 
+ * fw_ddb_index - Firmware's device database index + * + * Returns: + * Pointer to the corresponding internal device database structure + */ +static inline struct ddb_entry * +qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index) +{ + struct ddb_entry *ddb_entry = NULL; + + if ((fw_ddb_index < MAX_DDB_ENTRIES) && + (ha->fw_ddb_index_map[fw_ddb_index] != + (struct ddb_entry *) INVALID_ENTRY)) { + ddb_entry = ha->fw_ddb_index_map[fw_ddb_index]; + } + + DEBUG3(printk("scsi%d: %s: ddb [%d], ddb_entry = %p\n", + ha->host_no, __func__, fw_ddb_index, ddb_entry)); + + return ddb_entry; +} + +static inline void +__qla4xxx_enable_intrs(struct scsi_qla_host *ha) +{ + if (is_qla4022(ha) | is_qla4032(ha)) { + writel(set_rmask(IMR_SCSI_INTR_ENABLE), + &ha->reg->u1.isp4022.intr_mask); + readl(&ha->reg->u1.isp4022.intr_mask); + } else { + writel(set_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); + } + set_bit(AF_INTERRUPTS_ON, &ha->flags); +} + +static inline void +__qla4xxx_disable_intrs(struct scsi_qla_host *ha) +{ + if (is_qla4022(ha) | is_qla4032(ha)) { + writel(clr_rmask(IMR_SCSI_INTR_ENABLE), + &ha->reg->u1.isp4022.intr_mask); + readl(&ha->reg->u1.isp4022.intr_mask); + } else { + writel(clr_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); + } + clear_bit(AF_INTERRUPTS_ON, &ha->flags); +} + +static inline void +qla4xxx_enable_intrs(struct scsi_qla_host *ha) +{ + unsigned long flags; + + spin_lock_irqsave(&ha->hardware_lock, flags); + __qla4xxx_enable_intrs(ha); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +static inline void +qla4xxx_disable_intrs(struct scsi_qla_host *ha) +{ + unsigned long flags; + + spin_lock_irqsave(&ha->hardware_lock, flags); + __qla4xxx_disable_intrs(ha); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +static inline int qla4xxx_get_chap_type(struct ql4_chap_table *chap_entry) +{ + int type; + + if (chap_entry->flags & BIT_7) + type = LOCAL_CHAP; + else + type = BIDI_CHAP; + + return type; +} diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c new file mode 100644 index 000000000..28eab0793 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_iocb.c @@ -0,0 +1,541 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ + +#include "ql4_def.h" +#include "ql4_glbl.h" +#include "ql4_dbg.h" +#include "ql4_inline.h" + +#include + +static int +qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt) +{ + uint16_t cnt; + + /* Calculate number of free request entries. */ + if ((req_cnt + 2) >= ha->req_q_count) { + cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha); + if (ha->request_in < cnt) + ha->req_q_count = cnt - ha->request_in; + else + ha->req_q_count = REQUEST_QUEUE_DEPTH - + (ha->request_in - cnt); + } + + /* Check if room for request in request ring. */ + if ((req_cnt + 2) < ha->req_q_count) + return 1; + else + return 0; +} + +static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha) +{ + /* Advance request queue pointer */ + if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) { + ha->request_in = 0; + ha->request_ptr = ha->request_ring; + } else { + ha->request_in++; + ha->request_ptr++; + } +} + +/** + * qla4xxx_get_req_pkt - returns a valid entry in request queue. + * @ha: Pointer to host adapter structure. 
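/*
 * Illustrative sketch, not part of the upstream driver:
 * qla4xxx_space_in_req_ring() above derives the free space of the circular
 * request ring from the producer index (request_in) and the consumer index
 * the firmware publishes through a shadow register, and always keeps two
 * entries in reserve.  The index arithmetic in isolation, with a made-up
 * depth of 8 standing in for REQUEST_QUEUE_DEPTH:
 */
#include <assert.h>

#define DEMO_RING_DEPTH 8                       /* stand-in depth */

static unsigned int demo_ring_free(unsigned int in, unsigned int out)
{
        return (in >= out) ? DEMO_RING_DEPTH - (in - out) : out - in;
}

static void demo_ring_free_check(void)
{
        assert(demo_ring_free(5, 5) == 8);      /* ring treated as empty */
        assert(demo_ring_free(6, 4) == 6);      /* producer two entries ahead */
        assert(demo_ring_free(2, 6) == 4);      /* producer has wrapped */
}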
+ * @queue_entry: Pointer to pointer to queue entry structure + * + * This routine performs the following tasks: + * - returns the current request_in pointer (if queue not full) + * - advances the request_in pointer + * - checks for queue full + **/ +static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha, + struct queue_entry **queue_entry) +{ + uint16_t req_cnt = 1; + + if (qla4xxx_space_in_req_ring(ha, req_cnt)) { + *queue_entry = ha->request_ptr; + memset(*queue_entry, 0, sizeof(**queue_entry)); + + qla4xxx_advance_req_ring_ptr(ha); + ha->req_q_count -= req_cnt; + return QLA_SUCCESS; + } + + return QLA_ERROR; +} + +/** + * qla4xxx_send_marker_iocb - issues marker iocb to HBA + * @ha: Pointer to host adapter structure. + * @ddb_entry: Pointer to device database entry + * @lun: SCSI LUN + * @mrkr_mod: marker identifier + * + * This routine issues a marker IOCB. + **/ +int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry, uint64_t lun, uint16_t mrkr_mod) +{ + struct qla4_marker_entry *marker_entry; + unsigned long flags = 0; + uint8_t status = QLA_SUCCESS; + + /* Acquire hardware specific lock */ + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Get pointer to the queue entry for the marker */ + if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) != + QLA_SUCCESS) { + status = QLA_ERROR; + goto exit_send_marker; + } + + /* Put the marker in the request queue */ + marker_entry->hdr.entryType = ET_MARKER; + marker_entry->hdr.entryCount = 1; + marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index); + marker_entry->modifier = cpu_to_le16(mrkr_mod); + int_to_scsilun(lun, &marker_entry->lun); + wmb(); + + /* Tell ISP it's got a new I/O request */ + ha->isp_ops->queue_iocb(ha); + +exit_send_marker: + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return status; +} + +static struct continuation_t1_entry * +qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha) +{ + struct continuation_t1_entry *cont_entry; + + cont_entry = (struct continuation_t1_entry *)ha->request_ptr; + + qla4xxx_advance_req_ring_ptr(ha); + + /* Load packet defaults */ + cont_entry->hdr.entryType = ET_CONTINUE; + cont_entry->hdr.entryCount = 1; + cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in); + + return cont_entry; +} + +static uint16_t qla4xxx_calc_request_entries(uint16_t dsds) +{ + uint16_t iocbs; + + iocbs = 1; + if (dsds > COMMAND_SEG) { + iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG; + if ((dsds - COMMAND_SEG) % CONTINUE_SEG) + iocbs++; + } + return iocbs; +} + +static void qla4xxx_build_scsi_iocbs(struct srb *srb, + struct command_t3_entry *cmd_entry, + uint16_t tot_dsds) +{ + struct scsi_qla_host *ha; + uint16_t avail_dsds; + struct data_seg_a64 *cur_dsd; + struct scsi_cmnd *cmd; + struct scatterlist *sg; + int i; + + cmd = srb->cmd; + ha = srb->ha; + + if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { + /* No data being transferred */ + cmd_entry->ttlByteCnt = cpu_to_le32(0); + return; + } + + avail_dsds = COMMAND_SEG; + cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]); + + scsi_for_each_sg(cmd, sg, tot_dsds, i) { + dma_addr_t sle_dma; + + /* Allocate additional continuation packets? 
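/*
 * Illustrative sketch, not part of the upstream driver:
 * qla4xxx_calc_request_entries() above sizes one command IOCB plus enough
 * continuation IOCBs to hold the scatter/gather list; the command entry
 * holds COMMAND_SEG data segments and each continuation entry holds
 * CONTINUE_SEG more.  Using illustrative values COMMAND_SEG = 4 and
 * CONTINUE_SEG = 7 (the real values live in the driver headers), a
 * 20-segment command needs 1 + ceil((20 - 4) / 7) = 4 entries.
 */
#define DEMO_COMMAND_SEG  4     /* segments in the command IOCB (assumed) */
#define DEMO_CONTINUE_SEG 7     /* segments per continuation IOCB (assumed) */

static unsigned int demo_iocbs_needed(unsigned int dsds)
{
        unsigned int iocbs = 1;                 /* the command entry itself */

        if (dsds > DEMO_COMMAND_SEG)
                iocbs += (dsds - DEMO_COMMAND_SEG + DEMO_CONTINUE_SEG - 1) /
                         DEMO_CONTINUE_SEG;     /* ceiling division */
        return iocbs;
}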
*/ + if (avail_dsds == 0) { + struct continuation_t1_entry *cont_entry; + + cont_entry = qla4xxx_alloc_cont_entry(ha); + cur_dsd = + (struct data_seg_a64 *) + &cont_entry->dataseg[0]; + avail_dsds = CONTINUE_SEG; + } + + sle_dma = sg_dma_address(sg); + cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma)); + cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma)); + cur_dsd->count = cpu_to_le32(sg_dma_len(sg)); + avail_dsds--; + + cur_dsd++; + } +} + +void qla4_83xx_queue_iocb(struct scsi_qla_host *ha) +{ + writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in); + readl(&ha->qla4_83xx_reg->req_q_in); +} + +void qla4_83xx_complete_iocb(struct scsi_qla_host *ha) +{ + writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out); + readl(&ha->qla4_83xx_reg->rsp_q_out); +} + +/** + * qla4_82xx_queue_iocb - Tell ISP it's got new request(s) + * @ha: pointer to host adapter structure. + * + * This routine notifies the ISP that one or more new request + * queue entries have been placed on the request queue. + **/ +void qla4_82xx_queue_iocb(struct scsi_qla_host *ha) +{ + uint32_t dbval = 0; + + dbval = 0x14 | (ha->func_num << 5); + dbval = dbval | (0 << 8) | (ha->request_in << 16); + + qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in); +} + +/** + * qla4_82xx_complete_iocb - Tell ISP we're done with response(s) + * @ha: pointer to host adapter structure. + * + * This routine notifies the ISP that one or more response/completion + * queue entries have been processed by the driver. + * This also clears the interrupt. + **/ +void qla4_82xx_complete_iocb(struct scsi_qla_host *ha) +{ + writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out); + readl(&ha->qla4_82xx_reg->rsp_q_out); +} + +/** + * qla4xxx_queue_iocb - Tell ISP it's got new request(s) + * @ha: pointer to host adapter structure. + * + * This routine is notifies the ISP that one or more new request + * queue entries have been placed on the request queue. + **/ +void qla4xxx_queue_iocb(struct scsi_qla_host *ha) +{ + writel(ha->request_in, &ha->reg->req_q_in); + readl(&ha->reg->req_q_in); +} + +/** + * qla4xxx_complete_iocb - Tell ISP we're done with response(s) + * @ha: pointer to host adapter structure. + * + * This routine is notifies the ISP that one or more response/completion + * queue entries have been processed by the driver. + * This also clears the interrupt. + **/ +void qla4xxx_complete_iocb(struct scsi_qla_host *ha) +{ + writel(ha->response_out, &ha->reg->rsp_q_out); + readl(&ha->reg->rsp_q_out); +} + +/** + * qla4xxx_send_command_to_isp - issues command to HBA + * @ha: pointer to host adapter structure. + * @srb: pointer to SCSI Request Block to be sent to ISP + * + * This routine is called by qla4xxx_queuecommand to build an ISP + * command and pass it to the ISP for execution. + **/ +int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb) +{ + struct scsi_cmnd *cmd = srb->cmd; + struct ddb_entry *ddb_entry; + struct command_t3_entry *cmd_entry; + int nseg; + uint16_t tot_dsds; + uint16_t req_cnt; + unsigned long flags; + uint32_t index; + + /* Get real lun and adapter */ + ddb_entry = srb->ddb; + + tot_dsds = 0; + + /* Acquire hardware specific lock */ + spin_lock_irqsave(&ha->hardware_lock, flags); + + index = scsi_cmd_to_rq(cmd)->tag; + + /* + * Check to see if adapter is online before placing request on + * request queue. If a reset occurs and a request is in the queue, + * the firmware will still attempt to process the request, retrieving + * garbage for pointers. 
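/*
 * Illustrative sketch, not part of the upstream driver:
 * qla4_82xx_queue_iocb() above assembles its doorbell word from bit fields
 * (a fixed doorbell code, the PCI function number and the request-queue
 * producer index).  The packing is plain shifting and OR-ing; the layout
 * below mirrors the expression in that function but is shown purely as an
 * illustration.
 */
#include <stdint.h>

static uint32_t demo_pack_doorbell(uint32_t func_num, uint32_t request_in)
{
        return 0x14 |                   /* fixed request-queue doorbell code */
               (func_num << 5) |        /* PCI function number */
               (request_in << 16);      /* producer index */
}

static uint32_t demo_doorbell_producer(uint32_t dbval)
{
        return (dbval >> 16) & 0xffff;  /* recover the producer index */
}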
+ */ + if (!test_bit(AF_ONLINE, &ha->flags)) { + DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! " + "Do not issue command.\n", + ha->host_no, __func__)); + goto queuing_error; + } + + /* Calculate the number of request entries needed. */ + nseg = scsi_dma_map(cmd); + if (nseg < 0) + goto queuing_error; + tot_dsds = nseg; + + req_cnt = qla4xxx_calc_request_entries(tot_dsds); + if (!qla4xxx_space_in_req_ring(ha, req_cnt)) + goto queuing_error; + + /* total iocbs active */ + if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat) + goto queuing_error; + + /* Build command packet */ + cmd_entry = (struct command_t3_entry *) ha->request_ptr; + memset(cmd_entry, 0, sizeof(struct command_t3_entry)); + cmd_entry->hdr.entryType = ET_COMMAND; + cmd_entry->handle = cpu_to_le32(index); + cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index); + + int_to_scsilun(cmd->device->lun, &cmd_entry->lun); + cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd)); + memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len); + cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds); + cmd_entry->hdr.entryCount = req_cnt; + + /* Set data transfer direction control flags + * NOTE: Look at data_direction bits iff there is data to be + * transferred, as the data direction bit is sometimed filled + * in when there is no data to be transferred */ + cmd_entry->control_flags = CF_NO_DATA; + if (scsi_bufflen(cmd)) { + if (cmd->sc_data_direction == DMA_TO_DEVICE) + cmd_entry->control_flags = CF_WRITE; + else if (cmd->sc_data_direction == DMA_FROM_DEVICE) + cmd_entry->control_flags = CF_READ; + + ha->bytes_xfered += scsi_bufflen(cmd); + if (ha->bytes_xfered & ~0xFFFFF){ + ha->total_mbytes_xferred += ha->bytes_xfered >> 20; + ha->bytes_xfered &= 0xFFFFF; + } + } + + /* Set tagged queueing control flags */ + cmd_entry->control_flags |= CF_SIMPLE_TAG; + + qla4xxx_advance_req_ring_ptr(ha); + qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds); + wmb(); + + srb->cmd->host_scribble = (unsigned char *)(unsigned long)index; + + /* update counters */ + srb->state = SRB_ACTIVE_STATE; + srb->flags |= SRB_DMA_VALID; + + /* Track IOCB used */ + ha->iocb_cnt += req_cnt; + srb->iocb_cnt = req_cnt; + ha->req_q_count -= req_cnt; + + ha->isp_ops->queue_iocb(ha); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_SUCCESS; + +queuing_error: + if (tot_dsds) + scsi_dma_unmap(cmd); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_ERROR; +} + +int qla4xxx_send_passthru0(struct iscsi_task *task) +{ + struct passthru0 *passthru_iocb; + struct iscsi_session *sess = task->conn->session; + struct ddb_entry *ddb_entry = sess->dd_data; + struct scsi_qla_host *ha = ddb_entry->ha; + struct ql4_task_data *task_data = task->dd_data; + uint16_t ctrl_flags = 0; + unsigned long flags; + int ret = QLA_ERROR; + + spin_lock_irqsave(&ha->hardware_lock, flags); + task_data->iocb_req_cnt = 1; + /* Put the IOCB on the request queue */ + if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt)) + goto queuing_error; + + passthru_iocb = (struct passthru0 *) ha->request_ptr; + + memset(passthru_iocb, 0, sizeof(struct passthru0)); + passthru_iocb->hdr.entryType = ET_PASSTHRU0; + passthru_iocb->hdr.systemDefined = SD_ISCSI_PDU; + passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt; + passthru_iocb->handle = task->itt; + passthru_iocb->target = cpu_to_le16(ddb_entry->fw_ddb_index); + passthru_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT); + + /* Setup the out & in DSDs */ + if (task_data->req_len) { + memcpy((uint8_t *)task_data->req_buffer + + 
sizeof(struct iscsi_hdr), task->data, task->data_count); + ctrl_flags |= PT_FLAG_SEND_BUFFER; + passthru_iocb->out_dsd.base.addrLow = + cpu_to_le32(LSDW(task_data->req_dma)); + passthru_iocb->out_dsd.base.addrHigh = + cpu_to_le32(MSDW(task_data->req_dma)); + passthru_iocb->out_dsd.count = + cpu_to_le32(task->data_count + + sizeof(struct iscsi_hdr)); + } + if (task_data->resp_len) { + passthru_iocb->in_dsd.base.addrLow = + cpu_to_le32(LSDW(task_data->resp_dma)); + passthru_iocb->in_dsd.base.addrHigh = + cpu_to_le32(MSDW(task_data->resp_dma)); + passthru_iocb->in_dsd.count = + cpu_to_le32(task_data->resp_len); + } + + ctrl_flags |= (PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE); + passthru_iocb->control_flags = cpu_to_le16(ctrl_flags); + + /* Update the request pointer */ + qla4xxx_advance_req_ring_ptr(ha); + wmb(); + + /* Track IOCB used */ + ha->iocb_cnt += task_data->iocb_req_cnt; + ha->req_q_count -= task_data->iocb_req_cnt; + ha->isp_ops->queue_iocb(ha); + ret = QLA_SUCCESS; + +queuing_error: + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return ret; +} + +static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha) +{ + struct mrb *mrb; + + mrb = kzalloc(sizeof(*mrb), GFP_KERNEL); + if (!mrb) + return mrb; + + mrb->ha = ha; + return mrb; +} + +static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb, + uint32_t *in_mbox) +{ + int rval = QLA_SUCCESS; + uint32_t i; + unsigned long flags; + uint32_t index = 0; + + /* Acquire hardware specific lock */ + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Get pointer to the queue entry for the marker */ + rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox)); + if (rval != QLA_SUCCESS) + goto exit_mbox_iocb; + + index = ha->mrb_index; + /* get valid mrb index*/ + for (i = 0; i < MAX_MRB; i++) { + index++; + if (index == MAX_MRB) + index = 1; + if (ha->active_mrb_array[index] == NULL) { + ha->mrb_index = index; + break; + } + } + + mrb->iocb_cnt = 1; + ha->active_mrb_array[index] = mrb; + mrb->mbox->handle = index; + mrb->mbox->hdr.entryType = ET_MBOX_CMD; + mrb->mbox->hdr.entryCount = mrb->iocb_cnt; + memcpy(mrb->mbox->in_mbox, in_mbox, 32); + mrb->mbox_cmd = in_mbox[0]; + wmb(); + + ha->iocb_cnt += mrb->iocb_cnt; + ha->isp_ops->queue_iocb(ha); +exit_mbox_iocb: + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return rval; +} + +int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options, + uint32_t payload_size, uint32_t pid, uint8_t *ipaddr) +{ + uint32_t in_mbox[8]; + struct mrb *mrb = NULL; + int rval = QLA_SUCCESS; + + memset(in_mbox, 0, sizeof(in_mbox)); + + mrb = qla4xxx_get_new_mrb(ha); + if (!mrb) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: fail to get new mrb\n", + __func__)); + rval = QLA_ERROR; + goto exit_ping; + } + + in_mbox[0] = MBOX_CMD_PING; + in_mbox[1] = options; + memcpy(&in_mbox[2], &ipaddr[0], 4); + memcpy(&in_mbox[3], &ipaddr[4], 4); + memcpy(&in_mbox[4], &ipaddr[8], 4); + memcpy(&in_mbox[5], &ipaddr[12], 4); + in_mbox[6] = payload_size; + + mrb->pid = pid; + rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox); + + if (rval != QLA_SUCCESS) + goto exit_ping; + + return rval; +exit_ping: + kfree(mrb); + return rval; +} diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c new file mode 100644 index 000000000..cf52258ec --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_isr.c @@ -0,0 +1,1621 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ + +#include "ql4_def.h" +#include 
"ql4_glbl.h" +#include "ql4_dbg.h" +#include "ql4_inline.h" + +/** + * qla4xxx_copy_sense - copy sense data into cmd sense buffer + * @ha: Pointer to host adapter structure. + * @sts_entry: Pointer to status entry structure. + * @srb: Pointer to srb structure. + **/ +static void qla4xxx_copy_sense(struct scsi_qla_host *ha, + struct status_entry *sts_entry, + struct srb *srb) +{ + struct scsi_cmnd *cmd = srb->cmd; + uint16_t sense_len; + + memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + sense_len = le16_to_cpu(sts_entry->senseDataByteCnt); + if (sense_len == 0) { + DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%llu: %s:" + " sense len 0\n", ha->host_no, + cmd->device->channel, cmd->device->id, + cmd->device->lun, __func__)); + ha->status_srb = NULL; + return; + } + /* Save total available sense length, + * not to exceed cmd's sense buffer size */ + sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE); + srb->req_sense_ptr = cmd->sense_buffer; + srb->req_sense_len = sense_len; + + /* Copy sense from sts_entry pkt */ + sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN); + memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len); + + DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: %s: sense key = %x, " + "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no, + cmd->device->channel, cmd->device->id, + cmd->device->lun, __func__, + sts_entry->senseData[2] & 0x0f, + sts_entry->senseData[7], + sts_entry->senseData[12], + sts_entry->senseData[13])); + + DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len)); + srb->flags |= SRB_GOT_SENSE; + + /* Update srb, in case a sts_cont pkt follows */ + srb->req_sense_ptr += sense_len; + srb->req_sense_len -= sense_len; + if (srb->req_sense_len != 0) + ha->status_srb = srb; + else + ha->status_srb = NULL; +} + +/** + * qla4xxx_status_cont_entry - Process a Status Continuations entry. + * @ha: SCSI driver HA context + * @sts_cont: Entry pointer + * + * Extended sense data. + */ +static void +qla4xxx_status_cont_entry(struct scsi_qla_host *ha, + struct status_cont_entry *sts_cont) +{ + struct srb *srb = ha->status_srb; + struct scsi_cmnd *cmd; + uint16_t sense_len; + + if (srb == NULL) + return; + + cmd = srb->cmd; + if (cmd == NULL) { + DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned " + "back to OS srb=%p srb->state:%d\n", ha->host_no, + __func__, srb, srb->state)); + ha->status_srb = NULL; + return; + } + + /* Copy sense data. */ + sense_len = min_t(uint16_t, srb->req_sense_len, + IOCB_MAX_EXT_SENSEDATA_LEN); + memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len); + DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len)); + + srb->req_sense_ptr += sense_len; + srb->req_sense_len -= sense_len; + + /* Place command on done queue. */ + if (srb->req_sense_len == 0) { + kref_put(&srb->srb_ref, qla4xxx_srb_compl); + ha->status_srb = NULL; + } +} + +/** + * qla4xxx_status_entry - processes status IOCBs + * @ha: Pointer to host adapter structure. + * @sts_entry: Pointer to status entry structure. 
+ **/ +static void qla4xxx_status_entry(struct scsi_qla_host *ha, + struct status_entry *sts_entry) +{ + uint8_t scsi_status; + struct scsi_cmnd *cmd; + struct srb *srb; + struct ddb_entry *ddb_entry; + uint32_t residual; + + srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle)); + if (!srb) { + ql4_printk(KERN_WARNING, ha, "%s invalid status entry: " + "handle=0x%0x, srb=%p\n", __func__, + sts_entry->handle, srb); + if (is_qla80XX(ha)) + set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); + else + set_bit(DPC_RESET_HA, &ha->dpc_flags); + return; + } + + cmd = srb->cmd; + if (cmd == NULL) { + DEBUG2(printk("scsi%ld: %s: Command already returned back to " + "OS pkt->handle=%d srb=%p srb->state:%d\n", + ha->host_no, __func__, sts_entry->handle, + srb, srb->state)); + ql4_printk(KERN_WARNING, ha, "Command is NULL:" + " already returned to OS (srb=%p)\n", srb); + return; + } + + ddb_entry = srb->ddb; + if (ddb_entry == NULL) { + cmd->result = DID_NO_CONNECT << 16; + goto status_entry_exit; + } + + residual = le32_to_cpu(sts_entry->residualByteCnt); + + /* Translate ISP error to a Linux SCSI error. */ + scsi_status = sts_entry->scsiStatus; + switch (sts_entry->completionStatus) { + case SCS_COMPLETE: + + if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) { + cmd->result = DID_ERROR << 16; + break; + } + + if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) { + scsi_set_resid(cmd, residual); + if (!scsi_status && ((scsi_bufflen(cmd) - residual) < + cmd->underflow)) { + + cmd->result = DID_ERROR << 16; + + DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: " + "Mid-layer Data underrun0, " + "xferlen = 0x%x, " + "residual = 0x%x\n", ha->host_no, + cmd->device->channel, + cmd->device->id, + cmd->device->lun, __func__, + scsi_bufflen(cmd), residual)); + break; + } + } + + cmd->result = DID_OK << 16 | scsi_status; + + if (scsi_status != SAM_STAT_CHECK_CONDITION) + break; + + /* Copy Sense Data into sense buffer. */ + qla4xxx_copy_sense(ha, sts_entry, srb); + break; + + case SCS_INCOMPLETE: + /* Always set the status to DID_ERROR, since + * all conditions result in that status anyway */ + cmd->result = DID_ERROR << 16; + break; + + case SCS_RESET_OCCURRED: + DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Device RESET occurred\n", + ha->host_no, cmd->device->channel, + cmd->device->id, cmd->device->lun, __func__)); + + cmd->result = DID_RESET << 16; + break; + + case SCS_ABORTED: + DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Abort occurred\n", + ha->host_no, cmd->device->channel, + cmd->device->id, cmd->device->lun, __func__)); + + cmd->result = DID_RESET << 16; + break; + + case SCS_TIMEOUT: + DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: Timeout\n", + ha->host_no, cmd->device->channel, + cmd->device->id, cmd->device->lun)); + + cmd->result = DID_TRANSPORT_DISRUPTED << 16; + + /* + * Mark device missing so that we won't continue to send + * I/O to this device. We should get a ddb state change + * AEN soon. 
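/*
 * Illustrative sketch, not part of the upstream driver: qla4xxx_copy_sense()
 * and qla4xxx_status_cont_entry() earlier in this file rebuild the sense
 * buffer from chunks, the first chunk arriving in the status IOCB and the
 * rest in status-continuation IOCBs.  Each step is a bounded copy followed
 * by advancing the destination; chunk sizes here are arbitrary stand-ins
 * for IOCB_MAX_SENSEDATA_LEN / IOCB_MAX_EXT_SENSEDATA_LEN.
 */
#include <stddef.h>
#include <string.h>

static size_t demo_copy_sense_chunk(unsigned char *dst, size_t space_left,
                                    const unsigned char *src, size_t chunk)
{
        size_t n = (space_left < chunk) ? space_left : chunk;   /* no overrun */

        memcpy(dst, src, n);
        return n;       /* caller advances dst and shrinks space_left by n */
}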
+ */ + if (iscsi_is_session_online(ddb_entry->sess)) + qla4xxx_mark_device_missing(ddb_entry->sess); + break; + + case SCS_DATA_UNDERRUN: + case SCS_DATA_OVERRUN: + if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) || + (sts_entry->completionStatus == SCS_DATA_OVERRUN)) { + DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: " "Data overrun\n", + ha->host_no, + cmd->device->channel, cmd->device->id, + cmd->device->lun, __func__)); + + cmd->result = DID_ERROR << 16; + break; + } + + scsi_set_resid(cmd, residual); + + if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) { + + /* Both the firmware and target reported UNDERRUN: + * + * MID-LAYER UNDERFLOW case: + * Some kernels do not properly detect midlayer + * underflow, so we manually check it and return + * ERROR if the minimum required data was not + * received. + * + * ALL OTHER cases: + * Fall thru to check scsi_status + */ + if (!scsi_status && (scsi_bufflen(cmd) - residual) < + cmd->underflow) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld:%d:%d:%llu: %s: Mid-layer Data underrun, xferlen = 0x%x,residual = 0x%x\n", + ha->host_no, + cmd->device->channel, + cmd->device->id, + cmd->device->lun, __func__, + scsi_bufflen(cmd), + residual)); + + cmd->result = DID_ERROR << 16; + break; + } + + } else if (scsi_status != SAM_STAT_TASK_SET_FULL && + scsi_status != SAM_STAT_BUSY) { + + /* + * The firmware reports UNDERRUN, but the target does + * not report it: + * + * scsi_status | host_byte device_byte + * | (19:16) (7:0) + * ============= | ========= =========== + * TASK_SET_FULL | DID_OK scsi_status + * BUSY | DID_OK scsi_status + * ALL OTHERS | DID_ERROR scsi_status + * + * Note: If scsi_status is task set full or busy, + * then this else if would fall thru to check the + * scsi_status and return DID_OK. + */ + + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld:%d:%d:%llu: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n", + ha->host_no, + cmd->device->channel, + cmd->device->id, + cmd->device->lun, __func__, + residual, + scsi_bufflen(cmd))); + + cmd->result = DID_ERROR << 16 | scsi_status; + goto check_scsi_status; + } + + cmd->result = DID_OK << 16 | scsi_status; + +check_scsi_status: + if (scsi_status == SAM_STAT_CHECK_CONDITION) + qla4xxx_copy_sense(ha, sts_entry, srb); + + break; + + case SCS_DEVICE_LOGGED_OUT: + case SCS_DEVICE_UNAVAILABLE: + DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: SCS_DEVICE " + "state: 0x%x\n", ha->host_no, + cmd->device->channel, cmd->device->id, + cmd->device->lun, sts_entry->completionStatus)); + /* + * Mark device missing so that we won't continue to + * send I/O to this device. We should get a ddb + * state change AEN soon. 
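/*
 * Illustrative sketch, not part of the upstream driver: the underrun
 * handling above fails a command when the bytes actually transferred (the
 * request length minus the reported residual) fall below the midlayer's
 * minimum acceptable length (cmd->underflow).  For example, a 4096-byte
 * read with a 2048-byte residual transferred only 2048 bytes, which is an
 * error if at least 4096 were required.
 */
static int demo_underrun_is_error(unsigned int bufflen, unsigned int residual,
                                  unsigned int underflow)
{
        return (bufflen - residual) < underflow;        /* 1 = fail the command */
}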
+ */ + if (iscsi_is_session_online(ddb_entry->sess)) + qla4xxx_mark_device_missing(ddb_entry->sess); + + cmd->result = DID_TRANSPORT_DISRUPTED << 16; + break; + + case SCS_QUEUE_FULL: + /* + * SCSI Mid-Layer handles device queue full + */ + cmd->result = DID_OK << 16 | sts_entry->scsiStatus; + DEBUG2(printk("scsi%ld:%d:%llu: %s: QUEUE FULL detected " + "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x," + " iResp=%02x\n", ha->host_no, cmd->device->id, + cmd->device->lun, __func__, + sts_entry->completionStatus, + sts_entry->scsiStatus, sts_entry->state_flags, + sts_entry->iscsiFlags, + sts_entry->iscsiResponse)); + break; + + default: + cmd->result = DID_ERROR << 16; + break; + } + +status_entry_exit: + + /* complete the request, if not waiting for status_continuation pkt */ + srb->cc_stat = sts_entry->completionStatus; + if (ha->status_srb == NULL) + kref_put(&srb->srb_ref, qla4xxx_srb_compl); +} + +/** + * qla4xxx_passthru_status_entry - processes passthru status IOCBs (0x3C) + * @ha: Pointer to host adapter structure. + * @sts_entry: Pointer to status entry structure. + **/ +static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha, + struct passthru_status *sts_entry) +{ + struct iscsi_task *task; + struct ddb_entry *ddb_entry; + struct ql4_task_data *task_data; + struct iscsi_cls_conn *cls_conn; + struct iscsi_conn *conn; + itt_t itt; + uint32_t fw_ddb_index; + + itt = sts_entry->handle; + fw_ddb_index = le32_to_cpu(sts_entry->target); + + ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index); + + if (ddb_entry == NULL) { + ql4_printk(KERN_ERR, ha, "%s: Invalid target index = 0x%x\n", + __func__, sts_entry->target); + return; + } + + cls_conn = ddb_entry->conn; + conn = cls_conn->dd_data; + spin_lock(&conn->session->back_lock); + task = iscsi_itt_to_task(conn, itt); + spin_unlock(&conn->session->back_lock); + + if (task == NULL) { + ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__); + return; + } + + task_data = task->dd_data; + memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status)); + ha->iocb_cnt -= task_data->iocb_req_cnt; + queue_work(ha->task_wq, &task_data->task_work); +} + +static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha, + uint32_t index) +{ + struct mrb *mrb = NULL; + + /* validate handle and remove from active array */ + if (index >= MAX_MRB) + return mrb; + + mrb = ha->active_mrb_array[index]; + ha->active_mrb_array[index] = NULL; + if (!mrb) + return mrb; + + /* update counters */ + ha->iocb_cnt -= mrb->iocb_cnt; + + return mrb; +} + +static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha, + struct mbox_status_iocb *mbox_sts_entry) +{ + struct mrb *mrb; + uint32_t status; + uint32_t data_size; + + mrb = qla4xxx_del_mrb_from_active_array(ha, + le32_to_cpu(mbox_sts_entry->handle)); + + if (mrb == NULL) { + ql4_printk(KERN_WARNING, ha, "%s: mrb[%d] is null\n", __func__, + mbox_sts_entry->handle); + return; + } + + switch (mrb->mbox_cmd) { + case MBOX_CMD_PING: + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_cmd = 0x%x, " + "mbox_sts[0] = 0x%x, mbox_sts[6] = 0x%x\n", + __func__, mrb->mbox_cmd, + mbox_sts_entry->out_mbox[0], + mbox_sts_entry->out_mbox[6])); + + if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE) + status = ISCSI_PING_SUCCESS; + else + status = mbox_sts_entry->out_mbox[6]; + + data_size = sizeof(mbox_sts_entry->out_mbox); + + qla4xxx_post_ping_evt_work(ha, status, mrb->pid, data_size, + (uint8_t *) mbox_sts_entry->out_mbox); + break; + + default: + DEBUG2(ql4_printk(KERN_WARNING, ha, 
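/*
 * Illustrative sketch, not part of the upstream driver:
 * qla4xxx_del_mrb_from_active_array() above turns a completion handle into
 * an array slot, bounds-checks it, detaches the tracked request and clears
 * the slot so it can be reused.  The same lookup-and-clear pattern on a
 * plain pointer array, with made-up names:
 */
#include <stddef.h>

#define DEMO_MAX_SLOTS 32               /* stand-in for MAX_MRB */

static void *demo_claim_slot(void **slots, unsigned int handle)
{
        void *entry;

        if (handle >= DEMO_MAX_SLOTS)   /* reject out-of-range handles */
                return NULL;

        entry = slots[handle];
        slots[handle] = NULL;           /* slot is free for the next request */
        return entry;                   /* may be NULL if nothing was tracked */
}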
"%s: invalid mbox_cmd = " + "0x%x\n", __func__, mrb->mbox_cmd)); + } + + kfree(mrb); + return; +} + +/** + * qla4xxx_process_response_queue - process response queue completions + * @ha: Pointer to host adapter structure. + * + * This routine process response queue completions in interrupt context. + * Hardware_lock locked upon entry + **/ +void qla4xxx_process_response_queue(struct scsi_qla_host *ha) +{ + struct srb *srb = NULL; + struct status_entry *sts_entry; + + /* Process all responses from response queue */ + while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) { + sts_entry = (struct status_entry *) ha->response_ptr; + + /* Advance pointers for next entry */ + if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) { + ha->response_out = 0; + ha->response_ptr = ha->response_ring; + } else { + ha->response_out++; + ha->response_ptr++; + } + + /* process entry */ + switch (sts_entry->hdr.entryType) { + case ET_STATUS: + /* Common status */ + qla4xxx_status_entry(ha, sts_entry); + break; + + case ET_PASSTHRU_STATUS: + if (sts_entry->hdr.systemDefined == SD_ISCSI_PDU) + qla4xxx_passthru_status_entry(ha, + (struct passthru_status *)sts_entry); + else + ql4_printk(KERN_ERR, ha, + "%s: Invalid status received\n", + __func__); + + break; + + case ET_STATUS_CONTINUATION: + qla4xxx_status_cont_entry(ha, + (struct status_cont_entry *) sts_entry); + break; + + case ET_COMMAND: + /* ISP device queue is full. Command not + * accepted by ISP. Queue command for + * later */ + + srb = qla4xxx_del_from_active_array(ha, + le32_to_cpu(sts_entry-> + handle)); + if (srb == NULL) + goto exit_prq_invalid_handle; + + DEBUG2(printk("scsi%ld: %s: FW device queue full, " + "srb %p\n", ha->host_no, __func__, srb)); + + /* ETRY normally by sending it back with + * DID_BUS_BUSY */ + srb->cmd->result = DID_BUS_BUSY << 16; + kref_put(&srb->srb_ref, qla4xxx_srb_compl); + break; + + case ET_CONTINUE: + /* Just throw away the continuation entries */ + DEBUG2(printk("scsi%ld: %s: Continuation entry - " + "ignoring\n", ha->host_no, __func__)); + break; + + case ET_MBOX_STATUS: + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: mbox status IOCB\n", __func__)); + qla4xxx_mbox_status_entry(ha, + (struct mbox_status_iocb *)sts_entry); + break; + + default: + /* + * Invalid entry in response queue, reset RISC + * firmware. + */ + DEBUG2(printk("scsi%ld: %s: Invalid entry %x in " + "response queue \n", ha->host_no, + __func__, + sts_entry->hdr.entryType)); + goto exit_prq_error; + } + ((struct response *)sts_entry)->signature = RESPONSE_PROCESSED; + wmb(); + } + + /* + * Tell ISP we're done with response(s). This also clears the interrupt. + */ + ha->isp_ops->complete_iocb(ha); + + return; + +exit_prq_invalid_handle: + DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n", + ha->host_no, __func__, srb, sts_entry->hdr.entryType, + sts_entry->completionStatus)); + +exit_prq_error: + ha->isp_ops->complete_iocb(ha); + set_bit(DPC_RESET_HA, &ha->dpc_flags); +} + +/** + * qla4_83xx_loopback_in_progress: Is loopback in progress? + * @ha: Pointer to host adapter structure. 
+ * returns: 1 = loopback in progress, 0 = loopback not in progress + **/ +static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha) +{ + int rval = 1; + + if (is_qla8032(ha) || is_qla8042(ha)) { + if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) || + (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Loopback diagnostics in progress\n", + __func__)); + rval = 1; + } else { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Loopback diagnostics not in progress\n", + __func__)); + rval = 0; + } + } + + return rval; +} + +static void qla4xxx_update_ipaddr_state(struct scsi_qla_host *ha, + uint32_t ipaddr_idx, + uint32_t ipaddr_fw_state) +{ + uint8_t ipaddr_state; + uint8_t ip_idx; + + ip_idx = ipaddr_idx & 0xF; + ipaddr_state = qla4xxx_set_ipaddr_state((uint8_t)ipaddr_fw_state); + + switch (ip_idx) { + case 0: + ha->ip_config.ipv4_addr_state = ipaddr_state; + break; + case 1: + ha->ip_config.ipv6_link_local_state = ipaddr_state; + break; + case 2: + ha->ip_config.ipv6_addr0_state = ipaddr_state; + break; + case 3: + ha->ip_config.ipv6_addr1_state = ipaddr_state; + break; + default: + ql4_printk(KERN_INFO, ha, "%s: Invalid IPADDR index %d\n", + __func__, ip_idx); + } +} + +static void qla4xxx_default_router_changed(struct scsi_qla_host *ha, + uint32_t *mbox_sts) +{ + memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[0], + &mbox_sts[2], sizeof(uint32_t)); + memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[1], + &mbox_sts[3], sizeof(uint32_t)); + memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[2], + &mbox_sts[4], sizeof(uint32_t)); + memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[3], + &mbox_sts[5], sizeof(uint32_t)); +} + +/** + * qla4xxx_isr_decode_mailbox - decodes mailbox status + * @ha: Pointer to host adapter structure. + * @mbox_status: Mailbox status. + * + * This routine decodes the mailbox status during the ISR. + * Hardware_lock locked upon entry. runs in interrupt context. + **/ +static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, + uint32_t mbox_status) +{ + int i; + uint32_t mbox_sts[MBOX_AEN_REG_COUNT]; + __le32 __iomem *mailbox_out; + uint32_t opcode = 0; + + if (is_qla8032(ha) || is_qla8042(ha)) + mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0]; + else if (is_qla8022(ha)) + mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0]; + else + mailbox_out = &ha->reg->mailbox[0]; + + if ((mbox_status == MBOX_STS_BUSY) || + (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) || + (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) { + ha->mbox_status[0] = mbox_status; + + if (test_bit(AF_MBOX_COMMAND, &ha->flags)) { + /* + * Copy all mailbox registers to a temporary + * location and set mailbox command done flag + */ + for (i = 0; i < ha->mbox_status_count; i++) + ha->mbox_status[i] = readl(&mailbox_out[i]); + + set_bit(AF_MBOX_COMMAND_DONE, &ha->flags); + + if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags)) + complete(&ha->mbx_intr_comp); + } + } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) { + for (i = 0; i < MBOX_AEN_REG_COUNT; i++) + mbox_sts[i] = readl(&mailbox_out[i]); + + /* Immediately process the AENs that don't require much work. 
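/*
 * Illustrative sketch, not part of the upstream driver:
 * qla4xxx_isr_decode_mailbox() above classifies an incoming mailbox word by
 * its top nibble, one class for command completion codes and another for
 * asynchronous event (AEN) codes.  The test is a single shift; the class
 * values below are placeholders, not the driver's MBOX_COMPLETION_STATUS /
 * MBOX_ASYNC_EVENT_STATUS constants.
 */
#include <stdint.h>

#define DEMO_CLASS_COMPLETION  0x4      /* assumed completion class nibble */
#define DEMO_CLASS_ASYNC_EVENT 0x8      /* assumed AEN class nibble */

static int demo_is_async_event(uint32_t mbox_status)
{
        return (mbox_status >> 12) == DEMO_CLASS_ASYNC_EVENT;
}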
+ * Only queue the database_changed AENs */ + if (ha->aen_log.count < MAX_AEN_ENTRIES) { + for (i = 0; i < MBOX_AEN_REG_COUNT; i++) + ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] = + mbox_sts[i]; + ha->aen_log.count++; + } + switch (mbox_status) { + case MBOX_ASTS_SYSTEM_ERROR: + /* Log Mailbox registers */ + ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__); + qla4xxx_dump_registers(ha); + + if ((is_qla8022(ha) && ql4xdontresethba) || + ((is_qla8032(ha) || is_qla8042(ha)) && + qla4_83xx_idc_dontreset(ha))) { + DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n", + ha->host_no, __func__)); + } else { + set_bit(AF_GET_CRASH_RECORD, &ha->flags); + set_bit(DPC_RESET_HA, &ha->dpc_flags); + } + break; + + case MBOX_ASTS_REQUEST_TRANSFER_ERROR: + case MBOX_ASTS_RESPONSE_TRANSFER_ERROR: + case MBOX_ASTS_NVRAM_INVALID: + case MBOX_ASTS_IP_ADDRESS_CHANGED: + case MBOX_ASTS_DHCP_LEASE_EXPIRED: + DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, " + "Reset HA\n", ha->host_no, mbox_status)); + if (is_qla80XX(ha)) + set_bit(DPC_RESET_HA_FW_CONTEXT, + &ha->dpc_flags); + else + set_bit(DPC_RESET_HA, &ha->dpc_flags); + break; + + case MBOX_ASTS_LINK_UP: + set_bit(AF_LINK_UP, &ha->flags); + if (test_bit(AF_INIT_DONE, &ha->flags)) + set_bit(DPC_LINK_CHANGED, &ha->dpc_flags); + + ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__); + qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP, + sizeof(mbox_sts), + (uint8_t *) mbox_sts); + + if ((is_qla8032(ha) || is_qla8042(ha)) && + ha->notify_link_up_comp) + complete(&ha->link_up_comp); + + break; + + case MBOX_ASTS_LINK_DOWN: + clear_bit(AF_LINK_UP, &ha->flags); + if (test_bit(AF_INIT_DONE, &ha->flags)) { + set_bit(DPC_LINK_CHANGED, &ha->dpc_flags); + qla4xxx_wake_dpc(ha); + } + + ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__); + qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN, + sizeof(mbox_sts), + (uint8_t *) mbox_sts); + break; + + case MBOX_ASTS_HEARTBEAT: + ha->seconds_since_last_heartbeat = 0; + break; + + case MBOX_ASTS_DHCP_LEASE_ACQUIRED: + DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE " + "ACQUIRED\n", ha->host_no, mbox_status)); + set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); + break; + + case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM: + case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target + * mode + * only */ + case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED: /* Connection mode */ + case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR: + case MBOX_ASTS_SUBNET_STATE_CHANGE: + case MBOX_ASTS_DUPLICATE_IP: + /* No action */ + DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no, + mbox_status)); + break; + + case MBOX_ASTS_IP_ADDR_STATE_CHANGED: + printk("scsi%ld: AEN %04x, mbox_sts[2]=%04x, " + "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0], + mbox_sts[2], mbox_sts[3]); + + qla4xxx_update_ipaddr_state(ha, mbox_sts[5], + mbox_sts[3]); + /* mbox_sts[2] = Old ACB state + * mbox_sts[3] = new ACB state */ + if ((mbox_sts[3] == IP_ADDRSTATE_PREFERRED) && + ((mbox_sts[2] == IP_ADDRSTATE_TENTATIVE) || + (mbox_sts[2] == IP_ADDRSTATE_ACQUIRING))) { + set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); + } else if ((mbox_sts[3] == IP_ADDRSTATE_ACQUIRING) && + (mbox_sts[2] == IP_ADDRSTATE_PREFERRED)) { + if (is_qla80XX(ha)) + set_bit(DPC_RESET_HA_FW_CONTEXT, + &ha->dpc_flags); + else + set_bit(DPC_RESET_HA, &ha->dpc_flags); + } else if (mbox_sts[3] == IP_ADDRSTATE_DISABLING) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n", + ha->host_no, __func__); + } else if (mbox_sts[3] == IP_ADDRSTATE_UNCONFIGURED) { + complete(&ha->disable_acb_comp); + ql4_printk(KERN_INFO, ha, "scsi%ld: 
%s: ACB state unconfigured\n", + ha->host_no, __func__); + } + break; + + case MBOX_ASTS_IPV6_LINK_MTU_CHANGE: + case MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED: + case MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED: + /* No action */ + DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x\n", + ha->host_no, mbox_status)); + break; + + case MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD: + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x, IPv6 ERROR, " + "mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3}=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", + ha->host_no, mbox_sts[0], mbox_sts[1], + mbox_sts[2], mbox_sts[3], mbox_sts[4], + mbox_sts[5])); + break; + + case MBOX_ASTS_MAC_ADDRESS_CHANGED: + case MBOX_ASTS_DNS: + /* No action */ + DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, " + "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n", + ha->host_no, mbox_sts[0], + mbox_sts[1], mbox_sts[2])); + break; + + case MBOX_ASTS_SELF_TEST_FAILED: + case MBOX_ASTS_LOGIN_FAILED: + /* No action */ + DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, " + "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n", + ha->host_no, mbox_sts[0], mbox_sts[1], + mbox_sts[2], mbox_sts[3])); + break; + + case MBOX_ASTS_DATABASE_CHANGED: + /* Queue AEN information and process it in the DPC + * routine */ + if (ha->aen_q_count > 0) { + + /* decrement available counter */ + ha->aen_q_count--; + + for (i = 0; i < MBOX_AEN_REG_COUNT; i++) + ha->aen_q[ha->aen_in].mbox_sts[i] = + mbox_sts[i]; + + /* print debug message */ + DEBUG2(printk("scsi%ld: AEN[%d] %04x queued " + "mb1:0x%x mb2:0x%x mb3:0x%x " + "mb4:0x%x mb5:0x%x\n", + ha->host_no, ha->aen_in, + mbox_sts[0], mbox_sts[1], + mbox_sts[2], mbox_sts[3], + mbox_sts[4], mbox_sts[5])); + + /* advance pointer */ + ha->aen_in++; + if (ha->aen_in == MAX_AEN_ENTRIES) + ha->aen_in = 0; + + /* The DPC routine will process the aen */ + set_bit(DPC_AEN, &ha->dpc_flags); + } else { + DEBUG2(printk("scsi%ld: %s: aen %04x, queue " + "overflowed! 
AEN LOST!!\n", + ha->host_no, __func__, + mbox_sts[0])); + + DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n", + ha->host_no)); + + for (i = 0; i < MAX_AEN_ENTRIES; i++) { + DEBUG2(printk("AEN[%d] %04x %04x %04x " + "%04x\n", i, mbox_sts[0], + mbox_sts[1], mbox_sts[2], + mbox_sts[3])); + } + } + break; + + case MBOX_ASTS_TXSCVR_INSERTED: + DEBUG2(printk(KERN_WARNING + "scsi%ld: AEN %04x Transceiver" + " inserted\n", ha->host_no, mbox_sts[0])); + break; + + case MBOX_ASTS_TXSCVR_REMOVED: + DEBUG2(printk(KERN_WARNING + "scsi%ld: AEN %04x Transceiver" + " removed\n", ha->host_no, mbox_sts[0])); + break; + + case MBOX_ASTS_IDC_REQUEST_NOTIFICATION: + if (is_qla8032(ha) || is_qla8042(ha)) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n", + ha->host_no, mbox_sts[0], + mbox_sts[1], mbox_sts[2], + mbox_sts[3], mbox_sts[4])); + opcode = mbox_sts[1] >> 16; + if ((opcode == MBOX_CMD_SET_PORT_CONFIG) || + (opcode == MBOX_CMD_PORT_RESET)) { + set_bit(DPC_POST_IDC_ACK, + &ha->dpc_flags); + ha->idc_info.request_desc = mbox_sts[1]; + ha->idc_info.info1 = mbox_sts[2]; + ha->idc_info.info2 = mbox_sts[3]; + ha->idc_info.info3 = mbox_sts[4]; + qla4xxx_wake_dpc(ha); + } + } + break; + + case MBOX_ASTS_IDC_COMPLETE: + if (is_qla8032(ha) || is_qla8042(ha)) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n", + ha->host_no, mbox_sts[0], + mbox_sts[1], mbox_sts[2], + mbox_sts[3], mbox_sts[4])); + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi:%ld: AEN %04x IDC Complete notification\n", + ha->host_no, mbox_sts[0])); + + opcode = mbox_sts[1] >> 16; + if (ha->notify_idc_comp) + complete(&ha->idc_comp); + + if ((opcode == MBOX_CMD_SET_PORT_CONFIG) || + (opcode == MBOX_CMD_PORT_RESET)) + ha->idc_info.info2 = mbox_sts[3]; + + if (qla4_83xx_loopback_in_progress(ha)) { + set_bit(AF_LOOPBACK, &ha->flags); + } else { + clear_bit(AF_LOOPBACK, &ha->flags); + if (ha->saved_acb) + set_bit(DPC_RESTORE_ACB, + &ha->dpc_flags); + } + qla4xxx_wake_dpc(ha); + } + break; + + case MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED: + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", + ha->host_no, mbox_sts[0], mbox_sts[1], + mbox_sts[2], mbox_sts[3], mbox_sts[4], + mbox_sts[5])); + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x Received IPv6 default router changed notification\n", + ha->host_no, mbox_sts[0])); + qla4xxx_default_router_changed(ha, mbox_sts); + break; + + case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION: + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", + ha->host_no, mbox_sts[0], mbox_sts[1], + mbox_sts[2], mbox_sts[3], mbox_sts[4], + mbox_sts[5])); + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x Received IDC Extend Timeout notification\n", + ha->host_no, mbox_sts[0])); + /* new IDC timeout */ + ha->idc_extend_tmo = mbox_sts[1]; + break; + + case MBOX_ASTS_INITIALIZATION_FAILED: + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x, mbox_sts[3]=%08x\n", + ha->host_no, mbox_sts[0], + mbox_sts[3])); + break; + + case MBOX_ASTS_SYSTEM_WARNING_EVENT: + DEBUG2(ql4_printk(KERN_WARNING, ha, + "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", + ha->host_no, mbox_sts[0], mbox_sts[1], + mbox_sts[2], mbox_sts[3], mbox_sts[4], + 
mbox_sts[5])); + break; + + case MBOX_ASTS_DCBX_CONF_CHANGE: + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", + ha->host_no, mbox_sts[0], mbox_sts[1], + mbox_sts[2], mbox_sts[3], mbox_sts[4], + mbox_sts[5])); + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x Received DCBX configuration changed notification\n", + ha->host_no, mbox_sts[0])); + break; + + default: + DEBUG2(printk(KERN_WARNING + "scsi%ld: AEN %04x UNKNOWN\n", + ha->host_no, mbox_sts[0])); + break; + } + } else { + DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n", + ha->host_no, mbox_status)); + + ha->mbox_status[0] = mbox_status; + } +} + +void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha, + uint32_t intr_status) +{ + /* Process mailbox/asynch event interrupt.*/ + if (intr_status) { + qla4xxx_isr_decode_mailbox(ha, + readl(&ha->qla4_83xx_reg->mailbox_out[0])); + /* clear the interrupt */ + writel(0, &ha->qla4_83xx_reg->risc_intr); + } else { + qla4xxx_process_response_queue(ha); + } + + /* clear the interrupt */ + writel(0, &ha->qla4_83xx_reg->mb_int_mask); +} + +/** + * qla4_82xx_interrupt_service_routine - isr + * @ha: pointer to host adapter structure. + * @intr_status: Local interrupt status/type. + * + * This is the main interrupt service routine. + * hardware_lock locked upon entry. runs in interrupt context. + **/ +void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha, + uint32_t intr_status) +{ + /* Process response queue interrupt. */ + if ((intr_status & HSRX_RISC_IOCB_INT) && + test_bit(AF_INIT_DONE, &ha->flags)) + qla4xxx_process_response_queue(ha); + + /* Process mailbox/asynch event interrupt.*/ + if (intr_status & HSRX_RISC_MB_INT) + qla4xxx_isr_decode_mailbox(ha, + readl(&ha->qla4_82xx_reg->mailbox_out[0])); + + /* clear the interrupt */ + writel(0, &ha->qla4_82xx_reg->host_int); + readl(&ha->qla4_82xx_reg->host_int); +} + +/** + * qla4xxx_interrupt_service_routine - isr + * @ha: pointer to host adapter structure. + * @intr_status: Local interrupt status/type. + * + * This is the main interrupt service routine. + * hardware_lock locked upon entry. runs in interrupt context. + **/ +void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha, + uint32_t intr_status) +{ + /* Process response queue interrupt. */ + if (intr_status & CSR_SCSI_COMPLETION_INTR) + qla4xxx_process_response_queue(ha); + + /* Process mailbox/asynch event interrupt.*/ + if (intr_status & CSR_SCSI_PROCESSOR_INTR) { + qla4xxx_isr_decode_mailbox(ha, + readl(&ha->reg->mailbox[0])); + + /* Clear Mailbox Interrupt */ + writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), + &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); + } +} + +/** + * qla4_82xx_spurious_interrupt - processes spurious interrupt + * @ha: pointer to host adapter structure. + * @reqs_count: . + * + **/ +static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha, + uint8_t reqs_count) +{ + if (reqs_count) + return; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n")); + if (is_qla8022(ha)) { + writel(0, &ha->qla4_82xx_reg->host_int); + if (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled) + qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, + 0xfbff); + } + ha->spurious_int_count++; +} + +/** + * qla4xxx_intr_handler - hardware interrupt handler. 
+ * @irq: Unused + * @dev_id: Pointer to host adapter structure + **/ +irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id) +{ + struct scsi_qla_host *ha; + uint32_t intr_status; + unsigned long flags = 0; + uint8_t reqs_count = 0; + + ha = (struct scsi_qla_host *) dev_id; + if (!ha) { + DEBUG2(printk(KERN_INFO + "qla4xxx: Interrupt with NULL host ptr\n")); + return IRQ_NONE; + } + + spin_lock_irqsave(&ha->hardware_lock, flags); + + ha->isr_count++; + /* + * Repeatedly service interrupts up to a maximum of + * MAX_REQS_SERVICED_PER_INTR + */ + while (1) { + /* + * Read interrupt status + */ + if (ha->isp_ops->rd_shdw_rsp_q_in(ha) != + ha->response_out) + intr_status = CSR_SCSI_COMPLETION_INTR; + else + intr_status = readl(&ha->reg->ctrl_status); + + if ((intr_status & + (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) { + if (reqs_count == 0) + ha->spurious_int_count++; + break; + } + + if (intr_status & CSR_FATAL_ERROR) { + DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, " + "Status 0x%04x\n", ha->host_no, + readl(isp_port_error_status (ha)))); + + /* Issue Soft Reset to clear this error condition. + * This will prevent the RISC from repeatedly + * interrupting the driver; thus, allowing the DPC to + * get scheduled to continue error recovery. + * NOTE: Disabling RISC interrupts does not work in + * this case, as CSR_FATAL_ERROR overrides + * CSR_SCSI_INTR_ENABLE */ + if ((readl(&ha->reg->ctrl_status) & + CSR_SCSI_RESET_INTR) == 0) { + writel(set_rmask(CSR_SOFT_RESET), + &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); + } + + writel(set_rmask(CSR_FATAL_ERROR), + &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); + + __qla4xxx_disable_intrs(ha); + + set_bit(DPC_RESET_HA, &ha->dpc_flags); + + break; + } else if (intr_status & CSR_SCSI_RESET_INTR) { + clear_bit(AF_ONLINE, &ha->flags); + __qla4xxx_disable_intrs(ha); + + writel(set_rmask(CSR_SCSI_RESET_INTR), + &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); + + if (!test_bit(AF_HA_REMOVAL, &ha->flags)) + set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); + + break; + } else if (intr_status & INTR_PENDING) { + ha->isp_ops->interrupt_service_routine(ha, intr_status); + ha->total_io_count++; + if (++reqs_count == MAX_REQS_SERVICED_PER_INTR) + break; + } + } + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return IRQ_HANDLED; +} + +/** + * qla4_82xx_intr_handler - hardware interrupt handler. 
+ * @irq: Unused + * @dev_id: Pointer to host adapter structure + **/ +irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id) +{ + struct scsi_qla_host *ha = dev_id; + uint32_t intr_status; + uint32_t status; + unsigned long flags = 0; + uint8_t reqs_count = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + return IRQ_HANDLED; + + ha->isr_count++; + status = qla4_82xx_rd_32(ha, ISR_INT_VECTOR); + if (!(status & ha->nx_legacy_intr.int_vec_bit)) + return IRQ_NONE; + + status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG); + if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) { + DEBUG7(ql4_printk(KERN_INFO, ha, + "%s legacy Int not triggered\n", __func__)); + return IRQ_NONE; + } + + /* clear the interrupt */ + qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff); + + /* read twice to ensure write is flushed */ + qla4_82xx_rd_32(ha, ISR_INT_VECTOR); + qla4_82xx_rd_32(ha, ISR_INT_VECTOR); + + spin_lock_irqsave(&ha->hardware_lock, flags); + while (1) { + if (!(readl(&ha->qla4_82xx_reg->host_int) & + ISRX_82XX_RISC_INT)) { + qla4_82xx_spurious_interrupt(ha, reqs_count); + break; + } + intr_status = readl(&ha->qla4_82xx_reg->host_status); + if ((intr_status & + (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) { + qla4_82xx_spurious_interrupt(ha, reqs_count); + break; + } + + ha->isp_ops->interrupt_service_routine(ha, intr_status); + + /* Enable Interrupt */ + qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); + + if (++reqs_count == MAX_REQS_SERVICED_PER_INTR) + break; + } + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return IRQ_HANDLED; +} + +#define LEG_INT_PTR_B31 (1 << 31) +#define LEG_INT_PTR_B30 (1 << 30) +#define PF_BITS_MASK (0xF << 16) + +/** + * qla4_83xx_intr_handler - hardware interrupt handler. + * @irq: Unused + * @dev_id: Pointer to host adapter structure + **/ +irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id) +{ + struct scsi_qla_host *ha = dev_id; + uint32_t leg_int_ptr = 0; + unsigned long flags = 0; + + ha->isr_count++; + leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr); + + /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */ + if (!(leg_int_ptr & LEG_INT_PTR_B31)) { + DEBUG7(ql4_printk(KERN_ERR, ha, + "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n", + __func__)); + return IRQ_NONE; + } + + /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */ + if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) { + DEBUG7(ql4_printk(KERN_ERR, ha, + "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n", + __func__, (leg_int_ptr & PF_BITS_MASK), + ha->pf_bit)); + return IRQ_NONE; + } + + /* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger + * Control register and poll till Legacy Interrupt Pointer register + * bit30 is 0. 
+ */ + writel(0, &ha->qla4_83xx_reg->leg_int_trig); + do { + leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr); + if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) + break; + } while (leg_int_ptr & LEG_INT_PTR_B30); + + spin_lock_irqsave(&ha->hardware_lock, flags); + leg_int_ptr = readl(&ha->qla4_83xx_reg->risc_intr); + ha->isp_ops->interrupt_service_routine(ha, leg_int_ptr); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return IRQ_HANDLED; +} + +irqreturn_t +qla4_8xxx_msi_handler(int irq, void *dev_id) +{ + struct scsi_qla_host *ha; + + ha = (struct scsi_qla_host *) dev_id; + if (!ha) { + DEBUG2(printk(KERN_INFO + "qla4xxx: MSIX: Interrupt with NULL host ptr\n")); + return IRQ_NONE; + } + + ha->isr_count++; + /* clear the interrupt */ + qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff); + + /* read twice to ensure write is flushed */ + qla4_82xx_rd_32(ha, ISR_INT_VECTOR); + qla4_82xx_rd_32(ha, ISR_INT_VECTOR); + + return qla4_8xxx_default_intr_handler(irq, dev_id); +} + +static irqreturn_t qla4_83xx_mailbox_intr_handler(int irq, void *dev_id) +{ + struct scsi_qla_host *ha = dev_id; + unsigned long flags; + uint32_t ival = 0; + + spin_lock_irqsave(&ha->hardware_lock, flags); + + ival = readl(&ha->qla4_83xx_reg->risc_intr); + if (ival == 0) { + ql4_printk(KERN_INFO, ha, + "%s: It is a spurious mailbox interrupt!\n", + __func__); + ival = readl(&ha->qla4_83xx_reg->mb_int_mask); + ival &= ~INT_MASK_FW_MB; + writel(ival, &ha->qla4_83xx_reg->mb_int_mask); + goto exit; + } + + qla4xxx_isr_decode_mailbox(ha, + readl(&ha->qla4_83xx_reg->mailbox_out[0])); + writel(0, &ha->qla4_83xx_reg->risc_intr); + ival = readl(&ha->qla4_83xx_reg->mb_int_mask); + ival &= ~INT_MASK_FW_MB; + writel(ival, &ha->qla4_83xx_reg->mb_int_mask); + ha->isr_count++; +exit: + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return IRQ_HANDLED; +} + +/** + * qla4_8xxx_default_intr_handler - hardware interrupt handler. + * @irq: Unused + * @dev_id: Pointer to host adapter structure + * + * This interrupt handler is called directly for MSI-X, and + * called indirectly for MSI. 
+ **/ +irqreturn_t +qla4_8xxx_default_intr_handler(int irq, void *dev_id) +{ + struct scsi_qla_host *ha = dev_id; + unsigned long flags; + uint32_t intr_status; + uint8_t reqs_count = 0; + + if (is_qla8032(ha) || is_qla8042(ha)) { + qla4_83xx_mailbox_intr_handler(irq, dev_id); + } else { + spin_lock_irqsave(&ha->hardware_lock, flags); + while (1) { + if (!(readl(&ha->qla4_82xx_reg->host_int) & + ISRX_82XX_RISC_INT)) { + qla4_82xx_spurious_interrupt(ha, reqs_count); + break; + } + + intr_status = readl(&ha->qla4_82xx_reg->host_status); + if ((intr_status & + (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) { + qla4_82xx_spurious_interrupt(ha, reqs_count); + break; + } + + ha->isp_ops->interrupt_service_routine(ha, intr_status); + + if (++reqs_count == MAX_REQS_SERVICED_PER_INTR) + break; + } + ha->isr_count++; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } + return IRQ_HANDLED; +} + +irqreturn_t +qla4_8xxx_msix_rsp_q(int irq, void *dev_id) +{ + struct scsi_qla_host *ha = dev_id; + unsigned long flags; + int intr_status; + uint32_t ival = 0; + + spin_lock_irqsave(&ha->hardware_lock, flags); + if (is_qla8032(ha) || is_qla8042(ha)) { + ival = readl(&ha->qla4_83xx_reg->iocb_int_mask); + if (ival == 0) { + ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n", + __func__); + goto exit_msix_rsp_q; + } + qla4xxx_process_response_queue(ha); + writel(0, &ha->qla4_83xx_reg->iocb_int_mask); + } else { + intr_status = readl(&ha->qla4_82xx_reg->host_status); + if (intr_status & HSRX_RISC_IOCB_INT) { + qla4xxx_process_response_queue(ha); + writel(0, &ha->qla4_82xx_reg->host_int); + } else { + ql4_printk(KERN_INFO, ha, "%s: spurious iocb interrupt...\n", + __func__); + goto exit_msix_rsp_q; + } + } + ha->isr_count++; +exit_msix_rsp_q: + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return IRQ_HANDLED; +} + +/** + * qla4xxx_process_aen - processes AENs generated by firmware + * @ha: pointer to host adapter structure. + * @process_aen: type of AENs to process + * + * Processes specific types of Asynchronous Events generated by firmware. + * The type of AENs to process is specified by process_aen and can be + * PROCESS_ALL_AENS 0 + * FLUSH_DDB_CHANGED_AENS 1 + * RELOGIN_DDB_CHANGED_AENS 2 + **/ +void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen) +{ + uint32_t mbox_sts[MBOX_AEN_REG_COUNT]; + struct aen *aen; + int i; + unsigned long flags; + + spin_lock_irqsave(&ha->hardware_lock, flags); + while (ha->aen_out != ha->aen_in) { + aen = &ha->aen_q[ha->aen_out]; + /* copy aen information to local structure */ + for (i = 0; i < MBOX_AEN_REG_COUNT; i++) + mbox_sts[i] = aen->mbox_sts[i]; + + ha->aen_q_count++; + ha->aen_out++; + + if (ha->aen_out == MAX_AEN_ENTRIES) + ha->aen_out = 0; + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x" + " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no, + (ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)), + mbox_sts[0], mbox_sts[1], mbox_sts[2], + mbox_sts[3], mbox_sts[4])); + + switch (mbox_sts[0]) { + case MBOX_ASTS_DATABASE_CHANGED: + switch (process_aen) { + case FLUSH_DDB_CHANGED_AENS: + DEBUG2(printk("scsi%ld: AEN[%d] %04x, index " + "[%d] state=%04x FLUSHED!\n", + ha->host_no, ha->aen_out, + mbox_sts[0], mbox_sts[2], + mbox_sts[3])); + break; + case PROCESS_ALL_AENS: + default: + /* Specific device. 
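+					 * Judging by the call just below,
+					 * mbox_sts[1] == 1 appears to flag a
+					 * single-DDB change, with
+					 * mbox_sts[2] = fw_ddb_index,
+					 * mbox_sts[3] = device state and
+					 * mbox_sts[4] = connection error (the
+					 * arguments handed to
+					 * qla4xxx_process_ddb_changed()).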
*/ + if (mbox_sts[1] == 1) + qla4xxx_process_ddb_changed(ha, + mbox_sts[2], mbox_sts[3], + mbox_sts[4]); + break; + } + } + spin_lock_irqsave(&ha->hardware_lock, flags); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +int qla4xxx_request_irqs(struct scsi_qla_host *ha) +{ + int ret = 0; + int rval = QLA_ERROR; + + if (is_qla40XX(ha)) + goto try_intx; + + if (ql4xenablemsix == 2) { + /* Note: MSI Interrupts not supported for ISP8324 and ISP8042 */ + if (is_qla8032(ha) || is_qla8042(ha)) { + ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP%04x, Falling back-to INTx mode\n", + __func__, ha->pdev->device); + goto try_intx; + } + goto try_msi; + } + + if (ql4xenablemsix == 0 || ql4xenablemsix != 1) + goto try_intx; + + /* Trying MSI-X */ + ret = qla4_8xxx_enable_msix(ha); + if (!ret) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "MSI-X: Enabled (0x%X).\n", ha->revision_id)); + goto irq_attached; + } else { + if (is_qla8032(ha) || is_qla8042(ha)) { + ql4_printk(KERN_INFO, ha, "%s: ISP%04x: MSI-X: Falling back-to INTx mode. ret = %d\n", + __func__, ha->pdev->device, ret); + goto try_intx; + } + } + + ql4_printk(KERN_WARNING, ha, + "MSI-X: Falling back-to MSI mode -- %d.\n", ret); + +try_msi: + /* Trying MSI */ + ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI); + if (ret > 0) { + ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler, + 0, DRIVER_NAME, ha); + if (!ret) { + DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n")); + goto irq_attached; + } else { + ql4_printk(KERN_WARNING, ha, + "MSI: Failed to reserve interrupt %d " + "already in use.\n", ha->pdev->irq); + pci_free_irq_vectors(ha->pdev); + } + } + +try_intx: + if (is_qla8022(ha)) { + ql4_printk(KERN_WARNING, ha, "%s: ISP82xx Legacy interrupt not supported\n", + __func__); + goto irq_not_attached; + } + + /* Trying INTx */ + ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, + IRQF_SHARED, DRIVER_NAME, ha); + if (!ret) { + DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n")); + goto irq_attached; + + } else { + ql4_printk(KERN_WARNING, ha, + "INTx: Failed to reserve interrupt %d already in" + " use.\n", ha->pdev->irq); + goto irq_not_attached; + } + +irq_attached: + set_bit(AF_IRQ_ATTACHED, &ha->flags); + ha->host->irq = ha->pdev->irq; + ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n", + __func__, ha->pdev->irq); + rval = QLA_SUCCESS; +irq_not_attached: + return rval; +} + +void qla4xxx_free_irqs(struct scsi_qla_host *ha) +{ + if (!test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) + return; + + if (ha->pdev->msix_enabled) + free_irq(pci_irq_vector(ha->pdev, 1), ha); + free_irq(pci_irq_vector(ha->pdev, 0), ha); + pci_free_irq_vectors(ha->pdev); +} diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c new file mode 100644 index 000000000..249f1d702 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -0,0 +1,2451 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ + +#include +#include "ql4_def.h" +#include "ql4_glbl.h" +#include "ql4_dbg.h" +#include "ql4_inline.h" +#include "ql4_version.h" + +void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd, + int in_count) +{ + int i; + + /* Load all mailbox registers, except mailbox 0. 
*/ + for (i = 1; i < in_count; i++) + writel(mbx_cmd[i], &ha->reg->mailbox[i]); + + /* Wakeup firmware */ + writel(mbx_cmd[0], &ha->reg->mailbox[0]); + readl(&ha->reg->mailbox[0]); + writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); +} + +void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count) +{ + int intr_status; + + intr_status = readl(&ha->reg->ctrl_status); + if (intr_status & INTR_PENDING) { + /* + * Service the interrupt. + * The ISR will save the mailbox status registers + * to a temporary storage location in the adapter structure. + */ + ha->mbox_status_count = out_count; + ha->isp_ops->interrupt_service_routine(ha, intr_status); + } +} + +/** + * qla4xxx_is_intr_poll_mode - Are we allowed to poll for interrupts? + * @ha: Pointer to host adapter structure. + * returns: 1=polling mode, 0=non-polling mode + **/ +static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha) +{ + int rval = 1; + + if (is_qla8032(ha) || is_qla8042(ha)) { + if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && + test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) + rval = 0; + } else { + if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && + test_bit(AF_INTERRUPTS_ON, &ha->flags) && + test_bit(AF_ONLINE, &ha->flags) && + !test_bit(AF_HA_REMOVAL, &ha->flags)) + rval = 0; + } + + return rval; +} + +/** + * qla4xxx_mailbox_command - issues mailbox commands + * @ha: Pointer to host adapter structure. + * @inCount: number of mailbox registers to load. + * @outCount: number of mailbox registers to return. + * @mbx_cmd: data pointer for mailbox in registers. + * @mbx_sts: data pointer for mailbox out registers. + * + * This routine issue mailbox commands and waits for completion. + * If outCount is 0, this routine completes successfully WITHOUT waiting + * for the mailbox command to complete. 
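+ *
+ * A minimal call sketch (illustrative only; it simply mirrors callers
+ * further down in this file, e.g. qla4xxx_get_firmware_state()):
+ *
+ *	uint32_t mbox_cmd[MBOX_REG_COUNT];
+ *	uint32_t mbox_sts[MBOX_REG_COUNT];
+ *
+ *	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ *	memset(&mbox_sts, 0, sizeof(mbox_sts));
+ *	mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
+ *	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4,
+ *				    &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS)
+ *		return QLA_ERROR;
+ *	(on success, mbox_sts[1..3] hold the firmware state, board id and
+ *	 additional firmware state, as saved by qla4xxx_get_firmware_state())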
+ **/ +int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, + uint8_t outCount, uint32_t *mbx_cmd, + uint32_t *mbx_sts) +{ + int status = QLA_ERROR; + uint8_t i; + u_long wait_count; + unsigned long flags = 0; + uint32_t dev_state; + + /* Make sure that pointers are valid */ + if (!mbx_cmd || !mbx_sts) { + DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts " + "pointer\n", ha->host_no, __func__)); + return status; + } + + if (is_qla40XX(ha)) { + if (test_bit(AF_HA_REMOVAL, &ha->flags)) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: " + "prematurely completing mbx cmd as " + "adapter removal detected\n", + ha->host_no, __func__)); + return status; + } + } + + if ((is_aer_supported(ha)) && + (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) { + DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, " + "timeout MBX Exiting.\n", ha->host_no, __func__)); + return status; + } + + /* Mailbox code active */ + wait_count = MBOX_TOV * 100; + + while (wait_count--) { + mutex_lock(&ha->mbox_sem); + if (!test_bit(AF_MBOX_COMMAND, &ha->flags)) { + set_bit(AF_MBOX_COMMAND, &ha->flags); + mutex_unlock(&ha->mbox_sem); + break; + } + mutex_unlock(&ha->mbox_sem); + if (!wait_count) { + DEBUG2(printk("scsi%ld: %s: mbox_sem failed\n", + ha->host_no, __func__)); + return status; + } + msleep(10); + } + + if (is_qla80XX(ha)) { + if (test_bit(AF_FW_RECOVERY, &ha->flags)) { + DEBUG2(ql4_printk(KERN_WARNING, ha, + "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n", + ha->host_no, __func__)); + goto mbox_exit; + } + /* Do not send any mbx cmd if h/w is in failed state*/ + ha->isp_ops->idc_lock(ha); + dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); + ha->isp_ops->idc_unlock(ha); + if (dev_state == QLA8XXX_DEV_FAILED) { + ql4_printk(KERN_WARNING, ha, + "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n", + ha->host_no, __func__); + goto mbox_exit; + } + } + + spin_lock_irqsave(&ha->hardware_lock, flags); + + ha->mbox_status_count = outCount; + for (i = 0; i < outCount; i++) + ha->mbox_status[i] = 0; + + /* Queue the mailbox command to the firmware */ + ha->isp_ops->queue_mailbox_command(ha, mbx_cmd, inCount); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* Wait for completion */ + + /* + * If we don't want status, don't wait for the mailbox command to + * complete. For example, MBOX_CMD_RESET_FW doesn't return status, + * you must poll the inbound Interrupt Mask for completion. + */ + if (outCount == 0) { + status = QLA_SUCCESS; + goto mbox_exit; + } + + /* + * Wait for completion: Poll or completion queue + */ + if (qla4xxx_is_intr_poll_mode(ha)) { + /* Poll for command to complete */ + wait_count = jiffies + MBOX_TOV * HZ; + while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) { + if (time_after_eq(jiffies, wait_count)) + break; + /* + * Service the interrupt. + * The ISR will save the mailbox status registers + * to a temporary storage location in the adapter + * structure. + */ + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->isp_ops->process_mailbox_interrupt(ha, outCount); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + msleep(10); + } + } else { + /* Do not poll for completion. Use completion queue */ + set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags); + wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ); + clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags); + } + + /* Check for mailbox timeout. 
*/ + if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) { + if (is_qla80XX(ha) && + test_bit(AF_FW_RECOVERY, &ha->flags)) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: %s: prematurely completing mbx cmd as " + "firmware recovery detected\n", + ha->host_no, __func__)); + goto mbox_exit; + } + ql4_printk(KERN_WARNING, ha, "scsi%ld: Mailbox Cmd 0x%08X timed out, Scheduling Adapter Reset\n", + ha->host_no, mbx_cmd[0]); + ha->mailbox_timeout_count++; + mbx_sts[0] = (-1); + set_bit(DPC_RESET_HA, &ha->dpc_flags); + if (is_qla8022(ha)) { + ql4_printk(KERN_INFO, ha, + "disabling pause transmit on port 0 & 1.\n"); + qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, + CRB_NIU_XG_PAUSE_CTL_P0 | + CRB_NIU_XG_PAUSE_CTL_P1); + } else if (is_qla8032(ha) || is_qla8042(ha)) { + ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n", + __func__); + qla4_83xx_disable_pause(ha); + } + goto mbox_exit; + } + + /* + * Copy the mailbox out registers to the caller's mailbox in/out + * structure. + */ + spin_lock_irqsave(&ha->hardware_lock, flags); + for (i = 0; i < outCount; i++) + mbx_sts[i] = ha->mbox_status[i]; + + /* Set return status and error flags (if applicable). */ + switch (ha->mbox_status[0]) { + case MBOX_STS_COMMAND_COMPLETE: + status = QLA_SUCCESS; + break; + + case MBOX_STS_INTERMEDIATE_COMPLETION: + status = QLA_SUCCESS; + break; + + case MBOX_STS_BUSY: + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Cmd = %08X, ISP BUSY\n", + ha->host_no, __func__, mbx_cmd[0]); + ha->mailbox_timeout_count++; + break; + + default: + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: FAILED, MBOX CMD = %08X, MBOX STS = %08X %08X %08X %08X %08X %08X %08X %08X\n", + ha->host_no, __func__, mbx_cmd[0], mbx_sts[0], + mbx_sts[1], mbx_sts[2], mbx_sts[3], mbx_sts[4], + mbx_sts[5], mbx_sts[6], mbx_sts[7]); + break; + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + +mbox_exit: + mutex_lock(&ha->mbox_sem); + clear_bit(AF_MBOX_COMMAND, &ha->flags); + mutex_unlock(&ha->mbox_sem); + clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags); + + return status; +} + +/** + * qla4xxx_get_minidump_template - Get the firmware template + * @ha: Pointer to host adapter structure. + * @phys_addr: dma address for template + * + * Obtain the minidump template from firmware during initialization + * as it may not be available when minidump is desired. + **/ +int qla4xxx_get_minidump_template(struct scsi_qla_host *ha, + dma_addr_t phys_addr) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_MINIDUMP; + mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND; + mbox_cmd[2] = LSDW(phys_addr); + mbox_cmd[3] = MSDW(phys_addr); + mbox_cmd[4] = ha->fw_dump_tmplt_size; + mbox_cmd[5] = 0; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], + &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n", + ha->host_no, __func__, mbox_cmd[0], + mbox_sts[0], mbox_sts[1])); + } + return status; +} + +/** + * qla4xxx_req_template_size - Get minidump template size from firmware. + * @ha: Pointer to host adapter structure. 
+ **/ +int qla4xxx_req_template_size(struct scsi_qla_host *ha) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_MINIDUMP; + mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0], + &mbox_sts[0]); + if (status == QLA_SUCCESS) { + ha->fw_dump_tmplt_size = mbox_sts[1]; + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: sts[0]=0x%04x, template size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n", + __func__, mbox_sts[0], mbox_sts[1], + mbox_sts[2], mbox_sts[3], mbox_sts[4], + mbox_sts[5], mbox_sts[6], mbox_sts[7])); + if (ha->fw_dump_tmplt_size == 0) + status = QLA_ERROR; + } else { + ql4_printk(KERN_WARNING, ha, + "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n", + __func__, mbox_sts[0], mbox_sts[1]); + status = QLA_ERROR; + } + + return status; +} + +void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha) +{ + set_bit(AF_FW_RECOVERY, &ha->flags); + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: set FW RECOVERY!\n", + ha->host_no, __func__); + + if (test_bit(AF_MBOX_COMMAND, &ha->flags)) { + if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags)) { + complete(&ha->mbx_intr_comp); + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw " + "recovery, doing premature completion of " + "mbx cmd\n", ha->host_no, __func__); + + } else { + set_bit(AF_MBOX_COMMAND_DONE, &ha->flags); + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw " + "recovery, doing premature completion of " + "polling mbx cmd\n", ha->host_no, __func__); + } + } +} + +static uint8_t +qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, + uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma) +{ + memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT); + memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); + + if (is_qla8022(ha)) + qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, 0); + + mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE; + mbox_cmd[1] = 0; + mbox_cmd[2] = LSDW(init_fw_cb_dma); + mbox_cmd[3] = MSDW(init_fw_cb_dma); + mbox_cmd[4] = sizeof(struct addr_ctrl_blk); + + if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) != + QLA_SUCCESS) { + DEBUG2(printk(KERN_WARNING "scsi%ld: %s: " + "MBOX_CMD_INITIALIZE_FIRMWARE" + " failed w/ status %04X\n", + ha->host_no, __func__, mbox_sts[0])); + return QLA_ERROR; + } + return QLA_SUCCESS; +} + +uint8_t +qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, + uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma) +{ + memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT); + memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); + mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK; + mbox_cmd[2] = LSDW(init_fw_cb_dma); + mbox_cmd[3] = MSDW(init_fw_cb_dma); + mbox_cmd[4] = sizeof(struct addr_ctrl_blk); + + if (qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts) != + QLA_SUCCESS) { + DEBUG2(printk(KERN_WARNING "scsi%ld: %s: " + "MBOX_CMD_GET_INIT_FW_CTRL_BLOCK" + " failed w/ status %04X\n", + ha->host_no, __func__, mbox_sts[0])); + return QLA_ERROR; + } + return QLA_SUCCESS; +} + +uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state) +{ + uint8_t ipaddr_state; + + switch (fw_ipaddr_state) { + case IP_ADDRSTATE_UNCONFIGURED: + ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED; + break; + case IP_ADDRSTATE_INVALID: + ipaddr_state = ISCSI_IPDDRESS_STATE_INVALID; + break; + case IP_ADDRSTATE_ACQUIRING: + 
ipaddr_state = ISCSI_IPDDRESS_STATE_ACQUIRING; + break; + case IP_ADDRSTATE_TENTATIVE: + ipaddr_state = ISCSI_IPDDRESS_STATE_TENTATIVE; + break; + case IP_ADDRSTATE_DEPRICATED: + ipaddr_state = ISCSI_IPDDRESS_STATE_DEPRECATED; + break; + case IP_ADDRSTATE_PREFERRED: + ipaddr_state = ISCSI_IPDDRESS_STATE_VALID; + break; + case IP_ADDRSTATE_DISABLING: + ipaddr_state = ISCSI_IPDDRESS_STATE_DISABLING; + break; + default: + ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED; + } + return ipaddr_state; +} + +static void +qla4xxx_update_local_ip(struct scsi_qla_host *ha, + struct addr_ctrl_blk *init_fw_cb) +{ + ha->ip_config.tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts); + ha->ip_config.ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts); + ha->ip_config.ipv4_addr_state = + qla4xxx_set_ipaddr_state(init_fw_cb->ipv4_addr_state); + ha->ip_config.eth_mtu_size = + le16_to_cpu(init_fw_cb->eth_mtu_size); + ha->ip_config.ipv4_port = le16_to_cpu(init_fw_cb->ipv4_port); + + if (ha->acb_version == ACB_SUPPORTED) { + ha->ip_config.ipv6_options = le16_to_cpu(init_fw_cb->ipv6_opts); + ha->ip_config.ipv6_addl_options = + le16_to_cpu(init_fw_cb->ipv6_addtl_opts); + ha->ip_config.ipv6_tcp_options = + le16_to_cpu(init_fw_cb->ipv6_tcp_opts); + } + + /* Save IPv4 Address Info */ + memcpy(ha->ip_config.ip_address, init_fw_cb->ipv4_addr, + min(sizeof(ha->ip_config.ip_address), + sizeof(init_fw_cb->ipv4_addr))); + memcpy(ha->ip_config.subnet_mask, init_fw_cb->ipv4_subnet, + min(sizeof(ha->ip_config.subnet_mask), + sizeof(init_fw_cb->ipv4_subnet))); + memcpy(ha->ip_config.gateway, init_fw_cb->ipv4_gw_addr, + min(sizeof(ha->ip_config.gateway), + sizeof(init_fw_cb->ipv4_gw_addr))); + + ha->ip_config.ipv4_vlan_tag = be16_to_cpu(init_fw_cb->ipv4_vlan_tag); + ha->ip_config.control = init_fw_cb->control; + ha->ip_config.tcp_wsf = init_fw_cb->ipv4_tcp_wsf; + ha->ip_config.ipv4_tos = init_fw_cb->ipv4_tos; + ha->ip_config.ipv4_cache_id = init_fw_cb->ipv4_cacheid; + ha->ip_config.ipv4_alt_cid_len = init_fw_cb->ipv4_dhcp_alt_cid_len; + memcpy(ha->ip_config.ipv4_alt_cid, init_fw_cb->ipv4_dhcp_alt_cid, + min(sizeof(ha->ip_config.ipv4_alt_cid), + sizeof(init_fw_cb->ipv4_dhcp_alt_cid))); + ha->ip_config.ipv4_vid_len = init_fw_cb->ipv4_dhcp_vid_len; + memcpy(ha->ip_config.ipv4_vid, init_fw_cb->ipv4_dhcp_vid, + min(sizeof(ha->ip_config.ipv4_vid), + sizeof(init_fw_cb->ipv4_dhcp_vid))); + ha->ip_config.ipv4_ttl = init_fw_cb->ipv4_ttl; + ha->ip_config.def_timeout = le16_to_cpu(init_fw_cb->def_timeout); + ha->ip_config.abort_timer = init_fw_cb->abort_timer; + ha->ip_config.iscsi_options = le16_to_cpu(init_fw_cb->iscsi_opts); + ha->ip_config.iscsi_max_pdu_size = + le16_to_cpu(init_fw_cb->iscsi_max_pdu_size); + ha->ip_config.iscsi_first_burst_len = + le16_to_cpu(init_fw_cb->iscsi_fburst_len); + ha->ip_config.iscsi_max_outstnd_r2t = + le16_to_cpu(init_fw_cb->iscsi_max_outstnd_r2t); + ha->ip_config.iscsi_max_burst_len = + le16_to_cpu(init_fw_cb->iscsi_max_burst_len); + memcpy(ha->ip_config.iscsi_name, init_fw_cb->iscsi_name, + min(sizeof(ha->ip_config.iscsi_name), + sizeof(init_fw_cb->iscsi_name))); + + if (is_ipv6_enabled(ha)) { + /* Save IPv6 Address */ + ha->ip_config.ipv6_link_local_state = + qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_lnk_lcl_addr_state); + ha->ip_config.ipv6_addr0_state = + qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr0_state); + ha->ip_config.ipv6_addr1_state = + qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr1_state); + + switch (le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state)) { + case IPV6_RTRSTATE_UNKNOWN: + 
ha->ip_config.ipv6_default_router_state = + ISCSI_ROUTER_STATE_UNKNOWN; + break; + case IPV6_RTRSTATE_MANUAL: + ha->ip_config.ipv6_default_router_state = + ISCSI_ROUTER_STATE_MANUAL; + break; + case IPV6_RTRSTATE_ADVERTISED: + ha->ip_config.ipv6_default_router_state = + ISCSI_ROUTER_STATE_ADVERTISED; + break; + case IPV6_RTRSTATE_STALE: + ha->ip_config.ipv6_default_router_state = + ISCSI_ROUTER_STATE_STALE; + break; + default: + ha->ip_config.ipv6_default_router_state = + ISCSI_ROUTER_STATE_UNKNOWN; + } + + ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE; + ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80; + + memcpy(&ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[8], + init_fw_cb->ipv6_if_id, + min(sizeof(ha->ip_config.ipv6_link_local_addr)/2, + sizeof(init_fw_cb->ipv6_if_id))); + memcpy(&ha->ip_config.ipv6_addr0, init_fw_cb->ipv6_addr0, + min(sizeof(ha->ip_config.ipv6_addr0), + sizeof(init_fw_cb->ipv6_addr0))); + memcpy(&ha->ip_config.ipv6_addr1, init_fw_cb->ipv6_addr1, + min(sizeof(ha->ip_config.ipv6_addr1), + sizeof(init_fw_cb->ipv6_addr1))); + memcpy(&ha->ip_config.ipv6_default_router_addr, + init_fw_cb->ipv6_dflt_rtr_addr, + min(sizeof(ha->ip_config.ipv6_default_router_addr), + sizeof(init_fw_cb->ipv6_dflt_rtr_addr))); + ha->ip_config.ipv6_vlan_tag = + be16_to_cpu(init_fw_cb->ipv6_vlan_tag); + ha->ip_config.ipv6_port = le16_to_cpu(init_fw_cb->ipv6_port); + ha->ip_config.ipv6_cache_id = init_fw_cb->ipv6_cache_id; + ha->ip_config.ipv6_flow_lbl = + le16_to_cpu(init_fw_cb->ipv6_flow_lbl); + ha->ip_config.ipv6_traffic_class = + init_fw_cb->ipv6_traffic_class; + ha->ip_config.ipv6_hop_limit = init_fw_cb->ipv6_hop_limit; + ha->ip_config.ipv6_nd_reach_time = + le32_to_cpu(init_fw_cb->ipv6_nd_reach_time); + ha->ip_config.ipv6_nd_rexmit_timer = + le32_to_cpu(init_fw_cb->ipv6_nd_rexmit_timer); + ha->ip_config.ipv6_nd_stale_timeout = + le32_to_cpu(init_fw_cb->ipv6_nd_stale_timeout); + ha->ip_config.ipv6_dup_addr_detect_count = + init_fw_cb->ipv6_dup_addr_detect_count; + ha->ip_config.ipv6_gw_advrt_mtu = + le32_to_cpu(init_fw_cb->ipv6_gw_advrt_mtu); + ha->ip_config.ipv6_tcp_wsf = init_fw_cb->ipv6_tcp_wsf; + } +} + +uint8_t +qla4xxx_update_local_ifcb(struct scsi_qla_host *ha, + uint32_t *mbox_cmd, + uint32_t *mbox_sts, + struct addr_ctrl_blk *init_fw_cb, + dma_addr_t init_fw_cb_dma) +{ + if (qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, init_fw_cb_dma) + != QLA_SUCCESS) { + DEBUG2(printk(KERN_WARNING + "scsi%ld: %s: Failed to get init_fw_ctrl_blk\n", + ha->host_no, __func__)); + return QLA_ERROR; + } + + DEBUG2(qla4xxx_dump_buffer(init_fw_cb, sizeof(struct addr_ctrl_blk))); + + /* Save some info in adapter structure. */ + ha->acb_version = init_fw_cb->acb_version; + ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options); + ha->heartbeat_interval = init_fw_cb->hb_interval; + memcpy(ha->name_string, init_fw_cb->iscsi_name, + min(sizeof(ha->name_string), + sizeof(init_fw_cb->iscsi_name))); + ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout); + /*memcpy(ha->alias, init_fw_cb->Alias, + min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/ + + qla4xxx_update_local_ip(ha, init_fw_cb); + + return QLA_SUCCESS; +} + +/** + * qla4xxx_initialize_fw_cb - initializes firmware control block. + * @ha: Pointer to host adapter structure. 
+ **/ +int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) +{ + struct addr_ctrl_blk *init_fw_cb; + dma_addr_t init_fw_cb_dma; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status = QLA_ERROR; + + init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct addr_ctrl_blk), + &init_fw_cb_dma, GFP_KERNEL); + if (init_fw_cb == NULL) { + DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n", + ha->host_no, __func__)); + goto exit_init_fw_cb_no_free; + } + + /* Get Initialize Firmware Control Block. */ + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) != + QLA_SUCCESS) { + goto exit_init_fw_cb; + } + + /* Fill in the request and response queue information. */ + init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out); + init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in); + init_fw_cb->rqq_len = cpu_to_le16(REQUEST_QUEUE_DEPTH); + init_fw_cb->compq_len = cpu_to_le16(RESPONSE_QUEUE_DEPTH); + init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma)); + init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma)); + init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma)); + init_fw_cb->compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma)); + init_fw_cb->shdwreg_addr_lo = cpu_to_le32(LSDW(ha->shadow_regs_dma)); + init_fw_cb->shdwreg_addr_hi = cpu_to_le32(MSDW(ha->shadow_regs_dma)); + + /* Set up required options. */ + init_fw_cb->fw_options |= + cpu_to_le16(FWOPT_SESSION_MODE | + FWOPT_INITIATOR_MODE); + + if (is_qla80XX(ha)) + init_fw_cb->fw_options |= + cpu_to_le16(FWOPT_ENABLE_CRBDB); + + init_fw_cb->fw_options &= cpu_to_le16(~FWOPT_TARGET_MODE); + + init_fw_cb->add_fw_options = 0; + init_fw_cb->add_fw_options |= + cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT); + init_fw_cb->add_fw_options |= + cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE); + + if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) + != QLA_SUCCESS) { + DEBUG2(printk(KERN_WARNING + "scsi%ld: %s: Failed to set init_fw_ctrl_blk\n", + ha->host_no, __func__)); + goto exit_init_fw_cb; + } + + if (qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], + init_fw_cb, init_fw_cb_dma) != QLA_SUCCESS) { + DEBUG2(printk("scsi%ld: %s: Failed to update local ifcb\n", + ha->host_no, __func__)); + goto exit_init_fw_cb; + } + status = QLA_SUCCESS; + +exit_init_fw_cb: + dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), + init_fw_cb, init_fw_cb_dma); +exit_init_fw_cb_no_free: + return status; +} + +/** + * qla4xxx_get_dhcp_ip_address - gets HBA ip address via DHCP + * @ha: Pointer to host adapter structure. + **/ +int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha) +{ + struct addr_ctrl_blk *init_fw_cb; + dma_addr_t init_fw_cb_dma; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct addr_ctrl_blk), + &init_fw_cb_dma, GFP_KERNEL); + if (init_fw_cb == NULL) { + printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, + __func__); + return QLA_ERROR; + } + + /* Get Initialize Firmware Control Block. */ + if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) != + QLA_SUCCESS) { + DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n", + ha->host_no, __func__)); + dma_free_coherent(&ha->pdev->dev, + sizeof(struct addr_ctrl_blk), + init_fw_cb, init_fw_cb_dma); + return QLA_ERROR; + } + + /* Save IP Address. 
*/ + qla4xxx_update_local_ip(ha, init_fw_cb); + dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), + init_fw_cb, init_fw_cb_dma); + + return QLA_SUCCESS; +} + +/** + * qla4xxx_get_firmware_state - gets firmware state of HBA + * @ha: Pointer to host adapter structure. + **/ +int qla4xxx_get_firmware_state(struct scsi_qla_host * ha) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + /* Get firmware version */ + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_GET_FW_STATE; + + if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4, &mbox_cmd[0], &mbox_sts[0]) != + QLA_SUCCESS) { + DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ " + "status %04X\n", ha->host_no, __func__, + mbox_sts[0])); + return QLA_ERROR; + } + ha->firmware_state = mbox_sts[1]; + ha->board_id = mbox_sts[2]; + ha->addl_fw_state = mbox_sts[3]; + DEBUG2(printk("scsi%ld: %s firmware_state=0x%x\n", + ha->host_no, __func__, ha->firmware_state);) + + return QLA_SUCCESS; +} + +/** + * qla4xxx_get_firmware_status - retrieves firmware status + * @ha: Pointer to host adapter structure. + **/ +int qla4xxx_get_firmware_status(struct scsi_qla_host * ha) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + /* Get firmware version */ + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS; + + if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) != + QLA_SUCCESS) { + DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ " + "status %04X\n", ha->host_no, __func__, + mbox_sts[0])); + return QLA_ERROR; + } + + /* High-water mark of IOCBs */ + ha->iocb_hiwat = mbox_sts[2]; + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: firmware IOCBs available = %d\n", __func__, + ha->iocb_hiwat)); + + if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION) + ha->iocb_hiwat -= IOCB_HIWAT_CUSHION; + + /* Ideally, we should not enter this code, as the # of firmware + * IOCBs is hard-coded in the firmware. We set a default + * iocb_hiwat here just in case */ + if (ha->iocb_hiwat == 0) { + ha->iocb_hiwat = REQUEST_QUEUE_DEPTH / 4; + DEBUG2(ql4_printk(KERN_WARNING, ha, + "%s: Setting IOCB's to = %d\n", __func__, + ha->iocb_hiwat)); + } + + return QLA_SUCCESS; +} + +/* + * qla4xxx_get_fwddb_entry - retrieves firmware ddb entry + * @ha: Pointer to host adapter structure. 
+ * @fw_ddb_index: Firmware's device database index + * @fw_ddb_entry: Pointer to firmware's device database entry structure + * @num_valid_ddb_entries: Pointer to number of valid ddb entries + * @next_ddb_index: Pointer to next valid device database index + * @fw_ddb_device_state: Pointer to device state + **/ +int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha, + uint16_t fw_ddb_index, + struct dev_db_entry *fw_ddb_entry, + dma_addr_t fw_ddb_entry_dma, + uint32_t *num_valid_ddb_entries, + uint32_t *next_ddb_index, + uint32_t *fw_ddb_device_state, + uint32_t *conn_err_detail, + uint16_t *tcp_source_port_num, + uint16_t *connection_id) +{ + int status = QLA_ERROR; + uint16_t options; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + /* Make sure the device index is valid */ + if (fw_ddb_index >= MAX_DDB_ENTRIES) { + DEBUG2(printk("scsi%ld: %s: ddb [%d] out of range.\n", + ha->host_no, __func__, fw_ddb_index)); + goto exit_get_fwddb; + } + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + if (fw_ddb_entry) + memset(fw_ddb_entry, 0, sizeof(struct dev_db_entry)); + + mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY; + mbox_cmd[1] = (uint32_t) fw_ddb_index; + mbox_cmd[2] = LSDW(fw_ddb_entry_dma); + mbox_cmd[3] = MSDW(fw_ddb_entry_dma); + mbox_cmd[4] = sizeof(struct dev_db_entry); + + if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 7, &mbox_cmd[0], &mbox_sts[0]) == + QLA_ERROR) { + DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed" + " with status 0x%04X\n", ha->host_no, __func__, + mbox_sts[0])); + goto exit_get_fwddb; + } + if (fw_ddb_index != mbox_sts[1]) { + DEBUG2(printk("scsi%ld: %s: ddb mismatch [%d] != [%d].\n", + ha->host_no, __func__, fw_ddb_index, + mbox_sts[1])); + goto exit_get_fwddb; + } + if (fw_ddb_entry) { + options = le16_to_cpu(fw_ddb_entry->options); + if (options & DDB_OPT_IPV6_DEVICE) { + ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d " + "Next %d State %04x ConnErr %08x %pI6 " + ":%04d \"%s\"\n", __func__, fw_ddb_index, + mbox_sts[0], mbox_sts[2], mbox_sts[3], + mbox_sts[4], mbox_sts[5], + fw_ddb_entry->ip_addr, + le16_to_cpu(fw_ddb_entry->port), + fw_ddb_entry->iscsi_name); + } else { + ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d " + "Next %d State %04x ConnErr %08x %pI4 " + ":%04d \"%s\"\n", __func__, fw_ddb_index, + mbox_sts[0], mbox_sts[2], mbox_sts[3], + mbox_sts[4], mbox_sts[5], + fw_ddb_entry->ip_addr, + le16_to_cpu(fw_ddb_entry->port), + fw_ddb_entry->iscsi_name); + } + } + if (num_valid_ddb_entries) + *num_valid_ddb_entries = mbox_sts[2]; + if (next_ddb_index) + *next_ddb_index = mbox_sts[3]; + if (fw_ddb_device_state) + *fw_ddb_device_state = mbox_sts[4]; + + /* + * RA: This mailbox has been changed to pass connection error and + * details. Its true for ISP4010 as per Version E - Not sure when it + * was changed. Get the time2wait from the fw_dd_entry field : + * default_time2wait which we call it as minTime2Wait DEV_DB_ENTRY + * struct. 
+ */ + if (conn_err_detail) + *conn_err_detail = mbox_sts[5]; + if (tcp_source_port_num) + *tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16); + if (connection_id) + *connection_id = (uint16_t) mbox_sts[6] & 0x00FF; + status = QLA_SUCCESS; + +exit_get_fwddb: + return status; +} + +int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_CONN_OPEN; + mbox_cmd[1] = fw_ddb_index; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], + &mbox_sts[0]); + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: status = %d mbx0 = 0x%x mbx1 = 0x%x\n", + __func__, status, mbox_sts[0], mbox_sts[1])); + return status; +} + +/** + * qla4xxx_set_ddb_entry - sets a ddb entry. + * @ha: Pointer to host adapter structure. + * @fw_ddb_index: Firmware's device database index + * @fw_ddb_entry_dma: dma address of ddb entry + * @mbx_sts: mailbox 0 to be returned or NULL + * + * This routine initializes or updates the adapter's device database + * entry for the specified device. + **/ +int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index, + dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status; + + /* Do not wait for completion. The firmware will send us an + * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status. + */ + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_SET_DATABASE_ENTRY; + mbox_cmd[1] = (uint32_t) fw_ddb_index; + mbox_cmd[2] = LSDW(fw_ddb_entry_dma); + mbox_cmd[3] = MSDW(fw_ddb_entry_dma); + mbox_cmd[4] = sizeof(struct dev_db_entry); + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], + &mbox_sts[0]); + if (mbx_sts) + *mbx_sts = mbox_sts[0]; + DEBUG2(printk("scsi%ld: %s: status=%d mbx0=0x%x mbx4=0x%x\n", + ha->host_no, __func__, status, mbox_sts[0], mbox_sts[4]);) + + return status; +} + +int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry, int options) +{ + int status; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT; + mbox_cmd[1] = ddb_entry->fw_ddb_index; + mbox_cmd[3] = options; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], + &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT " + "failed sts %04X %04X", __func__, + mbox_sts[0], mbox_sts[1])); + if ((mbox_sts[0] == MBOX_STS_COMMAND_ERROR) && + (mbox_sts[1] == DDB_NOT_LOGGED_IN)) { + set_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags); + } + } + + return status; +} + +/** + * qla4xxx_get_crash_record - retrieves crash record. + * @ha: Pointer to host adapter structure. + * + * This routine retrieves a crash record from the QLA4010 after an 8002h aen. + **/ +void qla4xxx_get_crash_record(struct scsi_qla_host * ha) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + struct crash_record *crash_record = NULL; + dma_addr_t crash_record_dma = 0; + uint32_t crash_record_size = 0; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_cmd)); + + /* Get size of crash record. 
*/ + mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD; + + if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) != + QLA_SUCCESS) { + DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n", + ha->host_no, __func__)); + goto exit_get_crash_record; + } + crash_record_size = mbox_sts[4]; + if (crash_record_size == 0) { + DEBUG2(printk("scsi%ld: %s: ERROR: Crash record size is 0!\n", + ha->host_no, __func__)); + goto exit_get_crash_record; + } + + /* Alloc Memory for Crash Record. */ + crash_record = dma_alloc_coherent(&ha->pdev->dev, crash_record_size, + &crash_record_dma, GFP_KERNEL); + if (crash_record == NULL) + goto exit_get_crash_record; + + /* Get Crash Record. */ + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_cmd)); + + mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD; + mbox_cmd[2] = LSDW(crash_record_dma); + mbox_cmd[3] = MSDW(crash_record_dma); + mbox_cmd[4] = crash_record_size; + + if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) != + QLA_SUCCESS) + goto exit_get_crash_record; + + /* Dump Crash Record. */ + +exit_get_crash_record: + if (crash_record) + dma_free_coherent(&ha->pdev->dev, crash_record_size, + crash_record, crash_record_dma); +} + +/** + * qla4xxx_get_conn_event_log - retrieves connection event log + * @ha: Pointer to host adapter structure. + **/ +void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + struct conn_event_log_entry *event_log = NULL; + dma_addr_t event_log_dma = 0; + uint32_t event_log_size = 0; + uint32_t num_valid_entries; + uint32_t oldest_entry = 0; + uint32_t max_event_log_entries; + uint8_t i; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_cmd)); + + /* Get size of crash record. */ + mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG; + + if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) != + QLA_SUCCESS) + goto exit_get_event_log; + + event_log_size = mbox_sts[4]; + if (event_log_size == 0) + goto exit_get_event_log; + + /* Alloc Memory for Crash Record. */ + event_log = dma_alloc_coherent(&ha->pdev->dev, event_log_size, + &event_log_dma, GFP_KERNEL); + if (event_log == NULL) + goto exit_get_event_log; + + /* Get Crash Record. */ + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_cmd)); + + mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG; + mbox_cmd[2] = LSDW(event_log_dma); + mbox_cmd[3] = MSDW(event_log_dma); + + if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) != + QLA_SUCCESS) { + DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event " + "log!\n", ha->host_no, __func__)); + goto exit_get_event_log; + } + + /* Dump Event Log. 
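+	 * Worked example (illustrative): if the buffer holds 16 entries and
+	 * the firmware reports num_valid_entries = 20, the log has wrapped,
+	 * so oldest_entry = 20 % 16 = 4 and the dump below prints entries
+	 * 4..15 followed by 0..3.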
*/ + num_valid_entries = mbox_sts[1]; + + max_event_log_entries = event_log_size / + sizeof(struct conn_event_log_entry); + + if (num_valid_entries > max_event_log_entries) + oldest_entry = num_valid_entries % max_event_log_entries; + + DEBUG3(printk("scsi%ld: Connection Event Log Dump (%d entries):\n", + ha->host_no, num_valid_entries)); + + if (ql4xextended_error_logging == 3) { + if (oldest_entry == 0) { + /* Circular Buffer has not wrapped around */ + for (i=0; i < num_valid_entries; i++) { + qla4xxx_dump_buffer((uint8_t *)event_log+ + (i*sizeof(*event_log)), + sizeof(*event_log)); + } + } + else { + /* Circular Buffer has wrapped around - + * display accordingly*/ + for (i=oldest_entry; i < max_event_log_entries; i++) { + qla4xxx_dump_buffer((uint8_t *)event_log+ + (i*sizeof(*event_log)), + sizeof(*event_log)); + } + for (i=0; i < oldest_entry; i++) { + qla4xxx_dump_buffer((uint8_t *)event_log+ + (i*sizeof(*event_log)), + sizeof(*event_log)); + } + } + } + +exit_get_event_log: + if (event_log) + dma_free_coherent(&ha->pdev->dev, event_log_size, event_log, + event_log_dma); +} + +/** + * qla4xxx_abort_task - issues Abort Task + * @ha: Pointer to host adapter structure. + * @srb: Pointer to srb entry + * + * This routine performs a LUN RESET on the specified target/lun. + * The caller must ensure that the ddb_entry and lun_entry pointers + * are valid before calling this routine. + **/ +int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + struct scsi_cmnd *cmd = srb->cmd; + int status = QLA_SUCCESS; + unsigned long flags = 0; + uint32_t index; + + /* + * Send abort task command to ISP, so that the ISP will return + * request with ABORT status + */ + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + spin_lock_irqsave(&ha->hardware_lock, flags); + index = (unsigned long)(unsigned char *)cmd->host_scribble; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* Firmware already posted completion on response queue */ + if (index == MAX_SRBS) + return status; + + mbox_cmd[0] = MBOX_CMD_ABORT_TASK; + mbox_cmd[1] = srb->ddb->fw_ddb_index; + mbox_cmd[2] = index; + /* Immediate Command Enable */ + mbox_cmd[5] = 0x01; + + qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], + &mbox_sts[0]); + if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) { + status = QLA_ERROR; + + DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%llu: abort task FAILED: " + "mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n", + ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0], + mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4])); + } + + return status; +} + +/** + * qla4xxx_reset_lun - issues LUN Reset + * @ha: Pointer to host adapter structure. + * @ddb_entry: Pointer to device database entry + * @lun: lun number + * + * This routine performs a LUN RESET on the specified target/lun. + * The caller must ensure that the ddb_entry and lun_entry pointers + * are valid before calling this routine. 
+ **/ +int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry, + uint64_t lun) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + uint32_t scsi_lun[2]; + int status = QLA_SUCCESS; + + DEBUG2(printk("scsi%ld:%d:%llu: lun reset issued\n", ha->host_no, + ddb_entry->fw_ddb_index, lun)); + + /* + * Send lun reset command to ISP, so that the ISP will return all + * outstanding requests with RESET status + */ + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + int_to_scsilun(lun, (struct scsi_lun *) scsi_lun); + + mbox_cmd[0] = MBOX_CMD_LUN_RESET; + mbox_cmd[1] = ddb_entry->fw_ddb_index; + /* FW expects LUN bytes 0-3 in Incoming Mailbox 2 + * (LUN byte 0 is LSByte, byte 3 is MSByte) */ + mbox_cmd[2] = cpu_to_le32(scsi_lun[0]); + /* FW expects LUN bytes 4-7 in Incoming Mailbox 3 + * (LUN byte 4 is LSByte, byte 7 is MSByte) */ + mbox_cmd[3] = cpu_to_le32(scsi_lun[1]); + mbox_cmd[5] = 0x01; /* Immediate Command Enable */ + + qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]); + if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE && + mbox_sts[0] != MBOX_STS_COMMAND_ERROR) + status = QLA_ERROR; + + return status; +} + +/** + * qla4xxx_reset_target - issues target Reset + * @ha: Pointer to host adapter structure. + * @ddb_entry: Pointer to device database entry + * + * This routine performs a TARGET RESET on the specified target. + * The caller must ensure that the ddb_entry pointers + * are valid before calling this routine. + **/ +int qla4xxx_reset_target(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status = QLA_SUCCESS; + + DEBUG2(printk("scsi%ld:%d: target reset issued\n", ha->host_no, + ddb_entry->fw_ddb_index)); + + /* + * Send target reset command to ISP, so that the ISP will return all + * outstanding requests with RESET status + */ + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_TARGET_WARM_RESET; + mbox_cmd[1] = ddb_entry->fw_ddb_index; + mbox_cmd[5] = 0x01; /* Immediate Command Enable */ + + qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], + &mbox_sts[0]); + if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE && + mbox_sts[0] != MBOX_STS_COMMAND_ERROR) + status = QLA_ERROR; + + return status; +} + +int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr, + uint32_t offset, uint32_t len) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_READ_FLASH; + mbox_cmd[1] = LSDW(dma_addr); + mbox_cmd[2] = MSDW(dma_addr); + mbox_cmd[3] = offset; + mbox_cmd[4] = len; + + if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], &mbox_sts[0]) != + QLA_SUCCESS) { + DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ " + "status %04X %04X, offset %08x, len %08x\n", ha->host_no, + __func__, mbox_sts[0], mbox_sts[1], offset, len)); + return QLA_ERROR; + } + return QLA_SUCCESS; +} + +/** + * qla4xxx_about_firmware - gets FW, iscsi draft and boot loader version + * @ha: Pointer to host adapter structure. + * + * Retrieves the FW version, iSCSI draft version & bootloader version of HBA. + * Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to + * those mailboxes, if unused. 
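Most of the buffer-based mailbox commands above (MBOX_CMD_READ_FLASH, MBOX_CMD_GET_ACB, and so on) hand the firmware a 64-bit DMA address split across two 32-bit mailbox registers with LSDW()/MSDW(). A self-contained sketch of that split, assuming the macros are the usual low/high dword helpers (the definitions below are an assumption, not copied from the driver headers):

#include <stdio.h>
#include <stdint.h>

/* Assumed semantics of the driver's LSDW()/MSDW() helpers. */
#define LSDW(x) ((uint32_t)((uint64_t)(x)))
#define MSDW(x) ((uint32_t)(((uint64_t)(x)) >> 32))

int main(void)
{
        uint64_t dma_addr = 0x0000001234abcd00ULL;      /* example bus address */
        uint32_t mbox_cmd[8] = { 0 };

        /* Same layout as MBOX_CMD_READ_FLASH above: address in mbox 1/2. */
        mbox_cmd[1] = LSDW(dma_addr);
        mbox_cmd[2] = MSDW(dma_addr);

        /* Reassemble to confirm the split is lossless. */
        uint64_t rebuilt = ((uint64_t)mbox_cmd[2] << 32) | mbox_cmd[1];
        printf("low=0x%08x high=0x%08x rebuilt=0x%016llx\n",
               mbox_cmd[1], mbox_cmd[2], (unsigned long long)rebuilt);
        return 0;
}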
+ **/ +int qla4xxx_about_firmware(struct scsi_qla_host *ha) +{ + struct about_fw_info *about_fw = NULL; + dma_addr_t about_fw_dma; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status = QLA_ERROR; + + about_fw = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct about_fw_info), + &about_fw_dma, GFP_KERNEL); + if (!about_fw) { + DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory " + "for about_fw\n", __func__)); + return status; + } + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_ABOUT_FW; + mbox_cmd[2] = LSDW(about_fw_dma); + mbox_cmd[3] = MSDW(about_fw_dma); + mbox_cmd[4] = sizeof(struct about_fw_info); + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, + &mbox_cmd[0], &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW " + "failed w/ status %04X\n", __func__, + mbox_sts[0])); + goto exit_about_fw; + } + + /* Save version information. */ + ha->fw_info.fw_major = le16_to_cpu(about_fw->fw_major); + ha->fw_info.fw_minor = le16_to_cpu(about_fw->fw_minor); + ha->fw_info.fw_patch = le16_to_cpu(about_fw->fw_patch); + ha->fw_info.fw_build = le16_to_cpu(about_fw->fw_build); + memcpy(ha->fw_info.fw_build_date, about_fw->fw_build_date, + sizeof(about_fw->fw_build_date)); + memcpy(ha->fw_info.fw_build_time, about_fw->fw_build_time, + sizeof(about_fw->fw_build_time)); + strcpy((char *)ha->fw_info.fw_build_user, + skip_spaces((char *)about_fw->fw_build_user)); + ha->fw_info.fw_load_source = le16_to_cpu(about_fw->fw_load_source); + ha->fw_info.iscsi_major = le16_to_cpu(about_fw->iscsi_major); + ha->fw_info.iscsi_minor = le16_to_cpu(about_fw->iscsi_minor); + ha->fw_info.bootload_major = le16_to_cpu(about_fw->bootload_major); + ha->fw_info.bootload_minor = le16_to_cpu(about_fw->bootload_minor); + ha->fw_info.bootload_patch = le16_to_cpu(about_fw->bootload_patch); + ha->fw_info.bootload_build = le16_to_cpu(about_fw->bootload_build); + strcpy((char *)ha->fw_info.extended_timestamp, + skip_spaces((char *)about_fw->extended_timestamp)); + + ha->fw_uptime_secs = le32_to_cpu(mbox_sts[5]); + ha->fw_uptime_msecs = le32_to_cpu(mbox_sts[6]); + status = QLA_SUCCESS; + +exit_about_fw: + dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info), + about_fw, about_fw_dma); + return status; +} + +int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options, + dma_addr_t dma_addr) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS; + mbox_cmd[1] = options; + mbox_cmd[2] = LSDW(dma_addr); + mbox_cmd[3] = MSDW(dma_addr); + + if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) != + QLA_SUCCESS) { + DEBUG2(printk("scsi%ld: %s: failed status %04X\n", + ha->host_no, __func__, mbox_sts[0])); + return QLA_ERROR; + } + return QLA_SUCCESS; +} + +int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index, + uint32_t *mbx_sts) +{ + int status; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY; + mbox_cmd[1] = ddb_index; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], + &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, "%s: 
failed status %04X\n", + __func__, mbox_sts[0])); + } + + *mbx_sts = mbox_sts[0]; + return status; +} + +int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index) +{ + int status; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY; + mbox_cmd[1] = ddb_index; + + status = qla4xxx_mailbox_command(ha, 2, 1, &mbox_cmd[0], + &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", + __func__, mbox_sts[0])); + } + + return status; +} + +int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr, + uint32_t offset, uint32_t length, uint32_t options) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status = QLA_SUCCESS; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_WRITE_FLASH; + mbox_cmd[1] = LSDW(dma_addr); + mbox_cmd[2] = MSDW(dma_addr); + mbox_cmd[3] = offset; + mbox_cmd[4] = length; + mbox_cmd[5] = options; + + status = qla4xxx_mailbox_command(ha, 6, 2, &mbox_cmd[0], &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_WRITE_FLASH " + "failed w/ status %04X, mbx1 %04X\n", + __func__, mbox_sts[0], mbox_sts[1])); + } + return status; +} + +int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index) +{ + uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO; + uint32_t dev_db_end_offset; + int status = QLA_ERROR; + + memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); + + dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry)); + dev_db_end_offset = FLASH_OFFSET_DB_END; + + if (dev_db_start_offset > dev_db_end_offset) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s:Invalid DDB index %d", __func__, + ddb_index)); + goto exit_bootdb_failed; + } + + if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, + sizeof(*fw_ddb_entry)) != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash" + "failed\n", ha->host_no, __func__); + goto exit_bootdb_failed; + } + + if (fw_ddb_entry->cookie == DDB_VALID_COOKIE) + status = QLA_SUCCESS; + +exit_bootdb_failed: + return status; +} + +int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index) +{ + uint32_t dev_db_start_offset; + uint32_t dev_db_end_offset; + int status = QLA_ERROR; + + memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); + + if (is_qla40XX(ha)) { + dev_db_start_offset = FLASH_OFFSET_DB_INFO; + dev_db_end_offset = FLASH_OFFSET_DB_END; + } else { + dev_db_start_offset = FLASH_RAW_ACCESS_ADDR + + (ha->hw.flt_region_ddb << 2); + /* flt_ddb_size is DDB table size for both ports + * so divide it by 2 to calculate the offset for second port + */ + if (ha->port_num == 1) + dev_db_start_offset += (ha->hw.flt_ddb_size / 2); + + dev_db_end_offset = dev_db_start_offset + + (ha->hw.flt_ddb_size / 2); + } + + dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry)); + + if (dev_db_start_offset > dev_db_end_offset) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s:Invalid DDB index %d", __func__, + ddb_index)); + goto exit_fdb_failed; + } + + if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, + sizeof(*fw_ddb_entry)) != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash failed\n", + ha->host_no, 
__func__); + goto exit_fdb_failed; + } + + if (fw_ddb_entry->cookie == DDB_VALID_COOKIE) + status = QLA_SUCCESS; + +exit_fdb_failed: + return status; +} + +int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password, + uint16_t idx) +{ + int ret = 0; + int rval = QLA_ERROR; + uint32_t offset = 0, chap_size; + struct ql4_chap_table *chap_table; + dma_addr_t chap_dma; + + chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); + if (chap_table == NULL) + return -ENOMEM; + + chap_size = sizeof(struct ql4_chap_table); + + if (is_qla40XX(ha)) + offset = FLASH_CHAP_OFFSET | (idx * chap_size); + else { + offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); + /* flt_chap_size is CHAP table size for both ports + * so divide it by 2 to calculate the offset for second port + */ + if (ha->port_num == 1) + offset += (ha->hw.flt_chap_size / 2); + offset += (idx * chap_size); + } + + rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); + if (rval != QLA_SUCCESS) { + ret = -EINVAL; + goto exit_get_chap; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n", + __le16_to_cpu(chap_table->cookie))); + + if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) { + ql4_printk(KERN_ERR, ha, "No valid chap entry found\n"); + goto exit_get_chap; + } + + strscpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); + strscpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); + chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE); + +exit_get_chap: + dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma); + return ret; +} + +/** + * qla4xxx_set_chap - Make a chap entry at the given index + * @ha: pointer to adapter structure + * @username: CHAP username to set + * @password: CHAP password to set + * @idx: CHAP index at which to make the entry + * @bidi: type of chap entry (chap_in or chap_out) + * + * Create chap entry at the given index with the information provided. + * + * Note: Caller should acquire the chap lock before getting here. + **/ +int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, + uint16_t idx, int bidi) +{ + int ret = 0; + int rval = QLA_ERROR; + uint32_t offset = 0; + struct ql4_chap_table *chap_table; + uint32_t chap_size = 0; + dma_addr_t chap_dma; + + chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); + if (chap_table == NULL) { + ret = -ENOMEM; + goto exit_set_chap; + } + + if (bidi) + chap_table->flags |= BIT_6; /* peer */ + else + chap_table->flags |= BIT_7; /* local */ + chap_table->secret_len = strlen(password); + strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1); + strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1); + chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE); + + if (is_qla40XX(ha)) { + chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table); + offset = FLASH_CHAP_OFFSET; + } else { /* Single region contains CHAP info for both ports which is + * divided into half for each port. 
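The flash layout used by qla4xxx_flashdb_by_index() and qla4xxx_get_chap() above keeps one region holding the table for both ports, so port 1 starts half-way into the region and the per-entry offset is added on top. A user-space sketch of that arithmetic; the base, size, and entry size below are made-up example values, not the real flash layout:

#include <stdio.h>
#include <stdint.h>

/*
 * Offset of CHAP entry 'idx' for a given port, mirroring the logic in
 * qla4xxx_get_chap(): one flash region covers both ports, port 1 uses
 * the upper half, then entries are laid out back to back.
 */
static uint32_t chap_entry_offset(uint32_t region_base, uint32_t region_size,
                                  unsigned int port_num, uint16_t idx,
                                  uint32_t entry_size)
{
        uint32_t offset = region_base;

        if (port_num == 1)
                offset += region_size / 2;      /* second port: upper half */

        return offset + (uint32_t)idx * entry_size;
}

int main(void)
{
        /* Example values only; the real ones come from the flash layout table. */
        uint32_t base = 0x100000, size = 0x8000, entry = 128;

        printf("port0 idx3 -> 0x%x\n", chap_entry_offset(base, size, 0, 3, entry));
        printf("port1 idx3 -> 0x%x\n", chap_entry_offset(base, size, 1, 3, entry));
        return 0;
}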
+ */ + chap_size = ha->hw.flt_chap_size / 2; + offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); + if (ha->port_num == 1) + offset += chap_size; + } + + offset += (idx * sizeof(struct ql4_chap_table)); + rval = qla4xxx_set_flash(ha, chap_dma, offset, + sizeof(struct ql4_chap_table), + FLASH_OPT_RMW_COMMIT); + + if (rval == QLA_SUCCESS && ha->chap_list) { + /* Update ha chap_list cache */ + memcpy((struct ql4_chap_table *)ha->chap_list + idx, + chap_table, sizeof(struct ql4_chap_table)); + } + dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma); + if (rval != QLA_SUCCESS) + ret = -EINVAL; + +exit_set_chap: + return ret; +} + + +int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username, + char *password, uint16_t chap_index) +{ + int rval = QLA_ERROR; + struct ql4_chap_table *chap_table = NULL; + int max_chap_entries; + + if (!ha->chap_list) { + ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); + rval = QLA_ERROR; + goto exit_uni_chap; + } + + if (!username || !password) { + ql4_printk(KERN_ERR, ha, "No memory for username & secret\n"); + rval = QLA_ERROR; + goto exit_uni_chap; + } + + if (is_qla80XX(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + if (chap_index > max_chap_entries) { + ql4_printk(KERN_ERR, ha, "Invalid Chap index\n"); + rval = QLA_ERROR; + goto exit_uni_chap; + } + + mutex_lock(&ha->chap_sem); + chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index; + if (chap_table->cookie != cpu_to_le16(CHAP_VALID_COOKIE)) { + rval = QLA_ERROR; + goto exit_unlock_uni_chap; + } + + if (!(chap_table->flags & BIT_7)) { + ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n"); + rval = QLA_ERROR; + goto exit_unlock_uni_chap; + } + + strscpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN); + strscpy(username, chap_table->name, MAX_CHAP_NAME_LEN); + + rval = QLA_SUCCESS; + +exit_unlock_uni_chap: + mutex_unlock(&ha->chap_sem); +exit_uni_chap: + return rval; +} + +/** + * qla4xxx_get_chap_index - Get chap index given username and secret + * @ha: pointer to adapter structure + * @username: CHAP username to be searched + * @password: CHAP password to be searched + * @bidi: Is this a BIDI CHAP + * @chap_index: CHAP index to be returned + * + * Match the username and password in the chap_list, return the index if a + * match is found. If a match is not found then add the entry in FLASH and + * return the index at which entry is written in the FLASH. 
+ **/ +int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username, + char *password, int bidi, uint16_t *chap_index) +{ + int i, rval; + int free_index = -1; + int found_index = 0; + int max_chap_entries = 0; + struct ql4_chap_table *chap_table; + + if (is_qla80XX(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + if (!ha->chap_list) { + ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); + return QLA_ERROR; + } + + if (!username || !password) { + ql4_printk(KERN_ERR, ha, "Do not have username and psw\n"); + return QLA_ERROR; + } + + mutex_lock(&ha->chap_sem); + for (i = 0; i < max_chap_entries; i++) { + chap_table = (struct ql4_chap_table *)ha->chap_list + i; + if (chap_table->cookie != + cpu_to_le16(CHAP_VALID_COOKIE)) { + if (i > MAX_RESRV_CHAP_IDX && free_index == -1) + free_index = i; + continue; + } + if (bidi) { + if (chap_table->flags & BIT_7) + continue; + } else { + if (chap_table->flags & BIT_6) + continue; + } + if (!strncmp(chap_table->secret, password, + MAX_CHAP_SECRET_LEN) && + !strncmp(chap_table->name, username, + MAX_CHAP_NAME_LEN)) { + *chap_index = i; + found_index = 1; + break; + } + } + + /* If chap entry is not present and a free index is available then + * write the entry in flash + */ + if (!found_index && free_index != -1) { + rval = qla4xxx_set_chap(ha, username, password, + free_index, bidi); + if (!rval) { + *chap_index = free_index; + found_index = 1; + } + } + + mutex_unlock(&ha->chap_sem); + + if (found_index) + return QLA_SUCCESS; + return QLA_ERROR; +} + +int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha, + uint16_t fw_ddb_index, + uint16_t connection_id, + uint16_t option) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status = QLA_SUCCESS; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT; + mbox_cmd[1] = fw_ddb_index; + mbox_cmd[2] = connection_id; + mbox_cmd[3] = option; + + status = qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_CONN_CLOSE " + "option %04x failed w/ status %04X %04X\n", + __func__, option, mbox_sts[0], mbox_sts[1])); + } + return status; +} + +/** + * qla4_84xx_extend_idc_tmo - Extend IDC Timeout. + * @ha: Pointer to host adapter structure. + * @ext_tmo: idc timeout value + * + * Requests firmware to extend the idc timeout value. 
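qla4xxx_get_chap_index() above scans the cached CHAP table for an entry whose cookie is valid, whose direction bit matches (BIT_6 peer vs BIT_7 local), and whose name/secret match; failing that it remembers the first free slot past the reserved range so a new entry can be written there. A condensed user-space sketch of that search; struct chap_entry, the flag names, and the reserved-index constant are simplified stand-ins, and strcmp replaces the bounded strncmp of the driver:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define VALID_COOKIE    0x11
#define FLAG_PEER       (1u << 6)       /* BIDI/peer entry (BIT_6) */
#define FLAG_LOCAL      (1u << 7)       /* unidirectional/local entry (BIT_7) */
#define RESERVED_IDX    2               /* stand-in for MAX_RESRV_CHAP_IDX */

struct chap_entry {                     /* simplified ql4_chap_table */
        uint16_t cookie;
        uint8_t  flags;
        char name[32];
        char secret[32];
};

/* Return a matching index, or encode the first usable free slot as -slot-1. */
static int find_chap(struct chap_entry *tbl, int n, const char *user,
                     const char *pass, int bidi)
{
        int i, free_idx = -1;

        for (i = 0; i < n; i++) {
                if (tbl[i].cookie != VALID_COOKIE) {
                        if (i > RESERVED_IDX && free_idx == -1)
                                free_idx = i;   /* remember first free slot */
                        continue;
                }
                /* Skip entries of the wrong direction. */
                if (bidi ? (tbl[i].flags & FLAG_LOCAL)
                         : (tbl[i].flags & FLAG_PEER))
                        continue;
                if (!strcmp(tbl[i].name, user) && !strcmp(tbl[i].secret, pass))
                        return i;       /* existing entry reused */
        }
        return free_idx < 0 ? -1 : -free_idx - 1;
}

int main(void)
{
        struct chap_entry tbl[8] = { 0 };

        tbl[4].cookie = VALID_COOKIE;
        tbl[4].flags = FLAG_LOCAL;
        strcpy(tbl[4].name, "initiator");
        strcpy(tbl[4].secret, "secret123");

        /* 4 = match; a negative value encodes a free slot (-ret - 1). */
        printf("match -> %d\n", find_chap(tbl, 8, "initiator", "secret123", 0));
        printf("new   -> %d\n", find_chap(tbl, 8, "other", "pw", 0));
        return 0;
}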
+ **/ +static int qla4_84xx_extend_idc_tmo(struct scsi_qla_host *ha, uint32_t ext_tmo) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + ext_tmo &= 0xf; + + mbox_cmd[0] = MBOX_CMD_IDC_TIME_EXTEND; + mbox_cmd[1] = ((ha->idc_info.request_desc & 0xfffff0ff) | + (ext_tmo << 8)); /* new timeout */ + mbox_cmd[2] = ha->idc_info.info1; + mbox_cmd[3] = ha->idc_info.info2; + mbox_cmd[4] = ha->idc_info.info3; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, + mbox_cmd, mbox_sts); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: %s: failed status %04X\n", + ha->host_no, __func__, mbox_sts[0])); + return QLA_ERROR; + } else { + ql4_printk(KERN_INFO, ha, "%s: IDC timeout extended by %d secs\n", + __func__, ext_tmo); + } + + return QLA_SUCCESS; +} + +int qla4xxx_disable_acb(struct scsi_qla_host *ha) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status = QLA_SUCCESS; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_DISABLE_ACB; + + status = qla4xxx_mailbox_command(ha, 8, 5, &mbox_cmd[0], &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB " + "failed w/ status %04X %04X %04X", __func__, + mbox_sts[0], mbox_sts[1], mbox_sts[2])); + } else { + if (is_qla8042(ha) && + test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) && + (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) { + /* + * Disable ACB mailbox command takes time to complete + * based on the total number of targets connected. + * For 512 targets, it took approximately 5 secs to + * complete. Setting the timeout value to 8, with the 3 + * secs buffer. 
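qla4_84xx_extend_idc_tmo() above re-posts the saved IDC request descriptor with only its 4-bit timeout field (bits 8-11) replaced, which is a plain clear-then-insert on that nibble. A minimal sketch of just that masking:

#include <stdio.h>
#include <stdint.h>

/* Replace the 4-bit IDC timeout field (bits 8..11) of a request descriptor. */
static uint32_t set_idc_timeout(uint32_t request_desc, uint32_t ext_tmo)
{
        ext_tmo &= 0xf;                         /* field is 4 bits wide */
        return (request_desc & 0xfffff0ff) | (ext_tmo << 8);
}

int main(void)
{
        uint32_t desc = 0x12345678;             /* example descriptor value */

        printf("0x%08x -> 0x%08x (timeout=8)\n", desc, set_idc_timeout(desc, 8));
        return 0;
}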
+ */ + qla4_84xx_extend_idc_tmo(ha, IDC_EXTEND_TOV); + if (!wait_for_completion_timeout(&ha->disable_acb_comp, + IDC_EXTEND_TOV * HZ)) { + ql4_printk(KERN_WARNING, ha, "%s: Disable ACB Completion not received\n", + __func__); + } + } + } + return status; +} + +int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma, + uint32_t acb_type, uint32_t len) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status = QLA_SUCCESS; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_GET_ACB; + mbox_cmd[1] = acb_type; + mbox_cmd[2] = LSDW(acb_dma); + mbox_cmd[3] = MSDW(acb_dma); + mbox_cmd[4] = len; + + status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_GET_ACB " + "failed w/ status %04X\n", __func__, + mbox_sts[0])); + } + return status; +} + +int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, + uint32_t *mbox_sts, dma_addr_t acb_dma) +{ + int status = QLA_SUCCESS; + + memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT); + memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); + mbox_cmd[0] = MBOX_CMD_SET_ACB; + mbox_cmd[1] = 0; /* Primary ACB */ + mbox_cmd[2] = LSDW(acb_dma); + mbox_cmd[3] = MSDW(acb_dma); + mbox_cmd[4] = sizeof(struct addr_ctrl_blk); + + status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_SET_ACB " + "failed w/ status %04X\n", __func__, + mbox_sts[0])); + } + return status; +} + +int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry, + struct iscsi_cls_conn *cls_conn, + uint32_t *mbx_sts) +{ + struct dev_db_entry *fw_ddb_entry; + struct iscsi_conn *conn; + struct iscsi_session *sess; + struct qla_conn *qla_conn; + struct sockaddr *dst_addr; + dma_addr_t fw_ddb_entry_dma; + int status = QLA_SUCCESS; + int rval = 0; + struct sockaddr_in *addr; + struct sockaddr_in6 *addr6; + char *ip; + uint16_t iscsi_opts = 0; + uint32_t options = 0; + uint16_t idx, *ptid; + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer.\n", + __func__)); + rval = -ENOMEM; + goto exit_set_param_no_free; + } + + conn = cls_conn->dd_data; + qla_conn = conn->dd_data; + sess = conn->session; + dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr; + + if (dst_addr->sa_family == AF_INET6) + options |= IPV6_DEFAULT_DDB_ENTRY; + + status = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); + if (status == QLA_ERROR) { + rval = -EINVAL; + goto exit_set_param; + } + + ptid = (uint16_t *)&fw_ddb_entry->isid[1]; + *ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id); + + DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%pmR]\n", fw_ddb_entry->isid)); + + iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options); + memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias)); + + memset(fw_ddb_entry->iscsi_name, 0, sizeof(fw_ddb_entry->iscsi_name)); + + if (sess->targetname != NULL) { + memcpy(fw_ddb_entry->iscsi_name, sess->targetname, + min(strlen(sess->targetname), + sizeof(fw_ddb_entry->iscsi_name))); + } + + memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr)); + memset(fw_ddb_entry->tgt_addr, 0, sizeof(fw_ddb_entry->tgt_addr)); + + fw_ddb_entry->options = DDB_OPT_TARGET | 
DDB_OPT_AUTO_SENDTGTS_DISABLE; + + if (dst_addr->sa_family == AF_INET) { + addr = (struct sockaddr_in *)dst_addr; + ip = (char *)&addr->sin_addr; + memcpy(fw_ddb_entry->ip_addr, ip, IP_ADDR_LEN); + fw_ddb_entry->port = cpu_to_le16(ntohs(addr->sin_port)); + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Destination Address [%pI4]: index [%d]\n", + __func__, fw_ddb_entry->ip_addr, + ddb_entry->fw_ddb_index)); + } else if (dst_addr->sa_family == AF_INET6) { + addr6 = (struct sockaddr_in6 *)dst_addr; + ip = (char *)&addr6->sin6_addr; + memcpy(fw_ddb_entry->ip_addr, ip, IPv6_ADDR_LEN); + fw_ddb_entry->port = cpu_to_le16(ntohs(addr6->sin6_port)); + fw_ddb_entry->options |= DDB_OPT_IPV6_DEVICE; + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Destination Address [%pI6]: index [%d]\n", + __func__, fw_ddb_entry->ip_addr, + ddb_entry->fw_ddb_index)); + } else { + ql4_printk(KERN_ERR, ha, + "%s: Failed to get IP Address\n", + __func__); + rval = -EINVAL; + goto exit_set_param; + } + + /* CHAP */ + if (sess->username != NULL && sess->password != NULL) { + if (strlen(sess->username) && strlen(sess->password)) { + iscsi_opts |= BIT_7; + + rval = qla4xxx_get_chap_index(ha, sess->username, + sess->password, + LOCAL_CHAP, &idx); + if (rval) + goto exit_set_param; + + fw_ddb_entry->chap_tbl_idx = cpu_to_le16(idx); + } + } + + if (sess->username_in != NULL && sess->password_in != NULL) { + /* Check if BIDI CHAP */ + if (strlen(sess->username_in) && strlen(sess->password_in)) { + iscsi_opts |= BIT_4; + + rval = qla4xxx_get_chap_index(ha, sess->username_in, + sess->password_in, + BIDI_CHAP, &idx); + if (rval) + goto exit_set_param; + } + } + + if (sess->initial_r2t_en) + iscsi_opts |= BIT_10; + + if (sess->imm_data_en) + iscsi_opts |= BIT_11; + + fw_ddb_entry->iscsi_options = cpu_to_le16(iscsi_opts); + + if (conn->max_recv_dlength) + fw_ddb_entry->iscsi_max_rcv_data_seg_len = + cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS)); + + if (sess->max_r2t) + fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t); + + if (sess->first_burst) + fw_ddb_entry->iscsi_first_burst_len = + cpu_to_le16((sess->first_burst / BYTE_UNITS)); + + if (sess->max_burst) + fw_ddb_entry->iscsi_max_burst_len = + cpu_to_le16((sess->max_burst / BYTE_UNITS)); + + if (sess->time2wait) + fw_ddb_entry->iscsi_def_time2wait = + cpu_to_le16(sess->time2wait); + + if (sess->time2retain) + fw_ddb_entry->iscsi_def_time2retain = + cpu_to_le16(sess->time2retain); + + status = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index, + fw_ddb_entry_dma, mbx_sts); + + if (status != QLA_SUCCESS) + rval = -EINVAL; +exit_set_param: + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); +exit_set_param_no_free: + return rval; +} + +int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index, + uint16_t stats_size, dma_addr_t stats_dma) +{ + int status = QLA_SUCCESS; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT); + memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); + mbox_cmd[0] = MBOX_CMD_GET_MANAGEMENT_DATA; + mbox_cmd[1] = fw_ddb_index; + mbox_cmd[2] = LSDW(stats_dma); + mbox_cmd[3] = MSDW(stats_dma); + mbox_cmd[4] = stats_size; + + status = qla4xxx_mailbox_command(ha, 5, 1, &mbox_cmd[0], &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_WARNING, ha, + "%s: MBOX_CMD_GET_MANAGEMENT_DATA " + "failed w/ status %04X\n", __func__, + mbox_sts[0])); + } + return status; +} + +int 
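qla4xxx_set_param_ddbentry() above folds the iSCSI session settings into the DDB's 16-bit iscsi_options word (CHAP in bit 7, BIDI CHAP in bit 4, InitialR2T in bit 10, ImmediateData in bit 11) and scales the byte-count parameters by BYTE_UNITS before handing them to the firmware. A sketch of that packing with simplified inputs; BYTE_UNITS is assumed to be 512 here, the real value comes from the driver headers:

#include <stdio.h>
#include <stdint.h>

#define BYTE_UNITS      512     /* assumed value; see the driver headers */

struct sess_params {
        int chap, bidi_chap, initial_r2t, imm_data;
        uint32_t max_recv_dlength;      /* bytes */
};

/* Fold session settings into the firmware's 16-bit iscsi_options word. */
static uint16_t pack_iscsi_opts(const struct sess_params *p)
{
        uint16_t opts = 0;

        if (p->chap)
                opts |= 1u << 7;        /* CHAP authentication enabled */
        if (p->bidi_chap)
                opts |= 1u << 4;        /* bidirectional CHAP */
        if (p->initial_r2t)
                opts |= 1u << 10;       /* InitialR2T */
        if (p->imm_data)
                opts |= 1u << 11;       /* ImmediateData */
        return opts;
}

int main(void)
{
        struct sess_params p = { 1, 0, 1, 1, 262144 };

        printf("iscsi_options = 0x%04x\n", pack_iscsi_opts(&p));
        printf("max_rcv_data_seg_len = %u (in %u-byte units)\n",
               p.max_recv_dlength / BYTE_UNITS, BYTE_UNITS);
        return 0;
}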
qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx, + uint32_t ip_idx, uint32_t *sts) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status = QLA_SUCCESS; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + mbox_cmd[0] = MBOX_CMD_GET_IP_ADDR_STATE; + mbox_cmd[1] = acb_idx; + mbox_cmd[2] = ip_idx; + + status = qla4xxx_mailbox_command(ha, 3, 8, &mbox_cmd[0], &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: " + "MBOX_CMD_GET_IP_ADDR_STATE failed w/ " + "status %04X\n", __func__, mbox_sts[0])); + } + memcpy(sts, mbox_sts, sizeof(mbox_sts)); + return status; +} + +int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma, + uint32_t offset, uint32_t size) +{ + int status = QLA_SUCCESS; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_GET_NVRAM; + mbox_cmd[1] = LSDW(nvram_dma); + mbox_cmd[2] = MSDW(nvram_dma); + mbox_cmd[3] = offset; + mbox_cmd[4] = size; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], + &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " + "status %04X\n", ha->host_no, __func__, + mbox_sts[0])); + } + return status; +} + +int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma, + uint32_t offset, uint32_t size) +{ + int status = QLA_SUCCESS; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_SET_NVRAM; + mbox_cmd[1] = LSDW(nvram_dma); + mbox_cmd[2] = MSDW(nvram_dma); + mbox_cmd[3] = offset; + mbox_cmd[4] = size; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], + &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " + "status %04X\n", ha->host_no, __func__, + mbox_sts[0])); + } + return status; +} + +int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha, + uint32_t region, uint32_t field0, + uint32_t field1) +{ + int status = QLA_SUCCESS; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_RESTORE_FACTORY_DEFAULTS; + mbox_cmd[3] = region; + mbox_cmd[4] = field0; + mbox_cmd[5] = field1; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], + &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " + "status %04X\n", ha->host_no, __func__, + mbox_sts[0])); + } + return status; +} + +/** + * qla4_8xxx_set_param - set driver version in firmware. + * @ha: Pointer to host adapter structure. 
+ * @param: Parameter to set i.e driver version + **/ +int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + uint32_t status; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_SET_PARAM; + if (param == SET_DRVR_VERSION) { + mbox_cmd[1] = SET_DRVR_VERSION; + strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION, + MAX_DRVR_VER_LEN - 1); + } else { + ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n", + __func__, param); + status = QLA_ERROR; + goto exit_set_param; + } + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, mbox_cmd, + mbox_sts); + if (status == QLA_ERROR) + ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", + __func__, mbox_sts[0]); + +exit_set_param: + return status; +} + +/** + * qla4_83xx_post_idc_ack - post IDC ACK + * @ha: Pointer to host adapter structure. + * + * Posts IDC ACK for IDC Request Notification AEN. + **/ +int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_IDC_ACK; + mbox_cmd[1] = ha->idc_info.request_desc; + mbox_cmd[2] = ha->idc_info.info1; + mbox_cmd[3] = ha->idc_info.info2; + mbox_cmd[4] = ha->idc_info.info3; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, + mbox_cmd, mbox_sts); + if (status == QLA_ERROR) + ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, + mbox_sts[0]); + else + ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n", __func__); + + return status; +} + +int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + struct addr_ctrl_blk *acb = NULL; + uint32_t acb_len = sizeof(struct addr_ctrl_blk); + int rval = QLA_SUCCESS; + dma_addr_t acb_dma; + + acb = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct addr_ctrl_blk), + &acb_dma, GFP_KERNEL); + if (!acb) { + ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", __func__); + rval = QLA_ERROR; + goto exit_config_acb; + } + memset(acb, 0, acb_len); + + switch (acb_config) { + case ACB_CONFIG_DISABLE: + rval = qla4xxx_get_acb(ha, acb_dma, 0, acb_len); + if (rval != QLA_SUCCESS) + goto exit_free_acb; + + rval = qla4xxx_disable_acb(ha); + if (rval != QLA_SUCCESS) + goto exit_free_acb; + + if (!ha->saved_acb) + ha->saved_acb = kzalloc(acb_len, GFP_KERNEL); + + if (!ha->saved_acb) { + ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", + __func__); + rval = QLA_ERROR; + goto exit_free_acb; + } + memcpy(ha->saved_acb, acb, acb_len); + break; + case ACB_CONFIG_SET: + + if (!ha->saved_acb) { + ql4_printk(KERN_ERR, ha, "%s: Can't set ACB, Saved ACB not available\n", + __func__); + rval = QLA_ERROR; + goto exit_free_acb; + } + + memcpy(acb, ha->saved_acb, acb_len); + + rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); + if (rval != QLA_SUCCESS) + goto exit_free_acb; + + break; + default: + ql4_printk(KERN_ERR, ha, "%s: Invalid ACB Configuration\n", + __func__); + } + +exit_free_acb: + dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb, + acb_dma); +exit_config_acb: + if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) { + kfree(ha->saved_acb); + ha->saved_acb = NULL; + } + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s %s\n", __func__, + rval == QLA_SUCCESS ? 
"SUCCEEDED" : "FAILED")); + return rval; +} + +int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_GET_PORT_CONFIG; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, + mbox_cmd, mbox_sts); + if (status == QLA_SUCCESS) + *config = mbox_sts[1]; + else + ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, + mbox_sts[0]); + + return status; +} + +int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_SET_PORT_CONFIG; + mbox_cmd[1] = *config; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, + mbox_cmd, mbox_sts); + if (status != QLA_SUCCESS) + ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, + mbox_sts[0]); + + return status; +} diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c new file mode 100644 index 000000000..f08a5abcb --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_nvram.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ + +#include "ql4_def.h" +#include "ql4_glbl.h" +#include "ql4_dbg.h" +#include "ql4_inline.h" + +static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha) +{ + writel(cmd, isp_nvram(ha)); + readl(isp_nvram(ha)); + udelay(1); +} + +static inline int eeprom_size(struct scsi_qla_host *ha) +{ + return is_qla4010(ha) ? FM93C66A_SIZE_16 : FM93C86A_SIZE_16; +} + +static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha) +{ + return is_qla4010(ha) ? FM93C56A_NO_ADDR_BITS_16 : + FM93C86A_NO_ADDR_BITS_16 ; +} + +static inline int eeprom_no_data_bits(struct scsi_qla_host *ha) +{ + return FM93C56A_DATA_BITS_16; +} + +static int fm93c56a_select(struct scsi_qla_host * ha) +{ + DEBUG5(printk(KERN_ERR "fm93c56a_select:\n")); + + ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000; + eeprom_cmd(ha->eeprom_cmd_data, ha); + return 1; +} + +static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr) +{ + int i; + int mask; + int dataBit; + int previousBit; + + /* Clock in a zero, then do the start bit. */ + eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, ha); + + eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 | + AUBURN_EEPROM_CLK_RISE, ha); + eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 | + AUBURN_EEPROM_CLK_FALL, ha); + + mask = 1 << (FM93C56A_CMD_BITS - 1); + + /* Force the previous data bit to be different. */ + previousBit = 0xffff; + for (i = 0; i < FM93C56A_CMD_BITS; i++) { + dataBit = + (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0; + if (previousBit != dataBit) { + + /* + * If the bit changed, then change the DO state to + * match. + */ + eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha); + previousBit = dataBit; + } + eeprom_cmd(ha->eeprom_cmd_data | dataBit | + AUBURN_EEPROM_CLK_RISE, ha); + eeprom_cmd(ha->eeprom_cmd_data | dataBit | + AUBURN_EEPROM_CLK_FALL, ha); + + cmd = cmd << 1; + } + mask = 1 << (eeprom_no_addr_bits(ha) - 1); + + /* Force the previous data bit to be different. */ + previousBit = 0xffff; + for (i = 0; i < eeprom_no_addr_bits(ha); i++) { + dataBit = addr & mask ? 
AUBURN_EEPROM_DO_1 : + AUBURN_EEPROM_DO_0; + if (previousBit != dataBit) { + /* + * If the bit changed, then change the DO state to + * match. + */ + eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha); + + previousBit = dataBit; + } + eeprom_cmd(ha->eeprom_cmd_data | dataBit | + AUBURN_EEPROM_CLK_RISE, ha); + eeprom_cmd(ha->eeprom_cmd_data | dataBit | + AUBURN_EEPROM_CLK_FALL, ha); + + addr = addr << 1; + } + return 1; +} + +static int fm93c56a_deselect(struct scsi_qla_host * ha) +{ + ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000; + eeprom_cmd(ha->eeprom_cmd_data, ha); + return 1; +} + +static int fm93c56a_datain(struct scsi_qla_host * ha, unsigned short *value) +{ + int i; + int data = 0; + int dataBit; + + /* Read the data bits + * The first bit is a dummy. Clock right over it. */ + for (i = 0; i < eeprom_no_data_bits(ha); i++) { + eeprom_cmd(ha->eeprom_cmd_data | + AUBURN_EEPROM_CLK_RISE, ha); + eeprom_cmd(ha->eeprom_cmd_data | + AUBURN_EEPROM_CLK_FALL, ha); + + dataBit = (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0; + + data = (data << 1) | dataBit; + } + + *value = data; + return 1; +} + +static int eeprom_readword(int eepromAddr, u16 * value, + struct scsi_qla_host * ha) +{ + fm93c56a_select(ha); + fm93c56a_cmd(ha, FM93C56A_READ, eepromAddr); + fm93c56a_datain(ha, value); + fm93c56a_deselect(ha); + return 1; +} + +/* Hardware_lock must be set before calling */ +u16 rd_nvram_word(struct scsi_qla_host * ha, int offset) +{ + u16 val = 0; + + /* NOTE: NVRAM uses half-word addresses */ + eeprom_readword(offset, &val, ha); + return val; +} + +u8 rd_nvram_byte(struct scsi_qla_host *ha, int offset) +{ + u16 val = 0; + u8 rval = 0; + int index = 0; + + if (offset & 0x1) + index = (offset - 1) / 2; + else + index = offset / 2; + + val = le16_to_cpu(rd_nvram_word(ha, index)); + + if (offset & 0x1) + rval = (u8)((val & 0xff00) >> 8); + else + rval = (u8)((val & 0x00ff)); + + return rval; +} + +int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha) +{ + int status = QLA_ERROR; + uint16_t checksum = 0; + uint32_t index; + unsigned long flags; + + spin_lock_irqsave(&ha->hardware_lock, flags); + for (index = 0; index < eeprom_size(ha); index++) + checksum += rd_nvram_word(ha, index); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (checksum == 0) + status = QLA_SUCCESS; + + return status; +} + +/************************************************************************* + * + * Hardware Semaphore routines + * + *************************************************************************/ +int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits) +{ + uint32_t value; + unsigned long flags; + unsigned int seconds = 30; + + DEBUG2(printk("scsi%ld : Trying to get SEM lock - mask= 0x%x, code = " + "0x%x\n", ha->host_no, sem_mask, sem_bits)); + do { + spin_lock_irqsave(&ha->hardware_lock, flags); + writel((sem_mask | sem_bits), isp_semaphore(ha)); + value = readw(isp_semaphore(ha)); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + if ((value & (sem_mask >> 16)) == sem_bits) { + DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, " + "code = 0x%x\n", ha->host_no, + sem_mask, sem_bits)); + return QLA_SUCCESS; + } + ssleep(1); + } while (--seconds); + return QLA_ERROR; +} + +void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask) +{ + unsigned long flags; + + spin_lock_irqsave(&ha->hardware_lock, flags); + writel(sem_mask, isp_semaphore(ha)); + readl(isp_semaphore(ha)); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + 
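The NVRAM part is addressed in 16-bit words, so rd_nvram_byte() above maps a byte offset to word index offset/2 and then takes the low byte for even offsets and the high byte for odd ones. A user-space sketch of that mapping; nvram_words is a fake little-endian backing array used only for illustration:

#include <stdio.h>
#include <stdint.h>

/* Fake NVRAM image, one 16-bit word per word address. */
static const uint16_t nvram_words[] = { 0x2211, 0x4433, 0x6655 };

/* Byte 'offset' lives in word offset/2: low byte if even, high byte if odd. */
static uint8_t rd_byte(uint32_t offset)
{
        uint16_t word = nvram_words[offset / 2];

        return (offset & 1) ? (uint8_t)(word >> 8) : (uint8_t)(word & 0xff);
}

int main(void)
{
        uint32_t i;

        for (i = 0; i < 6; i++)
                printf("byte[%u] = 0x%02x\n", i, rd_byte(i));
        /* Prints 11 22 33 44 55 66 for the image above. */
        return 0;
}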
DEBUG2(printk("scsi%ld : UNLOCK SEM - mask= 0x%x\n", ha->host_no, + sem_mask)); +} + +int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits) +{ + uint32_t value; + unsigned long flags; + + spin_lock_irqsave(&ha->hardware_lock, flags); + writel((sem_mask | sem_bits), isp_semaphore(ha)); + value = readw(isp_semaphore(ha)); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + if ((value & (sem_mask >> 16)) == sem_bits) { + DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, code = " + "0x%x, sema code=0x%x\n", ha->host_no, + sem_mask, sem_bits, value)); + return 1; + } + return 0; +} diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h new file mode 100644 index 000000000..b96c06f50 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_nvram.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ + +#ifndef _QL4XNVRM_H_ +#define _QL4XNVRM_H_ + +/** + * AM29LV Flash definitions + **/ +#define FM93C56A_SIZE_8 0x100 +#define FM93C56A_SIZE_16 0x80 +#define FM93C66A_SIZE_8 0x200 +#define FM93C66A_SIZE_16 0x100/* 4010 */ +#define FM93C86A_SIZE_16 0x400/* 4022 */ + +#define FM93C56A_START 0x1 + +/* Commands */ +#define FM93C56A_READ 0x2 +#define FM93C56A_WEN 0x0 +#define FM93C56A_WRITE 0x1 +#define FM93C56A_WRITE_ALL 0x0 +#define FM93C56A_WDS 0x0 +#define FM93C56A_ERASE 0x3 +#define FM93C56A_ERASE_ALL 0x0 + +/* Command Extensions */ +#define FM93C56A_WEN_EXT 0x3 +#define FM93C56A_WRITE_ALL_EXT 0x1 +#define FM93C56A_WDS_EXT 0x0 +#define FM93C56A_ERASE_ALL_EXT 0x2 + +/* Address Bits */ +#define FM93C56A_NO_ADDR_BITS_16 8 /* 4010 */ +#define FM93C56A_NO_ADDR_BITS_8 9 /* 4010 */ +#define FM93C86A_NO_ADDR_BITS_16 10 /* 4022 */ + +/* Data Bits */ +#define FM93C56A_DATA_BITS_16 16 +#define FM93C56A_DATA_BITS_8 8 + +/* Special Bits */ +#define FM93C56A_READ_DUMMY_BITS 1 +#define FM93C56A_READY 0 +#define FM93C56A_BUSY 1 +#define FM93C56A_CMD_BITS 2 + +/* Auburn Bits */ +#define AUBURN_EEPROM_DI 0x8 +#define AUBURN_EEPROM_DI_0 0x0 +#define AUBURN_EEPROM_DI_1 0x8 +#define AUBURN_EEPROM_DO 0x4 +#define AUBURN_EEPROM_DO_0 0x0 +#define AUBURN_EEPROM_DO_1 0x4 +#define AUBURN_EEPROM_CS 0x2 +#define AUBURN_EEPROM_CS_0 0x0 +#define AUBURN_EEPROM_CS_1 0x2 +#define AUBURN_EEPROM_CLK_RISE 0x1 +#define AUBURN_EEPROM_CLK_FALL 0x0 + +/**/ +/* EEPROM format */ +/**/ +struct bios_params { + uint16_t SpinUpDelay:1; + uint16_t BIOSDisable:1; + uint16_t MMAPEnable:1; + uint16_t BootEnable:1; + uint16_t Reserved0:12; + uint8_t bootID0:7; + uint8_t bootID0Valid:1; + uint8_t bootLUN0[8]; + uint8_t bootID1:7; + uint8_t bootID1Valid:1; + uint8_t bootLUN1[8]; + uint16_t MaxLunsPerTarget; + uint8_t Reserved1[10]; +}; + +struct eeprom_port_cfg { + + /* MTU MAC 0 */ + u16 etherMtu_mac; + + /* Flow Control MAC 0 */ + u16 pauseThreshold_mac; + u16 resumeThreshold_mac; + u16 reserved[13]; +}; + +struct eeprom_function_cfg { + u8 reserved[30]; + + /* MAC ADDR */ + u8 macAddress[6]; + u8 macAddressSecondary[6]; + u16 subsysVendorId; + u16 subsysDeviceId; +}; + +struct eeprom_data { + union { + struct { /* isp4010 */ + u8 asic_id[4]; /* x00 */ + u8 version; /* x04 */ + u8 reserved; /* x05 */ + u16 board_id; /* x06 */ +#define EEPROM_BOARDID_ELDORADO 1 +#define EEPROM_BOARDID_PLACER 2 + +#define EEPROM_SERIAL_NUM_SIZE 16 + u8 serial_number[EEPROM_SERIAL_NUM_SIZE]; /* x08 */ + + /* ExtHwConfig: */ + /* Offset = 24bytes + * + * | SSRAM Size| |ST|PD|SDRAM SZ| W| B| SP | | + * |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 
3| 2| 1| 0| + * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + */ + u16 ext_hw_conf; /* x18 */ + u8 mac0[6]; /* x1A */ + u8 mac1[6]; /* x20 */ + u8 mac2[6]; /* x26 */ + u8 mac3[6]; /* x2C */ + u16 etherMtu; /* x32 */ + u16 macConfig; /* x34 */ +#define MAC_CONFIG_ENABLE_ANEG 0x0001 +#define MAC_CONFIG_ENABLE_PAUSE 0x0002 + u16 phyConfig; /* x36 */ +#define PHY_CONFIG_PHY_ADDR_MASK 0x1f +#define PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20 + u16 reserved_56; /* x38 */ + +#define EEPROM_UNUSED_1_SIZE 2 + u8 unused_1[EEPROM_UNUSED_1_SIZE]; /* x3A */ + u16 bufletSize; /* x3C */ + u16 bufletCount; /* x3E */ + u16 bufletPauseThreshold; /* x40 */ + u16 tcpWindowThreshold50; /* x42 */ + u16 tcpWindowThreshold25; /* x44 */ + u16 tcpWindowThreshold0; /* x46 */ + u16 ipHashTableBaseHi; /* x48 */ + u16 ipHashTableBaseLo; /* x4A */ + u16 ipHashTableSize; /* x4C */ + u16 tcpHashTableBaseHi; /* x4E */ + u16 tcpHashTableBaseLo; /* x50 */ + u16 tcpHashTableSize; /* x52 */ + u16 ncbTableBaseHi; /* x54 */ + u16 ncbTableBaseLo; /* x56 */ + u16 ncbTableSize; /* x58 */ + u16 drbTableBaseHi; /* x5A */ + u16 drbTableBaseLo; /* x5C */ + u16 drbTableSize; /* x5E */ + +#define EEPROM_UNUSED_2_SIZE 4 + u8 unused_2[EEPROM_UNUSED_2_SIZE]; /* x60 */ + u16 ipReassemblyTimeout; /* x64 */ + u16 tcpMaxWindowSizeHi; /* x66 */ + u16 tcpMaxWindowSizeLo; /* x68 */ + u32 net_ip_addr0; /* x6A Added for TOE + * functionality. */ + u32 net_ip_addr1; /* x6E */ + u32 scsi_ip_addr0; /* x72 */ + u32 scsi_ip_addr1; /* x76 */ +#define EEPROM_UNUSED_3_SIZE 128 /* changed from 144 to account + * for ip addresses */ + u8 unused_3[EEPROM_UNUSED_3_SIZE]; /* x7A */ + u16 subsysVendorId_f0; /* xFA */ + u16 subsysDeviceId_f0; /* xFC */ + + /* Address = 0x7F */ +#define FM93C56A_SIGNATURE 0x9356 +#define FM93C66A_SIGNATURE 0x9366 + u16 signature; /* xFE */ + +#define EEPROM_UNUSED_4_SIZE 250 + u8 unused_4[EEPROM_UNUSED_4_SIZE]; /* x100 */ + u16 subsysVendorId_f1; /* x1FA */ + u16 subsysDeviceId_f1; /* x1FC */ + u16 checksum; /* x1FE */ + } __attribute__ ((packed)) isp4010; + struct { /* isp4022 */ + u8 asicId[4]; /* x00 */ + u8 version; /* x04 */ + u8 reserved_5; /* x05 */ + u16 boardId; /* x06 */ + u8 boardIdStr[16]; /* x08 */ + u8 serialNumber[16]; /* x18 */ + + /* External Hardware Configuration */ + u16 ext_hw_conf; /* x28 */ + + /* MAC 0 CONFIGURATION */ + struct eeprom_port_cfg macCfg_port0; /* x2A */ + + /* MAC 1 CONFIGURATION */ + struct eeprom_port_cfg macCfg_port1; /* x4A */ + + /* DDR SDRAM Configuration */ + u16 bufletSize; /* x6A */ + u16 bufletCount; /* x6C */ + u16 tcpWindowThreshold50; /* x6E */ + u16 tcpWindowThreshold25; /* x70 */ + u16 tcpWindowThreshold0; /* x72 */ + u16 ipHashTableBaseHi; /* x74 */ + u16 ipHashTableBaseLo; /* x76 */ + u16 ipHashTableSize; /* x78 */ + u16 tcpHashTableBaseHi; /* x7A */ + u16 tcpHashTableBaseLo; /* x7C */ + u16 tcpHashTableSize; /* x7E */ + u16 ncbTableBaseHi; /* x80 */ + u16 ncbTableBaseLo; /* x82 */ + u16 ncbTableSize; /* x84 */ + u16 drbTableBaseHi; /* x86 */ + u16 drbTableBaseLo; /* x88 */ + u16 drbTableSize; /* x8A */ + u16 reserved_142[4]; /* x8C */ + + /* TCP/IP Parameters */ + u16 ipReassemblyTimeout; /* x94 */ + u16 tcpMaxWindowSize; /* x96 */ + u16 ipSecurity; /* x98 */ + u8 reserved_156[294]; /* x9A */ + u16 qDebug[8]; /* QLOGIC USE ONLY x1C0 */ + struct eeprom_function_cfg funcCfg_fn0; /* x1D0 */ + u16 reserved_510; /* x1FE */ + + /* Address = 512 */ + u8 oemSpace[432]; /* x200 */ + struct bios_params sBIOSParams_fn1; /* x3B0 */ + struct eeprom_function_cfg funcCfg_fn1; /* x3D0 */ 
+ u16 reserved_1022; /* x3FE */ + + /* Address = 1024 */ + u8 reserved_1024[464]; /* x400 */ + struct eeprom_function_cfg funcCfg_fn2; /* x5D0 */ + u16 reserved_1534; /* x5FE */ + + /* Address = 1536 */ + u8 reserved_1536[432]; /* x600 */ + struct bios_params sBIOSParams_fn3; /* x7B0 */ + struct eeprom_function_cfg funcCfg_fn3; /* x7D0 */ + u16 checksum; /* x7FE */ + } __attribute__ ((packed)) isp4022; + }; +}; + + +#endif /* _QL4XNVRM_H_ */ diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c new file mode 100644 index 000000000..47adff9f0 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -0,0 +1,4209 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ +#include +#include +#include +#include +#include "ql4_def.h" +#include "ql4_glbl.h" +#include "ql4_inline.h" + +#include + +#define TIMEOUT_100_MS 100 +#define MASK(n) DMA_BIT_MASK(n) +#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) +#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) +#define MS_WIN(addr) (addr & 0x0ffc0000) +#define QLA82XX_PCI_MN_2M (0) +#define QLA82XX_PCI_MS_2M (0x80000) +#define QLA82XX_PCI_OCM0_2M (0xc0000) +#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800) +#define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) + +/* CRB window related */ +#define CRB_BLK(off) ((off >> 20) & 0x3f) +#define CRB_SUBBLK(off) ((off >> 16) & 0xf) +#define CRB_WINDOW_2M (0x130060) +#define CRB_HI(off) ((qla4_82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \ + ((off) & 0xf0000)) +#define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL) +#define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL) +#define CRB_INDIRECT_2M (0x1e0000UL) + +static inline void __iomem * +qla4_8xxx_pci_base_offsetfset(struct scsi_qla_host *ha, unsigned long off) +{ + if ((off < ha->first_page_group_end) && + (off >= ha->first_page_group_start)) + return (void __iomem *)(ha->nx_pcibase + off); + + return NULL; +} + +static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, + 0x410000AC, 0x410000B8, 0x410000BC }; +#define MAX_CRB_XFORM 60 +static unsigned long crb_addr_xform[MAX_CRB_XFORM]; +static int qla4_8xxx_crb_table_initialized; + +#define qla4_8xxx_crb_addr_transform(name) \ + (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ + QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20) +static void +qla4_82xx_crb_addr_transform_setup(void) +{ + qla4_8xxx_crb_addr_transform(XDMA); + qla4_8xxx_crb_addr_transform(TIMR); + qla4_8xxx_crb_addr_transform(SRE); + qla4_8xxx_crb_addr_transform(SQN3); + qla4_8xxx_crb_addr_transform(SQN2); + qla4_8xxx_crb_addr_transform(SQN1); + qla4_8xxx_crb_addr_transform(SQN0); + qla4_8xxx_crb_addr_transform(SQS3); + qla4_8xxx_crb_addr_transform(SQS2); + qla4_8xxx_crb_addr_transform(SQS1); + qla4_8xxx_crb_addr_transform(SQS0); + qla4_8xxx_crb_addr_transform(RPMX7); + qla4_8xxx_crb_addr_transform(RPMX6); + qla4_8xxx_crb_addr_transform(RPMX5); + qla4_8xxx_crb_addr_transform(RPMX4); + qla4_8xxx_crb_addr_transform(RPMX3); + qla4_8xxx_crb_addr_transform(RPMX2); + qla4_8xxx_crb_addr_transform(RPMX1); + qla4_8xxx_crb_addr_transform(RPMX0); + qla4_8xxx_crb_addr_transform(ROMUSB); + qla4_8xxx_crb_addr_transform(SN); + qla4_8xxx_crb_addr_transform(QMN); + qla4_8xxx_crb_addr_transform(QMS); + qla4_8xxx_crb_addr_transform(PGNI); + qla4_8xxx_crb_addr_transform(PGND); + qla4_8xxx_crb_addr_transform(PGN3); + qla4_8xxx_crb_addr_transform(PGN2); + qla4_8xxx_crb_addr_transform(PGN1); + qla4_8xxx_crb_addr_transform(PGN0); + 
qla4_8xxx_crb_addr_transform(PGSI); + qla4_8xxx_crb_addr_transform(PGSD); + qla4_8xxx_crb_addr_transform(PGS3); + qla4_8xxx_crb_addr_transform(PGS2); + qla4_8xxx_crb_addr_transform(PGS1); + qla4_8xxx_crb_addr_transform(PGS0); + qla4_8xxx_crb_addr_transform(PS); + qla4_8xxx_crb_addr_transform(PH); + qla4_8xxx_crb_addr_transform(NIU); + qla4_8xxx_crb_addr_transform(I2Q); + qla4_8xxx_crb_addr_transform(EG); + qla4_8xxx_crb_addr_transform(MN); + qla4_8xxx_crb_addr_transform(MS); + qla4_8xxx_crb_addr_transform(CAS2); + qla4_8xxx_crb_addr_transform(CAS1); + qla4_8xxx_crb_addr_transform(CAS0); + qla4_8xxx_crb_addr_transform(CAM); + qla4_8xxx_crb_addr_transform(C2C1); + qla4_8xxx_crb_addr_transform(C2C0); + qla4_8xxx_crb_addr_transform(SMB); + qla4_8xxx_crb_addr_transform(OCM0); + qla4_8xxx_crb_addr_transform(I2C0); + + qla4_8xxx_crb_table_initialized = 1; +} + +static struct crb_128M_2M_block_map crb_128M_2M_map[64] = { + {{{0, 0, 0, 0} } }, /* 0: PCI */ + {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ + {1, 0x0110000, 0x0120000, 0x130000}, + {1, 0x0120000, 0x0122000, 0x124000}, + {1, 0x0130000, 0x0132000, 0x126000}, + {1, 0x0140000, 0x0142000, 0x128000}, + {1, 0x0150000, 0x0152000, 0x12a000}, + {1, 0x0160000, 0x0170000, 0x110000}, + {1, 0x0170000, 0x0172000, 0x12e000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x01e0000, 0x01e0800, 0x122000}, + {0, 0x0000000, 0x0000000, 0x000000} } }, + {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ + {{{0, 0, 0, 0} } }, /* 3: */ + {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ + {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ + {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ + {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ + {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x08f0000, 0x08f2000, 0x172000} } }, + {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x09f0000, 0x09f2000, 0x176000} } }, + {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 
0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0af0000, 0x0af2000, 0x17a000} } }, + {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, + {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ + {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ + {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ + {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ + {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ + {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ + {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ + {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ + {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ + {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ + {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ + {{{0, 0, 0, 0} } }, /* 23: */ + {{{0, 0, 0, 0} } }, /* 24: */ + {{{0, 0, 0, 0} } }, /* 25: */ + {{{0, 0, 0, 0} } }, /* 26: */ + {{{0, 0, 0, 0} } }, /* 27: */ + {{{0, 0, 0, 0} } }, /* 28: */ + {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ + {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ + {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ + {{{0} } }, /* 32: PCI */ + {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ + {1, 0x2110000, 0x2120000, 0x130000}, + {1, 0x2120000, 0x2122000, 0x124000}, + {1, 0x2130000, 0x2132000, 0x126000}, + {1, 0x2140000, 0x2142000, 0x128000}, + {1, 0x2150000, 0x2152000, 0x12a000}, + {1, 0x2160000, 0x2170000, 0x110000}, + {1, 0x2170000, 0x2172000, 0x12e000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000} } }, + {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ + {{{0} } }, /* 35: */ + {{{0} } }, /* 36: */ + {{{0} } }, /* 37: */ + {{{0} } }, /* 38: */ + {{{0} } }, /* 39: */ + {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ + {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ + {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ + {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ + {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ + {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ + {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ + {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ + {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ + {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ + {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ + {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ + {{{0} } }, /* 52: */ + {{{1, 
0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ + {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */ + {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ + {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ + {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ + {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ + {{{0} } }, /* 59: I2C0 */ + {{{0} } }, /* 60: I2C1 */ + {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },/* 61: LPC */ + {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ + {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ +}; + +/* + * top 12 bits of crb internal address (hub, agent) + */ +static unsigned qla4_82xx_crb_hub_agt[64] = { + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_PS, + QLA82XX_HW_CRB_HUB_AGT_ADR_MN, + QLA82XX_HW_CRB_HUB_AGT_ADR_MS, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_SRE, + QLA82XX_HW_CRB_HUB_AGT_ADR_NIU, + QLA82XX_HW_CRB_HUB_AGT_ADR_QMN, + QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0, + QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1, + QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2, + QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3, + QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q, + QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, + QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4, + QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGND, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI, + QLA82XX_HW_CRB_HUB_AGT_ADR_SN, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_EG, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_PS, + QLA82XX_HW_CRB_HUB_AGT_ADR_CAM, + 0, + 0, + 0, + 0, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7, + QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA, + QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q, + QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8, + QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9, + QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_SMB, + QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0, + QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1, + 0, + QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC, + 0, +}; + +/* Device states */ +static char *qdev_state[] = { + "Unknown", + "Cold", + "Initializing", + "Ready", + "Need Reset", + "Need Quiescent", + "Failed", + "Quiescent", +}; + +/* + * In: 'off' is offset from CRB space in 128M pci map + * Out: 'off' is 2M pci map addr + * side effect: lock crb window + */ +static void +qla4_82xx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off) +{ + u32 win_read; + + ha->crb_win = CRB_HI(*off); + writel(ha->crb_win, + (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); + + /* Read back value to make sure write has gone through before trying + * to use it. 
*/ + win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); + if (win_read != ha->crb_win) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Written crbwin (0x%x) != Read crbwin (0x%x)," + " off=0x%lx\n", __func__, ha->crb_win, win_read, *off)); + } + *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; +} + +#define CRB_WIN_LOCK_TIMEOUT 100000000 + +/* + * Context: atomic + */ +static int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha) +{ + int done = 0, timeout = 0; + + while (!done) { + /* acquire semaphore3 from PCI HW block */ + done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK)); + if (done == 1) + break; + if (timeout >= CRB_WIN_LOCK_TIMEOUT) + return -1; + + timeout++; + udelay(10); + } + qla4_82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num); + return 0; +} + +void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha) +{ + qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK)); +} + +void +qla4_82xx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data) +{ + unsigned long flags = 0; + int rv; + + rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off); + + BUG_ON(rv == -1); + + if (rv == 1) { + write_lock_irqsave(&ha->hw_lock, flags); + qla4_82xx_crb_win_lock(ha); + qla4_82xx_pci_set_crbwindow_2M(ha, &off); + } + + writel(data, (void __iomem *)off); + + if (rv == 1) { + qla4_82xx_crb_win_unlock(ha); + write_unlock_irqrestore(&ha->hw_lock, flags); + } +} + +uint32_t qla4_82xx_rd_32(struct scsi_qla_host *ha, ulong off) +{ + unsigned long flags = 0; + int rv; + u32 data; + + rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off); + + BUG_ON(rv == -1); + + if (rv == 1) { + write_lock_irqsave(&ha->hw_lock, flags); + qla4_82xx_crb_win_lock(ha); + qla4_82xx_pci_set_crbwindow_2M(ha, &off); + } + data = readl((void __iomem *)off); + + if (rv == 1) { + qla4_82xx_crb_win_unlock(ha); + write_unlock_irqrestore(&ha->hw_lock, flags); + } + return data; +} + +/* Minidump related functions */ +int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data) +{ + uint32_t win_read, off_value; + int rval = QLA_SUCCESS; + + off_value = off & 0xFFFF0000; + writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); + + /* + * Read back value to make sure write has gone through before trying + * to use it. + */ + win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); + if (win_read != off_value) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Written (0x%x) != Read (0x%x), off=0x%x\n", + __func__, off_value, win_read, off)); + rval = QLA_ERROR; + } else { + off_value = off & 0x0000FFFF; + *data = readl((void __iomem *)(off_value + CRB_INDIRECT_2M + + ha->nx_pcibase)); + } + return rval; +} + +int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data) +{ + uint32_t win_read, off_value; + int rval = QLA_SUCCESS; + + off_value = off & 0xFFFF0000; + writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); + + /* Read back value to make sure write has gone through before trying + * to use it. 
+ */ + win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); + if (win_read != off_value) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Written (0x%x) != Read (0x%x), off=0x%x\n", + __func__, off_value, win_read, off)); + rval = QLA_ERROR; + } else { + off_value = off & 0x0000FFFF; + writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M + + ha->nx_pcibase)); + } + return rval; +} + +#define IDC_LOCK_TIMEOUT 100000000 + +/** + * qla4_82xx_idc_lock - hw_lock + * @ha: pointer to adapter structure + * + * General purpose lock used to synchronize access to + * CRB_DEV_STATE, CRB_DEV_REF_COUNT, etc. + * + * Context: task, can sleep + **/ +int qla4_82xx_idc_lock(struct scsi_qla_host *ha) +{ + int done = 0, timeout = 0; + + might_sleep(); + + while (!done) { + /* acquire semaphore5 from PCI HW block */ + done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK)); + if (done == 1) + break; + if (timeout >= IDC_LOCK_TIMEOUT) + return -1; + + timeout++; + msleep(100); + } + return 0; +} + +void qla4_82xx_idc_unlock(struct scsi_qla_host *ha) +{ + qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK)); +} + +int +qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off) +{ + struct crb_128M_2M_sub_block_map *m; + + if (*off >= QLA82XX_CRB_MAX) + return -1; + + if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) { + *off = (*off - QLA82XX_PCI_CAMQM) + + QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase; + return 0; + } + + if (*off < QLA82XX_PCI_CRBSPACE) + return -1; + + *off -= QLA82XX_PCI_CRBSPACE; + /* + * Try direct map + */ + + m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)]; + + if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) { + *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase; + return 0; + } + + /* + * Not in direct map, use crb window + */ + return 1; +} + +/* +* check memory access boundary. +* used by test agent. 
support ddr access only for now +*/ +static unsigned long +qla4_82xx_pci_mem_bound_check(struct scsi_qla_host *ha, + unsigned long long addr, int size) +{ + if (!QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, + QLA8XXX_ADDR_DDR_NET_MAX) || + !QLA8XXX_ADDR_IN_RANGE(addr + size - 1, + QLA8XXX_ADDR_DDR_NET, QLA8XXX_ADDR_DDR_NET_MAX) || + ((size != 1) && (size != 2) && (size != 4) && (size != 8))) { + return 0; + } + return 1; +} + +static int qla4_82xx_pci_set_window_warning_count; + +static unsigned long +qla4_82xx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr) +{ + int window; + u32 win_read; + + if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, + QLA8XXX_ADDR_DDR_NET_MAX)) { + /* DDR network side */ + window = MN_WIN(addr); + ha->ddr_mn_window = window; + qla4_82xx_wr_32(ha, ha->mn_win_crb | + QLA82XX_PCI_CRBSPACE, window); + win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb | + QLA82XX_PCI_CRBSPACE); + if ((win_read << 17) != window) { + ql4_printk(KERN_WARNING, ha, + "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n", + __func__, window, win_read); + } + addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; + } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0, + QLA8XXX_ADDR_OCM0_MAX)) { + unsigned int temp1; + /* if bits 19:18&17:11 are on */ + if ((addr & 0x00ff800) == 0xff800) { + printk("%s: QM access not handled.\n", __func__); + addr = -1UL; + } + + window = OCM_WIN(addr); + ha->ddr_mn_window = window; + qla4_82xx_wr_32(ha, ha->mn_win_crb | + QLA82XX_PCI_CRBSPACE, window); + win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb | + QLA82XX_PCI_CRBSPACE); + temp1 = ((window & 0x1FF) << 7) | + ((window & 0x0FFFE0000) >> 17); + if (win_read != temp1) { + printk("%s: Written OCMwin (0x%x) != Read" + " OCMwin (0x%x)\n", __func__, temp1, win_read); + } + addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; + + } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET, + QLA82XX_P3_ADDR_QDR_NET_MAX)) { + /* QDR network side */ + window = MS_WIN(addr); + ha->qdr_sn_window = window; + qla4_82xx_wr_32(ha, ha->ms_win_crb | + QLA82XX_PCI_CRBSPACE, window); + win_read = qla4_82xx_rd_32(ha, + ha->ms_win_crb | QLA82XX_PCI_CRBSPACE); + if (win_read != window) { + printk("%s: Written MSwin (0x%x) != Read " + "MSwin (0x%x)\n", __func__, window, win_read); + } + addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET; + + } else { + /* + * peg gdb frequently accesses memory that doesn't exist, + * this limits the chit chat so debugging isn't slowed down. 
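+ * (only the first eight of these warnings are printed, then roughly one in every 64, per the counter check below)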
+ */ + if ((qla4_82xx_pci_set_window_warning_count++ < 8) || + (qla4_82xx_pci_set_window_warning_count%64 == 0)) { + printk("%s: Warning:%s Unknown address range!\n", + __func__, DRIVER_NAME); + } + addr = -1UL; + } + return addr; +} + +/* check if address is in the same windows as the previous access */ +static int qla4_82xx_pci_is_same_window(struct scsi_qla_host *ha, + unsigned long long addr) +{ + int window; + unsigned long long qdr_max; + + qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX; + + if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, + QLA8XXX_ADDR_DDR_NET_MAX)) { + /* DDR network side */ + BUG(); /* MN access can not come here */ + } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0, + QLA8XXX_ADDR_OCM0_MAX)) { + return 1; + } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM1, + QLA8XXX_ADDR_OCM1_MAX)) { + return 1; + } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET, + qdr_max)) { + /* QDR network side */ + window = ((addr - QLA8XXX_ADDR_QDR_NET) >> 22) & 0x3f; + if (ha->qdr_sn_window == window) + return 1; + } + + return 0; +} + +static int qla4_82xx_pci_mem_read_direct(struct scsi_qla_host *ha, + u64 off, void *data, int size) +{ + unsigned long flags; + void __iomem *addr; + int ret = 0; + u64 start; + void __iomem *mem_ptr = NULL; + unsigned long mem_base; + unsigned long mem_page; + + write_lock_irqsave(&ha->hw_lock, flags); + + /* + * If attempting to access unknown address or straddle hw windows, + * do not access. + */ + start = qla4_82xx_pci_set_window(ha, off); + if ((start == -1UL) || + (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) { + write_unlock_irqrestore(&ha->hw_lock, flags); + printk(KERN_ERR"%s out of bound pci memory access. " + "offset is 0x%llx\n", DRIVER_NAME, off); + return -1; + } + + addr = qla4_8xxx_pci_base_offsetfset(ha, start); + if (!addr) { + write_unlock_irqrestore(&ha->hw_lock, flags); + mem_base = pci_resource_start(ha->pdev, 0); + mem_page = start & PAGE_MASK; + /* Map two pages whenever user tries to access addresses in two + consecutive pages. + */ + if (mem_page != ((start + size - 1) & PAGE_MASK)) + mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2); + else + mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); + + if (mem_ptr == NULL) { + *(u8 *)data = 0; + return -1; + } + addr = mem_ptr; + addr += start & (PAGE_SIZE - 1); + write_lock_irqsave(&ha->hw_lock, flags); + } + + switch (size) { + case 1: + *(u8 *)data = readb(addr); + break; + case 2: + *(u16 *)data = readw(addr); + break; + case 4: + *(u32 *)data = readl(addr); + break; + case 8: + *(u64 *)data = readq(addr); + break; + default: + ret = -1; + break; + } + write_unlock_irqrestore(&ha->hw_lock, flags); + + if (mem_ptr) + iounmap(mem_ptr); + return ret; +} + +static int +qla4_82xx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off, + void *data, int size) +{ + unsigned long flags; + void __iomem *addr; + int ret = 0; + u64 start; + void __iomem *mem_ptr = NULL; + unsigned long mem_base; + unsigned long mem_page; + + write_lock_irqsave(&ha->hw_lock, flags); + + /* + * If attempting to access unknown address or straddle hw windows, + * do not access. + */ + start = qla4_82xx_pci_set_window(ha, off); + if ((start == -1UL) || + (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) { + write_unlock_irqrestore(&ha->hw_lock, flags); + printk(KERN_ERR"%s out of bound pci memory access. 
" + "offset is 0x%llx\n", DRIVER_NAME, off); + return -1; + } + + addr = qla4_8xxx_pci_base_offsetfset(ha, start); + if (!addr) { + write_unlock_irqrestore(&ha->hw_lock, flags); + mem_base = pci_resource_start(ha->pdev, 0); + mem_page = start & PAGE_MASK; + /* Map two pages whenever user tries to access addresses in two + consecutive pages. + */ + if (mem_page != ((start + size - 1) & PAGE_MASK)) + mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2); + else + mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); + if (mem_ptr == NULL) + return -1; + + addr = mem_ptr; + addr += start & (PAGE_SIZE - 1); + write_lock_irqsave(&ha->hw_lock, flags); + } + + switch (size) { + case 1: + writeb(*(u8 *)data, addr); + break; + case 2: + writew(*(u16 *)data, addr); + break; + case 4: + writel(*(u32 *)data, addr); + break; + case 8: + writeq(*(u64 *)data, addr); + break; + default: + ret = -1; + break; + } + write_unlock_irqrestore(&ha->hw_lock, flags); + if (mem_ptr) + iounmap(mem_ptr); + return ret; +} + +#define MTU_FUDGE_FACTOR 100 + +static unsigned long +qla4_82xx_decode_crb_addr(unsigned long addr) +{ + int i; + unsigned long base_addr, offset, pci_base; + + if (!qla4_8xxx_crb_table_initialized) + qla4_82xx_crb_addr_transform_setup(); + + pci_base = ADDR_ERROR; + base_addr = addr & 0xfff00000; + offset = addr & 0x000fffff; + + for (i = 0; i < MAX_CRB_XFORM; i++) { + if (crb_addr_xform[i] == base_addr) { + pci_base = i << 20; + break; + } + } + if (pci_base == ADDR_ERROR) + return pci_base; + else + return pci_base + offset; +} + +static long rom_max_timeout = 100; +static long qla4_82xx_rom_lock_timeout = 100; + +/* + * Context: task, can_sleep + */ +static int +qla4_82xx_rom_lock(struct scsi_qla_host *ha) +{ + int done = 0, timeout = 0; + + might_sleep(); + + while (!done) { + /* acquire semaphore2 from PCI HW block */ + done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); + if (done == 1) + break; + if (timeout >= qla4_82xx_rom_lock_timeout) + return -1; + + timeout++; + msleep(20); + } + qla4_82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER); + return 0; +} + +static void +qla4_82xx_rom_unlock(struct scsi_qla_host *ha) +{ + qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); +} + +static int +qla4_82xx_wait_rom_done(struct scsi_qla_host *ha) +{ + long timeout = 0; + long done = 0 ; + + while (done == 0) { + done = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); + done &= 2; + timeout++; + if (timeout >= rom_max_timeout) { + printk("%s: Timeout reached waiting for rom done", + DRIVER_NAME); + return -1; + } + } + return 0; +} + +static int +qla4_82xx_do_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp) +{ + qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); + qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); + qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); + qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb); + if (qla4_82xx_wait_rom_done(ha)) { + printk("%s: Error waiting for rom done\n", DRIVER_NAME); + return -1; + } + /* reset abyte_cnt and dummy_byte_cnt */ + qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); + udelay(10); + qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); + + *valp = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); + return 0; +} + +static int +qla4_82xx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp) +{ + int ret, loops = 0; + + while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) { + udelay(100); + loops++; + } + if (loops >= 50000) { + ql4_printk(KERN_WARNING, ha, "%s: qla4_82xx_rom_lock 
failed\n", + DRIVER_NAME); + return -1; + } + ret = qla4_82xx_do_rom_fast_read(ha, addr, valp); + qla4_82xx_rom_unlock(ha); + return ret; +} + +/* + * This routine does CRB initialize sequence + * to put the ISP into operational state + */ +static int +qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose) +{ + int addr, val; + int i ; + struct crb_addr_pair *buf; + unsigned long off; + unsigned offset, n; + + struct crb_addr_pair { + long addr; + long data; + }; + + /* Halt all the indiviual PEGs and other blocks of the ISP */ + qla4_82xx_rom_lock(ha); + + /* disable all I2Q */ + qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0); + + /* disable all niu interrupts */ + qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); + /* disable xge rx/tx */ + qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); + /* disable xg1 rx/tx */ + qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); + /* disable sideband mac */ + qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00); + /* disable ap0 mac */ + qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00); + /* disable ap1 mac */ + qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00); + + /* halt sre */ + val = qla4_82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); + qla4_82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1))); + + /* halt epg */ + qla4_82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1); + + /* halt timers */ + qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0); + + /* halt pegs */ + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1); + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); + msleep(5); + + /* big hammer */ + if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) + /* don't reset CAM block on reset */ + qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); + else + qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); + + qla4_82xx_rom_unlock(ha); + + /* Read the signature value from the flash. 
+ * Offset 0: Contain signature (0xcafecafe) + * Offset 4: Offset and number of addr/value pairs + * that present in CRB initialize sequence + */ + if (qla4_82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || + qla4_82xx_rom_fast_read(ha, 4, &n) != 0) { + ql4_printk(KERN_WARNING, ha, + "[ERROR] Reading crb_init area: n: %08x\n", n); + return -1; + } + + /* Offset in flash = lower 16 bits + * Number of enteries = upper 16 bits + */ + offset = n & 0xffffU; + n = (n >> 16) & 0xffffU; + + /* number of addr/value pair should not exceed 1024 enteries */ + if (n >= 1024) { + ql4_printk(KERN_WARNING, ha, + "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n", + DRIVER_NAME, __func__, n); + return -1; + } + + ql4_printk(KERN_INFO, ha, + "%s: %d CRB init values found in ROM.\n", DRIVER_NAME, n); + + buf = kmalloc_array(n, sizeof(struct crb_addr_pair), GFP_KERNEL); + if (buf == NULL) { + ql4_printk(KERN_WARNING, ha, + "%s: [ERROR] Unable to malloc memory.\n", DRIVER_NAME); + return -1; + } + + for (i = 0; i < n; i++) { + if (qla4_82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 || + qla4_82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != + 0) { + kfree(buf); + return -1; + } + + buf[i].addr = addr; + buf[i].data = val; + } + + for (i = 0; i < n; i++) { + /* Translate internal CRB initialization + * address to PCI bus address + */ + off = qla4_82xx_decode_crb_addr((unsigned long)buf[i].addr) + + QLA82XX_PCI_CRBSPACE; + /* Not all CRB addr/value pair to be written, + * some of them are skipped + */ + + /* skip if LS bit is set*/ + if (off & 0x1) { + DEBUG2(ql4_printk(KERN_WARNING, ha, + "Skip CRB init replay for offset = 0x%lx\n", off)); + continue; + } + + /* skipping cold reboot MAGIC */ + if (off == QLA82XX_CAM_RAM(0x1fc)) + continue; + + /* do not reset PCI */ + if (off == (ROMUSB_GLB + 0xbc)) + continue; + + /* skip core clock, so that firmware can increase the clock */ + if (off == (ROMUSB_GLB + 0xc8)) + continue; + + /* skip the function enable register */ + if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION)) + continue; + + if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2)) + continue; + + if ((off & 0x0ff00000) == QLA82XX_CRB_SMB) + continue; + + if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET) + continue; + + if (off == ADDR_ERROR) { + ql4_printk(KERN_WARNING, ha, + "%s: [ERROR] Unknown addr: 0x%08lx\n", + DRIVER_NAME, buf[i].addr); + continue; + } + + qla4_82xx_wr_32(ha, off, buf[i].data); + + /* ISP requires much bigger delay to settle down, + * else crb_window returns 0xffffffff + */ + if (off == QLA82XX_ROMUSB_GLB_SW_RESET) + msleep(1000); + + /* ISP requires millisec delay between + * successive CRB register updation + */ + msleep(1); + } + + kfree(buf); + + /* Resetting the data and instruction cache */ + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e); + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8); + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8); + + /* Clear all protocol processing engines */ + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0); + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0); + + return 0; +} + +/** + * qla4_8xxx_ms_mem_write_128b - Writes data to MS/off-chip memory + * @ha: Pointer to adapter structure + * @addr: Flash address 
to write to + * @data: Data to be written + * @count: word_count to be written + * + * Return: On success return QLA_SUCCESS + * On error return QLA_ERROR + **/ +int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr, + uint32_t *data, uint32_t count) +{ + int i, j; + uint32_t agt_ctrl; + unsigned long flags; + int ret_val = QLA_SUCCESS; + + /* Only 128-bit aligned access */ + if (addr & 0xF) { + ret_val = QLA_ERROR; + goto exit_ms_mem_write; + } + + write_lock_irqsave(&ha->hw_lock, flags); + + /* Write address */ + ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n", + __func__); + goto exit_ms_mem_write_unlock; + } + + for (i = 0; i < count; i++, addr += 16) { + if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET, + QLA8XXX_ADDR_QDR_NET_MAX)) || + (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, + QLA8XXX_ADDR_DDR_NET_MAX)))) { + ret_val = QLA_ERROR; + goto exit_ms_mem_write_unlock; + } + + ret_val = ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_ADDR_LO, + addr); + /* Write data */ + ret_val |= ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_WRDATA_LO, + *data++); + ret_val |= ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_WRDATA_HI, + *data++); + ret_val |= ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_WRDATA_ULO, + *data++); + ret_val |= ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_WRDATA_UHI, + *data++); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n", + __func__); + goto exit_ms_mem_write_unlock; + } + + /* Check write status */ + ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, + MIU_TA_CTL_WRITE_ENABLE); + ret_val |= ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_CTRL, + MIU_TA_CTL_WRITE_START); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n", + __func__); + goto exit_ms_mem_write_unlock; + } + + for (j = 0; j < MAX_CTL_CHECK; j++) { + ret_val = ha->isp_ops->rd_reg_indirect(ha, + MD_MIU_TEST_AGT_CTRL, + &agt_ctrl); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n", + __func__); + goto exit_ms_mem_write_unlock; + } + if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0) + break; + } + + /* Status check failed */ + if (j >= MAX_CTL_CHECK) { + printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n", + __func__); + ret_val = QLA_ERROR; + goto exit_ms_mem_write_unlock; + } + } + +exit_ms_mem_write_unlock: + write_unlock_irqrestore(&ha->hw_lock, flags); + +exit_ms_mem_write: + return ret_val; +} + +static int +qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start) +{ + int i, rval = 0; + long size = 0; + long flashaddr, memaddr; + u64 data; + u32 high, low; + + flashaddr = memaddr = ha->hw.flt_region_bootload; + size = (image_start - flashaddr) / 8; + + DEBUG2(printk("scsi%ld: %s: bootldr=0x%lx, fw_image=0x%x\n", + ha->host_no, __func__, flashaddr, image_start)); + + for (i = 0; i < size; i++) { + if ((qla4_82xx_rom_fast_read(ha, flashaddr, (int *)&low)) || + (qla4_82xx_rom_fast_read(ha, flashaddr + 4, + (int *)&high))) { + rval = -1; + goto exit_load_from_flash; + } + data = ((u64)high << 32) | low ; + rval = qla4_82xx_pci_mem_write_2M(ha, memaddr, &data, 8); + if (rval) + goto exit_load_from_flash; + + flashaddr += 8; + memaddr += 8; + + if (i % 0x1000 == 0) + msleep(1); + + } + + udelay(100); + + read_lock(&ha->hw_lock); + qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 
0x18, 0x1020); + qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); + read_unlock(&ha->hw_lock); + +exit_load_from_flash: + return rval; +} + +static int qla4_82xx_load_fw(struct scsi_qla_host *ha, uint32_t image_start) +{ + u32 rst; + + qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0); + if (qla4_82xx_pinit_from_rom(ha, 0) != QLA_SUCCESS) { + printk(KERN_WARNING "%s: Error during CRB Initialization\n", + __func__); + return QLA_ERROR; + } + + udelay(500); + + /* at this point, QM is in reset. This could be a problem if there are + * incoming d* transition queue messages. QM/PCIE could wedge. + * To get around this, QM is brought out of reset. + */ + + rst = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET); + /* unreset qm */ + rst &= ~(1 << 28); + qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst); + + if (qla4_82xx_load_from_flash(ha, image_start)) { + printk("%s: Error trying to load fw from flash!\n", __func__); + return QLA_ERROR; + } + + return QLA_SUCCESS; +} + +int +qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *ha, + u64 off, void *data, int size) +{ + int i, j = 0, k, start, end, loop, sz[2], off0[2]; + int shift_amount; + uint32_t temp; + uint64_t off8, val, mem_crb, word[2] = {0, 0}; + + /* + * If not MN, go check for MS or invalid. + */ + + if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) + mem_crb = QLA82XX_CRB_QDR_NET; + else { + mem_crb = QLA82XX_CRB_DDR_NET; + if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0) + return qla4_82xx_pci_mem_read_direct(ha, + off, data, size); + } + + + off8 = off & 0xfffffff0; + off0[0] = off & 0xf; + sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]); + shift_amount = 4; + + loop = ((off0[0] + size - 1) >> shift_amount) + 1; + off0[1] = 0; + sz[1] = size - sz[0]; + + for (i = 0; i < loop; i++) { + temp = off8 + (i << shift_amount); + qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp); + temp = 0; + qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp); + temp = MIU_TA_CTL_ENABLE; + qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); + temp = MIU_TA_CTL_START_ENABLE; + qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); + if ((temp & MIU_TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + printk_ratelimited(KERN_ERR + "%s: failed to read through agent\n", + __func__); + break; + } + + start = off0[i] >> 2; + end = (off0[i] + sz[i] - 1) >> 2; + for (k = start; k <= end; k++) { + temp = qla4_82xx_rd_32(ha, + mem_crb + MIU_TEST_AGT_RDDATA(k)); + word[i] |= ((uint64_t)temp << (32 * (k & 1))); + } + } + + if (j >= MAX_CTL_CHECK) + return -1; + + if ((off0[0] & 7) == 0) { + val = word[0]; + } else { + val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) | + ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8)); + } + + switch (size) { + case 1: + *(uint8_t *)data = val; + break; + case 2: + *(uint16_t *)data = val; + break; + case 4: + *(uint32_t *)data = val; + break; + case 8: + *(uint64_t *)data = val; + break; + } + return 0; +} + +int +qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha, + u64 off, void *data, int size) +{ + int i, j, ret = 0, loop, sz[2], off0; + int scale, shift_amount, startword; + uint32_t temp; + uint64_t off8, mem_crb, tmpw, word[2] = {0, 0}; + + /* + * If not MN, go check for MS or invalid. 
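+ * Addresses not wholly inside the DDR test-agent range, or with an unsupported access size, are handed to the direct-window write path below.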
+ */ + if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) + mem_crb = QLA82XX_CRB_QDR_NET; + else { + mem_crb = QLA82XX_CRB_DDR_NET; + if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0) + return qla4_82xx_pci_mem_write_direct(ha, + off, data, size); + } + + off0 = off & 0x7; + sz[0] = (size < (8 - off0)) ? size : (8 - off0); + sz[1] = size - sz[0]; + + off8 = off & 0xfffffff0; + loop = (((off & 0xf) + size - 1) >> 4) + 1; + shift_amount = 4; + scale = 2; + startword = (off & 0xf)/8; + + for (i = 0; i < loop; i++) { + if (qla4_82xx_pci_mem_read_2M(ha, off8 + + (i << shift_amount), &word[i * scale], 8)) + return -1; + } + + switch (size) { + case 1: + tmpw = *((uint8_t *)data); + break; + case 2: + tmpw = *((uint16_t *)data); + break; + case 4: + tmpw = *((uint32_t *)data); + break; + case 8: + default: + tmpw = *((uint64_t *)data); + break; + } + + if (sz[0] == 8) + word[startword] = tmpw; + else { + word[startword] &= + ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8)); + word[startword] |= tmpw << (off0 * 8); + } + + if (sz[1] != 0) { + word[startword+1] &= ~(~0ULL << (sz[1] * 8)); + word[startword+1] |= tmpw >> (sz[0] * 8); + } + + for (i = 0; i < loop; i++) { + temp = off8 + (i << shift_amount); + qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp); + temp = 0; + qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp); + temp = word[i * scale] & 0xffffffff; + qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp); + temp = (word[i * scale] >> 32) & 0xffffffff; + qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp); + temp = word[i*scale + 1] & 0xffffffff; + qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO, + temp); + temp = (word[i*scale + 1] >> 32) & 0xffffffff; + qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI, + temp); + + temp = MIU_TA_CTL_WRITE_ENABLE; + qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp); + temp = MIU_TA_CTL_WRITE_START; + qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); + if ((temp & MIU_TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + ql4_printk(KERN_ERR, ha, + "%s: failed to read through agent\n", + __func__); + ret = -1; + break; + } + } + + return ret; +} + +static int qla4_82xx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val) +{ + u32 val = 0; + int retries = 60; + + if (!pegtune_val) { + do { + val = qla4_82xx_rd_32(ha, CRB_CMDPEG_STATE); + if ((val == PHAN_INITIALIZE_COMPLETE) || + (val == PHAN_INITIALIZE_ACK)) + return 0; + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(500); + + } while (--retries); + + if (!retries) { + pegtune_val = qla4_82xx_rd_32(ha, + QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); + printk(KERN_WARNING "%s: init failed, " + "pegtune_val = %x\n", __func__, pegtune_val); + return -1; + } + } + return 0; +} + +static int qla4_82xx_rcvpeg_ready(struct scsi_qla_host *ha) +{ + uint32_t state = 0; + int loops = 0; + + /* Window 1 call */ + read_lock(&ha->hw_lock); + state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE); + read_unlock(&ha->hw_lock); + + while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 30000)) { + udelay(100); + /* Window 1 call */ + read_lock(&ha->hw_lock); + state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE); + read_unlock(&ha->hw_lock); + + loops++; + } + + if (loops >= 30000) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "Receive Peg initialization not complete: 0x%x.\n", state)); + return QLA_ERROR; + } + + return QLA_SUCCESS; +} + 
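The drv_active/drv_state helpers that follow (qla4_8xxx_set_drv_active, qla4_8xxx_set_rst_ready, qla4_8xxx_set_qsnt_ready, and their clear counterparts) all apply the same per-function bit packing to the IDC registers: ISP8324/ISP8042 give each PCI function a single bit, while ISP8022 reserves a 4-bit field per function, with the quiescent flag stored in the second bit of that field. The following is a minimal, standalone sketch of that encoding, compiled outside the driver and purely illustrative; the helper name encode_func_flag is not part of the qla4xxx sources.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/*
 * Illustrative sketch (not driver code): mirrors the per-function bit
 * packing used by the drv_active/drv_state helpers in this file.
 *   one_bit_per_func == true  -> ISP8324/ISP8042 layout (1 bit per function)
 *   one_bit_per_func == false -> ISP8022 layout (4-bit field per function)
 */
static uint32_t encode_func_flag(uint32_t reg, unsigned int func_num,
				 bool one_bit_per_func, uint32_t flag)
{
	if (one_bit_per_func)
		return reg | (1u << func_num);       /* e.g. drv_active |= 1 << func_num */
	return reg | (flag << (func_num * 4));       /* e.g. qsnt_state |= 2 << (func_num * 4) */
}

int main(void)
{
	/* PCI function 2 on an ISP8022-style part: the active flag lands in bit 8. */
	uint32_t drv_active = encode_func_flag(0, 2, false, 1);
	/* The same function signalling quiescent-ready: bit 9, the second bit of its nibble. */
	uint32_t drv_state = encode_func_flag(0, 2, false, 2);

	printf("drv_active = 0x%08x, drv_state = 0x%08x\n", drv_active, drv_state);
	return 0;
}

Clearing a flag is the mirror image of this (reg &= ~mask), which is what the clear_* helpers below do.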
+void +qla4_8xxx_set_drv_active(struct scsi_qla_host *ha) +{ + uint32_t drv_active; + + drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); + + /* + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, + * shift 1 by func_num to set a bit for the function. + * For ISP8022, drv_active has 4 bits per function + */ + if (is_qla8032(ha) || is_qla8042(ha)) + drv_active |= (1 << ha->func_num); + else + drv_active |= (1 << (ha->func_num * 4)); + + ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n", + __func__, ha->host_no, drv_active); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active); +} + +void +qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha) +{ + uint32_t drv_active; + + drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); + + /* + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, + * shift 1 by func_num to set a bit for the function. + * For ISP8022, drv_active has 4 bits per function + */ + if (is_qla8032(ha) || is_qla8042(ha)) + drv_active &= ~(1 << (ha->func_num)); + else + drv_active &= ~(1 << (ha->func_num * 4)); + + ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n", + __func__, ha->host_no, drv_active); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active); +} + +inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha) +{ + uint32_t drv_state, drv_active; + int rval; + + drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); + drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); + + /* + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, + * shift 1 by func_num to set a bit for the function. + * For ISP8022, drv_active has 4 bits per function + */ + if (is_qla8032(ha) || is_qla8042(ha)) + rval = drv_state & (1 << ha->func_num); + else + rval = drv_state & (1 << (ha->func_num * 4)); + + if ((test_bit(AF_EEH_BUSY, &ha->flags)) && drv_active) + rval = 1; + + return rval; +} + +void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha) +{ + uint32_t drv_state; + + drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); + + /* + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, + * shift 1 by func_num to set a bit for the function. + * For ISP8022, drv_active has 4 bits per function + */ + if (is_qla8032(ha) || is_qla8042(ha)) + drv_state |= (1 << ha->func_num); + else + drv_state |= (1 << (ha->func_num * 4)); + + ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n", + __func__, ha->host_no, drv_state); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state); +} + +void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha) +{ + uint32_t drv_state; + + drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); + + /* + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, + * shift 1 by func_num to set a bit for the function. + * For ISP8022, drv_active has 4 bits per function + */ + if (is_qla8032(ha) || is_qla8042(ha)) + drv_state &= ~(1 << ha->func_num); + else + drv_state &= ~(1 << (ha->func_num * 4)); + + ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n", + __func__, ha->host_no, drv_state); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state); +} + +static inline void +qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha) +{ + uint32_t qsnt_state; + + qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); + + /* + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, + * shift 1 by func_num to set a bit for the function. + * For ISP8022, drv_active has 4 bits per function. 
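+ * On ISP8022 the quiescent flag occupies the second bit of the function's 4-bit field, hence the '2 <<' shift below (vs. '1 <<' in the other helpers).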
+ */ + if (is_qla8032(ha) || is_qla8042(ha)) + qsnt_state |= (1 << ha->func_num); + else + qsnt_state |= (2 << (ha->func_num * 4)); + + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, qsnt_state); +} + + +static int +qla4_82xx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start) +{ + uint16_t lnk; + + /* scrub dma mask expansion register */ + qla4_82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555); + + /* Overwrite stale initialization register values */ + qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0); + qla4_82xx_wr_32(ha, CRB_RCVPEG_STATE, 0); + qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0); + qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); + + if (qla4_82xx_load_fw(ha, image_start) != QLA_SUCCESS) { + printk("%s: Error trying to start fw!\n", __func__); + return QLA_ERROR; + } + + /* Handshake with the card before we register the devices. */ + if (qla4_82xx_cmdpeg_ready(ha, 0) != QLA_SUCCESS) { + printk("%s: Error during card handshake!\n", __func__); + return QLA_ERROR; + } + + /* Negotiated Link width */ + pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk); + ha->link_width = (lnk >> 4) & 0x3f; + + /* Synchronize with Receive peg */ + return qla4_82xx_rcvpeg_ready(ha); +} + +int qla4_82xx_try_start_fw(struct scsi_qla_host *ha) +{ + int rval; + + /* + * FW Load priority: + * 1) Operational firmware residing in flash. + * 2) Fail + */ + + ql4_printk(KERN_INFO, ha, + "FW: Retrieving flash offsets from FLT/FDT ...\n"); + rval = qla4_8xxx_get_flash_info(ha); + if (rval != QLA_SUCCESS) + return rval; + + ql4_printk(KERN_INFO, ha, + "FW: Attempting to load firmware from flash...\n"); + rval = qla4_82xx_start_firmware(ha, ha->hw.flt_region_fw); + + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash" + " FAILED...\n"); + return rval; + } + + return rval; +} + +void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha) +{ + if (qla4_82xx_rom_lock(ha)) { + /* Someone else is holding the lock. */ + dev_info(&ha->pdev->dev, "Resetting rom_lock\n"); + } + + /* + * Either we got the lock, or someone + * else died while holding it. + * In either case, unlock. 
+ */ + qla4_82xx_rom_unlock(ha); +} + +static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha, + uint32_t addr1, uint32_t mask) +{ + unsigned long timeout; + uint32_t rval = QLA_SUCCESS; + uint32_t temp; + + timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); + do { + ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); + if ((temp & mask) != 0) + break; + + if (time_after_eq(jiffies, timeout)) { + ql4_printk(KERN_INFO, ha, "Error in processing rdmdio entry\n"); + return QLA_ERROR; + } + } while (1); + + return rval; +} + +static uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1, + uint32_t addr3, uint32_t mask, uint32_t addr, + uint32_t *data_ptr) +{ + int rval = QLA_SUCCESS; + uint32_t temp; + uint32_t data; + + rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); + if (rval) + goto exit_ipmdio_rd_reg; + + temp = (0x40000000 | addr); + ha->isp_ops->wr_reg_indirect(ha, addr1, temp); + + rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); + if (rval) + goto exit_ipmdio_rd_reg; + + ha->isp_ops->rd_reg_indirect(ha, addr3, &data); + *data_ptr = data; + +exit_ipmdio_rd_reg: + return rval; +} + + +static uint32_t ql4_84xx_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *ha, + uint32_t addr1, + uint32_t addr2, + uint32_t addr3, + uint32_t mask) +{ + unsigned long timeout; + uint32_t temp; + uint32_t rval = QLA_SUCCESS; + + timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); + do { + ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr2, &temp); + if ((temp & 0x1) != 1) + break; + if (time_after_eq(jiffies, timeout)) { + ql4_printk(KERN_INFO, ha, "Error in processing mdiobus idle\n"); + return QLA_ERROR; + } + } while (1); + + return rval; +} + +static int ql4_84xx_ipmdio_wr_reg(struct scsi_qla_host *ha, + uint32_t addr1, uint32_t addr3, + uint32_t mask, uint32_t addr, + uint32_t value) +{ + int rval = QLA_SUCCESS; + + rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); + if (rval) + goto exit_ipmdio_wr_reg; + + ha->isp_ops->wr_reg_indirect(ha, addr3, value); + ha->isp_ops->wr_reg_indirect(ha, addr1, addr); + + rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); + if (rval) + goto exit_ipmdio_wr_reg; + +exit_ipmdio_wr_reg: + return rval; +} + +static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla8xxx_minidump_entry_crb *crb_hdr; + uint32_t *data_ptr = *d_ptr; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + crb_hdr = (struct qla8xxx_minidump_entry_crb *)entry_hdr; + r_addr = crb_hdr->addr; + r_stride = crb_hdr->crb_strd.addr_stride; + loop_cnt = crb_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value); + *data_ptr++ = cpu_to_le32(r_addr); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } + *d_ptr = data_ptr; +} + +static int qla4_83xx_check_dma_engine_state(struct scsi_qla_host *ha) +{ + int rval = QLA_SUCCESS; + uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; + uint64_t dma_base_addr = 0; + struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL; + + tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) + ha->fw_dump_tmplt_hdr; + dma_eng_num = + tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX]; + dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS + + (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET); + + /* Read the pex-dma's command-status-and-control register. 
*/ + rval = ha->isp_ops->rd_reg_indirect(ha, + (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL), + &cmd_sts_and_cntrl); + + if (rval) + return QLA_ERROR; + + /* Check if requested pex-dma engine is available. */ + if (cmd_sts_and_cntrl & BIT_31) + return QLA_SUCCESS; + else + return QLA_ERROR; +} + +static int qla4_83xx_start_pex_dma(struct scsi_qla_host *ha, + struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr) +{ + int rval = QLA_SUCCESS, wait = 0; + uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; + uint64_t dma_base_addr = 0; + struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL; + + tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) + ha->fw_dump_tmplt_hdr; + dma_eng_num = + tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX]; + dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS + + (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET); + + rval = ha->isp_ops->wr_reg_indirect(ha, + dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_LOW, + m_hdr->desc_card_addr); + if (rval) + goto error_exit; + + rval = ha->isp_ops->wr_reg_indirect(ha, + dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_HIGH, 0); + if (rval) + goto error_exit; + + rval = ha->isp_ops->wr_reg_indirect(ha, + dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL, + m_hdr->start_dma_cmd); + if (rval) + goto error_exit; + + /* Wait for dma operation to complete. */ + for (wait = 0; wait < QLA83XX_PEX_DMA_MAX_WAIT; wait++) { + rval = ha->isp_ops->rd_reg_indirect(ha, + (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL), + &cmd_sts_and_cntrl); + if (rval) + goto error_exit; + + if ((cmd_sts_and_cntrl & BIT_1) == 0) + break; + else + udelay(10); + } + + /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */ + if (wait >= QLA83XX_PEX_DMA_MAX_WAIT) { + rval = QLA_ERROR; + goto error_exit; + } + +error_exit: + return rval; +} + +static int qla4_8xxx_minidump_pex_dma_read(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + int rval = QLA_SUCCESS; + struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr = NULL; + uint32_t size, read_size; + uint8_t *data_ptr = (uint8_t *)*d_ptr; + void *rdmem_buffer = NULL; + dma_addr_t rdmem_dma; + struct qla4_83xx_pex_dma_descriptor dma_desc; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + + rval = qla4_83xx_check_dma_engine_state(ha); + if (rval != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: DMA engine not available. Fallback to rdmem-read.\n", + __func__)); + return QLA_ERROR; + } + + m_hdr = (struct qla4_83xx_minidump_entry_rdmem_pex_dma *)entry_hdr; + rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, + QLA83XX_PEX_DMA_READ_SIZE, + &rdmem_dma, GFP_KERNEL); + if (!rdmem_buffer) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Unable to allocate rdmem dma buffer\n", + __func__)); + return QLA_ERROR; + } + + /* Prepare pex-dma descriptor to be written to MS memory. */ + /* dma-desc-cmd layout: + * 0-3: dma-desc-cmd 0-3 + * 4-7: pcid function number + * 8-15: dma-desc-cmd 8-15 + */ + dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f); + dma_desc.cmd.dma_desc_cmd |= ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4); + dma_desc.dma_bus_addr = rdmem_dma; + + size = 0; + read_size = 0; + /* + * Perform rdmem operation using pex-dma. + * Prepare dma in chunks of QLA83XX_PEX_DMA_READ_SIZE. 
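+ * The final chunk, when shorter than QLA83XX_PEX_DMA_READ_SIZE, gets a freshly allocated DMA buffer sized to the remainder.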
+ */ + while (read_size < m_hdr->read_data_size) { + if (m_hdr->read_data_size - read_size >= + QLA83XX_PEX_DMA_READ_SIZE) + size = QLA83XX_PEX_DMA_READ_SIZE; + else { + size = (m_hdr->read_data_size - read_size); + + if (rdmem_buffer) + dma_free_coherent(&ha->pdev->dev, + QLA83XX_PEX_DMA_READ_SIZE, + rdmem_buffer, rdmem_dma); + + rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, size, + &rdmem_dma, + GFP_KERNEL); + if (!rdmem_buffer) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Unable to allocate rdmem dma buffer\n", + __func__)); + return QLA_ERROR; + } + dma_desc.dma_bus_addr = rdmem_dma; + } + + dma_desc.src_addr = m_hdr->read_addr + read_size; + dma_desc.cmd.read_data_size = size; + + /* Prepare: Write pex-dma descriptor to MS memory. */ + rval = qla4_8xxx_ms_mem_write_128b(ha, + (uint64_t)m_hdr->desc_card_addr, + (uint32_t *)&dma_desc, + (sizeof(struct qla4_83xx_pex_dma_descriptor)/16)); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, + "%s: Error writing rdmem-dma-init to MS !!!\n", + __func__); + goto error_exit; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Dma-desc: Instruct for rdmem dma (size 0x%x).\n", + __func__, size)); + /* Execute: Start pex-dma operation. */ + rval = qla4_83xx_start_pex_dma(ha, m_hdr); + if (rval != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi(%ld): start-pex-dma failed rval=0x%x\n", + ha->host_no, rval)); + goto error_exit; + } + + memcpy(data_ptr, rdmem_buffer, size); + data_ptr += size; + read_size += size; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__)); + + *d_ptr = (uint32_t *)data_ptr; + +error_exit: + if (rdmem_buffer) + dma_free_coherent(&ha->pdev->dev, size, rdmem_buffer, + rdmem_dma); + + return rval; +} + +static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + unsigned long p_wait, w_time, p_mask; + uint32_t c_value_w, c_value_r; + struct qla8xxx_minidump_entry_cache *cache_hdr; + int rval = QLA_ERROR; + uint32_t *data_ptr = *d_ptr; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr; + + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = cache_hdr->read_ctrl.read_addr_cnt; + p_wait = cache_hdr->cache_ctrl.poll_wait; + p_mask = cache_hdr->cache_ctrl.poll_mask; + + for (i = 0; i < loop_count; i++) { + ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value); + + if (c_value_w) + ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w); + + if (p_mask) { + w_time = jiffies + p_wait; + do { + ha->isp_ops->rd_reg_indirect(ha, c_addr, + &c_value_r); + if ((c_value_r & p_mask) == 0) { + break; + } else if (time_after_eq(jiffies, w_time)) { + /* capturing dump failed */ + return rval; + } + } while (1); + } + + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + ha->isp_ops->rd_reg_indirect(ha, addr, &r_value); + *data_ptr++ = cpu_to_le32(r_value); + addr += cache_hdr->read_ctrl.read_addr_stride; + } + + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; + return QLA_SUCCESS; +} + +static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr) +{ + struct 
qla8xxx_minidump_entry_crb *crb_entry; + uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS; + uint32_t crb_addr; + unsigned long wtime; + struct qla4_8xxx_minidump_template_hdr *tmplt_hdr; + int i; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) + ha->fw_dump_tmplt_hdr; + crb_entry = (struct qla8xxx_minidump_entry_crb *)entry_hdr; + + crb_addr = crb_entry->addr; + for (i = 0; i < crb_entry->op_count; i++) { + opcode = crb_entry->crb_ctrl.opcode; + if (opcode & QLA8XXX_DBG_OPCODE_WR) { + ha->isp_ops->wr_reg_indirect(ha, crb_addr, + crb_entry->value_1); + opcode &= ~QLA8XXX_DBG_OPCODE_WR; + } + if (opcode & QLA8XXX_DBG_OPCODE_RW) { + ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value); + ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value); + opcode &= ~QLA8XXX_DBG_OPCODE_RW; + } + if (opcode & QLA8XXX_DBG_OPCODE_AND) { + ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value); + read_value &= crb_entry->value_2; + opcode &= ~QLA8XXX_DBG_OPCODE_AND; + if (opcode & QLA8XXX_DBG_OPCODE_OR) { + read_value |= crb_entry->value_3; + opcode &= ~QLA8XXX_DBG_OPCODE_OR; + } + ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value); + } + if (opcode & QLA8XXX_DBG_OPCODE_OR) { + ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value); + read_value |= crb_entry->value_3; + ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value); + opcode &= ~QLA8XXX_DBG_OPCODE_OR; + } + if (opcode & QLA8XXX_DBG_OPCODE_POLL) { + poll_time = crb_entry->crb_strd.poll_timeout; + wtime = jiffies + poll_time; + ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value); + + do { + if ((read_value & crb_entry->value_2) == + crb_entry->value_1) { + break; + } else if (time_after_eq(jiffies, wtime)) { + /* capturing dump failed */ + rval = QLA_ERROR; + break; + } else { + ha->isp_ops->rd_reg_indirect(ha, + crb_addr, &read_value); + } + } while (1); + opcode &= ~QLA8XXX_DBG_OPCODE_POLL; + } + + if (opcode & QLA8XXX_DBG_OPCODE_RDSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else { + addr = crb_addr; + } + + ha->isp_ops->rd_reg_indirect(ha, addr, &read_value); + index = crb_entry->crb_ctrl.state_index_v; + tmplt_hdr->saved_state_array[index] = read_value; + opcode &= ~QLA8XXX_DBG_OPCODE_RDSTATE; + } + + if (opcode & QLA8XXX_DBG_OPCODE_WRSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else { + addr = crb_addr; + } + + if (crb_entry->crb_ctrl.state_index_v) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = + tmplt_hdr->saved_state_array[index]; + } else { + read_value = crb_entry->value_1; + } + + ha->isp_ops->wr_reg_indirect(ha, addr, read_value); + opcode &= ~QLA8XXX_DBG_OPCODE_WRSTATE; + } + + if (opcode & QLA8XXX_DBG_OPCODE_MDSTATE) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = tmplt_hdr->saved_state_array[index]; + read_value <<= crb_entry->crb_ctrl.shl; + read_value >>= crb_entry->crb_ctrl.shr; + if (crb_entry->value_2) + read_value &= crb_entry->value_2; + read_value |= crb_entry->value_3; + read_value += crb_entry->value_1; + tmplt_hdr->saved_state_array[index] = read_value; + opcode &= ~QLA8XXX_DBG_OPCODE_MDSTATE; + } + crb_addr += crb_entry->crb_strd.addr_stride; + } + DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__)); + return rval; +} + +static void 
qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla8xxx_minidump_entry_rdocm *ocm_hdr; + uint32_t *data_ptr = *d_ptr; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + ocm_hdr = (struct qla8xxx_minidump_entry_rdocm *)entry_hdr; + r_addr = ocm_hdr->read_addr; + r_stride = ocm_hdr->read_addr_stride; + loop_cnt = ocm_hdr->op_count; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n", + __func__, r_addr, r_stride, loop_cnt)); + + for (i = 0; i < loop_cnt; i++) { + r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase)); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } + DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n", + __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t)))); + *d_ptr = data_ptr; +} + +static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; + struct qla8xxx_minidump_entry_mux *mux_hdr; + uint32_t *data_ptr = *d_ptr; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + mux_hdr = (struct qla8xxx_minidump_entry_mux *)entry_hdr; + r_addr = mux_hdr->read_addr; + s_addr = mux_hdr->select_addr; + s_stride = mux_hdr->select_value_stride; + s_value = mux_hdr->select_value; + loop_cnt = mux_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value); + ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value); + *data_ptr++ = cpu_to_le32(s_value); + *data_ptr++ = cpu_to_le32(r_value); + s_value += s_stride; + } + *d_ptr = data_ptr; +} + +static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + uint32_t c_value_w; + struct qla8xxx_minidump_entry_cache *cache_hdr; + uint32_t *data_ptr = *d_ptr; + + cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr; + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = cache_hdr->read_ctrl.read_addr_cnt; + + for (i = 0; i < loop_count; i++) { + ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value); + ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w); + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + ha->isp_ops->rd_reg_indirect(ha, addr, &r_value); + *data_ptr++ = cpu_to_le32(r_value); + addr += cache_hdr->read_ctrl.read_addr_stride; + } + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; +} + +static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t s_addr, r_addr; + uint32_t r_stride, r_value, r_cnt, qid = 0; + uint32_t i, k, loop_cnt; + struct qla8xxx_minidump_entry_queue *q_hdr; + uint32_t *data_ptr = *d_ptr; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + q_hdr = (struct qla8xxx_minidump_entry_queue *)entry_hdr; + s_addr = q_hdr->select_addr; + r_cnt = q_hdr->rd_strd.read_addr_cnt; + r_stride = q_hdr->rd_strd.read_addr_stride; + loop_cnt = q_hdr->op_count; + + for (i 
= 0; i < loop_cnt; i++) { + ha->isp_ops->wr_reg_indirect(ha, s_addr, qid); + r_addr = q_hdr->read_addr; + for (k = 0; k < r_cnt; k++) { + ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } + qid += q_hdr->q_strd.queue_id_stride; + } + *d_ptr = data_ptr; +} + +#define MD_DIRECT_ROM_WINDOW 0x42110030 +#define MD_DIRECT_ROM_READ_BASE 0x42150000 + +static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, r_value; + uint32_t i, loop_cnt; + struct qla8xxx_minidump_entry_rdrom *rom_hdr; + uint32_t *data_ptr = *d_ptr; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr; + r_addr = rom_hdr->read_addr; + loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "[%s]: flash_addr: 0x%x, read_data_size: 0x%x\n", + __func__, r_addr, loop_cnt)); + + for (i = 0; i < loop_cnt; i++) { + ha->isp_ops->wr_reg_indirect(ha, MD_DIRECT_ROM_WINDOW, + (r_addr & 0xFFFF0000)); + ha->isp_ops->rd_reg_indirect(ha, + MD_DIRECT_ROM_READ_BASE + (r_addr & 0x0000FFFF), + &r_value); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += sizeof(uint32_t); + } + *d_ptr = data_ptr; +} + +#define MD_MIU_TEST_AGT_CTRL 0x41000090 +#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 +#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 + +static int __qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, r_value, r_data; + uint32_t i, j, loop_cnt; + struct qla8xxx_minidump_entry_rdmem *m_hdr; + unsigned long flags; + uint32_t *data_ptr = *d_ptr; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + m_hdr = (struct qla8xxx_minidump_entry_rdmem *)entry_hdr; + r_addr = m_hdr->read_addr; + loop_cnt = m_hdr->read_data_size/16; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n", + __func__, r_addr, m_hdr->read_data_size)); + + if (r_addr & 0xf) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "[%s]: Read addr 0x%x not 16 bytes aligned\n", + __func__, r_addr)); + return QLA_ERROR; + } + + if (m_hdr->read_data_size % 16) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "[%s]: Read data[0x%x] not multiple of 16 bytes\n", + __func__, m_hdr->read_data_size)); + return QLA_ERROR; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, + "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", + __func__, r_addr, m_hdr->read_data_size, loop_cnt)); + + write_lock_irqsave(&ha->hw_lock, flags); + for (i = 0; i < loop_cnt; i++) { + ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO, + r_addr); + r_value = 0; + ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, + r_value); + r_value = MIU_TA_CTL_ENABLE; + ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value); + r_value = MIU_TA_CTL_START_ENABLE; + ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + ha->isp_ops->rd_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, + &r_value); + if ((r_value & MIU_TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + printk_ratelimited(KERN_ERR + "%s: failed to read through agent\n", + __func__); + write_unlock_irqrestore(&ha->hw_lock, flags); + return QLA_SUCCESS; + } + + for (j = 0; j < 4; j++) { + ha->isp_ops->rd_reg_indirect(ha, + MD_MIU_TEST_AGT_RDDATA[j], + &r_data); + *data_ptr++ = cpu_to_le32(r_data); + } + + 
r_addr += 16; + } + write_unlock_irqrestore(&ha->hw_lock, flags); + + DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n", + __func__, (loop_cnt * 16))); + + *d_ptr = data_ptr; + return QLA_SUCCESS; +} + +static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t *data_ptr = *d_ptr; + int rval = QLA_SUCCESS; + + rval = qla4_8xxx_minidump_pex_dma_read(ha, entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) + rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, + &data_ptr); + *d_ptr = data_ptr; + return rval; +} + +static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + int index) +{ + entry_hdr->d_ctrl.driver_flags |= QLA8XXX_DBG_SKIPPED_FLAG; + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", + ha->host_no, index, entry_hdr->entry_type, + entry_hdr->d_ctrl.entry_capture_mask)); + /* If driver encounters a new entry type that it cannot process, + * it should just skip the entry and adjust the total buffer size by + * from subtracting the skipped bytes from it + */ + ha->fw_dump_skip_size += entry_hdr->entry_capture_size; +} + +/* ISP83xx functions to process new minidump entries... */ +static uint32_t qla83xx_minidump_process_pollrd(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask; + uint16_t s_stride, i; + uint32_t *data_ptr = *d_ptr; + uint32_t rval = QLA_SUCCESS; + struct qla83xx_minidump_entry_pollrd *pollrd_hdr; + + pollrd_hdr = (struct qla83xx_minidump_entry_pollrd *)entry_hdr; + s_addr = le32_to_cpu(pollrd_hdr->select_addr); + r_addr = le32_to_cpu(pollrd_hdr->read_addr); + s_value = le32_to_cpu(pollrd_hdr->select_value); + s_stride = le32_to_cpu(pollrd_hdr->select_value_stride); + + poll_wait = le32_to_cpu(pollrd_hdr->poll_wait); + poll_mask = le32_to_cpu(pollrd_hdr->poll_mask); + + for (i = 0; i < le32_to_cpu(pollrd_hdr->op_count); i++) { + ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value); + poll_wait = le32_to_cpu(pollrd_hdr->poll_wait); + while (1) { + ha->isp_ops->rd_reg_indirect(ha, s_addr, &r_value); + + if ((r_value & poll_mask) != 0) { + break; + } else { + msleep(1); + if (--poll_wait == 0) { + ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", + __func__); + rval = QLA_ERROR; + goto exit_process_pollrd; + } + } + } + ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value); + *data_ptr++ = cpu_to_le32(s_value); + *data_ptr++ = cpu_to_le32(r_value); + s_value += s_stride; + } + + *d_ptr = data_ptr; + +exit_process_pollrd: + return rval; +} + +static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + int loop_cnt; + uint32_t addr1, addr2, value, data, temp, wrval; + uint8_t stride, stride2; + uint16_t count; + uint32_t poll, mask, modify_mask; + uint32_t wait_count = 0; + uint32_t *data_ptr = *d_ptr; + struct qla8044_minidump_entry_rddfe *rddfe; + uint32_t rval = QLA_SUCCESS; + + rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr; + addr1 = le32_to_cpu(rddfe->addr_1); + value = le32_to_cpu(rddfe->value); + stride = le32_to_cpu(rddfe->stride); + stride2 = le32_to_cpu(rddfe->stride2); + count = le32_to_cpu(rddfe->count); + + poll = le32_to_cpu(rddfe->poll); + mask = le32_to_cpu(rddfe->mask); + modify_mask = le32_to_cpu(rddfe->modify_mask); + + addr2 = addr1 + stride; + 
+ for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) { + ha->isp_ops->wr_reg_indirect(ha, addr1, (0x40000000 | value)); + + wait_count = 0; + while (wait_count < poll) { + ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll) { + ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__); + rval = QLA_ERROR; + goto exit_process_rddfe; + } else { + ha->isp_ops->rd_reg_indirect(ha, addr2, &temp); + temp = temp & modify_mask; + temp = (temp | ((loop_cnt << 16) | loop_cnt)); + wrval = ((temp << 16) | temp); + + ha->isp_ops->wr_reg_indirect(ha, addr2, wrval); + ha->isp_ops->wr_reg_indirect(ha, addr1, value); + + wait_count = 0; + while (wait_count < poll) { + ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + if (wait_count == poll) { + ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", + __func__); + rval = QLA_ERROR; + goto exit_process_rddfe; + } + + ha->isp_ops->wr_reg_indirect(ha, addr1, + ((0x40000000 | value) + + stride2)); + wait_count = 0; + while (wait_count < poll) { + ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll) { + ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", + __func__); + rval = QLA_ERROR; + goto exit_process_rddfe; + } + + ha->isp_ops->rd_reg_indirect(ha, addr2, &data); + + *data_ptr++ = cpu_to_le32(wrval); + *data_ptr++ = cpu_to_le32(data); + } + } + + *d_ptr = data_ptr; +exit_process_rddfe: + return rval; +} + +static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + int rval = QLA_SUCCESS; + uint32_t addr1, addr2, value1, value2, data, selval; + uint8_t stride1, stride2; + uint32_t addr3, addr4, addr5, addr6, addr7; + uint16_t count, loop_cnt; + uint32_t mask; + uint32_t *data_ptr = *d_ptr; + struct qla8044_minidump_entry_rdmdio *rdmdio; + + rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr; + addr1 = le32_to_cpu(rdmdio->addr_1); + addr2 = le32_to_cpu(rdmdio->addr_2); + value1 = le32_to_cpu(rdmdio->value_1); + stride1 = le32_to_cpu(rdmdio->stride_1); + stride2 = le32_to_cpu(rdmdio->stride_2); + count = le32_to_cpu(rdmdio->count); + + mask = le32_to_cpu(rdmdio->mask); + value2 = le32_to_cpu(rdmdio->value_2); + + addr3 = addr1 + stride1; + + for (loop_cnt = 0; loop_cnt < count; loop_cnt++) { + rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2, + addr3, mask); + if (rval) + goto exit_process_rdmdio; + + addr4 = addr2 - stride1; + rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr4, + value2); + if (rval) + goto exit_process_rdmdio; + + addr5 = addr2 - (2 * stride1); + rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr5, + value1); + if (rval) + goto exit_process_rdmdio; + + addr6 = addr2 - (3 * stride1); + rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, + addr6, 0x2); + if (rval) + goto exit_process_rdmdio; + + rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2, + addr3, mask); + if (rval) + goto exit_process_rdmdio; + + addr7 = addr2 - (4 * stride1); + rval = ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, + mask, addr7, &data); + if (rval) + goto exit_process_rdmdio; + + selval = (value2 << 18) | (value1 << 2) | 2; + + stride2 = le32_to_cpu(rdmdio->stride_2); + *data_ptr++ = cpu_to_le32(selval); + *data_ptr++ = cpu_to_le32(data); + + value1 = value1 + stride2; + *d_ptr = data_ptr; + } + +exit_process_rdmdio: + return rval; +} + +static uint32_t 
qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t addr1, addr2, value1, value2, poll, r_value; + struct qla8044_minidump_entry_pollwr *pollwr_hdr; + uint32_t wait_count = 0; + uint32_t rval = QLA_SUCCESS; + + pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr; + addr1 = le32_to_cpu(pollwr_hdr->addr_1); + addr2 = le32_to_cpu(pollwr_hdr->addr_2); + value1 = le32_to_cpu(pollwr_hdr->value_1); + value2 = le32_to_cpu(pollwr_hdr->value_2); + + poll = le32_to_cpu(pollwr_hdr->poll); + + while (wait_count < poll) { + ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value); + + if ((r_value & poll) != 0) + break; + + wait_count++; + } + + if (wait_count == poll) { + ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__); + rval = QLA_ERROR; + goto exit_process_pollwr; + } + + ha->isp_ops->wr_reg_indirect(ha, addr2, value2); + ha->isp_ops->wr_reg_indirect(ha, addr1, value1); + + wait_count = 0; + while (wait_count < poll) { + ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value); + + if ((r_value & poll) != 0) + break; + wait_count++; + } + +exit_process_pollwr: + return rval; +} + +static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t sel_val1, sel_val2, t_sel_val, data, i; + uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr; + struct qla83xx_minidump_entry_rdmux2 *rdmux2_hdr; + uint32_t *data_ptr = *d_ptr; + + rdmux2_hdr = (struct qla83xx_minidump_entry_rdmux2 *)entry_hdr; + sel_val1 = le32_to_cpu(rdmux2_hdr->select_value_1); + sel_val2 = le32_to_cpu(rdmux2_hdr->select_value_2); + sel_addr1 = le32_to_cpu(rdmux2_hdr->select_addr_1); + sel_addr2 = le32_to_cpu(rdmux2_hdr->select_addr_2); + sel_val_mask = le32_to_cpu(rdmux2_hdr->select_value_mask); + read_addr = le32_to_cpu(rdmux2_hdr->read_addr); + + for (i = 0; i < rdmux2_hdr->op_count; i++) { + ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val1); + t_sel_val = sel_val1 & sel_val_mask; + *data_ptr++ = cpu_to_le32(t_sel_val); + + ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val); + ha->isp_ops->rd_reg_indirect(ha, read_addr, &data); + + *data_ptr++ = cpu_to_le32(data); + + ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val2); + t_sel_val = sel_val2 & sel_val_mask; + *data_ptr++ = cpu_to_le32(t_sel_val); + + ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val); + ha->isp_ops->rd_reg_indirect(ha, read_addr, &data); + + *data_ptr++ = cpu_to_le32(data); + + sel_val1 += rdmux2_hdr->select_value_stride; + sel_val2 += rdmux2_hdr->select_value_stride; + } + + *d_ptr = data_ptr; +} + +static uint32_t qla83xx_minidump_process_pollrdmwr(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t poll_wait, poll_mask, r_value, data; + uint32_t addr_1, addr_2, value_1, value_2; + uint32_t *data_ptr = *d_ptr; + uint32_t rval = QLA_SUCCESS; + struct qla83xx_minidump_entry_pollrdmwr *poll_hdr; + + poll_hdr = (struct qla83xx_minidump_entry_pollrdmwr *)entry_hdr; + addr_1 = le32_to_cpu(poll_hdr->addr_1); + addr_2 = le32_to_cpu(poll_hdr->addr_2); + value_1 = le32_to_cpu(poll_hdr->value_1); + value_2 = le32_to_cpu(poll_hdr->value_2); + poll_mask = le32_to_cpu(poll_hdr->poll_mask); + + ha->isp_ops->wr_reg_indirect(ha, addr_1, value_1); + + poll_wait = le32_to_cpu(poll_hdr->poll_wait); + while (1) { + ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value); + + if ((r_value & poll_mask) != 0) { + break; + } else { + msleep(1); + if 
(--poll_wait == 0) { + ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_1\n", + __func__); + rval = QLA_ERROR; + goto exit_process_pollrdmwr; + } + } + } + + ha->isp_ops->rd_reg_indirect(ha, addr_2, &data); + data &= le32_to_cpu(poll_hdr->modify_mask); + ha->isp_ops->wr_reg_indirect(ha, addr_2, data); + ha->isp_ops->wr_reg_indirect(ha, addr_1, value_2); + + poll_wait = le32_to_cpu(poll_hdr->poll_wait); + while (1) { + ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value); + + if ((r_value & poll_mask) != 0) { + break; + } else { + msleep(1); + if (--poll_wait == 0) { + ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_2\n", + __func__); + rval = QLA_ERROR; + goto exit_process_pollrdmwr; + } + } + } + + *data_ptr++ = cpu_to_le32(addr_2); + *data_ptr++ = cpu_to_le32(data); + *d_ptr = data_ptr; + +exit_process_pollrdmwr: + return rval; +} + +static uint32_t qla4_83xx_minidump_process_rdrom(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t fl_addr, u32_count, rval; + struct qla8xxx_minidump_entry_rdrom *rom_hdr; + uint32_t *data_ptr = *d_ptr; + + rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr; + fl_addr = le32_to_cpu(rom_hdr->read_addr); + u32_count = le32_to_cpu(rom_hdr->read_data_size)/sizeof(uint32_t); + + DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: fl_addr: 0x%x, count: 0x%x\n", + __func__, fl_addr, u32_count)); + + rval = qla4_83xx_lockless_flash_read_u32(ha, fl_addr, + (u8 *)(data_ptr), u32_count); + + if (rval == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: Flash Read Error,Count=%d\n", + __func__, u32_count); + goto exit_process_rdrom; + } + + data_ptr += u32_count; + *d_ptr = data_ptr; + +exit_process_rdrom: + return rval; +} + +/** + * qla4_8xxx_collect_md_data - Retrieve firmware minidump data. + * @ha: pointer to adapter structure + **/ +static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) +{ + int num_entry_hdr = 0; + struct qla8xxx_minidump_entry_hdr *entry_hdr; + struct qla4_8xxx_minidump_template_hdr *tmplt_hdr; + uint32_t *data_ptr; + uint32_t data_collected = 0; + int i, rval = QLA_ERROR; + uint64_t now; + uint32_t timestamp; + + ha->fw_dump_skip_size = 0; + if (!ha->fw_dump) { + ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n", + __func__, ha->host_no); + return rval; + } + + tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) + ha->fw_dump_tmplt_hdr; + data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump + + ha->fw_dump_tmplt_size); + data_collected += ha->fw_dump_tmplt_size; + + num_entry_hdr = tmplt_hdr->num_of_entries; + ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n", + __func__, data_ptr); + ql4_printk(KERN_INFO, ha, + "[%s]: no of entry headers in Template: 0x%x\n", + __func__, num_entry_hdr); + ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n", + __func__, ha->fw_dump_capture_mask); + ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n", + __func__, ha->fw_dump_size, ha->fw_dump_size); + + /* Update current timestamp before taking dump */ + now = get_jiffies_64(); + timestamp = (u32)(jiffies_to_msecs(now) / 1000); + tmplt_hdr->driver_timestamp = timestamp; + + entry_hdr = (struct qla8xxx_minidump_entry_hdr *) + (((uint8_t *)ha->fw_dump_tmplt_hdr) + + tmplt_hdr->first_entry_offset); + + if (is_qla8032(ha) || is_qla8042(ha)) + tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] = + tmplt_hdr->ocm_window_reg[ha->func_num]; + + /* Walk through the entry headers - validate/perform required action */ + for (i = 0; i < num_entry_hdr; i++) { + if (data_collected > 
ha->fw_dump_size) { + ql4_printk(KERN_INFO, ha, + "Data collected: [0x%x], Total Dump size: [0x%x]\n", + data_collected, ha->fw_dump_size); + return rval; + } + + if (!(entry_hdr->d_ctrl.entry_capture_mask & + ha->fw_dump_capture_mask)) { + entry_hdr->d_ctrl.driver_flags |= + QLA8XXX_DBG_SKIPPED_FLAG; + goto skip_nxt_entry; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Data collected: [0x%x], Dump size left:[0x%x]\n", + data_collected, + (ha->fw_dump_size - data_collected))); + + /* Decode the entry type and take required action to capture + * debug data + */ + switch (entry_hdr->entry_type) { + case QLA8XXX_RDEND: + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + case QLA8XXX_CNTRL: + rval = qla4_8xxx_minidump_process_control(ha, + entry_hdr); + if (rval != QLA_SUCCESS) { + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + goto md_failed; + } + break; + case QLA8XXX_RDCRB: + qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr, + &data_ptr); + break; + case QLA8XXX_RDMEM: + rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) { + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + goto md_failed; + } + break; + case QLA8XXX_BOARD: + case QLA8XXX_RDROM: + if (is_qla8022(ha)) { + qla4_82xx_minidump_process_rdrom(ha, entry_hdr, + &data_ptr); + } else if (is_qla8032(ha) || is_qla8042(ha)) { + rval = qla4_83xx_minidump_process_rdrom(ha, + entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla4_8xxx_mark_entry_skipped(ha, + entry_hdr, + i); + } + break; + case QLA8XXX_L2DTG: + case QLA8XXX_L2ITG: + case QLA8XXX_L2DAT: + case QLA8XXX_L2INS: + rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) { + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + goto md_failed; + } + break; + case QLA8XXX_L1DTG: + case QLA8XXX_L1ITG: + case QLA8XXX_L1DAT: + case QLA8XXX_L1INS: + qla4_8xxx_minidump_process_l1cache(ha, entry_hdr, + &data_ptr); + break; + case QLA8XXX_RDOCM: + qla4_8xxx_minidump_process_rdocm(ha, entry_hdr, + &data_ptr); + break; + case QLA8XXX_RDMUX: + qla4_8xxx_minidump_process_rdmux(ha, entry_hdr, + &data_ptr); + break; + case QLA8XXX_QUEUE: + qla4_8xxx_minidump_process_queue(ha, entry_hdr, + &data_ptr); + break; + case QLA83XX_POLLRD: + if (is_qla8022(ha)) { + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + } + rval = qla83xx_minidump_process_pollrd(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + case QLA83XX_RDMUX2: + if (is_qla8022(ha)) { + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + } + qla83xx_minidump_process_rdmux2(ha, entry_hdr, + &data_ptr); + break; + case QLA83XX_POLLRDMWR: + if (is_qla8022(ha)) { + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + } + rval = qla83xx_minidump_process_pollrdmwr(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + case QLA8044_RDDFE: + rval = qla4_84xx_minidump_process_rddfe(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + case QLA8044_RDMDIO: + rval = qla4_84xx_minidump_process_rdmdio(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + case QLA8044_POLLWR: + rval = qla4_84xx_minidump_process_pollwr(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + case QLA8XXX_RDNOP: + default: + 
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + } + + data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->fw_dump; +skip_nxt_entry: + /* next entry in the template */ + entry_hdr = (struct qla8xxx_minidump_entry_hdr *) + (((uint8_t *)entry_hdr) + + entry_hdr->entry_size); + } + + if ((data_collected + ha->fw_dump_skip_size) != ha->fw_dump_size) { + ql4_printk(KERN_INFO, ha, + "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n", + data_collected, ha->fw_dump_size); + rval = QLA_ERROR; + goto md_failed; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n", + __func__, i)); +md_failed: + return rval; +} + +/** + * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready. + * @ha: pointer to adapter structure + * @code: uevent code to act upon + **/ +static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code) +{ + char event_string[40]; + char *envp[] = { event_string, NULL }; + + switch (code) { + case QL4_UEVENT_CODE_FW_DUMP: + snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu", + ha->host_no); + break; + default: + /*do nothing*/ + break; + } + + kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp); +} + +void qla4_8xxx_get_minidump(struct scsi_qla_host *ha) +{ + if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) && + !test_bit(AF_82XX_FW_DUMPED, &ha->flags)) { + if (!qla4_8xxx_collect_md_data(ha)) { + qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP); + set_bit(AF_82XX_FW_DUMPED, &ha->flags); + } else { + ql4_printk(KERN_INFO, ha, "%s: Unable to collect minidump\n", + __func__); + } + } +} + +/** + * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw + * @ha: pointer to adapter structure + * + * Note: IDC lock must be held upon entry + **/ +int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha) +{ + int rval = QLA_ERROR; + int i; + uint32_t old_count, count; + int need_reset = 0; + + need_reset = ha->isp_ops->need_reset(ha); + + if (need_reset) { + /* We are trying to perform a recovery here. 
*/ + if (test_bit(AF_FW_RECOVERY, &ha->flags)) + ha->isp_ops->rom_lock_recovery(ha); + } else { + old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); + for (i = 0; i < 10; i++) { + msleep(200); + count = qla4_8xxx_rd_direct(ha, + QLA8XXX_PEG_ALIVE_COUNTER); + if (count != old_count) { + rval = QLA_SUCCESS; + goto dev_ready; + } + } + ha->isp_ops->rom_lock_recovery(ha); + } + + /* set to DEV_INITIALIZING */ + ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n"); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, + QLA8XXX_DEV_INITIALIZING); + + ha->isp_ops->idc_unlock(ha); + + if (is_qla8022(ha)) + qla4_8xxx_get_minidump(ha); + + rval = ha->isp_ops->restart_firmware(ha); + ha->isp_ops->idc_lock(ha); + + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); + qla4_8xxx_clear_drv_active(ha); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, + QLA8XXX_DEV_FAILED); + return rval; + } + +dev_ready: + ql4_printk(KERN_INFO, ha, "HW State: READY\n"); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_READY); + + return rval; +} + +/** + * qla4_82xx_need_reset_handler - Code to start reset sequence + * @ha: pointer to adapter structure + * + * Note: IDC lock must be held upon entry + **/ +static void +qla4_82xx_need_reset_handler(struct scsi_qla_host *ha) +{ + uint32_t dev_state, drv_state, drv_active; + uint32_t active_mask = 0xFFFFFFFF; + unsigned long reset_timeout; + + ql4_printk(KERN_INFO, ha, + "Performing ISP error recovery\n"); + + if (test_and_clear_bit(AF_ONLINE, &ha->flags)) { + qla4_82xx_idc_unlock(ha); + ha->isp_ops->disable_intrs(ha); + qla4_82xx_idc_lock(ha); + } + + if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s(%ld): reset acknowledged\n", + __func__, ha->host_no)); + qla4_8xxx_set_rst_ready(ha); + } else { + active_mask = (~(1 << (ha->func_num * 4))); + } + + /* wait for 10 seconds for reset ack from all functions */ + reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); + + drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); + drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + + ql4_printk(KERN_INFO, ha, + "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", + __func__, ha->host_no, drv_state, drv_active); + + while (drv_state != (drv_active & active_mask)) { + if (time_after_eq(jiffies, reset_timeout)) { + ql4_printk(KERN_INFO, ha, + "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n", + DRIVER_NAME, drv_state, drv_active); + break; + } + + /* + * When reset_owner times out, check which functions + * acked/did not ack + */ + if (test_bit(AF_8XXX_RST_OWNER, &ha->flags)) { + ql4_printk(KERN_INFO, ha, + "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", + __func__, ha->host_no, drv_state, + drv_active); + } + qla4_82xx_idc_unlock(ha); + msleep(1000); + qla4_82xx_idc_lock(ha); + + drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); + drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + } + + /* Clear RESET OWNER as we are not going to use it any further */ + clear_bit(AF_8XXX_RST_OWNER, &ha->flags); + + dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state, + dev_state < MAX_STATES ? 
qdev_state[dev_state] : "Unknown"); + + /* Force to DEV_COLD unless someone else is starting a reset */ + if (dev_state != QLA8XXX_DEV_INITIALIZING) { + ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n"); + qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD); + qla4_8xxx_set_rst_ready(ha); + } +} + +/** + * qla4_8xxx_need_qsnt_handler - Code to start qsnt + * @ha: pointer to adapter structure + **/ +void +qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha) +{ + ha->isp_ops->idc_lock(ha); + qla4_8xxx_set_qsnt_ready(ha); + ha->isp_ops->idc_unlock(ha); +} + +static void qla4_82xx_set_idc_ver(struct scsi_qla_host *ha) +{ + int idc_ver; + uint32_t drv_active; + + drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); + if (drv_active == (1 << (ha->func_num * 4))) { + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION, + QLA82XX_IDC_VERSION); + ql4_printk(KERN_INFO, ha, + "%s: IDC version updated to %d\n", __func__, + QLA82XX_IDC_VERSION); + } else { + idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION); + if (QLA82XX_IDC_VERSION != idc_ver) { + ql4_printk(KERN_INFO, ha, + "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n", + __func__, QLA82XX_IDC_VERSION, idc_ver); + } + } +} + +static int qla4_83xx_set_idc_ver(struct scsi_qla_host *ha) +{ + int idc_ver; + uint32_t drv_active; + int rval = QLA_SUCCESS; + + drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); + if (drv_active == (1 << ha->func_num)) { + idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION); + idc_ver &= (~0xFF); + idc_ver |= QLA83XX_IDC_VER_MAJ_VALUE; + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION, idc_ver); + ql4_printk(KERN_INFO, ha, + "%s: IDC version updated to %d\n", __func__, + idc_ver); + } else { + idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION); + idc_ver &= 0xFF; + if (QLA83XX_IDC_VER_MAJ_VALUE != idc_ver) { + ql4_printk(KERN_INFO, ha, + "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n", + __func__, QLA83XX_IDC_VER_MAJ_VALUE, + idc_ver); + rval = QLA_ERROR; + goto exit_set_idc_ver; + } + } + + /* Update IDC_MINOR_VERSION */ + idc_ver = qla4_83xx_rd_reg(ha, QLA83XX_CRB_IDC_VER_MINOR); + idc_ver &= ~(0x03 << (ha->func_num * 2)); + idc_ver |= (QLA83XX_IDC_VER_MIN_VALUE << (ha->func_num * 2)); + qla4_83xx_wr_reg(ha, QLA83XX_CRB_IDC_VER_MINOR, idc_ver); + +exit_set_idc_ver: + return rval; +} + +int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha) +{ + uint32_t drv_active; + int rval = QLA_SUCCESS; + + if (test_bit(AF_INIT_DONE, &ha->flags)) + goto exit_update_idc_reg; + + ha->isp_ops->idc_lock(ha); + qla4_8xxx_set_drv_active(ha); + + /* + * If we are the first driver to load and + * ql4xdontresethba is not set, clear IDC_CTRL BIT0. + */ + if (is_qla8032(ha) || is_qla8042(ha)) { + drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); + if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba) + qla4_83xx_clear_idc_dontreset(ha); + } + + if (is_qla8022(ha)) { + qla4_82xx_set_idc_ver(ha); + } else if (is_qla8032(ha) || is_qla8042(ha)) { + rval = qla4_83xx_set_idc_ver(ha); + if (rval == QLA_ERROR) + qla4_8xxx_clear_drv_active(ha); + } + + ha->isp_ops->idc_unlock(ha); + +exit_update_idc_reg: + return rval; +} + +/** + * qla4_8xxx_device_state_handler - Adapter state machine + * @ha: pointer to host adapter structure. 
+ * + * Note: IDC lock must be UNLOCKED upon entry + **/ +int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha) +{ + uint32_t dev_state; + int rval = QLA_SUCCESS; + unsigned long dev_init_timeout; + + rval = qla4_8xxx_update_idc_reg(ha); + if (rval == QLA_ERROR) + goto exit_state_handler; + + dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); + DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", + dev_state, dev_state < MAX_STATES ? + qdev_state[dev_state] : "Unknown")); + + /* wait for 30 seconds for device to go ready */ + dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); + + ha->isp_ops->idc_lock(ha); + while (1) { + + if (time_after_eq(jiffies, dev_init_timeout)) { + ql4_printk(KERN_WARNING, ha, + "%s: Device Init Failed 0x%x = %s\n", + DRIVER_NAME, + dev_state, dev_state < MAX_STATES ? + qdev_state[dev_state] : "Unknown"); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, + QLA8XXX_DEV_FAILED); + } + + dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); + ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", + dev_state, dev_state < MAX_STATES ? + qdev_state[dev_state] : "Unknown"); + + /* NOTE: Make sure idc unlocked upon exit of switch statement */ + switch (dev_state) { + case QLA8XXX_DEV_READY: + goto exit; + case QLA8XXX_DEV_COLD: + rval = qla4_8xxx_device_bootstrap(ha); + goto exit; + case QLA8XXX_DEV_INITIALIZING: + ha->isp_ops->idc_unlock(ha); + msleep(1000); + ha->isp_ops->idc_lock(ha); + break; + case QLA8XXX_DEV_NEED_RESET: + /* + * For ISP8324 and ISP8042, if NEED_RESET is set by any + * driver, it should be honored, irrespective of + * IDC_CTRL DONTRESET_BIT0 + */ + if (is_qla8032(ha) || is_qla8042(ha)) { + qla4_83xx_need_reset_handler(ha); + } else if (is_qla8022(ha)) { + if (!ql4xdontresethba) { + qla4_82xx_need_reset_handler(ha); + /* Update timeout value after need + * reset handler */ + dev_init_timeout = jiffies + + (ha->nx_dev_init_timeout * HZ); + } else { + ha->isp_ops->idc_unlock(ha); + msleep(1000); + ha->isp_ops->idc_lock(ha); + } + } + break; + case QLA8XXX_DEV_NEED_QUIESCENT: + /* idc locked/unlocked in handler */ + qla4_8xxx_need_qsnt_handler(ha); + break; + case QLA8XXX_DEV_QUIESCENT: + ha->isp_ops->idc_unlock(ha); + msleep(1000); + ha->isp_ops->idc_lock(ha); + break; + case QLA8XXX_DEV_FAILED: + ha->isp_ops->idc_unlock(ha); + qla4xxx_dead_adapter_cleanup(ha); + rval = QLA_ERROR; + ha->isp_ops->idc_lock(ha); + goto exit; + default: + ha->isp_ops->idc_unlock(ha); + qla4xxx_dead_adapter_cleanup(ha); + rval = QLA_ERROR; + ha->isp_ops->idc_lock(ha); + goto exit; + } + } +exit: + ha->isp_ops->idc_unlock(ha); +exit_state_handler: + return rval; +} + +int qla4_8xxx_load_risc(struct scsi_qla_host *ha) +{ + int retval; + + /* clear the interrupt */ + if (is_qla8032(ha) || is_qla8042(ha)) { + writel(0, &ha->qla4_83xx_reg->risc_intr); + readl(&ha->qla4_83xx_reg->risc_intr); + } else if (is_qla8022(ha)) { + writel(0, &ha->qla4_82xx_reg->host_int); + readl(&ha->qla4_82xx_reg->host_int); + } + + retval = qla4_8xxx_device_state_handler(ha); + + /* Initialize request and response queues. 
*/ + if (retval == QLA_SUCCESS) + qla4xxx_init_rings(ha); + + if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) + retval = qla4xxx_request_irqs(ha); + + return retval; +} + +/*****************************************************************************/ +/* Flash Manipulation Routines */ +/*****************************************************************************/ + +#define OPTROM_BURST_SIZE 0x1000 +#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4) + +#define FARX_DATA_FLAG BIT_31 +#define FARX_ACCESS_FLASH_CONF 0x7FFD0000 +#define FARX_ACCESS_FLASH_DATA 0x7FF00000 + +static inline uint32_t +flash_conf_addr(struct ql82xx_hw_data *hw, uint32_t faddr) +{ + return hw->flash_conf_off | faddr; +} + +static uint32_t * +qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr, + uint32_t faddr, uint32_t length) +{ + uint32_t i; + uint32_t val; + int loops = 0; + while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) { + udelay(100); + cond_resched(); + loops++; + } + if (loops >= 50000) { + ql4_printk(KERN_WARNING, ha, "ROM lock failed\n"); + return dwptr; + } + + /* Dword reads to flash. */ + for (i = 0; i < length/4; i++, faddr += 4) { + if (qla4_82xx_do_rom_fast_read(ha, faddr, &val)) { + ql4_printk(KERN_WARNING, ha, + "Do ROM fast read failed\n"); + goto done_read; + } + dwptr[i] = cpu_to_le32(val); + } + +done_read: + qla4_82xx_rom_unlock(ha); + return dwptr; +} + +/* + * Address and length are byte address + */ +static uint8_t * +qla4_82xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, + uint32_t offset, uint32_t length) +{ + qla4_82xx_read_flash_data(ha, (uint32_t *)buf, offset, length); + return buf; +} + +static int +qla4_8xxx_find_flt_start(struct scsi_qla_host *ha, uint32_t *start) +{ + const char *loc, *locations[] = { "DEF", "PCI" }; + + /* + * FLT-location structure resides after the last PCI region. + */ + + /* Begin with sane defaults. 
*/ + loc = locations[0]; + *start = FA_FLASH_LAYOUT_ADDR_82; + + DEBUG2(ql4_printk(KERN_INFO, ha, "FLTL[%s] = 0x%x.\n", loc, *start)); + return QLA_SUCCESS; +} + +static void +qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr) +{ + const char *loc, *locations[] = { "DEF", "FLT" }; + uint16_t *wptr; + uint16_t cnt, chksum; + uint32_t start, status; + struct qla_flt_header *flt; + struct qla_flt_region *region; + struct ql82xx_hw_data *hw = &ha->hw; + + hw->flt_region_flt = flt_addr; + wptr = (uint16_t *)ha->request_ring; + flt = (struct qla_flt_header *)ha->request_ring; + region = (struct qla_flt_region *)&flt[1]; + + if (is_qla8022(ha)) { + qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring, + flt_addr << 2, OPTROM_BURST_SIZE); + } else if (is_qla8032(ha) || is_qla8042(ha)) { + status = qla4_83xx_flash_read_u32(ha, flt_addr << 2, + (uint8_t *)ha->request_ring, + 0x400); + if (status != QLA_SUCCESS) + goto no_flash_data; + } + + if (*wptr == cpu_to_le16(0xffff)) + goto no_flash_data; + if (flt->version != cpu_to_le16(1)) { + DEBUG2(ql4_printk(KERN_INFO, ha, "Unsupported FLT detected: " + "version=0x%x length=0x%x checksum=0x%x.\n", + le16_to_cpu(flt->version), le16_to_cpu(flt->length), + le16_to_cpu(flt->checksum))); + goto no_flash_data; + } + + cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1; + for (chksum = 0; cnt; cnt--) + chksum += le16_to_cpu(*wptr++); + if (chksum) { + DEBUG2(ql4_printk(KERN_INFO, ha, "Inconsistent FLT detected: " + "version=0x%x length=0x%x checksum=0x%x.\n", + le16_to_cpu(flt->version), le16_to_cpu(flt->length), + chksum)); + goto no_flash_data; + } + + loc = locations[1]; + cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region); + for ( ; cnt; cnt--, region++) { + /* Store addresses as DWORD offsets. */ + start = le32_to_cpu(region->start) >> 2; + + DEBUG3(ql4_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x " + "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start, + le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size))); + + switch (le32_to_cpu(region->code) & 0xff) { + case FLT_REG_FDT: + hw->flt_region_fdt = start; + break; + case FLT_REG_BOOT_CODE_82: + hw->flt_region_boot = start; + break; + case FLT_REG_FW_82: + case FLT_REG_FW_82_1: + hw->flt_region_fw = start; + break; + case FLT_REG_BOOTLOAD_82: + hw->flt_region_bootload = start; + break; + case FLT_REG_ISCSI_PARAM: + hw->flt_iscsi_param = start; + break; + case FLT_REG_ISCSI_CHAP: + hw->flt_region_chap = start; + hw->flt_chap_size = le32_to_cpu(region->size); + break; + case FLT_REG_ISCSI_DDB: + hw->flt_region_ddb = start; + hw->flt_ddb_size = le32_to_cpu(region->size); + break; + } + } + goto done; + +no_flash_data: + /* Use hardcoded defaults. 
*/ + loc = locations[0]; + + hw->flt_region_fdt = FA_FLASH_DESCR_ADDR_82; + hw->flt_region_boot = FA_BOOT_CODE_ADDR_82; + hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82; + hw->flt_region_fw = FA_RISC_CODE_ADDR_82; + hw->flt_region_chap = FA_FLASH_ISCSI_CHAP >> 2; + hw->flt_chap_size = FA_FLASH_CHAP_SIZE; + hw->flt_region_ddb = FA_FLASH_ISCSI_DDB >> 2; + hw->flt_ddb_size = FA_FLASH_DDB_SIZE; + +done: + DEBUG2(ql4_printk(KERN_INFO, ha, + "FLT[%s]: flt=0x%x fdt=0x%x boot=0x%x bootload=0x%x fw=0x%x chap=0x%x chap_size=0x%x ddb=0x%x ddb_size=0x%x\n", + loc, hw->flt_region_flt, hw->flt_region_fdt, + hw->flt_region_boot, hw->flt_region_bootload, + hw->flt_region_fw, hw->flt_region_chap, + hw->flt_chap_size, hw->flt_region_ddb, + hw->flt_ddb_size)); +} + +static void +qla4_82xx_get_fdt_info(struct scsi_qla_host *ha) +{ +#define FLASH_BLK_SIZE_4K 0x1000 +#define FLASH_BLK_SIZE_32K 0x8000 +#define FLASH_BLK_SIZE_64K 0x10000 + const char *loc, *locations[] = { "MID", "FDT" }; + uint16_t cnt, chksum; + uint16_t *wptr; + struct qla_fdt_layout *fdt; + uint16_t mid = 0; + uint16_t fid = 0; + struct ql82xx_hw_data *hw = &ha->hw; + + hw->flash_conf_off = FARX_ACCESS_FLASH_CONF; + hw->flash_data_off = FARX_ACCESS_FLASH_DATA; + + wptr = (uint16_t *)ha->request_ring; + fdt = (struct qla_fdt_layout *)ha->request_ring; + qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring, + hw->flt_region_fdt << 2, OPTROM_BURST_SIZE); + + if (*wptr == cpu_to_le16(0xffff)) + goto no_flash_data; + + if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' || + fdt->sig[3] != 'D') + goto no_flash_data; + + for (cnt = 0, chksum = 0; cnt < sizeof(struct qla_fdt_layout) >> 1; + cnt++) + chksum += le16_to_cpu(*wptr++); + + if (chksum) { + DEBUG2(ql4_printk(KERN_INFO, ha, "Inconsistent FDT detected: " + "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0], + le16_to_cpu(fdt->version))); + goto no_flash_data; + } + + loc = locations[1]; + mid = le16_to_cpu(fdt->man_id); + fid = le16_to_cpu(fdt->id); + hw->fdt_wrt_disable = fdt->wrt_disable_bits; + hw->fdt_erase_cmd = flash_conf_addr(hw, 0x0300 | fdt->erase_cmd); + hw->fdt_block_size = le32_to_cpu(fdt->block_size); + + if (fdt->unprotect_sec_cmd) { + hw->fdt_unprotect_sec_cmd = flash_conf_addr(hw, 0x0300 | + fdt->unprotect_sec_cmd); + hw->fdt_protect_sec_cmd = fdt->protect_sec_cmd ? 
+ flash_conf_addr(hw, 0x0300 | fdt->protect_sec_cmd) : + flash_conf_addr(hw, 0x0336); + } + goto done; + +no_flash_data: + loc = locations[0]; + hw->fdt_block_size = FLASH_BLK_SIZE_64K; +done: + DEBUG2(ql4_printk(KERN_INFO, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x " + "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid, + hw->fdt_erase_cmd, hw->fdt_protect_sec_cmd, + hw->fdt_unprotect_sec_cmd, hw->fdt_wrt_disable, + hw->fdt_block_size)); +} + +static void +qla4_82xx_get_idc_param(struct scsi_qla_host *ha) +{ +#define QLA82XX_IDC_PARAM_ADDR 0x003e885c + uint32_t *wptr; + + if (!is_qla8022(ha)) + return; + wptr = (uint32_t *)ha->request_ring; + qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring, + QLA82XX_IDC_PARAM_ADDR , 8); + + if (*wptr == cpu_to_le32(0xffffffff)) { + ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT; + ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT; + } else { + ha->nx_dev_init_timeout = le32_to_cpu(*wptr++); + ha->nx_reset_timeout = le32_to_cpu(*wptr); + } + + DEBUG2(ql4_printk(KERN_DEBUG, ha, + "ha->nx_dev_init_timeout = %d\n", ha->nx_dev_init_timeout)); + DEBUG2(ql4_printk(KERN_DEBUG, ha, + "ha->nx_reset_timeout = %d\n", ha->nx_reset_timeout)); + return; +} + +void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd, + int in_count) +{ + int i; + + /* Load all mailbox registers, except mailbox 0. */ + for (i = 1; i < in_count; i++) + writel(mbx_cmd[i], &ha->qla4_82xx_reg->mailbox_in[i]); + + /* Wakeup firmware */ + writel(mbx_cmd[0], &ha->qla4_82xx_reg->mailbox_in[0]); + readl(&ha->qla4_82xx_reg->mailbox_in[0]); + writel(HINT_MBX_INT_PENDING, &ha->qla4_82xx_reg->hint); + readl(&ha->qla4_82xx_reg->hint); +} + +void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count) +{ + int intr_status; + + intr_status = readl(&ha->qla4_82xx_reg->host_int); + if (intr_status & ISRX_82XX_RISC_INT) { + ha->mbox_status_count = out_count; + intr_status = readl(&ha->qla4_82xx_reg->host_status); + ha->isp_ops->interrupt_service_routine(ha, intr_status); + + if (test_bit(AF_INTERRUPTS_ON, &ha->flags) && + (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled)) + qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, + 0xfbff); + } +} + +int +qla4_8xxx_get_flash_info(struct scsi_qla_host *ha) +{ + int ret; + uint32_t flt_addr; + + ret = qla4_8xxx_find_flt_start(ha, &flt_addr); + if (ret != QLA_SUCCESS) + return ret; + + qla4_8xxx_get_flt_info(ha, flt_addr); + if (is_qla8022(ha)) { + qla4_82xx_get_fdt_info(ha); + qla4_82xx_get_idc_param(ha); + } else if (is_qla8032(ha) || is_qla8042(ha)) { + qla4_83xx_get_idc_param(ha); + } + + return QLA_SUCCESS; +} + +/** + * qla4_8xxx_stop_firmware - stops firmware on specified adapter instance + * @ha: pointer to host adapter structure. + * + * Remarks: + * For iSCSI, throws away all I/O and AENs into bit bucket, so they will + * not be available after successful return. Driver must cleanup potential + * outstanding I/O's after calling this funcion. + **/ +int +qla4_8xxx_stop_firmware(struct scsi_qla_host *ha) +{ + int status; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_STOP_FW; + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, + &mbox_cmd[0], &mbox_sts[0]); + + DEBUG2(printk("scsi%ld: %s: status = %d\n", ha->host_no, + __func__, status)); + return status; +} + +/** + * qla4_82xx_isp_reset - Resets ISP and aborts all outstanding commands. 
+ * @ha: pointer to host adapter structure. + **/ +int +qla4_82xx_isp_reset(struct scsi_qla_host *ha) +{ + int rval; + uint32_t dev_state; + + qla4_82xx_idc_lock(ha); + dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + + if (dev_state == QLA8XXX_DEV_READY) { + ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n"); + qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA8XXX_DEV_NEED_RESET); + set_bit(AF_8XXX_RST_OWNER, &ha->flags); + } else + ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n"); + + qla4_82xx_idc_unlock(ha); + + rval = qla4_8xxx_device_state_handler(ha); + + qla4_82xx_idc_lock(ha); + qla4_8xxx_clear_rst_ready(ha); + qla4_82xx_idc_unlock(ha); + + if (rval == QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_82xx_isp_reset\n"); + clear_bit(AF_FW_RECOVERY, &ha->flags); + } + + return rval; +} + +/** + * qla4_8xxx_get_sys_info - get adapter MAC address(es) and serial number + * @ha: pointer to host adapter structure. + * + **/ +int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + struct mbx_sys_info *sys_info; + dma_addr_t sys_info_dma; + int status = QLA_ERROR; + + sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), + &sys_info_dma, GFP_KERNEL); + if (sys_info == NULL) { + DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", + ha->host_no, __func__)); + return status; + } + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_GET_SYS_INFO; + mbox_cmd[1] = LSDW(sys_info_dma); + mbox_cmd[2] = MSDW(sys_info_dma); + mbox_cmd[4] = sizeof(*sys_info); + + if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 6, &mbox_cmd[0], + &mbox_sts[0]) != QLA_SUCCESS) { + DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO failed\n", + ha->host_no, __func__)); + goto exit_validate_mac82; + } + + /* Make sure we receive the minimum required data to cache internally */ + if (((is_qla8032(ha) || is_qla8042(ha)) ? mbox_sts[3] : mbox_sts[4]) < + offsetof(struct mbx_sys_info, reserved)) { + DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive" + " error (%x)\n", ha->host_no, __func__, mbox_sts[4])); + goto exit_validate_mac82; + } + + /* Save M.A.C. address & serial_number */ + ha->port_num = sys_info->port_num; + memcpy(ha->my_mac, &sys_info->mac_addr[0], + min(sizeof(ha->my_mac), sizeof(sys_info->mac_addr))); + memcpy(ha->serial_number, &sys_info->serial_number, + min(sizeof(ha->serial_number), sizeof(sys_info->serial_number))); + memcpy(ha->model_name, &sys_info->board_id_str, + min(sizeof(ha->model_name), sizeof(sys_info->board_id_str))); + ha->phy_port_cnt = sys_info->phys_port_cnt; + ha->phy_port_num = sys_info->port_num; + ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt; + + DEBUG2(printk("scsi%ld: %s: mac %pM serial %s\n", + ha->host_no, __func__, ha->my_mac, ha->serial_number)); + + status = QLA_SUCCESS; + +exit_validate_mac82: + dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info, + sys_info_dma); + return status; +} + +/* Interrupt handling helpers. 
*/ + +int qla4_8xxx_intr_enable(struct scsi_qla_host *ha) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s\n", __func__)); + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + mbox_cmd[0] = MBOX_CMD_ENABLE_INTRS; + mbox_cmd[1] = INTR_ENABLE; + if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], + &mbox_sts[0]) != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: MBOX_CMD_ENABLE_INTRS failed (0x%04x)\n", + __func__, mbox_sts[0])); + return QLA_ERROR; + } + return QLA_SUCCESS; +} + +int qla4_8xxx_intr_disable(struct scsi_qla_host *ha) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s\n", __func__)); + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + mbox_cmd[0] = MBOX_CMD_ENABLE_INTRS; + mbox_cmd[1] = INTR_DISABLE; + if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], + &mbox_sts[0]) != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: MBOX_CMD_ENABLE_INTRS failed (0x%04x)\n", + __func__, mbox_sts[0])); + return QLA_ERROR; + } + + return QLA_SUCCESS; +} + +void +qla4_82xx_enable_intrs(struct scsi_qla_host *ha) +{ + qla4_8xxx_intr_enable(ha); + + spin_lock_irq(&ha->hardware_lock); + /* BIT 10 - reset */ + qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); + spin_unlock_irq(&ha->hardware_lock); + set_bit(AF_INTERRUPTS_ON, &ha->flags); +} + +void +qla4_82xx_disable_intrs(struct scsi_qla_host *ha) +{ + if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags)) + qla4_8xxx_intr_disable(ha); + + spin_lock_irq(&ha->hardware_lock); + /* BIT 10 - set */ + qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); + spin_unlock_irq(&ha->hardware_lock); +} + +int +qla4_8xxx_enable_msix(struct scsi_qla_host *ha) +{ + int ret; + + ret = pci_alloc_irq_vectors(ha->pdev, QLA_MSIX_ENTRIES, + QLA_MSIX_ENTRIES, PCI_IRQ_MSIX); + if (ret < 0) { + ql4_printk(KERN_WARNING, ha, + "MSI-X: Failed to enable support -- %d/%d\n", + QLA_MSIX_ENTRIES, ret); + return ret; + } + + ret = request_irq(pci_irq_vector(ha->pdev, 0), + qla4_8xxx_default_intr_handler, 0, "qla4xxx (default)", + ha); + if (ret) + goto out_free_vectors; + + ret = request_irq(pci_irq_vector(ha->pdev, 1), + qla4_8xxx_msix_rsp_q, 0, "qla4xxx (rsp_q)", ha); + if (ret) + goto out_free_default_irq; + + return 0; + +out_free_default_irq: + free_irq(pci_irq_vector(ha->pdev, 0), ha); +out_free_vectors: + pci_free_irq_vectors(ha->pdev); + return ret; +} + +int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha) +{ + int status = QLA_SUCCESS; + + /* Dont retry adapter initialization if IRQ allocation failed */ + if (!test_bit(AF_IRQ_ATTACHED, &ha->flags)) { + ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization as IRQs are not attached\n", + __func__); + status = QLA_ERROR; + goto exit_init_adapter_failure; + } + + /* Since interrupts are registered in start_firmware for + * 8xxx, release them here if initialize_adapter fails + * and retry adapter initialization */ + qla4xxx_free_irqs(ha); + +exit_init_adapter_failure: + return status; +} diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h new file mode 100644 index 000000000..52a5209ae --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_nx.h @@ -0,0 +1,1007 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ +#ifndef __QLA_NX_H 
+#define __QLA_NX_H + +/* + * Following are the states of the Phantom. Phantom will set them and + * Host will read to check if the fields are correct. +*/ +#define PHAN_INITIALIZE_FAILED 0xffff +#define PHAN_INITIALIZE_COMPLETE 0xff01 + +/* Host writes the following to notify that it has done the init-handshake */ +#define PHAN_INITIALIZE_ACK 0xf00f +#define PHAN_PEG_RCV_INITIALIZED 0xff01 + +/*CRB_RELATED*/ +#define QLA82XX_CRB_BASE (QLA82XX_CAM_RAM(0x200)) +#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X)) +#define CRB_CMDPEG_STATE QLA82XX_REG(0x50) +#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) +#define CRB_DMA_SHIFT QLA82XX_REG(0xcc) +#define CRB_TEMP_STATE QLA82XX_REG(0x1b4) +#define CRB_CMDPEG_CHECK_RETRY_COUNT 60 +#define CRB_CMDPEG_CHECK_DELAY 500 + +#define qla82xx_get_temp_val(x) ((x) >> 16) +#define qla82xx_get_temp_state(x) ((x) & 0xffff) +#define qla82xx_encode_temp(val, state) (((val) << 16) | (state)) + +/* + * Temperature control. + */ +enum { + QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */ + QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */ + QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */ +}; + +#define CRB_NIU_XG_PAUSE_CTL_P0 0x1 +#define CRB_NIU_XG_PAUSE_CTL_P1 0x8 + +#define QLA82XX_HW_H0_CH_HUB_ADR 0x05 +#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E +#define QLA82XX_HW_H2_CH_HUB_ADR 0x03 +#define QLA82XX_HW_H3_CH_HUB_ADR 0x01 +#define QLA82XX_HW_H4_CH_HUB_ADR 0x06 +#define QLA82XX_HW_H5_CH_HUB_ADR 0x07 +#define QLA82XX_HW_H6_CH_HUB_ADR 0x08 + +/* Hub 0 */ +#define QLA82XX_HW_MN_CRB_AGT_ADR 0x15 +#define QLA82XX_HW_MS_CRB_AGT_ADR 0x25 + +/* Hub 1 */ +#define QLA82XX_HW_PS_CRB_AGT_ADR 0x73 +#define QLA82XX_HW_QMS_CRB_AGT_ADR 0x00 +#define QLA82XX_HW_RPMX3_CRB_AGT_ADR 0x0b +#define QLA82XX_HW_SQGS0_CRB_AGT_ADR 0x01 +#define QLA82XX_HW_SQGS1_CRB_AGT_ADR 0x02 +#define QLA82XX_HW_SQGS2_CRB_AGT_ADR 0x03 +#define QLA82XX_HW_SQGS3_CRB_AGT_ADR 0x04 +#define QLA82XX_HW_C2C0_CRB_AGT_ADR 0x58 +#define QLA82XX_HW_C2C1_CRB_AGT_ADR 0x59 +#define QLA82XX_HW_C2C2_CRB_AGT_ADR 0x5a +#define QLA82XX_HW_RPMX2_CRB_AGT_ADR 0x0a +#define QLA82XX_HW_RPMX4_CRB_AGT_ADR 0x0c +#define QLA82XX_HW_RPMX7_CRB_AGT_ADR 0x0f +#define QLA82XX_HW_RPMX9_CRB_AGT_ADR 0x12 +#define QLA82XX_HW_SMB_CRB_AGT_ADR 0x18 + +/* Hub 2 */ +#define QLA82XX_HW_NIU_CRB_AGT_ADR 0x31 +#define QLA82XX_HW_I2C0_CRB_AGT_ADR 0x19 +#define QLA82XX_HW_I2C1_CRB_AGT_ADR 0x29 + +#define QLA82XX_HW_SN_CRB_AGT_ADR 0x10 +#define QLA82XX_HW_I2Q_CRB_AGT_ADR 0x20 +#define QLA82XX_HW_LPC_CRB_AGT_ADR 0x22 +#define QLA82XX_HW_ROMUSB_CRB_AGT_ADR 0x21 +#define QLA82XX_HW_QM_CRB_AGT_ADR 0x66 +#define QLA82XX_HW_SQG0_CRB_AGT_ADR 0x60 +#define QLA82XX_HW_SQG1_CRB_AGT_ADR 0x61 +#define QLA82XX_HW_SQG2_CRB_AGT_ADR 0x62 +#define QLA82XX_HW_SQG3_CRB_AGT_ADR 0x63 +#define QLA82XX_HW_RPMX1_CRB_AGT_ADR 0x09 +#define QLA82XX_HW_RPMX5_CRB_AGT_ADR 0x0d +#define QLA82XX_HW_RPMX6_CRB_AGT_ADR 0x0e +#define QLA82XX_HW_RPMX8_CRB_AGT_ADR 0x11 + +/* Hub 3 */ +#define QLA82XX_HW_PH_CRB_AGT_ADR 0x1A +#define QLA82XX_HW_SRE_CRB_AGT_ADR 0x50 +#define QLA82XX_HW_EG_CRB_AGT_ADR 0x51 +#define QLA82XX_HW_RPMX0_CRB_AGT_ADR 0x08 + +/* Hub 4 */ +#define QLA82XX_HW_PEGN0_CRB_AGT_ADR 0x40 +#define QLA82XX_HW_PEGN1_CRB_AGT_ADR 0x41 +#define QLA82XX_HW_PEGN2_CRB_AGT_ADR 0x42 +#define QLA82XX_HW_PEGN3_CRB_AGT_ADR 0x43 +#define QLA82XX_HW_PEGNI_CRB_AGT_ADR 0x44 +#define QLA82XX_HW_PEGND_CRB_AGT_ADR 0x45 +#define QLA82XX_HW_PEGNC_CRB_AGT_ADR 0x46 +#define QLA82XX_HW_PEGR0_CRB_AGT_ADR 0x47 +#define QLA82XX_HW_PEGR1_CRB_AGT_ADR 0x48 +#define 
QLA82XX_HW_PEGR2_CRB_AGT_ADR 0x49 +#define QLA82XX_HW_PEGR3_CRB_AGT_ADR 0x4a +#define QLA82XX_HW_PEGN4_CRB_AGT_ADR 0x4b + +/* Hub 5 */ +#define QLA82XX_HW_PEGS0_CRB_AGT_ADR 0x40 +#define QLA82XX_HW_PEGS1_CRB_AGT_ADR 0x41 +#define QLA82XX_HW_PEGS2_CRB_AGT_ADR 0x42 +#define QLA82XX_HW_PEGS3_CRB_AGT_ADR 0x43 + +#define QLA82XX_HW_PEGSI_CRB_AGT_ADR 0x44 +#define QLA82XX_HW_PEGSD_CRB_AGT_ADR 0x45 +#define QLA82XX_HW_PEGSC_CRB_AGT_ADR 0x46 + +/* Hub 6 */ +#define QLA82XX_HW_CAS0_CRB_AGT_ADR 0x46 +#define QLA82XX_HW_CAS1_CRB_AGT_ADR 0x47 +#define QLA82XX_HW_CAS2_CRB_AGT_ADR 0x48 +#define QLA82XX_HW_CAS3_CRB_AGT_ADR 0x49 +#define QLA82XX_HW_NCM_CRB_AGT_ADR 0x16 +#define QLA82XX_HW_TMR_CRB_AGT_ADR 0x17 +#define QLA82XX_HW_XDMA_CRB_AGT_ADR 0x05 +#define QLA82XX_HW_OCM0_CRB_AGT_ADR 0x06 +#define QLA82XX_HW_OCM1_CRB_AGT_ADR 0x07 + +/* This field defines PCI/X adr [25:20] of agents on the CRB */ +/* */ +#define QLA82XX_HW_PX_MAP_CRB_PH 0 +#define QLA82XX_HW_PX_MAP_CRB_PS 1 +#define QLA82XX_HW_PX_MAP_CRB_MN 2 +#define QLA82XX_HW_PX_MAP_CRB_MS 3 +#define QLA82XX_HW_PX_MAP_CRB_SRE 5 +#define QLA82XX_HW_PX_MAP_CRB_NIU 6 +#define QLA82XX_HW_PX_MAP_CRB_QMN 7 +#define QLA82XX_HW_PX_MAP_CRB_SQN0 8 +#define QLA82XX_HW_PX_MAP_CRB_SQN1 9 +#define QLA82XX_HW_PX_MAP_CRB_SQN2 10 +#define QLA82XX_HW_PX_MAP_CRB_SQN3 11 +#define QLA82XX_HW_PX_MAP_CRB_QMS 12 +#define QLA82XX_HW_PX_MAP_CRB_SQS0 13 +#define QLA82XX_HW_PX_MAP_CRB_SQS1 14 +#define QLA82XX_HW_PX_MAP_CRB_SQS2 15 +#define QLA82XX_HW_PX_MAP_CRB_SQS3 16 +#define QLA82XX_HW_PX_MAP_CRB_PGN0 17 +#define QLA82XX_HW_PX_MAP_CRB_PGN1 18 +#define QLA82XX_HW_PX_MAP_CRB_PGN2 19 +#define QLA82XX_HW_PX_MAP_CRB_PGN3 20 +#define QLA82XX_HW_PX_MAP_CRB_PGN4 QLA82XX_HW_PX_MAP_CRB_SQS2 +#define QLA82XX_HW_PX_MAP_CRB_PGND 21 +#define QLA82XX_HW_PX_MAP_CRB_PGNI 22 +#define QLA82XX_HW_PX_MAP_CRB_PGS0 23 +#define QLA82XX_HW_PX_MAP_CRB_PGS1 24 +#define QLA82XX_HW_PX_MAP_CRB_PGS2 25 +#define QLA82XX_HW_PX_MAP_CRB_PGS3 26 +#define QLA82XX_HW_PX_MAP_CRB_PGSD 27 +#define QLA82XX_HW_PX_MAP_CRB_PGSI 28 +#define QLA82XX_HW_PX_MAP_CRB_SN 29 +#define QLA82XX_HW_PX_MAP_CRB_EG 31 +#define QLA82XX_HW_PX_MAP_CRB_PH2 32 +#define QLA82XX_HW_PX_MAP_CRB_PS2 33 +#define QLA82XX_HW_PX_MAP_CRB_CAM 34 +#define QLA82XX_HW_PX_MAP_CRB_CAS0 35 +#define QLA82XX_HW_PX_MAP_CRB_CAS1 36 +#define QLA82XX_HW_PX_MAP_CRB_CAS2 37 +#define QLA82XX_HW_PX_MAP_CRB_C2C0 38 +#define QLA82XX_HW_PX_MAP_CRB_C2C1 39 +#define QLA82XX_HW_PX_MAP_CRB_TIMR 40 +#define QLA82XX_HW_PX_MAP_CRB_RPMX1 42 +#define QLA82XX_HW_PX_MAP_CRB_RPMX2 43 +#define QLA82XX_HW_PX_MAP_CRB_RPMX3 44 +#define QLA82XX_HW_PX_MAP_CRB_RPMX4 45 +#define QLA82XX_HW_PX_MAP_CRB_RPMX5 46 +#define QLA82XX_HW_PX_MAP_CRB_RPMX6 47 +#define QLA82XX_HW_PX_MAP_CRB_RPMX7 48 +#define QLA82XX_HW_PX_MAP_CRB_XDMA 49 +#define QLA82XX_HW_PX_MAP_CRB_I2Q 50 +#define QLA82XX_HW_PX_MAP_CRB_ROMUSB 51 +#define QLA82XX_HW_PX_MAP_CRB_CAS3 52 +#define QLA82XX_HW_PX_MAP_CRB_RPMX0 53 +#define QLA82XX_HW_PX_MAP_CRB_RPMX8 54 +#define QLA82XX_HW_PX_MAP_CRB_RPMX9 55 +#define QLA82XX_HW_PX_MAP_CRB_OCM0 56 +#define QLA82XX_HW_PX_MAP_CRB_OCM1 57 +#define QLA82XX_HW_PX_MAP_CRB_SMB 58 +#define QLA82XX_HW_PX_MAP_CRB_I2C0 59 +#define QLA82XX_HW_PX_MAP_CRB_I2C1 60 +#define QLA82XX_HW_PX_MAP_CRB_LPC 61 +#define QLA82XX_HW_PX_MAP_CRB_PGNC 62 +#define QLA82XX_HW_PX_MAP_CRB_PGR0 63 +#define QLA82XX_HW_PX_MAP_CRB_PGR1 4 +#define QLA82XX_HW_PX_MAP_CRB_PGR2 30 +#define QLA82XX_HW_PX_MAP_CRB_PGR3 41 + +/* This field defines CRB adr [31:20] of the agents */ +/* */ + +#define QLA82XX_HW_CRB_HUB_AGT_ADR_MN 
((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \ + QLA82XX_HW_MN_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PH ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PH_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_MS ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \ + QLA82XX_HW_MS_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PS_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SS_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX3_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_QMS_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQGS0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQGS1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQGS2_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQGS3_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_C2C0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_C2C1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX2_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX4_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX7_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX9_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SMB ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SMB_CRB_AGT_ADR) + +#define QLA82XX_HW_CRB_HUB_AGT_ADR_NIU ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \ + QLA82XX_HW_NIU_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \ + QLA82XX_HW_I2C0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \ + QLA82XX_HW_I2C1_CRB_AGT_ADR) + +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SRE ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SRE_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_EG ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_EG_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMN ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_QM_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQG0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQG1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQG2_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SQG3_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX5_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX6_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_RPMX8_CRB_AGT_ADR) 
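Every QLA82XX_HW_CRB_HUB_AGT_ADR_* value above follows the same pattern: a hub number shifted left by seven bits OR'd with a 7-bit agent address, producing the 12-bit field that the earlier comment describes as CRB address bits [31:20]. The helper below is a hypothetical sketch, not part of this patch, that only makes the packing explicit.

static inline unsigned int qla82xx_pack_hub_agt(unsigned int hub_adr,
						unsigned int agt_adr)
{
	/* Hub number lands in bits [11:7], agent address in bits [6:0];
	 * the packed pair is what occupies CRB address bits [31:20]. */
	return (hub_adr << 7) | (agt_adr & 0x7f);
}

/* For example, qla82xx_pack_hub_agt(QLA82XX_HW_H1_CH_HUB_ADR,
 * QLA82XX_HW_SMB_CRB_AGT_ADR) equals QLA82XX_HW_CRB_HUB_AGT_ADR_SMB. */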
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_CAS0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_CAS1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_CAS2_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \ + QLA82XX_HW_CAS3_CRB_AGT_ADR) + +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGNI_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGND ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGND_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGN0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGN1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGN2_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGN3_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGN4_CRB_AGT_ADR) + +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGNC_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGR0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGR1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGR2_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGR3_CRB_AGT_ADR) + +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGSI_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSD ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGSD_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGS0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGS1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGS2_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGS3_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSC ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \ + QLA82XX_HW_PEGSC_CRB_AGT_ADR) + +#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAM ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_NCM_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_TMR_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_XDMA_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_SN ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_SN_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_I2Q_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_ROMUSB_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_OCM0_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM1 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_OCM1_CRB_AGT_ADR) +#define QLA82XX_HW_CRB_HUB_AGT_ADR_LPC ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \ + QLA82XX_HW_LPC_CRB_AGT_ADR) + +#define ROMUSB_GLB (QLA82XX_CRB_ROMUSB + 
0x00000) +#define QLA82XX_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c) +#define QLA82XX_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004) +#define QLA82XX_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008) +#define QLA82XX_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008) +#define QLA82XX_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c) +#define QLA82XX_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010) +#define QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014) +#define QLA82XX_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018) + +#define ROMUSB_ROM (QLA82XX_CRB_ROMUSB + 0x10000) +#define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) +#define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) + +/* Lock IDs for ROM lock */ +#define ROM_LOCK_DRIVER 0x0d417340 + +#define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */ +#define QLA82XX_PCI_CRB_WINDOW(A) (QLA82XX_PCI_CRBSPACE + \ + (A)*QLA82XX_PCI_CRB_WINDOWSIZE) + +#define QLA82XX_CRB_C2C_0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C0) +#define QLA82XX_CRB_C2C_1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C1) +#define QLA82XX_CRB_C2C_2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C2) +#define QLA82XX_CRB_CAM \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAM) +#define QLA82XX_CRB_CASPER \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS) +#define QLA82XX_CRB_CASPER_0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS0) +#define QLA82XX_CRB_CASPER_1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS1) +#define QLA82XX_CRB_CASPER_2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS2) +#define QLA82XX_CRB_DDR_MD \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MS) +#define QLA82XX_CRB_DDR_NET \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MN) +#define QLA82XX_CRB_EPG \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_EG) +#define QLA82XX_CRB_I2Q \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2Q) +#define QLA82XX_CRB_NIU \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_NIU) +/* HACK upon HACK upon HACK (for PCIE builds) */ +#define QLA82XX_CRB_PCIX_HOST \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH) +#define QLA82XX_CRB_PCIX_HOST2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH2) +#define QLA82XX_CRB_PCIX_MD \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS) +#define QLA82XX_CRB_PCIE QLA82XX_CRB_PCIX_MD +/* window 1 pcie slot */ +#define QLA82XX_CRB_PCIE2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS2) + +#define QLA82XX_CRB_PEG_MD_0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS0) +#define QLA82XX_CRB_PEG_MD_1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS1) +#define QLA82XX_CRB_PEG_MD_2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS2) +#define QLA82XX_CRB_PEG_MD_3 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS3) +#define QLA82XX_CRB_PEG_MD_3 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS3) +#define QLA82XX_CRB_PEG_MD_D \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSD) +#define QLA82XX_CRB_PEG_MD_I \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSI) +#define QLA82XX_CRB_PEG_NET_0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN0) +#define QLA82XX_CRB_PEG_NET_1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN1) +#define QLA82XX_CRB_PEG_NET_2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN2) +#define QLA82XX_CRB_PEG_NET_3 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN3) +#define QLA82XX_CRB_PEG_NET_4 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN4) +#define QLA82XX_CRB_PEG_NET_D \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGND) +#define 
QLA82XX_CRB_PEG_NET_I \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGNI) +#define QLA82XX_CRB_PQM_MD \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMS) +#define QLA82XX_CRB_PQM_NET \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMN) +#define QLA82XX_CRB_QDR_MD \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SS) +#define QLA82XX_CRB_QDR_NET \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SN) +#define QLA82XX_CRB_ROMUSB \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_ROMUSB) +#define QLA82XX_CRB_RPMX_0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX0) +#define QLA82XX_CRB_RPMX_1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX1) +#define QLA82XX_CRB_RPMX_2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX2) +#define QLA82XX_CRB_RPMX_3 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX3) +#define QLA82XX_CRB_RPMX_4 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX4) +#define QLA82XX_CRB_RPMX_5 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX5) +#define QLA82XX_CRB_RPMX_6 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX6) +#define QLA82XX_CRB_RPMX_7 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX7) +#define QLA82XX_CRB_SQM_MD_0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS0) +#define QLA82XX_CRB_SQM_MD_1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS1) +#define QLA82XX_CRB_SQM_MD_2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS2) +#define QLA82XX_CRB_SQM_MD_3 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS3) +#define QLA82XX_CRB_SQM_NET_0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN0) +#define QLA82XX_CRB_SQM_NET_1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN1) +#define QLA82XX_CRB_SQM_NET_2 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN2) +#define QLA82XX_CRB_SQM_NET_3 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN3) +#define QLA82XX_CRB_SRE \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SRE) +#define QLA82XX_CRB_TIMER \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_TIMR) +#define QLA82XX_CRB_XDMA \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_XDMA) +#define QLA82XX_CRB_I2C0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C0) +#define QLA82XX_CRB_I2C1 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C1) +#define QLA82XX_CRB_OCM0 \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_OCM0) +#define QLA82XX_CRB_SMB \ + QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SMB) + +#define QLA82XX_CRB_MAX QLA82XX_PCI_CRB_WINDOW(64) + +/* + * ====================== BASE ADDRESSES ON-CHIP ====================== + * Base addresses of major components on-chip. + * ====================== BASE ADDRESSES ON-CHIP ====================== + */ +#define QLA8XXX_ADDR_DDR_NET (0x0000000000000000ULL) +#define QLA8XXX_ADDR_DDR_NET_MAX (0x000000000fffffffULL) + +/* Imbus address bit used to indicate a host address. This bit is + * eliminated by the pcie bar and bar select before presentation + * over pcie. 
*/ +/* host memory via IMBUS */ +#define QLA82XX_P2_ADDR_PCIE (0x0000000800000000ULL) +#define QLA82XX_P3_ADDR_PCIE (0x0000008000000000ULL) +#define QLA82XX_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL) +#define QLA8XXX_ADDR_OCM0 (0x0000000200000000ULL) +#define QLA8XXX_ADDR_OCM0_MAX (0x00000002000fffffULL) +#define QLA8XXX_ADDR_OCM1 (0x0000000200400000ULL) +#define QLA8XXX_ADDR_OCM1_MAX (0x00000002004fffffULL) +#define QLA8XXX_ADDR_QDR_NET (0x0000000300000000ULL) + +#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL) +#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) +#define QLA8XXX_ADDR_QDR_NET_MAX (0x0000000307ffffffULL) + +#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000 +#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000 +#define QLA82XX_PCI_CAMQM (unsigned long)0x04800000 +#define QLA82XX_PCI_CAMQM_MAX (unsigned long)0x04ffffff +#define QLA82XX_PCI_DDR_NET (unsigned long)0x00000000 +#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000 +#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff + +/* PCI Windowing for DDR regions. */ +#define QLA8XXX_ADDR_IN_RANGE(addr, low, high) \ + (((addr) <= (high)) && ((addr) >= (low))) + +/* + * Register offsets for MN + */ +#define MIU_CONTROL (0x000) +#define MIU_TAG (0x004) +#define MIU_TEST_AGT_CTRL (0x090) +#define MIU_TEST_AGT_ADDR_LO (0x094) +#define MIU_TEST_AGT_ADDR_HI (0x098) +#define MIU_TEST_AGT_WRDATA_LO (0x0a0) +#define MIU_TEST_AGT_WRDATA_HI (0x0a4) +#define MIU_TEST_AGT_WRDATA(i) (0x0a0+(4*(i))) +#define MIU_TEST_AGT_RDDATA_LO (0x0a8) +#define MIU_TEST_AGT_RDDATA_HI (0x0ac) +#define MIU_TEST_AGT_RDDATA(i) (0x0a8+(4*(i))) +#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 +#define MIU_TEST_AGT_UPPER_ADDR(off) (0) + +/* MIU_TEST_AGT_CTRL flags. work for SIU as well */ +#define MIU_TA_CTL_START 1 +#define MIU_TA_CTL_ENABLE 2 +#define MIU_TA_CTL_WRITE 4 +#define MIU_TA_CTL_BUSY 8 + +#define MIU_TA_CTL_WRITE_ENABLE (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE) +#define MIU_TA_CTL_WRITE_START (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE |\ + MIU_TA_CTL_START) +#define MIU_TA_CTL_START_ENABLE (MIU_TA_CTL_START | MIU_TA_CTL_ENABLE) + +/*CAM RAM */ +# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000) +# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg)) + +#define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24)) +#define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8)) +#define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac)) +#define QLA82XX_PEG_ALIVE_COUNTER (QLA82XX_CAM_RAM(0xb0)) +#define QLA82XX_CAM_RAM_DB1 (QLA82XX_CAM_RAM(0x1b0)) +#define QLA82XX_CAM_RAM_DB2 (QLA82XX_CAM_RAM(0x1b4)) + +#define HALT_STATUS_UNRECOVERABLE 0x80000000 +#define HALT_STATUS_RECOVERABLE 0x40000000 + + +#define QLA82XX_ROM_LOCK_ID (QLA82XX_CAM_RAM(0x100)) +#define QLA82XX_CRB_WIN_LOCK_ID (QLA82XX_CAM_RAM(0x124)) +#define QLA82XX_FW_VERSION_MAJOR (QLA82XX_CAM_RAM(0x150)) +#define QLA82XX_FW_VERSION_MINOR (QLA82XX_CAM_RAM(0x154)) +#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158)) +#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg)) + +/* Driver Coexistence Defines */ +#define QLA82XX_CRB_DRV_ACTIVE (QLA82XX_CAM_RAM(0x138)) +#define QLA82XX_CRB_DEV_STATE (QLA82XX_CAM_RAM(0x140)) +#define QLA82XX_CRB_DRV_STATE (QLA82XX_CAM_RAM(0x144)) +#define QLA82XX_CRB_DRV_SCRATCH (QLA82XX_CAM_RAM(0x148)) +#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c)) +#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174)) + +enum qla_regs { + QLA8XXX_PEG_HALT_STATUS1 = 0, + QLA8XXX_PEG_HALT_STATUS2, + QLA8XXX_PEG_ALIVE_COUNTER, + 
QLA8XXX_CRB_DRV_ACTIVE, + QLA8XXX_CRB_DEV_STATE, + QLA8XXX_CRB_DRV_STATE, + QLA8XXX_CRB_DRV_SCRATCH, + QLA8XXX_CRB_DEV_PART_INFO, + QLA8XXX_CRB_DRV_IDC_VERSION, + QLA8XXX_FW_VERSION_MAJOR, + QLA8XXX_FW_VERSION_MINOR, + QLA8XXX_FW_VERSION_SUB, + QLA8XXX_CRB_CMDPEG_STATE, + QLA8XXX_CRB_TEMP_STATE, +}; + +/* Every driver should use these Device State */ +#define QLA8XXX_DEV_COLD 1 +#define QLA8XXX_DEV_INITIALIZING 2 +#define QLA8XXX_DEV_READY 3 +#define QLA8XXX_DEV_NEED_RESET 4 +#define QLA8XXX_DEV_NEED_QUIESCENT 5 +#define QLA8XXX_DEV_FAILED 6 +#define QLA8XXX_DEV_QUIESCENT 7 +#define MAX_STATES 8 /* Increment if new state added */ + +#define QLA82XX_IDC_VERSION 0x1 +#define ROM_DEV_INIT_TIMEOUT 30 +#define ROM_DRV_RESET_ACK_TIMEOUT 10 + +#define PCIE_SETUP_FUNCTION (0x12040) +#define PCIE_SETUP_FUNCTION2 (0x12048) + +#define QLA82XX_PCIX_PS_REG(reg) (QLA82XX_CRB_PCIX_MD + (reg)) +#define QLA82XX_PCIX_PS2_REG(reg) (QLA82XX_CRB_PCIE2 + (reg)) + +#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */ +#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */ +#define PCIE_SEM5_LOCK (0x1c028) /* Coexistence lock */ +#define PCIE_SEM5_UNLOCK (0x1c02c) /* Coexistence unlock */ +#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */ +#define PCIE_SEM7_UNLOCK (0x1c03c) /* crbwin unlock*/ + +/* + * The PCI VendorID and DeviceID for our board. + */ +#define QLA82XX_MSIX_TBL_SPACE 8192 +#define QLA82XX_PCI_REG_MSIX_TBL 0x44 +#define QLA82XX_PCI_MSIX_CONTROL 0x40 + +struct crb_128M_2M_sub_block_map { + unsigned valid; + unsigned start_128M; + unsigned end_128M; + unsigned start_2M; +}; + +struct crb_128M_2M_block_map { + struct crb_128M_2M_sub_block_map sub_block[16]; +}; + +struct crb_addr_pair { + long addr; + long data; +}; + +#define ADDR_ERROR ((unsigned long) 0xffffffff) +#define MAX_CTL_CHECK 1000 +#define QLA82XX_FWERROR_CODE(code) ((code >> 8) & 0x1fffff) + +/*************************************************************************** + * PCI related defines. + **************************************************************************/ + +/* + * Interrupt related defines. + */ +#define PCIX_TARGET_STATUS (0x10118) +#define PCIX_TARGET_STATUS_F1 (0x10160) +#define PCIX_TARGET_STATUS_F2 (0x10164) +#define PCIX_TARGET_STATUS_F3 (0x10168) +#define PCIX_TARGET_STATUS_F4 (0x10360) +#define PCIX_TARGET_STATUS_F5 (0x10364) +#define PCIX_TARGET_STATUS_F6 (0x10368) +#define PCIX_TARGET_STATUS_F7 (0x1036c) + +#define PCIX_TARGET_MASK (0x10128) +#define PCIX_TARGET_MASK_F1 (0x10170) +#define PCIX_TARGET_MASK_F2 (0x10174) +#define PCIX_TARGET_MASK_F3 (0x10178) +#define PCIX_TARGET_MASK_F4 (0x10370) +#define PCIX_TARGET_MASK_F5 (0x10374) +#define PCIX_TARGET_MASK_F6 (0x10378) +#define PCIX_TARGET_MASK_F7 (0x1037c) + +/* + * Message Signaled Interrupts + */ +#define PCIX_MSI_F0 (0x13000) +#define PCIX_MSI_F1 (0x13004) +#define PCIX_MSI_F2 (0x13008) +#define PCIX_MSI_F3 (0x1300c) +#define PCIX_MSI_F4 (0x13010) +#define PCIX_MSI_F5 (0x13014) +#define PCIX_MSI_F6 (0x13018) +#define PCIX_MSI_F7 (0x1301c) +#define PCIX_MSI_F(FUNC) (0x13000 + ((FUNC) * 4)) + +/* + * + */ +#define PCIX_INT_VECTOR (0x10100) +#define PCIX_INT_MASK (0x10104) + +/* + * Interrupt state machine and other bits. 
+ */ +#define PCIE_MISCCFG_RC (0x1206c) + + +#define ISR_INT_TARGET_STATUS \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS)) +#define ISR_INT_TARGET_STATUS_F1 \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F1)) +#define ISR_INT_TARGET_STATUS_F2 \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F2)) +#define ISR_INT_TARGET_STATUS_F3 \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F3)) +#define ISR_INT_TARGET_STATUS_F4 \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F4)) +#define ISR_INT_TARGET_STATUS_F5 \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F5)) +#define ISR_INT_TARGET_STATUS_F6 \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F6)) +#define ISR_INT_TARGET_STATUS_F7 \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F7)) + +#define ISR_INT_TARGET_MASK \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK)) +#define ISR_INT_TARGET_MASK_F1 \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F1)) +#define ISR_INT_TARGET_MASK_F2 \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F2)) +#define ISR_INT_TARGET_MASK_F3 \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F3)) +#define ISR_INT_TARGET_MASK_F4 \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F4)) +#define ISR_INT_TARGET_MASK_F5 \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F5)) +#define ISR_INT_TARGET_MASK_F6 \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F6)) +#define ISR_INT_TARGET_MASK_F7 \ + (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F7)) + +#define ISR_INT_VECTOR (QLA82XX_PCIX_PS_REG(PCIX_INT_VECTOR)) +#define ISR_INT_MASK (QLA82XX_PCIX_PS_REG(PCIX_INT_MASK)) +#define ISR_INT_STATE_REG (QLA82XX_PCIX_PS_REG(PCIE_MISCCFG_RC)) + +#define ISR_MSI_INT_TRIGGER(FUNC) (QLA82XX_PCIX_PS_REG(PCIX_MSI_F(FUNC))) + + +#define ISR_IS_LEGACY_INTR_IDLE(VAL) (((VAL) & 0x300) == 0) +#define ISR_IS_LEGACY_INTR_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) + +/* + * PCI Interrupt Vector Values. + */ +#define PCIX_INT_VECTOR_BIT_F0 0x0080 +#define PCIX_INT_VECTOR_BIT_F1 0x0100 +#define PCIX_INT_VECTOR_BIT_F2 0x0200 +#define PCIX_INT_VECTOR_BIT_F3 0x0400 +#define PCIX_INT_VECTOR_BIT_F4 0x0800 +#define PCIX_INT_VECTOR_BIT_F5 0x1000 +#define PCIX_INT_VECTOR_BIT_F6 0x2000 +#define PCIX_INT_VECTOR_BIT_F7 0x4000 + +/* struct qla4_8xxx_legacy_intr_set defined in ql4_def.h */ + +#define QLA82XX_LEGACY_INTR_CONFIG \ +{ \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \ + \ + { \ + .int_vec_bit = 
PCIX_INT_VECTOR_BIT_F7, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \ +} + +/* Magic number to let user know flash is programmed */ +#define QLA82XX_BDINFO_MAGIC 0x12345678 +#define FW_SIZE_OFFSET (0x3e840c) + +/* QLA82XX additions */ +#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0) +#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4) + +/* Minidump related */ + +/* Entry Type Defines */ +#define QLA8XXX_RDNOP 0 +#define QLA8XXX_RDCRB 1 +#define QLA8XXX_RDMUX 2 +#define QLA8XXX_QUEUE 3 +#define QLA8XXX_BOARD 4 +#define QLA8XXX_RDOCM 6 +#define QLA8XXX_PREGS 7 +#define QLA8XXX_L1DTG 8 +#define QLA8XXX_L1ITG 9 +#define QLA8XXX_L1DAT 11 +#define QLA8XXX_L1INS 12 +#define QLA8XXX_L2DTG 21 +#define QLA8XXX_L2ITG 22 +#define QLA8XXX_L2DAT 23 +#define QLA8XXX_L2INS 24 +#define QLA83XX_POLLRD 35 +#define QLA83XX_RDMUX2 36 +#define QLA83XX_POLLRDMWR 37 +#define QLA8044_RDDFE 38 +#define QLA8044_RDMDIO 39 +#define QLA8044_POLLWR 40 +#define QLA8XXX_RDROM 71 +#define QLA8XXX_RDMEM 72 +#define QLA8XXX_CNTRL 98 +#define QLA83XX_TLHDR 99 +#define QLA8XXX_RDEND 255 + +/* Opcodes for Control Entries. + * These Flags are bit fields. + */ +#define QLA8XXX_DBG_OPCODE_WR 0x01 +#define QLA8XXX_DBG_OPCODE_RW 0x02 +#define QLA8XXX_DBG_OPCODE_AND 0x04 +#define QLA8XXX_DBG_OPCODE_OR 0x08 +#define QLA8XXX_DBG_OPCODE_POLL 0x10 +#define QLA8XXX_DBG_OPCODE_RDSTATE 0x20 +#define QLA8XXX_DBG_OPCODE_WRSTATE 0x40 +#define QLA8XXX_DBG_OPCODE_MDSTATE 0x80 + +/* Driver Flags */ +#define QLA8XXX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */ +#define QLA8XXX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size + * mismatch */ + +/* Driver_code is for driver to write some info about the entry + * currently not used. 
+ */ +struct qla8xxx_minidump_entry_hdr { + uint32_t entry_type; + uint32_t entry_size; + uint32_t entry_capture_size; + struct { + uint8_t entry_capture_mask; + uint8_t entry_code; + uint8_t driver_code; + uint8_t driver_flags; + } d_ctrl; +}; + +/* Read CRB entry header */ +struct qla8xxx_minidump_entry_crb { + struct qla8xxx_minidump_entry_hdr h; + uint32_t addr; + struct { + uint8_t addr_stride; + uint8_t state_index_a; + uint16_t poll_timeout; + } crb_strd; + uint32_t data_size; + uint32_t op_count; + + struct { + uint8_t opcode; + uint8_t state_index_v; + uint8_t shl; + uint8_t shr; + } crb_ctrl; + + uint32_t value_1; + uint32_t value_2; + uint32_t value_3; +}; + +struct qla8xxx_minidump_entry_cache { + struct qla8xxx_minidump_entry_hdr h; + uint32_t tag_reg_addr; + struct { + uint16_t tag_value_stride; + uint16_t init_tag_value; + } addr_ctrl; + uint32_t data_size; + uint32_t op_count; + uint32_t control_addr; + struct { + uint16_t write_value; + uint8_t poll_mask; + uint8_t poll_wait; + } cache_ctrl; + uint32_t read_addr; + struct { + uint8_t read_addr_stride; + uint8_t read_addr_cnt; + uint16_t rsvd_1; + } read_ctrl; +}; + +/* Read OCM */ +struct qla8xxx_minidump_entry_rdocm { + struct qla8xxx_minidump_entry_hdr h; + uint32_t rsvd_0; + uint32_t rsvd_1; + uint32_t data_size; + uint32_t op_count; + uint32_t rsvd_2; + uint32_t rsvd_3; + uint32_t read_addr; + uint32_t read_addr_stride; +}; + +/* Read Memory */ +struct qla8xxx_minidump_entry_rdmem { + struct qla8xxx_minidump_entry_hdr h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +}; + +/* Read ROM */ +struct qla8xxx_minidump_entry_rdrom { + struct qla8xxx_minidump_entry_hdr h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +}; + +/* Mux entry */ +struct qla8xxx_minidump_entry_mux { + struct qla8xxx_minidump_entry_hdr h; + uint32_t select_addr; + uint32_t rsvd_0; + uint32_t data_size; + uint32_t op_count; + uint32_t select_value; + uint32_t select_value_stride; + uint32_t read_addr; + uint32_t rsvd_1; +}; + +/* Queue entry */ +struct qla8xxx_minidump_entry_queue { + struct qla8xxx_minidump_entry_hdr h; + uint32_t select_addr; + struct { + uint16_t queue_id_stride; + uint16_t rsvd_0; + } q_strd; + uint32_t data_size; + uint32_t op_count; + uint32_t rsvd_1; + uint32_t rsvd_2; + uint32_t read_addr; + struct { + uint8_t read_addr_stride; + uint8_t read_addr_cnt; + uint16_t rsvd_3; + } rd_strd; +}; + +#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129 +#define RQST_TMPLT_SIZE 0x0 +#define RQST_TMPLT 0x1 +#define MD_DIRECT_ROM_WINDOW 0x42110030 +#define MD_DIRECT_ROM_READ_BASE 0x42150000 +#define MD_MIU_TEST_AGT_CTRL 0x41000090 +#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 +#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 + +#define MD_MIU_TEST_AGT_WRDATA_LO 0x410000A0 +#define MD_MIU_TEST_AGT_WRDATA_HI 0x410000A4 +#define MD_MIU_TEST_AGT_WRDATA_ULO 0x410000B0 +#define MD_MIU_TEST_AGT_WRDATA_UHI 0x410000B4 + +#endif diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c new file mode 100644 index 000000000..675332e49 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -0,0 +1,9956 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ +#include +#include +#include +#include +#include + +#include +#include + +#include "ql4_def.h" +#include "ql4_version.h" +#include "ql4_glbl.h" +#include "ql4_dbg.h" +#include "ql4_inline.h" +#include "ql4_83xx.h" + +/* + * Driver version + */ +static char qla4xxx_version_str[40]; 
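qla4xxx_version_str is just a 40-byte buffer holding the human-readable driver version. The sketch below shows one minimal way such a buffer is typically filled at module load; QLA4XXX_DRIVER_VERSION comes from ql4_version.h, while the helper name and exact formatting are assumptions for illustration, not the driver's actual code.

static void qla4xxx_fill_version_str(void)
{
	/* Hypothetical helper: copy the version literal into the buffer,
	 * truncating safely if it ever outgrows the 40-byte array. */
	snprintf(qla4xxx_version_str, sizeof(qla4xxx_version_str), "%s",
		 QLA4XXX_DRIVER_VERSION);
}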
+ +/* + * SRB allocation cache + */ +static struct kmem_cache *srb_cachep; + +/* + * Module parameter information and variables + */ +static int ql4xdisablesysfsboot = 1; +module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ql4xdisablesysfsboot, + " Set to disable exporting boot targets to sysfs.\n" + "\t\t 0 - Export boot targets\n" + "\t\t 1 - Do not export boot targets (Default)"); + +int ql4xdontresethba; +module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ql4xdontresethba, + " Don't reset the HBA for driver recovery.\n" + "\t\t 0 - It will reset HBA (Default)\n" + "\t\t 1 - It will NOT reset HBA"); + +int ql4xextended_error_logging; +module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ql4xextended_error_logging, + " Option to enable extended error logging.\n" + "\t\t 0 - no logging (Default)\n" + "\t\t 2 - debug logging"); + +int ql4xenablemsix = 1; +module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql4xenablemsix, + " Set to enable MSI or MSI-X interrupt mechanism.\n" + "\t\t 0 = enable INTx interrupt mechanism.\n" + "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n" + "\t\t 2 = enable MSI interrupt mechanism."); + +#define QL4_DEF_QDEPTH 32 +static int ql4xmaxqdepth = QL4_DEF_QDEPTH; +module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ql4xmaxqdepth, + " Maximum queue depth to report for target devices.\n" + "\t\t Default: 32."); + +static int ql4xqfulltracking = 1; +module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ql4xqfulltracking, + " Enable or disable dynamic tracking and adjustment of\n" + "\t\t scsi device queue depth.\n" + "\t\t 0 - Disable.\n" + "\t\t 1 - Enable. (Default)"); + +static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; +module_param(ql4xsess_recovery_tmo, int, S_IRUGO); +MODULE_PARM_DESC(ql4xsess_recovery_tmo, + " Target Session Recovery Timeout.\n" + "\t\t Default: 120 sec."); + +int ql4xmdcapmask = 0; +module_param(ql4xmdcapmask, int, S_IRUGO); +MODULE_PARM_DESC(ql4xmdcapmask, + " Set the Minidump driver capture mask level.\n" + "\t\t Default is 0 (firmware default capture mask)\n" + "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF"); + +int ql4xenablemd = 1; +module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ql4xenablemd, + " Set to enable minidump.\n" + "\t\t 0 - disable minidump\n" + "\t\t 1 - enable minidump (Default)"); + +static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); +/* + * SCSI host template entry points + */ +static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha); + +/* + * iSCSI template entry points + */ +static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, + enum iscsi_param param, char *buf); +static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn, + enum iscsi_param param, char *buf); +static int qla4xxx_host_get_param(struct Scsi_Host *shost, + enum iscsi_host_param param, char *buf); +static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, + uint32_t len); +static int qla4xxx_get_iface_param(struct iscsi_iface *iface, + enum iscsi_param_type param_type, + int param, char *buf); +static enum scsi_timeout_action qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); +static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost, + struct sockaddr *dst_addr, + int non_blocking); +static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms); +static void 
qla4xxx_ep_disconnect(struct iscsi_endpoint *ep); +static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, + enum iscsi_param param, char *buf); +static int qla4xxx_conn_start(struct iscsi_cls_conn *conn); +static struct iscsi_cls_conn * +qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx); +static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, + uint64_t transport_fd, int is_leading); +static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn); +static struct iscsi_cls_session * +qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, + uint16_t qdepth, uint32_t initial_cmdsn); +static void qla4xxx_session_destroy(struct iscsi_cls_session *sess); +static void qla4xxx_task_work(struct work_struct *wdata); +static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t); +static int qla4xxx_task_xmit(struct iscsi_task *); +static void qla4xxx_task_cleanup(struct iscsi_task *); +static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session); +static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, + struct iscsi_stats *stats); +static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, + uint32_t iface_type, uint32_t payload_size, + uint32_t pid, struct sockaddr *dst_addr); +static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, + uint32_t *num_entries, char *buf); +static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx); +static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, + int len); +static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len); + +/* + * SCSI host template entry points + */ +static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); +static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); +static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); +static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); +static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); +static int qla4xxx_slave_alloc(struct scsi_device *device); +static umode_t qla4_attr_is_visible(int param_type, int param); +static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); + +/* + * iSCSI Flash DDB sysfs entry points + */ +static int +qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_bus_flash_conn *fnode_conn, + void *data, int len); +static int +qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, + int param, char *buf); +static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, + int len); +static int +qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess); +static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_bus_flash_conn *fnode_conn); +static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_bus_flash_conn *fnode_conn); +static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess); + +static struct qla4_8xxx_legacy_intr_set legacy_intr[] = + QLA82XX_LEGACY_INTR_CONFIG; + +static const uint32_t qla4_82xx_reg_tbl[] = { + QLA82XX_PEG_HALT_STATUS1, + QLA82XX_PEG_HALT_STATUS2, + QLA82XX_PEG_ALIVE_COUNTER, + QLA82XX_CRB_DRV_ACTIVE, + QLA82XX_CRB_DEV_STATE, + QLA82XX_CRB_DRV_STATE, + QLA82XX_CRB_DRV_SCRATCH, + QLA82XX_CRB_DEV_PART_INFO, + QLA82XX_CRB_DRV_IDC_VERSION, + QLA82XX_FW_VERSION_MAJOR, + QLA82XX_FW_VERSION_MINOR, + QLA82XX_FW_VERSION_SUB, + CRB_CMDPEG_STATE, + CRB_TEMP_STATE, +}; + 
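qla4_82xx_reg_tbl above (and the qla4_83xx_reg_tbl that follows) is laid out in the order of enum qla_regs from ql4_nx.h, so common code can name a register by its generic index and pick up the chip-specific CRB offset. The sketch below is hypothetical and only illustrates that indexing together with the temperature decode macros from ql4_nx.h; the driver's own accessor for this purpose appears to be qla4_8xxx_rd_direct(), used a little further down in this file.

/* Hypothetical sketch, not part of the patch. */
static uint32_t qla4_8xxx_reg_offset(const uint32_t *reg_tbl,
				     enum qla_regs reg)
{
	/* e.g. reg_tbl[QLA8XXX_CRB_TEMP_STATE] resolves to CRB_TEMP_STATE
	 * on ISP82xx but to QLA83XX_ASIC_TEMP on ISP83xx. */
	return reg_tbl[reg];
}

static void qla4_8xxx_report_temp(uint32_t temp_reg)
{
	/* The packed temperature word splits into a value (upper 16 bits)
	 * and a state (lower 16 bits, one of QLA82XX_TEMP_*). */
	uint32_t val = qla82xx_get_temp_val(temp_reg);
	uint32_t state = qla82xx_get_temp_state(temp_reg);

	if (state == QLA82XX_TEMP_PANIC)
		pr_err("qla4xxx: fatal temperature, device shut down (%u)\n", val);
	else if (state == QLA82XX_TEMP_WARN)
		pr_warn("qla4xxx: temperature getting high (%u)\n", val);
}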
+static const uint32_t qla4_83xx_reg_tbl[] = { + QLA83XX_PEG_HALT_STATUS1, + QLA83XX_PEG_HALT_STATUS2, + QLA83XX_PEG_ALIVE_COUNTER, + QLA83XX_CRB_DRV_ACTIVE, + QLA83XX_CRB_DEV_STATE, + QLA83XX_CRB_DRV_STATE, + QLA83XX_CRB_DRV_SCRATCH, + QLA83XX_CRB_DEV_PART_INFO1, + QLA83XX_CRB_IDC_VER_MAJOR, + QLA83XX_FW_VER_MAJOR, + QLA83XX_FW_VER_MINOR, + QLA83XX_FW_VER_SUB, + QLA83XX_CMDPEG_STATE, + QLA83XX_ASIC_TEMP, +}; + +static struct scsi_host_template qla4xxx_driver_template = { + .module = THIS_MODULE, + .name = DRIVER_NAME, + .proc_name = DRIVER_NAME, + .queuecommand = qla4xxx_queuecommand, + .cmd_size = sizeof(struct qla4xxx_cmd_priv), + + .eh_abort_handler = qla4xxx_eh_abort, + .eh_device_reset_handler = qla4xxx_eh_device_reset, + .eh_target_reset_handler = qla4xxx_eh_target_reset, + .eh_host_reset_handler = qla4xxx_eh_host_reset, + .eh_timed_out = qla4xxx_eh_cmd_timed_out, + + .slave_alloc = qla4xxx_slave_alloc, + .change_queue_depth = scsi_change_queue_depth, + + .this_id = -1, + .cmd_per_lun = 3, + .sg_tablesize = SG_ALL, + + .max_sectors = 0xFFFF, + .shost_groups = qla4xxx_host_groups, + .host_reset = qla4xxx_host_reset, + .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC, +}; + +static struct iscsi_transport qla4xxx_iscsi_transport = { + .owner = THIS_MODULE, + .name = DRIVER_NAME, + .caps = CAP_TEXT_NEGO | + CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST | + CAP_DATADGST | CAP_LOGIN_OFFLOAD | + CAP_MULTI_R2T, + .attr_is_visible = qla4_attr_is_visible, + .create_session = qla4xxx_session_create, + .destroy_session = qla4xxx_session_destroy, + .start_conn = qla4xxx_conn_start, + .create_conn = qla4xxx_conn_create, + .bind_conn = qla4xxx_conn_bind, + .unbind_conn = iscsi_conn_unbind, + .stop_conn = iscsi_conn_stop, + .destroy_conn = qla4xxx_conn_destroy, + .set_param = iscsi_set_param, + .get_conn_param = qla4xxx_conn_get_param, + .get_session_param = qla4xxx_session_get_param, + .get_ep_param = qla4xxx_get_ep_param, + .ep_connect = qla4xxx_ep_connect, + .ep_poll = qla4xxx_ep_poll, + .ep_disconnect = qla4xxx_ep_disconnect, + .get_stats = qla4xxx_conn_get_stats, + .send_pdu = iscsi_conn_send_pdu, + .xmit_task = qla4xxx_task_xmit, + .cleanup_task = qla4xxx_task_cleanup, + .alloc_pdu = qla4xxx_alloc_pdu, + + .get_host_param = qla4xxx_host_get_param, + .set_iface_param = qla4xxx_iface_set_param, + .get_iface_param = qla4xxx_get_iface_param, + .bsg_request = qla4xxx_bsg_request, + .send_ping = qla4xxx_send_ping, + .get_chap = qla4xxx_get_chap_list, + .delete_chap = qla4xxx_delete_chap, + .set_chap = qla4xxx_set_chap_entry, + .get_flashnode_param = qla4xxx_sysfs_ddb_get_param, + .set_flashnode_param = qla4xxx_sysfs_ddb_set_param, + .new_flashnode = qla4xxx_sysfs_ddb_add, + .del_flashnode = qla4xxx_sysfs_ddb_delete, + .login_flashnode = qla4xxx_sysfs_ddb_login, + .logout_flashnode = qla4xxx_sysfs_ddb_logout, + .logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid, + .get_host_stats = qla4xxx_get_host_stats, +}; + +static struct scsi_transport_template *qla4xxx_scsi_transport; + +static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha) +{ + u32 reg_val = 0; + int rval = QLA_SUCCESS; + + if (is_qla8022(ha)) + reg_val = readl(&ha->qla4_82xx_reg->host_status); + else if (is_qla8032(ha) || is_qla8042(ha)) + reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); + else + reg_val = readw(&ha->reg->ctrl_status); + + if (reg_val == QL4_ISP_REG_DISCONNECT) + rval = QLA_ERROR; + + return rval; +} + +static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, + uint32_t iface_type, uint32_t 
payload_size, + uint32_t pid, struct sockaddr *dst_addr) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + struct sockaddr_in *addr; + struct sockaddr_in6 *addr6; + uint32_t options = 0; + uint8_t ipaddr[IPv6_ADDR_LEN]; + int rval; + + memset(ipaddr, 0, IPv6_ADDR_LEN); + /* IPv4 to IPv4 */ + if ((iface_type == ISCSI_IFACE_TYPE_IPV4) && + (dst_addr->sa_family == AF_INET)) { + addr = (struct sockaddr_in *)dst_addr; + memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN); + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 " + "dest: %pI4\n", __func__, + &ha->ip_config.ip_address, ipaddr)); + rval = qla4xxx_ping_iocb(ha, options, payload_size, pid, + ipaddr); + if (rval) + rval = -EINVAL; + } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) && + (dst_addr->sa_family == AF_INET6)) { + /* IPv6 to IPv6 */ + addr6 = (struct sockaddr_in6 *)dst_addr; + memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN); + + options |= PING_IPV6_PROTOCOL_ENABLE; + + /* Ping using LinkLocal address */ + if ((iface_num == 0) || (iface_num == 1)) { + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping " + "src: %pI6 dest: %pI6\n", __func__, + &ha->ip_config.ipv6_link_local_addr, + ipaddr)); + options |= PING_IPV6_LINKLOCAL_ADDR; + rval = qla4xxx_ping_iocb(ha, options, payload_size, + pid, ipaddr); + } else { + ql4_printk(KERN_WARNING, ha, "%s: iface num = %d " + "not supported\n", __func__, iface_num); + rval = -ENOSYS; + goto exit_send_ping; + } + + /* + * If ping using LinkLocal address fails, try ping using + * IPv6 address + */ + if (rval != QLA_SUCCESS) { + options &= ~PING_IPV6_LINKLOCAL_ADDR; + if (iface_num == 0) { + options |= PING_IPV6_ADDR0; + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 " + "Ping src: %pI6 " + "dest: %pI6\n", __func__, + &ha->ip_config.ipv6_addr0, + ipaddr)); + } else if (iface_num == 1) { + options |= PING_IPV6_ADDR1; + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 " + "Ping src: %pI6 " + "dest: %pI6\n", __func__, + &ha->ip_config.ipv6_addr1, + ipaddr)); + } + rval = qla4xxx_ping_iocb(ha, options, payload_size, + pid, ipaddr); + if (rval) + rval = -EINVAL; + } + } else + rval = -ENOSYS; +exit_send_ping: + return rval; +} + +static umode_t qla4_attr_is_visible(int param_type, int param) +{ + switch (param_type) { + case ISCSI_HOST_PARAM: + switch (param) { + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + case ISCSI_HOST_PARAM_INITIATOR_NAME: + case ISCSI_HOST_PARAM_PORT_STATE: + case ISCSI_HOST_PARAM_PORT_SPEED: + return S_IRUGO; + default: + return 0; + } + case ISCSI_PARAM: + switch (param) { + case ISCSI_PARAM_PERSISTENT_ADDRESS: + case ISCSI_PARAM_PERSISTENT_PORT: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_TARGET_NAME: + case ISCSI_PARAM_TPGT: + case ISCSI_PARAM_TARGET_ALIAS: + case ISCSI_PARAM_MAX_BURST: + case ISCSI_PARAM_MAX_R2T: + case ISCSI_PARAM_FIRST_BURST: + case ISCSI_PARAM_MAX_RECV_DLENGTH: + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + case ISCSI_PARAM_IFACE_NAME: + case ISCSI_PARAM_CHAP_OUT_IDX: + case ISCSI_PARAM_CHAP_IN_IDX: + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + case ISCSI_PARAM_USERNAME_IN: + case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: + case ISCSI_PARAM_DISCOVERY_SESS: + case ISCSI_PARAM_PORTAL_TYPE: + case ISCSI_PARAM_CHAP_AUTH_EN: + case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: + case ISCSI_PARAM_BIDI_CHAP_EN: + case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: + case ISCSI_PARAM_DEF_TIME2WAIT: + case ISCSI_PARAM_DEF_TIME2RETAIN: + case ISCSI_PARAM_HDRDGST_EN: + case 
ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_PDU_INORDER_EN: + case ISCSI_PARAM_DATASEQ_INORDER_EN: + case ISCSI_PARAM_MAX_SEGMENT_SIZE: + case ISCSI_PARAM_TCP_TIMESTAMP_STAT: + case ISCSI_PARAM_TCP_WSF_DISABLE: + case ISCSI_PARAM_TCP_NAGLE_DISABLE: + case ISCSI_PARAM_TCP_TIMER_SCALE: + case ISCSI_PARAM_TCP_TIMESTAMP_EN: + case ISCSI_PARAM_TCP_XMIT_WSF: + case ISCSI_PARAM_TCP_RECV_WSF: + case ISCSI_PARAM_IP_FRAGMENT_DISABLE: + case ISCSI_PARAM_IPV4_TOS: + case ISCSI_PARAM_IPV6_TC: + case ISCSI_PARAM_IPV6_FLOW_LABEL: + case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: + case ISCSI_PARAM_KEEPALIVE_TMO: + case ISCSI_PARAM_LOCAL_PORT: + case ISCSI_PARAM_ISID: + case ISCSI_PARAM_TSID: + case ISCSI_PARAM_DEF_TASKMGMT_TMO: + case ISCSI_PARAM_ERL: + case ISCSI_PARAM_STATSN: + case ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_DISCOVERY_PARENT_IDX: + case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: + case ISCSI_PARAM_LOCAL_IPADDR: + return S_IRUGO; + default: + return 0; + } + case ISCSI_NET_PARAM: + switch (param) { + case ISCSI_NET_PARAM_IPV4_ADDR: + case ISCSI_NET_PARAM_IPV4_SUBNET: + case ISCSI_NET_PARAM_IPV4_GW: + case ISCSI_NET_PARAM_IPV4_BOOTPROTO: + case ISCSI_NET_PARAM_IFACE_ENABLE: + case ISCSI_NET_PARAM_IPV6_LINKLOCAL: + case ISCSI_NET_PARAM_IPV6_ADDR: + case ISCSI_NET_PARAM_IPV6_ROUTER: + case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: + case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: + case ISCSI_NET_PARAM_VLAN_ID: + case ISCSI_NET_PARAM_VLAN_PRIORITY: + case ISCSI_NET_PARAM_VLAN_ENABLED: + case ISCSI_NET_PARAM_MTU: + case ISCSI_NET_PARAM_PORT: + case ISCSI_NET_PARAM_IPADDR_STATE: + case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: + case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: + case ISCSI_NET_PARAM_DELAYED_ACK_EN: + case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: + case ISCSI_NET_PARAM_TCP_WSF_DISABLE: + case ISCSI_NET_PARAM_TCP_WSF: + case ISCSI_NET_PARAM_TCP_TIMER_SCALE: + case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: + case ISCSI_NET_PARAM_CACHE_ID: + case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: + case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: + case ISCSI_NET_PARAM_IPV4_TOS_EN: + case ISCSI_NET_PARAM_IPV4_TOS: + case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: + case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: + case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: + case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: + case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: + case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: + case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: + case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: + case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: + case ISCSI_NET_PARAM_REDIRECT_EN: + case ISCSI_NET_PARAM_IPV4_TTL: + case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: + case ISCSI_NET_PARAM_IPV6_MLD_EN: + case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: + case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: + case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: + case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: + case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: + case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: + case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: + case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: + return S_IRUGO; + default: + return 0; + } + case ISCSI_IFACE_PARAM: + switch (param) { + case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: + case ISCSI_IFACE_PARAM_HDRDGST_EN: + case ISCSI_IFACE_PARAM_DATADGST_EN: + case ISCSI_IFACE_PARAM_IMM_DATA_EN: + case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: + case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: + case ISCSI_IFACE_PARAM_PDU_INORDER_EN: + case ISCSI_IFACE_PARAM_ERL: + case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: + case 
ISCSI_IFACE_PARAM_FIRST_BURST: + case ISCSI_IFACE_PARAM_MAX_R2T: + case ISCSI_IFACE_PARAM_MAX_BURST: + case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: + case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: + case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: + case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: + case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: + case ISCSI_IFACE_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + case ISCSI_FLASHNODE_PARAM: + switch (param) { + case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: + case ISCSI_FLASHNODE_PORTAL_TYPE: + case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: + case ISCSI_FLASHNODE_DISCOVERY_SESS: + case ISCSI_FLASHNODE_ENTRY_EN: + case ISCSI_FLASHNODE_HDR_DGST_EN: + case ISCSI_FLASHNODE_DATA_DGST_EN: + case ISCSI_FLASHNODE_IMM_DATA_EN: + case ISCSI_FLASHNODE_INITIAL_R2T_EN: + case ISCSI_FLASHNODE_DATASEQ_INORDER: + case ISCSI_FLASHNODE_PDU_INORDER: + case ISCSI_FLASHNODE_CHAP_AUTH_EN: + case ISCSI_FLASHNODE_SNACK_REQ_EN: + case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: + case ISCSI_FLASHNODE_BIDI_CHAP_EN: + case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: + case ISCSI_FLASHNODE_ERL: + case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: + case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: + case ISCSI_FLASHNODE_TCP_WSF_DISABLE: + case ISCSI_FLASHNODE_TCP_TIMER_SCALE: + case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: + case ISCSI_FLASHNODE_IP_FRAG_DISABLE: + case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: + case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: + case ISCSI_FLASHNODE_FIRST_BURST: + case ISCSI_FLASHNODE_DEF_TIME2WAIT: + case ISCSI_FLASHNODE_DEF_TIME2RETAIN: + case ISCSI_FLASHNODE_MAX_R2T: + case ISCSI_FLASHNODE_KEEPALIVE_TMO: + case ISCSI_FLASHNODE_ISID: + case ISCSI_FLASHNODE_TSID: + case ISCSI_FLASHNODE_PORT: + case ISCSI_FLASHNODE_MAX_BURST: + case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: + case ISCSI_FLASHNODE_IPADDR: + case ISCSI_FLASHNODE_ALIAS: + case ISCSI_FLASHNODE_REDIRECT_IPADDR: + case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: + case ISCSI_FLASHNODE_LOCAL_PORT: + case ISCSI_FLASHNODE_IPV4_TOS: + case ISCSI_FLASHNODE_IPV6_TC: + case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: + case ISCSI_FLASHNODE_NAME: + case ISCSI_FLASHNODE_TPGT: + case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: + case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: + case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: + case ISCSI_FLASHNODE_TCP_XMIT_WSF: + case ISCSI_FLASHNODE_TCP_RECV_WSF: + case ISCSI_FLASHNODE_CHAP_OUT_IDX: + case ISCSI_FLASHNODE_USERNAME: + case ISCSI_FLASHNODE_PASSWORD: + case ISCSI_FLASHNODE_STATSN: + case ISCSI_FLASHNODE_EXP_STATSN: + case ISCSI_FLASHNODE_IS_BOOT_TGT: + return S_IRUGO; + default: + return 0; + } + } + + return 0; +} + +/** + * qla4xxx_create_chap_list - Create CHAP list from FLASH + * @ha: pointer to adapter structure + * + * Read flash and make a list of CHAP entries, during login when a CHAP entry + * is received, it will be checked in this list. If entry exist then the CHAP + * entry index is set in the DDB. If CHAP entry does not exist in this list + * then a new entry is added in FLASH in CHAP table and the index obtained is + * used in the DDB. + **/ +static void qla4xxx_create_chap_list(struct scsi_qla_host *ha) +{ + int rval = 0; + uint8_t *chap_flash_data = NULL; + uint32_t offset; + dma_addr_t chap_dma; + uint32_t chap_size = 0; + + if (is_qla40XX(ha)) + chap_size = MAX_CHAP_ENTRIES_40XX * + sizeof(struct ql4_chap_table); + else /* Single region contains CHAP info for both + * ports which is divided into half for each port. 
+ */ + chap_size = ha->hw.flt_chap_size / 2; + + chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size, + &chap_dma, GFP_KERNEL); + if (!chap_flash_data) { + ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n"); + return; + } + + if (is_qla40XX(ha)) { + offset = FLASH_CHAP_OFFSET; + } else { + offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); + if (ha->port_num == 1) + offset += chap_size; + } + + rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); + if (rval != QLA_SUCCESS) + goto exit_chap_list; + + if (ha->chap_list == NULL) + ha->chap_list = vmalloc(chap_size); + if (ha->chap_list == NULL) { + ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n"); + goto exit_chap_list; + } + + memcpy(ha->chap_list, chap_flash_data, chap_size); + +exit_chap_list: + dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma); +} + +static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha, + int16_t chap_index, + struct ql4_chap_table **chap_entry) +{ + int rval = QLA_ERROR; + int max_chap_entries; + + if (!ha->chap_list) { + ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); + goto exit_get_chap; + } + + if (is_qla80XX(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + if (chap_index > max_chap_entries) { + ql4_printk(KERN_ERR, ha, "Invalid Chap index\n"); + goto exit_get_chap; + } + + *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index; + if ((*chap_entry)->cookie != + cpu_to_le16(CHAP_VALID_COOKIE)) { + *chap_entry = NULL; + } else { + rval = QLA_SUCCESS; + } + +exit_get_chap: + return rval; +} + +/** + * qla4xxx_find_free_chap_index - Find the first free chap index + * @ha: pointer to adapter structure + * @chap_index: CHAP index to be returned + * + * Find the first free chap index available in the chap table + * + * Note: Caller should acquire the chap lock before getting here. 
+ **/ +static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha, + uint16_t *chap_index) +{ + int i, rval; + int free_index = -1; + int max_chap_entries = 0; + struct ql4_chap_table *chap_table; + + if (is_qla80XX(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + if (!ha->chap_list) { + ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); + rval = QLA_ERROR; + goto exit_find_chap; + } + + for (i = 0; i < max_chap_entries; i++) { + chap_table = (struct ql4_chap_table *)ha->chap_list + i; + + if ((chap_table->cookie != + cpu_to_le16(CHAP_VALID_COOKIE)) && + (i > MAX_RESRV_CHAP_IDX)) { + free_index = i; + break; + } + } + + if (free_index != -1) { + *chap_index = free_index; + rval = QLA_SUCCESS; + } else { + rval = QLA_ERROR; + } + +exit_find_chap: + return rval; +} + +static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, + uint32_t *num_entries, char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + struct ql4_chap_table *chap_table; + struct iscsi_chap_rec *chap_rec; + int max_chap_entries = 0; + int valid_chap_entries = 0; + int ret = 0, i; + + if (is_qla80XX(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n", + __func__, *num_entries, chap_tbl_idx); + + if (!buf) { + ret = -ENOMEM; + goto exit_get_chap_list; + } + + qla4xxx_create_chap_list(ha); + + chap_rec = (struct iscsi_chap_rec *) buf; + mutex_lock(&ha->chap_sem); + for (i = chap_tbl_idx; i < max_chap_entries; i++) { + chap_table = (struct ql4_chap_table *)ha->chap_list + i; + if (chap_table->cookie != + cpu_to_le16(CHAP_VALID_COOKIE)) + continue; + + chap_rec->chap_tbl_idx = i; + strscpy(chap_rec->username, chap_table->name, + ISCSI_CHAP_AUTH_NAME_MAX_LEN); + strscpy(chap_rec->password, chap_table->secret, + QL4_CHAP_MAX_SECRET_LEN); + chap_rec->password_length = chap_table->secret_len; + + if (chap_table->flags & BIT_7) /* local */ + chap_rec->chap_type = CHAP_TYPE_OUT; + + if (chap_table->flags & BIT_6) /* peer */ + chap_rec->chap_type = CHAP_TYPE_IN; + + chap_rec++; + + valid_chap_entries++; + if (valid_chap_entries == *num_entries) + break; + } + mutex_unlock(&ha->chap_sem); + +exit_get_chap_list: + ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n", + __func__, valid_chap_entries); + *num_entries = valid_chap_entries; + return ret; +} + +static int __qla4xxx_is_chap_active(struct device *dev, void *data) +{ + int ret = 0; + uint16_t *chap_tbl_idx = (uint16_t *) data; + struct iscsi_cls_session *cls_session; + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + + if (!iscsi_is_session_dev(dev)) + goto exit_is_chap_active; + + cls_session = iscsi_dev_to_session(dev); + sess = cls_session->dd_data; + ddb_entry = sess->dd_data; + + if (iscsi_is_session_online(cls_session)) + goto exit_is_chap_active; + + if (ddb_entry->chap_tbl_idx == *chap_tbl_idx) + ret = 1; + +exit_is_chap_active: + return ret; +} + +static int qla4xxx_is_chap_active(struct Scsi_Host *shost, + uint16_t chap_tbl_idx) +{ + int ret = 0; + + ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx, + __qla4xxx_is_chap_active); + + return ret; +} + +static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + struct ql4_chap_table *chap_table; + dma_addr_t chap_dma; + int 
max_chap_entries = 0; + uint32_t offset = 0; + uint32_t chap_size; + int ret = 0; + + chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); + if (chap_table == NULL) + return -ENOMEM; + + if (is_qla80XX(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + if (chap_tbl_idx > max_chap_entries) { + ret = -EINVAL; + goto exit_delete_chap; + } + + /* Check if chap index is in use. + * If chap is in use don't delet chap entry */ + ret = qla4xxx_is_chap_active(shost, chap_tbl_idx); + if (ret) { + ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot " + "delete from flash\n", chap_tbl_idx); + ret = -EBUSY; + goto exit_delete_chap; + } + + chap_size = sizeof(struct ql4_chap_table); + if (is_qla40XX(ha)) + offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size); + else { + offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); + /* flt_chap_size is CHAP table size for both ports + * so divide it by 2 to calculate the offset for second port + */ + if (ha->port_num == 1) + offset += (ha->hw.flt_chap_size / 2); + offset += (chap_tbl_idx * chap_size); + } + + ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); + if (ret != QLA_SUCCESS) { + ret = -EINVAL; + goto exit_delete_chap; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n", + __le16_to_cpu(chap_table->cookie))); + + if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) { + ql4_printk(KERN_ERR, ha, "No valid chap entry found\n"); + goto exit_delete_chap; + } + + chap_table->cookie = cpu_to_le16(0xFFFF); + + offset = FLASH_CHAP_OFFSET | + (chap_tbl_idx * sizeof(struct ql4_chap_table)); + ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size, + FLASH_OPT_RMW_COMMIT); + if (ret == QLA_SUCCESS && ha->chap_list) { + mutex_lock(&ha->chap_sem); + /* Update ha chap_list cache */ + memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx, + chap_table, sizeof(struct ql4_chap_table)); + mutex_unlock(&ha->chap_sem); + } + if (ret != QLA_SUCCESS) + ret = -EINVAL; + +exit_delete_chap: + dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma); + return ret; +} + +/** + * qla4xxx_set_chap_entry - Make chap entry with given information + * @shost: pointer to host + * @data: chap info - credentials, index and type to make chap entry + * @len: length of data + * + * Add or update chap entry with the given information + **/ +static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + struct iscsi_chap_rec chap_rec; + struct ql4_chap_table *chap_entry = NULL; + struct iscsi_param_info *param_info; + struct nlattr *attr; + int max_chap_entries = 0; + int type; + int rem = len; + int rc = 0; + int size; + + memset(&chap_rec, 0, sizeof(chap_rec)); + + nla_for_each_attr(attr, data, len, rem) { + if (nla_len(attr) < sizeof(*param_info)) { + rc = -EINVAL; + goto exit_set_chap; + } + + param_info = nla_data(attr); + + switch (param_info->param) { + case ISCSI_CHAP_PARAM_INDEX: + chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value; + break; + case ISCSI_CHAP_PARAM_CHAP_TYPE: + chap_rec.chap_type = param_info->value[0]; + break; + case ISCSI_CHAP_PARAM_USERNAME: + size = min_t(size_t, sizeof(chap_rec.username), + param_info->len); + memcpy(chap_rec.username, param_info->value, size); + break; + case ISCSI_CHAP_PARAM_PASSWORD: + size = min_t(size_t, sizeof(chap_rec.password), + param_info->len); + memcpy(chap_rec.password, param_info->value, size); + 
break; + case ISCSI_CHAP_PARAM_PASSWORD_LEN: + chap_rec.password_length = param_info->value[0]; + break; + default: + ql4_printk(KERN_ERR, ha, + "%s: No such sysfs attribute\n", __func__); + rc = -ENOSYS; + goto exit_set_chap; + } + } + + if (chap_rec.chap_type == CHAP_TYPE_IN) + type = BIDI_CHAP; + else + type = LOCAL_CHAP; + + if (is_qla80XX(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + mutex_lock(&ha->chap_sem); + if (chap_rec.chap_tbl_idx < max_chap_entries) { + rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx, + &chap_entry); + if (!rc) { + if (!(type == qla4xxx_get_chap_type(chap_entry))) { + ql4_printk(KERN_INFO, ha, + "Type mismatch for CHAP entry %d\n", + chap_rec.chap_tbl_idx); + rc = -EINVAL; + goto exit_unlock_chap; + } + + /* If chap index is in use then don't modify it */ + rc = qla4xxx_is_chap_active(shost, + chap_rec.chap_tbl_idx); + if (rc) { + ql4_printk(KERN_INFO, ha, + "CHAP entry %d is in use\n", + chap_rec.chap_tbl_idx); + rc = -EBUSY; + goto exit_unlock_chap; + } + } + } else { + rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx); + if (rc) { + ql4_printk(KERN_INFO, ha, "CHAP entry not available\n"); + rc = -EBUSY; + goto exit_unlock_chap; + } + } + + rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password, + chap_rec.chap_tbl_idx, type); + +exit_unlock_chap: + mutex_unlock(&ha->chap_sem); + +exit_set_chap: + return rc; +} + + +static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + struct iscsi_offload_host_stats *host_stats = NULL; + int host_stats_size; + int ret = 0; + int ddb_idx = 0; + struct ql_iscsi_stats *ql_iscsi_stats = NULL; + int stats_size; + dma_addr_t iscsi_stats_dma; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__)); + + host_stats_size = sizeof(struct iscsi_offload_host_stats); + + if (host_stats_size != len) { + ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n", + __func__, len, host_stats_size); + ret = -EINVAL; + goto exit_host_stats; + } + host_stats = (struct iscsi_offload_host_stats *)buf; + + if (!buf) { + ret = -ENOMEM; + goto exit_host_stats; + } + + stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); + + ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, + &iscsi_stats_dma, GFP_KERNEL); + if (!ql_iscsi_stats) { + ql4_printk(KERN_ERR, ha, + "Unable to allocate memory for iscsi stats\n"); + ret = -ENOMEM; + goto exit_host_stats; + } + + ret = qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size, + iscsi_stats_dma); + if (ret != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, + "Unable to retrieve iscsi stats\n"); + ret = -EIO; + goto exit_host_stats; + } + host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames); + host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes); + host_stats->mactx_multicast_frames = + le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames); + host_stats->mactx_broadcast_frames = + le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames); + host_stats->mactx_pause_frames = + le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames); + host_stats->mactx_control_frames = + le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames); + host_stats->mactx_deferral = + le64_to_cpu(ql_iscsi_stats->mac_tx_deferral); + host_stats->mactx_excess_deferral = + le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral); + host_stats->mactx_late_collision = + 
le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision); + host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort); + host_stats->mactx_single_collision = + le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision); + host_stats->mactx_multiple_collision = + le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision); + host_stats->mactx_collision = + le64_to_cpu(ql_iscsi_stats->mac_tx_collision); + host_stats->mactx_frames_dropped = + le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped); + host_stats->mactx_jumbo_frames = + le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames); + host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames); + host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes); + host_stats->macrx_unknown_control_frames = + le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames); + host_stats->macrx_pause_frames = + le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames); + host_stats->macrx_control_frames = + le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames); + host_stats->macrx_dribble = + le64_to_cpu(ql_iscsi_stats->mac_rx_dribble); + host_stats->macrx_frame_length_error = + le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error); + host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber); + host_stats->macrx_carrier_sense_error = + le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error); + host_stats->macrx_frame_discarded = + le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded); + host_stats->macrx_frames_dropped = + le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped); + host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error); + host_stats->mac_encoding_error = + le64_to_cpu(ql_iscsi_stats->mac_encoding_error); + host_stats->macrx_length_error_large = + le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large); + host_stats->macrx_length_error_small = + le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small); + host_stats->macrx_multicast_frames = + le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames); + host_stats->macrx_broadcast_frames = + le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames); + host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets); + host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes); + host_stats->iptx_fragments = + le64_to_cpu(ql_iscsi_stats->ip_tx_fragments); + host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets); + host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes); + host_stats->iprx_fragments = + le64_to_cpu(ql_iscsi_stats->ip_rx_fragments); + host_stats->ip_datagram_reassembly = + le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly); + host_stats->ip_invalid_address_error = + le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error); + host_stats->ip_error_packets = + le64_to_cpu(ql_iscsi_stats->ip_error_packets); + host_stats->ip_fragrx_overlap = + le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap); + host_stats->ip_fragrx_outoforder = + le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder); + host_stats->ip_datagram_reassembly_timeout = + le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout); + host_stats->ipv6tx_packets = + le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets); + host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes); + host_stats->ipv6tx_fragments = + le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments); + host_stats->ipv6rx_packets = + le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets); + host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes); + host_stats->ipv6rx_fragments = + 
le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments); + host_stats->ipv6_datagram_reassembly = + le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly); + host_stats->ipv6_invalid_address_error = + le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error); + host_stats->ipv6_error_packets = + le64_to_cpu(ql_iscsi_stats->ipv6_error_packets); + host_stats->ipv6_fragrx_overlap = + le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap); + host_stats->ipv6_fragrx_outoforder = + le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder); + host_stats->ipv6_datagram_reassembly_timeout = + le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout); + host_stats->tcptx_segments = + le64_to_cpu(ql_iscsi_stats->tcp_tx_segments); + host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes); + host_stats->tcprx_segments = + le64_to_cpu(ql_iscsi_stats->tcp_rx_segments); + host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte); + host_stats->tcp_duplicate_ack_retx = + le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx); + host_stats->tcp_retx_timer_expired = + le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired); + host_stats->tcprx_duplicate_ack = + le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack); + host_stats->tcprx_pure_ackr = + le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr); + host_stats->tcptx_delayed_ack = + le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack); + host_stats->tcptx_pure_ack = + le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack); + host_stats->tcprx_segment_error = + le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error); + host_stats->tcprx_segment_outoforder = + le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder); + host_stats->tcprx_window_probe = + le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe); + host_stats->tcprx_window_update = + le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update); + host_stats->tcptx_window_probe_persist = + le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist); + host_stats->ecc_error_correction = + le64_to_cpu(ql_iscsi_stats->ecc_error_correction); + host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx); + host_stats->iscsi_data_bytes_tx = + le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx); + host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx); + host_stats->iscsi_data_bytes_rx = + le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx); + host_stats->iscsi_io_completed = + le64_to_cpu(ql_iscsi_stats->iscsi_io_completed); + host_stats->iscsi_unexpected_io_rx = + le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx); + host_stats->iscsi_format_error = + le64_to_cpu(ql_iscsi_stats->iscsi_format_error); + host_stats->iscsi_hdr_digest_error = + le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error); + host_stats->iscsi_data_digest_error = + le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error); + host_stats->iscsi_sequence_error = + le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error); +exit_host_stats: + if (ql_iscsi_stats) + dma_free_coherent(&ha->pdev->dev, stats_size, + ql_iscsi_stats, iscsi_stats_dma); + + ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n", + __func__); + return ret; +} + +static int qla4xxx_get_iface_param(struct iscsi_iface *iface, + enum iscsi_param_type param_type, + int param, char *buf) +{ + struct Scsi_Host *shost = iscsi_iface_to_shost(iface); + struct scsi_qla_host *ha = to_qla_host(shost); + int ival; + char *pval = NULL; + int len = -ENOSYS; + + if (param_type == ISCSI_NET_PARAM) { + switch (param) { + case ISCSI_NET_PARAM_IPV4_ADDR: + len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); + break; + case 
ISCSI_NET_PARAM_IPV4_SUBNET: + len = sprintf(buf, "%pI4\n", + &ha->ip_config.subnet_mask); + break; + case ISCSI_NET_PARAM_IPV4_GW: + len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway); + break; + case ISCSI_NET_PARAM_IFACE_ENABLE: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_IPV4_PROTOCOL_ENABLE, pval); + } else { + OP_STATE(ha->ip_config.ipv6_options, + IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval); + } + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_BOOTPROTO: + len = sprintf(buf, "%s\n", + (ha->ip_config.tcp_options & + TCPOPT_DHCP_ENABLE) ? + "dhcp" : "static"); + break; + case ISCSI_NET_PARAM_IPV6_ADDR: + if (iface->iface_num == 0) + len = sprintf(buf, "%pI6\n", + &ha->ip_config.ipv6_addr0); + if (iface->iface_num == 1) + len = sprintf(buf, "%pI6\n", + &ha->ip_config.ipv6_addr1); + break; + case ISCSI_NET_PARAM_IPV6_LINKLOCAL: + len = sprintf(buf, "%pI6\n", + &ha->ip_config.ipv6_link_local_addr); + break; + case ISCSI_NET_PARAM_IPV6_ROUTER: + len = sprintf(buf, "%pI6\n", + &ha->ip_config.ipv6_default_router_addr); + break; + case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: + pval = (ha->ip_config.ipv6_addl_options & + IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ? + "nd" : "static"; + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: + pval = (ha->ip_config.ipv6_addl_options & + IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ? + "auto" : "static"; + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_VLAN_ID: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + ival = ha->ip_config.ipv4_vlan_tag & + ISCSI_MAX_VLAN_ID; + else + ival = ha->ip_config.ipv6_vlan_tag & + ISCSI_MAX_VLAN_ID; + + len = sprintf(buf, "%d\n", ival); + break; + case ISCSI_NET_PARAM_VLAN_PRIORITY: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + ival = (ha->ip_config.ipv4_vlan_tag >> 13) & + ISCSI_MAX_VLAN_PRIORITY; + else + ival = (ha->ip_config.ipv6_vlan_tag >> 13) & + ISCSI_MAX_VLAN_PRIORITY; + + len = sprintf(buf, "%d\n", ival); + break; + case ISCSI_NET_PARAM_VLAN_ENABLED: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_VLAN_TAGGING_ENABLE, pval); + } else { + OP_STATE(ha->ip_config.ipv6_options, + IPV6_OPT_VLAN_TAGGING_ENABLE, pval); + } + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_MTU: + len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size); + break; + case ISCSI_NET_PARAM_PORT: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + len = sprintf(buf, "%d\n", + ha->ip_config.ipv4_port); + else + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_port); + break; + case ISCSI_NET_PARAM_IPADDR_STATE: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + pval = iscsi_get_ipaddress_state_name( + ha->ip_config.ipv4_addr_state); + } else { + if (iface->iface_num == 0) + pval = iscsi_get_ipaddress_state_name( + ha->ip_config.ipv6_addr0_state); + else if (iface->iface_num == 1) + pval = iscsi_get_ipaddress_state_name( + ha->ip_config.ipv6_addr1_state); + } + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: + pval = iscsi_get_ipaddress_state_name( + ha->ip_config.ipv6_link_local_state); + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: + pval = iscsi_get_router_state_name( + ha->ip_config.ipv6_default_router_state); + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_DELAYED_ACK_EN: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + 
OP_STATE(~ha->ip_config.tcp_options, + TCPOPT_DELAYED_ACK_DISABLE, pval); + } else { + OP_STATE(~ha->ip_config.ipv6_tcp_options, + IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval); + } + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + OP_STATE(~ha->ip_config.tcp_options, + TCPOPT_NAGLE_ALGO_DISABLE, pval); + } else { + OP_STATE(~ha->ip_config.ipv6_tcp_options, + IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval); + } + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_TCP_WSF_DISABLE: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + OP_STATE(~ha->ip_config.tcp_options, + TCPOPT_WINDOW_SCALE_DISABLE, pval); + } else { + OP_STATE(~ha->ip_config.ipv6_tcp_options, + IPV6_TCPOPT_WINDOW_SCALE_DISABLE, + pval); + } + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_TCP_WSF: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + len = sprintf(buf, "%d\n", + ha->ip_config.tcp_wsf); + else + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_tcp_wsf); + break; + case ISCSI_NET_PARAM_TCP_TIMER_SCALE: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + ival = (ha->ip_config.tcp_options & + TCPOPT_TIMER_SCALE) >> 1; + else + ival = (ha->ip_config.ipv6_tcp_options & + IPV6_TCPOPT_TIMER_SCALE) >> 1; + + len = sprintf(buf, "%d\n", ival); + break; + case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + OP_STATE(ha->ip_config.tcp_options, + TCPOPT_TIMESTAMP_ENABLE, pval); + } else { + OP_STATE(ha->ip_config.ipv6_tcp_options, + IPV6_TCPOPT_TIMESTAMP_EN, pval); + } + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_CACHE_ID: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + len = sprintf(buf, "%d\n", + ha->ip_config.ipv4_cache_id); + else + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_cache_id); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: + OP_STATE(ha->ip_config.tcp_options, + TCPOPT_DNS_SERVER_IP_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: + OP_STATE(ha->ip_config.tcp_options, + TCPOPT_SLP_DA_INFO_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_TOS_EN: + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_IPV4_TOS_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_TOS: + len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos); + break; + case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_GRAT_ARP_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: + OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN, + pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: + pval = (ha->ip_config.ipv4_alt_cid_len) ? + (char *)ha->ip_config.ipv4_alt_cid : ""; + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_REQ_VID_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_USE_VID_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: + pval = (ha->ip_config.ipv4_vid_len) ? 
+ (char *)ha->ip_config.ipv4_vid : ""; + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_LEARN_IQN_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: + OP_STATE(~ha->ip_config.ipv4_options, + IPOPT_FRAGMENTATION_DISABLE, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_IN_FORWARD_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_REDIRECT_EN: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_ARP_REDIRECT_EN, pval); + } else { + OP_STATE(ha->ip_config.ipv6_options, + IPV6_OPT_REDIRECT_EN, pval); + } + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_TTL: + len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl); + break; + case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: + OP_STATE(ha->ip_config.ipv6_options, + IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV6_MLD_EN: + OP_STATE(ha->ip_config.ipv6_addl_options, + IPV6_ADDOPT_MLD_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: + len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl); + break; + case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_traffic_class); + break; + case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_hop_limit); + break; + case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_nd_reach_time); + break; + case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_nd_rexmit_timer); + break; + case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_nd_stale_timeout); + break; + case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_dup_addr_detect_count); + break; + case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_gw_advrt_mtu); + break; + default: + len = -ENOSYS; + } + } else if (param_type == ISCSI_IFACE_PARAM) { + switch (param) { + case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: + len = sprintf(buf, "%d\n", ha->ip_config.def_timeout); + break; + case ISCSI_IFACE_PARAM_HDRDGST_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_HEADER_DIGEST_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_DATADGST_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_DATA_DIGEST_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_IMM_DATA_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_IMMEDIATE_DATA_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_INITIAL_R2T_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_DATA_SEQ_INORDER_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_PDU_INORDER_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_DATA_PDU_INORDER_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_ERL: + len = sprintf(buf, "%d\n", + (ha->ip_config.iscsi_options & + ISCSIOPTS_ERL)); + 
break; + case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: + len = sprintf(buf, "%u\n", + ha->ip_config.iscsi_max_pdu_size * + BYTE_UNITS); + break; + case ISCSI_IFACE_PARAM_FIRST_BURST: + len = sprintf(buf, "%u\n", + ha->ip_config.iscsi_first_burst_len * + BYTE_UNITS); + break; + case ISCSI_IFACE_PARAM_MAX_R2T: + len = sprintf(buf, "%d\n", + ha->ip_config.iscsi_max_outstnd_r2t); + break; + case ISCSI_IFACE_PARAM_MAX_BURST: + len = sprintf(buf, "%u\n", + ha->ip_config.iscsi_max_burst_len * + BYTE_UNITS); + break; + case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_CHAP_AUTH_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_BIDI_CHAP_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_DISCOVERY_AUTH_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_INITIATOR_NAME: + len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name); + break; + default: + len = -ENOSYS; + } + } + + return len; +} + +static struct iscsi_endpoint * +qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, + int non_blocking) +{ + int ret; + struct iscsi_endpoint *ep; + struct qla_endpoint *qla_ep; + struct scsi_qla_host *ha; + struct sockaddr_in *addr; + struct sockaddr_in6 *addr6; + + if (!shost) { + ret = -ENXIO; + pr_err("%s: shost is NULL\n", __func__); + return ERR_PTR(ret); + } + + ha = iscsi_host_priv(shost); + ep = iscsi_create_endpoint(sizeof(struct qla_endpoint)); + if (!ep) { + ret = -ENOMEM; + return ERR_PTR(ret); + } + + qla_ep = ep->dd_data; + memset(qla_ep, 0, sizeof(struct qla_endpoint)); + if (dst_addr->sa_family == AF_INET) { + memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in)); + addr = (struct sockaddr_in *)&qla_ep->dst_addr; + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__, + (char *)&addr->sin_addr)); + } else if (dst_addr->sa_family == AF_INET6) { + memcpy(&qla_ep->dst_addr, dst_addr, + sizeof(struct sockaddr_in6)); + addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr; + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__, + (char *)&addr6->sin6_addr)); + } else { + ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n", + __func__); + } + + qla_ep->host = shost; + + return ep; +} + +static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) +{ + struct qla_endpoint *qla_ep; + struct scsi_qla_host *ha; + int ret = 0; + + qla_ep = ep->dd_data; + ha = to_qla_host(qla_ep->host); + DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no)); + + if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags)) + ret = 1; + + return ret; +} + +static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep) +{ + struct qla_endpoint *qla_ep; + struct scsi_qla_host *ha; + + qla_ep = ep->dd_data; + ha = to_qla_host(qla_ep->host); + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, + ha->host_no)); + iscsi_destroy_endpoint(ep); +} + +static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, + enum iscsi_param param, + char *buf) +{ + struct 
qla_endpoint *qla_ep = ep->dd_data; + struct sockaddr *dst_addr; + struct scsi_qla_host *ha; + + if (!qla_ep) + return -ENOTCONN; + + ha = to_qla_host(qla_ep->host); + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, + ha->host_no)); + + switch (param) { + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_CONN_ADDRESS: + dst_addr = (struct sockaddr *)&qla_ep->dst_addr; + if (!dst_addr) + return -ENOTCONN; + + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + &qla_ep->dst_addr, param, buf); + default: + return -ENOSYS; + } +} + +static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, + struct iscsi_stats *stats) +{ + struct iscsi_session *sess; + struct iscsi_cls_session *cls_sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + struct ql_iscsi_stats *ql_iscsi_stats; + int stats_size; + int ret; + dma_addr_t iscsi_stats_dma; + + cls_sess = iscsi_conn_to_session(cls_conn); + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, + ha->host_no)); + stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); + /* Allocate memory */ + ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, + &iscsi_stats_dma, GFP_KERNEL); + if (!ql_iscsi_stats) { + ql4_printk(KERN_ERR, ha, + "Unable to allocate memory for iscsi stats\n"); + goto exit_get_stats; + } + + ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size, + iscsi_stats_dma); + if (ret != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, + "Unable to retrieve iscsi stats\n"); + goto free_stats; + } + + /* octets */ + stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets); + stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets); + /* xmit pdus */ + stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus); + stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus); + stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus); + stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus); + stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus); + stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus); + stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus); + stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus); + /* recv pdus */ + stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus); + stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus); + stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus); + stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus); + stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus); + stats->logoutrsp_pdus = + le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus); + stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus); + stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus); + stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus); + +free_stats: + dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats, + iscsi_stats_dma); +exit_get_stats: + return; +} + +static enum scsi_timeout_action qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) +{ + struct iscsi_cls_session *session; + unsigned long flags; + enum scsi_timeout_action ret = SCSI_EH_NOT_HANDLED; + + session = starget_to_session(scsi_target(sc->device)); + + spin_lock_irqsave(&session->lock, flags); + if (session->state == ISCSI_SESSION_FAILED) + ret = SCSI_EH_RESET_TIMER; + spin_unlock_irqrestore(&session->lock, flags); + + 
return ret; +} + +static void qla4xxx_set_port_speed(struct Scsi_Host *shost) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + struct iscsi_cls_host *ihost = shost->shost_data; + uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN; + + qla4xxx_get_firmware_state(ha); + + switch (ha->addl_fw_state & 0x0F00) { + case FW_ADDSTATE_LINK_SPEED_10MBPS: + speed = ISCSI_PORT_SPEED_10MBPS; + break; + case FW_ADDSTATE_LINK_SPEED_100MBPS: + speed = ISCSI_PORT_SPEED_100MBPS; + break; + case FW_ADDSTATE_LINK_SPEED_1GBPS: + speed = ISCSI_PORT_SPEED_1GBPS; + break; + case FW_ADDSTATE_LINK_SPEED_10GBPS: + speed = ISCSI_PORT_SPEED_10GBPS; + break; + } + ihost->port_speed = speed; +} + +static void qla4xxx_set_port_state(struct Scsi_Host *shost) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + struct iscsi_cls_host *ihost = shost->shost_data; + uint32_t state = ISCSI_PORT_STATE_DOWN; + + if (test_bit(AF_LINK_UP, &ha->flags)) + state = ISCSI_PORT_STATE_UP; + + ihost->port_state = state; +} + +static int qla4xxx_host_get_param(struct Scsi_Host *shost, + enum iscsi_host_param param, char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + int len; + + switch (param) { + case ISCSI_HOST_PARAM_HWADDRESS: + len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN); + break; + case ISCSI_HOST_PARAM_IPADDRESS: + len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); + break; + case ISCSI_HOST_PARAM_INITIATOR_NAME: + len = sprintf(buf, "%s\n", ha->name_string); + break; + case ISCSI_HOST_PARAM_PORT_STATE: + qla4xxx_set_port_state(shost); + len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost)); + break; + case ISCSI_HOST_PARAM_PORT_SPEED: + qla4xxx_set_port_speed(shost); + len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost)); + break; + default: + return -ENOSYS; + } + + return len; +} + +static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha) +{ + if (ha->iface_ipv4) + return; + + /* IPv4 */ + ha->iface_ipv4 = iscsi_create_iface(ha->host, + &qla4xxx_iscsi_transport, + ISCSI_IFACE_TYPE_IPV4, 0, 0); + if (!ha->iface_ipv4) + ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI " + "iface0.\n"); +} + +static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha) +{ + if (!ha->iface_ipv6_0) + /* IPv6 iface-0 */ + ha->iface_ipv6_0 = iscsi_create_iface(ha->host, + &qla4xxx_iscsi_transport, + ISCSI_IFACE_TYPE_IPV6, 0, + 0); + if (!ha->iface_ipv6_0) + ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " + "iface0.\n"); + + if (!ha->iface_ipv6_1) + /* IPv6 iface-1 */ + ha->iface_ipv6_1 = iscsi_create_iface(ha->host, + &qla4xxx_iscsi_transport, + ISCSI_IFACE_TYPE_IPV6, 1, + 0); + if (!ha->iface_ipv6_1) + ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " + "iface1.\n"); +} + +static void qla4xxx_create_ifaces(struct scsi_qla_host *ha) +{ + if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) + qla4xxx_create_ipv4_iface(ha); + + if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) + qla4xxx_create_ipv6_iface(ha); +} + +static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha) +{ + if (ha->iface_ipv4) { + iscsi_destroy_iface(ha->iface_ipv4); + ha->iface_ipv4 = NULL; + } +} + +static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha) +{ + if (ha->iface_ipv6_0) { + iscsi_destroy_iface(ha->iface_ipv6_0); + ha->iface_ipv6_0 = NULL; + } + if (ha->iface_ipv6_1) { + iscsi_destroy_iface(ha->iface_ipv6_1); + ha->iface_ipv6_1 = NULL; + } +} + +static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha) +{ + qla4xxx_destroy_ipv4_iface(ha); + 
qla4xxx_destroy_ipv6_iface(ha); +} + +static void qla4xxx_set_ipv6(struct scsi_qla_host *ha, + struct iscsi_iface_param_info *iface_param, + struct addr_ctrl_blk *init_fw_cb) +{ + /* + * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg. + * iface_num 1 is valid only for IPv6 Addr. + */ + switch (iface_param->param) { + case ISCSI_NET_PARAM_IPV6_ADDR: + if (iface_param->iface_num & 0x1) + /* IPv6 Addr 1 */ + memcpy(init_fw_cb->ipv6_addr1, iface_param->value, + sizeof(init_fw_cb->ipv6_addr1)); + else + /* IPv6 Addr 0 */ + memcpy(init_fw_cb->ipv6_addr0, iface_param->value, + sizeof(init_fw_cb->ipv6_addr0)); + break; + case ISCSI_NET_PARAM_IPV6_LINKLOCAL: + if (iface_param->iface_num & 0x1) + break; + memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8], + sizeof(init_fw_cb->ipv6_if_id)); + break; + case ISCSI_NET_PARAM_IPV6_ROUTER: + if (iface_param->iface_num & 0x1) + break; + memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value, + sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); + break; + case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: + /* Autocfg applies to even interface */ + if (iface_param->iface_num & 0x1) + break; + + if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE) + init_fw_cb->ipv6_addtl_opts &= + cpu_to_le16( + ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); + else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE) + init_fw_cb->ipv6_addtl_opts |= + cpu_to_le16( + IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); + else + ql4_printk(KERN_ERR, ha, + "Invalid autocfg setting for IPv6 addr\n"); + break; + case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: + /* Autocfg applies to even interface */ + if (iface_param->iface_num & 0x1) + break; + + if (iface_param->value[0] == + ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE) + init_fw_cb->ipv6_addtl_opts |= cpu_to_le16( + IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); + else if (iface_param->value[0] == + ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE) + init_fw_cb->ipv6_addtl_opts &= cpu_to_le16( + ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); + else + ql4_printk(KERN_ERR, ha, + "Invalid autocfg setting for IPv6 linklocal addr\n"); + break; + case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG: + /* Autocfg applies to even interface */ + if (iface_param->iface_num & 0x1) + break; + + if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE) + memset(init_fw_cb->ipv6_dflt_rtr_addr, 0, + sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); + break; + case ISCSI_NET_PARAM_IFACE_ENABLE: + if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { + init_fw_cb->ipv6_opts |= + cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE); + qla4xxx_create_ipv6_iface(ha); + } else { + init_fw_cb->ipv6_opts &= + cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE & + 0xFFFF); + qla4xxx_destroy_ipv6_iface(ha); + } + break; + case ISCSI_NET_PARAM_VLAN_TAG: + if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag)) + break; + init_fw_cb->ipv6_vlan_tag = + cpu_to_be16(*(uint16_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_VLAN_ENABLED: + if (iface_param->value[0] == ISCSI_VLAN_ENABLE) + init_fw_cb->ipv6_opts |= + cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE); + else + init_fw_cb->ipv6_opts &= + cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE); + break; + case ISCSI_NET_PARAM_MTU: + init_fw_cb->eth_mtu_size = + cpu_to_le16(*(uint16_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_PORT: + /* Autocfg applies to even interface */ + if (iface_param->iface_num & 0x1) + break; + + init_fw_cb->ipv6_port = + cpu_to_le16(*(uint16_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_DELAYED_ACK_EN: + if 
(iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) + init_fw_cb->ipv6_tcp_opts |= + cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE); + else + init_fw_cb->ipv6_tcp_opts &= + cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE & + 0xFFFF); + break; + case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) + init_fw_cb->ipv6_tcp_opts |= + cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE); + else + init_fw_cb->ipv6_tcp_opts &= + cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE); + break; + case ISCSI_NET_PARAM_TCP_WSF_DISABLE: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) + init_fw_cb->ipv6_tcp_opts |= + cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE); + else + init_fw_cb->ipv6_tcp_opts &= + cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE); + break; + case ISCSI_NET_PARAM_TCP_WSF: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_tcp_wsf = iface_param->value[0]; + break; + case ISCSI_NET_PARAM_TCP_TIMER_SCALE: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_tcp_opts &= + cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE); + init_fw_cb->ipv6_tcp_opts |= + cpu_to_le16((iface_param->value[0] << 1) & + IPV6_TCPOPT_TIMER_SCALE); + break; + case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv6_tcp_opts |= + cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN); + else + init_fw_cb->ipv6_tcp_opts &= + cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN); + break; + case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv6_opts |= + cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); + else + init_fw_cb->ipv6_opts &= + cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); + break; + case ISCSI_NET_PARAM_REDIRECT_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv6_opts |= + cpu_to_le16(IPV6_OPT_REDIRECT_EN); + else + init_fw_cb->ipv6_opts &= + cpu_to_le16(~IPV6_OPT_REDIRECT_EN); + break; + case ISCSI_NET_PARAM_IPV6_MLD_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv6_addtl_opts |= + cpu_to_le16(IPV6_ADDOPT_MLD_EN); + else + init_fw_cb->ipv6_addtl_opts &= + cpu_to_le16(~IPV6_ADDOPT_MLD_EN); + break; + case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_flow_lbl = + cpu_to_le16(*(uint16_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_traffic_class = iface_param->value[0]; + break; + case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_hop_limit = iface_param->value[0]; + break; + case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_nd_reach_time = + cpu_to_le32(*(uint32_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_nd_rexmit_timer = + cpu_to_le32(*(uint32_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_nd_stale_timeout = + cpu_to_le32(*(uint32_t *)iface_param->value); + break; + case 
ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0]; + break; + case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_gw_advrt_mtu = + cpu_to_le32(*(uint32_t *)iface_param->value); + break; + default: + ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n", + iface_param->param); + break; + } +} + +static void qla4xxx_set_ipv4(struct scsi_qla_host *ha, + struct iscsi_iface_param_info *iface_param, + struct addr_ctrl_blk *init_fw_cb) +{ + switch (iface_param->param) { + case ISCSI_NET_PARAM_IPV4_ADDR: + memcpy(init_fw_cb->ipv4_addr, iface_param->value, + sizeof(init_fw_cb->ipv4_addr)); + break; + case ISCSI_NET_PARAM_IPV4_SUBNET: + memcpy(init_fw_cb->ipv4_subnet, iface_param->value, + sizeof(init_fw_cb->ipv4_subnet)); + break; + case ISCSI_NET_PARAM_IPV4_GW: + memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value, + sizeof(init_fw_cb->ipv4_gw_addr)); + break; + case ISCSI_NET_PARAM_IPV4_BOOTPROTO: + if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP) + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16(TCPOPT_DHCP_ENABLE); + else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC) + init_fw_cb->ipv4_tcp_opts &= + cpu_to_le16(~TCPOPT_DHCP_ENABLE); + else + ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n"); + break; + case ISCSI_NET_PARAM_IFACE_ENABLE: + if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE); + qla4xxx_create_ipv4_iface(ha); + } else { + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE & + 0xFFFF); + qla4xxx_destroy_ipv4_iface(ha); + } + break; + case ISCSI_NET_PARAM_VLAN_TAG: + if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag)) + break; + init_fw_cb->ipv4_vlan_tag = + cpu_to_be16(*(uint16_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_VLAN_ENABLED: + if (iface_param->value[0] == ISCSI_VLAN_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE); + break; + case ISCSI_NET_PARAM_MTU: + init_fw_cb->eth_mtu_size = + cpu_to_le16(*(uint16_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_PORT: + init_fw_cb->ipv4_port = + cpu_to_le16(*(uint16_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_DELAYED_ACK_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE); + else + init_fw_cb->ipv4_tcp_opts &= + cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE & + 0xFFFF); + break; + case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE); + else + init_fw_cb->ipv4_tcp_opts &= + cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE); + break; + case ISCSI_NET_PARAM_TCP_WSF_DISABLE: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE); + else + init_fw_cb->ipv4_tcp_opts &= + cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE); + break; + case ISCSI_NET_PARAM_TCP_WSF: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv4_tcp_wsf = iface_param->value[0]; + break; + case ISCSI_NET_PARAM_TCP_TIMER_SCALE: + if (iface_param->iface_num & 0x1) + break; + 
init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE); + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16((iface_param->value[0] << 1) & + TCPOPT_TIMER_SCALE); + break; + case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE); + else + init_fw_cb->ipv4_tcp_opts &= + cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN); + else + init_fw_cb->ipv4_tcp_opts &= + cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16(TCPOPT_SLP_DA_INFO_EN); + else + init_fw_cb->ipv4_tcp_opts &= + cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN); + break; + case ISCSI_NET_PARAM_IPV4_TOS_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_IPV4_TOS_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_IPV4_TOS_EN); + break; + case ISCSI_NET_PARAM_IPV4_TOS: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv4_tos = iface_param->value[0]; + break; + case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_GRAT_ARP_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_GRAT_ARP_EN); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_ALT_CID_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_ALT_CID_EN); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: + if (iface_param->iface_num & 0x1) + break; + memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value, + (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1)); + init_fw_cb->ipv4_dhcp_alt_cid_len = + strlen(init_fw_cb->ipv4_dhcp_alt_cid); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_REQ_VID_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_REQ_VID_EN); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_USE_VID_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_USE_VID_EN); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: + if (iface_param->iface_num & 0x1) + break; + memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value, + (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1)); + init_fw_cb->ipv4_dhcp_vid_len = + strlen(init_fw_cb->ipv4_dhcp_vid); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_LEARN_IQN_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_LEARN_IQN_EN); + break; + case 
ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE); + break; + case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_IN_FORWARD_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_IN_FORWARD_EN); + break; + case ISCSI_NET_PARAM_REDIRECT_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_ARP_REDIRECT_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_ARP_REDIRECT_EN); + break; + case ISCSI_NET_PARAM_IPV4_TTL: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv4_ttl = iface_param->value[0]; + break; + default: + ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n", + iface_param->param); + break; + } +} + +static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha, + struct iscsi_iface_param_info *iface_param, + struct addr_ctrl_blk *init_fw_cb) +{ + switch (iface_param->param) { + case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->def_timeout = + cpu_to_le16(*(uint16_t *)iface_param->value); + break; + case ISCSI_IFACE_PARAM_HDRDGST_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN); + break; + case ISCSI_IFACE_PARAM_DATADGST_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN); + break; + case ISCSI_IFACE_PARAM_IMM_DATA_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN); + break; + case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN); + break; + case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN); + break; + case ISCSI_IFACE_PARAM_PDU_INORDER_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN); + break; + case ISCSI_IFACE_PARAM_ERL: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL); + init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] & + ISCSIOPTS_ERL); + break; + case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: + if 
(iface_param->iface_num & 0x1) + break; + init_fw_cb->iscsi_max_pdu_size = + cpu_to_le32(*(uint32_t *)iface_param->value) / + BYTE_UNITS; + break; + case ISCSI_IFACE_PARAM_FIRST_BURST: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->iscsi_fburst_len = + cpu_to_le32(*(uint32_t *)iface_param->value) / + BYTE_UNITS; + break; + case ISCSI_IFACE_PARAM_MAX_R2T: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->iscsi_max_outstnd_r2t = + cpu_to_le16(*(uint16_t *)iface_param->value); + break; + case ISCSI_IFACE_PARAM_MAX_BURST: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->iscsi_max_burst_len = + cpu_to_le32(*(uint32_t *)iface_param->value) / + BYTE_UNITS; + break; + case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN); + break; + case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN); + break; + case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN); + break; + case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN); + break; + case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN); + break; + default: + ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n", + iface_param->param); + break; + } +} + +static void +qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb) +{ + struct addr_ctrl_blk_def *acb; + acb = (struct addr_ctrl_blk_def *)init_fw_cb; + memset(acb->reserved1, 0, sizeof(acb->reserved1)); + memset(acb->reserved2, 0, sizeof(acb->reserved2)); + memset(acb->reserved3, 0, sizeof(acb->reserved3)); + memset(acb->reserved4, 0, sizeof(acb->reserved4)); + memset(acb->reserved5, 0, sizeof(acb->reserved5)); + memset(acb->reserved6, 0, sizeof(acb->reserved6)); + memset(acb->reserved7, 0, sizeof(acb->reserved7)); + memset(acb->reserved8, 0, sizeof(acb->reserved8)); + memset(acb->reserved9, 0, sizeof(acb->reserved9)); + memset(acb->reserved10, 0, sizeof(acb->reserved10)); + memset(acb->reserved11, 0, sizeof(acb->reserved11)); + memset(acb->reserved12, 0, sizeof(acb->reserved12)); + memset(acb->reserved13, 0, sizeof(acb->reserved13)); + memset(acb->reserved14, 0, sizeof(acb->reserved14)); + memset(acb->reserved15, 0, sizeof(acb->reserved15)); +} + +static int +qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + int rval = 0; + struct iscsi_iface_param_info *iface_param = NULL; + struct addr_ctrl_blk *init_fw_cb = NULL; + dma_addr_t 
init_fw_cb_dma; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + uint32_t rem = len; + struct nlattr *attr; + + init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct addr_ctrl_blk), + &init_fw_cb_dma, GFP_KERNEL); + if (!init_fw_cb) { + ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", + __func__); + return -ENOMEM; + } + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) { + ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__); + rval = -EIO; + goto exit_init_fw_cb; + } + + nla_for_each_attr(attr, data, len, rem) { + if (nla_len(attr) < sizeof(*iface_param)) { + rval = -EINVAL; + goto exit_init_fw_cb; + } + + iface_param = nla_data(attr); + + if (iface_param->param_type == ISCSI_NET_PARAM) { + switch (iface_param->iface_type) { + case ISCSI_IFACE_TYPE_IPV4: + switch (iface_param->iface_num) { + case 0: + qla4xxx_set_ipv4(ha, iface_param, + init_fw_cb); + break; + default: + /* Cannot have more than one IPv4 interface */ + ql4_printk(KERN_ERR, ha, + "Invalid IPv4 iface number = %d\n", + iface_param->iface_num); + break; + } + break; + case ISCSI_IFACE_TYPE_IPV6: + switch (iface_param->iface_num) { + case 0: + case 1: + qla4xxx_set_ipv6(ha, iface_param, + init_fw_cb); + break; + default: + /* Cannot have more than two IPv6 interface */ + ql4_printk(KERN_ERR, ha, + "Invalid IPv6 iface number = %d\n", + iface_param->iface_num); + break; + } + break; + default: + ql4_printk(KERN_ERR, ha, + "Invalid iface type\n"); + break; + } + } else if (iface_param->param_type == ISCSI_IFACE_PARAM) { + qla4xxx_set_iscsi_param(ha, iface_param, + init_fw_cb); + } else { + continue; + } + } + + init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A); + + rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB, + sizeof(struct addr_ctrl_blk), + FLASH_OPT_RMW_COMMIT); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n", + __func__); + rval = -EIO; + goto exit_init_fw_cb; + } + + rval = qla4xxx_disable_acb(ha); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n", + __func__); + rval = -EIO; + goto exit_init_fw_cb; + } + + wait_for_completion_timeout(&ha->disable_acb_comp, + DISABLE_ACB_TOV * HZ); + + qla4xxx_initcb_to_acb(init_fw_cb); + + rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n", + __func__); + rval = -EIO; + goto exit_init_fw_cb; + } + + memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); + qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb, + init_fw_cb_dma); + +exit_init_fw_cb: + dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), + init_fw_cb, init_fw_cb_dma); + + return rval; +} + +static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, + enum iscsi_param param, char *buf) +{ + struct iscsi_session *sess = cls_sess->dd_data; + struct ddb_entry *ddb_entry = sess->dd_data; + struct scsi_qla_host *ha = ddb_entry->ha; + struct iscsi_cls_conn *cls_conn = ddb_entry->conn; + struct ql4_chap_table chap_tbl; + int rval, len; + uint16_t idx; + + memset(&chap_tbl, 0, sizeof(chap_tbl)); + switch (param) { + case ISCSI_PARAM_CHAP_IN_IDX: + rval = qla4xxx_get_chap_index(ha, sess->username_in, + sess->password_in, BIDI_CHAP, + &idx); + if (rval) + len = sprintf(buf, "\n"); + else + len = sprintf(buf, "%hu\n", idx); + break; + case 
ISCSI_PARAM_CHAP_OUT_IDX: + if (ddb_entry->ddb_type == FLASH_DDB) { + if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { + idx = ddb_entry->chap_tbl_idx; + rval = QLA_SUCCESS; + } else { + rval = QLA_ERROR; + } + } else { + rval = qla4xxx_get_chap_index(ha, sess->username, + sess->password, + LOCAL_CHAP, &idx); + } + if (rval) + len = sprintf(buf, "\n"); + else + len = sprintf(buf, "%hu\n", idx); + break; + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + /* First, populate session username and password for FLASH DDB, + * if not already done. This happens when session login fails + * for a FLASH DDB. + */ + if (ddb_entry->ddb_type == FLASH_DDB && + ddb_entry->chap_tbl_idx != INVALID_ENTRY && + !sess->username && !sess->password) { + idx = ddb_entry->chap_tbl_idx; + rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, + chap_tbl.secret, + idx); + if (!rval) { + iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, + (char *)chap_tbl.name, + strlen((char *)chap_tbl.name)); + iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, + (char *)chap_tbl.secret, + chap_tbl.secret_len); + } + } + fallthrough; + default: + return iscsi_session_get_param(cls_sess, param, buf); + } + + return len; +} + +static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf) +{ + struct iscsi_conn *conn; + struct qla_conn *qla_conn; + struct sockaddr *dst_addr; + + conn = cls_conn->dd_data; + qla_conn = conn->dd_data; + dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr; + + switch (param) { + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_CONN_ADDRESS: + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + dst_addr, param, buf); + default: + return iscsi_conn_get_param(cls_conn, param, buf); + } +} + +int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index) +{ + uint32_t mbx_sts = 0; + uint16_t tmp_ddb_index; + int ret; + +get_ddb_index: + tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES); + + if (tmp_ddb_index >= MAX_DDB_ENTRIES) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "Free DDB index not available\n")); + ret = QLA_ERROR; + goto exit_get_ddb_index; + } + + if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map)) + goto get_ddb_index; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Found a free DDB index at %d\n", tmp_ddb_index)); + ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts); + if (ret == QLA_ERROR) { + if (mbx_sts == MBOX_STS_COMMAND_ERROR) { + ql4_printk(KERN_INFO, ha, + "DDB index = %d not available trying next\n", + tmp_ddb_index); + goto get_ddb_index; + } + DEBUG2(ql4_printk(KERN_INFO, ha, + "Free FW DDB not available\n")); + } + + *ddb_index = tmp_ddb_index; + +exit_get_ddb_index: + return ret; +} + +static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry, + char *existing_ipaddr, + char *user_ipaddr) +{ + uint8_t dst_ipaddr[IPv6_ADDR_LEN]; + char formatted_ipaddr[DDB_IPADDR_LEN]; + int status = QLA_SUCCESS, ret = 0; + + if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) { + ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, + '\0', NULL); + if (ret == 0) { + status = QLA_ERROR; + goto out_match; + } + ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr); + } else { + ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, + '\0', NULL); + if (ret == 0) { + status = QLA_ERROR; + goto out_match; + } + ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr); + } + + if (strcmp(existing_ipaddr, formatted_ipaddr)) + status = QLA_ERROR; + +out_match: + return 
status; +} + +static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha, + struct iscsi_cls_conn *cls_conn) +{ + int idx = 0, max_ddbs, rval; + struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); + struct iscsi_session *sess, *existing_sess; + struct iscsi_conn *conn, *existing_conn; + struct ddb_entry *ddb_entry; + + sess = cls_sess->dd_data; + conn = cls_conn->dd_data; + + if (sess->targetname == NULL || + conn->persistent_address == NULL || + conn->persistent_port == 0) + return QLA_ERROR; + + max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : + MAX_DEV_DB_ENTRIES; + + for (idx = 0; idx < max_ddbs; idx++) { + ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); + if (ddb_entry == NULL) + continue; + + if (ddb_entry->ddb_type != FLASH_DDB) + continue; + + existing_sess = ddb_entry->sess->dd_data; + existing_conn = ddb_entry->conn->dd_data; + + if (existing_sess->targetname == NULL || + existing_conn->persistent_address == NULL || + existing_conn->persistent_port == 0) + continue; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "IQN = %s User IQN = %s\n", + existing_sess->targetname, + sess->targetname)); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "IP = %s User IP = %s\n", + existing_conn->persistent_address, + conn->persistent_address)); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Port = %d User Port = %d\n", + existing_conn->persistent_port, + conn->persistent_port)); + + if (strcmp(existing_sess->targetname, sess->targetname)) + continue; + rval = qla4xxx_match_ipaddress(ha, ddb_entry, + existing_conn->persistent_address, + conn->persistent_address); + if (rval == QLA_ERROR) + continue; + if (existing_conn->persistent_port != conn->persistent_port) + continue; + break; + } + + if (idx == max_ddbs) + return QLA_ERROR; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Match found in fwdb sessions\n")); + return QLA_SUCCESS; +} + +static struct iscsi_cls_session * +qla4xxx_session_create(struct iscsi_endpoint *ep, + uint16_t cmds_max, uint16_t qdepth, + uint32_t initial_cmdsn) +{ + struct iscsi_cls_session *cls_sess; + struct scsi_qla_host *ha; + struct qla_endpoint *qla_ep; + struct ddb_entry *ddb_entry; + uint16_t ddb_index; + struct iscsi_session *sess; + int ret; + + if (!ep) { + printk(KERN_ERR "qla4xxx: missing ep.\n"); + return NULL; + } + + qla_ep = ep->dd_data; + ha = to_qla_host(qla_ep->host); + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, + ha->host_no)); + + ret = qla4xxx_get_ddb_index(ha, &ddb_index); + if (ret == QLA_ERROR) + return NULL; + + cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host, + cmds_max, sizeof(struct ddb_entry), + sizeof(struct ql4_task_data), + initial_cmdsn, ddb_index); + if (!cls_sess) + return NULL; + + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ddb_entry->fw_ddb_index = ddb_index; + ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; + ddb_entry->ha = ha; + ddb_entry->sess = cls_sess; + ddb_entry->unblock_sess = qla4xxx_unblock_ddb; + ddb_entry->ddb_change = qla4xxx_ddb_change; + clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags); + cls_sess->recovery_tmo = ql4xsess_recovery_tmo; + ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry; + ha->tot_ddbs++; + + return cls_sess; +} + +static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess) +{ + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + unsigned long flags, wtime; + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + uint32_t ddb_state; + int ret; + + sess = 
cls_sess->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, + ha->host_no)); + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", __func__); + goto destroy_session; + } + + wtime = jiffies + (HZ * LOGOUT_TOV); + do { + ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, + fw_ddb_entry, fw_ddb_entry_dma, + NULL, NULL, &ddb_state, NULL, + NULL, NULL); + if (ret == QLA_ERROR) + goto destroy_session; + + if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || + (ddb_state == DDB_DS_SESSION_FAILED)) + goto destroy_session; + + schedule_timeout_uninterruptible(HZ); + } while ((time_after(wtime, jiffies))); + +destroy_session: + qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); + if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags)) + clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); + spin_lock_irqsave(&ha->hardware_lock, flags); + qla4xxx_free_ddb(ha, ddb_entry); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + iscsi_session_teardown(cls_sess); + + if (fw_ddb_entry) + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); +} + +static struct iscsi_cls_conn * +qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx) +{ + struct iscsi_cls_conn *cls_conn; + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + + cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), + conn_idx); + if (!cls_conn) { + pr_info("%s: Can not create connection for conn_idx = %u\n", + __func__, conn_idx); + return NULL; + } + + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ddb_entry->conn = cls_conn; + + ha = ddb_entry->ha; + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__, + conn_idx)); + return cls_conn; +} + +static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, + uint64_t transport_fd, int is_leading) +{ + struct iscsi_conn *conn; + struct qla_conn *qla_conn; + struct iscsi_endpoint *ep; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + struct iscsi_session *sess; + + sess = cls_session->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, + cls_session->sid, cls_conn->cid)); + + if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) + return -EINVAL; + ep = iscsi_lookup_endpoint(transport_fd); + if (!ep) + return -EINVAL; + conn = cls_conn->dd_data; + qla_conn = conn->dd_data; + qla_conn->qla_ep = ep->dd_data; + iscsi_put_endpoint(ep); + return 0; +} + +static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn) +{ + struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + uint32_t mbx_sts = 0; + int ret = 0; + int status = QLA_SUCCESS; + + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, + cls_sess->sid, cls_conn->cid)); + + /* Check if we have matching FW DDB, if yes then do not + * login to this target. 
This could cause the target to log out the previous + * connection. + */ + ret = qla4xxx_match_fwdb_session(ha, cls_conn); + if (ret == QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, + "Session already exists in FW.\n"); + ret = -EEXIST; + goto exit_conn_start; + } + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", __func__); + ret = -ENOMEM; + goto exit_conn_start; + } + + ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts); + if (ret) { + /* If iscsid is stopped and started then there is no need to + * set param again since the ddb state will already be + * active and FW does not allow set ddb to an + * active session. + */ + if (mbx_sts) + if (ddb_entry->fw_ddb_device_state == + DDB_DS_SESSION_ACTIVE) { + ddb_entry->unblock_sess(ddb_entry->sess); + goto exit_set_param; + } + + ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n", + __func__, ddb_entry->fw_ddb_index); + goto exit_conn_start; + } + + status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index); + if (status == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__, + sess->targetname); + ret = -EINVAL; + goto exit_conn_start; + } + + if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) + ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS; + + DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__, + ddb_entry->fw_ddb_device_state)); + +exit_set_param: + ret = 0; + +exit_conn_start: + if (fw_ddb_entry) + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); + return ret; +} + +static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn) +{ + struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); + struct iscsi_session *sess; + struct scsi_qla_host *ha; + struct ddb_entry *ddb_entry; + int options; + + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__, + cls_conn->cid)); + + options = LOGOUT_OPTION_CLOSE_SESSION; + if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) + ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); +} + +static void qla4xxx_task_work(struct work_struct *wdata) +{ + struct ql4_task_data *task_data; + struct scsi_qla_host *ha; + struct passthru_status *sts; + struct iscsi_task *task; + struct iscsi_hdr *hdr; + uint8_t *data; + uint32_t data_len; + struct iscsi_conn *conn; + int hdr_len; + itt_t itt; + + task_data = container_of(wdata, struct ql4_task_data, task_work); + ha = task_data->ha; + task = task_data->task; + sts = &task_data->sts; + hdr_len = sizeof(struct iscsi_hdr); + + DEBUG3(printk(KERN_INFO "Status returned\n")); + DEBUG3(qla4xxx_dump_buffer(sts, 64)); + DEBUG3(printk(KERN_INFO "Response buffer")); + DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64)); + + conn = task->conn; + + switch (sts->completionStatus) { + case PASSTHRU_STATUS_COMPLETE: + hdr = (struct iscsi_hdr *)task_data->resp_buffer; + /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */ + itt = sts->handle; + hdr->itt = itt; + data = task_data->resp_buffer + hdr_len; + data_len = task_data->resp_len - hdr_len; + iscsi_complete_pdu(conn, hdr, data, data_len); + break; + default: + ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n", + sts->completionStatus); + break; + } + return; +} + +static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t 
opcode) +{ + struct ql4_task_data *task_data; + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + int hdr_len; + + sess = task->conn->session; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + task_data = task->dd_data; + memset(task_data, 0, sizeof(struct ql4_task_data)); + + if (task->sc) { + ql4_printk(KERN_INFO, ha, + "%s: SCSI Commands not implemented\n", __func__); + return -EINVAL; + } + + hdr_len = sizeof(struct iscsi_hdr); + task_data->ha = ha; + task_data->task = task; + + if (task->data_count) { + task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data, + task->data_count, + DMA_TO_DEVICE); + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n", + __func__, task->conn->max_recv_dlength, hdr_len)); + + task_data->resp_len = task->conn->max_recv_dlength + hdr_len; + task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev, + task_data->resp_len, + &task_data->resp_dma, + GFP_ATOMIC); + if (!task_data->resp_buffer) + goto exit_alloc_pdu; + + task_data->req_len = task->data_count + hdr_len; + task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev, + task_data->req_len, + &task_data->req_dma, + GFP_ATOMIC); + if (!task_data->req_buffer) + goto exit_alloc_pdu; + + task->hdr = task_data->req_buffer; + + INIT_WORK(&task_data->task_work, qla4xxx_task_work); + + return 0; + +exit_alloc_pdu: + if (task_data->resp_buffer) + dma_free_coherent(&ha->pdev->dev, task_data->resp_len, + task_data->resp_buffer, task_data->resp_dma); + + if (task_data->req_buffer) + dma_free_coherent(&ha->pdev->dev, task_data->req_len, + task_data->req_buffer, task_data->req_dma); + return -ENOMEM; +} + +static void qla4xxx_task_cleanup(struct iscsi_task *task) +{ + struct ql4_task_data *task_data; + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + int hdr_len; + + hdr_len = sizeof(struct iscsi_hdr); + sess = task->conn->session; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + task_data = task->dd_data; + + if (task->data_count) { + dma_unmap_single(&ha->pdev->dev, task_data->data_dma, + task->data_count, DMA_TO_DEVICE); + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n", + __func__, task->conn->max_recv_dlength, hdr_len)); + + dma_free_coherent(&ha->pdev->dev, task_data->resp_len, + task_data->resp_buffer, task_data->resp_dma); + dma_free_coherent(&ha->pdev->dev, task_data->req_len, + task_data->req_buffer, task_data->req_dma); + return; +} + +static int qla4xxx_task_xmit(struct iscsi_task *task) +{ + struct scsi_cmnd *sc = task->sc; + struct iscsi_session *sess = task->conn->session; + struct ddb_entry *ddb_entry = sess->dd_data; + struct scsi_qla_host *ha = ddb_entry->ha; + + if (!sc) + return qla4xxx_send_passthru0(task); + + ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n", + __func__); + return -ENOSYS; +} + +static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess, + struct iscsi_bus_flash_conn *conn, + struct dev_db_entry *fw_ddb_entry) +{ + unsigned long options = 0; + int rc = 0; + + options = le16_to_cpu(fw_ddb_entry->options); + conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); + if (test_bit(OPT_IPV6_DEVICE, &options)) { + rc = iscsi_switch_str_param(&sess->portal_type, + PORTAL_TYPE_IPV6); + if (rc) + goto exit_copy; + } else { + rc = iscsi_switch_str_param(&sess->portal_type, + PORTAL_TYPE_IPV4); + if (rc) + goto exit_copy; + } + + sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, + 
&options); + sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); + sess->entry_state = test_bit(OPT_ENTRY_STATE, &options); + + options = le16_to_cpu(fw_ddb_entry->iscsi_options); + conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); + conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); + sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); + sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); + sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, + &options); + sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); + sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); + conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options); + sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, + &options); + sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); + sess->discovery_auth_optional = + test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); + if (test_bit(ISCSIOPT_ERL1, &options)) + sess->erl |= BIT_1; + if (test_bit(ISCSIOPT_ERL0, &options)) + sess->erl |= BIT_0; + + options = le16_to_cpu(fw_ddb_entry->tcp_options); + conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); + conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); + conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); + if (test_bit(TCPOPT_TIMER_SCALE3, &options)) + conn->tcp_timer_scale |= BIT_3; + if (test_bit(TCPOPT_TIMER_SCALE2, &options)) + conn->tcp_timer_scale |= BIT_2; + if (test_bit(TCPOPT_TIMER_SCALE1, &options)) + conn->tcp_timer_scale |= BIT_1; + + conn->tcp_timer_scale >>= 1; + conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); + + options = le16_to_cpu(fw_ddb_entry->ip_options); + conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); + + conn->max_recv_dlength = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); + conn->max_xmit_dlength = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); + sess->first_burst = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); + sess->max_burst = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); + sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); + sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); + sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); + sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); + conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); + conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; + conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; + conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl); + conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout); + conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); + conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); + conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); + sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link); + sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link); + sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); + sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); + + sess->default_taskmgmt_timeout = + le16_to_cpu(fw_ddb_entry->def_timeout); + conn->port = le16_to_cpu(fw_ddb_entry->port); + + options = le16_to_cpu(fw_ddb_entry->options); + conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); + if (!conn->ipaddress) { + rc = -ENOMEM; + goto exit_copy; + } + + conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); + if 
(!conn->redirect_ipaddr) { + rc = -ENOMEM; + goto exit_copy; + } + + memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); + memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN); + + if (test_bit(OPT_IPV6_DEVICE, &options)) { + conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; + + conn->link_local_ipv6_addr = kmemdup( + fw_ddb_entry->link_local_ipv6_addr, + IPv6_ADDR_LEN, GFP_KERNEL); + if (!conn->link_local_ipv6_addr) { + rc = -ENOMEM; + goto exit_copy; + } + } else { + conn->ipv4_tos = fw_ddb_entry->ipv4_tos; + } + + if (fw_ddb_entry->iscsi_name[0]) { + rc = iscsi_switch_str_param(&sess->targetname, + (char *)fw_ddb_entry->iscsi_name); + if (rc) + goto exit_copy; + } + + if (fw_ddb_entry->iscsi_alias[0]) { + rc = iscsi_switch_str_param(&sess->targetalias, + (char *)fw_ddb_entry->iscsi_alias); + if (rc) + goto exit_copy; + } + + COPY_ISID(sess->isid, fw_ddb_entry->isid); + +exit_copy: + return rc; +} + +static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, + struct iscsi_bus_flash_conn *conn, + struct dev_db_entry *fw_ddb_entry) +{ + uint16_t options; + + options = le16_to_cpu(fw_ddb_entry->options); + SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11); + if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) + options |= BIT_8; + else + options &= ~BIT_8; + + SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6); + SET_BITVAL(sess->discovery_sess, options, BIT_4); + SET_BITVAL(sess->entry_state, options, BIT_3); + fw_ddb_entry->options = cpu_to_le16(options); + + options = le16_to_cpu(fw_ddb_entry->iscsi_options); + SET_BITVAL(conn->hdrdgst_en, options, BIT_13); + SET_BITVAL(conn->datadgst_en, options, BIT_12); + SET_BITVAL(sess->imm_data_en, options, BIT_11); + SET_BITVAL(sess->initial_r2t_en, options, BIT_10); + SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9); + SET_BITVAL(sess->pdu_inorder_en, options, BIT_8); + SET_BITVAL(sess->chap_auth_en, options, BIT_7); + SET_BITVAL(conn->snack_req_en, options, BIT_6); + SET_BITVAL(sess->discovery_logout_en, options, BIT_5); + SET_BITVAL(sess->bidi_chap_en, options, BIT_4); + SET_BITVAL(sess->discovery_auth_optional, options, BIT_3); + SET_BITVAL(sess->erl & BIT_1, options, BIT_1); + SET_BITVAL(sess->erl & BIT_0, options, BIT_0); + fw_ddb_entry->iscsi_options = cpu_to_le16(options); + + options = le16_to_cpu(fw_ddb_entry->tcp_options); + SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6); + SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5); + SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4); + SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3); + SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2); + SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1); + SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0); + fw_ddb_entry->tcp_options = cpu_to_le16(options); + + options = le16_to_cpu(fw_ddb_entry->ip_options); + SET_BITVAL(conn->fragment_disable, options, BIT_4); + fw_ddb_entry->ip_options = cpu_to_le16(options); + + fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t); + fw_ddb_entry->iscsi_max_rcv_data_seg_len = + cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS); + fw_ddb_entry->iscsi_max_snd_data_seg_len = + cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS); + fw_ddb_entry->iscsi_first_burst_len = + cpu_to_le16(sess->first_burst / BYTE_UNITS); + fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst / + BYTE_UNITS); + fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait); + fw_ddb_entry->iscsi_def_time2retain = 
cpu_to_le16(sess->time2retain); + fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); + fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); + fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); + fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); + fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); + fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); + fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); + fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); + fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); + fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx); + fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); + fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); + fw_ddb_entry->port = cpu_to_le16(conn->port); + fw_ddb_entry->def_timeout = + cpu_to_le16(sess->default_taskmgmt_timeout); + + if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) + fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class; + else + fw_ddb_entry->ipv4_tos = conn->ipv4_tos; + + if (conn->ipaddress) + memcpy(fw_ddb_entry->ip_addr, conn->ipaddress, + sizeof(fw_ddb_entry->ip_addr)); + + if (conn->redirect_ipaddr) + memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr, + sizeof(fw_ddb_entry->tgt_addr)); + + if (conn->link_local_ipv6_addr) + memcpy(fw_ddb_entry->link_local_ipv6_addr, + conn->link_local_ipv6_addr, + sizeof(fw_ddb_entry->link_local_ipv6_addr)); + + if (sess->targetname) + memcpy(fw_ddb_entry->iscsi_name, sess->targetname, + sizeof(fw_ddb_entry->iscsi_name)); + + if (sess->targetalias) + memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias, + sizeof(fw_ddb_entry->iscsi_alias)); + + COPY_ISID(fw_ddb_entry->isid, sess->isid); + + return 0; +} + +static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn, + struct iscsi_session *sess, + struct dev_db_entry *fw_ddb_entry) +{ + unsigned long options = 0; + uint16_t ddb_link; + uint16_t disc_parent; + char ip_addr[DDB_IPADDR_LEN]; + + options = le16_to_cpu(fw_ddb_entry->options); + conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); + sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, + &options); + sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); + + options = le16_to_cpu(fw_ddb_entry->iscsi_options); + conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); + conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); + sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); + sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); + sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, + &options); + sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); + sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); + sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, + &options); + sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); + sess->discovery_auth_optional = + test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); + if (test_bit(ISCSIOPT_ERL1, &options)) + sess->erl |= BIT_1; + if (test_bit(ISCSIOPT_ERL0, &options)) + sess->erl |= BIT_0; + + options = le16_to_cpu(fw_ddb_entry->tcp_options); + conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); + conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); + conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); + if (test_bit(TCPOPT_TIMER_SCALE3, &options)) + conn->tcp_timer_scale |= BIT_3; + if 
(test_bit(TCPOPT_TIMER_SCALE2, &options)) + conn->tcp_timer_scale |= BIT_2; + if (test_bit(TCPOPT_TIMER_SCALE1, &options)) + conn->tcp_timer_scale |= BIT_1; + + conn->tcp_timer_scale >>= 1; + conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); + + options = le16_to_cpu(fw_ddb_entry->ip_options); + conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); + + conn->max_recv_dlength = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); + conn->max_xmit_dlength = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); + sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); + sess->first_burst = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); + sess->max_burst = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); + sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); + sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); + sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); + conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); + conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; + conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; + conn->ipv4_tos = fw_ddb_entry->ipv4_tos; + conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout); + conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); + conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); + conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); + sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); + COPY_ISID(sess->isid, fw_ddb_entry->isid); + + ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); + if (ddb_link == DDB_ISNS) + disc_parent = ISCSI_DISC_PARENT_ISNS; + else if (ddb_link == DDB_NO_LINK) + disc_parent = ISCSI_DISC_PARENT_UNKNOWN; + else if (ddb_link < MAX_DDB_ENTRIES) + disc_parent = ISCSI_DISC_PARENT_SENDTGT; + else + disc_parent = ISCSI_DISC_PARENT_UNKNOWN; + + iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, + iscsi_get_discovery_parent_name(disc_parent), 0); + + iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS, + (char *)fw_ddb_entry->iscsi_alias, 0); + + options = le16_to_cpu(fw_ddb_entry->options); + if (options & DDB_OPT_IPV6_DEVICE) { + memset(ip_addr, 0, sizeof(ip_addr)); + sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr); + iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR, + (char *)ip_addr, 0); + } +} + +static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + struct iscsi_cls_session *cls_sess, + struct iscsi_cls_conn *cls_conn) +{ + int buflen = 0; + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct ql4_chap_table chap_tbl; + struct iscsi_conn *conn; + char ip_addr[DDB_IPADDR_LEN]; + uint16_t options = 0; + + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + conn = cls_conn->dd_data; + memset(&chap_tbl, 0, sizeof(chap_tbl)); + + ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); + + qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); + + sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout); + conn->persistent_port = le16_to_cpu(fw_ddb_entry->port); + + memset(ip_addr, 0, sizeof(ip_addr)); + options = le16_to_cpu(fw_ddb_entry->options); + if (options & DDB_OPT_IPV6_DEVICE) { + iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4); + + memset(ip_addr, 0, sizeof(ip_addr)); + sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr); + } else { + iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4); + sprintf(ip_addr, "%pI4", 
fw_ddb_entry->ip_addr); + } + + iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS, + (char *)ip_addr, buflen); + iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME, + (char *)fw_ddb_entry->iscsi_name, buflen); + iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, + (char *)ha->name_string, buflen); + + if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { + if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, + chap_tbl.secret, + ddb_entry->chap_tbl_idx)) { + iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, + (char *)chap_tbl.name, + strlen((char *)chap_tbl.name)); + iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, + (char *)chap_tbl.secret, + chap_tbl.secret_len); + } + } +} + +void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry) +{ + struct iscsi_cls_session *cls_sess; + struct iscsi_cls_conn *cls_conn; + uint32_t ddb_state; + dma_addr_t fw_ddb_entry_dma; + struct dev_db_entry *fw_ddb_entry; + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", __func__); + goto exit_session_conn_fwddb_param; + } + + if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, + fw_ddb_entry_dma, NULL, NULL, &ddb_state, + NULL, NULL, NULL) == QLA_ERROR) { + DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " + "get_ddb_entry for fw_ddb_index %d\n", + ha->host_no, __func__, + ddb_entry->fw_ddb_index)); + goto exit_session_conn_fwddb_param; + } + + cls_sess = ddb_entry->sess; + + cls_conn = ddb_entry->conn; + + /* Update params */ + qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); + +exit_session_conn_fwddb_param: + if (fw_ddb_entry) + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); +} + +void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry) +{ + struct iscsi_cls_session *cls_sess; + struct iscsi_cls_conn *cls_conn; + struct iscsi_session *sess; + struct iscsi_conn *conn; + uint32_t ddb_state; + dma_addr_t fw_ddb_entry_dma; + struct dev_db_entry *fw_ddb_entry; + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", __func__); + goto exit_session_conn_param; + } + + if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, + fw_ddb_entry_dma, NULL, NULL, &ddb_state, + NULL, NULL, NULL) == QLA_ERROR) { + DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " + "get_ddb_entry for fw_ddb_index %d\n", + ha->host_no, __func__, + ddb_entry->fw_ddb_index)); + goto exit_session_conn_param; + } + + cls_sess = ddb_entry->sess; + sess = cls_sess->dd_data; + + cls_conn = ddb_entry->conn; + conn = cls_conn->dd_data; + + /* Update timers after login */ + ddb_entry->default_relogin_timeout = + (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) && + (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ? 
+ le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV; + ddb_entry->default_time2wait = + le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); + + /* Update params */ + ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); + qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); + + memcpy(sess->initiatorname, ha->name_string, + min(sizeof(ha->name_string), sizeof(sess->initiatorname))); + +exit_session_conn_param: + if (fw_ddb_entry) + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); +} + +/* + * Timer routines + */ +static void qla4xxx_timer(struct timer_list *t); + +static void qla4xxx_start_timer(struct scsi_qla_host *ha, + unsigned long interval) +{ + DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n", + __func__, ha->host->host_no)); + timer_setup(&ha->timer, qla4xxx_timer, 0); + ha->timer.expires = jiffies + interval * HZ; + add_timer(&ha->timer); + ha->timer_active = 1; +} + +static void qla4xxx_stop_timer(struct scsi_qla_host *ha) +{ + del_timer_sync(&ha->timer); + ha->timer_active = 0; +} + +/** + * qla4xxx_mark_device_missing - blocks the session + * @cls_session: Pointer to the session to be blocked + * + * This routine marks a device missing and closes the connection. + **/ +void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session) +{ + iscsi_block_session(cls_session); +} + +/** + * qla4xxx_mark_all_devices_missing - mark all devices as missing. + * @ha: Pointer to host adapter structure. + * + * This routine marks all devices as missing by blocking their sessions. + **/ +void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha) +{ + iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing); +} + +static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry, + struct scsi_cmnd *cmd) +{ + struct srb *srb; + + srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); + if (!srb) + return srb; + + kref_init(&srb->srb_ref); + srb->ha = ha; + srb->ddb = ddb_entry; + srb->cmd = cmd; + srb->flags = 0; + qla4xxx_cmd_priv(cmd)->srb = srb; + + return srb; +} + +static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb) +{ + struct scsi_cmnd *cmd = srb->cmd; + + if (srb->flags & SRB_DMA_VALID) { + scsi_dma_unmap(cmd); + srb->flags &= ~SRB_DMA_VALID; + } + qla4xxx_cmd_priv(cmd)->srb = NULL; +} + +void qla4xxx_srb_compl(struct kref *ref) +{ + struct srb *srb = container_of(ref, struct srb, srb_ref); + struct scsi_cmnd *cmd = srb->cmd; + struct scsi_qla_host *ha = srb->ha; + + qla4xxx_srb_free_dma(ha, srb); + + mempool_free(srb, ha->srb_mempool); + + scsi_done(cmd); +} + +/** + * qla4xxx_queuecommand - scsi layer issues scsi command to driver. + * @host: scsi host + * @cmd: Pointer to Linux's SCSI command structure + * + * Remarks: + * This routine is invoked by Linux to send a SCSI command to the driver. + * The mid-level driver tries to ensure that queuecommand never gets + * invoked concurrently with itself or the interrupt handler (although + * the interrupt handler may call this routine as part of request- + * completion handling). Unfortunately, it sometimes calls the scheduler + * in interrupt context, which is a big NO! NO! 
+ **/ +static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) +{ + struct scsi_qla_host *ha = to_qla_host(host); + struct ddb_entry *ddb_entry = cmd->device->hostdata; + struct iscsi_cls_session *sess = ddb_entry->sess; + struct srb *srb; + int rval; + + if (test_bit(AF_EEH_BUSY, &ha->flags)) { + if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags)) + cmd->result = DID_NO_CONNECT << 16; + else + cmd->result = DID_REQUEUE << 16; + goto qc_fail_command; + } + + if (!sess) { + cmd->result = DID_IMM_RETRY << 16; + goto qc_fail_command; + } + + rval = iscsi_session_chkready(sess); + if (rval) { + cmd->result = rval; + goto qc_fail_command; + } + + if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || + test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || + test_bit(DPC_RESET_HA, &ha->dpc_flags) || + test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || + test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || + !test_bit(AF_ONLINE, &ha->flags) || + !test_bit(AF_LINK_UP, &ha->flags) || + test_bit(AF_LOOPBACK, &ha->flags) || + test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) || + test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) || + test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) + goto qc_host_busy; + + srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd); + if (!srb) + goto qc_host_busy; + + rval = qla4xxx_send_command_to_isp(ha, srb); + if (rval != QLA_SUCCESS) + goto qc_host_busy_free_sp; + + return 0; + +qc_host_busy_free_sp: + qla4xxx_srb_free_dma(ha, srb); + mempool_free(srb, ha->srb_mempool); + +qc_host_busy: + return SCSI_MLQUEUE_HOST_BUSY; + +qc_fail_command: + scsi_done(cmd); + + return 0; +} + +/** + * qla4xxx_mem_free - frees memory allocated to adapter + * @ha: Pointer to host adapter structure. + * + * Frees memory previously allocated by qla4xxx_mem_alloc + **/ +static void qla4xxx_mem_free(struct scsi_qla_host *ha) +{ + if (ha->queues) + dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, + ha->queues_dma); + + vfree(ha->fw_dump); + + ha->queues_len = 0; + ha->queues = NULL; + ha->queues_dma = 0; + ha->request_ring = NULL; + ha->request_dma = 0; + ha->response_ring = NULL; + ha->response_dma = 0; + ha->shadow_regs = NULL; + ha->shadow_regs_dma = 0; + ha->fw_dump = NULL; + ha->fw_dump_size = 0; + + /* Free srb pool. */ + mempool_destroy(ha->srb_mempool); + ha->srb_mempool = NULL; + + dma_pool_destroy(ha->chap_dma_pool); + + vfree(ha->chap_list); + ha->chap_list = NULL; + + dma_pool_destroy(ha->fw_ddb_dma_pool); + + /* release io space registers */ + if (is_qla8022(ha)) { + if (ha->nx_pcibase) + iounmap( + (struct device_reg_82xx __iomem *)ha->nx_pcibase); + } else if (is_qla8032(ha) || is_qla8042(ha)) { + if (ha->nx_pcibase) + iounmap( + (struct device_reg_83xx __iomem *)ha->nx_pcibase); + } else if (ha->reg) { + iounmap(ha->reg); + } + + vfree(ha->reset_tmplt.buff); + + pci_release_regions(ha->pdev); +} + +/** + * qla4xxx_mem_alloc - allocates memory for use by adapter. + * @ha: Pointer to host adapter structure + * + * Allocates DMA memory for request and response queues. Also allocates memory + * for srbs. + **/ +static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) +{ + unsigned long align; + + /* Allocate contiguous block of DMA memory for queues. 
*/ + ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + + (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) + + sizeof(struct shadow_regs) + + MEM_ALIGN_VALUE + + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); + ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, + &ha->queues_dma, GFP_KERNEL); + if (ha->queues == NULL) { + ql4_printk(KERN_WARNING, ha, + "Memory Allocation failed - queues.\n"); + + goto mem_alloc_error_exit; + } + + /* + * As per RISC alignment requirements -- the bus-address must be a + * multiple of the request-ring size (in bytes). + */ + align = 0; + if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1)) + align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma & + (MEM_ALIGN_VALUE - 1)); + + /* Update request and response queue pointers. */ + ha->request_dma = ha->queues_dma + align; + ha->request_ring = (struct queue_entry *) (ha->queues + align); + ha->response_dma = ha->queues_dma + align + + (REQUEST_QUEUE_DEPTH * QUEUE_SIZE); + ha->response_ring = (struct queue_entry *) (ha->queues + align + + (REQUEST_QUEUE_DEPTH * + QUEUE_SIZE)); + ha->shadow_regs_dma = ha->queues_dma + align + + (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + + (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE); + ha->shadow_regs = (struct shadow_regs *) (ha->queues + align + + (REQUEST_QUEUE_DEPTH * + QUEUE_SIZE) + + (RESPONSE_QUEUE_DEPTH * + QUEUE_SIZE)); + + /* Allocate memory for srb pool. */ + ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab, + mempool_free_slab, srb_cachep); + if (ha->srb_mempool == NULL) { + ql4_printk(KERN_WARNING, ha, + "Memory Allocation failed - SRB Pool.\n"); + + goto mem_alloc_error_exit; + } + + ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev, + CHAP_DMA_BLOCK_SIZE, 8, 0); + + if (ha->chap_dma_pool == NULL) { + ql4_printk(KERN_WARNING, ha, + "%s: chap_dma_pool allocation failed..\n", __func__); + goto mem_alloc_error_exit; + } + + ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev, + DDB_DMA_BLOCK_SIZE, 8, 0); + + if (ha->fw_ddb_dma_pool == NULL) { + ql4_printk(KERN_WARNING, ha, + "%s: fw_ddb_dma_pool allocation failed..\n", + __func__); + goto mem_alloc_error_exit; + } + + return QLA_SUCCESS; + +mem_alloc_error_exit: + return QLA_ERROR; +} + +/** + * qla4_8xxx_check_temp - Check the ISP82XX temperature. + * @ha: adapter block pointer. + * + * Note: The caller should not hold the idc lock. + **/ +static int qla4_8xxx_check_temp(struct scsi_qla_host *ha) +{ + uint32_t temp, temp_state, temp_val; + int status = QLA_SUCCESS; + + temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE); + + temp_state = qla82xx_get_temp_state(temp); + temp_val = qla82xx_get_temp_val(temp); + + if (temp_state == QLA82XX_TEMP_PANIC) { + ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C" + " exceeds maximum allowed. Hardware has been shut" + " down.\n", temp_val); + status = QLA_ERROR; + } else if (temp_state == QLA82XX_TEMP_WARN) { + if (ha->temperature == QLA82XX_TEMP_NORMAL) + ql4_printk(KERN_WARNING, ha, "Device temperature %d" + " degrees C exceeds operating range." + " Immediate action needed.\n", temp_val); + } else { + if (ha->temperature == QLA82XX_TEMP_WARN) + ql4_printk(KERN_INFO, ha, "Device temperature is" + " now %d degrees C in normal range.\n", + temp_val); + } + ha->temperature = temp_state; + return status; +} + +/** + * qla4_8xxx_check_fw_alive - Check firmware health + * @ha: Pointer to host adapter structure. 
+ * + * Context: Interrupt + **/ +static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha) +{ + uint32_t fw_heartbeat_counter; + int status = QLA_SUCCESS; + + fw_heartbeat_counter = qla4_8xxx_rd_direct(ha, + QLA8XXX_PEG_ALIVE_COUNTER); + /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ + if (fw_heartbeat_counter == 0xffffffff) { + DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen " + "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", + ha->host_no, __func__)); + return status; + } + + if (ha->fw_heartbeat_counter == fw_heartbeat_counter) { + ha->seconds_since_last_heartbeat++; + /* FW not alive after 2 seconds */ + if (ha->seconds_since_last_heartbeat == 2) { + ha->seconds_since_last_heartbeat = 0; + qla4_8xxx_dump_peg_reg(ha); + status = QLA_ERROR; + } + } else + ha->seconds_since_last_heartbeat = 0; + + ha->fw_heartbeat_counter = fw_heartbeat_counter; + return status; +} + +static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha) +{ + uint32_t halt_status; + int halt_status_unrecoverable = 0; + + halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1); + + if (is_qla8022(ha)) { + ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", + __func__); + qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, + CRB_NIU_XG_PAUSE_CTL_P0 | + CRB_NIU_XG_PAUSE_CTL_P1); + + if (QLA82XX_FWERROR_CODE(halt_status) == 0x67) + ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n", + __func__); + if (halt_status & HALT_STATUS_UNRECOVERABLE) + halt_status_unrecoverable = 1; + } else if (is_qla8032(ha) || is_qla8042(ha)) { + if (halt_status & QLA83XX_HALT_STATUS_FW_RESET) + ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n", + __func__); + else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE) + halt_status_unrecoverable = 1; + } + + /* + * Since we cannot change dev_state in interrupt context, + * set appropriate DPC flag then wakeup DPC + */ + if (halt_status_unrecoverable) { + set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); + } else { + ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n", + __func__); + set_bit(DPC_RESET_HA, &ha->dpc_flags); + } + qla4xxx_mailbox_premature_completion(ha); + qla4xxx_wake_dpc(ha); +} + +/** + * qla4_8xxx_watchdog - Poll dev state + * @ha: Pointer to host adapter structure. 
+ * + * Context: Interrupt + **/ +void qla4_8xxx_watchdog(struct scsi_qla_host *ha) +{ + uint32_t dev_state; + uint32_t idc_ctrl; + + if (is_qla8032(ha) && + (qla4_83xx_is_detached(ha) == QLA_SUCCESS)) + WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n", + __func__, ha->func_num); + + /* don't poll if reset is going on */ + if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || + test_bit(DPC_RESET_HA, &ha->dpc_flags) || + test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { + dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); + + if (qla4_8xxx_check_temp(ha)) { + if (is_qla8022(ha)) { + ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n"); + qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, + CRB_NIU_XG_PAUSE_CTL_P0 | + CRB_NIU_XG_PAUSE_CTL_P1); + } + set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); + qla4xxx_wake_dpc(ha); + } else if (dev_state == QLA8XXX_DEV_NEED_RESET && + !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { + + ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n", + __func__); + + if (is_qla8032(ha) || is_qla8042(ha)) { + idc_ctrl = qla4_83xx_rd_reg(ha, + QLA83XX_IDC_DRV_CTRL); + if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) { + ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n", + __func__); + qla4xxx_mailbox_premature_completion( + ha); + } + } + + if ((is_qla8032(ha) || is_qla8042(ha)) || + (is_qla8022(ha) && !ql4xdontresethba)) { + set_bit(DPC_RESET_HA, &ha->dpc_flags); + qla4xxx_wake_dpc(ha); + } + } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && + !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { + ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n", + __func__); + set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags); + qla4xxx_wake_dpc(ha); + } else { + /* Check firmware health */ + if (qla4_8xxx_check_fw_alive(ha)) + qla4_8xxx_process_fw_error(ha); + } + } +} + +static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) +{ + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + + if (!(ddb_entry->ddb_type == FLASH_DDB)) + return; + + if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) && + !iscsi_is_session_online(cls_sess)) { + if (atomic_read(&ddb_entry->retry_relogin_timer) != + INVALID_ENTRY) { + if (atomic_read(&ddb_entry->retry_relogin_timer) == + 0) { + atomic_set(&ddb_entry->retry_relogin_timer, + INVALID_ENTRY); + set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); + set_bit(DF_RELOGIN, &ddb_entry->flags); + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: index [%d] login device\n", + __func__, ddb_entry->fw_ddb_index)); + } else + atomic_dec(&ddb_entry->retry_relogin_timer); + } + } + + /* Wait for relogin to timeout */ + if (atomic_read(&ddb_entry->relogin_timer) && + (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) { + /* + * If the relogin times out and the device is + * still NOT ONLINE then try and relogin again. + */ + if (!iscsi_is_session_online(cls_sess)) { + /* Reset retry relogin timer */ + atomic_inc(&ddb_entry->relogin_retry_count); + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: index[%d] relogin timed out-retrying" + " relogin (%d), retry (%d)\n", __func__, + ddb_entry->fw_ddb_index, + atomic_read(&ddb_entry->relogin_retry_count), + ddb_entry->default_time2wait + 4)); + set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); + atomic_set(&ddb_entry->retry_relogin_timer, + ddb_entry->default_time2wait + 4); + } + } +} + +/** + * qla4xxx_timer - checks every second for work to do. 
+ * @t: Context to obtain pointer to host adapter structure. + **/ +static void qla4xxx_timer(struct timer_list *t) +{ + struct scsi_qla_host *ha = from_timer(ha, t, timer); + int start_dpc = 0; + uint16_t w; + + iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb); + + /* If we are in the middle of AER/EEH processing, + * skip any processing and reschedule the timer + */ + if (test_bit(AF_EEH_BUSY, &ha->flags)) { + mod_timer(&ha->timer, jiffies + HZ); + return; + } + + /* Hardware read to trigger an EEH error during mailbox waits. */ + if (!pci_channel_offline(ha->pdev)) + pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); + + if (is_qla80XX(ha)) + qla4_8xxx_watchdog(ha); + + if (is_qla40XX(ha)) { + /* Check for heartbeat interval. */ + if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE && + ha->heartbeat_interval != 0) { + ha->seconds_since_last_heartbeat++; + if (ha->seconds_since_last_heartbeat > + ha->heartbeat_interval + 2) + set_bit(DPC_RESET_HA, &ha->dpc_flags); + } + } + + /* Process any deferred work. */ + if (!list_empty(&ha->work_list)) + start_dpc++; + + /* Wakeup the dpc routine for this adapter, if needed. */ + if (start_dpc || + test_bit(DPC_RESET_HA, &ha->dpc_flags) || + test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) || + test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) || + test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || + test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || + test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) || + test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || + test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || + test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || + test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) || + test_bit(DPC_AEN, &ha->dpc_flags)) { + DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" + " - dpc flags = 0x%lx\n", + ha->host_no, __func__, ha->dpc_flags)); + qla4xxx_wake_dpc(ha); + } + + /* Reschedule timer thread to call us back in one second */ + mod_timer(&ha->timer, jiffies + HZ); + + DEBUG2(ha->seconds_since_last_intr++); +} + +/** + * qla4xxx_cmd_wait - waits for all outstanding commands to complete + * @ha: Pointer to host adapter structure. + * + * This routine stalls the driver until all outstanding commands are returned. + * Caller must release the Hardware Lock prior to calling this routine. + **/ +static int qla4xxx_cmd_wait(struct scsi_qla_host *ha) +{ + uint32_t index = 0; + unsigned long flags; + struct scsi_cmnd *cmd; + unsigned long wtime; + uint32_t wtmo; + + if (is_qla40XX(ha)) + wtmo = WAIT_CMD_TOV; + else + wtmo = ha->nx_reset_timeout / 2; + + wtime = jiffies + (wtmo * HZ); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Wait up to %u seconds for cmds to complete\n", + wtmo)); + + while (!time_after_eq(jiffies, wtime)) { + spin_lock_irqsave(&ha->hardware_lock, flags); + /* Find a command that hasn't completed. */ + for (index = 0; index < ha->host->can_queue; index++) { + cmd = scsi_host_find_tag(ha->host, index); + /* + * We cannot just check if the index is valid, + * because if we are run from the scsi eh, then + * the scsi/block layer is going to prevent + * the tag from being released. + */ + if (cmd != NULL && qla4xxx_cmd_priv(cmd)->srb) + break; + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* If No Commands are pending, wait is complete */ + if (index == ha->host->can_queue) + return QLA_SUCCESS; + + msleep(1000); + } + /* If we timed out waiting for commands to come back, + * return ERROR. 
*/ + return QLA_ERROR; +} + +int qla4xxx_hw_reset(struct scsi_qla_host *ha) +{ + uint32_t ctrl_status; + unsigned long flags = 0; + + DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__)); + + if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) + return QLA_ERROR; + + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* + * If the SCSI Reset Interrupt bit is set, clear it. + * Otherwise, the Soft Reset won't work. + */ + ctrl_status = readw(&ha->reg->ctrl_status); + if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) + writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); + + /* Issue Soft Reset */ + writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return QLA_SUCCESS; +} + +/** + * qla4xxx_soft_reset - performs soft reset. + * @ha: Pointer to host adapter structure. + **/ +int qla4xxx_soft_reset(struct scsi_qla_host *ha) +{ + uint32_t max_wait_time; + unsigned long flags = 0; + int status; + uint32_t ctrl_status; + + status = qla4xxx_hw_reset(ha); + if (status != QLA_SUCCESS) + return status; + + status = QLA_ERROR; + /* Wait until the Network Reset Intr bit is cleared */ + max_wait_time = RESET_INTR_TOV; + do { + spin_lock_irqsave(&ha->hardware_lock, flags); + ctrl_status = readw(&ha->reg->ctrl_status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if ((ctrl_status & CSR_NET_RESET_INTR) == 0) + break; + + msleep(1000); + } while ((--max_wait_time)); + + if ((ctrl_status & CSR_NET_RESET_INTR) != 0) { + DEBUG2(printk(KERN_WARNING + "scsi%ld: Network Reset Intr not cleared by " + "Network function, clearing it now!\n", + ha->host_no)); + spin_lock_irqsave(&ha->hardware_lock, flags); + writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } + + /* Wait until the firmware tells us the Soft Reset is done */ + max_wait_time = SOFT_RESET_TOV; + do { + spin_lock_irqsave(&ha->hardware_lock, flags); + ctrl_status = readw(&ha->reg->ctrl_status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if ((ctrl_status & CSR_SOFT_RESET) == 0) { + status = QLA_SUCCESS; + break; + } + + msleep(1000); + } while ((--max_wait_time)); + + /* + * Also, make sure that the SCSI Reset Interrupt bit has been cleared + * after the soft reset has taken place. + */ + spin_lock_irqsave(&ha->hardware_lock, flags); + ctrl_status = readw(&ha->reg->ctrl_status); + if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) { + writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* If the soft reset fails then most probably the BIOS on the other + * function is also enabled. + * Since the initialization is sequential, the other fn + * won't be able to acknowledge the soft reset. + * Issue a force soft reset to work around this scenario. 
+ */ + if (max_wait_time == 0) { + /* Issue Force Soft Reset */ + spin_lock_irqsave(&ha->hardware_lock, flags); + writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + /* Wait until the firmware tells us the Soft Reset is done */ + max_wait_time = SOFT_RESET_TOV; + do { + spin_lock_irqsave(&ha->hardware_lock, flags); + ctrl_status = readw(&ha->reg->ctrl_status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) { + status = QLA_SUCCESS; + break; + } + + msleep(1000); + } while ((--max_wait_time)); + } + + return status; +} + +/** + * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S. + * @ha: Pointer to host adapter structure. + * @res: returned scsi status + * + * This routine is called just prior to a HARD RESET to return all + * outstanding commands back to the Operating System. + * Caller should make sure that the following locks are released + * before this calling routine: Hardware lock, and io_request_lock. + **/ +static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res) +{ + struct srb *srb; + int i; + unsigned long flags; + + spin_lock_irqsave(&ha->hardware_lock, flags); + for (i = 0; i < ha->host->can_queue; i++) { + srb = qla4xxx_del_from_active_array(ha, i); + if (srb != NULL) { + srb->cmd->result = res; + kref_put(&srb->srb_ref, qla4xxx_srb_compl); + } + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha) +{ + clear_bit(AF_ONLINE, &ha->flags); + + /* Disable the board */ + ql4_printk(KERN_INFO, ha, "Disabling the board\n"); + + qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); + qla4xxx_mark_all_devices_missing(ha); + clear_bit(AF_INIT_DONE, &ha->flags); +} + +static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + + sess = cls_session->dd_data; + ddb_entry = sess->dd_data; + ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED; + + if (ddb_entry->ddb_type == FLASH_DDB) + iscsi_block_session(ddb_entry->sess); + else + iscsi_session_failure(cls_session->dd_data, + ISCSI_ERR_CONN_FAILED); +} + +/** + * qla4xxx_recover_adapter - recovers adapter after a fatal error + * @ha: Pointer to host adapter structure. 
+ **/ +static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) +{ + int status = QLA_ERROR; + uint8_t reset_chip = 0; + uint32_t dev_state; + unsigned long wait; + + /* Stall incoming I/O until we are done */ + scsi_block_requests(ha->host); + clear_bit(AF_ONLINE, &ha->flags); + clear_bit(AF_LINK_UP, &ha->flags); + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__)); + + set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); + + if ((is_qla8032(ha) || is_qla8042(ha)) && + !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { + ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", + __func__); + /* disable pause frame for ISP83xx */ + qla4_83xx_disable_pause(ha); + } + + iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); + + if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) + reset_chip = 1; + + /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific) + * do not reset adapter, jump to initialize_adapter */ + if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { + status = QLA_SUCCESS; + goto recover_ha_init_adapter; + } + + /* For the ISP-8xxx adapter, issue a stop_firmware if invoked + * from eh_host_reset or ioctl module */ + if (is_qla80XX(ha) && !reset_chip && + test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { + + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: %s - Performing stop_firmware...\n", + ha->host_no, __func__)); + status = ha->isp_ops->reset_firmware(ha); + if (status == QLA_SUCCESS) { + ha->isp_ops->disable_intrs(ha); + qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); + qla4xxx_abort_active_cmds(ha, DID_RESET << 16); + } else { + /* If the stop_firmware fails then + * reset the entire chip */ + reset_chip = 1; + clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); + set_bit(DPC_RESET_HA, &ha->dpc_flags); + } + } + + /* Issue full chip reset if recovering from a catastrophic error, + * or if stop_firmware fails for ISP-8xxx. 
+ * This is the default case for ISP-4xxx */ + if (is_qla40XX(ha) || reset_chip) { + if (is_qla40XX(ha)) + goto chip_reset; + + /* Check if 8XXX firmware is alive or not + * We may have arrived here from NEED_RESET + * detection only */ + if (test_bit(AF_FW_RECOVERY, &ha->flags)) + goto chip_reset; + + wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ); + while (time_before(jiffies, wait)) { + if (qla4_8xxx_check_fw_alive(ha)) { + qla4xxx_mailbox_premature_completion(ha); + break; + } + + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ); + } +chip_reset: + if (!test_bit(AF_FW_RECOVERY, &ha->flags)) + qla4xxx_cmd_wait(ha); + + qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: %s - Performing chip reset..\n", + ha->host_no, __func__)); + status = ha->isp_ops->reset_chip(ha); + qla4xxx_abort_active_cmds(ha, DID_RESET << 16); + } + + /* Flush any pending ddb changed AENs */ + qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); + +recover_ha_init_adapter: + /* Upon successful firmware/chip reset, re-initialize the adapter */ + if (status == QLA_SUCCESS) { + /* For ISP-4xxx, force function 1 to always initialize + * before function 3 to prevent both funcions from + * stepping on top of the other */ + if (is_qla40XX(ha) && (ha->mac_index == 3)) + ssleep(6); + + /* NOTE: AF_ONLINE flag set upon successful completion of + * qla4xxx_initialize_adapter */ + status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); + if (is_qla80XX(ha) && (status == QLA_ERROR)) { + status = qla4_8xxx_check_init_adapter_retry(ha); + if (status == QLA_ERROR) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n", + ha->host_no, __func__); + qla4xxx_dead_adapter_cleanup(ha); + clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); + clear_bit(DPC_RESET_HA, &ha->dpc_flags); + clear_bit(DPC_RESET_HA_FW_CONTEXT, + &ha->dpc_flags); + goto exit_recover; + } + } + } + + /* Retry failed adapter initialization, if necessary + * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific) + * case to prevent ping-pong resets between functions */ + if (!test_bit(AF_ONLINE, &ha->flags) && + !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { + /* Adapter initialization failed, see if we can retry + * resetting the ha. + * Since we don't want to block the DPC for too long + * with multiple resets in the same thread, + * utilize DPC to retry */ + if (is_qla80XX(ha)) { + ha->isp_ops->idc_lock(ha); + dev_state = qla4_8xxx_rd_direct(ha, + QLA8XXX_CRB_DEV_STATE); + ha->isp_ops->idc_unlock(ha); + if (dev_state == QLA8XXX_DEV_FAILED) { + ql4_printk(KERN_INFO, ha, "%s: don't retry " + "recover adapter. 
H/W is in Failed " + "state\n", __func__); + qla4xxx_dead_adapter_cleanup(ha); + clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); + clear_bit(DPC_RESET_HA, &ha->dpc_flags); + clear_bit(DPC_RESET_HA_FW_CONTEXT, + &ha->dpc_flags); + status = QLA_ERROR; + + goto exit_recover; + } + } + + if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) { + ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES; + DEBUG2(printk("scsi%ld: recover adapter - retrying " + "(%d) more times\n", ha->host_no, + ha->retry_reset_ha_cnt)); + set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); + status = QLA_ERROR; + } else { + if (ha->retry_reset_ha_cnt > 0) { + /* Schedule another Reset HA--DPC will retry */ + ha->retry_reset_ha_cnt--; + DEBUG2(printk("scsi%ld: recover adapter - " + "retry remaining %d\n", + ha->host_no, + ha->retry_reset_ha_cnt)); + status = QLA_ERROR; + } + + if (ha->retry_reset_ha_cnt == 0) { + /* Recover adapter retries have been exhausted. + * Adapter DEAD */ + DEBUG2(printk("scsi%ld: recover adapter " + "failed - board disabled\n", + ha->host_no)); + qla4xxx_dead_adapter_cleanup(ha); + clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); + clear_bit(DPC_RESET_HA, &ha->dpc_flags); + clear_bit(DPC_RESET_HA_FW_CONTEXT, + &ha->dpc_flags); + status = QLA_ERROR; + } + } + } else { + clear_bit(DPC_RESET_HA, &ha->dpc_flags); + clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); + clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); + } + +exit_recover: + ha->adapter_error_count++; + + if (test_bit(AF_ONLINE, &ha->flags)) + ha->isp_ops->enable_intrs(ha); + + scsi_unblock_requests(ha->host); + + clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); + DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no, + status == QLA_ERROR ? "FAILED" : "SUCCEEDED")); + + return status; +} + +static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + + sess = cls_session->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + if (!iscsi_is_session_online(cls_session)) { + if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" + " unblock session\n", ha->host_no, __func__, + ddb_entry->fw_ddb_index); + iscsi_unblock_session(ddb_entry->sess); + } else { + /* Trigger relogin */ + if (ddb_entry->ddb_type == FLASH_DDB) { + if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) || + test_bit(DF_DISABLE_RELOGIN, + &ddb_entry->flags))) + qla4xxx_arm_relogin_timer(ddb_entry); + } else + iscsi_session_failure(cls_session->dd_data, + ISCSI_ERR_CONN_FAILED); + } + } +} + +int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + + sess = cls_session->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" + " unblock session\n", ha->host_no, __func__, + ddb_entry->fw_ddb_index); + + iscsi_unblock_session(ddb_entry->sess); + + /* Start scan target */ + if (test_bit(AF_ONLINE, &ha->flags)) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" + " start scan\n", ha->host_no, __func__, + ddb_entry->fw_ddb_index); + queue_work(ddb_entry->sess->workq, &ddb_entry->sess->scan_work); + } + return QLA_SUCCESS; +} + +int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + int status = QLA_SUCCESS; + + sess = cls_session->dd_data; + ddb_entry = 
sess->dd_data; + ha = ddb_entry->ha; + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" + " unblock user space session\n", ha->host_no, __func__, + ddb_entry->fw_ddb_index); + + if (!iscsi_is_session_online(cls_session)) { + iscsi_conn_start(ddb_entry->conn); + iscsi_conn_login_event(ddb_entry->conn, + ISCSI_CONN_STATE_LOGGED_IN); + } else { + ql4_printk(KERN_INFO, ha, + "scsi%ld: %s: ddb[%d] session [%d] already logged in\n", + ha->host_no, __func__, ddb_entry->fw_ddb_index, + cls_session->sid); + status = QLA_ERROR; + } + + return status; +} + +static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) +{ + iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices); +} + +static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) +{ + uint16_t relogin_timer; + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + + relogin_timer = max(ddb_entry->default_relogin_timeout, + (uint16_t)RELOGIN_TOV); + atomic_set(&ddb_entry->relogin_timer, relogin_timer); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no, + ddb_entry->fw_ddb_index, relogin_timer)); + + qla4xxx_login_flash_ddb(cls_sess); +} + +static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess) +{ + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + + if (!(ddb_entry->ddb_type == FLASH_DDB)) + return; + + if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) + return; + + if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) && + !iscsi_is_session_online(cls_sess)) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "relogin issued\n")); + qla4xxx_relogin_flash_ddb(cls_sess); + } +} + +void qla4xxx_wake_dpc(struct scsi_qla_host *ha) +{ + if (ha->dpc_thread) + queue_work(ha->dpc_thread, &ha->dpc_work); +} + +static struct qla4_work_evt * +qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size, + enum qla4_work_type type) +{ + struct qla4_work_evt *e; + uint32_t size = sizeof(struct qla4_work_evt) + data_size; + + e = kzalloc(size, GFP_ATOMIC); + if (!e) + return NULL; + + INIT_LIST_HEAD(&e->list); + e->type = type; + return e; +} + +static void qla4xxx_post_work(struct scsi_qla_host *ha, + struct qla4_work_evt *e) +{ + unsigned long flags; + + spin_lock_irqsave(&ha->work_lock, flags); + list_add_tail(&e->list, &ha->work_list); + spin_unlock_irqrestore(&ha->work_lock, flags); + qla4xxx_wake_dpc(ha); +} + +int qla4xxx_post_aen_work(struct scsi_qla_host *ha, + enum iscsi_host_event_code aen_code, + uint32_t data_size, uint8_t *data) +{ + struct qla4_work_evt *e; + + e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN); + if (!e) + return QLA_ERROR; + + e->u.aen.code = aen_code; + e->u.aen.data_size = data_size; + memcpy(e->u.aen.data, data, data_size); + + qla4xxx_post_work(ha, e); + + return QLA_SUCCESS; +} + +int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha, + uint32_t status, uint32_t pid, + uint32_t data_size, uint8_t *data) +{ + struct qla4_work_evt *e; + + e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS); + if (!e) + return QLA_ERROR; + + e->u.ping.status = status; + e->u.ping.pid = pid; + e->u.ping.data_size = data_size; + memcpy(e->u.ping.data, data, data_size); + + qla4xxx_post_work(ha, e); + + return QLA_SUCCESS; +} + +static void qla4xxx_do_work(struct scsi_qla_host *ha) +{ + struct qla4_work_evt *e, *tmp; + 
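/*
 * Illustrative sketch (not part of the upstream driver): the work-event
 * helpers above (qla4xxx_alloc_work, qla4xxx_post_aen_work,
 * qla4xxx_post_ping_evt_work) allocate one buffer holding a fixed header
 * plus a caller-sized payload and copy the payload in behind it.  The
 * stand-alone user-space example below shows the same header-plus-
 * flexible-payload layout; demo_evt and demo_alloc_evt are invented
 * names, not qla4xxx symbols.
 *
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Simplified stand-in for the driver's variable-size work event:
// one allocation holds the fixed header plus a caller-sized payload.
struct demo_evt {
    int type;
    size_t data_size;
    unsigned char data[];   // flexible array member, like e->u.aen.data
};

static struct demo_evt *demo_alloc_evt(int type, const void *payload, size_t len)
{
    struct demo_evt *e = calloc(1, sizeof(*e) + len);  // kzalloc() analogue
    if (!e)
        return NULL;
    e->type = type;
    e->data_size = len;
    memcpy(e->data, payload, len);
    return e;
}

int main(void)
{
    const char msg[] = "aen payload";
    struct demo_evt *e = demo_alloc_evt(1, msg, sizeof(msg));
    if (!e)
        return 1;
    printf("type=%d size=%zu data=%s\n", e->type, e->data_size, (char *)e->data);
    free(e);
    return 0;
}
 */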
unsigned long flags; + LIST_HEAD(work); + + spin_lock_irqsave(&ha->work_lock, flags); + list_splice_init(&ha->work_list, &work); + spin_unlock_irqrestore(&ha->work_lock, flags); + + list_for_each_entry_safe(e, tmp, &work, list) { + list_del_init(&e->list); + + switch (e->type) { + case QLA4_EVENT_AEN: + iscsi_post_host_event(ha->host_no, + &qla4xxx_iscsi_transport, + e->u.aen.code, + e->u.aen.data_size, + e->u.aen.data); + break; + case QLA4_EVENT_PING_STATUS: + iscsi_ping_comp_event(ha->host_no, + &qla4xxx_iscsi_transport, + e->u.ping.status, + e->u.ping.pid, + e->u.ping.data_size, + e->u.ping.data); + break; + default: + ql4_printk(KERN_WARNING, ha, "event type: 0x%x not " + "supported", e->type); + } + kfree(e); + } +} + +/** + * qla4xxx_do_dpc - dpc routine + * @work: Context to obtain pointer to host adapter structure. + * + * This routine is a task that is schedule by the interrupt handler + * to perform the background processing for interrupts. We put it + * on a task queue that is consumed whenever the scheduler runs; that's + * so you can do anything (i.e. put the process to sleep etc). In fact, + * the mid-level tries to sleep when it reaches the driver threshold + * "host->can_queue". This can cause a panic if we were in our interrupt code. + **/ +static void qla4xxx_do_dpc(struct work_struct *work) +{ + struct scsi_qla_host *ha = + container_of(work, struct scsi_qla_host, dpc_work); + int status = QLA_ERROR; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n", + ha->host_no, __func__, ha->flags, ha->dpc_flags)); + + /* Initialization not yet finished. Don't do anything yet. */ + if (!test_bit(AF_INIT_DONE, &ha->flags)) + return; + + if (test_bit(AF_EEH_BUSY, &ha->flags)) { + DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n", + ha->host_no, __func__, ha->flags)); + return; + } + + /* post events to application */ + qla4xxx_do_work(ha); + + if (is_qla80XX(ha)) { + if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { + if (is_qla8032(ha) || is_qla8042(ha)) { + ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", + __func__); + /* disable pause frame for ISP83xx */ + qla4_83xx_disable_pause(ha); + } + + ha->isp_ops->idc_lock(ha); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, + QLA8XXX_DEV_FAILED); + ha->isp_ops->idc_unlock(ha); + ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); + qla4_8xxx_device_state_handler(ha); + } + + if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) { + if (is_qla8042(ha)) { + if (ha->idc_info.info2 & + ENABLE_INTERNAL_LOOPBACK) { + ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n", + __func__); + status = qla4_84xx_config_acb(ha, + ACB_CONFIG_DISABLE); + if (status != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n", + __func__); + } + } + } + qla4_83xx_post_idc_ack(ha); + clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags); + } + + if (is_qla8042(ha) && + test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) { + ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n", + __func__); + if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) != + QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "%s: ACB config failed ", + __func__); + } + clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags); + } + + if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { + qla4_8xxx_need_qsnt_handler(ha); + } + } + + if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) && + (test_bit(DPC_RESET_HA, &ha->dpc_flags) || + test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || + test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) { 
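/*
 * Illustrative sketch (not part of the upstream driver): qla4xxx_post_work()
 * appends events to ha->work_list under work_lock, and qla4xxx_do_work()
 * above detaches the whole list with list_splice_init() before handling
 * each entry with the lock released.  The stand-alone user-space example
 * below shows that detach-then-process pattern with a plain mutex and a
 * singly linked list; every name in it is invented for the example.
 *
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
    int value;
    struct node *next;
};

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *work_list;     // head of pending work

static void post_work(int value)
{
    struct node *n = malloc(sizeof(*n));
    if (!n)
        return;
    n->value = value;
    pthread_mutex_lock(&work_lock);
    n->next = work_list;           // push; ordering is not important here
    work_list = n;
    pthread_mutex_unlock(&work_lock);
}

static void do_work(void)
{
    pthread_mutex_lock(&work_lock);
    struct node *batch = work_list;   // "splice": take the whole list at once
    work_list = NULL;
    pthread_mutex_unlock(&work_lock);

    while (batch) {                   // process without holding the lock
        struct node *next = batch->next;
        printf("handling work item %d\n", batch->value);
        free(batch);
        batch = next;
    }
}

int main(void)
{
    post_work(1);
    post_work(2);
    do_work();
    return 0;
}
 */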
+ if ((is_qla8022(ha) && ql4xdontresethba) || + ((is_qla8032(ha) || is_qla8042(ha)) && + qla4_83xx_idc_dontreset(ha))) { + DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", + ha->host_no, __func__)); + clear_bit(DPC_RESET_HA, &ha->dpc_flags); + clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); + clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); + goto dpc_post_reset_ha; + } + if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || + test_bit(DPC_RESET_HA, &ha->dpc_flags)) + qla4xxx_recover_adapter(ha); + + if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { + uint8_t wait_time = RESET_INTR_TOV; + + while ((readw(&ha->reg->ctrl_status) & + (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) { + if (--wait_time == 0) + break; + msleep(1000); + } + if (wait_time == 0) + DEBUG2(printk("scsi%ld: %s: SR|FSR " + "bit not cleared-- resetting\n", + ha->host_no, __func__)); + qla4xxx_abort_active_cmds(ha, DID_RESET << 16); + if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) { + qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); + status = qla4xxx_recover_adapter(ha); + } + clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); + if (status == QLA_SUCCESS) + ha->isp_ops->enable_intrs(ha); + } + } + +dpc_post_reset_ha: + /* ---- process AEN? --- */ + if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) + qla4xxx_process_aen(ha, PROCESS_ALL_AENS); + + /* ---- Get DHCP IP Address? --- */ + if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) + qla4xxx_get_dhcp_ip_address(ha); + + /* ---- relogin device? --- */ + if (adapter_up(ha) && + test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) { + iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin); + } + + /* ---- link change? --- */ + if (!test_bit(AF_LOOPBACK, &ha->flags) && + test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { + if (!test_bit(AF_LINK_UP, &ha->flags)) { + /* ---- link down? --- */ + qla4xxx_mark_all_devices_missing(ha); + } else { + /* ---- link up? --- * + * F/W will auto login to all devices ONLY ONCE after + * link up during driver initialization and runtime + * fatal error recovery. Therefore, the driver must + * manually relogin to devices when recovering from + * connection failures, logouts, expired KATO, etc. */ + if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) { + qla4xxx_build_ddb_list(ha, ha->is_reset); + iscsi_host_for_each_session(ha->host, + qla4xxx_login_flash_ddb); + } else + qla4xxx_relogin_all_devices(ha); + } + } + if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) { + if (qla4xxx_sysfs_ddb_export(ha)) + ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n", + __func__); + } +} + +/** + * qla4xxx_free_adapter - release the adapter + * @ha: pointer to adapter structure + **/ +static void qla4xxx_free_adapter(struct scsi_qla_host *ha) +{ + qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); + + /* Turn-off interrupts on the card. 
*/ + ha->isp_ops->disable_intrs(ha); + + if (is_qla40XX(ha)) { + writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), + &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); + } else if (is_qla8022(ha)) { + writel(0, &ha->qla4_82xx_reg->host_int); + readl(&ha->qla4_82xx_reg->host_int); + } else if (is_qla8032(ha) || is_qla8042(ha)) { + writel(0, &ha->qla4_83xx_reg->risc_intr); + readl(&ha->qla4_83xx_reg->risc_intr); + } + + /* Remove timer thread, if present */ + if (ha->timer_active) + qla4xxx_stop_timer(ha); + + /* Kill the kernel thread for this host */ + if (ha->dpc_thread) + destroy_workqueue(ha->dpc_thread); + + /* Kill the kernel thread for this host */ + if (ha->task_wq) + destroy_workqueue(ha->task_wq); + + /* Put firmware in known state */ + ha->isp_ops->reset_firmware(ha); + + if (is_qla80XX(ha)) { + ha->isp_ops->idc_lock(ha); + qla4_8xxx_clear_drv_active(ha); + ha->isp_ops->idc_unlock(ha); + } + + /* Detach interrupts */ + qla4xxx_free_irqs(ha); + + /* free extra memory */ + qla4xxx_mem_free(ha); +} + +int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) +{ + int status = 0; + unsigned long mem_base, mem_len; + struct pci_dev *pdev = ha->pdev; + + status = pci_request_regions(pdev, DRIVER_NAME); + if (status) { + printk(KERN_WARNING + "scsi(%ld) Failed to reserve PIO regions (%s) " + "status=%d\n", ha->host_no, pci_name(pdev), status); + goto iospace_error_exit; + } + + DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n", + __func__, pdev->revision)); + ha->revision_id = pdev->revision; + + /* remap phys address */ + mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ + mem_len = pci_resource_len(pdev, 0); + DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n", + __func__, mem_base, mem_len)); + + /* mapping of pcibase pointer */ + ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len); + if (!ha->nx_pcibase) { + printk(KERN_ERR + "cannot remap MMIO (%s), aborting\n", pci_name(pdev)); + pci_release_regions(ha->pdev); + goto iospace_error_exit; + } + + /* Mapping of IO base pointer, door bell read and write pointer */ + + /* mapping of IO base pointer */ + if (is_qla8022(ha)) { + ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *) + ((uint8_t *)ha->nx_pcibase + 0xbc000 + + (ha->pdev->devfn << 11)); + ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 : + QLA82XX_CAM_RAM_DB2); + } else if (is_qla8032(ha) || is_qla8042(ha)) { + ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *) + ((uint8_t *)ha->nx_pcibase); + } + + return 0; +iospace_error_exit: + return -ENOMEM; +} + +/*** + * qla4xxx_iospace_config - maps registers + * @ha: pointer to adapter structure + * + * This routines maps HBA's registers from the pci address space + * into the kernel virtual address space for memory mapped i/o. + **/ +int qla4xxx_iospace_config(struct scsi_qla_host *ha) +{ + unsigned long pio, pio_len, pio_flags; + unsigned long mmio, mmio_len, mmio_flags; + + pio = pci_resource_start(ha->pdev, 0); + pio_len = pci_resource_len(ha->pdev, 0); + pio_flags = pci_resource_flags(ha->pdev, 0); + if (pio_flags & IORESOURCE_IO) { + if (pio_len < MIN_IOBASE_LEN) { + ql4_printk(KERN_WARNING, ha, + "Invalid PCI I/O region size\n"); + pio = 0; + } + } else { + ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n"); + pio = 0; + } + + /* Use MMIO operations for all accesses. 
*/ + mmio = pci_resource_start(ha->pdev, 1); + mmio_len = pci_resource_len(ha->pdev, 1); + mmio_flags = pci_resource_flags(ha->pdev, 1); + + if (!(mmio_flags & IORESOURCE_MEM)) { + ql4_printk(KERN_ERR, ha, + "region #0 not an MMIO resource, aborting\n"); + + goto iospace_error_exit; + } + + if (mmio_len < MIN_IOBASE_LEN) { + ql4_printk(KERN_ERR, ha, + "Invalid PCI mem region size, aborting\n"); + goto iospace_error_exit; + } + + if (pci_request_regions(ha->pdev, DRIVER_NAME)) { + ql4_printk(KERN_WARNING, ha, + "Failed to reserve PIO/MMIO regions\n"); + + goto iospace_error_exit; + } + + ha->pio_address = pio; + ha->pio_length = pio_len; + ha->reg = ioremap(mmio, MIN_IOBASE_LEN); + if (!ha->reg) { + ql4_printk(KERN_ERR, ha, + "cannot remap MMIO, aborting\n"); + + goto iospace_error_exit; + } + + return 0; + +iospace_error_exit: + return -ENOMEM; +} + +static struct isp_operations qla4xxx_isp_ops = { + .iospace_config = qla4xxx_iospace_config, + .pci_config = qla4xxx_pci_config, + .disable_intrs = qla4xxx_disable_intrs, + .enable_intrs = qla4xxx_enable_intrs, + .start_firmware = qla4xxx_start_firmware, + .intr_handler = qla4xxx_intr_handler, + .interrupt_service_routine = qla4xxx_interrupt_service_routine, + .reset_chip = qla4xxx_soft_reset, + .reset_firmware = qla4xxx_hw_reset, + .queue_iocb = qla4xxx_queue_iocb, + .complete_iocb = qla4xxx_complete_iocb, + .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, + .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, + .get_sys_info = qla4xxx_get_sys_info, + .queue_mailbox_command = qla4xxx_queue_mbox_cmd, + .process_mailbox_interrupt = qla4xxx_process_mbox_intr, +}; + +static struct isp_operations qla4_82xx_isp_ops = { + .iospace_config = qla4_8xxx_iospace_config, + .pci_config = qla4_8xxx_pci_config, + .disable_intrs = qla4_82xx_disable_intrs, + .enable_intrs = qla4_82xx_enable_intrs, + .start_firmware = qla4_8xxx_load_risc, + .restart_firmware = qla4_82xx_try_start_fw, + .intr_handler = qla4_82xx_intr_handler, + .interrupt_service_routine = qla4_82xx_interrupt_service_routine, + .need_reset = qla4_8xxx_need_reset, + .reset_chip = qla4_82xx_isp_reset, + .reset_firmware = qla4_8xxx_stop_firmware, + .queue_iocb = qla4_82xx_queue_iocb, + .complete_iocb = qla4_82xx_complete_iocb, + .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out, + .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in, + .get_sys_info = qla4_8xxx_get_sys_info, + .rd_reg_direct = qla4_82xx_rd_32, + .wr_reg_direct = qla4_82xx_wr_32, + .rd_reg_indirect = qla4_82xx_md_rd_32, + .wr_reg_indirect = qla4_82xx_md_wr_32, + .idc_lock = qla4_82xx_idc_lock, + .idc_unlock = qla4_82xx_idc_unlock, + .rom_lock_recovery = qla4_82xx_rom_lock_recovery, + .queue_mailbox_command = qla4_82xx_queue_mbox_cmd, + .process_mailbox_interrupt = qla4_82xx_process_mbox_intr, +}; + +static struct isp_operations qla4_83xx_isp_ops = { + .iospace_config = qla4_8xxx_iospace_config, + .pci_config = qla4_8xxx_pci_config, + .disable_intrs = qla4_83xx_disable_intrs, + .enable_intrs = qla4_83xx_enable_intrs, + .start_firmware = qla4_8xxx_load_risc, + .restart_firmware = qla4_83xx_start_firmware, + .intr_handler = qla4_83xx_intr_handler, + .interrupt_service_routine = qla4_83xx_interrupt_service_routine, + .need_reset = qla4_8xxx_need_reset, + .reset_chip = qla4_83xx_isp_reset, + .reset_firmware = qla4_8xxx_stop_firmware, + .queue_iocb = qla4_83xx_queue_iocb, + .complete_iocb = qla4_83xx_complete_iocb, + .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, + .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, + .get_sys_info = qla4_8xxx_get_sys_info, 
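/*
 * Illustrative sketch (not part of the upstream driver): the isp_operations
 * tables above give each ISP family (4xxx, 82xx, 83xx/84xx) its own set of
 * function pointers, so common code can call reset_chip(), queue_iocb() and
 * the rest without knowing which chip is present.  The stand-alone example
 * below shows the same table-of-function-pointers dispatch; demo_ops and the
 * family_* names are invented for the example.
 *
#include <stdio.h>

struct demo_ops {
    const char *name;
    int (*reset_chip)(void);
};

static int family_a_reset(void) { puts("family A reset"); return 0; }
static int family_b_reset(void) { puts("family B reset"); return 0; }

static const struct demo_ops family_a_ops = { "A", family_a_reset };
static const struct demo_ops family_b_ops = { "B", family_b_reset };

int main(void)
{
    int is_family_b = 1;                       // pretend probe result
    const struct demo_ops *ops = is_family_b ? &family_b_ops
                                             : &family_a_ops;
    printf("using %s ops\n", ops->name);
    return ops->reset_chip();                  // indirect call through the table
}
 */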
+ .rd_reg_direct = qla4_83xx_rd_reg, + .wr_reg_direct = qla4_83xx_wr_reg, + .rd_reg_indirect = qla4_83xx_rd_reg_indirect, + .wr_reg_indirect = qla4_83xx_wr_reg_indirect, + .idc_lock = qla4_83xx_drv_lock, + .idc_unlock = qla4_83xx_drv_unlock, + .rom_lock_recovery = qla4_83xx_rom_lock_recovery, + .queue_mailbox_command = qla4_83xx_queue_mbox_cmd, + .process_mailbox_interrupt = qla4_83xx_process_mbox_intr, +}; + +uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha) +{ + return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out); +} + +uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha) +{ + return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out)); +} + +uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) +{ + return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in); +} + +uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) +{ + return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in)); +} + +static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) +{ + struct scsi_qla_host *ha = data; + char *str = buf; + int rc; + + switch (type) { + case ISCSI_BOOT_ETH_FLAGS: + rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT); + break; + case ISCSI_BOOT_ETH_INDEX: + rc = sprintf(str, "0\n"); + break; + case ISCSI_BOOT_ETH_MAC: + rc = sysfs_format_mac(str, ha->my_mac, + MAC_ADDR_LEN); + break; + default: + rc = -ENOSYS; + break; + } + return rc; +} + +static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type) +{ + int rc; + + switch (type) { + case ISCSI_BOOT_ETH_FLAGS: + case ISCSI_BOOT_ETH_MAC: + case ISCSI_BOOT_ETH_INDEX: + rc = S_IRUGO; + break; + default: + rc = 0; + break; + } + return rc; +} + +static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf) +{ + struct scsi_qla_host *ha = data; + char *str = buf; + int rc; + + switch (type) { + case ISCSI_BOOT_INI_INITIATOR_NAME: + rc = sprintf(str, "%s\n", ha->name_string); + break; + default: + rc = -ENOSYS; + break; + } + return rc; +} + +static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type) +{ + int rc; + + switch (type) { + case ISCSI_BOOT_INI_INITIATOR_NAME: + rc = S_IRUGO; + break; + default: + rc = 0; + break; + } + return rc; +} + +static ssize_t +qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type, + char *buf) +{ + struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; + char *str = buf; + int rc; + + switch (type) { + case ISCSI_BOOT_TGT_NAME: + rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name); + break; + case ISCSI_BOOT_TGT_IP_ADDR: + if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1) + rc = sprintf(buf, "%pI4\n", + &boot_conn->dest_ipaddr.ip_address); + else + rc = sprintf(str, "%pI6\n", + &boot_conn->dest_ipaddr.ip_address); + break; + case ISCSI_BOOT_TGT_PORT: + rc = sprintf(str, "%d\n", boot_conn->dest_port); + break; + case ISCSI_BOOT_TGT_CHAP_NAME: + rc = sprintf(str, "%.*s\n", + boot_conn->chap.target_chap_name_length, + (char *)&boot_conn->chap.target_chap_name); + break; + case ISCSI_BOOT_TGT_CHAP_SECRET: + rc = sprintf(str, "%.*s\n", + boot_conn->chap.target_secret_length, + (char *)&boot_conn->chap.target_secret); + break; + case ISCSI_BOOT_TGT_REV_CHAP_NAME: + rc = sprintf(str, "%.*s\n", + boot_conn->chap.intr_chap_name_length, + (char *)&boot_conn->chap.intr_chap_name); + break; + case ISCSI_BOOT_TGT_REV_CHAP_SECRET: + rc = sprintf(str, "%.*s\n", + boot_conn->chap.intr_secret_length, + (char *)&boot_conn->chap.intr_secret); + break; + case ISCSI_BOOT_TGT_FLAGS: + rc = 
sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT); + break; + case ISCSI_BOOT_TGT_NIC_ASSOC: + rc = sprintf(str, "0\n"); + break; + default: + rc = -ENOSYS; + break; + } + return rc; +} + +static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf) +{ + struct scsi_qla_host *ha = data; + struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess); + + return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); +} + +static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf) +{ + struct scsi_qla_host *ha = data; + struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess); + + return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); +} + +static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type) +{ + int rc; + + switch (type) { + case ISCSI_BOOT_TGT_NAME: + case ISCSI_BOOT_TGT_IP_ADDR: + case ISCSI_BOOT_TGT_PORT: + case ISCSI_BOOT_TGT_CHAP_NAME: + case ISCSI_BOOT_TGT_CHAP_SECRET: + case ISCSI_BOOT_TGT_REV_CHAP_NAME: + case ISCSI_BOOT_TGT_REV_CHAP_SECRET: + case ISCSI_BOOT_TGT_NIC_ASSOC: + case ISCSI_BOOT_TGT_FLAGS: + rc = S_IRUGO; + break; + default: + rc = 0; + break; + } + return rc; +} + +static void qla4xxx_boot_release(void *data) +{ + struct scsi_qla_host *ha = data; + + scsi_host_put(ha->host); +} + +static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[]) +{ + dma_addr_t buf_dma; + uint32_t addr, pri_addr, sec_addr; + uint32_t offset; + uint16_t func_num; + uint8_t val; + uint8_t *buf = NULL; + size_t size = 13 * sizeof(uint8_t); + int ret = QLA_SUCCESS; + + func_num = PCI_FUNC(ha->pdev->devfn); + + ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n", + __func__, ha->pdev->device, func_num); + + if (is_qla40XX(ha)) { + if (func_num == 1) { + addr = NVRAM_PORT0_BOOT_MODE; + pri_addr = NVRAM_PORT0_BOOT_PRI_TGT; + sec_addr = NVRAM_PORT0_BOOT_SEC_TGT; + } else if (func_num == 3) { + addr = NVRAM_PORT1_BOOT_MODE; + pri_addr = NVRAM_PORT1_BOOT_PRI_TGT; + sec_addr = NVRAM_PORT1_BOOT_SEC_TGT; + } else { + ret = QLA_ERROR; + goto exit_boot_info; + } + + /* Check Boot Mode */ + val = rd_nvram_byte(ha, addr); + if (!(val & 0x07)) { + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot " + "options : 0x%x\n", __func__, val)); + ret = QLA_ERROR; + goto exit_boot_info; + } + + /* get primary valid target index */ + val = rd_nvram_byte(ha, pri_addr); + if (val & BIT_7) + ddb_index[0] = (val & 0x7f); + + /* get secondary valid target index */ + val = rd_nvram_byte(ha, sec_addr); + if (val & BIT_7) + ddb_index[1] = (val & 0x7f); + goto exit_boot_info; + } else if (is_qla80XX(ha)) { + buf = dma_alloc_coherent(&ha->pdev->dev, size, + &buf_dma, GFP_KERNEL); + if (!buf) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", + __func__)); + ret = QLA_ERROR; + goto exit_boot_info; + } + + if (ha->port_num == 0) + offset = BOOT_PARAM_OFFSET_PORT0; + else if (ha->port_num == 1) + offset = BOOT_PARAM_OFFSET_PORT1; + else { + ret = QLA_ERROR; + goto exit_boot_info_free; + } + addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) + + offset; + if (qla4xxx_get_flash(ha, buf_dma, addr, + 13 * sizeof(uint8_t)) != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash" + " failed\n", ha->host_no, __func__)); + ret = QLA_ERROR; + goto exit_boot_info_free; + } + /* Check Boot Mode */ + if (!(buf[1] & 0x07)) { + DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options" + " : 0x%x\n", buf[1])); + ret = QLA_ERROR; + goto exit_boot_info_free; + } + + /* get primary valid 
target index */ + if (buf[2] & BIT_7) + ddb_index[0] = buf[2] & 0x7f; + + /* get secondary valid target index */ + if (buf[11] & BIT_7) + ddb_index[1] = buf[11] & 0x7f; + } else { + ret = QLA_ERROR; + goto exit_boot_info; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary" + " target ID %d\n", __func__, ddb_index[0], + ddb_index[1])); + +exit_boot_info_free: + dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma); +exit_boot_info: + ha->pri_ddb_idx = ddb_index[0]; + ha->sec_ddb_idx = ddb_index[1]; + return ret; +} + +/** + * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password + * @ha: pointer to adapter structure + * @username: CHAP username to be returned + * @password: CHAP password to be returned + * + * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP + * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/. + * So from the CHAP cache find the first BIDI CHAP entry and set it + * to the boot record in sysfs. + **/ +static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, + char *password) +{ + int i, ret = -EINVAL; + int max_chap_entries = 0; + struct ql4_chap_table *chap_table; + + if (is_qla80XX(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + if (!ha->chap_list) { + ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); + return ret; + } + + mutex_lock(&ha->chap_sem); + for (i = 0; i < max_chap_entries; i++) { + chap_table = (struct ql4_chap_table *)ha->chap_list + i; + if (chap_table->cookie != + cpu_to_le16(CHAP_VALID_COOKIE)) { + continue; + } + + if (chap_table->flags & BIT_7) /* local */ + continue; + + if (!(chap_table->flags & BIT_6)) /* Not BIDI */ + continue; + + strscpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); + strscpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); + ret = 0; + break; + } + mutex_unlock(&ha->chap_sem); + + return ret; +} + + +static int qla4xxx_get_boot_target(struct scsi_qla_host *ha, + struct ql4_boot_session_info *boot_sess, + uint16_t ddb_index) +{ + struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; + struct dev_db_entry *fw_ddb_entry; + dma_addr_t fw_ddb_entry_dma; + uint16_t idx; + uint16_t options; + int ret = QLA_SUCCESS; + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer.\n", + __func__)); + ret = QLA_ERROR; + return ret; + } + + if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry, + fw_ddb_entry_dma, ddb_index)) { + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at " + "index [%d]\n", __func__, ddb_index)); + ret = QLA_ERROR; + goto exit_boot_target; + } + + /* Update target name and IP from DDB */ + memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name, + min(sizeof(boot_sess->target_name), + sizeof(fw_ddb_entry->iscsi_name))); + + options = le16_to_cpu(fw_ddb_entry->options); + if (options & DDB_OPT_IPV6_DEVICE) { + memcpy(&boot_conn->dest_ipaddr.ip_address, + &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN); + } else { + boot_conn->dest_ipaddr.ip_type = 0x1; + memcpy(&boot_conn->dest_ipaddr.ip_address, + &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN); + } + + boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port); + + /* update chap information */ + idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx); + + if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { + + 
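/*
 * Illustrative sketch (not part of the upstream driver): get_fw_boot_info()
 * above reads a small boot-parameter block (13 bytes on the ISP8xxx path) in
 * which byte 1 holds the boot-mode bits and bytes 2 and 11 hold the primary
 * and secondary boot-target index, with bit 7 acting as a valid flag and the
 * low 7 bits as the index.  The stand-alone example below parses a buffer
 * laid out that way; the sample values are made up.
 *
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t buf[13] = { 0 };
    buf[1]  = 0x02;          // some boot-mode bit set
    buf[2]  = 0x80 | 5;      // primary target valid, index 5
    buf[11] = 0x00;          // secondary target not valid

    if (!(buf[1] & 0x07)) {
        puts("boot not enabled");
        return 0;
    }
    if (buf[2] & 0x80)
        printf("primary boot target index: %d\n", buf[2] & 0x7f);
    if (buf[11] & 0x80)
        printf("secondary boot target index: %d\n", buf[11] & 0x7f);
    return 0;
}
 */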
DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n")); + + ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap. + target_chap_name, + (char *)&boot_conn->chap.target_secret, + idx); + if (ret) { + ql4_printk(KERN_ERR, ha, "Failed to set chap\n"); + ret = QLA_ERROR; + goto exit_boot_target; + } + + boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN; + boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN; + } + + if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { + + DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n")); + + ret = qla4xxx_get_bidi_chap(ha, + (char *)&boot_conn->chap.intr_chap_name, + (char *)&boot_conn->chap.intr_secret); + + if (ret) { + ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n"); + ret = QLA_ERROR; + goto exit_boot_target; + } + + boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN; + boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN; + } + +exit_boot_target: + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); + return ret; +} + +static int qla4xxx_get_boot_info(struct scsi_qla_host *ha) +{ + uint16_t ddb_index[2]; + int ret = QLA_ERROR; + int rval; + + memset(ddb_index, 0, sizeof(ddb_index)); + ddb_index[0] = 0xffff; + ddb_index[1] = 0xffff; + ret = get_fw_boot_info(ha, ddb_index); + if (ret != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: No boot target configured.\n", __func__)); + return ret; + } + + if (ql4xdisablesysfsboot) + return QLA_SUCCESS; + + if (ddb_index[0] == 0xffff) + goto sec_target; + + rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess), + ddb_index[0]); + if (rval != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not " + "configured\n", __func__)); + } else + ret = QLA_SUCCESS; + +sec_target: + if (ddb_index[1] == 0xffff) + goto exit_get_boot_info; + + rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess), + ddb_index[1]); + if (rval != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not" + " configured\n", __func__)); + } else + ret = QLA_SUCCESS; + +exit_get_boot_info: + return ret; +} + +static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha) +{ + struct iscsi_boot_kobj *boot_kobj; + + if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS) + return QLA_ERROR; + + if (ql4xdisablesysfsboot) { + ql4_printk(KERN_INFO, ha, + "%s: syfsboot disabled - driver will trigger login " + "and publish session for discovery .\n", __func__); + return QLA_SUCCESS; + } + + + ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no); + if (!ha->boot_kset) + goto kset_free; + + if (!scsi_host_get(ha->host)) + goto kset_free; + boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha, + qla4xxx_show_boot_tgt_pri_info, + qla4xxx_tgt_get_attr_visibility, + qla4xxx_boot_release); + if (!boot_kobj) + goto put_host; + + if (!scsi_host_get(ha->host)) + goto kset_free; + boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha, + qla4xxx_show_boot_tgt_sec_info, + qla4xxx_tgt_get_attr_visibility, + qla4xxx_boot_release); + if (!boot_kobj) + goto put_host; + + if (!scsi_host_get(ha->host)) + goto kset_free; + boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha, + qla4xxx_show_boot_ini_info, + qla4xxx_ini_get_attr_visibility, + qla4xxx_boot_release); + if (!boot_kobj) + goto put_host; + + if (!scsi_host_get(ha->host)) + goto kset_free; + boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha, + qla4xxx_show_boot_eth_info, + qla4xxx_eth_get_attr_visibility, + 
qla4xxx_boot_release); + if (!boot_kobj) + goto put_host; + + return QLA_SUCCESS; + +put_host: + scsi_host_put(ha->host); +kset_free: + iscsi_boot_destroy_kset(ha->boot_kset); + return -ENOMEM; +} + + +static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, + struct ql4_tuple_ddb *tddb) +{ + struct iscsi_cls_session *cls_sess; + struct iscsi_cls_conn *cls_conn; + struct iscsi_session *sess; + struct iscsi_conn *conn; + + DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); + cls_sess = ddb_entry->sess; + sess = cls_sess->dd_data; + cls_conn = ddb_entry->conn; + conn = cls_conn->dd_data; + + tddb->tpgt = sess->tpgt; + tddb->port = conn->persistent_port; + strscpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); + strscpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); +} + +static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, + struct ql4_tuple_ddb *tddb, + uint8_t *flash_isid) +{ + uint16_t options = 0; + + tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); + memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], + min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name))); + + options = le16_to_cpu(fw_ddb_entry->options); + if (options & DDB_OPT_IPV6_DEVICE) + sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr); + else + sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr); + + tddb->port = le16_to_cpu(fw_ddb_entry->port); + + if (flash_isid == NULL) + memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], + sizeof(tddb->isid)); + else + memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid)); +} + +static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, + struct ql4_tuple_ddb *old_tddb, + struct ql4_tuple_ddb *new_tddb, + uint8_t is_isid_compare) +{ + if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) + return QLA_ERROR; + + if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr)) + return QLA_ERROR; + + if (old_tddb->port != new_tddb->port) + return QLA_ERROR; + + /* For multi sessions, driver generates the ISID, so do not compare + * ISID in reset path since it would be a comparison between the + * driver generated ISID and firmware generated ISID. This could + * lead to adding duplicated DDBs in the list as driver generated + * ISID would not match firmware generated ISID. 
+ */ + if (is_isid_compare) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: old ISID [%pmR] New ISID [%pmR]\n", + __func__, old_tddb->isid, new_tddb->isid)); + + if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], + sizeof(old_tddb->isid))) + return QLA_ERROR; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]", + old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr, + old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt, + new_tddb->ip_addr, new_tddb->iscsi_name)); + + return QLA_SUCCESS; +} + +static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint32_t *index) +{ + struct ddb_entry *ddb_entry; + struct ql4_tuple_ddb *fw_tddb = NULL; + struct ql4_tuple_ddb *tmp_tddb = NULL; + int idx; + int ret = QLA_ERROR; + + fw_tddb = vzalloc(sizeof(*fw_tddb)); + if (!fw_tddb) { + DEBUG2(ql4_printk(KERN_WARNING, ha, + "Memory Allocation failed.\n")); + ret = QLA_SUCCESS; + goto exit_check; + } + + tmp_tddb = vzalloc(sizeof(*tmp_tddb)); + if (!tmp_tddb) { + DEBUG2(ql4_printk(KERN_WARNING, ha, + "Memory Allocation failed.\n")); + ret = QLA_SUCCESS; + goto exit_check; + } + + qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); + + for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { + ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); + if (ddb_entry == NULL) + continue; + + qla4xxx_get_param_ddb(ddb_entry, tmp_tddb); + if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) { + ret = QLA_SUCCESS; /* found */ + if (index != NULL) + *index = idx; + goto exit_check; + } + } + +exit_check: + vfree(fw_tddb); + vfree(tmp_tddb); + return ret; +} + +/** + * qla4xxx_check_existing_isid - check if target with same isid exist + * in target list + * @list_nt: list of target + * @isid: isid to check + * + * This routine return QLA_SUCCESS if target with same isid exist + **/ +static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid) +{ + struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; + struct dev_db_entry *fw_ddb_entry; + + list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { + fw_ddb_entry = &nt_ddb_idx->fw_ddb; + + if (memcmp(&fw_ddb_entry->isid[0], &isid[0], + sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) { + return QLA_SUCCESS; + } + } + return QLA_ERROR; +} + +/** + * qla4xxx_update_isid - compare ddbs and updated isid + * @ha: Pointer to host adapter structure. + * @list_nt: list of nt target + * @fw_ddb_entry: firmware ddb entry + * + * This routine update isid if ddbs have same iqn, same isid and + * different IP addr. + * Return QLA_SUCCESS if isid is updated. + **/ +static int qla4xxx_update_isid(struct scsi_qla_host *ha, + struct list_head *list_nt, + struct dev_db_entry *fw_ddb_entry) +{ + uint8_t base_value, i; + + base_value = fw_ddb_entry->isid[1] & 0x1f; + for (i = 0; i < 8; i++) { + fw_ddb_entry->isid[1] = (base_value | (i << 5)); + if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) + break; + } + + if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) + return QLA_ERROR; + + return QLA_SUCCESS; +} + +/** + * qla4xxx_should_update_isid - check if isid need to update + * @ha: Pointer to host adapter structure. 
+ * @old_tddb: ddb tuple + * @new_tddb: ddb tuple + * + * Return QLA_SUCCESS if different IP, different PORT, same iqn, + * same isid + **/ +static int qla4xxx_should_update_isid(struct scsi_qla_host *ha, + struct ql4_tuple_ddb *old_tddb, + struct ql4_tuple_ddb *new_tddb) +{ + if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) { + /* Same ip */ + if (old_tddb->port == new_tddb->port) + return QLA_ERROR; + } + + if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) + /* different iqn */ + return QLA_ERROR; + + if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], + sizeof(old_tddb->isid))) + /* different isid */ + return QLA_ERROR; + + return QLA_SUCCESS; +} + +/** + * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt + * @ha: Pointer to host adapter structure. + * @list_nt: list of nt target. + * @fw_ddb_entry: firmware ddb entry. + * + * This routine check if fw_ddb_entry already exists in list_nt to avoid + * duplicate ddb in list_nt. + * Return QLA_SUCCESS if duplicate ddb exit in list_nl. + * Note: This function also update isid of DDB if required. + **/ + +static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha, + struct list_head *list_nt, + struct dev_db_entry *fw_ddb_entry) +{ + struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; + struct ql4_tuple_ddb *fw_tddb = NULL; + struct ql4_tuple_ddb *tmp_tddb = NULL; + int rval, ret = QLA_ERROR; + + fw_tddb = vzalloc(sizeof(*fw_tddb)); + if (!fw_tddb) { + DEBUG2(ql4_printk(KERN_WARNING, ha, + "Memory Allocation failed.\n")); + ret = QLA_SUCCESS; + goto exit_check; + } + + tmp_tddb = vzalloc(sizeof(*tmp_tddb)); + if (!tmp_tddb) { + DEBUG2(ql4_printk(KERN_WARNING, ha, + "Memory Allocation failed.\n")); + ret = QLA_SUCCESS; + goto exit_check; + } + + qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); + + list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { + qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, + nt_ddb_idx->flash_isid); + ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true); + /* found duplicate ddb */ + if (ret == QLA_SUCCESS) + goto exit_check; + } + + list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { + qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL); + + ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb); + if (ret == QLA_SUCCESS) { + rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry); + if (rval == QLA_SUCCESS) + ret = QLA_ERROR; + else + ret = QLA_SUCCESS; + + goto exit_check; + } + } + +exit_check: + vfree(fw_tddb); + vfree(tmp_tddb); + return ret; +} + +static void qla4xxx_free_ddb_list(struct list_head *list_ddb) +{ + struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; + + list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { + list_del_init(&ddb_idx->list); + vfree(ddb_idx); + } +} + +static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry) +{ + struct iscsi_endpoint *ep; + struct sockaddr_in *addr; + struct sockaddr_in6 *addr6; + struct sockaddr *t_addr; + struct sockaddr_storage *dst_addr; + char *ip; + + /* TODO: need to destroy on unload iscsi_endpoint*/ + dst_addr = vmalloc(sizeof(*dst_addr)); + if (!dst_addr) + return NULL; + + if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) { + t_addr = (struct sockaddr *)dst_addr; + t_addr->sa_family = AF_INET6; + addr6 = (struct sockaddr_in6 *)dst_addr; + ip = (char *)&addr6->sin6_addr; + memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); + addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port)); + + 
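/*
 * Illustrative sketch (not part of the upstream driver): qla4xxx_get_ep_fwdb()
 * just above builds the destination address for qla4xxx_ep_connect() by
 * copying the raw IPv4 or IPv6 bytes from the DDB into a sockaddr_storage and
 * setting the port in network byte order.  The stand-alone user-space example
 * below does the same; fill_dst and the sample address and port are invented
 * for the example.
 *
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

static void fill_dst(struct sockaddr_storage *dst, const unsigned char *ip,
                     int is_ipv6, unsigned short port)
{
    memset(dst, 0, sizeof(*dst));
    if (is_ipv6) {
        struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)dst;
        a6->sin6_family = AF_INET6;
        memcpy(&a6->sin6_addr, ip, 16);
        a6->sin6_port = htons(port);
    } else {
        struct sockaddr_in *a4 = (struct sockaddr_in *)dst;
        a4->sin_family = AF_INET;
        memcpy(&a4->sin_addr, ip, 4);
        a4->sin_port = htons(port);
    }
}

int main(void)
{
    const unsigned char v4[4] = { 192, 168, 0, 10 };   // example address
    struct sockaddr_storage dst;
    char text[INET6_ADDRSTRLEN];

    fill_dst(&dst, v4, 0, 3260);
    inet_ntop(AF_INET, &((struct sockaddr_in *)&dst)->sin_addr,
              text, sizeof(text));
    printf("destination %s:%d\n", text, 3260);
    return 0;
}
 */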
} else { + t_addr = (struct sockaddr *)dst_addr; + t_addr->sa_family = AF_INET; + addr = (struct sockaddr_in *)dst_addr; + ip = (char *)&addr->sin_addr; + memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN); + addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port)); + } + + ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); + vfree(dst_addr); + return ep; +} + +static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx) +{ + if (ql4xdisablesysfsboot) + return QLA_SUCCESS; + if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx) + return QLA_ERROR; + return QLA_SUCCESS; +} + +static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry, + uint16_t idx) +{ + uint16_t def_timeout; + + ddb_entry->ddb_type = FLASH_DDB; + ddb_entry->fw_ddb_index = INVALID_ENTRY; + ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; + ddb_entry->ha = ha; + ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb; + ddb_entry->ddb_change = qla4xxx_flash_ddb_change; + ddb_entry->chap_tbl_idx = INVALID_ENTRY; + + atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); + atomic_set(&ddb_entry->relogin_timer, 0); + atomic_set(&ddb_entry->relogin_retry_count, 0); + def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); + ddb_entry->default_relogin_timeout = + (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ? + def_timeout : LOGIN_TOV; + ddb_entry->default_time2wait = + le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait); + + if (ql4xdisablesysfsboot && + (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)) + set_bit(DF_BOOT_TGT, &ddb_entry->flags); +} + +static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha) +{ + uint32_t idx = 0; + uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */ + uint32_t sts[MBOX_REG_COUNT]; + uint32_t ip_state; + unsigned long wtime; + int ret; + + wtime = jiffies + (HZ * IP_CONFIG_TOV); + do { + for (idx = 0; idx < IP_ADDR_COUNT; idx++) { + if (ip_idx[idx] == -1) + continue; + + ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts); + + if (ret == QLA_ERROR) { + ip_idx[idx] = -1; + continue; + } + + ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Waiting for IP state for idx = %d, state = 0x%x\n", + ip_idx[idx], ip_state)); + if (ip_state == IP_ADDRSTATE_UNCONFIGURED || + ip_state == IP_ADDRSTATE_INVALID || + ip_state == IP_ADDRSTATE_PREFERRED || + ip_state == IP_ADDRSTATE_DEPRICATED || + ip_state == IP_ADDRSTATE_DISABLING) + ip_idx[idx] = -1; + } + + /* Break if all IP states checked */ + if ((ip_idx[0] == -1) && + (ip_idx[1] == -1) && + (ip_idx[2] == -1) && + (ip_idx[3] == -1)) + break; + schedule_timeout_uninterruptible(HZ); + } while (time_after(wtime, jiffies)); +} + +static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry, + struct dev_db_entry *flash_ddb_entry) +{ + uint16_t options = 0; + size_t ip_len = IP_ADDR_LEN; + + options = le16_to_cpu(fw_ddb_entry->options); + if (options & DDB_OPT_IPV6_DEVICE) + ip_len = IPv6_ADDR_LEN; + + if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len)) + return QLA_ERROR; + + if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0], + sizeof(fw_ddb_entry->isid))) + return QLA_ERROR; + + if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port, + sizeof(fw_ddb_entry->port))) + return QLA_ERROR; + + return QLA_SUCCESS; +} + +static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint32_t fw_idx, uint32_t 
*flash_index) +{ + struct dev_db_entry *flash_ddb_entry; + dma_addr_t flash_ddb_entry_dma; + uint32_t idx = 0; + int max_ddbs; + int ret = QLA_ERROR, status; + + max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : + MAX_DEV_DB_ENTRIES; + + flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, + &flash_ddb_entry_dma); + if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) { + ql4_printk(KERN_ERR, ha, "Out of memory\n"); + goto exit_find_st_idx; + } + + status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, + flash_ddb_entry_dma, fw_idx); + if (status == QLA_SUCCESS) { + status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); + if (status == QLA_SUCCESS) { + *flash_index = fw_idx; + ret = QLA_SUCCESS; + goto exit_find_st_idx; + } + } + + for (idx = 0; idx < max_ddbs; idx++) { + status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, + flash_ddb_entry_dma, idx); + if (status == QLA_ERROR) + continue; + + status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); + if (status == QLA_SUCCESS) { + *flash_index = idx; + ret = QLA_SUCCESS; + goto exit_find_st_idx; + } + } + + if (idx == max_ddbs) + ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n", + fw_idx); + +exit_find_st_idx: + if (flash_ddb_entry) + dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry, + flash_ddb_entry_dma); + + return ret; +} + +static void qla4xxx_build_st_list(struct scsi_qla_host *ha, + struct list_head *list_st) +{ + struct qla_ddb_index *st_ddb_idx; + int max_ddbs; + int fw_idx_size; + struct dev_db_entry *fw_ddb_entry; + dma_addr_t fw_ddb_dma; + int ret; + uint32_t idx = 0, next_idx = 0; + uint32_t state = 0, conn_err = 0; + uint32_t flash_index = -1; + uint16_t conn_id = 0; + + fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, + &fw_ddb_dma); + if (fw_ddb_entry == NULL) { + DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); + goto exit_st_list; + } + + max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : + MAX_DEV_DB_ENTRIES; + fw_idx_size = sizeof(struct qla_ddb_index); + + for (idx = 0; idx < max_ddbs; idx = next_idx) { + ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, + NULL, &next_idx, &state, + &conn_err, NULL, &conn_id); + if (ret == QLA_ERROR) + break; + + /* Ignore DDB if invalid state (unassigned) */ + if (state == DDB_DS_UNASSIGNED) + goto continue_next_st; + + /* Check if ST, add to the list_st */ + if (strlen((char *) fw_ddb_entry->iscsi_name) != 0) + goto continue_next_st; + + st_ddb_idx = vzalloc(fw_idx_size); + if (!st_ddb_idx) + break; + + ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx, + &flash_index); + if (ret == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, + "No flash entry for ST at idx [%d]\n", idx); + st_ddb_idx->flash_ddb_idx = idx; + } else { + ql4_printk(KERN_INFO, ha, + "ST at idx [%d] is stored at flash [%d]\n", + idx, flash_index); + st_ddb_idx->flash_ddb_idx = flash_index; + } + + st_ddb_idx->fw_ddb_idx = idx; + + list_add_tail(&st_ddb_idx->list, list_st); +continue_next_st: + if (next_idx == 0) + break; + } + +exit_st_list: + if (fw_ddb_entry) + dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); +} + +/** + * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list + * @ha: pointer to adapter structure + * @list_ddb: List from which failed ddb to be removed + * + * Iterate over the list of DDBs and find and remove DDBs that are either in + * no connection active state or failed state + **/ +static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha, + struct list_head *list_ddb) +{ + struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; + uint32_t next_idx = 0; + uint32_t state = 0, conn_err = 0; + int ret; + + list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { + ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx, + NULL, 0, NULL, &next_idx, &state, + &conn_err, NULL, NULL); + if (ret == QLA_ERROR) + continue; + + if (state == DDB_DS_NO_CONNECTION_ACTIVE || + state == DDB_DS_SESSION_FAILED) { + list_del_init(&ddb_idx->list); + vfree(ddb_idx); + } + } +} + +static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry, + struct dev_db_entry *fw_ddb_entry) +{ + struct iscsi_cls_session *cls_sess; + struct iscsi_session *sess; + uint32_t max_ddbs = 0; + uint16_t ddb_link = -1; + + max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : + MAX_DEV_DB_ENTRIES; + + cls_sess = ddb_entry->sess; + sess = cls_sess->dd_data; + + ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); + if (ddb_link < max_ddbs) + sess->discovery_parent_idx = ddb_link; + else + sess->discovery_parent_idx = DDB_NO_LINK; +} + +static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + int is_reset, uint16_t idx) +{ + struct iscsi_cls_session *cls_sess; + struct iscsi_session *sess; + struct iscsi_cls_conn *cls_conn; + struct iscsi_endpoint *ep; + uint16_t cmds_max = 32; + uint16_t conn_id = 0; + uint32_t initial_cmdsn = 0; + int ret = QLA_SUCCESS; + + struct ddb_entry *ddb_entry = NULL; + + /* Create session object, with INVALID_ENTRY, + * the targer_id would get set when we issue the login + */ + cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host, + cmds_max, sizeof(struct ddb_entry), + sizeof(struct ql4_task_data), + initial_cmdsn, INVALID_ENTRY); + if (!cls_sess) { + ret = QLA_ERROR; + goto exit_setup; + } + + /* + * so calling module_put function to decrement the + * reference count. 
+ **/ + module_put(qla4xxx_iscsi_transport.owner); + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ddb_entry->sess = cls_sess; + + cls_sess->recovery_tmo = ql4xsess_recovery_tmo; + memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry, + sizeof(struct dev_db_entry)); + + qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx); + + cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id); + + if (!cls_conn) { + ret = QLA_ERROR; + goto exit_setup; + } + + ddb_entry->conn = cls_conn; + + /* Setup ep, for displaying attributes in sysfs */ + ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry); + if (ep) { + ep->conn = cls_conn; + cls_conn->ep = ep; + } else { + DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n")); + ret = QLA_ERROR; + goto exit_setup; + } + + /* Update sess/conn params */ + qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); + qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); + + if (is_reset == RESET_ADAPTER) { + iscsi_block_session(cls_sess); + /* Use the relogin path to discover new devices + * by short-circuiting the logic of setting + * timer to relogin - instead set the flags + * to initiate login right away. + */ + set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); + set_bit(DF_RELOGIN, &ddb_entry->flags); + } + +exit_setup: + return ret; +} + +static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha, + struct list_head *list_ddb, + struct dev_db_entry *fw_ddb_entry) +{ + struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; + uint16_t ddb_link; + + ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); + + list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { + if (ddb_idx->fw_ddb_idx == ddb_link) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "Updating NT parent idx from [%d] to [%d]\n", + ddb_link, ddb_idx->flash_ddb_idx)); + fw_ddb_entry->ddb_link = + cpu_to_le16(ddb_idx->flash_ddb_idx); + return; + } + } +} + +static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, + struct list_head *list_nt, + struct list_head *list_st, + int is_reset) +{ + struct dev_db_entry *fw_ddb_entry; + struct ddb_entry *ddb_entry = NULL; + dma_addr_t fw_ddb_dma; + int max_ddbs; + int fw_idx_size; + int ret; + uint32_t idx = 0, next_idx = 0; + uint32_t state = 0, conn_err = 0; + uint32_t ddb_idx = -1; + uint16_t conn_id = 0; + uint16_t ddb_link = -1; + struct qla_ddb_index *nt_ddb_idx; + + fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, + &fw_ddb_dma); + if (fw_ddb_entry == NULL) { + DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); + goto exit_nt_list; + } + max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : + MAX_DEV_DB_ENTRIES; + fw_idx_size = sizeof(struct qla_ddb_index); + + for (idx = 0; idx < max_ddbs; idx = next_idx) { + ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, + NULL, &next_idx, &state, + &conn_err, NULL, &conn_id); + if (ret == QLA_ERROR) + break; + + if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS) + goto continue_next_nt; + + /* Check if NT, then add to list it */ + if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) + goto continue_next_nt; + + ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); + if (ddb_link < max_ddbs) + qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry); + + if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || + state == DDB_DS_SESSION_FAILED) && + (is_reset == INIT_ADAPTER)) + goto continue_next_nt; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Adding DDB to session = 0x%x\n", idx)); + + if (is_reset == INIT_ADAPTER) { + nt_ddb_idx = vmalloc(fw_idx_size); + if (!nt_ddb_idx) + break; + + nt_ddb_idx->fw_ddb_idx = idx; + + /* Copy original isid as it may get updated in function + * qla4xxx_update_isid(). We need original isid in + * function qla4xxx_compare_tuple_ddb to find duplicate + * target */ + memcpy(&nt_ddb_idx->flash_isid[0], + &fw_ddb_entry->isid[0], + sizeof(nt_ddb_idx->flash_isid)); + + ret = qla4xxx_is_flash_ddb_exists(ha, list_nt, + fw_ddb_entry); + if (ret == QLA_SUCCESS) { + /* free nt_ddb_idx and do not add to list_nt */ + vfree(nt_ddb_idx); + goto continue_next_nt; + } + + /* Copy updated isid */ + memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, + sizeof(struct dev_db_entry)); + + list_add_tail(&nt_ddb_idx->list, list_nt); + } else if (is_reset == RESET_ADAPTER) { + ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, + &ddb_idx); + if (ret == QLA_SUCCESS) { + ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, + ddb_idx); + if (ddb_entry != NULL) + qla4xxx_update_sess_disc_idx(ha, + ddb_entry, + fw_ddb_entry); + goto continue_next_nt; + } + } + + ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx); + if (ret == QLA_ERROR) + goto exit_nt_list; + +continue_next_nt: + if (next_idx == 0) + break; + } + +exit_nt_list: + if (fw_ddb_entry) + dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); +} + +static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, + struct list_head *list_nt, + uint16_t target_id) +{ + struct dev_db_entry *fw_ddb_entry; + dma_addr_t fw_ddb_dma; + int max_ddbs; + int fw_idx_size; + int ret; + uint32_t idx = 0, next_idx = 0; + uint32_t state = 0, conn_err = 0; + uint16_t conn_id = 0; + struct qla_ddb_index *nt_ddb_idx; + + fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, + &fw_ddb_dma); + if (fw_ddb_entry == NULL) { + DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); + goto exit_new_nt_list; + } + max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : + MAX_DEV_DB_ENTRIES; + fw_idx_size = sizeof(struct qla_ddb_index); + + for (idx = 0; idx < max_ddbs; idx = next_idx) { + ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, + NULL, &next_idx, &state, + &conn_err, NULL, &conn_id); + if (ret == QLA_ERROR) + break; + + /* Check if NT, then add it to list */ + if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) + goto continue_next_new_nt; + + if (!(state == DDB_DS_NO_CONNECTION_ACTIVE)) + goto continue_next_new_nt; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Adding DDB to session = 0x%x\n", idx)); + + nt_ddb_idx = vmalloc(fw_idx_size); + if (!nt_ddb_idx) + break; + + nt_ddb_idx->fw_ddb_idx = idx; + + ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); + if (ret == QLA_SUCCESS) { + /* free nt_ddb_idx and do not add to list_nt */ + vfree(nt_ddb_idx); + goto continue_next_new_nt; + } + + if (target_id < max_ddbs) + fw_ddb_entry->ddb_link = cpu_to_le16(target_id); + + list_add_tail(&nt_ddb_idx->list, list_nt); + + ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, + idx); + if (ret == QLA_ERROR) + goto exit_new_nt_list; + +continue_next_new_nt: + if (next_idx == 0) + break; + } + +exit_new_nt_list: + if (fw_ddb_entry) + dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); +} + +/** + * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry + * @dev: dev associated with the sysfs entry + * @data: pointer to flashnode session object + * + * Returns: + * 1: if flashnode entry is non-persistent + * 0: if flashnode entry is persistent + **/ +static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) +{ + struct iscsi_bus_flash_session *fnode_sess; + + if (!iscsi_flashnode_bus_match(dev, NULL)) + return 0; + + fnode_sess = iscsi_dev_to_flash_session(dev); + + return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT); +} + +/** + * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target + * @ha: pointer to host + * @fw_ddb_entry: flash ddb data + * @idx: target index + * @user: if set then this call is made from userland else from kernel + * + * Returns: + * On sucess: QLA_SUCCESS + * On failure: QLA_ERROR + * + * This create separate sysfs entries for session and connection attributes of + * the given fw ddb entry. + * If this is invoked as a result of a userspace call then the entry is marked + * as nonpersistent using flash_state field. 
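 *
 * Editor's note (not in the upstream source): @idx is passed by pointer;
 * qla4xxx_sysfs_ddb_export() feeds its loop counter straight into this
 * helper for every valid flash DDB, while qla4xxx_sysfs_ddb_add() passes
 * the first free flash index.  When @user is set (the sysfs "add" path),
 * the new entry starts out as DEV_DB_NON_PERSISTENT and only becomes
 * persistent once qla4xxx_sysfs_ddb_apply() writes it back to flash with
 * a valid cookie.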
+ **/ +static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint16_t *idx, int user) +{ + struct iscsi_bus_flash_session *fnode_sess = NULL; + struct iscsi_bus_flash_conn *fnode_conn = NULL; + int rc = QLA_ERROR; + + fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx, + &qla4xxx_iscsi_transport, 0); + if (!fnode_sess) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n", + __func__, *idx, ha->host_no); + goto exit_tgt_create; + } + + fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess, + &qla4xxx_iscsi_transport, 0); + if (!fnode_conn) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n", + __func__, *idx, ha->host_no); + goto free_sess; + } + + if (user) { + fnode_sess->flash_state = DEV_DB_NON_PERSISTENT; + } else { + fnode_sess->flash_state = DEV_DB_PERSISTENT; + + if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx) + fnode_sess->is_boot_target = 1; + else + fnode_sess->is_boot_target = 0; + } + + rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, + fw_ddb_entry); + if (rc) + goto free_sess; + + ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", + __func__, fnode_sess->dev.kobj.name); + + ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", + __func__, fnode_conn->dev.kobj.name); + + return QLA_SUCCESS; + +free_sess: + iscsi_destroy_flashnode_sess(fnode_sess); + +exit_tgt_create: + return QLA_ERROR; +} + +/** + * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash + * @shost: pointer to host + * @buf: type of ddb entry (ipv4/ipv6) + * @len: length of buf + * + * This creates new ddb entry in the flash by finding first free index and + * storing default ddb there. And then create sysfs entry for the new ddb entry. + **/ +static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, + int len) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + struct device *dev; + uint16_t idx = 0; + uint16_t max_ddbs = 0; + uint32_t options = 0; + uint32_t rval = QLA_ERROR; + + if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) && + strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) { + DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n", + __func__)); + goto exit_ddb_add; + } + + max_ddbs = is_qla40XX(ha) ? 
MAX_PRST_DEV_DB_ENTRIES : + MAX_DEV_DB_ENTRIES; + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", + __func__)); + goto exit_ddb_add; + } + + dev = iscsi_find_flashnode_sess(ha->host, NULL, + qla4xxx_sysfs_ddb_is_non_persistent); + if (dev) { + ql4_printk(KERN_ERR, ha, + "%s: A non-persistent entry %s found\n", + __func__, dev->kobj.name); + put_device(dev); + goto exit_ddb_add; + } + + /* Index 0 and 1 are reserved for boot target entries */ + for (idx = 2; idx < max_ddbs; idx++) { + if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, + fw_ddb_entry_dma, idx)) + break; + } + + if (idx == max_ddbs) + goto exit_ddb_add; + + if (!strncasecmp("ipv6", buf, 4)) + options |= IPV6_DEFAULT_DDB_ENTRY; + + rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); + if (rval == QLA_ERROR) + goto exit_ddb_add; + + rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1); + +exit_ddb_add: + if (fw_ddb_entry) + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); + if (rval == QLA_SUCCESS) + return idx; + else + return -EIO; +} + +/** + * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash + * @fnode_sess: pointer to session attrs of flash ddb entry + * @fnode_conn: pointer to connection attrs of flash ddb entry + * + * This writes the contents of target ddb buffer to Flash with a valid cookie + * value in order to make the ddb entry persistent. + **/ +static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_bus_flash_conn *fnode_conn) +{ + struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); + struct scsi_qla_host *ha = to_qla_host(shost); + uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO; + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + uint32_t options = 0; + int rval = 0; + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", + __func__)); + rval = -ENOMEM; + goto exit_ddb_apply; + } + + if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) + options |= IPV6_DEFAULT_DDB_ENTRY; + + rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); + if (rval == QLA_ERROR) + goto exit_ddb_apply; + + dev_db_start_offset += (fnode_sess->target_id * + sizeof(*fw_ddb_entry)); + + qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); + fw_ddb_entry->cookie = DDB_VALID_COOKIE; + + rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, + sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT); + + if (rval == QLA_SUCCESS) { + fnode_sess->flash_state = DEV_DB_PERSISTENT; + ql4_printk(KERN_INFO, ha, + "%s: flash node %u of host %lu written to flash\n", + __func__, fnode_sess->target_id, ha->host_no); + } else { + rval = -EIO; + ql4_printk(KERN_ERR, ha, + "%s: Error while writing flash node %u of host %lu to flash\n", + __func__, fnode_sess->target_id, ha->host_no); + } + +exit_ddb_apply: + if (fw_ddb_entry) + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); + return rval; +} + +static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint16_t idx) +{ + struct dev_db_entry *ddb_entry = NULL; + dma_addr_t ddb_entry_dma; + unsigned long wtime; + uint32_t 
mbx_sts = 0; + uint32_t state = 0, conn_err = 0; + uint16_t tmo = 0; + int ret = 0; + + ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry), + &ddb_entry_dma, GFP_KERNEL); + if (!ddb_entry) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", + __func__)); + return QLA_ERROR; + } + + memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry)); + + ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts); + if (ret != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to set ddb entry for index %d\n", + __func__, idx)); + goto exit_ddb_conn_open; + } + + qla4xxx_conn_open(ha, idx); + + /* To ensure that sendtargets is done, wait for at least 12 secs */ + tmo = ((ha->def_timeout > LOGIN_TOV) && + (ha->def_timeout < LOGIN_TOV * 10) ? + ha->def_timeout : LOGIN_TOV); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Default time to wait for login to ddb %d\n", tmo)); + + wtime = jiffies + (HZ * tmo); + do { + ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL, + NULL, &state, &conn_err, NULL, + NULL); + if (ret == QLA_ERROR) + continue; + + if (state == DDB_DS_NO_CONNECTION_ACTIVE || + state == DDB_DS_SESSION_FAILED) + break; + + schedule_timeout_uninterruptible(HZ / 10); + } while (time_after(wtime, jiffies)); + +exit_ddb_conn_open: + if (ddb_entry) + dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry), + ddb_entry, ddb_entry_dma); + return ret; +} + +static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint16_t target_id) +{ + struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; + struct list_head list_nt; + uint16_t ddb_index; + int ret = 0; + + if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) { + ql4_printk(KERN_WARNING, ha, + "%s: A discovery already in progress!\n", __func__); + return QLA_ERROR; + } + + INIT_LIST_HEAD(&list_nt); + + set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); + + ret = qla4xxx_get_ddb_index(ha, &ddb_index); + if (ret == QLA_ERROR) + goto exit_login_st_clr_bit; + + ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index); + if (ret == QLA_ERROR) + goto exit_login_st; + + qla4xxx_build_new_nt_list(ha, &list_nt, target_id); + + list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) { + list_del_init(&ddb_idx->list); + qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx); + vfree(ddb_idx); + } + +exit_login_st: + if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, + "Unable to clear DDB index = 0x%x\n", ddb_index); + } + + clear_bit(ddb_index, ha->ddb_idx_map); + +exit_login_st_clr_bit: + clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); + return ret; +} + +static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint16_t idx) +{ + int ret = QLA_ERROR; + + ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); + if (ret != QLA_SUCCESS) + ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, + idx); + else + ret = -EPERM; + + return ret; +} + +/** + * qla4xxx_sysfs_ddb_login - Login to the specified target + * @fnode_sess: pointer to session attrs of flash ddb entry + * @fnode_conn: pointer to connection attrs of flash ddb entry + * + * This logs in to the specified target + **/ +static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_bus_flash_conn *fnode_conn) +{ + struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); + struct scsi_qla_host *ha = to_qla_host(shost); + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + 
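	/*
	 * Editor's note (not in the upstream source): this login entry point
	 * dispatches on the target name copied into the firmware DDB template
	 * below.  An empty iscsi_name marks a SendTargets (ST) discovery
	 * entry, so qla4xxx_ddb_login_st() opens a temporary connection,
	 * harvests the discovered normal targets via
	 * qla4xxx_build_new_nt_list() and then clears the temporary DDB
	 * again; a non-empty name is a normal target (NT) and is logged in
	 * directly through qla4xxx_ddb_login_nt(), which refuses to create a
	 * duplicate of an already existing session.
	 */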
uint32_t options = 0; + int ret = 0; + + if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) { + ql4_printk(KERN_ERR, ha, + "%s: Target info is not persistent\n", __func__); + ret = -EIO; + goto exit_ddb_login; + } + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", + __func__)); + ret = -ENOMEM; + goto exit_ddb_login; + } + + if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) + options |= IPV6_DEFAULT_DDB_ENTRY; + + ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); + if (ret == QLA_ERROR) + goto exit_ddb_login; + + qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); + fw_ddb_entry->cookie = DDB_VALID_COOKIE; + + if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) + ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry, + fnode_sess->target_id); + else + ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry, + fnode_sess->target_id); + + if (ret > 0) + ret = -EIO; + +exit_ddb_login: + if (fw_ddb_entry) + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); + return ret; +} + +/** + * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target + * @cls_sess: pointer to session to be logged out + * + * This performs session log out from the specified target + **/ +static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess) +{ + struct iscsi_session *sess; + struct ddb_entry *ddb_entry = NULL; + struct scsi_qla_host *ha; + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + unsigned long flags; + unsigned long wtime; + uint32_t ddb_state; + int options; + int ret = 0; + + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + + if (ddb_entry->ddb_type != FLASH_DDB) { + ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n", + __func__); + ret = -ENXIO; + goto exit_ddb_logout; + } + + if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { + ql4_printk(KERN_ERR, ha, + "%s: Logout from boot target entry is not permitted.\n", + __func__); + ret = -EPERM; + goto exit_ddb_logout; + } + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", __func__); + ret = -ENOMEM; + goto exit_ddb_logout; + } + + if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) + goto ddb_logout_init; + + ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, + fw_ddb_entry, fw_ddb_entry_dma, + NULL, NULL, &ddb_state, NULL, + NULL, NULL); + if (ret == QLA_ERROR) + goto ddb_logout_init; + + if (ddb_state == DDB_DS_SESSION_ACTIVE) + goto ddb_logout_init; + + /* wait until next relogin is triggered using DF_RELOGIN and + * clear DF_RELOGIN to avoid invocation of further relogin + */ + wtime = jiffies + (HZ * RELOGIN_TOV); + do { + if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags)) + goto ddb_logout_init; + + schedule_timeout_uninterruptible(HZ); + } while ((time_after(wtime, jiffies))); + +ddb_logout_init: + atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); + atomic_set(&ddb_entry->relogin_timer, 0); + + options = LOGOUT_OPTION_CLOSE_SESSION; + qla4xxx_session_logout_ddb(ha, ddb_entry, options); + + memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); + wtime = jiffies + (HZ * LOGOUT_TOV); + do { + ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, + fw_ddb_entry, 
fw_ddb_entry_dma, + NULL, NULL, &ddb_state, NULL, + NULL, NULL); + if (ret == QLA_ERROR) + goto ddb_logout_clr_sess; + + if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || + (ddb_state == DDB_DS_SESSION_FAILED)) + goto ddb_logout_clr_sess; + + schedule_timeout_uninterruptible(HZ); + } while ((time_after(wtime, jiffies))); + +ddb_logout_clr_sess: + qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); + /* + * we have decremented the reference count of the driver + * when we setup the session to have the driver unload + * to be seamless without actually destroying the + * session + **/ + try_module_get(qla4xxx_iscsi_transport.owner); + iscsi_destroy_endpoint(ddb_entry->conn->ep); + + spin_lock_irqsave(&ha->hardware_lock, flags); + qla4xxx_free_ddb(ha, ddb_entry); + clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + iscsi_session_teardown(ddb_entry->sess); + + clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags); + ret = QLA_SUCCESS; + +exit_ddb_logout: + if (fw_ddb_entry) + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); + return ret; +} + +/** + * qla4xxx_sysfs_ddb_logout - Logout from the specified target + * @fnode_sess: pointer to session attrs of flash ddb entry + * @fnode_conn: pointer to connection attrs of flash ddb entry + * + * This performs log out from the specified target + **/ +static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_bus_flash_conn *fnode_conn) +{ + struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); + struct scsi_qla_host *ha = to_qla_host(shost); + struct ql4_tuple_ddb *flash_tddb = NULL; + struct ql4_tuple_ddb *tmp_tddb = NULL; + struct dev_db_entry *fw_ddb_entry = NULL; + struct ddb_entry *ddb_entry = NULL; + dma_addr_t fw_ddb_dma; + uint32_t next_idx = 0; + uint32_t state = 0, conn_err = 0; + uint16_t conn_id = 0; + int idx, index; + int status, ret = 0; + + fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, + &fw_ddb_dma); + if (fw_ddb_entry == NULL) { + ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__); + ret = -ENOMEM; + goto exit_ddb_logout; + } + + flash_tddb = vzalloc(sizeof(*flash_tddb)); + if (!flash_tddb) { + ql4_printk(KERN_WARNING, ha, + "%s:Memory Allocation failed.\n", __func__); + ret = -ENOMEM; + goto exit_ddb_logout; + } + + tmp_tddb = vzalloc(sizeof(*tmp_tddb)); + if (!tmp_tddb) { + ql4_printk(KERN_WARNING, ha, + "%s:Memory Allocation failed.\n", __func__); + ret = -ENOMEM; + goto exit_ddb_logout; + } + + if (!fnode_sess->targetname) { + ql4_printk(KERN_ERR, ha, + "%s:Cannot logout from SendTarget entry\n", + __func__); + ret = -EPERM; + goto exit_ddb_logout; + } + + if (fnode_sess->is_boot_target) { + ql4_printk(KERN_ERR, ha, + "%s: Logout from boot target entry is not permitted.\n", + __func__); + ret = -EPERM; + goto exit_ddb_logout; + } + + strscpy(flash_tddb->iscsi_name, fnode_sess->targetname, + ISCSI_NAME_SIZE); + + if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) + sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress); + else + sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress); + + flash_tddb->tpgt = fnode_sess->tpgt; + flash_tddb->port = fnode_conn->port; + + COPY_ISID(flash_tddb->isid, fnode_sess->isid); + + for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { + ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); + if (ddb_entry == NULL) + continue; + + if (ddb_entry->ddb_type != FLASH_DDB) + continue; + + index = 
ddb_entry->sess->target_id; + status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry, + fw_ddb_dma, NULL, &next_idx, + &state, &conn_err, NULL, + &conn_id); + if (status == QLA_ERROR) { + ret = -ENOMEM; + break; + } + + qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL); + + status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb, + true); + if (status == QLA_SUCCESS) { + ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess); + break; + } + } + + if (idx == MAX_DDB_ENTRIES) + ret = -ESRCH; + +exit_ddb_logout: + vfree(flash_tddb); + vfree(tmp_tddb); + if (fw_ddb_entry) + dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); + + return ret; +} + +static int +qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, + int param, char *buf) +{ + struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); + struct scsi_qla_host *ha = to_qla_host(shost); + struct iscsi_bus_flash_conn *fnode_conn; + struct ql4_chap_table chap_tbl; + struct device *dev; + int parent_type; + int rc = 0; + + dev = iscsi_find_flashnode_conn(fnode_sess); + if (!dev) + return -EIO; + + fnode_conn = iscsi_dev_to_flash_conn(dev); + + switch (param) { + case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: + rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6); + break; + case ISCSI_FLASHNODE_PORTAL_TYPE: + rc = sprintf(buf, "%s\n", fnode_sess->portal_type); + break; + case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: + rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable); + break; + case ISCSI_FLASHNODE_DISCOVERY_SESS: + rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess); + break; + case ISCSI_FLASHNODE_ENTRY_EN: + rc = sprintf(buf, "%u\n", fnode_sess->entry_state); + break; + case ISCSI_FLASHNODE_HDR_DGST_EN: + rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en); + break; + case ISCSI_FLASHNODE_DATA_DGST_EN: + rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en); + break; + case ISCSI_FLASHNODE_IMM_DATA_EN: + rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en); + break; + case ISCSI_FLASHNODE_INITIAL_R2T_EN: + rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en); + break; + case ISCSI_FLASHNODE_DATASEQ_INORDER: + rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en); + break; + case ISCSI_FLASHNODE_PDU_INORDER: + rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en); + break; + case ISCSI_FLASHNODE_CHAP_AUTH_EN: + rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en); + break; + case ISCSI_FLASHNODE_SNACK_REQ_EN: + rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en); + break; + case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: + rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en); + break; + case ISCSI_FLASHNODE_BIDI_CHAP_EN: + rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en); + break; + case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: + rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional); + break; + case ISCSI_FLASHNODE_ERL: + rc = sprintf(buf, "%u\n", fnode_sess->erl); + break; + case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: + rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat); + break; + case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: + rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable); + break; + case ISCSI_FLASHNODE_TCP_WSF_DISABLE: + rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable); + break; + case ISCSI_FLASHNODE_TCP_TIMER_SCALE: + rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale); + break; + case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: + rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en); + break; + case ISCSI_FLASHNODE_IP_FRAG_DISABLE: + rc = sprintf(buf, 
"%u\n", fnode_conn->fragment_disable); + break; + case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: + rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength); + break; + case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: + rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength); + break; + case ISCSI_FLASHNODE_FIRST_BURST: + rc = sprintf(buf, "%u\n", fnode_sess->first_burst); + break; + case ISCSI_FLASHNODE_DEF_TIME2WAIT: + rc = sprintf(buf, "%u\n", fnode_sess->time2wait); + break; + case ISCSI_FLASHNODE_DEF_TIME2RETAIN: + rc = sprintf(buf, "%u\n", fnode_sess->time2retain); + break; + case ISCSI_FLASHNODE_MAX_R2T: + rc = sprintf(buf, "%u\n", fnode_sess->max_r2t); + break; + case ISCSI_FLASHNODE_KEEPALIVE_TMO: + rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout); + break; + case ISCSI_FLASHNODE_ISID: + rc = sprintf(buf, "%pm\n", fnode_sess->isid); + break; + case ISCSI_FLASHNODE_TSID: + rc = sprintf(buf, "%u\n", fnode_sess->tsid); + break; + case ISCSI_FLASHNODE_PORT: + rc = sprintf(buf, "%d\n", fnode_conn->port); + break; + case ISCSI_FLASHNODE_MAX_BURST: + rc = sprintf(buf, "%u\n", fnode_sess->max_burst); + break; + case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: + rc = sprintf(buf, "%u\n", + fnode_sess->default_taskmgmt_timeout); + break; + case ISCSI_FLASHNODE_IPADDR: + if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) + rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress); + else + rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress); + break; + case ISCSI_FLASHNODE_ALIAS: + if (fnode_sess->targetalias) + rc = sprintf(buf, "%s\n", fnode_sess->targetalias); + else + rc = sprintf(buf, "\n"); + break; + case ISCSI_FLASHNODE_REDIRECT_IPADDR: + if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) + rc = sprintf(buf, "%pI6\n", + fnode_conn->redirect_ipaddr); + else + rc = sprintf(buf, "%pI4\n", + fnode_conn->redirect_ipaddr); + break; + case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: + rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size); + break; + case ISCSI_FLASHNODE_LOCAL_PORT: + rc = sprintf(buf, "%u\n", fnode_conn->local_port); + break; + case ISCSI_FLASHNODE_IPV4_TOS: + rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos); + break; + case ISCSI_FLASHNODE_IPV6_TC: + if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) + rc = sprintf(buf, "%u\n", + fnode_conn->ipv6_traffic_class); + else + rc = sprintf(buf, "\n"); + break; + case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: + rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label); + break; + case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: + if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) + rc = sprintf(buf, "%pI6\n", + fnode_conn->link_local_ipv6_addr); + else + rc = sprintf(buf, "\n"); + break; + case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: + rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx); + break; + case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: + if (fnode_sess->discovery_parent_type == DDB_ISNS) + parent_type = ISCSI_DISC_PARENT_ISNS; + else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) + parent_type = ISCSI_DISC_PARENT_UNKNOWN; + else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) + parent_type = ISCSI_DISC_PARENT_SENDTGT; + else + parent_type = ISCSI_DISC_PARENT_UNKNOWN; + + rc = sprintf(buf, "%s\n", + iscsi_get_discovery_parent_name(parent_type)); + break; + case ISCSI_FLASHNODE_NAME: + if (fnode_sess->targetname) + rc = sprintf(buf, "%s\n", fnode_sess->targetname); + else + rc = sprintf(buf, "\n"); + break; + case ISCSI_FLASHNODE_TPGT: + rc = sprintf(buf, "%u\n", fnode_sess->tpgt); + break; + case ISCSI_FLASHNODE_TCP_XMIT_WSF: + 
rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf); + break; + case ISCSI_FLASHNODE_TCP_RECV_WSF: + rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf); + break; + case ISCSI_FLASHNODE_CHAP_OUT_IDX: + rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx); + break; + case ISCSI_FLASHNODE_USERNAME: + if (fnode_sess->chap_auth_en) { + qla4xxx_get_uni_chap_at_index(ha, + chap_tbl.name, + chap_tbl.secret, + fnode_sess->chap_out_idx); + rc = sprintf(buf, "%s\n", chap_tbl.name); + } else { + rc = sprintf(buf, "\n"); + } + break; + case ISCSI_FLASHNODE_PASSWORD: + if (fnode_sess->chap_auth_en) { + qla4xxx_get_uni_chap_at_index(ha, + chap_tbl.name, + chap_tbl.secret, + fnode_sess->chap_out_idx); + rc = sprintf(buf, "%s\n", chap_tbl.secret); + } else { + rc = sprintf(buf, "\n"); + } + break; + case ISCSI_FLASHNODE_STATSN: + rc = sprintf(buf, "%u\n", fnode_conn->statsn); + break; + case ISCSI_FLASHNODE_EXP_STATSN: + rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn); + break; + case ISCSI_FLASHNODE_IS_BOOT_TGT: + rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target); + break; + default: + rc = -ENOSYS; + break; + } + + put_device(dev); + return rc; +} + +/** + * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry + * @fnode_sess: pointer to session attrs of flash ddb entry + * @fnode_conn: pointer to connection attrs of flash ddb entry + * @data: Parameters and their values to update + * @len: len of data + * + * This sets the parameter of flash ddb entry and writes them to flash + **/ +static int +qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_bus_flash_conn *fnode_conn, + void *data, int len) +{ + struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); + struct scsi_qla_host *ha = to_qla_host(shost); + struct iscsi_flashnode_param_info *fnode_param; + struct ql4_chap_table chap_tbl; + struct nlattr *attr; + uint16_t chap_out_idx = INVALID_ENTRY; + int rc = QLA_ERROR; + uint32_t rem = len; + + memset((void *)&chap_tbl, 0, sizeof(chap_tbl)); + nla_for_each_attr(attr, data, len, rem) { + if (nla_len(attr) < sizeof(*fnode_param)) { + rc = -EINVAL; + goto exit_set_param; + } + + fnode_param = nla_data(attr); + + switch (fnode_param->param) { + case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: + fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_PORTAL_TYPE: + memcpy(fnode_sess->portal_type, fnode_param->value, + strlen(fnode_sess->portal_type)); + break; + case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: + fnode_sess->auto_snd_tgt_disable = + fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_DISCOVERY_SESS: + fnode_sess->discovery_sess = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_ENTRY_EN: + fnode_sess->entry_state = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_HDR_DGST_EN: + fnode_conn->hdrdgst_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_DATA_DGST_EN: + fnode_conn->datadgst_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_IMM_DATA_EN: + fnode_sess->imm_data_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_INITIAL_R2T_EN: + fnode_sess->initial_r2t_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_DATASEQ_INORDER: + fnode_sess->dataseq_inorder_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_PDU_INORDER: + fnode_sess->pdu_inorder_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_CHAP_AUTH_EN: + fnode_sess->chap_auth_en = fnode_param->value[0]; + /* Invalidate chap index if chap auth is disabled */ + if 
(!fnode_sess->chap_auth_en) + fnode_sess->chap_out_idx = INVALID_ENTRY; + + break; + case ISCSI_FLASHNODE_SNACK_REQ_EN: + fnode_conn->snack_req_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: + fnode_sess->discovery_logout_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_BIDI_CHAP_EN: + fnode_sess->bidi_chap_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: + fnode_sess->discovery_auth_optional = + fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_ERL: + fnode_sess->erl = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: + fnode_conn->tcp_timestamp_stat = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: + fnode_conn->tcp_nagle_disable = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_TCP_WSF_DISABLE: + fnode_conn->tcp_wsf_disable = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_TCP_TIMER_SCALE: + fnode_conn->tcp_timer_scale = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: + fnode_conn->tcp_timestamp_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_IP_FRAG_DISABLE: + fnode_conn->fragment_disable = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: + fnode_conn->max_recv_dlength = + *(unsigned *)fnode_param->value; + break; + case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: + fnode_conn->max_xmit_dlength = + *(unsigned *)fnode_param->value; + break; + case ISCSI_FLASHNODE_FIRST_BURST: + fnode_sess->first_burst = + *(unsigned *)fnode_param->value; + break; + case ISCSI_FLASHNODE_DEF_TIME2WAIT: + fnode_sess->time2wait = *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_DEF_TIME2RETAIN: + fnode_sess->time2retain = + *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_MAX_R2T: + fnode_sess->max_r2t = + *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_KEEPALIVE_TMO: + fnode_conn->keepalive_timeout = + *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_ISID: + memcpy(fnode_sess->isid, fnode_param->value, + sizeof(fnode_sess->isid)); + break; + case ISCSI_FLASHNODE_TSID: + fnode_sess->tsid = *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_PORT: + fnode_conn->port = *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_MAX_BURST: + fnode_sess->max_burst = *(unsigned *)fnode_param->value; + break; + case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: + fnode_sess->default_taskmgmt_timeout = + *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_IPADDR: + memcpy(fnode_conn->ipaddress, fnode_param->value, + IPv6_ADDR_LEN); + break; + case ISCSI_FLASHNODE_ALIAS: + rc = iscsi_switch_str_param(&fnode_sess->targetalias, + (char *)fnode_param->value); + break; + case ISCSI_FLASHNODE_REDIRECT_IPADDR: + memcpy(fnode_conn->redirect_ipaddr, fnode_param->value, + IPv6_ADDR_LEN); + break; + case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: + fnode_conn->max_segment_size = + *(unsigned *)fnode_param->value; + break; + case ISCSI_FLASHNODE_LOCAL_PORT: + fnode_conn->local_port = + *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_IPV4_TOS: + fnode_conn->ipv4_tos = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_IPV6_TC: + fnode_conn->ipv6_traffic_class = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: + fnode_conn->ipv6_flow_label = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_NAME: + rc = iscsi_switch_str_param(&fnode_sess->targetname, + (char *)fnode_param->value); + break; + case 
ISCSI_FLASHNODE_TPGT: + fnode_sess->tpgt = *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: + memcpy(fnode_conn->link_local_ipv6_addr, + fnode_param->value, IPv6_ADDR_LEN); + break; + case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: + fnode_sess->discovery_parent_idx = + *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_TCP_XMIT_WSF: + fnode_conn->tcp_xmit_wsf = + *(uint8_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_TCP_RECV_WSF: + fnode_conn->tcp_recv_wsf = + *(uint8_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_STATSN: + fnode_conn->statsn = *(uint32_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_EXP_STATSN: + fnode_conn->exp_statsn = + *(uint32_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_CHAP_OUT_IDX: + chap_out_idx = *(uint16_t *)fnode_param->value; + if (!qla4xxx_get_uni_chap_at_index(ha, + chap_tbl.name, + chap_tbl.secret, + chap_out_idx)) { + fnode_sess->chap_out_idx = chap_out_idx; + /* Enable chap auth if chap index is valid */ + fnode_sess->chap_auth_en = QL4_PARAM_ENABLE; + } + break; + default: + ql4_printk(KERN_ERR, ha, + "%s: No such sysfs attribute\n", __func__); + rc = -ENOSYS; + goto exit_set_param; + } + } + + rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn); + +exit_set_param: + return rc; +} + +/** + * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry + * @fnode_sess: pointer to session attrs of flash ddb entry + * + * This invalidates the flash ddb entry at the given index + **/ +static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) +{ + struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); + struct scsi_qla_host *ha = to_qla_host(shost); + uint32_t dev_db_start_offset; + uint32_t dev_db_end_offset; + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + uint16_t *ddb_cookie = NULL; + size_t ddb_size = 0; + void *pddb = NULL; + int target_id; + int rc = 0; + + if (fnode_sess->is_boot_target) { + rc = -EPERM; + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Deletion of boot target entry is not permitted.\n", + __func__)); + goto exit_ddb_del; + } + + if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) + goto sysfs_ddb_del; + + if (is_qla40XX(ha)) { + dev_db_start_offset = FLASH_OFFSET_DB_INFO; + dev_db_end_offset = FLASH_OFFSET_DB_END; + dev_db_start_offset += (fnode_sess->target_id * + sizeof(*fw_ddb_entry)); + ddb_size = sizeof(*fw_ddb_entry); + } else { + dev_db_start_offset = FLASH_RAW_ACCESS_ADDR + + (ha->hw.flt_region_ddb << 2); + /* flt_ddb_size is DDB table size for both ports + * so divide it by 2 to calculate the offset for second port + */ + if (ha->port_num == 1) + dev_db_start_offset += (ha->hw.flt_ddb_size / 2); + + dev_db_end_offset = dev_db_start_offset + + (ha->hw.flt_ddb_size / 2); + + dev_db_start_offset += (fnode_sess->target_id * + sizeof(*fw_ddb_entry)); + dev_db_start_offset += offsetof(struct dev_db_entry, cookie); + + ddb_size = sizeof(*ddb_cookie); + } + + DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n", + __func__, dev_db_start_offset, dev_db_end_offset)); + + if (dev_db_start_offset > dev_db_end_offset) { + rc = -EIO; + DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n", + __func__, fnode_sess->target_id)); + goto exit_ddb_del; + } + + pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size, + &fw_ddb_entry_dma, GFP_KERNEL); + if (!pddb) { + rc = -ENOMEM; + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", + __func__)); + goto exit_ddb_del; + } + + 
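	/*
	 * Editor's note (not in the upstream source): the flash entry is
	 * invalidated by overwriting its cookie with 0xFFEE rather than by
	 * erasing the region.  On ISP4xxx a zeroed dev_db_entry carrying the
	 * bad cookie is written over the whole entry; on ISP8xxx only the
	 * two-byte cookie itself is rewritten, at the offset computed above
	 * from flt_region_ddb (plus half of flt_ddb_size when this is the
	 * second port).
	 */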
if (is_qla40XX(ha)) { + fw_ddb_entry = pddb; + memset(fw_ddb_entry, 0, ddb_size); + ddb_cookie = &fw_ddb_entry->cookie; + } else { + ddb_cookie = pddb; + } + + /* invalidate the cookie */ + *ddb_cookie = 0xFFEE; + qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, + ddb_size, FLASH_OPT_RMW_COMMIT); + +sysfs_ddb_del: + target_id = fnode_sess->target_id; + iscsi_destroy_flashnode_sess(fnode_sess); + ql4_printk(KERN_INFO, ha, + "%s: session and conn entries for flashnode %u of host %lu deleted\n", + __func__, target_id, ha->host_no); +exit_ddb_del: + if (pddb) + dma_free_coherent(&ha->pdev->dev, ddb_size, pddb, + fw_ddb_entry_dma); + return rc; +} + +/** + * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs + * @ha: pointer to adapter structure + * + * Export the firmware DDB for all send targets and normal targets to sysfs. + **/ +int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) +{ + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + uint16_t max_ddbs; + uint16_t idx = 0; + int ret = QLA_SUCCESS; + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, + sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", + __func__)); + return -ENOMEM; + } + + max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES : + MAX_DEV_DB_ENTRIES; + + for (idx = 0; idx < max_ddbs; idx++) { + if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, + idx)) + continue; + + ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0); + if (ret) { + ret = -EIO; + break; + } + } + + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, + fw_ddb_entry_dma); + + return ret; +} + +static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha) +{ + iscsi_destroy_all_flashnode(ha->host); +} + +/** + * qla4xxx_build_ddb_list - Build ddb list and setup sessions + * @ha: pointer to adapter structure + * @is_reset: Is this init path or reset path + * + * Create a list of sendtargets (st) from firmware DDBs, issue send targets + * using connection open, then create the list of normal targets (nt) + * from firmware DDBs. Based on the list of nt setup session and connection + * objects. + **/ +void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) +{ + uint16_t tmo = 0; + struct list_head list_st, list_nt; + struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp; + unsigned long wtime; + + if (!test_bit(AF_LINK_UP, &ha->flags)) { + set_bit(AF_BUILD_DDB_LIST, &ha->flags); + ha->is_reset = is_reset; + return; + } + + INIT_LIST_HEAD(&list_st); + INIT_LIST_HEAD(&list_nt); + + qla4xxx_build_st_list(ha, &list_st); + + /* Before issuing conn open mbox, ensure all IPs states are configured + * Note, conn open fails if IPs are not configured + */ + qla4xxx_wait_for_ip_configuration(ha); + + /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ + list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { + qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); + } + + /* Wait to ensure all sendtargets are done for min 12 sec wait */ + tmo = ((ha->def_timeout > LOGIN_TOV) && + (ha->def_timeout < LOGIN_TOV * 10) ? 
+ ha->def_timeout : LOGIN_TOV); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Default time to wait for build ddb %d\n", tmo)); + + wtime = jiffies + (HZ * tmo); + do { + if (list_empty(&list_st)) + break; + + qla4xxx_remove_failed_ddb(ha, &list_st); + schedule_timeout_uninterruptible(HZ / 10); + } while (time_after(wtime, jiffies)); + + + qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset); + + qla4xxx_free_ddb_list(&list_st); + qla4xxx_free_ddb_list(&list_nt); + + qla4xxx_free_ddb_index(ha); +} + +/** + * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login + * response. + * @ha: pointer to adapter structure + * + * When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be + * set in DDB and we will wait for login response of boot targets during + * probe. + **/ +static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha) +{ + struct ddb_entry *ddb_entry; + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + unsigned long wtime; + uint32_t ddb_state; + int max_ddbs, idx, ret; + + max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : + MAX_DEV_DB_ENTRIES; + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", __func__); + goto exit_login_resp; + } + + wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV); + + for (idx = 0; idx < max_ddbs; idx++) { + ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); + if (ddb_entry == NULL) + continue; + + if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: DDB index [%d]\n", __func__, + ddb_entry->fw_ddb_index)); + do { + ret = qla4xxx_get_fwddb_entry(ha, + ddb_entry->fw_ddb_index, + fw_ddb_entry, fw_ddb_entry_dma, + NULL, NULL, &ddb_state, NULL, + NULL, NULL); + if (ret == QLA_ERROR) + goto exit_login_resp; + + if ((ddb_state == DDB_DS_SESSION_ACTIVE) || + (ddb_state == DDB_DS_SESSION_FAILED)) + break; + + schedule_timeout_uninterruptible(HZ); + + } while ((time_after(wtime, jiffies))); + + if (!time_after(wtime, jiffies)) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Login response wait timer expired\n", + __func__)); + goto exit_login_resp; + } + } + } + +exit_login_resp: + if (fw_ddb_entry) + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); +} + +/** + * qla4xxx_probe_adapter - callback function to probe HBA + * @pdev: pointer to pci_dev structure + * @ent: pointer to pci_device entry + * + * This routine will probe for Qlogic 4xxx iSCSI host adapters. + * It returns zero if successful. It also initializes all data necessary for + * the driver. + **/ +static int qla4xxx_probe_adapter(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int ret = -ENODEV, status; + struct Scsi_Host *host; + struct scsi_qla_host *ha; + uint8_t init_retry_count = 0; + char buf[34]; + struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; + uint32_t dev_state; + + if (pci_enable_device(pdev)) + return -1; + + host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0); + if (host == NULL) { + printk(KERN_WARNING + "qla4xxx: Couldn't allocate host from scsi layer!\n"); + goto probe_disable_device; + } + + /* Clear our data area */ + ha = to_qla_host(host); + memset(ha, 0, sizeof(*ha)); + + /* Save the information from PCI BIOS. 
*/ + ha->pdev = pdev; + ha->host = host; + ha->host_no = host->host_no; + ha->func_num = PCI_FUNC(ha->pdev->devfn); + + /* Setup Runtime configurable options */ + if (is_qla8022(ha)) { + ha->isp_ops = &qla4_82xx_isp_ops; + ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl; + ha->qdr_sn_window = -1; + ha->ddr_mn_window = -1; + ha->curr_window = 255; + nx_legacy_intr = &legacy_intr[ha->func_num]; + ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; + ha->nx_legacy_intr.tgt_status_reg = + nx_legacy_intr->tgt_status_reg; + ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; + ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; + } else if (is_qla8032(ha) || is_qla8042(ha)) { + ha->isp_ops = &qla4_83xx_isp_ops; + ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl; + } else { + ha->isp_ops = &qla4xxx_isp_ops; + } + + if (is_qla80XX(ha)) { + rwlock_init(&ha->hw_lock); + ha->pf_bit = ha->func_num << 16; + /* Set EEH reset type to fundamental if required by hba */ + pdev->needs_freset = 1; + } + + /* Configure PCI I/O space. */ + ret = ha->isp_ops->iospace_config(ha); + if (ret) + goto probe_failed_ioconfig; + + ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n", + pdev->device, pdev->irq, ha->reg); + + qla4xxx_config_dma_addressing(ha); + + /* Initialize lists and spinlocks. */ + INIT_LIST_HEAD(&ha->free_srb_q); + + mutex_init(&ha->mbox_sem); + mutex_init(&ha->chap_sem); + init_completion(&ha->mbx_intr_comp); + init_completion(&ha->disable_acb_comp); + init_completion(&ha->idc_comp); + init_completion(&ha->link_up_comp); + + spin_lock_init(&ha->hardware_lock); + spin_lock_init(&ha->work_lock); + + /* Initialize work list */ + INIT_LIST_HEAD(&ha->work_list); + + /* Allocate dma buffers */ + if (qla4xxx_mem_alloc(ha)) { + ql4_printk(KERN_WARNING, ha, + "[ERROR] Failed to allocate memory for adapter\n"); + + ret = -ENOMEM; + goto probe_failed; + } + + host->cmd_per_lun = 3; + host->max_channel = 0; + host->max_lun = MAX_LUNS - 1; + host->max_id = MAX_TARGETS; + host->max_cmd_len = IOCB_MAX_CDB_LEN; + host->can_queue = MAX_SRBS ; + host->transportt = qla4xxx_scsi_transport; + + pci_set_drvdata(pdev, ha); + + ret = scsi_add_host(host, &pdev->dev); + if (ret) + goto probe_failed; + + if (is_qla80XX(ha)) + qla4_8xxx_get_flash_info(ha); + + if (is_qla8032(ha) || is_qla8042(ha)) { + qla4_83xx_read_reset_template(ha); + /* + * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0. + * If DONRESET_BIT0 is set, drivers should not set dev_state + * to NEED_RESET. But if NEED_RESET is set, drivers should + * should honor the reset. + */ + if (ql4xdontresethba == 1) + qla4_83xx_set_idc_dontreset(ha); + } + + /* + * Initialize the Host adapter request/response queues and + * firmware + * NOTE: interrupts enabled upon successful completion + */ + status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); + + /* Dont retry adapter initialization if IRQ allocation failed */ + if (is_qla80XX(ha) && (status == QLA_ERROR)) + goto skip_retry_init; + + while ((!test_bit(AF_ONLINE, &ha->flags)) && + init_retry_count++ < MAX_INIT_RETRIES) { + + if (is_qla80XX(ha)) { + ha->isp_ops->idc_lock(ha); + dev_state = qla4_8xxx_rd_direct(ha, + QLA8XXX_CRB_DEV_STATE); + ha->isp_ops->idc_unlock(ha); + if (dev_state == QLA8XXX_DEV_FAILED) { + ql4_printk(KERN_WARNING, ha, "%s: don't retry " + "initialize adapter. 
H/W is in failed state\n", + __func__); + break; + } + } + DEBUG2(printk("scsi: %s: retrying adapter initialization " + "(%d)\n", __func__, init_retry_count)); + + if (ha->isp_ops->reset_chip(ha) == QLA_ERROR) + continue; + + status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); + if (is_qla80XX(ha) && (status == QLA_ERROR)) { + if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR) + goto skip_retry_init; + } + } + +skip_retry_init: + if (!test_bit(AF_ONLINE, &ha->flags)) { + ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n"); + + if ((is_qla8022(ha) && ql4xdontresethba) || + ((is_qla8032(ha) || is_qla8042(ha)) && + qla4_83xx_idc_dontreset(ha))) { + /* Put the device in failed state. */ + DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n")); + ha->isp_ops->idc_lock(ha); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, + QLA8XXX_DEV_FAILED); + ha->isp_ops->idc_unlock(ha); + } + ret = -ENODEV; + goto remove_host; + } + + /* Startup the kernel thread for this host adapter. */ + DEBUG2(printk("scsi: %s: Starting kernel thread for " + "qla4xxx_dpc\n", __func__)); + sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no); + ha->dpc_thread = create_singlethread_workqueue(buf); + if (!ha->dpc_thread) { + ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n"); + ret = -ENODEV; + goto remove_host; + } + INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); + + ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1, + ha->host_no); + if (!ha->task_wq) { + ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n"); + ret = -ENODEV; + goto remove_host; + } + + /* + * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc + * (which is called indirectly by qla4xxx_initialize_adapter), + * so that irqs will be registered after crbinit but before + * mbx_intr_enable. + */ + if (is_qla40XX(ha)) { + ret = qla4xxx_request_irqs(ha); + if (ret) { + ql4_printk(KERN_WARNING, ha, "Failed to reserve " + "interrupt %d already in use.\n", pdev->irq); + goto remove_host; + } + } + + pci_save_state(ha->pdev); + ha->isp_ops->enable_intrs(ha); + + /* Start timer thread. 
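 * Editor's note (not in the upstream source): after the timer is started,
 * the remainder of the probe path runs the iSCSI bring-up: it schedules the
 * flash DDB sysfs export through the DPC_SYSFS_DDB_EXPORT flag, builds the
 * DDB list and logs in every flash session, waits for the boot target login
 * responses, and finally creates the CHAP list and the network ifaces
 * before returning success.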
*/ + qla4xxx_start_timer(ha, 1); + + set_bit(AF_INIT_DONE, &ha->flags); + + qla4_8xxx_alloc_sysfs_attr(ha); + + printk(KERN_INFO + " QLogic iSCSI HBA Driver version: %s\n" + " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", + qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), + ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor, + ha->fw_info.fw_patch, ha->fw_info.fw_build); + + /* Set the driver version */ + if (is_qla80XX(ha)) + qla4_8xxx_set_param(ha, SET_DRVR_VERSION); + + if (qla4xxx_setup_boot_info(ha)) + ql4_printk(KERN_ERR, ha, + "%s: No iSCSI boot target configured\n", __func__); + + set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags); + /* Perform the build ddb list and login to each */ + qla4xxx_build_ddb_list(ha, INIT_ADAPTER); + iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); + qla4xxx_wait_login_resp_boot_tgt(ha); + + qla4xxx_create_chap_list(ha); + + qla4xxx_create_ifaces(ha); + return 0; + +remove_host: + scsi_remove_host(ha->host); + +probe_failed: + qla4xxx_free_adapter(ha); + +probe_failed_ioconfig: + scsi_host_put(ha->host); + +probe_disable_device: + pci_disable_device(pdev); + + return ret; +} + +/** + * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize + * @ha: pointer to adapter structure + * + * Mark the other ISP-4xxx port to indicate that the driver is being removed, + * so that the other port will not re-initialize while in the process of + * removing the ha due to driver unload or hba hotplug. + **/ +static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha) +{ + struct scsi_qla_host *other_ha = NULL; + struct pci_dev *other_pdev = NULL; + int fn = ISP4XXX_PCI_FN_2; + + /*iscsi function numbers for ISP4xxx is 1 and 3*/ + if (PCI_FUNC(ha->pdev->devfn) & BIT_1) + fn = ISP4XXX_PCI_FN_1; + + other_pdev = + pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), + ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), + fn)); + + /* Get other_ha if other_pdev is valid and state is enable*/ + if (other_pdev) { + if (atomic_read(&other_pdev->enable_cnt)) { + other_ha = pci_get_drvdata(other_pdev); + if (other_ha) { + set_bit(AF_HA_REMOVAL, &other_ha->flags); + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: " + "Prevent %s reinit\n", __func__, + dev_name(&other_ha->pdev->dev))); + } + } + pci_dev_put(other_pdev); + } +} + +static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry) +{ + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + unsigned long wtime; + uint32_t ddb_state; + int options; + int status; + + options = LOGOUT_OPTION_CLOSE_SESSION; + if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); + goto clear_ddb; + } + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", __func__); + goto clear_ddb; + } + + wtime = jiffies + (HZ * LOGOUT_TOV); + do { + status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, + fw_ddb_entry, fw_ddb_entry_dma, + NULL, NULL, &ddb_state, NULL, + NULL, NULL); + if (status == QLA_ERROR) + goto free_ddb; + + if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || + (ddb_state == DDB_DS_SESSION_FAILED)) + goto free_ddb; + + schedule_timeout_uninterruptible(HZ); + } while ((time_after(wtime, jiffies))); + +free_ddb: + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, 
fw_ddb_entry_dma); +clear_ddb: + qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); +} + +static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha) +{ + struct ddb_entry *ddb_entry; + int idx; + + for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { + + ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); + if ((ddb_entry != NULL) && + (ddb_entry->ddb_type == FLASH_DDB)) { + + qla4xxx_destroy_ddb(ha, ddb_entry); + /* + * we have decremented the reference count of the driver + * when we setup the session to have the driver unload + * to be seamless without actually destroying the + * session + **/ + try_module_get(qla4xxx_iscsi_transport.owner); + iscsi_destroy_endpoint(ddb_entry->conn->ep); + qla4xxx_free_ddb(ha, ddb_entry); + iscsi_session_teardown(ddb_entry->sess); + } + } +} +/** + * qla4xxx_remove_adapter - callback function to remove adapter. + * @pdev: PCI device pointer + **/ +static void qla4xxx_remove_adapter(struct pci_dev *pdev) +{ + struct scsi_qla_host *ha; + + /* + * If the PCI device is disabled then it means probe_adapter had + * failed and resources already cleaned up on probe_adapter exit. + */ + if (!pci_is_enabled(pdev)) + return; + + ha = pci_get_drvdata(pdev); + + if (is_qla40XX(ha)) + qla4xxx_prevent_other_port_reinit(ha); + + /* destroy iface from sysfs */ + qla4xxx_destroy_ifaces(ha); + + if ((!ql4xdisablesysfsboot) && ha->boot_kset) + iscsi_boot_destroy_kset(ha->boot_kset); + + qla4xxx_destroy_fw_ddb_session(ha); + qla4_8xxx_free_sysfs_attr(ha); + + qla4xxx_sysfs_ddb_remove(ha); + scsi_remove_host(ha->host); + + qla4xxx_free_adapter(ha); + + scsi_host_put(ha->host); + + pci_disable_device(pdev); +} + +/** + * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method. + * @ha: HA context + */ +static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha) +{ + /* Update our PCI device dma_mask for full 64 bit mask */ + if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) { + dev_dbg(&ha->pdev->dev, + "Failed to set 64 bit PCI consistent mask; " + "using 32 bit.\n"); + dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32)); + } +} + +static int qla4xxx_slave_alloc(struct scsi_device *sdev) +{ + struct iscsi_cls_session *cls_sess; + struct iscsi_session *sess; + struct ddb_entry *ddb; + int queue_depth = QL4_DEF_QDEPTH; + + cls_sess = starget_to_session(sdev->sdev_target); + sess = cls_sess->dd_data; + ddb = sess->dd_data; + + sdev->hostdata = ddb; + + if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) + queue_depth = ql4xmaxqdepth; + + scsi_change_queue_depth(sdev, queue_depth); + return 0; +} + +/** + * qla4xxx_del_from_active_array - returns an active srb + * @ha: Pointer to host adapter structure. + * @index: index into the active_array + * + * This routine removes and returns the srb at the specified index + **/ +struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha, + uint32_t index) +{ + struct srb *srb = NULL; + struct scsi_cmnd *cmd = NULL; + + cmd = scsi_host_find_tag(ha->host, index); + if (!cmd) + return srb; + + srb = qla4xxx_cmd_priv(cmd)->srb; + if (!srb) + return srb; + + /* update counters */ + if (srb->flags & SRB_DMA_VALID) { + ha->iocb_cnt -= srb->iocb_cnt; + if (srb->cmd) + srb->cmd->host_scribble = + (unsigned char *)(unsigned long) MAX_SRBS; + } + return srb; +} + +/** + * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware + * @ha: Pointer to host adapter structure. + * @cmd: Scsi Command to wait on. 
+ * + * This routine waits for the command to be returned by the Firmware + * for some max time. + **/ +static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha, + struct scsi_cmnd *cmd) +{ + int done = 0; + struct srb *rp; + uint32_t max_wait_time = EH_WAIT_CMD_TOV; + int ret = SUCCESS; + + /* Dont wait on command if PCI error is being handled + * by PCI AER driver + */ + if (unlikely(pci_channel_offline(ha->pdev)) || + (test_bit(AF_EEH_BUSY, &ha->flags))) { + ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n", + ha->host_no, __func__); + return ret; + } + + do { + /* Checking to see if its returned to OS */ + rp = qla4xxx_cmd_priv(cmd)->srb; + if (rp == NULL) { + done++; + break; + } + + msleep(2000); + } while (max_wait_time--); + + return done; +} + +/** + * qla4xxx_wait_for_hba_online - waits for HBA to come online + * @ha: Pointer to host adapter structure + **/ +static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha) +{ + unsigned long wait_online; + + wait_online = jiffies + (HBA_ONLINE_TOV * HZ); + while (time_before(jiffies, wait_online)) { + + if (adapter_up(ha)) + return QLA_SUCCESS; + + msleep(2000); + } + + return QLA_ERROR; +} + +/** + * qla4xxx_eh_wait_for_commands - wait for active cmds to finish. + * @ha: pointer to HBA + * @stgt: pointer to SCSI target + * @sdev: pointer to SCSI device + * + * This function waits for all outstanding commands to a lun to complete. It + * returns 0 if all pending commands are returned and 1 otherwise. + **/ +static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha, + struct scsi_target *stgt, + struct scsi_device *sdev) +{ + int cnt; + int status = 0; + struct scsi_cmnd *cmd; + + /* + * Waiting for all commands for the designated target or dev + * in the active array + */ + for (cnt = 0; cnt < ha->host->can_queue; cnt++) { + cmd = scsi_host_find_tag(ha->host, cnt); + if (cmd && stgt == scsi_target(cmd->device) && + (!sdev || sdev == cmd->device)) { + if (!qla4xxx_eh_wait_on_command(ha, cmd)) { + status++; + break; + } + } + } + return status; +} + +/** + * qla4xxx_eh_abort - callback for abort task. + * @cmd: Pointer to Linux's SCSI command structure + * + * This routine is called by the Linux OS to abort the specified + * command. 
+ **/ +static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) +{ + struct scsi_qla_host *ha = to_qla_host(cmd->device->host); + unsigned int id = cmd->device->id; + uint64_t lun = cmd->device->lun; + unsigned long flags; + struct srb *srb = NULL; + int ret = SUCCESS; + int wait = 0; + int rval; + + ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", + ha->host_no, id, lun, cmd, cmd->cmnd[0]); + + rval = qla4xxx_isp_check_reg(ha); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + + spin_lock_irqsave(&ha->hardware_lock, flags); + srb = qla4xxx_cmd_priv(cmd)->srb; + if (!srb) { + spin_unlock_irqrestore(&ha->hardware_lock, flags); + ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n", + ha->host_no, id, lun); + return SUCCESS; + } + kref_get(&srb->srb_ref); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) { + DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n", + ha->host_no, id, lun)); + ret = FAILED; + } else { + DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n", + ha->host_no, id, lun)); + wait = 1; + } + + kref_put(&srb->srb_ref, qla4xxx_srb_compl); + + /* Wait for command to complete */ + if (wait) { + if (!qla4xxx_eh_wait_on_command(ha, cmd)) { + DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n", + ha->host_no, id, lun)); + ret = FAILED; + } + } + + ql4_printk(KERN_INFO, ha, + "scsi%ld:%d:%llu: Abort command - %s\n", + ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed"); + + return ret; +} + +/** + * qla4xxx_eh_device_reset - callback for target reset. + * @cmd: Pointer to Linux's SCSI command structure + * + * This routine is called by the Linux OS to reset all luns on the + * specified target. + **/ +static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) +{ + struct scsi_qla_host *ha = to_qla_host(cmd->device->host); + struct ddb_entry *ddb_entry = cmd->device->hostdata; + int ret = FAILED, stat; + int rval; + + if (!ddb_entry) + return ret; + + ret = iscsi_block_scsi_eh(cmd); + if (ret) + return ret; + ret = FAILED; + + ql4_printk(KERN_INFO, ha, + "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no, + cmd->device->channel, cmd->device->id, cmd->device->lun); + + DEBUG2(printk(KERN_INFO + "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," + "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, + cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ, + ha->dpc_flags, cmd->result, cmd->allowed)); + + rval = qla4xxx_isp_check_reg(ha); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + + /* FIXME: wait for hba to go online */ + stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); + if (stat != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat); + goto eh_dev_reset_done; + } + + if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), + cmd->device)) { + ql4_printk(KERN_INFO, ha, + "DEVICE RESET FAILED - waiting for " + "commands.\n"); + goto eh_dev_reset_done; + } + + /* Send marker. 
*/ + if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, + MM_LUN_RESET) != QLA_SUCCESS) + goto eh_dev_reset_done; + + ql4_printk(KERN_INFO, ha, + "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n", + ha->host_no, cmd->device->channel, cmd->device->id, + cmd->device->lun); + + ret = SUCCESS; + +eh_dev_reset_done: + + return ret; +} + +/** + * qla4xxx_eh_target_reset - callback for target reset. + * @cmd: Pointer to Linux's SCSI command structure + * + * This routine is called by the Linux OS to reset the target. + **/ +static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) +{ + struct scsi_qla_host *ha = to_qla_host(cmd->device->host); + struct ddb_entry *ddb_entry = cmd->device->hostdata; + int stat, ret; + int rval; + + if (!ddb_entry) + return FAILED; + + ret = iscsi_block_scsi_eh(cmd); + if (ret) + return ret; + + starget_printk(KERN_INFO, scsi_target(cmd->device), + "WARM TARGET RESET ISSUED.\n"); + + DEBUG2(printk(KERN_INFO + "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " + "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", + ha->host_no, cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ, + ha->dpc_flags, cmd->result, cmd->allowed)); + + rval = qla4xxx_isp_check_reg(ha); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + + stat = qla4xxx_reset_target(ha, ddb_entry); + if (stat != QLA_SUCCESS) { + starget_printk(KERN_INFO, scsi_target(cmd->device), + "WARM TARGET RESET FAILED.\n"); + return FAILED; + } + + if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), + NULL)) { + starget_printk(KERN_INFO, scsi_target(cmd->device), + "WARM TARGET DEVICE RESET FAILED - " + "waiting for commands.\n"); + return FAILED; + } + + /* Send marker. */ + if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, + MM_TGT_WARM_RESET) != QLA_SUCCESS) { + starget_printk(KERN_INFO, scsi_target(cmd->device), + "WARM TARGET DEVICE RESET FAILED - " + "marker iocb failed.\n"); + return FAILED; + } + + starget_printk(KERN_INFO, scsi_target(cmd->device), + "WARM TARGET RESET SUCCEEDED.\n"); + return SUCCESS; +} + +/** + * qla4xxx_is_eh_active - check if error handler is running + * @shost: Pointer to SCSI Host struct + * + * This routine finds that if reset host is called in EH + * scenario or from some application like sg_reset + **/ +static int qla4xxx_is_eh_active(struct Scsi_Host *shost) +{ + if (shost->shost_state == SHOST_RECOVERY) + return 1; + return 0; +} + +/** + * qla4xxx_eh_host_reset - kernel callback + * @cmd: Pointer to Linux's SCSI command structure + * + * This routine is invoked by the Linux kernel to perform fatal error + * recovery on the specified adapter. 
+ **/ +static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) +{ + int return_status = FAILED; + struct scsi_qla_host *ha; + int rval; + + ha = to_qla_host(cmd->device->host); + + rval = qla4xxx_isp_check_reg(ha); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + + if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) + qla4_83xx_set_idc_dontreset(ha); + + /* + * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other + * protocol drivers, we should not set device_state to NEED_RESET + */ + if (ql4xdontresethba || + ((is_qla8032(ha) || is_qla8042(ha)) && + qla4_83xx_idc_dontreset(ha))) { + DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", + ha->host_no, __func__)); + + /* Clear outstanding srb in queues */ + if (qla4xxx_is_eh_active(cmd->device->host)) + qla4xxx_abort_active_cmds(ha, DID_ABORT << 16); + + return FAILED; + } + + ql4_printk(KERN_INFO, ha, + "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no, + cmd->device->channel, cmd->device->id, cmd->device->lun); + + if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { + DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter " + "DEAD.\n", ha->host_no, cmd->device->channel, + __func__)); + + return FAILED; + } + + if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { + if (is_qla80XX(ha)) + set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); + else + set_bit(DPC_RESET_HA, &ha->dpc_flags); + } + + if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS) + return_status = SUCCESS; + + ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n", + return_status == FAILED ? "FAILED" : "SUCCEEDED"); + + return return_status; +} + +static int qla4xxx_context_reset(struct scsi_qla_host *ha) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + struct addr_ctrl_blk_def *acb = NULL; + uint32_t acb_len = sizeof(struct addr_ctrl_blk_def); + int rval = QLA_SUCCESS; + dma_addr_t acb_dma; + + acb = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct addr_ctrl_blk_def), + &acb_dma, GFP_KERNEL); + if (!acb) { + ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", + __func__); + rval = -ENOMEM; + goto exit_port_reset; + } + + memset(acb, 0, acb_len); + + rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len); + if (rval != QLA_SUCCESS) { + rval = -EIO; + goto exit_free_acb; + } + + rval = qla4xxx_disable_acb(ha); + if (rval != QLA_SUCCESS) { + rval = -EIO; + goto exit_free_acb; + } + + wait_for_completion_timeout(&ha->disable_acb_comp, + DISABLE_ACB_TOV * HZ); + + rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); + if (rval != QLA_SUCCESS) { + rval = -EIO; + goto exit_free_acb; + } + +exit_free_acb: + dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def), + acb, acb_dma); +exit_port_reset: + DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__, + rval == QLA_SUCCESS ? 
"SUCCEEDED" : "FAILED")); + return rval; +} + +static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + int rval = QLA_SUCCESS; + uint32_t idc_ctrl; + + if (ql4xdontresethba) { + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n", + __func__)); + rval = -EPERM; + goto exit_host_reset; + } + + if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) + goto recover_adapter; + + switch (reset_type) { + case SCSI_ADAPTER_RESET: + set_bit(DPC_RESET_HA, &ha->dpc_flags); + break; + case SCSI_FIRMWARE_RESET: + if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { + if (is_qla80XX(ha)) + /* set firmware context reset */ + set_bit(DPC_RESET_HA_FW_CONTEXT, + &ha->dpc_flags); + else { + rval = qla4xxx_context_reset(ha); + goto exit_host_reset; + } + } + break; + } + +recover_adapter: + /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if + * reset is issued by application */ + if ((is_qla8032(ha) || is_qla8042(ha)) && + test_bit(DPC_RESET_HA, &ha->dpc_flags)) { + idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); + qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, + (idc_ctrl | GRACEFUL_RESET_BIT1)); + } + + rval = qla4xxx_recover_adapter(ha); + if (rval != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n", + __func__)); + rval = -EIO; + } + +exit_host_reset: + return rval; +} + +/* PCI AER driver recovers from all correctable errors w/o + * driver intervention. For uncorrectable errors PCI AER + * driver calls the following device driver's callbacks + * + * - Fatal Errors - link_reset + * - Non-Fatal Errors - driver's error_detected() which + * returns CAN_RECOVER, NEED_RESET or DISCONNECT. + * + * PCI AER driver calls + * CAN_RECOVER - driver's mmio_enabled(), mmio_enabled() + * returns RECOVERED or NEED_RESET if fw_hung + * NEED_RESET - driver's slot_reset() + * DISCONNECT - device is dead & cannot recover + * RECOVERED - driver's resume() + */ +static pci_ers_result_t +qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct scsi_qla_host *ha = pci_get_drvdata(pdev); + + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n", + ha->host_no, __func__, state); + + if (!is_aer_supported(ha)) + return PCI_ERS_RESULT_NONE; + + switch (state) { + case pci_channel_io_normal: + clear_bit(AF_EEH_BUSY, &ha->flags); + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + set_bit(AF_EEH_BUSY, &ha->flags); + qla4xxx_mailbox_premature_completion(ha); + qla4xxx_free_irqs(ha); + pci_disable_device(pdev); + /* Return back all IOs */ + qla4xxx_abort_active_cmds(ha, DID_RESET << 16); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + set_bit(AF_EEH_BUSY, &ha->flags); + set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags); + qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); + return PCI_ERS_RESULT_DISCONNECT; + } + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * qla4xxx_pci_mmio_enabled() - gets called if + * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER + * and read/write to the device still works. 
+ * @pdev: PCI device pointer + **/ +static pci_ers_result_t +qla4xxx_pci_mmio_enabled(struct pci_dev *pdev) +{ + struct scsi_qla_host *ha = pci_get_drvdata(pdev); + + if (!is_aer_supported(ha)) + return PCI_ERS_RESULT_NONE; + + return PCI_ERS_RESULT_RECOVERED; +} + +static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) +{ + uint32_t rval = QLA_ERROR; + int fn; + struct pci_dev *other_pdev = NULL; + + ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__); + + set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); + + if (test_bit(AF_ONLINE, &ha->flags)) { + clear_bit(AF_ONLINE, &ha->flags); + clear_bit(AF_LINK_UP, &ha->flags); + iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); + qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); + } + + fn = PCI_FUNC(ha->pdev->devfn); + if (is_qla8022(ha)) { + while (fn > 0) { + fn--; + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n", + ha->host_no, __func__, fn); + /* Get the pci device given the domain, bus, + * slot/function number */ + other_pdev = pci_get_domain_bus_and_slot( + pci_domain_nr(ha->pdev->bus), + ha->pdev->bus->number, + PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), + fn)); + + if (!other_pdev) + continue; + + if (atomic_read(&other_pdev->enable_cnt)) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n", + ha->host_no, __func__, fn); + pci_dev_put(other_pdev); + break; + } + pci_dev_put(other_pdev); + } + } else { + /* this case is meant for ISP83xx/ISP84xx only */ + if (qla4_83xx_can_perform_reset(ha)) { + /* reset fn as iSCSI is going to perform the reset */ + fn = 0; + } + } + + /* The first function on the card, the reset owner will + * start & initialize the firmware. The other functions + * on the card will reset the firmware context + */ + if (!fn) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset " + "0x%x is the owner\n", ha->host_no, __func__, + ha->pdev->devfn); + + ha->isp_ops->idc_lock(ha); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, + QLA8XXX_DEV_COLD); + ha->isp_ops->idc_unlock(ha); + + rval = qla4_8xxx_update_idc_reg(ha); + if (rval == QLA_ERROR) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n", + ha->host_no, __func__); + ha->isp_ops->idc_lock(ha); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, + QLA8XXX_DEV_FAILED); + ha->isp_ops->idc_unlock(ha); + goto exit_error_recovery; + } + + clear_bit(AF_FW_RECOVERY, &ha->flags); + rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); + + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " + "FAILED\n", ha->host_no, __func__); + qla4xxx_free_irqs(ha); + ha->isp_ops->idc_lock(ha); + qla4_8xxx_clear_drv_active(ha); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, + QLA8XXX_DEV_FAILED); + ha->isp_ops->idc_unlock(ha); + } else { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " + "READY\n", ha->host_no, __func__); + ha->isp_ops->idc_lock(ha); + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, + QLA8XXX_DEV_READY); + /* Clear driver state register */ + qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0); + qla4_8xxx_set_drv_active(ha); + ha->isp_ops->idc_unlock(ha); + ha->isp_ops->enable_intrs(ha); + } + } else { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not " + "the reset owner\n", ha->host_no, __func__, + ha->pdev->devfn); + if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) == + QLA8XXX_DEV_READY)) { + clear_bit(AF_FW_RECOVERY, &ha->flags); + rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); + if (rval == QLA_SUCCESS) + 
ha->isp_ops->enable_intrs(ha); + else + qla4xxx_free_irqs(ha); + + ha->isp_ops->idc_lock(ha); + qla4_8xxx_set_drv_active(ha); + ha->isp_ops->idc_unlock(ha); + } + } +exit_error_recovery: + clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); + return rval; +} + +static pci_ers_result_t +qla4xxx_pci_slot_reset(struct pci_dev *pdev) +{ + pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; + struct scsi_qla_host *ha = pci_get_drvdata(pdev); + int rc; + + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n", + ha->host_no, __func__); + + if (!is_aer_supported(ha)) + return PCI_ERS_RESULT_NONE; + + /* Restore the saved state of PCIe device - + * BAR registers, PCI Config space, PCIX, MSI, + * IOV states + */ + pci_restore_state(pdev); + + /* pci_restore_state() clears the saved_state flag of the device + * save restored state which resets saved_state flag + */ + pci_save_state(pdev); + + /* Initialize device or resume if in suspended state */ + rc = pci_enable_device(pdev); + if (rc) { + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable " + "device after reset\n", ha->host_no, __func__); + goto exit_slot_reset; + } + + ha->isp_ops->disable_intrs(ha); + + if (is_qla80XX(ha)) { + if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) { + ret = PCI_ERS_RESULT_RECOVERED; + goto exit_slot_reset; + } else + goto exit_slot_reset; + } + +exit_slot_reset: + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n" + "device after reset\n", ha->host_no, __func__, ret); + return ret; +} + +static void +qla4xxx_pci_resume(struct pci_dev *pdev) +{ + struct scsi_qla_host *ha = pci_get_drvdata(pdev); + int ret; + + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n", + ha->host_no, __func__); + + ret = qla4xxx_wait_for_hba_online(ha); + if (ret != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to " + "resume I/O from slot/link_reset\n", ha->host_no, + __func__); + } + + clear_bit(AF_EEH_BUSY, &ha->flags); +} + +static const struct pci_error_handlers qla4xxx_err_handler = { + .error_detected = qla4xxx_pci_error_detected, + .mmio_enabled = qla4xxx_pci_mmio_enabled, + .slot_reset = qla4xxx_pci_slot_reset, + .resume = qla4xxx_pci_resume, +}; + +static struct pci_device_id qla4xxx_pci_tbl[] = { + { + .vendor = PCI_VENDOR_ID_QLOGIC, + .device = PCI_DEVICE_ID_QLOGIC_ISP4010, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .vendor = PCI_VENDOR_ID_QLOGIC, + .device = PCI_DEVICE_ID_QLOGIC_ISP4022, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .vendor = PCI_VENDOR_ID_QLOGIC, + .device = PCI_DEVICE_ID_QLOGIC_ISP4032, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .vendor = PCI_VENDOR_ID_QLOGIC, + .device = PCI_DEVICE_ID_QLOGIC_ISP8022, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .vendor = PCI_VENDOR_ID_QLOGIC, + .device = PCI_DEVICE_ID_QLOGIC_ISP8324, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .vendor = PCI_VENDOR_ID_QLOGIC, + .device = PCI_DEVICE_ID_QLOGIC_ISP8042, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + {0, 0}, +}; +MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); + +static struct pci_driver qla4xxx_pci_driver = { + .name = DRIVER_NAME, + .id_table = qla4xxx_pci_tbl, + .probe = qla4xxx_probe_adapter, + .remove = qla4xxx_remove_adapter, + .err_handler = &qla4xxx_err_handler, +}; + +static int __init qla4xxx_module_init(void) +{ + int ret; + + if (ql4xqfulltracking) + qla4xxx_driver_template.track_queue_depth = 1; + + /* Allocate cache for SRBs. 
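One object per struct srb, kept cache-line aligned via SLAB_HWCACHE_ALIGN.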
*/ + srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (srb_cachep == NULL) { + printk(KERN_ERR + "%s: Unable to allocate SRB cache..." + "Failing load!\n", DRIVER_NAME); + ret = -ENOMEM; + goto no_srp_cache; + } + + /* Derive version string. */ + strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION); + if (ql4xextended_error_logging) + strcat(qla4xxx_version_str, "-debug"); + + qla4xxx_scsi_transport = + iscsi_register_transport(&qla4xxx_iscsi_transport); + if (!qla4xxx_scsi_transport){ + ret = -ENODEV; + goto release_srb_cache; + } + + ret = pci_register_driver(&qla4xxx_pci_driver); + if (ret) + goto unregister_transport; + + printk(KERN_INFO "QLogic iSCSI HBA Driver\n"); + return 0; + +unregister_transport: + iscsi_unregister_transport(&qla4xxx_iscsi_transport); +release_srb_cache: + kmem_cache_destroy(srb_cachep); +no_srp_cache: + return ret; +} + +static void __exit qla4xxx_module_exit(void) +{ + pci_unregister_driver(&qla4xxx_pci_driver); + iscsi_unregister_transport(&qla4xxx_iscsi_transport); + kmem_cache_destroy(srb_cachep); +} + +module_init(qla4xxx_module_init); +module_exit(qla4xxx_module_exit); + +MODULE_AUTHOR("QLogic Corporation"); +MODULE_DESCRIPTION("QLogic iSCSI HBA Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(QLA4XXX_DRIVER_VERSION); diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h new file mode 100644 index 000000000..fb1c14269 --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_version.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + */ + +#define QLA4XXX_DRIVER_VERSION "5.04.00-k6" diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c new file mode 100644 index 000000000..8f05e3707 --- /dev/null +++ b/drivers/scsi/qlogicfas.c @@ -0,0 +1,229 @@ +/* + * Qlogic FAS408 ISA card driver + * + * Copyright 1994, Tom Zerucha. + * tz@execpc.com + * + * Redistributable under terms of the GNU General Public License + * + * For the avoidance of doubt the "preferred form" of this code is one which + * is in an open non patent encumbered format. Where cryptographic key signing + * forms part of the process of creating an executable the information + * including keys needed to generate an equivalently functional executable + * are deemed to be part of the source code. + * + * Check qlogicfas408.c for more credits and info. + */ + +#include +#include /* to get disk capacity */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "qlogicfas408.h" + +/* Set the following to 2 to use normal interrupt (active high/totempole- + * tristate), otherwise use 0 (REQUIRED FOR PCMCIA) for active low, open + * drain + */ +#define INT_TYPE 2 + +static char qlogicfas_name[] = "qlogicfas"; + +/* + * Look for qlogic card and init if found + */ + +static struct Scsi_Host *__qlogicfas_detect(struct scsi_host_template *host, + int qbase, + int qlirq) +{ + int qltyp; /* type of chip */ + int qinitid; + struct Scsi_Host *hreg; /* registered host structure */ + struct qlogicfas408_priv *priv; + + /* Qlogic Cards only exist at 0x230 or 0x330 (the chip itself + * decodes the address - I check 230 first since MIDI cards are + * typically at 0x330 + * + * Theoretically, two Qlogic cards can coexist in the same system. 
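+ * With the iobase= and irq= module parameters taking comma-separated
+ * lists, both cards can in principle be given on one module command
+ * line, e.g. something like "modprobe qlogicfas iobase=0x230,0x330
+ * irq=10,11" (the IRQ numbers here stand in for whatever the cards are
+ * actually jumpered to).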
+ * This should work by simply using this as a loadable module for + * the second card, but I haven't tested this. + */ + + if (!qbase || qlirq == -1) + goto err; + + if (!request_region(qbase, 0x10, qlogicfas_name)) { + printk(KERN_INFO "%s: address %#x is busy\n", qlogicfas_name, + qbase); + goto err; + } + + if (!qlogicfas408_detect(qbase, INT_TYPE)) { + printk(KERN_WARNING "%s: probe failed for %#x\n", + qlogicfas_name, + qbase); + goto err_release_mem; + } + + printk(KERN_INFO "%s: Using preset base address of %03x," + " IRQ %d\n", qlogicfas_name, qbase, qlirq); + + qltyp = qlogicfas408_get_chip_type(qbase, INT_TYPE); + qinitid = host->this_id; + if (qinitid < 0) + qinitid = 7; /* if no ID, use 7 */ + + qlogicfas408_setup(qbase, qinitid, INT_TYPE); + + hreg = scsi_host_alloc(host, sizeof(struct qlogicfas408_priv)); + if (!hreg) + goto err_release_mem; + priv = get_priv_by_host(hreg); + hreg->io_port = qbase; + hreg->n_io_port = 16; + hreg->dma_channel = -1; + if (qlirq != -1) + hreg->irq = qlirq; + priv->qbase = qbase; + priv->qlirq = qlirq; + priv->qinitid = qinitid; + priv->shost = hreg; + priv->int_type = INT_TYPE; + + sprintf(priv->qinfo, + "Qlogicfas Driver version 0.46, chip %02X at %03X, IRQ %d, TPdma:%d", + qltyp, qbase, qlirq, QL_TURBO_PDMA); + host->name = qlogicfas_name; + + if (request_irq(qlirq, qlogicfas408_ihandl, 0, qlogicfas_name, hreg)) + goto free_scsi_host; + + if (scsi_add_host(hreg, NULL)) + goto free_interrupt; + + scsi_scan_host(hreg); + + return hreg; + +free_interrupt: + free_irq(qlirq, hreg); + +free_scsi_host: + scsi_host_put(hreg); + +err_release_mem: + release_region(qbase, 0x10); +err: + return NULL; +} + +#define MAX_QLOGICFAS 8 +static struct qlogicfas408_priv *cards; +static int iobase[MAX_QLOGICFAS]; +static int irq[MAX_QLOGICFAS] = { [0 ... 
MAX_QLOGICFAS-1] = -1 }; +module_param_hw_array(iobase, int, ioport, NULL, 0); +module_param_hw_array(irq, int, irq, NULL, 0); +MODULE_PARM_DESC(iobase, "I/O address"); +MODULE_PARM_DESC(irq, "IRQ"); + +static int qlogicfas_detect(struct scsi_host_template *sht) +{ + struct Scsi_Host *shost; + struct qlogicfas408_priv *priv; + int num; + + for (num = 0; num < MAX_QLOGICFAS; num++) { + shost = __qlogicfas_detect(sht, iobase[num], irq[num]); + if (shost == NULL) { + /* no more devices */ + break; + } + priv = get_priv_by_host(shost); + priv->next = cards; + cards = priv; + } + + return num; +} + +static int qlogicfas_release(struct Scsi_Host *shost) +{ + struct qlogicfas408_priv *priv = get_priv_by_host(shost); + + scsi_remove_host(shost); + if (shost->irq) { + qlogicfas408_disable_ints(priv); + free_irq(shost->irq, shost); + } + if (shost->io_port && shost->n_io_port) + release_region(shost->io_port, shost->n_io_port); + scsi_host_put(shost); + + return 0; +} + +/* + * The driver template is also needed for PCMCIA + */ +static struct scsi_host_template qlogicfas_driver_template = { + .module = THIS_MODULE, + .name = qlogicfas_name, + .proc_name = qlogicfas_name, + .info = qlogicfas408_info, + .queuecommand = qlogicfas408_queuecommand, + .eh_abort_handler = qlogicfas408_abort, + .eh_host_reset_handler = qlogicfas408_host_reset, + .bios_param = qlogicfas408_biosparam, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .dma_boundary = PAGE_SIZE - 1, +}; + +static __init int qlogicfas_init(void) +{ + if (!qlogicfas_detect(&qlogicfas_driver_template)) { + /* no cards found */ + printk(KERN_INFO "%s: no cards were found, please specify " + "I/O address and IRQ using iobase= and irq= " + "options", qlogicfas_name); + return -ENODEV; + } + + return 0; +} + +static __exit void qlogicfas_exit(void) +{ + struct qlogicfas408_priv *priv; + + for (priv = cards; priv != NULL; priv = priv->next) + qlogicfas_release(priv->shost); +} + +MODULE_AUTHOR("Tom Zerucha, Michael Griffith"); +MODULE_DESCRIPTION("Driver for the Qlogic FAS408 based ISA card"); +MODULE_LICENSE("GPL"); +module_init(qlogicfas_init); +module_exit(qlogicfas_exit); + diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c new file mode 100644 index 000000000..3e065d5fc --- /dev/null +++ b/drivers/scsi/qlogicfas408.c @@ -0,0 +1,644 @@ +/*----------------------------------------------------------------*/ +/* + Qlogic linux driver - work in progress. No Warranty express or implied. + Use at your own risk. Support Tort Reform so you won't have to read all + these silly disclaimers. + + Copyright 1994, Tom Zerucha. + tz@execpc.com + + Additional Code, and much appreciated help by + Michael A. Griffith + grif@cs.ucr.edu + + Thanks to Eric Youngdale and Dave Hinds for loadable module and PCMCIA + help respectively, and for suffering through my foolishness during the + debugging process. + + Reference Qlogic FAS408 Technical Manual, 53408-510-00A, May 10, 1994 + (you can reference it, but it is incomplete and inaccurate in places) + + Version 0.46 1/30/97 - kernel 1.2.0+ + + Functions as standalone, loadable, and PCMCIA driver, the latter from + Dave Hinds' PCMCIA package. + + Cleaned up 26/10/2002 by Alan Cox as part of the 2.5 + SCSI driver cleanup and audit. 
This driver still needs work on the + following + - Non terminating hardware waits + - Some layering violations with its pcmcia stub + + Redistributable under terms of the GNU General Public License + + For the avoidance of doubt the "preferred form" of this code is one which + is in an open non patent encumbered format. Where cryptographic key signing + forms part of the process of creating an executable the information + including keys needed to generate an equivalently functional executable + are deemed to be part of the source code. + +*/ + +#include +#include /* to get disk capacity */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "qlogicfas408.h" + +/*----------------------------------------------------------------*/ +static int qlcfg5 = (XTALFREQ << 5); /* 15625/512 */ +static int qlcfg6 = SYNCXFRPD; +static int qlcfg7 = SYNCOFFST; +static int qlcfg8 = (SLOWCABLE << 7) | (QL_ENABLE_PARITY << 4); +static int qlcfg9 = ((XTALFREQ + 4) / 5); +static int qlcfgc = (FASTCLK << 3) | (FASTSCSI << 4); + +/*----------------------------------------------------------------*/ + +/*----------------------------------------------------------------*/ +/* local functions */ +/*----------------------------------------------------------------*/ + +/* error recovery - reset everything */ + +static void ql_zap(struct qlogicfas408_priv *priv) +{ + int x; + int qbase = priv->qbase; + int int_type = priv->int_type; + + x = inb(qbase + 0xd); + REG0; + outb(3, qbase + 3); /* reset SCSI */ + outb(2, qbase + 3); /* reset chip */ + if (x & 0x80) + REG1; +} + +/* + * Do a pseudo-dma tranfer + */ + +static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, + int reqlen) +{ + int j; + int qbase = priv->qbase; + j = 0; + if (phase & 1) { /* in */ +#if QL_TURBO_PDMA + rtrc(4) + /* empty fifo in large chunks */ + if (reqlen >= 128 && (inb(qbase + 8) & 2)) { /* full */ + insl(qbase + 4, request, 32); + reqlen -= 128; + request += 128; + } + while (reqlen >= 84 && !(j & 0xc0)) /* 2/3 */ + if ((j = inb(qbase + 8)) & 4) + { + insl(qbase + 4, request, 21); + reqlen -= 84; + request += 84; + } + if (reqlen >= 44 && (inb(qbase + 8) & 8)) { /* 1/3 */ + insl(qbase + 4, request, 11); + reqlen -= 44; + request += 44; + } +#endif + /* until both empty and int (or until reclen is 0) */ + rtrc(7) + j = 0; + while (reqlen && !((j & 0x10) && (j & 0xc0))) + { + /* while bytes to receive and not empty */ + j &= 0xc0; + while (reqlen && !((j = inb(qbase + 8)) & 0x10)) + { + *request++ = inb(qbase + 4); + reqlen--; + } + if (j & 0x10) + j = inb(qbase + 8); + + } + } else { /* out */ +#if QL_TURBO_PDMA + rtrc(4) + if (reqlen >= 128 && inb(qbase + 8) & 0x10) { /* empty */ + outsl(qbase + 4, request, 32); + reqlen -= 128; + request += 128; + } + while (reqlen >= 84 && !(j & 0xc0)) /* 1/3 */ + if (!((j = inb(qbase + 8)) & 8)) { + outsl(qbase + 4, request, 21); + reqlen -= 84; + request += 84; + } + if (reqlen >= 40 && !(inb(qbase + 8) & 4)) { /* 2/3 */ + outsl(qbase + 4, request, 10); + reqlen -= 40; + request += 40; + } +#endif + /* until full and int (or until reclen is 0) */ + rtrc(7) + j = 0; + while (reqlen && !((j & 2) && (j & 0xc0))) { + /* while bytes to send and not full */ + while (reqlen && !((j = inb(qbase + 8)) & 2)) + { + outb(*request++, qbase + 4); + reqlen--; + } + if (j & 2) + j = inb(qbase + 8); + } + } + /* maybe return reqlen */ + return inb(qbase + 8) & 
0xc0; +} + +/* + * Wait for interrupt flag (polled - not real hardware interrupt) + */ + +static int ql_wai(struct qlogicfas408_priv *priv) +{ + int k; + int qbase = priv->qbase; + unsigned long i; + + k = 0; + i = jiffies + WATCHDOG; + while (time_before(jiffies, i) && !priv->qabort && + !((k = inb(qbase + 4)) & 0xe0)) { + barrier(); + cpu_relax(); + } + if (time_after_eq(jiffies, i)) + return (DID_TIME_OUT); + if (priv->qabort) + return (priv->qabort == 1 ? DID_ABORT : DID_RESET); + if (k & 0x60) + ql_zap(priv); + if (k & 0x20) + return (DID_PARITY); + if (k & 0x40) + return (DID_ERROR); + return 0; +} + +/* + * Initiate scsi command - queueing handler + * caller must hold host lock + */ + +static void ql_icmd(struct scsi_cmnd *cmd) +{ + struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); + int qbase = priv->qbase; + int int_type = priv->int_type; + unsigned int i; + + priv->qabort = 0; + + REG0; + /* clearing of interrupts and the fifo is needed */ + + inb(qbase + 5); /* clear interrupts */ + if (inb(qbase + 5)) /* if still interrupting */ + outb(2, qbase + 3); /* reset chip */ + else if (inb(qbase + 7) & 0x1f) + outb(1, qbase + 3); /* clear fifo */ + while (inb(qbase + 5)); /* clear ints */ + REG1; + outb(1, qbase + 8); /* set for PIO pseudo DMA */ + outb(0, qbase + 0xb); /* disable ints */ + inb(qbase + 8); /* clear int bits */ + REG0; + outb(0x40, qbase + 0xb); /* enable features */ + + /* configurables */ + outb(qlcfgc, qbase + 0xc); + /* config: no reset interrupt, (initiator) bus id */ + outb(0x40 | qlcfg8 | priv->qinitid, qbase + 8); + outb(qlcfg7, qbase + 7); + outb(qlcfg6, qbase + 6); + outb(qlcfg5, qbase + 5); /* select timer */ + outb(qlcfg9 & 7, qbase + 9); /* prescaler */ +/* outb(0x99, qbase + 5); */ + outb(scmd_id(cmd), qbase + 4); + + for (i = 0; i < cmd->cmd_len; i++) + outb(cmd->cmnd[i], qbase + 2); + + priv->qlcmd = cmd; + outb(0x41, qbase + 3); /* select and send command */ +} + +/* + * Process scsi command - usually after interrupt + */ + +static void ql_pcmd(struct scsi_cmnd *cmd) +{ + unsigned int i, j; + unsigned long k; + unsigned int status; /* scsi returned status */ + unsigned int message; /* scsi returned message */ + unsigned int phase; /* recorded scsi phase */ + unsigned int reqlen; /* total length of transfer */ + char *buf; + struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); + int qbase = priv->qbase; + int int_type = priv->int_type; + + rtrc(1) + j = inb(qbase + 6); + i = inb(qbase + 5); + if (i == 0x20) { + set_host_byte(cmd, DID_NO_CONNECT); + return; + } + i |= inb(qbase + 5); /* the 0x10 bit can be set after the 0x08 */ + if (i != 0x18) { + printk(KERN_ERR "Ql:Bad Interrupt status:%02x\n", i); + ql_zap(priv); + set_host_byte(cmd, DID_BAD_INTR); + return; + } + j &= 7; /* j = inb( qbase + 7 ) >> 5; */ + + /* correct status is supposed to be step 4 */ + /* it sometimes returns step 3 but with 0 bytes left to send */ + /* We can try stuffing the FIFO with the max each time, but we will get a + sequence of 3 if any bytes are left (but we do flush the FIFO anyway */ + + if (j != 3 && j != 4) { + printk(KERN_ERR "Ql:Bad sequence for command %d, int %02X, cmdleft = %d\n", + j, i, inb(qbase + 7) & 0x1f); + ql_zap(priv); + set_host_byte(cmd, DID_ERROR); + return; + } + + if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */ + outb(1, qbase + 3); /* clear fifo */ + /* note that request_bufflen is the total xfer size when sg is used */ + reqlen = scsi_bufflen(cmd); + /* note that it won't work if transfers > 16M are requested */ + if (reqlen && 
!((phase = inb(qbase + 4)) & 6)) { /* data phase */ + struct scatterlist *sg; + rtrc(2) + outb(reqlen, qbase); /* low-mid xfer cnt */ + outb(reqlen >> 8, qbase + 1); /* low-mid xfer cnt */ + outb(reqlen >> 16, qbase + 0xe); /* high xfer cnt */ + outb(0x90, qbase + 3); /* command do xfer */ + /* PIO pseudo DMA to buffer or sglist */ + REG1; + + scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) { + if (priv->qabort) { + REG0; + set_host_byte(cmd, + priv->qabort == 1 ? + DID_ABORT : DID_RESET); + } + buf = sg_virt(sg); + if (ql_pdma(priv, phase, buf, sg->length)) + break; + } + REG0; + rtrc(2); + /* + * Wait for irq (split into second state of irq handler + * if this can take time) + */ + if ((k = ql_wai(priv))) { + set_host_byte(cmd, k); + return; + } + k = inb(qbase + 5); /* should be 0x10, bus service */ + } + + /* + * Enter Status (and Message In) Phase + */ + + k = jiffies + WATCHDOG; + + while (time_before(jiffies, k) && !priv->qabort && + !(inb(qbase + 4) & 6)) + cpu_relax(); /* wait for status phase */ + + if (time_after_eq(jiffies, k)) { + ql_zap(priv); + set_host_byte(cmd, DID_TIME_OUT); + return; + } + + /* FIXME: timeout ?? */ + while (inb(qbase + 5)) + cpu_relax(); /* clear pending ints */ + + if (priv->qabort) { + set_host_byte(cmd, + priv->qabort == 1 ? DID_ABORT : DID_RESET); + return; + } + + outb(0x11, qbase + 3); /* get status and message */ + if ((k = ql_wai(priv))) { + set_host_byte(cmd, k); + return; + } + i = inb(qbase + 5); /* get chip irq stat */ + j = inb(qbase + 7) & 0x1f; /* and bytes rec'd */ + status = inb(qbase + 2); + message = inb(qbase + 2); + + /* + * Should get function complete int if Status and message, else + * bus serv if only status + */ + if (!((i == 8 && j == 2) || (i == 0x10 && j == 1))) { + printk(KERN_ERR "Ql:Error during status phase, int=%02X, %d bytes recd\n", i, j); + set_host_byte(cmd, DID_ERROR); + } + outb(0x12, qbase + 3); /* done, disconnect */ + rtrc(1); + if ((k = ql_wai(priv))) { + set_host_byte(cmd, k); + return; + } + + /* + * Should get bus service interrupt and disconnect interrupt + */ + + i = inb(qbase + 5); /* should be bus service */ + while (!priv->qabort && ((i & 0x20) != 0x20)) { + barrier(); + cpu_relax(); + i |= inb(qbase + 5); + } + rtrc(0); + + if (priv->qabort) { + set_host_byte(cmd, + priv->qabort == 1 ? DID_ABORT : DID_RESET); + return; + } + + set_host_byte(cmd, DID_OK); + if (message != COMMAND_COMPLETE) + scsi_msg_to_host_byte(cmd, message); + set_status_byte(cmd, status); + return; +} + +/* + * Interrupt handler + */ + +static void ql_ihandl(void *dev_id) +{ + struct scsi_cmnd *icmd; + struct Scsi_Host *host = dev_id; + struct qlogicfas408_priv *priv = get_priv_by_host(host); + int qbase = priv->qbase; + REG0; + + if (!(inb(qbase + 4) & 0x80)) /* false alarm? */ + return; + + if (priv->qlcmd == NULL) { /* no command to process? 
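just drain any pending interrupt status below and return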
*/ + int i; + i = 16; + while (i-- && inb(qbase + 5)); /* maybe also ql_zap() */ + return; + } + icmd = priv->qlcmd; + ql_pcmd(icmd); + priv->qlcmd = NULL; + /* + * If result is CHECK CONDITION done calls qcommand to request + * sense + */ + scsi_done(icmd); +} + +irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id) +{ + unsigned long flags; + struct Scsi_Host *host = dev_id; + + spin_lock_irqsave(host->host_lock, flags); + ql_ihandl(dev_id); + spin_unlock_irqrestore(host->host_lock, flags); + return IRQ_HANDLED; +} + +/* + * Queued command + */ + +static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd) +{ + void (*done)(struct scsi_cmnd *) = scsi_done; + struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); + + set_host_byte(cmd, DID_OK); + set_status_byte(cmd, SAM_STAT_GOOD); + if (scmd_id(cmd) == priv->qinitid) { + set_host_byte(cmd, DID_BAD_TARGET); + done(cmd); + return 0; + } + + /* wait for the last command's interrupt to finish */ + while (priv->qlcmd != NULL) { + barrier(); + cpu_relax(); + } + ql_icmd(cmd); + return 0; +} + +DEF_SCSI_QCMD(qlogicfas408_queuecommand) + +/* + * Return bios parameters + */ + +int qlogicfas408_biosparam(struct scsi_device *disk, struct block_device *dev, + sector_t capacity, int ip[]) +{ +/* This should mimic the DOS Qlogic driver's behavior exactly */ + ip[0] = 0x40; + ip[1] = 0x20; + ip[2] = (unsigned long) capacity / (ip[0] * ip[1]); + if (ip[2] > 1024) { + ip[0] = 0xff; + ip[1] = 0x3f; + ip[2] = (unsigned long) capacity / (ip[0] * ip[1]); +#if 0 + if (ip[2] > 1023) + ip[2] = 1023; +#endif + } + return 0; +} + +/* + * Abort a command in progress + */ + +int qlogicfas408_abort(struct scsi_cmnd *cmd) +{ + struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); + priv->qabort = 1; + ql_zap(priv); + return SUCCESS; +} + +/* + * Reset SCSI bus + * FIXME: This function is invoked with cmd = NULL directly by + * the PCMCIA qlogic_stub code. 
This wants fixing + */ + +int qlogicfas408_host_reset(struct scsi_cmnd *cmd) +{ + struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); + unsigned long flags; + + priv->qabort = 2; + + spin_lock_irqsave(cmd->device->host->host_lock, flags); + ql_zap(priv); + spin_unlock_irqrestore(cmd->device->host->host_lock, flags); + + return SUCCESS; +} + +/* + * Return info string + */ + +const char *qlogicfas408_info(struct Scsi_Host *host) +{ + struct qlogicfas408_priv *priv = get_priv_by_host(host); + return priv->qinfo; +} + +/* + * Get type of chip + */ + +int qlogicfas408_get_chip_type(int qbase, int int_type) +{ + REG1; + return inb(qbase + 0xe) & 0xf8; +} + +/* + * Perform initialization tasks + */ + +void qlogicfas408_setup(int qbase, int id, int int_type) +{ + outb(1, qbase + 8); /* set for PIO pseudo DMA */ + REG0; + outb(0x40 | qlcfg8 | id, qbase + 8); /* (ini) bus id, disable scsi rst */ + outb(qlcfg5, qbase + 5); /* select timer */ + outb(qlcfg9, qbase + 9); /* prescaler */ + +#if QL_RESET_AT_START + outb(3, qbase + 3); + + REG1; + /* FIXME: timeout */ + while (inb(qbase + 0xf) & 4) + cpu_relax(); + + REG0; +#endif +} + +/* + * Checks if this is a QLogic FAS 408 + */ + +int qlogicfas408_detect(int qbase, int int_type) +{ + REG1; + return (((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7) && + ((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7)); +} + +/* + * Disable interrupts + */ + +void qlogicfas408_disable_ints(struct qlogicfas408_priv *priv) +{ + int qbase = priv->qbase; + int int_type = priv->int_type; + + REG1; + outb(0, qbase + 0xb); /* disable ints */ +} + +/* + * Init and exit functions + */ + +static int __init qlogicfas408_init(void) +{ + return 0; +} + +static void __exit qlogicfas408_exit(void) +{ + +} + +MODULE_AUTHOR("Tom Zerucha, Michael Griffith"); +MODULE_DESCRIPTION("Driver for the Qlogic FAS SCSI controllers"); +MODULE_LICENSE("GPL"); +module_init(qlogicfas408_init); +module_exit(qlogicfas408_exit); + +EXPORT_SYMBOL(qlogicfas408_info); +EXPORT_SYMBOL(qlogicfas408_queuecommand); +EXPORT_SYMBOL(qlogicfas408_abort); +EXPORT_SYMBOL(qlogicfas408_host_reset); +EXPORT_SYMBOL(qlogicfas408_biosparam); +EXPORT_SYMBOL(qlogicfas408_ihandl); +EXPORT_SYMBOL(qlogicfas408_get_chip_type); +EXPORT_SYMBOL(qlogicfas408_setup); +EXPORT_SYMBOL(qlogicfas408_detect); +EXPORT_SYMBOL(qlogicfas408_disable_ints); + diff --git a/drivers/scsi/qlogicfas408.h b/drivers/scsi/qlogicfas408.h new file mode 100644 index 000000000..a971db11d --- /dev/null +++ b/drivers/scsi/qlogicfas408.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* to be used by qlogicfas and qlogic_cs */ +#ifndef __QLOGICFAS408_H +#define __QLOGICFAS408_H + +/*----------------------------------------------------------------*/ +/* Configuration */ + +/* Set the following to max out the speed of the PIO PseudoDMA transfers, + again, 0 tends to be slower, but more stable. */ + +#define QL_TURBO_PDMA 1 + +/* This should be 1 to enable parity detection */ + +#define QL_ENABLE_PARITY 1 + +/* This will reset all devices when the driver is initialized (during bootup). + The other linux drivers don't do this, but the DOS drivers do, and after + using DOS or some kind of crash or lockup this will bring things back + without requiring a cold boot. It does take some time to recover from a + reset, so it is slower, and I have seen timeouts so that devices weren't + recognized when this was set. */ + +#define QL_RESET_AT_START 0 + +/* crystal frequency in megahertz (for offset 5 and 9) + Please set this for your card. 
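+   (qlogicfas408.c derives its timer and prescaler settings from this
+   value as qlcfg5 = XTALFREQ << 5 and qlcfg9 = (XTALFREQ + 4) / 5, so
+   the default of 40 gives 1280 and 8 respectively.)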
Most Qlogic cards are 40 Mhz. The + Control Concepts ISA (not VLB) is 24 Mhz */ + +#define XTALFREQ 40 + +/**********/ +/* DANGER! modify these at your own risk */ +/* SLOWCABLE can usually be reset to zero if you have a clean setup and + proper termination. The rest are for synchronous transfers and other + advanced features if your device can transfer faster than 5Mb/sec. + If you are really curious, email me for a quick howto until I have + something official */ +/**********/ + +/*****/ +/* config register 1 (offset 8) options */ +/* This needs to be set to 1 if your cabling is long or noisy */ +#define SLOWCABLE 1 + +/*****/ +/* offset 0xc */ +/* This will set fast (10Mhz) synchronous timing when set to 1 + For this to have an effect, FASTCLK must also be 1 */ +#define FASTSCSI 0 + +/* This when set to 1 will set a faster sync transfer rate */ +#define FASTCLK 0 /*(XTALFREQ>25?1:0)*/ + +/*****/ +/* offset 6 */ +/* This is the sync transfer divisor, XTALFREQ/X will be the maximum + achievable data rate (assuming the rest of the system is capable + and set properly) */ +#define SYNCXFRPD 5 /*(XTALFREQ/5)*/ + +/*****/ +/* offset 7 */ +/* This is the count of how many synchronous transfers can take place + i.e. how many reqs can occur before an ack is given. + The maximum value for this is 15, the upper bits can modify + REQ/ACK assertion and deassertion during synchronous transfers + If this is 0, the bus will only transfer asynchronously */ +#define SYNCOFFST 0 +/* for the curious, bits 7&6 control the deassertion delay in 1/2 cycles + of the 40Mhz clock. If FASTCLK is 1, specifying 01 (1/2) will + cause the deassertion to be early by 1/2 clock. Bits 5&4 control + the assertion delay, also in 1/2 clocks (FASTCLK is ignored here). */ + +/*----------------------------------------------------------------*/ + +struct qlogicfas408_priv { + int qbase; /* Port */ + int qinitid; /* initiator ID */ + int qabort; /* Flag to cause an abort */ + int qlirq; /* IRQ being used */ + int int_type; /* type of irq, 2 for ISA board, 0 for PCMCIA */ + char qinfo[80]; /* description */ + struct scsi_cmnd *qlcmd; /* current command being processed */ + struct Scsi_Host *shost; /* pointer back to host */ + struct qlogicfas408_priv *next; /* next private struct */ +}; + +/* The qlogic card uses two register maps - These macros select which one */ +#define REG0 ( outb( inb( qbase + 0xd ) & 0x7f , qbase + 0xd ), outb( 4 , qbase + 0xd )) +#define REG1 ( outb( inb( qbase + 0xd ) | 0x80 , qbase + 0xd ), outb( 0xb4 | int_type, qbase + 0xd )) + +/* following is watchdog timeout in microseconds */ +#define WATCHDOG 5000000 + +/*----------------------------------------------------------------*/ +/* the following will set the monitor border color (useful to find + where something crashed or gets stuck at and as a simple profiler) */ + +#define rtrc(i) {} + +#define get_priv_by_cmd(x) (struct qlogicfas408_priv *)&((x)->device->host->hostdata[0]) +#define get_priv_by_host(x) (struct qlogicfas408_priv *)&((x)->hostdata[0]) + +irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id); +int qlogicfas408_queuecommand(struct Scsi_Host *h, struct scsi_cmnd * cmd); +int qlogicfas408_biosparam(struct scsi_device * disk, + struct block_device *dev, + sector_t capacity, int ip[]); +int qlogicfas408_abort(struct scsi_cmnd * cmd); +extern int qlogicfas408_host_reset(struct scsi_cmnd *cmd); +const char *qlogicfas408_info(struct Scsi_Host *host); +int qlogicfas408_get_chip_type(int qbase, int int_type); +void qlogicfas408_setup(int qbase, 
int id, int int_type); +int qlogicfas408_detect(int qbase, int int_type); +void qlogicfas408_disable_ints(struct qlogicfas408_priv *priv); +#endif /* __QLOGICFAS408_H */ + diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c new file mode 100644 index 000000000..3b95f7a62 --- /dev/null +++ b/drivers/scsi/qlogicpti.c @@ -0,0 +1,1476 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* qlogicpti.c: Performance Technologies QlogicISP sbus card driver. + * + * Copyright (C) 1996, 2006, 2008 David S. Miller (davem@davemloft.net) + * + * A lot of this driver was directly stolen from Erik H. Moe's PCI + * Qlogic ISP driver. Mucho kudos to him for this code. + * + * An even bigger kudos to John Grana at Performance Technologies + * for providing me with the hardware to write this driver, you rule + * John you really do. + * + * May, 2, 1997: Added support for QLGC,isp --jj + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "qlogicpti.h" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#define MAX_TARGETS 16 +#define MAX_LUNS 8 /* 32 for 1.31 F/W */ + +#define DEFAULT_LOOP_COUNT 10000 + +static struct qlogicpti *qptichain = NULL; +static DEFINE_SPINLOCK(qptichain_lock); + +#define PACKB(a, b) (((a)<<4)|(b)) + +static const u_char mbox_param[] = { + PACKB(1, 1), /* MBOX_NO_OP */ + PACKB(5, 5), /* MBOX_LOAD_RAM */ + PACKB(2, 0), /* MBOX_EXEC_FIRMWARE */ + PACKB(5, 5), /* MBOX_DUMP_RAM */ + PACKB(3, 3), /* MBOX_WRITE_RAM_WORD */ + PACKB(2, 3), /* MBOX_READ_RAM_WORD */ + PACKB(6, 6), /* MBOX_MAILBOX_REG_TEST */ + PACKB(2, 3), /* MBOX_VERIFY_CHECKSUM */ + PACKB(1, 3), /* MBOX_ABOUT_FIRMWARE */ + PACKB(0, 0), /* 0x0009 */ + PACKB(0, 0), /* 0x000a */ + PACKB(0, 0), /* 0x000b */ + PACKB(0, 0), /* 0x000c */ + PACKB(0, 0), /* 0x000d */ + PACKB(1, 2), /* MBOX_CHECK_FIRMWARE */ + PACKB(0, 0), /* 0x000f */ + PACKB(5, 5), /* MBOX_INIT_REQ_QUEUE */ + PACKB(6, 6), /* MBOX_INIT_RES_QUEUE */ + PACKB(4, 4), /* MBOX_EXECUTE_IOCB */ + PACKB(2, 2), /* MBOX_WAKE_UP */ + PACKB(1, 6), /* MBOX_STOP_FIRMWARE */ + PACKB(4, 4), /* MBOX_ABORT */ + PACKB(2, 2), /* MBOX_ABORT_DEVICE */ + PACKB(3, 3), /* MBOX_ABORT_TARGET */ + PACKB(2, 2), /* MBOX_BUS_RESET */ + PACKB(2, 3), /* MBOX_STOP_QUEUE */ + PACKB(2, 3), /* MBOX_START_QUEUE */ + PACKB(2, 3), /* MBOX_SINGLE_STEP_QUEUE */ + PACKB(2, 3), /* MBOX_ABORT_QUEUE */ + PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_STATUS */ + PACKB(0, 0), /* 0x001e */ + PACKB(1, 3), /* MBOX_GET_FIRMWARE_STATUS */ + PACKB(1, 2), /* MBOX_GET_INIT_SCSI_ID */ + PACKB(1, 2), /* MBOX_GET_SELECT_TIMEOUT */ + PACKB(1, 3), /* MBOX_GET_RETRY_COUNT */ + PACKB(1, 2), /* MBOX_GET_TAG_AGE_LIMIT */ + PACKB(1, 2), /* MBOX_GET_CLOCK_RATE */ + PACKB(1, 2), /* MBOX_GET_ACT_NEG_STATE */ + PACKB(1, 2), /* MBOX_GET_ASYNC_DATA_SETUP_TIME */ + PACKB(1, 3), /* MBOX_GET_SBUS_PARAMS */ + PACKB(2, 4), /* MBOX_GET_TARGET_PARAMS */ + PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_PARAMS */ + PACKB(0, 0), /* 0x002a */ + PACKB(0, 0), /* 0x002b */ + PACKB(0, 0), /* 0x002c */ + PACKB(0, 0), /* 0x002d */ + PACKB(0, 0), /* 0x002e */ + PACKB(0, 0), /* 0x002f */ + PACKB(2, 2), /* MBOX_SET_INIT_SCSI_ID */ + PACKB(2, 2), /* MBOX_SET_SELECT_TIMEOUT */ + PACKB(3, 3), /* MBOX_SET_RETRY_COUNT */ + PACKB(2, 2), /* MBOX_SET_TAG_AGE_LIMIT */ + PACKB(2, 2), /* MBOX_SET_CLOCK_RATE */ + PACKB(2, 2), /* MBOX_SET_ACTIVE_NEG_STATE */ + 
PACKB(2, 2), /* MBOX_SET_ASYNC_DATA_SETUP_TIME */ + PACKB(3, 3), /* MBOX_SET_SBUS_CONTROL_PARAMS */ + PACKB(4, 4), /* MBOX_SET_TARGET_PARAMS */ + PACKB(4, 4), /* MBOX_SET_DEV_QUEUE_PARAMS */ + PACKB(0, 0), /* 0x003a */ + PACKB(0, 0), /* 0x003b */ + PACKB(0, 0), /* 0x003c */ + PACKB(0, 0), /* 0x003d */ + PACKB(0, 0), /* 0x003e */ + PACKB(0, 0), /* 0x003f */ + PACKB(0, 0), /* 0x0040 */ + PACKB(0, 0), /* 0x0041 */ + PACKB(0, 0) /* 0x0042 */ +}; + +#define MAX_MBOX_COMMAND ARRAY_SIZE(mbox_param) + +/* queue length's _must_ be power of two: */ +#define QUEUE_DEPTH(in, out, ql) ((in - out) & (ql)) +#define REQ_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, \ + QLOGICPTI_REQ_QUEUE_LEN) +#define RES_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, RES_QUEUE_LEN) + +static inline void qlogicpti_enable_irqs(struct qlogicpti *qpti) +{ + sbus_writew(SBUS_CTRL_ERIRQ | SBUS_CTRL_GENAB, + qpti->qregs + SBUS_CTRL); +} + +static inline void qlogicpti_disable_irqs(struct qlogicpti *qpti) +{ + sbus_writew(0, qpti->qregs + SBUS_CTRL); +} + +static inline void set_sbus_cfg1(struct qlogicpti *qpti) +{ + u16 val; + u8 bursts = qpti->bursts; + +#if 0 /* It appears that at least PTI cards do not support + * 64-byte bursts and that setting the B64 bit actually + * is a nop and the chip ends up using the smallest burst + * size. -DaveM + */ + if (sbus_can_burst64() && (bursts & DMA_BURST64)) { + val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64); + } else +#endif + if (bursts & DMA_BURST32) { + val = (SBUS_CFG1_BENAB | SBUS_CFG1_B32); + } else if (bursts & DMA_BURST16) { + val = (SBUS_CFG1_BENAB | SBUS_CFG1_B16); + } else if (bursts & DMA_BURST8) { + val = (SBUS_CFG1_BENAB | SBUS_CFG1_B8); + } else { + val = 0; /* No sbus bursts for you... */ + } + sbus_writew(val, qpti->qregs + SBUS_CFG1); +} + +static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int force) +{ + int loop_count; + u16 tmp; + + if (mbox_param[param[0]] == 0) + return 1; + + /* Set SBUS semaphore. */ + tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE); + tmp |= SBUS_SEMAPHORE_LCK; + sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE); + + /* Wait for host IRQ bit to clear. */ + loop_count = DEFAULT_LOOP_COUNT; + while (--loop_count && (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_HIRQ)) { + barrier(); + cpu_relax(); + } + if (!loop_count) + printk(KERN_EMERG "qlogicpti%d: mbox_command loop timeout #1\n", + qpti->qpti_id); + + /* Write mailbox command registers. */ + switch (mbox_param[param[0]] >> 4) { + case 6: sbus_writew(param[5], qpti->qregs + MBOX5); + fallthrough; + case 5: sbus_writew(param[4], qpti->qregs + MBOX4); + fallthrough; + case 4: sbus_writew(param[3], qpti->qregs + MBOX3); + fallthrough; + case 3: sbus_writew(param[2], qpti->qregs + MBOX2); + fallthrough; + case 2: sbus_writew(param[1], qpti->qregs + MBOX1); + fallthrough; + case 1: sbus_writew(param[0], qpti->qregs + MBOX0); + } + + /* Clear RISC interrupt. */ + tmp = sbus_readw(qpti->qregs + HCCTRL); + tmp |= HCCTRL_CRIRQ; + sbus_writew(tmp, qpti->qregs + HCCTRL); + + /* Clear SBUS semaphore. */ + sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); + + /* Set HOST interrupt. */ + tmp = sbus_readw(qpti->qregs + HCCTRL); + tmp |= HCCTRL_SHIRQ; + sbus_writew(tmp, qpti->qregs + HCCTRL); + + /* Wait for HOST interrupt clears. 
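+ * The number of mailbox registers written above and read back further
+ * down comes from the PACKB() nibbles in mbox_param[]: the high nibble
+ * is the count of input registers, the low nibble the count of result
+ * registers (e.g. MBOX_ABOUT_FIRMWARE is PACKB(1, 3) - one register
+ * written, three read back).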
*/ + loop_count = DEFAULT_LOOP_COUNT; + while (--loop_count && + (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_CRIRQ)) + udelay(20); + if (!loop_count) + printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #2\n", + qpti->qpti_id, param[0]); + + /* Wait for SBUS semaphore to get set. */ + loop_count = DEFAULT_LOOP_COUNT; + while (--loop_count && + !(sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK)) { + udelay(20); + + /* Workaround for some buggy chips. */ + if (sbus_readw(qpti->qregs + MBOX0) & 0x4000) + break; + } + if (!loop_count) + printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #3\n", + qpti->qpti_id, param[0]); + + /* Wait for MBOX busy condition to go away. */ + loop_count = DEFAULT_LOOP_COUNT; + while (--loop_count && (sbus_readw(qpti->qregs + MBOX0) == 0x04)) + udelay(20); + if (!loop_count) + printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #4\n", + qpti->qpti_id, param[0]); + + /* Read back output parameters. */ + switch (mbox_param[param[0]] & 0xf) { + case 6: param[5] = sbus_readw(qpti->qregs + MBOX5); + fallthrough; + case 5: param[4] = sbus_readw(qpti->qregs + MBOX4); + fallthrough; + case 4: param[3] = sbus_readw(qpti->qregs + MBOX3); + fallthrough; + case 3: param[2] = sbus_readw(qpti->qregs + MBOX2); + fallthrough; + case 2: param[1] = sbus_readw(qpti->qregs + MBOX1); + fallthrough; + case 1: param[0] = sbus_readw(qpti->qregs + MBOX0); + } + + /* Clear RISC interrupt. */ + tmp = sbus_readw(qpti->qregs + HCCTRL); + tmp |= HCCTRL_CRIRQ; + sbus_writew(tmp, qpti->qregs + HCCTRL); + + /* Release SBUS semaphore. */ + tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE); + tmp &= ~(SBUS_SEMAPHORE_LCK); + sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE); + + /* We're done. */ + return 0; +} + +static inline void qlogicpti_set_hostdev_defaults(struct qlogicpti *qpti) +{ + int i; + + qpti->host_param.initiator_scsi_id = qpti->scsi_id; + qpti->host_param.bus_reset_delay = 3; + qpti->host_param.retry_count = 0; + qpti->host_param.retry_delay = 5; + qpti->host_param.async_data_setup_time = 3; + qpti->host_param.req_ack_active_negation = 1; + qpti->host_param.data_line_active_negation = 1; + qpti->host_param.data_dma_burst_enable = 1; + qpti->host_param.command_dma_burst_enable = 1; + qpti->host_param.tag_aging = 8; + qpti->host_param.selection_timeout = 250; + qpti->host_param.max_queue_depth = 256; + + for(i = 0; i < MAX_TARGETS; i++) { + /* + * disconnect, parity, arq, reneg on reset, and, oddly enough + * tags...the midlayer's notion of tagged support has to match + * our device settings, and since we base whether we enable a + * tag on a per-cmnd basis upon what the midlayer sez, we + * actually enable the capability here. + */ + qpti->dev_param[i].device_flags = 0xcd; + qpti->dev_param[i].execution_throttle = 16; + if (qpti->ultra) { + qpti->dev_param[i].synchronous_period = 12; + qpti->dev_param[i].synchronous_offset = 8; + } else { + qpti->dev_param[i].synchronous_period = 25; + qpti->dev_param[i].synchronous_offset = 12; + } + qpti->dev_param[i].device_enable = 1; + } +} + +static int qlogicpti_reset_hardware(struct Scsi_Host *host) +{ + struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; + u_short param[6]; + unsigned short risc_code_addr; + int loop_count, i; + unsigned long flags; + + risc_code_addr = 0x1000; /* all load addresses are at 0x1000 */ + + spin_lock_irqsave(host->host_lock, flags); + + sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL); + + /* Only reset the scsi bus if it is not free. 
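That is, only if CPU_PCTRL still reports the bus as busy.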
*/ + if (sbus_readw(qpti->qregs + CPU_PCTRL) & CPU_PCTRL_BSY) { + sbus_writew(CPU_ORIDE_RMOD, qpti->qregs + CPU_ORIDE); + sbus_writew(CPU_CMD_BRESET, qpti->qregs + CPU_CMD); + udelay(400); + } + + sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL); + sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL); + sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL); + + loop_count = DEFAULT_LOOP_COUNT; + while (--loop_count && ((sbus_readw(qpti->qregs + MBOX0) & 0xff) == 0x04)) + udelay(20); + if (!loop_count) + printk(KERN_EMERG "qlogicpti%d: reset_hardware loop timeout\n", + qpti->qpti_id); + + sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL); + set_sbus_cfg1(qpti); + qlogicpti_enable_irqs(qpti); + + if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) { + qpti->ultra = 1; + sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA), + qpti->qregs + RISC_MTREG); + } else { + qpti->ultra = 0; + sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT), + qpti->qregs + RISC_MTREG); + } + + /* reset adapter and per-device default values. */ + /* do it after finding out whether we're ultra mode capable */ + qlogicpti_set_hostdev_defaults(qpti); + + /* Release the RISC processor. */ + sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); + + /* Get RISC to start executing the firmware code. */ + param[0] = MBOX_EXEC_FIRMWARE; + param[1] = risc_code_addr; + if (qlogicpti_mbox_command(qpti, param, 1)) { + printk(KERN_EMERG "qlogicpti%d: Cannot execute ISP firmware.\n", + qpti->qpti_id); + spin_unlock_irqrestore(host->host_lock, flags); + return 1; + } + + /* Set initiator scsi ID. */ + param[0] = MBOX_SET_INIT_SCSI_ID; + param[1] = qpti->host_param.initiator_scsi_id; + if (qlogicpti_mbox_command(qpti, param, 1) || + (param[0] != MBOX_COMMAND_COMPLETE)) { + printk(KERN_EMERG "qlogicpti%d: Cannot set initiator SCSI ID.\n", + qpti->qpti_id); + spin_unlock_irqrestore(host->host_lock, flags); + return 1; + } + + /* Initialize state of the queues, both hw and sw. 
*/ + qpti->req_in_ptr = qpti->res_out_ptr = 0; + + param[0] = MBOX_INIT_RES_QUEUE; + param[1] = RES_QUEUE_LEN + 1; + param[2] = (u_short) (qpti->res_dvma >> 16); + param[3] = (u_short) (qpti->res_dvma & 0xffff); + param[4] = param[5] = 0; + if (qlogicpti_mbox_command(qpti, param, 1)) { + printk(KERN_EMERG "qlogicpti%d: Cannot init response queue.\n", + qpti->qpti_id); + spin_unlock_irqrestore(host->host_lock, flags); + return 1; + } + + param[0] = MBOX_INIT_REQ_QUEUE; + param[1] = QLOGICPTI_REQ_QUEUE_LEN + 1; + param[2] = (u_short) (qpti->req_dvma >> 16); + param[3] = (u_short) (qpti->req_dvma & 0xffff); + param[4] = param[5] = 0; + if (qlogicpti_mbox_command(qpti, param, 1)) { + printk(KERN_EMERG "qlogicpti%d: Cannot init request queue.\n", + qpti->qpti_id); + spin_unlock_irqrestore(host->host_lock, flags); + return 1; + } + + param[0] = MBOX_SET_RETRY_COUNT; + param[1] = qpti->host_param.retry_count; + param[2] = qpti->host_param.retry_delay; + qlogicpti_mbox_command(qpti, param, 0); + + param[0] = MBOX_SET_TAG_AGE_LIMIT; + param[1] = qpti->host_param.tag_aging; + qlogicpti_mbox_command(qpti, param, 0); + + for (i = 0; i < MAX_TARGETS; i++) { + param[0] = MBOX_GET_DEV_QUEUE_PARAMS; + param[1] = (i << 8); + qlogicpti_mbox_command(qpti, param, 0); + } + + param[0] = MBOX_GET_FIRMWARE_STATUS; + qlogicpti_mbox_command(qpti, param, 0); + + param[0] = MBOX_SET_SELECT_TIMEOUT; + param[1] = qpti->host_param.selection_timeout; + qlogicpti_mbox_command(qpti, param, 0); + + for (i = 0; i < MAX_TARGETS; i++) { + param[0] = MBOX_SET_TARGET_PARAMS; + param[1] = (i << 8); + param[2] = (qpti->dev_param[i].device_flags << 8); + /* + * Since we're now loading 1.31 f/w, force narrow/async. + */ + param[2] |= 0xc0; + param[3] = 0; /* no offset, we do not have sync mode yet */ + qlogicpti_mbox_command(qpti, param, 0); + } + + /* + * Always (sigh) do an initial bus reset (kicks f/w). + */ + param[0] = MBOX_BUS_RESET; + param[1] = qpti->host_param.bus_reset_delay; + qlogicpti_mbox_command(qpti, param, 0); + qpti->send_marker = 1; + + spin_unlock_irqrestore(host->host_lock, flags); + return 0; +} + +#define PTI_RESET_LIMIT 400 + +static int qlogicpti_load_firmware(struct qlogicpti *qpti) +{ + const struct firmware *fw; + const char fwname[] = "qlogic/isp1000.bin"; + const __le16 *fw_data; + struct Scsi_Host *host = qpti->qhost; + unsigned short csum = 0; + unsigned short param[6]; + unsigned short risc_code_addr, risc_code_length; + int err; + unsigned long flags; + int i, timeout; + + err = request_firmware(&fw, fwname, &qpti->op->dev); + if (err) { + printk(KERN_ERR "Failed to load image \"%s\" err %d\n", + fwname, err); + return err; + } + if (fw->size % 2) { + printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", + fw->size, fwname); + err = -EINVAL; + goto outfirm; + } + fw_data = (const __le16 *)&fw->data[0]; + risc_code_addr = 0x1000; /* all f/w modules load at 0x1000 */ + risc_code_length = fw->size / 2; + + spin_lock_irqsave(host->host_lock, flags); + + /* Verify the checksum twice, one before loading it, and once + * afterwards via the mailbox commands. 
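The MBOX_INIT_RES_QUEUE/MBOX_INIT_REQ_QUEUE calls above hand the 32-bit queue DVMA base to the firmware as two 16-bit mailbox words, high half in param[2] and low half in param[3]. A standalone sketch of the split and recombination (the address value is hypothetical):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t dvma = 0x00fc8000;          /* hypothetical queue base address */
	uint16_t hi = dvma >> 16;            /* what goes into param[2]         */
	uint16_t lo = dvma & 0xffff;         /* what goes into param[3]         */

	assert((((uint32_t)hi << 16) | lo) == dvma);
	return 0;
}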
+ */ + for (i = 0; i < risc_code_length; i++) + csum += __le16_to_cpu(fw_data[i]); + if (csum) { + printk(KERN_EMERG "qlogicpti%d: Aieee, firmware checksum failed!", + qpti->qpti_id); + err = 1; + goto out; + } + sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL); + sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL); + sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL); + timeout = PTI_RESET_LIMIT; + while (--timeout && (sbus_readw(qpti->qregs + SBUS_CTRL) & SBUS_CTRL_RESET)) + udelay(20); + if (!timeout) { + printk(KERN_EMERG "qlogicpti%d: Cannot reset the ISP.", qpti->qpti_id); + err = 1; + goto out; + } + + sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL); + mdelay(1); + + sbus_writew((SBUS_CTRL_GENAB | SBUS_CTRL_ERIRQ), qpti->qregs + SBUS_CTRL); + set_sbus_cfg1(qpti); + sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); + + if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) { + qpti->ultra = 1; + sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA), + qpti->qregs + RISC_MTREG); + } else { + qpti->ultra = 0; + sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT), + qpti->qregs + RISC_MTREG); + } + + sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); + + /* Pin lines are only stable while RISC is paused. */ + sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL); + if (sbus_readw(qpti->qregs + CPU_PDIFF) & CPU_PDIFF_MODE) + qpti->differential = 1; + else + qpti->differential = 0; + sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); + + /* This shouldn't be necessary- we've reset things so we should be + running from the ROM now.. */ + + param[0] = MBOX_STOP_FIRMWARE; + param[1] = param[2] = param[3] = param[4] = param[5] = 0; + if (qlogicpti_mbox_command(qpti, param, 1)) { + printk(KERN_EMERG "qlogicpti%d: Cannot stop firmware for reload.\n", + qpti->qpti_id); + err = 1; + goto out; + } + + /* Load it up.. */ + for (i = 0; i < risc_code_length; i++) { + param[0] = MBOX_WRITE_RAM_WORD; + param[1] = risc_code_addr + i; + param[2] = __le16_to_cpu(fw_data[i]); + if (qlogicpti_mbox_command(qpti, param, 1) || + param[0] != MBOX_COMMAND_COMPLETE) { + printk("qlogicpti%d: Firmware dload failed, I'm bolixed!\n", + qpti->qpti_id); + err = 1; + goto out; + } + } + + /* Reset the ISP again. */ + sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL); + mdelay(1); + + qlogicpti_enable_irqs(qpti); + sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); + sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); + + /* Ask ISP to verify the checksum of the new code. */ + param[0] = MBOX_VERIFY_CHECKSUM; + param[1] = risc_code_addr; + if (qlogicpti_mbox_command(qpti, param, 1) || + (param[0] != MBOX_COMMAND_COMPLETE)) { + printk(KERN_EMERG "qlogicpti%d: New firmware csum failure!\n", + qpti->qpti_id); + err = 1; + goto out; + } + + /* Start using newly downloaded firmware. */ + param[0] = MBOX_EXEC_FIRMWARE; + param[1] = risc_code_addr; + qlogicpti_mbox_command(qpti, param, 1); + + param[0] = MBOX_ABOUT_FIRMWARE; + if (qlogicpti_mbox_command(qpti, param, 1) || + (param[0] != MBOX_COMMAND_COMPLETE)) { + printk(KERN_EMERG "qlogicpti%d: AboutFirmware cmd fails.\n", + qpti->qpti_id); + err = 1; + goto out; + } + + /* Snag the major and minor revisions from the result. 
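The loader above sums every little-endian 16-bit word of the firmware image and rejects it unless the 16-bit sum is zero, which presumably means a valid image carries a compensating word. A standalone sketch of that additive checksum with a made-up four-word "image":

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t image[4] = { 0x1234, 0x0042, 0xbeef, 0 };   /* made-up words */
	uint16_t sum = 0;
	int i;

	for (i = 0; i < 3; i++)
		sum += image[i];
	image[3] = (uint16_t)-sum;          /* fix-up word makes the 16-bit sum zero */

	sum = 0;
	for (i = 0; i < 4; i++)
		sum += image[i];
	assert(sum == 0);                   /* the condition the driver tests */
	return 0;
}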
*/ + qpti->fware_majrev = param[1]; + qpti->fware_minrev = param[2]; + qpti->fware_micrev = param[3]; + + /* Set the clock rate */ + param[0] = MBOX_SET_CLOCK_RATE; + param[1] = qpti->clock; + if (qlogicpti_mbox_command(qpti, param, 1) || + (param[0] != MBOX_COMMAND_COMPLETE)) { + printk(KERN_EMERG "qlogicpti%d: could not set clock rate.\n", + qpti->qpti_id); + err = 1; + goto out; + } + + if (qpti->is_pti != 0) { + /* Load scsi initiator ID and interrupt level into sbus static ram. */ + param[0] = MBOX_WRITE_RAM_WORD; + param[1] = 0xff80; + param[2] = (unsigned short) qpti->scsi_id; + qlogicpti_mbox_command(qpti, param, 1); + + param[0] = MBOX_WRITE_RAM_WORD; + param[1] = 0xff00; + param[2] = (unsigned short) 3; + qlogicpti_mbox_command(qpti, param, 1); + } + +out: + spin_unlock_irqrestore(host->host_lock, flags); +outfirm: + release_firmware(fw); + return err; +} + +static int qlogicpti_verify_tmon(struct qlogicpti *qpti) +{ + int curstat = sbus_readb(qpti->sreg); + + curstat &= 0xf0; + if (!(curstat & SREG_FUSE) && (qpti->swsreg & SREG_FUSE)) + printk("qlogicpti%d: Fuse returned to normal state.\n", qpti->qpti_id); + if (!(curstat & SREG_TPOWER) && (qpti->swsreg & SREG_TPOWER)) + printk("qlogicpti%d: termpwr back to normal state.\n", qpti->qpti_id); + if (curstat != qpti->swsreg) { + int error = 0; + if (curstat & SREG_FUSE) { + error++; + printk("qlogicpti%d: Fuse is open!\n", qpti->qpti_id); + } + if (curstat & SREG_TPOWER) { + error++; + printk("qlogicpti%d: termpwr failure\n", qpti->qpti_id); + } + if (qpti->differential && + (curstat & SREG_DSENSE) != SREG_DSENSE) { + error++; + printk("qlogicpti%d: You have a single ended device on a " + "differential bus! Please fix!\n", qpti->qpti_id); + } + qpti->swsreg = curstat; + return error; + } + return 0; +} + +static irqreturn_t qpti_intr(int irq, void *dev_id); + +static void qpti_chain_add(struct qlogicpti *qpti) +{ + spin_lock_irq(&qptichain_lock); + if (qptichain != NULL) { + struct qlogicpti *qlink = qptichain; + + while(qlink->next) + qlink = qlink->next; + qlink->next = qpti; + } else { + qptichain = qpti; + } + qpti->next = NULL; + spin_unlock_irq(&qptichain_lock); +} + +static void qpti_chain_del(struct qlogicpti *qpti) +{ + spin_lock_irq(&qptichain_lock); + if (qptichain == qpti) { + qptichain = qpti->next; + } else { + struct qlogicpti *qlink = qptichain; + while(qlink->next != qpti) + qlink = qlink->next; + qlink->next = qpti->next; + } + qpti->next = NULL; + spin_unlock_irq(&qptichain_lock); +} + +static int qpti_map_regs(struct qlogicpti *qpti) +{ + struct platform_device *op = qpti->op; + + qpti->qregs = of_ioremap(&op->resource[0], 0, + resource_size(&op->resource[0]), + "PTI Qlogic/ISP"); + if (!qpti->qregs) { + printk("PTI: Qlogic/ISP registers are unmappable\n"); + return -ENODEV; + } + if (qpti->is_pti) { + qpti->sreg = of_ioremap(&op->resource[0], (16 * 4096), + sizeof(unsigned char), + "PTI Qlogic/ISP statreg"); + if (!qpti->sreg) { + printk("PTI: Qlogic/ISP status register is unmappable\n"); + return -ENODEV; + } + } + return 0; +} + +static int qpti_register_irq(struct qlogicpti *qpti) +{ + struct platform_device *op = qpti->op; + + qpti->qhost->irq = qpti->irq = op->archdata.irqs[0]; + + /* We used to try various overly-clever things to + * reduce the interrupt processing overhead on + * sun4c/sun4m when multiple PTI's shared the + * same IRQ. It was too complex and messy to + * sanely maintain. 
+ */ + if (request_irq(qpti->irq, qpti_intr, + IRQF_SHARED, "QlogicPTI", qpti)) + goto fail; + + printk("qlogicpti%d: IRQ %d ", qpti->qpti_id, qpti->irq); + + return 0; + +fail: + printk("qlogicpti%d: Cannot acquire irq line\n", qpti->qpti_id); + return -1; +} + +static void qpti_get_scsi_id(struct qlogicpti *qpti) +{ + struct platform_device *op = qpti->op; + struct device_node *dp; + + dp = op->dev.of_node; + + qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1); + if (qpti->scsi_id == -1) + qpti->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", + -1); + if (qpti->scsi_id == -1) + qpti->scsi_id = + of_getintprop_default(dp->parent, + "scsi-initiator-id", 7); + qpti->qhost->this_id = qpti->scsi_id; + qpti->qhost->max_sectors = 64; + + printk("SCSI ID %d ", qpti->scsi_id); +} + +static void qpti_get_bursts(struct qlogicpti *qpti) +{ + struct platform_device *op = qpti->op; + u8 bursts, bmask; + + bursts = of_getintprop_default(op->dev.of_node, "burst-sizes", 0xff); + bmask = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0xff); + if (bmask != 0xff) + bursts &= bmask; + if (bursts == 0xff || + (bursts & DMA_BURST16) == 0 || + (bursts & DMA_BURST32) == 0) + bursts = (DMA_BURST32 - 1); + + qpti->bursts = bursts; +} + +static void qpti_get_clock(struct qlogicpti *qpti) +{ + unsigned int cfreq; + + /* Check for what the clock input to this card is. + * Default to 40Mhz. + */ + cfreq = prom_getintdefault(qpti->prom_node,"clock-frequency",40000000); + qpti->clock = (cfreq + 500000)/1000000; + if (qpti->clock == 0) /* bullshit */ + qpti->clock = 40; +} + +/* The request and response queues must each be aligned + * on a page boundary. + */ +static int qpti_map_queues(struct qlogicpti *qpti) +{ + struct platform_device *op = qpti->op; + +#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) + qpti->res_cpu = dma_alloc_coherent(&op->dev, + QSIZE(RES_QUEUE_LEN), + &qpti->res_dvma, GFP_ATOMIC); + if (qpti->res_cpu == NULL || + qpti->res_dvma == 0) { + printk("QPTI: Cannot map response queue.\n"); + return -1; + } + + qpti->req_cpu = dma_alloc_coherent(&op->dev, + QSIZE(QLOGICPTI_REQ_QUEUE_LEN), + &qpti->req_dvma, GFP_ATOMIC); + if (qpti->req_cpu == NULL || + qpti->req_dvma == 0) { + dma_free_coherent(&op->dev, QSIZE(RES_QUEUE_LEN), + qpti->res_cpu, qpti->res_dvma); + printk("QPTI: Cannot map request queue.\n"); + return -1; + } + memset(qpti->res_cpu, 0, QSIZE(RES_QUEUE_LEN)); + memset(qpti->req_cpu, 0, QSIZE(QLOGICPTI_REQ_QUEUE_LEN)); + return 0; +} + +static const char *qlogicpti_info(struct Scsi_Host *host) +{ + static char buf[80]; + struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; + + sprintf(buf, "PTI Qlogic,ISP SBUS SCSI irq %d regs at %p", + qpti->qhost->irq, qpti->qregs); + return buf; +} + +/* I am a certified frobtronicist. 
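Two small pieces of arithmetic from the probe helpers above, exercised standalone: qpti_get_clock() rounds the PROM clock-frequency property to whole MHz, and QSIZE() sizes a queue buffer as (entries + 1) slots of QUEUE_ENTRY_LEN bytes. The clock value below is only an example:

#include <assert.h>

#define QUEUE_ENTRY_LEN 64
#define QSIZE(entries)  (((entries) + 1) * QUEUE_ENTRY_LEN)

int main(void)
{
	unsigned int cfreq = 39999936;                    /* a nominal 40 MHz part */
	unsigned int mhz = (cfreq + 500000) / 1000000;    /* round to nearest MHz  */

	assert(mhz == 40);
	assert(QSIZE(255) == 256 * 64);   /* 16 KB buffer for a 256-entry queue */
	return 0;
}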
*/ +static inline void marker_frob(struct Command_Entry *cmd) +{ + struct Marker_Entry *marker = (struct Marker_Entry *) cmd; + + memset(marker, 0, sizeof(struct Marker_Entry)); + marker->hdr.entry_cnt = 1; + marker->hdr.entry_type = ENTRY_MARKER; + marker->modifier = SYNC_ALL; + marker->rsvd = 0; +} + +static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd, + struct qlogicpti *qpti) +{ + memset(cmd, 0, sizeof(struct Command_Entry)); + cmd->hdr.entry_cnt = 1; + cmd->hdr.entry_type = ENTRY_COMMAND; + cmd->target_id = Cmnd->device->id; + cmd->target_lun = Cmnd->device->lun; + cmd->cdb_length = Cmnd->cmd_len; + cmd->control_flags = 0; + if (Cmnd->device->tagged_supported) { + if (qpti->cmd_count[Cmnd->device->id] == 0) + qpti->tag_ages[Cmnd->device->id] = jiffies; + if (time_after(jiffies, qpti->tag_ages[Cmnd->device->id] + (5*HZ))) { + cmd->control_flags = CFLAG_ORDERED_TAG; + qpti->tag_ages[Cmnd->device->id] = jiffies; + } else + cmd->control_flags = CFLAG_SIMPLE_TAG; + } + if ((Cmnd->cmnd[0] == WRITE_6) || + (Cmnd->cmnd[0] == WRITE_10) || + (Cmnd->cmnd[0] == WRITE_12)) + cmd->control_flags |= CFLAG_WRITE; + else + cmd->control_flags |= CFLAG_READ; + cmd->time_out = scsi_cmd_to_rq(Cmnd)->timeout / HZ; + memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len); +} + +/* Do it to it baby. */ +static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd, + struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr) +{ + struct dataseg *ds; + struct scatterlist *sg, *s; + int i, n; + + if (scsi_bufflen(Cmnd)) { + int sg_count; + + sg = scsi_sglist(Cmnd); + sg_count = dma_map_sg(&qpti->op->dev, sg, + scsi_sg_count(Cmnd), + Cmnd->sc_data_direction); + if (!sg_count) + return -1; + ds = cmd->dataseg; + cmd->segment_cnt = sg_count; + + /* Fill in first four sg entries: */ + n = sg_count; + if (n > 4) + n = 4; + for_each_sg(sg, s, n, i) { + ds[i].d_base = sg_dma_address(s); + ds[i].d_count = sg_dma_len(s); + } + sg_count -= 4; + sg = s; + while (sg_count > 0) { + struct Continuation_Entry *cont; + + ++cmd->hdr.entry_cnt; + cont = (struct Continuation_Entry *) &qpti->req_cpu[in_ptr]; + in_ptr = NEXT_REQ_PTR(in_ptr); + if (in_ptr == out_ptr) + return -1; + + cont->hdr.entry_type = ENTRY_CONTINUATION; + cont->hdr.entry_cnt = 0; + cont->hdr.sys_def_1 = 0; + cont->hdr.flags = 0; + cont->reserved = 0; + ds = cont->dataseg; + n = sg_count; + if (n > 7) + n = 7; + for_each_sg(sg, s, n, i) { + ds[i].d_base = sg_dma_address(s); + ds[i].d_count = sg_dma_len(s); + } + sg_count -= n; + sg = s; + } + } else { + cmd->dataseg[0].d_base = 0; + cmd->dataseg[0].d_count = 0; + cmd->segment_cnt = 1; /* Shouldn't this be 0? */ + } + + /* Committed, record Scsi_Cmd so we can find it later. 
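load_cmd() above places the first four data segments in the command entry itself and spills the rest into continuation entries of seven segments each, which is exactly what the QLOGICPTI_MAX_SG() macro in qlogicpti.h encodes. Standalone check of that capacity formula:

#include <assert.h>

#define QLOGICPTI_MAX_SG(ql) (4 + (((ql) > 0) ? 7 * ((ql) - 1) : 0))

int main(void)
{
	assert(QLOGICPTI_MAX_SG(1) == 4);    /* command entry alone: 4 segments */
	assert(QLOGICPTI_MAX_SG(3) == 18);   /* plus two continuation entries   */
	return 0;
}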
*/ + cmd->handle = in_ptr; + qpti->cmd_slots[in_ptr] = Cmnd; + + qpti->cmd_count[Cmnd->device->id]++; + sbus_writew(in_ptr, qpti->qregs + MBOX4); + qpti->req_in_ptr = in_ptr; + + return in_ptr; +} + +static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int out_ptr) +{ + /* Temporary workaround until bug is found and fixed (one bug has been found + already, but fixing it makes things even worse) -jj */ + int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64; + host->can_queue = scsi_host_busy(host) + num_free; + host->sg_tablesize = QLOGICPTI_MAX_SG(num_free); +} + +static int qlogicpti_slave_configure(struct scsi_device *sdev) +{ + struct qlogicpti *qpti = shost_priv(sdev->host); + int tgt = sdev->id; + u_short param[6]; + + /* tags handled in midlayer */ + /* enable sync mode? */ + if (sdev->sdtr) { + qpti->dev_param[tgt].device_flags |= 0x10; + } else { + qpti->dev_param[tgt].synchronous_offset = 0; + qpti->dev_param[tgt].synchronous_period = 0; + } + /* are we wide capable? */ + if (sdev->wdtr) + qpti->dev_param[tgt].device_flags |= 0x20; + + param[0] = MBOX_SET_TARGET_PARAMS; + param[1] = (tgt << 8); + param[2] = (qpti->dev_param[tgt].device_flags << 8); + if (qpti->dev_param[tgt].device_flags & 0x10) { + param[3] = (qpti->dev_param[tgt].synchronous_offset << 8) | + qpti->dev_param[tgt].synchronous_period; + } else { + param[3] = 0; + } + qlogicpti_mbox_command(qpti, param, 0); + return 0; +} + +/* + * The middle SCSI layer ensures that queuecommand never gets invoked + * concurrently with itself or the interrupt handler (though the + * interrupt handler may call this routine as part of + * request-completion handling). + * + * "This code must fly." -davem + */ +static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd) +{ + void (*done)(struct scsi_cmnd *) = scsi_done; + struct Scsi_Host *host = Cmnd->device->host; + struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; + struct Command_Entry *cmd; + u_int out_ptr; + int in_ptr; + + in_ptr = qpti->req_in_ptr; + cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr]; + out_ptr = sbus_readw(qpti->qregs + MBOX4); + in_ptr = NEXT_REQ_PTR(in_ptr); + if (in_ptr == out_ptr) + goto toss_command; + + if (qpti->send_marker) { + marker_frob(cmd); + qpti->send_marker = 0; + if (NEXT_REQ_PTR(in_ptr) == out_ptr) { + sbus_writew(in_ptr, qpti->qregs + MBOX4); + qpti->req_in_ptr = in_ptr; + goto toss_command; + } + cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr]; + in_ptr = NEXT_REQ_PTR(in_ptr); + } + cmd_frob(cmd, Cmnd, qpti); + if ((in_ptr = load_cmd(Cmnd, cmd, qpti, in_ptr, out_ptr)) == -1) + goto toss_command; + + update_can_queue(host, in_ptr, out_ptr); + + return 0; + +toss_command: + printk(KERN_EMERG "qlogicpti%d: request queue overflow\n", + qpti->qpti_id); + + /* Unfortunately, unless you use the new EH code, which + * we don't, the midlayer will ignore the return value, + * which is insane. We pick up the pieces like this. 
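queuecommand above rejects a request when NEXT_REQ_PTR(in_ptr) would collide with out_ptr; keeping that one slot unused is what lets "in == out" unambiguously mean an empty ring rather than a full one. A standalone sketch of the same full/empty test, using the same wrap mask as the request ring:

#include <assert.h>

#define RING_LEN 255u
#define NEXT(p)  (((p) + 1) & RING_LEN)

int main(void)
{
	unsigned int in = 7, out = 8;        /* 255 of the 256 slots in use */

	assert(NEXT(in) == out);             /* treated as full: command rejected */
	assert(in != out);                   /* in == out is reserved for "empty" */
	return 0;
}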
+ */ + Cmnd->result = DID_BUS_BUSY; + done(Cmnd); + return 1; +} + +static DEF_SCSI_QCMD(qlogicpti_queuecommand) + +static int qlogicpti_return_status(struct Status_Entry *sts, int id) +{ + int host_status = DID_ERROR; + + switch (sts->completion_status) { + case CS_COMPLETE: + host_status = DID_OK; + break; + case CS_INCOMPLETE: + if (!(sts->state_flags & SF_GOT_BUS)) + host_status = DID_NO_CONNECT; + else if (!(sts->state_flags & SF_GOT_TARGET)) + host_status = DID_BAD_TARGET; + else if (!(sts->state_flags & SF_SENT_CDB)) + host_status = DID_ERROR; + else if (!(sts->state_flags & SF_TRANSFERRED_DATA)) + host_status = DID_ERROR; + else if (!(sts->state_flags & SF_GOT_STATUS)) + host_status = DID_ERROR; + else if (!(sts->state_flags & SF_GOT_SENSE)) + host_status = DID_ERROR; + break; + case CS_DMA_ERROR: + case CS_TRANSPORT_ERROR: + host_status = DID_ERROR; + break; + case CS_RESET_OCCURRED: + case CS_BUS_RESET: + host_status = DID_RESET; + break; + case CS_ABORTED: + host_status = DID_ABORT; + break; + case CS_TIMEOUT: + host_status = DID_TIME_OUT; + break; + case CS_DATA_OVERRUN: + case CS_COMMAND_OVERRUN: + case CS_STATUS_OVERRUN: + case CS_BAD_MESSAGE: + case CS_NO_MESSAGE_OUT: + case CS_EXT_ID_FAILED: + case CS_IDE_MSG_FAILED: + case CS_ABORT_MSG_FAILED: + case CS_NOP_MSG_FAILED: + case CS_PARITY_ERROR_MSG_FAILED: + case CS_DEVICE_RESET_MSG_FAILED: + case CS_ID_MSG_FAILED: + case CS_UNEXP_BUS_FREE: + host_status = DID_ERROR; + break; + case CS_DATA_UNDERRUN: + host_status = DID_OK; + break; + default: + printk(KERN_EMERG "qlogicpti%d: unknown completion status 0x%04x\n", + id, sts->completion_status); + host_status = DID_ERROR; + break; + } + + return (sts->scsi_status & STATUS_MASK) | (host_status << 16); +} + +static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti) +{ + struct scsi_cmnd *Cmnd, *done_queue = NULL; + struct Status_Entry *sts; + u_int in_ptr, out_ptr; + + if (!(sbus_readw(qpti->qregs + SBUS_STAT) & SBUS_STAT_RINT)) + return NULL; + + in_ptr = sbus_readw(qpti->qregs + MBOX5); + sbus_writew(HCCTRL_CRIRQ, qpti->qregs + HCCTRL); + if (sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK) { + switch (sbus_readw(qpti->qregs + MBOX0)) { + case ASYNC_SCSI_BUS_RESET: + case EXECUTION_TIMEOUT_RESET: + qpti->send_marker = 1; + break; + case INVALID_COMMAND: + case HOST_INTERFACE_ERROR: + case COMMAND_ERROR: + case COMMAND_PARAM_ERROR: + break; + }; + sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); + } + + /* This looks like a network driver! */ + out_ptr = qpti->res_out_ptr; + while (out_ptr != in_ptr) { + u_int cmd_slot; + + sts = (struct Status_Entry *) &qpti->res_cpu[out_ptr]; + out_ptr = NEXT_RES_PTR(out_ptr); + + /* We store an index in the handle, not the pointer in + * some form. This avoids problems due to the fact + * that the handle provided is only 32-bits. 
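qlogicpti_return_status() below packs its answer as the SCSI status byte in the low byte and the host (DID_*) code shifted into bits 16..23. A standalone sketch of that packing; the two constants are defined locally here purely for illustration, the real values come from the SCSI midlayer headers:

#include <assert.h>

#define DID_TIME_OUT  0x03    /* illustrative stand-in for the midlayer code */
#define SAM_STAT_GOOD 0x00

int main(void)
{
	int result = SAM_STAT_GOOD | (DID_TIME_OUT << 16);

	assert(((result >> 16) & 0xff) == DID_TIME_OUT);   /* host byte        */
	assert((result & 0xff) == SAM_STAT_GOOD);          /* SCSI status byte */
	return 0;
}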
-DaveM + */ + cmd_slot = sts->handle; + Cmnd = qpti->cmd_slots[cmd_slot]; + qpti->cmd_slots[cmd_slot] = NULL; + + if (sts->completion_status == CS_RESET_OCCURRED || + sts->completion_status == CS_ABORTED || + (sts->status_flags & STF_BUS_RESET)) + qpti->send_marker = 1; + + if (sts->state_flags & SF_GOT_SENSE) + memcpy(Cmnd->sense_buffer, sts->req_sense_data, + SCSI_SENSE_BUFFERSIZE); + + if (sts->hdr.entry_type == ENTRY_STATUS) + Cmnd->result = + qlogicpti_return_status(sts, qpti->qpti_id); + else + Cmnd->result = DID_ERROR << 16; + + if (scsi_bufflen(Cmnd)) + dma_unmap_sg(&qpti->op->dev, + scsi_sglist(Cmnd), scsi_sg_count(Cmnd), + Cmnd->sc_data_direction); + + qpti->cmd_count[Cmnd->device->id]--; + sbus_writew(out_ptr, qpti->qregs + MBOX5); + Cmnd->host_scribble = (unsigned char *) done_queue; + done_queue = Cmnd; + } + qpti->res_out_ptr = out_ptr; + + return done_queue; +} + +static irqreturn_t qpti_intr(int irq, void *dev_id) +{ + struct qlogicpti *qpti = dev_id; + unsigned long flags; + struct scsi_cmnd *dq; + + spin_lock_irqsave(qpti->qhost->host_lock, flags); + dq = qlogicpti_intr_handler(qpti); + + if (dq != NULL) { + do { + struct scsi_cmnd *next; + + next = (struct scsi_cmnd *) dq->host_scribble; + scsi_done(dq); + dq = next; + } while (dq != NULL); + } + spin_unlock_irqrestore(qpti->qhost->host_lock, flags); + + return IRQ_HANDLED; +} + +static int qlogicpti_abort(struct scsi_cmnd *Cmnd) +{ + u_short param[6]; + struct Scsi_Host *host = Cmnd->device->host; + struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; + int return_status = SUCCESS; + u32 cmd_cookie; + int i; + + printk(KERN_WARNING "qlogicpti%d: Aborting cmd for tgt[%d] lun[%d]\n", + qpti->qpti_id, (int)Cmnd->device->id, (int)Cmnd->device->lun); + + qlogicpti_disable_irqs(qpti); + + /* Find the 32-bit cookie we gave to the firmware for + * this command. 
+ */ + for (i = 0; i < QLOGICPTI_REQ_QUEUE_LEN + 1; i++) + if (qpti->cmd_slots[i] == Cmnd) + break; + cmd_cookie = i; + + param[0] = MBOX_ABORT; + param[1] = (((u_short) Cmnd->device->id) << 8) | Cmnd->device->lun; + param[2] = cmd_cookie >> 16; + param[3] = cmd_cookie & 0xffff; + if (qlogicpti_mbox_command(qpti, param, 0) || + (param[0] != MBOX_COMMAND_COMPLETE)) { + printk(KERN_EMERG "qlogicpti%d: scsi abort failure: %x\n", + qpti->qpti_id, param[0]); + return_status = FAILED; + } + + qlogicpti_enable_irqs(qpti); + + return return_status; +} + +static int qlogicpti_reset(struct scsi_cmnd *Cmnd) +{ + u_short param[6]; + struct Scsi_Host *host = Cmnd->device->host; + struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; + int return_status = SUCCESS; + + printk(KERN_WARNING "qlogicpti%d: Resetting SCSI bus!\n", + qpti->qpti_id); + + qlogicpti_disable_irqs(qpti); + + param[0] = MBOX_BUS_RESET; + param[1] = qpti->host_param.bus_reset_delay; + if (qlogicpti_mbox_command(qpti, param, 0) || + (param[0] != MBOX_COMMAND_COMPLETE)) { + printk(KERN_EMERG "qlogicisp%d: scsi bus reset failure: %x\n", + qpti->qpti_id, param[0]); + return_status = FAILED; + } + + qlogicpti_enable_irqs(qpti); + + return return_status; +} + +static const struct scsi_host_template qpti_template = { + .module = THIS_MODULE, + .name = "qlogicpti", + .info = qlogicpti_info, + .queuecommand = qlogicpti_queuecommand, + .slave_configure = qlogicpti_slave_configure, + .eh_abort_handler = qlogicpti_abort, + .eh_host_reset_handler = qlogicpti_reset, + .can_queue = QLOGICPTI_REQ_QUEUE_LEN, + .this_id = 7, + .sg_tablesize = QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN), +}; + +static const struct of_device_id qpti_match[]; +static int qpti_sbus_probe(struct platform_device *op) +{ + struct device_node *dp = op->dev.of_node; + struct Scsi_Host *host; + struct qlogicpti *qpti; + static int nqptis; + const char *fcode; + + /* Sometimes Antares cards come up not completely + * setup, and we get a report of a zero IRQ. + */ + if (op->archdata.irqs[0] == 0) + return -ENODEV; + + host = scsi_host_alloc(&qpti_template, sizeof(struct qlogicpti)); + if (!host) + return -ENOMEM; + + qpti = shost_priv(host); + + host->max_id = MAX_TARGETS; + qpti->qhost = host; + qpti->op = op; + qpti->qpti_id = nqptis; + qpti->is_pti = !of_node_name_eq(op->dev.of_node, "QLGC,isp"); + + if (qpti_map_regs(qpti) < 0) + goto fail_unlink; + + if (qpti_register_irq(qpti) < 0) + goto fail_unmap_regs; + + qpti_get_scsi_id(qpti); + qpti_get_bursts(qpti); + qpti_get_clock(qpti); + + /* Clear out scsi_cmnd array. */ + memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots)); + + if (qpti_map_queues(qpti) < 0) + goto fail_free_irq; + + /* Load the firmware. */ + if (qlogicpti_load_firmware(qpti)) + goto fail_unmap_queues; + if (qpti->is_pti) { + /* Check the PTI status reg. */ + if (qlogicpti_verify_tmon(qpti)) + goto fail_unmap_queues; + } + + /* Reset the ISP and init res/req queues. */ + if (qlogicpti_reset_hardware(host)) + goto fail_unmap_queues; + + printk("(Firmware v%d.%d.%d)", qpti->fware_majrev, + qpti->fware_minrev, qpti->fware_micrev); + + fcode = of_get_property(dp, "isp-fcode", NULL); + if (fcode && fcode[0]) + printk("(FCode %s)", fcode); + qpti->differential = of_property_read_bool(dp, "differential"); + + printk("\nqlogicpti%d: [%s Wide, using %s interface]\n", + qpti->qpti_id, + (qpti->ultra ? "Ultra" : "Fast"), + (qpti->differential ? 
"differential" : "single ended")); + + if (scsi_add_host(host, &op->dev)) { + printk("qlogicpti%d: Failed scsi_add_host\n", qpti->qpti_id); + goto fail_unmap_queues; + } + + dev_set_drvdata(&op->dev, qpti); + + qpti_chain_add(qpti); + + scsi_scan_host(host); + nqptis++; + + return 0; + +fail_unmap_queues: +#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) + dma_free_coherent(&op->dev, + QSIZE(RES_QUEUE_LEN), + qpti->res_cpu, qpti->res_dvma); + dma_free_coherent(&op->dev, + QSIZE(QLOGICPTI_REQ_QUEUE_LEN), + qpti->req_cpu, qpti->req_dvma); +#undef QSIZE + +fail_free_irq: + free_irq(qpti->irq, qpti); + +fail_unmap_regs: + of_iounmap(&op->resource[0], qpti->qregs, + resource_size(&op->resource[0])); + if (qpti->is_pti) + of_iounmap(&op->resource[0], qpti->sreg, + sizeof(unsigned char)); + +fail_unlink: + scsi_host_put(host); + + return -ENODEV; +} + +static int qpti_sbus_remove(struct platform_device *op) +{ + struct qlogicpti *qpti = dev_get_drvdata(&op->dev); + + qpti_chain_del(qpti); + + scsi_remove_host(qpti->qhost); + + /* Shut up the card. */ + sbus_writew(0, qpti->qregs + SBUS_CTRL); + + /* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */ + free_irq(qpti->irq, qpti); + +#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) + dma_free_coherent(&op->dev, + QSIZE(RES_QUEUE_LEN), + qpti->res_cpu, qpti->res_dvma); + dma_free_coherent(&op->dev, + QSIZE(QLOGICPTI_REQ_QUEUE_LEN), + qpti->req_cpu, qpti->req_dvma); +#undef QSIZE + + of_iounmap(&op->resource[0], qpti->qregs, + resource_size(&op->resource[0])); + if (qpti->is_pti) + of_iounmap(&op->resource[0], qpti->sreg, sizeof(unsigned char)); + + scsi_host_put(qpti->qhost); + + return 0; +} + +static const struct of_device_id qpti_match[] = { + { + .name = "ptisp", + }, + { + .name = "PTI,ptisp", + }, + { + .name = "QLGC,isp", + }, + { + .name = "SUNW,isp", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, qpti_match); + +static struct platform_driver qpti_sbus_driver = { + .driver = { + .name = "qpti", + .of_match_table = qpti_match, + }, + .probe = qpti_sbus_probe, + .remove = qpti_sbus_remove, +}; +module_platform_driver(qpti_sbus_driver); + +MODULE_DESCRIPTION("QlogicISP SBUS driver"); +MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("2.1"); +MODULE_FIRMWARE("qlogic/isp1000.bin"); diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h new file mode 100644 index 000000000..2b6374e08 --- /dev/null +++ b/drivers/scsi/qlogicpti.h @@ -0,0 +1,507 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* qlogicpti.h: Performance Technologies QlogicISP sbus card defines. + * + * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu) + */ + +#ifndef _QLOGICPTI_H +#define _QLOGICPTI_H + +/* Qlogic/SBUS controller registers. */ +#define SBUS_CFG1 0x006UL +#define SBUS_CTRL 0x008UL +#define SBUS_STAT 0x00aUL +#define SBUS_SEMAPHORE 0x00cUL +#define CMD_DMA_CTRL 0x022UL +#define DATA_DMA_CTRL 0x042UL +#define MBOX0 0x080UL +#define MBOX1 0x082UL +#define MBOX2 0x084UL +#define MBOX3 0x086UL +#define MBOX4 0x088UL +#define MBOX5 0x08aUL +#define CPU_CMD 0x214UL +#define CPU_ORIDE 0x224UL +#define CPU_PCTRL 0x272UL +#define CPU_PDIFF 0x276UL +#define RISC_PSR 0x420UL +#define RISC_MTREG 0x42EUL +#define HCCTRL 0x440UL + +/* SCSI parameters for this driver. */ +#define MAX_TARGETS 16 +#define MAX_LUNS 8 + +/* With the qlogic interface, every queue slot can hold a SCSI + * command with up to 4 scatter/gather entries. 
If we need more + * than 4 entries, continuation entries can be used that hold + * another 7 entries each. Unlike for other drivers, this means + * that the maximum number of scatter/gather entries we can + * support at any given time is a function of the number of queue + * slots available. That is, host->can_queue and host->sg_tablesize + * are dynamic and _not_ independent. This all works fine because + * requests are queued serially and the scatter/gather limit is + * determined for each queue request anew. + */ +#define QLOGICPTI_REQ_QUEUE_LEN 255 /* must be power of two - 1 */ +#define QLOGICPTI_MAX_SG(ql) (4 + (((ql) > 0) ? 7*((ql) - 1) : 0)) + +/* mailbox command complete status codes */ +#define MBOX_COMMAND_COMPLETE 0x4000 +#define INVALID_COMMAND 0x4001 +#define HOST_INTERFACE_ERROR 0x4002 +#define TEST_FAILED 0x4003 +#define COMMAND_ERROR 0x4005 +#define COMMAND_PARAM_ERROR 0x4006 + +/* async event status codes */ +#define ASYNC_SCSI_BUS_RESET 0x8001 +#define SYSTEM_ERROR 0x8002 +#define REQUEST_TRANSFER_ERROR 0x8003 +#define RESPONSE_TRANSFER_ERROR 0x8004 +#define REQUEST_QUEUE_WAKEUP 0x8005 +#define EXECUTION_TIMEOUT_RESET 0x8006 + +/* Am I fucking pedantic or what? */ +struct Entry_header { +#ifdef __BIG_ENDIAN + u8 entry_cnt; + u8 entry_type; + u8 flags; + u8 sys_def_1; +#else /* __LITTLE_ENDIAN */ + u8 entry_type; + u8 entry_cnt; + u8 sys_def_1; + u8 flags; +#endif +}; + +/* entry header type commands */ +#define ENTRY_COMMAND 1 +#define ENTRY_CONTINUATION 2 +#define ENTRY_STATUS 3 +#define ENTRY_MARKER 4 +#define ENTRY_EXTENDED_COMMAND 5 + +/* entry header flag definitions */ +#define EFLAG_CONTINUATION 1 +#define EFLAG_BUSY 2 +#define EFLAG_BAD_HEADER 4 +#define EFLAG_BAD_PAYLOAD 8 + +struct dataseg { + u32 d_base; + u32 d_count; +}; + +struct Command_Entry { + struct Entry_header hdr; + u32 handle; +#ifdef __BIG_ENDIAN + u8 target_id; + u8 target_lun; +#else /* __LITTLE_ENDIAN */ + u8 target_lun; + u8 target_id; +#endif + u16 cdb_length; + u16 control_flags; + u16 rsvd; + u16 time_out; + u16 segment_cnt; + u8 cdb[12]; + struct dataseg dataseg[4]; +}; + +/* command entry control flag definitions */ +#define CFLAG_NODISC 0x01 +#define CFLAG_HEAD_TAG 0x02 +#define CFLAG_ORDERED_TAG 0x04 +#define CFLAG_SIMPLE_TAG 0x08 +#define CFLAG_TAR_RTN 0x10 +#define CFLAG_READ 0x20 +#define CFLAG_WRITE 0x40 + +struct Ext_Command_Entry { + struct Entry_header hdr; + u32 handle; +#ifdef __BIG_ENDIAN + u8 target_id; + u8 target_lun; +#else /* __LITTLE_ENDIAN */ + u8 target_lun; + u8 target_id; +#endif + u16 cdb_length; + u16 control_flags; + u16 rsvd; + u16 time_out; + u16 segment_cnt; + u8 cdb[44]; +}; + +struct Continuation_Entry { + struct Entry_header hdr; + u32 reserved; + struct dataseg dataseg[7]; +}; + +struct Marker_Entry { + struct Entry_header hdr; + u32 reserved; +#ifdef __BIG_ENDIAN + u8 target_id; + u8 target_lun; +#else /* __LITTLE_ENDIAN */ + u8 target_lun; + u8 target_id; +#endif +#ifdef __BIG_ENDIAN + u8 rsvd; + u8 modifier; +#else /* __LITTLE_ENDIAN */ + u8 modifier; + u8 rsvd; +#endif + u8 rsvds[52]; +}; + +/* marker entry modifier definitions */ +#define SYNC_DEVICE 0 +#define SYNC_TARGET 1 +#define SYNC_ALL 2 + +struct Status_Entry { + struct Entry_header hdr; + u32 handle; + u16 scsi_status; + u16 completion_status; + u16 state_flags; + u16 status_flags; + u16 time; + u16 req_sense_len; + u32 residual; + u8 rsvd[8]; + u8 req_sense_data[32]; +}; + +/* status entry completion status definitions */ +#define CS_COMPLETE 0x0000 +#define CS_INCOMPLETE 0x0001 +#define 
CS_DMA_ERROR 0x0002 +#define CS_TRANSPORT_ERROR 0x0003 +#define CS_RESET_OCCURRED 0x0004 +#define CS_ABORTED 0x0005 +#define CS_TIMEOUT 0x0006 +#define CS_DATA_OVERRUN 0x0007 +#define CS_COMMAND_OVERRUN 0x0008 +#define CS_STATUS_OVERRUN 0x0009 +#define CS_BAD_MESSAGE 0x000a +#define CS_NO_MESSAGE_OUT 0x000b +#define CS_EXT_ID_FAILED 0x000c +#define CS_IDE_MSG_FAILED 0x000d +#define CS_ABORT_MSG_FAILED 0x000e +#define CS_REJECT_MSG_FAILED 0x000f +#define CS_NOP_MSG_FAILED 0x0010 +#define CS_PARITY_ERROR_MSG_FAILED 0x0011 +#define CS_DEVICE_RESET_MSG_FAILED 0x0012 +#define CS_ID_MSG_FAILED 0x0013 +#define CS_UNEXP_BUS_FREE 0x0014 +#define CS_DATA_UNDERRUN 0x0015 +#define CS_BUS_RESET 0x001c + +/* status entry state flag definitions */ +#define SF_GOT_BUS 0x0100 +#define SF_GOT_TARGET 0x0200 +#define SF_SENT_CDB 0x0400 +#define SF_TRANSFERRED_DATA 0x0800 +#define SF_GOT_STATUS 0x1000 +#define SF_GOT_SENSE 0x2000 + +/* status entry status flag definitions */ +#define STF_DISCONNECT 0x0001 +#define STF_SYNCHRONOUS 0x0002 +#define STF_PARITY_ERROR 0x0004 +#define STF_BUS_RESET 0x0008 +#define STF_DEVICE_RESET 0x0010 +#define STF_ABORTED 0x0020 +#define STF_TIMEOUT 0x0040 +#define STF_NEGOTIATION 0x0080 + +/* mailbox commands */ +#define MBOX_NO_OP 0x0000 +#define MBOX_LOAD_RAM 0x0001 +#define MBOX_EXEC_FIRMWARE 0x0002 +#define MBOX_DUMP_RAM 0x0003 +#define MBOX_WRITE_RAM_WORD 0x0004 +#define MBOX_READ_RAM_WORD 0x0005 +#define MBOX_MAILBOX_REG_TEST 0x0006 +#define MBOX_VERIFY_CHECKSUM 0x0007 +#define MBOX_ABOUT_FIRMWARE 0x0008 +#define MBOX_CHECK_FIRMWARE 0x000e +#define MBOX_INIT_REQ_QUEUE 0x0010 +#define MBOX_INIT_RES_QUEUE 0x0011 +#define MBOX_EXECUTE_IOCB 0x0012 +#define MBOX_WAKE_UP 0x0013 +#define MBOX_STOP_FIRMWARE 0x0014 +#define MBOX_ABORT 0x0015 +#define MBOX_ABORT_DEVICE 0x0016 +#define MBOX_ABORT_TARGET 0x0017 +#define MBOX_BUS_RESET 0x0018 +#define MBOX_STOP_QUEUE 0x0019 +#define MBOX_START_QUEUE 0x001a +#define MBOX_SINGLE_STEP_QUEUE 0x001b +#define MBOX_ABORT_QUEUE 0x001c +#define MBOX_GET_DEV_QUEUE_STATUS 0x001d +#define MBOX_GET_FIRMWARE_STATUS 0x001f +#define MBOX_GET_INIT_SCSI_ID 0x0020 +#define MBOX_GET_SELECT_TIMEOUT 0x0021 +#define MBOX_GET_RETRY_COUNT 0x0022 +#define MBOX_GET_TAG_AGE_LIMIT 0x0023 +#define MBOX_GET_CLOCK_RATE 0x0024 +#define MBOX_GET_ACT_NEG_STATE 0x0025 +#define MBOX_GET_ASYNC_DATA_SETUP_TIME 0x0026 +#define MBOX_GET_SBUS_PARAMS 0x0027 +#define MBOX_GET_TARGET_PARAMS 0x0028 +#define MBOX_GET_DEV_QUEUE_PARAMS 0x0029 +#define MBOX_SET_INIT_SCSI_ID 0x0030 +#define MBOX_SET_SELECT_TIMEOUT 0x0031 +#define MBOX_SET_RETRY_COUNT 0x0032 +#define MBOX_SET_TAG_AGE_LIMIT 0x0033 +#define MBOX_SET_CLOCK_RATE 0x0034 +#define MBOX_SET_ACTIVE_NEG_STATE 0x0035 +#define MBOX_SET_ASYNC_DATA_SETUP_TIME 0x0036 +#define MBOX_SET_SBUS_CONTROL_PARAMS 0x0037 +#define MBOX_SET_TARGET_PARAMS 0x0038 +#define MBOX_SET_DEV_QUEUE_PARAMS 0x0039 + +struct host_param { + u_short initiator_scsi_id; + u_short bus_reset_delay; + u_short retry_count; + u_short retry_delay; + u_short async_data_setup_time; + u_short req_ack_active_negation; + u_short data_line_active_negation; + u_short data_dma_burst_enable; + u_short command_dma_burst_enable; + u_short tag_aging; + u_short selection_timeout; + u_short max_queue_depth; +}; + +/* + * Device Flags: + * + * Bit Name + * --------- + * 7 Disconnect Privilege + * 6 Parity Checking + * 5 Wide Data Transfers + * 4 Synchronous Data Transfers + * 3 Tagged Queuing + * 2 Automatic Request Sense + * 1 Stop Queue on Check Condition + * 0 Renegotiate on Error 
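The default device_flags value 0xcd used by qlogicpti_set_hostdev_defaults() decodes against the bit table above as disconnect, parity, tagged queuing, automatic request sense and renegotiate-on-error; wide and sync (bits 5 and 4) are turned on later in qlogicpti_slave_configure(). Standalone check; the DF_* names are illustrative, not taken from the driver:

#include <assert.h>

enum {
	DF_RENEG_ON_ERROR = 1 << 0,
	DF_STOP_ON_CHECK  = 1 << 1,
	DF_AUTO_REQ_SENSE = 1 << 2,
	DF_TAGGED_QUEUING = 1 << 3,
	DF_SYNC_XFER      = 1 << 4,
	DF_WIDE_XFER      = 1 << 5,
	DF_PARITY_CHECK   = 1 << 6,
	DF_DISCONNECT     = 1 << 7,
};

int main(void)
{
	unsigned int flags = 0xcd;

	assert(flags == (DF_DISCONNECT | DF_PARITY_CHECK | DF_TAGGED_QUEUING |
			 DF_AUTO_REQ_SENSE | DF_RENEG_ON_ERROR));
	return 0;
}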
+ */ + +struct dev_param { + u_short device_flags; + u_short execution_throttle; + u_short synchronous_period; + u_short synchronous_offset; + u_short device_enable; + u_short reserved; /* pad */ +}; + +/* + * The result queue can be quite a bit smaller since continuation entries + * do not show up there: + */ +#define RES_QUEUE_LEN 255 /* Must be power of two - 1 */ +#define QUEUE_ENTRY_LEN 64 + +#define NEXT_REQ_PTR(wheee) (((wheee) + 1) & QLOGICPTI_REQ_QUEUE_LEN) +#define NEXT_RES_PTR(wheee) (((wheee) + 1) & RES_QUEUE_LEN) +#define PREV_REQ_PTR(wheee) (((wheee) - 1) & QLOGICPTI_REQ_QUEUE_LEN) +#define PREV_RES_PTR(wheee) (((wheee) - 1) & RES_QUEUE_LEN) + +struct pti_queue_entry { + char __opaque[QUEUE_ENTRY_LEN]; +}; + +struct scsi_cmnd; + +/* Software state for the driver. */ +struct qlogicpti { + /* These are the hot elements in the cache, so they come first. */ + void __iomem *qregs; /* Adapter registers */ + struct pti_queue_entry *res_cpu; /* Ptr to RESPONSE bufs (CPU) */ + struct pti_queue_entry *req_cpu; /* Ptr to REQUEST bufs (CPU) */ + + u_int req_in_ptr; /* index of next request slot */ + u_int res_out_ptr; /* index of next result slot */ + long send_marker; /* must we send a marker? */ + struct platform_device *op; + unsigned long __pad; + + int cmd_count[MAX_TARGETS]; + unsigned long tag_ages[MAX_TARGETS]; + + /* The cmd->handler is only 32-bits, so that things work even on monster + * Ex000 sparc64 machines with >4GB of ram we just keep track of the + * scsi command pointers here. This is essentially what Matt Jacob does. -DaveM + */ + struct scsi_cmnd *cmd_slots[QLOGICPTI_REQ_QUEUE_LEN + 1]; + + /* The rest of the elements are unimportant for performance. */ + struct qlogicpti *next; + dma_addr_t res_dvma; /* Ptr to RESPONSE bufs (DVMA)*/ + dma_addr_t req_dvma; /* Ptr to REQUEST bufs (DVMA) */ + u_char fware_majrev, fware_minrev, fware_micrev; + struct Scsi_Host *qhost; + int qpti_id; + int scsi_id; + int prom_node; + int irq; + char differential, ultra, clock; + unsigned char bursts; + struct host_param host_param; + struct dev_param dev_param[MAX_TARGETS]; + + void __iomem *sreg; +#define SREG_TPOWER 0x80 /* State of termpwr */ +#define SREG_FUSE 0x40 /* State of on board fuse */ +#define SREG_PDISAB 0x20 /* Disable state for power on */ +#define SREG_DSENSE 0x10 /* Sense for differential */ +#define SREG_IMASK 0x0c /* Interrupt level */ +#define SREG_SPMASK 0x03 /* Mask for switch pack */ + unsigned char swsreg; + unsigned int + gotirq : 1, /* this instance got an irq */ + is_pti : 1; /* Non-zero if this is a PTI board. */ +}; + +/* How to twiddle them bits... */ + +/* SBUS config register one. 
*/ +#define SBUS_CFG1_EPAR 0x0100 /* Enable parity checking */ +#define SBUS_CFG1_FMASK 0x00f0 /* Forth code cycle mask */ +#define SBUS_CFG1_BENAB 0x0004 /* Burst dvma enable */ +#define SBUS_CFG1_B64 0x0003 /* Enable 64byte bursts */ +#define SBUS_CFG1_B32 0x0002 /* Enable 32byte bursts */ +#define SBUS_CFG1_B16 0x0001 /* Enable 16byte bursts */ +#define SBUS_CFG1_B8 0x0008 /* Enable 8byte bursts */ + +/* SBUS control register */ +#define SBUS_CTRL_EDIRQ 0x0020 /* Enable Data DVMA Interrupts */ +#define SBUS_CTRL_ECIRQ 0x0010 /* Enable Command DVMA Interrupts */ +#define SBUS_CTRL_ESIRQ 0x0008 /* Enable SCSI Processor Interrupts */ +#define SBUS_CTRL_ERIRQ 0x0004 /* Enable RISC Processor Interrupts */ +#define SBUS_CTRL_GENAB 0x0002 /* Global Interrupt Enable */ +#define SBUS_CTRL_RESET 0x0001 /* Soft Reset */ + +/* SBUS status register */ +#define SBUS_STAT_DINT 0x0020 /* Data DVMA IRQ pending */ +#define SBUS_STAT_CINT 0x0010 /* Command DVMA IRQ pending */ +#define SBUS_STAT_SINT 0x0008 /* SCSI Processor IRQ pending */ +#define SBUS_STAT_RINT 0x0004 /* RISC Processor IRQ pending */ +#define SBUS_STAT_GINT 0x0002 /* Global IRQ pending */ + +/* SBUS semaphore register */ +#define SBUS_SEMAPHORE_STAT 0x0002 /* Semaphore status bit */ +#define SBUS_SEMAPHORE_LCK 0x0001 /* Semaphore lock bit */ + +/* DVMA control register */ +#define DMA_CTRL_CSUSPEND 0x0010 /* DMA channel suspend */ +#define DMA_CTRL_CCLEAR 0x0008 /* DMA channel clear and reset */ +#define DMA_CTRL_FCLEAR 0x0004 /* DMA fifo clear */ +#define DMA_CTRL_CIRQ 0x0002 /* DMA irq clear */ +#define DMA_CTRL_DMASTART 0x0001 /* DMA transfer start */ + +/* SCSI processor override register */ +#define CPU_ORIDE_ETRIG 0x8000 /* External trigger enable */ +#define CPU_ORIDE_STEP 0x4000 /* Single step mode enable */ +#define CPU_ORIDE_BKPT 0x2000 /* Breakpoint reg enable */ +#define CPU_ORIDE_PWRITE 0x1000 /* SCSI pin write enable */ +#define CPU_ORIDE_OFORCE 0x0800 /* Force outputs on */ +#define CPU_ORIDE_LBACK 0x0400 /* SCSI loopback enable */ +#define CPU_ORIDE_PTEST 0x0200 /* Parity test enable */ +#define CPU_ORIDE_TENAB 0x0100 /* SCSI pins tristate enable */ +#define CPU_ORIDE_TPINS 0x0080 /* SCSI pins enable */ +#define CPU_ORIDE_FRESET 0x0008 /* FIFO reset */ +#define CPU_ORIDE_CTERM 0x0004 /* Command terminate */ +#define CPU_ORIDE_RREG 0x0002 /* Reset SCSI processor regs */ +#define CPU_ORIDE_RMOD 0x0001 /* Reset SCSI processor module */ + +/* SCSI processor commands */ +#define CPU_CMD_BRESET 0x300b /* Reset SCSI bus */ + +/* SCSI processor pin control register */ +#define CPU_PCTRL_PVALID 0x8000 /* Phase bits are valid */ +#define CPU_PCTRL_PHI 0x0400 /* Parity bit high */ +#define CPU_PCTRL_PLO 0x0200 /* Parity bit low */ +#define CPU_PCTRL_REQ 0x0100 /* REQ bus signal */ +#define CPU_PCTRL_ACK 0x0080 /* ACK bus signal */ +#define CPU_PCTRL_RST 0x0040 /* RST bus signal */ +#define CPU_PCTRL_BSY 0x0020 /* BSY bus signal */ +#define CPU_PCTRL_SEL 0x0010 /* SEL bus signal */ +#define CPU_PCTRL_ATN 0x0008 /* ATN bus signal */ +#define CPU_PCTRL_MSG 0x0004 /* MSG bus signal */ +#define CPU_PCTRL_CD 0x0002 /* CD bus signal */ +#define CPU_PCTRL_IO 0x0001 /* IO bus signal */ + +/* SCSI processor differential pins register */ +#define CPU_PDIFF_SENSE 0x0200 /* Differential sense */ +#define CPU_PDIFF_MODE 0x0100 /* Differential mode */ +#define CPU_PDIFF_OENAB 0x0080 /* Outputs enable */ +#define CPU_PDIFF_PMASK 0x007c /* Differential control pins */ +#define CPU_PDIFF_TGT 0x0002 /* Target mode enable */ +#define CPU_PDIFF_INIT 
0x0001 /* Initiator mode enable */ + +/* RISC processor status register */ +#define RISC_PSR_FTRUE 0x8000 /* Force true */ +#define RISC_PSR_LCD 0x4000 /* Loop counter shows done status */ +#define RISC_PSR_RIRQ 0x2000 /* RISC irq status */ +#define RISC_PSR_TOFLOW 0x1000 /* Timer overflow (rollover) */ +#define RISC_PSR_AOFLOW 0x0800 /* Arithmetic overflow */ +#define RISC_PSR_AMSB 0x0400 /* Arithmetic big endian */ +#define RISC_PSR_ACARRY 0x0200 /* Arithmetic carry */ +#define RISC_PSR_AZERO 0x0100 /* Arithmetic zero */ +#define RISC_PSR_ULTRA 0x0020 /* Ultra mode */ +#define RISC_PSR_DIRQ 0x0010 /* DVMA interrupt */ +#define RISC_PSR_SIRQ 0x0008 /* SCSI processor interrupt */ +#define RISC_PSR_HIRQ 0x0004 /* Host interrupt */ +#define RISC_PSR_IPEND 0x0002 /* Interrupt pending */ +#define RISC_PSR_FFALSE 0x0001 /* Force false */ + +/* RISC processor memory timing register */ +#define RISC_MTREG_P1DFLT 0x1200 /* Default read/write timing, pg1 */ +#define RISC_MTREG_P0DFLT 0x0012 /* Default read/write timing, pg0 */ +#define RISC_MTREG_P1ULTRA 0x2300 /* Ultra-mode rw timing, pg1 */ +#define RISC_MTREG_P0ULTRA 0x0023 /* Ultra-mode rw timing, pg0 */ + +/* Host command/ctrl register */ +#define HCCTRL_NOP 0x0000 /* CMD: No operation */ +#define HCCTRL_RESET 0x1000 /* CMD: Reset RISC cpu */ +#define HCCTRL_PAUSE 0x2000 /* CMD: Pause RISC cpu */ +#define HCCTRL_REL 0x3000 /* CMD: Release paused RISC cpu */ +#define HCCTRL_STEP 0x4000 /* CMD: Single step RISC cpu */ +#define HCCTRL_SHIRQ 0x5000 /* CMD: Set host irq */ +#define HCCTRL_CHIRQ 0x6000 /* CMD: Clear host irq */ +#define HCCTRL_CRIRQ 0x7000 /* CMD: Clear RISC cpu irq */ +#define HCCTRL_BKPT 0x8000 /* CMD: Breakpoint enables change */ +#define HCCTRL_TMODE 0xf000 /* CMD: Enable test mode */ +#define HCCTRL_HIRQ 0x0080 /* Host IRQ pending */ +#define HCCTRL_RRIP 0x0040 /* RISC cpu reset in happening now */ +#define HCCTRL_RPAUSED 0x0020 /* RISC cpu is paused now */ +#define HCCTRL_EBENAB 0x0010 /* External breakpoint enable */ +#define HCCTRL_B1ENAB 0x0008 /* Breakpoint 1 enable */ +#define HCCTRL_B0ENAB 0x0004 /* Breakpoint 0 enable */ + +/* For our interrupt engine. */ +#define for_each_qlogicpti(qp) \ + for((qp) = qptichain; (qp); (qp) = (qp)->next) + +#endif /* !(_QLOGICPTI_H) */ diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c new file mode 100644 index 000000000..95a86e0df --- /dev/null +++ b/drivers/scsi/raid_class.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * raid_class.c - implementation of a simple raid visualisation class + * + * Copyright (c) 2005 - James Bottomley + * + * This class is designed to allow raid attributes to be visualised and + * manipulated in a form independent of the underlying raid. Ultimately this + * should work for both hardware and software raids. 
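A hypothetical sketch (not from this patch) of how a low-level SCSI driver would consume the class implemented below: it fills in a raid_function_template, whose cookie is matched against sdev->host->hostt in raid_match(), and attaches it with raid_class_attach(). All my_* names are placeholders, and the raid_set_state() helper mentioned in the comment is assumed to come from the raid_class header.

#include <linux/raid_class.h>
#include <scsi/scsi_host.h>

extern struct scsi_host_template my_sht;    /* the LLD's host template */
static struct raid_template *my_raid_tmpl;

static int my_is_raid(struct device *dev)
{
	return 1;    /* expose every device belonging to this host template */
}

static void my_get_state(struct device *dev)
{
	/* Query the controller and cache the answer, e.g. with
	 * raid_set_state(my_raid_tmpl, dev, RAID_STATE_ACTIVE);
	 */
}

static void my_get_resync(struct device *dev)
{
	/* Likewise for the resync percentage. */
}

static struct raid_function_template my_raid_fns = {
	.cookie     = &my_sht,         /* matched against sdev->host->hostt */
	.is_raid    = my_is_raid,
	.get_resync = my_get_resync,
	.get_state  = my_get_state,
};

/* At module init:  my_raid_tmpl = raid_class_attach(&my_raid_fns);
 * At module exit:  raid_class_release(my_raid_tmpl);
 */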
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +#define RAID_NUM_ATTRS 3 + +struct raid_internal { + struct raid_template r; + struct raid_function_template *f; + /* The actual attributes */ + struct device_attribute private_attrs[RAID_NUM_ATTRS]; + /* The array of null terminated pointers to attributes + * needed by scsi_sysfs.c */ + struct device_attribute *attrs[RAID_NUM_ATTRS + 1]; +}; + +struct raid_component { + struct list_head node; + struct device dev; + int num; +}; + +#define to_raid_internal(tmpl) container_of(tmpl, struct raid_internal, r) + +#define tc_to_raid_internal(tcont) ({ \ + struct raid_template *r = \ + container_of(tcont, struct raid_template, raid_attrs); \ + to_raid_internal(r); \ +}) + +#define ac_to_raid_internal(acont) ({ \ + struct transport_container *tc = \ + container_of(acont, struct transport_container, ac); \ + tc_to_raid_internal(tc); \ +}) + +#define device_to_raid_internal(dev) ({ \ + struct attribute_container *ac = \ + attribute_container_classdev_to_container(dev); \ + ac_to_raid_internal(ac); \ +}) + + +static int raid_match(struct attribute_container *cont, struct device *dev) +{ + /* We have to look for every subsystem that could house + * emulated RAID devices, so start with SCSI */ + struct raid_internal *i = ac_to_raid_internal(cont); + + if (IS_ENABLED(CONFIG_SCSI) && scsi_is_sdev_device(dev)) { + struct scsi_device *sdev = to_scsi_device(dev); + + if (i->f->cookie != sdev->host->hostt) + return 0; + + return i->f->is_raid(dev); + } + /* FIXME: look at other subsystems too */ + return 0; +} + +static int raid_setup(struct transport_container *tc, struct device *dev, + struct device *cdev) +{ + struct raid_data *rd; + + BUG_ON(dev_get_drvdata(cdev)); + + rd = kzalloc(sizeof(*rd), GFP_KERNEL); + if (!rd) + return -ENOMEM; + + INIT_LIST_HEAD(&rd->component_list); + dev_set_drvdata(cdev, rd); + + return 0; +} + +static int raid_remove(struct transport_container *tc, struct device *dev, + struct device *cdev) +{ + struct raid_data *rd = dev_get_drvdata(cdev); + struct raid_component *rc, *next; + dev_printk(KERN_ERR, dev, "RAID REMOVE\n"); + dev_set_drvdata(cdev, NULL); + list_for_each_entry_safe(rc, next, &rd->component_list, node) { + list_del(&rc->node); + dev_printk(KERN_ERR, rc->dev.parent, "RAID COMPONENT REMOVE\n"); + device_unregister(&rc->dev); + } + dev_printk(KERN_ERR, dev, "RAID REMOVE DONE\n"); + kfree(rd); + return 0; +} + +static DECLARE_TRANSPORT_CLASS(raid_class, + "raid_devices", + raid_setup, + raid_remove, + NULL); + +static const struct { + enum raid_state value; + char *name; +} raid_states[] = { + { RAID_STATE_UNKNOWN, "unknown" }, + { RAID_STATE_ACTIVE, "active" }, + { RAID_STATE_DEGRADED, "degraded" }, + { RAID_STATE_RESYNCING, "resyncing" }, + { RAID_STATE_OFFLINE, "offline" }, +}; + +static const char *raid_state_name(enum raid_state state) +{ + int i; + char *name = NULL; + + for (i = 0; i < ARRAY_SIZE(raid_states); i++) { + if (raid_states[i].value == state) { + name = raid_states[i].name; + break; + } + } + return name; +} + +static struct { + enum raid_level value; + char *name; +} raid_levels[] = { + { RAID_LEVEL_UNKNOWN, "unknown" }, + { RAID_LEVEL_LINEAR, "linear" }, + { RAID_LEVEL_0, "raid0" }, + { RAID_LEVEL_1, "raid1" }, + { RAID_LEVEL_10, "raid10" }, + { RAID_LEVEL_1E, "raid1e" }, + { RAID_LEVEL_3, "raid3" }, + { RAID_LEVEL_4, "raid4" }, + { RAID_LEVEL_5, "raid5" }, + { RAID_LEVEL_50, "raid50" }, + { RAID_LEVEL_6, "raid6" }, + { RAID_LEVEL_JBOD, "jbod" }, +}; + +static 
const char *raid_level_name(enum raid_level level) +{ + int i; + char *name = NULL; + + for (i = 0; i < ARRAY_SIZE(raid_levels); i++) { + if (raid_levels[i].value == level) { + name = raid_levels[i].name; + break; + } + } + return name; +} + +#define raid_attr_show_internal(attr, fmt, var, code) \ +static ssize_t raid_show_##attr(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct raid_data *rd = dev_get_drvdata(dev); \ + code \ + return snprintf(buf, 20, #fmt "\n", var); \ +} + +#define raid_attr_ro_states(attr, states, code) \ +raid_attr_show_internal(attr, %s, name, \ + const char *name; \ + code \ + name = raid_##states##_name(rd->attr); \ +) \ +static DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL) + + +#define raid_attr_ro_internal(attr, code) \ +raid_attr_show_internal(attr, %d, rd->attr, code) \ +static DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL) + +#define ATTR_CODE(attr) \ + struct raid_internal *i = device_to_raid_internal(dev); \ + if (i->f->get_##attr) \ + i->f->get_##attr(dev->parent); + +#define raid_attr_ro(attr) raid_attr_ro_internal(attr, ) +#define raid_attr_ro_fn(attr) raid_attr_ro_internal(attr, ATTR_CODE(attr)) +#define raid_attr_ro_state(attr) raid_attr_ro_states(attr, attr, ) +#define raid_attr_ro_state_fn(attr) raid_attr_ro_states(attr, attr, ATTR_CODE(attr)) + + +raid_attr_ro_state(level); +raid_attr_ro_fn(resync); +raid_attr_ro_state_fn(state); + +struct raid_template * +raid_class_attach(struct raid_function_template *ft) +{ + struct raid_internal *i = kzalloc(sizeof(struct raid_internal), + GFP_KERNEL); + int count = 0; + + if (unlikely(!i)) + return NULL; + + i->f = ft; + + i->r.raid_attrs.ac.class = &raid_class.class; + i->r.raid_attrs.ac.match = raid_match; + i->r.raid_attrs.ac.attrs = &i->attrs[0]; + + attribute_container_register(&i->r.raid_attrs.ac); + + i->attrs[count++] = &dev_attr_level; + i->attrs[count++] = &dev_attr_resync; + i->attrs[count++] = &dev_attr_state; + + i->attrs[count] = NULL; + BUG_ON(count > RAID_NUM_ATTRS); + + return &i->r; +} +EXPORT_SYMBOL(raid_class_attach); + +void +raid_class_release(struct raid_template *r) +{ + struct raid_internal *i = to_raid_internal(r); + + BUG_ON(attribute_container_unregister(&i->r.raid_attrs.ac)); + + kfree(i); +} +EXPORT_SYMBOL(raid_class_release); + +static __init int raid_init(void) +{ + return transport_class_register(&raid_class); +} + +static __exit void raid_exit(void) +{ + transport_class_unregister(&raid_class); +} + +MODULE_AUTHOR("James Bottomley"); +MODULE_DESCRIPTION("RAID device class"); +MODULE_LICENSE("GPL"); + +module_init(raid_init); +module_exit(raid_exit); + diff --git a/drivers/scsi/script_asm.pl b/drivers/scsi/script_asm.pl new file mode 100644 index 000000000..0300f4c55 --- /dev/null +++ b/drivers/scsi/script_asm.pl @@ -0,0 +1,971 @@ +#!/usr/bin/perl -s +# SPDX-License-Identifier: GPL-2.0-or-later + +# NCR 53c810 script assembler +# Sponsored by +# iX Multiuser Multitasking Magazine +# +# Copyright 1993, Drew Eckhardt +# Visionary Computing +# (Unix and Linux consulting and custom programming) +# drew@Colorado.EDU +# +1 (303) 786-7975 +# +# Support for 53c710 (via -ncr7x0_family switch) added by Richard +# Hirst - 15th March 1997 +# +# TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation. +# + +# +# Basically, I follow the NCR syntax documented in the NCR53c710 +# Programmer's guide, with the new instructions, registers, etc. +# from the NCR53c810. +# +# Differences between this assembler and NCR's are that +# 1. 
PASS, REL (data, JUMPs work fine), and the option to start a new +# script, are unimplemented, since I didn't use them in my scripts. +# +# 2. I also emit a script_u.h file, which will undefine all of +# the A_*, E_*, etc. symbols defined in the script. This +# makes including multiple scripts in one program easier +# +# 3. This is a single pass assembler, which only emits +# .h files. +# + + +# XXX - set these with command line options +$debug = 0; # Print general debugging messages +$debug_external = 0; # Print external/forward reference messages +$list_in_array = 1; # Emit original SCRIPTS assembler in comments in + # script.h +#$prefix; # (set by perl -s) + # define all arrays having this prefix so we + # don't have name space collisions after + # assembling this file in different ways for + # different host adapters + +# Constants + + +# Table of the SCSI phase encodings +%scsi_phases = ( + 'DATA_OUT', 0x00_00_00_00, 'DATA_IN', 0x01_00_00_00, 'CMD', 0x02_00_00_00, + 'STATUS', 0x03_00_00_00, 'MSG_OUT', 0x06_00_00_00, 'MSG_IN', 0x07_00_00_00 +); + +# XXX - replace references to the *_810 constants with general constants +# assigned at compile time based on chip type. + +# Table of operator encodings +# XXX - NCR53c710 only implements +# move (nop) = 0x00_00_00_00 +# or = 0x02_00_00_00 +# and = 0x04_00_00_00 +# add = 0x06_00_00_00 + +if ($ncr7x0_family) { + %operators = ( + '|', 0x02_00_00_00, 'OR', 0x02_00_00_00, + '&', 0x04_00_00_00, 'AND', 0x04_00_00_00, + '+', 0x06_00_00_00 + ); +} +else { + %operators = ( + 'SHL', 0x01_00_00_00, + '|', 0x02_00_00_00, 'OR', 0x02_00_00_00, + 'XOR', 0x03_00_00_00, + '&', 0x04_00_00_00, 'AND', 0x04_00_00_00, + 'SHR', 0x05_00_00_00, + # Note : low bit of the operator bit should be set for add with + # carry. + '+', 0x06_00_00_00 + ); +} + +# Table of register addresses + +if ($ncr7x0_family) { + %registers = ( + 'SCNTL0', 0, 'SCNTL1', 1, 'SDID', 2, 'SIEN', 3, + 'SCID', 4, 'SXFER', 5, 'SODL', 6, 'SOCL', 7, + 'SFBR', 8, 'SIDL', 9, 'SBDL', 10, 'SBCL', 11, + 'DSTAT', 12, 'SSTAT0', 13, 'SSTAT1', 14, 'SSTAT2', 15, + 'DSA0', 16, 'DSA1', 17, 'DSA2', 18, 'DSA3', 19, + 'CTEST0', 20, 'CTEST1', 21, 'CTEST2', 22, 'CTEST3', 23, + 'CTEST4', 24, 'CTEST5', 25, 'CTEST6', 26, 'CTEST7', 27, + 'TEMP0', 28, 'TEMP1', 29, 'TEMP2', 30, 'TEMP3', 31, + 'DFIFO', 32, 'ISTAT', 33, 'CTEST8', 34, 'LCRC', 35, + 'DBC0', 36, 'DBC1', 37, 'DBC2', 38, 'DCMD', 39, + 'DNAD0', 40, 'DNAD1', 41, 'DNAD2', 42, 'DNAD3', 43, + 'DSP0', 44, 'DSP1', 45, 'DSP2', 46, 'DSP3', 47, + 'DSPS0', 48, 'DSPS1', 49, 'DSPS2', 50, 'DSPS3', 51, + 'SCRATCH0', 52, 'SCRATCH1', 53, 'SCRATCH2', 54, 'SCRATCH3', 55, + 'DMODE', 56, 'DIEN', 57, 'DWT', 58, 'DCNTL', 59, + 'ADDER0', 60, 'ADDER1', 61, 'ADDER2', 62, 'ADDER3', 63, + ); +} +else { + %registers = ( + 'SCNTL0', 0, 'SCNTL1', 1, 'SCNTL2', 2, 'SCNTL3', 3, + 'SCID', 4, 'SXFER', 5, 'SDID', 6, 'GPREG', 7, + 'SFBR', 8, 'SOCL', 9, 'SSID', 10, 'SBCL', 11, + 'DSTAT', 12, 'SSTAT0', 13, 'SSTAT1', 14, 'SSTAT2', 15, + 'DSA0', 16, 'DSA1', 17, 'DSA2', 18, 'DSA3', 19, + 'ISTAT', 20, + 'CTEST0', 24, 'CTEST1', 25, 'CTEST2', 26, 'CTEST3', 27, + 'TEMP0', 28, 'TEMP1', 29, 'TEMP2', 30, 'TEMP3', 31, + 'DFIFO', 32, 'CTEST4', 33, 'CTEST5', 34, 'CTEST6', 35, + 'DBC0', 36, 'DBC1', 37, 'DBC2', 38, 'DCMD', 39, + 'DNAD0', 40, 'DNAD1', 41, 'DNAD2', 42, 'DNAD3', 43, + 'DSP0', 44, 'DSP1', 45, 'DSP2', 46, 'DSP3', 47, + 'DSPS0', 48, 'DSPS1', 49, 'DSPS2', 50, 'DSPS3', 51, + 'SCRATCH0', 52, 'SCRATCH1', 53, 'SCRATCH2', 54, 'SCRATCH3', 55, + 'SCRATCHA0', 52, 'SCRATCHA1', 53, 'SCRATCHA2', 54, 'SCRATCHA3', 55, 
+ 'DMODE', 56, 'DIEN', 57, 'DWT', 58, 'DCNTL', 59, + 'ADDER0', 60, 'ADDER1', 61, 'ADDER2', 62, 'ADDER3', 63, + 'SIEN0', 64, 'SIEN1', 65, 'SIST0', 66, 'SIST1', 67, + 'SLPAR', 68, 'MACNTL', 70, 'GPCNTL', 71, + 'STIME0', 72, 'STIME1', 73, 'RESPID', 74, + 'STEST0', 76, 'STEST1', 77, 'STEST2', 78, 'STEST3', 79, + 'SIDL', 80, + 'SODL', 84, + 'SBDL', 88, + 'SCRATCHB0', 92, 'SCRATCHB1', 93, 'SCRATCHB2', 94, 'SCRATCHB3', 95 + ); +} + +# Parsing regular expressions +$identifier = '[A-Za-z_][A-Za-z_0-9]*'; +$decnum = '-?\\d+'; +$hexnum = '0[xX][0-9A-Fa-f]+'; +$constant = "$hexnum|$decnum"; + +# yucky - since we can't control grouping of # $constant, we need to +# expand out each alternative for $value. + +$value = "$identifier|$identifier\\s*[+\-]\\s*$decnum|". + "$identifier\\s*[+-]\s*$hexnum|$constant"; + +print STDERR "value regex = $value\n" if ($debug); + +$phase = join ('|', keys %scsi_phases); +print STDERR "phase regex = $phase\n" if ($debug); +$register = join ('|', keys %registers); + +# yucky - since %operators includes meta-characters which must +# be escaped, I can't use the join() trick I used for the register +# regex + +if ($ncr7x0_family) { + $operator = '\||OR|AND|\&|\+'; +} +else { + $operator = '\||OR|AND|XOR|\&|\+'; +} + +# Global variables + +%symbol_values = (%registers) ; # Traditional symbol table + +%symbol_references = () ; # Table of symbol references, where + # the index is the symbol name, + # and the contents a white space + # delimited list of address,size + # tuples where size is in bytes. + +@code = (); # Array of 32 bit words for SIOP + +@entry = (); # Array of entry point names + +@label = (); # Array of label names + +@absolute = (); # Array of absolute names + +@relative = (); # Array of relative names + +@external = (); # Array of external names + +$address = 0; # Address of current instruction + +$lineno = 0; # Line number we are parsing + +$output = 'script.h'; # Output file +$outputu = 'scriptu.h'; + +# &patch ($address, $offset, $length, $value) patches $code[$address] +# so that the $length bytes at $offset have $value added to +# them. + +@inverted_masks = (0x00_00_00_00, 0x00_00_00_ff, 0x00_00_ff_ff, 0x00_ff_ff_ff, + 0xff_ff_ff_ff); + +sub patch { + local ($address, $offset, $length, $value) = @_; + if ($debug) { + print STDERR "Patching $address at offset $offset, length $length to $value\n"; + printf STDERR "Old code : %08x\n", $code[$address]; + } + + $mask = ($inverted_masks[$length] << ($offset * 8)); + + $code[$address] = ($code[$address] & ~$mask) | + (($code[$address] & $mask) + ($value << ($offset * 8)) & + $mask); + + printf STDERR "New code : %08x\n", $code[$address] if ($debug); +} + +# &parse_value($value, $word, $offset, $length) where $value is +# an identifier or constant, $word is the word offset relative to +# $address, $offset is the starting byte within that word, and +# $length is the length of the field in bytes. +# +# Side effects are that the bytes are combined into the @code array +# relative to $address, and that the %symbol_references table is +# updated as appropriate. 
+ +sub parse_value { + local ($value, $word, $offset, $length) = @_; + local ($tmp); + + $symbol = ''; + + if ($value =~ /^REL\s*\(\s*($identifier)\s*\)\s*(.*)/i) { + $relative = 'REL'; + $symbol = $1; + $value = $2; +print STDERR "Relative reference $symbol\n" if ($debug); + } elsif ($value =~ /^($identifier)\s*(.*)/) { + $relative = 'ABS'; + $symbol = $1; + $value = $2; +print STDERR "Absolute reference $symbol\n" if ($debug); + } + + if ($symbol ne '') { +print STDERR "Referencing symbol $1, length = $length in $_\n" if ($debug); + $tmp = ($address + $word) * 4 + $offset; + if ($symbol_references{$symbol} ne undef) { + $symbol_references{$symbol} = + "$symbol_references{$symbol} $relative,$tmp,$length"; + } else { + if (!defined($symbol_values{$symbol})) { +print STDERR "forward $1\n" if ($debug_external); + $forward{$symbol} = "line $lineno : $_"; + } + $symbol_references{$symbol} = "$relative,$tmp,$length"; + } + } + + $value = eval $value; + &patch ($address + $word, $offset, $length, $value); +} + +# &parse_conditional ($conditional) where $conditional is the conditional +# clause from a transfer control instruction (RETURN, CALL, JUMP, INT). + +sub parse_conditional { + local ($conditional) = @_; + if ($conditional =~ /^\s*(IF|WHEN)\s*(.*)/i) { + $if = $1; + $conditional = $2; + if ($if =~ /WHEN/i) { + $allow_atn = 0; + $code[$address] |= 0x00_01_00_00; + $allow_atn = 0; + print STDERR "$0 : parsed WHEN\n" if ($debug); + } else { + $allow_atn = 1; + print STDERR "$0 : parsed IF\n" if ($debug); + } + } else { + die "$0 : syntax error in line $lineno : $_ + expected IF or WHEN +"; + } + + if ($conditional =~ /^NOT\s+(.*)$/i) { + $not = 'NOT '; + $other = 'OR'; + $conditional = $1; + print STDERR "$0 : parsed NOT\n" if ($debug); + } else { + $code[$address] |= 0x00_08_00_00; + $not = ''; + $other = 'AND' + } + + $need_data = 0; + if ($conditional =~ /^ATN\s*(.*)/i) {# + die "$0 : syntax error in line $lineno : $_ + WHEN conditional is incompatible with ATN +" if (!$allow_atn); + $code[$address] |= 0x00_02_00_00; + $conditional = $1; + print STDERR "$0 : parsed ATN\n" if ($debug); + } elsif ($conditional =~ /^($phase)\s*(.*)/i) { + $phase_index = "\U$1\E"; + $p = $scsi_phases{$phase_index}; + $code[$address] |= $p | 0x00_02_00_00; + $conditional = $2; + print STDERR "$0 : parsed phase $phase_index\n" if ($debug); + } else { + $other = ''; + $need_data = 1; + } + +print STDERR "Parsing conjunction, expecting $other\n" if ($debug); + if ($conditional =~ /^(AND|OR)\s*(.*)/i) { + $conjunction = $1; + $conditional = $2; + $need_data = 1; + die "$0 : syntax error in line $lineno : $_ + Illegal use of $1. Valid uses are + ".$not." $1 data + ".$not."ATN $1 data +" if ($other eq ''); + die "$0 : syntax error in line $lineno : $_ + Illegal use of $conjunction. Valid syntaxes are + NOT |ATN OR data + |ATN AND data +" if ($conjunction !~ /\s*$other\s*/i); + print STDERR "$0 : parsed $1\n" if ($debug); + } + + if ($need_data) { +print STDERR "looking for data in $conditional\n" if ($debug); + if ($conditional=~ /^($value)\s*(.*)/i) { + $code[$address] |= 0x00_04_00_00; + $conditional = $2; + &parse_value($1, 0, 0, 1); + print STDERR "$0 : parsed data\n" if ($debug); + } else { + die "$0 : syntax error in line $lineno : $_ + expected . 
+"; + } + } + + if ($conditional =~ /^\s*,\s*(.*)/) { + $conditional = $1; + if ($conditional =~ /^AND\s\s*MASK\s\s*($value)\s*(.*)/i) { + &parse_value ($1, 0, 1, 1); + print STDERR "$0 parsed AND MASK $1\n" if ($debug); + die "$0 : syntax error in line $lineno : $_ + expected end of line, not \"$2\" +" if ($2 ne ''); + } else { + die "$0 : syntax error in line $lineno : $_ + expected \",AND MASK \", not \"$2\" +"; + } + } elsif ($conditional !~ /^\s*$/) { + die "$0 : syntax error in line $lineno : $_ + expected end of line" . (($need_data) ? " or \"AND MASK \"" : "") . " + not \"$conditional\" +"; + } +} + +# Parse command line +$output = shift; +$outputu = shift; + + +# Main loop +while () { + $lineno = $lineno + 1; + $list[$address] = $list[$address].$_; + s/;.*$//; # Strip comments + + + chop; # Leave new line out of error messages + +# Handle symbol definitions of the form label: + if (/^\s*($identifier)\s*:(.*)/) { + if (!defined($symbol_values{$1})) { + $symbol_values{$1} = $address * 4; # Address is an index into + delete $forward{$1}; # an array of longs + push (@label, $1); + $_ = $2; + } else { + die "$0 : redefinition of symbol $1 in line $lineno : $_\n"; + } + } + +# Handle symbol definitions of the form ABSOLUTE or RELATIVE identifier = +# value + if (/^\s*(ABSOLUTE|RELATIVE)\s+(.*)/i) { + $is_absolute = $1; + $rest = $2; + foreach $rest (split (/\s*,\s*/, $rest)) { + if ($rest =~ /^($identifier)\s*=\s*($constant)\s*$/) { + local ($id, $cnst) = ($1, $2); + if ($symbol_values{$id} eq undef) { + $symbol_values{$id} = eval $cnst; + delete $forward{$id}; + if ($is_absolute =~ /ABSOLUTE/i) { + push (@absolute , $id); + } else { + push (@relative, $id); + } + } else { + die "$0 : redefinition of symbol $id in line $lineno : $_\n"; + } + } else { + die +"$0 : syntax error in line $lineno : $_ + expected = +"; + } + } + } elsif (/^\s*EXTERNAL\s+(.*)/i) { + $externals = $1; + foreach $external (split (/,/,$externals)) { + if ($external =~ /\s*($identifier)\s*$/) { + $external = $1; + push (@external, $external); + delete $forward{$external}; + if (defined($symbol_values{$external})) { + die "$0 : redefinition of symbol $1 in line $lineno : $_\n"; + } + $symbol_values{$external} = $external; +print STDERR "defined external $1 to $external\n" if ($debug_external); + } else { + die +"$0 : syntax error in line $lineno : $_ + expected , got $external +"; + } + } +# Process ENTRY identifier declarations + } elsif (/^\s*ENTRY\s+(.*)/i) { + if ($1 =~ /^($identifier)\s*$/) { + push (@entry, $1); + } else { + die +"$0 : syntax error in line $lineno : $_ + expected ENTRY +"; + } +# Process MOVE length, address, WITH|WHEN phase instruction + } elsif (/^\s*MOVE\s+(.*)/i) { + $rest = $1; + if ($rest =~ /^FROM\s+($value)\s*,\s*(WITH|WHEN)\s+($phase)\s*$/i) { + $transfer_addr = $1; + $with_when = $2; + $scsi_phase = $3; +print STDERR "Parsing MOVE FROM $transfer_addr, $with_when $3\n" if ($debug); + $code[$address] = 0x18_00_00_00 | (($with_when =~ /WITH/i) ? + 0x00_00_00_00 : 0x08_00_00_00) | $scsi_phases{$scsi_phase}; + &parse_value ($transfer_addr, 1, 0, 4); + $address += 2; + } elsif ($rest =~ /^($value)\s*,\s*(PTR\s+|)($value)\s*,\s*(WITH|WHEN)\s+($phase)\s*$/i) { + $transfer_len = $1; + $ptr = $2; + $transfer_addr = $3; + $with_when = $4; + $scsi_phase = $5; + $code[$address] = (($with_when =~ /WITH/i) ? 0x00_00_00_00 : + 0x08_00_00_00) | (($ptr =~ /PTR/i) ? 
(1 << 29) : 0) | + $scsi_phases{$scsi_phase}; + &parse_value ($transfer_len, 0, 0, 3); + &parse_value ($transfer_addr, 1, 0, 4); + $address += 2; + } elsif ($rest =~ /^MEMORY\s+(.*)/i) { + $rest = $1; + $code[$address] = 0xc0_00_00_00; + if ($rest =~ /^($value)\s*,\s*($value)\s*,\s*($value)\s*$/) { + $count = $1; + $source = $2; + $dest = $3; +print STDERR "Parsing MOVE MEMORY $count, $source, $dest\n" if ($debug); + &parse_value ($count, 0, 0, 3); + &parse_value ($source, 1, 0, 4); + &parse_value ($dest, 2, 0, 4); +printf STDERR "Move memory instruction = %08x,%08x,%08x\n", + $code[$address], $code[$address+1], $code[$address +2] if + ($debug); + $address += 3; + + } else { + die +"$0 : syntax error in line $lineno : $_ + expected , , +" + } + } elsif ($1 =~ /^(.*)\s+(TO|SHL|SHR)\s+(.*)/i) { +print STDERR "Parsing register to register move\n" if ($debug); + $src = $1; + $op = "\U$2\E"; + $rest = $3; + + $code[$address] = 0x40_00_00_00; + + $force = ($op !~ /TO/i); + + +print STDERR "Forcing register source \n" if ($force && $debug); + + if (!$force && $src =~ + /^($register)\s+(-|$operator)\s+($value)\s*$/i) { +print STDERR "register operand data8 source\n" if ($debug); + $src_reg = "\U$1\E"; + $op = "\U$2\E"; + if ($op ne '-') { + $data8 = $3; + } else { + die "- is not implemented yet.\n" + } + } elsif ($src =~ /^($register)\s*$/i) { +print STDERR "register source\n" if ($debug); + $src_reg = "\U$1\E"; + # Encode register to register move as a register | 0 + # move to register. + if (!$force) { + $op = '|'; + } + $data8 = 0; + } elsif (!$force && $src =~ /^($value)\s*$/i) { +print STDERR "data8 source\n" if ($debug); + $src_reg = undef; + $op = 'NONE'; + $data8 = $1; + } else { + if (!$force) { + die +"$0 : syntax error in line $lineno : $_ + expected + + +"; + } else { + die +"$0 : syntax error in line $lineno : $_ + expected +"; + } + } + if ($rest =~ /^($register)\s*(.*)$/i) { + $dst_reg = "\U$1\E"; + $rest = $2; + } else { + die +"$0 : syntax error in $lineno : $_ + expected , got $rest +"; + } + + if ($rest =~ /^WITH\s+CARRY\s*(.*)/i) { + $rest = $1; + if ($op eq '+') { + $code[$address] |= 0x01_00_00_00; + } else { + die +"$0 : syntax error in $lineno : $_ + WITH CARRY option is incompatible with the $op operator. +"; + } + } + + if ($rest !~ /^\s*$/) { + die +"$0 : syntax error in $lineno : $_ + Expected end of line, got $rest +"; + } + + print STDERR "source = $src_reg, data = $data8 , destination = $dst_reg\n" + if ($debug); + # Note that Move data8 to reg is encoded as a read-modify-write + # instruction. + if (($src_reg eq undef) || ($src_reg eq $dst_reg)) { + $code[$address] |= 0x38_00_00_00 | + ($registers{$dst_reg} << 16); + } elsif ($dst_reg =~ /SFBR/i) { + $code[$address] |= 0x30_00_00_00 | + ($registers{$src_reg} << 16); + } elsif ($src_reg =~ /SFBR/i) { + $code[$address] |= 0x28_00_00_00 | + ($registers{$dst_reg} << 16); + } else { + die +"$0 : Illegal combination of registers in line $lineno : $_ + Either source and destination registers must be the same, + or either source or destination register must be SFBR. +"; + } + + $code[$address] |= $operators{$op}; + + &parse_value ($data8, 0, 1, 1); + $code[$address] |= $operators{$op}; + $code[$address + 1] = 0x00_00_00_00;# Reserved + $address += 2; + } else { + die +"$0 : syntax error in line $lineno : $_ + expected (initiator) ,
, WHEN + (target) ,
, WITH + MEMORY , , + TO +"; + } +# Process SELECT {ATN|} id, fail_address + } elsif (/^\s*(SELECT|RESELECT)\s+(.*)/i) { + $rest = $2; + if ($rest =~ /^(ATN|)\s*($value)\s*,\s*($identifier)\s*$/i) { + $atn = $1; + $id = $2; + $alt_addr = $3; + $code[$address] = 0x40_00_00_00 | + (($atn =~ /ATN/i) ? 0x01_00_00_00 : 0); + $code[$address + 1] = 0x00_00_00_00; + &parse_value($id, 0, 2, 1); + &parse_value($alt_addr, 1, 0, 4); + $address += 2; + } elsif ($rest =~ /^(ATN|)\s*FROM\s+($value)\s*,\s*($identifier)\s*$/i) { + $atn = $1; + $addr = $2; + $alt_addr = $3; + $code[$address] = 0x42_00_00_00 | + (($atn =~ /ATN/i) ? 0x01_00_00_00 : 0); + $code[$address + 1] = 0x00_00_00_00; + &parse_value($addr, 0, 0, 3); + &parse_value($alt_addr, 1, 0, 4); + $address += 2; + } else { + die +"$0 : syntax error in line $lineno : $_ + expected SELECT id, alternate_address or + SELECT FROM address, alternate_address or + RESELECT id, alternate_address or + RESELECT FROM address, alternate_address +"; + } + } elsif (/^\s*WAIT\s+(.*)/i) { + $rest = $1; +print STDERR "Parsing WAIT $rest\n" if ($debug); + if ($rest =~ /^DISCONNECT\s*$/i) { + $code[$address] = 0x48_00_00_00; + $code[$address + 1] = 0x00_00_00_00; + $address += 2; + } elsif ($rest =~ /^(RESELECT|SELECT)\s+($identifier)\s*$/i) { + $alt_addr = $2; + $code[$address] = 0x50_00_00_00; + &parse_value ($alt_addr, 1, 0, 4); + $address += 2; + } else { + die +"$0 : syntax error in line $lineno : $_ + expected (initiator) WAIT DISCONNECT or + (initiator) WAIT RESELECT alternate_address or + (target) WAIT SELECT alternate_address +"; + } +# Handle SET and CLEAR instructions. Note that we should also do something +# with this syntax to set target mode. + } elsif (/^\s*(SET|CLEAR)\s+(.*)/i) { + $set = $1; + $list = $2; + $code[$address] = ($set =~ /SET/i) ? 0x58_00_00_00 : + 0x60_00_00_00; + foreach $arg (split (/\s+AND\s+/i,$list)) { + if ($arg =~ /ATN/i) { + $code[$address] |= 0x00_00_00_08; + } elsif ($arg =~ /ACK/i) { + $code[$address] |= 0x00_00_00_40; + } elsif ($arg =~ /TARGET/i) { + $code[$address] |= 0x00_00_02_00; + } elsif ($arg =~ /CARRY/i) { + $code[$address] |= 0x00_00_04_00; + } else { + die +"$0 : syntax error in line $lineno : $_ + expected $set followed by a AND delimited list of one or + more strings from the list ACK, ATN, CARRY, TARGET. +"; + } + } + $code[$address + 1] = 0x00_00_00_00; + $address += 2; + } elsif (/^\s*(JUMP|CALL|INT)\s+(.*)/i) { + $instruction = $1; + $rest = $2; + if ($instruction =~ /JUMP/i) { + $code[$address] = 0x80_00_00_00; + } elsif ($instruction =~ /CALL/i) { + $code[$address] = 0x88_00_00_00; + } else { + $code[$address] = 0x98_00_00_00; + } +print STDERR "parsing JUMP, rest = $rest\n" if ($debug); + +# Relative jump. + if ($rest =~ /^(REL\s*\(\s*$identifier\s*\))\s*(.*)/i) { + $addr = $1; + $rest = $2; +print STDERR "parsing JUMP REL, addr = $addr, rest = $rest\n" if ($debug); + $code[$address] |= 0x00_80_00_00; + &parse_value($addr, 1, 0, 4); +# Absolute jump, requires no more gunk + } elsif ($rest =~ /^($value)\s*(.*)/) { + $addr = $1; + $rest = $2; + &parse_value($addr, 1, 0, 4); + } else { + die +"$0 : syntax error in line $lineno : $_ + expected
or REL (address) +"; + } + + if ($rest =~ /^,\s*(.*)/) { + &parse_conditional($1); + } elsif ($rest =~ /^\s*$/) { + $code[$address] |= (1 << 19); + } else { + die +"$0 : syntax error in line $lineno : $_ + expected , or end of line, got $1 +"; + } + + $address += 2; + } elsif (/^\s*(RETURN|INTFLY)\s*(.*)/i) { + $instruction = $1; + $conditional = $2; +print STDERR "Parsing $instruction\n" if ($debug); + $code[$address] = ($instruction =~ /RETURN/i) ? 0x90_00_00_00 : + 0x98_10_00_00; + if ($conditional =~ /^,\s*(.*)/) { + $conditional = $1; + &parse_conditional ($conditional); + } elsif ($conditional !~ /^\s*$/) { + die +"$0 : syntax error in line $lineno : $_ + expected , +"; + } else { + $code[$address] |= 0x00_08_00_00; + } + + $code[$address + 1] = 0x00_00_00_00; + $address += 2; + } elsif (/^\s*DISCONNECT\s*$/) { + $code[$address] = 0x48_00_00_00; + $code[$address + 1] = 0x00_00_00_00; + $address += 2; +# I'm not sure that I should be including this extension, but +# what the hell? + } elsif (/^\s*NOP\s*$/i) { + $code[$address] = 0x80_88_00_00; + $code[$address + 1] = 0x00_00_00_00; + $address += 2; +# Ignore lines consisting entirely of white space + } elsif (/^\s*$/) { + } else { + die +"$0 : syntax error in line $lineno: $_ + expected label:, ABSOLUTE, CLEAR, DISCONNECT, EXTERNAL, MOVE, RESELECT, + SELECT SET, or WAIT +"; + } +} + +# Fill in label references + +@undefined = keys %forward; +if ($#undefined >= 0) { + print STDERR "Undefined symbols : \n"; + foreach $undef (@undefined) { + print STDERR "$undef in $forward{$undef}\n"; + } + exit 1; +} + +@label_patches = (); + +@external_patches = (); + +@absolute = sort @absolute; + +foreach $i (@absolute) { + foreach $j (split (/\s+/,$symbol_references{$i})) { + $j =~ /(REL|ABS),(.*),(.*)/; + $type = $1; + $address = $2; + $length = $3; + die +"$0 : $symbol $i has invalid relative reference at address $address, + size $length\n" + if ($type eq 'REL'); + + &patch ($address / 4, $address % 4, $length, $symbol_values{$i}); + } +} + +foreach $external (@external) { +print STDERR "checking external $external \n" if ($debug_external); + if ($symbol_references{$external} ne undef) { + for $reference (split(/\s+/,$symbol_references{$external})) { + $reference =~ /(REL|ABS),(.*),(.*)/; + $type = $1; + $address = $2; + $length = $3; + + die +"$0 : symbol $label is external, has invalid relative reference at $address, + size $length\n" + if ($type eq 'REL'); + + die +"$0 : symbol $label has invalid reference at $address, size $length\n" + if ((($address % 4) !=0) || ($length != 4)); + + $symbol = $symbol_values{$external}; + $add = $code[$address / 4]; + if ($add eq 0) { + $code[$address / 4] = $symbol; + } else { + $add = sprintf ("0x%08x", $add); + $code[$address / 4] = "$symbol + $add"; + } + +print STDERR "referenced external $external at $1\n" if ($debug_external); + } + } +} + +foreach $label (@label) { + if ($symbol_references{$label} ne undef) { + for $reference (split(/\s+/,$symbol_references{$label})) { + $reference =~ /(REL|ABS),(.*),(.*)/; + $type = $1; + $address = $2; + $length = $3; + + if ((($address % 4) !=0) || ($length != 4)) { + die "$0 : symbol $label has invalid reference at $1, size $2\n"; + } + + if ($type eq 'ABS') { + $code[$address / 4] += $symbol_values{$label}; + push (@label_patches, $address / 4); + } else { +# +# - The address of the reference should be in the second and last word +# of an instruction +# - Relative jumps, etc. 
are relative to the DSP of the _next_ instruction +# +# So, we need to add four to the address of the reference, to get +# the address of the next instruction, when computing the reference. + + $tmp = $symbol_values{$label} - + ($address + 4); + die +# Relative addressing is limited to 24 bits. +"$0 : symbol $label is too far ($tmp) from $address to reference as + relative/\n" if (($tmp >= 0x80_00_00) || ($tmp < -0x80_00_00)); + $code[$address / 4] = $tmp & 0x00_ff_ff_ff; + } + } + } +} + +# Output SCRIPT[] array, one instruction per line. Optionally +# print the original code too. + +open (OUTPUT, ">$output") || die "$0 : can't open $output for writing\n"; +open (OUTPUTU, ">$outputu") || die "$0 : can't open $outputu for writing\n"; + +($_ = $0) =~ s:.*/::; +print OUTPUT "/* DO NOT EDIT - Generated automatically by ".$_." */\n"; +print OUTPUT "static u32 ".$prefix."SCRIPT[] = {\n"; +$instructions = 0; +for ($i = 0; $i < $#code; ) { + if ($list_in_array) { + printf OUTPUT "/*\n$list[$i]\nat 0x%08x : */", $i; + } + printf OUTPUT "\t0x%08x,", $code[$i]; + printf STDERR "Address $i = %x\n", $code[$i] if ($debug); + if ($code[$i + 1] =~ /\s*($identifier)(.*)$/) { + push (@external_patches, $i+1, $1); + printf OUTPUT "0%s,", $2 + } else { + printf OUTPUT "0x%08x,",$code[$i+1]; + } + + if (($code[$i] & 0xff_00_00_00) == 0xc0_00_00_00) { + if ($code[$i + 2] =~ /$identifier/) { + push (@external_patches, $i+2, $code[$i+2]); + printf OUTPUT "0,\n"; + } else { + printf OUTPUT "0x%08x,\n",$code[$i+2]; + } + $i += 3; + } else { + printf OUTPUT "\n"; + $i += 2; + } + $instructions += 1; +} +print OUTPUT "};\n\n"; + +foreach $i (@absolute) { + printf OUTPUT "#define A_$i\t0x%08x\n", $symbol_values{$i}; + if (defined($prefix) && $prefix ne '') { + printf OUTPUT "#define A_".$i."_used ".$prefix."A_".$i."_used\n"; + printf OUTPUTU "#undef A_".$i."_used\n"; + } + printf OUTPUTU "#undef A_$i\n"; + + printf OUTPUT "static u32 A_".$i."_used\[\] __attribute((unused)) = {\n"; +printf STDERR "$i is used $symbol_references{$i}\n" if ($debug); + foreach $j (split (/\s+/,$symbol_references{$i})) { + $j =~ /(ABS|REL),(.*),(.*)/; + if ($1 eq 'ABS') { + $address = $2; + $length = $3; + printf OUTPUT "\t0x%08x,\n", $address / 4; + } + } + printf OUTPUT "};\n\n"; +} + +foreach $i (sort @entry) { + printf OUTPUT "#define Ent_$i\t0x%08x\n", $symbol_values{$i}; + printf OUTPUTU "#undef Ent_$i\n", $symbol_values{$i}; +} + +# +# NCR assembler outputs label patches in the form of indices into +# the code. +# +printf OUTPUT "static u32 ".$prefix."LABELPATCHES[] __attribute((unused)) = {\n"; +for $patch (sort {$a <=> $b} @label_patches) { + printf OUTPUT "\t0x%08x,\n", $patch; +} +printf OUTPUT "};\n\n"; + +$num_external_patches = 0; +printf OUTPUT "static struct {\n\tu32\toffset;\n\tvoid\t\t*address;\n". 
+ "} ".$prefix."EXTERNAL_PATCHES[] __attribute((unused)) = {\n"; +while ($ident = pop(@external_patches)) { + $off = pop(@external_patches); + printf OUTPUT "\t{0x%08x, &%s},\n", $off, $ident; + ++$num_external_patches; +} +printf OUTPUT "};\n\n"; + +printf OUTPUT "static u32 ".$prefix."INSTRUCTIONS __attribute((unused))\t= %d;\n", + $instructions; +printf OUTPUT "static u32 ".$prefix."PATCHES __attribute((unused))\t= %d;\n", + $#label_patches+1; +printf OUTPUT "static u32 ".$prefix."EXTERNAL_PATCHES_LEN __attribute((unused))\t= %d;\n", + $num_external_patches; +close OUTPUT; +close OUTPUTU; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c new file mode 100644 index 000000000..89367c4bf --- /dev/null +++ b/drivers/scsi/scsi.c @@ -0,0 +1,1015 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * scsi.c Copyright (C) 1992 Drew Eckhardt + * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale + * Copyright (C) 2002, 2003 Christoph Hellwig + * + * generic mid-level SCSI driver + * Initial versions: Drew Eckhardt + * Subsequent revisions: Eric Youngdale + * + * + * + * Bug correction thanks go to : + * Rik Faith + * Tommy Thorn + * Thomas Wuensche + * + * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to + * add scatter-gather, multiple outstanding request, and other + * enhancements. + * + * Native multichannel, wide scsi, /proc/scsi and hot plugging + * support added by Michael Neuffer + * + * Added request_module("scsi_hostadapter") for kerneld: + * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf) + * Bjorn Ekwall + * (changed to kmod) + * + * Major improvements to the timeout, abort, and reset processing, + * as well as performance modifications for large queue depths by + * Leonard N. Zubkoff + * + * Converted cli() code to spinlocks, Ingo Molnar + * + * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli + * + * out_of_space hacks, D. Gilbert (dpg) 990608 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "scsi_priv.h" +#include "scsi_logging.h" + +#define CREATE_TRACE_POINTS +#include + +/* + * Definitions and constants. + */ + +/* + * Note - the initial logging level can be set here to log events at boot time. + * After the system is up, you may enable logging via the /proc interface. + */ +unsigned int scsi_logging_level; +#if defined(CONFIG_SCSI_LOGGING) +EXPORT_SYMBOL(scsi_logging_level); +#endif + +#ifdef CONFIG_SCSI_LOGGING +void scsi_log_send(struct scsi_cmnd *cmd) +{ + unsigned int level; + + /* + * If ML QUEUE log level is greater than or equal to: + * + * 1: nothing (match completion) + * + * 2: log opcode + command of all commands + cmd address + * + * 3: same as 2 + * + * 4: same as 3 + */ + if (unlikely(scsi_logging_level)) { + level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, + SCSI_LOG_MLQUEUE_BITS); + if (level > 1) { + scmd_printk(KERN_INFO, cmd, + "Send: scmd 0x%p\n", cmd); + scsi_print_command(cmd); + } + } +} + +void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) +{ + unsigned int level; + + /* + * If ML COMPLETE log level is greater than or equal to: + * + * 1: log disposition, result, opcode + command, and conditionally + * sense data for failures or non SUCCESS dispositions. + * + * 2: same as 1 but for all command completions. 
+ * + * 3: same as 2 + * + * 4: same as 3 plus dump extra junk + */ + if (unlikely(scsi_logging_level)) { + level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT, + SCSI_LOG_MLCOMPLETE_BITS); + if (((level > 0) && (cmd->result || disposition != SUCCESS)) || + (level > 1)) { + scsi_print_result(cmd, "Done", disposition); + scsi_print_command(cmd); + if (scsi_status_is_check_condition(cmd->result)) + scsi_print_sense(cmd); + if (level > 3) + scmd_printk(KERN_INFO, cmd, + "scsi host busy %d failed %d\n", + scsi_host_busy(cmd->device->host), + cmd->device->host->host_failed); + } + } +} +#endif + +/** + * scsi_finish_command - cleanup and pass command back to upper layer + * @cmd: the command + * + * Description: Pass command off to upper layer for finishing of I/O + * request, waking processes that are waiting on results, + * etc. + */ +void scsi_finish_command(struct scsi_cmnd *cmd) +{ + struct scsi_device *sdev = cmd->device; + struct scsi_target *starget = scsi_target(sdev); + struct Scsi_Host *shost = sdev->host; + struct scsi_driver *drv; + unsigned int good_bytes; + + scsi_device_unbusy(sdev, cmd); + + /* + * Clear the flags that say that the device/target/host is no longer + * capable of accepting new commands. + */ + if (atomic_read(&shost->host_blocked)) + atomic_set(&shost->host_blocked, 0); + if (atomic_read(&starget->target_blocked)) + atomic_set(&starget->target_blocked, 0); + if (atomic_read(&sdev->device_blocked)) + atomic_set(&sdev->device_blocked, 0); + + SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev, + "Notifying upper driver of completion " + "(result %x)\n", cmd->result)); + + good_bytes = scsi_bufflen(cmd); + if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) { + int old_good_bytes = good_bytes; + drv = scsi_cmd_to_driver(cmd); + if (drv->done) + good_bytes = drv->done(cmd); + /* + * USB may not give sense identifying bad sector and + * simply return a residue instead, so subtract off the + * residue if drv->done() error processing indicates no + * change to the completion length. + */ + if (good_bytes == old_good_bytes) + good_bytes -= scsi_get_resid(cmd); + } + scsi_io_completion(cmd, good_bytes); +} + + +/* + * 4096 is big enough for saturating fast SCSI LUNs. + */ +int scsi_device_max_queue_depth(struct scsi_device *sdev) +{ + return min_t(int, sdev->host->can_queue, 4096); +} + +/** + * scsi_change_queue_depth - change a device's queue depth + * @sdev: SCSI Device in question + * @depth: number of commands allowed to be queued to the driver + * + * Sets the device queue depth and returns the new value. + */ +int scsi_change_queue_depth(struct scsi_device *sdev, int depth) +{ + depth = min_t(int, depth, scsi_device_max_queue_depth(sdev)); + + if (depth > 0) { + sdev->queue_depth = depth; + wmb(); + } + + if (sdev->request_queue) + blk_set_queue_depth(sdev->request_queue, depth); + + sbitmap_resize(&sdev->budget_map, sdev->queue_depth); + + return sdev->queue_depth; +} +EXPORT_SYMBOL(scsi_change_queue_depth); + +/** + * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth + * @sdev: SCSI Device in question + * @depth: Current number of outstanding SCSI commands on this device, + * not counting the one returned as QUEUE_FULL. + * + * Description: This function will track successive QUEUE_FULL events on a + * specific SCSI device to determine if and when there is a + * need to adjust the queue depth on the device. 
+ * + * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth, + * -1 - Drop back to untagged operation using host->cmd_per_lun + * as the untagged command depth + * + * Lock Status: None held on entry + * + * Notes: Low level drivers may call this at any time and we will do + * "The Right Thing." We are interrupt context safe. + */ +int scsi_track_queue_full(struct scsi_device *sdev, int depth) +{ + + /* + * Don't let QUEUE_FULLs on the same + * jiffies count, they could all be from + * same event. + */ + if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4)) + return 0; + + sdev->last_queue_full_time = jiffies; + if (sdev->last_queue_full_depth != depth) { + sdev->last_queue_full_count = 1; + sdev->last_queue_full_depth = depth; + } else { + sdev->last_queue_full_count++; + } + + if (sdev->last_queue_full_count <= 10) + return 0; + + return scsi_change_queue_depth(sdev, depth); +} +EXPORT_SYMBOL(scsi_track_queue_full); + +/** + * scsi_vpd_inquiry - Request a device provide us with a VPD page + * @sdev: The device to ask + * @buffer: Where to put the result + * @page: Which Vital Product Data to return + * @len: The length of the buffer + * + * This is an internal helper function. You probably want to use + * scsi_get_vpd_page instead. + * + * Returns size of the vpd page on success or a negative error number. + */ +static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer, + u8 page, unsigned len) +{ + int result; + unsigned char cmd[16]; + + if (len < 4) + return -EINVAL; + + cmd[0] = INQUIRY; + cmd[1] = 1; /* EVPD */ + cmd[2] = page; + cmd[3] = len >> 8; + cmd[4] = len & 0xff; + cmd[5] = 0; /* Control byte */ + + /* + * I'm not convinced we need to try quite this hard to get VPD, but + * all the existing users tried this hard. + */ + result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len, + 30 * HZ, 3, NULL); + if (result) + return -EIO; + + /* + * Sanity check that we got the page back that we asked for and that + * the page size is not 0. + */ + if (buffer[1] != page) + return -EIO; + + result = get_unaligned_be16(&buffer[2]); + if (!result) + return -EIO; + + return result + 4; +} + +static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page) +{ + unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4); + int result; + + if (sdev->no_vpd_size) + return SCSI_DEFAULT_VPD_LEN; + + /* + * Fetch the VPD page header to find out how big the page + * is. This is done to prevent problems on legacy devices + * which can not handle allocation lengths as large as + * potentially requested by the caller. + */ + result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header)); + if (result < 0) + return 0; + + if (result < SCSI_VPD_HEADER_SIZE) { + dev_warn_once(&sdev->sdev_gendev, + "%s: short VPD page 0x%02x length: %d bytes\n", + __func__, page, result); + return 0; + } + + return result; +} + +/** + * scsi_get_vpd_page - Get Vital Product Data from a SCSI device + * @sdev: The device to ask + * @page: Which Vital Product Data to return + * @buf: where to store the VPD + * @buf_len: number of bytes in the VPD buffer area + * + * SCSI devices may optionally supply Vital Product Data. Each 'page' + * of VPD is defined in the appropriate SCSI document (eg SPC, SBC). + * If the device supports this VPD page, this routine fills @buf + * with the data from that page and return 0. If the VPD page is not + * supported or its content cannot be retrieved, -EINVAL is returned. 
+ */ +int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, + int buf_len) +{ + int result, vpd_len; + + if (!scsi_device_supports_vpd(sdev)) + return -EINVAL; + + vpd_len = scsi_get_vpd_size(sdev, page); + if (vpd_len <= 0) + return -EINVAL; + + vpd_len = min(vpd_len, buf_len); + + /* + * Fetch the actual page. Since the appropriate size was reported + * by the device it is now safe to ask for something bigger. + */ + memset(buf, 0, buf_len); + result = scsi_vpd_inquiry(sdev, buf, page, vpd_len); + if (result < 0) + return -EINVAL; + else if (result > vpd_len) + dev_warn_once(&sdev->sdev_gendev, + "%s: VPD page 0x%02x result %d > %d bytes\n", + __func__, page, result, vpd_len); + + return 0; +} +EXPORT_SYMBOL_GPL(scsi_get_vpd_page); + +/** + * scsi_get_vpd_buf - Get Vital Product Data from a SCSI device + * @sdev: The device to ask + * @page: Which Vital Product Data to return + * + * Returns %NULL upon failure. + */ +static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page) +{ + struct scsi_vpd *vpd_buf; + int vpd_len, result; + + vpd_len = scsi_get_vpd_size(sdev, page); + if (vpd_len <= 0) + return NULL; + +retry_pg: + /* + * Fetch the actual page. Since the appropriate size was reported + * by the device it is now safe to ask for something bigger. + */ + vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL); + if (!vpd_buf) + return NULL; + + result = scsi_vpd_inquiry(sdev, vpd_buf->data, page, vpd_len); + if (result < 0) { + kfree(vpd_buf); + return NULL; + } + if (result > vpd_len) { + dev_warn_once(&sdev->sdev_gendev, + "%s: VPD page 0x%02x result %d > %d bytes\n", + __func__, page, result, vpd_len); + vpd_len = result; + kfree(vpd_buf); + goto retry_pg; + } + + vpd_buf->len = result; + + return vpd_buf; +} + +static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page, + struct scsi_vpd __rcu **sdev_vpd_buf) +{ + struct scsi_vpd *vpd_buf; + + vpd_buf = scsi_get_vpd_buf(sdev, page); + if (!vpd_buf) + return; + + mutex_lock(&sdev->inquiry_mutex); + vpd_buf = rcu_replace_pointer(*sdev_vpd_buf, vpd_buf, + lockdep_is_held(&sdev->inquiry_mutex)); + mutex_unlock(&sdev->inquiry_mutex); + + if (vpd_buf) + kfree_rcu(vpd_buf, rcu); +} + +/** + * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure + * @sdev: The device to ask + * + * Attach the 'Device Identification' VPD page (0x83) and the + * 'Unit Serial Number' VPD page (0x80) to a SCSI device + * structure. This information can be used to identify the device + * uniquely. 
+ */ +void scsi_attach_vpd(struct scsi_device *sdev) +{ + int i; + struct scsi_vpd *vpd_buf; + + if (!scsi_device_supports_vpd(sdev)) + return; + + /* Ask for all the pages supported by this device */ + vpd_buf = scsi_get_vpd_buf(sdev, 0); + if (!vpd_buf) + return; + + for (i = 4; i < vpd_buf->len; i++) { + if (vpd_buf->data[i] == 0x0) + scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0); + if (vpd_buf->data[i] == 0x80) + scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80); + if (vpd_buf->data[i] == 0x83) + scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83); + if (vpd_buf->data[i] == 0x89) + scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89); + if (vpd_buf->data[i] == 0xb0) + scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0); + if (vpd_buf->data[i] == 0xb1) + scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1); + if (vpd_buf->data[i] == 0xb2) + scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2); + } + kfree(vpd_buf); +} + +/** + * scsi_report_opcode - Find out if a given command is supported + * @sdev: scsi device to query + * @buffer: scratch buffer (must be at least 20 bytes long) + * @len: length of buffer + * @opcode: opcode for the command to look up + * @sa: service action for the command to look up + * + * Uses the REPORT SUPPORTED OPERATION CODES to check support for the + * command identified with @opcode and @sa. If the command does not + * have a service action, @sa must be 0. Returns -EINVAL if RSOC fails, + * 0 if the command is not supported and 1 if the device claims to + * support the command. + */ +int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, + unsigned int len, unsigned char opcode, + unsigned short sa) +{ + unsigned char cmd[16]; + struct scsi_sense_hdr sshdr; + int result, request_len; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + + if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3) + return -EINVAL; + + /* RSOC header + size of command we are asking about */ + request_len = 4 + COMMAND_SIZE(opcode); + if (request_len > len) { + dev_warn_once(&sdev->sdev_gendev, + "%s: len %u bytes, opcode 0x%02x needs %u\n", + __func__, len, opcode, request_len); + return -EINVAL; + } + + memset(cmd, 0, 16); + cmd[0] = MAINTENANCE_IN; + cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES; + if (!sa) { + cmd[2] = 1; /* One command format */ + cmd[3] = opcode; + } else { + cmd[2] = 3; /* One command format with service action */ + cmd[3] = opcode; + put_unaligned_be16(sa, &cmd[4]); + } + put_unaligned_be32(request_len, &cmd[6]); + memset(buffer, 0, len); + + result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, + request_len, 30 * HZ, 3, &exec_args); + if (result < 0) + return result; + if (result && scsi_sense_valid(&sshdr) && + sshdr.sense_key == ILLEGAL_REQUEST && + (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00) + return -EINVAL; + + if ((buffer[1] & 3) == 3) /* Command supported */ + return 1; + + return 0; +} +EXPORT_SYMBOL(scsi_report_opcode); + +#define SCSI_CDL_CHECK_BUF_LEN 64 + +static bool scsi_cdl_check_cmd(struct scsi_device *sdev, u8 opcode, u16 sa, + unsigned char *buf) +{ + int ret; + u8 cdlp; + + /* Check operation code */ + ret = scsi_report_opcode(sdev, buf, SCSI_CDL_CHECK_BUF_LEN, opcode, sa); + if (ret <= 0) + return false; + + if ((buf[1] & 0x03) != 0x03) + return false; + + /* + * See SPC-6, One_command parameter data format for + * REPORT SUPPORTED OPERATION CODES. 
We have the following cases + * depending on rwcdlp (buf[0] & 0x01) value: + * - rwcdlp == 0: then cdlp indicates support for the A mode page when + * it is equal to 1 and for the B mode page when it is + * equal to 2. + * - rwcdlp == 1: then cdlp indicates support for the T2A mode page + * when it is equal to 1 and for the T2B mode page when + * it is equal to 2. + * Overall, to detect support for command duration limits, we only need + * to check that cdlp is 1 or 2. + */ + cdlp = (buf[1] & 0x18) >> 3; + + return cdlp == 0x01 || cdlp == 0x02; +} + +/** + * scsi_cdl_check - Check if a SCSI device supports Command Duration Limits + * @sdev: The device to check + */ +void scsi_cdl_check(struct scsi_device *sdev) +{ + bool cdl_supported; + unsigned char *buf; + + /* + * Support for CDL was defined in SPC-5. Ignore devices reporting an + * lower SPC version. This also avoids problems with old drives choking + * on MAINTENANCE_IN / MI_REPORT_SUPPORTED_OPERATION_CODES with a + * service action specified, as done in scsi_cdl_check_cmd(). + */ + if (sdev->scsi_level < SCSI_SPC_5) { + sdev->cdl_supported = 0; + return; + } + + buf = kmalloc(SCSI_CDL_CHECK_BUF_LEN, GFP_KERNEL); + if (!buf) { + sdev->cdl_supported = 0; + return; + } + + /* Check support for READ_16, WRITE_16, READ_32 and WRITE_32 commands */ + cdl_supported = + scsi_cdl_check_cmd(sdev, READ_16, 0, buf) || + scsi_cdl_check_cmd(sdev, WRITE_16, 0, buf) || + scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, READ_32, buf) || + scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, WRITE_32, buf); + if (cdl_supported) { + /* + * We have CDL support: force the use of READ16/WRITE16. + * READ32 and WRITE32 will be used for devices that support + * the T10_PI_TYPE2_PROTECTION protection type. + */ + sdev->use_16_for_rw = 1; + sdev->use_10_for_rw = 0; + + sdev->cdl_supported = 1; + } else { + sdev->cdl_supported = 0; + } + + kfree(buf); +} + +/** + * scsi_cdl_enable - Enable or disable a SCSI device supports for Command + * Duration Limits + * @sdev: The target device + * @enable: the target state + */ +int scsi_cdl_enable(struct scsi_device *sdev, bool enable) +{ + struct scsi_mode_data data; + struct scsi_sense_hdr sshdr; + struct scsi_vpd *vpd; + bool is_ata = false; + char buf[64]; + int ret; + + if (!sdev->cdl_supported) + return -EOPNOTSUPP; + + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + if (vpd) + is_ata = true; + rcu_read_unlock(); + + /* + * For ATA devices, CDL needs to be enabled with a SET FEATURES command. + */ + if (is_ata) { + char *buf_data; + int len; + + ret = scsi_mode_sense(sdev, 0x08, 0x0a, 0xf2, buf, sizeof(buf), + 5 * HZ, 3, &data, NULL); + if (ret) + return -EINVAL; + + /* Enable CDL using the ATA feature page */ + len = min_t(size_t, sizeof(buf), + data.length - data.header_length - + data.block_descriptor_length); + buf_data = buf + data.header_length + + data.block_descriptor_length; + if (enable) + buf_data[4] = 0x02; + else + buf_data[4] = 0; + + ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3, + &data, &sshdr); + if (ret) { + if (scsi_sense_valid(&sshdr)) + scsi_print_sense_hdr(sdev, + dev_name(&sdev->sdev_gendev), &sshdr); + return ret; + } + } + + sdev->cdl_enable = enable; + + return 0; +} + +/** + * scsi_device_get - get an additional reference to a scsi_device + * @sdev: device to get a reference to + * + * Description: Gets a reference to the scsi_device and increments the use count + * of the underlying LLDD module. 
You must hold host_lock of the + * parent Scsi_Host or already have a reference when calling this. + * + * This will fail if a device is deleted or cancelled, or when the LLD module + * is in the process of being unloaded. + */ +int scsi_device_get(struct scsi_device *sdev) +{ + if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL) + goto fail; + if (!try_module_get(sdev->host->hostt->module)) + goto fail; + if (!get_device(&sdev->sdev_gendev)) + goto fail_put_module; + return 0; + +fail_put_module: + module_put(sdev->host->hostt->module); +fail: + return -ENXIO; +} +EXPORT_SYMBOL(scsi_device_get); + +/** + * scsi_device_put - release a reference to a scsi_device + * @sdev: device to release a reference on. + * + * Description: Release a reference to the scsi_device and decrements the use + * count of the underlying LLDD module. The device is freed once the last + * user vanishes. + */ +void scsi_device_put(struct scsi_device *sdev) +{ + struct module *mod = sdev->host->hostt->module; + + put_device(&sdev->sdev_gendev); + module_put(mod); +} +EXPORT_SYMBOL(scsi_device_put); + +/* helper for shost_for_each_device, see that for documentation */ +struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost, + struct scsi_device *prev) +{ + struct list_head *list = (prev ? &prev->siblings : &shost->__devices); + struct scsi_device *next = NULL; + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + while (list->next != &shost->__devices) { + next = list_entry(list->next, struct scsi_device, siblings); + /* skip devices that we can't get a reference to */ + if (!scsi_device_get(next)) + break; + next = NULL; + list = list->next; + } + spin_unlock_irqrestore(shost->host_lock, flags); + + if (prev) + scsi_device_put(prev); + return next; +} +EXPORT_SYMBOL(__scsi_iterate_devices); + +/** + * starget_for_each_device - helper to walk all devices of a target + * @starget: target whose devices we want to iterate over. + * @data: Opaque passed to each function call. + * @fn: Function to call on each device + * + * This traverses over each device of @starget. The devices have + * a reference that must be released by scsi_host_put when breaking + * out of the loop. + */ +void starget_for_each_device(struct scsi_target *starget, void *data, + void (*fn)(struct scsi_device *, void *)) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct scsi_device *sdev; + + shost_for_each_device(sdev, shost) { + if ((sdev->channel == starget->channel) && + (sdev->id == starget->id)) + fn(sdev, data); + } +} +EXPORT_SYMBOL(starget_for_each_device); + +/** + * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED) + * @starget: target whose devices we want to iterate over. + * @data: parameter for callback @fn() + * @fn: callback function that is invoked for each device + * + * This traverses over each device of @starget. It does _not_ + * take a reference on the scsi_device, so the whole loop must be + * protected by shost->host_lock. + * + * Note: The only reason why drivers would want to use this is because + * they need to access the device list in irq context. Otherwise you + * really want to use starget_for_each_device instead. 
+ **/ +void __starget_for_each_device(struct scsi_target *starget, void *data, + void (*fn)(struct scsi_device *, void *)) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct scsi_device *sdev; + + __shost_for_each_device(sdev, shost) { + if ((sdev->channel == starget->channel) && + (sdev->id == starget->id)) + fn(sdev, data); + } +} +EXPORT_SYMBOL(__starget_for_each_device); + +/** + * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED) + * @starget: SCSI target pointer + * @lun: SCSI Logical Unit Number + * + * Description: Looks up the scsi_device with the specified @lun for a given + * @starget. The returned scsi_device does not have an additional + * reference. You must hold the host's host_lock over this call and + * any access to the returned scsi_device. A scsi_device in state + * SDEV_DEL is skipped. + * + * Note: The only reason why drivers should use this is because + * they need to access the device list in irq context. Otherwise you + * really want to use scsi_device_lookup_by_target instead. + **/ +struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, + u64 lun) +{ + struct scsi_device *sdev; + + list_for_each_entry(sdev, &starget->devices, same_target_siblings) { + if (sdev->sdev_state == SDEV_DEL) + continue; + if (sdev->lun ==lun) + return sdev; + } + + return NULL; +} +EXPORT_SYMBOL(__scsi_device_lookup_by_target); + +/** + * scsi_device_lookup_by_target - find a device given the target + * @starget: SCSI target pointer + * @lun: SCSI Logical Unit Number + * + * Description: Looks up the scsi_device with the specified @lun for a given + * @starget. The returned scsi_device has an additional reference that + * needs to be released with scsi_device_put once you're done with it. + **/ +struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, + u64 lun) +{ + struct scsi_device *sdev; + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + sdev = __scsi_device_lookup_by_target(starget, lun); + if (sdev && scsi_device_get(sdev)) + sdev = NULL; + spin_unlock_irqrestore(shost->host_lock, flags); + + return sdev; +} +EXPORT_SYMBOL(scsi_device_lookup_by_target); + +/** + * __scsi_device_lookup - find a device given the host (UNLOCKED) + * @shost: SCSI host pointer + * @channel: SCSI channel (zero if only one channel) + * @id: SCSI target number (physical unit number) + * @lun: SCSI Logical Unit Number + * + * Description: Looks up the scsi_device with the specified @channel, @id, @lun + * for a given host. The returned scsi_device does not have an additional + * reference. You must hold the host's host_lock over this call and any access + * to the returned scsi_device. + * + * Note: The only reason why drivers would want to use this is because + * they need to access the device list in irq context. Otherwise you + * really want to use scsi_device_lookup instead. 
+ **/ +struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost, + uint channel, uint id, u64 lun) +{ + struct scsi_device *sdev; + + list_for_each_entry(sdev, &shost->__devices, siblings) { + if (sdev->sdev_state == SDEV_DEL) + continue; + if (sdev->channel == channel && sdev->id == id && + sdev->lun ==lun) + return sdev; + } + + return NULL; +} +EXPORT_SYMBOL(__scsi_device_lookup); + +/** + * scsi_device_lookup - find a device given the host + * @shost: SCSI host pointer + * @channel: SCSI channel (zero if only one channel) + * @id: SCSI target number (physical unit number) + * @lun: SCSI Logical Unit Number + * + * Description: Looks up the scsi_device with the specified @channel, @id, @lun + * for a given host. The returned scsi_device has an additional reference that + * needs to be released with scsi_device_put once you're done with it. + **/ +struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost, + uint channel, uint id, u64 lun) +{ + struct scsi_device *sdev; + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + sdev = __scsi_device_lookup(shost, channel, id, lun); + if (sdev && scsi_device_get(sdev)) + sdev = NULL; + spin_unlock_irqrestore(shost->host_lock, flags); + + return sdev; +} +EXPORT_SYMBOL(scsi_device_lookup); + +MODULE_DESCRIPTION("SCSI core"); +MODULE_LICENSE("GPL"); + +module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); + +static int __init init_scsi(void) +{ + int error; + + error = scsi_init_procfs(); + if (error) + goto cleanup_queue; + error = scsi_init_devinfo(); + if (error) + goto cleanup_procfs; + error = scsi_init_hosts(); + if (error) + goto cleanup_devlist; + error = scsi_init_sysctl(); + if (error) + goto cleanup_hosts; + error = scsi_sysfs_register(); + if (error) + goto cleanup_sysctl; + + scsi_netlink_init(); + + printk(KERN_NOTICE "SCSI subsystem initialized\n"); + return 0; + +cleanup_sysctl: + scsi_exit_sysctl(); +cleanup_hosts: + scsi_exit_hosts(); +cleanup_devlist: + scsi_exit_devinfo(); +cleanup_procfs: + scsi_exit_procfs(); +cleanup_queue: + scsi_exit_queue(); + printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n", + -error); + return error; +} + +static void __exit exit_scsi(void) +{ + scsi_netlink_exit(); + scsi_sysfs_unregister(); + scsi_exit_sysctl(); + scsi_exit_hosts(); + scsi_exit_devinfo(); + scsi_exit_procfs(); + scsi_exit_queue(); +} + +subsys_initcall(init_scsi); +module_exit(exit_scsi); diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c new file mode 100644 index 000000000..a9a9ec086 --- /dev/null +++ b/drivers/scsi/scsi_bsg.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include "scsi_priv.h" + +#define uptr64(val) ((void __user *)(uintptr_t)(val)) + +static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr, + bool open_for_write, unsigned int timeout) +{ + struct scsi_cmnd *scmd; + struct request *rq; + struct bio *bio; + int ret; + + if (hdr->protocol != BSG_PROTOCOL_SCSI || + hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD) + return -EINVAL; + if (hdr->dout_xfer_len && hdr->din_xfer_len) { + pr_warn_once("BIDI support in bsg has been removed.\n"); + return -EOPNOTSUPP; + } + + rq = scsi_alloc_request(q, hdr->dout_xfer_len ? 
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); + if (IS_ERR(rq)) + return PTR_ERR(rq); + rq->timeout = timeout; + + scmd = blk_mq_rq_to_pdu(rq); + scmd->cmd_len = hdr->request_len; + if (scmd->cmd_len > sizeof(scmd->cmnd)) { + ret = -EINVAL; + goto out_put_request; + } + + ret = -EFAULT; + if (copy_from_user(scmd->cmnd, uptr64(hdr->request), scmd->cmd_len)) + goto out_put_request; + ret = -EPERM; + if (!scsi_cmd_allowed(scmd->cmnd, open_for_write)) + goto out_put_request; + + ret = 0; + if (hdr->dout_xfer_len) { + ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp), + hdr->dout_xfer_len, GFP_KERNEL); + } else if (hdr->din_xfer_len) { + ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp), + hdr->din_xfer_len, GFP_KERNEL); + } + + if (ret) + goto out_put_request; + + bio = rq->bio; + blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL)); + + /* + * fill in all the output members + */ + hdr->device_status = scmd->result & 0xff; + hdr->transport_status = host_byte(scmd->result); + hdr->driver_status = 0; + if (scsi_status_is_check_condition(scmd->result)) + hdr->driver_status = DRIVER_SENSE; + hdr->info = 0; + if (hdr->device_status || hdr->transport_status || hdr->driver_status) + hdr->info |= SG_INFO_CHECK; + hdr->response_len = 0; + + if (scmd->sense_len && hdr->response) { + int len = min_t(unsigned int, hdr->max_response_len, + scmd->sense_len); + + if (copy_to_user(uptr64(hdr->response), scmd->sense_buffer, + len)) + ret = -EFAULT; + else + hdr->response_len = len; + } + + if (rq_data_dir(rq) == READ) + hdr->din_resid = scmd->resid_len; + else + hdr->dout_resid = scmd->resid_len; + + blk_rq_unmap_user(bio); + +out_put_request: + blk_mq_free_request(rq); + return ret; +} + +struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev) +{ + return bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev, + dev_name(&sdev->sdev_gendev), scsi_bsg_sg_io_fn); +} diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c new file mode 100644 index 000000000..9c14fdf61 --- /dev/null +++ b/drivers/scsi/scsi_common.c @@ -0,0 +1,400 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SCSI functions used by both the initiator and the target code. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_LICENSE("GPL v2"); + +/* Command group 3 is reserved and should never be used. */ +const unsigned char scsi_command_size_tbl[8] = { + 6, 10, 10, 12, 16, 12, 10, 10 +}; +EXPORT_SYMBOL(scsi_command_size_tbl); + +/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. + * You may not alter any existing entry (although adding new ones is + * encouraged once assigned by ANSI/INCITS T10). + */ +static const char *const scsi_device_types[] = { + "Direct-Access ", + "Sequential-Access", + "Printer ", + "Processor ", + "WORM ", + "CD-ROM ", + "Scanner ", + "Optical Device ", + "Medium Changer ", + "Communications ", + "ASC IT8 ", + "ASC IT8 ", + "RAID ", + "Enclosure ", + "Direct-Access-RBC", + "Optical card ", + "Bridge controller", + "Object storage ", + "Automation/Drive ", + "Security Manager ", + "Direct-Access-ZBC", +}; + +/** + * scsi_device_type - Return 17-char string indicating device type. 
+ * @type: type number to look up + */ +const char *scsi_device_type(unsigned type) +{ + if (type == 0x1e) + return "Well-known LUN "; + if (type == 0x1f) + return "No Device "; + if (type >= ARRAY_SIZE(scsi_device_types)) + return "Unknown "; + return scsi_device_types[type]; +} +EXPORT_SYMBOL(scsi_device_type); + +enum pr_type scsi_pr_type_to_block(enum scsi_pr_type type) +{ + switch (type) { + case SCSI_PR_WRITE_EXCLUSIVE: + return PR_WRITE_EXCLUSIVE; + case SCSI_PR_EXCLUSIVE_ACCESS: + return PR_EXCLUSIVE_ACCESS; + case SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY: + return PR_WRITE_EXCLUSIVE_REG_ONLY; + case SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY: + return PR_EXCLUSIVE_ACCESS_REG_ONLY; + case SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS: + return PR_WRITE_EXCLUSIVE_ALL_REGS; + case SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS: + return PR_EXCLUSIVE_ACCESS_ALL_REGS; + } + + return 0; +} +EXPORT_SYMBOL_GPL(scsi_pr_type_to_block); + +enum scsi_pr_type block_pr_type_to_scsi(enum pr_type type) +{ + switch (type) { + case PR_WRITE_EXCLUSIVE: + return SCSI_PR_WRITE_EXCLUSIVE; + case PR_EXCLUSIVE_ACCESS: + return SCSI_PR_EXCLUSIVE_ACCESS; + case PR_WRITE_EXCLUSIVE_REG_ONLY: + return SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY; + case PR_EXCLUSIVE_ACCESS_REG_ONLY: + return SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY; + case PR_WRITE_EXCLUSIVE_ALL_REGS: + return SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS; + case PR_EXCLUSIVE_ACCESS_ALL_REGS: + return SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS; + } + + return 0; +} +EXPORT_SYMBOL_GPL(block_pr_type_to_scsi); + +/** + * scsilun_to_int - convert a scsi_lun to an int + * @scsilun: struct scsi_lun to be converted. + * + * Description: + * Convert @scsilun from a struct scsi_lun to a four-byte host byte-ordered + * integer, and return the result. The caller must check for + * truncation before using this function. + * + * Notes: + * For a description of the LUN format, post SCSI-3 see the SCSI + * Architecture Model, for SCSI-3 see the SCSI Controller Commands. + * + * Given a struct scsi_lun of: d2 04 0b 03 00 00 00 00, this function + * returns the integer: 0x0b03d204 + * + * This encoding will return a standard integer LUN for LUNs smaller + * than 256, which typically use a single level LUN structure with + * addressing method 0. + */ +u64 scsilun_to_int(struct scsi_lun *scsilun) +{ + int i; + u64 lun; + + lun = 0; + for (i = 0; i < sizeof(lun); i += 2) + lun = lun | (((u64)scsilun->scsi_lun[i] << ((i + 1) * 8)) | + ((u64)scsilun->scsi_lun[i + 1] << (i * 8))); + return lun; +} +EXPORT_SYMBOL(scsilun_to_int); + +/** + * int_to_scsilun - reverts an int into a scsi_lun + * @lun: integer to be reverted + * @scsilun: struct scsi_lun to be set. + * + * Description: + * Reverts the functionality of the scsilun_to_int, which packed + * an 8-byte lun value into an int. This routine unpacks the int + * back into the lun value. + * + * Notes: + * Given an integer : 0x0b03d204, this function returns a + * struct scsi_lun of: d2 04 0b 03 00 00 00 00 + * + */ +void int_to_scsilun(u64 lun, struct scsi_lun *scsilun) +{ + int i; + + memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun)); + + for (i = 0; i < sizeof(lun); i += 2) { + scsilun->scsi_lun[i] = (lun >> 8) & 0xFF; + scsilun->scsi_lun[i+1] = lun & 0xFF; + lun = lun >> 16; + } +} +EXPORT_SYMBOL(int_to_scsilun); + +/** + * scsi_normalize_sense - normalize main elements from either fixed or + * descriptor sense data format into a common format. 
+ * + * @sense_buffer: byte array containing sense data returned by device + * @sb_len: number of valid bytes in sense_buffer + * @sshdr: pointer to instance of structure that common + * elements are written to. + * + * Notes: + * The "main elements" from sense data are: response_code, sense_key, + * asc, ascq and additional_length (only for descriptor format). + * + * Typically this function can be called after a device has + * responded to a SCSI command with the CHECK_CONDITION status. + * + * Return value: + * true if valid sense data information found, else false; + */ +bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len, + struct scsi_sense_hdr *sshdr) +{ + memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); + + if (!sense_buffer || !sb_len) + return false; + + sshdr->response_code = (sense_buffer[0] & 0x7f); + + if (!scsi_sense_valid(sshdr)) + return false; + + if (sshdr->response_code >= 0x72) { + /* + * descriptor format + */ + if (sb_len > 1) + sshdr->sense_key = (sense_buffer[1] & 0xf); + if (sb_len > 2) + sshdr->asc = sense_buffer[2]; + if (sb_len > 3) + sshdr->ascq = sense_buffer[3]; + if (sb_len > 7) + sshdr->additional_length = sense_buffer[7]; + } else { + /* + * fixed format + */ + if (sb_len > 2) + sshdr->sense_key = (sense_buffer[2] & 0xf); + if (sb_len > 7) { + sb_len = min(sb_len, sense_buffer[7] + 8); + if (sb_len > 12) + sshdr->asc = sense_buffer[12]; + if (sb_len > 13) + sshdr->ascq = sense_buffer[13]; + } + } + + return true; +} +EXPORT_SYMBOL(scsi_normalize_sense); + +/** + * scsi_sense_desc_find - search for a given descriptor type in descriptor sense data format. + * @sense_buffer: byte array of descriptor format sense data + * @sb_len: number of valid bytes in sense_buffer + * @desc_type: value of descriptor type to find + * (e.g. 0 -> information) + * + * Notes: + * only valid when sense data is in descriptor format + * + * Return value: + * pointer to start of (first) descriptor if found else NULL + */ +const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len, + int desc_type) +{ + int add_sen_len, add_len, desc_len, k; + const u8 * descp; + + if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7]))) + return NULL; + if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73)) + return NULL; + add_sen_len = (add_sen_len < (sb_len - 8)) ? + add_sen_len : (sb_len - 8); + descp = &sense_buffer[8]; + for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) { + descp += desc_len; + add_len = (k < (add_sen_len - 1)) ? descp[1]: -1; + desc_len = add_len + 2; + if (descp[0] == desc_type) + return descp; + if (add_len < 0) // short descriptor ?? 
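A compact way to see the two layouts handled by scsi_normalize_sense(): the user-space sketch below re-implements only the fixed/descriptor split for the sense key, ASC and ASCQ. The buffers are hand-built examples, not driver output.

#include <stdint.h>
#include <stdio.h>

struct sense_hdr { uint8_t response_code, sense_key, asc, ascq; };

/* Minimal re-implementation of the fixed vs. descriptor distinction. */
static void parse_sense(const uint8_t *sb, int len, struct sense_hdr *h)
{
	h->response_code = sb[0] & 0x7f;
	if (h->response_code >= 0x72) {		/* descriptor format */
		h->sense_key = sb[1] & 0xf;
		h->asc = sb[2];
		h->ascq = sb[3];
	} else {				/* fixed format */
		h->sense_key = sb[2] & 0xf;
		h->asc = len > 12 ? sb[12] : 0;
		h->ascq = len > 13 ? sb[13] : 0;
	}
}

int main(void)
{
	/* MEDIUM ERROR, UNRECOVERED READ ERROR (0x11/0x00) in both formats */
	uint8_t fixed[18] = { 0x70, 0, 0x03, 0, 0, 0, 0, 0x0a, 0, 0, 0, 0, 0x11, 0x00 };
	uint8_t desc[8]   = { 0x72, 0x03, 0x11, 0x00, 0, 0, 0, 0 };
	struct sense_hdr h;

	parse_sense(fixed, sizeof(fixed), &h);
	printf("fixed:      key=%x asc=%02x ascq=%02x\n", h.sense_key, h.asc, h.ascq);
	parse_sense(desc, sizeof(desc), &h);
	printf("descriptor: key=%x asc=%02x ascq=%02x\n", h.sense_key, h.asc, h.ascq);
	return 0;
}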
+ break; + } + return NULL; +} +EXPORT_SYMBOL(scsi_sense_desc_find); + +/** + * scsi_build_sense_buffer - build sense data in a buffer + * @desc: Sense format (non-zero == descriptor format, + * 0 == fixed format) + * @buf: Where to build sense data + * @key: Sense key + * @asc: Additional sense code + * @ascq: Additional sense code qualifier + * + **/ +void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq) +{ + if (desc) { + buf[0] = 0x72; /* descriptor, current */ + buf[1] = key; + buf[2] = asc; + buf[3] = ascq; + buf[7] = 0; + } else { + buf[0] = 0x70; /* fixed, current */ + buf[2] = key; + buf[7] = 0xa; + buf[12] = asc; + buf[13] = ascq; + } +} +EXPORT_SYMBOL(scsi_build_sense_buffer); + +/** + * scsi_set_sense_information - set the information field in a + * formatted sense data buffer + * @buf: Where to build sense data + * @buf_len: buffer length + * @info: 64-bit information value to be set + * + * Return value: + * 0 on success or -EINVAL for invalid sense buffer length + **/ +int scsi_set_sense_information(u8 *buf, int buf_len, u64 info) +{ + if ((buf[0] & 0x7f) == 0x72) { + u8 *ucp, len; + + len = buf[7]; + ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0); + if (!ucp) { + buf[7] = len + 0xc; + ucp = buf + 8 + len; + } + + if (buf_len < len + 0xc) + /* Not enough room for info */ + return -EINVAL; + + ucp[0] = 0; + ucp[1] = 0xa; + ucp[2] = 0x80; /* Valid bit */ + ucp[3] = 0; + put_unaligned_be64(info, &ucp[4]); + } else if ((buf[0] & 0x7f) == 0x70) { + /* + * Only set the 'VALID' bit if we can represent the value + * correctly; otherwise just fill out the lower bytes and + * clear the 'VALID' flag. + */ + if (info <= 0xffffffffUL) + buf[0] |= 0x80; + else + buf[0] &= 0x7f; + put_unaligned_be32((u32)info, &buf[3]); + } + + return 0; +} +EXPORT_SYMBOL(scsi_set_sense_information); + +/** + * scsi_set_sense_field_pointer - set the field pointer sense key + * specific information in a formatted sense data buffer + * @buf: Where to build sense data + * @buf_len: buffer length + * @fp: field pointer to be set + * @bp: bit pointer to be set + * @cd: command/data bit + * + * Return value: + * 0 on success or -EINVAL for invalid sense buffer length + */ +int scsi_set_sense_field_pointer(u8 *buf, int buf_len, u16 fp, u8 bp, bool cd) +{ + u8 *ucp, len; + + if ((buf[0] & 0x7f) == 0x72) { + len = buf[7]; + ucp = (char *)scsi_sense_desc_find(buf, len + 8, 2); + if (!ucp) { + buf[7] = len + 8; + ucp = buf + 8 + len; + } + + if (buf_len < len + 8) + /* Not enough room for info */ + return -EINVAL; + + ucp[0] = 2; + ucp[1] = 6; + ucp[4] = 0x80; /* Valid bit */ + if (cd) + ucp[4] |= 0x40; + if (bp < 0x8) + ucp[4] |= 0x8 | bp; + put_unaligned_be16(fp, &ucp[5]); + } else if ((buf[0] & 0x7f) == 0x70) { + len = buf[7]; + if (len < 18) + buf[7] = 18; + + buf[15] = 0x80; + if (cd) + buf[15] |= 0x40; + if (bp < 0x8) + buf[15] |= 0x8 | bp; + put_unaligned_be16(fp, &buf[16]); + } + + return 0; +} +EXPORT_SYMBOL(scsi_set_sense_field_pointer); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c new file mode 100644 index 000000000..9c0af5050 --- /dev/null +++ b/drivers/scsi/scsi_debug.c @@ -0,0 +1,7856 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv + * Copyright (C) 1992 Eric Youngdale + * Simulate a host adapter with 2 disks attached. Do a lot of checking + * to make sure that we are not getting blocks mixed up, and PANIC if + * anything out of the ordinary is seen. 
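The fixed-format byte positions written by scsi_build_sense_buffer() and scsi_set_sense_field_pointer() can be sketched directly. The program below mirrors their fixed-format branches for ILLEGAL REQUEST with a field pointer to CDB byte 2, bit 5; the values are chosen for illustration only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t buf[18];
	int i;

	memset(buf, 0, sizeof(buf));
	/* scsi_build_sense_buffer(0, ...) equivalent */
	buf[0] = 0x70;			/* fixed format, current error */
	buf[2] = 0x05;			/* sense key: ILLEGAL REQUEST */
	buf[7] = 0x0a;			/* additional length */
	buf[12] = 0x24;			/* ASC: INVALID FIELD IN CDB */
	buf[13] = 0x00;			/* ASCQ */

	/* scsi_set_sense_field_pointer(..., fp=2, bp=5, cd=true) equivalent */
	buf[7] = 18;			/* raised so bytes 15..17 are covered, as above */
	buf[15] = 0x80 | 0x40 | 0x08 | 5;	/* SKSV | C/D (CDB) | BPV | bit 5 */
	buf[16] = 0;			/* field pointer, big-endian MSB */
	buf[17] = 2;			/* field pointer, LSB: CDB byte 2 */

	for (i = 0; i < 18; i++)
		printf("%02x%c", buf[i], i == 17 ? '\n' : ' ');
	return 0;
}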
+ * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + * + * Copyright (C) 2001 - 2021 Douglas Gilbert + * + * For documentation see http://sg.danny.cz/sg/scsi_debug.html + */ + + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sd.h" +#include "scsi_logging.h" + +/* make sure inq_product_rev string corresponds to this version */ +#define SDEBUG_VERSION "0191" /* format to fit INQUIRY revision field */ +static const char *sdebug_version_date = "20210520"; + +#define MY_NAME "scsi_debug" + +/* Additional Sense Code (ASC) */ +#define NO_ADDITIONAL_SENSE 0x0 +#define LOGICAL_UNIT_NOT_READY 0x4 +#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8 +#define UNRECOVERED_READ_ERR 0x11 +#define PARAMETER_LIST_LENGTH_ERR 0x1a +#define INVALID_OPCODE 0x20 +#define LBA_OUT_OF_RANGE 0x21 +#define INVALID_FIELD_IN_CDB 0x24 +#define INVALID_FIELD_IN_PARAM_LIST 0x26 +#define WRITE_PROTECTED 0x27 +#define UA_RESET_ASC 0x29 +#define UA_CHANGED_ASC 0x2a +#define TARGET_CHANGED_ASC 0x3f +#define LUNS_CHANGED_ASCQ 0x0e +#define INSUFF_RES_ASC 0x55 +#define INSUFF_RES_ASCQ 0x3 +#define POWER_ON_RESET_ASCQ 0x0 +#define POWER_ON_OCCURRED_ASCQ 0x1 +#define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */ +#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */ +#define CAPACITY_CHANGED_ASCQ 0x9 +#define SAVING_PARAMS_UNSUP 0x39 +#define TRANSPORT_PROBLEM 0x4b +#define THRESHOLD_EXCEEDED 0x5d +#define LOW_POWER_COND_ON 0x5e +#define MISCOMPARE_VERIFY_ASC 0x1d +#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */ +#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16 +#define WRITE_ERROR_ASC 0xc +#define UNALIGNED_WRITE_ASCQ 0x4 +#define WRITE_BOUNDARY_ASCQ 0x5 +#define READ_INVDATA_ASCQ 0x6 +#define READ_BOUNDARY_ASCQ 0x7 +#define ATTEMPT_ACCESS_GAP 0x9 +#define INSUFF_ZONE_ASCQ 0xe + +/* Additional Sense Code Qualifier (ASCQ) */ +#define ACK_NAK_TO 0x3 + +/* Default values for driver parameters */ +#define DEF_NUM_HOST 1 +#define DEF_NUM_TGTS 1 +#define DEF_MAX_LUNS 1 +/* With these defaults, this driver will make 1 host with 1 target + * (id 0) containing 1 logical unit (lun 0). That is 1 device. 
+ */ +#define DEF_ATO 1 +#define DEF_CDB_LEN 10 +#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */ +#define DEF_DEV_SIZE_PRE_INIT 0 +#define DEF_DEV_SIZE_MB 8 +#define DEF_ZBC_DEV_SIZE_MB 128 +#define DEF_DIF 0 +#define DEF_DIX 0 +#define DEF_PER_HOST_STORE false +#define DEF_D_SENSE 0 +#define DEF_EVERY_NTH 0 +#define DEF_FAKE_RW 0 +#define DEF_GUARD 0 +#define DEF_HOST_LOCK 0 +#define DEF_LBPU 0 +#define DEF_LBPWS 0 +#define DEF_LBPWS10 0 +#define DEF_LBPRZ 1 +#define DEF_LOWEST_ALIGNED 0 +#define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */ +#define DEF_NO_LUN_0 0 +#define DEF_NUM_PARTS 0 +#define DEF_OPTS 0 +#define DEF_OPT_BLKS 1024 +#define DEF_PHYSBLK_EXP 0 +#define DEF_OPT_XFERLEN_EXP 0 +#define DEF_PTYPE TYPE_DISK +#define DEF_RANDOM false +#define DEF_REMOVABLE false +#define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */ +#define DEF_SECTOR_SIZE 512 +#define DEF_UNMAP_ALIGNMENT 0 +#define DEF_UNMAP_GRANULARITY 1 +#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF +#define DEF_UNMAP_MAX_DESC 256 +#define DEF_VIRTUAL_GB 0 +#define DEF_VPD_USE_HOSTNO 1 +#define DEF_WRITESAME_LENGTH 0xFFFF +#define DEF_STRICT 0 +#define DEF_STATISTICS false +#define DEF_SUBMIT_QUEUES 1 +#define DEF_TUR_MS_TO_READY 0 +#define DEF_UUID_CTL 0 +#define JDELAY_OVERRIDDEN -9999 + +/* Default parameters for ZBC drives */ +#define DEF_ZBC_ZONE_SIZE_MB 128 +#define DEF_ZBC_MAX_OPEN_ZONES 8 +#define DEF_ZBC_NR_CONV_ZONES 1 + +#define SDEBUG_LUN_0_VAL 0 + +/* bit mask values for sdebug_opts */ +#define SDEBUG_OPT_NOISE 1 +#define SDEBUG_OPT_MEDIUM_ERR 2 +#define SDEBUG_OPT_TIMEOUT 4 +#define SDEBUG_OPT_RECOVERED_ERR 8 +#define SDEBUG_OPT_TRANSPORT_ERR 16 +#define SDEBUG_OPT_DIF_ERR 32 +#define SDEBUG_OPT_DIX_ERR 64 +#define SDEBUG_OPT_MAC_TIMEOUT 128 +#define SDEBUG_OPT_SHORT_TRANSFER 0x100 +#define SDEBUG_OPT_Q_NOISE 0x200 +#define SDEBUG_OPT_ALL_TSF 0x400 /* ignore */ +#define SDEBUG_OPT_RARE_TSF 0x800 +#define SDEBUG_OPT_N_WCE 0x1000 +#define SDEBUG_OPT_RESET_NOISE 0x2000 +#define SDEBUG_OPT_NO_CDB_NOISE 0x4000 +#define SDEBUG_OPT_HOST_BUSY 0x8000 +#define SDEBUG_OPT_CMD_ABORT 0x10000 +#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \ + SDEBUG_OPT_RESET_NOISE) +#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \ + SDEBUG_OPT_TRANSPORT_ERR | \ + SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \ + SDEBUG_OPT_SHORT_TRANSFER | \ + SDEBUG_OPT_HOST_BUSY | \ + SDEBUG_OPT_CMD_ABORT) +#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \ + SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR) + +/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in + * priority order. In the subset implemented here lower numbers have higher + * priority. The UA numbers should be a sequence starting from 0 with + * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. 
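The SDEBUG_OPT_* values above are plain bit flags that get OR-ed together; the sketch below composes such a value as it would be handed to the driver's opts module parameter (the parameter name is assumed from the sdebug_opts variable, and the snippet is purely illustrative).

#include <stdio.h>

/* Same bit values as a few of the SDEBUG_OPT_* macros above. */
#define OPT_NOISE          0x1
#define OPT_MEDIUM_ERR     0x2
#define OPT_Q_NOISE        0x200

int main(void)
{
	unsigned int opts = OPT_NOISE | OPT_MEDIUM_ERR | OPT_Q_NOISE;

	/* e.g. something like "modprobe scsi_debug opts=0x203" (decimal 515) */
	printf("opts=0x%x (%u)\n", opts, opts);
	return 0;
}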
*/ +#define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */ +#define SDEBUG_UA_POOCCUR 1 /* Power on occurred */ +#define SDEBUG_UA_BUS_RESET 2 +#define SDEBUG_UA_MODE_CHANGED 3 +#define SDEBUG_UA_CAPACITY_CHANGED 4 +#define SDEBUG_UA_LUNS_CHANGED 5 +#define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */ +#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7 +#define SDEBUG_NUM_UAS 8 + +/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this + * sector on read commands: */ +#define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */ +#define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */ + +/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued + * (for response) per submit queue at one time. Can be reduced by max_queue + * option. Command responses are not queued when jdelay=0 and ndelay=0. The + * per-device DEF_CMD_PER_LUN can be changed via sysfs: + * /sys/class/scsi_device//device/queue_depth + * but cannot exceed SDEBUG_CANQUEUE . + */ +#define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */ +#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG) +#define DEF_CMD_PER_LUN SDEBUG_CANQUEUE + +/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */ +#define F_D_IN 1 /* Data-in command (e.g. READ) */ +#define F_D_OUT 2 /* Data-out command (e.g. WRITE) */ +#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */ +#define F_D_UNKN 8 +#define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */ +#define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */ +#define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */ +#define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */ +#define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */ +#define F_INV_OP 0x200 /* invalid opcode (not supported) */ +#define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */ +#define F_M_ACCESS 0x800 /* media access, reacts to SSU state */ +#define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */ +#define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */ + +/* Useful combinations of the above flags */ +#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR) +#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW) +#define FF_SA (F_SA_HIGH | F_SA_LOW) +#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY) + +#define SDEBUG_MAX_PARTS 4 + +#define SDEBUG_MAX_CMD_LEN 32 + +#define SDEB_XA_NOT_IN_USE XA_MARK_1 + +static struct kmem_cache *queued_cmd_cache; + +#define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble) +#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; } + +/* Zone types (zbcr05 table 25) */ +enum sdebug_z_type { + ZBC_ZTYPE_CNV = 0x1, + ZBC_ZTYPE_SWR = 0x2, + ZBC_ZTYPE_SWP = 0x3, + /* ZBC_ZTYPE_SOBR = 0x4, */ + ZBC_ZTYPE_GAP = 0x5, +}; + +/* enumeration names taken from table 26, zbcr05 */ +enum sdebug_z_cond { + ZBC_NOT_WRITE_POINTER = 0x0, + ZC1_EMPTY = 0x1, + ZC2_IMPLICIT_OPEN = 0x2, + ZC3_EXPLICIT_OPEN = 0x3, + ZC4_CLOSED = 0x4, + ZC6_READ_ONLY = 0xd, + ZC5_FULL = 0xe, + ZC7_OFFLINE = 0xf, +}; + +struct sdeb_zone_state { /* ZBC: per zone state */ + enum sdebug_z_type z_type; + enum sdebug_z_cond z_cond; + bool z_non_seq_resource; + unsigned int z_size; + sector_t z_start; + sector_t z_wp; +}; + +struct sdebug_dev_info { + struct list_head dev_list; + unsigned int channel; + unsigned int target; + u64 lun; + uuid_t lu_name; + struct sdebug_host_info *sdbg_host; + unsigned long uas_bm[1]; + atomic_t stopped; /* 1: by SSU, 2: device start */ + bool used; + + /* For ZBC 
devices */ + enum blk_zoned_model zmodel; + unsigned int zcap; + unsigned int zsize; + unsigned int zsize_shift; + unsigned int nr_zones; + unsigned int nr_conv_zones; + unsigned int nr_seq_zones; + unsigned int nr_imp_open; + unsigned int nr_exp_open; + unsigned int nr_closed; + unsigned int max_open; + ktime_t create_ts; /* time since bootup that this device was created */ + struct sdeb_zone_state *zstate; +}; + +struct sdebug_host_info { + struct list_head host_list; + int si_idx; /* sdeb_store_info (per host) xarray index */ + struct Scsi_Host *shost; + struct device dev; + struct list_head dev_info_list; +}; + +/* There is an xarray of pointers to this struct's objects, one per host */ +struct sdeb_store_info { + rwlock_t macc_lck; /* for atomic media access on this store */ + u8 *storep; /* user data storage (ram) */ + struct t10_pi_tuple *dif_storep; /* protection info */ + void *map_storep; /* provisioning map */ +}; + +#define dev_to_sdebug_host(d) \ + container_of(d, struct sdebug_host_info, dev) + +#define shost_to_sdebug_host(shost) \ + dev_to_sdebug_host(shost->dma_dev) + +enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1, + SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3}; + +struct sdebug_defer { + struct hrtimer hrt; + struct execute_work ew; + ktime_t cmpl_ts;/* time since boot to complete this cmd */ + int issuing_cpu; + bool aborted; /* true when blk_abort_request() already called */ + enum sdeb_defer_type defer_t; +}; + +struct sdebug_queued_cmd { + /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue + * instance indicates this slot is in use. + */ + struct sdebug_defer sd_dp; + struct scsi_cmnd *scmd; +}; + +struct sdebug_scsi_cmd { + spinlock_t lock; +}; + +static atomic_t sdebug_cmnd_count; /* number of incoming commands */ +static atomic_t sdebug_completions; /* count of deferred completions */ +static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */ +static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */ +static atomic_t sdeb_inject_pending; +static atomic_t sdeb_mq_poll_count; /* bumped when mq_poll returns > 0 */ + +struct opcode_info_t { + u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */ + /* for terminating element */ + u8 opcode; /* if num_attached > 0, preferred */ + u16 sa; /* service action */ + u32 flags; /* OR-ed set of SDEB_F_* */ + int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); + const struct opcode_info_t *arrp; /* num_attached elements or NULL */ + u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */ + /* 1 to min(cdb_len, 15); ignore cdb[15...] 
*/ +}; + +/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */ +enum sdeb_opcode_index { + SDEB_I_INVALID_OPCODE = 0, + SDEB_I_INQUIRY = 1, + SDEB_I_REPORT_LUNS = 2, + SDEB_I_REQUEST_SENSE = 3, + SDEB_I_TEST_UNIT_READY = 4, + SDEB_I_MODE_SENSE = 5, /* 6, 10 */ + SDEB_I_MODE_SELECT = 6, /* 6, 10 */ + SDEB_I_LOG_SENSE = 7, + SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */ + SDEB_I_READ = 9, /* 6, 10, 12, 16 */ + SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */ + SDEB_I_START_STOP = 11, + SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */ + SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */ + SDEB_I_MAINT_IN = 14, + SDEB_I_MAINT_OUT = 15, + SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */ + SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */ + SDEB_I_RESERVE = 18, /* 6, 10 */ + SDEB_I_RELEASE = 19, /* 6, 10 */ + SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */ + SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */ + SDEB_I_ATA_PT = 22, /* 12, 16 */ + SDEB_I_SEND_DIAG = 23, + SDEB_I_UNMAP = 24, + SDEB_I_WRITE_BUFFER = 25, + SDEB_I_WRITE_SAME = 26, /* 10, 16 */ + SDEB_I_SYNC_CACHE = 27, /* 10, 16 */ + SDEB_I_COMP_WRITE = 28, + SDEB_I_PRE_FETCH = 29, /* 10, 16 */ + SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */ + SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */ + SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */ +}; + + +static const unsigned char opcode_ind_arr[256] = { +/* 0x0; 0x0->0x1f: 6 byte cdbs */ + SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE, + 0, 0, 0, 0, + SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0, + 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE, + SDEB_I_RELEASE, + 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG, + SDEB_I_ALLOW_REMOVAL, 0, +/* 0x20; 0x20->0x3f: 10 byte cdbs */ + 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0, + SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY, + 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0, + 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0, +/* 0x40; 0x40->0x5f: 10 byte cdbs */ + 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0, + 0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE, + SDEB_I_RELEASE, + 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0, +/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, SDEB_I_VARIABLE_LEN, +/* 0x80; 0x80->0x9f: 16 byte cdbs */ + 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0, + SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, + 0, 0, 0, SDEB_I_VERIFY, + SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, + SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0, + 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16, +/* 0xa0; 0xa0->0xbf: 12 byte cdbs */ + SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN, + SDEB_I_MAINT_OUT, 0, 0, 0, + SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE, + 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, +/* 0xc0; 0xc0->0xff: vendor specific */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +/* + * The following "response" functions return the SCSI mid-level's 4 byte + * tuple-in-an-int. 
To handle commands with an IMMED bit, for a faster + * command completion, they can mask their return value with + * SDEG_RES_IMMED_MASK . + */ +#define SDEG_RES_IMMED_MASK 0x40000000 + +static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *); + +static int sdebug_do_add_host(bool mk_new_store); +static int sdebug_add_host_helper(int per_host_idx); +static void sdebug_do_remove_host(bool the_end); +static int sdebug_add_store(void); +static void sdebug_erase_store(int idx, struct sdeb_store_info *sip); +static void sdebug_erase_all_stores(bool apart_from_first); + +static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp); + +/* + * The following are overflow arrays for cdbs that "hit" the same index in + * the opcode_info_arr array. The most time sensitive (or commonly used) cdb + * should be placed in opcode_info_arr[], the others should be placed here. 
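The "tuple-in-an-int" mentioned above is the mid-level result word: in this kernel it carries the SCSI status in the low byte and the host (DID_*) byte at bit 16, which is how constants such as illegal_condition_result further down are composed. A small sketch using the same numeric values (0x02 for CHECK CONDITION, 0x05 for DID_ABORT); the macro names here are local stand-ins, not the kernel's.

#include <stdio.h>

#define STATUS_CHECK_CONDITION 0x02	/* SAM_STAT_CHECK_CONDITION */
#define HOST_DID_ABORT         0x05	/* DID_ABORT */

int main(void)
{
	int result = (HOST_DID_ABORT << 16) | STATUS_CHECK_CONDITION;

	printf("result=0x%08x host_byte=0x%x status_byte=0x%x\n",
	       result, (result >> 16) & 0xff, result & 0xff);
	return 0;
}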
+ */ +static const struct opcode_info_t msense_iarr[] = { + {0, 0x1a, 0, F_D_IN, NULL, NULL, + {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, +}; + +static const struct opcode_info_t mselect_iarr[] = { + {0, 0x15, 0, F_D_OUT, NULL, NULL, + {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, +}; + +static const struct opcode_info_t read_iarr[] = { + {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */ + {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, + 0, 0, 0, 0} }, + {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */ + {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */ + {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, + 0xc7, 0, 0, 0, 0} }, +}; + +static const struct opcode_info_t write_iarr[] = { + {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */ + NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, + 0, 0, 0, 0, 0, 0} }, + {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */ + NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0} }, + {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */ + NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xbf, 0xc7, 0, 0, 0, 0} }, +}; + +static const struct opcode_info_t verify_iarr[] = { + {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */ + NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7, + 0, 0, 0, 0, 0, 0} }, +}; + +static const struct opcode_info_t sa_in_16_iarr[] = { + {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL, + {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */ +}; + +static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */ + {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0, + NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa, + 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */ + {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat, + NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8, + 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */ +}; + +static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */ + {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL, + {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, + 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */ + {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL, + {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, + 0, 0} }, /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */ +}; + +static const struct opcode_info_t write_same_iarr[] = { + {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL, + {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */ +}; + +static const struct opcode_info_t reserve_iarr[] = { + {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */ + {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, +}; + +static const struct opcode_info_t release_iarr[] = { + {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */ + {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, +}; + +static const struct opcode_info_t sync_cache_iarr[] = { + {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL, + {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 
0x3f, 0xc7} }, /* SYNC_CACHE (16) */ +}; + +static const struct opcode_info_t pre_fetch_iarr[] = { + {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL, + {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */ +}; + +static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */ + {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL, + {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */ + {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL, + {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */ + {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL, + {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */ +}; + +static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */ + {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL, + {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */ +}; + + +/* This array is accessed via SDEB_I_* values. Make sure all are mapped, + * plus the terminating elements for logic that scans this table such as + * REPORT SUPPORTED OPERATION CODES. */ +static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = { +/* 0 */ + {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */ + {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL, + {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, + 0, 0} }, /* REPORT LUNS */ + {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL, + {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */ + {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, +/* 5 */ + {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */ + resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0, + 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, + {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */ + resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff, + 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, + {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */ + {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, + 0, 0, 0} }, + {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */ + {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0, + 0, 0} }, + {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */ + resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, +/* 10 */ + {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO, + resp_write_dt0, write_iarr, /* WRITE(16) */ + {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, + {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */ + {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN, + resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */ + {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} }, + {0, 0x9f, 0x12, 
F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat, + NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */ + {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN, + resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */ + maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, + 0xff, 0, 0xc7, 0, 0, 0, 0} }, +/* 15 */ + {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {ARRAY_SIZE(verify_iarr), 0x8f, 0, + F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */ + verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, + {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO, + resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */ + {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff, + 0xff, 0xff} }, + {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT, + NULL, reserve_iarr, /* RESERVE(10) */ + {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, + 0} }, + {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT, + NULL, release_iarr, /* RELEASE(10) */ + {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, + 0} }, +/* 20 */ + {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */ + {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */ + {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */ + {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */ + {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, +/* 25 */ + {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL, + {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, + 0, 0, 0, 0} }, /* WRITE_BUFFER */ + {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, + resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */ + {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, + 0, 0, 0, 0, 0} }, + {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS, + resp_sync_cache, sync_cache_iarr, + {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, + 0, 0, 0, 0} }, /* SYNC_CACHE (10) */ + {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL, + {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, + 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */ + {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO, + resp_pre_fetch, pre_fetch_iarr, + {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, + 0, 0, 0, 0} }, /* PRE-FETCH (10) */ + +/* 30 */ + {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS, + resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */ + {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} }, + {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS, + resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */ + {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} }, +/* sentinel */ + {0xff, 0, 0, 0, NULL, NULL, /* terminating element */ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, +}; + +static int sdebug_num_hosts; +static int sdebug_add_host = DEF_NUM_HOST; 
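How these tables are meant to be consumed can be sketched with a toy two-level lookup: cdb[0] selects an SDEB_I_* index via opcode_ind_arr[], and the chosen entry's len_mask[] marks which CDB bits a caller may set. The model below is a simplification (service actions, the strict option and the overflow arrays are ignored) and its table holds only the INQUIRY row shown above.

#include <stdint.h>
#include <stdio.h>

struct op_info {
	uint8_t len_mask[16];	/* [0] = cdb length, rest = allowed bits */
};

static const uint8_t opcode_to_index[256] = {
	[0x12] = 1,		/* INQUIRY -> toy index 1 */
};

static const struct op_info op_table[] = {
	[1] = { {6, 0xe3, 0xff, 0xff, 0xff, 0xc7} },	/* INQUIRY mask row */
};

static int cdb_ok(const uint8_t *cdb)
{
	const struct op_info *oi = &op_table[opcode_to_index[cdb[0]]];
	int i;

	for (i = 1; i < oi->len_mask[0]; i++)
		if (cdb[i] & ~oi->len_mask[i])
			return 0;	/* reserved bit set -> INVALID FIELD IN CDB */
	return 1;
}

int main(void)
{
	uint8_t good[6] = { 0x12, 0x01, 0x80, 0, 96, 0 };  /* EVPD, page 0x80 */
	uint8_t bad[6]  = { 0x12, 0x04, 0, 0, 96, 0 };     /* reserved bit 2 set */

	printf("good cdb: %s\n", cdb_ok(good) ? "accepted" : "rejected");
	printf("bad cdb:  %s\n", cdb_ok(bad) ? "accepted" : "rejected");
	return 0;
}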
/* in sysfs this is relative */ +static int sdebug_ato = DEF_ATO; +static int sdebug_cdb_len = DEF_CDB_LEN; +static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */ +static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT; +static int sdebug_dif = DEF_DIF; +static int sdebug_dix = DEF_DIX; +static int sdebug_dsense = DEF_D_SENSE; +static int sdebug_every_nth = DEF_EVERY_NTH; +static int sdebug_fake_rw = DEF_FAKE_RW; +static unsigned int sdebug_guard = DEF_GUARD; +static int sdebug_host_max_queue; /* per host */ +static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED; +static int sdebug_max_luns = DEF_MAX_LUNS; +static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */ +static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR; +static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM; +static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */ +static int sdebug_no_lun_0 = DEF_NO_LUN_0; +static int sdebug_no_uld; +static int sdebug_num_parts = DEF_NUM_PARTS; +static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */ +static int sdebug_opt_blks = DEF_OPT_BLKS; +static int sdebug_opts = DEF_OPTS; +static int sdebug_physblk_exp = DEF_PHYSBLK_EXP; +static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP; +static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */ +static int sdebug_scsi_level = DEF_SCSI_LEVEL; +static int sdebug_sector_size = DEF_SECTOR_SIZE; +static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY; +static int sdebug_virtual_gb = DEF_VIRTUAL_GB; +static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; +static unsigned int sdebug_lbpu = DEF_LBPU; +static unsigned int sdebug_lbpws = DEF_LBPWS; +static unsigned int sdebug_lbpws10 = DEF_LBPWS10; +static unsigned int sdebug_lbprz = DEF_LBPRZ; +static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT; +static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY; +static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; +static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC; +static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH; +static int sdebug_uuid_ctl = DEF_UUID_CTL; +static bool sdebug_random = DEF_RANDOM; +static bool sdebug_per_host_store = DEF_PER_HOST_STORE; +static bool sdebug_removable = DEF_REMOVABLE; +static bool sdebug_clustering; +static bool sdebug_host_lock = DEF_HOST_LOCK; +static bool sdebug_strict = DEF_STRICT; +static bool sdebug_any_injecting_opt; +static bool sdebug_no_rwlock; +static bool sdebug_verbose; +static bool have_dif_prot; +static bool write_since_sync; +static bool sdebug_statistics = DEF_STATISTICS; +static bool sdebug_wp; +/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */ +static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE; +static char *sdeb_zbc_model_s; + +enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0, + SAM_LUN_AM_FLAT = 0x1, + SAM_LUN_AM_LOGICAL_UNIT = 0x2, + SAM_LUN_AM_EXTENDED = 0x3}; +static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL; +static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL; + +static unsigned int sdebug_store_sectors; +static sector_t sdebug_capacity; /* in sectors */ + +/* old BIOS stuff, kernel may get rid of them but some mode sense pages + may still need them */ +static int sdebug_heads; /* heads per disk */ +static int sdebug_cylinders_per; /* cylinders per surface */ +static int sdebug_sectors_per; /* sectors per cylinder */ + +static LIST_HEAD(sdebug_host_list); +static 
DEFINE_MUTEX(sdebug_host_list_mutex); + +static struct xarray per_store_arr; +static struct xarray *per_store_ap = &per_store_arr; +static int sdeb_first_idx = -1; /* invalid index ==> none created */ +static int sdeb_most_recent_idx = -1; +static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */ + +static unsigned long map_size; +static int num_aborts; +static int num_dev_resets; +static int num_target_resets; +static int num_bus_resets; +static int num_host_resets; +static int dix_writes; +static int dix_reads; +static int dif_errors; + +/* ZBC global data */ +static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */ +static int sdeb_zbc_zone_cap_mb; +static int sdeb_zbc_zone_size_mb; +static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES; +static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES; + +static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */ +static int poll_queues; /* iouring iopoll interface.*/ + +static char sdebug_proc_name[] = MY_NAME; +static const char *my_name = MY_NAME; + +static struct bus_type pseudo_lld_bus; + +static struct device_driver sdebug_driverfs_driver = { + .name = sdebug_proc_name, + .bus = &pseudo_lld_bus, +}; + +static const int check_condition_result = + SAM_STAT_CHECK_CONDITION; + +static const int illegal_condition_result = + (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; + +static const int device_qfull_result = + (DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL; + +static const int condition_met_result = SAM_STAT_CONDITION_MET; + + +/* Only do the extra work involved in logical block provisioning if one or + * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing + * real reads and writes (i.e. not skipping them for speed). + */ +static inline bool scsi_debug_lbp(void) +{ + return 0 == sdebug_fake_rw && + (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); +} + +static void *lba2fake_store(struct sdeb_store_info *sip, + unsigned long long lba) +{ + struct sdeb_store_info *lsip = sip; + + lba = do_div(lba, sdebug_store_sectors); + if (!sip || !sip->storep) { + WARN_ON_ONCE(true); + lsip = xa_load(per_store_ap, 0); /* should never be NULL */ + } + return lsip->storep + lba * sdebug_sector_size; +} + +static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip, + sector_t sector) +{ + sector = sector_div(sector, sdebug_store_sectors); + + return sip->dif_storep + sector; +} + +static void sdebug_max_tgts_luns(void) +{ + struct sdebug_host_info *sdbg_host; + struct Scsi_Host *hpnt; + + mutex_lock(&sdebug_host_list_mutex); + list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { + hpnt = sdbg_host->shost; + if ((hpnt->this_id >= 0) && + (sdebug_num_tgts > hpnt->this_id)) + hpnt->max_id = sdebug_num_tgts + 1; + else + hpnt->max_id = sdebug_num_tgts; + /* sdebug_max_luns; */ + hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; + } + mutex_unlock(&sdebug_host_list_mutex); +} + +enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1}; + +/* Set in_bit to -1 to indicate no bit position of invalid field */ +static void mk_sense_invalid_fld(struct scsi_cmnd *scp, + enum sdeb_cmd_data c_d, + int in_byte, int in_bit) +{ + unsigned char *sbuff; + u8 sks[4]; + int sl, asc; + + sbuff = scp->sense_buffer; + if (!sbuff) { + sdev_printk(KERN_ERR, scp->device, + "%s: sense_buffer is NULL\n", __func__); + return; + } + asc = c_d ? 
INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST; + memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE); + scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0); + memset(sks, 0, sizeof(sks)); + sks[0] = 0x80; + if (c_d) + sks[0] |= 0x40; + if (in_bit >= 0) { + sks[0] |= 0x8; + sks[0] |= 0x7 & in_bit; + } + put_unaligned_be16(in_byte, sks + 1); + if (sdebug_dsense) { + sl = sbuff[7] + 8; + sbuff[7] = sl; + sbuff[sl] = 0x2; + sbuff[sl + 1] = 0x6; + memcpy(sbuff + sl + 4, sks, 3); + } else + memcpy(sbuff + 15, sks, 3); + if (sdebug_verbose) + sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq" + "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n", + my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit); +} + +static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq) +{ + if (!scp->sense_buffer) { + sdev_printk(KERN_ERR, scp->device, + "%s: sense_buffer is NULL\n", __func__); + return; + } + memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + + scsi_build_sense(scp, sdebug_dsense, key, asc, asq); + + if (sdebug_verbose) + sdev_printk(KERN_INFO, scp->device, + "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n", + my_name, key, asc, asq); +} + +static void mk_sense_invalid_opcode(struct scsi_cmnd *scp) +{ + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0); +} + +static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *arg) +{ + if (sdebug_verbose) { + if (0x1261 == cmd) + sdev_printk(KERN_INFO, dev, + "%s: BLKFLSBUF [0x1261]\n", __func__); + else if (0x5331 == cmd) + sdev_printk(KERN_INFO, dev, + "%s: CDROM_GET_CAPABILITY [0x5331]\n", + __func__); + else + sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n", + __func__, cmd); + } + return -EINVAL; + /* return -ENOTTY; // correct return but upsets fdisk */ +} + +static void config_cdb_len(struct scsi_device *sdev) +{ + switch (sdebug_cdb_len) { + case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */ + sdev->use_10_for_rw = false; + sdev->use_16_for_rw = false; + sdev->use_10_for_ms = false; + break; + case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */ + sdev->use_10_for_rw = true; + sdev->use_16_for_rw = false; + sdev->use_10_for_ms = false; + break; + case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */ + sdev->use_10_for_rw = true; + sdev->use_16_for_rw = false; + sdev->use_10_for_ms = true; + break; + case 16: + sdev->use_10_for_rw = false; + sdev->use_16_for_rw = true; + sdev->use_10_for_ms = true; + break; + case 32: /* No knobs to suggest this so same as 16 for now */ + sdev->use_10_for_rw = false; + sdev->use_16_for_rw = true; + sdev->use_10_for_ms = true; + break; + default: + pr_warn("unexpected cdb_len=%d, force to 10\n", + sdebug_cdb_len); + sdev->use_10_for_rw = true; + sdev->use_16_for_rw = false; + sdev->use_10_for_ms = false; + sdebug_cdb_len = 10; + break; + } +} + +static void all_config_cdb_len(void) +{ + struct sdebug_host_info *sdbg_host; + struct Scsi_Host *shost; + struct scsi_device *sdev; + + mutex_lock(&sdebug_host_list_mutex); + list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { + shost = sdbg_host->shost; + shost_for_each_device(sdev, shost) { + config_cdb_len(sdev); + } + } + mutex_unlock(&sdebug_host_list_mutex); +} + +static void clear_luns_changed_on_target(struct sdebug_dev_info *devip) +{ + struct sdebug_host_info *sdhp = devip->sdbg_host; + struct sdebug_dev_info *dp; + + list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) { + if ((devip->sdbg_host == dp->sdbg_host) && + (devip->target == dp->target)) { + 
clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm); + } + } +} + +static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + int k; + + k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS); + if (k != SDEBUG_NUM_UAS) { + const char *cp = NULL; + + switch (k) { + case SDEBUG_UA_POR: + mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC, + POWER_ON_RESET_ASCQ); + if (sdebug_verbose) + cp = "power on reset"; + break; + case SDEBUG_UA_POOCCUR: + mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC, + POWER_ON_OCCURRED_ASCQ); + if (sdebug_verbose) + cp = "power on occurred"; + break; + case SDEBUG_UA_BUS_RESET: + mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC, + BUS_RESET_ASCQ); + if (sdebug_verbose) + cp = "bus reset"; + break; + case SDEBUG_UA_MODE_CHANGED: + mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC, + MODE_CHANGED_ASCQ); + if (sdebug_verbose) + cp = "mode parameters changed"; + break; + case SDEBUG_UA_CAPACITY_CHANGED: + mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC, + CAPACITY_CHANGED_ASCQ); + if (sdebug_verbose) + cp = "capacity data changed"; + break; + case SDEBUG_UA_MICROCODE_CHANGED: + mk_sense_buffer(scp, UNIT_ATTENTION, + TARGET_CHANGED_ASC, + MICROCODE_CHANGED_ASCQ); + if (sdebug_verbose) + cp = "microcode has been changed"; + break; + case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET: + mk_sense_buffer(scp, UNIT_ATTENTION, + TARGET_CHANGED_ASC, + MICROCODE_CHANGED_WO_RESET_ASCQ); + if (sdebug_verbose) + cp = "microcode has been changed without reset"; + break; + case SDEBUG_UA_LUNS_CHANGED: + /* + * SPC-3 behavior is to report a UNIT ATTENTION with + * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN + * on the target, until a REPORT LUNS command is + * received. SPC-4 behavior is to report it only once. + * NOTE: sdebug_scsi_level does not use the same + * values as struct scsi_device->scsi_level. + */ + if (sdebug_scsi_level >= 6) /* SPC-4 and above */ + clear_luns_changed_on_target(devip); + mk_sense_buffer(scp, UNIT_ATTENTION, + TARGET_CHANGED_ASC, + LUNS_CHANGED_ASCQ); + if (sdebug_verbose) + cp = "reported luns data has changed"; + break; + default: + pr_warn("unexpected unit attention code=%d\n", k); + if (sdebug_verbose) + cp = "unknown"; + break; + } + clear_bit(k, devip->uas_bm); + if (sdebug_verbose) + sdev_printk(KERN_INFO, scp->device, + "%s reports: Unit attention: %s\n", + my_name, cp); + return check_condition_result; + } + return 0; +} + +/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */ +static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, + int arr_len) +{ + int act_len; + struct scsi_data_buffer *sdb = &scp->sdb; + + if (!sdb->length) + return 0; + if (scp->sc_data_direction != DMA_FROM_DEVICE) + return DID_ERROR << 16; + + act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents, + arr, arr_len); + scsi_set_resid(scp, scsi_bufflen(scp) - act_len); + + return 0; +} + +/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else + * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple + * calls, not required to write in ascending offset order. Assumes resid + * set to scsi_bufflen() prior to any calls. 
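make_ua() above keeps pending unit attentions as bits in uas_bm[] and reports the lowest set bit (highest priority) per incoming command, clearing it afterwards. A user-space sketch of the same drain-one-UA-at-a-time pattern, with ffs() standing in for find_first_bit():

#include <stdio.h>
#include <strings.h>	/* ffs() */

enum { UA_POR = 0, UA_BUS_RESET = 2, UA_CAPACITY_CHANGED = 4 };

int main(void)
{
	unsigned long uas = (1UL << UA_POR) | (1UL << UA_CAPACITY_CHANGED);

	while (uas) {
		int k = ffs((int)uas) - 1;	/* kernel code uses find_first_bit() */

		printf("reporting UA %d, CHECK CONDITION returned\n", k);
		uas &= ~(1UL << k);		/* clear_bit(k, uas_bm) */
	}
	printf("no UA pending, command proceeds\n");
	return 0;
}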
+ */ +static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr, + int arr_len, unsigned int off_dst) +{ + unsigned int act_len, n; + struct scsi_data_buffer *sdb = &scp->sdb; + off_t skip = off_dst; + + if (sdb->length <= off_dst) + return 0; + if (scp->sc_data_direction != DMA_FROM_DEVICE) + return DID_ERROR << 16; + + act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents, + arr, arr_len, skip); + pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n", + __func__, off_dst, scsi_bufflen(scp), act_len, + scsi_get_resid(scp)); + n = scsi_bufflen(scp) - (off_dst + act_len); + scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n)); + return 0; +} + +/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into + * 'arr' or -1 if error. + */ +static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, + int arr_len) +{ + if (!scsi_bufflen(scp)) + return 0; + if (scp->sc_data_direction != DMA_TO_DEVICE) + return -1; + + return scsi_sg_copy_to_buffer(scp, arr, arr_len); +} + + +static char sdebug_inq_vendor_id[9] = "Linux "; +static char sdebug_inq_product_id[17] = "scsi_debug "; +static char sdebug_inq_product_rev[5] = SDEBUG_VERSION; +/* Use some locally assigned NAAs for SAS addresses. */ +static const u64 naa3_comp_a = 0x3222222000000000ULL; +static const u64 naa3_comp_b = 0x3333333000000000ULL; +static const u64 naa3_comp_c = 0x3111111000000000ULL; + +/* Device identification VPD page. Returns number of bytes placed in arr */ +static int inquiry_vpd_83(unsigned char *arr, int port_group_id, + int target_dev_id, int dev_id_num, + const char *dev_id_str, int dev_id_str_len, + const uuid_t *lu_name) +{ + int num, port_a; + char b[32]; + + port_a = target_dev_id + 1; + /* T10 vendor identifier field format (faked) */ + arr[0] = 0x2; /* ASCII */ + arr[1] = 0x1; + arr[2] = 0x0; + memcpy(&arr[4], sdebug_inq_vendor_id, 8); + memcpy(&arr[12], sdebug_inq_product_id, 16); + memcpy(&arr[28], dev_id_str, dev_id_str_len); + num = 8 + 16 + dev_id_str_len; + arr[3] = num; + num += 4; + if (dev_id_num >= 0) { + if (sdebug_uuid_ctl) { + /* Locally assigned UUID */ + arr[num++] = 0x1; /* binary (not necessarily sas) */ + arr[num++] = 0xa; /* PIV=0, lu, naa */ + arr[num++] = 0x0; + arr[num++] = 0x12; + arr[num++] = 0x10; /* uuid type=1, locally assigned */ + arr[num++] = 0x0; + memcpy(arr + num, lu_name, 16); + num += 16; + } else { + /* NAA-3, Logical unit identifier (binary) */ + arr[num++] = 0x1; /* binary (not necessarily sas) */ + arr[num++] = 0x3; /* PIV=0, lu, naa */ + arr[num++] = 0x0; + arr[num++] = 0x8; + put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num); + num += 8; + } + /* Target relative port number */ + arr[num++] = 0x61; /* proto=sas, binary */ + arr[num++] = 0x94; /* PIV=1, target port, rel port */ + arr[num++] = 0x0; /* reserved */ + arr[num++] = 0x4; /* length */ + arr[num++] = 0x0; /* reserved */ + arr[num++] = 0x0; /* reserved */ + arr[num++] = 0x0; + arr[num++] = 0x1; /* relative port A */ + } + /* NAA-3, Target port identifier */ + arr[num++] = 0x61; /* proto=sas, binary */ + arr[num++] = 0x93; /* piv=1, target port, naa */ + arr[num++] = 0x0; + arr[num++] = 0x8; + put_unaligned_be64(naa3_comp_a + port_a, arr + num); + num += 8; + /* NAA-3, Target port group identifier */ + arr[num++] = 0x61; /* proto=sas, binary */ + arr[num++] = 0x95; /* piv=1, target port group id */ + arr[num++] = 0x0; + arr[num++] = 0x4; + arr[num++] = 0; + arr[num++] = 0; + put_unaligned_be16(port_group_id, arr + num); + num += 2; + /* NAA-3, 
Target device identifier */ + arr[num++] = 0x61; /* proto=sas, binary */ + arr[num++] = 0xa3; /* piv=1, target device, naa */ + arr[num++] = 0x0; + arr[num++] = 0x8; + put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num); + num += 8; + /* SCSI name string: Target device identifier */ + arr[num++] = 0x63; /* proto=sas, UTF-8 */ + arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */ + arr[num++] = 0x0; + arr[num++] = 24; + memcpy(arr + num, "naa.32222220", 12); + num += 12; + snprintf(b, sizeof(b), "%08X", target_dev_id); + memcpy(arr + num, b, 8); + num += 8; + memset(arr + num, 0, 4); + num += 4; + return num; +} + +static unsigned char vpd84_data[] = { +/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0, + 0x22,0x22,0x22,0x0,0xbb,0x1, + 0x22,0x22,0x22,0x0,0xbb,0x2, +}; + +/* Software interface identification VPD page */ +static int inquiry_vpd_84(unsigned char *arr) +{ + memcpy(arr, vpd84_data, sizeof(vpd84_data)); + return sizeof(vpd84_data); +} + +/* Management network addresses VPD page */ +static int inquiry_vpd_85(unsigned char *arr) +{ + int num = 0; + const char *na1 = "https://www.kernel.org/config"; + const char *na2 = "http://www.kernel.org/log"; + int plen, olen; + + arr[num++] = 0x1; /* lu, storage config */ + arr[num++] = 0x0; /* reserved */ + arr[num++] = 0x0; + olen = strlen(na1); + plen = olen + 1; + if (plen % 4) + plen = ((plen / 4) + 1) * 4; + arr[num++] = plen; /* length, null termianted, padded */ + memcpy(arr + num, na1, olen); + memset(arr + num + olen, 0, plen - olen); + num += plen; + + arr[num++] = 0x4; /* lu, logging */ + arr[num++] = 0x0; /* reserved */ + arr[num++] = 0x0; + olen = strlen(na2); + plen = olen + 1; + if (plen % 4) + plen = ((plen / 4) + 1) * 4; + arr[num++] = plen; /* length, null terminated, padded */ + memcpy(arr + num, na2, olen); + memset(arr + num + olen, 0, plen - olen); + num += plen; + + return num; +} + +/* SCSI ports VPD page */ +static int inquiry_vpd_88(unsigned char *arr, int target_dev_id) +{ + int num = 0; + int port_a, port_b; + + port_a = target_dev_id + 1; + port_b = port_a + 1; + arr[num++] = 0x0; /* reserved */ + arr[num++] = 0x0; /* reserved */ + arr[num++] = 0x0; + arr[num++] = 0x1; /* relative port 1 (primary) */ + memset(arr + num, 0, 6); + num += 6; + arr[num++] = 0x0; + arr[num++] = 12; /* length tp descriptor */ + /* naa-5 target port identifier (A) */ + arr[num++] = 0x61; /* proto=sas, binary */ + arr[num++] = 0x93; /* PIV=1, target port, NAA */ + arr[num++] = 0x0; /* reserved */ + arr[num++] = 0x8; /* length */ + put_unaligned_be64(naa3_comp_a + port_a, arr + num); + num += 8; + arr[num++] = 0x0; /* reserved */ + arr[num++] = 0x0; /* reserved */ + arr[num++] = 0x0; + arr[num++] = 0x2; /* relative port 2 (secondary) */ + memset(arr + num, 0, 6); + num += 6; + arr[num++] = 0x0; + arr[num++] = 12; /* length tp descriptor */ + /* naa-5 target port identifier (B) */ + arr[num++] = 0x61; /* proto=sas, binary */ + arr[num++] = 0x93; /* PIV=1, target port, NAA */ + arr[num++] = 0x0; /* reserved */ + arr[num++] = 0x8; /* length */ + put_unaligned_be64(naa3_comp_a + port_b, arr + num); + num += 8; + + return num; +} + + +static unsigned char vpd89_data[] = { +/* from 4th byte */ 0,0,0,0, +'l','i','n','u','x',' ',' ',' ', +'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ', +'1','2','3','4', +0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, +0xec,0,0,0, +0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0, +0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20, 
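Each designation descriptor emitted by inquiry_vpd_83() is a 4-byte header followed by the designator body. The sketch below rebuilds one binary NAA target-port descriptor with the same header bytes (0x61, 0x93, 0x00, 0x08) and a big-endian 8-byte value, with a hand-rolled loop standing in for put_unaligned_be64().

#include <stdint.h>
#include <stdio.h>

/* Build one NAA designation descriptor as inquiry_vpd_83() does. */
static int build_naa_desc(uint8_t *p, uint64_t naa)
{
	int i;

	p[0] = 0x61;		/* protocol = SAS, code set = binary */
	p[1] = 0x93;		/* PIV=1, association = target port, NAA type */
	p[2] = 0x00;		/* reserved */
	p[3] = 0x08;		/* designator length */
	for (i = 0; i < 8; i++)	/* put_unaligned_be64() equivalent */
		p[4 + i] = naa >> (56 - 8 * i);
	return 12;
}

int main(void)
{
	uint8_t desc[12];
	int n = build_naa_desc(desc, 0x3222222000000001ULL), i;	/* naa3_comp_a + 1 */

	for (i = 0; i < n; i++)
		printf("%02x%c", desc[i], i == n - 1 ? '\n' : ' ');
	return 0;
}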
+0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33, +0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31, +0x53,0x41, +0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20, +0x20,0x20, +0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20, +0x10,0x80, +0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0, +0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0, +0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0, +0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40, +0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0, +0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42, +0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8, +0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe, +0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51, +}; + +/* ATA Information VPD page */ +static int inquiry_vpd_89(unsigned char *arr) +{ + memcpy(arr, vpd89_data, sizeof(vpd89_data)); + return sizeof(vpd89_data); +} + + +static unsigned char vpdb0_data[] = { + /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +}; + +/* Block limits VPD page (SBC-3) */ +static int inquiry_vpd_b0(unsigned char *arr) +{ + unsigned int gran; + + memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); + + /* Optimal transfer length granularity */ + if (sdebug_opt_xferlen_exp != 0 && + sdebug_physblk_exp < sdebug_opt_xferlen_exp) + gran = 1 << sdebug_opt_xferlen_exp; + else + gran = 1 << sdebug_physblk_exp; + put_unaligned_be16(gran, arr + 2); + + /* Maximum Transfer Length */ + if (sdebug_store_sectors > 0x400) + put_unaligned_be32(sdebug_store_sectors, arr + 4); + + /* Optimal Transfer Length */ + put_unaligned_be32(sdebug_opt_blks, &arr[8]); + + if (sdebug_lbpu) { + /* Maximum Unmap LBA Count */ + put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]); + + /* Maximum Unmap Block Descriptor Count */ + put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]); + } + + /* Unmap Granularity Alignment */ + if (sdebug_unmap_alignment) { + put_unaligned_be32(sdebug_unmap_alignment, &arr[28]); + arr[28] |= 0x80; /* UGAVALID */ + } + + /* Optimal Unmap Granularity */ + put_unaligned_be32(sdebug_unmap_granularity, &arr[24]); + + /* Maximum WRITE SAME Length */ + put_unaligned_be64(sdebug_write_same_length, &arr[32]); + + return 0x3c; /* Mandatory page length for Logical Block Provisioning */ +} + +/* Block device characteristics VPD page (SBC-3) */ +static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr) +{ + memset(arr, 0, 0x3c); + arr[0] = 0; + arr[1] = 1; /* non rotating medium (e.g. 
solid state) */ + arr[2] = 0; + arr[3] = 5; /* less than 1.8" */ + if (devip->zmodel == BLK_ZONED_HA) + arr[4] = 1 << 4; /* zoned field = 01b */ + + return 0x3c; +} + +/* Logical block provisioning VPD page (SBC-4) */ +static int inquiry_vpd_b2(unsigned char *arr) +{ + memset(arr, 0, 0x4); + arr[0] = 0; /* threshold exponent */ + if (sdebug_lbpu) + arr[1] = 1 << 7; + if (sdebug_lbpws) + arr[1] |= 1 << 6; + if (sdebug_lbpws10) + arr[1] |= 1 << 5; + if (sdebug_lbprz && scsi_debug_lbp()) + arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */ + /* anc_sup=0; dp=0 (no provisioning group descriptor) */ + /* minimum_percentage=0; provisioning_type=0 (unknown) */ + /* threshold_percentage=0 */ + return 0x4; +} + +/* Zoned block device characteristics VPD page (ZBC mandatory) */ +static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr) +{ + memset(arr, 0, 0x3c); + arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */ + /* + * Set Optimal number of open sequential write preferred zones and + * Optimal number of non-sequentially written sequential write + * preferred zones fields to 'not reported' (0xffffffff). Leave other + * fields set to zero, apart from Max. number of open swrz_s field. + */ + put_unaligned_be32(0xffffffff, &arr[4]); + put_unaligned_be32(0xffffffff, &arr[8]); + if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open) + put_unaligned_be32(devip->max_open, &arr[12]); + else + put_unaligned_be32(0xffffffff, &arr[12]); + if (devip->zcap < devip->zsize) { + arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET; + put_unaligned_be64(devip->zsize, &arr[20]); + } else { + arr[19] = 0; + } + return 0x3c; +} + +#define SDEBUG_LONG_INQ_SZ 96 +#define SDEBUG_MAX_INQ_ARR_SZ 584 + +static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + unsigned char pq_pdt; + unsigned char *arr; + unsigned char *cmd = scp->cmnd; + u32 alloc_len, n; + int ret; + bool have_wlun, is_disk, is_zbc, is_disk_zbc; + + alloc_len = get_unaligned_be16(cmd + 3); + arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC); + if (! arr) + return DID_REQUEUE << 16; + is_disk = (sdebug_ptype == TYPE_DISK); + is_zbc = (devip->zmodel != BLK_ZONED_NONE); + is_disk_zbc = (is_disk || is_zbc); + have_wlun = scsi_is_wlun(scp->device->lun); + if (have_wlun) + pq_pdt = TYPE_WLUN; /* present, wlun */ + else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL)) + pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */ + else + pq_pdt = (sdebug_ptype & 0x1f); + arr[0] = pq_pdt; + if (0x2 & cmd[1]) { /* CMDDT bit set */ + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1); + kfree(arr); + return check_condition_result; + } else if (0x1 & cmd[1]) { /* EVPD bit set */ + int lu_id_num, port_group_id, target_dev_id; + u32 len; + char lu_id_str[6]; + int host_no = devip->sdbg_host->shost->host_no; + + port_group_id = (((host_no + 1) & 0x7f) << 8) + + (devip->channel & 0x7f); + if (sdebug_vpd_use_hostno == 0) + host_no = 0; + lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) + + (devip->target * 1000) + devip->lun); + target_dev_id = ((host_no + 1) * 2000) + + (devip->target * 1000) - 3; + len = scnprintf(lu_id_str, 6, "%d", lu_id_num); + if (0 == cmd[2]) { /* supported vital product data pages */ + arr[1] = cmd[2]; /*sanity */ + n = 4; + arr[n++] = 0x0; /* this page */ + arr[n++] = 0x80; /* unit serial number */ + arr[n++] = 0x83; /* device identification */ + arr[n++] = 0x84; /* software interface ident. 
*/ + arr[n++] = 0x85; /* management network addresses */ + arr[n++] = 0x86; /* extended inquiry */ + arr[n++] = 0x87; /* mode page policy */ + arr[n++] = 0x88; /* SCSI ports */ + if (is_disk_zbc) { /* SBC or ZBC */ + arr[n++] = 0x89; /* ATA information */ + arr[n++] = 0xb0; /* Block limits */ + arr[n++] = 0xb1; /* Block characteristics */ + if (is_disk) + arr[n++] = 0xb2; /* LB Provisioning */ + if (is_zbc) + arr[n++] = 0xb6; /* ZB dev. char. */ + } + arr[3] = n - 4; /* number of supported VPD pages */ + } else if (0x80 == cmd[2]) { /* unit serial number */ + arr[1] = cmd[2]; /*sanity */ + arr[3] = len; + memcpy(&arr[4], lu_id_str, len); + } else if (0x83 == cmd[2]) { /* device identification */ + arr[1] = cmd[2]; /*sanity */ + arr[3] = inquiry_vpd_83(&arr[4], port_group_id, + target_dev_id, lu_id_num, + lu_id_str, len, + &devip->lu_name); + } else if (0x84 == cmd[2]) { /* Software interface ident. */ + arr[1] = cmd[2]; /*sanity */ + arr[3] = inquiry_vpd_84(&arr[4]); + } else if (0x85 == cmd[2]) { /* Management network addresses */ + arr[1] = cmd[2]; /*sanity */ + arr[3] = inquiry_vpd_85(&arr[4]); + } else if (0x86 == cmd[2]) { /* extended inquiry */ + arr[1] = cmd[2]; /*sanity */ + arr[3] = 0x3c; /* number of following entries */ + if (sdebug_dif == T10_PI_TYPE3_PROTECTION) + arr[4] = 0x4; /* SPT: GRD_CHK:1 */ + else if (have_dif_prot) + arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */ + else + arr[4] = 0x0; /* no protection stuff */ + arr[5] = 0x7; /* head of q, ordered + simple q's */ + } else if (0x87 == cmd[2]) { /* mode page policy */ + arr[1] = cmd[2]; /*sanity */ + arr[3] = 0x8; /* number of following entries */ + arr[4] = 0x2; /* disconnect-reconnect mp */ + arr[6] = 0x80; /* mlus, shared */ + arr[8] = 0x18; /* protocol specific lu */ + arr[10] = 0x82; /* mlus, per initiator port */ + } else if (0x88 == cmd[2]) { /* SCSI Ports */ + arr[1] = cmd[2]; /*sanity */ + arr[3] = inquiry_vpd_88(&arr[4], target_dev_id); + } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */ + arr[1] = cmd[2]; /*sanity */ + n = inquiry_vpd_89(&arr[4]); + put_unaligned_be16(n, arr + 2); + } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */ + arr[1] = cmd[2]; /*sanity */ + arr[3] = inquiry_vpd_b0(&arr[4]); + } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */ + arr[1] = cmd[2]; /*sanity */ + arr[3] = inquiry_vpd_b1(devip, &arr[4]); + } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */ + arr[1] = cmd[2]; /*sanity */ + arr[3] = inquiry_vpd_b2(&arr[4]); + } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */ + arr[1] = cmd[2]; /*sanity */ + arr[3] = inquiry_vpd_b6(devip, &arr[4]); + } else { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); + kfree(arr); + return check_condition_result; + } + len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len); + ret = fill_from_dev_buffer(scp, arr, + min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ)); + kfree(arr); + return ret; + } + /* drops through here for a standard inquiry */ + arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */ + arr[2] = sdebug_scsi_level; + arr[3] = 2; /* response_data_format==2 */ + arr[4] = SDEBUG_LONG_INQ_SZ - 5; + arr[5] = (int)have_dif_prot; /* PROTECT bit */ + if (sdebug_vpd_use_hostno == 0) + arr[5] |= 0x10; /* claim: implicit TPGS */ + arr[6] = 0x10; /* claim: MultiP */ + /* arr[6] |= 0x40; ... 
claim: EncServ (enclosure services) */ + arr[7] = 0xa; /* claim: LINKED + CMDQUE */ + memcpy(&arr[8], sdebug_inq_vendor_id, 8); + memcpy(&arr[16], sdebug_inq_product_id, 16); + memcpy(&arr[32], sdebug_inq_product_rev, 4); + /* Use Vendor Specific area to place driver date in ASCII hex */ + memcpy(&arr[36], sdebug_version_date, 8); + /* version descriptors (2 bytes each) follow */ + put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */ + put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */ + n = 62; + if (is_disk) { /* SBC-4 no version claimed */ + put_unaligned_be16(0x600, arr + n); + n += 2; + } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */ + put_unaligned_be16(0x525, arr + n); + n += 2; + } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */ + put_unaligned_be16(0x624, arr + n); + n += 2; + } + put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */ + ret = fill_from_dev_buffer(scp, arr, + min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ)); + kfree(arr); + return ret; +} + +/* See resp_iec_m_pg() for how this data is manipulated */ +static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, + 0, 0, 0x0, 0x0}; + +static int resp_requests(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + unsigned char *cmd = scp->cmnd; + unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */ + bool dsense = !!(cmd[1] & 1); + u32 alloc_len = cmd[4]; + u32 len = 18; + int stopped_state = atomic_read(&devip->stopped); + + memset(arr, 0, sizeof(arr)); + if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */ + if (dsense) { + arr[0] = 0x72; + arr[1] = NOT_READY; + arr[2] = LOGICAL_UNIT_NOT_READY; + arr[3] = (stopped_state == 2) ? 0x1 : 0x2; + len = 8; + } else { + arr[0] = 0x70; + arr[2] = NOT_READY; /* NO_SENSE in sense_key */ + arr[7] = 0xa; /* 18 byte sense buffer */ + arr[12] = LOGICAL_UNIT_NOT_READY; + arr[13] = (stopped_state == 2) ? 
0x1 : 0x2; + } + } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) { + /* Information exceptions control mode page: TEST=1, MRIE=6 */ + if (dsense) { + arr[0] = 0x72; + arr[1] = 0x0; /* NO_SENSE in sense_key */ + arr[2] = THRESHOLD_EXCEEDED; + arr[3] = 0xff; /* Failure prediction(false) */ + len = 8; + } else { + arr[0] = 0x70; + arr[2] = 0x0; /* NO_SENSE in sense_key */ + arr[7] = 0xa; /* 18 byte sense buffer */ + arr[12] = THRESHOLD_EXCEEDED; + arr[13] = 0xff; /* Failure prediction(false) */ + } + } else { /* nothing to report */ + if (dsense) { + len = 8; + memset(arr, 0, len); + arr[0] = 0x72; + } else { + memset(arr, 0, len); + arr[0] = 0x70; + arr[7] = 0xa; + } + } + return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len)); +} + +static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + unsigned char *cmd = scp->cmnd; + int power_cond, want_stop, stopped_state; + bool changing; + + power_cond = (cmd[4] & 0xf0) >> 4; + if (power_cond) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7); + return check_condition_result; + } + want_stop = !(cmd[4] & 1); + stopped_state = atomic_read(&devip->stopped); + if (stopped_state == 2) { + ktime_t now_ts = ktime_get_boottime(); + + if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) { + u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts)); + + if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) { + /* tur_ms_to_ready timer extinguished */ + atomic_set(&devip->stopped, 0); + stopped_state = 0; + } + } + if (stopped_state == 2) { + if (want_stop) { + stopped_state = 1; /* dummy up success */ + } else { /* Disallow tur_ms_to_ready delay to be overridden */ + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */); + return check_condition_result; + } + } + } + changing = (stopped_state != want_stop); + if (changing) + atomic_xchg(&devip->stopped, want_stop); + if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */ + return SDEG_RES_IMMED_MASK; + else + return 0; +} + +static sector_t get_sdebug_capacity(void) +{ + static const unsigned int gibibyte = 1073741824; + + if (sdebug_virtual_gb > 0) + return (sector_t)sdebug_virtual_gb * + (gibibyte / sdebug_sector_size); + else + return sdebug_store_sectors; +} + +#define SDEBUG_READCAP_ARR_SZ 8 +static int resp_readcap(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + unsigned char arr[SDEBUG_READCAP_ARR_SZ]; + unsigned int capac; + + /* following just in case virtual_gb changed */ + sdebug_capacity = get_sdebug_capacity(); + memset(arr, 0, SDEBUG_READCAP_ARR_SZ); + if (sdebug_capacity < 0xffffffff) { + capac = (unsigned int)sdebug_capacity - 1; + put_unaligned_be32(capac, arr + 0); + } else + put_unaligned_be32(0xffffffff, arr + 0); + put_unaligned_be16(sdebug_sector_size, arr + 6); + return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ); +} + +#define SDEBUG_READCAP16_ARR_SZ 32 +static int resp_readcap16(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + unsigned char *cmd = scp->cmnd; + unsigned char arr[SDEBUG_READCAP16_ARR_SZ]; + u32 alloc_len; + + alloc_len = get_unaligned_be32(cmd + 10); + /* following just in case virtual_gb changed */ + sdebug_capacity = get_sdebug_capacity(); + memset(arr, 0, SDEBUG_READCAP16_ARR_SZ); + put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0); + put_unaligned_be32(sdebug_sector_size, arr + 8); + arr[13] = sdebug_physblk_exp & 0xf; + arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f; + + if (scsi_debug_lbp()) { + arr[14] |= 0x80; /* LBPME */ + /* 
from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in + * the LB Provisioning VPD page is 3 bits. Note that lbprz=2 + * in the wider field maps to 0 in this field. + */ + if (sdebug_lbprz & 1) /* precisely what the draft requires */ + arr[14] |= 0x40; + } + + /* + * Since the scsi_debug READ CAPACITY implementation always reports the + * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices. + */ + if (devip->zmodel == BLK_ZONED_HM) + arr[12] |= 1 << 4; + + arr[15] = sdebug_lowest_aligned & 0xff; + + if (have_dif_prot) { + arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */ + arr[12] |= 1; /* PROT_EN */ + } + + return fill_from_dev_buffer(scp, arr, + min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ)); +} + +#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412 + +static int resp_report_tgtpgs(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + unsigned char *cmd = scp->cmnd; + unsigned char *arr; + int host_no = devip->sdbg_host->shost->host_no; + int port_group_a, port_group_b, port_a, port_b; + u32 alen, n, rlen; + int ret; + + alen = get_unaligned_be32(cmd + 6); + arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC); + if (! arr) + return DID_REQUEUE << 16; + /* + * EVPD page 0x88 states we have two ports, one + * real and a fake port with no device connected. + * So we create two port groups with one port each + * and set the group with port B to unavailable. + */ + port_a = 0x1; /* relative port A */ + port_b = 0x2; /* relative port B */ + port_group_a = (((host_no + 1) & 0x7f) << 8) + + (devip->channel & 0x7f); + port_group_b = (((host_no + 1) & 0x7f) << 8) + + (devip->channel & 0x7f) + 0x80; + + /* + * The asymmetric access state is cycled according to the host_id. + */ + n = 4; + if (sdebug_vpd_use_hostno == 0) { + arr[n++] = host_no % 3; /* Asymm access state */ + arr[n++] = 0x0F; /* claim: all states are supported */ + } else { + arr[n++] = 0x0; /* Active/Optimized path */ + arr[n++] = 0x01; /* only support active/optimized paths */ + } + put_unaligned_be16(port_group_a, arr + n); + n += 2; + arr[n++] = 0; /* Reserved */ + arr[n++] = 0; /* Status code */ + arr[n++] = 0; /* Vendor unique */ + arr[n++] = 0x1; /* One port per group */ + arr[n++] = 0; /* Reserved */ + arr[n++] = 0; /* Reserved */ + put_unaligned_be16(port_a, arr + n); + n += 2; + arr[n++] = 3; /* Port unavailable */ + arr[n++] = 0x08; /* claim: only unavailable paths are supported */ + put_unaligned_be16(port_group_b, arr + n); + n += 2; + arr[n++] = 0; /* Reserved */ + arr[n++] = 0; /* Status code */ + arr[n++] = 0; /* Vendor unique */ + arr[n++] = 0x1; /* One port per group */ + arr[n++] = 0; /* Reserved */ + arr[n++] = 0; /* Reserved */ + put_unaligned_be16(port_b, arr + n); + n += 2; + + rlen = n - 4; + put_unaligned_be32(rlen, arr + 0); + + /* + * Return the smallest value of either + * - The allocated length + * - The constructed command length + * - The maximum array size + */ + rlen = min(alen, n); + ret = fill_from_dev_buffer(scp, arr, + min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ)); + kfree(arr); + return ret; +} + +static int resp_rsup_opcodes(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + bool rctd; + u8 reporting_opts, req_opcode, sdeb_i, supp; + u16 req_sa, u; + u32 alloc_len, a_len; + int k, offset, len, errsts, count, bump, na; + const struct opcode_info_t *oip; + const struct opcode_info_t *r_oip; + u8 *arr; + u8 *cmd = scp->cmnd; + + rctd = !!(cmd[2] & 0x80); + reporting_opts = cmd[2] & 0x7; + req_opcode = cmd[3]; + req_sa = get_unaligned_be16(cmd + 4); + alloc_len = 
get_unaligned_be32(cmd + 6); + if (alloc_len < 4 || alloc_len > 0xffff) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); + return check_condition_result; + } + if (alloc_len > 8192) + a_len = 8192; + else + a_len = alloc_len; + arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC); + if (NULL == arr) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, + INSUFF_RES_ASCQ); + return check_condition_result; + } + switch (reporting_opts) { + case 0: /* all commands */ + /* count number of commands */ + for (count = 0, oip = opcode_info_arr; + oip->num_attached != 0xff; ++oip) { + if (F_INV_OP & oip->flags) + continue; + count += (oip->num_attached + 1); + } + bump = rctd ? 20 : 8; + put_unaligned_be32(count * bump, arr); + for (offset = 4, oip = opcode_info_arr; + oip->num_attached != 0xff && offset < a_len; ++oip) { + if (F_INV_OP & oip->flags) + continue; + na = oip->num_attached; + arr[offset] = oip->opcode; + put_unaligned_be16(oip->sa, arr + offset + 2); + if (rctd) + arr[offset + 5] |= 0x2; + if (FF_SA & oip->flags) + arr[offset + 5] |= 0x1; + put_unaligned_be16(oip->len_mask[0], arr + offset + 6); + if (rctd) + put_unaligned_be16(0xa, arr + offset + 8); + r_oip = oip; + for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { + if (F_INV_OP & oip->flags) + continue; + offset += bump; + arr[offset] = oip->opcode; + put_unaligned_be16(oip->sa, arr + offset + 2); + if (rctd) + arr[offset + 5] |= 0x2; + if (FF_SA & oip->flags) + arr[offset + 5] |= 0x1; + put_unaligned_be16(oip->len_mask[0], + arr + offset + 6); + if (rctd) + put_unaligned_be16(0xa, + arr + offset + 8); + } + oip = r_oip; + offset += bump; + } + break; + case 1: /* one command: opcode only */ + case 2: /* one command: opcode plus service action */ + case 3: /* one command: if sa==0 then opcode only else opcode+sa */ + sdeb_i = opcode_ind_arr[req_opcode]; + oip = &opcode_info_arr[sdeb_i]; + if (F_INV_OP & oip->flags) { + supp = 1; + offset = 4; + } else { + if (1 == reporting_opts) { + if (FF_SA & oip->flags) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, + 2, 2); + kfree(arr); + return check_condition_result; + } + req_sa = 0; + } else if (2 == reporting_opts && + 0 == (FF_SA & oip->flags)) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); + kfree(arr); /* point at requested sa */ + return check_condition_result; + } + if (0 == (FF_SA & oip->flags) && + req_opcode == oip->opcode) + supp = 3; + else if (0 == (FF_SA & oip->flags)) { + na = oip->num_attached; + for (k = 0, oip = oip->arrp; k < na; + ++k, ++oip) { + if (req_opcode == oip->opcode) + break; + } + supp = (k >= na) ? 1 : 3; + } else if (req_sa != oip->sa) { + na = oip->num_attached; + for (k = 0, oip = oip->arrp; k < na; + ++k, ++oip) { + if (req_sa == oip->sa) + break; + } + supp = (k >= na) ? 1 : 3; + } else + supp = 3; + if (3 == supp) { + u = oip->len_mask[0]; + put_unaligned_be16(u, arr + 2); + arr[4] = oip->opcode; + for (k = 1; k < u; ++k) + arr[4 + k] = (k < 16) ? + oip->len_mask[k] : 0xff; + offset = 4 + u; + } else + offset = 4; + } + arr[1] = (rctd ? 0x80 : 0) | supp; + if (rctd) { + put_unaligned_be16(0xa, arr + offset); + offset += 12; + } + break; + default: + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2); + kfree(arr); + return check_condition_result; + } + offset = (offset < a_len) ? offset : a_len; + len = (offset < alloc_len) ? 
offset : alloc_len; + errsts = fill_from_dev_buffer(scp, arr, len); + kfree(arr); + return errsts; +} + +static int resp_rsup_tmfs(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + bool repd; + u32 alloc_len, len; + u8 arr[16]; + u8 *cmd = scp->cmnd; + + memset(arr, 0, sizeof(arr)); + repd = !!(cmd[2] & 0x80); + alloc_len = get_unaligned_be32(cmd + 6); + if (alloc_len < 4) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); + return check_condition_result; + } + arr[0] = 0xc8; /* ATS | ATSS | LURS */ + arr[1] = 0x1; /* ITNRS */ + if (repd) { + arr[3] = 0xc; + len = 16; + } else + len = 4; + + len = (len < alloc_len) ? len : alloc_len; + return fill_from_dev_buffer(scp, arr, len); +} + +/* <> */ + +static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target) +{ /* Read-Write Error Recovery page for mode_sense */ + unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, + 5, 0, 0xff, 0xff}; + + memcpy(p, err_recov_pg, sizeof(err_recov_pg)); + if (1 == pcontrol) + memset(p + 2, 0, sizeof(err_recov_pg) - 2); + return sizeof(err_recov_pg); +} + +static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target) +{ /* Disconnect-Reconnect page for mode_sense */ + unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0}; + + memcpy(p, disconnect_pg, sizeof(disconnect_pg)); + if (1 == pcontrol) + memset(p + 2, 0, sizeof(disconnect_pg) - 2); + return sizeof(disconnect_pg); +} + +static int resp_format_pg(unsigned char *p, int pcontrol, int target) +{ /* Format device page for mode_sense */ + unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0x40, 0, 0, 0}; + + memcpy(p, format_pg, sizeof(format_pg)); + put_unaligned_be16(sdebug_sectors_per, p + 10); + put_unaligned_be16(sdebug_sector_size, p + 12); + if (sdebug_removable) + p[20] |= 0x20; /* should agree with INQUIRY */ + if (1 == pcontrol) + memset(p + 2, 0, sizeof(format_pg) - 2); + return sizeof(format_pg); +} + +static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, + 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, + 0, 0, 0, 0}; + +static int resp_caching_pg(unsigned char *p, int pcontrol, int target) +{ /* Caching page for mode_sense */ + unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, + 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0}; + + if (SDEBUG_OPT_N_WCE & sdebug_opts) + caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */ + memcpy(p, caching_pg, sizeof(caching_pg)); + if (1 == pcontrol) + memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg)); + else if (2 == pcontrol) + memcpy(p, d_caching_pg, sizeof(d_caching_pg)); + return sizeof(caching_pg); +} + +static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, + 0, 0, 0x2, 0x4b}; + +static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target) +{ /* Control mode page for mode_sense */ + unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0, + 0, 0, 0, 0}; + unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, + 0, 0, 0x2, 0x4b}; + + if (sdebug_dsense) + ctrl_m_pg[2] |= 0x4; + else + ctrl_m_pg[2] &= ~0x4; + + if (sdebug_ato) + ctrl_m_pg[5] |= 0x80; /* ATO=1 */ + + memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg)); + if (1 == pcontrol) + memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg)); + else if (2 == pcontrol) + memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg)); + return sizeof(ctrl_m_pg); +} + + +static int 
resp_iec_m_pg(unsigned char *p, int pcontrol, int target) +{ /* Informational Exceptions control mode page for mode_sense */ + unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0, + 0, 0, 0x0, 0x0}; + unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, + 0, 0, 0x0, 0x0}; + + memcpy(p, iec_m_pg, sizeof(iec_m_pg)); + if (1 == pcontrol) + memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg)); + else if (2 == pcontrol) + memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg)); + return sizeof(iec_m_pg); +} + +static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target) +{ /* SAS SSP mode page - short format for mode_sense */ + unsigned char sas_sf_m_pg[] = {0x19, 0x6, + 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0}; + + memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg)); + if (1 == pcontrol) + memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2); + return sizeof(sas_sf_m_pg); +} + + +static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target, + int target_dev_id) +{ /* SAS phy control and discover mode page for mode_sense */ + unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2, + 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0, + 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ + 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ + 0x2, 0, 0, 0, 0, 0, 0, 0, + 0x88, 0x99, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0, + 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ + 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ + 0x3, 0, 0, 0, 0, 0, 0, 0, + 0x88, 0x99, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + }; + int port_a, port_b; + + put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16); + put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24); + put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64); + put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72); + port_a = target_dev_id + 1; + port_b = port_a + 1; + memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg)); + put_unaligned_be32(port_a, p + 20); + put_unaligned_be32(port_b, p + 48 + 20); + if (1 == pcontrol) + memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4); + return sizeof(sas_pcd_m_pg); +} + +static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol) +{ /* SAS SSP shared protocol specific port mode subpage */ + unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + }; + + memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg)); + if (1 == pcontrol) + memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4); + return sizeof(sas_sha_m_pg); +} + +#define SDEBUG_MAX_MSENSE_SZ 256 + +static int resp_mode_sense(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + int pcontrol, pcode, subpcode, bd_len; + unsigned char dev_spec; + u32 alloc_len, offset, len; + int target_dev_id; + int target = scp->device->id; + unsigned char *ap; + unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; + unsigned char *cmd = scp->cmnd; + bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode; + + dbd = !!(cmd[1] & 0x8); /* disable block descriptors */ + pcontrol = (cmd[2] & 0xc0) >> 6; + pcode = cmd[2] & 0x3f; + subpcode = cmd[3]; + msense_6 = (MODE_SENSE == cmd[0]); + llbaa = msense_6 ? false : !!(cmd[1] & 0x10); + is_disk = (sdebug_ptype == TYPE_DISK); + is_zbc = (devip->zmodel != BLK_ZONED_NONE); + if ((is_disk || is_zbc) && !dbd) + bd_len = llbaa ? 16 : 8; + else + bd_len = 0; + alloc_len = msense_6 ? 
cmd[4] : get_unaligned_be16(cmd + 7); + memset(arr, 0, SDEBUG_MAX_MSENSE_SZ); + if (0x3 == pcontrol) { /* Saving values not supported */ + mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0); + return check_condition_result; + } + target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) + + (devip->target * 1000) - 3; + /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */ + if (is_disk || is_zbc) { + dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */ + if (sdebug_wp) + dev_spec |= 0x80; + } else + dev_spec = 0x0; + if (msense_6) { + arr[2] = dev_spec; + arr[3] = bd_len; + offset = 4; + } else { + arr[3] = dev_spec; + if (16 == bd_len) + arr[4] = 0x1; /* set LONGLBA bit */ + arr[7] = bd_len; /* assume 255 or less */ + offset = 8; + } + ap = arr + offset; + if ((bd_len > 0) && (!sdebug_capacity)) + sdebug_capacity = get_sdebug_capacity(); + + if (8 == bd_len) { + if (sdebug_capacity > 0xfffffffe) + put_unaligned_be32(0xffffffff, ap + 0); + else + put_unaligned_be32(sdebug_capacity, ap + 0); + put_unaligned_be16(sdebug_sector_size, ap + 6); + offset += bd_len; + ap = arr + offset; + } else if (16 == bd_len) { + put_unaligned_be64((u64)sdebug_capacity, ap + 0); + put_unaligned_be32(sdebug_sector_size, ap + 12); + offset += bd_len; + ap = arr + offset; + } + + if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) { + /* TODO: Control Extension page */ + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); + return check_condition_result; + } + bad_pcode = false; + + switch (pcode) { + case 0x1: /* Read-Write error recovery page, direct access */ + len = resp_err_recov_pg(ap, pcontrol, target); + offset += len; + break; + case 0x2: /* Disconnect-Reconnect page, all devices */ + len = resp_disconnect_pg(ap, pcontrol, target); + offset += len; + break; + case 0x3: /* Format device page, direct access */ + if (is_disk) { + len = resp_format_pg(ap, pcontrol, target); + offset += len; + } else + bad_pcode = true; + break; + case 0x8: /* Caching page, direct access */ + if (is_disk || is_zbc) { + len = resp_caching_pg(ap, pcontrol, target); + offset += len; + } else + bad_pcode = true; + break; + case 0xa: /* Control Mode page, all devices */ + len = resp_ctrl_m_pg(ap, pcontrol, target); + offset += len; + break; + case 0x19: /* if spc==1 then sas phy, control+discover */ + if ((subpcode > 0x2) && (subpcode < 0xff)) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); + return check_condition_result; + } + len = 0; + if ((0x0 == subpcode) || (0xff == subpcode)) + len += resp_sas_sf_m_pg(ap + len, pcontrol, target); + if ((0x1 == subpcode) || (0xff == subpcode)) + len += resp_sas_pcd_m_spg(ap + len, pcontrol, target, + target_dev_id); + if ((0x2 == subpcode) || (0xff == subpcode)) + len += resp_sas_sha_m_spg(ap + len, pcontrol); + offset += len; + break; + case 0x1c: /* Informational Exceptions Mode page, all devices */ + len = resp_iec_m_pg(ap, pcontrol, target); + offset += len; + break; + case 0x3f: /* Read all Mode pages */ + if ((0 == subpcode) || (0xff == subpcode)) { + len = resp_err_recov_pg(ap, pcontrol, target); + len += resp_disconnect_pg(ap + len, pcontrol, target); + if (is_disk) { + len += resp_format_pg(ap + len, pcontrol, + target); + len += resp_caching_pg(ap + len, pcontrol, + target); + } else if (is_zbc) { + len += resp_caching_pg(ap + len, pcontrol, + target); + } + len += resp_ctrl_m_pg(ap + len, pcontrol, target); + len += resp_sas_sf_m_pg(ap + len, pcontrol, target); + if (0xff == subpcode) { + len += resp_sas_pcd_m_spg(ap + len, pcontrol, + 
target, target_dev_id); + len += resp_sas_sha_m_spg(ap + len, pcontrol); + } + len += resp_iec_m_pg(ap + len, pcontrol, target); + offset += len; + } else { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); + return check_condition_result; + } + break; + default: + bad_pcode = true; + break; + } + if (bad_pcode) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5); + return check_condition_result; + } + if (msense_6) + arr[0] = offset - 1; + else + put_unaligned_be16((offset - 2), arr + 0); + return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset)); +} + +#define SDEBUG_MAX_MSELECT_SZ 512 + +static int resp_mode_select(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + int pf, sp, ps, md_len, bd_len, off, spf, pg_len; + int param_len, res, mpage; + unsigned char arr[SDEBUG_MAX_MSELECT_SZ]; + unsigned char *cmd = scp->cmnd; + int mselect6 = (MODE_SELECT == cmd[0]); + + memset(arr, 0, sizeof(arr)); + pf = cmd[1] & 0x10; + sp = cmd[1] & 0x1; + param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7); + if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1); + return check_condition_result; + } + res = fetch_to_dev_buffer(scp, arr, param_len); + if (-1 == res) + return DID_ERROR << 16; + else if (sdebug_verbose && (res < param_len)) + sdev_printk(KERN_INFO, scp->device, + "%s: cdb indicated=%d, IO sent=%d bytes\n", + __func__, param_len, res); + md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2); + bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6); + off = bd_len + (mselect6 ? 4 : 8); + if (md_len > 2 || off >= res) { + mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); + return check_condition_result; + } + mpage = arr[off] & 0x3f; + ps = !!(arr[off] & 0x80); + if (ps) { + mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7); + return check_condition_result; + } + spf = !!(arr[off] & 0x40); + pg_len = spf ? 
(get_unaligned_be16(arr + off + 2) + 4) : + (arr[off + 1] + 2); + if ((pg_len + off) > param_len) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, + PARAMETER_LIST_LENGTH_ERR, 0); + return check_condition_result; + } + switch (mpage) { + case 0x8: /* Caching Mode page */ + if (caching_pg[1] == arr[off + 1]) { + memcpy(caching_pg + 2, arr + off + 2, + sizeof(caching_pg) - 2); + goto set_mode_changed_ua; + } + break; + case 0xa: /* Control Mode page */ + if (ctrl_m_pg[1] == arr[off + 1]) { + memcpy(ctrl_m_pg + 2, arr + off + 2, + sizeof(ctrl_m_pg) - 2); + if (ctrl_m_pg[4] & 0x8) + sdebug_wp = true; + else + sdebug_wp = false; + sdebug_dsense = !!(ctrl_m_pg[2] & 0x4); + goto set_mode_changed_ua; + } + break; + case 0x1c: /* Informational Exceptions Mode page */ + if (iec_m_pg[1] == arr[off + 1]) { + memcpy(iec_m_pg + 2, arr + off + 2, + sizeof(iec_m_pg) - 2); + goto set_mode_changed_ua; + } + break; + default: + break; + } + mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5); + return check_condition_result; +set_mode_changed_ua: + set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm); + return 0; +} + +static int resp_temp_l_pg(unsigned char *arr) +{ + unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38, + 0x0, 0x1, 0x3, 0x2, 0x0, 65, + }; + + memcpy(arr, temp_l_pg, sizeof(temp_l_pg)); + return sizeof(temp_l_pg); +} + +static int resp_ie_l_pg(unsigned char *arr) +{ + unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38, + }; + + memcpy(arr, ie_l_pg, sizeof(ie_l_pg)); + if (iec_m_pg[2] & 0x4) { /* TEST bit set */ + arr[4] = THRESHOLD_EXCEEDED; + arr[5] = 0xff; + } + return sizeof(ie_l_pg); +} + +static int resp_env_rep_l_spg(unsigned char *arr) +{ + unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8, + 0x0, 40, 72, 0xff, 45, 18, 0, 0, + 0x1, 0x0, 0x23, 0x8, + 0x0, 55, 72, 35, 55, 45, 0, 0, + }; + + memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg)); + return sizeof(env_rep_l_spg); +} + +#define SDEBUG_MAX_LSENSE_SZ 512 + +static int resp_log_sense(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + int ppc, sp, pcode, subpcode; + u32 alloc_len, len, n; + unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; + unsigned char *cmd = scp->cmnd; + + memset(arr, 0, sizeof(arr)); + ppc = cmd[1] & 0x2; + sp = cmd[1] & 0x1; + if (ppc || sp) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 
1 : 0); + return check_condition_result; + } + pcode = cmd[2] & 0x3f; + subpcode = cmd[3] & 0xff; + alloc_len = get_unaligned_be16(cmd + 7); + arr[0] = pcode; + if (0 == subpcode) { + switch (pcode) { + case 0x0: /* Supported log pages log page */ + n = 4; + arr[n++] = 0x0; /* this page */ + arr[n++] = 0xd; /* Temperature */ + arr[n++] = 0x2f; /* Informational exceptions */ + arr[3] = n - 4; + break; + case 0xd: /* Temperature log page */ + arr[3] = resp_temp_l_pg(arr + 4); + break; + case 0x2f: /* Informational exceptions log page */ + arr[3] = resp_ie_l_pg(arr + 4); + break; + default: + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5); + return check_condition_result; + } + } else if (0xff == subpcode) { + arr[0] |= 0x40; + arr[1] = subpcode; + switch (pcode) { + case 0x0: /* Supported log pages and subpages log page */ + n = 4; + arr[n++] = 0x0; + arr[n++] = 0x0; /* 0,0 page */ + arr[n++] = 0x0; + arr[n++] = 0xff; /* this page */ + arr[n++] = 0xd; + arr[n++] = 0x0; /* Temperature */ + arr[n++] = 0xd; + arr[n++] = 0x1; /* Environment reporting */ + arr[n++] = 0xd; + arr[n++] = 0xff; /* all 0xd subpages */ + arr[n++] = 0x2f; + arr[n++] = 0x0; /* Informational exceptions */ + arr[n++] = 0x2f; + arr[n++] = 0xff; /* all 0x2f subpages */ + arr[3] = n - 4; + break; + case 0xd: /* Temperature subpages */ + n = 4; + arr[n++] = 0xd; + arr[n++] = 0x0; /* Temperature */ + arr[n++] = 0xd; + arr[n++] = 0x1; /* Environment reporting */ + arr[n++] = 0xd; + arr[n++] = 0xff; /* these subpages */ + arr[3] = n - 4; + break; + case 0x2f: /* Informational exceptions subpages */ + n = 4; + arr[n++] = 0x2f; + arr[n++] = 0x0; /* Informational exceptions */ + arr[n++] = 0x2f; + arr[n++] = 0xff; /* these subpages */ + arr[3] = n - 4; + break; + default: + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5); + return check_condition_result; + } + } else if (subpcode > 0) { + arr[0] |= 0x40; + arr[1] = subpcode; + if (pcode == 0xd && subpcode == 1) + arr[3] = resp_env_rep_l_spg(arr + 4); + else { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5); + return check_condition_result; + } + } else { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); + return check_condition_result; + } + len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len); + return fill_from_dev_buffer(scp, arr, + min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ)); +} + +static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip) +{ + return devip->nr_zones != 0; +} + +static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip, + unsigned long long lba) +{ + u32 zno = lba >> devip->zsize_shift; + struct sdeb_zone_state *zsp; + + if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones) + return &devip->zstate[zno]; + + /* + * If the zone capacity is less than the zone size, adjust for gap + * zones. 
+ */ + zno = 2 * zno - devip->nr_conv_zones; + WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones); + zsp = &devip->zstate[zno]; + if (lba >= zsp->z_start + zsp->z_size) + zsp++; + WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size); + return zsp; +} + +static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp) +{ + return zsp->z_type == ZBC_ZTYPE_CNV; +} + +static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp) +{ + return zsp->z_type == ZBC_ZTYPE_GAP; +} + +static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp) +{ + return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp); +} + +static void zbc_close_zone(struct sdebug_dev_info *devip, + struct sdeb_zone_state *zsp) +{ + enum sdebug_z_cond zc; + + if (!zbc_zone_is_seq(zsp)) + return; + + zc = zsp->z_cond; + if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)) + return; + + if (zc == ZC2_IMPLICIT_OPEN) + devip->nr_imp_open--; + else + devip->nr_exp_open--; + + if (zsp->z_wp == zsp->z_start) { + zsp->z_cond = ZC1_EMPTY; + } else { + zsp->z_cond = ZC4_CLOSED; + devip->nr_closed++; + } +} + +static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip) +{ + struct sdeb_zone_state *zsp = &devip->zstate[0]; + unsigned int i; + + for (i = 0; i < devip->nr_zones; i++, zsp++) { + if (zsp->z_cond == ZC2_IMPLICIT_OPEN) { + zbc_close_zone(devip, zsp); + return; + } + } +} + +static void zbc_open_zone(struct sdebug_dev_info *devip, + struct sdeb_zone_state *zsp, bool explicit) +{ + enum sdebug_z_cond zc; + + if (!zbc_zone_is_seq(zsp)) + return; + + zc = zsp->z_cond; + if ((explicit && zc == ZC3_EXPLICIT_OPEN) || + (!explicit && zc == ZC2_IMPLICIT_OPEN)) + return; + + /* Close an implicit open zone if necessary */ + if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN) + zbc_close_zone(devip, zsp); + else if (devip->max_open && + devip->nr_imp_open + devip->nr_exp_open >= devip->max_open) + zbc_close_imp_open_zone(devip); + + if (zsp->z_cond == ZC4_CLOSED) + devip->nr_closed--; + if (explicit) { + zsp->z_cond = ZC3_EXPLICIT_OPEN; + devip->nr_exp_open++; + } else { + zsp->z_cond = ZC2_IMPLICIT_OPEN; + devip->nr_imp_open++; + } +} + +static inline void zbc_set_zone_full(struct sdebug_dev_info *devip, + struct sdeb_zone_state *zsp) +{ + switch (zsp->z_cond) { + case ZC2_IMPLICIT_OPEN: + devip->nr_imp_open--; + break; + case ZC3_EXPLICIT_OPEN: + devip->nr_exp_open--; + break; + default: + WARN_ONCE(true, "Invalid zone %llu condition %x\n", + zsp->z_start, zsp->z_cond); + break; + } + zsp->z_cond = ZC5_FULL; +} + +static void zbc_inc_wp(struct sdebug_dev_info *devip, + unsigned long long lba, unsigned int num) +{ + struct sdeb_zone_state *zsp = zbc_zone(devip, lba); + unsigned long long n, end, zend = zsp->z_start + zsp->z_size; + + if (!zbc_zone_is_seq(zsp)) + return; + + if (zsp->z_type == ZBC_ZTYPE_SWR) { + zsp->z_wp += num; + if (zsp->z_wp >= zend) + zbc_set_zone_full(devip, zsp); + return; + } + + while (num) { + if (lba != zsp->z_wp) + zsp->z_non_seq_resource = true; + + end = lba + num; + if (end >= zend) { + n = zend - lba; + zsp->z_wp = zend; + } else if (end > zsp->z_wp) { + n = num; + zsp->z_wp = end; + } else { + n = num; + } + if (zsp->z_wp >= zend) + zbc_set_zone_full(devip, zsp); + + num -= n; + lba += n; + if (num) { + zsp++; + zend = zsp->z_start + zsp->z_size; + } + } +} + +static int check_zbc_access_params(struct scsi_cmnd *scp, + unsigned long long lba, unsigned int num, bool write) +{ + struct scsi_device *sdp = scp->device; + struct sdebug_dev_info *devip = (struct sdebug_dev_info 
*)sdp->hostdata; + struct sdeb_zone_state *zsp = zbc_zone(devip, lba); + struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1); + + if (!write) { + if (devip->zmodel == BLK_ZONED_HA) + return 0; + /* For host-managed, reads cannot cross zone types boundaries */ + if (zsp->z_type != zsp_end->z_type) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, + LBA_OUT_OF_RANGE, + READ_INVDATA_ASCQ); + return check_condition_result; + } + return 0; + } + + /* Writing into a gap zone is not allowed */ + if (zbc_zone_is_gap(zsp)) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, + ATTEMPT_ACCESS_GAP); + return check_condition_result; + } + + /* No restrictions for writes within conventional zones */ + if (zbc_zone_is_conv(zsp)) { + if (!zbc_zone_is_conv(zsp_end)) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, + LBA_OUT_OF_RANGE, + WRITE_BOUNDARY_ASCQ); + return check_condition_result; + } + return 0; + } + + if (zsp->z_type == ZBC_ZTYPE_SWR) { + /* Writes cannot cross sequential zone boundaries */ + if (zsp_end != zsp) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, + LBA_OUT_OF_RANGE, + WRITE_BOUNDARY_ASCQ); + return check_condition_result; + } + /* Cannot write full zones */ + if (zsp->z_cond == ZC5_FULL) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, + INVALID_FIELD_IN_CDB, 0); + return check_condition_result; + } + /* Writes must be aligned to the zone WP */ + if (lba != zsp->z_wp) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, + LBA_OUT_OF_RANGE, + UNALIGNED_WRITE_ASCQ); + return check_condition_result; + } + } + + /* Handle implicit open of closed and empty zones */ + if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) { + if (devip->max_open && + devip->nr_exp_open >= devip->max_open) { + mk_sense_buffer(scp, DATA_PROTECT, + INSUFF_RES_ASC, + INSUFF_ZONE_ASCQ); + return check_condition_result; + } + zbc_open_zone(devip, zsp, false); + } + + return 0; +} + +static inline int check_device_access_params + (struct scsi_cmnd *scp, unsigned long long lba, + unsigned int num, bool write) +{ + struct scsi_device *sdp = scp->device; + struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; + + if (lba + num > sdebug_capacity) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); + return check_condition_result; + } + /* transfer length excessive (tie in to block limits VPD page) */ + if (num > sdebug_store_sectors) { + /* needs work to find which cdb byte 'num' comes from */ + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); + return check_condition_result; + } + if (write && unlikely(sdebug_wp)) { + mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2); + return check_condition_result; + } + if (sdebug_dev_is_zoned(devip)) + return check_zbc_access_params(scp, lba, num, write); + + return 0; +} + +/* + * Note: if BUG_ON() fires it usually indicates a problem with the parser + * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions + * that access any of the "stores" in struct sdeb_store_info should call this + * function with bug_if_fake_rw set to true. + */ +static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip, + bool bug_if_fake_rw) +{ + if (sdebug_fake_rw) { + BUG_ON(bug_if_fake_rw); /* See note above */ + return NULL; + } + return xa_load(per_store_ap, devip->sdbg_host->si_idx); +} + +/* Returns number of bytes copied or -1 if error. 
*/ +static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp, + u32 sg_skip, u64 lba, u32 num, bool do_write) +{ + int ret; + u64 block, rest = 0; + enum dma_data_direction dir; + struct scsi_data_buffer *sdb = &scp->sdb; + u8 *fsp; + + if (do_write) { + dir = DMA_TO_DEVICE; + write_since_sync = true; + } else { + dir = DMA_FROM_DEVICE; + } + + if (!sdb->length || !sip) + return 0; + if (scp->sc_data_direction != dir) + return -1; + fsp = sip->storep; + + block = do_div(lba, sdebug_store_sectors); + if (block + num > sdebug_store_sectors) + rest = block + num - sdebug_store_sectors; + + ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, + fsp + (block * sdebug_sector_size), + (num - rest) * sdebug_sector_size, sg_skip, do_write); + if (ret != (num - rest) * sdebug_sector_size) + return ret; + + if (rest) { + ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents, + fsp, rest * sdebug_sector_size, + sg_skip + ((num - rest) * sdebug_sector_size), + do_write); + } + + return ret; +} + +/* Returns number of bytes copied or -1 if error. */ +static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp) +{ + struct scsi_data_buffer *sdb = &scp->sdb; + + if (!sdb->length) + return 0; + if (scp->sc_data_direction != DMA_TO_DEVICE) + return -1; + return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp, + num * sdebug_sector_size, 0, true); +} + +/* If sip->storep+lba compares equal to arr(num), then copy top half of + * arr into sip->storep+lba and return true. If comparison fails then + * return false. */ +static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num, + const u8 *arr, bool compare_only) +{ + bool res; + u64 block, rest = 0; + u32 store_blks = sdebug_store_sectors; + u32 lb_size = sdebug_sector_size; + u8 *fsp = sip->storep; + + block = do_div(lba, store_blks); + if (block + num > store_blks) + rest = block + num - store_blks; + + res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size); + if (!res) + return res; + if (rest) + res = memcmp(fsp, arr + ((num - rest) * lb_size), + rest * lb_size); + if (!res) + return res; + if (compare_only) + return true; + arr += num * lb_size; + memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size); + if (rest) + memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size); + return res; +} + +static __be16 dif_compute_csum(const void *buf, int len) +{ + __be16 csum; + + if (sdebug_guard) + csum = (__force __be16)ip_compute_csum(buf, len); + else + csum = cpu_to_be16(crc_t10dif(buf, len)); + + return csum; +} + +static int dif_verify(struct t10_pi_tuple *sdt, const void *data, + sector_t sector, u32 ei_lba) +{ + __be16 csum = dif_compute_csum(data, sdebug_sector_size); + + if (sdt->guard_tag != csum) { + pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n", + (unsigned long)sector, + be16_to_cpu(sdt->guard_tag), + be16_to_cpu(csum)); + return 0x01; + } + if (sdebug_dif == T10_PI_TYPE1_PROTECTION && + be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { + pr_err("REF check failed on sector %lu\n", + (unsigned long)sector); + return 0x03; + } + if (sdebug_dif == T10_PI_TYPE2_PROTECTION && + be32_to_cpu(sdt->ref_tag) != ei_lba) { + pr_err("REF check failed on sector %lu\n", + (unsigned long)sector); + return 0x03; + } + return 0; +} + +static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector, + unsigned int sectors, bool read) +{ + size_t resid; + void *paddr; + struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *) + scp->device->hostdata, true); 
+ struct t10_pi_tuple *dif_storep = sip->dif_storep; + const void *dif_store_end = dif_storep + sdebug_store_sectors; + struct sg_mapping_iter miter; + + /* Bytes of protection data to copy into sgl */ + resid = sectors * sizeof(*dif_storep); + + sg_miter_start(&miter, scsi_prot_sglist(scp), + scsi_prot_sg_count(scp), SG_MITER_ATOMIC | + (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG)); + + while (sg_miter_next(&miter) && resid > 0) { + size_t len = min_t(size_t, miter.length, resid); + void *start = dif_store(sip, sector); + size_t rest = 0; + + if (dif_store_end < start + len) + rest = start + len - dif_store_end; + + paddr = miter.addr; + + if (read) + memcpy(paddr, start, len - rest); + else + memcpy(start, paddr, len - rest); + + if (rest) { + if (read) + memcpy(paddr + len - rest, dif_storep, rest); + else + memcpy(dif_storep, paddr + len - rest, rest); + } + + sector += len / sizeof(*dif_storep); + resid -= len; + } + sg_miter_stop(&miter); +} + +static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec, + unsigned int sectors, u32 ei_lba) +{ + int ret = 0; + unsigned int i; + sector_t sector; + struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *) + scp->device->hostdata, true); + struct t10_pi_tuple *sdt; + + for (i = 0; i < sectors; i++, ei_lba++) { + sector = start_sec + i; + sdt = dif_store(sip, sector); + + if (sdt->app_tag == cpu_to_be16(0xffff)) + continue; + + /* + * Because scsi_debug acts as both initiator and + * target we proceed to verify the PI even if + * RDPROTECT=3. This is done so the "initiator" knows + * which type of error to return. Otherwise we would + * have to iterate over the PI twice. + */ + if (scp->cmnd[1] >> 5) { /* RDPROTECT */ + ret = dif_verify(sdt, lba2fake_store(sip, sector), + sector, ei_lba); + if (ret) { + dif_errors++; + break; + } + } + } + + dif_copy_prot(scp, start_sec, sectors, true); + dix_reads++; + + return ret; +} + +static inline void +sdeb_read_lock(struct sdeb_store_info *sip) +{ + if (sdebug_no_rwlock) { + if (sip) + __acquire(&sip->macc_lck); + else + __acquire(&sdeb_fake_rw_lck); + } else { + if (sip) + read_lock(&sip->macc_lck); + else + read_lock(&sdeb_fake_rw_lck); + } +} + +static inline void +sdeb_read_unlock(struct sdeb_store_info *sip) +{ + if (sdebug_no_rwlock) { + if (sip) + __release(&sip->macc_lck); + else + __release(&sdeb_fake_rw_lck); + } else { + if (sip) + read_unlock(&sip->macc_lck); + else + read_unlock(&sdeb_fake_rw_lck); + } +} + +static inline void +sdeb_write_lock(struct sdeb_store_info *sip) +{ + if (sdebug_no_rwlock) { + if (sip) + __acquire(&sip->macc_lck); + else + __acquire(&sdeb_fake_rw_lck); + } else { + if (sip) + write_lock(&sip->macc_lck); + else + write_lock(&sdeb_fake_rw_lck); + } +} + +static inline void +sdeb_write_unlock(struct sdeb_store_info *sip) +{ + if (sdebug_no_rwlock) { + if (sip) + __release(&sip->macc_lck); + else + __release(&sdeb_fake_rw_lck); + } else { + if (sip) + write_unlock(&sip->macc_lck); + else + write_unlock(&sdeb_fake_rw_lck); + } +} + +static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + bool check_prot; + u32 num; + u32 ei_lba; + int ret; + u64 lba; + struct sdeb_store_info *sip = devip2sip(devip, true); + u8 *cmd = scp->cmnd; + + switch (cmd[0]) { + case READ_16: + ei_lba = 0; + lba = get_unaligned_be64(cmd + 2); + num = get_unaligned_be32(cmd + 10); + check_prot = true; + break; + case READ_10: + ei_lba = 0; + lba = get_unaligned_be32(cmd + 2); + num = get_unaligned_be16(cmd + 7); + check_prot = true; + break; + case 
READ_6: + ei_lba = 0; + lba = (u32)cmd[3] | (u32)cmd[2] << 8 | + (u32)(cmd[1] & 0x1f) << 16; + num = (0 == cmd[4]) ? 256 : cmd[4]; + check_prot = true; + break; + case READ_12: + ei_lba = 0; + lba = get_unaligned_be32(cmd + 2); + num = get_unaligned_be32(cmd + 6); + check_prot = true; + break; + case XDWRITEREAD_10: + ei_lba = 0; + lba = get_unaligned_be32(cmd + 2); + num = get_unaligned_be16(cmd + 7); + check_prot = false; + break; + default: /* assume READ(32) */ + lba = get_unaligned_be64(cmd + 12); + ei_lba = get_unaligned_be32(cmd + 20); + num = get_unaligned_be32(cmd + 28); + check_prot = false; + break; + } + if (unlikely(have_dif_prot && check_prot)) { + if (sdebug_dif == T10_PI_TYPE2_PROTECTION && + (cmd[1] & 0xe0)) { + mk_sense_invalid_opcode(scp); + return check_condition_result; + } + if ((sdebug_dif == T10_PI_TYPE1_PROTECTION || + sdebug_dif == T10_PI_TYPE3_PROTECTION) && + (cmd[1] & 0xe0) == 0) + sdev_printk(KERN_ERR, scp->device, "Unprotected RD " + "to DIF device\n"); + } + if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) && + atomic_read(&sdeb_inject_pending))) { + num /= 2; + atomic_set(&sdeb_inject_pending, 0); + } + + ret = check_device_access_params(scp, lba, num, false); + if (ret) + return ret; + if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) && + (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) && + ((lba + num) > sdebug_medium_error_start))) { + /* claim unrecoverable read error */ + mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0); + /* set info field and valid bit for fixed descriptor */ + if (0x70 == (scp->sense_buffer[0] & 0x7f)) { + scp->sense_buffer[0] |= 0x80; /* Valid bit */ + ret = (lba < OPT_MEDIUM_ERR_ADDR) + ? OPT_MEDIUM_ERR_ADDR : (int)lba; + put_unaligned_be32(ret, scp->sense_buffer + 3); + } + scsi_set_resid(scp, scsi_bufflen(scp)); + return check_condition_result; + } + + sdeb_read_lock(sip); + + /* DIX + T10 DIF */ + if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) { + switch (prot_verify_read(scp, lba, num, ei_lba)) { + case 1: /* Guard tag error */ + if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */ + sdeb_read_unlock(sip); + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); + return check_condition_result; + } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) { + sdeb_read_unlock(sip); + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); + return illegal_condition_result; + } + break; + case 3: /* Reference tag error */ + if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */ + sdeb_read_unlock(sip); + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3); + return check_condition_result; + } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) { + sdeb_read_unlock(sip); + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3); + return illegal_condition_result; + } + break; + } + } + + ret = do_device_access(sip, scp, 0, lba, num, false); + sdeb_read_unlock(sip); + if (unlikely(ret == -1)) + return DID_ERROR << 16; + + scsi_set_resid(scp, scsi_bufflen(scp) - ret); + + if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) && + atomic_read(&sdeb_inject_pending))) { + if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) { + mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0); + atomic_set(&sdeb_inject_pending, 0); + return check_condition_result; + } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) { + /* Logical block guard check failed */ + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); + atomic_set(&sdeb_inject_pending, 0); + return illegal_condition_result; + } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) { + mk_sense_buffer(scp, 
ILLEGAL_REQUEST, 0x10, 1); + atomic_set(&sdeb_inject_pending, 0); + return illegal_condition_result; + } + } + return 0; +} + +static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, + unsigned int sectors, u32 ei_lba) +{ + int ret; + struct t10_pi_tuple *sdt; + void *daddr; + sector_t sector = start_sec; + int ppage_offset; + int dpage_offset; + struct sg_mapping_iter diter; + struct sg_mapping_iter piter; + + BUG_ON(scsi_sg_count(SCpnt) == 0); + BUG_ON(scsi_prot_sg_count(SCpnt) == 0); + + sg_miter_start(&piter, scsi_prot_sglist(SCpnt), + scsi_prot_sg_count(SCpnt), + SG_MITER_ATOMIC | SG_MITER_FROM_SG); + sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt), + SG_MITER_ATOMIC | SG_MITER_FROM_SG); + + /* For each protection page */ + while (sg_miter_next(&piter)) { + dpage_offset = 0; + if (WARN_ON(!sg_miter_next(&diter))) { + ret = 0x01; + goto out; + } + + for (ppage_offset = 0; ppage_offset < piter.length; + ppage_offset += sizeof(struct t10_pi_tuple)) { + /* If we're at the end of the current + * data page advance to the next one + */ + if (dpage_offset >= diter.length) { + if (WARN_ON(!sg_miter_next(&diter))) { + ret = 0x01; + goto out; + } + dpage_offset = 0; + } + + sdt = piter.addr + ppage_offset; + daddr = diter.addr + dpage_offset; + + if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */ + ret = dif_verify(sdt, daddr, sector, ei_lba); + if (ret) + goto out; + } + + sector++; + ei_lba++; + dpage_offset += sdebug_sector_size; + } + diter.consumed = dpage_offset; + sg_miter_stop(&diter); + } + sg_miter_stop(&piter); + + dif_copy_prot(SCpnt, start_sec, sectors, false); + dix_writes++; + + return 0; + +out: + dif_errors++; + sg_miter_stop(&diter); + sg_miter_stop(&piter); + return ret; +} + +static unsigned long lba_to_map_index(sector_t lba) +{ + if (sdebug_unmap_alignment) + lba += sdebug_unmap_granularity - sdebug_unmap_alignment; + sector_div(lba, sdebug_unmap_granularity); + return lba; +} + +static sector_t map_index_to_lba(unsigned long index) +{ + sector_t lba = index * sdebug_unmap_granularity; + + if (sdebug_unmap_alignment) + lba -= sdebug_unmap_granularity - sdebug_unmap_alignment; + return lba; +} + +static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba, + unsigned int *num) +{ + sector_t end; + unsigned int mapped; + unsigned long index; + unsigned long next; + + index = lba_to_map_index(lba); + mapped = test_bit(index, sip->map_storep); + + if (mapped) + next = find_next_zero_bit(sip->map_storep, map_size, index); + else + next = find_next_bit(sip->map_storep, map_size, index); + + end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next)); + *num = end - lba; + return mapped; +} + +static void map_region(struct sdeb_store_info *sip, sector_t lba, + unsigned int len) +{ + sector_t end = lba + len; + + while (lba < end) { + unsigned long index = lba_to_map_index(lba); + + if (index < map_size) + set_bit(index, sip->map_storep); + + lba = map_index_to_lba(index + 1); + } +} + +static void unmap_region(struct sdeb_store_info *sip, sector_t lba, + unsigned int len) +{ + sector_t end = lba + len; + u8 *fsp = sip->storep; + + while (lba < end) { + unsigned long index = lba_to_map_index(lba); + + if (lba == map_index_to_lba(index) && + lba + sdebug_unmap_granularity <= end && + index < map_size) { + clear_bit(index, sip->map_storep); + if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */ + memset(fsp + lba * sdebug_sector_size, + (sdebug_lbprz & 1) ? 
0 : 0xff, + sdebug_sector_size * + sdebug_unmap_granularity); + } + if (sip->dif_storep) { + memset(sip->dif_storep + lba, 0xff, + sizeof(*sip->dif_storep) * + sdebug_unmap_granularity); + } + } + lba = map_index_to_lba(index + 1); + } +} + +static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + bool check_prot; + u32 num; + u32 ei_lba; + int ret; + u64 lba; + struct sdeb_store_info *sip = devip2sip(devip, true); + u8 *cmd = scp->cmnd; + + switch (cmd[0]) { + case WRITE_16: + ei_lba = 0; + lba = get_unaligned_be64(cmd + 2); + num = get_unaligned_be32(cmd + 10); + check_prot = true; + break; + case WRITE_10: + ei_lba = 0; + lba = get_unaligned_be32(cmd + 2); + num = get_unaligned_be16(cmd + 7); + check_prot = true; + break; + case WRITE_6: + ei_lba = 0; + lba = (u32)cmd[3] | (u32)cmd[2] << 8 | + (u32)(cmd[1] & 0x1f) << 16; + num = (0 == cmd[4]) ? 256 : cmd[4]; + check_prot = true; + break; + case WRITE_12: + ei_lba = 0; + lba = get_unaligned_be32(cmd + 2); + num = get_unaligned_be32(cmd + 6); + check_prot = true; + break; + case 0x53: /* XDWRITEREAD(10) */ + ei_lba = 0; + lba = get_unaligned_be32(cmd + 2); + num = get_unaligned_be16(cmd + 7); + check_prot = false; + break; + default: /* assume WRITE(32) */ + lba = get_unaligned_be64(cmd + 12); + ei_lba = get_unaligned_be32(cmd + 20); + num = get_unaligned_be32(cmd + 28); + check_prot = false; + break; + } + if (unlikely(have_dif_prot && check_prot)) { + if (sdebug_dif == T10_PI_TYPE2_PROTECTION && + (cmd[1] & 0xe0)) { + mk_sense_invalid_opcode(scp); + return check_condition_result; + } + if ((sdebug_dif == T10_PI_TYPE1_PROTECTION || + sdebug_dif == T10_PI_TYPE3_PROTECTION) && + (cmd[1] & 0xe0) == 0) + sdev_printk(KERN_ERR, scp->device, "Unprotected WR " + "to DIF device\n"); + } + + sdeb_write_lock(sip); + ret = check_device_access_params(scp, lba, num, true); + if (ret) { + sdeb_write_unlock(sip); + return ret; + } + + /* DIX + T10 DIF */ + if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) { + switch (prot_verify_write(scp, lba, num, ei_lba)) { + case 1: /* Guard tag error */ + if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) { + sdeb_write_unlock(sip); + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); + return illegal_condition_result; + } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */ + sdeb_write_unlock(sip); + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); + return check_condition_result; + } + break; + case 3: /* Reference tag error */ + if (scp->prot_flags & SCSI_PROT_REF_CHECK) { + sdeb_write_unlock(sip); + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3); + return illegal_condition_result; + } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */ + sdeb_write_unlock(sip); + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3); + return check_condition_result; + } + break; + } + } + + ret = do_device_access(sip, scp, 0, lba, num, true); + if (unlikely(scsi_debug_lbp())) + map_region(sip, lba, num); + /* If ZBC zone then bump its write pointer */ + if (sdebug_dev_is_zoned(devip)) + zbc_inc_wp(devip, lba, num); + sdeb_write_unlock(sip); + if (unlikely(-1 == ret)) + return DID_ERROR << 16; + else if (unlikely(sdebug_verbose && + (ret < (num * sdebug_sector_size)))) + sdev_printk(KERN_INFO, scp->device, + "%s: write: cdb indicated=%u, IO sent=%d bytes\n", + my_name, num * sdebug_sector_size, ret); + + if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) && + atomic_read(&sdeb_inject_pending))) { + if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) { + mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 
0); + atomic_set(&sdeb_inject_pending, 0); + return check_condition_result; + } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) { + /* Logical block guard check failed */ + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); + atomic_set(&sdeb_inject_pending, 0); + return illegal_condition_result; + } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); + atomic_set(&sdeb_inject_pending, 0); + return illegal_condition_result; + } + } + return 0; +} + +/* + * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32). + * No READ GATHERED yet (requires bidi or long cdb holding gather list). + */ +static int resp_write_scat(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + u8 *cmd = scp->cmnd; + u8 *lrdp = NULL; + u8 *up; + struct sdeb_store_info *sip = devip2sip(devip, true); + u8 wrprotect; + u16 lbdof, num_lrd, k; + u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb; + u32 lb_size = sdebug_sector_size; + u32 ei_lba; + u64 lba; + int ret, res; + bool is_16; + static const u32 lrd_size = 32; /* + parameter list header size */ + + if (cmd[0] == VARIABLE_LENGTH_CMD) { + is_16 = false; + wrprotect = (cmd[10] >> 5) & 0x7; + lbdof = get_unaligned_be16(cmd + 12); + num_lrd = get_unaligned_be16(cmd + 16); + bt_len = get_unaligned_be32(cmd + 28); + } else { /* that leaves WRITE SCATTERED(16) */ + is_16 = true; + wrprotect = (cmd[2] >> 5) & 0x7; + lbdof = get_unaligned_be16(cmd + 4); + num_lrd = get_unaligned_be16(cmd + 8); + bt_len = get_unaligned_be32(cmd + 10); + if (unlikely(have_dif_prot)) { + if (sdebug_dif == T10_PI_TYPE2_PROTECTION && + wrprotect) { + mk_sense_invalid_opcode(scp); + return illegal_condition_result; + } + if ((sdebug_dif == T10_PI_TYPE1_PROTECTION || + sdebug_dif == T10_PI_TYPE3_PROTECTION) && + wrprotect == 0) + sdev_printk(KERN_ERR, scp->device, + "Unprotected WR to DIF device\n"); + } + } + if ((num_lrd == 0) || (bt_len == 0)) + return 0; /* T10 says these do-nothings are not errors */ + if (lbdof == 0) { + if (sdebug_verbose) + sdev_printk(KERN_INFO, scp->device, + "%s: %s: LB Data Offset field bad\n", + my_name, __func__); + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); + return illegal_condition_result; + } + lbdof_blen = lbdof * lb_size; + if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) { + if (sdebug_verbose) + sdev_printk(KERN_INFO, scp->device, + "%s: %s: LBA range descriptors don't fit\n", + my_name, __func__); + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); + return illegal_condition_result; + } + lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN); + if (lrdp == NULL) + return SCSI_MLQUEUE_HOST_BUSY; + if (sdebug_verbose) + sdev_printk(KERN_INFO, scp->device, + "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n", + my_name, __func__, lbdof_blen); + res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen); + if (res == -1) { + ret = DID_ERROR << 16; + goto err_out; + } + + sdeb_write_lock(sip); + sg_off = lbdof_blen; + /* Spec says Buffer xfer Length field in number of LBs in dout */ + cum_lb = 0; + for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) { + lba = get_unaligned_be64(up + 0); + num = get_unaligned_be32(up + 8); + if (sdebug_verbose) + sdev_printk(KERN_INFO, scp->device, + "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n", + my_name, __func__, k, lba, num, sg_off); + if (num == 0) + continue; + ret = check_device_access_params(scp, lba, num, true); + if (ret) + goto err_out_unlock; + num_by = num * lb_size; + ei_lba = is_16 ? 
0 : get_unaligned_be32(up + 12); + + if ((cum_lb + num) > bt_len) { + if (sdebug_verbose) + sdev_printk(KERN_INFO, scp->device, + "%s: %s: sum of blocks > data provided\n", + my_name, __func__); + mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC, + 0); + ret = illegal_condition_result; + goto err_out_unlock; + } + + /* DIX + T10 DIF */ + if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) { + int prot_ret = prot_verify_write(scp, lba, num, + ei_lba); + + if (prot_ret) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, + prot_ret); + ret = illegal_condition_result; + goto err_out_unlock; + } + } + + ret = do_device_access(sip, scp, sg_off, lba, num, true); + /* If ZBC zone then bump its write pointer */ + if (sdebug_dev_is_zoned(devip)) + zbc_inc_wp(devip, lba, num); + if (unlikely(scsi_debug_lbp())) + map_region(sip, lba, num); + if (unlikely(-1 == ret)) { + ret = DID_ERROR << 16; + goto err_out_unlock; + } else if (unlikely(sdebug_verbose && (ret < num_by))) + sdev_printk(KERN_INFO, scp->device, + "%s: write: cdb indicated=%u, IO sent=%d bytes\n", + my_name, num_by, ret); + + if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) && + atomic_read(&sdeb_inject_pending))) { + if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) { + mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0); + atomic_set(&sdeb_inject_pending, 0); + ret = check_condition_result; + goto err_out_unlock; + } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) { + /* Logical block guard check failed */ + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); + atomic_set(&sdeb_inject_pending, 0); + ret = illegal_condition_result; + goto err_out_unlock; + } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); + atomic_set(&sdeb_inject_pending, 0); + ret = illegal_condition_result; + goto err_out_unlock; + } + } + sg_off += num_by; + cum_lb += num; + } + ret = 0; +err_out_unlock: + sdeb_write_unlock(sip); +err_out: + kfree(lrdp); + return ret; +} + +static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, + u32 ei_lba, bool unmap, bool ndob) +{ + struct scsi_device *sdp = scp->device; + struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; + unsigned long long i; + u64 block, lbaa; + u32 lb_size = sdebug_sector_size; + int ret; + struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *) + scp->device->hostdata, true); + u8 *fs1p; + u8 *fsp; + + sdeb_write_lock(sip); + + ret = check_device_access_params(scp, lba, num, true); + if (ret) { + sdeb_write_unlock(sip); + return ret; + } + + if (unmap && scsi_debug_lbp()) { + unmap_region(sip, lba, num); + goto out; + } + lbaa = lba; + block = do_div(lbaa, sdebug_store_sectors); + /* if ndob then zero 1 logical block, else fetch 1 logical block */ + fsp = sip->storep; + fs1p = fsp + (block * lb_size); + if (ndob) { + memset(fs1p, 0, lb_size); + ret = 0; + } else + ret = fetch_to_dev_buffer(scp, fs1p, lb_size); + + if (-1 == ret) { + sdeb_write_unlock(sip); + return DID_ERROR << 16; + } else if (sdebug_verbose && !ndob && (ret < lb_size)) + sdev_printk(KERN_INFO, scp->device, + "%s: %s: lb size=%u, IO sent=%d bytes\n", + my_name, "write same", lb_size, ret); + + /* Copy first sector to remaining blocks */ + for (i = 1 ; i < num ; i++) { + lbaa = lba + i; + block = do_div(lbaa, sdebug_store_sectors); + memmove(fsp + (block * lb_size), fs1p, lb_size); + } + if (scsi_debug_lbp()) + map_region(sip, lba, num); + /* If ZBC zone then bump its write pointer */ + if (sdebug_dev_is_zoned(devip)) + zbc_inc_wp(devip, lba, 
num); +out: + sdeb_write_unlock(sip); + + return 0; +} + +static int resp_write_same_10(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + u8 *cmd = scp->cmnd; + u32 lba; + u16 num; + u32 ei_lba = 0; + bool unmap = false; + + if (cmd[1] & 0x8) { + if (sdebug_lbpws10 == 0) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3); + return check_condition_result; + } else + unmap = true; + } + lba = get_unaligned_be32(cmd + 2); + num = get_unaligned_be16(cmd + 7); + if (num > sdebug_write_same_length) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); + return check_condition_result; + } + return resp_write_same(scp, lba, num, ei_lba, unmap, false); +} + +static int resp_write_same_16(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + u8 *cmd = scp->cmnd; + u64 lba; + u32 num; + u32 ei_lba = 0; + bool unmap = false; + bool ndob = false; + + if (cmd[1] & 0x8) { /* UNMAP */ + if (sdebug_lbpws == 0) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3); + return check_condition_result; + } else + unmap = true; + } + if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */ + ndob = true; + lba = get_unaligned_be64(cmd + 2); + num = get_unaligned_be32(cmd + 10); + if (num > sdebug_write_same_length) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1); + return check_condition_result; + } + return resp_write_same(scp, lba, num, ei_lba, unmap, ndob); +} + +/* Note the mode field is in the same position as the (lower) service action + * field. For the Report supported operation codes command, SPC-4 suggests + * each mode of this command should be reported separately; for future. */ +static int resp_write_buffer(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + u8 *cmd = scp->cmnd; + struct scsi_device *sdp = scp->device; + struct sdebug_dev_info *dp; + u8 mode; + + mode = cmd[1] & 0x1f; + switch (mode) { + case 0x4: /* download microcode (MC) and activate (ACT) */ + /* set UAs on this device only */ + set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); + set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm); + break; + case 0x5: /* download MC, save and ACT */ + set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm); + break; + case 0x6: /* download MC with offsets and ACT */ + /* set UAs on most devices (LUs) in this target */ + list_for_each_entry(dp, + &devip->sdbg_host->dev_info_list, + dev_list) + if (dp->target == sdp->id) { + set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm); + if (devip != dp) + set_bit(SDEBUG_UA_MICROCODE_CHANGED, + dp->uas_bm); + } + break; + case 0x7: /* download MC with offsets, save, and ACT */ + /* set UA on all devices (LUs) in this target */ + list_for_each_entry(dp, + &devip->sdbg_host->dev_info_list, + dev_list) + if (dp->target == sdp->id) + set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, + dp->uas_bm); + break; + default: + /* do nothing for this command for other mode values */ + break; + } + return 0; +} + +static int resp_comp_write(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + u8 *cmd = scp->cmnd; + u8 *arr; + struct sdeb_store_info *sip = devip2sip(devip, true); + u64 lba; + u32 dnum; + u32 lb_size = sdebug_sector_size; + u8 num; + int ret; + int retval = 0; + + lba = get_unaligned_be64(cmd + 2); + num = cmd[13]; /* 1 to a maximum of 255 logical blocks */ + if (0 == num) + return 0; /* degenerate case, not an error */ + if (sdebug_dif == T10_PI_TYPE2_PROTECTION && + (cmd[1] & 0xe0)) { + mk_sense_invalid_opcode(scp); + return check_condition_result; + } + if ((sdebug_dif == T10_PI_TYPE1_PROTECTION || + sdebug_dif == 
T10_PI_TYPE3_PROTECTION) && + (cmd[1] & 0xe0) == 0) + sdev_printk(KERN_ERR, scp->device, "Unprotected WR " + "to DIF device\n"); + ret = check_device_access_params(scp, lba, num, false); + if (ret) + return ret; + dnum = 2 * num; + arr = kcalloc(lb_size, dnum, GFP_ATOMIC); + if (NULL == arr) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, + INSUFF_RES_ASCQ); + return check_condition_result; + } + + sdeb_write_lock(sip); + + ret = do_dout_fetch(scp, dnum, arr); + if (ret == -1) { + retval = DID_ERROR << 16; + goto cleanup; + } else if (sdebug_verbose && (ret < (dnum * lb_size))) + sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb " + "indicated=%u, IO sent=%d bytes\n", my_name, + dnum * lb_size, ret); + if (!comp_write_worker(sip, lba, num, arr, false)) { + mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0); + retval = check_condition_result; + goto cleanup; + } + if (scsi_debug_lbp()) + map_region(sip, lba, num); +cleanup: + sdeb_write_unlock(sip); + kfree(arr); + return retval; +} + +struct unmap_block_desc { + __be64 lba; + __be32 blocks; + __be32 __reserved; +}; + +static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + unsigned char *buf; + struct unmap_block_desc *desc; + struct sdeb_store_info *sip = devip2sip(devip, true); + unsigned int i, payload_len, descriptors; + int ret; + + if (!scsi_debug_lbp()) + return 0; /* fib and say its done */ + payload_len = get_unaligned_be16(scp->cmnd + 7); + BUG_ON(scsi_bufflen(scp) != payload_len); + + descriptors = (payload_len - 8) / 16; + if (descriptors > sdebug_unmap_max_desc) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); + return check_condition_result; + } + + buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC); + if (!buf) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, + INSUFF_RES_ASCQ); + return check_condition_result; + } + + scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp)); + + BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2); + BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16); + + desc = (void *)&buf[8]; + + sdeb_write_lock(sip); + + for (i = 0 ; i < descriptors ; i++) { + unsigned long long lba = get_unaligned_be64(&desc[i].lba); + unsigned int num = get_unaligned_be32(&desc[i].blocks); + + ret = check_device_access_params(scp, lba, num, true); + if (ret) + goto out; + + unmap_region(sip, lba, num); + } + + ret = 0; + +out: + sdeb_write_unlock(sip); + kfree(buf); + + return ret; +} + +#define SDEBUG_GET_LBA_STATUS_LEN 32 + +static int resp_get_lba_status(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + u8 *cmd = scp->cmnd; + u64 lba; + u32 alloc_len, mapped, num; + int ret; + u8 arr[SDEBUG_GET_LBA_STATUS_LEN]; + + lba = get_unaligned_be64(cmd + 2); + alloc_len = get_unaligned_be32(cmd + 10); + + if (alloc_len < 24) + return 0; + + ret = check_device_access_params(scp, lba, 1, false); + if (ret) + return ret; + + if (scsi_debug_lbp()) { + struct sdeb_store_info *sip = devip2sip(devip, true); + + mapped = map_state(sip, lba, &num); + } else { + mapped = 1; + /* following just in case virtual_gb changed */ + sdebug_capacity = get_sdebug_capacity(); + if (sdebug_capacity - lba <= 0xffffffff) + num = sdebug_capacity - lba; + else + num = 0xffffffff; + } + + memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN); + put_unaligned_be32(20, arr); /* Parameter Data Length */ + put_unaligned_be64(lba, arr + 8); /* LBA */ + put_unaligned_be32(num, arr + 16); /* Number of blocks */ + arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */ + + return 
fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN); +} + +static int resp_sync_cache(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + int res = 0; + u64 lba; + u32 num_blocks; + u8 *cmd = scp->cmnd; + + if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */ + lba = get_unaligned_be32(cmd + 2); + num_blocks = get_unaligned_be16(cmd + 7); + } else { /* SYNCHRONIZE_CACHE(16) */ + lba = get_unaligned_be64(cmd + 2); + num_blocks = get_unaligned_be32(cmd + 10); + } + if (lba + num_blocks > sdebug_capacity) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); + return check_condition_result; + } + if (!write_since_sync || (cmd[1] & 0x2)) + res = SDEG_RES_IMMED_MASK; + else /* delay if write_since_sync and IMMED clear */ + write_since_sync = false; + return res; +} + +/* + * Assuming the LBA+num_blocks is not out-of-range, this function will return + * CONDITION MET if the specified blocks will/have fitted in the cache, and + * a GOOD status otherwise. Model a disk with a big cache and yield + * CONDITION MET. Actually tries to bring range in main memory into the + * cache associated with the CPU(s). + */ +static int resp_pre_fetch(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + int res = 0; + u64 lba; + u64 block, rest = 0; + u32 nblks; + u8 *cmd = scp->cmnd; + struct sdeb_store_info *sip = devip2sip(devip, true); + u8 *fsp = sip->storep; + + if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */ + lba = get_unaligned_be32(cmd + 2); + nblks = get_unaligned_be16(cmd + 7); + } else { /* PRE-FETCH(16) */ + lba = get_unaligned_be64(cmd + 2); + nblks = get_unaligned_be32(cmd + 10); + } + if (lba + nblks > sdebug_capacity) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); + return check_condition_result; + } + if (!fsp) + goto fini; + /* PRE-FETCH spec says nothing about LBP or PI so skip them */ + block = do_div(lba, sdebug_store_sectors); + if (block + nblks > sdebug_store_sectors) + rest = block + nblks - sdebug_store_sectors; + + /* Try to bring the PRE-FETCH range into CPU's cache */ + sdeb_read_lock(sip); + prefetch_range(fsp + (sdebug_sector_size * block), + (nblks - rest) * sdebug_sector_size); + if (rest) + prefetch_range(fsp, rest * sdebug_sector_size); + sdeb_read_unlock(sip); +fini: + if (cmd[1] & 0x2) + res = SDEG_RES_IMMED_MASK; + return res | condition_met_result; +} + +#define RL_BUCKET_ELEMS 8 + +/* Even though each pseudo target has a REPORT LUNS "well known logical unit" + * (W-LUN), the normal Linux scanning logic does not associate it with a + * device (e.g. /dev/sg7). The following magic will make that association: + * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan" + * where <n> is a host number. If there are multiple targets in a host then + the above will associate a W-LUN to each target. To only get a W-LUN + for target 2, then use "echo '- 2 49409' > scan" .
+ */ +static int resp_report_luns(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + unsigned char *cmd = scp->cmnd; + unsigned int alloc_len; + unsigned char select_report; + u64 lun; + struct scsi_lun *lun_p; + u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)]; + unsigned int lun_cnt; /* normal LUN count (max: 256) */ + unsigned int wlun_cnt; /* report luns W-LUN count */ + unsigned int tlun_cnt; /* total LUN count */ + unsigned int rlen; /* response length (in bytes) */ + int k, j, n, res; + unsigned int off_rsp = 0; + const int sz_lun = sizeof(struct scsi_lun); + + clear_luns_changed_on_target(devip); + + select_report = cmd[2]; + alloc_len = get_unaligned_be32(cmd + 6); + + if (alloc_len < 4) { + pr_err("alloc len too small %d\n", alloc_len); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); + return check_condition_result; + } + + switch (select_report) { + case 0: /* all LUNs apart from W-LUNs */ + lun_cnt = sdebug_max_luns; + wlun_cnt = 0; + break; + case 1: /* only W-LUNs */ + lun_cnt = 0; + wlun_cnt = 1; + break; + case 2: /* all LUNs */ + lun_cnt = sdebug_max_luns; + wlun_cnt = 1; + break; + case 0x10: /* only administrative LUs */ + case 0x11: /* see SPC-5 */ + case 0x12: /* only subsiduary LUs owned by referenced LU */ + default: + pr_debug("select report invalid %d\n", select_report); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); + return check_condition_result; + } + + if (sdebug_no_lun_0 && (lun_cnt > 0)) + --lun_cnt; + + tlun_cnt = lun_cnt + wlun_cnt; + rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */ + scsi_set_resid(scp, scsi_bufflen(scp)); + pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n", + select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0); + + /* loops rely on sizeof response header same as sizeof lun (both 8) */ + lun = sdebug_no_lun_0 ? 
1 : 0; + for (k = 0, j = 0, res = 0; true; ++k, j = 0) { + memset(arr, 0, sizeof(arr)); + lun_p = (struct scsi_lun *)&arr[0]; + if (k == 0) { + put_unaligned_be32(rlen, &arr[0]); + ++lun_p; + j = 1; + } + for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) { + if ((k * RL_BUCKET_ELEMS) + j > lun_cnt) + break; + int_to_scsilun(lun++, lun_p); + if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT) + lun_p->scsi_lun[0] |= 0x40; + } + if (j < RL_BUCKET_ELEMS) + break; + n = j * sz_lun; + res = p_fill_from_dev_buffer(scp, arr, n, off_rsp); + if (res) + return res; + off_rsp += n; + } + if (wlun_cnt) { + int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p); + ++j; + } + if (j > 0) + res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp); + return res; +} + +static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + bool is_bytchk3 = false; + u8 bytchk; + int ret, j; + u32 vnum, a_num, off; + const u32 lb_size = sdebug_sector_size; + u64 lba; + u8 *arr; + u8 *cmd = scp->cmnd; + struct sdeb_store_info *sip = devip2sip(devip, true); + + bytchk = (cmd[1] >> 1) & 0x3; + if (bytchk == 0) { + return 0; /* always claim internal verify okay */ + } else if (bytchk == 2) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2); + return check_condition_result; + } else if (bytchk == 3) { + is_bytchk3 = true; /* 1 block sent, compared repeatedly */ + } + switch (cmd[0]) { + case VERIFY_16: + lba = get_unaligned_be64(cmd + 2); + vnum = get_unaligned_be32(cmd + 10); + break; + case VERIFY: /* is VERIFY(10) */ + lba = get_unaligned_be32(cmd + 2); + vnum = get_unaligned_be16(cmd + 7); + break; + default: + mk_sense_invalid_opcode(scp); + return check_condition_result; + } + if (vnum == 0) + return 0; /* not an error */ + a_num = is_bytchk3 ? 1 : vnum; + /* Treat following check like one for read (i.e. 
no write) access */ + ret = check_device_access_params(scp, lba, a_num, false); + if (ret) + return ret; + + arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN); + if (!arr) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, + INSUFF_RES_ASCQ); + return check_condition_result; + } + /* Not changing store, so only need read access */ + sdeb_read_lock(sip); + + ret = do_dout_fetch(scp, a_num, arr); + if (ret == -1) { + ret = DID_ERROR << 16; + goto cleanup; + } else if (sdebug_verbose && (ret < (a_num * lb_size))) { + sdev_printk(KERN_INFO, scp->device, + "%s: %s: cdb indicated=%u, IO sent=%d bytes\n", + my_name, __func__, a_num * lb_size, ret); + } + if (is_bytchk3) { + for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size) + memcpy(arr + off, arr, lb_size); + } + ret = 0; + if (!comp_write_worker(sip, lba, vnum, arr, true)) { + mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0); + ret = check_condition_result; + goto cleanup; + } +cleanup: + sdeb_read_unlock(sip); + kfree(arr); + return ret; +} + +#define RZONES_DESC_HD 64 + +/* Report zones depending on start LBA and reporting options */ +static int resp_report_zones(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + unsigned int rep_max_zones, nrz = 0; + int ret = 0; + u32 alloc_len, rep_opts, rep_len; + bool partial; + u64 lba, zs_lba; + u8 *arr = NULL, *desc; + u8 *cmd = scp->cmnd; + struct sdeb_zone_state *zsp = NULL; + struct sdeb_store_info *sip = devip2sip(devip, false); + + if (!sdebug_dev_is_zoned(devip)) { + mk_sense_invalid_opcode(scp); + return check_condition_result; + } + zs_lba = get_unaligned_be64(cmd + 2); + alloc_len = get_unaligned_be32(cmd + 10); + if (alloc_len == 0) + return 0; /* not an error */ + rep_opts = cmd[14] & 0x3f; + partial = cmd[14] & 0x80; + + if (zs_lba >= sdebug_capacity) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); + return check_condition_result; + } + + rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD); + + arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN); + if (!arr) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, + INSUFF_RES_ASCQ); + return check_condition_result; + } + + sdeb_read_lock(sip); + + desc = arr + 64; + for (lba = zs_lba; lba < sdebug_capacity; + lba = zsp->z_start + zsp->z_size) { + if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba)) + break; + zsp = zbc_zone(devip, lba); + switch (rep_opts) { + case 0x00: + /* All zones */ + break; + case 0x01: + /* Empty zones */ + if (zsp->z_cond != ZC1_EMPTY) + continue; + break; + case 0x02: + /* Implicit open zones */ + if (zsp->z_cond != ZC2_IMPLICIT_OPEN) + continue; + break; + case 0x03: + /* Explicit open zones */ + if (zsp->z_cond != ZC3_EXPLICIT_OPEN) + continue; + break; + case 0x04: + /* Closed zones */ + if (zsp->z_cond != ZC4_CLOSED) + continue; + break; + case 0x05: + /* Full zones */ + if (zsp->z_cond != ZC5_FULL) + continue; + break; + case 0x06: + case 0x07: + case 0x10: + /* + * Read-only, offline, reset WP recommended are + * not emulated: no zones to report; + */ + continue; + case 0x11: + /* non-seq-resource set */ + if (!zsp->z_non_seq_resource) + continue; + break; + case 0x3e: + /* All zones except gap zones. 
*/ + if (zbc_zone_is_gap(zsp)) + continue; + break; + case 0x3f: + /* Not write pointer (conventional) zones */ + if (zbc_zone_is_seq(zsp)) + continue; + break; + default: + mk_sense_buffer(scp, ILLEGAL_REQUEST, + INVALID_FIELD_IN_CDB, 0); + ret = check_condition_result; + goto fini; + } + + if (nrz < rep_max_zones) { + /* Fill zone descriptor */ + desc[0] = zsp->z_type; + desc[1] = zsp->z_cond << 4; + if (zsp->z_non_seq_resource) + desc[1] |= 1 << 1; + put_unaligned_be64((u64)zsp->z_size, desc + 8); + put_unaligned_be64((u64)zsp->z_start, desc + 16); + put_unaligned_be64((u64)zsp->z_wp, desc + 24); + desc += 64; + } + + if (partial && nrz >= rep_max_zones) + break; + + nrz++; + } + + /* Report header */ + /* Zone list length. */ + put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0); + /* Maximum LBA */ + put_unaligned_be64(sdebug_capacity - 1, arr + 8); + /* Zone starting LBA granularity. */ + if (devip->zcap < devip->zsize) + put_unaligned_be64(devip->zsize, arr + 16); + + rep_len = (unsigned long)desc - (unsigned long)arr; + ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len)); + +fini: + sdeb_read_unlock(sip); + kfree(arr); + return ret; +} + +/* Logic transplanted from tcmu-runner, file_zbc.c */ +static void zbc_open_all(struct sdebug_dev_info *devip) +{ + struct sdeb_zone_state *zsp = &devip->zstate[0]; + unsigned int i; + + for (i = 0; i < devip->nr_zones; i++, zsp++) { + if (zsp->z_cond == ZC4_CLOSED) + zbc_open_zone(devip, &devip->zstate[i], true); + } +} + +static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + int res = 0; + u64 z_id; + enum sdebug_z_cond zc; + u8 *cmd = scp->cmnd; + struct sdeb_zone_state *zsp; + bool all = cmd[14] & 0x01; + struct sdeb_store_info *sip = devip2sip(devip, false); + + if (!sdebug_dev_is_zoned(devip)) { + mk_sense_invalid_opcode(scp); + return check_condition_result; + } + + sdeb_write_lock(sip); + + if (all) { + /* Check if all closed zones can be open */ + if (devip->max_open && + devip->nr_exp_open + devip->nr_closed > devip->max_open) { + mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC, + INSUFF_ZONE_ASCQ); + res = check_condition_result; + goto fini; + } + /* Open all closed zones */ + zbc_open_all(devip); + goto fini; + } + + /* Open the specified zone */ + z_id = get_unaligned_be64(cmd + 2); + if (z_id >= sdebug_capacity) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); + res = check_condition_result; + goto fini; + } + + zsp = zbc_zone(devip, z_id); + if (z_id != zsp->z_start) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); + res = check_condition_result; + goto fini; + } + if (zbc_zone_is_conv(zsp)) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); + res = check_condition_result; + goto fini; + } + + zc = zsp->z_cond; + if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL) + goto fini; + + if (devip->max_open && devip->nr_exp_open >= devip->max_open) { + mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC, + INSUFF_ZONE_ASCQ); + res = check_condition_result; + goto fini; + } + + zbc_open_zone(devip, zsp, true); +fini: + sdeb_write_unlock(sip); + return res; +} + +static void zbc_close_all(struct sdebug_dev_info *devip) +{ + unsigned int i; + + for (i = 0; i < devip->nr_zones; i++) + zbc_close_zone(devip, &devip->zstate[i]); +} + +static int resp_close_zone(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + int res = 0; + u64 z_id; + u8 *cmd = scp->cmnd; + struct sdeb_zone_state *zsp; + bool all = cmd[14] & 0x01; + struct sdeb_store_info 
*sip = devip2sip(devip, false); + + if (!sdebug_dev_is_zoned(devip)) { + mk_sense_invalid_opcode(scp); + return check_condition_result; + } + + sdeb_write_lock(sip); + + if (all) { + zbc_close_all(devip); + goto fini; + } + + /* Close specified zone */ + z_id = get_unaligned_be64(cmd + 2); + if (z_id >= sdebug_capacity) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); + res = check_condition_result; + goto fini; + } + + zsp = zbc_zone(devip, z_id); + if (z_id != zsp->z_start) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); + res = check_condition_result; + goto fini; + } + if (zbc_zone_is_conv(zsp)) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); + res = check_condition_result; + goto fini; + } + + zbc_close_zone(devip, zsp); +fini: + sdeb_write_unlock(sip); + return res; +} + +static void zbc_finish_zone(struct sdebug_dev_info *devip, + struct sdeb_zone_state *zsp, bool empty) +{ + enum sdebug_z_cond zc = zsp->z_cond; + + if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN || + zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) { + if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN) + zbc_close_zone(devip, zsp); + if (zsp->z_cond == ZC4_CLOSED) + devip->nr_closed--; + zsp->z_wp = zsp->z_start + zsp->z_size; + zsp->z_cond = ZC5_FULL; + } +} + +static void zbc_finish_all(struct sdebug_dev_info *devip) +{ + unsigned int i; + + for (i = 0; i < devip->nr_zones; i++) + zbc_finish_zone(devip, &devip->zstate[i], false); +} + +static int resp_finish_zone(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + struct sdeb_zone_state *zsp; + int res = 0; + u64 z_id; + u8 *cmd = scp->cmnd; + bool all = cmd[14] & 0x01; + struct sdeb_store_info *sip = devip2sip(devip, false); + + if (!sdebug_dev_is_zoned(devip)) { + mk_sense_invalid_opcode(scp); + return check_condition_result; + } + + sdeb_write_lock(sip); + + if (all) { + zbc_finish_all(devip); + goto fini; + } + + /* Finish the specified zone */ + z_id = get_unaligned_be64(cmd + 2); + if (z_id >= sdebug_capacity) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); + res = check_condition_result; + goto fini; + } + + zsp = zbc_zone(devip, z_id); + if (z_id != zsp->z_start) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); + res = check_condition_result; + goto fini; + } + if (zbc_zone_is_conv(zsp)) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); + res = check_condition_result; + goto fini; + } + + zbc_finish_zone(devip, zsp, true); +fini: + sdeb_write_unlock(sip); + return res; +} + +static void zbc_rwp_zone(struct sdebug_dev_info *devip, + struct sdeb_zone_state *zsp) +{ + enum sdebug_z_cond zc; + struct sdeb_store_info *sip = devip2sip(devip, false); + + if (!zbc_zone_is_seq(zsp)) + return; + + zc = zsp->z_cond; + if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN) + zbc_close_zone(devip, zsp); + + if (zsp->z_cond == ZC4_CLOSED) + devip->nr_closed--; + + if (zsp->z_wp > zsp->z_start) + memset(sip->storep + zsp->z_start * sdebug_sector_size, 0, + (zsp->z_wp - zsp->z_start) * sdebug_sector_size); + + zsp->z_non_seq_resource = false; + zsp->z_wp = zsp->z_start; + zsp->z_cond = ZC1_EMPTY; +} + +static void zbc_rwp_all(struct sdebug_dev_info *devip) +{ + unsigned int i; + + for (i = 0; i < devip->nr_zones; i++) + zbc_rwp_zone(devip, &devip->zstate[i]); +} + +static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + struct sdeb_zone_state *zsp; + int res = 0; + u64 z_id; + u8 *cmd = scp->cmnd; + bool all = 
cmd[14] & 0x01; + struct sdeb_store_info *sip = devip2sip(devip, false); + + if (!sdebug_dev_is_zoned(devip)) { + mk_sense_invalid_opcode(scp); + return check_condition_result; + } + + sdeb_write_lock(sip); + + if (all) { + zbc_rwp_all(devip); + goto fini; + } + + z_id = get_unaligned_be64(cmd + 2); + if (z_id >= sdebug_capacity) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); + res = check_condition_result; + goto fini; + } + + zsp = zbc_zone(devip, z_id); + if (z_id != zsp->z_start) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); + res = check_condition_result; + goto fini; + } + if (zbc_zone_is_conv(zsp)) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); + res = check_condition_result; + goto fini; + } + + zbc_rwp_zone(devip, zsp); +fini: + sdeb_write_unlock(sip); + return res; +} + +static u32 get_tag(struct scsi_cmnd *cmnd) +{ + return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)); +} + +/* Queued (deferred) command completions converge here. */ +static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp) +{ + struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp); + unsigned long flags; + struct scsi_cmnd *scp = sqcp->scmd; + struct sdebug_scsi_cmd *sdsc; + bool aborted; + + if (sdebug_statistics) { + atomic_inc(&sdebug_completions); + if (raw_smp_processor_id() != sd_dp->issuing_cpu) + atomic_inc(&sdebug_miss_cpus); + } + + if (!scp) { + pr_err("scmd=NULL\n"); + goto out; + } + + sdsc = scsi_cmd_priv(scp); + spin_lock_irqsave(&sdsc->lock, flags); + aborted = sd_dp->aborted; + if (unlikely(aborted)) + sd_dp->aborted = false; + ASSIGN_QUEUED_CMD(scp, NULL); + + spin_unlock_irqrestore(&sdsc->lock, flags); + + if (aborted) { + pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n"); + blk_abort_request(scsi_cmd_to_rq(scp)); + goto out; + } + + scsi_done(scp); /* callback to mid level */ +out: + sdebug_free_queued_cmd(sqcp); +} + +/* When high resolution timer goes off this function is called. */ +static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer) +{ + struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer, + hrt); + sdebug_q_cmd_complete(sd_dp); + return HRTIMER_NORESTART; +} + +/* When work queue schedules work, it calls this function. */ +static void sdebug_q_cmd_wq_complete(struct work_struct *work) +{ + struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer, + ew.work); + sdebug_q_cmd_complete(sd_dp); +} + +static bool got_shared_uuid; +static uuid_t shared_uuid; + +static int sdebug_device_create_zones(struct sdebug_dev_info *devip) +{ + struct sdeb_zone_state *zsp; + sector_t capacity = get_sdebug_capacity(); + sector_t conv_capacity; + sector_t zstart = 0; + unsigned int i; + + /* + * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out + * a zone size allowing for at least 4 zones on the device. Otherwise, + * use the specified zone size checking that at least 2 zones can be + * created for the device. 
+ */ + if (!sdeb_zbc_zone_size_mb) { + devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M) + >> ilog2(sdebug_sector_size); + while (capacity < devip->zsize << 2 && devip->zsize >= 2) + devip->zsize >>= 1; + if (devip->zsize < 2) { + pr_err("Device capacity too small\n"); + return -EINVAL; + } + } else { + if (!is_power_of_2(sdeb_zbc_zone_size_mb)) { + pr_err("Zone size is not a power of 2\n"); + return -EINVAL; + } + devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M) + >> ilog2(sdebug_sector_size); + if (devip->zsize >= capacity) { + pr_err("Zone size too large for device capacity\n"); + return -EINVAL; + } + } + + devip->zsize_shift = ilog2(devip->zsize); + devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift; + + if (sdeb_zbc_zone_cap_mb == 0) { + devip->zcap = devip->zsize; + } else { + devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >> + ilog2(sdebug_sector_size); + if (devip->zcap > devip->zsize) { + pr_err("Zone capacity too large\n"); + return -EINVAL; + } + } + + conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift; + if (conv_capacity >= capacity) { + pr_err("Number of conventional zones too large\n"); + return -EINVAL; + } + devip->nr_conv_zones = sdeb_zbc_nr_conv; + devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >> + devip->zsize_shift; + devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones; + + /* Add gap zones if zone capacity is smaller than the zone size */ + if (devip->zcap < devip->zsize) + devip->nr_zones += devip->nr_seq_zones; + + if (devip->zmodel == BLK_ZONED_HM) { + /* zbc_max_open_zones can be 0, meaning "not reported" */ + if (sdeb_zbc_max_open >= devip->nr_zones - 1) + devip->max_open = (devip->nr_zones - 1) / 2; + else + devip->max_open = sdeb_zbc_max_open; + } + + devip->zstate = kcalloc(devip->nr_zones, + sizeof(struct sdeb_zone_state), GFP_KERNEL); + if (!devip->zstate) + return -ENOMEM; + + for (i = 0; i < devip->nr_zones; i++) { + zsp = &devip->zstate[i]; + + zsp->z_start = zstart; + + if (i < devip->nr_conv_zones) { + zsp->z_type = ZBC_ZTYPE_CNV; + zsp->z_cond = ZBC_NOT_WRITE_POINTER; + zsp->z_wp = (sector_t)-1; + zsp->z_size = + min_t(u64, devip->zsize, capacity - zstart); + } else if ((zstart & (devip->zsize - 1)) == 0) { + if (devip->zmodel == BLK_ZONED_HM) + zsp->z_type = ZBC_ZTYPE_SWR; + else + zsp->z_type = ZBC_ZTYPE_SWP; + zsp->z_cond = ZC1_EMPTY; + zsp->z_wp = zsp->z_start; + zsp->z_size = + min_t(u64, devip->zcap, capacity - zstart); + } else { + zsp->z_type = ZBC_ZTYPE_GAP; + zsp->z_cond = ZBC_NOT_WRITE_POINTER; + zsp->z_wp = (sector_t)-1; + zsp->z_size = min_t(u64, devip->zsize - devip->zcap, + capacity - zstart); + } + + WARN_ON_ONCE((int)zsp->z_size <= 0); + zstart += zsp->z_size; + } + + return 0; +} + +static struct sdebug_dev_info *sdebug_device_create( + struct sdebug_host_info *sdbg_host, gfp_t flags) +{ + struct sdebug_dev_info *devip; + + devip = kzalloc(sizeof(*devip), flags); + if (devip) { + if (sdebug_uuid_ctl == 1) + uuid_gen(&devip->lu_name); + else if (sdebug_uuid_ctl == 2) { + if (got_shared_uuid) + devip->lu_name = shared_uuid; + else { + uuid_gen(&shared_uuid); + got_shared_uuid = true; + devip->lu_name = shared_uuid; + } + } + devip->sdbg_host = sdbg_host; + if (sdeb_zbc_in_use) { + devip->zmodel = sdeb_zbc_model; + if (sdebug_device_create_zones(devip)) { + kfree(devip); + return NULL; + } + } else { + devip->zmodel = BLK_ZONED_NONE; + } + devip->create_ts = ktime_get_boottime(); + atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 
2 : 0)); + list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list); + } + return devip; +} + +static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev) +{ + struct sdebug_host_info *sdbg_host; + struct sdebug_dev_info *open_devip = NULL; + struct sdebug_dev_info *devip; + + sdbg_host = shost_to_sdebug_host(sdev->host); + + list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { + if ((devip->used) && (devip->channel == sdev->channel) && + (devip->target == sdev->id) && + (devip->lun == sdev->lun)) + return devip; + else { + if ((!devip->used) && (!open_devip)) + open_devip = devip; + } + } + if (!open_devip) { /* try and make a new one */ + open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC); + if (!open_devip) { + pr_err("out of memory at line %d\n", __LINE__); + return NULL; + } + } + + open_devip->channel = sdev->channel; + open_devip->target = sdev->id; + open_devip->lun = sdev->lun; + open_devip->sdbg_host = sdbg_host; + set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm); + open_devip->used = true; + return open_devip; +} + +static int scsi_debug_slave_alloc(struct scsi_device *sdp) +{ + if (sdebug_verbose) + pr_info("slave_alloc <%u %u %u %llu>\n", + sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); + return 0; +} + +static int scsi_debug_slave_configure(struct scsi_device *sdp) +{ + struct sdebug_dev_info *devip = + (struct sdebug_dev_info *)sdp->hostdata; + + if (sdebug_verbose) + pr_info("slave_configure <%u %u %u %llu>\n", + sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); + if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN) + sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN; + if (devip == NULL) { + devip = find_build_dev_info(sdp); + if (devip == NULL) + return 1; /* no resources, will be marked offline */ + } + sdp->hostdata = devip; + if (sdebug_no_uld) + sdp->no_uld_attach = 1; + config_cdb_len(sdp); + return 0; +} + +static void scsi_debug_slave_destroy(struct scsi_device *sdp) +{ + struct sdebug_dev_info *devip = + (struct sdebug_dev_info *)sdp->hostdata; + + if (sdebug_verbose) + pr_info("slave_destroy <%u %u %u %llu>\n", + sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); + if (devip) { + /* make this slot available for re-use */ + devip->used = false; + sdp->hostdata = NULL; + } +} + +/* Returns true if we require the queued memory to be freed by the caller. 
*/ +static bool stop_qc_helper(struct sdebug_defer *sd_dp, + enum sdeb_defer_type defer_t) +{ + if (defer_t == SDEB_DEFER_HRT) { + int res = hrtimer_try_to_cancel(&sd_dp->hrt); + + switch (res) { + case 0: /* Not active, it must have already run */ + case -1: /* -1 It's executing the CB */ + return false; + case 1: /* Was active, we've now cancelled */ + default: + return true; + } + } else if (defer_t == SDEB_DEFER_WQ) { + /* Cancel if pending */ + if (cancel_work_sync(&sd_dp->ew.work)) + return true; + /* Was not pending, so it must have run */ + return false; + } else if (defer_t == SDEB_DEFER_POLL) { + return true; + } + + return false; +} + + +static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd) +{ + enum sdeb_defer_type l_defer_t; + struct sdebug_defer *sd_dp; + struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd); + struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd); + + lockdep_assert_held(&sdsc->lock); + + if (!sqcp) + return false; + sd_dp = &sqcp->sd_dp; + l_defer_t = READ_ONCE(sd_dp->defer_t); + ASSIGN_QUEUED_CMD(cmnd, NULL); + + if (stop_qc_helper(sd_dp, l_defer_t)) + sdebug_free_queued_cmd(sqcp); + + return true; +} + +/* + * Called from scsi_debug_abort() only, which is for timed-out cmd. + */ +static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd) +{ + struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd); + unsigned long flags; + bool res; + + spin_lock_irqsave(&sdsc->lock, flags); + res = scsi_debug_stop_cmnd(cmnd); + spin_unlock_irqrestore(&sdsc->lock, flags); + + return res; +} + +/* + * All we can do is set the cmnd as internally aborted and wait for it to + * finish. We cannot call scsi_done() as normal completion path may do that. + */ +static bool sdebug_stop_cmnd(struct request *rq, void *data) +{ + scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq)); + + return true; +} + +/* Deletes (stops) timers or work queues of all queued commands */ +static void stop_all_queued(void) +{ + struct sdebug_host_info *sdhp; + + mutex_lock(&sdebug_host_list_mutex); + list_for_each_entry(sdhp, &sdebug_host_list, host_list) { + struct Scsi_Host *shost = sdhp->shost; + + blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL); + } + mutex_unlock(&sdebug_host_list_mutex); +} + +static int scsi_debug_abort(struct scsi_cmnd *SCpnt) +{ + bool ok = scsi_debug_abort_cmnd(SCpnt); + + ++num_aborts; + + if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) + sdev_printk(KERN_INFO, SCpnt->device, + "%s: command%s found\n", __func__, + ok ? 
"" : " not"); + + return SUCCESS; +} + +static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data) +{ + struct scsi_device *sdp = data; + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); + + if (scmd->device == sdp) + scsi_debug_abort_cmnd(scmd); + + return true; +} + +/* Deletes (stops) timers or work queues of all queued commands per sdev */ +static void scsi_debug_stop_all_queued(struct scsi_device *sdp) +{ + struct Scsi_Host *shost = sdp->host; + + blk_mq_tagset_busy_iter(&shost->tag_set, + scsi_debug_stop_all_queued_iter, sdp); +} + +static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt) +{ + struct scsi_device *sdp = SCpnt->device; + struct sdebug_dev_info *devip = sdp->hostdata; + + ++num_dev_resets; + + if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) + sdev_printk(KERN_INFO, sdp, "%s\n", __func__); + + scsi_debug_stop_all_queued(sdp); + if (devip) + set_bit(SDEBUG_UA_POR, devip->uas_bm); + + return SUCCESS; +} + +static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt) +{ + struct scsi_device *sdp = SCpnt->device; + struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host); + struct sdebug_dev_info *devip; + int k = 0; + + ++num_target_resets; + if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) + sdev_printk(KERN_INFO, sdp, "%s\n", __func__); + + list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { + if (devip->target == sdp->id) { + set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); + ++k; + } + } + + if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) + sdev_printk(KERN_INFO, sdp, + "%s: %d device(s) found in target\n", __func__, k); + + return SUCCESS; +} + +static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt) +{ + struct scsi_device *sdp = SCpnt->device; + struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host); + struct sdebug_dev_info *devip; + int k = 0; + + ++num_bus_resets; + + if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) + sdev_printk(KERN_INFO, sdp, "%s\n", __func__); + + list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { + set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); + ++k; + } + + if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) + sdev_printk(KERN_INFO, sdp, + "%s: %d device(s) found in host\n", __func__, k); + return SUCCESS; +} + +static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt) +{ + struct sdebug_host_info *sdbg_host; + struct sdebug_dev_info *devip; + int k = 0; + + ++num_host_resets; + if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) + sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__); + mutex_lock(&sdebug_host_list_mutex); + list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { + list_for_each_entry(devip, &sdbg_host->dev_info_list, + dev_list) { + set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); + ++k; + } + } + mutex_unlock(&sdebug_host_list_mutex); + stop_all_queued(); + if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) + sdev_printk(KERN_INFO, SCpnt->device, + "%s: %d device(s) found\n", __func__, k); + return SUCCESS; +} + +static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size) +{ + struct msdos_partition *pp; + int starts[SDEBUG_MAX_PARTS + 2], max_part_secs; + int sectors_per_part, num_sectors, k; + int heads_by_sects, start_sec, end_sec; + + /* assume partition table already zeroed */ + if ((sdebug_num_parts < 1) || (store_size < 1048576)) + return; + if (sdebug_num_parts > SDEBUG_MAX_PARTS) { + sdebug_num_parts = SDEBUG_MAX_PARTS; + pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS); + } + num_sectors = (int)get_sdebug_capacity(); + sectors_per_part = (num_sectors - sdebug_sectors_per) + / 
sdebug_num_parts; + heads_by_sects = sdebug_heads * sdebug_sectors_per; + starts[0] = sdebug_sectors_per; + max_part_secs = sectors_per_part; + for (k = 1; k < sdebug_num_parts; ++k) { + starts[k] = ((k * sectors_per_part) / heads_by_sects) + * heads_by_sects; + if (starts[k] - starts[k - 1] < max_part_secs) + max_part_secs = starts[k] - starts[k - 1]; + } + starts[sdebug_num_parts] = num_sectors; + starts[sdebug_num_parts + 1] = 0; + + ramp[510] = 0x55; /* magic partition markings */ + ramp[511] = 0xAA; + pp = (struct msdos_partition *)(ramp + 0x1be); + for (k = 0; starts[k + 1]; ++k, ++pp) { + start_sec = starts[k]; + end_sec = starts[k] + max_part_secs - 1; + pp->boot_ind = 0; + + pp->cyl = start_sec / heads_by_sects; + pp->head = (start_sec - (pp->cyl * heads_by_sects)) + / sdebug_sectors_per; + pp->sector = (start_sec % sdebug_sectors_per) + 1; + + pp->end_cyl = end_sec / heads_by_sects; + pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects)) + / sdebug_sectors_per; + pp->end_sector = (end_sec % sdebug_sectors_per) + 1; + + pp->start_sect = cpu_to_le32(start_sec); + pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1); + pp->sys_ind = 0x83; /* plain Linux partition */ + } +} + +static void block_unblock_all_queues(bool block) +{ + struct sdebug_host_info *sdhp; + + lockdep_assert_held(&sdebug_host_list_mutex); + + list_for_each_entry(sdhp, &sdebug_host_list, host_list) { + struct Scsi_Host *shost = sdhp->shost; + + if (block) + scsi_block_requests(shost); + else + scsi_unblock_requests(shost); + } +} + +/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1 + * commands will be processed normally before triggers occur. + */ +static void tweak_cmnd_count(void) +{ + int count, modulo; + + modulo = abs(sdebug_every_nth); + if (modulo < 2) + return; + + mutex_lock(&sdebug_host_list_mutex); + block_unblock_all_queues(true); + count = atomic_read(&sdebug_cmnd_count); + atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo); + block_unblock_all_queues(false); + mutex_unlock(&sdebug_host_list_mutex); +} + +static void clear_queue_stats(void) +{ + atomic_set(&sdebug_cmnd_count, 0); + atomic_set(&sdebug_completions, 0); + atomic_set(&sdebug_miss_cpus, 0); + atomic_set(&sdebug_a_tsf, 0); +} + +static bool inject_on_this_cmd(void) +{ + if (sdebug_every_nth == 0) + return false; + return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0; +} + +#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */ + + +void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp) +{ + if (sqcp) + kmem_cache_free(queued_cmd_cache, sqcp); +} + +static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd) +{ + struct sdebug_queued_cmd *sqcp; + struct sdebug_defer *sd_dp; + + sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC); + if (!sqcp) + return NULL; + + sd_dp = &sqcp->sd_dp; + + hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); + sd_dp->hrt.function = sdebug_q_cmd_hrt_complete; + INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete); + + sqcp->scmd = scmd; + + return sqcp; +} + +/* Complete the processing of the thread that queued a SCSI command to this + * driver. It either completes the command by calling cmnd_done() or + * schedules a hr timer or work queue then returns 0. Returns + * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources. 
+ */ +static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, + int scsi_result, + int (*pfp)(struct scsi_cmnd *, + struct sdebug_dev_info *), + int delta_jiff, int ndelay) +{ + struct request *rq = scsi_cmd_to_rq(cmnd); + bool polled = rq->cmd_flags & REQ_POLLED; + struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd); + unsigned long flags; + u64 ns_from_boot = 0; + struct sdebug_queued_cmd *sqcp; + struct scsi_device *sdp; + struct sdebug_defer *sd_dp; + + if (unlikely(devip == NULL)) { + if (scsi_result == 0) + scsi_result = DID_NO_CONNECT << 16; + goto respond_in_thread; + } + sdp = cmnd->device; + + if (delta_jiff == 0) + goto respond_in_thread; + + + if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) && + (scsi_result == 0))) { + int num_in_q = scsi_device_busy(sdp); + int qdepth = cmnd->device->queue_depth; + + if ((num_in_q == qdepth) && + (atomic_inc_return(&sdebug_a_tsf) >= + abs(sdebug_every_nth))) { + atomic_set(&sdebug_a_tsf, 0); + scsi_result = device_qfull_result; + + if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts)) + sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, status: TASK SET FULL\n", + __func__, num_in_q); + } + } + + sqcp = sdebug_alloc_queued_cmd(cmnd); + if (!sqcp) { + pr_err("%s no alloc\n", __func__); + return SCSI_MLQUEUE_HOST_BUSY; + } + sd_dp = &sqcp->sd_dp; + + if (polled) + ns_from_boot = ktime_get_boottime_ns(); + + /* one of the resp_*() response functions is called here */ + cmnd->result = pfp ? pfp(cmnd, devip) : 0; + if (cmnd->result & SDEG_RES_IMMED_MASK) { + cmnd->result &= ~SDEG_RES_IMMED_MASK; + delta_jiff = ndelay = 0; + } + if (cmnd->result == 0 && scsi_result != 0) + cmnd->result = scsi_result; + if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) { + if (atomic_read(&sdeb_inject_pending)) { + mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO); + atomic_set(&sdeb_inject_pending, 0); + cmnd->result = check_condition_result; + } + } + + if (unlikely(sdebug_verbose && cmnd->result)) + sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", + __func__, cmnd->result); + + if (delta_jiff > 0 || ndelay > 0) { + ktime_t kt; + + if (delta_jiff > 0) { + u64 ns = jiffies_to_nsecs(delta_jiff); + + if (sdebug_random && ns < U32_MAX) { + ns = get_random_u32_below((u32)ns); + } else if (sdebug_random) { + ns >>= 12; /* scale to 4 usec precision */ + if (ns < U32_MAX) /* over 4 hours max */ + ns = get_random_u32_below((u32)ns); + ns <<= 12; + } + kt = ns_to_ktime(ns); + } else { /* ndelay has a 4.2 second max */ + kt = sdebug_random ? 
get_random_u32_below((u32)ndelay) : + (u32)ndelay; + if (ndelay < INCLUSIVE_TIMING_MAX_NS) { + u64 d = ktime_get_boottime_ns() - ns_from_boot; + + if (kt <= d) { /* elapsed duration >= kt */ + /* call scsi_done() from this thread */ + sdebug_free_queued_cmd(sqcp); + scsi_done(cmnd); + return 0; + } + /* otherwise reduce kt by elapsed time */ + kt -= d; + } + } + if (sdebug_statistics) + sd_dp->issuing_cpu = raw_smp_processor_id(); + if (polled) { + spin_lock_irqsave(&sdsc->lock, flags); + sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt); + ASSIGN_QUEUED_CMD(cmnd, sqcp); + WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL); + spin_unlock_irqrestore(&sdsc->lock, flags); + } else { + /* schedule the invocation of scsi_done() for a later time */ + spin_lock_irqsave(&sdsc->lock, flags); + ASSIGN_QUEUED_CMD(cmnd, sqcp); + WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT); + hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED); + /* + * The completion handler will try to grab sqcp->lock, + * so there is no chance that the completion handler + * will call scsi_done() until we release the lock + * here (so ok to keep referencing sdsc). + */ + spin_unlock_irqrestore(&sdsc->lock, flags); + } + } else { /* jdelay < 0, use work queue */ + if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) && + atomic_read(&sdeb_inject_pending))) { + sd_dp->aborted = true; + atomic_set(&sdeb_inject_pending, 0); + sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n", + blk_mq_unique_tag_to_tag(get_tag(cmnd))); + } + + if (sdebug_statistics) + sd_dp->issuing_cpu = raw_smp_processor_id(); + if (polled) { + spin_lock_irqsave(&sdsc->lock, flags); + ASSIGN_QUEUED_CMD(cmnd, sqcp); + sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot); + WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL); + spin_unlock_irqrestore(&sdsc->lock, flags); + } else { + spin_lock_irqsave(&sdsc->lock, flags); + ASSIGN_QUEUED_CMD(cmnd, sqcp); + WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ); + schedule_work(&sd_dp->ew.work); + spin_unlock_irqrestore(&sdsc->lock, flags); + } + } + + return 0; + +respond_in_thread: /* call back to mid-layer using invocation thread */ + cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0; + cmnd->result &= ~SDEG_RES_IMMED_MASK; + if (cmnd->result == 0 && scsi_result != 0) + cmnd->result = scsi_result; + scsi_done(cmnd); + return 0; +} + +/* Note: The following macros create attribute files in the + /sys/module/scsi_debug/parameters directory. Unfortunately this + driver is unaware of a change and cannot trigger auxiliary actions + as it can when the corresponding attribute in the + /sys/bus/pseudo/drivers/scsi_debug directory is changed. 
+ */ +module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR); +module_param_named(ato, sdebug_ato, int, S_IRUGO); +module_param_named(cdb_len, sdebug_cdb_len, int, 0644); +module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR); +module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR); +module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO); +module_param_named(dif, sdebug_dif, int, S_IRUGO); +module_param_named(dix, sdebug_dix, int, S_IRUGO); +module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR); +module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR); +module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR); +module_param_named(guard, sdebug_guard, uint, S_IRUGO); +module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR); +module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO); +module_param_string(inq_product, sdebug_inq_product_id, + sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR); +module_param_string(inq_rev, sdebug_inq_product_rev, + sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR); +module_param_string(inq_vendor, sdebug_inq_vendor_id, + sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR); +module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO); +module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO); +module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO); +module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO); +module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO); +module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR); +module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR); +module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR); +module_param_named(medium_error_count, sdebug_medium_error_count, int, + S_IRUGO | S_IWUSR); +module_param_named(medium_error_start, sdebug_medium_error_start, int, + S_IRUGO | S_IWUSR); +module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR); +module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR); +module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR); +module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO); +module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO); +module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR); +module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO); +module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO); +module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR); +module_param_named(per_host_store, sdebug_per_host_store, bool, + S_IRUGO | S_IWUSR); +module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO); +module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR); +module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR); +module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR); +module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO); +module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO); +module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR); +module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR); +module_param_named(submit_queues, submit_queues, int, S_IRUGO); +module_param_named(poll_queues, poll_queues, int, S_IRUGO); +module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO); +module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO); +module_param_named(unmap_granularity, sdebug_unmap_granularity, int, 
S_IRUGO); +module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO); +module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO); +module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO); +module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR); +module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int, + S_IRUGO | S_IWUSR); +module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR); +module_param_named(write_same_length, sdebug_write_same_length, int, + S_IRUGO | S_IWUSR); +module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO); +module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO); +module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO); +module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO); +module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO); + +MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); +MODULE_DESCRIPTION("SCSI debug adapter driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(SDEBUG_VERSION); + +MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)"); +MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); +MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)"); +MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)"); +MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny"); +MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)"); +MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); +MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); +MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)"); +MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)"); +MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)"); +MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); +MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)"); +MODULE_PARM_DESC(host_max_queue, + "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])"); +MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")"); +MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\"" + SDEBUG_VERSION "\")"); +MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")"); +MODULE_PARM_DESC(lbprz, + "on read unmapped LBs return 0 when 1 (def), return 0xff when 2"); +MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); +MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); +MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); +MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); +MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method"); +MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); +MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))"); +MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error"); +MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error"); +MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)"); +MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)"); +MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)"); +MODULE_PARM_DESC(no_uld, "stop ULD (e.g. 
sd driver) attaching (def=0))"); +MODULE_PARM_DESC(num_parts, "number of partitions(def=0)"); +MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)"); +MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)"); +MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)"); +MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)"); +MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)"); +MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); +MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))"); +MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); +MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns"); +MODULE_PARM_DESC(removable, "claim to have removable media (def=0)"); +MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])"); +MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); +MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)"); +MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)"); +MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)"); +MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)"); +MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); +MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); +MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)"); +MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)"); +MODULE_PARM_DESC(uuid_ctl, + "1->use uuid for lu name, 0->don't, 2->all use same (def=0)"); +MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)"); +MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); +MODULE_PARM_DESC(wp, "Write Protect (def=0)"); +MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)"); +MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix"); +MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)"); +MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)"); +MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)"); +MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)"); + +#define SDEBUG_INFO_LEN 256 +static char sdebug_info[SDEBUG_INFO_LEN]; + +static const char *scsi_debug_info(struct Scsi_Host *shp) +{ + int k; + + k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n", + my_name, SDEBUG_VERSION, sdebug_version_date); + if (k >= (SDEBUG_INFO_LEN - 1)) + return sdebug_info; + scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k, + " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d", + sdebug_dev_size_mb, sdebug_opts, submit_queues, + "statistics", (int)sdebug_statistics); + return sdebug_info; +} + +/* 'echo > /proc/scsi/scsi_debug/' writes to opts */ +static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, + int length) +{ + char arr[16]; + int opts; + int minLen = length > 15 ? 
15 : length; + + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) + return -EACCES; + memcpy(arr, buffer, minLen); + arr[minLen] = '\0'; + if (1 != sscanf(arr, "%d", &opts)) + return -EINVAL; + sdebug_opts = opts; + sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts); + sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts); + if (sdebug_every_nth != 0) + tweak_cmnd_count(); + return length; +} + +struct sdebug_submit_queue_data { + int *first; + int *last; + int queue_num; +}; + +static bool sdebug_submit_queue_iter(struct request *rq, void *opaque) +{ + struct sdebug_submit_queue_data *data = opaque; + u32 unique_tag = blk_mq_unique_tag(rq); + u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag); + u16 tag = blk_mq_unique_tag_to_tag(unique_tag); + int queue_num = data->queue_num; + + if (hwq != queue_num) + return true; + + /* Rely on iter'ing in ascending tag order */ + if (*data->first == -1) + *data->first = *data->last = tag; + else + *data->last = tag; + + return true; +} + +/* Output seen with 'cat /proc/scsi/scsi_debug/'. It will be the + * same for each scsi_debug host (if more than one). Some of the counters + * output are not atomics so might be inaccurate in a busy system. */ +static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host) +{ + struct sdebug_host_info *sdhp; + int j; + + seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n", + SDEBUG_VERSION, sdebug_version_date); + seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n", + sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb, + sdebug_opts, sdebug_every_nth); + seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n", + sdebug_jdelay, sdebug_ndelay, sdebug_max_luns, + sdebug_sector_size, "bytes"); + seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n", + sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per, + num_aborts); + seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n", + num_dev_resets, num_target_resets, num_bus_resets, + num_host_resets); + seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n", + dix_reads, dix_writes, dif_errors); + seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000, + sdebug_statistics); + seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n", + atomic_read(&sdebug_cmnd_count), + atomic_read(&sdebug_completions), + "miss_cpus", atomic_read(&sdebug_miss_cpus), + atomic_read(&sdebug_a_tsf), + atomic_read(&sdeb_mq_poll_count)); + + seq_printf(m, "submit_queues=%d\n", submit_queues); + for (j = 0; j < submit_queues; ++j) { + int f = -1, l = -1; + struct sdebug_submit_queue_data data = { + .queue_num = j, + .first = &f, + .last = &l, + }; + seq_printf(m, " queue %d:\n", j); + blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter, + &data); + if (f >= 0) { + seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n", + "first,last bits", f, l); + } + } + + seq_printf(m, "this host_no=%d\n", host->host_no); + if (!xa_empty(per_store_ap)) { + bool niu; + int idx; + unsigned long l_idx; + struct sdeb_store_info *sip; + + seq_puts(m, "\nhost list:\n"); + j = 0; + list_for_each_entry(sdhp, &sdebug_host_list, host_list) { + idx = sdhp->si_idx; + seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j, + sdhp->shost->host_no, idx); + ++j; + } + seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n", + sdeb_most_recent_idx); + j = 0; + xa_for_each(per_store_ap, l_idx, sip) { + niu = xa_get_mark(per_store_ap, l_idx, + SDEB_XA_NOT_IN_USE); + idx = (int)l_idx; + seq_printf(m, " 
%d: idx=%d%s\n", j, idx, + (niu ? " not_in_use" : "")); + ++j; + } + } + return 0; +} + +static ssize_t delay_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay); +} +/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit + * of delay is jiffies. + */ +static ssize_t delay_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int jdelay, res; + + if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) { + res = count; + if (sdebug_jdelay != jdelay) { + struct sdebug_host_info *sdhp; + + mutex_lock(&sdebug_host_list_mutex); + block_unblock_all_queues(true); + + list_for_each_entry(sdhp, &sdebug_host_list, host_list) { + struct Scsi_Host *shost = sdhp->shost; + + if (scsi_host_busy(shost)) { + res = -EBUSY; /* queued commands */ + break; + } + } + if (res > 0) { + sdebug_jdelay = jdelay; + sdebug_ndelay = 0; + } + block_unblock_all_queues(false); + mutex_unlock(&sdebug_host_list_mutex); + } + return res; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(delay); + +static ssize_t ndelay_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay); +} +/* Returns -EBUSY if ndelay is being changed and commands are queued */ +/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */ +static ssize_t ndelay_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int ndelay, res; + + if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) && + (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) { + res = count; + if (sdebug_ndelay != ndelay) { + struct sdebug_host_info *sdhp; + + mutex_lock(&sdebug_host_list_mutex); + block_unblock_all_queues(true); + + list_for_each_entry(sdhp, &sdebug_host_list, host_list) { + struct Scsi_Host *shost = sdhp->shost; + + if (scsi_host_busy(shost)) { + res = -EBUSY; /* queued commands */ + break; + } + } + + if (res > 0) { + sdebug_ndelay = ndelay; + sdebug_jdelay = ndelay ? 
JDELAY_OVERRIDDEN + : DEF_JDELAY; + } + block_unblock_all_queues(false); + mutex_unlock(&sdebug_host_list_mutex); + } + return res; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(ndelay); + +static ssize_t opts_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts); +} + +static ssize_t opts_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int opts; + char work[20]; + + if (sscanf(buf, "%10s", work) == 1) { + if (strncasecmp(work, "0x", 2) == 0) { + if (kstrtoint(work + 2, 16, &opts) == 0) + goto opts_done; + } else { + if (kstrtoint(work, 10, &opts) == 0) + goto opts_done; + } + } + return -EINVAL; +opts_done: + sdebug_opts = opts; + sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts); + sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts); + tweak_cmnd_count(); + return count; +} +static DRIVER_ATTR_RW(opts); + +static ssize_t ptype_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype); +} +static ssize_t ptype_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int n; + + /* Cannot change from or to TYPE_ZBC with sysfs */ + if (sdebug_ptype == TYPE_ZBC) + return -EINVAL; + + if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { + if (n == TYPE_ZBC) + return -EINVAL; + sdebug_ptype = n; + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(ptype); + +static ssize_t dsense_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense); +} +static ssize_t dsense_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int n; + + if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { + sdebug_dsense = n; + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(dsense); + +static ssize_t fake_rw_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw); +} +static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int n, idx; + + if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { + bool want_store = (n == 0); + struct sdebug_host_info *sdhp; + + n = (n > 0); + sdebug_fake_rw = (sdebug_fake_rw > 0); + if (sdebug_fake_rw == n) + return count; /* not transitioning so do nothing */ + + if (want_store) { /* 1 --> 0 transition, set up store */ + if (sdeb_first_idx < 0) { + idx = sdebug_add_store(); + if (idx < 0) + return idx; + } else { + idx = sdeb_first_idx; + xa_clear_mark(per_store_ap, idx, + SDEB_XA_NOT_IN_USE); + } + /* make all hosts use same store */ + list_for_each_entry(sdhp, &sdebug_host_list, + host_list) { + if (sdhp->si_idx != idx) { + xa_set_mark(per_store_ap, sdhp->si_idx, + SDEB_XA_NOT_IN_USE); + sdhp->si_idx = idx; + } + } + sdeb_most_recent_idx = idx; + } else { /* 0 --> 1 transition is trigger for shrink */ + sdebug_erase_all_stores(true /* apart from first */); + } + sdebug_fake_rw = n; + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(fake_rw); + +static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0); +} +static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int n; + + if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { + sdebug_no_lun_0 = n; + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(no_lun_0); + +static ssize_t num_tgts_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, 
PAGE_SIZE, "%d\n", sdebug_num_tgts); +} +static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int n; + + if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { + sdebug_num_tgts = n; + sdebug_max_tgts_luns(); + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(num_tgts); + +static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb); +} +static DRIVER_ATTR_RO(dev_size_mb); + +static ssize_t per_host_store_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store); +} + +static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + bool v; + + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdebug_per_host_store = v; + return count; +} +static DRIVER_ATTR_RW(per_host_store); + +static ssize_t num_parts_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts); +} +static DRIVER_ATTR_RO(num_parts); + +static ssize_t every_nth_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth); +} +static ssize_t every_nth_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int nth; + char work[20]; + + if (sscanf(buf, "%10s", work) == 1) { + if (strncasecmp(work, "0x", 2) == 0) { + if (kstrtoint(work + 2, 16, &nth) == 0) + goto every_nth_done; + } else { + if (kstrtoint(work, 10, &nth) == 0) + goto every_nth_done; + } + } + return -EINVAL; + +every_nth_done: + sdebug_every_nth = nth; + if (nth && !sdebug_statistics) { + pr_info("every_nth needs statistics=1, set it\n"); + sdebug_statistics = true; + } + tweak_cmnd_count(); + return count; +} +static DRIVER_ATTR_RW(every_nth); + +static ssize_t lun_format_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am); +} +static ssize_t lun_format_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int n; + bool changed; + + if (kstrtoint(buf, 0, &n)) + return -EINVAL; + if (n >= 0) { + if (n > (int)SAM_LUN_AM_FLAT) { + pr_warn("only LUN address methods 0 and 1 are supported\n"); + return -EINVAL; + } + changed = ((int)sdebug_lun_am != n); + sdebug_lun_am = n; + if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */ + struct sdebug_host_info *sdhp; + struct sdebug_dev_info *dp; + + mutex_lock(&sdebug_host_list_mutex); + list_for_each_entry(sdhp, &sdebug_host_list, host_list) { + list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) { + set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm); + } + } + mutex_unlock(&sdebug_host_list_mutex); + } + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(lun_format); + +static ssize_t max_luns_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns); +} +static ssize_t max_luns_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int n; + bool changed; + + if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { + if (n > 256) { + pr_warn("max_luns can be no more than 256\n"); + return -EINVAL; + } + changed = (sdebug_max_luns != n); + sdebug_max_luns = n; + sdebug_max_tgts_luns(); + if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */ + struct sdebug_host_info *sdhp; + struct sdebug_dev_info *dp; + + mutex_lock(&sdebug_host_list_mutex); + list_for_each_entry(sdhp, &sdebug_host_list, + host_list) { + list_for_each_entry(dp, 
&sdhp->dev_info_list, + dev_list) { + set_bit(SDEBUG_UA_LUNS_CHANGED, + dp->uas_bm); + } + } + mutex_unlock(&sdebug_host_list_mutex); + } + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(max_luns); + +static ssize_t max_queue_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue); +} +/* N.B. max_queue can be changed while there are queued commands. In flight + * commands beyond the new max_queue will be completed. */ +static ssize_t max_queue_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int n; + + if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) && + (n <= SDEBUG_CANQUEUE) && + (sdebug_host_max_queue == 0)) { + mutex_lock(&sdebug_host_list_mutex); + + /* We may only change sdebug_max_queue when we have no shosts */ + if (list_empty(&sdebug_host_list)) + sdebug_max_queue = n; + else + count = -EBUSY; + mutex_unlock(&sdebug_host_list_mutex); + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(max_queue); + +static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue); +} + +static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock); +} + +static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count) +{ + bool v; + + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdebug_no_rwlock = v; + return count; +} +static DRIVER_ATTR_RW(no_rwlock); + +/* + * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap + * in range [0, sdebug_host_max_queue), we can't change it. + */ +static DRIVER_ATTR_RO(host_max_queue); + +static ssize_t no_uld_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld); +} +static DRIVER_ATTR_RO(no_uld); + +static ssize_t scsi_level_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level); +} +static DRIVER_ATTR_RO(scsi_level); + +static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb); +} +static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int n; + bool changed; + + /* Ignore capacity change for ZBC drives for now */ + if (sdeb_zbc_in_use) + return -ENOTSUPP; + + if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { + changed = (sdebug_virtual_gb != n); + sdebug_virtual_gb = n; + sdebug_capacity = get_sdebug_capacity(); + if (changed) { + struct sdebug_host_info *sdhp; + struct sdebug_dev_info *dp; + + mutex_lock(&sdebug_host_list_mutex); + list_for_each_entry(sdhp, &sdebug_host_list, + host_list) { + list_for_each_entry(dp, &sdhp->dev_info_list, + dev_list) { + set_bit(SDEBUG_UA_CAPACITY_CHANGED, + dp->uas_bm); + } + } + mutex_unlock(&sdebug_host_list_mutex); + } + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(virtual_gb); + +static ssize_t add_host_show(struct device_driver *ddp, char *buf) +{ + /* absolute number of hosts currently active is what is shown */ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts); +} + +static ssize_t add_host_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + bool found; + unsigned long idx; + struct sdeb_store_info *sip; + bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store; + int delta_hosts; + + if (sscanf(buf, "%d", &delta_hosts) != 1) + 
return -EINVAL; + if (delta_hosts > 0) { + do { + found = false; + if (want_phs) { + xa_for_each_marked(per_store_ap, idx, sip, + SDEB_XA_NOT_IN_USE) { + sdeb_most_recent_idx = (int)idx; + found = true; + break; + } + if (found) /* re-use case */ + sdebug_add_host_helper((int)idx); + else + sdebug_do_add_host(true); + } else { + sdebug_do_add_host(false); + } + } while (--delta_hosts); + } else if (delta_hosts < 0) { + do { + sdebug_do_remove_host(false); + } while (++delta_hosts); + } + return count; +} +static DRIVER_ATTR_RW(add_host); + +static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno); +} +static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int n; + + if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { + sdebug_vpd_use_hostno = n; + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(vpd_use_hostno); + +static ssize_t statistics_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics); +} +static ssize_t statistics_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int n; + + if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) { + if (n > 0) + sdebug_statistics = true; + else { + clear_queue_stats(); + sdebug_statistics = false; + } + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(statistics); + +static ssize_t sector_size_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size); +} +static DRIVER_ATTR_RO(sector_size); + +static ssize_t submit_queues_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues); +} +static DRIVER_ATTR_RO(submit_queues); + +static ssize_t dix_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix); +} +static DRIVER_ATTR_RO(dix); + +static ssize_t dif_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif); +} +static DRIVER_ATTR_RO(dif); + +static ssize_t guard_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard); +} +static DRIVER_ATTR_RO(guard); + +static ssize_t ato_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato); +} +static DRIVER_ATTR_RO(ato); + +static ssize_t map_show(struct device_driver *ddp, char *buf) +{ + ssize_t count = 0; + + if (!scsi_debug_lbp()) + return scnprintf(buf, PAGE_SIZE, "0-%u\n", + sdebug_store_sectors); + + if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) { + struct sdeb_store_info *sip = xa_load(per_store_ap, 0); + + if (sip) + count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", + (int)map_size, sip->map_storep); + } + buf[count++] = '\n'; + buf[count] = '\0'; + + return count; +} +static DRIVER_ATTR_RO(map); + +static ssize_t random_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random); +} + +static ssize_t random_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + bool v; + + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdebug_random = v; + return count; +} +static DRIVER_ATTR_RW(random); + +static ssize_t removable_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 
1 : 0); +} +static ssize_t removable_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int n; + + if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { + sdebug_removable = (n > 0); + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(removable); + +static ssize_t host_lock_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock); +} +/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */ +static ssize_t host_lock_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int n; + + if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { + sdebug_host_lock = (n > 0); + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(host_lock); + +static ssize_t strict_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict); +} +static ssize_t strict_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int n; + + if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { + sdebug_strict = (n > 0); + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(strict); + +static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl); +} +static DRIVER_ATTR_RO(uuid_ctl); + +static ssize_t cdb_len_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len); +} +static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int ret, n; + + ret = kstrtoint(buf, 0, &n); + if (ret) + return ret; + sdebug_cdb_len = n; + all_config_cdb_len(); + return count; +} +static DRIVER_ATTR_RW(cdb_len); + +static const char * const zbc_model_strs_a[] = { + [BLK_ZONED_NONE] = "none", + [BLK_ZONED_HA] = "host-aware", + [BLK_ZONED_HM] = "host-managed", +}; + +static const char * const zbc_model_strs_b[] = { + [BLK_ZONED_NONE] = "no", + [BLK_ZONED_HA] = "aware", + [BLK_ZONED_HM] = "managed", +}; + +static const char * const zbc_model_strs_c[] = { + [BLK_ZONED_NONE] = "0", + [BLK_ZONED_HA] = "1", + [BLK_ZONED_HM] = "2", +}; + +static int sdeb_zbc_model_str(const char *cp) +{ + int res = sysfs_match_string(zbc_model_strs_a, cp); + + if (res < 0) { + res = sysfs_match_string(zbc_model_strs_b, cp); + if (res < 0) { + res = sysfs_match_string(zbc_model_strs_c, cp); + if (res < 0) + return -EINVAL; + } + } + return res; +} + +static ssize_t zbc_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%s\n", + zbc_model_strs_a[sdeb_zbc_model]); +} +static DRIVER_ATTR_RO(zbc); + +static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready); +} +static DRIVER_ATTR_RO(tur_ms_to_ready); + +/* Note: The following array creates attribute files in the + /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these + files (over those found in the /sys/module/scsi_debug/parameters + directory) is that auxiliary actions can be triggered when an attribute + is changed. For example see: add_host_store() above. 
+ */ + +static struct attribute *sdebug_drv_attrs[] = { + &driver_attr_delay.attr, + &driver_attr_opts.attr, + &driver_attr_ptype.attr, + &driver_attr_dsense.attr, + &driver_attr_fake_rw.attr, + &driver_attr_host_max_queue.attr, + &driver_attr_no_lun_0.attr, + &driver_attr_num_tgts.attr, + &driver_attr_dev_size_mb.attr, + &driver_attr_num_parts.attr, + &driver_attr_every_nth.attr, + &driver_attr_lun_format.attr, + &driver_attr_max_luns.attr, + &driver_attr_max_queue.attr, + &driver_attr_no_rwlock.attr, + &driver_attr_no_uld.attr, + &driver_attr_scsi_level.attr, + &driver_attr_virtual_gb.attr, + &driver_attr_add_host.attr, + &driver_attr_per_host_store.attr, + &driver_attr_vpd_use_hostno.attr, + &driver_attr_sector_size.attr, + &driver_attr_statistics.attr, + &driver_attr_submit_queues.attr, + &driver_attr_dix.attr, + &driver_attr_dif.attr, + &driver_attr_guard.attr, + &driver_attr_ato.attr, + &driver_attr_map.attr, + &driver_attr_random.attr, + &driver_attr_removable.attr, + &driver_attr_host_lock.attr, + &driver_attr_ndelay.attr, + &driver_attr_strict.attr, + &driver_attr_uuid_ctl.attr, + &driver_attr_cdb_len.attr, + &driver_attr_tur_ms_to_ready.attr, + &driver_attr_zbc.attr, + NULL, +}; +ATTRIBUTE_GROUPS(sdebug_drv); + +static struct device *pseudo_primary; + +static int __init scsi_debug_init(void) +{ + bool want_store = (sdebug_fake_rw == 0); + unsigned long sz; + int k, ret, hosts_to_add; + int idx = -1; + + if (sdebug_ndelay >= 1000 * 1000 * 1000) { + pr_warn("ndelay must be less than 1 second, ignored\n"); + sdebug_ndelay = 0; + } else if (sdebug_ndelay > 0) + sdebug_jdelay = JDELAY_OVERRIDDEN; + + switch (sdebug_sector_size) { + case 512: + case 1024: + case 2048: + case 4096: + break; + default: + pr_err("invalid sector_size %d\n", sdebug_sector_size); + return -EINVAL; + } + + switch (sdebug_dif) { + case T10_PI_TYPE0_PROTECTION: + break; + case T10_PI_TYPE1_PROTECTION: + case T10_PI_TYPE2_PROTECTION: + case T10_PI_TYPE3_PROTECTION: + have_dif_prot = true; + break; + + default: + pr_err("dif must be 0, 1, 2 or 3\n"); + return -EINVAL; + } + + if (sdebug_num_tgts < 0) { + pr_err("num_tgts must be >= 0\n"); + return -EINVAL; + } + + if (sdebug_guard > 1) { + pr_err("guard must be 0 or 1\n"); + return -EINVAL; + } + + if (sdebug_ato > 1) { + pr_err("ato must be 0 or 1\n"); + return -EINVAL; + } + + if (sdebug_physblk_exp > 15) { + pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp); + return -EINVAL; + } + + sdebug_lun_am = sdebug_lun_am_i; + if (sdebug_lun_am > SAM_LUN_AM_FLAT) { + pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am); + sdebug_lun_am = SAM_LUN_AM_PERIPHERAL; + } + + if (sdebug_max_luns > 256) { + if (sdebug_max_luns > 16384) { + pr_warn("max_luns can be no more than 16384, use default\n"); + sdebug_max_luns = DEF_MAX_LUNS; + } + sdebug_lun_am = SAM_LUN_AM_FLAT; + } + + if (sdebug_lowest_aligned > 0x3fff) { + pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned); + return -EINVAL; + } + + if (submit_queues < 1) { + pr_err("submit_queues must be 1 or more\n"); + return -EINVAL; + } + + if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) { + pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE); + return -EINVAL; + } + + if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) || + (sdebug_host_max_queue < 0)) { + pr_err("host_max_queue must be in range [0 %d]\n", + SDEBUG_CANQUEUE); + return -EINVAL; + } + + if (sdebug_host_max_queue && + (sdebug_max_queue != sdebug_host_max_queue)) { + sdebug_max_queue = 
sdebug_host_max_queue; + pr_warn("fixing max submit queue depth to host max queue depth, %d\n", + sdebug_max_queue); + } + + /* + * check for host managed zoned block device specified with + * ptype=0x14 or zbc=XXX. + */ + if (sdebug_ptype == TYPE_ZBC) { + sdeb_zbc_model = BLK_ZONED_HM; + } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) { + k = sdeb_zbc_model_str(sdeb_zbc_model_s); + if (k < 0) + return k; + sdeb_zbc_model = k; + switch (sdeb_zbc_model) { + case BLK_ZONED_NONE: + case BLK_ZONED_HA: + sdebug_ptype = TYPE_DISK; + break; + case BLK_ZONED_HM: + sdebug_ptype = TYPE_ZBC; + break; + default: + pr_err("Invalid ZBC model\n"); + return -EINVAL; + } + } + if (sdeb_zbc_model != BLK_ZONED_NONE) { + sdeb_zbc_in_use = true; + if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT) + sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB; + } + + if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT) + sdebug_dev_size_mb = DEF_DEV_SIZE_MB; + if (sdebug_dev_size_mb < 1) + sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ + sz = (unsigned long)sdebug_dev_size_mb * 1048576; + sdebug_store_sectors = sz / sdebug_sector_size; + sdebug_capacity = get_sdebug_capacity(); + + /* play around with geometry, don't waste too much on track 0 */ + sdebug_heads = 8; + sdebug_sectors_per = 32; + if (sdebug_dev_size_mb >= 256) + sdebug_heads = 64; + else if (sdebug_dev_size_mb >= 16) + sdebug_heads = 32; + sdebug_cylinders_per = (unsigned long)sdebug_capacity / + (sdebug_sectors_per * sdebug_heads); + if (sdebug_cylinders_per >= 1024) { + /* other LLDs do this; implies >= 1GB ram disk ... */ + sdebug_heads = 255; + sdebug_sectors_per = 63; + sdebug_cylinders_per = (unsigned long)sdebug_capacity / + (sdebug_sectors_per * sdebug_heads); + } + if (scsi_debug_lbp()) { + sdebug_unmap_max_blocks = + clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU); + + sdebug_unmap_max_desc = + clamp(sdebug_unmap_max_desc, 0U, 256U); + + sdebug_unmap_granularity = + clamp(sdebug_unmap_granularity, 1U, 0xffffffffU); + + if (sdebug_unmap_alignment && + sdebug_unmap_granularity <= + sdebug_unmap_alignment) { + pr_err("ERR: unmap_granularity <= unmap_alignment\n"); + return -EINVAL; + } + } + xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ); + if (want_store) { + idx = sdebug_add_store(); + if (idx < 0) + return idx; + } + + pseudo_primary = root_device_register("pseudo_0"); + if (IS_ERR(pseudo_primary)) { + pr_warn("root_device_register() error\n"); + ret = PTR_ERR(pseudo_primary); + goto free_vm; + } + ret = bus_register(&pseudo_lld_bus); + if (ret < 0) { + pr_warn("bus_register error: %d\n", ret); + goto dev_unreg; + } + ret = driver_register(&sdebug_driverfs_driver); + if (ret < 0) { + pr_warn("driver_register error: %d\n", ret); + goto bus_unreg; + } + + hosts_to_add = sdebug_add_host; + sdebug_add_host = 0; + + queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN); + if (!queued_cmd_cache) { + ret = -ENOMEM; + goto driver_unreg; + } + + for (k = 0; k < hosts_to_add; k++) { + if (want_store && k == 0) { + ret = sdebug_add_host_helper(idx); + if (ret < 0) { + pr_err("add_host_helper k=%d, error=%d\n", + k, -ret); + break; + } + } else { + ret = sdebug_do_add_host(want_store && + sdebug_per_host_store); + if (ret < 0) { + pr_err("add_host k=%d error=%d\n", k, -ret); + break; + } + } + } + if (sdebug_verbose) + pr_info("built %d host(s)\n", sdebug_num_hosts); + + return 0; + +driver_unreg: + driver_unregister(&sdebug_driverfs_driver); +bus_unreg: + bus_unregister(&pseudo_lld_bus); +dev_unreg: + 
root_device_unregister(pseudo_primary); +free_vm: + sdebug_erase_store(idx, NULL); + return ret; +} + +static void __exit scsi_debug_exit(void) +{ + int k = sdebug_num_hosts; + + for (; k; k--) + sdebug_do_remove_host(true); + kmem_cache_destroy(queued_cmd_cache); + driver_unregister(&sdebug_driverfs_driver); + bus_unregister(&pseudo_lld_bus); + root_device_unregister(pseudo_primary); + + sdebug_erase_all_stores(false); + xa_destroy(per_store_ap); +} + +device_initcall(scsi_debug_init); +module_exit(scsi_debug_exit); + +static void sdebug_release_adapter(struct device *dev) +{ + struct sdebug_host_info *sdbg_host; + + sdbg_host = dev_to_sdebug_host(dev); + kfree(sdbg_host); +} + +/* idx must be valid, if sip is NULL then it will be obtained using idx */ +static void sdebug_erase_store(int idx, struct sdeb_store_info *sip) +{ + if (idx < 0) + return; + if (!sip) { + if (xa_empty(per_store_ap)) + return; + sip = xa_load(per_store_ap, idx); + if (!sip) + return; + } + vfree(sip->map_storep); + vfree(sip->dif_storep); + vfree(sip->storep); + xa_erase(per_store_ap, idx); + kfree(sip); +} + +/* Assume apart_from_first==false only in shutdown case. */ +static void sdebug_erase_all_stores(bool apart_from_first) +{ + unsigned long idx; + struct sdeb_store_info *sip = NULL; + + xa_for_each(per_store_ap, idx, sip) { + if (apart_from_first) + apart_from_first = false; + else + sdebug_erase_store(idx, sip); + } + if (apart_from_first) + sdeb_most_recent_idx = sdeb_first_idx; +} + +/* + * Returns store xarray new element index (idx) if >=0 else negated errno. + * Limit the number of stores to 65536. + */ +static int sdebug_add_store(void) +{ + int res; + u32 n_idx; + unsigned long iflags; + unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576; + struct sdeb_store_info *sip = NULL; + struct xa_limit xal = { .max = 1 << 16, .min = 0 }; + + sip = kzalloc(sizeof(*sip), GFP_KERNEL); + if (!sip) + return -ENOMEM; + + xa_lock_irqsave(per_store_ap, iflags); + res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC); + if (unlikely(res < 0)) { + xa_unlock_irqrestore(per_store_ap, iflags); + kfree(sip); + pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res); + return res; + } + sdeb_most_recent_idx = n_idx; + if (sdeb_first_idx < 0) + sdeb_first_idx = n_idx; + xa_unlock_irqrestore(per_store_ap, iflags); + + res = -ENOMEM; + sip->storep = vzalloc(sz); + if (!sip->storep) { + pr_err("user data oom\n"); + goto err; + } + if (sdebug_num_parts > 0) + sdebug_build_parts(sip->storep, sz); + + /* DIF/DIX: what T10 calls Protection Information (PI) */ + if (sdebug_dix) { + int dif_size; + + dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple); + sip->dif_storep = vmalloc(dif_size); + + pr_info("dif_storep %u bytes @ %pK\n", dif_size, + sip->dif_storep); + + if (!sip->dif_storep) { + pr_err("DIX oom\n"); + goto err; + } + memset(sip->dif_storep, 0xff, dif_size); + } + /* Logical Block Provisioning */ + if (scsi_debug_lbp()) { + map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; + sip->map_storep = vmalloc(array_size(sizeof(long), + BITS_TO_LONGS(map_size))); + + pr_info("%lu provisioning blocks\n", map_size); + + if (!sip->map_storep) { + pr_err("LBP map oom\n"); + goto err; + } + + bitmap_zero(sip->map_storep, map_size); + + /* Map first 1KB for partition table */ + if (sdebug_num_parts) + map_region(sip, 0, 2); + } + + rwlock_init(&sip->macc_lck); + return (int)n_idx; +err: + sdebug_erase_store((int)n_idx, sip); + pr_warn("%s: failed, errno=%d\n", __func__, -res); + return res; +} + +static 
int sdebug_add_host_helper(int per_host_idx) +{ + int k, devs_per_host, idx; + int error = -ENOMEM; + struct sdebug_host_info *sdbg_host; + struct sdebug_dev_info *sdbg_devinfo, *tmp; + + sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL); + if (!sdbg_host) + return -ENOMEM; + idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx; + if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE)) + xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE); + sdbg_host->si_idx = idx; + + INIT_LIST_HEAD(&sdbg_host->dev_info_list); + + devs_per_host = sdebug_num_tgts * sdebug_max_luns; + for (k = 0; k < devs_per_host; k++) { + sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL); + if (!sdbg_devinfo) + goto clean; + } + + mutex_lock(&sdebug_host_list_mutex); + list_add_tail(&sdbg_host->host_list, &sdebug_host_list); + mutex_unlock(&sdebug_host_list_mutex); + + sdbg_host->dev.bus = &pseudo_lld_bus; + sdbg_host->dev.parent = pseudo_primary; + sdbg_host->dev.release = &sdebug_release_adapter; + dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts); + + error = device_register(&sdbg_host->dev); + if (error) { + mutex_lock(&sdebug_host_list_mutex); + list_del(&sdbg_host->host_list); + mutex_unlock(&sdebug_host_list_mutex); + goto clean; + } + + ++sdebug_num_hosts; + return 0; + +clean: + list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list, + dev_list) { + list_del(&sdbg_devinfo->dev_list); + kfree(sdbg_devinfo->zstate); + kfree(sdbg_devinfo); + } + if (sdbg_host->dev.release) + put_device(&sdbg_host->dev); + else + kfree(sdbg_host); + pr_warn("%s: failed, errno=%d\n", __func__, -error); + return error; +} + +static int sdebug_do_add_host(bool mk_new_store) +{ + int ph_idx = sdeb_most_recent_idx; + + if (mk_new_store) { + ph_idx = sdebug_add_store(); + if (ph_idx < 0) + return ph_idx; + } + return sdebug_add_host_helper(ph_idx); +} + +static void sdebug_do_remove_host(bool the_end) +{ + int idx = -1; + struct sdebug_host_info *sdbg_host = NULL; + struct sdebug_host_info *sdbg_host2; + + mutex_lock(&sdebug_host_list_mutex); + if (!list_empty(&sdebug_host_list)) { + sdbg_host = list_entry(sdebug_host_list.prev, + struct sdebug_host_info, host_list); + idx = sdbg_host->si_idx; + } + if (!the_end && idx >= 0) { + bool unique = true; + + list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) { + if (sdbg_host2 == sdbg_host) + continue; + if (idx == sdbg_host2->si_idx) { + unique = false; + break; + } + } + if (unique) { + xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE); + if (idx == sdeb_most_recent_idx) + --sdeb_most_recent_idx; + } + } + if (sdbg_host) + list_del(&sdbg_host->host_list); + mutex_unlock(&sdebug_host_list_mutex); + + if (!sdbg_host) + return; + + device_unregister(&sdbg_host->dev); + --sdebug_num_hosts; +} + +static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth) +{ + struct sdebug_dev_info *devip = sdev->hostdata; + + if (!devip) + return -ENODEV; + + mutex_lock(&sdebug_host_list_mutex); + block_unblock_all_queues(true); + + if (qdepth > SDEBUG_CANQUEUE) { + qdepth = SDEBUG_CANQUEUE; + pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__, + qdepth, SDEBUG_CANQUEUE); + } + if (qdepth < 1) + qdepth = 1; + if (qdepth != sdev->queue_depth) + scsi_change_queue_depth(sdev, qdepth); + + block_unblock_all_queues(false); + mutex_unlock(&sdebug_host_list_mutex); + + if (SDEBUG_OPT_Q_NOISE & sdebug_opts) + sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth); + + return sdev->queue_depth; +} + +static bool 
fake_timeout(struct scsi_cmnd *scp) +{ + if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) { + if (sdebug_every_nth < -1) + sdebug_every_nth = -1; + if (SDEBUG_OPT_TIMEOUT & sdebug_opts) + return true; /* ignore command causing timeout */ + else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts && + scsi_medium_access_command(scp)) + return true; /* time out reads and writes */ + } + return false; +} + +/* Response to TUR or media access command when device stopped */ +static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + int stopped_state; + u64 diff_ns = 0; + ktime_t now_ts = ktime_get_boottime(); + struct scsi_device *sdp = scp->device; + + stopped_state = atomic_read(&devip->stopped); + if (stopped_state == 2) { + if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) { + diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts)); + if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) { + /* tur_ms_to_ready timer extinguished */ + atomic_set(&devip->stopped, 0); + return 0; + } + } + mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1); + if (sdebug_verbose) + sdev_printk(KERN_INFO, sdp, + "%s: Not ready: in process of becoming ready\n", my_name); + if (scp->cmnd[0] == TEST_UNIT_READY) { + u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000; + + if (diff_ns <= tur_nanosecs_to_ready) + diff_ns = tur_nanosecs_to_ready - diff_ns; + else + diff_ns = tur_nanosecs_to_ready; + /* As per 20-061r2 approved for spc6 by T10 on 20200716 */ + do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */ + scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE, + diff_ns); + return check_condition_result; + } + } + mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2); + if (sdebug_verbose) + sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n", + my_name); + return check_condition_result; +} + +static void sdebug_map_queues(struct Scsi_Host *shost) +{ + int i, qoff; + + if (shost->nr_hw_queues == 1) + return; + + for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) { + struct blk_mq_queue_map *map = &shost->tag_set.map[i]; + + map->nr_queues = 0; + + if (i == HCTX_TYPE_DEFAULT) + map->nr_queues = submit_queues - poll_queues; + else if (i == HCTX_TYPE_POLL) + map->nr_queues = poll_queues; + + if (!map->nr_queues) { + BUG_ON(i == HCTX_TYPE_DEFAULT); + continue; + } + + map->queue_offset = qoff; + blk_mq_map_queues(map); + + qoff += map->nr_queues; + } +} + +struct sdebug_blk_mq_poll_data { + unsigned int queue_num; + int *num_entries; +}; + +/* + * We don't handle aborted commands here, but it does not seem possible to have + * aborted polled commands from schedule_resp() + */ +static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque) +{ + struct sdebug_blk_mq_poll_data *data = opaque; + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); + struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd); + struct sdebug_defer *sd_dp; + u32 unique_tag = blk_mq_unique_tag(rq); + u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag); + struct sdebug_queued_cmd *sqcp; + unsigned long flags; + int queue_num = data->queue_num; + ktime_t time; + + /* We're only interested in one queue for this iteration */ + if (hwq != queue_num) + return true; + + /* Subsequent checks would fail if this failed, but check anyway */ + if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state)) + return true; + + time = ktime_get_boottime(); + + spin_lock_irqsave(&sdsc->lock, flags); + sqcp = TO_QUEUED_CMD(cmd); + if (!sqcp) { + 
spin_unlock_irqrestore(&sdsc->lock, flags); + return true; + } + + sd_dp = &sqcp->sd_dp; + if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) { + spin_unlock_irqrestore(&sdsc->lock, flags); + return true; + } + + if (time < sd_dp->cmpl_ts) { + spin_unlock_irqrestore(&sdsc->lock, flags); + return true; + } + + ASSIGN_QUEUED_CMD(cmd, NULL); + spin_unlock_irqrestore(&sdsc->lock, flags); + + if (sdebug_statistics) { + atomic_inc(&sdebug_completions); + if (raw_smp_processor_id() != sd_dp->issuing_cpu) + atomic_inc(&sdebug_miss_cpus); + } + + sdebug_free_queued_cmd(sqcp); + + scsi_done(cmd); /* callback to mid level */ + (*data->num_entries)++; + return true; +} + +static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) +{ + int num_entries = 0; + struct sdebug_blk_mq_poll_data data = { + .queue_num = queue_num, + .num_entries = &num_entries, + }; + + blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter, + &data); + + if (num_entries > 0) + atomic_add(num_entries, &sdeb_mq_poll_count); + return num_entries; +} + +static int scsi_debug_queuecommand(struct Scsi_Host *shost, + struct scsi_cmnd *scp) +{ + u8 sdeb_i; + struct scsi_device *sdp = scp->device; + const struct opcode_info_t *oip; + const struct opcode_info_t *r_oip; + struct sdebug_dev_info *devip; + u8 *cmd = scp->cmnd; + int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); + int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL; + int k, na; + int errsts = 0; + u64 lun_index = sdp->lun & 0x3FFF; + u32 flags; + u16 sa; + u8 opcode = cmd[0]; + bool has_wlun_rl; + bool inject_now; + + scsi_set_resid(scp, 0); + if (sdebug_statistics) { + atomic_inc(&sdebug_cmnd_count); + inject_now = inject_on_this_cmd(); + } else { + inject_now = false; + } + if (unlikely(sdebug_verbose && + !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) { + char b[120]; + int n, len, sb; + + len = scp->cmd_len; + sb = (int)sizeof(b); + if (len > 32) + strcpy(b, "too long, over 32 bytes"); + else { + for (k = 0, n = 0; k < len && n < sb; ++k) + n += scnprintf(b + n, sb - n, "%02x ", + (u32)cmd[k]); + } + sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name, + blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b); + } + if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY))) + return SCSI_MLQUEUE_HOST_BUSY; + has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS); + if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl)) + goto err_out; + + sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */ + oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */ + devip = (struct sdebug_dev_info *)sdp->hostdata; + if (unlikely(!devip)) { + devip = find_build_dev_info(sdp); + if (NULL == devip) + goto err_out; + } + if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending))) + atomic_set(&sdeb_inject_pending, 1); + + na = oip->num_attached; + r_pfp = oip->pfp; + if (na) { /* multiple commands with this opcode */ + r_oip = oip; + if (FF_SA & r_oip->flags) { + if (F_SA_LOW & oip->flags) + sa = 0x1f & cmd[1]; + else + sa = get_unaligned_be16(cmd + 8); + for (k = 0; k <= na; oip = r_oip->arrp + k++) { + if (opcode == oip->opcode && sa == oip->sa) + break; + } + } else { /* since no service action only check opcode */ + for (k = 0; k <= na; oip = r_oip->arrp + k++) { + if (opcode == oip->opcode) + break; + } + } + if (k > na) { + if (F_SA_LOW & r_oip->flags) + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4); + else if (F_SA_HIGH & r_oip->flags) + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7); + else + mk_sense_invalid_opcode(scp); + 
goto check_cond; + } + } /* else (when na==0) we assume the oip is a match */ + flags = oip->flags; + if (unlikely(F_INV_OP & flags)) { + mk_sense_invalid_opcode(scp); + goto check_cond; + } + if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) { + if (sdebug_verbose) + sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n", + my_name, opcode, " supported for wlun"); + mk_sense_invalid_opcode(scp); + goto check_cond; + } + if (unlikely(sdebug_strict)) { /* check cdb against mask */ + u8 rem; + int j; + + for (k = 1; k < oip->len_mask[0] && k < 16; ++k) { + rem = ~oip->len_mask[k] & cmd[k]; + if (rem) { + for (j = 7; j >= 0; --j, rem <<= 1) { + if (0x80 & rem) + break; + } + mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j); + goto check_cond; + } + } + } + if (unlikely(!(F_SKIP_UA & flags) && + find_first_bit(devip->uas_bm, + SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) { + errsts = make_ua(scp, devip); + if (errsts) + goto check_cond; + } + if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) && + atomic_read(&devip->stopped))) { + errsts = resp_not_ready(scp, devip); + if (errsts) + goto fini; + } + if (sdebug_fake_rw && (F_FAKE_RW & flags)) + goto fini; + if (unlikely(sdebug_every_nth)) { + if (fake_timeout(scp)) + return 0; /* ignore command: make trouble */ + } + if (likely(oip->pfp)) + pfp = oip->pfp; /* calls a resp_* function */ + else + pfp = r_pfp; /* if leaf function ptr NULL, try the root's */ + +fini: + if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */ + return schedule_resp(scp, devip, errsts, pfp, 0, 0); + else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 || + sdebug_ndelay > 10000)) { + /* + * Skip long delays if ndelay <= 10 microseconds. Otherwise + * for Start Stop Unit (SSU) want at least 1 second delay and + * if sdebug_jdelay>1 want a long delay of that many seconds. + * For Synchronize Cache want 1/20 of SSU's delay. + */ + int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay; + int denom = (flags & F_SYNC_DELAY) ? 
20 : 1; + + jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ); + return schedule_resp(scp, devip, errsts, pfp, jdelay, 0); + } else + return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay, + sdebug_ndelay); +check_cond: + return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0); +err_out: + return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0); +} + +static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd) +{ + struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd); + + spin_lock_init(&sdsc->lock); + + return 0; +} + + +static struct scsi_host_template sdebug_driver_template = { + .show_info = scsi_debug_show_info, + .write_info = scsi_debug_write_info, + .proc_name = sdebug_proc_name, + .name = "SCSI DEBUG", + .info = scsi_debug_info, + .slave_alloc = scsi_debug_slave_alloc, + .slave_configure = scsi_debug_slave_configure, + .slave_destroy = scsi_debug_slave_destroy, + .ioctl = scsi_debug_ioctl, + .queuecommand = scsi_debug_queuecommand, + .change_queue_depth = sdebug_change_qdepth, + .map_queues = sdebug_map_queues, + .mq_poll = sdebug_blk_mq_poll, + .eh_abort_handler = scsi_debug_abort, + .eh_device_reset_handler = scsi_debug_device_reset, + .eh_target_reset_handler = scsi_debug_target_reset, + .eh_bus_reset_handler = scsi_debug_bus_reset, + .eh_host_reset_handler = scsi_debug_host_reset, + .can_queue = SDEBUG_CANQUEUE, + .this_id = 7, + .sg_tablesize = SG_MAX_SEGMENTS, + .cmd_per_lun = DEF_CMD_PER_LUN, + .max_sectors = -1U, + .max_segment_size = -1U, + .module = THIS_MODULE, + .track_queue_depth = 1, + .cmd_size = sizeof(struct sdebug_scsi_cmd), + .init_cmd_priv = sdebug_init_cmd_priv, +}; + +static int sdebug_driver_probe(struct device *dev) +{ + int error = 0; + struct sdebug_host_info *sdbg_host; + struct Scsi_Host *hpnt; + int hprot; + + sdbg_host = dev_to_sdebug_host(dev); + + sdebug_driver_template.can_queue = sdebug_max_queue; + sdebug_driver_template.cmd_per_lun = sdebug_max_queue; + if (!sdebug_clustering) + sdebug_driver_template.dma_boundary = PAGE_SIZE - 1; + + hpnt = scsi_host_alloc(&sdebug_driver_template, 0); + if (NULL == hpnt) { + pr_err("scsi_host_alloc failed\n"); + error = -ENODEV; + return error; + } + if (submit_queues > nr_cpu_ids) { + pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n", + my_name, submit_queues, nr_cpu_ids); + submit_queues = nr_cpu_ids; + } + /* + * Decide whether to tell scsi subsystem that we want mq. The + * following should give the same answer for each host. + */ + hpnt->nr_hw_queues = submit_queues; + if (sdebug_host_max_queue) + hpnt->host_tagset = 1; + + /* poll queues are possible for nr_hw_queues > 1 */ + if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) { + pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n", + my_name, poll_queues, hpnt->nr_hw_queues); + poll_queues = 0; + } + + /* + * Poll queues don't need interrupts, but we need at least one I/O queue + * left over for non-polled I/O. + * If condition not met, trim poll_queues to 1 (just for simplicity). + */ + if (poll_queues >= submit_queues) { + if (submit_queues < 3) + pr_warn("%s: trim poll_queues to 1\n", my_name); + else + pr_warn("%s: trim poll_queues to 1. 
Perhaps try poll_queues=%d\n", + my_name, submit_queues - 1); + poll_queues = 1; + } + if (poll_queues) + hpnt->nr_maps = 3; + + sdbg_host->shost = hpnt; + if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id)) + hpnt->max_id = sdebug_num_tgts + 1; + else + hpnt->max_id = sdebug_num_tgts; + /* = sdebug_max_luns; */ + hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; + + hprot = 0; + + switch (sdebug_dif) { + + case T10_PI_TYPE1_PROTECTION: + hprot = SHOST_DIF_TYPE1_PROTECTION; + if (sdebug_dix) + hprot |= SHOST_DIX_TYPE1_PROTECTION; + break; + + case T10_PI_TYPE2_PROTECTION: + hprot = SHOST_DIF_TYPE2_PROTECTION; + if (sdebug_dix) + hprot |= SHOST_DIX_TYPE2_PROTECTION; + break; + + case T10_PI_TYPE3_PROTECTION: + hprot = SHOST_DIF_TYPE3_PROTECTION; + if (sdebug_dix) + hprot |= SHOST_DIX_TYPE3_PROTECTION; + break; + + default: + if (sdebug_dix) + hprot |= SHOST_DIX_TYPE0_PROTECTION; + break; + } + + scsi_host_set_prot(hpnt, hprot); + + if (have_dif_prot || sdebug_dix) + pr_info("host protection%s%s%s%s%s%s%s\n", + (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", + (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", + (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", + (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "", + (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "", + (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "", + (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : ""); + + if (sdebug_guard == 1) + scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP); + else + scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC); + + sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts); + sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts); + if (sdebug_every_nth) /* need stats counters for every_nth */ + sdebug_statistics = true; + error = scsi_add_host(hpnt, &sdbg_host->dev); + if (error) { + pr_err("scsi_add_host failed\n"); + error = -ENODEV; + scsi_host_put(hpnt); + } else { + scsi_scan_host(hpnt); + } + + return error; +} + +static void sdebug_driver_remove(struct device *dev) +{ + struct sdebug_host_info *sdbg_host; + struct sdebug_dev_info *sdbg_devinfo, *tmp; + + sdbg_host = dev_to_sdebug_host(dev); + + scsi_remove_host(sdbg_host->shost); + + list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list, + dev_list) { + list_del(&sdbg_devinfo->dev_list); + kfree(sdbg_devinfo->zstate); + kfree(sdbg_devinfo); + } + + scsi_host_put(sdbg_host->shost); +} + +static struct bus_type pseudo_lld_bus = { + .name = "pseudo", + .probe = sdebug_driver_probe, + .remove = sdebug_driver_remove, + .drv_groups = sdebug_drv_groups, +}; diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c new file mode 100644 index 000000000..f795848b3 --- /dev/null +++ b/drivers/scsi/scsi_debugfs.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include "scsi_debugfs.h" + +#define SCSI_CMD_FLAG_NAME(name)[const_ilog2(SCMD_##name)] = #name +static const char *const scsi_cmd_flags[] = { + SCSI_CMD_FLAG_NAME(TAGGED), + SCSI_CMD_FLAG_NAME(INITIALIZED), + SCSI_CMD_FLAG_NAME(LAST), +}; +#undef SCSI_CMD_FLAG_NAME + +static int scsi_flags_show(struct seq_file *m, const unsigned long flags, + const char *const *flag_name, int flag_name_count) +{ + bool sep = false; + int i; + + for_each_set_bit(i, &flags, BITS_PER_LONG) { + if (sep) + seq_puts(m, "|"); + sep = true; + if (i < flag_name_count && flag_name[i]) + seq_puts(m, flag_name[i]); + else + seq_printf(m, "%d", i); + } + return 0; +} + +void scsi_show_rq(struct seq_file *m, struct 
request *rq) +{ + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq), *cmd2; + struct Scsi_Host *shost = cmd->device->host; + int alloc_ms = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc); + int timeout_ms = jiffies_to_msecs(rq->timeout); + const char *list_info = NULL; + char buf[80] = "(?)"; + + spin_lock_irq(shost->host_lock); + list_for_each_entry(cmd2, &shost->eh_abort_list, eh_entry) { + if (cmd == cmd2) { + list_info = "on eh_abort_list"; + goto unlock; + } + } + list_for_each_entry(cmd2, &shost->eh_cmd_q, eh_entry) { + if (cmd == cmd2) { + list_info = "on eh_cmd_q"; + goto unlock; + } + } +unlock: + spin_unlock_irq(shost->host_lock); + + __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len); + seq_printf(m, ", .cmd=%s, .retries=%d, .allowed=%d, .result = %#x, %s%s.flags=", + buf, cmd->retries, cmd->allowed, cmd->result, + list_info ? : "", list_info ? ", " : ""); + scsi_flags_show(m, cmd->flags, scsi_cmd_flags, + ARRAY_SIZE(scsi_cmd_flags)); + seq_printf(m, ", .timeout=%d.%03d, allocated %d.%03d s ago", + timeout_ms / 1000, timeout_ms % 1000, + alloc_ms / 1000, alloc_ms % 1000); +} diff --git a/drivers/scsi/scsi_debugfs.h b/drivers/scsi/scsi_debugfs.h new file mode 100644 index 000000000..d125d1bd4 --- /dev/null +++ b/drivers/scsi/scsi_debugfs.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +struct request; +struct seq_file; + +void scsi_show_rq(struct seq_file *m, struct request *rq); diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c new file mode 100644 index 000000000..3fcaf10a9 --- /dev/null +++ b/drivers/scsi/scsi_devinfo.c @@ -0,0 +1,886 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "scsi_priv.h" + + +/* + * scsi_dev_info_list: structure to hold black/white listed devices. + */ +struct scsi_dev_info_list { + struct list_head dev_info_list; + char vendor[8]; + char model[16]; + blist_flags_t flags; + unsigned compatible; /* for use with scsi_static_device_list entries */ +}; + +struct scsi_dev_info_list_table { + struct list_head node; /* our node for being on the master list */ + struct list_head scsi_dev_info_list; /* head of dev info list */ + const char *name; /* name of list for /proc (NULL for global) */ + int key; /* unique numeric identifier */ +}; + + +static blist_flags_t scsi_default_dev_flags; +static LIST_HEAD(scsi_dev_info_list); +static char scsi_dev_flags[256]; + +/* + * scsi_static_device_list: deprecated list of devices that require + * settings that differ from the default, includes black-listed (broken) + * devices. The entries here are added to the tail of scsi_dev_info_list + * via scsi_dev_info_list_init. + * + * Do not add to this list, use the command line or proc interface to add + * to the scsi_dev_info_list. This table will eventually go away. + */ +static struct { + char *vendor; + char *model; + char *revision; /* revision known to be bad, unused */ + blist_flags_t flags; +} scsi_static_device_list[] __initdata = { + /* + * The following devices are known not to tolerate a lun != 0 scan + * for one reason or another. Some will respond to all luns, + * others will lock up. 
+ */ + {"Aashima", "IMAGERY 2400SP", "1.03", BLIST_NOLUN}, /* locks up */ + {"CHINON", "CD-ROM CDS-431", "H42", BLIST_NOLUN}, /* locks up */ + {"CHINON", "CD-ROM CDS-535", "Q14", BLIST_NOLUN}, /* locks up */ + {"DENON", "DRD-25X", "V", BLIST_NOLUN}, /* locks up */ + {"HITACHI", "DK312C", "CM81", BLIST_NOLUN}, /* responds to all lun */ + {"HITACHI", "DK314C", "CR21", BLIST_NOLUN}, /* responds to all lun */ + {"IBM", "2104-DU3", NULL, BLIST_NOLUN}, /* locks up */ + {"IBM", "2104-TU3", NULL, BLIST_NOLUN}, /* locks up */ + {"IMS", "CDD521/10", "2.06", BLIST_NOLUN}, /* locks up */ + {"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN}, /* locks up */ + {"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN}, /* locks up */ + {"MAXTOR", "MXT-1240S", "I1.2", BLIST_NOLUN}, /* locks up */ + {"MAXTOR", "XT-4170S", "B5A", BLIST_NOLUN}, /* locks up */ + {"MAXTOR", "XT-8760S", "B7B", BLIST_NOLUN}, /* locks up */ + {"MEDIAVIS", "RENO CD-ROMX2A", "2.03", BLIST_NOLUN}, /* responds to all lun */ + {"MICROTEK", "ScanMakerIII", "2.30", BLIST_NOLUN}, /* responds to all lun */ + {"NEC", "CD-ROM DRIVE:841", "1.0", BLIST_NOLUN},/* locks up */ + {"PHILIPS", "PCA80SC", "V4-2", BLIST_NOLUN}, /* responds to all lun */ + {"RODIME", "RO3000S", "2.33", BLIST_NOLUN}, /* locks up */ + {"SUN", "SENA", NULL, BLIST_NOLUN}, /* responds to all luns */ + /* + * The following causes a failed REQUEST SENSE on lun 1 for + * aha152x controller, which causes SCSI code to reset bus. + */ + {"SANYO", "CRD-250S", "1.20", BLIST_NOLUN}, + /* + * The following causes a failed REQUEST SENSE on lun 1 for + * aha152x controller, which causes SCSI code to reset bus. + */ + {"SEAGATE", "ST157N", "\004|j", BLIST_NOLUN}, + {"SEAGATE", "ST296", "921", BLIST_NOLUN}, /* responds to all lun */ + {"SEAGATE", "ST1581", "6538", BLIST_NOLUN}, /* responds to all lun */ + {"SONY", "CD-ROM CDU-541", "4.3d", BLIST_NOLUN}, + {"SONY", "CD-ROM CDU-55S", "1.0i", BLIST_NOLUN}, + {"SONY", "CD-ROM CDU-561", "1.7x", BLIST_NOLUN}, + {"SONY", "CD-ROM CDU-8012", NULL, BLIST_NOLUN}, + {"SONY", "SDT-5000", "3.17", BLIST_SELECT_NO_ATN}, + {"TANDBERG", "TDC 3600", "U07", BLIST_NOLUN}, /* locks up */ + {"TEAC", "CD-R55S", "1.0H", BLIST_NOLUN}, /* locks up */ + /* + * The following causes a failed REQUEST SENSE on lun 1 for + * seagate controller, which causes SCSI code to reset bus. + */ + {"TEAC", "CD-ROM", "1.06", BLIST_NOLUN}, + {"TEAC", "MT-2ST/45S2-27", "RV M", BLIST_NOLUN}, /* responds to all lun */ + /* + * The following causes a failed REQUEST SENSE on lun 1 for + * seagate controller, which causes SCSI code to reset bus. 
+ */ + {"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */ + {"HP", "C1790A", NULL, BLIST_NOLUN}, /* scanjet iip */ + {"HP", "C2500A", NULL, BLIST_NOLUN}, /* scanjet iicx */ + {"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN}, /* locks up */ + {"MICROTEK", "ScanMaker II", "5.61", BLIST_NOLUN}, /* responds to all lun */ + {"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN}, /* locks up */ + {"NEC", "D3856", "0009", BLIST_NOLUN}, + {"QUANTUM", "LPS525S", "3110", BLIST_NOLUN}, /* locks up */ + {"QUANTUM", "PD1225S", "3110", BLIST_NOLUN}, /* locks up */ + {"QUANTUM", "FIREBALL ST4.3S", "0F0C", BLIST_NOLUN}, /* locks up */ + {"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */ + {"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */ + {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN | BLIST_BORKEN}, + {"transtec", "T5008", "0001", BLIST_NOREPORTLUN }, + {"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */ + {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */ + {"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */ + {"YAMAHA", "CRW6416S", "1.0c", BLIST_NOLUN}, /* locks up */ + {"", "Scanner", "1.80", BLIST_NOLUN}, /* responds to all lun */ + + /* + * Other types of devices that have special flags. + * Note that all USB devices should have the BLIST_INQUIRY_36 flag. + */ + {"3PARdata", "VV", NULL, BLIST_REPORTLUN2}, + {"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN}, + {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN}, + {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES | BLIST_NO_VPD_SIZE}, + {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN}, + {"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36}, + {"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN}, + {"BROWNIE", "1600U3P", NULL, BLIST_NOREPORTLUN}, + {"CANON", "IPUBJD", NULL, BLIST_SPARSELUN}, + {"CBOX3", "USB Storage-SMC", "300A", BLIST_FORCELUN | BLIST_INQUIRY_36}, + {"CMD", "CRA-7280", NULL, BLIST_SPARSELUN}, /* CMD RAID Controller */ + {"CNSI", "G7324", NULL, BLIST_SPARSELUN}, /* Chaparral G7324 RAID */ + {"CNSi", "G8324", NULL, BLIST_SPARSELUN}, /* Chaparral G8324 RAID */ + {"COMPAQ", "ARRAY CONTROLLER", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | + BLIST_MAX_512 | BLIST_REPORTLUN2}, /* Compaq RA4x00 */ + {"COMPAQ", "LOGICAL VOLUME", NULL, BLIST_FORCELUN | BLIST_MAX_512}, /* Compaq RA4x00 */ + {"COMPAQ", "CR3500", NULL, BLIST_FORCELUN}, + {"COMPAQ", "MSA1000", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD}, + {"COMPAQ", "MSA1000 VOLUME", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD}, + {"COMPAQ", "HSV110", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, + {"DDN", "SAN DataDirector", "*", BLIST_SPARSELUN}, + {"DEC", "HSG80", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, + {"DELL", "PV660F", NULL, BLIST_SPARSELUN}, + {"DELL", "PV660F PSEUDO", NULL, BLIST_SPARSELUN}, + {"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */ + {"DELL", "PV530F", NULL, BLIST_SPARSELUN}, + {"DELL", "PERCRAID", NULL, BLIST_FORCELUN}, + {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, storage on LUN 0 */ + {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, no storage on LUN 0 */ + {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, + {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | + BLIST_REPORTLUN2 | BLIST_RETRY_ITF}, + {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN}, + {"easyRAID", "16P", NULL, BLIST_NOREPORTLUN}, + {"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN}, + {"easyRAID", "F8", NULL, BLIST_NOREPORTLUN}, + {"FSC", "CentricStor", "*", BLIST_SPARSELUN | 
BLIST_LARGELUN}, + {"FUJITSU", "ETERNUS_DXM", "*", BLIST_RETRY_ASC_C1}, + {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36}, + {"Generic", "USB Storage-SMC", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, /* FW: 0180 and 0207 */ + {"Generic", "Ultra HS-SD/MMC", "2.09", BLIST_IGN_MEDIA_CHANGE | BLIST_INQUIRY_36}, + {"HITACHI", "DF400", "*", BLIST_REPORTLUN2}, + {"HITACHI", "DF500", "*", BLIST_REPORTLUN2}, + {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2}, + {"HITACHI", "HUS1530", "*", BLIST_NO_DIF}, + {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, + {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */ + {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */ + {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN}, + {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, + {"HP", "C1557A", NULL, BLIST_FORCELUN}, + {"HP", "C3323-300", "4269", BLIST_NOTQ}, + {"HP", "C5713A", NULL, BLIST_NOREPORTLUN}, + {"HP", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2}, + {"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, + {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN}, + {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, + {"IBM", "2076", NULL, BLIST_NO_VPD_SIZE}, + {"IBM", "2105", NULL, BLIST_RETRY_HWERROR}, + {"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN}, + {"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN}, + {"IOMEGA", "Io20S *F", NULL, BLIST_KEY}, + {"INSITE", "Floptical F*8I", NULL, BLIST_KEY}, + {"INSITE", "I325VM", NULL, BLIST_KEY}, + {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC}, + {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, + {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, + {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES}, + {"Marvell", "91xx Config", "1.01", BLIST_SKIP_VPD_PAGES}, + {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, + {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, + {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, + {"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN}, + {"MegaRAID", "LD", NULL, BLIST_FORCELUN}, + {"MICROP", "4110", NULL, BLIST_NOTQ}, + {"MSFT", "Virtual HD", NULL, BLIST_MAX_1024 | BLIST_NO_RSOC}, + {"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2}, + {"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN}, + {"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, + {"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, + {"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, + {"NEC", "iStorage", NULL, BLIST_REPORTLUN2}, + {"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, + {"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, + {"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, + {"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, + {"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, + {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, + {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, + {"Promise", "", NULL, BLIST_SPARSELUN}, + {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES}, + {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, + {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024}, + {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, + {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, + {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN}, + {"SEAGATE", "ST34555N", "0930", BLIST_NOTQ}, /* Chokes on tagged 
INQUIRY */ + {"SEAGATE", "ST3390N", "9546", BLIST_NOTQ}, + {"SEAGATE", "ST900MM0006", NULL, BLIST_SKIP_VPD_PAGES}, + {"SGI", "RAID3", "*", BLIST_SPARSELUN}, + {"SGI", "RAID5", "*", BLIST_SPARSELUN}, + {"SGI", "TP9100", "*", BLIST_REPORTLUN2}, + {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"SKhynix", "H28U74301AMR", NULL, BLIST_SKIP_VPD_PAGES}, + {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"STK", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"FUJITSU", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES | + BLIST_INQUIRY_36}, + {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, + {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, + {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ + {"ST650211", "CF", NULL, BLIST_RETRY_HWERROR}, + {"SUN", "T300", "*", BLIST_SPARSELUN}, + {"SUN", "T4", "*", BLIST_SPARSELUN}, + {"Tornado-", "F4", "*", BLIST_NOREPORTLUN}, + {"TOSHIBA", "CDROM", NULL, BLIST_ISROM}, + {"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM}, + {"Traxdata", "CDR4120", NULL, BLIST_NOLUN}, /* locks up */ + {"USB2.0", "SMARTMEDIA/XD", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, + {"WangDAT", "Model 2600", "01.7", BLIST_SELECT_NO_ATN}, + {"WangDAT", "Model 3200", "02.2", BLIST_SELECT_NO_ATN}, + {"WangDAT", "Model 1300", "02.4", BLIST_SELECT_NO_ATN}, + {"WDC WD25", "00JB-00FUA0", NULL, BLIST_NOREPORTLUN}, + {"XYRATEX", "RS", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, + {"Zzyzx", "RocketStor 500S", NULL, BLIST_SPARSELUN}, + {"Zzyzx", "RocketStor 2000", NULL, BLIST_SPARSELUN}, + { NULL, NULL, NULL, 0 }, +}; + +static struct scsi_dev_info_list_table *scsi_devinfo_lookup_by_key(int key) +{ + struct scsi_dev_info_list_table *devinfo_table; + int found = 0; + + list_for_each_entry(devinfo_table, &scsi_dev_info_list, node) + if (devinfo_table->key == key) { + found = 1; + break; + } + if (!found) + return ERR_PTR(-EINVAL); + + return devinfo_table; +} + +/* + * scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into + * devinfo vendor and model strings. + */ +static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length, + char *from, int compatible) +{ + size_t from_length; + + from_length = strlen(from); + /* This zero-pads the destination */ + strncpy(to, from, to_length); + if (from_length < to_length && !compatible) { + /* + * space pad the string if it is short. + */ + memset(&to[from_length], ' ', to_length - from_length); + } + if (from_length > to_length) + printk(KERN_WARNING "%s: %s string '%s' is too long\n", + __func__, name, from); +} + +/** + * scsi_dev_info_list_add - add one dev_info list entry. + * @compatible: if true, null terminate short strings. Otherwise space pad. + * @vendor: vendor string + * @model: model (product) string + * @strflags: integer string + * @flags: if strflags NULL, use this flag value + * + * Description: + * Create and add one dev_info entry for @vendor, @model, @strflags or + * @flag. If @compatible, add to the tail of the list, do not space + * pad, and set devinfo->compatible. The scsi_static_device_list entries + * are added with @compatible 1 and @clfags NULL. 
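As an illustrative aside (not something this patch adds): the padding rule in scsi_strcpy_devinfo() above is easy to misread, so here is a minimal user-space sketch of it. The 8-byte field width and the printf() are only for demonstration; the kernel routine additionally warns when the source string overflows the field.

#include <stdio.h>
#include <string.h>

/* Sketch of scsi_strcpy_devinfo()'s padding: strncpy() NUL-pads short
 * strings, and non-"compatible" entries are then space-padded out to the
 * full field width so they can be memcmp()'d against raw INQUIRY data. */
static void pad_field(char *to, size_t to_len, const char *from, int compatible)
{
	size_t from_len = strlen(from);

	strncpy(to, from, to_len);		/* zero-pads the remainder */
	if (from_len < to_len && !compatible)
		memset(to + from_len, ' ', to_len - from_len);
}

int main(void)
{
	char vendor[8];

	pad_field(vendor, sizeof(vendor), "HP", 0);
	printf("[%.8s]\n", vendor);		/* prints "[HP      ]" */
	return 0;
}

With compatible set, the space-padding step is skipped and the NUL padding is kept, which is what the exact-vendor/prefix-model "compatible" matching in scsi_dev_info_list_find() relies on.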
+ * + * Returns: 0 OK, -error on failure. + **/ +static int scsi_dev_info_list_add(int compatible, char *vendor, char *model, + char *strflags, blist_flags_t flags) +{ + return scsi_dev_info_list_add_keyed(compatible, vendor, model, + strflags, flags, + SCSI_DEVINFO_GLOBAL); +} + +/** + * scsi_dev_info_list_add_keyed - add one dev_info list entry. + * @compatible: if true, null terminate short strings. Otherwise space pad. + * @vendor: vendor string + * @model: model (product) string + * @strflags: integer string + * @flags: if strflags NULL, use this flag value + * @key: specify list to use + * + * Description: + * Create and add one dev_info entry for @vendor, @model, + * @strflags or @flag in list specified by @key. If @compatible, + * add to the tail of the list, do not space pad, and set + * devinfo->compatible. The scsi_static_device_list entries are + * added with @compatible 1 and @clfags NULL. + * + * Returns: 0 OK, -error on failure. + **/ +int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model, + char *strflags, blist_flags_t flags, + enum scsi_devinfo_key key) +{ + struct scsi_dev_info_list *devinfo; + struct scsi_dev_info_list_table *devinfo_table = + scsi_devinfo_lookup_by_key(key); + + if (IS_ERR(devinfo_table)) + return PTR_ERR(devinfo_table); + + devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL); + if (!devinfo) { + printk(KERN_ERR "%s: no memory\n", __func__); + return -ENOMEM; + } + + scsi_strcpy_devinfo("vendor", devinfo->vendor, sizeof(devinfo->vendor), + vendor, compatible); + scsi_strcpy_devinfo("model", devinfo->model, sizeof(devinfo->model), + model, compatible); + + if (strflags) { + unsigned long long val; + int ret = kstrtoull(strflags, 0, &val); + + if (ret != 0) { + kfree(devinfo); + return ret; + } + flags = (__force blist_flags_t)val; + } + if (flags & __BLIST_UNUSED_MASK) { + pr_err("scsi_devinfo (%s:%s): unsupported flags 0x%llx", + vendor, model, flags & __BLIST_UNUSED_MASK); + kfree(devinfo); + return -EINVAL; + } + devinfo->flags = flags; + devinfo->compatible = compatible; + + if (compatible) + list_add_tail(&devinfo->dev_info_list, + &devinfo_table->scsi_dev_info_list); + else + list_add(&devinfo->dev_info_list, + &devinfo_table->scsi_dev_info_list); + + return 0; +} +EXPORT_SYMBOL(scsi_dev_info_list_add_keyed); + +/** + * scsi_dev_info_list_find - find a matching dev_info list entry. + * @vendor: full vendor string + * @model: full model (product) string + * @key: specify list to use + * + * Description: + * Finds the first dev_info entry matching @vendor, @model + * in list specified by @key. + * + * Returns: pointer to matching entry, or ERR_PTR on failure. + **/ +static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, + const char *model, enum scsi_devinfo_key key) +{ + struct scsi_dev_info_list *devinfo; + struct scsi_dev_info_list_table *devinfo_table = + scsi_devinfo_lookup_by_key(key); + size_t vmax, mmax, mlen; + const char *vskip, *mskip; + + if (IS_ERR(devinfo_table)) + return (struct scsi_dev_info_list *) devinfo_table; + + /* Prepare for "compatible" matches */ + + /* + * XXX why skip leading spaces? If an odd INQUIRY + * value, that should have been part of the + * scsi_static_device_list[] entry, such as " FOO" + * rather than "FOO". Since this code is already + * here, and we don't know what device it is + * trying to work with, leave it as-is. 
+ */ + vmax = sizeof(devinfo->vendor); + vskip = vendor; + while (vmax > 0 && *vskip == ' ') { + vmax--; + vskip++; + } + /* Also skip trailing spaces */ + while (vmax > 0 && vskip[vmax - 1] == ' ') + --vmax; + + mmax = sizeof(devinfo->model); + mskip = model; + while (mmax > 0 && *mskip == ' ') { + mmax--; + mskip++; + } + while (mmax > 0 && mskip[mmax - 1] == ' ') + --mmax; + + list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list, + dev_info_list) { + if (devinfo->compatible) { + /* + * vendor strings must be an exact match + */ + if (vmax != strnlen(devinfo->vendor, + sizeof(devinfo->vendor)) || + memcmp(devinfo->vendor, vskip, vmax)) + continue; + + /* + * @model specifies the full string, and + * must be larger or equal to devinfo->model + */ + mlen = strnlen(devinfo->model, sizeof(devinfo->model)); + if (mmax < mlen || memcmp(devinfo->model, mskip, mlen)) + continue; + return devinfo; + } else { + if (!memcmp(devinfo->vendor, vendor, + sizeof(devinfo->vendor)) && + !memcmp(devinfo->model, model, + sizeof(devinfo->model))) + return devinfo; + } + } + + return ERR_PTR(-ENOENT); +} + +/** + * scsi_dev_info_list_del_keyed - remove one dev_info list entry. + * @vendor: vendor string + * @model: model (product) string + * @key: specify list to use + * + * Description: + * Remove and destroy one dev_info entry for @vendor, @model + * in list specified by @key. + * + * Returns: 0 OK, -error on failure. + **/ +int scsi_dev_info_list_del_keyed(char *vendor, char *model, + enum scsi_devinfo_key key) +{ + struct scsi_dev_info_list *found; + + found = scsi_dev_info_list_find(vendor, model, key); + if (IS_ERR(found)) + return PTR_ERR(found); + + list_del(&found->dev_info_list); + kfree(found); + return 0; +} +EXPORT_SYMBOL(scsi_dev_info_list_del_keyed); + +/** + * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list. + * @dev_list: string of device flags to add + * + * Description: + * Parse dev_list, and add entries to the scsi_dev_info_list. + * dev_list is of the form "vendor:product:flag,vendor:product:flag". + * dev_list is modified via strsep. Can be called for command line + * addition, for proc or mabye a sysfs interface. + * + * Returns: 0 if OK, -error on failure. + **/ +static int scsi_dev_info_list_add_str(char *dev_list) +{ + char *vendor, *model, *strflags, *next; + char *next_check; + int res = 0; + + next = dev_list; + if (next && next[0] == '"') { + /* + * Ignore both the leading and trailing quote. + */ + next++; + next_check = ",\""; + } else { + next_check = ","; + } + + /* + * For the leading and trailing '"' case, the for loop comes + * through the last time with vendor[0] == '\0'. + */ + for (vendor = strsep(&next, ":"); vendor && (vendor[0] != '\0') + && (res == 0); vendor = strsep(&next, ":")) { + strflags = NULL; + model = strsep(&next, ":"); + if (model) + strflags = strsep(&next, next_check); + if (!model || !strflags) { + printk(KERN_ERR "%s: bad dev info string '%s' '%s'" + " '%s'\n", __func__, vendor, model, + strflags); + res = -EINVAL; + } else + res = scsi_dev_info_list_add(0 /* compatible */, vendor, + model, strflags, 0); + } + return res; +} + +/** + * scsi_get_device_flags - get device specific flags from the dynamic + * device list. 
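Another illustrative sketch (not from the patch): the dev_list format described in the scsi_dev_info_list_add_str() kernel-doc above is easiest to see next to the call it boils down to. The vendor, model and 0x1 flag value below are invented for the example, and the includes assume the sketch sits under drivers/scsi so it can use the scsi_priv.h prototypes.

#include <scsi/scsi_devinfo.h>
#include "scsi_priv.h"

/*
 * scsi_dev_flags=HITACHI:OPEN-V:0x1 is split with strsep() on ':' (and on
 * ',' between entries); the flag field is parsed by kstrtoull() with base 0,
 * so "1", "01" and "0x1" are all accepted.  The net effect is roughly the
 * call below: a non-"compatible" entry on the global list, whose vendor and
 * model fields are space-padded and compared at full field width.
 */
static int example_add_devinfo_entry(void)
{
	return scsi_dev_info_list_add_keyed(0, "HITACHI", "OPEN-V", NULL,
					    (__force blist_flags_t)0x1,
					    SCSI_DEVINFO_GLOBAL);
}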
+ * @sdev: &scsi_device to get flags for + * @vendor: vendor name + * @model: model name + * + * Description: + * Search the global scsi_dev_info_list (specified by list zero) + * for an entry matching @vendor and @model, if found, return the + * matching flags value, else return the host or global default + * settings. Called during scan time. + **/ +blist_flags_t scsi_get_device_flags(struct scsi_device *sdev, + const unsigned char *vendor, + const unsigned char *model) +{ + return scsi_get_device_flags_keyed(sdev, vendor, model, + SCSI_DEVINFO_GLOBAL); +} + + +/** + * scsi_get_device_flags_keyed - get device specific flags from the dynamic device list + * @sdev: &scsi_device to get flags for + * @vendor: vendor name + * @model: model name + * @key: list to look up + * + * Description: + * Search the scsi_dev_info_list specified by @key for an entry + * matching @vendor and @model, if found, return the matching + * flags value, else return the host or global default settings. + * Called during scan time. + **/ +blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev, + const unsigned char *vendor, + const unsigned char *model, + enum scsi_devinfo_key key) +{ + struct scsi_dev_info_list *devinfo; + + devinfo = scsi_dev_info_list_find(vendor, model, key); + if (!IS_ERR(devinfo)) + return devinfo->flags; + + /* key or device not found: return nothing */ + if (key != SCSI_DEVINFO_GLOBAL) + return 0; + + /* except for the global list, where we have an exception */ + if (sdev->sdev_bflags) + return sdev->sdev_bflags; + + return scsi_default_dev_flags; +} +EXPORT_SYMBOL(scsi_get_device_flags_keyed); + +#ifdef CONFIG_SCSI_PROC_FS +struct double_list { + struct list_head *top; + struct list_head *bottom; +}; + +static int devinfo_seq_show(struct seq_file *m, void *v) +{ + struct double_list *dl = v; + struct scsi_dev_info_list_table *devinfo_table = + list_entry(dl->top, struct scsi_dev_info_list_table, node); + struct scsi_dev_info_list *devinfo = + list_entry(dl->bottom, struct scsi_dev_info_list, + dev_info_list); + + if (devinfo_table->scsi_dev_info_list.next == dl->bottom && + devinfo_table->name) + seq_printf(m, "[%s]:\n", devinfo_table->name); + + seq_printf(m, "'%.8s' '%.16s' 0x%llx\n", + devinfo->vendor, devinfo->model, devinfo->flags); + return 0; +} + +static void *devinfo_seq_start(struct seq_file *m, loff_t *ppos) +{ + struct double_list *dl = kmalloc(sizeof(*dl), GFP_KERNEL); + loff_t pos = *ppos; + + if (!dl) + return NULL; + + list_for_each(dl->top, &scsi_dev_info_list) { + struct scsi_dev_info_list_table *devinfo_table = + list_entry(dl->top, struct scsi_dev_info_list_table, + node); + list_for_each(dl->bottom, &devinfo_table->scsi_dev_info_list) + if (pos-- == 0) + return dl; + } + + kfree(dl); + return NULL; +} + +static void *devinfo_seq_next(struct seq_file *m, void *v, loff_t *ppos) +{ + struct double_list *dl = v; + struct scsi_dev_info_list_table *devinfo_table = + list_entry(dl->top, struct scsi_dev_info_list_table, node); + + ++*ppos; + dl->bottom = dl->bottom->next; + while (&devinfo_table->scsi_dev_info_list == dl->bottom) { + dl->top = dl->top->next; + if (dl->top == &scsi_dev_info_list) { + kfree(dl); + return NULL; + } + devinfo_table = list_entry(dl->top, + struct scsi_dev_info_list_table, + node); + dl->bottom = devinfo_table->scsi_dev_info_list.next; + } + + return dl; +} + +static void devinfo_seq_stop(struct seq_file *m, void *v) +{ + kfree(v); +} + +static const struct seq_operations scsi_devinfo_seq_ops = { + .start = devinfo_seq_start, + .next = 
devinfo_seq_next, + .stop = devinfo_seq_stop, + .show = devinfo_seq_show, +}; + +static int proc_scsi_devinfo_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &scsi_devinfo_seq_ops); +} + +/* + * proc_scsi_dev_info_write - allow additions to scsi_dev_info_list via /proc. + * + * Description: Adds a black/white list entry for vendor and model with an + * integer value of flag to the scsi device info list. + * To use, echo "vendor:model:flag" > /proc/scsi/device_info + */ +static ssize_t proc_scsi_devinfo_write(struct file *file, + const char __user *buf, + size_t length, loff_t *ppos) +{ + char *buffer; + ssize_t err = length; + + if (!buf || length>PAGE_SIZE) + return -EINVAL; + if (!(buffer = (char *) __get_free_page(GFP_KERNEL))) + return -ENOMEM; + if (copy_from_user(buffer, buf, length)) { + err =-EFAULT; + goto out; + } + + if (length < PAGE_SIZE) + buffer[length] = '\0'; + else if (buffer[PAGE_SIZE-1]) { + err = -EINVAL; + goto out; + } + + scsi_dev_info_list_add_str(buffer); + +out: + free_page((unsigned long)buffer); + return err; +} + +static const struct proc_ops scsi_devinfo_proc_ops = { + .proc_open = proc_scsi_devinfo_open, + .proc_read = seq_read, + .proc_write = proc_scsi_devinfo_write, + .proc_lseek = seq_lseek, + .proc_release = seq_release, +}; +#endif /* CONFIG_SCSI_PROC_FS */ + +module_param_string(dev_flags, scsi_dev_flags, sizeof(scsi_dev_flags), 0); +MODULE_PARM_DESC(dev_flags, + "Given scsi_dev_flags=vendor:model:flags[,v:m:f] add black/white" + " list entries for vendor and model with an integer value of flags" + " to the scsi device info list"); + +module_param_named(default_dev_flags, scsi_default_dev_flags, ullong, 0644); +MODULE_PARM_DESC(default_dev_flags, + "scsi default device flag uint64_t value"); + +/** + * scsi_exit_devinfo - remove /proc/scsi/device_info & the scsi_dev_info_list + **/ +void scsi_exit_devinfo(void) +{ +#ifdef CONFIG_SCSI_PROC_FS + remove_proc_entry("scsi/device_info", NULL); +#endif + + scsi_dev_info_remove_list(SCSI_DEVINFO_GLOBAL); +} + +/** + * scsi_dev_info_add_list - add a new devinfo list + * @key: key of the list to add + * @name: Name of the list to add (for /proc/scsi/device_info) + * + * Adds the requested list, returns zero on success, -EEXIST if the + * key is already registered to a list, or other error on failure. + */ +int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name) +{ + struct scsi_dev_info_list_table *devinfo_table = + scsi_devinfo_lookup_by_key(key); + + if (!IS_ERR(devinfo_table)) + /* list already exists */ + return -EEXIST; + + devinfo_table = kmalloc(sizeof(*devinfo_table), GFP_KERNEL); + + if (!devinfo_table) + return -ENOMEM; + + INIT_LIST_HEAD(&devinfo_table->node); + INIT_LIST_HEAD(&devinfo_table->scsi_dev_info_list); + devinfo_table->name = name; + devinfo_table->key = key; + list_add_tail(&devinfo_table->node, &scsi_dev_info_list); + + return 0; +} +EXPORT_SYMBOL(scsi_dev_info_add_list); + +/** + * scsi_dev_info_remove_list - destroy an added devinfo list + * @key: key of the list to destroy + * + * Iterates over the entire list first, freeing all the values, then + * frees the list itself. Returns 0 on success or -EINVAL if the key + * can't be found. 
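A further sketch (again not from the patch) tying together scsi_dev_info_add_list(), scsi_dev_info_list_add_keyed() and scsi_dev_info_remove_list() for a caller inside drivers/scsi that keeps its own keyed list. SCSI_DEVINFO_SPI is assumed to come from the enum scsi_devinfo_key declared in scsi_priv.h (not shown in this hunk), and the "VENDOR"/"MODEL" strings are placeholders.

#include <scsi/scsi_devinfo.h>
#include "scsi_priv.h"

static int example_register_private_blist(void)
{
	int err;

	err = scsi_dev_info_add_list(SCSI_DEVINFO_SPI, "SPI");
	if (err)
		return err;	/* -EEXIST if the key is already registered */

	/* entries added here are found later via scsi_get_device_flags_keyed() */
	err = scsi_dev_info_list_add_keyed(0, "VENDOR", "MODEL", NULL,
					   BLIST_NOLUN, SCSI_DEVINFO_SPI);
	if (err)
		scsi_dev_info_remove_list(SCSI_DEVINFO_SPI);

	return err;
}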
+ */ +int scsi_dev_info_remove_list(enum scsi_devinfo_key key) +{ + struct list_head *lh, *lh_next; + struct scsi_dev_info_list_table *devinfo_table = + scsi_devinfo_lookup_by_key(key); + + if (IS_ERR(devinfo_table)) + /* no such list */ + return -EINVAL; + + /* remove from the master list */ + list_del(&devinfo_table->node); + + list_for_each_safe(lh, lh_next, &devinfo_table->scsi_dev_info_list) { + struct scsi_dev_info_list *devinfo; + + devinfo = list_entry(lh, struct scsi_dev_info_list, + dev_info_list); + kfree(devinfo); + } + kfree(devinfo_table); + + return 0; +} +EXPORT_SYMBOL(scsi_dev_info_remove_list); + +/** + * scsi_init_devinfo - set up the dynamic device list. + * + * Description: + * Add command line entries from scsi_dev_flags, then add + * scsi_static_device_list entries to the scsi device info list. + */ +int __init scsi_init_devinfo(void) +{ +#ifdef CONFIG_SCSI_PROC_FS + struct proc_dir_entry *p; +#endif + int error, i; + + error = scsi_dev_info_add_list(SCSI_DEVINFO_GLOBAL, NULL); + if (error) + return error; + + error = scsi_dev_info_list_add_str(scsi_dev_flags); + if (error) + goto out; + + for (i = 0; scsi_static_device_list[i].vendor; i++) { + error = scsi_dev_info_list_add(1 /* compatibile */, + scsi_static_device_list[i].vendor, + scsi_static_device_list[i].model, + NULL, + scsi_static_device_list[i].flags); + if (error) + goto out; + } + +#ifdef CONFIG_SCSI_PROC_FS + p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_ops); + if (!p) { + error = -ENOMEM; + goto out; + } +#endif /* CONFIG_SCSI_PROC_FS */ + + out: + if (error) + scsi_exit_devinfo(); + return error; +} diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c new file mode 100644 index 000000000..7b56e00c7 --- /dev/null +++ b/drivers/scsi/scsi_dh.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SCSI device handler infrastructure. 
+ * + * Copyright IBM Corporation, 2007 + * Authors: + * Chandra Seetharaman + * Mike Anderson + */ + +#include +#include +#include +#include "scsi_priv.h" + +static DEFINE_SPINLOCK(list_lock); +static LIST_HEAD(scsi_dh_list); + +struct scsi_dh_blist { + const char *vendor; + const char *model; + const char *driver; +}; + +static const struct scsi_dh_blist scsi_dh_blist[] = { + {"DGC", "RAID", "emc" }, + {"DGC", "DISK", "emc" }, + {"DGC", "VRAID", "emc" }, + + {"COMPAQ", "MSA1000 VOLUME", "hp_sw" }, + {"COMPAQ", "HSV110", "hp_sw" }, + {"HP", "HSV100", "hp_sw"}, + {"DEC", "HSG80", "hp_sw"}, + + {"IBM", "1722", "rdac", }, + {"IBM", "1724", "rdac", }, + {"IBM", "1726", "rdac", }, + {"IBM", "1742", "rdac", }, + {"IBM", "1745", "rdac", }, + {"IBM", "1746", "rdac", }, + {"IBM", "1813", "rdac", }, + {"IBM", "1814", "rdac", }, + {"IBM", "1815", "rdac", }, + {"IBM", "1818", "rdac", }, + {"IBM", "3526", "rdac", }, + {"IBM", "3542", "rdac", }, + {"IBM", "3552", "rdac", }, + {"SGI", "TP9300", "rdac", }, + {"SGI", "TP9400", "rdac", }, + {"SGI", "TP9500", "rdac", }, + {"SGI", "TP9700", "rdac", }, + {"SGI", "IS", "rdac", }, + {"STK", "OPENstorage", "rdac", }, + {"STK", "FLEXLINE 380", "rdac", }, + {"STK", "BladeCtlr", "rdac", }, + {"SUN", "CSM", "rdac", }, + {"SUN", "LCSM100", "rdac", }, + {"SUN", "STK6580_6780", "rdac", }, + {"SUN", "SUN_6180", "rdac", }, + {"SUN", "ArrayStorage", "rdac", }, + {"DELL", "MD3", "rdac", }, + {"NETAPP", "INF-01-00", "rdac", }, + {"LSI", "INF-01-00", "rdac", }, + {"ENGENIO", "INF-01-00", "rdac", }, + {"LENOVO", "DE_Series", "rdac", }, + {"FUJITSU", "ETERNUS_AHB", "rdac", }, + {NULL, NULL, NULL }, +}; + +static const char * +scsi_dh_find_driver(struct scsi_device *sdev) +{ + const struct scsi_dh_blist *b; + + if (scsi_device_tpgs(sdev)) + return "alua"; + + for (b = scsi_dh_blist; b->vendor; b++) { + if (!strncmp(sdev->vendor, b->vendor, strlen(b->vendor)) && + !strncmp(sdev->model, b->model, strlen(b->model))) { + return b->driver; + } + } + return NULL; +} + + +static struct scsi_device_handler *__scsi_dh_lookup(const char *name) +{ + struct scsi_device_handler *tmp, *found = NULL; + + spin_lock(&list_lock); + list_for_each_entry(tmp, &scsi_dh_list, list) { + if (!strncmp(tmp->name, name, strlen(tmp->name))) { + found = tmp; + break; + } + } + spin_unlock(&list_lock); + return found; +} + +static struct scsi_device_handler *scsi_dh_lookup(const char *name) +{ + struct scsi_device_handler *dh; + + if (!name || strlen(name) == 0) + return NULL; + + dh = __scsi_dh_lookup(name); + if (!dh) { + request_module("scsi_dh_%s", name); + dh = __scsi_dh_lookup(name); + } + + return dh; +} + +/* + * scsi_dh_handler_attach - Attach a device handler to a device + * @sdev - SCSI device the device handler should attach to + * @scsi_dh - The device handler to attach + */ +static int scsi_dh_handler_attach(struct scsi_device *sdev, + struct scsi_device_handler *scsi_dh) +{ + int error, ret = 0; + + if (!try_module_get(scsi_dh->module)) + return -EINVAL; + + error = scsi_dh->attach(sdev); + if (error != SCSI_DH_OK) { + switch (error) { + case SCSI_DH_NOMEM: + ret = -ENOMEM; + break; + case SCSI_DH_RES_TEMP_UNAVAIL: + ret = -EAGAIN; + break; + case SCSI_DH_DEV_UNSUPP: + case SCSI_DH_NOSYS: + ret = -ENODEV; + break; + default: + ret = -EINVAL; + break; + } + if (ret != -ENODEV) + sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%d)\n", + scsi_dh->name, error); + module_put(scsi_dh->module); + } else + sdev->handler = scsi_dh; + + return ret; +} + +/* + * scsi_dh_handler_detach - Detach a device 
handler from a device + * @sdev - SCSI device the device handler should be detached from + */ +static void scsi_dh_handler_detach(struct scsi_device *sdev) +{ + sdev->handler->detach(sdev); + sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", sdev->handler->name); + module_put(sdev->handler->module); +} + +void scsi_dh_add_device(struct scsi_device *sdev) +{ + struct scsi_device_handler *devinfo = NULL; + const char *drv; + + drv = scsi_dh_find_driver(sdev); + if (drv) + devinfo = __scsi_dh_lookup(drv); + /* + * device_handler is optional, so ignore errors + * from scsi_dh_handler_attach() + */ + if (devinfo) + (void)scsi_dh_handler_attach(sdev, devinfo); +} + +void scsi_dh_release_device(struct scsi_device *sdev) +{ + if (sdev->handler) + scsi_dh_handler_detach(sdev); +} + +/* + * scsi_register_device_handler - register a device handler personality + * module. + * @scsi_dh - device handler to be registered. + * + * Returns 0 on success, -EBUSY if handler already registered. + */ +int scsi_register_device_handler(struct scsi_device_handler *scsi_dh) +{ + if (__scsi_dh_lookup(scsi_dh->name)) + return -EBUSY; + + if (!scsi_dh->attach || !scsi_dh->detach) + return -EINVAL; + + spin_lock(&list_lock); + list_add(&scsi_dh->list, &scsi_dh_list); + spin_unlock(&list_lock); + + printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name); + + return SCSI_DH_OK; +} +EXPORT_SYMBOL_GPL(scsi_register_device_handler); + +/* + * scsi_unregister_device_handler - register a device handler personality + * module. + * @scsi_dh - device handler to be unregistered. + * + * Returns 0 on success, -ENODEV if handler not registered. + */ +int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh) +{ + if (!__scsi_dh_lookup(scsi_dh->name)) + return -ENODEV; + + spin_lock(&list_lock); + list_del(&scsi_dh->list); + spin_unlock(&list_lock); + printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name); + + return SCSI_DH_OK; +} +EXPORT_SYMBOL_GPL(scsi_unregister_device_handler); + +/* + * scsi_dh_activate - activate the path associated with the scsi_device + * corresponding to the given request queue. + * Returns immediately without waiting for activation to be completed. + * @q - Request queue that is associated with the scsi_device to be + * activated. + * @fn - Function to be called upon completion of the activation. + * Function fn is called with data (below) and the error code. + * Function fn may be called from the same calling context. So, + * do not hold the lock in the caller which may be needed in fn. + * @data - data passed to the function fn upon completion. + * + */ +int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) +{ + struct scsi_device *sdev; + int err = SCSI_DH_NOSYS; + + sdev = scsi_device_from_queue(q); + if (!sdev) { + if (fn) + fn(data, err); + return err; + } + + if (!sdev->handler) + goto out_fn; + err = SCSI_DH_NOTCONN; + if (sdev->sdev_state == SDEV_CANCEL || + sdev->sdev_state == SDEV_DEL) + goto out_fn; + + err = SCSI_DH_DEV_OFFLINED; + if (sdev->sdev_state == SDEV_OFFLINE) + goto out_fn; + + if (sdev->handler->activate) + err = sdev->handler->activate(sdev, fn, data); + +out_put_device: + put_device(&sdev->sdev_gendev); + return err; + +out_fn: + if (fn) + fn(data, err); + goto out_put_device; +} +EXPORT_SYMBOL_GPL(scsi_dh_activate); + +/* + * scsi_dh_set_params - set the parameters for the device as per the + * string specified in params. 
+ * @q - Request queue that is associated with the scsi_device for + * which the parameters to be set. + * @params - parameters in the following format + * "no_of_params\0param1\0param2\0param3\0...\0" + * for example, string for 2 parameters with value 10 and 21 + * is specified as "2\010\021\0". + */ +int scsi_dh_set_params(struct request_queue *q, const char *params) +{ + struct scsi_device *sdev; + int err = -SCSI_DH_NOSYS; + + sdev = scsi_device_from_queue(q); + if (!sdev) + return err; + + if (sdev->handler && sdev->handler->set_params) + err = sdev->handler->set_params(sdev, params); + put_device(&sdev->sdev_gendev); + return err; +} +EXPORT_SYMBOL_GPL(scsi_dh_set_params); + +/* + * scsi_dh_attach - Attach device handler + * @q - Request queue that is associated with the scsi_device + * the handler should be attached to + * @name - name of the handler to attach + */ +int scsi_dh_attach(struct request_queue *q, const char *name) +{ + struct scsi_device *sdev; + struct scsi_device_handler *scsi_dh; + int err = 0; + + sdev = scsi_device_from_queue(q); + if (!sdev) + return -ENODEV; + + scsi_dh = scsi_dh_lookup(name); + if (!scsi_dh) { + err = -EINVAL; + goto out_put_device; + } + + if (sdev->handler) { + if (sdev->handler != scsi_dh) + err = -EBUSY; + goto out_put_device; + } + + err = scsi_dh_handler_attach(sdev, scsi_dh); + +out_put_device: + put_device(&sdev->sdev_gendev); + return err; +} +EXPORT_SYMBOL_GPL(scsi_dh_attach); + +/* + * scsi_dh_attached_handler_name - Get attached device handler's name + * @q - Request queue that is associated with the scsi_device + * that may have a device handler attached + * @gfp - the GFP mask used in the kmalloc() call when allocating memory + * + * Returns name of attached handler, NULL if no handler is attached. + * Caller must take care to free the returned string. + */ +const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp) +{ + struct scsi_device *sdev; + const char *handler_name = NULL; + + sdev = scsi_device_from_queue(q); + if (!sdev) + return NULL; + + if (sdev->handler) + handler_name = kstrdup(sdev->handler->name, gfp); + put_device(&sdev->sdev_gendev); + return handler_name; +} +EXPORT_SYMBOL_GPL(scsi_dh_attached_handler_name); diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c new file mode 100644 index 000000000..d983f4a0e --- /dev/null +++ b/drivers/scsi/scsi_error.c @@ -0,0 +1,2576 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * scsi_error.c Copyright (C) 1997 Eric Youngdale + * + * SCSI error/timeout handling + * Initial versions: Eric Youngdale. Based upon conversations with + * Leonard Zubkoff and David Miller at Linux Expo, + * ideas originating from all over the place. + * + * Restructured scsi_unjam_host and associated functions. + * September 04, 2002 Mike Anderson (andmike@us.ibm.com) + * + * Forward port of Russell King's (rmk@arm.linux.org.uk) changes and + * minor cleanups. + * September 30, 2002 Mike Anderson (andmike@us.ibm.com) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "scsi_priv.h" +#include "scsi_logging.h" +#include "scsi_transport_api.h" + +#include + +#include + +/* + * These should *probably* be handled by the host itself. + * Since it is allowed to sleep, it probably should. 
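One more illustrative note, reaching back to the scsi_dh_set_params() kernel-doc earlier in this hunk: its "2\010\021\0" example reads more clearly as concatenated C string literals, which avoid any octal-escape ambiguity. This is a sketch only; the commented-out call assumes a valid scsi_device.

/*
 * "no_of_params\0param1\0param2\0...\0" for two parameters with the values
 * 10 and 21.  Adjacent literals concatenate, so the array holds exactly the
 * bytes '2', NUL, '1', '0', NUL, '2', '1', NUL (sizeof == 8).
 */
static const char example_dh_params[] = "2\0" "10\0" "21";

/* err = scsi_dh_set_params(sdev->request_queue, example_dh_params); */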
+ */ +#define BUS_RESET_SETTLE_TIME (10) +#define HOST_RESET_SETTLE_TIME (10) + +static int scsi_eh_try_stu(struct scsi_cmnd *scmd); +static enum scsi_disposition scsi_try_to_abort_cmd(const struct scsi_host_template *, + struct scsi_cmnd *); + +void scsi_eh_wakeup(struct Scsi_Host *shost) +{ + lockdep_assert_held(shost->host_lock); + + if (scsi_host_busy(shost) == shost->host_failed) { + trace_scsi_eh_wakeup(shost); + wake_up_process(shost->ehandler); + SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost, + "Waking error handler thread\n")); + } +} + +/** + * scsi_schedule_eh - schedule EH for SCSI host + * @shost: SCSI host to invoke error handling on. + * + * Schedule SCSI EH without scmd. + */ +void scsi_schedule_eh(struct Scsi_Host *shost) +{ + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + + if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 || + scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) { + shost->host_eh_scheduled++; + scsi_eh_wakeup(shost); + } + + spin_unlock_irqrestore(shost->host_lock, flags); +} +EXPORT_SYMBOL_GPL(scsi_schedule_eh); + +static int scsi_host_eh_past_deadline(struct Scsi_Host *shost) +{ + if (!shost->last_reset || shost->eh_deadline == -1) + return 0; + + /* + * 32bit accesses are guaranteed to be atomic + * (on all supported architectures), so instead + * of using a spinlock we can as well double check + * if eh_deadline has been set to 'off' during the + * time_before call. + */ + if (time_before(jiffies, shost->last_reset + shost->eh_deadline) && + shost->eh_deadline > -1) + return 0; + + return 1; +} + +static bool scsi_cmd_retry_allowed(struct scsi_cmnd *cmd) +{ + if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT) + return true; + + return ++cmd->retries <= cmd->allowed; +} + +static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd) +{ + struct scsi_device *sdev = cmd->device; + struct Scsi_Host *host = sdev->host; + + if (host->hostt->eh_should_retry_cmd) + return host->hostt->eh_should_retry_cmd(cmd); + + return true; +} + +/** + * scmd_eh_abort_handler - Handle command aborts + * @work: command to be aborted. + * + * Note: this function must be called only for a command that has timed out. + * Because the block layer marks a request as complete before it calls + * scsi_timeout(), a .scsi_done() call from the LLD for a command that has + * timed out do not have any effect. Hence it is safe to call + * scsi_finish_command() from this function. + */ +void +scmd_eh_abort_handler(struct work_struct *work) +{ + struct scsi_cmnd *scmd = + container_of(work, struct scsi_cmnd, abort_work.work); + struct scsi_device *sdev = scmd->device; + struct Scsi_Host *shost = sdev->host; + enum scsi_disposition rtn; + unsigned long flags; + + if (scsi_host_eh_past_deadline(shost)) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "eh timeout, not aborting\n")); + goto out; + } + + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "aborting command\n")); + rtn = scsi_try_to_abort_cmd(shost->hostt, scmd); + if (rtn != SUCCESS) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "cmd abort %s\n", + (rtn == FAST_IO_FAIL) ? 
+ "not send" : "failed")); + goto out; + } + set_host_byte(scmd, DID_TIME_OUT); + if (scsi_host_eh_past_deadline(shost)) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "eh timeout, not retrying " + "aborted command\n")); + goto out; + } + + spin_lock_irqsave(shost->host_lock, flags); + list_del_init(&scmd->eh_entry); + + /* + * If the abort succeeds, and there is no further + * EH action, clear the ->last_reset time. + */ + if (list_empty(&shost->eh_abort_list) && + list_empty(&shost->eh_cmd_q)) + if (shost->eh_deadline != -1) + shost->last_reset = 0; + + spin_unlock_irqrestore(shost->host_lock, flags); + + if (!scsi_noretry_cmd(scmd) && + scsi_cmd_retry_allowed(scmd) && + scsi_eh_should_retry_cmd(scmd)) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_WARNING, scmd, + "retry aborted command\n")); + scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); + } else { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_WARNING, scmd, + "finish aborted command\n")); + scsi_finish_command(scmd); + } + return; + +out: + spin_lock_irqsave(shost->host_lock, flags); + list_del_init(&scmd->eh_entry); + spin_unlock_irqrestore(shost->host_lock, flags); + + scsi_eh_scmd_add(scmd); +} + +/** + * scsi_abort_command - schedule a command abort + * @scmd: scmd to abort. + * + * We only need to abort commands after a command timeout + */ +static int +scsi_abort_command(struct scsi_cmnd *scmd) +{ + struct scsi_device *sdev = scmd->device; + struct Scsi_Host *shost = sdev->host; + unsigned long flags; + + if (!shost->hostt->eh_abort_handler) { + /* No abort handler, fail command directly */ + return FAILED; + } + + if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) { + /* + * Retry after abort failed, escalate to next level. + */ + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "previous abort failed\n")); + BUG_ON(delayed_work_pending(&scmd->abort_work)); + return FAILED; + } + + spin_lock_irqsave(shost->host_lock, flags); + if (shost->eh_deadline != -1 && !shost->last_reset) + shost->last_reset = jiffies; + BUG_ON(!list_empty(&scmd->eh_entry)); + list_add_tail(&scmd->eh_entry, &shost->eh_abort_list); + spin_unlock_irqrestore(shost->host_lock, flags); + + scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED; + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, "abort scheduled\n")); + queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100); + return SUCCESS; +} + +/** + * scsi_eh_reset - call into ->eh_action to reset internal counters + * @scmd: scmd to run eh on. + * + * The scsi driver might be carrying internal state about the + * devices, so we need to call into the driver to reset the + * internal state once the error handler is started. + */ +static void scsi_eh_reset(struct scsi_cmnd *scmd) +{ + if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) { + struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd); + if (sdrv->eh_reset) + sdrv->eh_reset(scmd); + } +} + +static void scsi_eh_inc_host_failed(struct rcu_head *head) +{ + struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu); + struct Scsi_Host *shost = scmd->device->host; + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + shost->host_failed++; + scsi_eh_wakeup(shost); + spin_unlock_irqrestore(shost->host_lock, flags); +} + +/** + * scsi_eh_scmd_add - add scsi cmd to error handling. + * @scmd: scmd to run eh on. 
+ */ +void scsi_eh_scmd_add(struct scsi_cmnd *scmd) +{ + struct Scsi_Host *shost = scmd->device->host; + unsigned long flags; + int ret; + + WARN_ON_ONCE(!shost->ehandler); + + spin_lock_irqsave(shost->host_lock, flags); + if (scsi_host_set_state(shost, SHOST_RECOVERY)) { + ret = scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY); + WARN_ON_ONCE(ret); + } + if (shost->eh_deadline != -1 && !shost->last_reset) + shost->last_reset = jiffies; + + scsi_eh_reset(scmd); + list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); + spin_unlock_irqrestore(shost->host_lock, flags); + /* + * Ensure that all tasks observe the host state change before the + * host_failed change. + */ + call_rcu_hurry(&scmd->rcu, scsi_eh_inc_host_failed); +} + +/** + * scsi_timeout - Timeout function for normal scsi commands. + * @req: request that is timing out. + * + * Notes: + * We do not need to lock this. There is the potential for a race + * only in that the normal completion handling might run, but if the + * normal completion function determines that the timer has already + * fired, then it mustn't do anything. + */ +enum blk_eh_timer_return scsi_timeout(struct request *req) +{ + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); + struct Scsi_Host *host = scmd->device->host; + + trace_scsi_dispatch_cmd_timeout(scmd); + scsi_log_completion(scmd, TIMEOUT_ERROR); + + atomic_inc(&scmd->device->iotmo_cnt); + if (host->eh_deadline != -1 && !host->last_reset) + host->last_reset = jiffies; + + if (host->hostt->eh_timed_out) { + switch (host->hostt->eh_timed_out(scmd)) { + case SCSI_EH_DONE: + return BLK_EH_DONE; + case SCSI_EH_RESET_TIMER: + return BLK_EH_RESET_TIMER; + case SCSI_EH_NOT_HANDLED: + break; + } + } + + /* + * If scsi_done() has already set SCMD_STATE_COMPLETE, do not modify + * *scmd. + */ + if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state)) + return BLK_EH_DONE; + atomic_inc(&scmd->device->iodone_cnt); + if (scsi_abort_command(scmd) != SUCCESS) { + set_host_byte(scmd, DID_TIME_OUT); + scsi_eh_scmd_add(scmd); + } + + return BLK_EH_DONE; +} + +/** + * scsi_block_when_processing_errors - Prevent cmds from being queued. + * @sdev: Device on which we are performing recovery. + * + * Description: + * We block until the host is out of error recovery, and then check to + * see whether the host or the device is offline. + * + * Return value: + * 0 when dev was taken offline by error recovery. 1 OK to proceed. + */ +int scsi_block_when_processing_errors(struct scsi_device *sdev) +{ + int online; + + wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host)); + + online = scsi_device_online(sdev); + + return online; +} +EXPORT_SYMBOL(scsi_block_when_processing_errors); + +#ifdef CONFIG_SCSI_LOGGING +/** + * scsi_eh_prt_fail_stats - Log info on failures. + * @shost: scsi host being recovered. + * @work_q: Queue of scsi cmds to process. 
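scsi_timeout() above gives the LLD first refusal through the eh_timed_out hook, and the three-way contract is clearest written from the driver side. The sketch below is illustrative only: mydrv_command_completing() is a hypothetical helper, and the enum scsi_timeout_action return type is assumed from the scsi_host_template declaration in scsi_host.h, which is not part of this hunk.

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Hypothetical hardware query; a real driver would check adapter state. */
static bool mydrv_command_completing(struct scsi_cmnd *scmd)
{
	return false;
}

/* Buy more time while the HBA is still working on the command, otherwise
 * fall through to the scsi_abort_command()/EH escalation shown above. */
static enum scsi_timeout_action mydrv_eh_timed_out(struct scsi_cmnd *scmd)
{
	if (mydrv_command_completing(scmd))
		return SCSI_EH_RESET_TIMER;	/* restart the block layer timer */

	return SCSI_EH_NOT_HANDLED;
}

The remaining value, SCSI_EH_DONE, maps straight to BLK_EH_DONE in the switch inside scsi_timeout() above.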
+ */ +static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost, + struct list_head *work_q) +{ + struct scsi_cmnd *scmd; + struct scsi_device *sdev; + int total_failures = 0; + int cmd_failed = 0; + int cmd_cancel = 0; + int devices_failed = 0; + + shost_for_each_device(sdev, shost) { + list_for_each_entry(scmd, work_q, eh_entry) { + if (scmd->device == sdev) { + ++total_failures; + if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) + ++cmd_cancel; + else + ++cmd_failed; + } + } + + if (cmd_cancel || cmd_failed) { + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "%s: cmds failed: %d, cancel: %d\n", + __func__, cmd_failed, + cmd_cancel)); + cmd_cancel = 0; + cmd_failed = 0; + ++devices_failed; + } + } + + SCSI_LOG_ERROR_RECOVERY(2, shost_printk(KERN_INFO, shost, + "Total of %d commands on %d" + " devices require eh work\n", + total_failures, devices_failed)); +} +#endif + + /** + * scsi_report_lun_change - Set flag on all *other* devices on the same target + * to indicate that a UNIT ATTENTION is expected. + * @sdev: Device reporting the UNIT ATTENTION + */ +static void scsi_report_lun_change(struct scsi_device *sdev) +{ + sdev->sdev_target->expecting_lun_change = 1; +} + +/** + * scsi_report_sense - Examine scsi sense information and log messages for + * certain conditions, also issue uevents for some of them. + * @sdev: Device reporting the sense code + * @sshdr: sshdr to be examined + */ +static void scsi_report_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sshdr) +{ + enum scsi_device_event evt_type = SDEV_EVT_MAXBITS; /* i.e. none */ + + if (sshdr->sense_key == UNIT_ATTENTION) { + if (sshdr->asc == 0x3f && sshdr->ascq == 0x03) { + evt_type = SDEV_EVT_INQUIRY_CHANGE_REPORTED; + sdev_printk(KERN_WARNING, sdev, + "Inquiry data has changed"); + } else if (sshdr->asc == 0x3f && sshdr->ascq == 0x0e) { + evt_type = SDEV_EVT_LUN_CHANGE_REPORTED; + scsi_report_lun_change(sdev); + sdev_printk(KERN_WARNING, sdev, + "LUN assignments on this target have " + "changed. The Linux SCSI layer does not " + "automatically remap LUN assignments.\n"); + } else if (sshdr->asc == 0x3f) + sdev_printk(KERN_WARNING, sdev, + "Operating parameters on this target have " + "changed. The Linux SCSI layer does not " + "automatically adjust these parameters.\n"); + + if (sshdr->asc == 0x38 && sshdr->ascq == 0x07) { + evt_type = SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED; + sdev_printk(KERN_WARNING, sdev, + "Warning! Received an indication that the " + "LUN reached a thin provisioning soft " + "threshold.\n"); + } + + if (sshdr->asc == 0x29) { + evt_type = SDEV_EVT_POWER_ON_RESET_OCCURRED; + /* + * Do not print message if it is an expected side-effect + * of runtime PM. 
+ */ + if (!sdev->silence_suspend) + sdev_printk(KERN_WARNING, sdev, + "Power-on or device reset occurred\n"); + } + + if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) { + evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED; + sdev_printk(KERN_WARNING, sdev, + "Mode parameters changed"); + } else if (sshdr->asc == 0x2a && sshdr->ascq == 0x06) { + evt_type = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED; + sdev_printk(KERN_WARNING, sdev, + "Asymmetric access state changed"); + } else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) { + evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED; + sdev_printk(KERN_WARNING, sdev, + "Capacity data has changed"); + } else if (sshdr->asc == 0x2a) + sdev_printk(KERN_WARNING, sdev, + "Parameters changed"); + } + + if (evt_type != SDEV_EVT_MAXBITS) { + set_bit(evt_type, sdev->pending_events); + schedule_work(&sdev->event_work); + } +} + +static inline void set_scsi_ml_byte(struct scsi_cmnd *cmd, u8 status) +{ + cmd->result = (cmd->result & 0xffff00ff) | (status << 8); +} + +/** + * scsi_check_sense - Examine scsi cmd sense + * @scmd: Cmd to have sense checked. + * + * Return value: + * SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE + * + * Notes: + * When a deferred error is detected the current command has + * not been executed and needs retrying. + */ +enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd) +{ + struct request *req = scsi_cmd_to_rq(scmd); + struct scsi_device *sdev = scmd->device; + struct scsi_sense_hdr sshdr; + + if (! scsi_command_normalize_sense(scmd, &sshdr)) + return FAILED; /* no valid sense data */ + + scsi_report_sense(sdev, &sshdr); + + if (scsi_sense_is_deferred(&sshdr)) + return NEEDS_RETRY; + + if (sdev->handler && sdev->handler->check_sense) { + enum scsi_disposition rc; + + rc = sdev->handler->check_sense(sdev, &sshdr); + if (rc != SCSI_RETURN_NOT_HANDLED) + return rc; + /* handler does not care. Drop down to default handling */ + } + + if (scmd->cmnd[0] == TEST_UNIT_READY && + scmd->submitter != SUBMITTED_BY_SCSI_ERROR_HANDLER) + /* + * nasty: for mid-layer issued TURs, we need to return the + * actual sense data without any recovery attempt. For eh + * issued ones, we need to try to recover and interpret + */ + return SUCCESS; + + /* + * Previous logic looked for FILEMARK, EOM or ILI which are + * mainly associated with tapes and returned SUCCESS. + */ + if (sshdr.response_code == 0x70) { + /* fixed format */ + if (scmd->sense_buffer[2] & 0xe0) + return SUCCESS; + } else { + /* + * descriptor format: look for "stream commands sense data + * descriptor" (see SSC-3). Assume single sense data + * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG. + */ + if ((sshdr.additional_length > 3) && + (scmd->sense_buffer[8] == 0x4) && + (scmd->sense_buffer[11] & 0xe0)) + return SUCCESS; + } + + switch (sshdr.sense_key) { + case NO_SENSE: + return SUCCESS; + case RECOVERED_ERROR: + return /* soft_error */ SUCCESS; + + case ABORTED_COMMAND: + if (sshdr.asc == 0x10) /* DIF */ + return SUCCESS; + + /* + * Check aborts due to command duration limit policy: + * ABORTED COMMAND additional sense code with the + * COMMAND TIMEOUT BEFORE PROCESSING or + * COMMAND TIMEOUT DURING PROCESSING or + * COMMAND TIMEOUT DURING PROCESSING DUE TO ERROR RECOVERY + * additional sense code qualifiers. 
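+ * These correspond to ASC 0x2e with ASCQ 0x01-0x03 (checked below); such
+ * commands are completed with the duration-limit timeout ML byte rather
+ * than retried.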
+ */ + if (sshdr.asc == 0x2e && + sshdr.ascq >= 0x01 && sshdr.ascq <= 0x03) { + set_scsi_ml_byte(scmd, SCSIML_STAT_DL_TIMEOUT); + req->cmd_flags |= REQ_FAILFAST_DEV; + req->rq_flags |= RQF_QUIET; + return SUCCESS; + } + + if (sshdr.asc == 0x44 && sdev->sdev_bflags & BLIST_RETRY_ITF) + return ADD_TO_MLQUEUE; + if (sshdr.asc == 0xc1 && sshdr.ascq == 0x01 && + sdev->sdev_bflags & BLIST_RETRY_ASC_C1) + return ADD_TO_MLQUEUE; + + return NEEDS_RETRY; + case NOT_READY: + case UNIT_ATTENTION: + /* + * if we are expecting a cc/ua because of a bus reset that we + * performed, treat this just as a retry. otherwise this is + * information that we should pass up to the upper-level driver + * so that we can deal with it there. + */ + if (scmd->device->expecting_cc_ua) { + /* + * Because some device does not queue unit + * attentions correctly, we carefully check + * additional sense code and qualifier so as + * not to squash media change unit attention. + */ + if (sshdr.asc != 0x28 || sshdr.ascq != 0x00) { + scmd->device->expecting_cc_ua = 0; + return NEEDS_RETRY; + } + } + /* + * we might also expect a cc/ua if another LUN on the target + * reported a UA with an ASC/ASCQ of 3F 0E - + * REPORTED LUNS DATA HAS CHANGED. + */ + if (scmd->device->sdev_target->expecting_lun_change && + sshdr.asc == 0x3f && sshdr.ascq == 0x0e) + return NEEDS_RETRY; + /* + * if the device is in the process of becoming ready, we + * should retry. + */ + if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01)) + return NEEDS_RETRY; + /* + * if the device is not started, we need to wake + * the error handler to start the motor + */ + if (scmd->device->allow_restart && + (sshdr.asc == 0x04) && (sshdr.ascq == 0x02)) + return FAILED; + /* + * Pass the UA upwards for a determination in the completion + * functions. 
+ */ + return SUCCESS; + + /* these are not supported */ + case DATA_PROTECT: + if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) { + /* Thin provisioning hard threshold reached */ + set_scsi_ml_byte(scmd, SCSIML_STAT_NOSPC); + return SUCCESS; + } + fallthrough; + case COPY_ABORTED: + case VOLUME_OVERFLOW: + case MISCOMPARE: + case BLANK_CHECK: + set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE); + return SUCCESS; + + case MEDIUM_ERROR: + if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */ + sshdr.asc == 0x13 || /* AMNF DATA FIELD */ + sshdr.asc == 0x14) { /* RECORD NOT FOUND */ + set_scsi_ml_byte(scmd, SCSIML_STAT_MED_ERROR); + return SUCCESS; + } + return NEEDS_RETRY; + + case HARDWARE_ERROR: + if (scmd->device->retry_hwerror) + return ADD_TO_MLQUEUE; + else + set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE); + fallthrough; + + case ILLEGAL_REQUEST: + if (sshdr.asc == 0x20 || /* Invalid command operation code */ + sshdr.asc == 0x21 || /* Logical block address out of range */ + sshdr.asc == 0x22 || /* Invalid function */ + sshdr.asc == 0x24 || /* Invalid field in cdb */ + sshdr.asc == 0x26 || /* Parameter value invalid */ + sshdr.asc == 0x27) { /* Write protected */ + set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE); + } + return SUCCESS; + + case COMPLETED: + if (sshdr.asc == 0x55 && sshdr.ascq == 0x0a) { + set_scsi_ml_byte(scmd, SCSIML_STAT_DL_TIMEOUT); + req->cmd_flags |= REQ_FAILFAST_DEV; + req->rq_flags |= RQF_QUIET; + } + return SUCCESS; + + default: + return SUCCESS; + } +} +EXPORT_SYMBOL_GPL(scsi_check_sense); + +static void scsi_handle_queue_ramp_up(struct scsi_device *sdev) +{ + const struct scsi_host_template *sht = sdev->host->hostt; + struct scsi_device *tmp_sdev; + + if (!sht->track_queue_depth || + sdev->queue_depth >= sdev->max_queue_depth) + return; + + if (time_before(jiffies, + sdev->last_queue_ramp_up + sdev->queue_ramp_up_period)) + return; + + if (time_before(jiffies, + sdev->last_queue_full_time + sdev->queue_ramp_up_period)) + return; + + /* + * Walk all devices of a target and do + * ramp up on them. + */ + shost_for_each_device(tmp_sdev, sdev->host) { + if (tmp_sdev->channel != sdev->channel || + tmp_sdev->id != sdev->id || + tmp_sdev->queue_depth == sdev->max_queue_depth) + continue; + + scsi_change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1); + sdev->last_queue_ramp_up = jiffies; + } +} + +static void scsi_handle_queue_full(struct scsi_device *sdev) +{ + const struct scsi_host_template *sht = sdev->host->hostt; + struct scsi_device *tmp_sdev; + + if (!sht->track_queue_depth) + return; + + shost_for_each_device(tmp_sdev, sdev->host) { + if (tmp_sdev->channel != sdev->channel || + tmp_sdev->id != sdev->id) + continue; + /* + * We do not know the number of commands that were at + * the device when we got the queue full so we start + * from the highest possible value and work our way down. + */ + scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1); + } +} + +/** + * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD. + * @scmd: SCSI cmd to examine. + * + * Notes: + * This is *only* called when we are examining the status of commands + * queued during error recovery. the main difference here is that we + * don't allow for the possibility of retries here, and we are a lot + * more restrictive about what we consider acceptable. + */ +static enum scsi_disposition scsi_eh_completed_normally(struct scsi_cmnd *scmd) +{ + /* + * first check the host byte, to see if there is anything in there + * that would indicate what we need to do. 
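+ * (The host byte is the DID_* value the LLD stored in bits 16-23 of
+ * scmd->result; host_byte() extracts it.)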
+ */ + if (host_byte(scmd->result) == DID_RESET) { + /* + * rats. we are already in the error handler, so we now + * get to try and figure out what to do next. if the sense + * is valid, we have a pretty good idea of what to do. + * if not, we mark it as FAILED. + */ + return scsi_check_sense(scmd); + } + if (host_byte(scmd->result) != DID_OK) + return FAILED; + + /* + * now, check the status byte to see if this indicates + * anything special. + */ + switch (get_status_byte(scmd)) { + case SAM_STAT_GOOD: + scsi_handle_queue_ramp_up(scmd->device); + if (scmd->sense_buffer && SCSI_SENSE_VALID(scmd)) + /* + * If we have sense data, call scsi_check_sense() in + * order to set the correct SCSI ML byte (if any). + * No point in checking the return value, since the + * command has already completed successfully. + */ + scsi_check_sense(scmd); + fallthrough; + case SAM_STAT_COMMAND_TERMINATED: + return SUCCESS; + case SAM_STAT_CHECK_CONDITION: + return scsi_check_sense(scmd); + case SAM_STAT_CONDITION_MET: + case SAM_STAT_INTERMEDIATE: + case SAM_STAT_INTERMEDIATE_CONDITION_MET: + /* + * who knows? FIXME(eric) + */ + return SUCCESS; + case SAM_STAT_RESERVATION_CONFLICT: + if (scmd->cmnd[0] == TEST_UNIT_READY) + /* it is a success, we probed the device and + * found it */ + return SUCCESS; + /* otherwise, we failed to send the command */ + return FAILED; + case SAM_STAT_TASK_SET_FULL: + scsi_handle_queue_full(scmd->device); + fallthrough; + case SAM_STAT_BUSY: + return NEEDS_RETRY; + default: + return FAILED; + } + return FAILED; +} + +/** + * scsi_eh_done - Completion function for error handling. + * @scmd: Cmd that is done. + */ +void scsi_eh_done(struct scsi_cmnd *scmd) +{ + struct completion *eh_action; + + SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, + "%s result: %x\n", __func__, scmd->result)); + + eh_action = scmd->device->host->eh_action; + if (eh_action) + complete(eh_action); +} + +/** + * scsi_try_host_reset - ask host adapter to reset itself + * @scmd: SCSI cmd to send host reset. + */ +static enum scsi_disposition scsi_try_host_reset(struct scsi_cmnd *scmd) +{ + unsigned long flags; + enum scsi_disposition rtn; + struct Scsi_Host *host = scmd->device->host; + const struct scsi_host_template *hostt = host->hostt; + + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, host, "Snd Host RST\n")); + + if (!hostt->eh_host_reset_handler) + return FAILED; + + rtn = hostt->eh_host_reset_handler(scmd); + + if (rtn == SUCCESS) { + if (!hostt->skip_settle_delay) + ssleep(HOST_RESET_SETTLE_TIME); + spin_lock_irqsave(host->host_lock, flags); + scsi_report_bus_reset(host, scmd_channel(scmd)); + spin_unlock_irqrestore(host->host_lock, flags); + } + + return rtn; +} + +/** + * scsi_try_bus_reset - ask host to perform a bus reset + * @scmd: SCSI cmd to send bus reset. 
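+ *
+ * If the LLD reports SUCCESS, the handler sleeps for BUS_RESET_SETTLE_TIME
+ * (unless the host template sets skip_settle_delay) and then reports the
+ * reset for the affected channel via scsi_report_bus_reset().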
+ */ +static enum scsi_disposition scsi_try_bus_reset(struct scsi_cmnd *scmd) +{ + unsigned long flags; + enum scsi_disposition rtn; + struct Scsi_Host *host = scmd->device->host; + const struct scsi_host_template *hostt = host->hostt; + + SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, + "%s: Snd Bus RST\n", __func__)); + + if (!hostt->eh_bus_reset_handler) + return FAILED; + + rtn = hostt->eh_bus_reset_handler(scmd); + + if (rtn == SUCCESS) { + if (!hostt->skip_settle_delay) + ssleep(BUS_RESET_SETTLE_TIME); + spin_lock_irqsave(host->host_lock, flags); + scsi_report_bus_reset(host, scmd_channel(scmd)); + spin_unlock_irqrestore(host->host_lock, flags); + } + + return rtn; +} + +static void __scsi_report_device_reset(struct scsi_device *sdev, void *data) +{ + sdev->was_reset = 1; + sdev->expecting_cc_ua = 1; +} + +/** + * scsi_try_target_reset - Ask host to perform a target reset + * @scmd: SCSI cmd used to send a target reset + * + * Notes: + * There is no timeout for this operation. if this operation is + * unreliable for a given host, then the host itself needs to put a + * timer on it, and set the host back to a consistent state prior to + * returning. + */ +static enum scsi_disposition scsi_try_target_reset(struct scsi_cmnd *scmd) +{ + unsigned long flags; + enum scsi_disposition rtn; + struct Scsi_Host *host = scmd->device->host; + const struct scsi_host_template *hostt = host->hostt; + + if (!hostt->eh_target_reset_handler) + return FAILED; + + rtn = hostt->eh_target_reset_handler(scmd); + if (rtn == SUCCESS) { + spin_lock_irqsave(host->host_lock, flags); + __starget_for_each_device(scsi_target(scmd->device), NULL, + __scsi_report_device_reset); + spin_unlock_irqrestore(host->host_lock, flags); + } + + return rtn; +} + +/** + * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev + * @scmd: SCSI cmd used to send BDR + * + * Notes: + * There is no timeout for this operation. if this operation is + * unreliable for a given host, then the host itself needs to put a + * timer on it, and set the host back to a consistent state prior to + * returning. + */ +static enum scsi_disposition scsi_try_bus_device_reset(struct scsi_cmnd *scmd) +{ + enum scsi_disposition rtn; + const struct scsi_host_template *hostt = scmd->device->host->hostt; + + if (!hostt->eh_device_reset_handler) + return FAILED; + + rtn = hostt->eh_device_reset_handler(scmd); + if (rtn == SUCCESS) + __scsi_report_device_reset(scmd->device, NULL); + return rtn; +} + +/** + * scsi_try_to_abort_cmd - Ask host to abort a SCSI command + * @hostt: SCSI driver host template + * @scmd: SCSI cmd used to send a target reset + * + * Return value: + * SUCCESS, FAILED, or FAST_IO_FAIL + * + * Notes: + * SUCCESS does not necessarily indicate that the command + * has been aborted; it only indicates that the LLDDs + * has cleared all references to that command. + * LLDDs should return FAILED only if an abort was required + * but could not be executed. 
LLDDs should return FAST_IO_FAIL + * if the device is temporarily unavailable (eg due to a + * link down on FibreChannel) + */ +static enum scsi_disposition +scsi_try_to_abort_cmd(const struct scsi_host_template *hostt, struct scsi_cmnd *scmd) +{ + if (!hostt->eh_abort_handler) + return FAILED; + + return hostt->eh_abort_handler(scmd); +} + +static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd) +{ + if (scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd) != SUCCESS) + if (scsi_try_bus_device_reset(scmd) != SUCCESS) + if (scsi_try_target_reset(scmd) != SUCCESS) + if (scsi_try_bus_reset(scmd) != SUCCESS) + scsi_try_host_reset(scmd); +} + +/** + * scsi_eh_prep_cmnd - Save a scsi command info as part of error recovery + * @scmd: SCSI command structure to hijack + * @ses: structure to save restore information + * @cmnd: CDB to send. Can be NULL if no new cmnd is needed + * @cmnd_size: size in bytes of @cmnd (must be <= MAX_COMMAND_SIZE) + * @sense_bytes: size of sense data to copy. or 0 (if != 0 @cmnd is ignored) + * + * This function is used to save a scsi command information before re-execution + * as part of the error recovery process. If @sense_bytes is 0 the command + * sent must be one that does not transfer any data. If @sense_bytes != 0 + * @cmnd is ignored and this functions sets up a REQUEST_SENSE command + * and cmnd buffers to read @sense_bytes into @scmd->sense_buffer. + */ +void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, + unsigned char *cmnd, int cmnd_size, unsigned sense_bytes) +{ + struct scsi_device *sdev = scmd->device; + + /* + * We need saved copies of a number of fields - this is because + * error handling may need to overwrite these with different values + * to run different commands, and once error handling is complete, + * we will need to restore these values prior to running the actual + * command. + */ + ses->cmd_len = scmd->cmd_len; + ses->data_direction = scmd->sc_data_direction; + ses->sdb = scmd->sdb; + ses->result = scmd->result; + ses->resid_len = scmd->resid_len; + ses->underflow = scmd->underflow; + ses->prot_op = scmd->prot_op; + ses->eh_eflags = scmd->eh_eflags; + + scmd->prot_op = SCSI_PROT_NORMAL; + scmd->eh_eflags = 0; + memcpy(ses->cmnd, scmd->cmnd, sizeof(ses->cmnd)); + memset(scmd->cmnd, 0, sizeof(scmd->cmnd)); + memset(&scmd->sdb, 0, sizeof(scmd->sdb)); + scmd->result = 0; + scmd->resid_len = 0; + + if (sense_bytes) { + scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE, + sense_bytes); + sg_init_one(&ses->sense_sgl, scmd->sense_buffer, + scmd->sdb.length); + scmd->sdb.table.sgl = &ses->sense_sgl; + scmd->sc_data_direction = DMA_FROM_DEVICE; + scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1; + scmd->cmnd[0] = REQUEST_SENSE; + scmd->cmnd[4] = scmd->sdb.length; + scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); + } else { + scmd->sc_data_direction = DMA_NONE; + if (cmnd) { + BUG_ON(cmnd_size > sizeof(scmd->cmnd)); + memcpy(scmd->cmnd, cmnd, cmnd_size); + scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); + } + } + + scmd->underflow = 0; + + if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN) + scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) | + (sdev->lun << 5 & 0xe0); + + /* + * Zero the sense buffer. The scsi spec mandates that any + * untransferred sense data should be interpreted as being zero. 
+ */ + memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); +} +EXPORT_SYMBOL(scsi_eh_prep_cmnd); + +/** + * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recovery + * @scmd: SCSI command structure to restore + * @ses: saved information from a coresponding call to scsi_eh_prep_cmnd + * + * Undo any damage done by above scsi_eh_prep_cmnd(). + */ +void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) +{ + /* + * Restore original data + */ + scmd->cmd_len = ses->cmd_len; + memcpy(scmd->cmnd, ses->cmnd, sizeof(ses->cmnd)); + scmd->sc_data_direction = ses->data_direction; + scmd->sdb = ses->sdb; + scmd->result = ses->result; + scmd->resid_len = ses->resid_len; + scmd->underflow = ses->underflow; + scmd->prot_op = ses->prot_op; + scmd->eh_eflags = ses->eh_eflags; +} +EXPORT_SYMBOL(scsi_eh_restore_cmnd); + +/** + * scsi_send_eh_cmnd - submit a scsi command as part of error recovery + * @scmd: SCSI command structure to hijack + * @cmnd: CDB to send + * @cmnd_size: size in bytes of @cmnd + * @timeout: timeout for this request + * @sense_bytes: size of sense data to copy or 0 + * + * This function is used to send a scsi command down to a target device + * as part of the error recovery process. See also scsi_eh_prep_cmnd() above. + * + * Return value: + * SUCCESS or FAILED or NEEDS_RETRY + */ +static enum scsi_disposition scsi_send_eh_cmnd(struct scsi_cmnd *scmd, + unsigned char *cmnd, int cmnd_size, int timeout, unsigned sense_bytes) +{ + struct scsi_device *sdev = scmd->device; + struct Scsi_Host *shost = sdev->host; + DECLARE_COMPLETION_ONSTACK(done); + unsigned long timeleft = timeout, delay; + struct scsi_eh_save ses; + const unsigned long stall_for = msecs_to_jiffies(100); + int rtn; + +retry: + scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes); + shost->eh_action = &done; + + scsi_log_send(scmd); + scmd->submitter = SUBMITTED_BY_SCSI_ERROR_HANDLER; + scmd->flags |= SCMD_LAST; + + /* + * Lock sdev->state_mutex to avoid that scsi_device_quiesce() can + * change the SCSI device state after we have examined it and before + * .queuecommand() is called. + */ + mutex_lock(&sdev->state_mutex); + while (sdev->sdev_state == SDEV_BLOCK && timeleft > 0) { + mutex_unlock(&sdev->state_mutex); + SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_DEBUG, sdev, + "%s: state %d <> %d\n", __func__, sdev->sdev_state, + SDEV_BLOCK)); + delay = min(timeleft, stall_for); + timeleft -= delay; + msleep(jiffies_to_msecs(delay)); + mutex_lock(&sdev->state_mutex); + } + if (sdev->sdev_state != SDEV_BLOCK) + rtn = shost->hostt->queuecommand(shost, scmd); + else + rtn = FAILED; + mutex_unlock(&sdev->state_mutex); + + if (rtn) { + if (timeleft > stall_for) { + scsi_eh_restore_cmnd(scmd, &ses); + + timeleft -= stall_for; + msleep(jiffies_to_msecs(stall_for)); + goto retry; + } + /* signal not to enter either branch of the if () below */ + timeleft = 0; + rtn = FAILED; + } else { + timeleft = wait_for_completion_timeout(&done, timeout); + rtn = SUCCESS; + } + + shost->eh_action = NULL; + + scsi_log_completion(scmd, rtn); + + SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, + "%s timeleft: %ld\n", + __func__, timeleft)); + + /* + * If there is time left scsi_eh_done got called, and we will examine + * the actual status codes to see whether the command actually did + * complete normally, else if we have a zero return and no time left, + * the command must still be pending, so abort it and return FAILED. 
+ * If we never actually managed to issue the command, because + * ->queuecommand() kept returning non zero, use the rtn = FAILED + * value above (so don't execute either branch of the if) + */ + if (timeleft) { + rtn = scsi_eh_completed_normally(scmd); + SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, + "%s: scsi_eh_completed_normally %x\n", __func__, rtn)); + + switch (rtn) { + case SUCCESS: + case NEEDS_RETRY: + case FAILED: + break; + case ADD_TO_MLQUEUE: + rtn = NEEDS_RETRY; + break; + default: + rtn = FAILED; + break; + } + } else if (rtn != FAILED) { + scsi_abort_eh_cmnd(scmd); + rtn = FAILED; + } + + scsi_eh_restore_cmnd(scmd, &ses); + + return rtn; +} + +/** + * scsi_request_sense - Request sense data from a particular target. + * @scmd: SCSI cmd for request sense. + * + * Notes: + * Some hosts automatically obtain this information, others require + * that we obtain it on our own. This function will *not* return until + * the command either times out, or it completes. + */ +static enum scsi_disposition scsi_request_sense(struct scsi_cmnd *scmd) +{ + return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0); +} + +static enum scsi_disposition +scsi_eh_action(struct scsi_cmnd *scmd, enum scsi_disposition rtn) +{ + if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) { + struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd); + if (sdrv->eh_action) + rtn = sdrv->eh_action(scmd, rtn); + } + return rtn; +} + +/** + * scsi_eh_finish_cmd - Handle a cmd that eh is finished with. + * @scmd: Original SCSI cmd that eh has finished. + * @done_q: Queue for processed commands. + * + * Notes: + * We don't want to use the normal command completion while we are are + * still handling errors - it may cause other commands to be queued, + * and that would disturb what we are doing. Thus we really want to + * keep a list of pending commands for final completion, and once we + * are ready to leave error handling we handle completion for real. + */ +void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q) +{ + list_move_tail(&scmd->eh_entry, done_q); +} +EXPORT_SYMBOL(scsi_eh_finish_cmd); + +/** + * scsi_eh_get_sense - Get device sense data. + * @work_q: Queue of commands to process. + * @done_q: Queue of processed commands. + * + * Description: + * See if we need to request sense information. if so, then get it + * now, so we have a better idea of what to do. + * + * Notes: + * This has the unfortunate side effect that if a shost adapter does + * not automatically request sense information, we end up shutting + * it down before we request it. + * + * All drivers should request sense information internally these days, + * so for now all I have to say is tough noogies if you end up in here. + * + * XXX: Long term this code should go away, but that needs an audit of + * all LLDDs first. + */ +int scsi_eh_get_sense(struct list_head *work_q, + struct list_head *done_q) +{ + struct scsi_cmnd *scmd, *next; + struct Scsi_Host *shost; + enum scsi_disposition rtn; + + /* + * If SCSI_EH_ABORT_SCHEDULED has been set, it is timeout IO, + * should not get sense. 
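+ * (Such commands are skipped in the loop below, as are commands that
+ * already carry valid sense data.)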
+ */ + list_for_each_entry_safe(scmd, next, work_q, eh_entry) { + if ((scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) || + SCSI_SENSE_VALID(scmd)) + continue; + + shost = scmd->device->host; + if (scsi_host_eh_past_deadline(shost)) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "%s: skip request sense, past eh deadline\n", + current->comm)); + break; + } + if (!scsi_status_is_check_condition(scmd->result)) + /* + * don't request sense if there's no check condition + * status because the error we're processing isn't one + * that has a sense code (and some devices get + * confused by sense requests out of the blue) + */ + continue; + + SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd, + "%s: requesting sense\n", + current->comm)); + rtn = scsi_request_sense(scmd); + if (rtn != SUCCESS) + continue; + + SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, + "sense requested, result %x\n", scmd->result)); + SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd)); + + rtn = scsi_decide_disposition(scmd); + + /* + * if the result was normal, then just pass it along to the + * upper level. + */ + if (rtn == SUCCESS) + /* + * We don't want this command reissued, just finished + * with the sense data, so set retries to the max + * allowed to ensure it won't get reissued. If the user + * has requested infinite retries, we also want to + * finish this command, so force completion by setting + * retries and allowed to the same value. + */ + if (scmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT) + scmd->retries = scmd->allowed = 1; + else + scmd->retries = scmd->allowed; + else if (rtn != NEEDS_RETRY) + continue; + + scsi_eh_finish_cmd(scmd, done_q); + } + + return list_empty(work_q); +} +EXPORT_SYMBOL_GPL(scsi_eh_get_sense); + +/** + * scsi_eh_tur - Send TUR to device. + * @scmd: &scsi_cmnd to send TUR + * + * Return value: + * 0 - Device is ready. 1 - Device NOT ready. + */ +static int scsi_eh_tur(struct scsi_cmnd *scmd) +{ + static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0}; + int retry_cnt = 1; + enum scsi_disposition rtn; + +retry_tur: + rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, + scmd->device->eh_timeout, 0); + + SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, + "%s return: %x\n", __func__, rtn)); + + switch (rtn) { + case NEEDS_RETRY: + if (retry_cnt--) + goto retry_tur; + fallthrough; + case SUCCESS: + return 0; + default: + return 1; + } +} + +/** + * scsi_eh_test_devices - check if devices are responding from error recovery. + * @cmd_list: scsi commands in error recovery. + * @work_q: queue for commands which still need more error recovery + * @done_q: queue for commands which are finished + * @try_stu: boolean on if a STU command should be tried in addition to TUR. + * + * Decription: + * Tests if devices are in a working state. Commands to devices now in + * a working state are sent to the done_q while commands to devices which + * are still failing to respond are returned to the work_q for more + * processing. 
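+ *
+ * When @try_stu is set, a START UNIT is attempted before the TEST UNIT
+ * READY and the EH deadline check is skipped; otherwise commands are
+ * pushed back onto @work_q once the deadline has passed.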
+ **/ +static int scsi_eh_test_devices(struct list_head *cmd_list, + struct list_head *work_q, + struct list_head *done_q, int try_stu) +{ + struct scsi_cmnd *scmd, *next; + struct scsi_device *sdev; + int finish_cmds; + + while (!list_empty(cmd_list)) { + scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry); + sdev = scmd->device; + + if (!try_stu) { + if (scsi_host_eh_past_deadline(sdev->host)) { + /* Push items back onto work_q */ + list_splice_init(cmd_list, work_q); + SCSI_LOG_ERROR_RECOVERY(3, + sdev_printk(KERN_INFO, sdev, + "%s: skip test device, past eh deadline", + current->comm)); + break; + } + } + + finish_cmds = !scsi_device_online(scmd->device) || + (try_stu && !scsi_eh_try_stu(scmd) && + !scsi_eh_tur(scmd)) || + !scsi_eh_tur(scmd); + + list_for_each_entry_safe(scmd, next, cmd_list, eh_entry) + if (scmd->device == sdev) { + if (finish_cmds && + (try_stu || + scsi_eh_action(scmd, SUCCESS) == SUCCESS)) + scsi_eh_finish_cmd(scmd, done_q); + else + list_move_tail(&scmd->eh_entry, work_q); + } + } + return list_empty(work_q); +} + +/** + * scsi_eh_try_stu - Send START_UNIT to device. + * @scmd: &scsi_cmnd to send START_UNIT + * + * Return value: + * 0 - Device is ready. 1 - Device NOT ready. + */ +static int scsi_eh_try_stu(struct scsi_cmnd *scmd) +{ + static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0}; + + if (scmd->device->allow_restart) { + int i; + enum scsi_disposition rtn = NEEDS_RETRY; + + for (i = 0; rtn == NEEDS_RETRY && i < 2; i++) + rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, + scmd->device->eh_timeout, 0); + + if (rtn == SUCCESS) + return 0; + } + + return 1; +} + + /** + * scsi_eh_stu - send START_UNIT if needed + * @shost: &scsi host being recovered. + * @work_q: &list_head for pending commands. + * @done_q: &list_head for processed commands. + * + * Notes: + * If commands are failing due to not ready, initializing command required, + * try revalidating the device, which will end up sending a start unit. + */ +static int scsi_eh_stu(struct Scsi_Host *shost, + struct list_head *work_q, + struct list_head *done_q) +{ + struct scsi_cmnd *scmd, *stu_scmd, *next; + struct scsi_device *sdev; + + shost_for_each_device(sdev, shost) { + if (scsi_host_eh_past_deadline(shost)) { + SCSI_LOG_ERROR_RECOVERY(3, + sdev_printk(KERN_INFO, sdev, + "%s: skip START_UNIT, past eh deadline\n", + current->comm)); + scsi_device_put(sdev); + break; + } + stu_scmd = NULL; + list_for_each_entry(scmd, work_q, eh_entry) + if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) && + scsi_check_sense(scmd) == FAILED ) { + stu_scmd = scmd; + break; + } + + if (!stu_scmd) + continue; + + SCSI_LOG_ERROR_RECOVERY(3, + sdev_printk(KERN_INFO, sdev, + "%s: Sending START_UNIT\n", + current->comm)); + + if (!scsi_eh_try_stu(stu_scmd)) { + if (!scsi_device_online(sdev) || + !scsi_eh_tur(stu_scmd)) { + list_for_each_entry_safe(scmd, next, + work_q, eh_entry) { + if (scmd->device == sdev && + scsi_eh_action(scmd, SUCCESS) == SUCCESS) + scsi_eh_finish_cmd(scmd, done_q); + } + } + } else { + SCSI_LOG_ERROR_RECOVERY(3, + sdev_printk(KERN_INFO, sdev, + "%s: START_UNIT failed\n", + current->comm)); + } + } + + return list_empty(work_q); +} + + +/** + * scsi_eh_bus_device_reset - send bdr if needed + * @shost: scsi host being recovered. + * @work_q: &list_head for pending commands. + * @done_q: &list_head for processed commands. + * + * Notes: + * Try a bus device reset. 
Still, look to see whether we have multiple + * devices that are jammed or not - if we have multiple devices, it + * makes no sense to try bus_device_reset - we really would need to try + * a bus_reset instead. + */ +static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, + struct list_head *work_q, + struct list_head *done_q) +{ + struct scsi_cmnd *scmd, *bdr_scmd, *next; + struct scsi_device *sdev; + enum scsi_disposition rtn; + + shost_for_each_device(sdev, shost) { + if (scsi_host_eh_past_deadline(shost)) { + SCSI_LOG_ERROR_RECOVERY(3, + sdev_printk(KERN_INFO, sdev, + "%s: skip BDR, past eh deadline\n", + current->comm)); + scsi_device_put(sdev); + break; + } + bdr_scmd = NULL; + list_for_each_entry(scmd, work_q, eh_entry) + if (scmd->device == sdev) { + bdr_scmd = scmd; + break; + } + + if (!bdr_scmd) + continue; + + SCSI_LOG_ERROR_RECOVERY(3, + sdev_printk(KERN_INFO, sdev, + "%s: Sending BDR\n", current->comm)); + rtn = scsi_try_bus_device_reset(bdr_scmd); + if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { + if (!scsi_device_online(sdev) || + rtn == FAST_IO_FAIL || + !scsi_eh_tur(bdr_scmd)) { + list_for_each_entry_safe(scmd, next, + work_q, eh_entry) { + if (scmd->device == sdev && + scsi_eh_action(scmd, rtn) != FAILED) + scsi_eh_finish_cmd(scmd, + done_q); + } + } + } else { + SCSI_LOG_ERROR_RECOVERY(3, + sdev_printk(KERN_INFO, sdev, + "%s: BDR failed\n", current->comm)); + } + } + + return list_empty(work_q); +} + +/** + * scsi_eh_target_reset - send target reset if needed + * @shost: scsi host being recovered. + * @work_q: &list_head for pending commands. + * @done_q: &list_head for processed commands. + * + * Notes: + * Try a target reset. + */ +static int scsi_eh_target_reset(struct Scsi_Host *shost, + struct list_head *work_q, + struct list_head *done_q) +{ + LIST_HEAD(tmp_list); + LIST_HEAD(check_list); + + list_splice_init(work_q, &tmp_list); + + while (!list_empty(&tmp_list)) { + struct scsi_cmnd *next, *scmd; + enum scsi_disposition rtn; + unsigned int id; + + if (scsi_host_eh_past_deadline(shost)) { + /* push back on work queue for further processing */ + list_splice_init(&check_list, work_q); + list_splice_init(&tmp_list, work_q); + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "%s: Skip target reset, past eh deadline\n", + current->comm)); + return list_empty(work_q); + } + + scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry); + id = scmd_id(scmd); + + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "%s: Sending target reset to target %d\n", + current->comm, id)); + rtn = scsi_try_target_reset(scmd); + if (rtn != SUCCESS && rtn != FAST_IO_FAIL) + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "%s: Target reset failed" + " target: %d\n", + current->comm, id)); + list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) { + if (scmd_id(scmd) != id) + continue; + + if (rtn == SUCCESS) + list_move_tail(&scmd->eh_entry, &check_list); + else if (rtn == FAST_IO_FAIL) + scsi_eh_finish_cmd(scmd, done_q); + else + /* push back on work queue for further processing */ + list_move(&scmd->eh_entry, work_q); + } + } + + return scsi_eh_test_devices(&check_list, work_q, done_q, 0); +} + +/** + * scsi_eh_bus_reset - send a bus reset + * @shost: &scsi host being recovered. + * @work_q: &list_head for pending commands. + * @done_q: &list_head for processed commands. 
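+ *
+ * One reset is attempted per channel that still has failed commands;
+ * affected devices are then re-checked via scsi_eh_test_devices().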
+ */ +static int scsi_eh_bus_reset(struct Scsi_Host *shost, + struct list_head *work_q, + struct list_head *done_q) +{ + struct scsi_cmnd *scmd, *chan_scmd, *next; + LIST_HEAD(check_list); + unsigned int channel; + enum scsi_disposition rtn; + + /* + * we really want to loop over the various channels, and do this on + * a channel by channel basis. we should also check to see if any + * of the failed commands are on soft_reset devices, and if so, skip + * the reset. + */ + + for (channel = 0; channel <= shost->max_channel; channel++) { + if (scsi_host_eh_past_deadline(shost)) { + list_splice_init(&check_list, work_q); + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "%s: skip BRST, past eh deadline\n", + current->comm)); + return list_empty(work_q); + } + + chan_scmd = NULL; + list_for_each_entry(scmd, work_q, eh_entry) { + if (channel == scmd_channel(scmd)) { + chan_scmd = scmd; + break; + /* + * FIXME add back in some support for + * soft_reset devices. + */ + } + } + + if (!chan_scmd) + continue; + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "%s: Sending BRST chan: %d\n", + current->comm, channel)); + rtn = scsi_try_bus_reset(chan_scmd); + if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { + list_for_each_entry_safe(scmd, next, work_q, eh_entry) { + if (channel == scmd_channel(scmd)) { + if (rtn == FAST_IO_FAIL) + scsi_eh_finish_cmd(scmd, + done_q); + else + list_move_tail(&scmd->eh_entry, + &check_list); + } + } + } else { + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "%s: BRST failed chan: %d\n", + current->comm, channel)); + } + } + return scsi_eh_test_devices(&check_list, work_q, done_q, 0); +} + +/** + * scsi_eh_host_reset - send a host reset + * @shost: host to be reset. + * @work_q: &list_head for pending commands. + * @done_q: &list_head for processed commands. + */ +static int scsi_eh_host_reset(struct Scsi_Host *shost, + struct list_head *work_q, + struct list_head *done_q) +{ + struct scsi_cmnd *scmd, *next; + LIST_HEAD(check_list); + enum scsi_disposition rtn; + + if (!list_empty(work_q)) { + scmd = list_entry(work_q->next, + struct scsi_cmnd, eh_entry); + + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "%s: Sending HRST\n", + current->comm)); + + rtn = scsi_try_host_reset(scmd); + if (rtn == SUCCESS) { + list_splice_init(work_q, &check_list); + } else if (rtn == FAST_IO_FAIL) { + list_for_each_entry_safe(scmd, next, work_q, eh_entry) { + scsi_eh_finish_cmd(scmd, done_q); + } + } else { + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "%s: HRST failed\n", + current->comm)); + } + } + return scsi_eh_test_devices(&check_list, work_q, done_q, 1); +} + +/** + * scsi_eh_offline_sdevs - offline scsi devices that fail to recover + * @work_q: &list_head for pending commands. + * @done_q: &list_head for processed commands. + */ +static void scsi_eh_offline_sdevs(struct list_head *work_q, + struct list_head *done_q) +{ + struct scsi_cmnd *scmd, *next; + struct scsi_device *sdev; + + list_for_each_entry_safe(scmd, next, work_q, eh_entry) { + sdev_printk(KERN_INFO, scmd->device, "Device offlined - " + "not ready after error recovery\n"); + sdev = scmd->device; + + mutex_lock(&sdev->state_mutex); + scsi_device_set_state(sdev, SDEV_OFFLINE); + mutex_unlock(&sdev->state_mutex); + + scsi_eh_finish_cmd(scmd, done_q); + } + return; +} + +/** + * scsi_noretry_cmd - determine if command should be failed fast + * @scmd: SCSI cmd to examine. 
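+ *
+ * Return: true if the command should be failed without further retries,
+ * e.g. because a matching REQ_FAILFAST_* flag is set, the request is a
+ * passthrough request, or the command was aborted due to a duration-limit
+ * timeout; false otherwise.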
+ */ +bool scsi_noretry_cmd(struct scsi_cmnd *scmd) +{ + struct request *req = scsi_cmd_to_rq(scmd); + + switch (host_byte(scmd->result)) { + case DID_OK: + break; + case DID_TIME_OUT: + goto check_type; + case DID_BUS_BUSY: + return !!(req->cmd_flags & REQ_FAILFAST_TRANSPORT); + case DID_PARITY: + return !!(req->cmd_flags & REQ_FAILFAST_DEV); + case DID_ERROR: + if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT) + return false; + fallthrough; + case DID_SOFT_ERROR: + return !!(req->cmd_flags & REQ_FAILFAST_DRIVER); + } + + /* Never retry commands aborted due to a duration limit timeout */ + if (scsi_ml_byte(scmd->result) == SCSIML_STAT_DL_TIMEOUT) + return true; + + if (!scsi_status_is_check_condition(scmd->result)) + return false; + +check_type: + /* + * assume caller has checked sense and determined + * the check condition was retryable. + */ + if (req->cmd_flags & REQ_FAILFAST_DEV || blk_rq_is_passthrough(req)) + return true; + + return false; +} + +/** + * scsi_decide_disposition - Disposition a cmd on return from LLD. + * @scmd: SCSI cmd to examine. + * + * Notes: + * This is *only* called when we are examining the status after sending + * out the actual data command. any commands that are queued for error + * recovery (e.g. test_unit_ready) do *not* come through here. + * + * When this routine returns failed, it means the error handler thread + * is woken. In cases where the error code indicates an error that + * doesn't require the error handler read (i.e. we don't need to + * abort/reset), this function should return SUCCESS. + */ +enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd) +{ + enum scsi_disposition rtn; + + /* + * if the device is offline, then we clearly just pass the result back + * up to the top level. + */ + if (!scsi_device_online(scmd->device)) { + SCSI_LOG_ERROR_RECOVERY(5, scmd_printk(KERN_INFO, scmd, + "%s: device offline - report as SUCCESS\n", __func__)); + return SUCCESS; + } + + /* + * first check the host byte, to see if there is anything in there + * that would indicate what we need to do. + */ + switch (host_byte(scmd->result)) { + case DID_PASSTHROUGH: + /* + * no matter what, pass this through to the upper layer. + * nuke this special code so that it looks like we are saying + * did_ok. + */ + scmd->result &= 0xff00ffff; + return SUCCESS; + case DID_OK: + /* + * looks good. drop through, and check the next byte. + */ + break; + case DID_ABORT: + if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) { + set_host_byte(scmd, DID_TIME_OUT); + return SUCCESS; + } + fallthrough; + case DID_NO_CONNECT: + case DID_BAD_TARGET: + /* + * note - this means that we just report the status back + * to the top level driver, not that we actually think + * that it indicates SUCCESS. + */ + return SUCCESS; + case DID_SOFT_ERROR: + /* + * when the low level driver returns did_soft_error, + * it is responsible for keeping an internal retry counter + * in order to avoid endless loops (db) + */ + goto maybe_retry; + case DID_IMM_RETRY: + return NEEDS_RETRY; + + case DID_REQUEUE: + return ADD_TO_MLQUEUE; + case DID_TRANSPORT_DISRUPTED: + /* + * LLD/transport was disrupted during processing of the IO. + * The transport class is now blocked/blocking, + * and the transport will decide what to do with the IO + * based on its timers and recovery capablilities if + * there are enough retries. 
+ */ + goto maybe_retry; + case DID_TRANSPORT_FAILFAST: + /* + * The transport decided to failfast the IO (most likely + * the fast io fail tmo fired), so send IO directly upwards. + */ + return SUCCESS; + case DID_TRANSPORT_MARGINAL: + /* + * caller has decided not to do retries on + * abort success, so send IO directly upwards + */ + return SUCCESS; + case DID_ERROR: + if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT) + /* + * execute reservation conflict processing code + * lower down + */ + break; + fallthrough; + case DID_BUS_BUSY: + case DID_PARITY: + goto maybe_retry; + case DID_TIME_OUT: + /* + * when we scan the bus, we get timeout messages for + * these commands if there is no device available. + * other hosts report did_no_connect for the same thing. + */ + if ((scmd->cmnd[0] == TEST_UNIT_READY || + scmd->cmnd[0] == INQUIRY)) { + return SUCCESS; + } else { + return FAILED; + } + case DID_RESET: + return SUCCESS; + default: + return FAILED; + } + + /* + * check the status byte to see if this indicates anything special. + */ + switch (get_status_byte(scmd)) { + case SAM_STAT_TASK_SET_FULL: + scsi_handle_queue_full(scmd->device); + /* + * the case of trying to send too many commands to a + * tagged queueing device. + */ + fallthrough; + case SAM_STAT_BUSY: + /* + * device can't talk to us at the moment. Should only + * occur (SAM-3) when the task queue is empty, so will cause + * the empty queue handling to trigger a stall in the + * device. + */ + return ADD_TO_MLQUEUE; + case SAM_STAT_GOOD: + if (scmd->cmnd[0] == REPORT_LUNS) + scmd->device->sdev_target->expecting_lun_change = 0; + scsi_handle_queue_ramp_up(scmd->device); + if (scmd->sense_buffer && SCSI_SENSE_VALID(scmd)) + /* + * If we have sense data, call scsi_check_sense() in + * order to set the correct SCSI ML byte (if any). + * No point in checking the return value, since the + * command has already completed successfully. + */ + scsi_check_sense(scmd); + fallthrough; + case SAM_STAT_COMMAND_TERMINATED: + return SUCCESS; + case SAM_STAT_TASK_ABORTED: + goto maybe_retry; + case SAM_STAT_CHECK_CONDITION: + rtn = scsi_check_sense(scmd); + if (rtn == NEEDS_RETRY) + goto maybe_retry; + /* if rtn == FAILED, we have no sense information; + * returning FAILED will wake the error handler thread + * to collect the sense and redo the decide + * disposition */ + return rtn; + case SAM_STAT_CONDITION_MET: + case SAM_STAT_INTERMEDIATE: + case SAM_STAT_INTERMEDIATE_CONDITION_MET: + case SAM_STAT_ACA_ACTIVE: + /* + * who knows? FIXME(eric) + */ + return SUCCESS; + + case SAM_STAT_RESERVATION_CONFLICT: + sdev_printk(KERN_INFO, scmd->device, + "reservation conflict\n"); + set_scsi_ml_byte(scmd, SCSIML_STAT_RESV_CONFLICT); + return SUCCESS; /* causes immediate i/o error */ + } + return FAILED; + +maybe_retry: + + /* we requeue for retry because the error was retryable, and + * the request was not marked fast fail. Note that above, + * even if the request is marked fast fail, we still requeue + * for queue congestion conditions (QUEUE_FULL or BUSY) */ + if (scsi_cmd_retry_allowed(scmd) && !scsi_noretry_cmd(scmd)) { + return NEEDS_RETRY; + } else { + /* + * no more retries - report this one back to upper level. 
+ */ + return SUCCESS; + } +} + +static enum rq_end_io_ret eh_lock_door_done(struct request *req, + blk_status_t status) +{ + blk_mq_free_request(req); + return RQ_END_IO_NONE; +} + +/** + * scsi_eh_lock_door - Prevent medium removal for the specified device + * @sdev: SCSI device to prevent medium removal + * + * Locking: + * We must be called from process context. + * + * Notes: + * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the + * head of the devices request queue, and continue. + */ +static void scsi_eh_lock_door(struct scsi_device *sdev) +{ + struct scsi_cmnd *scmd; + struct request *req; + + req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0); + if (IS_ERR(req)) + return; + scmd = blk_mq_rq_to_pdu(req); + + scmd->cmnd[0] = ALLOW_MEDIUM_REMOVAL; + scmd->cmnd[1] = 0; + scmd->cmnd[2] = 0; + scmd->cmnd[3] = 0; + scmd->cmnd[4] = SCSI_REMOVAL_PREVENT; + scmd->cmnd[5] = 0; + scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); + scmd->allowed = 5; + + req->rq_flags |= RQF_QUIET; + req->timeout = 10 * HZ; + req->end_io = eh_lock_door_done; + + blk_execute_rq_nowait(req, true); +} + +/** + * scsi_restart_operations - restart io operations to the specified host. + * @shost: Host we are restarting. + * + * Notes: + * When we entered the error handler, we blocked all further i/o to + * this device. we need to 'reverse' this process. + */ +static void scsi_restart_operations(struct Scsi_Host *shost) +{ + struct scsi_device *sdev; + unsigned long flags; + + /* + * If the door was locked, we need to insert a door lock request + * onto the head of the SCSI request queue for the device. There + * is no point trying to lock the door of an off-line device. + */ + shost_for_each_device(sdev, shost) { + if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) { + scsi_eh_lock_door(sdev); + sdev->was_reset = 0; + } + } + + /* + * next free up anything directly waiting upon the host. this + * will be requests for character device operations, and also for + * ioctls to queued block devices. + */ + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, "waking up host to restart\n")); + + spin_lock_irqsave(shost->host_lock, flags); + if (scsi_host_set_state(shost, SHOST_RUNNING)) + if (scsi_host_set_state(shost, SHOST_CANCEL)) + BUG_ON(scsi_host_set_state(shost, SHOST_DEL)); + spin_unlock_irqrestore(shost->host_lock, flags); + + wake_up(&shost->host_wait); + + /* + * finally we need to re-initiate requests that may be pending. we will + * have had everything blocked while error handling is taking place, and + * now that error recovery is done, we will need to ensure that these + * requests are started. + */ + scsi_run_host_queues(shost); + + /* + * if eh is active and host_eh_scheduled is pending we need to re-run + * recovery. we do this check after scsi_run_host_queues() to allow + * everything pent up since the last eh run a chance to make forward + * progress before we sync again. Either we'll immediately re-run + * recovery or scsi_device_unbusy() will wake us again when these + * pending commands complete. + */ + spin_lock_irqsave(shost->host_lock, flags); + if (shost->host_eh_scheduled) + if (scsi_host_set_state(shost, SHOST_RECOVERY)) + WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)); + spin_unlock_irqrestore(shost->host_lock, flags); +} + +/** + * scsi_eh_ready_devs - check device ready state and recover if not. + * @shost: host to be recovered. + * @work_q: &list_head for pending commands. + * @done_q: &list_head for processed commands. 
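+ *
+ * Escalates through START UNIT, bus device reset, target reset, bus reset
+ * and host reset; devices that still fail to respond are taken offline.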
+ */ +void scsi_eh_ready_devs(struct Scsi_Host *shost, + struct list_head *work_q, + struct list_head *done_q) +{ + if (!scsi_eh_stu(shost, work_q, done_q)) + if (!scsi_eh_bus_device_reset(shost, work_q, done_q)) + if (!scsi_eh_target_reset(shost, work_q, done_q)) + if (!scsi_eh_bus_reset(shost, work_q, done_q)) + if (!scsi_eh_host_reset(shost, work_q, done_q)) + scsi_eh_offline_sdevs(work_q, + done_q); +} +EXPORT_SYMBOL_GPL(scsi_eh_ready_devs); + +/** + * scsi_eh_flush_done_q - finish processed commands or retry them. + * @done_q: list_head of processed commands. + */ +void scsi_eh_flush_done_q(struct list_head *done_q) +{ + struct scsi_cmnd *scmd, *next; + + list_for_each_entry_safe(scmd, next, done_q, eh_entry) { + struct scsi_device *sdev = scmd->device; + + list_del_init(&scmd->eh_entry); + if (scsi_device_online(sdev) && !scsi_noretry_cmd(scmd) && + scsi_cmd_retry_allowed(scmd) && + scsi_eh_should_retry_cmd(scmd)) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "%s: flush retry cmd\n", + current->comm)); + scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); + blk_mq_kick_requeue_list(sdev->request_queue); + } else { + /* + * If just we got sense for the device (called + * scsi_eh_get_sense), scmd->result is already + * set, do not set DID_TIME_OUT. + */ + if (!scmd->result && + !(scmd->flags & SCMD_FORCE_EH_SUCCESS)) + scmd->result |= (DID_TIME_OUT << 16); + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "%s: flush finish cmd\n", + current->comm)); + scsi_finish_command(scmd); + } + } +} +EXPORT_SYMBOL(scsi_eh_flush_done_q); + +/** + * scsi_unjam_host - Attempt to fix a host which has a cmd that failed. + * @shost: Host to unjam. + * + * Notes: + * When we come in here, we *know* that all commands on the bus have + * either completed, failed or timed out. we also know that no further + * commands are being sent to the host, so things are relatively quiet + * and we have freedom to fiddle with things as we wish. + * + * This is only the *default* implementation. it is possible for + * individual drivers to supply their own version of this function, and + * if the maintainer wishes to do this, it is strongly suggested that + * this function be taken as a template and modified. this function + * was designed to correctly handle problems for about 95% of the + * different cases out there, and it should always provide at least a + * reasonable amount of error recovery. + * + * Any command marked 'failed' or 'timeout' must eventually have + * scsi_finish_cmd() called for it. we do all of the retry stuff + * here, so when we restart the host after we return it should have an + * empty queue. + */ +static void scsi_unjam_host(struct Scsi_Host *shost) +{ + unsigned long flags; + LIST_HEAD(eh_work_q); + LIST_HEAD(eh_done_q); + + spin_lock_irqsave(shost->host_lock, flags); + list_splice_init(&shost->eh_cmd_q, &eh_work_q); + spin_unlock_irqrestore(shost->host_lock, flags); + + SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q)); + + if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q)) + scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q); + + spin_lock_irqsave(shost->host_lock, flags); + if (shost->eh_deadline != -1) + shost->last_reset = 0; + spin_unlock_irqrestore(shost->host_lock, flags); + scsi_eh_flush_done_q(&eh_done_q); +} + +/** + * scsi_error_handler - SCSI error handler thread + * @data: Host for which we are running. + * + * Notes: + * This is the main error handling loop. 
This is run as a kernel thread + * for every SCSI host and handles all error handling activity. + */ +int scsi_error_handler(void *data) +{ + struct Scsi_Host *shost = data; + + /* + * We use TASK_INTERRUPTIBLE so that the thread is not + * counted against the load average as a running process. + * We never actually get interrupted because kthread_run + * disables signal delivery for the created thread. + */ + while (true) { + /* + * The sequence in kthread_stop() sets the stop flag first + * then wakes the process. To avoid missed wakeups, the task + * should always be in a non running state before the stop + * flag is checked + */ + set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_stop()) + break; + + if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) || + shost->host_failed != scsi_host_busy(shost)) { + SCSI_LOG_ERROR_RECOVERY(1, + shost_printk(KERN_INFO, shost, + "scsi_eh_%d: sleeping\n", + shost->host_no)); + schedule(); + continue; + } + + __set_current_state(TASK_RUNNING); + SCSI_LOG_ERROR_RECOVERY(1, + shost_printk(KERN_INFO, shost, + "scsi_eh_%d: waking up %d/%d/%d\n", + shost->host_no, shost->host_eh_scheduled, + shost->host_failed, + scsi_host_busy(shost))); + + /* + * We have a host that is failing for some reason. Figure out + * what we need to do to get it up and online again (if we can). + * If we fail, we end up taking the thing offline. + */ + if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) { + SCSI_LOG_ERROR_RECOVERY(1, + shost_printk(KERN_ERR, shost, + "scsi_eh_%d: unable to autoresume\n", + shost->host_no)); + continue; + } + + if (shost->transportt->eh_strategy_handler) + shost->transportt->eh_strategy_handler(shost); + else + scsi_unjam_host(shost); + + /* All scmds have been handled */ + shost->host_failed = 0; + + /* + * Note - if the above fails completely, the action is to take + * individual devices offline and flush the queue of any + * outstanding requests that may have been pending. When we + * restart, we restart any I/O to any other devices on the bus + * which are still online. + */ + scsi_restart_operations(shost); + if (!shost->eh_noresume) + scsi_autopm_put_host(shost); + } + __set_current_state(TASK_RUNNING); + + SCSI_LOG_ERROR_RECOVERY(1, + shost_printk(KERN_INFO, shost, + "Error handler scsi_eh_%d exiting\n", + shost->host_no)); + shost->ehandler = NULL; + return 0; +} + +/* + * Function: scsi_report_bus_reset() + * + * Purpose: Utility function used by low-level drivers to report that + * they have observed a bus reset on the bus being handled. + * + * Arguments: shost - Host in question + * channel - channel on which reset was observed. + * + * Returns: Nothing + * + * Lock status: Host lock must be held. + * + * Notes: This only needs to be called if the reset is one which + * originates from an unknown location. Resets originated + * by the mid-level itself don't need to call this, but there + * should be no harm. + * + * The main purpose of this is to make sure that a CHECK_CONDITION + * is properly treated. + */ +void scsi_report_bus_reset(struct Scsi_Host *shost, int channel) +{ + struct scsi_device *sdev; + + __shost_for_each_device(sdev, shost) { + if (channel == sdev_channel(sdev)) + __scsi_report_device_reset(sdev, NULL); + } +} +EXPORT_SYMBOL(scsi_report_bus_reset); + +/* + * Function: scsi_report_device_reset() + * + * Purpose: Utility function used by low-level drivers to report that + * they have observed a device reset on the device being handled. 
+ * + * Arguments: shost - Host in question + * channel - channel on which reset was observed + * target - target on which reset was observed + * + * Returns: Nothing + * + * Lock status: Host lock must be held + * + * Notes: This only needs to be called if the reset is one which + * originates from an unknown location. Resets originated + * by the mid-level itself don't need to call this, but there + * should be no harm. + * + * The main purpose of this is to make sure that a CHECK_CONDITION + * is properly treated. + */ +void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target) +{ + struct scsi_device *sdev; + + __shost_for_each_device(sdev, shost) { + if (channel == sdev_channel(sdev) && + target == sdev_id(sdev)) + __scsi_report_device_reset(sdev, NULL); + } +} +EXPORT_SYMBOL(scsi_report_device_reset); + +/** + * scsi_ioctl_reset: explicitly reset a host/bus/target/device + * @dev: scsi_device to operate on + * @arg: reset type (see sg.h) + */ +int +scsi_ioctl_reset(struct scsi_device *dev, int __user *arg) +{ + struct scsi_cmnd *scmd; + struct Scsi_Host *shost = dev->host; + struct request *rq; + unsigned long flags; + int error = 0, val; + enum scsi_disposition rtn; + + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) + return -EACCES; + + error = get_user(val, arg); + if (error) + return error; + + if (scsi_autopm_get_host(shost) < 0) + return -EIO; + + error = -EIO; + rq = kzalloc(sizeof(struct request) + sizeof(struct scsi_cmnd) + + shost->hostt->cmd_size, GFP_KERNEL); + if (!rq) + goto out_put_autopm_host; + blk_rq_init(NULL, rq); + + scmd = (struct scsi_cmnd *)(rq + 1); + scsi_init_command(dev, scmd); + + scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL; + scmd->flags |= SCMD_LAST; + memset(&scmd->sdb, 0, sizeof(scmd->sdb)); + + scmd->cmd_len = 0; + + scmd->sc_data_direction = DMA_BIDIRECTIONAL; + + spin_lock_irqsave(shost->host_lock, flags); + shost->tmf_in_progress = 1; + spin_unlock_irqrestore(shost->host_lock, flags); + + switch (val & ~SG_SCSI_RESET_NO_ESCALATE) { + case SG_SCSI_RESET_NOTHING: + rtn = SUCCESS; + break; + case SG_SCSI_RESET_DEVICE: + rtn = scsi_try_bus_device_reset(scmd); + if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE)) + break; + fallthrough; + case SG_SCSI_RESET_TARGET: + rtn = scsi_try_target_reset(scmd); + if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE)) + break; + fallthrough; + case SG_SCSI_RESET_BUS: + rtn = scsi_try_bus_reset(scmd); + if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE)) + break; + fallthrough; + case SG_SCSI_RESET_HOST: + rtn = scsi_try_host_reset(scmd); + if (rtn == SUCCESS) + break; + fallthrough; + default: + rtn = FAILED; + break; + } + + error = (rtn == SUCCESS) ? 0 : -EIO; + + spin_lock_irqsave(shost->host_lock, flags); + shost->tmf_in_progress = 0; + spin_unlock_irqrestore(shost->host_lock, flags); + + /* + * be sure to wake up anyone who was sleeping or had their queue + * suspended while we performed the TMF. 
+ */ + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "waking up host to restart after TMF\n")); + + wake_up(&shost->host_wait); + scsi_run_host_queues(shost); + + kfree(rq); + +out_put_autopm_host: + scsi_autopm_put_host(shost); + return error; +} + +bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd, + struct scsi_sense_hdr *sshdr) +{ + return scsi_normalize_sense(cmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE, sshdr); +} +EXPORT_SYMBOL(scsi_command_normalize_sense); + +/** + * scsi_get_sense_info_fld - get information field from sense data (either fixed or descriptor format) + * @sense_buffer: byte array of sense data + * @sb_len: number of valid bytes in sense_buffer + * @info_out: pointer to 64 integer where 8 or 4 byte information + * field will be placed if found. + * + * Return value: + * true if information field found, false if not found. + */ +bool scsi_get_sense_info_fld(const u8 *sense_buffer, int sb_len, + u64 *info_out) +{ + const u8 * ucp; + + if (sb_len < 7) + return false; + switch (sense_buffer[0] & 0x7f) { + case 0x70: + case 0x71: + if (sense_buffer[0] & 0x80) { + *info_out = get_unaligned_be32(&sense_buffer[3]); + return true; + } + return false; + case 0x72: + case 0x73: + ucp = scsi_sense_desc_find(sense_buffer, sb_len, + 0 /* info desc */); + if (ucp && (0xa == ucp[1])) { + *info_out = get_unaligned_be64(&ucp[4]); + return true; + } + return false; + default: + return false; + } +} +EXPORT_SYMBOL(scsi_get_sense_info_fld); diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c new file mode 100644 index 000000000..6f6c5973c --- /dev/null +++ b/drivers/scsi/scsi_ioctl.c @@ -0,0 +1,960 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Changes: + * Arnaldo Carvalho de Melo 08/23/2000 + * - get rid of some verify_areas and use __copy*user and __get/put_user + * for the ones that remain + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "scsi_logging.h" + +#define NORMAL_RETRIES 5 +#define IOCTL_NORMAL_TIMEOUT (10 * HZ) + +#define MAX_BUF PAGE_SIZE + +/** + * ioctl_probe -- return host identification + * @host: host to identify + * @buffer: userspace buffer for identification + * + * Return an identifying string at @buffer, if @buffer is non-NULL, filling + * to the length stored at * (int *) @buffer. 
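The ioctl_probe() helper documented here expects the caller to place the buffer length in the leading int of the buffer and overwrites the buffer with the host's identification string. A minimal sketch of that calling convention; the device node is an example, and SCSI_IOCTL_PROBE_HOST is defined locally in case the installed <scsi/scsi_ioctl.h> does not provide it (0x5385 is the kernel's value).

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef SCSI_IOCTL_PROBE_HOST
#define SCSI_IOCTL_PROBE_HOST 0x5385    /* assumed fallback definition */
#endif

int main(void)
{
        char buf[256];
        int fd = open("/dev/sda", O_RDONLY);    /* any SCSI device node */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        *(int *)buf = sizeof(buf);              /* length goes in first */
        if (ioctl(fd, SCSI_IOCTL_PROBE_HOST, buf) < 0) {
                perror("SCSI_IOCTL_PROBE_HOST");
                close(fd);
                return 1;
        }
        printf("host: %.*s\n", (int)sizeof(buf), buf);
        close(fd);
        return 0;
}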
+ */ +static int ioctl_probe(struct Scsi_Host *host, void __user *buffer) +{ + unsigned int len, slen; + const char *string; + + if (buffer) { + if (get_user(len, (unsigned int __user *) buffer)) + return -EFAULT; + + if (host->hostt->info) + string = host->hostt->info(host); + else + string = host->hostt->name; + if (string) { + slen = strlen(string); + if (len > slen) + len = slen + 1; + if (copy_to_user(buffer, string, len)) + return -EFAULT; + } + } + return 1; +} + +static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, + int timeout, int retries) +{ + int result; + struct scsi_sense_hdr sshdr; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + + SCSI_LOG_IOCTL(1, sdev_printk(KERN_INFO, sdev, + "Trying ioctl with scsi command %d\n", *cmd)); + + result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0, timeout, + retries, &exec_args); + + SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev, + "Ioctl returned 0x%x\n", result)); + + if (result < 0) + goto out; + if (scsi_sense_valid(&sshdr)) { + switch (sshdr.sense_key) { + case ILLEGAL_REQUEST: + if (cmd[0] == ALLOW_MEDIUM_REMOVAL) + sdev->lockable = 0; + else + sdev_printk(KERN_INFO, sdev, + "ioctl_internal_command: " + "ILLEGAL REQUEST " + "asc=0x%x ascq=0x%x\n", + sshdr.asc, sshdr.ascq); + break; + case NOT_READY: /* This happens if there is no disc in drive */ + if (sdev->removable) + break; + fallthrough; + case UNIT_ATTENTION: + if (sdev->removable) { + sdev->changed = 1; + result = 0; /* This is no longer considered an error */ + break; + } + fallthrough; /* for non-removable media */ + default: + sdev_printk(KERN_INFO, sdev, + "ioctl_internal_command return code = %x\n", + result); + scsi_print_sense_hdr(sdev, NULL, &sshdr); + break; + } + } +out: + SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev, + "IOCTL Releasing command\n")); + return result; +} + +int scsi_set_medium_removal(struct scsi_device *sdev, char state) +{ + char scsi_cmd[MAX_COMMAND_SIZE]; + int ret; + + if (!sdev->removable || !sdev->lockable) + return 0; + + scsi_cmd[0] = ALLOW_MEDIUM_REMOVAL; + scsi_cmd[1] = 0; + scsi_cmd[2] = 0; + scsi_cmd[3] = 0; + scsi_cmd[4] = state; + scsi_cmd[5] = 0; + + ret = ioctl_internal_command(sdev, scsi_cmd, + IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES); + if (ret == 0) + sdev->locked = (state == SCSI_REMOVAL_PREVENT); + return ret; +} +EXPORT_SYMBOL(scsi_set_medium_removal); + +/* + * The scsi_ioctl_get_pci() function places into arg the value + * pci_dev::slot_name (8 characters) for the PCI device (if any). + * Returns: 0 on success + * -ENXIO if there isn't a PCI device pointer + * (could be because the SCSI driver hasn't been + * updated yet, or because it isn't a SCSI + * device) + * any copy_to_user() error on failure there + */ +static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg) +{ + struct device *dev = scsi_get_device(sdev->host); + const char *name; + + if (!dev) + return -ENXIO; + + name = dev_name(dev); + + /* compatibility with old ioctl which only returned + * 20 characters */ + return copy_to_user(arg, name, min(strlen(name), (size_t)20)) + ? 
-EFAULT: 0; +} + +static int sg_get_version(int __user *p) +{ + static const int sg_version_num = 30527; + return put_user(sg_version_num, p); +} + +static int sg_set_timeout(struct scsi_device *sdev, int __user *p) +{ + int timeout, err = get_user(timeout, p); + + if (!err) + sdev->sg_timeout = clock_t_to_jiffies(timeout); + + return err; +} + +static int sg_get_reserved_size(struct scsi_device *sdev, int __user *p) +{ + int val = min(sdev->sg_reserved_size, + queue_max_bytes(sdev->request_queue)); + + return put_user(val, p); +} + +static int sg_set_reserved_size(struct scsi_device *sdev, int __user *p) +{ + int size, err = get_user(size, p); + + if (err) + return err; + + if (size < 0) + return -EINVAL; + + sdev->sg_reserved_size = min_t(unsigned int, size, + queue_max_bytes(sdev->request_queue)); + return 0; +} + +/* + * will always return that we are ATAPI even for a real SCSI drive, I'm not + * so sure this is worth doing anything about (why would you care??) + */ +static int sg_emulated_host(struct request_queue *q, int __user *p) +{ + return put_user(1, p); +} + +static int scsi_get_idlun(struct scsi_device *sdev, void __user *argp) +{ + struct scsi_idlun v = { + .dev_id = (sdev->id & 0xff) + + ((sdev->lun & 0xff) << 8) + + ((sdev->channel & 0xff) << 16) + + ((sdev->host->host_no & 0xff) << 24), + .host_unique_id = sdev->host->unique_id + }; + if (copy_to_user(argp, &v, sizeof(struct scsi_idlun))) + return -EFAULT; + return 0; +} + +static int scsi_send_start_stop(struct scsi_device *sdev, int data) +{ + u8 cdb[MAX_COMMAND_SIZE] = { }; + + cdb[0] = START_STOP; + cdb[4] = data; + return ioctl_internal_command(sdev, cdb, START_STOP_TIMEOUT, + NORMAL_RETRIES); +} + +/* + * Check if the given command is allowed. + * + * Only a subset of commands are allowed for unprivileged users. Commands used + * to format the media, update the firmware, etc. are not permitted. + */ +bool scsi_cmd_allowed(unsigned char *cmd, bool open_for_write) +{ + /* root can do any command. 
*/ + if (capable(CAP_SYS_RAWIO)) + return true; + + /* Anybody who can open the device can do a read-safe command */ + switch (cmd[0]) { + /* Basic read-only commands */ + case TEST_UNIT_READY: + case REQUEST_SENSE: + case READ_6: + case READ_10: + case READ_12: + case READ_16: + case READ_BUFFER: + case READ_DEFECT_DATA: + case READ_CAPACITY: /* also GPCMD_READ_CDVD_CAPACITY */ + case READ_LONG: + case INQUIRY: + case MODE_SENSE: + case MODE_SENSE_10: + case LOG_SENSE: + case START_STOP: + case GPCMD_VERIFY_10: + case VERIFY_16: + case REPORT_LUNS: + case SERVICE_ACTION_IN_16: + case RECEIVE_DIAGNOSTIC: + case MAINTENANCE_IN: /* also GPCMD_SEND_KEY, which is a write command */ + case GPCMD_READ_BUFFER_CAPACITY: + /* Audio CD commands */ + case GPCMD_PLAY_CD: + case GPCMD_PLAY_AUDIO_10: + case GPCMD_PLAY_AUDIO_MSF: + case GPCMD_PLAY_AUDIO_TI: + case GPCMD_PAUSE_RESUME: + /* CD/DVD data reading */ + case GPCMD_READ_CD: + case GPCMD_READ_CD_MSF: + case GPCMD_READ_DISC_INFO: + case GPCMD_READ_DVD_STRUCTURE: + case GPCMD_READ_HEADER: + case GPCMD_READ_TRACK_RZONE_INFO: + case GPCMD_READ_SUBCHANNEL: + case GPCMD_READ_TOC_PMA_ATIP: + case GPCMD_REPORT_KEY: + case GPCMD_SCAN: + case GPCMD_GET_CONFIGURATION: + case GPCMD_READ_FORMAT_CAPACITIES: + case GPCMD_GET_EVENT_STATUS_NOTIFICATION: + case GPCMD_GET_PERFORMANCE: + case GPCMD_SEEK: + case GPCMD_STOP_PLAY_SCAN: + /* ZBC */ + case ZBC_IN: + return true; + /* Basic writing commands */ + case WRITE_6: + case WRITE_10: + case WRITE_VERIFY: + case WRITE_12: + case WRITE_VERIFY_12: + case WRITE_16: + case WRITE_LONG: + case WRITE_LONG_2: + case WRITE_SAME: + case WRITE_SAME_16: + case WRITE_SAME_32: + case ERASE: + case GPCMD_MODE_SELECT_10: + case MODE_SELECT: + case LOG_SELECT: + case GPCMD_BLANK: + case GPCMD_CLOSE_TRACK: + case GPCMD_FLUSH_CACHE: + case GPCMD_FORMAT_UNIT: + case GPCMD_REPAIR_RZONE_TRACK: + case GPCMD_RESERVE_RZONE_TRACK: + case GPCMD_SEND_DVD_STRUCTURE: + case GPCMD_SEND_EVENT: + case GPCMD_SEND_OPC: + case GPCMD_SEND_CUE_SHEET: + case GPCMD_SET_SPEED: + case GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL: + case GPCMD_LOAD_UNLOAD: + case GPCMD_SET_STREAMING: + case GPCMD_SET_READ_AHEAD: + /* ZBC */ + case ZBC_OUT: + return open_for_write; + default: + return false; + } +} +EXPORT_SYMBOL(scsi_cmd_allowed); + +static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq, + struct sg_io_hdr *hdr, bool open_for_write) +{ + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); + + if (hdr->cmd_len < 6) + return -EMSGSIZE; + if (copy_from_user(scmd->cmnd, hdr->cmdp, hdr->cmd_len)) + return -EFAULT; + if (!scsi_cmd_allowed(scmd->cmnd, open_for_write)) + return -EPERM; + scmd->cmd_len = hdr->cmd_len; + + rq->timeout = msecs_to_jiffies(hdr->timeout); + if (!rq->timeout) + rq->timeout = sdev->sg_timeout; + if (!rq->timeout) + rq->timeout = BLK_DEFAULT_SG_TIMEOUT; + if (rq->timeout < BLK_MIN_SG_TIMEOUT) + rq->timeout = BLK_MIN_SG_TIMEOUT; + + return 0; +} + +static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, + struct bio *bio) +{ + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); + int r, ret = 0; + + /* + * fill in all the output members + */ + hdr->status = scmd->result & 0xff; + hdr->masked_status = sg_status_byte(scmd->result); + hdr->msg_status = COMMAND_COMPLETE; + hdr->host_status = host_byte(scmd->result); + hdr->driver_status = 0; + if (scsi_status_is_check_condition(hdr->status)) + hdr->driver_status = DRIVER_SENSE; + hdr->info = 0; + if (hdr->masked_status || hdr->host_status || hdr->driver_status) + hdr->info 
|= SG_INFO_CHECK; + hdr->resid = scmd->resid_len; + hdr->sb_len_wr = 0; + + if (scmd->sense_len && hdr->sbp) { + int len = min((unsigned int) hdr->mx_sb_len, scmd->sense_len); + + if (!copy_to_user(hdr->sbp, scmd->sense_buffer, len)) + hdr->sb_len_wr = len; + else + ret = -EFAULT; + } + + r = blk_rq_unmap_user(bio); + if (!ret) + ret = r; + + return ret; +} + +static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, + bool open_for_write) +{ + unsigned long start_time; + ssize_t ret = 0; + int writing = 0; + int at_head = 0; + struct request *rq; + struct scsi_cmnd *scmd; + struct bio *bio; + + if (hdr->interface_id != 'S') + return -EINVAL; + + if (hdr->dxfer_len > (queue_max_hw_sectors(sdev->request_queue) << 9)) + return -EIO; + + if (hdr->dxfer_len) + switch (hdr->dxfer_direction) { + default: + return -EINVAL; + case SG_DXFER_TO_DEV: + writing = 1; + break; + case SG_DXFER_TO_FROM_DEV: + case SG_DXFER_FROM_DEV: + break; + } + if (hdr->flags & SG_FLAG_Q_AT_HEAD) + at_head = 1; + + rq = scsi_alloc_request(sdev->request_queue, writing ? + REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); + if (IS_ERR(rq)) + return PTR_ERR(rq); + scmd = blk_mq_rq_to_pdu(rq); + + if (hdr->cmd_len > sizeof(scmd->cmnd)) { + ret = -EINVAL; + goto out_put_request; + } + + ret = scsi_fill_sghdr_rq(sdev, rq, hdr, open_for_write); + if (ret < 0) + goto out_put_request; + + ret = blk_rq_map_user_io(rq, NULL, hdr->dxferp, hdr->dxfer_len, + GFP_KERNEL, hdr->iovec_count && hdr->dxfer_len, + hdr->iovec_count, 0, rq_data_dir(rq)); + if (ret) + goto out_put_request; + + bio = rq->bio; + scmd->allowed = 0; + + start_time = jiffies; + + blk_execute_rq(rq, at_head); + + hdr->duration = jiffies_to_msecs(jiffies - start_time); + + ret = scsi_complete_sghdr_rq(rq, hdr, bio); + +out_put_request: + blk_mq_free_request(rq); + return ret; +} + +/** + * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl + * @q: request queue to send scsi commands down + * @open_for_write: is the file / block device opened for writing? + * @sic: userspace structure describing the command to perform + * + * Send down the scsi command described by @sic to the device below + * the request queue @q. + * + * Notes: + * - This interface is deprecated - users should use the SG_IO + * interface instead, as this is a more flexible approach to + * performing SCSI commands on a device. + * - The SCSI command length is determined by examining the 1st byte + * of the given command. There is no way to override this. + * - Data transfers are limited to PAGE_SIZE + * - The length (x + y) must be at least OMAX_SB_LEN bytes long to + * accommodate the sense buffer when an error occurs. + * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that + * old code will not be surprised. + * - If a Unix error occurs (e.g. ENOMEM) then the user will receive + * a negative return and the Unix error code in 'errno'. + * If the SCSI command succeeds then 0 is returned. + * Positive numbers returned are the compacted SCSI error codes (4 + * bytes in one int) where the lowest byte is the SCSI status. 
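As the notes here say, SG_IO is the preferred replacement for this deprecated interface; sg_io() earlier in this file is the routine that services it. A userspace sketch sending a standard INQUIRY through SG_IO; the /dev/sg0 path and the 5 second timeout are arbitrary example values.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
        unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };   /* INQUIRY, 96 bytes */
        unsigned char data[96], sense[32];
        struct sg_io_hdr hdr;
        int fd = open("/dev/sg0", O_RDONLY | O_NONBLOCK);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(&hdr, 0, sizeof(hdr));
        hdr.interface_id = 'S';                 /* checked by sg_io() */
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.cmd_len = sizeof(cdb);
        hdr.cmdp = cdb;
        hdr.dxfer_len = sizeof(data);
        hdr.dxferp = data;
        hdr.mx_sb_len = sizeof(sense);
        hdr.sbp = sense;
        hdr.timeout = 5000;                     /* milliseconds */

        if (ioctl(fd, SG_IO, &hdr) < 0) {
                perror("SG_IO");
                close(fd);
                return 1;
        }
        if (hdr.status == 0)
                printf("vendor/product: %.24s\n", data + 8);
        else
                fprintf(stderr, "SCSI status 0x%x, %u sense bytes\n",
                        hdr.status, hdr.sb_len_wr);
        close(fd);
        return 0;
}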
+ */ +static int sg_scsi_ioctl(struct request_queue *q, bool open_for_write, + struct scsi_ioctl_command __user *sic) +{ + struct request *rq; + int err; + unsigned int in_len, out_len, bytes, opcode, cmdlen; + struct scsi_cmnd *scmd; + char *buffer = NULL; + + if (!sic) + return -EINVAL; + + /* + * get in an out lengths, verify they don't exceed a page worth of data + */ + if (get_user(in_len, &sic->inlen)) + return -EFAULT; + if (get_user(out_len, &sic->outlen)) + return -EFAULT; + if (in_len > PAGE_SIZE || out_len > PAGE_SIZE) + return -EINVAL; + if (get_user(opcode, &sic->data[0])) + return -EFAULT; + + bytes = max(in_len, out_len); + if (bytes) { + buffer = kzalloc(bytes, GFP_NOIO | GFP_USER | __GFP_NOWARN); + if (!buffer) + return -ENOMEM; + + } + + rq = scsi_alloc_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto error_free_buffer; + } + scmd = blk_mq_rq_to_pdu(rq); + + cmdlen = COMMAND_SIZE(opcode); + + /* + * get command and data to send to device, if any + */ + err = -EFAULT; + scmd->cmd_len = cmdlen; + if (copy_from_user(scmd->cmnd, sic->data, cmdlen)) + goto error; + + if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len)) + goto error; + + err = -EPERM; + if (!scsi_cmd_allowed(scmd->cmnd, open_for_write)) + goto error; + + /* default. possible overridden later */ + scmd->allowed = 5; + + switch (opcode) { + case SEND_DIAGNOSTIC: + case FORMAT_UNIT: + rq->timeout = FORMAT_UNIT_TIMEOUT; + scmd->allowed = 1; + break; + case START_STOP: + rq->timeout = START_STOP_TIMEOUT; + break; + case MOVE_MEDIUM: + rq->timeout = MOVE_MEDIUM_TIMEOUT; + break; + case READ_ELEMENT_STATUS: + rq->timeout = READ_ELEMENT_STATUS_TIMEOUT; + break; + case READ_DEFECT_DATA: + rq->timeout = READ_DEFECT_DATA_TIMEOUT; + scmd->allowed = 1; + break; + default: + rq->timeout = BLK_DEFAULT_SG_TIMEOUT; + break; + } + + if (bytes) { + err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO); + if (err) + goto error; + } + + blk_execute_rq(rq, false); + + err = scmd->result & 0xff; /* only 8 bit SCSI status */ + if (err) { + if (scmd->sense_len && scmd->sense_buffer) { + /* limit sense len for backward compatibility */ + if (copy_to_user(sic->data, scmd->sense_buffer, + min(scmd->sense_len, 16U))) + err = -EFAULT; + } + } else { + if (copy_to_user(sic->data, buffer, out_len)) + err = -EFAULT; + } + +error: + blk_mq_free_request(rq); + +error_free_buffer: + kfree(buffer); + + return err; +} + +int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp) +{ +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) { + struct compat_sg_io_hdr hdr32 = { + .interface_id = hdr->interface_id, + .dxfer_direction = hdr->dxfer_direction, + .cmd_len = hdr->cmd_len, + .mx_sb_len = hdr->mx_sb_len, + .iovec_count = hdr->iovec_count, + .dxfer_len = hdr->dxfer_len, + .dxferp = (uintptr_t)hdr->dxferp, + .cmdp = (uintptr_t)hdr->cmdp, + .sbp = (uintptr_t)hdr->sbp, + .timeout = hdr->timeout, + .flags = hdr->flags, + .pack_id = hdr->pack_id, + .usr_ptr = (uintptr_t)hdr->usr_ptr, + .status = hdr->status, + .masked_status = hdr->masked_status, + .msg_status = hdr->msg_status, + .sb_len_wr = hdr->sb_len_wr, + .host_status = hdr->host_status, + .driver_status = hdr->driver_status, + .resid = hdr->resid, + .duration = hdr->duration, + .info = hdr->info, + }; + + if (copy_to_user(argp, &hdr32, sizeof(hdr32))) + return -EFAULT; + + return 0; + } +#endif + + if (copy_to_user(argp, hdr, sizeof(*hdr))) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL(put_sg_io_hdr); + +int 
get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp) +{ +#ifdef CONFIG_COMPAT + struct compat_sg_io_hdr hdr32; + + if (in_compat_syscall()) { + if (copy_from_user(&hdr32, argp, sizeof(hdr32))) + return -EFAULT; + + *hdr = (struct sg_io_hdr) { + .interface_id = hdr32.interface_id, + .dxfer_direction = hdr32.dxfer_direction, + .cmd_len = hdr32.cmd_len, + .mx_sb_len = hdr32.mx_sb_len, + .iovec_count = hdr32.iovec_count, + .dxfer_len = hdr32.dxfer_len, + .dxferp = compat_ptr(hdr32.dxferp), + .cmdp = compat_ptr(hdr32.cmdp), + .sbp = compat_ptr(hdr32.sbp), + .timeout = hdr32.timeout, + .flags = hdr32.flags, + .pack_id = hdr32.pack_id, + .usr_ptr = compat_ptr(hdr32.usr_ptr), + .status = hdr32.status, + .masked_status = hdr32.masked_status, + .msg_status = hdr32.msg_status, + .sb_len_wr = hdr32.sb_len_wr, + .host_status = hdr32.host_status, + .driver_status = hdr32.driver_status, + .resid = hdr32.resid, + .duration = hdr32.duration, + .info = hdr32.info, + }; + + return 0; + } +#endif + + if (copy_from_user(hdr, argp, sizeof(*hdr))) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL(get_sg_io_hdr); + +#ifdef CONFIG_COMPAT +struct compat_cdrom_generic_command { + unsigned char cmd[CDROM_PACKET_SIZE]; + compat_caddr_t buffer; + compat_uint_t buflen; + compat_int_t stat; + compat_caddr_t sense; + unsigned char data_direction; + unsigned char pad[3]; + compat_int_t quiet; + compat_int_t timeout; + compat_caddr_t unused; +}; +#endif + +static int scsi_get_cdrom_generic_arg(struct cdrom_generic_command *cgc, + const void __user *arg) +{ +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) { + struct compat_cdrom_generic_command cgc32; + + if (copy_from_user(&cgc32, arg, sizeof(cgc32))) + return -EFAULT; + + *cgc = (struct cdrom_generic_command) { + .buffer = compat_ptr(cgc32.buffer), + .buflen = cgc32.buflen, + .stat = cgc32.stat, + .sense = compat_ptr(cgc32.sense), + .data_direction = cgc32.data_direction, + .quiet = cgc32.quiet, + .timeout = cgc32.timeout, + .unused = compat_ptr(cgc32.unused), + }; + memcpy(&cgc->cmd, &cgc32.cmd, CDROM_PACKET_SIZE); + return 0; + } +#endif + if (copy_from_user(cgc, arg, sizeof(*cgc))) + return -EFAULT; + + return 0; +} + +static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc, + void __user *arg) +{ +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) { + struct compat_cdrom_generic_command cgc32 = { + .buffer = (uintptr_t)(cgc->buffer), + .buflen = cgc->buflen, + .stat = cgc->stat, + .sense = (uintptr_t)(cgc->sense), + .data_direction = cgc->data_direction, + .quiet = cgc->quiet, + .timeout = cgc->timeout, + .unused = (uintptr_t)(cgc->unused), + }; + memcpy(&cgc32.cmd, &cgc->cmd, CDROM_PACKET_SIZE); + + if (copy_to_user(arg, &cgc32, sizeof(cgc32))) + return -EFAULT; + + return 0; + } +#endif + if (copy_to_user(arg, cgc, sizeof(*cgc))) + return -EFAULT; + + return 0; +} + +static int scsi_cdrom_send_packet(struct scsi_device *sdev, bool open_for_write, + void __user *arg) +{ + struct cdrom_generic_command cgc; + struct sg_io_hdr hdr; + int err; + + err = scsi_get_cdrom_generic_arg(&cgc, arg); + if (err) + return err; + + cgc.timeout = clock_t_to_jiffies(cgc.timeout); + memset(&hdr, 0, sizeof(hdr)); + hdr.interface_id = 'S'; + hdr.cmd_len = sizeof(cgc.cmd); + hdr.dxfer_len = cgc.buflen; + switch (cgc.data_direction) { + case CGC_DATA_UNKNOWN: + hdr.dxfer_direction = SG_DXFER_UNKNOWN; + break; + case CGC_DATA_WRITE: + hdr.dxfer_direction = SG_DXFER_TO_DEV; + break; + case CGC_DATA_READ: + hdr.dxfer_direction = SG_DXFER_FROM_DEV; + break; + case 
CGC_DATA_NONE: + hdr.dxfer_direction = SG_DXFER_NONE; + break; + default: + return -EINVAL; + } + + hdr.dxferp = cgc.buffer; + hdr.sbp = cgc.sense; + if (hdr.sbp) + hdr.mx_sb_len = sizeof(struct request_sense); + hdr.timeout = jiffies_to_msecs(cgc.timeout); + hdr.cmdp = ((struct cdrom_generic_command __user *) arg)->cmd; + hdr.cmd_len = sizeof(cgc.cmd); + + err = sg_io(sdev, &hdr, open_for_write); + if (err == -EFAULT) + return -EFAULT; + + if (hdr.status) + return -EIO; + + cgc.stat = err; + cgc.buflen = hdr.resid; + if (scsi_put_cdrom_generic_arg(&cgc, arg)) + return -EFAULT; + + return err; +} + +static int scsi_ioctl_sg_io(struct scsi_device *sdev, bool open_for_write, + void __user *argp) +{ + struct sg_io_hdr hdr; + int error; + + error = get_sg_io_hdr(&hdr, argp); + if (error) + return error; + error = sg_io(sdev, &hdr, open_for_write); + if (error == -EFAULT) + return error; + if (put_sg_io_hdr(&hdr, argp)) + return -EFAULT; + return error; +} + +/** + * scsi_ioctl - Dispatch ioctl to scsi device + * @sdev: scsi device receiving ioctl + * @open_for_write: is the file / block device opened for writing? + * @cmd: which ioctl is it + * @arg: data associated with ioctl + * + * Description: The scsi_ioctl() function differs from most ioctls in that it + * does not take a major/minor number as the dev field. Rather, it takes + * a pointer to a &struct scsi_device. + */ +int scsi_ioctl(struct scsi_device *sdev, bool open_for_write, int cmd, + void __user *arg) +{ + struct request_queue *q = sdev->request_queue; + struct scsi_sense_hdr sense_hdr; + + /* Check for deprecated ioctls ... all the ioctls which don't + * follow the new unique numbering scheme are deprecated */ + switch (cmd) { + case SCSI_IOCTL_SEND_COMMAND: + case SCSI_IOCTL_TEST_UNIT_READY: + case SCSI_IOCTL_BENCHMARK_COMMAND: + case SCSI_IOCTL_SYNC: + case SCSI_IOCTL_START_UNIT: + case SCSI_IOCTL_STOP_UNIT: + printk(KERN_WARNING "program %s is using a deprecated SCSI " + "ioctl, please convert it to SG_IO\n", current->comm); + break; + default: + break; + } + + switch (cmd) { + case SG_GET_VERSION_NUM: + return sg_get_version(arg); + case SG_SET_TIMEOUT: + return sg_set_timeout(sdev, arg); + case SG_GET_TIMEOUT: + return jiffies_to_clock_t(sdev->sg_timeout); + case SG_GET_RESERVED_SIZE: + return sg_get_reserved_size(sdev, arg); + case SG_SET_RESERVED_SIZE: + return sg_set_reserved_size(sdev, arg); + case SG_EMULATED_HOST: + return sg_emulated_host(q, arg); + case SG_IO: + return scsi_ioctl_sg_io(sdev, open_for_write, arg); + case SCSI_IOCTL_SEND_COMMAND: + return sg_scsi_ioctl(q, open_for_write, arg); + case CDROM_SEND_PACKET: + return scsi_cdrom_send_packet(sdev, open_for_write, arg); + case CDROMCLOSETRAY: + return scsi_send_start_stop(sdev, 3); + case CDROMEJECT: + return scsi_send_start_stop(sdev, 2); + case SCSI_IOCTL_GET_IDLUN: + return scsi_get_idlun(sdev, arg); + case SCSI_IOCTL_GET_BUS_NUMBER: + return put_user(sdev->host->host_no, (int __user *)arg); + case SCSI_IOCTL_PROBE_HOST: + return ioctl_probe(sdev->host, arg); + case SCSI_IOCTL_DOORLOCK: + return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT); + case SCSI_IOCTL_DOORUNLOCK: + return scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); + case SCSI_IOCTL_TEST_UNIT_READY: + return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT, + NORMAL_RETRIES, &sense_hdr); + case SCSI_IOCTL_START_UNIT: + return scsi_send_start_stop(sdev, 1); + case SCSI_IOCTL_STOP_UNIT: + return scsi_send_start_stop(sdev, 0); + case SCSI_IOCTL_GET_PCI: + return scsi_ioctl_get_pci(sdev, 
arg); + case SG_SCSI_RESET: + return scsi_ioctl_reset(sdev, arg); + } + +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) { + if (!sdev->host->hostt->compat_ioctl) + return -EINVAL; + return sdev->host->hostt->compat_ioctl(sdev, cmd, arg); + } +#endif + if (!sdev->host->hostt->ioctl) + return -EINVAL; + return sdev->host->hostt->ioctl(sdev, cmd, arg); +} +EXPORT_SYMBOL(scsi_ioctl); + +/* + * We can process a reset even when a device isn't fully operable. + */ +int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev, int cmd, + bool ndelay) +{ + if (cmd == SG_SCSI_RESET && ndelay) { + if (scsi_host_in_recovery(sdev->host)) + return -EAGAIN; + } else { + if (!scsi_block_when_processing_errors(sdev)) + return -ENODEV; + } + + return 0; +} +EXPORT_SYMBOL_GPL(scsi_ioctl_block_when_processing_errors); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c new file mode 100644 index 000000000..c2f647a7c --- /dev/null +++ b/drivers/scsi/scsi_lib.c @@ -0,0 +1,3337 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 1999 Eric Youngdale + * Copyright (C) 2014 Christoph Hellwig + * + * SCSI queueing library. + * Initial versions: Eric Youngdale (eric@andante.org). + * Based upon conversations with large numbers + * of people at Linux Expo. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include /* __scsi_init_queue() */ +#include + +#include + +#include "scsi_debugfs.h" +#include "scsi_priv.h" +#include "scsi_logging.h" + +/* + * Size of integrity metadata is usually small, 1 inline sg should + * cover normal cases. + */ +#ifdef CONFIG_ARCH_NO_SG_CHAIN +#define SCSI_INLINE_PROT_SG_CNT 0 +#define SCSI_INLINE_SG_CNT 0 +#else +#define SCSI_INLINE_PROT_SG_CNT 1 +#define SCSI_INLINE_SG_CNT 2 +#endif + +static struct kmem_cache *scsi_sense_cache; +static DEFINE_MUTEX(scsi_sense_cache_mutex); + +static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd); + +int scsi_init_sense_cache(struct Scsi_Host *shost) +{ + int ret = 0; + + mutex_lock(&scsi_sense_cache_mutex); + if (!scsi_sense_cache) { + scsi_sense_cache = + kmem_cache_create_usercopy("scsi_sense_cache", + SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, + 0, SCSI_SENSE_BUFFERSIZE, NULL); + if (!scsi_sense_cache) + ret = -ENOMEM; + } + mutex_unlock(&scsi_sense_cache_mutex); + return ret; +} + +static void +scsi_set_blocked(struct scsi_cmnd *cmd, int reason) +{ + struct Scsi_Host *host = cmd->device->host; + struct scsi_device *device = cmd->device; + struct scsi_target *starget = scsi_target(device); + + /* + * Set the appropriate busy bit for the device/host. + * + * If the host/device isn't busy, assume that something actually + * completed, and that we should be able to queue a command now. + * + * Note that the prior mid-layer assumption that any host could + * always queue at least one command is now broken. The mid-layer + * will implement a user specifiable stall (see + * scsi_host.max_host_blocked and scsi_device.max_device_blocked) + * if a command is requeued with no other commands outstanding + * either for the device or for the host. 
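When a driver returns one of the SCSI_MLQUEUE_*_BUSY codes, the switch that follows primes the matching *_blocked counter with its configurable max_*_blocked limit; the queue-ready helpers later in this file then count the value back down to zero before dispatching new commands. A small userspace model of that back-off counter, using C11 atomics and illustrative names.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_DEVICE_BLOCKED 3    /* plays the role of sdev->max_device_blocked */

static atomic_int device_blocked;

static void report_busy(void)
{
        /* like scsi_set_blocked(cmd, SCSI_MLQUEUE_DEVICE_BUSY) */
        atomic_store(&device_blocked, MAX_DEVICE_BLOCKED);
}

static bool queue_ready(void)
{
        /* like the unblock-at-zero logic in scsi_dev_queue_ready() */
        if (atomic_load(&device_blocked) == 0)
                return true;
        return atomic_fetch_sub(&device_blocked, 1) - 1 == 0;
}

int main(void)
{
        report_busy();
        for (int i = 0; i < 5; i++)
                printf("attempt %d: %s\n", i, queue_ready() ? "ready" : "blocked");
        return 0;
}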
+ */ + switch (reason) { + case SCSI_MLQUEUE_HOST_BUSY: + atomic_set(&host->host_blocked, host->max_host_blocked); + break; + case SCSI_MLQUEUE_DEVICE_BUSY: + case SCSI_MLQUEUE_EH_RETRY: + atomic_set(&device->device_blocked, + device->max_device_blocked); + break; + case SCSI_MLQUEUE_TARGET_BUSY: + atomic_set(&starget->target_blocked, + starget->max_target_blocked); + break; + } +} + +static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs) +{ + struct request *rq = scsi_cmd_to_rq(cmd); + + if (rq->rq_flags & RQF_DONTPREP) { + rq->rq_flags &= ~RQF_DONTPREP; + scsi_mq_uninit_cmd(cmd); + } else { + WARN_ON_ONCE(true); + } + + blk_mq_requeue_request(rq, false); + if (!scsi_host_in_recovery(cmd->device->host)) + blk_mq_delay_kick_requeue_list(rq->q, msecs); +} + +/** + * __scsi_queue_insert - private queue insertion + * @cmd: The SCSI command being requeued + * @reason: The reason for the requeue + * @unbusy: Whether the queue should be unbusied + * + * This is a private queue insertion. The public interface + * scsi_queue_insert() always assumes the queue should be unbusied + * because it's always called before the completion. This function is + * for a requeue after completion, which should only occur in this + * file. + */ +static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy) +{ + struct scsi_device *device = cmd->device; + + SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd, + "Inserting command %p into mlqueue\n", cmd)); + + scsi_set_blocked(cmd, reason); + + /* + * Decrement the counters, since these commands are no longer + * active on the host/device. + */ + if (unbusy) + scsi_device_unbusy(device, cmd); + + /* + * Requeue this command. It will go before all other commands + * that are already in the queue. Schedule requeue work under + * lock such that the kblockd_schedule_work() call happens + * before blk_mq_destroy_queue() finishes. + */ + cmd->result = 0; + + blk_mq_requeue_request(scsi_cmd_to_rq(cmd), + !scsi_host_in_recovery(cmd->device->host)); +} + +/** + * scsi_queue_insert - Reinsert a command in the queue. + * @cmd: command that we are adding to queue. + * @reason: why we are inserting command to queue. + * + * We do this for one of two cases. Either the host is busy and it cannot accept + * any more commands for the time being, or the device returned QUEUE_FULL and + * can accept no more commands. + * + * Context: This could be called either from an interrupt context or a normal + * process context. + */ +void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) +{ + __scsi_queue_insert(cmd, reason, true); +} + +/** + * scsi_execute_cmd - insert request and wait for the result + * @sdev: scsi_device + * @cmd: scsi command + * @opf: block layer request cmd_flags + * @buffer: data buffer + * @bufflen: len of buffer + * @timeout: request timeout in HZ + * @retries: number of times to retry request + * @args: Optional args. See struct definition for field descriptions + * + * Returns the scsi_cmnd result field if a command was executed, or a negative + * Linux error code if we didn't get that far. 
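scsi_execute_cmd() hands back the packed result word when the command ran: the SCSI status byte occupies the low 8 bits and the host byte bits 16-23, which is what host_byte() and scsi_status_is_good() pick apart elsewhere in this file. A tiny decoding sketch with local macro names and example values.

#include <stdio.h>

#define STATUS_BYTE(res)        ((res) & 0xff)          /* SAM status      */
#define HOST_BYTE(res)          (((res) >> 16) & 0xff)  /* DID_* host code */

int main(void)
{
        int result = (0x07 << 16) | 0x02;   /* DID_ERROR, CHECK CONDITION */

        printf("host byte 0x%02x, status byte 0x%02x\n",
               HOST_BYTE(result), STATUS_BYTE(result));
        return 0;
}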
+ */ +int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd, + blk_opf_t opf, void *buffer, unsigned int bufflen, + int timeout, int retries, + const struct scsi_exec_args *args) +{ + static const struct scsi_exec_args default_args; + struct request *req; + struct scsi_cmnd *scmd; + int ret; + + if (!args) + args = &default_args; + else if (WARN_ON_ONCE(args->sense && + args->sense_len != SCSI_SENSE_BUFFERSIZE)) + return -EINVAL; + + req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags); + if (IS_ERR(req)) + return PTR_ERR(req); + + if (bufflen) { + ret = blk_rq_map_kern(sdev->request_queue, req, + buffer, bufflen, GFP_NOIO); + if (ret) + goto out; + } + scmd = blk_mq_rq_to_pdu(req); + scmd->cmd_len = COMMAND_SIZE(cmd[0]); + memcpy(scmd->cmnd, cmd, scmd->cmd_len); + scmd->allowed = retries; + scmd->flags |= args->scmd_flags; + req->timeout = timeout; + req->rq_flags |= RQF_QUIET; + + /* + * head injection *required* here otherwise quiesce won't work + */ + blk_execute_rq(req, true); + + /* + * Some devices (USB mass-storage in particular) may transfer + * garbage data together with a residue indicating that the data + * is invalid. Prevent the garbage from being misinterpreted + * and prevent security leaks by zeroing out the excess data. + */ + if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen)) + memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len); + + if (args->resid) + *args->resid = scmd->resid_len; + if (args->sense) + memcpy(args->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); + if (args->sshdr) + scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len, + args->sshdr); + + ret = scmd->result; + out: + blk_mq_free_request(req); + + return ret; +} +EXPORT_SYMBOL(scsi_execute_cmd); + +/* + * Wake up the error handler if necessary. Avoid as follows that the error + * handler is not woken up if host in-flight requests number == + * shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination + * with an RCU read lock in this function to ensure that this function in + * its entirety either finishes before scsi_eh_scmd_add() increases the + * host_failed counter or that it notices the shost state change made by + * scsi_eh_scmd_add(). + */ +static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd) +{ + unsigned long flags; + + rcu_read_lock(); + __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state); + if (unlikely(scsi_host_in_recovery(shost))) { + spin_lock_irqsave(shost->host_lock, flags); + if (shost->host_failed || shost->host_eh_scheduled) + scsi_eh_wakeup(shost); + spin_unlock_irqrestore(shost->host_lock, flags); + } + rcu_read_unlock(); +} + +void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd) +{ + struct Scsi_Host *shost = sdev->host; + struct scsi_target *starget = scsi_target(sdev); + + scsi_dec_host_busy(shost, cmd); + + if (starget->can_queue > 0) + atomic_dec(&starget->target_busy); + + sbitmap_put(&sdev->budget_map, cmd->budget_token); + cmd->budget_token = -1; +} + +/* + * Kick the queue of SCSI device @sdev if @sdev != current_sdev. Called with + * interrupts disabled. + */ +static void scsi_kick_sdev_queue(struct scsi_device *sdev, void *data) +{ + struct scsi_device *current_sdev = data; + + if (sdev != current_sdev) + blk_mq_run_hw_queues(sdev->request_queue, true); +} + +/* + * Called for single_lun devices on IO completion. Clear starget_sdev_user, + * and call blk_run_queue for all the scsi_devices on the target - + * including current_sdev first. 
+ * + * Called with *no* scsi locks held. + */ +static void scsi_single_lun_run(struct scsi_device *current_sdev) +{ + struct Scsi_Host *shost = current_sdev->host; + struct scsi_target *starget = scsi_target(current_sdev); + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + starget->starget_sdev_user = NULL; + spin_unlock_irqrestore(shost->host_lock, flags); + + /* + * Call blk_run_queue for all LUNs on the target, starting with + * current_sdev. We race with others (to set starget_sdev_user), + * but in most cases, we will be first. Ideally, each LU on the + * target would get some limited time or requests on the target. + */ + blk_mq_run_hw_queues(current_sdev->request_queue, + shost->queuecommand_may_block); + + spin_lock_irqsave(shost->host_lock, flags); + if (!starget->starget_sdev_user) + __starget_for_each_device(starget, current_sdev, + scsi_kick_sdev_queue); + spin_unlock_irqrestore(shost->host_lock, flags); +} + +static inline bool scsi_device_is_busy(struct scsi_device *sdev) +{ + if (scsi_device_busy(sdev) >= sdev->queue_depth) + return true; + if (atomic_read(&sdev->device_blocked) > 0) + return true; + return false; +} + +static inline bool scsi_target_is_busy(struct scsi_target *starget) +{ + if (starget->can_queue > 0) { + if (atomic_read(&starget->target_busy) >= starget->can_queue) + return true; + if (atomic_read(&starget->target_blocked) > 0) + return true; + } + return false; +} + +static inline bool scsi_host_is_busy(struct Scsi_Host *shost) +{ + if (atomic_read(&shost->host_blocked) > 0) + return true; + if (shost->host_self_blocked) + return true; + return false; +} + +static void scsi_starved_list_run(struct Scsi_Host *shost) +{ + LIST_HEAD(starved_list); + struct scsi_device *sdev; + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + list_splice_init(&shost->starved_list, &starved_list); + + while (!list_empty(&starved_list)) { + struct request_queue *slq; + + /* + * As long as shost is accepting commands and we have + * starved queues, call blk_run_queue. scsi_request_fn + * drops the queue_lock and can add us back to the + * starved_list. + * + * host_lock protects the starved_list and starved_entry. + * scsi_request_fn must get the host_lock before checking + * or modifying starved_list or starved_entry. + */ + if (scsi_host_is_busy(shost)) + break; + + sdev = list_entry(starved_list.next, + struct scsi_device, starved_entry); + list_del_init(&sdev->starved_entry); + if (scsi_target_is_busy(scsi_target(sdev))) { + list_move_tail(&sdev->starved_entry, + &shost->starved_list); + continue; + } + + /* + * Once we drop the host lock, a racing scsi_remove_device() + * call may remove the sdev from the starved list and destroy + * it and the queue. Mitigate by taking a reference to the + * queue and never touching the sdev again after we drop the + * host lock. Note: if __scsi_remove_device() invokes + * blk_mq_destroy_queue() before the queue is run from this + * function then blk_run_queue() will return immediately since + * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING. 
+ */ + slq = sdev->request_queue; + if (!blk_get_queue(slq)) + continue; + spin_unlock_irqrestore(shost->host_lock, flags); + + blk_mq_run_hw_queues(slq, false); + blk_put_queue(slq); + + spin_lock_irqsave(shost->host_lock, flags); + } + /* put any unprocessed entries back */ + list_splice(&starved_list, &shost->starved_list); + spin_unlock_irqrestore(shost->host_lock, flags); +} + +/** + * scsi_run_queue - Select a proper request queue to serve next. + * @q: last request's queue + * + * The previous command was completely finished, start a new one if possible. + */ +static void scsi_run_queue(struct request_queue *q) +{ + struct scsi_device *sdev = q->queuedata; + + if (scsi_target(sdev)->single_lun) + scsi_single_lun_run(sdev); + if (!list_empty(&sdev->host->starved_list)) + scsi_starved_list_run(sdev->host); + + /* Note: blk_mq_kick_requeue_list() runs the queue asynchronously. */ + blk_mq_kick_requeue_list(q); +} + +void scsi_requeue_run_queue(struct work_struct *work) +{ + struct scsi_device *sdev; + struct request_queue *q; + + sdev = container_of(work, struct scsi_device, requeue_work); + q = sdev->request_queue; + scsi_run_queue(q); +} + +void scsi_run_host_queues(struct Scsi_Host *shost) +{ + struct scsi_device *sdev; + + shost_for_each_device(sdev, shost) + scsi_run_queue(sdev->request_queue); +} + +static void scsi_uninit_cmd(struct scsi_cmnd *cmd) +{ + if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) { + struct scsi_driver *drv = scsi_cmd_to_driver(cmd); + + if (drv->uninit_command) + drv->uninit_command(cmd); + } +} + +void scsi_free_sgtables(struct scsi_cmnd *cmd) +{ + if (cmd->sdb.table.nents) + sg_free_table_chained(&cmd->sdb.table, + SCSI_INLINE_SG_CNT); + if (scsi_prot_sg_count(cmd)) + sg_free_table_chained(&cmd->prot_sdb->table, + SCSI_INLINE_PROT_SG_CNT); +} +EXPORT_SYMBOL_GPL(scsi_free_sgtables); + +static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) +{ + scsi_free_sgtables(cmd); + scsi_uninit_cmd(cmd); +} + +static void scsi_run_queue_async(struct scsi_device *sdev) +{ + if (scsi_host_in_recovery(sdev->host)) + return; + + if (scsi_target(sdev)->single_lun || + !list_empty(&sdev->host->starved_list)) { + kblockd_schedule_work(&sdev->requeue_work); + } else { + /* + * smp_mb() present in sbitmap_queue_clear() or implied in + * .end_io is for ordering writing .device_busy in + * scsi_device_unbusy() and reading sdev->restarts. + */ + int old = atomic_read(&sdev->restarts); + + /* + * ->restarts has to be kept as non-zero if new budget + * contention occurs. + * + * No need to run queue when either another re-run + * queue wins in updating ->restarts or a new budget + * contention occurs. 
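The ->restarts scheme described above lets exactly one of several racing completions re-run the hardware queues: the one whose compare-and-exchange from the observed non-zero value to zero succeeds, as the test that follows shows. The same single-winner pattern as a standalone C11 sketch with illustrative names.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int restarts;

/* like "old && atomic_cmpxchg(&sdev->restarts, old, 0) == old" */
static bool should_rerun_queue(void)
{
        int old = atomic_load(&restarts);

        return old && atomic_compare_exchange_strong(&restarts, &old, 0);
}

int main(void)
{
        atomic_store(&restarts, 1);     /* a queueing path saw budget contention */
        printf("first completion reruns queue: %d\n", should_rerun_queue());
        printf("second completion reruns queue: %d\n", should_rerun_queue());
        return 0;
}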
+ */ + if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old) + blk_mq_run_hw_queues(sdev->request_queue, true); + } +} + +/* Returns false when no more bytes to process, true if there are more */ +static bool scsi_end_request(struct request *req, blk_status_t error, + unsigned int bytes) +{ + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); + struct scsi_device *sdev = cmd->device; + struct request_queue *q = sdev->request_queue; + + if (blk_update_request(req, error, bytes)) + return true; + + // XXX: + if (blk_queue_add_random(q)) + add_disk_randomness(req->q->disk); + + if (!blk_rq_is_passthrough(req)) { + WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); + cmd->flags &= ~SCMD_INITIALIZED; + } + + /* + * Calling rcu_barrier() is not necessary here because the + * SCSI error handler guarantees that the function called by + * call_rcu() has been called before scsi_end_request() is + * called. + */ + destroy_rcu_head(&cmd->rcu); + + /* + * In the MQ case the command gets freed by __blk_mq_end_request, + * so we have to do all cleanup that depends on it earlier. + * + * We also can't kick the queues from irq context, so we + * will have to defer it to a workqueue. + */ + scsi_mq_uninit_cmd(cmd); + + /* + * queue is still alive, so grab the ref for preventing it + * from being cleaned up during running queue. + */ + percpu_ref_get(&q->q_usage_counter); + + __blk_mq_end_request(req, error); + + scsi_run_queue_async(sdev); + + percpu_ref_put(&q->q_usage_counter); + return false; +} + +/** + * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t + * @result: scsi error code + * + * Translate a SCSI result code into a blk_status_t value. + */ +static blk_status_t scsi_result_to_blk_status(int result) +{ + /* + * Check the scsi-ml byte first in case we converted a host or status + * byte. + */ + switch (scsi_ml_byte(result)) { + case SCSIML_STAT_OK: + break; + case SCSIML_STAT_RESV_CONFLICT: + return BLK_STS_RESV_CONFLICT; + case SCSIML_STAT_NOSPC: + return BLK_STS_NOSPC; + case SCSIML_STAT_MED_ERROR: + return BLK_STS_MEDIUM; + case SCSIML_STAT_TGT_FAILURE: + return BLK_STS_TARGET; + case SCSIML_STAT_DL_TIMEOUT: + return BLK_STS_DURATION_LIMIT; + } + + switch (host_byte(result)) { + case DID_OK: + if (scsi_status_is_good(result)) + return BLK_STS_OK; + return BLK_STS_IOERR; + case DID_TRANSPORT_FAILFAST: + case DID_TRANSPORT_MARGINAL: + return BLK_STS_TRANSPORT; + default: + return BLK_STS_IOERR; + } +} + +/** + * scsi_rq_err_bytes - determine number of bytes till the next failure boundary + * @rq: request to examine + * + * Description: + * A request could be merge of IOs which require different failure + * handling. This function determines the number of bytes which + * can be failed from the beginning of the request without + * crossing into area which need to be retried further. + * + * Return: + * The number of bytes to fail. + */ +static unsigned int scsi_rq_err_bytes(const struct request *rq) +{ + blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK; + unsigned int bytes = 0; + struct bio *bio; + + if (!(rq->rq_flags & RQF_MIXED_MERGE)) + return blk_rq_bytes(rq); + + /* + * Currently the only 'mixing' which can happen is between + * different fastfail types. We can safely fail portions + * which have all the failfast bits that the first one has - + * the ones which are at least as eager to fail as the first + * one. 
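The loop that follows walks the request's bios from the front and stops at the first one whose failfast bits do not cover all of the request's; only the bytes before that point can be failed without crossing into territory that must still be retried. A standalone model of that walk, with plain structs and made-up flag values standing in for bios.

#include <stdio.h>

#define FF_DEV          0x1     /* made-up failfast flag bits */
#define FF_TRANSPORT    0x2

struct seg {
        unsigned int flags;
        unsigned int bytes;
};

static unsigned int err_bytes(const struct seg *segs, int n, unsigned int ff)
{
        unsigned int bytes = 0;

        for (int i = 0; i < n; i++) {
                if ((segs[i].flags & ff) != ff)
                        break;          /* less eager to fail than the request */
                bytes += segs[i].bytes;
        }
        return bytes;
}

int main(void)
{
        struct seg segs[] = {
                { FF_DEV | FF_TRANSPORT, 4096 },
                { FF_DEV,                8192 },        /* stops the walk */
                { FF_DEV | FF_TRANSPORT, 4096 },
        };

        printf("%u bytes can be failed\n",
               err_bytes(segs, 3, FF_DEV | FF_TRANSPORT));
        return 0;
}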
+ */ + for (bio = rq->bio; bio; bio = bio->bi_next) { + if ((bio->bi_opf & ff) != ff) + break; + bytes += bio->bi_iter.bi_size; + } + + /* this could lead to infinite loop */ + BUG_ON(blk_rq_bytes(rq) && !bytes); + return bytes; +} + +static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd) +{ + struct request *req = scsi_cmd_to_rq(cmd); + unsigned long wait_for; + + if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT) + return false; + + wait_for = (cmd->allowed + 1) * req->timeout; + if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { + scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n", + wait_for/HZ); + return true; + } + return false; +} + +/* + * When ALUA transition state is returned, reprep the cmd to + * use the ALUA handler's transition timeout. Delay the reprep + * 1 sec to avoid aggressive retries of the target in that + * state. + */ +#define ALUA_TRANSITION_REPREP_DELAY 1000 + +/* Helper for scsi_io_completion() when special action required. */ +static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) +{ + struct request *req = scsi_cmd_to_rq(cmd); + int level = 0; + enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP, + ACTION_RETRY, ACTION_DELAYED_RETRY} action; + struct scsi_sense_hdr sshdr; + bool sense_valid; + bool sense_current = true; /* false implies "deferred sense" */ + blk_status_t blk_stat; + + sense_valid = scsi_command_normalize_sense(cmd, &sshdr); + if (sense_valid) + sense_current = !scsi_sense_is_deferred(&sshdr); + + blk_stat = scsi_result_to_blk_status(result); + + if (host_byte(result) == DID_RESET) { + /* Third party bus reset or reset for error recovery + * reasons. Just retry the command and see what + * happens. + */ + action = ACTION_RETRY; + } else if (sense_valid && sense_current) { + switch (sshdr.sense_key) { + case UNIT_ATTENTION: + if (cmd->device->removable) { + /* Detected disc change. Set a bit + * and quietly refuse further access. + */ + cmd->device->changed = 1; + action = ACTION_FAIL; + } else { + /* Must have been a power glitch, or a + * bus reset. Could not have been a + * media change, so we just retry the + * command and see what happens. + */ + action = ACTION_RETRY; + } + break; + case ILLEGAL_REQUEST: + /* If we had an ILLEGAL REQUEST returned, then + * we may have performed an unsupported + * command. The only thing this should be + * would be a ten byte read where only a six + * byte read was supported. Also, on a system + * where READ CAPACITY failed, we may have + * read past the end of the disk. + */ + if ((cmd->device->use_10_for_rw && + sshdr.asc == 0x20 && sshdr.ascq == 0x00) && + (cmd->cmnd[0] == READ_10 || + cmd->cmnd[0] == WRITE_10)) { + /* This will issue a new 6-byte command. */ + cmd->device->use_10_for_rw = 0; + action = ACTION_REPREP; + } else if (sshdr.asc == 0x10) /* DIX */ { + action = ACTION_FAIL; + blk_stat = BLK_STS_PROTECTION; + /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ + } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { + action = ACTION_FAIL; + blk_stat = BLK_STS_TARGET; + } else + action = ACTION_FAIL; + break; + case ABORTED_COMMAND: + action = ACTION_FAIL; + if (sshdr.asc == 0x10) /* DIF */ + blk_stat = BLK_STS_PROTECTION; + break; + case NOT_READY: + /* If the device is in the process of becoming + * ready, or has a temporary blockage, retry. 
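A little earlier in this hunk, scsi_cmd_runtime_exceeced() caps the total time spent on a command at (allowed + 1) * timeout from the moment it was allocated, no matter which of the retry paths below is chosen. The arithmetic as a standalone sketch with example numbers.

#include <stdio.h>

int main(void)
{
        unsigned int timeout_s = 30;    /* per-attempt timeout, example value */
        unsigned int allowed = 5;       /* retries permitted, example value   */

        /* like wait_for = (cmd->allowed + 1) * req->timeout */
        printf("command abandoned after %u seconds\n", (allowed + 1) * timeout_s);
        return 0;
}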
+ */ + if (sshdr.asc == 0x04) { + switch (sshdr.ascq) { + case 0x01: /* becoming ready */ + case 0x04: /* format in progress */ + case 0x05: /* rebuild in progress */ + case 0x06: /* recalculation in progress */ + case 0x07: /* operation in progress */ + case 0x08: /* Long write in progress */ + case 0x09: /* self test in progress */ + case 0x11: /* notify (enable spinup) required */ + case 0x14: /* space allocation in progress */ + case 0x1a: /* start stop unit in progress */ + case 0x1b: /* sanitize in progress */ + case 0x1d: /* configuration in progress */ + case 0x24: /* depopulation in progress */ + action = ACTION_DELAYED_RETRY; + break; + case 0x0a: /* ALUA state transition */ + action = ACTION_DELAYED_REPREP; + break; + default: + action = ACTION_FAIL; + break; + } + } else + action = ACTION_FAIL; + break; + case VOLUME_OVERFLOW: + /* See SSC3rXX or current. */ + action = ACTION_FAIL; + break; + case DATA_PROTECT: + action = ACTION_FAIL; + if ((sshdr.asc == 0x0C && sshdr.ascq == 0x12) || + (sshdr.asc == 0x55 && + (sshdr.ascq == 0x0E || sshdr.ascq == 0x0F))) { + /* Insufficient zone resources */ + blk_stat = BLK_STS_ZONE_OPEN_RESOURCE; + } + break; + case COMPLETED: + fallthrough; + default: + action = ACTION_FAIL; + break; + } + } else + action = ACTION_FAIL; + + if (action != ACTION_FAIL && scsi_cmd_runtime_exceeced(cmd)) + action = ACTION_FAIL; + + switch (action) { + case ACTION_FAIL: + /* Give up and fail the remainder of the request */ + if (!(req->rq_flags & RQF_QUIET)) { + static DEFINE_RATELIMIT_STATE(_rs, + DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + + if (unlikely(scsi_logging_level)) + level = + SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT, + SCSI_LOG_MLCOMPLETE_BITS); + + /* + * if logging is enabled the failure will be printed + * in scsi_log_completion(), so avoid duplicate messages + */ + if (!level && __ratelimit(&_rs)) { + scsi_print_result(cmd, NULL, FAILED); + if (sense_valid) + scsi_print_sense(cmd); + scsi_print_command(cmd); + } + } + if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req))) + return; + fallthrough; + case ACTION_REPREP: + scsi_mq_requeue_cmd(cmd, 0); + break; + case ACTION_DELAYED_REPREP: + scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY); + break; + case ACTION_RETRY: + /* Retry the same command immediately */ + __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false); + break; + case ACTION_DELAYED_RETRY: + /* Retry the same command after a delay */ + __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false); + break; + } +} + +/* + * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a + * new result that may suppress further error checking. Also modifies + * *blk_statp in some cases. 
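For passthrough requests the helper below trims cmd->sense_len to 8 + the ADDITIONAL SENSE LENGTH byte (byte 7 of fixed-format sense data), capped at SCSI_SENSE_BUFFERSIZE, so SG_IO callers see both current and deferred sense. The same calculation as a standalone sketch.

#include <stdio.h>

#define SCSI_SENSE_BUFFERSIZE 96

static unsigned int sense_len(const unsigned char *sense)
{
        unsigned int len = 8 + sense[7];    /* header + ADDITIONAL SENSE LENGTH */

        return len < SCSI_SENSE_BUFFERSIZE ? len : SCSI_SENSE_BUFFERSIZE;
}

int main(void)
{
        unsigned char sense[SCSI_SENSE_BUFFERSIZE] = {
                [0] = 0x70,     /* fixed format, current sense */
                [2] = 0x05,     /* ILLEGAL REQUEST             */
                [7] = 10,       /* ten additional bytes        */
        };

        printf("sense bytes worth copying: %u\n", sense_len(sense));
        return 0;
}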
+ */ +static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result, + blk_status_t *blk_statp) +{ + bool sense_valid; + bool sense_current = true; /* false implies "deferred sense" */ + struct request *req = scsi_cmd_to_rq(cmd); + struct scsi_sense_hdr sshdr; + + sense_valid = scsi_command_normalize_sense(cmd, &sshdr); + if (sense_valid) + sense_current = !scsi_sense_is_deferred(&sshdr); + + if (blk_rq_is_passthrough(req)) { + if (sense_valid) { + /* + * SG_IO wants current and deferred errors + */ + cmd->sense_len = min(8 + cmd->sense_buffer[7], + SCSI_SENSE_BUFFERSIZE); + } + if (sense_current) + *blk_statp = scsi_result_to_blk_status(result); + } else if (blk_rq_bytes(req) == 0 && sense_current) { + /* + * Flush commands do not transfers any data, and thus cannot use + * good_bytes != blk_rq_bytes(req) as the signal for an error. + * This sets *blk_statp explicitly for the problem case. + */ + *blk_statp = scsi_result_to_blk_status(result); + } + /* + * Recovered errors need reporting, but they're always treated as + * success, so fiddle the result code here. For passthrough requests + * we already took a copy of the original into sreq->result which + * is what gets returned to the user + */ + if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) { + bool do_print = true; + /* + * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d] + * skip print since caller wants ATA registers. Only occurs + * on SCSI ATA PASS_THROUGH commands when CK_COND=1 + */ + if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d)) + do_print = false; + else if (req->rq_flags & RQF_QUIET) + do_print = false; + if (do_print) + scsi_print_sense(cmd); + result = 0; + /* for passthrough, *blk_statp may be set */ + *blk_statp = BLK_STS_OK; + } + /* + * Another corner case: the SCSI status byte is non-zero but 'good'. + * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when + * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD + * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related + * intermediate statuses (both obsolete in SAM-4) as good. + */ + if ((result & 0xff) && scsi_status_is_good(result)) { + result = 0; + *blk_statp = BLK_STS_OK; + } + return result; +} + +/** + * scsi_io_completion - Completion processing for SCSI commands. + * @cmd: command that is finished. + * @good_bytes: number of processed bytes. + * + * We will finish off the specified number of sectors. If we are done, the + * command block will be released and the queue function will be goosed. If we + * are not done then we have to figure out what to do next: + * + * a) We can call scsi_mq_requeue_cmd(). The request will be + * unprepared and put back on the queue. Then a new command will + * be created for it. This should be used if we made forward + * progress, or if we want to switch from READ(10) to READ(6) for + * example. + * + * b) We can call scsi_io_completion_action(). The request will be + * put back on the queue and retried using the same command as + * before, possibly after a delay. + * + * c) We can call scsi_end_request() with blk_stat other than + * BLK_STS_OK, to fail the remainder of the request. 
+ */ +void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) +{ + int result = cmd->result; + struct request *req = scsi_cmd_to_rq(cmd); + blk_status_t blk_stat = BLK_STS_OK; + + if (unlikely(result)) /* a nz result may or may not be an error */ + result = scsi_io_completion_nz_result(cmd, result, &blk_stat); + + /* + * Next deal with any sectors which we were able to correctly + * handle. + */ + SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd, + "%u sectors total, %d bytes done.\n", + blk_rq_sectors(req), good_bytes)); + + /* + * Failed, zero length commands always need to drop down + * to retry code. Fast path should return in this block. + */ + if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) { + if (likely(!scsi_end_request(req, blk_stat, good_bytes))) + return; /* no bytes remaining */ + } + + /* Kill remainder if no retries. */ + if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) { + if (scsi_end_request(req, blk_stat, blk_rq_bytes(req))) + WARN_ONCE(true, + "Bytes remaining after failed, no-retry command"); + return; + } + + /* + * If there had been no error, but we have leftover bytes in the + * request just queue the command up again. + */ + if (likely(result == 0)) + scsi_mq_requeue_cmd(cmd, 0); + else + scsi_io_completion_action(cmd, result); +} + +static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev, + struct request *rq) +{ + return sdev->dma_drain_len && blk_rq_is_passthrough(rq) && + !op_is_write(req_op(rq)) && + sdev->host->hostt->dma_need_drain(rq); +} + +/** + * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists + * @cmd: SCSI command data structure to initialize. + * + * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled + * for @cmd. + * + * Returns: + * * BLK_STS_OK - on success + * * BLK_STS_RESOURCE - if the failure is retryable + * * BLK_STS_IOERR - if the failure is fatal + */ +blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd) +{ + struct scsi_device *sdev = cmd->device; + struct request *rq = scsi_cmd_to_rq(cmd); + unsigned short nr_segs = blk_rq_nr_phys_segments(rq); + struct scatterlist *last_sg = NULL; + blk_status_t ret; + bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq); + int count; + + if (WARN_ON_ONCE(!nr_segs)) + return BLK_STS_IOERR; + + /* + * Make sure there is space for the drain. The driver must adjust + * max_hw_segments to be prepared for this. + */ + if (need_drain) + nr_segs++; + + /* + * If sg table allocation fails, requeue request later. + */ + if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs, + cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT))) + return BLK_STS_RESOURCE; + + /* + * Next, walk the list, and fill in the addresses and sizes of + * each segment. + */ + count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg); + + if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) { + unsigned int pad_len = + (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; + + last_sg->length += pad_len; + cmd->extra_len += pad_len; + } + + if (need_drain) { + sg_unmark_end(last_sg); + last_sg = sg_next(last_sg); + sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len); + sg_mark_end(last_sg); + + cmd->extra_len += sdev->dma_drain_len; + count++; + } + + BUG_ON(count > cmd->sdb.table.nents); + cmd->sdb.table.nents = count; + cmd->sdb.length = blk_rq_payload_bytes(rq); + + if (blk_integrity_rq(rq)) { + struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; + int ivecs; + + if (WARN_ON_ONCE(!prot_sdb)) { + /* + * This can happen if someone (e.g. 
multipath) + * queues a command to a device on an adapter + * that does not support DIX. + */ + ret = BLK_STS_IOERR; + goto out_free_sgtables; + } + + ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); + + if (sg_alloc_table_chained(&prot_sdb->table, ivecs, + prot_sdb->table.sgl, + SCSI_INLINE_PROT_SG_CNT)) { + ret = BLK_STS_RESOURCE; + goto out_free_sgtables; + } + + count = blk_rq_map_integrity_sg(rq->q, rq->bio, + prot_sdb->table.sgl); + BUG_ON(count > ivecs); + BUG_ON(count > queue_max_integrity_segments(rq->q)); + + cmd->prot_sdb = prot_sdb; + cmd->prot_sdb->table.nents = count; + } + + return BLK_STS_OK; +out_free_sgtables: + scsi_free_sgtables(cmd); + return ret; +} +EXPORT_SYMBOL(scsi_alloc_sgtables); + +/** + * scsi_initialize_rq - initialize struct scsi_cmnd partially + * @rq: Request associated with the SCSI command to be initialized. + * + * This function initializes the members of struct scsi_cmnd that must be + * initialized before request processing starts and that won't be + * reinitialized if a SCSI command is requeued. + */ +static void scsi_initialize_rq(struct request *rq) +{ + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); + + memset(cmd->cmnd, 0, sizeof(cmd->cmnd)); + cmd->cmd_len = MAX_COMMAND_SIZE; + cmd->sense_len = 0; + init_rcu_head(&cmd->rcu); + cmd->jiffies_at_alloc = jiffies; + cmd->retries = 0; +} + +struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf, + blk_mq_req_flags_t flags) +{ + struct request *rq; + + rq = blk_mq_alloc_request(q, opf, flags); + if (!IS_ERR(rq)) + scsi_initialize_rq(rq); + return rq; +} +EXPORT_SYMBOL_GPL(scsi_alloc_request); + +/* + * Only called when the request isn't completed by SCSI, and not freed by + * SCSI + */ +static void scsi_cleanup_rq(struct request *rq) +{ + if (rq->rq_flags & RQF_DONTPREP) { + scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq)); + rq->rq_flags &= ~RQF_DONTPREP; + } +} + +/* Called before a request is prepared. See also scsi_mq_prep_fn(). */ +void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) +{ + struct request *rq = scsi_cmd_to_rq(cmd); + + if (!blk_rq_is_passthrough(rq) && !(cmd->flags & SCMD_INITIALIZED)) { + cmd->flags |= SCMD_INITIALIZED; + scsi_initialize_rq(rq); + } + + cmd->device = dev; + INIT_LIST_HEAD(&cmd->eh_entry); + INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); +} + +static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev, + struct request *req) +{ + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); + + /* + * Passthrough requests may transfer data, in which case they must + * a bio attached to them. Or they might contain a SCSI command + * that does not transfer data, in which case they may optionally + * submit a request without an attached bio. + */ + if (req->bio) { + blk_status_t ret = scsi_alloc_sgtables(cmd); + if (unlikely(ret != BLK_STS_OK)) + return ret; + } else { + BUG_ON(blk_rq_bytes(req)); + + memset(&cmd->sdb, 0, sizeof(cmd->sdb)); + } + + cmd->transfersize = blk_rq_bytes(req); + return BLK_STS_OK; +} + +static blk_status_t +scsi_device_state_check(struct scsi_device *sdev, struct request *req) +{ + switch (sdev->sdev_state) { + case SDEV_CREATED: + return BLK_STS_OK; + case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: + /* + * If the device is offline we refuse to process any + * commands. The device must be brought online + * before trying any recovery commands. 
+ */ + if (!sdev->offline_already) { + sdev->offline_already = true; + sdev_printk(KERN_ERR, sdev, + "rejecting I/O to offline device\n"); + } + return BLK_STS_IOERR; + case SDEV_DEL: + /* + * If the device is fully deleted, we refuse to + * process any commands as well. + */ + sdev_printk(KERN_ERR, sdev, + "rejecting I/O to dead device\n"); + return BLK_STS_IOERR; + case SDEV_BLOCK: + case SDEV_CREATED_BLOCK: + return BLK_STS_RESOURCE; + case SDEV_QUIESCE: + /* + * If the device is blocked we only accept power management + * commands. + */ + if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM))) + return BLK_STS_RESOURCE; + return BLK_STS_OK; + default: + /* + * For any other not fully online state we only allow + * power management commands. + */ + if (req && !(req->rq_flags & RQF_PM)) + return BLK_STS_OFFLINE; + return BLK_STS_OK; + } +} + +/* + * scsi_dev_queue_ready: if we can send requests to sdev, assign one token + * and return the token else return -1. + */ +static inline int scsi_dev_queue_ready(struct request_queue *q, + struct scsi_device *sdev) +{ + int token; + + token = sbitmap_get(&sdev->budget_map); + if (atomic_read(&sdev->device_blocked)) { + if (token < 0) + goto out; + + if (scsi_device_busy(sdev) > 1) + goto out_dec; + + /* + * unblock after device_blocked iterates to zero + */ + if (atomic_dec_return(&sdev->device_blocked) > 0) + goto out_dec; + SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev, + "unblocking device at zero depth\n")); + } + + return token; +out_dec: + if (token >= 0) + sbitmap_put(&sdev->budget_map, token); +out: + return -1; +} + +/* + * scsi_target_queue_ready: checks if there we can send commands to target + * @sdev: scsi device on starget to check. + */ +static inline int scsi_target_queue_ready(struct Scsi_Host *shost, + struct scsi_device *sdev) +{ + struct scsi_target *starget = scsi_target(sdev); + unsigned int busy; + + if (starget->single_lun) { + spin_lock_irq(shost->host_lock); + if (starget->starget_sdev_user && + starget->starget_sdev_user != sdev) { + spin_unlock_irq(shost->host_lock); + return 0; + } + starget->starget_sdev_user = sdev; + spin_unlock_irq(shost->host_lock); + } + + if (starget->can_queue <= 0) + return 1; + + busy = atomic_inc_return(&starget->target_busy) - 1; + if (atomic_read(&starget->target_blocked) > 0) { + if (busy) + goto starved; + + /* + * unblock after target_blocked iterates to zero + */ + if (atomic_dec_return(&starget->target_blocked) > 0) + goto out_dec; + + SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget, + "unblocking target at zero depth\n")); + } + + if (busy >= starget->can_queue) + goto starved; + + return 1; + +starved: + spin_lock_irq(shost->host_lock); + list_move_tail(&sdev->starved_entry, &shost->starved_list); + spin_unlock_irq(shost->host_lock); +out_dec: + if (starget->can_queue > 0) + atomic_dec(&starget->target_busy); + return 0; +} + +/* + * scsi_host_queue_ready: if we can send requests to shost, return 1 else + * return 0. We must end up running the queue again whenever 0 is + * returned, else IO can hang. 
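+ *
+ * A 0 return is mapped to BLK_STS_RESOURCE by the caller, and the
+ * starved_list handling below makes sure the queue is run again once
+ * another command on this host completes. Roughly:
+ *
+ *        scsi_queue_rq()
+ *                scsi_host_queue_ready() == 0   -> BLK_STS_RESOURCE
+ *        completion path
+ *                scsi_run_queue()               -> revisits shost->starved_list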
+ */ +static inline int scsi_host_queue_ready(struct request_queue *q, + struct Scsi_Host *shost, + struct scsi_device *sdev, + struct scsi_cmnd *cmd) +{ + if (atomic_read(&shost->host_blocked) > 0) { + if (scsi_host_busy(shost) > 0) + goto starved; + + /* + * unblock after host_blocked iterates to zero + */ + if (atomic_dec_return(&shost->host_blocked) > 0) + goto out_dec; + + SCSI_LOG_MLQUEUE(3, + shost_printk(KERN_INFO, shost, + "unblocking host at zero depth\n")); + } + + if (shost->host_self_blocked) + goto starved; + + /* We're OK to process the command, so we can't be starved */ + if (!list_empty(&sdev->starved_entry)) { + spin_lock_irq(shost->host_lock); + if (!list_empty(&sdev->starved_entry)) + list_del_init(&sdev->starved_entry); + spin_unlock_irq(shost->host_lock); + } + + __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); + + return 1; + +starved: + spin_lock_irq(shost->host_lock); + if (list_empty(&sdev->starved_entry)) + list_add_tail(&sdev->starved_entry, &shost->starved_list); + spin_unlock_irq(shost->host_lock); +out_dec: + scsi_dec_host_busy(shost, cmd); + return 0; +} + +/* + * Busy state exporting function for request stacking drivers. + * + * For efficiency, no lock is taken to check the busy state of + * shost/starget/sdev, since the returned value is not guaranteed and + * may be changed after request stacking drivers call the function, + * regardless of taking lock or not. + * + * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi + * needs to return 'not busy'. Otherwise, request stacking drivers + * may hold requests forever. + */ +static bool scsi_mq_lld_busy(struct request_queue *q) +{ + struct scsi_device *sdev = q->queuedata; + struct Scsi_Host *shost; + + if (blk_queue_dying(q)) + return false; + + shost = sdev->host; + + /* + * Ignore host/starget busy state. + * Since block layer does not have a concept of fairness across + * multiple queues, congestion of host/starget needs to be handled + * in SCSI layer. + */ + if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev)) + return true; + + return false; +} + +/* + * Block layer request completion callback. May be called from interrupt + * context. + */ +static void scsi_complete(struct request *rq) +{ + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); + enum scsi_disposition disposition; + + INIT_LIST_HEAD(&cmd->eh_entry); + + atomic_inc(&cmd->device->iodone_cnt); + if (cmd->result) + atomic_inc(&cmd->device->ioerr_cnt); + + disposition = scsi_decide_disposition(cmd); + if (disposition != SUCCESS && scsi_cmd_runtime_exceeced(cmd)) + disposition = SUCCESS; + + scsi_log_completion(cmd, disposition); + + switch (disposition) { + case SUCCESS: + scsi_finish_command(cmd); + break; + case NEEDS_RETRY: + scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY); + break; + case ADD_TO_MLQUEUE: + scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); + break; + default: + scsi_eh_scmd_add(cmd); + break; + } +} + +/** + * scsi_dispatch_cmd - Dispatch a command to the low-level driver. + * @cmd: command block we are dispatching. + * + * Return: nonzero return request was rejected and device's queue needs to be + * plugged. + */ +static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + int rtn = 0; + + atomic_inc(&cmd->device->iorequest_cnt); + + /* check if the device is still usable */ + if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { + /* in SDEV_DEL we error all commands. 
DID_NO_CONNECT + * returns an immediate error upwards, and signals + * that the device is no longer present */ + cmd->result = DID_NO_CONNECT << 16; + goto done; + } + + /* Check to see if the scsi lld made this device blocked. */ + if (unlikely(scsi_device_blocked(cmd->device))) { + /* + * in blocked state, the command is just put back on + * the device queue. The suspend state has already + * blocked the queue so future requests should not + * occur until the device transitions out of the + * suspend state. + */ + SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, + "queuecommand : device blocked\n")); + atomic_dec(&cmd->device->iorequest_cnt); + return SCSI_MLQUEUE_DEVICE_BUSY; + } + + /* Store the LUN value in cmnd, if needed. */ + if (cmd->device->lun_in_cdb) + cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) | + (cmd->device->lun << 5 & 0xe0); + + scsi_log_send(cmd); + + /* + * Before we queue this command, check if the command + * length exceeds what the host adapter can handle. + */ + if (cmd->cmd_len > cmd->device->host->max_cmd_len) { + SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, + "queuecommand : command too long. " + "cdb_size=%d host->max_cmd_len=%d\n", + cmd->cmd_len, cmd->device->host->max_cmd_len)); + cmd->result = (DID_ABORT << 16); + goto done; + } + + if (unlikely(host->shost_state == SHOST_DEL)) { + cmd->result = (DID_NO_CONNECT << 16); + goto done; + + } + + trace_scsi_dispatch_cmd_start(cmd); + rtn = host->hostt->queuecommand(host, cmd); + if (rtn) { + atomic_dec(&cmd->device->iorequest_cnt); + trace_scsi_dispatch_cmd_error(cmd, rtn); + if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && + rtn != SCSI_MLQUEUE_TARGET_BUSY) + rtn = SCSI_MLQUEUE_HOST_BUSY; + + SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, + "queuecommand : request rejected\n")); + } + + return rtn; + done: + scsi_done(cmd); + return 0; +} + +/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */ +static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost) +{ + return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) * + sizeof(struct scatterlist); +} + +static blk_status_t scsi_prepare_cmd(struct request *req) +{ + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); + struct scsi_device *sdev = req->q->queuedata; + struct Scsi_Host *shost = sdev->host; + bool in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state); + struct scatterlist *sg; + + scsi_init_command(sdev, cmd); + + cmd->eh_eflags = 0; + cmd->prot_type = 0; + cmd->prot_flags = 0; + cmd->submitter = 0; + memset(&cmd->sdb, 0, sizeof(cmd->sdb)); + cmd->underflow = 0; + cmd->transfersize = 0; + cmd->host_scribble = NULL; + cmd->result = 0; + cmd->extra_len = 0; + cmd->state = 0; + if (in_flight) + __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); + + /* + * Only clear the driver-private command data if the LLD does not supply + * a function to initialize that data. 
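+ *
+ * A hypothetical LLD that manages its own per-command data would set
+ * .cmd_size and provide .init_cmd_priv in its scsi_host_template; that
+ * callback runs once per request at tag set allocation time (see
+ * scsi_mq_init_request() below), e.g. (a sketch, not a real driver):
+ *
+ *        static int my_init_cmd_priv(struct Scsi_Host *shost,
+ *                                    struct scsi_cmnd *cmd)
+ *        {
+ *                struct my_cmd_priv *priv = scsi_cmd_priv(cmd);
+ *
+ *                memset(priv, 0, sizeof(*priv));
+ *                return 0;
+ *        }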
+ */ + if (!shost->hostt->init_cmd_priv) + memset(cmd + 1, 0, shost->hostt->cmd_size); + + cmd->prot_op = SCSI_PROT_NORMAL; + if (blk_rq_bytes(req)) + cmd->sc_data_direction = rq_dma_dir(req); + else + cmd->sc_data_direction = DMA_NONE; + + sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; + cmd->sdb.table.sgl = sg; + + if (scsi_host_get_prot(shost)) { + memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer)); + + cmd->prot_sdb->table.sgl = + (struct scatterlist *)(cmd->prot_sdb + 1); + } + + /* + * Special handling for passthrough commands, which don't go to the ULP + * at all: + */ + if (blk_rq_is_passthrough(req)) + return scsi_setup_scsi_cmnd(sdev, req); + + if (sdev->handler && sdev->handler->prep_fn) { + blk_status_t ret = sdev->handler->prep_fn(sdev, req); + + if (ret != BLK_STS_OK) + return ret; + } + + /* Usually overridden by the ULP */ + cmd->allowed = 0; + memset(cmd->cmnd, 0, sizeof(cmd->cmnd)); + return scsi_cmd_to_driver(cmd)->init_command(cmd); +} + +static void scsi_done_internal(struct scsi_cmnd *cmd, bool complete_directly) +{ + struct request *req = scsi_cmd_to_rq(cmd); + + switch (cmd->submitter) { + case SUBMITTED_BY_BLOCK_LAYER: + break; + case SUBMITTED_BY_SCSI_ERROR_HANDLER: + return scsi_eh_done(cmd); + case SUBMITTED_BY_SCSI_RESET_IOCTL: + return; + } + + if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q))) + return; + if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state))) + return; + trace_scsi_dispatch_cmd_done(cmd); + + if (complete_directly) + blk_mq_complete_request_direct(req, scsi_complete); + else + blk_mq_complete_request(req); +} + +void scsi_done(struct scsi_cmnd *cmd) +{ + scsi_done_internal(cmd, false); +} +EXPORT_SYMBOL(scsi_done); + +void scsi_done_direct(struct scsi_cmnd *cmd) +{ + scsi_done_internal(cmd, true); +} +EXPORT_SYMBOL(scsi_done_direct); + +static void scsi_mq_put_budget(struct request_queue *q, int budget_token) +{ + struct scsi_device *sdev = q->queuedata; + + sbitmap_put(&sdev->budget_map, budget_token); +} + +/* + * When to reinvoke queueing after a resource shortage. It's 3 msecs to + * not change behaviour from the previous unplug mechanism, experimentation + * may prove this needs changing. + */ +#define SCSI_QUEUE_DELAY 3 + +static int scsi_mq_get_budget(struct request_queue *q) +{ + struct scsi_device *sdev = q->queuedata; + int token = scsi_dev_queue_ready(q, sdev); + + if (token >= 0) + return token; + + atomic_inc(&sdev->restarts); + + /* + * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy). + * .restarts must be incremented before .device_busy is read because the + * code in scsi_run_queue_async() depends on the order of these operations. + */ + smp_mb__after_atomic(); + + /* + * If all in-flight requests originated from this LUN are completed + * before reading .device_busy, sdev->device_busy will be observed as + * zero, then blk_mq_delay_run_hw_queues() will dispatch this request + * soon. Otherwise, completion of one of these requests will observe + * the .restarts flag, and the request queue will be run for handling + * this request, see scsi_end_request(). 
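+ *
+ * A rough sketch of how the two sides pair up (not literal code):
+ *
+ *        budget side (here)              completion side
+ *        atomic_inc(&sdev->restarts)     release budget / drop device_busy
+ *        smp_mb__after_atomic()          (barrier implied when freeing budget)
+ *        read scsi_device_busy(sdev)     read sdev->restarts, rerun hw queues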
+ */ + if (unlikely(scsi_device_busy(sdev) == 0 && + !scsi_device_blocked(sdev))) + blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY); + return -1; +} + +static void scsi_mq_set_rq_budget_token(struct request *req, int token) +{ + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); + + cmd->budget_token = token; +} + +static int scsi_mq_get_rq_budget_token(struct request *req) +{ + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); + + return cmd->budget_token; +} + +static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct request *req = bd->rq; + struct request_queue *q = req->q; + struct scsi_device *sdev = q->queuedata; + struct Scsi_Host *shost = sdev->host; + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); + blk_status_t ret; + int reason; + + WARN_ON_ONCE(cmd->budget_token < 0); + + /* + * If the device is not in running state we will reject some or all + * commands. + */ + if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { + ret = scsi_device_state_check(sdev, req); + if (ret != BLK_STS_OK) + goto out_put_budget; + } + + ret = BLK_STS_RESOURCE; + if (!scsi_target_queue_ready(shost, sdev)) + goto out_put_budget; + if (unlikely(scsi_host_in_recovery(shost))) { + if (cmd->flags & SCMD_FAIL_IF_RECOVERING) + ret = BLK_STS_OFFLINE; + goto out_dec_target_busy; + } + if (!scsi_host_queue_ready(q, shost, sdev, cmd)) + goto out_dec_target_busy; + + if (!(req->rq_flags & RQF_DONTPREP)) { + ret = scsi_prepare_cmd(req); + if (ret != BLK_STS_OK) + goto out_dec_host_busy; + req->rq_flags |= RQF_DONTPREP; + } else { + clear_bit(SCMD_STATE_COMPLETE, &cmd->state); + } + + cmd->flags &= SCMD_PRESERVED_FLAGS; + if (sdev->simple_tags) + cmd->flags |= SCMD_TAGGED; + if (bd->last) + cmd->flags |= SCMD_LAST; + + scsi_set_resid(cmd, 0); + memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + cmd->submitter = SUBMITTED_BY_BLOCK_LAYER; + + blk_mq_start_request(req); + reason = scsi_dispatch_cmd(cmd); + if (reason) { + scsi_set_blocked(cmd, reason); + ret = BLK_STS_RESOURCE; + goto out_dec_host_busy; + } + + return BLK_STS_OK; + +out_dec_host_busy: + scsi_dec_host_busy(shost, cmd); +out_dec_target_busy: + if (scsi_target(sdev)->can_queue > 0) + atomic_dec(&scsi_target(sdev)->target_busy); +out_put_budget: + scsi_mq_put_budget(q, cmd->budget_token); + cmd->budget_token = -1; + switch (ret) { + case BLK_STS_OK: + break; + case BLK_STS_RESOURCE: + case BLK_STS_ZONE_RESOURCE: + if (scsi_device_blocked(sdev)) + ret = BLK_STS_DEV_RESOURCE; + break; + case BLK_STS_AGAIN: + cmd->result = DID_BUS_BUSY << 16; + if (req->rq_flags & RQF_DONTPREP) + scsi_mq_uninit_cmd(cmd); + break; + default: + if (unlikely(!scsi_device_online(sdev))) + cmd->result = DID_NO_CONNECT << 16; + else + cmd->result = DID_ERROR << 16; + /* + * Make sure to release all allocated resources when + * we hit an error, as we will never see this command + * again. 
+ */ + if (req->rq_flags & RQF_DONTPREP) + scsi_mq_uninit_cmd(cmd); + scsi_run_queue_async(sdev); + break; + } + return ret; +} + +static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, + unsigned int hctx_idx, unsigned int numa_node) +{ + struct Scsi_Host *shost = set->driver_data; + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); + struct scatterlist *sg; + int ret = 0; + + cmd->sense_buffer = + kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node); + if (!cmd->sense_buffer) + return -ENOMEM; + + if (scsi_host_get_prot(shost)) { + sg = (void *)cmd + sizeof(struct scsi_cmnd) + + shost->hostt->cmd_size; + cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost); + } + + if (shost->hostt->init_cmd_priv) { + ret = shost->hostt->init_cmd_priv(shost, cmd); + if (ret < 0) + kmem_cache_free(scsi_sense_cache, cmd->sense_buffer); + } + + return ret; +} + +static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq, + unsigned int hctx_idx) +{ + struct Scsi_Host *shost = set->driver_data; + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); + + if (shost->hostt->exit_cmd_priv) + shost->hostt->exit_cmd_priv(shost, cmd); + kmem_cache_free(scsi_sense_cache, cmd->sense_buffer); +} + + +static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) +{ + struct Scsi_Host *shost = hctx->driver_data; + + if (shost->hostt->mq_poll) + return shost->hostt->mq_poll(shost, hctx->queue_num); + + return 0; +} + +static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + struct Scsi_Host *shost = data; + + hctx->driver_data = shost; + return 0; +} + +static void scsi_map_queues(struct blk_mq_tag_set *set) +{ + struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set); + + if (shost->hostt->map_queues) + return shost->hostt->map_queues(shost); + blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); +} + +void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) +{ + struct device *dev = shost->dma_dev; + + /* + * this limit is imposed by hardware restrictions + */ + blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, + SG_MAX_SEGMENTS)); + + if (scsi_host_prot_dma(shost)) { + shost->sg_prot_tablesize = + min_not_zero(shost->sg_prot_tablesize, + (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS); + BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); + blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); + } + + blk_queue_max_hw_sectors(q, shost->max_sectors); + blk_queue_segment_boundary(q, shost->dma_boundary); + dma_set_seg_boundary(dev, shost->dma_boundary); + + blk_queue_max_segment_size(q, shost->max_segment_size); + blk_queue_virt_boundary(q, shost->virt_boundary_mask); + dma_set_max_seg_size(dev, queue_max_segment_size(q)); + + /* + * Set a reasonable default alignment: The larger of 32-byte (dword), + * which is a common minimum for HBAs, and the minimum DMA alignment, + * which is set by the platform. + * + * Devices that require a bigger alignment can increase it later. 
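+ *
+ * For example, if dma_get_cache_alignment() returns 64, the mask set
+ * below is max(4, 64) - 1 = 63, so passthrough buffers that are not
+ * 64-byte aligned get bounced by the block layer; on a fully coherent
+ * platform where it returns 1, the mask is 4 - 1 = 3 (dword alignment).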
+ */ + blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1); +} +EXPORT_SYMBOL_GPL(__scsi_init_queue); + +static const struct blk_mq_ops scsi_mq_ops_no_commit = { + .get_budget = scsi_mq_get_budget, + .put_budget = scsi_mq_put_budget, + .queue_rq = scsi_queue_rq, + .complete = scsi_complete, + .timeout = scsi_timeout, +#ifdef CONFIG_BLK_DEBUG_FS + .show_rq = scsi_show_rq, +#endif + .init_request = scsi_mq_init_request, + .exit_request = scsi_mq_exit_request, + .cleanup_rq = scsi_cleanup_rq, + .busy = scsi_mq_lld_busy, + .map_queues = scsi_map_queues, + .init_hctx = scsi_init_hctx, + .poll = scsi_mq_poll, + .set_rq_budget_token = scsi_mq_set_rq_budget_token, + .get_rq_budget_token = scsi_mq_get_rq_budget_token, +}; + + +static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx) +{ + struct Scsi_Host *shost = hctx->driver_data; + + shost->hostt->commit_rqs(shost, hctx->queue_num); +} + +static const struct blk_mq_ops scsi_mq_ops = { + .get_budget = scsi_mq_get_budget, + .put_budget = scsi_mq_put_budget, + .queue_rq = scsi_queue_rq, + .commit_rqs = scsi_commit_rqs, + .complete = scsi_complete, + .timeout = scsi_timeout, +#ifdef CONFIG_BLK_DEBUG_FS + .show_rq = scsi_show_rq, +#endif + .init_request = scsi_mq_init_request, + .exit_request = scsi_mq_exit_request, + .cleanup_rq = scsi_cleanup_rq, + .busy = scsi_mq_lld_busy, + .map_queues = scsi_map_queues, + .init_hctx = scsi_init_hctx, + .poll = scsi_mq_poll, + .set_rq_budget_token = scsi_mq_set_rq_budget_token, + .get_rq_budget_token = scsi_mq_get_rq_budget_token, +}; + +int scsi_mq_setup_tags(struct Scsi_Host *shost) +{ + unsigned int cmd_size, sgl_size; + struct blk_mq_tag_set *tag_set = &shost->tag_set; + + sgl_size = max_t(unsigned int, sizeof(struct scatterlist), + scsi_mq_inline_sgl_size(shost)); + cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; + if (scsi_host_get_prot(shost)) + cmd_size += sizeof(struct scsi_data_buffer) + + sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT; + + memset(tag_set, 0, sizeof(*tag_set)); + if (shost->hostt->commit_rqs) + tag_set->ops = &scsi_mq_ops; + else + tag_set->ops = &scsi_mq_ops_no_commit; + tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1; + tag_set->nr_maps = shost->nr_maps ? : 1; + tag_set->queue_depth = shost->can_queue; + tag_set->cmd_size = cmd_size; + tag_set->numa_node = dev_to_node(shost->dma_dev); + tag_set->flags = BLK_MQ_F_SHOULD_MERGE; + tag_set->flags |= + BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); + if (shost->queuecommand_may_block) + tag_set->flags |= BLK_MQ_F_BLOCKING; + tag_set->driver_data = shost; + if (shost->host_tagset) + tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED; + + return blk_mq_alloc_tag_set(tag_set); +} + +void scsi_mq_free_tags(struct kref *kref) +{ + struct Scsi_Host *shost = container_of(kref, typeof(*shost), + tagset_refcnt); + + blk_mq_free_tag_set(&shost->tag_set); + complete(&shost->tagset_freed); +} + +/** + * scsi_device_from_queue - return sdev associated with a request_queue + * @q: The request queue to return the sdev from + * + * Return the sdev associated with a request queue or NULL if the + * request_queue does not reference a SCSI device. 
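+ *
+ * The returned device carries a reference; a minimal usage sketch:
+ *
+ *        struct scsi_device *sdev = scsi_device_from_queue(q);
+ *
+ *        if (sdev) {
+ *                ...use sdev...
+ *                put_device(&sdev->sdev_gendev);
+ *        }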
+ */ +struct scsi_device *scsi_device_from_queue(struct request_queue *q) +{ + struct scsi_device *sdev = NULL; + + if (q->mq_ops == &scsi_mq_ops_no_commit || + q->mq_ops == &scsi_mq_ops) + sdev = q->queuedata; + if (!sdev || !get_device(&sdev->sdev_gendev)) + sdev = NULL; + + return sdev; +} +/* + * pktcdvd should have been integrated into the SCSI layers, but for historical + * reasons like the old IDE driver it isn't. This export allows it to safely + * probe if a given device is a SCSI one and only attach to that. + */ +#ifdef CONFIG_CDROM_PKTCDVD_MODULE +EXPORT_SYMBOL_GPL(scsi_device_from_queue); +#endif + +/** + * scsi_block_requests - Utility function used by low-level drivers to prevent + * further commands from being queued to the device. + * @shost: host in question + * + * There is no timer nor any other means by which the requests get unblocked + * other than the low-level driver calling scsi_unblock_requests(). + */ +void scsi_block_requests(struct Scsi_Host *shost) +{ + shost->host_self_blocked = 1; +} +EXPORT_SYMBOL(scsi_block_requests); + +/** + * scsi_unblock_requests - Utility function used by low-level drivers to allow + * further commands to be queued to the device. + * @shost: host in question + * + * There is no timer nor any other means by which the requests get unblocked + * other than the low-level driver calling scsi_unblock_requests(). This is done + * as an API function so that changes to the internals of the scsi mid-layer + * won't require wholesale changes to drivers that use this feature. + */ +void scsi_unblock_requests(struct Scsi_Host *shost) +{ + shost->host_self_blocked = 0; + scsi_run_host_queues(shost); +} +EXPORT_SYMBOL(scsi_unblock_requests); + +void scsi_exit_queue(void) +{ + kmem_cache_destroy(scsi_sense_cache); +} + +/** + * scsi_mode_select - issue a mode select + * @sdev: SCSI device to be queried + * @pf: Page format bit (1 == standard, 0 == vendor specific) + * @sp: Save page bit (0 == don't save, 1 == save) + * @buffer: request buffer (may not be smaller than eight bytes) + * @len: length of request buffer. + * @timeout: command timeout + * @retries: number of retries before failing + * @data: returns a structure abstracting the mode header data + * @sshdr: place to put sense data (or NULL if no sense to be collected). + * must be SCSI_SENSE_BUFFERSIZE big. + * + * Returns zero if successful; negative error number or scsi + * status on error + * + */ +int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, + unsigned char *buffer, int len, int timeout, int retries, + struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) +{ + unsigned char cmd[10]; + unsigned char *real_buffer; + const struct scsi_exec_args exec_args = { + .sshdr = sshdr, + }; + int ret; + + memset(cmd, 0, sizeof(cmd)); + cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0); + + /* + * Use MODE SELECT(10) if the device asked for it or if the mode page + * and the mode select header cannot fit within the maximumm 255 bytes + * of the MODE SELECT(6) command. + */ + if (sdev->use_10_for_ms || + len + 4 > 255 || + data->block_descriptor_length > 255) { + if (len > 65535 - 8) + return -EINVAL; + real_buffer = kmalloc(8 + len, GFP_KERNEL); + if (!real_buffer) + return -ENOMEM; + memcpy(real_buffer + 8, buffer, len); + len += 8; + real_buffer[0] = 0; + real_buffer[1] = 0; + real_buffer[2] = data->medium_type; + real_buffer[3] = data->device_specific; + real_buffer[4] = data->longlba ? 
0x01 : 0; + real_buffer[5] = 0; + put_unaligned_be16(data->block_descriptor_length, + &real_buffer[6]); + + cmd[0] = MODE_SELECT_10; + put_unaligned_be16(len, &cmd[7]); + } else { + if (data->longlba) + return -EINVAL; + + real_buffer = kmalloc(4 + len, GFP_KERNEL); + if (!real_buffer) + return -ENOMEM; + memcpy(real_buffer + 4, buffer, len); + len += 4; + real_buffer[0] = 0; + real_buffer[1] = data->medium_type; + real_buffer[2] = data->device_specific; + real_buffer[3] = data->block_descriptor_length; + + cmd[0] = MODE_SELECT; + cmd[4] = len; + } + + ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, real_buffer, len, + timeout, retries, &exec_args); + kfree(real_buffer); + return ret; +} +EXPORT_SYMBOL_GPL(scsi_mode_select); + +/** + * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary. + * @sdev: SCSI device to be queried + * @dbd: set to prevent mode sense from returning block descriptors + * @modepage: mode page being requested + * @subpage: sub-page of the mode page being requested + * @buffer: request buffer (may not be smaller than eight bytes) + * @len: length of request buffer. + * @timeout: command timeout + * @retries: number of retries before failing + * @data: returns a structure abstracting the mode header data + * @sshdr: place to put sense data (or NULL if no sense to be collected). + * must be SCSI_SENSE_BUFFERSIZE big. + * + * Returns zero if successful, or a negative error number on failure + */ +int +scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage, + unsigned char *buffer, int len, int timeout, int retries, + struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) +{ + unsigned char cmd[12]; + int use_10_for_ms; + int header_length; + int result, retry_count = retries; + struct scsi_sense_hdr my_sshdr; + const struct scsi_exec_args exec_args = { + /* caller might not be interested in sense, but we need it */ + .sshdr = sshdr ? : &my_sshdr, + }; + + memset(data, 0, sizeof(*data)); + memset(&cmd[0], 0, 12); + + dbd = sdev->set_dbd_for_ms ? 8 : dbd; + cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ + cmd[2] = modepage; + cmd[3] = subpage; + + sshdr = exec_args.sshdr; + + retry: + use_10_for_ms = sdev->use_10_for_ms || len > 255; + + if (use_10_for_ms) { + if (len < 8 || len > 65535) + return -EINVAL; + + cmd[0] = MODE_SENSE_10; + put_unaligned_be16(len, &cmd[7]); + header_length = 8; + } else { + if (len < 4) + return -EINVAL; + + cmd[0] = MODE_SENSE; + cmd[4] = len; + header_length = 4; + } + + memset(buffer, 0, len); + + result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len, + timeout, retries, &exec_args); + if (result < 0) + return result; + + /* This code looks awful: what it's doing is making sure an + * ILLEGAL REQUEST sense return identifies the actual command + * byte as the problem. MODE_SENSE commands can return + * ILLEGAL REQUEST if the code page isn't supported */ + + if (!scsi_status_is_good(result)) { + if (scsi_sense_valid(sshdr)) { + if ((sshdr->sense_key == ILLEGAL_REQUEST) && + (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { + /* + * Invalid command operation code: retry using + * MODE SENSE(6) if this was a MODE SENSE(10) + * request, except if the request mode page is + * too large for MODE SENSE single byte + * allocation length field. 
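+ *
+ * (MODE SENSE(6) carries the allocation length in the single byte
+ * cmd[4], which is why lengths above 255 cannot fall back, while
+ * MODE SENSE(10) uses a 16-bit length at cmd[7..8] and returns an
+ * 8-byte header instead of a 4-byte one, as tracked by header_length
+ * above.)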
+ */ + if (use_10_for_ms) { + if (len > 255) + return -EIO; + sdev->use_10_for_ms = 0; + goto retry; + } + } + if (scsi_status_is_check_condition(result) && + sshdr->sense_key == UNIT_ATTENTION && + retry_count) { + retry_count--; + goto retry; + } + } + return -EIO; + } + if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && + (modepage == 6 || modepage == 8))) { + /* Initio breakage? */ + header_length = 0; + data->length = 13; + data->medium_type = 0; + data->device_specific = 0; + data->longlba = 0; + data->block_descriptor_length = 0; + } else if (use_10_for_ms) { + data->length = get_unaligned_be16(&buffer[0]) + 2; + data->medium_type = buffer[2]; + data->device_specific = buffer[3]; + data->longlba = buffer[4] & 0x01; + data->block_descriptor_length = get_unaligned_be16(&buffer[6]); + } else { + data->length = buffer[0] + 1; + data->medium_type = buffer[1]; + data->device_specific = buffer[2]; + data->block_descriptor_length = buffer[3]; + } + data->header_length = header_length; + + return 0; +} +EXPORT_SYMBOL(scsi_mode_sense); + +/** + * scsi_test_unit_ready - test if unit is ready + * @sdev: scsi device to change the state of. + * @timeout: command timeout + * @retries: number of retries before failing + * @sshdr: outpout pointer for decoded sense information. + * + * Returns zero if unsuccessful or an error if TUR failed. For + * removable media, UNIT_ATTENTION sets ->changed flag. + **/ +int +scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, + struct scsi_sense_hdr *sshdr) +{ + char cmd[] = { + TEST_UNIT_READY, 0, 0, 0, 0, 0, + }; + const struct scsi_exec_args exec_args = { + .sshdr = sshdr, + }; + int result; + + /* try to eat the UNIT_ATTENTION if there are enough retries */ + do { + result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0, + timeout, 1, &exec_args); + if (sdev->removable && scsi_sense_valid(sshdr) && + sshdr->sense_key == UNIT_ATTENTION) + sdev->changed = 1; + } while (scsi_sense_valid(sshdr) && + sshdr->sense_key == UNIT_ATTENTION && --retries); + + return result; +} +EXPORT_SYMBOL(scsi_test_unit_ready); + +/** + * scsi_device_set_state - Take the given device through the device state model. + * @sdev: scsi device to change the state of. + * @state: state to change to. + * + * Returns zero if successful or an error if the requested + * transition is illegal. 
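+ *
+ * Callers normally serialize transitions through sdev->state_mutex,
+ * e.g. (sketch):
+ *
+ *        mutex_lock(&sdev->state_mutex);
+ *        ret = scsi_device_set_state(sdev, SDEV_RUNNING);
+ *        mutex_unlock(&sdev->state_mutex);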
+ */ +int +scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) +{ + enum scsi_device_state oldstate = sdev->sdev_state; + + if (state == oldstate) + return 0; + + switch (state) { + case SDEV_CREATED: + switch (oldstate) { + case SDEV_CREATED_BLOCK: + break; + default: + goto illegal; + } + break; + + case SDEV_RUNNING: + switch (oldstate) { + case SDEV_CREATED: + case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: + case SDEV_QUIESCE: + case SDEV_BLOCK: + break; + default: + goto illegal; + } + break; + + case SDEV_QUIESCE: + switch (oldstate) { + case SDEV_RUNNING: + case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: + break; + default: + goto illegal; + } + break; + + case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: + switch (oldstate) { + case SDEV_CREATED: + case SDEV_RUNNING: + case SDEV_QUIESCE: + case SDEV_BLOCK: + break; + default: + goto illegal; + } + break; + + case SDEV_BLOCK: + switch (oldstate) { + case SDEV_RUNNING: + case SDEV_CREATED_BLOCK: + case SDEV_QUIESCE: + case SDEV_OFFLINE: + break; + default: + goto illegal; + } + break; + + case SDEV_CREATED_BLOCK: + switch (oldstate) { + case SDEV_CREATED: + break; + default: + goto illegal; + } + break; + + case SDEV_CANCEL: + switch (oldstate) { + case SDEV_CREATED: + case SDEV_RUNNING: + case SDEV_QUIESCE: + case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: + break; + default: + goto illegal; + } + break; + + case SDEV_DEL: + switch (oldstate) { + case SDEV_CREATED: + case SDEV_RUNNING: + case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: + case SDEV_CANCEL: + case SDEV_BLOCK: + case SDEV_CREATED_BLOCK: + break; + default: + goto illegal; + } + break; + + } + sdev->offline_already = false; + sdev->sdev_state = state; + return 0; + + illegal: + SCSI_LOG_ERROR_RECOVERY(1, + sdev_printk(KERN_ERR, sdev, + "Illegal state transition %s->%s", + scsi_device_state_name(oldstate), + scsi_device_state_name(state)) + ); + return -EINVAL; +} +EXPORT_SYMBOL(scsi_device_set_state); + +/** + * scsi_evt_emit - emit a single SCSI device uevent + * @sdev: associated SCSI device + * @evt: event to emit + * + * Send a single uevent (scsi_event) to the associated scsi_device. + */ +static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) +{ + int idx = 0; + char *envp[3]; + + switch (evt->evt_type) { + case SDEV_EVT_MEDIA_CHANGE: + envp[idx++] = "SDEV_MEDIA_CHANGE=1"; + break; + case SDEV_EVT_INQUIRY_CHANGE_REPORTED: + scsi_rescan_device(sdev); + envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; + break; + case SDEV_EVT_CAPACITY_CHANGE_REPORTED: + envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED"; + break; + case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: + envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED"; + break; + case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: + envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED"; + break; + case SDEV_EVT_LUN_CHANGE_REPORTED: + envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED"; + break; + case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED: + envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED"; + break; + case SDEV_EVT_POWER_ON_RESET_OCCURRED: + envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED"; + break; + default: + /* do nothing */ + break; + } + + envp[idx++] = NULL; + + kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp); +} + +/** + * scsi_evt_thread - send a uevent for each scsi event + * @work: work struct for scsi_device + * + * Dispatch queued events to their associated scsi_device kobjects + * as uevents. 
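+ *
+ * Events typically enter this machinery through sdev_evt_send_simple()
+ * or sdev_evt_send(), called from an LLD or transport class, e.g.:
+ *
+ *        sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);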
+ */ +void scsi_evt_thread(struct work_struct *work) +{ + struct scsi_device *sdev; + enum scsi_device_event evt_type; + LIST_HEAD(event_list); + + sdev = container_of(work, struct scsi_device, event_work); + + for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++) + if (test_and_clear_bit(evt_type, sdev->pending_events)) + sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); + + while (1) { + struct scsi_event *evt; + struct list_head *this, *tmp; + unsigned long flags; + + spin_lock_irqsave(&sdev->list_lock, flags); + list_splice_init(&sdev->event_list, &event_list); + spin_unlock_irqrestore(&sdev->list_lock, flags); + + if (list_empty(&event_list)) + break; + + list_for_each_safe(this, tmp, &event_list) { + evt = list_entry(this, struct scsi_event, node); + list_del(&evt->node); + scsi_evt_emit(sdev, evt); + kfree(evt); + } + } +} + +/** + * sdev_evt_send - send asserted event to uevent thread + * @sdev: scsi_device event occurred on + * @evt: event to send + * + * Assert scsi device event asynchronously. + */ +void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) +{ + unsigned long flags; + +#if 0 + /* FIXME: currently this check eliminates all media change events + * for polled devices. Need to update to discriminate between AN + * and polled events */ + if (!test_bit(evt->evt_type, sdev->supported_events)) { + kfree(evt); + return; + } +#endif + + spin_lock_irqsave(&sdev->list_lock, flags); + list_add_tail(&evt->node, &sdev->event_list); + schedule_work(&sdev->event_work); + spin_unlock_irqrestore(&sdev->list_lock, flags); +} +EXPORT_SYMBOL_GPL(sdev_evt_send); + +/** + * sdev_evt_alloc - allocate a new scsi event + * @evt_type: type of event to allocate + * @gfpflags: GFP flags for allocation + * + * Allocates and returns a new scsi_event. + */ +struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, + gfp_t gfpflags) +{ + struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags); + if (!evt) + return NULL; + + evt->evt_type = evt_type; + INIT_LIST_HEAD(&evt->node); + + /* evt_type-specific initialization, if any */ + switch (evt_type) { + case SDEV_EVT_MEDIA_CHANGE: + case SDEV_EVT_INQUIRY_CHANGE_REPORTED: + case SDEV_EVT_CAPACITY_CHANGE_REPORTED: + case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: + case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: + case SDEV_EVT_LUN_CHANGE_REPORTED: + case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED: + case SDEV_EVT_POWER_ON_RESET_OCCURRED: + default: + /* do nothing */ + break; + } + + return evt; +} +EXPORT_SYMBOL_GPL(sdev_evt_alloc); + +/** + * sdev_evt_send_simple - send asserted event to uevent thread + * @sdev: scsi_device event occurred on + * @evt_type: type of event to send + * @gfpflags: GFP flags for allocation + * + * Assert scsi device event asynchronously, given an event type. + */ +void sdev_evt_send_simple(struct scsi_device *sdev, + enum scsi_device_event evt_type, gfp_t gfpflags) +{ + struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags); + if (!evt) { + sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n", + evt_type); + return; + } + + sdev_evt_send(sdev, evt); +} +EXPORT_SYMBOL_GPL(sdev_evt_send_simple); + +/** + * scsi_device_quiesce - Block all commands except power management. + * @sdev: scsi device to quiesce. + * + * This works by trying to transition to the SDEV_QUIESCE state + * (which must be a legal transition). When the device is in this + * state, only power management requests will be accepted, all others will + * be deferred. 
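+ *
+ * The usual pairing, as used by the SCSI power management code, is
+ * roughly:
+ *
+ *        ret = scsi_device_quiesce(sdev);
+ *        if (ret == 0) {
+ *                ...issue RQF_PM requests, e.g. START STOP UNIT...
+ *                scsi_device_resume(sdev);
+ *        }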
+ * + * Must be called with user context, may sleep. + * + * Returns zero if unsuccessful or an error if not. + */ +int +scsi_device_quiesce(struct scsi_device *sdev) +{ + struct request_queue *q = sdev->request_queue; + int err; + + /* + * It is allowed to call scsi_device_quiesce() multiple times from + * the same context but concurrent scsi_device_quiesce() calls are + * not allowed. + */ + WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current); + + if (sdev->quiesced_by == current) + return 0; + + blk_set_pm_only(q); + + blk_mq_freeze_queue(q); + /* + * Ensure that the effect of blk_set_pm_only() will be visible + * for percpu_ref_tryget() callers that occur after the queue + * unfreeze even if the queue was already frozen before this function + * was called. See also https://lwn.net/Articles/573497/. + */ + synchronize_rcu(); + blk_mq_unfreeze_queue(q); + + mutex_lock(&sdev->state_mutex); + err = scsi_device_set_state(sdev, SDEV_QUIESCE); + if (err == 0) + sdev->quiesced_by = current; + else + blk_clear_pm_only(q); + mutex_unlock(&sdev->state_mutex); + + return err; +} +EXPORT_SYMBOL(scsi_device_quiesce); + +/** + * scsi_device_resume - Restart user issued commands to a quiesced device. + * @sdev: scsi device to resume. + * + * Moves the device from quiesced back to running and restarts the + * queues. + * + * Must be called with user context, may sleep. + */ +void scsi_device_resume(struct scsi_device *sdev) +{ + /* check if the device state was mutated prior to resume, and if + * so assume the state is being managed elsewhere (for example + * device deleted during suspend) + */ + mutex_lock(&sdev->state_mutex); + if (sdev->sdev_state == SDEV_QUIESCE) + scsi_device_set_state(sdev, SDEV_RUNNING); + if (sdev->quiesced_by) { + sdev->quiesced_by = NULL; + blk_clear_pm_only(sdev->request_queue); + } + mutex_unlock(&sdev->state_mutex); +} +EXPORT_SYMBOL(scsi_device_resume); + +static void +device_quiesce_fn(struct scsi_device *sdev, void *data) +{ + scsi_device_quiesce(sdev); +} + +void +scsi_target_quiesce(struct scsi_target *starget) +{ + starget_for_each_device(starget, NULL, device_quiesce_fn); +} +EXPORT_SYMBOL(scsi_target_quiesce); + +static void +device_resume_fn(struct scsi_device *sdev, void *data) +{ + scsi_device_resume(sdev); +} + +void +scsi_target_resume(struct scsi_target *starget) +{ + starget_for_each_device(starget, NULL, device_resume_fn); +} +EXPORT_SYMBOL(scsi_target_resume); + +static int __scsi_internal_device_block_nowait(struct scsi_device *sdev) +{ + if (scsi_device_set_state(sdev, SDEV_BLOCK)) + return scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); + + return 0; +} + +void scsi_start_queue(struct scsi_device *sdev) +{ + if (cmpxchg(&sdev->queue_stopped, 1, 0)) + blk_mq_unquiesce_queue(sdev->request_queue); +} + +static void scsi_stop_queue(struct scsi_device *sdev) +{ + /* + * The atomic variable of ->queue_stopped covers that + * blk_mq_quiesce_queue* is balanced with blk_mq_unquiesce_queue. + * + * The caller needs to wait until quiesce is done. + */ + if (!cmpxchg(&sdev->queue_stopped, 0, 1)) + blk_mq_quiesce_queue_nowait(sdev->request_queue); +} + +/** + * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state + * @sdev: device to block + * + * Pause SCSI command processing on the specified device. Does not sleep. + * + * Returns zero if successful or a negative error code upon failure. + * + * Notes: + * This routine transitions the device to the SDEV_BLOCK state (which must be + * a legal transition). 
When the device is in this state, command processing + * is paused until the device leaves the SDEV_BLOCK state. See also + * scsi_internal_device_unblock_nowait(). + */ +int scsi_internal_device_block_nowait(struct scsi_device *sdev) +{ + int ret = __scsi_internal_device_block_nowait(sdev); + + /* + * The device has transitioned to SDEV_BLOCK. Stop the + * block layer from calling the midlayer with this device's + * request queue. + */ + if (!ret) + scsi_stop_queue(sdev); + return ret; +} +EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait); + +/** + * scsi_device_block - try to transition to the SDEV_BLOCK state + * @sdev: device to block + * @data: dummy argument, ignored + * + * Pause SCSI command processing on the specified device. Callers must wait + * until all ongoing scsi_queue_rq() calls have finished after this function + * returns. + * + * Note: + * This routine transitions the device to the SDEV_BLOCK state (which must be + * a legal transition). When the device is in this state, command processing + * is paused until the device leaves the SDEV_BLOCK state. See also + * scsi_internal_device_unblock(). + */ +static void scsi_device_block(struct scsi_device *sdev, void *data) +{ + int err; + enum scsi_device_state state; + + mutex_lock(&sdev->state_mutex); + err = __scsi_internal_device_block_nowait(sdev); + state = sdev->sdev_state; + if (err == 0) + /* + * scsi_stop_queue() must be called with the state_mutex + * held. Otherwise a simultaneous scsi_start_queue() call + * might unquiesce the queue before we quiesce it. + */ + scsi_stop_queue(sdev); + + mutex_unlock(&sdev->state_mutex); + + WARN_ONCE(err, "%s: failed to block %s in state %d\n", + __func__, dev_name(&sdev->sdev_gendev), state); +} + +/** + * scsi_internal_device_unblock_nowait - resume a device after a block request + * @sdev: device to resume + * @new_state: state to set the device to after unblocking + * + * Restart the device queue for a previously suspended SCSI device. Does not + * sleep. + * + * Returns zero if successful or a negative error code upon failure. + * + * Notes: + * This routine transitions the device to the SDEV_RUNNING state or to one of + * the offline states (which must be a legal transition) allowing the midlayer + * to goose the queue for this device. + */ +int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, + enum scsi_device_state new_state) +{ + switch (new_state) { + case SDEV_RUNNING: + case SDEV_TRANSPORT_OFFLINE: + break; + default: + return -EINVAL; + } + + /* + * Try to transition the scsi device to SDEV_RUNNING or one of the + * offlined states and goose the device queue if successful. + */ + switch (sdev->sdev_state) { + case SDEV_BLOCK: + case SDEV_TRANSPORT_OFFLINE: + sdev->sdev_state = new_state; + break; + case SDEV_CREATED_BLOCK: + if (new_state == SDEV_TRANSPORT_OFFLINE || + new_state == SDEV_OFFLINE) + sdev->sdev_state = new_state; + else + sdev->sdev_state = SDEV_CREATED; + break; + case SDEV_CANCEL: + case SDEV_OFFLINE: + break; + default: + return -EINVAL; + } + scsi_start_queue(sdev); + + return 0; +} +EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait); + +/** + * scsi_internal_device_unblock - resume a device after a block request + * @sdev: device to resume + * @new_state: state to set the device to after unblocking + * + * Restart the device queue for a previously suspended SCSI device. May sleep. + * + * Returns zero if successful or a negative error code upon failure. 
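+ *
+ * A transport class that detects link loss might use the higher level
+ * wrappers built on top of this, roughly (port_dev standing in for the
+ * parent device of the affected targets):
+ *
+ *        scsi_block_targets(shost, port_dev);
+ *        ...
+ *        scsi_target_unblock(port_dev, SDEV_RUNNING);
+ *
+ * or pass SDEV_TRANSPORT_OFFLINE instead once its device loss timer
+ * has expired.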
+ * + * Notes: + * This routine transitions the device to the SDEV_RUNNING state or to one of + * the offline states (which must be a legal transition) allowing the midlayer + * to goose the queue for this device. + */ +static int scsi_internal_device_unblock(struct scsi_device *sdev, + enum scsi_device_state new_state) +{ + int ret; + + mutex_lock(&sdev->state_mutex); + ret = scsi_internal_device_unblock_nowait(sdev, new_state); + mutex_unlock(&sdev->state_mutex); + + return ret; +} + +static int +target_block(struct device *dev, void *data) +{ + if (scsi_is_target_device(dev)) + starget_for_each_device(to_scsi_target(dev), NULL, + scsi_device_block); + return 0; +} + +/** + * scsi_block_targets - transition all SCSI child devices to SDEV_BLOCK state + * @dev: a parent device of one or more scsi_target devices + * @shost: the Scsi_Host to which this device belongs + * + * Iterate over all children of @dev, which should be scsi_target devices, + * and switch all subordinate scsi devices to SDEV_BLOCK state. Wait for + * ongoing scsi_queue_rq() calls to finish. May sleep. + * + * Note: + * @dev must not itself be a scsi_target device. + */ +void +scsi_block_targets(struct Scsi_Host *shost, struct device *dev) +{ + WARN_ON_ONCE(scsi_is_target_device(dev)); + device_for_each_child(dev, NULL, target_block); + blk_mq_wait_quiesce_done(&shost->tag_set); +} +EXPORT_SYMBOL_GPL(scsi_block_targets); + +static void +device_unblock(struct scsi_device *sdev, void *data) +{ + scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data); +} + +static int +target_unblock(struct device *dev, void *data) +{ + if (scsi_is_target_device(dev)) + starget_for_each_device(to_scsi_target(dev), data, + device_unblock); + return 0; +} + +void +scsi_target_unblock(struct device *dev, enum scsi_device_state new_state) +{ + if (scsi_is_target_device(dev)) + starget_for_each_device(to_scsi_target(dev), &new_state, + device_unblock); + else + device_for_each_child(dev, &new_state, target_unblock); +} +EXPORT_SYMBOL_GPL(scsi_target_unblock); + +/** + * scsi_host_block - Try to transition all logical units to the SDEV_BLOCK state + * @shost: device to block + * + * Pause SCSI command processing for all logical units associated with the SCSI + * host and wait until pending scsi_queue_rq() calls have finished. + * + * Returns zero if successful or a negative error code upon failure. + */ +int +scsi_host_block(struct Scsi_Host *shost) +{ + struct scsi_device *sdev; + int ret; + + /* + * Call scsi_internal_device_block_nowait so we can avoid + * calling synchronize_rcu() for each LUN. + */ + shost_for_each_device(sdev, shost) { + mutex_lock(&sdev->state_mutex); + ret = scsi_internal_device_block_nowait(sdev); + mutex_unlock(&sdev->state_mutex); + if (ret) { + scsi_device_put(sdev); + return ret; + } + } + + /* Wait for ongoing scsi_queue_rq() calls to finish. 
*/ + blk_mq_wait_quiesce_done(&shost->tag_set); + + return 0; +} +EXPORT_SYMBOL_GPL(scsi_host_block); + +int +scsi_host_unblock(struct Scsi_Host *shost, int new_state) +{ + struct scsi_device *sdev; + int ret = 0; + + shost_for_each_device(sdev, shost) { + ret = scsi_internal_device_unblock(sdev, new_state); + if (ret) { + scsi_device_put(sdev); + break; + } + } + return ret; +} +EXPORT_SYMBOL_GPL(scsi_host_unblock); + +/** + * scsi_kmap_atomic_sg - find and atomically map an sg-elemnt + * @sgl: scatter-gather list + * @sg_count: number of segments in sg + * @offset: offset in bytes into sg, on return offset into the mapped area + * @len: bytes to map, on return number of bytes mapped + * + * Returns virtual address of the start of the mapped page + */ +void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, + size_t *offset, size_t *len) +{ + int i; + size_t sg_len = 0, len_complete = 0; + struct scatterlist *sg; + struct page *page; + + WARN_ON(!irqs_disabled()); + + for_each_sg(sgl, sg, sg_count, i) { + len_complete = sg_len; /* Complete sg-entries */ + sg_len += sg->length; + if (sg_len > *offset) + break; + } + + if (unlikely(i == sg_count)) { + printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " + "elements %d\n", + __func__, sg_len, *offset, sg_count); + WARN_ON(1); + return NULL; + } + + /* Offset starting from the beginning of first page in this sg-entry */ + *offset = *offset - len_complete + sg->offset; + + /* Assumption: contiguous pages can be accessed as "page + i" */ + page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT)); + *offset &= ~PAGE_MASK; + + /* Bytes in this sg-entry from *offset to the end of the page */ + sg_len = PAGE_SIZE - *offset; + if (*len > sg_len) + *len = sg_len; + + return kmap_atomic(page); +} +EXPORT_SYMBOL(scsi_kmap_atomic_sg); + +/** + * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg + * @virt: virtual address to be unmapped + */ +void scsi_kunmap_atomic_sg(void *virt) +{ + kunmap_atomic(virt); +} +EXPORT_SYMBOL(scsi_kunmap_atomic_sg); + +void sdev_disable_disk_events(struct scsi_device *sdev) +{ + atomic_inc(&sdev->disk_events_disable_depth); +} +EXPORT_SYMBOL(sdev_disable_disk_events); + +void sdev_enable_disk_events(struct scsi_device *sdev) +{ + if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0)) + return; + atomic_dec(&sdev->disk_events_disable_depth); +} +EXPORT_SYMBOL(sdev_enable_disk_events); + +static unsigned char designator_prio(const unsigned char *d) +{ + if (d[1] & 0x30) + /* not associated with LUN */ + return 0; + + if (d[3] == 0) + /* invalid length */ + return 0; + + /* + * Order of preference for lun descriptor: + * - SCSI name string + * - NAA IEEE Registered Extended + * - EUI-64 based 16-byte + * - EUI-64 based 12-byte + * - NAA IEEE Registered + * - NAA IEEE Extended + * - EUI-64 based 8-byte + * - SCSI name string (truncated) + * - T10 Vendor ID + * as longer descriptors reduce the likelyhood + * of identification clashes. 
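+ *
+ * For example, a unit that reports both a T10 vendor ID descriptor and
+ * an NAA IEEE Registered Extended descriptor (d[1] & 0xf == 3,
+ * d[4] >> 4 == 6) is identified by the latter: priority 8 beats
+ * priority 1, so scsi_vpd_lun_id() below emits a "naa.6..." identifier
+ * rather than a "t10...." one.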
+ */ + + switch (d[1] & 0xf) { + case 8: + /* SCSI name string, variable-length UTF-8 */ + return 9; + case 3: + switch (d[4] >> 4) { + case 6: + /* NAA registered extended */ + return 8; + case 5: + /* NAA registered */ + return 5; + case 4: + /* NAA extended */ + return 4; + case 3: + /* NAA locally assigned */ + return 1; + default: + break; + } + break; + case 2: + switch (d[3]) { + case 16: + /* EUI64-based, 16 byte */ + return 7; + case 12: + /* EUI64-based, 12 byte */ + return 6; + case 8: + /* EUI64-based, 8 byte */ + return 3; + default: + break; + } + break; + case 1: + /* T10 vendor ID */ + return 1; + default: + break; + } + + return 0; +} + +/** + * scsi_vpd_lun_id - return a unique device identification + * @sdev: SCSI device + * @id: buffer for the identification + * @id_len: length of the buffer + * + * Copies a unique device identification into @id based + * on the information in the VPD page 0x83 of the device. + * The string will be formatted as a SCSI name string. + * + * Returns the length of the identification or error on failure. + * If the identifier is longer than the supplied buffer the actual + * identifier length is returned and the buffer is not zero-padded. + */ +int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) +{ + u8 cur_id_prio = 0; + u8 cur_id_size = 0; + const unsigned char *d, *cur_id_str; + const struct scsi_vpd *vpd_pg83; + int id_size = -EINVAL; + + rcu_read_lock(); + vpd_pg83 = rcu_dereference(sdev->vpd_pg83); + if (!vpd_pg83) { + rcu_read_unlock(); + return -ENXIO; + } + + /* The id string must be at least 20 bytes + terminating NULL byte */ + if (id_len < 21) { + rcu_read_unlock(); + return -EINVAL; + } + + memset(id, 0, id_len); + for (d = vpd_pg83->data + 4; + d < vpd_pg83->data + vpd_pg83->len; + d += d[3] + 4) { + u8 prio = designator_prio(d); + + if (prio == 0 || cur_id_prio > prio) + continue; + + switch (d[1] & 0xf) { + case 0x1: + /* T10 Vendor ID */ + if (cur_id_size > d[3]) + break; + cur_id_prio = prio; + cur_id_size = d[3]; + if (cur_id_size + 4 > id_len) + cur_id_size = id_len - 4; + cur_id_str = d + 4; + id_size = snprintf(id, id_len, "t10.%*pE", + cur_id_size, cur_id_str); + break; + case 0x2: + /* EUI-64 */ + cur_id_prio = prio; + cur_id_size = d[3]; + cur_id_str = d + 4; + switch (cur_id_size) { + case 8: + id_size = snprintf(id, id_len, + "eui.%8phN", + cur_id_str); + break; + case 12: + id_size = snprintf(id, id_len, + "eui.%12phN", + cur_id_str); + break; + case 16: + id_size = snprintf(id, id_len, + "eui.%16phN", + cur_id_str); + break; + default: + break; + } + break; + case 0x3: + /* NAA */ + cur_id_prio = prio; + cur_id_size = d[3]; + cur_id_str = d + 4; + switch (cur_id_size) { + case 8: + id_size = snprintf(id, id_len, + "naa.%8phN", + cur_id_str); + break; + case 16: + id_size = snprintf(id, id_len, + "naa.%16phN", + cur_id_str); + break; + default: + break; + } + break; + case 0x8: + /* SCSI name string */ + if (cur_id_size > d[3]) + break; + /* Prefer others for truncated descriptor */ + if (d[3] > id_len) { + prio = 2; + if (cur_id_prio > prio) + break; + } + cur_id_prio = prio; + cur_id_size = id_size = d[3]; + cur_id_str = d + 4; + if (cur_id_size >= id_len) + cur_id_size = id_len - 1; + memcpy(id, cur_id_str, cur_id_size); + break; + default: + break; + } + } + rcu_read_unlock(); + + return id_size; +} +EXPORT_SYMBOL(scsi_vpd_lun_id); + +/* + * scsi_vpd_tpg_id - return a target port group identifier + * @sdev: SCSI device + * + * Returns the Target Port Group identifier from the information + * 
froom VPD page 0x83 of the device. + * + * Returns the identifier or error on failure. + */ +int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id) +{ + const unsigned char *d; + const struct scsi_vpd *vpd_pg83; + int group_id = -EAGAIN, rel_port = -1; + + rcu_read_lock(); + vpd_pg83 = rcu_dereference(sdev->vpd_pg83); + if (!vpd_pg83) { + rcu_read_unlock(); + return -ENXIO; + } + + d = vpd_pg83->data + 4; + while (d < vpd_pg83->data + vpd_pg83->len) { + switch (d[1] & 0xf) { + case 0x4: + /* Relative target port */ + rel_port = get_unaligned_be16(&d[6]); + break; + case 0x5: + /* Target port group */ + group_id = get_unaligned_be16(&d[6]); + break; + default: + break; + } + d += d[3] + 4; + } + rcu_read_unlock(); + + if (group_id >= 0 && rel_id && rel_port != -1) + *rel_id = rel_port; + + return group_id; +} +EXPORT_SYMBOL(scsi_vpd_tpg_id); + +/** + * scsi_build_sense - build sense data for a command + * @scmd: scsi command for which the sense should be formatted + * @desc: Sense format (non-zero == descriptor format, + * 0 == fixed format) + * @key: Sense key + * @asc: Additional sense code + * @ascq: Additional sense code qualifier + * + **/ +void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq) +{ + scsi_build_sense_buffer(desc, scmd->sense_buffer, key, asc, ascq); + scmd->result = SAM_STAT_CHECK_CONDITION; +} +EXPORT_SYMBOL_GPL(scsi_build_sense); diff --git a/drivers/scsi/scsi_lib_dma.c b/drivers/scsi/scsi_lib_dma.c new file mode 100644 index 000000000..572391527 --- /dev/null +++ b/drivers/scsi/scsi_lib_dma.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SCSI library functions depending on DMA + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +/** + * scsi_dma_map - perform DMA mapping against command's sg lists + * @cmd: scsi command + * + * Returns the number of sg lists actually used, zero if the sg lists + * is NULL, or -ENOMEM if the mapping failed. 
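+ *
+ * Typical LLD usage in queuecommand (a sketch; program_sg_entry() and
+ * hw stand in for the driver's hardware-specific code):
+ *
+ *        struct scatterlist *sg;
+ *        int i, nseg;
+ *
+ *        nseg = scsi_dma_map(cmd);
+ *        if (nseg < 0)
+ *                return SCSI_MLQUEUE_HOST_BUSY;
+ *        scsi_for_each_sg(cmd, sg, nseg, i)
+ *                program_sg_entry(hw, sg_dma_address(sg), sg_dma_len(sg));
+ *
+ * with a matching scsi_dma_unmap(cmd) once the command completes.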
+ */ +int scsi_dma_map(struct scsi_cmnd *cmd) +{ + int nseg = 0; + + if (scsi_sg_count(cmd)) { + struct device *dev = cmd->device->host->dma_dev; + + nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), + cmd->sc_data_direction); + if (unlikely(!nseg)) + return -ENOMEM; + } + return nseg; +} +EXPORT_SYMBOL(scsi_dma_map); + +/** + * scsi_dma_unmap - unmap command's sg lists mapped by scsi_dma_map + * @cmd: scsi command + */ +void scsi_dma_unmap(struct scsi_cmnd *cmd) +{ + if (scsi_sg_count(cmd)) { + struct device *dev = cmd->device->host->dma_dev; + + dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), + cmd->sc_data_direction); + } +} +EXPORT_SYMBOL(scsi_dma_unmap); diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c new file mode 100644 index 000000000..b02af340c --- /dev/null +++ b/drivers/scsi/scsi_logging.c @@ -0,0 +1,439 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * scsi_logging.c + * + * Copyright (C) 2014 SUSE Linux Products GmbH + * Copyright (C) 2014 Hannes Reinecke + */ + +#include +#include + +#include +#include +#include +#include +#include + +static char *scsi_log_reserve_buffer(size_t *len) +{ + *len = 128; + return kmalloc(*len, GFP_ATOMIC); +} + +static void scsi_log_release_buffer(char *bufptr) +{ + kfree(bufptr); +} + +static inline const char *scmd_name(const struct scsi_cmnd *scmd) +{ + struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd); + + if (!rq->q || !rq->q->disk) + return NULL; + return rq->q->disk->disk_name; +} + +static size_t sdev_format_header(char *logbuf, size_t logbuf_len, + const char *name, int tag) +{ + size_t off = 0; + + if (name) + off += scnprintf(logbuf + off, logbuf_len - off, + "[%s] ", name); + + if (WARN_ON(off >= logbuf_len)) + return off; + + if (tag >= 0) + off += scnprintf(logbuf + off, logbuf_len - off, + "tag#%d ", tag); + return off; +} + +void sdev_prefix_printk(const char *level, const struct scsi_device *sdev, + const char *name, const char *fmt, ...) +{ + va_list args; + char *logbuf; + size_t off = 0, logbuf_len; + + if (!sdev) + return; + + logbuf = scsi_log_reserve_buffer(&logbuf_len); + if (!logbuf) + return; + + if (name) + off += scnprintf(logbuf + off, logbuf_len - off, + "[%s] ", name); + if (!WARN_ON(off >= logbuf_len)) { + va_start(args, fmt); + off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args); + va_end(args); + } + dev_printk(level, &sdev->sdev_gendev, "%s", logbuf); + scsi_log_release_buffer(logbuf); +} +EXPORT_SYMBOL(sdev_prefix_printk); + +void scmd_printk(const char *level, const struct scsi_cmnd *scmd, + const char *fmt, ...) 
+{ + va_list args; + char *logbuf; + size_t off = 0, logbuf_len; + + if (!scmd) + return; + + logbuf = scsi_log_reserve_buffer(&logbuf_len); + if (!logbuf) + return; + off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd), + scsi_cmd_to_rq((struct scsi_cmnd *)scmd)->tag); + if (off < logbuf_len) { + va_start(args, fmt); + off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args); + va_end(args); + } + dev_printk(level, &scmd->device->sdev_gendev, "%s", logbuf); + scsi_log_release_buffer(logbuf); +} +EXPORT_SYMBOL(scmd_printk); + +static size_t scsi_format_opcode_name(char *buffer, size_t buf_len, + const unsigned char *cdbp) +{ + int sa, cdb0; + const char *cdb_name = NULL, *sa_name = NULL; + size_t off; + + cdb0 = cdbp[0]; + if (cdb0 == VARIABLE_LENGTH_CMD) { + int len = scsi_varlen_cdb_length(cdbp); + + if (len < 10) { + off = scnprintf(buffer, buf_len, + "short variable length command, len=%d", + len); + return off; + } + sa = (cdbp[8] << 8) + cdbp[9]; + } else + sa = cdbp[1] & 0x1f; + + if (!scsi_opcode_sa_name(cdb0, sa, &cdb_name, &sa_name)) { + if (cdb_name) + off = scnprintf(buffer, buf_len, "%s", cdb_name); + else { + off = scnprintf(buffer, buf_len, "opcode=0x%x", cdb0); + if (WARN_ON(off >= buf_len)) + return off; + if (cdb0 >= VENDOR_SPECIFIC_CDB) + off += scnprintf(buffer + off, buf_len - off, + " (vendor)"); + else if (cdb0 >= 0x60 && cdb0 < 0x7e) + off += scnprintf(buffer + off, buf_len - off, + " (reserved)"); + } + } else { + if (sa_name) + off = scnprintf(buffer, buf_len, "%s", sa_name); + else if (cdb_name) + off = scnprintf(buffer, buf_len, "%s, sa=0x%x", + cdb_name, sa); + else + off = scnprintf(buffer, buf_len, + "opcode=0x%x, sa=0x%x", cdb0, sa); + } + WARN_ON(off >= buf_len); + return off; +} + +size_t __scsi_format_command(char *logbuf, size_t logbuf_len, + const unsigned char *cdb, size_t cdb_len) +{ + int len, k; + size_t off; + + off = scsi_format_opcode_name(logbuf, logbuf_len, cdb); + if (off >= logbuf_len) + return off; + len = scsi_command_size(cdb); + if (cdb_len < len) + len = cdb_len; + /* print out all bytes in cdb */ + for (k = 0; k < len; ++k) { + if (off > logbuf_len - 3) + break; + off += scnprintf(logbuf + off, logbuf_len - off, + " %02x", cdb[k]); + } + return off; +} +EXPORT_SYMBOL(__scsi_format_command); + +void scsi_print_command(struct scsi_cmnd *cmd) +{ + int k; + char *logbuf; + size_t off, logbuf_len; + + logbuf = scsi_log_reserve_buffer(&logbuf_len); + if (!logbuf) + return; + + off = sdev_format_header(logbuf, logbuf_len, + scmd_name(cmd), scsi_cmd_to_rq(cmd)->tag); + if (off >= logbuf_len) + goto out_printk; + off += scnprintf(logbuf + off, logbuf_len - off, "CDB: "); + if (WARN_ON(off >= logbuf_len)) + goto out_printk; + + off += scsi_format_opcode_name(logbuf + off, logbuf_len - off, + cmd->cmnd); + if (off >= logbuf_len) + goto out_printk; + + /* print out all bytes in cdb */ + if (cmd->cmd_len > 16) { + /* Print opcode in one line and use separate lines for CDB */ + off += scnprintf(logbuf + off, logbuf_len - off, "\n"); + dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf); + for (k = 0; k < cmd->cmd_len; k += 16) { + size_t linelen = min(cmd->cmd_len - k, 16); + + off = sdev_format_header(logbuf, logbuf_len, + scmd_name(cmd), + scsi_cmd_to_rq(cmd)->tag); + if (!WARN_ON(off > logbuf_len - 58)) { + off += scnprintf(logbuf + off, logbuf_len - off, + "CDB[%02x]: ", k); + hex_dump_to_buffer(&cmd->cmnd[k], linelen, + 16, 1, logbuf + off, + logbuf_len - off, false); + } + dev_printk(KERN_INFO, &cmd->device->sdev_gendev, 
"%s", + logbuf); + } + goto out; + } + if (!WARN_ON(off > logbuf_len - 49)) { + off += scnprintf(logbuf + off, logbuf_len - off, " "); + hex_dump_to_buffer(cmd->cmnd, cmd->cmd_len, 16, 1, + logbuf + off, logbuf_len - off, + false); + } +out_printk: + dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf); +out: + scsi_log_release_buffer(logbuf); +} +EXPORT_SYMBOL(scsi_print_command); + +static size_t +scsi_format_extd_sense(char *buffer, size_t buf_len, + unsigned char asc, unsigned char ascq) +{ + size_t off = 0; + const char *extd_sense_fmt = NULL; + const char *extd_sense_str = scsi_extd_sense_format(asc, ascq, + &extd_sense_fmt); + + if (extd_sense_str) { + off = scnprintf(buffer, buf_len, "Add. Sense: %s", + extd_sense_str); + if (extd_sense_fmt) + off += scnprintf(buffer + off, buf_len - off, + "(%s%x)", extd_sense_fmt, ascq); + } else { + if (asc >= 0x80) + off = scnprintf(buffer, buf_len, "<>"); + off += scnprintf(buffer + off, buf_len - off, + "ASC=0x%x ", asc); + if (ascq >= 0x80) + off += scnprintf(buffer + off, buf_len - off, + "<>"); + off += scnprintf(buffer + off, buf_len - off, + "ASCQ=0x%x ", ascq); + } + return off; +} + +static size_t +scsi_format_sense_hdr(char *buffer, size_t buf_len, + const struct scsi_sense_hdr *sshdr) +{ + const char *sense_txt; + size_t off; + + off = scnprintf(buffer, buf_len, "Sense Key : "); + sense_txt = scsi_sense_key_string(sshdr->sense_key); + if (sense_txt) + off += scnprintf(buffer + off, buf_len - off, + "%s ", sense_txt); + else + off += scnprintf(buffer + off, buf_len - off, + "0x%x ", sshdr->sense_key); + off += scnprintf(buffer + off, buf_len - off, + scsi_sense_is_deferred(sshdr) ? "[deferred] " : "[current] "); + + if (sshdr->response_code >= 0x72) + off += scnprintf(buffer + off, buf_len - off, "[descriptor] "); + return off; +} + +static void +scsi_log_dump_sense(const struct scsi_device *sdev, const char *name, int tag, + const unsigned char *sense_buffer, int sense_len) +{ + char *logbuf; + size_t logbuf_len; + int i; + + logbuf = scsi_log_reserve_buffer(&logbuf_len); + if (!logbuf) + return; + + for (i = 0; i < sense_len; i += 16) { + int len = min(sense_len - i, 16); + size_t off; + + off = sdev_format_header(logbuf, logbuf_len, + name, tag); + hex_dump_to_buffer(&sense_buffer[i], len, 16, 1, + logbuf + off, logbuf_len - off, + false); + dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf); + } + scsi_log_release_buffer(logbuf); +} + +static void +scsi_log_print_sense_hdr(const struct scsi_device *sdev, const char *name, + int tag, const struct scsi_sense_hdr *sshdr) +{ + char *logbuf; + size_t off, logbuf_len; + + logbuf = scsi_log_reserve_buffer(&logbuf_len); + if (!logbuf) + return; + off = sdev_format_header(logbuf, logbuf_len, name, tag); + off += scsi_format_sense_hdr(logbuf + off, logbuf_len - off, sshdr); + dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf); + scsi_log_release_buffer(logbuf); + + logbuf = scsi_log_reserve_buffer(&logbuf_len); + if (!logbuf) + return; + off = sdev_format_header(logbuf, logbuf_len, name, tag); + off += scsi_format_extd_sense(logbuf + off, logbuf_len - off, + sshdr->asc, sshdr->ascq); + dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf); + scsi_log_release_buffer(logbuf); +} + +static void +scsi_log_print_sense(const struct scsi_device *sdev, const char *name, int tag, + const unsigned char *sense_buffer, int sense_len) +{ + struct scsi_sense_hdr sshdr; + + if (scsi_normalize_sense(sense_buffer, sense_len, &sshdr)) + scsi_log_print_sense_hdr(sdev, name, tag, &sshdr); 
+ else + scsi_log_dump_sense(sdev, name, tag, sense_buffer, sense_len); +} + +/* + * Print normalized SCSI sense header with a prefix. + */ +void +scsi_print_sense_hdr(const struct scsi_device *sdev, const char *name, + const struct scsi_sense_hdr *sshdr) +{ + scsi_log_print_sense_hdr(sdev, name, -1, sshdr); +} +EXPORT_SYMBOL(scsi_print_sense_hdr); + +/* Normalize and print sense buffer with name prefix */ +void __scsi_print_sense(const struct scsi_device *sdev, const char *name, + const unsigned char *sense_buffer, int sense_len) +{ + scsi_log_print_sense(sdev, name, -1, sense_buffer, sense_len); +} +EXPORT_SYMBOL(__scsi_print_sense); + +/* Normalize and print sense buffer in SCSI command */ +void scsi_print_sense(const struct scsi_cmnd *cmd) +{ + scsi_log_print_sense(cmd->device, scmd_name(cmd), + scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag, + cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); +} +EXPORT_SYMBOL(scsi_print_sense); + +void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg, + int disposition) +{ + char *logbuf; + size_t off, logbuf_len; + const char *mlret_string = scsi_mlreturn_string(disposition); + const char *hb_string = scsi_hostbyte_string(cmd->result); + unsigned long cmd_age = (jiffies - cmd->jiffies_at_alloc) / HZ; + + logbuf = scsi_log_reserve_buffer(&logbuf_len); + if (!logbuf) + return; + + off = sdev_format_header(logbuf, logbuf_len, scmd_name(cmd), + scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag); + + if (off >= logbuf_len) + goto out_printk; + + if (msg) { + off += scnprintf(logbuf + off, logbuf_len - off, + "%s: ", msg); + if (WARN_ON(off >= logbuf_len)) + goto out_printk; + } + if (mlret_string) + off += scnprintf(logbuf + off, logbuf_len - off, + "%s ", mlret_string); + else + off += scnprintf(logbuf + off, logbuf_len - off, + "UNKNOWN(0x%02x) ", disposition); + if (WARN_ON(off >= logbuf_len)) + goto out_printk; + + off += scnprintf(logbuf + off, logbuf_len - off, "Result: "); + if (WARN_ON(off >= logbuf_len)) + goto out_printk; + + if (hb_string) + off += scnprintf(logbuf + off, logbuf_len - off, + "hostbyte=%s ", hb_string); + else + off += scnprintf(logbuf + off, logbuf_len - off, + "hostbyte=0x%02x ", host_byte(cmd->result)); + if (WARN_ON(off >= logbuf_len)) + goto out_printk; + + off += scnprintf(logbuf + off, logbuf_len - off, + "driverbyte=DRIVER_OK "); + + off += scnprintf(logbuf + off, logbuf_len - off, + "cmd_age=%lus", cmd_age); + +out_printk: + dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf); + scsi_log_release_buffer(logbuf); +} +EXPORT_SYMBOL(scsi_print_result); diff --git a/drivers/scsi/scsi_logging.h b/drivers/scsi/scsi_logging.h new file mode 100644 index 000000000..3df877886 --- /dev/null +++ b/drivers/scsi/scsi_logging.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SCSI_LOGGING_H +#define _SCSI_LOGGING_H + + +/* + * This defines the scsi logging feature. It is a means by which the user can + * select how much information they get about various goings on, and it can be + * really useful for fault tracing. The logging word is divided into 10 3-bit + * bitfields, each of which describes a loglevel. The division of things is + * somewhat arbitrary, and the division of the word could be changed if it + * were really needed for any reason. The numbers below are the only place + * where these are specified. For a first go-around, 3 bits is more than + * enough, since this gives 8 levels of logging (really 7, since 0 is always + * off). Cutting to 2 bits might be wise at some point. 
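A minimal sketch (editorial, not part of the patch) of how a logging word laid out as described above can be composed from the shift macros defined just below; the chosen levels are arbitrary. The resulting value would typically be fed to the scsi_logging_level knob (sysctl or module parameter), but verify the exact interface on your kernel.

/* Illustrative only: verbose error handling, moderate scan logging,
 * minimal mid-level completion logging.
 */
unsigned int logging_word =
        (3 << SCSI_LOG_ERROR_SHIFT) |
        (2 << SCSI_LOG_SCAN_SHIFT)  |
        (1 << SCSI_LOG_MLCOMPLETE_SHIFT);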
+ */ + +#define SCSI_LOG_ERROR_SHIFT 0 +#define SCSI_LOG_TIMEOUT_SHIFT 3 +#define SCSI_LOG_SCAN_SHIFT 6 +#define SCSI_LOG_MLQUEUE_SHIFT 9 +#define SCSI_LOG_MLCOMPLETE_SHIFT 12 +#define SCSI_LOG_LLQUEUE_SHIFT 15 +#define SCSI_LOG_LLCOMPLETE_SHIFT 18 +#define SCSI_LOG_HLQUEUE_SHIFT 21 +#define SCSI_LOG_HLCOMPLETE_SHIFT 24 +#define SCSI_LOG_IOCTL_SHIFT 27 + +#define SCSI_LOG_ERROR_BITS 3 +#define SCSI_LOG_TIMEOUT_BITS 3 +#define SCSI_LOG_SCAN_BITS 3 +#define SCSI_LOG_MLQUEUE_BITS 3 +#define SCSI_LOG_MLCOMPLETE_BITS 3 +#define SCSI_LOG_LLQUEUE_BITS 3 +#define SCSI_LOG_LLCOMPLETE_BITS 3 +#define SCSI_LOG_HLQUEUE_BITS 3 +#define SCSI_LOG_HLCOMPLETE_BITS 3 +#define SCSI_LOG_IOCTL_BITS 3 + +extern unsigned int scsi_logging_level; + +#ifdef CONFIG_SCSI_LOGGING + +#define SCSI_LOG_LEVEL(SHIFT, BITS) \ + ((scsi_logging_level >> (SHIFT)) & ((1 << (BITS)) - 1)) + +#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) \ +do { \ + if (unlikely((SCSI_LOG_LEVEL(SHIFT, BITS)) > (LEVEL))) \ + do { \ + CMD; \ + } while (0); \ +} while (0) +#else +#define SCSI_LOG_LEVEL(SHIFT, BITS) 0 +#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) do { } while (0) +#endif /* CONFIG_SCSI_LOGGING */ + +/* + * These are the macros that are actually used throughout the code to + * log events. If logging isn't enabled, they are no-ops and will be + * completely absent from the user's code. + */ +#define SCSI_LOG_ERROR_RECOVERY(LEVEL,CMD) \ + SCSI_CHECK_LOGGING(SCSI_LOG_ERROR_SHIFT, SCSI_LOG_ERROR_BITS, LEVEL,CMD); +#define SCSI_LOG_TIMEOUT(LEVEL,CMD) \ + SCSI_CHECK_LOGGING(SCSI_LOG_TIMEOUT_SHIFT, SCSI_LOG_TIMEOUT_BITS, LEVEL,CMD); +#define SCSI_LOG_SCAN_BUS(LEVEL,CMD) \ + SCSI_CHECK_LOGGING(SCSI_LOG_SCAN_SHIFT, SCSI_LOG_SCAN_BITS, LEVEL,CMD); +#define SCSI_LOG_MLQUEUE(LEVEL,CMD) \ + SCSI_CHECK_LOGGING(SCSI_LOG_MLQUEUE_SHIFT, SCSI_LOG_MLQUEUE_BITS, LEVEL,CMD); +#define SCSI_LOG_MLCOMPLETE(LEVEL,CMD) \ + SCSI_CHECK_LOGGING(SCSI_LOG_MLCOMPLETE_SHIFT, SCSI_LOG_MLCOMPLETE_BITS, LEVEL,CMD); +#define SCSI_LOG_LLQUEUE(LEVEL,CMD) \ + SCSI_CHECK_LOGGING(SCSI_LOG_LLQUEUE_SHIFT, SCSI_LOG_LLQUEUE_BITS, LEVEL,CMD); +#define SCSI_LOG_LLCOMPLETE(LEVEL,CMD) \ + SCSI_CHECK_LOGGING(SCSI_LOG_LLCOMPLETE_SHIFT, SCSI_LOG_LLCOMPLETE_BITS, LEVEL,CMD); +#define SCSI_LOG_HLQUEUE(LEVEL,CMD) \ + SCSI_CHECK_LOGGING(SCSI_LOG_HLQUEUE_SHIFT, SCSI_LOG_HLQUEUE_BITS, LEVEL,CMD); +#define SCSI_LOG_HLCOMPLETE(LEVEL,CMD) \ + SCSI_CHECK_LOGGING(SCSI_LOG_HLCOMPLETE_SHIFT, SCSI_LOG_HLCOMPLETE_BITS, LEVEL,CMD); +#define SCSI_LOG_IOCTL(LEVEL,CMD) \ + SCSI_CHECK_LOGGING(SCSI_LOG_IOCTL_SHIFT, SCSI_LOG_IOCTL_BITS, LEVEL,CMD); + +#endif /* _SCSI_LOGGING_H */ diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c new file mode 100644 index 000000000..d7f76fd84 --- /dev/null +++ b/drivers/scsi/scsi_netlink.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * scsi_netlink.c - SCSI Transport Netlink Interface + * + * Copyright (C) 2006 James Smart, Emulex Corporation + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "scsi_priv.h" + +struct sock *scsi_nl_sock = NULL; +EXPORT_SYMBOL_GPL(scsi_nl_sock); + +/** + * scsi_nl_rcv_msg - Receive message handler. + * @skb: socket receive buffer + * + * Description: Extracts message from a receive buffer. 
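For context, a hedged sketch of how mid-layer code invokes the SCSI_LOG_* wrappers defined above; the wrapper and sdev_printk() are real, while the function name and message text are made up. The printk argument is only evaluated when the configured scan-bus level is greater than 3.

static void my_scan_trace(struct scsi_device *sdev, u64 lun)    /* hypothetical */
{
        SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
                                         "scsi scan: probing LUN %llu\n", lun));
}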
+ * Validates message header and calls appropriate transport message handler + * + * + **/ +static void +scsi_nl_rcv_msg(struct sk_buff *skb) +{ + struct nlmsghdr *nlh; + struct scsi_nl_hdr *hdr; + u32 rlen; + int err, tport; + + while (skb->len >= NLMSG_HDRLEN) { + err = 0; + + nlh = nlmsg_hdr(skb); + if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) || + (skb->len < nlh->nlmsg_len)) { + printk(KERN_WARNING "%s: discarding partial skb\n", + __func__); + return; + } + + rlen = NLMSG_ALIGN(nlh->nlmsg_len); + if (rlen > skb->len) + rlen = skb->len; + + if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) { + err = -EBADMSG; + goto next_msg; + } + + hdr = nlmsg_data(nlh); + if ((hdr->version != SCSI_NL_VERSION) || + (hdr->magic != SCSI_NL_MAGIC)) { + err = -EPROTOTYPE; + goto next_msg; + } + + if (!netlink_capable(skb, CAP_SYS_ADMIN)) { + err = -EPERM; + goto next_msg; + } + + if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) { + printk(KERN_WARNING "%s: discarding partial message\n", + __func__); + goto next_msg; + } + + /* + * Deliver message to the appropriate transport + */ + tport = hdr->transport; + if (tport == SCSI_NL_TRANSPORT) { + switch (hdr->msgtype) { + case SCSI_NL_SHOST_VENDOR: + /* Locate the driver that corresponds to the message */ + err = -ESRCH; + break; + default: + err = -EBADR; + break; + } + if (err) + printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n", + __func__, hdr->msgtype, err); + } + else + err = -ENOENT; + +next_msg: + if ((err) || (nlh->nlmsg_flags & NLM_F_ACK)) + netlink_ack(skb, nlh, err, NULL); + + skb_pull(skb, rlen); + } +} + +/** + * scsi_netlink_init - Called by SCSI subsystem to initialize + * the SCSI transport netlink interface + * + **/ +void +scsi_netlink_init(void) +{ + struct netlink_kernel_cfg cfg = { + .input = scsi_nl_rcv_msg, + .groups = SCSI_NL_GRP_CNT, + }; + + scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT, + &cfg); + if (!scsi_nl_sock) { + printk(KERN_ERR "%s: register of receive handler failed\n", + __func__); + return; + } + + return; +} + + +/** + * scsi_netlink_exit - Called by SCSI subsystem to disable the SCSI transport netlink interface + * + **/ +void +scsi_netlink_exit(void) +{ + if (scsi_nl_sock) { + netlink_kernel_release(scsi_nl_sock); + } + + return; +} + diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c new file mode 100644 index 000000000..d581613d8 --- /dev/null +++ b/drivers/scsi/scsi_pm.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * scsi_pm.c Copyright (C) 2010 Alan Stern + * + * SCSI dynamic Power Management + * Initial version: Alan Stern + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include "scsi_priv.h" + +#ifdef CONFIG_PM_SLEEP + +static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm) +{ + return pm && pm->suspend ? pm->suspend(dev) : 0; +} + +static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm) +{ + return pm && pm->freeze ? pm->freeze(dev) : 0; +} + +static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm) +{ + return pm && pm->poweroff ? pm->poweroff(dev) : 0; +} + +static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm) +{ + return pm && pm->resume ? pm->resume(dev) : 0; +} + +static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm) +{ + return pm && pm->thaw ? pm->thaw(dev) : 0; +} + +static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm) +{ + return pm && pm->restore ? 
pm->restore(dev) : 0; +} + +static int scsi_dev_type_suspend(struct device *dev, + int (*cb)(struct device *, const struct dev_pm_ops *)) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int err; + + err = scsi_device_quiesce(to_scsi_device(dev)); + if (err == 0) { + err = cb(dev, pm); + if (err) + scsi_device_resume(to_scsi_device(dev)); + } + dev_dbg(dev, "scsi suspend: %d\n", err); + return err; +} + +static int +scsi_bus_suspend_common(struct device *dev, + int (*cb)(struct device *, const struct dev_pm_ops *)) +{ + if (!scsi_is_sdev_device(dev)) + return 0; + + return scsi_dev_type_suspend(dev, cb); +} + +static int scsi_bus_resume_common(struct device *dev, + int (*cb)(struct device *, const struct dev_pm_ops *)) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int err; + + if (!scsi_is_sdev_device(dev)) + return 0; + + err = cb(dev, pm); + scsi_device_resume(to_scsi_device(dev)); + dev_dbg(dev, "scsi resume: %d\n", err); + + return err; +} + +static int scsi_bus_prepare(struct device *dev) +{ + if (scsi_is_host_device(dev)) { + /* Wait until async scanning is finished */ + scsi_complete_async_scans(); + } + return 0; +} + +static int scsi_bus_suspend(struct device *dev) +{ + return scsi_bus_suspend_common(dev, do_scsi_suspend); +} + +static int scsi_bus_resume(struct device *dev) +{ + return scsi_bus_resume_common(dev, do_scsi_resume); +} + +static int scsi_bus_freeze(struct device *dev) +{ + return scsi_bus_suspend_common(dev, do_scsi_freeze); +} + +static int scsi_bus_thaw(struct device *dev) +{ + return scsi_bus_resume_common(dev, do_scsi_thaw); +} + +static int scsi_bus_poweroff(struct device *dev) +{ + return scsi_bus_suspend_common(dev, do_scsi_poweroff); +} + +static int scsi_bus_restore(struct device *dev) +{ + return scsi_bus_resume_common(dev, do_scsi_restore); +} + +#else /* CONFIG_PM_SLEEP */ + +#define scsi_bus_prepare NULL +#define scsi_bus_suspend NULL +#define scsi_bus_resume NULL +#define scsi_bus_freeze NULL +#define scsi_bus_thaw NULL +#define scsi_bus_poweroff NULL +#define scsi_bus_restore NULL + +#endif /* CONFIG_PM_SLEEP */ + +static int sdev_runtime_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + struct scsi_device *sdev = to_scsi_device(dev); + int err = 0; + + err = blk_pre_runtime_suspend(sdev->request_queue); + if (err) + return err; + if (pm && pm->runtime_suspend) + err = pm->runtime_suspend(dev); + blk_post_runtime_suspend(sdev->request_queue, err); + + return err; +} + +static int scsi_runtime_suspend(struct device *dev) +{ + int err = 0; + + dev_dbg(dev, "scsi_runtime_suspend\n"); + if (scsi_is_sdev_device(dev)) + err = sdev_runtime_suspend(dev); + + /* Insert hooks here for targets, hosts, and transport classes */ + + return err; +} + +static int sdev_runtime_resume(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; + int err = 0; + + blk_pre_runtime_resume(sdev->request_queue); + if (pm && pm->runtime_resume) + err = pm->runtime_resume(dev); + blk_post_runtime_resume(sdev->request_queue); + + return err; +} + +static int scsi_runtime_resume(struct device *dev) +{ + int err = 0; + + dev_dbg(dev, "scsi_runtime_resume\n"); + if (scsi_is_sdev_device(dev)) + err = sdev_runtime_resume(dev); + + /* Insert hooks here for targets, hosts, and transport classes */ + + return err; +} + +static int scsi_runtime_idle(struct device *dev) +{ + dev_dbg(dev, "scsi_runtime_idle\n"); + + /* Insert hooks here for targets, hosts, and transport classes */ + + if (scsi_is_sdev_device(dev)) { + pm_runtime_mark_last_busy(dev); + pm_runtime_autosuspend(dev); + return -EBUSY; + } + + return 0; +} + +int scsi_autopm_get_device(struct scsi_device *sdev) +{ + int err; + + err = pm_runtime_get_sync(&sdev->sdev_gendev); + if (err < 0 && err !=-EACCES) + pm_runtime_put_sync(&sdev->sdev_gendev); + else + err = 0; + return err; +} +EXPORT_SYMBOL_GPL(scsi_autopm_get_device); + +void scsi_autopm_put_device(struct scsi_device *sdev) +{ + pm_runtime_put_sync(&sdev->sdev_gendev); +} +EXPORT_SYMBOL_GPL(scsi_autopm_put_device); + +void scsi_autopm_get_target(struct scsi_target *starget) +{ + pm_runtime_get_sync(&starget->dev); +} + +void scsi_autopm_put_target(struct scsi_target *starget) +{ + pm_runtime_put_sync(&starget->dev); +} + +int scsi_autopm_get_host(struct Scsi_Host *shost) +{ + int err; + + err = pm_runtime_get_sync(&shost->shost_gendev); + if (err < 0 && err !=-EACCES) + pm_runtime_put_sync(&shost->shost_gendev); + else + err = 0; + return err; +} + +void scsi_autopm_put_host(struct Scsi_Host *shost) +{ + pm_runtime_put_sync(&shost->shost_gendev); +} + +const struct dev_pm_ops scsi_bus_pm_ops = { + .prepare = scsi_bus_prepare, + .suspend = scsi_bus_suspend, + .resume = scsi_bus_resume, + .freeze = scsi_bus_freeze, + .thaw = scsi_bus_thaw, + .poweroff = scsi_bus_poweroff, + .restore = scsi_bus_restore, + .runtime_suspend = scsi_runtime_suspend, + .runtime_resume = scsi_runtime_resume, + .runtime_idle = scsi_runtime_idle, +}; diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h new file mode 100644 index 000000000..3f0dfb97d --- /dev/null +++ b/drivers/scsi/scsi_priv.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SCSI_PRIV_H +#define _SCSI_PRIV_H + +#include +#include +#include + +struct bsg_device; +struct request_queue; +struct request; +struct scsi_cmnd; +struct scsi_device; +struct scsi_target; +struct scsi_host_template; +struct Scsi_Host; +struct scsi_nl_hdr; + +#define SCSI_CMD_RETRIES_NO_LIMIT -1 + +/* + * Error codes used by scsi-ml internally. These must not be used by drivers. 
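A hedged usage sketch (not from this patch): an upper-level driver normally brackets ad-hoc I/O with the autopm helpers exported above so that a runtime-suspended device is resumed first. my_uld_do_io() and my_uld_send_cmd() are hypothetical names.

#include <scsi/scsi_device.h>

static int my_uld_send_cmd(struct scsi_device *sdev);   /* hypothetical, defined elsewhere */

static int my_uld_do_io(struct scsi_device *sdev)
{
        int ret;

        ret = scsi_autopm_get_device(sdev);     /* resume + take a PM reference */
        if (ret)
                return ret;

        ret = my_uld_send_cmd(sdev);            /* hypothetical command submission */

        scsi_autopm_put_device(sdev);           /* drop the PM reference again */
        return ret;
}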
+ */ +enum scsi_ml_status { + SCSIML_STAT_OK = 0x00, + SCSIML_STAT_RESV_CONFLICT = 0x01, /* Reservation conflict */ + SCSIML_STAT_NOSPC = 0x02, /* Space allocation on the dev failed */ + SCSIML_STAT_MED_ERROR = 0x03, /* Medium error */ + SCSIML_STAT_TGT_FAILURE = 0x04, /* Permanent target failure */ + SCSIML_STAT_DL_TIMEOUT = 0x05, /* Command Duration Limit timeout */ +}; + +static inline u8 scsi_ml_byte(int result) +{ + return (result >> 8) & 0xff; +} + +/* + * Scsi Error Handler Flags + */ +#define SCSI_EH_ABORT_SCHEDULED 0x0002 /* Abort has been scheduled */ + +#define SCSI_SENSE_VALID(scmd) \ + (((scmd)->sense_buffer[0] & 0x70) == 0x70) + +/* hosts.c */ +extern int scsi_init_hosts(void); +extern void scsi_exit_hosts(void); + +/* scsi.c */ +int scsi_init_sense_cache(struct Scsi_Host *shost); +void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd); +#ifdef CONFIG_SCSI_LOGGING +void scsi_log_send(struct scsi_cmnd *cmd); +void scsi_log_completion(struct scsi_cmnd *cmd, int disposition); +#else +static inline void scsi_log_send(struct scsi_cmnd *cmd) + { }; +static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) + { }; +#endif + +/* scsi_devinfo.c */ + +/* list of keys for the lists */ +enum scsi_devinfo_key { + SCSI_DEVINFO_GLOBAL = 0, + SCSI_DEVINFO_SPI, +}; + +extern blist_flags_t scsi_get_device_flags(struct scsi_device *sdev, + const unsigned char *vendor, + const unsigned char *model); +extern blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev, + const unsigned char *vendor, + const unsigned char *model, + enum scsi_devinfo_key key); +extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor, + char *model, char *strflags, + blist_flags_t flags, + enum scsi_devinfo_key key); +extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, + enum scsi_devinfo_key key); +extern int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name); +extern int scsi_dev_info_remove_list(enum scsi_devinfo_key key); + +extern int __init scsi_init_devinfo(void); +extern void scsi_exit_devinfo(void); + +/* scsi_error.c */ +extern void scmd_eh_abort_handler(struct work_struct *work); +extern enum blk_eh_timer_return scsi_timeout(struct request *req); +extern int scsi_error_handler(void *host); +extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd); +extern void scsi_eh_wakeup(struct Scsi_Host *shost); +extern void scsi_eh_scmd_add(struct scsi_cmnd *); +void scsi_eh_ready_devs(struct Scsi_Host *shost, + struct list_head *work_q, + struct list_head *done_q); +int scsi_eh_get_sense(struct list_head *work_q, + struct list_head *done_q); +bool scsi_noretry_cmd(struct scsi_cmnd *scmd); +void scsi_eh_done(struct scsi_cmnd *scmd); + +/* scsi_lib.c */ +extern void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd); +extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason); +extern void scsi_io_completion(struct scsi_cmnd *, unsigned int); +extern void scsi_run_host_queues(struct Scsi_Host *shost); +extern void scsi_requeue_run_queue(struct work_struct *work); +extern void scsi_start_queue(struct scsi_device *sdev); +extern int scsi_mq_setup_tags(struct Scsi_Host *shost); +extern void scsi_mq_free_tags(struct kref *kref); +extern void scsi_exit_queue(void); +extern void scsi_evt_thread(struct work_struct *work); + +/* scsi_proc.c */ +#ifdef CONFIG_SCSI_PROC_FS +extern int scsi_proc_hostdir_add(const struct scsi_host_template *); +extern void scsi_proc_hostdir_rm(const struct 
scsi_host_template *); +extern void scsi_proc_host_add(struct Scsi_Host *); +extern void scsi_proc_host_rm(struct Scsi_Host *); +extern int scsi_init_procfs(void); +extern void scsi_exit_procfs(void); +#else +# define scsi_proc_hostdir_add(sht) 0 +# define scsi_proc_hostdir_rm(sht) do { } while (0) +# define scsi_proc_host_add(shost) do { } while (0) +# define scsi_proc_host_rm(shost) do { } while (0) +# define scsi_init_procfs() (0) +# define scsi_exit_procfs() do { } while (0) +#endif /* CONFIG_PROC_FS */ + +/* scsi_scan.c */ +void scsi_enable_async_suspend(struct device *dev); +extern int scsi_complete_async_scans(void); +extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int, + unsigned int, u64, enum scsi_scan_mode); +extern void scsi_forget_host(struct Scsi_Host *); + +/* scsi_sysctl.c */ +#ifdef CONFIG_SYSCTL +extern int scsi_init_sysctl(void); +extern void scsi_exit_sysctl(void); +#else +# define scsi_init_sysctl() (0) +# define scsi_exit_sysctl() do { } while (0) +#endif /* CONFIG_SYSCTL */ + +/* scsi_sysfs.c */ +extern int scsi_sysfs_add_sdev(struct scsi_device *); +extern int scsi_sysfs_add_host(struct Scsi_Host *); +extern int scsi_sysfs_register(void); +extern void scsi_sysfs_unregister(void); +extern void scsi_sysfs_device_initialize(struct scsi_device *); +extern struct scsi_transport_template blank_transport_template; +extern void __scsi_remove_device(struct scsi_device *); + +extern struct bus_type scsi_bus_type; +extern const struct attribute_group *scsi_shost_groups[]; + +/* scsi_netlink.c */ +#ifdef CONFIG_SCSI_NETLINK +extern void scsi_netlink_init(void); +extern void scsi_netlink_exit(void); +extern struct sock *scsi_nl_sock; +#else +static inline void scsi_netlink_init(void) {} +static inline void scsi_netlink_exit(void) {} +#endif + +/* scsi_pm.c */ +#ifdef CONFIG_PM +extern const struct dev_pm_ops scsi_bus_pm_ops; + +extern void scsi_autopm_get_target(struct scsi_target *); +extern void scsi_autopm_put_target(struct scsi_target *); +extern int scsi_autopm_get_host(struct Scsi_Host *); +extern void scsi_autopm_put_host(struct Scsi_Host *); +#else +static inline void scsi_autopm_get_target(struct scsi_target *t) {} +static inline void scsi_autopm_put_target(struct scsi_target *t) {} +static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; } +static inline void scsi_autopm_put_host(struct Scsi_Host *h) {} +#endif /* CONFIG_PM */ + +/* scsi_dh.c */ +#ifdef CONFIG_SCSI_DH +void scsi_dh_add_device(struct scsi_device *sdev); +void scsi_dh_release_device(struct scsi_device *sdev); +#else +static inline void scsi_dh_add_device(struct scsi_device *sdev) { } +static inline void scsi_dh_release_device(struct scsi_device *sdev) { } +#endif + +struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev); + +extern int scsi_device_max_queue_depth(struct scsi_device *sdev); + +/* + * internal scsi timeout functions: for use by mid-layer and transport + * classes. + */ + +#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */ + +#endif /* _SCSI_PRIV_H */ diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c new file mode 100644 index 000000000..41f23cd0b --- /dev/null +++ b/drivers/scsi/scsi_proc.c @@ -0,0 +1,576 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/drivers/scsi/scsi_proc.c + * + * The functions in this file provide an interface between + * the PROC file system and the SCSI device drivers + * It is mainly used for debugging, statistics and to pass + * information directly to the lowlevel driver. 
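The extern/static-inline stub pattern used above for the netlink, PM and device-handler hooks lets callers invoke optional subsystems without #ifdefs at every call site. A minimal sketch of the same idiom for a hypothetical config option:

#ifdef CONFIG_MY_FEATURE                        /* hypothetical Kconfig symbol */
void my_feature_setup(struct scsi_device *sdev);
#else
static inline void my_feature_setup(struct scsi_device *sdev) { }
#endif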
+ * + * (c) 1995 Michael Neuffer neuffer@goofy.zdv.uni-mainz.de + * Version: 0.99.8 last change: 95/09/13 + * + * generic command parser provided by: + * Andreas Heilwagen + * + * generic_proc_info() support of xxxx_info() by: + * Michael A. Griffith + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "scsi_priv.h" +#include "scsi_logging.h" + + +/* 4K page size but our output routines use some slack for overruns */ +#define PROC_BLOCK_SIZE (3*1024) + +static struct proc_dir_entry *proc_scsi; + +/* Protects scsi_proc_list */ +static DEFINE_MUTEX(global_host_template_mutex); +static LIST_HEAD(scsi_proc_list); + +/** + * struct scsi_proc_entry - (host template, SCSI proc dir) association + * @entry: entry in scsi_proc_list. + * @sht: SCSI host template associated with the procfs directory. + * @proc_dir: procfs directory associated with the SCSI host template. + * @present: Number of SCSI hosts instantiated for @sht. + */ +struct scsi_proc_entry { + struct list_head entry; + const struct scsi_host_template *sht; + struct proc_dir_entry *proc_dir; + unsigned int present; +}; + +static ssize_t proc_scsi_host_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct Scsi_Host *shost = pde_data(file_inode(file)); + ssize_t ret = -ENOMEM; + char *page; + + if (count > PROC_BLOCK_SIZE) + return -EOVERFLOW; + + if (!shost->hostt->write_info) + return -EINVAL; + + page = (char *)__get_free_page(GFP_KERNEL); + if (page) { + ret = -EFAULT; + if (copy_from_user(page, buf, count)) + goto out; + ret = shost->hostt->write_info(shost, page, count); + } +out: + free_page((unsigned long)page); + return ret; +} + +static int proc_scsi_show(struct seq_file *m, void *v) +{ + struct Scsi_Host *shost = m->private; + return shost->hostt->show_info(m, shost); +} + +static int proc_scsi_host_open(struct inode *inode, struct file *file) +{ + return single_open_size(file, proc_scsi_show, pde_data(inode), + 4 * PAGE_SIZE); +} + +static struct scsi_proc_entry * +__scsi_lookup_proc_entry(const struct scsi_host_template *sht) +{ + struct scsi_proc_entry *e; + + lockdep_assert_held(&global_host_template_mutex); + + list_for_each_entry(e, &scsi_proc_list, entry) + if (e->sht == sht) + return e; + + return NULL; +} + +static struct scsi_proc_entry * +scsi_lookup_proc_entry(const struct scsi_host_template *sht) +{ + struct scsi_proc_entry *e; + + mutex_lock(&global_host_template_mutex); + e = __scsi_lookup_proc_entry(sht); + mutex_unlock(&global_host_template_mutex); + + return e; +} + +/** + * scsi_template_proc_dir() - returns the procfs dir for a SCSI host template + * @sht: SCSI host template pointer. + */ +struct proc_dir_entry * +scsi_template_proc_dir(const struct scsi_host_template *sht) +{ + struct scsi_proc_entry *e = scsi_lookup_proc_entry(sht); + + return e ? e->proc_dir : NULL; +} +EXPORT_SYMBOL_GPL(scsi_template_proc_dir); + +static const struct proc_ops proc_scsi_ops = { + .proc_open = proc_scsi_host_open, + .proc_release = single_release, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_write = proc_scsi_host_write +}; + +/** + * scsi_proc_hostdir_add - Create directory in /proc for a scsi host + * @sht: owner of this directory + * + * Sets sht->proc_dir to the new directory.
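To illustrate the hooks exercised by proc_scsi_host_write() and proc_scsi_show() above, a hedged sketch of how a driver might wire them up in its host template; every identifier beginning with my_ is invented for the example.

#include <linux/seq_file.h>
#include <scsi/scsi_host.h>

static int my_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
        seq_printf(m, "my_driver bound to host %d\n", shost->host_no);
        return 0;
}

static int my_write_info(struct Scsi_Host *shost, char *buf, int len)
{
        /* Parse driver-specific commands written to /proc/scsi/my_driver/<host_no>. */
        return len;
}

static const struct scsi_host_template my_template = {
        .proc_name  = "my_driver",
        .show_info  = my_show_info,
        .write_info = my_write_info,
        /* remaining methods elided */
};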
+ */ +int scsi_proc_hostdir_add(const struct scsi_host_template *sht) +{ + struct scsi_proc_entry *e; + int ret; + + if (!sht->show_info) + return 0; + + mutex_lock(&global_host_template_mutex); + e = __scsi_lookup_proc_entry(sht); + if (!e) { + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) { + ret = -ENOMEM; + goto unlock; + } + } + if (e->present++) + goto success; + e->proc_dir = proc_mkdir(sht->proc_name, proc_scsi); + if (!e->proc_dir) { + printk(KERN_ERR "%s: proc_mkdir failed for %s\n", __func__, + sht->proc_name); + ret = -ENOMEM; + goto unlock; + } + e->sht = sht; + list_add_tail(&e->entry, &scsi_proc_list); +success: + e = NULL; + ret = 0; +unlock: + mutex_unlock(&global_host_template_mutex); + + kfree(e); + return ret; +} + +/** + * scsi_proc_hostdir_rm - remove directory in /proc for a scsi host + * @sht: owner of directory + */ +void scsi_proc_hostdir_rm(const struct scsi_host_template *sht) +{ + struct scsi_proc_entry *e; + + if (!sht->show_info) + return; + + mutex_lock(&global_host_template_mutex); + e = __scsi_lookup_proc_entry(sht); + if (e && !--e->present) { + remove_proc_entry(sht->proc_name, proc_scsi); + list_del(&e->entry); + kfree(e); + } + mutex_unlock(&global_host_template_mutex); +} + + +/** + * scsi_proc_host_add - Add entry for this host to appropriate /proc dir + * @shost: host to add + */ +void scsi_proc_host_add(struct Scsi_Host *shost) +{ + const struct scsi_host_template *sht = shost->hostt; + struct scsi_proc_entry *e; + struct proc_dir_entry *p; + char name[10]; + + if (!sht->show_info) + return; + + e = scsi_lookup_proc_entry(sht); + if (!e) + goto err; + + sprintf(name,"%d", shost->host_no); + p = proc_create_data(name, S_IRUGO | S_IWUSR, e->proc_dir, + &proc_scsi_ops, shost); + if (!p) + goto err; + return; + +err: + shost_printk(KERN_ERR, shost, + "%s: Failed to register host (%s failed)\n", __func__, + e ? "proc_create_data()" : "scsi_proc_hostdir_add()"); +} + +/** + * scsi_proc_host_rm - remove this host's entry from /proc + * @shost: which host + */ +void scsi_proc_host_rm(struct Scsi_Host *shost) +{ + const struct scsi_host_template *sht = shost->hostt; + struct scsi_proc_entry *e; + char name[10]; + + if (!sht->show_info) + return; + + e = scsi_lookup_proc_entry(sht); + if (!e) + return; + + sprintf(name,"%d", shost->host_no); + remove_proc_entry(name, e->proc_dir); +} +/** + * proc_print_scsidevice - return data about this host + * @dev: A scsi device + * @data: &struct seq_file to output to. + * + * Description: prints Host, Channel, Id, Lun, Vendor, Model, Rev, Type, + * and revision. 
+ */ +static int proc_print_scsidevice(struct device *dev, void *data) +{ + struct scsi_device *sdev; + struct seq_file *s = data; + int i; + + if (!scsi_is_sdev_device(dev)) + goto out; + + sdev = to_scsi_device(dev); + seq_printf(s, + "Host: scsi%d Channel: %02d Id: %02d Lun: %02llu\n Vendor: ", + sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); + for (i = 0; i < 8; i++) { + if (sdev->vendor[i] >= 0x20) + seq_putc(s, sdev->vendor[i]); + else + seq_putc(s, ' '); + } + + seq_puts(s, " Model: "); + for (i = 0; i < 16; i++) { + if (sdev->model[i] >= 0x20) + seq_putc(s, sdev->model[i]); + else + seq_putc(s, ' '); + } + + seq_puts(s, " Rev: "); + for (i = 0; i < 4; i++) { + if (sdev->rev[i] >= 0x20) + seq_putc(s, sdev->rev[i]); + else + seq_putc(s, ' '); + } + + seq_putc(s, '\n'); + + seq_printf(s, " Type: %s ", scsi_device_type(sdev->type)); + seq_printf(s, " ANSI SCSI revision: %02x", + sdev->scsi_level - (sdev->scsi_level > 1)); + if (sdev->scsi_level == 2) + seq_puts(s, " CCS\n"); + else + seq_putc(s, '\n'); + +out: + return 0; +} + +/** + * scsi_add_single_device - Respond to user request to probe for/add device + * @host: user-supplied decimal integer + * @channel: user-supplied decimal integer + * @id: user-supplied decimal integer + * @lun: user-supplied decimal integer + * + * Description: called by writing "scsi add-single-device" to /proc/scsi/scsi. + * + * does scsi_host_lookup() and either user_scan() if that transport + * type supports it, or else scsi_scan_host_selected() + * + * Note: this seems to be aimed exclusively at SCSI parallel busses. + */ + +static int scsi_add_single_device(uint host, uint channel, uint id, uint lun) +{ + struct Scsi_Host *shost; + int error = -ENXIO; + + shost = scsi_host_lookup(host); + if (!shost) + return error; + + if (shost->transportt->user_scan) + error = shost->transportt->user_scan(shost, channel, id, lun); + else + error = scsi_scan_host_selected(shost, channel, id, lun, + SCSI_SCAN_MANUAL); + scsi_host_put(shost); + return error; +} + +/** + * scsi_remove_single_device - Respond to user request to remove a device + * @host: user-supplied decimal integer + * @channel: user-supplied decimal integer + * @id: user-supplied decimal integer + * @lun: user-supplied decimal integer + * + * Description: called by writing "scsi remove-single-device" to + * /proc/scsi/scsi. Does a scsi_device_lookup() and scsi_remove_device() + */ +static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun) +{ + struct scsi_device *sdev; + struct Scsi_Host *shost; + int error = -ENXIO; + + shost = scsi_host_lookup(host); + if (!shost) + return error; + sdev = scsi_device_lookup(shost, channel, id, lun); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + error = 0; + } + + scsi_host_put(shost); + return error; +} + +/** + * proc_scsi_write - handle writes to /proc/scsi/scsi + * @file: not used + * @buf: buffer to write + * @length: length of buf, at most PAGE_SIZE + * @ppos: not used + * + * Description: this provides a legacy mechanism to add or remove devices by + * Host, Channel, ID, and Lun. To use, + * "echo 'scsi add-single-device 0 1 2 3' > /proc/scsi/scsi" or + * "echo 'scsi remove-single-device 0 1 2 3' > /proc/scsi/scsi" with + * "0 1 2 3" replaced by the Host, Channel, Id, and Lun. + * + * Note: this seems to be aimed at parallel SCSI. Most modern busses (USB, + * SATA, Firewire, Fibre Channel, etc) dynamically assign these values to + * provide a unique identifier and nothing more. 
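A hedged user-space companion to the interface documented above, equivalent to the echo commands (requires root and CONFIG_SCSI_PROC_FS); nothing here is part of the patch.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Ask the mid-layer to probe host 0, channel 0, id 1, lun 0. */
        static const char cmd[] = "scsi add-single-device 0 0 1 0";
        int fd = open("/proc/scsi/scsi", O_WRONLY);

        if (fd < 0)
                return 1;
        if (write(fd, cmd, strlen(cmd)) < 0) {
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}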
+ */ + + +static ssize_t proc_scsi_write(struct file *file, const char __user *buf, + size_t length, loff_t *ppos) +{ + int host, channel, id, lun; + char *buffer, *end, *p; + int err; + + if (!buf || length > PAGE_SIZE) + return -EINVAL; + + buffer = (char *)__get_free_page(GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + err = -EFAULT; + if (copy_from_user(buffer, buf, length)) + goto out; + + err = -EINVAL; + if (length < PAGE_SIZE) { + end = buffer + length; + *end = '\0'; + } else { + end = buffer + PAGE_SIZE - 1; + if (*end) + goto out; + } + + /* + * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi + * with "0 1 2 3" replaced by your "Host Channel Id Lun". + */ + if (!strncmp("scsi add-single-device", buffer, 22)) { + p = buffer + 23; + + host = (p < end) ? simple_strtoul(p, &p, 0) : 0; + channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + + err = scsi_add_single_device(host, channel, id, lun); + + /* + * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi + * with "0 1 2 3" replaced by your "Host Channel Id Lun". + */ + } else if (!strncmp("scsi remove-single-device", buffer, 25)) { + p = buffer + 26; + + host = (p < end) ? simple_strtoul(p, &p, 0) : 0; + channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + + err = scsi_remove_single_device(host, channel, id, lun); + } + + /* + * convert success returns so that we return the + * number of bytes consumed. + */ + if (!err) + err = length; + + out: + free_page((unsigned long)buffer); + return err; +} + +static inline struct device *next_scsi_device(struct device *start) +{ + struct device *next = bus_find_next_device(&scsi_bus_type, start); + + put_device(start); + return next; +} + +static void *scsi_seq_start(struct seq_file *sfile, loff_t *pos) +{ + struct device *dev = NULL; + loff_t n = *pos; + + while ((dev = next_scsi_device(dev))) { + if (!n--) + break; + sfile->private++; + } + return dev; +} + +static void *scsi_seq_next(struct seq_file *sfile, void *v, loff_t *pos) +{ + (*pos)++; + sfile->private++; + return next_scsi_device(v); +} + +static void scsi_seq_stop(struct seq_file *sfile, void *v) +{ + put_device(v); +} + +static int scsi_seq_show(struct seq_file *sfile, void *dev) +{ + if (!sfile->private) + seq_puts(sfile, "Attached devices:\n"); + + return proc_print_scsidevice(dev, sfile); +} + +static const struct seq_operations scsi_seq_ops = { + .start = scsi_seq_start, + .next = scsi_seq_next, + .stop = scsi_seq_stop, + .show = scsi_seq_show +}; + +/** + * proc_scsi_open - glue function + * @inode: not used + * @file: passed to single_open() + * + * Associates proc_scsi_show with this file + */ +static int proc_scsi_open(struct inode *inode, struct file *file) +{ + /* + * We don't really need this for the write case but it doesn't + * harm either. 
+ */ + return seq_open(file, &scsi_seq_ops); +} + +static const struct proc_ops scsi_scsi_proc_ops = { + .proc_open = proc_scsi_open, + .proc_read = seq_read, + .proc_write = proc_scsi_write, + .proc_lseek = seq_lseek, + .proc_release = seq_release, +}; + +/** + * scsi_init_procfs - create scsi and scsi/scsi in procfs + */ +int __init scsi_init_procfs(void) +{ + struct proc_dir_entry *pde; + + proc_scsi = proc_mkdir("scsi", NULL); + if (!proc_scsi) + goto err1; + + pde = proc_create("scsi/scsi", 0, NULL, &scsi_scsi_proc_ops); + if (!pde) + goto err2; + + return 0; + +err2: + remove_proc_entry("scsi", NULL); +err1: + return -ENOMEM; +} + +/** + * scsi_exit_procfs - Remove scsi/scsi and scsi from procfs + */ +void scsi_exit_procfs(void) +{ + remove_proc_entry("scsi/scsi", NULL); + remove_proc_entry("scsi", NULL); +} diff --git a/drivers/scsi/scsi_sas_internal.h b/drivers/scsi/scsi_sas_internal.h new file mode 100644 index 000000000..82fd548c5 --- /dev/null +++ b/drivers/scsi/scsi_sas_internal.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SCSI_SAS_INTERNAL_H +#define _SCSI_SAS_INTERNAL_H + +#define SAS_HOST_ATTRS 0 +#define SAS_PHY_ATTRS 17 +#define SAS_PORT_ATTRS 1 +#define SAS_RPORT_ATTRS 8 +#define SAS_END_DEV_ATTRS 5 +#define SAS_EXPANDER_ATTRS 7 + +struct sas_internal { + struct scsi_transport_template t; + struct sas_function_template *f; + struct sas_domain_function_template *dft; + + struct device_attribute private_host_attrs[SAS_HOST_ATTRS]; + struct device_attribute private_phy_attrs[SAS_PHY_ATTRS]; + struct device_attribute private_port_attrs[SAS_PORT_ATTRS]; + struct device_attribute private_rphy_attrs[SAS_RPORT_ATTRS]; + struct device_attribute private_end_dev_attrs[SAS_END_DEV_ATTRS]; + struct device_attribute private_expander_attrs[SAS_EXPANDER_ATTRS]; + + struct transport_container phy_attr_cont; + struct transport_container port_attr_cont; + struct transport_container rphy_attr_cont; + struct transport_container end_dev_attr_cont; + struct transport_container expander_attr_cont; + + /* + * The array of null terminated pointers to attributes + * needed by scsi_sysfs.c + */ + struct device_attribute *host_attrs[SAS_HOST_ATTRS + 1]; + struct device_attribute *phy_attrs[SAS_PHY_ATTRS + 1]; + struct device_attribute *port_attrs[SAS_PORT_ATTRS + 1]; + struct device_attribute *rphy_attrs[SAS_RPORT_ATTRS + 1]; + struct device_attribute *end_dev_attrs[SAS_END_DEV_ATTRS + 1]; + struct device_attribute *expander_attrs[SAS_EXPANDER_ATTRS + 1]; +}; +#define to_sas_internal(tmpl) container_of(tmpl, struct sas_internal, t) + +#endif diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c new file mode 100644 index 000000000..44680f65e --- /dev/null +++ b/drivers/scsi/scsi_scan.c @@ -0,0 +1,2009 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * scsi_scan.c + * + * Copyright (C) 2000 Eric Youngdale, + * Copyright (C) 2002 Patrick Mansfield + * + * The general scanning/probing algorithm is as follows, exceptions are + * made to it depending on device specific flags, compilation options, and + * global variable (boot or module load time) settings. + * + * A specific LUN is scanned via an INQUIRY command; if the LUN has a + * device attached, a scsi_device is allocated and setup for it. + * + * For every id of every channel on the given host: + * + * Scan LUN 0; if the target responds to LUN 0 (even if there is no + * device or storage attached to LUN 0): + * + * If LUN 0 has a device attached, allocate and setup a + * scsi_device for it. 
+ * + * If target is SCSI-3 or up, issue a REPORT LUN, and scan + * all of the LUNs returned by the REPORT LUN; else, + * sequentially scan LUNs up until some maximum is reached, + * or a LUN is seen that cannot have a device attached to it. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "scsi_priv.h" +#include "scsi_logging.h" + +#define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \ + " SCSI scanning, some SCSI devices might not be configured\n" + +/* + * Default timeout + */ +#define SCSI_TIMEOUT (2*HZ) +#define SCSI_REPORT_LUNS_TIMEOUT (30*HZ) + +/* + * Prefix values for the SCSI id's (stored in sysfs name field) + */ +#define SCSI_UID_SER_NUM 'S' +#define SCSI_UID_UNKNOWN 'Z' + +/* + * Return values of some of the scanning functions. + * + * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this + * includes allocation or general failures preventing IO from being sent. + * + * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available + * on the given LUN. + * + * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a + * given LUN. + */ +#define SCSI_SCAN_NO_RESPONSE 0 +#define SCSI_SCAN_TARGET_PRESENT 1 +#define SCSI_SCAN_LUN_PRESENT 2 + +static const char *scsi_null_device_strs = "nullnullnullnull"; + +#define MAX_SCSI_LUNS 512 + +static u64 max_scsi_luns = MAX_SCSI_LUNS; + +module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(max_luns, + "last scsi LUN (should be between 1 and 2^64-1)"); + +#ifdef CONFIG_SCSI_SCAN_ASYNC +#define SCSI_SCAN_TYPE_DEFAULT "async" +#else +#define SCSI_SCAN_TYPE_DEFAULT "sync" +#endif + +static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT; + +module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), + S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(scan, "sync, async, manual, or none. " + "Setting to 'manual' disables automatic scanning, but allows " + "for manual device scan via the 'scan' sysfs attribute."); + +static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18; + +module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(inq_timeout, + "Timeout (in seconds) waiting for devices to answer INQUIRY." + " Default is 20. Some devices may need more; most need less."); + +/* This lock protects only this list */ +static DEFINE_SPINLOCK(async_scan_lock); +static LIST_HEAD(scanning_hosts); + +struct async_scan_data { + struct list_head list; + struct Scsi_Host *shost; + struct completion prev_finished; +}; + +/* + * scsi_enable_async_suspend - Enable async suspend and resume + */ +void scsi_enable_async_suspend(struct device *dev) +{ + /* + * If a user has disabled async probing a likely reason is due to a + * storage enclosure that does not inject staggered spin-ups. For + * safety, make resume synchronous as well in that case. + */ + if (strncmp(scsi_scan_type, "async", 5) != 0) + return; + /* Enable asynchronous suspend and resume. */ + device_enable_async_suspend(dev); +} + +/** + * scsi_complete_async_scans - Wait for asynchronous scans to complete + * + * When this function returns, any host which started scanning before + * this function was called will have finished its scan. Hosts which + * started scanning after this function was called may or may not have + * finished. 
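A hedged, self-contained sketch of the per-target policy described in the header comment of this file (probe LUN 0, then REPORT LUNS for SCSI-3 and newer targets, otherwise a sequential walk). The three helpers are stubs standing in for the real routines (scsi_report_lun_scan() and friends) defined further down in scsi_scan.c.

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

/* Stub stand-ins so the sketch compiles; not the real implementations. */
static bool probe_lun0(struct scsi_target *starget) { return true; }
static void report_luns_scan(struct scsi_target *starget) { }
static void sequential_lun_scan(struct scsi_target *starget, u64 max_luns) { }

static void scan_one_target_sketch(struct scsi_target *starget, u64 max_luns)
{
        if (!probe_lun0(starget))               /* INQUIRY to LUN 0 found nothing */
                return;

        if (starget->scsi_level >= SCSI_3)
                report_luns_scan(starget);      /* ask the target for its LUN list */
        else
                sequential_lun_scan(starget, max_luns); /* walk LUNs until a miss or the limit */
}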
+ */ +int scsi_complete_async_scans(void) +{ + struct async_scan_data *data; + + do { + if (list_empty(&scanning_hosts)) + return 0; + /* If we can't get memory immediately, that's OK. Just + * sleep a little. Even if we never get memory, the async + * scans will finish eventually. + */ + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + msleep(1); + } while (!data); + + data->shost = NULL; + init_completion(&data->prev_finished); + + spin_lock(&async_scan_lock); + /* Check that there's still somebody else on the list */ + if (list_empty(&scanning_hosts)) + goto done; + list_add_tail(&data->list, &scanning_hosts); + spin_unlock(&async_scan_lock); + + printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n"); + wait_for_completion(&data->prev_finished); + + spin_lock(&async_scan_lock); + list_del(&data->list); + if (!list_empty(&scanning_hosts)) { + struct async_scan_data *next = list_entry(scanning_hosts.next, + struct async_scan_data, list); + complete(&next->prev_finished); + } + done: + spin_unlock(&async_scan_lock); + + kfree(data); + return 0; +} + +/** + * scsi_unlock_floptical - unlock device via a special MODE SENSE command + * @sdev: scsi device to send command to + * @result: area to store the result of the MODE SENSE + * + * Description: + * Send a vendor specific MODE SENSE (not a MODE SELECT) command. + * Called for BLIST_KEY devices. + **/ +static void scsi_unlock_floptical(struct scsi_device *sdev, + unsigned char *result) +{ + unsigned char scsi_cmd[MAX_COMMAND_SIZE]; + + sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n"); + scsi_cmd[0] = MODE_SENSE; + scsi_cmd[1] = 0; + scsi_cmd[2] = 0x2e; + scsi_cmd[3] = 0; + scsi_cmd[4] = 0x2a; /* size */ + scsi_cmd[5] = 0; + scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, result, 0x2a, + SCSI_TIMEOUT, 3, NULL); +} + +static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev, + unsigned int depth) +{ + int new_shift = sbitmap_calculate_shift(depth); + bool need_alloc = !sdev->budget_map.map; + bool need_free = false; + int ret; + struct sbitmap sb_backup; + + depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev)); + + /* + * realloc if new shift is calculated, which is caused by setting + * up one new default queue depth after calling ->slave_configure + */ + if (!need_alloc && new_shift != sdev->budget_map.shift) + need_alloc = need_free = true; + + if (!need_alloc) + return 0; + + /* + * Request queue has to be frozen for reallocating budget map, + * and here disk isn't added yet, so freezing is pretty fast + */ + if (need_free) { + blk_mq_freeze_queue(sdev->request_queue); + sb_backup = sdev->budget_map; + } + ret = sbitmap_init_node(&sdev->budget_map, + scsi_device_max_queue_depth(sdev), + new_shift, GFP_KERNEL, + sdev->request_queue->node, false, true); + if (!ret) + sbitmap_resize(&sdev->budget_map, depth); + + if (need_free) { + if (ret) + sdev->budget_map = sb_backup; + else + sbitmap_free(&sb_backup); + ret = 0; + blk_mq_unfreeze_queue(sdev->request_queue); + } + return ret; +} + +/** + * scsi_alloc_sdev - allocate and setup a scsi_Device + * @starget: which target to allocate a &scsi_device for + * @lun: which lun + * @hostdata: usually NULL and set by ->slave_alloc instead + * + * Description: + * Allocate, initialize for io, and return a pointer to a scsi_Device. + * Stores the @shost, @channel, @id, and @lun in the scsi_Device, and + * adds scsi_Device to the appropriate list. + * + * Return value: + * scsi_Device pointer, or NULL on failure. 
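Mirroring the scsi_execute_cmd() call pattern of scsi_unlock_floptical() above, a hedged sketch of issuing a TEST UNIT READY with the same timeout and retry policy; the wrapper name is hypothetical, and real callers would usually go through scsi_test_unit_ready() instead.

static int my_test_unit_ready(struct scsi_device *sdev)         /* hypothetical */
{
        unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };

        /* No data transfer; reuse the file-local SCSI_TIMEOUT and 3 retries. */
        return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
                                SCSI_TIMEOUT, 3, NULL);
}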
+ **/ +static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, + u64 lun, void *hostdata) +{ + unsigned int depth; + struct scsi_device *sdev; + struct request_queue *q; + int display_failure_msg = 1, ret; + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + + sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, + GFP_KERNEL); + if (!sdev) + goto out; + + sdev->vendor = scsi_null_device_strs; + sdev->model = scsi_null_device_strs; + sdev->rev = scsi_null_device_strs; + sdev->host = shost; + sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD; + sdev->id = starget->id; + sdev->lun = lun; + sdev->channel = starget->channel; + mutex_init(&sdev->state_mutex); + sdev->sdev_state = SDEV_CREATED; + INIT_LIST_HEAD(&sdev->siblings); + INIT_LIST_HEAD(&sdev->same_target_siblings); + INIT_LIST_HEAD(&sdev->starved_entry); + INIT_LIST_HEAD(&sdev->event_list); + spin_lock_init(&sdev->list_lock); + mutex_init(&sdev->inquiry_mutex); + INIT_WORK(&sdev->event_work, scsi_evt_thread); + INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue); + + sdev->sdev_gendev.parent = get_device(&starget->dev); + sdev->sdev_target = starget; + + /* usually NULL and set by ->slave_alloc instead */ + sdev->hostdata = hostdata; + + /* if the device needs this changing, it may do so in the + * slave_configure function */ + sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED; + + /* + * Some low level driver could use device->type + */ + sdev->type = -1; + + /* + * Assume that the device will have handshaking problems, + * and then fix this field later if it turns out it + * doesn't + */ + sdev->borken = 1; + + sdev->sg_reserved_size = INT_MAX; + + q = blk_mq_init_queue(&sdev->host->tag_set); + if (IS_ERR(q)) { + /* release fn is set up in scsi_sysfs_device_initialise, so + * have to free and put manually here */ + put_device(&starget->dev); + kfree(sdev); + goto out; + } + kref_get(&sdev->host->tagset_refcnt); + sdev->request_queue = q; + q->queuedata = sdev; + __scsi_init_queue(sdev->host, q); + + depth = sdev->host->cmd_per_lun ?: 1; + + /* + * Use .can_queue as budget map's depth because we have to + * support adjusting queue depth from sysfs. Meantime use + * default device queue depth to figure out sbitmap shift + * since we use this queue depth most of times. 
+ */ + if (scsi_realloc_sdev_budget_map(sdev, depth)) { + put_device(&starget->dev); + kfree(sdev); + goto out; + } + + scsi_change_queue_depth(sdev, depth); + + scsi_sysfs_device_initialize(sdev); + + if (shost->hostt->slave_alloc) { + ret = shost->hostt->slave_alloc(sdev); + if (ret) { + /* + * if LLDD reports slave not present, don't clutter + * console with alloc failure messages + */ + if (ret == -ENXIO) + display_failure_msg = 0; + goto out_device_destroy; + } + } + + return sdev; + +out_device_destroy: + __scsi_remove_device(sdev); +out: + if (display_failure_msg) + printk(ALLOC_FAILURE_MSG, __func__); + return NULL; +} + +static void scsi_target_destroy(struct scsi_target *starget) +{ + struct device *dev = &starget->dev; + struct Scsi_Host *shost = dev_to_shost(dev->parent); + unsigned long flags; + + BUG_ON(starget->state == STARGET_DEL); + starget->state = STARGET_DEL; + transport_destroy_device(dev); + spin_lock_irqsave(shost->host_lock, flags); + if (shost->hostt->target_destroy) + shost->hostt->target_destroy(starget); + list_del_init(&starget->siblings); + spin_unlock_irqrestore(shost->host_lock, flags); + put_device(dev); +} + +static void scsi_target_dev_release(struct device *dev) +{ + struct device *parent = dev->parent; + struct scsi_target *starget = to_scsi_target(dev); + + kfree(starget); + put_device(parent); +} + +static struct device_type scsi_target_type = { + .name = "scsi_target", + .release = scsi_target_dev_release, +}; + +int scsi_is_target_device(const struct device *dev) +{ + return dev->type == &scsi_target_type; +} +EXPORT_SYMBOL(scsi_is_target_device); + +static struct scsi_target *__scsi_find_target(struct device *parent, + int channel, uint id) +{ + struct scsi_target *starget, *found_starget = NULL; + struct Scsi_Host *shost = dev_to_shost(parent); + /* + * Search for an existing target for this sdev. + */ + list_for_each_entry(starget, &shost->__targets, siblings) { + if (starget->id == id && + starget->channel == channel) { + found_starget = starget; + break; + } + } + if (found_starget) + get_device(&found_starget->dev); + + return found_starget; +} + +/** + * scsi_target_reap_ref_release - remove target from visibility + * @kref: the reap_ref in the target being released + * + * Called on last put of reap_ref, which is the indication that no device + * under this target is visible anymore, so render the target invisible in + * sysfs. Note: we have to be in user context here because the target reaps + * should be done in places where the scsi device visibility is being removed. + */ +static void scsi_target_reap_ref_release(struct kref *kref) +{ + struct scsi_target *starget + = container_of(kref, struct scsi_target, reap_ref); + + /* + * if we get here and the target is still in a CREATED state that + * means it was allocated but never made visible (because a scan + * turned up no LUNs), so don't call device_del() on it. 
+ */ + if ((starget->state != STARGET_CREATED) && + (starget->state != STARGET_CREATED_REMOVE)) { + transport_remove_device(&starget->dev); + device_del(&starget->dev); + } + scsi_target_destroy(starget); +} + +static void scsi_target_reap_ref_put(struct scsi_target *starget) +{ + kref_put(&starget->reap_ref, scsi_target_reap_ref_release); +} + +/** + * scsi_alloc_target - allocate a new or find an existing target + * @parent: parent of the target (need not be a scsi host) + * @channel: target channel number (zero if no channels) + * @id: target id number + * + * Return an existing target if one exists, provided it hasn't already + * gone into STARGET_DEL state, otherwise allocate a new target. + * + * The target is returned with an incremented reference, so the caller + * is responsible for both reaping and doing a last put + */ +static struct scsi_target *scsi_alloc_target(struct device *parent, + int channel, uint id) +{ + struct Scsi_Host *shost = dev_to_shost(parent); + struct device *dev = NULL; + unsigned long flags; + const int size = sizeof(struct scsi_target) + + shost->transportt->target_size; + struct scsi_target *starget; + struct scsi_target *found_target; + int error, ref_got; + + starget = kzalloc(size, GFP_KERNEL); + if (!starget) { + printk(KERN_ERR "%s: allocation failure\n", __func__); + return NULL; + } + dev = &starget->dev; + device_initialize(dev); + kref_init(&starget->reap_ref); + dev->parent = get_device(parent); + dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id); + dev->bus = &scsi_bus_type; + dev->type = &scsi_target_type; + scsi_enable_async_suspend(dev); + starget->id = id; + starget->channel = channel; + starget->can_queue = 0; + INIT_LIST_HEAD(&starget->siblings); + INIT_LIST_HEAD(&starget->devices); + starget->state = STARGET_CREATED; + starget->scsi_level = SCSI_2; + starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED; + retry: + spin_lock_irqsave(shost->host_lock, flags); + + found_target = __scsi_find_target(parent, channel, id); + if (found_target) + goto found; + + list_add_tail(&starget->siblings, &shost->__targets); + spin_unlock_irqrestore(shost->host_lock, flags); + /* allocate and add */ + transport_setup_device(dev); + if (shost->hostt->target_alloc) { + error = shost->hostt->target_alloc(starget); + + if(error) { + if (error != -ENXIO) + dev_err(dev, "target allocation failed, error %d\n", error); + /* don't want scsi_target_reap to do the final + * put because it will be under the host lock */ + scsi_target_destroy(starget); + return NULL; + } + } + get_device(dev); + + return starget; + + found: + /* + * release routine already fired if kref is zero, so if we can still + * take the reference, the target must be alive. If we can't, it must + * be dying and we need to wait for a new target + */ + ref_got = kref_get_unless_zero(&found_target->reap_ref); + + spin_unlock_irqrestore(shost->host_lock, flags); + if (ref_got) { + put_device(dev); + return found_target; + } + /* + * Unfortunately, we found a dying target; need to wait until it's + * dead before we can get a new one. There is an anomaly here. We + * *should* call scsi_target_reap() to balance the kref_get() of the + * reap_ref above. However, since the target being released, it's + * already invisible and the reap_ref is irrelevant. If we call + * scsi_target_reap() we might spuriously do another device_del() on + * an already invisible target. 
+ */ + put_device(&found_target->dev); + /* + * length of time is irrelevant here, we just want to yield the CPU + * for a tick to avoid busy waiting for the target to die. + */ + msleep(1); + goto retry; +} + +/** + * scsi_target_reap - check to see if target is in use and destroy if not + * @starget: target to be checked + * + * This is used after removing a LUN or doing a last put of the target; + * it checks atomically that nothing is using the target and removes + * it if so. + */ +void scsi_target_reap(struct scsi_target *starget) +{ + /* + * serious problem if this triggers: STARGET_DEL is only set if + * the reap_ref drops to zero, so we're trying to do another final put + * on an already released kref + */ + BUG_ON(starget->state == STARGET_DEL); + scsi_target_reap_ref_put(starget); +} + +/** + * scsi_sanitize_inquiry_string - remove non-graphical chars from an + * INQUIRY result string + * @s: INQUIRY result string to sanitize + * @len: length of the string + * + * Description: + * The SCSI spec says that INQUIRY vendor, product, and revision + * strings must consist entirely of graphic ASCII characters, + * padded on the right with spaces. Since not all devices obey + * this rule, we will replace non-graphic or non-ASCII characters + * with spaces. Exception: a NUL character is interpreted as a + * string terminator, so all the following characters are set to + * spaces. + **/ +void scsi_sanitize_inquiry_string(unsigned char *s, int len) +{ + int terminated = 0; + + for (; len > 0; (--len, ++s)) { + if (*s == 0) + terminated = 1; + if (terminated || *s < 0x20 || *s > 0x7e) + *s = ' '; + } +} +EXPORT_SYMBOL(scsi_sanitize_inquiry_string); + +/** + * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY + * @sdev: scsi_device to probe + * @inq_result: area to store the INQUIRY result + * @result_len: len of inq_result + * @bflags: store any bflags found here + * + * Description: + * Probe the lun associated with @sdev using a standard SCSI INQUIRY. + * + * If the INQUIRY is successful, zero is returned and the + * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length + * are copied to the scsi_device, and any flags value is stored in *@bflags. + **/ +static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, + int result_len, blist_flags_t *bflags) +{ + unsigned char scsi_cmd[MAX_COMMAND_SIZE]; + int first_inquiry_len, try_inquiry_len, next_inquiry_len; + int response_len = 0; + int pass, count, result, resid; + struct scsi_sense_hdr sshdr; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + .resid = &resid, + }; + + *bflags = 0; + + /* Perform up to 3 passes. The first pass uses a conservative + * transfer length of 36 unless sdev->inquiry_len specifies a + * different value. */ + first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36; + try_inquiry_len = first_inquiry_len; + pass = 1; + + next_pass: + SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, + "scsi scan: INQUIRY pass %d length %d\n", + pass, try_inquiry_len)); + + /* Each pass gets up to three chances to ignore Unit Attention */ + for (count = 0; count < 3; ++count) { + memset(scsi_cmd, 0, 6); + scsi_cmd[0] = INQUIRY; + scsi_cmd[4] = (unsigned char) try_inquiry_len; + + memset(inq_result, 0, try_inquiry_len); + + result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, + inq_result, try_inquiry_len, + HZ / 2 + HZ * scsi_inq_timeout, 3, + &exec_args); + + SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, + "scsi scan: INQUIRY %s with code 0x%x\n", + result ? 
"failed" : "successful", result)); + + if (result > 0) { + /* + * not-ready to ready transition [asc/ascq=0x28/0x0] + * or power-on, reset [asc/ascq=0x29/0x0], continue. + * INQUIRY should not yield UNIT_ATTENTION + * but many buggy devices do so anyway. + */ + if (scsi_status_is_check_condition(result) && + scsi_sense_valid(&sshdr)) { + if ((sshdr.sense_key == UNIT_ATTENTION) && + ((sshdr.asc == 0x28) || + (sshdr.asc == 0x29)) && + (sshdr.ascq == 0)) + continue; + } + } else if (result == 0) { + /* + * if nothing was transferred, we try + * again. It's a workaround for some USB + * devices. + */ + if (resid == try_inquiry_len) + continue; + } + break; + } + + if (result == 0) { + scsi_sanitize_inquiry_string(&inq_result[8], 8); + scsi_sanitize_inquiry_string(&inq_result[16], 16); + scsi_sanitize_inquiry_string(&inq_result[32], 4); + + response_len = inq_result[4] + 5; + if (response_len > 255) + response_len = first_inquiry_len; /* sanity */ + + /* + * Get any flags for this device. + * + * XXX add a bflags to scsi_device, and replace the + * corresponding bit fields in scsi_device, so bflags + * need not be passed as an argument. + */ + *bflags = scsi_get_device_flags(sdev, &inq_result[8], + &inq_result[16]); + + /* When the first pass succeeds we gain information about + * what larger transfer lengths might work. */ + if (pass == 1) { + if (BLIST_INQUIRY_36 & *bflags) + next_inquiry_len = 36; + /* + * LLD specified a maximum sdev->inquiry_len + * but device claims it has more data. Capping + * the length only makes sense for legacy + * devices. If a device supports SPC-4 (2014) + * or newer, assume that it is safe to ask for + * as much as the device says it supports. + */ + else if (sdev->inquiry_len && + response_len > sdev->inquiry_len && + (inq_result[2] & 0x7) < 6) /* SPC-4 */ + next_inquiry_len = sdev->inquiry_len; + else + next_inquiry_len = response_len; + + /* If more data is available perform the second pass */ + if (next_inquiry_len > try_inquiry_len) { + try_inquiry_len = next_inquiry_len; + pass = 2; + goto next_pass; + } + } + + } else if (pass == 2) { + sdev_printk(KERN_INFO, sdev, + "scsi scan: %d byte inquiry failed. " + "Consider BLIST_INQUIRY_36 for this device\n", + try_inquiry_len); + + /* If this pass failed, the third pass goes back and transfers + * the same amount as we successfully got in the first pass. */ + try_inquiry_len = first_inquiry_len; + pass = 3; + goto next_pass; + } + + /* If the last transfer attempt got an error, assume the + * peripheral doesn't exist or is dead. */ + if (result) + return -EIO; + + /* Don't report any more data than the device says is valid */ + sdev->inquiry_len = min(try_inquiry_len, response_len); + + /* + * XXX Abort if the response length is less than 36? If less than + * 32, the lookup of the device flags (above) could be invalid, + * and it would be possible to take an incorrect action - we do + * not want to hang because of a short INQUIRY. On the flip side, + * if the device is spun down or becoming ready (and so it gives a + * short INQUIRY), an abort here prevents any further use of the + * device, including spin up. + * + * On the whole, the best approach seems to be to assume the first + * 36 bytes are valid no matter what the device says. That's + * better than copying < 36 bytes to the inquiry-result buffer + * and displaying garbage for the Vendor, Product, or Revision + * strings. 
+ */ + if (sdev->inquiry_len < 36) { + if (!sdev->host->short_inquiry) { + shost_printk(KERN_INFO, sdev->host, + "scsi scan: INQUIRY result too short (%d)," + " using 36\n", sdev->inquiry_len); + sdev->host->short_inquiry = 1; + } + sdev->inquiry_len = 36; + } + + /* + * Related to the above issue: + * + * XXX Devices (disk or all?) should be sent a TEST UNIT READY, + * and if not ready, sent a START_STOP to start (maybe spin up) and + * then send the INQUIRY again, since the INQUIRY can change after + * a device is initialized. + * + * Ideally, start a device if explicitly asked to do so. This + * assumes that a device is spun up on power on, spun down on + * request, and then spun up on request. + */ + + /* + * The scanning code needs to know the scsi_level, even if no + * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so + * non-zero LUNs can be scanned. + */ + sdev->scsi_level = inq_result[2] & 0x0f; + if (sdev->scsi_level >= 2 || + (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1)) + sdev->scsi_level++; + sdev->sdev_target->scsi_level = sdev->scsi_level; + + /* + * If SCSI-2 or lower, and if the transport requires it, + * store the LUN value in CDB[1]. + */ + sdev->lun_in_cdb = 0; + if (sdev->scsi_level <= SCSI_2 && + sdev->scsi_level != SCSI_UNKNOWN && + !sdev->host->no_scsi2_lun_in_cdb) + sdev->lun_in_cdb = 1; + + return 0; +} + +/** + * scsi_add_lun - allocate and fully initialize a scsi_device + * @sdev: holds information to be stored in the new scsi_device + * @inq_result: holds the result of a previous INQUIRY to the LUN + * @bflags: black/white list flag + * @async: 1 if this device is being scanned asynchronously + * + * Description: + * Initialize the scsi_device @sdev. Optionally set fields based + * on values in *@bflags. + * + * Return: + * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device + * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized + **/ +static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, + blist_flags_t *bflags, int async) +{ + int ret; + + /* + * XXX do not save the inquiry, since it can change underneath us, + * save just vendor/model/rev. + * + * Rather than save it and have an ioctl that retrieves the saved + * value, have an ioctl that executes the same INQUIRY code used + * in scsi_probe_lun, let user level programs doing INQUIRY + * scanning run at their own risk, or supply a user level program + * that can correctly scan. + */ + + /* + * Copy at least 36 bytes of INQUIRY data, so that we don't + * dereference unallocated memory when accessing the Vendor, + * Product, and Revision strings. Badly behaved devices may set + * the INQUIRY Additional Length byte to a small value, indicating + * these strings are invalid, but often they contain plausible data + * nonetheless. It doesn't matter if the device sent < 36 bytes + * total, since scsi_probe_lun() initializes inq_result with 0s. + */ + sdev->inquiry = kmemdup(inq_result, + max_t(size_t, sdev->inquiry_len, 36), + GFP_KERNEL); + if (sdev->inquiry == NULL) + return SCSI_SCAN_NO_RESPONSE; + + sdev->vendor = (char *) (sdev->inquiry + 8); + sdev->model = (char *) (sdev->inquiry + 16); + sdev->rev = (char *) (sdev->inquiry + 32); + + if (strncmp(sdev->vendor, "ATA ", 8) == 0) { + /* + * sata emulation layer device. This is a hack to work around + * the SATL power management specifications which state that + * when the SATL detects the device has gone into standby + * mode, it shall respond with NOT READY. 
+ */ + sdev->allow_restart = 1; + } + + if (*bflags & BLIST_ISROM) { + sdev->type = TYPE_ROM; + sdev->removable = 1; + } else { + sdev->type = (inq_result[0] & 0x1f); + sdev->removable = (inq_result[1] & 0x80) >> 7; + + /* + * some devices may respond with wrong type for + * well-known logical units. Force well-known type + * to enumerate them correctly. + */ + if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) { + sdev_printk(KERN_WARNING, sdev, + "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n", + __func__, sdev->type, (unsigned int)sdev->lun); + sdev->type = TYPE_WLUN; + } + + } + + if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) { + /* RBC and MMC devices can return SCSI-3 compliance and yet + * still not support REPORT LUNS, so make them act as + * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is + * specifically set */ + if ((*bflags & BLIST_REPORTLUN2) == 0) + *bflags |= BLIST_NOREPORTLUN; + } + + /* + * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI + * spec says: The device server is capable of supporting the + * specified peripheral device type on this logical unit. However, + * the physical device is not currently connected to this logical + * unit. + * + * The above is vague, as it implies that we could treat 001 and + * 011 the same. Stay compatible with previous code, and create a + * scsi_device for a PQ of 1 + * + * Don't set the device offline here; rather let the upper + * level drivers eval the PQ to decide whether they should + * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check. + */ + + sdev->inq_periph_qual = (inq_result[0] >> 5) & 7; + sdev->lockable = sdev->removable; + sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2); + + if (sdev->scsi_level >= SCSI_3 || + (sdev->inquiry_len > 56 && inq_result[56] & 0x04)) + sdev->ppr = 1; + if (inq_result[7] & 0x60) + sdev->wdtr = 1; + if (inq_result[7] & 0x10) + sdev->sdtr = 1; + + sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d " + "ANSI: %d%s\n", scsi_device_type(sdev->type), + sdev->vendor, sdev->model, sdev->rev, + sdev->inq_periph_qual, inq_result[2] & 0x07, + (inq_result[3] & 0x0f) == 1 ? " CCS" : ""); + + if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) && + !(*bflags & BLIST_NOTQ)) { + sdev->tagged_supported = 1; + sdev->simple_tags = 1; + } + + /* + * Some devices (Texel CD ROM drives) have handshaking problems + * when used with the Seagate controllers. borken is initialized + * to 1, and then set it to 0 here. + */ + if ((*bflags & BLIST_BORKEN) == 0) + sdev->borken = 0; + + if (*bflags & BLIST_NO_ULD_ATTACH) + sdev->no_uld_attach = 1; + + /* + * Apparently some really broken devices (contrary to the SCSI + * standards) need to be selected without asserting ATN + */ + if (*bflags & BLIST_SELECT_NO_ATN) + sdev->select_no_atn = 1; + + /* + * Maximum 512 sector transfer length + * broken RA4x00 Compaq Disk Array + */ + if (*bflags & BLIST_MAX_512) + blk_queue_max_hw_sectors(sdev->request_queue, 512); + /* + * Max 1024 sector transfer length for targets that report incorrect + * max/optimal lengths and relied on the old block layer safe default + */ + else if (*bflags & BLIST_MAX_1024) + blk_queue_max_hw_sectors(sdev->request_queue, 1024); + + /* + * Some devices may not want to have a start command automatically + * issued when a device is added. 
+ */ + if (*bflags & BLIST_NOSTARTONADD) + sdev->no_start_on_add = 1; + + if (*bflags & BLIST_SINGLELUN) + scsi_target(sdev)->single_lun = 1; + + sdev->use_10_for_rw = 1; + + /* some devices don't like REPORT SUPPORTED OPERATION CODES + * and will simply timeout causing sd_mod init to take a very + * very long time */ + if (*bflags & BLIST_NO_RSOC) + sdev->no_report_opcodes = 1; + + /* set the device running here so that slave configure + * may do I/O */ + mutex_lock(&sdev->state_mutex); + ret = scsi_device_set_state(sdev, SDEV_RUNNING); + if (ret) + ret = scsi_device_set_state(sdev, SDEV_BLOCK); + mutex_unlock(&sdev->state_mutex); + + if (ret) { + sdev_printk(KERN_ERR, sdev, + "in wrong state %s to complete scan\n", + scsi_device_state_name(sdev->sdev_state)); + return SCSI_SCAN_NO_RESPONSE; + } + + if (*bflags & BLIST_NOT_LOCKABLE) + sdev->lockable = 0; + + if (*bflags & BLIST_RETRY_HWERROR) + sdev->retry_hwerror = 1; + + if (*bflags & BLIST_NO_DIF) + sdev->no_dif = 1; + + if (*bflags & BLIST_UNMAP_LIMIT_WS) + sdev->unmap_limit_for_ws = 1; + + if (*bflags & BLIST_IGN_MEDIA_CHANGE) + sdev->ignore_media_change = 1; + + sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; + + if (*bflags & BLIST_TRY_VPD_PAGES) + sdev->try_vpd_pages = 1; + else if (*bflags & BLIST_SKIP_VPD_PAGES) + sdev->skip_vpd_pages = 1; + + if (*bflags & BLIST_NO_VPD_SIZE) + sdev->no_vpd_size = 1; + + transport_configure_device(&sdev->sdev_gendev); + + if (sdev->host->hostt->slave_configure) { + ret = sdev->host->hostt->slave_configure(sdev); + if (ret) { + /* + * if LLDD reports slave not present, don't clutter + * console with alloc failure messages + */ + if (ret != -ENXIO) { + sdev_printk(KERN_ERR, sdev, + "failed to configure device\n"); + } + return SCSI_SCAN_NO_RESPONSE; + } + + /* + * The queue_depth is often changed in ->slave_configure. + * Set up budget map again since memory consumption of + * the map depends on actual queue depth. + */ + scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth); + } + + if (sdev->scsi_level >= SCSI_3) + scsi_attach_vpd(sdev); + + scsi_cdl_check(sdev); + + sdev->max_queue_depth = sdev->queue_depth; + WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth); + sdev->sdev_bflags = *bflags; + + /* + * Ok, the device is now all set up, we can + * register it and tell the rest of the kernel + * about it. 
+ */ + if (!async && scsi_sysfs_add_sdev(sdev) != 0) + return SCSI_SCAN_NO_RESPONSE; + + return SCSI_SCAN_LUN_PRESENT; +} + +#ifdef CONFIG_SCSI_LOGGING +/** + * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace + * @buf: Output buffer with at least end-first+1 bytes of space + * @inq: Inquiry buffer (input) + * @first: Offset of string into inq + * @end: Index after last character in inq + */ +static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq, + unsigned first, unsigned end) +{ + unsigned term = 0, idx; + + for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) { + if (inq[idx+first] > ' ') { + buf[idx] = inq[idx+first]; + term = idx+1; + } else { + buf[idx] = ' '; + } + } + buf[term] = 0; + return buf; +} +#endif + +/** + * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it + * @starget: pointer to target device structure + * @lun: LUN of target device + * @bflagsp: store bflags here if not NULL + * @sdevp: probe the LUN corresponding to this scsi_device + * @rescan: if not equal to SCSI_SCAN_INITIAL skip some code only + * needed on first scan + * @hostdata: passed to scsi_alloc_sdev() + * + * Description: + * Call scsi_probe_lun, if a LUN with an attached device is found, + * allocate and set it up by calling scsi_add_lun. + * + * Return: + * + * - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device + * - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is + * attached at the LUN + * - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized + **/ +static int scsi_probe_and_add_lun(struct scsi_target *starget, + u64 lun, blist_flags_t *bflagsp, + struct scsi_device **sdevp, + enum scsi_scan_mode rescan, + void *hostdata) +{ + struct scsi_device *sdev; + unsigned char *result; + blist_flags_t bflags; + int res = SCSI_SCAN_NO_RESPONSE, result_len = 256; + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + + /* + * The rescan flag is used as an optimization, the first scan of a + * host adapter calls into here with rescan == 0. + */ + sdev = scsi_device_lookup_by_target(starget, lun); + if (sdev) { + if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) { + SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, + "scsi scan: device exists on %s\n", + dev_name(&sdev->sdev_gendev))); + if (sdevp) + *sdevp = sdev; + else + scsi_device_put(sdev); + + if (bflagsp) + *bflagsp = scsi_get_device_flags(sdev, + sdev->vendor, + sdev->model); + return SCSI_SCAN_LUN_PRESENT; + } + scsi_device_put(sdev); + } else + sdev = scsi_alloc_sdev(starget, lun, hostdata); + if (!sdev) + goto out; + + result = kmalloc(result_len, GFP_KERNEL); + if (!result) + goto out_free_sdev; + + if (scsi_probe_lun(sdev, result, result_len, &bflags)) + goto out_free_result; + + if (bflagsp) + *bflagsp = bflags; + /* + * result contains valid SCSI INQUIRY data. + */ + if ((result[0] >> 5) == 3) { + /* + * For a Peripheral qualifier 3 (011b), the SCSI + * spec says: The device server is not capable of + * supporting a physical device on this logical + * unit. + * + * For disks, this implies that there is no + * logical disk configured at sdev->lun, but there + * is a target id responding. + */ + SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:" + " peripheral qualifier of 3, device not" + " added\n")) + if (lun == 0) { + SCSI_LOG_SCAN_BUS(1, { + unsigned char vend[9]; + unsigned char mod[17]; + + sdev_printk(KERN_INFO, sdev, + "scsi scan: consider passing scsi_mod." 
+ "dev_flags=%s:%s:0x240 or 0x1000240\n", + scsi_inq_str(vend, result, 8, 16), + scsi_inq_str(mod, result, 16, 32)); + }); + + } + + res = SCSI_SCAN_TARGET_PRESENT; + goto out_free_result; + } + + /* + * Some targets may set slight variations of PQ and PDT to signal + * that no LUN is present, so don't add sdev in these cases. + * Two specific examples are: + * 1) NetApp targets: return PQ=1, PDT=0x1f + * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved" + * in the UFI 1.0 spec (we cannot rely on reserved bits). + * + * References: + * 1) SCSI SPC-3, pp. 145-146 + * PQ=1: "A peripheral device having the specified peripheral + * device type is not connected to this logical unit. However, the + * device server is capable of supporting the specified peripheral + * device type on this logical unit." + * PDT=0x1f: "Unknown or no device type" + * 2) USB UFI 1.0, p. 20 + * PDT=00h Direct-access device (floppy) + * PDT=1Fh none (no FDD connected to the requested logical unit) + */ + if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) && + (result[0] & 0x1f) == 0x1f && + !scsi_is_wlun(lun)) { + SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, + "scsi scan: peripheral device type" + " of 31, no device added\n")); + res = SCSI_SCAN_TARGET_PRESENT; + goto out_free_result; + } + + res = scsi_add_lun(sdev, result, &bflags, shost->async_scan); + if (res == SCSI_SCAN_LUN_PRESENT) { + if (bflags & BLIST_KEY) { + sdev->lockable = 0; + scsi_unlock_floptical(sdev, result); + } + } + + out_free_result: + kfree(result); + out_free_sdev: + if (res == SCSI_SCAN_LUN_PRESENT) { + if (sdevp) { + if (scsi_device_get(sdev) == 0) { + *sdevp = sdev; + } else { + __scsi_remove_device(sdev); + res = SCSI_SCAN_NO_RESPONSE; + } + } + } else + __scsi_remove_device(sdev); + out: + return res; +} + +/** + * scsi_sequential_lun_scan - sequentially scan a SCSI target + * @starget: pointer to target structure to scan + * @bflags: black/white list flag for LUN 0 + * @scsi_level: Which version of the standard does this device adhere to + * @rescan: passed to scsi_probe_add_lun() + * + * Description: + * Generally, scan from LUN 1 (LUN 0 is assumed to already have been + * scanned) to some maximum lun until a LUN is found with no device + * attached. Use the bflags to figure out any oddities. + * + * Modifies sdevscan->lun. + **/ +static void scsi_sequential_lun_scan(struct scsi_target *starget, + blist_flags_t bflags, int scsi_level, + enum scsi_scan_mode rescan) +{ + uint max_dev_lun; + u64 sparse_lun, lun; + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + + SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget, + "scsi scan: Sequential scan\n")); + + max_dev_lun = min(max_scsi_luns, shost->max_lun); + /* + * If this device is known to support sparse multiple units, + * override the other settings, and scan all of them. Normally, + * SCSI-3 devices should be scanned via the REPORT LUNS. + */ + if (bflags & BLIST_SPARSELUN) { + max_dev_lun = shost->max_lun; + sparse_lun = 1; + } else + sparse_lun = 0; + + /* + * If less than SCSI_1_CCS, and no special lun scanning, stop + * scanning; this matches 2.4 behaviour, but could just be a bug + * (to continue scanning a SCSI_1_CCS device). + * + * This test is broken. We might not have any device on lun0 for + * a sparselun device, and if that's the case then how would we + * know the real scsi_level, eh? 
It might make sense to just not + * scan any SCSI_1 device for non-0 luns, but that check would best + * go into scsi_alloc_sdev() and just have it return null when asked + * to alloc an sdev for lun > 0 on an already found SCSI_1 device. + * + if ((sdevscan->scsi_level < SCSI_1_CCS) && + ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN)) + == 0)) + return; + */ + /* + * If this device is known to support multiple units, override + * the other settings, and scan all of them. + */ + if (bflags & BLIST_FORCELUN) + max_dev_lun = shost->max_lun; + /* + * REGAL CDC-4X: avoid hang after LUN 4 + */ + if (bflags & BLIST_MAX5LUN) + max_dev_lun = min(5U, max_dev_lun); + /* + * Do not scan SCSI-2 or lower device past LUN 7, unless + * BLIST_LARGELUN. + */ + if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN)) + max_dev_lun = min(8U, max_dev_lun); + else + max_dev_lun = min(256U, max_dev_lun); + + /* + * We have already scanned LUN 0, so start at LUN 1. Keep scanning + * until we reach the max, or no LUN is found and we are not + * sparse_lun. + */ + for (lun = 1; lun < max_dev_lun; ++lun) + if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, + NULL) != SCSI_SCAN_LUN_PRESENT) && + !sparse_lun) + return; +} + +/** + * scsi_report_lun_scan - Scan using SCSI REPORT LUN results + * @starget: which target + * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN + * @rescan: nonzero if we can skip code only needed on first scan + * + * Description: + * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command. + * Scan the resulting list of LUNs by calling scsi_probe_and_add_lun. + * + * If BLIST_REPORTLUN2 is set, scan a target that supports more than 8 + * LUNs even if it's older than SCSI-3. + * If BLIST_NOREPORTLUN is set, return 1 always. + * If BLIST_NOLUN is set, return 0 always. + * If starget->no_report_luns is set, return 1 always. + * + * Return: + * 0: scan completed (or no memory, so further scanning is futile) + * 1: could not scan with REPORT LUN + **/ +static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags, + enum scsi_scan_mode rescan) +{ + unsigned char scsi_cmd[MAX_COMMAND_SIZE]; + unsigned int length; + u64 lun; + unsigned int num_luns; + unsigned int retries; + int result; + struct scsi_lun *lunp, *lun_data; + struct scsi_sense_hdr sshdr; + struct scsi_device *sdev; + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + int ret = 0; + + /* + * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set. + * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does + * support more than 8 LUNs. + * Don't attempt if the target doesn't support REPORT LUNS. + */ + if (bflags & BLIST_NOREPORTLUN) + return 1; + if (starget->scsi_level < SCSI_2 && + starget->scsi_level != SCSI_UNKNOWN) + return 1; + if (starget->scsi_level < SCSI_3 && + (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) + return 1; + if (bflags & BLIST_NOLUN) + return 0; + if (starget->no_report_luns) + return 1; + + if (!(sdev = scsi_device_lookup_by_target(starget, 0))) { + sdev = scsi_alloc_sdev(starget, 0, NULL); + if (!sdev) + return 0; + if (scsi_device_get(sdev)) { + __scsi_remove_device(sdev); + return 0; + } + } + + /* + * Allocate enough to hold the header (the same size as one scsi_lun) + * plus the number of luns we are requesting. 511 was the default + * value of the now removed max_report_luns parameter. 
+ */ + length = (511 + 1) * sizeof(struct scsi_lun); +retry: + lun_data = kmalloc(length, GFP_KERNEL); + if (!lun_data) { + printk(ALLOC_FAILURE_MSG, __func__); + goto out; + } + + scsi_cmd[0] = REPORT_LUNS; + + /* + * bytes 1 - 5: reserved, set to zero. + */ + memset(&scsi_cmd[1], 0, 5); + + /* + * bytes 6 - 9: length of the command. + */ + put_unaligned_be32(length, &scsi_cmd[6]); + + scsi_cmd[10] = 0; /* reserved */ + scsi_cmd[11] = 0; /* control */ + + /* + * We can get a UNIT ATTENTION, for example a power on/reset, so + * retry a few times (like sd.c does for TEST UNIT READY). + * Experience shows some combinations of adapter/devices get at + * least two power on/resets. + * + * Illegal requests (for devices that do not support REPORT LUNS) + * should come through as a check condition, and will not generate + * a retry. + */ + for (retries = 0; retries < 3; retries++) { + SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, + "scsi scan: Sending REPORT LUNS to (try %d)\n", + retries)); + + result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, + lun_data, length, + SCSI_REPORT_LUNS_TIMEOUT, 3, + &exec_args); + + SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, + "scsi scan: REPORT LUNS" + " %s (try %d) result 0x%x\n", + result ? "failed" : "successful", + retries, result)); + if (result == 0) + break; + else if (scsi_sense_valid(&sshdr)) { + if (sshdr.sense_key != UNIT_ATTENTION) + break; + } + } + + if (result) { + /* + * The device probably does not support a REPORT LUN command + */ + ret = 1; + goto out_err; + } + + /* + * Get the length from the first four bytes of lun_data. + */ + if (get_unaligned_be32(lun_data->scsi_lun) + + sizeof(struct scsi_lun) > length) { + length = get_unaligned_be32(lun_data->scsi_lun) + + sizeof(struct scsi_lun); + kfree(lun_data); + goto retry; + } + length = get_unaligned_be32(lun_data->scsi_lun); + + num_luns = (length / sizeof(struct scsi_lun)); + + SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, + "scsi scan: REPORT LUN scan\n")); + + /* + * Scan the luns in lun_data. The entry at offset 0 is really + * the header, so start at 1 and go up to and including num_luns. + */ + for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) { + lun = scsilun_to_int(lunp); + + if (lun > sdev->host->max_lun) { + sdev_printk(KERN_WARNING, sdev, + "lun%llu has a LUN larger than" + " allowed by the host adapter\n", lun); + } else { + int res; + + res = scsi_probe_and_add_lun(starget, + lun, NULL, NULL, rescan, NULL); + if (res == SCSI_SCAN_NO_RESPONSE) { + /* + * Got some results, but now none, abort. 
+ */ + sdev_printk(KERN_ERR, sdev, + "Unexpected response" + " from lun %llu while scanning, scan" + " aborted\n", (unsigned long long)lun); + break; + } + } + } + + out_err: + kfree(lun_data); + out: + if (scsi_device_created(sdev)) + /* + * the sdev we used didn't appear in the report luns scan + */ + __scsi_remove_device(sdev); + scsi_device_put(sdev); + return ret; +} + +struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel, + uint id, u64 lun, void *hostdata) +{ + struct scsi_device *sdev = ERR_PTR(-ENODEV); + struct device *parent = &shost->shost_gendev; + struct scsi_target *starget; + + if (strncmp(scsi_scan_type, "none", 4) == 0) + return ERR_PTR(-ENODEV); + + starget = scsi_alloc_target(parent, channel, id); + if (!starget) + return ERR_PTR(-ENOMEM); + scsi_autopm_get_target(starget); + + mutex_lock(&shost->scan_mutex); + if (!shost->async_scan) + scsi_complete_async_scans(); + + if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { + scsi_probe_and_add_lun(starget, lun, NULL, &sdev, + SCSI_SCAN_RESCAN, hostdata); + scsi_autopm_put_host(shost); + } + mutex_unlock(&shost->scan_mutex); + scsi_autopm_put_target(starget); + /* + * paired with scsi_alloc_target(). Target will be destroyed unless + * scsi_probe_and_add_lun made an underlying device visible + */ + scsi_target_reap(starget); + put_device(&starget->dev); + + return sdev; +} +EXPORT_SYMBOL(__scsi_add_device); + +int scsi_add_device(struct Scsi_Host *host, uint channel, + uint target, u64 lun) +{ + struct scsi_device *sdev = + __scsi_add_device(host, channel, target, lun, NULL); + if (IS_ERR(sdev)) + return PTR_ERR(sdev); + + scsi_device_put(sdev); + return 0; +} +EXPORT_SYMBOL(scsi_add_device); + +int scsi_rescan_device(struct scsi_device *sdev) +{ + struct device *dev = &sdev->sdev_gendev; + int ret = 0; + + device_lock(dev); + + /* + * Bail out if the device or its queue are not running. Otherwise, + * the rescan may block waiting for commands to be executed, with us + * holding the device lock. This can result in a potential deadlock + * in the power management core code when system resume is on-going. + */ + if (sdev->sdev_state != SDEV_RUNNING || + blk_queue_pm_only(sdev->request_queue)) { + ret = -EWOULDBLOCK; + goto unlock; + } + + scsi_attach_vpd(sdev); + scsi_cdl_check(sdev); + + if (sdev->handler && sdev->handler->rescan) + sdev->handler->rescan(sdev); + + if (dev->driver && try_module_get(dev->driver->owner)) { + struct scsi_driver *drv = to_scsi_driver(dev->driver); + + if (drv->rescan) + drv->rescan(dev); + module_put(dev->driver->owner); + } + +unlock: + device_unlock(dev); + + return ret; +} +EXPORT_SYMBOL(scsi_rescan_device); + +static void __scsi_scan_target(struct device *parent, unsigned int channel, + unsigned int id, u64 lun, enum scsi_scan_mode rescan) +{ + struct Scsi_Host *shost = dev_to_shost(parent); + blist_flags_t bflags = 0; + int res; + struct scsi_target *starget; + + if (shost->this_id == id) + /* + * Don't scan the host adapter + */ + return; + + starget = scsi_alloc_target(parent, channel, id); + if (!starget) + return; + scsi_autopm_get_target(starget); + + if (lun != SCAN_WILD_CARD) { + /* + * Scan for a specific host/chan/id/lun. + */ + scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL); + goto out_reap; + } + + /* + * Scan LUN 0, if there is some response, scan further. Ideally, we + * would not configure LUN 0 until all LUNs are scanned. 
+ */ + res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL); + if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) { + if (scsi_report_lun_scan(starget, bflags, rescan) != 0) + /* + * The REPORT LUN did not scan the target, + * do a sequential scan. + */ + scsi_sequential_lun_scan(starget, bflags, + starget->scsi_level, rescan); + } + + out_reap: + scsi_autopm_put_target(starget); + /* + * paired with scsi_alloc_target(): determine if the target has + * any children at all and if not, nuke it + */ + scsi_target_reap(starget); + + put_device(&starget->dev); +} + +/** + * scsi_scan_target - scan a target id, possibly including all LUNs on the target. + * @parent: host to scan + * @channel: channel to scan + * @id: target id to scan + * @lun: Specific LUN to scan or SCAN_WILD_CARD + * @rescan: passed to LUN scanning routines; SCSI_SCAN_INITIAL for + * no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs, + * and SCSI_SCAN_MANUAL to force scanning even if + * 'scan=manual' is set. + * + * Description: + * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0, + * and possibly all LUNs on the target id. + * + * First try a REPORT LUN scan, if that does not scan the target, do a + * sequential scan of LUNs on the target id. + **/ +void scsi_scan_target(struct device *parent, unsigned int channel, + unsigned int id, u64 lun, enum scsi_scan_mode rescan) +{ + struct Scsi_Host *shost = dev_to_shost(parent); + + if (strncmp(scsi_scan_type, "none", 4) == 0) + return; + + if (rescan != SCSI_SCAN_MANUAL && + strncmp(scsi_scan_type, "manual", 6) == 0) + return; + + mutex_lock(&shost->scan_mutex); + if (!shost->async_scan) + scsi_complete_async_scans(); + + if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { + __scsi_scan_target(parent, channel, id, lun, rescan); + scsi_autopm_put_host(shost); + } + mutex_unlock(&shost->scan_mutex); +} +EXPORT_SYMBOL(scsi_scan_target); + +static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, + unsigned int id, u64 lun, + enum scsi_scan_mode rescan) +{ + uint order_id; + + if (id == SCAN_WILD_CARD) + for (id = 0; id < shost->max_id; ++id) { + /* + * XXX adapter drivers when possible (FCP, iSCSI) + * could modify max_id to match the current max, + * not the absolute max. + * + * XXX add a shost id iterator, so for example, + * the FC ID can be the same as a target id + * without a huge overhead of sparse id's. + */ + if (shost->reverse_ordering) + /* + * Scan from high to low id. 
+ */ + order_id = shost->max_id - id - 1; + else + order_id = id; + __scsi_scan_target(&shost->shost_gendev, channel, + order_id, lun, rescan); + } + else + __scsi_scan_target(&shost->shost_gendev, channel, + id, lun, rescan); +} + +int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, + unsigned int id, u64 lun, + enum scsi_scan_mode rescan) +{ + SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost, + "%s: <%u:%u:%llu>\n", + __func__, channel, id, lun)); + + if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || + ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) || + ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun))) + return -EINVAL; + + mutex_lock(&shost->scan_mutex); + if (!shost->async_scan) + scsi_complete_async_scans(); + + if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { + if (channel == SCAN_WILD_CARD) + for (channel = 0; channel <= shost->max_channel; + channel++) + scsi_scan_channel(shost, channel, id, lun, + rescan); + else + scsi_scan_channel(shost, channel, id, lun, rescan); + scsi_autopm_put_host(shost); + } + mutex_unlock(&shost->scan_mutex); + + return 0; +} + +static void scsi_sysfs_add_devices(struct Scsi_Host *shost) +{ + struct scsi_device *sdev; + shost_for_each_device(sdev, shost) { + /* target removed before the device could be added */ + if (sdev->sdev_state == SDEV_DEL) + continue; + /* If device is already visible, skip adding it to sysfs */ + if (sdev->is_visible) + continue; + if (!scsi_host_scan_allowed(shost) || + scsi_sysfs_add_sdev(sdev) != 0) + __scsi_remove_device(sdev); + } +} + +/** + * scsi_prep_async_scan - prepare for an async scan + * @shost: the host which will be scanned + * Returns: a cookie to be passed to scsi_finish_async_scan() + * + * Tells the midlayer this host is going to do an asynchronous scan. + * It reserves the host's position in the scanning list and ensures + * that other asynchronous scans started after this one won't affect the + * ordering of the discovered devices. + */ +static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost) +{ + struct async_scan_data *data = NULL; + unsigned long flags; + + if (strncmp(scsi_scan_type, "sync", 4) == 0) + return NULL; + + mutex_lock(&shost->scan_mutex); + if (shost->async_scan) { + shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__); + goto err; + } + + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto err; + data->shost = scsi_host_get(shost); + if (!data->shost) + goto err; + init_completion(&data->prev_finished); + + spin_lock_irqsave(shost->host_lock, flags); + shost->async_scan = 1; + spin_unlock_irqrestore(shost->host_lock, flags); + mutex_unlock(&shost->scan_mutex); + + spin_lock(&async_scan_lock); + if (list_empty(&scanning_hosts)) + complete(&data->prev_finished); + list_add_tail(&data->list, &scanning_hosts); + spin_unlock(&async_scan_lock); + + return data; + + err: + mutex_unlock(&shost->scan_mutex); + kfree(data); + return NULL; +} + +/** + * scsi_finish_async_scan - asynchronous scan has finished + * @data: cookie returned from earlier call to scsi_prep_async_scan() + * + * All the devices currently attached to this host have been found. + * This function announces all the devices it has found to the rest + * of the system. 
+ */ +static void scsi_finish_async_scan(struct async_scan_data *data) +{ + struct Scsi_Host *shost; + unsigned long flags; + + if (!data) + return; + + shost = data->shost; + + mutex_lock(&shost->scan_mutex); + + if (!shost->async_scan) { + shost_printk(KERN_INFO, shost, "%s called twice\n", __func__); + dump_stack(); + mutex_unlock(&shost->scan_mutex); + return; + } + + wait_for_completion(&data->prev_finished); + + scsi_sysfs_add_devices(shost); + + spin_lock_irqsave(shost->host_lock, flags); + shost->async_scan = 0; + spin_unlock_irqrestore(shost->host_lock, flags); + + mutex_unlock(&shost->scan_mutex); + + spin_lock(&async_scan_lock); + list_del(&data->list); + if (!list_empty(&scanning_hosts)) { + struct async_scan_data *next = list_entry(scanning_hosts.next, + struct async_scan_data, list); + complete(&next->prev_finished); + } + spin_unlock(&async_scan_lock); + + scsi_autopm_put_host(shost); + scsi_host_put(shost); + kfree(data); +} + +static void do_scsi_scan_host(struct Scsi_Host *shost) +{ + if (shost->hostt->scan_finished) { + unsigned long start = jiffies; + if (shost->hostt->scan_start) + shost->hostt->scan_start(shost); + + while (!shost->hostt->scan_finished(shost, jiffies - start)) + msleep(10); + } else { + scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD, + SCAN_WILD_CARD, SCSI_SCAN_INITIAL); + } +} + +static void do_scan_async(void *_data, async_cookie_t c) +{ + struct async_scan_data *data = _data; + struct Scsi_Host *shost = data->shost; + + do_scsi_scan_host(shost); + scsi_finish_async_scan(data); +} + +/** + * scsi_scan_host - scan the given adapter + * @shost: adapter to scan + **/ +void scsi_scan_host(struct Scsi_Host *shost) +{ + struct async_scan_data *data; + + if (strncmp(scsi_scan_type, "none", 4) == 0 || + strncmp(scsi_scan_type, "manual", 6) == 0) + return; + if (scsi_autopm_get_host(shost) < 0) + return; + + data = scsi_prep_async_scan(shost); + if (!data) { + do_scsi_scan_host(shost); + scsi_autopm_put_host(shost); + return; + } + + /* register with the async subsystem so wait_for_device_probe() + * will flush this work + */ + async_schedule(do_scan_async, data); + + /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */ +} +EXPORT_SYMBOL(scsi_scan_host); + +void scsi_forget_host(struct Scsi_Host *shost) +{ + struct scsi_device *sdev; + unsigned long flags; + + restart: + spin_lock_irqsave(shost->host_lock, flags); + list_for_each_entry(sdev, &shost->__devices, siblings) { + if (sdev->sdev_state == SDEV_DEL) + continue; + spin_unlock_irqrestore(shost->host_lock, flags); + __scsi_remove_device(sdev); + goto restart; + } + spin_unlock_irqrestore(shost->host_lock, flags); +} + diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c new file mode 100644 index 000000000..7f0914ea1 --- /dev/null +++ b/drivers/scsi/scsi_sysctl.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2003 Christoph Hellwig. 
+ */ + +#include +#include +#include +#include + +#include "scsi_logging.h" +#include "scsi_priv.h" + + +static struct ctl_table scsi_table[] = { + { .procname = "logging_level", + .data = &scsi_logging_level, + .maxlen = sizeof(scsi_logging_level), + .mode = 0644, + .proc_handler = proc_dointvec }, + { } +}; + +static struct ctl_table_header *scsi_table_header; + +int __init scsi_init_sysctl(void) +{ + scsi_table_header = register_sysctl("dev/scsi", scsi_table); + if (!scsi_table_header) + return -ENOMEM; + return 0; +} + +void scsi_exit_sysctl(void) +{ + unregister_sysctl_table(scsi_table_header); +} diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c new file mode 100644 index 000000000..24f6eefb6 --- /dev/null +++ b/drivers/scsi/scsi_sysfs.c @@ -0,0 +1,1689 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * scsi_sysfs.c + * + * SCSI sysfs interface routines. + * + * Created to pull SCSI mid layer sysfs routines into one file. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "scsi_priv.h" +#include "scsi_logging.h" + +static struct device_type scsi_dev_type; + +static const struct { + enum scsi_device_state value; + char *name; +} sdev_states[] = { + { SDEV_CREATED, "created" }, + { SDEV_RUNNING, "running" }, + { SDEV_CANCEL, "cancel" }, + { SDEV_DEL, "deleted" }, + { SDEV_QUIESCE, "quiesce" }, + { SDEV_OFFLINE, "offline" }, + { SDEV_TRANSPORT_OFFLINE, "transport-offline" }, + { SDEV_BLOCK, "blocked" }, + { SDEV_CREATED_BLOCK, "created-blocked" }, +}; + +const char *scsi_device_state_name(enum scsi_device_state state) +{ + int i; + char *name = NULL; + + for (i = 0; i < ARRAY_SIZE(sdev_states); i++) { + if (sdev_states[i].value == state) { + name = sdev_states[i].name; + break; + } + } + return name; +} + +static const struct { + enum scsi_host_state value; + char *name; +} shost_states[] = { + { SHOST_CREATED, "created" }, + { SHOST_RUNNING, "running" }, + { SHOST_CANCEL, "cancel" }, + { SHOST_DEL, "deleted" }, + { SHOST_RECOVERY, "recovery" }, + { SHOST_CANCEL_RECOVERY, "cancel/recovery" }, + { SHOST_DEL_RECOVERY, "deleted/recovery", }, +}; +const char *scsi_host_state_name(enum scsi_host_state state) +{ + int i; + char *name = NULL; + + for (i = 0; i < ARRAY_SIZE(shost_states); i++) { + if (shost_states[i].value == state) { + name = shost_states[i].name; + break; + } + } + return name; +} + +#ifdef CONFIG_SCSI_DH +static const struct { + unsigned char value; + char *name; +} sdev_access_states[] = { + { SCSI_ACCESS_STATE_OPTIMAL, "active/optimized" }, + { SCSI_ACCESS_STATE_ACTIVE, "active/non-optimized" }, + { SCSI_ACCESS_STATE_STANDBY, "standby" }, + { SCSI_ACCESS_STATE_UNAVAILABLE, "unavailable" }, + { SCSI_ACCESS_STATE_LBA, "lba-dependent" }, + { SCSI_ACCESS_STATE_OFFLINE, "offline" }, + { SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" }, +}; + +static const char *scsi_access_state_name(unsigned char state) +{ + int i; + char *name = NULL; + + for (i = 0; i < ARRAY_SIZE(sdev_access_states); i++) { + if (sdev_access_states[i].value == state) { + name = sdev_access_states[i].name; + break; + } + } + return name; +} +#endif + +static int check_set(unsigned long long *val, char *src) +{ + char *last; + + if (strcmp(src, "-") == 0) { + *val = SCAN_WILD_CARD; + } else { + /* + * Doesn't check for int overflow + */ + *val = simple_strtoull(src, &last, 0); + if (*last != '\0') + return 1; + } + return 0; +} + +static int scsi_scan(struct Scsi_Host 
*shost, const char *str) +{ + char s1[15], s2[15], s3[17], junk; + unsigned long long channel, id, lun; + int res; + + res = sscanf(str, "%10s %10s %16s %c", s1, s2, s3, &junk); + if (res != 3) + return -EINVAL; + if (check_set(&channel, s1)) + return -EINVAL; + if (check_set(&id, s2)) + return -EINVAL; + if (check_set(&lun, s3)) + return -EINVAL; + if (shost->transportt->user_scan) + res = shost->transportt->user_scan(shost, channel, id, lun); + else + res = scsi_scan_host_selected(shost, channel, id, lun, + SCSI_SCAN_MANUAL); + return res; +} + +/* + * shost_show_function: macro to create an attr function that can be used to + * show a non-bit field. + */ +#define shost_show_function(name, field, format_string) \ +static ssize_t \ +show_##name (struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct Scsi_Host *shost = class_to_shost(dev); \ + return snprintf (buf, 20, format_string, shost->field); \ +} + +/* + * shost_rd_attr: macro to create a function and attribute variable for a + * read only field. + */ +#define shost_rd_attr2(name, field, format_string) \ + shost_show_function(name, field, format_string) \ +static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); + +#define shost_rd_attr(field, format_string) \ +shost_rd_attr2(field, field, format_string) + +/* + * Create the actual show/store functions and data structures. + */ + +static ssize_t +store_scan(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + int res; + + res = scsi_scan(shost, buf); + if (res == 0) + res = count; + return res; +}; +static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan); + +static ssize_t +store_shost_state(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int i; + struct Scsi_Host *shost = class_to_shost(dev); + enum scsi_host_state state = 0; + + for (i = 0; i < ARRAY_SIZE(shost_states); i++) { + const int len = strlen(shost_states[i].name); + if (strncmp(shost_states[i].name, buf, len) == 0 && + buf[len] == '\n') { + state = shost_states[i].value; + break; + } + } + if (!state) + return -EINVAL; + + if (scsi_host_set_state(shost, state)) + return -EINVAL; + return count; +} + +static ssize_t +show_shost_state(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + const char *name = scsi_host_state_name(shost->shost_state); + + if (!name) + return -EINVAL; + + return snprintf(buf, 20, "%s\n", name); +} + +/* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */ +static struct device_attribute dev_attr_hstate = + __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state); + +static ssize_t +show_shost_mode(unsigned int mode, char *buf) +{ + ssize_t len = 0; + + if (mode & MODE_INITIATOR) + len = sprintf(buf, "%s", "Initiator"); + + if (mode & MODE_TARGET) + len += sprintf(buf + len, "%s%s", len ? 
", " : "", "Target"); + + len += sprintf(buf + len, "\n"); + + return len; +} + +static ssize_t +show_shost_supported_mode(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + unsigned int supported_mode = shost->hostt->supported_mode; + + if (supported_mode == MODE_UNKNOWN) + /* by default this should be initiator */ + supported_mode = MODE_INITIATOR; + + return show_shost_mode(supported_mode, buf); +} + +static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL); + +static ssize_t +show_shost_active_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + + if (shost->active_mode == MODE_UNKNOWN) + return snprintf(buf, 20, "unknown\n"); + else + return show_shost_mode(shost->active_mode, buf); +} + +static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL); + +static int check_reset_type(const char *str) +{ + if (sysfs_streq(str, "adapter")) + return SCSI_ADAPTER_RESET; + else if (sysfs_streq(str, "firmware")) + return SCSI_FIRMWARE_RESET; + else + return 0; +} + +static ssize_t +store_host_reset(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + const struct scsi_host_template *sht = shost->hostt; + int ret = -EINVAL; + int type; + + type = check_reset_type(buf); + if (!type) + goto exit_store_host_reset; + + if (sht->host_reset) + ret = sht->host_reset(shost, type); + else + ret = -EOPNOTSUPP; + +exit_store_host_reset: + if (ret == 0) + ret = count; + return ret; +} + +static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset); + +static ssize_t +show_shost_eh_deadline(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + + if (shost->eh_deadline == -1) + return snprintf(buf, strlen("off") + 2, "off\n"); + return sprintf(buf, "%u\n", shost->eh_deadline / HZ); +} + +static ssize_t +store_shost_eh_deadline(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + int ret = -EINVAL; + unsigned long deadline, flags; + + if (shost->transportt && + (shost->transportt->eh_strategy_handler || + !shost->hostt->eh_host_reset_handler)) + return ret; + + if (!strncmp(buf, "off", strlen("off"))) + deadline = -1; + else { + ret = kstrtoul(buf, 10, &deadline); + if (ret) + return ret; + if (deadline * HZ > UINT_MAX) + return -EINVAL; + } + + spin_lock_irqsave(shost->host_lock, flags); + if (scsi_host_in_recovery(shost)) + ret = -EBUSY; + else { + if (deadline == -1) + shost->eh_deadline = -1; + else + shost->eh_deadline = deadline * HZ; + + ret = count; + } + spin_unlock_irqrestore(shost->host_lock, flags); + + return ret; +} + +static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline); + +shost_rd_attr(unique_id, "%u\n"); +shost_rd_attr(cmd_per_lun, "%hd\n"); +shost_rd_attr(can_queue, "%d\n"); +shost_rd_attr(sg_tablesize, "%hu\n"); +shost_rd_attr(sg_prot_tablesize, "%hu\n"); +shost_rd_attr(prot_capabilities, "%u\n"); +shost_rd_attr(prot_guard_type, "%hd\n"); +shost_rd_attr2(proc_name, hostt->proc_name, "%s\n"); + +static ssize_t +show_host_busy(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + return snprintf(buf, 20, "%d\n", scsi_host_busy(shost)); +} +static DEVICE_ATTR(host_busy, 
S_IRUGO, show_host_busy, NULL); + +static ssize_t +show_use_blk_mq(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} +static DEVICE_ATTR(use_blk_mq, S_IRUGO, show_use_blk_mq, NULL); + +static ssize_t +show_nr_hw_queues(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct blk_mq_tag_set *tag_set = &shost->tag_set; + + return snprintf(buf, 20, "%d\n", tag_set->nr_hw_queues); +} +static DEVICE_ATTR(nr_hw_queues, S_IRUGO, show_nr_hw_queues, NULL); + +static struct attribute *scsi_sysfs_shost_attrs[] = { + &dev_attr_use_blk_mq.attr, + &dev_attr_unique_id.attr, + &dev_attr_host_busy.attr, + &dev_attr_cmd_per_lun.attr, + &dev_attr_can_queue.attr, + &dev_attr_sg_tablesize.attr, + &dev_attr_sg_prot_tablesize.attr, + &dev_attr_proc_name.attr, + &dev_attr_scan.attr, + &dev_attr_hstate.attr, + &dev_attr_supported_mode.attr, + &dev_attr_active_mode.attr, + &dev_attr_prot_capabilities.attr, + &dev_attr_prot_guard_type.attr, + &dev_attr_host_reset.attr, + &dev_attr_eh_deadline.attr, + &dev_attr_nr_hw_queues.attr, + NULL +}; + +static const struct attribute_group scsi_shost_attr_group = { + .attrs = scsi_sysfs_shost_attrs, +}; + +const struct attribute_group *scsi_shost_groups[] = { + &scsi_shost_attr_group, + NULL +}; + +static void scsi_device_cls_release(struct device *class_dev) +{ + struct scsi_device *sdev; + + sdev = class_to_sdev(class_dev); + put_device(&sdev->sdev_gendev); +} + +static void scsi_device_dev_release(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct device *parent; + struct list_head *this, *tmp; + struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL; + struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL; + struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL; + unsigned long flags; + + might_sleep(); + + scsi_dh_release_device(sdev); + + parent = sdev->sdev_gendev.parent; + + spin_lock_irqsave(sdev->host->host_lock, flags); + list_del(&sdev->siblings); + list_del(&sdev->same_target_siblings); + list_del(&sdev->starved_entry); + spin_unlock_irqrestore(sdev->host->host_lock, flags); + + cancel_work_sync(&sdev->event_work); + + list_for_each_safe(this, tmp, &sdev->event_list) { + struct scsi_event *evt; + + evt = list_entry(this, struct scsi_event, node); + list_del(&evt->node); + kfree(evt); + } + + blk_put_queue(sdev->request_queue); + /* NULL queue means the device can't be used */ + sdev->request_queue = NULL; + + sbitmap_free(&sdev->budget_map); + + mutex_lock(&sdev->inquiry_mutex); + vpd_pg0 = rcu_replace_pointer(sdev->vpd_pg0, vpd_pg0, + lockdep_is_held(&sdev->inquiry_mutex)); + vpd_pg80 = rcu_replace_pointer(sdev->vpd_pg80, vpd_pg80, + lockdep_is_held(&sdev->inquiry_mutex)); + vpd_pg83 = rcu_replace_pointer(sdev->vpd_pg83, vpd_pg83, + lockdep_is_held(&sdev->inquiry_mutex)); + vpd_pg89 = rcu_replace_pointer(sdev->vpd_pg89, vpd_pg89, + lockdep_is_held(&sdev->inquiry_mutex)); + vpd_pgb0 = rcu_replace_pointer(sdev->vpd_pgb0, vpd_pgb0, + lockdep_is_held(&sdev->inquiry_mutex)); + vpd_pgb1 = rcu_replace_pointer(sdev->vpd_pgb1, vpd_pgb1, + lockdep_is_held(&sdev->inquiry_mutex)); + vpd_pgb2 = rcu_replace_pointer(sdev->vpd_pgb2, vpd_pgb2, + lockdep_is_held(&sdev->inquiry_mutex)); + mutex_unlock(&sdev->inquiry_mutex); + + if (vpd_pg0) + kfree_rcu(vpd_pg0, rcu); + if (vpd_pg83) + kfree_rcu(vpd_pg83, rcu); + if (vpd_pg80) + kfree_rcu(vpd_pg80, rcu); + if (vpd_pg89) + kfree_rcu(vpd_pg89, rcu); + if (vpd_pgb0) + kfree_rcu(vpd_pgb0, 
rcu); + if (vpd_pgb1) + kfree_rcu(vpd_pgb1, rcu); + if (vpd_pgb2) + kfree_rcu(vpd_pgb2, rcu); + kfree(sdev->inquiry); + kfree(sdev); + + if (parent) + put_device(parent); +} + +static struct class sdev_class = { + .name = "scsi_device", + .dev_release = scsi_device_cls_release, +}; + +/* all probing is done in the individual ->probe routines */ +static int scsi_bus_match(struct device *dev, struct device_driver *gendrv) +{ + struct scsi_device *sdp; + + if (dev->type != &scsi_dev_type) + return 0; + + sdp = to_scsi_device(dev); + if (sdp->no_uld_attach) + return 0; + return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0; +} + +static int scsi_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) +{ + const struct scsi_device *sdev; + + if (dev->type != &scsi_dev_type) + return 0; + + sdev = to_scsi_device(dev); + + add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type); + return 0; +} + +struct bus_type scsi_bus_type = { + .name = "scsi", + .match = scsi_bus_match, + .uevent = scsi_bus_uevent, +#ifdef CONFIG_PM + .pm = &scsi_bus_pm_ops, +#endif +}; + +int scsi_sysfs_register(void) +{ + int error; + + error = bus_register(&scsi_bus_type); + if (!error) { + error = class_register(&sdev_class); + if (error) + bus_unregister(&scsi_bus_type); + } + + return error; +} + +void scsi_sysfs_unregister(void) +{ + class_unregister(&sdev_class); + bus_unregister(&scsi_bus_type); +} + +/* + * sdev_show_function: macro to create an attr function that can be used to + * show a non-bit field. + */ +#define sdev_show_function(field, format_string) \ +static ssize_t \ +sdev_show_##field (struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct scsi_device *sdev; \ + sdev = to_scsi_device(dev); \ + return snprintf (buf, 20, format_string, sdev->field); \ +} \ + +/* + * sdev_rd_attr: macro to create a function and attribute variable for a + * read only field. + */ +#define sdev_rd_attr(field, format_string) \ + sdev_show_function(field, format_string) \ +static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL); + + +/* + * sdev_rw_attr: create a function and attribute variable for a + * read/write field. + */ +#define sdev_rw_attr(field, format_string) \ + sdev_show_function(field, format_string) \ + \ +static ssize_t \ +sdev_store_##field (struct device *dev, struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + struct scsi_device *sdev; \ + sdev = to_scsi_device(dev); \ + sscanf (buf, format_string, &sdev->field); \ + return count; \ +} \ +static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field); + +/* Currently we don't export bit fields, but we might in future, + * so leave this code in */ +#if 0 +/* + * sdev_rd_attr: create a function and attribute variable for a + * read/write bit field. + */ +#define sdev_rw_attr_bit(field) \ + sdev_show_function(field, "%d\n") \ + \ +static ssize_t \ +sdev_store_##field (struct device *dev, struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + int ret; \ + struct scsi_device *sdev; \ + ret = scsi_sdev_check_buf_bit(buf); \ + if (ret >= 0) { \ + sdev = to_scsi_device(dev); \ + sdev->field = ret; \ + ret = count; \ + } \ + return ret; \ +} \ +static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field); + +/* + * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1", + * else return -EINVAL. 
+ */ +static int scsi_sdev_check_buf_bit(const char *buf) +{ + if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) { + if (buf[0] == '1') + return 1; + else if (buf[0] == '0') + return 0; + else + return -EINVAL; + } else + return -EINVAL; +} +#endif +/* + * Create the actual show/store functions and data structures. + */ +sdev_rd_attr (type, "%d\n"); +sdev_rd_attr (scsi_level, "%d\n"); +sdev_rd_attr (vendor, "%.8s\n"); +sdev_rd_attr (model, "%.16s\n"); +sdev_rd_attr (rev, "%.4s\n"); +sdev_rd_attr (cdl_supported, "%d\n"); + +static ssize_t +sdev_show_device_busy(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + return snprintf(buf, 20, "%d\n", scsi_device_busy(sdev)); +} +static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL); + +static ssize_t +sdev_show_device_blocked(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_blocked)); +} +static DEVICE_ATTR(device_blocked, S_IRUGO, sdev_show_device_blocked, NULL); + +/* + * TODO: can we make these symlinks to the block layer ones? + */ +static ssize_t +sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev; + sdev = to_scsi_device(dev); + return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ); +} + +static ssize_t +sdev_store_timeout (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev; + int timeout; + sdev = to_scsi_device(dev); + sscanf (buf, "%d\n", &timeout); + blk_queue_rq_timeout(sdev->request_queue, timeout * HZ); + return count; +} +static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); + +static ssize_t +sdev_show_eh_timeout(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev; + sdev = to_scsi_device(dev); + return snprintf(buf, 20, "%u\n", sdev->eh_timeout / HZ); +} + +static ssize_t +sdev_store_eh_timeout(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev; + unsigned int eh_timeout; + int err; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + sdev = to_scsi_device(dev); + err = kstrtouint(buf, 10, &eh_timeout); + if (err) + return err; + sdev->eh_timeout = eh_timeout * HZ; + + return count; +} +static DEVICE_ATTR(eh_timeout, S_IRUGO | S_IWUSR, sdev_show_eh_timeout, sdev_store_eh_timeout); + +static ssize_t +store_rescan_field (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + scsi_rescan_device(to_scsi_device(dev)); + return count; +} +static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field); + +static ssize_t +sdev_store_delete(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct kernfs_node *kn; + struct scsi_device *sdev = to_scsi_device(dev); + + /* + * We need to try to get module, avoiding the module been removed + * during delete. + */ + if (scsi_device_get(sdev)) + return -ENODEV; + + kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); + WARN_ON_ONCE(!kn); + /* + * Concurrent writes into the "delete" sysfs attribute may trigger + * concurrent calls to device_remove_file() and scsi_remove_device(). + * device_remove_file() handles concurrent removal calls by + * serializing these and by ignoring the second and later removal + * attempts. 
Concurrent calls of scsi_remove_device() are + * serialized. The second and later calls of scsi_remove_device() are + * ignored because the first call of that function changes the device + * state into SDEV_DEL. + */ + device_remove_file(dev, attr); + scsi_remove_device(sdev); + if (kn) + sysfs_unbreak_active_protection(kn); + scsi_device_put(sdev); + return count; +}; +static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); + +static ssize_t +store_state_field(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int i, ret; + struct scsi_device *sdev = to_scsi_device(dev); + enum scsi_device_state state = 0; + bool rescan_dev = false; + + for (i = 0; i < ARRAY_SIZE(sdev_states); i++) { + const int len = strlen(sdev_states[i].name); + if (strncmp(sdev_states[i].name, buf, len) == 0 && + buf[len] == '\n') { + state = sdev_states[i].value; + break; + } + } + switch (state) { + case SDEV_RUNNING: + case SDEV_OFFLINE: + break; + default: + return -EINVAL; + } + + mutex_lock(&sdev->state_mutex); + switch (sdev->sdev_state) { + case SDEV_RUNNING: + case SDEV_OFFLINE: + break; + default: + mutex_unlock(&sdev->state_mutex); + return -EINVAL; + } + if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) { + ret = 0; + } else { + ret = scsi_device_set_state(sdev, state); + if (ret == 0 && state == SDEV_RUNNING) + rescan_dev = true; + } + mutex_unlock(&sdev->state_mutex); + + if (rescan_dev) { + /* + * If the device state changes to SDEV_RUNNING, we need to + * run the queue to avoid I/O hang, and rescan the device + * to revalidate it. Running the queue first is necessary + * because another thread may be waiting inside + * blk_mq_freeze_queue_wait() and because that call may be + * waiting for pending I/O to finish. + */ + blk_mq_run_hw_queues(sdev->request_queue, true); + scsi_rescan_device(sdev); + } + + return ret == 0 ? 
count : -EINVAL; +} + +static ssize_t +show_state_field(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + const char *name = scsi_device_state_name(sdev->sdev_state); + + if (!name) + return -EINVAL; + + return snprintf(buf, 20, "%s\n", name); +} + +static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_state_field, store_state_field); + +static ssize_t +show_queue_type_field(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + const char *name = "none"; + + if (sdev->simple_tags) + name = "simple"; + + return snprintf(buf, 20, "%s\n", name); +} + +static ssize_t +store_queue_type_field(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + if (!sdev->tagged_supported) + return -EINVAL; + + sdev_printk(KERN_INFO, sdev, + "ignoring write to deprecated queue_type attribute"); + return count; +} + +static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field, + store_queue_type_field); + +#define sdev_vpd_pg_attr(_page) \ +static ssize_t \ +show_vpd_##_page(struct file *filp, struct kobject *kobj, \ + struct bin_attribute *bin_attr, \ + char *buf, loff_t off, size_t count) \ +{ \ + struct device *dev = kobj_to_dev(kobj); \ + struct scsi_device *sdev = to_scsi_device(dev); \ + struct scsi_vpd *vpd_page; \ + int ret = -EINVAL; \ + \ + rcu_read_lock(); \ + vpd_page = rcu_dereference(sdev->vpd_##_page); \ + if (vpd_page) \ + ret = memory_read_from_buffer(buf, count, &off, \ + vpd_page->data, vpd_page->len); \ + rcu_read_unlock(); \ + return ret; \ +} \ +static struct bin_attribute dev_attr_vpd_##_page = { \ + .attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO }, \ + .size = 0, \ + .read = show_vpd_##_page, \ +}; + +sdev_vpd_pg_attr(pg83); +sdev_vpd_pg_attr(pg80); +sdev_vpd_pg_attr(pg89); +sdev_vpd_pg_attr(pgb0); +sdev_vpd_pg_attr(pgb1); +sdev_vpd_pg_attr(pgb2); +sdev_vpd_pg_attr(pg0); + +static ssize_t show_inquiry(struct file *filep, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct scsi_device *sdev = to_scsi_device(dev); + + if (!sdev->inquiry) + return -EINVAL; + + return memory_read_from_buffer(buf, count, &off, sdev->inquiry, + sdev->inquiry_len); +} + +static struct bin_attribute dev_attr_inquiry = { + .attr = { + .name = "inquiry", + .mode = S_IRUGO, + }, + .size = 0, + .read = show_inquiry, +}; + +static ssize_t +show_iostat_counterbits(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8); +} + +static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL); + +#define show_sdev_iostat(field) \ +static ssize_t \ +show_iostat_##field(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct scsi_device *sdev = to_scsi_device(dev); \ + unsigned long long count = atomic_read(&sdev->field); \ + return snprintf(buf, 20, "0x%llx\n", count); \ +} \ +static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL) + +show_sdev_iostat(iorequest_cnt); +show_sdev_iostat(iodone_cnt); +show_sdev_iostat(ioerr_cnt); +show_sdev_iostat(iotmo_cnt); + +static ssize_t +sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev; + sdev = to_scsi_device(dev); + return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", 
sdev->type); +} +static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL); + +#define DECLARE_EVT_SHOW(name, Cap_name) \ +static ssize_t \ +sdev_show_evt_##name(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct scsi_device *sdev = to_scsi_device(dev); \ + int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events);\ + return snprintf(buf, 20, "%d\n", val); \ +} + +#define DECLARE_EVT_STORE(name, Cap_name) \ +static ssize_t \ +sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\ + const char *buf, size_t count) \ +{ \ + struct scsi_device *sdev = to_scsi_device(dev); \ + int val = simple_strtoul(buf, NULL, 0); \ + if (val == 0) \ + clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \ + else if (val == 1) \ + set_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \ + else \ + return -EINVAL; \ + return count; \ +} + +#define DECLARE_EVT(name, Cap_name) \ + DECLARE_EVT_SHOW(name, Cap_name) \ + DECLARE_EVT_STORE(name, Cap_name) \ + static DEVICE_ATTR(evt_##name, S_IRUGO, sdev_show_evt_##name, \ + sdev_store_evt_##name); +#define REF_EVT(name) &dev_attr_evt_##name.attr + +DECLARE_EVT(media_change, MEDIA_CHANGE) +DECLARE_EVT(inquiry_change_reported, INQUIRY_CHANGE_REPORTED) +DECLARE_EVT(capacity_change_reported, CAPACITY_CHANGE_REPORTED) +DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED) +DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED) +DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED) + +static ssize_t +sdev_store_queue_depth(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int depth, retval; + struct scsi_device *sdev = to_scsi_device(dev); + const struct scsi_host_template *sht = sdev->host->hostt; + + if (!sht->change_queue_depth) + return -EINVAL; + + depth = simple_strtoul(buf, NULL, 0); + + if (depth < 1 || depth > sdev->host->can_queue) + return -EINVAL; + + retval = sht->change_queue_depth(sdev, depth); + if (retval < 0) + return retval; + + sdev->max_queue_depth = sdev->queue_depth; + + return count; +} +sdev_show_function(queue_depth, "%d\n"); + +static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth, + sdev_store_queue_depth); + +static ssize_t +sdev_show_wwid(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + ssize_t count; + + count = scsi_vpd_lun_id(sdev, buf, PAGE_SIZE); + if (count > 0) { + buf[count] = '\n'; + count++; + } + return count; +} +static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL); + +#define BLIST_FLAG_NAME(name) \ + [const_ilog2((__force __u64)BLIST_##name)] = #name +static const char *const sdev_bflags_name[] = { +#include "scsi_devinfo_tbl.c" +}; +#undef BLIST_FLAG_NAME + +static ssize_t +sdev_show_blacklist(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + int i; + ssize_t len = 0; + + for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) { + const char *name = NULL; + + if (!(sdev->sdev_bflags & (__force blist_flags_t)BIT(i))) + continue; + if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i]) + name = sdev_bflags_name[i]; + + if (name) + len += scnprintf(buf + len, PAGE_SIZE - len, + "%s%s", len ? " " : "", name); + else + len += scnprintf(buf + len, PAGE_SIZE - len, + "%sINVALID_BIT(%d)", len ? 
" " : "", i); + } + if (len) + len += scnprintf(buf + len, PAGE_SIZE - len, "\n"); + return len; +} +static DEVICE_ATTR(blacklist, S_IRUGO, sdev_show_blacklist, NULL); + +#ifdef CONFIG_SCSI_DH +static ssize_t +sdev_show_dh_state(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + if (!sdev->handler) + return snprintf(buf, 20, "detached\n"); + + return snprintf(buf, 20, "%s\n", sdev->handler->name); +} + +static ssize_t +sdev_store_dh_state(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + int err = -EINVAL; + + if (sdev->sdev_state == SDEV_CANCEL || + sdev->sdev_state == SDEV_DEL) + return -ENODEV; + + if (!sdev->handler) { + /* + * Attach to a device handler + */ + err = scsi_dh_attach(sdev->request_queue, buf); + } else if (!strncmp(buf, "activate", 8)) { + /* + * Activate a device handler + */ + if (sdev->handler->activate) + err = sdev->handler->activate(sdev, NULL, NULL); + else + err = 0; + } else if (!strncmp(buf, "detach", 6)) { + /* + * Detach from a device handler + */ + sdev_printk(KERN_WARNING, sdev, + "can't detach handler %s.\n", + sdev->handler->name); + err = -EINVAL; + } + + return err < 0 ? err : count; +} + +static DEVICE_ATTR(dh_state, S_IRUGO | S_IWUSR, sdev_show_dh_state, + sdev_store_dh_state); + +static ssize_t +sdev_show_access_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + unsigned char access_state; + const char *access_state_name; + + if (!sdev->handler) + return -EINVAL; + + access_state = (sdev->access_state & SCSI_ACCESS_STATE_MASK); + access_state_name = scsi_access_state_name(access_state); + + return sprintf(buf, "%s\n", + access_state_name ? 
access_state_name : "unknown"); +} +static DEVICE_ATTR(access_state, S_IRUGO, sdev_show_access_state, NULL); + +static ssize_t +sdev_show_preferred_path(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + if (!sdev->handler) + return -EINVAL; + + if (sdev->access_state & SCSI_ACCESS_STATE_PREFERRED) + return sprintf(buf, "1\n"); + else + return sprintf(buf, "0\n"); +} +static DEVICE_ATTR(preferred_path, S_IRUGO, sdev_show_preferred_path, NULL); +#endif + +static ssize_t +sdev_show_queue_ramp_up_period(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev; + sdev = to_scsi_device(dev); + return snprintf(buf, 20, "%u\n", + jiffies_to_msecs(sdev->queue_ramp_up_period)); +} + +static ssize_t +sdev_store_queue_ramp_up_period(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + unsigned int period; + + if (kstrtouint(buf, 10, &period)) + return -EINVAL; + + sdev->queue_ramp_up_period = msecs_to_jiffies(period); + return count; +} + +static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR, + sdev_show_queue_ramp_up_period, + sdev_store_queue_ramp_up_period); + +static ssize_t sdev_show_cdl_enable(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return sysfs_emit(buf, "%d\n", (int)sdev->cdl_enable); +} + +static ssize_t sdev_store_cdl_enable(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + bool v; + + if (kstrtobool(buf, &v)) + return -EINVAL; + + ret = scsi_cdl_enable(to_scsi_device(dev), v); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR(cdl_enable, S_IRUGO | S_IWUSR, + sdev_show_cdl_enable, sdev_store_cdl_enable); + +static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *dev = kobj_to_dev(kobj); + struct scsi_device *sdev = to_scsi_device(dev); + + + if (attr == &dev_attr_queue_depth.attr && + !sdev->host->hostt->change_queue_depth) + return S_IRUGO; + + if (attr == &dev_attr_queue_ramp_up_period.attr && + !sdev->host->hostt->change_queue_depth) + return 0; + + return attr->mode; +} + +static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj, + struct bin_attribute *attr, int i) +{ + struct device *dev = kobj_to_dev(kobj); + struct scsi_device *sdev = to_scsi_device(dev); + + + if (attr == &dev_attr_vpd_pg0 && !sdev->vpd_pg0) + return 0; + + if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80) + return 0; + + if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83) + return 0; + + if (attr == &dev_attr_vpd_pg89 && !sdev->vpd_pg89) + return 0; + + if (attr == &dev_attr_vpd_pgb0 && !sdev->vpd_pgb0) + return 0; + + if (attr == &dev_attr_vpd_pgb1 && !sdev->vpd_pgb1) + return 0; + + if (attr == &dev_attr_vpd_pgb2 && !sdev->vpd_pgb2) + return 0; + + return S_IRUGO; +} + +/* Default template for device attributes. 
May NOT be modified */ +static struct attribute *scsi_sdev_attrs[] = { + &dev_attr_device_blocked.attr, + &dev_attr_type.attr, + &dev_attr_scsi_level.attr, + &dev_attr_device_busy.attr, + &dev_attr_vendor.attr, + &dev_attr_model.attr, + &dev_attr_rev.attr, + &dev_attr_rescan.attr, + &dev_attr_delete.attr, + &dev_attr_state.attr, + &dev_attr_timeout.attr, + &dev_attr_eh_timeout.attr, + &dev_attr_iocounterbits.attr, + &dev_attr_iorequest_cnt.attr, + &dev_attr_iodone_cnt.attr, + &dev_attr_ioerr_cnt.attr, + &dev_attr_iotmo_cnt.attr, + &dev_attr_modalias.attr, + &dev_attr_queue_depth.attr, + &dev_attr_queue_type.attr, + &dev_attr_wwid.attr, + &dev_attr_blacklist.attr, +#ifdef CONFIG_SCSI_DH + &dev_attr_dh_state.attr, + &dev_attr_access_state.attr, + &dev_attr_preferred_path.attr, +#endif + &dev_attr_queue_ramp_up_period.attr, + &dev_attr_cdl_supported.attr, + &dev_attr_cdl_enable.attr, + REF_EVT(media_change), + REF_EVT(inquiry_change_reported), + REF_EVT(capacity_change_reported), + REF_EVT(soft_threshold_reached), + REF_EVT(mode_parameter_change_reported), + REF_EVT(lun_change_reported), + NULL +}; + +static struct bin_attribute *scsi_sdev_bin_attrs[] = { + &dev_attr_vpd_pg0, + &dev_attr_vpd_pg83, + &dev_attr_vpd_pg80, + &dev_attr_vpd_pg89, + &dev_attr_vpd_pgb0, + &dev_attr_vpd_pgb1, + &dev_attr_vpd_pgb2, + &dev_attr_inquiry, + NULL +}; +static struct attribute_group scsi_sdev_attr_group = { + .attrs = scsi_sdev_attrs, + .bin_attrs = scsi_sdev_bin_attrs, + .is_visible = scsi_sdev_attr_is_visible, + .is_bin_visible = scsi_sdev_bin_attr_is_visible, +}; + +static const struct attribute_group *scsi_sdev_attr_groups[] = { + &scsi_sdev_attr_group, + NULL +}; + +static int scsi_target_add(struct scsi_target *starget) +{ + int error; + + if (starget->state != STARGET_CREATED) + return 0; + + error = device_add(&starget->dev); + if (error) { + dev_err(&starget->dev, "target device_add failed, error %d\n", error); + return error; + } + transport_add_device(&starget->dev); + starget->state = STARGET_RUNNING; + + pm_runtime_set_active(&starget->dev); + pm_runtime_enable(&starget->dev); + device_enable_async_suspend(&starget->dev); + + return 0; +} + +/** + * scsi_sysfs_add_sdev - add scsi device to sysfs + * @sdev: scsi_device to add + * + * Return value: + * 0 on Success / non-zero on Failure + **/ +int scsi_sysfs_add_sdev(struct scsi_device *sdev) +{ + int error; + struct scsi_target *starget = sdev->sdev_target; + + error = scsi_target_add(starget); + if (error) + return error; + + transport_configure_device(&starget->dev); + + device_enable_async_suspend(&sdev->sdev_gendev); + scsi_autopm_get_target(starget); + pm_runtime_set_active(&sdev->sdev_gendev); + if (!sdev->rpm_autosuspend) + pm_runtime_forbid(&sdev->sdev_gendev); + pm_runtime_enable(&sdev->sdev_gendev); + scsi_autopm_put_target(starget); + + scsi_autopm_get_device(sdev); + + scsi_dh_add_device(sdev); + + error = device_add(&sdev->sdev_gendev); + if (error) { + sdev_printk(KERN_INFO, sdev, + "failed to add device: %d\n", error); + return error; + } + + device_enable_async_suspend(&sdev->sdev_dev); + error = device_add(&sdev->sdev_dev); + if (error) { + sdev_printk(KERN_INFO, sdev, + "failed to add class device: %d\n", error); + device_del(&sdev->sdev_gendev); + return error; + } + transport_add_device(&sdev->sdev_gendev); + sdev->is_visible = 1; + + if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) { + sdev->bsg_dev = scsi_bsg_register_queue(sdev); + if (IS_ERR(sdev->bsg_dev)) { + error = PTR_ERR(sdev->bsg_dev); + sdev_printk(KERN_INFO, sdev, + "Failed to 
register bsg queue, errno=%d\n", + error); + sdev->bsg_dev = NULL; + } + } + + scsi_autopm_put_device(sdev); + return error; +} + +void __scsi_remove_device(struct scsi_device *sdev) +{ + struct device *dev = &sdev->sdev_gendev; + int res; + + /* + * This cleanup path is not reentrant and while it is impossible + * to get a new reference with scsi_device_get() someone can still + * hold a previously acquired one. + */ + if (sdev->sdev_state == SDEV_DEL) + return; + + if (sdev->is_visible) { + /* + * If scsi_internal_target_block() is running concurrently, + * wait until it has finished before changing the device state. + */ + mutex_lock(&sdev->state_mutex); + /* + * If blocked, we go straight to DEL and restart the queue so + * any commands issued during driver shutdown (like sync + * cache) are errored immediately. + */ + res = scsi_device_set_state(sdev, SDEV_CANCEL); + if (res != 0) { + res = scsi_device_set_state(sdev, SDEV_DEL); + if (res == 0) + scsi_start_queue(sdev); + } + mutex_unlock(&sdev->state_mutex); + + if (res != 0) + return; + + if (IS_ENABLED(CONFIG_BLK_DEV_BSG) && sdev->bsg_dev) + bsg_unregister_queue(sdev->bsg_dev); + device_unregister(&sdev->sdev_dev); + transport_remove_device(dev); + device_del(dev); + } else + put_device(&sdev->sdev_dev); + + /* + * Stop accepting new requests and wait until all queuecommand() and + * scsi_run_queue() invocations have finished before tearing down the + * device. + */ + mutex_lock(&sdev->state_mutex); + scsi_device_set_state(sdev, SDEV_DEL); + mutex_unlock(&sdev->state_mutex); + + blk_mq_destroy_queue(sdev->request_queue); + kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags); + cancel_work_sync(&sdev->requeue_work); + + if (sdev->host->hostt->slave_destroy) + sdev->host->hostt->slave_destroy(sdev); + transport_destroy_device(dev); + + /* + * Paired with the kref_get() in scsi_sysfs_initialize(). We have + * removed sysfs visibility from the device, so make the target + * invisible if this was the last device underneath it. + */ + scsi_target_reap(scsi_target(sdev)); + + put_device(dev); +} + +/** + * scsi_remove_device - unregister a device from the scsi bus + * @sdev: scsi_device to unregister + **/ +void scsi_remove_device(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + + mutex_lock(&shost->scan_mutex); + __scsi_remove_device(sdev); + mutex_unlock(&shost->scan_mutex); +} +EXPORT_SYMBOL(scsi_remove_device); + +static void __scsi_remove_target(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + unsigned long flags; + struct scsi_device *sdev; + + spin_lock_irqsave(shost->host_lock, flags); + restart: + list_for_each_entry(sdev, &shost->__devices, siblings) { + /* + * We cannot call scsi_device_get() here, as + * we might've been called from rmmod() causing + * scsi_device_get() to fail the module_is_live() + * check. + */ + if (sdev->channel != starget->channel || + sdev->id != starget->id) + continue; + if (sdev->sdev_state == SDEV_DEL || + sdev->sdev_state == SDEV_CANCEL || + !get_device(&sdev->sdev_gendev)) + continue; + spin_unlock_irqrestore(shost->host_lock, flags); + scsi_remove_device(sdev); + put_device(&sdev->sdev_gendev); + spin_lock_irqsave(shost->host_lock, flags); + goto restart; + } + spin_unlock_irqrestore(shost->host_lock, flags); +} + +/** + * scsi_remove_target - try to remove a target and all its devices + * @dev: generic starget or parent of generic stargets to be removed + * + * Note: This is slightly racy. 
It is possible that if the user + * requests the addition of another device then the target won't be + * removed. + */ +void scsi_remove_target(struct device *dev) +{ + struct Scsi_Host *shost = dev_to_shost(dev->parent); + struct scsi_target *starget; + unsigned long flags; + +restart: + spin_lock_irqsave(shost->host_lock, flags); + list_for_each_entry(starget, &shost->__targets, siblings) { + if (starget->state == STARGET_DEL || + starget->state == STARGET_REMOVE || + starget->state == STARGET_CREATED_REMOVE) + continue; + if (starget->dev.parent == dev || &starget->dev == dev) { + kref_get(&starget->reap_ref); + if (starget->state == STARGET_CREATED) + starget->state = STARGET_CREATED_REMOVE; + else + starget->state = STARGET_REMOVE; + spin_unlock_irqrestore(shost->host_lock, flags); + __scsi_remove_target(starget); + scsi_target_reap(starget); + goto restart; + } + } + spin_unlock_irqrestore(shost->host_lock, flags); +} +EXPORT_SYMBOL(scsi_remove_target); + +int scsi_register_driver(struct device_driver *drv) +{ + drv->bus = &scsi_bus_type; + + return driver_register(drv); +} +EXPORT_SYMBOL(scsi_register_driver); + +int scsi_register_interface(struct class_interface *intf) +{ + intf->class = &sdev_class; + + return class_interface_register(intf); +} +EXPORT_SYMBOL(scsi_register_interface); + +/** + * scsi_sysfs_add_host - add scsi host to subsystem + * @shost: scsi host struct to add to subsystem + **/ +int scsi_sysfs_add_host(struct Scsi_Host *shost) +{ + transport_register_device(&shost->shost_gendev); + transport_configure_device(&shost->shost_gendev); + return 0; +} + +static struct device_type scsi_dev_type = { + .name = "scsi_device", + .release = scsi_device_dev_release, + .groups = scsi_sdev_attr_groups, +}; + +void scsi_sysfs_device_initialize(struct scsi_device *sdev) +{ + unsigned long flags; + struct Scsi_Host *shost = sdev->host; + const struct scsi_host_template *hostt = shost->hostt; + struct scsi_target *starget = sdev->sdev_target; + + device_initialize(&sdev->sdev_gendev); + sdev->sdev_gendev.bus = &scsi_bus_type; + sdev->sdev_gendev.type = &scsi_dev_type; + scsi_enable_async_suspend(&sdev->sdev_gendev); + dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu", + sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); + sdev->sdev_gendev.groups = hostt->sdev_groups; + + device_initialize(&sdev->sdev_dev); + sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev); + sdev->sdev_dev.class = &sdev_class; + dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu", + sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); + /* + * Get a default scsi_level from the target (derived from sibling + * devices). This is the best we can do for guessing how to set + * sdev->lun_in_cdb for the initial INQUIRY command. For LUN 0 the + * setting doesn't matter, because all the bits are zero anyway. + * But it does matter for higher LUNs. + */ + sdev->scsi_level = starget->scsi_level; + if (sdev->scsi_level <= SCSI_2 && + sdev->scsi_level != SCSI_UNKNOWN && + !shost->no_scsi2_lun_in_cdb) + sdev->lun_in_cdb = 1; + + transport_setup_device(&sdev->sdev_gendev); + spin_lock_irqsave(shost->host_lock, flags); + list_add_tail(&sdev->same_target_siblings, &starget->devices); + list_add_tail(&sdev->siblings, &shost->__devices); + spin_unlock_irqrestore(shost->host_lock, flags); + /* + * device can now only be removed via __scsi_remove_device() so hold + * the target. 
Target will be held in CREATED state until something + * beneath it becomes visible (in which case it moves to RUNNING) + */ + kref_get(&starget->reap_ref); +} + +int scsi_is_sdev_device(const struct device *dev) +{ + return dev->type == &scsi_dev_type; +} +EXPORT_SYMBOL(scsi_is_sdev_device); + +/* A blank transport template that is used in drivers that don't + * yet implement Transport Attributes */ +struct scsi_transport_template blank_transport_template = { { { {NULL, }, }, }, }; diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c new file mode 100644 index 000000000..41a950075 --- /dev/null +++ b/drivers/scsi/scsi_trace.c @@ -0,0 +1,391 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2010 FUJITSU LIMITED + * Copyright (C) 2010 Tomohiro Kusumi + */ +#include +#include +#include +#include + +#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f) +#define SERVICE_ACTION32(cdb) (get_unaligned_be16(&cdb[8])) + +static const char * +scsi_trace_misc(struct trace_seq *, unsigned char *, int); + +static const char * +scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p); + u32 lba, txlen; + + lba = get_unaligned_be24(&cdb[1]) & 0x1fffff; + /* + * From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256 + * logical blocks shall be read (READ(6)) or written (WRITE(6)). + */ + txlen = cdb[4] ? cdb[4] : 256; + + trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen); + trace_seq_putc(p, 0); + + return ret; +} + +static const char * +scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p); + u32 lba, txlen; + + lba = get_unaligned_be32(&cdb[2]); + txlen = get_unaligned_be16(&cdb[7]); + + trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen, + cdb[1] >> 5); + + if (cdb[0] == WRITE_SAME) + trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1); + + trace_seq_putc(p, 0); + + return ret; +} + +static const char * +scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p); + u32 lba, txlen; + + lba = get_unaligned_be32(&cdb[2]); + txlen = get_unaligned_be32(&cdb[6]); + + trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen, + cdb[1] >> 5); + trace_seq_putc(p, 0); + + return ret; +} + +static const char * +scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p); + u64 lba; + u32 txlen; + + lba = get_unaligned_be64(&cdb[2]); + txlen = get_unaligned_be32(&cdb[10]); + + trace_seq_printf(p, "lba=%llu txlen=%u protect=%u", lba, txlen, + cdb[1] >> 5); + + if (cdb[0] == WRITE_SAME_16) + trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1); + + trace_seq_putc(p, 0); + + return ret; +} + +static const char * +scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p), *cmd; + u64 lba; + u32 ei_lbrt, txlen; + + switch (SERVICE_ACTION32(cdb)) { + case READ_32: + cmd = "READ"; + break; + case VERIFY_32: + cmd = "VERIFY"; + break; + case WRITE_32: + cmd = "WRITE"; + break; + case WRITE_SAME_32: + cmd = "WRITE_SAME"; + break; + default: + trace_seq_puts(p, "UNKNOWN"); + goto out; + } + + lba = get_unaligned_be64(&cdb[12]); + ei_lbrt = get_unaligned_be32(&cdb[20]); + txlen = get_unaligned_be32(&cdb[28]); + + trace_seq_printf(p, "%s_32 lba=%llu txlen=%u protect=%u ei_lbrt=%u", + cmd, lba, txlen, cdb[10] >> 5, ei_lbrt); + + if (SERVICE_ACTION32(cdb) == WRITE_SAME_32) + trace_seq_printf(p, " unmap=%u", 
cdb[10] >> 3 & 1); + +out: + trace_seq_putc(p, 0); + + return ret; +} + +static const char * +scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p); + unsigned int regions = get_unaligned_be16(&cdb[7]); + + trace_seq_printf(p, "regions=%u", (regions - 8) / 16); + trace_seq_putc(p, 0); + + return ret; +} + +static const char * +scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p), *cmd; + u64 lba; + u32 alloc_len; + + switch (SERVICE_ACTION16(cdb)) { + case SAI_READ_CAPACITY_16: + cmd = "READ_CAPACITY_16"; + break; + case SAI_GET_LBA_STATUS: + cmd = "GET_LBA_STATUS"; + break; + default: + trace_seq_puts(p, "UNKNOWN"); + goto out; + } + + lba = get_unaligned_be64(&cdb[2]); + alloc_len = get_unaligned_be32(&cdb[10]); + + trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, lba, alloc_len); + +out: + trace_seq_putc(p, 0); + + return ret; +} + +static const char * +scsi_trace_maintenance_in(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p), *cmd; + u32 alloc_len; + + switch (SERVICE_ACTION16(cdb)) { + case MI_REPORT_IDENTIFYING_INFORMATION: + cmd = "REPORT_IDENTIFYING_INFORMATION"; + break; + case MI_REPORT_TARGET_PGS: + cmd = "REPORT_TARGET_PORT_GROUPS"; + break; + case MI_REPORT_ALIASES: + cmd = "REPORT_ALIASES"; + break; + case MI_REPORT_SUPPORTED_OPERATION_CODES: + cmd = "REPORT_SUPPORTED_OPERATION_CODES"; + break; + case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS: + cmd = "REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS"; + break; + case MI_REPORT_PRIORITY: + cmd = "REPORT_PRIORITY"; + break; + case MI_REPORT_TIMESTAMP: + cmd = "REPORT_TIMESTAMP"; + break; + case MI_MANAGEMENT_PROTOCOL_IN: + cmd = "MANAGEMENT_PROTOCOL_IN"; + break; + default: + trace_seq_puts(p, "UNKNOWN"); + goto out; + } + + alloc_len = get_unaligned_be32(&cdb[6]); + + trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len); + +out: + trace_seq_putc(p, 0); + + return ret; +} + +static const char * +scsi_trace_maintenance_out(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p), *cmd; + u32 alloc_len; + + switch (SERVICE_ACTION16(cdb)) { + case MO_SET_IDENTIFYING_INFORMATION: + cmd = "SET_IDENTIFYING_INFORMATION"; + break; + case MO_SET_TARGET_PGS: + cmd = "SET_TARGET_PORT_GROUPS"; + break; + case MO_CHANGE_ALIASES: + cmd = "CHANGE_ALIASES"; + break; + case MO_SET_PRIORITY: + cmd = "SET_PRIORITY"; + break; + case MO_SET_TIMESTAMP: + cmd = "SET_TIMESTAMP"; + break; + case MO_MANAGEMENT_PROTOCOL_OUT: + cmd = "MANAGEMENT_PROTOCOL_OUT"; + break; + default: + trace_seq_puts(p, "UNKNOWN"); + goto out; + } + + alloc_len = get_unaligned_be32(&cdb[6]); + + trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len); + +out: + trace_seq_putc(p, 0); + + return ret; +} + +static const char * +scsi_trace_zbc_in(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p), *cmd; + u64 zone_id; + u32 alloc_len; + u8 options; + + switch (SERVICE_ACTION16(cdb)) { + case ZI_REPORT_ZONES: + cmd = "REPORT_ZONES"; + break; + default: + trace_seq_puts(p, "UNKNOWN"); + goto out; + } + + zone_id = get_unaligned_be64(&cdb[2]); + alloc_len = get_unaligned_be32(&cdb[10]); + options = cdb[14] & 0x3f; + + trace_seq_printf(p, "%s zone=%llu alloc_len=%u options=%u partial=%u", + cmd, (unsigned long long)zone_id, alloc_len, + options, (cdb[14] >> 7) & 1); + +out: + trace_seq_putc(p, 0); + + return ret; +} 
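/*
 * Illustrative sketch (editorial note, not part of this upstream patch):
 * every decoder in scsi_trace.c follows the same pattern -- pull big-endian
 * fields out of fixed CDB offsets and print them into a trace_seq. The
 * standalone userspace program below mimics what scsi_trace_rw16() does for
 * a READ(16) CDB so the offsets are easy to verify. The be32_at()/be64_at()
 * helpers are local stand-ins for the kernel's get_unaligned_be32/be64 and
 * are assumptions made for portability, not kernel API.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t be32_at(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static uint64_t be64_at(const unsigned char *p)
{
	return ((uint64_t)be32_at(p) << 32) | be32_at(p + 4);
}

int main(void)
{
	/* READ(16): opcode 0x88, LBA in bytes 2..9, transfer length in 10..13 */
	unsigned char cdb[16] = { 0x88, 0 };

	cdb[9] = 0x10;		/* LBA = 16 (byte 9 is the LBA LSB) */
	cdb[13] = 0x08;		/* transfer length = 8 blocks */

	/* Same field extraction as scsi_trace_rw16(): prints "lba=16 txlen=8 protect=0" */
	printf("lba=%llu txlen=%u protect=%u\n",
	       (unsigned long long)be64_at(&cdb[2]),
	       (unsigned)be32_at(&cdb[10]),
	       (unsigned)(cdb[1] >> 5));
	return 0;
}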
+ +static const char * +scsi_trace_zbc_out(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p), *cmd; + u64 zone_id; + + switch (SERVICE_ACTION16(cdb)) { + case ZO_CLOSE_ZONE: + cmd = "CLOSE_ZONE"; + break; + case ZO_FINISH_ZONE: + cmd = "FINISH_ZONE"; + break; + case ZO_OPEN_ZONE: + cmd = "OPEN_ZONE"; + break; + case ZO_RESET_WRITE_POINTER: + cmd = "RESET_WRITE_POINTER"; + break; + default: + trace_seq_puts(p, "UNKNOWN"); + goto out; + } + + zone_id = get_unaligned_be64(&cdb[2]); + + trace_seq_printf(p, "%s zone=%llu all=%u", cmd, + (unsigned long long)zone_id, cdb[14] & 1); + +out: + trace_seq_putc(p, 0); + + return ret; +} + +static const char * +scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len) +{ + switch (SERVICE_ACTION32(cdb)) { + case READ_32: + case VERIFY_32: + case WRITE_32: + case WRITE_SAME_32: + return scsi_trace_rw32(p, cdb, len); + default: + return scsi_trace_misc(p, cdb, len); + } +} + +static const char * +scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p); + + trace_seq_putc(p, '-'); + trace_seq_putc(p, 0); + + return ret; +} + +const char * +scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len) +{ + switch (cdb[0]) { + case READ_6: + case WRITE_6: + return scsi_trace_rw6(p, cdb, len); + case READ_10: + case VERIFY: + case WRITE_10: + case WRITE_SAME: + return scsi_trace_rw10(p, cdb, len); + case READ_12: + case VERIFY_12: + case WRITE_12: + return scsi_trace_rw12(p, cdb, len); + case READ_16: + case VERIFY_16: + case WRITE_16: + case WRITE_SAME_16: + return scsi_trace_rw16(p, cdb, len); + case UNMAP: + return scsi_trace_unmap(p, cdb, len); + case SERVICE_ACTION_IN_16: + return scsi_trace_service_action_in(p, cdb, len); + case VARIABLE_LENGTH_CMD: + return scsi_trace_varlen(p, cdb, len); + case MAINTENANCE_IN: + return scsi_trace_maintenance_in(p, cdb, len); + case MAINTENANCE_OUT: + return scsi_trace_maintenance_out(p, cdb, len); + case ZBC_IN: + return scsi_trace_zbc_in(p, cdb, len); + case ZBC_OUT: + return scsi_trace_zbc_out(p, cdb, len); + default: + return scsi_trace_misc(p, cdb, len); + } +} diff --git a/drivers/scsi/scsi_transport_api.h b/drivers/scsi/scsi_transport_api.h new file mode 100644 index 000000000..f91776653 --- /dev/null +++ b/drivers/scsi/scsi_transport_api.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SCSI_TRANSPORT_API_H +#define _SCSI_TRANSPORT_API_H + +void scsi_schedule_eh(struct Scsi_Host *shost); + +#endif /* _SCSI_TRANSPORT_API_H */ diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c new file mode 100644 index 000000000..b04075f19 --- /dev/null +++ b/drivers/scsi/scsi_transport_fc.c @@ -0,0 +1,4356 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * FiberChannel transport specific attributes exported to sysfs. + * + * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2004-2007 James Smart, Emulex Corporation + * Rewrite for host, target, device, and remote port attributes, + * statistics, and service functions... 
+ * Add vports, etc + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "scsi_priv.h" + +static int fc_queue_work(struct Scsi_Host *, struct work_struct *); +static void fc_vport_sched_delete(struct work_struct *work); +static int fc_vport_setup(struct Scsi_Host *shost, int channel, + struct device *pdev, struct fc_vport_identifiers *ids, + struct fc_vport **vport); +static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *); +static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *); +static void fc_bsg_remove(struct request_queue *); +static void fc_bsg_goose_queue(struct fc_rport *); +static void fc_li_stats_update(u16 event_type, + struct fc_fpin_stats *stats); +static void fc_delivery_stats_update(u32 reason_code, + struct fc_fpin_stats *stats); +static void fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats); + +/* + * Module Parameters + */ + +/* + * dev_loss_tmo: the default number of seconds that the FC transport + * should insulate the loss of a remote port. + * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT. + */ +static unsigned int fc_dev_loss_tmo = 60; /* seconds */ + +module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(dev_loss_tmo, + "Maximum number of seconds that the FC transport should" + " insulate the loss of a remote port. Once this value is" + " exceeded, the scsi target is removed. Value should be" + " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if" + " fast_io_fail_tmo is not set."); + +/* + * Redefine so that we can have same named attributes in the + * sdev/starget/host objects. + */ +#define FC_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \ +struct device_attribute device_attr_##_prefix##_##_name = \ + __ATTR(_name,_mode,_show,_store) + +#define fc_enum_name_search(title, table_type, table) \ +static const char *get_fc_##title##_name(enum table_type table_key) \ +{ \ + int i; \ + char *name = NULL; \ + \ + for (i = 0; i < ARRAY_SIZE(table); i++) { \ + if (table[i].value == table_key) { \ + name = table[i].name; \ + break; \ + } \ + } \ + return name; \ +} + +#define fc_enum_name_match(title, table_type, table) \ +static int get_fc_##title##_match(const char *table_key, \ + enum table_type *value) \ +{ \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(table); i++) { \ + if (strncmp(table_key, table[i].name, \ + table[i].matchlen) == 0) { \ + *value = table[i].value; \ + return 0; /* success */ \ + } \ + } \ + return 1; /* failure */ \ +} + + +/* Convert fc_port_type values to ascii string name */ +static struct { + enum fc_port_type value; + char *name; +} fc_port_type_names[] = { + { FC_PORTTYPE_UNKNOWN, "Unknown" }, + { FC_PORTTYPE_OTHER, "Other" }, + { FC_PORTTYPE_NOTPRESENT, "Not Present" }, + { FC_PORTTYPE_NPORT, "NPort (fabric via point-to-point)" }, + { FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" }, + { FC_PORTTYPE_LPORT, "LPort (private loop)" }, + { FC_PORTTYPE_PTP, "Point-To-Point (direct nport connection)" }, + { FC_PORTTYPE_NPIV, "NPIV VPORT" }, +}; +fc_enum_name_search(port_type, fc_port_type, fc_port_type_names) +#define FC_PORTTYPE_MAX_NAMELEN 50 + +/* Reuse fc_port_type enum function for vport_type */ +#define get_fc_vport_type_name get_fc_port_type_name + + +/* Convert fc_host_event_code values to ascii string name */ +static const struct { + enum fc_host_event_code value; + char *name; +} fc_host_event_code_names[] = { + { FCH_EVT_LIP, 
"lip" }, + { FCH_EVT_LINKUP, "link_up" }, + { FCH_EVT_LINKDOWN, "link_down" }, + { FCH_EVT_LIPRESET, "lip_reset" }, + { FCH_EVT_RSCN, "rscn" }, + { FCH_EVT_ADAPTER_CHANGE, "adapter_chg" }, + { FCH_EVT_PORT_UNKNOWN, "port_unknown" }, + { FCH_EVT_PORT_ONLINE, "port_online" }, + { FCH_EVT_PORT_OFFLINE, "port_offline" }, + { FCH_EVT_PORT_FABRIC, "port_fabric" }, + { FCH_EVT_LINK_UNKNOWN, "link_unknown" }, + { FCH_EVT_LINK_FPIN, "link_FPIN" }, + { FCH_EVT_LINK_FPIN_ACK, "link_FPIN_ACK" }, + { FCH_EVT_VENDOR_UNIQUE, "vendor_unique" }, +}; +fc_enum_name_search(host_event_code, fc_host_event_code, + fc_host_event_code_names) +#define FC_HOST_EVENT_CODE_MAX_NAMELEN 30 + + +/* Convert fc_port_state values to ascii string name */ +static struct { + enum fc_port_state value; + char *name; + int matchlen; +} fc_port_state_names[] = { + { FC_PORTSTATE_UNKNOWN, "Unknown", 7}, + { FC_PORTSTATE_NOTPRESENT, "Not Present", 11 }, + { FC_PORTSTATE_ONLINE, "Online", 6 }, + { FC_PORTSTATE_OFFLINE, "Offline", 7 }, + { FC_PORTSTATE_BLOCKED, "Blocked", 7 }, + { FC_PORTSTATE_BYPASSED, "Bypassed", 8 }, + { FC_PORTSTATE_DIAGNOSTICS, "Diagnostics", 11 }, + { FC_PORTSTATE_LINKDOWN, "Linkdown", 8 }, + { FC_PORTSTATE_ERROR, "Error", 5 }, + { FC_PORTSTATE_LOOPBACK, "Loopback", 8 }, + { FC_PORTSTATE_DELETED, "Deleted", 7 }, + { FC_PORTSTATE_MARGINAL, "Marginal", 8 }, +}; +fc_enum_name_search(port_state, fc_port_state, fc_port_state_names) +fc_enum_name_match(port_state, fc_port_state, fc_port_state_names) +#define FC_PORTSTATE_MAX_NAMELEN 20 + + +/* Convert fc_vport_state values to ascii string name */ +static struct { + enum fc_vport_state value; + char *name; +} fc_vport_state_names[] = { + { FC_VPORT_UNKNOWN, "Unknown" }, + { FC_VPORT_ACTIVE, "Active" }, + { FC_VPORT_DISABLED, "Disabled" }, + { FC_VPORT_LINKDOWN, "Linkdown" }, + { FC_VPORT_INITIALIZING, "Initializing" }, + { FC_VPORT_NO_FABRIC_SUPP, "No Fabric Support" }, + { FC_VPORT_NO_FABRIC_RSCS, "No Fabric Resources" }, + { FC_VPORT_FABRIC_LOGOUT, "Fabric Logout" }, + { FC_VPORT_FABRIC_REJ_WWN, "Fabric Rejected WWN" }, + { FC_VPORT_FAILED, "VPort Failed" }, +}; +fc_enum_name_search(vport_state, fc_vport_state, fc_vport_state_names) +#define FC_VPORTSTATE_MAX_NAMELEN 24 + +/* Reuse fc_vport_state enum function for vport_last_state */ +#define get_fc_vport_last_state_name get_fc_vport_state_name + + +/* Convert fc_tgtid_binding_type values to ascii string name */ +static const struct { + enum fc_tgtid_binding_type value; + char *name; + int matchlen; +} fc_tgtid_binding_type_names[] = { + { FC_TGTID_BIND_NONE, "none", 4 }, + { FC_TGTID_BIND_BY_WWPN, "wwpn (World Wide Port Name)", 4 }, + { FC_TGTID_BIND_BY_WWNN, "wwnn (World Wide Node Name)", 4 }, + { FC_TGTID_BIND_BY_ID, "port_id (FC Address)", 7 }, +}; +fc_enum_name_search(tgtid_bind_type, fc_tgtid_binding_type, + fc_tgtid_binding_type_names) +fc_enum_name_match(tgtid_bind_type, fc_tgtid_binding_type, + fc_tgtid_binding_type_names) +#define FC_BINDTYPE_MAX_NAMELEN 30 + + +#define fc_bitfield_name_search(title, table) \ +static ssize_t \ +get_fc_##title##_names(u32 table_key, char *buf) \ +{ \ + char *prefix = ""; \ + ssize_t len = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(table); i++) { \ + if (table[i].value & table_key) { \ + len += sprintf(buf + len, "%s%s", \ + prefix, table[i].name); \ + prefix = ", "; \ + } \ + } \ + len += sprintf(buf + len, "\n"); \ + return len; \ +} + + +/* Convert FC_COS bit values to ascii string name */ +static const struct { + u32 value; + char *name; +} fc_cos_names[] = { + { 
FC_COS_CLASS1, "Class 1" }, + { FC_COS_CLASS2, "Class 2" }, + { FC_COS_CLASS3, "Class 3" }, + { FC_COS_CLASS4, "Class 4" }, + { FC_COS_CLASS6, "Class 6" }, +}; +fc_bitfield_name_search(cos, fc_cos_names) + + +/* Convert FC_PORTSPEED bit values to ascii string name */ +static const struct { + u32 value; + char *name; +} fc_port_speed_names[] = { + { FC_PORTSPEED_1GBIT, "1 Gbit" }, + { FC_PORTSPEED_2GBIT, "2 Gbit" }, + { FC_PORTSPEED_4GBIT, "4 Gbit" }, + { FC_PORTSPEED_10GBIT, "10 Gbit" }, + { FC_PORTSPEED_8GBIT, "8 Gbit" }, + { FC_PORTSPEED_16GBIT, "16 Gbit" }, + { FC_PORTSPEED_32GBIT, "32 Gbit" }, + { FC_PORTSPEED_20GBIT, "20 Gbit" }, + { FC_PORTSPEED_40GBIT, "40 Gbit" }, + { FC_PORTSPEED_50GBIT, "50 Gbit" }, + { FC_PORTSPEED_100GBIT, "100 Gbit" }, + { FC_PORTSPEED_25GBIT, "25 Gbit" }, + { FC_PORTSPEED_64GBIT, "64 Gbit" }, + { FC_PORTSPEED_128GBIT, "128 Gbit" }, + { FC_PORTSPEED_256GBIT, "256 Gbit" }, + { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" }, +}; +fc_bitfield_name_search(port_speed, fc_port_speed_names) + + +static int +show_fc_fc4s (char *buf, u8 *fc4_list) +{ + int i, len=0; + + for (i = 0; i < FC_FC4_LIST_SIZE; i++, fc4_list++) + len += sprintf(buf + len , "0x%02x ", *fc4_list); + len += sprintf(buf + len, "\n"); + return len; +} + + +/* Convert FC_PORT_ROLE bit values to ascii string name */ +static const struct { + u32 value; + char *name; +} fc_port_role_names[] = { + { FC_PORT_ROLE_FCP_TARGET, "FCP Target" }, + { FC_PORT_ROLE_FCP_INITIATOR, "FCP Initiator" }, + { FC_PORT_ROLE_IP_PORT, "IP Port" }, + { FC_PORT_ROLE_FCP_DUMMY_INITIATOR, "FCP Dummy Initiator" }, + { FC_PORT_ROLE_NVME_INITIATOR, "NVMe Initiator" }, + { FC_PORT_ROLE_NVME_TARGET, "NVMe Target" }, + { FC_PORT_ROLE_NVME_DISCOVERY, "NVMe Discovery" }, +}; +fc_bitfield_name_search(port_roles, fc_port_role_names) + +/* + * Define roles that are specific to port_id. Values are relative to ROLE_MASK. + */ +#define FC_WELLKNOWN_PORTID_MASK 0xfffff0 +#define FC_WELLKNOWN_ROLE_MASK 0x00000f +#define FC_FPORT_PORTID 0x00000e +#define FC_FABCTLR_PORTID 0x00000d +#define FC_DIRSRVR_PORTID 0x00000c +#define FC_TIMESRVR_PORTID 0x00000b +#define FC_MGMTSRVR_PORTID 0x00000a + + +static void fc_timeout_deleted_rport(struct work_struct *work); +static void fc_timeout_fail_rport_io(struct work_struct *work); +static void fc_scsi_scan_rport(struct work_struct *work); + +/* + * Attribute counts pre object type... + * Increase these values if you add attributes + */ +#define FC_STARGET_NUM_ATTRS 3 +#define FC_RPORT_NUM_ATTRS 10 +#define FC_VPORT_NUM_ATTRS 9 +#define FC_HOST_NUM_ATTRS 29 + +struct fc_internal { + struct scsi_transport_template t; + struct fc_function_template *f; + + /* + * For attributes : each object has : + * An array of the actual attributes structures + * An array of null-terminated pointers to the attribute + * structures - used for mid-layer interaction. + * + * The attribute containers for the starget and host are are + * part of the midlayer. As the remote port is specific to the + * fc transport, we must provide the attribute container. 
+ */ + struct device_attribute private_starget_attrs[ + FC_STARGET_NUM_ATTRS]; + struct device_attribute *starget_attrs[FC_STARGET_NUM_ATTRS + 1]; + + struct device_attribute private_host_attrs[FC_HOST_NUM_ATTRS]; + struct device_attribute *host_attrs[FC_HOST_NUM_ATTRS + 1]; + + struct transport_container rport_attr_cont; + struct device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS]; + struct device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1]; + + struct transport_container vport_attr_cont; + struct device_attribute private_vport_attrs[FC_VPORT_NUM_ATTRS]; + struct device_attribute *vport_attrs[FC_VPORT_NUM_ATTRS + 1]; +}; + +#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t) + +static int fc_target_setup(struct transport_container *tc, struct device *dev, + struct device *cdev) +{ + struct scsi_target *starget = to_scsi_target(dev); + struct fc_rport *rport = starget_to_rport(starget); + + /* + * if parent is remote port, use values from remote port. + * Otherwise, this host uses the fc_transport, but not the + * remote port interface. As such, initialize to known non-values. + */ + if (rport) { + fc_starget_node_name(starget) = rport->node_name; + fc_starget_port_name(starget) = rport->port_name; + fc_starget_port_id(starget) = rport->port_id; + } else { + fc_starget_node_name(starget) = -1; + fc_starget_port_name(starget) = -1; + fc_starget_port_id(starget) = -1; + } + + return 0; +} + +static DECLARE_TRANSPORT_CLASS(fc_transport_class, + "fc_transport", + fc_target_setup, + NULL, + NULL); + +static int fc_host_setup(struct transport_container *tc, struct device *dev, + struct device *cdev) +{ + struct Scsi_Host *shost = dev_to_shost(dev); + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + + /* + * Set default values easily detected by the midlayer as + * failure cases. The scsi lldd is responsible for initializing + * all transport attributes to valid values per host. 
+ */ + fc_host->node_name = -1; + fc_host->port_name = -1; + fc_host->permanent_port_name = -1; + fc_host->supported_classes = FC_COS_UNSPECIFIED; + memset(fc_host->supported_fc4s, 0, + sizeof(fc_host->supported_fc4s)); + fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN; + fc_host->maxframe_size = -1; + fc_host->max_npiv_vports = 0; + memset(fc_host->serial_number, 0, + sizeof(fc_host->serial_number)); + memset(fc_host->manufacturer, 0, + sizeof(fc_host->manufacturer)); + memset(fc_host->model, 0, + sizeof(fc_host->model)); + memset(fc_host->model_description, 0, + sizeof(fc_host->model_description)); + memset(fc_host->hardware_version, 0, + sizeof(fc_host->hardware_version)); + memset(fc_host->driver_version, 0, + sizeof(fc_host->driver_version)); + memset(fc_host->firmware_version, 0, + sizeof(fc_host->firmware_version)); + memset(fc_host->optionrom_version, 0, + sizeof(fc_host->optionrom_version)); + + fc_host->port_id = -1; + fc_host->port_type = FC_PORTTYPE_UNKNOWN; + fc_host->port_state = FC_PORTSTATE_UNKNOWN; + memset(fc_host->active_fc4s, 0, + sizeof(fc_host->active_fc4s)); + fc_host->speed = FC_PORTSPEED_UNKNOWN; + fc_host->fabric_name = -1; + memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name)); + memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname)); + memset(&fc_host->fpin_stats, 0, sizeof(fc_host->fpin_stats)); + + fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN; + + INIT_LIST_HEAD(&fc_host->rports); + INIT_LIST_HEAD(&fc_host->rport_bindings); + INIT_LIST_HEAD(&fc_host->vports); + fc_host->next_rport_number = 0; + fc_host->next_target_id = 0; + fc_host->next_vport_number = 0; + fc_host->npiv_vports_inuse = 0; + + snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name), + "fc_wq_%d", shost->host_no); + fc_host->work_q = alloc_workqueue("%s", 0, 0, fc_host->work_q_name); + if (!fc_host->work_q) + return -ENOMEM; + + fc_host->dev_loss_tmo = fc_dev_loss_tmo; + snprintf(fc_host->devloss_work_q_name, + sizeof(fc_host->devloss_work_q_name), + "fc_dl_%d", shost->host_no); + fc_host->devloss_work_q = alloc_workqueue("%s", 0, 0, + fc_host->devloss_work_q_name); + if (!fc_host->devloss_work_q) { + destroy_workqueue(fc_host->work_q); + fc_host->work_q = NULL; + return -ENOMEM; + } + + fc_bsg_hostadd(shost, fc_host); + /* ignore any bsg add error - we just can't do sgio */ + + return 0; +} + +static int fc_host_remove(struct transport_container *tc, struct device *dev, + struct device *cdev) +{ + struct Scsi_Host *shost = dev_to_shost(dev); + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + + fc_bsg_remove(fc_host->rqst_q); + return 0; +} + +static DECLARE_TRANSPORT_CLASS(fc_host_class, + "fc_host", + fc_host_setup, + fc_host_remove, + NULL); + +/* + * Setup and Remove actions for remote ports are handled + * in the service functions below. + */ +static DECLARE_TRANSPORT_CLASS(fc_rport_class, + "fc_remote_ports", + NULL, + NULL, + NULL); + +/* + * Setup and Remove actions for virtual ports are handled + * in the service functions below. + */ +static DECLARE_TRANSPORT_CLASS(fc_vport_class, + "fc_vports", + NULL, + NULL, + NULL); + +/* + * Netlink Infrastructure + */ + +static atomic_t fc_event_seq; + +/** + * fc_get_event_number - Obtain the next sequential FC event number + * + * Notes: + * We could have inlined this, but it would have required fc_event_seq to + * be exposed. For now, live with the subroutine call. + * Atomic used to avoid lock/unlock... 
+ */ +u32 +fc_get_event_number(void) +{ + return atomic_add_return(1, &fc_event_seq); +} +EXPORT_SYMBOL(fc_get_event_number); + +/** + * fc_host_post_fc_event - routine to do the work of posting an event + * on an fc_host. + * @shost: host the event occurred on + * @event_number: fc event number obtained from get_fc_event_number() + * @event_code: fc_host event being posted + * @data_len: amount, in bytes, of event data + * @data_buf: pointer to event data + * @vendor_id: value for Vendor id + * + * Notes: + * This routine assumes no locks are held on entry. + */ +void +fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number, + enum fc_host_event_code event_code, + u32 data_len, char *data_buf, u64 vendor_id) +{ + struct sk_buff *skb; + struct nlmsghdr *nlh; + struct fc_nl_event *event; + const char *name; + size_t len, padding; + int err; + + if (!data_buf || data_len < 4) + data_len = 0; + + if (!scsi_nl_sock) { + err = -ENOENT; + goto send_fail; + } + + len = FC_NL_MSGALIGN(sizeof(*event) - sizeof(event->event_data) + data_len); + + skb = nlmsg_new(len, GFP_KERNEL); + if (!skb) { + err = -ENOBUFS; + goto send_fail; + } + + nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0); + if (!nlh) { + err = -ENOBUFS; + goto send_fail_skb; + } + event = nlmsg_data(nlh); + + INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC, + FC_NL_ASYNC_EVENT, len); + event->seconds = ktime_get_real_seconds(); + event->vendor_id = vendor_id; + event->host_no = shost->host_no; + event->event_datalen = data_len; /* bytes */ + event->event_num = event_number; + event->event_code = event_code; + if (data_len) + memcpy(event->event_data_flex, data_buf, data_len); + padding = len - offsetof(typeof(*event), event_data_flex) - data_len; + memset(event->event_data_flex + data_len, 0, padding); + + nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS, + GFP_KERNEL); + return; + +send_fail_skb: + kfree_skb(skb); +send_fail: + name = get_fc_host_event_code_name(event_code); + printk(KERN_WARNING + "%s: Dropped Event : host %d %s data 0x%08x - err %d\n", + __func__, shost->host_no, + (name) ? name : "", + (data_len) ? *((u32 *)data_buf) : 0xFFFFFFFF, err); + return; +} +EXPORT_SYMBOL(fc_host_post_fc_event); + +/** + * fc_host_post_event - called to post an even on an fc_host. + * @shost: host the event occurred on + * @event_number: fc event number obtained from get_fc_event_number() + * @event_code: fc_host event being posted + * @event_data: 32bits of data for the event being posted + * + * Notes: + * This routine assumes no locks are held on entry. + */ +void +fc_host_post_event(struct Scsi_Host *shost, u32 event_number, + enum fc_host_event_code event_code, u32 event_data) +{ + fc_host_post_fc_event(shost, event_number, event_code, + (u32)sizeof(u32), (char *)&event_data, 0); +} +EXPORT_SYMBOL(fc_host_post_event); + + +/** + * fc_host_post_vendor_event - called to post a vendor unique event + * on an fc_host + * @shost: host the event occurred on + * @event_number: fc event number obtained from get_fc_event_number() + * @data_len: amount, in bytes, of vendor unique data + * @data_buf: pointer to vendor unique data + * @vendor_id: Vendor id + * + * Notes: + * This routine assumes no locks are held on entry. 
+ */ +void +fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number, + u32 data_len, char * data_buf, u64 vendor_id) +{ + fc_host_post_fc_event(shost, event_number, FCH_EVT_VENDOR_UNIQUE, + data_len, data_buf, vendor_id); +} +EXPORT_SYMBOL(fc_host_post_vendor_event); + +/** + * fc_find_rport_by_wwpn - find the fc_rport pointer for a given wwpn + * @shost: host the fc_rport is associated with + * @wwpn: wwpn of the fc_rport device + * + * Notes: + * This routine assumes no locks are held on entry. + */ +struct fc_rport * +fc_find_rport_by_wwpn(struct Scsi_Host *shost, u64 wwpn) +{ + struct fc_rport *rport; + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + + list_for_each_entry(rport, &fc_host_rports(shost), peers) { + if (rport->port_state != FC_PORTSTATE_ONLINE) + continue; + + if (rport->port_name == wwpn) { + spin_unlock_irqrestore(shost->host_lock, flags); + return rport; + } + } + + spin_unlock_irqrestore(shost->host_lock, flags); + return NULL; +} +EXPORT_SYMBOL(fc_find_rport_by_wwpn); + +static void +fc_li_stats_update(u16 event_type, + struct fc_fpin_stats *stats) +{ + stats->li++; + switch (event_type) { + case FPIN_LI_UNKNOWN: + stats->li_failure_unknown++; + break; + case FPIN_LI_LINK_FAILURE: + stats->li_link_failure_count++; + break; + case FPIN_LI_LOSS_OF_SYNC: + stats->li_loss_of_sync_count++; + break; + case FPIN_LI_LOSS_OF_SIG: + stats->li_loss_of_signals_count++; + break; + case FPIN_LI_PRIM_SEQ_ERR: + stats->li_prim_seq_err_count++; + break; + case FPIN_LI_INVALID_TX_WD: + stats->li_invalid_tx_word_count++; + break; + case FPIN_LI_INVALID_CRC: + stats->li_invalid_crc_count++; + break; + case FPIN_LI_DEVICE_SPEC: + stats->li_device_specific++; + break; + } +} + +static void +fc_delivery_stats_update(u32 reason_code, struct fc_fpin_stats *stats) +{ + stats->dn++; + switch (reason_code) { + case FPIN_DELI_UNKNOWN: + stats->dn_unknown++; + break; + case FPIN_DELI_TIMEOUT: + stats->dn_timeout++; + break; + case FPIN_DELI_UNABLE_TO_ROUTE: + stats->dn_unable_to_route++; + break; + case FPIN_DELI_DEVICE_SPEC: + stats->dn_device_specific++; + break; + } +} + +static void +fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats) +{ + stats->cn++; + switch (event_type) { + case FPIN_CONGN_CLEAR: + stats->cn_clear++; + break; + case FPIN_CONGN_LOST_CREDIT: + stats->cn_lost_credit++; + break; + case FPIN_CONGN_CREDIT_STALL: + stats->cn_credit_stall++; + break; + case FPIN_CONGN_OVERSUBSCRIPTION: + stats->cn_oversubscription++; + break; + case FPIN_CONGN_DEVICE_SPEC: + stats->cn_device_specific++; + } +} + +/* + * fc_fpin_li_stats_update - routine to update Link Integrity + * event statistics. 
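The three helpers just defined, fc_li_stats_update(), fc_delivery_stats_update() and fc_cn_stats_update(), all follow the same counting convention: bump one aggregate counter for the FPIN class, then one per-reason counter selected by a switch on the event or reason code. A stand-alone, runnable illustration of that pattern is below; the event codes and struct are local stand-ins, not the kernel definitions:

#include <stdint.h>
#include <stdio.h>

enum { DEMO_LI_UNKNOWN, DEMO_LI_LINK_FAILURE, DEMO_LI_LOSS_OF_SYNC };

struct demo_li_stats {
	uint64_t li;			/* all link-integrity FPINs */
	uint64_t li_failure_unknown;
	uint64_t li_link_failure_count;
	uint64_t li_loss_of_sync_count;
};

static void demo_li_stats_update(int event_type, struct demo_li_stats *s)
{
	s->li++;			/* always count the event itself */
	switch (event_type) {
	case DEMO_LI_UNKNOWN:
		s->li_failure_unknown++;
		break;
	case DEMO_LI_LINK_FAILURE:
		s->li_link_failure_count++;
		break;
	case DEMO_LI_LOSS_OF_SYNC:
		s->li_loss_of_sync_count++;
		break;
	}
}

int main(void)
{
	struct demo_li_stats s = { 0 };

	demo_li_stats_update(DEMO_LI_LINK_FAILURE, &s);
	demo_li_stats_update(DEMO_LI_LOSS_OF_SYNC, &s);
	printf("li=%llu link_failure=%llu loss_of_sync=%llu\n",
	       (unsigned long long)s.li,
	       (unsigned long long)s.li_link_failure_count,
	       (unsigned long long)s.li_loss_of_sync_count);
	return 0;
}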
+ * @shost: host the FPIN was received on + * @tlv: pointer to link integrity descriptor + * + */ +static void +fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv) +{ + u8 i; + struct fc_rport *rport = NULL; + struct fc_rport *attach_rport = NULL; + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + struct fc_fn_li_desc *li_desc = (struct fc_fn_li_desc *)tlv; + u16 event_type = be16_to_cpu(li_desc->event_type); + u64 wwpn; + + rport = fc_find_rport_by_wwpn(shost, + be64_to_cpu(li_desc->attached_wwpn)); + if (rport && + (rport->roles & FC_PORT_ROLE_FCP_TARGET || + rport->roles & FC_PORT_ROLE_NVME_TARGET)) { + attach_rport = rport; + fc_li_stats_update(event_type, &attach_rport->fpin_stats); + } + + if (be32_to_cpu(li_desc->pname_count) > 0) { + for (i = 0; + i < be32_to_cpu(li_desc->pname_count); + i++) { + wwpn = be64_to_cpu(li_desc->pname_list[i]); + rport = fc_find_rport_by_wwpn(shost, wwpn); + if (rport && + (rport->roles & FC_PORT_ROLE_FCP_TARGET || + rport->roles & FC_PORT_ROLE_NVME_TARGET)) { + if (rport == attach_rport) + continue; + fc_li_stats_update(event_type, + &rport->fpin_stats); + } + } + } + + if (fc_host->port_name == be64_to_cpu(li_desc->attached_wwpn)) + fc_li_stats_update(event_type, &fc_host->fpin_stats); +} + +/* + * fc_fpin_delivery_stats_update - routine to update Delivery Notification + * event statistics. + * @shost: host the FPIN was received on + * @tlv: pointer to delivery descriptor + * + */ +static void +fc_fpin_delivery_stats_update(struct Scsi_Host *shost, + struct fc_tlv_desc *tlv) +{ + struct fc_rport *rport = NULL; + struct fc_rport *attach_rport = NULL; + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + struct fc_fn_deli_desc *dn_desc = (struct fc_fn_deli_desc *)tlv; + u32 reason_code = be32_to_cpu(dn_desc->deli_reason_code); + + rport = fc_find_rport_by_wwpn(shost, + be64_to_cpu(dn_desc->attached_wwpn)); + if (rport && + (rport->roles & FC_PORT_ROLE_FCP_TARGET || + rport->roles & FC_PORT_ROLE_NVME_TARGET)) { + attach_rport = rport; + fc_delivery_stats_update(reason_code, + &attach_rport->fpin_stats); + } + + if (fc_host->port_name == be64_to_cpu(dn_desc->attached_wwpn)) + fc_delivery_stats_update(reason_code, &fc_host->fpin_stats); +} + +/* + * fc_fpin_peer_congn_stats_update - routine to update Peer Congestion + * event statistics. 
+ * @shost: host the FPIN was received on + * @tlv: pointer to peer congestion descriptor + * + */ +static void +fc_fpin_peer_congn_stats_update(struct Scsi_Host *shost, + struct fc_tlv_desc *tlv) +{ + u8 i; + struct fc_rport *rport = NULL; + struct fc_rport *attach_rport = NULL; + struct fc_fn_peer_congn_desc *pc_desc = + (struct fc_fn_peer_congn_desc *)tlv; + u16 event_type = be16_to_cpu(pc_desc->event_type); + u64 wwpn; + + rport = fc_find_rport_by_wwpn(shost, + be64_to_cpu(pc_desc->attached_wwpn)); + if (rport && + (rport->roles & FC_PORT_ROLE_FCP_TARGET || + rport->roles & FC_PORT_ROLE_NVME_TARGET)) { + attach_rport = rport; + fc_cn_stats_update(event_type, &attach_rport->fpin_stats); + } + + if (be32_to_cpu(pc_desc->pname_count) > 0) { + for (i = 0; + i < be32_to_cpu(pc_desc->pname_count); + i++) { + wwpn = be64_to_cpu(pc_desc->pname_list[i]); + rport = fc_find_rport_by_wwpn(shost, wwpn); + if (rport && + (rport->roles & FC_PORT_ROLE_FCP_TARGET || + rport->roles & FC_PORT_ROLE_NVME_TARGET)) { + if (rport == attach_rport) + continue; + fc_cn_stats_update(event_type, + &rport->fpin_stats); + } + } + } +} + +/* + * fc_fpin_congn_stats_update - routine to update Congestion + * event statistics. + * @shost: host the FPIN was received on + * @tlv: pointer to congestion descriptor + * + */ +static void +fc_fpin_congn_stats_update(struct Scsi_Host *shost, + struct fc_tlv_desc *tlv) +{ + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + struct fc_fn_congn_desc *congn = (struct fc_fn_congn_desc *)tlv; + + fc_cn_stats_update(be16_to_cpu(congn->event_type), + &fc_host->fpin_stats); +} + +/** + * fc_host_fpin_rcv - routine to process a received FPIN. + * @shost: host the FPIN was received on + * @fpin_len: length of FPIN payload, in bytes + * @fpin_buf: pointer to FPIN payload + * @event_acknowledge: 1, if LLDD handles this event. + * Notes: + * This routine assumes no locks are held on entry. + */ +void +fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf, + u8 event_acknowledge) +{ + struct fc_els_fpin *fpin = (struct fc_els_fpin *)fpin_buf; + struct fc_tlv_desc *tlv; + u32 bytes_remain; + u32 dtag; + enum fc_host_event_code event_code = + event_acknowledge ? 
FCH_EVT_LINK_FPIN_ACK : FCH_EVT_LINK_FPIN; + + /* Update Statistics */ + tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; + bytes_remain = fpin_len - offsetof(struct fc_els_fpin, fpin_desc); + bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); + + while (bytes_remain >= FC_TLV_DESC_HDR_SZ && + bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { + dtag = be32_to_cpu(tlv->desc_tag); + switch (dtag) { + case ELS_DTAG_LNK_INTEGRITY: + fc_fpin_li_stats_update(shost, tlv); + break; + case ELS_DTAG_DELIVERY: + fc_fpin_delivery_stats_update(shost, tlv); + break; + case ELS_DTAG_PEER_CONGEST: + fc_fpin_peer_congn_stats_update(shost, tlv); + break; + case ELS_DTAG_CONGESTION: + fc_fpin_congn_stats_update(shost, tlv); + } + + bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); + tlv = fc_tlv_next_desc(tlv); + } + + fc_host_post_fc_event(shost, fc_get_event_number(), + event_code, fpin_len, fpin_buf, 0); +} +EXPORT_SYMBOL(fc_host_fpin_rcv); + + +static __init int fc_transport_init(void) +{ + int error; + + atomic_set(&fc_event_seq, 0); + + error = transport_class_register(&fc_host_class); + if (error) + return error; + error = transport_class_register(&fc_vport_class); + if (error) + goto unreg_host_class; + error = transport_class_register(&fc_rport_class); + if (error) + goto unreg_vport_class; + error = transport_class_register(&fc_transport_class); + if (error) + goto unreg_rport_class; + return 0; + +unreg_rport_class: + transport_class_unregister(&fc_rport_class); +unreg_vport_class: + transport_class_unregister(&fc_vport_class); +unreg_host_class: + transport_class_unregister(&fc_host_class); + return error; +} + +static void __exit fc_transport_exit(void) +{ + transport_class_unregister(&fc_transport_class); + transport_class_unregister(&fc_rport_class); + transport_class_unregister(&fc_host_class); + transport_class_unregister(&fc_vport_class); +} + +/* + * FC Remote Port Attribute Management + */ + +#define fc_rport_show_function(field, format_string, sz, cast) \ +static ssize_t \ +show_fc_rport_##field (struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct fc_rport *rport = transport_class_to_rport(dev); \ + struct Scsi_Host *shost = rport_to_shost(rport); \ + struct fc_internal *i = to_fc_internal(shost->transportt); \ + if ((i->f->get_rport_##field) && \ + !((rport->port_state == FC_PORTSTATE_BLOCKED) || \ + (rport->port_state == FC_PORTSTATE_DELETED) || \ + (rport->port_state == FC_PORTSTATE_NOTPRESENT))) \ + i->f->get_rport_##field(rport); \ + return snprintf(buf, sz, format_string, cast rport->field); \ +} + +#define fc_rport_store_function(field) \ +static ssize_t \ +store_fc_rport_##field(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + int val; \ + struct fc_rport *rport = transport_class_to_rport(dev); \ + struct Scsi_Host *shost = rport_to_shost(rport); \ + struct fc_internal *i = to_fc_internal(shost->transportt); \ + char *cp; \ + if ((rport->port_state == FC_PORTSTATE_BLOCKED) || \ + (rport->port_state == FC_PORTSTATE_DELETED) || \ + (rport->port_state == FC_PORTSTATE_NOTPRESENT)) \ + return -EBUSY; \ + val = simple_strtoul(buf, &cp, 0); \ + if (*cp && (*cp != '\n')) \ + return -EINVAL; \ + i->f->set_rport_##field(rport, val); \ + return count; \ +} + +#define fc_rport_rd_attr(field, format_string, sz) \ + fc_rport_show_function(field, format_string, sz, ) \ +static FC_DEVICE_ATTR(rport, field, S_IRUGO, \ + show_fc_rport_##field, NULL) + +#define fc_rport_rd_attr_cast(field, 
format_string, sz, cast) \ + fc_rport_show_function(field, format_string, sz, (cast)) \ +static FC_DEVICE_ATTR(rport, field, S_IRUGO, \ + show_fc_rport_##field, NULL) + +#define fc_rport_rw_attr(field, format_string, sz) \ + fc_rport_show_function(field, format_string, sz, ) \ + fc_rport_store_function(field) \ +static FC_DEVICE_ATTR(rport, field, S_IRUGO | S_IWUSR, \ + show_fc_rport_##field, \ + store_fc_rport_##field) + + +#define fc_private_rport_show_function(field, format_string, sz, cast) \ +static ssize_t \ +show_fc_rport_##field (struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct fc_rport *rport = transport_class_to_rport(dev); \ + return snprintf(buf, sz, format_string, cast rport->field); \ +} + +#define fc_private_rport_rd_attr(field, format_string, sz) \ + fc_private_rport_show_function(field, format_string, sz, ) \ +static FC_DEVICE_ATTR(rport, field, S_IRUGO, \ + show_fc_rport_##field, NULL) + +#define fc_private_rport_rd_attr_cast(field, format_string, sz, cast) \ + fc_private_rport_show_function(field, format_string, sz, (cast)) \ +static FC_DEVICE_ATTR(rport, field, S_IRUGO, \ + show_fc_rport_##field, NULL) + + +#define fc_private_rport_rd_enum_attr(title, maxlen) \ +static ssize_t \ +show_fc_rport_##title (struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct fc_rport *rport = transport_class_to_rport(dev); \ + const char *name; \ + name = get_fc_##title##_name(rport->title); \ + if (!name) \ + return -EINVAL; \ + return snprintf(buf, maxlen, "%s\n", name); \ +} \ +static FC_DEVICE_ATTR(rport, title, S_IRUGO, \ + show_fc_rport_##title, NULL) + + +#define SETUP_RPORT_ATTRIBUTE_RD(field) \ + i->private_rport_attrs[count] = device_attr_rport_##field; \ + i->private_rport_attrs[count].attr.mode = S_IRUGO; \ + i->private_rport_attrs[count].store = NULL; \ + i->rport_attrs[count] = &i->private_rport_attrs[count]; \ + if (i->f->show_rport_##field) \ + count++ + +#define SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(field) \ + i->private_rport_attrs[count] = device_attr_rport_##field; \ + i->private_rport_attrs[count].attr.mode = S_IRUGO; \ + i->private_rport_attrs[count].store = NULL; \ + i->rport_attrs[count] = &i->private_rport_attrs[count]; \ + count++ + +#define SETUP_RPORT_ATTRIBUTE_RW(field) \ + i->private_rport_attrs[count] = device_attr_rport_##field; \ + if (!i->f->set_rport_##field) { \ + i->private_rport_attrs[count].attr.mode = S_IRUGO; \ + i->private_rport_attrs[count].store = NULL; \ + } \ + i->rport_attrs[count] = &i->private_rport_attrs[count]; \ + if (i->f->show_rport_##field) \ + count++ + +#define SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(field) \ +{ \ + i->private_rport_attrs[count] = device_attr_rport_##field; \ + i->rport_attrs[count] = &i->private_rport_attrs[count]; \ + count++; \ +} + + +/* The FC Transport Remote Port Attributes: */ + +/* Fixed Remote Port Attributes */ + +fc_private_rport_rd_attr(maxframe_size, "%u bytes\n", 20); + +static ssize_t +show_fc_rport_supported_classes (struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fc_rport *rport = transport_class_to_rport(dev); + if (rport->supported_classes == FC_COS_UNSPECIFIED) + return snprintf(buf, 20, "unspecified\n"); + return get_fc_cos_names(rport->supported_classes, buf); +} +static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO, + show_fc_rport_supported_classes, NULL); + +/* Dynamic Remote Port Attributes */ + +/* + * dev_loss_tmo attribute + */ +static int fc_str_to_dev_loss(const char *buf, unsigned long *val) +{ + char *cp; 
+ + *val = simple_strtoul(buf, &cp, 0); + if (*cp && (*cp != '\n')) + return -EINVAL; + /* + * Check for overflow; dev_loss_tmo is u32 + */ + if (*val > UINT_MAX) + return -EINVAL; + + return 0; +} + +static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport, + unsigned long val) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + struct fc_internal *i = to_fc_internal(shost->transportt); + + if ((rport->port_state == FC_PORTSTATE_BLOCKED) || + (rport->port_state == FC_PORTSTATE_DELETED) || + (rport->port_state == FC_PORTSTATE_NOTPRESENT)) + return -EBUSY; + /* + * Check for overflow; dev_loss_tmo is u32 + */ + if (val > UINT_MAX) + return -EINVAL; + + /* + * If fast_io_fail is off we have to cap + * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT + */ + if (rport->fast_io_fail_tmo == -1 && + val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) + return -EINVAL; + + i->f->set_rport_dev_loss_tmo(rport, val); + return 0; +} + +fc_rport_show_function(dev_loss_tmo, "%u\n", 20, ) +static ssize_t +store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fc_rport *rport = transport_class_to_rport(dev); + unsigned long val; + int rc; + + rc = fc_str_to_dev_loss(buf, &val); + if (rc) + return rc; + + rc = fc_rport_set_dev_loss_tmo(rport, val); + if (rc) + return rc; + return count; +} +static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR, + show_fc_rport_dev_loss_tmo, store_fc_rport_dev_loss_tmo); + + +/* Private Remote Port Attributes */ + +fc_private_rport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); +fc_private_rport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); +fc_private_rport_rd_attr(port_id, "0x%06x\n", 20); + +static ssize_t +show_fc_rport_roles (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fc_rport *rport = transport_class_to_rport(dev); + + /* identify any roles that are port_id specific */ + if ((rport->port_id != -1) && + (rport->port_id & FC_WELLKNOWN_PORTID_MASK) == + FC_WELLKNOWN_PORTID_MASK) { + switch (rport->port_id & FC_WELLKNOWN_ROLE_MASK) { + case FC_FPORT_PORTID: + return snprintf(buf, 30, "Fabric Port\n"); + case FC_FABCTLR_PORTID: + return snprintf(buf, 30, "Fabric Controller\n"); + case FC_DIRSRVR_PORTID: + return snprintf(buf, 30, "Directory Server\n"); + case FC_TIMESRVR_PORTID: + return snprintf(buf, 30, "Time Server\n"); + case FC_MGMTSRVR_PORTID: + return snprintf(buf, 30, "Management Server\n"); + default: + return snprintf(buf, 30, "Unknown Fabric Entity\n"); + } + } else { + if (rport->roles == FC_PORT_ROLE_UNKNOWN) + return snprintf(buf, 20, "unknown\n"); + return get_fc_port_roles_names(rport->roles, buf); + } +} +static FC_DEVICE_ATTR(rport, roles, S_IRUGO, + show_fc_rport_roles, NULL); + +static ssize_t fc_rport_set_marginal_state(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fc_rport *rport = transport_class_to_rport(dev); + enum fc_port_state port_state; + int ret = 0; + + ret = get_fc_port_state_match(buf, &port_state); + if (ret) + return -EINVAL; + if (port_state == FC_PORTSTATE_MARGINAL) { + /* + * Change the state to Marginal only if the + * current rport state is Online + * Allow only Online->Marginal + */ + if (rport->port_state == FC_PORTSTATE_ONLINE) + rport->port_state = port_state; + else + return -EINVAL; + } else if (port_state == FC_PORTSTATE_ONLINE) { + /* + * Change the state to Online only if the + * current rport state is Marginal + * Allow only Marginal->Online + */ 
+ if (rport->port_state == FC_PORTSTATE_MARGINAL) + rport->port_state = port_state; + else + return -EINVAL; + } else + return -EINVAL; + return count; +} + +static ssize_t +show_fc_rport_port_state(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const char *name; + struct fc_rport *rport = transport_class_to_rport(dev); + + name = get_fc_port_state_name(rport->port_state); + if (!name) + return -EINVAL; + + return snprintf(buf, 20, "%s\n", name); +} + +static FC_DEVICE_ATTR(rport, port_state, 0444 | 0200, + show_fc_rport_port_state, fc_rport_set_marginal_state); + +fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20); + +/* + * fast_io_fail_tmo attribute + */ +static ssize_t +show_fc_rport_fast_io_fail_tmo (struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fc_rport *rport = transport_class_to_rport(dev); + + if (rport->fast_io_fail_tmo == -1) + return snprintf(buf, 5, "off\n"); + return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo); +} + +static ssize_t +store_fc_rport_fast_io_fail_tmo(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + int val; + char *cp; + struct fc_rport *rport = transport_class_to_rport(dev); + + if ((rport->port_state == FC_PORTSTATE_BLOCKED) || + (rport->port_state == FC_PORTSTATE_DELETED) || + (rport->port_state == FC_PORTSTATE_NOTPRESENT)) + return -EBUSY; + if (strncmp(buf, "off", 3) == 0) + rport->fast_io_fail_tmo = -1; + else { + val = simple_strtoul(buf, &cp, 0); + if ((*cp && (*cp != '\n')) || (val < 0)) + return -EINVAL; + /* + * Cap fast_io_fail by dev_loss_tmo or + * SCSI_DEVICE_BLOCK_MAX_TIMEOUT. + */ + if ((val >= rport->dev_loss_tmo) || + (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)) + return -EINVAL; + + rport->fast_io_fail_tmo = val; + } + return count; +} +static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR, + show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo); + +#define fc_rport_fpin_statistic(name) \ +static ssize_t fc_rport_fpinstat_##name(struct device *cd, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct fc_rport *rport = transport_class_to_rport(cd); \ + \ + return snprintf(buf, 20, "0x%llx\n", rport->fpin_stats.name); \ +} \ +static FC_DEVICE_ATTR(rport, fpin_##name, 0444, fc_rport_fpinstat_##name, NULL) + +fc_rport_fpin_statistic(dn); +fc_rport_fpin_statistic(dn_unknown); +fc_rport_fpin_statistic(dn_timeout); +fc_rport_fpin_statistic(dn_unable_to_route); +fc_rport_fpin_statistic(dn_device_specific); +fc_rport_fpin_statistic(cn); +fc_rport_fpin_statistic(cn_clear); +fc_rport_fpin_statistic(cn_lost_credit); +fc_rport_fpin_statistic(cn_credit_stall); +fc_rport_fpin_statistic(cn_oversubscription); +fc_rport_fpin_statistic(cn_device_specific); +fc_rport_fpin_statistic(li); +fc_rport_fpin_statistic(li_failure_unknown); +fc_rport_fpin_statistic(li_link_failure_count); +fc_rport_fpin_statistic(li_loss_of_sync_count); +fc_rport_fpin_statistic(li_loss_of_signals_count); +fc_rport_fpin_statistic(li_prim_seq_err_count); +fc_rport_fpin_statistic(li_invalid_tx_word_count); +fc_rport_fpin_statistic(li_invalid_crc_count); +fc_rport_fpin_statistic(li_device_specific); + +static struct attribute *fc_rport_statistics_attrs[] = { + &device_attr_rport_fpin_dn.attr, + &device_attr_rport_fpin_dn_unknown.attr, + &device_attr_rport_fpin_dn_timeout.attr, + &device_attr_rport_fpin_dn_unable_to_route.attr, + &device_attr_rport_fpin_dn_device_specific.attr, + &device_attr_rport_fpin_li.attr, + &device_attr_rport_fpin_li_failure_unknown.attr, + 
&device_attr_rport_fpin_li_link_failure_count.attr, + &device_attr_rport_fpin_li_loss_of_sync_count.attr, + &device_attr_rport_fpin_li_loss_of_signals_count.attr, + &device_attr_rport_fpin_li_prim_seq_err_count.attr, + &device_attr_rport_fpin_li_invalid_tx_word_count.attr, + &device_attr_rport_fpin_li_invalid_crc_count.attr, + &device_attr_rport_fpin_li_device_specific.attr, + &device_attr_rport_fpin_cn.attr, + &device_attr_rport_fpin_cn_clear.attr, + &device_attr_rport_fpin_cn_lost_credit.attr, + &device_attr_rport_fpin_cn_credit_stall.attr, + &device_attr_rport_fpin_cn_oversubscription.attr, + &device_attr_rport_fpin_cn_device_specific.attr, + NULL +}; + +static struct attribute_group fc_rport_statistics_group = { + .name = "statistics", + .attrs = fc_rport_statistics_attrs, +}; + + +/* + * FC SCSI Target Attribute Management + */ + +/* + * Note: in the target show function we recognize when the remote + * port is in the hierarchy and do not allow the driver to get + * involved in sysfs functions. The driver only gets involved if + * it's the "old" style that doesn't use rports. + */ +#define fc_starget_show_function(field, format_string, sz, cast) \ +static ssize_t \ +show_fc_starget_##field (struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct scsi_target *starget = transport_class_to_starget(dev); \ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ + struct fc_internal *i = to_fc_internal(shost->transportt); \ + struct fc_rport *rport = starget_to_rport(starget); \ + if (rport) \ + fc_starget_##field(starget) = rport->field; \ + else if (i->f->get_starget_##field) \ + i->f->get_starget_##field(starget); \ + return snprintf(buf, sz, format_string, \ + cast fc_starget_##field(starget)); \ +} + +#define fc_starget_rd_attr(field, format_string, sz) \ + fc_starget_show_function(field, format_string, sz, ) \ +static FC_DEVICE_ATTR(starget, field, S_IRUGO, \ + show_fc_starget_##field, NULL) + +#define fc_starget_rd_attr_cast(field, format_string, sz, cast) \ + fc_starget_show_function(field, format_string, sz, (cast)) \ +static FC_DEVICE_ATTR(starget, field, S_IRUGO, \ + show_fc_starget_##field, NULL) + +#define SETUP_STARGET_ATTRIBUTE_RD(field) \ + i->private_starget_attrs[count] = device_attr_starget_##field; \ + i->private_starget_attrs[count].attr.mode = S_IRUGO; \ + i->private_starget_attrs[count].store = NULL; \ + i->starget_attrs[count] = &i->private_starget_attrs[count]; \ + if (i->f->show_starget_##field) \ + count++ + +#define SETUP_STARGET_ATTRIBUTE_RW(field) \ + i->private_starget_attrs[count] = device_attr_starget_##field; \ + if (!i->f->set_starget_##field) { \ + i->private_starget_attrs[count].attr.mode = S_IRUGO; \ + i->private_starget_attrs[count].store = NULL; \ + } \ + i->starget_attrs[count] = &i->private_starget_attrs[count]; \ + if (i->f->show_starget_##field) \ + count++ + +/* The FC Transport SCSI Target Attributes: */ +fc_starget_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); +fc_starget_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); +fc_starget_rd_attr(port_id, "0x%06x\n", 20); + + +/* + * FC Virtual Port Attribute Management + */ + +#define fc_vport_show_function(field, format_string, sz, cast) \ +static ssize_t \ +show_fc_vport_##field (struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct fc_vport *vport = transport_class_to_vport(dev); \ + struct Scsi_Host *shost = vport_to_shost(vport); \ + struct fc_internal *i = to_fc_internal(shost->transportt); \ + if 
((i->f->get_vport_##field) && \ + !(vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))) \ + i->f->get_vport_##field(vport); \ + return snprintf(buf, sz, format_string, cast vport->field); \ +} + +#define fc_vport_store_function(field) \ +static ssize_t \ +store_fc_vport_##field(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + int val; \ + struct fc_vport *vport = transport_class_to_vport(dev); \ + struct Scsi_Host *shost = vport_to_shost(vport); \ + struct fc_internal *i = to_fc_internal(shost->transportt); \ + char *cp; \ + if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \ + return -EBUSY; \ + val = simple_strtoul(buf, &cp, 0); \ + if (*cp && (*cp != '\n')) \ + return -EINVAL; \ + i->f->set_vport_##field(vport, val); \ + return count; \ +} + +#define fc_vport_store_str_function(field, slen) \ +static ssize_t \ +store_fc_vport_##field(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + struct fc_vport *vport = transport_class_to_vport(dev); \ + struct Scsi_Host *shost = vport_to_shost(vport); \ + struct fc_internal *i = to_fc_internal(shost->transportt); \ + unsigned int cnt=count; \ + \ + /* count may include a LF at end of string */ \ + if (buf[cnt-1] == '\n') \ + cnt--; \ + if (cnt > ((slen) - 1)) \ + return -EINVAL; \ + memcpy(vport->field, buf, cnt); \ + i->f->set_vport_##field(vport); \ + return count; \ +} + +#define fc_vport_rd_attr(field, format_string, sz) \ + fc_vport_show_function(field, format_string, sz, ) \ +static FC_DEVICE_ATTR(vport, field, S_IRUGO, \ + show_fc_vport_##field, NULL) + +#define fc_vport_rd_attr_cast(field, format_string, sz, cast) \ + fc_vport_show_function(field, format_string, sz, (cast)) \ +static FC_DEVICE_ATTR(vport, field, S_IRUGO, \ + show_fc_vport_##field, NULL) + +#define fc_vport_rw_attr(field, format_string, sz) \ + fc_vport_show_function(field, format_string, sz, ) \ + fc_vport_store_function(field) \ +static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \ + show_fc_vport_##field, \ + store_fc_vport_##field) + +#define fc_private_vport_show_function(field, format_string, sz, cast) \ +static ssize_t \ +show_fc_vport_##field (struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct fc_vport *vport = transport_class_to_vport(dev); \ + return snprintf(buf, sz, format_string, cast vport->field); \ +} + +#define fc_private_vport_store_u32_function(field) \ +static ssize_t \ +store_fc_vport_##field(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + u32 val; \ + struct fc_vport *vport = transport_class_to_vport(dev); \ + char *cp; \ + if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \ + return -EBUSY; \ + val = simple_strtoul(buf, &cp, 0); \ + if (*cp && (*cp != '\n')) \ + return -EINVAL; \ + vport->field = val; \ + return count; \ +} + + +#define fc_private_vport_rd_attr(field, format_string, sz) \ + fc_private_vport_show_function(field, format_string, sz, ) \ +static FC_DEVICE_ATTR(vport, field, S_IRUGO, \ + show_fc_vport_##field, NULL) + +#define fc_private_vport_rd_attr_cast(field, format_string, sz, cast) \ + fc_private_vport_show_function(field, format_string, sz, (cast)) \ +static FC_DEVICE_ATTR(vport, field, S_IRUGO, \ + show_fc_vport_##field, NULL) + +#define fc_private_vport_rw_u32_attr(field, format_string, sz) \ + fc_private_vport_show_function(field, format_string, sz, ) \ + fc_private_vport_store_u32_function(field) \ +static FC_DEVICE_ATTR(vport, field, 
S_IRUGO | S_IWUSR, \ + show_fc_vport_##field, \ + store_fc_vport_##field) + + +#define fc_private_vport_rd_enum_attr(title, maxlen) \ +static ssize_t \ +show_fc_vport_##title (struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct fc_vport *vport = transport_class_to_vport(dev); \ + const char *name; \ + name = get_fc_##title##_name(vport->title); \ + if (!name) \ + return -EINVAL; \ + return snprintf(buf, maxlen, "%s\n", name); \ +} \ +static FC_DEVICE_ATTR(vport, title, S_IRUGO, \ + show_fc_vport_##title, NULL) + + +#define SETUP_VPORT_ATTRIBUTE_RD(field) \ + i->private_vport_attrs[count] = device_attr_vport_##field; \ + i->private_vport_attrs[count].attr.mode = S_IRUGO; \ + i->private_vport_attrs[count].store = NULL; \ + i->vport_attrs[count] = &i->private_vport_attrs[count]; \ + if (i->f->get_##field) \ + count++ + /* NOTE: Above MACRO differs: checks function not show bit */ + +#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(field) \ + i->private_vport_attrs[count] = device_attr_vport_##field; \ + i->private_vport_attrs[count].attr.mode = S_IRUGO; \ + i->private_vport_attrs[count].store = NULL; \ + i->vport_attrs[count] = &i->private_vport_attrs[count]; \ + count++ + +#define SETUP_VPORT_ATTRIBUTE_WR(field) \ + i->private_vport_attrs[count] = device_attr_vport_##field; \ + i->vport_attrs[count] = &i->private_vport_attrs[count]; \ + if (i->f->field) \ + count++ + /* NOTE: Above MACRO differs: checks function */ + +#define SETUP_VPORT_ATTRIBUTE_RW(field) \ + i->private_vport_attrs[count] = device_attr_vport_##field; \ + if (!i->f->set_vport_##field) { \ + i->private_vport_attrs[count].attr.mode = S_IRUGO; \ + i->private_vport_attrs[count].store = NULL; \ + } \ + i->vport_attrs[count] = &i->private_vport_attrs[count]; \ + count++ + /* NOTE: Above MACRO differs: does not check show bit */ + +#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RW(field) \ +{ \ + i->private_vport_attrs[count] = device_attr_vport_##field; \ + i->vport_attrs[count] = &i->private_vport_attrs[count]; \ + count++; \ +} + + +/* The FC Transport Virtual Port Attributes: */ + +/* Fixed Virtual Port Attributes */ + +/* Dynamic Virtual Port Attributes */ + +/* Private Virtual Port Attributes */ + +fc_private_vport_rd_enum_attr(vport_state, FC_VPORTSTATE_MAX_NAMELEN); +fc_private_vport_rd_enum_attr(vport_last_state, FC_VPORTSTATE_MAX_NAMELEN); +fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); +fc_private_vport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); + +static ssize_t +show_fc_vport_roles (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fc_vport *vport = transport_class_to_vport(dev); + + if (vport->roles == FC_PORT_ROLE_UNKNOWN) + return snprintf(buf, 20, "unknown\n"); + return get_fc_port_roles_names(vport->roles, buf); +} +static FC_DEVICE_ATTR(vport, roles, S_IRUGO, show_fc_vport_roles, NULL); + +fc_private_vport_rd_enum_attr(vport_type, FC_PORTTYPE_MAX_NAMELEN); + +fc_private_vport_show_function(symbolic_name, "%s\n", + FC_VPORT_SYMBOLIC_NAMELEN + 1, ) +fc_vport_store_str_function(symbolic_name, FC_VPORT_SYMBOLIC_NAMELEN) +static FC_DEVICE_ATTR(vport, symbolic_name, S_IRUGO | S_IWUSR, + show_fc_vport_symbolic_name, store_fc_vport_symbolic_name); + +static ssize_t +store_fc_vport_delete(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fc_vport *vport = transport_class_to_vport(dev); + struct Scsi_Host *shost = vport_to_shost(vport); + unsigned long flags; + + 
spin_lock_irqsave(shost->host_lock, flags); + if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) { + spin_unlock_irqrestore(shost->host_lock, flags); + return -EBUSY; + } + vport->flags |= FC_VPORT_DELETING; + spin_unlock_irqrestore(shost->host_lock, flags); + + fc_queue_work(shost, &vport->vport_delete_work); + return count; +} +static FC_DEVICE_ATTR(vport, vport_delete, S_IWUSR, + NULL, store_fc_vport_delete); + + +/* + * Enable/Disable vport + * Write "1" to disable, write "0" to enable + */ +static ssize_t +store_fc_vport_disable(struct device *dev, struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct fc_vport *vport = transport_class_to_vport(dev); + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_internal *i = to_fc_internal(shost->transportt); + int stat; + + if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) + return -EBUSY; + + if (*buf == '0') { + if (vport->vport_state != FC_VPORT_DISABLED) + return -EALREADY; + } else if (*buf == '1') { + if (vport->vport_state == FC_VPORT_DISABLED) + return -EALREADY; + } else + return -EINVAL; + + stat = i->f->vport_disable(vport, ((*buf == '0') ? false : true)); + return stat ? stat : count; +} +static FC_DEVICE_ATTR(vport, vport_disable, S_IWUSR, + NULL, store_fc_vport_disable); + + +/* + * Host Attribute Management + */ + +#define fc_host_show_function(field, format_string, sz, cast) \ +static ssize_t \ +show_fc_host_##field (struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct Scsi_Host *shost = transport_class_to_shost(dev); \ + struct fc_internal *i = to_fc_internal(shost->transportt); \ + if (i->f->get_host_##field) \ + i->f->get_host_##field(shost); \ + return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \ +} + +#define fc_host_store_function(field) \ +static ssize_t \ +store_fc_host_##field(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + int val; \ + struct Scsi_Host *shost = transport_class_to_shost(dev); \ + struct fc_internal *i = to_fc_internal(shost->transportt); \ + char *cp; \ + \ + val = simple_strtoul(buf, &cp, 0); \ + if (*cp && (*cp != '\n')) \ + return -EINVAL; \ + i->f->set_host_##field(shost, val); \ + return count; \ +} + +#define fc_host_store_str_function(field, slen) \ +static ssize_t \ +store_fc_host_##field(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + struct Scsi_Host *shost = transport_class_to_shost(dev); \ + struct fc_internal *i = to_fc_internal(shost->transportt); \ + unsigned int cnt=count; \ + \ + /* count may include a LF at end of string */ \ + if (buf[cnt-1] == '\n') \ + cnt--; \ + if (cnt > ((slen) - 1)) \ + return -EINVAL; \ + memcpy(fc_host_##field(shost), buf, cnt); \ + i->f->set_host_##field(shost); \ + return count; \ +} + +#define fc_host_rd_attr(field, format_string, sz) \ + fc_host_show_function(field, format_string, sz, ) \ +static FC_DEVICE_ATTR(host, field, S_IRUGO, \ + show_fc_host_##field, NULL) + +#define fc_host_rd_attr_cast(field, format_string, sz, cast) \ + fc_host_show_function(field, format_string, sz, (cast)) \ +static FC_DEVICE_ATTR(host, field, S_IRUGO, \ + show_fc_host_##field, NULL) + +#define fc_host_rw_attr(field, format_string, sz) \ + fc_host_show_function(field, format_string, sz, ) \ + fc_host_store_function(field) \ +static FC_DEVICE_ATTR(host, field, S_IRUGO | S_IWUSR, \ + show_fc_host_##field, \ + store_fc_host_##field) + +#define fc_host_rd_enum_attr(title, maxlen) \ +static 
ssize_t \ +show_fc_host_##title (struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct Scsi_Host *shost = transport_class_to_shost(dev); \ + struct fc_internal *i = to_fc_internal(shost->transportt); \ + const char *name; \ + if (i->f->get_host_##title) \ + i->f->get_host_##title(shost); \ + name = get_fc_##title##_name(fc_host_##title(shost)); \ + if (!name) \ + return -EINVAL; \ + return snprintf(buf, maxlen, "%s\n", name); \ +} \ +static FC_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL) + +#define SETUP_HOST_ATTRIBUTE_RD(field) \ + i->private_host_attrs[count] = device_attr_host_##field; \ + i->private_host_attrs[count].attr.mode = S_IRUGO; \ + i->private_host_attrs[count].store = NULL; \ + i->host_attrs[count] = &i->private_host_attrs[count]; \ + if (i->f->show_host_##field) \ + count++ + +#define SETUP_HOST_ATTRIBUTE_RD_NS(field) \ + i->private_host_attrs[count] = device_attr_host_##field; \ + i->private_host_attrs[count].attr.mode = S_IRUGO; \ + i->private_host_attrs[count].store = NULL; \ + i->host_attrs[count] = &i->private_host_attrs[count]; \ + count++ + +#define SETUP_HOST_ATTRIBUTE_RW(field) \ + i->private_host_attrs[count] = device_attr_host_##field; \ + if (!i->f->set_host_##field) { \ + i->private_host_attrs[count].attr.mode = S_IRUGO; \ + i->private_host_attrs[count].store = NULL; \ + } \ + i->host_attrs[count] = &i->private_host_attrs[count]; \ + if (i->f->show_host_##field) \ + count++ + + +#define fc_private_host_show_function(field, format_string, sz, cast) \ +static ssize_t \ +show_fc_host_##field (struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct Scsi_Host *shost = transport_class_to_shost(dev); \ + return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \ +} + +#define fc_private_host_rd_attr(field, format_string, sz) \ + fc_private_host_show_function(field, format_string, sz, ) \ +static FC_DEVICE_ATTR(host, field, S_IRUGO, \ + show_fc_host_##field, NULL) + +#define fc_private_host_rd_attr_cast(field, format_string, sz, cast) \ + fc_private_host_show_function(field, format_string, sz, (cast)) \ +static FC_DEVICE_ATTR(host, field, S_IRUGO, \ + show_fc_host_##field, NULL) + +#define SETUP_PRIVATE_HOST_ATTRIBUTE_RD(field) \ + i->private_host_attrs[count] = device_attr_host_##field; \ + i->private_host_attrs[count].attr.mode = S_IRUGO; \ + i->private_host_attrs[count].store = NULL; \ + i->host_attrs[count] = &i->private_host_attrs[count]; \ + count++ + +#define SETUP_PRIVATE_HOST_ATTRIBUTE_RW(field) \ +{ \ + i->private_host_attrs[count] = device_attr_host_##field; \ + i->host_attrs[count] = &i->private_host_attrs[count]; \ + count++; \ +} + + +/* Fixed Host Attributes */ + +static ssize_t +show_fc_host_supported_classes (struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = transport_class_to_shost(dev); + + if (fc_host_supported_classes(shost) == FC_COS_UNSPECIFIED) + return snprintf(buf, 20, "unspecified\n"); + + return get_fc_cos_names(fc_host_supported_classes(shost), buf); +} +static FC_DEVICE_ATTR(host, supported_classes, S_IRUGO, + show_fc_host_supported_classes, NULL); + +static ssize_t +show_fc_host_supported_fc4s (struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = transport_class_to_shost(dev); + return (ssize_t)show_fc_fc4s(buf, fc_host_supported_fc4s(shost)); +} +static FC_DEVICE_ATTR(host, supported_fc4s, S_IRUGO, + show_fc_host_supported_fc4s, NULL); + +static ssize_t 
+show_fc_host_supported_speeds (struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = transport_class_to_shost(dev); + + if (fc_host_supported_speeds(shost) == FC_PORTSPEED_UNKNOWN) + return snprintf(buf, 20, "unknown\n"); + + return get_fc_port_speed_names(fc_host_supported_speeds(shost), buf); +} +static FC_DEVICE_ATTR(host, supported_speeds, S_IRUGO, + show_fc_host_supported_speeds, NULL); + + +fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); +fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); +fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20, + unsigned long long); +fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20); +fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20); +fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1)); +fc_private_host_rd_attr(manufacturer, "%s\n", FC_SERIAL_NUMBER_SIZE + 1); +fc_private_host_rd_attr(model, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1); +fc_private_host_rd_attr(model_description, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1); +fc_private_host_rd_attr(hardware_version, "%s\n", FC_VERSION_STRING_SIZE + 1); +fc_private_host_rd_attr(driver_version, "%s\n", FC_VERSION_STRING_SIZE + 1); +fc_private_host_rd_attr(firmware_version, "%s\n", FC_VERSION_STRING_SIZE + 1); +fc_private_host_rd_attr(optionrom_version, "%s\n", FC_VERSION_STRING_SIZE + 1); + + +/* Dynamic Host Attributes */ + +static ssize_t +show_fc_host_active_fc4s (struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = transport_class_to_shost(dev); + struct fc_internal *i = to_fc_internal(shost->transportt); + + if (i->f->get_host_active_fc4s) + i->f->get_host_active_fc4s(shost); + + return (ssize_t)show_fc_fc4s(buf, fc_host_active_fc4s(shost)); +} +static FC_DEVICE_ATTR(host, active_fc4s, S_IRUGO, + show_fc_host_active_fc4s, NULL); + +static ssize_t +show_fc_host_speed (struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = transport_class_to_shost(dev); + struct fc_internal *i = to_fc_internal(shost->transportt); + + if (i->f->get_host_speed) + i->f->get_host_speed(shost); + + if (fc_host_speed(shost) == FC_PORTSPEED_UNKNOWN) + return snprintf(buf, 20, "unknown\n"); + + return get_fc_port_speed_names(fc_host_speed(shost), buf); +} +static FC_DEVICE_ATTR(host, speed, S_IRUGO, + show_fc_host_speed, NULL); + + +fc_host_rd_attr(port_id, "0x%06x\n", 20); +fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN); +fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); +fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long); +fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1); + +fc_private_host_show_function(system_hostname, "%s\n", + FC_SYMBOLIC_NAME_SIZE + 1, ) +fc_host_store_str_function(system_hostname, FC_SYMBOLIC_NAME_SIZE) +static FC_DEVICE_ATTR(host, system_hostname, S_IRUGO | S_IWUSR, + show_fc_host_system_hostname, store_fc_host_system_hostname); + + +/* Private Host Attributes */ + +static ssize_t +show_fc_private_host_tgtid_bind_type(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = transport_class_to_shost(dev); + const char *name; + + name = get_fc_tgtid_bind_type_name(fc_host_tgtid_bind_type(shost)); + if (!name) + return -EINVAL; + return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name); +} + +#define get_list_head_entry(pos, head, member) \ + pos = list_entry((head)->next, typeof(*pos), member) + +static 
ssize_t +store_fc_private_host_tgtid_bind_type(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = transport_class_to_shost(dev); + struct fc_rport *rport; + enum fc_tgtid_binding_type val; + unsigned long flags; + + if (get_fc_tgtid_bind_type_match(buf, &val)) + return -EINVAL; + + /* if changing bind type, purge all unused consistent bindings */ + if (val != fc_host_tgtid_bind_type(shost)) { + spin_lock_irqsave(shost->host_lock, flags); + while (!list_empty(&fc_host_rport_bindings(shost))) { + get_list_head_entry(rport, + &fc_host_rport_bindings(shost), peers); + list_del(&rport->peers); + rport->port_state = FC_PORTSTATE_DELETED; + fc_queue_work(shost, &rport->rport_delete_work); + } + spin_unlock_irqrestore(shost->host_lock, flags); + } + + fc_host_tgtid_bind_type(shost) = val; + return count; +} + +static FC_DEVICE_ATTR(host, tgtid_bind_type, S_IRUGO | S_IWUSR, + show_fc_private_host_tgtid_bind_type, + store_fc_private_host_tgtid_bind_type); + +static ssize_t +store_fc_private_host_issue_lip(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = transport_class_to_shost(dev); + struct fc_internal *i = to_fc_internal(shost->transportt); + int ret; + + /* ignore any data value written to the attribute */ + if (i->f->issue_fc_host_lip) { + ret = i->f->issue_fc_host_lip(shost); + return ret ? ret: count; + } + + return -ENOENT; +} + +static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL, + store_fc_private_host_issue_lip); + +static ssize_t +store_fc_private_host_dev_loss_tmo(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = transport_class_to_shost(dev); + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + struct fc_rport *rport; + unsigned long val, flags; + int rc; + + rc = fc_str_to_dev_loss(buf, &val); + if (rc) + return rc; + + fc_host_dev_loss_tmo(shost) = val; + spin_lock_irqsave(shost->host_lock, flags); + list_for_each_entry(rport, &fc_host->rports, peers) + fc_rport_set_dev_loss_tmo(rport, val); + spin_unlock_irqrestore(shost->host_lock, flags); + return count; +} + +fc_private_host_show_function(dev_loss_tmo, "%d\n", 20, ); +static FC_DEVICE_ATTR(host, dev_loss_tmo, S_IRUGO | S_IWUSR, + show_fc_host_dev_loss_tmo, + store_fc_private_host_dev_loss_tmo); + +fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20); + +/* + * Host Statistics Management + */ + +/* Show a given attribute in the statistics group */ +static ssize_t +fc_stat_show(const struct device *dev, char *buf, unsigned long offset) +{ + struct Scsi_Host *shost = transport_class_to_shost(dev); + struct fc_internal *i = to_fc_internal(shost->transportt); + struct fc_host_statistics *stats; + ssize_t ret = -ENOENT; + + if (offset > sizeof(struct fc_host_statistics) || + offset % sizeof(u64) != 0) + WARN_ON(1); + + if (i->f->get_fc_host_stats) { + stats = (i->f->get_fc_host_stats)(shost); + if (stats) + ret = snprintf(buf, 20, "0x%llx\n", + (unsigned long long)*(u64 *)(((u8 *) stats) + offset)); + } + return ret; +} + + +/* generate a read-only statistics attribute */ +#define fc_host_statistic(name) \ +static ssize_t show_fcstat_##name(struct device *cd, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + return fc_stat_show(cd, buf, \ + offsetof(struct fc_host_statistics, name)); \ +} \ +static FC_DEVICE_ATTR(host, name, S_IRUGO, show_fcstat_##name, NULL) + +fc_host_statistic(seconds_since_last_reset); 
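The fc_stat_show()/fc_host_statistic() pair above reduces every statistics attribute to a byte offset into one struct of u64 counters, so each attribute needs nothing more than an offsetof(). The remaining counters are declared the same way below. A stand-alone, runnable illustration of the offset trick (using a tiny stand-in struct, not the kernel's struct fc_host_statistics) follows:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
	uint64_t tx_frames;
	uint64_t rx_frames;
	uint64_t error_frames;
};

/* Generic "show one counter" helper: fetch a u64 at a given byte offset. */
static uint64_t demo_stat_show(const struct demo_stats *stats, size_t offset)
{
	return *(const uint64_t *)((const uint8_t *)stats + offset);
}

int main(void)
{
	struct demo_stats stats = {
		.tx_frames = 100, .rx_frames = 200, .error_frames = 3,
	};

	/* Each "attribute" is just an offset, as in fc_host_statistic(). */
	printf("tx_frames    0x%llx\n", (unsigned long long)
	       demo_stat_show(&stats, offsetof(struct demo_stats, tx_frames)));
	printf("error_frames 0x%llx\n", (unsigned long long)
	       demo_stat_show(&stats, offsetof(struct demo_stats, error_frames)));
	return 0;
}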
+fc_host_statistic(tx_frames); +fc_host_statistic(tx_words); +fc_host_statistic(rx_frames); +fc_host_statistic(rx_words); +fc_host_statistic(lip_count); +fc_host_statistic(nos_count); +fc_host_statistic(error_frames); +fc_host_statistic(dumped_frames); +fc_host_statistic(link_failure_count); +fc_host_statistic(loss_of_sync_count); +fc_host_statistic(loss_of_signal_count); +fc_host_statistic(prim_seq_protocol_err_count); +fc_host_statistic(invalid_tx_word_count); +fc_host_statistic(invalid_crc_count); +fc_host_statistic(fcp_input_requests); +fc_host_statistic(fcp_output_requests); +fc_host_statistic(fcp_control_requests); +fc_host_statistic(fcp_input_megabytes); +fc_host_statistic(fcp_output_megabytes); +fc_host_statistic(fcp_packet_alloc_failures); +fc_host_statistic(fcp_packet_aborts); +fc_host_statistic(fcp_frame_alloc_failures); +fc_host_statistic(fc_no_free_exch); +fc_host_statistic(fc_no_free_exch_xid); +fc_host_statistic(fc_xid_not_found); +fc_host_statistic(fc_xid_busy); +fc_host_statistic(fc_seq_not_found); +fc_host_statistic(fc_non_bls_resp); +fc_host_statistic(cn_sig_warn); +fc_host_statistic(cn_sig_alarm); + + +#define fc_host_fpin_statistic(name) \ +static ssize_t fc_host_fpinstat_##name(struct device *cd, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct Scsi_Host *shost = transport_class_to_shost(cd); \ + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); \ + \ + return snprintf(buf, 20, "0x%llx\n", fc_host->fpin_stats.name); \ +} \ +static FC_DEVICE_ATTR(host, fpin_##name, 0444, fc_host_fpinstat_##name, NULL) + +fc_host_fpin_statistic(dn); +fc_host_fpin_statistic(dn_unknown); +fc_host_fpin_statistic(dn_timeout); +fc_host_fpin_statistic(dn_unable_to_route); +fc_host_fpin_statistic(dn_device_specific); +fc_host_fpin_statistic(cn); +fc_host_fpin_statistic(cn_clear); +fc_host_fpin_statistic(cn_lost_credit); +fc_host_fpin_statistic(cn_credit_stall); +fc_host_fpin_statistic(cn_oversubscription); +fc_host_fpin_statistic(cn_device_specific); +fc_host_fpin_statistic(li); +fc_host_fpin_statistic(li_failure_unknown); +fc_host_fpin_statistic(li_link_failure_count); +fc_host_fpin_statistic(li_loss_of_sync_count); +fc_host_fpin_statistic(li_loss_of_signals_count); +fc_host_fpin_statistic(li_prim_seq_err_count); +fc_host_fpin_statistic(li_invalid_tx_word_count); +fc_host_fpin_statistic(li_invalid_crc_count); +fc_host_fpin_statistic(li_device_specific); + +static ssize_t +fc_reset_statistics(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = transport_class_to_shost(dev); + struct fc_internal *i = to_fc_internal(shost->transportt); + + /* ignore any data value written to the attribute */ + if (i->f->reset_fc_host_stats) { + i->f->reset_fc_host_stats(shost); + return count; + } + + return -ENOENT; +} +static FC_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL, + fc_reset_statistics); + +static struct attribute *fc_statistics_attrs[] = { + &device_attr_host_seconds_since_last_reset.attr, + &device_attr_host_tx_frames.attr, + &device_attr_host_tx_words.attr, + &device_attr_host_rx_frames.attr, + &device_attr_host_rx_words.attr, + &device_attr_host_lip_count.attr, + &device_attr_host_nos_count.attr, + &device_attr_host_error_frames.attr, + &device_attr_host_dumped_frames.attr, + &device_attr_host_link_failure_count.attr, + &device_attr_host_loss_of_sync_count.attr, + &device_attr_host_loss_of_signal_count.attr, + &device_attr_host_prim_seq_protocol_err_count.attr, + 
&device_attr_host_invalid_tx_word_count.attr, + &device_attr_host_invalid_crc_count.attr, + &device_attr_host_fcp_input_requests.attr, + &device_attr_host_fcp_output_requests.attr, + &device_attr_host_fcp_control_requests.attr, + &device_attr_host_fcp_input_megabytes.attr, + &device_attr_host_fcp_output_megabytes.attr, + &device_attr_host_fcp_packet_alloc_failures.attr, + &device_attr_host_fcp_packet_aborts.attr, + &device_attr_host_fcp_frame_alloc_failures.attr, + &device_attr_host_fc_no_free_exch.attr, + &device_attr_host_fc_no_free_exch_xid.attr, + &device_attr_host_fc_xid_not_found.attr, + &device_attr_host_fc_xid_busy.attr, + &device_attr_host_fc_seq_not_found.attr, + &device_attr_host_fc_non_bls_resp.attr, + &device_attr_host_cn_sig_warn.attr, + &device_attr_host_cn_sig_alarm.attr, + &device_attr_host_reset_statistics.attr, + &device_attr_host_fpin_dn.attr, + &device_attr_host_fpin_dn_unknown.attr, + &device_attr_host_fpin_dn_timeout.attr, + &device_attr_host_fpin_dn_unable_to_route.attr, + &device_attr_host_fpin_dn_device_specific.attr, + &device_attr_host_fpin_li.attr, + &device_attr_host_fpin_li_failure_unknown.attr, + &device_attr_host_fpin_li_link_failure_count.attr, + &device_attr_host_fpin_li_loss_of_sync_count.attr, + &device_attr_host_fpin_li_loss_of_signals_count.attr, + &device_attr_host_fpin_li_prim_seq_err_count.attr, + &device_attr_host_fpin_li_invalid_tx_word_count.attr, + &device_attr_host_fpin_li_invalid_crc_count.attr, + &device_attr_host_fpin_li_device_specific.attr, + &device_attr_host_fpin_cn.attr, + &device_attr_host_fpin_cn_clear.attr, + &device_attr_host_fpin_cn_lost_credit.attr, + &device_attr_host_fpin_cn_credit_stall.attr, + &device_attr_host_fpin_cn_oversubscription.attr, + &device_attr_host_fpin_cn_device_specific.attr, + NULL +}; + +static struct attribute_group fc_statistics_group = { + .name = "statistics", + .attrs = fc_statistics_attrs, +}; + + +/* Host Vport Attributes */ + +static int +fc_parse_wwn(const char *ns, u64 *nm) +{ + unsigned int i, j; + u8 wwn[8]; + + memset(wwn, 0, sizeof(wwn)); + + /* Validate and store the new name */ + for (i=0, j=0; i < 16; i++) { + int value; + + value = hex_to_bin(*ns++); + if (value >= 0) + j = (j << 4) | value; + else + return -EINVAL; + if (i % 2) { + wwn[i/2] = j & 0xff; + j = 0; + } + } + + *nm = wwn_to_u64(wwn); + + return 0; +} + + +/* + * "Short-cut" sysfs variable to create a new vport on a FC Host. + * Input is a string of the form ":". Other attributes + * will default to a NPIV-based FCP_Initiator; The WWNs are specified + * as hex characters, and may *not* contain any prefixes (e.g. 0x, x, etc) + */ +static ssize_t +store_fc_host_vport_create(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = transport_class_to_shost(dev); + struct fc_vport_identifiers vid; + struct fc_vport *vport; + unsigned int cnt=count; + int stat; + + memset(&vid, 0, sizeof(vid)); + + /* count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + /* validate we have enough characters for WWPN */ + if ((cnt != (16+1+16)) || (buf[16] != ':')) + return -EINVAL; + + stat = fc_parse_wwn(&buf[0], &vid.port_name); + if (stat) + return stat; + + stat = fc_parse_wwn(&buf[17], &vid.node_name); + if (stat) + return stat; + + vid.roles = FC_PORT_ROLE_FCP_INITIATOR; + vid.vport_type = FC_PORTTYPE_NPIV; + /* vid.symbolic_name is already zero/NULL's */ + vid.disable = false; /* always enabled */ + + /* we only allow support on Channel 0 !!! 
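The vport_create store routine above accepts exactly 16 hex digits, a ':', and 16 more hex digits, with no "0x" prefix, and fc_parse_wwn() converts each half into a 64-bit WWN. A stand-alone, runnable illustration of that input format and conversion is below; demo_parse_wwn() mirrors the logic of fc_parse_wwn(), and all names and the sample WWNs are local stand-ins:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int demo_parse_wwn(const char *ns, uint64_t *nm)
{
	uint64_t wwn = 0;
	int i;

	for (i = 0; i < 16; i++) {
		char c = ns[i];
		int v;

		if (c >= '0' && c <= '9')
			v = c - '0';
		else if (c >= 'a' && c <= 'f')
			v = c - 'a' + 10;
		else if (c >= 'A' && c <= 'F')
			v = c - 'A' + 10;
		else
			return -1;		/* reject non-hex input */
		wwn = (wwn << 4) | (uint64_t)v;
	}
	*nm = wwn;
	return 0;
}

int main(void)
{
	const char *buf = "1000000000000001:2000000000000001";
	uint64_t wwpn, wwnn;

	/* Same shape check as store_fc_host_vport_create(): 16 + ':' + 16 */
	if (strlen(buf) != 16 + 1 + 16 || buf[16] != ':')
		return 1;
	if (demo_parse_wwn(&buf[0], &wwpn) || demo_parse_wwn(&buf[17], &wwnn))
		return 1;

	printf("wwpn=0x%016llx wwnn=0x%016llx\n",
	       (unsigned long long)wwpn, (unsigned long long)wwnn);
	return 0;
}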
*/ + stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport); + return stat ? stat : count; +} +static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL, + store_fc_host_vport_create); + + +/* + * "Short-cut" sysfs variable to delete a vport on a FC Host. + * Vport is identified by a string containing ":". + * The WWNs are specified as hex characters, and may *not* contain + * any prefixes (e.g. 0x, x, etc) + */ +static ssize_t +store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = transport_class_to_shost(dev); + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + struct fc_vport *vport; + u64 wwpn, wwnn; + unsigned long flags; + unsigned int cnt=count; + int stat, match; + + /* count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + /* validate we have enough characters for WWPN */ + if ((cnt != (16+1+16)) || (buf[16] != ':')) + return -EINVAL; + + stat = fc_parse_wwn(&buf[0], &wwpn); + if (stat) + return stat; + + stat = fc_parse_wwn(&buf[17], &wwnn); + if (stat) + return stat; + + spin_lock_irqsave(shost->host_lock, flags); + match = 0; + /* we only allow support on Channel 0 !!! */ + list_for_each_entry(vport, &fc_host->vports, peers) { + if ((vport->channel == 0) && + (vport->port_name == wwpn) && (vport->node_name == wwnn)) { + if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) + break; + vport->flags |= FC_VPORT_DELETING; + match = 1; + break; + } + } + spin_unlock_irqrestore(shost->host_lock, flags); + + if (!match) + return -ENODEV; + + stat = fc_vport_terminate(vport); + return stat ? stat : count; +} +static FC_DEVICE_ATTR(host, vport_delete, S_IWUSR, NULL, + store_fc_host_vport_delete); + + +static int fc_host_match(struct attribute_container *cont, + struct device *dev) +{ + struct Scsi_Host *shost; + struct fc_internal *i; + + if (!scsi_is_host_device(dev)) + return 0; + + shost = dev_to_shost(dev); + if (!shost->transportt || shost->transportt->host_attrs.ac.class + != &fc_host_class.class) + return 0; + + i = to_fc_internal(shost->transportt); + + return &i->t.host_attrs.ac == cont; +} + +static int fc_target_match(struct attribute_container *cont, + struct device *dev) +{ + struct Scsi_Host *shost; + struct fc_internal *i; + + if (!scsi_is_target_device(dev)) + return 0; + + shost = dev_to_shost(dev->parent); + if (!shost->transportt || shost->transportt->host_attrs.ac.class + != &fc_host_class.class) + return 0; + + i = to_fc_internal(shost->transportt); + + return &i->t.target_attrs.ac == cont; +} + +static void fc_rport_dev_release(struct device *dev) +{ + struct fc_rport *rport = dev_to_rport(dev); + put_device(dev->parent); + kfree(rport); +} + +int scsi_is_fc_rport(const struct device *dev) +{ + return dev->release == fc_rport_dev_release; +} +EXPORT_SYMBOL(scsi_is_fc_rport); + +static int fc_rport_match(struct attribute_container *cont, + struct device *dev) +{ + struct Scsi_Host *shost; + struct fc_internal *i; + + if (!scsi_is_fc_rport(dev)) + return 0; + + shost = dev_to_shost(dev->parent); + if (!shost->transportt || shost->transportt->host_attrs.ac.class + != &fc_host_class.class) + return 0; + + i = to_fc_internal(shost->transportt); + + return &i->rport_attr_cont.ac == cont; +} + + +static void fc_vport_dev_release(struct device *dev) +{ + struct fc_vport *vport = dev_to_vport(dev); + put_device(dev->parent); /* release kobj parent */ + kfree(vport); +} + +static int scsi_is_fc_vport(const struct device *dev) +{ + return 
dev->release == fc_vport_dev_release; +} + +static int fc_vport_match(struct attribute_container *cont, + struct device *dev) +{ + struct fc_vport *vport; + struct Scsi_Host *shost; + struct fc_internal *i; + + if (!scsi_is_fc_vport(dev)) + return 0; + vport = dev_to_vport(dev); + + shost = vport_to_shost(vport); + if (!shost->transportt || shost->transportt->host_attrs.ac.class + != &fc_host_class.class) + return 0; + + i = to_fc_internal(shost->transportt); + return &i->vport_attr_cont.ac == cont; +} + + +/** + * fc_eh_timed_out - FC Transport I/O timeout intercept handler + * @scmd: The SCSI command which timed out + * + * This routine protects against error handlers getting invoked while a + * rport is in a blocked state, typically due to a temporarily loss of + * connectivity. If the error handlers are allowed to proceed, requests + * to abort i/o, reset the target, etc will likely fail as there is no way + * to communicate with the device to perform the requested function. These + * failures may result in the midlayer taking the device offline, requiring + * manual intervention to restore operation. + * + * This routine, called whenever an i/o times out, validates the state of + * the underlying rport. If the rport is blocked, it returns + * EH_RESET_TIMER, which will continue to reschedule the timeout. + * Eventually, either the device will return, or devloss_tmo will fire, + * and when the timeout then fires, it will be handled normally. + * If the rport is not blocked, normal error handling continues. + * + * Notes: + * This routine assumes no locks are held on entry. + */ +enum scsi_timeout_action fc_eh_timed_out(struct scsi_cmnd *scmd) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); + + if (rport->port_state == FC_PORTSTATE_BLOCKED) + return SCSI_EH_RESET_TIMER; + + return SCSI_EH_NOT_HANDLED; +} +EXPORT_SYMBOL(fc_eh_timed_out); + +/* + * Called by fc_user_scan to locate an rport on the shost that + * matches the channel and target id, and invoke scsi_scan_target() + * on the rport. + */ +static void +fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, u64 lun) +{ + struct fc_rport *rport; + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + + list_for_each_entry(rport, &fc_host_rports(shost), peers) { + if (rport->scsi_target_id == -1) + continue; + + if ((rport->port_state != FC_PORTSTATE_ONLINE) && + (rport->port_state != FC_PORTSTATE_MARGINAL)) + continue; + + if ((channel == rport->channel) && + (id == rport->scsi_target_id)) { + spin_unlock_irqrestore(shost->host_lock, flags); + scsi_scan_target(&rport->dev, channel, id, lun, + SCSI_SCAN_MANUAL); + return; + } + } + + spin_unlock_irqrestore(shost->host_lock, flags); +} + +/* + * Called via sysfs scan routines. Necessary, as the FC transport + * wants to place all target objects below the rport object. So this + * routine must invoke the scsi_scan_target() routine with the rport + * object as the parent. 
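+ *
+ * Illustrative trigger (editorial example, not from the original sources):
+ * a manual scan of channel 0, target 2, all LUNs could be requested with
+ *	echo "0 2 -" > /sys/class/scsi_host/host<N>/scan
+ * which reaches this routine with channel=0, id=2 and lun=SCAN_WILD_CARD.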
+ */ +static int +fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun) +{ + uint chlo, chhi; + uint tgtlo, tgthi; + + if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || + ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) || + ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun))) + return -EINVAL; + + if (channel == SCAN_WILD_CARD) { + chlo = 0; + chhi = shost->max_channel + 1; + } else { + chlo = channel; + chhi = channel + 1; + } + + if (id == SCAN_WILD_CARD) { + tgtlo = 0; + tgthi = shost->max_id; + } else { + tgtlo = id; + tgthi = id + 1; + } + + for ( ; chlo < chhi; chlo++) + for ( ; tgtlo < tgthi; tgtlo++) + fc_user_scan_tgt(shost, chlo, tgtlo, lun); + + return 0; +} + +struct scsi_transport_template * +fc_attach_transport(struct fc_function_template *ft) +{ + int count; + struct fc_internal *i = kzalloc(sizeof(struct fc_internal), + GFP_KERNEL); + + if (unlikely(!i)) + return NULL; + + i->t.target_attrs.ac.attrs = &i->starget_attrs[0]; + i->t.target_attrs.ac.class = &fc_transport_class.class; + i->t.target_attrs.ac.match = fc_target_match; + i->t.target_size = sizeof(struct fc_starget_attrs); + transport_container_register(&i->t.target_attrs); + + i->t.host_attrs.ac.attrs = &i->host_attrs[0]; + i->t.host_attrs.ac.class = &fc_host_class.class; + i->t.host_attrs.ac.match = fc_host_match; + i->t.host_size = sizeof(struct fc_host_attrs); + if (ft->get_fc_host_stats) + i->t.host_attrs.statistics = &fc_statistics_group; + transport_container_register(&i->t.host_attrs); + + i->rport_attr_cont.ac.attrs = &i->rport_attrs[0]; + i->rport_attr_cont.ac.class = &fc_rport_class.class; + i->rport_attr_cont.ac.match = fc_rport_match; + i->rport_attr_cont.statistics = &fc_rport_statistics_group; + transport_container_register(&i->rport_attr_cont); + + i->vport_attr_cont.ac.attrs = &i->vport_attrs[0]; + i->vport_attr_cont.ac.class = &fc_vport_class.class; + i->vport_attr_cont.ac.match = fc_vport_match; + transport_container_register(&i->vport_attr_cont); + + i->f = ft; + + /* Transport uses the shost workq for scsi scanning */ + i->t.create_work_queue = 1; + + i->t.user_scan = fc_user_scan; + + /* + * Setup SCSI Target Attributes. + */ + count = 0; + SETUP_STARGET_ATTRIBUTE_RD(node_name); + SETUP_STARGET_ATTRIBUTE_RD(port_name); + SETUP_STARGET_ATTRIBUTE_RD(port_id); + + BUG_ON(count > FC_STARGET_NUM_ATTRS); + + i->starget_attrs[count] = NULL; + + + /* + * Setup SCSI Host Attributes. 
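+ *
+ * Editorial note: each SETUP_*_ATTRIBUTE_*() invocation below installs one
+ * device attribute into the corresponding NULL-terminated attrs[] array,
+ * for most of the macros advancing "count" only when the LLDD's
+ * fc_function_template enables the matching show/set hook. The macros are
+ * defined earlier in this file (not shown in this hunk); a rough sketch of
+ * the idea, not the literal expansion:
+ *
+ *	if (i->f->show_host_<field>)
+ *		i->host_attrs[count++] = &device_attr_host_<field>;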
+ */ + count=0; + SETUP_HOST_ATTRIBUTE_RD(node_name); + SETUP_HOST_ATTRIBUTE_RD(port_name); + SETUP_HOST_ATTRIBUTE_RD(permanent_port_name); + SETUP_HOST_ATTRIBUTE_RD(supported_classes); + SETUP_HOST_ATTRIBUTE_RD(supported_fc4s); + SETUP_HOST_ATTRIBUTE_RD(supported_speeds); + SETUP_HOST_ATTRIBUTE_RD(maxframe_size); + if (ft->vport_create) { + SETUP_HOST_ATTRIBUTE_RD_NS(max_npiv_vports); + SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse); + } + SETUP_HOST_ATTRIBUTE_RD(serial_number); + SETUP_HOST_ATTRIBUTE_RD(manufacturer); + SETUP_HOST_ATTRIBUTE_RD(model); + SETUP_HOST_ATTRIBUTE_RD(model_description); + SETUP_HOST_ATTRIBUTE_RD(hardware_version); + SETUP_HOST_ATTRIBUTE_RD(driver_version); + SETUP_HOST_ATTRIBUTE_RD(firmware_version); + SETUP_HOST_ATTRIBUTE_RD(optionrom_version); + + SETUP_HOST_ATTRIBUTE_RD(port_id); + SETUP_HOST_ATTRIBUTE_RD(port_type); + SETUP_HOST_ATTRIBUTE_RD(port_state); + SETUP_HOST_ATTRIBUTE_RD(active_fc4s); + SETUP_HOST_ATTRIBUTE_RD(speed); + SETUP_HOST_ATTRIBUTE_RD(fabric_name); + SETUP_HOST_ATTRIBUTE_RD(symbolic_name); + SETUP_HOST_ATTRIBUTE_RW(system_hostname); + + /* Transport-managed attributes */ + SETUP_PRIVATE_HOST_ATTRIBUTE_RW(dev_loss_tmo); + SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type); + if (ft->issue_fc_host_lip) + SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip); + if (ft->vport_create) + SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_create); + if (ft->vport_delete) + SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_delete); + + BUG_ON(count > FC_HOST_NUM_ATTRS); + + i->host_attrs[count] = NULL; + + /* + * Setup Remote Port Attributes. + */ + count=0; + SETUP_RPORT_ATTRIBUTE_RD(maxframe_size); + SETUP_RPORT_ATTRIBUTE_RD(supported_classes); + SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo); + SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(node_name); + SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_name); + SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_id); + SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles); + SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(port_state); + SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id); + SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo); + + BUG_ON(count > FC_RPORT_NUM_ATTRS); + + i->rport_attrs[count] = NULL; + + /* + * Setup Virtual Port Attributes. + */ + count=0; + SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_state); + SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_last_state); + SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(node_name); + SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(port_name); + SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(roles); + SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_type); + SETUP_VPORT_ATTRIBUTE_RW(symbolic_name); + SETUP_VPORT_ATTRIBUTE_WR(vport_delete); + SETUP_VPORT_ATTRIBUTE_WR(vport_disable); + + BUG_ON(count > FC_VPORT_NUM_ATTRS); + + i->vport_attrs[count] = NULL; + + return &i->t; +} +EXPORT_SYMBOL(fc_attach_transport); + +void fc_release_transport(struct scsi_transport_template *t) +{ + struct fc_internal *i = to_fc_internal(t); + + transport_container_unregister(&i->t.target_attrs); + transport_container_unregister(&i->t.host_attrs); + transport_container_unregister(&i->rport_attr_cont); + transport_container_unregister(&i->vport_attr_cont); + + kfree(i); +} +EXPORT_SYMBOL(fc_release_transport); + +/** + * fc_queue_work - Queue work to the fc_host workqueue. + * @shost: Pointer to Scsi_Host bound to fc_host. + * @work: Work to queue for execution. 
+ * + * Return value: + * 1 - work queued for execution + * 0 - work is already queued + * -EINVAL - work queue doesn't exist + */ +static int +fc_queue_work(struct Scsi_Host *shost, struct work_struct *work) +{ + if (unlikely(!fc_host_work_q(shost))) { + printk(KERN_ERR + "ERROR: FC host '%s' attempted to queue work, " + "when no workqueue created.\n", shost->hostt->name); + dump_stack(); + + return -EINVAL; + } + + return queue_work(fc_host_work_q(shost), work); +} + +/** + * fc_flush_work - Flush a fc_host's workqueue. + * @shost: Pointer to Scsi_Host bound to fc_host. + */ +static void +fc_flush_work(struct Scsi_Host *shost) +{ + if (!fc_host_work_q(shost)) { + printk(KERN_ERR + "ERROR: FC host '%s' attempted to flush work, " + "when no workqueue created.\n", shost->hostt->name); + dump_stack(); + return; + } + + flush_workqueue(fc_host_work_q(shost)); +} + +/** + * fc_queue_devloss_work - Schedule work for the fc_host devloss workqueue. + * @shost: Pointer to Scsi_Host bound to fc_host. + * @work: Work to queue for execution. + * @delay: jiffies to delay the work queuing + * + * Return value: + * 1 on success / 0 already queued / < 0 for error + */ +static int +fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work, + unsigned long delay) +{ + if (unlikely(!fc_host_devloss_work_q(shost))) { + printk(KERN_ERR + "ERROR: FC host '%s' attempted to queue work, " + "when no workqueue created.\n", shost->hostt->name); + dump_stack(); + + return -EINVAL; + } + + return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); +} + +/** + * fc_flush_devloss - Flush a fc_host's devloss workqueue. + * @shost: Pointer to Scsi_Host bound to fc_host. + */ +static void +fc_flush_devloss(struct Scsi_Host *shost) +{ + if (!fc_host_devloss_work_q(shost)) { + printk(KERN_ERR + "ERROR: FC host '%s' attempted to flush work, " + "when no workqueue created.\n", shost->hostt->name); + dump_stack(); + return; + } + + flush_workqueue(fc_host_devloss_work_q(shost)); +} + + +/** + * fc_remove_host - called to terminate any fc_transport-related elements for a scsi host. + * @shost: Which &Scsi_Host + * + * This routine is expected to be called immediately preceding the + * a driver's call to scsi_remove_host(). + * + * WARNING: A driver utilizing the fc_transport, which fails to call + * this routine prior to scsi_remove_host(), will leave dangling + * objects in /sys/class/fc_remote_ports. Access to any of these + * objects can result in a system crash !!! + * + * Notes: + * This routine assumes no locks are held on entry. 
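+ *
+ * Typical teardown ordering in an LLDD's remove path (editorial sketch,
+ * not from the original sources):
+ *
+ *	fc_remove_host(shost);
+ *	scsi_remove_host(shost);
+ *	... free LLDD-private resources ...
+ *	scsi_host_put(shost);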
+ */ +void +fc_remove_host(struct Scsi_Host *shost) +{ + struct fc_vport *vport = NULL, *next_vport = NULL; + struct fc_rport *rport = NULL, *next_rport = NULL; + struct workqueue_struct *work_q; + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + + /* Remove any vports */ + list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) { + vport->flags |= FC_VPORT_DELETING; + fc_queue_work(shost, &vport->vport_delete_work); + } + + /* Remove any remote ports */ + list_for_each_entry_safe(rport, next_rport, + &fc_host->rports, peers) { + list_del(&rport->peers); + rport->port_state = FC_PORTSTATE_DELETED; + fc_queue_work(shost, &rport->rport_delete_work); + } + + list_for_each_entry_safe(rport, next_rport, + &fc_host->rport_bindings, peers) { + list_del(&rport->peers); + rport->port_state = FC_PORTSTATE_DELETED; + fc_queue_work(shost, &rport->rport_delete_work); + } + + spin_unlock_irqrestore(shost->host_lock, flags); + + /* flush all scan work items */ + scsi_flush_work(shost); + + /* flush all stgt delete, and rport delete work items, then kill it */ + if (fc_host->work_q) { + work_q = fc_host->work_q; + fc_host->work_q = NULL; + destroy_workqueue(work_q); + } + + /* flush all devloss work items, then kill it */ + if (fc_host->devloss_work_q) { + work_q = fc_host->devloss_work_q; + fc_host->devloss_work_q = NULL; + destroy_workqueue(work_q); + } +} +EXPORT_SYMBOL(fc_remove_host); + +static void fc_terminate_rport_io(struct fc_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + struct fc_internal *i = to_fc_internal(shost->transportt); + + /* Involve the LLDD if possible to terminate all io on the rport. */ + if (i->f->terminate_rport_io) + i->f->terminate_rport_io(rport); + + /* + * Must unblock to flush queued IO. scsi-ml will fail incoming reqs. + */ + scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE); +} + +/** + * fc_starget_delete - called to delete the scsi descendants of an rport + * @work: remote port to be operated on. + * + * Deletes target and all sdevs. + */ +static void +fc_starget_delete(struct work_struct *work) +{ + struct fc_rport *rport = + container_of(work, struct fc_rport, stgt_delete_work); + + fc_terminate_rport_io(rport); + scsi_remove_target(&rport->dev); +} + + +/** + * fc_rport_final_delete - finish rport termination and delete it. + * @work: remote port to be deleted. + */ +static void +fc_rport_final_delete(struct work_struct *work) +{ + struct fc_rport *rport = + container_of(work, struct fc_rport, rport_delete_work); + struct device *dev = &rport->dev; + struct Scsi_Host *shost = rport_to_shost(rport); + struct fc_internal *i = to_fc_internal(shost->transportt); + unsigned long flags; + int do_callback = 0; + + fc_terminate_rport_io(rport); + + /* + * if a scan is pending, flush the SCSI Host work_q so that + * that we can reclaim the rport scan work element. + */ + if (rport->flags & FC_RPORT_SCAN_PENDING) + scsi_flush_work(shost); + + /* + * Cancel any outstanding timers. 
These should really exist + * only when rmmod'ing the LLDD and we're asking for + * immediate termination of the rports + */ + spin_lock_irqsave(shost->host_lock, flags); + if (rport->flags & FC_RPORT_DEVLOSS_PENDING) { + spin_unlock_irqrestore(shost->host_lock, flags); + if (!cancel_delayed_work(&rport->fail_io_work)) + fc_flush_devloss(shost); + if (!cancel_delayed_work(&rport->dev_loss_work)) + fc_flush_devloss(shost); + cancel_work_sync(&rport->scan_work); + spin_lock_irqsave(shost->host_lock, flags); + rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; + } + spin_unlock_irqrestore(shost->host_lock, flags); + + /* Delete SCSI target and sdevs */ + if (rport->scsi_target_id != -1) + fc_starget_delete(&rport->stgt_delete_work); + + /* + * Notify the driver that the rport is now dead. The LLDD will + * also guarantee that any communication to the rport is terminated + * + * Avoid this call if we already called it when we preserved the + * rport for the binding. + */ + spin_lock_irqsave(shost->host_lock, flags); + if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) && + (i->f->dev_loss_tmo_callbk)) { + rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; + do_callback = 1; + } + spin_unlock_irqrestore(shost->host_lock, flags); + + if (do_callback) + i->f->dev_loss_tmo_callbk(rport); + + fc_bsg_remove(rport->rqst_q); + + transport_remove_device(dev); + device_del(dev); + transport_destroy_device(dev); + scsi_host_put(shost); /* for fc_host->rport list */ + put_device(dev); /* for self-reference */ +} + + +/** + * fc_remote_port_create - allocates and creates a remote FC port. + * @shost: scsi host the remote port is connected to. + * @channel: Channel on shost port connected to. + * @ids: The world wide names, fc address, and FC4 port + * roles for the remote port. + * + * Allocates and creates the remote port structure, including the + * class and sysfs creation. + * + * Notes: + * This routine assumes no locks are held on entry.
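+ *
+ * Editorial note on the layout used below: the rport and the LLDD's private
+ * area are carved out of a single allocation, so when dd_fcrport_size is
+ * non-zero, rport->dd_data simply points at the memory immediately
+ * following the struct fc_rport:
+ *
+ *	size = sizeof(struct fc_rport) + fci->f->dd_fcrport_size;
+ *	rport->dd_data = &rport[1];	(first byte past the fc_rport)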
+ */ +static struct fc_rport * +fc_remote_port_create(struct Scsi_Host *shost, int channel, + struct fc_rport_identifiers *ids) +{ + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + struct fc_internal *fci = to_fc_internal(shost->transportt); + struct fc_rport *rport; + struct device *dev; + unsigned long flags; + int error; + size_t size; + + size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size); + rport = kzalloc(size, GFP_KERNEL); + if (unlikely(!rport)) { + printk(KERN_ERR "%s: allocation failure\n", __func__); + return NULL; + } + + rport->maxframe_size = -1; + rport->supported_classes = FC_COS_UNSPECIFIED; + rport->dev_loss_tmo = fc_host->dev_loss_tmo; + memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name)); + memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); + rport->port_id = ids->port_id; + rport->roles = ids->roles; + rport->port_state = FC_PORTSTATE_ONLINE; + if (fci->f->dd_fcrport_size) + rport->dd_data = &rport[1]; + rport->channel = channel; + rport->fast_io_fail_tmo = -1; + + INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport); + INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io); + INIT_WORK(&rport->scan_work, fc_scsi_scan_rport); + INIT_WORK(&rport->stgt_delete_work, fc_starget_delete); + INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete); + + spin_lock_irqsave(shost->host_lock, flags); + + rport->number = fc_host->next_rport_number++; + if ((rport->roles & FC_PORT_ROLE_FCP_TARGET) || + (rport->roles & FC_PORT_ROLE_FCP_DUMMY_INITIATOR)) + rport->scsi_target_id = fc_host->next_target_id++; + else + rport->scsi_target_id = -1; + list_add_tail(&rport->peers, &fc_host->rports); + scsi_host_get(shost); /* for fc_host->rport list */ + + spin_unlock_irqrestore(shost->host_lock, flags); + + dev = &rport->dev; + device_initialize(dev); /* takes self reference */ + dev->parent = get_device(&shost->shost_gendev); /* parent reference */ + dev->release = fc_rport_dev_release; + dev_set_name(dev, "rport-%d:%d-%d", + shost->host_no, channel, rport->number); + transport_setup_device(dev); + + error = device_add(dev); + if (error) { + printk(KERN_ERR "FC Remote Port device_add failed\n"); + goto delete_rport; + } + transport_add_device(dev); + transport_configure_device(dev); + + fc_bsg_rportadd(shost, rport); + /* ignore any bsg add error - we just can't do sgio */ + + if (rport->roles & FC_PORT_ROLE_FCP_TARGET) { + /* initiate a scan of the target */ + rport->flags |= FC_RPORT_SCAN_PENDING; + scsi_queue_work(shost, &rport->scan_work); + } + + return rport; + +delete_rport: + transport_destroy_device(dev); + spin_lock_irqsave(shost->host_lock, flags); + list_del(&rport->peers); + scsi_host_put(shost); /* for fc_host->rport list */ + spin_unlock_irqrestore(shost->host_lock, flags); + put_device(dev->parent); + kfree(rport); + return NULL; +} + +/** + * fc_remote_port_add - notify fc transport of the existence of a remote FC port. + * @shost: scsi host the remote port is connected to. + * @channel: Channel on shost port connected to. + * @ids: The world wide names, fc address, and FC4 port + * roles for the remote port. + * + * The LLDD calls this routine to notify the transport of the existence + * of a remote port. The LLDD provides the unique identifiers (wwpn, wwnn) + * of the port, its FC address (port_id), and the FC4 roles that are + * active for the port. + * + * For ports that are FCP targets (aka scsi targets), the FC transport + * maintains consistent target id bindings on behalf of the LLDD.
+ * A consistent target id binding is an assignment of a target id to + * a remote port identifier, which persists while the scsi host is + * attached. The remote port can disappear, then later reappear, and + * it's target id assignment remains the same. This allows for shifts + * in FC addressing (if binding by wwpn or wwnn) with no apparent + * changes to the scsi subsystem which is based on scsi host number and + * target id values. Bindings are only valid during the attachment of + * the scsi host. If the host detaches, then later re-attaches, target + * id bindings may change. + * + * This routine is responsible for returning a remote port structure. + * The routine will search the list of remote ports it maintains + * internally on behalf of consistent target id mappings. If found, the + * remote port structure will be reused. Otherwise, a new remote port + * structure will be allocated. + * + * Whenever a remote port is allocated, a new fc_remote_port class + * device is created. + * + * Should not be called from interrupt context. + * + * Notes: + * This routine assumes no locks are held on entry. + */ +struct fc_rport * +fc_remote_port_add(struct Scsi_Host *shost, int channel, + struct fc_rport_identifiers *ids) +{ + struct fc_internal *fci = to_fc_internal(shost->transportt); + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + struct fc_rport *rport; + unsigned long flags; + int match = 0; + + /* ensure any stgt delete functions are done */ + fc_flush_work(shost); + + /* + * Search the list of "active" rports, for an rport that has been + * deleted, but we've held off the real delete while the target + * is in a "blocked" state. + */ + spin_lock_irqsave(shost->host_lock, flags); + + list_for_each_entry(rport, &fc_host->rports, peers) { + + if ((rport->port_state == FC_PORTSTATE_BLOCKED || + rport->port_state == FC_PORTSTATE_NOTPRESENT) && + (rport->channel == channel)) { + + switch (fc_host->tgtid_bind_type) { + case FC_TGTID_BIND_BY_WWPN: + case FC_TGTID_BIND_NONE: + if (rport->port_name == ids->port_name) + match = 1; + break; + case FC_TGTID_BIND_BY_WWNN: + if (rport->node_name == ids->node_name) + match = 1; + break; + case FC_TGTID_BIND_BY_ID: + if (rport->port_id == ids->port_id) + match = 1; + break; + } + + if (match) { + + memcpy(&rport->node_name, &ids->node_name, + sizeof(rport->node_name)); + memcpy(&rport->port_name, &ids->port_name, + sizeof(rport->port_name)); + rport->port_id = ids->port_id; + + rport->port_state = FC_PORTSTATE_ONLINE; + rport->roles = ids->roles; + + spin_unlock_irqrestore(shost->host_lock, flags); + + if (fci->f->dd_fcrport_size) + memset(rport->dd_data, 0, + fci->f->dd_fcrport_size); + + /* + * If we were not a target, cancel the + * io terminate and rport timers, and + * we're done. + * + * If we were a target, but our new role + * doesn't indicate a target, leave the + * timers running expecting the role to + * change as the target fully logs in. If + * it doesn't, the target will be torn down. + * + * If we were a target, and our role shows + * we're still a target, cancel the timers + * and kick off a scan. + */ + + /* was a target, not in roles */ + if ((rport->scsi_target_id != -1) && + (!(ids->roles & FC_PORT_ROLE_FCP_TARGET))) + return rport; + + /* + * Stop the fail io and dev_loss timers. + * If they flush, the port_state will + * be checked and will NOOP the function. 
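+ *
+ * (Editorial note: cancel_delayed_work() returns false when the work
+ * was no longer pending - i.e. it may already be executing - so in that
+ * case the devloss workqueue is flushed to make sure the handler has
+ * finished before the flags are cleared below.)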
+ */ + if (!cancel_delayed_work(&rport->fail_io_work)) + fc_flush_devloss(shost); + if (!cancel_delayed_work(&rport->dev_loss_work)) + fc_flush_devloss(shost); + + spin_lock_irqsave(shost->host_lock, flags); + + rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT | + FC_RPORT_DEVLOSS_PENDING | + FC_RPORT_DEVLOSS_CALLBK_DONE); + + spin_unlock_irqrestore(shost->host_lock, flags); + + /* if target, initiate a scan */ + if (rport->scsi_target_id != -1) { + scsi_target_unblock(&rport->dev, + SDEV_RUNNING); + spin_lock_irqsave(shost->host_lock, + flags); + rport->flags |= FC_RPORT_SCAN_PENDING; + scsi_queue_work(shost, + &rport->scan_work); + spin_unlock_irqrestore(shost->host_lock, + flags); + } + + fc_bsg_goose_queue(rport); + + return rport; + } + } + } + + /* + * Search the bindings array + * Note: if never a FCP target, you won't be on this list + */ + if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) { + + /* search for a matching consistent binding */ + + list_for_each_entry(rport, &fc_host->rport_bindings, + peers) { + if (rport->channel != channel) + continue; + + switch (fc_host->tgtid_bind_type) { + case FC_TGTID_BIND_BY_WWPN: + if (rport->port_name == ids->port_name) + match = 1; + break; + case FC_TGTID_BIND_BY_WWNN: + if (rport->node_name == ids->node_name) + match = 1; + break; + case FC_TGTID_BIND_BY_ID: + if (rport->port_id == ids->port_id) + match = 1; + break; + case FC_TGTID_BIND_NONE: /* to keep compiler happy */ + break; + } + + if (match) { + list_move_tail(&rport->peers, &fc_host->rports); + break; + } + } + + if (match) { + memcpy(&rport->node_name, &ids->node_name, + sizeof(rport->node_name)); + memcpy(&rport->port_name, &ids->port_name, + sizeof(rport->port_name)); + rport->port_id = ids->port_id; + rport->port_state = FC_PORTSTATE_ONLINE; + rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; + + if (fci->f->dd_fcrport_size) + memset(rport->dd_data, 0, + fci->f->dd_fcrport_size); + spin_unlock_irqrestore(shost->host_lock, flags); + + fc_remote_port_rolechg(rport, ids->roles); + return rport; + } + } + + spin_unlock_irqrestore(shost->host_lock, flags); + + /* No consistent binding found - create new remote port entry */ + rport = fc_remote_port_create(shost, channel, ids); + + return rport; +} +EXPORT_SYMBOL(fc_remote_port_add); + + +/** + * fc_remote_port_delete - notifies the fc transport that a remote port is no longer in existence. + * @rport: The remote port that no longer exists + * + * The LLDD calls this routine to notify the transport that a remote + * port is no longer part of the topology. Note: Although a port + * may no longer be part of the topology, it may persist in the remote + * ports displayed by the fc_host. We do this under 2 conditions: + * + * 1) If the port was a scsi target, we delay its deletion by "blocking" it. + * This allows the port to temporarily disappear, then reappear without + * disrupting the SCSI device tree attached to it. During the "blocked" + * period the port will still exist. + * + * 2) If the port was a scsi target and disappears for longer than we + * expect, we'll delete the port and the tear down the SCSI device tree + * attached to it. However, we want to semi-persist the target id assigned + * to that port if it eventually does exist. The port structure will + * remain (although with minimal information) so that the target id + * bindings also remain. + * + * If the remote port is not an FCP Target, it will be fully torn down + * and deallocated, including the fc_remote_port class device. 
+ * + * If the remote port is an FCP Target, the port will be placed in a + * temporary blocked state. From the LLDD's perspective, the rport no + * longer exists. From the SCSI midlayer's perspective, the SCSI target + * exists, but all sdevs on it are blocked from further I/O. The following + * is then expected. + * + * If the remote port does not return (signaled by an LLDD call to + * fc_remote_port_add()) within the dev_loss_tmo timeout, then the + * scsi target is removed - killing all outstanding i/o and removing the + * scsi devices attached to it. The port structure will be marked Not + * Present and be partially cleared, leaving only enough information to + * recognize the remote port relative to the scsi target id binding if + * it later appears. The port will remain as long as there is a valid + * binding (e.g. until the user changes the binding type or unloads the + * scsi host with the binding). + * + * If the remote port returns within the dev_loss_tmo value (and matches + * according to the target id binding type), the port structure will be + * reused. If it is no longer a SCSI target, the target will be torn + * down. If it continues to be a SCSI target, then the target will be + * unblocked (allowing i/o to be resumed), and a scan will be activated + * to ensure that all luns are detected. + * + * Called from normal process context only - cannot be called from interrupt. + * + * Notes: + * This routine assumes no locks are held on entry. + */ +void +fc_remote_port_delete(struct fc_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + unsigned long timeout = rport->dev_loss_tmo; + unsigned long flags; + + /* + * No need to flush the fc_host work_q's, as all adds are synchronous. + * + * We do need to reclaim the rport scan work element, so eventually + * (in fc_rport_final_delete()) we'll flush the scsi host work_q if + * there's still a scan pending. + */ + + spin_lock_irqsave(shost->host_lock, flags); + + if ((rport->port_state != FC_PORTSTATE_ONLINE) && + (rport->port_state != FC_PORTSTATE_MARGINAL)) { + spin_unlock_irqrestore(shost->host_lock, flags); + return; + } + + /* + * In the past, if this was not an FCP-Target, we would + * unconditionally just jump to deleting the rport. + * However, rports can be used as node containers by the LLDD, + * and it's not appropriate to just terminate the rport at the + * first sign of a loss in connectivity. The LLDD may want to + * send ELS traffic to re-validate the login. If the rport is + * immediately deleted, it makes it inappropriate for a node + * container. + * So... we now unconditionally wait dev_loss_tmo before + * destroying an rport. + */ + + rport->port_state = FC_PORTSTATE_BLOCKED; + + rport->flags |= FC_RPORT_DEVLOSS_PENDING; + + spin_unlock_irqrestore(shost->host_lock, flags); + + scsi_block_targets(shost, &rport->dev); + + /* see if we need to kill io faster than waiting for device loss */ + if ((rport->fast_io_fail_tmo != -1) && + (rport->fast_io_fail_tmo < timeout)) + fc_queue_devloss_work(shost, &rport->fail_io_work, + rport->fast_io_fail_tmo * HZ); + + /* cap the length the devices can be blocked until they are deleted */ + fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ); +} +EXPORT_SYMBOL(fc_remote_port_delete); + +/** + * fc_remote_port_rolechg - notifies the fc transport that the roles on a remote may have changed. + * @rport: The remote port that changed. + * @roles: New roles for this port.
+ * + * Description: The LLDD calls this routine to notify the transport that the + * roles on a remote port may have changed. The largest effect of this is + * if a port now becomes a FCP Target, it must be allocated a + * scsi target id. If the port is no longer a FCP target, any + * scsi target id value assigned to it will persist in case the + * role changes back to include FCP Target. No changes in the scsi + * midlayer will be invoked if the role changes (in the expectation + * that the role will be resumed. If it doesn't normal error processing + * will take place). + * + * Should not be called from interrupt context. + * + * Notes: + * This routine assumes no locks are held on entry. + */ +void +fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + unsigned long flags; + int create = 0; + + spin_lock_irqsave(shost->host_lock, flags); + if (roles & FC_PORT_ROLE_FCP_TARGET) { + if (rport->scsi_target_id == -1) { + rport->scsi_target_id = fc_host->next_target_id++; + create = 1; + } else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET)) + create = 1; + } + + rport->roles = roles; + + spin_unlock_irqrestore(shost->host_lock, flags); + + if (create) { + /* + * There may have been a delete timer running on the + * port. Ensure that it is cancelled as we now know + * the port is an FCP Target. + * Note: we know the rport exists and is in an online + * state as the LLDD would not have had an rport + * reference to pass us. + * + * Take no action on the del_timer failure as the state + * machine state change will validate the + * transaction. + */ + if (!cancel_delayed_work(&rport->fail_io_work)) + fc_flush_devloss(shost); + if (!cancel_delayed_work(&rport->dev_loss_work)) + fc_flush_devloss(shost); + + spin_lock_irqsave(shost->host_lock, flags); + rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT | + FC_RPORT_DEVLOSS_PENDING | + FC_RPORT_DEVLOSS_CALLBK_DONE); + spin_unlock_irqrestore(shost->host_lock, flags); + + /* ensure any stgt delete functions are done */ + fc_flush_work(shost); + + scsi_target_unblock(&rport->dev, SDEV_RUNNING); + /* initiate a scan of the target */ + spin_lock_irqsave(shost->host_lock, flags); + rport->flags |= FC_RPORT_SCAN_PENDING; + scsi_queue_work(shost, &rport->scan_work); + spin_unlock_irqrestore(shost->host_lock, flags); + } +} +EXPORT_SYMBOL(fc_remote_port_rolechg); + +/** + * fc_timeout_deleted_rport - Timeout handler for a deleted remote port. + * @work: rport target that failed to reappear in the allotted time. + * + * Description: An attempt to delete a remote port blocks, and if it fails + * to return in the allotted time this gets called. + */ +static void +fc_timeout_deleted_rport(struct work_struct *work) +{ + struct fc_rport *rport = + container_of(work, struct fc_rport, dev_loss_work.work); + struct Scsi_Host *shost = rport_to_shost(rport); + struct fc_internal *i = to_fc_internal(shost->transportt); + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + unsigned long flags; + int do_callback = 0; + + spin_lock_irqsave(shost->host_lock, flags); + + rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; + + /* + * If the port is ONLINE, then it came back. If it was a SCSI + * target, validate it still is. If not, tear down the + * scsi_target on it. 
+ */ + if (((rport->port_state == FC_PORTSTATE_ONLINE) || + (rport->port_state == FC_PORTSTATE_MARGINAL)) && + (rport->scsi_target_id != -1) && + !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) { + dev_printk(KERN_ERR, &rport->dev, + "blocked FC remote port time out: no longer" + " a FCP target, removing starget\n"); + spin_unlock_irqrestore(shost->host_lock, flags); + scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE); + fc_queue_work(shost, &rport->stgt_delete_work); + return; + } + + /* NOOP state - we're flushing workq's */ + if (rport->port_state != FC_PORTSTATE_BLOCKED) { + spin_unlock_irqrestore(shost->host_lock, flags); + dev_printk(KERN_ERR, &rport->dev, + "blocked FC remote port time out: leaving" + " rport%s alone\n", + (rport->scsi_target_id != -1) ? " and starget" : ""); + return; + } + + if ((fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) || + (rport->scsi_target_id == -1)) { + list_del(&rport->peers); + rport->port_state = FC_PORTSTATE_DELETED; + dev_printk(KERN_ERR, &rport->dev, + "blocked FC remote port time out: removing" + " rport%s\n", + (rport->scsi_target_id != -1) ? " and starget" : ""); + fc_queue_work(shost, &rport->rport_delete_work); + spin_unlock_irqrestore(shost->host_lock, flags); + return; + } + + dev_printk(KERN_ERR, &rport->dev, + "blocked FC remote port time out: removing target and " + "saving binding\n"); + + list_move_tail(&rport->peers, &fc_host->rport_bindings); + + /* + * Note: We do not remove or clear the hostdata area. This allows + * host-specific target data to persist along with the + * scsi_target_id. It's up to the host to manage its hostdata area. + */ + + /* + * Reinitialize port attributes that may change if the port comes back. + */ + rport->maxframe_size = -1; + rport->supported_classes = FC_COS_UNSPECIFIED; + rport->roles = FC_PORT_ROLE_UNKNOWN; + rport->port_state = FC_PORTSTATE_NOTPRESENT; + rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; + + /* + * Pre-emptively kill I/O rather than waiting for the work queue + * item to tear down the starget. (FCOE libFC folks prefer this + * and to have the rport_port_id still set when it's done). + */ + spin_unlock_irqrestore(shost->host_lock, flags); + fc_terminate_rport_io(rport); + + spin_lock_irqsave(shost->host_lock, flags); + + if (rport->port_state == FC_PORTSTATE_NOTPRESENT) { /* still missing */ + + /* remove the identifiers that aren't used in the consistent binding */ + switch (fc_host->tgtid_bind_type) { + case FC_TGTID_BIND_BY_WWPN: + rport->node_name = -1; + rport->port_id = -1; + break; + case FC_TGTID_BIND_BY_WWNN: + rport->port_name = -1; + rport->port_id = -1; + break; + case FC_TGTID_BIND_BY_ID: + rport->node_name = -1; + rport->port_name = -1; + break; + case FC_TGTID_BIND_NONE: /* to keep compiler happy */ + break; + } + + /* + * As this only occurs if the remote port (scsi target) + * went away and didn't come back - we'll remove + * all attached scsi devices. + */ + rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; + fc_queue_work(shost, &rport->stgt_delete_work); + + do_callback = 1; + } + + spin_unlock_irqrestore(shost->host_lock, flags); + + /* + * Notify the driver that the rport is now dead. The LLDD will + * also guarantee that any communication to the rport is terminated + * + * Note: we set the CALLBK_DONE flag above to correspond + */ + if (do_callback && i->f->dev_loss_tmo_callbk) + i->f->dev_loss_tmo_callbk(rport); +} + + +/** + * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target. + * @work: rport to terminate io on.
+ * + * Notes: Only requests the failure of the io, not that all are flushed + * prior to returning. + */ +static void +fc_timeout_fail_rport_io(struct work_struct *work) +{ + struct fc_rport *rport = + container_of(work, struct fc_rport, fail_io_work.work); + + if (rport->port_state != FC_PORTSTATE_BLOCKED) + return; + + rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT; + fc_terminate_rport_io(rport); +} + +/** + * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. + * @work: remote port to be scanned. + */ +static void +fc_scsi_scan_rport(struct work_struct *work) +{ + struct fc_rport *rport = + container_of(work, struct fc_rport, scan_work); + struct Scsi_Host *shost = rport_to_shost(rport); + struct fc_internal *i = to_fc_internal(shost->transportt); + unsigned long flags; + + if (((rport->port_state == FC_PORTSTATE_ONLINE) || + (rport->port_state == FC_PORTSTATE_MARGINAL)) && + (rport->roles & FC_PORT_ROLE_FCP_TARGET) && + !(i->f->disable_target_scan)) { + scsi_scan_target(&rport->dev, rport->channel, + rport->scsi_target_id, SCAN_WILD_CARD, + SCSI_SCAN_RESCAN); + } + + spin_lock_irqsave(shost->host_lock, flags); + rport->flags &= ~FC_RPORT_SCAN_PENDING; + spin_unlock_irqrestore(shost->host_lock, flags); +} + +/** + * fc_block_rport() - Block SCSI eh thread for blocked fc_rport. + * @rport: Remote port that scsi_eh is trying to recover. + * + * This routine can be called from a FC LLD scsi_eh callback. It + * blocks the scsi_eh thread until the fc_rport leaves the + * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is + * necessary to avoid the scsi_eh failing recovery actions for blocked + * rports which would lead to offlined SCSI devices. + * + * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED. + * FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be + * passed back to scsi_eh. + */ +int fc_block_rport(struct fc_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + while (rport->port_state == FC_PORTSTATE_BLOCKED && + !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) { + spin_unlock_irqrestore(shost->host_lock, flags); + msleep(1000); + spin_lock_irqsave(shost->host_lock, flags); + } + spin_unlock_irqrestore(shost->host_lock, flags); + + if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT) + return FAST_IO_FAIL; + + return 0; +} +EXPORT_SYMBOL(fc_block_rport); + +/** + * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport + * @cmnd: SCSI command that scsi_eh is trying to recover + * + * This routine can be called from a FC LLD scsi_eh callback. It + * blocks the scsi_eh thread until the fc_rport leaves the + * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is + * necessary to avoid the scsi_eh failing recovery actions for blocked + * rports which would lead to offlined SCSI devices. + * + * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED. + * FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be + * passed back to scsi_eh. + */ +int fc_block_scsi_eh(struct scsi_cmnd *cmnd) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + + if (WARN_ON_ONCE(!rport)) + return FAST_IO_FAIL; + + return fc_block_rport(rport); +} +EXPORT_SYMBOL(fc_block_scsi_eh); + +/* + * fc_eh_should_retry_cmd - Checks if the cmd should be retried or not + * @scmd: The SCSI command to be checked + * + * This checks the rport state to decide if a cmd is + * retryable. 
+ * + * Returns: true if the rport state is not in marginal state. + */ +bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); + + if ((rport->port_state != FC_PORTSTATE_ONLINE) && + (scsi_cmd_to_rq(scmd)->cmd_flags & REQ_FAILFAST_TRANSPORT)) { + set_host_byte(scmd, DID_TRANSPORT_MARGINAL); + return false; + } + return true; +} +EXPORT_SYMBOL_GPL(fc_eh_should_retry_cmd); + +/** + * fc_vport_setup - allocates and creates a FC virtual port. + * @shost: scsi host the virtual port is connected to. + * @channel: Channel on shost port connected to. + * @pdev: parent device for vport + * @ids: The world wide names, FC4 port roles, etc for + * the virtual port. + * @ret_vport: The pointer to the created vport. + * + * Allocates and creates the vport structure, calls the parent host + * to instantiate the vport, this completes w/ class and sysfs creation. + * + * Notes: + * This routine assumes no locks are held on entry. + */ +static int +fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev, + struct fc_vport_identifiers *ids, struct fc_vport **ret_vport) +{ + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + struct fc_internal *fci = to_fc_internal(shost->transportt); + struct fc_vport *vport; + struct device *dev; + unsigned long flags; + size_t size; + int error; + + *ret_vport = NULL; + + if ( ! fci->f->vport_create) + return -ENOENT; + + size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size); + vport = kzalloc(size, GFP_KERNEL); + if (unlikely(!vport)) { + printk(KERN_ERR "%s: allocation failure\n", __func__); + return -ENOMEM; + } + + vport->vport_state = FC_VPORT_UNKNOWN; + vport->vport_last_state = FC_VPORT_UNKNOWN; + vport->node_name = ids->node_name; + vport->port_name = ids->port_name; + vport->roles = ids->roles; + vport->vport_type = ids->vport_type; + if (fci->f->dd_fcvport_size) + vport->dd_data = &vport[1]; + vport->shost = shost; + vport->channel = channel; + vport->flags = FC_VPORT_CREATING; + INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete); + + spin_lock_irqsave(shost->host_lock, flags); + + if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) { + spin_unlock_irqrestore(shost->host_lock, flags); + kfree(vport); + return -ENOSPC; + } + fc_host->npiv_vports_inuse++; + vport->number = fc_host->next_vport_number++; + list_add_tail(&vport->peers, &fc_host->vports); + scsi_host_get(shost); /* for fc_host->vport list */ + + spin_unlock_irqrestore(shost->host_lock, flags); + + dev = &vport->dev; + device_initialize(dev); /* takes self reference */ + dev->parent = get_device(pdev); /* takes parent reference */ + dev->release = fc_vport_dev_release; + dev_set_name(dev, "vport-%d:%d-%d", + shost->host_no, channel, vport->number); + transport_setup_device(dev); + + error = device_add(dev); + if (error) { + printk(KERN_ERR "FC Virtual Port device_add failed\n"); + goto delete_vport; + } + transport_add_device(dev); + transport_configure_device(dev); + + error = fci->f->vport_create(vport, ids->disable); + if (error) { + printk(KERN_ERR "FC Virtual Port LLDD Create failed\n"); + goto delete_vport_all; + } + + /* + * if the parent isn't the physical adapter's Scsi_Host, ensure + * the Scsi_Host at least contains a symlink to the vport. 
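+ *
+ * (Editorial example: the vport device itself is named
+ * "vport-<host_no>:<channel>-<number>" - see dev_set_name() above - so a
+ * second vport on host5, channel 0 would typically appear as vport-5:0-1,
+ * with a like-named symlink created under the Scsi_Host directory.)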
+ */ + if (pdev != &shost->shost_gendev) { + error = sysfs_create_link(&shost->shost_gendev.kobj, + &dev->kobj, dev_name(dev)); + if (error) + printk(KERN_ERR + "%s: Cannot create vport symlinks for " + "%s, err=%d\n", + __func__, dev_name(dev), error); + } + spin_lock_irqsave(shost->host_lock, flags); + vport->flags &= ~FC_VPORT_CREATING; + spin_unlock_irqrestore(shost->host_lock, flags); + + dev_printk(KERN_NOTICE, pdev, + "%s created via shost%d channel %d\n", dev_name(dev), + shost->host_no, channel); + + *ret_vport = vport; + + return 0; + +delete_vport_all: + transport_remove_device(dev); + device_del(dev); +delete_vport: + transport_destroy_device(dev); + spin_lock_irqsave(shost->host_lock, flags); + list_del(&vport->peers); + scsi_host_put(shost); /* for fc_host->vport list */ + fc_host->npiv_vports_inuse--; + spin_unlock_irqrestore(shost->host_lock, flags); + put_device(dev->parent); + kfree(vport); + + return error; +} + +/** + * fc_vport_create - Admin App or LLDD requests creation of a vport + * @shost: scsi host the virtual port is connected to. + * @channel: channel on shost port connected to. + * @ids: The world wide names, FC4 port roles, etc for + * the virtual port. + * + * Notes: + * This routine assumes no locks are held on entry. + */ +struct fc_vport * +fc_vport_create(struct Scsi_Host *shost, int channel, + struct fc_vport_identifiers *ids) +{ + int stat; + struct fc_vport *vport; + + stat = fc_vport_setup(shost, channel, &shost->shost_gendev, + ids, &vport); + return stat ? NULL : vport; +} +EXPORT_SYMBOL(fc_vport_create); + +/** + * fc_vport_terminate - Admin App or LLDD requests termination of a vport + * @vport: fc_vport to be terminated + * + * Calls the LLDD vport_delete() function, then deallocates and removes + * the vport from the shost and object tree. + * + * Notes: + * This routine assumes no locks are held on entry. + */ +int +fc_vport_terminate(struct fc_vport *vport) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + struct fc_internal *i = to_fc_internal(shost->transportt); + struct device *dev = &vport->dev; + unsigned long flags; + int stat; + + if (i->f->vport_delete) + stat = i->f->vport_delete(vport); + else + stat = -ENOENT; + + spin_lock_irqsave(shost->host_lock, flags); + vport->flags &= ~FC_VPORT_DELETING; + if (!stat) { + vport->flags |= FC_VPORT_DELETED; + list_del(&vport->peers); + fc_host->npiv_vports_inuse--; + scsi_host_put(shost); /* for fc_host->vport list */ + } + spin_unlock_irqrestore(shost->host_lock, flags); + + if (stat) + return stat; + + if (dev->parent != &shost->shost_gendev) + sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev)); + transport_remove_device(dev); + device_del(dev); + transport_destroy_device(dev); + + /* + * Removing our self-reference should mean our + * release function gets called, which will drop the remaining + * parent reference and free the data structure. + */ + put_device(dev); /* for self-reference */ + + return 0; /* SUCCESS */ +} +EXPORT_SYMBOL(fc_vport_terminate); + +/** + * fc_vport_sched_delete - workq-based delete request for a vport + * @work: vport to be deleted. 
+ */ +static void +fc_vport_sched_delete(struct work_struct *work) +{ + struct fc_vport *vport = + container_of(work, struct fc_vport, vport_delete_work); + int stat; + + stat = fc_vport_terminate(vport); + if (stat) + dev_printk(KERN_ERR, vport->dev.parent, + "%s: %s (created via shost%d channel %d) could not " + "be deleted - error %d\n", __func__, + dev_name(&vport->dev), vport->shost->host_no, + vport->channel, stat); +} + + +/* + * BSG support + */ + +/** + * fc_bsg_job_timeout - handler for when a bsg request times out + * @req: request that timed out + */ +static enum blk_eh_timer_return +fc_bsg_job_timeout(struct request *req) +{ + struct bsg_job *job = blk_mq_rq_to_pdu(req); + struct Scsi_Host *shost = fc_bsg_to_shost(job); + struct fc_rport *rport = fc_bsg_to_rport(job); + struct fc_internal *i = to_fc_internal(shost->transportt); + int err = 0, inflight = 0; + + if (rport && rport->port_state == FC_PORTSTATE_BLOCKED) + return BLK_EH_RESET_TIMER; + + inflight = bsg_job_get(job); + + if (inflight && i->f->bsg_timeout) { + /* call LLDD to abort the i/o as it has timed out */ + err = i->f->bsg_timeout(job); + if (err == -EAGAIN) { + bsg_job_put(job); + return BLK_EH_RESET_TIMER; + } else if (err) + printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " + "abort failed with status %d\n", err); + } + + /* the blk_end_sync_io() doesn't check the error */ + if (inflight) + blk_mq_end_request(req, BLK_STS_IOERR); + return BLK_EH_DONE; +} + +/** + * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD + * @shost: scsi host rport attached to + * @job: bsg job to be processed + */ +static int fc_bsg_host_dispatch(struct Scsi_Host *shost, struct bsg_job *job) +{ + struct fc_internal *i = to_fc_internal(shost->transportt); + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ + int ret; + + /* check if we really have all the request data needed */ + if (job->request_len < cmdlen) { + ret = -ENOMSG; + goto fail_host_msg; + } + + /* Validate the host command */ + switch (bsg_request->msgcode) { + case FC_BSG_HST_ADD_RPORT: + cmdlen += sizeof(struct fc_bsg_host_add_rport); + break; + + case FC_BSG_HST_DEL_RPORT: + cmdlen += sizeof(struct fc_bsg_host_del_rport); + break; + + case FC_BSG_HST_ELS_NOLOGIN: + cmdlen += sizeof(struct fc_bsg_host_els); + /* there better be xmt and rcv payloads */ + if ((!job->request_payload.payload_len) || + (!job->reply_payload.payload_len)) { + ret = -EINVAL; + goto fail_host_msg; + } + break; + + case FC_BSG_HST_CT: + cmdlen += sizeof(struct fc_bsg_host_ct); + /* there better be xmt and rcv payloads */ + if ((!job->request_payload.payload_len) || + (!job->reply_payload.payload_len)) { + ret = -EINVAL; + goto fail_host_msg; + } + break; + + case FC_BSG_HST_VENDOR: + cmdlen += sizeof(struct fc_bsg_host_vendor); + if ((shost->hostt->vendor_id == 0L) || + (bsg_request->rqst_data.h_vendor.vendor_id != + shost->hostt->vendor_id)) { + ret = -ESRCH; + goto fail_host_msg; + } + break; + + default: + ret = -EBADR; + goto fail_host_msg; + } + + ret = i->f->bsg_request(job); + if (!ret) + return 0; + +fail_host_msg: + /* return the errno failure code as the only status */ + BUG_ON(job->reply_len < sizeof(uint32_t)); + bsg_reply->reply_payload_rcv_len = 0; + bsg_reply->result = ret; + job->reply_len = sizeof(uint32_t); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return 0; +} + + +/* + * fc_bsg_goose_queue -
restart rport queue in case it was stopped + * @rport: rport to be restarted + */ +static void +fc_bsg_goose_queue(struct fc_rport *rport) +{ + struct request_queue *q = rport->rqst_q; + + if (q) + blk_mq_run_hw_queues(q, true); +} + +/** + * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD + * @shost: scsi host rport attached to + * @job: bsg job to be processed + */ +static int fc_bsg_rport_dispatch(struct Scsi_Host *shost, struct bsg_job *job) +{ + struct fc_internal *i = to_fc_internal(shost->transportt); + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ + int ret; + + /* check if we really have all the request data needed */ + if (job->request_len < cmdlen) { + ret = -ENOMSG; + goto fail_rport_msg; + } + + /* Validate the rport command */ + switch (bsg_request->msgcode) { + case FC_BSG_RPT_ELS: + cmdlen += sizeof(struct fc_bsg_rport_els); + goto check_bidi; + + case FC_BSG_RPT_CT: + cmdlen += sizeof(struct fc_bsg_rport_ct); +check_bidi: + /* there better be xmt and rcv payloads */ + if ((!job->request_payload.payload_len) || + (!job->reply_payload.payload_len)) { + ret = -EINVAL; + goto fail_rport_msg; + } + break; + default: + ret = -EBADR; + goto fail_rport_msg; + } + + ret = i->f->bsg_request(job); + if (!ret) + return 0; + +fail_rport_msg: + /* return the errno failure code as the only status */ + BUG_ON(job->reply_len < sizeof(uint32_t)); + bsg_reply->reply_payload_rcv_len = 0; + bsg_reply->result = ret; + job->reply_len = sizeof(uint32_t); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return 0; +} + +static int fc_bsg_dispatch(struct bsg_job *job) +{ + struct Scsi_Host *shost = fc_bsg_to_shost(job); + + if (scsi_is_fc_rport(job->dev)) + return fc_bsg_rport_dispatch(shost, job); + else + return fc_bsg_host_dispatch(shost, job); +} + +static blk_status_t fc_bsg_rport_prep(struct fc_rport *rport) +{ + if (rport->port_state == FC_PORTSTATE_BLOCKED && + !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) + return BLK_STS_RESOURCE; + + if ((rport->port_state != FC_PORTSTATE_ONLINE) && + (rport->port_state != FC_PORTSTATE_MARGINAL)) + return BLK_STS_IOERR; + + return BLK_STS_OK; +} + + +static int fc_bsg_dispatch_prep(struct bsg_job *job) +{ + struct fc_rport *rport = fc_bsg_to_rport(job); + blk_status_t ret; + + ret = fc_bsg_rport_prep(rport); + switch (ret) { + case BLK_STS_OK: + break; + case BLK_STS_RESOURCE: + return -EAGAIN; + default: + return -EIO; + } + + return fc_bsg_dispatch(job); +} + +/** + * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests + * @shost: shost for fc_host + * @fc_host: fc_host adding the structures to + */ +static int +fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host) +{ + struct device *dev = &shost->shost_gendev; + struct fc_internal *i = to_fc_internal(shost->transportt); + struct request_queue *q; + char bsg_name[20]; + + fc_host->rqst_q = NULL; + + if (!i->f->bsg_request) + return -ENOTSUPP; + + snprintf(bsg_name, sizeof(bsg_name), + "fc_host%d", shost->host_no); + + q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, fc_bsg_job_timeout, + i->f->dd_bsg_size); + if (IS_ERR(q)) { + dev_err(dev, + "fc_host%d: bsg interface failed to initialize - setup queue\n", + shost->host_no); + return PTR_ERR(q); + } + __scsi_init_queue(shost, q); + blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT); + fc_host->rqst_q = q; + return 0; +} + +/** + * 
fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests + * @shost: shost that rport is attached to + * @rport: rport that the bsg hooks are being attached to + */ +static int +fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) +{ + struct device *dev = &rport->dev; + struct fc_internal *i = to_fc_internal(shost->transportt); + struct request_queue *q; + + rport->rqst_q = NULL; + + if (!i->f->bsg_request) + return -ENOTSUPP; + + q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch_prep, + fc_bsg_job_timeout, i->f->dd_bsg_size); + if (IS_ERR(q)) { + dev_err(dev, "failed to setup bsg queue\n"); + return PTR_ERR(q); + } + __scsi_init_queue(shost, q); + blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); + rport->rqst_q = q; + return 0; +} + + +/** + * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports + * @q: the request_queue that is to be torn down. + * + * Notes: + * Before unregistering the queue empty any requests that are blocked + * + * + */ +static void +fc_bsg_remove(struct request_queue *q) +{ + bsg_remove_queue(q); +} + + +/* Original Author: Martin Hicks */ +MODULE_AUTHOR("James Smart"); +MODULE_DESCRIPTION("FC Transport Attributes"); +MODULE_LICENSE("GPL"); + +module_init(fc_transport_init); +module_exit(fc_transport_exit); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c new file mode 100644 index 000000000..3075b2ddf --- /dev/null +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -0,0 +1,5060 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * iSCSI transport class definitions + * + * Copyright (C) IBM Corporation, 2004 + * Copyright (C) Mike Christie, 2004 - 2005 + * Copyright (C) Dmitry Yusupov, 2004 - 2005 + * Copyright (C) Alex Aizman, 2004 - 2005 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ISCSI_TRANSPORT_VERSION "2.0-870" + +#define ISCSI_SEND_MAX_ALLOWED 10 + +#define CREATE_TRACE_POINTS +#include + +/* + * Export tracepoint symbols to be used by other modules. + */ +EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_conn); +EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_eh); +EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_session); +EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_tcp); +EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_sw_tcp); + +static int dbg_session; +module_param_named(debug_session, dbg_session, int, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug_session, + "Turn on debugging for sessions in scsi_transport_iscsi " + "module. Set to 1 to turn on, and zero to turn off. Default " + "is off."); + +static int dbg_conn; +module_param_named(debug_conn, dbg_conn, int, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug_conn, + "Turn on debugging for connections in scsi_transport_iscsi " + "module. Set to 1 to turn on, and zero to turn off. Default " + "is off."); + +#define ISCSI_DBG_TRANS_SESSION(_session, dbg_fmt, arg...) \ + do { \ + if (dbg_session) \ + iscsi_cls_session_printk(KERN_INFO, _session, \ + "%s: " dbg_fmt, \ + __func__, ##arg); \ + iscsi_dbg_trace(trace_iscsi_dbg_trans_session, \ + &(_session)->dev, \ + "%s " dbg_fmt, __func__, ##arg); \ + } while (0); + +#define ISCSI_DBG_TRANS_CONN(_conn, dbg_fmt, arg...) 
\ + do { \ + if (dbg_conn) \ + iscsi_cls_conn_printk(KERN_INFO, _conn, \ + "%s: " dbg_fmt, \ + __func__, ##arg); \ + iscsi_dbg_trace(trace_iscsi_dbg_trans_conn, \ + &(_conn)->dev, \ + "%s " dbg_fmt, __func__, ##arg); \ + } while (0); + +struct iscsi_internal { + struct scsi_transport_template t; + struct iscsi_transport *iscsi_transport; + struct list_head list; + struct device dev; + + struct transport_container conn_cont; + struct transport_container session_cont; +}; + +static DEFINE_IDR(iscsi_ep_idr); +static DEFINE_MUTEX(iscsi_ep_idr_mutex); + +static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ + +static struct workqueue_struct *iscsi_conn_cleanup_workq; + +static DEFINE_IDA(iscsi_sess_ida); +/* + * list of registered transports and lock that must + * be held while accessing list. The iscsi_transport_lock must + * be acquired after the rx_queue_mutex. + */ +static LIST_HEAD(iscsi_transports); +static DEFINE_SPINLOCK(iscsi_transport_lock); + +#define to_iscsi_internal(tmpl) \ + container_of(tmpl, struct iscsi_internal, t) + +#define dev_to_iscsi_internal(_dev) \ + container_of(_dev, struct iscsi_internal, dev) + +static void iscsi_transport_release(struct device *dev) +{ + struct iscsi_internal *priv = dev_to_iscsi_internal(dev); + kfree(priv); +} + +/* + * iscsi_transport_class represents the iscsi_transports that are + * registered. + */ +static struct class iscsi_transport_class = { + .name = "iscsi_transport", + .dev_release = iscsi_transport_release, +}; + +static ssize_t +show_transport_handle(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct iscsi_internal *priv = dev_to_iscsi_internal(dev); + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + return sysfs_emit(buf, "%llu\n", + (unsigned long long)iscsi_handle(priv->iscsi_transport)); +} +static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL); + +#define show_transport_attr(name, format) \ +static ssize_t \ +show_transport_##name(struct device *dev, \ + struct device_attribute *attr,char *buf) \ +{ \ + struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \ + return sysfs_emit(buf, format"\n", priv->iscsi_transport->name);\ +} \ +static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL); + +show_transport_attr(caps, "0x%x"); + +static struct attribute *iscsi_transport_attrs[] = { + &dev_attr_handle.attr, + &dev_attr_caps.attr, + NULL, +}; + +static struct attribute_group iscsi_transport_group = { + .attrs = iscsi_transport_attrs, +}; + +/* + * iSCSI endpoint attrs + */ +#define iscsi_dev_to_endpoint(_dev) \ + container_of(_dev, struct iscsi_endpoint, dev) + +#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \ +struct device_attribute dev_attr_##_prefix##_##_name = \ + __ATTR(_name,_mode,_show,_store) + +static void iscsi_endpoint_release(struct device *dev) +{ + struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); + + mutex_lock(&iscsi_ep_idr_mutex); + idr_remove(&iscsi_ep_idr, ep->id); + mutex_unlock(&iscsi_ep_idr_mutex); + + kfree(ep); +} + +static struct class iscsi_endpoint_class = { + .name = "iscsi_endpoint", + .dev_release = iscsi_endpoint_release, +}; + +static ssize_t +show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); + return sysfs_emit(buf, "%d\n", ep->id); +} +static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL); + +static struct attribute *iscsi_endpoint_attrs[] = { + &dev_attr_ep_handle.attr, + NULL, +}; + +static struct attribute_group 
iscsi_endpoint_group = { + .attrs = iscsi_endpoint_attrs, +}; + +struct iscsi_endpoint * +iscsi_create_endpoint(int dd_size) +{ + struct iscsi_endpoint *ep; + int err, id; + + ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL); + if (!ep) + return NULL; + + mutex_lock(&iscsi_ep_idr_mutex); + + /* + * First endpoint id should be 1 to comply with user space + * applications (iscsid). + */ + id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO); + if (id < 0) { + mutex_unlock(&iscsi_ep_idr_mutex); + printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n", + id); + goto free_ep; + } + mutex_unlock(&iscsi_ep_idr_mutex); + + ep->id = id; + ep->dev.class = &iscsi_endpoint_class; + dev_set_name(&ep->dev, "ep-%d", id); + err = device_register(&ep->dev); + if (err) + goto put_dev; + + err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group); + if (err) + goto unregister_dev; + + if (dd_size) + ep->dd_data = &ep[1]; + return ep; + +unregister_dev: + device_unregister(&ep->dev); + return NULL; + +put_dev: + mutex_lock(&iscsi_ep_idr_mutex); + idr_remove(&iscsi_ep_idr, id); + mutex_unlock(&iscsi_ep_idr_mutex); + put_device(&ep->dev); + return NULL; +free_ep: + kfree(ep); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_create_endpoint); + +void iscsi_destroy_endpoint(struct iscsi_endpoint *ep) +{ + sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group); + device_unregister(&ep->dev); +} +EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint); + +void iscsi_put_endpoint(struct iscsi_endpoint *ep) +{ + put_device(&ep->dev); +} +EXPORT_SYMBOL_GPL(iscsi_put_endpoint); + +/** + * iscsi_lookup_endpoint - get ep from handle + * @handle: endpoint handle + * + * Caller must do a iscsi_put_endpoint. + */ +struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle) +{ + struct iscsi_endpoint *ep; + + mutex_lock(&iscsi_ep_idr_mutex); + ep = idr_find(&iscsi_ep_idr, handle); + if (!ep) + goto unlock; + + get_device(&ep->dev); +unlock: + mutex_unlock(&iscsi_ep_idr_mutex); + return ep; +} +EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint); + +/* + * Interface to display network param to sysfs + */ + +static void iscsi_iface_release(struct device *dev) +{ + struct iscsi_iface *iface = iscsi_dev_to_iface(dev); + struct device *parent = iface->dev.parent; + + kfree(iface); + put_device(parent); +} + + +static struct class iscsi_iface_class = { + .name = "iscsi_iface", + .dev_release = iscsi_iface_release, +}; + +#define ISCSI_IFACE_ATTR(_prefix, _name, _mode, _show, _store) \ +struct device_attribute dev_attr_##_prefix##_##_name = \ + __ATTR(_name, _mode, _show, _store) + +/* iface attrs show */ +#define iscsi_iface_attr_show(type, name, param_type, param) \ +static ssize_t \ +show_##type##_##name(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct iscsi_iface *iface = iscsi_dev_to_iface(dev); \ + struct iscsi_transport *t = iface->transport; \ + return t->get_iface_param(iface, param_type, param, buf); \ +} \ + +#define iscsi_iface_net_attr(type, name, param) \ + iscsi_iface_attr_show(type, name, ISCSI_NET_PARAM, param) \ +static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL); + +#define iscsi_iface_attr(type, name, param) \ + iscsi_iface_attr_show(type, name, ISCSI_IFACE_PARAM, param) \ +static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL); + +/* generic read only ipv4 attribute */ +iscsi_iface_net_attr(ipv4_iface, ipaddress, ISCSI_NET_PARAM_IPV4_ADDR); +iscsi_iface_net_attr(ipv4_iface, gateway, ISCSI_NET_PARAM_IPV4_GW); +iscsi_iface_net_attr(ipv4_iface, subnet, 
ISCSI_NET_PARAM_IPV4_SUBNET); +iscsi_iface_net_attr(ipv4_iface, bootproto, ISCSI_NET_PARAM_IPV4_BOOTPROTO); +iscsi_iface_net_attr(ipv4_iface, dhcp_dns_address_en, + ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN); +iscsi_iface_net_attr(ipv4_iface, dhcp_slp_da_info_en, + ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN); +iscsi_iface_net_attr(ipv4_iface, tos_en, ISCSI_NET_PARAM_IPV4_TOS_EN); +iscsi_iface_net_attr(ipv4_iface, tos, ISCSI_NET_PARAM_IPV4_TOS); +iscsi_iface_net_attr(ipv4_iface, grat_arp_en, + ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN); +iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id_en, + ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN); +iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id, + ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID); +iscsi_iface_net_attr(ipv4_iface, dhcp_req_vendor_id_en, + ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN); +iscsi_iface_net_attr(ipv4_iface, dhcp_use_vendor_id_en, + ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN); +iscsi_iface_net_attr(ipv4_iface, dhcp_vendor_id, + ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID); +iscsi_iface_net_attr(ipv4_iface, dhcp_learn_iqn_en, + ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN); +iscsi_iface_net_attr(ipv4_iface, fragment_disable, + ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE); +iscsi_iface_net_attr(ipv4_iface, incoming_forwarding_en, + ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN); +iscsi_iface_net_attr(ipv4_iface, ttl, ISCSI_NET_PARAM_IPV4_TTL); + +/* generic read only ipv6 attribute */ +iscsi_iface_net_attr(ipv6_iface, ipaddress, ISCSI_NET_PARAM_IPV6_ADDR); +iscsi_iface_net_attr(ipv6_iface, link_local_addr, + ISCSI_NET_PARAM_IPV6_LINKLOCAL); +iscsi_iface_net_attr(ipv6_iface, router_addr, ISCSI_NET_PARAM_IPV6_ROUTER); +iscsi_iface_net_attr(ipv6_iface, ipaddr_autocfg, + ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG); +iscsi_iface_net_attr(ipv6_iface, link_local_autocfg, + ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG); +iscsi_iface_net_attr(ipv6_iface, link_local_state, + ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE); +iscsi_iface_net_attr(ipv6_iface, router_state, + ISCSI_NET_PARAM_IPV6_ROUTER_STATE); +iscsi_iface_net_attr(ipv6_iface, grat_neighbor_adv_en, + ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN); +iscsi_iface_net_attr(ipv6_iface, mld_en, ISCSI_NET_PARAM_IPV6_MLD_EN); +iscsi_iface_net_attr(ipv6_iface, flow_label, ISCSI_NET_PARAM_IPV6_FLOW_LABEL); +iscsi_iface_net_attr(ipv6_iface, traffic_class, + ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS); +iscsi_iface_net_attr(ipv6_iface, hop_limit, ISCSI_NET_PARAM_IPV6_HOP_LIMIT); +iscsi_iface_net_attr(ipv6_iface, nd_reachable_tmo, + ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO); +iscsi_iface_net_attr(ipv6_iface, nd_rexmit_time, + ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME); +iscsi_iface_net_attr(ipv6_iface, nd_stale_tmo, + ISCSI_NET_PARAM_IPV6_ND_STALE_TMO); +iscsi_iface_net_attr(ipv6_iface, dup_addr_detect_cnt, + ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT); +iscsi_iface_net_attr(ipv6_iface, router_adv_link_mtu, + ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU); + +/* common read only iface attribute */ +iscsi_iface_net_attr(iface, enabled, ISCSI_NET_PARAM_IFACE_ENABLE); +iscsi_iface_net_attr(iface, vlan_id, ISCSI_NET_PARAM_VLAN_ID); +iscsi_iface_net_attr(iface, vlan_priority, ISCSI_NET_PARAM_VLAN_PRIORITY); +iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED); +iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU); +iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT); +iscsi_iface_net_attr(iface, ipaddress_state, ISCSI_NET_PARAM_IPADDR_STATE); +iscsi_iface_net_attr(iface, delayed_ack_en, ISCSI_NET_PARAM_DELAYED_ACK_EN); +iscsi_iface_net_attr(iface, tcp_nagle_disable, + 
ISCSI_NET_PARAM_TCP_NAGLE_DISABLE); +iscsi_iface_net_attr(iface, tcp_wsf_disable, ISCSI_NET_PARAM_TCP_WSF_DISABLE); +iscsi_iface_net_attr(iface, tcp_wsf, ISCSI_NET_PARAM_TCP_WSF); +iscsi_iface_net_attr(iface, tcp_timer_scale, ISCSI_NET_PARAM_TCP_TIMER_SCALE); +iscsi_iface_net_attr(iface, tcp_timestamp_en, ISCSI_NET_PARAM_TCP_TIMESTAMP_EN); +iscsi_iface_net_attr(iface, cache_id, ISCSI_NET_PARAM_CACHE_ID); +iscsi_iface_net_attr(iface, redirect_en, ISCSI_NET_PARAM_REDIRECT_EN); + +/* common iscsi specific settings attributes */ +iscsi_iface_attr(iface, def_taskmgmt_tmo, ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO); +iscsi_iface_attr(iface, header_digest, ISCSI_IFACE_PARAM_HDRDGST_EN); +iscsi_iface_attr(iface, data_digest, ISCSI_IFACE_PARAM_DATADGST_EN); +iscsi_iface_attr(iface, immediate_data, ISCSI_IFACE_PARAM_IMM_DATA_EN); +iscsi_iface_attr(iface, initial_r2t, ISCSI_IFACE_PARAM_INITIAL_R2T_EN); +iscsi_iface_attr(iface, data_seq_in_order, + ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN); +iscsi_iface_attr(iface, data_pdu_in_order, ISCSI_IFACE_PARAM_PDU_INORDER_EN); +iscsi_iface_attr(iface, erl, ISCSI_IFACE_PARAM_ERL); +iscsi_iface_attr(iface, max_recv_dlength, ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH); +iscsi_iface_attr(iface, first_burst_len, ISCSI_IFACE_PARAM_FIRST_BURST); +iscsi_iface_attr(iface, max_outstanding_r2t, ISCSI_IFACE_PARAM_MAX_R2T); +iscsi_iface_attr(iface, max_burst_len, ISCSI_IFACE_PARAM_MAX_BURST); +iscsi_iface_attr(iface, chap_auth, ISCSI_IFACE_PARAM_CHAP_AUTH_EN); +iscsi_iface_attr(iface, bidi_chap, ISCSI_IFACE_PARAM_BIDI_CHAP_EN); +iscsi_iface_attr(iface, discovery_auth_optional, + ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL); +iscsi_iface_attr(iface, discovery_logout, + ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN); +iscsi_iface_attr(iface, strict_login_comp_en, + ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN); +iscsi_iface_attr(iface, initiator_name, ISCSI_IFACE_PARAM_INITIATOR_NAME); + +static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct iscsi_iface *iface = iscsi_dev_to_iface(dev); + struct iscsi_transport *t = iface->transport; + int param = -1; + + if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) + param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; + else if (attr == &dev_attr_iface_header_digest.attr) + param = ISCSI_IFACE_PARAM_HDRDGST_EN; + else if (attr == &dev_attr_iface_data_digest.attr) + param = ISCSI_IFACE_PARAM_DATADGST_EN; + else if (attr == &dev_attr_iface_immediate_data.attr) + param = ISCSI_IFACE_PARAM_IMM_DATA_EN; + else if (attr == &dev_attr_iface_initial_r2t.attr) + param = ISCSI_IFACE_PARAM_INITIAL_R2T_EN; + else if (attr == &dev_attr_iface_data_seq_in_order.attr) + param = ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN; + else if (attr == &dev_attr_iface_data_pdu_in_order.attr) + param = ISCSI_IFACE_PARAM_PDU_INORDER_EN; + else if (attr == &dev_attr_iface_erl.attr) + param = ISCSI_IFACE_PARAM_ERL; + else if (attr == &dev_attr_iface_max_recv_dlength.attr) + param = ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH; + else if (attr == &dev_attr_iface_first_burst_len.attr) + param = ISCSI_IFACE_PARAM_FIRST_BURST; + else if (attr == &dev_attr_iface_max_outstanding_r2t.attr) + param = ISCSI_IFACE_PARAM_MAX_R2T; + else if (attr == &dev_attr_iface_max_burst_len.attr) + param = ISCSI_IFACE_PARAM_MAX_BURST; + else if (attr == &dev_attr_iface_chap_auth.attr) + param = ISCSI_IFACE_PARAM_CHAP_AUTH_EN; + else if (attr == &dev_attr_iface_bidi_chap.attr) + param = ISCSI_IFACE_PARAM_BIDI_CHAP_EN; + else if 
(attr == &dev_attr_iface_discovery_auth_optional.attr) + param = ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL; + else if (attr == &dev_attr_iface_discovery_logout.attr) + param = ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN; + else if (attr == &dev_attr_iface_strict_login_comp_en.attr) + param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN; + else if (attr == &dev_attr_iface_initiator_name.attr) + param = ISCSI_IFACE_PARAM_INITIATOR_NAME; + + if (param != -1) + return t->attr_is_visible(ISCSI_IFACE_PARAM, param); + + if (attr == &dev_attr_iface_enabled.attr) + param = ISCSI_NET_PARAM_IFACE_ENABLE; + else if (attr == &dev_attr_iface_vlan_id.attr) + param = ISCSI_NET_PARAM_VLAN_ID; + else if (attr == &dev_attr_iface_vlan_priority.attr) + param = ISCSI_NET_PARAM_VLAN_PRIORITY; + else if (attr == &dev_attr_iface_vlan_enabled.attr) + param = ISCSI_NET_PARAM_VLAN_ENABLED; + else if (attr == &dev_attr_iface_mtu.attr) + param = ISCSI_NET_PARAM_MTU; + else if (attr == &dev_attr_iface_port.attr) + param = ISCSI_NET_PARAM_PORT; + else if (attr == &dev_attr_iface_ipaddress_state.attr) + param = ISCSI_NET_PARAM_IPADDR_STATE; + else if (attr == &dev_attr_iface_delayed_ack_en.attr) + param = ISCSI_NET_PARAM_DELAYED_ACK_EN; + else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) + param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; + else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) + param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; + else if (attr == &dev_attr_iface_tcp_wsf.attr) + param = ISCSI_NET_PARAM_TCP_WSF; + else if (attr == &dev_attr_iface_tcp_timer_scale.attr) + param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; + else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) + param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; + else if (attr == &dev_attr_iface_cache_id.attr) + param = ISCSI_NET_PARAM_CACHE_ID; + else if (attr == &dev_attr_iface_redirect_en.attr) + param = ISCSI_NET_PARAM_REDIRECT_EN; + else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + if (attr == &dev_attr_ipv4_iface_ipaddress.attr) + param = ISCSI_NET_PARAM_IPV4_ADDR; + else if (attr == &dev_attr_ipv4_iface_gateway.attr) + param = ISCSI_NET_PARAM_IPV4_GW; + else if (attr == &dev_attr_ipv4_iface_subnet.attr) + param = ISCSI_NET_PARAM_IPV4_SUBNET; + else if (attr == &dev_attr_ipv4_iface_bootproto.attr) + param = ISCSI_NET_PARAM_IPV4_BOOTPROTO; + else if (attr == + &dev_attr_ipv4_iface_dhcp_dns_address_en.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN; + else if (attr == + &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN; + else if (attr == &dev_attr_ipv4_iface_tos_en.attr) + param = ISCSI_NET_PARAM_IPV4_TOS_EN; + else if (attr == &dev_attr_ipv4_iface_tos.attr) + param = ISCSI_NET_PARAM_IPV4_TOS; + else if (attr == &dev_attr_ipv4_iface_grat_arp_en.attr) + param = ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN; + else if (attr == + &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN; + else if (attr == &dev_attr_ipv4_iface_dhcp_alt_client_id.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID; + else if (attr == + &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN; + else if (attr == + &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN; + else if (attr == &dev_attr_ipv4_iface_dhcp_vendor_id.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID; + else if (attr == + &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN; + else if (attr == + 
&dev_attr_ipv4_iface_fragment_disable.attr) + param = ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE; + else if (attr == + &dev_attr_ipv4_iface_incoming_forwarding_en.attr) + param = ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN; + else if (attr == &dev_attr_ipv4_iface_ttl.attr) + param = ISCSI_NET_PARAM_IPV4_TTL; + else + return 0; + } else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) { + if (attr == &dev_attr_ipv6_iface_ipaddress.attr) + param = ISCSI_NET_PARAM_IPV6_ADDR; + else if (attr == &dev_attr_ipv6_iface_link_local_addr.attr) + param = ISCSI_NET_PARAM_IPV6_LINKLOCAL; + else if (attr == &dev_attr_ipv6_iface_router_addr.attr) + param = ISCSI_NET_PARAM_IPV6_ROUTER; + else if (attr == &dev_attr_ipv6_iface_ipaddr_autocfg.attr) + param = ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG; + else if (attr == &dev_attr_ipv6_iface_link_local_autocfg.attr) + param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG; + else if (attr == &dev_attr_ipv6_iface_link_local_state.attr) + param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE; + else if (attr == &dev_attr_ipv6_iface_router_state.attr) + param = ISCSI_NET_PARAM_IPV6_ROUTER_STATE; + else if (attr == + &dev_attr_ipv6_iface_grat_neighbor_adv_en.attr) + param = ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN; + else if (attr == &dev_attr_ipv6_iface_mld_en.attr) + param = ISCSI_NET_PARAM_IPV6_MLD_EN; + else if (attr == &dev_attr_ipv6_iface_flow_label.attr) + param = ISCSI_NET_PARAM_IPV6_FLOW_LABEL; + else if (attr == &dev_attr_ipv6_iface_traffic_class.attr) + param = ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS; + else if (attr == &dev_attr_ipv6_iface_hop_limit.attr) + param = ISCSI_NET_PARAM_IPV6_HOP_LIMIT; + else if (attr == &dev_attr_ipv6_iface_nd_reachable_tmo.attr) + param = ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO; + else if (attr == &dev_attr_ipv6_iface_nd_rexmit_time.attr) + param = ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME; + else if (attr == &dev_attr_ipv6_iface_nd_stale_tmo.attr) + param = ISCSI_NET_PARAM_IPV6_ND_STALE_TMO; + else if (attr == &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr) + param = ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT; + else if (attr == &dev_attr_ipv6_iface_router_adv_link_mtu.attr) + param = ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU; + else + return 0; + } else { + WARN_ONCE(1, "Invalid iface attr"); + return 0; + } + + return t->attr_is_visible(ISCSI_NET_PARAM, param); +} + +static struct attribute *iscsi_iface_attrs[] = { + &dev_attr_iface_enabled.attr, + &dev_attr_iface_vlan_id.attr, + &dev_attr_iface_vlan_priority.attr, + &dev_attr_iface_vlan_enabled.attr, + &dev_attr_ipv4_iface_ipaddress.attr, + &dev_attr_ipv4_iface_gateway.attr, + &dev_attr_ipv4_iface_subnet.attr, + &dev_attr_ipv4_iface_bootproto.attr, + &dev_attr_ipv6_iface_ipaddress.attr, + &dev_attr_ipv6_iface_link_local_addr.attr, + &dev_attr_ipv6_iface_router_addr.attr, + &dev_attr_ipv6_iface_ipaddr_autocfg.attr, + &dev_attr_ipv6_iface_link_local_autocfg.attr, + &dev_attr_iface_mtu.attr, + &dev_attr_iface_port.attr, + &dev_attr_iface_ipaddress_state.attr, + &dev_attr_iface_delayed_ack_en.attr, + &dev_attr_iface_tcp_nagle_disable.attr, + &dev_attr_iface_tcp_wsf_disable.attr, + &dev_attr_iface_tcp_wsf.attr, + &dev_attr_iface_tcp_timer_scale.attr, + &dev_attr_iface_tcp_timestamp_en.attr, + &dev_attr_iface_cache_id.attr, + &dev_attr_iface_redirect_en.attr, + &dev_attr_iface_def_taskmgmt_tmo.attr, + &dev_attr_iface_header_digest.attr, + &dev_attr_iface_data_digest.attr, + &dev_attr_iface_immediate_data.attr, + &dev_attr_iface_initial_r2t.attr, + &dev_attr_iface_data_seq_in_order.attr, + &dev_attr_iface_data_pdu_in_order.attr, + 
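
Every entry in the iface attribute group above is gated through the transport's attr_is_visible() hook: the is_visible handler translates the sysfs attribute into an ISCSI_NET_PARAM or ISCSI_IFACE_PARAM value and lets the LLD decide, where a return of 0 hides the file and a mode such as S_IRUGO exposes it read-only. A minimal sketch of how an LLD might implement that hook follows; the function name and the particular parameters it chooses to expose are illustrative assumptions, not taken from this file.

#include <linux/stat.h>
#include <scsi/iscsi_if.h>
#include <scsi/scsi_transport_iscsi.h>

/* Hypothetical LLD policy: expose only the parameters the hardware reports. */
static umode_t example_iscsi_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_NET_PARAM:
		switch (param) {
		case ISCSI_NET_PARAM_IPV4_ADDR:
		case ISCSI_NET_PARAM_IPV4_GW:
		case ISCSI_NET_PARAM_IPV4_SUBNET:
		case ISCSI_NET_PARAM_IFACE_ENABLE:
			return S_IRUGO;
		default:
			return 0;	/* keep the sysfs file hidden */
		}
	case ISCSI_IFACE_PARAM:
		switch (param) {
		case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
		case ISCSI_IFACE_PARAM_HDRDGST_EN:
		case ISCSI_IFACE_PARAM_DATADGST_EN:
			return S_IRUGO;
		default:
			return 0;
		}
	}
	return 0;
}

The flashnode attribute groups later in this file consult the same hook with ISCSI_FLASHNODE_PARAM.
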
&dev_attr_iface_erl.attr, + &dev_attr_iface_max_recv_dlength.attr, + &dev_attr_iface_first_burst_len.attr, + &dev_attr_iface_max_outstanding_r2t.attr, + &dev_attr_iface_max_burst_len.attr, + &dev_attr_iface_chap_auth.attr, + &dev_attr_iface_bidi_chap.attr, + &dev_attr_iface_discovery_auth_optional.attr, + &dev_attr_iface_discovery_logout.attr, + &dev_attr_iface_strict_login_comp_en.attr, + &dev_attr_iface_initiator_name.attr, + &dev_attr_ipv4_iface_dhcp_dns_address_en.attr, + &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr, + &dev_attr_ipv4_iface_tos_en.attr, + &dev_attr_ipv4_iface_tos.attr, + &dev_attr_ipv4_iface_grat_arp_en.attr, + &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr, + &dev_attr_ipv4_iface_dhcp_alt_client_id.attr, + &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr, + &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr, + &dev_attr_ipv4_iface_dhcp_vendor_id.attr, + &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr, + &dev_attr_ipv4_iface_fragment_disable.attr, + &dev_attr_ipv4_iface_incoming_forwarding_en.attr, + &dev_attr_ipv4_iface_ttl.attr, + &dev_attr_ipv6_iface_link_local_state.attr, + &dev_attr_ipv6_iface_router_state.attr, + &dev_attr_ipv6_iface_grat_neighbor_adv_en.attr, + &dev_attr_ipv6_iface_mld_en.attr, + &dev_attr_ipv6_iface_flow_label.attr, + &dev_attr_ipv6_iface_traffic_class.attr, + &dev_attr_ipv6_iface_hop_limit.attr, + &dev_attr_ipv6_iface_nd_reachable_tmo.attr, + &dev_attr_ipv6_iface_nd_rexmit_time.attr, + &dev_attr_ipv6_iface_nd_stale_tmo.attr, + &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr, + &dev_attr_ipv6_iface_router_adv_link_mtu.attr, + NULL, +}; + +static struct attribute_group iscsi_iface_group = { + .attrs = iscsi_iface_attrs, + .is_visible = iscsi_iface_attr_is_visible, +}; + +/* convert iscsi_ipaddress_state values to ascii string name */ +static const struct { + enum iscsi_ipaddress_state value; + char *name; +} iscsi_ipaddress_state_names[] = { + {ISCSI_IPDDRESS_STATE_UNCONFIGURED, "Unconfigured" }, + {ISCSI_IPDDRESS_STATE_ACQUIRING, "Acquiring" }, + {ISCSI_IPDDRESS_STATE_TENTATIVE, "Tentative" }, + {ISCSI_IPDDRESS_STATE_VALID, "Valid" }, + {ISCSI_IPDDRESS_STATE_DISABLING, "Disabling" }, + {ISCSI_IPDDRESS_STATE_INVALID, "Invalid" }, + {ISCSI_IPDDRESS_STATE_DEPRECATED, "Deprecated" }, +}; + +char *iscsi_get_ipaddress_state_name(enum iscsi_ipaddress_state port_state) +{ + int i; + char *state = NULL; + + for (i = 0; i < ARRAY_SIZE(iscsi_ipaddress_state_names); i++) { + if (iscsi_ipaddress_state_names[i].value == port_state) { + state = iscsi_ipaddress_state_names[i].name; + break; + } + } + return state; +} +EXPORT_SYMBOL_GPL(iscsi_get_ipaddress_state_name); + +/* convert iscsi_router_state values to ascii string name */ +static const struct { + enum iscsi_router_state value; + char *name; +} iscsi_router_state_names[] = { + {ISCSI_ROUTER_STATE_UNKNOWN, "Unknown" }, + {ISCSI_ROUTER_STATE_ADVERTISED, "Advertised" }, + {ISCSI_ROUTER_STATE_MANUAL, "Manual" }, + {ISCSI_ROUTER_STATE_STALE, "Stale" }, +}; + +char *iscsi_get_router_state_name(enum iscsi_router_state router_state) +{ + int i; + char *state = NULL; + + for (i = 0; i < ARRAY_SIZE(iscsi_router_state_names); i++) { + if (iscsi_router_state_names[i].value == router_state) { + state = iscsi_router_state_names[i].name; + break; + } + } + return state; +} +EXPORT_SYMBOL_GPL(iscsi_get_router_state_name); + +struct iscsi_iface * +iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport, + uint32_t iface_type, uint32_t iface_num, int dd_size) +{ + struct iscsi_iface *iface; + int err; + + 
iface = kzalloc(sizeof(*iface) + dd_size, GFP_KERNEL); + if (!iface) + return NULL; + + iface->transport = transport; + iface->iface_type = iface_type; + iface->iface_num = iface_num; + iface->dev.release = iscsi_iface_release; + iface->dev.class = &iscsi_iface_class; + /* parent reference released in iscsi_iface_release */ + iface->dev.parent = get_device(&shost->shost_gendev); + if (iface_type == ISCSI_IFACE_TYPE_IPV4) + dev_set_name(&iface->dev, "ipv4-iface-%u-%u", shost->host_no, + iface_num); + else + dev_set_name(&iface->dev, "ipv6-iface-%u-%u", shost->host_no, + iface_num); + + err = device_register(&iface->dev); + if (err) + goto put_dev; + + err = sysfs_create_group(&iface->dev.kobj, &iscsi_iface_group); + if (err) + goto unreg_iface; + + if (dd_size) + iface->dd_data = &iface[1]; + return iface; + +unreg_iface: + device_unregister(&iface->dev); + return NULL; + +put_dev: + put_device(&iface->dev); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_create_iface); + +void iscsi_destroy_iface(struct iscsi_iface *iface) +{ + sysfs_remove_group(&iface->dev.kobj, &iscsi_iface_group); + device_unregister(&iface->dev); +} +EXPORT_SYMBOL_GPL(iscsi_destroy_iface); + +/* + * Interface to display flash node params to sysfs + */ + +#define ISCSI_FLASHNODE_ATTR(_prefix, _name, _mode, _show, _store) \ +struct device_attribute dev_attr_##_prefix##_##_name = \ + __ATTR(_name, _mode, _show, _store) + +/* flash node session attrs show */ +#define iscsi_flashnode_sess_attr_show(type, name, param) \ +static ssize_t \ +show_##type##_##name(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct iscsi_bus_flash_session *fnode_sess = \ + iscsi_dev_to_flash_session(dev);\ + struct iscsi_transport *t = fnode_sess->transport; \ + return t->get_flashnode_param(fnode_sess, param, buf); \ +} \ + + +#define iscsi_flashnode_sess_attr(type, name, param) \ + iscsi_flashnode_sess_attr_show(type, name, param) \ +static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \ + show_##type##_##name, NULL); + +/* Flash node session attributes */ + +iscsi_flashnode_sess_attr(fnode, auto_snd_tgt_disable, + ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE); +iscsi_flashnode_sess_attr(fnode, discovery_session, + ISCSI_FLASHNODE_DISCOVERY_SESS); +iscsi_flashnode_sess_attr(fnode, portal_type, ISCSI_FLASHNODE_PORTAL_TYPE); +iscsi_flashnode_sess_attr(fnode, entry_enable, ISCSI_FLASHNODE_ENTRY_EN); +iscsi_flashnode_sess_attr(fnode, immediate_data, ISCSI_FLASHNODE_IMM_DATA_EN); +iscsi_flashnode_sess_attr(fnode, initial_r2t, ISCSI_FLASHNODE_INITIAL_R2T_EN); +iscsi_flashnode_sess_attr(fnode, data_seq_in_order, + ISCSI_FLASHNODE_DATASEQ_INORDER); +iscsi_flashnode_sess_attr(fnode, data_pdu_in_order, + ISCSI_FLASHNODE_PDU_INORDER); +iscsi_flashnode_sess_attr(fnode, chap_auth, ISCSI_FLASHNODE_CHAP_AUTH_EN); +iscsi_flashnode_sess_attr(fnode, discovery_logout, + ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN); +iscsi_flashnode_sess_attr(fnode, bidi_chap, ISCSI_FLASHNODE_BIDI_CHAP_EN); +iscsi_flashnode_sess_attr(fnode, discovery_auth_optional, + ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL); +iscsi_flashnode_sess_attr(fnode, erl, ISCSI_FLASHNODE_ERL); +iscsi_flashnode_sess_attr(fnode, first_burst_len, ISCSI_FLASHNODE_FIRST_BURST); +iscsi_flashnode_sess_attr(fnode, def_time2wait, ISCSI_FLASHNODE_DEF_TIME2WAIT); +iscsi_flashnode_sess_attr(fnode, def_time2retain, + ISCSI_FLASHNODE_DEF_TIME2RETAIN); +iscsi_flashnode_sess_attr(fnode, max_outstanding_r2t, ISCSI_FLASHNODE_MAX_R2T); +iscsi_flashnode_sess_attr(fnode, isid, ISCSI_FLASHNODE_ISID); 
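
Each of the read-only flashnode session attributes declared above resolves to a single transport callback, get_flashnode_param(), which formats the requested parameter into the sysfs buffer. A sketch of such a callback for a hypothetical driver is shown here; it handles only two parameters, and the fallback error code is an assumption rather than anything mandated by the class.

#include <linux/sysfs.h>
#include <scsi/iscsi_if.h>
#include <scsi/scsi_transport_iscsi.h>

/* Hypothetical driver: report the few flashnode parameters it stores. */
static ssize_t example_get_flashnode_param(struct iscsi_bus_flash_session *fnode_sess,
					   int param, char *buf)
{
	switch (param) {
	case ISCSI_FLASHNODE_NAME:
		return sysfs_emit(buf, "%s\n", fnode_sess->targetname);
	case ISCSI_FLASHNODE_PORTAL_TYPE:
		return sysfs_emit(buf, "%s\n", fnode_sess->portal_type);
	default:
		return -ENOSYS;		/* parameter not kept by this driver */
	}
}
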
+iscsi_flashnode_sess_attr(fnode, tsid, ISCSI_FLASHNODE_TSID); +iscsi_flashnode_sess_attr(fnode, max_burst_len, ISCSI_FLASHNODE_MAX_BURST); +iscsi_flashnode_sess_attr(fnode, def_taskmgmt_tmo, + ISCSI_FLASHNODE_DEF_TASKMGMT_TMO); +iscsi_flashnode_sess_attr(fnode, targetalias, ISCSI_FLASHNODE_ALIAS); +iscsi_flashnode_sess_attr(fnode, targetname, ISCSI_FLASHNODE_NAME); +iscsi_flashnode_sess_attr(fnode, tpgt, ISCSI_FLASHNODE_TPGT); +iscsi_flashnode_sess_attr(fnode, discovery_parent_idx, + ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX); +iscsi_flashnode_sess_attr(fnode, discovery_parent_type, + ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE); +iscsi_flashnode_sess_attr(fnode, chap_in_idx, ISCSI_FLASHNODE_CHAP_IN_IDX); +iscsi_flashnode_sess_attr(fnode, chap_out_idx, ISCSI_FLASHNODE_CHAP_OUT_IDX); +iscsi_flashnode_sess_attr(fnode, username, ISCSI_FLASHNODE_USERNAME); +iscsi_flashnode_sess_attr(fnode, username_in, ISCSI_FLASHNODE_USERNAME_IN); +iscsi_flashnode_sess_attr(fnode, password, ISCSI_FLASHNODE_PASSWORD); +iscsi_flashnode_sess_attr(fnode, password_in, ISCSI_FLASHNODE_PASSWORD_IN); +iscsi_flashnode_sess_attr(fnode, is_boot_target, ISCSI_FLASHNODE_IS_BOOT_TGT); + +static struct attribute *iscsi_flashnode_sess_attrs[] = { + &dev_attr_fnode_auto_snd_tgt_disable.attr, + &dev_attr_fnode_discovery_session.attr, + &dev_attr_fnode_portal_type.attr, + &dev_attr_fnode_entry_enable.attr, + &dev_attr_fnode_immediate_data.attr, + &dev_attr_fnode_initial_r2t.attr, + &dev_attr_fnode_data_seq_in_order.attr, + &dev_attr_fnode_data_pdu_in_order.attr, + &dev_attr_fnode_chap_auth.attr, + &dev_attr_fnode_discovery_logout.attr, + &dev_attr_fnode_bidi_chap.attr, + &dev_attr_fnode_discovery_auth_optional.attr, + &dev_attr_fnode_erl.attr, + &dev_attr_fnode_first_burst_len.attr, + &dev_attr_fnode_def_time2wait.attr, + &dev_attr_fnode_def_time2retain.attr, + &dev_attr_fnode_max_outstanding_r2t.attr, + &dev_attr_fnode_isid.attr, + &dev_attr_fnode_tsid.attr, + &dev_attr_fnode_max_burst_len.attr, + &dev_attr_fnode_def_taskmgmt_tmo.attr, + &dev_attr_fnode_targetalias.attr, + &dev_attr_fnode_targetname.attr, + &dev_attr_fnode_tpgt.attr, + &dev_attr_fnode_discovery_parent_idx.attr, + &dev_attr_fnode_discovery_parent_type.attr, + &dev_attr_fnode_chap_in_idx.attr, + &dev_attr_fnode_chap_out_idx.attr, + &dev_attr_fnode_username.attr, + &dev_attr_fnode_username_in.attr, + &dev_attr_fnode_password.attr, + &dev_attr_fnode_password_in.attr, + &dev_attr_fnode_is_boot_target.attr, + NULL, +}; + +static umode_t iscsi_flashnode_sess_attr_is_visible(struct kobject *kobj, + struct attribute *attr, + int i) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct iscsi_bus_flash_session *fnode_sess = + iscsi_dev_to_flash_session(dev); + struct iscsi_transport *t = fnode_sess->transport; + int param; + + if (attr == &dev_attr_fnode_auto_snd_tgt_disable.attr) { + param = ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE; + } else if (attr == &dev_attr_fnode_discovery_session.attr) { + param = ISCSI_FLASHNODE_DISCOVERY_SESS; + } else if (attr == &dev_attr_fnode_portal_type.attr) { + param = ISCSI_FLASHNODE_PORTAL_TYPE; + } else if (attr == &dev_attr_fnode_entry_enable.attr) { + param = ISCSI_FLASHNODE_ENTRY_EN; + } else if (attr == &dev_attr_fnode_immediate_data.attr) { + param = ISCSI_FLASHNODE_IMM_DATA_EN; + } else if (attr == &dev_attr_fnode_initial_r2t.attr) { + param = ISCSI_FLASHNODE_INITIAL_R2T_EN; + } else if (attr == &dev_attr_fnode_data_seq_in_order.attr) { + param = ISCSI_FLASHNODE_DATASEQ_INORDER; + } else if (attr == 
&dev_attr_fnode_data_pdu_in_order.attr) { + param = ISCSI_FLASHNODE_PDU_INORDER; + } else if (attr == &dev_attr_fnode_chap_auth.attr) { + param = ISCSI_FLASHNODE_CHAP_AUTH_EN; + } else if (attr == &dev_attr_fnode_discovery_logout.attr) { + param = ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN; + } else if (attr == &dev_attr_fnode_bidi_chap.attr) { + param = ISCSI_FLASHNODE_BIDI_CHAP_EN; + } else if (attr == &dev_attr_fnode_discovery_auth_optional.attr) { + param = ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL; + } else if (attr == &dev_attr_fnode_erl.attr) { + param = ISCSI_FLASHNODE_ERL; + } else if (attr == &dev_attr_fnode_first_burst_len.attr) { + param = ISCSI_FLASHNODE_FIRST_BURST; + } else if (attr == &dev_attr_fnode_def_time2wait.attr) { + param = ISCSI_FLASHNODE_DEF_TIME2WAIT; + } else if (attr == &dev_attr_fnode_def_time2retain.attr) { + param = ISCSI_FLASHNODE_DEF_TIME2RETAIN; + } else if (attr == &dev_attr_fnode_max_outstanding_r2t.attr) { + param = ISCSI_FLASHNODE_MAX_R2T; + } else if (attr == &dev_attr_fnode_isid.attr) { + param = ISCSI_FLASHNODE_ISID; + } else if (attr == &dev_attr_fnode_tsid.attr) { + param = ISCSI_FLASHNODE_TSID; + } else if (attr == &dev_attr_fnode_max_burst_len.attr) { + param = ISCSI_FLASHNODE_MAX_BURST; + } else if (attr == &dev_attr_fnode_def_taskmgmt_tmo.attr) { + param = ISCSI_FLASHNODE_DEF_TASKMGMT_TMO; + } else if (attr == &dev_attr_fnode_targetalias.attr) { + param = ISCSI_FLASHNODE_ALIAS; + } else if (attr == &dev_attr_fnode_targetname.attr) { + param = ISCSI_FLASHNODE_NAME; + } else if (attr == &dev_attr_fnode_tpgt.attr) { + param = ISCSI_FLASHNODE_TPGT; + } else if (attr == &dev_attr_fnode_discovery_parent_idx.attr) { + param = ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX; + } else if (attr == &dev_attr_fnode_discovery_parent_type.attr) { + param = ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE; + } else if (attr == &dev_attr_fnode_chap_in_idx.attr) { + param = ISCSI_FLASHNODE_CHAP_IN_IDX; + } else if (attr == &dev_attr_fnode_chap_out_idx.attr) { + param = ISCSI_FLASHNODE_CHAP_OUT_IDX; + } else if (attr == &dev_attr_fnode_username.attr) { + param = ISCSI_FLASHNODE_USERNAME; + } else if (attr == &dev_attr_fnode_username_in.attr) { + param = ISCSI_FLASHNODE_USERNAME_IN; + } else if (attr == &dev_attr_fnode_password.attr) { + param = ISCSI_FLASHNODE_PASSWORD; + } else if (attr == &dev_attr_fnode_password_in.attr) { + param = ISCSI_FLASHNODE_PASSWORD_IN; + } else if (attr == &dev_attr_fnode_is_boot_target.attr) { + param = ISCSI_FLASHNODE_IS_BOOT_TGT; + } else { + WARN_ONCE(1, "Invalid flashnode session attr"); + return 0; + } + + return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param); +} + +static struct attribute_group iscsi_flashnode_sess_attr_group = { + .attrs = iscsi_flashnode_sess_attrs, + .is_visible = iscsi_flashnode_sess_attr_is_visible, +}; + +static const struct attribute_group *iscsi_flashnode_sess_attr_groups[] = { + &iscsi_flashnode_sess_attr_group, + NULL, +}; + +static void iscsi_flashnode_sess_release(struct device *dev) +{ + struct iscsi_bus_flash_session *fnode_sess = + iscsi_dev_to_flash_session(dev); + + kfree(fnode_sess->targetname); + kfree(fnode_sess->targetalias); + kfree(fnode_sess->portal_type); + kfree(fnode_sess); +} + +static const struct device_type iscsi_flashnode_sess_dev_type = { + .name = "iscsi_flashnode_sess_dev_type", + .groups = iscsi_flashnode_sess_attr_groups, + .release = iscsi_flashnode_sess_release, +}; + +/* flash node connection attrs show */ +#define iscsi_flashnode_conn_attr_show(type, name, param) \ +static ssize_t \ 
+show_##type##_##name(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);\ + struct iscsi_bus_flash_session *fnode_sess = \ + iscsi_flash_conn_to_flash_session(fnode_conn);\ + struct iscsi_transport *t = fnode_conn->transport; \ + return t->get_flashnode_param(fnode_sess, param, buf); \ +} \ + + +#define iscsi_flashnode_conn_attr(type, name, param) \ + iscsi_flashnode_conn_attr_show(type, name, param) \ +static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \ + show_##type##_##name, NULL); + +/* Flash node connection attributes */ + +iscsi_flashnode_conn_attr(fnode, is_fw_assigned_ipv6, + ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6); +iscsi_flashnode_conn_attr(fnode, header_digest, ISCSI_FLASHNODE_HDR_DGST_EN); +iscsi_flashnode_conn_attr(fnode, data_digest, ISCSI_FLASHNODE_DATA_DGST_EN); +iscsi_flashnode_conn_attr(fnode, snack_req, ISCSI_FLASHNODE_SNACK_REQ_EN); +iscsi_flashnode_conn_attr(fnode, tcp_timestamp_stat, + ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT); +iscsi_flashnode_conn_attr(fnode, tcp_nagle_disable, + ISCSI_FLASHNODE_TCP_NAGLE_DISABLE); +iscsi_flashnode_conn_attr(fnode, tcp_wsf_disable, + ISCSI_FLASHNODE_TCP_WSF_DISABLE); +iscsi_flashnode_conn_attr(fnode, tcp_timer_scale, + ISCSI_FLASHNODE_TCP_TIMER_SCALE); +iscsi_flashnode_conn_attr(fnode, tcp_timestamp_enable, + ISCSI_FLASHNODE_TCP_TIMESTAMP_EN); +iscsi_flashnode_conn_attr(fnode, fragment_disable, + ISCSI_FLASHNODE_IP_FRAG_DISABLE); +iscsi_flashnode_conn_attr(fnode, keepalive_tmo, ISCSI_FLASHNODE_KEEPALIVE_TMO); +iscsi_flashnode_conn_attr(fnode, port, ISCSI_FLASHNODE_PORT); +iscsi_flashnode_conn_attr(fnode, ipaddress, ISCSI_FLASHNODE_IPADDR); +iscsi_flashnode_conn_attr(fnode, max_recv_dlength, + ISCSI_FLASHNODE_MAX_RECV_DLENGTH); +iscsi_flashnode_conn_attr(fnode, max_xmit_dlength, + ISCSI_FLASHNODE_MAX_XMIT_DLENGTH); +iscsi_flashnode_conn_attr(fnode, local_port, ISCSI_FLASHNODE_LOCAL_PORT); +iscsi_flashnode_conn_attr(fnode, ipv4_tos, ISCSI_FLASHNODE_IPV4_TOS); +iscsi_flashnode_conn_attr(fnode, ipv6_traffic_class, ISCSI_FLASHNODE_IPV6_TC); +iscsi_flashnode_conn_attr(fnode, ipv6_flow_label, + ISCSI_FLASHNODE_IPV6_FLOW_LABEL); +iscsi_flashnode_conn_attr(fnode, redirect_ipaddr, + ISCSI_FLASHNODE_REDIRECT_IPADDR); +iscsi_flashnode_conn_attr(fnode, max_segment_size, + ISCSI_FLASHNODE_MAX_SEGMENT_SIZE); +iscsi_flashnode_conn_attr(fnode, link_local_ipv6, + ISCSI_FLASHNODE_LINK_LOCAL_IPV6); +iscsi_flashnode_conn_attr(fnode, tcp_xmit_wsf, ISCSI_FLASHNODE_TCP_XMIT_WSF); +iscsi_flashnode_conn_attr(fnode, tcp_recv_wsf, ISCSI_FLASHNODE_TCP_RECV_WSF); +iscsi_flashnode_conn_attr(fnode, statsn, ISCSI_FLASHNODE_STATSN); +iscsi_flashnode_conn_attr(fnode, exp_statsn, ISCSI_FLASHNODE_EXP_STATSN); + +static struct attribute *iscsi_flashnode_conn_attrs[] = { + &dev_attr_fnode_is_fw_assigned_ipv6.attr, + &dev_attr_fnode_header_digest.attr, + &dev_attr_fnode_data_digest.attr, + &dev_attr_fnode_snack_req.attr, + &dev_attr_fnode_tcp_timestamp_stat.attr, + &dev_attr_fnode_tcp_nagle_disable.attr, + &dev_attr_fnode_tcp_wsf_disable.attr, + &dev_attr_fnode_tcp_timer_scale.attr, + &dev_attr_fnode_tcp_timestamp_enable.attr, + &dev_attr_fnode_fragment_disable.attr, + &dev_attr_fnode_max_recv_dlength.attr, + &dev_attr_fnode_max_xmit_dlength.attr, + &dev_attr_fnode_keepalive_tmo.attr, + &dev_attr_fnode_port.attr, + &dev_attr_fnode_ipaddress.attr, + &dev_attr_fnode_redirect_ipaddr.attr, + &dev_attr_fnode_max_segment_size.attr, + &dev_attr_fnode_local_port.attr, + 
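
The connection-level flashnode attributes above are visibility-gated the same way as the session-level ones: the group's is_visible handler maps each attribute to an ISCSI_FLASHNODE_* value and asks the transport. A transport that supports flash nodes might extend its attr_is_visible() hook along these lines; the subset of parameters exposed here is only an assumption.

/* Hypothetical flash-node branch of an LLD's attr_is_visible() hook. */
static umode_t example_flashnode_attr_is_visible(int param_type, int param)
{
	if (param_type != ISCSI_FLASHNODE_PARAM)
		return 0;

	switch (param) {
	case ISCSI_FLASHNODE_NAME:
	case ISCSI_FLASHNODE_IPADDR:
	case ISCSI_FLASHNODE_PORT:
	case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
		return S_IRUGO;
	default:
		return 0;
	}
}
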
&dev_attr_fnode_ipv4_tos.attr, + &dev_attr_fnode_ipv6_traffic_class.attr, + &dev_attr_fnode_ipv6_flow_label.attr, + &dev_attr_fnode_link_local_ipv6.attr, + &dev_attr_fnode_tcp_xmit_wsf.attr, + &dev_attr_fnode_tcp_recv_wsf.attr, + &dev_attr_fnode_statsn.attr, + &dev_attr_fnode_exp_statsn.attr, + NULL, +}; + +static umode_t iscsi_flashnode_conn_attr_is_visible(struct kobject *kobj, + struct attribute *attr, + int i) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev); + struct iscsi_transport *t = fnode_conn->transport; + int param; + + if (attr == &dev_attr_fnode_is_fw_assigned_ipv6.attr) { + param = ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6; + } else if (attr == &dev_attr_fnode_header_digest.attr) { + param = ISCSI_FLASHNODE_HDR_DGST_EN; + } else if (attr == &dev_attr_fnode_data_digest.attr) { + param = ISCSI_FLASHNODE_DATA_DGST_EN; + } else if (attr == &dev_attr_fnode_snack_req.attr) { + param = ISCSI_FLASHNODE_SNACK_REQ_EN; + } else if (attr == &dev_attr_fnode_tcp_timestamp_stat.attr) { + param = ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT; + } else if (attr == &dev_attr_fnode_tcp_nagle_disable.attr) { + param = ISCSI_FLASHNODE_TCP_NAGLE_DISABLE; + } else if (attr == &dev_attr_fnode_tcp_wsf_disable.attr) { + param = ISCSI_FLASHNODE_TCP_WSF_DISABLE; + } else if (attr == &dev_attr_fnode_tcp_timer_scale.attr) { + param = ISCSI_FLASHNODE_TCP_TIMER_SCALE; + } else if (attr == &dev_attr_fnode_tcp_timestamp_enable.attr) { + param = ISCSI_FLASHNODE_TCP_TIMESTAMP_EN; + } else if (attr == &dev_attr_fnode_fragment_disable.attr) { + param = ISCSI_FLASHNODE_IP_FRAG_DISABLE; + } else if (attr == &dev_attr_fnode_max_recv_dlength.attr) { + param = ISCSI_FLASHNODE_MAX_RECV_DLENGTH; + } else if (attr == &dev_attr_fnode_max_xmit_dlength.attr) { + param = ISCSI_FLASHNODE_MAX_XMIT_DLENGTH; + } else if (attr == &dev_attr_fnode_keepalive_tmo.attr) { + param = ISCSI_FLASHNODE_KEEPALIVE_TMO; + } else if (attr == &dev_attr_fnode_port.attr) { + param = ISCSI_FLASHNODE_PORT; + } else if (attr == &dev_attr_fnode_ipaddress.attr) { + param = ISCSI_FLASHNODE_IPADDR; + } else if (attr == &dev_attr_fnode_redirect_ipaddr.attr) { + param = ISCSI_FLASHNODE_REDIRECT_IPADDR; + } else if (attr == &dev_attr_fnode_max_segment_size.attr) { + param = ISCSI_FLASHNODE_MAX_SEGMENT_SIZE; + } else if (attr == &dev_attr_fnode_local_port.attr) { + param = ISCSI_FLASHNODE_LOCAL_PORT; + } else if (attr == &dev_attr_fnode_ipv4_tos.attr) { + param = ISCSI_FLASHNODE_IPV4_TOS; + } else if (attr == &dev_attr_fnode_ipv6_traffic_class.attr) { + param = ISCSI_FLASHNODE_IPV6_TC; + } else if (attr == &dev_attr_fnode_ipv6_flow_label.attr) { + param = ISCSI_FLASHNODE_IPV6_FLOW_LABEL; + } else if (attr == &dev_attr_fnode_link_local_ipv6.attr) { + param = ISCSI_FLASHNODE_LINK_LOCAL_IPV6; + } else if (attr == &dev_attr_fnode_tcp_xmit_wsf.attr) { + param = ISCSI_FLASHNODE_TCP_XMIT_WSF; + } else if (attr == &dev_attr_fnode_tcp_recv_wsf.attr) { + param = ISCSI_FLASHNODE_TCP_RECV_WSF; + } else if (attr == &dev_attr_fnode_statsn.attr) { + param = ISCSI_FLASHNODE_STATSN; + } else if (attr == &dev_attr_fnode_exp_statsn.attr) { + param = ISCSI_FLASHNODE_EXP_STATSN; + } else { + WARN_ONCE(1, "Invalid flashnode connection attr"); + return 0; + } + + return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param); +} + +static struct attribute_group iscsi_flashnode_conn_attr_group = { + .attrs = iscsi_flashnode_conn_attrs, + .is_visible = iscsi_flashnode_conn_attr_is_visible, +}; + +static const 
struct attribute_group *iscsi_flashnode_conn_attr_groups[] = { + &iscsi_flashnode_conn_attr_group, + NULL, +}; + +static void iscsi_flashnode_conn_release(struct device *dev) +{ + struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev); + + kfree(fnode_conn->ipaddress); + kfree(fnode_conn->redirect_ipaddr); + kfree(fnode_conn->link_local_ipv6_addr); + kfree(fnode_conn); +} + +static const struct device_type iscsi_flashnode_conn_dev_type = { + .name = "iscsi_flashnode_conn_dev_type", + .groups = iscsi_flashnode_conn_attr_groups, + .release = iscsi_flashnode_conn_release, +}; + +static struct bus_type iscsi_flashnode_bus; + +int iscsi_flashnode_bus_match(struct device *dev, + struct device_driver *drv) +{ + if (dev->bus == &iscsi_flashnode_bus) + return 1; + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match); + +static struct bus_type iscsi_flashnode_bus = { + .name = "iscsi_flashnode", + .match = &iscsi_flashnode_bus_match, +}; + +/** + * iscsi_create_flashnode_sess - Add flashnode session entry in sysfs + * @shost: pointer to host data + * @index: index of flashnode to add in sysfs + * @transport: pointer to transport data + * @dd_size: total size to allocate + * + * Adds a sysfs entry for the flashnode session attributes + * + * Returns: + * pointer to allocated flashnode sess on success + * %NULL on failure + */ +struct iscsi_bus_flash_session * +iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index, + struct iscsi_transport *transport, + int dd_size) +{ + struct iscsi_bus_flash_session *fnode_sess; + int err; + + fnode_sess = kzalloc(sizeof(*fnode_sess) + dd_size, GFP_KERNEL); + if (!fnode_sess) + return NULL; + + fnode_sess->transport = transport; + fnode_sess->target_id = index; + fnode_sess->dev.type = &iscsi_flashnode_sess_dev_type; + fnode_sess->dev.bus = &iscsi_flashnode_bus; + fnode_sess->dev.parent = &shost->shost_gendev; + dev_set_name(&fnode_sess->dev, "flashnode_sess-%u:%u", + shost->host_no, index); + + err = device_register(&fnode_sess->dev); + if (err) + goto put_dev; + + if (dd_size) + fnode_sess->dd_data = &fnode_sess[1]; + + return fnode_sess; + +put_dev: + put_device(&fnode_sess->dev); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess); + +/** + * iscsi_create_flashnode_conn - Add flashnode conn entry in sysfs + * @shost: pointer to host data + * @fnode_sess: pointer to the parent flashnode session entry + * @transport: pointer to transport data + * @dd_size: total size to allocate + * + * Adds a sysfs entry for the flashnode connection attributes + * + * Returns: + * pointer to allocated flashnode conn on success + * %NULL on failure + */ +struct iscsi_bus_flash_conn * +iscsi_create_flashnode_conn(struct Scsi_Host *shost, + struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_transport *transport, + int dd_size) +{ + struct iscsi_bus_flash_conn *fnode_conn; + int err; + + fnode_conn = kzalloc(sizeof(*fnode_conn) + dd_size, GFP_KERNEL); + if (!fnode_conn) + return NULL; + + fnode_conn->transport = transport; + fnode_conn->dev.type = &iscsi_flashnode_conn_dev_type; + fnode_conn->dev.bus = &iscsi_flashnode_bus; + fnode_conn->dev.parent = &fnode_sess->dev; + dev_set_name(&fnode_conn->dev, "flashnode_conn-%u:%u:0", + shost->host_no, fnode_sess->target_id); + + err = device_register(&fnode_conn->dev); + if (err) + goto put_dev; + + if (dd_size) + fnode_conn->dd_data = &fnode_conn[1]; + + return fnode_conn; + +put_dev: + put_device(&fnode_conn->dev); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn); + +/** 
+ * iscsi_is_flashnode_conn_dev - verify passed device is to be flashnode conn + * @dev: device to verify + * @data: pointer to data containing value to use for verification + * + * Verifies if the passed device is flashnode conn device + * + * Returns: + * 1 on success + * 0 on failure + */ +static int iscsi_is_flashnode_conn_dev(struct device *dev, void *data) +{ + return dev->bus == &iscsi_flashnode_bus; +} + +static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn) +{ + device_unregister(&fnode_conn->dev); + return 0; +} + +static int flashnode_match_index(struct device *dev, void *data) +{ + struct iscsi_bus_flash_session *fnode_sess = NULL; + int ret = 0; + + if (!iscsi_flashnode_bus_match(dev, NULL)) + goto exit_match_index; + + fnode_sess = iscsi_dev_to_flash_session(dev); + ret = (fnode_sess->target_id == *((int *)data)) ? 1 : 0; + +exit_match_index: + return ret; +} + +/** + * iscsi_get_flashnode_by_index -finds flashnode session entry by index + * @shost: pointer to host data + * @idx: index to match + * + * Finds the flashnode session object for the passed index + * + * Returns: + * pointer to found flashnode session object on success + * %NULL on failure + */ +static struct iscsi_bus_flash_session * +iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx) +{ + struct iscsi_bus_flash_session *fnode_sess = NULL; + struct device *dev; + + dev = device_find_child(&shost->shost_gendev, &idx, + flashnode_match_index); + if (dev) + fnode_sess = iscsi_dev_to_flash_session(dev); + + return fnode_sess; +} + +/** + * iscsi_find_flashnode_sess - finds flashnode session entry + * @shost: pointer to host data + * @data: pointer to data containing value to use for comparison + * @fn: function pointer that does actual comparison + * + * Finds the flashnode session object comparing the data passed using logic + * defined in passed function pointer + * + * Returns: + * pointer to found flashnode session device object on success + * %NULL on failure + */ +struct device * +iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data, + int (*fn)(struct device *dev, void *data)) +{ + return device_find_child(&shost->shost_gendev, data, fn); +} +EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess); + +/** + * iscsi_find_flashnode_conn - finds flashnode connection entry + * @fnode_sess: pointer to parent flashnode session entry + * + * Finds the flashnode connection object comparing the data passed using logic + * defined in passed function pointer + * + * Returns: + * pointer to found flashnode connection device object on success + * %NULL on failure + */ +struct device * +iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess) +{ + return device_find_child(&fnode_sess->dev, NULL, + iscsi_is_flashnode_conn_dev); +} +EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn); + +static int iscsi_iter_destroy_flashnode_conn_fn(struct device *dev, void *data) +{ + if (!iscsi_is_flashnode_conn_dev(dev, NULL)) + return 0; + + return iscsi_destroy_flashnode_conn(iscsi_dev_to_flash_conn(dev)); +} + +/** + * iscsi_destroy_flashnode_sess - destroy flashnode session entry + * @fnode_sess: pointer to flashnode session entry to be destroyed + * + * Deletes the flashnode session entry and all children flashnode connection + * entries from sysfs + */ +void iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess) +{ + int err; + + err = device_for_each_child(&fnode_sess->dev, NULL, + iscsi_iter_destroy_flashnode_conn_fn); + if (err) + pr_err("Could not delete all 
connections for %s. Error %d.\n", + fnode_sess->dev.kobj.name, err); + + device_unregister(&fnode_sess->dev); +} +EXPORT_SYMBOL_GPL(iscsi_destroy_flashnode_sess); + +static int iscsi_iter_destroy_flashnode_fn(struct device *dev, void *data) +{ + if (!iscsi_flashnode_bus_match(dev, NULL)) + return 0; + + iscsi_destroy_flashnode_sess(iscsi_dev_to_flash_session(dev)); + return 0; +} + +/** + * iscsi_destroy_all_flashnode - destroy all flashnode session entries + * @shost: pointer to host data + * + * Destroys all the flashnode session entries and all corresponding children + * flashnode connection entries from sysfs + */ +void iscsi_destroy_all_flashnode(struct Scsi_Host *shost) +{ + device_for_each_child(&shost->shost_gendev, NULL, + iscsi_iter_destroy_flashnode_fn); +} +EXPORT_SYMBOL_GPL(iscsi_destroy_all_flashnode); + +/* + * BSG support + */ +/** + * iscsi_bsg_host_dispatch - Dispatch command to LLD. + * @job: bsg job to be processed + */ +static int iscsi_bsg_host_dispatch(struct bsg_job *job) +{ + struct Scsi_Host *shost = iscsi_job_to_shost(job); + struct iscsi_bsg_request *req = job->request; + struct iscsi_bsg_reply *reply = job->reply; + struct iscsi_internal *i = to_iscsi_internal(shost->transportt); + int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ + int ret; + + /* check if we have the msgcode value at least */ + if (job->request_len < sizeof(uint32_t)) { + ret = -ENOMSG; + goto fail_host_msg; + } + + /* Validate the host command */ + switch (req->msgcode) { + case ISCSI_BSG_HST_VENDOR: + cmdlen += sizeof(struct iscsi_bsg_host_vendor); + if ((shost->hostt->vendor_id == 0L) || + (req->rqst_data.h_vendor.vendor_id != + shost->hostt->vendor_id)) { + ret = -ESRCH; + goto fail_host_msg; + } + break; + default: + ret = -EBADR; + goto fail_host_msg; + } + + /* check if we really have all the request data needed */ + if (job->request_len < cmdlen) { + ret = -ENOMSG; + goto fail_host_msg; + } + + ret = i->iscsi_transport->bsg_request(job); + if (!ret) + return 0; + +fail_host_msg: + /* return the errno failure code as the only status */ + BUG_ON(job->reply_len < sizeof(uint32_t)); + reply->reply_payload_rcv_len = 0; + reply->result = ret; + job->reply_len = sizeof(uint32_t); + bsg_job_done(job, ret, 0); + return 0; +} + +/** + * iscsi_bsg_host_add - Create and add the bsg hooks to receive requests + * @shost: shost for iscsi_host + * @ihost: iscsi_cls_host adding the structures to + */ +static int +iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost) +{ + struct device *dev = &shost->shost_gendev; + struct iscsi_internal *i = to_iscsi_internal(shost->transportt); + struct request_queue *q; + char bsg_name[20]; + + if (!i->iscsi_transport->bsg_request) + return -ENOTSUPP; + + snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no); + q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, NULL, 0); + if (IS_ERR(q)) { + shost_printk(KERN_ERR, shost, "bsg interface failed to " + "initialize - no request queue\n"); + return PTR_ERR(q); + } + __scsi_init_queue(shost, q); + + ihost->bsg_q = q; + return 0; +} + +static int iscsi_setup_host(struct transport_container *tc, struct device *dev, + struct device *cdev) +{ + struct Scsi_Host *shost = dev_to_shost(dev); + struct iscsi_cls_host *ihost = shost->shost_data; + + memset(ihost, 0, sizeof(*ihost)); + mutex_init(&ihost->mutex); + + iscsi_bsg_host_add(shost, ihost); + /* ignore any bsg add error - we just can't do sgio */ + + return 0; +} + +static int iscsi_remove_host(struct 
transport_container *tc, + struct device *dev, struct device *cdev) +{ + struct Scsi_Host *shost = dev_to_shost(dev); + struct iscsi_cls_host *ihost = shost->shost_data; + + bsg_remove_queue(ihost->bsg_q); + return 0; +} + +static DECLARE_TRANSPORT_CLASS(iscsi_host_class, + "iscsi_host", + iscsi_setup_host, + iscsi_remove_host, + NULL); + +static DECLARE_TRANSPORT_CLASS(iscsi_session_class, + "iscsi_session", + NULL, + NULL, + NULL); + +static DECLARE_TRANSPORT_CLASS(iscsi_connection_class, + "iscsi_connection", + NULL, + NULL, + NULL); + +static struct sock *nls; +static DEFINE_MUTEX(rx_queue_mutex); + +static LIST_HEAD(sesslist); +static DEFINE_SPINLOCK(sesslock); +static LIST_HEAD(connlist); +static LIST_HEAD(connlist_err); +static DEFINE_SPINLOCK(connlock); + +static uint32_t iscsi_conn_get_sid(struct iscsi_cls_conn *conn) +{ + struct iscsi_cls_session *sess = iscsi_dev_to_session(conn->dev.parent); + return sess->sid; +} + +/* + * Returns the matching session to a given sid + */ +static struct iscsi_cls_session *iscsi_session_lookup(uint32_t sid) +{ + unsigned long flags; + struct iscsi_cls_session *sess; + + spin_lock_irqsave(&sesslock, flags); + list_for_each_entry(sess, &sesslist, sess_list) { + if (sess->sid == sid) { + spin_unlock_irqrestore(&sesslock, flags); + return sess; + } + } + spin_unlock_irqrestore(&sesslock, flags); + return NULL; +} + +/* + * Returns the matching connection to a given sid / cid tuple + */ +static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid) +{ + unsigned long flags; + struct iscsi_cls_conn *conn; + + spin_lock_irqsave(&connlock, flags); + list_for_each_entry(conn, &connlist, conn_list) { + if ((conn->cid == cid) && (iscsi_conn_get_sid(conn) == sid)) { + spin_unlock_irqrestore(&connlock, flags); + return conn; + } + } + spin_unlock_irqrestore(&connlock, flags); + return NULL; +} + +/* + * The following functions can be used by LLDs that allocate + * their own scsi_hosts or by software iscsi LLDs + */ +static struct { + int value; + char *name; +} iscsi_session_state_names[] = { + { ISCSI_SESSION_LOGGED_IN, "LOGGED_IN" }, + { ISCSI_SESSION_FAILED, "FAILED" }, + { ISCSI_SESSION_FREE, "FREE" }, +}; + +static const char *iscsi_session_state_name(int state) +{ + int i; + char *name = NULL; + + for (i = 0; i < ARRAY_SIZE(iscsi_session_state_names); i++) { + if (iscsi_session_state_names[i].value == state) { + name = iscsi_session_state_names[i].name; + break; + } + } + return name; +} + +static char *iscsi_session_target_state_name[] = { + [ISCSI_SESSION_TARGET_UNBOUND] = "UNBOUND", + [ISCSI_SESSION_TARGET_ALLOCATED] = "ALLOCATED", + [ISCSI_SESSION_TARGET_SCANNED] = "SCANNED", + [ISCSI_SESSION_TARGET_UNBINDING] = "UNBINDING", +}; + +int iscsi_session_chkready(struct iscsi_cls_session *session) +{ + int err; + + switch (session->state) { + case ISCSI_SESSION_LOGGED_IN: + err = 0; + break; + case ISCSI_SESSION_FAILED: + err = DID_IMM_RETRY << 16; + break; + case ISCSI_SESSION_FREE: + err = DID_TRANSPORT_FAILFAST << 16; + break; + default: + err = DID_NO_CONNECT << 16; + break; + } + return err; +} +EXPORT_SYMBOL_GPL(iscsi_session_chkready); + +int iscsi_is_session_online(struct iscsi_cls_session *session) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&session->lock, flags); + if (session->state == ISCSI_SESSION_LOGGED_IN) + ret = 1; + spin_unlock_irqrestore(&session->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(iscsi_is_session_online); + +static void iscsi_session_release(struct device *dev) +{ + struct 
iscsi_cls_session *session = iscsi_dev_to_session(dev); + struct Scsi_Host *shost; + + shost = iscsi_session_to_shost(session); + scsi_host_put(shost); + ISCSI_DBG_TRANS_SESSION(session, "Completing session release\n"); + kfree(session); +} + +int iscsi_is_session_dev(const struct device *dev) +{ + return dev->release == iscsi_session_release; +} +EXPORT_SYMBOL_GPL(iscsi_is_session_dev); + +static int iscsi_iter_session_fn(struct device *dev, void *data) +{ + void (* fn) (struct iscsi_cls_session *) = data; + + if (!iscsi_is_session_dev(dev)) + return 0; + fn(iscsi_dev_to_session(dev)); + return 0; +} + +void iscsi_host_for_each_session(struct Scsi_Host *shost, + void (*fn)(struct iscsi_cls_session *)) +{ + device_for_each_child(&shost->shost_gendev, fn, + iscsi_iter_session_fn); +} +EXPORT_SYMBOL_GPL(iscsi_host_for_each_session); + +struct iscsi_scan_data { + unsigned int channel; + unsigned int id; + u64 lun; + enum scsi_scan_mode rescan; +}; + +static int iscsi_user_scan_session(struct device *dev, void *data) +{ + struct iscsi_scan_data *scan_data = data; + struct iscsi_cls_session *session; + struct Scsi_Host *shost; + struct iscsi_cls_host *ihost; + unsigned long flags; + unsigned int id; + + if (!iscsi_is_session_dev(dev)) + return 0; + + session = iscsi_dev_to_session(dev); + + ISCSI_DBG_TRANS_SESSION(session, "Scanning session\n"); + + shost = iscsi_session_to_shost(session); + ihost = shost->shost_data; + + mutex_lock(&ihost->mutex); + spin_lock_irqsave(&session->lock, flags); + if (session->state != ISCSI_SESSION_LOGGED_IN) { + spin_unlock_irqrestore(&session->lock, flags); + goto user_scan_exit; + } + id = session->target_id; + spin_unlock_irqrestore(&session->lock, flags); + + if (id != ISCSI_MAX_TARGET) { + if ((scan_data->channel == SCAN_WILD_CARD || + scan_data->channel == 0) && + (scan_data->id == SCAN_WILD_CARD || + scan_data->id == id)) { + scsi_scan_target(&session->dev, 0, id, + scan_data->lun, scan_data->rescan); + spin_lock_irqsave(&session->lock, flags); + session->target_state = ISCSI_SESSION_TARGET_SCANNED; + spin_unlock_irqrestore(&session->lock, flags); + } + } + +user_scan_exit: + mutex_unlock(&ihost->mutex); + ISCSI_DBG_TRANS_SESSION(session, "Completed session scan\n"); + return 0; +} + +static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, + uint id, u64 lun) +{ + struct iscsi_scan_data scan_data; + + scan_data.channel = channel; + scan_data.id = id; + scan_data.lun = lun; + scan_data.rescan = SCSI_SCAN_MANUAL; + + return device_for_each_child(&shost->shost_gendev, &scan_data, + iscsi_user_scan_session); +} + +static void iscsi_scan_session(struct work_struct *work) +{ + struct iscsi_cls_session *session = + container_of(work, struct iscsi_cls_session, scan_work); + struct iscsi_scan_data scan_data; + + scan_data.channel = 0; + scan_data.id = SCAN_WILD_CARD; + scan_data.lun = SCAN_WILD_CARD; + scan_data.rescan = SCSI_SCAN_RESCAN; + + iscsi_user_scan_session(&session->dev, &scan_data); +} + +/** + * iscsi_block_scsi_eh - block scsi eh until session state has transistioned + * @cmd: scsi cmd passed to scsi eh handler + * + * If the session is down this function will wait for the recovery + * timer to fire or for the session to be logged back in. If the + * recovery timer fires then FAST_IO_FAIL is returned. The caller + * should pass this error value to the scsi eh. 
+ */ +int iscsi_block_scsi_eh(struct scsi_cmnd *cmd) +{ + struct iscsi_cls_session *session = + starget_to_session(scsi_target(cmd->device)); + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&session->lock, flags); + while (session->state != ISCSI_SESSION_LOGGED_IN) { + if (session->state == ISCSI_SESSION_FREE) { + ret = FAST_IO_FAIL; + break; + } + spin_unlock_irqrestore(&session->lock, flags); + msleep(1000); + spin_lock_irqsave(&session->lock, flags); + } + spin_unlock_irqrestore(&session->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(iscsi_block_scsi_eh); + +static void session_recovery_timedout(struct work_struct *work) +{ + struct iscsi_cls_session *session = + container_of(work, struct iscsi_cls_session, + recovery_work.work); + unsigned long flags; + + iscsi_cls_session_printk(KERN_INFO, session, + "session recovery timed out after %d secs\n", + session->recovery_tmo); + + spin_lock_irqsave(&session->lock, flags); + switch (session->state) { + case ISCSI_SESSION_FAILED: + session->state = ISCSI_SESSION_FREE; + break; + case ISCSI_SESSION_LOGGED_IN: + case ISCSI_SESSION_FREE: + /* we raced with the unblock's flush */ + spin_unlock_irqrestore(&session->lock, flags); + return; + } + spin_unlock_irqrestore(&session->lock, flags); + + ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n"); + scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); + ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n"); + + if (session->transport->session_recovery_timedout) + session->transport->session_recovery_timedout(session); +} + +static void __iscsi_unblock_session(struct work_struct *work) +{ + struct iscsi_cls_session *session = + container_of(work, struct iscsi_cls_session, + unblock_work); + unsigned long flags; + + ISCSI_DBG_TRANS_SESSION(session, "Unblocking session\n"); + + cancel_delayed_work_sync(&session->recovery_work); + spin_lock_irqsave(&session->lock, flags); + session->state = ISCSI_SESSION_LOGGED_IN; + spin_unlock_irqrestore(&session->lock, flags); + /* start IO */ + scsi_target_unblock(&session->dev, SDEV_RUNNING); + ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking session\n"); +} + +/** + * iscsi_unblock_session - set a session as logged in and start IO. + * @session: iscsi session + * + * Mark a session as ready to accept IO. + */ +void iscsi_unblock_session(struct iscsi_cls_session *session) +{ + if (!cancel_work_sync(&session->block_work)) + cancel_delayed_work_sync(&session->recovery_work); + + queue_work(session->workq, &session->unblock_work); + /* + * Blocking the session can be done from any context so we only + * queue the block work. Make sure the unblock work has completed + * because it flushes/cancels the other works and updates the state. 
+ */ + flush_work(&session->unblock_work); +} +EXPORT_SYMBOL_GPL(iscsi_unblock_session); + +static void __iscsi_block_session(struct work_struct *work) +{ + struct iscsi_cls_session *session = + container_of(work, struct iscsi_cls_session, + block_work); + struct Scsi_Host *shost = iscsi_session_to_shost(session); + unsigned long flags; + + ISCSI_DBG_TRANS_SESSION(session, "Blocking session\n"); + spin_lock_irqsave(&session->lock, flags); + session->state = ISCSI_SESSION_FAILED; + spin_unlock_irqrestore(&session->lock, flags); + scsi_block_targets(shost, &session->dev); + ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n"); + if (session->recovery_tmo >= 0) + queue_delayed_work(session->workq, + &session->recovery_work, + session->recovery_tmo * HZ); +} + +void iscsi_block_session(struct iscsi_cls_session *session) +{ + queue_work(session->workq, &session->block_work); +} +EXPORT_SYMBOL_GPL(iscsi_block_session); + +static void __iscsi_unbind_session(struct work_struct *work) +{ + struct iscsi_cls_session *session = + container_of(work, struct iscsi_cls_session, + unbind_work); + struct Scsi_Host *shost = iscsi_session_to_shost(session); + struct iscsi_cls_host *ihost = shost->shost_data; + unsigned long flags; + unsigned int target_id; + bool remove_target = true; + + ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n"); + + /* Prevent new scans and make sure scanning is not in progress */ + mutex_lock(&ihost->mutex); + spin_lock_irqsave(&session->lock, flags); + if (session->target_state == ISCSI_SESSION_TARGET_ALLOCATED) { + remove_target = false; + } else if (session->target_state != ISCSI_SESSION_TARGET_SCANNED) { + spin_unlock_irqrestore(&session->lock, flags); + mutex_unlock(&ihost->mutex); + ISCSI_DBG_TRANS_SESSION(session, + "Skipping target unbinding: Session is unbound/unbinding.\n"); + return; + } + + session->target_state = ISCSI_SESSION_TARGET_UNBINDING; + target_id = session->target_id; + session->target_id = ISCSI_MAX_TARGET; + spin_unlock_irqrestore(&session->lock, flags); + mutex_unlock(&ihost->mutex); + + if (remove_target) + scsi_remove_target(&session->dev); + + if (session->ida_used) + ida_free(&iscsi_sess_ida, target_id); + + iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION); + ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n"); + + spin_lock_irqsave(&session->lock, flags); + session->target_state = ISCSI_SESSION_TARGET_UNBOUND; + spin_unlock_irqrestore(&session->lock, flags); +} + +static void __iscsi_destroy_session(struct work_struct *work) +{ + struct iscsi_cls_session *session = + container_of(work, struct iscsi_cls_session, destroy_work); + + session->transport->destroy_session(session); +} + +struct iscsi_cls_session * +iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport, + int dd_size) +{ + struct iscsi_cls_session *session; + + session = kzalloc(sizeof(*session) + dd_size, + GFP_KERNEL); + if (!session) + return NULL; + + session->transport = transport; + session->creator = -1; + session->recovery_tmo = 120; + session->recovery_tmo_sysfs_override = false; + session->state = ISCSI_SESSION_FREE; + INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); + INIT_LIST_HEAD(&session->sess_list); + INIT_WORK(&session->unblock_work, __iscsi_unblock_session); + INIT_WORK(&session->block_work, __iscsi_block_session); + INIT_WORK(&session->unbind_work, __iscsi_unbind_session); + INIT_WORK(&session->scan_work, iscsi_scan_session); + INIT_WORK(&session->destroy_work, __iscsi_destroy_session); 
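	/*
	 * Editor's annotation (not part of the upstream patch): the work
	 * items initialized above drive the session state machine defined
	 * earlier in this file. recovery_work runs
	 * session_recovery_timedout() after recovery_tmo seconds,
	 * block_work and unblock_work run __iscsi_block_session() and
	 * __iscsi_unblock_session() to move between ISCSI_SESSION_FAILED
	 * and ISCSI_SESSION_LOGGED_IN, unbind_work runs
	 * __iscsi_unbind_session() to drop the scsi target, scan_work runs
	 * iscsi_scan_session(), and destroy_work runs
	 * __iscsi_destroy_session() for the asynchronous destroy path.
	 */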
+ spin_lock_init(&session->lock); + + /* this is released in the dev's release function */ + scsi_host_get(shost); + session->dev.parent = &shost->shost_gendev; + session->dev.release = iscsi_session_release; + device_initialize(&session->dev); + if (dd_size) + session->dd_data = &session[1]; + + ISCSI_DBG_TRANS_SESSION(session, "Completed session allocation\n"); + return session; +} +EXPORT_SYMBOL_GPL(iscsi_alloc_session); + +int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) +{ + struct Scsi_Host *shost = iscsi_session_to_shost(session); + unsigned long flags; + int id = 0; + int err; + + session->sid = atomic_add_return(1, &iscsi_session_nr); + + session->workq = alloc_workqueue("iscsi_ctrl_%d:%d", + WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0, + shost->host_no, session->sid); + if (!session->workq) + return -ENOMEM; + + if (target_id == ISCSI_MAX_TARGET) { + id = ida_alloc(&iscsi_sess_ida, GFP_KERNEL); + + if (id < 0) { + iscsi_cls_session_printk(KERN_ERR, session, + "Failure in Target ID Allocation\n"); + err = id; + goto destroy_wq; + } + session->target_id = (unsigned int)id; + session->ida_used = true; + } else + session->target_id = target_id; + spin_lock_irqsave(&session->lock, flags); + session->target_state = ISCSI_SESSION_TARGET_ALLOCATED; + spin_unlock_irqrestore(&session->lock, flags); + + dev_set_name(&session->dev, "session%u", session->sid); + err = device_add(&session->dev); + if (err) { + iscsi_cls_session_printk(KERN_ERR, session, + "could not register session's dev\n"); + goto release_ida; + } + err = transport_register_device(&session->dev); + if (err) { + iscsi_cls_session_printk(KERN_ERR, session, + "could not register transport's dev\n"); + goto release_dev; + } + + spin_lock_irqsave(&sesslock, flags); + list_add(&session->sess_list, &sesslist); + spin_unlock_irqrestore(&sesslock, flags); + + iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION); + ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n"); + return 0; + +release_dev: + device_del(&session->dev); +release_ida: + if (session->ida_used) + ida_free(&iscsi_sess_ida, session->target_id); +destroy_wq: + destroy_workqueue(session->workq); + return err; +} +EXPORT_SYMBOL_GPL(iscsi_add_session); + +/** + * iscsi_create_session - create iscsi class session + * @shost: scsi host + * @transport: iscsi transport + * @dd_size: private driver data size + * @target_id: which target + * + * This can be called from a LLD or iscsi_transport. 
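 *
 * (Editor's illustration; not part of the upstream kernel-doc.) A typical
 * software-iSCSI caller would do roughly:
 *
 *	session = iscsi_create_session(shost, &my_iscsi_transport,
 *				       sizeof(struct my_session_data),
 *				       ISCSI_MAX_TARGET);
 *
 * where my_iscsi_transport and my_session_data are hypothetical LLD
 * definitions. Passing ISCSI_MAX_TARGET as @target_id makes
 * iscsi_add_session() allocate a target id from iscsi_sess_ida, and the
 * dd_size bytes are made available through session->dd_data.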
+ */ +struct iscsi_cls_session * +iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport, + int dd_size, unsigned int target_id) +{ + struct iscsi_cls_session *session; + + session = iscsi_alloc_session(shost, transport, dd_size); + if (!session) + return NULL; + + if (iscsi_add_session(session, target_id)) { + iscsi_free_session(session); + return NULL; + } + return session; +} +EXPORT_SYMBOL_GPL(iscsi_create_session); + +static void iscsi_conn_release(struct device *dev) +{ + struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev); + struct device *parent = conn->dev.parent; + + ISCSI_DBG_TRANS_CONN(conn, "Releasing conn\n"); + kfree(conn); + put_device(parent); +} + +static int iscsi_is_conn_dev(const struct device *dev) +{ + return dev->release == iscsi_conn_release; +} + +static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data) +{ + if (!iscsi_is_conn_dev(dev)) + return 0; + + iscsi_remove_conn(iscsi_dev_to_conn(dev)); + return 0; +} + +void iscsi_remove_session(struct iscsi_cls_session *session) +{ + unsigned long flags; + int err; + + ISCSI_DBG_TRANS_SESSION(session, "Removing session\n"); + + spin_lock_irqsave(&sesslock, flags); + if (!list_empty(&session->sess_list)) + list_del(&session->sess_list); + spin_unlock_irqrestore(&sesslock, flags); + + if (!cancel_work_sync(&session->block_work)) + cancel_delayed_work_sync(&session->recovery_work); + cancel_work_sync(&session->unblock_work); + /* + * If we are blocked let commands flow again. The lld or iscsi + * layer should set up the queuecommand to fail commands. + * We assume that LLD will not be calling block/unblock while + * removing the session. + */ + spin_lock_irqsave(&session->lock, flags); + session->state = ISCSI_SESSION_FREE; + spin_unlock_irqrestore(&session->lock, flags); + + scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); + /* + * qla4xxx can perform it's own scans when it runs in kernel only + * mode. Make sure to flush those scans. + */ + flush_work(&session->scan_work); + /* flush running unbind operations */ + flush_work(&session->unbind_work); + __iscsi_unbind_session(&session->unbind_work); + + /* hw iscsi may not have removed all connections from session */ + err = device_for_each_child(&session->dev, NULL, + iscsi_iter_destroy_conn_fn); + if (err) + iscsi_cls_session_printk(KERN_ERR, session, + "Could not delete all connections " + "for session. 
Error %d.\n", err); + + transport_unregister_device(&session->dev); + + destroy_workqueue(session->workq); + + ISCSI_DBG_TRANS_SESSION(session, "Completing session removal\n"); + device_del(&session->dev); +} +EXPORT_SYMBOL_GPL(iscsi_remove_session); + +static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag) +{ + ISCSI_DBG_TRANS_CONN(conn, "Stopping conn.\n"); + + switch (flag) { + case STOP_CONN_RECOVER: + WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); + break; + case STOP_CONN_TERM: + WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); + break; + default: + iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n", + flag); + return; + } + + conn->transport->stop_conn(conn, flag); + ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n"); +} + +static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active) +{ + struct iscsi_cls_session *session = iscsi_conn_to_session(conn); + struct iscsi_endpoint *ep; + + ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n"); + WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); + + if (!conn->ep || !session->transport->ep_disconnect) + return; + + ep = conn->ep; + conn->ep = NULL; + + session->transport->unbind_conn(conn, is_active); + session->transport->ep_disconnect(ep); + ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n"); +} + +static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn, + struct iscsi_endpoint *ep, + bool is_active) +{ + /* Check if this was a conn error and the kernel took ownership */ + spin_lock_irq(&conn->lock); + if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { + spin_unlock_irq(&conn->lock); + iscsi_ep_disconnect(conn, is_active); + } else { + spin_unlock_irq(&conn->lock); + ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n"); + mutex_unlock(&conn->ep_mutex); + + flush_work(&conn->cleanup_work); + /* + * Userspace is now done with the EP so we can release the ref + * iscsi_cleanup_conn_work_fn took. + */ + iscsi_put_endpoint(ep); + mutex_lock(&conn->ep_mutex); + } +} + +static int iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag) +{ + ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n"); + /* + * For offload, iscsid may not know about the ep like when iscsid is + * restarted or for kernel based session shutdown iscsid is not even + * up. For these cases, we do the disconnect now. + */ + mutex_lock(&conn->ep_mutex); + if (conn->ep) + iscsi_if_disconnect_bound_ep(conn, conn->ep, true); + mutex_unlock(&conn->ep_mutex); + + /* + * If this is a termination we have to call stop_conn with that flag + * so the correct states get set. If we haven't run the work yet try to + * avoid the extra run. + */ + if (flag == STOP_CONN_TERM) { + cancel_work_sync(&conn->cleanup_work); + iscsi_stop_conn(conn, flag); + } else { + /* + * Figure out if it was the kernel or userspace initiating this. + */ + spin_lock_irq(&conn->lock); + if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { + spin_unlock_irq(&conn->lock); + iscsi_stop_conn(conn, flag); + } else { + spin_unlock_irq(&conn->lock); + ISCSI_DBG_TRANS_CONN(conn, + "flush kernel conn cleanup.\n"); + flush_work(&conn->cleanup_work); + } + /* + * Only clear for recovery to avoid extra cleanup runs during + * termination. 
+ */ + spin_lock_irq(&conn->lock); + clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags); + spin_unlock_irq(&conn->lock); + } + ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n"); + return 0; +} + +static void iscsi_cleanup_conn_work_fn(struct work_struct *work) +{ + struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn, + cleanup_work); + struct iscsi_cls_session *session = iscsi_conn_to_session(conn); + + mutex_lock(&conn->ep_mutex); + /* + * Get a ref to the ep, so we don't release its ID until after + * userspace is done referencing it in iscsi_if_disconnect_bound_ep. + */ + if (conn->ep) + get_device(&conn->ep->dev); + iscsi_ep_disconnect(conn, false); + + if (system_state != SYSTEM_RUNNING) { + /* + * If the user has set up for the session to never timeout + * then hang like they wanted. For all other cases fail right + * away since userspace is not going to relogin. + */ + if (session->recovery_tmo > 0) + session->recovery_tmo = 0; + } + + iscsi_stop_conn(conn, STOP_CONN_RECOVER); + mutex_unlock(&conn->ep_mutex); + ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n"); +} + +static int iscsi_iter_force_destroy_conn_fn(struct device *dev, void *data) +{ + struct iscsi_transport *transport; + struct iscsi_cls_conn *conn; + + if (!iscsi_is_conn_dev(dev)) + return 0; + + conn = iscsi_dev_to_conn(dev); + transport = conn->transport; + + if (READ_ONCE(conn->state) != ISCSI_CONN_DOWN) + iscsi_if_stop_conn(conn, STOP_CONN_TERM); + + transport->destroy_conn(conn); + return 0; +} + +/** + * iscsi_force_destroy_session - destroy a session from the kernel + * @session: session to destroy + * + * Force the destruction of a session from the kernel. This should only be + * used when userspace is no longer running during system shutdown. + */ +void iscsi_force_destroy_session(struct iscsi_cls_session *session) +{ + struct iscsi_transport *transport = session->transport; + unsigned long flags; + + WARN_ON_ONCE(system_state == SYSTEM_RUNNING); + + spin_lock_irqsave(&sesslock, flags); + if (list_empty(&session->sess_list)) { + spin_unlock_irqrestore(&sesslock, flags); + /* + * Conn/ep is already freed. Session is being torn down via + * async path. For shutdown we don't care about it so return. 
+ */ + return; + } + spin_unlock_irqrestore(&sesslock, flags); + + device_for_each_child(&session->dev, NULL, + iscsi_iter_force_destroy_conn_fn); + transport->destroy_session(session); +} +EXPORT_SYMBOL_GPL(iscsi_force_destroy_session); + +void iscsi_free_session(struct iscsi_cls_session *session) +{ + ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n"); + iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION); + put_device(&session->dev); +} +EXPORT_SYMBOL_GPL(iscsi_free_session); + +/** + * iscsi_alloc_conn - alloc iscsi class connection + * @session: iscsi cls session + * @dd_size: private driver data size + * @cid: connection id + */ +struct iscsi_cls_conn * +iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) +{ + struct iscsi_transport *transport = session->transport; + struct iscsi_cls_conn *conn; + + conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL); + if (!conn) + return NULL; + if (dd_size) + conn->dd_data = &conn[1]; + + mutex_init(&conn->ep_mutex); + spin_lock_init(&conn->lock); + INIT_LIST_HEAD(&conn->conn_list); + INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn); + conn->transport = transport; + conn->cid = cid; + WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); + + /* this is released in the dev's release function */ + if (!get_device(&session->dev)) + goto free_conn; + + dev_set_name(&conn->dev, "connection%d:%u", session->sid, cid); + device_initialize(&conn->dev); + conn->dev.parent = &session->dev; + conn->dev.release = iscsi_conn_release; + + return conn; + +free_conn: + kfree(conn); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_alloc_conn); + +/** + * iscsi_add_conn - add iscsi class connection + * @conn: iscsi cls connection + * + * This will expose iscsi_cls_conn to sysfs so make sure the related + * resources for sysfs attributes are initialized before calling this. + */ +int iscsi_add_conn(struct iscsi_cls_conn *conn) +{ + int err; + unsigned long flags; + struct iscsi_cls_session *session = iscsi_dev_to_session(conn->dev.parent); + + err = device_add(&conn->dev); + if (err) { + iscsi_cls_session_printk(KERN_ERR, session, + "could not register connection's dev\n"); + return err; + } + err = transport_register_device(&conn->dev); + if (err) { + iscsi_cls_session_printk(KERN_ERR, session, + "could not register transport's dev\n"); + device_del(&conn->dev); + return err; + } + + spin_lock_irqsave(&connlock, flags); + list_add(&conn->conn_list, &connlist); + spin_unlock_irqrestore(&connlock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_add_conn); + +/** + * iscsi_remove_conn - remove iscsi class connection from sysfs + * @conn: iscsi cls connection + * + * Remove iscsi_cls_conn from sysfs, and wait for previous + * read/write of iscsi_cls_conn's attributes in sysfs to finish. 
+ */ +void iscsi_remove_conn(struct iscsi_cls_conn *conn) +{ + unsigned long flags; + + spin_lock_irqsave(&connlock, flags); + list_del(&conn->conn_list); + spin_unlock_irqrestore(&connlock, flags); + + transport_unregister_device(&conn->dev); + device_del(&conn->dev); +} +EXPORT_SYMBOL_GPL(iscsi_remove_conn); + +void iscsi_put_conn(struct iscsi_cls_conn *conn) +{ + put_device(&conn->dev); +} +EXPORT_SYMBOL_GPL(iscsi_put_conn); + +void iscsi_get_conn(struct iscsi_cls_conn *conn) +{ + get_device(&conn->dev); +} +EXPORT_SYMBOL_GPL(iscsi_get_conn); + +/* + * iscsi interface functions + */ +static struct iscsi_internal * +iscsi_if_transport_lookup(struct iscsi_transport *tt) +{ + struct iscsi_internal *priv; + unsigned long flags; + + spin_lock_irqsave(&iscsi_transport_lock, flags); + list_for_each_entry(priv, &iscsi_transports, list) { + if (tt == priv->iscsi_transport) { + spin_unlock_irqrestore(&iscsi_transport_lock, flags); + return priv; + } + } + spin_unlock_irqrestore(&iscsi_transport_lock, flags); + return NULL; +} + +static int +iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp) +{ + return nlmsg_multicast(nls, skb, 0, group, gfp); +} + +static int +iscsi_unicast_skb(struct sk_buff *skb, u32 portid) +{ + return nlmsg_unicast(nls, skb, portid); +} + +int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, + char *data, uint32_t data_size) +{ + struct nlmsghdr *nlh; + struct sk_buff *skb; + struct iscsi_uevent *ev; + char *pdu; + struct iscsi_internal *priv; + int len = nlmsg_total_size(sizeof(*ev) + sizeof(struct iscsi_hdr) + + data_size); + + priv = iscsi_if_transport_lookup(conn->transport); + if (!priv) + return -EINVAL; + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) { + iscsi_conn_error_event(conn, ISCSI_ERR_CONN_FAILED); + iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver " + "control PDU: OOM\n"); + return -ENOMEM; + } + + nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); + ev = nlmsg_data(nlh); + memset(ev, 0, sizeof(*ev)); + ev->transport_handle = iscsi_handle(conn->transport); + ev->type = ISCSI_KEVENT_RECV_PDU; + ev->r.recv_req.cid = conn->cid; + ev->r.recv_req.sid = iscsi_conn_get_sid(conn); + pdu = (char*)ev + sizeof(*ev); + memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); + memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); + + return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); +} +EXPORT_SYMBOL_GPL(iscsi_recv_pdu); + +int iscsi_offload_mesg(struct Scsi_Host *shost, + struct iscsi_transport *transport, uint32_t type, + char *data, uint16_t data_size) +{ + struct nlmsghdr *nlh; + struct sk_buff *skb; + struct iscsi_uevent *ev; + int len = nlmsg_total_size(sizeof(*ev) + data_size); + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) { + printk(KERN_ERR "can not deliver iscsi offload message:OOM\n"); + return -ENOMEM; + } + + nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); + ev = nlmsg_data(nlh); + memset(ev, 0, sizeof(*ev)); + ev->type = type; + ev->transport_handle = iscsi_handle(transport); + switch (type) { + case ISCSI_KEVENT_PATH_REQ: + ev->r.req_path.host_no = shost->host_no; + break; + case ISCSI_KEVENT_IF_DOWN: + ev->r.notify_if_down.host_no = shost->host_no; + break; + } + + memcpy((char *)ev + sizeof(*ev), data, data_size); + + return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_ATOMIC); +} +EXPORT_SYMBOL_GPL(iscsi_offload_mesg); + +void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) +{ + struct nlmsghdr *nlh; + struct sk_buff *skb; + struct iscsi_uevent *ev; + 
struct iscsi_internal *priv; + int len = nlmsg_total_size(sizeof(*ev)); + unsigned long flags; + int state; + + spin_lock_irqsave(&conn->lock, flags); + /* + * Userspace will only do a stop call if we are at least bound. And, we + * only need to do the in kernel cleanup if in the UP state so cmds can + * be released to upper layers. If in other states just wait for + * userspace to avoid races that can leave the cleanup_work queued. + */ + state = READ_ONCE(conn->state); + switch (state) { + case ISCSI_CONN_BOUND: + case ISCSI_CONN_UP: + if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, + &conn->flags)) { + queue_work(iscsi_conn_cleanup_workq, + &conn->cleanup_work); + } + break; + default: + ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n", + state); + break; + } + spin_unlock_irqrestore(&conn->lock, flags); + + priv = iscsi_if_transport_lookup(conn->transport); + if (!priv) + return; + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) { + iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " + "conn error (%d)\n", error); + return; + } + + nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); + ev = nlmsg_data(nlh); + ev->transport_handle = iscsi_handle(conn->transport); + ev->type = ISCSI_KEVENT_CONN_ERROR; + ev->r.connerror.error = error; + ev->r.connerror.cid = conn->cid; + ev->r.connerror.sid = iscsi_conn_get_sid(conn); + + iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); + + iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n", + error); +} +EXPORT_SYMBOL_GPL(iscsi_conn_error_event); + +void iscsi_conn_login_event(struct iscsi_cls_conn *conn, + enum iscsi_conn_state state) +{ + struct nlmsghdr *nlh; + struct sk_buff *skb; + struct iscsi_uevent *ev; + struct iscsi_internal *priv; + int len = nlmsg_total_size(sizeof(*ev)); + + priv = iscsi_if_transport_lookup(conn->transport); + if (!priv) + return; + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) { + iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " + "conn login (%d)\n", state); + return; + } + + nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); + ev = nlmsg_data(nlh); + ev->transport_handle = iscsi_handle(conn->transport); + ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE; + ev->r.conn_login.state = state; + ev->r.conn_login.cid = conn->cid; + ev->r.conn_login.sid = iscsi_conn_get_sid(conn); + iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); + + iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn login (%d)\n", + state); +} +EXPORT_SYMBOL_GPL(iscsi_conn_login_event); + +void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport, + enum iscsi_host_event_code code, uint32_t data_size, + uint8_t *data) +{ + struct nlmsghdr *nlh; + struct sk_buff *skb; + struct iscsi_uevent *ev; + int len = nlmsg_total_size(sizeof(*ev) + data_size); + + skb = alloc_skb(len, GFP_NOIO); + if (!skb) { + printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n", + host_no, code); + return; + } + + nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); + ev = nlmsg_data(nlh); + ev->transport_handle = iscsi_handle(transport); + ev->type = ISCSI_KEVENT_HOST_EVENT; + ev->r.host_event.host_no = host_no; + ev->r.host_event.code = code; + ev->r.host_event.data_size = data_size; + + if (data_size) + memcpy((char *)ev + sizeof(*ev), data, data_size); + + iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO); +} +EXPORT_SYMBOL_GPL(iscsi_post_host_event); + +void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport, + uint32_t status, uint32_t pid, 
uint32_t data_size, + uint8_t *data) +{ + struct nlmsghdr *nlh; + struct sk_buff *skb; + struct iscsi_uevent *ev; + int len = nlmsg_total_size(sizeof(*ev) + data_size); + + skb = alloc_skb(len, GFP_NOIO); + if (!skb) { + printk(KERN_ERR "gracefully ignored ping comp: OOM\n"); + return; + } + + nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); + ev = nlmsg_data(nlh); + ev->transport_handle = iscsi_handle(transport); + ev->type = ISCSI_KEVENT_PING_COMP; + ev->r.ping_comp.host_no = host_no; + ev->r.ping_comp.status = status; + ev->r.ping_comp.pid = pid; + ev->r.ping_comp.data_size = data_size; + memcpy((char *)ev + sizeof(*ev), data, data_size); + + iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO); +} +EXPORT_SYMBOL_GPL(iscsi_ping_comp_event); + +static int +iscsi_if_send_reply(u32 portid, int type, void *payload, int size) +{ + struct sk_buff *skb; + struct nlmsghdr *nlh; + int len = nlmsg_total_size(size); + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) { + printk(KERN_ERR "Could not allocate skb to send reply.\n"); + return -ENOMEM; + } + + nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0); + memcpy(nlmsg_data(nlh), payload, size); + return iscsi_unicast_skb(skb, portid); +} + +static int +iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) +{ + struct iscsi_uevent *ev = nlmsg_data(nlh); + struct iscsi_stats *stats; + struct sk_buff *skbstat; + struct iscsi_cls_conn *conn; + struct nlmsghdr *nlhstat; + struct iscsi_uevent *evstat; + struct iscsi_internal *priv; + int len = nlmsg_total_size(sizeof(*ev) + + sizeof(struct iscsi_stats) + + sizeof(struct iscsi_stats_custom) * + ISCSI_STATS_CUSTOM_MAX); + int err = 0; + + priv = iscsi_if_transport_lookup(transport); + if (!priv) + return -EINVAL; + + conn = iscsi_conn_lookup(ev->u.get_stats.sid, ev->u.get_stats.cid); + if (!conn) + return -EEXIST; + + do { + int actual_size; + + skbstat = alloc_skb(len, GFP_ATOMIC); + if (!skbstat) { + iscsi_cls_conn_printk(KERN_ERR, conn, "can not " + "deliver stats: OOM\n"); + return -ENOMEM; + } + + nlhstat = __nlmsg_put(skbstat, 0, 0, 0, + (len - sizeof(*nlhstat)), 0); + evstat = nlmsg_data(nlhstat); + memset(evstat, 0, sizeof(*evstat)); + evstat->transport_handle = iscsi_handle(conn->transport); + evstat->type = nlh->nlmsg_type; + evstat->u.get_stats.cid = + ev->u.get_stats.cid; + evstat->u.get_stats.sid = + ev->u.get_stats.sid; + stats = (struct iscsi_stats *) + ((char*)evstat + sizeof(*evstat)); + memset(stats, 0, sizeof(*stats)); + + transport->get_stats(conn, stats); + actual_size = nlmsg_total_size(sizeof(struct iscsi_uevent) + + sizeof(struct iscsi_stats) + + sizeof(struct iscsi_stats_custom) * + stats->custom_length); + actual_size -= sizeof(*nlhstat); + actual_size = nlmsg_msg_size(actual_size); + skb_trim(skbstat, NLMSG_ALIGN(actual_size)); + nlhstat->nlmsg_len = actual_size; + + err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID, + GFP_ATOMIC); + } while (err < 0 && err != -ECONNREFUSED); + + return err; +} + +/** + * iscsi_session_event - send session destr. 
completion event + * @session: iscsi class session + * @event: type of event + */ +int iscsi_session_event(struct iscsi_cls_session *session, + enum iscsi_uevent_e event) +{ + struct iscsi_internal *priv; + struct Scsi_Host *shost; + struct iscsi_uevent *ev; + struct sk_buff *skb; + struct nlmsghdr *nlh; + int rc, len = nlmsg_total_size(sizeof(*ev)); + + priv = iscsi_if_transport_lookup(session->transport); + if (!priv) + return -EINVAL; + shost = iscsi_session_to_shost(session); + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) { + iscsi_cls_session_printk(KERN_ERR, session, + "Cannot notify userspace of session " + "event %u\n", event); + return -ENOMEM; + } + + nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); + ev = nlmsg_data(nlh); + ev->transport_handle = iscsi_handle(session->transport); + + ev->type = event; + switch (event) { + case ISCSI_KEVENT_DESTROY_SESSION: + ev->r.d_session.host_no = shost->host_no; + ev->r.d_session.sid = session->sid; + break; + case ISCSI_KEVENT_CREATE_SESSION: + ev->r.c_session_ret.host_no = shost->host_no; + ev->r.c_session_ret.sid = session->sid; + break; + case ISCSI_KEVENT_UNBIND_SESSION: + ev->r.unbind_session.host_no = shost->host_no; + ev->r.unbind_session.sid = session->sid; + break; + default: + iscsi_cls_session_printk(KERN_ERR, session, "Invalid event " + "%u.\n", event); + kfree_skb(skb); + return -EINVAL; + } + + /* + * this will occur if the daemon is not up, so we just warn + * the user and when the daemon is restarted it will handle it + */ + rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); + if (rc == -ESRCH) + iscsi_cls_session_printk(KERN_ERR, session, + "Cannot notify userspace of session " + "event %u. Check iscsi daemon\n", + event); + + ISCSI_DBG_TRANS_SESSION(session, "Completed handling event %d rc %d\n", + event, rc); + return rc; +} +EXPORT_SYMBOL_GPL(iscsi_session_event); + +static int +iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep, + struct iscsi_uevent *ev, pid_t pid, + uint32_t initial_cmdsn, uint16_t cmds_max, + uint16_t queue_depth) +{ + struct iscsi_transport *transport = priv->iscsi_transport; + struct iscsi_cls_session *session; + struct Scsi_Host *shost; + + session = transport->create_session(ep, cmds_max, queue_depth, + initial_cmdsn); + if (!session) + return -ENOMEM; + + session->creator = pid; + shost = iscsi_session_to_shost(session); + ev->r.c_session_ret.host_no = shost->host_no; + ev->r.c_session_ret.sid = session->sid; + ISCSI_DBG_TRANS_SESSION(session, + "Completed creating transport session\n"); + return 0; +} + +static int +iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) +{ + struct iscsi_cls_conn *conn; + struct iscsi_cls_session *session; + + session = iscsi_session_lookup(ev->u.c_conn.sid); + if (!session) { + printk(KERN_ERR "iscsi: invalid session %d.\n", + ev->u.c_conn.sid); + return -EINVAL; + } + + conn = transport->create_conn(session, ev->u.c_conn.cid); + if (!conn) { + iscsi_cls_session_printk(KERN_ERR, session, + "couldn't create a new connection."); + return -ENOMEM; + } + + ev->r.c_conn_ret.sid = session->sid; + ev->r.c_conn_ret.cid = conn->cid; + + ISCSI_DBG_TRANS_CONN(conn, "Completed creating transport conn\n"); + return 0; +} + +static int +iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) +{ + struct iscsi_cls_conn *conn; + + conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid); + if (!conn) + return -EINVAL; + + ISCSI_DBG_TRANS_CONN(conn, "Flushing cleanup 
during destruction\n"); + flush_work(&conn->cleanup_work); + ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n"); + + if (transport->destroy_conn) + transport->destroy_conn(conn); + return 0; +} + +static int +iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen) +{ + char *data = (char*)ev + sizeof(*ev); + struct iscsi_cls_conn *conn; + struct iscsi_cls_session *session; + int err = 0, value = 0, state; + + if (ev->u.set_param.len > rlen || + ev->u.set_param.len > PAGE_SIZE) + return -EINVAL; + + session = iscsi_session_lookup(ev->u.set_param.sid); + conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid); + if (!conn || !session) + return -EINVAL; + + /* data will be regarded as NULL-ended string, do length check */ + if (strlen(data) > ev->u.set_param.len) + return -EINVAL; + + switch (ev->u.set_param.param) { + case ISCSI_PARAM_SESS_RECOVERY_TMO: + sscanf(data, "%d", &value); + if (!session->recovery_tmo_sysfs_override) + session->recovery_tmo = value; + break; + default: + state = READ_ONCE(conn->state); + if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) { + err = transport->set_param(conn, ev->u.set_param.param, + data, ev->u.set_param.len); + } else { + return -ENOTCONN; + } + } + + return err; +} + +static int iscsi_if_ep_connect(struct iscsi_transport *transport, + struct iscsi_uevent *ev, int msg_type) +{ + struct iscsi_endpoint *ep; + struct sockaddr *dst_addr; + struct Scsi_Host *shost = NULL; + int non_blocking, err = 0; + + if (!transport->ep_connect) + return -EINVAL; + + if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) { + shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no); + if (!shost) { + printk(KERN_ERR "ep connect failed. Could not find " + "host no %u\n", + ev->u.ep_connect_through_host.host_no); + return -ENODEV; + } + non_blocking = ev->u.ep_connect_through_host.non_blocking; + } else + non_blocking = ev->u.ep_connect.non_blocking; + + dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); + ep = transport->ep_connect(shost, dst_addr, non_blocking); + if (IS_ERR(ep)) { + err = PTR_ERR(ep); + goto release_host; + } + + ev->r.ep_connect_ret.handle = ep->id; +release_host: + if (shost) + scsi_host_put(shost); + return err; +} + +static int iscsi_if_ep_disconnect(struct iscsi_transport *transport, + u64 ep_handle) +{ + struct iscsi_cls_conn *conn; + struct iscsi_endpoint *ep; + + if (!transport->ep_disconnect) + return -EINVAL; + + ep = iscsi_lookup_endpoint(ep_handle); + if (!ep) + return -EINVAL; + + conn = ep->conn; + if (!conn) { + /* + * conn was not even bound yet, so we can't get iscsi conn + * failures yet. 
+ */ + transport->ep_disconnect(ep); + goto put_ep; + } + + mutex_lock(&conn->ep_mutex); + iscsi_if_disconnect_bound_ep(conn, ep, false); + mutex_unlock(&conn->ep_mutex); +put_ep: + iscsi_put_endpoint(ep); + return 0; +} + +static int +iscsi_if_transport_ep(struct iscsi_transport *transport, + struct iscsi_uevent *ev, int msg_type, u32 rlen) +{ + struct iscsi_endpoint *ep; + int rc = 0; + + switch (msg_type) { + case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST: + case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: + if (rlen < sizeof(struct sockaddr)) + rc = -EINVAL; + else + rc = iscsi_if_ep_connect(transport, ev, msg_type); + break; + case ISCSI_UEVENT_TRANSPORT_EP_POLL: + if (!transport->ep_poll) + return -EINVAL; + + ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle); + if (!ep) + return -EINVAL; + + ev->r.retcode = transport->ep_poll(ep, + ev->u.ep_poll.timeout_ms); + iscsi_put_endpoint(ep); + break; + case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: + rc = iscsi_if_ep_disconnect(transport, + ev->u.ep_disconnect.ep_handle); + break; + } + return rc; +} + +static int +iscsi_tgt_dscvr(struct iscsi_transport *transport, + struct iscsi_uevent *ev, u32 rlen) +{ + struct Scsi_Host *shost; + struct sockaddr *dst_addr; + int err; + + if (rlen < sizeof(*dst_addr)) + return -EINVAL; + + if (!transport->tgt_dscvr) + return -EINVAL; + + shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no); + if (!shost) { + printk(KERN_ERR "target discovery could not find host no %u\n", + ev->u.tgt_dscvr.host_no); + return -ENODEV; + } + + + dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); + err = transport->tgt_dscvr(shost, ev->u.tgt_dscvr.type, + ev->u.tgt_dscvr.enable, dst_addr); + scsi_host_put(shost); + return err; +} + +static int +iscsi_set_host_param(struct iscsi_transport *transport, + struct iscsi_uevent *ev, u32 rlen) +{ + char *data = (char*)ev + sizeof(*ev); + struct Scsi_Host *shost; + int err; + + if (!transport->set_host_param) + return -ENOSYS; + + if (ev->u.set_host_param.len > rlen || + ev->u.set_host_param.len > PAGE_SIZE) + return -EINVAL; + + shost = scsi_host_lookup(ev->u.set_host_param.host_no); + if (!shost) { + printk(KERN_ERR "set_host_param could not find host no %u\n", + ev->u.set_host_param.host_no); + return -ENODEV; + } + + /* see similar check in iscsi_if_set_param() */ + if (strlen(data) > ev->u.set_host_param.len) + return -EINVAL; + + err = transport->set_host_param(shost, ev->u.set_host_param.param, + data, ev->u.set_host_param.len); + scsi_host_put(shost); + return err; +} + +static int +iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen) +{ + struct Scsi_Host *shost; + struct iscsi_path *params; + int err; + + if (rlen < sizeof(*params)) + return -EINVAL; + + if (!transport->set_path) + return -ENOSYS; + + shost = scsi_host_lookup(ev->u.set_path.host_no); + if (!shost) { + printk(KERN_ERR "set path could not find host no %u\n", + ev->u.set_path.host_no); + return -ENODEV; + } + + params = (struct iscsi_path *)((char *)ev + sizeof(*ev)); + err = transport->set_path(shost, params); + + scsi_host_put(shost); + return err; +} + +static int iscsi_session_has_conns(int sid) +{ + struct iscsi_cls_conn *conn; + unsigned long flags; + int found = 0; + + spin_lock_irqsave(&connlock, flags); + list_for_each_entry(conn, &connlist, conn_list) { + if (iscsi_conn_get_sid(conn) == sid) { + found = 1; + break; + } + } + spin_unlock_irqrestore(&connlock, flags); + + return found; +} + +static int +iscsi_set_iface_params(struct iscsi_transport *transport, + struct 
iscsi_uevent *ev, uint32_t len) +{ + char *data = (char *)ev + sizeof(*ev); + struct Scsi_Host *shost; + int err; + + if (!transport->set_iface_param) + return -ENOSYS; + + shost = scsi_host_lookup(ev->u.set_iface_params.host_no); + if (!shost) { + printk(KERN_ERR "set_iface_params could not find host no %u\n", + ev->u.set_iface_params.host_no); + return -ENODEV; + } + + err = transport->set_iface_param(shost, data, len); + scsi_host_put(shost); + return err; +} + +static int +iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen) +{ + struct Scsi_Host *shost; + struct sockaddr *dst_addr; + int err; + + if (rlen < sizeof(*dst_addr)) + return -EINVAL; + + if (!transport->send_ping) + return -ENOSYS; + + shost = scsi_host_lookup(ev->u.iscsi_ping.host_no); + if (!shost) { + printk(KERN_ERR "iscsi_ping could not find host no %u\n", + ev->u.iscsi_ping.host_no); + return -ENODEV; + } + + dst_addr = (struct sockaddr *)((char *)ev + sizeof(*ev)); + err = transport->send_ping(shost, ev->u.iscsi_ping.iface_num, + ev->u.iscsi_ping.iface_type, + ev->u.iscsi_ping.payload_size, + ev->u.iscsi_ping.pid, + dst_addr); + scsi_host_put(shost); + return err; +} + +static int +iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh) +{ + struct iscsi_uevent *ev = nlmsg_data(nlh); + struct Scsi_Host *shost = NULL; + struct iscsi_chap_rec *chap_rec; + struct iscsi_internal *priv; + struct sk_buff *skbchap; + struct nlmsghdr *nlhchap; + struct iscsi_uevent *evchap; + uint32_t chap_buf_size; + int len, err = 0; + char *buf; + + if (!transport->get_chap) + return -EINVAL; + + priv = iscsi_if_transport_lookup(transport); + if (!priv) + return -EINVAL; + + chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec)); + len = nlmsg_total_size(sizeof(*ev) + chap_buf_size); + + shost = scsi_host_lookup(ev->u.get_chap.host_no); + if (!shost) { + printk(KERN_ERR "%s: failed. 
Could not find host no %u\n", + __func__, ev->u.get_chap.host_no); + return -ENODEV; + } + + do { + int actual_size; + + skbchap = alloc_skb(len, GFP_KERNEL); + if (!skbchap) { + printk(KERN_ERR "can not deliver chap: OOM\n"); + err = -ENOMEM; + goto exit_get_chap; + } + + nlhchap = __nlmsg_put(skbchap, 0, 0, 0, + (len - sizeof(*nlhchap)), 0); + evchap = nlmsg_data(nlhchap); + memset(evchap, 0, sizeof(*evchap)); + evchap->transport_handle = iscsi_handle(transport); + evchap->type = nlh->nlmsg_type; + evchap->u.get_chap.host_no = ev->u.get_chap.host_no; + evchap->u.get_chap.chap_tbl_idx = ev->u.get_chap.chap_tbl_idx; + evchap->u.get_chap.num_entries = ev->u.get_chap.num_entries; + buf = (char *)evchap + sizeof(*evchap); + memset(buf, 0, chap_buf_size); + + err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx, + &evchap->u.get_chap.num_entries, buf); + + actual_size = nlmsg_total_size(sizeof(*ev) + chap_buf_size); + skb_trim(skbchap, NLMSG_ALIGN(actual_size)); + nlhchap->nlmsg_len = actual_size; + + err = iscsi_multicast_skb(skbchap, ISCSI_NL_GRP_ISCSID, + GFP_KERNEL); + } while (err < 0 && err != -ECONNREFUSED); + +exit_get_chap: + scsi_host_put(shost); + return err; +} + +static int iscsi_set_chap(struct iscsi_transport *transport, + struct iscsi_uevent *ev, uint32_t len) +{ + char *data = (char *)ev + sizeof(*ev); + struct Scsi_Host *shost; + int err = 0; + + if (!transport->set_chap) + return -ENOSYS; + + shost = scsi_host_lookup(ev->u.set_path.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.set_path.host_no); + return -ENODEV; + } + + err = transport->set_chap(shost, data, len); + scsi_host_put(shost); + return err; +} + +static int iscsi_delete_chap(struct iscsi_transport *transport, + struct iscsi_uevent *ev) +{ + struct Scsi_Host *shost; + int err = 0; + + if (!transport->delete_chap) + return -ENOSYS; + + shost = scsi_host_lookup(ev->u.delete_chap.host_no); + if (!shost) { + printk(KERN_ERR "%s could not find host no %u\n", + __func__, ev->u.delete_chap.host_no); + return -ENODEV; + } + + err = transport->delete_chap(shost, ev->u.delete_chap.chap_tbl_idx); + scsi_host_put(shost); + return err; +} + +static const struct { + enum iscsi_discovery_parent_type value; + char *name; +} iscsi_discovery_parent_names[] = { + {ISCSI_DISC_PARENT_UNKNOWN, "Unknown" }, + {ISCSI_DISC_PARENT_SENDTGT, "Sendtarget" }, + {ISCSI_DISC_PARENT_ISNS, "isns" }, +}; + +char *iscsi_get_discovery_parent_name(int parent_type) +{ + int i; + char *state = "Unknown!"; + + for (i = 0; i < ARRAY_SIZE(iscsi_discovery_parent_names); i++) { + if (iscsi_discovery_parent_names[i].value & parent_type) { + state = iscsi_discovery_parent_names[i].name; + break; + } + } + return state; +} +EXPORT_SYMBOL_GPL(iscsi_get_discovery_parent_name); + +static int iscsi_set_flashnode_param(struct iscsi_transport *transport, + struct iscsi_uevent *ev, uint32_t len) +{ + char *data = (char *)ev + sizeof(*ev); + struct Scsi_Host *shost; + struct iscsi_bus_flash_session *fnode_sess; + struct iscsi_bus_flash_conn *fnode_conn; + struct device *dev; + uint32_t idx; + int err = 0; + + if (!transport->set_flashnode_param) { + err = -ENOSYS; + goto exit_set_fnode; + } + + shost = scsi_host_lookup(ev->u.set_flashnode.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.set_flashnode.host_no); + err = -ENODEV; + goto exit_set_fnode; + } + + idx = ev->u.set_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); + if (!fnode_sess) { + 
pr_err("%s could not find flashnode %u for host no %u\n", + __func__, idx, ev->u.set_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + dev = iscsi_find_flashnode_conn(fnode_sess); + if (!dev) { + err = -ENODEV; + goto put_sess; + } + + fnode_conn = iscsi_dev_to_flash_conn(dev); + err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); + +put_host: + scsi_host_put(shost); + +exit_set_fnode: + return err; +} + +static int iscsi_new_flashnode(struct iscsi_transport *transport, + struct iscsi_uevent *ev, uint32_t len) +{ + char *data = (char *)ev + sizeof(*ev); + struct Scsi_Host *shost; + int index; + int err = 0; + + if (!transport->new_flashnode) { + err = -ENOSYS; + goto exit_new_fnode; + } + + shost = scsi_host_lookup(ev->u.new_flashnode.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.new_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + index = transport->new_flashnode(shost, data, len); + + if (index >= 0) + ev->r.new_flashnode_ret.flashnode_idx = index; + else + err = -EIO; + +put_host: + scsi_host_put(shost); + +exit_new_fnode: + return err; +} + +static int iscsi_del_flashnode(struct iscsi_transport *transport, + struct iscsi_uevent *ev) +{ + struct Scsi_Host *shost; + struct iscsi_bus_flash_session *fnode_sess; + uint32_t idx; + int err = 0; + + if (!transport->del_flashnode) { + err = -ENOSYS; + goto exit_del_fnode; + } + + shost = scsi_host_lookup(ev->u.del_flashnode.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.del_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + idx = ev->u.del_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); + if (!fnode_sess) { + pr_err("%s could not find flashnode %u for host no %u\n", + __func__, idx, ev->u.del_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + err = transport->del_flashnode(fnode_sess); + put_device(&fnode_sess->dev); + +put_host: + scsi_host_put(shost); + +exit_del_fnode: + return err; +} + +static int iscsi_login_flashnode(struct iscsi_transport *transport, + struct iscsi_uevent *ev) +{ + struct Scsi_Host *shost; + struct iscsi_bus_flash_session *fnode_sess; + struct iscsi_bus_flash_conn *fnode_conn; + struct device *dev; + uint32_t idx; + int err = 0; + + if (!transport->login_flashnode) { + err = -ENOSYS; + goto exit_login_fnode; + } + + shost = scsi_host_lookup(ev->u.login_flashnode.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.login_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + idx = ev->u.login_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); + if (!fnode_sess) { + pr_err("%s could not find flashnode %u for host no %u\n", + __func__, idx, ev->u.login_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + dev = iscsi_find_flashnode_conn(fnode_sess); + if (!dev) { + err = -ENODEV; + goto put_sess; + } + + fnode_conn = iscsi_dev_to_flash_conn(dev); + err = transport->login_flashnode(fnode_sess, fnode_conn); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); + +put_host: + scsi_host_put(shost); + +exit_login_fnode: + return err; +} + +static int iscsi_logout_flashnode(struct iscsi_transport *transport, + struct iscsi_uevent *ev) +{ + struct Scsi_Host *shost; + struct iscsi_bus_flash_session *fnode_sess; + struct iscsi_bus_flash_conn *fnode_conn; + struct device *dev; + uint32_t idx; + int err 
= 0; + + if (!transport->logout_flashnode) { + err = -ENOSYS; + goto exit_logout_fnode; + } + + shost = scsi_host_lookup(ev->u.logout_flashnode.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.logout_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + idx = ev->u.logout_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); + if (!fnode_sess) { + pr_err("%s could not find flashnode %u for host no %u\n", + __func__, idx, ev->u.logout_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + dev = iscsi_find_flashnode_conn(fnode_sess); + if (!dev) { + err = -ENODEV; + goto put_sess; + } + + fnode_conn = iscsi_dev_to_flash_conn(dev); + + err = transport->logout_flashnode(fnode_sess, fnode_conn); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); + +put_host: + scsi_host_put(shost); + +exit_logout_fnode: + return err; +} + +static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport, + struct iscsi_uevent *ev) +{ + struct Scsi_Host *shost; + struct iscsi_cls_session *session; + int err = 0; + + if (!transport->logout_flashnode_sid) { + err = -ENOSYS; + goto exit_logout_sid; + } + + shost = scsi_host_lookup(ev->u.logout_flashnode_sid.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.logout_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid); + if (!session) { + pr_err("%s could not find session id %u\n", + __func__, ev->u.logout_flashnode_sid.sid); + err = -EINVAL; + goto put_host; + } + + err = transport->logout_flashnode_sid(session); + +put_host: + scsi_host_put(shost); + +exit_logout_sid: + return err; +} + +static int +iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) +{ + struct iscsi_uevent *ev = nlmsg_data(nlh); + struct Scsi_Host *shost = NULL; + struct iscsi_internal *priv; + struct sk_buff *skbhost_stats; + struct nlmsghdr *nlhhost_stats; + struct iscsi_uevent *evhost_stats; + int host_stats_size = 0; + int len, err = 0; + char *buf; + + if (!transport->get_host_stats) + return -ENOSYS; + + priv = iscsi_if_transport_lookup(transport); + if (!priv) + return -EINVAL; + + host_stats_size = sizeof(struct iscsi_offload_host_stats); + len = nlmsg_total_size(sizeof(*ev) + host_stats_size); + + shost = scsi_host_lookup(ev->u.get_host_stats.host_no); + if (!shost) { + pr_err("%s: failed. 
Could not find host no %u\n", + __func__, ev->u.get_host_stats.host_no); + return -ENODEV; + } + + do { + int actual_size; + + skbhost_stats = alloc_skb(len, GFP_KERNEL); + if (!skbhost_stats) { + pr_err("cannot deliver host stats: OOM\n"); + err = -ENOMEM; + goto exit_host_stats; + } + + nlhhost_stats = __nlmsg_put(skbhost_stats, 0, 0, 0, + (len - sizeof(*nlhhost_stats)), 0); + evhost_stats = nlmsg_data(nlhhost_stats); + memset(evhost_stats, 0, sizeof(*evhost_stats)); + evhost_stats->transport_handle = iscsi_handle(transport); + evhost_stats->type = nlh->nlmsg_type; + evhost_stats->u.get_host_stats.host_no = + ev->u.get_host_stats.host_no; + buf = (char *)evhost_stats + sizeof(*evhost_stats); + memset(buf, 0, host_stats_size); + + err = transport->get_host_stats(shost, buf, host_stats_size); + if (err) { + kfree_skb(skbhost_stats); + goto exit_host_stats; + } + + actual_size = nlmsg_total_size(sizeof(*ev) + host_stats_size); + skb_trim(skbhost_stats, NLMSG_ALIGN(actual_size)); + nlhhost_stats->nlmsg_len = actual_size; + + err = iscsi_multicast_skb(skbhost_stats, ISCSI_NL_GRP_ISCSID, + GFP_KERNEL); + } while (err < 0 && err != -ECONNREFUSED); + +exit_host_stats: + scsi_host_put(shost); + return err; +} + +static int iscsi_if_transport_conn(struct iscsi_transport *transport, + struct nlmsghdr *nlh, u32 pdu_len) +{ + struct iscsi_uevent *ev = nlmsg_data(nlh); + struct iscsi_cls_session *session; + struct iscsi_cls_conn *conn = NULL; + struct iscsi_endpoint *ep; + int err = 0; + + switch (nlh->nlmsg_type) { + case ISCSI_UEVENT_CREATE_CONN: + return iscsi_if_create_conn(transport, ev); + case ISCSI_UEVENT_DESTROY_CONN: + return iscsi_if_destroy_conn(transport, ev); + case ISCSI_UEVENT_STOP_CONN: + conn = iscsi_conn_lookup(ev->u.stop_conn.sid, + ev->u.stop_conn.cid); + if (!conn) + return -EINVAL; + + return iscsi_if_stop_conn(conn, ev->u.stop_conn.flag); + } + + /* + * The following cmds need to be run under the ep_mutex so in kernel + * conn cleanup (ep_disconnect + unbind and conn) is not done while + * these are running. They also must not run if we have just run a conn + * cleanup because they would set the state in a way that might allow + * IO or send IO themselves. 
+ */ + switch (nlh->nlmsg_type) { + case ISCSI_UEVENT_START_CONN: + conn = iscsi_conn_lookup(ev->u.start_conn.sid, + ev->u.start_conn.cid); + break; + case ISCSI_UEVENT_BIND_CONN: + conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid); + break; + case ISCSI_UEVENT_SEND_PDU: + conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid); + break; + } + + if (!conn) + return -EINVAL; + + mutex_lock(&conn->ep_mutex); + spin_lock_irq(&conn->lock); + if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { + spin_unlock_irq(&conn->lock); + mutex_unlock(&conn->ep_mutex); + ev->r.retcode = -ENOTCONN; + return 0; + } + spin_unlock_irq(&conn->lock); + + switch (nlh->nlmsg_type) { + case ISCSI_UEVENT_BIND_CONN: + session = iscsi_session_lookup(ev->u.b_conn.sid); + if (!session) { + err = -EINVAL; + break; + } + + ev->r.retcode = transport->bind_conn(session, conn, + ev->u.b_conn.transport_eph, + ev->u.b_conn.is_leading); + if (!ev->r.retcode) + WRITE_ONCE(conn->state, ISCSI_CONN_BOUND); + + if (ev->r.retcode || !transport->ep_connect) + break; + + ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph); + if (ep) { + ep->conn = conn; + conn->ep = ep; + iscsi_put_endpoint(ep); + } else { + err = -ENOTCONN; + iscsi_cls_conn_printk(KERN_ERR, conn, + "Could not set ep conn binding\n"); + } + break; + case ISCSI_UEVENT_START_CONN: + ev->r.retcode = transport->start_conn(conn); + if (!ev->r.retcode) + WRITE_ONCE(conn->state, ISCSI_CONN_UP); + + break; + case ISCSI_UEVENT_SEND_PDU: + if ((ev->u.send_pdu.hdr_size > pdu_len) || + (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) { + err = -EINVAL; + break; + } + + ev->r.retcode = transport->send_pdu(conn, + (struct iscsi_hdr *)((char *)ev + sizeof(*ev)), + (char *)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, + ev->u.send_pdu.data_size); + break; + default: + err = -ENOSYS; + } + + mutex_unlock(&conn->ep_mutex); + return err; +} + +static int +iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) +{ + int err = 0; + u32 portid; + struct iscsi_uevent *ev = nlmsg_data(nlh); + struct iscsi_transport *transport = NULL; + struct iscsi_internal *priv; + struct iscsi_cls_session *session; + struct iscsi_endpoint *ep = NULL; + u32 rlen; + + if (!netlink_capable(skb, CAP_SYS_ADMIN)) + return -EPERM; + + if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE) + *group = ISCSI_NL_GRP_UIP; + else + *group = ISCSI_NL_GRP_ISCSID; + + priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); + if (!priv) + return -EINVAL; + transport = priv->iscsi_transport; + + if (!try_module_get(transport->owner)) + return -EINVAL; + + portid = NETLINK_CB(skb).portid; + + /* + * Even though the remaining payload may not be regarded as nlattr, + * (like address or something else), calculate the remaining length + * here to ease following length checks. 
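 *
 * (Editor's note, not part of the upstream comment: rlen ends up being the
 * payload bytes that follow the iscsi_uevent header, and the handlers
 * below, e.g. iscsi_if_set_param() and iscsi_tgt_dscvr(), validate their
 * expected payload sizes against it.)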
+ */ + rlen = nlmsg_attrlen(nlh, sizeof(*ev)); + + switch (nlh->nlmsg_type) { + case ISCSI_UEVENT_CREATE_SESSION: + err = iscsi_if_create_session(priv, ep, ev, + portid, + ev->u.c_session.initial_cmdsn, + ev->u.c_session.cmds_max, + ev->u.c_session.queue_depth); + break; + case ISCSI_UEVENT_CREATE_BOUND_SESSION: + ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle); + if (!ep) { + err = -EINVAL; + break; + } + + err = iscsi_if_create_session(priv, ep, ev, + portid, + ev->u.c_bound_session.initial_cmdsn, + ev->u.c_bound_session.cmds_max, + ev->u.c_bound_session.queue_depth); + iscsi_put_endpoint(ep); + break; + case ISCSI_UEVENT_DESTROY_SESSION: + session = iscsi_session_lookup(ev->u.d_session.sid); + if (!session) + err = -EINVAL; + else if (iscsi_session_has_conns(ev->u.d_session.sid)) + err = -EBUSY; + else + transport->destroy_session(session); + break; + case ISCSI_UEVENT_DESTROY_SESSION_ASYNC: + session = iscsi_session_lookup(ev->u.d_session.sid); + if (!session) + err = -EINVAL; + else if (iscsi_session_has_conns(ev->u.d_session.sid)) + err = -EBUSY; + else { + unsigned long flags; + + /* Prevent this session from being found again */ + spin_lock_irqsave(&sesslock, flags); + list_del_init(&session->sess_list); + spin_unlock_irqrestore(&sesslock, flags); + + queue_work(system_unbound_wq, &session->destroy_work); + } + break; + case ISCSI_UEVENT_UNBIND_SESSION: + session = iscsi_session_lookup(ev->u.d_session.sid); + if (session) + queue_work(session->workq, &session->unbind_work); + else + err = -EINVAL; + break; + case ISCSI_UEVENT_SET_PARAM: + err = iscsi_if_set_param(transport, ev, rlen); + break; + case ISCSI_UEVENT_CREATE_CONN: + case ISCSI_UEVENT_DESTROY_CONN: + case ISCSI_UEVENT_STOP_CONN: + case ISCSI_UEVENT_START_CONN: + case ISCSI_UEVENT_BIND_CONN: + case ISCSI_UEVENT_SEND_PDU: + err = iscsi_if_transport_conn(transport, nlh, rlen); + break; + case ISCSI_UEVENT_GET_STATS: + err = iscsi_if_get_stats(transport, nlh); + break; + case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: + case ISCSI_UEVENT_TRANSPORT_EP_POLL: + case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: + case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST: + err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type, rlen); + break; + case ISCSI_UEVENT_TGT_DSCVR: + err = iscsi_tgt_dscvr(transport, ev, rlen); + break; + case ISCSI_UEVENT_SET_HOST_PARAM: + err = iscsi_set_host_param(transport, ev, rlen); + break; + case ISCSI_UEVENT_PATH_UPDATE: + err = iscsi_set_path(transport, ev, rlen); + break; + case ISCSI_UEVENT_SET_IFACE_PARAMS: + err = iscsi_set_iface_params(transport, ev, rlen); + break; + case ISCSI_UEVENT_PING: + err = iscsi_send_ping(transport, ev, rlen); + break; + case ISCSI_UEVENT_GET_CHAP: + err = iscsi_get_chap(transport, nlh); + break; + case ISCSI_UEVENT_DELETE_CHAP: + err = iscsi_delete_chap(transport, ev); + break; + case ISCSI_UEVENT_SET_FLASHNODE_PARAMS: + err = iscsi_set_flashnode_param(transport, ev, rlen); + break; + case ISCSI_UEVENT_NEW_FLASHNODE: + err = iscsi_new_flashnode(transport, ev, rlen); + break; + case ISCSI_UEVENT_DEL_FLASHNODE: + err = iscsi_del_flashnode(transport, ev); + break; + case ISCSI_UEVENT_LOGIN_FLASHNODE: + err = iscsi_login_flashnode(transport, ev); + break; + case ISCSI_UEVENT_LOGOUT_FLASHNODE: + err = iscsi_logout_flashnode(transport, ev); + break; + case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID: + err = iscsi_logout_flashnode_sid(transport, ev); + break; + case ISCSI_UEVENT_SET_CHAP: + err = iscsi_set_chap(transport, ev, rlen); + break; + case ISCSI_UEVENT_GET_HOST_STATS: + err 
= iscsi_get_host_stats(transport, nlh); + break; + default: + err = -ENOSYS; + break; + } + + module_put(transport->owner); + return err; +} + +/* + * Get message from skb. Each message is processed by iscsi_if_recv_msg. + * Malformed skbs with wrong lengths or invalid creds are not processed. + */ +static void +iscsi_if_rx(struct sk_buff *skb) +{ + u32 portid = NETLINK_CB(skb).portid; + + mutex_lock(&rx_queue_mutex); + while (skb->len >= NLMSG_HDRLEN) { + int err; + uint32_t rlen; + struct nlmsghdr *nlh; + struct iscsi_uevent *ev; + uint32_t group; + int retries = ISCSI_SEND_MAX_ALLOWED; + + nlh = nlmsg_hdr(skb); + if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) || + skb->len < nlh->nlmsg_len) { + break; + } + + ev = nlmsg_data(nlh); + rlen = NLMSG_ALIGN(nlh->nlmsg_len); + if (rlen > skb->len) + rlen = skb->len; + + err = iscsi_if_recv_msg(skb, nlh, &group); + if (err) { + ev->type = ISCSI_KEVENT_IF_ERROR; + ev->iferror = err; + } + do { + /* + * special case for GET_STATS: + * on success - sending reply and stats from + * inside of if_recv_msg(), + * on error - fall through. + */ + if (ev->type == ISCSI_UEVENT_GET_STATS && !err) + break; + if (ev->type == ISCSI_UEVENT_GET_CHAP && !err) + break; + err = iscsi_if_send_reply(portid, nlh->nlmsg_type, + ev, sizeof(*ev)); + if (err == -EAGAIN && --retries < 0) { + printk(KERN_WARNING "Send reply failed, error %d\n", err); + break; + } + } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); + skb_pull(skb, rlen); + } + mutex_unlock(&rx_queue_mutex); +} + +#define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store) \ +struct device_attribute dev_attr_##_prefix##_##_name = \ + __ATTR(_name,_mode,_show,_store) + +/* + * iSCSI connection attrs + */ +#define iscsi_conn_attr_show(param) \ +static ssize_t \ +show_conn_param_##param(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \ + struct iscsi_transport *t = conn->transport; \ + return t->get_conn_param(conn, param, buf); \ +} + +#define iscsi_conn_attr(field, param) \ + iscsi_conn_attr_show(param) \ +static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, show_conn_param_##param, \ + NULL); + +iscsi_conn_attr(max_recv_dlength, ISCSI_PARAM_MAX_RECV_DLENGTH); +iscsi_conn_attr(max_xmit_dlength, ISCSI_PARAM_MAX_XMIT_DLENGTH); +iscsi_conn_attr(header_digest, ISCSI_PARAM_HDRDGST_EN); +iscsi_conn_attr(data_digest, ISCSI_PARAM_DATADGST_EN); +iscsi_conn_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN); +iscsi_conn_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN); +iscsi_conn_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT); +iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN); +iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS); +iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO); +iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO); +iscsi_conn_attr(local_port, ISCSI_PARAM_LOCAL_PORT); +iscsi_conn_attr(statsn, ISCSI_PARAM_STATSN); +iscsi_conn_attr(keepalive_tmo, ISCSI_PARAM_KEEPALIVE_TMO); +iscsi_conn_attr(max_segment_size, ISCSI_PARAM_MAX_SEGMENT_SIZE); +iscsi_conn_attr(tcp_timestamp_stat, ISCSI_PARAM_TCP_TIMESTAMP_STAT); +iscsi_conn_attr(tcp_wsf_disable, ISCSI_PARAM_TCP_WSF_DISABLE); +iscsi_conn_attr(tcp_nagle_disable, ISCSI_PARAM_TCP_NAGLE_DISABLE); +iscsi_conn_attr(tcp_timer_scale, ISCSI_PARAM_TCP_TIMER_SCALE); +iscsi_conn_attr(tcp_timestamp_enable, ISCSI_PARAM_TCP_TIMESTAMP_EN); +iscsi_conn_attr(fragment_disable, ISCSI_PARAM_IP_FRAGMENT_DISABLE); +iscsi_conn_attr(ipv4_tos, ISCSI_PARAM_IPV4_TOS); 
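Each iscsi_conn_attr() invocation above generates a read-only sysfs file on the connection device whose show routine simply forwards to the transport's get_conn_param() callback. As a rough userspace sketch (not part of this patch; it assumes the connection class is registered under /sys/class/iscsi_connection, as in mainline, and that at least one connection exists and exposes the attribute), the generated max_recv_dlength file could be read like this:

/* Illustrative only: read one generated iSCSI connection attribute. */
#include <dirent.h>
#include <stdio.h>

int main(void)
{
	const char *base = "/sys/class/iscsi_connection";
	char path[512], value[128];
	struct dirent *de;
	DIR *d = opendir(base);

	if (!d) {
		perror(base);		/* no iSCSI connections registered */
		return 1;
	}
	while ((de = readdir(d)) != NULL) {
		if (de->d_name[0] == '.')
			continue;
		/* file created by iscsi_conn_attr(max_recv_dlength, ...) */
		snprintf(path, sizeof(path), "%s/%s/max_recv_dlength",
			 base, de->d_name);
		FILE *f = fopen(path, "r");

		if (!f)			/* may be hidden by attr_is_visible() */
			continue;
		if (fgets(value, sizeof(value), f))
			printf("%s: %s", de->d_name, value);
		fclose(f);
	}
	closedir(d);
	return 0;
}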
+iscsi_conn_attr(ipv6_traffic_class, ISCSI_PARAM_IPV6_TC); +iscsi_conn_attr(ipv6_flow_label, ISCSI_PARAM_IPV6_FLOW_LABEL); +iscsi_conn_attr(is_fw_assigned_ipv6, ISCSI_PARAM_IS_FW_ASSIGNED_IPV6); +iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF); +iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF); +iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR); + +static const char *const connection_state_names[] = { + [ISCSI_CONN_UP] = "up", + [ISCSI_CONN_DOWN] = "down", + [ISCSI_CONN_FAILED] = "failed", + [ISCSI_CONN_BOUND] = "bound" +}; + +static ssize_t show_conn_state(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); + const char *state = "unknown"; + int conn_state = READ_ONCE(conn->state); + + if (conn_state >= 0 && + conn_state < ARRAY_SIZE(connection_state_names)) + state = connection_state_names[conn_state]; + + return sysfs_emit(buf, "%s\n", state); +} +static ISCSI_CLASS_ATTR(conn, state, S_IRUGO, show_conn_state, + NULL); + +#define iscsi_conn_ep_attr_show(param) \ +static ssize_t show_conn_ep_param_##param(struct device *dev, \ + struct device_attribute *attr,\ + char *buf) \ +{ \ + struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \ + struct iscsi_transport *t = conn->transport; \ + struct iscsi_endpoint *ep; \ + ssize_t rc; \ + \ + /* \ + * Need to make sure ep_disconnect does not free the LLD's \ + * interconnect resources while we are trying to read them. \ + */ \ + mutex_lock(&conn->ep_mutex); \ + ep = conn->ep; \ + if (!ep && t->ep_connect) { \ + mutex_unlock(&conn->ep_mutex); \ + return -ENOTCONN; \ + } \ + \ + if (ep) \ + rc = t->get_ep_param(ep, param, buf); \ + else \ + rc = t->get_conn_param(conn, param, buf); \ + mutex_unlock(&conn->ep_mutex); \ + return rc; \ +} + +#define iscsi_conn_ep_attr(field, param) \ + iscsi_conn_ep_attr_show(param) \ +static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, \ + show_conn_ep_param_##param, NULL); + +iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS); +iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT); + +static struct attribute *iscsi_conn_attrs[] = { + &dev_attr_conn_max_recv_dlength.attr, + &dev_attr_conn_max_xmit_dlength.attr, + &dev_attr_conn_header_digest.attr, + &dev_attr_conn_data_digest.attr, + &dev_attr_conn_ifmarker.attr, + &dev_attr_conn_ofmarker.attr, + &dev_attr_conn_address.attr, + &dev_attr_conn_port.attr, + &dev_attr_conn_exp_statsn.attr, + &dev_attr_conn_persistent_address.attr, + &dev_attr_conn_persistent_port.attr, + &dev_attr_conn_ping_tmo.attr, + &dev_attr_conn_recv_tmo.attr, + &dev_attr_conn_local_port.attr, + &dev_attr_conn_statsn.attr, + &dev_attr_conn_keepalive_tmo.attr, + &dev_attr_conn_max_segment_size.attr, + &dev_attr_conn_tcp_timestamp_stat.attr, + &dev_attr_conn_tcp_wsf_disable.attr, + &dev_attr_conn_tcp_nagle_disable.attr, + &dev_attr_conn_tcp_timer_scale.attr, + &dev_attr_conn_tcp_timestamp_enable.attr, + &dev_attr_conn_fragment_disable.attr, + &dev_attr_conn_ipv4_tos.attr, + &dev_attr_conn_ipv6_traffic_class.attr, + &dev_attr_conn_ipv6_flow_label.attr, + &dev_attr_conn_is_fw_assigned_ipv6.attr, + &dev_attr_conn_tcp_xmit_wsf.attr, + &dev_attr_conn_tcp_recv_wsf.attr, + &dev_attr_conn_local_ipaddr.attr, + &dev_attr_conn_state.attr, + NULL, +}; + +static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *cdev = container_of(kobj, struct device, kobj); + struct iscsi_cls_conn *conn = transport_class_to_conn(cdev); + struct iscsi_transport *t = 
conn->transport; + int param; + + if (attr == &dev_attr_conn_max_recv_dlength.attr) + param = ISCSI_PARAM_MAX_RECV_DLENGTH; + else if (attr == &dev_attr_conn_max_xmit_dlength.attr) + param = ISCSI_PARAM_MAX_XMIT_DLENGTH; + else if (attr == &dev_attr_conn_header_digest.attr) + param = ISCSI_PARAM_HDRDGST_EN; + else if (attr == &dev_attr_conn_data_digest.attr) + param = ISCSI_PARAM_DATADGST_EN; + else if (attr == &dev_attr_conn_ifmarker.attr) + param = ISCSI_PARAM_IFMARKER_EN; + else if (attr == &dev_attr_conn_ofmarker.attr) + param = ISCSI_PARAM_OFMARKER_EN; + else if (attr == &dev_attr_conn_address.attr) + param = ISCSI_PARAM_CONN_ADDRESS; + else if (attr == &dev_attr_conn_port.attr) + param = ISCSI_PARAM_CONN_PORT; + else if (attr == &dev_attr_conn_exp_statsn.attr) + param = ISCSI_PARAM_EXP_STATSN; + else if (attr == &dev_attr_conn_persistent_address.attr) + param = ISCSI_PARAM_PERSISTENT_ADDRESS; + else if (attr == &dev_attr_conn_persistent_port.attr) + param = ISCSI_PARAM_PERSISTENT_PORT; + else if (attr == &dev_attr_conn_ping_tmo.attr) + param = ISCSI_PARAM_PING_TMO; + else if (attr == &dev_attr_conn_recv_tmo.attr) + param = ISCSI_PARAM_RECV_TMO; + else if (attr == &dev_attr_conn_local_port.attr) + param = ISCSI_PARAM_LOCAL_PORT; + else if (attr == &dev_attr_conn_statsn.attr) + param = ISCSI_PARAM_STATSN; + else if (attr == &dev_attr_conn_keepalive_tmo.attr) + param = ISCSI_PARAM_KEEPALIVE_TMO; + else if (attr == &dev_attr_conn_max_segment_size.attr) + param = ISCSI_PARAM_MAX_SEGMENT_SIZE; + else if (attr == &dev_attr_conn_tcp_timestamp_stat.attr) + param = ISCSI_PARAM_TCP_TIMESTAMP_STAT; + else if (attr == &dev_attr_conn_tcp_wsf_disable.attr) + param = ISCSI_PARAM_TCP_WSF_DISABLE; + else if (attr == &dev_attr_conn_tcp_nagle_disable.attr) + param = ISCSI_PARAM_TCP_NAGLE_DISABLE; + else if (attr == &dev_attr_conn_tcp_timer_scale.attr) + param = ISCSI_PARAM_TCP_TIMER_SCALE; + else if (attr == &dev_attr_conn_tcp_timestamp_enable.attr) + param = ISCSI_PARAM_TCP_TIMESTAMP_EN; + else if (attr == &dev_attr_conn_fragment_disable.attr) + param = ISCSI_PARAM_IP_FRAGMENT_DISABLE; + else if (attr == &dev_attr_conn_ipv4_tos.attr) + param = ISCSI_PARAM_IPV4_TOS; + else if (attr == &dev_attr_conn_ipv6_traffic_class.attr) + param = ISCSI_PARAM_IPV6_TC; + else if (attr == &dev_attr_conn_ipv6_flow_label.attr) + param = ISCSI_PARAM_IPV6_FLOW_LABEL; + else if (attr == &dev_attr_conn_is_fw_assigned_ipv6.attr) + param = ISCSI_PARAM_IS_FW_ASSIGNED_IPV6; + else if (attr == &dev_attr_conn_tcp_xmit_wsf.attr) + param = ISCSI_PARAM_TCP_XMIT_WSF; + else if (attr == &dev_attr_conn_tcp_recv_wsf.attr) + param = ISCSI_PARAM_TCP_RECV_WSF; + else if (attr == &dev_attr_conn_local_ipaddr.attr) + param = ISCSI_PARAM_LOCAL_IPADDR; + else if (attr == &dev_attr_conn_state.attr) + return S_IRUGO; + else { + WARN_ONCE(1, "Invalid conn attr"); + return 0; + } + + return t->attr_is_visible(ISCSI_PARAM, param); +} + +static struct attribute_group iscsi_conn_group = { + .attrs = iscsi_conn_attrs, + .is_visible = iscsi_conn_attr_is_visible, +}; + +/* + * iSCSI session attrs + */ +#define iscsi_session_attr_show(param, perm) \ +static ssize_t \ +show_session_param_##param(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct iscsi_cls_session *session = \ + iscsi_dev_to_session(dev->parent); \ + struct iscsi_transport *t = session->transport; \ + \ + if (perm && !capable(CAP_SYS_ADMIN)) \ + return -EACCES; \ + return t->get_session_param(session, param, buf); \ +} + +#define iscsi_session_attr(field, 
param, perm) \ + iscsi_session_attr_show(param, perm) \ +static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \ + NULL); +iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0); +iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0); +iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0); +iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, 0); +iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, 0); +iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, 0); +iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, 0); +iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, 0); +iscsi_session_attr(erl, ISCSI_PARAM_ERL, 0); +iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT, 0); +iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1); +iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1); +iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1); +iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1); +iscsi_session_attr(chap_out_idx, ISCSI_PARAM_CHAP_OUT_IDX, 1); +iscsi_session_attr(chap_in_idx, ISCSI_PARAM_CHAP_IN_IDX, 1); +iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0); +iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0); +iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0); +iscsi_session_attr(tgt_reset_tmo, ISCSI_PARAM_TGT_RESET_TMO, 0); +iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0); +iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0); +iscsi_session_attr(targetalias, ISCSI_PARAM_TARGET_ALIAS, 0); +iscsi_session_attr(boot_root, ISCSI_PARAM_BOOT_ROOT, 0); +iscsi_session_attr(boot_nic, ISCSI_PARAM_BOOT_NIC, 0); +iscsi_session_attr(boot_target, ISCSI_PARAM_BOOT_TARGET, 0); +iscsi_session_attr(auto_snd_tgt_disable, ISCSI_PARAM_AUTO_SND_TGT_DISABLE, 0); +iscsi_session_attr(discovery_session, ISCSI_PARAM_DISCOVERY_SESS, 0); +iscsi_session_attr(portal_type, ISCSI_PARAM_PORTAL_TYPE, 0); +iscsi_session_attr(chap_auth, ISCSI_PARAM_CHAP_AUTH_EN, 0); +iscsi_session_attr(discovery_logout, ISCSI_PARAM_DISCOVERY_LOGOUT_EN, 0); +iscsi_session_attr(bidi_chap, ISCSI_PARAM_BIDI_CHAP_EN, 0); +iscsi_session_attr(discovery_auth_optional, + ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL, 0); +iscsi_session_attr(def_time2wait, ISCSI_PARAM_DEF_TIME2WAIT, 0); +iscsi_session_attr(def_time2retain, ISCSI_PARAM_DEF_TIME2RETAIN, 0); +iscsi_session_attr(isid, ISCSI_PARAM_ISID, 0); +iscsi_session_attr(tsid, ISCSI_PARAM_TSID, 0); +iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0); +iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0); +iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0); + +static ssize_t +show_priv_session_target_state(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); + + return sysfs_emit(buf, "%s\n", + iscsi_session_target_state_name[session->target_state]); +} + +static ISCSI_CLASS_ATTR(priv_sess, target_state, S_IRUGO, + show_priv_session_target_state, NULL); + +static ssize_t +show_priv_session_state(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); + return sysfs_emit(buf, "%s\n", iscsi_session_state_name(session->state)); +} +static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state, + NULL); +static ssize_t +show_priv_session_creator(struct device *dev, struct device_attribute *attr, + char *buf) +{ + 
struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); + return sysfs_emit(buf, "%d\n", session->creator); +} +static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator, + NULL); +static ssize_t +show_priv_session_target_id(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); + return sysfs_emit(buf, "%d\n", session->target_id); +} +static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO, + show_priv_session_target_id, NULL); + +#define iscsi_priv_session_attr_show(field, format) \ +static ssize_t \ +show_priv_session_##field(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct iscsi_cls_session *session = \ + iscsi_dev_to_session(dev->parent); \ + if (session->field == -1) \ + return sysfs_emit(buf, "off\n"); \ + return sysfs_emit(buf, format"\n", session->field); \ +} + +#define iscsi_priv_session_attr_store(field) \ +static ssize_t \ +store_priv_session_##field(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + int val; \ + char *cp; \ + struct iscsi_cls_session *session = \ + iscsi_dev_to_session(dev->parent); \ + if ((session->state == ISCSI_SESSION_FREE) || \ + (session->state == ISCSI_SESSION_FAILED)) \ + return -EBUSY; \ + if (strncmp(buf, "off", 3) == 0) { \ + session->field = -1; \ + session->field##_sysfs_override = true; \ + } else { \ + val = simple_strtoul(buf, &cp, 0); \ + if (*cp != '\0' && *cp != '\n') \ + return -EINVAL; \ + session->field = val; \ + session->field##_sysfs_override = true; \ + } \ + return count; \ +} + +#define iscsi_priv_session_rw_attr(field, format) \ + iscsi_priv_session_attr_show(field, format) \ + iscsi_priv_session_attr_store(field) \ +static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \ + show_priv_session_##field, \ + store_priv_session_##field) + +iscsi_priv_session_rw_attr(recovery_tmo, "%d"); + +static struct attribute *iscsi_session_attrs[] = { + &dev_attr_sess_initial_r2t.attr, + &dev_attr_sess_max_outstanding_r2t.attr, + &dev_attr_sess_immediate_data.attr, + &dev_attr_sess_first_burst_len.attr, + &dev_attr_sess_max_burst_len.attr, + &dev_attr_sess_data_pdu_in_order.attr, + &dev_attr_sess_data_seq_in_order.attr, + &dev_attr_sess_erl.attr, + &dev_attr_sess_targetname.attr, + &dev_attr_sess_tpgt.attr, + &dev_attr_sess_password.attr, + &dev_attr_sess_password_in.attr, + &dev_attr_sess_username.attr, + &dev_attr_sess_username_in.attr, + &dev_attr_sess_fast_abort.attr, + &dev_attr_sess_abort_tmo.attr, + &dev_attr_sess_lu_reset_tmo.attr, + &dev_attr_sess_tgt_reset_tmo.attr, + &dev_attr_sess_ifacename.attr, + &dev_attr_sess_initiatorname.attr, + &dev_attr_sess_targetalias.attr, + &dev_attr_sess_boot_root.attr, + &dev_attr_sess_boot_nic.attr, + &dev_attr_sess_boot_target.attr, + &dev_attr_priv_sess_recovery_tmo.attr, + &dev_attr_priv_sess_state.attr, + &dev_attr_priv_sess_target_state.attr, + &dev_attr_priv_sess_creator.attr, + &dev_attr_sess_chap_out_idx.attr, + &dev_attr_sess_chap_in_idx.attr, + &dev_attr_priv_sess_target_id.attr, + &dev_attr_sess_auto_snd_tgt_disable.attr, + &dev_attr_sess_discovery_session.attr, + &dev_attr_sess_portal_type.attr, + &dev_attr_sess_chap_auth.attr, + &dev_attr_sess_discovery_logout.attr, + &dev_attr_sess_bidi_chap.attr, + &dev_attr_sess_discovery_auth_optional.attr, + &dev_attr_sess_def_time2wait.attr, + &dev_attr_sess_def_time2retain.attr, + &dev_attr_sess_isid.attr, + &dev_attr_sess_tsid.attr, + 
&dev_attr_sess_def_taskmgmt_tmo.attr, + &dev_attr_sess_discovery_parent_idx.attr, + &dev_attr_sess_discovery_parent_type.attr, + NULL, +}; + +static umode_t iscsi_session_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *cdev = container_of(kobj, struct device, kobj); + struct iscsi_cls_session *session = transport_class_to_session(cdev); + struct iscsi_transport *t = session->transport; + int param; + + if (attr == &dev_attr_sess_initial_r2t.attr) + param = ISCSI_PARAM_INITIAL_R2T_EN; + else if (attr == &dev_attr_sess_max_outstanding_r2t.attr) + param = ISCSI_PARAM_MAX_R2T; + else if (attr == &dev_attr_sess_immediate_data.attr) + param = ISCSI_PARAM_IMM_DATA_EN; + else if (attr == &dev_attr_sess_first_burst_len.attr) + param = ISCSI_PARAM_FIRST_BURST; + else if (attr == &dev_attr_sess_max_burst_len.attr) + param = ISCSI_PARAM_MAX_BURST; + else if (attr == &dev_attr_sess_data_pdu_in_order.attr) + param = ISCSI_PARAM_PDU_INORDER_EN; + else if (attr == &dev_attr_sess_data_seq_in_order.attr) + param = ISCSI_PARAM_DATASEQ_INORDER_EN; + else if (attr == &dev_attr_sess_erl.attr) + param = ISCSI_PARAM_ERL; + else if (attr == &dev_attr_sess_targetname.attr) + param = ISCSI_PARAM_TARGET_NAME; + else if (attr == &dev_attr_sess_tpgt.attr) + param = ISCSI_PARAM_TPGT; + else if (attr == &dev_attr_sess_chap_in_idx.attr) + param = ISCSI_PARAM_CHAP_IN_IDX; + else if (attr == &dev_attr_sess_chap_out_idx.attr) + param = ISCSI_PARAM_CHAP_OUT_IDX; + else if (attr == &dev_attr_sess_password.attr) + param = ISCSI_PARAM_USERNAME; + else if (attr == &dev_attr_sess_password_in.attr) + param = ISCSI_PARAM_USERNAME_IN; + else if (attr == &dev_attr_sess_username.attr) + param = ISCSI_PARAM_PASSWORD; + else if (attr == &dev_attr_sess_username_in.attr) + param = ISCSI_PARAM_PASSWORD_IN; + else if (attr == &dev_attr_sess_fast_abort.attr) + param = ISCSI_PARAM_FAST_ABORT; + else if (attr == &dev_attr_sess_abort_tmo.attr) + param = ISCSI_PARAM_ABORT_TMO; + else if (attr == &dev_attr_sess_lu_reset_tmo.attr) + param = ISCSI_PARAM_LU_RESET_TMO; + else if (attr == &dev_attr_sess_tgt_reset_tmo.attr) + param = ISCSI_PARAM_TGT_RESET_TMO; + else if (attr == &dev_attr_sess_ifacename.attr) + param = ISCSI_PARAM_IFACE_NAME; + else if (attr == &dev_attr_sess_initiatorname.attr) + param = ISCSI_PARAM_INITIATOR_NAME; + else if (attr == &dev_attr_sess_targetalias.attr) + param = ISCSI_PARAM_TARGET_ALIAS; + else if (attr == &dev_attr_sess_boot_root.attr) + param = ISCSI_PARAM_BOOT_ROOT; + else if (attr == &dev_attr_sess_boot_nic.attr) + param = ISCSI_PARAM_BOOT_NIC; + else if (attr == &dev_attr_sess_boot_target.attr) + param = ISCSI_PARAM_BOOT_TARGET; + else if (attr == &dev_attr_sess_auto_snd_tgt_disable.attr) + param = ISCSI_PARAM_AUTO_SND_TGT_DISABLE; + else if (attr == &dev_attr_sess_discovery_session.attr) + param = ISCSI_PARAM_DISCOVERY_SESS; + else if (attr == &dev_attr_sess_portal_type.attr) + param = ISCSI_PARAM_PORTAL_TYPE; + else if (attr == &dev_attr_sess_chap_auth.attr) + param = ISCSI_PARAM_CHAP_AUTH_EN; + else if (attr == &dev_attr_sess_discovery_logout.attr) + param = ISCSI_PARAM_DISCOVERY_LOGOUT_EN; + else if (attr == &dev_attr_sess_bidi_chap.attr) + param = ISCSI_PARAM_BIDI_CHAP_EN; + else if (attr == &dev_attr_sess_discovery_auth_optional.attr) + param = ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL; + else if (attr == &dev_attr_sess_def_time2wait.attr) + param = ISCSI_PARAM_DEF_TIME2WAIT; + else if (attr == &dev_attr_sess_def_time2retain.attr) + param = ISCSI_PARAM_DEF_TIME2RETAIN; + 
else if (attr == &dev_attr_sess_isid.attr) + param = ISCSI_PARAM_ISID; + else if (attr == &dev_attr_sess_tsid.attr) + param = ISCSI_PARAM_TSID; + else if (attr == &dev_attr_sess_def_taskmgmt_tmo.attr) + param = ISCSI_PARAM_DEF_TASKMGMT_TMO; + else if (attr == &dev_attr_sess_discovery_parent_idx.attr) + param = ISCSI_PARAM_DISCOVERY_PARENT_IDX; + else if (attr == &dev_attr_sess_discovery_parent_type.attr) + param = ISCSI_PARAM_DISCOVERY_PARENT_TYPE; + else if (attr == &dev_attr_priv_sess_recovery_tmo.attr) + return S_IRUGO | S_IWUSR; + else if (attr == &dev_attr_priv_sess_state.attr) + return S_IRUGO; + else if (attr == &dev_attr_priv_sess_target_state.attr) + return S_IRUGO; + else if (attr == &dev_attr_priv_sess_creator.attr) + return S_IRUGO; + else if (attr == &dev_attr_priv_sess_target_id.attr) + return S_IRUGO; + else { + WARN_ONCE(1, "Invalid session attr"); + return 0; + } + + return t->attr_is_visible(ISCSI_PARAM, param); +} + +static struct attribute_group iscsi_session_group = { + .attrs = iscsi_session_attrs, + .is_visible = iscsi_session_attr_is_visible, +}; + +/* + * iSCSI host attrs + */ +#define iscsi_host_attr_show(param) \ +static ssize_t \ +show_host_param_##param(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct Scsi_Host *shost = transport_class_to_shost(dev); \ + struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \ + return priv->iscsi_transport->get_host_param(shost, param, buf); \ +} + +#define iscsi_host_attr(field, param) \ + iscsi_host_attr_show(param) \ +static ISCSI_CLASS_ATTR(host, field, S_IRUGO, show_host_param_##param, \ + NULL); + +iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME); +iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS); +iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS); +iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME); +iscsi_host_attr(port_state, ISCSI_HOST_PARAM_PORT_STATE); +iscsi_host_attr(port_speed, ISCSI_HOST_PARAM_PORT_SPEED); + +static struct attribute *iscsi_host_attrs[] = { + &dev_attr_host_netdev.attr, + &dev_attr_host_hwaddress.attr, + &dev_attr_host_ipaddress.attr, + &dev_attr_host_initiatorname.attr, + &dev_attr_host_port_state.attr, + &dev_attr_host_port_speed.attr, + NULL, +}; + +static umode_t iscsi_host_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *cdev = container_of(kobj, struct device, kobj); + struct Scsi_Host *shost = transport_class_to_shost(cdev); + struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); + int param; + + if (attr == &dev_attr_host_netdev.attr) + param = ISCSI_HOST_PARAM_NETDEV_NAME; + else if (attr == &dev_attr_host_hwaddress.attr) + param = ISCSI_HOST_PARAM_HWADDRESS; + else if (attr == &dev_attr_host_ipaddress.attr) + param = ISCSI_HOST_PARAM_IPADDRESS; + else if (attr == &dev_attr_host_initiatorname.attr) + param = ISCSI_HOST_PARAM_INITIATOR_NAME; + else if (attr == &dev_attr_host_port_state.attr) + param = ISCSI_HOST_PARAM_PORT_STATE; + else if (attr == &dev_attr_host_port_speed.attr) + param = ISCSI_HOST_PARAM_PORT_SPEED; + else { + WARN_ONCE(1, "Invalid host attr"); + return 0; + } + + return priv->iscsi_transport->attr_is_visible(ISCSI_HOST_PARAM, param); +} + +static struct attribute_group iscsi_host_group = { + .attrs = iscsi_host_attrs, + .is_visible = iscsi_host_attr_is_visible, +}; + +/* convert iscsi_port_speed values to ascii string name */ +static const struct { + enum iscsi_port_speed value; + char *name; +} iscsi_port_speed_names[] = { + 
{ISCSI_PORT_SPEED_UNKNOWN, "Unknown" }, + {ISCSI_PORT_SPEED_10MBPS, "10 Mbps" }, + {ISCSI_PORT_SPEED_100MBPS, "100 Mbps" }, + {ISCSI_PORT_SPEED_1GBPS, "1 Gbps" }, + {ISCSI_PORT_SPEED_10GBPS, "10 Gbps" }, + {ISCSI_PORT_SPEED_25GBPS, "25 Gbps" }, + {ISCSI_PORT_SPEED_40GBPS, "40 Gbps" }, +}; + +char *iscsi_get_port_speed_name(struct Scsi_Host *shost) +{ + int i; + char *speed = "Unknown!"; + struct iscsi_cls_host *ihost = shost->shost_data; + uint32_t port_speed = ihost->port_speed; + + for (i = 0; i < ARRAY_SIZE(iscsi_port_speed_names); i++) { + if (iscsi_port_speed_names[i].value & port_speed) { + speed = iscsi_port_speed_names[i].name; + break; + } + } + return speed; +} +EXPORT_SYMBOL_GPL(iscsi_get_port_speed_name); + +/* convert iscsi_port_state values to ascii string name */ +static const struct { + enum iscsi_port_state value; + char *name; +} iscsi_port_state_names[] = { + {ISCSI_PORT_STATE_DOWN, "LINK DOWN" }, + {ISCSI_PORT_STATE_UP, "LINK UP" }, +}; + +char *iscsi_get_port_state_name(struct Scsi_Host *shost) +{ + int i; + char *state = "Unknown!"; + struct iscsi_cls_host *ihost = shost->shost_data; + uint32_t port_state = ihost->port_state; + + for (i = 0; i < ARRAY_SIZE(iscsi_port_state_names); i++) { + if (iscsi_port_state_names[i].value & port_state) { + state = iscsi_port_state_names[i].name; + break; + } + } + return state; +} +EXPORT_SYMBOL_GPL(iscsi_get_port_state_name); + +static int iscsi_session_match(struct attribute_container *cont, + struct device *dev) +{ + struct iscsi_cls_session *session; + struct Scsi_Host *shost; + struct iscsi_internal *priv; + + if (!iscsi_is_session_dev(dev)) + return 0; + + session = iscsi_dev_to_session(dev); + shost = iscsi_session_to_shost(session); + if (!shost->transportt) + return 0; + + priv = to_iscsi_internal(shost->transportt); + if (priv->session_cont.ac.class != &iscsi_session_class.class) + return 0; + + return &priv->session_cont.ac == cont; +} + +static int iscsi_conn_match(struct attribute_container *cont, + struct device *dev) +{ + struct iscsi_cls_session *session; + struct iscsi_cls_conn *conn; + struct Scsi_Host *shost; + struct iscsi_internal *priv; + + if (!iscsi_is_conn_dev(dev)) + return 0; + + conn = iscsi_dev_to_conn(dev); + session = iscsi_dev_to_session(conn->dev.parent); + shost = iscsi_session_to_shost(session); + + if (!shost->transportt) + return 0; + + priv = to_iscsi_internal(shost->transportt); + if (priv->conn_cont.ac.class != &iscsi_connection_class.class) + return 0; + + return &priv->conn_cont.ac == cont; +} + +static int iscsi_host_match(struct attribute_container *cont, + struct device *dev) +{ + struct Scsi_Host *shost; + struct iscsi_internal *priv; + + if (!scsi_is_host_device(dev)) + return 0; + + shost = dev_to_shost(dev); + if (!shost->transportt || + shost->transportt->host_attrs.ac.class != &iscsi_host_class.class) + return 0; + + priv = to_iscsi_internal(shost->transportt); + return &priv->t.host_attrs.ac == cont; +} + +struct scsi_transport_template * +iscsi_register_transport(struct iscsi_transport *tt) +{ + struct iscsi_internal *priv; + unsigned long flags; + int err; + + BUG_ON(!tt); + WARN_ON(tt->ep_disconnect && !tt->unbind_conn); + + priv = iscsi_if_transport_lookup(tt); + if (priv) + return NULL; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return NULL; + INIT_LIST_HEAD(&priv->list); + priv->iscsi_transport = tt; + priv->t.user_scan = iscsi_user_scan; + + priv->dev.class = &iscsi_transport_class; + dev_set_name(&priv->dev, "%s", tt->name); + err = 
device_register(&priv->dev); + if (err) + goto put_dev; + + err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group); + if (err) + goto unregister_dev; + + /* host parameters */ + priv->t.host_attrs.ac.class = &iscsi_host_class.class; + priv->t.host_attrs.ac.match = iscsi_host_match; + priv->t.host_attrs.ac.grp = &iscsi_host_group; + priv->t.host_size = sizeof(struct iscsi_cls_host); + transport_container_register(&priv->t.host_attrs); + + /* connection parameters */ + priv->conn_cont.ac.class = &iscsi_connection_class.class; + priv->conn_cont.ac.match = iscsi_conn_match; + priv->conn_cont.ac.grp = &iscsi_conn_group; + transport_container_register(&priv->conn_cont); + + /* session parameters */ + priv->session_cont.ac.class = &iscsi_session_class.class; + priv->session_cont.ac.match = iscsi_session_match; + priv->session_cont.ac.grp = &iscsi_session_group; + transport_container_register(&priv->session_cont); + + spin_lock_irqsave(&iscsi_transport_lock, flags); + list_add(&priv->list, &iscsi_transports); + spin_unlock_irqrestore(&iscsi_transport_lock, flags); + + printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name); + return &priv->t; + +unregister_dev: + device_unregister(&priv->dev); + return NULL; +put_dev: + put_device(&priv->dev); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_register_transport); + +void iscsi_unregister_transport(struct iscsi_transport *tt) +{ + struct iscsi_internal *priv; + unsigned long flags; + + BUG_ON(!tt); + + mutex_lock(&rx_queue_mutex); + + priv = iscsi_if_transport_lookup(tt); + BUG_ON (!priv); + + spin_lock_irqsave(&iscsi_transport_lock, flags); + list_del(&priv->list); + spin_unlock_irqrestore(&iscsi_transport_lock, flags); + + transport_container_unregister(&priv->conn_cont); + transport_container_unregister(&priv->session_cont); + transport_container_unregister(&priv->t.host_attrs); + + sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group); + device_unregister(&priv->dev); + mutex_unlock(&rx_queue_mutex); +} +EXPORT_SYMBOL_GPL(iscsi_unregister_transport); + +void iscsi_dbg_trace(void (*trace)(struct device *dev, struct va_format *), + struct device *dev, const char *fmt, ...) 
+{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + trace(dev, &vaf); + va_end(args); +} +EXPORT_SYMBOL_GPL(iscsi_dbg_trace); + +static __init int iscsi_transport_init(void) +{ + int err; + struct netlink_kernel_cfg cfg = { + .groups = 1, + .input = iscsi_if_rx, + }; + printk(KERN_INFO "Loading iSCSI transport class v%s.\n", + ISCSI_TRANSPORT_VERSION); + + atomic_set(&iscsi_session_nr, 0); + + err = class_register(&iscsi_transport_class); + if (err) + return err; + + err = class_register(&iscsi_endpoint_class); + if (err) + goto unregister_transport_class; + + err = class_register(&iscsi_iface_class); + if (err) + goto unregister_endpoint_class; + + err = transport_class_register(&iscsi_host_class); + if (err) + goto unregister_iface_class; + + err = transport_class_register(&iscsi_connection_class); + if (err) + goto unregister_host_class; + + err = transport_class_register(&iscsi_session_class); + if (err) + goto unregister_conn_class; + + err = bus_register(&iscsi_flashnode_bus); + if (err) + goto unregister_session_class; + + nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg); + if (!nls) { + err = -ENOBUFS; + goto unregister_flashnode_bus; + } + + iscsi_conn_cleanup_workq = alloc_workqueue("%s", + WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0, + "iscsi_conn_cleanup"); + if (!iscsi_conn_cleanup_workq) { + err = -ENOMEM; + goto release_nls; + } + + return 0; + +release_nls: + netlink_kernel_release(nls); +unregister_flashnode_bus: + bus_unregister(&iscsi_flashnode_bus); +unregister_session_class: + transport_class_unregister(&iscsi_session_class); +unregister_conn_class: + transport_class_unregister(&iscsi_connection_class); +unregister_host_class: + transport_class_unregister(&iscsi_host_class); +unregister_iface_class: + class_unregister(&iscsi_iface_class); +unregister_endpoint_class: + class_unregister(&iscsi_endpoint_class); +unregister_transport_class: + class_unregister(&iscsi_transport_class); + return err; +} + +static void __exit iscsi_transport_exit(void) +{ + destroy_workqueue(iscsi_conn_cleanup_workq); + netlink_kernel_release(nls); + bus_unregister(&iscsi_flashnode_bus); + transport_class_unregister(&iscsi_connection_class); + transport_class_unregister(&iscsi_session_class); + transport_class_unregister(&iscsi_host_class); + class_unregister(&iscsi_endpoint_class); + class_unregister(&iscsi_iface_class); + class_unregister(&iscsi_transport_class); +} + +module_init(iscsi_transport_init); +module_exit(iscsi_transport_exit); + +MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, " + "Dmitry Yusupov <dmitry_yus@yahoo.com>, " + "Alex Aizman <itn780@yahoo.com>"); +MODULE_DESCRIPTION("iSCSI Transport Interface"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ISCSI_TRANSPORT_VERSION); +MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_ISCSI); diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c new file mode 100644 index 000000000..d704c484a --- /dev/null +++ b/drivers/scsi/scsi_transport_sas.c @@ -0,0 +1,1930 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2005-2006 Dell Inc. + * + * Serial Attached SCSI (SAS) transport class. + * + * The SAS transport class contains common code to deal with SAS HBAs, + * an approximated representation of SAS topologies in the driver model, + * and various sysfs attributes to expose these topologies and management + * interfaces to userspace.
+ * + * In addition to the basic SCSI core objects this transport class + * introduces two additional intermediate objects: The SAS PHY + * as represented by struct sas_phy defines an "outgoing" PHY on + * a SAS HBA or Expander, and the SAS remote PHY represented by + * struct sas_rphy defines an "incoming" PHY on a SAS Expander or + * end device. Note that this is purely a software concept, the + * underlying hardware for a PHY and a remote PHY is exactly + * the same. + * + * There is no concept of a SAS port in this code, users can see + * what PHYs form a wide port based on the port_identifier attribute, + * which is the same for all PHYs in a port. + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/jiffies.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/blkdev.h> +#include <linux/bsg.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_transport.h> +#include <scsi/scsi_transport_sas.h> + +#include "scsi_sas_internal.h" +struct sas_host_attrs { + struct list_head rphy_list; + struct mutex lock; + struct request_queue *q; + u32 next_target_id; + u32 next_expander_id; + int next_port_id; +}; +#define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data) + + +/* + * Hack to allow attributes of the same name in different objects. + */ +#define SAS_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \ + struct device_attribute dev_attr_##_prefix##_##_name = \ + __ATTR(_name,_mode,_show,_store) + + +/* + * Pretty printing helpers + */ + +#define sas_bitfield_name_match(title, table) \ +static ssize_t \ +get_sas_##title##_names(u32 table_key, char *buf) \ +{ \ + char *prefix = ""; \ + ssize_t len = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(table); i++) { \ + if (table[i].value & table_key) { \ + len += sprintf(buf + len, "%s%s", \ + prefix, table[i].name); \ + prefix = ", "; \ + } \ + } \ + len += sprintf(buf + len, "\n"); \ + return len; \ +} + +#define sas_bitfield_name_set(title, table) \ +static ssize_t \ +set_sas_##title##_names(u32 *table_key, const char *buf) \ +{ \ + ssize_t len = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(table); i++) { \ + len = strlen(table[i].name); \ + if (strncmp(buf, table[i].name, len) == 0 && \ + (buf[len] == '\n' || buf[len] == '\0')) { \ + *table_key = table[i].value; \ + return 0; \ + } \ + } \ + return -EINVAL; \ +} + +#define sas_bitfield_name_search(title, table) \ +static ssize_t \ +get_sas_##title##_names(u32 table_key, char *buf) \ +{ \ + ssize_t len = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(table); i++) { \ + if (table[i].value == table_key) { \ + len += sprintf(buf + len, "%s", \ + table[i].name); \ + break; \ + } \ + } \ + len += sprintf(buf + len, "\n"); \ + return len; \ +} + +static struct { + u32 value; + char *name; +} sas_device_type_names[] = { + { SAS_PHY_UNUSED, "unused" }, + { SAS_END_DEVICE, "end device" }, + { SAS_EDGE_EXPANDER_DEVICE, "edge expander" }, + { SAS_FANOUT_EXPANDER_DEVICE, "fanout expander" }, +}; +sas_bitfield_name_search(device_type, sas_device_type_names) + + +static struct { + u32 value; + char *name; +} sas_protocol_names[] = { + { SAS_PROTOCOL_SATA, "sata" }, + { SAS_PROTOCOL_SMP, "smp" }, + { SAS_PROTOCOL_STP, "stp" }, + { SAS_PROTOCOL_SSP, "ssp" }, +}; +sas_bitfield_name_match(protocol, sas_protocol_names) + +static struct { + u32 value; + char *name; +} sas_linkspeed_names[] = { + { SAS_LINK_RATE_UNKNOWN, "Unknown" }, + { SAS_PHY_DISABLED, "Phy disabled" }, + { SAS_LINK_RATE_FAILED, "Link Rate failed" }, + { SAS_SATA_SPINUP_HOLD, "Spin-up hold" }, + { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" }, + { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" }, + { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit"
}, + { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" }, + { SAS_LINK_RATE_22_5_GBPS, "22.5 Gbit" }, +}; +sas_bitfield_name_search(linkspeed, sas_linkspeed_names) +sas_bitfield_name_set(linkspeed, sas_linkspeed_names) + +static struct sas_end_device *sas_sdev_to_rdev(struct scsi_device *sdev) +{ + struct sas_rphy *rphy = target_to_rphy(sdev->sdev_target); + struct sas_end_device *rdev; + + BUG_ON(rphy->identify.device_type != SAS_END_DEVICE); + + rdev = rphy_to_end_device(rphy); + return rdev; +} + +static int sas_smp_dispatch(struct bsg_job *job) +{ + struct Scsi_Host *shost = dev_to_shost(job->dev); + struct sas_rphy *rphy = NULL; + + if (!scsi_is_host_device(job->dev)) + rphy = dev_to_rphy(job->dev); + + if (!job->reply_payload.payload_len) { + dev_warn(job->dev, "space for a smp response is missing\n"); + bsg_job_done(job, -EINVAL, 0); + return 0; + } + + to_sas_internal(shost->transportt)->f->smp_handler(job, shost, rphy); + return 0; +} + +static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy) +{ + struct request_queue *q; + + if (!to_sas_internal(shost->transportt)->f->smp_handler) { + printk("%s can't handle SMP requests\n", shost->hostt->name); + return 0; + } + + if (rphy) { + q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev), + sas_smp_dispatch, NULL, 0); + if (IS_ERR(q)) + return PTR_ERR(q); + rphy->q = q; + } else { + char name[20]; + + snprintf(name, sizeof(name), "sas_host%d", shost->host_no); + q = bsg_setup_queue(&shost->shost_gendev, name, + sas_smp_dispatch, NULL, 0); + if (IS_ERR(q)) + return PTR_ERR(q); + to_sas_host_attrs(shost)->q = q; + } + + return 0; +} + +/* + * SAS host attributes + */ + +static int sas_host_setup(struct transport_container *tc, struct device *dev, + struct device *cdev) +{ + struct Scsi_Host *shost = dev_to_shost(dev); + struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); + struct device *dma_dev = shost->dma_dev; + + INIT_LIST_HEAD(&sas_host->rphy_list); + mutex_init(&sas_host->lock); + sas_host->next_target_id = 0; + sas_host->next_expander_id = 0; + sas_host->next_port_id = 0; + + if (sas_bsg_initialize(shost, NULL)) + dev_printk(KERN_ERR, dev, "failed to add a bsg device %d\n", + shost->host_no); + + if (dma_dev->dma_mask) { + shost->opt_sectors = min_t(unsigned int, shost->max_sectors, + dma_opt_mapping_size(dma_dev) >> SECTOR_SHIFT); + } + + return 0; +} + +static int sas_host_remove(struct transport_container *tc, struct device *dev, + struct device *cdev) +{ + struct Scsi_Host *shost = dev_to_shost(dev); + struct request_queue *q = to_sas_host_attrs(shost)->q; + + bsg_remove_queue(q); + return 0; +} + +static DECLARE_TRANSPORT_CLASS(sas_host_class, + "sas_host", sas_host_setup, sas_host_remove, NULL); + +static int sas_host_match(struct attribute_container *cont, + struct device *dev) +{ + struct Scsi_Host *shost; + struct sas_internal *i; + + if (!scsi_is_host_device(dev)) + return 0; + shost = dev_to_shost(dev); + + if (!shost->transportt) + return 0; + if (shost->transportt->host_attrs.ac.class != + &sas_host_class.class) + return 0; + + i = to_sas_internal(shost->transportt); + return &i->t.host_attrs.ac == cont; +} + +static int do_sas_phy_delete(struct device *dev, void *data) +{ + int pass = (int)(unsigned long)data; + + if (pass == 0 && scsi_is_sas_port(dev)) + sas_port_delete(dev_to_sas_port(dev)); + else if (pass == 1 && scsi_is_sas_phy(dev)) + sas_phy_delete(dev_to_phy(dev)); + return 0; +} + +/** + * sas_remove_children - tear down a device's SAS data structures + * @dev: device belonging to the sas object + 
* + * Removes all SAS PHYs and remote PHYs for a given object + */ +void sas_remove_children(struct device *dev) +{ + device_for_each_child(dev, (void *)0, do_sas_phy_delete); + device_for_each_child(dev, (void *)1, do_sas_phy_delete); +} +EXPORT_SYMBOL(sas_remove_children); + +/** + * sas_remove_host - tear down a Scsi_Host's SAS data structures + * @shost: Scsi Host that is torn down + * + * Removes all SAS PHYs and remote PHYs for a given Scsi_Host and removes the + * Scsi_Host as well. + * + * Note: Do not call scsi_remove_host() on the Scsi_Host any more, as it is + * already removed. + */ +void sas_remove_host(struct Scsi_Host *shost) +{ + sas_remove_children(&shost->shost_gendev); + scsi_remove_host(shost); +} +EXPORT_SYMBOL(sas_remove_host); + +/** + * sas_get_address - return the SAS address of the device + * @sdev: scsi device + * + * Returns the SAS address of the scsi device + */ +u64 sas_get_address(struct scsi_device *sdev) +{ + struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); + + return rdev->rphy.identify.sas_address; +} +EXPORT_SYMBOL(sas_get_address); + +/** + * sas_tlr_supported - checking TLR bit in vpd 0x90 + * @sdev: scsi device struct + * + * Check whether Transport Layer Retries are supported. + * If VPD page 0x90 is present, TLR is supported. + * + */ +unsigned int +sas_tlr_supported(struct scsi_device *sdev) +{ + const int vpd_len = 32; + struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); + char *buffer = kzalloc(vpd_len, GFP_KERNEL); + int ret = 0; + + if (!buffer) + goto out; + + if (scsi_get_vpd_page(sdev, 0x90, buffer, vpd_len)) + goto out; + + /* + * Magic numbers: the VPD Protocol page (0x90) + * has a 4 byte header and then one entry per device port + * the TLR bit is at offset 8 on each port entry + * if we take the first port, that's at total offset 12 + */ + ret = buffer[12] & 0x01; + + out: + kfree(buffer); + rdev->tlr_supported = ret; + return ret; + +} +EXPORT_SYMBOL_GPL(sas_tlr_supported); + +/** + * sas_disable_tlr - setting TLR flags + * @sdev: scsi device struct + * + * Setting tlr_enabled flag to 0. + * + */ +void +sas_disable_tlr(struct scsi_device *sdev) +{ + struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); + + rdev->tlr_enabled = 0; +} +EXPORT_SYMBOL_GPL(sas_disable_tlr); + +/** + * sas_enable_tlr - setting TLR flags + * @sdev: scsi device struct + * + * Setting tlr_enabled flag to 1. 
+ * + */ +void sas_enable_tlr(struct scsi_device *sdev) +{ + unsigned int tlr_supported = 0; + tlr_supported = sas_tlr_supported(sdev); + + if (tlr_supported) { + struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); + + rdev->tlr_enabled = 1; + } + + return; +} +EXPORT_SYMBOL_GPL(sas_enable_tlr); + +unsigned int sas_is_tlr_enabled(struct scsi_device *sdev) +{ + struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); + return rdev->tlr_enabled; +} +EXPORT_SYMBOL_GPL(sas_is_tlr_enabled); + +/* + * SAS Phy attributes + */ + +#define sas_phy_show_simple(field, name, format_string, cast) \ +static ssize_t \ +show_sas_phy_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct sas_phy *phy = transport_class_to_phy(dev); \ + \ + return snprintf(buf, 20, format_string, cast phy->field); \ +} + +#define sas_phy_simple_attr(field, name, format_string, type) \ + sas_phy_show_simple(field, name, format_string, (type)) \ +static DEVICE_ATTR(name, S_IRUGO, show_sas_phy_##name, NULL) + +#define sas_phy_show_protocol(field, name) \ +static ssize_t \ +show_sas_phy_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct sas_phy *phy = transport_class_to_phy(dev); \ + \ + if (!phy->field) \ + return snprintf(buf, 20, "none\n"); \ + return get_sas_protocol_names(phy->field, buf); \ +} + +#define sas_phy_protocol_attr(field, name) \ + sas_phy_show_protocol(field, name) \ +static DEVICE_ATTR(name, S_IRUGO, show_sas_phy_##name, NULL) + +#define sas_phy_show_linkspeed(field) \ +static ssize_t \ +show_sas_phy_##field(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct sas_phy *phy = transport_class_to_phy(dev); \ + \ + return get_sas_linkspeed_names(phy->field, buf); \ +} + +/* Fudge to tell if we're minimum or maximum */ +#define sas_phy_store_linkspeed(field) \ +static ssize_t \ +store_sas_phy_##field(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + struct sas_phy *phy = transport_class_to_phy(dev); \ + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); \ + struct sas_internal *i = to_sas_internal(shost->transportt); \ + u32 value; \ + struct sas_phy_linkrates rates = {0}; \ + int error; \ + \ + error = set_sas_linkspeed_names(&value, buf); \ + if (error) \ + return error; \ + rates.field = value; \ + error = i->f->set_phy_speed(phy, &rates); \ + \ + return error ? error : count; \ +} + +#define sas_phy_linkspeed_rw_attr(field) \ + sas_phy_show_linkspeed(field) \ + sas_phy_store_linkspeed(field) \ +static DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, \ + store_sas_phy_##field) + +#define sas_phy_linkspeed_attr(field) \ + sas_phy_show_linkspeed(field) \ +static DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL) + + +#define sas_phy_show_linkerror(field) \ +static ssize_t \ +show_sas_phy_##field(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct sas_phy *phy = transport_class_to_phy(dev); \ + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); \ + struct sas_internal *i = to_sas_internal(shost->transportt); \ + int error; \ + \ + error = i->f->get_linkerrors ? 
i->f->get_linkerrors(phy) : 0; \ + if (error) \ + return error; \ + return snprintf(buf, 20, "%u\n", phy->field); \ +} + +#define sas_phy_linkerror_attr(field) \ + sas_phy_show_linkerror(field) \ +static DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL) + + +static ssize_t +show_sas_device_type(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct sas_phy *phy = transport_class_to_phy(dev); + + if (!phy->identify.device_type) + return snprintf(buf, 20, "none\n"); + return get_sas_device_type_names(phy->identify.device_type, buf); +} +static DEVICE_ATTR(device_type, S_IRUGO, show_sas_device_type, NULL); + +static ssize_t do_sas_phy_enable(struct device *dev, + size_t count, int enable) +{ + struct sas_phy *phy = transport_class_to_phy(dev); + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_internal *i = to_sas_internal(shost->transportt); + int error; + + error = i->f->phy_enable(phy, enable); + if (error) + return error; + phy->enabled = enable; + return count; +}; + +static ssize_t +store_sas_phy_enable(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + if (count < 1) + return -EINVAL; + + switch (buf[0]) { + case '0': + do_sas_phy_enable(dev, count, 0); + break; + case '1': + do_sas_phy_enable(dev, count, 1); + break; + default: + return -EINVAL; + } + + return count; +} + +static ssize_t +show_sas_phy_enable(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct sas_phy *phy = transport_class_to_phy(dev); + + return snprintf(buf, 20, "%d\n", phy->enabled); +} + +static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, show_sas_phy_enable, + store_sas_phy_enable); + +static ssize_t +do_sas_phy_reset(struct device *dev, size_t count, int hard_reset) +{ + struct sas_phy *phy = transport_class_to_phy(dev); + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_internal *i = to_sas_internal(shost->transportt); + int error; + + error = i->f->phy_reset(phy, hard_reset); + if (error) + return error; + phy->enabled = 1; + return count; +}; + +static ssize_t +store_sas_link_reset(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + return do_sas_phy_reset(dev, count, 0); +} +static DEVICE_ATTR(link_reset, S_IWUSR, NULL, store_sas_link_reset); + +static ssize_t +store_sas_hard_reset(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + return do_sas_phy_reset(dev, count, 1); +} +static DEVICE_ATTR(hard_reset, S_IWUSR, NULL, store_sas_hard_reset); + +sas_phy_protocol_attr(identify.initiator_port_protocols, + initiator_port_protocols); +sas_phy_protocol_attr(identify.target_port_protocols, + target_port_protocols); +sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", + unsigned long long); +sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); +sas_phy_linkspeed_attr(negotiated_linkrate); +sas_phy_linkspeed_attr(minimum_linkrate_hw); +sas_phy_linkspeed_rw_attr(minimum_linkrate); +sas_phy_linkspeed_attr(maximum_linkrate_hw); +sas_phy_linkspeed_rw_attr(maximum_linkrate); +sas_phy_linkerror_attr(invalid_dword_count); +sas_phy_linkerror_attr(running_disparity_error_count); +sas_phy_linkerror_attr(loss_of_dword_sync_count); +sas_phy_linkerror_attr(phy_reset_problem_count); + +static int sas_phy_setup(struct transport_container *tc, struct device *dev, + struct device *cdev) +{ + struct sas_phy *phy = dev_to_phy(dev); + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct 
sas_internal *i = to_sas_internal(shost->transportt); + + if (i->f->phy_setup) + i->f->phy_setup(phy); + + return 0; +} + +static DECLARE_TRANSPORT_CLASS(sas_phy_class, + "sas_phy", sas_phy_setup, NULL, NULL); + +static int sas_phy_match(struct attribute_container *cont, struct device *dev) +{ + struct Scsi_Host *shost; + struct sas_internal *i; + + if (!scsi_is_sas_phy(dev)) + return 0; + shost = dev_to_shost(dev->parent); + + if (!shost->transportt) + return 0; + if (shost->transportt->host_attrs.ac.class != + &sas_host_class.class) + return 0; + + i = to_sas_internal(shost->transportt); + return &i->phy_attr_cont.ac == cont; +} + +static void sas_phy_release(struct device *dev) +{ + struct sas_phy *phy = dev_to_phy(dev); + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_internal *i = to_sas_internal(shost->transportt); + + if (i->f->phy_release) + i->f->phy_release(phy); + put_device(dev->parent); + kfree(phy); +} + +/** + * sas_phy_alloc - allocates and initialize a SAS PHY structure + * @parent: Parent device + * @number: Phy index + * + * Allocates an SAS PHY structure. It will be added in the device tree + * below the device specified by @parent, which has to be either a Scsi_Host + * or sas_rphy. + * + * Returns: + * SAS PHY allocated or %NULL if the allocation failed. + */ +struct sas_phy *sas_phy_alloc(struct device *parent, int number) +{ + struct Scsi_Host *shost = dev_to_shost(parent); + struct sas_phy *phy; + + phy = kzalloc(sizeof(*phy), GFP_KERNEL); + if (!phy) + return NULL; + + phy->number = number; + phy->enabled = 1; + + device_initialize(&phy->dev); + phy->dev.parent = get_device(parent); + phy->dev.release = sas_phy_release; + INIT_LIST_HEAD(&phy->port_siblings); + if (scsi_is_sas_expander_device(parent)) { + struct sas_rphy *rphy = dev_to_rphy(parent); + dev_set_name(&phy->dev, "phy-%d:%d:%d", shost->host_no, + rphy->scsi_target_id, number); + } else + dev_set_name(&phy->dev, "phy-%d:%d", shost->host_no, number); + + transport_setup_device(&phy->dev); + + return phy; +} +EXPORT_SYMBOL(sas_phy_alloc); + +/** + * sas_phy_add - add a SAS PHY to the device hierarchy + * @phy: The PHY to be added + * + * Publishes a SAS PHY to the rest of the system. + */ +int sas_phy_add(struct sas_phy *phy) +{ + int error; + + error = device_add(&phy->dev); + if (error) + return error; + + error = transport_add_device(&phy->dev); + if (error) { + device_del(&phy->dev); + return error; + } + transport_configure_device(&phy->dev); + + return 0; +} +EXPORT_SYMBOL(sas_phy_add); + +/** + * sas_phy_free - free a SAS PHY + * @phy: SAS PHY to free + * + * Frees the specified SAS PHY. + * + * Note: + * This function must only be called on a PHY that has not + * successfully been added using sas_phy_add(). + */ +void sas_phy_free(struct sas_phy *phy) +{ + transport_destroy_device(&phy->dev); + put_device(&phy->dev); +} +EXPORT_SYMBOL(sas_phy_free); + +/** + * sas_phy_delete - remove SAS PHY + * @phy: SAS PHY to remove + * + * Removes the specified SAS PHY. If the SAS PHY has an + * associated remote PHY it is removed before. 
+ */ +void +sas_phy_delete(struct sas_phy *phy) +{ + struct device *dev = &phy->dev; + + /* this happens if the phy is still part of a port when deleted */ + BUG_ON(!list_empty(&phy->port_siblings)); + + transport_remove_device(dev); + device_del(dev); + transport_destroy_device(dev); + put_device(dev); +} +EXPORT_SYMBOL(sas_phy_delete); + +/** + * scsi_is_sas_phy - check if a struct device represents a SAS PHY + * @dev: device to check + * + * Returns: + * %1 if the device represents a SAS PHY, %0 else + */ +int scsi_is_sas_phy(const struct device *dev) +{ + return dev->release == sas_phy_release; +} +EXPORT_SYMBOL(scsi_is_sas_phy); + +/* + * SAS Port attributes + */ +#define sas_port_show_simple(field, name, format_string, cast) \ +static ssize_t \ +show_sas_port_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct sas_port *port = transport_class_to_sas_port(dev); \ + \ + return snprintf(buf, 20, format_string, cast port->field); \ +} + +#define sas_port_simple_attr(field, name, format_string, type) \ + sas_port_show_simple(field, name, format_string, (type)) \ +static DEVICE_ATTR(name, S_IRUGO, show_sas_port_##name, NULL) + +sas_port_simple_attr(num_phys, num_phys, "%d\n", int); + +static DECLARE_TRANSPORT_CLASS(sas_port_class, + "sas_port", NULL, NULL, NULL); + +static int sas_port_match(struct attribute_container *cont, struct device *dev) +{ + struct Scsi_Host *shost; + struct sas_internal *i; + + if (!scsi_is_sas_port(dev)) + return 0; + shost = dev_to_shost(dev->parent); + + if (!shost->transportt) + return 0; + if (shost->transportt->host_attrs.ac.class != + &sas_host_class.class) + return 0; + + i = to_sas_internal(shost->transportt); + return &i->port_attr_cont.ac == cont; +} + + +static void sas_port_release(struct device *dev) +{ + struct sas_port *port = dev_to_sas_port(dev); + + BUG_ON(!list_empty(&port->phy_list)); + + put_device(dev->parent); + kfree(port); +} + +static void sas_port_create_link(struct sas_port *port, + struct sas_phy *phy) +{ + int res; + + res = sysfs_create_link(&port->dev.kobj, &phy->dev.kobj, + dev_name(&phy->dev)); + if (res) + goto err; + res = sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port"); + if (res) + goto err; + return; +err: + printk(KERN_ERR "%s: Cannot create port links, err=%d\n", + __func__, res); +} + +static void sas_port_delete_link(struct sas_port *port, + struct sas_phy *phy) +{ + sysfs_remove_link(&port->dev.kobj, dev_name(&phy->dev)); + sysfs_remove_link(&phy->dev.kobj, "port"); +} + +/** sas_port_alloc - allocate and initialize a SAS port structure + * + * @parent: parent device + * @port_id: port number + * + * Allocates a SAS port structure. It will be added to the device tree + * below the device specified by @parent which must be either a Scsi_Host + * or a sas_expander_device. 
+ * + * Returns %NULL on error + */ +struct sas_port *sas_port_alloc(struct device *parent, int port_id) +{ + struct Scsi_Host *shost = dev_to_shost(parent); + struct sas_port *port; + + port = kzalloc(sizeof(*port), GFP_KERNEL); + if (!port) + return NULL; + + port->port_identifier = port_id; + + device_initialize(&port->dev); + + port->dev.parent = get_device(parent); + port->dev.release = sas_port_release; + + mutex_init(&port->phy_list_mutex); + INIT_LIST_HEAD(&port->phy_list); + + if (scsi_is_sas_expander_device(parent)) { + struct sas_rphy *rphy = dev_to_rphy(parent); + dev_set_name(&port->dev, "port-%d:%d:%d", shost->host_no, + rphy->scsi_target_id, port->port_identifier); + } else + dev_set_name(&port->dev, "port-%d:%d", shost->host_no, + port->port_identifier); + + transport_setup_device(&port->dev); + + return port; +} +EXPORT_SYMBOL(sas_port_alloc); + +/** sas_port_alloc_num - allocate and initialize a SAS port structure + * + * @parent: parent device + * + * Allocates a SAS port structure and a number to go with it. This + * interface is really for adapters where the port number has no + * meaning, so the sas class should manage them. It will be added to + * the device tree below the device specified by @parent which must be + * either a Scsi_Host or a sas_expander_device. + * + * Returns %NULL on error + */ +struct sas_port *sas_port_alloc_num(struct device *parent) +{ + int index; + struct Scsi_Host *shost = dev_to_shost(parent); + struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); + + /* FIXME: use idr for this eventually */ + mutex_lock(&sas_host->lock); + if (scsi_is_sas_expander_device(parent)) { + struct sas_rphy *rphy = dev_to_rphy(parent); + struct sas_expander_device *exp = rphy_to_expander_device(rphy); + + index = exp->next_port_id++; + } else + index = sas_host->next_port_id++; + mutex_unlock(&sas_host->lock); + return sas_port_alloc(parent, index); +} +EXPORT_SYMBOL(sas_port_alloc_num); + +/** + * sas_port_add - add a SAS port to the device hierarchy + * @port: port to be added + * + * publishes a port to the rest of the system + */ +int sas_port_add(struct sas_port *port) +{ + int error; + + /* No phys should be added until this is made visible */ + BUG_ON(!list_empty(&port->phy_list)); + + error = device_add(&port->dev); + + if (error) + return error; + + transport_add_device(&port->dev); + transport_configure_device(&port->dev); + + return 0; +} +EXPORT_SYMBOL(sas_port_add); + +/** + * sas_port_free - free a SAS PORT + * @port: SAS PORT to free + * + * Frees the specified SAS PORT. + * + * Note: + * This function must only be called on a PORT that has not + * successfully been added using sas_port_add(). + */ +void sas_port_free(struct sas_port *port) +{ + transport_destroy_device(&port->dev); + put_device(&port->dev); +} +EXPORT_SYMBOL(sas_port_free); + +/** + * sas_port_delete - remove SAS PORT + * @port: SAS PORT to remove + * + * Removes the specified SAS PORT. If the SAS PORT has + * associated phys, unlink them from the port as well. 
+ */ +void sas_port_delete(struct sas_port *port) +{ + struct device *dev = &port->dev; + struct sas_phy *phy, *tmp_phy; + + if (port->rphy) { + sas_rphy_delete(port->rphy); + port->rphy = NULL; + } + + mutex_lock(&port->phy_list_mutex); + list_for_each_entry_safe(phy, tmp_phy, &port->phy_list, + port_siblings) { + sas_port_delete_link(port, phy); + list_del_init(&phy->port_siblings); + } + mutex_unlock(&port->phy_list_mutex); + + if (port->is_backlink) { + struct device *parent = port->dev.parent; + + sysfs_remove_link(&port->dev.kobj, dev_name(parent)); + port->is_backlink = 0; + } + + transport_remove_device(dev); + device_del(dev); + transport_destroy_device(dev); + put_device(dev); +} +EXPORT_SYMBOL(sas_port_delete); + +/** + * scsi_is_sas_port - check if a struct device represents a SAS port + * @dev: device to check + * + * Returns: + * %1 if the device represents a SAS Port, %0 else + */ +int scsi_is_sas_port(const struct device *dev) +{ + return dev->release == sas_port_release; +} +EXPORT_SYMBOL(scsi_is_sas_port); + +/** + * sas_port_get_phy - try to take a reference on a port member + * @port: port to check + */ +struct sas_phy *sas_port_get_phy(struct sas_port *port) +{ + struct sas_phy *phy; + + mutex_lock(&port->phy_list_mutex); + if (list_empty(&port->phy_list)) + phy = NULL; + else { + struct list_head *ent = port->phy_list.next; + + phy = list_entry(ent, typeof(*phy), port_siblings); + get_device(&phy->dev); + } + mutex_unlock(&port->phy_list_mutex); + + return phy; +} +EXPORT_SYMBOL(sas_port_get_phy); + +/** + * sas_port_add_phy - add another phy to a port to form a wide port + * @port: port to add the phy to + * @phy: phy to add + * + * When a port is initially created, it is empty (has no phys). All + * ports must have at least one phy to operated, and all wide ports + * must have at least two. The current code makes no difference + * between ports and wide ports, but the only object that can be + * connected to a remote device is a port, so ports must be formed on + * all devices with phys if they're connected to anything. + */ +void sas_port_add_phy(struct sas_port *port, struct sas_phy *phy) +{ + mutex_lock(&port->phy_list_mutex); + if (unlikely(!list_empty(&phy->port_siblings))) { + /* make sure we're already on this port */ + struct sas_phy *tmp; + + list_for_each_entry(tmp, &port->phy_list, port_siblings) + if (tmp == phy) + break; + /* If this trips, you added a phy that was already + * part of a different port */ + if (unlikely(tmp != phy)) { + dev_printk(KERN_ERR, &port->dev, "trying to add phy %s fails: it's already part of another port\n", + dev_name(&phy->dev)); + BUG(); + } + } else { + sas_port_create_link(port, phy); + list_add_tail(&phy->port_siblings, &port->phy_list); + port->num_phys++; + } + mutex_unlock(&port->phy_list_mutex); +} +EXPORT_SYMBOL(sas_port_add_phy); + +/** + * sas_port_delete_phy - remove a phy from a port or wide port + * @port: port to remove the phy from + * @phy: phy to remove + * + * This operation is used for tearing down ports again. It must be + * done to every port or wide port before calling sas_port_delete. 
+ */ +void sas_port_delete_phy(struct sas_port *port, struct sas_phy *phy) +{ + mutex_lock(&port->phy_list_mutex); + sas_port_delete_link(port, phy); + list_del_init(&phy->port_siblings); + port->num_phys--; + mutex_unlock(&port->phy_list_mutex); +} +EXPORT_SYMBOL(sas_port_delete_phy); + +void sas_port_mark_backlink(struct sas_port *port) +{ + int res; + struct device *parent = port->dev.parent->parent->parent; + + if (port->is_backlink) + return; + port->is_backlink = 1; + res = sysfs_create_link(&port->dev.kobj, &parent->kobj, + dev_name(parent)); + if (res) + goto err; + return; +err: + printk(KERN_ERR "%s: Cannot create port backlink, err=%d\n", + __func__, res); + +} +EXPORT_SYMBOL(sas_port_mark_backlink); + +/* + * SAS remote PHY attributes. + */ + +#define sas_rphy_show_simple(field, name, format_string, cast) \ +static ssize_t \ +show_sas_rphy_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct sas_rphy *rphy = transport_class_to_rphy(dev); \ + \ + return snprintf(buf, 20, format_string, cast rphy->field); \ +} + +#define sas_rphy_simple_attr(field, name, format_string, type) \ + sas_rphy_show_simple(field, name, format_string, (type)) \ +static SAS_DEVICE_ATTR(rphy, name, S_IRUGO, \ + show_sas_rphy_##name, NULL) + +#define sas_rphy_show_protocol(field, name) \ +static ssize_t \ +show_sas_rphy_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct sas_rphy *rphy = transport_class_to_rphy(dev); \ + \ + if (!rphy->field) \ + return snprintf(buf, 20, "none\n"); \ + return get_sas_protocol_names(rphy->field, buf); \ +} + +#define sas_rphy_protocol_attr(field, name) \ + sas_rphy_show_protocol(field, name) \ +static SAS_DEVICE_ATTR(rphy, name, S_IRUGO, \ + show_sas_rphy_##name, NULL) + +static ssize_t +show_sas_rphy_device_type(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct sas_rphy *rphy = transport_class_to_rphy(dev); + + if (!rphy->identify.device_type) + return snprintf(buf, 20, "none\n"); + return get_sas_device_type_names( + rphy->identify.device_type, buf); +} + +static SAS_DEVICE_ATTR(rphy, device_type, S_IRUGO, + show_sas_rphy_device_type, NULL); + +static ssize_t +show_sas_rphy_enclosure_identifier(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct sas_rphy *rphy = transport_class_to_rphy(dev); + struct sas_phy *phy = dev_to_phy(rphy->dev.parent); + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_internal *i = to_sas_internal(shost->transportt); + u64 identifier; + int error; + + error = i->f->get_enclosure_identifier(rphy, &identifier); + if (error) + return error; + return sprintf(buf, "0x%llx\n", (unsigned long long)identifier); +} + +static SAS_DEVICE_ATTR(rphy, enclosure_identifier, S_IRUGO, + show_sas_rphy_enclosure_identifier, NULL); + +static ssize_t +show_sas_rphy_bay_identifier(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct sas_rphy *rphy = transport_class_to_rphy(dev); + struct sas_phy *phy = dev_to_phy(rphy->dev.parent); + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_internal *i = to_sas_internal(shost->transportt); + int val; + + val = i->f->get_bay_identifier(rphy); + if (val < 0) + return val; + return sprintf(buf, "%d\n", val); +} + +static SAS_DEVICE_ATTR(rphy, bay_identifier, S_IRUGO, + show_sas_rphy_bay_identifier, NULL); + +sas_rphy_protocol_attr(identify.initiator_port_protocols, + initiator_port_protocols); 
+sas_rphy_protocol_attr(identify.target_port_protocols, target_port_protocols); +sas_rphy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", + unsigned long long); +sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); +sas_rphy_simple_attr(scsi_target_id, scsi_target_id, "%d\n", u32); + +/* only need 8 bytes of data plus header (4 or 8) */ +#define BUF_SIZE 64 + +int sas_read_port_mode_page(struct scsi_device *sdev) +{ + char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata; + struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); + struct scsi_mode_data mode_data; + int error; + + if (!buffer) + return -ENOMEM; + + error = scsi_mode_sense(sdev, 1, 0x19, 0, buffer, BUF_SIZE, 30*HZ, 3, + &mode_data, NULL); + + if (error) + goto out; + + msdata = buffer + mode_data.header_length + + mode_data.block_descriptor_length; + + if (msdata - buffer > BUF_SIZE - 8) + goto out; + + error = 0; + + rdev->ready_led_meaning = msdata[2] & 0x10 ? 1 : 0; + rdev->I_T_nexus_loss_timeout = (msdata[4] << 8) + msdata[5]; + rdev->initiator_response_timeout = (msdata[6] << 8) + msdata[7]; + + out: + kfree(buffer); + return error; +} +EXPORT_SYMBOL(sas_read_port_mode_page); + +static DECLARE_TRANSPORT_CLASS(sas_end_dev_class, + "sas_end_device", NULL, NULL, NULL); + +#define sas_end_dev_show_simple(field, name, format_string, cast) \ +static ssize_t \ +show_sas_end_dev_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct sas_rphy *rphy = transport_class_to_rphy(dev); \ + struct sas_end_device *rdev = rphy_to_end_device(rphy); \ + \ + return snprintf(buf, 20, format_string, cast rdev->field); \ +} + +#define sas_end_dev_simple_attr(field, name, format_string, type) \ + sas_end_dev_show_simple(field, name, format_string, (type)) \ +static SAS_DEVICE_ATTR(end_dev, name, S_IRUGO, \ + show_sas_end_dev_##name, NULL) + +sas_end_dev_simple_attr(ready_led_meaning, ready_led_meaning, "%d\n", int); +sas_end_dev_simple_attr(I_T_nexus_loss_timeout, I_T_nexus_loss_timeout, + "%d\n", int); +sas_end_dev_simple_attr(initiator_response_timeout, initiator_response_timeout, + "%d\n", int); +sas_end_dev_simple_attr(tlr_supported, tlr_supported, + "%d\n", int); +sas_end_dev_simple_attr(tlr_enabled, tlr_enabled, + "%d\n", int); + +static DECLARE_TRANSPORT_CLASS(sas_expander_class, + "sas_expander", NULL, NULL, NULL); + +#define sas_expander_show_simple(field, name, format_string, cast) \ +static ssize_t \ +show_sas_expander_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct sas_rphy *rphy = transport_class_to_rphy(dev); \ + struct sas_expander_device *edev = rphy_to_expander_device(rphy); \ + \ + return snprintf(buf, 20, format_string, cast edev->field); \ +} + +#define sas_expander_simple_attr(field, name, format_string, type) \ + sas_expander_show_simple(field, name, format_string, (type)) \ +static SAS_DEVICE_ATTR(expander, name, S_IRUGO, \ + show_sas_expander_##name, NULL) + +sas_expander_simple_attr(vendor_id, vendor_id, "%s\n", char *); +sas_expander_simple_attr(product_id, product_id, "%s\n", char *); +sas_expander_simple_attr(product_rev, product_rev, "%s\n", char *); +sas_expander_simple_attr(component_vendor_id, component_vendor_id, + "%s\n", char *); +sas_expander_simple_attr(component_id, component_id, "%u\n", unsigned int); +sas_expander_simple_attr(component_revision_id, component_revision_id, "%u\n", + unsigned int); +sas_expander_simple_attr(level, level, "%d\n", int); + +static DECLARE_TRANSPORT_CLASS(sas_rphy_class, 
+ "sas_device", NULL, NULL, NULL); + +static int sas_rphy_match(struct attribute_container *cont, struct device *dev) +{ + struct Scsi_Host *shost; + struct sas_internal *i; + + if (!scsi_is_sas_rphy(dev)) + return 0; + shost = dev_to_shost(dev->parent->parent); + + if (!shost->transportt) + return 0; + if (shost->transportt->host_attrs.ac.class != + &sas_host_class.class) + return 0; + + i = to_sas_internal(shost->transportt); + return &i->rphy_attr_cont.ac == cont; +} + +static int sas_end_dev_match(struct attribute_container *cont, + struct device *dev) +{ + struct Scsi_Host *shost; + struct sas_internal *i; + struct sas_rphy *rphy; + + if (!scsi_is_sas_rphy(dev)) + return 0; + shost = dev_to_shost(dev->parent->parent); + rphy = dev_to_rphy(dev); + + if (!shost->transportt) + return 0; + if (shost->transportt->host_attrs.ac.class != + &sas_host_class.class) + return 0; + + i = to_sas_internal(shost->transportt); + return &i->end_dev_attr_cont.ac == cont && + rphy->identify.device_type == SAS_END_DEVICE; +} + +static int sas_expander_match(struct attribute_container *cont, + struct device *dev) +{ + struct Scsi_Host *shost; + struct sas_internal *i; + struct sas_rphy *rphy; + + if (!scsi_is_sas_rphy(dev)) + return 0; + shost = dev_to_shost(dev->parent->parent); + rphy = dev_to_rphy(dev); + + if (!shost->transportt) + return 0; + if (shost->transportt->host_attrs.ac.class != + &sas_host_class.class) + return 0; + + i = to_sas_internal(shost->transportt); + return &i->expander_attr_cont.ac == cont && + (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE || + rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE); +} + +static void sas_expander_release(struct device *dev) +{ + struct sas_rphy *rphy = dev_to_rphy(dev); + struct sas_expander_device *edev = rphy_to_expander_device(rphy); + + put_device(dev->parent); + kfree(edev); +} + +static void sas_end_device_release(struct device *dev) +{ + struct sas_rphy *rphy = dev_to_rphy(dev); + struct sas_end_device *edev = rphy_to_end_device(rphy); + + put_device(dev->parent); + kfree(edev); +} + +/** + * sas_rphy_initialize - common rphy initialization + * @rphy: rphy to initialise + * + * Used by both sas_end_device_alloc() and sas_expander_alloc() to + * initialise the common rphy component of each. + */ +static void sas_rphy_initialize(struct sas_rphy *rphy) +{ + INIT_LIST_HEAD(&rphy->list); +} + +/** + * sas_end_device_alloc - allocate an rphy for an end device + * @parent: which port + * + * Allocates an SAS remote PHY structure, connected to @parent. + * + * Returns: + * SAS PHY allocated or %NULL if the allocation failed. 
+ */ +struct sas_rphy *sas_end_device_alloc(struct sas_port *parent) +{ + struct Scsi_Host *shost = dev_to_shost(&parent->dev); + struct sas_end_device *rdev; + + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); + if (!rdev) { + return NULL; + } + + device_initialize(&rdev->rphy.dev); + rdev->rphy.dev.parent = get_device(&parent->dev); + rdev->rphy.dev.release = sas_end_device_release; + if (scsi_is_sas_expander_device(parent->dev.parent)) { + struct sas_rphy *rphy = dev_to_rphy(parent->dev.parent); + dev_set_name(&rdev->rphy.dev, "end_device-%d:%d:%d", + shost->host_no, rphy->scsi_target_id, + parent->port_identifier); + } else + dev_set_name(&rdev->rphy.dev, "end_device-%d:%d", + shost->host_no, parent->port_identifier); + rdev->rphy.identify.device_type = SAS_END_DEVICE; + sas_rphy_initialize(&rdev->rphy); + transport_setup_device(&rdev->rphy.dev); + + return &rdev->rphy; +} +EXPORT_SYMBOL(sas_end_device_alloc); + +/** + * sas_expander_alloc - allocate an rphy for an end device + * @parent: which port + * @type: SAS_EDGE_EXPANDER_DEVICE or SAS_FANOUT_EXPANDER_DEVICE + * + * Allocates an SAS remote PHY structure, connected to @parent. + * + * Returns: + * SAS PHY allocated or %NULL if the allocation failed. + */ +struct sas_rphy *sas_expander_alloc(struct sas_port *parent, + enum sas_device_type type) +{ + struct Scsi_Host *shost = dev_to_shost(&parent->dev); + struct sas_expander_device *rdev; + struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); + + BUG_ON(type != SAS_EDGE_EXPANDER_DEVICE && + type != SAS_FANOUT_EXPANDER_DEVICE); + + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); + if (!rdev) { + return NULL; + } + + device_initialize(&rdev->rphy.dev); + rdev->rphy.dev.parent = get_device(&parent->dev); + rdev->rphy.dev.release = sas_expander_release; + mutex_lock(&sas_host->lock); + rdev->rphy.scsi_target_id = sas_host->next_expander_id++; + mutex_unlock(&sas_host->lock); + dev_set_name(&rdev->rphy.dev, "expander-%d:%d", + shost->host_no, rdev->rphy.scsi_target_id); + rdev->rphy.identify.device_type = type; + sas_rphy_initialize(&rdev->rphy); + transport_setup_device(&rdev->rphy.dev); + + return &rdev->rphy; +} +EXPORT_SYMBOL(sas_expander_alloc); + +/** + * sas_rphy_add - add a SAS remote PHY to the device hierarchy + * @rphy: The remote PHY to be added + * + * Publishes a SAS remote PHY to the rest of the system. 
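+ *
+ * A minimal calling sequence for an SSP end device might look like the
+ * sketch below, where port is assumed to be a sas_port that has already
+ * been published with sas_port_add() and sas_addr stands in for the
+ * address discovered by the LLDD:
+ *
+ *	struct sas_rphy *rphy = sas_end_device_alloc(port);
+ *
+ *	if (!rphy)
+ *		return -ENOMEM;
+ *	rphy->identify.sas_address = sas_addr;
+ *	rphy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
+ *	if (sas_rphy_add(rphy))
+ *		sas_rphy_free(rphy);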
+ */ +int sas_rphy_add(struct sas_rphy *rphy) +{ + struct sas_port *parent = dev_to_sas_port(rphy->dev.parent); + struct Scsi_Host *shost = dev_to_shost(parent->dev.parent); + struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); + struct sas_identify *identify = &rphy->identify; + int error; + + if (parent->rphy) + return -ENXIO; + parent->rphy = rphy; + + error = device_add(&rphy->dev); + if (error) + return error; + transport_add_device(&rphy->dev); + transport_configure_device(&rphy->dev); + if (sas_bsg_initialize(shost, rphy)) + printk("fail to a bsg device %s\n", dev_name(&rphy->dev)); + + + mutex_lock(&sas_host->lock); + list_add_tail(&rphy->list, &sas_host->rphy_list); + if (identify->device_type == SAS_END_DEVICE && + (identify->target_port_protocols & + (SAS_PROTOCOL_SSP | SAS_PROTOCOL_STP | SAS_PROTOCOL_SATA))) + rphy->scsi_target_id = sas_host->next_target_id++; + else if (identify->device_type == SAS_END_DEVICE) + rphy->scsi_target_id = -1; + mutex_unlock(&sas_host->lock); + + if (identify->device_type == SAS_END_DEVICE && + rphy->scsi_target_id != -1) { + int lun; + + if (identify->target_port_protocols & SAS_PROTOCOL_SSP) + lun = SCAN_WILD_CARD; + else + lun = 0; + + scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, + SCSI_SCAN_INITIAL); + } + + return 0; +} +EXPORT_SYMBOL(sas_rphy_add); + +/** + * sas_rphy_free - free a SAS remote PHY + * @rphy: SAS remote PHY to free + * + * Frees the specified SAS remote PHY. + * + * Note: + * This function must only be called on a remote + * PHY that has not successfully been added using + * sas_rphy_add() (or has been sas_rphy_remove()'d) + */ +void sas_rphy_free(struct sas_rphy *rphy) +{ + struct device *dev = &rphy->dev; + struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); + struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); + + mutex_lock(&sas_host->lock); + list_del(&rphy->list); + mutex_unlock(&sas_host->lock); + + transport_destroy_device(dev); + + put_device(dev); +} +EXPORT_SYMBOL(sas_rphy_free); + +/** + * sas_rphy_delete - remove and free SAS remote PHY + * @rphy: SAS remote PHY to remove and free + * + * Removes the specified SAS remote PHY and frees it. + */ +void +sas_rphy_delete(struct sas_rphy *rphy) +{ + sas_rphy_remove(rphy); + sas_rphy_free(rphy); +} +EXPORT_SYMBOL(sas_rphy_delete); + +/** + * sas_rphy_unlink - unlink SAS remote PHY + * @rphy: SAS remote phy to unlink from its parent port + * + * Removes port reference to an rphy + */ +void sas_rphy_unlink(struct sas_rphy *rphy) +{ + struct sas_port *parent = dev_to_sas_port(rphy->dev.parent); + + parent->rphy = NULL; +} +EXPORT_SYMBOL(sas_rphy_unlink); + +/** + * sas_rphy_remove - remove SAS remote PHY + * @rphy: SAS remote phy to remove + * + * Removes the specified SAS remote PHY. 
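+ * For end devices the scanned SCSI target is removed; for expanders all
+ * child devices are removed. The rphy is also unlinked from its parent
+ * port and its bsg request queue is torn down.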
+ */ +void +sas_rphy_remove(struct sas_rphy *rphy) +{ + struct device *dev = &rphy->dev; + + switch (rphy->identify.device_type) { + case SAS_END_DEVICE: + scsi_remove_target(dev); + break; + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: + sas_remove_children(dev); + break; + default: + break; + } + + sas_rphy_unlink(rphy); + bsg_remove_queue(rphy->q); + transport_remove_device(dev); + device_del(dev); +} +EXPORT_SYMBOL(sas_rphy_remove); + +/** + * scsi_is_sas_rphy - check if a struct device represents a SAS remote PHY + * @dev: device to check + * + * Returns: + * %1 if the device represents a SAS remote PHY, %0 else + */ +int scsi_is_sas_rphy(const struct device *dev) +{ + return dev->release == sas_end_device_release || + dev->release == sas_expander_release; +} +EXPORT_SYMBOL(scsi_is_sas_rphy); + + +/* + * SCSI scan helper + */ + +static int sas_user_scan(struct Scsi_Host *shost, uint channel, + uint id, u64 lun) +{ + struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); + struct sas_rphy *rphy; + + mutex_lock(&sas_host->lock); + list_for_each_entry(rphy, &sas_host->rphy_list, list) { + if (rphy->identify.device_type != SAS_END_DEVICE || + rphy->scsi_target_id == -1) + continue; + + if ((channel == SCAN_WILD_CARD || channel == 0) && + (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { + scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, + lun, SCSI_SCAN_MANUAL); + } + } + mutex_unlock(&sas_host->lock); + + return 0; +} + + +/* + * Setup / Teardown code + */ + +#define SETUP_TEMPLATE(attrb, field, perm, test) \ + i->private_##attrb[count] = dev_attr_##field; \ + i->private_##attrb[count].attr.mode = perm; \ + i->attrb[count] = &i->private_##attrb[count]; \ + if (test) \ + count++ + +#define SETUP_TEMPLATE_RW(attrb, field, perm, test, ro_test, ro_perm) \ + i->private_##attrb[count] = dev_attr_##field; \ + i->private_##attrb[count].attr.mode = perm; \ + if (ro_test) { \ + i->private_##attrb[count].attr.mode = ro_perm; \ + i->private_##attrb[count].store = NULL; \ + } \ + i->attrb[count] = &i->private_##attrb[count]; \ + if (test) \ + count++ + +#define SETUP_RPORT_ATTRIBUTE(field) \ + SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, 1) + +#define SETUP_OPTIONAL_RPORT_ATTRIBUTE(field, func) \ + SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, i->f->func) + +#define SETUP_PHY_ATTRIBUTE(field) \ + SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, 1) + +#define SETUP_PHY_ATTRIBUTE_RW(field) \ + SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \ + !i->f->set_phy_speed, S_IRUGO) + +#define SETUP_OPTIONAL_PHY_ATTRIBUTE_RW(field, func) \ + SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \ + !i->f->func, S_IRUGO) + +#define SETUP_PORT_ATTRIBUTE(field) \ + SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1) + +#define SETUP_OPTIONAL_PHY_ATTRIBUTE(field, func) \ + SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, i->f->func) + +#define SETUP_PHY_ATTRIBUTE_WRONLY(field) \ + SETUP_TEMPLATE(phy_attrs, field, S_IWUSR, 1) + +#define SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(field, func) \ + SETUP_TEMPLATE(phy_attrs, field, S_IWUSR, i->f->func) + +#define SETUP_END_DEV_ATTRIBUTE(field) \ + SETUP_TEMPLATE(end_dev_attrs, field, S_IRUGO, 1) + +#define SETUP_EXPANDER_ATTRIBUTE(field) \ + SETUP_TEMPLATE(expander_attrs, expander_##field, S_IRUGO, 1) + +/** + * sas_attach_transport - instantiate SAS transport template + * @ft: SAS transport class function template + */ +struct scsi_transport_template * +sas_attach_transport(struct sas_function_template *ft) +{ + struct sas_internal *i; + int count; 
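+
+	/*
+	 * Register one transport container per object class (host, phy,
+	 * port, rphy, end device, expander), then fill the per-class
+	 * attribute arrays with the SETUP_* macros below; optional
+	 * attributes are only counted when the LLDD supplies the matching
+	 * callback in the function template.
+	 */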
+ + i = kzalloc(sizeof(struct sas_internal), GFP_KERNEL); + if (!i) + return NULL; + + i->t.user_scan = sas_user_scan; + + i->t.host_attrs.ac.attrs = &i->host_attrs[0]; + i->t.host_attrs.ac.class = &sas_host_class.class; + i->t.host_attrs.ac.match = sas_host_match; + transport_container_register(&i->t.host_attrs); + i->t.host_size = sizeof(struct sas_host_attrs); + + i->phy_attr_cont.ac.class = &sas_phy_class.class; + i->phy_attr_cont.ac.attrs = &i->phy_attrs[0]; + i->phy_attr_cont.ac.match = sas_phy_match; + transport_container_register(&i->phy_attr_cont); + + i->port_attr_cont.ac.class = &sas_port_class.class; + i->port_attr_cont.ac.attrs = &i->port_attrs[0]; + i->port_attr_cont.ac.match = sas_port_match; + transport_container_register(&i->port_attr_cont); + + i->rphy_attr_cont.ac.class = &sas_rphy_class.class; + i->rphy_attr_cont.ac.attrs = &i->rphy_attrs[0]; + i->rphy_attr_cont.ac.match = sas_rphy_match; + transport_container_register(&i->rphy_attr_cont); + + i->end_dev_attr_cont.ac.class = &sas_end_dev_class.class; + i->end_dev_attr_cont.ac.attrs = &i->end_dev_attrs[0]; + i->end_dev_attr_cont.ac.match = sas_end_dev_match; + transport_container_register(&i->end_dev_attr_cont); + + i->expander_attr_cont.ac.class = &sas_expander_class.class; + i->expander_attr_cont.ac.attrs = &i->expander_attrs[0]; + i->expander_attr_cont.ac.match = sas_expander_match; + transport_container_register(&i->expander_attr_cont); + + i->f = ft; + + count = 0; + SETUP_PHY_ATTRIBUTE(initiator_port_protocols); + SETUP_PHY_ATTRIBUTE(target_port_protocols); + SETUP_PHY_ATTRIBUTE(device_type); + SETUP_PHY_ATTRIBUTE(sas_address); + SETUP_PHY_ATTRIBUTE(phy_identifier); + SETUP_PHY_ATTRIBUTE(negotiated_linkrate); + SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw); + SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate); + SETUP_PHY_ATTRIBUTE(maximum_linkrate_hw); + SETUP_PHY_ATTRIBUTE_RW(maximum_linkrate); + + SETUP_PHY_ATTRIBUTE(invalid_dword_count); + SETUP_PHY_ATTRIBUTE(running_disparity_error_count); + SETUP_PHY_ATTRIBUTE(loss_of_dword_sync_count); + SETUP_PHY_ATTRIBUTE(phy_reset_problem_count); + SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(link_reset, phy_reset); + SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(hard_reset, phy_reset); + SETUP_OPTIONAL_PHY_ATTRIBUTE_RW(enable, phy_enable); + i->phy_attrs[count] = NULL; + + count = 0; + SETUP_PORT_ATTRIBUTE(num_phys); + i->port_attrs[count] = NULL; + + count = 0; + SETUP_RPORT_ATTRIBUTE(rphy_initiator_port_protocols); + SETUP_RPORT_ATTRIBUTE(rphy_target_port_protocols); + SETUP_RPORT_ATTRIBUTE(rphy_device_type); + SETUP_RPORT_ATTRIBUTE(rphy_sas_address); + SETUP_RPORT_ATTRIBUTE(rphy_phy_identifier); + SETUP_RPORT_ATTRIBUTE(rphy_scsi_target_id); + SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_enclosure_identifier, + get_enclosure_identifier); + SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_bay_identifier, + get_bay_identifier); + i->rphy_attrs[count] = NULL; + + count = 0; + SETUP_END_DEV_ATTRIBUTE(end_dev_ready_led_meaning); + SETUP_END_DEV_ATTRIBUTE(end_dev_I_T_nexus_loss_timeout); + SETUP_END_DEV_ATTRIBUTE(end_dev_initiator_response_timeout); + SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_supported); + SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_enabled); + i->end_dev_attrs[count] = NULL; + + count = 0; + SETUP_EXPANDER_ATTRIBUTE(vendor_id); + SETUP_EXPANDER_ATTRIBUTE(product_id); + SETUP_EXPANDER_ATTRIBUTE(product_rev); + SETUP_EXPANDER_ATTRIBUTE(component_vendor_id); + SETUP_EXPANDER_ATTRIBUTE(component_id); + SETUP_EXPANDER_ATTRIBUTE(component_revision_id); + SETUP_EXPANDER_ATTRIBUTE(level); + i->expander_attrs[count] = NULL; + + return 
&i->t; +} +EXPORT_SYMBOL(sas_attach_transport); + +/** + * sas_release_transport - release SAS transport template instance + * @t: transport template instance + */ +void sas_release_transport(struct scsi_transport_template *t) +{ + struct sas_internal *i = to_sas_internal(t); + + transport_container_unregister(&i->t.host_attrs); + transport_container_unregister(&i->phy_attr_cont); + transport_container_unregister(&i->port_attr_cont); + transport_container_unregister(&i->rphy_attr_cont); + transport_container_unregister(&i->end_dev_attr_cont); + transport_container_unregister(&i->expander_attr_cont); + + kfree(i); +} +EXPORT_SYMBOL(sas_release_transport); + +static __init int sas_transport_init(void) +{ + int error; + + error = transport_class_register(&sas_host_class); + if (error) + goto out; + error = transport_class_register(&sas_phy_class); + if (error) + goto out_unregister_transport; + error = transport_class_register(&sas_port_class); + if (error) + goto out_unregister_phy; + error = transport_class_register(&sas_rphy_class); + if (error) + goto out_unregister_port; + error = transport_class_register(&sas_end_dev_class); + if (error) + goto out_unregister_rphy; + error = transport_class_register(&sas_expander_class); + if (error) + goto out_unregister_end_dev; + + return 0; + + out_unregister_end_dev: + transport_class_unregister(&sas_end_dev_class); + out_unregister_rphy: + transport_class_unregister(&sas_rphy_class); + out_unregister_port: + transport_class_unregister(&sas_port_class); + out_unregister_phy: + transport_class_unregister(&sas_phy_class); + out_unregister_transport: + transport_class_unregister(&sas_host_class); + out: + return error; + +} + +static void __exit sas_transport_exit(void) +{ + transport_class_unregister(&sas_host_class); + transport_class_unregister(&sas_phy_class); + transport_class_unregister(&sas_port_class); + transport_class_unregister(&sas_rphy_class); + transport_class_unregister(&sas_end_dev_class); + transport_class_unregister(&sas_expander_class); +} + +MODULE_AUTHOR("Christoph Hellwig"); +MODULE_DESCRIPTION("SAS Transport Attributes"); +MODULE_LICENSE("GPL"); + +module_init(sas_transport_init); +module_exit(sas_transport_exit); diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c new file mode 100644 index 000000000..2442d4d2e --- /dev/null +++ b/drivers/scsi/scsi_transport_spi.c @@ -0,0 +1,1640 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Parallel SCSI (SPI) transport specific attributes exported to sysfs. + * + * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. 
+ * Copyright (c) 2004, 2005 James Bottomley + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "scsi_priv.h" +#include +#include +#include +#include +#include +#include +#include + +#define SPI_NUM_ATTRS 14 /* increase this if you add attributes */ +#define SPI_OTHER_ATTRS 1 /* Increase this if you add "always + * on" attributes */ +#define SPI_HOST_ATTRS 1 + +#define SPI_MAX_ECHO_BUFFER_SIZE 4096 + +#define DV_LOOPS 3 +#define DV_TIMEOUT (10*HZ) +#define DV_RETRIES 3 /* should only need at most + * two cc/ua clears */ + +/* Our blacklist flags */ +enum { + SPI_BLIST_NOIUS = (__force blist_flags_t)0x1, +}; + +/* blacklist table, modelled on scsi_devinfo.c */ +static struct { + char *vendor; + char *model; + blist_flags_t flags; +} spi_static_device_list[] __initdata = { + {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS }, + {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS }, + {NULL, NULL, 0} +}; + +/* Private data accessors (keep these out of the header file) */ +#define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress) +#define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex) + +struct spi_internal { + struct scsi_transport_template t; + struct spi_function_template *f; +}; + +#define to_spi_internal(tmpl) container_of(tmpl, struct spi_internal, t) + +static const int ppr_to_ps[] = { + /* The PPR values 0-6 are reserved, fill them in when + * the committee defines them */ + -1, /* 0x00 */ + -1, /* 0x01 */ + -1, /* 0x02 */ + -1, /* 0x03 */ + -1, /* 0x04 */ + -1, /* 0x05 */ + -1, /* 0x06 */ + 3125, /* 0x07 */ + 6250, /* 0x08 */ + 12500, /* 0x09 */ + 25000, /* 0x0a */ + 30300, /* 0x0b */ + 50000, /* 0x0c */ +}; +/* The PPR values at which you calculate the period in ns by multiplying + * by 4 */ +#define SPI_STATIC_PPR 0x0c + +static int sprint_frac(char *dest, int value, int denom) +{ + int frac = value % denom; + int result = sprintf(dest, "%d", value / denom); + + if (frac == 0) + return result; + dest[result++] = '.'; + + do { + denom /= 10; + sprintf(dest + result, "%d", frac / denom); + result++; + frac %= denom; + } while (frac); + + dest[result++] = '\0'; + return result; +} + +static int spi_execute(struct scsi_device *sdev, const void *cmd, + enum req_op op, void *buffer, unsigned int bufflen, + struct scsi_sense_hdr *sshdr) +{ + int i, result; + struct scsi_sense_hdr sshdr_tmp; + blk_opf_t opf = op | REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + REQ_FAILFAST_DRIVER; + const struct scsi_exec_args exec_args = { + .req_flags = BLK_MQ_REQ_PM, + .sshdr = sshdr ? : &sshdr_tmp, + }; + + sshdr = exec_args.sshdr; + + for(i = 0; i < DV_RETRIES; i++) { + /* + * The purpose of the RQF_PM flag below is to bypass the + * SDEV_QUIESCE state. 
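+		 *
+		 * The command is retried up to DV_RETRIES times for as long
+		 * as the device keeps returning UNIT ATTENTION sense; any
+		 * other outcome, including success, ends the loop.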
+ */ + result = scsi_execute_cmd(sdev, cmd, opf, buffer, bufflen, + DV_TIMEOUT, 1, &exec_args); + if (result < 0 || !scsi_sense_valid(sshdr) || + sshdr->sense_key != UNIT_ATTENTION) + break; + } + return result; +} + +static struct { + enum spi_signal_type value; + char *name; +} signal_types[] = { + { SPI_SIGNAL_UNKNOWN, "unknown" }, + { SPI_SIGNAL_SE, "SE" }, + { SPI_SIGNAL_LVD, "LVD" }, + { SPI_SIGNAL_HVD, "HVD" }, +}; + +static inline const char *spi_signal_to_string(enum spi_signal_type type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(signal_types); i++) { + if (type == signal_types[i].value) + return signal_types[i].name; + } + return NULL; +} +static inline enum spi_signal_type spi_signal_to_value(const char *name) +{ + int i, len; + + for (i = 0; i < ARRAY_SIZE(signal_types); i++) { + len = strlen(signal_types[i].name); + if (strncmp(name, signal_types[i].name, len) == 0 && + (name[len] == '\n' || name[len] == '\0')) + return signal_types[i].value; + } + return SPI_SIGNAL_UNKNOWN; +} + +static int spi_host_setup(struct transport_container *tc, struct device *dev, + struct device *cdev) +{ + struct Scsi_Host *shost = dev_to_shost(dev); + + spi_signalling(shost) = SPI_SIGNAL_UNKNOWN; + + return 0; +} + +static int spi_host_configure(struct transport_container *tc, + struct device *dev, + struct device *cdev); + +static DECLARE_TRANSPORT_CLASS(spi_host_class, + "spi_host", + spi_host_setup, + NULL, + spi_host_configure); + +static int spi_host_match(struct attribute_container *cont, + struct device *dev) +{ + struct Scsi_Host *shost; + + if (!scsi_is_host_device(dev)) + return 0; + + shost = dev_to_shost(dev); + if (!shost->transportt || shost->transportt->host_attrs.ac.class + != &spi_host_class.class) + return 0; + + return &shost->transportt->host_attrs.ac == cont; +} + +static int spi_target_configure(struct transport_container *tc, + struct device *dev, + struct device *cdev); + +static int spi_device_configure(struct transport_container *tc, + struct device *dev, + struct device *cdev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct scsi_target *starget = sdev->sdev_target; + blist_flags_t bflags; + + bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8], + &sdev->inquiry[16], + SCSI_DEVINFO_SPI); + + /* Populate the target capability fields with the values + * gleaned from the device inquiry */ + + spi_support_sync(starget) = scsi_device_sync(sdev); + spi_support_wide(starget) = scsi_device_wide(sdev); + spi_support_dt(starget) = scsi_device_dt(sdev); + spi_support_dt_only(starget) = scsi_device_dt_only(sdev); + spi_support_ius(starget) = scsi_device_ius(sdev); + if (bflags & SPI_BLIST_NOIUS) { + dev_info(dev, "Information Units disabled by blacklist\n"); + spi_support_ius(starget) = 0; + } + spi_support_qas(starget) = scsi_device_qas(sdev); + + return 0; +} + +static int spi_setup_transport_attrs(struct transport_container *tc, + struct device *dev, + struct device *cdev) +{ + struct scsi_target *starget = to_scsi_target(dev); + + spi_period(starget) = -1; /* illegal value */ + spi_min_period(starget) = 0; + spi_offset(starget) = 0; /* async */ + spi_max_offset(starget) = 255; + spi_width(starget) = 0; /* narrow */ + spi_max_width(starget) = 1; + spi_iu(starget) = 0; /* no IU */ + spi_max_iu(starget) = 1; + spi_dt(starget) = 0; /* ST */ + spi_qas(starget) = 0; + spi_max_qas(starget) = 1; + spi_wr_flow(starget) = 0; + spi_rd_strm(starget) = 0; + spi_rti(starget) = 0; + spi_pcomp_en(starget) = 0; + spi_hold_mcs(starget) = 0; + spi_dv_pending(starget) = 0; + 
spi_dv_in_progress(starget) = 0; + spi_initial_dv(starget) = 0; + mutex_init(&spi_dv_mutex(starget)); + + return 0; +} + +#define spi_transport_show_simple(field, format_string) \ + \ +static ssize_t \ +show_spi_transport_##field(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct scsi_target *starget = transport_class_to_starget(dev); \ + struct spi_transport_attrs *tp; \ + \ + tp = (struct spi_transport_attrs *)&starget->starget_data; \ + return snprintf(buf, 20, format_string, tp->field); \ +} + +#define spi_transport_store_simple(field, format_string) \ + \ +static ssize_t \ +store_spi_transport_##field(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + int val; \ + struct scsi_target *starget = transport_class_to_starget(dev); \ + struct spi_transport_attrs *tp; \ + \ + tp = (struct spi_transport_attrs *)&starget->starget_data; \ + val = simple_strtoul(buf, NULL, 0); \ + tp->field = val; \ + return count; \ +} + +#define spi_transport_show_function(field, format_string) \ + \ +static ssize_t \ +show_spi_transport_##field(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct scsi_target *starget = transport_class_to_starget(dev); \ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ + struct spi_transport_attrs *tp; \ + struct spi_internal *i = to_spi_internal(shost->transportt); \ + tp = (struct spi_transport_attrs *)&starget->starget_data; \ + if (i->f->get_##field) \ + i->f->get_##field(starget); \ + return snprintf(buf, 20, format_string, tp->field); \ +} + +#define spi_transport_store_function(field, format_string) \ +static ssize_t \ +store_spi_transport_##field(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + int val; \ + struct scsi_target *starget = transport_class_to_starget(dev); \ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ + struct spi_internal *i = to_spi_internal(shost->transportt); \ + \ + if (!i->f->set_##field) \ + return -EINVAL; \ + val = simple_strtoul(buf, NULL, 0); \ + i->f->set_##field(starget, val); \ + return count; \ +} + +#define spi_transport_store_max(field, format_string) \ +static ssize_t \ +store_spi_transport_##field(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + int val; \ + struct scsi_target *starget = transport_class_to_starget(dev); \ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ + struct spi_internal *i = to_spi_internal(shost->transportt); \ + struct spi_transport_attrs *tp \ + = (struct spi_transport_attrs *)&starget->starget_data; \ + \ + if (!i->f->set_##field) \ + return -EINVAL; \ + val = simple_strtoul(buf, NULL, 0); \ + if (val > tp->max_##field) \ + val = tp->max_##field; \ + i->f->set_##field(starget, val); \ + return count; \ +} + +#define spi_transport_rd_attr(field, format_string) \ + spi_transport_show_function(field, format_string) \ + spi_transport_store_function(field, format_string) \ +static DEVICE_ATTR(field, S_IRUGO, \ + show_spi_transport_##field, \ + store_spi_transport_##field); + +#define spi_transport_simple_attr(field, format_string) \ + spi_transport_show_simple(field, format_string) \ + spi_transport_store_simple(field, format_string) \ +static DEVICE_ATTR(field, S_IRUGO, \ + show_spi_transport_##field, \ + store_spi_transport_##field); + +#define spi_transport_max_attr(field, format_string) \ + spi_transport_show_function(field, format_string) \ + 
spi_transport_store_max(field, format_string) \ + spi_transport_simple_attr(max_##field, format_string) \ +static DEVICE_ATTR(field, S_IRUGO, \ + show_spi_transport_##field, \ + store_spi_transport_##field); + +/* The Parallel SCSI Tranport Attributes: */ +spi_transport_max_attr(offset, "%d\n"); +spi_transport_max_attr(width, "%d\n"); +spi_transport_max_attr(iu, "%d\n"); +spi_transport_rd_attr(dt, "%d\n"); +spi_transport_max_attr(qas, "%d\n"); +spi_transport_rd_attr(wr_flow, "%d\n"); +spi_transport_rd_attr(rd_strm, "%d\n"); +spi_transport_rd_attr(rti, "%d\n"); +spi_transport_rd_attr(pcomp_en, "%d\n"); +spi_transport_rd_attr(hold_mcs, "%d\n"); + +/* we only care about the first child device that's a real SCSI device + * so we return 1 to terminate the iteration when we find it */ +static int child_iter(struct device *dev, void *data) +{ + if (!scsi_is_sdev_device(dev)) + return 0; + + spi_dv_device(to_scsi_device(dev)); + return 1; +} + +static ssize_t +store_spi_revalidate(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_target *starget = transport_class_to_starget(dev); + + device_for_each_child(&starget->dev, NULL, child_iter); + return count; +} +static DEVICE_ATTR(revalidate, S_IWUSR, NULL, store_spi_revalidate); + +/* Translate the period into ns according to the current spec + * for SDTR/PPR messages */ +static int period_to_str(char *buf, int period) +{ + int len, picosec; + + if (period < 0 || period > 0xff) { + picosec = -1; + } else if (period <= SPI_STATIC_PPR) { + picosec = ppr_to_ps[period]; + } else { + picosec = period * 4000; + } + + if (picosec == -1) { + len = sprintf(buf, "reserved"); + } else { + len = sprint_frac(buf, picosec, 1000); + } + + return len; +} + +static ssize_t +show_spi_transport_period_helper(char *buf, int period) +{ + int len = period_to_str(buf, period); + buf[len++] = '\n'; + buf[len] = '\0'; + return len; +} + +static ssize_t +store_spi_transport_period_helper(struct device *dev, const char *buf, + size_t count, int *periodp) +{ + int j, picosec, period = -1; + char *endp; + + picosec = simple_strtoul(buf, &endp, 10) * 1000; + if (*endp == '.') { + int mult = 100; + do { + endp++; + if (!isdigit(*endp)) + break; + picosec += (*endp - '0') * mult; + mult /= 10; + } while (mult > 0); + } + + for (j = 0; j <= SPI_STATIC_PPR; j++) { + if (ppr_to_ps[j] < picosec) + continue; + period = j; + break; + } + + if (period == -1) + period = picosec / 4000; + + if (period > 0xff) + period = 0xff; + + *periodp = period; + + return count; +} + +static ssize_t +show_spi_transport_period(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_target *starget = transport_class_to_starget(dev); + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct spi_internal *i = to_spi_internal(shost->transportt); + struct spi_transport_attrs *tp = + (struct spi_transport_attrs *)&starget->starget_data; + + if (i->f->get_period) + i->f->get_period(starget); + + return show_spi_transport_period_helper(buf, tp->period); +} + +static ssize_t +store_spi_transport_period(struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_target *starget = transport_class_to_starget(cdev); + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct spi_internal *i = to_spi_internal(shost->transportt); + struct spi_transport_attrs *tp = + (struct spi_transport_attrs *)&starget->starget_data; + int period, retval; + + if (!i->f->set_period) + return 
-EINVAL; + + retval = store_spi_transport_period_helper(cdev, buf, count, &period); + + if (period < tp->min_period) + period = tp->min_period; + + i->f->set_period(starget, period); + + return retval; +} + +static DEVICE_ATTR(period, S_IRUGO, + show_spi_transport_period, + store_spi_transport_period); + +static ssize_t +show_spi_transport_min_period(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct scsi_target *starget = transport_class_to_starget(cdev); + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct spi_internal *i = to_spi_internal(shost->transportt); + struct spi_transport_attrs *tp = + (struct spi_transport_attrs *)&starget->starget_data; + + if (!i->f->set_period) + return -EINVAL; + + return show_spi_transport_period_helper(buf, tp->min_period); +} + +static ssize_t +store_spi_transport_min_period(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_target *starget = transport_class_to_starget(cdev); + struct spi_transport_attrs *tp = + (struct spi_transport_attrs *)&starget->starget_data; + + return store_spi_transport_period_helper(cdev, buf, count, + &tp->min_period); +} + + +static DEVICE_ATTR(min_period, S_IRUGO, + show_spi_transport_min_period, + store_spi_transport_min_period); + + +static ssize_t show_spi_host_signalling(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = transport_class_to_shost(cdev); + struct spi_internal *i = to_spi_internal(shost->transportt); + + if (i->f->get_signalling) + i->f->get_signalling(shost); + + return sprintf(buf, "%s\n", spi_signal_to_string(spi_signalling(shost))); +} +static ssize_t store_spi_host_signalling(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = transport_class_to_shost(dev); + struct spi_internal *i = to_spi_internal(shost->transportt); + enum spi_signal_type type = spi_signal_to_value(buf); + + if (!i->f->set_signalling) + return -EINVAL; + + if (type != SPI_SIGNAL_UNKNOWN) + i->f->set_signalling(shost, type); + + return count; +} +static DEVICE_ATTR(signalling, S_IRUGO, + show_spi_host_signalling, + store_spi_host_signalling); + +static ssize_t show_spi_host_width(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = transport_class_to_shost(cdev); + + return sprintf(buf, "%s\n", shost->max_id == 16 ? 
"wide" : "narrow"); +} +static DEVICE_ATTR(host_width, S_IRUGO, + show_spi_host_width, NULL); + +static ssize_t show_spi_host_hba_id(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = transport_class_to_shost(cdev); + + return sprintf(buf, "%d\n", shost->this_id); +} +static DEVICE_ATTR(hba_id, S_IRUGO, + show_spi_host_hba_id, NULL); + +#define DV_SET(x, y) \ + if(i->f->set_##x) \ + i->f->set_##x(sdev->sdev_target, y) + +enum spi_compare_returns { + SPI_COMPARE_SUCCESS, + SPI_COMPARE_FAILURE, + SPI_COMPARE_SKIP_TEST, +}; + + +/* This is for read/write Domain Validation: If the device supports + * an echo buffer, we do read/write tests to it */ +static enum spi_compare_returns +spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer, + u8 *ptr, const int retries) +{ + int len = ptr - buffer; + int j, k, r, result; + unsigned int pattern = 0x0000ffff; + struct scsi_sense_hdr sshdr; + + const char spi_write_buffer[] = { + WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0 + }; + const char spi_read_buffer[] = { + READ_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0 + }; + + /* set up the pattern buffer. Doesn't matter if we spill + * slightly beyond since that's where the read buffer is */ + for (j = 0; j < len; ) { + + /* fill the buffer with counting (test a) */ + for ( ; j < min(len, 32); j++) + buffer[j] = j; + k = j; + /* fill the buffer with alternating words of 0x0 and + * 0xffff (test b) */ + for ( ; j < min(len, k + 32); j += 2) { + u16 *word = (u16 *)&buffer[j]; + + *word = (j & 0x02) ? 0x0000 : 0xffff; + } + k = j; + /* fill with crosstalk (alternating 0x5555 0xaaa) + * (test c) */ + for ( ; j < min(len, k + 32); j += 2) { + u16 *word = (u16 *)&buffer[j]; + + *word = (j & 0x02) ? 0x5555 : 0xaaaa; + } + k = j; + /* fill with shifting bits (test d) */ + for ( ; j < min(len, k + 32); j += 4) { + u32 *word = (unsigned int *)&buffer[j]; + u32 roll = (pattern & 0x80000000) ? 
1 : 0; + + *word = pattern; + pattern = (pattern << 1) | roll; + } + /* don't bother with random data (test e) */ + } + + for (r = 0; r < retries; r++) { + result = spi_execute(sdev, spi_write_buffer, REQ_OP_DRV_OUT, + buffer, len, &sshdr); + if(result || !scsi_device_online(sdev)) { + + scsi_device_set_state(sdev, SDEV_QUIESCE); + if (scsi_sense_valid(&sshdr) + && sshdr.sense_key == ILLEGAL_REQUEST + /* INVALID FIELD IN CDB */ + && sshdr.asc == 0x24 && sshdr.ascq == 0x00) + /* This would mean that the drive lied + * to us about supporting an echo + * buffer (unfortunately some Western + * Digital drives do precisely this) + */ + return SPI_COMPARE_SKIP_TEST; + + + sdev_printk(KERN_ERR, sdev, "Write Buffer failure %x\n", result); + return SPI_COMPARE_FAILURE; + } + + memset(ptr, 0, len); + spi_execute(sdev, spi_read_buffer, REQ_OP_DRV_IN, + ptr, len, NULL); + scsi_device_set_state(sdev, SDEV_QUIESCE); + + if (memcmp(buffer, ptr, len) != 0) + return SPI_COMPARE_FAILURE; + } + return SPI_COMPARE_SUCCESS; +} + +/* This is for the simplest form of Domain Validation: a read test + * on the inquiry data from the device */ +static enum spi_compare_returns +spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer, + u8 *ptr, const int retries) +{ + int r, result; + const int len = sdev->inquiry_len; + const char spi_inquiry[] = { + INQUIRY, 0, 0, 0, len, 0 + }; + + for (r = 0; r < retries; r++) { + memset(ptr, 0, len); + + result = spi_execute(sdev, spi_inquiry, REQ_OP_DRV_IN, + ptr, len, NULL); + + if(result || !scsi_device_online(sdev)) { + scsi_device_set_state(sdev, SDEV_QUIESCE); + return SPI_COMPARE_FAILURE; + } + + /* If we don't have the inquiry data already, the + * first read gets it */ + if (ptr == buffer) { + ptr += len; + --r; + continue; + } + + if (memcmp(buffer, ptr, len) != 0) + /* failure */ + return SPI_COMPARE_FAILURE; + } + return SPI_COMPARE_SUCCESS; +} + +static enum spi_compare_returns +spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr, + enum spi_compare_returns + (*compare_fn)(struct scsi_device *, u8 *, u8 *, int)) +{ + struct spi_internal *i = to_spi_internal(sdev->host->transportt); + struct scsi_target *starget = sdev->sdev_target; + int period = 0, prevperiod = 0; + enum spi_compare_returns retval; + + + for (;;) { + int newperiod; + retval = compare_fn(sdev, buffer, ptr, DV_LOOPS); + + if (retval == SPI_COMPARE_SUCCESS + || retval == SPI_COMPARE_SKIP_TEST) + break; + + /* OK, retrain, fallback */ + if (i->f->get_iu) + i->f->get_iu(starget); + if (i->f->get_qas) + i->f->get_qas(starget); + if (i->f->get_period) + i->f->get_period(sdev->sdev_target); + + /* Here's the fallback sequence; first try turning off + * IU, then QAS (if we can control them), then finally + * fall down the periods */ + if (i->f->set_iu && spi_iu(starget)) { + starget_printk(KERN_ERR, starget, "Domain Validation Disabling Information Units\n"); + DV_SET(iu, 0); + } else if (i->f->set_qas && spi_qas(starget)) { + starget_printk(KERN_ERR, starget, "Domain Validation Disabling Quick Arbitration and Selection\n"); + DV_SET(qas, 0); + } else { + newperiod = spi_period(starget); + period = newperiod > period ? 
newperiod : period; + if (period < 0x0d) + period++; + else + period += period >> 1; + + if (unlikely(period > 0xff || period == prevperiod)) { + /* Total failure; set to async and return */ + starget_printk(KERN_ERR, starget, "Domain Validation Failure, dropping back to Asynchronous\n"); + DV_SET(offset, 0); + return SPI_COMPARE_FAILURE; + } + starget_printk(KERN_ERR, starget, "Domain Validation detected failure, dropping back\n"); + DV_SET(period, period); + prevperiod = period; + } + } + return retval; +} + +static int +spi_dv_device_get_echo_buffer(struct scsi_device *sdev, u8 *buffer) +{ + int l, result; + + /* first off do a test unit ready. This can error out + * because of reservations or some other reason. If it + * fails, the device won't let us write to the echo buffer + * so just return failure */ + + static const char spi_test_unit_ready[] = { + TEST_UNIT_READY, 0, 0, 0, 0, 0 + }; + + static const char spi_read_buffer_descriptor[] = { + READ_BUFFER, 0x0b, 0, 0, 0, 0, 0, 0, 4, 0 + }; + + + /* We send a set of three TURs to clear any outstanding + * unit attention conditions if they exist (Otherwise the + * buffer tests won't be happy). If the TUR still fails + * (reservation conflict, device not ready, etc) just + * skip the write tests */ + for (l = 0; ; l++) { + result = spi_execute(sdev, spi_test_unit_ready, REQ_OP_DRV_IN, + NULL, 0, NULL); + + if(result) { + if(l >= 3) + return 0; + } else { + /* TUR succeeded */ + break; + } + } + + result = spi_execute(sdev, spi_read_buffer_descriptor, + REQ_OP_DRV_IN, buffer, 4, NULL); + + if (result) + /* Device has no echo buffer */ + return 0; + + return buffer[3] + ((buffer[2] & 0x1f) << 8); +} + +static void +spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) +{ + struct spi_internal *i = to_spi_internal(sdev->host->transportt); + struct scsi_target *starget = sdev->sdev_target; + struct Scsi_Host *shost = sdev->host; + int len = sdev->inquiry_len; + int min_period = spi_min_period(starget); + int max_width = spi_max_width(starget); + /* first set us up for narrow async */ + DV_SET(offset, 0); + DV_SET(width, 0); + + if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS) + != SPI_COMPARE_SUCCESS) { + starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n"); + /* FIXME: should probably offline the device here? */ + return; + } + + if (!spi_support_wide(starget)) { + spi_max_width(starget) = 0; + max_width = 0; + } + + /* test width */ + if (i->f->set_width && max_width) { + i->f->set_width(starget, 1); + + if (spi_dv_device_compare_inquiry(sdev, buffer, + buffer + len, + DV_LOOPS) + != SPI_COMPARE_SUCCESS) { + starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n"); + i->f->set_width(starget, 0); + /* Make sure we don't force wide back on by asking + * for a transfer period that requires it */ + max_width = 0; + if (min_period < 10) + min_period = 10; + } + } + + if (!i->f->set_period) + return; + + /* device can't handle synchronous */ + if (!spi_support_sync(starget) && !spi_support_dt(starget)) + return; + + /* len == -1 is the signal that we need to ascertain the + * presence of an echo buffer before trying to use it. 
len == + * 0 means we don't have an echo buffer */ + len = -1; + + retry: + + /* now set up to the maximum */ + DV_SET(offset, spi_max_offset(starget)); + DV_SET(period, min_period); + + /* try QAS requests; this should be harmless to set if the + * target supports it */ + if (spi_support_qas(starget) && spi_max_qas(starget)) { + DV_SET(qas, 1); + } else { + DV_SET(qas, 0); + } + + if (spi_support_ius(starget) && spi_max_iu(starget) && + min_period < 9) { + /* This u320 (or u640). Set IU transfers */ + DV_SET(iu, 1); + /* Then set the optional parameters */ + DV_SET(rd_strm, 1); + DV_SET(wr_flow, 1); + DV_SET(rti, 1); + if (min_period == 8) + DV_SET(pcomp_en, 1); + } else { + DV_SET(iu, 0); + } + + /* now that we've done all this, actually check the bus + * signal type (if known). Some devices are stupid on + * a SE bus and still claim they can try LVD only settings */ + if (i->f->get_signalling) + i->f->get_signalling(shost); + if (spi_signalling(shost) == SPI_SIGNAL_SE || + spi_signalling(shost) == SPI_SIGNAL_HVD || + !spi_support_dt(starget)) { + DV_SET(dt, 0); + } else { + DV_SET(dt, 1); + } + /* set width last because it will pull all the other + * parameters down to required values */ + DV_SET(width, max_width); + + /* Do the read only INQUIRY tests */ + spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len, + spi_dv_device_compare_inquiry); + /* See if we actually managed to negotiate and sustain DT */ + if (i->f->get_dt) + i->f->get_dt(starget); + + /* see if the device has an echo buffer. If it does we can do + * the SPI pattern write tests. Because of some broken + * devices, we *only* try this on a device that has actually + * negotiated DT */ + + if (len == -1 && spi_dt(starget)) + len = spi_dv_device_get_echo_buffer(sdev, buffer); + + if (len <= 0) { + starget_printk(KERN_INFO, starget, "Domain Validation skipping write tests\n"); + return; + } + + if (len > SPI_MAX_ECHO_BUFFER_SIZE) { + starget_printk(KERN_WARNING, starget, "Echo buffer size %d is too big, trimming to %d\n", len, SPI_MAX_ECHO_BUFFER_SIZE); + len = SPI_MAX_ECHO_BUFFER_SIZE; + } + + if (spi_dv_retrain(sdev, buffer, buffer + len, + spi_dv_device_echo_buffer) + == SPI_COMPARE_SKIP_TEST) { + /* OK, the stupid drive can't do a write echo buffer + * test after all, fall back to the read tests */ + len = 0; + goto retry; + } +} + + +/** spi_dv_device - Do Domain Validation on the device + * @sdev: scsi device to validate + * + * Performs the domain validation on the given device in the + * current execution thread. Since DV operations may sleep, + * the current thread must have user context. Also no SCSI + * related locks that would deadlock I/O issued by the DV may + * be held. + */ +void +spi_dv_device(struct scsi_device *sdev) +{ + struct scsi_target *starget = sdev->sdev_target; + const int len = SPI_MAX_ECHO_BUFFER_SIZE*2; + unsigned int sleep_flags; + u8 *buffer; + + /* + * Because this function and the power management code both call + * scsi_device_quiesce(), it is not safe to perform domain validation + * while suspend or resume is in progress. Hence the + * lock/unlock_system_sleep() calls. 
+ */ + sleep_flags = lock_system_sleep(); + + if (scsi_autopm_get_device(sdev)) + goto unlock_system_sleep; + + if (unlikely(spi_dv_in_progress(starget))) + goto put_autopm; + + if (unlikely(scsi_device_get(sdev))) + goto put_autopm; + + spi_dv_in_progress(starget) = 1; + + buffer = kzalloc(len, GFP_KERNEL); + + if (unlikely(!buffer)) + goto put_sdev; + + /* We need to verify that the actual device will quiesce; the + * later target quiesce is just a nice to have */ + if (unlikely(scsi_device_quiesce(sdev))) + goto free_buffer; + + scsi_target_quiesce(starget); + + spi_dv_pending(starget) = 1; + mutex_lock(&spi_dv_mutex(starget)); + + starget_printk(KERN_INFO, starget, "Beginning Domain Validation\n"); + + spi_dv_device_internal(sdev, buffer); + + starget_printk(KERN_INFO, starget, "Ending Domain Validation\n"); + + mutex_unlock(&spi_dv_mutex(starget)); + spi_dv_pending(starget) = 0; + + scsi_target_resume(starget); + + spi_initial_dv(starget) = 1; + +free_buffer: + kfree(buffer); + +put_sdev: + spi_dv_in_progress(starget) = 0; + scsi_device_put(sdev); +put_autopm: + scsi_autopm_put_device(sdev); + +unlock_system_sleep: + unlock_system_sleep(sleep_flags); +} +EXPORT_SYMBOL(spi_dv_device); + +struct work_queue_wrapper { + struct work_struct work; + struct scsi_device *sdev; +}; + +static void +spi_dv_device_work_wrapper(struct work_struct *work) +{ + struct work_queue_wrapper *wqw = + container_of(work, struct work_queue_wrapper, work); + struct scsi_device *sdev = wqw->sdev; + + kfree(wqw); + spi_dv_device(sdev); + spi_dv_pending(sdev->sdev_target) = 0; + scsi_device_put(sdev); +} + + +/** + * spi_schedule_dv_device - schedule domain validation to occur on the device + * @sdev: The device to validate + * + * Identical to spi_dv_device() above, except that the DV will be + * scheduled to occur in a workqueue later. All memory allocations + * are atomic, so may be called from any context including those holding + * SCSI locks. + */ +void +spi_schedule_dv_device(struct scsi_device *sdev) +{ + struct work_queue_wrapper *wqw = + kmalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC); + + if (unlikely(!wqw)) + return; + + if (unlikely(spi_dv_pending(sdev->sdev_target))) { + kfree(wqw); + return; + } + /* Set pending early (dv_device doesn't check it, only sets it) */ + spi_dv_pending(sdev->sdev_target) = 1; + if (unlikely(scsi_device_get(sdev))) { + kfree(wqw); + spi_dv_pending(sdev->sdev_target) = 0; + return; + } + + INIT_WORK(&wqw->work, spi_dv_device_work_wrapper); + wqw->sdev = sdev; + + schedule_work(&wqw->work); +} +EXPORT_SYMBOL(spi_schedule_dv_device); + +/** + * spi_display_xfer_agreement - Print the current target transfer agreement + * @starget: The target for which to display the agreement + * + * Each SPI port is required to maintain a transfer agreement for each + * other port on the bus. This function prints a one-line summary of + * the current agreement; more detailed information is available in sysfs. 
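+ *
+ * For illustration, a wide DT agreement at period factor 9 (FAST-80,
+ * 12.5 ns) with an offset of 62 would be summarised roughly as:
+ *
+ *	FAST-80 WIDE SCSI 160.0 MB/s DT (12.5 ns, offset 62)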
+ */ +void spi_display_xfer_agreement(struct scsi_target *starget) +{ + struct spi_transport_attrs *tp; + tp = (struct spi_transport_attrs *)&starget->starget_data; + + if (tp->offset > 0 && tp->period > 0) { + unsigned int picosec, kb100; + char *scsi = "FAST-?"; + char tmp[8]; + + if (tp->period <= SPI_STATIC_PPR) { + picosec = ppr_to_ps[tp->period]; + switch (tp->period) { + case 7: scsi = "FAST-320"; break; + case 8: scsi = "FAST-160"; break; + case 9: scsi = "FAST-80"; break; + case 10: + case 11: scsi = "FAST-40"; break; + case 12: scsi = "FAST-20"; break; + } + } else { + picosec = tp->period * 4000; + if (tp->period < 25) + scsi = "FAST-20"; + else if (tp->period < 50) + scsi = "FAST-10"; + else + scsi = "FAST-5"; + } + + kb100 = (10000000 + picosec / 2) / picosec; + if (tp->width) + kb100 *= 2; + sprint_frac(tmp, picosec, 1000); + + dev_info(&starget->dev, + "%s %sSCSI %d.%d MB/s %s%s%s%s%s%s%s%s (%s ns, offset %d)\n", + scsi, tp->width ? "WIDE " : "", kb100/10, kb100 % 10, + tp->dt ? "DT" : "ST", + tp->iu ? " IU" : "", + tp->qas ? " QAS" : "", + tp->rd_strm ? " RDSTRM" : "", + tp->rti ? " RTI" : "", + tp->wr_flow ? " WRFLOW" : "", + tp->pcomp_en ? " PCOMP" : "", + tp->hold_mcs ? " HMCS" : "", + tmp, tp->offset); + } else { + dev_info(&starget->dev, "%sasynchronous\n", + tp->width ? "wide " : ""); + } +} +EXPORT_SYMBOL(spi_display_xfer_agreement); + +int spi_populate_width_msg(unsigned char *msg, int width) +{ + msg[0] = EXTENDED_MESSAGE; + msg[1] = 2; + msg[2] = EXTENDED_WDTR; + msg[3] = width; + return 4; +} +EXPORT_SYMBOL_GPL(spi_populate_width_msg); + +int spi_populate_sync_msg(unsigned char *msg, int period, int offset) +{ + msg[0] = EXTENDED_MESSAGE; + msg[1] = 3; + msg[2] = EXTENDED_SDTR; + msg[3] = period; + msg[4] = offset; + return 5; +} +EXPORT_SYMBOL_GPL(spi_populate_sync_msg); + +int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, + int width, int options) +{ + msg[0] = EXTENDED_MESSAGE; + msg[1] = 6; + msg[2] = EXTENDED_PPR; + msg[3] = period; + msg[4] = 0; + msg[5] = offset; + msg[6] = width; + msg[7] = options; + return 8; +} +EXPORT_SYMBOL_GPL(spi_populate_ppr_msg); + +/** + * spi_populate_tag_msg - place a tag message in a buffer + * @msg: pointer to the area to place the tag + * @cmd: pointer to the scsi command for the tag + * + * Notes: + * designed to create the correct type of tag message for the + * particular request. Returns the size of the tag message. + * May return 0 if TCQ is disabled for this device. 
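+ *
+ *	For illustration, a tagged command whose request carries block
+ *	layer tag 5 yields the two bytes { SIMPLE_QUEUE_TAG, 0x05 }
+ *	(0x20 0x05) and a return value of 2, which the LLD typically
+ *	sends in the message-out phase after its IDENTIFY message.
+ *	The negotiation helpers above follow the same pattern, e.g.
+ *	spi_populate_sync_msg(msg, 0x0c, 15) fills
+ *	{ 0x01, 0x03, 0x01, 0x0c, 0x0f }, an SDTR requesting a FAST-20
+ *	period (factor 0x0c) and offset 15, and returns 5.  (The tag
+ *	value, period and offset are example numbers only.)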
+ **/ +int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd) +{ + if (cmd->flags & SCMD_TAGGED) { + *msg++ = SIMPLE_QUEUE_TAG; + *msg++ = scsi_cmd_to_rq(cmd)->tag; + return 2; + } + + return 0; +} +EXPORT_SYMBOL_GPL(spi_populate_tag_msg); + +#ifdef CONFIG_SCSI_CONSTANTS +static const char * const one_byte_msgs[] = { +/* 0x00 */ "Task Complete", NULL /* Extended Message */, "Save Pointers", +/* 0x03 */ "Restore Pointers", "Disconnect", "Initiator Error", +/* 0x06 */ "Abort Task Set", "Message Reject", "Nop", "Message Parity Error", +/* 0x0a */ "Linked Command Complete", "Linked Command Complete w/flag", +/* 0x0c */ "Target Reset", "Abort Task", "Clear Task Set", +/* 0x0f */ "Initiate Recovery", "Release Recovery", +/* 0x11 */ "Terminate Process", "Continue Task", "Target Transfer Disable", +/* 0x14 */ NULL, NULL, "Clear ACA", "LUN Reset" +}; + +static const char * const two_byte_msgs[] = { +/* 0x20 */ "Simple Queue Tag", "Head of Queue Tag", "Ordered Queue Tag", +/* 0x23 */ "Ignore Wide Residue", "ACA" +}; + +static const char * const extended_msgs[] = { +/* 0x00 */ "Modify Data Pointer", "Synchronous Data Transfer Request", +/* 0x02 */ "SCSI-I Extended Identify", "Wide Data Transfer Request", +/* 0x04 */ "Parallel Protocol Request", "Modify Bidirectional Data Pointer" +}; + +static void print_nego(const unsigned char *msg, int per, int off, int width) +{ + if (per) { + char buf[20]; + period_to_str(buf, msg[per]); + printk("period = %s ns ", buf); + } + + if (off) + printk("offset = %d ", msg[off]); + if (width) + printk("width = %d ", 8 << msg[width]); +} + +static void print_ptr(const unsigned char *msg, int msb, const char *desc) +{ + int ptr = (msg[msb] << 24) | (msg[msb+1] << 16) | (msg[msb+2] << 8) | + msg[msb+3]; + printk("%s = %d ", desc, ptr); +} + +int spi_print_msg(const unsigned char *msg) +{ + int len = 1, i; + if (msg[0] == EXTENDED_MESSAGE) { + len = 2 + msg[1]; + if (len == 2) + len += 256; + if (msg[2] < ARRAY_SIZE(extended_msgs)) + printk ("%s ", extended_msgs[msg[2]]); + else + printk ("Extended Message, reserved code (0x%02x) ", + (int) msg[2]); + switch (msg[2]) { + case EXTENDED_MODIFY_DATA_POINTER: + print_ptr(msg, 3, "pointer"); + break; + case EXTENDED_SDTR: + print_nego(msg, 3, 4, 0); + break; + case EXTENDED_WDTR: + print_nego(msg, 0, 0, 3); + break; + case EXTENDED_PPR: + print_nego(msg, 3, 5, 6); + break; + case EXTENDED_MODIFY_BIDI_DATA_PTR: + print_ptr(msg, 3, "out"); + print_ptr(msg, 7, "in"); + break; + default: + for (i = 2; i < len; ++i) + printk("%02x ", msg[i]); + } + /* Identify */ + } else if (msg[0] & 0x80) { + printk("Identify disconnect %sallowed %s %d ", + (msg[0] & 0x40) ? "" : "not ", + (msg[0] & 0x20) ? 
"target routine" : "lun", + msg[0] & 0x7); + /* Normal One byte */ + } else if (msg[0] < 0x1f) { + if (msg[0] < ARRAY_SIZE(one_byte_msgs) && one_byte_msgs[msg[0]]) + printk("%s ", one_byte_msgs[msg[0]]); + else + printk("reserved (%02x) ", msg[0]); + } else if (msg[0] == 0x55) { + printk("QAS Request "); + /* Two byte */ + } else if (msg[0] <= 0x2f) { + if ((msg[0] - 0x20) < ARRAY_SIZE(two_byte_msgs)) + printk("%s %02x ", two_byte_msgs[msg[0] - 0x20], + msg[1]); + else + printk("reserved two byte (%02x %02x) ", + msg[0], msg[1]); + len = 2; + } else + printk("reserved "); + return len; +} +EXPORT_SYMBOL(spi_print_msg); + +#else /* ifndef CONFIG_SCSI_CONSTANTS */ + +int spi_print_msg(const unsigned char *msg) +{ + int len = 1, i; + + if (msg[0] == EXTENDED_MESSAGE) { + len = 2 + msg[1]; + if (len == 2) + len += 256; + for (i = 0; i < len; ++i) + printk("%02x ", msg[i]); + /* Identify */ + } else if (msg[0] & 0x80) { + printk("%02x ", msg[0]); + /* Normal One byte */ + } else if ((msg[0] < 0x1f) || (msg[0] == 0x55)) { + printk("%02x ", msg[0]); + /* Two byte */ + } else if (msg[0] <= 0x2f) { + printk("%02x %02x", msg[0], msg[1]); + len = 2; + } else + printk("%02x ", msg[0]); + return len; +} +EXPORT_SYMBOL(spi_print_msg); +#endif /* ! CONFIG_SCSI_CONSTANTS */ + +static int spi_device_match(struct attribute_container *cont, + struct device *dev) +{ + struct scsi_device *sdev; + struct Scsi_Host *shost; + struct spi_internal *i; + + if (!scsi_is_sdev_device(dev)) + return 0; + + sdev = to_scsi_device(dev); + shost = sdev->host; + if (!shost->transportt || shost->transportt->host_attrs.ac.class + != &spi_host_class.class) + return 0; + /* Note: this class has no device attributes, so it has + * no per-HBA allocation and thus we don't need to distinguish + * the attribute containers for the device */ + i = to_spi_internal(shost->transportt); + if (i->f->deny_binding && i->f->deny_binding(sdev->sdev_target)) + return 0; + return 1; +} + +static int spi_target_match(struct attribute_container *cont, + struct device *dev) +{ + struct Scsi_Host *shost; + struct scsi_target *starget; + struct spi_internal *i; + + if (!scsi_is_target_device(dev)) + return 0; + + shost = dev_to_shost(dev->parent); + if (!shost->transportt || shost->transportt->host_attrs.ac.class + != &spi_host_class.class) + return 0; + + i = to_spi_internal(shost->transportt); + starget = to_scsi_target(dev); + + if (i->f->deny_binding && i->f->deny_binding(starget)) + return 0; + + return &i->t.target_attrs.ac == cont; +} + +static DECLARE_TRANSPORT_CLASS(spi_transport_class, + "spi_transport", + spi_setup_transport_attrs, + NULL, + spi_target_configure); + +static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class, + spi_device_match, + spi_device_configure); + +static struct attribute *host_attributes[] = { + &dev_attr_signalling.attr, + &dev_attr_host_width.attr, + &dev_attr_hba_id.attr, + NULL +}; + +static struct attribute_group host_attribute_group = { + .attrs = host_attributes, +}; + +static int spi_host_configure(struct transport_container *tc, + struct device *dev, + struct device *cdev) +{ + struct kobject *kobj = &cdev->kobj; + struct Scsi_Host *shost = transport_class_to_shost(cdev); + struct spi_internal *si = to_spi_internal(shost->transportt); + struct attribute *attr = &dev_attr_signalling.attr; + int rc = 0; + + if (si->f->set_signalling) + rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR); + + return rc; +} + +/* returns true if we should be showing the variable. 
Also + * overloads the return by setting 1<<1 if the attribute should + * be writeable */ +#define TARGET_ATTRIBUTE_HELPER(name) \ + (si->f->show_##name ? S_IRUGO : 0) | \ + (si->f->set_##name ? S_IWUSR : 0) + +static umode_t target_attribute_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *cdev = container_of(kobj, struct device, kobj); + struct scsi_target *starget = transport_class_to_starget(cdev); + struct Scsi_Host *shost = transport_class_to_shost(cdev); + struct spi_internal *si = to_spi_internal(shost->transportt); + + if (attr == &dev_attr_period.attr && + spi_support_sync(starget)) + return TARGET_ATTRIBUTE_HELPER(period); + else if (attr == &dev_attr_min_period.attr && + spi_support_sync(starget)) + return TARGET_ATTRIBUTE_HELPER(period); + else if (attr == &dev_attr_offset.attr && + spi_support_sync(starget)) + return TARGET_ATTRIBUTE_HELPER(offset); + else if (attr == &dev_attr_max_offset.attr && + spi_support_sync(starget)) + return TARGET_ATTRIBUTE_HELPER(offset); + else if (attr == &dev_attr_width.attr && + spi_support_wide(starget)) + return TARGET_ATTRIBUTE_HELPER(width); + else if (attr == &dev_attr_max_width.attr && + spi_support_wide(starget)) + return TARGET_ATTRIBUTE_HELPER(width); + else if (attr == &dev_attr_iu.attr && + spi_support_ius(starget)) + return TARGET_ATTRIBUTE_HELPER(iu); + else if (attr == &dev_attr_max_iu.attr && + spi_support_ius(starget)) + return TARGET_ATTRIBUTE_HELPER(iu); + else if (attr == &dev_attr_dt.attr && + spi_support_dt(starget)) + return TARGET_ATTRIBUTE_HELPER(dt); + else if (attr == &dev_attr_qas.attr && + spi_support_qas(starget)) + return TARGET_ATTRIBUTE_HELPER(qas); + else if (attr == &dev_attr_max_qas.attr && + spi_support_qas(starget)) + return TARGET_ATTRIBUTE_HELPER(qas); + else if (attr == &dev_attr_wr_flow.attr && + spi_support_ius(starget)) + return TARGET_ATTRIBUTE_HELPER(wr_flow); + else if (attr == &dev_attr_rd_strm.attr && + spi_support_ius(starget)) + return TARGET_ATTRIBUTE_HELPER(rd_strm); + else if (attr == &dev_attr_rti.attr && + spi_support_ius(starget)) + return TARGET_ATTRIBUTE_HELPER(rti); + else if (attr == &dev_attr_pcomp_en.attr && + spi_support_ius(starget)) + return TARGET_ATTRIBUTE_HELPER(pcomp_en); + else if (attr == &dev_attr_hold_mcs.attr && + spi_support_ius(starget)) + return TARGET_ATTRIBUTE_HELPER(hold_mcs); + else if (attr == &dev_attr_revalidate.attr) + return S_IWUSR; + + return 0; +} + +static struct attribute *target_attributes[] = { + &dev_attr_period.attr, + &dev_attr_min_period.attr, + &dev_attr_offset.attr, + &dev_attr_max_offset.attr, + &dev_attr_width.attr, + &dev_attr_max_width.attr, + &dev_attr_iu.attr, + &dev_attr_max_iu.attr, + &dev_attr_dt.attr, + &dev_attr_qas.attr, + &dev_attr_max_qas.attr, + &dev_attr_wr_flow.attr, + &dev_attr_rd_strm.attr, + &dev_attr_rti.attr, + &dev_attr_pcomp_en.attr, + &dev_attr_hold_mcs.attr, + &dev_attr_revalidate.attr, + NULL +}; + +static struct attribute_group target_attribute_group = { + .attrs = target_attributes, + .is_visible = target_attribute_is_visible, +}; + +static int spi_target_configure(struct transport_container *tc, + struct device *dev, + struct device *cdev) +{ + struct kobject *kobj = &cdev->kobj; + + /* force an update based on parameters read from the device */ + sysfs_update_group(kobj, &target_attribute_group); + + return 0; +} + +struct scsi_transport_template * +spi_attach_transport(struct spi_function_template *ft) +{ + struct spi_internal *i = kzalloc(sizeof(struct spi_internal), + 
GFP_KERNEL); + + if (unlikely(!i)) + return NULL; + + i->t.target_attrs.ac.class = &spi_transport_class.class; + i->t.target_attrs.ac.grp = &target_attribute_group; + i->t.target_attrs.ac.match = spi_target_match; + transport_container_register(&i->t.target_attrs); + i->t.target_size = sizeof(struct spi_transport_attrs); + i->t.host_attrs.ac.class = &spi_host_class.class; + i->t.host_attrs.ac.grp = &host_attribute_group; + i->t.host_attrs.ac.match = spi_host_match; + transport_container_register(&i->t.host_attrs); + i->t.host_size = sizeof(struct spi_host_attrs); + i->f = ft; + + return &i->t; +} +EXPORT_SYMBOL(spi_attach_transport); + +void spi_release_transport(struct scsi_transport_template *t) +{ + struct spi_internal *i = to_spi_internal(t); + + transport_container_unregister(&i->t.target_attrs); + transport_container_unregister(&i->t.host_attrs); + + kfree(i); +} +EXPORT_SYMBOL(spi_release_transport); + +static __init int spi_transport_init(void) +{ + int error = scsi_dev_info_add_list(SCSI_DEVINFO_SPI, + "SCSI Parallel Transport Class"); + if (!error) { + int i; + + for (i = 0; spi_static_device_list[i].vendor; i++) + scsi_dev_info_list_add_keyed(1, /* compatible */ + spi_static_device_list[i].vendor, + spi_static_device_list[i].model, + NULL, + spi_static_device_list[i].flags, + SCSI_DEVINFO_SPI); + } + + error = transport_class_register(&spi_transport_class); + if (error) + return error; + error = anon_transport_class_register(&spi_device_class); + return transport_class_register(&spi_host_class); +} + +static void __exit spi_transport_exit(void) +{ + transport_class_unregister(&spi_transport_class); + anon_transport_class_unregister(&spi_device_class); + transport_class_unregister(&spi_host_class); + scsi_dev_info_remove_list(SCSI_DEVINFO_SPI); +} + +MODULE_AUTHOR("Martin Hicks"); +MODULE_DESCRIPTION("SPI Transport Attributes"); +MODULE_LICENSE("GPL"); + +module_init(spi_transport_init); +module_exit(spi_transport_exit); diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c new file mode 100644 index 000000000..64f6b22e8 --- /dev/null +++ b/drivers/scsi/scsi_transport_srp.c @@ -0,0 +1,900 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SCSI RDMA (SRP) transport class + * + * Copyright (C) 2007 FUJITA Tomonori + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "scsi_priv.h" + +struct srp_host_attrs { + atomic_t next_port_id; +}; +#define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data) + +#define SRP_HOST_ATTRS 0 +#define SRP_RPORT_ATTRS 8 + +struct srp_internal { + struct scsi_transport_template t; + struct srp_function_template *f; + + struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1]; + + struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1]; + struct transport_container rport_attr_cont; +}; + +static int scsi_is_srp_rport(const struct device *dev); + +#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t) + +#define dev_to_rport(d) container_of(d, struct srp_rport, dev) +#define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent) +static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r) +{ + return dev_to_shost(r->dev.parent); +} + +static int find_child_rport(struct device *dev, void *data) +{ + struct device **child = data; + + if (scsi_is_srp_rport(dev)) { + WARN_ON_ONCE(*child); + *child = dev; + } + return 0; +} + +static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost) 
+{ + struct device *child = NULL; + + WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child, + find_child_rport) < 0); + return child ? dev_to_rport(child) : NULL; +} + +/** + * srp_tmo_valid() - check timeout combination validity + * @reconnect_delay: Reconnect delay in seconds. + * @fast_io_fail_tmo: Fast I/O fail timeout in seconds. + * @dev_loss_tmo: Device loss timeout in seconds. + * + * The combination of the timeout parameters must be such that SCSI commands + * are finished in a reasonable time. Hence do not allow the fast I/O fail + * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to + * exceed that limit if failing I/O fast has been disabled. Furthermore, these + * parameters must be such that multipath can detect failed paths timely. + * Hence do not allow all three parameters to be disabled simultaneously. + */ +int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, long dev_loss_tmo) +{ + if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0) + return -EINVAL; + if (reconnect_delay == 0) + return -EINVAL; + if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) + return -EINVAL; + if (fast_io_fail_tmo < 0 && + dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) + return -EINVAL; + if (dev_loss_tmo >= LONG_MAX / HZ) + return -EINVAL; + if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 && + fast_io_fail_tmo >= dev_loss_tmo) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(srp_tmo_valid); + +static int srp_host_setup(struct transport_container *tc, struct device *dev, + struct device *cdev) +{ + struct Scsi_Host *shost = dev_to_shost(dev); + struct srp_host_attrs *srp_host = to_srp_host_attrs(shost); + + atomic_set(&srp_host->next_port_id, 0); + return 0; +} + +static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup, + NULL, NULL); + +static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports", + NULL, NULL, NULL); + +static ssize_t +show_srp_rport_id(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + return sprintf(buf, "%16phC\n", rport->port_id); +} + +static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL); + +static const struct { + u32 value; + char *name; +} srp_rport_role_names[] = { + {SRP_RPORT_ROLE_INITIATOR, "SRP Initiator"}, + {SRP_RPORT_ROLE_TARGET, "SRP Target"}, +}; + +static ssize_t +show_srp_rport_roles(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int i; + char *name = NULL; + + for (i = 0; i < ARRAY_SIZE(srp_rport_role_names); i++) + if (srp_rport_role_names[i].value == rport->roles) { + name = srp_rport_role_names[i].name; + break; + } + return sprintf(buf, "%s\n", name ? 
: "unknown"); +} + +static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL); + +static ssize_t store_srp_rport_delete(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + struct Scsi_Host *shost = dev_to_shost(dev); + struct srp_internal *i = to_srp_internal(shost->transportt); + + if (i->f->rport_delete) { + i->f->rport_delete(rport); + return count; + } else { + return -ENOSYS; + } +} + +static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete); + +static ssize_t show_srp_rport_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + static const char *const state_name[] = { + [SRP_RPORT_RUNNING] = "running", + [SRP_RPORT_BLOCKED] = "blocked", + [SRP_RPORT_FAIL_FAST] = "fail-fast", + [SRP_RPORT_LOST] = "lost", + }; + struct srp_rport *rport = transport_class_to_srp_rport(dev); + enum srp_rport_state state = rport->state; + + return sprintf(buf, "%s\n", + (unsigned)state < ARRAY_SIZE(state_name) ? + state_name[state] : "???"); +} + +static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL); + +static ssize_t srp_show_tmo(char *buf, int tmo) +{ + return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n"); +} + +int srp_parse_tmo(int *tmo, const char *buf) +{ + int res = 0; + + if (strncmp(buf, "off", 3) != 0) + res = kstrtoint(buf, 0, tmo); + else + *tmo = -1; + + return res; +} +EXPORT_SYMBOL(srp_parse_tmo); + +static ssize_t show_reconnect_delay(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return srp_show_tmo(buf, rport->reconnect_delay); +} + +static ssize_t store_reconnect_delay(struct device *dev, + struct device_attribute *attr, + const char *buf, const size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int res, delay; + + res = srp_parse_tmo(&delay, buf); + if (res) + goto out; + res = srp_tmo_valid(delay, rport->fast_io_fail_tmo, + rport->dev_loss_tmo); + if (res) + goto out; + + if (rport->reconnect_delay <= 0 && delay > 0 && + rport->state != SRP_RPORT_RUNNING) { + queue_delayed_work(system_long_wq, &rport->reconnect_work, + delay * HZ); + } else if (delay <= 0) { + cancel_delayed_work(&rport->reconnect_work); + } + rport->reconnect_delay = delay; + res = count; + +out: + return res; +} + +static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay, + store_reconnect_delay); + +static ssize_t show_failed_reconnects(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return sprintf(buf, "%d\n", rport->failed_reconnects); +} + +static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL); + +static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return srp_show_tmo(buf, rport->fast_io_fail_tmo); +} + +static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int res; + int fast_io_fail_tmo; + + res = srp_parse_tmo(&fast_io_fail_tmo, buf); + if (res) + goto out; + res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo, + rport->dev_loss_tmo); + if (res) + goto out; + rport->fast_io_fail_tmo = fast_io_fail_tmo; + res = count; + +out: 
+ return res; +} + +static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, + show_srp_rport_fast_io_fail_tmo, + store_srp_rport_fast_io_fail_tmo); + +static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return srp_show_tmo(buf, rport->dev_loss_tmo); +} + +static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int res; + int dev_loss_tmo; + + res = srp_parse_tmo(&dev_loss_tmo, buf); + if (res) + goto out; + res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo, + dev_loss_tmo); + if (res) + goto out; + rport->dev_loss_tmo = dev_loss_tmo; + res = count; + +out: + return res; +} + +static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR, + show_srp_rport_dev_loss_tmo, + store_srp_rport_dev_loss_tmo); + +static int srp_rport_set_state(struct srp_rport *rport, + enum srp_rport_state new_state) +{ + enum srp_rport_state old_state = rport->state; + + lockdep_assert_held(&rport->mutex); + + switch (new_state) { + case SRP_RPORT_RUNNING: + switch (old_state) { + case SRP_RPORT_LOST: + goto invalid; + default: + break; + } + break; + case SRP_RPORT_BLOCKED: + switch (old_state) { + case SRP_RPORT_RUNNING: + break; + default: + goto invalid; + } + break; + case SRP_RPORT_FAIL_FAST: + switch (old_state) { + case SRP_RPORT_LOST: + goto invalid; + default: + break; + } + break; + case SRP_RPORT_LOST: + break; + } + rport->state = new_state; + return 0; + +invalid: + return -EINVAL; +} + +/** + * srp_reconnect_work() - reconnect and schedule a new attempt if necessary + * @work: Work structure used for scheduling this operation. + */ +static void srp_reconnect_work(struct work_struct *work) +{ + struct srp_rport *rport = container_of(to_delayed_work(work), + struct srp_rport, reconnect_work); + struct Scsi_Host *shost = rport_to_shost(rport); + int delay, res; + + res = srp_reconnect_rport(rport); + if (res != 0) { + shost_printk(KERN_ERR, shost, + "reconnect attempt %d failed (%d)\n", + ++rport->failed_reconnects, res); + delay = rport->reconnect_delay * + min(100, max(1, rport->failed_reconnects - 10)); + if (delay > 0) + queue_delayed_work(system_long_wq, + &rport->reconnect_work, delay * HZ); + } +} + +/* + * scsi_block_targets() must have been called before this function is + * called to guarantee that no .queuecommand() calls are in progress. + */ +static void __rport_fail_io_fast(struct srp_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + struct srp_internal *i; + + lockdep_assert_held(&rport->mutex); + + if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST)) + return; + + scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); + + /* Involve the LLD if possible to terminate all I/O on the rport. */ + i = to_srp_internal(shost->transportt); + if (i->f->terminate_rport_io) + i->f->terminate_rport_io(rport); +} + +/** + * rport_fast_io_fail_timedout() - fast I/O failure timeout handler + * @work: Work structure used for scheduling this operation. 
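+ *
+ * For illustration, with the defaults set up in srp_rport_add() below
+ * when the transport template does not override them (reconnect_delay
+ * 10, fast_io_fail_tmo 15, dev_loss_tmo 60, a combination that
+ * srp_tmo_valid() accepts): once the LLD calls
+ * srp_start_tl_fail_timers() the target is blocked, this handler runs
+ * 15 seconds later and fails outstanding I/O fast, and
+ * rport_dev_loss_timedout() removes the rport after 60 seconds unless
+ * a reconnect attempt succeeds first.  A combination with equal,
+ * non-negative fast_io_fail_tmo and dev_loss_tmo would instead be
+ * rejected by srp_tmo_valid() with -EINVAL.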
+ */ +static void rport_fast_io_fail_timedout(struct work_struct *work) +{ + struct srp_rport *rport = container_of(to_delayed_work(work), + struct srp_rport, fast_io_fail_work); + struct Scsi_Host *shost = rport_to_shost(rport); + + pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n", + dev_name(&rport->dev), dev_name(&shost->shost_gendev)); + + mutex_lock(&rport->mutex); + if (rport->state == SRP_RPORT_BLOCKED) + __rport_fail_io_fast(rport); + mutex_unlock(&rport->mutex); +} + +/** + * rport_dev_loss_timedout() - device loss timeout handler + * @work: Work structure used for scheduling this operation. + */ +static void rport_dev_loss_timedout(struct work_struct *work) +{ + struct srp_rport *rport = container_of(to_delayed_work(work), + struct srp_rport, dev_loss_work); + struct Scsi_Host *shost = rport_to_shost(rport); + struct srp_internal *i = to_srp_internal(shost->transportt); + + pr_info("dev_loss_tmo expired for SRP %s / %s.\n", + dev_name(&rport->dev), dev_name(&shost->shost_gendev)); + + mutex_lock(&rport->mutex); + WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0); + scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); + mutex_unlock(&rport->mutex); + + i->f->rport_delete(rport); +} + +static void __srp_start_tl_fail_timers(struct srp_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + int delay, fast_io_fail_tmo, dev_loss_tmo; + + lockdep_assert_held(&rport->mutex); + + delay = rport->reconnect_delay; + fast_io_fail_tmo = rport->fast_io_fail_tmo; + dev_loss_tmo = rport->dev_loss_tmo; + pr_debug("%s current state: %d\n", dev_name(&shost->shost_gendev), + rport->state); + + if (rport->state == SRP_RPORT_LOST) + return; + if (delay > 0) + queue_delayed_work(system_long_wq, &rport->reconnect_work, + 1UL * delay * HZ); + if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) && + srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { + pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev), + rport->state); + scsi_block_targets(shost, &shost->shost_gendev); + if (fast_io_fail_tmo >= 0) + queue_delayed_work(system_long_wq, + &rport->fast_io_fail_work, + 1UL * fast_io_fail_tmo * HZ); + if (dev_loss_tmo >= 0) + queue_delayed_work(system_long_wq, + &rport->dev_loss_work, + 1UL * dev_loss_tmo * HZ); + } +} + +/** + * srp_start_tl_fail_timers() - start the transport layer failure timers + * @rport: SRP target port. + * + * Start the transport layer fast I/O failure and device loss timers. Do not + * modify a timer that was already started. + */ +void srp_start_tl_fail_timers(struct srp_rport *rport) +{ + mutex_lock(&rport->mutex); + __srp_start_tl_fail_timers(rport); + mutex_unlock(&rport->mutex); +} +EXPORT_SYMBOL(srp_start_tl_fail_timers); + +/** + * srp_reconnect_rport() - reconnect to an SRP target port + * @rport: SRP target port. + * + * Blocks SCSI command queueing before invoking reconnect() such that + * queuecommand() won't be invoked concurrently with reconnect() from outside + * the SCSI EH. This is important since a reconnect() implementation may + * reallocate resources needed by queuecommand(). + * + * Notes: + * - This function neither waits until outstanding requests have finished nor + * tries to abort these. It is the responsibility of the reconnect() + * function to finish outstanding commands before reconnecting to the target + * port. + * - It is the responsibility of the caller to ensure that the resources + * reallocated by the reconnect() function won't be used while this function + * is in progress. 
One possible strategy is to invoke this function from + * the context of the SCSI EH thread only. Another possible strategy is to + * lock the rport mutex inside each SCSI LLD callback that can be invoked by + * the SCSI EH (the scsi_host_template.eh_*() functions and also the + * scsi_host_template.queuecommand() function). + */ +int srp_reconnect_rport(struct srp_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + struct srp_internal *i = to_srp_internal(shost->transportt); + struct scsi_device *sdev; + int res; + + pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev)); + + res = mutex_lock_interruptible(&rport->mutex); + if (res) + goto out; + if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST) + /* + * sdev state must be SDEV_TRANSPORT_OFFLINE, transition + * to SDEV_BLOCK is illegal. Calling scsi_target_unblock() + * later is ok though, scsi_internal_device_unblock_nowait() + * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK. + */ + scsi_block_targets(shost, &shost->shost_gendev); + res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV; + pr_debug("%s (state %d): transport.reconnect() returned %d\n", + dev_name(&shost->shost_gendev), rport->state, res); + if (res == 0) { + cancel_delayed_work(&rport->fast_io_fail_work); + cancel_delayed_work(&rport->dev_loss_work); + + rport->failed_reconnects = 0; + srp_rport_set_state(rport, SRP_RPORT_RUNNING); + scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING); + /* + * If the SCSI error handler has offlined one or more devices, + * invoking scsi_target_unblock() won't change the state of + * these devices into running so do that explicitly. + */ + shost_for_each_device(sdev, shost) { + mutex_lock(&sdev->state_mutex); + if (sdev->sdev_state == SDEV_OFFLINE) + sdev->sdev_state = SDEV_RUNNING; + mutex_unlock(&sdev->state_mutex); + } + } else if (rport->state == SRP_RPORT_RUNNING) { + /* + * srp_reconnect_rport() has been invoked with fast_io_fail + * and dev_loss off. Mark the port as failed and start the TL + * failure timers if these had not yet been started. + */ + __rport_fail_io_fast(rport); + __srp_start_tl_fail_timers(rport); + } else if (rport->state != SRP_RPORT_BLOCKED) { + scsi_target_unblock(&shost->shost_gendev, + SDEV_TRANSPORT_OFFLINE); + } + mutex_unlock(&rport->mutex); + +out: + return res; +} +EXPORT_SYMBOL(srp_reconnect_rport); + +/** + * srp_timed_out() - SRP transport intercept of the SCSI timeout EH + * @scmd: SCSI command. + * + * If a timeout occurs while an rport is in the blocked state, ask the SCSI + * EH to continue waiting (SCSI_EH_RESET_TIMER). Otherwise let the SCSI core + * handle the timeout (SCSI_EH_NOT_HANDLED). + * + * Note: This function is called from soft-IRQ context and with the request + * queue lock held. + */ +enum scsi_timeout_action srp_timed_out(struct scsi_cmnd *scmd) +{ + struct scsi_device *sdev = scmd->device; + struct Scsi_Host *shost = sdev->host; + struct srp_internal *i = to_srp_internal(shost->transportt); + struct srp_rport *rport = shost_to_rport(shost); + + pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); + return rport && rport->fast_io_fail_tmo < 0 && + rport->dev_loss_tmo < 0 && + i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? 
+ SCSI_EH_RESET_TIMER : SCSI_EH_NOT_HANDLED; +} +EXPORT_SYMBOL(srp_timed_out); + +static void srp_rport_release(struct device *dev) +{ + struct srp_rport *rport = dev_to_rport(dev); + + put_device(dev->parent); + kfree(rport); +} + +static int scsi_is_srp_rport(const struct device *dev) +{ + return dev->release == srp_rport_release; +} + +static int srp_rport_match(struct attribute_container *cont, + struct device *dev) +{ + struct Scsi_Host *shost; + struct srp_internal *i; + + if (!scsi_is_srp_rport(dev)) + return 0; + + shost = dev_to_shost(dev->parent); + if (!shost->transportt) + return 0; + if (shost->transportt->host_attrs.ac.class != &srp_host_class.class) + return 0; + + i = to_srp_internal(shost->transportt); + return &i->rport_attr_cont.ac == cont; +} + +static int srp_host_match(struct attribute_container *cont, struct device *dev) +{ + struct Scsi_Host *shost; + struct srp_internal *i; + + if (!scsi_is_host_device(dev)) + return 0; + + shost = dev_to_shost(dev); + if (!shost->transportt) + return 0; + if (shost->transportt->host_attrs.ac.class != &srp_host_class.class) + return 0; + + i = to_srp_internal(shost->transportt); + return &i->t.host_attrs.ac == cont; +} + +/** + * srp_rport_get() - increment rport reference count + * @rport: SRP target port. + */ +void srp_rport_get(struct srp_rport *rport) +{ + get_device(&rport->dev); +} +EXPORT_SYMBOL(srp_rport_get); + +/** + * srp_rport_put() - decrement rport reference count + * @rport: SRP target port. + */ +void srp_rport_put(struct srp_rport *rport) +{ + put_device(&rport->dev); +} +EXPORT_SYMBOL(srp_rport_put); + +/** + * srp_rport_add - add a SRP remote port to the device hierarchy + * @shost: scsi host the remote port is connected to. + * @ids: The port id for the remote port. + * + * Publishes a port to the rest of the system. + */ +struct srp_rport *srp_rport_add(struct Scsi_Host *shost, + struct srp_rport_identifiers *ids) +{ + struct srp_rport *rport; + struct device *parent = &shost->shost_gendev; + struct srp_internal *i = to_srp_internal(shost->transportt); + int id, ret; + + rport = kzalloc(sizeof(*rport), GFP_KERNEL); + if (!rport) + return ERR_PTR(-ENOMEM); + + mutex_init(&rport->mutex); + + device_initialize(&rport->dev); + + rport->dev.parent = get_device(parent); + rport->dev.release = srp_rport_release; + + memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id)); + rport->roles = ids->roles; + + if (i->f->reconnect) + rport->reconnect_delay = i->f->reconnect_delay ? + *i->f->reconnect_delay : 10; + INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work); + rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ? + *i->f->fast_io_fail_tmo : 15; + rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60; + INIT_DELAYED_WORK(&rport->fast_io_fail_work, + rport_fast_io_fail_timedout); + INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout); + + id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id); + dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id); + + transport_setup_device(&rport->dev); + + ret = device_add(&rport->dev); + if (ret) { + transport_destroy_device(&rport->dev); + put_device(&rport->dev); + return ERR_PTR(ret); + } + + transport_add_device(&rport->dev); + transport_configure_device(&rport->dev); + + return rport; +} +EXPORT_SYMBOL_GPL(srp_rport_add); + +/** + * srp_rport_del - remove a SRP remote port + * @rport: SRP remote port to remove + * + * Removes the specified SRP remote port. 
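+ *
+ * For illustration only (a hypothetical initiator LLD, error handling
+ * omitted; target_port_id and shost are assumed to exist in the
+ * caller), the usual pairing with srp_rport_add() above is:
+ *
+ *	struct srp_rport_identifiers ids;
+ *	struct srp_rport *rport;
+ *
+ *	memcpy(ids.port_id, target_port_id, sizeof(ids.port_id));
+ *	ids.roles = SRP_RPORT_ROLE_TARGET;
+ *	rport = srp_rport_add(shost, &ids);	// after scsi_add_host()
+ *	...
+ *	srp_rport_del(rport);			// or srp_remove_host(shost)
+ *						// before scsi_remove_host()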
+ */ +void srp_rport_del(struct srp_rport *rport) +{ + struct device *dev = &rport->dev; + + transport_remove_device(dev); + device_del(dev); + transport_destroy_device(dev); + + put_device(dev); +} +EXPORT_SYMBOL_GPL(srp_rport_del); + +static int do_srp_rport_del(struct device *dev, void *data) +{ + if (scsi_is_srp_rport(dev)) + srp_rport_del(dev_to_rport(dev)); + return 0; +} + +/** + * srp_remove_host - tear down a Scsi_Host's SRP data structures + * @shost: Scsi Host that is torn down + * + * Removes all SRP remote ports for a given Scsi_Host. + * Must be called just before scsi_remove_host for SRP HBAs. + */ +void srp_remove_host(struct Scsi_Host *shost) +{ + device_for_each_child(&shost->shost_gendev, NULL, do_srp_rport_del); +} +EXPORT_SYMBOL_GPL(srp_remove_host); + +/** + * srp_stop_rport_timers - stop the transport layer recovery timers + * @rport: SRP remote port for which to stop the timers. + * + * Must be called after srp_remove_host() and scsi_remove_host(). The caller + * must hold a reference on the rport (rport->dev) and on the SCSI host + * (rport->dev.parent). + */ +void srp_stop_rport_timers(struct srp_rport *rport) +{ + mutex_lock(&rport->mutex); + if (rport->state == SRP_RPORT_BLOCKED) + __rport_fail_io_fast(rport); + srp_rport_set_state(rport, SRP_RPORT_LOST); + mutex_unlock(&rport->mutex); + + cancel_delayed_work_sync(&rport->reconnect_work); + cancel_delayed_work_sync(&rport->fast_io_fail_work); + cancel_delayed_work_sync(&rport->dev_loss_work); +} +EXPORT_SYMBOL_GPL(srp_stop_rport_timers); + +/** + * srp_attach_transport - instantiate SRP transport template + * @ft: SRP transport class function template + */ +struct scsi_transport_template * +srp_attach_transport(struct srp_function_template *ft) +{ + int count; + struct srp_internal *i; + + i = kzalloc(sizeof(*i), GFP_KERNEL); + if (!i) + return NULL; + + i->t.host_size = sizeof(struct srp_host_attrs); + i->t.host_attrs.ac.attrs = &i->host_attrs[0]; + i->t.host_attrs.ac.class = &srp_host_class.class; + i->t.host_attrs.ac.match = srp_host_match; + i->host_attrs[0] = NULL; + transport_container_register(&i->t.host_attrs); + + i->rport_attr_cont.ac.attrs = &i->rport_attrs[0]; + i->rport_attr_cont.ac.class = &srp_rport_class.class; + i->rport_attr_cont.ac.match = srp_rport_match; + + count = 0; + i->rport_attrs[count++] = &dev_attr_port_id; + i->rport_attrs[count++] = &dev_attr_roles; + if (ft->has_rport_state) { + i->rport_attrs[count++] = &dev_attr_state; + i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo; + i->rport_attrs[count++] = &dev_attr_dev_loss_tmo; + } + if (ft->reconnect) { + i->rport_attrs[count++] = &dev_attr_reconnect_delay; + i->rport_attrs[count++] = &dev_attr_failed_reconnects; + } + if (ft->rport_delete) + i->rport_attrs[count++] = &dev_attr_delete; + i->rport_attrs[count++] = NULL; + BUG_ON(count > ARRAY_SIZE(i->rport_attrs)); + + transport_container_register(&i->rport_attr_cont); + + i->f = ft; + + return &i->t; +} +EXPORT_SYMBOL_GPL(srp_attach_transport); + +/** + * srp_release_transport - release SRP transport template instance + * @t: transport template instance + */ +void srp_release_transport(struct scsi_transport_template *t) +{ + struct srp_internal *i = to_srp_internal(t); + + transport_container_unregister(&i->t.host_attrs); + transport_container_unregister(&i->rport_attr_cont); + + kfree(i); +} +EXPORT_SYMBOL_GPL(srp_release_transport); + +static __init int srp_transport_init(void) +{ + int ret; + + ret = transport_class_register(&srp_host_class); + if (ret) + return ret; + ret = 
transport_class_register(&srp_rport_class); + if (ret) + goto unregister_host_class; + + return 0; +unregister_host_class: + transport_class_unregister(&srp_host_class); + return ret; +} + +static void __exit srp_transport_exit(void) +{ + transport_class_unregister(&srp_host_class); + transport_class_unregister(&srp_rport_class); +} + +MODULE_AUTHOR("FUJITA Tomonori"); +MODULE_DESCRIPTION("SRP Transport Attributes"); +MODULE_LICENSE("GPL"); + +module_init(srp_transport_init); +module_exit(srp_transport_exit); diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c new file mode 100644 index 000000000..e2c7d8ef2 --- /dev/null +++ b/drivers/scsi/scsicam.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * scsicam.c - SCSI CAM support functions, use for HDIO_GETGEO, etc. + * + * Copyright 1993, 1994 Drew Eckhardt + * Visionary Computing + * (Unix and Linux consulting and custom programming) + * drew@Colorado.EDU + * +1 (303) 786-7975 + * + * For more information, please consult the SCSI-CAM draft. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/** + * scsi_bios_ptable - Read PC partition table out of first sector of device. + * @dev: from this device + * + * Description: Reads the first sector from the device and returns %0x42 bytes + * starting at offset %0x1be. + * Returns: partition table in kmalloc(GFP_KERNEL) memory, or NULL on error. + */ +unsigned char *scsi_bios_ptable(struct block_device *dev) +{ + struct address_space *mapping = bdev_whole(dev)->bd_inode->i_mapping; + unsigned char *res = NULL; + struct folio *folio; + + folio = read_mapping_folio(mapping, 0, NULL); + if (IS_ERR(folio)) + return NULL; + + res = kmemdup(folio_address(folio) + 0x1be, 66, GFP_KERNEL); + folio_put(folio); + return res; +} +EXPORT_SYMBOL(scsi_bios_ptable); + +/** + * scsi_partsize - Parse cylinders/heads/sectors from PC partition table + * @bdev: block device to parse + * @capacity: size of the disk in sectors + * @geom: output in form of [hds, cylinders, sectors] + * + * Determine the BIOS mapping/geometry used to create the partition + * table, storing the results in @geom. + * + * Returns: %false on failure, %true on success. 
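+ *
+ * For illustration: if the largest-spanning partition ends at
+ * C/H/S 1023/254/63 and its start_sect + nr_sects is 16450560, then
+ * 1023 * 255 * 63 + 254 * 63 + 63 = 16450560 as well, so the table was
+ * written against a 255-head, 63-sector mapping and the function
+ * stores geom[0] = 255 (heads), geom[1] = 63 (sectors) and
+ * geom[2] = capacity / (255 * 63) (cylinders), matching the
+ * assignments in the code below.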
+ */ +bool scsi_partsize(struct block_device *bdev, sector_t capacity, int geom[3]) +{ + int cyl, ext_cyl, end_head, end_cyl, end_sector; + unsigned int logical_end, physical_end, ext_physical_end; + struct msdos_partition *p, *largest = NULL; + void *buf; + int ret = false; + + buf = scsi_bios_ptable(bdev); + if (!buf) + return false; + + if (*(unsigned short *) (buf + 64) == 0xAA55) { + int largest_cyl = -1, i; + + for (i = 0, p = buf; i < 4; i++, p++) { + if (!p->sys_ind) + continue; +#ifdef DEBUG + printk("scsicam_bios_param : partition %d has system \n", + i); +#endif + cyl = p->cyl + ((p->sector & 0xc0) << 2); + if (cyl > largest_cyl) { + largest_cyl = cyl; + largest = p; + } + } + } + if (largest) { + end_cyl = largest->end_cyl + ((largest->end_sector & 0xc0) << 2); + end_head = largest->end_head; + end_sector = largest->end_sector & 0x3f; + + if (end_head + 1 == 0 || end_sector == 0) + goto out_free_buf; + +#ifdef DEBUG + printk("scsicam_bios_param : end at h = %d, c = %d, s = %d\n", + end_head, end_cyl, end_sector); +#endif + + physical_end = end_cyl * (end_head + 1) * end_sector + + end_head * end_sector + end_sector; + + /* This is the actual _sector_ number at the end */ + logical_end = get_unaligned_le32(&largest->start_sect) + + get_unaligned_le32(&largest->nr_sects); + + /* This is for >1023 cylinders */ + ext_cyl = (logical_end - (end_head * end_sector + end_sector)) + / (end_head + 1) / end_sector; + ext_physical_end = ext_cyl * (end_head + 1) * end_sector + + end_head * end_sector + end_sector; + +#ifdef DEBUG + printk("scsicam_bios_param : logical_end=%d physical_end=%d ext_physical_end=%d ext_cyl=%d\n" + ,logical_end, physical_end, ext_physical_end, ext_cyl); +#endif + + if (logical_end == physical_end || + (end_cyl == 1023 && ext_physical_end == logical_end)) { + geom[0] = end_head + 1; + geom[1] = end_sector; + geom[2] = (unsigned long)capacity / + ((end_head + 1) * end_sector); + ret = true; + goto out_free_buf; + } +#ifdef DEBUG + printk("scsicam_bios_param : logical (%u) != physical (%u)\n", + logical_end, physical_end); +#endif + } + +out_free_buf: + kfree(buf); + return ret; +} +EXPORT_SYMBOL(scsi_partsize); + +/* + * Function : static int setsize(unsigned long capacity,unsigned int *cyls, + * unsigned int *hds, unsigned int *secs); + * + * Purpose : to determine a near-optimal int 0x13 mapping for a + * SCSI disk in terms of lost space of size capacity, storing + * the results in *cyls, *hds, and *secs. + * + * Returns : -1 on failure, 0 on success. + * + * Extracted from + * + * WORKING X3T9.2 + * DRAFT 792D + * see http://www.t10.org/ftp/t10/drafts/cam/cam-r12b.pdf + * + * Revision 6 + * 10-MAR-94 + * Information technology - + * SCSI-2 Common access method + * transport and SCSI interface module + * + * ANNEX A : + * + * setsize() converts a read capacity value to int 13h + * head-cylinder-sector requirements. It minimizes the value for + * number of heads and maximizes the number of cylinders. This + * will support rather large disks before the number of heads + * will not fit in 4 bits (or 6 bits). This algorithm also + * minimizes the number of sectors that will be unused at the end + * of the disk while allowing for very large disks to be + * accommodated. This algorithm does not use physical geometry. 
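+ *
+ * Worked example (illustrative values): for capacity = 1000000 sectors
+ * the code starts from 1024 cylinders and 62 sectors (temp = 63488),
+ * computes heads = 1000000 / 63488 = 15 with a remainder, so heads is
+ * rounded up to 16; then sectors = 1000000 / (1024 * 16) = 61 with a
+ * remainder, rounded up to 62; finally
+ * cylinders = 1000000 / (16 * 62) = 1008.  The resulting 1008/16/62
+ * mapping addresses 999936 of the 1000000 sectors.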
+ */ + +static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds, + unsigned int *secs) +{ + unsigned int rv = 0; + unsigned long heads, sectors, cylinders, temp; + + cylinders = 1024L; /* Set number of cylinders to max */ + sectors = 62L; /* Maximize sectors per track */ + + temp = cylinders * sectors; /* Compute divisor for heads */ + heads = capacity / temp; /* Compute value for number of heads */ + if (capacity % temp) { /* If no remainder, done! */ + heads++; /* Else, increment number of heads */ + temp = cylinders * heads; /* Compute divisor for sectors */ + sectors = capacity / temp; /* Compute value for sectors per + track */ + if (capacity % temp) { /* If no remainder, done! */ + sectors++; /* Else, increment number of sectors */ + temp = heads * sectors; /* Compute divisor for cylinders */ + cylinders = capacity / temp; /* Compute number of cylinders */ + } + } + if (cylinders == 0) + rv = (unsigned) -1; /* Give error if 0 cylinders */ + + *cyls = (unsigned int) cylinders; /* Stuff return values */ + *secs = (unsigned int) sectors; + *hds = (unsigned int) heads; + return (rv); +} + +/** + * scsicam_bios_param - Determine geometry of a disk in cylinders/heads/sectors. + * @bdev: which device + * @capacity: size of the disk in sectors + * @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders + * + * Description : determine the BIOS mapping/geometry used for a drive in a + * SCSI-CAM system, storing the results in ip as required + * by the HDIO_GETGEO ioctl(). + * + * Returns : -1 on failure, 0 on success. + */ +int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip) +{ + u64 capacity64 = capacity; /* Suppress gcc warning */ + int ret = 0; + + /* try to infer mapping from partition table */ + if (scsi_partsize(bdev, capacity, ip)) + return 0; + + if (capacity64 < (1ULL << 32)) { + /* + * Pick some standard mapping with at most 1024 cylinders, and + * at most 62 sectors per track - this works up to 7905 MB. + */ + ret = setsize((unsigned long)capacity, (unsigned int *)ip + 2, + (unsigned int *)ip + 0, (unsigned int *)ip + 1); + } + + /* + * If something went wrong, then apparently we have to return a geometry + * with more than 1024 cylinders. + */ + if (ret || ip[0] > 255 || ip[1] > 63) { + if ((capacity >> 11) > 65534) { + ip[0] = 255; + ip[1] = 63; + } else { + ip[0] = 64; + ip[1] = 32; + } + + if (capacity > 65535*63*255) + ip[2] = 65535; + else + ip[2] = (unsigned long)capacity / (ip[0] * ip[1]); + } + + return 0; +} +EXPORT_SYMBOL(scsicam_bios_param); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c new file mode 100644 index 000000000..c2e8d9e27 --- /dev/null +++ b/drivers/scsi/sd.c @@ -0,0 +1,4106 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * sd.c Copyright (C) 1992 Drew Eckhardt + * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale + * + * Linux scsi disk driver + * Initial versions: Drew Eckhardt + * Subsequent revisions: Eric Youngdale + * Modification history: + * - Drew Eckhardt original + * - Eric Youngdale add scatter-gather, multiple + * outstanding request, and other enhancements. + * Support loadable low-level scsi drivers. + * - Jirka Hanika support more scsi disks using + * eight major numbers. + * - Richard Gooch support devfs. + * - Torben Mathiasen Resource allocation fixes in + * sd_init and cleanups. + * - Alex Davis Fix problem where partition info + * not being read in sd_open. Fix problem where removable media + * could be ejected after sd_open. 
+ * - Douglas Gilbert cleanup for lk 2.5.x + * - Badari Pulavarty , Matthew Wilcox + * , Kurt Garloff : + * Support 32k/1M disks. + * + * Logging policy (needs CONFIG_SCSI_LOGGING defined): + * - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2 + * - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1 + * - entering sd_ioctl: SCSI_LOG_IOCTL level 1 + * - entering other commands: SCSI_LOG_HLQUEUE level 3 + * Note: when the logging level is set by the user, it must be greater + * than the level indicated above to trigger output. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sd.h" +#include "scsi_priv.h" +#include "scsi_logging.h" + +MODULE_AUTHOR("Eric Youngdale"); +MODULE_DESCRIPTION("SCSI disk (sd) driver"); +MODULE_LICENSE("GPL"); + +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR); +MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK); +MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD); +MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); +MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC); + +#define SD_MINORS 16 + +static void sd_config_discard(struct scsi_disk *, unsigned int); +static void sd_config_write_same(struct scsi_disk *); +static int sd_revalidate_disk(struct gendisk *); +static void sd_unlock_native_capacity(struct gendisk *disk); +static void sd_shutdown(struct device *); +static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); +static void scsi_disk_release(struct device *cdev); + +static DEFINE_IDA(sd_index_ida); + +static mempool_t *sd_page_pool; +static struct lock_class_key sd_bio_compl_lkclass; + +static const char *sd_cache_types[] = { + "write through", "none", "write back", + "write back, no read (daft)" +}; + +static void sd_set_flush_flag(struct scsi_disk *sdkp) +{ + bool wc = false, fua = false; + + if (sdkp->WCE) { + wc = true; + if (sdkp->DPOFUA) + fua = true; + } + + blk_queue_write_cache(sdkp->disk->queue, wc, fua); +} + +static ssize_t +cache_type_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int ct, rcd, wce, sp; + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + char buffer[64]; + char *buffer_data; + struct scsi_mode_data data; + struct scsi_sense_hdr sshdr; + static const char temp[] = "temporary "; + int len; + + if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) + /* no cache control on RBC devices; theoretically they + * can do it, but there's probably so many exceptions + * it's not worth the risk */ + return -EINVAL; + + if (strncmp(buf, temp, sizeof(temp) - 1) == 0) { 
+ buf += sizeof(temp) - 1; + sdkp->cache_override = 1; + } else { + sdkp->cache_override = 0; + } + + ct = sysfs_match_string(sd_cache_types, buf); + if (ct < 0) + return -EINVAL; + + rcd = ct & 0x01 ? 1 : 0; + wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0; + + if (sdkp->cache_override) { + sdkp->WCE = wce; + sdkp->RCD = rcd; + sd_set_flush_flag(sdkp); + return count; + } + + if (scsi_mode_sense(sdp, 0x08, 8, 0, buffer, sizeof(buffer), SD_TIMEOUT, + sdkp->max_retries, &data, NULL)) + return -EINVAL; + len = min_t(size_t, sizeof(buffer), data.length - data.header_length - + data.block_descriptor_length); + buffer_data = buffer + data.header_length + + data.block_descriptor_length; + buffer_data[2] &= ~0x05; + buffer_data[2] |= wce << 2 | rcd; + sp = buffer_data[0] & 0x80 ? 1 : 0; + buffer_data[0] &= ~0x80; + + /* + * Ensure WP, DPOFUA, and RESERVED fields are cleared in + * received mode parameter buffer before doing MODE SELECT. + */ + data.device_specific = 0; + + if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT, + sdkp->max_retries, &data, &sshdr)) { + if (scsi_sense_valid(&sshdr)) + sd_print_sense_hdr(sdkp, &sshdr); + return -EINVAL; + } + sd_revalidate_disk(sdkp->disk); + return count; +} + +static ssize_t +manage_start_stop_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + + return sysfs_emit(buf, "%u\n", + sdp->manage_system_start_stop && + sdp->manage_runtime_start_stop && + sdp->manage_shutdown); +} +static DEVICE_ATTR_RO(manage_start_stop); + +static ssize_t +manage_system_start_stop_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + + return sysfs_emit(buf, "%u\n", sdp->manage_system_start_stop); +} + +static ssize_t +manage_system_start_stop_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + bool v; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdp->manage_system_start_stop = v; + + return count; +} +static DEVICE_ATTR_RW(manage_system_start_stop); + +static ssize_t +manage_runtime_start_stop_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + + return sysfs_emit(buf, "%u\n", sdp->manage_runtime_start_stop); +} + +static ssize_t +manage_runtime_start_stop_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + bool v; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdp->manage_runtime_start_stop = v; + + return count; +} +static DEVICE_ATTR_RW(manage_runtime_start_stop); + +static ssize_t manage_shutdown_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + + return sysfs_emit(buf, "%u\n", sdp->manage_shutdown); +} + +static ssize_t manage_shutdown_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + bool v; + + if 
(!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdp->manage_shutdown = v; + + return count; +} +static DEVICE_ATTR_RW(manage_shutdown); + +static ssize_t +allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return sprintf(buf, "%u\n", sdkp->device->allow_restart); +} + +static ssize_t +allow_restart_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + bool v; + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) + return -EINVAL; + + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdp->allow_restart = v; + + return count; +} +static DEVICE_ATTR_RW(allow_restart); + +static ssize_t +cache_type_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + int ct = sdkp->RCD + 2*sdkp->WCE; + + return sprintf(buf, "%s\n", sd_cache_types[ct]); +} +static DEVICE_ATTR_RW(cache_type); + +static ssize_t +FUA_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return sprintf(buf, "%u\n", sdkp->DPOFUA); +} +static DEVICE_ATTR_RO(FUA); + +static ssize_t +protection_type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return sprintf(buf, "%u\n", sdkp->protection_type); +} + +static ssize_t +protection_type_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + unsigned int val; + int err; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + err = kstrtouint(buf, 10, &val); + + if (err) + return err; + + if (val <= T10_PI_TYPE3_PROTECTION) + sdkp->protection_type = val; + + return count; +} +static DEVICE_ATTR_RW(protection_type); + +static ssize_t +protection_mode_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + unsigned int dif, dix; + + dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); + dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type); + + if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) { + dif = 0; + dix = 1; + } + + if (!dif && !dix) + return sprintf(buf, "none\n"); + + return sprintf(buf, "%s%u\n", dix ? 
"dix" : "dif", dif); +} +static DEVICE_ATTR_RO(protection_mode); + +static ssize_t +app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return sprintf(buf, "%u\n", sdkp->ATO); +} +static DEVICE_ATTR_RO(app_tag_own); + +static ssize_t +thin_provisioning_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return sprintf(buf, "%u\n", sdkp->lbpme); +} +static DEVICE_ATTR_RO(thin_provisioning); + +/* sysfs_match_string() requires dense arrays */ +static const char *lbp_mode[] = { + [SD_LBP_FULL] = "full", + [SD_LBP_UNMAP] = "unmap", + [SD_LBP_WS16] = "writesame_16", + [SD_LBP_WS10] = "writesame_10", + [SD_LBP_ZERO] = "writesame_zero", + [SD_LBP_DISABLE] = "disabled", +}; + +static ssize_t +provisioning_mode_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]); +} + +static ssize_t +provisioning_mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + int mode; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (sd_is_zoned(sdkp)) { + sd_config_discard(sdkp, SD_LBP_DISABLE); + return count; + } + + if (sdp->type != TYPE_DISK) + return -EINVAL; + + mode = sysfs_match_string(lbp_mode, buf); + if (mode < 0) + return -EINVAL; + + sd_config_discard(sdkp, mode); + + return count; +} +static DEVICE_ATTR_RW(provisioning_mode); + +/* sysfs_match_string() requires dense arrays */ +static const char *zeroing_mode[] = { + [SD_ZERO_WRITE] = "write", + [SD_ZERO_WS] = "writesame", + [SD_ZERO_WS16_UNMAP] = "writesame_16_unmap", + [SD_ZERO_WS10_UNMAP] = "writesame_10_unmap", +}; + +static ssize_t +zeroing_mode_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]); +} + +static ssize_t +zeroing_mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + int mode; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + mode = sysfs_match_string(zeroing_mode, buf); + if (mode < 0) + return -EINVAL; + + sdkp->zeroing_mode = mode; + + return count; +} +static DEVICE_ATTR_RW(zeroing_mode); + +static ssize_t +max_medium_access_timeouts_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts); +} + +static ssize_t +max_medium_access_timeouts_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + int err; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts); + + return err ? 
err : count; +} +static DEVICE_ATTR_RW(max_medium_access_timeouts); + +static ssize_t +max_write_same_blocks_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return sprintf(buf, "%u\n", sdkp->max_ws_blocks); +} + +static ssize_t +max_write_same_blocks_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + unsigned long max; + int err; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) + return -EINVAL; + + err = kstrtoul(buf, 10, &max); + + if (err) + return err; + + if (max == 0) + sdp->no_write_same = 1; + else if (max <= SD_MAX_WS16_BLOCKS) { + sdp->no_write_same = 0; + sdkp->max_ws_blocks = max; + } + + sd_config_write_same(sdkp); + + return count; +} +static DEVICE_ATTR_RW(max_write_same_blocks); + +static ssize_t +zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + if (sdkp->device->type == TYPE_ZBC) + return sprintf(buf, "host-managed\n"); + if (sdkp->zoned == 1) + return sprintf(buf, "host-aware\n"); + if (sdkp->zoned == 2) + return sprintf(buf, "drive-managed\n"); + return sprintf(buf, "none\n"); +} +static DEVICE_ATTR_RO(zoned_cap); + +static ssize_t +max_retries_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdev = sdkp->device; + int retries, err; + + err = kstrtoint(buf, 10, &retries); + if (err) + return err; + + if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) { + sdkp->max_retries = retries; + return count; + } + + sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n", + SD_MAX_RETRIES); + return -EINVAL; +} + +static ssize_t +max_retries_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return sprintf(buf, "%d\n", sdkp->max_retries); +} + +static DEVICE_ATTR_RW(max_retries); + +static struct attribute *sd_disk_attrs[] = { + &dev_attr_cache_type.attr, + &dev_attr_FUA.attr, + &dev_attr_allow_restart.attr, + &dev_attr_manage_start_stop.attr, + &dev_attr_manage_system_start_stop.attr, + &dev_attr_manage_runtime_start_stop.attr, + &dev_attr_manage_shutdown.attr, + &dev_attr_protection_type.attr, + &dev_attr_protection_mode.attr, + &dev_attr_app_tag_own.attr, + &dev_attr_thin_provisioning.attr, + &dev_attr_provisioning_mode.attr, + &dev_attr_zeroing_mode.attr, + &dev_attr_max_write_same_blocks.attr, + &dev_attr_max_medium_access_timeouts.attr, + &dev_attr_zoned_cap.attr, + &dev_attr_max_retries.attr, + NULL, +}; +ATTRIBUTE_GROUPS(sd_disk); + +static struct class sd_disk_class = { + .name = "scsi_disk", + .dev_release = scsi_disk_release, + .dev_groups = sd_disk_groups, +}; + +/* + * Don't request a new module, as that could deadlock in multipath + * environment. + */ +static void sd_default_probe(dev_t devt) +{ +} + +/* + * Device no to disk mapping: + * + * major disc2 disc p1 + * |............|.............|....|....| <- dev_t + * 31 20 19 8 7 4 3 0 + * + * Inside a major, we have 16k disks, however mapped non- + * contiguously. The first 16 disks are for major0, the next + * ones with major1, ... Disk 256 is for major0 again, disk 272 + * for major1, ... 
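+ * For example, disks 0..15 map to SCSI_DISK0_MAJOR, disks 16..31 to
+ * SCSI_DISK1_MAJOR and so on up to disk 255; disk 256 then wraps back
+ * to SCSI_DISK0_MAJOR and is distinguished by the disc2 bits above.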
+ * As we stay compatible with our numbering scheme, we can reuse + * the well-know SCSI majors 8, 65--71, 136--143. + */ +static int sd_major(int major_idx) +{ + switch (major_idx) { + case 0: + return SCSI_DISK0_MAJOR; + case 1 ... 7: + return SCSI_DISK1_MAJOR + major_idx - 1; + case 8 ... 15: + return SCSI_DISK8_MAJOR + major_idx - 8; + default: + BUG(); + return 0; /* shut up gcc */ + } +} + +#ifdef CONFIG_BLK_SED_OPAL +static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, + size_t len, bool send) +{ + struct scsi_disk *sdkp = data; + struct scsi_device *sdev = sdkp->device; + u8 cdb[12] = { 0, }; + const struct scsi_exec_args exec_args = { + .req_flags = BLK_MQ_REQ_PM, + }; + int ret; + + cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN; + cdb[1] = secp; + put_unaligned_be16(spsp, &cdb[2]); + put_unaligned_be32(len, &cdb[6]); + + ret = scsi_execute_cmd(sdev, cdb, send ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, + buffer, len, SD_TIMEOUT, sdkp->max_retries, + &exec_args); + return ret <= 0 ? ret : -EIO; +} +#endif /* CONFIG_BLK_SED_OPAL */ + +/* + * Look up the DIX operation based on whether the command is read or + * write and whether dix and dif are enabled. + */ +static unsigned int sd_prot_op(bool write, bool dix, bool dif) +{ + /* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */ + static const unsigned int ops[] = { /* wrt dix dif */ + SCSI_PROT_NORMAL, /* 0 0 0 */ + SCSI_PROT_READ_STRIP, /* 0 0 1 */ + SCSI_PROT_READ_INSERT, /* 0 1 0 */ + SCSI_PROT_READ_PASS, /* 0 1 1 */ + SCSI_PROT_NORMAL, /* 1 0 0 */ + SCSI_PROT_WRITE_INSERT, /* 1 0 1 */ + SCSI_PROT_WRITE_STRIP, /* 1 1 0 */ + SCSI_PROT_WRITE_PASS, /* 1 1 1 */ + }; + + return ops[write << 2 | dix << 1 | dif]; +} + +/* + * Returns a mask of the protection flags that are valid for a given DIX + * operation. 
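+ * For example, a WRITE with both DIX and DIF active is mapped by
+ * sd_prot_op() above to SCSI_PROT_WRITE_PASS, whose mask below allows
+ * guard, reference and IP checksum checking as well as PI transfer.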
+ */ +static unsigned int sd_prot_flag_mask(unsigned int prot_op) +{ + static const unsigned int flag_mask[] = { + [SCSI_PROT_NORMAL] = 0, + + [SCSI_PROT_READ_STRIP] = SCSI_PROT_TRANSFER_PI | + SCSI_PROT_GUARD_CHECK | + SCSI_PROT_REF_CHECK | + SCSI_PROT_REF_INCREMENT, + + [SCSI_PROT_READ_INSERT] = SCSI_PROT_REF_INCREMENT | + SCSI_PROT_IP_CHECKSUM, + + [SCSI_PROT_READ_PASS] = SCSI_PROT_TRANSFER_PI | + SCSI_PROT_GUARD_CHECK | + SCSI_PROT_REF_CHECK | + SCSI_PROT_REF_INCREMENT | + SCSI_PROT_IP_CHECKSUM, + + [SCSI_PROT_WRITE_INSERT] = SCSI_PROT_TRANSFER_PI | + SCSI_PROT_REF_INCREMENT, + + [SCSI_PROT_WRITE_STRIP] = SCSI_PROT_GUARD_CHECK | + SCSI_PROT_REF_CHECK | + SCSI_PROT_REF_INCREMENT | + SCSI_PROT_IP_CHECKSUM, + + [SCSI_PROT_WRITE_PASS] = SCSI_PROT_TRANSFER_PI | + SCSI_PROT_GUARD_CHECK | + SCSI_PROT_REF_CHECK | + SCSI_PROT_REF_INCREMENT | + SCSI_PROT_IP_CHECKSUM, + }; + + return flag_mask[prot_op]; +} + +static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd, + unsigned int dix, unsigned int dif) +{ + struct request *rq = scsi_cmd_to_rq(scmd); + struct bio *bio = rq->bio; + unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif); + unsigned int protect = 0; + + if (dix) { /* DIX Type 0, 1, 2, 3 */ + if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM)) + scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM; + + if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false) + scmd->prot_flags |= SCSI_PROT_GUARD_CHECK; + } + + if (dif != T10_PI_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */ + scmd->prot_flags |= SCSI_PROT_REF_INCREMENT; + + if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false) + scmd->prot_flags |= SCSI_PROT_REF_CHECK; + } + + if (dif) { /* DIX/DIF Type 1, 2, 3 */ + scmd->prot_flags |= SCSI_PROT_TRANSFER_PI; + + if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK)) + protect = 3 << 5; /* Disable target PI checking */ + else + protect = 1 << 5; /* Enable target PI checking */ + } + + scsi_set_prot_op(scmd, prot_op); + scsi_set_prot_type(scmd, dif); + scmd->prot_flags &= sd_prot_flag_mask(prot_op); + + return protect; +} + +static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) +{ + struct request_queue *q = sdkp->disk->queue; + unsigned int logical_block_size = sdkp->device->sector_size; + unsigned int max_blocks = 0; + + q->limits.discard_alignment = + sdkp->unmap_alignment * logical_block_size; + q->limits.discard_granularity = + max(sdkp->physical_block_size, + sdkp->unmap_granularity * logical_block_size); + sdkp->provisioning_mode = mode; + + switch (mode) { + + case SD_LBP_FULL: + case SD_LBP_DISABLE: + blk_queue_max_discard_sectors(q, 0); + return; + + case SD_LBP_UNMAP: + max_blocks = min_not_zero(sdkp->max_unmap_blocks, + (u32)SD_MAX_WS16_BLOCKS); + break; + + case SD_LBP_WS16: + if (sdkp->device->unmap_limit_for_ws) + max_blocks = sdkp->max_unmap_blocks; + else + max_blocks = sdkp->max_ws_blocks; + + max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS); + break; + + case SD_LBP_WS10: + if (sdkp->device->unmap_limit_for_ws) + max_blocks = sdkp->max_unmap_blocks; + else + max_blocks = sdkp->max_ws_blocks; + + max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS); + break; + + case SD_LBP_ZERO: + max_blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS10_BLOCKS); + break; + } + + blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9)); +} + +static void *sd_set_special_bvec(struct request *rq, unsigned int data_len) +{ + struct page *page; + + page = mempool_alloc(sd_page_pool, GFP_ATOMIC); + if (!page) + return 
NULL; + clear_highpage(page); + bvec_set_page(&rq->special_vec, page, data_len, 0); + rq->rq_flags |= RQF_SPECIAL_PAYLOAD; + return bvec_virt(&rq->special_vec); +} + +static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) +{ + struct scsi_device *sdp = cmd->device; + struct request *rq = scsi_cmd_to_rq(cmd); + struct scsi_disk *sdkp = scsi_disk(rq->q->disk); + u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); + u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); + unsigned int data_len = 24; + char *buf; + + buf = sd_set_special_bvec(rq, data_len); + if (!buf) + return BLK_STS_RESOURCE; + + cmd->cmd_len = 10; + cmd->cmnd[0] = UNMAP; + cmd->cmnd[8] = 24; + + put_unaligned_be16(6 + 16, &buf[0]); + put_unaligned_be16(16, &buf[2]); + put_unaligned_be64(lba, &buf[8]); + put_unaligned_be32(nr_blocks, &buf[16]); + + cmd->allowed = sdkp->max_retries; + cmd->transfersize = data_len; + rq->timeout = SD_TIMEOUT; + + return scsi_alloc_sgtables(cmd); +} + +static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, + bool unmap) +{ + struct scsi_device *sdp = cmd->device; + struct request *rq = scsi_cmd_to_rq(cmd); + struct scsi_disk *sdkp = scsi_disk(rq->q->disk); + u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); + u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); + u32 data_len = sdp->sector_size; + + if (!sd_set_special_bvec(rq, data_len)) + return BLK_STS_RESOURCE; + + cmd->cmd_len = 16; + cmd->cmnd[0] = WRITE_SAME_16; + if (unmap) + cmd->cmnd[1] = 0x8; /* UNMAP */ + put_unaligned_be64(lba, &cmd->cmnd[2]); + put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); + + cmd->allowed = sdkp->max_retries; + cmd->transfersize = data_len; + rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; + + return scsi_alloc_sgtables(cmd); +} + +static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, + bool unmap) +{ + struct scsi_device *sdp = cmd->device; + struct request *rq = scsi_cmd_to_rq(cmd); + struct scsi_disk *sdkp = scsi_disk(rq->q->disk); + u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); + u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); + u32 data_len = sdp->sector_size; + + if (!sd_set_special_bvec(rq, data_len)) + return BLK_STS_RESOURCE; + + cmd->cmd_len = 10; + cmd->cmnd[0] = WRITE_SAME; + if (unmap) + cmd->cmnd[1] = 0x8; /* UNMAP */ + put_unaligned_be32(lba, &cmd->cmnd[2]); + put_unaligned_be16(nr_blocks, &cmd->cmnd[7]); + + cmd->allowed = sdkp->max_retries; + cmd->transfersize = data_len; + rq->timeout = unmap ? 
SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; + + return scsi_alloc_sgtables(cmd); +} + +static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) +{ + struct request *rq = scsi_cmd_to_rq(cmd); + struct scsi_device *sdp = cmd->device; + struct scsi_disk *sdkp = scsi_disk(rq->q->disk); + u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); + u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); + + if (!(rq->cmd_flags & REQ_NOUNMAP)) { + switch (sdkp->zeroing_mode) { + case SD_ZERO_WS16_UNMAP: + return sd_setup_write_same16_cmnd(cmd, true); + case SD_ZERO_WS10_UNMAP: + return sd_setup_write_same10_cmnd(cmd, true); + } + } + + if (sdp->no_write_same) { + rq->rq_flags |= RQF_QUIET; + return BLK_STS_TARGET; + } + + if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) + return sd_setup_write_same16_cmnd(cmd, false); + + return sd_setup_write_same10_cmnd(cmd, false); +} + +static void sd_config_write_same(struct scsi_disk *sdkp) +{ + struct request_queue *q = sdkp->disk->queue; + unsigned int logical_block_size = sdkp->device->sector_size; + + if (sdkp->device->no_write_same) { + sdkp->max_ws_blocks = 0; + goto out; + } + + /* Some devices can not handle block counts above 0xffff despite + * supporting WRITE SAME(16). Consequently we default to 64k + * blocks per I/O unless the device explicitly advertises a + * bigger limit. + */ + if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS) + sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS16_BLOCKS); + else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes) + sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS10_BLOCKS); + else { + sdkp->device->no_write_same = 1; + sdkp->max_ws_blocks = 0; + } + + if (sdkp->lbprz && sdkp->lbpws) + sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP; + else if (sdkp->lbprz && sdkp->lbpws10) + sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP; + else if (sdkp->max_ws_blocks) + sdkp->zeroing_mode = SD_ZERO_WS; + else + sdkp->zeroing_mode = SD_ZERO_WRITE; + + if (sdkp->max_ws_blocks && + sdkp->physical_block_size > logical_block_size) { + /* + * Reporting a maximum number of blocks that is not aligned + * on the device physical size would cause a large write same + * request to be split into physically unaligned chunks by + * __blkdev_issue_write_zeroes() even if the caller of this + * functions took care to align the large request. So make sure + * the maximum reported is aligned to the device physical block + * size. This is only an optional optimization for regular + * disks, but this is mandatory to avoid failure of large write + * same requests directed at sequential write required zones of + * host-managed ZBC disks. 
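+ * For example, with 512-byte logical blocks and a 4096-byte physical
+ * block size, max_ws_blocks is rounded down to a multiple of 8 below.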
+ */ + sdkp->max_ws_blocks = + round_down(sdkp->max_ws_blocks, + bytes_to_logical(sdkp->device, + sdkp->physical_block_size)); + } + +out: + blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks * + (logical_block_size >> 9)); +} + +static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd) +{ + struct request *rq = scsi_cmd_to_rq(cmd); + struct scsi_disk *sdkp = scsi_disk(rq->q->disk); + + /* flush requests don't perform I/O, zero the S/G table */ + memset(&cmd->sdb, 0, sizeof(cmd->sdb)); + + if (cmd->device->use_16_for_sync) { + cmd->cmnd[0] = SYNCHRONIZE_CACHE_16; + cmd->cmd_len = 16; + } else { + cmd->cmnd[0] = SYNCHRONIZE_CACHE; + cmd->cmd_len = 10; + } + cmd->transfersize = 0; + cmd->allowed = sdkp->max_retries; + + rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER; + return BLK_STS_OK; +} + +static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write, + sector_t lba, unsigned int nr_blocks, + unsigned char flags, unsigned int dld) +{ + cmd->cmd_len = SD_EXT_CDB_SIZE; + cmd->cmnd[0] = VARIABLE_LENGTH_CMD; + cmd->cmnd[7] = 0x18; /* Additional CDB len */ + cmd->cmnd[9] = write ? WRITE_32 : READ_32; + cmd->cmnd[10] = flags; + cmd->cmnd[11] = dld & 0x07; + put_unaligned_be64(lba, &cmd->cmnd[12]); + put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */ + put_unaligned_be32(nr_blocks, &cmd->cmnd[28]); + + return BLK_STS_OK; +} + +static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write, + sector_t lba, unsigned int nr_blocks, + unsigned char flags, unsigned int dld) +{ + cmd->cmd_len = 16; + cmd->cmnd[0] = write ? WRITE_16 : READ_16; + cmd->cmnd[1] = flags | ((dld >> 2) & 0x01); + cmd->cmnd[14] = (dld & 0x03) << 6; + cmd->cmnd[15] = 0; + put_unaligned_be64(lba, &cmd->cmnd[2]); + put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); + + return BLK_STS_OK; +} + +static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write, + sector_t lba, unsigned int nr_blocks, + unsigned char flags) +{ + cmd->cmd_len = 10; + cmd->cmnd[0] = write ? WRITE_10 : READ_10; + cmd->cmnd[1] = flags; + cmd->cmnd[6] = 0; + cmd->cmnd[9] = 0; + put_unaligned_be32(lba, &cmd->cmnd[2]); + put_unaligned_be16(nr_blocks, &cmd->cmnd[7]); + + return BLK_STS_OK; +} + +static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write, + sector_t lba, unsigned int nr_blocks, + unsigned char flags) +{ + /* Avoid that 0 blocks gets translated into 256 blocks. */ + if (WARN_ON_ONCE(nr_blocks == 0)) + return BLK_STS_IOERR; + + if (unlikely(flags & 0x8)) { + /* + * This happens only if this drive failed 10byte rw + * command with ILLEGAL_REQUEST during operation and + * thus turned off use_10_for_rw. + */ + scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n"); + return BLK_STS_IOERR; + } + + cmd->cmd_len = 6; + cmd->cmnd[0] = write ? WRITE_6 : READ_6; + cmd->cmnd[1] = (lba >> 16) & 0x1f; + cmd->cmnd[2] = (lba >> 8) & 0xff; + cmd->cmnd[3] = lba & 0xff; + cmd->cmnd[4] = nr_blocks; + cmd->cmnd[5] = 0; + + return BLK_STS_OK; +} + +/* + * Check if a command has a duration limit set. If it does, and the target + * device supports CDL and the feature is enabled, return the limit + * descriptor index to use. Return 0 (no limit) otherwise. + */ +static int sd_cdl_dld(struct scsi_disk *sdkp, struct scsi_cmnd *scmd) +{ + struct scsi_device *sdp = sdkp->device; + int hint; + + if (!sdp->cdl_supported || !sdp->cdl_enable) + return 0; + + /* + * Use "no limit" if the request ioprio does not specify a duration + * limit hint. 
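+ * Hints IOPRIO_HINT_DEV_DURATION_LIMIT_1..7 select command duration
+ * limit descriptors 1..7, respectively.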
+ */ + hint = IOPRIO_PRIO_HINT(req_get_ioprio(scsi_cmd_to_rq(scmd))); + if (hint < IOPRIO_HINT_DEV_DURATION_LIMIT_1 || + hint > IOPRIO_HINT_DEV_DURATION_LIMIT_7) + return 0; + + return (hint - IOPRIO_HINT_DEV_DURATION_LIMIT_1) + 1; +} + +static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) +{ + struct request *rq = scsi_cmd_to_rq(cmd); + struct scsi_device *sdp = cmd->device; + struct scsi_disk *sdkp = scsi_disk(rq->q->disk); + sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq)); + sector_t threshold; + unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); + unsigned int mask = logical_to_sectors(sdp, 1) - 1; + bool write = rq_data_dir(rq) == WRITE; + unsigned char protect, fua; + unsigned int dld; + blk_status_t ret; + unsigned int dif; + bool dix; + + ret = scsi_alloc_sgtables(cmd); + if (ret != BLK_STS_OK) + return ret; + + ret = BLK_STS_IOERR; + if (!scsi_device_online(sdp) || sdp->changed) { + scmd_printk(KERN_ERR, cmd, "device offline or changed\n"); + goto fail; + } + + if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) { + scmd_printk(KERN_ERR, cmd, "access beyond end of device\n"); + goto fail; + } + + if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) { + scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n"); + goto fail; + } + + /* + * Some SD card readers can't handle accesses which touch the + * last one or two logical blocks. Split accesses as needed. + */ + threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS; + + if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) { + if (lba < threshold) { + /* Access up to the threshold but not beyond */ + nr_blocks = threshold - lba; + } else { + /* Access only a single logical block */ + nr_blocks = 1; + } + } + + if (req_op(rq) == REQ_OP_ZONE_APPEND) { + ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks); + if (ret) + goto fail; + } + + fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0; + dix = scsi_prot_sg_count(cmd); + dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type); + dld = sd_cdl_dld(sdkp, cmd); + + if (dif || dix) + protect = sd_setup_protect_cmnd(cmd, dix, dif); + else + protect = 0; + + if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) { + ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks, + protect | fua, dld); + } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) { + ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks, + protect | fua, dld); + } else if ((nr_blocks > 0xff) || (lba > 0x1fffff) || + sdp->use_10_for_rw || protect) { + ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks, + protect | fua); + } else { + ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks, + protect | fua); + } + + if (unlikely(ret != BLK_STS_OK)) + goto fail; + + /* + * We shouldn't disconnect in the middle of a sector, so with a dumb + * host adapter, it's safe to assume that we can at least transfer + * this many bytes between each connect / disconnect. + */ + cmd->transfersize = sdp->sector_size; + cmd->underflow = nr_blocks << 9; + cmd->allowed = sdkp->max_retries; + cmd->sdb.length = nr_blocks * sdp->sector_size; + + SCSI_LOG_HLQUEUE(1, + scmd_printk(KERN_INFO, cmd, + "%s: block=%llu, count=%d\n", __func__, + (unsigned long long)blk_rq_pos(rq), + blk_rq_sectors(rq))); + SCSI_LOG_HLQUEUE(2, + scmd_printk(KERN_INFO, cmd, + "%s %d/%u 512 byte blocks.\n", + write ? "writing" : "reading", nr_blocks, + blk_rq_sectors(rq))); + + /* + * This indicates that the command is ready from our end to be queued. 
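+ * The CDB built above is READ/WRITE(32) when Type 2 protection is used,
+ * READ/WRITE(16) for large LBAs or block counts, and READ/WRITE(10) or
+ * READ/WRITE(6) otherwise.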
+ */ + return BLK_STS_OK; +fail: + scsi_free_sgtables(cmd); + return ret; +} + +static blk_status_t sd_init_command(struct scsi_cmnd *cmd) +{ + struct request *rq = scsi_cmd_to_rq(cmd); + + switch (req_op(rq)) { + case REQ_OP_DISCARD: + switch (scsi_disk(rq->q->disk)->provisioning_mode) { + case SD_LBP_UNMAP: + return sd_setup_unmap_cmnd(cmd); + case SD_LBP_WS16: + return sd_setup_write_same16_cmnd(cmd, true); + case SD_LBP_WS10: + return sd_setup_write_same10_cmnd(cmd, true); + case SD_LBP_ZERO: + return sd_setup_write_same10_cmnd(cmd, false); + default: + return BLK_STS_TARGET; + } + case REQ_OP_WRITE_ZEROES: + return sd_setup_write_zeroes_cmnd(cmd); + case REQ_OP_FLUSH: + return sd_setup_flush_cmnd(cmd); + case REQ_OP_READ: + case REQ_OP_WRITE: + case REQ_OP_ZONE_APPEND: + return sd_setup_read_write_cmnd(cmd); + case REQ_OP_ZONE_RESET: + return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER, + false); + case REQ_OP_ZONE_RESET_ALL: + return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER, + true); + case REQ_OP_ZONE_OPEN: + return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false); + case REQ_OP_ZONE_CLOSE: + return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false); + case REQ_OP_ZONE_FINISH: + return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false); + default: + WARN_ON_ONCE(1); + return BLK_STS_NOTSUPP; + } +} + +static void sd_uninit_command(struct scsi_cmnd *SCpnt) +{ + struct request *rq = scsi_cmd_to_rq(SCpnt); + + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + mempool_free(rq->special_vec.bv_page, sd_page_pool); +} + +static bool sd_need_revalidate(struct gendisk *disk, struct scsi_disk *sdkp) +{ + if (sdkp->device->removable || sdkp->write_prot) { + if (disk_check_media_change(disk)) + return true; + } + + /* + * Force a full rescan after ioctl(BLKRRPART). While the disk state has + * nothing to do with partitions, BLKRRPART is used to force a full + * revalidate after things like a format for historical reasons. + */ + return test_bit(GD_NEED_PART_SCAN, &disk->state); +} + +/** + * sd_open - open a scsi disk device + * @disk: disk to open + * @mode: open mode + * + * Returns 0 if successful. Returns a negated errno value in case + * of error. + * + * Note: This can be called from a user context (e.g. fsck(1) ) + * or from within the kernel (e.g. as a result of a mount(1) ). + * In the latter case @inode and @filp carry an abridged amount + * of information as noted above. + * + * Locking: called with disk->open_mutex held. + **/ +static int sd_open(struct gendisk *disk, blk_mode_t mode) +{ + struct scsi_disk *sdkp = scsi_disk(disk); + struct scsi_device *sdev = sdkp->device; + int retval; + + if (scsi_device_get(sdev)) + return -ENXIO; + + SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n")); + + /* + * If the device is in error recovery, wait until it is done. + * If the device is offline, then disallow any access to it. + */ + retval = -ENXIO; + if (!scsi_block_when_processing_errors(sdev)) + goto error_out; + + if (sd_need_revalidate(disk, sdkp)) + sd_revalidate_disk(disk); + + /* + * If the drive is empty, just let the open fail. + */ + retval = -ENOMEDIUM; + if (sdev->removable && !sdkp->media_present && + !(mode & BLK_OPEN_NDELAY)) + goto error_out; + + /* + * If the device has the write protect tab set, have the open fail + * if the user expects to be able to write to the thing. 
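+ * Read-only opens are still allowed; only opens that request write
+ * access fail with -EROFS.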
+ */ + retval = -EROFS; + if (sdkp->write_prot && (mode & BLK_OPEN_WRITE)) + goto error_out; + + /* + * It is possible that the disk changing stuff resulted in + * the device being taken offline. If this is the case, + * report this to the user, and don't pretend that the + * open actually succeeded. + */ + retval = -ENXIO; + if (!scsi_device_online(sdev)) + goto error_out; + + if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) { + if (scsi_block_when_processing_errors(sdev)) + scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT); + } + + return 0; + +error_out: + scsi_device_put(sdev); + return retval; +} + +/** + * sd_release - invoked when the (last) close(2) is called on this + * scsi disk. + * @disk: disk to release + * + * Returns 0. + * + * Note: may block (uninterruptible) if error recovery is underway + * on this disk. + * + * Locking: called with disk->open_mutex held. + **/ +static void sd_release(struct gendisk *disk) +{ + struct scsi_disk *sdkp = scsi_disk(disk); + struct scsi_device *sdev = sdkp->device; + + SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n")); + + if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) { + if (scsi_block_when_processing_errors(sdev)) + scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); + } + + scsi_device_put(sdev); +} + +static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); + struct scsi_device *sdp = sdkp->device; + struct Scsi_Host *host = sdp->host; + sector_t capacity = logical_to_sectors(sdp, sdkp->capacity); + int diskinfo[4]; + + /* default to most commonly used values */ + diskinfo[0] = 0x40; /* 1 << 6 */ + diskinfo[1] = 0x20; /* 1 << 5 */ + diskinfo[2] = capacity >> 11; + + /* override with calculated, extended default, or driver values */ + if (host->hostt->bios_param) + host->hostt->bios_param(sdp, bdev, capacity, diskinfo); + else + scsicam_bios_param(bdev, capacity, diskinfo); + + geo->heads = diskinfo[0]; + geo->sectors = diskinfo[1]; + geo->cylinders = diskinfo[2]; + return 0; +} + +/** + * sd_ioctl - process an ioctl + * @bdev: target block device + * @mode: open mode + * @cmd: ioctl command number + * @arg: this is third argument given to ioctl(2) system call. + * Often contains a pointer. + * + * Returns 0 if successful (some ioctls return positive numbers on + * success as well). Returns a negated errno value in case of error. + * + * Note: most ioctls are forward onto the block subsystem or further + * down in the scsi subsystem. + **/ +static int sd_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct gendisk *disk = bdev->bd_disk; + struct scsi_disk *sdkp = scsi_disk(disk); + struct scsi_device *sdp = sdkp->device; + void __user *p = (void __user *)arg; + int error; + + SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, " + "cmd=0x%x\n", disk->disk_name, cmd)); + + if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO)) + return -ENOIOCTLCMD; + + /* + * If we are in the middle of error recovery, don't let anyone + * else try and use this device. Also, if error recovery fails, it + * may try and take the device offline, in which case all further + * access to the device is prohibited. 
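+ * SED OPAL ioctls are handed to sed_ioctl(); all other commands are
+ * passed on to the generic scsi_ioctl() handler.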
+ */ + error = scsi_ioctl_block_when_processing_errors(sdp, cmd, + (mode & BLK_OPEN_NDELAY)); + if (error) + return error; + + if (is_sed_ioctl(cmd)) + return sed_ioctl(sdkp->opal_dev, cmd, p); + return scsi_ioctl(sdp, mode & BLK_OPEN_WRITE, cmd, p); +} + +static void set_media_not_present(struct scsi_disk *sdkp) +{ + if (sdkp->media_present) + sdkp->device->changed = 1; + + if (sdkp->device->removable) { + sdkp->media_present = 0; + sdkp->capacity = 0; + } +} + +static int media_not_present(struct scsi_disk *sdkp, + struct scsi_sense_hdr *sshdr) +{ + if (!scsi_sense_valid(sshdr)) + return 0; + + /* not invoked for commands that could return deferred errors */ + switch (sshdr->sense_key) { + case UNIT_ATTENTION: + case NOT_READY: + /* medium not present */ + if (sshdr->asc == 0x3A) { + set_media_not_present(sdkp); + return 1; + } + } + return 0; +} + +/** + * sd_check_events - check media events + * @disk: kernel device descriptor + * @clearing: disk events currently being cleared + * + * Returns mask of DISK_EVENT_*. + * + * Note: this function is invoked from the block subsystem. + **/ +static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) +{ + struct scsi_disk *sdkp = disk->private_data; + struct scsi_device *sdp; + int retval; + bool disk_changed; + + if (!sdkp) + return 0; + + sdp = sdkp->device; + SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); + + /* + * If the device is offline, don't send any commands - just pretend as + * if the command failed. If the device ever comes back online, we + * can deal with it then. It is only because of unrecoverable errors + * that we would ever take a device offline in the first place. + */ + if (!scsi_device_online(sdp)) { + set_media_not_present(sdkp); + goto out; + } + + /* + * Using TEST_UNIT_READY enables differentiation between drive with + * no cartridge loaded - NOT READY, drive with changed cartridge - + * UNIT ATTENTION, or with same cartridge - GOOD STATUS. + * + * Drives that auto spin down. eg iomega jaz 1G, will be started + * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever + * sd_revalidate() is called. + */ + if (scsi_block_when_processing_errors(sdp)) { + struct scsi_sense_hdr sshdr = { 0, }; + + retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries, + &sshdr); + + /* failed to execute TUR, assume media not present */ + if (retval < 0 || host_byte(retval)) { + set_media_not_present(sdkp); + goto out; + } + + if (media_not_present(sdkp, &sshdr)) + goto out; + } + + /* + * For removable scsi disk we have to recognise the presence + * of a disk in the drive. + */ + if (!sdkp->media_present) + sdp->changed = 1; + sdkp->media_present = 1; +out: + /* + * sdp->changed is set under the following conditions: + * + * Medium present state has changed in either direction. + * Device has indicated UNIT_ATTENTION. + */ + disk_changed = sdp->changed; + sdp->changed = 0; + return disk_changed ? 
DISK_EVENT_MEDIA_CHANGE : 0; +} + +static int sd_sync_cache(struct scsi_disk *sdkp) +{ + int retries, res; + struct scsi_device *sdp = sdkp->device; + const int timeout = sdp->request_queue->rq_timeout + * SD_FLUSH_TIMEOUT_MULTIPLIER; + struct scsi_sense_hdr sshdr; + const struct scsi_exec_args exec_args = { + .req_flags = BLK_MQ_REQ_PM, + .sshdr = &sshdr, + }; + + if (!scsi_device_online(sdp)) + return -ENODEV; + + for (retries = 3; retries > 0; --retries) { + unsigned char cmd[16] = { 0 }; + + if (sdp->use_16_for_sync) + cmd[0] = SYNCHRONIZE_CACHE_16; + else + cmd[0] = SYNCHRONIZE_CACHE; + /* + * Leave the rest of the command zero to indicate + * flush everything. + */ + res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, + timeout, sdkp->max_retries, &exec_args); + if (res == 0) + break; + } + + if (res) { + sd_print_result(sdkp, "Synchronize Cache(10) failed", res); + + if (res < 0) + return res; + + if (scsi_status_is_check_condition(res) && + scsi_sense_valid(&sshdr)) { + sd_print_sense_hdr(sdkp, &sshdr); + + /* we need to evaluate the error return */ + if (sshdr.asc == 0x3a || /* medium not present */ + sshdr.asc == 0x20 || /* invalid command */ + (sshdr.asc == 0x74 && sshdr.ascq == 0x71)) /* drive is password locked */ + /* this is no error here */ + return 0; + /* + * This drive doesn't support sync and there's not much + * we can do because this is called during shutdown + * or suspend so just return success so those operations + * can proceed. + */ + if (sshdr.sense_key == ILLEGAL_REQUEST) + return 0; + } + + switch (host_byte(res)) { + /* ignore errors due to racing a disconnection */ + case DID_BAD_TARGET: + case DID_NO_CONNECT: + return 0; + /* signal the upper layer it might try again */ + case DID_BUS_BUSY: + case DID_IMM_RETRY: + case DID_REQUEUE: + case DID_SOFT_ERROR: + return -EBUSY; + default: + return -EIO; + } + } + return 0; +} + +static void sd_rescan(struct device *dev) +{ + struct scsi_disk *sdkp = dev_get_drvdata(dev); + + sd_revalidate_disk(sdkp->disk); +} + +static int sd_get_unique_id(struct gendisk *disk, u8 id[16], + enum blk_unique_id type) +{ + struct scsi_device *sdev = scsi_disk(disk)->device; + const struct scsi_vpd *vpd; + const unsigned char *d; + int ret = -ENXIO, len; + + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg83); + if (!vpd) + goto out_unlock; + + ret = -EINVAL; + for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) { + /* we only care about designators with LU association */ + if (((d[1] >> 4) & 0x3) != 0x00) + continue; + if ((d[1] & 0xf) != type) + continue; + + /* + * Only exit early if a 16-byte descriptor was found. Otherwise + * keep looking as one with more entropy might still show up. 
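+ * Designators of 8, 12 or 16 bytes are accepted; the length of the
+ * designator that was copied is returned.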
+ */ + len = d[3]; + if (len != 8 && len != 12 && len != 16) + continue; + ret = len; + memcpy(id, d + 4, len); + if (len == 16) + break; + } +out_unlock: + rcu_read_unlock(); + return ret; +} + +static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result) +{ + switch (host_byte(result)) { + case DID_TRANSPORT_MARGINAL: + case DID_TRANSPORT_DISRUPTED: + case DID_BUS_BUSY: + return PR_STS_RETRY_PATH_FAILURE; + case DID_NO_CONNECT: + return PR_STS_PATH_FAILED; + case DID_TRANSPORT_FAILFAST: + return PR_STS_PATH_FAST_FAILED; + } + + switch (status_byte(result)) { + case SAM_STAT_RESERVATION_CONFLICT: + return PR_STS_RESERVATION_CONFLICT; + case SAM_STAT_CHECK_CONDITION: + if (!scsi_sense_valid(sshdr)) + return PR_STS_IOERR; + + if (sshdr->sense_key == ILLEGAL_REQUEST && + (sshdr->asc == 0x26 || sshdr->asc == 0x24)) + return -EINVAL; + + fallthrough; + default: + return PR_STS_IOERR; + } +} + +static int sd_pr_in_command(struct block_device *bdev, u8 sa, + unsigned char *data, int data_len) +{ + struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); + struct scsi_device *sdev = sdkp->device; + struct scsi_sense_hdr sshdr; + u8 cmd[10] = { PERSISTENT_RESERVE_IN, sa }; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + int result; + + put_unaligned_be16(data_len, &cmd[7]); + + result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, data, data_len, + SD_TIMEOUT, sdkp->max_retries, &exec_args); + if (scsi_status_is_check_condition(result) && + scsi_sense_valid(&sshdr)) { + sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result); + scsi_print_sense_hdr(sdev, NULL, &sshdr); + } + + if (result <= 0) + return result; + + return sd_scsi_to_pr_err(&sshdr, result); +} + +static int sd_pr_read_keys(struct block_device *bdev, struct pr_keys *keys_info) +{ + int result, i, data_offset, num_copy_keys; + u32 num_keys = keys_info->num_keys; + int data_len = num_keys * 8 + 8; + u8 *data; + + data = kzalloc(data_len, GFP_KERNEL); + if (!data) + return -ENOMEM; + + result = sd_pr_in_command(bdev, READ_KEYS, data, data_len); + if (result) + goto free_data; + + keys_info->generation = get_unaligned_be32(&data[0]); + keys_info->num_keys = get_unaligned_be32(&data[4]) / 8; + + data_offset = 8; + num_copy_keys = min(num_keys, keys_info->num_keys); + + for (i = 0; i < num_copy_keys; i++) { + keys_info->keys[i] = get_unaligned_be64(&data[data_offset]); + data_offset += 8; + } + +free_data: + kfree(data); + return result; +} + +static int sd_pr_read_reservation(struct block_device *bdev, + struct pr_held_reservation *rsv) +{ + struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); + struct scsi_device *sdev = sdkp->device; + u8 data[24] = { }; + int result, len; + + result = sd_pr_in_command(bdev, READ_RESERVATION, data, sizeof(data)); + if (result) + return result; + + len = get_unaligned_be32(&data[4]); + if (!len) + return 0; + + /* Make sure we have at least the key and type */ + if (len < 14) { + sdev_printk(KERN_INFO, sdev, + "READ RESERVATION failed due to short return buffer of %d bytes\n", + len); + return -EINVAL; + } + + rsv->generation = get_unaligned_be32(&data[0]); + rsv->key = get_unaligned_be64(&data[8]); + rsv->type = scsi_pr_type_to_block(data[21] & 0x0f); + return 0; +} + +static int sd_pr_out_command(struct block_device *bdev, u8 sa, u64 key, + u64 sa_key, enum scsi_pr_type type, u8 flags) +{ + struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); + struct scsi_device *sdev = sdkp->device; + struct scsi_sense_hdr sshdr; + const struct scsi_exec_args exec_args = { + .sshdr = 
&sshdr, + }; + int result; + u8 cmd[16] = { 0, }; + u8 data[24] = { 0, }; + + cmd[0] = PERSISTENT_RESERVE_OUT; + cmd[1] = sa; + cmd[2] = type; + put_unaligned_be32(sizeof(data), &cmd[5]); + + put_unaligned_be64(key, &data[0]); + put_unaligned_be64(sa_key, &data[8]); + data[20] = flags; + + result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, &data, + sizeof(data), SD_TIMEOUT, sdkp->max_retries, + &exec_args); + + if (scsi_status_is_check_condition(result) && + scsi_sense_valid(&sshdr)) { + sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result); + scsi_print_sense_hdr(sdev, NULL, &sshdr); + } + + if (result <= 0) + return result; + + return sd_scsi_to_pr_err(&sshdr, result); +} + +static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, + u32 flags) +{ + if (flags & ~PR_FL_IGNORE_KEY) + return -EOPNOTSUPP; + return sd_pr_out_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00, + old_key, new_key, 0, + (1 << 0) /* APTPL */); +} + +static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, + u32 flags) +{ + if (flags) + return -EOPNOTSUPP; + return sd_pr_out_command(bdev, 0x01, key, 0, + block_pr_type_to_scsi(type), 0); +} + +static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type) +{ + return sd_pr_out_command(bdev, 0x02, key, 0, + block_pr_type_to_scsi(type), 0); +} + +static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, + enum pr_type type, bool abort) +{ + return sd_pr_out_command(bdev, abort ? 0x05 : 0x04, old_key, new_key, + block_pr_type_to_scsi(type), 0); +} + +static int sd_pr_clear(struct block_device *bdev, u64 key) +{ + return sd_pr_out_command(bdev, 0x03, key, 0, 0, 0); +} + +static const struct pr_ops sd_pr_ops = { + .pr_register = sd_pr_register, + .pr_reserve = sd_pr_reserve, + .pr_release = sd_pr_release, + .pr_preempt = sd_pr_preempt, + .pr_clear = sd_pr_clear, + .pr_read_keys = sd_pr_read_keys, + .pr_read_reservation = sd_pr_read_reservation, +}; + +static void scsi_disk_free_disk(struct gendisk *disk) +{ + struct scsi_disk *sdkp = scsi_disk(disk); + + put_device(&sdkp->disk_dev); +} + +static const struct block_device_operations sd_fops = { + .owner = THIS_MODULE, + .open = sd_open, + .release = sd_release, + .ioctl = sd_ioctl, + .getgeo = sd_getgeo, + .compat_ioctl = blkdev_compat_ptr_ioctl, + .check_events = sd_check_events, + .unlock_native_capacity = sd_unlock_native_capacity, + .report_zones = sd_zbc_report_zones, + .get_unique_id = sd_get_unique_id, + .free_disk = scsi_disk_free_disk, + .pr_ops = &sd_pr_ops, +}; + +/** + * sd_eh_reset - reset error handling callback + * @scmd: sd-issued command that has failed + * + * This function is called by the SCSI midlayer before starting + * SCSI EH. When counting medium access failures we have to be + * careful to register it only only once per device and SCSI EH run; + * there might be several timed out commands which will cause the + * 'max_medium_access_timeouts' counter to trigger after the first + * SCSI EH run already and set the device to offline. + * So this function resets the internal counter before starting SCSI EH. 
+ **/ +static void sd_eh_reset(struct scsi_cmnd *scmd) +{ + struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk); + + /* New SCSI EH run, reset gate variable */ + sdkp->ignore_medium_access_errors = false; +} + +/** + * sd_eh_action - error handling callback + * @scmd: sd-issued command that has failed + * @eh_disp: The recovery disposition suggested by the midlayer + * + * This function is called by the SCSI midlayer upon completion of an + * error test command (currently TEST UNIT READY). The result of sending + * the eh command is passed in eh_disp. We're looking for devices that + * fail medium access commands but are OK with non access commands like + * test unit ready (so wrongly see the device as having a successful + * recovery) + **/ +static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp) +{ + struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk); + struct scsi_device *sdev = scmd->device; + + if (!scsi_device_online(sdev) || + !scsi_medium_access_command(scmd) || + host_byte(scmd->result) != DID_TIME_OUT || + eh_disp != SUCCESS) + return eh_disp; + + /* + * The device has timed out executing a medium access command. + * However, the TEST UNIT READY command sent during error + * handling completed successfully. Either the device is in the + * process of recovering or has it suffered an internal failure + * that prevents access to the storage medium. + */ + if (!sdkp->ignore_medium_access_errors) { + sdkp->medium_access_timed_out++; + sdkp->ignore_medium_access_errors = true; + } + + /* + * If the device keeps failing read/write commands but TEST UNIT + * READY always completes successfully we assume that medium + * access is no longer possible and take the device offline. + */ + if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) { + scmd_printk(KERN_ERR, scmd, + "Medium access timeout failure. Offlining disk!\n"); + mutex_lock(&sdev->state_mutex); + scsi_device_set_state(sdev, SDEV_OFFLINE); + mutex_unlock(&sdev->state_mutex); + + return SUCCESS; + } + + return eh_disp; +} + +static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) +{ + struct request *req = scsi_cmd_to_rq(scmd); + struct scsi_device *sdev = scmd->device; + unsigned int transferred, good_bytes; + u64 start_lba, end_lba, bad_lba; + + /* + * Some commands have a payload smaller than the device logical + * block size (e.g. INQUIRY on a 4K disk). + */ + if (scsi_bufflen(scmd) <= sdev->sector_size) + return 0; + + /* Check if we have a 'bad_lba' information */ + if (!scsi_get_sense_info_fld(scmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE, + &bad_lba)) + return 0; + + /* + * If the bad lba was reported incorrectly, we have no idea where + * the error is. + */ + start_lba = sectors_to_logical(sdev, blk_rq_pos(req)); + end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd)); + if (bad_lba < start_lba || bad_lba >= end_lba) + return 0; + + /* + * resid is optional but mostly filled in. When it's unused, + * its value is zero, so we assume the whole buffer transferred + */ + transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd); + + /* This computation should always be done in terms of the + * resolution of the device's medium. + */ + good_bytes = logical_to_bytes(sdev, bad_lba - start_lba); + + return min(good_bytes, transferred); +} + +/** + * sd_done - bottom half handler: called when the lower level + * driver has completed (successfully or otherwise) a scsi command. + * @SCpnt: mid-level's per command structure. 
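+ *
+ * Returns the number of bytes that completed successfully.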
+ * + * Note: potentially run from within an ISR. Must not block. + **/ +static int sd_done(struct scsi_cmnd *SCpnt) +{ + int result = SCpnt->result; + unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); + unsigned int sector_size = SCpnt->device->sector_size; + unsigned int resid; + struct scsi_sense_hdr sshdr; + struct request *req = scsi_cmd_to_rq(SCpnt); + struct scsi_disk *sdkp = scsi_disk(req->q->disk); + int sense_valid = 0; + int sense_deferred = 0; + + switch (req_op(req)) { + case REQ_OP_DISCARD: + case REQ_OP_WRITE_ZEROES: + case REQ_OP_ZONE_RESET: + case REQ_OP_ZONE_RESET_ALL: + case REQ_OP_ZONE_OPEN: + case REQ_OP_ZONE_CLOSE: + case REQ_OP_ZONE_FINISH: + if (!result) { + good_bytes = blk_rq_bytes(req); + scsi_set_resid(SCpnt, 0); + } else { + good_bytes = 0; + scsi_set_resid(SCpnt, blk_rq_bytes(req)); + } + break; + default: + /* + * In case of bogus fw or device, we could end up having + * an unaligned partial completion. Check this here and force + * alignment. + */ + resid = scsi_get_resid(SCpnt); + if (resid & (sector_size - 1)) { + sd_printk(KERN_INFO, sdkp, + "Unaligned partial completion (resid=%u, sector_sz=%u)\n", + resid, sector_size); + scsi_print_command(SCpnt); + resid = min(scsi_bufflen(SCpnt), + round_up(resid, sector_size)); + scsi_set_resid(SCpnt, resid); + } + } + + if (result) { + sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); + if (sense_valid) + sense_deferred = scsi_sense_is_deferred(&sshdr); + } + sdkp->medium_access_timed_out = 0; + + if (!scsi_status_is_check_condition(result) && + (!sense_valid || sense_deferred)) + goto out; + + switch (sshdr.sense_key) { + case HARDWARE_ERROR: + case MEDIUM_ERROR: + good_bytes = sd_completed_bytes(SCpnt); + break; + case RECOVERED_ERROR: + good_bytes = scsi_bufflen(SCpnt); + break; + case NO_SENSE: + /* This indicates a false check condition, so ignore it. An + * unknown amount of data was transferred so treat it as an + * error. + */ + SCpnt->result = 0; + memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + break; + case ABORTED_COMMAND: + if (sshdr.asc == 0x10) /* DIF: Target detected corruption */ + good_bytes = sd_completed_bytes(SCpnt); + break; + case ILLEGAL_REQUEST: + switch (sshdr.asc) { + case 0x10: /* DIX: Host detected corruption */ + good_bytes = sd_completed_bytes(SCpnt); + break; + case 0x20: /* INVALID COMMAND OPCODE */ + case 0x24: /* INVALID FIELD IN CDB */ + switch (SCpnt->cmnd[0]) { + case UNMAP: + sd_config_discard(sdkp, SD_LBP_DISABLE); + break; + case WRITE_SAME_16: + case WRITE_SAME: + if (SCpnt->cmnd[1] & 8) { /* UNMAP */ + sd_config_discard(sdkp, SD_LBP_DISABLE); + } else { + sdkp->device->no_write_same = 1; + sd_config_write_same(sdkp); + req->rq_flags |= RQF_QUIET; + } + break; + } + } + break; + default: + break; + } + + out: + if (sd_is_zoned(sdkp)) + good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr); + + SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt, + "sd_done: completed %d of %d bytes\n", + good_bytes, scsi_bufflen(SCpnt))); + + return good_bytes; +} + +/* + * spinup disk - called only in sd_revalidate_disk() + */ +static void +sd_spinup_disk(struct scsi_disk *sdkp) +{ + unsigned char cmd[10]; + unsigned long spintime_expire = 0; + int retries, spintime; + unsigned int the_result; + struct scsi_sense_hdr sshdr; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + int sense_valid = 0; + + spintime = 0; + + /* Spin up drives, as required. Only do this at boot time */ + /* Spinup needs to be done for module loads too. 
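+ The loop below polls the device with TEST UNIT READY and, if it
+ reports NOT READY, issues START STOP UNIT to begin the spin-up,
+ waiting up to 100 seconds for the drive to become ready.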
*/ + do { + retries = 0; + + do { + bool media_was_present = sdkp->media_present; + + cmd[0] = TEST_UNIT_READY; + memset((void *) &cmd[1], 0, 9); + + the_result = scsi_execute_cmd(sdkp->device, cmd, + REQ_OP_DRV_IN, NULL, 0, + SD_TIMEOUT, + sdkp->max_retries, + &exec_args); + + /* + * If the drive has indicated to us that it + * doesn't have any media in it, don't bother + * with any more polling. + */ + if (media_not_present(sdkp, &sshdr)) { + if (media_was_present) + sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n"); + return; + } + + if (the_result) + sense_valid = scsi_sense_valid(&sshdr); + retries++; + } while (retries < 3 && + (!scsi_status_is_good(the_result) || + (scsi_status_is_check_condition(the_result) && + sense_valid && sshdr.sense_key == UNIT_ATTENTION))); + + if (!scsi_status_is_check_condition(the_result)) { + /* no sense, TUR either succeeded or failed + * with a status error */ + if(!spintime && !scsi_status_is_good(the_result)) { + sd_print_result(sdkp, "Test Unit Ready failed", + the_result); + } + break; + } + + /* + * The device does not want the automatic start to be issued. + */ + if (sdkp->device->no_start_on_add) + break; + + if (sense_valid && sshdr.sense_key == NOT_READY) { + if (sshdr.asc == 4 && sshdr.ascq == 3) + break; /* manual intervention required */ + if (sshdr.asc == 4 && sshdr.ascq == 0xb) + break; /* standby */ + if (sshdr.asc == 4 && sshdr.ascq == 0xc) + break; /* unavailable */ + if (sshdr.asc == 4 && sshdr.ascq == 0x1b) + break; /* sanitize in progress */ + /* + * Issue command to spin up drive when not ready + */ + if (!spintime) { + sd_printk(KERN_NOTICE, sdkp, "Spinning up disk..."); + cmd[0] = START_STOP; + cmd[1] = 1; /* Return immediately */ + memset((void *) &cmd[2], 0, 8); + cmd[4] = 1; /* Start spin cycle */ + if (sdkp->device->start_stop_pwr_cond) + cmd[4] |= 1 << 4; + scsi_execute_cmd(sdkp->device, cmd, + REQ_OP_DRV_IN, NULL, 0, + SD_TIMEOUT, sdkp->max_retries, + &exec_args); + spintime_expire = jiffies + 100 * HZ; + spintime = 1; + } + /* Wait 1 second for next try */ + msleep(1000); + printk(KERN_CONT "."); + + /* + * Wait for USB flash devices with slow firmware. + * Yes, this sense key/ASC combination shouldn't + * occur here. It's characteristic of these devices. + */ + } else if (sense_valid && + sshdr.sense_key == UNIT_ATTENTION && + sshdr.asc == 0x28) { + if (!spintime) { + spintime_expire = jiffies + 5 * HZ; + spintime = 1; + } + /* Wait 1 second for next try */ + msleep(1000); + } else { + /* we don't understand the sense code, so it's + * probably pointless to loop */ + if(!spintime) { + sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n"); + sd_print_sense_hdr(sdkp, &sshdr); + } + break; + } + + } while (spintime && time_before_eq(jiffies, spintime_expire)); + + if (spintime) { + if (scsi_status_is_good(the_result)) + printk(KERN_CONT "ready\n"); + else + printk(KERN_CONT "not responding...\n"); + } +} + +/* + * Determine whether disk supports Data Integrity Field. + */ +static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer) +{ + struct scsi_device *sdp = sdkp->device; + u8 type; + + if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) { + sdkp->protection_type = 0; + return 0; + } + + type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ + + if (type > T10_PI_TYPE3_PROTECTION) { + sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \ + " protection type %u. 
Disabling disk!\n", + type); + sdkp->protection_type = 0; + return -ENODEV; + } + + sdkp->protection_type = type; + + return 0; +} + +static void sd_config_protection(struct scsi_disk *sdkp) +{ + struct scsi_device *sdp = sdkp->device; + + sd_dif_config_host(sdkp); + + if (!sdkp->protection_type) + return; + + if (!scsi_host_dif_capable(sdp->host, sdkp->protection_type)) { + sd_first_printk(KERN_NOTICE, sdkp, + "Disabling DIF Type %u protection\n", + sdkp->protection_type); + sdkp->protection_type = 0; + } + + sd_first_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n", + sdkp->protection_type); +} + +static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, + struct scsi_sense_hdr *sshdr, int sense_valid, + int the_result) +{ + if (sense_valid) + sd_print_sense_hdr(sdkp, sshdr); + else + sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n"); + + /* + * Set dirty bit for removable devices if not ready - + * sometimes drives will not report this properly. + */ + if (sdp->removable && + sense_valid && sshdr->sense_key == NOT_READY) + set_media_not_present(sdkp); + + /* + * We used to set media_present to 0 here to indicate no media + * in the drive, but some drives fail read capacity even with + * media present, so we can't do that. + */ + sdkp->capacity = 0; /* unknown mapped to zero - as usual */ +} + +#define RC16_LEN 32 +#if RC16_LEN > SD_BUF_SIZE +#error RC16_LEN must not be more than SD_BUF_SIZE +#endif + +#define READ_CAPACITY_RETRIES_ON_RESET 10 + +static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, + unsigned char *buffer) +{ + unsigned char cmd[16]; + struct scsi_sense_hdr sshdr; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + int sense_valid = 0; + int the_result; + int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET; + unsigned int alignment; + unsigned long long lba; + unsigned sector_size; + + if (sdp->no_read_capacity_16) + return -EINVAL; + + do { + memset(cmd, 0, 16); + cmd[0] = SERVICE_ACTION_IN_16; + cmd[1] = SAI_READ_CAPACITY_16; + cmd[13] = RC16_LEN; + memset(buffer, 0, RC16_LEN); + + the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, + buffer, RC16_LEN, SD_TIMEOUT, + sdkp->max_retries, &exec_args); + + if (media_not_present(sdkp, &sshdr)) + return -ENODEV; + + if (the_result > 0) { + sense_valid = scsi_sense_valid(&sshdr); + if (sense_valid && + sshdr.sense_key == ILLEGAL_REQUEST && + (sshdr.asc == 0x20 || sshdr.asc == 0x24) && + sshdr.ascq == 0x00) + /* Invalid Command Operation Code or + * Invalid Field in CDB, just retry + * silently with RC10 */ + return -EINVAL; + if (sense_valid && + sshdr.sense_key == UNIT_ATTENTION && + sshdr.asc == 0x29 && sshdr.ascq == 0x00) + /* Device reset might occur several times, + * give it one more chance */ + if (--reset_retries > 0) + continue; + } + retries--; + + } while (the_result && retries); + + if (the_result) { + sd_print_result(sdkp, "Read Capacity(16) failed", the_result); + read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); + return -EINVAL; + } + + sector_size = get_unaligned_be32(&buffer[8]); + lba = get_unaligned_be64(&buffer[0]); + + if (sd_read_protection_type(sdkp, buffer) < 0) { + sdkp->capacity = 0; + return -ENODEV; + } + + /* Logical blocks per physical block exponent */ + sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size; + + /* RC basis */ + sdkp->rc_basis = (buffer[12] >> 4) & 0x3; + + /* Lowest aligned logical block */ + alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * 
sector_size; + blk_queue_alignment_offset(sdp->request_queue, alignment); + if (alignment && sdkp->first_scan) + sd_printk(KERN_NOTICE, sdkp, + "physical block alignment offset: %u\n", alignment); + + if (buffer[14] & 0x80) { /* LBPME */ + sdkp->lbpme = 1; + + if (buffer[14] & 0x40) /* LBPRZ */ + sdkp->lbprz = 1; + + sd_config_discard(sdkp, SD_LBP_WS16); + } + + sdkp->capacity = lba + 1; + return sector_size; +} + +static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, + unsigned char *buffer) +{ + unsigned char cmd[16]; + struct scsi_sense_hdr sshdr; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + int sense_valid = 0; + int the_result; + int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET; + sector_t lba; + unsigned sector_size; + + do { + cmd[0] = READ_CAPACITY; + memset(&cmd[1], 0, 9); + memset(buffer, 0, 8); + + the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer, + 8, SD_TIMEOUT, sdkp->max_retries, + &exec_args); + + if (media_not_present(sdkp, &sshdr)) + return -ENODEV; + + if (the_result > 0) { + sense_valid = scsi_sense_valid(&sshdr); + if (sense_valid && + sshdr.sense_key == UNIT_ATTENTION && + sshdr.asc == 0x29 && sshdr.ascq == 0x00) + /* Device reset might occur several times, + * give it one more chance */ + if (--reset_retries > 0) + continue; + } + retries--; + + } while (the_result && retries); + + if (the_result) { + sd_print_result(sdkp, "Read Capacity(10) failed", the_result); + read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); + return -EINVAL; + } + + sector_size = get_unaligned_be32(&buffer[4]); + lba = get_unaligned_be32(&buffer[0]); + + if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) { + /* Some buggy (usb cardreader) devices return an lba of + 0xffffffff when the want to report a size of 0 (with + which they really mean no media is present) */ + sdkp->capacity = 0; + sdkp->physical_block_size = sector_size; + return sector_size; + } + + sdkp->capacity = lba + 1; + sdkp->physical_block_size = sector_size; + return sector_size; +} + +static int sd_try_rc16_first(struct scsi_device *sdp) +{ + if (sdp->host->max_cmd_len < 16) + return 0; + if (sdp->try_rc_10_first) + return 0; + if (sdp->scsi_level > SCSI_SPC_2) + return 1; + if (scsi_device_protection(sdp)) + return 1; + return 0; +} + +/* + * read disk capacity + */ +static void +sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) +{ + int sector_size; + struct scsi_device *sdp = sdkp->device; + + if (sd_try_rc16_first(sdp)) { + sector_size = read_capacity_16(sdkp, sdp, buffer); + if (sector_size == -EOVERFLOW) + goto got_data; + if (sector_size == -ENODEV) + return; + if (sector_size < 0) + sector_size = read_capacity_10(sdkp, sdp, buffer); + if (sector_size < 0) + return; + } else { + sector_size = read_capacity_10(sdkp, sdp, buffer); + if (sector_size == -EOVERFLOW) + goto got_data; + if (sector_size < 0) + return; + if ((sizeof(sdkp->capacity) > 4) && + (sdkp->capacity > 0xffffffffULL)) { + int old_sector_size = sector_size; + sd_printk(KERN_NOTICE, sdkp, "Very big device. 
" + "Trying to use READ CAPACITY(16).\n"); + sector_size = read_capacity_16(sdkp, sdp, buffer); + if (sector_size < 0) { + sd_printk(KERN_NOTICE, sdkp, + "Using 0xffffffff as device size\n"); + sdkp->capacity = 1 + (sector_t) 0xffffffff; + sector_size = old_sector_size; + goto got_data; + } + /* Remember that READ CAPACITY(16) succeeded */ + sdp->try_rc_10_first = 0; + } + } + + /* Some devices are known to return the total number of blocks, + * not the highest block number. Some devices have versions + * which do this and others which do not. Some devices we might + * suspect of doing this but we don't know for certain. + * + * If we know the reported capacity is wrong, decrement it. If + * we can only guess, then assume the number of blocks is even + * (usually true but not always) and err on the side of lowering + * the capacity. + */ + if (sdp->fix_capacity || + (sdp->guess_capacity && (sdkp->capacity & 0x01))) { + sd_printk(KERN_INFO, sdkp, "Adjusting the sector count " + "from its reported value: %llu\n", + (unsigned long long) sdkp->capacity); + --sdkp->capacity; + } + +got_data: + if (sector_size == 0) { + sector_size = 512; + sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, " + "assuming 512.\n"); + } + + if (sector_size != 512 && + sector_size != 1024 && + sector_size != 2048 && + sector_size != 4096) { + sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n", + sector_size); + /* + * The user might want to re-format the drive with + * a supported sectorsize. Once this happens, it + * would be relatively trivial to set the thing up. + * For this reason, we leave the thing in the table. + */ + sdkp->capacity = 0; + /* + * set a bogus sector size so the normal read/write + * logic in the block layer will eventually refuse any + * request on this device without tripping over power + * of two sector size assumptions + */ + sector_size = 512; + } + blk_queue_logical_block_size(sdp->request_queue, sector_size); + blk_queue_physical_block_size(sdp->request_queue, + sdkp->physical_block_size); + sdkp->device->sector_size = sector_size; + + if (sdkp->capacity > 0xffffffff) + sdp->use_16_for_rw = 1; + +} + +/* + * Print disk capacity + */ +static void +sd_print_capacity(struct scsi_disk *sdkp, + sector_t old_capacity) +{ + int sector_size = sdkp->device->sector_size; + char cap_str_2[10], cap_str_10[10]; + + if (!sdkp->first_scan && old_capacity == sdkp->capacity) + return; + + string_get_size(sdkp->capacity, sector_size, + STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); + string_get_size(sdkp->capacity, sector_size, + STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); + + sd_printk(KERN_NOTICE, sdkp, + "%llu %d-byte logical blocks: (%s/%s)\n", + (unsigned long long)sdkp->capacity, + sector_size, cap_str_10, cap_str_2); + + if (sdkp->physical_block_size != sector_size) + sd_printk(KERN_NOTICE, sdkp, + "%u-byte physical blocks\n", + sdkp->physical_block_size); +} + +/* called with buffer of length 512 */ +static inline int +sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage, + unsigned char *buffer, int len, struct scsi_mode_data *data, + struct scsi_sense_hdr *sshdr) +{ + /* + * If we must use MODE SENSE(10), make sure that the buffer length + * is at least 8 bytes so that the mode sense header fits. 
+ */ + if (sdkp->device->use_10_for_ms && len < 8) + len = 8; + + return scsi_mode_sense(sdkp->device, dbd, modepage, 0, buffer, len, + SD_TIMEOUT, sdkp->max_retries, data, sshdr); +} + +/* + * read write protect setting, if possible - called only in sd_revalidate_disk() + * called with buffer of length SD_BUF_SIZE + */ +static void +sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) +{ + int res; + struct scsi_device *sdp = sdkp->device; + struct scsi_mode_data data; + int old_wp = sdkp->write_prot; + + set_disk_ro(sdkp->disk, 0); + if (sdp->skip_ms_page_3f) { + sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n"); + return; + } + + if (sdp->use_192_bytes_for_3f) { + res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL); + } else { + /* + * First attempt: ask for all pages (0x3F), but only 4 bytes. + * We have to start carefully: some devices hang if we ask + * for more than is available. + */ + res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL); + + /* + * Second attempt: ask for page 0 When only page 0 is + * implemented, a request for page 3F may return Sense Key + * 5: Illegal Request, Sense Code 24: Invalid field in + * CDB. + */ + if (res < 0) + res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL); + + /* + * Third attempt: ask 255 bytes, as we did earlier. + */ + if (res < 0) + res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255, + &data, NULL); + } + + if (res < 0) { + sd_first_printk(KERN_WARNING, sdkp, + "Test WP failed, assume Write Enabled\n"); + } else { + sdkp->write_prot = ((data.device_specific & 0x80) != 0); + set_disk_ro(sdkp->disk, sdkp->write_prot); + if (sdkp->first_scan || old_wp != sdkp->write_prot) { + sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", + sdkp->write_prot ? "on" : "off"); + sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer); + } + } +} + +/* + * sd_read_cache_type - called only from sd_revalidate_disk() + * called with buffer of length SD_BUF_SIZE + */ +static void +sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) +{ + int len = 0, res; + struct scsi_device *sdp = sdkp->device; + + int dbd; + int modepage; + int first_len; + struct scsi_mode_data data; + struct scsi_sense_hdr sshdr; + int old_wce = sdkp->WCE; + int old_rcd = sdkp->RCD; + int old_dpofua = sdkp->DPOFUA; + + + if (sdkp->cache_override) + return; + + first_len = 4; + if (sdp->skip_ms_page_8) { + if (sdp->type == TYPE_RBC) + goto defaults; + else { + if (sdp->skip_ms_page_3f) + goto defaults; + modepage = 0x3F; + if (sdp->use_192_bytes_for_3f) + first_len = 192; + dbd = 0; + } + } else if (sdp->type == TYPE_RBC) { + modepage = 6; + dbd = 8; + } else { + modepage = 8; + dbd = 0; + } + + /* cautiously ask */ + res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len, + &data, &sshdr); + + if (res < 0) + goto bad_sense; + + if (!data.header_length) { + modepage = 6; + first_len = 0; + sd_first_printk(KERN_ERR, sdkp, + "Missing header in MODE_SENSE response\n"); + } + + /* that went OK, now ask for the proper length */ + len = data.length; + + /* + * We're only interested in the first three bytes, actually. + * But the data cache page is defined for the first 20. 
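+ * (Byte 2 of the caching mode page carries the WCE and RCD bits that are + * extracted further down.)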
+ */ + if (len < 3) + goto bad_sense; + else if (len > SD_BUF_SIZE) { + sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " + "data from %d to %d bytes\n", len, SD_BUF_SIZE); + len = SD_BUF_SIZE; + } + if (modepage == 0x3F && sdp->use_192_bytes_for_3f) + len = 192; + + /* Get the data */ + if (len > first_len) + res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len, + &data, &sshdr); + + if (!res) { + int offset = data.header_length + data.block_descriptor_length; + + while (offset < len) { + u8 page_code = buffer[offset] & 0x3F; + u8 spf = buffer[offset] & 0x40; + + if (page_code == 8 || page_code == 6) { + /* We're interested only in the first 3 bytes. + */ + if (len - offset <= 2) { + sd_first_printk(KERN_ERR, sdkp, + "Incomplete mode parameter " + "data\n"); + goto defaults; + } else { + modepage = page_code; + goto Page_found; + } + } else { + /* Go to the next page */ + if (spf && len - offset > 3) + offset += 4 + (buffer[offset+2] << 8) + + buffer[offset+3]; + else if (!spf && len - offset > 1) + offset += 2 + buffer[offset+1]; + else { + sd_first_printk(KERN_ERR, sdkp, + "Incomplete mode " + "parameter data\n"); + goto defaults; + } + } + } + + sd_first_printk(KERN_WARNING, sdkp, + "No Caching mode page found\n"); + goto defaults; + + Page_found: + if (modepage == 8) { + sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); + sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); + } else { + sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0); + sdkp->RCD = 0; + } + + sdkp->DPOFUA = (data.device_specific & 0x10) != 0; + if (sdp->broken_fua) { + sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); + sdkp->DPOFUA = 0; + } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw && + !sdkp->device->use_16_for_rw) { + sd_first_printk(KERN_NOTICE, sdkp, + "Uses READ/WRITE(6), disabling FUA\n"); + sdkp->DPOFUA = 0; + } + + /* No cache flush allowed for write protected devices */ + if (sdkp->WCE && sdkp->write_prot) + sdkp->WCE = 0; + + if (sdkp->first_scan || old_wce != sdkp->WCE || + old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA) + sd_printk(KERN_NOTICE, sdkp, + "Write cache: %s, read cache: %s, %s\n", + sdkp->WCE ? "enabled" : "disabled", + sdkp->RCD ? "disabled" : "enabled", + sdkp->DPOFUA ? "supports DPO and FUA" + : "doesn't support DPO or FUA"); + + return; + } + +bad_sense: + if (scsi_sense_valid(&sshdr) && + sshdr.sense_key == ILLEGAL_REQUEST && + sshdr.asc == 0x24 && sshdr.ascq == 0x0) + /* Invalid field in CDB */ + sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n"); + else + sd_first_printk(KERN_ERR, sdkp, + "Asking for cache data failed\n"); + +defaults: + if (sdp->wce_default_on) { + sd_first_printk(KERN_NOTICE, sdkp, + "Assuming drive cache: write back\n"); + sdkp->WCE = 1; + } else { + sd_first_printk(KERN_WARNING, sdkp, + "Assuming drive cache: write through\n"); + sdkp->WCE = 0; + } + sdkp->RCD = 0; + sdkp->DPOFUA = 0; +} + +/* + * The ATO bit indicates whether the DIF application tag is available + * for use by the operating system. 
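+ * It is reported in bit 7 of byte 5 of the Control mode page (0x0a), which is + * what sd_read_app_tag_own() below fetches with MODE SENSE.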
+ */ +static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) +{ + int res, offset; + struct scsi_device *sdp = sdkp->device; + struct scsi_mode_data data; + struct scsi_sense_hdr sshdr; + + if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) + return; + + if (sdkp->protection_type == 0) + return; + + res = scsi_mode_sense(sdp, 1, 0x0a, 0, buffer, 36, SD_TIMEOUT, + sdkp->max_retries, &data, &sshdr); + + if (res < 0 || !data.header_length || + data.length < 6) { + sd_first_printk(KERN_WARNING, sdkp, + "getting Control mode page failed, assume no ATO\n"); + + if (scsi_sense_valid(&sshdr)) + sd_print_sense_hdr(sdkp, &sshdr); + + return; + } + + offset = data.header_length + data.block_descriptor_length; + + if ((buffer[offset] & 0x3f) != 0x0a) { + sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n"); + return; + } + + if ((buffer[offset + 5] & 0x80) == 0) + return; + + sdkp->ATO = 1; + + return; +} + +/** + * sd_read_block_limits - Query disk device for preferred I/O sizes. + * @sdkp: disk to query + */ +static void sd_read_block_limits(struct scsi_disk *sdkp) +{ + struct scsi_vpd *vpd; + + rcu_read_lock(); + + vpd = rcu_dereference(sdkp->device->vpd_pgb0); + if (!vpd || vpd->len < 16) + goto out; + + sdkp->min_xfer_blocks = get_unaligned_be16(&vpd->data[6]); + sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]); + sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]); + + if (vpd->len >= 64) { + unsigned int lba_count, desc_count; + + sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]); + + if (!sdkp->lbpme) + goto out; + + lba_count = get_unaligned_be32(&vpd->data[20]); + desc_count = get_unaligned_be32(&vpd->data[24]); + + if (lba_count && desc_count) + sdkp->max_unmap_blocks = lba_count; + + sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]); + + if (vpd->data[32] & 0x80) + sdkp->unmap_alignment = + get_unaligned_be32(&vpd->data[32]) & ~(1 << 31); + + if (!sdkp->lbpvpd) { /* LBP VPD page not provided */ + + if (sdkp->max_unmap_blocks) + sd_config_discard(sdkp, SD_LBP_UNMAP); + else + sd_config_discard(sdkp, SD_LBP_WS16); + + } else { /* LBP VPD page tells us what to use */ + if (sdkp->lbpu && sdkp->max_unmap_blocks) + sd_config_discard(sdkp, SD_LBP_UNMAP); + else if (sdkp->lbpws) + sd_config_discard(sdkp, SD_LBP_WS16); + else if (sdkp->lbpws10) + sd_config_discard(sdkp, SD_LBP_WS10); + else + sd_config_discard(sdkp, SD_LBP_DISABLE); + } + } + + out: + rcu_read_unlock(); +} + +/** + * sd_read_block_characteristics - Query block dev. characteristics + * @sdkp: disk to query + */ +static void sd_read_block_characteristics(struct scsi_disk *sdkp) +{ + struct request_queue *q = sdkp->disk->queue; + struct scsi_vpd *vpd; + u16 rot; + u8 zoned; + + rcu_read_lock(); + vpd = rcu_dereference(sdkp->device->vpd_pgb1); + + if (!vpd || vpd->len < 8) { + rcu_read_unlock(); + return; + } + + rot = get_unaligned_be16(&vpd->data[4]); + zoned = (vpd->data[8] >> 4) & 3; + rcu_read_unlock(); + + if (rot == 1) { + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); + } + + if (sdkp->device->type == TYPE_ZBC) { + /* + * Host-managed: Per ZBC and ZAC specifications, writes in + * sequential write required zones of host-managed devices must + * be aligned to the device physical block size. 
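+ * The zone write granularity is therefore set to the physical block size + * just below.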
+ */ + disk_set_zoned(sdkp->disk, BLK_ZONED_HM); + blk_queue_zone_write_granularity(q, sdkp->physical_block_size); + } else { + sdkp->zoned = zoned; + if (sdkp->zoned == 1) { + /* Host-aware */ + disk_set_zoned(sdkp->disk, BLK_ZONED_HA); + } else { + /* Regular disk or drive managed disk */ + disk_set_zoned(sdkp->disk, BLK_ZONED_NONE); + } + } + + if (!sdkp->first_scan) + return; + + if (blk_queue_is_zoned(q)) { + sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", + q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware"); + } else { + if (sdkp->zoned == 1) + sd_printk(KERN_NOTICE, sdkp, + "Host-aware SMR disk used as regular disk\n"); + else if (sdkp->zoned == 2) + sd_printk(KERN_NOTICE, sdkp, + "Drive-managed SMR disk\n"); + } +} + +/** + * sd_read_block_provisioning - Query provisioning VPD page + * @sdkp: disk to query + */ +static void sd_read_block_provisioning(struct scsi_disk *sdkp) +{ + struct scsi_vpd *vpd; + + if (sdkp->lbpme == 0) + return; + + rcu_read_lock(); + vpd = rcu_dereference(sdkp->device->vpd_pgb2); + + if (!vpd || vpd->len < 8) { + rcu_read_unlock(); + return; + } + + sdkp->lbpvpd = 1; + sdkp->lbpu = (vpd->data[5] >> 7) & 1; /* UNMAP */ + sdkp->lbpws = (vpd->data[5] >> 6) & 1; /* WRITE SAME(16) w/ UNMAP */ + sdkp->lbpws10 = (vpd->data[5] >> 5) & 1; /* WRITE SAME(10) w/ UNMAP */ + rcu_read_unlock(); +} + +static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) +{ + struct scsi_device *sdev = sdkp->device; + + if (sdev->host->no_write_same) { + sdev->no_write_same = 1; + + return; + } + + if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY, 0) < 0) { + struct scsi_vpd *vpd; + + sdev->no_report_opcodes = 1; + + /* Disable WRITE SAME if REPORT SUPPORTED OPERATION + * CODES is unsupported and the device has an ATA + * Information VPD page (SAT). + */ + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + if (vpd) + sdev->no_write_same = 1; + rcu_read_unlock(); + } + + if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16, 0) == 1) + sdkp->ws16 = 1; + + if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME, 0) == 1) + sdkp->ws10 = 1; +} + +static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer) +{ + struct scsi_device *sdev = sdkp->device; + + if (!sdev->security_supported) + return; + + if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, + SECURITY_PROTOCOL_IN, 0) == 1 && + scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, + SECURITY_PROTOCOL_OUT, 0) == 1) + sdkp->security = 1; +} + +static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf) +{ + return logical_to_sectors(sdkp->device, get_unaligned_be64(buf)); +} + +/** + * sd_read_cpr - Query concurrent positioning ranges + * @sdkp: disk to query + */ +static void sd_read_cpr(struct scsi_disk *sdkp) +{ + struct blk_independent_access_ranges *iars = NULL; + unsigned char *buffer = NULL; + unsigned int nr_cpr = 0; + int i, vpd_len, buf_len = SD_BUF_SIZE; + u8 *desc; + + /* + * We need to have the capacity set first for the block layer to be + * able to check the ranges. + */ + if (sdkp->first_scan) + return; + + if (!sdkp->capacity) + goto out; + + /* + * Concurrent Positioning Ranges VPD: there can be at most 256 ranges, + * leading to a maximum page size of 64 + 256*32 bytes. 
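+ * That is 8256 bytes in total: a 64-byte header followed by up to 256 + * descriptors of 32 bytes each.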
+ */ + buf_len = 64 + 256*32; + buffer = kmalloc(buf_len, GFP_KERNEL); + if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len)) + goto out; + + /* We must have at least a 64B header and one 32B range descriptor */ + vpd_len = get_unaligned_be16(&buffer[2]) + 4; + if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) { + sd_printk(KERN_ERR, sdkp, + "Invalid Concurrent Positioning Ranges VPD page\n"); + goto out; + } + + nr_cpr = (vpd_len - 64) / 32; + if (nr_cpr == 1) { + nr_cpr = 0; + goto out; + } + + iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr); + if (!iars) { + nr_cpr = 0; + goto out; + } + + desc = &buffer[64]; + for (i = 0; i < nr_cpr; i++, desc += 32) { + if (desc[0] != i) { + sd_printk(KERN_ERR, sdkp, + "Invalid Concurrent Positioning Range number\n"); + nr_cpr = 0; + break; + } + + iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8); + iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16); + } + +out: + disk_set_independent_access_ranges(sdkp->disk, iars); + if (nr_cpr && sdkp->nr_actuators != nr_cpr) { + sd_printk(KERN_NOTICE, sdkp, + "%u concurrent positioning ranges\n", nr_cpr); + sdkp->nr_actuators = nr_cpr; + } + + kfree(buffer); +} + +static bool sd_validate_min_xfer_size(struct scsi_disk *sdkp) +{ + struct scsi_device *sdp = sdkp->device; + unsigned int min_xfer_bytes = + logical_to_bytes(sdp, sdkp->min_xfer_blocks); + + if (sdkp->min_xfer_blocks == 0) + return false; + + if (min_xfer_bytes & (sdkp->physical_block_size - 1)) { + sd_first_printk(KERN_WARNING, sdkp, + "Preferred minimum I/O size %u bytes not a " \ + "multiple of physical block size (%u bytes)\n", + min_xfer_bytes, sdkp->physical_block_size); + sdkp->min_xfer_blocks = 0; + return false; + } + + sd_first_printk(KERN_INFO, sdkp, "Preferred minimum I/O size %u bytes\n", + min_xfer_bytes); + return true; +} + +/* + * Determine the device's preferred I/O size for reads and writes + * unless the reported value is unreasonably small, large, not a + * multiple of the physical block size, or simply garbage. 
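+ * The value is rejected when it is zero, exceeds dev_max or the sd driver + * limit (SD_DEF_XFER_BLOCKS), is smaller than PAGE_SIZE, or is not a multiple + * of the reported minimum I/O size or of the physical block size.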
+ */ +static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, + unsigned int dev_max) +{ + struct scsi_device *sdp = sdkp->device; + unsigned int opt_xfer_bytes = + logical_to_bytes(sdp, sdkp->opt_xfer_blocks); + unsigned int min_xfer_bytes = + logical_to_bytes(sdp, sdkp->min_xfer_blocks); + + if (sdkp->opt_xfer_blocks == 0) + return false; + + if (sdkp->opt_xfer_blocks > dev_max) { + sd_first_printk(KERN_WARNING, sdkp, + "Optimal transfer size %u logical blocks " \ + "> dev_max (%u logical blocks)\n", + sdkp->opt_xfer_blocks, dev_max); + return false; + } + + if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) { + sd_first_printk(KERN_WARNING, sdkp, + "Optimal transfer size %u logical blocks " \ + "> sd driver limit (%u logical blocks)\n", + sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS); + return false; + } + + if (opt_xfer_bytes < PAGE_SIZE) { + sd_first_printk(KERN_WARNING, sdkp, + "Optimal transfer size %u bytes < " \ + "PAGE_SIZE (%u bytes)\n", + opt_xfer_bytes, (unsigned int)PAGE_SIZE); + return false; + } + + if (min_xfer_bytes && opt_xfer_bytes % min_xfer_bytes) { + sd_first_printk(KERN_WARNING, sdkp, + "Optimal transfer size %u bytes not a " \ + "multiple of preferred minimum block " \ + "size (%u bytes)\n", + opt_xfer_bytes, min_xfer_bytes); + return false; + } + + if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) { + sd_first_printk(KERN_WARNING, sdkp, + "Optimal transfer size %u bytes not a " \ + "multiple of physical block size (%u bytes)\n", + opt_xfer_bytes, sdkp->physical_block_size); + return false; + } + + sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n", + opt_xfer_bytes); + return true; +} + +/** + * sd_revalidate_disk - called the first time a new disk is seen, + * performs disk spin up, read_capacity, etc. + * @disk: struct gendisk we care about + **/ +static int sd_revalidate_disk(struct gendisk *disk) +{ + struct scsi_disk *sdkp = scsi_disk(disk); + struct scsi_device *sdp = sdkp->device; + struct request_queue *q = sdkp->disk->queue; + sector_t old_capacity = sdkp->capacity; + unsigned char *buffer; + unsigned int dev_max, rw_max; + + SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, + "sd_revalidate_disk\n")); + + /* + * If the device is offline, don't try and read capacity or any + * of the other niceties. + */ + if (!scsi_device_online(sdp)) + goto out; + + buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL); + if (!buffer) { + sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory " + "allocation failure.\n"); + goto out; + } + + sd_spinup_disk(sdkp); + + /* + * Without media there is no reason to ask; moreover, some devices + * react badly if we do. + */ + if (sdkp->media_present) { + sd_read_capacity(sdkp, buffer); + + /* + * set the default to rotational. All non-rotational devices + * support the block characteristics VPD page, which will + * cause this to be updated correctly and any device which + * doesn't support it should be treated as rotational. 
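+ * (sd_read_block_characteristics() sets QUEUE_FLAG_NONROT when the Block + * Device Characteristics VPD page reports a medium rotation rate of 1, + * i.e. a non-rotating medium.)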
+ */ + blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); + + if (scsi_device_supports_vpd(sdp)) { + sd_read_block_provisioning(sdkp); + sd_read_block_limits(sdkp); + sd_read_block_characteristics(sdkp); + sd_zbc_read_zones(sdkp, buffer); + sd_read_cpr(sdkp); + } + + sd_print_capacity(sdkp, old_capacity); + + sd_read_write_protect_flag(sdkp, buffer); + sd_read_cache_type(sdkp, buffer); + sd_read_app_tag_own(sdkp, buffer); + sd_read_write_same(sdkp, buffer); + sd_read_security(sdkp, buffer); + sd_config_protection(sdkp); + } + + /* + * We now have all cache related info, determine how we deal + * with flush requests. + */ + sd_set_flush_flag(sdkp); + + /* Initial block count limit based on CDB TRANSFER LENGTH field size. */ + dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS; + + /* Some devices report a maximum block count for READ/WRITE requests. */ + dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks); + q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); + + if (sd_validate_min_xfer_size(sdkp)) + blk_queue_io_min(sdkp->disk->queue, + logical_to_bytes(sdp, sdkp->min_xfer_blocks)); + else + blk_queue_io_min(sdkp->disk->queue, 0); + + if (sd_validate_opt_xfer_size(sdkp, dev_max)) { + q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); + rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); + } else { + q->limits.io_opt = 0; + rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), + (sector_t)BLK_DEF_MAX_SECTORS); + } + + /* + * Limit default to SCSI host optimal sector limit if set. There may be + * an impact on performance for when the size of a request exceeds this + * host limit. + */ + rw_max = min_not_zero(rw_max, sdp->host->opt_sectors); + + /* Do not exceed controller limit */ + rw_max = min(rw_max, queue_max_hw_sectors(q)); + + /* + * Only update max_sectors if previously unset or if the current value + * exceeds the capabilities of the hardware. + */ + if (sdkp->first_scan || + q->limits.max_sectors > q->limits.max_dev_sectors || + q->limits.max_sectors > q->limits.max_hw_sectors) + q->limits.max_sectors = rw_max; + + sdkp->first_scan = 0; + + set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity)); + sd_config_write_same(sdkp); + kfree(buffer); + + /* + * For a zoned drive, revalidating the zones can be done only once + * the gendisk capacity is set. So if this fails, set back the gendisk + * capacity to 0. + */ + if (sd_zbc_revalidate_zones(sdkp)) + set_capacity_and_notify(disk, 0); + + out: + return 0; +} + +/** + * sd_unlock_native_capacity - unlock native capacity + * @disk: struct gendisk to set capacity for + * + * Block layer calls this function if it detects that partitions + * on @disk reach beyond the end of the device. If the SCSI host + * implements ->unlock_native_capacity() method, it's invoked to + * give it a chance to adjust the device capacity. + * + * CONTEXT: + * Defined by block layer. Might sleep. + */ +static void sd_unlock_native_capacity(struct gendisk *disk) +{ + struct scsi_device *sdev = scsi_disk(disk)->device; + + if (sdev->host->hostt->unlock_native_capacity) + sdev->host->hostt->unlock_native_capacity(sdev); +} + +/** + * sd_format_disk_name - format disk name + * @prefix: name prefix - ie. "sd" for SCSI disks + * @index: index of the disk to format name for + * @buf: output buffer + * @buflen: length of the output buffer + * + * SCSI disk names starts at sda. The 26th device is sdz and the + * 27th is sdaa. 
The last one for two lettered suffix is sdzz + * which is followed by sdaaa. + * + * This is basically 26 base counting with one extra 'nil' entry + * at the beginning from the second digit on and can be + * determined using similar method as 26 base conversion with the + * index shifted -1 after each digit is computed. + * + * CONTEXT: + * Don't care. + * + * RETURNS: + * 0 on success, -errno on failure. + */ +static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen) +{ + const int base = 'z' - 'a' + 1; + char *begin = buf + strlen(prefix); + char *end = buf + buflen; + char *p; + int unit; + + p = end - 1; + *p = '\0'; + unit = base; + do { + if (p == begin) + return -EINVAL; + *--p = 'a' + (index % unit); + index = (index / unit) - 1; + } while (index >= 0); + + memmove(begin, p, end - p); + memcpy(buf, prefix, strlen(prefix)); + + return 0; +} + +/** + * sd_probe - called during driver initialization and whenever a + * new scsi device is attached to the system. It is called once + * for each scsi device (not just disks) present. + * @dev: pointer to device object + * + * Returns 0 if successful (or not interested in this scsi device + * (e.g. scanner)); 1 when there is an error. + * + * Note: this function is invoked from the scsi mid-level. + * This function sets up the mapping between a given + * (found in sdp) and new device name + * (e.g. /dev/sda). More precisely it is the block device major + * and minor number that is chosen here. + * + * Assume sd_probe is not re-entrant (for time being) + * Also think about sd_probe() and sd_remove() running coincidentally. + **/ +static int sd_probe(struct device *dev) +{ + struct scsi_device *sdp = to_scsi_device(dev); + struct scsi_disk *sdkp; + struct gendisk *gd; + int index; + int error; + + scsi_autopm_get_device(sdp); + error = -ENODEV; + if (sdp->type != TYPE_DISK && + sdp->type != TYPE_ZBC && + sdp->type != TYPE_MOD && + sdp->type != TYPE_RBC) + goto out; + + if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) { + sdev_printk(KERN_WARNING, sdp, + "Unsupported ZBC host-managed device.\n"); + goto out; + } + + SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp, + "sd_probe\n")); + + error = -ENOMEM; + sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL); + if (!sdkp) + goto out; + + gd = blk_mq_alloc_disk_for_queue(sdp->request_queue, + &sd_bio_compl_lkclass); + if (!gd) + goto out_free; + + index = ida_alloc(&sd_index_ida, GFP_KERNEL); + if (index < 0) { + sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n"); + goto out_put; + } + + error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); + if (error) { + sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); + goto out_free_index; + } + + sdkp->device = sdp; + sdkp->disk = gd; + sdkp->index = index; + sdkp->max_retries = SD_MAX_RETRIES; + atomic_set(&sdkp->openers, 0); + atomic_set(&sdkp->device->ioerr_cnt, 0); + + if (!sdp->request_queue->rq_timeout) { + if (sdp->type != TYPE_MOD) + blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT); + else + blk_queue_rq_timeout(sdp->request_queue, + SD_MOD_TIMEOUT); + } + + device_initialize(&sdkp->disk_dev); + sdkp->disk_dev.parent = get_device(dev); + sdkp->disk_dev.class = &sd_disk_class; + dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev)); + + error = device_add(&sdkp->disk_dev); + if (error) { + put_device(&sdkp->disk_dev); + goto out; + } + + dev_set_drvdata(dev, sdkp); + + gd->major = sd_major((index & 0xf0) >> 4); + gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 
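+ /* Index bits 7:4 select one of the SD_MAJORS majors, bits 3:0 give the + * base minor slot within that major, and any higher index bits land in + * the extended minor space. */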
+ gd->minors = SD_MINORS; + + gd->fops = &sd_fops; + gd->private_data = sdkp; + + /* defaults, until the device tells us otherwise */ + sdp->sector_size = 512; + sdkp->capacity = 0; + sdkp->media_present = 1; + sdkp->write_prot = 0; + sdkp->cache_override = 0; + sdkp->WCE = 0; + sdkp->RCD = 0; + sdkp->ATO = 0; + sdkp->first_scan = 1; + sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS; + + sd_revalidate_disk(gd); + + if (sdp->removable) { + gd->flags |= GENHD_FL_REMOVABLE; + gd->events |= DISK_EVENT_MEDIA_CHANGE; + gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT; + } + + blk_pm_runtime_init(sdp->request_queue, dev); + if (sdp->rpm_autosuspend) { + pm_runtime_set_autosuspend_delay(dev, + sdp->host->hostt->rpm_autosuspend_delay); + } + + error = device_add_disk(dev, gd, NULL); + if (error) { + put_device(&sdkp->disk_dev); + put_disk(gd); + goto out; + } + + if (sdkp->security) { + sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit); + if (sdkp->opal_dev) + sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n"); + } + + sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", + sdp->removable ? "removable " : ""); + scsi_autopm_put_device(sdp); + + return 0; + + out_free_index: + ida_free(&sd_index_ida, index); + out_put: + put_disk(gd); + out_free: + kfree(sdkp); + out: + scsi_autopm_put_device(sdp); + return error; +} + +/** + * sd_remove - called whenever a scsi disk (previously recognized by + * sd_probe) is detached from the system. It is called (potentially + * multiple times) during sd module unload. + * @dev: pointer to device object + * + * Note: this function is invoked from the scsi mid-level. + * This function potentially frees up a device name (e.g. /dev/sdc) + * that could be re-used by a subsequent sd_probe(). + * This function is not called when the built-in sd driver is "exit-ed". + **/ +static int sd_remove(struct device *dev) +{ + struct scsi_disk *sdkp = dev_get_drvdata(dev); + + scsi_autopm_get_device(sdkp->device); + + device_del(&sdkp->disk_dev); + del_gendisk(sdkp->disk); + if (!sdkp->suspended) + sd_shutdown(dev); + + put_disk(sdkp->disk); + return 0; +} + +static void scsi_disk_release(struct device *dev) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + ida_free(&sd_index_ida, sdkp->index); + sd_zbc_free_zone_info(sdkp); + put_device(&sdkp->device->sdev_gendev); + free_opal_dev(sdkp->opal_dev); + + kfree(sdkp); +} + +static int sd_start_stop_device(struct scsi_disk *sdkp, int start) +{ + unsigned char cmd[6] = { START_STOP }; /* START_VALID */ + struct scsi_sense_hdr sshdr; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + .req_flags = BLK_MQ_REQ_PM, + }; + struct scsi_device *sdp = sdkp->device; + int res; + + if (start) + cmd[4] |= 1; /* START */ + + if (sdp->start_stop_pwr_cond) + cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */ + + if (!scsi_device_online(sdp)) + return -ENODEV; + + res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, SD_TIMEOUT, + sdkp->max_retries, &exec_args); + if (res) { + sd_print_result(sdkp, "Start/Stop Unit failed", res); + if (res > 0 && scsi_sense_valid(&sshdr)) { + sd_print_sense_hdr(sdkp, &sshdr); + /* 0x3a is medium not present */ + if (sshdr.asc == 0x3a) + res = 0; + } + } + + /* SCSI error codes must not go to the generic layer */ + if (res) + return -EIO; + + return 0; +} + +/* + * Send a SYNCHRONIZE CACHE instruction down to the device through + * the normal SCSI command structure. Wait for the command to + * complete. 
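+ * Depending on the manage_system_start_stop and manage_shutdown settings, + * sd_shutdown() below may also stop the disk with START STOP UNIT.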
+ */ +static void sd_shutdown(struct device *dev) +{ + struct scsi_disk *sdkp = dev_get_drvdata(dev); + + if (!sdkp) + return; /* this can happen */ + + if (pm_runtime_suspended(dev)) + return; + + if (sdkp->WCE && sdkp->media_present) { + sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); + sd_sync_cache(sdkp); + } + + if ((system_state != SYSTEM_RESTART && + sdkp->device->manage_system_start_stop) || + (system_state == SYSTEM_POWER_OFF && + sdkp->device->manage_shutdown)) { + sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); + sd_start_stop_device(sdkp, 0); + } +} + +static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime) +{ + return (sdev->manage_system_start_stop && !runtime) || + (sdev->manage_runtime_start_stop && runtime); +} + +static int sd_suspend_common(struct device *dev, bool runtime) +{ + struct scsi_disk *sdkp = dev_get_drvdata(dev); + int ret = 0; + + if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ + return 0; + + if (sdkp->WCE && sdkp->media_present) { + if (!sdkp->device->silence_suspend) + sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); + ret = sd_sync_cache(sdkp); + /* ignore OFFLINE device */ + if (ret == -ENODEV) + return 0; + + if (ret) + return ret; + } + + if (sd_do_start_stop(sdkp->device, runtime)) { + if (!sdkp->device->silence_suspend) + sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); + /* an error is not worth aborting a system sleep */ + ret = sd_start_stop_device(sdkp, 0); + if (!runtime) + ret = 0; + } + + if (!ret) + sdkp->suspended = true; + + return ret; +} + +static int sd_suspend_system(struct device *dev) +{ + if (pm_runtime_suspended(dev)) + return 0; + + return sd_suspend_common(dev, false); +} + +static int sd_suspend_runtime(struct device *dev) +{ + return sd_suspend_common(dev, true); +} + +static int sd_resume(struct device *dev, bool runtime) +{ + struct scsi_disk *sdkp = dev_get_drvdata(dev); + int ret = 0; + + if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ + return 0; + + if (!sd_do_start_stop(sdkp->device, runtime)) { + sdkp->suspended = false; + return 0; + } + + if (!sdkp->device->no_start_on_resume) { + sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); + ret = sd_start_stop_device(sdkp, 1); + } + + if (!ret) { + opal_unlock_from_suspend(sdkp->opal_dev); + sdkp->suspended = false; + } + + return ret; +} + +static int sd_resume_system(struct device *dev) +{ + if (pm_runtime_suspended(dev)) { + struct scsi_disk *sdkp = dev_get_drvdata(dev); + struct scsi_device *sdp = sdkp ? 
sdkp->device : NULL; + + if (sdp && sdp->force_runtime_start_on_system_start) + pm_request_resume(dev); + + return 0; + } + + return sd_resume(dev, false); +} + +static int sd_resume_runtime(struct device *dev) +{ + struct scsi_disk *sdkp = dev_get_drvdata(dev); + struct scsi_device *sdp; + + if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ + return 0; + + sdp = sdkp->device; + + if (sdp->ignore_media_change) { + /* clear the device's sense data */ + static const u8 cmd[10] = { REQUEST_SENSE }; + const struct scsi_exec_args exec_args = { + .req_flags = BLK_MQ_REQ_PM, + }; + + if (scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, + sdp->request_queue->rq_timeout, 1, + &exec_args)) + sd_printk(KERN_NOTICE, sdkp, + "Failed to clear sense data\n"); + } + + return sd_resume(dev, true); +} + +static const struct dev_pm_ops sd_pm_ops = { + .suspend = sd_suspend_system, + .resume = sd_resume_system, + .poweroff = sd_suspend_system, + .restore = sd_resume_system, + .runtime_suspend = sd_suspend_runtime, + .runtime_resume = sd_resume_runtime, +}; + +static struct scsi_driver sd_template = { + .gendrv = { + .name = "sd", + .owner = THIS_MODULE, + .probe = sd_probe, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .remove = sd_remove, + .shutdown = sd_shutdown, + .pm = &sd_pm_ops, + }, + .rescan = sd_rescan, + .init_command = sd_init_command, + .uninit_command = sd_uninit_command, + .done = sd_done, + .eh_action = sd_eh_action, + .eh_reset = sd_eh_reset, +}; + +/** + * init_sd - entry point for this driver (both when built in or when + * a module). + * + * Note: this function registers this driver with the scsi mid-level. + **/ +static int __init init_sd(void) +{ + int majors = 0, i, err; + + SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n")); + + for (i = 0; i < SD_MAJORS; i++) { + if (__register_blkdev(sd_major(i), "sd", sd_default_probe)) + continue; + majors++; + } + + if (!majors) + return -ENODEV; + + err = class_register(&sd_disk_class); + if (err) + goto err_out; + + sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0); + if (!sd_page_pool) { + printk(KERN_ERR "sd: can't init discard page pool\n"); + err = -ENOMEM; + goto err_out_class; + } + + err = scsi_register_driver(&sd_template.gendrv); + if (err) + goto err_out_driver; + + return 0; + +err_out_driver: + mempool_destroy(sd_page_pool); +err_out_class: + class_unregister(&sd_disk_class); +err_out: + for (i = 0; i < SD_MAJORS; i++) + unregister_blkdev(sd_major(i), "sd"); + return err; +} + +/** + * exit_sd - exit point for this driver (when it is a module). + * + * Note: this function unregisters this driver from the scsi mid-level. + **/ +static void __exit exit_sd(void) +{ + int i; + + SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n")); + + scsi_unregister_driver(&sd_template.gendrv); + mempool_destroy(sd_page_pool); + + class_unregister(&sd_disk_class); + + for (i = 0; i < SD_MAJORS; i++) + unregister_blkdev(sd_major(i), "sd"); +} + +module_init(init_sd); +module_exit(exit_sd); + +void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) +{ + scsi_print_sense_hdr(sdkp->device, + sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr); +} + +void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result) +{ + const char *hb_string = scsi_hostbyte_string(result); + + if (hb_string) + sd_printk(KERN_INFO, sdkp, + "%s: Result: hostbyte=%s driverbyte=%s\n", msg, + hb_string ? 
hb_string : "invalid", + "DRIVER_OK"); + else + sd_printk(KERN_INFO, sdkp, + "%s: Result: hostbyte=0x%02x driverbyte=%s\n", + msg, host_byte(result), "DRIVER_OK"); +} diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h new file mode 100644 index 000000000..409dda535 --- /dev/null +++ b/drivers/scsi/sd.h @@ -0,0 +1,299 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SCSI_DISK_H +#define _SCSI_DISK_H + +/* + * More than enough for everybody ;) The huge number of majors + * is a leftover from 16bit dev_t days, we don't really need that + * much numberspace. + */ +#define SD_MAJORS 16 + +/* + * Time out in seconds for disks and Magneto-opticals (which are slower). + */ +#define SD_TIMEOUT (30 * HZ) +#define SD_MOD_TIMEOUT (75 * HZ) +/* + * Flush timeout is a multiplier over the standard device timeout which is + * user modifiable via sysfs but initially set to SD_TIMEOUT + */ +#define SD_FLUSH_TIMEOUT_MULTIPLIER 2 +#define SD_WRITE_SAME_TIMEOUT (120 * HZ) + +/* + * Number of allowed retries + */ +#define SD_MAX_RETRIES 5 +#define SD_PASSTHROUGH_RETRIES 1 +#define SD_MAX_MEDIUM_TIMEOUTS 2 + +/* + * Size of the initial data buffer for mode and read capacity data + */ +#define SD_BUF_SIZE 512 + +/* + * Number of sectors at the end of the device to avoid multi-sector + * accesses to in the case of last_sector_bug + */ +#define SD_LAST_BUGGY_SECTORS 8 + +enum { + SD_EXT_CDB_SIZE = 32, /* Extended CDB size */ + SD_MEMPOOL_SIZE = 2, /* CDB pool size */ +}; + +enum { + SD_DEF_XFER_BLOCKS = 0xffff, + SD_MAX_XFER_BLOCKS = 0xffffffff, + SD_MAX_WS10_BLOCKS = 0xffff, + SD_MAX_WS16_BLOCKS = 0x7fffff, +}; + +enum { + SD_LBP_FULL = 0, /* Full logical block provisioning */ + SD_LBP_UNMAP, /* Use UNMAP command */ + SD_LBP_WS16, /* Use WRITE SAME(16) with UNMAP bit */ + SD_LBP_WS10, /* Use WRITE SAME(10) with UNMAP bit */ + SD_LBP_ZERO, /* Use WRITE SAME(10) with zero payload */ + SD_LBP_DISABLE, /* Discard disabled due to failed cmd */ +}; + +enum { + SD_ZERO_WRITE = 0, /* Use WRITE(10/16) command */ + SD_ZERO_WS, /* Use WRITE SAME(10/16) command */ + SD_ZERO_WS16_UNMAP, /* Use WRITE SAME(16) with UNMAP */ + SD_ZERO_WS10_UNMAP, /* Use WRITE SAME(10) with UNMAP */ +}; + +/** + * struct zoned_disk_info - Specific properties of a ZBC SCSI device. + * @nr_zones: number of zones. + * @zone_blocks: number of logical blocks per zone. + * + * This data structure holds the ZBC SCSI device properties that are retrieved + * twice: a first time before the gendisk capacity is known and a second time + * after the gendisk capacity is known. + */ +struct zoned_disk_info { + u32 nr_zones; + u32 zone_blocks; +}; + +struct scsi_disk { + struct scsi_device *device; + + /* + * disk_dev is used to show attributes in /sys/class/scsi_disk/, + * but otherwise not really needed. Do not use for refcounting. + */ + struct device disk_dev; + struct gendisk *disk; + struct opal_dev *opal_dev; +#ifdef CONFIG_BLK_DEV_ZONED + /* Updated during revalidation before the gendisk capacity is known. */ + struct zoned_disk_info early_zone_info; + /* Updated during revalidation after the gendisk capacity is known. */ + struct zoned_disk_info zone_info; + u32 zones_optimal_open; + u32 zones_optimal_nonseq; + u32 zones_max_open; + /* + * Either zero or a power of two. If not zero it means that the offset + * between zone starting LBAs is constant. 
+ */ + u32 zone_starting_lba_gran; + u32 *zones_wp_offset; + spinlock_t zones_wp_offset_lock; + u32 *rev_wp_offset; + struct mutex rev_mutex; + struct work_struct zone_wp_offset_work; + char *zone_wp_update_buf; +#endif + atomic_t openers; + sector_t capacity; /* size in logical blocks */ + int max_retries; + u32 min_xfer_blocks; + u32 max_xfer_blocks; + u32 opt_xfer_blocks; + u32 max_ws_blocks; + u32 max_unmap_blocks; + u32 unmap_granularity; + u32 unmap_alignment; + u32 index; + unsigned int physical_block_size; + unsigned int max_medium_access_timeouts; + unsigned int medium_access_timed_out; + u8 media_present; + u8 write_prot; + u8 protection_type;/* Data Integrity Field */ + u8 provisioning_mode; + u8 zeroing_mode; + u8 nr_actuators; /* Number of actuators */ + bool suspended; /* Disk is suspended (stopped) */ + unsigned ATO : 1; /* state of disk ATO bit */ + unsigned cache_override : 1; /* temp override of WCE,RCD */ + unsigned WCE : 1; /* state of disk WCE bit */ + unsigned RCD : 1; /* state of disk RCD bit, unused */ + unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ + unsigned first_scan : 1; + unsigned lbpme : 1; + unsigned lbprz : 1; + unsigned lbpu : 1; + unsigned lbpws : 1; + unsigned lbpws10 : 1; + unsigned lbpvpd : 1; + unsigned ws10 : 1; + unsigned ws16 : 1; + unsigned rc_basis: 2; + unsigned zoned: 2; + unsigned urswrz : 1; + unsigned security : 1; + unsigned ignore_medium_access_errors : 1; +}; +#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev) + +static inline struct scsi_disk *scsi_disk(struct gendisk *disk) +{ + return disk->private_data; +} + +#define sd_printk(prefix, sdsk, fmt, a...) \ + (sdsk)->disk ? \ + sdev_prefix_printk(prefix, (sdsk)->device, \ + (sdsk)->disk->disk_name, fmt, ##a) : \ + sdev_printk(prefix, (sdsk)->device, fmt, ##a) + +#define sd_first_printk(prefix, sdsk, fmt, a...) 
\ + do { \ + if ((sdsk)->first_scan) \ + sd_printk(prefix, sdsk, fmt, ##a); \ + } while (0) + +static inline int scsi_medium_access_command(struct scsi_cmnd *scmd) +{ + switch (scmd->cmnd[0]) { + case READ_6: + case READ_10: + case READ_12: + case READ_16: + case SYNCHRONIZE_CACHE: + case VERIFY: + case VERIFY_12: + case VERIFY_16: + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + case WRITE_SAME: + case WRITE_SAME_16: + case UNMAP: + return 1; + case VARIABLE_LENGTH_CMD: + switch (scmd->cmnd[9]) { + case READ_32: + case VERIFY_32: + case WRITE_32: + case WRITE_SAME_32: + return 1; + } + } + + return 0; +} + +static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks) +{ + return blocks << (ilog2(sdev->sector_size) - 9); +} + +static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks) +{ + return blocks * sdev->sector_size; +} + +static inline sector_t bytes_to_logical(struct scsi_device *sdev, unsigned int bytes) +{ + return bytes >> ilog2(sdev->sector_size); +} + +static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sector) +{ + return sector >> (ilog2(sdev->sector_size) - 9); +} + +#ifdef CONFIG_BLK_DEV_INTEGRITY + +extern void sd_dif_config_host(struct scsi_disk *); + +#else /* CONFIG_BLK_DEV_INTEGRITY */ + +static inline void sd_dif_config_host(struct scsi_disk *disk) +{ +} + +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +static inline int sd_is_zoned(struct scsi_disk *sdkp) +{ + return sdkp->zoned == 1 || sdkp->device->type == TYPE_ZBC; +} + +#ifdef CONFIG_BLK_DEV_ZONED + +void sd_zbc_free_zone_info(struct scsi_disk *sdkp); +int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]); +int sd_zbc_revalidate_zones(struct scsi_disk *sdkp); +blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, + unsigned char op, bool all); +unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, + struct scsi_sense_hdr *sshdr); +int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data); + +blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba, + unsigned int nr_blocks); + +#else /* CONFIG_BLK_DEV_ZONED */ + +static inline void sd_zbc_free_zone_info(struct scsi_disk *sdkp) {} + +static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]) +{ + return 0; +} + +static inline int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) +{ + return 0; +} + +static inline blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, + unsigned char op, + bool all) +{ + return BLK_STS_TARGET; +} + +static inline unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, + unsigned int good_bytes, struct scsi_sense_hdr *sshdr) +{ + return good_bytes; +} + +static inline blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, + sector_t *lba, + unsigned int nr_blocks) +{ + return BLK_STS_TARGET; +} + +#define sd_zbc_report_zones NULL + +#endif /* CONFIG_BLK_DEV_ZONED */ + +void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr); +void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result); + +#endif /* _SCSI_DISK_H */ diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c new file mode 100644 index 000000000..1df847b5f --- /dev/null +++ b/drivers/scsi/sd_dif.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * sd_dif.c - SCSI Data Integrity Field + * + * Copyright (C) 2007, 2008 Oracle Corporation + * Written by: Martin K. 
Petersen + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sd.h" + +/* + * Configure exchange of protection information between OS and HBA. + */ +void sd_dif_config_host(struct scsi_disk *sdkp) +{ + struct scsi_device *sdp = sdkp->device; + struct gendisk *disk = sdkp->disk; + u8 type = sdkp->protection_type; + struct blk_integrity bi; + int dif, dix; + + dif = scsi_host_dif_capable(sdp->host, type); + dix = scsi_host_dix_capable(sdp->host, type); + + if (!dix && scsi_host_dix_capable(sdp->host, 0)) { + dif = 0; dix = 1; + } + + if (!dix) { + blk_integrity_unregister(disk); + return; + } + + memset(&bi, 0, sizeof(bi)); + + /* Enable DMA of protection information */ + if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) { + if (type == T10_PI_TYPE3_PROTECTION) + bi.profile = &t10_pi_type3_ip; + else + bi.profile = &t10_pi_type1_ip; + + bi.flags |= BLK_INTEGRITY_IP_CHECKSUM; + } else + if (type == T10_PI_TYPE3_PROTECTION) + bi.profile = &t10_pi_type3_crc; + else + bi.profile = &t10_pi_type1_crc; + + bi.tuple_size = sizeof(struct t10_pi_tuple); + + if (dif && type) { + bi.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + + if (!sdkp->ATO) + goto out; + + if (type == T10_PI_TYPE3_PROTECTION) + bi.tag_size = sizeof(u16) + sizeof(u32); + else + bi.tag_size = sizeof(u16); + } + + sd_first_printk(KERN_NOTICE, sdkp, + "Enabling DIX %s, application tag size %u bytes\n", + bi.profile->name, bi.tag_size); +out: + blk_integrity_register(disk, &bi); +} + diff --git a/drivers/scsi/sd_trace.h b/drivers/scsi/sd_trace.h new file mode 100644 index 000000000..cba3c0b82 --- /dev/null +++ b/drivers/scsi/sd_trace.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2022 Western Digital Corporation or its affiliates. 
+ */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM sd + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE sd_trace + +#if !defined(_SD_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#include +#include +#include + +TRACE_EVENT(scsi_prepare_zone_append, + + TP_PROTO(struct scsi_cmnd *cmnd, sector_t lba, + unsigned int wp_offset), + + TP_ARGS(cmnd, lba, wp_offset), + + TP_STRUCT__entry( + __field( unsigned int, host_no ) + __field( unsigned int, channel ) + __field( unsigned int, id ) + __field( unsigned int, lun ) + __field( sector_t, lba ) + __field( unsigned int, wp_offset ) + ), + + TP_fast_assign( + __entry->host_no = cmnd->device->host->host_no; + __entry->channel = cmnd->device->channel; + __entry->id = cmnd->device->id; + __entry->lun = cmnd->device->lun; + __entry->lba = lba; + __entry->wp_offset = wp_offset; + ), + + TP_printk("host_no=%u, channel=%u id=%u lun=%u lba=%llu wp_offset=%u", + __entry->host_no, __entry->channel, __entry->id, + __entry->lun, __entry->lba, __entry->wp_offset) +); + +TRACE_EVENT(scsi_zone_wp_update, + + TP_PROTO(struct scsi_cmnd *cmnd, sector_t rq_sector, + unsigned int wp_offset, unsigned int good_bytes), + + TP_ARGS(cmnd, rq_sector, wp_offset, good_bytes), + + TP_STRUCT__entry( + __field( unsigned int, host_no ) + __field( unsigned int, channel ) + __field( unsigned int, id ) + __field( unsigned int, lun ) + __field( sector_t, rq_sector ) + __field( unsigned int, wp_offset ) + __field( unsigned int, good_bytes ) + ), + + TP_fast_assign( + __entry->host_no = cmnd->device->host->host_no; + __entry->channel = cmnd->device->channel; + __entry->id = cmnd->device->id; + __entry->lun = cmnd->device->lun; + __entry->rq_sector = rq_sector; + __entry->wp_offset = wp_offset; + __entry->good_bytes = good_bytes; + ), + + TP_printk("host_no=%u, channel=%u id=%u lun=%u rq_sector=%llu" \ + " wp_offset=%u good_bytes=%u", + __entry->host_no, __entry->channel, __entry->id, + __entry->lun, __entry->rq_sector, __entry->wp_offset, + __entry->good_bytes) +); +#endif /* _SD_TRACE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../drivers/scsi +#include diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c new file mode 100644 index 000000000..a25215507 --- /dev/null +++ b/drivers/scsi/sd_zbc.c @@ -0,0 +1,975 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SCSI Zoned Block commands + * + * Copyright (C) 2014-2015 SUSE Linux GmbH + * Written by: Hannes Reinecke + * Modified by: Damien Le Moal + * Modified by: Shaun Tancheff + */ + +#include +#include +#include +#include + +#include + +#include +#include + +#include "sd.h" + +#define CREATE_TRACE_POINTS +#include "sd_trace.h" + +/** + * sd_zbc_get_zone_wp_offset - Get zone write pointer offset. + * @zone: Zone for which to return the write pointer offset. + * + * Return: offset of the write pointer from the start of the zone. + */ +static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone) +{ + if (zone->type == ZBC_ZONE_TYPE_CONV) + return 0; + + switch (zone->cond) { + case BLK_ZONE_COND_IMP_OPEN: + case BLK_ZONE_COND_EXP_OPEN: + case BLK_ZONE_COND_CLOSED: + return zone->wp - zone->start; + case BLK_ZONE_COND_FULL: + return zone->len; + case BLK_ZONE_COND_EMPTY: + case BLK_ZONE_COND_OFFLINE: + case BLK_ZONE_COND_READONLY: + default: + /* + * Offline and read-only zones do not have a valid + * write pointer. Use 0 as for an empty zone. + */ + return 0; + } +} + +/* Whether or not a SCSI zone descriptor describes a gap zone. 
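+ * The zone type is carried in the low nibble of byte 0 of the 64-byte descriptor.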
*/ +static bool sd_zbc_is_gap_zone(const u8 buf[64]) +{ + return (buf[0] & 0xf) == ZBC_ZONE_TYPE_GAP; +} + +/** + * sd_zbc_parse_report - Parse a SCSI zone descriptor + * @sdkp: SCSI disk pointer. + * @buf: SCSI zone descriptor. + * @idx: Index of the zone relative to the first zone reported by the current + * sd_zbc_report_zones() call. + * @cb: Callback function pointer. + * @data: Second argument passed to @cb. + * + * Return: Value returned by @cb. + * + * Convert a SCSI zone descriptor into struct blk_zone format. Additionally, + * call @cb(blk_zone, @data). + */ +static int sd_zbc_parse_report(struct scsi_disk *sdkp, const u8 buf[64], + unsigned int idx, report_zones_cb cb, void *data) +{ + struct scsi_device *sdp = sdkp->device; + struct blk_zone zone = { 0 }; + sector_t start_lba, gran; + int ret; + + if (WARN_ON_ONCE(sd_zbc_is_gap_zone(buf))) + return -EINVAL; + + zone.type = buf[0] & 0x0f; + zone.cond = (buf[1] >> 4) & 0xf; + if (buf[1] & 0x01) + zone.reset = 1; + if (buf[1] & 0x02) + zone.non_seq = 1; + + start_lba = get_unaligned_be64(&buf[16]); + zone.start = logical_to_sectors(sdp, start_lba); + zone.capacity = logical_to_sectors(sdp, get_unaligned_be64(&buf[8])); + zone.len = zone.capacity; + if (sdkp->zone_starting_lba_gran) { + gran = logical_to_sectors(sdp, sdkp->zone_starting_lba_gran); + if (zone.len > gran) { + sd_printk(KERN_ERR, sdkp, + "Invalid zone at LBA %llu with capacity %llu and length %llu; granularity = %llu\n", + start_lba, + sectors_to_logical(sdp, zone.capacity), + sectors_to_logical(sdp, zone.len), + sectors_to_logical(sdp, gran)); + return -EINVAL; + } + /* + * Use the starting LBA granularity instead of the zone length + * obtained from the REPORT ZONES command. + */ + zone.len = gran; + } + if (zone.cond == ZBC_ZONE_COND_FULL) + zone.wp = zone.start + zone.len; + else + zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24])); + + ret = cb(&zone, idx, data); + if (ret) + return ret; + + if (sdkp->rev_wp_offset) + sdkp->rev_wp_offset[idx] = sd_zbc_get_zone_wp_offset(&zone); + + return 0; +} + +/** + * sd_zbc_do_report_zones - Issue a REPORT ZONES scsi command. + * @sdkp: The target disk + * @buf: vmalloc-ed buffer to use for the reply + * @buflen: the buffer size + * @lba: Start LBA of the report + * @partial: Do partial report + * + * For internal use during device validation. + * Using partial=true can significantly speed up execution of a report zones + * command because the disk does not have to count all possible report matching + * zones and will only report the count of zones fitting in the command reply + * buffer. 
+ */ +static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf, + unsigned int buflen, sector_t lba, + bool partial) +{ + struct scsi_device *sdp = sdkp->device; + const int timeout = sdp->request_queue->rq_timeout; + struct scsi_sense_hdr sshdr; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + unsigned char cmd[16]; + unsigned int rep_len; + int result; + + memset(cmd, 0, 16); + cmd[0] = ZBC_IN; + cmd[1] = ZI_REPORT_ZONES; + put_unaligned_be64(lba, &cmd[2]); + put_unaligned_be32(buflen, &cmd[10]); + if (partial) + cmd[14] = ZBC_REPORT_ZONE_PARTIAL; + + result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buf, buflen, + timeout, SD_MAX_RETRIES, &exec_args); + if (result) { + sd_printk(KERN_ERR, sdkp, + "REPORT ZONES start lba %llu failed\n", lba); + sd_print_result(sdkp, "REPORT ZONES", result); + if (result > 0 && scsi_sense_valid(&sshdr)) + sd_print_sense_hdr(sdkp, &sshdr); + return -EIO; + } + + rep_len = get_unaligned_be32(&buf[0]); + if (rep_len < 64) { + sd_printk(KERN_ERR, sdkp, + "REPORT ZONES report invalid length %u\n", + rep_len); + return -EIO; + } + + return 0; +} + +/** + * sd_zbc_alloc_report_buffer() - Allocate a buffer for report zones reply. + * @sdkp: The target disk + * @nr_zones: Maximum number of zones to report + * @buflen: Size of the buffer allocated + * + * Try to allocate a reply buffer for the number of requested zones. + * The size of the buffer allocated may be smaller than requested to + * satify the device constraint (max_hw_sectors, max_segments, etc). + * + * Return the address of the allocated buffer and update @buflen with + * the size of the allocated buffer. + */ +static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, + unsigned int nr_zones, size_t *buflen) +{ + struct request_queue *q = sdkp->disk->queue; + size_t bufsize; + void *buf; + + /* + * Report zone buffer size should be at most 64B times the number of + * zones requested plus the 64B reply header, but should be aligned + * to SECTOR_SIZE for ATA devices. + * Make sure that this size does not exceed the hardware capabilities. + * Furthermore, since the report zone command cannot be split, make + * sure that the allocated buffer can always be mapped by limiting the + * number of pages allocated to the HBA max segments limit. + */ + nr_zones = min(nr_zones, sdkp->zone_info.nr_zones); + bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE); + bufsize = min_t(size_t, bufsize, + queue_max_hw_sectors(q) << SECTOR_SHIFT); + bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); + + while (bufsize >= SECTOR_SIZE) { + buf = __vmalloc(bufsize, + GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY); + if (buf) { + *buflen = bufsize; + return buf; + } + bufsize = rounddown(bufsize >> 1, SECTOR_SIZE); + } + + return NULL; +} + +/** + * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors. + * @sdkp: The target disk + */ +static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp) +{ + return logical_to_sectors(sdkp->device, sdkp->zone_info.zone_blocks); +} + +/** + * sd_zbc_report_zones - SCSI .report_zones() callback. + * @disk: Disk to report zones for. + * @sector: Start sector. + * @nr_zones: Maximum number of zones to report. + * @cb: Callback function called to report zone information. + * @data: Second argument passed to @cb. + * + * Called by the block layer to iterate over zone information. See also the + * disk->fops->report_zones() calls in block/blk-zoned.c. 
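+ *
+ * Illustrative annotation (summarising the loop below): descriptors are
+ * walked 64 bytes at a time starting right after the 64-byte reply header,
+ * each one is converted by sd_zbc_parse_report() which invokes
+ * @cb(&blk_zone, idx, @data), and on success the number of zones reported
+ * is returned to the caller (a negative errno is returned on failure).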
+ */ +int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data) +{ + struct scsi_disk *sdkp = scsi_disk(disk); + sector_t lba = sectors_to_logical(sdkp->device, sector); + unsigned int nr, i; + unsigned char *buf; + u64 zone_length, start_lba; + size_t offset, buflen = 0; + int zone_idx = 0; + int ret; + + if (!sd_is_zoned(sdkp)) + /* Not a zoned device */ + return -EOPNOTSUPP; + + if (!sdkp->capacity) + /* Device gone or invalid */ + return -ENODEV; + + buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen); + if (!buf) + return -ENOMEM; + + while (zone_idx < nr_zones && lba < sdkp->capacity) { + ret = sd_zbc_do_report_zones(sdkp, buf, buflen, lba, true); + if (ret) + goto out; + + offset = 0; + nr = min(nr_zones, get_unaligned_be32(&buf[0]) / 64); + if (!nr) + break; + + for (i = 0; i < nr && zone_idx < nr_zones; i++) { + offset += 64; + start_lba = get_unaligned_be64(&buf[offset + 16]); + zone_length = get_unaligned_be64(&buf[offset + 8]); + if ((zone_idx == 0 && + (lba < start_lba || + lba >= start_lba + zone_length)) || + (zone_idx > 0 && start_lba != lba) || + start_lba + zone_length < start_lba) { + sd_printk(KERN_ERR, sdkp, + "Zone %d at LBA %llu is invalid: %llu + %llu\n", + zone_idx, lba, start_lba, zone_length); + ret = -EINVAL; + goto out; + } + lba = start_lba + zone_length; + if (sd_zbc_is_gap_zone(&buf[offset])) { + if (sdkp->zone_starting_lba_gran) + continue; + sd_printk(KERN_ERR, sdkp, + "Gap zone without constant LBA offsets\n"); + ret = -EINVAL; + goto out; + } + + ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx, + cb, data); + if (ret) + goto out; + + zone_idx++; + } + } + + ret = zone_idx; +out: + kvfree(buf); + return ret; +} + +static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd) +{ + struct request *rq = scsi_cmd_to_rq(cmd); + struct scsi_disk *sdkp = scsi_disk(rq->q->disk); + sector_t sector = blk_rq_pos(rq); + + if (!sd_is_zoned(sdkp)) + /* Not a zoned device */ + return BLK_STS_IOERR; + + if (sdkp->device->changed) + return BLK_STS_IOERR; + + if (sector & (sd_zbc_zone_sectors(sdkp) - 1)) + /* Unaligned request */ + return BLK_STS_IOERR; + + return BLK_STS_OK; +} + +#define SD_ZBC_INVALID_WP_OFST (~0u) +#define SD_ZBC_UPDATING_WP_OFST (SD_ZBC_INVALID_WP_OFST - 1) + +static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx, + void *data) +{ + struct scsi_disk *sdkp = data; + + lockdep_assert_held(&sdkp->zones_wp_offset_lock); + + sdkp->zones_wp_offset[idx] = sd_zbc_get_zone_wp_offset(zone); + + return 0; +} + +/* + * An attempt to append a zone triggered an invalid write pointer error. + * Reread the write pointer of the zone(s) in which the append failed. 
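+ *
+ * Illustrative annotation (describing the work function below): zones whose
+ * cached entry was set to SD_ZBC_UPDATING_WP_OFST are refreshed one at a
+ * time; the wp-offset spinlock is dropped around a partial REPORT ZONES
+ * starting at that zone (into sdkp->zone_wp_update_buf) and the first
+ * returned descriptor, at offset 64, is re-parsed to refresh
+ * sdkp->zones_wp_offset[zno].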
+ */ +static void sd_zbc_update_wp_offset_workfn(struct work_struct *work) +{ + struct scsi_disk *sdkp; + unsigned long flags; + sector_t zno; + int ret; + + sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work); + + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); + for (zno = 0; zno < sdkp->zone_info.nr_zones; zno++) { + if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST) + continue; + + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); + ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf, + SD_BUF_SIZE, + zno * sdkp->zone_info.zone_blocks, true); + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); + if (!ret) + sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64, + zno, sd_zbc_update_wp_offset_cb, + sdkp); + } + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); + + scsi_device_put(sdkp->device); +} + +/** + * sd_zbc_prepare_zone_append() - Prepare an emulated ZONE_APPEND command. + * @cmd: the command to setup + * @lba: the LBA to patch + * @nr_blocks: the number of LBAs to be written + * + * Called from sd_setup_read_write_cmnd() for REQ_OP_ZONE_APPEND. + * @sd_zbc_prepare_zone_append() handles the necessary zone wrote locking and + * patching of the lba for an emulated ZONE_APPEND command. + * + * In case the cached write pointer offset is %SD_ZBC_INVALID_WP_OFST it will + * schedule a REPORT ZONES command and return BLK_STS_IOERR. + */ +blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba, + unsigned int nr_blocks) +{ + struct request *rq = scsi_cmd_to_rq(cmd); + struct scsi_disk *sdkp = scsi_disk(rq->q->disk); + unsigned int wp_offset, zno = blk_rq_zone_no(rq); + unsigned long flags; + blk_status_t ret; + + ret = sd_zbc_cmnd_checks(cmd); + if (ret != BLK_STS_OK) + return ret; + + if (!blk_rq_zone_is_seq(rq)) + return BLK_STS_IOERR; + + /* Unlock of the write lock will happen in sd_zbc_complete() */ + if (!blk_req_zone_write_trylock(rq)) + return BLK_STS_ZONE_RESOURCE; + + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); + wp_offset = sdkp->zones_wp_offset[zno]; + switch (wp_offset) { + case SD_ZBC_INVALID_WP_OFST: + /* + * We are about to schedule work to update a zone write pointer + * offset, which will cause the zone append command to be + * requeued. So make sure that the scsi device does not go away + * while the work is being processed. + */ + if (scsi_device_get(sdkp->device)) { + ret = BLK_STS_IOERR; + break; + } + sdkp->zones_wp_offset[zno] = SD_ZBC_UPDATING_WP_OFST; + schedule_work(&sdkp->zone_wp_offset_work); + fallthrough; + case SD_ZBC_UPDATING_WP_OFST: + ret = BLK_STS_DEV_RESOURCE; + break; + default: + wp_offset = sectors_to_logical(sdkp->device, wp_offset); + if (wp_offset + nr_blocks > sdkp->zone_info.zone_blocks) { + ret = BLK_STS_IOERR; + break; + } + + trace_scsi_prepare_zone_append(cmd, *lba, wp_offset); + *lba += wp_offset; + } + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); + if (ret) + blk_req_zone_write_unlock(rq); + return ret; +} + +/** + * sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations + * can be RESET WRITE POINTER, OPEN, CLOSE or FINISH. + * @cmd: the command to setup + * @op: Operation to be performed + * @all: All zones control + * + * Called from sd_init_command() for REQ_OP_ZONE_RESET, REQ_OP_ZONE_RESET_ALL, + * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE or REQ_OP_ZONE_FINISH requests. 
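+ *
+ * Illustrative annotation (summarising the CDB construction below): the
+ * 16-byte ZBC OUT CDB is built as
+ *
+ *   cmnd[0]      ZBC_OUT
+ *   cmnd[1]      the service action passed in @op
+ *   cmnd[2..9]   zone start LBA, big endian (only set when @all is false)
+ *   cmnd[14]     bit 0 set when @all is true (act on all zones)
+ *
+ * The command transfers no data (DMA_NONE, transfersize 0) and is not
+ * retried (allowed == 0).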
+ */ +blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, + unsigned char op, bool all) +{ + struct request *rq = scsi_cmd_to_rq(cmd); + sector_t sector = blk_rq_pos(rq); + struct scsi_disk *sdkp = scsi_disk(rq->q->disk); + sector_t block = sectors_to_logical(sdkp->device, sector); + blk_status_t ret; + + ret = sd_zbc_cmnd_checks(cmd); + if (ret != BLK_STS_OK) + return ret; + + cmd->cmd_len = 16; + memset(cmd->cmnd, 0, cmd->cmd_len); + cmd->cmnd[0] = ZBC_OUT; + cmd->cmnd[1] = op; + if (all) + cmd->cmnd[14] = 0x1; + else + put_unaligned_be64(block, &cmd->cmnd[2]); + + rq->timeout = SD_TIMEOUT; + cmd->sc_data_direction = DMA_NONE; + cmd->transfersize = 0; + cmd->allowed = 0; + + return BLK_STS_OK; +} + +static bool sd_zbc_need_zone_wp_update(struct request *rq) +{ + switch (req_op(rq)) { + case REQ_OP_ZONE_APPEND: + case REQ_OP_ZONE_FINISH: + case REQ_OP_ZONE_RESET: + case REQ_OP_ZONE_RESET_ALL: + return true; + case REQ_OP_WRITE: + case REQ_OP_WRITE_ZEROES: + return blk_rq_zone_is_seq(rq); + default: + return false; + } +} + +/** + * sd_zbc_zone_wp_update - Update cached zone write pointer upon cmd completion + * @cmd: Completed command + * @good_bytes: Command reply bytes + * + * Called from sd_zbc_complete() to handle the update of the cached zone write + * pointer value in case an update is needed. + */ +static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd, + unsigned int good_bytes) +{ + int result = cmd->result; + struct request *rq = scsi_cmd_to_rq(cmd); + struct scsi_disk *sdkp = scsi_disk(rq->q->disk); + unsigned int zno = blk_rq_zone_no(rq); + enum req_op op = req_op(rq); + unsigned long flags; + + /* + * If we got an error for a command that needs updating the write + * pointer offset cache, we must mark the zone wp offset entry as + * invalid to force an update from disk the next time a zone append + * command is issued. + */ + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); + + if (result && op != REQ_OP_ZONE_RESET_ALL) { + if (op == REQ_OP_ZONE_APPEND) { + /* Force complete completion (no retry) */ + good_bytes = 0; + scsi_set_resid(cmd, blk_rq_bytes(rq)); + } + + /* + * Force an update of the zone write pointer offset on + * the next zone append access. + */ + if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST) + sdkp->zones_wp_offset[zno] = SD_ZBC_INVALID_WP_OFST; + goto unlock_wp_offset; + } + + switch (op) { + case REQ_OP_ZONE_APPEND: + trace_scsi_zone_wp_update(cmd, rq->__sector, + sdkp->zones_wp_offset[zno], good_bytes); + rq->__sector += sdkp->zones_wp_offset[zno]; + fallthrough; + case REQ_OP_WRITE_ZEROES: + case REQ_OP_WRITE: + if (sdkp->zones_wp_offset[zno] < sd_zbc_zone_sectors(sdkp)) + sdkp->zones_wp_offset[zno] += + good_bytes >> SECTOR_SHIFT; + break; + case REQ_OP_ZONE_RESET: + sdkp->zones_wp_offset[zno] = 0; + break; + case REQ_OP_ZONE_FINISH: + sdkp->zones_wp_offset[zno] = sd_zbc_zone_sectors(sdkp); + break; + case REQ_OP_ZONE_RESET_ALL: + memset(sdkp->zones_wp_offset, 0, + sdkp->zone_info.nr_zones * sizeof(unsigned int)); + break; + default: + break; + } + +unlock_wp_offset: + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); + + return good_bytes; +} + +/** + * sd_zbc_complete - ZBC command post processing. + * @cmd: Completed command + * @good_bytes: Command reply bytes + * @sshdr: command sense header + * + * Called from sd_done() to handle zone commands errors and updates to the + * device queue zone write pointer offset cahce. 
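+ *
+ * Illustrative example (hypothetical numbers; behaviour taken from
+ * sd_zbc_zone_wp_update() above): when an emulated zone append to a zone
+ * with a cached write pointer offset of 8 sectors completes with
+ * good_bytes == 4096, rq->__sector is advanced by those 8 sectors so that
+ * the block layer sees the LBA actually written, and the cached offset then
+ * grows by 4096 >> SECTOR_SHIFT == 8 sectors. A failed command (other than
+ * a reset-all) normally marks the zone's cached offset
+ * SD_ZBC_INVALID_WP_OFST so that it is re-read from the device before the
+ * next zone append.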
+ */ +unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, + struct scsi_sense_hdr *sshdr) +{ + int result = cmd->result; + struct request *rq = scsi_cmd_to_rq(cmd); + + if (op_is_zone_mgmt(req_op(rq)) && + result && + sshdr->sense_key == ILLEGAL_REQUEST && + sshdr->asc == 0x24) { + /* + * INVALID FIELD IN CDB error: a zone management command was + * attempted on a conventional zone. Nothing to worry about, + * so be quiet about the error. + */ + rq->rq_flags |= RQF_QUIET; + } else if (sd_zbc_need_zone_wp_update(rq)) + good_bytes = sd_zbc_zone_wp_update(cmd, good_bytes); + + if (req_op(rq) == REQ_OP_ZONE_APPEND) + blk_req_zone_write_unlock(rq); + + return good_bytes; +} + +/** + * sd_zbc_check_zoned_characteristics - Check zoned block device characteristics + * @sdkp: Target disk + * @buf: Buffer where to store the VPD page data + * + * Read VPD page B6, get information and check that reads are unconstrained. + */ +static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp, + unsigned char *buf) +{ + u64 zone_starting_lba_gran; + + if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) { + sd_printk(KERN_NOTICE, sdkp, + "Read zoned characteristics VPD page failed\n"); + return -ENODEV; + } + + if (sdkp->device->type != TYPE_ZBC) { + /* Host-aware */ + sdkp->urswrz = 1; + sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]); + sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]); + sdkp->zones_max_open = 0; + return 0; + } + + /* Host-managed */ + sdkp->urswrz = buf[4] & 1; + sdkp->zones_optimal_open = 0; + sdkp->zones_optimal_nonseq = 0; + sdkp->zones_max_open = get_unaligned_be32(&buf[16]); + /* Check zone alignment method */ + switch (buf[23] & 0xf) { + case 0: + case ZBC_CONSTANT_ZONE_LENGTH: + /* Use zone length */ + break; + case ZBC_CONSTANT_ZONE_START_OFFSET: + zone_starting_lba_gran = get_unaligned_be64(&buf[24]); + if (zone_starting_lba_gran == 0 || + !is_power_of_2(zone_starting_lba_gran) || + logical_to_sectors(sdkp->device, zone_starting_lba_gran) > + UINT_MAX) { + sd_printk(KERN_ERR, sdkp, + "Invalid zone starting LBA granularity %llu\n", + zone_starting_lba_gran); + return -ENODEV; + } + sdkp->zone_starting_lba_gran = zone_starting_lba_gran; + break; + default: + sd_printk(KERN_ERR, sdkp, "Invalid zone alignment method\n"); + return -ENODEV; + } + + /* + * Check for unconstrained reads: host-managed devices with + * constrained reads (drives failing read after write pointer) + * are not supported. + */ + if (!sdkp->urswrz) { + if (sdkp->first_scan) + sd_printk(KERN_NOTICE, sdkp, + "constrained reads devices are not supported\n"); + return -ENODEV; + } + + return 0; +} + +/** + * sd_zbc_check_capacity - Check the device capacity + * @sdkp: Target disk + * @buf: command buffer + * @zblocks: zone size in logical blocks + * + * Get the device zone size and check that the device capacity as reported + * by READ CAPACITY matches the max_lba value (plus one) of the report zones + * command reply for devices with RC_BASIS == 0. + * + * Returns 0 upon success or an error code upon failure. 
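+ *
+ * Illustrative example (hypothetical numbers): for a host-managed drive
+ * whose first reported zone is 524288 logical blocks long, zone_blocks =
+ * 524288 passes the power-of-two check and *zblocks is set accordingly;
+ * sd_zbc_read_zones() later derives the zone count as
+ * round_up(capacity, zone_blocks) >> ilog2(zone_blocks).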
+ */ +static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf, + u32 *zblocks) +{ + u64 zone_blocks; + sector_t max_lba; + unsigned char *rec; + int ret; + + /* Do a report zone to get max_lba and the size of the first zone */ + ret = sd_zbc_do_report_zones(sdkp, buf, SD_BUF_SIZE, 0, false); + if (ret) + return ret; + + if (sdkp->rc_basis == 0) { + /* The max_lba field is the capacity of this device */ + max_lba = get_unaligned_be64(&buf[8]); + if (sdkp->capacity != max_lba + 1) { + if (sdkp->first_scan) + sd_printk(KERN_WARNING, sdkp, + "Changing capacity from %llu to max LBA+1 %llu\n", + (unsigned long long)sdkp->capacity, + (unsigned long long)max_lba + 1); + sdkp->capacity = max_lba + 1; + } + } + + if (sdkp->zone_starting_lba_gran == 0) { + /* Get the size of the first reported zone */ + rec = buf + 64; + zone_blocks = get_unaligned_be64(&rec[8]); + if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) { + if (sdkp->first_scan) + sd_printk(KERN_NOTICE, sdkp, + "Zone size too large\n"); + return -EFBIG; + } + } else { + zone_blocks = sdkp->zone_starting_lba_gran; + } + + if (!is_power_of_2(zone_blocks)) { + sd_printk(KERN_ERR, sdkp, + "Zone size %llu is not a power of two.\n", + zone_blocks); + return -EINVAL; + } + + *zblocks = zone_blocks; + + return 0; +} + +static void sd_zbc_print_zones(struct scsi_disk *sdkp) +{ + if (!sd_is_zoned(sdkp) || !sdkp->capacity) + return; + + if (sdkp->capacity & (sdkp->zone_info.zone_blocks - 1)) + sd_printk(KERN_NOTICE, sdkp, + "%u zones of %u logical blocks + 1 runt zone\n", + sdkp->zone_info.nr_zones - 1, + sdkp->zone_info.zone_blocks); + else + sd_printk(KERN_NOTICE, sdkp, + "%u zones of %u logical blocks\n", + sdkp->zone_info.nr_zones, + sdkp->zone_info.zone_blocks); +} + +static int sd_zbc_init_disk(struct scsi_disk *sdkp) +{ + sdkp->zones_wp_offset = NULL; + spin_lock_init(&sdkp->zones_wp_offset_lock); + sdkp->rev_wp_offset = NULL; + mutex_init(&sdkp->rev_mutex); + INIT_WORK(&sdkp->zone_wp_offset_work, sd_zbc_update_wp_offset_workfn); + sdkp->zone_wp_update_buf = kzalloc(SD_BUF_SIZE, GFP_KERNEL); + if (!sdkp->zone_wp_update_buf) + return -ENOMEM; + + return 0; +} + +void sd_zbc_free_zone_info(struct scsi_disk *sdkp) +{ + if (!sdkp->zone_wp_update_buf) + return; + + /* Serialize against revalidate zones */ + mutex_lock(&sdkp->rev_mutex); + + kvfree(sdkp->zones_wp_offset); + sdkp->zones_wp_offset = NULL; + kfree(sdkp->zone_wp_update_buf); + sdkp->zone_wp_update_buf = NULL; + + sdkp->early_zone_info = (struct zoned_disk_info){ }; + sdkp->zone_info = (struct zoned_disk_info){ }; + + mutex_unlock(&sdkp->rev_mutex); +} + +static void sd_zbc_revalidate_zones_cb(struct gendisk *disk) +{ + struct scsi_disk *sdkp = scsi_disk(disk); + + swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset); +} + +/* + * Call blk_revalidate_disk_zones() if any of the zoned disk properties have + * changed that make it necessary to call that function. Called by + * sd_revalidate_disk() after the gendisk capacity has been set. + */ +int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) +{ + struct gendisk *disk = sdkp->disk; + struct request_queue *q = disk->queue; + u32 zone_blocks = sdkp->early_zone_info.zone_blocks; + unsigned int nr_zones = sdkp->early_zone_info.nr_zones; + int ret = 0; + unsigned int flags; + + /* + * For all zoned disks, initialize zone append emulation data if not + * already done. 
This is necessary also for host-aware disks used as + * regular disks due to the presence of partitions as these partitions + * may be deleted and the disk zoned model changed back from + * BLK_ZONED_NONE to BLK_ZONED_HA. + */ + if (sd_is_zoned(sdkp) && !sdkp->zone_wp_update_buf) { + ret = sd_zbc_init_disk(sdkp); + if (ret) + return ret; + } + + /* + * There is nothing to do for regular disks, including host-aware disks + * that have partitions. + */ + if (!blk_queue_is_zoned(q)) + return 0; + + /* + * Make sure revalidate zones are serialized to ensure exclusive + * updates of the scsi disk data. + */ + mutex_lock(&sdkp->rev_mutex); + + if (sdkp->zone_info.zone_blocks == zone_blocks && + sdkp->zone_info.nr_zones == nr_zones && + disk->nr_zones == nr_zones) + goto unlock; + + flags = memalloc_noio_save(); + sdkp->zone_info.zone_blocks = zone_blocks; + sdkp->zone_info.nr_zones = nr_zones; + sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL); + if (!sdkp->rev_wp_offset) { + ret = -ENOMEM; + memalloc_noio_restore(flags); + goto unlock; + } + + blk_queue_chunk_sectors(q, + logical_to_sectors(sdkp->device, zone_blocks)); + blk_queue_max_zone_append_sectors(q, + q->limits.max_segments << PAGE_SECTORS_SHIFT); + + ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb); + + memalloc_noio_restore(flags); + kvfree(sdkp->rev_wp_offset); + sdkp->rev_wp_offset = NULL; + + if (ret) { + sdkp->zone_info = (struct zoned_disk_info){ }; + sdkp->capacity = 0; + goto unlock; + } + + sd_zbc_print_zones(sdkp); + +unlock: + mutex_unlock(&sdkp->rev_mutex); + + return ret; +} + +/** + * sd_zbc_read_zones - Read zone information and update the request queue + * @sdkp: SCSI disk pointer. + * @buf: 512 byte buffer used for storing SCSI command output. + * + * Read zone information and update the request queue zone characteristics and + * also the zoned device information in *sdkp. Called by sd_revalidate_disk() + * before the gendisk capacity has been set. + */ +int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]) +{ + struct gendisk *disk = sdkp->disk; + struct request_queue *q = disk->queue; + unsigned int nr_zones; + u32 zone_blocks = 0; + int ret; + + if (!sd_is_zoned(sdkp)) { + /* + * Device managed or normal SCSI disk, no special handling + * required. Nevertheless, free the disk zone information in + * case the device type changed. + */ + sd_zbc_free_zone_info(sdkp); + return 0; + } + + /* READ16/WRITE16/SYNC16 is mandatory for ZBC devices */ + sdkp->device->use_16_for_rw = 1; + sdkp->device->use_10_for_rw = 0; + sdkp->device->use_16_for_sync = 1; + + if (!blk_queue_is_zoned(q)) { + /* + * This can happen for a host aware disk with partitions. + * The block device zone model was already cleared by + * disk_set_zoned(). Only free the scsi disk zone + * information and exit early. 
+ */ + sd_zbc_free_zone_info(sdkp); + return 0; + } + + /* Check zoned block device characteristics (unconstrained reads) */ + ret = sd_zbc_check_zoned_characteristics(sdkp, buf); + if (ret) + goto err; + + /* Check the device capacity reported by report zones */ + ret = sd_zbc_check_capacity(sdkp, buf, &zone_blocks); + if (ret != 0) + goto err; + + /* The drive satisfies the kernel restrictions: set it up */ + blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); + blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE); + if (sdkp->zones_max_open == U32_MAX) + disk_set_max_open_zones(disk, 0); + else + disk_set_max_open_zones(disk, sdkp->zones_max_open); + disk_set_max_active_zones(disk, 0); + nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks); + + sdkp->early_zone_info.nr_zones = nr_zones; + sdkp->early_zone_info.zone_blocks = zone_blocks; + + return 0; + +err: + sdkp->capacity = 0; + + return ret; +} diff --git a/drivers/scsi/sense_codes.h b/drivers/scsi/sense_codes.h new file mode 100644 index 000000000..805d4c13d --- /dev/null +++ b/drivers/scsi/sense_codes.h @@ -0,0 +1,879 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * The canonical list of T10 Additional Sense Codes is available at: + * http://www.t10.org/lists/asc-num.txt [most recent: 20200817] + */ + +SENSE_CODE(0x0000, "No additional sense information") +SENSE_CODE(0x0001, "Filemark detected") +SENSE_CODE(0x0002, "End-of-partition/medium detected") +SENSE_CODE(0x0003, "Setmark detected") +SENSE_CODE(0x0004, "Beginning-of-partition/medium detected") +SENSE_CODE(0x0005, "End-of-data detected") +SENSE_CODE(0x0006, "I/O process terminated") +SENSE_CODE(0x0007, "Programmable early warning detected") +SENSE_CODE(0x0011, "Audio play operation in progress") +SENSE_CODE(0x0012, "Audio play operation paused") +SENSE_CODE(0x0013, "Audio play operation successfully completed") +SENSE_CODE(0x0014, "Audio play operation stopped due to error") +SENSE_CODE(0x0015, "No current audio status to return") +SENSE_CODE(0x0016, "Operation in progress") +SENSE_CODE(0x0017, "Cleaning requested") +SENSE_CODE(0x0018, "Erase operation in progress") +SENSE_CODE(0x0019, "Locate operation in progress") +SENSE_CODE(0x001A, "Rewind operation in progress") +SENSE_CODE(0x001B, "Set capacity operation in progress") +SENSE_CODE(0x001C, "Verify operation in progress") +SENSE_CODE(0x001D, "ATA pass through information available") +SENSE_CODE(0x001E, "Conflicting SA creation request") +SENSE_CODE(0x001F, "Logical unit transitioning to another power condition") +SENSE_CODE(0x0020, "Extended copy information available") +SENSE_CODE(0x0021, "Atomic command aborted due to ACA") +SENSE_CODE(0x0022, "Deferred microcode is pending") + +SENSE_CODE(0x0100, "No index/sector signal") + +SENSE_CODE(0x0200, "No seek complete") + +SENSE_CODE(0x0300, "Peripheral device write fault") +SENSE_CODE(0x0301, "No write current") +SENSE_CODE(0x0302, "Excessive write errors") + +SENSE_CODE(0x0400, "Logical unit not ready, cause not reportable") +SENSE_CODE(0x0401, "Logical unit is in process of becoming ready") +SENSE_CODE(0x0402, "Logical unit not ready, initializing command required") +SENSE_CODE(0x0403, "Logical unit not ready, manual intervention required") +SENSE_CODE(0x0404, "Logical unit not ready, format in progress") +SENSE_CODE(0x0405, "Logical unit not ready, rebuild in progress") +SENSE_CODE(0x0406, "Logical unit not ready, recalculation in progress") +SENSE_CODE(0x0407, "Logical unit not ready, operation in progress") +SENSE_CODE(0x0408, "Logical 
unit not ready, long write in progress") +SENSE_CODE(0x0409, "Logical unit not ready, self-test in progress") +SENSE_CODE(0x040A, "Logical unit not accessible, asymmetric access state transition") +SENSE_CODE(0x040B, "Logical unit not accessible, target port in standby state") +SENSE_CODE(0x040C, "Logical unit not accessible, target port in unavailable state") +SENSE_CODE(0x040D, "Logical unit not ready, structure check required") +SENSE_CODE(0x040E, "Logical unit not ready, security session in progress") +SENSE_CODE(0x0410, "Logical unit not ready, auxiliary memory not accessible") +SENSE_CODE(0x0411, "Logical unit not ready, notify (enable spinup) required") +SENSE_CODE(0x0412, "Logical unit not ready, offline") +SENSE_CODE(0x0413, "Logical unit not ready, SA creation in progress") +SENSE_CODE(0x0414, "Logical unit not ready, space allocation in progress") +SENSE_CODE(0x0415, "Logical unit not ready, robotics disabled") +SENSE_CODE(0x0416, "Logical unit not ready, configuration required") +SENSE_CODE(0x0417, "Logical unit not ready, calibration required") +SENSE_CODE(0x0418, "Logical unit not ready, a door is open") +SENSE_CODE(0x0419, "Logical unit not ready, operating in sequential mode") +SENSE_CODE(0x041A, "Logical unit not ready, start stop unit command in progress") +SENSE_CODE(0x041B, "Logical unit not ready, sanitize in progress") +SENSE_CODE(0x041C, "Logical unit not ready, additional power use not yet granted") +SENSE_CODE(0x041D, "Logical unit not ready, configuration in progress") +SENSE_CODE(0x041E, "Logical unit not ready, microcode activation required") +SENSE_CODE(0x041F, "Logical unit not ready, microcode download required") +SENSE_CODE(0x0420, "Logical unit not ready, logical unit reset required") +SENSE_CODE(0x0421, "Logical unit not ready, hard reset required") +SENSE_CODE(0x0422, "Logical unit not ready, power cycle required") +SENSE_CODE(0x0423, "Logical unit not ready, affiliation required") +SENSE_CODE(0x0424, "Depopulation in progress") +SENSE_CODE(0x0425, "Depopulation restoration in progress") + +SENSE_CODE(0x0500, "Logical unit does not respond to selection") + +SENSE_CODE(0x0600, "No reference position found") + +SENSE_CODE(0x0700, "Multiple peripheral devices selected") + +SENSE_CODE(0x0800, "Logical unit communication failure") +SENSE_CODE(0x0801, "Logical unit communication time-out") +SENSE_CODE(0x0802, "Logical unit communication parity error") +SENSE_CODE(0x0803, "Logical unit communication CRC error (Ultra-DMA/32)") +SENSE_CODE(0x0804, "Unreachable copy target") + +SENSE_CODE(0x0900, "Track following error") +SENSE_CODE(0x0901, "Tracking servo failure") +SENSE_CODE(0x0902, "Focus servo failure") +SENSE_CODE(0x0903, "Spindle servo failure") +SENSE_CODE(0x0904, "Head select fault") +SENSE_CODE(0x0905, "Vibration induced tracking error") + +SENSE_CODE(0x0A00, "Error log overflow") + +SENSE_CODE(0x0B00, "Warning") +SENSE_CODE(0x0B01, "Warning - specified temperature exceeded") +SENSE_CODE(0x0B02, "Warning - enclosure degraded") +SENSE_CODE(0x0B03, "Warning - background self-test failed") +SENSE_CODE(0x0B04, "Warning - background pre-scan detected medium error") +SENSE_CODE(0x0B05, "Warning - background medium scan detected medium error") +SENSE_CODE(0x0B06, "Warning - non-volatile cache now volatile") +SENSE_CODE(0x0B07, "Warning - degraded power to non-volatile cache") +SENSE_CODE(0x0B08, "Warning - power loss expected") +SENSE_CODE(0x0B09, "Warning - device statistics notification active") +SENSE_CODE(0x0B0A, "Warning - high critical temperature limit 
exceeded") +SENSE_CODE(0x0B0B, "Warning - low critical temperature limit exceeded") +SENSE_CODE(0x0B0C, "Warning - high operating temperature limit exceeded") +SENSE_CODE(0x0B0D, "Warning - low operating temperature limit exceeded") +SENSE_CODE(0x0B0E, "Warning - high critical humidity limit exceeded") +SENSE_CODE(0x0B0F, "Warning - low critical humidity limit exceeded") +SENSE_CODE(0x0B10, "Warning - high operating humidity limit exceeded") +SENSE_CODE(0x0B11, "Warning - low operating humidity limit exceeded") +SENSE_CODE(0x0B12, "Warning - microcode security at risk") +SENSE_CODE(0x0B13, "Warning - microcode digital signature validation failure") +SENSE_CODE(0x0B14, "Warning - physical element status change") + +SENSE_CODE(0x0C00, "Write error") +SENSE_CODE(0x0C01, "Write error - recovered with auto reallocation") +SENSE_CODE(0x0C02, "Write error - auto reallocation failed") +SENSE_CODE(0x0C03, "Write error - recommend reassignment") +SENSE_CODE(0x0C04, "Compression check miscompare error") +SENSE_CODE(0x0C05, "Data expansion occurred during compression") +SENSE_CODE(0x0C06, "Block not compressible") +SENSE_CODE(0x0C07, "Write error - recovery needed") +SENSE_CODE(0x0C08, "Write error - recovery failed") +SENSE_CODE(0x0C09, "Write error - loss of streaming") +SENSE_CODE(0x0C0A, "Write error - padding blocks added") +SENSE_CODE(0x0C0B, "Auxiliary memory write error") +SENSE_CODE(0x0C0C, "Write error - unexpected unsolicited data") +SENSE_CODE(0x0C0D, "Write error - not enough unsolicited data") +SENSE_CODE(0x0C0E, "Multiple write errors") +SENSE_CODE(0x0C0F, "Defects in error window") +SENSE_CODE(0x0C10, "Incomplete multiple atomic write operations") +SENSE_CODE(0x0C11, "Write error - recovery scan needed") +SENSE_CODE(0x0C12, "Write error - insufficient zone resources") + +SENSE_CODE(0x0D00, "Error detected by third party temporary initiator") +SENSE_CODE(0x0D01, "Third party device failure") +SENSE_CODE(0x0D02, "Copy target device not reachable") +SENSE_CODE(0x0D03, "Incorrect copy target device type") +SENSE_CODE(0x0D04, "Copy target device data underrun") +SENSE_CODE(0x0D05, "Copy target device data overrun") + +SENSE_CODE(0x0E00, "Invalid information unit") +SENSE_CODE(0x0E01, "Information unit too short") +SENSE_CODE(0x0E02, "Information unit too long") +SENSE_CODE(0x0E03, "Invalid field in command information unit") + +SENSE_CODE(0x1000, "Id CRC or ECC error") +SENSE_CODE(0x1001, "Logical block guard check failed") +SENSE_CODE(0x1002, "Logical block application tag check failed") +SENSE_CODE(0x1003, "Logical block reference tag check failed") +SENSE_CODE(0x1004, "Logical block protection error on recover buffered data") +SENSE_CODE(0x1005, "Logical block protection method error") + +SENSE_CODE(0x1100, "Unrecovered read error") +SENSE_CODE(0x1101, "Read retries exhausted") +SENSE_CODE(0x1102, "Error too long to correct") +SENSE_CODE(0x1103, "Multiple read errors") +SENSE_CODE(0x1104, "Unrecovered read error - auto reallocate failed") +SENSE_CODE(0x1105, "L-EC uncorrectable error") +SENSE_CODE(0x1106, "CIRC unrecovered error") +SENSE_CODE(0x1107, "Data re-synchronization error") +SENSE_CODE(0x1108, "Incomplete block read") +SENSE_CODE(0x1109, "No gap found") +SENSE_CODE(0x110A, "Miscorrected error") +SENSE_CODE(0x110B, "Unrecovered read error - recommend reassignment") +SENSE_CODE(0x110C, "Unrecovered read error - recommend rewrite the data") +SENSE_CODE(0x110D, "De-compression CRC error") +SENSE_CODE(0x110E, "Cannot decompress using declared algorithm") +SENSE_CODE(0x110F, "Error 
reading UPC/EAN number") +SENSE_CODE(0x1110, "Error reading ISRC number") +SENSE_CODE(0x1111, "Read error - loss of streaming") +SENSE_CODE(0x1112, "Auxiliary memory read error") +SENSE_CODE(0x1113, "Read error - failed retransmission request") +SENSE_CODE(0x1114, "Read error - lba marked bad by application client") +SENSE_CODE(0x1115, "Write after sanitize required") + +SENSE_CODE(0x1200, "Address mark not found for id field") + +SENSE_CODE(0x1300, "Address mark not found for data field") + +SENSE_CODE(0x1400, "Recorded entity not found") +SENSE_CODE(0x1401, "Record not found") +SENSE_CODE(0x1402, "Filemark or setmark not found") +SENSE_CODE(0x1403, "End-of-data not found") +SENSE_CODE(0x1404, "Block sequence error") +SENSE_CODE(0x1405, "Record not found - recommend reassignment") +SENSE_CODE(0x1406, "Record not found - data auto-reallocated") +SENSE_CODE(0x1407, "Locate operation failure") + +SENSE_CODE(0x1500, "Random positioning error") +SENSE_CODE(0x1501, "Mechanical positioning error") +SENSE_CODE(0x1502, "Positioning error detected by read of medium") + +SENSE_CODE(0x1600, "Data synchronization mark error") +SENSE_CODE(0x1601, "Data sync error - data rewritten") +SENSE_CODE(0x1602, "Data sync error - recommend rewrite") +SENSE_CODE(0x1603, "Data sync error - data auto-reallocated") +SENSE_CODE(0x1604, "Data sync error - recommend reassignment") + +SENSE_CODE(0x1700, "Recovered data with no error correction applied") +SENSE_CODE(0x1701, "Recovered data with retries") +SENSE_CODE(0x1702, "Recovered data with positive head offset") +SENSE_CODE(0x1703, "Recovered data with negative head offset") +SENSE_CODE(0x1704, "Recovered data with retries and/or circ applied") +SENSE_CODE(0x1705, "Recovered data using previous sector id") +SENSE_CODE(0x1706, "Recovered data without ECC - data auto-reallocated") +SENSE_CODE(0x1707, "Recovered data without ECC - recommend reassignment") +SENSE_CODE(0x1708, "Recovered data without ECC - recommend rewrite") +SENSE_CODE(0x1709, "Recovered data without ECC - data rewritten") + +SENSE_CODE(0x1800, "Recovered data with error correction applied") +SENSE_CODE(0x1801, "Recovered data with error corr. 
& retries applied") +SENSE_CODE(0x1802, "Recovered data - data auto-reallocated") +SENSE_CODE(0x1803, "Recovered data with CIRC") +SENSE_CODE(0x1804, "Recovered data with L-EC") +SENSE_CODE(0x1805, "Recovered data - recommend reassignment") +SENSE_CODE(0x1806, "Recovered data - recommend rewrite") +SENSE_CODE(0x1807, "Recovered data with ECC - data rewritten") +SENSE_CODE(0x1808, "Recovered data with linking") + +SENSE_CODE(0x1900, "Defect list error") +SENSE_CODE(0x1901, "Defect list not available") +SENSE_CODE(0x1902, "Defect list error in primary list") +SENSE_CODE(0x1903, "Defect list error in grown list") + +SENSE_CODE(0x1A00, "Parameter list length error") + +SENSE_CODE(0x1B00, "Synchronous data transfer error") + +SENSE_CODE(0x1C00, "Defect list not found") +SENSE_CODE(0x1C01, "Primary defect list not found") +SENSE_CODE(0x1C02, "Grown defect list not found") + +SENSE_CODE(0x1D00, "Miscompare during verify operation") +SENSE_CODE(0x1D01, "Miscompare verify of unmapped LBA") + +SENSE_CODE(0x1E00, "Recovered id with ECC correction") + +SENSE_CODE(0x1F00, "Partial defect list transfer") + +SENSE_CODE(0x2000, "Invalid command operation code") +SENSE_CODE(0x2001, "Access denied - initiator pending-enrolled") +SENSE_CODE(0x2002, "Access denied - no access rights") +SENSE_CODE(0x2003, "Access denied - invalid mgmt id key") +SENSE_CODE(0x2004, "Illegal command while in write capable state") +SENSE_CODE(0x2005, "Obsolete") +SENSE_CODE(0x2006, "Illegal command while in explicit address mode") +SENSE_CODE(0x2007, "Illegal command while in implicit address mode") +SENSE_CODE(0x2008, "Access denied - enrollment conflict") +SENSE_CODE(0x2009, "Access denied - invalid LU identifier") +SENSE_CODE(0x200A, "Access denied - invalid proxy token") +SENSE_CODE(0x200B, "Access denied - ACL LUN conflict") +SENSE_CODE(0x200C, "Illegal command when not in append-only mode") +SENSE_CODE(0x200D, "Not an administrative logical unit") +SENSE_CODE(0x200E, "Not a subsidiary logical unit") +SENSE_CODE(0x200F, "Not a conglomerate logical unit") + +SENSE_CODE(0x2100, "Logical block address out of range") +SENSE_CODE(0x2101, "Invalid element address") +SENSE_CODE(0x2102, "Invalid address for write") +SENSE_CODE(0x2103, "Invalid write crossing layer jump") +SENSE_CODE(0x2104, "Unaligned write command") +SENSE_CODE(0x2105, "Write boundary violation") +SENSE_CODE(0x2106, "Attempt to read invalid data") +SENSE_CODE(0x2107, "Read boundary violation") +SENSE_CODE(0x2108, "Misaligned write command") +SENSE_CODE(0x2109, "Attempt to access gap zone") + +SENSE_CODE(0x2200, "Illegal function (use 20 00, 24 00, or 26 00)") + +SENSE_CODE(0x2300, "Invalid token operation, cause not reportable") +SENSE_CODE(0x2301, "Invalid token operation, unsupported token type") +SENSE_CODE(0x2302, "Invalid token operation, remote token usage not supported") +SENSE_CODE(0x2303, "Invalid token operation, remote rod token creation not supported") +SENSE_CODE(0x2304, "Invalid token operation, token unknown") +SENSE_CODE(0x2305, "Invalid token operation, token corrupt") +SENSE_CODE(0x2306, "Invalid token operation, token revoked") +SENSE_CODE(0x2307, "Invalid token operation, token expired") +SENSE_CODE(0x2308, "Invalid token operation, token cancelled") +SENSE_CODE(0x2309, "Invalid token operation, token deleted") +SENSE_CODE(0x230A, "Invalid token operation, invalid token length") + +SENSE_CODE(0x2400, "Invalid field in cdb") +SENSE_CODE(0x2401, "CDB decryption error") +SENSE_CODE(0x2402, "Obsolete") +SENSE_CODE(0x2403, "Obsolete") 
+SENSE_CODE(0x2404, "Security audit value frozen") +SENSE_CODE(0x2405, "Security working key frozen") +SENSE_CODE(0x2406, "Nonce not unique") +SENSE_CODE(0x2407, "Nonce timestamp out of range") +SENSE_CODE(0x2408, "Invalid XCDB") +SENSE_CODE(0x2409, "Invalid fast format") + +SENSE_CODE(0x2500, "Logical unit not supported") + +SENSE_CODE(0x2600, "Invalid field in parameter list") +SENSE_CODE(0x2601, "Parameter not supported") +SENSE_CODE(0x2602, "Parameter value invalid") +SENSE_CODE(0x2603, "Threshold parameters not supported") +SENSE_CODE(0x2604, "Invalid release of persistent reservation") +SENSE_CODE(0x2605, "Data decryption error") +SENSE_CODE(0x2606, "Too many target descriptors") +SENSE_CODE(0x2607, "Unsupported target descriptor type code") +SENSE_CODE(0x2608, "Too many segment descriptors") +SENSE_CODE(0x2609, "Unsupported segment descriptor type code") +SENSE_CODE(0x260A, "Unexpected inexact segment") +SENSE_CODE(0x260B, "Inline data length exceeded") +SENSE_CODE(0x260C, "Invalid operation for copy source or destination") +SENSE_CODE(0x260D, "Copy segment granularity violation") +SENSE_CODE(0x260E, "Invalid parameter while port is enabled") +SENSE_CODE(0x260F, "Invalid data-out buffer integrity check value") +SENSE_CODE(0x2610, "Data decryption key fail limit reached") +SENSE_CODE(0x2611, "Incomplete key-associated data set") +SENSE_CODE(0x2612, "Vendor specific key reference not found") +SENSE_CODE(0x2613, "Application tag mode page is invalid") +SENSE_CODE(0x2614, "Tape stream mirroring prevented") +SENSE_CODE(0x2615, "Copy source or copy destination not authorized") +SENSE_CODE(0x2616, "Fast copy not possible") + +SENSE_CODE(0x2700, "Write protected") +SENSE_CODE(0x2701, "Hardware write protected") +SENSE_CODE(0x2702, "Logical unit software write protected") +SENSE_CODE(0x2703, "Associated write protect") +SENSE_CODE(0x2704, "Persistent write protect") +SENSE_CODE(0x2705, "Permanent write protect") +SENSE_CODE(0x2706, "Conditional write protect") +SENSE_CODE(0x2707, "Space allocation failed write protect") +SENSE_CODE(0x2708, "Zone is read only") + +SENSE_CODE(0x2800, "Not ready to ready change, medium may have changed") +SENSE_CODE(0x2801, "Import or export element accessed") +SENSE_CODE(0x2802, "Format-layer may have changed") +SENSE_CODE(0x2803, "Import/export element accessed, medium changed") + +SENSE_CODE(0x2900, "Power on, reset, or bus device reset occurred") +SENSE_CODE(0x2901, "Power on occurred") +SENSE_CODE(0x2902, "Scsi bus reset occurred") +SENSE_CODE(0x2903, "Bus device reset function occurred") +SENSE_CODE(0x2904, "Device internal reset") +SENSE_CODE(0x2905, "Transceiver mode changed to single-ended") +SENSE_CODE(0x2906, "Transceiver mode changed to lvd") +SENSE_CODE(0x2907, "I_T nexus loss occurred") + +SENSE_CODE(0x2A00, "Parameters changed") +SENSE_CODE(0x2A01, "Mode parameters changed") +SENSE_CODE(0x2A02, "Log parameters changed") +SENSE_CODE(0x2A03, "Reservations preempted") +SENSE_CODE(0x2A04, "Reservations released") +SENSE_CODE(0x2A05, "Registrations preempted") +SENSE_CODE(0x2A06, "Asymmetric access state changed") +SENSE_CODE(0x2A07, "Implicit asymmetric access state transition failed") +SENSE_CODE(0x2A08, "Priority changed") +SENSE_CODE(0x2A09, "Capacity data has changed") +SENSE_CODE(0x2A0A, "Error history I_T nexus cleared") +SENSE_CODE(0x2A0B, "Error history snapshot released") +SENSE_CODE(0x2A0C, "Error recovery attributes have changed") +SENSE_CODE(0x2A0D, "Data encryption capabilities changed") +SENSE_CODE(0x2A10, "Timestamp changed") 
+SENSE_CODE(0x2A11, "Data encryption parameters changed by another i_t nexus") +SENSE_CODE(0x2A12, "Data encryption parameters changed by vendor specific event") +SENSE_CODE(0x2A13, "Data encryption key instance counter has changed") +SENSE_CODE(0x2A14, "SA creation capabilities data has changed") +SENSE_CODE(0x2A15, "Medium removal prevention preempted") +SENSE_CODE(0x2A16, "Zone reset write pointer recommended") + +SENSE_CODE(0x2B00, "Copy cannot execute since host cannot disconnect") + +SENSE_CODE(0x2C00, "Command sequence error") +SENSE_CODE(0x2C01, "Too many windows specified") +SENSE_CODE(0x2C02, "Invalid combination of windows specified") +SENSE_CODE(0x2C03, "Current program area is not empty") +SENSE_CODE(0x2C04, "Current program area is empty") +SENSE_CODE(0x2C05, "Illegal power condition request") +SENSE_CODE(0x2C06, "Persistent prevent conflict") +SENSE_CODE(0x2C07, "Previous busy status") +SENSE_CODE(0x2C08, "Previous task set full status") +SENSE_CODE(0x2C09, "Previous reservation conflict status") +SENSE_CODE(0x2C0A, "Partition or collection contains user objects") +SENSE_CODE(0x2C0B, "Not reserved") +SENSE_CODE(0x2C0C, "Orwrite generation does not match") +SENSE_CODE(0x2C0D, "Reset write pointer not allowed") +SENSE_CODE(0x2C0E, "Zone is offline") +SENSE_CODE(0x2C0F, "Stream not open") +SENSE_CODE(0x2C10, "Unwritten data in zone") +SENSE_CODE(0x2C11, "Descriptor format sense data required") +SENSE_CODE(0x2C12, "Zone is inactive") +SENSE_CODE(0x2C13, "Well known logical unit access required") + +SENSE_CODE(0x2D00, "Overwrite error on update in place") + +SENSE_CODE(0x2E00, "Insufficient time for operation") +SENSE_CODE(0x2E01, "Command timeout before processing") +SENSE_CODE(0x2E02, "Command timeout during processing") +SENSE_CODE(0x2E03, "Command timeout during processing due to error recovery") + +SENSE_CODE(0x2F00, "Commands cleared by another initiator") +SENSE_CODE(0x2F01, "Commands cleared by power loss notification") +SENSE_CODE(0x2F02, "Commands cleared by device server") +SENSE_CODE(0x2F03, "Some commands cleared by queuing layer event") + +SENSE_CODE(0x3000, "Incompatible medium installed") +SENSE_CODE(0x3001, "Cannot read medium - unknown format") +SENSE_CODE(0x3002, "Cannot read medium - incompatible format") +SENSE_CODE(0x3003, "Cleaning cartridge installed") +SENSE_CODE(0x3004, "Cannot write medium - unknown format") +SENSE_CODE(0x3005, "Cannot write medium - incompatible format") +SENSE_CODE(0x3006, "Cannot format medium - incompatible medium") +SENSE_CODE(0x3007, "Cleaning failure") +SENSE_CODE(0x3008, "Cannot write - application code mismatch") +SENSE_CODE(0x3009, "Current session not fixated for append") +SENSE_CODE(0x300A, "Cleaning request rejected") +SENSE_CODE(0x300C, "WORM medium - overwrite attempted") +SENSE_CODE(0x300D, "WORM medium - integrity check") +SENSE_CODE(0x3010, "Medium not formatted") +SENSE_CODE(0x3011, "Incompatible volume type") +SENSE_CODE(0x3012, "Incompatible volume qualifier") +SENSE_CODE(0x3013, "Cleaning volume expired") + +SENSE_CODE(0x3100, "Medium format corrupted") +SENSE_CODE(0x3101, "Format command failed") +SENSE_CODE(0x3102, "Zoned formatting failed due to spare linking") +SENSE_CODE(0x3103, "Sanitize command failed") +SENSE_CODE(0x3104, "Depopulation failed") +SENSE_CODE(0x3105, "Depopulation restoration failed") + +SENSE_CODE(0x3200, "No defect spare location available") +SENSE_CODE(0x3201, "Defect list update failure") + +SENSE_CODE(0x3300, "Tape length error") + +SENSE_CODE(0x3400, "Enclosure failure") + 
+SENSE_CODE(0x3500, "Enclosure services failure") +SENSE_CODE(0x3501, "Unsupported enclosure function") +SENSE_CODE(0x3502, "Enclosure services unavailable") +SENSE_CODE(0x3503, "Enclosure services transfer failure") +SENSE_CODE(0x3504, "Enclosure services transfer refused") +SENSE_CODE(0x3505, "Enclosure services checksum error") + +SENSE_CODE(0x3600, "Ribbon, ink, or toner failure") + +SENSE_CODE(0x3700, "Rounded parameter") + +SENSE_CODE(0x3800, "Event status notification") +SENSE_CODE(0x3802, "Esn - power management class event") +SENSE_CODE(0x3804, "Esn - media class event") +SENSE_CODE(0x3806, "Esn - device busy class event") +SENSE_CODE(0x3807, "Thin Provisioning soft threshold reached") +SENSE_CODE(0x3808, "Depopulation interrupted") + +SENSE_CODE(0x3900, "Saving parameters not supported") + +SENSE_CODE(0x3A00, "Medium not present") +SENSE_CODE(0x3A01, "Medium not present - tray closed") +SENSE_CODE(0x3A02, "Medium not present - tray open") +SENSE_CODE(0x3A03, "Medium not present - loadable") +SENSE_CODE(0x3A04, "Medium not present - medium auxiliary memory accessible") + +SENSE_CODE(0x3B00, "Sequential positioning error") +SENSE_CODE(0x3B01, "Tape position error at beginning-of-medium") +SENSE_CODE(0x3B02, "Tape position error at end-of-medium") +SENSE_CODE(0x3B03, "Tape or electronic vertical forms unit not ready") +SENSE_CODE(0x3B04, "Slew failure") +SENSE_CODE(0x3B05, "Paper jam") +SENSE_CODE(0x3B06, "Failed to sense top-of-form") +SENSE_CODE(0x3B07, "Failed to sense bottom-of-form") +SENSE_CODE(0x3B08, "Reposition error") +SENSE_CODE(0x3B09, "Read past end of medium") +SENSE_CODE(0x3B0A, "Read past beginning of medium") +SENSE_CODE(0x3B0B, "Position past end of medium") +SENSE_CODE(0x3B0C, "Position past beginning of medium") +SENSE_CODE(0x3B0D, "Medium destination element full") +SENSE_CODE(0x3B0E, "Medium source element empty") +SENSE_CODE(0x3B0F, "End of medium reached") +SENSE_CODE(0x3B11, "Medium magazine not accessible") +SENSE_CODE(0x3B12, "Medium magazine removed") +SENSE_CODE(0x3B13, "Medium magazine inserted") +SENSE_CODE(0x3B14, "Medium magazine locked") +SENSE_CODE(0x3B15, "Medium magazine unlocked") +SENSE_CODE(0x3B16, "Mechanical positioning or changer error") +SENSE_CODE(0x3B17, "Read past end of user object") +SENSE_CODE(0x3B18, "Element disabled") +SENSE_CODE(0x3B19, "Element enabled") +SENSE_CODE(0x3B1A, "Data transfer device removed") +SENSE_CODE(0x3B1B, "Data transfer device inserted") +SENSE_CODE(0x3B1C, "Too many logical objects on partition to support operation") +SENSE_CODE(0x3B20, "Element static information changed") + +SENSE_CODE(0x3D00, "Invalid bits in identify message") + +SENSE_CODE(0x3E00, "Logical unit has not self-configured yet") +SENSE_CODE(0x3E01, "Logical unit failure") +SENSE_CODE(0x3E02, "Timeout on logical unit") +SENSE_CODE(0x3E03, "Logical unit failed self-test") +SENSE_CODE(0x3E04, "Logical unit unable to update self-test log") + +SENSE_CODE(0x3F00, "Target operating conditions have changed") +SENSE_CODE(0x3F01, "Microcode has been changed") +SENSE_CODE(0x3F02, "Changed operating definition") +SENSE_CODE(0x3F03, "Inquiry data has changed") +SENSE_CODE(0x3F04, "Component device attached") +SENSE_CODE(0x3F05, "Device identifier changed") +SENSE_CODE(0x3F06, "Redundancy group created or modified") +SENSE_CODE(0x3F07, "Redundancy group deleted") +SENSE_CODE(0x3F08, "Spare created or modified") +SENSE_CODE(0x3F09, "Spare deleted") +SENSE_CODE(0x3F0A, "Volume set created or modified") +SENSE_CODE(0x3F0B, "Volume set deleted") 
+SENSE_CODE(0x3F0C, "Volume set deassigned") +SENSE_CODE(0x3F0D, "Volume set reassigned") +SENSE_CODE(0x3F0E, "Reported luns data has changed") +SENSE_CODE(0x3F0F, "Echo buffer overwritten") +SENSE_CODE(0x3F10, "Medium loadable") +SENSE_CODE(0x3F11, "Medium auxiliary memory accessible") +SENSE_CODE(0x3F12, "iSCSI IP address added") +SENSE_CODE(0x3F13, "iSCSI IP address removed") +SENSE_CODE(0x3F14, "iSCSI IP address changed") +SENSE_CODE(0x3F15, "Inspect referrals sense descriptors") +SENSE_CODE(0x3F16, "Microcode has been changed without reset") +SENSE_CODE(0x3F17, "Zone transition to full") +SENSE_CODE(0x3F18, "Bind completed") +SENSE_CODE(0x3F19, "Bind redirected") +SENSE_CODE(0x3F1A, "Subsidiary binding changed") + +/* + * SENSE_CODE(0x40NN, "Ram failure") + * SENSE_CODE(0x40NN, "Diagnostic failure on component nn") + * SENSE_CODE(0x41NN, "Data path failure") + * SENSE_CODE(0x42NN, "Power-on or self-test failure") + */ +SENSE_CODE(0x4300, "Message error") + +SENSE_CODE(0x4400, "Internal target failure") +SENSE_CODE(0x4401, "Persistent reservation information lost") +SENSE_CODE(0x4471, "ATA device failed set features") + +SENSE_CODE(0x4500, "Select or reselect failure") + +SENSE_CODE(0x4600, "Unsuccessful soft reset") + +SENSE_CODE(0x4700, "Scsi parity error") +SENSE_CODE(0x4701, "Data phase CRC error detected") +SENSE_CODE(0x4702, "Scsi parity error detected during st data phase") +SENSE_CODE(0x4703, "Information unit iuCRC error detected") +SENSE_CODE(0x4704, "Asynchronous information protection error detected") +SENSE_CODE(0x4705, "Protocol service CRC error") +SENSE_CODE(0x4706, "Phy test function in progress") +SENSE_CODE(0x477f, "Some commands cleared by iSCSI Protocol event") + +SENSE_CODE(0x4800, "Initiator detected error message received") + +SENSE_CODE(0x4900, "Invalid message error") + +SENSE_CODE(0x4A00, "Command phase error") + +SENSE_CODE(0x4B00, "Data phase error") +SENSE_CODE(0x4B01, "Invalid target port transfer tag received") +SENSE_CODE(0x4B02, "Too much write data") +SENSE_CODE(0x4B03, "Ack/nak timeout") +SENSE_CODE(0x4B04, "Nak received") +SENSE_CODE(0x4B05, "Data offset error") +SENSE_CODE(0x4B06, "Initiator response timeout") +SENSE_CODE(0x4B07, "Connection lost") +SENSE_CODE(0x4B08, "Data-in buffer overflow - data buffer size") +SENSE_CODE(0x4B09, "Data-in buffer overflow - data buffer descriptor area") +SENSE_CODE(0x4B0A, "Data-in buffer error") +SENSE_CODE(0x4B0B, "Data-out buffer overflow - data buffer size") +SENSE_CODE(0x4B0C, "Data-out buffer overflow - data buffer descriptor area") +SENSE_CODE(0x4B0D, "Data-out buffer error") +SENSE_CODE(0x4B0E, "PCIe fabric error") +SENSE_CODE(0x4B0F, "PCIe completion timeout") +SENSE_CODE(0x4B10, "PCIe completer abort") +SENSE_CODE(0x4B11, "PCIe poisoned tlp received") +SENSE_CODE(0x4B12, "PCIe eCRC check failed") +SENSE_CODE(0x4B13, "PCIe unsupported request") +SENSE_CODE(0x4B14, "PCIe acs violation") +SENSE_CODE(0x4B15, "PCIe tlp prefix blocked") + +SENSE_CODE(0x4C00, "Logical unit failed self-configuration") +/* + * SENSE_CODE(0x4DNN, "Tagged overlapped commands (nn = queue tag)") + */ +SENSE_CODE(0x4E00, "Overlapped commands attempted") + +SENSE_CODE(0x5000, "Write append error") +SENSE_CODE(0x5001, "Write append position error") +SENSE_CODE(0x5002, "Position error related to timing") + +SENSE_CODE(0x5100, "Erase failure") +SENSE_CODE(0x5101, "Erase failure - incomplete erase operation detected") + +SENSE_CODE(0x5200, "Cartridge fault") + +SENSE_CODE(0x5300, "Media load or eject failed") +SENSE_CODE(0x5301, "Unload 
tape failure") +SENSE_CODE(0x5302, "Medium removal prevented") +SENSE_CODE(0x5303, "Medium removal prevented by data transfer element") +SENSE_CODE(0x5304, "Medium thread or unthread failure") +SENSE_CODE(0x5305, "Volume identifier invalid") +SENSE_CODE(0x5306, "Volume identifier missing") +SENSE_CODE(0x5307, "Duplicate volume identifier") +SENSE_CODE(0x5308, "Element status unknown") +SENSE_CODE(0x5309, "Data transfer device error - load failed") +SENSE_CODE(0x530a, "Data transfer device error - unload failed") +SENSE_CODE(0x530b, "Data transfer device error - unload missing") +SENSE_CODE(0x530c, "Data transfer device error - eject failed") +SENSE_CODE(0x530d, "Data transfer device error - library communication failed") + +SENSE_CODE(0x5400, "Scsi to host system interface failure") + +SENSE_CODE(0x5500, "System resource failure") +SENSE_CODE(0x5501, "System buffer full") +SENSE_CODE(0x5502, "Insufficient reservation resources") +SENSE_CODE(0x5503, "Insufficient resources") +SENSE_CODE(0x5504, "Insufficient registration resources") +SENSE_CODE(0x5505, "Insufficient access control resources") +SENSE_CODE(0x5506, "Auxiliary memory out of space") +SENSE_CODE(0x5507, "Quota error") +SENSE_CODE(0x5508, "Maximum number of supplemental decryption keys exceeded") +SENSE_CODE(0x5509, "Medium auxiliary memory not accessible") +SENSE_CODE(0x550A, "Data currently unavailable") +SENSE_CODE(0x550B, "Insufficient power for operation") +SENSE_CODE(0x550C, "Insufficient resources to create rod") +SENSE_CODE(0x550D, "Insufficient resources to create rod token") +SENSE_CODE(0x550E, "Insufficient zone resources") +SENSE_CODE(0x550F, "Insufficient zone resources to complete write") +SENSE_CODE(0x5510, "Maximum number of streams open") +SENSE_CODE(0x5511, "Insufficient resources to bind") + +SENSE_CODE(0x5700, "Unable to recover table-of-contents") + +SENSE_CODE(0x5800, "Generation does not exist") + +SENSE_CODE(0x5900, "Updated block read") + +SENSE_CODE(0x5A00, "Operator request or state change input") +SENSE_CODE(0x5A01, "Operator medium removal request") +SENSE_CODE(0x5A02, "Operator selected write protect") +SENSE_CODE(0x5A03, "Operator selected write permit") + +SENSE_CODE(0x5B00, "Log exception") +SENSE_CODE(0x5B01, "Threshold condition met") +SENSE_CODE(0x5B02, "Log counter at maximum") +SENSE_CODE(0x5B03, "Log list codes exhausted") + +SENSE_CODE(0x5C00, "Rpl status change") +SENSE_CODE(0x5C01, "Spindles synchronized") +SENSE_CODE(0x5C02, "Spindles not synchronized") + +SENSE_CODE(0x5D00, "Failure prediction threshold exceeded") +SENSE_CODE(0x5D01, "Media failure prediction threshold exceeded") +SENSE_CODE(0x5D02, "Logical unit failure prediction threshold exceeded") +SENSE_CODE(0x5D03, "Spare area exhaustion prediction threshold exceeded") +SENSE_CODE(0x5D10, "Hardware impending failure general hard drive failure") +SENSE_CODE(0x5D11, "Hardware impending failure drive error rate too high") +SENSE_CODE(0x5D12, "Hardware impending failure data error rate too high") +SENSE_CODE(0x5D13, "Hardware impending failure seek error rate too high") +SENSE_CODE(0x5D14, "Hardware impending failure too many block reassigns") +SENSE_CODE(0x5D15, "Hardware impending failure access times too high") +SENSE_CODE(0x5D16, "Hardware impending failure start unit times too high") +SENSE_CODE(0x5D17, "Hardware impending failure channel parametrics") +SENSE_CODE(0x5D18, "Hardware impending failure controller detected") +SENSE_CODE(0x5D19, "Hardware impending failure throughput performance") +SENSE_CODE(0x5D1A, "Hardware 
impending failure seek time performance") +SENSE_CODE(0x5D1B, "Hardware impending failure spin-up retry count") +SENSE_CODE(0x5D1C, "Hardware impending failure drive calibration retry count") +SENSE_CODE(0x5D20, "Controller impending failure general hard drive failure") +SENSE_CODE(0x5D21, "Controller impending failure drive error rate too high") +SENSE_CODE(0x5D22, "Controller impending failure data error rate too high") +SENSE_CODE(0x5D23, "Controller impending failure seek error rate too high") +SENSE_CODE(0x5D24, "Controller impending failure too many block reassigns") +SENSE_CODE(0x5D25, "Controller impending failure access times too high") +SENSE_CODE(0x5D26, "Controller impending failure start unit times too high") +SENSE_CODE(0x5D27, "Controller impending failure channel parametrics") +SENSE_CODE(0x5D28, "Controller impending failure controller detected") +SENSE_CODE(0x5D29, "Controller impending failure throughput performance") +SENSE_CODE(0x5D2A, "Controller impending failure seek time performance") +SENSE_CODE(0x5D2B, "Controller impending failure spin-up retry count") +SENSE_CODE(0x5D2C, "Controller impending failure drive calibration retry count") +SENSE_CODE(0x5D30, "Data channel impending failure general hard drive failure") +SENSE_CODE(0x5D31, "Data channel impending failure drive error rate too high") +SENSE_CODE(0x5D32, "Data channel impending failure data error rate too high") +SENSE_CODE(0x5D33, "Data channel impending failure seek error rate too high") +SENSE_CODE(0x5D34, "Data channel impending failure too many block reassigns") +SENSE_CODE(0x5D35, "Data channel impending failure access times too high") +SENSE_CODE(0x5D36, "Data channel impending failure start unit times too high") +SENSE_CODE(0x5D37, "Data channel impending failure channel parametrics") +SENSE_CODE(0x5D38, "Data channel impending failure controller detected") +SENSE_CODE(0x5D39, "Data channel impending failure throughput performance") +SENSE_CODE(0x5D3A, "Data channel impending failure seek time performance") +SENSE_CODE(0x5D3B, "Data channel impending failure spin-up retry count") +SENSE_CODE(0x5D3C, "Data channel impending failure drive calibration retry count") +SENSE_CODE(0x5D40, "Servo impending failure general hard drive failure") +SENSE_CODE(0x5D41, "Servo impending failure drive error rate too high") +SENSE_CODE(0x5D42, "Servo impending failure data error rate too high") +SENSE_CODE(0x5D43, "Servo impending failure seek error rate too high") +SENSE_CODE(0x5D44, "Servo impending failure too many block reassigns") +SENSE_CODE(0x5D45, "Servo impending failure access times too high") +SENSE_CODE(0x5D46, "Servo impending failure start unit times too high") +SENSE_CODE(0x5D47, "Servo impending failure channel parametrics") +SENSE_CODE(0x5D48, "Servo impending failure controller detected") +SENSE_CODE(0x5D49, "Servo impending failure throughput performance") +SENSE_CODE(0x5D4A, "Servo impending failure seek time performance") +SENSE_CODE(0x5D4B, "Servo impending failure spin-up retry count") +SENSE_CODE(0x5D4C, "Servo impending failure drive calibration retry count") +SENSE_CODE(0x5D50, "Spindle impending failure general hard drive failure") +SENSE_CODE(0x5D51, "Spindle impending failure drive error rate too high") +SENSE_CODE(0x5D52, "Spindle impending failure data error rate too high") +SENSE_CODE(0x5D53, "Spindle impending failure seek error rate too high") +SENSE_CODE(0x5D54, "Spindle impending failure too many block reassigns") +SENSE_CODE(0x5D55, "Spindle impending failure access times too 
high") +SENSE_CODE(0x5D56, "Spindle impending failure start unit times too high") +SENSE_CODE(0x5D57, "Spindle impending failure channel parametrics") +SENSE_CODE(0x5D58, "Spindle impending failure controller detected") +SENSE_CODE(0x5D59, "Spindle impending failure throughput performance") +SENSE_CODE(0x5D5A, "Spindle impending failure seek time performance") +SENSE_CODE(0x5D5B, "Spindle impending failure spin-up retry count") +SENSE_CODE(0x5D5C, "Spindle impending failure drive calibration retry count") +SENSE_CODE(0x5D60, "Firmware impending failure general hard drive failure") +SENSE_CODE(0x5D61, "Firmware impending failure drive error rate too high") +SENSE_CODE(0x5D62, "Firmware impending failure data error rate too high") +SENSE_CODE(0x5D63, "Firmware impending failure seek error rate too high") +SENSE_CODE(0x5D64, "Firmware impending failure too many block reassigns") +SENSE_CODE(0x5D65, "Firmware impending failure access times too high") +SENSE_CODE(0x5D66, "Firmware impending failure start unit times too high") +SENSE_CODE(0x5D67, "Firmware impending failure channel parametrics") +SENSE_CODE(0x5D68, "Firmware impending failure controller detected") +SENSE_CODE(0x5D69, "Firmware impending failure throughput performance") +SENSE_CODE(0x5D6A, "Firmware impending failure seek time performance") +SENSE_CODE(0x5D6B, "Firmware impending failure spin-up retry count") +SENSE_CODE(0x5D6C, "Firmware impending failure drive calibration retry count") +SENSE_CODE(0x5D73, "Media impending failure endurance limit met") +SENSE_CODE(0x5DFF, "Failure prediction threshold exceeded (false)") + +SENSE_CODE(0x5E00, "Low power condition on") +SENSE_CODE(0x5E01, "Idle condition activated by timer") +SENSE_CODE(0x5E02, "Standby condition activated by timer") +SENSE_CODE(0x5E03, "Idle condition activated by command") +SENSE_CODE(0x5E04, "Standby condition activated by command") +SENSE_CODE(0x5E05, "Idle_b condition activated by timer") +SENSE_CODE(0x5E06, "Idle_b condition activated by command") +SENSE_CODE(0x5E07, "Idle_c condition activated by timer") +SENSE_CODE(0x5E08, "Idle_c condition activated by command") +SENSE_CODE(0x5E09, "Standby_y condition activated by timer") +SENSE_CODE(0x5E0A, "Standby_y condition activated by command") +SENSE_CODE(0x5E41, "Power state change to active") +SENSE_CODE(0x5E42, "Power state change to idle") +SENSE_CODE(0x5E43, "Power state change to standby") +SENSE_CODE(0x5E45, "Power state change to sleep") +SENSE_CODE(0x5E47, "Power state change to device control") + +SENSE_CODE(0x6000, "Lamp failure") + +SENSE_CODE(0x6100, "Video acquisition error") +SENSE_CODE(0x6101, "Unable to acquire video") +SENSE_CODE(0x6102, "Out of focus") + +SENSE_CODE(0x6200, "Scan head positioning error") + +SENSE_CODE(0x6300, "End of user area encountered on this track") +SENSE_CODE(0x6301, "Packet does not fit in available space") + +SENSE_CODE(0x6400, "Illegal mode for this track") +SENSE_CODE(0x6401, "Invalid packet size") + +SENSE_CODE(0x6500, "Voltage fault") + +SENSE_CODE(0x6600, "Automatic document feeder cover up") +SENSE_CODE(0x6601, "Automatic document feeder lift up") +SENSE_CODE(0x6602, "Document jam in automatic document feeder") +SENSE_CODE(0x6603, "Document miss feed automatic in document feeder") + +SENSE_CODE(0x6700, "Configuration failure") +SENSE_CODE(0x6701, "Configuration of incapable logical units failed") +SENSE_CODE(0x6702, "Add logical unit failed") +SENSE_CODE(0x6703, "Modification of logical unit failed") +SENSE_CODE(0x6704, "Exchange of logical unit failed") 
+SENSE_CODE(0x6705, "Remove of logical unit failed") +SENSE_CODE(0x6706, "Attachment of logical unit failed") +SENSE_CODE(0x6707, "Creation of logical unit failed") +SENSE_CODE(0x6708, "Assign failure occurred") +SENSE_CODE(0x6709, "Multiply assigned logical unit") +SENSE_CODE(0x670A, "Set target port groups command failed") +SENSE_CODE(0x670B, "ATA device feature not enabled") +SENSE_CODE(0x670C, "Command rejected") +SENSE_CODE(0x670D, "Explicit bind not allowed") + +SENSE_CODE(0x6800, "Logical unit not configured") +SENSE_CODE(0x6801, "Subsidiary logical unit not configured") + +SENSE_CODE(0x6900, "Data loss on logical unit") +SENSE_CODE(0x6901, "Multiple logical unit failures") +SENSE_CODE(0x6902, "Parity/data mismatch") + +SENSE_CODE(0x6A00, "Informational, refer to log") + +SENSE_CODE(0x6B00, "State change has occurred") +SENSE_CODE(0x6B01, "Redundancy level got better") +SENSE_CODE(0x6B02, "Redundancy level got worse") + +SENSE_CODE(0x6C00, "Rebuild failure occurred") + +SENSE_CODE(0x6D00, "Recalculate failure occurred") + +SENSE_CODE(0x6E00, "Command to logical unit failed") + +SENSE_CODE(0x6F00, "Copy protection key exchange failure - authentication failure") +SENSE_CODE(0x6F01, "Copy protection key exchange failure - key not present") +SENSE_CODE(0x6F02, "Copy protection key exchange failure - key not established") +SENSE_CODE(0x6F03, "Read of scrambled sector without authentication") +SENSE_CODE(0x6F04, "Media region code is mismatched to logical unit region") +SENSE_CODE(0x6F05, "Drive region must be permanent/region reset count error") +SENSE_CODE(0x6F06, "Insufficient block count for binding nonce recording") +SENSE_CODE(0x6F07, "Conflict in binding nonce recording") +SENSE_CODE(0x6F08, "Insufficient permission") +SENSE_CODE(0x6F09, "Invalid drive-host pairing server") +SENSE_CODE(0x6F0A, "Drive-host pairing suspended") + +/* + * SENSE_CODE(0x70NN, "Decompression exception short algorithm id of nn") + */ +SENSE_CODE(0x7100, "Decompression exception long algorithm id") + +SENSE_CODE(0x7200, "Session fixation error") +SENSE_CODE(0x7201, "Session fixation error writing lead-in") +SENSE_CODE(0x7202, "Session fixation error writing lead-out") +SENSE_CODE(0x7203, "Session fixation error - incomplete track in session") +SENSE_CODE(0x7204, "Empty or partially written reserved track") +SENSE_CODE(0x7205, "No more track reservations allowed") +SENSE_CODE(0x7206, "RMZ extension is not allowed") +SENSE_CODE(0x7207, "No more test zone extensions are allowed") + +SENSE_CODE(0x7300, "Cd control error") +SENSE_CODE(0x7301, "Power calibration area almost full") +SENSE_CODE(0x7302, "Power calibration area is full") +SENSE_CODE(0x7303, "Power calibration area error") +SENSE_CODE(0x7304, "Program memory area update failure") +SENSE_CODE(0x7305, "Program memory area is full") +SENSE_CODE(0x7306, "RMA/PMA is almost full") +SENSE_CODE(0x7310, "Current power calibration area almost full") +SENSE_CODE(0x7311, "Current power calibration area is full") +SENSE_CODE(0x7317, "RDZ is full") + +SENSE_CODE(0x7400, "Security error") +SENSE_CODE(0x7401, "Unable to decrypt data") +SENSE_CODE(0x7402, "Unencrypted data encountered while decrypting") +SENSE_CODE(0x7403, "Incorrect data encryption key") +SENSE_CODE(0x7404, "Cryptographic integrity validation failed") +SENSE_CODE(0x7405, "Error decrypting data") +SENSE_CODE(0x7406, "Unknown signature verification key") +SENSE_CODE(0x7407, "Encryption parameters not useable") +SENSE_CODE(0x7408, "Digital signature validation failure") +SENSE_CODE(0x7409, "Encryption 
mode mismatch on read") +SENSE_CODE(0x740A, "Encrypted block not raw read enabled") +SENSE_CODE(0x740B, "Incorrect Encryption parameters") +SENSE_CODE(0x740C, "Unable to decrypt parameter list") +SENSE_CODE(0x740D, "Encryption algorithm disabled") +SENSE_CODE(0x7410, "SA creation parameter value invalid") +SENSE_CODE(0x7411, "SA creation parameter value rejected") +SENSE_CODE(0x7412, "Invalid SA usage") +SENSE_CODE(0x7421, "Data Encryption configuration prevented") +SENSE_CODE(0x7430, "SA creation parameter not supported") +SENSE_CODE(0x7440, "Authentication failed") +SENSE_CODE(0x7461, "External data encryption key manager access error") +SENSE_CODE(0x7462, "External data encryption key manager error") +SENSE_CODE(0x7463, "External data encryption key not found") +SENSE_CODE(0x7464, "External data encryption request not authorized") +SENSE_CODE(0x746E, "External data encryption control timeout") +SENSE_CODE(0x746F, "External data encryption control error") +SENSE_CODE(0x7471, "Logical unit access not authorized") +SENSE_CODE(0x7479, "Security conflict in translated device") diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c new file mode 100644 index 000000000..d7d0c35c5 --- /dev/null +++ b/drivers/scsi/ses.c @@ -0,0 +1,923 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SCSI Enclosure Services + * + * Copyright (C) 2008 James Bottomley + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +struct ses_device { + unsigned char *page1; + unsigned char *page1_types; + unsigned char *page2; + unsigned char *page10; + short page1_len; + short page1_num_types; + short page2_len; + short page10_len; +}; + +struct ses_component { + u64 addr; +}; + +static bool ses_page2_supported(struct enclosure_device *edev) +{ + struct ses_device *ses_dev = edev->scratch; + + return (ses_dev->page2 != NULL); +} + +static int ses_probe(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + int err = -ENODEV; + + if (sdev->type != TYPE_ENCLOSURE) + goto out; + + err = 0; + sdev_printk(KERN_NOTICE, sdev, "Attached Enclosure device\n"); + + out: + return err; +} + +#define SES_TIMEOUT (30 * HZ) +#define SES_RETRIES 3 + +static void init_device_slot_control(unsigned char *dest_desc, + struct enclosure_component *ecomp, + unsigned char *status) +{ + memcpy(dest_desc, status, 4); + dest_desc[0] = 0; + /* only clear byte 1 for ENCLOSURE_COMPONENT_DEVICE */ + if (ecomp->type == ENCLOSURE_COMPONENT_DEVICE) + dest_desc[1] = 0; + dest_desc[2] &= 0xde; + dest_desc[3] &= 0x3c; +} + + +static int ses_recv_diag(struct scsi_device *sdev, int page_code, + void *buf, int bufflen) +{ + int ret; + unsigned char cmd[] = { + RECEIVE_DIAGNOSTIC, + 1, /* Set PCV bit */ + page_code, + bufflen >> 8, + bufflen & 0xff, + 0 + }; + unsigned char recv_page_code; + unsigned int retries = SES_RETRIES; + struct scsi_sense_hdr sshdr; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + + do { + ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, bufflen, + SES_TIMEOUT, 1, &exec_args); + } while (ret > 0 && --retries && scsi_sense_valid(&sshdr) && + (sshdr.sense_key == NOT_READY || + (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29))); + + if (unlikely(ret)) + return ret; + + recv_page_code = ((unsigned char *)buf)[0]; + + if (likely(recv_page_code == page_code)) + return ret; + + /* successful diagnostic but wrong page code. 
This happens to some + * USB devices, just print a message and pretend there was an error */ + + sdev_printk(KERN_ERR, sdev, + "Wrong diagnostic page; asked for %d got %u\n", + page_code, recv_page_code); + + return -EINVAL; +} + +static int ses_send_diag(struct scsi_device *sdev, int page_code, + void *buf, int bufflen) +{ + int result; + + unsigned char cmd[] = { + SEND_DIAGNOSTIC, + 0x10, /* Set PF bit */ + 0, + bufflen >> 8, + bufflen & 0xff, + 0 + }; + struct scsi_sense_hdr sshdr; + unsigned int retries = SES_RETRIES; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + + do { + result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, buf, + bufflen, SES_TIMEOUT, 1, &exec_args); + } while (result > 0 && --retries && scsi_sense_valid(&sshdr) && + (sshdr.sense_key == NOT_READY || + (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29))); + + if (result) + sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n", + result); + return result; +} + +static int ses_set_page2_descriptor(struct enclosure_device *edev, + struct enclosure_component *ecomp, + unsigned char *desc) +{ + int i, j, count = 0, descriptor = ecomp->number; + struct scsi_device *sdev = to_scsi_device(edev->edev.parent); + struct ses_device *ses_dev = edev->scratch; + unsigned char *type_ptr = ses_dev->page1_types; + unsigned char *desc_ptr = ses_dev->page2 + 8; + + /* Clear everything */ + memset(desc_ptr, 0, ses_dev->page2_len - 8); + for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) { + for (j = 0; j < type_ptr[1]; j++) { + desc_ptr += 4; + if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && + type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE) + continue; + if (count++ == descriptor) { + memcpy(desc_ptr, desc, 4); + /* set select */ + desc_ptr[0] |= 0x80; + /* clear reserved, just in case */ + desc_ptr[0] &= 0xf0; + } + } + } + + return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); +} + +static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev, + struct enclosure_component *ecomp) +{ + int i, j, count = 0, descriptor = ecomp->number; + struct scsi_device *sdev = to_scsi_device(edev->edev.parent); + struct ses_device *ses_dev = edev->scratch; + unsigned char *type_ptr = ses_dev->page1_types; + unsigned char *desc_ptr = ses_dev->page2 + 8; + + if (ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len) < 0) + return NULL; + + for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) { + for (j = 0; j < type_ptr[1]; j++) { + desc_ptr += 4; + if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && + type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE) + continue; + if (count++ == descriptor) + return desc_ptr; + } + } + return NULL; +} + +/* For device slot and array device slot elements, byte 3 bit 6 + * is "fault sensed" while byte 3 bit 5 is "fault reqstd". As this + * code stands these bits are shifted 4 positions right so in + * sysfs they will appear as bits 2 and 1 respectively. Strange. 
*/ +static void ses_get_fault(struct enclosure_device *edev, + struct enclosure_component *ecomp) +{ + unsigned char *desc; + + if (!ses_page2_supported(edev)) { + ecomp->fault = 0; + return; + } + desc = ses_get_page2_descriptor(edev, ecomp); + if (desc) + ecomp->fault = (desc[3] & 0x60) >> 4; +} + +static int ses_set_fault(struct enclosure_device *edev, + struct enclosure_component *ecomp, + enum enclosure_component_setting val) +{ + unsigned char desc[4]; + unsigned char *desc_ptr; + + if (!ses_page2_supported(edev)) + return -EINVAL; + + desc_ptr = ses_get_page2_descriptor(edev, ecomp); + + if (!desc_ptr) + return -EIO; + + init_device_slot_control(desc, ecomp, desc_ptr); + + switch (val) { + case ENCLOSURE_SETTING_DISABLED: + desc[3] &= 0xdf; + break; + case ENCLOSURE_SETTING_ENABLED: + desc[3] |= 0x20; + break; + default: + /* SES doesn't do the SGPIO blink settings */ + return -EINVAL; + } + + return ses_set_page2_descriptor(edev, ecomp, desc); +} + +static void ses_get_status(struct enclosure_device *edev, + struct enclosure_component *ecomp) +{ + unsigned char *desc; + + if (!ses_page2_supported(edev)) { + ecomp->status = 0; + return; + } + desc = ses_get_page2_descriptor(edev, ecomp); + if (desc) + ecomp->status = (desc[0] & 0x0f); +} + +static void ses_get_locate(struct enclosure_device *edev, + struct enclosure_component *ecomp) +{ + unsigned char *desc; + + if (!ses_page2_supported(edev)) { + ecomp->locate = 0; + return; + } + desc = ses_get_page2_descriptor(edev, ecomp); + if (desc) + ecomp->locate = (desc[2] & 0x02) ? 1 : 0; +} + +static int ses_set_locate(struct enclosure_device *edev, + struct enclosure_component *ecomp, + enum enclosure_component_setting val) +{ + unsigned char desc[4]; + unsigned char *desc_ptr; + + if (!ses_page2_supported(edev)) + return -EINVAL; + + desc_ptr = ses_get_page2_descriptor(edev, ecomp); + + if (!desc_ptr) + return -EIO; + + init_device_slot_control(desc, ecomp, desc_ptr); + + switch (val) { + case ENCLOSURE_SETTING_DISABLED: + desc[2] &= 0xfd; + break; + case ENCLOSURE_SETTING_ENABLED: + desc[2] |= 0x02; + break; + default: + /* SES doesn't do the SGPIO blink settings */ + return -EINVAL; + } + return ses_set_page2_descriptor(edev, ecomp, desc); +} + +static int ses_set_active(struct enclosure_device *edev, + struct enclosure_component *ecomp, + enum enclosure_component_setting val) +{ + unsigned char desc[4]; + unsigned char *desc_ptr; + + if (!ses_page2_supported(edev)) + return -EINVAL; + + desc_ptr = ses_get_page2_descriptor(edev, ecomp); + + if (!desc_ptr) + return -EIO; + + init_device_slot_control(desc, ecomp, desc_ptr); + + switch (val) { + case ENCLOSURE_SETTING_DISABLED: + desc[2] &= 0x7f; + ecomp->active = 0; + break; + case ENCLOSURE_SETTING_ENABLED: + desc[2] |= 0x80; + ecomp->active = 1; + break; + default: + /* SES doesn't do the SGPIO blink settings */ + return -EINVAL; + } + return ses_set_page2_descriptor(edev, ecomp, desc); +} + +static int ses_show_id(struct enclosure_device *edev, char *buf) +{ + struct ses_device *ses_dev = edev->scratch; + unsigned long long id = get_unaligned_be64(ses_dev->page1+8+4); + + return sprintf(buf, "%#llx\n", id); +} + +static void ses_get_power_status(struct enclosure_device *edev, + struct enclosure_component *ecomp) +{ + unsigned char *desc; + + if (!ses_page2_supported(edev)) { + ecomp->power_status = 0; + return; + } + + desc = ses_get_page2_descriptor(edev, ecomp); + if (desc) + ecomp->power_status = (desc[3] & 0x10) ? 
0 : 1; +} + +static int ses_set_power_status(struct enclosure_device *edev, + struct enclosure_component *ecomp, + int val) +{ + unsigned char desc[4]; + unsigned char *desc_ptr; + + if (!ses_page2_supported(edev)) + return -EINVAL; + + desc_ptr = ses_get_page2_descriptor(edev, ecomp); + + if (!desc_ptr) + return -EIO; + + init_device_slot_control(desc, ecomp, desc_ptr); + + switch (val) { + /* power = 1 is device_off = 0 and vice versa */ + case 0: + desc[3] |= 0x10; + break; + case 1: + desc[3] &= 0xef; + break; + default: + return -EINVAL; + } + ecomp->power_status = val; + return ses_set_page2_descriptor(edev, ecomp, desc); +} + +static struct enclosure_component_callbacks ses_enclosure_callbacks = { + .get_fault = ses_get_fault, + .set_fault = ses_set_fault, + .get_status = ses_get_status, + .get_locate = ses_get_locate, + .set_locate = ses_set_locate, + .get_power_status = ses_get_power_status, + .set_power_status = ses_set_power_status, + .set_active = ses_set_active, + .show_id = ses_show_id, +}; + +struct ses_host_edev { + struct Scsi_Host *shost; + struct enclosure_device *edev; +}; + +#if 0 +int ses_match_host(struct enclosure_device *edev, void *data) +{ + struct ses_host_edev *sed = data; + struct scsi_device *sdev; + + if (!scsi_is_sdev_device(edev->edev.parent)) + return 0; + + sdev = to_scsi_device(edev->edev.parent); + + if (sdev->host != sed->shost) + return 0; + + sed->edev = edev; + return 1; +} +#endif /* 0 */ + +static int ses_process_descriptor(struct enclosure_component *ecomp, + unsigned char *desc, int max_desc_len) +{ + int eip = desc[0] & 0x10; + int invalid = desc[0] & 0x80; + enum scsi_protocol proto = desc[0] & 0x0f; + u64 addr = 0; + int slot = -1; + struct ses_component *scomp = ecomp->scratch; + unsigned char *d; + + if (invalid) + return 0; + + switch (proto) { + case SCSI_PROTOCOL_FCP: + if (eip) { + if (max_desc_len <= 7) + return 1; + d = desc + 4; + slot = d[3]; + } + break; + case SCSI_PROTOCOL_SAS: + + if (eip) { + if (max_desc_len <= 27) + return 1; + d = desc + 4; + slot = d[3]; + d = desc + 8; + } else { + if (max_desc_len <= 23) + return 1; + d = desc + 4; + } + + + /* only take the phy0 addr */ + addr = (u64)d[12] << 56 | + (u64)d[13] << 48 | + (u64)d[14] << 40 | + (u64)d[15] << 32 | + (u64)d[16] << 24 | + (u64)d[17] << 16 | + (u64)d[18] << 8 | + (u64)d[19]; + break; + default: + /* FIXME: Need to add more protocols than just SAS */ + break; + } + ecomp->slot = slot; + scomp->addr = addr; + + return 0; +} + +struct efd { + u64 addr; + struct device *dev; +}; + +static int ses_enclosure_find_by_addr(struct enclosure_device *edev, + void *data) +{ + struct efd *efd = data; + int i; + struct ses_component *scomp; + + for (i = 0; i < edev->components; i++) { + scomp = edev->component[i].scratch; + if (scomp->addr != efd->addr) + continue; + + if (enclosure_add_device(edev, i, efd->dev) == 0) + kobject_uevent(&efd->dev->kobj, KOBJ_CHANGE); + return 1; + } + return 0; +} + +#define INIT_ALLOC_SIZE 32 + +static void ses_enclosure_data_process(struct enclosure_device *edev, + struct scsi_device *sdev, + int create) +{ + u32 result; + unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL; + int i, j, page7_len, len, components; + struct ses_device *ses_dev = edev->scratch; + int types = ses_dev->page1_num_types; + unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL); + + if (!hdr_buf) + goto simple_populate; + + /* re-read page 10 */ + if (ses_dev->page10) + ses_recv_diag(sdev, 10, ses_dev->page10, ses_dev->page10_len); + /* 
Page 7 for the descriptors is optional */ + result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE); + if (result) + goto simple_populate; + + page7_len = len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; + /* add 1 for trailing '\0' we'll use */ + buf = kzalloc(len + 1, GFP_KERNEL); + if (!buf) + goto simple_populate; + result = ses_recv_diag(sdev, 7, buf, len); + if (result) { + simple_populate: + kfree(buf); + buf = NULL; + desc_ptr = NULL; + len = 0; + page7_len = 0; + } else { + desc_ptr = buf + 8; + len = (desc_ptr[2] << 8) + desc_ptr[3]; + /* skip past overall descriptor */ + desc_ptr += len + 4; + } + if (ses_dev->page10 && ses_dev->page10_len > 9) + addl_desc_ptr = ses_dev->page10 + 8; + type_ptr = ses_dev->page1_types; + components = 0; + for (i = 0; i < types; i++, type_ptr += 4) { + for (j = 0; j < type_ptr[1]; j++) { + char *name = NULL; + struct enclosure_component *ecomp; + int max_desc_len; + + if (desc_ptr) { + if (desc_ptr + 3 >= buf + page7_len) { + desc_ptr = NULL; + } else { + len = (desc_ptr[2] << 8) + desc_ptr[3]; + desc_ptr += 4; + if (desc_ptr + len > buf + page7_len) + desc_ptr = NULL; + else { + /* Add trailing zero - pushes into + * reserved space */ + desc_ptr[len] = '\0'; + name = desc_ptr; + } + } + } + if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || + type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) { + + if (create) + ecomp = enclosure_component_alloc( + edev, + components++, + type_ptr[0], + name); + else if (components < edev->components) + ecomp = &edev->component[components++]; + else + ecomp = ERR_PTR(-EINVAL); + + if (!IS_ERR(ecomp)) { + if (addl_desc_ptr) { + max_desc_len = ses_dev->page10_len - + (addl_desc_ptr - ses_dev->page10); + if (ses_process_descriptor(ecomp, + addl_desc_ptr, + max_desc_len)) + addl_desc_ptr = NULL; + } + if (create) + enclosure_component_register( + ecomp); + } + } + if (desc_ptr) + desc_ptr += len; + + if (addl_desc_ptr && + /* only find additional descriptions for specific devices */ + (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || + type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE || + type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER || + /* these elements are optional */ + type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT || + type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT || + type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) { + addl_desc_ptr += addl_desc_ptr[1] + 2; + if (addl_desc_ptr + 1 >= ses_dev->page10 + ses_dev->page10_len) + addl_desc_ptr = NULL; + } + } + } + kfree(buf); + kfree(hdr_buf); +} + +static void ses_match_to_enclosure(struct enclosure_device *edev, + struct scsi_device *sdev, + int refresh) +{ + struct scsi_device *edev_sdev = to_scsi_device(edev->edev.parent); + struct efd efd = { + .addr = 0, + }; + + if (refresh) + ses_enclosure_data_process(edev, edev_sdev, 0); + + if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent)) + efd.addr = sas_get_address(sdev); + + if (efd.addr) { + efd.dev = &sdev->sdev_gendev; + + enclosure_for_each_device(ses_enclosure_find_by_addr, &efd); + } +} + +static int ses_intf_add(struct device *cdev) +{ + struct scsi_device *sdev = to_scsi_device(cdev->parent); + struct scsi_device *tmp_sdev; + unsigned char *buf = NULL, *hdr_buf, *type_ptr, page; + struct ses_device *ses_dev; + u32 result; + int i, types, len, components = 0; + int err = -ENOMEM; + int num_enclosures; + struct enclosure_device *edev; + struct ses_component *scomp = NULL; + + if (!scsi_device_enclosure(sdev)) { + /* not an enclosure, but might be in one */ + struct enclosure_device *prev = NULL; 
+ + while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) { + ses_match_to_enclosure(edev, sdev, 1); + prev = edev; + } + return -ENODEV; + } + + /* TYPE_ENCLOSURE prints a message in probe */ + if (sdev->type != TYPE_ENCLOSURE) + sdev_printk(KERN_NOTICE, sdev, "Embedded Enclosure Device\n"); + + ses_dev = kzalloc(sizeof(*ses_dev), GFP_KERNEL); + hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL); + if (!hdr_buf || !ses_dev) + goto err_init_free; + + page = 1; + result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE); + if (result) + goto recv_failed; + + len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + goto err_free; + + result = ses_recv_diag(sdev, page, buf, len); + if (result) + goto recv_failed; + + types = 0; + + /* we always have one main enclosure and the rest are referred + * to as secondary subenclosures */ + num_enclosures = buf[1] + 1; + + /* begin at the enclosure descriptor */ + type_ptr = buf + 8; + /* skip all the enclosure descriptors */ + for (i = 0; i < num_enclosures && type_ptr < buf + len; i++) { + types += type_ptr[2]; + type_ptr += type_ptr[3] + 4; + } + + ses_dev->page1_types = type_ptr; + ses_dev->page1_num_types = types; + + for (i = 0; i < types && type_ptr < buf + len; i++, type_ptr += 4) { + if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || + type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) + components += type_ptr[1]; + } + + ses_dev->page1 = buf; + ses_dev->page1_len = len; + buf = NULL; + + page = 2; + result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE); + if (result) + goto page2_not_supported; + + len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + goto err_free; + + /* make sure getting page 2 actually works */ + result = ses_recv_diag(sdev, 2, buf, len); + if (result) + goto recv_failed; + ses_dev->page2 = buf; + ses_dev->page2_len = len; + buf = NULL; + + /* The additional information page --- allows us + * to match up the devices */ + page = 10; + result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE); + if (!result) { + + len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + goto err_free; + + result = ses_recv_diag(sdev, page, buf, len); + if (result) + goto recv_failed; + ses_dev->page10 = buf; + ses_dev->page10_len = len; + buf = NULL; + } +page2_not_supported: + if (components > 0) { + scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL); + if (!scomp) + goto err_free; + } + + edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev), + components, &ses_enclosure_callbacks); + if (IS_ERR(edev)) { + err = PTR_ERR(edev); + goto err_free; + } + + kfree(hdr_buf); + + edev->scratch = ses_dev; + for (i = 0; i < components; i++) + edev->component[i].scratch = scomp + i; + + ses_enclosure_data_process(edev, sdev, 1); + + /* see if there are any devices matching before + * we found the enclosure */ + shost_for_each_device(tmp_sdev, sdev->host) { + if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev)) + continue; + ses_match_to_enclosure(edev, tmp_sdev, 0); + } + + return 0; + + recv_failed: + sdev_printk(KERN_ERR, sdev, "Failed to get diagnostic page 0x%x\n", + page); + err = -ENODEV; + err_free: + kfree(buf); + kfree(scomp); + kfree(ses_dev->page10); + kfree(ses_dev->page2); + kfree(ses_dev->page1); + err_init_free: + kfree(ses_dev); + kfree(hdr_buf); + sdev_printk(KERN_ERR, sdev, "Failed to bind enclosure %d\n", err); + return err; +} + +static int ses_remove(struct device 
*dev) +{ + return 0; +} + +static void ses_intf_remove_component(struct scsi_device *sdev) +{ + struct enclosure_device *edev, *prev = NULL; + + while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) { + prev = edev; + if (!enclosure_remove_device(edev, &sdev->sdev_gendev)) + break; + } + if (edev) + put_device(&edev->edev); +} + +static void ses_intf_remove_enclosure(struct scsi_device *sdev) +{ + struct enclosure_device *edev; + struct ses_device *ses_dev; + + /* exact match to this enclosure */ + edev = enclosure_find(&sdev->sdev_gendev, NULL); + if (!edev) + return; + + ses_dev = edev->scratch; + edev->scratch = NULL; + + kfree(ses_dev->page10); + kfree(ses_dev->page1); + kfree(ses_dev->page2); + kfree(ses_dev); + + if (edev->components) + kfree(edev->component[0].scratch); + + put_device(&edev->edev); + enclosure_unregister(edev); +} + +static void ses_intf_remove(struct device *cdev) +{ + struct scsi_device *sdev = to_scsi_device(cdev->parent); + + if (!scsi_device_enclosure(sdev)) + ses_intf_remove_component(sdev); + else + ses_intf_remove_enclosure(sdev); +} + +static struct class_interface ses_interface = { + .add_dev = ses_intf_add, + .remove_dev = ses_intf_remove, +}; + +static struct scsi_driver ses_template = { + .gendrv = { + .name = "ses", + .owner = THIS_MODULE, + .probe = ses_probe, + .remove = ses_remove, + }, +}; + +static int __init ses_init(void) +{ + int err; + + err = scsi_register_interface(&ses_interface); + if (err) + return err; + + err = scsi_register_driver(&ses_template.gendrv); + if (err) + goto out_unreg; + + return 0; + + out_unreg: + scsi_unregister_interface(&ses_interface); + return err; +} + +static void __exit ses_exit(void) +{ + scsi_unregister_driver(&ses_template.gendrv); + scsi_unregister_interface(&ses_interface); +} + +module_init(ses_init); +module_exit(ses_exit); + +MODULE_ALIAS_SCSI_DEVICE(TYPE_ENCLOSURE); + +MODULE_AUTHOR("James Bottomley"); +MODULE_DESCRIPTION("SCSI Enclosure Services (ses) driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c new file mode 100644 index 000000000..0d8afffd1 --- /dev/null +++ b/drivers/scsi/sg.c @@ -0,0 +1,2630 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * History: + * Started: Aug 9 by Lawrence Foard (entropy@world.std.com), + * to allow user process control of SCSI devices. + * Development Sponsored by Killy Corp. NY NY + * + * Original driver (sg.c): + * Copyright (C) 1992 Lawrence Foard + * Version 2 and 3 extensions to driver: + * Copyright (C) 1998 - 2014 Douglas Gilbert + */ + +static int sg_version_num = 30536; /* 2 digits for each component */ +#define SG_VERSION_STR "3.5.36" + +/* + * D. P. Gilbert (dgilbert@interlog.com), notes: + * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First + * the kernel/module needs to be built with CONFIG_SCSI_LOGGING + * (otherwise the macros compile to empty statements). 
+ * + */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for sg_check_file_access() */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "scsi_logging.h" + +#ifdef CONFIG_SCSI_PROC_FS +#include +static char *sg_version_date = "20140603"; + +static int sg_proc_init(void); +#endif + +#define SG_ALLOW_DIO_DEF 0 + +#define SG_MAX_DEVS (1 << MINORBITS) + +/* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type + * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater + * than 16 bytes are "variable length" whose length is a multiple of 4 + */ +#define SG_MAX_CDB_SIZE 252 + +#define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ) + +static int sg_big_buff = SG_DEF_RESERVED_SIZE; +/* N.B. This variable is readable and writeable via + /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer + of this size (or less if there is not enough memory) will be reserved + for use by this file descriptor. [Deprecated usage: this variable is also + readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into + the kernel (i.e. it is not a module).] */ +static int def_reserved_size = -1; /* picks up init parameter */ +static int sg_allow_dio = SG_ALLOW_DIO_DEF; + +static int scatter_elem_sz = SG_SCATTER_SZ; +static int scatter_elem_sz_prev = SG_SCATTER_SZ; + +#define SG_SECTOR_SZ 512 + +static int sg_add_device(struct device *); +static void sg_remove_device(struct device *); + +static DEFINE_IDR(sg_index_idr); +static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock + file descriptor list for device */ + +static struct class_interface sg_interface = { + .add_dev = sg_add_device, + .remove_dev = sg_remove_device, +}; + +typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */ + unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */ + unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */ + unsigned bufflen; /* Size of (aggregate) data buffer */ + struct page **pages; + int page_order; + char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ + unsigned char cmd_opcode; /* first byte of command */ +} Sg_scatter_hold; + +struct sg_device; /* forward declarations */ +struct sg_fd; + +typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ + struct list_head entry; /* list entry */ + struct sg_fd *parentfp; /* NULL -> not in use */ + Sg_scatter_hold data; /* hold buffer, perhaps scatter list */ + sg_io_hdr_t header; /* scsi command+info, see */ + unsigned char sense_b[SCSI_SENSE_BUFFERSIZE]; + char res_used; /* 1 -> using reserve buffer, 0 -> not ... 
*/ + char orphan; /* 1 -> drop on sight, 0 -> normal */ + char sg_io_owned; /* 1 -> packet belongs to SG_IO */ + /* done protected by rq_list_lock */ + char done; /* 0->before bh, 1->before read, 2->read */ + struct request *rq; + struct bio *bio; + struct execute_work ew; +} Sg_request; + +typedef struct sg_fd { /* holds the state of a file descriptor */ + struct list_head sfd_siblings; /* protected by device's sfd_lock */ + struct sg_device *parentdp; /* owning device */ + wait_queue_head_t read_wait; /* queue read until command done */ + rwlock_t rq_list_lock; /* protect access to list in req_arr */ + struct mutex f_mutex; /* protect against changes in this fd */ + int timeout; /* defaults to SG_DEFAULT_TIMEOUT */ + int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */ + Sg_scatter_hold reserve; /* buffer held for this file descriptor */ + struct list_head rq_list; /* head of request list */ + struct fasync_struct *async_qp; /* used by asynchronous notification */ + Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */ + char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ + char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ + unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */ + char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ + char mmap_called; /* 0 -> mmap() never called on this fd */ + char res_in_use; /* 1 -> 'reserve' array in use */ + struct kref f_ref; + struct execute_work ew; +} Sg_fd; + +typedef struct sg_device { /* holds the state of each scsi generic device */ + struct scsi_device *device; + wait_queue_head_t open_wait; /* queue open() when O_EXCL present */ + struct mutex open_rel_lock; /* held when in open() or release() */ + int sg_tablesize; /* adapter's max scatter-gather table size */ + u32 index; /* device index number */ + struct list_head sfds; + rwlock_t sfd_lock; /* protect access to sfd list */ + atomic_t detaching; /* 0->device usable, 1->device detaching */ + bool exclude; /* 1->open(O_EXCL) succeeded and is active */ + int open_cnt; /* count of opens (perhaps < num(sfds) ) */ + char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ + char name[DISK_NAME_LEN]; + struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg] */ + struct kref d_ref; +} Sg_device; + +/* tasklet or soft irq callback */ +static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status); +static int sg_start_req(Sg_request *srp, unsigned char *cmd); +static int sg_finish_rem_req(Sg_request * srp); +static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); +static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, + Sg_request * srp); +static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, + const char __user *buf, size_t count, int blocking, + int read_only, int sg_io_owned, Sg_request **o_srp); +static int sg_common_write(Sg_fd * sfp, Sg_request * srp, + unsigned char *cmnd, int timeout, int blocking); +static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); +static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp); +static void sg_build_reserve(Sg_fd * sfp, int req_size); +static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); +static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); +static Sg_fd *sg_add_sfp(Sg_device * sdp); +static void sg_remove_sfp(struct kref *); +static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy); +static Sg_request *sg_add_request(Sg_fd * 
sfp); +static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); +static Sg_device *sg_get_dev(int dev); +static void sg_device_destroy(struct kref *kref); + +#define SZ_SG_HEADER sizeof(struct sg_header) +#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t) +#define SZ_SG_IOVEC sizeof(sg_iovec_t) +#define SZ_SG_REQ_INFO sizeof(sg_req_info_t) + +#define sg_printk(prefix, sdp, fmt, a...) \ + sdev_prefix_printk(prefix, (sdp)->device, (sdp)->name, fmt, ##a) + +/* + * The SCSI interfaces that use read() and write() as an asynchronous variant of + * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways + * to trigger read() and write() calls from various contexts with elevated + * privileges. This can lead to kernel memory corruption (e.g. if these + * interfaces are called through splice()) and privilege escalation inside + * userspace (e.g. if a process with access to such a device passes a file + * descriptor to a SUID binary as stdin/stdout/stderr). + * + * This function provides protection for the legacy API by restricting the + * calling context. + */ +static int sg_check_file_access(struct file *filp, const char *caller) +{ + if (filp->f_cred != current_real_cred()) { + pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", + caller, task_tgid_vnr(current), current->comm); + return -EPERM; + } + return 0; +} + +static int sg_allow_access(struct file *filp, unsigned char *cmd) +{ + struct sg_fd *sfp = filp->private_data; + + if (sfp->parentdp->device->type == TYPE_SCANNER) + return 0; + if (!scsi_cmd_allowed(cmd, filp->f_mode & FMODE_WRITE)) + return -EPERM; + return 0; +} + +static int +open_wait(Sg_device *sdp, int flags) +{ + int retval = 0; + + if (flags & O_EXCL) { + while (sdp->open_cnt > 0) { + mutex_unlock(&sdp->open_rel_lock); + retval = wait_event_interruptible(sdp->open_wait, + (atomic_read(&sdp->detaching) || + !sdp->open_cnt)); + mutex_lock(&sdp->open_rel_lock); + + if (retval) /* -ERESTARTSYS */ + return retval; + if (atomic_read(&sdp->detaching)) + return -ENODEV; + } + } else { + while (sdp->exclude) { + mutex_unlock(&sdp->open_rel_lock); + retval = wait_event_interruptible(sdp->open_wait, + (atomic_read(&sdp->detaching) || + !sdp->exclude)); + mutex_lock(&sdp->open_rel_lock); + + if (retval) /* -ERESTARTSYS */ + return retval; + if (atomic_read(&sdp->detaching)) + return -ENODEV; + } + } + + return retval; +} + +/* Returns 0 on success, else a negated errno value */ +static int +sg_open(struct inode *inode, struct file *filp) +{ + int dev = iminor(inode); + int flags = filp->f_flags; + struct request_queue *q; + Sg_device *sdp; + Sg_fd *sfp; + int retval; + + nonseekable_open(inode, filp); + if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) + return -EPERM; /* Can't lock it with read only access */ + sdp = sg_get_dev(dev); + if (IS_ERR(sdp)) + return PTR_ERR(sdp); + + SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, + "sg_open: flags=0x%x\n", flags)); + + /* This driver's module count bumped by fops_get in */ + /* Prevent the device driver from vanishing while we sleep */ + retval = scsi_device_get(sdp->device); + if (retval) + goto sg_put; + + retval = scsi_autopm_get_device(sdp->device); + if (retval) + goto sdp_put; + + /* scsi_block_when_processing_errors() may block so bypass + * check if O_NONBLOCK. Permits SCSI commands to be issued + * during error recovery. Tread carefully. 
*/ + if (!((flags & O_NONBLOCK) || + scsi_block_when_processing_errors(sdp->device))) { + retval = -ENXIO; + /* we are in error recovery for this device */ + goto error_out; + } + + mutex_lock(&sdp->open_rel_lock); + if (flags & O_NONBLOCK) { + if (flags & O_EXCL) { + if (sdp->open_cnt > 0) { + retval = -EBUSY; + goto error_mutex_locked; + } + } else { + if (sdp->exclude) { + retval = -EBUSY; + goto error_mutex_locked; + } + } + } else { + retval = open_wait(sdp, flags); + if (retval) /* -ERESTARTSYS or -ENODEV */ + goto error_mutex_locked; + } + + /* N.B. at this point we are holding the open_rel_lock */ + if (flags & O_EXCL) + sdp->exclude = true; + + if (sdp->open_cnt < 1) { /* no existing opens */ + sdp->sgdebug = 0; + q = sdp->device->request_queue; + sdp->sg_tablesize = queue_max_segments(q); + } + sfp = sg_add_sfp(sdp); + if (IS_ERR(sfp)) { + retval = PTR_ERR(sfp); + goto out_undo; + } + + filp->private_data = sfp; + sdp->open_cnt++; + mutex_unlock(&sdp->open_rel_lock); + + retval = 0; +sg_put: + kref_put(&sdp->d_ref, sg_device_destroy); + return retval; + +out_undo: + if (flags & O_EXCL) { + sdp->exclude = false; /* undo if error */ + wake_up_interruptible(&sdp->open_wait); + } +error_mutex_locked: + mutex_unlock(&sdp->open_rel_lock); +error_out: + scsi_autopm_put_device(sdp->device); +sdp_put: + scsi_device_put(sdp->device); + goto sg_put; +} + +/* Release resources associated with a successful sg_open() + * Returns 0 on success, else a negated errno value */ +static int +sg_release(struct inode *inode, struct file *filp) +{ + Sg_device *sdp; + Sg_fd *sfp; + + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) + return -ENXIO; + SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n")); + + mutex_lock(&sdp->open_rel_lock); + scsi_autopm_put_device(sdp->device); + kref_put(&sfp->f_ref, sg_remove_sfp); + sdp->open_cnt--; + + /* possibly many open()s waiting on exclude clearing, start many; + * only open(O_EXCL)s wait on 0==open_cnt so only start one */ + if (sdp->exclude) { + sdp->exclude = false; + wake_up_interruptible_all(&sdp->open_wait); + } else if (0 == sdp->open_cnt) { + wake_up_interruptible(&sdp->open_wait); + } + mutex_unlock(&sdp->open_rel_lock); + return 0; +} + +static int get_sg_io_pack_id(int *pack_id, void __user *buf, size_t count) +{ + struct sg_header __user *old_hdr = buf; + int reply_len; + + if (count >= SZ_SG_HEADER) { + /* negative reply_len means v3 format, otherwise v1/v2 */ + if (get_user(reply_len, &old_hdr->reply_len)) + return -EFAULT; + + if (reply_len >= 0) + return get_user(*pack_id, &old_hdr->pack_id); + + if (in_compat_syscall() && + count >= sizeof(struct compat_sg_io_hdr)) { + struct compat_sg_io_hdr __user *hp = buf; + + return get_user(*pack_id, &hp->pack_id); + } + + if (count >= sizeof(struct sg_io_hdr)) { + struct sg_io_hdr __user *hp = buf; + + return get_user(*pack_id, &hp->pack_id); + } + } + + /* no valid header was passed, so ignore the pack_id */ + *pack_id = -1; + return 0; +} + +static ssize_t +sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) +{ + Sg_device *sdp; + Sg_fd *sfp; + Sg_request *srp; + int req_pack_id = -1; + bool busy; + sg_io_hdr_t *hp; + struct sg_header *old_hdr; + int retval; + + /* + * This could cause a response to be stranded. Close the associated + * file descriptor to free up any resources being held.
+ */ + retval = sg_check_file_access(filp, __func__); + if (retval) + return retval; + + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) + return -ENXIO; + SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, + "sg_read: count=%d\n", (int) count)); + + if (sfp->force_packid) + retval = get_sg_io_pack_id(&req_pack_id, buf, count); + if (retval) + return retval; + + srp = sg_get_rq_mark(sfp, req_pack_id, &busy); + if (!srp) { /* now wait on packet to arrive */ + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + retval = wait_event_interruptible(sfp->read_wait, + ((srp = sg_get_rq_mark(sfp, req_pack_id, &busy)) || + (!busy && atomic_read(&sdp->detaching)))); + if (!srp) + /* signal or detaching */ + return retval ? retval : -ENODEV; + } + if (srp->header.interface_id != '\0') + return sg_new_read(sfp, buf, count, srp); + + hp = &srp->header; + old_hdr = kzalloc(SZ_SG_HEADER, GFP_KERNEL); + if (!old_hdr) + return -ENOMEM; + + old_hdr->reply_len = (int) hp->timeout; + old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */ + old_hdr->pack_id = hp->pack_id; + old_hdr->twelve_byte = + ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0; + old_hdr->target_status = hp->masked_status; + old_hdr->host_status = hp->host_status; + old_hdr->driver_status = hp->driver_status; + if ((CHECK_CONDITION & hp->masked_status) || + (srp->sense_b[0] & 0x70) == 0x70) { + old_hdr->driver_status = DRIVER_SENSE; + memcpy(old_hdr->sense_buffer, srp->sense_b, + sizeof (old_hdr->sense_buffer)); + } + switch (hp->host_status) { + /* This setup of 'result' is for backward compatibility and is best + ignored by the user who should use target, host + driver status */ + case DID_OK: + case DID_PASSTHROUGH: + case DID_SOFT_ERROR: + old_hdr->result = 0; + break; + case DID_NO_CONNECT: + case DID_BUS_BUSY: + case DID_TIME_OUT: + old_hdr->result = EBUSY; + break; + case DID_BAD_TARGET: + case DID_ABORT: + case DID_PARITY: + case DID_RESET: + case DID_BAD_INTR: + old_hdr->result = EIO; + break; + case DID_ERROR: + old_hdr->result = (srp->sense_b[0] == 0 && + hp->masked_status == GOOD) ? 0 : EIO; + break; + default: + old_hdr->result = EIO; + break; + } + + /* Now copy the result back to the user buffer. */ + if (count >= SZ_SG_HEADER) { + if (copy_to_user(buf, old_hdr, SZ_SG_HEADER)) { + retval = -EFAULT; + goto free_old_hdr; + } + buf += SZ_SG_HEADER; + if (count > old_hdr->reply_len) + count = old_hdr->reply_len; + if (count > SZ_SG_HEADER) { + if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) { + retval = -EFAULT; + goto free_old_hdr; + } + } + } else + count = (old_hdr->result == 0) ? 0 : -EIO; + sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); + retval = count; +free_old_hdr: + kfree(old_hdr); + return retval; +} + +static ssize_t +sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) +{ + sg_io_hdr_t *hp = &srp->header; + int err = 0, err2; + int len; + + if (in_compat_syscall()) { + if (count < sizeof(struct compat_sg_io_hdr)) { + err = -EINVAL; + goto err_out; + } + } else if (count < SZ_SG_IO_HDR) { + err = -EINVAL; + goto err_out; + } + hp->sb_len_wr = 0; + if ((hp->mx_sb_len > 0) && hp->sbp) { + if ((CHECK_CONDITION & hp->masked_status) || + (srp->sense_b[0] & 0x70) == 0x70) { + int sb_len = SCSI_SENSE_BUFFERSIZE; + sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len; + len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */ + len = (len > sb_len) ? 
sb_len : len; + if (copy_to_user(hp->sbp, srp->sense_b, len)) { + err = -EFAULT; + goto err_out; + } + hp->driver_status = DRIVER_SENSE; + hp->sb_len_wr = len; + } + } + if (hp->masked_status || hp->host_status || hp->driver_status) + hp->info |= SG_INFO_CHECK; + err = put_sg_io_hdr(hp, buf); +err_out: + err2 = sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); + return err ? : err2 ? : count; +} + +static ssize_t +sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) +{ + int mxsize, cmd_size, k; + int input_size, blocking; + unsigned char opcode; + Sg_device *sdp; + Sg_fd *sfp; + Sg_request *srp; + struct sg_header old_hdr; + sg_io_hdr_t *hp; + unsigned char cmnd[SG_MAX_CDB_SIZE]; + int retval; + + retval = sg_check_file_access(filp, __func__); + if (retval) + return retval; + + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) + return -ENXIO; + SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, + "sg_write: count=%d\n", (int) count)); + if (atomic_read(&sdp->detaching)) + return -ENODEV; + if (!((filp->f_flags & O_NONBLOCK) || + scsi_block_when_processing_errors(sdp->device))) + return -ENXIO; + + if (count < SZ_SG_HEADER) + return -EIO; + if (copy_from_user(&old_hdr, buf, SZ_SG_HEADER)) + return -EFAULT; + blocking = !(filp->f_flags & O_NONBLOCK); + if (old_hdr.reply_len < 0) + return sg_new_write(sfp, filp, buf, count, + blocking, 0, 0, NULL); + if (count < (SZ_SG_HEADER + 6)) + return -EIO; /* The minimum scsi command length is 6 bytes. */ + + buf += SZ_SG_HEADER; + if (get_user(opcode, buf)) + return -EFAULT; + + if (!(srp = sg_add_request(sfp))) { + SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp, + "sg_write: queue full\n")); + return -EDOM; + } + mutex_lock(&sfp->f_mutex); + if (sfp->next_cmd_len > 0) { + cmd_size = sfp->next_cmd_len; + sfp->next_cmd_len = 0; /* reset so only this write() effected */ + } else { + cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */ + if ((opcode >= 0xc0) && old_hdr.twelve_byte) + cmd_size = 12; + } + mutex_unlock(&sfp->f_mutex); + SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, + "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); +/* Determine buffer size. */ + input_size = count - cmd_size; + mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len; + mxsize -= SZ_SG_HEADER; + input_size -= SZ_SG_HEADER; + if (input_size < 0) { + sg_remove_request(sfp, srp); + return -EIO; /* User did not pass enough bytes for this command. */ + } + hp = &srp->header; + hp->interface_id = '\0'; /* indicator of old interface tunnelled */ + hp->cmd_len = (unsigned char) cmd_size; + hp->iovec_count = 0; + hp->mx_sb_len = 0; + if (input_size > 0) + hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ? + SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV; + else + hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; + hp->dxfer_len = mxsize; + if ((hp->dxfer_direction == SG_DXFER_TO_DEV) || + (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)) + hp->dxferp = (char __user *)buf + cmd_size; + else + hp->dxferp = NULL; + hp->sbp = NULL; + hp->timeout = old_hdr.reply_len; /* structure abuse ... */ + hp->flags = input_size; /* structure abuse ... 
*/ + hp->pack_id = old_hdr.pack_id; + hp->usr_ptr = NULL; + if (copy_from_user(cmnd, buf, cmd_size)) { + sg_remove_request(sfp, srp); + return -EFAULT; + } + /* + * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV, + * but it is possible that the app intended SG_DXFER_TO_DEV, because there + * is a non-zero input_size, so emit a warning. + */ + if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) { + printk_ratelimited(KERN_WARNING + "sg_write: data in/out %d/%d bytes " + "for SCSI command 0x%x-- guessing " + "data in;\n program %s not setting " + "count and/or reply_len properly\n", + old_hdr.reply_len - (int)SZ_SG_HEADER, + input_size, (unsigned int) cmnd[0], + current->comm); + } + k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); + return (k < 0) ? k : count; +} + +static ssize_t +sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, + size_t count, int blocking, int read_only, int sg_io_owned, + Sg_request **o_srp) +{ + int k; + Sg_request *srp; + sg_io_hdr_t *hp; + unsigned char cmnd[SG_MAX_CDB_SIZE]; + int timeout; + unsigned long ul_timeout; + + if (count < SZ_SG_IO_HDR) + return -EINVAL; + + sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ + if (!(srp = sg_add_request(sfp))) { + SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, + "sg_new_write: queue full\n")); + return -EDOM; + } + srp->sg_io_owned = sg_io_owned; + hp = &srp->header; + if (get_sg_io_hdr(hp, buf)) { + sg_remove_request(sfp, srp); + return -EFAULT; + } + if (hp->interface_id != 'S') { + sg_remove_request(sfp, srp); + return -ENOSYS; + } + if (hp->flags & SG_FLAG_MMAP_IO) { + if (hp->dxfer_len > sfp->reserve.bufflen) { + sg_remove_request(sfp, srp); + return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */ + } + if (hp->flags & SG_FLAG_DIRECT_IO) { + sg_remove_request(sfp, srp); + return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */ + } + if (sfp->res_in_use) { + sg_remove_request(sfp, srp); + return -EBUSY; /* reserve buffer already being used */ + } + } + ul_timeout = msecs_to_jiffies(srp->header.timeout); + timeout = (ul_timeout < INT_MAX) ? 
ul_timeout : INT_MAX; + if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) { + sg_remove_request(sfp, srp); + return -EMSGSIZE; + } + if (copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) { + sg_remove_request(sfp, srp); + return -EFAULT; + } + if (read_only && sg_allow_access(file, cmnd)) { + sg_remove_request(sfp, srp); + return -EPERM; + } + k = sg_common_write(sfp, srp, cmnd, timeout, blocking); + if (k < 0) + return k; + if (o_srp) + *o_srp = srp; + return count; +} + +static int +sg_common_write(Sg_fd * sfp, Sg_request * srp, + unsigned char *cmnd, int timeout, int blocking) +{ + int k, at_head; + Sg_device *sdp = sfp->parentdp; + sg_io_hdr_t *hp = &srp->header; + + srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */ + hp->status = 0; + hp->masked_status = 0; + hp->msg_status = 0; + hp->info = 0; + hp->host_status = 0; + hp->driver_status = 0; + hp->resid = 0; + SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, + "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", + (int) cmnd[0], (int) hp->cmd_len)); + + if (hp->dxfer_len >= SZ_256M) { + sg_remove_request(sfp, srp); + return -EINVAL; + } + + k = sg_start_req(srp, cmnd); + if (k) { + SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, + "sg_common_write: start_req err=%d\n", k)); + sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); + return k; /* probably out of space --> ENOMEM */ + } + if (atomic_read(&sdp->detaching)) { + if (srp->bio) { + blk_mq_free_request(srp->rq); + srp->rq = NULL; + } + + sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); + return -ENODEV; + } + + hp->duration = jiffies_to_msecs(jiffies); + if (hp->interface_id != '\0' && /* v3 (or later) interface */ + (SG_FLAG_Q_AT_TAIL & hp->flags)) + at_head = 0; + else + at_head = 1; + + srp->rq->timeout = timeout; + kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */ + srp->rq->end_io = sg_rq_end_io; + blk_execute_rq_nowait(srp->rq, at_head); + return 0; +} + +static int srp_done(Sg_fd *sfp, Sg_request *srp) +{ + unsigned long flags; + int ret; + + read_lock_irqsave(&sfp->rq_list_lock, flags); + ret = srp->done; + read_unlock_irqrestore(&sfp->rq_list_lock, flags); + return ret; +} + +static int max_sectors_bytes(struct request_queue *q) +{ + unsigned int max_sectors = queue_max_sectors(q); + + max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9); + + return max_sectors << 9; +} + +static void +sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo) +{ + Sg_request *srp; + int val; + unsigned int ms; + + val = 0; + list_for_each_entry(srp, &sfp->rq_list, entry) { + if (val >= SG_MAX_QUEUE) + break; + rinfo[val].req_state = srp->done + 1; + rinfo[val].problem = + srp->header.masked_status & + srp->header.host_status & + srp->header.driver_status; + if (srp->done) + rinfo[val].duration = + srp->header.duration; + else { + ms = jiffies_to_msecs(jiffies); + rinfo[val].duration = + (ms > srp->header.duration) ? 
+ (ms - srp->header.duration) : 0; + } + rinfo[val].orphan = srp->orphan; + rinfo[val].sg_io_owned = srp->sg_io_owned; + rinfo[val].pack_id = srp->header.pack_id; + rinfo[val].usr_ptr = srp->header.usr_ptr; + val++; + } +} + +#ifdef CONFIG_COMPAT +struct compat_sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */ + char req_state; + char orphan; + char sg_io_owned; + char problem; + int pack_id; + compat_uptr_t usr_ptr; + unsigned int duration; + int unused; +}; + +static int put_compat_request_table(struct compat_sg_req_info __user *o, + struct sg_req_info *rinfo) +{ + int i; + for (i = 0; i < SG_MAX_QUEUE; i++) { + if (copy_to_user(o + i, rinfo + i, offsetof(sg_req_info_t, usr_ptr)) || + put_user((uintptr_t)rinfo[i].usr_ptr, &o[i].usr_ptr) || + put_user(rinfo[i].duration, &o[i].duration) || + put_user(rinfo[i].unused, &o[i].unused)) + return -EFAULT; + } + return 0; +} +#endif + +static long +sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp, + unsigned int cmd_in, void __user *p) +{ + int __user *ip = p; + int result, val, read_only; + Sg_request *srp; + unsigned long iflags; + + SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, + "sg_ioctl: cmd=0x%x\n", (int) cmd_in)); + read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); + + switch (cmd_in) { + case SG_IO: + if (atomic_read(&sdp->detaching)) + return -ENODEV; + if (!scsi_block_when_processing_errors(sdp->device)) + return -ENXIO; + result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR, + 1, read_only, 1, &srp); + if (result < 0) + return result; + result = wait_event_interruptible(sfp->read_wait, + srp_done(sfp, srp)); + write_lock_irq(&sfp->rq_list_lock); + if (srp->done) { + srp->done = 2; + write_unlock_irq(&sfp->rq_list_lock); + result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp); + return (result < 0) ? result : 0; + } + srp->orphan = 1; + write_unlock_irq(&sfp->rq_list_lock); + return result; /* -ERESTARTSYS because signal hit process */ + case SG_SET_TIMEOUT: + result = get_user(val, ip); + if (result) + return result; + if (val < 0) + return -EIO; + if (val >= mult_frac((s64)INT_MAX, USER_HZ, HZ)) + val = min_t(s64, mult_frac((s64)INT_MAX, USER_HZ, HZ), + INT_MAX); + sfp->timeout_user = val; + sfp->timeout = mult_frac(val, HZ, USER_HZ); + + return 0; + case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */ + /* strange ..., for backward compatibility */ + return sfp->timeout_user; + case SG_SET_FORCE_LOW_DMA: + /* + * N.B. This ioctl never worked properly, but failed to + * return an error value. So returning '0' to keep compatibility + * with legacy applications. + */ + return 0; + case SG_GET_LOW_DMA: + return put_user(0, ip); + case SG_GET_SCSI_ID: + { + sg_scsi_id_t v; + + if (atomic_read(&sdp->detaching)) + return -ENODEV; + memset(&v, 0, sizeof(v)); + v.host_no = sdp->device->host->host_no; + v.channel = sdp->device->channel; + v.scsi_id = sdp->device->id; + v.lun = sdp->device->lun; + v.scsi_type = sdp->device->type; + v.h_cmd_per_lun = sdp->device->host->cmd_per_lun; + v.d_queue_depth = sdp->device->queue_depth; + if (copy_to_user(p, &v, sizeof(sg_scsi_id_t))) + return -EFAULT; + return 0; + } + case SG_SET_FORCE_PACK_ID: + result = get_user(val, ip); + if (result) + return result; + sfp->force_packid = val ? 
1 : 0; + return 0; + case SG_GET_PACK_ID: + read_lock_irqsave(&sfp->rq_list_lock, iflags); + list_for_each_entry(srp, &sfp->rq_list, entry) { + if ((1 == srp->done) && (!srp->sg_io_owned)) { + read_unlock_irqrestore(&sfp->rq_list_lock, + iflags); + return put_user(srp->header.pack_id, ip); + } + } + read_unlock_irqrestore(&sfp->rq_list_lock, iflags); + return put_user(-1, ip); + case SG_GET_NUM_WAITING: + read_lock_irqsave(&sfp->rq_list_lock, iflags); + val = 0; + list_for_each_entry(srp, &sfp->rq_list, entry) { + if ((1 == srp->done) && (!srp->sg_io_owned)) + ++val; + } + read_unlock_irqrestore(&sfp->rq_list_lock, iflags); + return put_user(val, ip); + case SG_GET_SG_TABLESIZE: + return put_user(sdp->sg_tablesize, ip); + case SG_SET_RESERVED_SIZE: + result = get_user(val, ip); + if (result) + return result; + if (val < 0) + return -EINVAL; + val = min_t(int, val, + max_sectors_bytes(sdp->device->request_queue)); + mutex_lock(&sfp->f_mutex); + if (val != sfp->reserve.bufflen) { + if (sfp->mmap_called || + sfp->res_in_use) { + mutex_unlock(&sfp->f_mutex); + return -EBUSY; + } + + sg_remove_scat(sfp, &sfp->reserve); + sg_build_reserve(sfp, val); + } + mutex_unlock(&sfp->f_mutex); + return 0; + case SG_GET_RESERVED_SIZE: + val = min_t(int, sfp->reserve.bufflen, + max_sectors_bytes(sdp->device->request_queue)); + return put_user(val, ip); + case SG_SET_COMMAND_Q: + result = get_user(val, ip); + if (result) + return result; + sfp->cmd_q = val ? 1 : 0; + return 0; + case SG_GET_COMMAND_Q: + return put_user((int) sfp->cmd_q, ip); + case SG_SET_KEEP_ORPHAN: + result = get_user(val, ip); + if (result) + return result; + sfp->keep_orphan = val; + return 0; + case SG_GET_KEEP_ORPHAN: + return put_user((int) sfp->keep_orphan, ip); + case SG_NEXT_CMD_LEN: + result = get_user(val, ip); + if (result) + return result; + if (val > SG_MAX_CDB_SIZE) + return -ENOMEM; + sfp->next_cmd_len = (val > 0) ? val : 0; + return 0; + case SG_GET_VERSION_NUM: + return put_user(sg_version_num, ip); + case SG_GET_ACCESS_COUNT: + /* faked - we don't have a real access count anymore */ + val = (sdp->device ? 1 : 0); + return put_user(val, ip); + case SG_GET_REQUEST_TABLE: + { + sg_req_info_t *rinfo; + + rinfo = kcalloc(SG_MAX_QUEUE, SZ_SG_REQ_INFO, + GFP_KERNEL); + if (!rinfo) + return -ENOMEM; + read_lock_irqsave(&sfp->rq_list_lock, iflags); + sg_fill_request_table(sfp, rinfo); + read_unlock_irqrestore(&sfp->rq_list_lock, iflags); + #ifdef CONFIG_COMPAT + if (in_compat_syscall()) + result = put_compat_request_table(p, rinfo); + else + #endif + result = copy_to_user(p, rinfo, + SZ_SG_REQ_INFO * SG_MAX_QUEUE); + result = result ? 
-EFAULT : 0; + kfree(rinfo); + return result; + } + case SG_EMULATED_HOST: + if (atomic_read(&sdp->detaching)) + return -ENODEV; + return put_user(sdp->device->host->hostt->emulated, ip); + case SCSI_IOCTL_SEND_COMMAND: + if (atomic_read(&sdp->detaching)) + return -ENODEV; + return scsi_ioctl(sdp->device, filp->f_mode & FMODE_WRITE, + cmd_in, p); + case SG_SET_DEBUG: + result = get_user(val, ip); + if (result) + return result; + sdp->sgdebug = (char) val; + return 0; + case BLKSECTGET: + return put_user(max_sectors_bytes(sdp->device->request_queue), + ip); + case BLKTRACESETUP: + return blk_trace_setup(sdp->device->request_queue, sdp->name, + MKDEV(SCSI_GENERIC_MAJOR, sdp->index), + NULL, p); + case BLKTRACESTART: + return blk_trace_startstop(sdp->device->request_queue, 1); + case BLKTRACESTOP: + return blk_trace_startstop(sdp->device->request_queue, 0); + case BLKTRACETEARDOWN: + return blk_trace_remove(sdp->device->request_queue); + case SCSI_IOCTL_GET_IDLUN: + case SCSI_IOCTL_GET_BUS_NUMBER: + case SCSI_IOCTL_PROBE_HOST: + case SG_GET_TRANSFORM: + case SG_SCSI_RESET: + if (atomic_read(&sdp->detaching)) + return -ENODEV; + break; + default: + if (read_only) + return -EPERM; /* don't know so take safe approach */ + break; + } + + result = scsi_ioctl_block_when_processing_errors(sdp->device, + cmd_in, filp->f_flags & O_NDELAY); + if (result) + return result; + + return -ENOIOCTLCMD; +} + +static long +sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) +{ + void __user *p = (void __user *)arg; + Sg_device *sdp; + Sg_fd *sfp; + int ret; + + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) + return -ENXIO; + + ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p); + if (ret != -ENOIOCTLCMD) + return ret; + return scsi_ioctl(sdp->device, filp->f_mode & FMODE_WRITE, cmd_in, p); +} + +static __poll_t +sg_poll(struct file *filp, poll_table * wait) +{ + __poll_t res = 0; + Sg_device *sdp; + Sg_fd *sfp; + Sg_request *srp; + int count = 0; + unsigned long iflags; + + sfp = filp->private_data; + if (!sfp) + return EPOLLERR; + sdp = sfp->parentdp; + if (!sdp) + return EPOLLERR; + poll_wait(filp, &sfp->read_wait, wait); + read_lock_irqsave(&sfp->rq_list_lock, iflags); + list_for_each_entry(srp, &sfp->rq_list, entry) { + /* if any read waiting, flag it */ + if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned)) + res = EPOLLIN | EPOLLRDNORM; + ++count; + } + read_unlock_irqrestore(&sfp->rq_list_lock, iflags); + + if (atomic_read(&sdp->detaching)) + res |= EPOLLHUP; + else if (!sfp->cmd_q) { + if (0 == count) + res |= EPOLLOUT | EPOLLWRNORM; + } else if (count < SG_MAX_QUEUE) + res |= EPOLLOUT | EPOLLWRNORM; + SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, + "sg_poll: res=0x%x\n", (__force u32) res)); + return res; +} + +static int +sg_fasync(int fd, struct file *filp, int mode) +{ + Sg_device *sdp; + Sg_fd *sfp; + + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) + return -ENXIO; + SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, + "sg_fasync: mode=%d\n", mode)); + + return fasync_helper(fd, filp, mode, &sfp->async_qp); +} + +static vm_fault_t +sg_vma_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + Sg_fd *sfp; + unsigned long offset, len, sa; + Sg_scatter_hold *rsv_schp; + int k, length; + + if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) + return VM_FAULT_SIGBUS; + rsv_schp = &sfp->reserve; + offset = vmf->pgoff << PAGE_SHIFT; + if (offset >= rsv_schp->bufflen) + return VM_FAULT_SIGBUS; + 
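+	/*
+	 * The faulting offset lies inside the reserve buffer.  The loop below
+	 * walks the reserve buffer's page array, where each entry covers
+	 * 1 << (PAGE_SHIFT + rsv_schp->page_order) bytes, and pins the page
+	 * that backs the faulting address (returned via vmf->page).
+	 */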
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp, + "sg_vma_fault: offset=%lu, scatg=%d\n", + offset, rsv_schp->k_use_sg)); + sa = vma->vm_start; + length = 1 << (PAGE_SHIFT + rsv_schp->page_order); + for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { + len = vma->vm_end - sa; + len = (len < length) ? len : length; + if (offset < len) { + struct page *page = nth_page(rsv_schp->pages[k], + offset >> PAGE_SHIFT); + get_page(page); /* increment page count */ + vmf->page = page; + return 0; /* success */ + } + sa += len; + offset -= len; + } + + return VM_FAULT_SIGBUS; +} + +static const struct vm_operations_struct sg_mmap_vm_ops = { + .fault = sg_vma_fault, +}; + +static int +sg_mmap(struct file *filp, struct vm_area_struct *vma) +{ + Sg_fd *sfp; + unsigned long req_sz, len, sa; + Sg_scatter_hold *rsv_schp; + int k, length; + int ret = 0; + + if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) + return -ENXIO; + req_sz = vma->vm_end - vma->vm_start; + SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp, + "sg_mmap starting, vm_start=%p, len=%d\n", + (void *) vma->vm_start, (int) req_sz)); + if (vma->vm_pgoff) + return -EINVAL; /* want no offset */ + rsv_schp = &sfp->reserve; + mutex_lock(&sfp->f_mutex); + if (req_sz > rsv_schp->bufflen) { + ret = -ENOMEM; /* cannot map more than reserved buffer */ + goto out; + } + + sa = vma->vm_start; + length = 1 << (PAGE_SHIFT + rsv_schp->page_order); + for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { + len = vma->vm_end - sa; + len = (len < length) ? len : length; + sa += len; + } + + sfp->mmap_called = 1; + vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP); + vma->vm_private_data = sfp; + vma->vm_ops = &sg_mmap_vm_ops; +out: + mutex_unlock(&sfp->f_mutex); + return ret; +} + +static void +sg_rq_end_io_usercontext(struct work_struct *work) +{ + struct sg_request *srp = container_of(work, struct sg_request, ew.work); + struct sg_fd *sfp = srp->parentfp; + + sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); + kref_put(&sfp->f_ref, sg_remove_sfp); +} + +/* + * This function is a "bottom half" handler that is called by the mid + * level when a command is completed (or has failed). + */ +static enum rq_end_io_ret +sg_rq_end_io(struct request *rq, blk_status_t status) +{ + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); + struct sg_request *srp = rq->end_io_data; + Sg_device *sdp; + Sg_fd *sfp; + unsigned long iflags; + unsigned int ms; + char *sense; + int result, resid, done = 1; + + if (WARN_ON(srp->done != 0)) + return RQ_END_IO_NONE; + + sfp = srp->parentfp; + if (WARN_ON(sfp == NULL)) + return RQ_END_IO_NONE; + + sdp = sfp->parentdp; + if (unlikely(atomic_read(&sdp->detaching))) + pr_info("%s: device detaching\n", __func__); + + sense = scmd->sense_buffer; + result = scmd->result; + resid = scmd->resid_len; + + SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, + "sg_cmd_done: pack_id=%d, res=0x%x\n", + srp->header.pack_id, result)); + srp->header.resid = resid; + ms = jiffies_to_msecs(jiffies); + srp->header.duration = (ms > srp->header.duration) ? 
+ (ms - srp->header.duration) : 0; + if (0 != result) { + struct scsi_sense_hdr sshdr; + + srp->header.status = 0xff & result; + srp->header.masked_status = sg_status_byte(result); + srp->header.msg_status = COMMAND_COMPLETE; + srp->header.host_status = host_byte(result); + srp->header.driver_status = driver_byte(result); + if ((sdp->sgdebug > 0) && + ((CHECK_CONDITION == srp->header.masked_status) || + (COMMAND_TERMINATED == srp->header.masked_status))) + __scsi_print_sense(sdp->device, __func__, sense, + SCSI_SENSE_BUFFERSIZE); + + /* Following if statement is a patch supplied by Eric Youngdale */ + if (driver_byte(result) != 0 + && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr) + && !scsi_sense_is_deferred(&sshdr) + && sshdr.sense_key == UNIT_ATTENTION + && sdp->device->removable) { + /* Detected possible disc change. Set the bit - this */ + /* may be used if there are filesystems using this device */ + sdp->device->changed = 1; + } + } + + if (scmd->sense_len) + memcpy(srp->sense_b, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); + + /* Rely on write phase to clean out srp status values, so no "else" */ + + /* + * Free the request as soon as it is complete so that its resources + * can be reused without waiting for userspace to read() the + * result. But keep the associated bio (if any) around until + * blk_rq_unmap_user() can be called from user context. + */ + srp->rq = NULL; + blk_mq_free_request(rq); + + write_lock_irqsave(&sfp->rq_list_lock, iflags); + if (unlikely(srp->orphan)) { + if (sfp->keep_orphan) + srp->sg_io_owned = 0; + else + done = 0; + } + srp->done = done; + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + + if (likely(done)) { + /* Now wake up any sg_read() that is waiting for this + * packet. + */ + wake_up_interruptible(&sfp->read_wait); + kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN); + kref_put(&sfp->f_ref, sg_remove_sfp); + } else { + INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext); + schedule_work(&srp->ew.work); + } + return RQ_END_IO_NONE; +} + +static const struct file_operations sg_fops = { + .owner = THIS_MODULE, + .read = sg_read, + .write = sg_write, + .poll = sg_poll, + .unlocked_ioctl = sg_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .open = sg_open, + .mmap = sg_mmap, + .release = sg_release, + .fasync = sg_fasync, + .llseek = no_llseek, +}; + +static struct class *sg_sysfs_class; + +static int sg_sysfs_valid = 0; + +static Sg_device * +sg_alloc(struct scsi_device *scsidp) +{ + struct request_queue *q = scsidp->request_queue; + Sg_device *sdp; + unsigned long iflags; + int error; + u32 k; + + sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL); + if (!sdp) { + sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device " + "failure\n", __func__); + return ERR_PTR(-ENOMEM); + } + + idr_preload(GFP_KERNEL); + write_lock_irqsave(&sg_index_lock, iflags); + + error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT); + if (error < 0) { + if (error == -ENOSPC) { + sdev_printk(KERN_WARNING, scsidp, + "Unable to attach sg device type=%d, minor number exceeds %d\n", + scsidp->type, SG_MAX_DEVS - 1); + error = -ENODEV; + } else { + sdev_printk(KERN_WARNING, scsidp, "%s: idr " + "allocation Sg_device failure: %d\n", + __func__, error); + } + goto out_unlock; + } + k = error; + + SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp, + "sg_alloc: dev=%d \n", k)); + sprintf(sdp->name, "sg%d", k); + sdp->device = scsidp; + mutex_init(&sdp->open_rel_lock); + INIT_LIST_HEAD(&sdp->sfds); + init_waitqueue_head(&sdp->open_wait); + atomic_set(&sdp->detaching, 
0); + rwlock_init(&sdp->sfd_lock); + sdp->sg_tablesize = queue_max_segments(q); + sdp->index = k; + kref_init(&sdp->d_ref); + error = 0; + +out_unlock: + write_unlock_irqrestore(&sg_index_lock, iflags); + idr_preload_end(); + + if (error) { + kfree(sdp); + return ERR_PTR(error); + } + return sdp; +} + +static int +sg_add_device(struct device *cl_dev) +{ + struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); + Sg_device *sdp = NULL; + struct cdev * cdev = NULL; + int error; + unsigned long iflags; + + if (!blk_get_queue(scsidp->request_queue)) { + pr_warn("%s: get scsi_device queue failed\n", __func__); + return -ENODEV; + } + + error = -ENOMEM; + cdev = cdev_alloc(); + if (!cdev) { + pr_warn("%s: cdev_alloc failed\n", __func__); + goto out; + } + cdev->owner = THIS_MODULE; + cdev->ops = &sg_fops; + + sdp = sg_alloc(scsidp); + if (IS_ERR(sdp)) { + pr_warn("%s: sg_alloc failed\n", __func__); + error = PTR_ERR(sdp); + goto out; + } + + error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1); + if (error) + goto cdev_add_err; + + sdp->cdev = cdev; + if (sg_sysfs_valid) { + struct device *sg_class_member; + + sg_class_member = device_create(sg_sysfs_class, cl_dev->parent, + MKDEV(SCSI_GENERIC_MAJOR, + sdp->index), + sdp, "%s", sdp->name); + if (IS_ERR(sg_class_member)) { + pr_err("%s: device_create failed\n", __func__); + error = PTR_ERR(sg_class_member); + goto cdev_add_err; + } + error = sysfs_create_link(&scsidp->sdev_gendev.kobj, + &sg_class_member->kobj, "generic"); + if (error) + pr_err("%s: unable to make symlink 'generic' back " + "to sg%d\n", __func__, sdp->index); + } else + pr_warn("%s: sg_sys Invalid\n", __func__); + + sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d " + "type %d\n", sdp->index, scsidp->type); + + dev_set_drvdata(cl_dev, sdp); + + return 0; + +cdev_add_err: + write_lock_irqsave(&sg_index_lock, iflags); + idr_remove(&sg_index_idr, sdp->index); + write_unlock_irqrestore(&sg_index_lock, iflags); + kfree(sdp); + +out: + if (cdev) + cdev_del(cdev); + blk_put_queue(scsidp->request_queue); + return error; +} + +static void +sg_device_destroy(struct kref *kref) +{ + struct sg_device *sdp = container_of(kref, struct sg_device, d_ref); + struct request_queue *q = sdp->device->request_queue; + unsigned long flags; + + /* CAUTION! Note that the device can still be found via idr_find() + * even though the refcount is 0. Therefore, do idr_remove() BEFORE + * any other cleanup. 
+ */ + + blk_trace_remove(q); + blk_put_queue(q); + + write_lock_irqsave(&sg_index_lock, flags); + idr_remove(&sg_index_idr, sdp->index); + write_unlock_irqrestore(&sg_index_lock, flags); + + SCSI_LOG_TIMEOUT(3, + sg_printk(KERN_INFO, sdp, "sg_device_destroy\n")); + + kfree(sdp); +} + +static void +sg_remove_device(struct device *cl_dev) +{ + struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); + Sg_device *sdp = dev_get_drvdata(cl_dev); + unsigned long iflags; + Sg_fd *sfp; + int val; + + if (!sdp) + return; + /* want sdp->detaching non-zero as soon as possible */ + val = atomic_inc_return(&sdp->detaching); + if (val > 1) + return; /* only want to do following once per device */ + + SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, + "%s\n", __func__)); + + read_lock_irqsave(&sdp->sfd_lock, iflags); + list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { + wake_up_interruptible_all(&sfp->read_wait); + kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); + } + wake_up_interruptible_all(&sdp->open_wait); + read_unlock_irqrestore(&sdp->sfd_lock, iflags); + + sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); + device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index)); + cdev_del(sdp->cdev); + sdp->cdev = NULL; + + kref_put(&sdp->d_ref, sg_device_destroy); +} + +module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR); +module_param_named(def_reserved_size, def_reserved_size, int, + S_IRUGO | S_IWUSR); +module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR); + +MODULE_AUTHOR("Douglas Gilbert"); +MODULE_DESCRIPTION("SCSI generic (sg) driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(SG_VERSION_STR); +MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR); + +MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element " + "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))"); +MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd"); +MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))"); + +#ifdef CONFIG_SYSCTL +#include + +static struct ctl_table sg_sysctls[] = { + { + .procname = "sg-big-buff", + .data = &sg_big_buff, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = proc_dointvec, + }, + {} +}; + +static struct ctl_table_header *hdr; +static void register_sg_sysctls(void) +{ + if (!hdr) + hdr = register_sysctl("kernel", sg_sysctls); +} + +static void unregister_sg_sysctls(void) +{ + if (hdr) + unregister_sysctl_table(hdr); +} +#else +#define register_sg_sysctls() do { } while (0) +#define unregister_sg_sysctls() do { } while (0) +#endif /* CONFIG_SYSCTL */ + +static int __init +init_sg(void) +{ + int rc; + + if (scatter_elem_sz < PAGE_SIZE) { + scatter_elem_sz = PAGE_SIZE; + scatter_elem_sz_prev = scatter_elem_sz; + } + if (def_reserved_size >= 0) + sg_big_buff = def_reserved_size; + else + def_reserved_size = sg_big_buff; + + rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), + SG_MAX_DEVS, "sg"); + if (rc) + return rc; + sg_sysfs_class = class_create("scsi_generic"); + if ( IS_ERR(sg_sysfs_class) ) { + rc = PTR_ERR(sg_sysfs_class); + goto err_out; + } + sg_sysfs_valid = 1; + rc = scsi_register_interface(&sg_interface); + if (0 == rc) { +#ifdef CONFIG_SCSI_PROC_FS + sg_proc_init(); +#endif /* CONFIG_SCSI_PROC_FS */ + return 0; + } + class_destroy(sg_sysfs_class); + register_sg_sysctls(); +err_out: + unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); + return rc; +} + +static void __exit +exit_sg(void) +{ + unregister_sg_sysctls(); +#ifdef CONFIG_SCSI_PROC_FS + 
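+	/* Tear down the /proc/scsi/sg entries created by sg_proc_init(). */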
remove_proc_subtree("scsi/sg", NULL); +#endif /* CONFIG_SCSI_PROC_FS */ + scsi_unregister_interface(&sg_interface); + class_destroy(sg_sysfs_class); + sg_sysfs_valid = 0; + unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), + SG_MAX_DEVS); + idr_destroy(&sg_index_idr); +} + +static int +sg_start_req(Sg_request *srp, unsigned char *cmd) +{ + int res; + struct request *rq; + Sg_fd *sfp = srp->parentfp; + sg_io_hdr_t *hp = &srp->header; + int dxfer_len = (int) hp->dxfer_len; + int dxfer_dir = hp->dxfer_direction; + unsigned int iov_count = hp->iovec_count; + Sg_scatter_hold *req_schp = &srp->data; + Sg_scatter_hold *rsv_schp = &sfp->reserve; + struct request_queue *q = sfp->parentdp->device->request_queue; + struct rq_map_data *md, map_data; + int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? ITER_SOURCE : ITER_DEST; + struct scsi_cmnd *scmd; + + SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, + "sg_start_req: dxfer_len=%d\n", + dxfer_len)); + + /* + * NOTE + * + * With scsi-mq enabled, there are a fixed number of preallocated + * requests equal in number to shost->can_queue. If all of the + * preallocated requests are already in use, then scsi_alloc_request() + * will sleep until an active command completes, freeing up a request. + * Although waiting in an asynchronous interface is less than ideal, we + * do not want to use BLK_MQ_REQ_NOWAIT here because userspace might + * not expect an EWOULDBLOCK from this condition. + */ + rq = scsi_alloc_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ? + REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); + if (IS_ERR(rq)) + return PTR_ERR(rq); + scmd = blk_mq_rq_to_pdu(rq); + + if (hp->cmd_len > sizeof(scmd->cmnd)) { + blk_mq_free_request(rq); + return -EINVAL; + } + + memcpy(scmd->cmnd, cmd, hp->cmd_len); + scmd->cmd_len = hp->cmd_len; + + srp->rq = rq; + rq->end_io_data = srp; + scmd->allowed = SG_DEFAULT_RETRIES; + + if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) + return 0; + + if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO && + dxfer_dir != SG_DXFER_UNKNOWN && !iov_count && + blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len)) + md = NULL; + else + md = &map_data; + + if (md) { + mutex_lock(&sfp->f_mutex); + if (dxfer_len <= rsv_schp->bufflen && + !sfp->res_in_use) { + sfp->res_in_use = 1; + sg_link_reserve(sfp, srp, dxfer_len); + } else if (hp->flags & SG_FLAG_MMAP_IO) { + res = -EBUSY; /* sfp->res_in_use == 1 */ + if (dxfer_len > rsv_schp->bufflen) + res = -ENOMEM; + mutex_unlock(&sfp->f_mutex); + return res; + } else { + res = sg_build_indirect(req_schp, sfp, dxfer_len); + if (res) { + mutex_unlock(&sfp->f_mutex); + return res; + } + } + mutex_unlock(&sfp->f_mutex); + + md->pages = req_schp->pages; + md->page_order = req_schp->page_order; + md->nr_entries = req_schp->k_use_sg; + md->offset = 0; + md->null_mapped = hp->dxferp ? 
0 : 1; + if (dxfer_dir == SG_DXFER_TO_FROM_DEV) + md->from_user = 1; + else + md->from_user = 0; + } + + res = blk_rq_map_user_io(rq, md, hp->dxferp, hp->dxfer_len, + GFP_ATOMIC, iov_count, iov_count, 1, rw); + if (!res) { + srp->bio = rq->bio; + + if (!md) { + req_schp->dio_in_use = 1; + hp->info |= SG_INFO_DIRECT_IO; + } + } + return res; +} + +static int +sg_finish_rem_req(Sg_request *srp) +{ + int ret = 0; + + Sg_fd *sfp = srp->parentfp; + Sg_scatter_hold *req_schp = &srp->data; + + SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, + "sg_finish_rem_req: res_used=%d\n", + (int) srp->res_used)); + if (srp->bio) + ret = blk_rq_unmap_user(srp->bio); + + if (srp->rq) + blk_mq_free_request(srp->rq); + + if (srp->res_used) + sg_unlink_reserve(sfp, srp); + else + sg_remove_scat(sfp, req_schp); + + return ret; +} + +static int +sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) +{ + int sg_bufflen = tablesize * sizeof(struct page *); + gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN; + + schp->pages = kzalloc(sg_bufflen, gfp_flags); + if (!schp->pages) + return -ENOMEM; + schp->sglist_len = sg_bufflen; + return tablesize; /* number of scat_gath elements allocated */ +} + +static int +sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) +{ + int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems; + int sg_tablesize = sfp->parentdp->sg_tablesize; + int blk_size = buff_size, order; + gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO; + + if (blk_size < 0) + return -EFAULT; + if (0 == blk_size) + ++blk_size; /* don't know why */ + /* round request up to next highest SG_SECTOR_SZ byte boundary */ + blk_size = ALIGN(blk_size, SG_SECTOR_SZ); + SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, + "sg_build_indirect: buff_size=%d, blk_size=%d\n", + buff_size, blk_size)); + + /* N.B. ret_sz carried into this block ... */ + mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); + if (mx_sc_elems < 0) + return mx_sc_elems; /* most likely -ENOMEM */ + + num = scatter_elem_sz; + if (unlikely(num != scatter_elem_sz_prev)) { + if (num < PAGE_SIZE) { + scatter_elem_sz = PAGE_SIZE; + scatter_elem_sz_prev = PAGE_SIZE; + } else + scatter_elem_sz_prev = num; + } + + order = get_order(num); +retry: + ret_sz = 1 << (PAGE_SHIFT + order); + + for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems; + k++, rem_sz -= ret_sz) { + + num = (rem_sz > scatter_elem_sz_prev) ? 
+ scatter_elem_sz_prev : rem_sz; + + schp->pages[k] = alloc_pages(gfp_mask, order); + if (!schp->pages[k]) + goto out; + + if (num == scatter_elem_sz_prev) { + if (unlikely(ret_sz > scatter_elem_sz_prev)) { + scatter_elem_sz = ret_sz; + scatter_elem_sz_prev = ret_sz; + } + } + + SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, + "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n", + k, num, ret_sz)); + } /* end of for loop */ + + schp->page_order = order; + schp->k_use_sg = k; + SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, + "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", + k, rem_sz)); + + schp->bufflen = blk_size; + if (rem_sz > 0) /* must have failed */ + return -ENOMEM; + return 0; +out: + for (i = 0; i < k; i++) + __free_pages(schp->pages[i], order); + + if (--order >= 0) + goto retry; + + return -ENOMEM; +} + +static void +sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp) +{ + SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, + "sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); + if (schp->pages && schp->sglist_len > 0) { + if (!schp->dio_in_use) { + int k; + + for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { + SCSI_LOG_TIMEOUT(5, + sg_printk(KERN_INFO, sfp->parentdp, + "sg_remove_scat: k=%d, pg=0x%p\n", + k, schp->pages[k])); + __free_pages(schp->pages[k], schp->page_order); + } + + kfree(schp->pages); + } + } + memset(schp, 0, sizeof (*schp)); +} + +static int +sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) +{ + Sg_scatter_hold *schp = &srp->data; + int k, num; + + SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp, + "sg_read_oxfer: num_read_xfer=%d\n", + num_read_xfer)); + if ((!outp) || (num_read_xfer <= 0)) + return 0; + + num = 1 << (PAGE_SHIFT + schp->page_order); + for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { + if (num > num_read_xfer) { + if (copy_to_user(outp, page_address(schp->pages[k]), + num_read_xfer)) + return -EFAULT; + break; + } else { + if (copy_to_user(outp, page_address(schp->pages[k]), + num)) + return -EFAULT; + num_read_xfer -= num; + if (num_read_xfer <= 0) + break; + outp += num; + } + } + + return 0; +} + +static void +sg_build_reserve(Sg_fd * sfp, int req_size) +{ + Sg_scatter_hold *schp = &sfp->reserve; + + SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, + "sg_build_reserve: req_size=%d\n", req_size)); + do { + if (req_size < PAGE_SIZE) + req_size = PAGE_SIZE; + if (0 == sg_build_indirect(schp, sfp, req_size)) + return; + else + sg_remove_scat(sfp, schp); + req_size >>= 1; /* divide by 2 */ + } while (req_size > (PAGE_SIZE / 2)); +} + +static void +sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) +{ + Sg_scatter_hold *req_schp = &srp->data; + Sg_scatter_hold *rsv_schp = &sfp->reserve; + int k, num, rem; + + srp->res_used = 1; + SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, + "sg_link_reserve: size=%d\n", size)); + rem = size; + + num = 1 << (PAGE_SHIFT + rsv_schp->page_order); + for (k = 0; k < rsv_schp->k_use_sg; k++) { + if (rem <= num) { + req_schp->k_use_sg = k + 1; + req_schp->sglist_len = rsv_schp->sglist_len; + req_schp->pages = rsv_schp->pages; + + req_schp->bufflen = size; + req_schp->page_order = rsv_schp->page_order; + break; + } else + rem -= num; + } + + if (k >= rsv_schp->k_use_sg) + SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, + "sg_link_reserve: BAD size\n")); +} + +static void +sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) +{ + Sg_scatter_hold *req_schp = &srp->data; + + SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, 
srp->parentfp->parentdp, + "sg_unlink_reserve: req->k_use_sg=%d\n", + (int) req_schp->k_use_sg)); + req_schp->k_use_sg = 0; + req_schp->bufflen = 0; + req_schp->pages = NULL; + req_schp->page_order = 0; + req_schp->sglist_len = 0; + srp->res_used = 0; + /* Called without mutex lock to avoid deadlock */ + sfp->res_in_use = 0; +} + +static Sg_request * +sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy) +{ + Sg_request *resp; + unsigned long iflags; + + *busy = false; + write_lock_irqsave(&sfp->rq_list_lock, iflags); + list_for_each_entry(resp, &sfp->rq_list, entry) { + /* look for requests that are not SG_IO owned */ + if ((!resp->sg_io_owned) && + ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { + switch (resp->done) { + case 0: /* request active */ + *busy = true; + break; + case 1: /* request done; response ready to return */ + resp->done = 2; /* guard against other readers */ + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + return resp; + case 2: /* response already being returned */ + break; + } + } + } + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + return NULL; +} + +/* always adds to end of list */ +static Sg_request * +sg_add_request(Sg_fd * sfp) +{ + int k; + unsigned long iflags; + Sg_request *rp = sfp->req_arr; + + write_lock_irqsave(&sfp->rq_list_lock, iflags); + if (!list_empty(&sfp->rq_list)) { + if (!sfp->cmd_q) + goto out_unlock; + + for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) { + if (!rp->parentfp) + break; + } + if (k >= SG_MAX_QUEUE) + goto out_unlock; + } + memset(rp, 0, sizeof (Sg_request)); + rp->parentfp = sfp; + rp->header.duration = jiffies_to_msecs(jiffies); + list_add_tail(&rp->entry, &sfp->rq_list); + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + return rp; +out_unlock: + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + return NULL; +} + +/* Return of 1 for found; 0 for not found */ +static int +sg_remove_request(Sg_fd * sfp, Sg_request * srp) +{ + unsigned long iflags; + int res = 0; + + if (!sfp || !srp || list_empty(&sfp->rq_list)) + return res; + write_lock_irqsave(&sfp->rq_list_lock, iflags); + if (!list_empty(&srp->entry)) { + list_del(&srp->entry); + srp->parentfp = NULL; + res = 1; + } + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + + /* + * If the device is detaching, wakeup any readers in case we just + * removed the last response, which would leave nothing for them to + * return other than -ENODEV. 
+ */ + if (unlikely(atomic_read(&sfp->parentdp->detaching))) + wake_up_interruptible_all(&sfp->read_wait); + + return res; +} + +static Sg_fd * +sg_add_sfp(Sg_device * sdp) +{ + Sg_fd *sfp; + unsigned long iflags; + int bufflen; + + sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); + if (!sfp) + return ERR_PTR(-ENOMEM); + + init_waitqueue_head(&sfp->read_wait); + rwlock_init(&sfp->rq_list_lock); + INIT_LIST_HEAD(&sfp->rq_list); + kref_init(&sfp->f_ref); + mutex_init(&sfp->f_mutex); + sfp->timeout = SG_DEFAULT_TIMEOUT; + sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; + sfp->force_packid = SG_DEF_FORCE_PACK_ID; + sfp->cmd_q = SG_DEF_COMMAND_Q; + sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; + sfp->parentdp = sdp; + write_lock_irqsave(&sdp->sfd_lock, iflags); + if (atomic_read(&sdp->detaching)) { + write_unlock_irqrestore(&sdp->sfd_lock, iflags); + kfree(sfp); + return ERR_PTR(-ENODEV); + } + list_add_tail(&sfp->sfd_siblings, &sdp->sfds); + write_unlock_irqrestore(&sdp->sfd_lock, iflags); + SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, + "sg_add_sfp: sfp=0x%p\n", sfp)); + if (unlikely(sg_big_buff != def_reserved_size)) + sg_big_buff = def_reserved_size; + + bufflen = min_t(int, sg_big_buff, + max_sectors_bytes(sdp->device->request_queue)); + sg_build_reserve(sfp, bufflen); + SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, + "sg_add_sfp: bufflen=%d, k_use_sg=%d\n", + sfp->reserve.bufflen, + sfp->reserve.k_use_sg)); + + kref_get(&sdp->d_ref); + __module_get(THIS_MODULE); + return sfp; +} + +static void +sg_remove_sfp_usercontext(struct work_struct *work) +{ + struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); + struct sg_device *sdp = sfp->parentdp; + Sg_request *srp; + unsigned long iflags; + + /* Cleanup any responses which were never read(). */ + write_lock_irqsave(&sfp->rq_list_lock, iflags); + while (!list_empty(&sfp->rq_list)) { + srp = list_first_entry(&sfp->rq_list, Sg_request, entry); + sg_finish_rem_req(srp); + list_del(&srp->entry); + srp->parentfp = NULL; + } + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + + if (sfp->reserve.bufflen > 0) { + SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, + "sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", + (int) sfp->reserve.bufflen, + (int) sfp->reserve.k_use_sg)); + sg_remove_scat(sfp, &sfp->reserve); + } + + SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, + "sg_remove_sfp: sfp=0x%p\n", sfp)); + kfree(sfp); + + scsi_device_put(sdp->device); + kref_put(&sdp->d_ref, sg_device_destroy); + module_put(THIS_MODULE); +} + +static void +sg_remove_sfp(struct kref *kref) +{ + struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref); + struct sg_device *sdp = sfp->parentdp; + unsigned long iflags; + + write_lock_irqsave(&sdp->sfd_lock, iflags); + list_del(&sfp->sfd_siblings); + write_unlock_irqrestore(&sdp->sfd_lock, iflags); + + INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); + schedule_work(&sfp->ew.work); +} + +#ifdef CONFIG_SCSI_PROC_FS +static int +sg_idr_max_id(int id, void *p, void *data) +{ + int *k = data; + + if (*k < id) + *k = id; + + return 0; +} + +static int +sg_last_dev(void) +{ + int k = -1; + unsigned long iflags; + + read_lock_irqsave(&sg_index_lock, iflags); + idr_for_each(&sg_index_idr, sg_idr_max_id, &k); + read_unlock_irqrestore(&sg_index_lock, iflags); + return k + 1; /* origin 1 */ +} +#endif + +/* must be called with sg_index_lock held */ +static Sg_device *sg_lookup_dev(int dev) +{ + return idr_find(&sg_index_idr, dev); +} + +static Sg_device * +sg_get_dev(int dev) +{ + struct sg_device *sdp; + unsigned long flags; 
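+	/*
+	 * Look up the sg device for this minor under sg_index_lock and take
+	 * a reference on it, unless the device is already being detached.
+	 */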
+ + read_lock_irqsave(&sg_index_lock, flags); + sdp = sg_lookup_dev(dev); + if (!sdp) + sdp = ERR_PTR(-ENXIO); + else if (atomic_read(&sdp->detaching)) { + /* If sdp->detaching, then the refcount may already be 0, in + * which case it would be a bug to do kref_get(). + */ + sdp = ERR_PTR(-ENODEV); + } else + kref_get(&sdp->d_ref); + read_unlock_irqrestore(&sg_index_lock, flags); + + return sdp; +} + +#ifdef CONFIG_SCSI_PROC_FS +static int sg_proc_seq_show_int(struct seq_file *s, void *v); + +static int sg_proc_single_open_adio(struct inode *inode, struct file *file); +static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, + size_t count, loff_t *off); +static const struct proc_ops adio_proc_ops = { + .proc_open = sg_proc_single_open_adio, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_write = sg_proc_write_adio, + .proc_release = single_release, +}; + +static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); +static ssize_t sg_proc_write_dressz(struct file *filp, + const char __user *buffer, size_t count, loff_t *off); +static const struct proc_ops dressz_proc_ops = { + .proc_open = sg_proc_single_open_dressz, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_write = sg_proc_write_dressz, + .proc_release = single_release, +}; + +static int sg_proc_seq_show_version(struct seq_file *s, void *v); +static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v); +static int sg_proc_seq_show_dev(struct seq_file *s, void *v); +static void * dev_seq_start(struct seq_file *s, loff_t *pos); +static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos); +static void dev_seq_stop(struct seq_file *s, void *v); +static const struct seq_operations dev_seq_ops = { + .start = dev_seq_start, + .next = dev_seq_next, + .stop = dev_seq_stop, + .show = sg_proc_seq_show_dev, +}; + +static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v); +static const struct seq_operations devstrs_seq_ops = { + .start = dev_seq_start, + .next = dev_seq_next, + .stop = dev_seq_stop, + .show = sg_proc_seq_show_devstrs, +}; + +static int sg_proc_seq_show_debug(struct seq_file *s, void *v); +static const struct seq_operations debug_seq_ops = { + .start = dev_seq_start, + .next = dev_seq_next, + .stop = dev_seq_stop, + .show = sg_proc_seq_show_debug, +}; + +static int +sg_proc_init(void) +{ + struct proc_dir_entry *p; + + p = proc_mkdir("scsi/sg", NULL); + if (!p) + return 1; + + proc_create("allow_dio", S_IRUGO | S_IWUSR, p, &adio_proc_ops); + proc_create_seq("debug", S_IRUGO, p, &debug_seq_ops); + proc_create("def_reserved_size", S_IRUGO | S_IWUSR, p, &dressz_proc_ops); + proc_create_single("device_hdr", S_IRUGO, p, sg_proc_seq_show_devhdr); + proc_create_seq("devices", S_IRUGO, p, &dev_seq_ops); + proc_create_seq("device_strs", S_IRUGO, p, &devstrs_seq_ops); + proc_create_single("version", S_IRUGO, p, sg_proc_seq_show_version); + return 0; +} + + +static int sg_proc_seq_show_int(struct seq_file *s, void *v) +{ + seq_printf(s, "%d\n", *((int *)s->private)); + return 0; +} + +static int sg_proc_single_open_adio(struct inode *inode, struct file *file) +{ + return single_open(file, sg_proc_seq_show_int, &sg_allow_dio); +} + +static ssize_t +sg_proc_write_adio(struct file *filp, const char __user *buffer, + size_t count, loff_t *off) +{ + int err; + unsigned long num; + + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) + return -EACCES; + err = kstrtoul_from_user(buffer, count, 0, &num); + if (err) + return err; + sg_allow_dio = num ? 
1 : 0; + return count; +} + +static int sg_proc_single_open_dressz(struct inode *inode, struct file *file) +{ + return single_open(file, sg_proc_seq_show_int, &sg_big_buff); +} + +static ssize_t +sg_proc_write_dressz(struct file *filp, const char __user *buffer, + size_t count, loff_t *off) +{ + int err; + unsigned long k = ULONG_MAX; + + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) + return -EACCES; + + err = kstrtoul_from_user(buffer, count, 0, &k); + if (err) + return err; + if (k <= 1048576) { /* limit "big buff" to 1 MB */ + sg_big_buff = k; + return count; + } + return -ERANGE; +} + +static int sg_proc_seq_show_version(struct seq_file *s, void *v) +{ + seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR, + sg_version_date); + return 0; +} + +static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v) +{ + seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n"); + return 0; +} + +struct sg_proc_deviter { + loff_t index; + size_t max; +}; + +static void * dev_seq_start(struct seq_file *s, loff_t *pos) +{ + struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL); + + s->private = it; + if (! it) + return NULL; + + it->index = *pos; + it->max = sg_last_dev(); + if (it->index >= it->max) + return NULL; + return it; +} + +static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + struct sg_proc_deviter * it = s->private; + + *pos = ++it->index; + return (it->index < it->max) ? it : NULL; +} + +static void dev_seq_stop(struct seq_file *s, void *v) +{ + kfree(s->private); +} + +static int sg_proc_seq_show_dev(struct seq_file *s, void *v) +{ + struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; + Sg_device *sdp; + struct scsi_device *scsidp; + unsigned long iflags; + + read_lock_irqsave(&sg_index_lock, iflags); + sdp = it ? sg_lookup_dev(it->index) : NULL; + if ((NULL == sdp) || (NULL == sdp->device) || + (atomic_read(&sdp->detaching))) + seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n"); + else { + scsidp = sdp->device; + seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n", + scsidp->host->host_no, scsidp->channel, + scsidp->id, scsidp->lun, (int) scsidp->type, + 1, + (int) scsidp->queue_depth, + (int) scsi_device_busy(scsidp), + (int) scsi_device_online(scsidp)); + } + read_unlock_irqrestore(&sg_index_lock, iflags); + return 0; +} + +static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v) +{ + struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; + Sg_device *sdp; + struct scsi_device *scsidp; + unsigned long iflags; + + read_lock_irqsave(&sg_index_lock, iflags); + sdp = it ? sg_lookup_dev(it->index) : NULL; + scsidp = sdp ? 
sdp->device : NULL; + if (sdp && scsidp && (!atomic_read(&sdp->detaching))) + seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n", + scsidp->vendor, scsidp->model, scsidp->rev); + else + seq_puts(s, "\n"); + read_unlock_irqrestore(&sg_index_lock, iflags); + return 0; +} + +/* must be called while holding sg_index_lock */ +static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) +{ + int k, new_interface, blen, usg; + Sg_request *srp; + Sg_fd *fp; + const sg_io_hdr_t *hp; + const char * cp; + unsigned int ms; + + k = 0; + list_for_each_entry(fp, &sdp->sfds, sfd_siblings) { + k++; + read_lock(&fp->rq_list_lock); /* irqs already disabled */ + seq_printf(s, " FD(%d): timeout=%dms bufflen=%d " + "(res)sgat=%d low_dma=%d\n", k, + jiffies_to_msecs(fp->timeout), + fp->reserve.bufflen, + (int) fp->reserve.k_use_sg, 0); + seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n", + (int) fp->cmd_q, (int) fp->force_packid, + (int) fp->keep_orphan); + list_for_each_entry(srp, &fp->rq_list, entry) { + hp = &srp->header; + new_interface = (hp->interface_id == '\0') ? 0 : 1; + if (srp->res_used) { + if (new_interface && + (SG_FLAG_MMAP_IO & hp->flags)) + cp = " mmap>> "; + else + cp = " rb>> "; + } else { + if (SG_INFO_DIRECT_IO_MASK & hp->info) + cp = " dio>> "; + else + cp = " "; + } + seq_puts(s, cp); + blen = srp->data.bufflen; + usg = srp->data.k_use_sg; + seq_puts(s, srp->done ? + ((1 == srp->done) ? "rcv:" : "fin:") + : "act:"); + seq_printf(s, " id=%d blen=%d", + srp->header.pack_id, blen); + if (srp->done) + seq_printf(s, " dur=%d", hp->duration); + else { + ms = jiffies_to_msecs(jiffies); + seq_printf(s, " t_o/elap=%d/%d", + (new_interface ? hp->timeout : + jiffies_to_msecs(fp->timeout)), + (ms > hp->duration ? ms - hp->duration : 0)); + } + seq_printf(s, "ms sgat=%d op=0x%02x\n", usg, + (int) srp->data.cmd_opcode); + } + if (list_empty(&fp->rq_list)) + seq_puts(s, " No requests active\n"); + read_unlock(&fp->rq_list_lock); + } +} + +static int sg_proc_seq_show_debug(struct seq_file *s, void *v) +{ + struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; + Sg_device *sdp; + unsigned long iflags; + + if (it && (0 == it->index)) + seq_printf(s, "max_active_device=%d def_reserved_size=%d\n", + (int)it->max, sg_big_buff); + + read_lock_irqsave(&sg_index_lock, iflags); + sdp = it ? sg_lookup_dev(it->index) : NULL; + if (NULL == sdp) + goto skip; + read_lock(&sdp->sfd_lock); + if (!list_empty(&sdp->sfds)) { + seq_printf(s, " >>> device=%s ", sdp->name); + if (atomic_read(&sdp->detaching)) + seq_puts(s, "detaching pending close "); + else if (sdp->device) { + struct scsi_device *scsidp = sdp->device; + + seq_printf(s, "%d:%d:%d:%llu em=%d", + scsidp->host->host_no, + scsidp->channel, scsidp->id, + scsidp->lun, + scsidp->host->hostt->emulated); + } + seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n", + sdp->sg_tablesize, sdp->exclude, sdp->open_cnt); + sg_proc_debug_helper(s, sdp); + } + read_unlock(&sdp->sfd_lock); +skip: + read_unlock_irqrestore(&sg_index_lock, iflags); + return 0; +} + +#endif /* CONFIG_SCSI_PROC_FS */ + +module_init(init_sg); +module_exit(exit_sg); diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c new file mode 100644 index 000000000..88e2b5eb9 --- /dev/null +++ b/drivers/scsi/sgiwd93.c @@ -0,0 +1,332 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996 David S. 
Miller (davem@davemloft.net) + * Copyright (C) 1999 Andrew R. Baker (andrewb@uab.edu) + * Copyright (C) 2001 Florian Lohoff (flo@rfc822.org) + * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org) + * + * (In all truth, Jed Schimmel wrote all this code.) + */ + +#undef DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include "wd33c93.h" + +struct ip22_hostdata { + struct WD33C93_hostdata wh; + dma_addr_t dma; + void *cpu; + struct device *dev; +}; + +#define host_to_hostdata(host) ((struct ip22_hostdata *)((host)->hostdata)) + +struct hpc_chunk { + struct hpc_dma_desc desc; + u32 _padding; /* align to quadword boundary */ +}; + +/* space for hpc dma descriptors */ +#define HPC_DMA_SIZE PAGE_SIZE + +#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE) + +static irqreturn_t sgiwd93_intr(int irq, void *dev_id) +{ + struct Scsi_Host * host = dev_id; + unsigned long flags; + + spin_lock_irqsave(host->host_lock, flags); + wd33c93_intr(host); + spin_unlock_irqrestore(host->host_lock, flags); + + return IRQ_HANDLED; +} + +static inline +void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din) +{ + struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); + unsigned long len = scsi_pointer->this_residual; + void *addr = scsi_pointer->ptr; + dma_addr_t physaddr; + unsigned long count; + struct hpc_chunk *hcp; + + physaddr = dma_map_single(hd->dev, addr, len, DMA_DIR(din)); + scsi_pointer->dma_handle = physaddr; + hcp = hd->cpu; + + while (len) { + /* + * even cntinfo could be up to 16383, without + * magic only 8192 works correctly + */ + count = len > 8192 ? 8192 : len; + hcp->desc.pbuf = physaddr; + hcp->desc.cntinfo = count; + hcp++; + len -= count; + physaddr += count; + } + + /* + * To make sure, if we trip an HPC bug, that we transfer every single + * byte, we tag on an extra zero length dma descriptor at the end of + * the chain. + */ + hcp->desc.pbuf = 0; + hcp->desc.cntinfo = HPCDMA_EOX; + dma_sync_single_for_device(hd->dev, hd->dma, + (unsigned long)(hcp + 1) - (unsigned long)hd->cpu, + DMA_TO_DEVICE); +} + +static int dma_setup(struct scsi_cmnd *cmd, int datainp) +{ + struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); + struct ip22_hostdata *hdata = host_to_hostdata(cmd->device->host); + struct hpc3_scsiregs *hregs = + (struct hpc3_scsiregs *) cmd->device->host->base; + + pr_debug("dma_setup: datainp<%d> hcp<%p> ", datainp, hdata->cpu); + + hdata->wh.dma_dir = datainp; + + /* + * wd33c93 shouldn't pass us bogus dma_setups, but it does:-( The + * other wd33c93 drivers deal with it the same way (which isn't that + * obvious). IMHO a better fix would be, not to do these dma setups + * in the first place. + */ + if (scsi_pointer->ptr == NULL || scsi_pointer->this_residual == 0) + return 1; + + fill_hpc_entries(hdata, cmd, datainp); + + pr_debug(" HPCGO\n"); + + /* Start up the HPC. 
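+	 * The descriptor chain built by fill_hpc_entries() is handed to the
+	 * HPC via ndptr; HPC3_SCTRL_DIR is added only for data-out transfers,
+	 * while data-in (datainp) starts with HPC3_SCTRL_ACTIVE alone.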
*/ + hregs->ndptr = hdata->dma; + if (datainp) + hregs->ctrl = HPC3_SCTRL_ACTIVE; + else + hregs->ctrl = HPC3_SCTRL_ACTIVE | HPC3_SCTRL_DIR; + + return 0; +} + +static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, + int status) +{ + struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt); + struct ip22_hostdata *hdata = host_to_hostdata(instance); + struct hpc3_scsiregs *hregs; + + if (!SCpnt) + return; + + if (scsi_pointer->ptr == NULL || scsi_pointer->this_residual == 0) + return; + + hregs = (struct hpc3_scsiregs *) SCpnt->device->host->base; + + pr_debug("dma_stop: status<%d> ", status); + + /* First stop the HPC and flush it's FIFO. */ + if (hdata->wh.dma_dir) { + hregs->ctrl |= HPC3_SCTRL_FLUSH; + while (hregs->ctrl & HPC3_SCTRL_ACTIVE) + barrier(); + } + hregs->ctrl = 0; + dma_unmap_single(hdata->dev, scsi_pointer->dma_handle, + scsi_pointer->this_residual, + DMA_DIR(hdata->wh.dma_dir)); + + pr_debug("\n"); +} + +void sgiwd93_reset(unsigned long base) +{ + struct hpc3_scsiregs *hregs = (struct hpc3_scsiregs *) base; + + hregs->ctrl = HPC3_SCTRL_CRESET; + udelay(50); + hregs->ctrl = 0; +} +EXPORT_SYMBOL_GPL(sgiwd93_reset); + +static inline void init_hpc_chain(struct ip22_hostdata *hdata) +{ + struct hpc_chunk *hcp = (struct hpc_chunk *)hdata->cpu; + dma_addr_t dma = hdata->dma; + unsigned long start, end; + + start = (unsigned long) hcp; + end = start + HPC_DMA_SIZE; + while (start < end) { + hcp->desc.pnext = (u32) (dma + sizeof(struct hpc_chunk)); + hcp->desc.cntinfo = HPCDMA_EOX; + hcp++; + dma += sizeof(struct hpc_chunk); + start += sizeof(struct hpc_chunk); + } + hcp--; + hcp->desc.pnext = hdata->dma; +} + +/* + * Kludge alert - the SCSI code calls the abort and reset method with int + * arguments not with pointers. So this is going to blow up beautyfully + * on 64-bit systems with memory outside the compat address spaces. 
+ */ +static const struct scsi_host_template sgiwd93_template = { + .module = THIS_MODULE, + .proc_name = "SGIWD93", + .name = "SGI WD93", + .queuecommand = wd33c93_queuecommand, + .eh_abort_handler = wd33c93_abort, + .eh_host_reset_handler = wd33c93_host_reset, + .can_queue = 16, + .this_id = 7, + .sg_tablesize = SG_ALL, + .cmd_per_lun = 8, + .dma_boundary = PAGE_SIZE - 1, + .cmd_size = sizeof(struct scsi_pointer), +}; + +static int sgiwd93_probe(struct platform_device *pdev) +{ + struct sgiwd93_platform_data *pd = pdev->dev.platform_data; + unsigned char *wdregs = pd->wdregs; + struct hpc3_scsiregs *hregs = pd->hregs; + struct ip22_hostdata *hdata; + struct Scsi_Host *host; + wd33c93_regs regs; + unsigned int unit = pd->unit; + unsigned int irq = pd->irq; + int err; + + host = scsi_host_alloc(&sgiwd93_template, sizeof(struct ip22_hostdata)); + if (!host) { + err = -ENOMEM; + goto out; + } + + host->base = (unsigned long) hregs; + host->irq = irq; + + hdata = host_to_hostdata(host); + hdata->dev = &pdev->dev; + hdata->cpu = dma_alloc_noncoherent(&pdev->dev, HPC_DMA_SIZE, + &hdata->dma, DMA_TO_DEVICE, GFP_KERNEL); + if (!hdata->cpu) { + printk(KERN_WARNING "sgiwd93: Could not allocate memory for " + "host %d buffer.\n", unit); + err = -ENOMEM; + goto out_put; + } + + init_hpc_chain(hdata); + + regs.SASR = wdregs + 3; + regs.SCMD = wdregs + 7; + + hdata->wh.no_sync = 0; + hdata->wh.fast = 1; + hdata->wh.dma_mode = CTRL_BURST; + + wd33c93_init(host, regs, dma_setup, dma_stop, WD33C93_FS_MHZ(20)); + + err = request_irq(irq, sgiwd93_intr, 0, "SGI WD93", host); + if (err) { + printk(KERN_WARNING "sgiwd93: Could not register irq %d " + "for host %d.\n", irq, unit); + goto out_free; + } + + platform_set_drvdata(pdev, host); + + err = scsi_add_host(host, NULL); + if (err) + goto out_irq; + + scsi_scan_host(host); + + return 0; + +out_irq: + free_irq(irq, host); +out_free: + dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma, + DMA_TO_DEVICE); +out_put: + scsi_host_put(host); +out: + + return err; +} + +static int sgiwd93_remove(struct platform_device *pdev) +{ + struct Scsi_Host *host = platform_get_drvdata(pdev); + struct ip22_hostdata *hdata = (struct ip22_hostdata *) host->hostdata; + struct sgiwd93_platform_data *pd = pdev->dev.platform_data; + + scsi_remove_host(host); + free_irq(pd->irq, host); + dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma, + DMA_TO_DEVICE); + scsi_host_put(host); + return 0; +} + +static struct platform_driver sgiwd93_driver = { + .probe = sgiwd93_probe, + .remove = sgiwd93_remove, + .driver = { + .name = "sgiwd93", + } +}; + +static int __init sgiwd93_module_init(void) +{ + return platform_driver_register(&sgiwd93_driver); +} + +static void __exit sgiwd93_module_exit(void) +{ + return platform_driver_unregister(&sgiwd93_driver); +} + +module_init(sgiwd93_module_init); +module_exit(sgiwd93_module_exit); + +MODULE_DESCRIPTION("SGI WD33C93 driver"); +MODULE_AUTHOR("Ralf Baechle "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:sgiwd93"); diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c new file mode 100644 index 000000000..e519df68d --- /dev/null +++ b/drivers/scsi/sim710.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * sim710.c - Copyright (C) 1999 Richard Hirst + * + *---------------------------------------------------------------------------- + *---------------------------------------------------------------------------- + * + * MCA card detection code by Trent McNair. 
(now deleted) + * Fixes to not explicitly nul bss data from Xavier Bestel. + * Some multiboard fixes from Rolf Eike Beer. + * Auto probing of EISA config space from Trevor Hemsley. + * + * Rewritten to use 53c700.c by James.Bottomley@SteelEye.com + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "53c700.h" + + +/* Must be enough for EISA */ +#define MAX_SLOTS 8 +static __u8 __initdata id_array[MAX_SLOTS] = { [0 ... MAX_SLOTS-1] = 7 }; + +static char *sim710; /* command line passed by insmod */ + +MODULE_AUTHOR("Richard Hirst"); +MODULE_DESCRIPTION("Simple NCR53C710 driver"); +MODULE_LICENSE("GPL"); + +module_param(sim710, charp, 0); + +#ifdef MODULE +#define ARG_SEP ' ' +#else +#define ARG_SEP ',' +#endif + +static __init int +param_setup(char *str) +{ + char *pos = str, *next; + int slot = -1; + + while(pos != NULL && (next = strchr(pos, ':')) != NULL) { + int val = (int)simple_strtoul(++next, NULL, 0); + + if(!strncmp(pos, "slot:", 5)) + slot = val; + else if(!strncmp(pos, "id:", 3)) { + if(slot == -1) { + printk(KERN_WARNING "sim710: Must specify slot for id parameter\n"); + } else if(slot >= MAX_SLOTS) { + printk(KERN_WARNING "sim710: Illegal slot %d for id %d\n", slot, val); + } else { + id_array[slot] = val; + } + } + if((pos = strchr(pos, ARG_SEP)) != NULL) + pos++; + } + return 1; +} +__setup("sim710=", param_setup); + +static struct scsi_host_template sim710_driver_template = { + .name = "LSI (Symbios) 710 EISA", + .proc_name = "sim710", + .this_id = 7, + .module = THIS_MODULE, +}; + +static int sim710_probe_common(struct device *dev, unsigned long base_addr, + int irq, int clock, int differential, + int scsi_id) +{ + struct Scsi_Host * host = NULL; + struct NCR_700_Host_Parameters *hostdata = + kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); + + printk(KERN_NOTICE "sim710: %s\n", dev_name(dev)); + printk(KERN_NOTICE "sim710: irq = %d, clock = %d, base = 0x%lx, scsi_id = %d\n", + irq, clock, base_addr, scsi_id); + + if(hostdata == NULL) { + printk(KERN_ERR "sim710: Failed to allocate host data\n"); + goto out; + } + + if(request_region(base_addr, 64, "sim710") == NULL) { + printk(KERN_ERR "sim710: Failed to reserve IO region 0x%lx\n", + base_addr); + goto out_free; + } + + /* Fill in the three required pieces of hostdata */ + hostdata->base = ioport_map(base_addr, 64); + hostdata->differential = differential; + hostdata->clock = clock; + hostdata->chip710 = 1; + hostdata->burst_length = 8; + + /* and register the chip */ + if((host = NCR_700_detect(&sim710_driver_template, hostdata, dev)) + == NULL) { + printk(KERN_ERR "sim710: No host detected; card configuration problem?\n"); + goto out_release; + } + host->this_id = scsi_id; + host->base = base_addr; + host->irq = irq; + if (request_irq(irq, NCR_700_intr, IRQF_SHARED, "sim710", host)) { + printk(KERN_ERR "sim710: request_irq failed\n"); + goto out_put_host; + } + + dev_set_drvdata(dev, host); + scsi_scan_host(host); + + return 0; + + out_put_host: + scsi_host_put(host); + out_release: + release_region(base_addr, 64); + out_free: + kfree(hostdata); + out: + return -ENODEV; +} + +static int sim710_device_remove(struct device *dev) +{ + struct Scsi_Host *host = dev_get_drvdata(dev); + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)host->hostdata[0]; + + scsi_remove_host(host); + NCR_700_release(host); + kfree(hostdata); + free_irq(host->irq, host); + release_region(host->base, 64); + return 0; +} + +#ifdef 
CONFIG_EISA +static struct eisa_device_id sim710_eisa_ids[] = { + { "CPQ4410" }, + { "CPQ4411" }, + { "HWP0C80" }, + { "" } +}; +MODULE_DEVICE_TABLE(eisa, sim710_eisa_ids); + +static int sim710_eisa_probe(struct device *dev) +{ + struct eisa_device *edev = to_eisa_device(dev); + unsigned long io_addr = edev->base_addr; + char eisa_cpq_irqs[] = { 11, 14, 15, 10, 9, 0 }; + char eisa_hwp_irqs[] = { 3, 4, 5, 7, 12, 10, 11, 0}; + char *eisa_irqs; + unsigned char irq_index; + unsigned char irq, differential = 0, scsi_id = 7; + + if(strcmp(edev->id.sig, "HWP0C80") == 0) { + __u8 val; + eisa_irqs = eisa_hwp_irqs; + irq_index = (inb(io_addr + 0xc85) & 0x7) - 1; + + val = inb(io_addr + 0x4); + scsi_id = ffs(val) - 1; + + if(scsi_id > 7 || (val & ~(1<= strlen(eisa_irqs)) { + printk("sim710.c: irq nasty\n"); + return -ENODEV; + } + + irq = eisa_irqs[irq_index]; + + return sim710_probe_common(dev, io_addr, irq, 50, + differential, scsi_id); +} + +static struct eisa_driver sim710_eisa_driver = { + .id_table = sim710_eisa_ids, + .driver = { + .name = "sim710", + .probe = sim710_eisa_probe, + .remove = sim710_device_remove, + }, +}; +#endif /* CONFIG_EISA */ + +static int __init sim710_init(void) +{ +#ifdef MODULE + if (sim710) + param_setup(sim710); +#endif + +#ifdef CONFIG_EISA + /* + * FIXME: We'd really like to return -ENODEV if no devices have actually + * been found. However eisa_driver_register() only reports problems + * with kobject_register() so simply return success for now. + */ + eisa_driver_register(&sim710_eisa_driver); +#endif + return 0; +} + +static void __exit sim710_exit(void) +{ +#ifdef CONFIG_EISA + eisa_driver_unregister(&sim710_eisa_driver); +#endif +} + +module_init(sim710_init); +module_exit(sim710_exit); diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig new file mode 100644 index 000000000..789460b0a --- /dev/null +++ b/drivers/scsi/smartpqi/Kconfig @@ -0,0 +1,56 @@ +# +# Kernel configuration file for the SMARTPQI +# +# Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries +# Copyright (c) 2017-2018 Microsemi Corporation +# Copyright (c) 2016 Microsemi Corporation +# Copyright (c) 2016 PMC-Sierra, Inc. +# (mailto:storagedev@microchip.com) + +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; version 2 +# of the License. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# NO WARRANTY +# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT +# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, +# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is +# solely responsible for determining the appropriateness of using and +# distributing the Program and assumes all risks associated with its +# exercise of rights under this Agreement, including but not limited to +# the risks and costs of program errors, damage to or loss of data, +# programs or equipment, and unavailability or interruption of operations. 
+ +# DISCLAIMER OF LIABILITY +# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +config SCSI_SMARTPQI + tristate "Microchip PQI Driver" + depends on PCI && SCSI && !S390 + select SCSI_SAS_ATTRS + select RAID_ATTRS + help + This driver supports Microchip PQI controllers. + + + + To compile this driver as a module, choose M here: the + module will be called smartpqi. + + Note: the aacraid driver will not manage a smartpqi + controller. You need to enable smartpqi for smartpqi + controllers. For more information, please see + Documentation/scsi/smartpqi.rst diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile new file mode 100644 index 000000000..28985e508 --- /dev/null +++ b/drivers/scsi/smartpqi/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o +smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h new file mode 100644 index 000000000..041940183 --- /dev/null +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -0,0 +1,1704 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries + * Copyright (c) 2016-2018 Microsemi Corporation + * Copyright (c) 2016 PMC-Sierra, Inc. + * + * Questions/Comments/Bugfixes to storagedev@microchip.com + * + */ + +#include + +#if !defined(_SMARTPQI_H) +#define _SMARTPQI_H + +#include +#include + +#pragma pack(1) + +#define PQI_DEVICE_SIGNATURE "PQI DREG" + +/* This structure is defined by the PQI specification. */ +struct pqi_device_registers { + __le64 signature; + u8 function_and_status_code; + u8 reserved[7]; + u8 max_admin_iq_elements; + u8 max_admin_oq_elements; + u8 admin_iq_element_length; /* in 16-byte units */ + u8 admin_oq_element_length; /* in 16-byte units */ + __le16 max_reset_timeout; /* in 100-millisecond units */ + u8 reserved1[2]; + __le32 legacy_intx_status; + __le32 legacy_intx_mask_set; + __le32 legacy_intx_mask_clear; + u8 reserved2[28]; + __le32 device_status; + u8 reserved3[4]; + __le64 admin_iq_pi_offset; + __le64 admin_oq_ci_offset; + __le64 admin_iq_element_array_addr; + __le64 admin_oq_element_array_addr; + __le64 admin_iq_ci_addr; + __le64 admin_oq_pi_addr; + u8 admin_iq_num_elements; + u8 admin_oq_num_elements; + __le16 admin_queue_int_msg_num; + u8 reserved4[4]; + __le32 device_error; + u8 reserved5[4]; + __le64 error_details; + __le32 device_reset; + __le32 power_action; + u8 reserved6[104]; +}; + +/* + * controller registers + * + * These are defined by the Microchip implementation. + * + * Some registers (those named sis_*) are only used when in + * legacy SIS mode before we transition the controller into + * PQI mode. There are a number of other SIS mode registers, + * but we don't use them, so only the SIS registers that we + * care about are defined here. The offsets mentioned in the + * comments are the offsets from the PCIe BAR 0. 
+ */ +struct pqi_ctrl_registers { + u8 reserved[0x20]; + __le32 sis_host_to_ctrl_doorbell; /* 20h */ + u8 reserved1[0x34 - (0x20 + sizeof(__le32))]; + __le32 sis_interrupt_mask; /* 34h */ + u8 reserved2[0x9c - (0x34 + sizeof(__le32))]; + __le32 sis_ctrl_to_host_doorbell; /* 9Ch */ + u8 reserved3[0xa0 - (0x9c + sizeof(__le32))]; + __le32 sis_ctrl_to_host_doorbell_clear; /* A0h */ + u8 reserved4[0xb0 - (0xa0 + sizeof(__le32))]; + __le32 sis_driver_scratch; /* B0h */ + __le32 sis_product_identifier; /* B4h */ + u8 reserved5[0xbc - (0xb4 + sizeof(__le32))]; + __le32 sis_firmware_status; /* BCh */ + u8 reserved6[0xcc - (0xbc + sizeof(__le32))]; + __le32 sis_ctrl_shutdown_reason_code; /* CCh */ + u8 reserved7[0x1000 - (0xcc + sizeof(__le32))]; + __le32 sis_mailbox[8]; /* 1000h */ + u8 reserved8[0x4000 - (0x1000 + (sizeof(__le32) * 8))]; + /* + * The PQI spec states that the PQI registers should be at + * offset 0 from the PCIe BAR 0. However, we can't map + * them at offset 0 because that would break compatibility + * with the SIS registers. So we map them at offset 4000h. + */ + struct pqi_device_registers pqi_registers; /* 4000h */ +}; + +#define PQI_DEVICE_REGISTERS_OFFSET 0x4000 + +/* shutdown reasons for taking the controller offline */ +enum pqi_ctrl_shutdown_reason { + PQI_IQ_NOT_DRAINED_TIMEOUT = 1, + PQI_LUN_RESET_TIMEOUT = 2, + PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT = 3, + PQI_NO_HEARTBEAT = 4, + PQI_FIRMWARE_KERNEL_NOT_UP = 5, + PQI_OFA_RESPONSE_TIMEOUT = 6, + PQI_INVALID_REQ_ID = 7, + PQI_UNMATCHED_REQ_ID = 8, + PQI_IO_PI_OUT_OF_RANGE = 9, + PQI_EVENT_PI_OUT_OF_RANGE = 10, + PQI_UNEXPECTED_IU_TYPE = 11 +}; + +enum pqi_io_path { + RAID_PATH = 0, + AIO_PATH = 1 +}; + +enum pqi_irq_mode { + IRQ_MODE_NONE, + IRQ_MODE_INTX, + IRQ_MODE_MSIX +}; + +struct pqi_sg_descriptor { + __le64 address; + __le32 length; + __le32 flags; +}; + +/* manifest constants for the flags field of pqi_sg_descriptor */ +#define CISS_SG_LAST 0x40000000 +#define CISS_SG_CHAIN 0x80000000 + +struct pqi_iu_header { + u8 iu_type; + u8 reserved; + __le16 iu_length; /* in bytes - does not include the length */ + /* of this header */ + __le16 response_queue_id; /* specifies the OQ where the */ + /* response IU is to be delivered */ + u16 driver_flags; /* reserved for driver use */ +}; + +/* manifest constants for pqi_iu_header.driver_flags */ +#define PQI_DRIVER_NONBLOCKABLE_REQUEST 0x1 + +/* + * According to the PQI spec, the IU header is only the first 4 bytes of our + * pqi_iu_header structure. 
+ */ +#define PQI_REQUEST_HEADER_LENGTH 4 + +struct pqi_general_admin_request { + struct pqi_iu_header header; + __le16 request_id; + u8 function_code; + union { + struct { + u8 reserved[33]; + __le32 buffer_length; + struct pqi_sg_descriptor sg_descriptor; + } report_device_capability; + + struct { + u8 reserved; + __le16 queue_id; + u8 reserved1[2]; + __le64 element_array_addr; + __le64 ci_addr; + __le16 num_elements; + __le16 element_length; + u8 queue_protocol; + u8 reserved2[23]; + __le32 vendor_specific; + } create_operational_iq; + + struct { + u8 reserved; + __le16 queue_id; + u8 reserved1[2]; + __le64 element_array_addr; + __le64 pi_addr; + __le16 num_elements; + __le16 element_length; + u8 queue_protocol; + u8 reserved2[3]; + __le16 int_msg_num; + __le16 coalescing_count; + __le32 min_coalescing_time; + __le32 max_coalescing_time; + u8 reserved3[8]; + __le32 vendor_specific; + } create_operational_oq; + + struct { + u8 reserved; + __le16 queue_id; + u8 reserved1[50]; + } delete_operational_queue; + + struct { + u8 reserved; + __le16 queue_id; + u8 reserved1[46]; + __le32 vendor_specific; + } change_operational_iq_properties; + + } data; +}; + +struct pqi_general_admin_response { + struct pqi_iu_header header; + __le16 request_id; + u8 function_code; + u8 status; + union { + struct { + u8 status_descriptor[4]; + __le64 iq_pi_offset; + u8 reserved[40]; + } create_operational_iq; + + struct { + u8 status_descriptor[4]; + __le64 oq_ci_offset; + u8 reserved[40]; + } create_operational_oq; + } data; +}; + +struct pqi_iu_layer_descriptor { + u8 inbound_spanning_supported : 1; + u8 reserved : 7; + u8 reserved1[5]; + __le16 max_inbound_iu_length; + u8 outbound_spanning_supported : 1; + u8 reserved2 : 7; + u8 reserved3[5]; + __le16 max_outbound_iu_length; +}; + +struct pqi_device_capability { + __le16 data_length; + u8 reserved[6]; + u8 iq_arbitration_priority_support_bitmask; + u8 maximum_aw_a; + u8 maximum_aw_b; + u8 maximum_aw_c; + u8 max_arbitration_burst : 3; + u8 reserved1 : 4; + u8 iqa : 1; + u8 reserved2[2]; + u8 iq_freeze : 1; + u8 reserved3 : 7; + __le16 max_inbound_queues; + __le16 max_elements_per_iq; + u8 reserved4[4]; + __le16 max_iq_element_length; + __le16 min_iq_element_length; + u8 reserved5[2]; + __le16 max_outbound_queues; + __le16 max_elements_per_oq; + __le16 intr_coalescing_time_granularity; + __le16 max_oq_element_length; + __le16 min_oq_element_length; + u8 reserved6[24]; + struct pqi_iu_layer_descriptor iu_layer_descriptors[32]; +}; + +#define PQI_MAX_EMBEDDED_SG_DESCRIPTORS 4 +#define PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS 3 + +struct pqi_raid_path_request { + struct pqi_iu_header header; + __le16 request_id; + __le16 nexus_id; + __le32 buffer_length; + u8 lun_number[8]; + __le16 protocol_specific; + u8 data_direction : 2; + u8 partial : 1; + u8 reserved1 : 4; + u8 fence : 1; + __le16 error_index; + u8 reserved2; + u8 task_attribute : 3; + u8 command_priority : 4; + u8 reserved3 : 1; + u8 reserved4 : 2; + u8 additional_cdb_bytes_usage : 3; + u8 reserved5 : 3; + u8 cdb[16]; + u8 reserved6[11]; + u8 ml_device_lun_number; + __le32 timeout; + struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS]; +}; + +struct pqi_aio_path_request { + struct pqi_iu_header header; + __le16 request_id; + u8 reserved1[2]; + __le32 nexus_id; + __le32 buffer_length; + u8 data_direction : 2; + u8 partial : 1; + u8 memory_type : 1; + u8 fence : 1; + u8 encryption_enable : 1; + u8 reserved2 : 2; + u8 task_attribute : 3; + u8 command_priority : 4; + u8 reserved3 : 1; + __le16 
data_encryption_key_index; + __le32 encrypt_tweak_lower; + __le32 encrypt_tweak_upper; + u8 cdb[16]; + __le16 error_index; + u8 num_sg_descriptors; + u8 cdb_length; + u8 lun_number[8]; + u8 reserved4[4]; + struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS]; +}; + +#define PQI_RAID1_NVME_XFER_LIMIT (32 * 1024) /* 32 KiB */ + +struct pqi_aio_r1_path_request { + struct pqi_iu_header header; + __le16 request_id; + __le16 volume_id; /* ID of the RAID volume */ + __le32 it_nexus_1; /* IT nexus of the 1st drive in the RAID volume */ + __le32 it_nexus_2; /* IT nexus of the 2nd drive in the RAID volume */ + __le32 it_nexus_3; /* IT nexus of the 3rd drive in the RAID volume */ + __le32 data_length; /* total bytes to read/write */ + u8 data_direction : 2; + u8 partial : 1; + u8 memory_type : 1; + u8 fence : 1; + u8 encryption_enable : 1; + u8 reserved : 2; + u8 task_attribute : 3; + u8 command_priority : 4; + u8 reserved2 : 1; + __le16 data_encryption_key_index; + u8 cdb[16]; + __le16 error_index; + u8 num_sg_descriptors; + u8 cdb_length; + u8 num_drives; /* number of drives in the RAID volume (2 or 3) */ + u8 reserved3[3]; + __le32 encrypt_tweak_lower; + __le32 encrypt_tweak_upper; + struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS]; +}; + +#define PQI_DEFAULT_MAX_WRITE_RAID_5_6 (8 * 1024U) +#define PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA (~0U) +#define PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME (32 * 1024U) + +struct pqi_aio_r56_path_request { + struct pqi_iu_header header; + __le16 request_id; + __le16 volume_id; /* ID of the RAID volume */ + __le32 data_it_nexus; /* IT nexus for the data drive */ + __le32 p_parity_it_nexus; /* IT nexus for the P parity drive */ + __le32 q_parity_it_nexus; /* IT nexus for the Q parity drive */ + __le32 data_length; /* total bytes to read/write */ + u8 data_direction : 2; + u8 partial : 1; + u8 mem_type : 1; /* 0 = PCIe, 1 = DDR */ + u8 fence : 1; + u8 encryption_enable : 1; + u8 reserved : 2; + u8 task_attribute : 3; + u8 command_priority : 4; + u8 reserved1 : 1; + __le16 data_encryption_key_index; + u8 cdb[16]; + __le16 error_index; + u8 num_sg_descriptors; + u8 cdb_length; + u8 xor_multiplier; + u8 reserved2[3]; + __le32 encrypt_tweak_lower; + __le32 encrypt_tweak_upper; + __le64 row; /* row = logical LBA/blocks per row */ + u8 reserved3[8]; + struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS]; +}; + +struct pqi_io_response { + struct pqi_iu_header header; + __le16 request_id; + __le16 error_index; + u8 reserved2[4]; +}; + +struct pqi_general_management_request { + struct pqi_iu_header header; + __le16 request_id; + union { + struct { + u8 reserved[2]; + __le32 buffer_length; + struct pqi_sg_descriptor sg_descriptors[3]; + } report_event_configuration; + + struct { + __le16 global_event_oq_id; + __le32 buffer_length; + struct pqi_sg_descriptor sg_descriptors[3]; + } set_event_configuration; + } data; +}; + +struct pqi_event_descriptor { + u8 event_type; + u8 reserved; + __le16 oq_id; +}; + +struct pqi_event_config { + u8 reserved[2]; + u8 num_event_descriptors; + u8 reserved1; + struct pqi_event_descriptor descriptors[]; +}; + +#define PQI_MAX_EVENT_DESCRIPTORS 255 + +#define PQI_EVENT_OFA_MEMORY_ALLOCATION 0x0 +#define PQI_EVENT_OFA_QUIESCE 0x1 +#define PQI_EVENT_OFA_CANCELED 0x2 + +struct pqi_event_response { + struct pqi_iu_header header; + u8 event_type; + u8 reserved2 : 7; + u8 request_acknowledge : 1; + __le16 event_id; + __le32 additional_event_id; + union { + struct { + __le32 
bytes_requested; + u8 reserved[12]; + } ofa_memory_allocation; + + struct { + __le16 reason; /* reason for cancellation */ + u8 reserved[14]; + } ofa_cancelled; + } data; +}; + +struct pqi_event_acknowledge_request { + struct pqi_iu_header header; + u8 event_type; + u8 reserved2; + __le16 event_id; + __le32 additional_event_id; +}; + +struct pqi_task_management_request { + struct pqi_iu_header header; + __le16 request_id; + __le16 nexus_id; + u8 reserved; + u8 ml_device_lun_number; + __le16 timeout; + u8 lun_number[8]; + __le16 protocol_specific; + __le16 outbound_queue_id_to_manage; + __le16 request_id_to_manage; + u8 task_management_function; + u8 reserved2 : 7; + u8 fence : 1; +}; + +#define SOP_TASK_MANAGEMENT_LUN_RESET 0x8 + +struct pqi_task_management_response { + struct pqi_iu_header header; + __le16 request_id; + __le16 nexus_id; + u8 additional_response_info[3]; + u8 response_code; +}; + +struct pqi_vendor_general_request { + struct pqi_iu_header header; + __le16 request_id; + __le16 function_code; + union { + struct { + __le16 first_section; + __le16 last_section; + u8 reserved[48]; + } config_table_update; + + struct { + __le64 buffer_address; + __le32 buffer_length; + u8 reserved[40]; + } ofa_memory_allocation; + } data; +}; + +struct pqi_vendor_general_response { + struct pqi_iu_header header; + __le16 request_id; + __le16 function_code; + __le16 status; + u8 reserved[2]; +}; + +#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0 +#define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE 1 + +#define PQI_OFA_VERSION 1 +#define PQI_OFA_SIGNATURE "OFA_QRM" +#define PQI_OFA_MAX_SG_DESCRIPTORS 64 + +struct pqi_ofa_memory { + __le64 signature; /* "OFA_QRM" */ + __le16 version; /* version of this struct (1 = 1st version) */ + u8 reserved[62]; + __le32 bytes_allocated; /* total allocated memory in bytes */ + __le16 num_memory_descriptors; + u8 reserved1[2]; + struct pqi_sg_descriptor sg_descriptor[PQI_OFA_MAX_SG_DESCRIPTORS]; +}; + +struct pqi_aio_error_info { + u8 status; + u8 service_response; + u8 data_present; + u8 reserved; + __le32 residual_count; + __le16 data_length; + __le16 reserved1; + u8 data[256]; +}; + +struct pqi_raid_error_info { + u8 data_in_result; + u8 data_out_result; + u8 reserved[3]; + u8 status; + __le16 status_qualifier; + __le16 sense_data_length; + __le16 response_data_length; + __le32 data_in_transferred; + __le32 data_out_transferred; + u8 data[256]; +}; + +#define PQI_REQUEST_IU_TASK_MANAGEMENT 0x13 +#define PQI_REQUEST_IU_RAID_PATH_IO 0x14 +#define PQI_REQUEST_IU_AIO_PATH_IO 0x15 +#define PQI_REQUEST_IU_AIO_PATH_RAID5_IO 0x18 +#define PQI_REQUEST_IU_AIO_PATH_RAID6_IO 0x19 +#define PQI_REQUEST_IU_AIO_PATH_RAID1_IO 0x1A +#define PQI_REQUEST_IU_GENERAL_ADMIN 0x60 +#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72 +#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73 +#define PQI_REQUEST_IU_VENDOR_GENERAL 0x75 +#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6 + +#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81 +#define PQI_RESPONSE_IU_TASK_MANAGEMENT 0x93 +#define PQI_RESPONSE_IU_GENERAL_ADMIN 0xe0 +#define PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS 0xf0 +#define PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS 0xf1 +#define PQI_RESPONSE_IU_RAID_PATH_IO_ERROR 0xf2 +#define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR 0xf3 +#define PQI_RESPONSE_IU_AIO_PATH_DISABLED 0xf4 +#define PQI_RESPONSE_IU_VENDOR_EVENT 0xf5 +#define PQI_RESPONSE_IU_VENDOR_GENERAL 0xf7 + +#define PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY 0x0 +#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ 0x10 +#define 
PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ 0x11 +#define PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ 0x12 +#define PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ 0x13 +#define PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY 0x14 + +#define PQI_GENERAL_ADMIN_STATUS_SUCCESS 0x0 + +#define PQI_IQ_PROPERTY_IS_AIO_QUEUE 0x1 + +#define PQI_GENERAL_ADMIN_IU_LENGTH 0x3c +#define PQI_PROTOCOL_SOP 0x0 + +#define PQI_DATA_IN_OUT_GOOD 0x0 +#define PQI_DATA_IN_OUT_UNDERFLOW 0x1 +#define PQI_DATA_IN_OUT_BUFFER_ERROR 0x40 +#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW 0x41 +#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA 0x42 +#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE 0x43 +#define PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR 0x60 +#define PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT 0x61 +#define PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED 0x62 +#define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED 0x63 +#define PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED 0x64 +#define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST 0x65 +#define PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION 0x66 +#define PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED 0x67 +#define PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ 0x6F +#define PQI_DATA_IN_OUT_ERROR 0xf0 +#define PQI_DATA_IN_OUT_PROTOCOL_ERROR 0xf1 +#define PQI_DATA_IN_OUT_HARDWARE_ERROR 0xf2 +#define PQI_DATA_IN_OUT_UNSOLICITED_ABORT 0xf3 +#define PQI_DATA_IN_OUT_ABORTED 0xf4 +#define PQI_DATA_IN_OUT_TIMEOUT 0xf5 + +#define CISS_CMD_STATUS_SUCCESS 0x0 +#define CISS_CMD_STATUS_TARGET_STATUS 0x1 +#define CISS_CMD_STATUS_DATA_UNDERRUN 0x2 +#define CISS_CMD_STATUS_DATA_OVERRUN 0x3 +#define CISS_CMD_STATUS_INVALID 0x4 +#define CISS_CMD_STATUS_PROTOCOL_ERROR 0x5 +#define CISS_CMD_STATUS_HARDWARE_ERROR 0x6 +#define CISS_CMD_STATUS_CONNECTION_LOST 0x7 +#define CISS_CMD_STATUS_ABORTED 0x8 +#define CISS_CMD_STATUS_ABORT_FAILED 0x9 +#define CISS_CMD_STATUS_UNSOLICITED_ABORT 0xa +#define CISS_CMD_STATUS_TIMEOUT 0xb +#define CISS_CMD_STATUS_UNABORTABLE 0xc +#define CISS_CMD_STATUS_TMF 0xd +#define CISS_CMD_STATUS_AIO_DISABLED 0xe + +#define PQI_CMD_STATUS_ABORTED CISS_CMD_STATUS_ABORTED + +#define PQI_NUM_EVENT_QUEUE_ELEMENTS 32 +#define PQI_EVENT_OQ_ELEMENT_LENGTH sizeof(struct pqi_event_response) + +#define PQI_EVENT_TYPE_HOTPLUG 0x1 +#define PQI_EVENT_TYPE_HARDWARE 0x2 +#define PQI_EVENT_TYPE_PHYSICAL_DEVICE 0x4 +#define PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5 +#define PQI_EVENT_TYPE_OFA 0xfb +#define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd +#define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe + +#pragma pack() + +#define PQI_ERROR_BUFFER_ELEMENT_LENGTH \ + sizeof(struct pqi_raid_error_info) + +/* these values are based on our implementation */ +#define PQI_ADMIN_IQ_NUM_ELEMENTS 8 +#define PQI_ADMIN_OQ_NUM_ELEMENTS 20 +#define PQI_ADMIN_IQ_ELEMENT_LENGTH 64 +#define PQI_ADMIN_OQ_ELEMENT_LENGTH 64 + +#define PQI_OPERATIONAL_IQ_ELEMENT_LENGTH 128 +#define PQI_OPERATIONAL_OQ_ELEMENT_LENGTH 16 + +#define PQI_MIN_MSIX_VECTORS 1 +#define PQI_MAX_MSIX_VECTORS 64 + +/* these values are defined by the PQI spec */ +#define PQI_MAX_NUM_ELEMENTS_ADMIN_QUEUE 255 +#define PQI_MAX_NUM_ELEMENTS_OPERATIONAL_QUEUE 65535 + +#define PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT 64 +#define PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT 16 +#define PQI_ADMIN_INDEX_ALIGNMENT 64 +#define PQI_OPERATIONAL_INDEX_ALIGNMENT 4 + +#define PQI_MIN_OPERATIONAL_QUEUE_ID 1 +#define PQI_MAX_OPERATIONAL_QUEUE_ID 65535 + +#define PQI_AIO_SERV_RESPONSE_COMPLETE 0 +#define PQI_AIO_SERV_RESPONSE_FAILURE 1 +#define PQI_AIO_SERV_RESPONSE_TMF_COMPLETE 2 +#define PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED 3 +#define 
PQI_AIO_SERV_RESPONSE_TMF_REJECTED 4 +#define PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN 5 + +#define PQI_AIO_STATUS_IO_ERROR 0x1 +#define PQI_AIO_STATUS_IO_ABORTED 0x2 +#define PQI_AIO_STATUS_NO_PATH_TO_DEVICE 0x3 +#define PQI_AIO_STATUS_INVALID_DEVICE 0x4 +#define PQI_AIO_STATUS_AIO_PATH_DISABLED 0xe +#define PQI_AIO_STATUS_UNDERRUN 0x51 +#define PQI_AIO_STATUS_OVERRUN 0x75 + +typedef u32 pqi_index_t; + +/* SOP data direction flags */ +#define SOP_NO_DIRECTION_FLAG 0 +#define SOP_WRITE_FLAG 1 /* host writes data to Data-Out */ + /* buffer */ +#define SOP_READ_FLAG 2 /* host receives data from Data-In */ + /* buffer */ +#define SOP_BIDIRECTIONAL 3 /* data is transferred from the */ + /* Data-Out buffer and data is */ + /* transferred to the Data-In buffer */ + +#define SOP_TASK_ATTRIBUTE_SIMPLE 0 +#define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE 1 +#define SOP_TASK_ATTRIBUTE_ORDERED 2 +#define SOP_TASK_ATTRIBUTE_ACA 4 + +#define SOP_TMF_COMPLETE 0x0 +#define SOP_TMF_REJECTED 0x4 +#define SOP_TMF_FUNCTION_SUCCEEDED 0x8 +#define SOP_TMF_INCORRECT_LOGICAL_UNIT 0x9 + +/* additional CDB bytes usage field codes */ +#define SOP_ADDITIONAL_CDB_BYTES_0 0 /* 16-byte CDB */ +#define SOP_ADDITIONAL_CDB_BYTES_4 1 /* 20-byte CDB */ +#define SOP_ADDITIONAL_CDB_BYTES_8 2 /* 24-byte CDB */ +#define SOP_ADDITIONAL_CDB_BYTES_12 3 /* 28-byte CDB */ +#define SOP_ADDITIONAL_CDB_BYTES_16 4 /* 32-byte CDB */ + +/* + * The purpose of this structure is to obtain proper alignment of objects in + * an admin queue pair. + */ +struct pqi_admin_queues_aligned { + __aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT) + u8 iq_element_array[PQI_ADMIN_IQ_ELEMENT_LENGTH] + [PQI_ADMIN_IQ_NUM_ELEMENTS]; + __aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT) + u8 oq_element_array[PQI_ADMIN_OQ_ELEMENT_LENGTH] + [PQI_ADMIN_OQ_NUM_ELEMENTS]; + __aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t iq_ci; + __aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t oq_pi; +}; + +struct pqi_admin_queues { + void *iq_element_array; + void *oq_element_array; + pqi_index_t __iomem *iq_ci; + pqi_index_t __iomem *oq_pi; + dma_addr_t iq_element_array_bus_addr; + dma_addr_t oq_element_array_bus_addr; + dma_addr_t iq_ci_bus_addr; + dma_addr_t oq_pi_bus_addr; + __le32 __iomem *iq_pi; + pqi_index_t iq_pi_copy; + __le32 __iomem *oq_ci; + pqi_index_t oq_ci_copy; + struct task_struct *task; + u16 int_msg_num; +}; + +struct pqi_queue_group { + struct pqi_ctrl_info *ctrl_info; /* backpointer */ + u16 iq_id[2]; + u16 oq_id; + u16 int_msg_num; + void *iq_element_array[2]; + void *oq_element_array; + dma_addr_t iq_element_array_bus_addr[2]; + dma_addr_t oq_element_array_bus_addr; + __le32 __iomem *iq_pi[2]; + pqi_index_t iq_pi_copy[2]; + pqi_index_t __iomem *iq_ci[2]; + pqi_index_t __iomem *oq_pi; + dma_addr_t iq_ci_bus_addr[2]; + dma_addr_t oq_pi_bus_addr; + __le32 __iomem *oq_ci; + pqi_index_t oq_ci_copy; + spinlock_t submit_lock[2]; /* protect submission queue */ + struct list_head request_list[2]; +}; + +struct pqi_event_queue { + u16 oq_id; + u16 int_msg_num; + void *oq_element_array; + pqi_index_t __iomem *oq_pi; + dma_addr_t oq_element_array_bus_addr; + dma_addr_t oq_pi_bus_addr; + __le32 __iomem *oq_ci; + pqi_index_t oq_ci_copy; +}; + +#define PQI_DEFAULT_QUEUE_GROUP 0 +#define PQI_MAX_QUEUE_GROUPS PQI_MAX_MSIX_VECTORS + +struct pqi_encryption_info { + u16 data_encryption_key_index; + u32 encrypt_tweak_lower; + u32 encrypt_tweak_upper; +}; + +#pragma pack(1) + +#define PQI_CONFIG_TABLE_SIGNATURE "CFGTABLE" +#define PQI_CONFIG_TABLE_MAX_LENGTH ((u16)~0) + +/* configuration table 
section IDs */ +#define PQI_CONFIG_TABLE_ALL_SECTIONS (-1) +#define PQI_CONFIG_TABLE_SECTION_GENERAL_INFO 0 +#define PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES 1 +#define PQI_CONFIG_TABLE_SECTION_FIRMWARE_ERRATA 2 +#define PQI_CONFIG_TABLE_SECTION_DEBUG 3 +#define PQI_CONFIG_TABLE_SECTION_HEARTBEAT 4 +#define PQI_CONFIG_TABLE_SECTION_SOFT_RESET 5 + +struct pqi_config_table { + u8 signature[8]; /* "CFGTABLE" */ + __le32 first_section_offset; /* offset in bytes from the base */ + /* address of this table to the */ + /* first section */ +}; + +struct pqi_config_table_section_header { + __le16 section_id; /* as defined by the */ + /* PQI_CONFIG_TABLE_SECTION_* */ + /* manifest constants above */ + __le16 next_section_offset; /* offset in bytes from base */ + /* address of the table of the */ + /* next section or 0 if last entry */ +}; + +struct pqi_config_table_general_info { + struct pqi_config_table_section_header header; + __le32 section_length; /* size of this section in bytes */ + /* including the section header */ + __le32 max_outstanding_requests; /* max. outstanding */ + /* commands supported by */ + /* the controller */ + __le32 max_sg_size; /* max. transfer size of a single */ + /* command */ + __le32 max_sg_per_request; /* max. number of scatter-gather */ + /* entries supported in a single */ + /* command */ +}; + +struct pqi_config_table_firmware_features { + struct pqi_config_table_section_header header; + __le16 num_elements; + u8 features_supported[]; +/* u8 features_requested_by_host[]; */ +/* u8 features_enabled[]; */ +/* The 2 fields below are only valid if the MAX_KNOWN_FEATURE bit is set. */ +/* __le16 firmware_max_known_feature; */ +/* __le16 host_max_known_feature; */ +}; + +#define PQI_FIRMWARE_FEATURE_OFA 0 +#define PQI_FIRMWARE_FEATURE_SMP 1 +#define PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE 2 +#define PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS 3 +#define PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS 4 +#define PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS 5 +#define PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS 6 +#define PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS 7 +#define PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS 8 +#define PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS 9 +#define PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS 10 +#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11 +#define PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN 12 +#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT 13 +#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14 +#define PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME 15 +#define PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN 16 +#define PQI_FIRMWARE_FEATURE_FW_TRIAGE 17 +#define PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5 18 +#define PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT 21 +#define PQI_FIRMWARE_FEATURE_MAXIMUM 21 + +struct pqi_config_table_debug { + struct pqi_config_table_section_header header; + __le32 scratchpad; +}; + +struct pqi_config_table_heartbeat { + struct pqi_config_table_section_header header; + __le32 heartbeat_counter; +}; + +struct pqi_config_table_soft_reset { + struct pqi_config_table_section_header header; + u8 soft_reset_status; +}; + +#define PQI_SOFT_RESET_INITIATE 0x1 +#define PQI_SOFT_RESET_ABORT 0x2 + +enum pqi_soft_reset_status { + RESET_INITIATE_FIRMWARE, + RESET_INITIATE_DRIVER, + RESET_ABORT, + RESET_NORESPONSE, + RESET_TIMEDOUT +}; + +union pqi_reset_register { + struct { + u32 reset_type : 3; + u32 reserved : 2; + u32 reset_action : 3; + u32 hold_in_pd1 : 1; + u32 reserved2 : 23; + } bits; + u32 all_bits; +}; + +#define 
PQI_RESET_ACTION_RESET 0x1 + +#define PQI_RESET_TYPE_NO_RESET 0x0 +#define PQI_RESET_TYPE_SOFT_RESET 0x1 +#define PQI_RESET_TYPE_FIRM_RESET 0x2 +#define PQI_RESET_TYPE_HARD_RESET 0x3 + +#define PQI_RESET_ACTION_COMPLETED 0x2 + +#define PQI_RESET_POLL_INTERVAL_MSECS 100 + +#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0) +#define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32 +#define PQI_MAX_TRANSFER_SIZE (1024U * 1024U) +#define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U) + +#define RAID_MAP_MAX_ENTRIES 1024 +#define RAID_MAP_MAX_DATA_DISKS_PER_ROW 128 + +#define PQI_PHYSICAL_DEVICE_BUS 0 +#define PQI_RAID_VOLUME_BUS 1 +#define PQI_HBA_BUS 2 +#define PQI_EXTERNAL_RAID_VOLUME_BUS 3 +#define PQI_MAX_BUS PQI_EXTERNAL_RAID_VOLUME_BUS +#define PQI_VSEP_CISS_BTL 379 + +struct report_lun_header { + __be32 list_length; + u8 flags; + u8 reserved[3]; +}; + +/* for flags field of struct report_lun_header */ +#define CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID (1 << 0) +#define CISS_REPORT_LOG_FLAG_QUEUE_DEPTH (1 << 5) +#define CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX (1 << 6) + +#define CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2 0x2 +#define CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4 0x4 +#define CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK 0xf + +struct report_log_lun { + u8 lunid[8]; + u8 volume_id[16]; +}; + +struct report_log_lun_list { + struct report_lun_header header; + struct report_log_lun lun_entries[]; +}; + +struct report_phys_lun_8byte_wwid { + u8 lunid[8]; + __be64 wwid; + u8 device_type; + u8 device_flags; + u8 lun_count; /* number of LUNs in a multi-LUN device */ + u8 redundant_paths; + u32 aio_handle; +}; + +struct report_phys_lun_16byte_wwid { + u8 lunid[8]; + u8 wwid[16]; + u8 device_type; + u8 device_flags; + u8 lun_count; /* number of LUNs in a multi-LUN device */ + u8 redundant_paths; + u32 aio_handle; +}; + +/* for device_flags field of struct report_phys_lun_extended_entry */ +#define CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED 0x8 + +struct report_phys_lun_8byte_wwid_list { + struct report_lun_header header; + struct report_phys_lun_8byte_wwid lun_entries[]; +}; + +struct report_phys_lun_16byte_wwid_list { + struct report_lun_header header; + struct report_phys_lun_16byte_wwid lun_entries[]; +}; + +struct raid_map_disk_data { + u32 aio_handle; + u8 xor_mult[2]; + u8 reserved[2]; +}; + +/* for flags field of RAID map */ +#define RAID_MAP_ENCRYPTION_ENABLED 0x1 + +struct raid_map { + __le32 structure_size; /* size of entire structure in bytes */ + __le32 volume_blk_size; /* bytes / block in the volume */ + __le64 volume_blk_cnt; /* logical blocks on the volume */ + u8 phys_blk_shift; /* shift factor to convert between */ + /* units of logical blocks and */ + /* physical disk blocks */ + u8 parity_rotation_shift; /* shift factor to convert between */ + /* units of logical stripes and */ + /* physical stripes */ + __le16 strip_size; /* blocks used on each disk / stripe */ + __le64 disk_starting_blk; /* first disk block used in volume */ + __le64 disk_blk_cnt; /* disk blocks used by volume / disk */ + __le16 data_disks_per_row; /* data disk entries / row in the map */ + __le16 metadata_disks_per_row; /* mirror/parity disk entries / row */ + /* in the map */ + __le16 row_cnt; /* rows in each layout map */ + __le16 layout_map_count; /* layout maps (1 map per */ + /* mirror parity group) */ + __le16 flags; + __le16 data_encryption_key_index; + u8 reserved[16]; + struct raid_map_disk_data disk_data[RAID_MAP_MAX_ENTRIES]; +}; + +#pragma pack() + +struct pqi_scsi_dev_raid_map_data { + bool is_write; + u8 raid_level; + u32 
map_index; + u64 first_block; + u64 last_block; + u32 data_length; + u32 block_cnt; + u32 blocks_per_row; + u64 first_row; + u64 last_row; + u32 first_row_offset; + u32 last_row_offset; + u32 first_column; + u32 last_column; + u64 r5or6_first_row; + u64 r5or6_last_row; + u32 r5or6_first_row_offset; + u32 r5or6_last_row_offset; + u32 r5or6_first_column; + u32 r5or6_last_column; + u16 data_disks_per_row; + u32 total_disks_per_row; + u16 layout_map_count; + u32 stripesize; + u16 strip_size; + u32 first_group; + u32 last_group; + u32 map_row; + u32 aio_handle; + u64 disk_block; + u32 disk_block_cnt; + u8 cdb[16]; + u8 cdb_length; + + /* RAID 1 specific */ +#define NUM_RAID1_MAP_ENTRIES 3 + u32 num_it_nexus_entries; + u32 it_nexus[NUM_RAID1_MAP_ENTRIES]; + + /* RAID 5 / RAID 6 specific */ + u32 p_parity_it_nexus; /* aio_handle */ + u32 q_parity_it_nexus; /* aio_handle */ + u8 xor_mult; + u64 row; + u64 stripe_lba; + u32 p_index; + u32 q_index; +}; + +#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0" + +#define NUM_STREAMS_PER_LUN 8 + +struct pqi_stream_data { + u64 next_lba; + u32 last_accessed; +}; + +#define PQI_MAX_LUNS_PER_DEVICE 256 + +struct pqi_tmf_work { + struct work_struct work_struct; + struct scsi_cmnd *scmd; + struct pqi_ctrl_info *ctrl_info; + struct pqi_scsi_dev *device; + u8 lun; + u8 scsi_opcode; +}; + +struct pqi_scsi_dev { + int devtype; /* as reported by INQUIRY command */ + u8 device_type; /* as reported by */ + /* BMIC_IDENTIFY_PHYSICAL_DEVICE */ + /* only valid for devtype = TYPE_DISK */ + int bus; + int target; + int lun; + u8 scsi3addr[8]; + u8 wwid[16]; + u8 volume_id[16]; + u8 is_physical_device : 1; + u8 is_external_raid_device : 1; + u8 is_expander_smp_device : 1; + u8 target_lun_valid : 1; + u8 device_gone : 1; + u8 new_device : 1; + u8 keep_device : 1; + u8 volume_offline : 1; + u8 rescan : 1; + u8 ignore_device : 1; + u8 erase_in_progress : 1; + bool aio_enabled; /* only valid for physical disks */ + bool in_remove; + bool in_reset[PQI_MAX_LUNS_PER_DEVICE]; + bool device_offline; + u8 vendor[8]; /* bytes 8-15 of inquiry data */ + u8 model[16]; /* bytes 16-31 of inquiry data */ + u64 sas_address; + u8 raid_level; + u16 queue_depth; /* max. 
queue_depth for this device */ + u16 advertised_queue_depth; + u32 aio_handle; + u8 volume_status; + u8 active_path_index; + u8 path_map; + u8 bay; + u8 box_index; + u8 phys_box_on_bus; + u8 phy_connected_dev_type; + u8 box[8]; + u16 phys_connector[8]; + u8 phy_id; + u8 ncq_prio_enable; + u8 ncq_prio_support; + u8 lun_count; + bool raid_bypass_configured; /* RAID bypass configured */ + bool raid_bypass_enabled; /* RAID bypass enabled */ + u32 next_bypass_group[RAID_MAP_MAX_DATA_DISKS_PER_ROW]; + struct raid_map *raid_map; /* RAID bypass map */ + u32 max_transfer_encrypted; + + struct pqi_sas_port *sas_port; + struct scsi_device *sdev; + + struct list_head scsi_device_list_entry; + struct list_head new_device_list_entry; + struct list_head add_list_entry; + struct list_head delete_list_entry; + + struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN]; + atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE]; + unsigned int raid_bypass_cnt; + + struct pqi_tmf_work tmf_work[PQI_MAX_LUNS_PER_DEVICE]; +}; + +/* VPD inquiry pages */ +#define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */ +#define CISS_VPD_LV_BYPASS_STATUS 0xc2 /* vendor-specific page */ +#define CISS_VPD_LV_STATUS 0xc3 /* vendor-specific page */ + +#define VPD_PAGE (1 << 8) + +#pragma pack(1) + +/* structure for CISS_VPD_LV_STATUS */ +struct ciss_vpd_logical_volume_status { + u8 peripheral_info; + u8 page_code; + u8 reserved; + u8 page_length; + u8 volume_status; + u8 reserved2[3]; + __be32 flags; +}; + +#pragma pack() + +/* constants for volume_status field of ciss_vpd_logical_volume_status */ +#define CISS_LV_OK 0 +#define CISS_LV_FAILED 1 +#define CISS_LV_NOT_CONFIGURED 2 +#define CISS_LV_DEGRADED 3 +#define CISS_LV_READY_FOR_RECOVERY 4 +#define CISS_LV_UNDERGOING_RECOVERY 5 +#define CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED 6 +#define CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM 7 +#define CISS_LV_HARDWARE_OVERHEATING 8 +#define CISS_LV_HARDWARE_HAS_OVERHEATED 9 +#define CISS_LV_UNDERGOING_EXPANSION 10 +#define CISS_LV_NOT_AVAILABLE 11 +#define CISS_LV_QUEUED_FOR_EXPANSION 12 +#define CISS_LV_DISABLED_SCSI_ID_CONFLICT 13 +#define CISS_LV_EJECTED 14 +#define CISS_LV_UNDERGOING_ERASE 15 +/* state 16 not used */ +#define CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD 17 +#define CISS_LV_UNDERGOING_RPI 18 +#define CISS_LV_PENDING_RPI 19 +#define CISS_LV_ENCRYPTED_NO_KEY 20 +/* state 21 not used */ +#define CISS_LV_UNDERGOING_ENCRYPTION 22 +#define CISS_LV_UNDERGOING_ENCRYPTION_REKEYING 23 +#define CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 24 +#define CISS_LV_PENDING_ENCRYPTION 25 +#define CISS_LV_PENDING_ENCRYPTION_REKEYING 26 +#define CISS_LV_NOT_SUPPORTED 27 +#define CISS_LV_STATUS_UNAVAILABLE 255 + +/* constants for flags field of ciss_vpd_logical_volume_status */ +#define CISS_LV_FLAGS_NO_HOST_IO 0x1 /* volume not available for */ + /* host I/O */ + +/* for SAS hosts and SAS expanders */ +struct pqi_sas_node { + struct device *parent_dev; + struct list_head port_list_head; +}; + +struct pqi_sas_port { + struct list_head port_list_entry; + u64 sas_address; + struct pqi_scsi_dev *device; + struct sas_port *port; + int next_phy_index; + struct list_head phy_list_head; + struct pqi_sas_node *parent_node; + struct sas_rphy *rphy; +}; + +struct pqi_sas_phy { + struct list_head phy_list_entry; + struct sas_phy *phy; + struct pqi_sas_port *parent_port; + bool added_to_port; +}; + +struct pqi_io_request { + atomic_t refcount; + u16 index; + void (*io_complete_callback)(struct pqi_io_request *io_request, + void *context); + 
void *context; + u8 raid_bypass : 1; + int status; + struct pqi_queue_group *queue_group; + struct scsi_cmnd *scmd; + void *error_info; + struct pqi_sg_descriptor *sg_chain_buffer; + dma_addr_t sg_chain_buffer_dma_handle; + void *iu; + struct list_head request_list_entry; +}; + +#define PQI_NUM_SUPPORTED_EVENTS 7 + +struct pqi_event { + bool pending; + u8 event_type; + u16 event_id; + u32 additional_event_id; +}; + +#define PQI_RESERVED_IO_SLOTS_LUN_RESET 1 +#define PQI_RESERVED_IO_SLOTS_EVENT_ACK PQI_NUM_SUPPORTED_EVENTS +#define PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS 3 +#define PQI_RESERVED_IO_SLOTS \ + (PQI_RESERVED_IO_SLOTS_LUN_RESET + PQI_RESERVED_IO_SLOTS_EVENT_ACK + \ + PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS) + +#define PQI_CTRL_PRODUCT_ID_GEN1 0 +#define PQI_CTRL_PRODUCT_ID_GEN2 7 +#define PQI_CTRL_PRODUCT_REVISION_A 0 +#define PQI_CTRL_PRODUCT_REVISION_B 1 + +enum pqi_ctrl_removal_state { + PQI_CTRL_PRESENT = 0, + PQI_CTRL_GRACEFUL_REMOVAL, + PQI_CTRL_SURPRISE_REMOVAL +}; + +struct pqi_ctrl_info { + unsigned int ctrl_id; + struct pci_dev *pci_dev; + char firmware_version[32]; + char serial_number[17]; + char model[17]; + char vendor[9]; + u8 product_id; + u8 product_revision; + void __iomem *iomem_base; + struct pqi_ctrl_registers __iomem *registers; + struct pqi_device_registers __iomem *pqi_registers; + u32 max_sg_entries; + u32 config_table_offset; + u32 config_table_length; + u16 max_inbound_queues; + u16 max_elements_per_iq; + u16 max_iq_element_length; + u16 max_outbound_queues; + u16 max_elements_per_oq; + u16 max_oq_element_length; + u32 max_transfer_size; + u32 max_outstanding_requests; + u32 max_io_slots; + unsigned int scsi_ml_can_queue; + unsigned short sg_tablesize; + unsigned int max_sectors; + u32 error_buffer_length; + void *error_buffer; + dma_addr_t error_buffer_dma_handle; + size_t sg_chain_buffer_length; + unsigned int num_queue_groups; + u16 num_elements_per_iq; + u16 num_elements_per_oq; + u16 max_inbound_iu_length_per_firmware; + u16 max_inbound_iu_length; + unsigned int max_sg_per_iu; + unsigned int max_sg_per_r56_iu; + void *admin_queue_memory_base; + u32 admin_queue_memory_length; + dma_addr_t admin_queue_memory_base_dma_handle; + void *queue_memory_base; + u32 queue_memory_length; + dma_addr_t queue_memory_base_dma_handle; + struct pqi_admin_queues admin_queues; + struct pqi_queue_group queue_groups[PQI_MAX_QUEUE_GROUPS]; + struct pqi_event_queue event_queue; + enum pqi_irq_mode irq_mode; + int max_msix_vectors; + int num_msix_vectors_enabled; + int num_msix_vectors_initialized; + int event_irq; + struct Scsi_Host *scsi_host; + + struct mutex scan_mutex; + struct mutex lun_reset_mutex; + bool controller_online; + bool block_requests; + bool scan_blocked; + u8 logical_volume_rescan_needed : 1; + u8 inbound_spanning_supported : 1; + u8 outbound_spanning_supported : 1; + u8 pqi_mode_enabled : 1; + u8 pqi_reset_quiesce_supported : 1; + u8 soft_reset_handshake_supported : 1; + u8 raid_iu_timeout_supported : 1; + u8 tmf_iu_timeout_supported : 1; + u8 firmware_triage_supported : 1; + u8 rpl_extended_format_4_5_supported : 1; + u8 multi_lun_device_supported : 1; + u8 enable_r1_writes : 1; + u8 enable_r5_writes : 1; + u8 enable_r6_writes : 1; + u8 lv_drive_type_mix_valid : 1; + u8 enable_stream_detection : 1; + u8 disable_managed_interrupts : 1; + u8 ciss_report_log_flags; + u32 max_transfer_encrypted_sas_sata; + u32 max_transfer_encrypted_nvme; + u32 max_write_raid_5_6; + u32 max_write_raid_1_10_2drive; + u32 max_write_raid_1_10_3drive; + int numa_node; 
+ + struct list_head scsi_device_list; + spinlock_t scsi_device_list_lock; + + struct delayed_work rescan_work; + struct delayed_work update_time_work; + + struct pqi_sas_node *sas_host; + u64 sas_address; + + struct pqi_io_request *io_request_pool; + struct pqi_event events[PQI_NUM_SUPPORTED_EVENTS]; + struct work_struct event_work; + + atomic_t num_interrupts; + int previous_num_interrupts; + u32 previous_heartbeat_count; + __le32 __iomem *heartbeat_counter; + u8 __iomem *soft_reset_status; + struct timer_list heartbeat_timer; + struct work_struct ctrl_offline_work; + + struct semaphore sync_request_sem; + atomic_t num_busy_threads; + atomic_t num_blocked_threads; + wait_queue_head_t block_requests_wait; + + struct mutex ofa_mutex; + struct pqi_ofa_memory *pqi_ofa_mem_virt_addr; + dma_addr_t pqi_ofa_mem_dma_handle; + void **pqi_ofa_chunk_virt_addr; + struct work_struct ofa_memory_alloc_work; + struct work_struct ofa_quiesce_work; + u32 ofa_bytes_requested; + u16 ofa_cancel_reason; + enum pqi_ctrl_removal_state ctrl_removal_state; +}; + +enum pqi_ctrl_mode { + SIS_MODE = 0, + PQI_MODE +}; + +/* + * assume worst case: SATA queue depth of 31 minus 4 internal firmware commands + */ +#define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 27 + +/* CISS commands */ +#define CISS_READ 0xc0 +#define CISS_REPORT_LOG 0xc2 /* Report Logical LUNs */ +#define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */ +#define CISS_GET_RAID_MAP 0xc8 + +/* BMIC commands */ +#define BMIC_IDENTIFY_CONTROLLER 0x11 +#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15 +#define BMIC_READ 0x26 +#define BMIC_WRITE 0x27 +#define BMIC_SENSE_FEATURE 0x61 +#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64 +#define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66 +#define BMIC_CSMI_PASSTHRU 0x68 +#define BMIC_WRITE_HOST_WELLNESS 0xa5 +#define BMIC_FLUSH_CACHE 0xc2 +#define BMIC_SET_DIAG_OPTIONS 0xf4 +#define BMIC_SENSE_DIAG_OPTIONS 0xf5 + +#define CSMI_CC_SAS_SMP_PASSTHRU 0x17 + +#define SA_FLUSH_CACHE 0x1 + +#define MASKED_DEVICE(lunid) ((lunid)[3] & 0xc0) +#define CISS_GET_LEVEL_2_BUS(lunid) ((lunid)[7] & 0x3f) +#define CISS_GET_LEVEL_2_TARGET(lunid) ((lunid)[6]) +#define CISS_GET_DRIVE_NUMBER(lunid) \ + (((CISS_GET_LEVEL_2_BUS((lunid)) - 1) << 8) + \ + CISS_GET_LEVEL_2_TARGET((lunid))) + +#define LV_GET_DRIVE_TYPE_MIX(lunid) ((lunid)[6]) + +#define LV_DRIVE_TYPE_MIX_UNKNOWN 0 +#define LV_DRIVE_TYPE_MIX_NO_RESTRICTION 1 +#define LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY 2 +#define LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY 3 +#define LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY 4 +#define LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY 5 +#define LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY 6 +#define LV_DRIVE_TYPE_MIX_SAS_ONLY 7 +#define LV_DRIVE_TYPE_MIX_SATA_ONLY 8 +#define LV_DRIVE_TYPE_MIX_NVME_ONLY 9 + +#define NO_TIMEOUT ((unsigned long) -1) + +#pragma pack(1) + +struct bmic_identify_controller { + u8 configured_logical_drive_count; + __le32 configuration_signature; + u8 firmware_version_short[4]; + u8 reserved[145]; + __le16 extended_logical_unit_count; + u8 reserved1[34]; + __le16 firmware_build_number; + u8 reserved2[8]; + u8 vendor_id[8]; + u8 product_id[16]; + u8 reserved3[62]; + __le32 extra_controller_flags; + u8 reserved4[2]; + u8 controller_mode; + u8 spare_part_number[32]; + u8 firmware_version_long[32]; +}; + +/* constants for extra_controller_flags field of bmic_identify_controller */ +#define BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED 0x20000000 + +struct bmic_sense_subsystem_info { + u8 reserved[44]; + u8 ctrl_serial_number[16]; +}; + +/* constants for device_type field */ 
+#define SA_DEVICE_TYPE_SATA 0x1 +#define SA_DEVICE_TYPE_SAS 0x2 +#define SA_DEVICE_TYPE_EXPANDER_SMP 0x5 +#define SA_DEVICE_TYPE_SES 0x6 +#define SA_DEVICE_TYPE_CONTROLLER 0x7 +#define SA_DEVICE_TYPE_NVME 0x9 + +struct bmic_identify_physical_device { + u8 scsi_bus; /* SCSI Bus number on controller */ + u8 scsi_id; /* SCSI ID on this bus */ + __le16 block_size; /* sector size in bytes */ + __le32 total_blocks; /* number for sectors on drive */ + __le32 reserved_blocks; /* controller reserved (RIS) */ + u8 model[40]; /* Physical Drive Model */ + u8 serial_number[40]; /* Drive Serial Number */ + u8 firmware_revision[8]; /* drive firmware revision */ + u8 scsi_inquiry_bits; /* inquiry byte 7 bits */ + u8 compaq_drive_stamp; /* 0 means drive not stamped */ + u8 last_failure_reason; + u8 flags; + u8 more_flags; + u8 scsi_lun; /* SCSI LUN for phys drive */ + u8 yet_more_flags; + u8 even_more_flags; + __le32 spi_speed_rules; + u8 phys_connector[2]; /* connector number on controller */ + u8 phys_box_on_bus; /* phys enclosure this drive resides */ + u8 phys_bay_in_box; /* phys drv bay this drive resides */ + __le32 rpm; /* drive rotational speed in RPM */ + u8 device_type; /* type of drive */ + u8 sata_version; /* only valid when device_type = */ + /* SA_DEVICE_TYPE_SATA */ + __le64 big_total_block_count; + __le64 ris_starting_lba; + __le32 ris_size; + u8 wwid[20]; + u8 controller_phy_map[32]; + __le16 phy_count; + u8 phy_connected_dev_type[256]; + u8 phy_to_drive_bay_num[256]; + __le16 phy_to_attached_dev_index[256]; + u8 box_index; + u8 reserved; + __le16 extra_physical_drive_flags; + u8 negotiated_link_rate[256]; + u8 phy_to_phy_map[256]; + u8 redundant_path_present_map; + u8 redundant_path_failure_map; + u8 active_path_number; + __le16 alternate_paths_phys_connector[8]; + u8 alternate_paths_phys_box_on_port[8]; + u8 multi_lun_device_lun_count; + u8 minimum_good_fw_revision[8]; + u8 unique_inquiry_bytes[20]; + u8 current_temperature_degrees; + u8 temperature_threshold_degrees; + u8 max_temperature_degrees; + u8 logical_blocks_per_phys_block_exp; + __le16 current_queue_depth_limit; + u8 switch_name[10]; + __le16 switch_port; + u8 alternate_paths_switch_name[40]; + u8 alternate_paths_switch_port[8]; + __le16 power_on_hours; + __le16 percent_endurance_used; + u8 drive_authentication; + u8 smart_carrier_authentication; + u8 smart_carrier_app_fw_version; + u8 smart_carrier_bootloader_fw_version; + u8 sanitize_flags; + u8 encryption_key_flags; + u8 encryption_key_name[64]; + __le32 misc_drive_flags; + __le16 dek_index; + __le16 hba_drive_encryption_flags; + __le16 max_overwrite_time; + __le16 max_block_erase_time; + __le16 max_crypto_erase_time; + u8 connector_info[5]; + u8 connector_name[8][8]; + u8 page_83_identifier[16]; + u8 maximum_link_rate[256]; + u8 negotiated_physical_link_rate[256]; + u8 box_connector_name[8]; + u8 padding_to_multiple_of_512[9]; +}; + +#define BMIC_SENSE_FEATURE_IO_PAGE 0x8 +#define BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE 0x2 + +struct bmic_sense_feature_buffer_header { + u8 page_code; + u8 subpage_code; + __le16 buffer_length; +}; + +struct bmic_sense_feature_page_header { + u8 page_code; + u8 subpage_code; + __le16 page_length; +}; + +struct bmic_sense_feature_io_page_aio_subpage { + struct bmic_sense_feature_page_header header; + u8 firmware_read_support; + u8 driver_read_support; + u8 firmware_write_support; + u8 driver_write_support; + __le16 max_transfer_encrypted_sas_sata; + __le16 max_transfer_encrypted_nvme; + __le16 max_write_raid_5_6; + __le16 
max_write_raid_1_10_2drive; + __le16 max_write_raid_1_10_3drive; +}; + +struct bmic_smp_request { + u8 frame_type; + u8 function; + u8 allocated_response_length; + u8 request_length; + u8 additional_request_bytes[1016]; +}; + +struct bmic_smp_response { + u8 frame_type; + u8 function; + u8 function_result; + u8 response_length; + u8 additional_response_bytes[1016]; +}; + +struct bmic_csmi_ioctl_header { + __le32 header_length; + u8 signature[8]; + __le32 timeout; + __le32 control_code; + __le32 return_code; + __le32 length; +}; + +struct bmic_csmi_smp_passthru { + u8 phy_identifier; + u8 port_identifier; + u8 connection_rate; + u8 reserved; + __be64 destination_sas_address; + __le32 request_length; + struct bmic_smp_request request; + u8 connection_status; + u8 reserved1[3]; + __le32 response_length; + struct bmic_smp_response response; +}; + +struct bmic_csmi_smp_passthru_buffer { + struct bmic_csmi_ioctl_header ioctl_header; + struct bmic_csmi_smp_passthru parameters; +}; + +struct bmic_flush_cache { + u8 disable_flag; + u8 system_power_action; + u8 ndu_flush; + u8 shutdown_event; + u8 reserved[28]; +}; + +/* for shutdown_event member of struct bmic_flush_cache */ +enum bmic_flush_cache_shutdown_event { + NONE_CACHE_FLUSH_ONLY = 0, + SHUTDOWN = 1, + HIBERNATE = 2, + SUSPEND = 3, + RESTART = 4 +}; + +struct bmic_diag_options { + __le32 options; +}; + +#pragma pack() + +static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost) +{ + void *hostdata = shost_priv(shost); + + return *((struct pqi_ctrl_info **)hostdata); +} + +void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, + struct sas_rphy *rphy); + +int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info); +void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info); +int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node, + struct pqi_scsi_dev *device); +void pqi_remove_sas_device(struct pqi_scsi_dev *device); +struct pqi_scsi_dev *pqi_find_device_by_sas_rphy( + struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy); +void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd); +int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, + struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length, + struct pqi_raid_error_info *error_info); + +extern struct sas_function_template pqi_sas_transport_functions; + +#endif /* _SMARTPQI_H */ diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c new file mode 100644 index 000000000..9a58df931 --- /dev/null +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -0,0 +1,10754 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries + * Copyright (c) 2016-2018 Microsemi Corporation + * Copyright (c) 2016 PMC-Sierra, Inc. 
+ * + * Questions/Comments/Bugfixes to storagedev@microchip.com + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "smartpqi.h" +#include "smartpqi_sis.h" + +#if !defined(BUILD_TIMESTAMP) +#define BUILD_TIMESTAMP +#endif + +#define DRIVER_VERSION "2.1.24-046" +#define DRIVER_MAJOR 2 +#define DRIVER_MINOR 1 +#define DRIVER_RELEASE 24 +#define DRIVER_REVISION 46 + +#define DRIVER_NAME "Microchip SmartPQI Driver (v" \ + DRIVER_VERSION BUILD_TIMESTAMP ")" +#define DRIVER_NAME_SHORT "smartpqi" + +#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor)) + +#define PQI_POST_RESET_DELAY_SECS 5 +#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10 + +#define PQI_NO_COMPLETION ((void *)-1) + +MODULE_AUTHOR("Microchip"); +MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version " + DRIVER_VERSION); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); + +struct pqi_cmd_priv { + int this_residual; +}; + +static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +static void pqi_verify_structures(void); +static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, + enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason); +static void pqi_ctrl_offline_worker(struct work_struct *work); +static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info); +static void pqi_scan_start(struct Scsi_Host *shost); +static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, + struct pqi_queue_group *queue_group, enum pqi_io_path path, + struct pqi_io_request *io_request); +static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, + struct pqi_iu_header *request, unsigned int flags, + struct pqi_raid_error_info *error_info); +static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, + struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, + unsigned int cdb_length, struct pqi_queue_group *queue_group, + struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio); +static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, + struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, + struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, + struct pqi_scsi_dev_raid_map_data *rmd); +static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, + struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, + struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, + struct pqi_scsi_dev_raid_map_data *rmd); +static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info); +static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info); +static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs); +static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info); +static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info); +static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info); +static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs); +static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info); +static void pqi_tmf_worker(struct work_struct *work); + +/* for flags argument to pqi_submit_raid_request_synchronous() */ +#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1 + +static struct scsi_transport_template *pqi_sas_transport_template; + 
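+/*
+ * A minimal usage sketch of the per-command private area defined above,
+ * assuming the SCSI host template elsewhere in this driver reserves that
+ * space via .cmd_size = sizeof(struct pqi_cmd_priv); scsi_cmd_priv() then
+ * returns the per-command region for each scsi_cmnd. The template and
+ * function names below, other than pqi_cmd_priv(), are illustrative
+ * placeholders only, not part of this driver:
+ *
+ *	static struct scsi_host_template example_template = {
+ *		.module		= THIS_MODULE,
+ *		.name		= "example",
+ *		.cmd_size	= sizeof(struct pqi_cmd_priv),
+ *	};
+ *
+ *	static int example_queuecommand(struct Scsi_Host *shost,
+ *					struct scsi_cmnd *scmd)
+ *	{
+ *		struct pqi_cmd_priv *priv = pqi_cmd_priv(scmd);
+ *
+ *		priv->this_residual = 0;	(per-command scratch state)
+ *		return 0;
+ *	}
+ */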
+static atomic_t pqi_controller_count = ATOMIC_INIT(0); + +enum pqi_lockup_action { + NONE, + REBOOT, + PANIC +}; + +static enum pqi_lockup_action pqi_lockup_action = NONE; + +static struct { + enum pqi_lockup_action action; + char *name; +} pqi_lockup_actions[] = { + { + .action = NONE, + .name = "none", + }, + { + .action = REBOOT, + .name = "reboot", + }, + { + .action = PANIC, + .name = "panic", + }, +}; + +static unsigned int pqi_supported_event_types[] = { + PQI_EVENT_TYPE_HOTPLUG, + PQI_EVENT_TYPE_HARDWARE, + PQI_EVENT_TYPE_PHYSICAL_DEVICE, + PQI_EVENT_TYPE_LOGICAL_DEVICE, + PQI_EVENT_TYPE_OFA, + PQI_EVENT_TYPE_AIO_STATE_CHANGE, + PQI_EVENT_TYPE_AIO_CONFIG_CHANGE, +}; + +static int pqi_disable_device_id_wildcards; +module_param_named(disable_device_id_wildcards, + pqi_disable_device_id_wildcards, int, 0644); +MODULE_PARM_DESC(disable_device_id_wildcards, + "Disable device ID wildcards."); + +static int pqi_disable_heartbeat; +module_param_named(disable_heartbeat, + pqi_disable_heartbeat, int, 0644); +MODULE_PARM_DESC(disable_heartbeat, + "Disable heartbeat."); + +static int pqi_disable_ctrl_shutdown; +module_param_named(disable_ctrl_shutdown, + pqi_disable_ctrl_shutdown, int, 0644); +MODULE_PARM_DESC(disable_ctrl_shutdown, + "Disable controller shutdown when controller locked up."); + +static char *pqi_lockup_action_param; +module_param_named(lockup_action, + pqi_lockup_action_param, charp, 0644); +MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n" + "\t\tSupported: none, reboot, panic\n" + "\t\tDefault: none"); + +static int pqi_expose_ld_first; +module_param_named(expose_ld_first, + pqi_expose_ld_first, int, 0644); +MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives."); + +static int pqi_hide_vsep; +module_param_named(hide_vsep, + pqi_hide_vsep, int, 0644); +MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives."); + +static int pqi_disable_managed_interrupts; +module_param_named(disable_managed_interrupts, + pqi_disable_managed_interrupts, int, 0644); +MODULE_PARM_DESC(disable_managed_interrupts, + "Disable the kernel automatically assigning SMP affinity to IRQs."); + +static unsigned int pqi_ctrl_ready_timeout_secs; +module_param_named(ctrl_ready_timeout, + pqi_ctrl_ready_timeout_secs, uint, 0644); +MODULE_PARM_DESC(ctrl_ready_timeout, + "Timeout in seconds for driver to wait for controller ready."); + +static char *raid_levels[] = { + "RAID-0", + "RAID-4", + "RAID-1(1+0)", + "RAID-5", + "RAID-5+1", + "RAID-6", + "RAID-1(Triple)", +}; + +static char *pqi_raid_level_to_string(u8 raid_level) +{ + if (raid_level < ARRAY_SIZE(raid_levels)) + return raid_levels[raid_level]; + + return "RAID UNKNOWN"; +} + +#define SA_RAID_0 0 +#define SA_RAID_4 1 +#define SA_RAID_1 2 /* also used for RAID 10 */ +#define SA_RAID_5 3 /* also used for RAID 50 */ +#define SA_RAID_51 4 +#define SA_RAID_6 5 /* also used for RAID 60 */ +#define SA_RAID_TRIPLE 6 /* also used for RAID 1+0 Triple */ +#define SA_RAID_MAX SA_RAID_TRIPLE +#define SA_RAID_UNKNOWN 0xff + +static inline void pqi_scsi_done(struct scsi_cmnd *scmd) +{ + pqi_prep_for_scsi_done(scmd); + scsi_done(scmd); +} + +static inline void pqi_disable_write_same(struct scsi_device *sdev) +{ + sdev->no_write_same = 1; +} + +static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2) +{ + return memcmp(scsi3addr1, scsi3addr2, 8) == 0; +} + +static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device) +{ + return !device->is_physical_device; +} + +static 
inline bool pqi_is_external_raid_addr(u8 *scsi3addr) +{ + return scsi3addr[2] != 0; +} + +static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) +{ + return !ctrl_info->controller_online; +} + +static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) +{ + if (ctrl_info->controller_online) + if (!sis_is_firmware_running(ctrl_info)) + pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP); +} + +static inline bool pqi_is_hba_lunid(u8 *scsi3addr) +{ + return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID); +} + +#define PQI_DRIVER_SCRATCH_PQI_MODE 0x1 +#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED 0x2 + +static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info) +{ + return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE; +} + +static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, + enum pqi_ctrl_mode mode) +{ + u32 driver_scratch; + + driver_scratch = sis_read_driver_scratch(ctrl_info); + + if (mode == PQI_MODE) + driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE; + else + driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE; + + sis_write_driver_scratch(ctrl_info, driver_scratch); +} + +static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info) +{ + return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0; +} + +static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported) +{ + u32 driver_scratch; + + driver_scratch = sis_read_driver_scratch(ctrl_info); + + if (is_supported) + driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED; + else + driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED; + + sis_write_driver_scratch(ctrl_info, driver_scratch); +} + +static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info) +{ + ctrl_info->scan_blocked = true; + mutex_lock(&ctrl_info->scan_mutex); +} + +static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info) +{ + ctrl_info->scan_blocked = false; + mutex_unlock(&ctrl_info->scan_mutex); +} + +static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info) +{ + return ctrl_info->scan_blocked; +} + +static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info) +{ + mutex_lock(&ctrl_info->lun_reset_mutex); +} + +static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info) +{ + mutex_unlock(&ctrl_info->lun_reset_mutex); +} + +static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info) +{ + struct Scsi_Host *shost; + unsigned int num_loops; + int msecs_sleep; + + shost = ctrl_info->scsi_host; + + scsi_block_requests(shost); + + num_loops = 0; + msecs_sleep = 20; + while (scsi_host_busy(shost)) { + num_loops++; + if (num_loops == 10) + msecs_sleep = 500; + msleep(msecs_sleep); + } +} + +static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info) +{ + scsi_unblock_requests(ctrl_info->scsi_host); +} + +static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info) +{ + atomic_inc(&ctrl_info->num_busy_threads); +} + +static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info) +{ + atomic_dec(&ctrl_info->num_busy_threads); +} + +static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) +{ + return ctrl_info->block_requests; +} + +static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info) +{ + ctrl_info->block_requests = true; +} + +static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) +{ 
+ ctrl_info->block_requests = false; + wake_up_all(&ctrl_info->block_requests_wait); +} + +static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) +{ + if (!pqi_ctrl_blocked(ctrl_info)) + return; + + atomic_inc(&ctrl_info->num_blocked_threads); + wait_event(ctrl_info->block_requests_wait, + !pqi_ctrl_blocked(ctrl_info)); + atomic_dec(&ctrl_info->num_blocked_threads); +} + +#define PQI_QUIESCE_WARNING_TIMEOUT_SECS 10 + +static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info) +{ + unsigned long start_jiffies; + unsigned long warning_timeout; + bool displayed_warning; + + displayed_warning = false; + start_jiffies = jiffies; + warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; + + while (atomic_read(&ctrl_info->num_busy_threads) > + atomic_read(&ctrl_info->num_blocked_threads)) { + if (time_after(jiffies, warning_timeout)) { + dev_warn(&ctrl_info->pci_dev->dev, + "waiting %u seconds for driver activity to quiesce\n", + jiffies_to_msecs(jiffies - start_jiffies) / 1000); + displayed_warning = true; + warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies; + } + usleep_range(1000, 2000); + } + + if (displayed_warning) + dev_warn(&ctrl_info->pci_dev->dev, + "driver activity quiesced after waiting for %u seconds\n", + jiffies_to_msecs(jiffies - start_jiffies) / 1000); +} + +static inline bool pqi_device_offline(struct pqi_scsi_dev *device) +{ + return device->device_offline; +} + +static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info) +{ + mutex_lock(&ctrl_info->ofa_mutex); +} + +static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info) +{ + mutex_unlock(&ctrl_info->ofa_mutex); +} + +static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) +{ + mutex_lock(&ctrl_info->ofa_mutex); + mutex_unlock(&ctrl_info->ofa_mutex); +} + +static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info) +{ + return mutex_is_locked(&ctrl_info->ofa_mutex); +} + +static inline void pqi_device_remove_start(struct pqi_scsi_dev *device) +{ + device->in_remove = true; +} + +static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device) +{ + return device->in_remove; +} + +static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun) +{ + device->in_reset[lun] = true; +} + +static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun) +{ + device->in_reset[lun] = false; +} + +static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun) +{ + return device->in_reset[lun]; +} + +static inline int pqi_event_type_to_event_index(unsigned int event_type) +{ + int index; + + for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) + if (event_type == pqi_supported_event_types[index]) + return index; + + return -1; +} + +static inline bool pqi_is_supported_event(unsigned int event_type) +{ + return pqi_event_type_to_event_index(event_type) != -1; +} + +static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info, + unsigned long delay) +{ + if (pqi_ctrl_offline(ctrl_info)) + return; + + schedule_delayed_work(&ctrl_info->rescan_work, delay); +} + +static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) +{ + pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); +} + +#define PQI_RESCAN_WORK_DELAY (10 * HZ) + +static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info) +{ + pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY); +} + +static 
inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) +{ + cancel_delayed_work_sync(&ctrl_info->rescan_work); +} + +static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info) +{ + if (!ctrl_info->heartbeat_counter) + return 0; + + return readl(ctrl_info->heartbeat_counter); +} + +static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info) +{ + return readb(ctrl_info->soft_reset_status); +} + +static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info) +{ + u8 status; + + status = pqi_read_soft_reset_status(ctrl_info); + status &= ~PQI_SOFT_RESET_ABORT; + writeb(status, ctrl_info->soft_reset_status); +} + +static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd) +{ + bool io_high_prio; + int priority_class; + + io_high_prio = false; + + if (device->ncq_prio_enable) { + priority_class = + IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd))); + if (priority_class == IOPRIO_CLASS_RT) { + /* Set NCQ priority for read/write commands. */ + switch (scmd->cmnd[0]) { + case WRITE_16: + case READ_16: + case WRITE_12: + case READ_12: + case WRITE_10: + case READ_10: + case WRITE_6: + case READ_6: + io_high_prio = true; + break; + } + } + } + + return io_high_prio; +} + +static int pqi_map_single(struct pci_dev *pci_dev, + struct pqi_sg_descriptor *sg_descriptor, void *buffer, + size_t buffer_length, enum dma_data_direction data_direction) +{ + dma_addr_t bus_address; + + if (!buffer || buffer_length == 0 || data_direction == DMA_NONE) + return 0; + + bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length, + data_direction); + if (dma_mapping_error(&pci_dev->dev, bus_address)) + return -ENOMEM; + + put_unaligned_le64((u64)bus_address, &sg_descriptor->address); + put_unaligned_le32(buffer_length, &sg_descriptor->length); + put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); + + return 0; +} + +static void pqi_pci_unmap(struct pci_dev *pci_dev, + struct pqi_sg_descriptor *descriptors, int num_descriptors, + enum dma_data_direction data_direction) +{ + int i; + + if (data_direction == DMA_NONE) + return; + + for (i = 0; i < num_descriptors; i++) + dma_unmap_single(&pci_dev->dev, + (dma_addr_t)get_unaligned_le64(&descriptors[i].address), + get_unaligned_le32(&descriptors[i].length), + data_direction); +} + +static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, + struct pqi_raid_path_request *request, u8 cmd, + u8 *scsi3addr, void *buffer, size_t buffer_length, + u16 vpd_page, enum dma_data_direction *dir) +{ + u8 *cdb; + size_t cdb_length = buffer_length; + + memset(request, 0, sizeof(*request)); + + request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; + put_unaligned_le16(offsetof(struct pqi_raid_path_request, + sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH, + &request->header.iu_length); + put_unaligned_le32(buffer_length, &request->buffer_length); + memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number)); + request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; + request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; + + cdb = request->cdb; + + switch (cmd) { + case INQUIRY: + request->data_direction = SOP_READ_FLAG; + cdb[0] = INQUIRY; + if (vpd_page & VPD_PAGE) { + cdb[1] = 0x1; + cdb[2] = (u8)vpd_page; + } + cdb[4] = (u8)cdb_length; + break; + case CISS_REPORT_LOG: + case CISS_REPORT_PHYS: + request->data_direction = SOP_READ_FLAG; + cdb[0] = cmd; + if (cmd == CISS_REPORT_PHYS) { + if (ctrl_info->rpl_extended_format_4_5_supported) 
+ cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4; + else + cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2; + } else { + cdb[1] = ctrl_info->ciss_report_log_flags; + } + put_unaligned_be32(cdb_length, &cdb[6]); + break; + case CISS_GET_RAID_MAP: + request->data_direction = SOP_READ_FLAG; + cdb[0] = CISS_READ; + cdb[1] = CISS_GET_RAID_MAP; + put_unaligned_be32(cdb_length, &cdb[6]); + break; + case SA_FLUSH_CACHE: + request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST; + request->data_direction = SOP_WRITE_FLAG; + cdb[0] = BMIC_WRITE; + cdb[6] = BMIC_FLUSH_CACHE; + put_unaligned_be16(cdb_length, &cdb[7]); + break; + case BMIC_SENSE_DIAG_OPTIONS: + cdb_length = 0; + fallthrough; + case BMIC_IDENTIFY_CONTROLLER: + case BMIC_IDENTIFY_PHYSICAL_DEVICE: + case BMIC_SENSE_SUBSYSTEM_INFORMATION: + case BMIC_SENSE_FEATURE: + request->data_direction = SOP_READ_FLAG; + cdb[0] = BMIC_READ; + cdb[6] = cmd; + put_unaligned_be16(cdb_length, &cdb[7]); + break; + case BMIC_SET_DIAG_OPTIONS: + cdb_length = 0; + fallthrough; + case BMIC_WRITE_HOST_WELLNESS: + request->data_direction = SOP_WRITE_FLAG; + cdb[0] = BMIC_WRITE; + cdb[6] = cmd; + put_unaligned_be16(cdb_length, &cdb[7]); + break; + case BMIC_CSMI_PASSTHRU: + request->data_direction = SOP_BIDIRECTIONAL; + cdb[0] = BMIC_WRITE; + cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU; + cdb[6] = cmd; + put_unaligned_be16(cdb_length, &cdb[7]); + break; + default: + dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd); + break; + } + + switch (request->data_direction) { + case SOP_READ_FLAG: + *dir = DMA_FROM_DEVICE; + break; + case SOP_WRITE_FLAG: + *dir = DMA_TO_DEVICE; + break; + case SOP_NO_DIRECTION_FLAG: + *dir = DMA_NONE; + break; + default: + *dir = DMA_BIDIRECTIONAL; + break; + } + + return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0], + buffer, buffer_length, *dir); +} + +static inline void pqi_reinit_io_request(struct pqi_io_request *io_request) +{ + io_request->scmd = NULL; + io_request->status = 0; + io_request->error_info = NULL; + io_request->raid_bypass = false; +} + +static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd) +{ + struct pqi_io_request *io_request; + u16 i; + + if (scmd) { /* SML I/O request */ + u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + + i = blk_mq_unique_tag_to_tag(blk_tag); + io_request = &ctrl_info->io_request_pool[i]; + if (atomic_inc_return(&io_request->refcount) > 1) { + atomic_dec(&io_request->refcount); + return NULL; + } + } else { /* IOCTL or driver internal request */ + /* + * benignly racy - may have to wait for an open slot. 
+ * command slot range is scsi_ml_can_queue - + * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)] + */ + i = 0; + while (1) { + io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i]; + if (atomic_inc_return(&io_request->refcount) == 1) + break; + atomic_dec(&io_request->refcount); + i = (i + 1) % PQI_RESERVED_IO_SLOTS; + } + } + + if (io_request) + pqi_reinit_io_request(io_request); + + return io_request; +} + +static void pqi_free_io_request(struct pqi_io_request *io_request) +{ + atomic_dec(&io_request->refcount); +} + +static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd, + u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page, + struct pqi_raid_error_info *error_info) +{ + int rc; + struct pqi_raid_path_request request; + enum dma_data_direction dir; + + rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr, + buffer, buffer_length, vpd_page, &dir); + if (rc) + return rc; + + rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info); + + pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); + + return rc; +} + +/* helper functions for pqi_send_scsi_raid_request */ + +static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info, + u8 cmd, void *buffer, size_t buffer_length) +{ + return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, + buffer, buffer_length, 0, NULL); +} + +static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info, + u8 cmd, void *buffer, size_t buffer_length, + struct pqi_raid_error_info *error_info) +{ + return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, + buffer, buffer_length, 0, error_info); +} + +static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, + struct bmic_identify_controller *buffer) +{ + return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER, + buffer, sizeof(*buffer)); +} + +static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info, + struct bmic_sense_subsystem_info *sense_info) +{ + return pqi_send_ctrl_raid_request(ctrl_info, + BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info, + sizeof(*sense_info)); +} + +static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, + u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) +{ + return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr, + buffer, buffer_length, vpd_page, NULL); +} + +static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, + struct bmic_identify_physical_device *buffer, size_t buffer_length) +{ + int rc; + enum dma_data_direction dir; + u16 bmic_device_index; + struct pqi_raid_path_request request; + + rc = pqi_build_raid_path_request(ctrl_info, &request, + BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer, + buffer_length, 0, &dir); + if (rc) + return rc; + + bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr); + request.cdb[2] = (u8)bmic_device_index; + request.cdb[9] = (u8)(bmic_device_index >> 8); + + rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); + + pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); + + return rc; +} + +static inline u32 pqi_aio_limit_to_bytes(__le16 *limit) +{ + u32 bytes; + + bytes = get_unaligned_le16(limit); + if (bytes == 0) + bytes = ~0; + else + bytes *= 1024; + + return bytes; +} + +#pragma pack(1) + +struct bmic_sense_feature_buffer { + struct bmic_sense_feature_buffer_header header; + struct 
bmic_sense_feature_io_page_aio_subpage aio_subpage; +}; + +#pragma pack() + +#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \ + offsetofend(struct bmic_sense_feature_buffer, \ + aio_subpage.max_write_raid_1_10_3drive) + +#define MINIMUM_AIO_SUBPAGE_LENGTH \ + (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \ + max_write_raid_1_10_3drive) - \ + sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header)) + +static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + enum dma_data_direction dir; + struct pqi_raid_path_request request; + struct bmic_sense_feature_buffer *buffer; + + buffer = kmalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID, + buffer, sizeof(*buffer), 0, &dir); + if (rc) + goto error; + + request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE; + request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE; + + rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); + + pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); + + if (rc) + goto error; + + if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE || + buffer->header.subpage_code != + BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || + get_unaligned_le16(&buffer->header.buffer_length) < + MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH || + buffer->aio_subpage.header.page_code != + BMIC_SENSE_FEATURE_IO_PAGE || + buffer->aio_subpage.header.subpage_code != + BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || + get_unaligned_le16(&buffer->aio_subpage.header.page_length) < + MINIMUM_AIO_SUBPAGE_LENGTH) { + goto error; + } + + ctrl_info->max_transfer_encrypted_sas_sata = + pqi_aio_limit_to_bytes( + &buffer->aio_subpage.max_transfer_encrypted_sas_sata); + + ctrl_info->max_transfer_encrypted_nvme = + pqi_aio_limit_to_bytes( + &buffer->aio_subpage.max_transfer_encrypted_nvme); + + ctrl_info->max_write_raid_5_6 = + pqi_aio_limit_to_bytes( + &buffer->aio_subpage.max_write_raid_5_6); + + ctrl_info->max_write_raid_1_10_2drive = + pqi_aio_limit_to_bytes( + &buffer->aio_subpage.max_write_raid_1_10_2drive); + + ctrl_info->max_write_raid_1_10_3drive = + pqi_aio_limit_to_bytes( + &buffer->aio_subpage.max_write_raid_1_10_3drive); + +error: + kfree(buffer); + + return rc; +} + +static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, + enum bmic_flush_cache_shutdown_event shutdown_event) +{ + int rc; + struct bmic_flush_cache *flush_cache; + + flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL); + if (!flush_cache) + return -ENOMEM; + + flush_cache->shutdown_event = shutdown_event; + + rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache, + sizeof(*flush_cache)); + + kfree(flush_cache); + + return rc; +} + +int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, + struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length, + struct pqi_raid_error_info *error_info) +{ + return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU, + buffer, buffer_length, error_info); +} + +#define PQI_FETCH_PTRAID_DATA (1 << 31) + +static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + struct bmic_diag_options *diag; + + diag = kzalloc(sizeof(*diag), GFP_KERNEL); + if (!diag) + return -ENOMEM; + + rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS, + diag, sizeof(*diag)); + if (rc) + goto out; + + diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA); + + rc = pqi_send_ctrl_raid_request(ctrl_info, 
BMIC_SET_DIAG_OPTIONS, diag, + sizeof(*diag)); + +out: + kfree(diag); + + return rc; +} + +static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, + void *buffer, size_t buffer_length) +{ + return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS, + buffer, buffer_length); +} + +#pragma pack(1) + +struct bmic_host_wellness_driver_version { + u8 start_tag[4]; + u8 driver_version_tag[2]; + __le16 driver_version_length; + char driver_version[32]; + u8 dont_write_tag[2]; + u8 end_tag[2]; +}; + +#pragma pack() + +static int pqi_write_driver_version_to_host_wellness( + struct pqi_ctrl_info *ctrl_info) +{ + int rc; + struct bmic_host_wellness_driver_version *buffer; + size_t buffer_length; + + buffer_length = sizeof(*buffer); + + buffer = kmalloc(buffer_length, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + buffer->start_tag[0] = '<'; + buffer->start_tag[1] = 'H'; + buffer->start_tag[2] = 'W'; + buffer->start_tag[3] = '>'; + buffer->driver_version_tag[0] = 'D'; + buffer->driver_version_tag[1] = 'V'; + put_unaligned_le16(sizeof(buffer->driver_version), + &buffer->driver_version_length); + strncpy(buffer->driver_version, "Linux " DRIVER_VERSION, + sizeof(buffer->driver_version) - 1); + buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0'; + buffer->dont_write_tag[0] = 'D'; + buffer->dont_write_tag[1] = 'W'; + buffer->end_tag[0] = 'Z'; + buffer->end_tag[1] = 'Z'; + + rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); + + kfree(buffer); + + return rc; +} + +#pragma pack(1) + +struct bmic_host_wellness_time { + u8 start_tag[4]; + u8 time_tag[2]; + __le16 time_length; + u8 time[8]; + u8 dont_write_tag[2]; + u8 end_tag[2]; +}; + +#pragma pack() + +static int pqi_write_current_time_to_host_wellness( + struct pqi_ctrl_info *ctrl_info) +{ + int rc; + struct bmic_host_wellness_time *buffer; + size_t buffer_length; + time64_t local_time; + unsigned int year; + struct tm tm; + + buffer_length = sizeof(*buffer); + + buffer = kmalloc(buffer_length, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + buffer->start_tag[0] = '<'; + buffer->start_tag[1] = 'H'; + buffer->start_tag[2] = 'W'; + buffer->start_tag[3] = '>'; + buffer->time_tag[0] = 'T'; + buffer->time_tag[1] = 'D'; + put_unaligned_le16(sizeof(buffer->time), + &buffer->time_length); + + local_time = ktime_get_real_seconds(); + time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm); + year = tm.tm_year + 1900; + + buffer->time[0] = bin2bcd(tm.tm_hour); + buffer->time[1] = bin2bcd(tm.tm_min); + buffer->time[2] = bin2bcd(tm.tm_sec); + buffer->time[3] = 0; + buffer->time[4] = bin2bcd(tm.tm_mon + 1); + buffer->time[5] = bin2bcd(tm.tm_mday); + buffer->time[6] = bin2bcd(year / 100); + buffer->time[7] = bin2bcd(year % 100); + + buffer->dont_write_tag[0] = 'D'; + buffer->dont_write_tag[1] = 'W'; + buffer->end_tag[0] = 'Z'; + buffer->end_tag[1] = 'Z'; + + rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); + + kfree(buffer); + + return rc; +} + +#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ) + +static void pqi_update_time_worker(struct work_struct *work) +{ + int rc; + struct pqi_ctrl_info *ctrl_info; + + ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, + update_time_work); + + rc = pqi_write_current_time_to_host_wellness(ctrl_info); + if (rc) + dev_warn(&ctrl_info->pci_dev->dev, + "error updating time on controller\n"); + + schedule_delayed_work(&ctrl_info->update_time_work, + PQI_UPDATE_TIME_WORK_INTERVAL); +} + +static inline void 
pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info) +{ + schedule_delayed_work(&ctrl_info->update_time_work, 0); +} + +static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info) +{ + cancel_delayed_work_sync(&ctrl_info->update_time_work); +} + +static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer, + size_t buffer_length) +{ + return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length); +} + +static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer) +{ + int rc; + size_t lun_list_length; + size_t lun_data_length; + size_t new_lun_list_length; + void *lun_data = NULL; + struct report_lun_header *report_lun_header; + + report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL); + if (!report_lun_header) { + rc = -ENOMEM; + goto out; + } + + rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header)); + if (rc) + goto out; + + lun_list_length = get_unaligned_be32(&report_lun_header->list_length); + +again: + lun_data_length = sizeof(struct report_lun_header) + lun_list_length; + + lun_data = kmalloc(lun_data_length, GFP_KERNEL); + if (!lun_data) { + rc = -ENOMEM; + goto out; + } + + if (lun_list_length == 0) { + memcpy(lun_data, report_lun_header, sizeof(*report_lun_header)); + goto out; + } + + rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length); + if (rc) + goto out; + + new_lun_list_length = + get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length); + + if (new_lun_list_length > lun_list_length) { + lun_list_length = new_lun_list_length; + kfree(lun_data); + goto again; + } + +out: + kfree(report_lun_header); + + if (rc) { + kfree(lun_data); + lun_data = NULL; + } + + *buffer = lun_data; + + return rc; +} + +static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) +{ + int rc; + unsigned int i; + u8 rpl_response_format; + u32 num_physicals; + void *rpl_list; + struct report_lun_header *rpl_header; + struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list; + struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list; + + rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list); + if (rc) + return rc; + + if (ctrl_info->rpl_extended_format_4_5_supported) { + rpl_header = rpl_list; + rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK; + if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) { + *buffer = rpl_list; + return 0; + } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) { + dev_err(&ctrl_info->pci_dev->dev, + "RPL returned unsupported data format %u\n", + rpl_response_format); + return -EINVAL; + } else { + dev_warn(&ctrl_info->pci_dev->dev, + "RPL returned extended format 2 instead of 4\n"); + } + } + + rpl_8byte_wwid_list = rpl_list; + num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]); + + rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries, + num_physicals), GFP_KERNEL); + if (!rpl_16byte_wwid_list) + return -ENOMEM; + + put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid), + &rpl_16byte_wwid_list->header.list_length); + rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags; + + for (i = 0; i < num_physicals; i++) { + memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, 
sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid)); + memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid)); + memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8); + rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type; + rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags; + rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count; + rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths; + rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle; + } + + kfree(rpl_8byte_wwid_list); + *buffer = rpl_16byte_wwid_list; + + return 0; +} + +static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) +{ + return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); +} + +static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, + struct report_phys_lun_16byte_wwid_list **physdev_list, + struct report_log_lun_list **logdev_list) +{ + int rc; + size_t logdev_list_length; + size_t logdev_data_length; + struct report_log_lun_list *internal_logdev_list; + struct report_log_lun_list *logdev_data; + struct report_lun_header report_lun_header; + + rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); + if (rc) + dev_err(&ctrl_info->pci_dev->dev, + "report physical LUNs failed\n"); + + rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); + if (rc) + dev_err(&ctrl_info->pci_dev->dev, + "report logical LUNs failed\n"); + + /* + * Tack the controller itself onto the end of the logical device list + * by adding a list entry that is all zeros. + */ + + logdev_data = *logdev_list; + + if (logdev_data) { + logdev_list_length = + get_unaligned_be32(&logdev_data->header.list_length); + } else { + memset(&report_lun_header, 0, sizeof(report_lun_header)); + logdev_data = + (struct report_log_lun_list *)&report_lun_header; + logdev_list_length = 0; + } + + logdev_data_length = sizeof(struct report_lun_header) + + logdev_list_length; + + internal_logdev_list = kmalloc(logdev_data_length + + sizeof(struct report_log_lun), GFP_KERNEL); + if (!internal_logdev_list) { + kfree(*logdev_list); + *logdev_list = NULL; + return -ENOMEM; + } + + memcpy(internal_logdev_list, logdev_data, logdev_data_length); + memset((u8 *)internal_logdev_list + logdev_data_length, 0, + sizeof(struct report_log_lun)); + put_unaligned_be32(logdev_list_length + + sizeof(struct report_log_lun), + &internal_logdev_list->header.list_length); + + kfree(*logdev_list); + *logdev_list = internal_logdev_list; + + return 0; +} + +static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device, + int bus, int target, int lun) +{ + device->bus = bus; + device->target = target; + device->lun = lun; +} + +static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device) +{ + u8 *scsi3addr; + u32 lunid; + int bus; + int target; + int lun; + + scsi3addr = device->scsi3addr; + lunid = get_unaligned_le32(scsi3addr); + + if (pqi_is_hba_lunid(scsi3addr)) { + /* The specified device is the controller. 
*/ + pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff); + device->target_lun_valid = true; + return; + } + + if (pqi_is_logical_device(device)) { + if (device->is_external_raid_device) { + bus = PQI_EXTERNAL_RAID_VOLUME_BUS; + target = (lunid >> 16) & 0x3fff; + lun = lunid & 0xff; + } else { + bus = PQI_RAID_VOLUME_BUS; + target = 0; + lun = lunid & 0x3fff; + } + pqi_set_bus_target_lun(device, bus, target, lun); + device->target_lun_valid = true; + return; + } + + /* + * Defer target and LUN assignment for non-controller physical devices + * because the SAS transport layer will make these assignments later. + */ + pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0); +} + +static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device) +{ + int rc; + u8 raid_level; + u8 *buffer; + + raid_level = SA_RAID_UNKNOWN; + + buffer = kmalloc(64, GFP_KERNEL); + if (buffer) { + rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, + VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64); + if (rc == 0) { + raid_level = buffer[8]; + if (raid_level > SA_RAID_MAX) + raid_level = SA_RAID_UNKNOWN; + } + kfree(buffer); + } + + device->raid_level = raid_level; +} + +static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, struct raid_map *raid_map) +{ + char *err_msg; + u32 raid_map_size; + u32 r5or6_blocks_per_row; + + raid_map_size = get_unaligned_le32(&raid_map->structure_size); + + if (raid_map_size < offsetof(struct raid_map, disk_data)) { + err_msg = "RAID map too small"; + goto bad_raid_map; + } + + if (device->raid_level == SA_RAID_1) { + if (get_unaligned_le16(&raid_map->layout_map_count) != 2) { + err_msg = "invalid RAID-1 map"; + goto bad_raid_map; + } + } else if (device->raid_level == SA_RAID_TRIPLE) { + if (get_unaligned_le16(&raid_map->layout_map_count) != 3) { + err_msg = "invalid RAID-1(Triple) map"; + goto bad_raid_map; + } + } else if ((device->raid_level == SA_RAID_5 || + device->raid_level == SA_RAID_6) && + get_unaligned_le16(&raid_map->layout_map_count) > 1) { + /* RAID 50/60 */ + r5or6_blocks_per_row = + get_unaligned_le16(&raid_map->strip_size) * + get_unaligned_le16(&raid_map->data_disks_per_row); + if (r5or6_blocks_per_row == 0) { + err_msg = "invalid RAID-5 or RAID-6 map"; + goto bad_raid_map; + } + } + + return 0; + +bad_raid_map: + dev_warn(&ctrl_info->pci_dev->dev, + "logical device %08x%08x %s\n", + *((u32 *)&device->scsi3addr), + *((u32 *)&device->scsi3addr[4]), err_msg); + + return -EINVAL; +} + +static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device) +{ + int rc; + u32 raid_map_size; + struct raid_map *raid_map; + + raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL); + if (!raid_map) + return -ENOMEM; + + rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, + device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL); + if (rc) + goto error; + + raid_map_size = get_unaligned_le32(&raid_map->structure_size); + + if (raid_map_size > sizeof(*raid_map)) { + + kfree(raid_map); + + raid_map = kmalloc(raid_map_size, GFP_KERNEL); + if (!raid_map) + return -ENOMEM; + + rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, + device->scsi3addr, raid_map, raid_map_size, 0, NULL); + if (rc) + goto error; + + if (get_unaligned_le32(&raid_map->structure_size) + != raid_map_size) { + dev_warn(&ctrl_info->pci_dev->dev, + "requested %u bytes, received %u bytes\n", + raid_map_size, + get_unaligned_le32(&raid_map->structure_size)); + rc = -EINVAL; + goto 
error; + } + } + + rc = pqi_validate_raid_map(ctrl_info, device, raid_map); + if (rc) + goto error; + + device->raid_map = raid_map; + + return 0; + +error: + kfree(raid_map); + + return rc; +} + +static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device) +{ + if (!ctrl_info->lv_drive_type_mix_valid) { + device->max_transfer_encrypted = ~0; + return; + } + + switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) { + case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY: + case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY: + case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY: + case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY: + case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY: + case LV_DRIVE_TYPE_MIX_SAS_ONLY: + case LV_DRIVE_TYPE_MIX_SATA_ONLY: + device->max_transfer_encrypted = + ctrl_info->max_transfer_encrypted_sas_sata; + break; + case LV_DRIVE_TYPE_MIX_NVME_ONLY: + device->max_transfer_encrypted = + ctrl_info->max_transfer_encrypted_nvme; + break; + case LV_DRIVE_TYPE_MIX_UNKNOWN: + case LV_DRIVE_TYPE_MIX_NO_RESTRICTION: + default: + device->max_transfer_encrypted = + min(ctrl_info->max_transfer_encrypted_sas_sata, + ctrl_info->max_transfer_encrypted_nvme); + break; + } +} + +static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device) +{ + int rc; + u8 *buffer; + u8 bypass_status; + + buffer = kmalloc(64, GFP_KERNEL); + if (!buffer) + return; + + rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, + VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64); + if (rc) + goto out; + +#define RAID_BYPASS_STATUS 4 +#define RAID_BYPASS_CONFIGURED 0x1 +#define RAID_BYPASS_ENABLED 0x2 + + bypass_status = buffer[RAID_BYPASS_STATUS]; + device->raid_bypass_configured = + (bypass_status & RAID_BYPASS_CONFIGURED) != 0; + if (device->raid_bypass_configured && + (bypass_status & RAID_BYPASS_ENABLED) && + pqi_get_raid_map(ctrl_info, device) == 0) { + device->raid_bypass_enabled = true; + if (get_unaligned_le16(&device->raid_map->flags) & + RAID_MAP_ENCRYPTION_ENABLED) + pqi_set_max_transfer_encrypted(ctrl_info, device); + } + +out: + kfree(buffer); +} + +/* + * Use vendor-specific VPD to determine online/offline status of a volume. 
+ */ + +static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device) +{ + int rc; + size_t page_length; + u8 volume_status = CISS_LV_STATUS_UNAVAILABLE; + bool volume_offline = true; + u32 volume_flags; + struct ciss_vpd_logical_volume_status *vpd; + + vpd = kmalloc(sizeof(*vpd), GFP_KERNEL); + if (!vpd) + goto no_buffer; + + rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, + VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd)); + if (rc) + goto out; + + if (vpd->page_code != CISS_VPD_LV_STATUS) + goto out; + + page_length = offsetof(struct ciss_vpd_logical_volume_status, + volume_status) + vpd->page_length; + if (page_length < sizeof(*vpd)) + goto out; + + volume_status = vpd->volume_status; + volume_flags = get_unaligned_be32(&vpd->flags); + volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0; + +out: + kfree(vpd); +no_buffer: + device->volume_status = volume_status; + device->volume_offline = volume_offline; +} + +#define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01 +#define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10 +#define PQI_DEVICE_ERASE_IN_PROGRESS 0x10 + +static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, + struct bmic_identify_physical_device *id_phys) +{ + int rc; + + memset(id_phys, 0, sizeof(*id_phys)); + + rc = pqi_identify_physical_device(ctrl_info, device, + id_phys, sizeof(*id_phys)); + if (rc) { + device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; + return rc; + } + + scsi_sanitize_inquiry_string(&id_phys->model[0], 8); + scsi_sanitize_inquiry_string(&id_phys->model[8], 16); + + memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor)); + memcpy(device->model, &id_phys->model[8], sizeof(device->model)); + + device->box_index = id_phys->box_index; + device->phys_box_on_bus = id_phys->phys_box_on_bus; + device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0]; + device->queue_depth = + get_unaligned_le16(&id_phys->current_queue_depth_limit); + device->active_path_index = id_phys->active_path_number; + device->path_map = id_phys->redundant_path_present_map; + memcpy(&device->box, + &id_phys->alternate_paths_phys_box_on_port, + sizeof(device->box)); + memcpy(&device->phys_connector, + &id_phys->alternate_paths_phys_connector, + sizeof(device->phys_connector)); + device->bay = id_phys->phys_bay_in_box; + device->lun_count = id_phys->multi_lun_device_lun_count; + if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) && + id_phys->phy_count) + device->phy_id = + id_phys->phy_to_phy_map[device->active_path_index]; + else + device->phy_id = 0xFF; + + device->ncq_prio_support = + ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) & + PQI_DEVICE_NCQ_PRIO_SUPPORTED); + + device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS); + + return 0; +} + +static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device) +{ + int rc; + u8 *buffer; + + buffer = kmalloc(64, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + /* Send an inquiry to the device to see what it is. 
*/ + rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); + if (rc) + goto out; + + scsi_sanitize_inquiry_string(&buffer[8], 8); + scsi_sanitize_inquiry_string(&buffer[16], 16); + + device->devtype = buffer[0] & 0x1f; + memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); + memcpy(device->model, &buffer[16], sizeof(device->model)); + + if (device->devtype == TYPE_DISK) { + if (device->is_external_raid_device) { + device->raid_level = SA_RAID_UNKNOWN; + device->volume_status = CISS_LV_OK; + device->volume_offline = false; + } else { + pqi_get_raid_level(ctrl_info, device); + pqi_get_raid_bypass_status(ctrl_info, device); + pqi_get_volume_status(ctrl_info, device); + } + } + +out: + kfree(buffer); + + return rc; +} + +/* + * Prevent adding drive to OS for some corner cases such as a drive + * undergoing a sanitize (erase) operation. Some OSes will continue to poll + * the drive until the sanitize completes, which can take hours, + * resulting in long bootup delays. Commands such as TUR, READ_CAP + * are allowed, but READ/WRITE cause check condition. So the OS + * cannot check/read the partition table. + * Note: devices that have completed sanitize must be re-enabled + * using the management utility. + */ +static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device) +{ + return device->erase_in_progress; +} + +static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, + struct bmic_identify_physical_device *id_phys) +{ + int rc; + + if (device->is_expander_smp_device) + return 0; + + if (pqi_is_logical_device(device)) + rc = pqi_get_logical_device_info(ctrl_info, device); + else + rc = pqi_get_physical_device_info(ctrl_info, device, id_phys); + + return rc; +} + +static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, + struct bmic_identify_physical_device *id_phys) +{ + int rc; + + rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys); + + if (rc == 0 && device->lun_count == 0) + device->lun_count = 1; + + return rc; +} + +static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device) +{ + char *status; + static const char unknown_state_str[] = + "Volume is in an unknown state (%u)"; + char unknown_state_buffer[sizeof(unknown_state_str) + 10]; + + switch (device->volume_status) { + case CISS_LV_OK: + status = "Volume online"; + break; + case CISS_LV_FAILED: + status = "Volume failed"; + break; + case CISS_LV_NOT_CONFIGURED: + status = "Volume not configured"; + break; + case CISS_LV_DEGRADED: + status = "Volume degraded"; + break; + case CISS_LV_READY_FOR_RECOVERY: + status = "Volume ready for recovery operation"; + break; + case CISS_LV_UNDERGOING_RECOVERY: + status = "Volume undergoing recovery"; + break; + case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED: + status = "Wrong physical drive was replaced"; + break; + case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM: + status = "A physical drive not properly connected"; + break; + case CISS_LV_HARDWARE_OVERHEATING: + status = "Hardware is overheating"; + break; + case CISS_LV_HARDWARE_HAS_OVERHEATED: + status = "Hardware has overheated"; + break; + case CISS_LV_UNDERGOING_EXPANSION: + status = "Volume undergoing expansion"; + break; + case CISS_LV_NOT_AVAILABLE: + status = "Volume waiting for transforming volume"; + break; + case CISS_LV_QUEUED_FOR_EXPANSION: + status = "Volume queued for expansion"; + break; + case CISS_LV_DISABLED_SCSI_ID_CONFLICT: + status = "Volume disabled due to 
SCSI ID conflict"; + break; + case CISS_LV_EJECTED: + status = "Volume has been ejected"; + break; + case CISS_LV_UNDERGOING_ERASE: + status = "Volume undergoing background erase"; + break; + case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD: + status = "Volume ready for predictive spare rebuild"; + break; + case CISS_LV_UNDERGOING_RPI: + status = "Volume undergoing rapid parity initialization"; + break; + case CISS_LV_PENDING_RPI: + status = "Volume queued for rapid parity initialization"; + break; + case CISS_LV_ENCRYPTED_NO_KEY: + status = "Encrypted volume inaccessible - key not present"; + break; + case CISS_LV_UNDERGOING_ENCRYPTION: + status = "Volume undergoing encryption process"; + break; + case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING: + status = "Volume undergoing encryption re-keying process"; + break; + case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: + status = "Volume encrypted but encryption is disabled"; + break; + case CISS_LV_PENDING_ENCRYPTION: + status = "Volume pending migration to encrypted state"; + break; + case CISS_LV_PENDING_ENCRYPTION_REKEYING: + status = "Volume pending encryption rekeying"; + break; + case CISS_LV_NOT_SUPPORTED: + status = "Volume not supported on this controller"; + break; + case CISS_LV_STATUS_UNAVAILABLE: + status = "Volume status not available"; + break; + default: + snprintf(unknown_state_buffer, sizeof(unknown_state_buffer), + unknown_state_str, device->volume_status); + status = unknown_state_buffer; + break; + } + + dev_info(&ctrl_info->pci_dev->dev, + "scsi %d:%d:%d:%d %s\n", + ctrl_info->scsi_host->host_no, + device->bus, device->target, device->lun, status); +} + +static void pqi_rescan_worker(struct work_struct *work) +{ + struct pqi_ctrl_info *ctrl_info; + + ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, + rescan_work); + + pqi_scan_scsi_devices(ctrl_info); +} + +static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device) +{ + int rc; + + if (pqi_is_logical_device(device)) + rc = scsi_add_device(ctrl_info->scsi_host, device->bus, + device->target, device->lun); + else + rc = pqi_add_sas_device(ctrl_info->sas_host, device); + + return rc; +} + +#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000) + +static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) +{ + int rc; + int lun; + + for (lun = 0; lun < device->lun_count; lun++) { + rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, + PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS); + if (rc) + dev_err(&ctrl_info->pci_dev->dev, + "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n", + ctrl_info->scsi_host->host_no, device->bus, + device->target, lun, + atomic_read(&device->scsi_cmds_outstanding[lun])); + } + + if (pqi_is_logical_device(device)) + scsi_remove_device(device->sdev); + else + pqi_remove_sas_device(device); + + pqi_device_remove_start(device); +} + +/* Assumes the SCSI device list lock is held. 
*/ + +static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, + int bus, int target, int lun) +{ + struct pqi_scsi_dev *device; + + list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) + if (device->bus == bus && device->target == target && device->lun == lun) + return device; + + return NULL; +} + +static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2) +{ + if (dev1->is_physical_device != dev2->is_physical_device) + return false; + + if (dev1->is_physical_device) + return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0; + + return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0; +} + +enum pqi_find_result { + DEVICE_NOT_FOUND, + DEVICE_CHANGED, + DEVICE_SAME, +}; + +static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device) +{ + struct pqi_scsi_dev *device; + + list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { + if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) { + *matching_device = device; + if (pqi_device_equal(device_to_find, device)) { + if (device_to_find->volume_offline) + return DEVICE_CHANGED; + return DEVICE_SAME; + } + return DEVICE_CHANGED; + } + } + + return DEVICE_NOT_FOUND; +} + +static inline const char *pqi_device_type(struct pqi_scsi_dev *device) +{ + if (device->is_expander_smp_device) + return "Enclosure SMP "; + + return scsi_device_type(device->devtype); +} + +#define PQI_DEV_INFO_BUFFER_LENGTH 128 + +static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, + char *action, struct pqi_scsi_dev *device) +{ + ssize_t count; + char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; + + count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, + "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); + + if (device->target_lun_valid) + count += scnprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + "%d:%d", + device->target, + device->lun); + else + count += scnprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + "-:-"); + + if (pqi_is_logical_device(device)) + count += scnprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + " %08x%08x", + *((u32 *)&device->scsi3addr), + *((u32 *)&device->scsi3addr[4])); + else + count += scnprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + " %016llx%016llx", + get_unaligned_be64(&device->wwid[0]), + get_unaligned_be64(&device->wwid[8])); + + count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, + " %s %.8s %.16s ", + pqi_device_type(device), + device->vendor, + device->model); + + if (pqi_is_logical_device(device)) { + if (device->devtype == TYPE_DISK) + count += scnprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + "SSDSmartPathCap%c En%c %-12s", + device->raid_bypass_configured ? '+' : '-', + device->raid_bypass_enabled ? '+' : '-', + pqi_raid_level_to_string(device->raid_level)); + } else { + count += scnprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + "AIO%c", device->aio_enabled ? 
'+' : '-'); + if (device->devtype == TYPE_DISK || + device->devtype == TYPE_ZBC) + count += scnprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + " qd=%-6d", device->queue_depth); + } + + dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); +} + +static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2) +{ + u32 raid_map1_size; + u32 raid_map2_size; + + if (raid_map1 == NULL || raid_map2 == NULL) + return raid_map1 == raid_map2; + + raid_map1_size = get_unaligned_le32(&raid_map1->structure_size); + raid_map2_size = get_unaligned_le32(&raid_map2->structure_size); + + if (raid_map1_size != raid_map2_size) + return false; + + return memcmp(raid_map1, raid_map2, raid_map1_size) == 0; +} + +/* Assumes the SCSI device list lock is held. */ + +static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device) +{ + existing_device->device_type = new_device->device_type; + existing_device->bus = new_device->bus; + if (new_device->target_lun_valid) { + existing_device->target = new_device->target; + existing_device->lun = new_device->lun; + existing_device->target_lun_valid = true; + } + + /* By definition, the scsi3addr and wwid fields are already the same. */ + + existing_device->is_physical_device = new_device->is_physical_device; + memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor)); + memcpy(existing_device->model, new_device->model, sizeof(existing_device->model)); + existing_device->sas_address = new_device->sas_address; + existing_device->queue_depth = new_device->queue_depth; + existing_device->device_offline = false; + existing_device->lun_count = new_device->lun_count; + + if (pqi_is_logical_device(existing_device)) { + existing_device->is_external_raid_device = new_device->is_external_raid_device; + + if (existing_device->devtype == TYPE_DISK) { + existing_device->raid_level = new_device->raid_level; + existing_device->volume_status = new_device->volume_status; + if (ctrl_info->logical_volume_rescan_needed) + existing_device->rescan = true; + memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group)); + if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) { + kfree(existing_device->raid_map); + existing_device->raid_map = new_device->raid_map; + /* To prevent this from being freed later. 
*/ + new_device->raid_map = NULL; + } + existing_device->raid_bypass_configured = new_device->raid_bypass_configured; + existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled; + } + } else { + existing_device->aio_enabled = new_device->aio_enabled; + existing_device->aio_handle = new_device->aio_handle; + existing_device->is_expander_smp_device = new_device->is_expander_smp_device; + existing_device->active_path_index = new_device->active_path_index; + existing_device->phy_id = new_device->phy_id; + existing_device->path_map = new_device->path_map; + existing_device->bay = new_device->bay; + existing_device->box_index = new_device->box_index; + existing_device->phys_box_on_bus = new_device->phys_box_on_bus; + existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type; + memcpy(existing_device->box, new_device->box, sizeof(existing_device->box)); + memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector)); + } +} + +static inline void pqi_free_device(struct pqi_scsi_dev *device) +{ + if (device) { + kfree(device->raid_map); + kfree(device); + } +} + +/* + * Called when exposing a new device to the OS fails in order to re-adjust + * our internal SCSI device list to match the SCSI ML's view. + */ + +static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device) +{ + unsigned long flags; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + list_del(&device->scsi_device_list_entry); + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + /* Allow the device structure to be freed later. */ + device->keep_device = false; +} + +static inline bool pqi_is_device_added(struct pqi_scsi_dev *device) +{ + if (device->is_expander_smp_device) + return device->sas_port != NULL; + + return device->sdev != NULL; +} + +static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device) +{ + unsigned int lun; + struct pqi_tmf_work *tmf_work; + + for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++) + INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker); +} + +static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) +{ + int rc; + unsigned int i; + unsigned long flags; + enum pqi_find_result find_result; + struct pqi_scsi_dev *device; + struct pqi_scsi_dev *next; + struct pqi_scsi_dev *matching_device; + LIST_HEAD(add_list); + LIST_HEAD(delete_list); + + /* + * The idea here is to do as little work as possible while holding the + * spinlock. That's why we go to great pains to defer anything other + * than updating the internal device list until after we release the + * spinlock. + */ + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + /* Assume that all devices in the existing list have gone away. */ + list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) + device->device_gone = true; + + for (i = 0; i < num_new_devices; i++) { + device = new_device_list[i]; + + find_result = pqi_scsi_find_entry(ctrl_info, device, + &matching_device); + + switch (find_result) { + case DEVICE_SAME: + /* + * The newly found device is already in the existing + * device list. 
+ */ + device->new_device = false; + matching_device->device_gone = false; + pqi_scsi_update_device(ctrl_info, matching_device, device); + break; + case DEVICE_NOT_FOUND: + /* + * The newly found device is NOT in the existing device + * list. + */ + device->new_device = true; + break; + case DEVICE_CHANGED: + /* + * The original device has gone away and we need to add + * the new device. + */ + device->new_device = true; + break; + } + } + + /* Process all devices that have gone away. */ + list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, + scsi_device_list_entry) { + if (device->device_gone) { + list_del(&device->scsi_device_list_entry); + list_add_tail(&device->delete_list_entry, &delete_list); + } + } + + /* Process all new devices. */ + for (i = 0; i < num_new_devices; i++) { + device = new_device_list[i]; + if (!device->new_device) + continue; + if (device->volume_offline) + continue; + list_add_tail(&device->scsi_device_list_entry, + &ctrl_info->scsi_device_list); + list_add_tail(&device->add_list_entry, &add_list); + /* To prevent this device structure from being freed later. */ + device->keep_device = true; + pqi_init_device_tmf_work(device); + } + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + /* + * If OFA is in progress and there are devices that need to be deleted, + * allow any pending reset operations to continue and unblock any SCSI + * requests before removal. + */ + if (pqi_ofa_in_progress(ctrl_info)) { + list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) + if (pqi_is_device_added(device)) + pqi_device_remove_start(device); + pqi_ctrl_unblock_device_reset(ctrl_info); + pqi_scsi_unblock_requests(ctrl_info); + } + + /* Remove all devices that have gone away. */ + list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) { + if (device->volume_offline) { + pqi_dev_info(ctrl_info, "offline", device); + pqi_show_volume_status(ctrl_info, device); + } else { + pqi_dev_info(ctrl_info, "removed", device); + } + if (pqi_is_device_added(device)) + pqi_remove_device(ctrl_info, device); + list_del(&device->delete_list_entry); + pqi_free_device(device); + } + + /* + * Notify the SML of any existing device changes such as; + * queue depth, device size. + */ + list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { + if (device->sdev && device->queue_depth != device->advertised_queue_depth) { + device->advertised_queue_depth = device->queue_depth; + scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); + if (device->rescan) { + scsi_rescan_device(device->sdev); + device->rescan = false; + } + } + } + + /* Expose any new devices. */ + list_for_each_entry_safe(device, next, &add_list, add_list_entry) { + if (!pqi_is_device_added(device)) { + rc = pqi_add_device(ctrl_info, device); + if (rc == 0) { + pqi_dev_info(ctrl_info, "added", device); + } else { + dev_warn(&ctrl_info->pci_dev->dev, + "scsi %d:%d:%d:%d addition failed, device not added\n", + ctrl_info->scsi_host->host_no, + device->bus, device->target, + device->lun); + pqi_fixup_botched_add(ctrl_info, device); + } + } + } + + ctrl_info->logical_volume_rescan_needed = false; + +} + +static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device) +{ + /* + * Only support the HBA controller itself as a RAID + * controller. If it's a RAID controller other than + * the HBA itself (an external RAID controller, for + * example), we don't support it. 
+ */ + if (device->device_type == SA_DEVICE_TYPE_CONTROLLER && + !pqi_is_hba_lunid(device->scsi3addr)) + return false; + + return true; +} + +static inline bool pqi_skip_device(u8 *scsi3addr) +{ + /* Ignore all masked devices. */ + if (MASKED_DEVICE(scsi3addr)) + return true; + + return false; +} + +static inline void pqi_mask_device(u8 *scsi3addr) +{ + scsi3addr[3] |= 0xc0; +} + +static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device) +{ + if (pqi_is_logical_device(device)) + return false; + + return (device->path_map & (device->path_map - 1)) != 0; +} + +static inline bool pqi_expose_device(struct pqi_scsi_dev *device) +{ + return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); +} + +static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) +{ + int i; + int rc; + LIST_HEAD(new_device_list_head); + struct report_phys_lun_16byte_wwid_list *physdev_list = NULL; + struct report_log_lun_list *logdev_list = NULL; + struct report_phys_lun_16byte_wwid *phys_lun; + struct report_log_lun *log_lun; + struct bmic_identify_physical_device *id_phys = NULL; + u32 num_physicals; + u32 num_logicals; + struct pqi_scsi_dev **new_device_list = NULL; + struct pqi_scsi_dev *device; + struct pqi_scsi_dev *next; + unsigned int num_new_devices; + unsigned int num_valid_devices; + bool is_physical_device; + u8 *scsi3addr; + unsigned int physical_index; + unsigned int logical_index; + static char *out_of_memory_msg = + "failed to allocate memory, device discovery stopped"; + + rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); + if (rc) + goto out; + + if (physdev_list) + num_physicals = + get_unaligned_be32(&physdev_list->header.list_length) + / sizeof(physdev_list->lun_entries[0]); + else + num_physicals = 0; + + if (logdev_list) + num_logicals = + get_unaligned_be32(&logdev_list->header.list_length) + / sizeof(logdev_list->lun_entries[0]); + else + num_logicals = 0; + + if (num_physicals) { + /* + * We need this buffer for calls to pqi_get_physical_disk_info() + * below. We allocate it here instead of inside + * pqi_get_physical_disk_info() because it's a fairly large + * buffer. 
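[Editorial aside, not part of the patch: pqi_is_multipath_device() earlier in this hunk treats a physical device as multipath when more than one bit is set in its path_map. It relies on the standard bit trick that x & (x - 1) clears the lowest set bit, so the expression is non-zero exactly when two or more path bits are set. A minimal illustration:]

#include <stdint.h>
#include <stdio.h>

// Non-zero only when path_map has two or more bits set.
static int has_multiple_paths(uint8_t path_map)
{
	return (path_map & (path_map - 1)) != 0;
}

int main(void)
{
	printf("%d\n", has_multiple_paths(0x01)); // single path -> 0
	printf("%d\n", has_multiple_paths(0x03)); // two paths   -> 1
	printf("%d\n", has_multiple_paths(0x00)); // no path     -> 0
	return 0;
}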
+ */ + id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); + if (!id_phys) { + dev_warn(&ctrl_info->pci_dev->dev, "%s\n", + out_of_memory_msg); + rc = -ENOMEM; + goto out; + } + + if (pqi_hide_vsep) { + for (i = num_physicals - 1; i >= 0; i--) { + phys_lun = &physdev_list->lun_entries[i]; + if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) { + pqi_mask_device(phys_lun->lunid); + break; + } + } + } + } + + if (num_logicals && + (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX)) + ctrl_info->lv_drive_type_mix_valid = true; + + num_new_devices = num_physicals + num_logicals; + + new_device_list = kmalloc_array(num_new_devices, + sizeof(*new_device_list), + GFP_KERNEL); + if (!new_device_list) { + dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); + rc = -ENOMEM; + goto out; + } + + for (i = 0; i < num_new_devices; i++) { + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) { + dev_warn(&ctrl_info->pci_dev->dev, "%s\n", + out_of_memory_msg); + rc = -ENOMEM; + goto out; + } + list_add_tail(&device->new_device_list_entry, + &new_device_list_head); + } + + device = NULL; + num_valid_devices = 0; + physical_index = 0; + logical_index = 0; + + for (i = 0; i < num_new_devices; i++) { + + if ((!pqi_expose_ld_first && i < num_physicals) || + (pqi_expose_ld_first && i >= num_logicals)) { + is_physical_device = true; + phys_lun = &physdev_list->lun_entries[physical_index++]; + log_lun = NULL; + scsi3addr = phys_lun->lunid; + } else { + is_physical_device = false; + phys_lun = NULL; + log_lun = &logdev_list->lun_entries[logical_index++]; + scsi3addr = log_lun->lunid; + } + + if (is_physical_device && pqi_skip_device(scsi3addr)) + continue; + + if (device) + device = list_next_entry(device, new_device_list_entry); + else + device = list_first_entry(&new_device_list_head, + struct pqi_scsi_dev, new_device_list_entry); + + memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); + device->is_physical_device = is_physical_device; + if (is_physical_device) { + device->device_type = phys_lun->device_type; + if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP) + device->is_expander_smp_device = true; + } else { + device->is_external_raid_device = + pqi_is_external_raid_addr(scsi3addr); + } + + if (!pqi_is_supported_device(device)) + continue; + + /* Gather information about the device. */ + rc = pqi_get_device_info(ctrl_info, device, id_phys); + if (rc == -ENOMEM) { + dev_warn(&ctrl_info->pci_dev->dev, "%s\n", + out_of_memory_msg); + goto out; + } + if (rc) { + if (device->is_physical_device) + dev_warn(&ctrl_info->pci_dev->dev, + "obtaining device info failed, skipping physical device %016llx%016llx\n", + get_unaligned_be64(&phys_lun->wwid[0]), + get_unaligned_be64(&phys_lun->wwid[8])); + else + dev_warn(&ctrl_info->pci_dev->dev, + "obtaining device info failed, skipping logical device %08x%08x\n", + *((u32 *)&device->scsi3addr), + *((u32 *)&device->scsi3addr[4])); + rc = 0; + continue; + } + + /* Do not present disks that the OS cannot fully probe. 
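[Editorial aside, not part of the patch: the loop above interleaves the two reported LUN lists. With the driver-wide pqi_expose_ld_first flag clear, slots 0..num_physicals-1 are taken from the physical list and the rest from the logical list; with the flag set, the logical devices come first. A small sketch of that index test, using assumed counts:]

#include <stdbool.h>
#include <stdio.h>

// Mirrors the index test in the loop above: by default physical slots come first.
static bool slot_is_physical(bool expose_ld_first, unsigned int i,
			     unsigned int num_physicals, unsigned int num_logicals)
{
	return (!expose_ld_first && i < num_physicals) ||
	       (expose_ld_first && i >= num_logicals);
}

int main(void)
{
	// 2 physical LUNs, 1 logical LUN, default ordering: slots 0 and 1 are physical.
	for (unsigned int i = 0; i < 3; i++)
		printf("slot %u physical=%d\n", i, slot_is_physical(false, i, 2, 1));
	return 0;
}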
*/ + if (pqi_keep_device_offline(device)) + continue; + + pqi_assign_bus_target_lun(device); + + if (device->is_physical_device) { + memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid)); + if ((phys_lun->device_flags & + CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) && + phys_lun->aio_handle) { + device->aio_enabled = true; + device->aio_handle = + phys_lun->aio_handle; + } + } else { + memcpy(device->volume_id, log_lun->volume_id, + sizeof(device->volume_id)); + } + + device->sas_address = get_unaligned_be64(&device->wwid[0]); + + new_device_list[num_valid_devices++] = device; + } + + pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); + +out: + list_for_each_entry_safe(device, next, &new_device_list_head, + new_device_list_entry) { + if (device->keep_device) + continue; + list_del(&device->new_device_list_entry); + pqi_free_device(device); + } + + kfree(new_device_list); + kfree(physdev_list); + kfree(logdev_list); + kfree(id_phys); + + return rc; +} + +static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + int mutex_acquired; + + if (pqi_ctrl_offline(ctrl_info)) + return -ENXIO; + + mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); + + if (!mutex_acquired) { + if (pqi_ctrl_scan_blocked(ctrl_info)) + return -EBUSY; + pqi_schedule_rescan_worker_delayed(ctrl_info); + return -EINPROGRESS; + } + + rc = pqi_update_scsi_devices(ctrl_info); + if (rc && !pqi_ctrl_scan_blocked(ctrl_info)) + pqi_schedule_rescan_worker_delayed(ctrl_info); + + mutex_unlock(&ctrl_info->scan_mutex); + + return rc; +} + +static void pqi_scan_start(struct Scsi_Host *shost) +{ + struct pqi_ctrl_info *ctrl_info; + + ctrl_info = shost_to_hba(shost); + + pqi_scan_scsi_devices(ctrl_info); +} + +/* Returns TRUE if scan is finished. */ + +static int pqi_scan_finished(struct Scsi_Host *shost, + unsigned long elapsed_time) +{ + struct pqi_ctrl_info *ctrl_info; + + ctrl_info = shost_priv(shost); + + return !mutex_is_locked(&ctrl_info->scan_mutex); +} + +static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info, + struct raid_map *raid_map, u64 first_block) +{ + u32 volume_blk_size; + + /* + * Set the encryption tweak values based on logical block address. + * If the block size is 512, the tweak value is equal to the LBA. + * For other block sizes, tweak value is (LBA * block size) / 512. + */ + volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); + if (volume_blk_size != 512) + first_block = (first_block * volume_blk_size) / 512; + + encryption_info->data_encryption_key_index = + get_unaligned_le16(&raid_map->data_encryption_key_index); + encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); + encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); +} + +/* + * Attempt to perform RAID bypass mapping for a logical volume I/O. 
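[Editorial aside, not part of the patch: the tweak calculation in pqi_set_encryption_info() above scales the starting LBA by the volume block size. With 512-byte blocks the tweak equals the LBA; otherwise it is (LBA * block size) / 512, and the 64-bit result is split into the lower and upper 32-bit fields. Worked example with an assumed 4096-byte volume block size:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t first_block = 100;      // starting LBA of the request
	uint32_t volume_blk_size = 4096; // assumed logical block size of the volume
	uint64_t tweak = first_block;

	if (volume_blk_size != 512)
		tweak = (first_block * volume_blk_size) / 512; // 100 * 4096 / 512 = 800

	printf("tweak_lower=0x%08x tweak_upper=0x%08x\n",
	       (uint32_t)(tweak & 0xffffffffu), (uint32_t)(tweak >> 32));
	return 0;
}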
+ */ + +static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev_raid_map_data *rmd) +{ + bool is_supported = true; + + switch (rmd->raid_level) { + case SA_RAID_0: + break; + case SA_RAID_1: + if (rmd->is_write && (!ctrl_info->enable_r1_writes || + rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) + is_supported = false; + break; + case SA_RAID_TRIPLE: + if (rmd->is_write && (!ctrl_info->enable_r1_writes || + rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) + is_supported = false; + break; + case SA_RAID_5: + if (rmd->is_write && (!ctrl_info->enable_r5_writes || + rmd->data_length > ctrl_info->max_write_raid_5_6)) + is_supported = false; + break; + case SA_RAID_6: + if (rmd->is_write && (!ctrl_info->enable_r6_writes || + rmd->data_length > ctrl_info->max_write_raid_5_6)) + is_supported = false; + break; + default: + is_supported = false; + break; + } + + return is_supported; +} + +#define PQI_RAID_BYPASS_INELIGIBLE 1 + +static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd, + struct pqi_scsi_dev_raid_map_data *rmd) +{ + /* Check for valid opcode, get LBA and block count. */ + switch (scmd->cmnd[0]) { + case WRITE_6: + rmd->is_write = true; + fallthrough; + case READ_6: + rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | + (scmd->cmnd[2] << 8) | scmd->cmnd[3]); + rmd->block_cnt = (u32)scmd->cmnd[4]; + if (rmd->block_cnt == 0) + rmd->block_cnt = 256; + break; + case WRITE_10: + rmd->is_write = true; + fallthrough; + case READ_10: + rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); + rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); + break; + case WRITE_12: + rmd->is_write = true; + fallthrough; + case READ_12: + rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); + rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]); + break; + case WRITE_16: + rmd->is_write = true; + fallthrough; + case READ_16: + rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]); + rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]); + break; + default: + /* Process via normal I/O path. */ + return PQI_RAID_BYPASS_INELIGIBLE; + } + + put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length); + + return 0; +} + +static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map) +{ +#if BITS_PER_LONG == 32 + u64 tmpdiv; +#endif + + rmd->last_block = rmd->first_block + rmd->block_cnt - 1; + + /* Check for invalid block or wraparound. */ + if (rmd->last_block >= + get_unaligned_le64(&raid_map->volume_blk_cnt) || + rmd->last_block < rmd->first_block) + return PQI_RAID_BYPASS_INELIGIBLE; + + rmd->data_disks_per_row = + get_unaligned_le16(&raid_map->data_disks_per_row); + rmd->strip_size = get_unaligned_le16(&raid_map->strip_size); + rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); + + /* Calculate stripe information for the request. 
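[Editorial aside, not part of the patch: a worked example of the stripe arithmetic that follows. Assuming 3 data disks per row and a 128-block strip, a row holds 3 * 128 = 384 blocks, so LBA 500 lands in row 1 at row offset 116, which is column 0; only requests confined to a single row and column stay bypass-eligible.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t first_block = 500;        // request starting LBA
	uint32_t data_disks_per_row = 3;   // assumed RAID geometry
	uint32_t strip_size = 128;         // assumed blocks per strip
	uint32_t blocks_per_row = data_disks_per_row * strip_size; // 384

	uint64_t first_row = first_block / blocks_per_row;
	uint32_t first_row_offset = (uint32_t)(first_block - first_row * blocks_per_row);
	uint32_t first_column = first_row_offset / strip_size;

	printf("row=%llu offset=%u column=%u\n",
	       (unsigned long long)first_row, first_row_offset, first_column);
	return 0;
}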
*/ + rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size; + if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ + return PQI_RAID_BYPASS_INELIGIBLE; +#if BITS_PER_LONG == 32 + tmpdiv = rmd->first_block; + do_div(tmpdiv, rmd->blocks_per_row); + rmd->first_row = tmpdiv; + tmpdiv = rmd->last_block; + do_div(tmpdiv, rmd->blocks_per_row); + rmd->last_row = tmpdiv; + rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row)); + rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row)); + tmpdiv = rmd->first_row_offset; + do_div(tmpdiv, rmd->strip_size); + rmd->first_column = tmpdiv; + tmpdiv = rmd->last_row_offset; + do_div(tmpdiv, rmd->strip_size); + rmd->last_column = tmpdiv; +#else + rmd->first_row = rmd->first_block / rmd->blocks_per_row; + rmd->last_row = rmd->last_block / rmd->blocks_per_row; + rmd->first_row_offset = (u32)(rmd->first_block - + (rmd->first_row * rmd->blocks_per_row)); + rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * + rmd->blocks_per_row)); + rmd->first_column = rmd->first_row_offset / rmd->strip_size; + rmd->last_column = rmd->last_row_offset / rmd->strip_size; +#endif + + /* If this isn't a single row/column then give to the controller. */ + if (rmd->first_row != rmd->last_row || + rmd->first_column != rmd->last_column) + return PQI_RAID_BYPASS_INELIGIBLE; + + /* Proceeding with driver mapping. */ + rmd->total_disks_per_row = rmd->data_disks_per_row + + get_unaligned_le16(&raid_map->metadata_disks_per_row); + rmd->map_row = ((u32)(rmd->first_row >> + raid_map->parity_rotation_shift)) % + get_unaligned_le16(&raid_map->row_cnt); + rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) + + rmd->first_column; + + return 0; +} + +static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd, + struct raid_map *raid_map) +{ +#if BITS_PER_LONG == 32 + u64 tmpdiv; +#endif + + if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ + return PQI_RAID_BYPASS_INELIGIBLE; + + /* RAID 50/60 */ + /* Verify first and last block are in same RAID group. */ + rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count; +#if BITS_PER_LONG == 32 + tmpdiv = rmd->first_block; + rmd->first_group = do_div(tmpdiv, rmd->stripesize); + tmpdiv = rmd->first_group; + do_div(tmpdiv, rmd->blocks_per_row); + rmd->first_group = tmpdiv; + tmpdiv = rmd->last_block; + rmd->last_group = do_div(tmpdiv, rmd->stripesize); + tmpdiv = rmd->last_group; + do_div(tmpdiv, rmd->blocks_per_row); + rmd->last_group = tmpdiv; +#else + rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row; + rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row; +#endif + if (rmd->first_group != rmd->last_group) + return PQI_RAID_BYPASS_INELIGIBLE; + + /* Verify request is in a single row of RAID 5/6. */ +#if BITS_PER_LONG == 32 + tmpdiv = rmd->first_block; + do_div(tmpdiv, rmd->stripesize); + rmd->first_row = tmpdiv; + rmd->r5or6_first_row = tmpdiv; + tmpdiv = rmd->last_block; + do_div(tmpdiv, rmd->stripesize); + rmd->r5or6_last_row = tmpdiv; +#else + rmd->first_row = rmd->r5or6_first_row = + rmd->first_block / rmd->stripesize; + rmd->r5or6_last_row = rmd->last_block / rmd->stripesize; +#endif + if (rmd->r5or6_first_row != rmd->r5or6_last_row) + return PQI_RAID_BYPASS_INELIGIBLE; + + /* Verify request is in a single column. 
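[Editorial aside, not part of the patch: the RAID 50/60 group check above, worked through with assumed numbers. With blocks_per_row = 384 and layout_map_count = 2, the stripesize is 768 blocks; a 4-block request at LBA 500 starts and ends in group 1, so it stays bypass-eligible.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t blocks_per_row = 384;   // assumed: data disks per row * strip size
	uint32_t layout_map_count = 2;   // assumed number of RAID groups
	uint64_t first_block = 500, last_block = 503;

	uint32_t stripesize = blocks_per_row * layout_map_count;
	uint32_t first_group = (uint32_t)((first_block % stripesize) / blocks_per_row);
	uint32_t last_group  = (uint32_t)((last_block % stripesize) / blocks_per_row);

	printf("first_group=%u last_group=%u eligible=%d\n",
	       first_group, last_group, first_group == last_group);
	return 0;
}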
*/ +#if BITS_PER_LONG == 32 + tmpdiv = rmd->first_block; + rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize); + tmpdiv = rmd->first_row_offset; + rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row); + rmd->r5or6_first_row_offset = rmd->first_row_offset; + tmpdiv = rmd->last_block; + rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize); + tmpdiv = rmd->r5or6_last_row_offset; + rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row); + tmpdiv = rmd->r5or6_first_row_offset; + do_div(tmpdiv, rmd->strip_size); + rmd->first_column = rmd->r5or6_first_column = tmpdiv; + tmpdiv = rmd->r5or6_last_row_offset; + do_div(tmpdiv, rmd->strip_size); + rmd->r5or6_last_column = tmpdiv; +#else + rmd->first_row_offset = rmd->r5or6_first_row_offset = + (u32)((rmd->first_block % rmd->stripesize) % + rmd->blocks_per_row); + + rmd->r5or6_last_row_offset = + (u32)((rmd->last_block % rmd->stripesize) % + rmd->blocks_per_row); + + rmd->first_column = + rmd->r5or6_first_row_offset / rmd->strip_size; + rmd->r5or6_first_column = rmd->first_column; + rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size; +#endif + if (rmd->r5or6_first_column != rmd->r5or6_last_column) + return PQI_RAID_BYPASS_INELIGIBLE; + + /* Request is eligible. */ + rmd->map_row = + ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) % + get_unaligned_le16(&raid_map->row_cnt); + + rmd->map_index = (rmd->first_group * + (get_unaligned_le16(&raid_map->row_cnt) * + rmd->total_disks_per_row)) + + (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column; + + if (rmd->is_write) { + u32 index; + + /* + * p_parity_it_nexus and q_parity_it_nexus are pointers to the + * parity entries inside the device's raid_map. + * + * A device's RAID map is bounded by: number of RAID disks squared. + * + * The devices RAID map size is checked during device + * initialization. + */ + index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row); + index *= rmd->total_disks_per_row; + index -= get_unaligned_le16(&raid_map->metadata_disks_per_row); + + rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle; + if (rmd->raid_level == SA_RAID_6) { + rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle; + rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1]; + } +#if BITS_PER_LONG == 32 + tmpdiv = rmd->first_block; + do_div(tmpdiv, rmd->blocks_per_row); + rmd->row = tmpdiv; +#else + rmd->row = rmd->first_block / rmd->blocks_per_row; +#endif + } + + return 0; +} + +static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd) +{ + /* Build the new CDB for the physical disk I/O. */ + if (rmd->disk_block > 0xffffffff) { + rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16; + rmd->cdb[1] = 0; + put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]); + put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]); + rmd->cdb[14] = 0; + rmd->cdb[15] = 0; + rmd->cdb_length = 16; + } else { + rmd->cdb[0] = rmd->is_write ? 
WRITE_10 : READ_10; + rmd->cdb[1] = 0; + put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]); + rmd->cdb[6] = 0; + put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]); + rmd->cdb[9] = 0; + rmd->cdb_length = 10; + } +} + +static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map, + struct pqi_scsi_dev_raid_map_data *rmd) +{ + u32 index; + u32 group; + + group = rmd->map_index / rmd->data_disks_per_row; + + index = rmd->map_index - (group * rmd->data_disks_per_row); + rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle; + index += rmd->data_disks_per_row; + rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle; + if (rmd->layout_map_count > 2) { + index += rmd->data_disks_per_row; + rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle; + } + + rmd->num_it_nexus_entries = rmd->layout_map_count; +} + +static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, + struct pqi_queue_group *queue_group) +{ + int rc; + struct raid_map *raid_map; + u32 group; + u32 next_bypass_group; + struct pqi_encryption_info *encryption_info_ptr; + struct pqi_encryption_info encryption_info; + struct pqi_scsi_dev_raid_map_data rmd = { 0 }; + + rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); + if (rc) + return PQI_RAID_BYPASS_INELIGIBLE; + + rmd.raid_level = device->raid_level; + + if (!pqi_aio_raid_level_supported(ctrl_info, &rmd)) + return PQI_RAID_BYPASS_INELIGIBLE; + + if (unlikely(rmd.block_cnt == 0)) + return PQI_RAID_BYPASS_INELIGIBLE; + + raid_map = device->raid_map; + + rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map); + if (rc) + return PQI_RAID_BYPASS_INELIGIBLE; + + if (device->raid_level == SA_RAID_1 || + device->raid_level == SA_RAID_TRIPLE) { + if (rmd.is_write) { + pqi_calc_aio_r1_nexus(raid_map, &rmd); + } else { + group = device->next_bypass_group[rmd.map_index]; + next_bypass_group = group + 1; + if (next_bypass_group >= rmd.layout_map_count) + next_bypass_group = 0; + device->next_bypass_group[rmd.map_index] = next_bypass_group; + rmd.map_index += group * rmd.data_disks_per_row; + } + } else if ((device->raid_level == SA_RAID_5 || + device->raid_level == SA_RAID_6) && + (rmd.layout_map_count > 1 || rmd.is_write)) { + rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map); + if (rc) + return PQI_RAID_BYPASS_INELIGIBLE; + } + + if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES)) + return PQI_RAID_BYPASS_INELIGIBLE; + + rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle; + rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + + rmd.first_row * rmd.strip_size + + (rmd.first_row_offset - rmd.first_column * rmd.strip_size); + rmd.disk_block_cnt = rmd.block_cnt; + + /* Handle differing logical/physical block sizes. 
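[Editorial aside, not part of the patch: the shift that follows rescales the request when the volume's logical block differs from the member drive's block, assuming phys_blk_shift is the log2 of that ratio (for example, 4096-byte volume blocks over 512-byte drive blocks gives a shift of 3, multiplying both the starting block and the count by 8). pqi_set_aio_cdb() above then picks a 10-byte CDB when the rebuilt address fits in 32 bits and a 16-byte CDB otherwise. A small sketch of both steps:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t  phys_blk_shift = 3;   // assumed: log2(4096 / 512)
	uint64_t disk_block = 1000;    // in volume logical blocks
	uint32_t disk_block_cnt = 8;

	disk_block <<= phys_blk_shift;     // 8000 drive blocks
	disk_block_cnt <<= phys_blk_shift; // 64 drive blocks

	if (disk_block > 0xffffffffULL)
		printf("16-byte CDB (READ_16/WRITE_16), lba=%llu cnt=%u\n",
		       (unsigned long long)disk_block, disk_block_cnt);
	else
		printf("10-byte CDB (READ_10/WRITE_10), lba=%llu cnt=%u\n",
		       (unsigned long long)disk_block, disk_block_cnt);
	return 0;
}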
*/ + if (raid_map->phys_blk_shift) { + rmd.disk_block <<= raid_map->phys_blk_shift; + rmd.disk_block_cnt <<= raid_map->phys_blk_shift; + } + + if (unlikely(rmd.disk_block_cnt > 0xffff)) + return PQI_RAID_BYPASS_INELIGIBLE; + + pqi_set_aio_cdb(&rmd); + + if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) { + if (rmd.data_length > device->max_transfer_encrypted) + return PQI_RAID_BYPASS_INELIGIBLE; + pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block); + encryption_info_ptr = &encryption_info; + } else { + encryption_info_ptr = NULL; + } + + if (rmd.is_write) { + switch (device->raid_level) { + case SA_RAID_1: + case SA_RAID_TRIPLE: + return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group, + encryption_info_ptr, device, &rmd); + case SA_RAID_5: + case SA_RAID_6: + return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group, + encryption_info_ptr, device, &rmd); + } + } + + return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle, + rmd.cdb, rmd.cdb_length, queue_group, + encryption_info_ptr, true, false); +} + +#define PQI_STATUS_IDLE 0x0 + +#define PQI_CREATE_ADMIN_QUEUE_PAIR 1 +#define PQI_DELETE_ADMIN_QUEUE_PAIR 2 + +#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 +#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 +#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 +#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 +#define PQI_DEVICE_STATE_ERROR 0x4 + +#define PQI_MODE_READY_TIMEOUT_SECS 30 +#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 + +static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) +{ + struct pqi_device_registers __iomem *pqi_registers; + unsigned long timeout; + u64 signature; + u8 status; + + pqi_registers = ctrl_info->pqi_registers; + timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; + + while (1) { + signature = readq(&pqi_registers->signature); + if (memcmp(&signature, PQI_DEVICE_SIGNATURE, + sizeof(signature)) == 0) + break; + if (time_after(jiffies, timeout)) { + dev_err(&ctrl_info->pci_dev->dev, + "timed out waiting for PQI signature\n"); + return -ETIMEDOUT; + } + msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); + } + + while (1) { + status = readb(&pqi_registers->function_and_status_code); + if (status == PQI_STATUS_IDLE) + break; + if (time_after(jiffies, timeout)) { + dev_err(&ctrl_info->pci_dev->dev, + "timed out waiting for PQI IDLE\n"); + return -ETIMEDOUT; + } + msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); + } + + while (1) { + if (readl(&pqi_registers->device_status) == + PQI_DEVICE_STATE_ALL_REGISTERS_READY) + break; + if (time_after(jiffies, timeout)) { + dev_err(&ctrl_info->pci_dev->dev, + "timed out waiting for PQI all registers ready\n"); + return -ETIMEDOUT; + } + msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); + } + + return 0; +} + +static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) +{ + struct pqi_scsi_dev *device; + + device = io_request->scmd->device->hostdata; + device->raid_bypass_enabled = false; + device->aio_enabled = false; +} + +static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) +{ + struct pqi_ctrl_info *ctrl_info; + struct pqi_scsi_dev *device; + + device = sdev->hostdata; + if (device->device_offline) + return; + + device->device_offline = true; + ctrl_info = shost_to_hba(sdev->host); + pqi_schedule_rescan_worker(ctrl_info); + dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", + path, ctrl_info->scsi_host->host_no, device->bus, + device->target, device->lun); +} + +static void 
pqi_process_raid_io_error(struct pqi_io_request *io_request) +{ + u8 scsi_status; + u8 host_byte; + struct scsi_cmnd *scmd; + struct pqi_raid_error_info *error_info; + size_t sense_data_length; + int residual_count; + int xfer_count; + struct scsi_sense_hdr sshdr; + + scmd = io_request->scmd; + if (!scmd) + return; + + error_info = io_request->error_info; + scsi_status = error_info->status; + host_byte = DID_OK; + + switch (error_info->data_out_result) { + case PQI_DATA_IN_OUT_GOOD: + break; + case PQI_DATA_IN_OUT_UNDERFLOW: + xfer_count = + get_unaligned_le32(&error_info->data_out_transferred); + residual_count = scsi_bufflen(scmd) - xfer_count; + scsi_set_resid(scmd, residual_count); + if (xfer_count < scmd->underflow) + host_byte = DID_SOFT_ERROR; + break; + case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: + case PQI_DATA_IN_OUT_ABORTED: + host_byte = DID_ABORT; + break; + case PQI_DATA_IN_OUT_TIMEOUT: + host_byte = DID_TIME_OUT; + break; + case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: + case PQI_DATA_IN_OUT_PROTOCOL_ERROR: + case PQI_DATA_IN_OUT_BUFFER_ERROR: + case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: + case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: + case PQI_DATA_IN_OUT_ERROR: + case PQI_DATA_IN_OUT_HARDWARE_ERROR: + case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: + case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: + case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: + case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: + case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: + case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: + case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: + case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: + case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: + default: + host_byte = DID_ERROR; + break; + } + + sense_data_length = get_unaligned_le16(&error_info->sense_data_length); + if (sense_data_length == 0) + sense_data_length = + get_unaligned_le16(&error_info->response_data_length); + if (sense_data_length) { + if (sense_data_length > sizeof(error_info->data)) + sense_data_length = sizeof(error_info->data); + + if (scsi_status == SAM_STAT_CHECK_CONDITION && + scsi_normalize_sense(error_info->data, + sense_data_length, &sshdr) && + sshdr.sense_key == HARDWARE_ERROR && + sshdr.asc == 0x3e) { + struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); + struct pqi_scsi_dev *device = scmd->device->hostdata; + + switch (sshdr.ascq) { + case 0x1: /* LOGICAL UNIT FAILURE */ + if (printk_ratelimit()) + scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n", + ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); + pqi_take_device_offline(scmd->device, "RAID"); + host_byte = DID_NO_CONNECT; + break; + + default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */ + if (printk_ratelimit()) + scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n", + sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); + break; + } + } + + if (sense_data_length > SCSI_SENSE_BUFFERSIZE) + sense_data_length = SCSI_SENSE_BUFFERSIZE; + memcpy(scmd->sense_buffer, error_info->data, + sense_data_length); + } + + scmd->result = scsi_status; + set_host_byte(scmd, host_byte); +} + +static void pqi_process_aio_io_error(struct pqi_io_request *io_request) +{ + u8 scsi_status; + u8 host_byte; + struct scsi_cmnd *scmd; + struct pqi_aio_error_info *error_info; + size_t sense_data_length; + int residual_count; + int xfer_count; + bool device_offline; + struct pqi_scsi_dev *device; + + scmd = 
io_request->scmd; + error_info = io_request->error_info; + host_byte = DID_OK; + sense_data_length = 0; + device_offline = false; + device = scmd->device->hostdata; + + switch (error_info->service_response) { + case PQI_AIO_SERV_RESPONSE_COMPLETE: + scsi_status = error_info->status; + break; + case PQI_AIO_SERV_RESPONSE_FAILURE: + switch (error_info->status) { + case PQI_AIO_STATUS_IO_ABORTED: + scsi_status = SAM_STAT_TASK_ABORTED; + break; + case PQI_AIO_STATUS_UNDERRUN: + scsi_status = SAM_STAT_GOOD; + residual_count = get_unaligned_le32( + &error_info->residual_count); + scsi_set_resid(scmd, residual_count); + xfer_count = scsi_bufflen(scmd) - residual_count; + if (xfer_count < scmd->underflow) + host_byte = DID_SOFT_ERROR; + break; + case PQI_AIO_STATUS_OVERRUN: + scsi_status = SAM_STAT_GOOD; + break; + case PQI_AIO_STATUS_AIO_PATH_DISABLED: + pqi_aio_path_disabled(io_request); + if (pqi_is_multipath_device(device)) { + pqi_device_remove_start(device); + host_byte = DID_NO_CONNECT; + scsi_status = SAM_STAT_CHECK_CONDITION; + } else { + scsi_status = SAM_STAT_GOOD; + io_request->status = -EAGAIN; + } + break; + case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: + case PQI_AIO_STATUS_INVALID_DEVICE: + if (!io_request->raid_bypass) { + device_offline = true; + pqi_take_device_offline(scmd->device, "AIO"); + host_byte = DID_NO_CONNECT; + } + scsi_status = SAM_STAT_CHECK_CONDITION; + break; + case PQI_AIO_STATUS_IO_ERROR: + default: + scsi_status = SAM_STAT_CHECK_CONDITION; + break; + } + break; + case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: + case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: + scsi_status = SAM_STAT_GOOD; + break; + case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: + case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: + default: + scsi_status = SAM_STAT_CHECK_CONDITION; + break; + } + + if (error_info->data_present) { + sense_data_length = + get_unaligned_le16(&error_info->data_length); + if (sense_data_length) { + if (sense_data_length > sizeof(error_info->data)) + sense_data_length = sizeof(error_info->data); + if (sense_data_length > SCSI_SENSE_BUFFERSIZE) + sense_data_length = SCSI_SENSE_BUFFERSIZE; + memcpy(scmd->sense_buffer, error_info->data, + sense_data_length); + } + } + + if (device_offline && sense_data_length == 0) + scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1); + + scmd->result = scsi_status; + set_host_byte(scmd, host_byte); +} + +static void pqi_process_io_error(unsigned int iu_type, + struct pqi_io_request *io_request) +{ + switch (iu_type) { + case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: + pqi_process_raid_io_error(io_request); + break; + case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: + pqi_process_aio_io_error(io_request); + break; + } +} + +static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info, + struct pqi_task_management_response *response) +{ + int rc; + + switch (response->response_code) { + case SOP_TMF_COMPLETE: + case SOP_TMF_FUNCTION_SUCCEEDED: + rc = 0; + break; + case SOP_TMF_REJECTED: + rc = -EAGAIN; + break; + case SOP_TMF_INCORRECT_LOGICAL_UNIT: + rc = -ENODEV; + break; + default: + rc = -EIO; + break; + } + + if (rc) + dev_err(&ctrl_info->pci_dev->dev, + "Task Management Function error: %d (response code: %u)\n", rc, response->response_code); + + return rc; +} + +static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info, + enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) +{ + pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason); +} + +static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) 
+{ + int num_responses; + pqi_index_t oq_pi; + pqi_index_t oq_ci; + struct pqi_io_request *io_request; + struct pqi_io_response *response; + u16 request_id; + + num_responses = 0; + oq_ci = queue_group->oq_ci_copy; + + while (1) { + oq_pi = readl(queue_group->oq_pi); + if (oq_pi >= ctrl_info->num_elements_per_oq) { + pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE); + dev_err(&ctrl_info->pci_dev->dev, + "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", + oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); + return -1; + } + if (oq_pi == oq_ci) + break; + + num_responses++; + response = queue_group->oq_element_array + + (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); + + request_id = get_unaligned_le16(&response->request_id); + if (request_id >= ctrl_info->max_io_slots) { + pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID); + dev_err(&ctrl_info->pci_dev->dev, + "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n", + request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); + return -1; + } + + io_request = &ctrl_info->io_request_pool[request_id]; + if (atomic_read(&io_request->refcount) == 0) { + pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID); + dev_err(&ctrl_info->pci_dev->dev, + "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n", + request_id, oq_pi, oq_ci); + return -1; + } + + switch (response->header.iu_type) { + case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: + case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: + if (io_request->scmd) + io_request->scmd->result = 0; + fallthrough; + case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: + break; + case PQI_RESPONSE_IU_VENDOR_GENERAL: + io_request->status = + get_unaligned_le16( + &((struct pqi_vendor_general_response *)response)->status); + break; + case PQI_RESPONSE_IU_TASK_MANAGEMENT: + io_request->status = pqi_interpret_task_management_response(ctrl_info, + (void *)response); + break; + case PQI_RESPONSE_IU_AIO_PATH_DISABLED: + pqi_aio_path_disabled(io_request); + io_request->status = -EAGAIN; + break; + case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: + case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: + io_request->error_info = ctrl_info->error_buffer + + (get_unaligned_le16(&response->error_index) * + PQI_ERROR_BUFFER_ELEMENT_LENGTH); + pqi_process_io_error(response->header.iu_type, io_request); + break; + default: + pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE); + dev_err(&ctrl_info->pci_dev->dev, + "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n", + response->header.iu_type, oq_pi, oq_ci); + return -1; + } + + io_request->io_complete_callback(io_request, io_request->context); + + /* + * Note that the I/O request structure CANNOT BE TOUCHED after + * returning from the I/O completion callback! 
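[Editorial aside, not part of the patch: both the outbound queue drained above and the inbound submission queues further down are fixed-size rings. The pqi_num_elements_free() helper that follows keeps one slot permanently unused so that pi == ci can only mean "empty". Worked example with an assumed 16-element queue: pi = 3 and ci = 10 means nine elements are in flight and six slots are free.]

#include <stdio.h>

// One slot is always left unused so that pi == ci unambiguously means "empty".
static unsigned int num_elements_free(unsigned int pi, unsigned int ci,
				      unsigned int elements_in_queue)
{
	unsigned int used = (pi >= ci) ? pi - ci : elements_in_queue - ci + pi;

	return elements_in_queue - used - 1;
}

int main(void)
{
	printf("%u\n", num_elements_free(3, 10, 16)); // 9 in flight, prints 6
	return 0;
}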
+ */ + oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; + } + + if (num_responses) { + queue_group->oq_ci_copy = oq_ci; + writel(oq_ci, queue_group->oq_ci); + } + + return num_responses; +} + +static inline unsigned int pqi_num_elements_free(unsigned int pi, + unsigned int ci, unsigned int elements_in_queue) +{ + unsigned int num_elements_used; + + if (pi >= ci) + num_elements_used = pi - ci; + else + num_elements_used = elements_in_queue - ci + pi; + + return elements_in_queue - num_elements_used - 1; +} + +static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, + struct pqi_event_acknowledge_request *iu, size_t iu_length) +{ + pqi_index_t iq_pi; + pqi_index_t iq_ci; + unsigned long flags; + void *next_element; + struct pqi_queue_group *queue_group; + + queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; + put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); + + while (1) { + spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); + + iq_pi = queue_group->iq_pi_copy[RAID_PATH]; + iq_ci = readl(queue_group->iq_ci[RAID_PATH]); + + if (pqi_num_elements_free(iq_pi, iq_ci, + ctrl_info->num_elements_per_iq)) + break; + + spin_unlock_irqrestore( + &queue_group->submit_lock[RAID_PATH], flags); + + if (pqi_ctrl_offline(ctrl_info)) + return; + } + + next_element = queue_group->iq_element_array[RAID_PATH] + + (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); + + memcpy(next_element, iu, iu_length); + + iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; + queue_group->iq_pi_copy[RAID_PATH] = iq_pi; + + /* + * This write notifies the controller that an IU is available to be + * processed. + */ + writel(iq_pi, queue_group->iq_pi[RAID_PATH]); + + spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); +} + +static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, + struct pqi_event *event) +{ + struct pqi_event_acknowledge_request request; + + memset(&request, 0, sizeof(request)); + + request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; + put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, + &request.header.iu_length); + request.event_type = event->event_type; + put_unaligned_le16(event->event_id, &request.event_id); + put_unaligned_le32(event->additional_event_id, &request.additional_event_id); + + pqi_send_event_ack(ctrl_info, &request, sizeof(request)); +} + +#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30 +#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1 + +static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( + struct pqi_ctrl_info *ctrl_info) +{ + u8 status; + unsigned long timeout; + + timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies; + + while (1) { + status = pqi_read_soft_reset_status(ctrl_info); + if (status & PQI_SOFT_RESET_INITIATE) + return RESET_INITIATE_DRIVER; + + if (status & PQI_SOFT_RESET_ABORT) + return RESET_ABORT; + + if (!sis_is_firmware_running(ctrl_info)) + return RESET_NORESPONSE; + + if (time_after(jiffies, timeout)) { + dev_warn(&ctrl_info->pci_dev->dev, + "timed out waiting for soft reset status\n"); + return RESET_TIMEDOUT; + } + + ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS); + } +} + +static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + unsigned int delay_secs; + enum pqi_soft_reset_status reset_status; + + if (ctrl_info->soft_reset_handshake_supported) + reset_status = pqi_poll_for_soft_reset_status(ctrl_info); + else + reset_status = RESET_INITIATE_FIRMWARE; + + delay_secs = PQI_POST_RESET_DELAY_SECS; + + switch 
(reset_status) { + case RESET_TIMEDOUT: + delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS; + fallthrough; + case RESET_INITIATE_DRIVER: + dev_info(&ctrl_info->pci_dev->dev, + "Online Firmware Activation: resetting controller\n"); + sis_soft_reset(ctrl_info); + fallthrough; + case RESET_INITIATE_FIRMWARE: + ctrl_info->pqi_mode_enabled = false; + pqi_save_ctrl_mode(ctrl_info, SIS_MODE); + rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs); + pqi_ofa_free_host_buffer(ctrl_info); + pqi_ctrl_ofa_done(ctrl_info); + dev_info(&ctrl_info->pci_dev->dev, + "Online Firmware Activation: %s\n", + rc == 0 ? "SUCCESS" : "FAILED"); + break; + case RESET_ABORT: + dev_info(&ctrl_info->pci_dev->dev, + "Online Firmware Activation ABORTED\n"); + if (ctrl_info->soft_reset_handshake_supported) + pqi_clear_soft_reset_status(ctrl_info); + pqi_ofa_free_host_buffer(ctrl_info); + pqi_ctrl_ofa_done(ctrl_info); + pqi_ofa_ctrl_unquiesce(ctrl_info); + break; + case RESET_NORESPONSE: + fallthrough; + default: + dev_err(&ctrl_info->pci_dev->dev, + "unexpected Online Firmware Activation reset status: 0x%x\n", + reset_status); + pqi_ofa_free_host_buffer(ctrl_info); + pqi_ctrl_ofa_done(ctrl_info); + pqi_ofa_ctrl_unquiesce(ctrl_info); + pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT); + break; + } +} + +static void pqi_ofa_memory_alloc_worker(struct work_struct *work) +{ + struct pqi_ctrl_info *ctrl_info; + + ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work); + + pqi_ctrl_ofa_start(ctrl_info); + pqi_ofa_setup_host_buffer(ctrl_info); + pqi_ofa_host_memory_update(ctrl_info); +} + +static void pqi_ofa_quiesce_worker(struct work_struct *work) +{ + struct pqi_ctrl_info *ctrl_info; + struct pqi_event *event; + + ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work); + + event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)]; + + pqi_ofa_ctrl_quiesce(ctrl_info); + pqi_acknowledge_event(ctrl_info, event); + pqi_process_soft_reset(ctrl_info); +} + +static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, + struct pqi_event *event) +{ + bool ack_event; + + ack_event = true; + + switch (event->event_id) { + case PQI_EVENT_OFA_MEMORY_ALLOCATION: + dev_info(&ctrl_info->pci_dev->dev, + "received Online Firmware Activation memory allocation request\n"); + schedule_work(&ctrl_info->ofa_memory_alloc_work); + break; + case PQI_EVENT_OFA_QUIESCE: + dev_info(&ctrl_info->pci_dev->dev, + "received Online Firmware Activation quiesce request\n"); + schedule_work(&ctrl_info->ofa_quiesce_work); + ack_event = false; + break; + case PQI_EVENT_OFA_CANCELED: + dev_info(&ctrl_info->pci_dev->dev, + "received Online Firmware Activation cancel request: reason: %u\n", + ctrl_info->ofa_cancel_reason); + pqi_ofa_free_host_buffer(ctrl_info); + pqi_ctrl_ofa_done(ctrl_info); + break; + default: + dev_err(&ctrl_info->pci_dev->dev, + "received unknown Online Firmware Activation request: event ID: %u\n", + event->event_id); + break; + } + + return ack_event; +} + +static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info) +{ + unsigned long flags; + struct pqi_scsi_dev *device; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) + if (device->raid_bypass_enabled) + device->raid_bypass_enabled = false; + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); +} + +static void pqi_event_worker(struct work_struct *work) +{ + unsigned int i; + bool rescan_needed; 
+ struct pqi_ctrl_info *ctrl_info; + struct pqi_event *event; + bool ack_event; + + ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); + + pqi_ctrl_busy(ctrl_info); + pqi_wait_if_ctrl_blocked(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) + goto out; + + rescan_needed = false; + event = ctrl_info->events; + for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { + if (event->pending) { + event->pending = false; + if (event->event_type == PQI_EVENT_TYPE_OFA) { + ack_event = pqi_ofa_process_event(ctrl_info, event); + } else { + ack_event = true; + rescan_needed = true; + if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE) + ctrl_info->logical_volume_rescan_needed = true; + else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE) + pqi_disable_raid_bypass(ctrl_info); + } + if (ack_event) + pqi_acknowledge_event(ctrl_info, event); + } + event++; + } + +#define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ) + + if (rescan_needed) + pqi_schedule_rescan_worker_with_delay(ctrl_info, + PQI_RESCAN_WORK_FOR_EVENT_DELAY); + +out: + pqi_ctrl_unbusy(ctrl_info); +} + +#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) + +static void pqi_heartbeat_timer_handler(struct timer_list *t) +{ + int num_interrupts; + u32 heartbeat_count; + struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer); + + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) + return; + + num_interrupts = atomic_read(&ctrl_info->num_interrupts); + heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); + + if (num_interrupts == ctrl_info->previous_num_interrupts) { + if (heartbeat_count == ctrl_info->previous_heartbeat_count) { + dev_err(&ctrl_info->pci_dev->dev, + "no heartbeat detected - last heartbeat count: %u\n", + heartbeat_count); + pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT); + return; + } + } else { + ctrl_info->previous_num_interrupts = num_interrupts; + } + + ctrl_info->previous_heartbeat_count = heartbeat_count; + mod_timer(&ctrl_info->heartbeat_timer, + jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); +} + +static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) +{ + if (!ctrl_info->heartbeat_counter) + return; + + ctrl_info->previous_num_interrupts = + atomic_read(&ctrl_info->num_interrupts); + ctrl_info->previous_heartbeat_count = + pqi_read_heartbeat_counter(ctrl_info); + + ctrl_info->heartbeat_timer.expires = + jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; + add_timer(&ctrl_info->heartbeat_timer); +} + +static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) +{ + del_timer_sync(&ctrl_info->heartbeat_timer); +} + +static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info, + struct pqi_event *event, struct pqi_event_response *response) +{ + switch (event->event_id) { + case PQI_EVENT_OFA_MEMORY_ALLOCATION: + ctrl_info->ofa_bytes_requested = + get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested); + break; + case PQI_EVENT_OFA_CANCELED: + ctrl_info->ofa_cancel_reason = + get_unaligned_le16(&response->data.ofa_cancelled.reason); + break; + } +} + +static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) +{ + int num_events; + pqi_index_t oq_pi; + pqi_index_t oq_ci; + struct pqi_event_queue *event_queue; + struct pqi_event_response *response; + struct pqi_event *event; + int event_index; + + event_queue = &ctrl_info->event_queue; + num_events = 0; + oq_ci = event_queue->oq_ci_copy; + + while (1) { + oq_pi = readl(event_queue->oq_pi); + if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) { + 
pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE); + dev_err(&ctrl_info->pci_dev->dev, + "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", + oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci); + return -1; + } + + if (oq_pi == oq_ci) + break; + + num_events++; + response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); + + event_index = pqi_event_type_to_event_index(response->event_type); + + if (event_index >= 0 && response->request_acknowledge) { + event = &ctrl_info->events[event_index]; + event->pending = true; + event->event_type = response->event_type; + event->event_id = get_unaligned_le16(&response->event_id); + event->additional_event_id = + get_unaligned_le32(&response->additional_event_id); + if (event->event_type == PQI_EVENT_TYPE_OFA) + pqi_ofa_capture_event_payload(ctrl_info, event, response); + } + + oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; + } + + if (num_events) { + event_queue->oq_ci_copy = oq_ci; + writel(oq_ci, event_queue->oq_ci); + schedule_work(&ctrl_info->event_work); + } + + return num_events; +} + +#define PQI_LEGACY_INTX_MASK 0x1 + +static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx) +{ + u32 intx_mask; + struct pqi_device_registers __iomem *pqi_registers; + volatile void __iomem *register_addr; + + pqi_registers = ctrl_info->pqi_registers; + + if (enable_intx) + register_addr = &pqi_registers->legacy_intx_mask_clear; + else + register_addr = &pqi_registers->legacy_intx_mask_set; + + intx_mask = readl(register_addr); + intx_mask |= PQI_LEGACY_INTX_MASK; + writel(intx_mask, register_addr); +} + +static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, + enum pqi_irq_mode new_mode) +{ + switch (ctrl_info->irq_mode) { + case IRQ_MODE_MSIX: + switch (new_mode) { + case IRQ_MODE_MSIX: + break; + case IRQ_MODE_INTX: + pqi_configure_legacy_intx(ctrl_info, true); + sis_enable_intx(ctrl_info); + break; + case IRQ_MODE_NONE: + break; + } + break; + case IRQ_MODE_INTX: + switch (new_mode) { + case IRQ_MODE_MSIX: + pqi_configure_legacy_intx(ctrl_info, false); + sis_enable_msix(ctrl_info); + break; + case IRQ_MODE_INTX: + break; + case IRQ_MODE_NONE: + pqi_configure_legacy_intx(ctrl_info, false); + break; + } + break; + case IRQ_MODE_NONE: + switch (new_mode) { + case IRQ_MODE_MSIX: + sis_enable_msix(ctrl_info); + break; + case IRQ_MODE_INTX: + pqi_configure_legacy_intx(ctrl_info, true); + sis_enable_intx(ctrl_info); + break; + case IRQ_MODE_NONE: + break; + } + break; + } + + ctrl_info->irq_mode = new_mode; +} + +#define PQI_LEGACY_INTX_PENDING 0x1 + +static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) +{ + bool valid_irq; + u32 intx_status; + + switch (ctrl_info->irq_mode) { + case IRQ_MODE_MSIX: + valid_irq = true; + break; + case IRQ_MODE_INTX: + intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status); + if (intx_status & PQI_LEGACY_INTX_PENDING) + valid_irq = true; + else + valid_irq = false; + break; + case IRQ_MODE_NONE: + default: + valid_irq = false; + break; + } + + return valid_irq; +} + +static irqreturn_t pqi_irq_handler(int irq, void *data) +{ + struct pqi_ctrl_info *ctrl_info; + struct pqi_queue_group *queue_group; + int num_io_responses_handled; + int num_events_handled; + + queue_group = data; + ctrl_info = queue_group->ctrl_info; + + if (!pqi_is_valid_irq(ctrl_info)) + return IRQ_NONE; + + num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); + if (num_io_responses_handled < 0) + goto out; 
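[Editorial aside, not part of the patch: pqi_is_valid_irq() above exists because a legacy INTx line can be shared with other devices, so the handler only claims the interrupt when bit 0 of the legacy interrupt status register is set; an MSI-X vector is dedicated and therefore always treated as valid. A minimal sketch of that decision:]

#include <stdint.h>
#include <stdio.h>

#define LEGACY_INTX_PENDING 0x1u

// In INTx mode only claim the interrupt when the pending bit is set;
// an MSI-X vector is dedicated to this device, so it is always valid.
static int irq_is_ours(int using_msix, uint32_t intx_status)
{
	if (using_msix)
		return 1;
	return (intx_status & LEGACY_INTX_PENDING) != 0;
}

int main(void)
{
	printf("%d\n", irq_is_ours(0, 0x0)); // shared INTx, not pending -> 0
	printf("%d\n", irq_is_ours(0, 0x1)); // shared INTx, pending     -> 1
	printf("%d\n", irq_is_ours(1, 0x0)); // MSI-X                    -> 1
	return 0;
}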
+ + if (irq == ctrl_info->event_irq) { + num_events_handled = pqi_process_event_intr(ctrl_info); + if (num_events_handled < 0) + goto out; + } else { + num_events_handled = 0; + } + + if (num_io_responses_handled + num_events_handled > 0) + atomic_inc(&ctrl_info->num_interrupts); + + pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); + pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); + +out: + return IRQ_HANDLED; +} + +static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) +{ + struct pci_dev *pci_dev = ctrl_info->pci_dev; + int i; + int rc; + + ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); + + for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { + rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, + DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); + if (rc) { + dev_err(&pci_dev->dev, + "irq %u init failed with error %d\n", + pci_irq_vector(pci_dev, i), rc); + return rc; + } + ctrl_info->num_msix_vectors_initialized++; + } + + return 0; +} + +static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) +{ + int i; + + for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) + free_irq(pci_irq_vector(ctrl_info->pci_dev, i), + &ctrl_info->queue_groups[i]); + + ctrl_info->num_msix_vectors_initialized = 0; +} + +static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) +{ + int num_vectors_enabled; + unsigned int flags = PCI_IRQ_MSIX; + + if (!pqi_disable_managed_interrupts) + flags |= PCI_IRQ_AFFINITY; + + num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, + PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, + flags); + if (num_vectors_enabled < 0) { + dev_err(&ctrl_info->pci_dev->dev, + "MSI-X init failed with error %d\n", + num_vectors_enabled); + return num_vectors_enabled; + } + + ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; + ctrl_info->irq_mode = IRQ_MODE_MSIX; + return 0; +} + +static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) +{ + if (ctrl_info->num_msix_vectors_enabled) { + pci_free_irq_vectors(ctrl_info->pci_dev); + ctrl_info->num_msix_vectors_enabled = 0; + } +} + +static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) +{ + unsigned int i; + size_t alloc_length; + size_t element_array_length_per_iq; + size_t element_array_length_per_oq; + void *element_array; + void __iomem *next_queue_index; + void *aligned_pointer; + unsigned int num_inbound_queues; + unsigned int num_outbound_queues; + unsigned int num_queue_indexes; + struct pqi_queue_group *queue_group; + + element_array_length_per_iq = + PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * + ctrl_info->num_elements_per_iq; + element_array_length_per_oq = + PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * + ctrl_info->num_elements_per_oq; + num_inbound_queues = ctrl_info->num_queue_groups * 2; + num_outbound_queues = ctrl_info->num_queue_groups; + num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; + + aligned_pointer = NULL; + + for (i = 0; i < num_inbound_queues; i++) { + aligned_pointer = PTR_ALIGN(aligned_pointer, + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); + aligned_pointer += element_array_length_per_iq; + } + + for (i = 0; i < num_outbound_queues; i++) { + aligned_pointer = PTR_ALIGN(aligned_pointer, + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); + aligned_pointer += element_array_length_per_oq; + } + + aligned_pointer = PTR_ALIGN(aligned_pointer, + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); + aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * + PQI_EVENT_OQ_ELEMENT_LENGTH; + + for (i = 0; i < num_queue_indexes; i++) { + aligned_pointer = 
PTR_ALIGN(aligned_pointer, + PQI_OPERATIONAL_INDEX_ALIGNMENT); + aligned_pointer += sizeof(pqi_index_t); + } + + alloc_length = (size_t)aligned_pointer + + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; + + alloc_length += PQI_EXTRA_SGL_MEMORY; + + ctrl_info->queue_memory_base = + dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, + &ctrl_info->queue_memory_base_dma_handle, + GFP_KERNEL); + + if (!ctrl_info->queue_memory_base) + return -ENOMEM; + + ctrl_info->queue_memory_length = alloc_length; + + element_array = PTR_ALIGN(ctrl_info->queue_memory_base, + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); + + for (i = 0; i < ctrl_info->num_queue_groups; i++) { + queue_group = &ctrl_info->queue_groups[i]; + queue_group->iq_element_array[RAID_PATH] = element_array; + queue_group->iq_element_array_bus_addr[RAID_PATH] = + ctrl_info->queue_memory_base_dma_handle + + (element_array - ctrl_info->queue_memory_base); + element_array += element_array_length_per_iq; + element_array = PTR_ALIGN(element_array, + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); + queue_group->iq_element_array[AIO_PATH] = element_array; + queue_group->iq_element_array_bus_addr[AIO_PATH] = + ctrl_info->queue_memory_base_dma_handle + + (element_array - ctrl_info->queue_memory_base); + element_array += element_array_length_per_iq; + element_array = PTR_ALIGN(element_array, + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); + } + + for (i = 0; i < ctrl_info->num_queue_groups; i++) { + queue_group = &ctrl_info->queue_groups[i]; + queue_group->oq_element_array = element_array; + queue_group->oq_element_array_bus_addr = + ctrl_info->queue_memory_base_dma_handle + + (element_array - ctrl_info->queue_memory_base); + element_array += element_array_length_per_oq; + element_array = PTR_ALIGN(element_array, + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); + } + + ctrl_info->event_queue.oq_element_array = element_array; + ctrl_info->event_queue.oq_element_array_bus_addr = + ctrl_info->queue_memory_base_dma_handle + + (element_array - ctrl_info->queue_memory_base); + element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * + PQI_EVENT_OQ_ELEMENT_LENGTH; + + next_queue_index = (void __iomem *)PTR_ALIGN(element_array, + PQI_OPERATIONAL_INDEX_ALIGNMENT); + + for (i = 0; i < ctrl_info->num_queue_groups; i++) { + queue_group = &ctrl_info->queue_groups[i]; + queue_group->iq_ci[RAID_PATH] = next_queue_index; + queue_group->iq_ci_bus_addr[RAID_PATH] = + ctrl_info->queue_memory_base_dma_handle + + (next_queue_index - + (void __iomem *)ctrl_info->queue_memory_base); + next_queue_index += sizeof(pqi_index_t); + next_queue_index = PTR_ALIGN(next_queue_index, + PQI_OPERATIONAL_INDEX_ALIGNMENT); + queue_group->iq_ci[AIO_PATH] = next_queue_index; + queue_group->iq_ci_bus_addr[AIO_PATH] = + ctrl_info->queue_memory_base_dma_handle + + (next_queue_index - + (void __iomem *)ctrl_info->queue_memory_base); + next_queue_index += sizeof(pqi_index_t); + next_queue_index = PTR_ALIGN(next_queue_index, + PQI_OPERATIONAL_INDEX_ALIGNMENT); + queue_group->oq_pi = next_queue_index; + queue_group->oq_pi_bus_addr = + ctrl_info->queue_memory_base_dma_handle + + (next_queue_index - + (void __iomem *)ctrl_info->queue_memory_base); + next_queue_index += sizeof(pqi_index_t); + next_queue_index = PTR_ALIGN(next_queue_index, + PQI_OPERATIONAL_INDEX_ALIGNMENT); + } + + ctrl_info->event_queue.oq_pi = next_queue_index; + ctrl_info->event_queue.oq_pi_bus_addr = + ctrl_info->queue_memory_base_dma_handle + + (next_queue_index - + (void __iomem *)ctrl_info->queue_memory_base); + + return 0; +} + +static void pqi_init_operational_queues(struct 
pqi_ctrl_info *ctrl_info) +{ + unsigned int i; + u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; + u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; + + /* + * Initialize the backpointers to the controller structure in + * each operational queue group structure. + */ + for (i = 0; i < ctrl_info->num_queue_groups; i++) + ctrl_info->queue_groups[i].ctrl_info = ctrl_info; + + /* + * Assign IDs to all operational queues. Note that the IDs + * assigned to operational IQs are independent of the IDs + * assigned to operational OQs. + */ + ctrl_info->event_queue.oq_id = next_oq_id++; + for (i = 0; i < ctrl_info->num_queue_groups; i++) { + ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; + ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; + ctrl_info->queue_groups[i].oq_id = next_oq_id++; + } + + /* + * Assign MSI-X table entry indexes to all queues. Note that the + * interrupt for the event queue is shared with the first queue group. + */ + ctrl_info->event_queue.int_msg_num = 0; + for (i = 0; i < ctrl_info->num_queue_groups; i++) + ctrl_info->queue_groups[i].int_msg_num = i; + + for (i = 0; i < ctrl_info->num_queue_groups; i++) { + spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); + spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); + INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); + INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); + } +} + +static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) +{ + size_t alloc_length; + struct pqi_admin_queues_aligned *admin_queues_aligned; + struct pqi_admin_queues *admin_queues; + + alloc_length = sizeof(struct pqi_admin_queues_aligned) + + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; + + ctrl_info->admin_queue_memory_base = + dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, + &ctrl_info->admin_queue_memory_base_dma_handle, + GFP_KERNEL); + + if (!ctrl_info->admin_queue_memory_base) + return -ENOMEM; + + ctrl_info->admin_queue_memory_length = alloc_length; + + admin_queues = &ctrl_info->admin_queues; + admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); + admin_queues->iq_element_array = + &admin_queues_aligned->iq_element_array; + admin_queues->oq_element_array = + &admin_queues_aligned->oq_element_array; + admin_queues->iq_ci = + (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci; + admin_queues->oq_pi = + (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; + + admin_queues->iq_element_array_bus_addr = + ctrl_info->admin_queue_memory_base_dma_handle + + (admin_queues->iq_element_array - + ctrl_info->admin_queue_memory_base); + admin_queues->oq_element_array_bus_addr = + ctrl_info->admin_queue_memory_base_dma_handle + + (admin_queues->oq_element_array - + ctrl_info->admin_queue_memory_base); + admin_queues->iq_ci_bus_addr = + ctrl_info->admin_queue_memory_base_dma_handle + + ((void __iomem *)admin_queues->iq_ci - + (void __iomem *)ctrl_info->admin_queue_memory_base); + admin_queues->oq_pi_bus_addr = + ctrl_info->admin_queue_memory_base_dma_handle + + ((void __iomem *)admin_queues->oq_pi - + (void __iomem *)ctrl_info->admin_queue_memory_base); + + return 0; +} + +#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ +#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 + +static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) +{ + struct pqi_device_registers __iomem *pqi_registers; + struct pqi_admin_queues *admin_queues; + unsigned long timeout; + u8 status; + u32 reg; + + pqi_registers = ctrl_info->pqi_registers; + 
admin_queues = &ctrl_info->admin_queues; + + writeq((u64)admin_queues->iq_element_array_bus_addr, + &pqi_registers->admin_iq_element_array_addr); + writeq((u64)admin_queues->oq_element_array_bus_addr, + &pqi_registers->admin_oq_element_array_addr); + writeq((u64)admin_queues->iq_ci_bus_addr, + &pqi_registers->admin_iq_ci_addr); + writeq((u64)admin_queues->oq_pi_bus_addr, + &pqi_registers->admin_oq_pi_addr); + + reg = PQI_ADMIN_IQ_NUM_ELEMENTS | + (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) | + (admin_queues->int_msg_num << 16); + writel(reg, &pqi_registers->admin_iq_num_elements); + + writel(PQI_CREATE_ADMIN_QUEUE_PAIR, + &pqi_registers->function_and_status_code); + + timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; + while (1) { + msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); + status = readb(&pqi_registers->function_and_status_code); + if (status == PQI_STATUS_IDLE) + break; + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + } + + /* + * The offset registers are not initialized to the correct + * offsets until *after* the create admin queue pair command + * completes successfully. + */ + admin_queues->iq_pi = ctrl_info->iomem_base + + PQI_DEVICE_REGISTERS_OFFSET + + readq(&pqi_registers->admin_iq_pi_offset); + admin_queues->oq_ci = ctrl_info->iomem_base + + PQI_DEVICE_REGISTERS_OFFSET + + readq(&pqi_registers->admin_oq_ci_offset); + + return 0; +} + +static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, + struct pqi_general_admin_request *request) +{ + struct pqi_admin_queues *admin_queues; + void *next_element; + pqi_index_t iq_pi; + + admin_queues = &ctrl_info->admin_queues; + iq_pi = admin_queues->iq_pi_copy; + + next_element = admin_queues->iq_element_array + + (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); + + memcpy(next_element, request, sizeof(*request)); + + iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; + admin_queues->iq_pi_copy = iq_pi; + + /* + * This write notifies the controller that an IU is available to be + * processed. 
+ */ + writel(iq_pi, admin_queues->iq_pi); +} + +#define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 + +static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, + struct pqi_general_admin_response *response) +{ + struct pqi_admin_queues *admin_queues; + pqi_index_t oq_pi; + pqi_index_t oq_ci; + unsigned long timeout; + + admin_queues = &ctrl_info->admin_queues; + oq_ci = admin_queues->oq_ci_copy; + + timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; + + while (1) { + oq_pi = readl(admin_queues->oq_pi); + if (oq_pi != oq_ci) + break; + if (time_after(jiffies, timeout)) { + dev_err(&ctrl_info->pci_dev->dev, + "timed out waiting for admin response\n"); + return -ETIMEDOUT; + } + if (!sis_is_firmware_running(ctrl_info)) + return -ENXIO; + usleep_range(1000, 2000); + } + + memcpy(response, admin_queues->oq_element_array + + (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); + + oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; + admin_queues->oq_ci_copy = oq_ci; + writel(oq_ci, admin_queues->oq_ci); + + return 0; +} + +static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, + struct pqi_queue_group *queue_group, enum pqi_io_path path, + struct pqi_io_request *io_request) +{ + struct pqi_io_request *next; + void *next_element; + pqi_index_t iq_pi; + pqi_index_t iq_ci; + size_t iu_length; + unsigned long flags; + unsigned int num_elements_needed; + unsigned int num_elements_to_end_of_queue; + size_t copy_count; + struct pqi_iu_header *request; + + spin_lock_irqsave(&queue_group->submit_lock[path], flags); + + if (io_request) { + io_request->queue_group = queue_group; + list_add_tail(&io_request->request_list_entry, + &queue_group->request_list[path]); + } + + iq_pi = queue_group->iq_pi_copy[path]; + + list_for_each_entry_safe(io_request, next, + &queue_group->request_list[path], request_list_entry) { + + request = io_request->iu; + + iu_length = get_unaligned_le16(&request->iu_length) + + PQI_REQUEST_HEADER_LENGTH; + num_elements_needed = + DIV_ROUND_UP(iu_length, + PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); + + iq_ci = readl(queue_group->iq_ci[path]); + + if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, + ctrl_info->num_elements_per_iq)) + break; + + put_unaligned_le16(queue_group->oq_id, + &request->response_queue_id); + + next_element = queue_group->iq_element_array[path] + + (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); + + num_elements_to_end_of_queue = + ctrl_info->num_elements_per_iq - iq_pi; + + if (num_elements_needed <= num_elements_to_end_of_queue) { + memcpy(next_element, request, iu_length); + } else { + copy_count = num_elements_to_end_of_queue * + PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; + memcpy(next_element, request, copy_count); + memcpy(queue_group->iq_element_array[path], + (u8 *)request + copy_count, + iu_length - copy_count); + } + + iq_pi = (iq_pi + num_elements_needed) % + ctrl_info->num_elements_per_iq; + + list_del(&io_request->request_list_entry); + } + + if (iq_pi != queue_group->iq_pi_copy[path]) { + queue_group->iq_pi_copy[path] = iq_pi; + /* + * This write notifies the controller that one or more IUs are + * available to be processed. 
+ */ + writel(iq_pi, queue_group->iq_pi[path]); + } + + spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); +} + +#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 + +static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, + struct completion *wait) +{ + int rc; + + while (1) { + if (wait_for_completion_io_timeout(wait, + PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { + rc = 0; + break; + } + + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) { + rc = -ENXIO; + break; + } + } + + return rc; +} + +static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, + void *context) +{ + struct completion *waiting = context; + + complete(waiting); +} + +static int pqi_process_raid_io_error_synchronous( + struct pqi_raid_error_info *error_info) +{ + int rc = -EIO; + + switch (error_info->data_out_result) { + case PQI_DATA_IN_OUT_GOOD: + if (error_info->status == SAM_STAT_GOOD) + rc = 0; + break; + case PQI_DATA_IN_OUT_UNDERFLOW: + if (error_info->status == SAM_STAT_GOOD || + error_info->status == SAM_STAT_CHECK_CONDITION) + rc = 0; + break; + case PQI_DATA_IN_OUT_ABORTED: + rc = PQI_CMD_STATUS_ABORTED; + break; + } + + return rc; +} + +static inline bool pqi_is_blockable_request(struct pqi_iu_header *request) +{ + return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0; +} + +static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, + struct pqi_iu_header *request, unsigned int flags, + struct pqi_raid_error_info *error_info) +{ + int rc = 0; + struct pqi_io_request *io_request; + size_t iu_length; + DECLARE_COMPLETION_ONSTACK(wait); + + if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { + if (down_interruptible(&ctrl_info->sync_request_sem)) + return -ERESTARTSYS; + } else { + down(&ctrl_info->sync_request_sem); + } + + pqi_ctrl_busy(ctrl_info); + /* + * Wait for other admin queue updates such as; + * config table changes, OFA memory updates, ... 
+ */ + if (pqi_is_blockable_request(request)) + pqi_wait_if_ctrl_blocked(ctrl_info); + + if (pqi_ctrl_offline(ctrl_info)) { + rc = -ENXIO; + goto out; + } + + io_request = pqi_alloc_io_request(ctrl_info, NULL); + + put_unaligned_le16(io_request->index, + &(((struct pqi_raid_path_request *)request)->request_id)); + + if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) + ((struct pqi_raid_path_request *)request)->error_index = + ((struct pqi_raid_path_request *)request)->request_id; + + iu_length = get_unaligned_le16(&request->iu_length) + + PQI_REQUEST_HEADER_LENGTH; + memcpy(io_request->iu, request, iu_length); + + io_request->io_complete_callback = pqi_raid_synchronous_complete; + io_request->context = &wait; + + pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, + io_request); + + pqi_wait_for_completion_io(ctrl_info, &wait); + + if (error_info) { + if (io_request->error_info) + memcpy(error_info, io_request->error_info, sizeof(*error_info)); + else + memset(error_info, 0, sizeof(*error_info)); + } else if (rc == 0 && io_request->error_info) { + rc = pqi_process_raid_io_error_synchronous(io_request->error_info); + } + + pqi_free_io_request(io_request); + +out: + pqi_ctrl_unbusy(ctrl_info); + up(&ctrl_info->sync_request_sem); + + return rc; +} + +static int pqi_validate_admin_response( + struct pqi_general_admin_response *response, u8 expected_function_code) +{ + if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) + return -EINVAL; + + if (get_unaligned_le16(&response->header.iu_length) != + PQI_GENERAL_ADMIN_IU_LENGTH) + return -EINVAL; + + if (response->function_code != expected_function_code) + return -EINVAL; + + if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) + return -EINVAL; + + return 0; +} + +static int pqi_submit_admin_request_synchronous( + struct pqi_ctrl_info *ctrl_info, + struct pqi_general_admin_request *request, + struct pqi_general_admin_response *response) +{ + int rc; + + pqi_submit_admin_request(ctrl_info, request); + + rc = pqi_poll_for_admin_response(ctrl_info, response); + + if (rc == 0) + rc = pqi_validate_admin_response(response, request->function_code); + + return rc; +} + +static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + struct pqi_general_admin_request request; + struct pqi_general_admin_response response; + struct pqi_device_capability *capability; + struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; + + capability = kmalloc(sizeof(*capability), GFP_KERNEL); + if (!capability) + return -ENOMEM; + + memset(&request, 0, sizeof(request)); + + request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; + put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, + &request.header.iu_length); + request.function_code = + PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; + put_unaligned_le32(sizeof(*capability), + &request.data.report_device_capability.buffer_length); + + rc = pqi_map_single(ctrl_info->pci_dev, + &request.data.report_device_capability.sg_descriptor, + capability, sizeof(*capability), + DMA_FROM_DEVICE); + if (rc) + goto out; + + rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response); + + pqi_pci_unmap(ctrl_info->pci_dev, + &request.data.report_device_capability.sg_descriptor, 1, + DMA_FROM_DEVICE); + + if (rc) + goto out; + + if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { + rc = -EIO; + goto out; + } + + ctrl_info->max_inbound_queues = + get_unaligned_le16(&capability->max_inbound_queues); + ctrl_info->max_elements_per_iq = + 
get_unaligned_le16(&capability->max_elements_per_iq); + ctrl_info->max_iq_element_length = + get_unaligned_le16(&capability->max_iq_element_length) + * 16; + ctrl_info->max_outbound_queues = + get_unaligned_le16(&capability->max_outbound_queues); + ctrl_info->max_elements_per_oq = + get_unaligned_le16(&capability->max_elements_per_oq); + ctrl_info->max_oq_element_length = + get_unaligned_le16(&capability->max_oq_element_length) + * 16; + + sop_iu_layer_descriptor = + &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; + + ctrl_info->max_inbound_iu_length_per_firmware = + get_unaligned_le16( + &sop_iu_layer_descriptor->max_inbound_iu_length); + ctrl_info->inbound_spanning_supported = + sop_iu_layer_descriptor->inbound_spanning_supported; + ctrl_info->outbound_spanning_supported = + sop_iu_layer_descriptor->outbound_spanning_supported; + +out: + kfree(capability); + + return rc; +} + +static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) +{ + if (ctrl_info->max_iq_element_length < + PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { + dev_err(&ctrl_info->pci_dev->dev, + "max. inbound queue element length of %d is less than the required length of %d\n", + ctrl_info->max_iq_element_length, + PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); + return -EINVAL; + } + + if (ctrl_info->max_oq_element_length < + PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { + dev_err(&ctrl_info->pci_dev->dev, + "max. outbound queue element length of %d is less than the required length of %d\n", + ctrl_info->max_oq_element_length, + PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); + return -EINVAL; + } + + if (ctrl_info->max_inbound_iu_length_per_firmware < + PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { + dev_err(&ctrl_info->pci_dev->dev, + "max. inbound IU length of %u is less than the min. required length of %d\n", + ctrl_info->max_inbound_iu_length_per_firmware, + PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); + return -EINVAL; + } + + if (!ctrl_info->inbound_spanning_supported) { + dev_err(&ctrl_info->pci_dev->dev, + "the controller does not support inbound spanning\n"); + return -EINVAL; + } + + if (ctrl_info->outbound_spanning_supported) { + dev_err(&ctrl_info->pci_dev->dev, + "the controller supports outbound spanning but this driver does not\n"); + return -EINVAL; + } + + return 0; +} + +static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + struct pqi_event_queue *event_queue; + struct pqi_general_admin_request request; + struct pqi_general_admin_response response; + + event_queue = &ctrl_info->event_queue; + + /* + * Create OQ (Outbound Queue - device to host queue) to dedicate + * to events. 
+ */ + memset(&request, 0, sizeof(request)); + request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; + put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, + &request.header.iu_length); + request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; + put_unaligned_le16(event_queue->oq_id, + &request.data.create_operational_oq.queue_id); + put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, + &request.data.create_operational_oq.element_array_addr); + put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, + &request.data.create_operational_oq.pi_addr); + put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, + &request.data.create_operational_oq.num_elements); + put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, + &request.data.create_operational_oq.element_length); + request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; + put_unaligned_le16(event_queue->int_msg_num, + &request.data.create_operational_oq.int_msg_num); + + rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, + &response); + if (rc) + return rc; + + event_queue->oq_ci = ctrl_info->iomem_base + + PQI_DEVICE_REGISTERS_OFFSET + + get_unaligned_le64( + &response.data.create_operational_oq.oq_ci_offset); + + return 0; +} + +static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, + unsigned int group_number) +{ + int rc; + struct pqi_queue_group *queue_group; + struct pqi_general_admin_request request; + struct pqi_general_admin_response response; + + queue_group = &ctrl_info->queue_groups[group_number]; + + /* + * Create IQ (Inbound Queue - host to device queue) for + * RAID path. + */ + memset(&request, 0, sizeof(request)); + request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; + put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, + &request.header.iu_length); + request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; + put_unaligned_le16(queue_group->iq_id[RAID_PATH], + &request.data.create_operational_iq.queue_id); + put_unaligned_le64( + (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], + &request.data.create_operational_iq.element_array_addr); + put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], + &request.data.create_operational_iq.ci_addr); + put_unaligned_le16(ctrl_info->num_elements_per_iq, + &request.data.create_operational_iq.num_elements); + put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, + &request.data.create_operational_iq.element_length); + request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; + + rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, + &response); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error creating inbound RAID queue\n"); + return rc; + } + + queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + + PQI_DEVICE_REGISTERS_OFFSET + + get_unaligned_le64( + &response.data.create_operational_iq.iq_pi_offset); + + /* + * Create IQ (Inbound Queue - host to device queue) for + * Advanced I/O (AIO) path. 
+ */ + memset(&request, 0, sizeof(request)); + request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; + put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, + &request.header.iu_length); + request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; + put_unaligned_le16(queue_group->iq_id[AIO_PATH], + &request.data.create_operational_iq.queue_id); + put_unaligned_le64((u64)queue_group-> + iq_element_array_bus_addr[AIO_PATH], + &request.data.create_operational_iq.element_array_addr); + put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], + &request.data.create_operational_iq.ci_addr); + put_unaligned_le16(ctrl_info->num_elements_per_iq, + &request.data.create_operational_iq.num_elements); + put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, + &request.data.create_operational_iq.element_length); + request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; + + rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, + &response); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error creating inbound AIO queue\n"); + return rc; + } + + queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + + PQI_DEVICE_REGISTERS_OFFSET + + get_unaligned_le64( + &response.data.create_operational_iq.iq_pi_offset); + + /* + * Designate the 2nd IQ as the AIO path. By default, all IQs are + * assumed to be for RAID path I/O unless we change the queue's + * property. + */ + memset(&request, 0, sizeof(request)); + request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; + put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, + &request.header.iu_length); + request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; + put_unaligned_le16(queue_group->iq_id[AIO_PATH], + &request.data.change_operational_iq_properties.queue_id); + put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, + &request.data.change_operational_iq_properties.vendor_specific); + + rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, + &response); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error changing queue property\n"); + return rc; + } + + /* + * Create OQ (Outbound Queue - device to host queue). 
+ */ + memset(&request, 0, sizeof(request)); + request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; + put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, + &request.header.iu_length); + request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; + put_unaligned_le16(queue_group->oq_id, + &request.data.create_operational_oq.queue_id); + put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, + &request.data.create_operational_oq.element_array_addr); + put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, + &request.data.create_operational_oq.pi_addr); + put_unaligned_le16(ctrl_info->num_elements_per_oq, + &request.data.create_operational_oq.num_elements); + put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, + &request.data.create_operational_oq.element_length); + request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; + put_unaligned_le16(queue_group->int_msg_num, + &request.data.create_operational_oq.int_msg_num); + + rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, + &response); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error creating outbound queue\n"); + return rc; + } + + queue_group->oq_ci = ctrl_info->iomem_base + + PQI_DEVICE_REGISTERS_OFFSET + + get_unaligned_le64( + &response.data.create_operational_oq.oq_ci_offset); + + return 0; +} + +static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + unsigned int i; + + rc = pqi_create_event_queue(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error creating event queue\n"); + return rc; + } + + for (i = 0; i < ctrl_info->num_queue_groups; i++) { + rc = pqi_create_queue_group(ctrl_info, i); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error creating queue group number %u/%u\n", + i, ctrl_info->num_queue_groups); + return rc; + } + } + + return 0; +} + +#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ + struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS) + +static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, + bool enable_events) +{ + int rc; + unsigned int i; + struct pqi_event_config *event_config; + struct pqi_event_descriptor *event_descriptor; + struct pqi_general_management_request request; + + event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, + GFP_KERNEL); + if (!event_config) + return -ENOMEM; + + memset(&request, 0, sizeof(request)); + + request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; + put_unaligned_le16(offsetof(struct pqi_general_management_request, + data.report_event_configuration.sg_descriptors[1]) - + PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); + put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, + &request.data.report_event_configuration.buffer_length); + + rc = pqi_map_single(ctrl_info->pci_dev, + request.data.report_event_configuration.sg_descriptors, + event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, + DMA_FROM_DEVICE); + if (rc) + goto out; + + rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); + + pqi_pci_unmap(ctrl_info->pci_dev, + request.data.report_event_configuration.sg_descriptors, 1, + DMA_FROM_DEVICE); + + if (rc) + goto out; + + for (i = 0; i < event_config->num_event_descriptors; i++) { + event_descriptor = &event_config->descriptors[i]; + if (enable_events && + pqi_is_supported_event(event_descriptor->event_type)) + put_unaligned_le16(ctrl_info->event_queue.oq_id, + &event_descriptor->oq_id); + else + put_unaligned_le16(0, &event_descriptor->oq_id); + } + + memset(&request, 0, sizeof(request)); 
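The loop just above walks the reported event descriptors and points every supported event type at the driver's event queue OQ ID (or clears the OQ ID to disable it) before the modified configuration is written back with SET_VENDOR_EVENT_CONFIG below. A small userspace sketch of that route-or-disable pass follows; the toy_* struct, the supported-type check, and the constants are illustrative stand-ins, not definitions from this patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for struct pqi_event_descriptor (event_type + oq_id only). */
struct toy_event_descriptor {
	uint8_t event_type;
	uint16_t oq_id;
};

/* Toy "supported event" check; the driver uses pqi_is_supported_event(). */
static bool toy_event_supported(uint8_t event_type)
{
	return event_type == 0x1 || event_type == 0x2;
}

/*
 * Route every supported event type to the event queue and disable the
 * rest by clearing their OQ ID, as the loop above does before the
 * configuration is sent back to the controller.
 */
static void toy_route_events(struct toy_event_descriptor *descriptors,
	size_t count, uint16_t event_oq_id, bool enable_events)
{
	size_t i;

	for (i = 0; i < count; i++) {
		if (enable_events && toy_event_supported(descriptors[i].event_type))
			descriptors[i].oq_id = event_oq_id;
		else
			descriptors[i].oq_id = 0;
	}
}

int main(void)
{
	struct toy_event_descriptor descriptors[] = {
		{ .event_type = 0x1 }, { .event_type = 0x7 }, { .event_type = 0x2 },
	};
	size_t i;

	toy_route_events(descriptors, 3, 42, true);
	for (i = 0; i < 3; i++)
		printf("type 0x%x -> oq_id %u\n",
			(unsigned int)descriptors[i].event_type,
			(unsigned int)descriptors[i].oq_id);
	return 0;
}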
+ + request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; + put_unaligned_le16(offsetof(struct pqi_general_management_request, + data.report_event_configuration.sg_descriptors[1]) - + PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); + put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, + &request.data.report_event_configuration.buffer_length); + + rc = pqi_map_single(ctrl_info->pci_dev, + request.data.report_event_configuration.sg_descriptors, + event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, + DMA_TO_DEVICE); + if (rc) + goto out; + + rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); + + pqi_pci_unmap(ctrl_info->pci_dev, + request.data.report_event_configuration.sg_descriptors, 1, + DMA_TO_DEVICE); + +out: + kfree(event_config); + + return rc; +} + +static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) +{ + return pqi_configure_events(ctrl_info, true); +} + +static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) +{ + unsigned int i; + struct device *dev; + size_t sg_chain_buffer_length; + struct pqi_io_request *io_request; + + if (!ctrl_info->io_request_pool) + return; + + dev = &ctrl_info->pci_dev->dev; + sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; + io_request = ctrl_info->io_request_pool; + + for (i = 0; i < ctrl_info->max_io_slots; i++) { + kfree(io_request->iu); + if (!io_request->sg_chain_buffer) + break; + dma_free_coherent(dev, sg_chain_buffer_length, + io_request->sg_chain_buffer, + io_request->sg_chain_buffer_dma_handle); + io_request++; + } + + kfree(ctrl_info->io_request_pool); + ctrl_info->io_request_pool = NULL; +} + +static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) +{ + ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, + ctrl_info->error_buffer_length, + &ctrl_info->error_buffer_dma_handle, + GFP_KERNEL); + if (!ctrl_info->error_buffer) + return -ENOMEM; + + return 0; +} + +static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) +{ + unsigned int i; + void *sg_chain_buffer; + size_t sg_chain_buffer_length; + dma_addr_t sg_chain_buffer_dma_handle; + struct device *dev; + struct pqi_io_request *io_request; + + ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots, + sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); + + if (!ctrl_info->io_request_pool) { + dev_err(&ctrl_info->pci_dev->dev, + "failed to allocate I/O request pool\n"); + goto error; + } + + dev = &ctrl_info->pci_dev->dev; + sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; + io_request = ctrl_info->io_request_pool; + + for (i = 0; i < ctrl_info->max_io_slots; i++) { + io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); + + if (!io_request->iu) { + dev_err(&ctrl_info->pci_dev->dev, + "failed to allocate IU buffers\n"); + goto error; + } + + sg_chain_buffer = dma_alloc_coherent(dev, + sg_chain_buffer_length, &sg_chain_buffer_dma_handle, + GFP_KERNEL); + + if (!sg_chain_buffer) { + dev_err(&ctrl_info->pci_dev->dev, + "failed to allocate PQI scatter-gather chain buffers\n"); + goto error; + } + + io_request->index = i; + io_request->sg_chain_buffer = sg_chain_buffer; + io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle; + io_request++; + } + + return 0; + +error: + pqi_free_all_io_requests(ctrl_info); + + return -ENOMEM; +} + +/* + * Calculate required resources that are sized based on max. outstanding + * requests and max. transfer size. 
+ */ + +static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) +{ + u32 max_transfer_size; + u32 max_sg_entries; + + ctrl_info->scsi_ml_can_queue = + ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; + ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; + + ctrl_info->error_buffer_length = + ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; + + if (reset_devices) + max_transfer_size = min(ctrl_info->max_transfer_size, + PQI_MAX_TRANSFER_SIZE_KDUMP); + else + max_transfer_size = min(ctrl_info->max_transfer_size, + PQI_MAX_TRANSFER_SIZE); + + max_sg_entries = max_transfer_size / PAGE_SIZE; + + /* +1 to cover when the buffer is not page-aligned. */ + max_sg_entries++; + + max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); + + max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; + + ctrl_info->sg_chain_buffer_length = + (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + + PQI_EXTRA_SGL_MEMORY; + ctrl_info->sg_tablesize = max_sg_entries; + ctrl_info->max_sectors = max_transfer_size / 512; +} + +static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) +{ + int num_queue_groups; + u16 num_elements_per_iq; + u16 num_elements_per_oq; + + if (reset_devices) { + num_queue_groups = 1; + } else { + int num_cpus; + int max_queue_groups; + + max_queue_groups = min(ctrl_info->max_inbound_queues / 2, + ctrl_info->max_outbound_queues - 1); + max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); + + num_cpus = num_online_cpus(); + num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); + num_queue_groups = min(num_queue_groups, max_queue_groups); + } + + ctrl_info->num_queue_groups = num_queue_groups; + + /* + * Make sure that the max. inbound IU length is an even multiple + * of our inbound element length. + */ + ctrl_info->max_inbound_iu_length = + (ctrl_info->max_inbound_iu_length_per_firmware / + PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * + PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; + + num_elements_per_iq = + (ctrl_info->max_inbound_iu_length / + PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); + + /* Add one because one element in each queue is unusable. 
*/ + num_elements_per_iq++; + + num_elements_per_iq = min(num_elements_per_iq, + ctrl_info->max_elements_per_iq); + + num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; + num_elements_per_oq = min(num_elements_per_oq, + ctrl_info->max_elements_per_oq); + + ctrl_info->num_elements_per_iq = num_elements_per_iq; + ctrl_info->num_elements_per_oq = num_elements_per_oq; + + ctrl_info->max_sg_per_iu = + ((ctrl_info->max_inbound_iu_length - + PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / + sizeof(struct pqi_sg_descriptor)) + + PQI_MAX_EMBEDDED_SG_DESCRIPTORS; + + ctrl_info->max_sg_per_r56_iu = + ((ctrl_info->max_inbound_iu_length - + PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / + sizeof(struct pqi_sg_descriptor)) + + PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS; +} + +static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor, + struct scatterlist *sg) +{ + u64 address = (u64)sg_dma_address(sg); + unsigned int length = sg_dma_len(sg); + + put_unaligned_le64(address, &sg_descriptor->address); + put_unaligned_le32(length, &sg_descriptor->length); + put_unaligned_le32(0, &sg_descriptor->flags); +} + +static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor, + struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request, + int max_sg_per_iu, bool *chained) +{ + int i; + unsigned int num_sg_in_iu; + + *chained = false; + i = 0; + num_sg_in_iu = 0; + max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */ + + while (1) { + pqi_set_sg_descriptor(sg_descriptor, sg); + if (!*chained) + num_sg_in_iu++; + i++; + if (i == sg_count) + break; + sg_descriptor++; + if (i == max_sg_per_iu) { + put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle, + &sg_descriptor->address); + put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor), + &sg_descriptor->length); + put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags); + *chained = true; + num_sg_in_iu++; + sg_descriptor = io_request->sg_chain_buffer; + } + sg = sg_next(sg); + } + + put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); + + return num_sg_in_iu; +} + +static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, + struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, + struct pqi_io_request *io_request) +{ + u16 iu_length; + int sg_count; + bool chained; + unsigned int num_sg_in_iu; + struct scatterlist *sg; + struct pqi_sg_descriptor *sg_descriptor; + + sg_count = scsi_dma_map(scmd); + if (sg_count < 0) + return sg_count; + + iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - + PQI_REQUEST_HEADER_LENGTH; + + if (sg_count == 0) + goto out; + + sg = scsi_sglist(scmd); + sg_descriptor = request->sg_descriptors; + + num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, + ctrl_info->max_sg_per_iu, &chained); + + request->partial = chained; + iu_length += num_sg_in_iu * sizeof(*sg_descriptor); + +out: + put_unaligned_le16(iu_length, &request->header.iu_length); + + return 0; +} + +static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info, + struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd, + struct pqi_io_request *io_request) +{ + u16 iu_length; + int sg_count; + bool chained; + unsigned int num_sg_in_iu; + struct scatterlist *sg; + struct pqi_sg_descriptor *sg_descriptor; + + sg_count = scsi_dma_map(scmd); + if (sg_count < 0) + return sg_count; + + iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) - + PQI_REQUEST_HEADER_LENGTH; + num_sg_in_iu = 0; + + if (sg_count == 0) + goto out; + + sg 
= scsi_sglist(scmd); + sg_descriptor = request->sg_descriptors; + + num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, + ctrl_info->max_sg_per_iu, &chained); + + request->partial = chained; + iu_length += num_sg_in_iu * sizeof(*sg_descriptor); + +out: + put_unaligned_le16(iu_length, &request->header.iu_length); + request->num_sg_descriptors = num_sg_in_iu; + + return 0; +} + +static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info, + struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd, + struct pqi_io_request *io_request) +{ + u16 iu_length; + int sg_count; + bool chained; + unsigned int num_sg_in_iu; + struct scatterlist *sg; + struct pqi_sg_descriptor *sg_descriptor; + + sg_count = scsi_dma_map(scmd); + if (sg_count < 0) + return sg_count; + + iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) - + PQI_REQUEST_HEADER_LENGTH; + num_sg_in_iu = 0; + + if (sg_count != 0) { + sg = scsi_sglist(scmd); + sg_descriptor = request->sg_descriptors; + + num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, + ctrl_info->max_sg_per_r56_iu, &chained); + + request->partial = chained; + iu_length += num_sg_in_iu * sizeof(*sg_descriptor); + } + + put_unaligned_le16(iu_length, &request->header.iu_length); + request->num_sg_descriptors = num_sg_in_iu; + + return 0; +} + +static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, + struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, + struct pqi_io_request *io_request) +{ + u16 iu_length; + int sg_count; + bool chained; + unsigned int num_sg_in_iu; + struct scatterlist *sg; + struct pqi_sg_descriptor *sg_descriptor; + + sg_count = scsi_dma_map(scmd); + if (sg_count < 0) + return sg_count; + + iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - + PQI_REQUEST_HEADER_LENGTH; + num_sg_in_iu = 0; + + if (sg_count == 0) + goto out; + + sg = scsi_sglist(scmd); + sg_descriptor = request->sg_descriptors; + + num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, + ctrl_info->max_sg_per_iu, &chained); + + request->partial = chained; + iu_length += num_sg_in_iu * sizeof(*sg_descriptor); + +out: + put_unaligned_le16(iu_length, &request->header.iu_length); + request->num_sg_descriptors = num_sg_in_iu; + + return 0; +} + +static void pqi_raid_io_complete(struct pqi_io_request *io_request, + void *context) +{ + struct scsi_cmnd *scmd; + + scmd = io_request->scmd; + pqi_free_io_request(io_request); + scsi_dma_unmap(scmd); + pqi_scsi_done(scmd); +} + +static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, + struct pqi_queue_group *queue_group, bool io_high_prio) +{ + int rc; + size_t cdb_length; + struct pqi_io_request *io_request; + struct pqi_raid_path_request *request; + + io_request = pqi_alloc_io_request(ctrl_info, scmd); + if (!io_request) + return SCSI_MLQUEUE_HOST_BUSY; + + io_request->io_complete_callback = pqi_raid_io_complete; + io_request->scmd = scmd; + + request = io_request->iu; + memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors)); + + request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; + put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); + request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; + request->command_priority = io_high_prio; + put_unaligned_le16(io_request->index, &request->request_id); + request->error_index = request->request_id; + memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); + 
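pqi_build_sg_list() above reserves the last embedded descriptor slot for a chain entry: once the embedded area is about to overflow, it writes a descriptor flagged CISS_SG_CHAIN whose address points at the per-request chain buffer, continues filling descriptors there, and marks the final one CISS_SG_LAST. The standalone sketch below mirrors that flow with plain arrays; all toy_* types, the flag values, and main() are illustrative stand-ins, not code from the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_SG_CHAIN	0x80000000u
#define TOY_SG_LAST	0x40000000u

/* Toy stand-ins for struct pqi_sg_descriptor and a mapped DMA segment. */
struct toy_sg_descriptor {
	uint64_t address;
	uint32_t length;
	uint32_t flags;
};

struct toy_segment {
	uint64_t dma_addr;
	uint32_t len;
};

/*
 * Fill embedded descriptors; when the reserved last slot is reached,
 * drop in a chain descriptor pointing at the chain buffer and keep
 * filling there.  Returns how many descriptors live in the IU itself
 * (the chain entry included).  Requires seg_count >= 1.
 */
static unsigned int toy_build_sg_list(struct toy_sg_descriptor *sg_descriptor,
	const struct toy_segment *seg, unsigned int sg_count,
	struct toy_sg_descriptor *chain_buffer, uint64_t chain_dma_addr,
	unsigned int max_sg_per_iu, bool *chained)
{
	unsigned int i = 0;
	unsigned int num_sg_in_iu = 0;

	*chained = false;
	max_sg_per_iu--;	/* leave room for the chain marker */

	while (1) {
		sg_descriptor->address = seg->dma_addr;
		sg_descriptor->length = seg->len;
		sg_descriptor->flags = 0;
		if (!*chained)
			num_sg_in_iu++;
		if (++i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			sg_descriptor->address = chain_dma_addr;
			sg_descriptor->length =
				(sg_count - num_sg_in_iu) * sizeof(*sg_descriptor);
			sg_descriptor->flags = TOY_SG_CHAIN;
			*chained = true;
			num_sg_in_iu++;
			sg_descriptor = chain_buffer;
		}
		seg++;
	}

	sg_descriptor->flags = TOY_SG_LAST;

	return num_sg_in_iu;
}

int main(void)
{
	struct toy_segment segs[6] = {
		{ 0x1000, 512 }, { 0x2000, 512 }, { 0x3000, 512 },
		{ 0x4000, 512 }, { 0x5000, 512 }, { 0x6000, 512 },
	};
	struct toy_sg_descriptor embedded[4], chain[8];
	bool chained;
	unsigned int n;

	n = toy_build_sg_list(embedded, segs, 6, chain, 0xc0000000ull, 4, &chained);
	printf("embedded descriptors: %u, chained: %d\n", n, chained);
	return 0;
}

With four embedded slots and six segments, the example ends up with three data descriptors plus one chain entry inside the IU and the remaining three segments in the chain buffer, which is the same split the driver's builders report back through num_sg_in_iu.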
request->ml_device_lun_number = (u8)scmd->device->lun; + + cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); + memcpy(request->cdb, scmd->cmnd, cdb_length); + + switch (cdb_length) { + case 6: + case 10: + case 12: + case 16: + request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; + break; + case 20: + request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4; + break; + case 24: + request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8; + break; + case 28: + request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12; + break; + case 32: + default: + request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16; + break; + } + + switch (scmd->sc_data_direction) { + case DMA_FROM_DEVICE: + request->data_direction = SOP_READ_FLAG; + break; + case DMA_TO_DEVICE: + request->data_direction = SOP_WRITE_FLAG; + break; + case DMA_NONE: + request->data_direction = SOP_NO_DIRECTION_FLAG; + break; + case DMA_BIDIRECTIONAL: + request->data_direction = SOP_BIDIRECTIONAL; + break; + default: + dev_err(&ctrl_info->pci_dev->dev, + "unknown data direction: %d\n", + scmd->sc_data_direction); + break; + } + + rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); + if (rc) { + pqi_free_io_request(io_request); + return SCSI_MLQUEUE_HOST_BUSY; + } + + pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); + + return 0; +} + +static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, + struct pqi_queue_group *queue_group) +{ + bool io_high_prio; + + io_high_prio = pqi_is_io_high_priority(device, scmd); + + return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio); +} + +static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) +{ + struct scsi_cmnd *scmd; + struct pqi_scsi_dev *device; + struct pqi_ctrl_info *ctrl_info; + + if (!io_request->raid_bypass) + return false; + + scmd = io_request->scmd; + if ((scmd->result & 0xff) == SAM_STAT_GOOD) + return false; + if (host_byte(scmd->result) == DID_NO_CONNECT) + return false; + + device = scmd->device->hostdata; + if (pqi_device_offline(device) || pqi_device_in_remove(device)) + return false; + + ctrl_info = shost_to_hba(scmd->device->host); + if (pqi_ctrl_offline(ctrl_info)) + return false; + + return true; +} + +static void pqi_aio_io_complete(struct pqi_io_request *io_request, + void *context) +{ + struct scsi_cmnd *scmd; + + scmd = io_request->scmd; + scsi_dma_unmap(scmd); + if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) { + set_host_byte(scmd, DID_IMM_RETRY); + pqi_cmd_priv(scmd)->this_residual++; + } + + pqi_free_io_request(io_request); + pqi_scsi_done(scmd); +} + +static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, + struct pqi_queue_group *queue_group) +{ + bool io_high_prio; + + io_high_prio = pqi_is_io_high_priority(device, scmd); + + return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, + scmd->cmnd, scmd->cmd_len, queue_group, NULL, + false, io_high_prio); +} + +static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, + struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, + unsigned int cdb_length, struct pqi_queue_group *queue_group, + struct pqi_encryption_info *encryption_info, bool raid_bypass, + bool io_high_prio) +{ + int rc; + struct pqi_io_request *io_request; + struct pqi_aio_path_request *request; + + io_request = pqi_alloc_io_request(ctrl_info, 
scmd); + if (!io_request) + return SCSI_MLQUEUE_HOST_BUSY; + + io_request->io_complete_callback = pqi_aio_io_complete; + io_request->scmd = scmd; + io_request->raid_bypass = raid_bypass; + + request = io_request->iu; + memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors)); + + request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; + put_unaligned_le32(aio_handle, &request->nexus_id); + put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); + request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; + request->command_priority = io_high_prio; + put_unaligned_le16(io_request->index, &request->request_id); + request->error_index = request->request_id; + if (!raid_bypass && ctrl_info->multi_lun_device_supported) + put_unaligned_le64(scmd->device->lun << 8, &request->lun_number); + if (cdb_length > sizeof(request->cdb)) + cdb_length = sizeof(request->cdb); + request->cdb_length = cdb_length; + memcpy(request->cdb, cdb, cdb_length); + + switch (scmd->sc_data_direction) { + case DMA_TO_DEVICE: + request->data_direction = SOP_READ_FLAG; + break; + case DMA_FROM_DEVICE: + request->data_direction = SOP_WRITE_FLAG; + break; + case DMA_NONE: + request->data_direction = SOP_NO_DIRECTION_FLAG; + break; + case DMA_BIDIRECTIONAL: + request->data_direction = SOP_BIDIRECTIONAL; + break; + default: + dev_err(&ctrl_info->pci_dev->dev, + "unknown data direction: %d\n", + scmd->sc_data_direction); + break; + } + + if (encryption_info) { + request->encryption_enable = true; + put_unaligned_le16(encryption_info->data_encryption_key_index, + &request->data_encryption_key_index); + put_unaligned_le32(encryption_info->encrypt_tweak_lower, + &request->encrypt_tweak_lower); + put_unaligned_le32(encryption_info->encrypt_tweak_upper, + &request->encrypt_tweak_upper); + } + + rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); + if (rc) { + pqi_free_io_request(io_request); + return SCSI_MLQUEUE_HOST_BUSY; + } + + pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); + + return 0; +} + +static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, + struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, + struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, + struct pqi_scsi_dev_raid_map_data *rmd) +{ + int rc; + struct pqi_io_request *io_request; + struct pqi_aio_r1_path_request *r1_request; + + io_request = pqi_alloc_io_request(ctrl_info, scmd); + if (!io_request) + return SCSI_MLQUEUE_HOST_BUSY; + + io_request->io_complete_callback = pqi_aio_io_complete; + io_request->scmd = scmd; + io_request->raid_bypass = true; + + r1_request = io_request->iu; + memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors)); + + r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO; + put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id); + r1_request->num_drives = rmd->num_it_nexus_entries; + put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1); + put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2); + if (rmd->num_it_nexus_entries == 3) + put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3); + + put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length); + r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; + put_unaligned_le16(io_request->index, &r1_request->request_id); + r1_request->error_index = r1_request->request_id; + if (rmd->cdb_length > sizeof(r1_request->cdb)) + rmd->cdb_length = sizeof(r1_request->cdb); + r1_request->cdb_length = rmd->cdb_length; + 
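The direction switch just below this point, together with the RAID 1/5/6 write helpers that hard-code SOP_READ_FLAG under a "direction is always write" comment, suggests the AIO-path direction flag is named from the controller's point of view: on a host write the controller reads the buffer out of host memory. That reading is an inference from the code pairing, not something the patch states. The toy_* enum values and helper below are illustrative stand-ins only.

#include <stdio.h>

/* Toy mirrors of the DMA direction and SOP direction flag values. */
enum toy_dma_dir {
	TOY_DMA_BIDIRECTIONAL,
	TOY_DMA_TO_DEVICE,
	TOY_DMA_FROM_DEVICE,
	TOY_DMA_NONE,
};

enum toy_sop_flag {
	TOY_SOP_NO_DIRECTION,
	TOY_SOP_WRITE,
	TOY_SOP_READ,
	TOY_SOP_BIDIRECTIONAL,
};

/*
 * AIO-path style mapping: a host-to-device transfer is tagged with the
 * READ flag because it is the controller that reads the data out of
 * host memory, and the reverse holds for device-to-host transfers.
 */
static enum toy_sop_flag toy_aio_data_direction(enum toy_dma_dir dir)
{
	switch (dir) {
	case TOY_DMA_TO_DEVICE:
		return TOY_SOP_READ;
	case TOY_DMA_FROM_DEVICE:
		return TOY_SOP_WRITE;
	case TOY_DMA_BIDIRECTIONAL:
		return TOY_SOP_BIDIRECTIONAL;
	case TOY_DMA_NONE:
	default:
		return TOY_SOP_NO_DIRECTION;
	}
}

int main(void)
{
	printf("host write maps to SOP flag %d (READ from the controller's side)\n",
		(int)toy_aio_data_direction(TOY_DMA_TO_DEVICE));
	return 0;
}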
memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length); + + /* The direction is always write. */ + r1_request->data_direction = SOP_READ_FLAG; + + if (encryption_info) { + r1_request->encryption_enable = true; + put_unaligned_le16(encryption_info->data_encryption_key_index, + &r1_request->data_encryption_key_index); + put_unaligned_le32(encryption_info->encrypt_tweak_lower, + &r1_request->encrypt_tweak_lower); + put_unaligned_le32(encryption_info->encrypt_tweak_upper, + &r1_request->encrypt_tweak_upper); + } + + rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request); + if (rc) { + pqi_free_io_request(io_request); + return SCSI_MLQUEUE_HOST_BUSY; + } + + pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); + + return 0; +} + +static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, + struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, + struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, + struct pqi_scsi_dev_raid_map_data *rmd) +{ + int rc; + struct pqi_io_request *io_request; + struct pqi_aio_r56_path_request *r56_request; + + io_request = pqi_alloc_io_request(ctrl_info, scmd); + if (!io_request) + return SCSI_MLQUEUE_HOST_BUSY; + io_request->io_complete_callback = pqi_aio_io_complete; + io_request->scmd = scmd; + io_request->raid_bypass = true; + + r56_request = io_request->iu; + memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors)); + + if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51) + r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO; + else + r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO; + + put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id); + put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus); + put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus); + if (rmd->raid_level == SA_RAID_6) { + put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus); + r56_request->xor_multiplier = rmd->xor_mult; + } + put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length); + r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; + put_unaligned_le64(rmd->row, &r56_request->row); + + put_unaligned_le16(io_request->index, &r56_request->request_id); + r56_request->error_index = r56_request->request_id; + + if (rmd->cdb_length > sizeof(r56_request->cdb)) + rmd->cdb_length = sizeof(r56_request->cdb); + r56_request->cdb_length = rmd->cdb_length; + memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length); + + /* The direction is always write. */ + r56_request->data_direction = SOP_READ_FLAG; + + if (encryption_info) { + r56_request->encryption_enable = true; + put_unaligned_le16(encryption_info->data_encryption_key_index, + &r56_request->data_encryption_key_index); + put_unaligned_le32(encryption_info->encrypt_tweak_lower, + &r56_request->encrypt_tweak_lower); + put_unaligned_le32(encryption_info->encrypt_tweak_upper, + &r56_request->encrypt_tweak_upper); + } + + rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request); + if (rc) { + pqi_free_io_request(io_request); + return SCSI_MLQUEUE_HOST_BUSY; + } + + pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); + + return 0; +} + +static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, + struct scsi_cmnd *scmd) +{ + /* + * We are setting host_tagset = 1 during init. 
+ */ + return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd))); +} + +static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd) +{ + if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) + return false; + + return pqi_cmd_priv(scmd)->this_residual == 0; +} + +/* + * This function gets called just before we hand the completed SCSI request + * back to the SML. + */ + +void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) +{ + struct pqi_scsi_dev *device; + struct completion *wait; + + if (!scmd->device) { + set_host_byte(scmd, DID_NO_CONNECT); + return; + } + + device = scmd->device->hostdata; + if (!device) { + set_host_byte(scmd, DID_NO_CONNECT); + return; + } + + atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]); + + wait = (struct completion *)xchg(&scmd->host_scribble, NULL); + if (wait != PQI_NO_COMPLETION) + complete(wait); +} + +static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, + struct scsi_cmnd *scmd) +{ + u32 oldest_jiffies; + u8 lru_index; + int i; + int rc; + struct pqi_scsi_dev *device; + struct pqi_stream_data *pqi_stream_data; + struct pqi_scsi_dev_raid_map_data rmd; + + if (!ctrl_info->enable_stream_detection) + return false; + + rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); + if (rc) + return false; + + /* Check writes only. */ + if (!rmd.is_write) + return false; + + device = scmd->device->hostdata; + + /* Check for RAID 5/6 streams. */ + if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) + return false; + + /* + * If controller does not support AIO RAID{5,6} writes, need to send + * requests down non-AIO path. + */ + if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) || + (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes)) + return true; + + lru_index = 0; + oldest_jiffies = INT_MAX; + for (i = 0; i < NUM_STREAMS_PER_LUN; i++) { + pqi_stream_data = &device->stream_data[i]; + /* + * Check for adjacent request or request is within + * the previous request. + */ + if ((pqi_stream_data->next_lba && + rmd.first_block >= pqi_stream_data->next_lba) && + rmd.first_block <= pqi_stream_data->next_lba + + rmd.block_cnt) { + pqi_stream_data->next_lba = rmd.first_block + + rmd.block_cnt; + pqi_stream_data->last_accessed = jiffies; + return true; + } + + /* unused entry */ + if (pqi_stream_data->last_accessed == 0) { + lru_index = i; + break; + } + + /* Find entry with oldest last accessed time. */ + if (pqi_stream_data->last_accessed <= oldest_jiffies) { + oldest_jiffies = pqi_stream_data->last_accessed; + lru_index = i; + } + } + + /* Set LRU entry. 
*/ + pqi_stream_data = &device->stream_data[lru_index]; + pqi_stream_data->last_accessed = jiffies; + pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; + + return false; +} + +static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) +{ + int rc; + struct pqi_ctrl_info *ctrl_info; + struct pqi_scsi_dev *device; + u16 hw_queue; + struct pqi_queue_group *queue_group; + bool raid_bypassed; + u8 lun; + + scmd->host_scribble = PQI_NO_COMPLETION; + + device = scmd->device->hostdata; + + if (!device) { + set_host_byte(scmd, DID_NO_CONNECT); + pqi_scsi_done(scmd); + return 0; + } + + lun = (u8)scmd->device->lun; + + atomic_inc(&device->scsi_cmds_outstanding[lun]); + + ctrl_info = shost_to_hba(shost); + + if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) { + set_host_byte(scmd, DID_NO_CONNECT); + pqi_scsi_done(scmd); + return 0; + } + + if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + + /* + * This is necessary because the SML doesn't zero out this field during + * error recovery. + */ + scmd->result = 0; + + hw_queue = pqi_get_hw_queue(ctrl_info, scmd); + queue_group = &ctrl_info->queue_groups[hw_queue]; + + if (pqi_is_logical_device(device)) { + raid_bypassed = false; + if (device->raid_bypass_enabled && + pqi_is_bypass_eligible_request(scmd) && + !pqi_is_parity_write_stream(ctrl_info, scmd)) { + rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); + if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { + raid_bypassed = true; + device->raid_bypass_cnt++; + } + } + if (!raid_bypassed) + rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); + } else { + if (device->aio_enabled) + rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); + else + rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); + } + +out: + if (rc) { + scmd->host_scribble = NULL; + atomic_dec(&device->scsi_cmds_outstanding[lun]); + } + + return rc; +} + +static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info) +{ + unsigned int i; + unsigned int path; + unsigned long flags; + unsigned int queued_io_count; + struct pqi_queue_group *queue_group; + struct pqi_io_request *io_request; + + queued_io_count = 0; + + for (i = 0; i < ctrl_info->num_queue_groups; i++) { + queue_group = &ctrl_info->queue_groups[i]; + for (path = 0; path < 2; path++) { + spin_lock_irqsave(&queue_group->submit_lock[path], flags); + list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry) + queued_io_count++; + spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); + } + } + + return queued_io_count; +} + +static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info) +{ + unsigned int i; + unsigned int path; + unsigned int nonempty_inbound_queue_count; + struct pqi_queue_group *queue_group; + pqi_index_t iq_pi; + pqi_index_t iq_ci; + + nonempty_inbound_queue_count = 0; + + for (i = 0; i < ctrl_info->num_queue_groups; i++) { + queue_group = &ctrl_info->queue_groups[i]; + for (path = 0; path < 2; path++) { + iq_pi = queue_group->iq_pi_copy[path]; + iq_ci = readl(queue_group->iq_ci[path]); + if (iq_ci != iq_pi) + nonempty_inbound_queue_count++; + } + } + + return nonempty_inbound_queue_count; +} + +#define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10 + +static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) +{ + unsigned long start_jiffies; + unsigned long warning_timeout; + 
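The drain loop below keeps polling the queued and in-flight inbound I/O counts and, rather than failing when the warning threshold passes, logs how long it has been waiting and pushes the warning deadline forward. A minimal userspace sketch of that re-armed-warning poll pattern follows; toy_work_drained() and the constants are illustrative stand-ins, not code from the patch.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define TOY_WARNING_INTERVAL_SECS 10

/* Toy condition; the driver checks queued and in-flight inbound I/O. */
static bool toy_work_drained(int *remaining)
{
	if (*remaining > 0)
		(*remaining)--;
	return *remaining == 0;
}

/*
 * Poll until the condition clears, re-arming a "still waiting" warning
 * every TOY_WARNING_INTERVAL_SECS instead of giving up, the way the
 * drain loop below keeps moving warning_timeout forward.
 */
static void toy_wait_until_drained(int remaining)
{
	time_t start = time(NULL);
	time_t warning_deadline = start + TOY_WARNING_INTERVAL_SECS;

	while (!toy_work_drained(&remaining)) {
		if (time(NULL) >= warning_deadline) {
			printf("still waiting after %ld seconds\n",
				(long)(time(NULL) - start));
			warning_deadline = time(NULL) + TOY_WARNING_INTERVAL_SECS;
		}
		usleep(1000);	/* stand-in for usleep_range(1000, 2000) */
	}
}

int main(void)
{
	toy_wait_until_drained(5);
	printf("drained\n");
	return 0;
}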
unsigned int queued_io_count; + unsigned int nonempty_inbound_queue_count; + bool displayed_warning; + + displayed_warning = false; + start_jiffies = jiffies; + warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; + + while (1) { + queued_io_count = pqi_queued_io_count(ctrl_info); + nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info); + if (queued_io_count == 0 && nonempty_inbound_queue_count == 0) + break; + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) + return -ENXIO; + if (time_after(jiffies, warning_timeout)) { + dev_warn(&ctrl_info->pci_dev->dev, + "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n", + jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count); + displayed_warning = true; + warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies; + } + usleep_range(1000, 2000); + } + + if (displayed_warning) + dev_warn(&ctrl_info->pci_dev->dev, + "queued I/O drained after waiting for %u seconds\n", + jiffies_to_msecs(jiffies - start_jiffies) / 1000); + + return 0; +} + +static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, u8 lun) +{ + unsigned int i; + unsigned int path; + struct pqi_queue_group *queue_group; + unsigned long flags; + struct pqi_io_request *io_request; + struct pqi_io_request *next; + struct scsi_cmnd *scmd; + struct pqi_scsi_dev *scsi_device; + + for (i = 0; i < ctrl_info->num_queue_groups; i++) { + queue_group = &ctrl_info->queue_groups[i]; + + for (path = 0; path < 2; path++) { + spin_lock_irqsave( + &queue_group->submit_lock[path], flags); + + list_for_each_entry_safe(io_request, next, + &queue_group->request_list[path], + request_list_entry) { + + scmd = io_request->scmd; + if (!scmd) + continue; + + scsi_device = scmd->device->hostdata; + if (scsi_device != device) + continue; + + if ((u8)scmd->device->lun != lun) + continue; + + list_del(&io_request->request_list_entry); + set_host_byte(scmd, DID_RESET); + pqi_free_io_request(io_request); + scsi_dma_unmap(scmd); + pqi_scsi_done(scmd); + } + + spin_unlock_irqrestore( + &queue_group->submit_lock[path], flags); + } + } +} + +#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10 + +static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs) +{ + int cmds_outstanding; + unsigned long start_jiffies; + unsigned long warning_timeout; + unsigned long msecs_waiting; + + start_jiffies = jiffies; + warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; + + while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) { + if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) { + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) + return -ENXIO; + } + msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies); + if (msecs_waiting >= timeout_msecs) { + dev_err(&ctrl_info->pci_dev->dev, + "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n", + ctrl_info->scsi_host->host_no, device->bus, device->target, + lun, msecs_waiting / 1000, cmds_outstanding); + return -ETIMEDOUT; + } + if (time_after(jiffies, warning_timeout)) { + dev_warn(&ctrl_info->pci_dev->dev, + "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n", + ctrl_info->scsi_host->host_no, device->bus, device->target, + lun, 
msecs_waiting / 1000, cmds_outstanding); + warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies; + } + usleep_range(1000, 2000); + } + + return 0; +} + +static void pqi_lun_reset_complete(struct pqi_io_request *io_request, + void *context) +{ + struct completion *waiting = context; + + complete(waiting); +} + +#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10 + +static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, u8 lun, struct completion *wait) +{ + int rc; + unsigned int wait_secs; + int cmds_outstanding; + + wait_secs = 0; + + while (1) { + if (wait_for_completion_io_timeout(wait, + PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) { + rc = 0; + break; + } + + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) { + rc = -ENXIO; + break; + } + + wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS; + cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]); + dev_warn(&ctrl_info->pci_dev->dev, + "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n", + ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding); + } + + return rc; +} + +#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30 + +static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) +{ + int rc; + struct pqi_io_request *io_request; + DECLARE_COMPLETION_ONSTACK(wait); + struct pqi_task_management_request *request; + + io_request = pqi_alloc_io_request(ctrl_info, NULL); + io_request->io_complete_callback = pqi_lun_reset_complete; + io_request->context = &wait; + + request = io_request->iu; + memset(request, 0, sizeof(*request)); + + request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; + put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, + &request->header.iu_length); + put_unaligned_le16(io_request->index, &request->request_id); + memcpy(request->lun_number, device->scsi3addr, + sizeof(request->lun_number)); + if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported) + request->ml_device_lun_number = lun; + request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; + if (ctrl_info->tmf_iu_timeout_supported) + put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout); + + pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, + io_request); + + rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait); + if (rc == 0) + rc = io_request->status; + + pqi_free_io_request(io_request); + + return rc; +} + +#define PQI_LUN_RESET_RETRIES 3 +#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000) +#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000) +#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000) + +static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) +{ + int reset_rc; + int wait_rc; + unsigned int retries; + unsigned long timeout_msecs; + + for (retries = 0;;) { + reset_rc = pqi_lun_reset(ctrl_info, device, lun); + if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES) + break; + msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); + } + + timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS : + PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS; + + wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs); + if (wait_rc && reset_rc == 0) + reset_rc = wait_rc; + + return reset_rc == 0 ? 
SUCCESS : FAILED; +} + +static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) +{ + int rc; + + pqi_ctrl_block_requests(ctrl_info); + pqi_ctrl_wait_until_quiesced(ctrl_info); + pqi_fail_io_queued_for_device(ctrl_info, device, lun); + rc = pqi_wait_until_inbound_queues_empty(ctrl_info); + pqi_device_reset_start(device, lun); + pqi_ctrl_unblock_requests(ctrl_info); + if (rc) + rc = FAILED; + else + rc = pqi_lun_reset_with_retries(ctrl_info, device, lun); + pqi_device_reset_done(device, lun); + + return rc; +} + +static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode) +{ + int rc; + + mutex_lock(&ctrl_info->lun_reset_mutex); + + dev_err(&ctrl_info->pci_dev->dev, + "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n", + ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode); + + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) + rc = FAILED; + else + rc = pqi_device_reset(ctrl_info, device, lun); + + dev_err(&ctrl_info->pci_dev->dev, + "reset of scsi %d:%d:%d:%u: %s\n", + ctrl_info->scsi_host->host_no, device->bus, device->target, lun, + rc == SUCCESS ? "SUCCESS" : "FAILED"); + + mutex_unlock(&ctrl_info->lun_reset_mutex); + + return rc; +} + +static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) +{ + struct Scsi_Host *shost; + struct pqi_ctrl_info *ctrl_info; + struct pqi_scsi_dev *device; + u8 scsi_opcode; + + shost = scmd->device->host; + ctrl_info = shost_to_hba(shost); + device = scmd->device->hostdata; + scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff; + + return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode); +} + +static void pqi_tmf_worker(struct work_struct *work) +{ + struct pqi_tmf_work *tmf_work; + struct scsi_cmnd *scmd; + + tmf_work = container_of(work, struct pqi_tmf_work, work_struct); + scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL); + + pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode); +} + +static int pqi_eh_abort_handler(struct scsi_cmnd *scmd) +{ + struct Scsi_Host *shost; + struct pqi_ctrl_info *ctrl_info; + struct pqi_scsi_dev *device; + struct pqi_tmf_work *tmf_work; + DECLARE_COMPLETION_ONSTACK(wait); + + shost = scmd->device->host; + ctrl_info = shost_to_hba(shost); + device = scmd->device->hostdata; + + dev_err(&ctrl_info->pci_dev->dev, + "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n", + shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); + + if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) { + dev_err(&ctrl_info->pci_dev->dev, + "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n", + shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); + scmd->result = DID_RESET << 16; + goto out; + } + + tmf_work = &device->tmf_work[scmd->device->lun]; + + if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) { + tmf_work->ctrl_info = ctrl_info; + tmf_work->device = device; + tmf_work->lun = (u8)scmd->device->lun; + tmf_work->scsi_opcode = scmd->cmd_len > 0 ? 
scmd->cmnd[0] : 0xff; + schedule_work(&tmf_work->work_struct); + } + + wait_for_completion(&wait); + + dev_err(&ctrl_info->pci_dev->dev, + "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n", + shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); + +out: + + return SUCCESS; +} + +static int pqi_slave_alloc(struct scsi_device *sdev) +{ + struct pqi_scsi_dev *device; + unsigned long flags; + struct pqi_ctrl_info *ctrl_info; + struct scsi_target *starget; + struct sas_rphy *rphy; + + ctrl_info = shost_to_hba(sdev->host); + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { + starget = scsi_target(sdev); + rphy = target_to_rphy(starget); + device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); + if (device) { + if (device->target_lun_valid) { + device->ignore_device = true; + } else { + device->target = sdev_id(sdev); + device->lun = sdev->lun; + device->target_lun_valid = true; + } + } + } else { + device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), + sdev_id(sdev), sdev->lun); + } + + if (device) { + sdev->hostdata = device; + device->sdev = sdev; + if (device->queue_depth) { + device->advertised_queue_depth = device->queue_depth; + scsi_change_queue_depth(sdev, + device->advertised_queue_depth); + } + if (pqi_is_logical_device(device)) { + pqi_disable_write_same(sdev); + } else { + sdev->allow_restart = 1; + if (device->device_type == SA_DEVICE_TYPE_NVME) + pqi_disable_write_same(sdev); + } + } + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return 0; +} + +static void pqi_map_queues(struct Scsi_Host *shost) +{ + struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + + blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], + ctrl_info->pci_dev, 0); +} + +static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device) +{ + return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER; +} + +static int pqi_slave_configure(struct scsi_device *sdev) +{ + int rc = 0; + struct pqi_scsi_dev *device; + + device = sdev->hostdata; + device->devtype = sdev->type; + + if (pqi_is_tape_changer_device(device) && device->ignore_device) { + rc = -ENXIO; + device->ignore_device = false; + } + + return rc; +} + +static void pqi_slave_destroy(struct scsi_device *sdev) +{ + struct pqi_ctrl_info *ctrl_info; + struct pqi_scsi_dev *device; + int mutex_acquired; + unsigned long flags; + + ctrl_info = shost_to_hba(sdev->host); + + mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); + if (!mutex_acquired) + return; + + device = sdev->hostdata; + if (!device) { + mutex_unlock(&ctrl_info->scan_mutex); + return; + } + + device->lun_count--; + if (device->lun_count > 0) { + mutex_unlock(&ctrl_info->scan_mutex); + return; + } + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + list_del(&device->scsi_device_list_entry); + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + mutex_unlock(&ctrl_info->scan_mutex); + + pqi_dev_info(ctrl_info, "removed", device); + pqi_free_device(device); +} + +static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) +{ + struct pci_dev *pci_dev; + u32 subsystem_vendor; + u32 subsystem_device; + cciss_pci_info_struct pci_info; + + if (!arg) + return -EINVAL; + + pci_dev = ctrl_info->pci_dev; + + pci_info.domain = pci_domain_nr(pci_dev->bus); + pci_info.bus = pci_dev->bus->number; + pci_info.dev_fn = pci_dev->devfn; + subsystem_vendor = pci_dev->subsystem_vendor; + 
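+	/*
+	 * board_id follows the cciss_pci_info_struct convention: the PCI
+	 * subsystem device ID in the upper 16 bits and the subsystem
+	 * vendor ID in the lower 16 bits.
+	 */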
subsystem_device = pci_dev->subsystem_device; + pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor; + + if (copy_to_user(arg, &pci_info, sizeof(pci_info))) + return -EFAULT; + + return 0; +} + +static int pqi_getdrivver_ioctl(void __user *arg) +{ + u32 version; + + if (!arg) + return -EINVAL; + + version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | + (DRIVER_RELEASE << 16) | DRIVER_REVISION; + + if (copy_to_user(arg, &version, sizeof(version))) + return -EFAULT; + + return 0; +} + +struct ciss_error_info { + u8 scsi_status; + int command_status; + size_t sense_data_length; +}; + +static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, + struct ciss_error_info *ciss_error_info) +{ + int ciss_cmd_status; + size_t sense_data_length; + + switch (pqi_error_info->data_out_result) { + case PQI_DATA_IN_OUT_GOOD: + ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; + break; + case PQI_DATA_IN_OUT_UNDERFLOW: + ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; + break; + case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: + ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN; + break; + case PQI_DATA_IN_OUT_PROTOCOL_ERROR: + case PQI_DATA_IN_OUT_BUFFER_ERROR: + case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: + case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: + case PQI_DATA_IN_OUT_ERROR: + ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; + break; + case PQI_DATA_IN_OUT_HARDWARE_ERROR: + case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: + case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: + case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: + case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: + case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: + case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: + case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: + case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: + case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: + ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; + break; + case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: + ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; + break; + case PQI_DATA_IN_OUT_ABORTED: + ciss_cmd_status = CISS_CMD_STATUS_ABORTED; + break; + case PQI_DATA_IN_OUT_TIMEOUT: + ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; + break; + default: + ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; + break; + } + + sense_data_length = + get_unaligned_le16(&pqi_error_info->sense_data_length); + if (sense_data_length == 0) + sense_data_length = + get_unaligned_le16(&pqi_error_info->response_data_length); + if (sense_data_length) + if (sense_data_length > sizeof(pqi_error_info->data)) + sense_data_length = sizeof(pqi_error_info->data); + + ciss_error_info->scsi_status = pqi_error_info->status; + ciss_error_info->command_status = ciss_cmd_status; + ciss_error_info->sense_data_length = sense_data_length; +} + +static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) +{ + int rc; + char *kernel_buffer = NULL; + u16 iu_length; + size_t sense_data_length; + IOCTL_Command_struct iocommand; + struct pqi_raid_path_request request; + struct pqi_raid_error_info pqi_error_info; + struct ciss_error_info ciss_error_info; + + if (pqi_ctrl_offline(ctrl_info)) + return -ENXIO; + if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info)) + return -EBUSY; + if (!arg) + return -EINVAL; + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + if (copy_from_user(&iocommand, arg, sizeof(iocommand))) + return -EFAULT; + if (iocommand.buf_size < 1 && + iocommand.Request.Type.Direction != XFER_NONE) + return -EINVAL; + if (iocommand.Request.CDBLen > 
sizeof(request.cdb)) + return -EINVAL; + if (iocommand.Request.Type.Type != TYPE_CMD) + return -EINVAL; + + switch (iocommand.Request.Type.Direction) { + case XFER_NONE: + case XFER_WRITE: + case XFER_READ: + case XFER_READ | XFER_WRITE: + break; + default: + return -EINVAL; + } + + if (iocommand.buf_size > 0) { + kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); + if (!kernel_buffer) + return -ENOMEM; + if (iocommand.Request.Type.Direction & XFER_WRITE) { + if (copy_from_user(kernel_buffer, iocommand.buf, + iocommand.buf_size)) { + rc = -EFAULT; + goto out; + } + } else { + memset(kernel_buffer, 0, iocommand.buf_size); + } + } + + memset(&request, 0, sizeof(request)); + + request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; + iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - + PQI_REQUEST_HEADER_LENGTH; + memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, + sizeof(request.lun_number)); + memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); + request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; + + switch (iocommand.Request.Type.Direction) { + case XFER_NONE: + request.data_direction = SOP_NO_DIRECTION_FLAG; + break; + case XFER_WRITE: + request.data_direction = SOP_WRITE_FLAG; + break; + case XFER_READ: + request.data_direction = SOP_READ_FLAG; + break; + case XFER_READ | XFER_WRITE: + request.data_direction = SOP_BIDIRECTIONAL; + break; + } + + request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; + + if (iocommand.buf_size > 0) { + put_unaligned_le32(iocommand.buf_size, &request.buffer_length); + + rc = pqi_map_single(ctrl_info->pci_dev, + &request.sg_descriptors[0], kernel_buffer, + iocommand.buf_size, DMA_BIDIRECTIONAL); + if (rc) + goto out; + + iu_length += sizeof(request.sg_descriptors[0]); + } + + put_unaligned_le16(iu_length, &request.header.iu_length); + + if (ctrl_info->raid_iu_timeout_supported) + put_unaligned_le32(iocommand.Request.Timeout, &request.timeout); + + rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, + PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info); + + if (iocommand.buf_size > 0) + pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, + DMA_BIDIRECTIONAL); + + memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); + + if (rc == 0) { + pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); + iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; + iocommand.error_info.CommandStatus = + ciss_error_info.command_status; + sense_data_length = ciss_error_info.sense_data_length; + if (sense_data_length) { + if (sense_data_length > + sizeof(iocommand.error_info.SenseInfo)) + sense_data_length = + sizeof(iocommand.error_info.SenseInfo); + memcpy(iocommand.error_info.SenseInfo, + pqi_error_info.data, sense_data_length); + iocommand.error_info.SenseLen = sense_data_length; + } + } + + if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { + rc = -EFAULT; + goto out; + } + + if (rc == 0 && iocommand.buf_size > 0 && + (iocommand.Request.Type.Direction & XFER_READ)) { + if (copy_to_user(iocommand.buf, kernel_buffer, + iocommand.buf_size)) { + rc = -EFAULT; + } + } + +out: + kfree(kernel_buffer); + + return rc; +} + +static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, + void __user *arg) +{ + int rc; + struct pqi_ctrl_info *ctrl_info; + + ctrl_info = shost_to_hba(sdev->host); + + switch (cmd) { + case CCISS_DEREGDISK: + case CCISS_REGNEWDISK: + case CCISS_REGNEWD: + rc = pqi_scan_scsi_devices(ctrl_info); + break; + case CCISS_GETPCIINFO: + rc = 
pqi_getpciinfo_ioctl(ctrl_info, arg); + break; + case CCISS_GETDRIVVER: + rc = pqi_getdrivver_ioctl(arg); + break; + case CCISS_PASSTHRU: + rc = pqi_passthru_ioctl(ctrl_info, arg); + break; + default: + rc = -EINVAL; + break; + } + + return rc; +} + +static ssize_t pqi_firmware_version_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct Scsi_Host *shost; + struct pqi_ctrl_info *ctrl_info; + + shost = class_to_shost(dev); + ctrl_info = shost_to_hba(shost); + + return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); +} + +static ssize_t pqi_driver_version_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP); +} + +static ssize_t pqi_serial_number_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct Scsi_Host *shost; + struct pqi_ctrl_info *ctrl_info; + + shost = class_to_shost(dev); + ctrl_info = shost_to_hba(shost); + + return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); +} + +static ssize_t pqi_model_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct Scsi_Host *shost; + struct pqi_ctrl_info *ctrl_info; + + shost = class_to_shost(dev); + ctrl_info = shost_to_hba(shost); + + return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); +} + +static ssize_t pqi_vendor_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct Scsi_Host *shost; + struct pqi_ctrl_info *ctrl_info; + + shost = class_to_shost(dev); + ctrl_info = shost_to_hba(shost); + + return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); +} + +static ssize_t pqi_host_rescan_store(struct device *dev, + struct device_attribute *attr, const char *buffer, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + + pqi_scan_start(shost); + + return count; +} + +static ssize_t pqi_lockup_action_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + int count = 0; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { + if (pqi_lockup_actions[i].action == pqi_lockup_action) + count += scnprintf(buffer + count, PAGE_SIZE - count, + "[%s] ", pqi_lockup_actions[i].name); + else + count += scnprintf(buffer + count, PAGE_SIZE - count, + "%s ", pqi_lockup_actions[i].name); + } + + count += scnprintf(buffer + count, PAGE_SIZE - count, "\n"); + + return count; +} + +static ssize_t pqi_lockup_action_store(struct device *dev, + struct device_attribute *attr, const char *buffer, size_t count) +{ + unsigned int i; + char *action_name; + char action_name_buffer[32]; + + strscpy(action_name_buffer, buffer, sizeof(action_name_buffer)); + action_name = strstrip(action_name_buffer); + + for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { + if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { + pqi_lockup_action = pqi_lockup_actions[i].action; + return count; + } + } + + return -EINVAL; +} + +static ssize_t pqi_host_enable_stream_detection_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + + return scnprintf(buffer, 10, "%x\n", + ctrl_info->enable_stream_detection); +} + +static ssize_t pqi_host_enable_stream_detection_store(struct device *dev, + struct device_attribute *attr, const char *buffer, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pqi_ctrl_info *ctrl_info = 
shost_to_hba(shost); + u8 set_stream_detection = 0; + + if (kstrtou8(buffer, 0, &set_stream_detection)) + return -EINVAL; + + if (set_stream_detection > 0) + set_stream_detection = 1; + + ctrl_info->enable_stream_detection = set_stream_detection; + + return count; +} + +static ssize_t pqi_host_enable_r5_writes_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + + return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes); +} + +static ssize_t pqi_host_enable_r5_writes_store(struct device *dev, + struct device_attribute *attr, const char *buffer, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + u8 set_r5_writes = 0; + + if (kstrtou8(buffer, 0, &set_r5_writes)) + return -EINVAL; + + if (set_r5_writes > 0) + set_r5_writes = 1; + + ctrl_info->enable_r5_writes = set_r5_writes; + + return count; +} + +static ssize_t pqi_host_enable_r6_writes_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + + return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes); +} + +static ssize_t pqi_host_enable_r6_writes_store(struct device *dev, + struct device_attribute *attr, const char *buffer, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + u8 set_r6_writes = 0; + + if (kstrtou8(buffer, 0, &set_r6_writes)) + return -EINVAL; + + if (set_r6_writes > 0) + set_r6_writes = 1; + + ctrl_info->enable_r6_writes = set_r6_writes; + + return count; +} + +static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL); +static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); +static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); +static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); +static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL); +static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); +static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show, + pqi_lockup_action_store); +static DEVICE_ATTR(enable_stream_detection, 0644, + pqi_host_enable_stream_detection_show, + pqi_host_enable_stream_detection_store); +static DEVICE_ATTR(enable_r5_writes, 0644, + pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store); +static DEVICE_ATTR(enable_r6_writes, 0644, + pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store); + +static struct attribute *pqi_shost_attrs[] = { + &dev_attr_driver_version.attr, + &dev_attr_firmware_version.attr, + &dev_attr_model.attr, + &dev_attr_serial_number.attr, + &dev_attr_vendor.attr, + &dev_attr_rescan.attr, + &dev_attr_lockup_action.attr, + &dev_attr_enable_stream_detection.attr, + &dev_attr_enable_r5_writes.attr, + &dev_attr_enable_r6_writes.attr, + NULL +}; + +ATTRIBUTE_GROUPS(pqi_shost); + +static ssize_t pqi_unique_id_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct pqi_ctrl_info *ctrl_info; + struct scsi_device *sdev; + struct pqi_scsi_dev *device; + unsigned long flags; + u8 unique_id[16]; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + if (pqi_ctrl_offline(ctrl_info)) + return -ENODEV; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + device = sdev->hostdata; + if (!device) { + 
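+		/* No pqi_scsi_dev is bound to this sdev; report the device as gone. */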
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -ENODEV; + } + + if (device->is_physical_device) + memcpy(unique_id, device->wwid, sizeof(device->wwid)); + else + memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return scnprintf(buffer, PAGE_SIZE, + "%02X%02X%02X%02X%02X%02X%02X%02X" + "%02X%02X%02X%02X%02X%02X%02X%02X\n", + unique_id[0], unique_id[1], unique_id[2], unique_id[3], + unique_id[4], unique_id[5], unique_id[6], unique_id[7], + unique_id[8], unique_id[9], unique_id[10], unique_id[11], + unique_id[12], unique_id[13], unique_id[14], unique_id[15]); +} + +static ssize_t pqi_lunid_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct pqi_ctrl_info *ctrl_info; + struct scsi_device *sdev; + struct pqi_scsi_dev *device; + unsigned long flags; + u8 lunid[8]; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + if (pqi_ctrl_offline(ctrl_info)) + return -ENODEV; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + device = sdev->hostdata; + if (!device) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -ENODEV; + } + + memcpy(lunid, device->scsi3addr, sizeof(lunid)); + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); +} + +#define MAX_PATHS 8 + +static ssize_t pqi_path_info_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pqi_ctrl_info *ctrl_info; + struct scsi_device *sdev; + struct pqi_scsi_dev *device; + unsigned long flags; + int i; + int output_len = 0; + u8 box; + u8 bay; + u8 path_map_index; + char *active; + u8 phys_connector[2]; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + if (pqi_ctrl_offline(ctrl_info)) + return -ENODEV; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + device = sdev->hostdata; + if (!device) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -ENODEV; + } + + bay = device->bay; + for (i = 0; i < MAX_PATHS; i++) { + path_map_index = 1 << i; + if (i == device->active_path_index) + active = "Active"; + else if (device->path_map & path_map_index) + active = "Inactive"; + else + continue; + + output_len += scnprintf(buf + output_len, + PAGE_SIZE - output_len, + "[%d:%d:%d:%d] %20.20s ", + ctrl_info->scsi_host->host_no, + device->bus, device->target, + device->lun, + scsi_device_type(device->devtype)); + + if (device->devtype == TYPE_RAID || + pqi_is_logical_device(device)) + goto end_buffer; + + memcpy(&phys_connector, &device->phys_connector[i], + sizeof(phys_connector)); + if (phys_connector[0] < '0') + phys_connector[0] = '0'; + if (phys_connector[1] < '0') + phys_connector[1] = '0'; + + output_len += scnprintf(buf + output_len, + PAGE_SIZE - output_len, + "PORT: %.2s ", phys_connector); + + box = device->box[i]; + if (box != 0 && box != 0xFF) + output_len += scnprintf(buf + output_len, + PAGE_SIZE - output_len, + "BOX: %hhu ", box); + + if ((device->devtype == TYPE_DISK || + device->devtype == TYPE_ZBC) && + pqi_expose_device(device)) + output_len += scnprintf(buf + output_len, + PAGE_SIZE - output_len, + "BAY: %hhu ", bay); + +end_buffer: + output_len += scnprintf(buf + output_len, + PAGE_SIZE - output_len, + "%s\n", active); + } + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return output_len; +} + +static ssize_t 
pqi_sas_address_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct pqi_ctrl_info *ctrl_info; + struct scsi_device *sdev; + struct pqi_scsi_dev *device; + unsigned long flags; + u64 sas_address; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + if (pqi_ctrl_offline(ctrl_info)) + return -ENODEV; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + device = sdev->hostdata; + if (!device) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -ENODEV; + } + + sas_address = device->sas_address; + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); +} + +static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct pqi_ctrl_info *ctrl_info; + struct scsi_device *sdev; + struct pqi_scsi_dev *device; + unsigned long flags; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + if (pqi_ctrl_offline(ctrl_info)) + return -ENODEV; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + device = sdev->hostdata; + if (!device) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -ENODEV; + } + + buffer[0] = device->raid_bypass_enabled ? '1' : '0'; + buffer[1] = '\n'; + buffer[2] = '\0'; + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return 2; +} + +static ssize_t pqi_raid_level_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct pqi_ctrl_info *ctrl_info; + struct scsi_device *sdev; + struct pqi_scsi_dev *device; + unsigned long flags; + char *raid_level; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + if (pqi_ctrl_offline(ctrl_info)) + return -ENODEV; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + device = sdev->hostdata; + if (!device) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -ENODEV; + } + + if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) + raid_level = pqi_raid_level_to_string(device->raid_level); + else + raid_level = "N/A"; + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level); +} + +static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct pqi_ctrl_info *ctrl_info; + struct scsi_device *sdev; + struct pqi_scsi_dev *device; + unsigned long flags; + unsigned int raid_bypass_cnt; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + if (pqi_ctrl_offline(ctrl_info)) + return -ENODEV; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + device = sdev->hostdata; + if (!device) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -ENODEV; + } + + raid_bypass_cnt = device->raid_bypass_cnt; + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt); +} + +static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pqi_ctrl_info *ctrl_info; + struct scsi_device *sdev; + struct pqi_scsi_dev *device; + unsigned long flags; + int output_len = 0; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + if (pqi_ctrl_offline(ctrl_info)) + return -ENODEV; + + 
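+	/* Dereference sdev->hostdata under the device list lock, matching the other per-device sysfs handlers. */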
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + device = sdev->hostdata; + if (!device) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -ENODEV; + } + + output_len = snprintf(buf, PAGE_SIZE, "%d\n", + device->ncq_prio_enable); + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return output_len; +} + +static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pqi_ctrl_info *ctrl_info; + struct scsi_device *sdev; + struct pqi_scsi_dev *device; + unsigned long flags; + u8 ncq_prio_enable = 0; + + if (kstrtou8(buf, 0, &ncq_prio_enable)) + return -EINVAL; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + device = sdev->hostdata; + + if (!device) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -ENODEV; + } + + if (!device->ncq_prio_support) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -EINVAL; + } + + device->ncq_prio_enable = ncq_prio_enable; + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return strlen(buf); +} + +static ssize_t pqi_numa_node_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct scsi_device *sdev; + struct pqi_ctrl_info *ctrl_info; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node); +} + +static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); +static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); +static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); +static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); +static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL); +static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); +static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); +static DEVICE_ATTR(sas_ncq_prio_enable, 0644, + pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store); +static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL); + +static struct attribute *pqi_sdev_attrs[] = { + &dev_attr_lunid.attr, + &dev_attr_unique_id.attr, + &dev_attr_path_info.attr, + &dev_attr_sas_address.attr, + &dev_attr_ssd_smart_path_enabled.attr, + &dev_attr_raid_level.attr, + &dev_attr_raid_bypass_cnt.attr, + &dev_attr_sas_ncq_prio_enable.attr, + &dev_attr_numa_node.attr, + NULL +}; + +ATTRIBUTE_GROUPS(pqi_sdev); + +static const struct scsi_host_template pqi_driver_template = { + .module = THIS_MODULE, + .name = DRIVER_NAME_SHORT, + .proc_name = DRIVER_NAME_SHORT, + .queuecommand = pqi_scsi_queue_command, + .scan_start = pqi_scan_start, + .scan_finished = pqi_scan_finished, + .this_id = -1, + .eh_device_reset_handler = pqi_eh_device_reset_handler, + .eh_abort_handler = pqi_eh_abort_handler, + .ioctl = pqi_ioctl, + .slave_alloc = pqi_slave_alloc, + .slave_configure = pqi_slave_configure, + .slave_destroy = pqi_slave_destroy, + .map_queues = pqi_map_queues, + .sdev_groups = pqi_sdev_groups, + .shost_groups = pqi_shost_groups, + .cmd_size = sizeof(struct pqi_cmd_priv), +}; + +static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + struct Scsi_Host *shost; + + shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); + if (!shost) { + dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n"); + return -ENOMEM; + } + 
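+	/*
+	 * Publish the controller's queue depth and transfer limits to the
+	 * SCSI midlayer before registering the host.
+	 */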
+ shost->io_port = 0; + shost->n_io_port = 0; + shost->this_id = -1; + shost->max_channel = PQI_MAX_BUS; + shost->max_cmd_len = MAX_COMMAND_SIZE; + shost->max_lun = PQI_MAX_LUNS_PER_DEVICE; + shost->max_id = ~0; + shost->max_sectors = ctrl_info->max_sectors; + shost->can_queue = ctrl_info->scsi_ml_can_queue; + shost->cmd_per_lun = shost->can_queue; + shost->sg_tablesize = ctrl_info->sg_tablesize; + shost->transportt = pqi_sas_transport_template; + shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); + shost->unique_id = shost->irq; + shost->nr_hw_queues = ctrl_info->num_queue_groups; + shost->host_tagset = 1; + shost->hostdata[0] = (unsigned long)ctrl_info; + + rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n"); + goto free_host; + } + + rc = pqi_add_sas_host(shost, ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n"); + goto remove_host; + } + + ctrl_info->scsi_host = shost; + + return 0; + +remove_host: + scsi_remove_host(shost); +free_host: + scsi_host_put(shost); + + return rc; +} + +static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) +{ + struct Scsi_Host *shost; + + pqi_delete_sas_host(ctrl_info); + + shost = ctrl_info->scsi_host; + if (!shost) + return; + + scsi_remove_host(shost); + scsi_host_put(shost); +} + +static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) +{ + int rc = 0; + struct pqi_device_registers __iomem *pqi_registers; + unsigned long timeout; + unsigned int timeout_msecs; + union pqi_reset_register reset_reg; + + pqi_registers = ctrl_info->pqi_registers; + timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; + timeout = msecs_to_jiffies(timeout_msecs) + jiffies; + + while (1) { + msleep(PQI_RESET_POLL_INTERVAL_MSECS); + reset_reg.all_bits = readl(&pqi_registers->device_reset); + if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) + break; + if (!sis_is_firmware_running(ctrl_info)) { + rc = -ENXIO; + break; + } + if (time_after(jiffies, timeout)) { + rc = -ETIMEDOUT; + break; + } + } + + return rc; +} + +static int pqi_reset(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + union pqi_reset_register reset_reg; + + if (ctrl_info->pqi_reset_quiesce_supported) { + rc = sis_pqi_reset_quiesce(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "PQI reset failed during quiesce with error %d\n", rc); + return rc; + } + } + + reset_reg.all_bits = 0; + reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; + reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; + + writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); + + rc = pqi_wait_for_pqi_reset_completion(ctrl_info); + if (rc) + dev_err(&ctrl_info->pci_dev->dev, + "PQI reset failed with error %d\n", rc); + + return rc; +} + +static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + struct bmic_sense_subsystem_info *sense_info; + + sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL); + if (!sense_info) + return -ENOMEM; + + rc = pqi_sense_subsystem_info(ctrl_info, sense_info); + if (rc) + goto out; + + memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, + sizeof(sense_info->ctrl_serial_number)); + ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; + +out: + kfree(sense_info); + + return rc; +} + +static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + struct bmic_identify_controller *identify; + + identify = kmalloc(sizeof(*identify), 
GFP_KERNEL); + if (!identify) + return -ENOMEM; + + rc = pqi_identify_controller(ctrl_info, identify); + if (rc) + goto out; + + if (get_unaligned_le32(&identify->extra_controller_flags) & + BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) { + memcpy(ctrl_info->firmware_version, + identify->firmware_version_long, + sizeof(identify->firmware_version_long)); + } else { + memcpy(ctrl_info->firmware_version, + identify->firmware_version_short, + sizeof(identify->firmware_version_short)); + ctrl_info->firmware_version + [sizeof(identify->firmware_version_short)] = '\0'; + snprintf(ctrl_info->firmware_version + + strlen(ctrl_info->firmware_version), + sizeof(ctrl_info->firmware_version) - + sizeof(identify->firmware_version_short), + "-%u", + get_unaligned_le16(&identify->firmware_build_number)); + } + + memcpy(ctrl_info->model, identify->product_id, + sizeof(identify->product_id)); + ctrl_info->model[sizeof(identify->product_id)] = '\0'; + + memcpy(ctrl_info->vendor, identify->vendor_id, + sizeof(identify->vendor_id)); + ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; + + dev_info(&ctrl_info->pci_dev->dev, + "Firmware version: %s\n", ctrl_info->firmware_version); + +out: + kfree(identify); + + return rc; +} + +struct pqi_config_table_section_info { + struct pqi_ctrl_info *ctrl_info; + void *section; + u32 section_offset; + void __iomem *section_iomem_addr; +}; + +static inline bool pqi_is_firmware_feature_supported( + struct pqi_config_table_firmware_features *firmware_features, + unsigned int bit_position) +{ + unsigned int byte_index; + + byte_index = bit_position / BITS_PER_BYTE; + + if (byte_index >= le16_to_cpu(firmware_features->num_elements)) + return false; + + return firmware_features->features_supported[byte_index] & + (1 << (bit_position % BITS_PER_BYTE)) ? true : false; +} + +static inline bool pqi_is_firmware_feature_enabled( + struct pqi_config_table_firmware_features *firmware_features, + void __iomem *firmware_features_iomem_addr, + unsigned int bit_position) +{ + unsigned int byte_index; + u8 __iomem *features_enabled_iomem_addr; + + byte_index = (bit_position / BITS_PER_BYTE) + + (le16_to_cpu(firmware_features->num_elements) * 2); + + features_enabled_iomem_addr = firmware_features_iomem_addr + + offsetof(struct pqi_config_table_firmware_features, + features_supported) + byte_index; + + return *((__force u8 *)features_enabled_iomem_addr) & + (1 << (bit_position % BITS_PER_BYTE)) ? 
true : false; +} + +static inline void pqi_request_firmware_feature( + struct pqi_config_table_firmware_features *firmware_features, + unsigned int bit_position) +{ + unsigned int byte_index; + + byte_index = (bit_position / BITS_PER_BYTE) + + le16_to_cpu(firmware_features->num_elements); + + firmware_features->features_supported[byte_index] |= + (1 << (bit_position % BITS_PER_BYTE)); +} + +static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, + u16 first_section, u16 last_section) +{ + struct pqi_vendor_general_request request; + + memset(&request, 0, sizeof(request)); + + request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; + put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, + &request.header.iu_length); + put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE, + &request.function_code); + put_unaligned_le16(first_section, + &request.data.config_table_update.first_section); + put_unaligned_le16(last_section, + &request.data.config_table_update.last_section); + + return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); +} + +static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, + struct pqi_config_table_firmware_features *firmware_features, + void __iomem *firmware_features_iomem_addr) +{ + void *features_requested; + void __iomem *features_requested_iomem_addr; + void __iomem *host_max_known_feature_iomem_addr; + + features_requested = firmware_features->features_supported + + le16_to_cpu(firmware_features->num_elements); + + features_requested_iomem_addr = firmware_features_iomem_addr + + (features_requested - (void *)firmware_features); + + memcpy_toio(features_requested_iomem_addr, features_requested, + le16_to_cpu(firmware_features->num_elements)); + + if (pqi_is_firmware_feature_supported(firmware_features, + PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) { + host_max_known_feature_iomem_addr = + features_requested_iomem_addr + + (le16_to_cpu(firmware_features->num_elements) * 2) + + sizeof(__le16); + writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr); + writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1); + } + + return pqi_config_table_update(ctrl_info, + PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES, + PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES); +} + +struct pqi_firmware_feature { + char *feature_name; + unsigned int feature_bit; + bool supported; + bool enabled; + void (*feature_status)(struct pqi_ctrl_info *ctrl_info, + struct pqi_firmware_feature *firmware_feature); +}; + +static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, + struct pqi_firmware_feature *firmware_feature) +{ + if (!firmware_feature->supported) { + dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", + firmware_feature->feature_name); + return; + } + + if (firmware_feature->enabled) { + dev_info(&ctrl_info->pci_dev->dev, + "%s enabled\n", firmware_feature->feature_name); + return; + } + + dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", + firmware_feature->feature_name); +} + +static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, + struct pqi_firmware_feature *firmware_feature) +{ + switch (firmware_feature->feature_bit) { + case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS: + ctrl_info->enable_r1_writes = firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS: + ctrl_info->enable_r5_writes = firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS: + 
ctrl_info->enable_r6_writes = firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE: + ctrl_info->soft_reset_handshake_supported = + firmware_feature->enabled && + pqi_read_soft_reset_status(ctrl_info); + break; + case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT: + ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT: + ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_FW_TRIAGE: + ctrl_info->firmware_triage_supported = firmware_feature->enabled; + pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled); + break; + case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5: + ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT: + ctrl_info->multi_lun_device_supported = firmware_feature->enabled; + break; + } + + pqi_firmware_feature_status(ctrl_info, firmware_feature); +} + +static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, + struct pqi_firmware_feature *firmware_feature) +{ + if (firmware_feature->feature_status) + firmware_feature->feature_status(ctrl_info, firmware_feature); +} + +static DEFINE_MUTEX(pqi_firmware_features_mutex); + +static struct pqi_firmware_feature pqi_firmware_features[] = { + { + .feature_name = "Online Firmware Activation", + .feature_bit = PQI_FIRMWARE_FEATURE_OFA, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "Serial Management Protocol", + .feature_bit = PQI_FIRMWARE_FEATURE_SMP, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "Maximum Known Feature", + .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 0 Read Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 1 Read Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 5 Read Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 6 Read Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 0 Write Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 1 Write Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "RAID 5 Write Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "RAID 6 Write Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "New Soft Reset Handshake", + .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "RAID IU Timeout", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "TMF IU Timeout", + .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT, + .feature_status = pqi_ctrl_update_feature_flags, + 
}, + { + .feature_name = "RAID Bypass on encrypted logical volumes on NVMe", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "Firmware Triage", + .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "RPL Extended Formats 4 and 5", + .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "Multi-LUN Target", + .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT, + .feature_status = pqi_ctrl_update_feature_flags, + }, +}; + +static void pqi_process_firmware_features( + struct pqi_config_table_section_info *section_info) +{ + int rc; + struct pqi_ctrl_info *ctrl_info; + struct pqi_config_table_firmware_features *firmware_features; + void __iomem *firmware_features_iomem_addr; + unsigned int i; + unsigned int num_features_supported; + + ctrl_info = section_info->ctrl_info; + firmware_features = section_info->section; + firmware_features_iomem_addr = section_info->section_iomem_addr; + + for (i = 0, num_features_supported = 0; + i < ARRAY_SIZE(pqi_firmware_features); i++) { + if (pqi_is_firmware_feature_supported(firmware_features, + pqi_firmware_features[i].feature_bit)) { + pqi_firmware_features[i].supported = true; + num_features_supported++; + } else { + pqi_firmware_feature_update(ctrl_info, + &pqi_firmware_features[i]); + } + } + + if (num_features_supported == 0) + return; + + for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { + if (!pqi_firmware_features[i].supported) + continue; + pqi_request_firmware_feature(firmware_features, + pqi_firmware_features[i].feature_bit); + } + + rc = pqi_enable_firmware_features(ctrl_info, firmware_features, + firmware_features_iomem_addr); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "failed to enable firmware features in PQI configuration table\n"); + for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { + if (!pqi_firmware_features[i].supported) + continue; + pqi_firmware_feature_update(ctrl_info, + &pqi_firmware_features[i]); + } + return; + } + + for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { + if (!pqi_firmware_features[i].supported) + continue; + if (pqi_is_firmware_feature_enabled(firmware_features, + firmware_features_iomem_addr, + pqi_firmware_features[i].feature_bit)) { + pqi_firmware_features[i].enabled = true; + } + pqi_firmware_feature_update(ctrl_info, + &pqi_firmware_features[i]); + } +} + +static void pqi_init_firmware_features(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { + pqi_firmware_features[i].supported = false; + pqi_firmware_features[i].enabled = false; + } +} + +static void pqi_process_firmware_features_section( + struct pqi_config_table_section_info *section_info) +{ + mutex_lock(&pqi_firmware_features_mutex); + pqi_init_firmware_features(); + pqi_process_firmware_features(section_info); + mutex_unlock(&pqi_firmware_features_mutex); +} + +/* + * Reset all controller settings that can be initialized during the processing + * of the PQI Configuration Table. 
+ */
+
+static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->heartbeat_counter = NULL;
+	ctrl_info->soft_reset_status = NULL;
+	ctrl_info->soft_reset_handshake_supported = false;
+	ctrl_info->enable_r1_writes = false;
+	ctrl_info->enable_r5_writes = false;
+	ctrl_info->enable_r6_writes = false;
+	ctrl_info->raid_iu_timeout_supported = false;
+	ctrl_info->tmf_iu_timeout_supported = false;
+	ctrl_info->firmware_triage_supported = false;
+	ctrl_info->rpl_extended_format_4_5_supported = false;
+	ctrl_info->multi_lun_device_supported = false;
+}
+
+static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
+{
+	u32 table_length;
+	u32 section_offset;
+	bool firmware_feature_section_present;
+	void __iomem *table_iomem_addr;
+	struct pqi_config_table *config_table;
+	struct pqi_config_table_section_header *section;
+	struct pqi_config_table_section_info section_info;
+	struct pqi_config_table_section_info feature_section_info = {0};
+
+	table_length = ctrl_info->config_table_length;
+	if (table_length == 0)
+		return 0;
+
+	config_table = kmalloc(table_length, GFP_KERNEL);
+	if (!config_table) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"failed to allocate memory for PQI configuration table\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Copy the config table contents from I/O memory space into the
+	 * temporary buffer.
+	 */
+	table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
+	memcpy_fromio(config_table, table_iomem_addr, table_length);
+
+	firmware_feature_section_present = false;
+	section_info.ctrl_info = ctrl_info;
+	section_offset = get_unaligned_le32(&config_table->first_section_offset);
+
+	while (section_offset) {
+		section = (void *)config_table + section_offset;
+
+		section_info.section = section;
+		section_info.section_offset = section_offset;
+		section_info.section_iomem_addr = table_iomem_addr + section_offset;
+
+		switch (get_unaligned_le16(&section->section_id)) {
+		case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
+			firmware_feature_section_present = true;
+			feature_section_info = section_info;
+			break;
+		case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
+			if (pqi_disable_heartbeat)
+				dev_warn(&ctrl_info->pci_dev->dev,
+					"heartbeat disabled by module parameter\n");
+			else
+				ctrl_info->heartbeat_counter =
+					table_iomem_addr +
+					section_offset +
+					offsetof(struct pqi_config_table_heartbeat,
+						heartbeat_counter);
+			break;
+		case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
+			ctrl_info->soft_reset_status =
+				table_iomem_addr +
+				section_offset +
+				offsetof(struct pqi_config_table_soft_reset,
+					soft_reset_status);
+			break;
+		}
+
+		section_offset = get_unaligned_le16(&section->next_section_offset);
+	}
+
+	/*
+	 * We process the firmware feature section after all other sections
+	 * have been processed so that the feature bit callbacks can take
+	 * into account the settings configured by other sections.
+	 */
+	if (firmware_feature_section_present)
+		pqi_process_firmware_features_section(&feature_section_info);
+
+	kfree(config_table);
+
+	return 0;
+}
+
+/*
+ * Switches the controller from PQI mode back into SIS mode.
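+ * This is done by issuing a PQI reset and then asking the SIS firmware to
+ * re-enable SIS mode.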
*/ + +static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + + pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); + rc = pqi_reset(ctrl_info); + if (rc) + return rc; + rc = sis_reenable_sis_mode(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "re-enabling SIS mode failed with error %d\n", rc); + return rc; + } + pqi_save_ctrl_mode(ctrl_info, SIS_MODE); + + return 0; +} + +/* + * If the controller isn't already in SIS mode, this function forces it into + * SIS mode. + */ + +static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) +{ + if (!sis_is_firmware_running(ctrl_info)) + return -ENXIO; + + if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) + return 0; + + if (sis_is_kernel_up(ctrl_info)) { + pqi_save_ctrl_mode(ctrl_info, SIS_MODE); + return 0; + } + + return pqi_revert_to_sis_mode(ctrl_info); +} + +static void pqi_perform_lockup_action(void) +{ + switch (pqi_lockup_action) { + case PANIC: + panic("FATAL: Smart Family Controller lockup detected"); + break; + case REBOOT: + emergency_restart(); + break; + case NONE: + default: + break; + } +} + +static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + u32 product_id; + + if (reset_devices) { + if (pqi_is_fw_triage_supported(ctrl_info)) { + rc = sis_wait_for_fw_triage_completion(ctrl_info); + if (rc) + return rc; + } + sis_soft_reset(ctrl_info); + ssleep(PQI_POST_RESET_DELAY_SECS); + } else { + rc = pqi_force_sis_mode(ctrl_info); + if (rc) + return rc; + } + + /* + * Wait until the controller is ready to start accepting SIS + * commands. + */ + rc = sis_wait_for_ctrl_ready(ctrl_info); + if (rc) { + if (reset_devices) { + dev_err(&ctrl_info->pci_dev->dev, + "kdump init failed with error %d\n", rc); + pqi_lockup_action = REBOOT; + pqi_perform_lockup_action(); + } + return rc; + } + + /* + * Get the controller properties. This allows us to determine + * whether or not it supports PQI mode. + */ + rc = sis_get_ctrl_properties(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error obtaining controller properties\n"); + return rc; + } + + rc = sis_get_pqi_capabilities(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error obtaining controller capabilities\n"); + return rc; + } + + product_id = sis_get_product_id(ctrl_info); + ctrl_info->product_id = (u8)product_id; + ctrl_info->product_revision = (u8)(product_id >> 8); + + if (reset_devices) { + if (ctrl_info->max_outstanding_requests > + PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) + ctrl_info->max_outstanding_requests = + PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; + } else { + if (ctrl_info->max_outstanding_requests > + PQI_MAX_OUTSTANDING_REQUESTS) + ctrl_info->max_outstanding_requests = + PQI_MAX_OUTSTANDING_REQUESTS; + } + + pqi_calculate_io_resources(ctrl_info); + + rc = pqi_alloc_error_buffer(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "failed to allocate PQI error buffer\n"); + return rc; + } + + /* + * If the function we are about to call succeeds, the + * controller will transition from legacy SIS mode + * into PQI mode. + */ + rc = sis_init_base_struct_addr(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error initializing PQI mode\n"); + return rc; + } + + /* Wait for the controller to complete the SIS -> PQI transition. */ + rc = pqi_wait_for_pqi_mode_ready(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "transition to PQI mode failed\n"); + return rc; + } + + /* From here on, we are running in PQI mode. 
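+	 * The admin and operational queues created below carry all further
+	 * commands to the controller.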
*/ + ctrl_info->pqi_mode_enabled = true; + pqi_save_ctrl_mode(ctrl_info, PQI_MODE); + + rc = pqi_alloc_admin_queues(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "failed to allocate admin queues\n"); + return rc; + } + + rc = pqi_create_admin_queues(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error creating admin queues\n"); + return rc; + } + + rc = pqi_report_device_capability(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "obtaining device capability failed\n"); + return rc; + } + + rc = pqi_validate_device_capability(ctrl_info); + if (rc) + return rc; + + pqi_calculate_queue_resources(ctrl_info); + + rc = pqi_enable_msix_interrupts(ctrl_info); + if (rc) + return rc; + + if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { + ctrl_info->max_msix_vectors = + ctrl_info->num_msix_vectors_enabled; + pqi_calculate_queue_resources(ctrl_info); + } + + rc = pqi_alloc_io_resources(ctrl_info); + if (rc) + return rc; + + rc = pqi_alloc_operational_queues(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "failed to allocate operational queues\n"); + return rc; + } + + pqi_init_operational_queues(ctrl_info); + + rc = pqi_create_queues(ctrl_info); + if (rc) + return rc; + + rc = pqi_request_irqs(ctrl_info); + if (rc) + return rc; + + pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); + + ctrl_info->controller_online = true; + + rc = pqi_process_config_table(ctrl_info); + if (rc) + return rc; + + pqi_start_heartbeat_timer(ctrl_info); + + if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { + rc = pqi_get_advanced_raid_bypass_config(ctrl_info); + if (rc) { /* Supported features not returned correctly. */ + dev_err(&ctrl_info->pci_dev->dev, + "error obtaining advanced RAID bypass configuration\n"); + return rc; + } + ctrl_info->ciss_report_log_flags |= + CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; + } + + rc = pqi_enable_events(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error enabling events\n"); + return rc; + } + + /* Register with the SCSI subsystem. 
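+	 * Afterwards, fetch the product details and serial number and kick
+	 * off the initial device scan.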
*/ + rc = pqi_register_scsi(ctrl_info); + if (rc) + return rc; + + rc = pqi_get_ctrl_product_details(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error obtaining product details\n"); + return rc; + } + + rc = pqi_get_ctrl_serial_number(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error obtaining ctrl serial number\n"); + return rc; + } + + rc = pqi_set_diag_rescan(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error enabling multi-lun rescan\n"); + return rc; + } + + rc = pqi_write_driver_version_to_host_wellness(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error updating host wellness\n"); + return rc; + } + + pqi_schedule_update_time_worker(ctrl_info); + + pqi_scan_scsi_devices(ctrl_info); + + return 0; +} + +static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) +{ + unsigned int i; + struct pqi_admin_queues *admin_queues; + struct pqi_event_queue *event_queue; + + admin_queues = &ctrl_info->admin_queues; + admin_queues->iq_pi_copy = 0; + admin_queues->oq_ci_copy = 0; + writel(0, admin_queues->oq_pi); + + for (i = 0; i < ctrl_info->num_queue_groups; i++) { + ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; + ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; + ctrl_info->queue_groups[i].oq_ci_copy = 0; + + writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); + writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); + writel(0, ctrl_info->queue_groups[i].oq_pi); + } + + event_queue = &ctrl_info->event_queue; + writel(0, event_queue->oq_pi); + event_queue->oq_ci_copy = 0; +} + +static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + + rc = pqi_force_sis_mode(ctrl_info); + if (rc) + return rc; + + /* + * Wait until the controller is ready to start accepting SIS + * commands. + */ + rc = sis_wait_for_ctrl_ready_resume(ctrl_info); + if (rc) + return rc; + + /* + * Get the controller properties. This allows us to determine + * whether or not it supports PQI mode. + */ + rc = sis_get_ctrl_properties(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error obtaining controller properties\n"); + return rc; + } + + rc = sis_get_pqi_capabilities(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error obtaining controller capabilities\n"); + return rc; + } + + /* + * If the function we are about to call succeeds, the + * controller will transition from legacy SIS mode + * into PQI mode. + */ + rc = sis_init_base_struct_addr(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error initializing PQI mode\n"); + return rc; + } + + /* Wait for the controller to complete the SIS -> PQI transition. */ + rc = pqi_wait_for_pqi_mode_ready(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "transition to PQI mode failed\n"); + return rc; + } + + /* From here on, we are running in PQI mode. 
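+	 * The queue memory allocated during the first initialization is reused;
+	 * only the queue state and the firmware-side queue registration are
+	 * re-established below.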
*/ + ctrl_info->pqi_mode_enabled = true; + pqi_save_ctrl_mode(ctrl_info, PQI_MODE); + + pqi_reinit_queues(ctrl_info); + + rc = pqi_create_admin_queues(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error creating admin queues\n"); + return rc; + } + + rc = pqi_create_queues(ctrl_info); + if (rc) + return rc; + + pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); + + ctrl_info->controller_online = true; + pqi_ctrl_unblock_requests(ctrl_info); + + pqi_ctrl_reset_config(ctrl_info); + + rc = pqi_process_config_table(ctrl_info); + if (rc) + return rc; + + pqi_start_heartbeat_timer(ctrl_info); + + if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { + rc = pqi_get_advanced_raid_bypass_config(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error obtaining advanced RAID bypass configuration\n"); + return rc; + } + ctrl_info->ciss_report_log_flags |= + CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; + } + + rc = pqi_enable_events(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error enabling events\n"); + return rc; + } + + rc = pqi_get_ctrl_product_details(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error obtaining product details\n"); + return rc; + } + + rc = pqi_set_diag_rescan(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error enabling multi-lun rescan\n"); + return rc; + } + + rc = pqi_write_driver_version_to_host_wellness(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error updating host wellness\n"); + return rc; + } + + if (pqi_ofa_in_progress(ctrl_info)) + pqi_ctrl_unblock_scan(ctrl_info); + + pqi_scan_scsi_devices(ctrl_info); + + return 0; +} + +static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout) +{ + int rc; + + rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); + + return pcibios_err_to_errno(rc); +} + +static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + u64 mask; + + rc = pci_enable_device(ctrl_info->pci_dev); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "failed to enable PCI device\n"); + return rc; + } + + if (sizeof(dma_addr_t) > 4) + mask = DMA_BIT_MASK(64); + else + mask = DMA_BIT_MASK(32); + + rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); + goto disable_device; + } + + rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "failed to obtain PCI resources\n"); + goto disable_device; + } + + ctrl_info->iomem_base = ioremap(pci_resource_start( + ctrl_info->pci_dev, 0), + pci_resource_len(ctrl_info->pci_dev, 0)); + if (!ctrl_info->iomem_base) { + dev_err(&ctrl_info->pci_dev->dev, + "failed to map memory for controller registers\n"); + rc = -ENOMEM; + goto release_regions; + } + +#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 + + /* Increase the PCIe completion timeout. */ + rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, + PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "failed to set PCIe completion timeout\n"); + goto release_regions; + } + + /* Enable bus mastering. 
*/ + pci_set_master(ctrl_info->pci_dev); + + ctrl_info->registers = ctrl_info->iomem_base; + ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; + + pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); + + return 0; + +release_regions: + pci_release_regions(ctrl_info->pci_dev); +disable_device: + pci_disable_device(ctrl_info->pci_dev); + + return rc; +} + +static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) +{ + iounmap(ctrl_info->iomem_base); + pci_release_regions(ctrl_info->pci_dev); + if (pci_is_enabled(ctrl_info->pci_dev)) + pci_disable_device(ctrl_info->pci_dev); + pci_set_drvdata(ctrl_info->pci_dev, NULL); +} + +static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) +{ + struct pqi_ctrl_info *ctrl_info; + + ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), + GFP_KERNEL, numa_node); + if (!ctrl_info) + return NULL; + + mutex_init(&ctrl_info->scan_mutex); + mutex_init(&ctrl_info->lun_reset_mutex); + mutex_init(&ctrl_info->ofa_mutex); + + INIT_LIST_HEAD(&ctrl_info->scsi_device_list); + spin_lock_init(&ctrl_info->scsi_device_list_lock); + + INIT_WORK(&ctrl_info->event_work, pqi_event_worker); + atomic_set(&ctrl_info->num_interrupts, 0); + + INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); + INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); + + timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); + INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); + + INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker); + INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker); + + sema_init(&ctrl_info->sync_request_sem, + PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); + init_waitqueue_head(&ctrl_info->block_requests_wait); + + ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; + ctrl_info->irq_mode = IRQ_MODE_NONE; + ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; + + ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; + ctrl_info->max_transfer_encrypted_sas_sata = + PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA; + ctrl_info->max_transfer_encrypted_nvme = + PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME; + ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6; + ctrl_info->max_write_raid_1_10_2drive = ~0; + ctrl_info->max_write_raid_1_10_3drive = ~0; + ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts; + + return ctrl_info; +} + +static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) +{ + kfree(ctrl_info); +} + +static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) +{ + pqi_free_irqs(ctrl_info); + pqi_disable_msix_interrupts(ctrl_info); +} + +static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) +{ + pqi_free_interrupts(ctrl_info); + if (ctrl_info->queue_memory_base) + dma_free_coherent(&ctrl_info->pci_dev->dev, + ctrl_info->queue_memory_length, + ctrl_info->queue_memory_base, + ctrl_info->queue_memory_base_dma_handle); + if (ctrl_info->admin_queue_memory_base) + dma_free_coherent(&ctrl_info->pci_dev->dev, + ctrl_info->admin_queue_memory_length, + ctrl_info->admin_queue_memory_base, + ctrl_info->admin_queue_memory_base_dma_handle); + pqi_free_all_io_requests(ctrl_info); + if (ctrl_info->error_buffer) + dma_free_coherent(&ctrl_info->pci_dev->dev, + ctrl_info->error_buffer_length, + ctrl_info->error_buffer, + ctrl_info->error_buffer_dma_handle); + if (ctrl_info->iomem_base) + pqi_cleanup_pci_init(ctrl_info); + pqi_free_ctrl_info(ctrl_info); +} + +static void 
pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) +{ + ctrl_info->controller_online = false; + pqi_stop_heartbeat_timer(ctrl_info); + pqi_ctrl_block_requests(ctrl_info); + pqi_cancel_rescan_worker(ctrl_info); + pqi_cancel_update_time_worker(ctrl_info); + if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) { + pqi_fail_all_outstanding_requests(ctrl_info); + ctrl_info->pqi_mode_enabled = false; + } + pqi_unregister_scsi(ctrl_info); + if (ctrl_info->pqi_mode_enabled) + pqi_revert_to_sis_mode(ctrl_info); + pqi_free_ctrl_resources(ctrl_info); +} + +static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) +{ + pqi_ctrl_block_scan(ctrl_info); + pqi_scsi_block_requests(ctrl_info); + pqi_ctrl_block_device_reset(ctrl_info); + pqi_ctrl_block_requests(ctrl_info); + pqi_ctrl_wait_until_quiesced(ctrl_info); + pqi_stop_heartbeat_timer(ctrl_info); +} + +static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) +{ + pqi_start_heartbeat_timer(ctrl_info); + pqi_ctrl_unblock_requests(ctrl_info); + pqi_ctrl_unblock_device_reset(ctrl_info); + pqi_scsi_unblock_requests(ctrl_info); + pqi_ctrl_unblock_scan(ctrl_info); +} + +static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size) +{ + int i; + u32 sg_count; + struct device *dev; + struct pqi_ofa_memory *ofap; + struct pqi_sg_descriptor *mem_descriptor; + dma_addr_t dma_handle; + + ofap = ctrl_info->pqi_ofa_mem_virt_addr; + + sg_count = DIV_ROUND_UP(total_size, chunk_size); + if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS) + goto out; + + ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL); + if (!ctrl_info->pqi_ofa_chunk_virt_addr) + goto out; + + dev = &ctrl_info->pci_dev->dev; + + for (i = 0; i < sg_count; i++) { + ctrl_info->pqi_ofa_chunk_virt_addr[i] = + dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL); + if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) + goto out_free_chunks; + mem_descriptor = &ofap->sg_descriptor[i]; + put_unaligned_le64((u64)dma_handle, &mem_descriptor->address); + put_unaligned_le32(chunk_size, &mem_descriptor->length); + } + + put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); + put_unaligned_le16(sg_count, &ofap->num_memory_descriptors); + put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated); + + return 0; + +out_free_chunks: + while (--i >= 0) { + mem_descriptor = &ofap->sg_descriptor[i]; + dma_free_coherent(dev, chunk_size, + ctrl_info->pqi_ofa_chunk_virt_addr[i], + get_unaligned_le64(&mem_descriptor->address)); + } + kfree(ctrl_info->pqi_ofa_chunk_virt_addr); + +out: + return -ENOMEM; +} + +static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) +{ + u32 total_size; + u32 chunk_size; + u32 min_chunk_size; + + if (ctrl_info->ofa_bytes_requested == 0) + return 0; + + total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested); + min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS); + min_chunk_size = PAGE_ALIGN(min_chunk_size); + + for (chunk_size = total_size; chunk_size >= min_chunk_size;) { + if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0) + return 0; + chunk_size /= 2; + chunk_size = PAGE_ALIGN(chunk_size); + } + + return -ENOMEM; +} + +static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info) +{ + struct device *dev; + struct pqi_ofa_memory *ofap; + + dev = &ctrl_info->pci_dev->dev; + + ofap = dma_alloc_coherent(dev, sizeof(*ofap), + &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL); + if (!ofap) + return; + + 
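+ /* Publish the OFA descriptor so pqi_ofa_alloc_host_buffer() can fill its SG entries with the DMA chunks that pqi_ofa_host_memory_update() later reports to the controller. */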
ctrl_info->pqi_ofa_mem_virt_addr = ofap; + + if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) { + dev_err(dev, + "failed to allocate host buffer for Online Firmware Activation\n"); + dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle); + ctrl_info->pqi_ofa_mem_virt_addr = NULL; + return; + } + + put_unaligned_le16(PQI_OFA_VERSION, &ofap->version); + memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature)); +} + +static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) +{ + unsigned int i; + struct device *dev; + struct pqi_ofa_memory *ofap; + struct pqi_sg_descriptor *mem_descriptor; + unsigned int num_memory_descriptors; + + ofap = ctrl_info->pqi_ofa_mem_virt_addr; + if (!ofap) + return; + + dev = &ctrl_info->pci_dev->dev; + + if (get_unaligned_le32(&ofap->bytes_allocated) == 0) + goto out; + + mem_descriptor = ofap->sg_descriptor; + num_memory_descriptors = + get_unaligned_le16(&ofap->num_memory_descriptors); + + for (i = 0; i < num_memory_descriptors; i++) { + dma_free_coherent(dev, + get_unaligned_le32(&mem_descriptor[i].length), + ctrl_info->pqi_ofa_chunk_virt_addr[i], + get_unaligned_le64(&mem_descriptor[i].address)); + } + kfree(ctrl_info->pqi_ofa_chunk_virt_addr); + +out: + dma_free_coherent(dev, sizeof(*ofap), ofap, + ctrl_info->pqi_ofa_mem_dma_handle); + ctrl_info->pqi_ofa_mem_virt_addr = NULL; +} + +static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) +{ + u32 buffer_length; + struct pqi_vendor_general_request request; + struct pqi_ofa_memory *ofap; + + memset(&request, 0, sizeof(request)); + + request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; + put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, + &request.header.iu_length); + put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE, + &request.function_code); + + ofap = ctrl_info->pqi_ofa_mem_virt_addr; + + if (ofap) { + buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) + + get_unaligned_le16(&ofap->num_memory_descriptors) * + sizeof(struct pqi_sg_descriptor); + + put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle, + &request.data.ofa_memory_allocation.buffer_address); + put_unaligned_le32(buffer_length, + &request.data.ofa_memory_allocation.buffer_length); + } + + return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); +} + +static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) +{ + ssleep(delay_secs); + + return pqi_ctrl_init_resume(ctrl_info); +} + +static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { + .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, + .status = SAM_STAT_CHECK_CONDITION, +}; + +static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) +{ + unsigned int i; + struct pqi_io_request *io_request; + struct scsi_cmnd *scmd; + struct scsi_device *sdev; + + for (i = 0; i < ctrl_info->max_io_slots; i++) { + io_request = &ctrl_info->io_request_pool[i]; + if (atomic_read(&io_request->refcount) == 0) + continue; + + scmd = io_request->scmd; + if (scmd) { + sdev = scmd->device; + if (!sdev || !scsi_device_online(sdev)) { + pqi_free_io_request(io_request); + continue; + } else { + set_host_byte(scmd, DID_NO_CONNECT); + } + } else { + io_request->status = -ENXIO; + io_request->error_info = + &pqi_ctrl_offline_raid_error_info; + } + + io_request->io_complete_callback(io_request, + io_request->context); + } +} + +static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) +{ + pqi_perform_lockup_action(); 
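+ /* After the lockup action, quiesce the controller: stop the heartbeat, drop interrupts, cancel the rescan and update-time workers, wait for in-flight work to settle, then fail every outstanding request and unblock submission. */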
+ pqi_stop_heartbeat_timer(ctrl_info); + pqi_free_interrupts(ctrl_info); + pqi_cancel_rescan_worker(ctrl_info); + pqi_cancel_update_time_worker(ctrl_info); + pqi_ctrl_wait_until_quiesced(ctrl_info); + pqi_fail_all_outstanding_requests(ctrl_info); + pqi_ctrl_unblock_requests(ctrl_info); +} + +static void pqi_ctrl_offline_worker(struct work_struct *work) +{ + struct pqi_ctrl_info *ctrl_info; + + ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); + pqi_take_ctrl_offline_deferred(ctrl_info); +} + +static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) +{ + char *string; + + switch (ctrl_shutdown_reason) { + case PQI_IQ_NOT_DRAINED_TIMEOUT: + string = "inbound queue not drained timeout"; + break; + case PQI_LUN_RESET_TIMEOUT: + string = "LUN reset timeout"; + break; + case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT: + string = "I/O pending timeout after LUN reset"; + break; + case PQI_NO_HEARTBEAT: + string = "no controller heartbeat detected"; + break; + case PQI_FIRMWARE_KERNEL_NOT_UP: + string = "firmware kernel not ready"; + break; + case PQI_OFA_RESPONSE_TIMEOUT: + string = "OFA response timeout"; + break; + case PQI_INVALID_REQ_ID: + string = "invalid request ID"; + break; + case PQI_UNMATCHED_REQ_ID: + string = "unmatched request ID"; + break; + case PQI_IO_PI_OUT_OF_RANGE: + string = "I/O queue producer index out of range"; + break; + case PQI_EVENT_PI_OUT_OF_RANGE: + string = "event queue producer index out of range"; + break; + case PQI_UNEXPECTED_IU_TYPE: + string = "unexpected IU type"; + break; + default: + string = "unknown reason"; + break; + } + + return string; +} + +static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, + enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) +{ + if (!ctrl_info->controller_online) + return; + + ctrl_info->controller_online = false; + ctrl_info->pqi_mode_enabled = false; + pqi_ctrl_block_requests(ctrl_info); + if (!pqi_disable_ctrl_shutdown) + sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason); + pci_disable_device(ctrl_info->pci_dev); + dev_err(&ctrl_info->pci_dev->dev, + "controller offline: reason code 0x%x (%s)\n", + ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason)); + schedule_work(&ctrl_info->ctrl_offline_work); +} + +static void pqi_print_ctrl_info(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + char *ctrl_description; + + if (id->driver_data) + ctrl_description = (char *)id->driver_data; + else + ctrl_description = "Microchip Smart Family Controller"; + + dev_info(&pci_dev->dev, "%s found\n", ctrl_description); +} + +static int pqi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + int rc; + int node; + struct pqi_ctrl_info *ctrl_info; + + pqi_print_ctrl_info(pci_dev, id); + + if (pqi_disable_device_id_wildcards && + id->subvendor == PCI_ANY_ID && + id->subdevice == PCI_ANY_ID) { + dev_warn(&pci_dev->dev, + "controller not probed because device ID wildcards are disabled\n"); + return -ENODEV; + } + + if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID) + dev_warn(&pci_dev->dev, + "controller device ID matched using wildcards\n"); + + node = dev_to_node(&pci_dev->dev); + if (node == NUMA_NO_NODE) { + node = cpu_to_node(0); + if (node == NUMA_NO_NODE) + node = 0; + set_dev_node(&pci_dev->dev, node); + } + + ctrl_info = pqi_alloc_ctrl_info(node); + if (!ctrl_info) { + dev_err(&pci_dev->dev, + "failed to allocate controller info block\n"); + return -ENOMEM; + } + ctrl_info->numa_node = node; 
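+ /* Attach the PCI device and run the common init path; any failure below unwinds through pqi_remove_ctrl(). */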
+ + ctrl_info->pci_dev = pci_dev; + + rc = pqi_pci_init(ctrl_info); + if (rc) + goto error; + + rc = pqi_ctrl_init(ctrl_info); + if (rc) + goto error; + + return 0; + +error: + pqi_remove_ctrl(ctrl_info); + + return rc; +} + +static void pqi_pci_remove(struct pci_dev *pci_dev) +{ + struct pqi_ctrl_info *ctrl_info; + u16 vendor_id; + int rc; + + ctrl_info = pci_get_drvdata(pci_dev); + if (!ctrl_info) + return; + + pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id); + if (vendor_id == 0xffff) + ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL; + else + ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL; + + if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) { + rc = pqi_flush_cache(ctrl_info, RESTART); + if (rc) + dev_err(&pci_dev->dev, + "unable to flush controller cache during remove\n"); + } + + pqi_remove_ctrl(ctrl_info); +} + +static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info) +{ + unsigned int i; + struct pqi_io_request *io_request; + struct scsi_cmnd *scmd; + + for (i = 0; i < ctrl_info->max_io_slots; i++) { + io_request = &ctrl_info->io_request_pool[i]; + if (atomic_read(&io_request->refcount) == 0) + continue; + scmd = io_request->scmd; + WARN_ON(scmd != NULL); /* IO command from SML */ + WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated*/ + } +} + +static void pqi_shutdown(struct pci_dev *pci_dev) +{ + int rc; + struct pqi_ctrl_info *ctrl_info; + enum bmic_flush_cache_shutdown_event shutdown_event; + + ctrl_info = pci_get_drvdata(pci_dev); + if (!ctrl_info) { + dev_err(&pci_dev->dev, + "cache could not be flushed\n"); + return; + } + + pqi_wait_until_ofa_finished(ctrl_info); + + pqi_scsi_block_requests(ctrl_info); + pqi_ctrl_block_device_reset(ctrl_info); + pqi_ctrl_block_requests(ctrl_info); + pqi_ctrl_wait_until_quiesced(ctrl_info); + + if (system_state == SYSTEM_RESTART) + shutdown_event = RESTART; + else + shutdown_event = SHUTDOWN; + + /* + * Write all data in the controller's battery-backed cache to + * storage. 
+ */ + rc = pqi_flush_cache(ctrl_info, shutdown_event); + if (rc) + dev_err(&pci_dev->dev, + "unable to flush controller cache during shutdown\n"); + + pqi_crash_if_pending_command(ctrl_info); + pqi_reset(ctrl_info); +} + +static void pqi_process_lockup_action_param(void) +{ + unsigned int i; + + if (!pqi_lockup_action_param) + return; + + for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { + if (strcmp(pqi_lockup_action_param, + pqi_lockup_actions[i].name) == 0) { + pqi_lockup_action = pqi_lockup_actions[i].action; + return; + } + } + + pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", + DRIVER_NAME_SHORT, pqi_lockup_action_param); +} + +#define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30 +#define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60) + +static void pqi_process_ctrl_ready_timeout_param(void) +{ + if (pqi_ctrl_ready_timeout_secs == 0) + return; + + if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) { + pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n", + DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS); + pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS; + } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) { + pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n", + DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS); + pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS; + } + + sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs; +} + +static void pqi_process_module_params(void) +{ + pqi_process_lockup_action_param(); + pqi_process_ctrl_ready_timeout_param(); +} + +#if defined(CONFIG_PM) + +static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev) +{ + if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304) + return RESTART; + + return SUSPEND; +} + +static int pqi_suspend_or_freeze(struct device *dev, bool suspend) +{ + struct pci_dev *pci_dev; + struct pqi_ctrl_info *ctrl_info; + + pci_dev = to_pci_dev(dev); + ctrl_info = pci_get_drvdata(pci_dev); + + pqi_wait_until_ofa_finished(ctrl_info); + + pqi_ctrl_block_scan(ctrl_info); + pqi_scsi_block_requests(ctrl_info); + pqi_ctrl_block_device_reset(ctrl_info); + pqi_ctrl_block_requests(ctrl_info); + pqi_ctrl_wait_until_quiesced(ctrl_info); + + if (suspend) { + enum bmic_flush_cache_shutdown_event shutdown_event; + + shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev); + pqi_flush_cache(ctrl_info, shutdown_event); + } + + pqi_stop_heartbeat_timer(ctrl_info); + pqi_crash_if_pending_command(ctrl_info); + pqi_free_irqs(ctrl_info); + + ctrl_info->controller_online = false; + ctrl_info->pqi_mode_enabled = false; + + return 0; +} + +static __maybe_unused int pqi_suspend(struct device *dev) +{ + return pqi_suspend_or_freeze(dev, true); +} + +static int pqi_resume_or_restore(struct device *dev) +{ + int rc; + struct pci_dev *pci_dev; + struct pqi_ctrl_info *ctrl_info; + + pci_dev = to_pci_dev(dev); + ctrl_info = pci_get_drvdata(pci_dev); + + rc = pqi_request_irqs(ctrl_info); + if (rc) + return rc; + + pqi_ctrl_unblock_device_reset(ctrl_info); + pqi_ctrl_unblock_requests(ctrl_info); + 
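+ /* Lift the remaining blocks taken in pqi_suspend_or_freeze(), allow the post-reset settling delay, then re-run the resume-time controller init. */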
pqi_scsi_unblock_requests(ctrl_info); + pqi_ctrl_unblock_scan(ctrl_info); + + ssleep(PQI_POST_RESET_DELAY_SECS); + + return pqi_ctrl_init_resume(ctrl_info); +} + +static int pqi_freeze(struct device *dev) +{ + return pqi_suspend_or_freeze(dev, false); +} + +static int pqi_thaw(struct device *dev) +{ + int rc; + struct pci_dev *pci_dev; + struct pqi_ctrl_info *ctrl_info; + + pci_dev = to_pci_dev(dev); + ctrl_info = pci_get_drvdata(pci_dev); + + rc = pqi_request_irqs(ctrl_info); + if (rc) + return rc; + + ctrl_info->controller_online = true; + ctrl_info->pqi_mode_enabled = true; + + pqi_ctrl_unblock_device_reset(ctrl_info); + pqi_ctrl_unblock_requests(ctrl_info); + pqi_scsi_unblock_requests(ctrl_info); + pqi_ctrl_unblock_scan(ctrl_info); + + return 0; +} + +static int pqi_poweroff(struct device *dev) +{ + struct pci_dev *pci_dev; + struct pqi_ctrl_info *ctrl_info; + enum bmic_flush_cache_shutdown_event shutdown_event; + + pci_dev = to_pci_dev(dev); + ctrl_info = pci_get_drvdata(pci_dev); + + shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev); + pqi_flush_cache(ctrl_info, shutdown_event); + + return 0; +} + +static const struct dev_pm_ops pqi_pm_ops = { + .suspend = pqi_suspend, + .resume = pqi_resume_or_restore, + .freeze = pqi_freeze, + .thaw = pqi_thaw, + .poweroff = pqi_poweroff, + .restore = pqi_resume_or_restore, +}; + +#endif /* CONFIG_PM */ + +/* Define the PCI IDs for the controllers that we support. */ +static const struct pci_device_id pqi_pci_id_table[] = { + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x105b, 0x1211) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x105b, 0x1321) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x152d, 0x8a22) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x152d, 0x8a23) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x152d, 0x8a24) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x152d, 0x8a36) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x152d, 0x8a37) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x1104) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x1105) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x1106) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x1107) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x1108) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x1109) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x110b) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x8460) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x8461) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0xc460) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0xc461) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0xf460) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0xf461) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0045) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0046) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0047) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0048) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x004a) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x004b) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x004c) + }, + 
{ + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x004f) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0051) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0052) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0053) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0054) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x006b) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x006c) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x006d) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x006f) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0070) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0071) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0072) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0086) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0087) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0088) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0089) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x19e5, 0xd227) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x19e5, 0xd228) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x19e5, 0xd229) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x19e5, 0xd22a) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x19e5, 0xd22b) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x19e5, 0xd22c) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0110) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0608) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0659) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0800) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0801) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0802) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0803) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0804) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0805) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0806) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0807) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0808) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0809) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x080a) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0900) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0901) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0902) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0903) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0904) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0905) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 
0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0906) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0907) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0908) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x090a) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1200) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1201) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1202) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1280) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1281) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1282) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1300) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1301) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1302) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1303) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1304) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1380) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1400) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1402) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1410) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1411) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1412) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1420) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1430) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1440) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1441) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1450) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1452) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1460) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1461) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1462) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1463) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1470) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1471) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1472) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1473) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1474) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1475) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1480) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1490) + }, + { + 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1491) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14a0) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14a1) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14a2) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14a4) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14a5) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14a6) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14b0) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14b1) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14c0) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14c1) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14c2) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14c3) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14c4) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14d0) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14e0) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14f0) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADVANTECH, 0x8312) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_DELL, 0x1fe0) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0600) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0601) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0602) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0603) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0609) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0650) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0651) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0652) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0653) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0654) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0655) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0700) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x0701) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x1001) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x1002) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x1100) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x1101) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1590, 0x0294) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1590, 0x02db) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1590, 0x02dc) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1590, 0x032e) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 
0x028f, + 0x1590, 0x036f) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1590, 0x0381) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1590, 0x0382) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1590, 0x0383) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1d8d, 0x0800) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1d8d, 0x0908) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1d8d, 0x0806) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1d8d, 0x0916) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_GIGABYTE, 0x1000) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1dfc, 0x3161) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f0c, 0x3161) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x0804) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x0805) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x0806) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x5445) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x5446) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x5447) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x5449) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x544a) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x544b) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x544d) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x544e) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x544f) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x54da) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x54db) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x54dc) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x0b27) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x0b29) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x0b45) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cc4, 0x0101) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cc4, 0x0201) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0220) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0221) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0520) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0522) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0620) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0621) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0622) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0623) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1014, 0x0718) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1e93, 0x1000) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1e93, 0x1001) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1e93, 0x1002) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1e93, 0x1005) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1001) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 
0x028f, + 0x1f51, 0x1002) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1003) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1004) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1005) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1006) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1007) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1008) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1009) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x100a) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_ANY_ID, PCI_ANY_ID) + }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); + +static struct pci_driver pqi_pci_driver = { + .name = DRIVER_NAME_SHORT, + .id_table = pqi_pci_id_table, + .probe = pqi_pci_probe, + .remove = pqi_pci_remove, + .shutdown = pqi_shutdown, +#if defined(CONFIG_PM) + .driver = { + .pm = &pqi_pm_ops + }, +#endif +}; + +static int __init pqi_init(void) +{ + int rc; + + pr_info(DRIVER_NAME "\n"); + pqi_verify_structures(); + sis_verify_structures(); + + pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions); + if (!pqi_sas_transport_template) + return -ENODEV; + + pqi_process_module_params(); + + rc = pci_register_driver(&pqi_pci_driver); + if (rc) + sas_release_transport(pqi_sas_transport_template); + + return rc; +} + +static void __exit pqi_cleanup(void) +{ + pci_unregister_driver(&pqi_pci_driver); + sas_release_transport(pqi_sas_transport_template); +} + +module_init(pqi_init); +module_exit(pqi_cleanup); + +static void pqi_verify_structures(void) +{ + BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, + sis_host_to_ctrl_doorbell) != 0x20); + BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, + sis_interrupt_mask) != 0x34); + BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, + sis_ctrl_to_host_doorbell) != 0x9c); + BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, + sis_ctrl_to_host_doorbell_clear) != 0xa0); + BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, + sis_driver_scratch) != 0xb0); + BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, + sis_product_identifier) != 0xb4); + BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, + sis_firmware_status) != 0xbc); + BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, + sis_ctrl_shutdown_reason_code) != 0xcc); + BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, + sis_mailbox) != 0x1000); + BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, + pqi_registers) != 0x4000); + + BUILD_BUG_ON(offsetof(struct pqi_iu_header, + iu_type) != 0x0); + BUILD_BUG_ON(offsetof(struct pqi_iu_header, + iu_length) != 0x2); + BUILD_BUG_ON(offsetof(struct pqi_iu_header, + response_queue_id) != 0x4); + BUILD_BUG_ON(offsetof(struct pqi_iu_header, + driver_flags) != 0x6); + BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); + + BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, + status) != 0x0); + BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, + service_response) != 0x1); + BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, + data_present) != 0x2); + BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, + reserved) != 0x3); + BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, + residual_count) != 0x4); + BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, + data_length) != 0x8); + BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, + reserved1) != 0xa); + BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, + data) != 0xc); + BUILD_BUG_ON(sizeof(struct 
pqi_aio_error_info) != 0x10c); + + BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, + data_in_result) != 0x0); + BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, + data_out_result) != 0x1); + BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, + reserved) != 0x2); + BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, + status) != 0x5); + BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, + status_qualifier) != 0x6); + BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, + sense_data_length) != 0x8); + BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, + response_data_length) != 0xa); + BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, + data_in_transferred) != 0xc); + BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, + data_out_transferred) != 0x10); + BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, + data) != 0x14); + BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114); + + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + signature) != 0x0); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + function_and_status_code) != 0x8); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + max_admin_iq_elements) != 0x10); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + max_admin_oq_elements) != 0x11); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + admin_iq_element_length) != 0x12); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + admin_oq_element_length) != 0x13); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + max_reset_timeout) != 0x14); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + legacy_intx_status) != 0x18); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + legacy_intx_mask_set) != 0x1c); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + legacy_intx_mask_clear) != 0x20); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + device_status) != 0x40); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + admin_iq_pi_offset) != 0x48); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + admin_oq_ci_offset) != 0x50); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + admin_iq_element_array_addr) != 0x58); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + admin_oq_element_array_addr) != 0x60); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + admin_iq_ci_addr) != 0x68); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + admin_oq_pi_addr) != 0x70); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + admin_iq_num_elements) != 0x78); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + admin_oq_num_elements) != 0x79); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + admin_queue_int_msg_num) != 0x7a); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + device_error) != 0x80); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + error_details) != 0x88); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + device_reset) != 0x90); + BUILD_BUG_ON(offsetof(struct pqi_device_registers, + power_action) != 0x94); + BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); + + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + header.iu_type) != 0); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + header.iu_length) != 2); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + header.driver_flags) != 6); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + request_id) != 8); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + function_code) != 10); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.report_device_capability.buffer_length) != 44); + 
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.report_device_capability.sg_descriptor) != 48); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_iq.queue_id) != 12); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_iq.element_array_addr) != 16); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_iq.ci_addr) != 24); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_iq.num_elements) != 32); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_iq.element_length) != 34); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_iq.queue_protocol) != 36); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_oq.queue_id) != 12); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_oq.element_array_addr) != 16); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_oq.pi_addr) != 24); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_oq.num_elements) != 32); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_oq.element_length) != 34); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_oq.queue_protocol) != 36); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_oq.int_msg_num) != 40); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_oq.coalescing_count) != 42); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_oq.min_coalescing_time) != 44); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.create_operational_oq.max_coalescing_time) != 48); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, + data.delete_operational_queue.queue_id) != 12); + BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); + BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, + data.create_operational_iq) != 64 - 11); + BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, + data.create_operational_oq) != 64 - 11); + BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, + data.delete_operational_queue) != 64 - 11); + + BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, + header.iu_type) != 0); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, + header.iu_length) != 2); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, + header.driver_flags) != 6); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, + request_id) != 8); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, + function_code) != 10); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, + status) != 11); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, + data.create_operational_iq.status_descriptor) != 12); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, + data.create_operational_iq.iq_pi_offset) != 16); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, + data.create_operational_oq.status_descriptor) != 12); + BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, + data.create_operational_oq.oq_ci_offset) != 16); + BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); + + BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, + header.iu_type) != 0); + BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, + header.iu_length) != 2); + 
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, + header.response_queue_id) != 4); + BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, + header.driver_flags) != 6); + BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, + request_id) != 8); + BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, + nexus_id) != 10); + BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, + buffer_length) != 12); + BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, + lun_number) != 16); + BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, + protocol_specific) != 24); + BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, + error_index) != 27); + BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, + cdb) != 32); + BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, + timeout) != 60); + BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, + sg_descriptors) != 64); + BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != + PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); + + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + header.iu_type) != 0); + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + header.iu_length) != 2); + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + header.response_queue_id) != 4); + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + header.driver_flags) != 6); + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + request_id) != 8); + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + nexus_id) != 12); + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + buffer_length) != 16); + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + data_encryption_key_index) != 22); + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + encrypt_tweak_lower) != 24); + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + encrypt_tweak_upper) != 28); + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + cdb) != 32); + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + error_index) != 48); + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + num_sg_descriptors) != 50); + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + cdb_length) != 51); + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + lun_number) != 52); + BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, + sg_descriptors) != 64); + BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != + PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); + + BUILD_BUG_ON(offsetof(struct pqi_io_response, + header.iu_type) != 0); + BUILD_BUG_ON(offsetof(struct pqi_io_response, + header.iu_length) != 2); + BUILD_BUG_ON(offsetof(struct pqi_io_response, + request_id) != 8); + BUILD_BUG_ON(offsetof(struct pqi_io_response, + error_index) != 10); + + BUILD_BUG_ON(offsetof(struct pqi_general_management_request, + header.iu_type) != 0); + BUILD_BUG_ON(offsetof(struct pqi_general_management_request, + header.iu_length) != 2); + BUILD_BUG_ON(offsetof(struct pqi_general_management_request, + header.response_queue_id) != 4); + BUILD_BUG_ON(offsetof(struct pqi_general_management_request, + request_id) != 8); + BUILD_BUG_ON(offsetof(struct pqi_general_management_request, + data.report_event_configuration.buffer_length) != 12); + BUILD_BUG_ON(offsetof(struct pqi_general_management_request, + data.report_event_configuration.sg_descriptors) != 16); + BUILD_BUG_ON(offsetof(struct pqi_general_management_request, + data.set_event_configuration.global_event_oq_id) != 10); + BUILD_BUG_ON(offsetof(struct pqi_general_management_request, + data.set_event_configuration.buffer_length) != 12); + BUILD_BUG_ON(offsetof(struct pqi_general_management_request, + data.set_event_configuration.sg_descriptors) 
!= 16); + + BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, + max_inbound_iu_length) != 6); + BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, + max_outbound_iu_length) != 14); + BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16); + + BUILD_BUG_ON(offsetof(struct pqi_device_capability, + data_length) != 0); + BUILD_BUG_ON(offsetof(struct pqi_device_capability, + iq_arbitration_priority_support_bitmask) != 8); + BUILD_BUG_ON(offsetof(struct pqi_device_capability, + maximum_aw_a) != 9); + BUILD_BUG_ON(offsetof(struct pqi_device_capability, + maximum_aw_b) != 10); + BUILD_BUG_ON(offsetof(struct pqi_device_capability, + maximum_aw_c) != 11); + BUILD_BUG_ON(offsetof(struct pqi_device_capability, + max_inbound_queues) != 16); + BUILD_BUG_ON(offsetof(struct pqi_device_capability, + max_elements_per_iq) != 18); + BUILD_BUG_ON(offsetof(struct pqi_device_capability, + max_iq_element_length) != 24); + BUILD_BUG_ON(offsetof(struct pqi_device_capability, + min_iq_element_length) != 26); + BUILD_BUG_ON(offsetof(struct pqi_device_capability, + max_outbound_queues) != 30); + BUILD_BUG_ON(offsetof(struct pqi_device_capability, + max_elements_per_oq) != 32); + BUILD_BUG_ON(offsetof(struct pqi_device_capability, + intr_coalescing_time_granularity) != 34); + BUILD_BUG_ON(offsetof(struct pqi_device_capability, + max_oq_element_length) != 36); + BUILD_BUG_ON(offsetof(struct pqi_device_capability, + min_oq_element_length) != 38); + BUILD_BUG_ON(offsetof(struct pqi_device_capability, + iu_layer_descriptors) != 64); + BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576); + + BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, + event_type) != 0); + BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, + oq_id) != 2); + BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4); + + BUILD_BUG_ON(offsetof(struct pqi_event_config, + num_event_descriptors) != 2); + BUILD_BUG_ON(offsetof(struct pqi_event_config, + descriptors) != 4); + + BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != + ARRAY_SIZE(pqi_supported_event_types)); + + BUILD_BUG_ON(offsetof(struct pqi_event_response, + header.iu_type) != 0); + BUILD_BUG_ON(offsetof(struct pqi_event_response, + header.iu_length) != 2); + BUILD_BUG_ON(offsetof(struct pqi_event_response, + event_type) != 8); + BUILD_BUG_ON(offsetof(struct pqi_event_response, + event_id) != 10); + BUILD_BUG_ON(offsetof(struct pqi_event_response, + additional_event_id) != 12); + BUILD_BUG_ON(offsetof(struct pqi_event_response, + data) != 16); + BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32); + + BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, + header.iu_type) != 0); + BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, + header.iu_length) != 2); + BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, + event_type) != 8); + BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, + event_id) != 10); + BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, + additional_event_id) != 12); + BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16); + + BUILD_BUG_ON(offsetof(struct pqi_task_management_request, + header.iu_type) != 0); + BUILD_BUG_ON(offsetof(struct pqi_task_management_request, + header.iu_length) != 2); + BUILD_BUG_ON(offsetof(struct pqi_task_management_request, + request_id) != 8); + BUILD_BUG_ON(offsetof(struct pqi_task_management_request, + nexus_id) != 10); + BUILD_BUG_ON(offsetof(struct pqi_task_management_request, + timeout) != 14); + BUILD_BUG_ON(offsetof(struct pqi_task_management_request, + lun_number) != 16); + 
BUILD_BUG_ON(offsetof(struct pqi_task_management_request, + protocol_specific) != 24); + BUILD_BUG_ON(offsetof(struct pqi_task_management_request, + outbound_queue_id_to_manage) != 26); + BUILD_BUG_ON(offsetof(struct pqi_task_management_request, + request_id_to_manage) != 28); + BUILD_BUG_ON(offsetof(struct pqi_task_management_request, + task_management_function) != 30); + BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32); + + BUILD_BUG_ON(offsetof(struct pqi_task_management_response, + header.iu_type) != 0); + BUILD_BUG_ON(offsetof(struct pqi_task_management_response, + header.iu_length) != 2); + BUILD_BUG_ON(offsetof(struct pqi_task_management_response, + request_id) != 8); + BUILD_BUG_ON(offsetof(struct pqi_task_management_response, + nexus_id) != 10); + BUILD_BUG_ON(offsetof(struct pqi_task_management_response, + additional_response_info) != 12); + BUILD_BUG_ON(offsetof(struct pqi_task_management_response, + response_code) != 15); + BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16); + + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + configured_logical_drive_count) != 0); + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + configuration_signature) != 1); + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + firmware_version_short) != 5); + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + extended_logical_unit_count) != 154); + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + firmware_build_number) != 190); + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + vendor_id) != 200); + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + product_id) != 208); + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + extra_controller_flags) != 286); + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + controller_mode) != 292); + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + spare_part_number) != 293); + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + firmware_version_long) != 325); + + BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, + phys_bay_in_box) != 115); + BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, + device_type) != 120); + BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, + redundant_path_present_map) != 1736); + BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, + active_path_number) != 1738); + BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, + alternate_paths_phys_connector) != 1739); + BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, + alternate_paths_phys_box_on_port) != 1755); + BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, + current_queue_depth_limit) != 1796); + BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); + + BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, + page_code) != 0); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, + subpage_code) != 1); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, + buffer_length) != 2); + + BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, + page_code) != 0); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, + subpage_code) != 1); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, + page_length) != 2); + + BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage) + != 18); + BUILD_BUG_ON(offsetof(struct 
bmic_sense_feature_io_page_aio_subpage, + header) != 0); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + firmware_read_support) != 4); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + driver_read_support) != 5); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + firmware_write_support) != 6); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + driver_write_support) != 7); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + max_transfer_encrypted_sas_sata) != 8); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + max_transfer_encrypted_nvme) != 10); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + max_write_raid_5_6) != 12); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + max_write_raid_1_10_2drive) != 14); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + max_write_raid_1_10_3drive) != 16); + + BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); + BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); + BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % + PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); + BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH % + PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); + BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560); + BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH % + PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); + BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560); + BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH % + PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); + + BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS); + BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= + PQI_MAX_OUTSTANDING_REQUESTS_KDUMP); +} diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c new file mode 100644 index 000000000..a981d0377 --- /dev/null +++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c @@ -0,0 +1,574 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries + * Copyright (c) 2016-2018 Microsemi Corporation + * Copyright (c) 2016 PMC-Sierra, Inc. 
+ * + * Questions/Comments/Bugfixes to storagedev@microchip.com + * + */ + +#include +#include +#include +#include +#include +#include +#include "smartpqi.h" + +static struct pqi_sas_phy *pqi_alloc_sas_phy(struct pqi_sas_port *pqi_sas_port) +{ + struct pqi_sas_phy *pqi_sas_phy; + struct sas_phy *phy; + + pqi_sas_phy = kzalloc(sizeof(*pqi_sas_phy), GFP_KERNEL); + if (!pqi_sas_phy) + return NULL; + + phy = sas_phy_alloc(pqi_sas_port->parent_node->parent_dev, + pqi_sas_port->next_phy_index); + if (!phy) { + kfree(pqi_sas_phy); + return NULL; + } + + pqi_sas_port->next_phy_index++; + pqi_sas_phy->phy = phy; + pqi_sas_phy->parent_port = pqi_sas_port; + + return pqi_sas_phy; +} + +static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy) +{ + struct sas_phy *phy = pqi_sas_phy->phy; + + sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy); + if (pqi_sas_phy->added_to_port) + list_del(&pqi_sas_phy->phy_list_entry); + sas_phy_delete(phy); + kfree(pqi_sas_phy); +} + +static int pqi_sas_port_add_phy(struct pqi_sas_phy *pqi_sas_phy) +{ + int rc; + struct pqi_sas_port *pqi_sas_port; + struct sas_phy *phy; + struct sas_identify *identify; + + pqi_sas_port = pqi_sas_phy->parent_port; + phy = pqi_sas_phy->phy; + + identify = &phy->identify; + memset(identify, 0, sizeof(*identify)); + identify->sas_address = pqi_sas_port->sas_address; + identify->device_type = SAS_END_DEVICE; + identify->initiator_port_protocols = SAS_PROTOCOL_ALL; + identify->target_port_protocols = SAS_PROTOCOL_ALL; + phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; + phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; + phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN; + phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN; + phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; + + rc = sas_phy_add(pqi_sas_phy->phy); + if (rc) + return rc; + + sas_port_add_phy(pqi_sas_port->port, pqi_sas_phy->phy); + list_add_tail(&pqi_sas_phy->phy_list_entry, + &pqi_sas_port->phy_list_head); + pqi_sas_phy->added_to_port = true; + + return 0; +} + +static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port, + struct sas_rphy *rphy) +{ + struct sas_identify *identify; + + identify = &rphy->identify; + identify->sas_address = pqi_sas_port->sas_address; + identify->phy_identifier = pqi_sas_port->device->phy_id; + + identify->initiator_port_protocols = SAS_PROTOCOL_ALL; + identify->target_port_protocols = SAS_PROTOCOL_STP; + + switch (pqi_sas_port->device->device_type) { + case SA_DEVICE_TYPE_SAS: + case SA_DEVICE_TYPE_SES: + case SA_DEVICE_TYPE_NVME: + identify->target_port_protocols = SAS_PROTOCOL_SSP; + break; + case SA_DEVICE_TYPE_EXPANDER_SMP: + identify->target_port_protocols = SAS_PROTOCOL_SMP; + break; + case SA_DEVICE_TYPE_SATA: + default: + break; + } + + return sas_rphy_add(rphy); +} + +static struct sas_rphy *pqi_sas_rphy_alloc(struct pqi_sas_port *pqi_sas_port) +{ + if (pqi_sas_port->device && pqi_sas_port->device->is_expander_smp_device) + return sas_expander_alloc(pqi_sas_port->port, + SAS_FANOUT_EXPANDER_DEVICE); + + return sas_end_device_alloc(pqi_sas_port->port); +} + +static struct pqi_sas_port *pqi_alloc_sas_port( + struct pqi_sas_node *pqi_sas_node, u64 sas_address, + struct pqi_scsi_dev *device) +{ + int rc; + struct pqi_sas_port *pqi_sas_port; + struct sas_port *port; + + pqi_sas_port = kzalloc(sizeof(*pqi_sas_port), GFP_KERNEL); + if (!pqi_sas_port) + return NULL; + + INIT_LIST_HEAD(&pqi_sas_port->phy_list_head); + pqi_sas_port->parent_node = pqi_sas_node; + + port = sas_port_alloc_num(pqi_sas_node->parent_dev); + if (!port) + goto 
free_pqi_port; + + rc = sas_port_add(port); + if (rc) + goto free_sas_port; + + pqi_sas_port->port = port; + pqi_sas_port->sas_address = sas_address; + pqi_sas_port->device = device; + list_add_tail(&pqi_sas_port->port_list_entry, + &pqi_sas_node->port_list_head); + + return pqi_sas_port; + +free_sas_port: + sas_port_free(port); +free_pqi_port: + kfree(pqi_sas_port); + + return NULL; +} + +static void pqi_free_sas_port(struct pqi_sas_port *pqi_sas_port) +{ + struct pqi_sas_phy *pqi_sas_phy; + struct pqi_sas_phy *next; + + list_for_each_entry_safe(pqi_sas_phy, next, + &pqi_sas_port->phy_list_head, phy_list_entry) + pqi_free_sas_phy(pqi_sas_phy); + + sas_port_delete(pqi_sas_port->port); + list_del(&pqi_sas_port->port_list_entry); + kfree(pqi_sas_port); +} + +static struct pqi_sas_node *pqi_alloc_sas_node(struct device *parent_dev) +{ + struct pqi_sas_node *pqi_sas_node; + + pqi_sas_node = kzalloc(sizeof(*pqi_sas_node), GFP_KERNEL); + if (pqi_sas_node) { + pqi_sas_node->parent_dev = parent_dev; + INIT_LIST_HEAD(&pqi_sas_node->port_list_head); + } + + return pqi_sas_node; +} + +static void pqi_free_sas_node(struct pqi_sas_node *pqi_sas_node) +{ + struct pqi_sas_port *pqi_sas_port; + struct pqi_sas_port *next; + + if (!pqi_sas_node) + return; + + list_for_each_entry_safe(pqi_sas_port, next, + &pqi_sas_node->port_list_head, port_list_entry) + pqi_free_sas_port(pqi_sas_port); + + kfree(pqi_sas_node); +} + +struct pqi_scsi_dev *pqi_find_device_by_sas_rphy( + struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy) +{ + struct pqi_scsi_dev *device; + + list_for_each_entry(device, &ctrl_info->scsi_device_list, + scsi_device_list_entry) { + if (!device->sas_port) + continue; + if (device->sas_port->rphy == rphy) + return device; + } + + return NULL; +} + +int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info) +{ + int rc; + struct device *parent_dev; + struct pqi_sas_node *pqi_sas_node; + struct pqi_sas_port *pqi_sas_port; + struct pqi_sas_phy *pqi_sas_phy; + + parent_dev = &shost->shost_dev; + + pqi_sas_node = pqi_alloc_sas_node(parent_dev); + if (!pqi_sas_node) + return -ENOMEM; + + pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, + ctrl_info->sas_address, NULL); + if (!pqi_sas_port) { + rc = -ENODEV; + goto free_sas_node; + } + + pqi_sas_phy = pqi_alloc_sas_phy(pqi_sas_port); + if (!pqi_sas_phy) { + rc = -ENODEV; + goto free_sas_port; + } + + rc = pqi_sas_port_add_phy(pqi_sas_phy); + if (rc) + goto free_sas_phy; + + ctrl_info->sas_host = pqi_sas_node; + + return 0; + +free_sas_phy: + pqi_free_sas_phy(pqi_sas_phy); +free_sas_port: + pqi_free_sas_port(pqi_sas_port); +free_sas_node: + pqi_free_sas_node(pqi_sas_node); + + return rc; +} + +void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info) +{ + pqi_free_sas_node(ctrl_info->sas_host); +} + +int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node, + struct pqi_scsi_dev *device) +{ + int rc; + struct pqi_sas_port *pqi_sas_port; + struct sas_rphy *rphy; + + pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, + device->sas_address, device); + if (!pqi_sas_port) + return -ENOMEM; + + rphy = pqi_sas_rphy_alloc(pqi_sas_port); + if (!rphy) { + rc = -ENODEV; + goto free_sas_port; + } + + pqi_sas_port->rphy = rphy; + device->sas_port = pqi_sas_port; + + rc = pqi_sas_port_add_rphy(pqi_sas_port, rphy); + if (rc) + goto free_sas_rphy; + + return 0; + +free_sas_rphy: + sas_rphy_free(rphy); +free_sas_port: + pqi_free_sas_port(pqi_sas_port); + device->sas_port = NULL; + + return rc; +} + +void pqi_remove_sas_device(struct pqi_scsi_dev *device) 
+{ + if (device->sas_port) { + pqi_free_sas_port(device->sas_port); + device->sas_port = NULL; + } +} + +static int pqi_sas_get_linkerrors(struct sas_phy *phy) +{ + return 0; +} + +static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy, + u64 *identifier) +{ + int rc; + unsigned long flags; + struct Scsi_Host *shost; + struct pqi_ctrl_info *ctrl_info; + struct pqi_scsi_dev *found_device; + struct pqi_scsi_dev *device; + + if (!rphy) + return -ENODEV; + + shost = rphy_to_shost(rphy); + ctrl_info = shost_to_hba(shost); + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + found_device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); + + if (!found_device) { + rc = -ENODEV; + goto out; + } + + if (found_device->devtype == TYPE_ENCLOSURE) { + *identifier = get_unaligned_be64(&found_device->wwid[8]); + rc = 0; + goto out; + } + + if (found_device->box_index == 0xff || + found_device->phys_box_on_bus == 0 || + found_device->bay == 0xff) { + rc = -EINVAL; + goto out; + } + + list_for_each_entry(device, &ctrl_info->scsi_device_list, + scsi_device_list_entry) { + if (device->devtype == TYPE_ENCLOSURE && + device->box_index == found_device->box_index && + device->phys_box_on_bus == + found_device->phys_box_on_bus && + memcmp(device->phys_connector, + found_device->phys_connector, 2) == 0) { + *identifier = + get_unaligned_be64(&device->wwid[8]); + rc = 0; + goto out; + } + } + + if (found_device->phy_connected_dev_type != SA_DEVICE_TYPE_CONTROLLER) { + rc = -EINVAL; + goto out; + } + + list_for_each_entry(device, &ctrl_info->scsi_device_list, + scsi_device_list_entry) { + if (device->devtype == TYPE_ENCLOSURE && + CISS_GET_DRIVE_NUMBER(device->scsi3addr) == + PQI_VSEP_CISS_BTL) { + *identifier = get_unaligned_be64(&device->wwid[8]); + rc = 0; + goto out; + } + } + + rc = -EINVAL; +out: + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return rc; +} + +static int pqi_sas_get_bay_identifier(struct sas_rphy *rphy) +{ + int rc; + unsigned long flags; + struct pqi_ctrl_info *ctrl_info; + struct pqi_scsi_dev *device; + struct Scsi_Host *shost; + + if (!rphy) + return -ENODEV; + + shost = rphy_to_shost(rphy); + ctrl_info = shost_to_hba(shost); + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); + + if (!device) { + rc = -ENODEV; + goto out; + } + + if (device->bay == 0xff) + rc = -EINVAL; + else + rc = device->bay; + +out: + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return rc; +} + +static int pqi_sas_phy_reset(struct sas_phy *phy, int hard_reset) +{ + return 0; +} + +static int pqi_sas_phy_enable(struct sas_phy *phy, int enable) +{ + return 0; +} + +static int pqi_sas_phy_setup(struct sas_phy *phy) +{ + return 0; +} + +static void pqi_sas_phy_release(struct sas_phy *phy) +{ +} + +static int pqi_sas_phy_speed(struct sas_phy *phy, + struct sas_phy_linkrates *rates) +{ + return -EINVAL; +} + +#define CSMI_IOCTL_TIMEOUT 60 +#define SMP_CRC_FIELD_LENGTH 4 + +static struct bmic_csmi_smp_passthru_buffer * +pqi_build_csmi_smp_passthru_buffer(struct sas_rphy *rphy, + struct bsg_job *job) +{ + struct bmic_csmi_smp_passthru_buffer *smp_buf; + struct bmic_csmi_ioctl_header *ioctl_header; + struct bmic_csmi_smp_passthru *parameters; + u32 req_size; + u32 resp_size; + + smp_buf = kzalloc(sizeof(*smp_buf), GFP_KERNEL); + if (!smp_buf) + return NULL; + + req_size = job->request_payload.payload_len; + resp_size = job->reply_payload.payload_len; + + ioctl_header = 
&smp_buf->ioctl_header; + put_unaligned_le32(sizeof(smp_buf->ioctl_header), + &ioctl_header->header_length); + put_unaligned_le32(CSMI_IOCTL_TIMEOUT, &ioctl_header->timeout); + put_unaligned_le32(CSMI_CC_SAS_SMP_PASSTHRU, + &ioctl_header->control_code); + put_unaligned_le32(sizeof(smp_buf->parameters), &ioctl_header->length); + + parameters = &smp_buf->parameters; + parameters->phy_identifier = rphy->identify.phy_identifier; + parameters->port_identifier = 0; + parameters->connection_rate = 0; + put_unaligned_be64(rphy->identify.sas_address, + ¶meters->destination_sas_address); + + if (req_size > SMP_CRC_FIELD_LENGTH) + req_size -= SMP_CRC_FIELD_LENGTH; + + put_unaligned_le32(req_size, ¶meters->request_length); + put_unaligned_le32(resp_size, ¶meters->response_length); + + sg_copy_to_buffer(job->request_payload.sg_list, + job->reply_payload.sg_cnt, ¶meters->request, + req_size); + + return smp_buf; +} + +static unsigned int pqi_build_sas_smp_handler_reply( + struct bmic_csmi_smp_passthru_buffer *smp_buf, struct bsg_job *job, + struct pqi_raid_error_info *error_info) +{ + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, &smp_buf->parameters.response, + le32_to_cpu(smp_buf->parameters.response_length)); + + job->reply_len = le16_to_cpu(error_info->sense_data_length); + memcpy(job->reply, error_info->data, + le16_to_cpu(error_info->sense_data_length)); + + return job->reply_payload.payload_len - + get_unaligned_le32(&error_info->data_in_transferred); +} + +void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, + struct sas_rphy *rphy) +{ + int rc; + struct pqi_ctrl_info *ctrl_info; + struct bmic_csmi_smp_passthru_buffer *smp_buf; + struct pqi_raid_error_info error_info; + unsigned int reslen = 0; + + ctrl_info = shost_to_hba(shost); + + if (job->reply_payload.payload_len == 0) { + rc = -ENOMEM; + goto out; + } + + if (!rphy) { + rc = -EINVAL; + goto out; + } + + if (rphy->identify.device_type != SAS_FANOUT_EXPANDER_DEVICE) { + rc = -EINVAL; + goto out; + } + + if (job->request_payload.sg_cnt > 1 || job->reply_payload.sg_cnt > 1) { + rc = -EINVAL; + goto out; + } + + smp_buf = pqi_build_csmi_smp_passthru_buffer(rphy, job); + if (!smp_buf) { + rc = -ENOMEM; + goto out; + } + + rc = pqi_csmi_smp_passthru(ctrl_info, smp_buf, sizeof(*smp_buf), + &error_info); + if (rc) + goto out; + + reslen = pqi_build_sas_smp_handler_reply(smp_buf, job, &error_info); + +out: + bsg_job_done(job, rc, reslen); +} +struct sas_function_template pqi_sas_transport_functions = { + .get_linkerrors = pqi_sas_get_linkerrors, + .get_enclosure_identifier = pqi_sas_get_enclosure_identifier, + .get_bay_identifier = pqi_sas_get_bay_identifier, + .phy_reset = pqi_sas_phy_reset, + .phy_enable = pqi_sas_phy_enable, + .phy_setup = pqi_sas_phy_setup, + .phy_release = pqi_sas_phy_release, + .set_phy_speed = pqi_sas_phy_speed, + .smp_handler = pqi_sas_smp_handler, +}; diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c new file mode 100644 index 000000000..673437c71 --- /dev/null +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -0,0 +1,502 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries + * Copyright (c) 2016-2018 Microsemi Corporation + * Copyright (c) 2016 PMC-Sierra, Inc. 
+ * + * Questions/Comments/Bugfixes to storagedev@microchip.com + * + */ + +#include +#include +#include +#include +#include +#include +#include "smartpqi.h" +#include "smartpqi_sis.h" + +/* legacy SIS interface commands */ +#define SIS_CMD_GET_ADAPTER_PROPERTIES 0x19 +#define SIS_CMD_INIT_BASE_STRUCT_ADDRESS 0x1b +#define SIS_CMD_GET_PQI_CAPABILITIES 0x3000 + +/* for submission of legacy SIS commands */ +#define SIS_REENABLE_SIS_MODE 0x1 +#define SIS_ENABLE_MSIX 0x40 +#define SIS_ENABLE_INTX 0x80 +#define SIS_SOFT_RESET 0x100 +#define SIS_CMD_READY 0x200 +#define SIS_TRIGGER_SHUTDOWN 0x800000 +#define SIS_PQI_RESET_QUIESCE 0x1000000 + +#define SIS_CMD_COMPLETE 0x1000 +#define SIS_CLEAR_CTRL_TO_HOST_DOORBELL 0x1000 + +#define SIS_CMD_STATUS_SUCCESS 0x1 +#define SIS_CMD_COMPLETE_TIMEOUT_SECS 30 +#define SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS 10 + +/* used with SIS_CMD_GET_ADAPTER_PROPERTIES command */ +#define SIS_EXTENDED_PROPERTIES_SUPPORTED 0x800000 +#define SIS_SMARTARRAY_FEATURES_SUPPORTED 0x2 +#define SIS_PQI_MODE_SUPPORTED 0x4 +#define SIS_PQI_RESET_QUIESCE_SUPPORTED 0x8 +#define SIS_REQUIRED_EXTENDED_PROPERTIES \ + (SIS_SMARTARRAY_FEATURES_SUPPORTED | SIS_PQI_MODE_SUPPORTED) + +/* used with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */ +#define SIS_BASE_STRUCT_REVISION 9 +#define SIS_BASE_STRUCT_ALIGNMENT 16 + +#define SIS_CTRL_KERNEL_FW_TRIAGE 0x3 +#define SIS_CTRL_KERNEL_UP 0x80 +#define SIS_CTRL_KERNEL_PANIC 0x100 +#define SIS_CTRL_READY_TIMEOUT_SECS 180 +#define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90 +#define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10 + +enum sis_fw_triage_status { + FW_TRIAGE_NOT_STARTED = 0, + FW_TRIAGE_STARTED, + FW_TRIAGE_COND_INVALID, + FW_TRIAGE_COMPLETED +}; + +#pragma pack(1) + +/* for use with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */ +struct sis_base_struct { + __le32 revision; /* revision of this structure */ + __le32 flags; /* reserved */ + __le32 error_buffer_paddr_low; /* lower 32 bits of physical memory */ + /* buffer for PQI error response */ + /* data */ + __le32 error_buffer_paddr_high; /* upper 32 bits of physical */ + /* memory buffer for PQI */ + /* error response data */ + __le32 error_buffer_element_length; /* length of each PQI error */ + /* response buffer element */ + /* in bytes */ + __le32 error_buffer_num_elements; /* total number of PQI error */ + /* response buffers available */ +}; + +#pragma pack() + +unsigned int sis_ctrl_ready_timeout_secs = SIS_CTRL_READY_TIMEOUT_SECS; + +static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info, + unsigned int timeout_secs) +{ + unsigned long timeout; + u32 status; + + timeout = (timeout_secs * HZ) + jiffies; + + while (1) { + status = readl(&ctrl_info->registers->sis_firmware_status); + if (status != ~0) { + if (status & SIS_CTRL_KERNEL_PANIC) { + dev_err(&ctrl_info->pci_dev->dev, + "controller is offline: status code 0x%x\n", + readl( + &ctrl_info->registers->sis_mailbox[7])); + return -ENODEV; + } + if (status & SIS_CTRL_KERNEL_UP) + break; + } + if (time_after(jiffies, timeout)) { + dev_err(&ctrl_info->pci_dev->dev, + "controller not ready after %u seconds\n", + timeout_secs); + return -ETIMEDOUT; + } + msleep(SIS_CTRL_READY_POLL_INTERVAL_MSECS); + } + + return 0; +} + +int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info) +{ + return sis_wait_for_ctrl_ready_with_timeout(ctrl_info, + sis_ctrl_ready_timeout_secs); +} + +int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info) +{ + return sis_wait_for_ctrl_ready_with_timeout(ctrl_info, + 
SIS_CTRL_READY_RESUME_TIMEOUT_SECS); +} + +bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info) +{ + bool running; + u32 status; + + status = readl(&ctrl_info->registers->sis_firmware_status); + + if (status != ~0 && (status & SIS_CTRL_KERNEL_PANIC)) + running = false; + else + running = true; + + if (!running) + dev_err(&ctrl_info->pci_dev->dev, + "controller is offline: status code 0x%x\n", + readl(&ctrl_info->registers->sis_mailbox[7])); + + return running; +} + +bool sis_is_kernel_up(struct pqi_ctrl_info *ctrl_info) +{ + return readl(&ctrl_info->registers->sis_firmware_status) & + SIS_CTRL_KERNEL_UP; +} + +u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info) +{ + return readl(&ctrl_info->registers->sis_product_identifier); +} + +/* used for passing command parameters/results when issuing SIS commands */ +struct sis_sync_cmd_params { + u32 mailbox[6]; /* mailboxes 0-5 */ +}; + +static int sis_send_sync_cmd(struct pqi_ctrl_info *ctrl_info, + u32 cmd, struct sis_sync_cmd_params *params) +{ + struct pqi_ctrl_registers __iomem *registers; + unsigned int i; + unsigned long timeout; + u32 doorbell; + u32 cmd_status; + + registers = ctrl_info->registers; + + /* Write the command to mailbox 0. */ + writel(cmd, ®isters->sis_mailbox[0]); + + /* + * Write the command parameters to mailboxes 1-4 (mailbox 5 is not used + * when sending a command to the controller). + */ + for (i = 1; i <= 4; i++) + writel(params->mailbox[i], ®isters->sis_mailbox[i]); + + /* Clear the command doorbell. */ + writel(SIS_CLEAR_CTRL_TO_HOST_DOORBELL, + ®isters->sis_ctrl_to_host_doorbell_clear); + + /* Disable doorbell interrupts by masking all interrupts. */ + writel(~0, ®isters->sis_interrupt_mask); + usleep_range(1000, 2000); + + /* + * Force the completion of the interrupt mask register write before + * submitting the command. + */ + readl(®isters->sis_interrupt_mask); + + /* Submit the command to the controller. */ + writel(SIS_CMD_READY, ®isters->sis_host_to_ctrl_doorbell); + + /* + * Poll for command completion. Note that the call to msleep() is at + * the top of the loop in order to give the controller time to start + * processing the command before we start polling. + */ + timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * HZ) + jiffies; + while (1) { + msleep(SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS); + doorbell = readl(®isters->sis_ctrl_to_host_doorbell); + if (doorbell & SIS_CMD_COMPLETE) + break; + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + } + + /* Read the command status from mailbox 0. */ + cmd_status = readl(®isters->sis_mailbox[0]); + if (cmd_status != SIS_CMD_STATUS_SUCCESS) { + dev_err(&ctrl_info->pci_dev->dev, + "SIS command failed for command 0x%x: status = 0x%x\n", + cmd, cmd_status); + return -EINVAL; + } + + /* + * The command completed successfully, so save the command status and + * read the values returned in mailboxes 1-5. + */ + params->mailbox[0] = cmd_status; + for (i = 1; i < ARRAY_SIZE(params->mailbox); i++) + params->mailbox[i] = readl(®isters->sis_mailbox[i]); + + return 0; +} + +/* + * This function verifies that we are talking to a controller that speaks PQI. 
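+ * It does this by issuing SIS_CMD_GET_ADAPTER_PROPERTIES and requiring both
+ * SIS_SMARTARRAY_FEATURES_SUPPORTED and SIS_PQI_MODE_SUPPORTED in the extended
+ * properties word; SIS_PQI_RESET_QUIESCE_SUPPORTED is also noted if present.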
+ */ + +int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + u32 properties; + u32 extended_properties; + struct sis_sync_cmd_params params; + + memset(¶ms, 0, sizeof(params)); + + rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_GET_ADAPTER_PROPERTIES, + ¶ms); + if (rc) + return rc; + + properties = params.mailbox[1]; + + if (!(properties & SIS_EXTENDED_PROPERTIES_SUPPORTED)) + return -ENODEV; + + extended_properties = params.mailbox[4]; + + if ((extended_properties & SIS_REQUIRED_EXTENDED_PROPERTIES) != + SIS_REQUIRED_EXTENDED_PROPERTIES) + return -ENODEV; + + if (extended_properties & SIS_PQI_RESET_QUIESCE_SUPPORTED) + ctrl_info->pqi_reset_quiesce_supported = true; + + return 0; +} + +int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + struct sis_sync_cmd_params params; + + memset(¶ms, 0, sizeof(params)); + + rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_GET_PQI_CAPABILITIES, + ¶ms); + if (rc) + return rc; + + ctrl_info->max_sg_entries = params.mailbox[1]; + ctrl_info->max_transfer_size = params.mailbox[2]; + ctrl_info->max_outstanding_requests = params.mailbox[3]; + ctrl_info->config_table_offset = params.mailbox[4]; + ctrl_info->config_table_length = params.mailbox[5]; + + return 0; +} + +int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + void *base_struct_unaligned; + struct sis_base_struct *base_struct; + struct sis_sync_cmd_params params; + unsigned long error_buffer_paddr; + dma_addr_t bus_address; + + base_struct_unaligned = kzalloc(sizeof(*base_struct) + + SIS_BASE_STRUCT_ALIGNMENT - 1, GFP_KERNEL); + if (!base_struct_unaligned) + return -ENOMEM; + + base_struct = PTR_ALIGN(base_struct_unaligned, + SIS_BASE_STRUCT_ALIGNMENT); + error_buffer_paddr = (unsigned long)ctrl_info->error_buffer_dma_handle; + + put_unaligned_le32(SIS_BASE_STRUCT_REVISION, &base_struct->revision); + put_unaligned_le32(lower_32_bits(error_buffer_paddr), + &base_struct->error_buffer_paddr_low); + put_unaligned_le32(upper_32_bits(error_buffer_paddr), + &base_struct->error_buffer_paddr_high); + put_unaligned_le32(PQI_ERROR_BUFFER_ELEMENT_LENGTH, + &base_struct->error_buffer_element_length); + put_unaligned_le32(ctrl_info->max_io_slots, + &base_struct->error_buffer_num_elements); + + bus_address = dma_map_single(&ctrl_info->pci_dev->dev, base_struct, + sizeof(*base_struct), DMA_TO_DEVICE); + if (dma_mapping_error(&ctrl_info->pci_dev->dev, bus_address)) { + rc = -ENOMEM; + goto out; + } + + memset(¶ms, 0, sizeof(params)); + params.mailbox[1] = lower_32_bits((u64)bus_address); + params.mailbox[2] = upper_32_bits((u64)bus_address); + params.mailbox[3] = sizeof(*base_struct); + + rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS, + ¶ms); + + dma_unmap_single(&ctrl_info->pci_dev->dev, bus_address, + sizeof(*base_struct), DMA_TO_DEVICE); +out: + kfree(base_struct_unaligned); + + return rc; +} + +#define SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS 30 + +static int sis_wait_for_doorbell_bit_to_clear( + struct pqi_ctrl_info *ctrl_info, u32 bit) +{ + int rc = 0; + u32 doorbell_register; + unsigned long timeout; + + timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * HZ) + jiffies; + + while (1) { + doorbell_register = + readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell); + if ((doorbell_register & bit) == 0) + break; + if (readl(&ctrl_info->registers->sis_firmware_status) & + SIS_CTRL_KERNEL_PANIC) { + rc = -ENODEV; + break; + } + if (time_after(jiffies, timeout)) { + dev_err(&ctrl_info->pci_dev->dev, + "doorbell register bit 0x%x not cleared\n", 
+ bit); + rc = -ETIMEDOUT; + break; + } + usleep_range(1000, 2000); + } + + return rc; +} + +static inline int sis_set_doorbell_bit(struct pqi_ctrl_info *ctrl_info, u32 bit) +{ + writel(bit, &ctrl_info->registers->sis_host_to_ctrl_doorbell); + usleep_range(1000, 2000); + + return sis_wait_for_doorbell_bit_to_clear(ctrl_info, bit); +} + +void sis_enable_msix(struct pqi_ctrl_info *ctrl_info) +{ + sis_set_doorbell_bit(ctrl_info, SIS_ENABLE_MSIX); +} + +void sis_enable_intx(struct pqi_ctrl_info *ctrl_info) +{ + sis_set_doorbell_bit(ctrl_info, SIS_ENABLE_INTX); +} + +void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info, + enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) +{ + if (readl(&ctrl_info->registers->sis_firmware_status) & + SIS_CTRL_KERNEL_PANIC) + return; + + if (ctrl_info->firmware_triage_supported) + writel(ctrl_shutdown_reason, &ctrl_info->registers->sis_ctrl_shutdown_reason_code); + + writel(SIS_TRIGGER_SHUTDOWN, &ctrl_info->registers->sis_host_to_ctrl_doorbell); +} + +int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info) +{ + return sis_set_doorbell_bit(ctrl_info, SIS_PQI_RESET_QUIESCE); +} + +int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info) +{ + return sis_set_doorbell_bit(ctrl_info, SIS_REENABLE_SIS_MODE); +} + +void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value) +{ + writel(value, &ctrl_info->registers->sis_driver_scratch); + usleep_range(1000, 2000); +} + +u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info) +{ + return readl(&ctrl_info->registers->sis_driver_scratch); +} + +static inline enum sis_fw_triage_status + sis_read_firmware_triage_status(struct pqi_ctrl_info *ctrl_info) +{ + return ((enum sis_fw_triage_status)(readl(&ctrl_info->registers->sis_firmware_status) & + SIS_CTRL_KERNEL_FW_TRIAGE)); +} + +void sis_soft_reset(struct pqi_ctrl_info *ctrl_info) +{ + writel(SIS_SOFT_RESET, + &ctrl_info->registers->sis_host_to_ctrl_doorbell); +} + +#define SIS_FW_TRIAGE_STATUS_TIMEOUT_SECS 300 +#define SIS_FW_TRIAGE_STATUS_POLL_INTERVAL_SECS 1 + +int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + enum sis_fw_triage_status status; + unsigned long timeout; + + timeout = (SIS_FW_TRIAGE_STATUS_TIMEOUT_SECS * HZ) + jiffies; + while (1) { + status = sis_read_firmware_triage_status(ctrl_info); + if (status == FW_TRIAGE_COND_INVALID) { + dev_err(&ctrl_info->pci_dev->dev, + "firmware triage condition invalid\n"); + rc = -EINVAL; + break; + } else if (status == FW_TRIAGE_NOT_STARTED || + status == FW_TRIAGE_COMPLETED) { + rc = 0; + break; + } + + if (time_after(jiffies, timeout)) { + dev_err(&ctrl_info->pci_dev->dev, + "timed out waiting for firmware triage status\n"); + rc = -ETIMEDOUT; + break; + } + + ssleep(SIS_FW_TRIAGE_STATUS_POLL_INTERVAL_SECS); + } + + return rc; +} + +void sis_verify_structures(void) +{ + BUILD_BUG_ON(offsetof(struct sis_base_struct, + revision) != 0x0); + BUILD_BUG_ON(offsetof(struct sis_base_struct, + flags) != 0x4); + BUILD_BUG_ON(offsetof(struct sis_base_struct, + error_buffer_paddr_low) != 0x8); + BUILD_BUG_ON(offsetof(struct sis_base_struct, + error_buffer_paddr_high) != 0xc); + BUILD_BUG_ON(offsetof(struct sis_base_struct, + error_buffer_element_length) != 0x10); + BUILD_BUG_ON(offsetof(struct sis_base_struct, + error_buffer_num_elements) != 0x14); + BUILD_BUG_ON(sizeof(struct sis_base_struct) != 0x18); +} diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h new file mode 100644 index 000000000..0c97626d8 --- /dev/null +++ 
b/drivers/scsi/smartpqi/smartpqi_sis.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries + * Copyright (c) 2016-2018 Microsemi Corporation + * Copyright (c) 2016 PMC-Sierra, Inc. + * + * Questions/Comments/Bugfixes to storagedev@microchip.com + * + */ + +#if !defined(_SMARTPQI_SIS_H) +#define _SMARTPQI_SIS_H + +void sis_verify_structures(void); +int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info); +int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info); +bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info); +bool sis_is_kernel_up(struct pqi_ctrl_info *ctrl_info); +int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info); +int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info); +int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info); +void sis_enable_msix(struct pqi_ctrl_info *ctrl_info); +void sis_enable_intx(struct pqi_ctrl_info *ctrl_info); +void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info, + enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason); +int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info); +int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info); +void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value); +u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info); +void sis_soft_reset(struct pqi_ctrl_info *ctrl_info); +u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info); +int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info); + +extern unsigned int sis_ctrl_ready_timeout_secs; + +#endif /* _SMARTPQI_SIS_H */ diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c new file mode 100644 index 000000000..678651b9b --- /dev/null +++ b/drivers/scsi/sni_53c710.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* SNI RM driver + * + * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com +**----------------------------------------------------------------------------- +** +** +**----------------------------------------------------------------------------- + */ + +/* + * Based on lasi700.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include "53c700.h" + +MODULE_AUTHOR("Thomas Bogendörfer"); +MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:snirm_53c710"); + +#define SNIRM710_CLOCK 32 + +static struct scsi_host_template snirm710_template = { + .name = "SNI RM SCSI 53c710", + .proc_name = "snirm_53c710", + .this_id = 7, + .module = THIS_MODULE, +}; + +static int snirm710_probe(struct platform_device *dev) +{ + unsigned long base; + struct NCR_700_Host_Parameters *hostdata; + struct Scsi_Host *host; + struct resource *res; + int rc; + + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + base = res->start; + hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); + if (!hostdata) + return -ENOMEM; + + hostdata->dev = &dev->dev; + dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); + hostdata->base = ioremap(base, 0x100); + hostdata->differential = 0; + + hostdata->clock = SNIRM710_CLOCK; + hostdata->force_le_on_be = 1; + hostdata->chip710 = 1; + hostdata->burst_length = 4; + + host = NCR_700_detect(&snirm710_template, hostdata, &dev->dev); + if (!host) + goto out_kfree; + host->this_id = 
7; + host->base = base; + host->irq = rc = platform_get_irq(dev, 0); + if (rc < 0) + goto out_put_host; + if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) { + printk(KERN_ERR "snirm710: request_irq failed!\n"); + goto out_put_host; + } + + dev_set_drvdata(&dev->dev, host); + scsi_scan_host(host); + + return 0; + + out_put_host: + scsi_host_put(host); + out_kfree: + iounmap(hostdata->base); + kfree(hostdata); + return -ENODEV; +} + +static int snirm710_driver_remove(struct platform_device *dev) +{ + struct Scsi_Host *host = dev_get_drvdata(&dev->dev); + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)host->hostdata[0]; + + scsi_remove_host(host); + NCR_700_release(host); + free_irq(host->irq, host); + iounmap(hostdata->base); + kfree(hostdata); + + return 0; +} + +static struct platform_driver snirm710_driver = { + .probe = snirm710_probe, + .remove = snirm710_driver_remove, + .driver = { + .name = "snirm_53c710", + }, +}; +module_platform_driver(snirm710_driver); diff --git a/drivers/scsi/snic/Makefile b/drivers/scsi/snic/Makefile new file mode 100644 index 000000000..41546e3cb --- /dev/null +++ b/drivers/scsi/snic/Makefile @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_SCSI_SNIC) += snic.o + +snic-y := \ + snic_attrs.o \ + snic_main.o \ + snic_res.o \ + snic_isr.o \ + snic_ctl.o \ + snic_io.o \ + snic_scsi.o \ + snic_disc.o \ + vnic_cq.o \ + vnic_intr.o \ + vnic_dev.o \ + vnic_wq.o + +snic-$(CONFIG_SCSI_SNIC_DEBUG_FS) += snic_debugfs.o snic_trc.o diff --git a/drivers/scsi/snic/cq_desc.h b/drivers/scsi/snic/cq_desc.h new file mode 100644 index 000000000..52a916fd0 --- /dev/null +++ b/drivers/scsi/snic/cq_desc.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */ + +#ifndef _CQ_DESC_H_ +#define _CQ_DESC_H_ + +/* + * Completion queue descriptor types + */ +enum cq_desc_types { + CQ_DESC_TYPE_WQ_ENET = 0, + CQ_DESC_TYPE_DESC_COPY = 1, + CQ_DESC_TYPE_WQ_EXCH = 2, + CQ_DESC_TYPE_RQ_ENET = 3, + CQ_DESC_TYPE_RQ_FCP = 4, +}; + +/* Completion queue descriptor: 16B + * + * All completion queues have this basic layout. The + * type_specific area is unique for each completion + * queue type. + */ +struct cq_desc { + __le16 completed_index; + __le16 q_number; + u8 type_specific[11]; + u8 type_color; +}; + +#define CQ_DESC_TYPE_BITS 4 +#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1) +#define CQ_DESC_COLOR_MASK 1 +#define CQ_DESC_COLOR_SHIFT 7 +#define CQ_DESC_Q_NUM_BITS 10 +#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1) +#define CQ_DESC_COMP_NDX_BITS 12 +#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1) + +static inline void cq_desc_dec(const struct cq_desc *desc_arg, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index) +{ + const struct cq_desc *desc = desc_arg; + const u8 type_color = desc->type_color; + + *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; + + /* + * Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. 
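+ * Concretely, the barrier below orders the type_color load above against
+ * the q_number and completed_index loads that follow it.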
+ */ + rmb(); + + *type = type_color & CQ_DESC_TYPE_MASK; + *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK; + *completed_index = le16_to_cpu(desc->completed_index) & + CQ_DESC_COMP_NDX_MASK; +} + +#endif /* _CQ_DESC_H_ */ diff --git a/drivers/scsi/snic/cq_enet_desc.h b/drivers/scsi/snic/cq_enet_desc.h new file mode 100644 index 000000000..bd7381e52 --- /dev/null +++ b/drivers/scsi/snic/cq_enet_desc.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */ + +#ifndef _CQ_ENET_DESC_H_ +#define _CQ_ENET_DESC_H_ + +#include "cq_desc.h" + +/* Ethernet completion queue descriptor: 16B */ +struct cq_enet_wq_desc { + __le16 completed_index; + __le16 q_number; + u8 reserved[11]; + u8 type_color; +}; + +static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index) +{ + cq_desc_dec((struct cq_desc *)desc, type, + color, q_number, completed_index); +} + +#endif /* _CQ_ENET_DESC_H_ */ diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h new file mode 100644 index 000000000..32f5a34b6 --- /dev/null +++ b/drivers/scsi/snic/snic.h @@ -0,0 +1,402 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */ + +#ifndef _SNIC_H_ +#define _SNIC_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "snic_disc.h" +#include "snic_io.h" +#include "snic_res.h" +#include "snic_trc.h" +#include "snic_stats.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "vnic_snic.h" + +#define SNIC_DRV_NAME "snic" +#define SNIC_DRV_DESCRIPTION "Cisco SCSI NIC Driver" +#define SNIC_DRV_VERSION "0.0.1.18" +#define PFX SNIC_DRV_NAME ":" +#define DFX SNIC_DRV_NAME "%d: " + +#define DESC_CLEAN_LOW_WATERMARK 8 +#define SNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */ +#define SNIC_MAX_IO_REQ 50 /* scsi_cmnd tag map entries */ +#define SNIC_MIN_IO_REQ 8 /* Min IO throttle count */ +#define SNIC_IO_LOCKS 64 /* IO locks: power of 2 */ +#define SNIC_DFLT_QUEUE_DEPTH 32 /* Default Queue Depth */ +#define SNIC_MAX_QUEUE_DEPTH 64 /* Max Queue Depth */ +#define SNIC_DFLT_CMD_TIMEOUT 90 /* Extended tmo for FW */ + +/* + * Tag bits used for special requests. 
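+ * Bits 28-30 mark abort, device-reset and user (ioctl) device-reset requests;
+ * SNIC_TAG_MASK keeps only the low bits used to look the command back up.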
+ */ +#define SNIC_TAG_ABORT BIT(30) /* Tag indicating abort */ +#define SNIC_TAG_DEV_RST BIT(29) /* Tag for device reset */ +#define SNIC_TAG_IOCTL_DEV_RST BIT(28) /* Tag for User Device Reset */ +#define SNIC_TAG_MASK (BIT(24) - 1) /* Mask for lookup */ +#define SNIC_NO_TAG -1 + +/* + * Command flags to identify the type of command and for other future use + */ +#define SNIC_NO_FLAGS 0 +#define SNIC_IO_INITIALIZED BIT(0) +#define SNIC_IO_ISSUED BIT(1) +#define SNIC_IO_DONE BIT(2) +#define SNIC_IO_REQ_NULL BIT(3) +#define SNIC_IO_ABTS_PENDING BIT(4) +#define SNIC_IO_ABORTED BIT(5) +#define SNIC_IO_ABTS_ISSUED BIT(6) +#define SNIC_IO_TERM_ISSUED BIT(7) +#define SNIC_IO_ABTS_TIMEDOUT BIT(8) +#define SNIC_IO_ABTS_TERM_DONE BIT(9) +#define SNIC_IO_ABTS_TERM_REQ_NULL BIT(10) +#define SNIC_IO_ABTS_TERM_TIMEDOUT BIT(11) +#define SNIC_IO_INTERNAL_TERM_PENDING BIT(12) +#define SNIC_IO_INTERNAL_TERM_ISSUED BIT(13) +#define SNIC_DEVICE_RESET BIT(14) +#define SNIC_DEV_RST_ISSUED BIT(15) +#define SNIC_DEV_RST_TIMEDOUT BIT(16) +#define SNIC_DEV_RST_ABTS_ISSUED BIT(17) +#define SNIC_DEV_RST_TERM_ISSUED BIT(18) +#define SNIC_DEV_RST_DONE BIT(19) +#define SNIC_DEV_RST_REQ_NULL BIT(20) +#define SNIC_DEV_RST_ABTS_DONE BIT(21) +#define SNIC_DEV_RST_TERM_DONE BIT(22) +#define SNIC_DEV_RST_ABTS_PENDING BIT(23) +#define SNIC_DEV_RST_PENDING BIT(24) +#define SNIC_DEV_RST_NOTSUP BIT(25) +#define SNIC_SCSI_CLEANUP BIT(26) +#define SNIC_HOST_RESET_ISSUED BIT(27) +#define SNIC_HOST_RESET_CMD_TERM \ + (SNIC_DEV_RST_NOTSUP | SNIC_SCSI_CLEANUP | SNIC_HOST_RESET_ISSUED) + +#define SNIC_ABTS_TIMEOUT 30000 /* msec */ +#define SNIC_LUN_RESET_TIMEOUT 30000 /* msec */ +#define SNIC_HOST_RESET_TIMEOUT 30000 /* msec */ + + +/* + * These are protected by the hashed req_lock. + */ +#define CMD_SP(Cmnd) \ + (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->rqi) +#define CMD_STATE(Cmnd) \ + (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->state) +#define CMD_ABTS_STATUS(Cmnd) \ + (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->abts_status) +#define CMD_LR_STATUS(Cmnd) \ + (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->lr_status) +#define CMD_FLAGS(Cmnd) \ + (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->flags) + +#define SNIC_INVALID_CODE 0x100 /* Hdr Status val unused by firmware */ + +#define SNIC_MAX_TARGET 256 +#define SNIC_FLAGS_NONE (0) + +/* snic module params */ +extern unsigned int snic_max_qdepth; + +/* snic debugging */ +extern unsigned int snic_log_level; + +#define SNIC_MAIN_LOGGING 0x1 +#define SNIC_SCSI_LOGGING 0x2 +#define SNIC_ISR_LOGGING 0x8 +#define SNIC_DESC_LOGGING 0x10 + +#define SNIC_CHECK_LOGGING(LEVEL, CMD) \ +do { \ + if (unlikely(snic_log_level & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ +} while (0) + +#define SNIC_MAIN_DBG(host, fmt, args...) \ + SNIC_CHECK_LOGGING(SNIC_MAIN_LOGGING, \ + shost_printk(KERN_INFO, host, fmt, ## args);) + +#define SNIC_SCSI_DBG(host, fmt, args...) \ + SNIC_CHECK_LOGGING(SNIC_SCSI_LOGGING, \ + shost_printk(KERN_INFO, host, fmt, ##args);) + +#define SNIC_DISC_DBG(host, fmt, args...) \ + SNIC_CHECK_LOGGING(SNIC_SCSI_LOGGING, \ + shost_printk(KERN_INFO, host, fmt, ##args);) + +#define SNIC_ISR_DBG(host, fmt, args...) \ + SNIC_CHECK_LOGGING(SNIC_ISR_LOGGING, \ + shost_printk(KERN_INFO, host, fmt, ##args);) + +#define SNIC_HOST_ERR(host, fmt, args...) \ + shost_printk(KERN_ERR, host, fmt, ##args) + +#define SNIC_HOST_INFO(host, fmt, args...) \ + shost_printk(KERN_INFO, host, fmt, ##args) + +#define SNIC_INFO(fmt, args...) 
\ + pr_info(PFX fmt, ## args) + +#define SNIC_DBG(fmt, args...) \ + pr_info(PFX fmt, ## args) + +#define SNIC_ERR(fmt, args...) \ + pr_err(PFX fmt, ## args) + +#ifdef DEBUG +#define SNIC_BUG_ON(EXPR) \ + ({ \ + if (EXPR) { \ + SNIC_ERR("SNIC BUG(%s)\n", #EXPR); \ + BUG_ON(EXPR); \ + } \ + }) +#else +#define SNIC_BUG_ON(EXPR) \ + ({ \ + if (EXPR) { \ + SNIC_ERR("SNIC BUG(%s) at %s : %d\n", \ + #EXPR, __func__, __LINE__); \ + WARN_ON_ONCE(EXPR); \ + } \ + }) +#endif + +/* Soft assert */ +#define SNIC_ASSERT_NOT_IMPL(EXPR) \ + ({ \ + if (EXPR) {\ + SNIC_INFO("Functionality not impl'ed at %s:%d\n", \ + __func__, __LINE__); \ + WARN_ON_ONCE(EXPR); \ + } \ + }) + + +extern const char *snic_state_str[]; + +enum snic_intx_intr_index { + SNIC_INTX_WQ_RQ_COPYWQ, + SNIC_INTX_ERR, + SNIC_INTX_NOTIFY, + SNIC_INTX_INTR_MAX, +}; + +enum snic_msix_intr_index { + SNIC_MSIX_WQ, + SNIC_MSIX_IO_CMPL, + SNIC_MSIX_ERR_NOTIFY, + SNIC_MSIX_INTR_MAX, +}; + +#define SNIC_INTRHDLR_NAMSZ (2 * IFNAMSIZ) +struct snic_msix_entry { + int requested; + char devname[SNIC_INTRHDLR_NAMSZ]; + irqreturn_t (*isr)(int, void *); + void *devid; +}; + +enum snic_state { + SNIC_INIT = 0, + SNIC_ERROR, + SNIC_ONLINE, + SNIC_OFFLINE, + SNIC_FWRESET, +}; + +#define SNIC_WQ_MAX 1 +#define SNIC_CQ_IO_CMPL_MAX 1 +#define SNIC_CQ_MAX (SNIC_WQ_MAX + SNIC_CQ_IO_CMPL_MAX) + +/* firmware version information */ +struct snic_fw_info { + u32 fw_ver; + u32 hid; /* u16 hid | u16 vnic id */ + u32 max_concur_ios; /* max concurrent ios */ + u32 max_sgs_per_cmd; /* max sgls per IO */ + u32 max_io_sz; /* max io size supported */ + u32 hba_cap; /* hba capabilities */ + u32 max_tgts; /* max tgts supported */ + u16 io_tmo; /* FW Extended timeout */ + struct completion *wait; /* protected by snic lock*/ +}; + +/* + * snic_work item : defined to process asynchronous events + */ +struct snic_work { + struct work_struct work; + u16 ev_id; + u64 *ev_data; +}; + +/* + * snic structure to represent SCSI vNIC + */ +struct snic { + /* snic specific members */ + struct list_head list; + char name[IFNAMSIZ]; + atomic_t state; + spinlock_t snic_lock; + struct completion *remove_wait; + bool in_remove; + bool stop_link_events; /* stop processing link events */ + + /* discovery related */ + struct snic_disc disc; + + /* Scsi Host info */ + struct Scsi_Host *shost; + + /* vnic related structures */ + struct vnic_dev_bar bar0; + + struct vnic_stats *stats; + unsigned long stats_time; + unsigned long stats_reset_time; + + struct vnic_dev *vdev; + + /* hw resource info */ + unsigned int wq_count; + unsigned int cq_count; + unsigned int intr_count; + unsigned int err_intr_offset; + + int link_status; /* retrieved from svnic_dev_link_status() */ + u32 link_down_cnt; + + /* pci related */ + struct pci_dev *pdev; + struct snic_msix_entry msix[SNIC_MSIX_INTR_MAX]; + + /* io related info */ + mempool_t *req_pool[SNIC_REQ_MAX_CACHES]; /* (??) 
*/ + ____cacheline_aligned spinlock_t io_req_lock[SNIC_IO_LOCKS]; + + /* Maintain snic specific commands, cmds with no tag in spl_cmd_list */ + ____cacheline_aligned spinlock_t spl_cmd_lock; + struct list_head spl_cmd_list; + + unsigned int max_tag_id; + atomic_t ios_inflight; /* io in flight counter */ + + struct vnic_snic_config config; + + struct work_struct link_work; + + /* firmware information */ + struct snic_fw_info fwinfo; + + /* Work for processing Target related work */ + struct work_struct tgt_work; + + /* Work for processing Discovery */ + struct work_struct disc_work; + + /* stats related */ + unsigned int reset_stats; + atomic64_t io_cmpl_skip; + struct snic_stats s_stats; /* Per SNIC driver stats */ + + /* platform specific */ +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + struct dentry *stats_host; /* Per snic debugfs root */ + struct dentry *stats_file; /* Per snic debugfs file */ + struct dentry *reset_stats_file;/* Per snic reset stats file */ +#endif + + /* completion queue cache line section */ + ____cacheline_aligned struct vnic_cq cq[SNIC_CQ_MAX]; + + /* work queue cache line section */ + ____cacheline_aligned struct vnic_wq wq[SNIC_WQ_MAX]; + spinlock_t wq_lock[SNIC_WQ_MAX]; + + /* interrupt resource cache line section */ + ____cacheline_aligned struct vnic_intr intr[SNIC_MSIX_INTR_MAX]; +}; /* end of snic structure */ + +/* + * SNIC Driver's Global Data + */ +struct snic_global { + struct list_head snic_list; + spinlock_t snic_list_lock; + + struct kmem_cache *req_cache[SNIC_REQ_MAX_CACHES]; + + struct workqueue_struct *event_q; + +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + /* debugfs related global data */ + struct dentry *trc_root; + struct dentry *stats_root; + + struct snic_trc trc ____cacheline_aligned; +#endif +}; + +extern struct snic_global *snic_glob; + +int snic_glob_init(void); +void snic_glob_cleanup(void); + +extern struct workqueue_struct *snic_event_queue; +extern const struct attribute_group *snic_host_groups[]; + +int snic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); +int snic_abort_cmd(struct scsi_cmnd *); +int snic_device_reset(struct scsi_cmnd *); +int snic_host_reset(struct scsi_cmnd *); +int snic_reset(struct Scsi_Host *, struct scsi_cmnd *); +void snic_shutdown_scsi_cleanup(struct snic *); + + +int snic_request_intr(struct snic *); +void snic_free_intr(struct snic *); +int snic_set_intr_mode(struct snic *); +void snic_clear_intr_mode(struct snic *); + +int snic_fwcq_cmpl_handler(struct snic *, int); +int snic_wq_cmpl_handler(struct snic *, int); +void snic_free_wq_buf(struct vnic_wq *, struct vnic_wq_buf *); + + +void snic_log_q_error(struct snic *); +void snic_handle_link_event(struct snic *); +void snic_handle_link(struct work_struct *); + +int snic_queue_exch_ver_req(struct snic *); +void snic_io_exch_ver_cmpl_handler(struct snic *, struct snic_fw_req *); + +int snic_queue_wq_desc(struct snic *, void *os_buf, u16 len); + +void snic_handle_untagged_req(struct snic *, struct snic_req_info *); +void snic_release_untagged_req(struct snic *, struct snic_req_info *); +void snic_free_all_untagged_reqs(struct snic *); +int snic_get_conf(struct snic *); +void snic_set_state(struct snic *, enum snic_state); +int snic_get_state(struct snic *); +const char *snic_state_to_str(unsigned int); +void snic_hex_dump(char *, char *, int); +void snic_print_desc(const char *fn, char *os_buf, int len); +const char *show_opcode_name(int val); +#endif /* _SNIC_H */ diff --git a/drivers/scsi/snic/snic_attrs.c b/drivers/scsi/snic/snic_attrs.c new file mode 100644 index 
000000000..3ddbdbc3d --- /dev/null +++ b/drivers/scsi/snic/snic_attrs.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2014 Cisco Systems, Inc. All rights reserved. + +#include +#include + +#include "snic.h" + +static ssize_t +snic_show_sym_name(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct snic *snic = shost_priv(class_to_shost(dev)); + + return snprintf(buf, PAGE_SIZE, "%s\n", snic->name); +} + +static ssize_t +snic_show_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct snic *snic = shost_priv(class_to_shost(dev)); + + return snprintf(buf, PAGE_SIZE, "%s\n", + snic_state_str[snic_get_state(snic)]); +} + +static ssize_t +snic_show_drv_version(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", SNIC_DRV_VERSION); +} + +static ssize_t +snic_show_link_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct snic *snic = shost_priv(class_to_shost(dev)); + + if (snic->config.xpt_type == SNIC_DAS) + snic->link_status = svnic_dev_link_status(snic->vdev); + + return snprintf(buf, PAGE_SIZE, "%s\n", + (snic->link_status) ? "Link Up" : "Link Down"); +} + +static DEVICE_ATTR(snic_sym_name, S_IRUGO, snic_show_sym_name, NULL); +static DEVICE_ATTR(snic_state, S_IRUGO, snic_show_state, NULL); +static DEVICE_ATTR(drv_version, S_IRUGO, snic_show_drv_version, NULL); +static DEVICE_ATTR(link_state, S_IRUGO, snic_show_link_state, NULL); + +static struct attribute *snic_host_attrs[] = { + &dev_attr_snic_sym_name.attr, + &dev_attr_snic_state.attr, + &dev_attr_drv_version.attr, + &dev_attr_link_state.attr, + NULL, +}; + +static const struct attribute_group snic_host_attr_group = { + .attrs = snic_host_attrs +}; + +const struct attribute_group *snic_host_groups[] = { + &snic_host_attr_group, + NULL +}; diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c new file mode 100644 index 000000000..5f4fca96b --- /dev/null +++ b/drivers/scsi/snic/snic_ctl.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2014 Cisco Systems, Inc. All rights reserved. + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "snic_io.h" +#include "snic.h" +#include "cq_enet_desc.h" +#include "snic_fwint.h" + +/* + * snic_handle_link : Handles link flaps. + */ +void +snic_handle_link(struct work_struct *work) +{ + struct snic *snic = container_of(work, struct snic, link_work); + + if (snic->config.xpt_type == SNIC_DAS) + return; + + snic->link_status = svnic_dev_link_status(snic->vdev); + snic->link_down_cnt = svnic_dev_link_down_cnt(snic->vdev); + SNIC_HOST_INFO(snic->shost, "Link Event: Link %s.\n", + ((snic->link_status) ? 
"Up" : "Down")); + + SNIC_ASSERT_NOT_IMPL(1); +} + + +/* + * snic_ver_enc : Encodes version str to int + * version string is similar to netmask string + */ +static int +snic_ver_enc(const char *s) +{ + int v[4] = {0}; + int i = 0, x = 0; + char c; + const char *p = s; + + /* validate version string */ + if ((strlen(s) > 15) || (strlen(s) < 7)) + goto end; + + while ((c = *p++)) { + if (c == '.') { + i++; + continue; + } + + if (i > 3 || !isdigit(c)) + goto end; + + v[i] = v[i] * 10 + (c - '0'); + } + + /* validate sub version numbers */ + for (i = 3; i >= 0; i--) + if (v[i] > 0xff) + goto end; + + x |= (v[0] << 24) | v[1] << 16 | v[2] << 8 | v[3]; + +end: + if (x == 0) { + SNIC_ERR("Invalid version string [%s].\n", s); + + return -1; + } + + return x; +} /* end of snic_ver_enc */ + +/* + * snic_qeueue_exch_ver_req : + * + * Queues Exchange Version Request, to communicate host information + * in return, it gets firmware version details + */ +int +snic_queue_exch_ver_req(struct snic *snic) +{ + struct snic_req_info *rqi = NULL; + struct snic_host_req *req = NULL; + u32 ver = 0; + int ret = 0; + + SNIC_HOST_INFO(snic->shost, "Exch Ver Req Preparing...\n"); + + rqi = snic_req_init(snic, 0); + if (!rqi) { + SNIC_HOST_ERR(snic->shost, "Init Exch Ver Req failed\n"); + ret = -ENOMEM; + goto error; + } + + req = rqi_to_req(rqi); + + /* Initialize snic_host_req */ + snic_io_hdr_enc(&req->hdr, SNIC_REQ_EXCH_VER, 0, SCSI_NO_TAG, + snic->config.hid, 0, (ulong)rqi); + ver = snic_ver_enc(SNIC_DRV_VERSION); + req->u.exch_ver.drvr_ver = cpu_to_le32(ver); + req->u.exch_ver.os_type = cpu_to_le32(SNIC_OS_LINUX); + + snic_handle_untagged_req(snic, rqi); + + ret = snic_queue_wq_desc(snic, req, sizeof(*req)); + if (ret) { + snic_release_untagged_req(snic, rqi); + SNIC_HOST_ERR(snic->shost, + "Queuing Exch Ver Req failed, err = %d\n", + ret); + goto error; + } + + SNIC_HOST_INFO(snic->shost, "Exch Ver Req is issued. 
ret = %d\n", ret); + +error: + return ret; +} /* end of snic_queue_exch_ver_req */ + +/* + * snic_io_exch_ver_cmpl_handler + */ +void +snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) +{ + struct snic_req_info *rqi = NULL; + struct snic_exch_ver_rsp *exv_cmpl = &fwreq->u.exch_ver_cmpl; + u8 typ, hdr_stat; + u32 cmnd_id, hid, max_sgs; + ulong ctx = 0; + unsigned long flags; + + SNIC_HOST_INFO(snic->shost, "Exch Ver Compl Received.\n"); + snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); + SNIC_BUG_ON(snic->config.hid != hid); + rqi = (struct snic_req_info *) ctx; + + if (hdr_stat) { + SNIC_HOST_ERR(snic->shost, + "Exch Ver Completed w/ err status %d\n", + hdr_stat); + + goto exch_cmpl_end; + } + + spin_lock_irqsave(&snic->snic_lock, flags); + snic->fwinfo.fw_ver = le32_to_cpu(exv_cmpl->version); + snic->fwinfo.hid = le32_to_cpu(exv_cmpl->hid); + snic->fwinfo.max_concur_ios = le32_to_cpu(exv_cmpl->max_concur_ios); + snic->fwinfo.max_sgs_per_cmd = le32_to_cpu(exv_cmpl->max_sgs_per_cmd); + snic->fwinfo.max_io_sz = le32_to_cpu(exv_cmpl->max_io_sz); + snic->fwinfo.max_tgts = le32_to_cpu(exv_cmpl->max_tgts); + snic->fwinfo.io_tmo = le16_to_cpu(exv_cmpl->io_timeout); + + SNIC_HOST_INFO(snic->shost, + "vers %u hid %u max_concur_ios %u max_sgs_per_cmd %u max_io_sz %u max_tgts %u fw tmo %u\n", + snic->fwinfo.fw_ver, + snic->fwinfo.hid, + snic->fwinfo.max_concur_ios, + snic->fwinfo.max_sgs_per_cmd, + snic->fwinfo.max_io_sz, + snic->fwinfo.max_tgts, + snic->fwinfo.io_tmo); + + SNIC_HOST_INFO(snic->shost, + "HBA Capabilities = 0x%x\n", + le32_to_cpu(exv_cmpl->hba_cap)); + + /* Updating SGList size */ + max_sgs = snic->fwinfo.max_sgs_per_cmd; + if (max_sgs && max_sgs < SNIC_MAX_SG_DESC_CNT) { + snic->shost->sg_tablesize = max_sgs; + SNIC_HOST_INFO(snic->shost, "Max SGs set to %d\n", + snic->shost->sg_tablesize); + } else if (max_sgs > snic->shost->sg_tablesize) { + SNIC_HOST_INFO(snic->shost, + "Target type %d Supports Larger Max SGList %d than driver's Max SG List %d.\n", + snic->config.xpt_type, max_sgs, + snic->shost->sg_tablesize); + } + + if (snic->shost->can_queue > snic->fwinfo.max_concur_ios) + snic->shost->can_queue = snic->fwinfo.max_concur_ios; + + snic->shost->max_sectors = snic->fwinfo.max_io_sz >> 9; + if (snic->fwinfo.wait) + complete(snic->fwinfo.wait); + + spin_unlock_irqrestore(&snic->snic_lock, flags); + +exch_cmpl_end: + snic_release_untagged_req(snic, rqi); + + SNIC_HOST_INFO(snic->shost, "Exch_cmpl Done, hdr_stat %d.\n", hdr_stat); +} /* end of snic_io_exch_ver_cmpl_handler */ + +/* + * snic_get_conf + * + * Synchronous call, and Retrieves snic params. + */ +int +snic_get_conf(struct snic *snic) +{ + DECLARE_COMPLETION_ONSTACK(wait); + unsigned long flags; + int ret; + int nr_retries = 3; + + SNIC_HOST_INFO(snic->shost, "Retrieving snic params.\n"); + spin_lock_irqsave(&snic->snic_lock, flags); + memset(&snic->fwinfo, 0, sizeof(snic->fwinfo)); + snic->fwinfo.wait = &wait; + spin_unlock_irqrestore(&snic->snic_lock, flags); + + /* Additional delay to handle HW Resource initialization. */ + msleep(50); + + /* + * Exch ver req can be ignored by FW, if HW Resource initialization + * is in progress, Hence retry. + */ + do { + ret = snic_queue_exch_ver_req(snic); + if (ret) + return ret; + + wait_for_completion_timeout(&wait, msecs_to_jiffies(2000)); + spin_lock_irqsave(&snic->snic_lock, flags); + ret = (snic->fwinfo.fw_ver != 0) ? 
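The exchange-version completion handler above folds the firmware's advertised limits into the SCSI host: sg_tablesize and can_queue are clamped to what the firmware reports, and max_io_sz in bytes becomes max_sectors by shifting right by 9 (512-byte sectors). Below is a standalone sketch of that clamping; fw_limits/host_limits and apply_fw_limits() are made-up names for illustration, not driver structures.

#include <stdio.h>

/* Illustrative host/firmware limits; the field names are stand-ins. */
struct fw_limits   { unsigned int max_sgs, max_concur_ios, max_io_sz; };
struct host_limits { unsigned int sg_tablesize, can_queue, max_sectors; };

static void apply_fw_limits(struct host_limits *h, const struct fw_limits *fw,
                            unsigned int drv_max_sg)
{
        /* never advertise more SG entries or queue depth than firmware supports */
        if (fw->max_sgs && fw->max_sgs < drv_max_sg)
                h->sg_tablesize = fw->max_sgs;
        if (h->can_queue > fw->max_concur_ios)
                h->can_queue = fw->max_concur_ios;
        /* max_io_sz is in bytes; SCSI max_sectors counts 512-byte sectors */
        h->max_sectors = fw->max_io_sz >> 9;
}

int main(void)
{
        struct fw_limits fw = { .max_sgs = 48, .max_concur_ios = 256,
                                .max_io_sz = 1 << 20 };
        struct host_limits h = { .sg_tablesize = 60, .can_queue = 1024 };

        apply_fw_limits(&h, &fw, 60);
        printf("sg=%u q=%u sectors=%u\n",
               h.sg_tablesize, h.can_queue, h.max_sectors);
        /* prints: sg=48 q=256 sectors=2048 (1 MiB / 512) */
        return 0;
}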
0 : -ETIMEDOUT; + if (ret) + SNIC_HOST_ERR(snic->shost, + "Failed to retrieve snic params.\n"); + + /* Unset fwinfo.wait, on success or on last retry */ + if (ret == 0 || nr_retries == 1) + snic->fwinfo.wait = NULL; + + spin_unlock_irqrestore(&snic->snic_lock, flags); + } while (ret && --nr_retries); + + return ret; +} /* end of snic_get_conf */ diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c new file mode 100644 index 000000000..9dd975b36 --- /dev/null +++ b/drivers/scsi/snic/snic_debugfs.c @@ -0,0 +1,442 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2014 Cisco Systems, Inc. All rights reserved. + +#include +#include +#include + +#include "snic.h" + +/* + * snic_debugfs_init - Initialize debugfs for snic debug logging + * + * Description: + * When debugfs is configured, this routine sets up the snic debugfs + * filesystem. If not already created, this routine will create the + * snic directory and the statistics directory for trace buffer and + * stats logging + */ +void snic_debugfs_init(void) +{ + snic_glob->trc_root = debugfs_create_dir("snic", NULL); + + snic_glob->stats_root = debugfs_create_dir("statistics", + snic_glob->trc_root); +} + +/* + * snic_debugfs_term - Tear down debugfs infrastructure + * + * Description: + * When debugfs is configured, this routine removes debugfs file system + * elements that are specific to snic + */ +void +snic_debugfs_term(void) +{ + debugfs_remove(snic_glob->stats_root); + snic_glob->stats_root = NULL; + + debugfs_remove(snic_glob->trc_root); + snic_glob->trc_root = NULL; +} + +/* + * snic_reset_stats_open - Open the reset_stats file + */ +static int +snic_reset_stats_open(struct inode *inode, struct file *filp) +{ + SNIC_BUG_ON(!inode->i_private); + filp->private_data = inode->i_private; + + return 0; +} + +/* + * snic_reset_stats_read - Read a reset_stats debugfs file + * @filp: The file pointer to read from. + * @ubuf: The buffer to copy the data to. + * @cnt: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads the value of the reset_stats variable + * and stores it into the local @buf. It will start reading the file at @ppos + * and copy up to @cnt bytes of data to @ubuf from @buf. + * + * Returns: + * This function returns the amount of data that was read. + */ +static ssize_t +snic_reset_stats_read(struct file *filp, + char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + struct snic *snic = (struct snic *) filp->private_data; + char buf[64]; + int len; + + len = sprintf(buf, "%u\n", snic->reset_stats); + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); +} + +/* + * snic_reset_stats_write - Write to reset_stats debugfs file + * @filp: The file pointer to write from. + * @ubuf: The buffer to copy the data from. + * @cnt: The number of bytes to write. + * @ppos: The position in the file to start writing to. + * + * Description: + * This routine writes data from the user buffer @ubuf to buffer @buf and + * resets the cumulative stats of the snic. + * + * Returns: + * This function returns the amount of data that was written.
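snic_get_conf() above tolerates firmware that silently drops the exchange-version request early in bring-up: it issues the request, waits on a completion with a 2-second timeout, and retries up to three times, unsetting the wait pointer only on success or on the last attempt. A userspace sketch of that bounded retry shape; issue_fn/wait_fn and the stubs are illustrative stand-ins, not driver calls.

#include <stdbool.h>
#include <stdio.h>

/* issue_fn queues the request, wait_fn waits on its completion with a
 * timeout in milliseconds; both are stand-ins for the driver's machinery. */
static int get_config(bool (*issue_fn)(void), bool (*wait_fn)(unsigned int tmo_ms))
{
        int nr_retries = 3;
        int ret;

        do {
                if (!issue_fn())
                        return -1;      /* queuing failed: give up immediately */

                /* a timeout is retried rather than treated as fatal, since the
                 * firmware may ignore the request while still initializing */
                ret = wait_fn(2000) ? 0 : -2;
        } while (ret && --nr_retries);

        return ret;
}

static int attempts;
static bool issue_stub(void) { return true; }
static bool wait_stub(unsigned int tmo_ms) { (void)tmo_ms; return ++attempts == 2; }

int main(void)
{
        int ret = get_config(issue_stub, wait_stub);

        printf("ret=%d after %d attempts\n", ret, attempts);   /* ret=0 after 2 attempts */
        return 0;
}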
+ */ +static ssize_t +snic_reset_stats_write(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + struct snic *snic = (struct snic *) filp->private_data; + struct snic_stats *stats = &snic->s_stats; + u64 *io_stats_p = (u64 *) &stats->io; + u64 *fw_stats_p = (u64 *) &stats->fw; + char buf[64]; + unsigned long val; + int ret; + + if (cnt >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, cnt)) + return -EFAULT; + + buf[cnt] = '\0'; + + ret = kstrtoul(buf, 10, &val); + if (ret < 0) + return ret; + + snic->reset_stats = val; + + if (snic->reset_stats) { + /* Skip variable is used to avoid descrepancies to Num IOs + * and IO Completions stats. Skip incrementing No IO Compls + * for pending active IOs after reset_stats + */ + atomic64_set(&snic->io_cmpl_skip, + atomic64_read(&stats->io.active)); + memset(&stats->abts, 0, sizeof(struct snic_abort_stats)); + memset(&stats->reset, 0, sizeof(struct snic_reset_stats)); + memset(&stats->misc, 0, sizeof(struct snic_misc_stats)); + memset(io_stats_p+1, + 0, + sizeof(struct snic_io_stats) - sizeof(u64)); + memset(fw_stats_p+1, + 0, + sizeof(struct snic_fw_stats) - sizeof(u64)); + } + + (*ppos)++; + + SNIC_HOST_INFO(snic->shost, "Reset Op: Driver statistics.\n"); + + return cnt; +} + +static int +snic_reset_stats_release(struct inode *inode, struct file *filp) +{ + filp->private_data = NULL; + + return 0; +} + +/* + * snic_stats_show - Formats and prints per host specific driver stats. + */ +static int +snic_stats_show(struct seq_file *sfp, void *data) +{ + struct snic *snic = (struct snic *) sfp->private; + struct snic_stats *stats = &snic->s_stats; + struct timespec64 last_isr_tms, last_ack_tms; + u64 maxio_tm; + int i; + + /* Dump IO Stats */ + seq_printf(sfp, + "------------------------------------------\n" + "\t\t IO Statistics\n" + "------------------------------------------\n"); + + maxio_tm = (u64) atomic64_read(&stats->io.max_time); + seq_printf(sfp, + "Active IOs : %lld\n" + "Max Active IOs : %lld\n" + "Total IOs : %lld\n" + "IOs Completed : %lld\n" + "IOs Failed : %lld\n" + "IOs Not Found : %lld\n" + "Memory Alloc Failures : %lld\n" + "REQs Null : %lld\n" + "SCSI Cmd Pointers Null : %lld\n" + "Max SGL for any IO : %lld\n" + "Max IO Size : %lld Sectors\n" + "Max Queuing Time : %lld\n" + "Max Completion Time : %lld\n" + "Max IO Process Time(FW) : %lld (%u msec)\n", + (u64) atomic64_read(&stats->io.active), + (u64) atomic64_read(&stats->io.max_active), + (u64) atomic64_read(&stats->io.num_ios), + (u64) atomic64_read(&stats->io.compl), + (u64) atomic64_read(&stats->io.fail), + (u64) atomic64_read(&stats->io.io_not_found), + (u64) atomic64_read(&stats->io.alloc_fail), + (u64) atomic64_read(&stats->io.req_null), + (u64) atomic64_read(&stats->io.sc_null), + (u64) atomic64_read(&stats->io.max_sgl), + (u64) atomic64_read(&stats->io.max_io_sz), + (u64) atomic64_read(&stats->io.max_qtime), + (u64) atomic64_read(&stats->io.max_cmpl_time), + maxio_tm, + jiffies_to_msecs(maxio_tm)); + + seq_puts(sfp, "\nSGL Counters\n"); + + for (i = 0; i < SNIC_MAX_SG_DESC_CNT; i++) { + seq_printf(sfp, + "%10lld ", + (u64) atomic64_read(&stats->io.sgl_cnt[i])); + + if ((i + 1) % 8 == 0) + seq_puts(sfp, "\n"); + } + + /* Dump Abort Stats */ + seq_printf(sfp, + "\n-------------------------------------------\n" + "\t\t Abort Statistics\n" + "---------------------------------------------\n"); + + seq_printf(sfp, + "Aborts : %lld\n" + "Aborts Fail : %lld\n" + "Aborts Driver Timeout : %lld\n" + "Abort FW Timeout : %lld\n" + "Abort IO 
NOT Found : %lld\n" + "Abort Queuing Failed : %lld\n", + (u64) atomic64_read(&stats->abts.num), + (u64) atomic64_read(&stats->abts.fail), + (u64) atomic64_read(&stats->abts.drv_tmo), + (u64) atomic64_read(&stats->abts.fw_tmo), + (u64) atomic64_read(&stats->abts.io_not_found), + (u64) atomic64_read(&stats->abts.q_fail)); + + /* Dump Reset Stats */ + seq_printf(sfp, + "\n-------------------------------------------\n" + "\t\t Reset Statistics\n" + "---------------------------------------------\n"); + + seq_printf(sfp, + "HBA Resets : %lld\n" + "HBA Reset Cmpls : %lld\n" + "HBA Reset Fail : %lld\n", + (u64) atomic64_read(&stats->reset.hba_resets), + (u64) atomic64_read(&stats->reset.hba_reset_cmpl), + (u64) atomic64_read(&stats->reset.hba_reset_fail)); + + /* Dump Firmware Stats */ + seq_printf(sfp, + "\n-------------------------------------------\n" + "\t\t Firmware Statistics\n" + "---------------------------------------------\n"); + + seq_printf(sfp, + "Active FW Requests : %lld\n" + "Max FW Requests : %lld\n" + "FW Out Of Resource Errs : %lld\n" + "FW IO Errors : %lld\n" + "FW SCSI Errors : %lld\n", + (u64) atomic64_read(&stats->fw.actv_reqs), + (u64) atomic64_read(&stats->fw.max_actv_reqs), + (u64) atomic64_read(&stats->fw.out_of_res), + (u64) atomic64_read(&stats->fw.io_errs), + (u64) atomic64_read(&stats->fw.scsi_errs)); + + + /* Dump Miscellenous Stats */ + seq_printf(sfp, + "\n---------------------------------------------\n" + "\t\t Other Statistics\n" + "\n---------------------------------------------\n"); + + jiffies_to_timespec64(stats->misc.last_isr_time, &last_isr_tms); + jiffies_to_timespec64(stats->misc.last_ack_time, &last_ack_tms); + + seq_printf(sfp, + "Last ISR Time : %llu (%8llu.%09lu)\n" + "Last Ack Time : %llu (%8llu.%09lu)\n" + "Ack ISRs : %llu\n" + "IO Cmpl ISRs : %llu\n" + "Err Notify ISRs : %llu\n" + "Max CQ Entries : %lld\n" + "Data Count Mismatch : %lld\n" + "IOs w/ Timeout Status : %lld\n" + "IOs w/ Aborted Status : %lld\n" + "IOs w/ SGL Invalid Stat : %lld\n" + "WQ Desc Alloc Fail : %lld\n" + "Queue Full : %lld\n" + "Queue Ramp Up : %lld\n" + "Queue Ramp Down : %lld\n" + "Queue Last Queue Depth : %lld\n" + "Target Not Ready : %lld\n", + (u64) stats->misc.last_isr_time, + last_isr_tms.tv_sec, last_isr_tms.tv_nsec, + (u64)stats->misc.last_ack_time, + last_ack_tms.tv_sec, last_ack_tms.tv_nsec, + (u64) atomic64_read(&stats->misc.ack_isr_cnt), + (u64) atomic64_read(&stats->misc.cmpl_isr_cnt), + (u64) atomic64_read(&stats->misc.errnotify_isr_cnt), + (u64) atomic64_read(&stats->misc.max_cq_ents), + (u64) atomic64_read(&stats->misc.data_cnt_mismat), + (u64) atomic64_read(&stats->misc.io_tmo), + (u64) atomic64_read(&stats->misc.io_aborted), + (u64) atomic64_read(&stats->misc.sgl_inval), + (u64) atomic64_read(&stats->misc.wq_alloc_fail), + (u64) atomic64_read(&stats->misc.qfull), + (u64) atomic64_read(&stats->misc.qsz_rampup), + (u64) atomic64_read(&stats->misc.qsz_rampdown), + (u64) atomic64_read(&stats->misc.last_qsz), + (u64) atomic64_read(&stats->misc.tgt_not_rdy)); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(snic_stats); + +static const struct file_operations snic_reset_stats_fops = { + .owner = THIS_MODULE, + .open = snic_reset_stats_open, + .read = snic_reset_stats_read, + .write = snic_reset_stats_write, + .release = snic_reset_stats_release, +}; + +/* + * snic_stats_init - Initialize stats struct and create stats file + * per snic + * + * Description: + * When debugfs is cofigured this routine sets up the stats file per snic + * It will create file stats and 
reset_stats under statistics/host# directory + * to log per snic stats + */ +void snic_stats_debugfs_init(struct snic *snic) +{ + char name[16]; + + snprintf(name, sizeof(name), "host%d", snic->shost->host_no); + + snic->stats_host = debugfs_create_dir(name, snic_glob->stats_root); + + snic->stats_file = debugfs_create_file("stats", S_IFREG|S_IRUGO, + snic->stats_host, snic, + &snic_stats_fops); + + snic->reset_stats_file = debugfs_create_file("reset_stats", + S_IFREG|S_IRUGO|S_IWUSR, + snic->stats_host, snic, + &snic_reset_stats_fops); +} + +/* + * snic_stats_debugfs_remove - Tear down debugfs infrastructure of stats + * + * Description: + * When Debufs is configured this routine removes debugfs file system + * elements that are specific to to snic stats + */ +void +snic_stats_debugfs_remove(struct snic *snic) +{ + debugfs_remove(snic->stats_file); + snic->stats_file = NULL; + + debugfs_remove(snic->reset_stats_file); + snic->reset_stats_file = NULL; + + debugfs_remove(snic->stats_host); + snic->stats_host = NULL; +} + +/* Trace Facility related API */ +static void * +snic_trc_seq_start(struct seq_file *sfp, loff_t *pos) +{ + return &snic_glob->trc; +} + +static void * +snic_trc_seq_next(struct seq_file *sfp, void *data, loff_t *pos) +{ + return NULL; +} + +static void +snic_trc_seq_stop(struct seq_file *sfp, void *data) +{ +} + +#define SNIC_TRC_PBLEN 256 +static int +snic_trc_seq_show(struct seq_file *sfp, void *data) +{ + char buf[SNIC_TRC_PBLEN]; + + if (snic_get_trc_data(buf, SNIC_TRC_PBLEN) > 0) + seq_printf(sfp, "%s\n", buf); + + return 0; +} + +static const struct seq_operations snic_trc_sops = { + .start = snic_trc_seq_start, + .next = snic_trc_seq_next, + .stop = snic_trc_seq_stop, + .show = snic_trc_seq_show, +}; + +DEFINE_SEQ_ATTRIBUTE(snic_trc); + +#define TRC_ENABLE_FILE "tracing_enable" +#define TRC_FILE "trace" +/* + * snic_trc_debugfs_init : creates trace/tracing_enable files for trace + * under debugfs + */ +void snic_trc_debugfs_init(void) +{ + debugfs_create_bool(TRC_ENABLE_FILE, S_IFREG | S_IRUGO | S_IWUSR, + snic_glob->trc_root, &snic_glob->trc.enable); + + debugfs_create_file(TRC_FILE, S_IFREG | S_IRUGO | S_IWUSR, + snic_glob->trc_root, NULL, &snic_trc_fops); +} + +/* + * snic_trc_debugfs_term : cleans up the files created for trace under debugfs + */ +void +snic_trc_debugfs_term(void) +{ + debugfs_lookup_and_remove(TRC_FILE, snic_glob->trc_root); + debugfs_lookup_and_remove(TRC_ENABLE_FILE, snic_glob->trc_root); +} diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c new file mode 100644 index 000000000..4db3ba62f --- /dev/null +++ b/drivers/scsi/snic/snic_disc.c @@ -0,0 +1,554 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2014 Cisco Systems, Inc. All rights reserved. + +#include +#include + +#include + +#include "snic_disc.h" +#include "snic.h" +#include "snic_io.h" + + +/* snic target types */ +static const char * const snic_tgt_type_str[] = { + [SNIC_TGT_DAS] = "DAS", + [SNIC_TGT_SAN] = "SAN", +}; + +static inline const char * +snic_tgt_type_to_str(int typ) +{ + return ((typ > SNIC_TGT_NONE && typ <= SNIC_TGT_SAN) ? + snic_tgt_type_str[typ] : "Unknown"); +} + +static const char * const snic_tgt_state_str[] = { + [SNIC_TGT_STAT_INIT] = "INIT", + [SNIC_TGT_STAT_ONLINE] = "ONLINE", + [SNIC_TGT_STAT_OFFLINE] = "OFFLINE", + [SNIC_TGT_STAT_DEL] = "DELETION IN PROGRESS", +}; + +const char * +snic_tgt_state_to_str(int state) +{ + return ((state >= SNIC_TGT_STAT_INIT && state <= SNIC_TGT_STAT_DEL) ? 
+ snic_tgt_state_str[state] : "UNKNOWN"); +} + +/* + * Initiate report_tgt req desc + */ +static void +snic_report_tgt_init(struct snic_host_req *req, u32 hid, u8 *buf, u32 len, + dma_addr_t rsp_buf_pa, ulong ctx) +{ + struct snic_sg_desc *sgd = NULL; + + + snic_io_hdr_enc(&req->hdr, SNIC_REQ_REPORT_TGTS, 0, SCSI_NO_TAG, hid, + 1, ctx); + + req->u.rpt_tgts.sg_cnt = cpu_to_le16(1); + sgd = req_to_sgl(req); + sgd[0].addr = cpu_to_le64(rsp_buf_pa); + sgd[0].len = cpu_to_le32(len); + sgd[0]._resvd = 0; + req->u.rpt_tgts.sg_addr = cpu_to_le64((ulong)sgd); +} + +/* + * snic_queue_report_tgt_req: Queues report target request. + */ +static int +snic_queue_report_tgt_req(struct snic *snic) +{ + struct snic_req_info *rqi = NULL; + u32 ntgts, buf_len = 0; + u8 *buf = NULL; + dma_addr_t pa = 0; + int ret = 0; + + rqi = snic_req_init(snic, 1); + if (!rqi) { + ret = -ENOMEM; + goto error; + } + + if (snic->fwinfo.max_tgts) + ntgts = min_t(u32, snic->fwinfo.max_tgts, snic->shost->max_id); + else + ntgts = snic->shost->max_id; + + /* Allocate Response Buffer */ + SNIC_BUG_ON(ntgts == 0); + buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) { + snic_req_free(snic, rqi); + SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n"); + + ret = -ENOMEM; + goto error; + } + + SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0); + + pa = dma_map_single(&snic->pdev->dev, buf, buf_len, DMA_FROM_DEVICE); + if (dma_mapping_error(&snic->pdev->dev, pa)) { + SNIC_HOST_ERR(snic->shost, + "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n", + buf); + kfree(buf); + snic_req_free(snic, rqi); + ret = -EINVAL; + + goto error; + } + + + SNIC_BUG_ON(pa == 0); + rqi->sge_va = (ulong) buf; + + snic_report_tgt_init(rqi->req, + snic->config.hid, + buf, + buf_len, + pa, + (ulong)rqi); + + snic_handle_untagged_req(snic, rqi); + + ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len); + if (ret) { + dma_unmap_single(&snic->pdev->dev, pa, buf_len, + DMA_FROM_DEVICE); + kfree(buf); + rqi->sge_va = 0; + snic_release_untagged_req(snic, rqi); + SNIC_HOST_ERR(snic->shost, "Queuing Report Tgts Failed.\n"); + + goto error; + } + + SNIC_DISC_DBG(snic->shost, "Report Targets Issued.\n"); + + return ret; + +error: + SNIC_HOST_ERR(snic->shost, + "Queuing Report Targets Failed, err = %d\n", + ret); + return ret; +} /* end of snic_queue_report_tgt_req */ + +/* call into SML */ +static void +snic_scsi_scan_tgt(struct work_struct *work) +{ + struct snic_tgt *tgt = container_of(work, struct snic_tgt, scan_work); + struct Scsi_Host *shost = dev_to_shost(&tgt->dev); + unsigned long flags; + + SNIC_HOST_INFO(shost, "Scanning Target id 0x%x\n", tgt->id); + scsi_scan_target(&tgt->dev, + tgt->channel, + tgt->scsi_tgt_id, + SCAN_WILD_CARD, + SCSI_SCAN_RESCAN); + + spin_lock_irqsave(shost->host_lock, flags); + tgt->flags &= ~SNIC_TGT_SCAN_PENDING; + spin_unlock_irqrestore(shost->host_lock, flags); +} /* end of snic_scsi_scan_tgt */ + +/* + * snic_tgt_lookup : + */ +static struct snic_tgt * +snic_tgt_lookup(struct snic *snic, struct snic_tgt_id *tgtid) +{ + struct list_head *cur, *nxt; + struct snic_tgt *tgt = NULL; + + list_for_each_safe(cur, nxt, &snic->disc.tgt_list) { + tgt = list_entry(cur, struct snic_tgt, list); + if (tgt->id == le32_to_cpu(tgtid->tgt_id)) + return tgt; + tgt = NULL; + } + + return tgt; +} /* end of snic_tgt_lookup */ + +/* + * snic_tgt_dev_release : Called on dropping last ref for snic_tgt object + */ +void +snic_tgt_dev_release(struct device *dev) +{ + struct 
snic_tgt *tgt = dev_to_tgt(dev); + + SNIC_HOST_INFO(snic_tgt_to_shost(tgt), + "Target Device ID %d (%s) Permanently Deleted.\n", + tgt->id, + dev_name(dev)); + + SNIC_BUG_ON(!list_empty(&tgt->list)); + kfree(tgt); +} + +/* + * snic_tgt_del : work function to delete snic_tgt + */ +static void +snic_tgt_del(struct work_struct *work) +{ + struct snic_tgt *tgt = container_of(work, struct snic_tgt, del_work); + struct Scsi_Host *shost = snic_tgt_to_shost(tgt); + + if (tgt->flags & SNIC_TGT_SCAN_PENDING) + scsi_flush_work(shost); + + /* Block IOs on child devices, stops new IOs */ + scsi_block_targets(shost, &tgt->dev); + + /* Cleanup IOs */ + snic_tgt_scsi_abort_io(tgt); + + /* Unblock IOs now, to flush if there are any. */ + scsi_target_unblock(&tgt->dev, SDEV_TRANSPORT_OFFLINE); + + /* Delete SCSI Target and sdevs */ + scsi_remove_target(&tgt->dev); /* ?? */ + device_del(&tgt->dev); + put_device(&tgt->dev); +} /* end of snic_tgt_del */ + +/* snic_tgt_create: checks for existence of snic_tgt, if it doesn't + * it creates one. + */ +static struct snic_tgt * +snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid) +{ + struct snic_tgt *tgt = NULL; + unsigned long flags; + int ret; + + tgt = snic_tgt_lookup(snic, tgtid); + if (tgt) { + /* update the information if required */ + return tgt; + } + + tgt = kzalloc(sizeof(*tgt), GFP_KERNEL); + if (!tgt) { + SNIC_HOST_ERR(snic->shost, "Failure to allocate snic_tgt.\n"); + ret = -ENOMEM; + + return tgt; + } + + INIT_LIST_HEAD(&tgt->list); + tgt->id = le32_to_cpu(tgtid->tgt_id); + tgt->channel = 0; + + SNIC_BUG_ON(le16_to_cpu(tgtid->tgt_type) > SNIC_TGT_SAN); + tgt->tdata.typ = le16_to_cpu(tgtid->tgt_type); + + /* + * Plugging into SML Device Tree + */ + tgt->tdata.disc_id = 0; + tgt->state = SNIC_TGT_STAT_INIT; + device_initialize(&tgt->dev); + tgt->dev.parent = get_device(&snic->shost->shost_gendev); + tgt->dev.release = snic_tgt_dev_release; + INIT_WORK(&tgt->scan_work, snic_scsi_scan_tgt); + INIT_WORK(&tgt->del_work, snic_tgt_del); + switch (tgt->tdata.typ) { + case SNIC_TGT_DAS: + dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d", + snic->shost->host_no, tgt->channel, tgt->id); + break; + + case SNIC_TGT_SAN: + dev_set_name(&tgt->dev, "snic_san_tgt:%d:%d-%d", + snic->shost->host_no, tgt->channel, tgt->id); + break; + + default: + SNIC_HOST_INFO(snic->shost, "Target type Unknown Detected.\n"); + dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d", + snic->shost->host_no, tgt->channel, tgt->id); + break; + } + + spin_lock_irqsave(snic->shost->host_lock, flags); + list_add_tail(&tgt->list, &snic->disc.tgt_list); + tgt->scsi_tgt_id = snic->disc.nxt_tgt_id++; + tgt->state = SNIC_TGT_STAT_ONLINE; + spin_unlock_irqrestore(snic->shost->host_lock, flags); + + SNIC_HOST_INFO(snic->shost, + "Tgt %d, type = %s detected. 
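snic_tgt_create() above is a lookup-or-create: it first asks snic_tgt_lookup() whether the reported target id is already on disc.tgt_list and only allocates and links a new snic_tgt when it is not, so repeated REPORT_TGTS responses never duplicate targets. A minimal standalone rendering of that pattern; struct tgt and the list here are illustrative, and the locking the driver does under host_lock is omitted.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct tgt {
        uint32_t id;
        struct tgt *next;
};

static struct tgt *tgt_list;

static struct tgt *tgt_lookup(uint32_t id)
{
        for (struct tgt *t = tgt_list; t; t = t->next)
                if (t->id == id)
                        return t;
        return NULL;
}

static struct tgt *tgt_create(uint32_t id)
{
        struct tgt *t = tgt_lookup(id);

        if (t)                          /* already discovered: reuse, don't duplicate */
                return t;

        t = calloc(1, sizeof(*t));
        if (!t)
                return NULL;
        t->id = id;
        t->next = tgt_list;             /* the driver appends under host_lock instead */
        tgt_list = t;
        return t;
}

int main(void)
{
        tgt_create(5);
        tgt_create(5);                  /* a second report of the same id is a no-op */
        printf("duplicated? %s\n",
               tgt_list && !tgt_list->next ? "no" : "yes");
        return 0;
}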
Adding..\n", + tgt->id, snic_tgt_type_to_str(tgt->tdata.typ)); + + ret = device_add(&tgt->dev); + if (ret) { + SNIC_HOST_ERR(snic->shost, + "Snic Tgt: device_add, with err = %d\n", + ret); + + put_device(&snic->shost->shost_gendev); + spin_lock_irqsave(snic->shost->host_lock, flags); + list_del(&tgt->list); + spin_unlock_irqrestore(snic->shost->host_lock, flags); + put_device(&tgt->dev); + tgt = NULL; + + return tgt; + } + + SNIC_HOST_INFO(snic->shost, "Scanning %s.\n", dev_name(&tgt->dev)); + + scsi_queue_work(snic->shost, &tgt->scan_work); + + return tgt; +} /* end of snic_tgt_create */ + +/* Handler for discovery */ +void +snic_handle_tgt_disc(struct work_struct *work) +{ + struct snic *snic = container_of(work, struct snic, tgt_work); + struct snic_tgt_id *tgtid = NULL; + struct snic_tgt *tgt = NULL; + unsigned long flags; + int i; + + spin_lock_irqsave(&snic->snic_lock, flags); + if (snic->in_remove) { + spin_unlock_irqrestore(&snic->snic_lock, flags); + kfree(snic->disc.rtgt_info); + + return; + } + spin_unlock_irqrestore(&snic->snic_lock, flags); + + mutex_lock(&snic->disc.mutex); + /* Discover triggered during disc in progress */ + if (snic->disc.req_cnt) { + snic->disc.state = SNIC_DISC_DONE; + snic->disc.req_cnt = 0; + mutex_unlock(&snic->disc.mutex); + kfree(snic->disc.rtgt_info); + snic->disc.rtgt_info = NULL; + + SNIC_HOST_INFO(snic->shost, "tgt_disc: Discovery restart.\n"); + /* Start Discovery Again */ + snic_disc_start(snic); + + return; + } + + tgtid = (struct snic_tgt_id *)snic->disc.rtgt_info; + + SNIC_BUG_ON(snic->disc.rtgt_cnt == 0 || tgtid == NULL); + + for (i = 0; i < snic->disc.rtgt_cnt; i++) { + tgt = snic_tgt_create(snic, &tgtid[i]); + if (!tgt) { + int buf_sz = snic->disc.rtgt_cnt * sizeof(*tgtid); + + SNIC_HOST_ERR(snic->shost, "Failed to create tgt.\n"); + snic_hex_dump("rpt_tgt_rsp", (char *)tgtid, buf_sz); + break; + } + } + + snic->disc.rtgt_info = NULL; + snic->disc.state = SNIC_DISC_DONE; + mutex_unlock(&snic->disc.mutex); + + SNIC_HOST_INFO(snic->shost, "Discovery Completed.\n"); + + kfree(tgtid); +} /* end of snic_handle_tgt_disc */ + + +int +snic_report_tgt_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) +{ + + u8 typ, cmpl_stat; + u32 cmnd_id, hid, tgt_cnt = 0; + ulong ctx; + struct snic_req_info *rqi = NULL; + struct snic_tgt_id *tgtid; + int i, ret = 0; + + snic_io_hdr_dec(&fwreq->hdr, &typ, &cmpl_stat, &cmnd_id, &hid, &ctx); + rqi = (struct snic_req_info *) ctx; + tgtid = (struct snic_tgt_id *) rqi->sge_va; + + tgt_cnt = le32_to_cpu(fwreq->u.rpt_tgts_cmpl.tgt_cnt); + if (tgt_cnt == 0) { + SNIC_HOST_ERR(snic->shost, "No Targets Found on this host.\n"); + ret = 1; + + goto end; + } + + /* printing list of targets here */ + SNIC_HOST_INFO(snic->shost, "Target Count = %d\n", tgt_cnt); + + SNIC_BUG_ON(tgt_cnt > snic->fwinfo.max_tgts); + + for (i = 0; i < tgt_cnt; i++) + SNIC_HOST_INFO(snic->shost, + "Tgt id = 0x%x\n", + le32_to_cpu(tgtid[i].tgt_id)); + + /* + * Queue work for further processing, + * Response Buffer Memory is freed after creating targets + */ + snic->disc.rtgt_cnt = tgt_cnt; + snic->disc.rtgt_info = (u8 *) tgtid; + queue_work(snic_glob->event_q, &snic->tgt_work); + ret = 0; + +end: + /* Unmap Response Buffer */ + snic_pci_unmap_rsp_buf(snic, rqi); + if (ret) + kfree(tgtid); + + rqi->sge_va = 0; + snic_release_untagged_req(snic, rqi); + + return ret; +} /* end of snic_report_tgt_cmpl_handler */ + +/* Discovery init fn */ +void +snic_disc_init(struct snic_disc *disc) +{ + INIT_LIST_HEAD(&disc->tgt_list); + 
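snic_handle_tgt_disc() and snic_disc_start() coordinate rediscovery through disc.state and disc.req_cnt: a discovery request that arrives while one is already PENDING only bumps req_cnt, and when the in-flight pass completes, a nonzero req_cnt triggers exactly one fresh pass. A compact sketch of that coalescing, with illustrative names and no locking.

#include <stdio.h>

enum disc_state { DISC_INIT, DISC_PENDING, DISC_DONE };

static enum disc_state state = DISC_INIT;
static unsigned int req_cnt;
static unsigned int passes;

static void disc_start(void)
{
        if (state == DISC_PENDING) {    /* coalesce: remember, don't re-issue */
                req_cnt++;
                return;
        }
        state = DISC_PENDING;
        passes++;                       /* stands in for queuing REPORT_TGTS */
}

static void disc_complete(void)
{
        state = DISC_DONE;
        if (req_cnt) {                  /* something changed mid-scan: go again */
                req_cnt = 0;
                disc_start();
        }
}

int main(void)
{
        disc_start();
        disc_start();           /* arrives while the first pass is in flight */
        disc_complete();        /* finishes pass 1, immediately starts pass 2 */
        printf("passes=%u\n", passes);  /* prints: passes=2 */
        return 0;
}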
mutex_init(&disc->mutex); + disc->disc_id = 0; + disc->nxt_tgt_id = 0; + disc->state = SNIC_DISC_INIT; + disc->req_cnt = 0; + disc->rtgt_cnt = 0; + disc->rtgt_info = NULL; + disc->cb = NULL; +} /* end of snic_disc_init */ + +/* Discovery, uninit fn */ +void +snic_disc_term(struct snic *snic) +{ + struct snic_disc *disc = &snic->disc; + + mutex_lock(&disc->mutex); + if (disc->req_cnt) { + disc->req_cnt = 0; + SNIC_SCSI_DBG(snic->shost, "Terminating Discovery.\n"); + } + mutex_unlock(&disc->mutex); +} + +/* + * snic_disc_start: Discovery Start ... + */ +int +snic_disc_start(struct snic *snic) +{ + struct snic_disc *disc = &snic->disc; + unsigned long flags; + int ret = 0; + + SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n"); + + spin_lock_irqsave(&snic->snic_lock, flags); + if (snic->in_remove) { + spin_unlock_irqrestore(&snic->snic_lock, flags); + SNIC_ERR("snic driver removal in progress ...\n"); + ret = 0; + + return ret; + } + spin_unlock_irqrestore(&snic->snic_lock, flags); + + mutex_lock(&disc->mutex); + if (disc->state == SNIC_DISC_PENDING) { + disc->req_cnt++; + mutex_unlock(&disc->mutex); + + return ret; + } + disc->state = SNIC_DISC_PENDING; + mutex_unlock(&disc->mutex); + + ret = snic_queue_report_tgt_req(snic); + if (ret) + SNIC_HOST_INFO(snic->shost, "Discovery Failed, err=%d.\n", ret); + + return ret; +} /* end of snic_disc_start */ + +/* + * snic_disc_work : + */ +void +snic_handle_disc(struct work_struct *work) +{ + struct snic *snic = container_of(work, struct snic, disc_work); + int ret = 0; + + SNIC_HOST_INFO(snic->shost, "disc_work: Discovery\n"); + + ret = snic_disc_start(snic); + if (ret) + goto disc_err; + +disc_err: + SNIC_HOST_ERR(snic->shost, + "disc_work: Discovery Failed w/ err = %d\n", + ret); +} /* end of snic_disc_work */ + +/* + * snic_tgt_del_all : cleanup all snic targets + * Called on unbinding the interface + */ +void +snic_tgt_del_all(struct snic *snic) +{ + struct snic_tgt *tgt = NULL; + struct list_head *cur, *nxt; + unsigned long flags; + + scsi_flush_work(snic->shost); + + mutex_lock(&snic->disc.mutex); + spin_lock_irqsave(snic->shost->host_lock, flags); + + list_for_each_safe(cur, nxt, &snic->disc.tgt_list) { + tgt = list_entry(cur, struct snic_tgt, list); + tgt->state = SNIC_TGT_STAT_DEL; + list_del_init(&tgt->list); + SNIC_HOST_INFO(snic->shost, "Tgt %d q'ing for del\n", tgt->id); + queue_work(snic_glob->event_q, &tgt->del_work); + tgt = NULL; + } + spin_unlock_irqrestore(snic->shost->host_lock, flags); + mutex_unlock(&snic->disc.mutex); + + flush_workqueue(snic_glob->event_q); +} /* end of snic_tgt_del_all */ diff --git a/drivers/scsi/snic/snic_disc.h b/drivers/scsi/snic/snic_disc.h new file mode 100644 index 000000000..9ad7f84a3 --- /dev/null +++ b/drivers/scsi/snic/snic_disc.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. 
*/ + +#ifndef __SNIC_DISC_H +#define __SNIC_DISC_H + +#include "snic_fwint.h" + +enum snic_disc_state { + SNIC_DISC_NONE, + SNIC_DISC_INIT, + SNIC_DISC_PENDING, + SNIC_DISC_DONE +}; + +struct snic; +struct snic_disc { + struct list_head tgt_list; + enum snic_disc_state state; + struct mutex mutex; + u16 disc_id; + u8 req_cnt; + u32 nxt_tgt_id; + u32 rtgt_cnt; + u8 *rtgt_info; + struct delayed_work disc_timeout; + void (*cb)(struct snic *); +}; + +#define SNIC_TGT_NAM_LEN 16 + +enum snic_tgt_state { + SNIC_TGT_STAT_NONE, + SNIC_TGT_STAT_INIT, + SNIC_TGT_STAT_ONLINE, /* Target is Online */ + SNIC_TGT_STAT_OFFLINE, /* Target is Offline */ + SNIC_TGT_STAT_DEL, +}; + +struct snic_tgt_priv { + struct list_head list; + enum snic_tgt_type typ; + u16 disc_id; + char *name[SNIC_TGT_NAM_LEN]; + + union { + /*DAS Target specific info */ + /*SAN Target specific info */ + u8 dummmy; + } u; +}; + +/* snic tgt flags */ +#define SNIC_TGT_SCAN_PENDING 0x01 + +struct snic_tgt { + struct list_head list; + u16 id; + u16 channel; + u32 flags; + u32 scsi_tgt_id; + enum snic_tgt_state state; + struct device dev; + struct work_struct scan_work; + struct work_struct del_work; + struct snic_tgt_priv tdata; +}; + + +struct snic_fw_req; + +void snic_disc_init(struct snic_disc *); +int snic_disc_start(struct snic *); +void snic_disc_term(struct snic *); +int snic_report_tgt_cmpl_handler(struct snic *, struct snic_fw_req *); +int snic_tgtinfo_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq); +void snic_process_report_tgts_rsp(struct work_struct *); +void snic_handle_tgt_disc(struct work_struct *); +void snic_handle_disc(struct work_struct *); +void snic_tgt_dev_release(struct device *); +void snic_tgt_del_all(struct snic *); + +#define dev_to_tgt(d) \ + container_of(d, struct snic_tgt, dev) + +static inline int +is_snic_target(struct device *dev) +{ + return dev->release == snic_tgt_dev_release; +} + +#define starget_to_tgt(st) \ + (is_snic_target(((struct scsi_target *) st)->dev.parent) ? \ + dev_to_tgt(st->dev.parent) : NULL) + +#define snic_tgt_to_shost(t) \ + dev_to_shost(t->dev.parent) + +static inline int +snic_tgt_chkready(struct snic_tgt *tgt) +{ + if (tgt->state == SNIC_TGT_STAT_ONLINE) + return 0; + else + return DID_NO_CONNECT << 16; +} + +const char *snic_tgt_state_to_str(int); +int snic_tgt_scsi_abort_io(struct snic_tgt *); +#endif /* end of __SNIC_DISC_H */ diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h new file mode 100644 index 000000000..2550ba964 --- /dev/null +++ b/drivers/scsi/snic/snic_fwint.h @@ -0,0 +1,513 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. 
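is_snic_target() above identifies a struct device as a snic target by comparing its ->release callback with snic_tgt_dev_release, and only then does starget_to_tgt() perform the container_of() downcast. The same checked-downcast guard in standalone C; struct base, struct tgt and to_tgt() are illustrative, not driver types.

#include <stddef.h>
#include <stdio.h>

struct base {
        void (*release)(struct base *);
};

struct tgt {
        int id;
        struct base dev;
};

static void tgt_release(struct base *b) { (void)b; }

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct tgt *to_tgt(struct base *b)
{
        /* refuse the downcast unless the object was really set up as a tgt */
        return b->release == tgt_release ? container_of(b, struct tgt, dev) : NULL;
}

int main(void)
{
        struct tgt t = { .id = 3, .dev = { .release = tgt_release } };
        struct base other = { .release = NULL };

        printf("%d %d\n", to_tgt(&t.dev)->id, to_tgt(&other) == NULL); /* 3 1 */
        return 0;
}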
*/ + +#ifndef __SNIC_FWINT_H +#define __SNIC_FWINT_H + +#define SNIC_CDB_LEN 32 /* SCSI CDB size 32, can be used for 16 bytes */ +#define LUN_ADDR_LEN 8 + +/* + * Command entry type + */ +enum snic_io_type { + /* + * Initiator request types + */ + SNIC_REQ_REPORT_TGTS = 0x2, /* Report Targets */ + SNIC_REQ_ICMND, /* Initiator command for SCSI IO */ + SNIC_REQ_ITMF, /* Initiator command for Task Mgmt */ + SNIC_REQ_HBA_RESET, /* SNIC Reset */ + SNIC_REQ_EXCH_VER, /* Exchange Version Information */ + SNIC_REQ_TGT_INFO, /* Backend/Target Information */ + SNIC_REQ_BOOT_LUNS, + + /* + * Response type + */ + SNIC_RSP_REPORT_TGTS_CMPL = 0x12,/* Report Targets Completion */ + SNIC_RSP_ICMND_CMPL, /* SCSI IO Completion */ + SNIC_RSP_ITMF_CMPL, /* Task Management Completion */ + SNIC_RSP_HBA_RESET_CMPL, /* SNIC Reset Completion */ + SNIC_RSP_EXCH_VER_CMPL, /* Exchange Version Completion*/ + SNIC_RSP_BOOT_LUNS_CMPL, + + /* + * Misc Request types + */ + SNIC_MSG_ACK = 0x80, /* Ack: snic_notify_msg */ + SNIC_MSG_ASYNC_EVNOTIFY, /* Asynchronous Event Notification */ +}; /* end of enum snic_io_type */ + + +/* + * Header status codes from firmware + */ +enum snic_io_status { + SNIC_STAT_IO_SUCCESS = 0, /* request was successful */ + + /* + * If a request to the fw is rejected, the original request header + * will be returned with the status set to one of the following: + */ + SNIC_STAT_INVALID_HDR, /* header contains invalid data */ + SNIC_STAT_OUT_OF_RES, /* out of resources to complete request */ + SNIC_STAT_INVALID_PARM, /* some parameter in request is not valid */ + SNIC_STAT_REQ_NOT_SUP, /* req type is not supported */ + SNIC_STAT_IO_NOT_FOUND, /* requested IO was not found */ + + /* + * Once a request is processed, the fw will usually return + * a cmpl message type. In cases where errors occurred, + * the header status would be filled in with one of the following: + */ + SNIC_STAT_ABORTED, /* req was aborted */ + SNIC_STAT_TIMEOUT, /* req was timed out */ + SNIC_STAT_SGL_INVALID, /* req was aborted due to sgl error */ + SNIC_STAT_DATA_CNT_MISMATCH, /*recv/sent more/less data than expec */ + SNIC_STAT_FW_ERR, /* req was terminated due to fw error */ + SNIC_STAT_ITMF_REJECT, /* itmf req was rejected by target */ + SNIC_STAT_ITMF_FAIL, /* itmf req was failed */ + SNIC_STAT_ITMF_INCORRECT_LUN, /* itmf req has incorrect LUN id*/ + SNIC_STAT_CMND_REJECT, /* req was invalid and rejected */ + SNIC_STAT_DEV_OFFLINE, /* req sent to offline device */ + SNIC_STAT_NO_BOOTLUN, + SNIC_STAT_SCSI_ERR, /* SCSI error returned by Target. 
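The snic_io_type values above are partitioned by range: host-initiated requests start at 0x02, firmware completions at 0x12, and unsolicited firmware messages (ack, async event notify) at 0x80. A small classifier built on exactly those ranges; io_type_class() is an illustrative helper, not a driver function.

#include <stdio.h>

enum { REQ_BASE = 0x02, RSP_BASE = 0x12, MSG_BASE = 0x80 };

static const char *io_type_class(unsigned int type)
{
        if (type >= MSG_BASE)
                return "fw notification";
        if (type >= RSP_BASE)
                return "fw completion";
        if (type >= REQ_BASE)
                return "host request";
        return "invalid";
}

int main(void)
{
        printf("%s / %s / %s\n",
               io_type_class(0x03),     /* SNIC_REQ_ICMND */
               io_type_class(0x13),     /* SNIC_RSP_ICMND_CMPL */
               io_type_class(0x80));    /* SNIC_MSG_ACK */
        return 0;
}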
*/ + SNIC_STAT_NOT_READY, /* sNIC Subsystem is not ready */ + SNIC_STAT_FATAL_ERROR, /* sNIC is in unrecoverable state */ +}; /* end of enum snic_io_status */ + +/* + * snic_io_hdr : host <--> firmware + * + * for any other message that will be queued to firmware should + * have the following request header + */ +struct snic_io_hdr { + __le32 hid; + __le32 cmnd_id; /* tag here */ + ulong init_ctx; /* initiator context */ + u8 type; /* request/response type */ + u8 status; /* header status entry */ + u8 protocol; /* Protocol specific, may needed for RoCE*/ + u8 flags; + __le16 sg_cnt; + u16 resvd; +}; + +/* auxillary funciton for encoding the snic_io_hdr */ +static inline void +snic_io_hdr_enc(struct snic_io_hdr *hdr, u8 typ, u8 status, u32 id, u32 hid, + u16 sg_cnt, ulong ctx) +{ + hdr->type = typ; + hdr->status = status; + hdr->protocol = 0; + hdr->hid = cpu_to_le32(hid); + hdr->cmnd_id = cpu_to_le32(id); + hdr->sg_cnt = cpu_to_le16(sg_cnt); + hdr->init_ctx = ctx; + hdr->flags = 0; +} + +/* auxillary funciton for decoding the snic_io_hdr */ +static inline void +snic_io_hdr_dec(struct snic_io_hdr *hdr, u8 *typ, u8 *stat, u32 *cmnd_id, + u32 *hid, ulong *ctx) +{ + *typ = hdr->type; + *stat = hdr->status; + *hid = le32_to_cpu(hdr->hid); + *cmnd_id = le32_to_cpu(hdr->cmnd_id); + *ctx = hdr->init_ctx; +} + +/* + * snic_host_info: host -> firmware + * + * Used for sending host information to firmware, and request fw version + */ +struct snic_exch_ver_req { + __le32 drvr_ver; /* for debugging, when fw dump captured */ + __le32 os_type; /* for OS specific features */ +}; + +/* + * os_type flags + * Bit 0-7 : OS information + * Bit 8-31: Feature/Capability Information + */ +#define SNIC_OS_LINUX 0x1 +#define SNIC_OS_WIN 0x2 +#define SNIC_OS_ESX 0x3 + +/* + * HBA Capabilities + * Bit 1: Reserved. + * Bit 2: Dynamic Discovery of LUNs. + * Bit 3: Async event notifications on tgt online/offline events. + * Bit 4: IO timeout support in FW. + * Bit 5-31: Reserved. 
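snic_io_hdr_enc()/snic_io_hdr_dec() convert hid, cmnd_id and sg_cnt to and from little-endian wire order while init_ctx is carried as a raw host cookie. Below is a round-trip sketch that does the byte ordering by hand instead of cpu_to_le32()/le32_to_cpu(); put_le32/get_le32 and the 8-byte demo header are illustrative only.

#include <stdint.h>
#include <stdio.h>

static void put_le32(uint8_t *p, uint32_t v)
{
        p[0] = v & 0xff;
        p[1] = (v >> 8) & 0xff;
        p[2] = (v >> 16) & 0xff;
        p[3] = (v >> 24) & 0xff;
}

static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
        uint8_t hdr[8];                 /* [0..3] hid, [4..7] cmnd_id (tag) */
        uint32_t hid, cmnd_id;

        put_le32(hdr, 0x1234);          /* encode side */
        put_le32(hdr + 4, 42);

        hid = get_le32(hdr);            /* decode side */
        cmnd_id = get_le32(hdr + 4);
        printf("hid=0x%x tag=%u\n", hid, cmnd_id);      /* hid=0x1234 tag=42 */
        return 0;
}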
+ */ +#define SNIC_HBA_CAP_DDL 0x02 /* Supports Dynamic Discovery of LUNs */ +#define SNIC_HBA_CAP_AEN 0x04 /* Supports Async Event Noitifcation */ +#define SNIC_HBA_CAP_TMO 0x08 /* Supports IO timeout in FW */ + +/* + * snic_exch_ver_rsp : firmware -> host + * + * Used by firmware to send response to version request + */ +struct snic_exch_ver_rsp { + __le32 version; + __le32 hid; + __le32 max_concur_ios; /* max concurrent ios */ + __le32 max_sgs_per_cmd; /* max sgls per IO */ + __le32 max_io_sz; /* max io size supported */ + __le32 hba_cap; /* hba capabilities */ + __le32 max_tgts; /* max tgts supported */ + __le16 io_timeout; /* FW extended timeout */ + u16 rsvd; +}; + + +/* + * snic_report_tgts : host -> firmware request + * + * Used by the host to request list of targets + */ +struct snic_report_tgts { + __le16 sg_cnt; + __le16 flags; /* specific flags from fw */ + u8 _resvd[4]; + __le64 sg_addr; /* Points to SGL */ + __le64 sense_addr; +}; + +enum snic_type { + SNIC_NONE = 0x0, + SNIC_DAS, + SNIC_SAN, +}; + + +/* Report Target Response */ +enum snic_tgt_type { + SNIC_TGT_NONE = 0x0, + SNIC_TGT_DAS, /* DAS Target */ + SNIC_TGT_SAN, /* SAN Target */ +}; + +/* target id format */ +struct snic_tgt_id { + __le32 tgt_id; /* target id */ + __le16 tgt_type; /* tgt type */ + __le16 vnic_id; /* corresponding vnic id */ +}; + +/* + * snic_report_tgts_cmpl : firmware -> host response + * + * Used by firmware to send response to Report Targets request + */ +struct snic_report_tgts_cmpl { + __le32 tgt_cnt; /* Number of Targets accessible */ + u32 _resvd; +}; + +/* + * Command flags + * + * Bit 0: Read flags + * Bit 1: Write flag + * Bit 2: ESGL - sg/esg array contains extended sg + * ESGE - is a host buffer contains sg elements + * Bit 3-4: Task Attributes + * 00b - simple + * 01b - head of queue + * 10b - ordered + * Bit 5-7: Priority - future use + * Bit 8-15: Reserved + */ + +#define SNIC_ICMND_WR 0x01 /* write command */ +#define SNIC_ICMND_RD 0x02 /* read command */ +#define SNIC_ICMND_ESGL 0x04 /* SGE/ESGE array contains valid data*/ + +/* + * Priority/Task Attribute settings + */ +#define SNIC_ICMND_TSK_SHIFT 2 /* task attr starts at bit 2 */ +#define SNIC_ICMND_TSK_MASK(x) ((x>>SNIC_ICMND_TSK_SHIFT) & ~(0xffff)) +#define SNIC_ICMND_TSK_SIMPLE 0 /* simple task attr */ +#define SNIC_ICMND_TSK_HEAD_OF_QUEUE 1 /* head of qeuue task attr */ +#define SNIC_ICMND_TSK_ORDERED 2 /* ordered task attr */ + +#define SNIC_ICMND_PRI_SHIFT 5 /* prio val starts at bit 5 */ + +/* + * snic_icmnd : host-> firmware request + * + * used for sending out an initiator SCSI 16/32-byte command + */ +struct snic_icmnd { + __le16 sg_cnt; /* Number of SG Elements */ + __le16 flags; /* flags */ + __le32 sense_len; /* Sense buffer length */ + __le64 tgt_id; /* Destination Target ID */ + __le64 lun_id; /* Destination LUN ID */ + u8 cdb_len; + u8 _resvd; + __le16 time_out; /* ms time for Res allocations fw to handle io*/ + __le32 data_len; /* Total number of bytes to be transferred */ + u8 cdb[SNIC_CDB_LEN]; + __le64 sg_addr; /* Points to SG List */ + __le64 sense_addr; /* Sense buffer address */ +}; + + +/* Response flags */ +/* Bit 0: Under run + * Bit 1: Over Run + * Bit 2-7: Reserved + */ +#define SNIC_ICMND_CMPL_UNDR_RUN 0x01 /* resid under and valid */ +#define SNIC_ICMND_CMPL_OVER_RUN 0x02 /* resid over and valid */ + +/* + * snic_icmnd_cmpl: firmware -> host response + * + * Used for sending the host a response to an icmnd (initiator command) + */ +struct snic_icmnd_cmpl { + u8 scsi_status; /* value as per SAM */ + 
u8 flags; + __le16 sense_len; /* Sense Length */ + __le32 resid; /* Residue : # bytes under or over run */ +}; + +/* + * snic_itmf: host->firmware request + * + * used for requesting the firmware to abort a request and/or send out + * a task management function + * + * the req_id field is valid in case of abort task and clear task + */ +struct snic_itmf { + u8 tm_type; /* SCSI Task Management request */ + u8 resvd; + __le16 flags; /* flags */ + __le32 req_id; /* Command id of snic req to be aborted */ + __le64 tgt_id; /* Target ID */ + __le64 lun_id; /* Destination LUN ID */ + __le16 timeout; /* in sec */ +}; + +/* + * Task Management Request + */ +enum snic_itmf_tm_type { + SNIC_ITMF_ABTS_TASK = 0x01, /* Abort Task */ + SNIC_ITMF_ABTS_TASK_SET, /* Abort Task Set */ + SNIC_ITMF_CLR_TASK, /* Clear Task */ + SNIC_ITMF_CLR_TASKSET, /* Clear Task Set */ + SNIC_ITMF_LUN_RESET, /* Lun Reset */ + SNIC_ITMF_ABTS_TASK_TERM, /* Supported for SAN Targets */ +}; + +/* + * snic_itmf_cmpl: firmware -> host resposne + * + * used for sending the host a response for a itmf request + */ +struct snic_itmf_cmpl { + __le32 nterminated; /* # IOs terminated as a result of tmf */ + u8 flags; /* flags */ + u8 _resvd[3]; +}; + +/* + * itmfl_cmpl flags + * Bit 0 : 1 - Num terminated field valid + * Bit 1 - 7 : Reserved + */ +#define SNIC_NUM_TERM_VALID 0x01 /* Number of IOs terminated */ + +/* + * snic_hba_reset: host -> firmware request + * + * used for requesting firmware to reset snic + */ +struct snic_hba_reset { + __le16 flags; /* flags */ + u8 _resvd[6]; +}; + +/* + * snic_hba_reset_cmpl: firmware -> host response + * + * Used by firmware to respond to the host's hba reset request + */ +struct snic_hba_reset_cmpl { + u8 flags; /* flags : more info needs to be added*/ + u8 _resvd[7]; +}; + +/* + * snic_notify_msg: firmware -> host response + * + * Used by firmware to notify host of the last work queue entry received + */ +struct snic_notify_msg { + __le32 wqe_num; /* wq entry number */ + u8 flags; /* flags, macros */ + u8 _resvd[4]; +}; + + +#define SNIC_EVDATA_LEN 24 /* in bytes */ +/* snic_async_evnotify: firmware -> host notification + * + * Used by firmware to notify the host about configuration/state changes + */ +struct snic_async_evnotify { + u8 FLS_EVENT_DESC; + u8 vnic; /* vnic id */ + u8 _resvd[2]; + __le32 ev_id; /* Event ID */ + u8 ev_data[SNIC_EVDATA_LEN]; /* Event Data */ + u8 _resvd2[4]; +}; + +/* async event flags */ +enum snic_ev_type { + SNIC_EV_TGT_OFFLINE = 0x01, /* Target Offline, PL contains TGT ID */ + SNIC_EV_TGT_ONLINE, /* Target Online, PL contains TGT ID */ + SNIC_EV_LUN_OFFLINE, /* LUN Offline, PL contains LUN ID */ + SNIC_EV_LUN_ONLINE, /* LUN Online, PL contains LUN ID */ + SNIC_EV_CONF_CHG, /* Dev Config/Attr Change Event */ + SNIC_EV_TGT_ADDED, /* Target Added */ + SNIC_EV_TGT_DELTD, /* Target Del'd, PL contains TGT ID */ + SNIC_EV_LUN_ADDED, /* LUN Added */ + SNIC_EV_LUN_DELTD, /* LUN Del'd, PL cont. TGT & LUN ID */ + + SNIC_EV_DISC_CMPL = 0x10, /* Discovery Completed Event */ +}; + + +#define SNIC_HOST_REQ_LEN 128 /*Exp length of host req, wq desc sz*/ +/* Payload 88 bytes = 128 - 24 - 16 */ +#define SNIC_HOST_REQ_PAYLOAD ((int)(SNIC_HOST_REQ_LEN - \ + sizeof(struct snic_io_hdr) - \ + (2 * sizeof(u64)) - sizeof(ulong))) + +/* + * snic_host_req: host -> firmware request + * + * Basic structure for all snic requests that are sent from the host to + * firmware. They are 128 bytes in size. 
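Every host request has to fit one fixed-size work-queue descriptor: SNIC_HOST_REQ_LEN is 128 bytes, split between 16 bytes of control data, the snic_io_hdr, the payload union and the trailing req_pa. A compile-time sketch of that budgeting with stand-in types; the struct, the 24-byte header figure and the assumption of an 8-byte req_pa are illustrative, so this sketch ends up with 80 payload bytes rather than quoting the driver's macro.

#include <stdint.h>
#include <stdio.h>

#define REQ_LEN         128             /* one WQ descriptor */
#define CTRL_LEN        16              /* ctrl_data[2] */
#define HDR_LEN         24              /* stand-in for sizeof(struct snic_io_hdr) */
#define PAYLOAD_LEN     (REQ_LEN - CTRL_LEN - HDR_LEN - (int)sizeof(uint64_t))

struct demo_host_req {
        uint8_t  ctrl[CTRL_LEN];
        uint8_t  hdr[HDR_LEN];
        union {
                uint8_t buf[PAYLOAD_LEN];       /* largest member caps every request */
        } u;
        uint64_t req_pa;                        /* stand-in for the trailing ulong */
};

/* any growth of the header or payload breaks the build, not the ring layout */
_Static_assert(sizeof(struct demo_host_req) == REQ_LEN,
               "host request must fill exactly one 128-byte descriptor");

int main(void)
{
        printf("payload bytes: %d\n", PAYLOAD_LEN);     /* 80 with an 8-byte req_pa */
        return 0;
}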
+ */ +struct snic_host_req { + u64 ctrl_data[2]; /*16 bytes - Control Data */ + struct snic_io_hdr hdr; + union { + /* + * Entry specific space, last byte contains color + */ + u8 buf[SNIC_HOST_REQ_PAYLOAD]; + + /* + * Exchange firmware version + */ + struct snic_exch_ver_req exch_ver; + + /* report targets */ + struct snic_report_tgts rpt_tgts; + + /* io request */ + struct snic_icmnd icmnd; + + /* task management request */ + struct snic_itmf itmf; + + /* hba reset */ + struct snic_hba_reset reset; + } u; + + ulong req_pa; +}; /* end of snic_host_req structure */ + + +#define SNIC_FW_REQ_LEN 64 /* Expected length of fw req */ +struct snic_fw_req { + struct snic_io_hdr hdr; + union { + /* + * Entry specific space, last byte contains color + */ + u8 buf[SNIC_FW_REQ_LEN - sizeof(struct snic_io_hdr)]; + + /* Exchange Version Response */ + struct snic_exch_ver_rsp exch_ver_cmpl; + + /* Report Targets Response */ + struct snic_report_tgts_cmpl rpt_tgts_cmpl; + + /* scsi response */ + struct snic_icmnd_cmpl icmnd_cmpl; + + /* task management response */ + struct snic_itmf_cmpl itmf_cmpl; + + /* hba reset response */ + struct snic_hba_reset_cmpl reset_cmpl; + + /* notify message */ + struct snic_notify_msg ack; + + /* async notification event */ + struct snic_async_evnotify async_ev; + + } u; +}; /* end of snic_fw_req structure */ + +/* + * Auxillary macro to verify specific snic req/cmpl structures + * to ensure that it will be aligned to 64 bit, and not using + * color bit field + */ +#define VERIFY_REQ_SZ(x) +#define VERIFY_CMPL_SZ(x) + +/* + * Access routines to encode and decode the color bit, which is the most + * significant bit of the structure. + */ +static inline void +snic_color_enc(struct snic_fw_req *req, u8 color) +{ + u8 *c = ((u8 *) req) + sizeof(struct snic_fw_req) - 1; + + if (color) + *c |= 0x80; + else + *c &= ~0x80; +} + +static inline void +snic_color_dec(struct snic_fw_req *req, u8 *color) +{ + u8 *c = ((u8 *) req) + sizeof(struct snic_fw_req) - 1; + + *color = *c >> 7; + + /* Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. + */ + rmb(); +} +#endif /* end of __SNIC_FWINT_H */ diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c new file mode 100644 index 000000000..32a77bee4 --- /dev/null +++ b/drivers/scsi/snic/snic_io.c @@ -0,0 +1,555 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2014 Cisco Systems, Inc. All rights reserved. 
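snic_color_enc()/snic_color_dec() above treat the most significant bit of the descriptor's last byte as the "color": the producer writes it last, and the consumer compares it with the color expected for the current ring pass to tell fresh completions from stale ones (hence the rmb() after reading it). A standalone sketch of just the bit handling; DESC_LEN mirrors SNIC_FW_REQ_LEN and the helpers are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DESC_LEN 64     /* same size as SNIC_FW_REQ_LEN above */

static void color_enc(uint8_t *desc, int color)
{
        if (color)
                desc[DESC_LEN - 1] |= 0x80;     /* set the top bit of the last byte */
        else
                desc[DESC_LEN - 1] &= ~0x80;
}

static int color_dec(const uint8_t *desc)
{
        return desc[DESC_LEN - 1] >> 7;         /* read only the top bit back */
}

int main(void)
{
        uint8_t desc[DESC_LEN];

        memset(desc, 0, sizeof(desc));
        color_enc(desc, 1);
        printf("color=%d\n", color_dec(desc));  /* prints: color=1 */
        return 0;
}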
+ +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "snic_io.h" +#include "snic.h" +#include "cq_enet_desc.h" +#include "snic_fwint.h" + +static void +snic_wq_cmpl_frame_send(struct vnic_wq *wq, + struct cq_desc *cq_desc, + struct vnic_wq_buf *buf, + void *opaque) +{ + struct snic *snic = svnic_dev_priv(wq->vdev); + + SNIC_BUG_ON(buf->os_buf == NULL); + + if (snic_log_level & SNIC_DESC_LOGGING) + SNIC_HOST_INFO(snic->shost, + "Ack received for snic_host_req %p.\n", + buf->os_buf); + + SNIC_TRC(snic->shost->host_no, 0, 0, + ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0, + 0); + + buf->os_buf = NULL; +} + +static int +snic_wq_cmpl_handler_cont(struct vnic_dev *vdev, + struct cq_desc *cq_desc, + u8 type, + u16 q_num, + u16 cmpl_idx, + void *opaque) +{ + struct snic *snic = svnic_dev_priv(vdev); + unsigned long flags; + + SNIC_BUG_ON(q_num != 0); + + spin_lock_irqsave(&snic->wq_lock[q_num], flags); + svnic_wq_service(&snic->wq[q_num], + cq_desc, + cmpl_idx, + snic_wq_cmpl_frame_send, + NULL); + spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); + + return 0; +} /* end of snic_cmpl_handler_cont */ + +int +snic_wq_cmpl_handler(struct snic *snic, int work_to_do) +{ + unsigned int work_done = 0; + unsigned int i; + + snic->s_stats.misc.last_ack_time = jiffies; + for (i = 0; i < snic->wq_count; i++) { + work_done += svnic_cq_service(&snic->cq[i], + work_to_do, + snic_wq_cmpl_handler_cont, + NULL); + } + + return work_done; +} /* end of snic_wq_cmpl_handler */ + +void +snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) +{ + + struct snic_host_req *req = buf->os_buf; + struct snic *snic = svnic_dev_priv(wq->vdev); + struct snic_req_info *rqi = NULL; + unsigned long flags; + + dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len, + DMA_TO_DEVICE); + + rqi = req_to_rqi(req); + spin_lock_irqsave(&snic->spl_cmd_lock, flags); + if (list_empty(&rqi->list)) { + spin_unlock_irqrestore(&snic->spl_cmd_lock, flags); + goto end; + } + + SNIC_BUG_ON(rqi->list.next == NULL); /* if not added to spl_cmd_list */ + list_del_init(&rqi->list); + spin_unlock_irqrestore(&snic->spl_cmd_lock, flags); + + if (rqi->sge_va) { + snic_pci_unmap_rsp_buf(snic, rqi); + kfree((void *)rqi->sge_va); + rqi->sge_va = 0; + } + snic_req_free(snic, rqi); + SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n"); + +end: + return; +} + +/* Criteria to select work queue in multi queue mode */ +static int +snic_select_wq(struct snic *snic) +{ + /* No multi queue support for now */ + BUILD_BUG_ON(SNIC_WQ_MAX > 1); + + return 0; +} + +static int +snic_wqdesc_avail(struct snic *snic, int q_num, int req_type) +{ + int nr_wqdesc = snic->config.wq_enet_desc_count; + + if (q_num > 0) { + /* + * Multi Queue case, additional care is required. + * Per WQ active requests need to be maintained. + */ + SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n"); + SNIC_BUG_ON(q_num > 0); + + return -1; + } + + nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs); + + return ((req_type == SNIC_REQ_HBA_RESET) ? 
nr_wqdesc : nr_wqdesc - 1); +} + +int +snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len) +{ + dma_addr_t pa = 0; + unsigned long flags; + struct snic_fw_stats *fwstats = &snic->s_stats.fw; + struct snic_host_req *req = (struct snic_host_req *) os_buf; + long act_reqs; + long desc_avail = 0; + int q_num = 0; + + snic_print_desc(__func__, os_buf, len); + + /* Map request buffer */ + pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE); + if (dma_mapping_error(&snic->pdev->dev, pa)) { + SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n"); + + return -ENOMEM; + } + + req->req_pa = (ulong)pa; + + q_num = snic_select_wq(snic); + + spin_lock_irqsave(&snic->wq_lock[q_num], flags); + desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type); + if (desc_avail <= 0) { + dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE); + req->req_pa = 0; + spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); + atomic64_inc(&snic->s_stats.misc.wq_alloc_fail); + SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no); + + return -ENOMEM; + } + + snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1); + /* + * Update stats + * note: when multi queue enabled, fw actv_reqs should be per queue. + */ + act_reqs = atomic64_inc_return(&fwstats->actv_reqs); + spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); + + if (act_reqs > atomic64_read(&fwstats->max_actv_reqs)) + atomic64_set(&fwstats->max_actv_reqs, act_reqs); + + return 0; +} /* end of snic_queue_wq_desc() */ + +/* + * snic_handle_untagged_req: Adds snic specific requests to spl_cmd_list. + * Purpose : Used during driver unload to clean up the requests. + */ +void +snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi) +{ + unsigned long flags; + + INIT_LIST_HEAD(&rqi->list); + + spin_lock_irqsave(&snic->spl_cmd_lock, flags); + list_add_tail(&rqi->list, &snic->spl_cmd_list); + spin_unlock_irqrestore(&snic->spl_cmd_lock, flags); +} + +/* + * snic_req_init: + * Allocates snic_req_info + snic_host_req + sgl data, and initializes. + */ +struct snic_req_info * +snic_req_init(struct snic *snic, int sg_cnt) +{ + u8 typ; + struct snic_req_info *rqi = NULL; + + typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ? + SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL; + + rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC); + if (!rqi) { + atomic64_inc(&snic->s_stats.io.alloc_fail); + SNIC_HOST_ERR(snic->shost, + "Failed to allocate memory from snic req pool id = %d\n", + typ); + return rqi; + } + + memset(rqi, 0, sizeof(*rqi)); + rqi->rq_pool_type = typ; + rqi->start_time = jiffies; + rqi->req = (struct snic_host_req *) (rqi + 1); + rqi->req_len = sizeof(struct snic_host_req); + rqi->snic = snic; + + rqi->req = (struct snic_host_req *)(rqi + 1); + + if (sg_cnt == 0) + goto end; + + rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc)); + + if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl)) + atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt); + + SNIC_BUG_ON(sg_cnt > SNIC_MAX_SG_DESC_CNT); + atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]); + +end: + memset(rqi->req, 0, rqi->req_len); + + /* pre initialization of init_ctx to support req_to_rqi */ + rqi->req->hdr.init_ctx = (ulong) rqi; + + SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocatd.\n", rqi); + + return rqi; +} /* end of snic_req_init */ + +/* + * snic_abort_req_init : Inits abort request. 
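snic_req_init() above carves the snic_host_req out of the same allocation as its snic_req_info (req = (struct snic_host_req *)(rqi + 1)) and stores the rqi pointer in hdr.init_ctx, so a completion that only carries the request header can recover its bookkeeping structure. A userspace rendering of that layout; demo_req/demo_rqi are illustrative types, not driver structures.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_req {
        uintptr_t init_ctx;     /* opaque cookie echoed back by the "firmware" */
        uint8_t payload[48];
};

struct demo_rqi {
        unsigned long start_time;
        struct demo_req *req;
};

static struct demo_rqi *rqi_alloc(void)
{
        struct demo_rqi *rqi = calloc(1, sizeof(*rqi) + sizeof(struct demo_req));

        if (!rqi)
                return NULL;
        rqi->req = (struct demo_req *)(rqi + 1);        /* request sits right after */
        rqi->req->init_ctx = (uintptr_t)rqi;            /* back-pointer for completions */
        return rqi;
}

int main(void)
{
        struct demo_rqi *rqi = rqi_alloc();
        struct demo_req *req = rqi->req;

        /* a completion handler only sees the request, yet can recover the rqi */
        printf("round-trip ok: %d\n", (struct demo_rqi *)req->init_ctx == rqi);
        free(rqi);
        return 0;
}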
+ */ +struct snic_host_req * +snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi) +{ + struct snic_host_req *req = NULL; + + SNIC_BUG_ON(!rqi); + + /* If abort to be issued second time, then reuse */ + if (rqi->abort_req) + return rqi->abort_req; + + + req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC); + if (!req) { + SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n"); + WARN_ON_ONCE(1); + + return NULL; + } + + rqi->abort_req = req; + memset(req, 0, sizeof(struct snic_host_req)); + /* pre initialization of init_ctx to support req_to_rqi */ + req->hdr.init_ctx = (ulong) rqi; + + return req; +} /* end of snic_abort_req_init */ + +/* + * snic_dr_req_init : Inits device reset req + */ +struct snic_host_req * +snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi) +{ + struct snic_host_req *req = NULL; + + SNIC_BUG_ON(!rqi); + + req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC); + if (!req) { + SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n"); + WARN_ON_ONCE(1); + + return NULL; + } + + SNIC_BUG_ON(rqi->dr_req != NULL); + rqi->dr_req = req; + memset(req, 0, sizeof(struct snic_host_req)); + /* pre initialization of init_ctx to support req_to_rqi */ + req->hdr.init_ctx = (ulong) rqi; + + return req; +} /* end of snic_dr_req_init */ + +/* frees snic_req_info and snic_host_req */ +void +snic_req_free(struct snic *snic, struct snic_req_info *rqi) +{ + SNIC_BUG_ON(rqi->req == rqi->abort_req); + SNIC_BUG_ON(rqi->req == rqi->dr_req); + SNIC_BUG_ON(rqi->sge_va != 0); + + SNIC_SCSI_DBG(snic->shost, + "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n", + rqi, rqi->req, rqi->abort_req, rqi->dr_req); + + if (rqi->abort_req) { + if (rqi->abort_req->req_pa) + dma_unmap_single(&snic->pdev->dev, + rqi->abort_req->req_pa, + sizeof(struct snic_host_req), + DMA_TO_DEVICE); + + mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]); + } + + if (rqi->dr_req) { + if (rqi->dr_req->req_pa) + dma_unmap_single(&snic->pdev->dev, + rqi->dr_req->req_pa, + sizeof(struct snic_host_req), + DMA_TO_DEVICE); + + mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]); + } + + if (rqi->req->req_pa) + dma_unmap_single(&snic->pdev->dev, + rqi->req->req_pa, + rqi->req_len, + DMA_TO_DEVICE); + + mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]); +} + +void +snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi) +{ + struct snic_sg_desc *sgd; + + sgd = req_to_sgl(rqi_to_req(rqi)); + SNIC_BUG_ON(sgd[0].addr == 0); + dma_unmap_single(&snic->pdev->dev, + le64_to_cpu(sgd[0].addr), + le32_to_cpu(sgd[0].len), + DMA_FROM_DEVICE); +} + +/* + * snic_free_all_untagged_reqs: Walks through untagged reqs and frees them. + */ +void +snic_free_all_untagged_reqs(struct snic *snic) +{ + struct snic_req_info *rqi; + struct list_head *cur, *nxt; + unsigned long flags; + + spin_lock_irqsave(&snic->spl_cmd_lock, flags); + list_for_each_safe(cur, nxt, &snic->spl_cmd_list) { + rqi = list_entry(cur, struct snic_req_info, list); + list_del_init(&rqi->list); + if (rqi->sge_va) { + snic_pci_unmap_rsp_buf(snic, rqi); + kfree((void *)rqi->sge_va); + rqi->sge_va = 0; + } + + snic_req_free(snic, rqi); + } + spin_unlock_irqrestore(&snic->spl_cmd_lock, flags); +} + +/* + * snic_release_untagged_req : Unlinks the untagged req and frees it. 
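snic_abort_req_init() above allocates the abort frame at most once per request and simply returns the cached one when an abort is retried, so a retry can never fail on allocation. The same memoized-allocation shape in miniature; struct req and abort_req_get() are illustrative names.

#include <stdio.h>
#include <stdlib.h>

struct req {
        void *abort_req;        /* lazily allocated, reused on every retry */
};

static void *abort_req_get(struct req *rqi, size_t len)
{
        if (rqi->abort_req)             /* second and later aborts reuse it */
                return rqi->abort_req;

        rqi->abort_req = calloc(1, len);
        return rqi->abort_req;
}

int main(void)
{
        struct req rqi = { 0 };
        void *a = abort_req_get(&rqi, 64);
        void *b = abort_req_get(&rqi, 64);

        printf("same buffer: %d\n", a == b);    /* prints: same buffer: 1 */
        free(rqi.abort_req);
        return 0;
}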
+ */ +void +snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi) +{ + unsigned long flags; + + spin_lock_irqsave(&snic->snic_lock, flags); + if (snic->in_remove) { + spin_unlock_irqrestore(&snic->snic_lock, flags); + goto end; + } + spin_unlock_irqrestore(&snic->snic_lock, flags); + + spin_lock_irqsave(&snic->spl_cmd_lock, flags); + if (list_empty(&rqi->list)) { + spin_unlock_irqrestore(&snic->spl_cmd_lock, flags); + goto end; + } + list_del_init(&rqi->list); + spin_unlock_irqrestore(&snic->spl_cmd_lock, flags); + snic_req_free(snic, rqi); + +end: + return; +} + +/* dump buf in hex fmt */ +void +snic_hex_dump(char *pfx, char *data, int len) +{ + SNIC_INFO("%s Dumping Data of Len = %d\n", pfx, len); + print_hex_dump_bytes(pfx, DUMP_PREFIX_NONE, data, len); +} + +#define LINE_BUFSZ 128 /* for snic_print_desc fn */ +static void +snic_dump_desc(const char *fn, char *os_buf, int len) +{ + struct snic_host_req *req = (struct snic_host_req *) os_buf; + struct snic_fw_req *fwreq = (struct snic_fw_req *) os_buf; + struct snic_req_info *rqi = NULL; + char line[LINE_BUFSZ] = { '\0' }; + char *cmd_str = NULL; + + if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL) + rqi = (struct snic_req_info *) fwreq->hdr.init_ctx; + else + rqi = (struct snic_req_info *) req->hdr.init_ctx; + + SNIC_BUG_ON(rqi == NULL || rqi->req == NULL); + switch (req->hdr.type) { + case SNIC_REQ_REPORT_TGTS: + cmd_str = "report-tgt : "; + snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :"); + break; + + case SNIC_REQ_ICMND: + cmd_str = "icmnd : "; + snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :", + req->u.icmnd.cdb[0]); + break; + + case SNIC_REQ_ITMF: + cmd_str = "itmf : "; + snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :"); + break; + + case SNIC_REQ_HBA_RESET: + cmd_str = "hba reset :"; + snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :"); + break; + + case SNIC_REQ_EXCH_VER: + cmd_str = "exch ver : "; + snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :"); + break; + + case SNIC_REQ_TGT_INFO: + cmd_str = "tgt info : "; + break; + + case SNIC_RSP_REPORT_TGTS_CMPL: + cmd_str = "report tgt cmpl : "; + snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :"); + break; + + case SNIC_RSP_ICMND_CMPL: + cmd_str = "icmnd_cmpl : "; + snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :", + rqi->req->u.icmnd.cdb[0]); + break; + + case SNIC_RSP_ITMF_CMPL: + cmd_str = "itmf_cmpl : "; + snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :"); + break; + + case SNIC_RSP_HBA_RESET_CMPL: + cmd_str = "hba_reset_cmpl : "; + snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :"); + break; + + case SNIC_RSP_EXCH_VER_CMPL: + cmd_str = "exch_ver_cmpl : "; + snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :"); + break; + + case SNIC_MSG_ACK: + cmd_str = "msg ack : "; + snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :"); + break; + + case SNIC_MSG_ASYNC_EVNOTIFY: + cmd_str = "async notify : "; + snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :"); + break; + + default: + cmd_str = "unknown : "; + SNIC_BUG_ON(1); + break; + } + + SNIC_INFO("%s:%s >>cmndid=%x:sg_cnt = %x:status = %x:ctx = %lx.\n", + fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status, + req->hdr.init_ctx); + + /* Enable it, to dump byte stream */ + if (snic_log_level & 0x20) + snic_hex_dump(cmd_str, os_buf, len); +} /* end of __snic_print_desc */ + +void +snic_print_desc(const char *fn, char *os_buf, int len) +{ + if (snic_log_level & SNIC_DESC_LOGGING) + snic_dump_desc(fn, os_buf, len); +} + +void +snic_calc_io_process_time(struct snic *snic, struct 
snic_req_info *rqi) +{ + u64 duration; + + duration = jiffies - rqi->start_time; + + if (duration > atomic64_read(&snic->s_stats.io.max_time)) + atomic64_set(&snic->s_stats.io.max_time, duration); +} diff --git a/drivers/scsi/snic/snic_io.h b/drivers/scsi/snic/snic_io.h new file mode 100644 index 000000000..de6694a24 --- /dev/null +++ b/drivers/scsi/snic/snic_io.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */ + +#ifndef _SNIC_IO_H +#define _SNIC_IO_H + +#define SNIC_DFLT_SG_DESC_CNT 32 /* Default descriptors for sgl */ +#define SNIC_MAX_SG_DESC_CNT 60 /* Max descriptor for sgl */ +#define SNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */ + +/* SG descriptor for snic */ +struct snic_sg_desc { + __le64 addr; + __le32 len; + u32 _resvd; +}; + +struct snic_dflt_sgl { + struct snic_sg_desc sg_desc[SNIC_DFLT_SG_DESC_CNT]; +}; + +struct snic_max_sgl { + struct snic_sg_desc sg_desc[SNIC_MAX_SG_DESC_CNT]; +}; + +enum snic_req_cache_type { + SNIC_REQ_CACHE_DFLT_SGL = 0, /* cache with default size sgl */ + SNIC_REQ_CACHE_MAX_SGL, /* cache with max size sgl */ + SNIC_REQ_TM_CACHE, /* cache for task mgmt reqs contains + snic_host_req objects only*/ + SNIC_REQ_MAX_CACHES /* number of sgl caches */ +}; + +/* Per IO internal state */ +struct snic_internal_io_state { + char *rqi; + u64 flags; + u32 state; + u32 abts_status; /* Abort completion status */ + u32 lr_status; /* device reset completion status */ +}; + +/* IO state machine */ +enum snic_ioreq_state { + SNIC_IOREQ_NOT_INITED = 0, + SNIC_IOREQ_PENDING, + SNIC_IOREQ_ABTS_PENDING, + SNIC_IOREQ_ABTS_COMPLETE, + SNIC_IOREQ_LR_PENDING, + SNIC_IOREQ_LR_COMPLETE, + SNIC_IOREQ_COMPLETE, +}; + +struct snic; +struct snic_host_req; + +/* + * snic_req_info : Contains info about IO, one per scsi command. 
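+ * The rqi, its snic_host_req and the SG descriptors come from one slab object + * (see the req_cache sizing in snic_global_data_init()): req_to_sgl() assumes + * the SG list immediately follows the host request, and req_to_rqi() relies on + * hdr.init_ctx pointing back to this structure.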
+ * Notes: Make sure that the structure is aligned to 16 B; + * this helps in easy access to snic_req_info from snic_host_req + */ +struct snic_req_info { + struct list_head list; + struct snic_host_req *req; + u64 start_time; /* start time in jiffies */ + u16 rq_pool_type; /* notion of request pool type */ + u16 req_len; /* buf len passed to fw (req + sgl)*/ + u32 tgt_id; + + u32 tm_tag; + u8 io_cmpl:1; /* set to 1 when fw completes IO */ + u8 resvd[3]; + struct scsi_cmnd *sc; /* Associated scsi cmd */ + struct snic *snic; /* Associated snic */ + ulong sge_va; /* Pointer to Resp Buffer */ + u64 snsbuf_va; + + struct snic_host_req *abort_req; + struct completion *abts_done; + + struct snic_host_req *dr_req; + struct completion *dr_done; +}; + + +#define rqi_to_req(rqi) \ + ((struct snic_host_req *) (((struct snic_req_info *)rqi)->req)) + +#define req_to_rqi(req) \ + ((struct snic_req_info *) (((struct snic_host_req *)req)->hdr.init_ctx)) + +#define req_to_sgl(req) \ + ((struct snic_sg_desc *) (((struct snic_host_req *)req)+1)) + +struct snic_req_info * +snic_req_init(struct snic *, int sg_cnt); +void snic_req_free(struct snic *, struct snic_req_info *); +void snic_calc_io_process_time(struct snic *, struct snic_req_info *); +void snic_pci_unmap_rsp_buf(struct snic *, struct snic_req_info *); +struct snic_host_req * +snic_abort_req_init(struct snic *, struct snic_req_info *); +struct snic_host_req * +snic_dr_req_init(struct snic *, struct snic_req_info *); +#endif /* _SNIC_IO_H */ diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c new file mode 100644 index 000000000..471a37422 --- /dev/null +++ b/drivers/scsi/snic/snic_isr.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2014 Cisco Systems, Inc. All rights reserved. + +#include +#include +#include +#include + +#include "vnic_dev.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "snic_io.h" +#include "snic.h" + + +/* + * snic_isr_msix_wq : MSIx ISR for work queue.
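+ * Drains WQ completions via snic_wq_cmpl_handler() and returns the consumed + * credits with the interrupt unmasked and its coalescing timer reset.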
+ */ + +static irqreturn_t +snic_isr_msix_wq(int irq, void *data) +{ + struct snic *snic = data; + unsigned long wq_work_done = 0; + + snic->s_stats.misc.last_isr_time = jiffies; + atomic64_inc(&snic->s_stats.misc.ack_isr_cnt); + + wq_work_done = snic_wq_cmpl_handler(snic, -1); + svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ], + wq_work_done, + 1 /* unmask intr */, + 1 /* reset intr timer */); + + return IRQ_HANDLED; +} /* end of snic_isr_msix_wq */ + +static irqreturn_t +snic_isr_msix_io_cmpl(int irq, void *data) +{ + struct snic *snic = data; + unsigned long iocmpl_work_done = 0; + + snic->s_stats.misc.last_isr_time = jiffies; + atomic64_inc(&snic->s_stats.misc.cmpl_isr_cnt); + + iocmpl_work_done = snic_fwcq_cmpl_handler(snic, -1); + svnic_intr_return_credits(&snic->intr[SNIC_MSIX_IO_CMPL], + iocmpl_work_done, + 1 /* unmask intr */, + 1 /* reset intr timer */); + + return IRQ_HANDLED; +} /* end of snic_isr_msix_io_cmpl */ + +static irqreturn_t +snic_isr_msix_err_notify(int irq, void *data) +{ + struct snic *snic = data; + + snic->s_stats.misc.last_isr_time = jiffies; + atomic64_inc(&snic->s_stats.misc.errnotify_isr_cnt); + + svnic_intr_return_all_credits(&snic->intr[SNIC_MSIX_ERR_NOTIFY]); + snic_log_q_error(snic); + + /*Handling link events */ + snic_handle_link_event(snic); + + return IRQ_HANDLED; +} /* end of snic_isr_msix_err_notify */ + + +void +snic_free_intr(struct snic *snic) +{ + int i; + + /* ONLY interrupt mode MSIX is supported */ + for (i = 0; i < ARRAY_SIZE(snic->msix); i++) { + if (snic->msix[i].requested) { + free_irq(pci_irq_vector(snic->pdev, i), + snic->msix[i].devid); + } + } +} /* end of snic_free_intr */ + +int +snic_request_intr(struct snic *snic) +{ + int ret = 0, i; + enum vnic_dev_intr_mode intr_mode; + + intr_mode = svnic_dev_get_intr_mode(snic->vdev); + SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX); + + /* + * Currently HW supports single WQ and CQ. So passing devid as snic. + * When hardware supports multiple WQs and CQs, one idea is + * to pass devid as corresponding WQ or CQ ptr and retrieve snic + * from queue ptr. + * Except for err_notify, which is always one. 
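+ * Three vectors are wired up below: SNIC_MSIX_WQ, SNIC_MSIX_IO_CMPL and + * SNIC_MSIX_ERR_NOTIFY, each bound to the matching handler above.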
+ */ + sprintf(snic->msix[SNIC_MSIX_WQ].devname, + "%.11s-scsi-wq", + snic->name); + snic->msix[SNIC_MSIX_WQ].isr = snic_isr_msix_wq; + snic->msix[SNIC_MSIX_WQ].devid = snic; + + sprintf(snic->msix[SNIC_MSIX_IO_CMPL].devname, + "%.11s-io-cmpl", + snic->name); + snic->msix[SNIC_MSIX_IO_CMPL].isr = snic_isr_msix_io_cmpl; + snic->msix[SNIC_MSIX_IO_CMPL].devid = snic; + + sprintf(snic->msix[SNIC_MSIX_ERR_NOTIFY].devname, + "%.11s-err-notify", + snic->name); + snic->msix[SNIC_MSIX_ERR_NOTIFY].isr = snic_isr_msix_err_notify; + snic->msix[SNIC_MSIX_ERR_NOTIFY].devid = snic; + + for (i = 0; i < ARRAY_SIZE(snic->msix); i++) { + ret = request_irq(pci_irq_vector(snic->pdev, i), + snic->msix[i].isr, + 0, + snic->msix[i].devname, + snic->msix[i].devid); + if (ret) { + SNIC_HOST_ERR(snic->shost, + "MSI-X: request_irq(%d) failed %d\n", + i, + ret); + snic_free_intr(snic); + break; + } + snic->msix[i].requested = 1; + } + + return ret; +} /* end of snic_request_intr */ + +int +snic_set_intr_mode(struct snic *snic) +{ + unsigned int n = ARRAY_SIZE(snic->wq); + unsigned int m = SNIC_CQ_IO_CMPL_MAX; + unsigned int vecs = n + m + 1; + + /* + * We need n WQs, m CQs, and n+m+1 INTRs + * (last INTR is used for WQ/CQ errors and notification area + */ + BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) > + ARRAY_SIZE(snic->intr)); + + if (snic->wq_count < n || snic->cq_count < n + m) + goto fail; + + if (pci_alloc_irq_vectors(snic->pdev, vecs, vecs, PCI_IRQ_MSIX) < 0) + goto fail; + + snic->wq_count = n; + snic->cq_count = n + m; + snic->intr_count = vecs; + snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY; + + SNIC_ISR_DBG(snic->shost, "Using MSI-X Interrupts\n"); + svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_MSIX); + return 0; +fail: + svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); + return -EINVAL; +} /* end of snic_set_intr_mode */ + +void +snic_clear_intr_mode(struct snic *snic) +{ + pci_free_irq_vectors(snic->pdev); + svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_INTX); +} diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c new file mode 100644 index 000000000..cc824dcfe --- /dev/null +++ b/drivers/scsi/snic/snic_main.c @@ -0,0 +1,998 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2014 Cisco Systems, Inc. All rights reserved. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "snic.h" +#include "snic_fwint.h" + +#define PCI_DEVICE_ID_CISCO_SNIC 0x0046 + +/* Supported devices by snic module */ +static struct pci_device_id snic_id_table[] = { + {PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) }, + { 0, } /* end of table */ +}; + +unsigned int snic_log_level = 0x0; +module_param(snic_log_level, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels"); + +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS +unsigned int snic_trace_max_pages = 16; +module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(snic_trace_max_pages, + "Total allocated memory pages for snic trace buffer"); + +#endif +unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH; +module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN"); + +/* + * snic_slave_alloc : callback function to SCSI Mid Layer, called on + * scsi device initialization. 
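+ * Returns -ENXIO when the backing snic target is missing or not ready, so the + * midlayer does not create a scsi_device for it.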
+ */ +static int +snic_slave_alloc(struct scsi_device *sdev) +{ + struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev)); + + if (!tgt || snic_tgt_chkready(tgt)) + return -ENXIO; + + return 0; +} + +/* + * snic_slave_configure : callback function to SCSI Mid Layer, called on + * scsi device initialization. + */ +static int +snic_slave_configure(struct scsi_device *sdev) +{ + struct snic *snic = shost_priv(sdev->host); + u32 qdepth = 0, max_ios = 0; + int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ; + + /* Set Queue Depth */ + max_ios = snic_max_qdepth; + qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH); + scsi_change_queue_depth(sdev, qdepth); + + if (snic->fwinfo.io_tmo > 1) + tmo = snic->fwinfo.io_tmo * HZ; + + /* FW requires extended timeouts */ + blk_queue_rq_timeout(sdev->request_queue, tmo); + + return 0; +} + +static int +snic_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + struct snic *snic = shost_priv(sdev->host); + int qsz = 0; + + qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH); + if (qsz < sdev->queue_depth) + atomic64_inc(&snic->s_stats.misc.qsz_rampdown); + else if (qsz > sdev->queue_depth) + atomic64_inc(&snic->s_stats.misc.qsz_rampup); + + atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth); + + scsi_change_queue_depth(sdev, qsz); + + return sdev->queue_depth; +} + +static const struct scsi_host_template snic_host_template = { + .module = THIS_MODULE, + .name = SNIC_DRV_NAME, + .queuecommand = snic_queuecommand, + .eh_abort_handler = snic_abort_cmd, + .eh_device_reset_handler = snic_device_reset, + .eh_host_reset_handler = snic_host_reset, + .slave_alloc = snic_slave_alloc, + .slave_configure = snic_slave_configure, + .change_queue_depth = snic_change_queue_depth, + .this_id = -1, + .cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH, + .can_queue = SNIC_MAX_IO_REQ, + .sg_tablesize = SNIC_MAX_SG_DESC_CNT, + .max_sectors = 0x800, + .shost_groups = snic_host_groups, + .track_queue_depth = 1, + .cmd_size = sizeof(struct snic_internal_io_state), + .proc_name = "snic_scsi", +}; + +/* + * snic_handle_link_event : Handles link events such as link up/down/error + */ +void +snic_handle_link_event(struct snic *snic) +{ + unsigned long flags; + + spin_lock_irqsave(&snic->snic_lock, flags); + if (snic->stop_link_events) { + spin_unlock_irqrestore(&snic->snic_lock, flags); + + return; + } + spin_unlock_irqrestore(&snic->snic_lock, flags); + + queue_work(snic_glob->event_q, &snic->link_work); +} /* end of snic_handle_link_event */ + +/* + * snic_notify_set : sets notification area + * This notification area is to receive events from fw + * Note: snic supports only MSIX interrupts, in which we can just call + * svnic_dev_notify_set directly + */ +static int +snic_notify_set(struct snic *snic) +{ + int ret = 0; + enum vnic_dev_intr_mode intr_mode; + + intr_mode = svnic_dev_get_intr_mode(snic->vdev); + + if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) { + ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY); + } else { + SNIC_HOST_ERR(snic->shost, + "Interrupt mode should be setup before devcmd notify set %d\n", + intr_mode); + ret = -1; + } + + return ret; +} /* end of snic_notify_set */ + +/* + * snic_dev_wait : polls vnic open status. + */ +static int +snic_dev_wait(struct vnic_dev *vdev, + int (*start)(struct vnic_dev *, int), + int (*finished)(struct vnic_dev *, int *), + int arg) +{ + unsigned long time; + int ret, done; + int retry_cnt = 0; + + ret = start(vdev, arg); + if (ret) + return ret; + + /* + * Wait for func to complete...2 seconds max. 
+ * + * Sometimes schedule_timeout_uninterruptible take long time + * to wakeup, which results skipping retry. The retry counter + * ensures to retry at least two times. + */ + time = jiffies + (HZ * 2); + do { + ret = finished(vdev, &done); + if (ret) + return ret; + + if (done) + return 0; + schedule_timeout_uninterruptible(HZ/10); + ++retry_cnt; + } while (time_after(time, jiffies) || (retry_cnt < 3)); + + return -ETIMEDOUT; +} /* end of snic_dev_wait */ + +/* + * snic_cleanup: called by snic_remove + * Stops the snic device, masks all interrupts, Completed CQ entries are + * drained. Posted WQ/RQ/Copy-WQ entries are cleanup + */ +static int +snic_cleanup(struct snic *snic) +{ + unsigned int i; + int ret; + + svnic_dev_disable(snic->vdev); + for (i = 0; i < snic->intr_count; i++) + svnic_intr_mask(&snic->intr[i]); + + for (i = 0; i < snic->wq_count; i++) { + ret = svnic_wq_disable(&snic->wq[i]); + if (ret) + return ret; + } + + /* Clean up completed IOs */ + snic_fwcq_cmpl_handler(snic, -1); + + snic_wq_cmpl_handler(snic, -1); + + /* Clean up the IOs that have not completed */ + for (i = 0; i < snic->wq_count; i++) + svnic_wq_clean(&snic->wq[i], snic_free_wq_buf); + + for (i = 0; i < snic->cq_count; i++) + svnic_cq_clean(&snic->cq[i]); + + for (i = 0; i < snic->intr_count; i++) + svnic_intr_clean(&snic->intr[i]); + + /* Cleanup snic specific requests */ + snic_free_all_untagged_reqs(snic); + + /* Cleanup Pending SCSI commands */ + snic_shutdown_scsi_cleanup(snic); + + for (i = 0; i < SNIC_REQ_MAX_CACHES; i++) + mempool_destroy(snic->req_pool[i]); + + return 0; +} /* end of snic_cleanup */ + + +static void +snic_iounmap(struct snic *snic) +{ + if (snic->bar0.vaddr) + iounmap(snic->bar0.vaddr); +} + +/* + * snic_vdev_open_done : polls for svnic_dev_open cmd completion. + */ +static int +snic_vdev_open_done(struct vnic_dev *vdev, int *done) +{ + struct snic *snic = svnic_dev_priv(vdev); + int ret; + int nretries = 5; + + do { + ret = svnic_dev_open_done(vdev, done); + if (ret == 0) + break; + + SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n"); + } while (nretries--); + + return ret; +} /* end of snic_vdev_open_done */ + +/* + * snic_add_host : registers scsi host with ML + */ +static int +snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev) +{ + int ret = 0; + + ret = scsi_add_host(shost, &pdev->dev); + if (ret) { + SNIC_HOST_ERR(shost, + "snic: scsi_add_host failed. %d\n", + ret); + + return ret; + } + + SNIC_BUG_ON(shost->work_q != NULL); + snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d", + shost->host_no); + shost->work_q = create_singlethread_workqueue(shost->work_q_name); + if (!shost->work_q) { + SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n"); + + ret = -ENOMEM; + } + + return ret; +} /* end of snic_add_host */ + +static void +snic_del_host(struct Scsi_Host *shost) +{ + if (!shost->work_q) + return; + + destroy_workqueue(shost->work_q); + shost->work_q = NULL; + scsi_remove_host(shost); +} + +int +snic_get_state(struct snic *snic) +{ + return atomic_read(&snic->state); +} + +void +snic_set_state(struct snic *snic, enum snic_state state) +{ + SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n", + snic_state_to_str(snic_get_state(snic)), + snic_state_to_str(state)); + + atomic_set(&snic->state, state); +} + +/* + * snic_probe : Initialize the snic interface. 
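+ * Sets up PCI and BAR0, discovers vNIC resources, allocates WQ/CQ/interrupt + * resources and request pools, then registers the SCSI host and starts target + * discovery.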
+ */ +static int +snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct Scsi_Host *shost; + struct snic *snic; + mempool_t *pool; + unsigned long flags; + u32 max_ios = 0; + int ret, i; + + /* Device Information */ + SNIC_INFO("snic device %4x:%4x:%4x:%4x: ", + pdev->vendor, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); + + SNIC_INFO("snic device bus %x: slot %x: fn %x\n", + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + /* + * Allocate SCSI Host and setup association between host, and snic + */ + shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic)); + if (!shost) { + SNIC_ERR("Unable to alloc scsi_host\n"); + ret = -ENOMEM; + + goto prob_end; + } + snic = shost_priv(shost); + snic->shost = shost; + + snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME, + shost->host_no); + + SNIC_HOST_INFO(shost, + "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n", + shost->host_no, snic, shost, pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + /* Per snic debugfs init */ + snic_stats_debugfs_init(snic); +#endif + + /* Setup PCI Resources */ + pci_set_drvdata(pdev, snic); + snic->pdev = pdev; + + ret = pci_enable_device(pdev); + if (ret) { + SNIC_HOST_ERR(shost, + "Cannot enable PCI Resources, aborting : %d\n", + ret); + + goto err_free_snic; + } + + ret = pci_request_regions(pdev, SNIC_DRV_NAME); + if (ret) { + SNIC_HOST_ERR(shost, + "Cannot obtain PCI Resources, aborting : %d\n", + ret); + + goto err_pci_disable; + } + + pci_set_master(pdev); + + /* + * Query PCI Controller on system for DMA addressing + * limitation for the device. Try 43-bit first, and + * fail to 32-bit. + */ + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43)); + if (ret) { + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + SNIC_HOST_ERR(shost, + "No Usable DMA Configuration, aborting %d\n", + ret); + goto err_rel_regions; + } + } + + /* Map vNIC resources from BAR0 */ + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n"); + + ret = -ENODEV; + goto err_rel_regions; + } + + snic->bar0.vaddr = pci_iomap(pdev, 0, 0); + if (!snic->bar0.vaddr) { + SNIC_HOST_ERR(shost, + "Cannot memory map BAR0 res hdr aborting.\n"); + + ret = -ENODEV; + goto err_rel_regions; + } + + snic->bar0.bus_addr = pci_resource_start(pdev, 0); + snic->bar0.len = pci_resource_len(pdev, 0); + SNIC_BUG_ON(snic->bar0.bus_addr == 0); + + /* Devcmd2 Resource Allocation and Initialization */ + snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1); + if (!snic->vdev) { + SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n"); + + ret = -ENODEV; + goto err_iounmap; + } + + ret = svnic_dev_cmd_init(snic->vdev, 0); + if (ret) { + SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret); + + goto err_vnic_unreg; + } + + ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0); + if (ret) { + SNIC_HOST_ERR(shost, + "vNIC dev open failed, aborting. %d\n", + ret); + + goto err_vnic_unreg; + } + + ret = svnic_dev_init(snic->vdev, 0); + if (ret) { + SNIC_HOST_ERR(shost, + "vNIC dev init failed. aborting. %d\n", + ret); + + goto err_dev_close; + } + + /* Get vNIC information */ + ret = snic_get_vnic_config(snic); + if (ret) { + SNIC_HOST_ERR(shost, + "Get vNIC configuration failed, aborting. 
%d\n", + ret); + + goto err_dev_close; + } + + /* Configure Maximum Outstanding IO reqs */ + max_ios = snic->config.io_throttle_count; + if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD) + shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ, + max_t(u32, SNIC_MIN_IO_REQ, max_ios)); + + snic->max_tag_id = shost->can_queue; + + shost->max_lun = snic->config.luns_per_tgt; + shost->max_id = SNIC_MAX_TARGET; + + shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/ + + snic_get_res_counts(snic); + + /* + * Assumption: Only MSIx is supported + */ + ret = snic_set_intr_mode(snic); + if (ret) { + SNIC_HOST_ERR(shost, + "Failed to set intr mode aborting. %d\n", + ret); + + goto err_dev_close; + } + + ret = snic_alloc_vnic_res(snic); + if (ret) { + SNIC_HOST_ERR(shost, + "Failed to alloc vNIC resources aborting. %d\n", + ret); + + goto err_clear_intr; + } + + /* Initialize specific lists */ + INIT_LIST_HEAD(&snic->list); + + /* + * spl_cmd_list for maintaining snic specific cmds + * such as EXCH_VER_REQ, REPORT_TARGETS etc + */ + INIT_LIST_HEAD(&snic->spl_cmd_list); + spin_lock_init(&snic->spl_cmd_lock); + + /* initialize all snic locks */ + spin_lock_init(&snic->snic_lock); + + for (i = 0; i < SNIC_WQ_MAX; i++) + spin_lock_init(&snic->wq_lock[i]); + + for (i = 0; i < SNIC_IO_LOCKS; i++) + spin_lock_init(&snic->io_req_lock[i]); + + pool = mempool_create_slab_pool(2, + snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); + if (!pool) { + SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n"); + + ret = -ENOMEM; + goto err_free_res; + } + + snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool; + + pool = mempool_create_slab_pool(2, + snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); + if (!pool) { + SNIC_HOST_ERR(shost, "max sgl pool creation failed\n"); + + ret = -ENOMEM; + goto err_free_dflt_sgl_pool; + } + + snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool; + + pool = mempool_create_slab_pool(2, + snic_glob->req_cache[SNIC_REQ_TM_CACHE]); + if (!pool) { + SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n"); + + ret = -ENOMEM; + goto err_free_max_sgl_pool; + } + + snic->req_pool[SNIC_REQ_TM_CACHE] = pool; + + /* Initialize snic state */ + atomic_set(&snic->state, SNIC_INIT); + + atomic_set(&snic->ios_inflight, 0); + + /* Setup notification buffer area */ + ret = snic_notify_set(snic); + if (ret) { + SNIC_HOST_ERR(shost, + "Failed to alloc notify buffer aborting. %d\n", + ret); + + goto err_free_tmreq_pool; + } + + spin_lock_irqsave(&snic_glob->snic_list_lock, flags); + list_add_tail(&snic->list, &snic_glob->snic_list); + spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags); + + snic_disc_init(&snic->disc); + INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc); + INIT_WORK(&snic->disc_work, snic_handle_disc); + INIT_WORK(&snic->link_work, snic_handle_link); + + /* Enable all queues */ + for (i = 0; i < snic->wq_count; i++) + svnic_wq_enable(&snic->wq[i]); + + ret = svnic_dev_enable_wait(snic->vdev); + if (ret) { + SNIC_HOST_ERR(shost, + "vNIC dev enable failed w/ error %d\n", + ret); + + goto err_vdev_enable; + } + + ret = snic_request_intr(snic); + if (ret) { + SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret); + + goto err_req_intr; + } + + for (i = 0; i < snic->intr_count; i++) + svnic_intr_unmask(&snic->intr[i]); + + /* Get snic params */ + ret = snic_get_conf(snic); + if (ret) { + SNIC_HOST_ERR(shost, + "Failed to get snic io config from FW w err %d\n", + ret); + + goto err_get_conf; + } + + /* + * Initialization done with PCI system, hardware, firmware. 
+ * Add shost to SCSI + */ + ret = snic_add_host(shost, pdev); + if (ret) { + SNIC_HOST_ERR(shost, + "Adding scsi host Failed ... exiting. %d\n", + ret); + + goto err_get_conf; + } + + snic_set_state(snic, SNIC_ONLINE); + + ret = snic_disc_start(snic); + if (ret) { + SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n", + ret); + + goto err_get_conf; + } + + SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n"); + + return 0; + +err_get_conf: + snic_free_all_untagged_reqs(snic); + + for (i = 0; i < snic->intr_count; i++) + svnic_intr_mask(&snic->intr[i]); + + snic_free_intr(snic); + +err_req_intr: + svnic_dev_disable(snic->vdev); + +err_vdev_enable: + svnic_dev_notify_unset(snic->vdev); + + for (i = 0; i < snic->wq_count; i++) { + int rc = 0; + + rc = svnic_wq_disable(&snic->wq[i]); + if (rc) { + SNIC_HOST_ERR(shost, + "WQ Disable Failed w/ err = %d\n", rc); + + break; + } + } + snic_del_host(snic->shost); + +err_free_tmreq_pool: + mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]); + +err_free_max_sgl_pool: + mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]); + +err_free_dflt_sgl_pool: + mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]); + +err_free_res: + snic_free_vnic_res(snic); + +err_clear_intr: + snic_clear_intr_mode(snic); + +err_dev_close: + svnic_dev_close(snic->vdev); + +err_vnic_unreg: + svnic_dev_unregister(snic->vdev); + +err_iounmap: + snic_iounmap(snic); + +err_rel_regions: + pci_release_regions(pdev); + +err_pci_disable: + pci_disable_device(pdev); + +err_free_snic: +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + snic_stats_debugfs_remove(snic); +#endif + scsi_host_put(shost); + pci_set_drvdata(pdev, NULL); + +prob_end: + SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n", + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + return ret; +} /* end of snic_probe */ + + +/* + * snic_remove : invoked on unbinding the interface to cleanup the + * resources allocated in snic_probe on initialization. + */ +static void +snic_remove(struct pci_dev *pdev) +{ + struct snic *snic = pci_get_drvdata(pdev); + unsigned long flags; + + if (!snic) { + SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n", + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + return; + } + + /* + * Mark state so that the workqueue thread stops forwarding + * received frames and link events. ISR and other threads + * that can queue work items will also stop creating work + * items on the snic workqueue + */ + snic_set_state(snic, SNIC_OFFLINE); + spin_lock_irqsave(&snic->snic_lock, flags); + snic->stop_link_events = 1; + spin_unlock_irqrestore(&snic->snic_lock, flags); + + flush_workqueue(snic_glob->event_q); + snic_disc_term(snic); + + spin_lock_irqsave(&snic->snic_lock, flags); + snic->in_remove = 1; + spin_unlock_irqrestore(&snic->snic_lock, flags); + + /* + * This stops the snic device, masks all interrupts, Completed + * CQ entries are drained. 
Posted WQ/RQ/Copy-WQ entries are + * cleanup + */ + snic_cleanup(snic); + + spin_lock_irqsave(&snic_glob->snic_list_lock, flags); + list_del(&snic->list); + spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags); + + snic_tgt_del_all(snic); +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + snic_stats_debugfs_remove(snic); +#endif + snic_del_host(snic->shost); + + svnic_dev_notify_unset(snic->vdev); + snic_free_intr(snic); + snic_free_vnic_res(snic); + snic_clear_intr_mode(snic); + svnic_dev_close(snic->vdev); + svnic_dev_unregister(snic->vdev); + snic_iounmap(snic); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + /* this frees Scsi_Host and snic memory (continuous chunk) */ + scsi_host_put(snic->shost); +} /* end of snic_remove */ + + +struct snic_global *snic_glob; + +/* + * snic_global_data_init: Initialize SNIC Global Data + * Notes: All the global lists, variables should be part of global data + * this helps in debugging. + */ +static int +snic_global_data_init(void) +{ + int ret = 0; + struct kmem_cache *cachep; + ssize_t len = 0; + + snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL); + + if (!snic_glob) { + SNIC_ERR("Failed to allocate Global Context.\n"); + + ret = -ENOMEM; + goto gdi_end; + } + +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + /* Debugfs related Initialization */ + /* Create debugfs entries for snic */ + snic_debugfs_init(); + + /* Trace related Initialization */ + /* Allocate memory for trace buffer */ + ret = snic_trc_init(); + if (ret < 0) { + SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n"); + snic_trc_free(); + /* continue even if it fails */ + } + +#endif + INIT_LIST_HEAD(&snic_glob->snic_list); + spin_lock_init(&snic_glob->snic_list_lock); + + /* Create a cache for allocation of snic_host_req+default size ESGLs */ + len = sizeof(struct snic_req_info); + len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl); + cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN, + SLAB_HWCACHE_ALIGN, NULL); + if (!cachep) { + SNIC_ERR("Failed to create snic default sgl slab\n"); + ret = -ENOMEM; + + goto err_dflt_req_slab; + } + snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep; + + /* Create a cache for allocation of max size Extended SGLs */ + len = sizeof(struct snic_req_info); + len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl); + cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN, + SLAB_HWCACHE_ALIGN, NULL); + if (!cachep) { + SNIC_ERR("Failed to create snic max sgl slab\n"); + ret = -ENOMEM; + + goto err_max_req_slab; + } + snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep; + + len = sizeof(struct snic_host_req); + cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN, + SLAB_HWCACHE_ALIGN, NULL); + if (!cachep) { + SNIC_ERR("Failed to create snic tm req slab\n"); + ret = -ENOMEM; + + goto err_tmreq_slab; + } + snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep; + + /* snic_event queue */ + snic_glob->event_q = create_singlethread_workqueue("snic_event_wq"); + if (!snic_glob->event_q) { + SNIC_ERR("snic event queue create failed\n"); + ret = -ENOMEM; + + goto err_eventq; + } + + return ret; + +err_eventq: + kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]); + +err_tmreq_slab: + kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); + +err_max_req_slab: + kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); + +err_dflt_req_slab: +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + snic_trc_free(); + snic_debugfs_term(); +#endif + 
kfree(snic_glob); + snic_glob = NULL; + +gdi_end: + return ret; +} /* end of snic_glob_init */ + +/* + * snic_global_data_cleanup : Frees SNIC Global Data + */ +static void +snic_global_data_cleanup(void) +{ + SNIC_BUG_ON(snic_glob == NULL); + + destroy_workqueue(snic_glob->event_q); + kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]); + kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); + kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); + +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + /* Freeing Trace Resources */ + snic_trc_free(); + + /* Freeing Debugfs Resources */ + snic_debugfs_term(); +#endif + kfree(snic_glob); + snic_glob = NULL; +} /* end of snic_glob_cleanup */ + +static struct pci_driver snic_driver = { + .name = SNIC_DRV_NAME, + .id_table = snic_id_table, + .probe = snic_probe, + .remove = snic_remove, +}; + +static int __init +snic_init_module(void) +{ + int ret = 0; + +#ifndef __x86_64__ + SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n"); + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); +#endif + + SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION); + + ret = snic_global_data_init(); + if (ret) { + SNIC_ERR("Failed to Initialize Global Data.\n"); + + return ret; + } + + ret = pci_register_driver(&snic_driver); + if (ret < 0) { + SNIC_ERR("PCI driver register error\n"); + + goto err_pci_reg; + } + + return ret; + +err_pci_reg: + snic_global_data_cleanup(); + + return ret; +} + +static void __exit +snic_cleanup_module(void) +{ + pci_unregister_driver(&snic_driver); + snic_global_data_cleanup(); +} + +module_init(snic_init_module); +module_exit(snic_cleanup_module); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION); +MODULE_VERSION(SNIC_DRV_VERSION); +MODULE_DEVICE_TABLE(pci, snic_id_table); +MODULE_AUTHOR("Narsimhulu Musini , " + "Sesidhar Baddela "); diff --git a/drivers/scsi/snic/snic_res.c b/drivers/scsi/snic/snic_res.c new file mode 100644 index 000000000..43f1a2823 --- /dev/null +++ b/drivers/scsi/snic/snic_res.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2014 Cisco Systems, Inc. All rights reserved. 
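+// snic_res.c: reads the vNIC configuration from firmware and allocates, +// initializes and frees the WQ, CQ and interrupt resources of the adapter.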
+ +#include +#include +#include + +#include "wq_enet_desc.h" +#include "cq_enet_desc.h" +#include "vnic_resource.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "snic.h" + +int +snic_get_vnic_config(struct snic *snic) +{ + struct vnic_snic_config *c = &snic->config; + int ret; + +#define GET_CONFIG(m) \ + do { \ + ret = svnic_dev_spec(snic->vdev, \ + offsetof(struct vnic_snic_config, m), \ + sizeof(c->m), \ + &c->m); \ + if (ret) { \ + SNIC_HOST_ERR(snic->shost, \ + "Error getting %s, %d\n", #m, ret); \ + return ret; \ + } \ + } while (0) + + GET_CONFIG(wq_enet_desc_count); + GET_CONFIG(maxdatafieldsize); + GET_CONFIG(intr_timer); + GET_CONFIG(intr_timer_type); + GET_CONFIG(flags); + GET_CONFIG(io_throttle_count); + GET_CONFIG(port_down_timeout); + GET_CONFIG(port_down_io_retries); + GET_CONFIG(luns_per_tgt); + GET_CONFIG(xpt_type); + GET_CONFIG(hid); + + c->wq_enet_desc_count = min_t(u32, + VNIC_SNIC_WQ_DESCS_MAX, + max_t(u32, + VNIC_SNIC_WQ_DESCS_MIN, + c->wq_enet_desc_count)); + + c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16); + + c->maxdatafieldsize = min_t(u32, + VNIC_SNIC_MAXDATAFIELDSIZE_MAX, + max_t(u32, + VNIC_SNIC_MAXDATAFIELDSIZE_MIN, + c->maxdatafieldsize)); + + c->io_throttle_count = min_t(u32, + VNIC_SNIC_IO_THROTTLE_COUNT_MAX, + max_t(u32, + VNIC_SNIC_IO_THROTTLE_COUNT_MIN, + c->io_throttle_count)); + + c->port_down_timeout = min_t(u32, + VNIC_SNIC_PORT_DOWN_TIMEOUT_MAX, + c->port_down_timeout); + + c->port_down_io_retries = min_t(u32, + VNIC_SNIC_PORT_DOWN_IO_RETRIES_MAX, + c->port_down_io_retries); + + c->luns_per_tgt = min_t(u32, + VNIC_SNIC_LUNS_PER_TARGET_MAX, + max_t(u32, + VNIC_SNIC_LUNS_PER_TARGET_MIN, + c->luns_per_tgt)); + + c->intr_timer = min_t(u32, VNIC_INTR_TIMER_MAX, c->intr_timer); + + SNIC_INFO("vNIC resources wq %d\n", c->wq_enet_desc_count); + SNIC_INFO("vNIC mtu %d intr timer %d\n", + c->maxdatafieldsize, + c->intr_timer); + + SNIC_INFO("vNIC flags 0x%x luns per tgt %d\n", + c->flags, + c->luns_per_tgt); + + SNIC_INFO("vNIC io throttle count %d\n", c->io_throttle_count); + SNIC_INFO("vNIC port down timeout %d port down io retries %d\n", + c->port_down_timeout, + c->port_down_io_retries); + + SNIC_INFO("vNIC back end type = %d\n", c->xpt_type); + SNIC_INFO("vNIC hid = %d\n", c->hid); + + return 0; +} + +void +snic_get_res_counts(struct snic *snic) +{ + snic->wq_count = svnic_dev_get_res_count(snic->vdev, RES_TYPE_WQ); + SNIC_BUG_ON(snic->wq_count == 0); + snic->cq_count = svnic_dev_get_res_count(snic->vdev, RES_TYPE_CQ); + SNIC_BUG_ON(snic->cq_count == 0); + snic->intr_count = svnic_dev_get_res_count(snic->vdev, + RES_TYPE_INTR_CTRL); + SNIC_BUG_ON(snic->intr_count == 0); +} + +void +snic_free_vnic_res(struct snic *snic) +{ + unsigned int i; + + for (i = 0; i < snic->wq_count; i++) + svnic_wq_free(&snic->wq[i]); + + for (i = 0; i < snic->cq_count; i++) + svnic_cq_free(&snic->cq[i]); + + for (i = 0; i < snic->intr_count; i++) + svnic_intr_free(&snic->intr[i]); +} + +int +snic_alloc_vnic_res(struct snic *snic) +{ + enum vnic_dev_intr_mode intr_mode; + unsigned int mask_on_assertion; + unsigned int intr_offset; + unsigned int err_intr_enable; + unsigned int err_intr_offset; + unsigned int i; + int ret; + + intr_mode = svnic_dev_get_intr_mode(snic->vdev); + + SNIC_INFO("vNIC interrupt mode: %s\n", + ((intr_mode == VNIC_DEV_INTR_MODE_INTX) ? + "Legacy PCI INTx" : + ((intr_mode == VNIC_DEV_INTR_MODE_MSI) ? + "MSI" : + ((intr_mode == VNIC_DEV_INTR_MODE_MSIX) ? 
+ "MSI-X" : "Unknown")))); + + /* only MSI-X is supported */ + SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX); + + SNIC_INFO("wq %d cq %d intr %d\n", snic->wq_count, + snic->cq_count, + snic->intr_count); + + + /* Allocate WQs used for SCSI IOs */ + for (i = 0; i < snic->wq_count; i++) { + ret = svnic_wq_alloc(snic->vdev, + &snic->wq[i], + i, + snic->config.wq_enet_desc_count, + sizeof(struct wq_enet_desc)); + if (ret) + goto error_cleanup; + } + + /* CQ for each WQ */ + for (i = 0; i < snic->wq_count; i++) { + ret = svnic_cq_alloc(snic->vdev, + &snic->cq[i], + i, + snic->config.wq_enet_desc_count, + sizeof(struct cq_enet_wq_desc)); + if (ret) + goto error_cleanup; + } + + SNIC_BUG_ON(snic->cq_count != 2 * snic->wq_count); + /* CQ for FW TO host */ + for (i = snic->wq_count; i < snic->cq_count; i++) { + ret = svnic_cq_alloc(snic->vdev, + &snic->cq[i], + i, + (snic->config.wq_enet_desc_count * 3), + sizeof(struct snic_fw_req)); + if (ret) + goto error_cleanup; + } + + for (i = 0; i < snic->intr_count; i++) { + ret = svnic_intr_alloc(snic->vdev, &snic->intr[i], i); + if (ret) + goto error_cleanup; + } + + /* + * Init WQ Resources. + * WQ[0 to n] points to CQ[0 to n-1] + * firmware to host comm points to CQ[n to m+1] + */ + err_intr_enable = 1; + err_intr_offset = snic->err_intr_offset; + + for (i = 0; i < snic->wq_count; i++) { + svnic_wq_init(&snic->wq[i], + i, + err_intr_enable, + err_intr_offset); + } + + for (i = 0; i < snic->cq_count; i++) { + intr_offset = i; + + svnic_cq_init(&snic->cq[i], + 0 /* flow_control_enable */, + 1 /* color_enable */, + 0 /* cq_head */, + 0 /* cq_tail */, + 1 /* cq_tail_color */, + 1 /* interrupt_enable */, + 1 /* cq_entry_enable */, + 0 /* cq_message_enable */, + intr_offset, + 0 /* cq_message_addr */); + } + + /* + * Init INTR resources + * Assumption : snic is always in MSI-X mode + */ + SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX); + mask_on_assertion = 1; + + for (i = 0; i < snic->intr_count; i++) { + svnic_intr_init(&snic->intr[i], + snic->config.intr_timer, + snic->config.intr_timer_type, + mask_on_assertion); + } + + /* init the stats memory by making the first call here */ + ret = svnic_dev_stats_dump(snic->vdev, &snic->stats); + if (ret) { + SNIC_HOST_ERR(snic->shost, + "svnic_dev_stats_dump failed - x%x\n", + ret); + goto error_cleanup; + } + + /* Clear LIF stats */ + svnic_dev_stats_clear(snic->vdev); + ret = 0; + + return ret; + +error_cleanup: + snic_free_vnic_res(snic); + + return ret; +} + +void +snic_log_q_error(struct snic *snic) +{ + unsigned int i; + u32 err_status; + + for (i = 0; i < snic->wq_count; i++) { + err_status = ioread32(&snic->wq[i].ctrl->error_status); + if (err_status) + SNIC_HOST_ERR(snic->shost, + "WQ[%d] error status %d\n", + i, + err_status); + } +} /* end of snic_log_q_error */ diff --git a/drivers/scsi/snic/snic_res.h b/drivers/scsi/snic/snic_res.h new file mode 100644 index 000000000..53cf6b19a --- /dev/null +++ b/drivers/scsi/snic/snic_res.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. 
*/ + +#ifndef __SNIC_RES_H +#define __SNIC_RES_H + +#include "snic_io.h" +#include "wq_enet_desc.h" +#include "vnic_wq.h" +#include "snic_fwint.h" +#include "vnic_cq_fw.h" + +static inline void +snic_icmnd_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, u64 ctx, + u16 flags, u64 tgt_id, u8 *lun, u8 *scsi_cdb, u8 cdb_len, + u32 data_len, u16 sg_cnt, ulong sgl_addr, + dma_addr_t sns_addr_pa, u32 sense_len) +{ + snic_io_hdr_enc(&req->hdr, SNIC_REQ_ICMND, 0, cmnd_id, host_id, sg_cnt, + ctx); + + req->u.icmnd.flags = cpu_to_le16(flags); + req->u.icmnd.tgt_id = cpu_to_le64(tgt_id); + memcpy(&req->u.icmnd.lun_id, lun, LUN_ADDR_LEN); + req->u.icmnd.cdb_len = cdb_len; + memset(req->u.icmnd.cdb, 0, SNIC_CDB_LEN); + memcpy(req->u.icmnd.cdb, scsi_cdb, cdb_len); + req->u.icmnd.data_len = cpu_to_le32(data_len); + req->u.icmnd.sg_addr = cpu_to_le64(sgl_addr); + req->u.icmnd.sense_len = cpu_to_le32(sense_len); + req->u.icmnd.sense_addr = cpu_to_le64(sns_addr_pa); +} + +static inline void +snic_itmf_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, ulong ctx, + u16 flags, u32 req_id, u64 tgt_id, u8 *lun, u8 tm_type) +{ + snic_io_hdr_enc(&req->hdr, SNIC_REQ_ITMF, 0, cmnd_id, host_id, 0, ctx); + + req->u.itmf.tm_type = tm_type; + req->u.itmf.flags = cpu_to_le16(flags); + /* req_id valid only in abort, clear task */ + req->u.itmf.req_id = cpu_to_le32(req_id); + req->u.itmf.tgt_id = cpu_to_le64(tgt_id); + memcpy(&req->u.itmf.lun_id, lun, LUN_ADDR_LEN); +} + +static inline void +snic_queue_wq_eth_desc(struct vnic_wq *wq, + void *os_buf, + dma_addr_t dma_addr, + unsigned int len, + int vlan_tag_insert, + unsigned int vlan_tag, + int cq_entry) +{ + struct wq_enet_desc *desc = svnic_wq_next_desc(wq); + + wq_enet_desc_enc(desc, + (u64)dma_addr | VNIC_PADDR_TARGET, + (u16)len, + 0, /* mss_or_csum_offset */ + 0, /* fc_eof */ + 0, /* offload mode */ + 1, /* eop */ + (u8)cq_entry, + 0, /* fcoe_encap */ + (u8)vlan_tag_insert, + (u16)vlan_tag, + 0 /* loopback */); + + svnic_wq_post(wq, os_buf, dma_addr, len, 1, 1); +} + +struct snic; + +int snic_get_vnic_config(struct snic *); +int snic_alloc_vnic_res(struct snic *); +void snic_free_vnic_res(struct snic *); +void snic_get_res_counts(struct snic *); +void snic_log_q_error(struct snic *); +int snic_get_vnic_resources_size(struct snic *); +#endif /* __SNIC_RES_H */ diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c new file mode 100644 index 000000000..c50ede326 --- /dev/null +++ b/drivers/scsi/snic/snic_scsi.c @@ -0,0 +1,2643 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2014 Cisco Systems, Inc. All rights reserved. 
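+// snic_scsi.c: SCSI command submission (snic_queuecommand) and handling of +// ICMND/ITMF completions, aborts and reset task management for the snic host.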
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "snic_io.h" +#include "snic.h" + +#define snic_cmd_tag(sc) (scsi_cmd_to_rq(sc)->tag) + +const char *snic_state_str[] = { + [SNIC_INIT] = "SNIC_INIT", + [SNIC_ERROR] = "SNIC_ERROR", + [SNIC_ONLINE] = "SNIC_ONLINE", + [SNIC_OFFLINE] = "SNIC_OFFLINE", + [SNIC_FWRESET] = "SNIC_FWRESET", +}; + +static const char * const snic_req_state_str[] = { + [SNIC_IOREQ_NOT_INITED] = "SNIC_IOREQ_NOT_INITED", + [SNIC_IOREQ_PENDING] = "SNIC_IOREQ_PENDING", + [SNIC_IOREQ_ABTS_PENDING] = "SNIC_IOREQ_ABTS_PENDING", + [SNIC_IOREQ_ABTS_COMPLETE] = "SNIC_IOREQ_ABTS_COMPLETE", + [SNIC_IOREQ_LR_PENDING] = "SNIC_IOREQ_LR_PENDING", + [SNIC_IOREQ_LR_COMPLETE] = "SNIC_IOREQ_LR_COMPLETE", + [SNIC_IOREQ_COMPLETE] = "SNIC_IOREQ_CMD_COMPLETE", +}; + +/* snic cmd status strings */ +static const char * const snic_io_status_str[] = { + [SNIC_STAT_IO_SUCCESS] = "SNIC_STAT_IO_SUCCESS", /* 0x0 */ + [SNIC_STAT_INVALID_HDR] = "SNIC_STAT_INVALID_HDR", + [SNIC_STAT_OUT_OF_RES] = "SNIC_STAT_OUT_OF_RES", + [SNIC_STAT_INVALID_PARM] = "SNIC_STAT_INVALID_PARM", + [SNIC_STAT_REQ_NOT_SUP] = "SNIC_STAT_REQ_NOT_SUP", + [SNIC_STAT_IO_NOT_FOUND] = "SNIC_STAT_IO_NOT_FOUND", + [SNIC_STAT_ABORTED] = "SNIC_STAT_ABORTED", + [SNIC_STAT_TIMEOUT] = "SNIC_STAT_TIMEOUT", + [SNIC_STAT_SGL_INVALID] = "SNIC_STAT_SGL_INVALID", + [SNIC_STAT_DATA_CNT_MISMATCH] = "SNIC_STAT_DATA_CNT_MISMATCH", + [SNIC_STAT_FW_ERR] = "SNIC_STAT_FW_ERR", + [SNIC_STAT_ITMF_REJECT] = "SNIC_STAT_ITMF_REJECT", + [SNIC_STAT_ITMF_FAIL] = "SNIC_STAT_ITMF_FAIL", + [SNIC_STAT_ITMF_INCORRECT_LUN] = "SNIC_STAT_ITMF_INCORRECT_LUN", + [SNIC_STAT_CMND_REJECT] = "SNIC_STAT_CMND_REJECT", + [SNIC_STAT_DEV_OFFLINE] = "SNIC_STAT_DEV_OFFLINE", + [SNIC_STAT_NO_BOOTLUN] = "SNIC_STAT_NO_BOOTLUN", + [SNIC_STAT_SCSI_ERR] = "SNIC_STAT_SCSI_ERR", + [SNIC_STAT_NOT_READY] = "SNIC_STAT_NOT_READY", + [SNIC_STAT_FATAL_ERROR] = "SNIC_STAT_FATAL_ERROR", +}; + +static void snic_scsi_cleanup(struct snic *, int); + +const char * +snic_state_to_str(unsigned int state) +{ + if (state >= ARRAY_SIZE(snic_state_str) || !snic_state_str[state]) + return "Unknown"; + + return snic_state_str[state]; +} + +static const char * +snic_io_status_to_str(unsigned int state) +{ + if ((state >= ARRAY_SIZE(snic_io_status_str)) || + (!snic_io_status_str[state])) + return "Unknown"; + + return snic_io_status_str[state]; +} + +static const char * +snic_ioreq_state_to_str(unsigned int state) +{ + if (state >= ARRAY_SIZE(snic_req_state_str) || + !snic_req_state_str[state]) + return "Unknown"; + + return snic_req_state_str[state]; +} + +static inline spinlock_t * +snic_io_lock_hash(struct snic *snic, struct scsi_cmnd *sc) +{ + u32 hash = snic_cmd_tag(sc) & (SNIC_IO_LOCKS - 1); + + return &snic->io_req_lock[hash]; +} + +static inline spinlock_t * +snic_io_lock_tag(struct snic *snic, int tag) +{ + return &snic->io_req_lock[tag & (SNIC_IO_LOCKS - 1)]; +} + +/* snic_release_req_buf : Releases snic_req_info */ +static void +snic_release_req_buf(struct snic *snic, + struct snic_req_info *rqi, + struct scsi_cmnd *sc) +{ + struct snic_host_req *req = rqi_to_req(rqi); + + /* Freeing cmd without marking completion, not okay */ + SNIC_BUG_ON(!((CMD_STATE(sc) == SNIC_IOREQ_COMPLETE) || + (CMD_STATE(sc) == SNIC_IOREQ_ABTS_COMPLETE) || + (CMD_FLAGS(sc) & SNIC_DEV_RST_NOTSUP) || + (CMD_FLAGS(sc) & SNIC_IO_INTERNAL_TERM_ISSUED) || + (CMD_FLAGS(sc) & SNIC_DEV_RST_TERM_ISSUED) || + (CMD_FLAGS(sc) & 
SNIC_SCSI_CLEANUP) || + (CMD_STATE(sc) == SNIC_IOREQ_LR_COMPLETE))); + + SNIC_SCSI_DBG(snic->shost, + "Rel_req:sc %p:tag %x:rqi %p:ioreq %p:abt %p:dr %p: state %s:flags 0x%llx\n", + sc, snic_cmd_tag(sc), rqi, rqi->req, rqi->abort_req, + rqi->dr_req, snic_ioreq_state_to_str(CMD_STATE(sc)), + CMD_FLAGS(sc)); + + if (req->u.icmnd.sense_addr) + dma_unmap_single(&snic->pdev->dev, + le64_to_cpu(req->u.icmnd.sense_addr), + SCSI_SENSE_BUFFERSIZE, + DMA_FROM_DEVICE); + + scsi_dma_unmap(sc); + + snic_req_free(snic, rqi); +} /* end of snic_release_req_buf */ + +/* + * snic_queue_icmnd_req : Queues snic_icmnd request + */ +static int +snic_queue_icmnd_req(struct snic *snic, + struct snic_req_info *rqi, + struct scsi_cmnd *sc, + int sg_cnt) +{ + struct scatterlist *sg; + struct snic_sg_desc *sgd; + dma_addr_t pa = 0; + struct scsi_lun lun; + u16 flags = 0; + int ret = 0; + unsigned int i; + + if (sg_cnt) { + flags = SNIC_ICMND_ESGL; + sgd = (struct snic_sg_desc *) req_to_sgl(rqi->req); + + for_each_sg(scsi_sglist(sc), sg, sg_cnt, i) { + sgd->addr = cpu_to_le64(sg_dma_address(sg)); + sgd->len = cpu_to_le32(sg_dma_len(sg)); + sgd->_resvd = 0; + sgd++; + } + } + + pa = dma_map_single(&snic->pdev->dev, + sc->sense_buffer, + SCSI_SENSE_BUFFERSIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(&snic->pdev->dev, pa)) { + SNIC_HOST_ERR(snic->shost, + "QIcmnd:PCI Map Failed for sns buf %p tag %x\n", + sc->sense_buffer, snic_cmd_tag(sc)); + ret = -ENOMEM; + + return ret; + } + + int_to_scsilun(sc->device->lun, &lun); + if (sc->sc_data_direction == DMA_FROM_DEVICE) + flags |= SNIC_ICMND_RD; + if (sc->sc_data_direction == DMA_TO_DEVICE) + flags |= SNIC_ICMND_WR; + + /* Initialize icmnd */ + snic_icmnd_init(rqi->req, + snic_cmd_tag(sc), + snic->config.hid, /* hid */ + (ulong) rqi, + flags, /* command flags */ + rqi->tgt_id, + lun.scsi_lun, + sc->cmnd, + sc->cmd_len, + scsi_bufflen(sc), + sg_cnt, + (ulong) req_to_sgl(rqi->req), + pa, /* sense buffer pa */ + SCSI_SENSE_BUFFERSIZE); + + atomic64_inc(&snic->s_stats.io.active); + ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len); + if (ret) { + atomic64_dec(&snic->s_stats.io.active); + SNIC_HOST_ERR(snic->shost, + "QIcmnd: Queuing Icmnd Failed. ret = %d\n", + ret); + } else + snic_stats_update_active_ios(&snic->s_stats); + + return ret; +} /* end of snic_queue_icmnd_req */ + +/* + * snic_issue_scsi_req : Prepares IO request and Issues to FW. 
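+ * Maps the command's SG list and sense buffer, builds an icmnd descriptor and + * posts it to the WQ; on a queuing failure the rqi and DMA mappings are + * released and the error is returned to the caller.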
+ */ +static int +snic_issue_scsi_req(struct snic *snic, + struct snic_tgt *tgt, + struct scsi_cmnd *sc) +{ + struct snic_req_info *rqi = NULL; + int sg_cnt = 0; + int ret = 0; + u32 tag = snic_cmd_tag(sc); + u64 cmd_trc = 0, cmd_st_flags = 0; + spinlock_t *io_lock = NULL; + unsigned long flags; + + CMD_STATE(sc) = SNIC_IOREQ_NOT_INITED; + CMD_FLAGS(sc) = SNIC_NO_FLAGS; + sg_cnt = scsi_dma_map(sc); + if (sg_cnt < 0) { + SNIC_TRC((u16)snic->shost->host_no, tag, (ulong) sc, 0, + sc->cmnd[0], sg_cnt, CMD_STATE(sc)); + + SNIC_HOST_ERR(snic->shost, "issue_sc:Failed to map SG List.\n"); + ret = -ENOMEM; + + goto issue_sc_end; + } + + rqi = snic_req_init(snic, sg_cnt); + if (!rqi) { + scsi_dma_unmap(sc); + ret = -ENOMEM; + + goto issue_sc_end; + } + + rqi->tgt_id = tgt->id; + rqi->sc = sc; + + CMD_STATE(sc) = SNIC_IOREQ_PENDING; + CMD_SP(sc) = (char *) rqi; + cmd_trc = SNIC_TRC_CMD(sc); + CMD_FLAGS(sc) |= (SNIC_IO_INITIALIZED | SNIC_IO_ISSUED); + cmd_st_flags = SNIC_TRC_CMD_STATE_FLAGS(sc); + io_lock = snic_io_lock_hash(snic, sc); + + /* create wq desc and enqueue it */ + ret = snic_queue_icmnd_req(snic, rqi, sc, sg_cnt); + if (ret) { + SNIC_HOST_ERR(snic->shost, + "issue_sc: icmnd qing Failed for sc %p, err %d\n", + sc, ret); + + spin_lock_irqsave(io_lock, flags); + rqi = (struct snic_req_info *) CMD_SP(sc); + CMD_SP(sc) = NULL; + CMD_STATE(sc) = SNIC_IOREQ_COMPLETE; + CMD_FLAGS(sc) &= ~SNIC_IO_ISSUED; /* turn off the flag */ + spin_unlock_irqrestore(io_lock, flags); + + if (rqi) + snic_release_req_buf(snic, rqi, sc); + + SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, 0, 0, 0, + SNIC_TRC_CMD_STATE_FLAGS(sc)); + } else { + u32 io_sz = scsi_bufflen(sc) >> 9; + u32 qtime = jiffies - rqi->start_time; + struct snic_io_stats *iostats = &snic->s_stats.io; + + if (io_sz > atomic64_read(&iostats->max_io_sz)) + atomic64_set(&iostats->max_io_sz, io_sz); + + if (qtime > atomic64_read(&iostats->max_qtime)) + atomic64_set(&iostats->max_qtime, qtime); + + SNIC_SCSI_DBG(snic->shost, + "issue_sc:sc %p, tag %d queued to WQ.\n", + sc, tag); + + SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, (ulong) rqi, + sg_cnt, cmd_trc, cmd_st_flags); + } + +issue_sc_end: + + return ret; +} /* end of snic_issue_scsi_req */ + + +/* + * snic_queuecommand + * Routine to send a scsi cdb to LLD + * Called with host_lock held and interrupts disabled + */ +int +snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) +{ + struct snic_tgt *tgt = NULL; + struct snic *snic = shost_priv(shost); + int ret; + + tgt = starget_to_tgt(scsi_target(sc->device)); + ret = snic_tgt_chkready(tgt); + if (ret) { + SNIC_HOST_ERR(shost, "Tgt %p id %d Not Ready.\n", tgt, tgt->id); + atomic64_inc(&snic->s_stats.misc.tgt_not_rdy); + sc->result = ret; + scsi_done(sc); + + return 0; + } + + if (snic_get_state(snic) != SNIC_ONLINE) { + SNIC_HOST_ERR(shost, "snic state is %s\n", + snic_state_str[snic_get_state(snic)]); + + return SCSI_MLQUEUE_HOST_BUSY; + } + atomic_inc(&snic->ios_inflight); + + SNIC_SCSI_DBG(shost, "sc %p Tag %d (sc %0x) lun %lld in snic_qcmd\n", + sc, snic_cmd_tag(sc), sc->cmnd[0], sc->device->lun); + + ret = snic_issue_scsi_req(snic, tgt, sc); + if (ret) { + SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret); + ret = SCSI_MLQUEUE_HOST_BUSY; + } + + atomic_dec(&snic->ios_inflight); + + return ret; +} /* end of snic_queuecommand */ + +/* + * snic_process_abts_pending_state: + * caller should hold IO lock + */ +static void +snic_proc_tmreq_pending_state(struct snic *snic, + struct scsi_cmnd *sc, + u8 cmpl_status) +{ + int 
state = CMD_STATE(sc); + + if (state == SNIC_IOREQ_ABTS_PENDING) + CMD_FLAGS(sc) |= SNIC_IO_ABTS_PENDING; + else if (state == SNIC_IOREQ_LR_PENDING) + CMD_FLAGS(sc) |= SNIC_DEV_RST_PENDING; + else + SNIC_BUG_ON(1); + + switch (cmpl_status) { + case SNIC_STAT_IO_SUCCESS: + CMD_FLAGS(sc) |= SNIC_IO_DONE; + break; + + case SNIC_STAT_ABORTED: + CMD_FLAGS(sc) |= SNIC_IO_ABORTED; + break; + + default: + SNIC_BUG_ON(1); + } +} + +/* + * snic_process_io_failed_state: + * Processes IO's error states + */ +static void +snic_process_io_failed_state(struct snic *snic, + struct snic_icmnd_cmpl *icmnd_cmpl, + struct scsi_cmnd *sc, + u8 cmpl_stat) +{ + int res = 0; + + switch (cmpl_stat) { + case SNIC_STAT_TIMEOUT: /* Req was timedout */ + atomic64_inc(&snic->s_stats.misc.io_tmo); + res = DID_TIME_OUT; + break; + + case SNIC_STAT_ABORTED: /* Req was aborted */ + atomic64_inc(&snic->s_stats.misc.io_aborted); + res = DID_ABORT; + break; + + case SNIC_STAT_DATA_CNT_MISMATCH:/* Recv/Sent more/less data than exp */ + atomic64_inc(&snic->s_stats.misc.data_cnt_mismat); + scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid)); + res = DID_ERROR; + break; + + case SNIC_STAT_OUT_OF_RES: /* Out of resources to complete request */ + atomic64_inc(&snic->s_stats.fw.out_of_res); + res = DID_REQUEUE; + break; + + case SNIC_STAT_IO_NOT_FOUND: /* Requested I/O was not found */ + atomic64_inc(&snic->s_stats.io.io_not_found); + res = DID_ERROR; + break; + + case SNIC_STAT_SGL_INVALID: /* Req was aborted to due to sgl error*/ + atomic64_inc(&snic->s_stats.misc.sgl_inval); + res = DID_ERROR; + break; + + case SNIC_STAT_FW_ERR: /* Req terminated due to FW Error */ + atomic64_inc(&snic->s_stats.fw.io_errs); + res = DID_ERROR; + break; + + case SNIC_STAT_SCSI_ERR: /* FW hits SCSI Error */ + atomic64_inc(&snic->s_stats.fw.scsi_errs); + break; + + case SNIC_STAT_NOT_READY: /* XPT yet to initialize */ + case SNIC_STAT_DEV_OFFLINE: /* Device offline */ + res = DID_NO_CONNECT; + break; + + case SNIC_STAT_INVALID_HDR: /* Hdr contains invalid data */ + case SNIC_STAT_INVALID_PARM: /* Some param in req is invalid */ + case SNIC_STAT_REQ_NOT_SUP: /* Req type is not supported */ + case SNIC_STAT_CMND_REJECT: /* Req rejected */ + case SNIC_STAT_FATAL_ERROR: /* XPT Error */ + default: + SNIC_SCSI_DBG(snic->shost, + "Invalid Hdr/Param or Req Not Supported or Cmnd Rejected or Device Offline. or Unknown\n"); + res = DID_ERROR; + break; + } + + SNIC_HOST_ERR(snic->shost, "fw returns failed status %s flags 0x%llx\n", + snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc)); + + /* Set sc->result */ + sc->result = (res << 16) | icmnd_cmpl->scsi_status; +} /* end of snic_process_io_failed_state */ + +/* + * snic_tmreq_pending : is task management in progress. 
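+ * True while an abort or device reset issued for this command is still + * outstanding, i.e. CMD_STATE is SNIC_IOREQ_ABTS_PENDING or SNIC_IOREQ_LR_PENDING.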
+ */ +static int +snic_tmreq_pending(struct scsi_cmnd *sc) +{ + int state = CMD_STATE(sc); + + return ((state == SNIC_IOREQ_ABTS_PENDING) || + (state == SNIC_IOREQ_LR_PENDING)); +} + +/* + * snic_process_icmnd_cmpl_status: + * Caller should hold io_lock + */ +static int +snic_process_icmnd_cmpl_status(struct snic *snic, + struct snic_icmnd_cmpl *icmnd_cmpl, + u8 cmpl_stat, + struct scsi_cmnd *sc) +{ + u8 scsi_stat = icmnd_cmpl->scsi_status; + int ret = 0; + + /* Mark the IO as complete */ + CMD_STATE(sc) = SNIC_IOREQ_COMPLETE; + + if (likely(cmpl_stat == SNIC_STAT_IO_SUCCESS)) { + sc->result = (DID_OK << 16) | scsi_stat; + + /* Update SCSI Cmd with resid value */ + scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid)); + + if (icmnd_cmpl->flags & SNIC_ICMND_CMPL_UNDR_RUN) + atomic64_inc(&snic->s_stats.misc.io_under_run); + + if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL) + atomic64_inc(&snic->s_stats.misc.qfull); + + ret = 0; + } else { + snic_process_io_failed_state(snic, icmnd_cmpl, sc, cmpl_stat); + atomic64_inc(&snic->s_stats.io.fail); + SNIC_HOST_ERR(snic->shost, + "icmnd_cmpl: IO Failed : Hdr Status %s flags 0x%llx\n", + snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc)); + ret = 1; + } + + return ret; +} /* end of snic_process_icmnd_cmpl_status */ + + +/* + * snic_icmnd_cmpl_handler + * Routine to handle icmnd completions + */ +static void +snic_icmnd_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) +{ + u8 typ, hdr_stat; + u32 cmnd_id, hid; + ulong ctx; + struct scsi_cmnd *sc = NULL; + struct snic_icmnd_cmpl *icmnd_cmpl = NULL; + struct snic_host_req *req = NULL; + struct snic_req_info *rqi = NULL; + unsigned long flags, start_time; + spinlock_t *io_lock; + u8 sc_stat = 0; + + snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); + icmnd_cmpl = &fwreq->u.icmnd_cmpl; + sc_stat = icmnd_cmpl->scsi_status; + + SNIC_SCSI_DBG(snic->shost, + "Icmnd_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x,i ctx = %lx\n", + typ, hdr_stat, cmnd_id, hid, ctx); + + if (cmnd_id >= snic->max_tag_id) { + SNIC_HOST_ERR(snic->shost, + "Icmnd_cmpl:Tag Error:Out of Range Tag %d, hdr status = %s\n", + cmnd_id, snic_io_status_to_str(hdr_stat)); + return; + } + + sc = scsi_host_find_tag(snic->shost, cmnd_id); + WARN_ON_ONCE(!sc); + + if (!sc) { + atomic64_inc(&snic->s_stats.io.sc_null); + SNIC_HOST_ERR(snic->shost, + "Icmnd_cmpl: Scsi Cmnd Not found, sc = NULL Hdr Status = %s tag = 0x%x fwreq = 0x%p\n", + snic_io_status_to_str(hdr_stat), + cmnd_id, + fwreq); + + SNIC_TRC(snic->shost->host_no, cmnd_id, 0, + ((u64)hdr_stat << 16 | + (u64)sc_stat << 8 | (u64)icmnd_cmpl->flags), + (ulong) fwreq, le32_to_cpu(icmnd_cmpl->resid), ctx); + + return; + } + + io_lock = snic_io_lock_hash(snic, sc); + + spin_lock_irqsave(io_lock, flags); + rqi = (struct snic_req_info *) CMD_SP(sc); + SNIC_SCSI_DBG(snic->shost, + "Icmnd_cmpl:lun %lld sc %p cmd %xtag %d flags 0x%llx rqi %p\n", + sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc), + CMD_FLAGS(sc), rqi); + + if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) { + spin_unlock_irqrestore(io_lock, flags); + + return; + } + + SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx); + WARN_ON_ONCE(req); + if (!rqi) { + atomic64_inc(&snic->s_stats.io.req_null); + CMD_FLAGS(sc) |= SNIC_IO_REQ_NULL; + spin_unlock_irqrestore(io_lock, flags); + + SNIC_HOST_ERR(snic->shost, + "Icmnd_cmpl:Host Req Not Found(null), Hdr Status %s, Tag 0x%x, sc 0x%p flags 0x%llx\n", + snic_io_status_to_str(hdr_stat), + cmnd_id, sc, CMD_FLAGS(sc)); + return; + } + + rqi = (struct 
snic_req_info *) ctx; + start_time = rqi->start_time; + + /* firmware completed the io */ + rqi->io_cmpl = 1; + + /* + * if SCSI-ML has already issued abort on this command, + * ignore completion of the IO. The abts path will clean it up + */ + if (unlikely(snic_tmreq_pending(sc))) { + snic_proc_tmreq_pending_state(snic, sc, hdr_stat); + spin_unlock_irqrestore(io_lock, flags); + + snic_stats_update_io_cmpl(&snic->s_stats); + + /* Expected value is SNIC_STAT_ABORTED */ + if (likely(hdr_stat == SNIC_STAT_ABORTED)) + return; + + SNIC_SCSI_DBG(snic->shost, + "icmnd_cmpl:TM Req Pending(%s), Hdr Status %s sc 0x%p scsi status %x resid %d flags 0x%llx\n", + snic_ioreq_state_to_str(CMD_STATE(sc)), + snic_io_status_to_str(hdr_stat), + sc, sc_stat, le32_to_cpu(icmnd_cmpl->resid), + CMD_FLAGS(sc)); + + SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc, + jiffies_to_msecs(jiffies - start_time), (ulong) fwreq, + SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc)); + + return; + } + + if (snic_process_icmnd_cmpl_status(snic, icmnd_cmpl, hdr_stat, sc)) { + scsi_print_command(sc); + SNIC_HOST_ERR(snic->shost, + "icmnd_cmpl:IO Failed, sc 0x%p Tag %d Cmd %x Hdr Status %s flags 0x%llx\n", + sc, sc->cmnd[0], cmnd_id, + snic_io_status_to_str(hdr_stat), CMD_FLAGS(sc)); + } + + /* Break link with the SCSI Command */ + CMD_SP(sc) = NULL; + CMD_FLAGS(sc) |= SNIC_IO_DONE; + + spin_unlock_irqrestore(io_lock, flags); + + /* For now, consider only successful IO. */ + snic_calc_io_process_time(snic, rqi); + + snic_release_req_buf(snic, rqi, sc); + + SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc, + jiffies_to_msecs(jiffies - start_time), (ulong) fwreq, + SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc)); + + + scsi_done(sc); + + snic_stats_update_io_cmpl(&snic->s_stats); +} /* end of snic_icmnd_cmpl_handler */ + +static void +snic_proc_dr_cmpl_locked(struct snic *snic, + struct snic_fw_req *fwreq, + u8 cmpl_stat, + u32 cmnd_id, + struct scsi_cmnd *sc) +{ + struct snic_req_info *rqi = (struct snic_req_info *) CMD_SP(sc); + u32 start_time = rqi->start_time; + + CMD_LR_STATUS(sc) = cmpl_stat; + + SNIC_SCSI_DBG(snic->shost, "itmf_cmpl: Cmd State = %s\n", + snic_ioreq_state_to_str(CMD_STATE(sc))); + + if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) { + CMD_FLAGS(sc) |= SNIC_DEV_RST_ABTS_PENDING; + + SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc, + jiffies_to_msecs(jiffies - start_time), + (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc)); + + SNIC_SCSI_DBG(snic->shost, + "itmf_cmpl: Terminate Pending Dev Reset Cmpl Recvd.id %x, status %s flags 0x%llx\n", + (int)(cmnd_id & SNIC_TAG_MASK), + snic_io_status_to_str(cmpl_stat), + CMD_FLAGS(sc)); + + return; + } + + + if (CMD_FLAGS(sc) & SNIC_DEV_RST_TIMEDOUT) { + SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc, + jiffies_to_msecs(jiffies - start_time), + (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc)); + + SNIC_SCSI_DBG(snic->shost, + "itmf_cmpl:Dev Reset Completion Received after timeout. id %d cmpl status %s flags 0x%llx\n", + (int)(cmnd_id & SNIC_TAG_MASK), + snic_io_status_to_str(cmpl_stat), + CMD_FLAGS(sc)); + + return; + } + + CMD_STATE(sc) = SNIC_IOREQ_LR_COMPLETE; + CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE; + + SNIC_SCSI_DBG(snic->shost, + "itmf_cmpl:Dev Reset Cmpl Recvd id %d cmpl status %s flags 0x%llx\n", + (int)(cmnd_id & SNIC_TAG_MASK), + snic_io_status_to_str(cmpl_stat), + CMD_FLAGS(sc)); + + if (rqi->dr_done) + complete(rqi->dr_done); +} /* end of snic_proc_dr_cmpl_locked */ + +/* + * snic_update_abort_stats : Updates abort stats based on completion status. 
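snic_process_itmf_cmpl(), a little further below, recovers which task-management operation a firmware completion belongs to by splitting the returned id into flag bits (tm_tags = cmnd_id & ~SNIC_TAG_MASK) and a base command tag (cmnd_id &= SNIC_TAG_MASK); the submit side, snic_queue_itmf_req() later in this file, builds the id as snic_cmd_tag(sc) | rqi->tm_tag. A stand-alone sketch of that encoding; the EX_* mask and flag values are placeholders, the real SNIC_TAG_* constants live in snic.h and are not shown here:

#include <stdint.h>

#define EX_TAG_MASK     0x00ffffffu     /* low bits: block-layer tag (placeholder) */
#define EX_TAG_ABORT    0x01000000u     /* high bits: TMF markers (placeholders)   */
#define EX_TAG_DEV_RST  0x02000000u

/* Request side: combine the command tag with the TMF marker bits. */
static inline uint32_t ex_encode_tmf_id(uint32_t base_tag, uint32_t tmf_bits)
{
        return (base_tag & EX_TAG_MASK) | tmf_bits;
}

/* Completion side: recover which TMF finished and which command it was for. */
static inline void ex_decode_tmf_id(uint32_t id, uint32_t *base_tag, uint32_t *tmf_bits)
{
        *tmf_bits = id & ~EX_TAG_MASK;
        *base_tag = id & EX_TAG_MASK;
}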
+ */ +static void +snic_update_abort_stats(struct snic *snic, u8 cmpl_stat) +{ + struct snic_abort_stats *abt_stats = &snic->s_stats.abts; + + SNIC_SCSI_DBG(snic->shost, "Updating Abort stats.\n"); + + switch (cmpl_stat) { + case SNIC_STAT_IO_SUCCESS: + break; + + case SNIC_STAT_TIMEOUT: + atomic64_inc(&abt_stats->fw_tmo); + break; + + case SNIC_STAT_IO_NOT_FOUND: + atomic64_inc(&abt_stats->io_not_found); + break; + + default: + atomic64_inc(&abt_stats->fail); + break; + } +} + +static int +snic_process_itmf_cmpl(struct snic *snic, + struct snic_fw_req *fwreq, + u32 cmnd_id, + u8 cmpl_stat, + struct scsi_cmnd *sc) +{ + struct snic_req_info *rqi = NULL; + u32 tm_tags = 0; + spinlock_t *io_lock = NULL; + unsigned long flags; + u32 start_time = 0; + int ret = 0; + + io_lock = snic_io_lock_hash(snic, sc); + spin_lock_irqsave(io_lock, flags); + if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) { + spin_unlock_irqrestore(io_lock, flags); + + return ret; + } + rqi = (struct snic_req_info *) CMD_SP(sc); + WARN_ON_ONCE(!rqi); + + if (!rqi) { + atomic64_inc(&snic->s_stats.io.req_null); + spin_unlock_irqrestore(io_lock, flags); + CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL; + SNIC_HOST_ERR(snic->shost, + "itmf_cmpl: rqi is null,Hdr stat = %s Tag = 0x%x sc = 0x%p flags 0x%llx\n", + snic_io_status_to_str(cmpl_stat), cmnd_id, sc, + CMD_FLAGS(sc)); + + return ret; + } + + /* Extract task management flags */ + tm_tags = cmnd_id & ~(SNIC_TAG_MASK); + + start_time = rqi->start_time; + cmnd_id &= (SNIC_TAG_MASK); + + switch (tm_tags) { + case SNIC_TAG_ABORT: + /* Abort only issued on cmd */ + snic_update_abort_stats(snic, cmpl_stat); + + if (CMD_STATE(sc) != SNIC_IOREQ_ABTS_PENDING) { + /* This is a late completion. Ignore it. */ + ret = -1; + spin_unlock_irqrestore(io_lock, flags); + break; + } + + CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE; + CMD_ABTS_STATUS(sc) = cmpl_stat; + CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE; + + SNIC_SCSI_DBG(snic->shost, + "itmf_cmpl:Abort Cmpl Recvd.Tag 0x%x Status %s flags 0x%llx\n", + cmnd_id, + snic_io_status_to_str(cmpl_stat), + CMD_FLAGS(sc)); + + /* + * If scsi_eh thread is blocked waiting for abts complete, + * signal completion to it. IO will be cleaned in the thread, + * else clean it in this context. + */ + if (rqi->abts_done) { + complete(rqi->abts_done); + spin_unlock_irqrestore(io_lock, flags); + + break; /* jump out */ + } + + CMD_SP(sc) = NULL; + sc->result = (DID_ERROR << 16); + SNIC_SCSI_DBG(snic->shost, + "itmf_cmpl: Completing IO. sc %p flags 0x%llx\n", + sc, CMD_FLAGS(sc)); + + spin_unlock_irqrestore(io_lock, flags); + + snic_release_req_buf(snic, rqi, sc); + + SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc, + jiffies_to_msecs(jiffies - start_time), + (ulong) fwreq, SNIC_TRC_CMD(sc), + SNIC_TRC_CMD_STATE_FLAGS(sc)); + + scsi_done(sc); + + break; + + case SNIC_TAG_DEV_RST: + case SNIC_TAG_DEV_RST | SNIC_TAG_IOCTL_DEV_RST: + snic_proc_dr_cmpl_locked(snic, fwreq, cmpl_stat, cmnd_id, sc); + spin_unlock_irqrestore(io_lock, flags); + ret = 0; + + break; + + case SNIC_TAG_ABORT | SNIC_TAG_DEV_RST: + /* Abort and terminate completion of device reset req */ + + CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE; + CMD_ABTS_STATUS(sc) = cmpl_stat; + CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE; + + SNIC_SCSI_DBG(snic->shost, + "itmf_cmpl:dev reset abts cmpl recvd. 
id %d status %s flags 0x%llx\n", + cmnd_id, snic_io_status_to_str(cmpl_stat), + CMD_FLAGS(sc)); + + if (rqi->abts_done) + complete(rqi->abts_done); + + spin_unlock_irqrestore(io_lock, flags); + + break; + + default: + spin_unlock_irqrestore(io_lock, flags); + SNIC_HOST_ERR(snic->shost, + "itmf_cmpl: Unknown TM tag bit 0x%x\n", tm_tags); + + SNIC_HOST_ERR(snic->shost, + "itmf_cmpl:Unexpected itmf io stat %s Tag = 0x%x flags 0x%llx\n", + snic_ioreq_state_to_str(CMD_STATE(sc)), + cmnd_id, + CMD_FLAGS(sc)); + ret = -1; + SNIC_BUG_ON(1); + + break; + } + + return ret; +} /* end of snic_process_itmf_cmpl_status */ + +/* + * snic_itmf_cmpl_handler. + * Routine to handle itmf completions. + */ +static void +snic_itmf_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) +{ + struct scsi_cmnd *sc = NULL; + struct snic_req_info *rqi = NULL; + struct snic_itmf_cmpl *itmf_cmpl = NULL; + ulong ctx; + u32 cmnd_id; + u32 hid; + u8 typ; + u8 hdr_stat; + + snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); + SNIC_SCSI_DBG(snic->shost, + "Itmf_cmpl: %s: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x,ctx = %lx\n", + __func__, typ, hdr_stat, cmnd_id, hid, ctx); + + itmf_cmpl = &fwreq->u.itmf_cmpl; + SNIC_SCSI_DBG(snic->shost, + "Itmf_cmpl: nterm %u , flags 0x%x\n", + le32_to_cpu(itmf_cmpl->nterminated), itmf_cmpl->flags); + + /* spl case, dev reset issued through ioctl */ + if (cmnd_id & SNIC_TAG_IOCTL_DEV_RST) { + rqi = (struct snic_req_info *) ctx; + sc = rqi->sc; + + goto ioctl_dev_rst; + } + + if ((cmnd_id & SNIC_TAG_MASK) >= snic->max_tag_id) { + SNIC_HOST_ERR(snic->shost, + "Itmf_cmpl: Tag 0x%x out of Range,HdrStat %s\n", + cmnd_id, snic_io_status_to_str(hdr_stat)); + SNIC_BUG_ON(1); + + return; + } + + sc = scsi_host_find_tag(snic->shost, cmnd_id & SNIC_TAG_MASK); + WARN_ON_ONCE(!sc); + +ioctl_dev_rst: + if (!sc) { + atomic64_inc(&snic->s_stats.io.sc_null); + SNIC_HOST_ERR(snic->shost, + "Itmf_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n", + snic_io_status_to_str(hdr_stat), cmnd_id); + + return; + } + + snic_process_itmf_cmpl(snic, fwreq, cmnd_id, hdr_stat, sc); +} /* end of snic_itmf_cmpl_handler */ + + + +static void +snic_hba_reset_scsi_cleanup(struct snic *snic, struct scsi_cmnd *sc) +{ + struct snic_stats *st = &snic->s_stats; + long act_ios = 0, act_fwreqs = 0; + + SNIC_SCSI_DBG(snic->shost, "HBA Reset scsi cleanup.\n"); + snic_scsi_cleanup(snic, snic_cmd_tag(sc)); + + /* Update stats on pending IOs */ + act_ios = atomic64_read(&st->io.active); + atomic64_add(act_ios, &st->io.compl); + atomic64_sub(act_ios, &st->io.active); + + act_fwreqs = atomic64_read(&st->fw.actv_reqs); + atomic64_sub(act_fwreqs, &st->fw.actv_reqs); +} + +/* + * snic_hba_reset_cmpl_handler : + * + * Notes : + * 1. Cleanup all the scsi cmds, release all snic specific cmds + * 2. 
Issue Report Targets in case of SAN targets + */ +static int +snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) +{ + ulong ctx; + u32 cmnd_id; + u32 hid; + u8 typ; + u8 hdr_stat; + struct scsi_cmnd *sc = NULL; + struct snic_req_info *rqi = NULL; + spinlock_t *io_lock = NULL; + unsigned long flags, gflags; + int ret = 0; + + snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); + SNIC_HOST_INFO(snic->shost, + "reset_cmpl:Tag %d ctx %lx cmpl status %s HBA Reset Completion received.\n", + cmnd_id, ctx, snic_io_status_to_str(hdr_stat)); + + SNIC_SCSI_DBG(snic->shost, + "reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n", + typ, hdr_stat, cmnd_id, hid, ctx); + + /* spl case, host reset issued through ioctl */ + if (cmnd_id == SCSI_NO_TAG) { + rqi = (struct snic_req_info *) ctx; + SNIC_HOST_INFO(snic->shost, + "reset_cmpl:Tag %d ctx %lx cmpl stat %s\n", + cmnd_id, ctx, snic_io_status_to_str(hdr_stat)); + sc = rqi->sc; + + goto ioctl_hba_rst; + } + + if (cmnd_id >= snic->max_tag_id) { + SNIC_HOST_ERR(snic->shost, + "reset_cmpl: Tag 0x%x out of Range,HdrStat %s\n", + cmnd_id, snic_io_status_to_str(hdr_stat)); + SNIC_BUG_ON(1); + + return 1; + } + + sc = scsi_host_find_tag(snic->shost, cmnd_id); +ioctl_hba_rst: + if (!sc) { + atomic64_inc(&snic->s_stats.io.sc_null); + SNIC_HOST_ERR(snic->shost, + "reset_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n", + snic_io_status_to_str(hdr_stat), cmnd_id); + ret = 1; + + return ret; + } + + SNIC_HOST_INFO(snic->shost, + "reset_cmpl: sc %p rqi %p Tag %d flags 0x%llx\n", + sc, rqi, cmnd_id, CMD_FLAGS(sc)); + + io_lock = snic_io_lock_hash(snic, sc); + spin_lock_irqsave(io_lock, flags); + + if (!snic->remove_wait) { + spin_unlock_irqrestore(io_lock, flags); + SNIC_HOST_ERR(snic->shost, + "reset_cmpl:host reset completed after timeout\n"); + ret = 1; + + return ret; + } + + rqi = (struct snic_req_info *) CMD_SP(sc); + WARN_ON_ONCE(!rqi); + + if (!rqi) { + atomic64_inc(&snic->s_stats.io.req_null); + spin_unlock_irqrestore(io_lock, flags); + CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL; + SNIC_HOST_ERR(snic->shost, + "reset_cmpl: rqi is null,Hdr stat %s Tag 0x%x sc 0x%p flags 0x%llx\n", + snic_io_status_to_str(hdr_stat), cmnd_id, sc, + CMD_FLAGS(sc)); + + ret = 1; + + return ret; + } + /* stats */ + spin_unlock_irqrestore(io_lock, flags); + + /* scsi cleanup */ + snic_hba_reset_scsi_cleanup(snic, sc); + + SNIC_BUG_ON(snic_get_state(snic) != SNIC_OFFLINE && + snic_get_state(snic) != SNIC_FWRESET); + + /* Careful locking between snic_lock and io lock */ + spin_lock_irqsave(io_lock, flags); + spin_lock_irqsave(&snic->snic_lock, gflags); + if (snic_get_state(snic) == SNIC_FWRESET) + snic_set_state(snic, SNIC_ONLINE); + spin_unlock_irqrestore(&snic->snic_lock, gflags); + + if (snic->remove_wait) + complete(snic->remove_wait); + + spin_unlock_irqrestore(io_lock, flags); + atomic64_inc(&snic->s_stats.reset.hba_reset_cmpl); + + ret = 0; + /* Rediscovery is for SAN */ + if (snic->config.xpt_type == SNIC_DAS) + return ret; + + SNIC_SCSI_DBG(snic->shost, "reset_cmpl: Queuing discovery work.\n"); + queue_work(snic_glob->event_q, &snic->disc_work); + + return ret; +} + +static void +snic_msg_ack_handler(struct snic *snic, struct snic_fw_req *fwreq) +{ + SNIC_HOST_INFO(snic->shost, "Message Ack Received.\n"); + + SNIC_ASSERT_NOT_IMPL(1); +} + +static void +snic_aen_handler(struct snic *snic, struct snic_fw_req *fwreq) +{ + u8 typ, hdr_stat; + u32 cmnd_id, hid; + ulong ctx; + struct snic_async_evnotify *aen = 
&fwreq->u.async_ev; + u32 event_id = 0; + + snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); + SNIC_SCSI_DBG(snic->shost, + "aen: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n", + typ, hdr_stat, cmnd_id, hid, ctx); + + event_id = le32_to_cpu(aen->ev_id); + + switch (event_id) { + case SNIC_EV_TGT_OFFLINE: + SNIC_HOST_INFO(snic->shost, "aen:TGT_OFFLINE Event Recvd.\n"); + break; + + case SNIC_EV_TGT_ONLINE: + SNIC_HOST_INFO(snic->shost, "aen:TGT_ONLINE Event Recvd.\n"); + break; + + case SNIC_EV_LUN_OFFLINE: + SNIC_HOST_INFO(snic->shost, "aen:LUN_OFFLINE Event Recvd.\n"); + break; + + case SNIC_EV_LUN_ONLINE: + SNIC_HOST_INFO(snic->shost, "aen:LUN_ONLINE Event Recvd.\n"); + break; + + case SNIC_EV_CONF_CHG: + SNIC_HOST_INFO(snic->shost, "aen:Config Change Event Recvd.\n"); + break; + + case SNIC_EV_TGT_ADDED: + SNIC_HOST_INFO(snic->shost, "aen:TGT_ADD Event Recvd.\n"); + break; + + case SNIC_EV_TGT_DELTD: + SNIC_HOST_INFO(snic->shost, "aen:TGT_DEL Event Recvd.\n"); + break; + + case SNIC_EV_LUN_ADDED: + SNIC_HOST_INFO(snic->shost, "aen:LUN_ADD Event Recvd.\n"); + break; + + case SNIC_EV_LUN_DELTD: + SNIC_HOST_INFO(snic->shost, "aen:LUN_DEL Event Recvd.\n"); + break; + + case SNIC_EV_DISC_CMPL: + SNIC_HOST_INFO(snic->shost, "aen:DISC_CMPL Event Recvd.\n"); + break; + + default: + SNIC_HOST_INFO(snic->shost, "aen:Unknown Event Recvd.\n"); + SNIC_BUG_ON(1); + break; + } + + SNIC_ASSERT_NOT_IMPL(1); +} /* end of snic_aen_handler */ + +/* + * snic_io_cmpl_handler + * Routine to process CQ entries(IO Completions) posted by fw. + */ +static int +snic_io_cmpl_handler(struct vnic_dev *vdev, + unsigned int cq_idx, + struct snic_fw_req *fwreq) +{ + struct snic *snic = svnic_dev_priv(vdev); + u64 start = jiffies, cmpl_time; + + snic_print_desc(__func__, (char *)fwreq, sizeof(*fwreq)); + + /* Update FW Stats */ + if ((fwreq->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL) && + (fwreq->hdr.type <= SNIC_RSP_BOOT_LUNS_CMPL)) + atomic64_dec(&snic->s_stats.fw.actv_reqs); + + SNIC_BUG_ON((fwreq->hdr.type > SNIC_RSP_BOOT_LUNS_CMPL) && + (fwreq->hdr.type < SNIC_MSG_ASYNC_EVNOTIFY)); + + /* Check for snic subsys errors */ + switch (fwreq->hdr.status) { + case SNIC_STAT_NOT_READY: /* XPT yet to initialize */ + SNIC_HOST_ERR(snic->shost, + "sNIC SubSystem is NOT Ready.\n"); + break; + + case SNIC_STAT_FATAL_ERROR: /* XPT Error */ + SNIC_HOST_ERR(snic->shost, + "sNIC SubSystem in Unrecoverable State.\n"); + break; + } + + switch (fwreq->hdr.type) { + case SNIC_RSP_EXCH_VER_CMPL: + snic_io_exch_ver_cmpl_handler(snic, fwreq); + break; + + case SNIC_RSP_REPORT_TGTS_CMPL: + snic_report_tgt_cmpl_handler(snic, fwreq); + break; + + case SNIC_RSP_ICMND_CMPL: + snic_icmnd_cmpl_handler(snic, fwreq); + break; + + case SNIC_RSP_ITMF_CMPL: + snic_itmf_cmpl_handler(snic, fwreq); + break; + + case SNIC_RSP_HBA_RESET_CMPL: + snic_hba_reset_cmpl_handler(snic, fwreq); + break; + + case SNIC_MSG_ACK: + snic_msg_ack_handler(snic, fwreq); + break; + + case SNIC_MSG_ASYNC_EVNOTIFY: + snic_aen_handler(snic, fwreq); + break; + + default: + SNIC_BUG_ON(1); + SNIC_SCSI_DBG(snic->shost, + "Unknown Firmware completion request type %d\n", + fwreq->hdr.type); + break; + } + + /* Update Stats */ + cmpl_time = jiffies - start; + if (cmpl_time > atomic64_read(&snic->s_stats.io.max_cmpl_time)) + atomic64_set(&snic->s_stats.io.max_cmpl_time, cmpl_time); + + return 0; +} /* end of snic_io_cmpl_handler */ + +/* + * snic_fwcq_cmpl_handler + * Routine to process fwCQ + * This CQ is independent, and not associated with 
wq/rq/wq_copy queues + */ +int +snic_fwcq_cmpl_handler(struct snic *snic, int io_cmpl_work) +{ + unsigned int num_ent = 0; /* number cq entries processed */ + unsigned int cq_idx; + unsigned int nent_per_cq; + struct snic_misc_stats *misc_stats = &snic->s_stats.misc; + + for (cq_idx = snic->wq_count; cq_idx < snic->cq_count; cq_idx++) { + nent_per_cq = vnic_cq_fw_service(&snic->cq[cq_idx], + snic_io_cmpl_handler, + io_cmpl_work); + num_ent += nent_per_cq; + + if (nent_per_cq > atomic64_read(&misc_stats->max_cq_ents)) + atomic64_set(&misc_stats->max_cq_ents, nent_per_cq); + } + + return num_ent; +} /* end of snic_fwcq_cmpl_handler */ + +/* + * snic_queue_itmf_req: Common API to queue Task Management requests. + * Use rqi->tm_tag for passing special tags. + * @req_id : aborted request's tag, -1 for lun reset. + */ +static int +snic_queue_itmf_req(struct snic *snic, + struct snic_host_req *tmreq, + struct scsi_cmnd *sc, + u32 tmf, + u32 req_id) +{ + struct snic_req_info *rqi = req_to_rqi(tmreq); + struct scsi_lun lun; + int tm_tag = snic_cmd_tag(sc) | rqi->tm_tag; + int ret = 0; + + SNIC_BUG_ON(!rqi); + SNIC_BUG_ON(!rqi->tm_tag); + + /* fill in lun info */ + int_to_scsilun(sc->device->lun, &lun); + + /* Initialize snic_host_req: itmf */ + snic_itmf_init(tmreq, + tm_tag, + snic->config.hid, + (ulong) rqi, + 0 /* flags */, + req_id, /* Command to be aborted. */ + rqi->tgt_id, + lun.scsi_lun, + tmf); + + /* + * In case of multiple aborts on same cmd, + * use try_wait_for_completion and completion_done() to check + * whether it queues aborts even after completion of abort issued + * prior.SNIC_BUG_ON(completion_done(&rqi->done)); + */ + + ret = snic_queue_wq_desc(snic, tmreq, sizeof(*tmreq)); + if (ret) + SNIC_HOST_ERR(snic->shost, + "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d tag %d Failed, ret = %d\n", + tmf, sc, rqi, req_id, snic_cmd_tag(sc), ret); + else + SNIC_SCSI_DBG(snic->shost, + "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d, tag %d (req_id)- Success.", + tmf, sc, rqi, req_id, snic_cmd_tag(sc)); + + return ret; +} /* end of snic_queue_itmf_req */ + +static int +snic_issue_tm_req(struct snic *snic, + struct snic_req_info *rqi, + struct scsi_cmnd *sc, + int tmf) +{ + struct snic_host_req *tmreq = NULL; + int req_id = 0, tag = snic_cmd_tag(sc); + int ret = 0; + + if (snic_get_state(snic) == SNIC_FWRESET) + return -EBUSY; + + atomic_inc(&snic->ios_inflight); + + SNIC_SCSI_DBG(snic->shost, + "issu_tmreq: Task mgmt req %d. 
rqi %p w/ tag %x\n", + tmf, rqi, tag); + + + if (tmf == SNIC_ITMF_LUN_RESET) { + tmreq = snic_dr_req_init(snic, rqi); + req_id = SCSI_NO_TAG; + } else { + tmreq = snic_abort_req_init(snic, rqi); + req_id = tag; + } + + if (!tmreq) { + ret = -ENOMEM; + + goto tmreq_err; + } + + ret = snic_queue_itmf_req(snic, tmreq, sc, tmf, req_id); + +tmreq_err: + if (ret) { + SNIC_HOST_ERR(snic->shost, + "issu_tmreq: Queueing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n", + tmf, sc, rqi, req_id, tag, ret); + } else { + SNIC_SCSI_DBG(snic->shost, + "issu_tmreq: Queueing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n", + tmf, sc, rqi, req_id, tag); + } + + atomic_dec(&snic->ios_inflight); + + return ret; +} + +/* + * snic_queue_abort_req : Queues abort req to WQ + */ +static int +snic_queue_abort_req(struct snic *snic, + struct snic_req_info *rqi, + struct scsi_cmnd *sc, + int tmf) +{ + SNIC_SCSI_DBG(snic->shost, "q_abtreq: sc %p, rqi %p, tag %x, tmf %d\n", + sc, rqi, snic_cmd_tag(sc), tmf); + + /* Add special tag for abort */ + rqi->tm_tag |= SNIC_TAG_ABORT; + + return snic_issue_tm_req(snic, rqi, sc, tmf); +} + +/* + * snic_abort_finish : called by snic_abort_cmd on queuing abort successfully. + */ +static int +snic_abort_finish(struct snic *snic, struct scsi_cmnd *sc) +{ + struct snic_req_info *rqi = NULL; + spinlock_t *io_lock = NULL; + unsigned long flags; + int ret = 0, tag = snic_cmd_tag(sc); + + io_lock = snic_io_lock_hash(snic, sc); + spin_lock_irqsave(io_lock, flags); + rqi = (struct snic_req_info *) CMD_SP(sc); + if (!rqi) { + atomic64_inc(&snic->s_stats.io.req_null); + CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL; + + SNIC_SCSI_DBG(snic->shost, + "abt_fini:req info is null tag 0x%x, sc 0x%p flags 0x%llx\n", + tag, sc, CMD_FLAGS(sc)); + ret = FAILED; + + goto abort_fail; + } + + rqi->abts_done = NULL; + + ret = FAILED; + + /* Check the abort status. */ + switch (CMD_ABTS_STATUS(sc)) { + case SNIC_INVALID_CODE: + /* Firmware didn't complete abort req, timedout */ + CMD_FLAGS(sc) |= SNIC_IO_ABTS_TIMEDOUT; + atomic64_inc(&snic->s_stats.abts.drv_tmo); + SNIC_SCSI_DBG(snic->shost, + "abt_fini:sc %p Tag %x Driver Timeout.flags 0x%llx\n", + sc, snic_cmd_tag(sc), CMD_FLAGS(sc)); + /* do not release snic request in timedout case */ + rqi = NULL; + + goto abort_fail; + + case SNIC_STAT_IO_SUCCESS: + case SNIC_STAT_IO_NOT_FOUND: + ret = SUCCESS; + /* + * If abort path doesn't call scsi_done(), + * the # IO timeouts == 2, will cause the LUN offline. + * Call scsi_done to complete the IO. 
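snic_abort_finish() above pre-loads CMD_ABTS_STATUS with SNIC_INVALID_CODE before the abort is queued, so a status that is still the sentinel after the timed wait means the firmware never answered; in that case the request buffer is deliberately left allocated, since the hardware may still write into it later. A stripped-down sketch of that sentinel test, with hypothetical ex_* names:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define EX_STATUS_INVALID 0xffu         /* sentinel: firmware has not replied yet */

struct ex_abort_ctx {
        uint8_t abts_status;            /* written by the completion path */
};

/*
 * After the timed wait: if the status is still the sentinel, the firmware
 * never answered, so the request buffer must not be released here;
 * otherwise it is safe to clean up and report the outcome.
 */
static int ex_abort_finish(const struct ex_abort_ctx *ctx, bool *release_req)
{
        if (ctx->abts_status == EX_STATUS_INVALID) {
                *release_req = false;
                return -ETIMEDOUT;
        }

        *release_req = true;
        return 0;
}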
+ */ + sc->result = (DID_ERROR << 16); + scsi_done(sc); + break; + + default: + /* Firmware completed abort with error */ + ret = FAILED; + rqi = NULL; + break; + } + + CMD_SP(sc) = NULL; + SNIC_HOST_INFO(snic->shost, + "abt_fini: Tag %x, Cmpl Status %s flags 0x%llx\n", + tag, snic_io_status_to_str(CMD_ABTS_STATUS(sc)), + CMD_FLAGS(sc)); + +abort_fail: + spin_unlock_irqrestore(io_lock, flags); + if (rqi) + snic_release_req_buf(snic, rqi, sc); + + return ret; +} /* end of snic_abort_finish */ + +/* + * snic_send_abort_and_wait : Issues Abort, and Waits + */ +static int +snic_send_abort_and_wait(struct snic *snic, struct scsi_cmnd *sc) +{ + struct snic_req_info *rqi = NULL; + enum snic_ioreq_state sv_state; + struct snic_tgt *tgt = NULL; + spinlock_t *io_lock = NULL; + DECLARE_COMPLETION_ONSTACK(tm_done); + unsigned long flags; + int ret = 0, tmf = 0, tag = snic_cmd_tag(sc); + + tgt = starget_to_tgt(scsi_target(sc->device)); + if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN)) + tmf = SNIC_ITMF_ABTS_TASK_TERM; + else + tmf = SNIC_ITMF_ABTS_TASK; + + /* stats */ + + io_lock = snic_io_lock_hash(snic, sc); + + /* + * Avoid a race between SCSI issuing the abort and the device + * completing the command. + * + * If the command is already completed by fw_cmpl code, + * we just return SUCCESS from here. This means that the abort + * succeeded. In the SCSI ML, since the timeout for command has + * happend, the completion wont actually complete the command + * and it will be considered as an aborted command + * + * The CMD_SP will not be cleared except while holding io_lock + */ + spin_lock_irqsave(io_lock, flags); + rqi = (struct snic_req_info *) CMD_SP(sc); + if (!rqi) { + spin_unlock_irqrestore(io_lock, flags); + + SNIC_HOST_ERR(snic->shost, + "abt_cmd: rqi is null. Tag %d flags 0x%llx\n", + tag, CMD_FLAGS(sc)); + + ret = SUCCESS; + + goto send_abts_end; + } + + rqi->abts_done = &tm_done; + if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(io_lock, flags); + + ret = 0; + goto abts_pending; + } + SNIC_BUG_ON(!rqi->abts_done); + + /* Save Command State, should be restored on failed to Queue. */ + sv_state = CMD_STATE(sc); + + /* + * Command is still pending, need to abort it + * If the fw completes the command after this point, + * the completion won't be done till mid-layer, since abot + * has already started. 
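The wait being set up here is the driver's standard handshake: publish an on-stack completion through rqi->abts_done (or dr_done) under the per-IO lock, queue the request, sleep in wait_for_completion_timeout(), and let the completion handler wake the waiter with complete(). A kernel-flavoured sketch of that pattern, assuming a hypothetical ex_req and queue_fn; it is an illustration, not the driver's code:

#include <linux/completion.h>
#include <linux/spinlock.h>

struct ex_req {
        struct completion *done;        /* published waiter, or NULL */
};

/* Waiter side: publish the completion under the lock, queue, then sleep. */
static int ex_send_and_wait(struct ex_req *req, spinlock_t *lock,
                            int (*queue_fn)(struct ex_req *),
                            unsigned long timeout)
{
        DECLARE_COMPLETION_ONSTACK(done);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(lock, flags);
        req->done = &done;
        spin_unlock_irqrestore(lock, flags);

        ret = queue_fn(req);
        if (ret) {
                spin_lock_irqsave(lock, flags);
                req->done = NULL;       /* undo on queuing failure */
                spin_unlock_irqrestore(lock, flags);
                return ret;
        }

        wait_for_completion_timeout(&done, timeout);

        /* The waiter clears the pointer under the lock (the driver does this
         * in its *_finish helpers) so the completer never touches a dead
         * stack frame after we return. */
        spin_lock_irqsave(lock, flags);
        req->done = NULL;
        spin_unlock_irqrestore(lock, flags);

        return 0;
}

/* Completion side (interrupt context): wake the waiter if one is parked. */
static void ex_complete(struct ex_req *req, spinlock_t *lock)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        if (req->done)
                complete(req->done);
        spin_unlock_irqrestore(lock, flags);
}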
+ */ + CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING; + CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE; + + SNIC_SCSI_DBG(snic->shost, "send_abt_cmd: TAG 0x%x\n", tag); + + spin_unlock_irqrestore(io_lock, flags); + + /* Now Queue the abort command to firmware */ + ret = snic_queue_abort_req(snic, rqi, sc, tmf); + if (ret) { + atomic64_inc(&snic->s_stats.abts.q_fail); + SNIC_HOST_ERR(snic->shost, + "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n", + tag, ret, CMD_FLAGS(sc)); + + spin_lock_irqsave(io_lock, flags); + /* Restore Command's previous state */ + CMD_STATE(sc) = sv_state; + rqi = (struct snic_req_info *) CMD_SP(sc); + if (rqi) + rqi->abts_done = NULL; + spin_unlock_irqrestore(io_lock, flags); + ret = FAILED; + + goto send_abts_end; + } + + spin_lock_irqsave(io_lock, flags); + if (tmf == SNIC_ITMF_ABTS_TASK) { + CMD_FLAGS(sc) |= SNIC_IO_ABTS_ISSUED; + atomic64_inc(&snic->s_stats.abts.num); + } else { + /* term stats */ + CMD_FLAGS(sc) |= SNIC_IO_TERM_ISSUED; + } + spin_unlock_irqrestore(io_lock, flags); + + SNIC_SCSI_DBG(snic->shost, + "send_abt_cmd: sc %p Tag %x flags 0x%llx\n", + sc, tag, CMD_FLAGS(sc)); + + + ret = 0; + +abts_pending: + /* + * Queued an abort IO, wait for its completion. + * Once the fw completes the abort command, it will + * wakeup this thread. + */ + wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT); + +send_abts_end: + return ret; +} /* end of snic_send_abort_and_wait */ + +/* + * This function is exported to SCSI for sending abort cmnds. + * A SCSI IO is represent by snic_ioreq in the driver. + * The snic_ioreq is linked to the SCSI Cmd, thus a link with the ULP'S IO + */ +int +snic_abort_cmd(struct scsi_cmnd *sc) +{ + struct snic *snic = shost_priv(sc->device->host); + int ret = SUCCESS, tag = snic_cmd_tag(sc); + u32 start_time = jiffies; + + SNIC_SCSI_DBG(snic->shost, "abt_cmd:sc %p :0x%x :req = %p :tag = %d\n", + sc, sc->cmnd[0], scsi_cmd_to_rq(sc), tag); + + if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) { + SNIC_HOST_ERR(snic->shost, + "abt_cmd: tag %x Parent Devs are not rdy\n", + tag); + ret = FAST_IO_FAIL; + + goto abort_end; + } + + + ret = snic_send_abort_and_wait(snic, sc); + if (ret) + goto abort_end; + + ret = snic_abort_finish(snic, sc); + +abort_end: + SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, + jiffies_to_msecs(jiffies - start_time), 0, + SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc)); + + SNIC_SCSI_DBG(snic->shost, + "abts: Abort Req Status = %s\n", + (ret == SUCCESS) ? "SUCCESS" : + ((ret == FAST_IO_FAIL) ? 
"FAST_IO_FAIL" : "FAILED")); + + return ret; +} + + + +static int +snic_is_abts_pending(struct snic *snic, struct scsi_cmnd *lr_sc) +{ + struct snic_req_info *rqi = NULL; + struct scsi_cmnd *sc = NULL; + struct scsi_device *lr_sdev = NULL; + spinlock_t *io_lock = NULL; + u32 tag; + unsigned long flags; + + if (lr_sc) + lr_sdev = lr_sc->device; + + /* walk through the tag map, an dcheck if IOs are still pending in fw*/ + for (tag = 0; tag < snic->max_tag_id; tag++) { + io_lock = snic_io_lock_tag(snic, tag); + + spin_lock_irqsave(io_lock, flags); + sc = scsi_host_find_tag(snic->shost, tag); + + if (!sc || (lr_sc && (sc->device != lr_sdev || sc == lr_sc))) { + spin_unlock_irqrestore(io_lock, flags); + + continue; + } + + rqi = (struct snic_req_info *) CMD_SP(sc); + if (!rqi) { + spin_unlock_irqrestore(io_lock, flags); + + continue; + } + + /* + * Found IO that is still pending w/ firmware and belongs to + * the LUN that is under reset, if lr_sc != NULL + */ + SNIC_SCSI_DBG(snic->shost, "Found IO in %s on LUN\n", + snic_ioreq_state_to_str(CMD_STATE(sc))); + + if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(io_lock, flags); + + return 1; + } + + spin_unlock_irqrestore(io_lock, flags); + } + + return 0; +} /* end of snic_is_abts_pending */ + +static int +snic_dr_clean_single_req(struct snic *snic, + u32 tag, + struct scsi_device *lr_sdev) +{ + struct snic_req_info *rqi = NULL; + struct snic_tgt *tgt = NULL; + struct scsi_cmnd *sc = NULL; + spinlock_t *io_lock = NULL; + u32 sv_state = 0, tmf = 0; + DECLARE_COMPLETION_ONSTACK(tm_done); + unsigned long flags; + int ret = 0; + + io_lock = snic_io_lock_tag(snic, tag); + spin_lock_irqsave(io_lock, flags); + sc = scsi_host_find_tag(snic->shost, tag); + + /* Ignore Cmd that don't belong to Lun Reset device */ + if (!sc || sc->device != lr_sdev) + goto skip_clean; + + rqi = (struct snic_req_info *) CMD_SP(sc); + + if (!rqi) + goto skip_clean; + + + if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) + goto skip_clean; + + + if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) && + (!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) { + + SNIC_SCSI_DBG(snic->shost, + "clean_single_req: devrst is not pending sc 0x%p\n", + sc); + + goto skip_clean; + } + + SNIC_SCSI_DBG(snic->shost, + "clean_single_req: Found IO in %s on lun\n", + snic_ioreq_state_to_str(CMD_STATE(sc))); + + /* Save Command State */ + sv_state = CMD_STATE(sc); + + /* + * Any pending IO issued prior to reset is expected to be + * in abts pending state, if not we need to set SNIC_IOREQ_ABTS_PENDING + * to indicate the IO is abort pending. + * When IO is completed, the IO will be handed over and handled + * in this function. + */ + + CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING; + SNIC_BUG_ON(rqi->abts_done); + + if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) { + rqi->tm_tag = SNIC_TAG_DEV_RST; + + SNIC_SCSI_DBG(snic->shost, + "clean_single_req:devrst sc 0x%p\n", sc); + } + + CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE; + rqi->abts_done = &tm_done; + spin_unlock_irqrestore(io_lock, flags); + + tgt = starget_to_tgt(scsi_target(sc->device)); + if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN)) + tmf = SNIC_ITMF_ABTS_TASK_TERM; + else + tmf = SNIC_ITMF_ABTS_TASK; + + /* Now queue the abort command to firmware */ + ret = snic_queue_abort_req(snic, rqi, sc, tmf); + if (ret) { + SNIC_HOST_ERR(snic->shost, + "clean_single_req_err:sc %p, tag %d abt failed. 
tm_tag %d flags 0x%llx\n", + sc, tag, rqi->tm_tag, CMD_FLAGS(sc)); + + spin_lock_irqsave(io_lock, flags); + rqi = (struct snic_req_info *) CMD_SP(sc); + if (rqi) + rqi->abts_done = NULL; + + /* Restore Command State */ + if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) + CMD_STATE(sc) = sv_state; + + ret = 1; + goto skip_clean; + } + + spin_lock_irqsave(io_lock, flags); + if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) + CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED; + + CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED; + spin_unlock_irqrestore(io_lock, flags); + + wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT); + + /* Recheck cmd state to check if it now aborted. */ + spin_lock_irqsave(io_lock, flags); + rqi = (struct snic_req_info *) CMD_SP(sc); + if (!rqi) { + CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL; + goto skip_clean; + } + rqi->abts_done = NULL; + + /* if abort is still pending w/ fw, fail */ + if (CMD_ABTS_STATUS(sc) == SNIC_INVALID_CODE) { + SNIC_HOST_ERR(snic->shost, + "clean_single_req_err:sc %p tag %d abt still pending w/ fw, tm_tag %d flags 0x%llx\n", + sc, tag, rqi->tm_tag, CMD_FLAGS(sc)); + + CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE; + ret = 1; + + goto skip_clean; + } + + CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE; + CMD_SP(sc) = NULL; + spin_unlock_irqrestore(io_lock, flags); + + snic_release_req_buf(snic, rqi, sc); + + sc->result = (DID_ERROR << 16); + scsi_done(sc); + + ret = 0; + + return ret; + +skip_clean: + spin_unlock_irqrestore(io_lock, flags); + + return ret; +} /* end of snic_dr_clean_single_req */ + +static int +snic_dr_clean_pending_req(struct snic *snic, struct scsi_cmnd *lr_sc) +{ + struct scsi_device *lr_sdev = lr_sc->device; + u32 tag = 0; + int ret = FAILED; + + for (tag = 0; tag < snic->max_tag_id; tag++) { + if (tag == snic_cmd_tag(lr_sc)) + continue; + + ret = snic_dr_clean_single_req(snic, tag, lr_sdev); + if (ret) { + SNIC_HOST_ERR(snic->shost, "clean_err:tag = %d\n", tag); + + goto clean_err; + } + } + + schedule_timeout(msecs_to_jiffies(100)); + + /* Walk through all the cmds and check abts status. */ + if (snic_is_abts_pending(snic, lr_sc)) { + ret = FAILED; + + goto clean_err; + } + + ret = 0; + SNIC_SCSI_DBG(snic->shost, "clean_pending_req: Success.\n"); + + return ret; + +clean_err: + ret = FAILED; + SNIC_HOST_ERR(snic->shost, + "Failed to Clean Pending IOs on %s device.\n", + dev_name(&lr_sdev->sdev_gendev)); + + return ret; + +} /* end of snic_dr_clean_pending_req */ + +/* + * snic_dr_finish : Called by snic_device_reset + */ +static int +snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc) +{ + struct snic_req_info *rqi = NULL; + spinlock_t *io_lock = NULL; + unsigned long flags; + int lr_res = 0; + int ret = FAILED; + + io_lock = snic_io_lock_hash(snic, sc); + spin_lock_irqsave(io_lock, flags); + rqi = (struct snic_req_info *) CMD_SP(sc); + if (!rqi) { + spin_unlock_irqrestore(io_lock, flags); + SNIC_SCSI_DBG(snic->shost, + "dr_fini: rqi is null tag 0x%x sc 0x%p flags 0x%llx\n", + snic_cmd_tag(sc), sc, CMD_FLAGS(sc)); + + ret = FAILED; + goto dr_fini_end; + } + + rqi->dr_done = NULL; + + lr_res = CMD_LR_STATUS(sc); + + switch (lr_res) { + case SNIC_INVALID_CODE: + /* stats */ + SNIC_SCSI_DBG(snic->shost, + "dr_fini: Tag %x Dev Reset Timedout. 
flags 0x%llx\n", + snic_cmd_tag(sc), CMD_FLAGS(sc)); + + CMD_FLAGS(sc) |= SNIC_DEV_RST_TIMEDOUT; + ret = FAILED; + + goto dr_failed; + + case SNIC_STAT_IO_SUCCESS: + SNIC_SCSI_DBG(snic->shost, + "dr_fini: Tag %x Dev Reset cmpl\n", + snic_cmd_tag(sc)); + ret = 0; + break; + + default: + SNIC_HOST_ERR(snic->shost, + "dr_fini:Device Reset completed& failed.Tag = %x lr_status %s flags 0x%llx\n", + snic_cmd_tag(sc), + snic_io_status_to_str(lr_res), CMD_FLAGS(sc)); + ret = FAILED; + goto dr_failed; + } + spin_unlock_irqrestore(io_lock, flags); + + /* + * Cleanup any IOs on this LUN that have still not completed. + * If any of these fail, then LUN Reset fails. + * Cleanup cleans all commands on this LUN except + * the lun reset command. If all cmds get cleaned, the LUN Reset + * succeeds. + */ + + ret = snic_dr_clean_pending_req(snic, sc); + if (ret) { + spin_lock_irqsave(io_lock, flags); + SNIC_SCSI_DBG(snic->shost, + "dr_fini: Device Reset Failed since could not abort all IOs. Tag = %x.\n", + snic_cmd_tag(sc)); + rqi = (struct snic_req_info *) CMD_SP(sc); + + goto dr_failed; + } else { + /* Cleanup LUN Reset Command */ + spin_lock_irqsave(io_lock, flags); + rqi = (struct snic_req_info *) CMD_SP(sc); + if (rqi) + ret = SUCCESS; /* Completed Successfully */ + else + ret = FAILED; + } + +dr_failed: + lockdep_assert_held(io_lock); + if (rqi) + CMD_SP(sc) = NULL; + spin_unlock_irqrestore(io_lock, flags); + + if (rqi) + snic_release_req_buf(snic, rqi, sc); + +dr_fini_end: + return ret; +} /* end of snic_dr_finish */ + +static int +snic_queue_dr_req(struct snic *snic, + struct snic_req_info *rqi, + struct scsi_cmnd *sc) +{ + /* Add special tag for device reset */ + rqi->tm_tag |= SNIC_TAG_DEV_RST; + + return snic_issue_tm_req(snic, rqi, sc, SNIC_ITMF_LUN_RESET); +} + +static int +snic_send_dr_and_wait(struct snic *snic, struct scsi_cmnd *sc) +{ + struct snic_req_info *rqi = NULL; + enum snic_ioreq_state sv_state; + spinlock_t *io_lock = NULL; + unsigned long flags; + DECLARE_COMPLETION_ONSTACK(tm_done); + int ret = FAILED, tag = snic_cmd_tag(sc); + + io_lock = snic_io_lock_hash(snic, sc); + spin_lock_irqsave(io_lock, flags); + CMD_FLAGS(sc) |= SNIC_DEVICE_RESET; + rqi = (struct snic_req_info *) CMD_SP(sc); + if (!rqi) { + SNIC_HOST_ERR(snic->shost, + "send_dr: rqi is null, Tag 0x%x flags 0x%llx\n", + tag, CMD_FLAGS(sc)); + spin_unlock_irqrestore(io_lock, flags); + + ret = FAILED; + goto send_dr_end; + } + + /* Save Command state to restore in case Queuing failed. */ + sv_state = CMD_STATE(sc); + + CMD_STATE(sc) = SNIC_IOREQ_LR_PENDING; + CMD_LR_STATUS(sc) = SNIC_INVALID_CODE; + + SNIC_SCSI_DBG(snic->shost, "dr: TAG = %x\n", tag); + + rqi->dr_done = &tm_done; + SNIC_BUG_ON(!rqi->dr_done); + + spin_unlock_irqrestore(io_lock, flags); + /* + * The Command state is changed to IOREQ_PENDING, + * in this case, if the command is completed, the icmnd_cmpl will + * mark the cmd as completed. + * This logic still makes LUN Reset is inevitable. + */ + + ret = snic_queue_dr_req(snic, rqi, sc); + if (ret) { + SNIC_HOST_ERR(snic->shost, + "send_dr: IO w/ Tag 0x%x Failed err = %d. flags 0x%llx\n", + tag, ret, CMD_FLAGS(sc)); + + spin_lock_irqsave(io_lock, flags); + /* Restore State */ + CMD_STATE(sc) = sv_state; + rqi = (struct snic_req_info *) CMD_SP(sc); + if (rqi) + rqi->dr_done = NULL; + /* rqi is freed in caller. 
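The queuing-failure branch here restores the state snapshot (sv_state) taken before the command was flipped to a *_PENDING state, so a completion that races in later still finds something coherent; the abort path uses the same idiom. A tiny stand-alone sketch of it (in the driver the whole sequence runs under the per-IO lock):

#include <stdint.h>

enum ex_ioreq_state {
        EX_IOREQ_NOT_INITED,
        EX_IOREQ_PENDING,
        EX_IOREQ_ABTS_PENDING,
        EX_IOREQ_COMPLETE,
};

struct ex_cmd {
        enum ex_ioreq_state state;
};

/* Flip the command into ABTS_PENDING, try to queue, roll back on failure. */
static int ex_try_queue_abort(struct ex_cmd *cmd, int (*queue_fn)(struct ex_cmd *))
{
        enum ex_ioreq_state saved = cmd->state; /* snapshot before the transition */
        int ret;

        cmd->state = EX_IOREQ_ABTS_PENDING;
        ret = queue_fn(cmd);
        if (ret)
                cmd->state = saved;     /* restore so a normal completion still works */

        return ret;
}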
*/ + spin_unlock_irqrestore(io_lock, flags); + ret = FAILED; + + goto send_dr_end; + } + + spin_lock_irqsave(io_lock, flags); + CMD_FLAGS(sc) |= SNIC_DEV_RST_ISSUED; + spin_unlock_irqrestore(io_lock, flags); + + ret = 0; + + wait_for_completion_timeout(&tm_done, SNIC_LUN_RESET_TIMEOUT); + +send_dr_end: + return ret; +} + +/* + * auxillary funciton to check lun reset op is supported or not + * Not supported if returns 0 + */ +static int +snic_dev_reset_supported(struct scsi_device *sdev) +{ + struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev)); + + if (tgt->tdata.typ == SNIC_TGT_DAS) + return 0; + + return 1; +} + +static void +snic_unlink_and_release_req(struct snic *snic, struct scsi_cmnd *sc, int flag) +{ + struct snic_req_info *rqi = NULL; + spinlock_t *io_lock = NULL; + unsigned long flags; + u32 start_time = jiffies; + + io_lock = snic_io_lock_hash(snic, sc); + spin_lock_irqsave(io_lock, flags); + rqi = (struct snic_req_info *) CMD_SP(sc); + if (rqi) { + start_time = rqi->start_time; + CMD_SP(sc) = NULL; + } + + CMD_FLAGS(sc) |= flag; + spin_unlock_irqrestore(io_lock, flags); + + if (rqi) + snic_release_req_buf(snic, rqi, sc); + + SNIC_TRC(snic->shost->host_no, snic_cmd_tag(sc), (ulong) sc, + jiffies_to_msecs(jiffies - start_time), (ulong) rqi, + SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc)); +} + +/* + * SCSI Eh thread issues a LUN Reset when one or more commands on a LUN + * fail to get aborted. It calls driver's eh_device_reset with a SCSI + * command on the LUN. + */ +int +snic_device_reset(struct scsi_cmnd *sc) +{ + struct Scsi_Host *shost = sc->device->host; + struct snic *snic = shost_priv(shost); + struct snic_req_info *rqi = NULL; + int tag = snic_cmd_tag(sc); + int start_time = jiffies; + int ret = FAILED; + int dr_supp = 0; + + SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n", + sc, sc->cmnd[0], scsi_cmd_to_rq(sc), + snic_cmd_tag(sc)); + dr_supp = snic_dev_reset_supported(sc->device); + if (!dr_supp) { + /* device reset op is not supported */ + SNIC_HOST_INFO(shost, "LUN Reset Op not supported.\n"); + snic_unlink_and_release_req(snic, sc, SNIC_DEV_RST_NOTSUP); + + goto dev_rst_end; + } + + if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) { + snic_unlink_and_release_req(snic, sc, 0); + SNIC_HOST_ERR(shost, "Devrst: Parent Devs are not online.\n"); + + goto dev_rst_end; + } + + /* There is no tag when lun reset is issue through ioctl. */ + if (unlikely(tag <= SNIC_NO_TAG)) { + SNIC_HOST_INFO(snic->shost, + "Devrst: LUN Reset Recvd thru IOCTL.\n"); + + rqi = snic_req_init(snic, 0); + if (!rqi) + goto dev_rst_end; + + memset(scsi_cmd_priv(sc), 0, + sizeof(struct snic_internal_io_state)); + CMD_SP(sc) = (char *)rqi; + CMD_FLAGS(sc) = SNIC_NO_FLAGS; + + /* Add special tag for dr coming from user spc */ + rqi->tm_tag = SNIC_TAG_IOCTL_DEV_RST; + rqi->sc = sc; + } + + ret = snic_send_dr_and_wait(snic, sc); + if (ret) { + SNIC_HOST_ERR(snic->shost, + "Devrst: IO w/ Tag %x Failed w/ err = %d\n", + tag, ret); + + snic_unlink_and_release_req(snic, sc, 0); + + goto dev_rst_end; + } + + ret = snic_dr_finish(snic, sc); + +dev_rst_end: + SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, + jiffies_to_msecs(jiffies - start_time), + 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc)); + + SNIC_SCSI_DBG(snic->shost, + "Devrst: Returning from Device Reset : %s\n", + (ret == SUCCESS) ? "SUCCESS" : "FAILED"); + + return ret; +} /* end of snic_device_reset */ + +/* + * SCSI Error handling calls driver's eh_host_reset if all prior + * error handling levels return FAILED. 
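snic_abort_cmd() and snic_device_reset() above, together with snic_host_reset() defined a little further below, are the three entry points the SCSI error-handling ladder walks in order of increasing severity. They are hooked into the driver's host template outside this file; a hypothetical sketch of such wiring (the field values are made up, only the callback names come from this file):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

int snic_abort_cmd(struct scsi_cmnd *sc);
int snic_device_reset(struct scsi_cmnd *sc);
int snic_host_reset(struct scsi_cmnd *sc);

/* Hypothetical wiring; the driver's real template lives elsewhere and
 * sets many more fields. */
static struct scsi_host_template ex_snic_like_tmpl = {
        .name                    = "ex-snic",
        .eh_abort_handler        = snic_abort_cmd,      /* first: abort one command  */
        .eh_device_reset_handler = snic_device_reset,   /* then: reset the LUN       */
        .eh_host_reset_handler   = snic_host_reset,     /* last: reset the whole HBA */
};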
+ * + * Host Reset is the highest level of error recovery. If this fails, then + * host is offlined by SCSI. + */ +/* + * snic_issue_hba_reset : Queues FW Reset Request. + */ +static int +snic_issue_hba_reset(struct snic *snic, struct scsi_cmnd *sc) +{ + struct snic_req_info *rqi = NULL; + struct snic_host_req *req = NULL; + spinlock_t *io_lock = NULL; + DECLARE_COMPLETION_ONSTACK(wait); + unsigned long flags; + int ret = -ENOMEM; + + rqi = snic_req_init(snic, 0); + if (!rqi) { + ret = -ENOMEM; + + goto hba_rst_end; + } + + if (snic_cmd_tag(sc) == SCSI_NO_TAG) { + memset(scsi_cmd_priv(sc), 0, + sizeof(struct snic_internal_io_state)); + SNIC_HOST_INFO(snic->shost, "issu_hr:Host reset thru ioctl.\n"); + rqi->sc = sc; + } + + req = rqi_to_req(rqi); + + io_lock = snic_io_lock_hash(snic, sc); + spin_lock_irqsave(io_lock, flags); + SNIC_BUG_ON(CMD_SP(sc) != NULL); + CMD_STATE(sc) = SNIC_IOREQ_PENDING; + CMD_SP(sc) = (char *) rqi; + CMD_FLAGS(sc) |= SNIC_IO_INITIALIZED; + snic->remove_wait = &wait; + spin_unlock_irqrestore(io_lock, flags); + + /* Initialize Request */ + snic_io_hdr_enc(&req->hdr, SNIC_REQ_HBA_RESET, 0, snic_cmd_tag(sc), + snic->config.hid, 0, (ulong) rqi); + + req->u.reset.flags = 0; + + ret = snic_queue_wq_desc(snic, req, sizeof(*req)); + if (ret) { + SNIC_HOST_ERR(snic->shost, + "issu_hr:Queuing HBA Reset Failed. w err %d\n", + ret); + + goto hba_rst_err; + } + + spin_lock_irqsave(io_lock, flags); + CMD_FLAGS(sc) |= SNIC_HOST_RESET_ISSUED; + spin_unlock_irqrestore(io_lock, flags); + atomic64_inc(&snic->s_stats.reset.hba_resets); + SNIC_HOST_INFO(snic->shost, "Queued HBA Reset Successfully.\n"); + + wait_for_completion_timeout(snic->remove_wait, + SNIC_HOST_RESET_TIMEOUT); + + if (snic_get_state(snic) == SNIC_FWRESET) { + SNIC_HOST_ERR(snic->shost, "reset_cmpl: Reset Timedout.\n"); + ret = -ETIMEDOUT; + + goto hba_rst_err; + } + + spin_lock_irqsave(io_lock, flags); + snic->remove_wait = NULL; + rqi = (struct snic_req_info *) CMD_SP(sc); + CMD_SP(sc) = NULL; + spin_unlock_irqrestore(io_lock, flags); + + if (rqi) + snic_req_free(snic, rqi); + + ret = 0; + + return ret; + +hba_rst_err: + spin_lock_irqsave(io_lock, flags); + snic->remove_wait = NULL; + rqi = (struct snic_req_info *) CMD_SP(sc); + CMD_SP(sc) = NULL; + spin_unlock_irqrestore(io_lock, flags); + + if (rqi) + snic_req_free(snic, rqi); + +hba_rst_end: + SNIC_HOST_ERR(snic->shost, + "reset:HBA Reset Failed w/ err = %d.\n", + ret); + + return ret; +} /* end of snic_issue_hba_reset */ + +int +snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc) +{ + struct snic *snic = shost_priv(shost); + enum snic_state sv_state; + unsigned long flags; + int ret = FAILED; + + /* Set snic state as SNIC_FWRESET*/ + sv_state = snic_get_state(snic); + + spin_lock_irqsave(&snic->snic_lock, flags); + if (snic_get_state(snic) == SNIC_FWRESET) { + spin_unlock_irqrestore(&snic->snic_lock, flags); + SNIC_HOST_INFO(shost, "reset:prev reset is in progress\n"); + + msleep(SNIC_HOST_RESET_TIMEOUT); + ret = SUCCESS; + + goto reset_end; + } + + snic_set_state(snic, SNIC_FWRESET); + spin_unlock_irqrestore(&snic->snic_lock, flags); + + + /* Wait for all the IOs that are entered in Qcmd */ + while (atomic_read(&snic->ios_inflight)) + schedule_timeout(msecs_to_jiffies(1)); + + ret = snic_issue_hba_reset(snic, sc); + if (ret) { + SNIC_HOST_ERR(shost, + "reset:Host Reset Failed w/ err %d.\n", + ret); + spin_lock_irqsave(&snic->snic_lock, flags); + snic_set_state(snic, sv_state); + spin_unlock_irqrestore(&snic->snic_lock, flags); + 
atomic64_inc(&snic->s_stats.reset.hba_reset_fail); + ret = FAILED; + + goto reset_end; + } + + ret = SUCCESS; + +reset_end: + return ret; +} /* end of snic_reset */ + +/* + * SCSI Error handling calls driver's eh_host_reset if all prior + * error handling levels return FAILED. + * + * Host Reset is the highest level of error recovery. If this fails, then + * host is offlined by SCSI. + */ +int +snic_host_reset(struct scsi_cmnd *sc) +{ + struct Scsi_Host *shost = sc->device->host; + u32 start_time = jiffies; + int ret; + + SNIC_SCSI_DBG(shost, + "host reset:sc %p sc_cmd 0x%x req %p tag %d flags 0x%llx\n", + sc, sc->cmnd[0], scsi_cmd_to_rq(sc), + snic_cmd_tag(sc), CMD_FLAGS(sc)); + + ret = snic_reset(shost, sc); + + SNIC_TRC(shost->host_no, snic_cmd_tag(sc), (ulong) sc, + jiffies_to_msecs(jiffies - start_time), + 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc)); + + return ret; +} /* end of snic_host_reset */ + +/* + * snic_cmpl_pending_tmreq : Caller should hold io_lock + */ +static void +snic_cmpl_pending_tmreq(struct snic *snic, struct scsi_cmnd *sc) +{ + struct snic_req_info *rqi = NULL; + + SNIC_SCSI_DBG(snic->shost, + "Completing Pending TM Req sc %p, state %s flags 0x%llx\n", + sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc)); + + /* + * CASE : FW didn't post itmf completion due to PCIe Errors. + * Marking the abort status as Success to call scsi completion + * in snic_abort_finish() + */ + CMD_ABTS_STATUS(sc) = SNIC_STAT_IO_SUCCESS; + + rqi = (struct snic_req_info *) CMD_SP(sc); + if (!rqi) + return; + + if (rqi->dr_done) + complete(rqi->dr_done); + else if (rqi->abts_done) + complete(rqi->abts_done); +} + +/* + * snic_scsi_cleanup: Walks through tag map and releases the reqs + */ +static void +snic_scsi_cleanup(struct snic *snic, int ex_tag) +{ + struct snic_req_info *rqi = NULL; + struct scsi_cmnd *sc = NULL; + spinlock_t *io_lock = NULL; + unsigned long flags; + int tag; + u64 st_time = 0; + + SNIC_SCSI_DBG(snic->shost, "sc_clean: scsi cleanup.\n"); + + for (tag = 0; tag < snic->max_tag_id; tag++) { + /* Skip ex_tag */ + if (tag == ex_tag) + continue; + + io_lock = snic_io_lock_tag(snic, tag); + spin_lock_irqsave(io_lock, flags); + sc = scsi_host_find_tag(snic->shost, tag); + if (!sc) { + spin_unlock_irqrestore(io_lock, flags); + + continue; + } + + if (unlikely(snic_tmreq_pending(sc))) { + /* + * When FW Completes reset w/o sending completions + * for outstanding ios. 
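The tag-map walk that follows, like every path in this file, serializes per-command state through snic_io_lock_tag()/snic_io_lock_hash(). Those helpers are defined elsewhere in the driver; a hypothetical implementation of the underlying idea, a small fixed pool of spinlocks indexed by a hash of the tag, might look like this (names and pool size are made up):

#include <linux/spinlock.h>
#include <linux/types.h>

#define EX_IO_LOCKS 64                  /* power of two; made-up count */

static spinlock_t ex_io_lock_tbl[EX_IO_LOCKS];

/* Hash a command tag onto one of a fixed pool of locks. */
static inline spinlock_t *ex_io_lock_tag(u32 tag)
{
        return &ex_io_lock_tbl[tag & (EX_IO_LOCKS - 1)];
}

static void ex_io_locks_init(void)
{
        int i;

        for (i = 0; i < EX_IO_LOCKS; i++)
                spin_lock_init(&ex_io_lock_tbl[i]);
}

A small hashed pool bounds memory while avoiding one global lock that every submission and completion would contend on.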
+ */ + snic_cmpl_pending_tmreq(snic, sc); + spin_unlock_irqrestore(io_lock, flags); + + continue; + } + + rqi = (struct snic_req_info *) CMD_SP(sc); + if (!rqi) { + spin_unlock_irqrestore(io_lock, flags); + + goto cleanup; + } + + SNIC_SCSI_DBG(snic->shost, + "sc_clean: sc %p, rqi %p, tag %d flags 0x%llx\n", + sc, rqi, tag, CMD_FLAGS(sc)); + + CMD_SP(sc) = NULL; + CMD_FLAGS(sc) |= SNIC_SCSI_CLEANUP; + spin_unlock_irqrestore(io_lock, flags); + st_time = rqi->start_time; + + SNIC_HOST_INFO(snic->shost, + "sc_clean: Releasing rqi %p : flags 0x%llx\n", + rqi, CMD_FLAGS(sc)); + + snic_release_req_buf(snic, rqi, sc); + +cleanup: + sc->result = DID_TRANSPORT_DISRUPTED << 16; + SNIC_HOST_INFO(snic->shost, + "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p, Tag %d flags 0x%llx rqi %p duration %u msecs\n", + sc, scsi_cmd_to_rq(sc)->tag, CMD_FLAGS(sc), rqi, + jiffies_to_msecs(jiffies - st_time)); + + /* Update IO stats */ + snic_stats_update_io_cmpl(&snic->s_stats); + + SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, + jiffies_to_msecs(jiffies - st_time), 0, + SNIC_TRC_CMD(sc), + SNIC_TRC_CMD_STATE_FLAGS(sc)); + + scsi_done(sc); + } +} /* end of snic_scsi_cleanup */ + +void +snic_shutdown_scsi_cleanup(struct snic *snic) +{ + SNIC_HOST_INFO(snic->shost, "Shutdown time SCSI Cleanup.\n"); + + snic_scsi_cleanup(snic, SCSI_NO_TAG); +} /* end of snic_shutdown_scsi_cleanup */ + +/* + * snic_internal_abort_io + * called by : snic_tgt_scsi_abort_io + */ +static int +snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf) +{ + struct snic_req_info *rqi = NULL; + spinlock_t *io_lock = NULL; + unsigned long flags; + u32 sv_state = 0; + int ret = 0; + + io_lock = snic_io_lock_hash(snic, sc); + spin_lock_irqsave(io_lock, flags); + rqi = (struct snic_req_info *) CMD_SP(sc); + if (!rqi) + goto skip_internal_abts; + + if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) + goto skip_internal_abts; + + if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) && + (!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) { + + SNIC_SCSI_DBG(snic->shost, + "internal_abts: dev rst not pending sc 0x%p\n", + sc); + + goto skip_internal_abts; + } + + + if (!(CMD_FLAGS(sc) & SNIC_IO_ISSUED)) { + SNIC_SCSI_DBG(snic->shost, + "internal_abts: IO not yet issued sc 0x%p tag 0x%x flags 0x%llx state %d\n", + sc, snic_cmd_tag(sc), CMD_FLAGS(sc), CMD_STATE(sc)); + + goto skip_internal_abts; + } + + sv_state = CMD_STATE(sc); + CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING; + CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE; + CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_PENDING; + + if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) { + /* stats */ + rqi->tm_tag = SNIC_TAG_DEV_RST; + SNIC_SCSI_DBG(snic->shost, "internal_abts:dev rst sc %p\n", sc); + } + + SNIC_SCSI_DBG(snic->shost, "internal_abts: Issuing abts tag %x\n", + snic_cmd_tag(sc)); + SNIC_BUG_ON(rqi->abts_done); + spin_unlock_irqrestore(io_lock, flags); + + ret = snic_queue_abort_req(snic, rqi, sc, tmf); + if (ret) { + SNIC_HOST_ERR(snic->shost, + "internal_abts: Tag = %x , Failed w/ err = %d\n", + snic_cmd_tag(sc), ret); + + spin_lock_irqsave(io_lock, flags); + + if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) + CMD_STATE(sc) = sv_state; + + goto skip_internal_abts; + } + + spin_lock_irqsave(io_lock, flags); + if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) + CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED; + else + CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED; + + ret = SUCCESS; + +skip_internal_abts: + lockdep_assert_held(io_lock); + spin_unlock_irqrestore(io_lock, flags); + + return ret; +} /* end of snic_internal_abort_io */ + +/* + * 
snic_tgt_scsi_abort_io : called by snic_tgt_del + */ +int +snic_tgt_scsi_abort_io(struct snic_tgt *tgt) +{ + struct snic *snic = NULL; + struct scsi_cmnd *sc = NULL; + struct snic_tgt *sc_tgt = NULL; + spinlock_t *io_lock = NULL; + unsigned long flags; + int ret = 0, tag, abt_cnt = 0, tmf = 0; + + if (!tgt) + return -1; + + snic = shost_priv(snic_tgt_to_shost(tgt)); + SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: Cleaning Pending IOs.\n"); + + if (tgt->tdata.typ == SNIC_TGT_DAS) + tmf = SNIC_ITMF_ABTS_TASK; + else + tmf = SNIC_ITMF_ABTS_TASK_TERM; + + for (tag = 0; tag < snic->max_tag_id; tag++) { + io_lock = snic_io_lock_tag(snic, tag); + + spin_lock_irqsave(io_lock, flags); + sc = scsi_host_find_tag(snic->shost, tag); + if (!sc) { + spin_unlock_irqrestore(io_lock, flags); + + continue; + } + + sc_tgt = starget_to_tgt(scsi_target(sc->device)); + if (sc_tgt != tgt) { + spin_unlock_irqrestore(io_lock, flags); + + continue; + } + spin_unlock_irqrestore(io_lock, flags); + + ret = snic_internal_abort_io(snic, sc, tmf); + if (ret < 0) { + SNIC_HOST_ERR(snic->shost, + "tgt_abt_io: Tag %x, Failed w err = %d\n", + tag, ret); + + continue; + } + + if (ret == SUCCESS) + abt_cnt++; + } + + SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: abt_cnt = %d\n", abt_cnt); + + return 0; +} /* end of snic_tgt_scsi_abort_io */ diff --git a/drivers/scsi/snic/snic_stats.h b/drivers/scsi/snic/snic_stats.h new file mode 100644 index 000000000..f0285c5a3 --- /dev/null +++ b/drivers/scsi/snic/snic_stats.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */ + +#ifndef __SNIC_STATS_H +#define __SNIC_STATS_H + +struct snic_io_stats { + atomic64_t active; /* Active IOs */ + atomic64_t max_active; /* Max # active IOs */ + atomic64_t max_sgl; /* Max # SGLs for any IO */ + atomic64_t max_time; /* Max time to process IO */ + atomic64_t max_qtime; /* Max time to Queue the IO */ + atomic64_t max_cmpl_time; /* Max time to complete the IO */ + atomic64_t sgl_cnt[SNIC_MAX_SG_DESC_CNT]; /* SGL Counters */ + atomic64_t max_io_sz; /* Max IO Size */ + atomic64_t compl; /* IO Completions */ + atomic64_t fail; /* IO Failures */ + atomic64_t req_null; /* req or req info is NULL */ + atomic64_t alloc_fail; /* Alloc Failures */ + atomic64_t sc_null; + atomic64_t io_not_found; /* IO Not Found */ + atomic64_t num_ios; /* Number of IOs */ +}; + +struct snic_abort_stats { + atomic64_t num; /* Abort counter */ + atomic64_t fail; /* Abort Failure Counter */ + atomic64_t drv_tmo; /* Abort Driver Timeouts */ + atomic64_t fw_tmo; /* Abort Firmware Timeouts */ + atomic64_t io_not_found;/* Abort IO Not Found */ + atomic64_t q_fail; /* Abort Queuing Failed */ +}; + +struct snic_reset_stats { + atomic64_t dev_resets; /* Device Reset Counter */ + atomic64_t dev_reset_fail; /* Device Reset Failures */ + atomic64_t dev_reset_aborts; /* Device Reset Aborts */ + atomic64_t dev_reset_tmo; /* Device Reset Timeout */ + atomic64_t dev_reset_terms; /* Device Reset terminate */ + atomic64_t hba_resets; /* hba/firmware resets */ + atomic64_t hba_reset_cmpl; /* hba/firmware reset completions */ + atomic64_t hba_reset_fail; /* hba/firmware failures */ + atomic64_t snic_resets; /* snic resets */ + atomic64_t snic_reset_compl; /* snic reset completions */ + atomic64_t snic_reset_fail; /* snic reset failures */ +}; + +struct snic_fw_stats { + atomic64_t actv_reqs; /* Active Requests */ + atomic64_t max_actv_reqs; /* Max Active Requests */ + atomic64_t out_of_res; /* Firmware Out Of Resources */ + atomic64_t 
io_errs; /* Firmware IO Firmware Errors */ + atomic64_t scsi_errs; /* Target hits check condition */ +}; + +struct snic_misc_stats { + u64 last_isr_time; + u64 last_ack_time; + atomic64_t ack_isr_cnt; + atomic64_t cmpl_isr_cnt; + atomic64_t errnotify_isr_cnt; + atomic64_t max_cq_ents; /* Max CQ Entries */ + atomic64_t data_cnt_mismat; /* Data Count Mismatch */ + atomic64_t io_tmo; + atomic64_t io_aborted; + atomic64_t sgl_inval; /* SGL Invalid */ + atomic64_t abts_wq_alloc_fail; /* Abort Path WQ desc alloc failure */ + atomic64_t devrst_wq_alloc_fail;/* Device Reset - WQ desc alloc fail */ + atomic64_t wq_alloc_fail; /* IO WQ desc alloc failure */ + atomic64_t no_icmnd_itmf_cmpls; + atomic64_t io_under_run; + atomic64_t qfull; + atomic64_t qsz_rampup; + atomic64_t qsz_rampdown; + atomic64_t last_qsz; + atomic64_t tgt_not_rdy; +}; + +struct snic_stats { + struct snic_io_stats io; + struct snic_abort_stats abts; + struct snic_reset_stats reset; + struct snic_fw_stats fw; + struct snic_misc_stats misc; + atomic64_t io_cmpl_skip; +}; + +void snic_stats_debugfs_init(struct snic *); +void snic_stats_debugfs_remove(struct snic *); + +/* Auxillary function to update active IO counter */ +static inline void +snic_stats_update_active_ios(struct snic_stats *s_stats) +{ + struct snic_io_stats *io = &s_stats->io; + int nr_active_ios; + + nr_active_ios = atomic64_read(&io->active); + if (atomic64_read(&io->max_active) < nr_active_ios) + atomic64_set(&io->max_active, nr_active_ios); + + atomic64_inc(&io->num_ios); +} + +/* Auxillary function to update IO completion counter */ +static inline void +snic_stats_update_io_cmpl(struct snic_stats *s_stats) +{ + atomic64_dec(&s_stats->io.active); + if (unlikely(atomic64_read(&s_stats->io_cmpl_skip))) + atomic64_dec(&s_stats->io_cmpl_skip); + else + atomic64_inc(&s_stats->io.compl); +} +#endif /* __SNIC_STATS_H */ diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c new file mode 100644 index 000000000..c2e5ab7e9 --- /dev/null +++ b/drivers/scsi/snic/snic_trc.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2014 Cisco Systems, Inc. All rights reserved. + +#include +#include +#include +#include + +#include "snic_io.h" +#include "snic.h" + +/* + * snic_get_trc_buf : Allocates a trace record and returns. + */ +struct snic_trc_data * +snic_get_trc_buf(void) +{ + struct snic_trc *trc = &snic_glob->trc; + struct snic_trc_data *td = NULL; + unsigned long flags; + + spin_lock_irqsave(&trc->lock, flags); + td = &trc->buf[trc->wr_idx]; + trc->wr_idx++; + + if (trc->wr_idx == trc->max_idx) + trc->wr_idx = 0; + + if (trc->wr_idx != trc->rd_idx) { + spin_unlock_irqrestore(&trc->lock, flags); + + goto end; + } + + trc->rd_idx++; + if (trc->rd_idx == trc->max_idx) + trc->rd_idx = 0; + + td->ts = 0; /* Marker for checking the record, for complete data*/ + spin_unlock_irqrestore(&trc->lock, flags); + +end: + + return td; +} /* end of snic_get_trc_buf */ + +/* + * snic_fmt_trc_data : Formats trace data for printing. + */ +static int +snic_fmt_trc_data(struct snic_trc_data *td, char *buf, int buf_sz) +{ + int len = 0; + struct timespec64 tmspec; + + jiffies_to_timespec64(td->ts, &tmspec); + + len += snprintf(buf, buf_sz, + "%llu.%09lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n", + tmspec.tv_sec, + tmspec.tv_nsec, + td->fn, + td->hno, + td->tag, + td->data[0], td->data[1], td->data[2], td->data[3], + td->data[4]); + + return len; +} /* end of snic_fmt_trc_data */ + +/* + * snic_get_trc_data : Returns a formatted trace buffer. 
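snic_get_trc_buf() above hands out slots from a fixed array: the write index wraps at max_idx and, when it catches the read index, the oldest record is simply overwritten, with ts doubling as an "entry still being written" marker that the reader below skips. A slightly simplified user-space sketch of the same overwrite-oldest ring:

#include <stdint.h>

#define EX_TRC_ENTRIES 8                /* small on purpose; the driver sizes this from pages */

struct ex_trc_rec {
        uint64_t ts;                    /* 0 = writer has not finished this record yet */
        uint64_t data;
};

struct ex_trc_ring {
        struct ex_trc_rec buf[EX_TRC_ENTRIES];
        unsigned int rd_idx, wr_idx;
};

/* Claim the next slot, overwriting the oldest record when the ring is full. */
static struct ex_trc_rec *ex_trc_claim(struct ex_trc_ring *r)
{
        struct ex_trc_rec *rec = &r->buf[r->wr_idx];

        if (++r->wr_idx == EX_TRC_ENTRIES)
                r->wr_idx = 0;

        if (r->wr_idx == r->rd_idx) {   /* full: drop the oldest entry */
                if (++r->rd_idx == EX_TRC_ENTRIES)
                        r->rd_idx = 0;
        }

        rec->ts = 0;                    /* in flight until the writer fills ts last */
        return rec;
}

/* Pop one finished record; returns 0 on success, -1 if empty or still in flight. */
static int ex_trc_pop(struct ex_trc_ring *r, struct ex_trc_rec *out)
{
        struct ex_trc_rec *rec;

        if (r->rd_idx == r->wr_idx)
                return -1;              /* empty */

        rec = &r->buf[r->rd_idx];
        if (rec->ts == 0)
                return -1;              /* writer not done yet */

        *out = *rec;
        if (++r->rd_idx == EX_TRC_ENTRIES)
                r->rd_idx = 0;

        return 0;
}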
+ */ +int +snic_get_trc_data(char *buf, int buf_sz) +{ + struct snic_trc_data *td = NULL; + struct snic_trc *trc = &snic_glob->trc; + unsigned long flags; + + spin_lock_irqsave(&trc->lock, flags); + if (trc->rd_idx == trc->wr_idx) { + spin_unlock_irqrestore(&trc->lock, flags); + + return -1; + } + td = &trc->buf[trc->rd_idx]; + + if (td->ts == 0) { + /* write in progress. */ + spin_unlock_irqrestore(&trc->lock, flags); + + return -1; + } + + trc->rd_idx++; + if (trc->rd_idx == trc->max_idx) + trc->rd_idx = 0; + spin_unlock_irqrestore(&trc->lock, flags); + + return snic_fmt_trc_data(td, buf, buf_sz); +} /* end of snic_get_trc_data */ + +/* + * snic_trc_init() : Configures Trace Functionality for snic. + */ +int +snic_trc_init(void) +{ + struct snic_trc *trc = &snic_glob->trc; + void *tbuf = NULL; + int tbuf_sz = 0, ret; + + tbuf_sz = (snic_trace_max_pages * PAGE_SIZE); + tbuf = vzalloc(tbuf_sz); + if (!tbuf) { + SNIC_ERR("Failed to Allocate Trace Buffer Size. %d\n", tbuf_sz); + SNIC_ERR("Trace Facility not enabled.\n"); + ret = -ENOMEM; + + return ret; + } + + trc->buf = (struct snic_trc_data *) tbuf; + spin_lock_init(&trc->lock); + + snic_trc_debugfs_init(); + + trc->max_idx = (tbuf_sz / SNIC_TRC_ENTRY_SZ); + trc->rd_idx = trc->wr_idx = 0; + trc->enable = true; + SNIC_INFO("Trace Facility Enabled.\n Trace Buffer SZ %lu Pages.\n", + tbuf_sz / PAGE_SIZE); + ret = 0; + + return ret; +} /* end of snic_trc_init */ + +/* + * snic_trc_free : Releases the trace buffer and disables the tracing. + */ +void +snic_trc_free(void) +{ + struct snic_trc *trc = &snic_glob->trc; + + trc->enable = false; + snic_trc_debugfs_term(); + + if (trc->buf) { + vfree(trc->buf); + trc->buf = NULL; + } + + SNIC_INFO("Trace Facility Disabled.\n"); +} /* end of snic_trc_free */ diff --git a/drivers/scsi/snic/snic_trc.h b/drivers/scsi/snic/snic_trc.h new file mode 100644 index 000000000..c38e0dadc --- /dev/null +++ b/drivers/scsi/snic/snic_trc.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. 
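snic_trc_init() above sizes the ring as tbuf_sz / SNIC_TRC_ENTRY_SZ, and the header that follows fixes that entry size at 64 bytes, which is exactly what the packed snic_trc_data layout comes to on a 64-bit build (8 + 8 + 4 + 4 + 5*8). A stand-alone sketch that checks the arithmetic and shows how a CDB opcode can be pulled back out of a value packed like the SNIC_TRC_CMD macro defined at the end of that header:

#include <stdint.h>

/* Mirror of the trace-record layout in the header below, 64-bit build assumed. */
struct ex_trc_data {
        uint64_t ts;
        char *fn;
        uint32_t hno;
        uint32_t tag;
        uint64_t data[5];
} __attribute__((__packed__));

_Static_assert(sizeof(struct ex_trc_data) == 64,
               "record size is expected to equal SNIC_TRC_ENTRY_SZ (64)");

/* Recover the CDB opcode (cmnd[0]) from a value packed SNIC_TRC_CMD-style,
 * i.e. with cmnd[0] in the top byte. */
static inline uint8_t ex_trc_cmd_opcode(uint64_t packed)
{
        return (uint8_t)(packed >> 56);
}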
*/ + +#ifndef __SNIC_TRC_H +#define __SNIC_TRC_H + +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + +extern ssize_t simple_read_from_buffer(void __user *to, + size_t count, + loff_t *ppos, + const void *from, + size_t available); + +extern unsigned int snic_trace_max_pages; + +/* Global Data structure for trace to manage trace functionality */ +struct snic_trc_data { + u64 ts; /* Time Stamp */ + char *fn; /* Ptr to Function Name */ + u32 hno; /* SCSI Host ID */ + u32 tag; /* Command Tag */ + u64 data[5]; +} __attribute__((__packed__)); + +#define SNIC_TRC_ENTRY_SZ 64 /* in Bytes */ + +struct snic_trc { + spinlock_t lock; + struct snic_trc_data *buf; /* Trace Buffer */ + u32 max_idx; /* Max Index into trace buffer */ + u32 rd_idx; + u32 wr_idx; + bool enable; /* Control Variable for Tracing */ +}; + +int snic_trc_init(void); +void snic_trc_free(void); +void snic_trc_debugfs_init(void); +void snic_trc_debugfs_term(void); +struct snic_trc_data *snic_get_trc_buf(void); +int snic_get_trc_data(char *buf, int buf_sz); + +void snic_debugfs_init(void); +void snic_debugfs_term(void); + +static inline void +snic_trace(char *fn, u16 hno, u32 tag, u64 d1, u64 d2, u64 d3, u64 d4, u64 d5) +{ + struct snic_trc_data *tr_rec = snic_get_trc_buf(); + + if (!tr_rec) + return; + + tr_rec->fn = (char *)fn; + tr_rec->hno = hno; + tr_rec->tag = tag; + tr_rec->data[0] = d1; + tr_rec->data[1] = d2; + tr_rec->data[2] = d3; + tr_rec->data[3] = d4; + tr_rec->data[4] = d5; + tr_rec->ts = jiffies; /* Update time stamp at last */ +} + +#define SNIC_TRC(_hno, _tag, d1, d2, d3, d4, d5) \ + do { \ + if (unlikely(snic_glob->trc.enable)) \ + snic_trace((char *)__func__, \ + (u16)(_hno), \ + (u32)(_tag), \ + (u64)(d1), \ + (u64)(d2), \ + (u64)(d3), \ + (u64)(d4), \ + (u64)(d5)); \ + } while (0) +#else + +#define SNIC_TRC(_hno, _tag, d1, d2, d3, d4, d5) \ + do { \ + if (unlikely(snic_log_level & 0x2)) \ + SNIC_DBG("SnicTrace: %s %2u %2u %llx %llx %llx %llx %llx", \ + (char *)__func__, \ + (u16)(_hno), \ + (u32)(_tag), \ + (u64)(d1), \ + (u64)(d2), \ + (u64)(d3), \ + (u64)(d4), \ + (u64)(d5)); \ + } while (0) +#endif /* end of CONFIG_SCSI_SNIC_DEBUG_FS */ + +#define SNIC_TRC_CMD(sc) \ + ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 | \ + (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 | \ + (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 | \ + (u64)sc->cmnd[5]) + +#define SNIC_TRC_CMD_STATE_FLAGS(sc) \ + ((u64) CMD_FLAGS(sc) << 32 | CMD_STATE(sc)) + +#endif /* end of __SNIC_TRC_H */ diff --git a/drivers/scsi/snic/vnic_cq.c b/drivers/scsi/snic/vnic_cq.c new file mode 100644 index 000000000..0d5d3bd4b --- /dev/null +++ b/drivers/scsi/snic/vnic_cq.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2014 Cisco Systems, Inc. All rights reserved. 
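/*
 * Editor's note (illustration only, not part of the upstream driver):
 * the completion-queue code that follows relies on a "color bit"
 * handshake.  The producer (hardware) toggles the color it stamps into
 * descriptors each time it wraps the ring, and the consumer stops at the
 * first descriptor whose color still matches its own last_color, flipping
 * last_color whenever to_clean wraps -- compare svnic_cq_service() in
 * vnic_cq.h.  The standalone sketch below (hypothetical demo_* names,
 * plain user-space C, no kernel headers) shows the same wrap-and-flip
 * logic on an ordinary array; it is a minimal model, not the driver's API.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_SIZE 4

struct demo_desc {
	unsigned int data;
	uint8_t color;			/* stamped by the producer */
};

struct demo_cq {
	struct demo_desc ring[DEMO_RING_SIZE];
	unsigned int to_clean;
	uint8_t last_color;
};

/* Producer: post one entry, flipping the stamped color on every wrap. */
static void demo_post(struct demo_cq *cq, unsigned int *wr,
		      uint8_t *wr_color, unsigned int data)
{
	cq->ring[*wr].data = data;
	cq->ring[*wr].color = *wr_color;
	if (++(*wr) == DEMO_RING_SIZE) {
		*wr = 0;
		*wr_color ^= 1;
	}
}

/* Consumer: mirrors the wrap-and-flip loop of svnic_cq_service(). */
static unsigned int demo_service(struct demo_cq *cq)
{
	unsigned int done = 0;

	while (cq->ring[cq->to_clean].color != cq->last_color) {
		printf("consumed %u\n", cq->ring[cq->to_clean].data);
		if (++cq->to_clean == DEMO_RING_SIZE) {
			cq->to_clean = 0;
			cq->last_color ^= 1;
		}
		done++;
	}

	return done;
}

int main(void)
{
	struct demo_cq cq = { .to_clean = 0, .last_color = 0 };
	unsigned int wr = 0, i;
	uint8_t wr_color = 1;		/* producer starts opposite to consumer */

	for (i = 0; i < 3; i++)
		demo_post(&cq, &wr, &wr_color, i);
	printf("first pass: %u entries\n", demo_service(&cq));

	for (i = 3; i < 6; i++)		/* this batch crosses the ring wrap */
		demo_post(&cq, &wr, &wr_color, i);
	printf("second pass: %u entries\n", demo_service(&cq));

	return 0;
}
/* End of editor's illustration; the vnic_cq.c patch continues below. */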
+ +#include +#include +#include +#include "vnic_dev.h" +#include "vnic_cq.h" + +void svnic_cq_free(struct vnic_cq *cq) +{ + svnic_dev_free_desc_ring(cq->vdev, &cq->ring); + + cq->ctrl = NULL; +} + +int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, + unsigned int index, unsigned int desc_count, unsigned int desc_size) +{ + cq->index = index; + cq->vdev = vdev; + + cq->ctrl = svnic_dev_get_res(vdev, RES_TYPE_CQ, index); + if (!cq->ctrl) { + pr_err("Failed to hook CQ[%d] resource\n", index); + + return -EINVAL; + } + + return svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); +} + +void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, + unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, + unsigned int cq_tail_color, unsigned int interrupt_enable, + unsigned int cq_entry_enable, unsigned int cq_message_enable, + unsigned int interrupt_offset, u64 cq_message_addr) +{ + u64 paddr; + + paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &cq->ctrl->ring_base); + iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); + iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable); + iowrite32(color_enable, &cq->ctrl->color_enable); + iowrite32(cq_head, &cq->ctrl->cq_head); + iowrite32(cq_tail, &cq->ctrl->cq_tail); + iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color); + iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable); + iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable); + iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable); + iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset); + writeq(cq_message_addr, &cq->ctrl->cq_message_addr); +} + +void svnic_cq_clean(struct vnic_cq *cq) +{ + cq->to_clean = 0; + cq->last_color = 0; + + iowrite32(0, &cq->ctrl->cq_head); + iowrite32(0, &cq->ctrl->cq_tail); + iowrite32(1, &cq->ctrl->cq_tail_color); + + svnic_dev_clear_desc_ring(&cq->ring); +} diff --git a/drivers/scsi/snic/vnic_cq.h b/drivers/scsi/snic/vnic_cq.h new file mode 100644 index 000000000..6cee911ee --- /dev/null +++ b/drivers/scsi/snic/vnic_cq.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. 
*/ + +#ifndef _VNIC_CQ_H_ +#define _VNIC_CQ_H_ + +#include "cq_desc.h" +#include "vnic_dev.h" + +/* Completion queue control */ +struct vnic_cq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 flow_control_enable; /* 0x10 */ + u32 pad1; + u32 color_enable; /* 0x18 */ + u32 pad2; + u32 cq_head; /* 0x20 */ + u32 pad3; + u32 cq_tail; /* 0x28 */ + u32 pad4; + u32 cq_tail_color; /* 0x30 */ + u32 pad5; + u32 interrupt_enable; /* 0x38 */ + u32 pad6; + u32 cq_entry_enable; /* 0x40 */ + u32 pad7; + u32 cq_message_enable; /* 0x48 */ + u32 pad8; + u32 interrupt_offset; /* 0x50 */ + u32 pad9; + u64 cq_message_addr; /* 0x58 */ + u32 pad10; +}; + +struct vnic_cq { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + unsigned int to_clean; + unsigned int last_color; +}; + +static inline unsigned int svnic_cq_service(struct vnic_cq *cq, + unsigned int work_to_do, + int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc, + u8 type, u16 q_number, u16 completed_index, void *opaque), + void *opaque) +{ + struct cq_desc *cq_desc; + unsigned int work_done = 0; + u16 q_number, completed_index; + u8 type, color; + + cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + cq_desc_dec(cq_desc, &type, &color, + &q_number, &completed_index); + + while (color != cq->last_color) { + + if ((*q_service)(cq->vdev, cq_desc, type, + q_number, completed_index, opaque)) + break; + + cq->to_clean++; + if (cq->to_clean == cq->ring.desc_count) { + cq->to_clean = 0; + cq->last_color = cq->last_color ? 0 : 1; + } + + cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + cq_desc_dec(cq_desc, &type, &color, + &q_number, &completed_index); + + work_done++; + if (work_done >= work_to_do) + break; + } + + return work_done; +} + +void svnic_cq_free(struct vnic_cq *cq); +int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, + unsigned int index, unsigned int desc_count, unsigned int desc_size); +void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, + unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, + unsigned int cq_tail_color, unsigned int interrupt_enable, + unsigned int cq_entry_enable, unsigned int message_enable, + unsigned int interrupt_offset, u64 message_addr); +void svnic_cq_clean(struct vnic_cq *cq); +#endif /* _VNIC_CQ_H_ */ diff --git a/drivers/scsi/snic/vnic_cq_fw.h b/drivers/scsi/snic/vnic_cq_fw.h new file mode 100644 index 000000000..d74954bc7 --- /dev/null +++ b/drivers/scsi/snic/vnic_cq_fw.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */ + +#ifndef _VNIC_CQ_FW_H_ +#define _VNIC_CQ_FW_H_ + +#include "snic_fwint.h" + +static inline unsigned int +vnic_cq_fw_service(struct vnic_cq *cq, + int (*q_service)(struct vnic_dev *vdev, + unsigned int index, + struct snic_fw_req *desc), + unsigned int work_to_do) + +{ + struct snic_fw_req *desc; + unsigned int work_done = 0; + u8 color; + + desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + snic_color_dec(desc, &color); + + while (color != cq->last_color) { + + if ((*q_service)(cq->vdev, cq->index, desc)) + break; + + cq->to_clean++; + if (cq->to_clean == cq->ring.desc_count) { + cq->to_clean = 0; + cq->last_color = cq->last_color ? 
0 : 1; + } + + desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + snic_color_dec(desc, &color); + + work_done++; + if (work_done >= work_to_do) + break; + } + + return work_done; +} + +#endif /* _VNIC_CQ_FW_H_ */ diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c new file mode 100644 index 000000000..760f3f220 --- /dev/null +++ b/drivers/scsi/snic/vnic_dev.c @@ -0,0 +1,749 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2014 Cisco Systems, Inc. All rights reserved. + +#include +#include +#include +#include +#include +#include +#include +#include "vnic_resource.h" +#include "vnic_devcmd.h" +#include "vnic_dev.h" +#include "vnic_stats.h" +#include "vnic_wq.h" + +#define VNIC_DVCMD_TMO 10000 /* Devcmd Timeout value */ +#define VNIC_NOTIFY_INTR_MASK 0x0000ffff00000000ULL + +struct devcmd2_controller { + struct vnic_wq_ctrl __iomem *wq_ctrl; + struct vnic_dev_ring results_ring; + struct vnic_wq wq; + struct vnic_devcmd2 *cmd_ring; + struct devcmd2_result *result; + u16 next_result; + u16 result_size; + int color; +}; + +struct vnic_res { + void __iomem *vaddr; + unsigned int count; +}; + +struct vnic_dev { + void *priv; + struct pci_dev *pdev; + struct vnic_res res[RES_TYPE_MAX]; + enum vnic_dev_intr_mode intr_mode; + struct vnic_devcmd __iomem *devcmd; + struct vnic_devcmd_notify *notify; + struct vnic_devcmd_notify notify_copy; + dma_addr_t notify_pa; + u32 *linkstatus; + dma_addr_t linkstatus_pa; + struct vnic_stats *stats; + dma_addr_t stats_pa; + struct vnic_devcmd_fw_info *fw_info; + dma_addr_t fw_info_pa; + u64 args[VNIC_DEVCMD_NARGS]; + struct devcmd2_controller *devcmd2; + + int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait); +}; + +#define VNIC_MAX_RES_HDR_SIZE \ + (sizeof(struct vnic_resource_header) + \ + sizeof(struct vnic_resource) * RES_TYPE_MAX) +#define VNIC_RES_STRIDE 128 + +void *svnic_dev_priv(struct vnic_dev *vdev) +{ + return vdev->priv; +} + +static int vnic_dev_discover_res(struct vnic_dev *vdev, + struct vnic_dev_bar *bar, unsigned int num_bars) +{ + struct vnic_resource_header __iomem *rh; + struct vnic_resource __iomem *r; + u8 type; + + if (num_bars == 0) + return -EINVAL; + + if (bar->len < VNIC_MAX_RES_HDR_SIZE) { + pr_err("vNIC BAR0 res hdr length error\n"); + + return -EINVAL; + } + + rh = bar->vaddr; + if (!rh) { + pr_err("vNIC BAR0 res hdr not mem-mapped\n"); + + return -EINVAL; + } + + if (ioread32(&rh->magic) != VNIC_RES_MAGIC || + ioread32(&rh->version) != VNIC_RES_VERSION) { + pr_err("vNIC BAR0 res magic/version error exp (%lx/%lx) curr (%x/%x)\n", + VNIC_RES_MAGIC, VNIC_RES_VERSION, + ioread32(&rh->magic), ioread32(&rh->version)); + + return -EINVAL; + } + + r = (struct vnic_resource __iomem *)(rh + 1); + + while ((type = ioread8(&r->type)) != RES_TYPE_EOL) { + + u8 bar_num = ioread8(&r->bar); + u32 bar_offset = ioread32(&r->bar_offset); + u32 count = ioread32(&r->count); + u32 len; + + r++; + + if (bar_num >= num_bars) + continue; + + if (!bar[bar_num].len || !bar[bar_num].vaddr) + continue; + + switch (type) { + case RES_TYPE_WQ: + case RES_TYPE_RQ: + case RES_TYPE_CQ: + case RES_TYPE_INTR_CTRL: + /* each count is stride bytes long */ + len = count * VNIC_RES_STRIDE; + if (len + bar_offset > bar->len) { + pr_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n", + type, bar_offset, + len, + bar->len); + + return -EINVAL; + } + break; + + case RES_TYPE_INTR_PBA_LEGACY: + case RES_TYPE_DEVCMD: + case RES_TYPE_DEVCMD2: + len 
= count; + break; + + default: + continue; + } + + vdev->res[type].count = count; + vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset; + } + + return 0; +} + +unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev, + enum vnic_res_type type) +{ + return vdev->res[type].count; +} + +void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, + unsigned int index) +{ + if (!vdev->res[type].vaddr) + return NULL; + + switch (type) { + case RES_TYPE_WQ: + case RES_TYPE_RQ: + case RES_TYPE_CQ: + case RES_TYPE_INTR_CTRL: + return (char __iomem *)vdev->res[type].vaddr + + index * VNIC_RES_STRIDE; + + default: + return (char __iomem *)vdev->res[type].vaddr; + } +} + +unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring, + unsigned int desc_count, + unsigned int desc_size) +{ + /* The base address of the desc rings must be 512 byte aligned. + * Descriptor count is aligned to groups of 32 descriptors. A + * count of 0 means the maximum 4096 descriptors. Descriptor + * size is aligned to 16 bytes. + */ + + unsigned int count_align = 32; + unsigned int desc_align = 16; + + ring->base_align = 512; + + if (desc_count == 0) + desc_count = 4096; + + ring->desc_count = ALIGN(desc_count, count_align); + + ring->desc_size = ALIGN(desc_size, desc_align); + + ring->size = ring->desc_count * ring->desc_size; + ring->size_unaligned = ring->size + ring->base_align; + + return ring->size_unaligned; +} + +void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) +{ + memset(ring->descs, 0, ring->size); +} + +int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size) +{ + svnic_dev_desc_ring_size(ring, desc_count, desc_size); + + ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev, + ring->size_unaligned, &ring->base_addr_unaligned, + GFP_KERNEL); + if (!ring->descs_unaligned) { + pr_err("Failed to allocate ring (size=%d), aborting\n", + (int)ring->size); + + return -ENOMEM; + } + + ring->base_addr = ALIGN(ring->base_addr_unaligned, + ring->base_align); + ring->descs = (u8 *)ring->descs_unaligned + + (ring->base_addr - ring->base_addr_unaligned); + + svnic_dev_clear_desc_ring(ring); + + ring->desc_avail = ring->desc_count - 1; + + return 0; +} + +void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring) +{ + if (ring->descs) { + dma_free_coherent(&vdev->pdev->dev, + ring->size_unaligned, + ring->descs_unaligned, + ring->base_addr_unaligned); + ring->descs = NULL; + } +} + +static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait) +{ + struct devcmd2_controller *dc2c = vdev->devcmd2; + struct devcmd2_result *result = NULL; + unsigned int i; + int delay; + int err; + u32 posted; + u32 fetch_idx; + u32 new_posted; + u8 color; + + fetch_idx = ioread32(&dc2c->wq_ctrl->fetch_index); + if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */ + /* Hardware surprise removal: return error */ + return -ENODEV; + } + + posted = ioread32(&dc2c->wq_ctrl->posted_index); + + if (posted == 0xFFFFFFFF) { /* check for hardware gone */ + /* Hardware surprise removal: return error */ + return -ENODEV; + } + + new_posted = (posted + 1) % DEVCMD2_RING_SIZE; + if (new_posted == fetch_idx) { + pr_err("%s: wq is full while issuing devcmd2 command %d, fetch index: %u, posted index: %u\n", + pci_name(vdev->pdev), _CMD_N(cmd), fetch_idx, posted); + + return -EBUSY; + } + + dc2c->cmd_ring[posted].cmd = cmd; + dc2c->cmd_ring[posted].flags = 0; + + if 
((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) + dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT; + + if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + dc2c->cmd_ring[posted].args[i] = vdev->args[i]; + } + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. Otherwise, hardware can read + * stale descriptor fields. + */ + wmb(); + iowrite32(new_posted, &dc2c->wq_ctrl->posted_index); + + if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) + return 0; + + result = dc2c->result + dc2c->next_result; + color = dc2c->color; + + /* + * Increment next_result, after posting the devcmd, irrespective of + * devcmd result, and it should be done only once. + */ + dc2c->next_result++; + if (dc2c->next_result == dc2c->result_size) { + dc2c->next_result = 0; + dc2c->color = dc2c->color ? 0 : 1; + } + + for (delay = 0; delay < wait; delay++) { + udelay(100); + if (result->color == color) { + if (result->error) { + err = (int) result->error; + if (err != ERR_ECMDUNKNOWN || + cmd != CMD_CAPABILITY) + pr_err("Error %d devcmd %d\n", + err, _CMD_N(cmd)); + + return err; + } + if (_CMD_DIR(cmd) & _CMD_DIR_READ) { + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + vdev->args[i] = result->results[i]; + } + + return 0; + } + } + + pr_err("Timed out devcmd %d\n", _CMD_N(cmd)); + + return -ETIMEDOUT; +} + +static int svnic_dev_init_devcmd2(struct vnic_dev *vdev) +{ + struct devcmd2_controller *dc2c = NULL; + unsigned int fetch_idx; + int ret; + void __iomem *p; + + if (vdev->devcmd2) + return 0; + + p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); + if (!p) + return -ENODEV; + + dc2c = kzalloc(sizeof(*dc2c), GFP_ATOMIC); + if (!dc2c) + return -ENOMEM; + + vdev->devcmd2 = dc2c; + + dc2c->color = 1; + dc2c->result_size = DEVCMD2_RING_SIZE; + + ret = vnic_wq_devcmd2_alloc(vdev, + &dc2c->wq, + DEVCMD2_RING_SIZE, + DEVCMD2_DESC_SIZE); + if (ret) + goto err_free_devcmd2; + + fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index); + if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */ + /* Hardware surprise removal: reset fetch_index */ + fetch_idx = 0; + } + + /* + * Don't change fetch_index ever and + * set posted_index same as fetch_index + * when setting up the WQ for devcmd2. 
+ */ + vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0); + svnic_wq_enable(&dc2c->wq); + ret = svnic_dev_alloc_desc_ring(vdev, + &dc2c->results_ring, + DEVCMD2_RING_SIZE, + DEVCMD2_DESC_SIZE); + if (ret) + goto err_free_wq; + + dc2c->result = (struct devcmd2_result *) dc2c->results_ring.descs; + dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs; + dc2c->wq_ctrl = dc2c->wq.ctrl; + vdev->args[0] = (u64) dc2c->results_ring.base_addr | VNIC_PADDR_TARGET; + vdev->args[1] = DEVCMD2_RING_SIZE; + + ret = _svnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, VNIC_DVCMD_TMO); + if (ret < 0) + goto err_free_desc_ring; + + vdev->devcmd_rtn = &_svnic_dev_cmd2; + pr_info("DEVCMD2 Initialized.\n"); + + return ret; + +err_free_desc_ring: + svnic_dev_free_desc_ring(vdev, &dc2c->results_ring); + +err_free_wq: + svnic_wq_disable(&dc2c->wq); + svnic_wq_free(&dc2c->wq); + +err_free_devcmd2: + kfree(dc2c); + vdev->devcmd2 = NULL; + + return ret; +} /* end of svnic_dev_init_devcmd2 */ + +static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev) +{ + struct devcmd2_controller *dc2c = vdev->devcmd2; + + vdev->devcmd2 = NULL; + vdev->devcmd_rtn = NULL; + + svnic_dev_free_desc_ring(vdev, &dc2c->results_ring); + svnic_wq_disable(&dc2c->wq); + svnic_wq_free(&dc2c->wq); + kfree(dc2c); +} + +int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + u64 *a0, u64 *a1, int wait) +{ + int err; + + memset(vdev->args, 0, sizeof(vdev->args)); + vdev->args[0] = *a0; + vdev->args[1] = *a1; + + err = (*vdev->devcmd_rtn)(vdev, cmd, wait); + + *a0 = vdev->args[0]; + *a1 = vdev->args[1]; + + return err; +} + +int svnic_dev_fw_info(struct vnic_dev *vdev, + struct vnic_devcmd_fw_info **fw_info) +{ + u64 a0, a1 = 0; + int wait = VNIC_DVCMD_TMO; + int err = 0; + + if (!vdev->fw_info) { + vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev, + sizeof(struct vnic_devcmd_fw_info), + &vdev->fw_info_pa, GFP_KERNEL); + if (!vdev->fw_info) + return -ENOMEM; + + a0 = vdev->fw_info_pa; + + /* only get fw_info once and cache it */ + err = svnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait); + } + + *fw_info = vdev->fw_info; + + return err; +} + +int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, + unsigned int size, void *value) +{ + u64 a0, a1; + int wait = VNIC_DVCMD_TMO; + int err; + + a0 = offset; + a1 = size; + + err = svnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait); + + switch (size) { + case 1: + *(u8 *)value = (u8)a0; + break; + case 2: + *(u16 *)value = (u16)a0; + break; + case 4: + *(u32 *)value = (u32)a0; + break; + case 8: + *(u64 *)value = a0; + break; + default: + BUG(); + break; + } + + return err; +} + +int svnic_dev_stats_clear(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = VNIC_DVCMD_TMO; + + return svnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait); +} + +int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) +{ + u64 a0, a1; + int wait = VNIC_DVCMD_TMO; + + if (!vdev->stats) { + vdev->stats = dma_alloc_coherent(&vdev->pdev->dev, + sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL); + if (!vdev->stats) + return -ENOMEM; + } + + *stats = vdev->stats; + a0 = vdev->stats_pa; + a1 = sizeof(struct vnic_stats); + + return svnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait); +} + +int svnic_dev_close(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = VNIC_DVCMD_TMO; + + return svnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait); +} + +int svnic_dev_enable_wait(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = VNIC_DVCMD_TMO; + int err = 
0; + + err = svnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait); + if (err == ERR_ECMDUNKNOWN) + return svnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); + + return err; +} + +int svnic_dev_disable(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = VNIC_DVCMD_TMO; + + return svnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait); +} + +int svnic_dev_open(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = VNIC_DVCMD_TMO; + + return svnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait); +} + +int svnic_dev_open_done(struct vnic_dev *vdev, int *done) +{ + u64 a0 = 0, a1 = 0; + int wait = VNIC_DVCMD_TMO; + int err; + + *done = 0; + + err = svnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait); + if (err) + return err; + + *done = (a0 == 0); + + return 0; +} + +int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) +{ + u64 a0, a1; + int wait = VNIC_DVCMD_TMO; + + if (!vdev->notify) { + vdev->notify = dma_alloc_coherent(&vdev->pdev->dev, + sizeof(struct vnic_devcmd_notify), + &vdev->notify_pa, GFP_KERNEL); + if (!vdev->notify) + return -ENOMEM; + } + + a0 = vdev->notify_pa; + a1 = ((u64)intr << 32) & VNIC_NOTIFY_INTR_MASK; + a1 += sizeof(struct vnic_devcmd_notify); + + return svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); +} + +void svnic_dev_notify_unset(struct vnic_dev *vdev) +{ + u64 a0, a1; + int wait = VNIC_DVCMD_TMO; + + a0 = 0; /* paddr = 0 to unset notify buffer */ + a1 = VNIC_NOTIFY_INTR_MASK; /* intr num = -1 to unreg for intr */ + a1 += sizeof(struct vnic_devcmd_notify); + + svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); +} + +static int vnic_dev_notify_ready(struct vnic_dev *vdev) +{ + u32 *words; + unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4; + unsigned int i; + u32 csum; + + if (!vdev->notify) + return 0; + + do { + csum = 0; + memcpy(&vdev->notify_copy, vdev->notify, + sizeof(struct vnic_devcmd_notify)); + words = (u32 *)&vdev->notify_copy; + for (i = 1; i < nwords; i++) + csum += words[i]; + } while (csum != words[0]); + + return 1; +} + +int svnic_dev_init(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = VNIC_DVCMD_TMO; + + return svnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); +} + +int svnic_dev_link_status(struct vnic_dev *vdev) +{ + if (vdev->linkstatus) + return *vdev->linkstatus; + + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.link_state; +} + +u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.link_down_cnt; +} + +void svnic_dev_set_intr_mode(struct vnic_dev *vdev, + enum vnic_dev_intr_mode intr_mode) +{ + vdev->intr_mode = intr_mode; +} + +enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev) +{ + return vdev->intr_mode; +} + +void svnic_dev_unregister(struct vnic_dev *vdev) +{ + if (vdev) { + if (vdev->notify) + dma_free_coherent(&vdev->pdev->dev, + sizeof(struct vnic_devcmd_notify), + vdev->notify, + vdev->notify_pa); + if (vdev->linkstatus) + dma_free_coherent(&vdev->pdev->dev, + sizeof(u32), + vdev->linkstatus, + vdev->linkstatus_pa); + if (vdev->stats) + dma_free_coherent(&vdev->pdev->dev, + sizeof(struct vnic_stats), + vdev->stats, vdev->stats_pa); + if (vdev->fw_info) + dma_free_coherent(&vdev->pdev->dev, + sizeof(struct vnic_devcmd_fw_info), + vdev->fw_info, vdev->fw_info_pa); + if (vdev->devcmd2) + vnic_dev_deinit_devcmd2(vdev); + kfree(vdev); + } +} + +struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev, + void *priv, + struct pci_dev *pdev, + struct 
vnic_dev_bar *bar, + unsigned int num_bars) +{ + if (!vdev) { + vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC); + if (!vdev) + return NULL; + } + + vdev->priv = priv; + vdev->pdev = pdev; + + if (vnic_dev_discover_res(vdev, bar, num_bars)) + goto err_out; + + return vdev; + +err_out: + svnic_dev_unregister(vdev); + + return NULL; +} /* end of svnic_dev_alloc_discover */ + +/* + * fallback option is left to keep the interface common for other vnics. + */ +int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback) +{ + int err = -ENODEV; + void __iomem *p; + + p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); + if (p) + err = svnic_dev_init_devcmd2(vdev); + else + pr_err("DEVCMD2 resource not found.\n"); + + return err; +} /* end of svnic_dev_cmd_init */ diff --git a/drivers/scsi/snic/vnic_dev.h b/drivers/scsi/snic/vnic_dev.h new file mode 100644 index 000000000..d2f9b6f7b --- /dev/null +++ b/drivers/scsi/snic/vnic_dev.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */ + +#ifndef _VNIC_DEV_H_ +#define _VNIC_DEV_H_ + +#include "vnic_resource.h" +#include "vnic_devcmd.h" + +#ifndef VNIC_PADDR_TARGET +#define VNIC_PADDR_TARGET 0x0000000000000000ULL +#endif + +#ifndef readq +static inline u64 readq(void __iomem *reg) +{ + return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg); +} + +static inline void writeq(u64 val, void __iomem *reg) +{ + writel(lower_32_bits(val), reg); + writel(upper_32_bits(val), reg + 0x4UL); +} +#endif + +enum vnic_dev_intr_mode { + VNIC_DEV_INTR_MODE_UNKNOWN, + VNIC_DEV_INTR_MODE_INTX, + VNIC_DEV_INTR_MODE_MSI, + VNIC_DEV_INTR_MODE_MSIX, +}; + +struct vnic_dev_bar { + void __iomem *vaddr; + dma_addr_t bus_addr; + unsigned long len; +}; + +struct vnic_dev_ring { + void *descs; + size_t size; + dma_addr_t base_addr; + size_t base_align; + void *descs_unaligned; + size_t size_unaligned; + dma_addr_t base_addr_unaligned; + unsigned int desc_size; + unsigned int desc_count; + unsigned int desc_avail; +}; + +struct vnic_dev; +struct vnic_stats; + +void *svnic_dev_priv(struct vnic_dev *vdev); +unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev, + enum vnic_res_type type); +void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, + unsigned int index); +unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring, + unsigned int desc_count, + unsigned int desc_size); +void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring); +int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size); +void svnic_dev_free_desc_ring(struct vnic_dev *vdev, + struct vnic_dev_ring *ring); +int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + u64 *a0, u64 *a1, int wait); +int svnic_dev_fw_info(struct vnic_dev *vdev, + struct vnic_devcmd_fw_info **fw_info); +int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, + unsigned int size, void *value); +int svnic_dev_stats_clear(struct vnic_dev *vdev); +int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); +int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); +void svnic_dev_notify_unset(struct vnic_dev *vdev); +int svnic_dev_link_status(struct vnic_dev *vdev); +u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev); +int svnic_dev_close(struct vnic_dev *vdev); +int svnic_dev_enable_wait(struct vnic_dev *vdev); +int svnic_dev_disable(struct vnic_dev *vdev); +int svnic_dev_open(struct vnic_dev *vdev, int arg); +int 
svnic_dev_open_done(struct vnic_dev *vdev, int *done); +int svnic_dev_init(struct vnic_dev *vdev, int arg); +struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev, + void *priv, struct pci_dev *pdev, + struct vnic_dev_bar *bar, + unsigned int num_bars); +void svnic_dev_set_intr_mode(struct vnic_dev *vdev, + enum vnic_dev_intr_mode intr_mode); +enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev); +void svnic_dev_unregister(struct vnic_dev *vdev); +int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback); +#endif /* _VNIC_DEV_H_ */ diff --git a/drivers/scsi/snic/vnic_devcmd.h b/drivers/scsi/snic/vnic_devcmd.h new file mode 100644 index 000000000..9d82fcb74 --- /dev/null +++ b/drivers/scsi/snic/vnic_devcmd.h @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */ + +#ifndef _VNIC_DEVCMD_H_ +#define _VNIC_DEVCMD_H_ + +#define _CMD_NBITS 14 +#define _CMD_VTYPEBITS 10 +#define _CMD_FLAGSBITS 6 +#define _CMD_DIRBITS 2 + +#define _CMD_NMASK ((1 << _CMD_NBITS)-1) +#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1) +#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1) +#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1) + +#define _CMD_NSHIFT 0 +#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS) +#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS) +#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS) + +/* + * Direction bits (from host perspective). + */ +#define _CMD_DIR_NONE 0U +#define _CMD_DIR_WRITE 1U +#define _CMD_DIR_READ 2U +#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ) + +/* + * Flag bits. + */ +#define _CMD_FLAGS_NONE 0U +#define _CMD_FLAGS_NOWAIT 1U + +/* + * vNIC type bits. + */ +#define _CMD_VTYPE_NONE 0U +#define _CMD_VTYPE_ENET 1U +#define _CMD_VTYPE_FC 2U +#define _CMD_VTYPE_SCSI 4U +#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI) + +/* + * Used to create cmds.. +*/ +#define _CMDCF(dir, flags, vtype, nr) \ + (((dir) << _CMD_DIRSHIFT) | \ + ((flags) << _CMD_FLAGSSHIFT) | \ + ((vtype) << _CMD_VTYPESHIFT) | \ + ((nr) << _CMD_NSHIFT)) +#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr) +#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr) + +/* + * Used to decode cmds.. 
+*/ +#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK) +#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK) +#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK) +#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK) + +enum vnic_devcmd_cmd { + CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0), + + /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */ + CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1), + + /* dev-specific block member: + * in: (u16)a0=offset,(u8)a1=size + * out: a0=value */ + CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2), + + /* stats clear */ + CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3), + + /* stats dump in mem: (u64)a0=paddr to stats area, + * (u16)a1=sizeof stats area */ + CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4), + + /* nic_cfg in (u32)a0 */ + CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16), + + /* set struct vnic_devcmd_notify buffer in mem: + * in: + * (u64)a0=paddr to notify (set paddr=0 to unset) + * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify) + * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr) + * out: + * (u32)a1 = effective size + */ + CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21), + + /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */ + CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23), + + /* open status: + * out: a0=0 open complete, a0=1 open in progress */ + CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24), + + /* close vnic */ + CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25), + + /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */ + CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26), + + /* enable virtual link */ + CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), + + /* enable virtual link, waiting variant. */ + CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), + + /* disable virtual link */ + CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29), + + /* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */ + CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30), + + /* init status: + * out: a0=0 init complete, a0=1 init in progress + * if a0=0, a1=errno */ + CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31), + + /* undo initialize of virtual link */ + CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), + + /* check fw capability of a cmd: + * in: (u32)a0=cmd + * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */ + CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36), + + /* + * Initialization for the devcmd2 interface. 
+ * in: (u64) a0=host result buffer physical address + * in: (u16) a1=number of entries in result buffer + */ + CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57) +}; + +/* flags for CMD_OPEN */ +#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */ + +/* flags for CMD_INIT */ +#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */ + +/* flags for CMD_PACKET_FILTER */ +#define CMD_PFILTER_DIRECTED 0x01 +#define CMD_PFILTER_MULTICAST 0x02 +#define CMD_PFILTER_BROADCAST 0x04 +#define CMD_PFILTER_PROMISCUOUS 0x08 +#define CMD_PFILTER_ALL_MULTICAST 0x10 + +enum vnic_devcmd_status { + STAT_NONE = 0, + STAT_BUSY = 1 << 0, /* cmd in progress */ + STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */ +}; + +enum vnic_devcmd_error { + ERR_SUCCESS = 0, + ERR_EINVAL = 1, + ERR_EFAULT = 2, + ERR_EPERM = 3, + ERR_EBUSY = 4, + ERR_ECMDUNKNOWN = 5, + ERR_EBADSTATE = 6, + ERR_ENOMEM = 7, + ERR_ETIMEDOUT = 8, + ERR_ELINKDOWN = 9, +}; + +struct vnic_devcmd_fw_info { + char fw_version[32]; + char fw_build[32]; + char hw_version[32]; + char hw_serial_number[32]; +}; + +struct vnic_devcmd_notify { + u32 csum; /* checksum over following words */ + + u32 link_state; /* link up == 1 */ + u32 port_speed; /* effective port speed (rate limit) */ + u32 mtu; /* MTU */ + u32 msglvl; /* requested driver msg lvl */ + u32 uif; /* uplink interface */ + u32 status; /* status bits (see VNIC_STF_*) */ + u32 error; /* error code (see ERR_*) for first ERR */ + u32 link_down_cnt; /* running count of link down transitions */ +}; +#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */ + +struct vnic_devcmd_provinfo { + u8 oui[3]; + u8 type; + u8 data[]; +}; + +/* + * Writing cmd register causes STAT_BUSY to get set in status register. + * When cmd completes, STAT_BUSY will be cleared. + * + * If cmd completed successfully STAT_ERROR will be clear + * and args registers contain cmd-specific results. + * + * If cmd error, STAT_ERROR will be set and args[0] contains error code. + * + * status register is read-only. While STAT_BUSY is set, + * all other register contents are read-only. + */ + +/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */ +#define VNIC_DEVCMD_NARGS 15 +struct vnic_devcmd { + u32 status; /* RO */ + u32 cmd; /* RW */ + u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */ +}; + + +/* + * Version 2 of the interface. + * + * Some things are carried over, notably the vnic_devcmd_cmd enum. + */ + +/* + * Flags for vnic_devcmd2.flags + */ + +#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */ + +#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS +struct vnic_devcmd2 { + u16 pad; + u16 flags; + u32 cmd; /* same command #defines as original */ + u64 args[VNIC_DEVCMD2_NARGS]; +}; + +#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS +struct devcmd2_result { + u64 results[VNIC_DEVCMD2_NRESULTS]; + u32 pad; + u16 completed_index; /* into copy WQ */ + u8 error; /* same error codes as original */ + u8 color; /* 0 or 1 as with completion queues */ +}; + +#define DEVCMD2_RING_SIZE 32 +#define DEVCMD2_DESC_SIZE 128 + +#define DEVCMD2_RESULTS_SIZE_MAX ((1 << 16) - 1) + +#endif /* _VNIC_DEVCMD_H_ */ diff --git a/drivers/scsi/snic/vnic_intr.c b/drivers/scsi/snic/vnic_intr.c new file mode 100644 index 000000000..23627f959 --- /dev/null +++ b/drivers/scsi/snic/vnic_intr.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2014 Cisco Systems, Inc. All rights reserved. 
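/*
 * Editor's note (illustration only, not part of the upstream driver):
 * svnic_intr_return_credits() in vnic_intr.h packs three fields into the
 * single int_credit_return register write: the credit count in the low
 * 16 bits, an "unmask" flag at bit 16 and a "reset coalescing timer"
 * flag at bit 17.  The hypothetical demo_* sketch below is plain
 * user-space C that reproduces that packing and checks it; it compiles
 * with any C compiler and makes no claim about other register bits.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_INTR_UNMASK_SHIFT		16
#define DEMO_INTR_RESET_TIMER_SHIFT	17

/* Build the register image the same way svnic_intr_return_credits() does. */
static uint32_t demo_pack_credit_return(unsigned int credits, int unmask,
					int reset_timer)
{
	return (credits & 0xffff) |
	       (unmask ? (1u << DEMO_INTR_UNMASK_SHIFT) : 0) |
	       (reset_timer ? (1u << DEMO_INTR_RESET_TIMER_SHIFT) : 0);
}

int main(void)
{
	uint32_t val = demo_pack_credit_return(300, 1, 1);

	assert((val & 0xffff) == 300);				/* credits */
	assert(val & (1u << DEMO_INTR_UNMASK_SHIFT));		/* unmask */
	assert(val & (1u << DEMO_INTR_RESET_TIMER_SHIFT));	/* restart timer */
	printf("int_credit_return image: 0x%08x\n", (unsigned int)val);

	return 0;
}
/* End of editor's illustration; the vnic_intr.c patch continues below. */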
+ +#include +#include +#include +#include +#include +#include "vnic_dev.h" +#include "vnic_intr.h" + +void svnic_intr_free(struct vnic_intr *intr) +{ + intr->ctrl = NULL; +} + +int svnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, + unsigned int index) +{ + intr->index = index; + intr->vdev = vdev; + + intr->ctrl = svnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); + if (!intr->ctrl) { + pr_err("Failed to hook INTR[%d].ctrl resource\n", + index); + return -EINVAL; + } + + return 0; +} + +void svnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, + unsigned int coalescing_type, unsigned int mask_on_assertion) +{ + iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer); + iowrite32(coalescing_type, &intr->ctrl->coalescing_type); + iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion); + iowrite32(0, &intr->ctrl->int_credits); +} + +void svnic_intr_clean(struct vnic_intr *intr) +{ + iowrite32(0, &intr->ctrl->int_credits); +} diff --git a/drivers/scsi/snic/vnic_intr.h b/drivers/scsi/snic/vnic_intr.h new file mode 100644 index 000000000..7bff60faf --- /dev/null +++ b/drivers/scsi/snic/vnic_intr.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */ + +#ifndef _VNIC_INTR_H_ +#define _VNIC_INTR_H_ + +#include +#include "vnic_dev.h" + +#define VNIC_INTR_TIMER_MAX 0xffff + +#define VNIC_INTR_TIMER_TYPE_ABS 0 +#define VNIC_INTR_TIMER_TYPE_QUIET 1 + +/* Interrupt control */ +struct vnic_intr_ctrl { + u32 coalescing_timer; /* 0x00 */ + u32 pad0; + u32 coalescing_value; /* 0x08 */ + u32 pad1; + u32 coalescing_type; /* 0x10 */ + u32 pad2; + u32 mask_on_assertion; /* 0x18 */ + u32 pad3; + u32 mask; /* 0x20 */ + u32 pad4; + u32 int_credits; /* 0x28 */ + u32 pad5; + u32 int_credit_return; /* 0x30 */ + u32 pad6; +}; + +struct vnic_intr { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */ +}; + +static inline void +svnic_intr_unmask(struct vnic_intr *intr) +{ + iowrite32(0, &intr->ctrl->mask); +} + +static inline void +svnic_intr_mask(struct vnic_intr *intr) +{ + iowrite32(1, &intr->ctrl->mask); +} + +static inline void +svnic_intr_return_credits(struct vnic_intr *intr, + unsigned int credits, + int unmask, + int reset_timer) +{ +#define VNIC_INTR_UNMASK_SHIFT 16 +#define VNIC_INTR_RESET_TIMER_SHIFT 17 + + u32 int_credit_return = (credits & 0xffff) | + (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) | + (reset_timer ? 
(1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0); + + iowrite32(int_credit_return, &intr->ctrl->int_credit_return); +} + +static inline unsigned int +svnic_intr_credits(struct vnic_intr *intr) +{ + return ioread32(&intr->ctrl->int_credits); +} + +static inline void +svnic_intr_return_all_credits(struct vnic_intr *intr) +{ + unsigned int credits = svnic_intr_credits(intr); + int unmask = 1; + int reset_timer = 1; + + svnic_intr_return_credits(intr, credits, unmask, reset_timer); +} + +void svnic_intr_free(struct vnic_intr *); +int svnic_intr_alloc(struct vnic_dev *, struct vnic_intr *, unsigned int); +void svnic_intr_init(struct vnic_intr *intr, + unsigned int coalescing_timer, + unsigned int coalescing_type, + unsigned int mask_on_assertion); +void svnic_intr_clean(struct vnic_intr *); + +#endif /* _VNIC_INTR_H_ */ diff --git a/drivers/scsi/snic/vnic_resource.h b/drivers/scsi/snic/vnic_resource.h new file mode 100644 index 000000000..372596b09 --- /dev/null +++ b/drivers/scsi/snic/vnic_resource.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */ + +#ifndef _VNIC_RESOURCE_H_ +#define _VNIC_RESOURCE_H_ + +#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */ +#define VNIC_RES_VERSION 0x00000000L + +/* vNIC resource types */ +enum vnic_res_type { + RES_TYPE_EOL, /* End-of-list */ + RES_TYPE_WQ, /* Work queues */ + RES_TYPE_RQ, /* Receive queues */ + RES_TYPE_CQ, /* Completion queues */ + RES_TYPE_RSVD1, + RES_TYPE_NIC_CFG, /* Enet NIC config registers */ + RES_TYPE_RSVD2, + RES_TYPE_RSVD3, + RES_TYPE_RSVD4, + RES_TYPE_RSVD5, + RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */ + RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */ + RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */ + RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */ + RES_TYPE_RSVD6, + RES_TYPE_RSVD7, + RES_TYPE_DEVCMD, /* Device command region */ + RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */ + RES_TYPE_SUBVNIC, /* subvnic resource type */ + RES_TYPE_MQ_WQ, /* MQ Work queues */ + RES_TYPE_MQ_RQ, /* MQ Receive queues */ + RES_TYPE_MQ_CQ, /* MQ Completion queues */ + RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */ + RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */ + RES_TYPE_DEVCMD2, /* Device control region */ + + RES_TYPE_MAX, /* Count of resource types */ +}; + +struct vnic_resource_header { + u32 magic; + u32 version; +}; + +struct vnic_resource { + u8 type; + u8 bar; + u8 pad[2]; + u32 bar_offset; + u32 count; +}; + +#endif /* _VNIC_RESOURCE_H_ */ diff --git a/drivers/scsi/snic/vnic_snic.h b/drivers/scsi/snic/vnic_snic.h new file mode 100644 index 000000000..ffc8a0fee --- /dev/null +++ b/drivers/scsi/snic/vnic_snic.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. 
*/ + +#ifndef _VNIC_SNIC_H_ +#define _VNIC_SNIC_H_ + +#define VNIC_SNIC_WQ_DESCS_MIN 64 +#define VNIC_SNIC_WQ_DESCS_MAX 1024 + +#define VNIC_SNIC_MAXDATAFIELDSIZE_MIN 256 +#define VNIC_SNIC_MAXDATAFIELDSIZE_MAX 2112 + +#define VNIC_SNIC_IO_THROTTLE_COUNT_MIN 1 +#define VNIC_SNIC_IO_THROTTLE_COUNT_MAX 1024 + +#define VNIC_SNIC_PORT_DOWN_TIMEOUT_MIN 0 +#define VNIC_SNIC_PORT_DOWN_TIMEOUT_MAX 240000 + +#define VNIC_SNIC_PORT_DOWN_IO_RETRIES_MIN 0 +#define VNIC_SNIC_PORT_DOWN_IO_RETRIES_MAX 255 + +#define VNIC_SNIC_LUNS_PER_TARGET_MIN 1 +#define VNIC_SNIC_LUNS_PER_TARGET_MAX 1024 + +/* Device-specific region: scsi configuration */ +struct vnic_snic_config { + u32 flags; + u32 wq_enet_desc_count; + u32 io_throttle_count; + u32 port_down_timeout; + u32 port_down_io_retries; + u32 luns_per_tgt; + u16 maxdatafieldsize; + u16 intr_timer; + u8 intr_timer_type; + u8 _resvd2; + u8 xpt_type; + u8 hid; +}; +#endif /* _VNIC_SNIC_H_ */ diff --git a/drivers/scsi/snic/vnic_stats.h b/drivers/scsi/snic/vnic_stats.h new file mode 100644 index 000000000..38155aae7 --- /dev/null +++ b/drivers/scsi/snic/vnic_stats.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */ + +#ifndef _VNIC_STATS_H_ +#define _VNIC_STATS_H_ + +/* Tx statistics */ +struct vnic_tx_stats { + u64 tx_frames_ok; + u64 tx_unicast_frames_ok; + u64 tx_multicast_frames_ok; + u64 tx_broadcast_frames_ok; + u64 tx_bytes_ok; + u64 tx_unicast_bytes_ok; + u64 tx_multicast_bytes_ok; + u64 tx_broadcast_bytes_ok; + u64 tx_drops; + u64 tx_errors; + u64 tx_tso; + u64 rsvd[16]; +}; + +/* Rx statistics */ +struct vnic_rx_stats { + u64 rx_frames_ok; + u64 rx_frames_total; + u64 rx_unicast_frames_ok; + u64 rx_multicast_frames_ok; + u64 rx_broadcast_frames_ok; + u64 rx_bytes_ok; + u64 rx_unicast_bytes_ok; + u64 rx_multicast_bytes_ok; + u64 rx_broadcast_bytes_ok; + u64 rx_drop; + u64 rx_no_bufs; + u64 rx_errors; + u64 rx_rss; + u64 rx_crc_errors; + u64 rx_frames_64; + u64 rx_frames_127; + u64 rx_frames_255; + u64 rx_frames_511; + u64 rx_frames_1023; + u64 rx_frames_1518; + u64 rx_frames_to_max; + u64 rsvd[16]; +}; + +struct vnic_stats { + struct vnic_tx_stats tx; + struct vnic_rx_stats rx; +}; + +#endif /* _VNIC_STATS_H_ */ diff --git a/drivers/scsi/snic/vnic_wq.c b/drivers/scsi/snic/vnic_wq.c new file mode 100644 index 000000000..48be9a3f4 --- /dev/null +++ b/drivers/scsi/snic/vnic_wq.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2014 Cisco Systems, Inc. All rights reserved. 
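/*
 * Editor's note (illustration only, not part of the upstream driver):
 * the ring allocation performed below through svnic_dev_alloc_desc_ring()
 * follows the sizing rules of svnic_dev_desc_ring_size() in vnic_dev.c:
 * the descriptor count is rounded up to a multiple of 32 (0 meaning the
 * 4096 maximum), the descriptor size to a multiple of 16, and enough
 * slack is added so the DMA base address can later be rounded up to a
 * 512-byte boundary.  The hypothetical demo_* sketch below is plain
 * user-space C that simply replays that arithmetic for one example request.
 */
#include <stdio.h>

/* Same round-up helper the kernel's ALIGN() macro provides (power-of-2 a). */
#define DEMO_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct demo_ring_size {
	unsigned int desc_count;
	unsigned int desc_size;
	unsigned int size;
	unsigned int size_unaligned;
};

/* Mirrors the arithmetic of svnic_dev_desc_ring_size(). */
static struct demo_ring_size demo_ring_size(unsigned int desc_count,
					    unsigned int desc_size)
{
	struct demo_ring_size rs;

	if (desc_count == 0)		/* 0 requests the 4096 maximum */
		desc_count = 4096;

	rs.desc_count = DEMO_ALIGN(desc_count, 32);	/* groups of 32 */
	rs.desc_size = DEMO_ALIGN(desc_size, 16);	/* 16-byte multiples */
	rs.size = rs.desc_count * rs.desc_size;
	rs.size_unaligned = rs.size + 512;	/* slack for 512-byte base align */

	return rs;
}

int main(void)
{
	/* e.g. a request for 100 descriptors of 60 bytes each */
	struct demo_ring_size rs = demo_ring_size(100, 60);

	printf("count=%u size=%u total=%u alloc=%u\n",
	       rs.desc_count, rs.desc_size, rs.size, rs.size_unaligned);
	/* prints: count=128 size=64 total=8192 alloc=8704 */

	return 0;
}
/* End of editor's illustration; the vnic_wq.c patch continues below. */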
+ +#include +#include +#include +#include +#include +#include "vnic_dev.h" +#include "vnic_wq.h" + +static inline int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int index, enum vnic_res_type res_type) +{ + wq->ctrl = svnic_dev_get_res(vdev, res_type, index); + if (!wq->ctrl) + return -EINVAL; + + return 0; +} + +static inline int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int index, unsigned int desc_count, unsigned int desc_size) +{ + return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, + desc_size); +} + +static int vnic_wq_alloc_bufs(struct vnic_wq *wq) +{ + struct vnic_wq_buf *buf; + unsigned int i, j, count = wq->ring.desc_count; + unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); + + for (i = 0; i < blks; i++) { + wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); + if (!wq->bufs[i]) { + pr_err("Failed to alloc wq_bufs\n"); + + return -ENOMEM; + } + } + + for (i = 0; i < blks; i++) { + buf = wq->bufs[i]; + for (j = 0; j < VNIC_WQ_BUF_DFLT_BLK_ENTRIES; j++) { + buf->index = i * VNIC_WQ_BUF_DFLT_BLK_ENTRIES + j; + buf->desc = (u8 *)wq->ring.descs + + wq->ring.desc_size * buf->index; + if (buf->index + 1 == count) { + buf->next = wq->bufs[0]; + break; + } else if (j + 1 == VNIC_WQ_BUF_DFLT_BLK_ENTRIES) { + buf->next = wq->bufs[i + 1]; + } else { + buf->next = buf + 1; + buf++; + } + } + } + + wq->to_use = wq->to_clean = wq->bufs[0]; + + return 0; +} + +void svnic_wq_free(struct vnic_wq *wq) +{ + struct vnic_dev *vdev; + unsigned int i; + + vdev = wq->vdev; + + svnic_dev_free_desc_ring(vdev, &wq->ring); + + for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) { + kfree(wq->bufs[i]); + wq->bufs[i] = NULL; + } + + wq->ctrl = NULL; + +} + +int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + wq->index = 0; + wq->vdev = vdev; + + err = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2); + if (err) { + pr_err("Failed to get devcmd2 resource\n"); + + return err; + } + + svnic_wq_disable(wq); + + err = vnic_wq_alloc_ring(vdev, wq, 0, desc_count, desc_size); + if (err) + return err; + + return 0; +} + +int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int index, unsigned int desc_count, unsigned int desc_size) +{ + int err; + + wq->index = index; + wq->vdev = vdev; + + err = vnic_wq_get_ctrl(vdev, wq, index, RES_TYPE_WQ); + if (err) { + pr_err("Failed to hook WQ[%d] resource\n", index); + + return err; + } + + svnic_wq_disable(wq); + + err = vnic_wq_alloc_ring(vdev, wq, index, desc_count, desc_size); + if (err) + return err; + + err = vnic_wq_alloc_bufs(wq); + if (err) { + svnic_wq_free(wq); + + return err; + } + + return 0; +} + +void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u64 paddr; + unsigned int count = wq->ring.desc_count; + + paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &wq->ctrl->ring_base); + iowrite32(count, &wq->ctrl->ring_size); + iowrite32(fetch_index, &wq->ctrl->fetch_index); + iowrite32(posted_index, &wq->ctrl->posted_index); + iowrite32(cq_index, &wq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); + iowrite32(0, &wq->ctrl->error_status); + + wq->to_use = wq->to_clean = + &wq->bufs[fetch_index / 
VNIC_WQ_BUF_BLK_ENTRIES(count)] + [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)]; +} + +void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + vnic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable, + error_interrupt_offset); +} + +unsigned int svnic_wq_error_status(struct vnic_wq *wq) +{ + return ioread32(&wq->ctrl->error_status); +} + +void svnic_wq_enable(struct vnic_wq *wq) +{ + iowrite32(1, &wq->ctrl->enable); +} + +int svnic_wq_disable(struct vnic_wq *wq) +{ + unsigned int wait; + + iowrite32(0, &wq->ctrl->enable); + + /* Wait for HW to ACK disable request */ + for (wait = 0; wait < 100; wait++) { + if (!(ioread32(&wq->ctrl->running))) + return 0; + udelay(1); + } + + pr_err("Failed to disable WQ[%d]\n", wq->index); + + return -ETIMEDOUT; +} + +void svnic_wq_clean(struct vnic_wq *wq, + void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) +{ + struct vnic_wq_buf *buf; + + BUG_ON(ioread32(&wq->ctrl->enable)); + + buf = wq->to_clean; + + while (svnic_wq_desc_used(wq) > 0) { + + (*buf_clean)(wq, buf); + + buf = wq->to_clean = buf->next; + wq->ring.desc_avail++; + } + + wq->to_use = wq->to_clean = wq->bufs[0]; + + iowrite32(0, &wq->ctrl->fetch_index); + iowrite32(0, &wq->ctrl->posted_index); + iowrite32(0, &wq->ctrl->error_status); + + svnic_dev_clear_desc_ring(&wq->ring); +} diff --git a/drivers/scsi/snic/vnic_wq.h b/drivers/scsi/snic/vnic_wq.h new file mode 100644 index 000000000..1415da4b6 --- /dev/null +++ b/drivers/scsi/snic/vnic_wq.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */ + +#ifndef _VNIC_WQ_H_ +#define _VNIC_WQ_H_ + +#include +#include "vnic_dev.h" +#include "vnic_cq.h" + +/* Work queue control */ +struct vnic_wq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 posted_index; /* 0x10 */ + u32 pad1; + u32 cq_index; /* 0x18 */ + u32 pad2; + u32 enable; /* 0x20 */ + u32 pad3; + u32 running; /* 0x28 */ + u32 pad4; + u32 fetch_index; /* 0x30 */ + u32 pad5; + u32 dca_value; /* 0x38 */ + u32 pad6; + u32 error_interrupt_enable; /* 0x40 */ + u32 pad7; + u32 error_interrupt_offset; /* 0x48 */ + u32 pad8; + u32 error_status; /* 0x50 */ + u32 pad9; +}; + +struct vnic_wq_buf { + struct vnic_wq_buf *next; + dma_addr_t dma_addr; + void *os_buf; + unsigned int len; + unsigned int index; + int sop; + void *desc; +}; + +/* Break the vnic_wq_buf allocations into blocks of 64 entries */ +#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32 +#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64 +#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \ + ((unsigned int)(entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \ + VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES) +#define VNIC_WQ_BUF_BLK_SZ \ + (VNIC_WQ_BUF_DFLT_BLK_ENTRIES * sizeof(struct vnic_wq_buf)) +#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ + DIV_ROUND_UP(entries, VNIC_WQ_BUF_DFLT_BLK_ENTRIES) +#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ + DIV_ROUND_UP(entries, VNIC_WQ_BUF_DFLT_BLK_ENTRIES) +#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096) + +struct vnic_wq { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX]; + struct vnic_wq_buf *to_use; + struct vnic_wq_buf *to_clean; + unsigned int pkts_outstanding; +}; + +static inline unsigned int svnic_wq_desc_avail(struct vnic_wq *wq) +{ + /* how many does SW own? 
*/ + return wq->ring.desc_avail; +} + +static inline unsigned int svnic_wq_desc_used(struct vnic_wq *wq) +{ + /* how many does HW own? */ + return wq->ring.desc_count - wq->ring.desc_avail - 1; +} + +static inline void *svnic_wq_next_desc(struct vnic_wq *wq) +{ + return wq->to_use->desc; +} + +static inline void svnic_wq_post(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, + unsigned int len, int sop, int eop) +{ + struct vnic_wq_buf *buf = wq->to_use; + + buf->sop = sop; + buf->os_buf = eop ? os_buf : NULL; + buf->dma_addr = dma_addr; + buf->len = len; + + buf = buf->next; + if (eop) { + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. Otherwise, hardware can read + * stale descriptor fields. + */ + wmb(); + iowrite32(buf->index, &wq->ctrl->posted_index); + } + wq->to_use = buf; + + wq->ring.desc_avail--; +} + +static inline void svnic_wq_service(struct vnic_wq *wq, + struct cq_desc *cq_desc, u16 completed_index, + void (*buf_service)(struct vnic_wq *wq, + struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), + void *opaque) +{ + struct vnic_wq_buf *buf; + + buf = wq->to_clean; + while (1) { + + (*buf_service)(wq, cq_desc, buf, opaque); + + wq->ring.desc_avail++; + + wq->to_clean = buf->next; + + if (buf->index == completed_index) + break; + + buf = wq->to_clean; + } +} + +void svnic_wq_free(struct vnic_wq *wq); +int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int index, unsigned int desc_count, unsigned int desc_size); +int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size); +void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int post_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); + +void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +unsigned int svnic_wq_error_status(struct vnic_wq *wq); +void svnic_wq_enable(struct vnic_wq *wq); +int svnic_wq_disable(struct vnic_wq *wq); +void svnic_wq_clean(struct vnic_wq *wq, + void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)); +#endif /* _VNIC_WQ_H_ */ diff --git a/drivers/scsi/snic/wq_enet_desc.h b/drivers/scsi/snic/wq_enet_desc.h new file mode 100644 index 000000000..e8025331b --- /dev/null +++ b/drivers/scsi/snic/wq_enet_desc.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2014 Cisco Systems, Inc. All rights reserved. 
*/ + +#ifndef _WQ_ENET_DESC_H_ +#define _WQ_ENET_DESC_H_ + +/* Ethernet work queue descriptor: 16B */ +struct wq_enet_desc { + __le64 address; + __le16 length; + __le16 mss_loopback; + __le16 header_length_flags; + __le16 vlan_tag; +}; + +#define WQ_ENET_ADDR_BITS 64 +#define WQ_ENET_LEN_BITS 14 +#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1) +#define WQ_ENET_MSS_BITS 14 +#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1) +#define WQ_ENET_MSS_SHIFT 2 +#define WQ_ENET_LOOPBACK_SHIFT 1 +#define WQ_ENET_HDRLEN_BITS 10 +#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1) +#define WQ_ENET_FLAGS_OM_BITS 2 +#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1) +#define WQ_ENET_FLAGS_EOP_SHIFT 12 +#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13 +#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14 +#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15 + +#define WQ_ENET_OFFLOAD_MODE_CSUM 0 +#define WQ_ENET_OFFLOAD_MODE_RESERVED 1 +#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2 +#define WQ_ENET_OFFLOAD_MODE_TSO 3 + +static inline void wq_enet_desc_enc(struct wq_enet_desc *desc, + u64 address, u16 length, u16 mss, u16 header_length, + u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap, + u8 vlan_tag_insert, u16 vlan_tag, u8 loopback) +{ + desc->address = cpu_to_le64(address); + desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK); + desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) << + WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT); + desc->header_length_flags = cpu_to_le16( + (header_length & WQ_ENET_HDRLEN_MASK) | + (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS | + (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT | + (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT | + (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT | + (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT); + desc->vlan_tag = cpu_to_le16(vlan_tag); +} + +static inline void wq_enet_desc_dec(struct wq_enet_desc *desc, + u64 *address, u16 *length, u16 *mss, u16 *header_length, + u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap, + u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback) +{ + *address = le64_to_cpu(desc->address); + *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK; + *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) & + WQ_ENET_MSS_MASK; + *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >> + WQ_ENET_LOOPBACK_SHIFT) & 1); + *header_length = le16_to_cpu(desc->header_length_flags) & + WQ_ENET_HDRLEN_MASK; + *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK); + *eop = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_EOP_SHIFT) & 1); + *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1); + *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1); + *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1); + *vlan_tag = le16_to_cpu(desc->vlan_tag); +} + +#endif /* _WQ_ENET_DESC_H_ */ diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c new file mode 100644 index 000000000..07ef3db3d --- /dev/null +++ b/drivers/scsi/sr.c @@ -0,0 +1,1008 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * sr.c Copyright (C) 1992 David Giller + * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale + * + * adapted from: + * sd.c Copyright (C) 1992 Drew Eckhardt + * Linux scsi disk driver by + * Drew Eckhardt + * + * Modified by Eric Youngdale ericy@andante.org to + * add 
scatter-gather, multiple outstanding request, and other + * enhancements. + * + * Modified by Eric Youngdale eric@andante.org to support loadable + * low-level scsi drivers. + * + * Modified by Thomas Quinot thomas@melchior.cuivre.fdn.fr to + * provide auto-eject. + * + * Modified by Gerd Knorr to support the + * generic cdrom interface + * + * Modified by Jens Axboe - Uniform sr_packet() + * interface, capabilities probe additions, ioctl cleanups, etc. + * + * Modified by Richard Gooch to support devfs + * + * Modified by Jens Axboe - support DVD-RAM + * transparently and lose the GHOST hack + * + * Modified by Arnaldo Carvalho de Melo + * check resource allocation in sr_init and some cleanups + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include /* For the door lock/unlock commands */ + +#include "scsi_logging.h" +#include "sr.h" + + +MODULE_DESCRIPTION("SCSI cdrom (sr) driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_CDROM_MAJOR); +MODULE_ALIAS_SCSI_DEVICE(TYPE_ROM); +MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM); + +#define SR_DISKS 256 + +#define SR_CAPABILITIES \ + (CDC_CLOSE_TRAY|CDC_OPEN_TRAY|CDC_LOCK|CDC_SELECT_SPEED| \ + CDC_SELECT_DISC|CDC_MULTI_SESSION|CDC_MCN|CDC_MEDIA_CHANGED| \ + CDC_PLAY_AUDIO|CDC_RESET|CDC_DRIVE_STATUS| \ + CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_DVD_RAM|CDC_GENERIC_PACKET| \ + CDC_MRW|CDC_MRW_W|CDC_RAM) + +static int sr_probe(struct device *); +static int sr_remove(struct device *); +static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt); +static int sr_done(struct scsi_cmnd *); +static int sr_runtime_suspend(struct device *dev); + +static const struct dev_pm_ops sr_pm_ops = { + .runtime_suspend = sr_runtime_suspend, +}; + +static struct scsi_driver sr_template = { + .gendrv = { + .name = "sr", + .owner = THIS_MODULE, + .probe = sr_probe, + .remove = sr_remove, + .pm = &sr_pm_ops, + }, + .init_command = sr_init_command, + .done = sr_done, +}; + +static unsigned long sr_index_bits[SR_DISKS / BITS_PER_LONG]; +static DEFINE_SPINLOCK(sr_index_lock); + +static struct lock_class_key sr_bio_compl_lkclass; + +static int sr_open(struct cdrom_device_info *, int); +static void sr_release(struct cdrom_device_info *); + +static void get_sectorsize(struct scsi_cd *); +static int get_capabilities(struct scsi_cd *); + +static unsigned int sr_check_events(struct cdrom_device_info *cdi, + unsigned int clearing, int slot); +static int sr_packet(struct cdrom_device_info *, struct packet_command *); +static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf, + u32 lba, u32 nr, u8 *last_sense); + +static const struct cdrom_device_ops sr_dops = { + .open = sr_open, + .release = sr_release, + .drive_status = sr_drive_status, + .check_events = sr_check_events, + .tray_move = sr_tray_move, + .lock_door = sr_lock_door, + .select_speed = sr_select_speed, + .get_last_session = sr_get_last_session, + .get_mcn = sr_get_mcn, + .reset = sr_reset, + .audio_ioctl = sr_audio_ioctl, + .generic_packet = sr_packet, + .read_cdda_bpc = sr_read_cdda_bpc, + .capability = SR_CAPABILITIES, +}; + +static inline struct scsi_cd *scsi_cd(struct gendisk *disk) +{ + return disk->private_data; +} + +static int sr_runtime_suspend(struct device *dev) +{ + struct scsi_cd *cd = dev_get_drvdata(dev); + + if (!cd) /* E.g.: runtime suspend following 
sr_remove() */ + return 0; + + if (cd->media_present) + return -EBUSY; + else + return 0; +} + +static unsigned int sr_get_events(struct scsi_device *sdev) +{ + u8 buf[8]; + u8 cmd[] = { GET_EVENT_STATUS_NOTIFICATION, + 1, /* polled */ + 0, 0, /* reserved */ + 1 << 4, /* notification class: media */ + 0, 0, /* reserved */ + 0, sizeof(buf), /* allocation length */ + 0, /* control */ + }; + struct event_header *eh = (void *)buf; + struct media_event_desc *med = (void *)(buf + 4); + struct scsi_sense_hdr sshdr; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + int result; + + result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, sizeof(buf), + SR_TIMEOUT, MAX_RETRIES, &exec_args); + if (scsi_sense_valid(&sshdr) && sshdr.sense_key == UNIT_ATTENTION) + return DISK_EVENT_MEDIA_CHANGE; + + if (result || be16_to_cpu(eh->data_len) < sizeof(*med)) + return 0; + + if (eh->nea || eh->notification_class != 0x4) + return 0; + + if (med->media_event_code == 1) + return DISK_EVENT_EJECT_REQUEST; + else if (med->media_event_code == 2) + return DISK_EVENT_MEDIA_CHANGE; + else if (med->media_event_code == 3) + return DISK_EVENT_MEDIA_CHANGE; + return 0; +} + +/* + * This function checks to see if the media has been changed or eject + * button has been pressed. It is possible that we have already + * sensed a change, or the drive may have sensed one and not yet + * reported it. The past events are accumulated in sdev->changed and + * returned together with the current state. + */ +static unsigned int sr_check_events(struct cdrom_device_info *cdi, + unsigned int clearing, int slot) +{ + struct scsi_cd *cd = cdi->handle; + bool last_present; + struct scsi_sense_hdr sshdr; + unsigned int events; + int ret; + + /* no changer support */ + if (CDSL_CURRENT != slot) + return 0; + + events = sr_get_events(cd->device); + cd->get_event_changed |= events & DISK_EVENT_MEDIA_CHANGE; + + /* + * If earlier GET_EVENT_STATUS_NOTIFICATION and TUR did not agree + * for several times in a row. We rely on TUR only for this likely + * broken device, to prevent generating incorrect media changed + * events for every open(). + */ + if (cd->ignore_get_event) { + events &= ~DISK_EVENT_MEDIA_CHANGE; + goto do_tur; + } + + /* + * GET_EVENT_STATUS_NOTIFICATION is enough unless MEDIA_CHANGE + * is being cleared. Note that there are devices which hang + * if asked to execute TUR repeatedly. + */ + if (cd->device->changed) { + events |= DISK_EVENT_MEDIA_CHANGE; + cd->device->changed = 0; + cd->tur_changed = true; + } + + if (!(clearing & DISK_EVENT_MEDIA_CHANGE)) + return events; +do_tur: + /* let's see whether the media is there with TUR */ + last_present = cd->media_present; + ret = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); + + /* + * Media is considered to be present if TUR succeeds or fails with + * sense data indicating something other than media-not-present + * (ASC 0x3a). 
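For orientation, the media-event decoding that sr_get_events() performs a little earlier in this hunk can be restated in a few lines of user-space C. The sketch below assumes the same 8-byte reply layout requested above (a 4-byte event header followed by a media event descriptor); the helper name and the sample bytes are made up for illustration.

#include <stdint.h>
#include <stdio.h>

enum { EV_NONE, EV_EJECT_REQUEST, EV_MEDIA_CHANGE };

static int decode_media_event(const uint8_t buf[8])
{
        uint16_t data_len = (buf[0] << 8) | buf[1];     /* big-endian length */
        int nea = buf[2] & 0x80;                        /* "no event available" */
        int notif_class = buf[2] & 0x07;                /* notification class */
        int code = buf[4] & 0x0f;                       /* media event code */

        if (nea || notif_class != 0x4 || data_len < 4)
                return EV_NONE;
        if (code == 1)
                return EV_EJECT_REQUEST;
        if (code == 2 || code == 3)
                return EV_MEDIA_CHANGE;
        return EV_NONE;
}

int main(void)
{
        /* made-up reply: media class (4), event code 2 (new media) */
        const uint8_t sample[8] = { 0x00, 0x06, 0x04, 0x10, 0x02, 0x02, 0x00, 0x00 };

        printf("event = %d\n", decode_media_event(sample));     /* 2 = media change */
        return 0;
}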
+ */ + cd->media_present = scsi_status_is_good(ret) || + (scsi_sense_valid(&sshdr) && sshdr.asc != 0x3a); + + if (last_present != cd->media_present) + cd->device->changed = 1; + + if (cd->device->changed) { + events |= DISK_EVENT_MEDIA_CHANGE; + cd->device->changed = 0; + cd->tur_changed = true; + } + + if (cd->ignore_get_event) + return events; + + /* check whether GET_EVENT is reporting spurious MEDIA_CHANGE */ + if (!cd->tur_changed) { + if (cd->get_event_changed) { + if (cd->tur_mismatch++ > 8) { + sr_printk(KERN_WARNING, cd, + "GET_EVENT and TUR disagree continuously, suppress GET_EVENT events\n"); + cd->ignore_get_event = true; + } + } else { + cd->tur_mismatch = 0; + } + } + cd->tur_changed = false; + cd->get_event_changed = false; + + return events; +} + +/* + * sr_done is the interrupt routine for the device driver. + * + * It will be notified on the end of a SCSI read / write, and will take one + * of several actions based on success or failure. + */ +static int sr_done(struct scsi_cmnd *SCpnt) +{ + int result = SCpnt->result; + int this_count = scsi_bufflen(SCpnt); + int good_bytes = (result == 0 ? this_count : 0); + int block_sectors = 0; + long error_sector; + struct request *rq = scsi_cmd_to_rq(SCpnt); + struct scsi_cd *cd = scsi_cd(rq->q->disk); + +#ifdef DEBUG + scmd_printk(KERN_INFO, SCpnt, "done: %x\n", result); +#endif + + /* + * Handle MEDIUM ERRORs or VOLUME OVERFLOWs that indicate partial + * success. Since this is a relatively rare error condition, no + * care is taken to avoid unnecessary additional work such as + * memcpy's that could be avoided. + */ + if (scsi_status_is_check_condition(result) && + (SCpnt->sense_buffer[0] & 0x7f) == 0x70) { /* Sense current */ + switch (SCpnt->sense_buffer[2]) { + case MEDIUM_ERROR: + case VOLUME_OVERFLOW: + case ILLEGAL_REQUEST: + if (!(SCpnt->sense_buffer[0] & 0x90)) + break; + error_sector = + get_unaligned_be32(&SCpnt->sense_buffer[3]); + if (rq->bio != NULL) + block_sectors = bio_sectors(rq->bio); + if (block_sectors < 4) + block_sectors = 4; + if (cd->device->sector_size == 2048) + error_sector <<= 2; + error_sector &= ~(block_sectors - 1); + good_bytes = (error_sector - blk_rq_pos(rq)) << 9; + if (good_bytes < 0 || good_bytes >= this_count) + good_bytes = 0; + /* + * The SCSI specification allows for the value + * returned by READ CAPACITY to be up to 75 2K + * sectors past the last readable block. + * Therefore, if we hit a medium error within the + * last 75 2K sectors, we decrease the saved size + * value. 
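The partial-completion bookkeeping in sr_done() above is easier to follow with concrete numbers. The sketch below only illustrates the same arithmetic with made-up values: the failing 2048-byte block reported in the sense data is converted to 512-byte sectors, aligned down to the request's block granularity, and everything before it counts as successfully transferred.

#include <stdio.h>

int main(void)
{
        long error_block = 1000;        /* failing 2048-byte block from the sense data */
        long start_sector = 3968;       /* request start (512-byte sectors) */
        long block_sectors = 4;         /* one 2048-byte block = four 512-byte sectors */
        long this_count = 64 * 512;     /* bytes in the request */
        long error_sector, good_bytes;

        error_sector = error_block << 2;        /* 2048-byte blocks -> 512-byte sectors */
        error_sector &= ~(block_sectors - 1);   /* align down to the block boundary */
        good_bytes = (error_sector - start_sector) << 9;
        if (good_bytes < 0 || good_bytes >= this_count)
                good_bytes = 0;

        printf("good_bytes = %ld\n", good_bytes);       /* 16384: 32 sectors preceded the error */
        return 0;
}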
+ */ + if (error_sector < get_capacity(cd->disk) && + cd->capacity - error_sector < 4 * 75) + set_capacity(cd->disk, error_sector); + break; + + case RECOVERED_ERROR: + good_bytes = this_count; + break; + + default: + break; + } + } + + return good_bytes; +} + +static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt) +{ + int block = 0, this_count, s_size; + struct scsi_cd *cd; + struct request *rq = scsi_cmd_to_rq(SCpnt); + blk_status_t ret; + + ret = scsi_alloc_sgtables(SCpnt); + if (ret != BLK_STS_OK) + return ret; + cd = scsi_cd(rq->q->disk); + + SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt, + "Doing sr request, block = %d\n", block)); + + if (!cd->device || !scsi_device_online(cd->device)) { + SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, + "Finishing %u sectors\n", blk_rq_sectors(rq))); + SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, + "Retry with 0x%p\n", SCpnt)); + goto out; + } + + if (cd->device->changed) { + /* + * quietly refuse to do anything to a changed disc until the + * changed bit has been reset + */ + goto out; + } + + s_size = cd->device->sector_size; + if (s_size != 512 && s_size != 1024 && s_size != 2048) { + scmd_printk(KERN_ERR, SCpnt, "bad sector size %d\n", s_size); + goto out; + } + + switch (req_op(rq)) { + case REQ_OP_WRITE: + if (!cd->writeable) + goto out; + SCpnt->cmnd[0] = WRITE_10; + cd->cdi.media_written = 1; + break; + case REQ_OP_READ: + SCpnt->cmnd[0] = READ_10; + break; + default: + blk_dump_rq_flags(rq, "Unknown sr command"); + goto out; + } + + { + struct scatterlist *sg; + int i, size = 0, sg_count = scsi_sg_count(SCpnt); + + scsi_for_each_sg(SCpnt, sg, sg_count, i) + size += sg->length; + + if (size != scsi_bufflen(SCpnt)) { + scmd_printk(KERN_ERR, SCpnt, + "mismatch count %d, bytes %d\n", + size, scsi_bufflen(SCpnt)); + if (scsi_bufflen(SCpnt) > size) + SCpnt->sdb.length = size; + } + } + + /* + * request doesn't start on hw block boundary, add scatter pads + */ + if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) || + (scsi_bufflen(SCpnt) % s_size)) { + scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n"); + goto out; + } + + this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9); + + + SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, + "%s %d/%u 512 byte blocks.\n", + (rq_data_dir(rq) == WRITE) ? + "writing" : "reading", + this_count, blk_rq_sectors(rq))); + + SCpnt->cmnd[1] = 0; + block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9); + + if (this_count > 0xffff) { + this_count = 0xffff; + SCpnt->sdb.length = this_count * s_size; + } + + put_unaligned_be32(block, &SCpnt->cmnd[2]); + SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0; + put_unaligned_be16(this_count, &SCpnt->cmnd[7]); + + /* + * We shouldn't disconnect in the middle of a sector, so with a dumb + * host adapter, it's safe to assume that we can at least transfer + * this many bytes between each connect / disconnect. + */ + SCpnt->transfersize = cd->device->sector_size; + SCpnt->underflow = this_count << 9; + SCpnt->allowed = MAX_RETRIES; + SCpnt->cmd_len = 10; + + /* + * This indicates that the command is ready from our end to be queued. 
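sr_init_command() above ends up packing the LBA and transfer length into a 10-byte CDB with put_unaligned_be32()/put_unaligned_be16(). Below is a self-contained user-space sketch of that READ(10) layout (opcode 0x28, big-endian LBA in bytes 2..5, big-endian block count in bytes 7..8); the helper name and sample request are invented.

#include <stdint.h>
#include <stdio.h>

static void build_read10(uint8_t cdb[10], uint32_t lba, uint16_t nblocks)
{
        int i;

        for (i = 0; i < 10; i++)
                cdb[i] = 0;
        cdb[0] = 0x28;                  /* READ(10) opcode */
        cdb[2] = (lba >> 24) & 0xff;    /* logical block address, big-endian */
        cdb[3] = (lba >> 16) & 0xff;
        cdb[4] = (lba >> 8) & 0xff;
        cdb[5] = lba & 0xff;
        cdb[7] = (nblocks >> 8) & 0xff; /* transfer length in device blocks, big-endian */
        cdb[8] = nblocks & 0xff;
}

int main(void)
{
        uint8_t cdb[10];
        int i;

        build_read10(cdb, 16, 8);       /* read 8 blocks starting at LBA 16 */
        for (i = 0; i < 10; i++)
                printf("%02x ", cdb[i]);
        printf("\n");
        return 0;
}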
+ */ + return BLK_STS_OK; + out: + scsi_free_sgtables(SCpnt); + return BLK_STS_IOERR; +} + +static void sr_revalidate_disk(struct scsi_cd *cd) +{ + struct scsi_sense_hdr sshdr; + + /* if the unit is not ready, nothing more to do */ + if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr)) + return; + sr_cd_check(&cd->cdi); + get_sectorsize(cd); +} + +static int sr_block_open(struct gendisk *disk, blk_mode_t mode) +{ + struct scsi_cd *cd = scsi_cd(disk); + struct scsi_device *sdev = cd->device; + int ret; + + if (scsi_device_get(cd->device)) + return -ENXIO; + + scsi_autopm_get_device(sdev); + if (disk_check_media_change(disk)) + sr_revalidate_disk(cd); + + mutex_lock(&cd->lock); + ret = cdrom_open(&cd->cdi, mode); + mutex_unlock(&cd->lock); + + scsi_autopm_put_device(sdev); + if (ret) + scsi_device_put(cd->device); + return ret; +} + +static void sr_block_release(struct gendisk *disk) +{ + struct scsi_cd *cd = scsi_cd(disk); + + mutex_lock(&cd->lock); + cdrom_release(&cd->cdi); + mutex_unlock(&cd->lock); + + scsi_device_put(cd->device); +} + +static int sr_block_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned cmd, unsigned long arg) +{ + struct scsi_cd *cd = scsi_cd(bdev->bd_disk); + struct scsi_device *sdev = cd->device; + void __user *argp = (void __user *)arg; + int ret; + + if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO)) + return -ENOIOCTLCMD; + + mutex_lock(&cd->lock); + + ret = scsi_ioctl_block_when_processing_errors(sdev, cmd, + (mode & BLK_OPEN_NDELAY)); + if (ret) + goto out; + + scsi_autopm_get_device(sdev); + + if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) { + ret = cdrom_ioctl(&cd->cdi, bdev, cmd, arg); + if (ret != -ENOSYS) + goto put; + } + ret = scsi_ioctl(sdev, mode & BLK_OPEN_WRITE, cmd, argp); + +put: + scsi_autopm_put_device(sdev); +out: + mutex_unlock(&cd->lock); + return ret; +} + +static unsigned int sr_block_check_events(struct gendisk *disk, + unsigned int clearing) +{ + struct scsi_cd *cd = disk->private_data; + + if (atomic_read(&cd->device->disk_events_disable_depth)) + return 0; + return cdrom_check_events(&cd->cdi, clearing); +} + +static void sr_free_disk(struct gendisk *disk) +{ + struct scsi_cd *cd = disk->private_data; + + spin_lock(&sr_index_lock); + clear_bit(MINOR(disk_devt(disk)), sr_index_bits); + spin_unlock(&sr_index_lock); + + unregister_cdrom(&cd->cdi); + mutex_destroy(&cd->lock); + kfree(cd); +} + +static const struct block_device_operations sr_bdops = +{ + .owner = THIS_MODULE, + .open = sr_block_open, + .release = sr_block_release, + .ioctl = sr_block_ioctl, + .compat_ioctl = blkdev_compat_ptr_ioctl, + .check_events = sr_block_check_events, + .free_disk = sr_free_disk, +}; + +static int sr_open(struct cdrom_device_info *cdi, int purpose) +{ + struct scsi_cd *cd = cdi->handle; + struct scsi_device *sdev = cd->device; + + /* + * If the device is in error recovery, wait until it is done. + * If the device is offline, then disallow any access to it. 
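The minor-number handling in sr_probe() above (find_first_zero_bit() plus __set_bit() under sr_index_lock, with the bit cleared again in sr_free_disk()) is a standard allocate-from-a-bitmap pattern. The single-threaded user-space sketch below shows only that pattern, without the locking; the names are invented for the illustration.

#include <stdio.h>

#define SR_DISKS        256
#define BITS_PER_LONG   (8 * sizeof(unsigned long))

static unsigned long index_bits[SR_DISKS / BITS_PER_LONG];

static int alloc_minor(void)
{
        int i;

        for (i = 0; i < SR_DISKS; i++) {
                unsigned long mask = 1UL << (i % BITS_PER_LONG);

                if (!(index_bits[i / BITS_PER_LONG] & mask)) {
                        index_bits[i / BITS_PER_LONG] |= mask;  /* claim it */
                        return i;
                }
        }
        return -1;                      /* every minor is in use */
}

static void free_minor(int minor)
{
        index_bits[minor / BITS_PER_LONG] &= ~(1UL << (minor % BITS_PER_LONG));
}

int main(void)
{
        int a = alloc_minor(), b = alloc_minor();

        printf("allocated %d and %d\n", a, b);          /* 0 and 1 */
        free_minor(a);
        printf("reallocated %d\n", alloc_minor());      /* 0 again */
        return 0;
}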
+ */ + if (!scsi_block_when_processing_errors(sdev)) + return -ENXIO; + + return 0; +} + +static void sr_release(struct cdrom_device_info *cdi) +{ +} + +static int sr_probe(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct gendisk *disk; + struct scsi_cd *cd; + int minor, error; + + scsi_autopm_get_device(sdev); + error = -ENODEV; + if (sdev->type != TYPE_ROM && sdev->type != TYPE_WORM) + goto fail; + + error = -ENOMEM; + cd = kzalloc(sizeof(*cd), GFP_KERNEL); + if (!cd) + goto fail; + + disk = blk_mq_alloc_disk_for_queue(sdev->request_queue, + &sr_bio_compl_lkclass); + if (!disk) + goto fail_free; + mutex_init(&cd->lock); + + spin_lock(&sr_index_lock); + minor = find_first_zero_bit(sr_index_bits, SR_DISKS); + if (minor == SR_DISKS) { + spin_unlock(&sr_index_lock); + error = -EBUSY; + goto fail_put; + } + __set_bit(minor, sr_index_bits); + spin_unlock(&sr_index_lock); + + disk->major = SCSI_CDROM_MAJOR; + disk->first_minor = minor; + disk->minors = 1; + sprintf(disk->disk_name, "sr%d", minor); + disk->fops = &sr_bdops; + disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART; + disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST; + disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT | + DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE; + + blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT); + + cd->device = sdev; + cd->disk = disk; + cd->capacity = 0x1fffff; + cd->device->changed = 1; /* force recheck CD type */ + cd->media_present = 1; + cd->use = 1; + cd->readcd_known = 0; + cd->readcd_cdda = 0; + + cd->cdi.ops = &sr_dops; + cd->cdi.handle = cd; + cd->cdi.mask = 0; + cd->cdi.capacity = 1; + sprintf(cd->cdi.name, "sr%d", minor); + + sdev->sector_size = 2048; /* A guess, just in case */ + + error = -ENOMEM; + if (get_capabilities(cd)) + goto fail_minor; + sr_vendor_init(cd); + + set_capacity(disk, cd->capacity); + disk->private_data = cd; + + if (register_cdrom(disk, &cd->cdi)) + goto fail_minor; + + /* + * Initialize block layer runtime PM stuffs before the + * periodic event checking request gets started in add_disk. + */ + blk_pm_runtime_init(sdev->request_queue, dev); + + dev_set_drvdata(dev, cd); + sr_revalidate_disk(cd); + + error = device_add_disk(&sdev->sdev_gendev, disk, NULL); + if (error) + goto unregister_cdrom; + + sdev_printk(KERN_DEBUG, sdev, + "Attached scsi CD-ROM %s\n", cd->cdi.name); + scsi_autopm_put_device(cd->device); + + return 0; + +unregister_cdrom: + unregister_cdrom(&cd->cdi); +fail_minor: + spin_lock(&sr_index_lock); + clear_bit(minor, sr_index_bits); + spin_unlock(&sr_index_lock); +fail_put: + put_disk(disk); + mutex_destroy(&cd->lock); +fail_free: + kfree(cd); +fail: + scsi_autopm_put_device(sdev); + return error; +} + + +static void get_sectorsize(struct scsi_cd *cd) +{ + unsigned char cmd[10]; + unsigned char buffer[8]; + int the_result, retries = 3; + int sector_size; + struct request_queue *queue; + + do { + cmd[0] = READ_CAPACITY; + memset((void *) &cmd[1], 0, 9); + memset(buffer, 0, sizeof(buffer)); + + /* Do the command and wait.. */ + the_result = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN, + buffer, sizeof(buffer), + SR_TIMEOUT, MAX_RETRIES, NULL); + + retries--; + + } while (the_result && retries); + + + if (the_result) { + cd->capacity = 0x1fffff; + sector_size = 2048; /* A guess, just in case */ + } else { + long last_written; + + cd->capacity = 1 + get_unaligned_be32(&buffer[0]); + /* + * READ_CAPACITY doesn't return the correct size on + * certain UDF media. 
If last_written is larger, use + * it instead. + * + * http://bugzilla.kernel.org/show_bug.cgi?id=9668 + */ + if (!cdrom_get_last_written(&cd->cdi, &last_written)) + cd->capacity = max_t(long, cd->capacity, last_written); + + sector_size = get_unaligned_be32(&buffer[4]); + switch (sector_size) { + /* + * HP 4020i CD-Recorder reports 2340 byte sectors + * Philips CD-Writers report 2352 byte sectors + * + * Use 2k sectors for them.. + */ + case 0: + case 2340: + case 2352: + sector_size = 2048; + fallthrough; + case 2048: + cd->capacity *= 4; + fallthrough; + case 512: + break; + default: + sr_printk(KERN_INFO, cd, + "unsupported sector size %d.", sector_size); + cd->capacity = 0; + } + + cd->device->sector_size = sector_size; + + /* + * Add this so that we have the ability to correctly gauge + * what the device is capable of. + */ + set_capacity(cd->disk, cd->capacity); + } + + queue = cd->device->request_queue; + blk_queue_logical_block_size(queue, sector_size); + + return; +} + +static int get_capabilities(struct scsi_cd *cd) +{ + unsigned char *buffer; + struct scsi_mode_data data; + struct scsi_sense_hdr sshdr; + unsigned int ms_len = 128; + int rc, n; + + static const char *loadmech[] = + { + "caddy", + "tray", + "pop-up", + "", + "changer", + "cartridge changer", + "", + "" + }; + + + /* allocate transfer buffer */ + buffer = kmalloc(512, GFP_KERNEL); + if (!buffer) { + sr_printk(KERN_ERR, cd, "out of memory.\n"); + return -ENOMEM; + } + + /* eat unit attentions */ + scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); + + /* ask for mode page 0x2a */ + rc = scsi_mode_sense(cd->device, 0, 0x2a, 0, buffer, ms_len, + SR_TIMEOUT, 3, &data, NULL); + + if (rc < 0 || data.length > ms_len || + data.header_length + data.block_descriptor_length > data.length) { + /* failed, drive doesn't have capabilities mode page */ + cd->cdi.speed = 1; + cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R | + CDC_DVD | CDC_DVD_RAM | + CDC_SELECT_DISC | CDC_SELECT_SPEED | + CDC_MRW | CDC_MRW_W | CDC_RAM); + kfree(buffer); + sr_printk(KERN_INFO, cd, "scsi-1 drive"); + return 0; + } + + n = data.header_length + data.block_descriptor_length; + cd->cdi.speed = get_unaligned_be16(&buffer[n + 8]) / 176; + cd->readcd_known = 1; + cd->readcd_cdda = buffer[n + 5] & 0x01; + /* print some capability bits */ + sr_printk(KERN_INFO, cd, + "scsi3-mmc drive: %dx/%dx %s%s%s%s%s%s\n", + get_unaligned_be16(&buffer[n + 14]) / 176, + cd->cdi.speed, + buffer[n + 3] & 0x01 ? "writer " : "", /* CD Writer */ + buffer[n + 3] & 0x20 ? "dvd-ram " : "", + buffer[n + 2] & 0x02 ? "cd/rw " : "", /* can read rewriteable */ + buffer[n + 4] & 0x20 ? "xa/form2 " : "", /* can read xa/from2 */ + buffer[n + 5] & 0x01 ? "cdda " : "", /* can read audio data */ + loadmech[buffer[n + 6] >> 5]); + if ((buffer[n + 6] >> 5) == 0) + /* caddy drives can't close tray... 
*/ + cd->cdi.mask |= CDC_CLOSE_TRAY; + if ((buffer[n + 2] & 0x8) == 0) + /* not a DVD drive */ + cd->cdi.mask |= CDC_DVD; + if ((buffer[n + 3] & 0x20) == 0) + /* can't write DVD-RAM media */ + cd->cdi.mask |= CDC_DVD_RAM; + if ((buffer[n + 3] & 0x10) == 0) + /* can't write DVD-R media */ + cd->cdi.mask |= CDC_DVD_R; + if ((buffer[n + 3] & 0x2) == 0) + /* can't write CD-RW media */ + cd->cdi.mask |= CDC_CD_RW; + if ((buffer[n + 3] & 0x1) == 0) + /* can't write CD-R media */ + cd->cdi.mask |= CDC_CD_R; + if ((buffer[n + 6] & 0x8) == 0) + /* can't eject */ + cd->cdi.mask |= CDC_OPEN_TRAY; + + if ((buffer[n + 6] >> 5) == mechtype_individual_changer || + (buffer[n + 6] >> 5) == mechtype_cartridge_changer) + cd->cdi.capacity = + cdrom_number_of_slots(&cd->cdi); + if (cd->cdi.capacity <= 1) + /* not a changer */ + cd->cdi.mask |= CDC_SELECT_DISC; + /*else I don't think it can close its tray + cd->cdi.mask |= CDC_CLOSE_TRAY; */ + + /* + * if DVD-RAM, MRW-W or CD-RW, we are randomly writable + */ + if ((cd->cdi.mask & (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) != + (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) { + cd->writeable = 1; + } + + kfree(buffer); + return 0; +} + +/* + * sr_packet() is the entry point for the generic commands generated + * by the Uniform CD-ROM layer. + */ +static int sr_packet(struct cdrom_device_info *cdi, + struct packet_command *cgc) +{ + struct scsi_cd *cd = cdi->handle; + struct scsi_device *sdev = cd->device; + + if (cgc->cmd[0] == GPCMD_READ_DISC_INFO && sdev->no_read_disc_info) + return -EDRIVE_CANT_DO_THIS; + + if (cgc->timeout <= 0) + cgc->timeout = IOCTL_TIMEOUT; + + sr_do_ioctl(cd, cgc); + + return cgc->stat; +} + +static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf, + u32 lba, u32 nr, u8 *last_sense) +{ + struct gendisk *disk = cdi->disk; + u32 len = nr * CD_FRAMESIZE_RAW; + struct scsi_cmnd *scmd; + struct request *rq; + struct bio *bio; + int ret; + + rq = scsi_alloc_request(disk->queue, REQ_OP_DRV_IN, 0); + if (IS_ERR(rq)) + return PTR_ERR(rq); + scmd = blk_mq_rq_to_pdu(rq); + + ret = blk_rq_map_user(disk->queue, rq, NULL, ubuf, len, GFP_KERNEL); + if (ret) + goto out_put_request; + + scmd->cmnd[0] = GPCMD_READ_CD; + scmd->cmnd[1] = 1 << 2; + scmd->cmnd[2] = (lba >> 24) & 0xff; + scmd->cmnd[3] = (lba >> 16) & 0xff; + scmd->cmnd[4] = (lba >> 8) & 0xff; + scmd->cmnd[5] = lba & 0xff; + scmd->cmnd[6] = (nr >> 16) & 0xff; + scmd->cmnd[7] = (nr >> 8) & 0xff; + scmd->cmnd[8] = nr & 0xff; + scmd->cmnd[9] = 0xf8; + scmd->cmd_len = 12; + rq->timeout = 60 * HZ; + bio = rq->bio; + + blk_execute_rq(rq, false); + if (scmd->result) { + struct scsi_sense_hdr sshdr; + + scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len, + &sshdr); + *last_sense = sshdr.sense_key; + ret = -EIO; + } + + if (blk_rq_unmap_user(bio)) + ret = -EFAULT; +out_put_request: + blk_mq_free_request(rq); + return ret; +} + +static int sr_remove(struct device *dev) +{ + struct scsi_cd *cd = dev_get_drvdata(dev); + + scsi_autopm_get_device(cd->device); + + del_gendisk(cd->disk); + put_disk(cd->disk); + + return 0; +} + +static int __init init_sr(void) +{ + int rc; + + rc = register_blkdev(SCSI_CDROM_MAJOR, "sr"); + if (rc) + return rc; + rc = scsi_register_driver(&sr_template.gendrv); + if (rc) + unregister_blkdev(SCSI_CDROM_MAJOR, "sr"); + + return rc; +} + +static void __exit exit_sr(void) +{ + scsi_unregister_driver(&sr_template.gendrv); + unregister_blkdev(SCSI_CDROM_MAJOR, "sr"); +} + +module_init(init_sr); +module_exit(exit_sr); +MODULE_LICENSE("GPL"); diff 
--git a/drivers/scsi/sr.h b/drivers/scsi/sr.h new file mode 100644 index 000000000..1175f2e21 --- /dev/null +++ b/drivers/scsi/sr.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * sr.h by David Giller + * CD-ROM disk driver header file + * + * adapted from: + * sd.h Copyright (C) 1992 Drew Eckhardt + * SCSI disk driver header file by + * Drew Eckhardt + * + * + * + * Modified by Eric Youngdale eric@andante.org to + * add scatter-gather, multiple outstanding request, and other + * enhancements. + */ + +#ifndef _SR_H +#define _SR_H + +#include + +#define MAX_RETRIES 3 +#define SR_TIMEOUT (30 * HZ) + +struct scsi_device; + +/* The CDROM is fairly slow, so we need a little extra time */ +/* In fact, it is very slow if it has to spin up first */ +#define IOCTL_TIMEOUT 30*HZ + + +typedef struct scsi_cd { + unsigned capacity; /* size in blocks */ + struct scsi_device *device; + unsigned int vendor; /* vendor code, see sr_vendor.c */ + unsigned long ms_offset; /* for reading multisession-CD's */ + unsigned writeable : 1; + unsigned use:1; /* is this device still supportable */ + unsigned xa_flag:1; /* CD has XA sectors ? */ + unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */ + unsigned readcd_cdda:1; /* reading audio data using READ_CD */ + unsigned media_present:1; /* media is present */ + + /* GET_EVENT spurious event handling, blk layer guarantees exclusion */ + int tur_mismatch; /* nr of get_event TUR mismatches */ + bool tur_changed:1; /* changed according to TUR */ + bool get_event_changed:1; /* changed according to GET_EVENT */ + bool ignore_get_event:1; /* GET_EVENT is unreliable, use TUR */ + + struct cdrom_device_info cdi; + struct mutex lock; + struct gendisk *disk; +} Scsi_CD; + +#define sr_printk(prefix, cd, fmt, a...) 
\ + sdev_prefix_printk(prefix, (cd)->device, (cd)->cdi.name, fmt, ##a) + +int sr_do_ioctl(Scsi_CD *, struct packet_command *); + +int sr_lock_door(struct cdrom_device_info *, int); +int sr_tray_move(struct cdrom_device_info *, int); +int sr_drive_status(struct cdrom_device_info *, int); +int sr_disk_status(struct cdrom_device_info *); +int sr_get_last_session(struct cdrom_device_info *, struct cdrom_multisession *); +int sr_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *); +int sr_reset(struct cdrom_device_info *); +int sr_select_speed(struct cdrom_device_info *cdi, int speed); +int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *); + +int sr_is_xa(Scsi_CD *); + +/* sr_vendor.c */ +void sr_vendor_init(Scsi_CD *); +int sr_cd_check(struct cdrom_device_info *); +int sr_set_blocklength(Scsi_CD *, int blocklength); + +#endif diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c new file mode 100644 index 000000000..5b0b35e60 --- /dev/null +++ b/drivers/scsi/sr_ioctl.c @@ -0,0 +1,597 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "sr.h" + +#if 0 +#define DEBUG +#endif + +/* The sr_is_xa() seems to trigger firmware bugs with some drives :-( + * It is off by default and can be turned on with this module parameter */ +static int xa_test = 0; + +module_param(xa_test, int, S_IRUGO | S_IWUSR); + +static int sr_read_tochdr(struct cdrom_device_info *cdi, + struct cdrom_tochdr *tochdr) +{ + struct scsi_cd *cd = cdi->handle; + struct packet_command cgc; + int result; + unsigned char *buffer; + + buffer = kzalloc(32, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + memset(&cgc, 0, sizeof(struct packet_command)); + cgc.timeout = IOCTL_TIMEOUT; + cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP; + cgc.cmd[8] = 12; /* LSB of length */ + cgc.buffer = buffer; + cgc.buflen = 12; + cgc.quiet = 1; + cgc.data_direction = DMA_FROM_DEVICE; + + result = sr_do_ioctl(cd, &cgc); + if (result) + goto err; + + tochdr->cdth_trk0 = buffer[2]; + tochdr->cdth_trk1 = buffer[3]; + +err: + kfree(buffer); + return result; +} + +static int sr_read_tocentry(struct cdrom_device_info *cdi, + struct cdrom_tocentry *tocentry) +{ + struct scsi_cd *cd = cdi->handle; + struct packet_command cgc; + int result; + unsigned char *buffer; + + buffer = kzalloc(32, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + memset(&cgc, 0, sizeof(struct packet_command)); + cgc.timeout = IOCTL_TIMEOUT; + cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP; + cgc.cmd[1] |= (tocentry->cdte_format == CDROM_MSF) ? 0x02 : 0; + cgc.cmd[6] = tocentry->cdte_track; + cgc.cmd[8] = 12; /* LSB of length */ + cgc.buffer = buffer; + cgc.buflen = 12; + cgc.data_direction = DMA_FROM_DEVICE; + + result = sr_do_ioctl(cd, &cgc); + if (result) + goto err; + + tocentry->cdte_ctrl = buffer[5] & 0xf; + tocentry->cdte_adr = buffer[5] >> 4; + tocentry->cdte_datamode = (tocentry->cdte_ctrl & 0x04) ? 1 : 0; + if (tocentry->cdte_format == CDROM_MSF) { + tocentry->cdte_addr.msf.minute = buffer[9]; + tocentry->cdte_addr.msf.second = buffer[10]; + tocentry->cdte_addr.msf.frame = buffer[11]; + } else + tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8) + + buffer[10]) << 8) + buffer[11]; + +err: + kfree(buffer); + return result; +} + +#define IOCTL_RETRIES 3 + +/* ATAPI drives don't have a SCMD_PLAYAUDIO_TI command. 
When these drives + are emulating a SCSI device via the idescsi module, they need to have + CDROMPLAYTRKIND commands translated into CDROMPLAYMSF commands for them */ + +static int sr_fake_playtrkind(struct cdrom_device_info *cdi, struct cdrom_ti *ti) +{ + struct cdrom_tocentry trk0_te, trk1_te; + struct cdrom_tochdr tochdr; + struct packet_command cgc; + int ntracks, ret; + + ret = sr_read_tochdr(cdi, &tochdr); + if (ret) + return ret; + + ntracks = tochdr.cdth_trk1 - tochdr.cdth_trk0 + 1; + + if (ti->cdti_trk1 == ntracks) + ti->cdti_trk1 = CDROM_LEADOUT; + else if (ti->cdti_trk1 != CDROM_LEADOUT) + ti->cdti_trk1 ++; + + trk0_te.cdte_track = ti->cdti_trk0; + trk0_te.cdte_format = CDROM_MSF; + trk1_te.cdte_track = ti->cdti_trk1; + trk1_te.cdte_format = CDROM_MSF; + + ret = sr_read_tocentry(cdi, &trk0_te); + if (ret) + return ret; + ret = sr_read_tocentry(cdi, &trk1_te); + if (ret) + return ret; + + memset(&cgc, 0, sizeof(struct packet_command)); + cgc.cmd[0] = GPCMD_PLAY_AUDIO_MSF; + cgc.cmd[3] = trk0_te.cdte_addr.msf.minute; + cgc.cmd[4] = trk0_te.cdte_addr.msf.second; + cgc.cmd[5] = trk0_te.cdte_addr.msf.frame; + cgc.cmd[6] = trk1_te.cdte_addr.msf.minute; + cgc.cmd[7] = trk1_te.cdte_addr.msf.second; + cgc.cmd[8] = trk1_te.cdte_addr.msf.frame; + cgc.data_direction = DMA_NONE; + cgc.timeout = IOCTL_TIMEOUT; + return sr_do_ioctl(cdi->handle, &cgc); +} + +static int sr_play_trkind(struct cdrom_device_info *cdi, + struct cdrom_ti *ti) + +{ + struct scsi_cd *cd = cdi->handle; + struct packet_command cgc; + int result; + + memset(&cgc, 0, sizeof(struct packet_command)); + cgc.timeout = IOCTL_TIMEOUT; + cgc.cmd[0] = GPCMD_PLAYAUDIO_TI; + cgc.cmd[4] = ti->cdti_trk0; + cgc.cmd[5] = ti->cdti_ind0; + cgc.cmd[7] = ti->cdti_trk1; + cgc.cmd[8] = ti->cdti_ind1; + cgc.data_direction = DMA_NONE; + + result = sr_do_ioctl(cd, &cgc); + if (result == -EDRIVE_CANT_DO_THIS) + result = sr_fake_playtrkind(cdi, ti); + + return result; +} + +/* We do our own retries because we want to know what the specific + error code is. Normally the UNIT_ATTENTION code will automatically + clear after one error */ + +int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) +{ + struct scsi_device *SDev; + struct scsi_sense_hdr local_sshdr, *sshdr; + int result, err = 0, retries = 0; + const struct scsi_exec_args exec_args = { + .sshdr = cgc->sshdr ? : &local_sshdr, + }; + + SDev = cd->device; + + sshdr = exec_args.sshdr; + + retry: + if (!scsi_block_when_processing_errors(SDev)) { + err = -ENODEV; + goto out; + } + + result = scsi_execute_cmd(SDev, cgc->cmd, + cgc->data_direction == DMA_TO_DEVICE ? + REQ_OP_DRV_OUT : REQ_OP_DRV_IN, cgc->buffer, + cgc->buflen, cgc->timeout, IOCTL_RETRIES, + &exec_args); + /* Minimal error checking. Ignore cases we know about, and report the rest. */ + if (result < 0) { + err = result; + goto out; + } + if (scsi_status_is_check_condition(result)) { + switch (sshdr->sense_key) { + case UNIT_ATTENTION: + SDev->changed = 1; + if (!cgc->quiet) + sr_printk(KERN_INFO, cd, + "disc change detected.\n"); + if (retries++ < 10) + goto retry; + err = -ENOMEDIUM; + break; + case NOT_READY: /* This happens if there is no disc in drive */ + if (sshdr->asc == 0x04 && + sshdr->ascq == 0x01) { + /* sense: Logical unit is in process of becoming ready */ + if (!cgc->quiet) + sr_printk(KERN_INFO, cd, + "CDROM not ready yet.\n"); + if (retries++ < 10) { + /* sleep 2 sec and try again */ + ssleep(2); + goto retry; + } else { + /* 20 secs are enough? 
*/ + err = -ENOMEDIUM; + break; + } + } + if (!cgc->quiet) + sr_printk(KERN_INFO, cd, + "CDROM not ready. Make sure there " + "is a disc in the drive.\n"); + err = -ENOMEDIUM; + break; + case ILLEGAL_REQUEST: + err = -EIO; + if (sshdr->asc == 0x20 && + sshdr->ascq == 0x00) + /* sense: Invalid command operation code */ + err = -EDRIVE_CANT_DO_THIS; + break; + default: + err = -EIO; + } + } + + /* Wake up a process waiting for device */ + out: + cgc->stat = err; + return err; +} + +/* ---------------------------------------------------------------------- */ +/* interface to cdrom.c */ + +int sr_tray_move(struct cdrom_device_info *cdi, int pos) +{ + Scsi_CD *cd = cdi->handle; + struct packet_command cgc; + + memset(&cgc, 0, sizeof(struct packet_command)); + cgc.cmd[0] = GPCMD_START_STOP_UNIT; + cgc.cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */ ; + cgc.data_direction = DMA_NONE; + cgc.timeout = IOCTL_TIMEOUT; + return sr_do_ioctl(cd, &cgc); +} + +int sr_lock_door(struct cdrom_device_info *cdi, int lock) +{ + Scsi_CD *cd = cdi->handle; + + return scsi_set_medium_removal(cd->device, lock ? + SCSI_REMOVAL_PREVENT : SCSI_REMOVAL_ALLOW); +} + +int sr_drive_status(struct cdrom_device_info *cdi, int slot) +{ + struct scsi_cd *cd = cdi->handle; + struct scsi_sense_hdr sshdr; + struct media_event_desc med; + + if (CDSL_CURRENT != slot) { + /* we have no changer support */ + return -EINVAL; + } + if (!scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr)) + return CDS_DISC_OK; + + /* SK/ASC/ASCQ of 2/4/1 means "unit is becoming ready" */ + if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY + && sshdr.asc == 0x04 && sshdr.ascq == 0x01) + return CDS_DRIVE_NOT_READY; + + if (!cdrom_get_media_event(cdi, &med)) { + if (med.media_present) + return CDS_DISC_OK; + else if (med.door_open) + return CDS_TRAY_OPEN; + else + return CDS_NO_DISC; + } + + /* + * SK/ASC/ASCQ of 2/4/2 means "initialization required" + * Using CD_TRAY_OPEN results in an START_STOP_UNIT to close + * the tray, which resolves the initialization requirement. + */ + if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY + && sshdr.asc == 0x04 && sshdr.ascq == 0x02) + return CDS_TRAY_OPEN; + + /* + * 0x04 is format in progress .. but there must be a disc present! + */ + if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04) + return CDS_DISC_OK; + + /* + * If not using Mt Fuji extended media tray reports, + * just return TRAY_OPEN since ATAPI doesn't provide + * any other way to detect this... + */ + if (scsi_sense_valid(&sshdr) && + /* 0x3a is medium not present */ + sshdr.asc == 0x3a) + return CDS_NO_DISC; + else + return CDS_TRAY_OPEN; + + return CDS_DRIVE_NOT_READY; +} + +int sr_disk_status(struct cdrom_device_info *cdi) +{ + Scsi_CD *cd = cdi->handle; + struct cdrom_tochdr toc_h; + struct cdrom_tocentry toc_e; + int i, rc, have_datatracks = 0; + + /* look for data tracks */ + rc = sr_read_tochdr(cdi, &toc_h); + if (rc) + return (rc == -ENOMEDIUM) ? 
CDS_NO_DISC : CDS_NO_INFO; + + for (i = toc_h.cdth_trk0; i <= toc_h.cdth_trk1; i++) { + toc_e.cdte_track = i; + toc_e.cdte_format = CDROM_LBA; + if (sr_read_tocentry(cdi, &toc_e)) + return CDS_NO_INFO; + if (toc_e.cdte_ctrl & CDROM_DATA_TRACK) { + have_datatracks = 1; + break; + } + } + if (!have_datatracks) + return CDS_AUDIO; + + if (cd->xa_flag) + return CDS_XA_2_1; + else + return CDS_DATA_1; +} + +int sr_get_last_session(struct cdrom_device_info *cdi, + struct cdrom_multisession *ms_info) +{ + Scsi_CD *cd = cdi->handle; + + ms_info->addr.lba = cd->ms_offset; + ms_info->xa_flag = cd->xa_flag || cd->ms_offset > 0; + + return 0; +} + +int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn) +{ + Scsi_CD *cd = cdi->handle; + struct packet_command cgc; + char *buffer = kzalloc(32, GFP_KERNEL); + int result; + + if (!buffer) + return -ENOMEM; + + memset(&cgc, 0, sizeof(struct packet_command)); + cgc.cmd[0] = GPCMD_READ_SUBCHANNEL; + cgc.cmd[2] = 0x40; /* I do want the subchannel info */ + cgc.cmd[3] = 0x02; /* Give me medium catalog number info */ + cgc.cmd[8] = 24; + cgc.buffer = buffer; + cgc.buflen = 24; + cgc.data_direction = DMA_FROM_DEVICE; + cgc.timeout = IOCTL_TIMEOUT; + result = sr_do_ioctl(cd, &cgc); + if (result) + goto err; + + memcpy(mcn->medium_catalog_number, buffer + 9, 13); + mcn->medium_catalog_number[13] = 0; + +err: + kfree(buffer); + return result; +} + +int sr_reset(struct cdrom_device_info *cdi) +{ + return 0; +} + +int sr_select_speed(struct cdrom_device_info *cdi, int speed) +{ + Scsi_CD *cd = cdi->handle; + struct packet_command cgc; + + if (speed == 0) + speed = 0xffff; /* set to max */ + else + speed *= 177; /* Nx to kbyte/s */ + + memset(&cgc, 0, sizeof(struct packet_command)); + cgc.cmd[0] = GPCMD_SET_SPEED; /* SET CD SPEED */ + cgc.cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */ + cgc.cmd[3] = speed & 0xff; /* LSB */ + cgc.data_direction = DMA_NONE; + cgc.timeout = IOCTL_TIMEOUT; + + if (sr_do_ioctl(cd, &cgc)) + return -EIO; + return 0; +} + +/* ----------------------------------------------------------------------- */ +/* this is called by the generic cdrom driver. arg is a _kernel_ pointer, */ +/* because the generic cdrom driver does the user access stuff for us. */ +/* only cdromreadtochdr and cdromreadtocentry are left - for use with the */ +/* sr_disk_status interface for the generic cdrom driver. 
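The speed handling in sr_select_speed() above converts a drive speed of "Nx" to roughly N * 177 kB/s (the 1x audio rate is about 176.4 kB/s), treats 0 as "maximum", and stores the value big-endian in bytes 2..3 of the command. The user-space sketch below restates that conversion; 0xbb is the MMC SET CD SPEED opcode (GPCMD_SET_SPEED) used above, and the helper name is invented.

#include <stdint.h>
#include <stdio.h>

static void build_set_speed(uint8_t cdb[12], unsigned int nx)
{
        unsigned int kbps = nx ? nx * 177 : 0xffff;     /* 0 means "restore maximum" */
        int i;

        for (i = 0; i < 12; i++)
                cdb[i] = 0;
        cdb[0] = 0xbb;                  /* SET CD SPEED */
        cdb[2] = (kbps >> 8) & 0xff;    /* requested read speed, big-endian kB/s */
        cdb[3] = kbps & 0xff;
}

int main(void)
{
        uint8_t cdb[12];

        build_set_speed(cdb, 8);        /* "8x" -> 1416 kB/s */
        printf("speed bytes: %02x %02x\n", cdb[2], cdb[3]);     /* 05 88 */
        return 0;
}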
*/ + +int sr_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) +{ + switch (cmd) { + case CDROMREADTOCHDR: + return sr_read_tochdr(cdi, arg); + case CDROMREADTOCENTRY: + return sr_read_tocentry(cdi, arg); + case CDROMPLAYTRKIND: + return sr_play_trkind(cdi, arg); + default: + return -EINVAL; + } +} + +/* ----------------------------------------------------------------------- + * a function to read all sorts of funny cdrom sectors using the READ_CD + * scsi-3 mmc command + * + * lba: linear block address + * format: 0 = data (anything) + * 1 = audio + * 2 = data (mode 1) + * 3 = data (mode 2) + * 4 = data (mode 2 form1) + * 5 = data (mode 2 form2) + * blksize: 2048 | 2336 | 2340 | 2352 + */ + +static int sr_read_cd(Scsi_CD *cd, unsigned char *dest, int lba, int format, int blksize) +{ + struct packet_command cgc; + +#ifdef DEBUG + sr_printk(KERN_INFO, cd, "sr_read_cd lba=%d format=%d blksize=%d\n", + lba, format, blksize); +#endif + + memset(&cgc, 0, sizeof(struct packet_command)); + cgc.cmd[0] = GPCMD_READ_CD; /* READ_CD */ + cgc.cmd[1] = ((format & 7) << 2); + cgc.cmd[2] = (unsigned char) (lba >> 24) & 0xff; + cgc.cmd[3] = (unsigned char) (lba >> 16) & 0xff; + cgc.cmd[4] = (unsigned char) (lba >> 8) & 0xff; + cgc.cmd[5] = (unsigned char) lba & 0xff; + cgc.cmd[8] = 1; + switch (blksize) { + case 2336: + cgc.cmd[9] = 0x58; + break; + case 2340: + cgc.cmd[9] = 0x78; + break; + case 2352: + cgc.cmd[9] = 0xf8; + break; + default: + cgc.cmd[9] = 0x10; + break; + } + cgc.buffer = dest; + cgc.buflen = blksize; + cgc.data_direction = DMA_FROM_DEVICE; + cgc.timeout = IOCTL_TIMEOUT; + return sr_do_ioctl(cd, &cgc); +} + +/* + * read sectors with blocksizes other than 2048 + */ + +static int sr_read_sector(Scsi_CD *cd, int lba, int blksize, unsigned char *dest) +{ + struct packet_command cgc; + int rc; + + /* we try the READ CD command first... */ + if (cd->readcd_known) { + rc = sr_read_cd(cd, dest, lba, 0, blksize); + if (-EDRIVE_CANT_DO_THIS != rc) + return rc; + cd->readcd_known = 0; + sr_printk(KERN_INFO, cd, + "CDROM doesn't support READ CD (0xbe) command\n"); + /* fall & retry the other way */ + } + /* ... if this fails, we switch the blocksize using MODE SELECT */ + if (blksize != cd->device->sector_size) { + if (0 != (rc = sr_set_blocklength(cd, blksize))) + return rc; + } +#ifdef DEBUG + sr_printk(KERN_INFO, cd, "sr_read_sector lba=%d blksize=%d\n", + lba, blksize); +#endif + + memset(&cgc, 0, sizeof(struct packet_command)); + cgc.cmd[0] = GPCMD_READ_10; + cgc.cmd[2] = (unsigned char) (lba >> 24) & 0xff; + cgc.cmd[3] = (unsigned char) (lba >> 16) & 0xff; + cgc.cmd[4] = (unsigned char) (lba >> 8) & 0xff; + cgc.cmd[5] = (unsigned char) lba & 0xff; + cgc.cmd[8] = 1; + cgc.buffer = dest; + cgc.buflen = blksize; + cgc.data_direction = DMA_FROM_DEVICE; + cgc.timeout = IOCTL_TIMEOUT; + rc = sr_do_ioctl(cd, &cgc); + + if (blksize != CD_FRAMESIZE) + rc |= sr_set_blocklength(cd, CD_FRAMESIZE); + return rc; +} + +/* + * read a sector in raw mode to check the sector format + * ret: 1 == mode2 (XA), 0 == mode1, <0 == error + */ + +int sr_is_xa(Scsi_CD *cd) +{ + unsigned char *raw_sector; + int is_xa; + + if (!xa_test) + return 0; + + raw_sector = kmalloc(2048, GFP_KERNEL); + if (!raw_sector) + return -ENOMEM; + if (0 == sr_read_sector(cd, cd->ms_offset + 16, + CD_FRAMESIZE_RAW1, raw_sector)) { + is_xa = (raw_sector[3] == 0x02) ? 1 : 0; + } else { + /* read a raw sector failed for some reason. 
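The switch in sr_read_cd() above picks byte 9 of the READ CD command according to how many bytes per sector the caller wants back. The small user-space restatement below keeps only that mapping; the values are copied from the code above and the helper name is invented.

#include <stdio.h>

static int read_cd_byte9(int blksize)
{
        switch (blksize) {
        case 2336: return 0x58;
        case 2340: return 0x78;
        case 2352: return 0xf8;         /* full raw sector */
        default:   return 0x10;         /* 2048 bytes of user data only */
        }
}

int main(void)
{
        int sizes[] = { 2048, 2336, 2340, 2352 };
        int i;

        for (i = 0; i < 4; i++)
                printf("%d -> 0x%02x\n", sizes[i], read_cd_byte9(sizes[i]));
        return 0;
}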
*/ + is_xa = -1; + } + kfree(raw_sector); +#ifdef DEBUG + sr_printk(KERN_INFO, cd, "sr_is_xa: %d\n", is_xa); +#endif + return is_xa; +} diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c new file mode 100644 index 000000000..a61635326 --- /dev/null +++ b/drivers/scsi/sr_vendor.c @@ -0,0 +1,340 @@ +// SPDX-License-Identifier: GPL-2.0 +/* -*-linux-c-*- + + * vendor-specific code for SCSI CD-ROM's goes here. + * + * This is needed becauce most of the new features (multisession and + * the like) are too new to be included into the SCSI-II standard (to + * be exact: there is'nt anything in my draft copy). + * + * Aug 1997: Ha! Got a SCSI-3 cdrom spec across my fingers. SCSI-3 does + * multisession using the READ TOC command (like SONY). + * + * Rearranged stuff here: SCSI-3 is included allways, support + * for NEC/TOSHIBA/HP commands is optional. + * + * Gerd Knorr + * + * -------------------------------------------------------------------------- + * + * support for XA/multisession-CD's + * + * - NEC: Detection and support of multisession CD's. + * + * - TOSHIBA: Detection and support of multisession CD's. + * Some XA-Sector tweaking, required for older drives. + * + * - SONY: Detection and support of multisession CD's. + * added by Thomas Quinot + * + * - PIONEER, HITACHI, PLEXTOR, MATSHITA, TEAC, PHILIPS: known to + * work with SONY (SCSI3 now) code. + * + * - HP: Much like SONY, but a little different... (Thomas) + * HP-Writers only ??? Maybe other CD-Writers work with this too ? + * HP 6020 writers now supported. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "sr.h" + +#if 0 +#define DEBUG +#endif + +/* here are some constants to sort the vendors into groups */ + +#define VENDOR_SCSI3 1 /* default: scsi-3 mmc */ + +#define VENDOR_NEC 2 +#define VENDOR_TOSHIBA 3 +#define VENDOR_WRITER 4 /* pre-scsi3 writers */ +#define VENDOR_CYGNAL_85ED 5 /* CD-on-a-chip */ + +#define VENDOR_TIMEOUT 30*HZ + +void sr_vendor_init(Scsi_CD *cd) +{ + const char *vendor = cd->device->vendor; + const char *model = cd->device->model; + + /* default */ + cd->vendor = VENDOR_SCSI3; + if (cd->readcd_known) + /* this is true for scsi3/mmc drives - no more checks */ + return; + + if (cd->device->type == TYPE_WORM) { + cd->vendor = VENDOR_WRITER; + + } else if (!strncmp(vendor, "NEC", 3)) { + cd->vendor = VENDOR_NEC; + if (!strncmp(model, "CD-ROM DRIVE:25", 15) || + !strncmp(model, "CD-ROM DRIVE:36", 15) || + !strncmp(model, "CD-ROM DRIVE:83", 15) || + !strncmp(model, "CD-ROM DRIVE:84 ", 16) +#if 0 + /* my NEC 3x returns the read-raw data if a read-raw + is followed by a read for the same sector - aeb */ + || !strncmp(model, "CD-ROM DRIVE:500", 16) +#endif + ) + /* these can't handle multisession, may hang */ + cd->cdi.mask |= CDC_MULTI_SESSION; + + } else if (!strncmp(vendor, "TOSHIBA", 7)) { + cd->vendor = VENDOR_TOSHIBA; + + } else if (!strncmp(vendor, "Beurer", 6) && + !strncmp(model, "Gluco Memory", 12)) { + /* The Beurer GL50 evo uses a Cygnal-manufactured CD-on-a-chip + that only accepts a subset of SCSI commands. Most of the + not-implemented commands are fine to fail, but a few, + particularly around the MMC or Audio commands, will put the + device into an unrecoverable state, so they need to be + avoided at all costs. 
+ */ + cd->vendor = VENDOR_CYGNAL_85ED; + cd->cdi.mask |= ( + CDC_MULTI_SESSION | + CDC_CLOSE_TRAY | CDC_OPEN_TRAY | + CDC_LOCK | + CDC_GENERIC_PACKET | + CDC_PLAY_AUDIO + ); + } +} + + +/* small handy function for switching block length using MODE SELECT, + * used by sr_read_sector() */ + +int sr_set_blocklength(Scsi_CD *cd, int blocklength) +{ + unsigned char *buffer; /* the buffer for the ioctl */ + struct packet_command cgc; + struct ccs_modesel_head *modesel; + int rc, density = 0; + + if (cd->vendor == VENDOR_TOSHIBA) + density = (blocklength > 2048) ? 0x81 : 0x83; + + buffer = kmalloc(512, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + +#ifdef DEBUG + sr_printk(KERN_INFO, cd, "MODE SELECT 0x%x/%d\n", density, blocklength); +#endif + memset(&cgc, 0, sizeof(struct packet_command)); + cgc.cmd[0] = MODE_SELECT; + cgc.cmd[1] = (1 << 4); + cgc.cmd[4] = 12; + modesel = (struct ccs_modesel_head *) buffer; + memset(modesel, 0, sizeof(*modesel)); + modesel->block_desc_length = 0x08; + modesel->density = density; + modesel->block_length_med = (blocklength >> 8) & 0xff; + modesel->block_length_lo = blocklength & 0xff; + cgc.buffer = buffer; + cgc.buflen = sizeof(*modesel); + cgc.data_direction = DMA_TO_DEVICE; + cgc.timeout = VENDOR_TIMEOUT; + if (0 == (rc = sr_do_ioctl(cd, &cgc))) { + cd->device->sector_size = blocklength; + } +#ifdef DEBUG + else + sr_printk(KERN_INFO, cd, + "switching blocklength to %d bytes failed\n", + blocklength); +#endif + kfree(buffer); + return rc; +} + +/* This function gets called after a media change. Checks if the CD is + multisession, asks for offset etc. */ + +int sr_cd_check(struct cdrom_device_info *cdi) +{ + Scsi_CD *cd = cdi->handle; + unsigned long sector; + unsigned char *buffer; /* the buffer for the ioctl */ + struct packet_command cgc; + int rc, no_multi; + + if (cd->cdi.mask & CDC_MULTI_SESSION) + return 0; + + buffer = kmalloc(512, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + sector = 0; /* the multisession sector offset goes here */ + no_multi = 0; /* flag: the drive can't handle multisession */ + rc = 0; + + memset(&cgc, 0, sizeof(struct packet_command)); + + switch (cd->vendor) { + + case VENDOR_SCSI3: + cgc.cmd[0] = READ_TOC; + cgc.cmd[8] = 12; + cgc.cmd[9] = 0x40; + cgc.buffer = buffer; + cgc.buflen = 12; + cgc.quiet = 1; + cgc.data_direction = DMA_FROM_DEVICE; + cgc.timeout = VENDOR_TIMEOUT; + rc = sr_do_ioctl(cd, &cgc); + if (rc != 0) + break; + if ((buffer[0] << 8) + buffer[1] < 0x0a) { + sr_printk(KERN_INFO, cd, "Hmm, seems the drive " + "doesn't support multisession CD's\n"); + no_multi = 1; + break; + } + sector = buffer[11] + (buffer[10] << 8) + + (buffer[9] << 16) + (buffer[8] << 24); + if (buffer[6] <= 1) { + /* ignore sector offsets from first track */ + sector = 0; + } + break; + + case VENDOR_NEC:{ + unsigned long min, sec, frame; + cgc.cmd[0] = 0xde; + cgc.cmd[1] = 0x03; + cgc.cmd[2] = 0xb0; + cgc.buffer = buffer; + cgc.buflen = 0x16; + cgc.quiet = 1; + cgc.data_direction = DMA_FROM_DEVICE; + cgc.timeout = VENDOR_TIMEOUT; + rc = sr_do_ioctl(cd, &cgc); + if (rc != 0) + break; + if (buffer[14] != 0 && buffer[14] != 0xb0) { + sr_printk(KERN_INFO, cd, "Hmm, seems the cdrom " + "doesn't support multisession CD's\n"); + + no_multi = 1; + break; + } + min = bcd2bin(buffer[15]); + sec = bcd2bin(buffer[16]); + frame = bcd2bin(buffer[17]); + sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; + break; + } + + case VENDOR_TOSHIBA:{ + unsigned long min, sec, frame; + + /* we request some disc information (is it a XA-CD ?, + * 
where starts the last session ?) */ + cgc.cmd[0] = 0xc7; + cgc.cmd[1] = 0x03; + cgc.buffer = buffer; + cgc.buflen = 4; + cgc.quiet = 1; + cgc.data_direction = DMA_FROM_DEVICE; + cgc.timeout = VENDOR_TIMEOUT; + rc = sr_do_ioctl(cd, &cgc); + if (rc == -EINVAL) { + sr_printk(KERN_INFO, cd, "Hmm, seems the drive " + "doesn't support multisession CD's\n"); + no_multi = 1; + break; + } + if (rc != 0) + break; + min = bcd2bin(buffer[1]); + sec = bcd2bin(buffer[2]); + frame = bcd2bin(buffer[3]); + sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; + if (sector) + sector -= CD_MSF_OFFSET; + sr_set_blocklength(cd, 2048); + break; + } + + case VENDOR_WRITER: + cgc.cmd[0] = READ_TOC; + cgc.cmd[8] = 0x04; + cgc.cmd[9] = 0x40; + cgc.buffer = buffer; + cgc.buflen = 0x04; + cgc.quiet = 1; + cgc.data_direction = DMA_FROM_DEVICE; + cgc.timeout = VENDOR_TIMEOUT; + rc = sr_do_ioctl(cd, &cgc); + if (rc != 0) { + break; + } + if ((rc = buffer[2]) == 0) { + sr_printk(KERN_WARNING, cd, + "No finished session\n"); + break; + } + cgc.cmd[0] = READ_TOC; /* Read TOC */ + cgc.cmd[6] = rc & 0x7f; /* number of last session */ + cgc.cmd[8] = 0x0c; + cgc.cmd[9] = 0x40; + cgc.buffer = buffer; + cgc.buflen = 12; + cgc.quiet = 1; + cgc.data_direction = DMA_FROM_DEVICE; + cgc.timeout = VENDOR_TIMEOUT; + rc = sr_do_ioctl(cd, &cgc); + if (rc != 0) { + break; + } + sector = buffer[11] + (buffer[10] << 8) + + (buffer[9] << 16) + (buffer[8] << 24); + break; + + default: + /* should not happen */ + sr_printk(KERN_WARNING, cd, + "unknown vendor code (%i), not initialized ?\n", + cd->vendor); + sector = 0; + no_multi = 1; + break; + } + cd->ms_offset = sector; + cd->xa_flag = 0; + if (CDS_AUDIO != sr_disk_status(cdi) && 1 == sr_is_xa(cd)) + cd->xa_flag = 1; + + if (2048 != cd->device->sector_size) { + sr_set_blocklength(cd, 2048); + } + if (no_multi) + cdi->mask |= CDC_MULTI_SESSION; + +#ifdef DEBUG + if (sector) + sr_printk(KERN_DEBUG, cd, "multisession offset=%lu\n", + sector); +#endif + kfree(buffer); + return rc; +} diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c new file mode 100644 index 000000000..338aa8c42 --- /dev/null +++ b/drivers/scsi/st.c @@ -0,0 +1,4931 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + SCSI Tape Driver for Linux version 1.1 and newer. See the accompanying + file Documentation/scsi/st.rst for more information. + + History: + Rewritten from Dwayne Forsyth's SCSI tape driver by Kai Makisara. + Contribution and ideas from several people including (in alphabetical + order) Klaus Ehrenfried, Eugene Exarevsky, Eric Lee Green, Wolfgang Denk, + Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky, + Michael Schaefer, J"org Weule, and Eric Youngdale. + + Copyright 1992 - 2016 Kai Makisara + email Kai.Makisara@kolumbus.fi + + Some small formal changes - aeb, 950809 + + Last modified: 18-JAN-1998 Richard Gooch Devfs support + */ + +static const char *verstr = "20160209"; + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + + +/* The driver prints some debugging information on the console if DEBUG + is defined and non-zero. 
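The NEC and Toshiba branches of sr_cd_check() above turn a BCD-coded MSF address into an absolute sector number (75 frames per second, 60 seconds per minute), and the Toshiba path additionally subtracts the 150-sector lead-in offset (CD_MSF_OFFSET). The user-space sketch below reproduces just those two conversions with made-up input bytes.

#include <stdio.h>

#define CD_SECS         60      /* seconds per minute */
#define CD_FRAMES       75      /* frames (sectors) per second */
#define CD_MSF_OFFSET   150     /* 2-second lead-in before LBA 0 */

static unsigned int bcd2bin(unsigned char bcd)
{
        return (bcd >> 4) * 10 + (bcd & 0x0f);
}

int main(void)
{
        /* pretend the drive reported minute 0x03, second 0x25, frame 0x41 in BCD */
        unsigned int min = bcd2bin(0x03), sec = bcd2bin(0x25), frame = bcd2bin(0x41);
        unsigned long sector = (unsigned long)min * CD_SECS * CD_FRAMES +
                               sec * CD_FRAMES + frame;

        printf("MSF %u:%u:%u -> sector %lu (%lu after the lead-in)\n",
               min, sec, frame, sector, sector - CD_MSF_OFFSET);
        return 0;
}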
*/ +#define DEBUG 1 +#define NO_DEBUG 0 + +#define ST_DEB_MSG KERN_NOTICE +#if DEBUG +/* The message level for the debug messages is currently set to KERN_NOTICE + so that people can easily see the messages. Later when the debugging messages + in the drivers are more widely classified, this may be changed to KERN_DEBUG. */ +#define DEB(a) a +#define DEBC(a) if (debugging) { a ; } +#else +#define DEB(a) +#define DEBC(a) +#endif + +#define ST_KILOBYTE 1024 + +#include "st_options.h" +#include "st.h" + +static int buffer_kbs; +static int max_sg_segs; +static int try_direct_io = TRY_DIRECT_IO; +static int try_rdio = 1; +static int try_wdio = 1; +static int debug_flag; + +static struct class st_sysfs_class; +static const struct attribute_group *st_dev_groups[]; +static const struct attribute_group *st_drv_groups[]; + +MODULE_AUTHOR("Kai Makisara"); +MODULE_DESCRIPTION("SCSI tape (st) driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_CHARDEV_MAJOR(SCSI_TAPE_MAJOR); +MODULE_ALIAS_SCSI_DEVICE(TYPE_TAPE); + +/* Set 'perm' (4th argument) to 0 to disable module_param's definition + * of sysfs parameters (which module_param doesn't yet support). + * Sysfs parameters defined explicitly later. + */ +module_param_named(buffer_kbs, buffer_kbs, int, 0); +MODULE_PARM_DESC(buffer_kbs, "Default driver buffer size for fixed block mode (KB; 32)"); +module_param_named(max_sg_segs, max_sg_segs, int, 0); +MODULE_PARM_DESC(max_sg_segs, "Maximum number of scatter/gather segments to use (256)"); +module_param_named(try_direct_io, try_direct_io, int, 0); +MODULE_PARM_DESC(try_direct_io, "Try direct I/O between user buffer and tape drive (1)"); +module_param_named(debug_flag, debug_flag, int, 0); +MODULE_PARM_DESC(debug_flag, "Enable DEBUG, same as setting debugging=1"); + + +/* Extra parameters for testing */ +module_param_named(try_rdio, try_rdio, int, 0); +MODULE_PARM_DESC(try_rdio, "Try direct read i/o when possible"); +module_param_named(try_wdio, try_wdio, int, 0); +MODULE_PARM_DESC(try_wdio, "Try direct write i/o when possible"); + +#ifndef MODULE +static int write_threshold_kbs; /* retained for compatibility */ +static struct st_dev_parm { + char *name; + int *val; +} parms[] __initdata = { + { + "buffer_kbs", &buffer_kbs + }, + { /* Retained for compatibility with 2.4 */ + "write_threshold_kbs", &write_threshold_kbs + }, + { + "max_sg_segs", NULL + }, + { + "try_direct_io", &try_direct_io + }, + { + "debug_flag", &debug_flag + } +}; +#endif + +/* Restrict the number of modes so that names for all are assigned */ +#if ST_NBR_MODES > 16 +#error "Maximum number of modes is 16" +#endif +/* Bit reversed order to get same names for same minors with all + mode counts */ +static const char *st_formats[] = { + "", "r", "k", "s", "l", "t", "o", "u", + "m", "v", "p", "x", "a", "y", "q", "z"}; + +/* The default definitions have been moved to st_options.h */ + +#define ST_FIXED_BUFFER_SIZE (ST_FIXED_BUFFER_BLOCKS * ST_KILOBYTE) + +/* The buffer size should fit into the 24 bits for length in the + 6-byte SCSI read and write commands. */ +#if ST_FIXED_BUFFER_SIZE >= (2 << 24 - 1) +#error "Buffer size should not exceed (2 << 24 - 1) bytes!" 
+#endif + +static int debugging = DEBUG; + +#define MAX_RETRIES 0 +#define MAX_WRITE_RETRIES 0 +#define MAX_READY_RETRIES 0 +#define NO_TAPE NOT_READY + +#define ST_TIMEOUT (900 * HZ) +#define ST_LONG_TIMEOUT (14000 * HZ) + +/* Remove mode bits and auto-rewind bit (7) */ +#define TAPE_NR(x) ( ((iminor(x) & ~255) >> (ST_NBR_MODE_BITS + 1)) | \ + (iminor(x) & ((1 << ST_MODE_SHIFT)-1))) +#define TAPE_MODE(x) ((iminor(x) & ST_MODE_MASK) >> ST_MODE_SHIFT) + +/* Construct the minor number from the device (d), mode (m), and non-rewind (n) data */ +#define TAPE_MINOR(d, m, n) (((d & ~(255 >> (ST_NBR_MODE_BITS + 1))) << (ST_NBR_MODE_BITS + 1)) | \ + (d & (255 >> (ST_NBR_MODE_BITS + 1))) | (m << ST_MODE_SHIFT) | ((n != 0) << 7) ) + +/* Internal ioctl to set both density (uppermost 8 bits) and blocksize (lower + 24 bits) */ +#define SET_DENS_AND_BLK 0x10001 + +static int st_fixed_buffer_size = ST_FIXED_BUFFER_SIZE; +static int st_max_sg_segs = ST_MAX_SG; + +static int modes_defined; + +static int enlarge_buffer(struct st_buffer *, int); +static void clear_buffer(struct st_buffer *); +static void normalize_buffer(struct st_buffer *); +static int append_to_buffer(const char __user *, struct st_buffer *, int); +static int from_buffer(struct st_buffer *, char __user *, int); +static void move_buffer_data(struct st_buffer *, int); + +static int sgl_map_user_pages(struct st_buffer *, const unsigned int, + unsigned long, size_t, int); +static int sgl_unmap_user_pages(struct st_buffer *, const unsigned int, int); + +static int st_probe(struct device *); +static int st_remove(struct device *); + +static struct scsi_driver st_template = { + .gendrv = { + .name = "st", + .owner = THIS_MODULE, + .probe = st_probe, + .remove = st_remove, + .groups = st_drv_groups, + }, +}; + +static int st_compression(struct scsi_tape *, int); + +static int find_partition(struct scsi_tape *); +static int switch_partition(struct scsi_tape *); + +static int st_int_ioctl(struct scsi_tape *, unsigned int, unsigned long); + +static void scsi_tape_release(struct kref *); + +#define to_scsi_tape(obj) container_of(obj, struct scsi_tape, kref) + +static DEFINE_MUTEX(st_ref_mutex); +static DEFINE_SPINLOCK(st_index_lock); +static DEFINE_SPINLOCK(st_use_lock); +static DEFINE_IDR(st_index_idr); + + + +#ifndef SIGS_FROM_OSST +#define SIGS_FROM_OSST \ + {"OnStream", "SC-", "", "osst"}, \ + {"OnStream", "DI-", "", "osst"}, \ + {"OnStream", "DP-", "", "osst"}, \ + {"OnStream", "USB", "", "osst"}, \ + {"OnStream", "FW-", "", "osst"} +#endif + +static struct scsi_tape *scsi_tape_get(int dev) +{ + struct scsi_tape *STp = NULL; + + mutex_lock(&st_ref_mutex); + spin_lock(&st_index_lock); + + STp = idr_find(&st_index_idr, dev); + if (!STp) goto out; + + kref_get(&STp->kref); + + if (!STp->device) + goto out_put; + + if (scsi_device_get(STp->device)) + goto out_put; + + goto out; + +out_put: + kref_put(&STp->kref, scsi_tape_release); + STp = NULL; +out: + spin_unlock(&st_index_lock); + mutex_unlock(&st_ref_mutex); + return STp; +} + +static void scsi_tape_put(struct scsi_tape *STp) +{ + struct scsi_device *sdev = STp->device; + + mutex_lock(&st_ref_mutex); + kref_put(&STp->kref, scsi_tape_release); + scsi_device_put(sdev); + mutex_unlock(&st_ref_mutex); +} + +struct st_reject_data { + char *vendor; + char *model; + char *rev; + char *driver_hint; /* Name of the correct driver, NULL if unknown */ +}; + +static struct st_reject_data reject_list[] = { + /* {"XXX", "Yy-", "", NULL}, example */ + SIGS_FROM_OSST, + {NULL, }}; + +/* If the device signature is 
on the list of incompatible drives, the + function returns a pointer to the name of the correct driver (if known) */ +static char * st_incompatible(struct scsi_device* SDp) +{ + struct st_reject_data *rp; + + for (rp=&(reject_list[0]); rp->vendor != NULL; rp++) + if (!strncmp(rp->vendor, SDp->vendor, strlen(rp->vendor)) && + !strncmp(rp->model, SDp->model, strlen(rp->model)) && + !strncmp(rp->rev, SDp->rev, strlen(rp->rev))) { + if (rp->driver_hint) + return rp->driver_hint; + else + return "unknown"; + } + return NULL; +} + + +#define st_printk(prefix, t, fmt, a...) \ + sdev_prefix_printk(prefix, (t)->device, (t)->name, fmt, ##a) +#ifdef DEBUG +#define DEBC_printk(t, fmt, a...) \ + if (debugging) { st_printk(ST_DEB_MSG, t, fmt, ##a ); } +#else +#define DEBC_printk(t, fmt, a...) +#endif + +static void st_analyze_sense(struct st_request *SRpnt, struct st_cmdstatus *s) +{ + const u8 *ucp; + const u8 *sense = SRpnt->sense; + + s->have_sense = scsi_normalize_sense(SRpnt->sense, + SCSI_SENSE_BUFFERSIZE, &s->sense_hdr); + s->flags = 0; + + if (s->have_sense) { + s->deferred = 0; + s->remainder_valid = + scsi_get_sense_info_fld(sense, SCSI_SENSE_BUFFERSIZE, &s->uremainder64); + switch (sense[0] & 0x7f) { + case 0x71: + s->deferred = 1; + fallthrough; + case 0x70: + s->fixed_format = 1; + s->flags = sense[2] & 0xe0; + break; + case 0x73: + s->deferred = 1; + fallthrough; + case 0x72: + s->fixed_format = 0; + ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4); + s->flags = ucp ? (ucp[3] & 0xe0) : 0; + break; + } + } +} + + +/* Convert the result to success code */ +static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt) +{ + int result = SRpnt->result; + u8 scode; + DEB(const char *stp;) + char *name = STp->name; + struct st_cmdstatus *cmdstatp; + + if (!result) + return 0; + + cmdstatp = &STp->buffer->cmdstat; + st_analyze_sense(SRpnt, cmdstatp); + + if (cmdstatp->have_sense) + scode = STp->buffer->cmdstat.sense_hdr.sense_key; + else + scode = 0; + + DEB( + if (debugging) { + st_printk(ST_DEB_MSG, STp, + "Error: %x, cmd: %x %x %x %x %x %x\n", result, + SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], + SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); + if (cmdstatp->have_sense) + __scsi_print_sense(STp->device, name, + SRpnt->sense, SCSI_SENSE_BUFFERSIZE); + } ) /* end DEB */ + if (!debugging) { /* Abnormal conditions for tape */ + if (!cmdstatp->have_sense) + st_printk(KERN_WARNING, STp, + "Error %x (driver bt 0, host bt 0x%x).\n", + result, host_byte(result)); + else if (cmdstatp->have_sense && + scode != NO_SENSE && + scode != RECOVERED_ERROR && + /* scode != UNIT_ATTENTION && */ + scode != BLANK_CHECK && + scode != VOLUME_OVERFLOW && + SRpnt->cmd[0] != MODE_SENSE && + SRpnt->cmd[0] != TEST_UNIT_READY) { + + __scsi_print_sense(STp->device, name, + SRpnt->sense, SCSI_SENSE_BUFFERSIZE); + } + } + + if (cmdstatp->fixed_format && + STp->cln_mode >= EXTENDED_SENSE_START) { /* Only fixed format sense */ + if (STp->cln_sense_value) + STp->cleaning_req |= ((SRpnt->sense[STp->cln_mode] & + STp->cln_sense_mask) == STp->cln_sense_value); + else + STp->cleaning_req |= ((SRpnt->sense[STp->cln_mode] & + STp->cln_sense_mask) != 0); + } + if (cmdstatp->have_sense && + cmdstatp->sense_hdr.asc == 0 && cmdstatp->sense_hdr.ascq == 0x17) + STp->cleaning_req = 1; /* ASC and ASCQ => cleaning requested */ + if (cmdstatp->have_sense && scode == UNIT_ATTENTION && cmdstatp->sense_hdr.asc == 0x29) + STp->pos_unknown = 1; /* ASC => power on / reset */ + + STp->pos_unknown |= STp->device->was_reset; + + 
if (cmdstatp->have_sense && + scode == RECOVERED_ERROR +#if ST_RECOVERED_WRITE_FATAL + && SRpnt->cmd[0] != WRITE_6 + && SRpnt->cmd[0] != WRITE_FILEMARKS +#endif + ) { + STp->recover_count++; + STp->recover_reg++; + + DEB( + if (debugging) { + if (SRpnt->cmd[0] == READ_6) + stp = "read"; + else if (SRpnt->cmd[0] == WRITE_6) + stp = "write"; + else + stp = "ioctl"; + st_printk(ST_DEB_MSG, STp, + "Recovered %s error (%d).\n", + stp, STp->recover_count); + } ) /* end DEB */ + + if (cmdstatp->flags == 0) + return 0; + } + return (-EIO); +} + +static struct st_request *st_allocate_request(struct scsi_tape *stp) +{ + struct st_request *streq; + + streq = kzalloc(sizeof(*streq), GFP_KERNEL); + if (streq) + streq->stp = stp; + else { + st_printk(KERN_ERR, stp, + "Can't get SCSI request.\n"); + if (signal_pending(current)) + stp->buffer->syscall_result = -EINTR; + else + stp->buffer->syscall_result = -EBUSY; + } + + return streq; +} + +static void st_release_request(struct st_request *streq) +{ + kfree(streq); +} + +static void st_do_stats(struct scsi_tape *STp, struct request *req) +{ + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); + ktime_t now; + + now = ktime_get(); + if (scmd->cmnd[0] == WRITE_6) { + now = ktime_sub(now, STp->stats->write_time); + atomic64_add(ktime_to_ns(now), &STp->stats->tot_write_time); + atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time); + atomic64_inc(&STp->stats->write_cnt); + if (scmd->result) { + atomic64_add(atomic_read(&STp->stats->last_write_size) + - STp->buffer->cmdstat.residual, + &STp->stats->write_byte_cnt); + if (STp->buffer->cmdstat.residual > 0) + atomic64_inc(&STp->stats->resid_cnt); + } else + atomic64_add(atomic_read(&STp->stats->last_write_size), + &STp->stats->write_byte_cnt); + } else if (scmd->cmnd[0] == READ_6) { + now = ktime_sub(now, STp->stats->read_time); + atomic64_add(ktime_to_ns(now), &STp->stats->tot_read_time); + atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time); + atomic64_inc(&STp->stats->read_cnt); + if (scmd->result) { + atomic64_add(atomic_read(&STp->stats->last_read_size) + - STp->buffer->cmdstat.residual, + &STp->stats->read_byte_cnt); + if (STp->buffer->cmdstat.residual > 0) + atomic64_inc(&STp->stats->resid_cnt); + } else + atomic64_add(atomic_read(&STp->stats->last_read_size), + &STp->stats->read_byte_cnt); + } else { + now = ktime_sub(now, STp->stats->other_time); + atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time); + atomic64_inc(&STp->stats->other_cnt); + } + atomic64_dec(&STp->stats->in_flight); +} + +static enum rq_end_io_ret st_scsi_execute_end(struct request *req, + blk_status_t status) +{ + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); + struct st_request *SRpnt = req->end_io_data; + struct scsi_tape *STp = SRpnt->stp; + struct bio *tmp; + + STp->buffer->cmdstat.midlevel_result = SRpnt->result = scmd->result; + STp->buffer->cmdstat.residual = scmd->resid_len; + + st_do_stats(STp, req); + + tmp = SRpnt->bio; + if (scmd->sense_len) + memcpy(SRpnt->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); + if (SRpnt->waiting) + complete(SRpnt->waiting); + + blk_rq_unmap_user(tmp); + blk_mq_free_request(req); + return RQ_END_IO_NONE; +} + +static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd, + int data_direction, void *buffer, unsigned bufflen, + int timeout, int retries) +{ + struct request *req; + struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data; + int err = 0; + struct scsi_tape *STp = SRpnt->stp; + struct scsi_cmnd *scmd; + + req = 
scsi_alloc_request(SRpnt->stp->device->request_queue, + data_direction == DMA_TO_DEVICE ? + REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); + if (IS_ERR(req)) + return PTR_ERR(req); + scmd = blk_mq_rq_to_pdu(req); + req->rq_flags |= RQF_QUIET; + + mdata->null_mapped = 1; + + if (bufflen) { + err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen, + GFP_KERNEL); + if (err) { + blk_mq_free_request(req); + return err; + } + } + + atomic64_inc(&STp->stats->in_flight); + if (cmd[0] == WRITE_6) { + atomic_set(&STp->stats->last_write_size, bufflen); + STp->stats->write_time = ktime_get(); + } else if (cmd[0] == READ_6) { + atomic_set(&STp->stats->last_read_size, bufflen); + STp->stats->read_time = ktime_get(); + } else { + STp->stats->other_time = ktime_get(); + } + + SRpnt->bio = req->bio; + scmd->cmd_len = COMMAND_SIZE(cmd[0]); + memcpy(scmd->cmnd, cmd, scmd->cmd_len); + req->timeout = timeout; + scmd->allowed = retries; + req->end_io = st_scsi_execute_end; + req->end_io_data = SRpnt; + + blk_execute_rq_nowait(req, true); + return 0; +} + +/* Do the scsi command. Waits until command performed if do_wait is true. + Otherwise write_behind_check() is used to check that the command + has finished. */ +static struct st_request * +st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd, + int bytes, int direction, int timeout, int retries, int do_wait) +{ + struct completion *waiting; + struct rq_map_data *mdata = &STp->buffer->map_data; + int ret; + + /* if async, make sure there's no command outstanding */ + if (!do_wait && ((STp->buffer)->last_SRpnt)) { + st_printk(KERN_ERR, STp, + "Async command already active.\n"); + if (signal_pending(current)) + (STp->buffer)->syscall_result = (-EINTR); + else + (STp->buffer)->syscall_result = (-EBUSY); + return NULL; + } + + if (!SRpnt) { + SRpnt = st_allocate_request(STp); + if (!SRpnt) + return NULL; + } + + /* If async IO, set last_SRpnt. This ptr tells write_behind_check + which IO is outstanding. It's nulled out when the IO completes. */ + if (!do_wait) + (STp->buffer)->last_SRpnt = SRpnt; + + waiting = &STp->wait; + init_completion(waiting); + SRpnt->waiting = waiting; + + if (STp->buffer->do_dio) { + mdata->page_order = 0; + mdata->nr_entries = STp->buffer->sg_segs; + mdata->pages = STp->buffer->mapped_pages; + } else { + mdata->page_order = STp->buffer->reserved_page_order; + mdata->nr_entries = + DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order); + mdata->pages = STp->buffer->reserved_pages; + mdata->offset = 0; + } + + memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd)); + STp->buffer->cmdstat.have_sense = 0; + STp->buffer->syscall_result = 0; + + ret = st_scsi_execute(SRpnt, cmd, direction, NULL, bytes, timeout, + retries); + if (ret) { + /* could not allocate the buffer or request was too large */ + (STp->buffer)->syscall_result = (-EBUSY); + (STp->buffer)->last_SRpnt = NULL; + } else if (do_wait) { + wait_for_completion(waiting); + SRpnt->waiting = NULL; + (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt); + } + + return SRpnt; +} + + +/* Handle the write-behind checking (waits for completion). Returns -ENOSPC if + write has been correct but EOM early warning reached, -EIO if write ended in + error or zero if write successful. Asynchronous writes are used only in + variable block mode. 
*/ +static int write_behind_check(struct scsi_tape * STp) +{ + int retval = 0; + struct st_buffer *STbuffer; + struct st_partstat *STps; + struct st_cmdstatus *cmdstatp; + struct st_request *SRpnt; + + STbuffer = STp->buffer; + if (!STbuffer->writing) + return 0; + + DEB( + if (STp->write_pending) + STp->nbr_waits++; + else + STp->nbr_finished++; + ) /* end DEB */ + + wait_for_completion(&(STp->wait)); + SRpnt = STbuffer->last_SRpnt; + STbuffer->last_SRpnt = NULL; + SRpnt->waiting = NULL; + + (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt); + st_release_request(SRpnt); + + STbuffer->buffer_bytes -= STbuffer->writing; + STps = &(STp->ps[STp->partition]); + if (STps->drv_block >= 0) { + if (STp->block_size == 0) + STps->drv_block++; + else + STps->drv_block += STbuffer->writing / STp->block_size; + } + + cmdstatp = &STbuffer->cmdstat; + if (STbuffer->syscall_result) { + retval = -EIO; + if (cmdstatp->have_sense && !cmdstatp->deferred && + (cmdstatp->flags & SENSE_EOM) && + (cmdstatp->sense_hdr.sense_key == NO_SENSE || + cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR)) { + /* EOM at write-behind, has all data been written? */ + if (!cmdstatp->remainder_valid || + cmdstatp->uremainder64 == 0) + retval = -ENOSPC; + } + if (retval == -EIO) + STps->drv_block = -1; + } + STbuffer->writing = 0; + + DEB(if (debugging && retval) + st_printk(ST_DEB_MSG, STp, + "Async write error %x, return value %d.\n", + STbuffer->cmdstat.midlevel_result, retval);) /* end DEB */ + + return retval; +} + + +/* Step over EOF if it has been inadvertently crossed (ioctl not used because + it messes up the block number). */ +static int cross_eof(struct scsi_tape * STp, int forward) +{ + struct st_request *SRpnt; + unsigned char cmd[MAX_COMMAND_SIZE]; + + cmd[0] = SPACE; + cmd[1] = 0x01; /* Space FileMarks */ + if (forward) { + cmd[2] = cmd[3] = 0; + cmd[4] = 1; + } else + cmd[2] = cmd[3] = cmd[4] = 0xff; /* -1 filemarks */ + cmd[5] = 0; + + DEBC_printk(STp, "Stepping over filemark %s.\n", + forward ? "forward" : "backward"); + + SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, + STp->device->request_queue->rq_timeout, + MAX_RETRIES, 1); + if (!SRpnt) + return (STp->buffer)->syscall_result; + + st_release_request(SRpnt); + SRpnt = NULL; + + if ((STp->buffer)->cmdstat.midlevel_result != 0) + st_printk(KERN_ERR, STp, + "Stepping over filemark %s failed.\n", + forward ? "forward" : "backward"); + + return (STp->buffer)->syscall_result; +} + + +/* Flush the write buffer (never need to write if variable blocksize). 
*/ +static int st_flush_write_buffer(struct scsi_tape * STp) +{ + int transfer, blks; + int result; + unsigned char cmd[MAX_COMMAND_SIZE]; + struct st_request *SRpnt; + struct st_partstat *STps; + + result = write_behind_check(STp); + if (result) + return result; + + result = 0; + if (STp->dirty == 1) { + + transfer = STp->buffer->buffer_bytes; + DEBC_printk(STp, "Flushing %d bytes.\n", transfer); + + memset(cmd, 0, MAX_COMMAND_SIZE); + cmd[0] = WRITE_6; + cmd[1] = 1; + blks = transfer / STp->block_size; + cmd[2] = blks >> 16; + cmd[3] = blks >> 8; + cmd[4] = blks; + + SRpnt = st_do_scsi(NULL, STp, cmd, transfer, DMA_TO_DEVICE, + STp->device->request_queue->rq_timeout, + MAX_WRITE_RETRIES, 1); + if (!SRpnt) + return (STp->buffer)->syscall_result; + + STps = &(STp->ps[STp->partition]); + if ((STp->buffer)->syscall_result != 0) { + struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; + + if (cmdstatp->have_sense && !cmdstatp->deferred && + (cmdstatp->flags & SENSE_EOM) && + (cmdstatp->sense_hdr.sense_key == NO_SENSE || + cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) && + (!cmdstatp->remainder_valid || + cmdstatp->uremainder64 == 0)) { /* All written at EOM early warning */ + STp->dirty = 0; + (STp->buffer)->buffer_bytes = 0; + if (STps->drv_block >= 0) + STps->drv_block += blks; + result = (-ENOSPC); + } else { + st_printk(KERN_ERR, STp, "Error on flush.\n"); + STps->drv_block = (-1); + result = (-EIO); + } + } else { + if (STps->drv_block >= 0) + STps->drv_block += blks; + STp->dirty = 0; + (STp->buffer)->buffer_bytes = 0; + } + st_release_request(SRpnt); + SRpnt = NULL; + } + return result; +} + + +/* Flush the tape buffer. The tape will be positioned correctly unless + seek_next is true. */ +static int flush_buffer(struct scsi_tape *STp, int seek_next) +{ + int backspace, result; + struct st_partstat *STps; + + /* + * If there was a bus reset, block further access + * to this device. 
+ */ + if (STp->pos_unknown) + return (-EIO); + + if (STp->ready != ST_READY) + return 0; + STps = &(STp->ps[STp->partition]); + if (STps->rw == ST_WRITING) /* Writing */ + return st_flush_write_buffer(STp); + + if (STp->block_size == 0) + return 0; + + backspace = ((STp->buffer)->buffer_bytes + + (STp->buffer)->read_pointer) / STp->block_size - + ((STp->buffer)->read_pointer + STp->block_size - 1) / + STp->block_size; + (STp->buffer)->buffer_bytes = 0; + (STp->buffer)->read_pointer = 0; + result = 0; + if (!seek_next) { + if (STps->eof == ST_FM_HIT) { + result = cross_eof(STp, 0); /* Back over the EOF hit */ + if (!result) + STps->eof = ST_NOEOF; + else { + if (STps->drv_file >= 0) + STps->drv_file++; + STps->drv_block = 0; + } + } + if (!result && backspace > 0) + result = st_int_ioctl(STp, MTBSR, backspace); + } else if (STps->eof == ST_FM_HIT) { + if (STps->drv_file >= 0) + STps->drv_file++; + STps->drv_block = 0; + STps->eof = ST_NOEOF; + } + return result; + +} + +/* Set the mode parameters */ +static int set_mode_densblk(struct scsi_tape * STp, struct st_modedef * STm) +{ + int set_it = 0; + unsigned long arg; + + if (!STp->density_changed && + STm->default_density >= 0 && + STm->default_density != STp->density) { + arg = STm->default_density; + set_it = 1; + } else + arg = STp->density; + arg <<= MT_ST_DENSITY_SHIFT; + if (!STp->blksize_changed && + STm->default_blksize >= 0 && + STm->default_blksize != STp->block_size) { + arg |= STm->default_blksize; + set_it = 1; + } else + arg |= STp->block_size; + if (set_it && + st_int_ioctl(STp, SET_DENS_AND_BLK, arg)) { + st_printk(KERN_WARNING, STp, + "Can't set default block size to %d bytes " + "and density %x.\n", + STm->default_blksize, STm->default_density); + if (modes_defined) + return (-EINVAL); + } + return 0; +} + + +/* Lock or unlock the drive door. Don't use when st_request allocated. */ +static int do_door_lock(struct scsi_tape * STp, int do_lock) +{ + int retval; + + DEBC_printk(STp, "%socking drive door.\n", do_lock ? "L" : "Unl"); + + retval = scsi_set_medium_removal(STp->device, + do_lock ? SCSI_REMOVAL_PREVENT : SCSI_REMOVAL_ALLOW); + if (!retval) + STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED; + else + STp->door_locked = ST_LOCK_FAILS; + return retval; +} + + +/* Set the internal state after reset */ +static void reset_state(struct scsi_tape *STp) +{ + int i; + struct st_partstat *STps; + + STp->pos_unknown = 0; + for (i = 0; i < ST_NBR_PARTITIONS; i++) { + STps = &(STp->ps[i]); + STps->rw = ST_IDLE; + STps->eof = ST_NOEOF; + STps->at_sm = 0; + STps->last_block_valid = 0; + STps->drv_block = -1; + STps->drv_file = -1; + } + if (STp->can_partitions) { + STp->partition = find_partition(STp); + if (STp->partition < 0) + STp->partition = 0; + STp->new_partition = STp->partition; + } +} + +/* Test if the drive is ready. Returns either one of the codes below or a negative system + error code. */ +#define CHKRES_READY 0 +#define CHKRES_NEW_SESSION 1 +#define CHKRES_NOT_READY 2 +#define CHKRES_NO_TAPE 3 + +#define MAX_ATTENTIONS 10 + +static int test_ready(struct scsi_tape *STp, int do_wait) +{ + int attentions, waits, max_wait, scode; + int retval = CHKRES_READY, new_session = 0; + unsigned char cmd[MAX_COMMAND_SIZE]; + struct st_request *SRpnt = NULL; + struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; + + max_wait = do_wait ? 
ST_BLOCK_SECONDS : 0; + + for (attentions=waits=0; ; ) { + memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE); + cmd[0] = TEST_UNIT_READY; + SRpnt = st_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE, + STp->long_timeout, MAX_READY_RETRIES, 1); + + if (!SRpnt) { + retval = (STp->buffer)->syscall_result; + break; + } + + if (cmdstatp->have_sense) { + + scode = cmdstatp->sense_hdr.sense_key; + + if (scode == UNIT_ATTENTION) { /* New media? */ + new_session = 1; + if (attentions < MAX_ATTENTIONS) { + attentions++; + continue; + } + else { + retval = (-EIO); + break; + } + } + + if (scode == NOT_READY) { + if (waits < max_wait) { + if (msleep_interruptible(1000)) { + retval = (-EINTR); + break; + } + waits++; + continue; + } + else { + if ((STp->device)->scsi_level >= SCSI_2 && + cmdstatp->sense_hdr.asc == 0x3a) /* Check ASC */ + retval = CHKRES_NO_TAPE; + else + retval = CHKRES_NOT_READY; + break; + } + } + } + + retval = (STp->buffer)->syscall_result; + if (!retval) + retval = new_session ? CHKRES_NEW_SESSION : CHKRES_READY; + break; + } + + if (SRpnt != NULL) + st_release_request(SRpnt); + return retval; +} + + +/* See if the drive is ready and gather information about the tape. Return values: + < 0 negative error code from errno.h + 0 drive ready + 1 drive not ready (possibly no tape) +*/ +static int check_tape(struct scsi_tape *STp, struct file *filp) +{ + int i, retval, new_session = 0, do_wait; + unsigned char cmd[MAX_COMMAND_SIZE], saved_cleaning; + unsigned short st_flags = filp->f_flags; + struct st_request *SRpnt = NULL; + struct st_modedef *STm; + struct st_partstat *STps; + struct inode *inode = file_inode(filp); + int mode = TAPE_MODE(inode); + + STp->ready = ST_READY; + + if (mode != STp->current_mode) { + DEBC_printk(STp, "Mode change from %d to %d.\n", + STp->current_mode, mode); + new_session = 1; + STp->current_mode = mode; + } + STm = &(STp->modes[STp->current_mode]); + + saved_cleaning = STp->cleaning_req; + STp->cleaning_req = 0; + + do_wait = ((filp->f_flags & O_NONBLOCK) == 0); + retval = test_ready(STp, do_wait); + + if (retval < 0) + goto err_out; + + if (retval == CHKRES_NEW_SESSION) { + STp->pos_unknown = 0; + STp->partition = STp->new_partition = 0; + if (STp->can_partitions) + STp->nbr_partitions = 1; /* This guess will be updated later + if necessary */ + for (i = 0; i < ST_NBR_PARTITIONS; i++) { + STps = &(STp->ps[i]); + STps->rw = ST_IDLE; + STps->eof = ST_NOEOF; + STps->at_sm = 0; + STps->last_block_valid = 0; + STps->drv_block = 0; + STps->drv_file = 0; + } + new_session = 1; + } + else { + STp->cleaning_req |= saved_cleaning; + + if (retval == CHKRES_NOT_READY || retval == CHKRES_NO_TAPE) { + if (retval == CHKRES_NO_TAPE) + STp->ready = ST_NO_TAPE; + else + STp->ready = ST_NOT_READY; + + STp->density = 0; /* Clear the erroneous "residue" */ + STp->write_prot = 0; + STp->block_size = 0; + STp->ps[0].drv_file = STp->ps[0].drv_block = (-1); + STp->partition = STp->new_partition = 0; + STp->door_locked = ST_UNLOCKED; + return CHKRES_NOT_READY; + } + } + + if (STp->omit_blklims) + STp->min_block = STp->max_block = (-1); + else { + memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE); + cmd[0] = READ_BLOCK_LIMITS; + + SRpnt = st_do_scsi(SRpnt, STp, cmd, 6, DMA_FROM_DEVICE, + STp->device->request_queue->rq_timeout, + MAX_READY_RETRIES, 1); + if (!SRpnt) { + retval = (STp->buffer)->syscall_result; + goto err_out; + } + + if (!SRpnt->result && !STp->buffer->cmdstat.have_sense) { + STp->max_block = ((STp->buffer)->b_data[1] << 16) | + ((STp->buffer)->b_data[2] << 8) | 
(STp->buffer)->b_data[3]; + STp->min_block = ((STp->buffer)->b_data[4] << 8) | + (STp->buffer)->b_data[5]; + if ( DEB( debugging || ) !STp->inited) + st_printk(KERN_INFO, STp, + "Block limits %d - %d bytes.\n", + STp->min_block, STp->max_block); + } else { + STp->min_block = STp->max_block = (-1); + DEBC_printk(STp, "Can't read block limits.\n"); + } + } + + memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE); + cmd[0] = MODE_SENSE; + cmd[4] = 12; + + SRpnt = st_do_scsi(SRpnt, STp, cmd, 12, DMA_FROM_DEVICE, + STp->device->request_queue->rq_timeout, + MAX_READY_RETRIES, 1); + if (!SRpnt) { + retval = (STp->buffer)->syscall_result; + goto err_out; + } + + if ((STp->buffer)->syscall_result != 0) { + DEBC_printk(STp, "No Mode Sense.\n"); + STp->block_size = ST_DEFAULT_BLOCK; /* Educated guess (?) */ + (STp->buffer)->syscall_result = 0; /* Prevent error propagation */ + STp->drv_write_prot = 0; + } else { + DEBC_printk(STp,"Mode sense. Length %d, " + "medium %x, WBS %x, BLL %d\n", + (STp->buffer)->b_data[0], + (STp->buffer)->b_data[1], + (STp->buffer)->b_data[2], + (STp->buffer)->b_data[3]); + + if ((STp->buffer)->b_data[3] >= 8) { + STp->drv_buffer = ((STp->buffer)->b_data[2] >> 4) & 7; + STp->density = (STp->buffer)->b_data[4]; + STp->block_size = (STp->buffer)->b_data[9] * 65536 + + (STp->buffer)->b_data[10] * 256 + (STp->buffer)->b_data[11]; + DEBC_printk(STp, "Density %x, tape length: %x, " + "drv buffer: %d\n", + STp->density, + (STp->buffer)->b_data[5] * 65536 + + (STp->buffer)->b_data[6] * 256 + + (STp->buffer)->b_data[7], + STp->drv_buffer); + } + STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0; + if (!STp->drv_buffer && STp->immediate_filemark) { + st_printk(KERN_WARNING, STp, + "non-buffered tape: disabling " + "writing immediate filemarks\n"); + STp->immediate_filemark = 0; + } + } + st_release_request(SRpnt); + SRpnt = NULL; + STp->inited = 1; + + if (STp->block_size > 0) + (STp->buffer)->buffer_blocks = + (STp->buffer)->buffer_size / STp->block_size; + else + (STp->buffer)->buffer_blocks = 1; + (STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0; + + DEBC_printk(STp, "Block size: %d, buffer size: %d (%d blocks).\n", + STp->block_size, (STp->buffer)->buffer_size, + (STp->buffer)->buffer_blocks); + + if (STp->drv_write_prot) { + STp->write_prot = 1; + + DEBC_printk(STp, "Write protected\n"); + + if (do_wait && + ((st_flags & O_ACCMODE) == O_WRONLY || + (st_flags & O_ACCMODE) == O_RDWR)) { + retval = (-EROFS); + goto err_out; + } + } + + if (STp->can_partitions && STp->nbr_partitions < 1) { + /* This code is reached when the device is opened for the first time + after the driver has been initialized with tape in the drive and the + partition support has been enabled. 
*/ + DEBC_printk(STp, "Updating partition number in status.\n"); + if ((STp->partition = find_partition(STp)) < 0) { + retval = STp->partition; + goto err_out; + } + STp->new_partition = STp->partition; + STp->nbr_partitions = 1; /* This guess will be updated when necessary */ + } + + if (new_session) { /* Change the drive parameters for the new mode */ + STp->density_changed = STp->blksize_changed = 0; + STp->compression_changed = 0; + if (!(STm->defaults_for_writes) && + (retval = set_mode_densblk(STp, STm)) < 0) + goto err_out; + + if (STp->default_drvbuffer != 0xff) { + if (st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer)) + st_printk(KERN_WARNING, STp, + "Can't set default drive " + "buffering to %d.\n", + STp->default_drvbuffer); + } + } + + return CHKRES_READY; + + err_out: + return retval; +} + + + /* Open the device. Needs to take the BKL only because of incrementing the SCSI host + module count. */ +static int st_open(struct inode *inode, struct file *filp) +{ + int i, retval = (-EIO); + int resumed = 0; + struct scsi_tape *STp; + struct st_partstat *STps; + int dev = TAPE_NR(inode); + + /* + * We really want to do nonseekable_open(inode, filp); here, but some + * versions of tar incorrectly call lseek on tapes and bail out if that + * fails. So we disallow pread() and pwrite(), but permit lseeks. + */ + filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE); + + if (!(STp = scsi_tape_get(dev))) { + return -ENXIO; + } + + filp->private_data = STp; + + spin_lock(&st_use_lock); + if (STp->in_use) { + spin_unlock(&st_use_lock); + DEBC_printk(STp, "Device already in use.\n"); + scsi_tape_put(STp); + return (-EBUSY); + } + + STp->in_use = 1; + spin_unlock(&st_use_lock); + STp->rew_at_close = STp->autorew_dev = (iminor(inode) & 0x80) == 0; + + if (scsi_autopm_get_device(STp->device) < 0) { + retval = -EIO; + goto err_out; + } + resumed = 1; + if (!scsi_block_when_processing_errors(STp->device)) { + retval = (-ENXIO); + goto err_out; + } + + /* See that we have at least a one page buffer available */ + if (!enlarge_buffer(STp->buffer, PAGE_SIZE)) { + st_printk(KERN_WARNING, STp, + "Can't allocate one page tape buffer.\n"); + retval = (-EOVERFLOW); + goto err_out; + } + + (STp->buffer)->cleared = 0; + (STp->buffer)->writing = 0; + (STp->buffer)->syscall_result = 0; + + STp->write_prot = ((filp->f_flags & O_ACCMODE) == O_RDONLY); + + STp->dirty = 0; + for (i = 0; i < ST_NBR_PARTITIONS; i++) { + STps = &(STp->ps[i]); + STps->rw = ST_IDLE; + } + STp->try_dio_now = STp->try_dio; + STp->recover_count = 0; + DEB( STp->nbr_waits = STp->nbr_finished = 0; + STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = 0; ) + + retval = check_tape(STp, filp); + if (retval < 0) + goto err_out; + if ((filp->f_flags & O_NONBLOCK) == 0 && + retval != CHKRES_READY) { + if (STp->ready == NO_TAPE) + retval = (-ENOMEDIUM); + else + retval = (-EIO); + goto err_out; + } + return 0; + + err_out: + normalize_buffer(STp->buffer); + spin_lock(&st_use_lock); + STp->in_use = 0; + spin_unlock(&st_use_lock); + if (resumed) + scsi_autopm_put_device(STp->device); + scsi_tape_put(STp); + return retval; + +} + + +/* Flush the tape buffer before close */ +static int st_flush(struct file *filp, fl_owner_t id) +{ + int result = 0, result2; + unsigned char cmd[MAX_COMMAND_SIZE]; + struct st_request *SRpnt; + struct scsi_tape *STp = filp->private_data; + struct st_modedef *STm = &(STp->modes[STp->current_mode]); + struct st_partstat *STps = &(STp->ps[STp->partition]); + + if (file_count(filp) > 1) + return 0; + + if (STps->rw == 
ST_WRITING && !STp->pos_unknown) { + result = st_flush_write_buffer(STp); + if (result != 0 && result != (-ENOSPC)) + goto out; + } + + if (STp->can_partitions && + (result2 = switch_partition(STp)) < 0) { + DEBC_printk(STp, "switch_partition at close failed.\n"); + if (result == 0) + result = result2; + goto out; + } + + DEBC( if (STp->nbr_requests) + st_printk(KERN_DEBUG, STp, + "Number of r/w requests %d, dio used in %d, " + "pages %d.\n", STp->nbr_requests, STp->nbr_dio, + STp->nbr_pages)); + + if (STps->rw == ST_WRITING && !STp->pos_unknown) { + struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; + +#if DEBUG + DEBC_printk(STp, "Async write waits %d, finished %d.\n", + STp->nbr_waits, STp->nbr_finished); +#endif + memset(cmd, 0, MAX_COMMAND_SIZE); + cmd[0] = WRITE_FILEMARKS; + if (STp->immediate_filemark) + cmd[1] = 1; + cmd[4] = 1 + STp->two_fm; + + SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, + STp->device->request_queue->rq_timeout, + MAX_WRITE_RETRIES, 1); + if (!SRpnt) { + result = (STp->buffer)->syscall_result; + goto out; + } + + if (STp->buffer->syscall_result == 0 || + (cmdstatp->have_sense && !cmdstatp->deferred && + (cmdstatp->flags & SENSE_EOM) && + (cmdstatp->sense_hdr.sense_key == NO_SENSE || + cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) && + (!cmdstatp->remainder_valid || cmdstatp->uremainder64 == 0))) { + /* Write successful at EOM */ + st_release_request(SRpnt); + SRpnt = NULL; + if (STps->drv_file >= 0) + STps->drv_file++; + STps->drv_block = 0; + if (STp->two_fm) + cross_eof(STp, 0); + STps->eof = ST_FM; + } + else { /* Write error */ + st_release_request(SRpnt); + SRpnt = NULL; + st_printk(KERN_ERR, STp, + "Error on write filemark.\n"); + if (result == 0) + result = (-EIO); + } + + DEBC_printk(STp, "Buffer flushed, %d EOF(s) written\n", cmd[4]); + } else if (!STp->rew_at_close) { + STps = &(STp->ps[STp->partition]); + if (!STm->sysv || STps->rw != ST_READING) { + if (STp->can_bsr) + result = flush_buffer(STp, 0); + else if (STps->eof == ST_FM_HIT) { + result = cross_eof(STp, 0); + if (result) { + if (STps->drv_file >= 0) + STps->drv_file++; + STps->drv_block = 0; + STps->eof = ST_FM; + } else + STps->eof = ST_NOEOF; + } + } else if ((STps->eof == ST_NOEOF && + !(result = cross_eof(STp, 1))) || + STps->eof == ST_FM_HIT) { + if (STps->drv_file >= 0) + STps->drv_file++; + STps->drv_block = 0; + STps->eof = ST_FM; + } + } + + out: + if (STp->rew_at_close) { + result2 = st_int_ioctl(STp, MTREW, 1); + if (result == 0) + result = result2; + } + return result; +} + + +/* Close the device and release it. BKL is not needed: this is the only thread + accessing this tape. */ +static int st_release(struct inode *inode, struct file *filp) +{ + struct scsi_tape *STp = filp->private_data; + + if (STp->door_locked == ST_LOCKED_AUTO) + do_door_lock(STp, 0); + + normalize_buffer(STp->buffer); + spin_lock(&st_use_lock); + STp->in_use = 0; + spin_unlock(&st_use_lock); + scsi_autopm_put_device(STp->device); + scsi_tape_put(STp); + + return 0; +} + +/* The checks common to both reading and writing */ +static ssize_t rw_checks(struct scsi_tape *STp, struct file *filp, size_t count) +{ + ssize_t retval = 0; + + /* + * If we are in the middle of error recovery, don't let anyone + * else try and use this device. Also, if error recovery fails, it + * may try and take the device offline, in which case all further + * access to the device is prohibited. 
+ */ + if (!scsi_block_when_processing_errors(STp->device)) { + retval = (-ENXIO); + goto out; + } + + if (STp->ready != ST_READY) { + if (STp->ready == ST_NO_TAPE) + retval = (-ENOMEDIUM); + else + retval = (-EIO); + goto out; + } + + if (! STp->modes[STp->current_mode].defined) { + retval = (-ENXIO); + goto out; + } + + + /* + * If there was a bus reset, block further access + * to this device. + */ + if (STp->pos_unknown) { + retval = (-EIO); + goto out; + } + + if (count == 0) + goto out; + + DEB( + if (!STp->in_use) { + st_printk(ST_DEB_MSG, STp, + "Incorrect device.\n"); + retval = (-EIO); + goto out; + } ) /* end DEB */ + + if (STp->can_partitions && + (retval = switch_partition(STp)) < 0) + goto out; + + if (STp->block_size == 0 && STp->max_block > 0 && + (count < STp->min_block || count > STp->max_block)) { + retval = (-EINVAL); + goto out; + } + + if (STp->do_auto_lock && STp->door_locked == ST_UNLOCKED && + !do_door_lock(STp, 1)) + STp->door_locked = ST_LOCKED_AUTO; + + out: + return retval; +} + + +static int setup_buffering(struct scsi_tape *STp, const char __user *buf, + size_t count, int is_read) +{ + int i, bufsize, retval = 0; + struct st_buffer *STbp = STp->buffer; + + if (is_read) + i = STp->try_dio_now && try_rdio; + else + i = STp->try_dio_now && try_wdio; + + if (i && ((unsigned long)buf & queue_dma_alignment( + STp->device->request_queue)) == 0) { + i = sgl_map_user_pages(STbp, STbp->use_sg, (unsigned long)buf, + count, (is_read ? READ : WRITE)); + if (i > 0) { + STbp->do_dio = i; + STbp->buffer_bytes = 0; /* can be used as transfer counter */ + } + else + STbp->do_dio = 0; /* fall back to buffering with any error */ + STbp->sg_segs = STbp->do_dio; + DEB( + if (STbp->do_dio) { + STp->nbr_dio++; + STp->nbr_pages += STbp->do_dio; + } + ) + } else + STbp->do_dio = 0; + DEB( STp->nbr_requests++; ) + + if (!STbp->do_dio) { + if (STp->block_size) + bufsize = STp->block_size > st_fixed_buffer_size ? 
+ STp->block_size : st_fixed_buffer_size; + else { + bufsize = count; + /* Make sure that data from previous user is not leaked even if + HBA does not return correct residual */ + if (is_read && STp->sili && !STbp->cleared) + clear_buffer(STbp); + } + + if (bufsize > STbp->buffer_size && + !enlarge_buffer(STbp, bufsize)) { + st_printk(KERN_WARNING, STp, + "Can't allocate %d byte tape buffer.\n", + bufsize); + retval = (-EOVERFLOW); + goto out; + } + if (STp->block_size) + STbp->buffer_blocks = bufsize / STp->block_size; + } + + out: + return retval; +} + + +/* Can be called more than once after each setup_buffer() */ +static void release_buffering(struct scsi_tape *STp, int is_read) +{ + struct st_buffer *STbp; + + STbp = STp->buffer; + if (STbp->do_dio) { + sgl_unmap_user_pages(STbp, STbp->do_dio, is_read); + STbp->do_dio = 0; + STbp->sg_segs = 0; + } +} + + +/* Write command */ +static ssize_t +st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) +{ + ssize_t total; + ssize_t i, do_count, blks, transfer; + ssize_t retval; + int undone, retry_eot = 0, scode; + int async_write; + unsigned char cmd[MAX_COMMAND_SIZE]; + const char __user *b_point; + struct st_request *SRpnt = NULL; + struct scsi_tape *STp = filp->private_data; + struct st_modedef *STm; + struct st_partstat *STps; + struct st_buffer *STbp; + + if (mutex_lock_interruptible(&STp->lock)) + return -ERESTARTSYS; + + retval = rw_checks(STp, filp, count); + if (retval || count == 0) + goto out; + + /* Write must be integral number of blocks */ + if (STp->block_size != 0 && (count % STp->block_size) != 0) { + st_printk(KERN_WARNING, STp, + "Write not multiple of tape block size.\n"); + retval = (-EINVAL); + goto out; + } + + STm = &(STp->modes[STp->current_mode]); + STps = &(STp->ps[STp->partition]); + + if (STp->write_prot) { + retval = (-EACCES); + goto out; + } + + + if (STps->rw == ST_READING) { + retval = flush_buffer(STp, 0); + if (retval) + goto out; + STps->rw = ST_WRITING; + } else if (STps->rw != ST_WRITING && + STps->drv_file == 0 && STps->drv_block == 0) { + if ((retval = set_mode_densblk(STp, STm)) < 0) + goto out; + if (STm->default_compression != ST_DONT_TOUCH && + !(STp->compression_changed)) { + if (st_compression(STp, (STm->default_compression == ST_YES))) { + st_printk(KERN_WARNING, STp, + "Can't set default compression.\n"); + if (modes_defined) { + retval = (-EINVAL); + goto out; + } + } + } + } + + STbp = STp->buffer; + i = write_behind_check(STp); + if (i) { + if (i == -ENOSPC) + STps->eof = ST_EOM_OK; + else + STps->eof = ST_EOM_ERROR; + } + + if (STps->eof == ST_EOM_OK) { + STps->eof = ST_EOD_1; /* allow next write */ + retval = (-ENOSPC); + goto out; + } + else if (STps->eof == ST_EOM_ERROR) { + retval = (-EIO); + goto out; + } + + /* Check the buffer readability in cases where copy_user might catch + the problems after some tape movement. 
*/ + if (STp->block_size != 0 && + !STbp->do_dio && + (copy_from_user(&i, buf, 1) != 0 || + copy_from_user(&i, buf + count - 1, 1) != 0)) { + retval = (-EFAULT); + goto out; + } + + retval = setup_buffering(STp, buf, count, 0); + if (retval) + goto out; + + total = count; + + memset(cmd, 0, MAX_COMMAND_SIZE); + cmd[0] = WRITE_6; + cmd[1] = (STp->block_size != 0); + + STps->rw = ST_WRITING; + + b_point = buf; + while (count > 0 && !retry_eot) { + + if (STbp->do_dio) { + do_count = count; + } + else { + if (STp->block_size == 0) + do_count = count; + else { + do_count = STbp->buffer_blocks * STp->block_size - + STbp->buffer_bytes; + if (do_count > count) + do_count = count; + } + + i = append_to_buffer(b_point, STbp, do_count); + if (i) { + retval = i; + goto out; + } + } + count -= do_count; + b_point += do_count; + + async_write = STp->block_size == 0 && !STbp->do_dio && + STm->do_async_writes && STps->eof < ST_EOM_OK; + + if (STp->block_size != 0 && STm->do_buffer_writes && + !(STp->try_dio_now && try_wdio) && STps->eof < ST_EOM_OK && + STbp->buffer_bytes < STbp->buffer_size) { + STp->dirty = 1; + /* Don't write a buffer that is not full enough. */ + if (!async_write && count == 0) + break; + } + + retry_write: + if (STp->block_size == 0) + blks = transfer = do_count; + else { + if (!STbp->do_dio) + blks = STbp->buffer_bytes; + else + blks = do_count; + blks /= STp->block_size; + transfer = blks * STp->block_size; + } + cmd[2] = blks >> 16; + cmd[3] = blks >> 8; + cmd[4] = blks; + + SRpnt = st_do_scsi(SRpnt, STp, cmd, transfer, DMA_TO_DEVICE, + STp->device->request_queue->rq_timeout, + MAX_WRITE_RETRIES, !async_write); + if (!SRpnt) { + retval = STbp->syscall_result; + goto out; + } + if (async_write && !STbp->syscall_result) { + STbp->writing = transfer; + STp->dirty = !(STbp->writing == + STbp->buffer_bytes); + SRpnt = NULL; /* Prevent releasing this request! 
*/ + DEB( STp->write_pending = 1; ) + break; + } + + if (STbp->syscall_result != 0) { + struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; + + DEBC_printk(STp, "Error on write:\n"); + if (cmdstatp->have_sense && (cmdstatp->flags & SENSE_EOM)) { + scode = cmdstatp->sense_hdr.sense_key; + if (cmdstatp->remainder_valid) + undone = (int)cmdstatp->uremainder64; + else if (STp->block_size == 0 && + scode == VOLUME_OVERFLOW) + undone = transfer; + else + undone = 0; + if (STp->block_size != 0) + undone *= STp->block_size; + if (undone <= do_count) { + /* Only data from this write is not written */ + count += undone; + b_point -= undone; + do_count -= undone; + if (STp->block_size) + blks = (transfer - undone) / STp->block_size; + STps->eof = ST_EOM_OK; + /* Continue in fixed block mode if all written + in this request but still something left to write + (retval left to zero) + */ + if (STp->block_size == 0 || + undone > 0 || count == 0) + retval = (-ENOSPC); /* EOM within current request */ + DEBC_printk(STp, "EOM with %d " + "bytes unwritten.\n", + (int)count); + } else { + /* EOT within data buffered earlier (possible only + in fixed block mode without direct i/o) */ + if (!retry_eot && !cmdstatp->deferred && + (scode == NO_SENSE || scode == RECOVERED_ERROR)) { + move_buffer_data(STp->buffer, transfer - undone); + retry_eot = 1; + if (STps->drv_block >= 0) { + STps->drv_block += (transfer - undone) / + STp->block_size; + } + STps->eof = ST_EOM_OK; + DEBC_printk(STp, "Retry " + "write of %d " + "bytes at EOM.\n", + STp->buffer->buffer_bytes); + goto retry_write; + } + else { + /* Either error within data buffered by driver or + failed retry */ + count -= do_count; + blks = do_count = 0; + STps->eof = ST_EOM_ERROR; + STps->drv_block = (-1); /* Too cautious? */ + retval = (-EIO); /* EOM for old data */ + DEBC_printk(STp, "EOM with " + "lost data.\n"); + } + } + } else { + count += do_count; + STps->drv_block = (-1); /* Too cautious? */ + retval = STbp->syscall_result; + } + + } + + if (STps->drv_block >= 0) { + if (STp->block_size == 0) + STps->drv_block += (do_count > 0); + else + STps->drv_block += blks; + } + + STbp->buffer_bytes = 0; + STp->dirty = 0; + + if (retval || retry_eot) { + if (count < total) + retval = total - count; + goto out; + } + } + + if (STps->eof == ST_EOD_1) + STps->eof = ST_EOM_OK; + else if (STps->eof != ST_EOM_OK) + STps->eof = ST_NOEOF; + retval = total - count; + + out: + if (SRpnt != NULL) + st_release_request(SRpnt); + release_buffering(STp, 0); + mutex_unlock(&STp->lock); + + return retval; +} + +/* Read data from the tape. Returns zero in the normal case, one if the + eof status has changed, and the negative error code in case of a + fatal error. Otherwise updates the buffer and the eof state. + + Does release user buffer mapping if it is set. 
+*/ +static long read_tape(struct scsi_tape *STp, long count, + struct st_request ** aSRpnt) +{ + int transfer, blks, bytes; + unsigned char cmd[MAX_COMMAND_SIZE]; + struct st_request *SRpnt; + struct st_modedef *STm; + struct st_partstat *STps; + struct st_buffer *STbp; + int retval = 0; + + if (count == 0) + return 0; + + STm = &(STp->modes[STp->current_mode]); + STps = &(STp->ps[STp->partition]); + if (STps->eof == ST_FM_HIT) + return 1; + STbp = STp->buffer; + + if (STp->block_size == 0) + blks = bytes = count; + else { + if (!(STp->try_dio_now && try_rdio) && STm->do_read_ahead) { + blks = (STp->buffer)->buffer_blocks; + bytes = blks * STp->block_size; + } else { + bytes = count; + if (!STbp->do_dio && bytes > (STp->buffer)->buffer_size) + bytes = (STp->buffer)->buffer_size; + blks = bytes / STp->block_size; + bytes = blks * STp->block_size; + } + } + + memset(cmd, 0, MAX_COMMAND_SIZE); + cmd[0] = READ_6; + cmd[1] = (STp->block_size != 0); + if (!cmd[1] && STp->sili) + cmd[1] |= 2; + cmd[2] = blks >> 16; + cmd[3] = blks >> 8; + cmd[4] = blks; + + SRpnt = *aSRpnt; + SRpnt = st_do_scsi(SRpnt, STp, cmd, bytes, DMA_FROM_DEVICE, + STp->device->request_queue->rq_timeout, + MAX_RETRIES, 1); + release_buffering(STp, 1); + *aSRpnt = SRpnt; + if (!SRpnt) + return STbp->syscall_result; + + STbp->read_pointer = 0; + STps->at_sm = 0; + + /* Something to check */ + if (STbp->syscall_result) { + struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; + + retval = 1; + DEBC_printk(STp, + "Sense: %2x %2x %2x %2x %2x %2x %2x %2x\n", + SRpnt->sense[0], SRpnt->sense[1], + SRpnt->sense[2], SRpnt->sense[3], + SRpnt->sense[4], SRpnt->sense[5], + SRpnt->sense[6], SRpnt->sense[7]); + if (cmdstatp->have_sense) { + + if (cmdstatp->sense_hdr.sense_key == BLANK_CHECK) + cmdstatp->flags &= 0xcf; /* No need for EOM in this case */ + + if (cmdstatp->flags != 0) { /* EOF, EOM, or ILI */ + /* Compute the residual count */ + if (cmdstatp->remainder_valid) + transfer = (int)cmdstatp->uremainder64; + else + transfer = 0; + if (cmdstatp->sense_hdr.sense_key == MEDIUM_ERROR) { + if (STp->block_size == 0) + transfer = bytes; + /* Some drives set ILI with MEDIUM ERROR */ + cmdstatp->flags &= ~SENSE_ILI; + } + + if (cmdstatp->flags & SENSE_ILI) { /* ILI */ + if (STp->block_size == 0 && + transfer < 0) { + st_printk(KERN_NOTICE, STp, + "Failed to read %d " + "byte block with %d " + "byte transfer.\n", + bytes - transfer, + bytes); + if (STps->drv_block >= 0) + STps->drv_block += 1; + STbp->buffer_bytes = 0; + return (-ENOMEM); + } else if (STp->block_size == 0) { + STbp->buffer_bytes = bytes - transfer; + } else { + st_release_request(SRpnt); + SRpnt = *aSRpnt = NULL; + if (transfer == blks) { /* We did not get anything, error */ + st_printk(KERN_NOTICE, STp, + "Incorrect " + "block size.\n"); + if (STps->drv_block >= 0) + STps->drv_block += blks - transfer + 1; + st_int_ioctl(STp, MTBSR, 1); + return (-EIO); + } + /* We have some data, deliver it */ + STbp->buffer_bytes = (blks - transfer) * + STp->block_size; + DEBC_printk(STp, "ILI but " + "enough data " + "received %ld " + "%d.\n", count, + STbp->buffer_bytes); + if (STps->drv_block >= 0) + STps->drv_block += 1; + if (st_int_ioctl(STp, MTBSR, 1)) + return (-EIO); + } + } else if (cmdstatp->flags & SENSE_FMK) { /* FM overrides EOM */ + if (STps->eof != ST_FM_HIT) + STps->eof = ST_FM_HIT; + else + STps->eof = ST_EOD_2; + if (STp->block_size == 0) + STbp->buffer_bytes = 0; + else + STbp->buffer_bytes = + bytes - transfer * STp->block_size; + DEBC_printk(STp, "EOF detected (%d 
" + "bytes read).\n", + STbp->buffer_bytes); + } else if (cmdstatp->flags & SENSE_EOM) { + if (STps->eof == ST_FM) + STps->eof = ST_EOD_1; + else + STps->eof = ST_EOM_OK; + if (STp->block_size == 0) + STbp->buffer_bytes = bytes - transfer; + else + STbp->buffer_bytes = + bytes - transfer * STp->block_size; + + DEBC_printk(STp, "EOM detected (%d " + "bytes read).\n", + STbp->buffer_bytes); + } + } + /* end of EOF, EOM, ILI test */ + else { /* nonzero sense key */ + DEBC_printk(STp, "Tape error while reading.\n"); + STps->drv_block = (-1); + if (STps->eof == ST_FM && + cmdstatp->sense_hdr.sense_key == BLANK_CHECK) { + DEBC_printk(STp, "Zero returned for " + "first BLANK CHECK " + "after EOF.\n"); + STps->eof = ST_EOD_2; /* First BLANK_CHECK after FM */ + } else /* Some other extended sense code */ + retval = (-EIO); + } + + if (STbp->buffer_bytes < 0) /* Caused by bogus sense data */ + STbp->buffer_bytes = 0; + } + /* End of extended sense test */ + else { /* Non-extended sense */ + retval = STbp->syscall_result; + } + + } + /* End of error handling */ + else { /* Read successful */ + STbp->buffer_bytes = bytes; + if (STp->sili) /* In fixed block mode residual is always zero here */ + STbp->buffer_bytes -= STp->buffer->cmdstat.residual; + } + + if (STps->drv_block >= 0) { + if (STp->block_size == 0) + STps->drv_block++; + else + STps->drv_block += STbp->buffer_bytes / STp->block_size; + } + return retval; +} + + +/* Read command */ +static ssize_t +st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) +{ + ssize_t total; + ssize_t retval = 0; + ssize_t i, transfer; + int special, do_dio = 0; + struct st_request *SRpnt = NULL; + struct scsi_tape *STp = filp->private_data; + struct st_modedef *STm; + struct st_partstat *STps; + struct st_buffer *STbp = STp->buffer; + + if (mutex_lock_interruptible(&STp->lock)) + return -ERESTARTSYS; + + retval = rw_checks(STp, filp, count); + if (retval || count == 0) + goto out; + + STm = &(STp->modes[STp->current_mode]); + if (STp->block_size != 0 && (count % STp->block_size) != 0) { + if (!STm->do_read_ahead) { + retval = (-EINVAL); /* Read must be integral number of blocks */ + goto out; + } + STp->try_dio_now = 0; /* Direct i/o can't handle split blocks */ + } + + STps = &(STp->ps[STp->partition]); + if (STps->rw == ST_WRITING) { + retval = flush_buffer(STp, 0); + if (retval) + goto out; + STps->rw = ST_READING; + } + DEB( + if (debugging && STps->eof != ST_NOEOF) + st_printk(ST_DEB_MSG, STp, + "EOF/EOM flag up (%d). Bytes %d\n", + STps->eof, STbp->buffer_bytes); + ) /* end DEB */ + + retval = setup_buffering(STp, buf, count, 1); + if (retval) + goto out; + do_dio = STbp->do_dio; + + if (STbp->buffer_bytes == 0 && + STps->eof >= ST_EOD_1) { + if (STps->eof < ST_EOD) { + STps->eof += 1; + retval = 0; + goto out; + } + retval = (-EIO); /* EOM or Blank Check */ + goto out; + } + + if (do_dio) { + /* Check the buffer writability before any tape movement. Don't alter + buffer data. 
*/ + if (copy_from_user(&i, buf, 1) != 0 || + copy_to_user(buf, &i, 1) != 0 || + copy_from_user(&i, buf + count - 1, 1) != 0 || + copy_to_user(buf + count - 1, &i, 1) != 0) { + retval = (-EFAULT); + goto out; + } + } + + STps->rw = ST_READING; + + + /* Loop until enough data in buffer or a special condition found */ + for (total = 0, special = 0; total < count && !special;) { + + /* Get new data if the buffer is empty */ + if (STbp->buffer_bytes == 0) { + special = read_tape(STp, count - total, &SRpnt); + if (special < 0) { /* No need to continue read */ + retval = special; + goto out; + } + } + + /* Move the data from driver buffer to user buffer */ + if (STbp->buffer_bytes > 0) { + DEB( + if (debugging && STps->eof != ST_NOEOF) + st_printk(ST_DEB_MSG, STp, + "EOF up (%d). Left %d, needed %d.\n", + STps->eof, STbp->buffer_bytes, + (int)(count - total)); + ) /* end DEB */ + transfer = STbp->buffer_bytes < count - total ? + STbp->buffer_bytes : count - total; + if (!do_dio) { + i = from_buffer(STbp, buf, transfer); + if (i) { + retval = i; + goto out; + } + } + buf += transfer; + total += transfer; + } + + if (STp->block_size == 0) + break; /* Read only one variable length block */ + + } /* for (total = 0, special = 0; + total < count && !special; ) */ + + /* Change the eof state if no data from tape or buffer */ + if (total == 0) { + if (STps->eof == ST_FM_HIT) { + STps->eof = ST_FM; + STps->drv_block = 0; + if (STps->drv_file >= 0) + STps->drv_file++; + } else if (STps->eof == ST_EOD_1) { + STps->eof = ST_EOD_2; + STps->drv_block = 0; + if (STps->drv_file >= 0) + STps->drv_file++; + } else if (STps->eof == ST_EOD_2) + STps->eof = ST_EOD; + } else if (STps->eof == ST_FM) + STps->eof = ST_NOEOF; + retval = total; + + out: + if (SRpnt != NULL) { + st_release_request(SRpnt); + SRpnt = NULL; + } + if (do_dio) { + release_buffering(STp, 1); + STbp->buffer_bytes = 0; + } + mutex_unlock(&STp->lock); + + return retval; +} + + + +DEB( +/* Set the driver options */ +static void st_log_options(struct scsi_tape * STp, struct st_modedef * STm) +{ + if (debugging) { + st_printk(KERN_INFO, STp, + "Mode %d options: buffer writes: %d, " + "async writes: %d, read ahead: %d\n", + STp->current_mode, STm->do_buffer_writes, + STm->do_async_writes, STm->do_read_ahead); + st_printk(KERN_INFO, STp, + " can bsr: %d, two FMs: %d, " + "fast mteom: %d, auto lock: %d,\n", + STp->can_bsr, STp->two_fm, STp->fast_mteom, + STp->do_auto_lock); + st_printk(KERN_INFO, STp, + " defs for wr: %d, no block limits: %d, " + "partitions: %d, s2 log: %d\n", + STm->defaults_for_writes, STp->omit_blklims, + STp->can_partitions, STp->scsi2_logical); + st_printk(KERN_INFO, STp, + " sysv: %d nowait: %d sili: %d " + "nowait_filemark: %d\n", + STm->sysv, STp->immediate, STp->sili, + STp->immediate_filemark); + st_printk(KERN_INFO, STp, " debugging: %d\n", debugging); + } +} + ) + + +static int st_set_options(struct scsi_tape *STp, long options) +{ + int value; + long code; + struct st_modedef *STm; + struct cdev *cd0, *cd1; + struct device *d0, *d1; + + STm = &(STp->modes[STp->current_mode]); + if (!STm->defined) { + cd0 = STm->cdevs[0]; + cd1 = STm->cdevs[1]; + d0 = STm->devs[0]; + d1 = STm->devs[1]; + memcpy(STm, &(STp->modes[0]), sizeof(struct st_modedef)); + STm->cdevs[0] = cd0; + STm->cdevs[1] = cd1; + STm->devs[0] = d0; + STm->devs[1] = d1; + modes_defined = 1; + DEBC_printk(STp, "Initialized mode %d definition from mode 0\n", + STp->current_mode); + } + + code = options & MT_ST_OPTIONS; + if (code == MT_ST_BOOLEANS) { + 
STm->do_buffer_writes = (options & MT_ST_BUFFER_WRITES) != 0; + STm->do_async_writes = (options & MT_ST_ASYNC_WRITES) != 0; + STm->defaults_for_writes = (options & MT_ST_DEF_WRITES) != 0; + STm->do_read_ahead = (options & MT_ST_READ_AHEAD) != 0; + STp->two_fm = (options & MT_ST_TWO_FM) != 0; + STp->fast_mteom = (options & MT_ST_FAST_MTEOM) != 0; + STp->do_auto_lock = (options & MT_ST_AUTO_LOCK) != 0; + STp->can_bsr = (options & MT_ST_CAN_BSR) != 0; + STp->omit_blklims = (options & MT_ST_NO_BLKLIMS) != 0; + if ((STp->device)->scsi_level >= SCSI_2) + STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0; + STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0; + STp->immediate = (options & MT_ST_NOWAIT) != 0; + STp->immediate_filemark = (options & MT_ST_NOWAIT_EOF) != 0; + STm->sysv = (options & MT_ST_SYSV) != 0; + STp->sili = (options & MT_ST_SILI) != 0; + DEB( debugging = (options & MT_ST_DEBUGGING) != 0; + st_log_options(STp, STm); ) + } else if (code == MT_ST_SETBOOLEANS || code == MT_ST_CLEARBOOLEANS) { + value = (code == MT_ST_SETBOOLEANS); + if ((options & MT_ST_BUFFER_WRITES) != 0) + STm->do_buffer_writes = value; + if ((options & MT_ST_ASYNC_WRITES) != 0) + STm->do_async_writes = value; + if ((options & MT_ST_DEF_WRITES) != 0) + STm->defaults_for_writes = value; + if ((options & MT_ST_READ_AHEAD) != 0) + STm->do_read_ahead = value; + if ((options & MT_ST_TWO_FM) != 0) + STp->two_fm = value; + if ((options & MT_ST_FAST_MTEOM) != 0) + STp->fast_mteom = value; + if ((options & MT_ST_AUTO_LOCK) != 0) + STp->do_auto_lock = value; + if ((options & MT_ST_CAN_BSR) != 0) + STp->can_bsr = value; + if ((options & MT_ST_NO_BLKLIMS) != 0) + STp->omit_blklims = value; + if ((STp->device)->scsi_level >= SCSI_2 && + (options & MT_ST_CAN_PARTITIONS) != 0) + STp->can_partitions = value; + if ((options & MT_ST_SCSI2LOGICAL) != 0) + STp->scsi2_logical = value; + if ((options & MT_ST_NOWAIT) != 0) + STp->immediate = value; + if ((options & MT_ST_NOWAIT_EOF) != 0) + STp->immediate_filemark = value; + if ((options & MT_ST_SYSV) != 0) + STm->sysv = value; + if ((options & MT_ST_SILI) != 0) + STp->sili = value; + DEB( + if ((options & MT_ST_DEBUGGING) != 0) + debugging = value; + st_log_options(STp, STm); ) + } else if (code == MT_ST_WRITE_THRESHOLD) { + /* Retained for compatibility */ + } else if (code == MT_ST_DEF_BLKSIZE) { + value = (options & ~MT_ST_OPTIONS); + if (value == ~MT_ST_OPTIONS) { + STm->default_blksize = (-1); + DEBC_printk(STp, "Default block size disabled.\n"); + } else { + STm->default_blksize = value; + DEBC_printk(STp,"Default block size set to " + "%d bytes.\n", STm->default_blksize); + if (STp->ready == ST_READY) { + STp->blksize_changed = 0; + set_mode_densblk(STp, STm); + } + } + } else if (code == MT_ST_TIMEOUTS) { + value = (options & ~MT_ST_OPTIONS); + if ((value & MT_ST_SET_LONG_TIMEOUT) != 0) { + STp->long_timeout = (value & ~MT_ST_SET_LONG_TIMEOUT) * HZ; + DEBC_printk(STp, "Long timeout set to %d seconds.\n", + (value & ~MT_ST_SET_LONG_TIMEOUT)); + } else { + blk_queue_rq_timeout(STp->device->request_queue, + value * HZ); + DEBC_printk(STp, "Normal timeout set to %d seconds.\n", + value); + } + } else if (code == MT_ST_SET_CLN) { + value = (options & ~MT_ST_OPTIONS) & 0xff; + if (value != 0 && + (value < EXTENDED_SENSE_START || + value >= SCSI_SENSE_BUFFERSIZE)) + return (-EINVAL); + STp->cln_mode = value; + STp->cln_sense_mask = (options >> 8) & 0xff; + STp->cln_sense_value = (options >> 16) & 0xff; + st_printk(KERN_INFO, STp, + "Cleaning request mode %d, mask 
%02x, value %02x\n", + value, STp->cln_sense_mask, STp->cln_sense_value); + } else if (code == MT_ST_DEF_OPTIONS) { + code = (options & ~MT_ST_CLEAR_DEFAULT); + value = (options & MT_ST_CLEAR_DEFAULT); + if (code == MT_ST_DEF_DENSITY) { + if (value == MT_ST_CLEAR_DEFAULT) { + STm->default_density = (-1); + DEBC_printk(STp, + "Density default disabled.\n"); + } else { + STm->default_density = value & 0xff; + DEBC_printk(STp, "Density default set to %x\n", + STm->default_density); + if (STp->ready == ST_READY) { + STp->density_changed = 0; + set_mode_densblk(STp, STm); + } + } + } else if (code == MT_ST_DEF_DRVBUFFER) { + if (value == MT_ST_CLEAR_DEFAULT) { + STp->default_drvbuffer = 0xff; + DEBC_printk(STp, + "Drive buffer default disabled.\n"); + } else { + STp->default_drvbuffer = value & 7; + DEBC_printk(STp, + "Drive buffer default set to %x\n", + STp->default_drvbuffer); + if (STp->ready == ST_READY) + st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer); + } + } else if (code == MT_ST_DEF_COMPRESSION) { + if (value == MT_ST_CLEAR_DEFAULT) { + STm->default_compression = ST_DONT_TOUCH; + DEBC_printk(STp, + "Compression default disabled.\n"); + } else { + if ((value & 0xff00) != 0) { + STp->c_algo = (value & 0xff00) >> 8; + DEBC_printk(STp, "Compression " + "algorithm set to 0x%x.\n", + STp->c_algo); + } + if ((value & 0xff) != 0xff) { + STm->default_compression = (value & 1 ? ST_YES : ST_NO); + DEBC_printk(STp, "Compression default " + "set to %x\n", + (value & 1)); + if (STp->ready == ST_READY) { + STp->compression_changed = 0; + st_compression(STp, (STm->default_compression == ST_YES)); + } + } + } + } + } else + return (-EIO); + + return 0; +} + +#define MODE_HEADER_LENGTH 4 + +/* Mode header and page byte offsets */ +#define MH_OFF_DATA_LENGTH 0 +#define MH_OFF_MEDIUM_TYPE 1 +#define MH_OFF_DEV_SPECIFIC 2 +#define MH_OFF_BDESCS_LENGTH 3 +#define MP_OFF_PAGE_NBR 0 +#define MP_OFF_PAGE_LENGTH 1 + +/* Mode header and page bit masks */ +#define MH_BIT_WP 0x80 +#define MP_MSK_PAGE_NBR 0x3f + +/* Don't return block descriptors */ +#define MODE_SENSE_OMIT_BDESCS 0x08 + +#define MODE_SELECT_PAGE_FORMAT 0x10 + +/* Read a mode page into the tape buffer. The block descriptors are included + if incl_block_descs is true. The page control is ored to the page number + parameter, if necessary. */ +static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs) +{ + unsigned char cmd[MAX_COMMAND_SIZE]; + struct st_request *SRpnt; + + memset(cmd, 0, MAX_COMMAND_SIZE); + cmd[0] = MODE_SENSE; + if (omit_block_descs) + cmd[1] = MODE_SENSE_OMIT_BDESCS; + cmd[2] = page; + cmd[4] = 255; + + SRpnt = st_do_scsi(NULL, STp, cmd, cmd[4], DMA_FROM_DEVICE, + STp->device->request_queue->rq_timeout, 0, 1); + if (SRpnt == NULL) + return (STp->buffer)->syscall_result; + + st_release_request(SRpnt); + + return STp->buffer->syscall_result; +} + + +/* Send the mode page in the tape buffer to the drive. Assumes that the mode data + in the buffer is correctly formatted. The long timeout is used if slow is non-zero. 
*/ +static int write_mode_page(struct scsi_tape *STp, int page, int slow) +{ + int pgo; + unsigned char cmd[MAX_COMMAND_SIZE]; + struct st_request *SRpnt; + int timeout; + + memset(cmd, 0, MAX_COMMAND_SIZE); + cmd[0] = MODE_SELECT; + cmd[1] = MODE_SELECT_PAGE_FORMAT; + pgo = MODE_HEADER_LENGTH + (STp->buffer)->b_data[MH_OFF_BDESCS_LENGTH]; + cmd[4] = pgo + (STp->buffer)->b_data[pgo + MP_OFF_PAGE_LENGTH] + 2; + + /* Clear reserved fields */ + (STp->buffer)->b_data[MH_OFF_DATA_LENGTH] = 0; + (STp->buffer)->b_data[MH_OFF_MEDIUM_TYPE] = 0; + (STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP; + (STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR; + + timeout = slow ? + STp->long_timeout : STp->device->request_queue->rq_timeout; + SRpnt = st_do_scsi(NULL, STp, cmd, cmd[4], DMA_TO_DEVICE, + timeout, 0, 1); + if (SRpnt == NULL) + return (STp->buffer)->syscall_result; + + st_release_request(SRpnt); + + return STp->buffer->syscall_result; +} + + +#define COMPRESSION_PAGE 0x0f +#define COMPRESSION_PAGE_LENGTH 16 + +#define CP_OFF_DCE_DCC 2 +#define CP_OFF_C_ALGO 7 + +#define DCE_MASK 0x80 +#define DCC_MASK 0x40 +#define RED_MASK 0x60 + + +/* Control the compression with mode page 15. Algorithm not changed if zero. + + The block descriptors are read and written because Sony SDT-7000 does not + work without this (suggestion from Michael Schaefer ). + Including block descriptors should not cause any harm to other drives. */ + +static int st_compression(struct scsi_tape * STp, int state) +{ + int retval; + int mpoffs; /* Offset to mode page start */ + unsigned char *b_data = (STp->buffer)->b_data; + + if (STp->ready != ST_READY) + return (-EIO); + + /* Read the current page contents */ + retval = read_mode_page(STp, COMPRESSION_PAGE, 0); + if (retval) { + DEBC_printk(STp, "Compression mode page not supported.\n"); + return (-EIO); + } + + mpoffs = MODE_HEADER_LENGTH + b_data[MH_OFF_BDESCS_LENGTH]; + DEBC_printk(STp, "Compression state is %d.\n", + (b_data[mpoffs + CP_OFF_DCE_DCC] & DCE_MASK ? 1 : 0)); + + /* Check if compression can be changed */ + if ((b_data[mpoffs + CP_OFF_DCE_DCC] & DCC_MASK) == 0) { + DEBC_printk(STp, "Compression not supported.\n"); + return (-EIO); + } + + /* Do the change */ + if (state) { + b_data[mpoffs + CP_OFF_DCE_DCC] |= DCE_MASK; + if (STp->c_algo != 0) + b_data[mpoffs + CP_OFF_C_ALGO] = STp->c_algo; + } + else { + b_data[mpoffs + CP_OFF_DCE_DCC] &= ~DCE_MASK; + if (STp->c_algo != 0) + b_data[mpoffs + CP_OFF_C_ALGO] = 0; /* no compression */ + } + + retval = write_mode_page(STp, COMPRESSION_PAGE, 0); + if (retval) { + DEBC_printk(STp, "Compression change failed.\n"); + return (-EIO); + } + DEBC_printk(STp, "Compression state changed to %d.\n", state); + + STp->compression_changed = 1; + return 0; +} + + +/* Process the load and unload commands (does unload if the load code is zero) */ +static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_code) +{ + int retval = (-EIO), timeout; + unsigned char cmd[MAX_COMMAND_SIZE]; + struct st_partstat *STps; + struct st_request *SRpnt; + + if (STp->ready != ST_READY && !load_code) { + if (STp->ready == ST_NO_TAPE) + return (-ENOMEDIUM); + else + return (-EIO); + } + + memset(cmd, 0, MAX_COMMAND_SIZE); + cmd[0] = START_STOP; + if (load_code) + cmd[4] |= 1; + /* + * If arg >= 1 && arg <= 6 Enhanced load/unload in HP C1553A + */ + if (load_code >= 1 + MT_ST_HPLOADER_OFFSET + && load_code <= 6 + MT_ST_HPLOADER_OFFSET) { + DEBC_printk(STp, " Enhanced %sload slot %2d.\n", + (cmd[4]) ? 
"" : "un", + load_code - MT_ST_HPLOADER_OFFSET); + cmd[3] = load_code - MT_ST_HPLOADER_OFFSET; /* MediaID field of C1553A */ + } + if (STp->immediate) { + cmd[1] = 1; /* Don't wait for completion */ + timeout = STp->device->request_queue->rq_timeout; + } + else + timeout = STp->long_timeout; + + DEBC( + if (!load_code) + st_printk(ST_DEB_MSG, STp, "Unloading tape.\n"); + else + st_printk(ST_DEB_MSG, STp, "Loading tape.\n"); + ); + + SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, + timeout, MAX_RETRIES, 1); + if (!SRpnt) + return (STp->buffer)->syscall_result; + + retval = (STp->buffer)->syscall_result; + st_release_request(SRpnt); + + if (!retval) { /* SCSI command successful */ + + if (!load_code) { + STp->rew_at_close = 0; + STp->ready = ST_NO_TAPE; + } + else { + STp->rew_at_close = STp->autorew_dev; + retval = check_tape(STp, filp); + if (retval > 0) + retval = 0; + } + } + else { + STps = &(STp->ps[STp->partition]); + STps->drv_file = STps->drv_block = (-1); + } + + return retval; +} + +#if DEBUG +#define ST_DEB_FORWARD 0 +#define ST_DEB_BACKWARD 1 +static void deb_space_print(struct scsi_tape *STp, int direction, char *units, unsigned char *cmd) +{ + s32 sc; + + if (!debugging) + return; + + sc = sign_extend32(get_unaligned_be24(&cmd[2]), 23); + if (direction) + sc = -sc; + st_printk(ST_DEB_MSG, STp, "Spacing tape %s over %d %s.\n", + direction ? "backward" : "forward", sc, units); +} +#else +#define ST_DEB_FORWARD 0 +#define ST_DEB_BACKWARD 1 +static void deb_space_print(struct scsi_tape *STp, int direction, char *units, unsigned char *cmd) {} +#endif + + +/* Internal ioctl function */ +static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned long arg) +{ + int timeout; + long ltmp; + int ioctl_result; + int chg_eof = 1; + unsigned char cmd[MAX_COMMAND_SIZE]; + struct st_request *SRpnt; + struct st_partstat *STps; + int fileno, blkno, at_sm, undone; + int datalen = 0, direction = DMA_NONE; + + WARN_ON(STp->buffer->do_dio != 0); + if (STp->ready != ST_READY) { + if (STp->ready == ST_NO_TAPE) + return (-ENOMEDIUM); + else + return (-EIO); + } + timeout = STp->long_timeout; + STps = &(STp->ps[STp->partition]); + fileno = STps->drv_file; + blkno = STps->drv_block; + at_sm = STps->at_sm; + + memset(cmd, 0, MAX_COMMAND_SIZE); + switch (cmd_in) { + case MTFSFM: + chg_eof = 0; /* Changed from the FSF after this */ + fallthrough; + case MTFSF: + cmd[0] = SPACE; + cmd[1] = 0x01; /* Space FileMarks */ + cmd[2] = (arg >> 16); + cmd[3] = (arg >> 8); + cmd[4] = arg; + deb_space_print(STp, ST_DEB_FORWARD, "filemarks", cmd); + if (fileno >= 0) + fileno += arg; + blkno = 0; + at_sm &= (arg == 0); + break; + case MTBSFM: + chg_eof = 0; /* Changed from the FSF after this */ + fallthrough; + case MTBSF: + cmd[0] = SPACE; + cmd[1] = 0x01; /* Space FileMarks */ + ltmp = (-arg); + cmd[2] = (ltmp >> 16); + cmd[3] = (ltmp >> 8); + cmd[4] = ltmp; + deb_space_print(STp, ST_DEB_BACKWARD, "filemarks", cmd); + if (fileno >= 0) + fileno -= arg; + blkno = (-1); /* We can't know the block number */ + at_sm &= (arg == 0); + break; + case MTFSR: + cmd[0] = SPACE; + cmd[1] = 0x00; /* Space Blocks */ + cmd[2] = (arg >> 16); + cmd[3] = (arg >> 8); + cmd[4] = arg; + deb_space_print(STp, ST_DEB_FORWARD, "blocks", cmd); + if (blkno >= 0) + blkno += arg; + at_sm &= (arg == 0); + break; + case MTBSR: + cmd[0] = SPACE; + cmd[1] = 0x00; /* Space Blocks */ + ltmp = (-arg); + cmd[2] = (ltmp >> 16); + cmd[3] = (ltmp >> 8); + cmd[4] = ltmp; + deb_space_print(STp, ST_DEB_BACKWARD, "blocks", cmd); + if (blkno >= 
0) + blkno -= arg; + at_sm &= (arg == 0); + break; + case MTFSS: + cmd[0] = SPACE; + cmd[1] = 0x04; /* Space Setmarks */ + cmd[2] = (arg >> 16); + cmd[3] = (arg >> 8); + cmd[4] = arg; + deb_space_print(STp, ST_DEB_FORWARD, "setmarks", cmd); + if (arg != 0) { + blkno = fileno = (-1); + at_sm = 1; + } + break; + case MTBSS: + cmd[0] = SPACE; + cmd[1] = 0x04; /* Space Setmarks */ + ltmp = (-arg); + cmd[2] = (ltmp >> 16); + cmd[3] = (ltmp >> 8); + cmd[4] = ltmp; + deb_space_print(STp, ST_DEB_BACKWARD, "setmarks", cmd); + if (arg != 0) { + blkno = fileno = (-1); + at_sm = 1; + } + break; + case MTWEOF: + case MTWEOFI: + case MTWSM: + if (STp->write_prot) + return (-EACCES); + cmd[0] = WRITE_FILEMARKS; + if (cmd_in == MTWSM) + cmd[1] = 2; + if (cmd_in == MTWEOFI || + (cmd_in == MTWEOF && STp->immediate_filemark)) + cmd[1] |= 1; + cmd[2] = (arg >> 16); + cmd[3] = (arg >> 8); + cmd[4] = arg; + timeout = STp->device->request_queue->rq_timeout; + DEBC( + if (cmd_in != MTWSM) + st_printk(ST_DEB_MSG, STp, + "Writing %d filemarks.\n", + cmd[2] * 65536 + + cmd[3] * 256 + + cmd[4]); + else + st_printk(ST_DEB_MSG, STp, + "Writing %d setmarks.\n", + cmd[2] * 65536 + + cmd[3] * 256 + + cmd[4]); + ) + if (fileno >= 0) + fileno += arg; + blkno = 0; + at_sm = (cmd_in == MTWSM); + break; + case MTREW: + cmd[0] = REZERO_UNIT; + if (STp->immediate) { + cmd[1] = 1; /* Don't wait for completion */ + timeout = STp->device->request_queue->rq_timeout; + } + DEBC_printk(STp, "Rewinding tape.\n"); + fileno = blkno = at_sm = 0; + break; + case MTNOP: + DEBC_printk(STp, "No op on tape.\n"); + return 0; /* Should do something ? */ + case MTRETEN: + cmd[0] = START_STOP; + if (STp->immediate) { + cmd[1] = 1; /* Don't wait for completion */ + timeout = STp->device->request_queue->rq_timeout; + } + cmd[4] = 3; + DEBC_printk(STp, "Retensioning tape.\n"); + fileno = blkno = at_sm = 0; + break; + case MTEOM: + if (!STp->fast_mteom) { + /* space to the end of tape */ + ioctl_result = st_int_ioctl(STp, MTFSF, 0x7fffff); + fileno = STps->drv_file; + if (STps->eof >= ST_EOD_1) + return 0; + /* The next lines would hide the number of spaced FileMarks + That's why I inserted the previous lines. I had no luck + with detecting EOM with FSF, so we go now to EOM. + Joerg Weule */ + } else + fileno = (-1); + cmd[0] = SPACE; + cmd[1] = 3; + DEBC_printk(STp, "Spacing to end of recorded medium.\n"); + blkno = -1; + at_sm = 0; + break; + case MTERASE: + if (STp->write_prot) + return (-EACCES); + cmd[0] = ERASE; + cmd[1] = (arg ? 
1 : 0); /* Long erase with non-zero argument */ + if (STp->immediate) { + cmd[1] |= 2; /* Don't wait for completion */ + timeout = STp->device->request_queue->rq_timeout; + } + else + timeout = STp->long_timeout * 8; + + DEBC_printk(STp, "Erasing tape.\n"); + fileno = blkno = at_sm = 0; + break; + case MTSETBLK: /* Set block length */ + case MTSETDENSITY: /* Set tape density */ + case MTSETDRVBUFFER: /* Set drive buffering */ + case SET_DENS_AND_BLK: /* Set density and block size */ + chg_eof = 0; + if (STp->dirty || (STp->buffer)->buffer_bytes != 0) + return (-EIO); /* Not allowed if data in buffer */ + if ((cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) && + (arg & MT_ST_BLKSIZE_MASK) != 0 && + STp->max_block > 0 && + ((arg & MT_ST_BLKSIZE_MASK) < STp->min_block || + (arg & MT_ST_BLKSIZE_MASK) > STp->max_block)) { + st_printk(KERN_WARNING, STp, "Illegal block size.\n"); + return (-EINVAL); + } + cmd[0] = MODE_SELECT; + if ((STp->use_pf & USE_PF)) + cmd[1] = MODE_SELECT_PAGE_FORMAT; + cmd[4] = datalen = 12; + direction = DMA_TO_DEVICE; + + memset((STp->buffer)->b_data, 0, 12); + if (cmd_in == MTSETDRVBUFFER) + (STp->buffer)->b_data[2] = (arg & 7) << 4; + else + (STp->buffer)->b_data[2] = + STp->drv_buffer << 4; + (STp->buffer)->b_data[3] = 8; /* block descriptor length */ + if (cmd_in == MTSETDENSITY) { + (STp->buffer)->b_data[4] = arg; + STp->density_changed = 1; /* At least we tried ;-) */ + } else if (cmd_in == SET_DENS_AND_BLK) + (STp->buffer)->b_data[4] = arg >> 24; + else + (STp->buffer)->b_data[4] = STp->density; + if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) { + ltmp = arg & MT_ST_BLKSIZE_MASK; + if (cmd_in == MTSETBLK) + STp->blksize_changed = 1; /* At least we tried ;-) */ + } else + ltmp = STp->block_size; + (STp->buffer)->b_data[9] = (ltmp >> 16); + (STp->buffer)->b_data[10] = (ltmp >> 8); + (STp->buffer)->b_data[11] = ltmp; + timeout = STp->device->request_queue->rq_timeout; + DEBC( + if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) + st_printk(ST_DEB_MSG, STp, + "Setting block size to %d bytes.\n", + (STp->buffer)->b_data[9] * 65536 + + (STp->buffer)->b_data[10] * 256 + + (STp->buffer)->b_data[11]); + if (cmd_in == MTSETDENSITY || cmd_in == SET_DENS_AND_BLK) + st_printk(ST_DEB_MSG, STp, + "Setting density code to %x.\n", + (STp->buffer)->b_data[4]); + if (cmd_in == MTSETDRVBUFFER) + st_printk(ST_DEB_MSG, STp, + "Setting drive buffer code to %d.\n", + ((STp->buffer)->b_data[2] >> 4) & 7); + ) + break; + default: + return (-ENOSYS); + } + + SRpnt = st_do_scsi(NULL, STp, cmd, datalen, direction, + timeout, MAX_RETRIES, 1); + if (!SRpnt) + return (STp->buffer)->syscall_result; + + ioctl_result = (STp->buffer)->syscall_result; + + if (!ioctl_result) { /* SCSI command successful */ + st_release_request(SRpnt); + SRpnt = NULL; + STps->drv_block = blkno; + STps->drv_file = fileno; + STps->at_sm = at_sm; + + if (cmd_in == MTBSFM) + ioctl_result = st_int_ioctl(STp, MTFSF, 1); + else if (cmd_in == MTFSFM) + ioctl_result = st_int_ioctl(STp, MTBSF, 1); + + if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) { + STp->block_size = arg & MT_ST_BLKSIZE_MASK; + if (STp->block_size != 0) { + (STp->buffer)->buffer_blocks = + (STp->buffer)->buffer_size / STp->block_size; + } + (STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0; + if (cmd_in == SET_DENS_AND_BLK) + STp->density = arg >> MT_ST_DENSITY_SHIFT; + } else if (cmd_in == MTSETDRVBUFFER) + STp->drv_buffer = (arg & 7); + else if (cmd_in == MTSETDENSITY) + STp->density = arg; + + if (cmd_in == MTEOM) + STps->eof 
= ST_EOD; + else if (cmd_in == MTFSF) + STps->eof = ST_FM; + else if (chg_eof) + STps->eof = ST_NOEOF; + + if (cmd_in == MTWEOF || cmd_in == MTWEOFI) + STps->rw = ST_IDLE; /* prevent automatic WEOF at close */ + } else { /* SCSI command was not completely successful. Don't return + from this block without releasing the SCSI command block! */ + struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; + + if (cmdstatp->flags & SENSE_EOM) { + if (cmd_in != MTBSF && cmd_in != MTBSFM && + cmd_in != MTBSR && cmd_in != MTBSS) + STps->eof = ST_EOM_OK; + STps->drv_block = 0; + } + + if (cmdstatp->remainder_valid) + undone = (int)cmdstatp->uremainder64; + else + undone = 0; + + if ((cmd_in == MTWEOF || cmd_in == MTWEOFI) && + cmdstatp->have_sense && + (cmdstatp->flags & SENSE_EOM)) { + if (cmdstatp->sense_hdr.sense_key == NO_SENSE || + cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) { + ioctl_result = 0; /* EOF(s) written successfully at EOM */ + STps->eof = ST_NOEOF; + } else { /* Writing EOF(s) failed */ + if (fileno >= 0) + fileno -= undone; + if (undone < arg) + STps->eof = ST_NOEOF; + } + STps->drv_file = fileno; + } else if ((cmd_in == MTFSF) || (cmd_in == MTFSFM)) { + if (fileno >= 0) + STps->drv_file = fileno - undone; + else + STps->drv_file = fileno; + STps->drv_block = -1; + STps->eof = ST_NOEOF; + } else if ((cmd_in == MTBSF) || (cmd_in == MTBSFM)) { + if (arg > 0 && undone < 0) /* Some drives get this wrong */ + undone = (-undone); + if (STps->drv_file >= 0) + STps->drv_file = fileno + undone; + STps->drv_block = 0; + STps->eof = ST_NOEOF; + } else if (cmd_in == MTFSR) { + if (cmdstatp->flags & SENSE_FMK) { /* Hit filemark */ + if (STps->drv_file >= 0) + STps->drv_file++; + STps->drv_block = 0; + STps->eof = ST_FM; + } else { + if (blkno >= undone) + STps->drv_block = blkno - undone; + else + STps->drv_block = (-1); + STps->eof = ST_NOEOF; + } + } else if (cmd_in == MTBSR) { + if (cmdstatp->flags & SENSE_FMK) { /* Hit filemark */ + STps->drv_file--; + STps->drv_block = (-1); + } else { + if (arg > 0 && undone < 0) /* Some drives get this wrong */ + undone = (-undone); + if (STps->drv_block >= 0) + STps->drv_block = blkno + undone; + } + STps->eof = ST_NOEOF; + } else if (cmd_in == MTEOM) { + STps->drv_file = (-1); + STps->drv_block = (-1); + STps->eof = ST_EOD; + } else if (cmd_in == MTSETBLK || + cmd_in == MTSETDENSITY || + cmd_in == MTSETDRVBUFFER || + cmd_in == SET_DENS_AND_BLK) { + if (cmdstatp->sense_hdr.sense_key == ILLEGAL_REQUEST && + !(STp->use_pf & PF_TESTED)) { + /* Try the other possible state of Page Format if not + already tried */ + STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED; + st_release_request(SRpnt); + SRpnt = NULL; + return st_int_ioctl(STp, cmd_in, arg); + } + } else if (chg_eof) + STps->eof = ST_NOEOF; + + if (cmdstatp->sense_hdr.sense_key == BLANK_CHECK) + STps->eof = ST_EOD; + + st_release_request(SRpnt); + SRpnt = NULL; + } + + return ioctl_result; +} + + +/* Get the tape position. If bt == 2, arg points into a kernel space mt_loc + structure. 
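The get_location() routine below issues READ POSITION (short form) for SCSI-2 and newer drives and then picks the result apart by hand from b_data. A sketch of that 20-byte layout, with illustrative field names (the driver itself defines no such struct):

/* Short-form READ POSITION data as consumed by get_location() below. */
struct read_pos_short {
        unsigned char flags;            /* b_data[0]: 0x80 = BOP, 0x04 = position unknown */
        unsigned char partition;        /* b_data[1] */
        unsigned char reserved[2];
        unsigned char first_block[4];   /* b_data[4..7], big-endian block number */
        unsigned char last_block[4];
        unsigned char reserved2;
        unsigned char blocks_in_buf[3];
        unsigned char bytes_in_buf[4];
};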
*/ + +static int get_location(struct scsi_tape *STp, unsigned int *block, int *partition, + int logical) +{ + int result; + unsigned char scmd[MAX_COMMAND_SIZE]; + struct st_request *SRpnt; + + if (STp->ready != ST_READY) + return (-EIO); + + memset(scmd, 0, MAX_COMMAND_SIZE); + if ((STp->device)->scsi_level < SCSI_2) { + scmd[0] = QFA_REQUEST_BLOCK; + scmd[4] = 3; + } else { + scmd[0] = READ_POSITION; + if (!logical && !STp->scsi2_logical) + scmd[1] = 1; + } + SRpnt = st_do_scsi(NULL, STp, scmd, 20, DMA_FROM_DEVICE, + STp->device->request_queue->rq_timeout, + MAX_READY_RETRIES, 1); + if (!SRpnt) + return (STp->buffer)->syscall_result; + + if ((STp->buffer)->syscall_result != 0 || + (STp->device->scsi_level >= SCSI_2 && + ((STp->buffer)->b_data[0] & 4) != 0)) { + *block = *partition = 0; + DEBC_printk(STp, " Can't read tape position.\n"); + result = (-EIO); + } else { + result = 0; + if ((STp->device)->scsi_level < SCSI_2) { + *block = ((STp->buffer)->b_data[0] << 16) + + ((STp->buffer)->b_data[1] << 8) + + (STp->buffer)->b_data[2]; + *partition = 0; + } else { + *block = ((STp->buffer)->b_data[4] << 24) + + ((STp->buffer)->b_data[5] << 16) + + ((STp->buffer)->b_data[6] << 8) + + (STp->buffer)->b_data[7]; + *partition = (STp->buffer)->b_data[1]; + if (((STp->buffer)->b_data[0] & 0x80) && + (STp->buffer)->b_data[1] == 0) /* BOP of partition 0 */ + STp->ps[0].drv_block = STp->ps[0].drv_file = 0; + } + DEBC_printk(STp, "Got tape pos. blk %d part %d.\n", + *block, *partition); + } + st_release_request(SRpnt); + SRpnt = NULL; + + return result; +} + + +/* Set the tape block and partition. Negative partition means that only the + block should be set in vendor specific way. */ +static int set_location(struct scsi_tape *STp, unsigned int block, int partition, + int logical) +{ + struct st_partstat *STps; + int result, p; + unsigned int blk; + int timeout; + unsigned char scmd[MAX_COMMAND_SIZE]; + struct st_request *SRpnt; + + if (STp->ready != ST_READY) + return (-EIO); + timeout = STp->long_timeout; + STps = &(STp->ps[STp->partition]); + + DEBC_printk(STp, "Setting block to %d and partition to %d.\n", + block, partition); + DEB(if (partition < 0) + return (-EIO); ) + + /* Update the location at the partition we are leaving */ + if ((!STp->can_partitions && partition != 0) || + partition >= ST_NBR_PARTITIONS) + return (-EINVAL); + if (partition != STp->partition) { + if (get_location(STp, &blk, &p, 1)) + STps->last_block_valid = 0; + else { + STps->last_block_valid = 1; + STps->last_block_visited = blk; + DEBC_printk(STp, "Visited block %d for " + "partition %d saved.\n", + blk, STp->partition); + } + } + + memset(scmd, 0, MAX_COMMAND_SIZE); + if ((STp->device)->scsi_level < SCSI_2) { + scmd[0] = QFA_SEEK_BLOCK; + scmd[2] = (block >> 16); + scmd[3] = (block >> 8); + scmd[4] = block; + scmd[5] = 0; + } else { + scmd[0] = SEEK_10; + scmd[3] = (block >> 24); + scmd[4] = (block >> 16); + scmd[5] = (block >> 8); + scmd[6] = block; + if (!logical && !STp->scsi2_logical) + scmd[1] = 4; + if (STp->partition != partition) { + scmd[1] |= 2; + scmd[8] = partition; + DEBC_printk(STp, "Trying to change partition " + "from %d to %d\n", STp->partition, + partition); + } + } + if (STp->immediate) { + scmd[1] |= 1; /* Don't wait for completion */ + timeout = STp->device->request_queue->rq_timeout; + } + + SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE, + timeout, MAX_READY_RETRIES, 1); + if (!SRpnt) + return (STp->buffer)->syscall_result; + + STps->drv_block = STps->drv_file = (-1); + STps->eof = ST_NOEOF; + if 
((STp->buffer)->syscall_result != 0) { + result = (-EIO); + if (STp->can_partitions && + (STp->device)->scsi_level >= SCSI_2 && + (p = find_partition(STp)) >= 0) + STp->partition = p; + } else { + if (STp->can_partitions) { + STp->partition = partition; + STps = &(STp->ps[partition]); + if (!STps->last_block_valid || + STps->last_block_visited != block) { + STps->at_sm = 0; + STps->rw = ST_IDLE; + } + } else + STps->at_sm = 0; + if (block == 0) + STps->drv_block = STps->drv_file = 0; + result = 0; + } + + st_release_request(SRpnt); + SRpnt = NULL; + + return result; +} + + +/* Find the current partition number for the drive status. Called from open and + returns either partition number of negative error code. */ +static int find_partition(struct scsi_tape *STp) +{ + int i, partition; + unsigned int block; + + if ((i = get_location(STp, &block, &partition, 1)) < 0) + return i; + if (partition >= ST_NBR_PARTITIONS) + return (-EIO); + return partition; +} + + +/* Change the partition if necessary */ +static int switch_partition(struct scsi_tape *STp) +{ + struct st_partstat *STps; + + if (STp->partition == STp->new_partition) + return 0; + STps = &(STp->ps[STp->new_partition]); + if (!STps->last_block_valid) + STps->last_block_visited = 0; + return set_location(STp, STps->last_block_visited, STp->new_partition, 1); +} + +/* Functions for reading and writing the medium partition mode page. */ + +#define PART_PAGE 0x11 +#define PART_PAGE_FIXED_LENGTH 8 + +#define PP_OFF_MAX_ADD_PARTS 2 +#define PP_OFF_NBR_ADD_PARTS 3 +#define PP_OFF_FLAGS 4 +#define PP_OFF_PART_UNITS 6 +#define PP_OFF_RESERVED 7 + +#define PP_BIT_IDP 0x20 +#define PP_BIT_FDP 0x80 +#define PP_MSK_PSUM_MB 0x10 +#define PP_MSK_PSUM_UNITS 0x18 +#define PP_MSK_POFM 0x04 + +/* Get the number of partitions on the tape. As a side effect reads the + mode page into the tape buffer. */ +static int nbr_partitions(struct scsi_tape *STp) +{ + int result; + + if (STp->ready != ST_READY) + return (-EIO); + + result = read_mode_page(STp, PART_PAGE, 1); + + if (result) { + DEBC_printk(STp, "Can't read medium partition page.\n"); + result = (-EIO); + } else { + result = (STp->buffer)->b_data[MODE_HEADER_LENGTH + + PP_OFF_NBR_ADD_PARTS] + 1; + DEBC_printk(STp, "Number of partitions %d.\n", result); + } + + return result; +} + + +static int format_medium(struct scsi_tape *STp, int format) +{ + int result = 0; + int timeout = STp->long_timeout; + unsigned char scmd[MAX_COMMAND_SIZE]; + struct st_request *SRpnt; + + memset(scmd, 0, MAX_COMMAND_SIZE); + scmd[0] = FORMAT_UNIT; + scmd[2] = format; + if (STp->immediate) { + scmd[1] |= 1; /* Don't wait for completion */ + timeout = STp->device->request_queue->rq_timeout; + } + DEBC_printk(STp, "Sending FORMAT MEDIUM\n"); + SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE, + timeout, MAX_RETRIES, 1); + if (!SRpnt) + result = STp->buffer->syscall_result; + return result; +} + + +/* Partition the tape into two partitions if size > 0 or one partition if + size == 0. + + The block descriptors are read and written because Sony SDT-7000 does not + work without this (suggestion from Michael Schaefer ). + + My HP C1533A drive returns only one partition size field. This is used to + set the size of partition 1. There is no size field for the default partition. + Michael Schaefer's Sony SDT-7000 returns two descriptors and the second is + used to set the size of partition 1 (this is what the SCSI-3 standard specifies). 
+ The following algorithm is used to accommodate both drives: if the number of + partition size fields is greater than the maximum number of additional partitions + in the mode page, the second field is used. Otherwise the first field is used. + + For Seagate DDS drives the page length must be 8 when no partitions is defined + and 10 when 1 partition is defined (information from Eric Lee Green). This is + is acceptable also to some other old drives and enforced if the first partition + size field is used for the first additional partition size. + + For drives that advertize SCSI-3 or newer, use the SSC-3 methods. + */ +static int partition_tape(struct scsi_tape *STp, int size) +{ + int result; + int target_partition; + bool scsi3 = STp->device->scsi_level >= SCSI_3, needs_format = false; + int pgo, psd_cnt, psdo; + int psum = PP_MSK_PSUM_MB, units = 0; + unsigned char *bp; + + result = read_mode_page(STp, PART_PAGE, 0); + if (result) { + DEBC_printk(STp, "Can't read partition mode page.\n"); + return result; + } + target_partition = 1; + if (size < 0) { + target_partition = 0; + size = -size; + } + + /* The mode page is in the buffer. Let's modify it and write it. */ + bp = (STp->buffer)->b_data; + pgo = MODE_HEADER_LENGTH + bp[MH_OFF_BDESCS_LENGTH]; + DEBC_printk(STp, "Partition page length is %d bytes.\n", + bp[pgo + MP_OFF_PAGE_LENGTH] + 2); + + psd_cnt = (bp[pgo + MP_OFF_PAGE_LENGTH] + 2 - PART_PAGE_FIXED_LENGTH) / 2; + + if (scsi3) { + needs_format = (bp[pgo + PP_OFF_FLAGS] & PP_MSK_POFM) != 0; + if (needs_format && size == 0) { + /* No need to write the mode page when clearing + * partitioning + */ + DEBC_printk(STp, "Formatting tape with one partition.\n"); + result = format_medium(STp, 0); + goto out; + } + if (needs_format) /* Leave the old value for HP DATs claiming SCSI_3 */ + psd_cnt = 2; + if ((bp[pgo + PP_OFF_FLAGS] & PP_MSK_PSUM_UNITS) == PP_MSK_PSUM_UNITS) { + /* Use units scaling for large partitions if the device + * suggests it and no precision lost. Required for IBM + * TS1140/50 drives that don't support MB units. 
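A condensed paraphrase of the unit-scaling decision made in the code just below: the partition size descriptors in the mode page are 16-bit fields, so a size that cannot be expressed in megabytes is converted to 10^9-byte units (exponent 9). The helper is illustrative only and not part of the driver:

static int scale_part_size(int size_mb, int dev_prefers_units, int *units)
{
        /* GB scaling when the drive asks for it and no precision is lost */
        if (dev_prefers_units && size_mb >= 1000 && (size_mb % 1000) == 0) {
                *units = 9;             /* partition size unit = 10^9 bytes */
                return size_mb / 1000;
        }
        /* forced GB scaling when the value does not fit in 16 bits of MB */
        if (size_mb >= 65534) {
                *units = 9;
                return size_mb / 1000;
        }
        *units = 0;                     /* keep megabyte scaling (PSUM = MB) */
        return size_mb;
}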
+ */ + if (size >= 1000 && (size % 1000) == 0) { + size /= 1000; + psum = PP_MSK_PSUM_UNITS; + units = 9; /* GB */ + } + } + /* Try it anyway if too large to specify in MB */ + if (psum == PP_MSK_PSUM_MB && size >= 65534) { + size /= 1000; + psum = PP_MSK_PSUM_UNITS; + units = 9; /* GB */ + } + } + + if (size >= 65535 || /* Does not fit into two bytes */ + (target_partition == 0 && psd_cnt < 2)) { + result = -EINVAL; + goto out; + } + + psdo = pgo + PART_PAGE_FIXED_LENGTH; + /* The second condition is for HP DDS which use only one partition size + * descriptor + */ + if (target_partition > 0 && + (psd_cnt > bp[pgo + PP_OFF_MAX_ADD_PARTS] || + bp[pgo + PP_OFF_MAX_ADD_PARTS] != 1)) { + bp[psdo] = bp[psdo + 1] = 0xff; /* Rest to partition 0 */ + psdo += 2; + } + memset(bp + psdo, 0, bp[pgo + PP_OFF_NBR_ADD_PARTS] * 2); + + DEBC_printk(STp, "psd_cnt %d, max.parts %d, nbr_parts %d\n", + psd_cnt, bp[pgo + PP_OFF_MAX_ADD_PARTS], + bp[pgo + PP_OFF_NBR_ADD_PARTS]); + + if (size == 0) { + bp[pgo + PP_OFF_NBR_ADD_PARTS] = 0; + if (psd_cnt <= bp[pgo + PP_OFF_MAX_ADD_PARTS]) + bp[pgo + MP_OFF_PAGE_LENGTH] = 6; + DEBC_printk(STp, "Formatting tape with one partition.\n"); + } else { + bp[psdo] = (size >> 8) & 0xff; + bp[psdo + 1] = size & 0xff; + if (target_partition == 0) + bp[psdo + 2] = bp[psdo + 3] = 0xff; + bp[pgo + 3] = 1; + if (bp[pgo + MP_OFF_PAGE_LENGTH] < 8) + bp[pgo + MP_OFF_PAGE_LENGTH] = 8; + DEBC_printk(STp, + "Formatting tape with two partitions (%i = %d MB).\n", + target_partition, units > 0 ? size * 1000 : size); + } + bp[pgo + PP_OFF_PART_UNITS] = 0; + bp[pgo + PP_OFF_RESERVED] = 0; + if (size != 1 || units != 0) { + bp[pgo + PP_OFF_FLAGS] = PP_BIT_IDP | psum | + (bp[pgo + PP_OFF_FLAGS] & 0x07); + bp[pgo + PP_OFF_PART_UNITS] = units; + } else + bp[pgo + PP_OFF_FLAGS] = PP_BIT_FDP | + (bp[pgo + PP_OFF_FLAGS] & 0x1f); + bp[pgo + MP_OFF_PAGE_LENGTH] = 6 + psd_cnt * 2; + + result = write_mode_page(STp, PART_PAGE, 1); + + if (!result && needs_format) + result = format_medium(STp, 1); + + if (result) { + st_printk(KERN_INFO, STp, "Partitioning of tape failed.\n"); + result = (-EIO); + } + +out: + return result; +} + + + +/* The ioctl command */ +static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) +{ + void __user *p = (void __user *)arg; + int i, cmd_nr, cmd_type, bt; + int retval = 0; + unsigned int blk; + struct scsi_tape *STp = file->private_data; + struct st_modedef *STm; + struct st_partstat *STps; + + if (mutex_lock_interruptible(&STp->lock)) + return -ERESTARTSYS; + + DEB( + if (debugging && !STp->in_use) { + st_printk(ST_DEB_MSG, STp, "Incorrect device.\n"); + retval = (-EIO); + goto out; + } ) /* end DEB */ + + STm = &(STp->modes[STp->current_mode]); + STps = &(STp->ps[STp->partition]); + + /* + * If we are in the middle of error recovery, don't let anyone + * else try and use this device. Also, if error recovery fails, it + * may try and take the device offline, in which case all further + * access to the device is prohibited. 
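As an illustrative aside, partition_tape() is driven from user space through the MTMKPART ioctl handled further down in st_ioctl(). A minimal sketch, assuming a device node such as /dev/nst0:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>

/* mt_count > 0: size of partition 1 in MB, mt_count < 0: the size applies
 * to partition 0, mt_count == 0: return the tape to a single partition. */
static int st_make_partition(const char *dev, int size_mb)
{
        struct mtop op;
        int fd, ret;

        fd = open(dev, O_RDWR);
        if (fd < 0)
                return -1;
        op.mt_op = MTMKPART;
        op.mt_count = size_mb;
        ret = ioctl(fd, MTIOCTOP, &op);
        close(fd);
        return ret;
}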
+ */ + retval = scsi_ioctl_block_when_processing_errors(STp->device, cmd_in, + file->f_flags & O_NDELAY); + if (retval) + goto out; + + cmd_type = _IOC_TYPE(cmd_in); + cmd_nr = _IOC_NR(cmd_in); + + if (cmd_type == _IOC_TYPE(MTIOCTOP) && cmd_nr == _IOC_NR(MTIOCTOP)) { + struct mtop mtc; + + if (_IOC_SIZE(cmd_in) != sizeof(mtc)) { + retval = (-EINVAL); + goto out; + } + + i = copy_from_user(&mtc, p, sizeof(struct mtop)); + if (i) { + retval = (-EFAULT); + goto out; + } + + if (mtc.mt_op == MTSETDRVBUFFER && !capable(CAP_SYS_ADMIN)) { + st_printk(KERN_WARNING, STp, + "MTSETDRVBUFFER only allowed for root.\n"); + retval = (-EPERM); + goto out; + } + if (!STm->defined && + (mtc.mt_op != MTSETDRVBUFFER && + (mtc.mt_count & MT_ST_OPTIONS) == 0)) { + retval = (-ENXIO); + goto out; + } + + if (!STp->pos_unknown) { + + if (STps->eof == ST_FM_HIT) { + if (mtc.mt_op == MTFSF || mtc.mt_op == MTFSFM || + mtc.mt_op == MTEOM) { + mtc.mt_count -= 1; + if (STps->drv_file >= 0) + STps->drv_file += 1; + } else if (mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM) { + mtc.mt_count += 1; + if (STps->drv_file >= 0) + STps->drv_file += 1; + } + } + + if (mtc.mt_op == MTSEEK) { + /* Old position must be restored if partition will be + changed */ + i = !STp->can_partitions || + (STp->new_partition != STp->partition); + } else { + i = mtc.mt_op == MTREW || mtc.mt_op == MTOFFL || + mtc.mt_op == MTRETEN || mtc.mt_op == MTEOM || + mtc.mt_op == MTLOCK || mtc.mt_op == MTLOAD || + mtc.mt_op == MTFSF || mtc.mt_op == MTFSFM || + mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM || + mtc.mt_op == MTCOMPRESSION; + } + i = flush_buffer(STp, i); + if (i < 0) { + retval = i; + goto out; + } + if (STps->rw == ST_WRITING && + (mtc.mt_op == MTREW || mtc.mt_op == MTOFFL || + mtc.mt_op == MTSEEK || + mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM)) { + i = st_int_ioctl(STp, MTWEOF, 1); + if (i < 0) { + retval = i; + goto out; + } + if (mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM) + mtc.mt_count++; + STps->rw = ST_IDLE; + } + + } else { + /* + * If there was a bus reset, block further access + * to this device. If the user wants to rewind the tape, + * then reset the flag and allow access again. + */ + if (mtc.mt_op != MTREW && + mtc.mt_op != MTOFFL && + mtc.mt_op != MTRETEN && + mtc.mt_op != MTERASE && + mtc.mt_op != MTSEEK && + mtc.mt_op != MTEOM) { + retval = (-EIO); + goto out; + } + reset_state(STp); + /* remove this when the midlevel properly clears was_reset */ + STp->device->was_reset = 0; + } + + if (mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK && + mtc.mt_op != MTSETDENSITY && mtc.mt_op != MTWSM && + mtc.mt_op != MTSETDRVBUFFER && mtc.mt_op != MTSETPART) + STps->rw = ST_IDLE; /* Prevent automatic WEOF and fsf */ + + if (mtc.mt_op == MTOFFL && STp->door_locked != ST_UNLOCKED) + do_door_lock(STp, 0); /* Ignore result! 
*/ + + if (mtc.mt_op == MTSETDRVBUFFER && + (mtc.mt_count & MT_ST_OPTIONS) != 0) { + retval = st_set_options(STp, mtc.mt_count); + goto out; + } + + if (mtc.mt_op == MTSETPART) { + if (!STp->can_partitions || + mtc.mt_count < 0 || mtc.mt_count >= ST_NBR_PARTITIONS) { + retval = (-EINVAL); + goto out; + } + if (mtc.mt_count >= STp->nbr_partitions && + (STp->nbr_partitions = nbr_partitions(STp)) < 0) { + retval = (-EIO); + goto out; + } + if (mtc.mt_count >= STp->nbr_partitions) { + retval = (-EINVAL); + goto out; + } + STp->new_partition = mtc.mt_count; + retval = 0; + goto out; + } + + if (mtc.mt_op == MTMKPART) { + if (!STp->can_partitions) { + retval = (-EINVAL); + goto out; + } + i = do_load_unload(STp, file, 1); + if (i < 0) { + retval = i; + goto out; + } + i = partition_tape(STp, mtc.mt_count); + if (i < 0) { + retval = i; + goto out; + } + for (i = 0; i < ST_NBR_PARTITIONS; i++) { + STp->ps[i].rw = ST_IDLE; + STp->ps[i].at_sm = 0; + STp->ps[i].last_block_valid = 0; + } + STp->partition = STp->new_partition = 0; + STp->nbr_partitions = mtc.mt_count != 0 ? 2 : 1; + STps->drv_block = STps->drv_file = 0; + retval = 0; + goto out; + } + + if (mtc.mt_op == MTSEEK) { + i = set_location(STp, mtc.mt_count, STp->new_partition, 0); + if (!STp->can_partitions) + STp->ps[0].rw = ST_IDLE; + retval = i; + goto out; + } + + if (mtc.mt_op == MTUNLOAD || mtc.mt_op == MTOFFL) { + retval = do_load_unload(STp, file, 0); + goto out; + } + + if (mtc.mt_op == MTLOAD) { + retval = do_load_unload(STp, file, max(1, mtc.mt_count)); + goto out; + } + + if (mtc.mt_op == MTLOCK || mtc.mt_op == MTUNLOCK) { + retval = do_door_lock(STp, (mtc.mt_op == MTLOCK)); + goto out; + } + + if (STp->can_partitions && STp->ready == ST_READY && + (i = switch_partition(STp)) < 0) { + retval = i; + goto out; + } + + if (mtc.mt_op == MTCOMPRESSION) + retval = st_compression(STp, (mtc.mt_count & 1)); + else + retval = st_int_ioctl(STp, mtc.mt_op, mtc.mt_count); + goto out; + } + if (!STm->defined) { + retval = (-ENXIO); + goto out; + } + + if ((i = flush_buffer(STp, 0)) < 0) { + retval = i; + goto out; + } + if (STp->can_partitions && + (i = switch_partition(STp)) < 0) { + retval = i; + goto out; + } + + if (cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET)) { + struct mtget mt_status; + + if (_IOC_SIZE(cmd_in) != sizeof(struct mtget)) { + retval = (-EINVAL); + goto out; + } + + mt_status.mt_type = STp->tape_type; + mt_status.mt_dsreg = + ((STp->block_size << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK) | + ((STp->density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK); + mt_status.mt_blkno = STps->drv_block; + mt_status.mt_fileno = STps->drv_file; + if (STp->block_size != 0) { + if (STps->rw == ST_WRITING) + mt_status.mt_blkno += + (STp->buffer)->buffer_bytes / STp->block_size; + else if (STps->rw == ST_READING) + mt_status.mt_blkno -= + ((STp->buffer)->buffer_bytes + + STp->block_size - 1) / STp->block_size; + } + + mt_status.mt_gstat = 0; + if (STp->drv_write_prot) + mt_status.mt_gstat |= GMT_WR_PROT(0xffffffff); + if (mt_status.mt_blkno == 0) { + if (mt_status.mt_fileno == 0) + mt_status.mt_gstat |= GMT_BOT(0xffffffff); + else + mt_status.mt_gstat |= GMT_EOF(0xffffffff); + } + mt_status.mt_erreg = (STp->recover_reg << MT_ST_SOFTERR_SHIFT); + mt_status.mt_resid = STp->partition; + if (STps->eof == ST_EOM_OK || STps->eof == ST_EOM_ERROR) + mt_status.mt_gstat |= GMT_EOT(0xffffffff); + else if (STps->eof >= ST_EOM_OK) + mt_status.mt_gstat |= GMT_EOD(0xffffffff); + if (STp->density == 1) + mt_status.mt_gstat |= 
GMT_D_800(0xffffffff); + else if (STp->density == 2) + mt_status.mt_gstat |= GMT_D_1600(0xffffffff); + else if (STp->density == 3) + mt_status.mt_gstat |= GMT_D_6250(0xffffffff); + if (STp->ready == ST_READY) + mt_status.mt_gstat |= GMT_ONLINE(0xffffffff); + if (STp->ready == ST_NO_TAPE) + mt_status.mt_gstat |= GMT_DR_OPEN(0xffffffff); + if (STps->at_sm) + mt_status.mt_gstat |= GMT_SM(0xffffffff); + if (STm->do_async_writes || + (STm->do_buffer_writes && STp->block_size != 0) || + STp->drv_buffer != 0) + mt_status.mt_gstat |= GMT_IM_REP_EN(0xffffffff); + if (STp->cleaning_req) + mt_status.mt_gstat |= GMT_CLN(0xffffffff); + + retval = put_user_mtget(p, &mt_status); + if (retval) + goto out; + + STp->recover_reg = 0; /* Clear after read */ + goto out; + } /* End of MTIOCGET */ + if (cmd_type == _IOC_TYPE(MTIOCPOS) && cmd_nr == _IOC_NR(MTIOCPOS)) { + struct mtpos mt_pos; + if (_IOC_SIZE(cmd_in) != sizeof(struct mtpos)) { + retval = (-EINVAL); + goto out; + } + if ((i = get_location(STp, &blk, &bt, 0)) < 0) { + retval = i; + goto out; + } + mt_pos.mt_blkno = blk; + retval = put_user_mtpos(p, &mt_pos); + goto out; + } + mutex_unlock(&STp->lock); + + switch (cmd_in) { + case SG_IO: + case SCSI_IOCTL_SEND_COMMAND: + case CDROM_SEND_PACKET: + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + break; + default: + break; + } + + retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE, cmd_in, p); + if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) { + /* unload */ + STp->rew_at_close = 0; + STp->ready = ST_NO_TAPE; + } + return retval; + + out: + mutex_unlock(&STp->lock); + return retval; +} + +#ifdef CONFIG_COMPAT +static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) +{ + /* argument conversion is handled using put_user_mtpos/put_user_mtget */ + switch (cmd_in) { + case MTIOCPOS32: + cmd_in = MTIOCPOS; + break; + case MTIOCGET32: + cmd_in = MTIOCGET; + break; + } + + return st_ioctl(file, cmd_in, arg); +} +#endif + + + +/* Try to allocate a new tape buffer. Calling function must not hold + dev_arr_lock. 
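As an illustrative aside, the status assembled for MTIOCGET above can be read back and decoded in user space with the GMT_* macros from the same mtio.h interface. A minimal sketch, assuming /dev/nst0:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>

static int st_print_status(const char *dev)
{
        struct mtget st;
        int fd;

        fd = open(dev, O_RDONLY);
        if (fd < 0)
                return -1;
        if (ioctl(fd, MTIOCGET, &st) < 0) {
                close(fd);
                return -1;
        }
        printf("file %ld, block %ld, %s, %s\n",
               (long)st.mt_fileno, (long)st.mt_blkno,
               GMT_ONLINE(st.mt_gstat) ? "online" : "offline",
               GMT_WR_PROT(st.mt_gstat) ? "write-protected" : "writable");
        close(fd);
        return 0;
}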
*/ +static struct st_buffer *new_tape_buffer(int max_sg) +{ + struct st_buffer *tb; + + tb = kzalloc(sizeof(struct st_buffer), GFP_KERNEL); + if (!tb) { + printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n"); + return NULL; + } + tb->frp_segs = 0; + tb->use_sg = max_sg; + tb->buffer_size = 0; + + tb->reserved_pages = kcalloc(max_sg, sizeof(struct page *), + GFP_KERNEL); + if (!tb->reserved_pages) { + kfree(tb); + return NULL; + } + + return tb; +} + + +/* Try to allocate enough space in the tape buffer */ +#define ST_MAX_ORDER 6 + +static int enlarge_buffer(struct st_buffer * STbuffer, int new_size) +{ + int segs, max_segs, b_size, order, got; + gfp_t priority; + + if (new_size <= STbuffer->buffer_size) + return 1; + + if (STbuffer->buffer_size <= PAGE_SIZE) + normalize_buffer(STbuffer); /* Avoid extra segment */ + + max_segs = STbuffer->use_sg; + + priority = GFP_KERNEL | __GFP_NOWARN; + + if (STbuffer->cleared) + priority |= __GFP_ZERO; + + if (STbuffer->frp_segs) { + order = STbuffer->reserved_page_order; + b_size = PAGE_SIZE << order; + } else { + for (b_size = PAGE_SIZE, order = 0; + order < ST_MAX_ORDER && + max_segs * (PAGE_SIZE << order) < new_size; + order++, b_size *= 2) + ; /* empty */ + STbuffer->reserved_page_order = order; + } + if (max_segs * (PAGE_SIZE << order) < new_size) { + if (order == ST_MAX_ORDER) + return 0; + normalize_buffer(STbuffer); + return enlarge_buffer(STbuffer, new_size); + } + + for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size; + segs < max_segs && got < new_size;) { + struct page *page; + + page = alloc_pages(priority, order); + if (!page) { + DEB(STbuffer->buffer_size = got); + normalize_buffer(STbuffer); + return 0; + } + + STbuffer->frp_segs += 1; + got += b_size; + STbuffer->buffer_size = got; + STbuffer->reserved_pages[segs] = page; + segs++; + } + STbuffer->b_data = page_address(STbuffer->reserved_pages[0]); + + return 1; +} + + +/* Make sure that no data from previous user is in the internal buffer */ +static void clear_buffer(struct st_buffer * st_bp) +{ + int i; + + for (i=0; i < st_bp->frp_segs; i++) + memset(page_address(st_bp->reserved_pages[i]), 0, + PAGE_SIZE << st_bp->reserved_page_order); + st_bp->cleared = 1; +} + + +/* Release the extra buffer */ +static void normalize_buffer(struct st_buffer * STbuffer) +{ + int i, order = STbuffer->reserved_page_order; + + for (i = 0; i < STbuffer->frp_segs; i++) { + __free_pages(STbuffer->reserved_pages[i], order); + STbuffer->buffer_size -= (PAGE_SIZE << order); + } + STbuffer->frp_segs = 0; + STbuffer->sg_segs = 0; + STbuffer->reserved_page_order = 0; + STbuffer->map_data.offset = 0; +} + + +/* Move data from the user buffer to the tape buffer. Returns zero (success) or + negative error code. */ +static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count) +{ + int i, cnt, res, offset; + int length = PAGE_SIZE << st_bp->reserved_page_order; + + for (i = 0, offset = st_bp->buffer_bytes; + i < st_bp->frp_segs && offset >= length; i++) + offset -= length; + if (i == st_bp->frp_segs) { /* Should never happen */ + printk(KERN_WARNING "st: append_to_buffer offset overflow.\n"); + return (-EIO); + } + for (; i < st_bp->frp_segs && do_count > 0; i++) { + struct page *page = st_bp->reserved_pages[i]; + cnt = length - offset < do_count ? 
length - offset : do_count; + res = copy_from_user(page_address(page) + offset, ubp, cnt); + if (res) + return (-EFAULT); + do_count -= cnt; + st_bp->buffer_bytes += cnt; + ubp += cnt; + offset = 0; + } + if (do_count) /* Should never happen */ + return (-EIO); + + return 0; +} + + +/* Move data from the tape buffer to the user buffer. Returns zero (success) or + negative error code. */ +static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count) +{ + int i, cnt, res, offset; + int length = PAGE_SIZE << st_bp->reserved_page_order; + + for (i = 0, offset = st_bp->read_pointer; + i < st_bp->frp_segs && offset >= length; i++) + offset -= length; + if (i == st_bp->frp_segs) { /* Should never happen */ + printk(KERN_WARNING "st: from_buffer offset overflow.\n"); + return (-EIO); + } + for (; i < st_bp->frp_segs && do_count > 0; i++) { + struct page *page = st_bp->reserved_pages[i]; + cnt = length - offset < do_count ? length - offset : do_count; + res = copy_to_user(ubp, page_address(page) + offset, cnt); + if (res) + return (-EFAULT); + do_count -= cnt; + st_bp->buffer_bytes -= cnt; + st_bp->read_pointer += cnt; + ubp += cnt; + offset = 0; + } + if (do_count) /* Should never happen */ + return (-EIO); + + return 0; +} + + +/* Move data towards start of buffer */ +static void move_buffer_data(struct st_buffer * st_bp, int offset) +{ + int src_seg, dst_seg, src_offset = 0, dst_offset; + int count, total; + int length = PAGE_SIZE << st_bp->reserved_page_order; + + if (offset == 0) + return; + + total=st_bp->buffer_bytes - offset; + for (src_seg=0; src_seg < st_bp->frp_segs; src_seg++) { + src_offset = offset; + if (src_offset < length) + break; + offset -= length; + } + + st_bp->buffer_bytes = st_bp->read_pointer = total; + for (dst_seg=dst_offset=0; total > 0; ) { + struct page *dpage = st_bp->reserved_pages[dst_seg]; + struct page *spage = st_bp->reserved_pages[src_seg]; + + count = min(length - dst_offset, length - src_offset); + memmove(page_address(dpage) + dst_offset, + page_address(spage) + src_offset, count); + src_offset += count; + if (src_offset >= length) { + src_seg++; + src_offset = 0; + } + dst_offset += count; + if (dst_offset >= length) { + dst_seg++; + dst_offset = 0; + } + total -= count; + } +} + +/* Validate the options from command line or module parameters */ +static void validate_options(void) +{ + if (buffer_kbs > 0) + st_fixed_buffer_size = buffer_kbs * ST_KILOBYTE; + if (max_sg_segs >= ST_FIRST_SG) + st_max_sg_segs = max_sg_segs; +} + +#ifndef MODULE +/* Set the boot options. Syntax is defined in Documenation/scsi/st.txt. 
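A worked example of the segment walk performed by append_to_buffer() and from_buffer() above (values are illustrative): with reserved_page_order == 1 on 4 KiB pages each reserved segment spans 8 KiB, so a buffer offset of 10 KiB starts in segment 1 at byte 2048. The subtraction loop in those functions computes exactly this division and remainder:

/* Equivalent of the offset-locating loop, shown as plain arithmetic;
 * 4096 stands in for PAGE_SIZE here. */
static void locate_offset(int offset, int page_order, int *seg, int *seg_off)
{
        int length = 4096 << page_order;        /* PAGE_SIZE << reserved_page_order */

        *seg = offset / length;
        *seg_off = offset % length;
}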
+ */ +static int __init st_setup(char *str) +{ + int i, len, ints[5]; + char *stp; + + stp = get_options(str, ARRAY_SIZE(ints), ints); + + if (ints[0] > 0) { + for (i = 0; i < ints[0] && i < ARRAY_SIZE(parms); i++) + if (parms[i].val) + *parms[i].val = ints[i + 1]; + } else { + while (stp != NULL) { + for (i = 0; i < ARRAY_SIZE(parms); i++) { + len = strlen(parms[i].name); + if (!strncmp(stp, parms[i].name, len) && + (*(stp + len) == ':' || *(stp + len) == '=')) { + if (parms[i].val) + *parms[i].val = + simple_strtoul(stp + len + 1, NULL, 0); + else + printk(KERN_WARNING "st: Obsolete parameter %s\n", + parms[i].name); + break; + } + } + if (i >= ARRAY_SIZE(parms)) + printk(KERN_WARNING "st: invalid parameter in '%s'\n", + stp); + stp = strchr(stp, ','); + if (stp) + stp++; + } + } + + validate_options(); + + return 1; +} + +__setup("st=", st_setup); + +#endif + +static const struct file_operations st_fops = +{ + .owner = THIS_MODULE, + .read = st_read, + .write = st_write, + .unlocked_ioctl = st_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = st_compat_ioctl, +#endif + .open = st_open, + .flush = st_flush, + .release = st_release, + .llseek = noop_llseek, +}; + +static int create_one_cdev(struct scsi_tape *tape, int mode, int rew) +{ + int i, error; + dev_t cdev_devno; + struct cdev *cdev; + struct device *dev; + struct st_modedef *STm = &(tape->modes[mode]); + char name[10]; + int dev_num = tape->index; + + cdev_devno = MKDEV(SCSI_TAPE_MAJOR, TAPE_MINOR(dev_num, mode, rew)); + + cdev = cdev_alloc(); + if (!cdev) { + pr_err("st%d: out of memory. Device not attached.\n", dev_num); + error = -ENOMEM; + goto out; + } + cdev->owner = THIS_MODULE; + cdev->ops = &st_fops; + STm->cdevs[rew] = cdev; + + error = cdev_add(cdev, cdev_devno, 1); + if (error) { + pr_err("st%d: Can't add %s-rewind mode %d\n", dev_num, + rew ? "non" : "auto", mode); + pr_err("st%d: Device not attached.\n", dev_num); + goto out_free; + } + + i = mode << (4 - ST_NBR_MODE_BITS); + snprintf(name, 10, "%s%s%s", rew ? 
"n" : "", + tape->name, st_formats[i]); + + dev = device_create(&st_sysfs_class, &tape->device->sdev_gendev, + cdev_devno, &tape->modes[mode], "%s", name); + if (IS_ERR(dev)) { + pr_err("st%d: device_create failed\n", dev_num); + error = PTR_ERR(dev); + goto out_free; + } + + STm->devs[rew] = dev; + + return 0; +out_free: + cdev_del(STm->cdevs[rew]); +out: + STm->cdevs[rew] = NULL; + STm->devs[rew] = NULL; + return error; +} + +static int create_cdevs(struct scsi_tape *tape) +{ + int mode, error; + for (mode = 0; mode < ST_NBR_MODES; ++mode) { + error = create_one_cdev(tape, mode, 0); + if (error) + return error; + error = create_one_cdev(tape, mode, 1); + if (error) + return error; + } + + return sysfs_create_link(&tape->device->sdev_gendev.kobj, + &tape->modes[0].devs[0]->kobj, "tape"); +} + +static void remove_cdevs(struct scsi_tape *tape) +{ + int mode, rew; + sysfs_remove_link(&tape->device->sdev_gendev.kobj, "tape"); + for (mode = 0; mode < ST_NBR_MODES; mode++) { + struct st_modedef *STm = &(tape->modes[mode]); + for (rew = 0; rew < 2; rew++) { + if (STm->cdevs[rew]) + cdev_del(STm->cdevs[rew]); + if (STm->devs[rew]) + device_unregister(STm->devs[rew]); + } + } +} + +static int st_probe(struct device *dev) +{ + struct scsi_device *SDp = to_scsi_device(dev); + struct scsi_tape *tpnt = NULL; + struct st_modedef *STm; + struct st_partstat *STps; + struct st_buffer *buffer; + int i, error; + + if (SDp->type != TYPE_TAPE) + return -ENODEV; + if (st_incompatible(SDp)) { + sdev_printk(KERN_INFO, SDp, + "OnStream tapes are no longer supported;\n"); + sdev_printk(KERN_INFO, SDp, + "please mail to linux-scsi@vger.kernel.org.\n"); + return -ENODEV; + } + + scsi_autopm_get_device(SDp); + i = queue_max_segments(SDp->request_queue); + if (st_max_sg_segs < i) + i = st_max_sg_segs; + buffer = new_tape_buffer(i); + if (buffer == NULL) { + sdev_printk(KERN_ERR, SDp, + "st: Can't allocate new tape buffer. " + "Device not attached.\n"); + goto out; + } + + tpnt = kzalloc(sizeof(struct scsi_tape), GFP_KERNEL); + if (tpnt == NULL) { + sdev_printk(KERN_ERR, SDp, + "st: Can't allocate device descriptor.\n"); + goto out_buffer_free; + } + kref_init(&tpnt->kref); + + tpnt->device = SDp; + if (SDp->scsi_level <= 2) + tpnt->tape_type = MT_ISSCSI1; + else + tpnt->tape_type = MT_ISSCSI2; + + tpnt->buffer = buffer; + tpnt->buffer->last_SRpnt = NULL; + + tpnt->inited = 0; + tpnt->dirty = 0; + tpnt->in_use = 0; + tpnt->drv_buffer = 1; /* Try buffering if no mode sense */ + tpnt->use_pf = (SDp->scsi_level >= SCSI_2); + tpnt->density = 0; + tpnt->do_auto_lock = ST_AUTO_LOCK; + tpnt->can_bsr = (SDp->scsi_level > 2 ? 
1 : ST_IN_FILE_POS); /* BSR mandatory in SCSI3 */ + tpnt->can_partitions = 0; + tpnt->two_fm = ST_TWO_FM; + tpnt->fast_mteom = ST_FAST_MTEOM; + tpnt->scsi2_logical = ST_SCSI2LOGICAL; + tpnt->sili = ST_SILI; + tpnt->immediate = ST_NOWAIT; + tpnt->immediate_filemark = 0; + tpnt->default_drvbuffer = 0xff; /* No forced buffering */ + tpnt->partition = 0; + tpnt->new_partition = 0; + tpnt->nbr_partitions = 0; + blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT); + tpnt->long_timeout = ST_LONG_TIMEOUT; + tpnt->try_dio = try_direct_io; + + for (i = 0; i < ST_NBR_MODES; i++) { + STm = &(tpnt->modes[i]); + STm->defined = 0; + STm->sysv = ST_SYSV; + STm->defaults_for_writes = 0; + STm->do_async_writes = ST_ASYNC_WRITES; + STm->do_buffer_writes = ST_BUFFER_WRITES; + STm->do_read_ahead = ST_READ_AHEAD; + STm->default_compression = ST_DONT_TOUCH; + STm->default_blksize = (-1); /* No forced size */ + STm->default_density = (-1); /* No forced density */ + STm->tape = tpnt; + } + + for (i = 0; i < ST_NBR_PARTITIONS; i++) { + STps = &(tpnt->ps[i]); + STps->rw = ST_IDLE; + STps->eof = ST_NOEOF; + STps->at_sm = 0; + STps->last_block_valid = 0; + STps->drv_block = (-1); + STps->drv_file = (-1); + } + + tpnt->current_mode = 0; + tpnt->modes[0].defined = 1; + + tpnt->density_changed = tpnt->compression_changed = + tpnt->blksize_changed = 0; + mutex_init(&tpnt->lock); + + idr_preload(GFP_KERNEL); + spin_lock(&st_index_lock); + error = idr_alloc(&st_index_idr, tpnt, 0, ST_MAX_TAPES + 1, GFP_NOWAIT); + spin_unlock(&st_index_lock); + idr_preload_end(); + if (error < 0) { + pr_warn("st: idr allocation failed: %d\n", error); + goto out_free_tape; + } + tpnt->index = error; + sprintf(tpnt->name, "st%d", tpnt->index); + tpnt->stats = kzalloc(sizeof(struct scsi_tape_stats), GFP_KERNEL); + if (tpnt->stats == NULL) { + sdev_printk(KERN_ERR, SDp, + "st: Can't allocate statistics.\n"); + goto out_idr_remove; + } + + dev_set_drvdata(dev, tpnt); + + + error = create_cdevs(tpnt); + if (error) + goto out_remove_devs; + scsi_autopm_put_device(SDp); + + sdev_printk(KERN_NOTICE, SDp, + "Attached scsi tape %s\n", tpnt->name); + sdev_printk(KERN_INFO, SDp, "%s: try direct i/o: %s (alignment %d B)\n", + tpnt->name, tpnt->try_dio ? "yes" : "no", + queue_dma_alignment(SDp->request_queue) + 1); + + return 0; + +out_remove_devs: + remove_cdevs(tpnt); + kfree(tpnt->stats); +out_idr_remove: + spin_lock(&st_index_lock); + idr_remove(&st_index_idr, tpnt->index); + spin_unlock(&st_index_lock); +out_free_tape: + kfree(tpnt); +out_buffer_free: + kfree(buffer); +out: + scsi_autopm_put_device(SDp); + return -ENODEV; +}; + + +static int st_remove(struct device *dev) +{ + struct scsi_tape *tpnt = dev_get_drvdata(dev); + int index = tpnt->index; + + scsi_autopm_get_device(to_scsi_device(dev)); + remove_cdevs(tpnt); + + mutex_lock(&st_ref_mutex); + kref_put(&tpnt->kref, scsi_tape_release); + mutex_unlock(&st_ref_mutex); + spin_lock(&st_index_lock); + idr_remove(&st_index_idr, index); + spin_unlock(&st_index_lock); + return 0; +} + +/** + * scsi_tape_release - Called to free the Scsi_Tape structure + * @kref: pointer to embedded kref + * + * st_ref_mutex must be held entering this routine. Because it is + * called on last put, you should always use the scsi_tape_get() + * scsi_tape_put() helpers which manipulate the semaphore directly + * and never do a direct kref_put(). 
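A minimal sketch of the reference-counting contract described above: any holder of a scsi_tape pointer takes and drops its reference under st_ref_mutex, so scsi_tape_release() only runs on the final put (st_remove() above follows the same pattern). The helper below is illustrative; the real scsi_tape_get()/scsi_tape_put() are defined earlier in st.c:

static void example_hold_and_release(struct scsi_tape *STp)
{
        mutex_lock(&st_ref_mutex);
        kref_get(&STp->kref);
        mutex_unlock(&st_ref_mutex);

        /* ... use STp while holding the reference ... */

        mutex_lock(&st_ref_mutex);
        kref_put(&STp->kref, scsi_tape_release);
        mutex_unlock(&st_ref_mutex);
}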
+ **/ +static void scsi_tape_release(struct kref *kref) +{ + struct scsi_tape *tpnt = to_scsi_tape(kref); + + tpnt->device = NULL; + + if (tpnt->buffer) { + normalize_buffer(tpnt->buffer); + kfree(tpnt->buffer->reserved_pages); + kfree(tpnt->buffer); + } + + kfree(tpnt->stats); + kfree(tpnt); + return; +} + +static struct class st_sysfs_class = { + .name = "scsi_tape", + .dev_groups = st_dev_groups, +}; + +static int __init init_st(void) +{ + int err; + + validate_options(); + + printk(KERN_INFO "st: Version %s, fixed bufsize %d, s/g segs %d\n", + verstr, st_fixed_buffer_size, st_max_sg_segs); + + debugging = (debug_flag > 0) ? debug_flag : NO_DEBUG; + if (debugging) { + printk(KERN_INFO "st: Debugging enabled debug_flag = %d\n", + debugging); + } + + err = class_register(&st_sysfs_class); + if (err) { + pr_err("Unable register sysfs class for SCSI tapes\n"); + return err; + } + + err = register_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0), + ST_MAX_TAPE_ENTRIES, "st"); + if (err) { + printk(KERN_ERR "Unable to get major %d for SCSI tapes\n", + SCSI_TAPE_MAJOR); + goto err_class; + } + + err = scsi_register_driver(&st_template.gendrv); + if (err) + goto err_chrdev; + + return 0; + +err_chrdev: + unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0), + ST_MAX_TAPE_ENTRIES); +err_class: + class_unregister(&st_sysfs_class); + return err; +} + +static void __exit exit_st(void) +{ + scsi_unregister_driver(&st_template.gendrv); + unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0), + ST_MAX_TAPE_ENTRIES); + class_unregister(&st_sysfs_class); + idr_destroy(&st_index_idr); + printk(KERN_INFO "st: Unloaded.\n"); +} + +module_init(init_st); +module_exit(exit_st); + + +/* The sysfs driver interface. Read-only at the moment */ +static ssize_t try_direct_io_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", try_direct_io); +} +static DRIVER_ATTR_RO(try_direct_io); + +static ssize_t fixed_buffer_size_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", st_fixed_buffer_size); +} +static DRIVER_ATTR_RO(fixed_buffer_size); + +static ssize_t max_sg_segs_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", st_max_sg_segs); +} +static DRIVER_ATTR_RO(max_sg_segs); + +static ssize_t version_show(struct device_driver *ddd, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "[%s]\n", verstr); +} +static DRIVER_ATTR_RO(version); + +#if DEBUG +static ssize_t debug_flag_store(struct device_driver *ddp, + const char *buf, size_t count) +{ +/* We only care what the first byte of the data is the rest is unused. + * if it's a '1' we turn on debug and if it's a '0' we disable it. All + * other values have -EINVAL returned if they are passed in. 
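As a usage note (path assumed, not spelled out in this patch): once the driver registers, this attribute typically appears as /sys/bus/scsi/drivers/st/debug_flag; writing '1' or '0' there flips the same debugging variable that the MT_ST_DEBUGGING option and the debug_flag module parameter control, and reading it back reports the current setting.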
+ */ + if (count > 0) { + if (buf[0] == '0') { + debugging = NO_DEBUG; + return count; + } else if (buf[0] == '1') { + debugging = 1; + return count; + } + } + return -EINVAL; +} + +static ssize_t debug_flag_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", debugging); +} +static DRIVER_ATTR_RW(debug_flag); +#endif + +static struct attribute *st_drv_attrs[] = { + &driver_attr_try_direct_io.attr, + &driver_attr_fixed_buffer_size.attr, + &driver_attr_max_sg_segs.attr, + &driver_attr_version.attr, +#if DEBUG + &driver_attr_debug_flag.attr, +#endif + NULL, +}; +ATTRIBUTE_GROUPS(st_drv); + +/* The sysfs simple class interface */ +static ssize_t +defined_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + ssize_t l = 0; + + l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined); + return l; +} +static DEVICE_ATTR_RO(defined); + +static ssize_t +default_blksize_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + ssize_t l = 0; + + l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize); + return l; +} +static DEVICE_ATTR_RO(default_blksize); + +static ssize_t +default_density_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + ssize_t l = 0; + char *fmt; + + fmt = STm->default_density >= 0 ? "0x%02x\n" : "%d\n"; + l = snprintf(buf, PAGE_SIZE, fmt, STm->default_density); + return l; +} +static DEVICE_ATTR_RO(default_density); + +static ssize_t +default_compression_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + ssize_t l = 0; + + l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1); + return l; +} +static DEVICE_ATTR_RO(default_compression); + +static ssize_t +options_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + struct scsi_tape *STp = STm->tape; + int options; + ssize_t l = 0; + + options = STm->do_buffer_writes ? MT_ST_BUFFER_WRITES : 0; + options |= STm->do_async_writes ? MT_ST_ASYNC_WRITES : 0; + options |= STm->do_read_ahead ? MT_ST_READ_AHEAD : 0; + DEB( options |= debugging ? MT_ST_DEBUGGING : 0 ); + options |= STp->two_fm ? MT_ST_TWO_FM : 0; + options |= STp->fast_mteom ? MT_ST_FAST_MTEOM : 0; + options |= STm->defaults_for_writes ? MT_ST_DEF_WRITES : 0; + options |= STp->can_bsr ? MT_ST_CAN_BSR : 0; + options |= STp->omit_blklims ? MT_ST_NO_BLKLIMS : 0; + options |= STp->can_partitions ? MT_ST_CAN_PARTITIONS : 0; + options |= STp->scsi2_logical ? MT_ST_SCSI2LOGICAL : 0; + options |= STm->sysv ? MT_ST_SYSV : 0; + options |= STp->immediate ? MT_ST_NOWAIT : 0; + options |= STp->immediate_filemark ? MT_ST_NOWAIT_EOF : 0; + options |= STp->sili ? 
MT_ST_SILI : 0; + + l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options); + return l; +} +static DEVICE_ATTR_RO(options); + +/* Support for tape stats */ + +/** + * read_cnt_show - return read count - count of reads made from tape drive + * @dev: struct device + * @attr: attribute structure + * @buf: buffer to return formatted data in + */ +static ssize_t read_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + + return sprintf(buf, "%lld", + (long long)atomic64_read(&STm->tape->stats->read_cnt)); +} +static DEVICE_ATTR_RO(read_cnt); + +/** + * read_byte_cnt_show - return read byte count - tape drives + * may use blocks less than 512 bytes this gives the raw byte count of + * of data read from the tape drive. + * @dev: struct device + * @attr: attribute structure + * @buf: buffer to return formatted data in + */ +static ssize_t read_byte_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + + return sprintf(buf, "%lld", + (long long)atomic64_read(&STm->tape->stats->read_byte_cnt)); +} +static DEVICE_ATTR_RO(read_byte_cnt); + +/** + * read_ns_show - return read ns - overall time spent waiting on reads in ns. + * @dev: struct device + * @attr: attribute structure + * @buf: buffer to return formatted data in + */ +static ssize_t read_ns_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + + return sprintf(buf, "%lld", + (long long)atomic64_read(&STm->tape->stats->tot_read_time)); +} +static DEVICE_ATTR_RO(read_ns); + +/** + * write_cnt_show - write count - number of user calls + * to write(2) that have written data to tape. + * @dev: struct device + * @attr: attribute structure + * @buf: buffer to return formatted data in + */ +static ssize_t write_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + + return sprintf(buf, "%lld", + (long long)atomic64_read(&STm->tape->stats->write_cnt)); +} +static DEVICE_ATTR_RO(write_cnt); + +/** + * write_byte_cnt_show - write byte count - raw count of + * bytes written to tape. + * @dev: struct device + * @attr: attribute structure + * @buf: buffer to return formatted data in + */ +static ssize_t write_byte_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + + return sprintf(buf, "%lld", + (long long)atomic64_read(&STm->tape->stats->write_byte_cnt)); +} +static DEVICE_ATTR_RO(write_byte_cnt); + +/** + * write_ns_show - write ns - number of nanoseconds waiting on write + * requests to complete. + * @dev: struct device + * @attr: attribute structure + * @buf: buffer to return formatted data in + */ +static ssize_t write_ns_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + + return sprintf(buf, "%lld", + (long long)atomic64_read(&STm->tape->stats->tot_write_time)); +} +static DEVICE_ATTR_RO(write_ns); + +/** + * in_flight_show - number of I/Os currently in flight - + * in most cases this will be either 0 or 1. It may be higher if someone + * has also issued other SCSI commands such as via an ioctl. 
+ * @dev: struct device + * @attr: attribute structure + * @buf: buffer to return formatted data in + */ +static ssize_t in_flight_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + + return sprintf(buf, "%lld", + (long long)atomic64_read(&STm->tape->stats->in_flight)); +} +static DEVICE_ATTR_RO(in_flight); + +/** + * io_ns_show - io wait ns - this is the number of ns spent + * waiting on all I/O to complete. This includes tape movement commands + * such as rewinding, seeking to end of file or tape, it also includes + * read and write. To determine the time spent on tape movement + * subtract the read and write ns from this value. + * @dev: struct device + * @attr: attribute structure + * @buf: buffer to return formatted data in + */ +static ssize_t io_ns_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + + return sprintf(buf, "%lld", + (long long)atomic64_read(&STm->tape->stats->tot_io_time)); +} +static DEVICE_ATTR_RO(io_ns); + +/** + * other_cnt_show - other io count - this is the number of + * I/O requests other than read and write requests. + * Typically these are tape movement requests but will include driver + * tape movement. This includes only requests issued by the st driver. + * @dev: struct device + * @attr: attribute structure + * @buf: buffer to return formatted data in + */ +static ssize_t other_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + + return sprintf(buf, "%lld", + (long long)atomic64_read(&STm->tape->stats->other_cnt)); +} +static DEVICE_ATTR_RO(other_cnt); + +/** + * resid_cnt_show - A count of the number of times we get a residual + * count - this should indicate someone issuing reads larger than the + * block size on tape. + * @dev: struct device + * @attr: attribute structure + * @buf: buffer to return formatted data in + */ +static ssize_t resid_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + + return sprintf(buf, "%lld", + (long long)atomic64_read(&STm->tape->stats->resid_cnt)); +} +static DEVICE_ATTR_RO(resid_cnt); + +static struct attribute *st_dev_attrs[] = { + &dev_attr_defined.attr, + &dev_attr_default_blksize.attr, + &dev_attr_default_density.attr, + &dev_attr_default_compression.attr, + &dev_attr_options.attr, + NULL, +}; + +static struct attribute *st_stats_attrs[] = { + &dev_attr_read_cnt.attr, + &dev_attr_read_byte_cnt.attr, + &dev_attr_read_ns.attr, + &dev_attr_write_cnt.attr, + &dev_attr_write_byte_cnt.attr, + &dev_attr_write_ns.attr, + &dev_attr_in_flight.attr, + &dev_attr_io_ns.attr, + &dev_attr_other_cnt.attr, + &dev_attr_resid_cnt.attr, + NULL, +}; + +static struct attribute_group stats_group = { + .name = "stats", + .attrs = st_stats_attrs, +}; + +static struct attribute_group st_group = { + .attrs = st_dev_attrs, +}; + +static const struct attribute_group *st_dev_groups[] = { + &st_group, + &stats_group, + NULL, +}; + +/* The following functions may be useful for a larger audience. 
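For context, a minimal user-space sketch of consuming the "stats" group defined above. The sysfs path assumes the first drive (/sys/class/scsi_tape/st0/stats), and the tape-movement figure simply follows the io_ns_show() comment (io_ns minus read_ns minus write_ns); this is an illustration under those assumptions, not part of the driver.

/* Minimal sketch: read the per-device "stats" attributes for the first
 * drive and derive tape-movement time as io_ns - read_ns - write_ns.
 * The sysfs path is an assumption. */
#include <stdio.h>

static long long read_stat(const char *name)
{
	char path[128];
	long long v = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/scsi_tape/st0/stats/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%lld", &v) != 1)
		v = -1;
	fclose(f);
	return v;
}

int main(void)
{
	long long io_ns = read_stat("io_ns");
	long long read_ns = read_stat("read_ns");
	long long write_ns = read_stat("write_ns");

	printf("bytes read/written: %lld/%lld\n",
	       read_stat("read_byte_cnt"), read_stat("write_byte_cnt"));
	printf("time moving tape:   %lld ns\n", io_ns - read_ns - write_ns);
	return 0;
}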
*/ +static int sgl_map_user_pages(struct st_buffer *STbp, + const unsigned int max_pages, unsigned long uaddr, + size_t count, int rw) +{ + unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT; + unsigned long start = uaddr >> PAGE_SHIFT; + const int nr_pages = end - start; + int res, i; + struct page **pages; + struct rq_map_data *mdata = &STbp->map_data; + + /* User attempted Overflow! */ + if ((uaddr + count) < uaddr) + return -EINVAL; + + /* Too big */ + if (nr_pages > max_pages) + return -ENOMEM; + + /* Hmm? */ + if (count == 0) + return 0; + + pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL); + if (pages == NULL) + return -ENOMEM; + + /* Try to fault in all of the necessary pages */ + /* rw==READ means read from drive, write into memory area */ + res = pin_user_pages_fast(uaddr, nr_pages, rw == READ ? FOLL_WRITE : 0, + pages); + + /* Errors and no page mapped should return here */ + if (res < nr_pages) + goto out_unmap; + + for (i=0; i < nr_pages; i++) { + /* FIXME: flush superflous for rw==READ, + * probably wrong function for rw==WRITE + */ + flush_dcache_page(pages[i]); + } + + mdata->offset = uaddr & ~PAGE_MASK; + STbp->mapped_pages = pages; + + return nr_pages; + out_unmap: + if (res > 0) { + unpin_user_pages(pages, res); + res = 0; + } + kfree(pages); + return res; +} + + +/* And unmap them... */ +static int sgl_unmap_user_pages(struct st_buffer *STbp, + const unsigned int nr_pages, int dirtied) +{ + /* FIXME: cache flush missing for rw==READ */ + unpin_user_pages_dirty_lock(STbp->mapped_pages, nr_pages, dirtied); + + kfree(STbp->mapped_pages); + STbp->mapped_pages = NULL; + + return 0; +} diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h new file mode 100644 index 000000000..7a68eaba7 --- /dev/null +++ b/drivers/scsi/st.h @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ST_H +#define _ST_H + +#include +#include +#include +#include + +/* Descriptor for analyzed sense data */ +struct st_cmdstatus { + int midlevel_result; + struct scsi_sense_hdr sense_hdr; + int have_sense; + int residual; + u64 uremainder64; + u8 flags; + u8 remainder_valid; + u8 fixed_format; + u8 deferred; +}; + +struct scsi_tape; + +/* scsi tape command */ +struct st_request { + unsigned char cmd[MAX_COMMAND_SIZE]; + unsigned char sense[SCSI_SENSE_BUFFERSIZE]; + int result; + struct scsi_tape *stp; + struct completion *waiting; + struct bio *bio; +}; + +/* The tape buffer descriptor. */ +struct st_buffer { + unsigned char cleared; /* internal buffer cleared after open? */ + unsigned short do_dio; /* direct i/o set up? */ + int buffer_size; + int buffer_blocks; + int buffer_bytes; + int read_pointer; + int writing; + int syscall_result; + struct st_request *last_SRpnt; + struct st_cmdstatus cmdstat; + struct page **reserved_pages; + int reserved_page_order; + struct page **mapped_pages; + struct rq_map_data map_data; + unsigned char *b_data; + unsigned short use_sg; /* zero or max number of s/g segments for this adapter */ + unsigned short sg_segs; /* number of segments in s/g list */ + unsigned short frp_segs; /* number of buffer segments */ +}; + +/* The tape mode definition */ +struct st_modedef { + unsigned char defined; + unsigned char sysv; /* SYS V semantics? 
*/ + unsigned char do_async_writes; + unsigned char do_buffer_writes; + unsigned char do_read_ahead; + unsigned char defaults_for_writes; + unsigned char default_compression; /* 0 = don't touch, etc */ + short default_density; /* Forced density, -1 = no value */ + int default_blksize; /* Forced blocksize, -1 = no value */ + struct scsi_tape *tape; + struct device *devs[2]; /* Auto-rewind and non-rewind devices */ + struct cdev *cdevs[2]; /* Auto-rewind and non-rewind devices */ +}; + +/* Number of modes can be changed by changing ST_NBR_MODE_BITS. The maximum + number of modes is 16 (ST_NBR_MODE_BITS 4) */ +#define ST_NBR_MODE_BITS 2 +#define ST_NBR_MODES (1 << ST_NBR_MODE_BITS) +#define ST_MODE_SHIFT (7 - ST_NBR_MODE_BITS) +#define ST_MODE_MASK ((ST_NBR_MODES - 1) << ST_MODE_SHIFT) + +#define ST_MAX_TAPES (1 << (20 - (ST_NBR_MODE_BITS + 1))) +#define ST_MAX_TAPE_ENTRIES (ST_MAX_TAPES << (ST_NBR_MODE_BITS + 1)) + +/* The status related to each partition */ +struct st_partstat { + unsigned char rw; + unsigned char eof; + unsigned char at_sm; + unsigned char last_block_valid; + u32 last_block_visited; + int drv_block; /* The block where the drive head is */ + int drv_file; +}; + +/* Tape statistics */ +struct scsi_tape_stats { + atomic64_t read_byte_cnt; /* bytes read */ + atomic64_t write_byte_cnt; /* bytes written */ + atomic64_t in_flight; /* Number of I/Os in flight */ + atomic64_t read_cnt; /* Count of read requests */ + atomic64_t write_cnt; /* Count of write requests */ + atomic64_t other_cnt; /* Count of other requests either + * implicit or from user space + * ioctl. */ + atomic64_t resid_cnt; /* Count of resid_len > 0 */ + atomic64_t tot_read_time; /* ktime spent completing reads */ + atomic64_t tot_write_time; /* ktime spent completing writes */ + atomic64_t tot_io_time; /* ktime spent doing any I/O */ + ktime_t read_time; /* holds ktime request was queued */ + ktime_t write_time; /* holds ktime request was queued */ + ktime_t other_time; /* holds ktime request was queued */ + atomic_t last_read_size; /* Number of bytes issued for last read */ + atomic_t last_write_size; /* Number of bytes issued for last write */ +}; + +#define ST_NBR_PARTITIONS 4 + +/* The tape drive descriptor */ +struct scsi_tape { + struct scsi_device *device; + struct mutex lock; /* For serialization */ + struct completion wait; /* For SCSI commands */ + struct st_buffer *buffer; + int index; + + /* Drive characteristics */ + unsigned char omit_blklims; + unsigned char do_auto_lock; + unsigned char can_bsr; + unsigned char can_partitions; + unsigned char two_fm; + unsigned char fast_mteom; + unsigned char immediate; + unsigned char scsi2_logical; + unsigned char default_drvbuffer; /* 0xff = don't touch, value 3 bits */ + unsigned char cln_mode; /* 0 = none, otherwise sense byte nbr */ + unsigned char cln_sense_value; + unsigned char cln_sense_mask; + unsigned char use_pf; /* Set Page Format bit in all mode selects? */ + unsigned char try_dio; /* try direct i/o in general? */ + unsigned char try_dio_now; /* try direct i/o before next close? 
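As an aside on the mode macros above: with ST_NBR_MODE_BITS == 2 the mode index occupies bits 5-6 of the character-device minor number, and bit 7 selects the non-rewind node. The stand-alone decoder below is a hypothetical illustration assuming exactly that layout.

/* Hypothetical decode of an st minor number, assuming the layout implied
 * by the macros above: bits 5-6 = mode index, bit 7 = non-rewind flag. */
#include <stdio.h>

#define ST_NBR_MODE_BITS 2
#define ST_NBR_MODES	(1 << ST_NBR_MODE_BITS)
#define ST_MODE_SHIFT	(7 - ST_NBR_MODE_BITS)			/* 5 */
#define ST_MODE_MASK	((ST_NBR_MODES - 1) << ST_MODE_SHIFT)	/* 0x60 */

int main(void)
{
	unsigned int minor = 0xa1;	/* example value only */
	unsigned int mode = (minor & ST_MODE_MASK) >> ST_MODE_SHIFT;
	unsigned int non_rewind = (minor >> 7) & 1;

	printf("mode %u, %s device\n", mode,
	       non_rewind ? "non-rewind" : "auto-rewind");
	return 0;
}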
*/ + unsigned char c_algo; /* compression algorithm */ + unsigned char pos_unknown; /* after reset position unknown */ + unsigned char sili; /* use SILI when reading in variable b mode */ + unsigned char immediate_filemark; /* write filemark immediately */ + int tape_type; + int long_timeout; /* timeout for commands known to take long time */ + + /* Mode characteristics */ + struct st_modedef modes[ST_NBR_MODES]; + int current_mode; + + /* Status variables */ + int partition; + int new_partition; + int nbr_partitions; /* zero until partition support enabled */ + struct st_partstat ps[ST_NBR_PARTITIONS]; + unsigned char dirty; + unsigned char ready; + unsigned char write_prot; + unsigned char drv_write_prot; + unsigned char in_use; + unsigned char blksize_changed; + unsigned char density_changed; + unsigned char compression_changed; + unsigned char drv_buffer; + unsigned char density; + unsigned char door_locked; + unsigned char autorew_dev; /* auto-rewind device */ + unsigned char rew_at_close; /* rewind necessary at close */ + unsigned char inited; + unsigned char cleaning_req; /* cleaning requested? */ + int block_size; + int min_block; + int max_block; + int recover_count; /* From tape opening */ + int recover_reg; /* From last status call */ + +#if DEBUG + unsigned char write_pending; + int nbr_finished; + int nbr_waits; + int nbr_requests; + int nbr_dio; + int nbr_pages; + unsigned char last_cmnd[6]; + unsigned char last_sense[16]; +#endif + char name[DISK_NAME_LEN]; + struct kref kref; + struct scsi_tape_stats *stats; +}; + +/* Bit masks for use_pf */ +#define USE_PF 1 +#define PF_TESTED 2 + +/* Values of eof */ +#define ST_NOEOF 0 +#define ST_FM_HIT 1 +#define ST_FM 2 +#define ST_EOM_OK 3 +#define ST_EOM_ERROR 4 +#define ST_EOD_1 5 +#define ST_EOD_2 6 +#define ST_EOD 7 +/* EOD hit while reading => ST_EOD_1 => return zero => ST_EOD_2 => + return zero => ST_EOD, return ENOSPC */ +/* When writing: ST_EOM_OK == early warning found, write OK + ST_EOD_1 == allow trying new write after early warning + ST_EOM_ERROR == early warning found, not able to write all */ + +/* Values of rw */ +#define ST_IDLE 0 +#define ST_READING 1 +#define ST_WRITING 2 + +/* Values of ready state */ +#define ST_READY 0 +#define ST_NOT_READY 1 +#define ST_NO_TAPE 2 + +/* Values for door lock state */ +#define ST_UNLOCKED 0 +#define ST_LOCKED_EXPLICIT 1 +#define ST_LOCKED_AUTO 2 +#define ST_LOCK_FAILS 3 + +/* Positioning SCSI-commands for Tandberg, etc. drives */ +#define QFA_REQUEST_BLOCK 0x02 +#define QFA_SEEK_BLOCK 0x0c + +/* Setting the binary options */ +#define ST_DONT_TOUCH 0 +#define ST_NO 1 +#define ST_YES 2 + +#define EXTENDED_SENSE_START 18 + +/* Masks for some conditions in the sense data */ +#define SENSE_FMK 0x80 +#define SENSE_EOM 0x40 +#define SENSE_ILI 0x20 + +#endif diff --git a/drivers/scsi/st_options.h b/drivers/scsi/st_options.h new file mode 100644 index 000000000..2b6cabd7b --- /dev/null +++ b/drivers/scsi/st_options.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + The compile-time configurable defaults for the Linux SCSI tape driver. + + Copyright 1995-2003 Kai Makisara. + + Last modified: Thu Feb 21 21:47:07 2008 by kai.makisara +*/ + +#ifndef _ST_OPTIONS_H +#define _ST_OPTIONS_H + +/* If TRY_DIRECT_IO is non-zero, the driver tries to transfer data directly + between the user buffer and tape drive. If this is not possible, driver + buffer is used. If TRY_DIRECT_IO is zero, driver buffer is always used. 
*/ +#define TRY_DIRECT_IO 1 + +/* The driver does not wait for some operations to finish before returning + to the user program if ST_NOWAIT is non-zero. This helps if the SCSI + adapter does not support multiple outstanding commands. However, the user + should not give a new tape command before the previous one has finished. */ +#define ST_NOWAIT 0 + +/* If ST_IN_FILE_POS is nonzero, the driver positions the tape after the + record been read by the user program even if the tape has moved further + because of buffered reads. Should be set to zero to support also drives + that can't space backwards over records. NOTE: The tape will be + spaced backwards over an "accidentally" crossed filemark in any case. */ +#define ST_IN_FILE_POS 0 + +/* If ST_RECOVERED_WRITE_FATAL is non-zero, recovered errors while writing + are considered "hard errors". */ +#define ST_RECOVERED_WRITE_FATAL 0 + +/* The "guess" for the block size for devices that don't support MODE + SENSE. */ +#define ST_DEFAULT_BLOCK 0 + +/* The minimum tape driver buffer size in kilobytes in fixed block mode. + Must be non-zero. */ +#define ST_FIXED_BUFFER_BLOCKS 32 + +/* Maximum number of scatter/gather segments */ +#define ST_MAX_SG 256 + +/* The number of scatter/gather segments to allocate at first try (must be + smaller or equal to the maximum). */ +#define ST_FIRST_SG 8 + +/* The size of the first scatter/gather segments (determines the maximum block + size for SCSI adapters not supporting scatter/gather). The default is set + to try to allocate the buffer as one chunk. */ +#define ST_FIRST_ORDER 5 + + +/* The following lines define defaults for properties that can be set + separately for each drive using the MTSTOPTIONS ioctl. */ + +/* If ST_TWO_FM is non-zero, the driver writes two filemarks after a + file being written. Some drives can't handle two filemarks at the + end of data. */ +#define ST_TWO_FM 0 + +/* If ST_BUFFER_WRITES is non-zero, writes in fixed block mode are + buffered until the driver buffer is full or asynchronous write is + triggered. May make detection of End-Of-Medium early enough fail. */ +#define ST_BUFFER_WRITES 1 + +/* If ST_ASYNC_WRITES is non-zero, the SCSI write command may be started + without waiting for it to finish. May cause problems in multiple + tape backups. */ +#define ST_ASYNC_WRITES 1 + +/* If ST_READ_AHEAD is non-zero, blocks are read ahead in fixed block + mode. */ +#define ST_READ_AHEAD 1 + +/* If ST_AUTO_LOCK is non-zero, the drive door is locked at the first + read or write command after the device is opened. The door is opened + when the device is closed. */ +#define ST_AUTO_LOCK 0 + +/* If ST_FAST_MTEOM is non-zero, the MTEOM ioctl is done using the + direct SCSI command. The file number status is lost but this method + is fast with some drives. Otherwise MTEOM is done by spacing over + files and the file number status is retained. */ +#define ST_FAST_MTEOM 0 + +/* If ST_SCSI2LOGICAL is nonzero, the logical block addresses are used for + MTIOCPOS and MTSEEK by default. Vendor addresses are used if ST_SCSI2LOGICAL + is zero. */ +#define ST_SCSI2LOGICAL 0 + +/* If ST_SYSV is non-zero, the tape behaves according to the SYS V semantics. + The default is BSD semantics. */ +#define ST_SYSV 0 + +/* If ST_SILI is non-zero, the SILI bit is set when reading in variable block + mode and the block size is determined using the residual returned by the HBA. 
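The compile-time defaults above only seed each mode; the same MT_ST_* option bits reported by the "options" attribute can also be changed at run time through the MTSETDRVBUFFER ioctl from <linux/mtio.h>. A minimal sketch, assuming /dev/nst0 as the device node:

/* Illustrative only: enable buffered and asynchronous writes for the
 * current mode via MTSETDRVBUFFER. /dev/nst0 is an assumed device node. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>
#include <unistd.h>

int main(void)
{
	struct mtop op;
	int fd = open("/dev/nst0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	op.mt_op = MTSETDRVBUFFER;
	op.mt_count = MT_ST_SETBOOLEANS | MT_ST_BUFFER_WRITES |
		      MT_ST_ASYNC_WRITES;
	if (ioctl(fd, MTIOCTOP, &op) < 0)
		perror("MTIOCTOP");
	close(fd);
	return 0;
}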
*/ +#define ST_SILI 0 + +/* Time to wait for the drive to become ready if blocking open */ +#define ST_BLOCK_SECONDS 120 + +#endif diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c new file mode 100644 index 000000000..8ffb75be9 --- /dev/null +++ b/drivers/scsi/stex.c @@ -0,0 +1,2030 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SuperTrak EX Series Storage Controller driver for Linux + * + * Copyright (C) 2005-2015 Promise Technology Inc. + * + * Written By: + * Ed Lin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "stex" +#define ST_DRIVER_VERSION "6.02.0000.01" +#define ST_VER_MAJOR 6 +#define ST_VER_MINOR 02 +#define ST_OEM 0000 +#define ST_BUILD_VER 01 + +enum { + /* MU register offset */ + IMR0 = 0x10, /* MU_INBOUND_MESSAGE_REG0 */ + IMR1 = 0x14, /* MU_INBOUND_MESSAGE_REG1 */ + OMR0 = 0x18, /* MU_OUTBOUND_MESSAGE_REG0 */ + OMR1 = 0x1c, /* MU_OUTBOUND_MESSAGE_REG1 */ + IDBL = 0x20, /* MU_INBOUND_DOORBELL */ + IIS = 0x24, /* MU_INBOUND_INTERRUPT_STATUS */ + IIM = 0x28, /* MU_INBOUND_INTERRUPT_MASK */ + ODBL = 0x2c, /* MU_OUTBOUND_DOORBELL */ + OIS = 0x30, /* MU_OUTBOUND_INTERRUPT_STATUS */ + OIM = 0x3c, /* MU_OUTBOUND_INTERRUPT_MASK */ + + YIOA_STATUS = 0x00, + YH2I_INT = 0x20, + YINT_EN = 0x34, + YI2H_INT = 0x9c, + YI2H_INT_C = 0xa0, + YH2I_REQ = 0xc0, + YH2I_REQ_HI = 0xc4, + PSCRATCH0 = 0xb0, + PSCRATCH1 = 0xb4, + PSCRATCH2 = 0xb8, + PSCRATCH3 = 0xbc, + PSCRATCH4 = 0xc8, + MAILBOX_BASE = 0x1000, + MAILBOX_HNDSHK_STS = 0x0, + + /* MU register value */ + MU_INBOUND_DOORBELL_HANDSHAKE = (1 << 0), + MU_INBOUND_DOORBELL_REQHEADCHANGED = (1 << 1), + MU_INBOUND_DOORBELL_STATUSTAILCHANGED = (1 << 2), + MU_INBOUND_DOORBELL_HMUSTOPPED = (1 << 3), + MU_INBOUND_DOORBELL_RESET = (1 << 4), + + MU_OUTBOUND_DOORBELL_HANDSHAKE = (1 << 0), + MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = (1 << 1), + MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = (1 << 2), + MU_OUTBOUND_DOORBELL_BUSCHANGE = (1 << 3), + MU_OUTBOUND_DOORBELL_HASEVENT = (1 << 4), + MU_OUTBOUND_DOORBELL_REQUEST_RESET = (1 << 27), + + /* MU status code */ + MU_STATE_STARTING = 1, + MU_STATE_STARTED = 2, + MU_STATE_RESETTING = 3, + MU_STATE_FAILED = 4, + MU_STATE_STOP = 5, + MU_STATE_NOCONNECT = 6, + + MU_MAX_DELAY = 50, + MU_HANDSHAKE_SIGNATURE = 0x55aaaa55, + MU_HANDSHAKE_SIGNATURE_HALF = 0x5a5a0000, + MU_HARD_RESET_WAIT = 30000, + HMU_PARTNER_TYPE = 2, + + /* firmware returned values */ + SRB_STATUS_SUCCESS = 0x01, + SRB_STATUS_ERROR = 0x04, + SRB_STATUS_BUSY = 0x05, + SRB_STATUS_INVALID_REQUEST = 0x06, + SRB_STATUS_SELECTION_TIMEOUT = 0x0A, + SRB_SEE_SENSE = 0x80, + + /* task attribute */ + TASK_ATTRIBUTE_SIMPLE = 0x0, + TASK_ATTRIBUTE_HEADOFQUEUE = 0x1, + TASK_ATTRIBUTE_ORDERED = 0x2, + TASK_ATTRIBUTE_ACA = 0x4, +}; + +enum { + SS_STS_NORMAL = 0x80000000, + SS_STS_DONE = 0x40000000, + SS_STS_HANDSHAKE = 0x20000000, + + SS_HEAD_HANDSHAKE = 0x80, + + SS_H2I_INT_RESET = 0x100, + + SS_I2H_REQUEST_RESET = 0x2000, + + SS_MU_OPERATIONAL = 0x80000000, +}; + +enum { + STEX_CDB_LENGTH = 16, + STATUS_VAR_LEN = 128, + + /* sg flags */ + SG_CF_EOT = 0x80, /* end of table */ + SG_CF_64B = 0x40, /* 64 bit item */ + SG_CF_HOST = 0x20, /* sg in host memory */ + MSG_DATA_DIR_ND = 0, + MSG_DATA_DIR_IN = 1, + MSG_DATA_DIR_OUT = 2, + + st_shasta = 0, + st_vsc = 1, + st_yosemite = 2, + st_seq = 3, + st_yel = 4, + st_P3 = 5, + + 
PASSTHRU_REQ_TYPE = 0x00000001, + PASSTHRU_REQ_NO_WAKEUP = 0x00000100, + ST_INTERNAL_TIMEOUT = 180, + + ST_TO_CMD = 0, + ST_FROM_CMD = 1, + + /* vendor specific commands of Promise */ + MGT_CMD = 0xd8, + SINBAND_MGT_CMD = 0xd9, + ARRAY_CMD = 0xe0, + CONTROLLER_CMD = 0xe1, + DEBUGGING_CMD = 0xe2, + PASSTHRU_CMD = 0xe3, + + PASSTHRU_GET_ADAPTER = 0x05, + PASSTHRU_GET_DRVVER = 0x10, + + CTLR_CONFIG_CMD = 0x03, + CTLR_SHUTDOWN = 0x0d, + + CTLR_POWER_STATE_CHANGE = 0x0e, + CTLR_POWER_SAVING = 0x01, + + PASSTHRU_SIGNATURE = 0x4e415041, + MGT_CMD_SIGNATURE = 0xba, + + INQUIRY_EVPD = 0x01, + + ST_ADDITIONAL_MEM = 0x200000, + ST_ADDITIONAL_MEM_MIN = 0x80000, + PMIC_SHUTDOWN = 0x0D, + PMIC_REUMSE = 0x10, + ST_IGNORED = -1, + ST_NOTHANDLED = 7, + ST_S3 = 3, + ST_S4 = 4, + ST_S5 = 5, + ST_S6 = 6, +}; + +struct st_sgitem { + u8 ctrl; /* SG_CF_xxx */ + u8 reserved[3]; + __le32 count; + __le64 addr; +}; + +struct st_ss_sgitem { + __le32 addr; + __le32 addr_hi; + __le32 count; +}; + +struct st_sgtable { + __le16 sg_count; + __le16 max_sg_count; + __le32 sz_in_byte; +}; + +struct st_msg_header { + __le64 handle; + u8 flag; + u8 channel; + __le16 timeout; + u32 reserved; +}; + +struct handshake_frame { + __le64 rb_phy; /* request payload queue physical address */ + __le16 req_sz; /* size of each request payload */ + __le16 req_cnt; /* count of reqs the buffer can hold */ + __le16 status_sz; /* size of each status payload */ + __le16 status_cnt; /* count of status the buffer can hold */ + __le64 hosttime; /* seconds from Jan 1, 1970 (GMT) */ + u8 partner_type; /* who sends this frame */ + u8 reserved0[7]; + __le32 partner_ver_major; + __le32 partner_ver_minor; + __le32 partner_ver_oem; + __le32 partner_ver_build; + __le32 extra_offset; /* NEW */ + __le32 extra_size; /* NEW */ + __le32 scratch_size; + u32 reserved1; +}; + +struct req_msg { + __le16 tag; + u8 lun; + u8 target; + u8 task_attr; + u8 task_manage; + u8 data_dir; + u8 payload_sz; /* payload size in 4-byte, not used */ + u8 cdb[STEX_CDB_LENGTH]; + u32 variable[]; +}; + +struct status_msg { + __le16 tag; + u8 lun; + u8 target; + u8 srb_status; + u8 scsi_status; + u8 reserved; + u8 payload_sz; /* payload size in 4-byte */ + u8 variable[STATUS_VAR_LEN]; +}; + +struct ver_info { + u32 major; + u32 minor; + u32 oem; + u32 build; + u32 reserved[2]; +}; + +struct st_frame { + u32 base[6]; + u32 rom_addr; + + struct ver_info drv_ver; + struct ver_info bios_ver; + + u32 bus; + u32 slot; + u32 irq_level; + u32 irq_vec; + u32 id; + u32 subid; + + u32 dimm_size; + u8 dimm_type; + u8 reserved[3]; + + u32 channel; + u32 reserved1; +}; + +struct st_drvver { + u32 major; + u32 minor; + u32 oem; + u32 build; + u32 signature[2]; + u8 console_id; + u8 host_no; + u8 reserved0[2]; + u32 reserved[3]; +}; + +struct st_ccb { + struct req_msg *req; + struct scsi_cmnd *cmd; + + void *sense_buffer; + unsigned int sense_bufflen; + int sg_count; + + u32 req_type; + u8 srb_status; + u8 scsi_status; + u8 reserved[2]; +}; + +struct st_hba { + void __iomem *mmio_base; /* iomapped PCI memory space */ + void *dma_mem; + dma_addr_t dma_handle; + size_t dma_size; + + struct Scsi_Host *host; + struct pci_dev *pdev; + + struct req_msg * (*alloc_rq) (struct st_hba *); + int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *); + void (*send) (struct st_hba *, struct req_msg *, u16); + + u32 req_head; + u32 req_tail; + u32 status_head; + u32 status_tail; + + struct status_msg *status_buffer; + void *copy_buffer; /* temp buffer for driver-handled commands */ + struct st_ccb *ccb; + 
struct st_ccb *wait_ccb; + __le32 *scratch; + + char work_q_name[20]; + struct workqueue_struct *work_q; + struct work_struct reset_work; + wait_queue_head_t reset_waitq; + unsigned int mu_status; + unsigned int cardtype; + int msi_enabled; + int out_req_cnt; + u32 extra_offset; + u16 rq_count; + u16 rq_size; + u16 sts_count; + u8 supports_pm; + int msi_lock; +}; + +struct st_card_info { + struct req_msg * (*alloc_rq) (struct st_hba *); + int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *); + void (*send) (struct st_hba *, struct req_msg *, u16); + unsigned int max_id; + unsigned int max_lun; + unsigned int max_channel; + u16 rq_count; + u16 rq_size; + u16 sts_count; +}; + +static int S6flag; +static int stex_halt(struct notifier_block *nb, ulong event, void *buf); +static struct notifier_block stex_notifier = { + stex_halt, NULL, 0 +}; + +static int msi; +module_param(msi, int, 0); +MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts(0=off, 1=on)"); + +static const char console_inq_page[] = +{ + 0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30, + 0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20, /* "Promise " */ + 0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E, /* "RAID Con" */ + 0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20, /* "sole " */ + 0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20, /* "1.00 " */ + 0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D, /* "SX/RSAF-" */ + 0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20, /* "TE1.00 " */ + 0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20 +}; + +MODULE_AUTHOR("Ed Lin"); +MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ST_DRIVER_VERSION); + +static struct status_msg *stex_get_status(struct st_hba *hba) +{ + struct status_msg *status = hba->status_buffer + hba->status_tail; + + ++hba->status_tail; + hba->status_tail %= hba->sts_count+1; + + return status; +} + +static void stex_invalid_field(struct scsi_cmnd *cmd, + void (*done)(struct scsi_cmnd *)) +{ + /* "Invalid field in cdb" */ + scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0x24, 0x0); + done(cmd); +} + +static struct req_msg *stex_alloc_req(struct st_hba *hba) +{ + struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size; + + ++hba->req_head; + hba->req_head %= hba->rq_count+1; + + return req; +} + +static struct req_msg *stex_ss_alloc_req(struct st_hba *hba) +{ + return (struct req_msg *)(hba->dma_mem + + hba->req_head * hba->rq_size + sizeof(struct st_msg_header)); +} + +static int stex_map_sg(struct st_hba *hba, + struct req_msg *req, struct st_ccb *ccb) +{ + struct scsi_cmnd *cmd; + struct scatterlist *sg; + struct st_sgtable *dst; + struct st_sgitem *table; + int i, nseg; + + cmd = ccb->cmd; + nseg = scsi_dma_map(cmd); + BUG_ON(nseg < 0); + if (nseg) { + dst = (struct st_sgtable *)req->variable; + + ccb->sg_count = nseg; + dst->sg_count = cpu_to_le16((u16)nseg); + dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize); + dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd)); + + table = (struct st_sgitem *)(dst + 1); + scsi_for_each_sg(cmd, sg, nseg, i) { + table[i].count = cpu_to_le32((u32)sg_dma_len(sg)); + table[i].addr = cpu_to_le64(sg_dma_address(sg)); + table[i].ctrl = SG_CF_64B | SG_CF_HOST; + } + table[--i].ctrl |= SG_CF_EOT; + } + + return nseg; +} + +static int stex_ss_map_sg(struct st_hba *hba, + struct req_msg *req, struct st_ccb *ccb) +{ + struct scsi_cmnd *cmd; + struct scatterlist *sg; + struct st_sgtable *dst; + struct st_ss_sgitem *table; + int i, nseg; + + cmd = ccb->cmd; + nseg = scsi_dma_map(cmd); + BUG_ON(nseg < 0); + if (nseg) { + dst = 
(struct st_sgtable *)req->variable; + + ccb->sg_count = nseg; + dst->sg_count = cpu_to_le16((u16)nseg); + dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize); + dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd)); + + table = (struct st_ss_sgitem *)(dst + 1); + scsi_for_each_sg(cmd, sg, nseg, i) { + table[i].count = cpu_to_le32((u32)sg_dma_len(sg)); + table[i].addr = + cpu_to_le32(sg_dma_address(sg) & 0xffffffff); + table[i].addr_hi = + cpu_to_le32((sg_dma_address(sg) >> 16) >> 16); + } + } + + return nseg; +} + +static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) +{ + struct st_frame *p; + size_t count = sizeof(struct st_frame); + + p = hba->copy_buffer; + scsi_sg_copy_to_buffer(ccb->cmd, p, count); + memset(p->base, 0, sizeof(u32)*6); + *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); + p->rom_addr = 0; + + p->drv_ver.major = ST_VER_MAJOR; + p->drv_ver.minor = ST_VER_MINOR; + p->drv_ver.oem = ST_OEM; + p->drv_ver.build = ST_BUILD_VER; + + p->bus = hba->pdev->bus->number; + p->slot = hba->pdev->devfn; + p->irq_level = 0; + p->irq_vec = hba->pdev->irq; + p->id = hba->pdev->vendor << 16 | hba->pdev->device; + p->subid = + hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; + + scsi_sg_copy_from_buffer(ccb->cmd, p, count); +} + +static void +stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) +{ + req->tag = cpu_to_le16(tag); + + hba->ccb[tag].req = req; + hba->out_req_cnt++; + + writel(hba->req_head, hba->mmio_base + IMR0); + writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL); + readl(hba->mmio_base + IDBL); /* flush */ +} + +static void +stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) +{ + struct scsi_cmnd *cmd; + struct st_msg_header *msg_h; + dma_addr_t addr; + + req->tag = cpu_to_le16(tag); + + hba->ccb[tag].req = req; + hba->out_req_cnt++; + + cmd = hba->ccb[tag].cmd; + msg_h = (struct st_msg_header *)req - 1; + if (likely(cmd)) { + msg_h->channel = (u8)cmd->device->channel; + msg_h->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ); + } + addr = hba->dma_handle + hba->req_head * hba->rq_size; + addr += (hba->ccb[tag].sg_count+4)/11; + msg_h->handle = cpu_to_le64(addr); + + ++hba->req_head; + hba->req_head %= hba->rq_count+1; + if (hba->cardtype == st_P3) { + writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); + writel(addr, hba->mmio_base + YH2I_REQ); + } else { + writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); + readl(hba->mmio_base + YH2I_REQ_HI); /* flush */ + writel(addr, hba->mmio_base + YH2I_REQ); + readl(hba->mmio_base + YH2I_REQ); /* flush */ + } +} + +static void return_abnormal_state(struct st_hba *hba, int status) +{ + struct st_ccb *ccb; + unsigned long flags; + u16 tag; + + spin_lock_irqsave(hba->host->host_lock, flags); + for (tag = 0; tag < hba->host->can_queue; tag++) { + ccb = &hba->ccb[tag]; + if (ccb->req == NULL) + continue; + ccb->req = NULL; + if (ccb->cmd) { + scsi_dma_unmap(ccb->cmd); + ccb->cmd->result = status << 16; + scsi_done(ccb->cmd); + ccb->cmd = NULL; + } + } + spin_unlock_irqrestore(hba->host->host_lock, flags); +} +static int +stex_slave_config(struct scsi_device *sdev) +{ + sdev->use_10_for_rw = 1; + sdev->use_10_for_ms = 1; + blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); + + return 0; +} + +static int stex_queuecommand_lck(struct scsi_cmnd *cmd) +{ + void (*done)(struct scsi_cmnd *) = scsi_done; + struct st_hba *hba; + struct Scsi_Host *host; + unsigned int id, lun; + struct req_msg *req; + u16 tag; + + host = 
cmd->device->host; + id = cmd->device->id; + lun = cmd->device->lun; + hba = (struct st_hba *) &host->hostdata[0]; + if (hba->mu_status == MU_STATE_NOCONNECT) { + cmd->result = DID_NO_CONNECT; + done(cmd); + return 0; + } + if (unlikely(hba->mu_status != MU_STATE_STARTED)) + return SCSI_MLQUEUE_HOST_BUSY; + + switch (cmd->cmnd[0]) { + case MODE_SENSE_10: + { + static char ms10_caching_page[12] = + { 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 }; + unsigned char page; + + page = cmd->cmnd[2] & 0x3f; + if (page == 0x8 || page == 0x3f) { + scsi_sg_copy_from_buffer(cmd, ms10_caching_page, + sizeof(ms10_caching_page)); + cmd->result = DID_OK << 16; + done(cmd); + } else + stex_invalid_field(cmd, done); + return 0; + } + case REPORT_LUNS: + /* + * The shasta firmware does not report actual luns in the + * target, so fail the command to force sequential lun scan. + * Also, the console device does not support this command. + */ + if (hba->cardtype == st_shasta || id == host->max_id - 1) { + stex_invalid_field(cmd, done); + return 0; + } + break; + case TEST_UNIT_READY: + if (id == host->max_id - 1) { + cmd->result = DID_OK << 16; + done(cmd); + return 0; + } + break; + case INQUIRY: + if (lun >= host->max_lun) { + cmd->result = DID_NO_CONNECT << 16; + done(cmd); + return 0; + } + if (id != host->max_id - 1) + break; + if (!lun && !cmd->device->channel && + (cmd->cmnd[1] & INQUIRY_EVPD) == 0) { + scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page, + sizeof(console_inq_page)); + cmd->result = DID_OK << 16; + done(cmd); + } else + stex_invalid_field(cmd, done); + return 0; + case PASSTHRU_CMD: + if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { + const struct st_drvver ver = { + .major = ST_VER_MAJOR, + .minor = ST_VER_MINOR, + .oem = ST_OEM, + .build = ST_BUILD_VER, + .signature[0] = PASSTHRU_SIGNATURE, + .console_id = host->max_id - 1, + .host_no = hba->host->host_no, + }; + size_t cp_len = sizeof(ver); + + cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len); + if (sizeof(ver) == cp_len) + cmd->result = DID_OK << 16; + else + cmd->result = DID_ERROR << 16; + done(cmd); + return 0; + } + break; + default: + break; + } + + tag = scsi_cmd_to_rq(cmd)->tag; + + if (unlikely(tag >= host->can_queue)) + return SCSI_MLQUEUE_HOST_BUSY; + + req = hba->alloc_rq(hba); + + req->lun = lun; + req->target = id; + + /* cdb */ + memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH); + + if (cmd->sc_data_direction == DMA_FROM_DEVICE) + req->data_dir = MSG_DATA_DIR_IN; + else if (cmd->sc_data_direction == DMA_TO_DEVICE) + req->data_dir = MSG_DATA_DIR_OUT; + else + req->data_dir = MSG_DATA_DIR_ND; + + hba->ccb[tag].cmd = cmd; + hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE; + hba->ccb[tag].sense_buffer = cmd->sense_buffer; + + if (!hba->map_sg(hba, req, &hba->ccb[tag])) { + hba->ccb[tag].sg_count = 0; + memset(&req->variable[0], 0, 8); + } + + hba->send(hba, req, tag); + return 0; +} + +static DEF_SCSI_QCMD(stex_queuecommand) + +static void stex_scsi_done(struct st_ccb *ccb) +{ + struct scsi_cmnd *cmd = ccb->cmd; + int result; + + if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) { + result = ccb->scsi_status; + switch (ccb->scsi_status) { + case SAM_STAT_GOOD: + result |= DID_OK << 16; + break; + case SAM_STAT_CHECK_CONDITION: + result |= DID_OK << 16; + break; + case SAM_STAT_BUSY: + result |= DID_BUS_BUSY << 16; + break; + default: + result |= DID_ERROR << 16; + break; + } + } + else if (ccb->srb_status & SRB_SEE_SENSE) + result = SAM_STAT_CHECK_CONDITION; + else switch (ccb->srb_status) { + case 
SRB_STATUS_SELECTION_TIMEOUT: + result = DID_NO_CONNECT << 16; + break; + case SRB_STATUS_BUSY: + result = DID_BUS_BUSY << 16; + break; + case SRB_STATUS_INVALID_REQUEST: + case SRB_STATUS_ERROR: + default: + result = DID_ERROR << 16; + break; + } + + cmd->result = result; + scsi_done(cmd); +} + +static void stex_copy_data(struct st_ccb *ccb, + struct status_msg *resp, unsigned int variable) +{ + if (resp->scsi_status != SAM_STAT_GOOD) { + if (ccb->sense_buffer != NULL) + memcpy(ccb->sense_buffer, resp->variable, + min(variable, ccb->sense_bufflen)); + return; + } + + if (ccb->cmd == NULL) + return; + scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, variable); +} + +static void stex_check_cmd(struct st_hba *hba, + struct st_ccb *ccb, struct status_msg *resp) +{ + if (ccb->cmd->cmnd[0] == MGT_CMD && + resp->scsi_status != SAM_STAT_CHECK_CONDITION) + scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) - + le32_to_cpu(*(__le32 *)&resp->variable[0])); +} + +static void stex_mu_intr(struct st_hba *hba, u32 doorbell) +{ + void __iomem *base = hba->mmio_base; + struct status_msg *resp; + struct st_ccb *ccb; + unsigned int size; + u16 tag; + + if (unlikely(!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED))) + return; + + /* status payloads */ + hba->status_head = readl(base + OMR1); + if (unlikely(hba->status_head > hba->sts_count)) { + printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n", + pci_name(hba->pdev)); + return; + } + + /* + * it's not a valid status payload if: + * 1. there are no pending requests(e.g. during init stage) + * 2. there are some pending requests, but the controller is in + * reset status, and its type is not st_yosemite + * firmware of st_yosemite in reset status will return pending requests + * to driver, so we allow it to pass + */ + if (unlikely(hba->out_req_cnt <= 0 || + (hba->mu_status == MU_STATE_RESETTING && + hba->cardtype != st_yosemite))) { + hba->status_tail = hba->status_head; + goto update_status; + } + + while (hba->status_tail != hba->status_head) { + resp = stex_get_status(hba); + tag = le16_to_cpu(resp->tag); + if (unlikely(tag >= hba->host->can_queue)) { + printk(KERN_WARNING DRV_NAME + "(%s): invalid tag\n", pci_name(hba->pdev)); + continue; + } + + hba->out_req_cnt--; + ccb = &hba->ccb[tag]; + if (unlikely(hba->wait_ccb == ccb)) + hba->wait_ccb = NULL; + if (unlikely(ccb->req == NULL)) { + printk(KERN_WARNING DRV_NAME + "(%s): lagging req\n", pci_name(hba->pdev)); + continue; + } + + size = resp->payload_sz * sizeof(u32); /* payload size */ + if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN || + size > sizeof(*resp))) { + printk(KERN_WARNING DRV_NAME "(%s): bad status size\n", + pci_name(hba->pdev)); + } else { + size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */ + if (size) + stex_copy_data(ccb, resp, size); + } + + ccb->req = NULL; + ccb->srb_status = resp->srb_status; + ccb->scsi_status = resp->scsi_status; + + if (likely(ccb->cmd != NULL)) { + if (hba->cardtype == st_yosemite) + stex_check_cmd(hba, ccb, resp); + + if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD && + ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER)) + stex_controller_info(hba, ccb); + + scsi_dma_unmap(ccb->cmd); + stex_scsi_done(ccb); + } else + ccb->req_type = 0; + } + +update_status: + writel(hba->status_head, base + IMR1); + readl(base + IMR1); /* flush */ +} + +static irqreturn_t stex_intr(int irq, void *__hba) +{ + struct st_hba *hba = __hba; + void __iomem *base = hba->mmio_base; + u32 data; + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + + 
data = readl(base + ODBL); + + if (data && data != 0xffffffff) { + /* clear the interrupt */ + writel(data, base + ODBL); + readl(base + ODBL); /* flush */ + stex_mu_intr(hba, data); + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET && + hba->cardtype == st_shasta)) + queue_work(hba->work_q, &hba->reset_work); + return IRQ_HANDLED; + } + + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return IRQ_NONE; +} + +static void stex_ss_mu_intr(struct st_hba *hba) +{ + struct status_msg *resp; + struct st_ccb *ccb; + __le32 *scratch; + unsigned int size; + int count = 0; + u32 value; + u16 tag; + + if (unlikely(hba->out_req_cnt <= 0 || + hba->mu_status == MU_STATE_RESETTING)) + return; + + while (count < hba->sts_count) { + scratch = hba->scratch + hba->status_tail; + value = le32_to_cpu(*scratch); + if (unlikely(!(value & SS_STS_NORMAL))) + return; + + resp = hba->status_buffer + hba->status_tail; + *scratch = 0; + ++count; + ++hba->status_tail; + hba->status_tail %= hba->sts_count+1; + + tag = (u16)value; + if (unlikely(tag >= hba->host->can_queue)) { + printk(KERN_WARNING DRV_NAME + "(%s): invalid tag\n", pci_name(hba->pdev)); + continue; + } + + hba->out_req_cnt--; + ccb = &hba->ccb[tag]; + if (unlikely(hba->wait_ccb == ccb)) + hba->wait_ccb = NULL; + if (unlikely(ccb->req == NULL)) { + printk(KERN_WARNING DRV_NAME + "(%s): lagging req\n", pci_name(hba->pdev)); + continue; + } + + ccb->req = NULL; + if (likely(value & SS_STS_DONE)) { /* normal case */ + ccb->srb_status = SRB_STATUS_SUCCESS; + ccb->scsi_status = SAM_STAT_GOOD; + } else { + ccb->srb_status = resp->srb_status; + ccb->scsi_status = resp->scsi_status; + size = resp->payload_sz * sizeof(u32); + if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN || + size > sizeof(*resp))) { + printk(KERN_WARNING DRV_NAME + "(%s): bad status size\n", + pci_name(hba->pdev)); + } else { + size -= sizeof(*resp) - STATUS_VAR_LEN; + if (size) + stex_copy_data(ccb, resp, size); + } + if (likely(ccb->cmd != NULL)) + stex_check_cmd(hba, ccb, resp); + } + + if (likely(ccb->cmd != NULL)) { + scsi_dma_unmap(ccb->cmd); + stex_scsi_done(ccb); + } else + ccb->req_type = 0; + } +} + +static irqreturn_t stex_ss_intr(int irq, void *__hba) +{ + struct st_hba *hba = __hba; + void __iomem *base = hba->mmio_base; + u32 data; + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + + if (hba->cardtype == st_yel) { + data = readl(base + YI2H_INT); + if (data && data != 0xffffffff) { + /* clear the interrupt */ + writel(data, base + YI2H_INT_C); + stex_ss_mu_intr(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (unlikely(data & SS_I2H_REQUEST_RESET)) + queue_work(hba->work_q, &hba->reset_work); + return IRQ_HANDLED; + } + } else { + data = readl(base + PSCRATCH4); + if (data != 0xffffffff) { + if (data != 0) { + /* clear the interrupt */ + writel(data, base + PSCRATCH1); + writel((1 << 22), base + YH2I_INT); + } + stex_ss_mu_intr(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (unlikely(data & SS_I2H_REQUEST_RESET)) + queue_work(hba->work_q, &hba->reset_work); + return IRQ_HANDLED; + } + } + + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return IRQ_NONE; +} + +static int stex_common_handshake(struct st_hba *hba) +{ + void __iomem *base = hba->mmio_base; + struct handshake_frame *h; + dma_addr_t status_phys; + u32 data; + unsigned long before; + + if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { + writel(MU_INBOUND_DOORBELL_HANDSHAKE, 
base + IDBL); + readl(base + IDBL); + before = jiffies; + while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { + if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { + printk(KERN_ERR DRV_NAME + "(%s): no handshake signature\n", + pci_name(hba->pdev)); + return -1; + } + rmb(); + msleep(1); + } + } + + udelay(10); + + data = readl(base + OMR1); + if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) { + data &= 0x0000ffff; + if (hba->host->can_queue > data) { + hba->host->can_queue = data; + hba->host->cmd_per_lun = data; + } + } + + h = (struct handshake_frame *)hba->status_buffer; + h->rb_phy = cpu_to_le64(hba->dma_handle); + h->req_sz = cpu_to_le16(hba->rq_size); + h->req_cnt = cpu_to_le16(hba->rq_count+1); + h->status_sz = cpu_to_le16(sizeof(struct status_msg)); + h->status_cnt = cpu_to_le16(hba->sts_count+1); + h->hosttime = cpu_to_le64(ktime_get_real_seconds()); + h->partner_type = HMU_PARTNER_TYPE; + if (hba->extra_offset) { + h->extra_offset = cpu_to_le32(hba->extra_offset); + h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset); + } else + h->extra_offset = h->extra_size = 0; + + status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size; + writel(status_phys, base + IMR0); + readl(base + IMR0); + writel((status_phys >> 16) >> 16, base + IMR1); + readl(base + IMR1); + + writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */ + readl(base + OMR0); + writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL); + readl(base + IDBL); /* flush */ + + udelay(10); + before = jiffies; + while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { + if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { + printk(KERN_ERR DRV_NAME + "(%s): no signature after handshake frame\n", + pci_name(hba->pdev)); + return -1; + } + rmb(); + msleep(1); + } + + writel(0, base + IMR0); + readl(base + IMR0); + writel(0, base + OMR0); + readl(base + OMR0); + writel(0, base + IMR1); + readl(base + IMR1); + writel(0, base + OMR1); + readl(base + OMR1); /* flush */ + return 0; +} + +static int stex_ss_handshake(struct st_hba *hba) +{ + void __iomem *base = hba->mmio_base; + struct st_msg_header *msg_h; + struct handshake_frame *h; + __le32 *scratch; + u32 data, scratch_size, mailboxdata, operationaldata; + unsigned long before; + int ret = 0; + + before = jiffies; + + if (hba->cardtype == st_yel) { + operationaldata = readl(base + YIOA_STATUS); + while (operationaldata != SS_MU_OPERATIONAL) { + if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { + printk(KERN_ERR DRV_NAME + "(%s): firmware not operational\n", + pci_name(hba->pdev)); + return -1; + } + msleep(1); + operationaldata = readl(base + YIOA_STATUS); + } + } else { + operationaldata = readl(base + PSCRATCH3); + while (operationaldata != SS_MU_OPERATIONAL) { + if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { + printk(KERN_ERR DRV_NAME + "(%s): firmware not operational\n", + pci_name(hba->pdev)); + return -1; + } + msleep(1); + operationaldata = readl(base + PSCRATCH3); + } + } + + msg_h = (struct st_msg_header *)hba->dma_mem; + msg_h->handle = cpu_to_le64(hba->dma_handle); + msg_h->flag = SS_HEAD_HANDSHAKE; + + h = (struct handshake_frame *)(msg_h + 1); + h->rb_phy = cpu_to_le64(hba->dma_handle); + h->req_sz = cpu_to_le16(hba->rq_size); + h->req_cnt = cpu_to_le16(hba->rq_count+1); + h->status_sz = cpu_to_le16(sizeof(struct status_msg)); + h->status_cnt = cpu_to_le16(hba->sts_count+1); + h->hosttime = cpu_to_le64(ktime_get_real_seconds()); + h->partner_type = HMU_PARTNER_TYPE; + h->extra_offset = h->extra_size = 0; + 
scratch_size = (hba->sts_count+1)*sizeof(u32); + h->scratch_size = cpu_to_le32(scratch_size); + + if (hba->cardtype == st_yel) { + data = readl(base + YINT_EN); + data &= ~4; + writel(data, base + YINT_EN); + writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); + readl(base + YH2I_REQ_HI); + writel(hba->dma_handle, base + YH2I_REQ); + readl(base + YH2I_REQ); /* flush */ + } else { + data = readl(base + YINT_EN); + data &= ~(1 << 0); + data &= ~(1 << 2); + writel(data, base + YINT_EN); + if (hba->msi_lock == 0) { + /* P3 MSI Register cannot access twice */ + writel((1 << 6), base + YH2I_INT); + hba->msi_lock = 1; + } + writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); + writel(hba->dma_handle, base + YH2I_REQ); + } + + before = jiffies; + scratch = hba->scratch; + if (hba->cardtype == st_yel) { + while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) { + if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { + printk(KERN_ERR DRV_NAME + "(%s): no signature after handshake frame\n", + pci_name(hba->pdev)); + ret = -1; + break; + } + rmb(); + msleep(1); + } + } else { + mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS); + while (mailboxdata != SS_STS_HANDSHAKE) { + if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { + printk(KERN_ERR DRV_NAME + "(%s): no signature after handshake frame\n", + pci_name(hba->pdev)); + ret = -1; + break; + } + rmb(); + msleep(1); + mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS); + } + } + memset(scratch, 0, scratch_size); + msg_h->flag = 0; + + return ret; +} + +static int stex_handshake(struct st_hba *hba) +{ + int err; + unsigned long flags; + unsigned int mu_status; + + if (hba->cardtype == st_yel || hba->cardtype == st_P3) + err = stex_ss_handshake(hba); + else + err = stex_common_handshake(hba); + spin_lock_irqsave(hba->host->host_lock, flags); + mu_status = hba->mu_status; + if (err == 0) { + hba->req_head = 0; + hba->req_tail = 0; + hba->status_head = 0; + hba->status_tail = 0; + hba->out_req_cnt = 0; + hba->mu_status = MU_STATE_STARTED; + } else + hba->mu_status = MU_STATE_FAILED; + if (mu_status == MU_STATE_RESETTING) + wake_up_all(&hba->reset_waitq); + spin_unlock_irqrestore(hba->host->host_lock, flags); + return err; +} + +static int stex_abort(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + struct st_hba *hba = (struct st_hba *)host->hostdata; + u16 tag = scsi_cmd_to_rq(cmd)->tag; + void __iomem *base; + u32 data; + int result = SUCCESS; + unsigned long flags; + + scmd_printk(KERN_INFO, cmd, "aborting command\n"); + + base = hba->mmio_base; + spin_lock_irqsave(host->host_lock, flags); + if (tag < host->can_queue && + hba->ccb[tag].req && hba->ccb[tag].cmd == cmd) + hba->wait_ccb = &hba->ccb[tag]; + else + goto out; + + if (hba->cardtype == st_yel) { + data = readl(base + YI2H_INT); + if (data == 0 || data == 0xffffffff) + goto fail_out; + + writel(data, base + YI2H_INT_C); + stex_ss_mu_intr(hba); + } else if (hba->cardtype == st_P3) { + data = readl(base + PSCRATCH4); + if (data == 0xffffffff) + goto fail_out; + if (data != 0) { + writel(data, base + PSCRATCH1); + writel((1 << 22), base + YH2I_INT); + } + stex_ss_mu_intr(hba); + } else { + data = readl(base + ODBL); + if (data == 0 || data == 0xffffffff) + goto fail_out; + + writel(data, base + ODBL); + readl(base + ODBL); /* flush */ + stex_mu_intr(hba, data); + } + if (hba->wait_ccb == NULL) { + printk(KERN_WARNING DRV_NAME + "(%s): lost interrupt\n", pci_name(hba->pdev)); + goto out; + } + +fail_out: + scsi_dma_unmap(cmd); + 
hba->wait_ccb->req = NULL; /* nullify the req's future return */ + hba->wait_ccb = NULL; + result = FAILED; +out: + spin_unlock_irqrestore(host->host_lock, flags); + return result; +} + +static void stex_hard_reset(struct st_hba *hba) +{ + struct pci_bus *bus; + int i; + u16 pci_cmd; + u8 pci_bctl; + + for (i = 0; i < 16; i++) + pci_read_config_dword(hba->pdev, i * 4, + &hba->pdev->saved_config_space[i]); + + /* Reset secondary bus. Our controller(MU/ATU) is the only device on + secondary bus. Consult Intel 80331/3 developer's manual for detail */ + bus = hba->pdev->bus; + pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl); + pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl); + + /* + * 1 ms may be enough for 8-port controllers. But 16-port controllers + * require more time to finish bus reset. Use 100 ms here for safety + */ + msleep(100); + pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl); + + for (i = 0; i < MU_HARD_RESET_WAIT; i++) { + pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd); + if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER)) + break; + msleep(1); + } + + ssleep(5); + for (i = 0; i < 16; i++) + pci_write_config_dword(hba->pdev, i * 4, + hba->pdev->saved_config_space[i]); +} + +static int stex_yos_reset(struct st_hba *hba) +{ + void __iomem *base; + unsigned long flags, before; + int ret = 0; + + base = hba->mmio_base; + writel(MU_INBOUND_DOORBELL_RESET, base + IDBL); + readl(base + IDBL); /* flush */ + before = jiffies; + while (hba->out_req_cnt > 0) { + if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) { + printk(KERN_WARNING DRV_NAME + "(%s): reset timeout\n", pci_name(hba->pdev)); + ret = -1; + break; + } + msleep(1); + } + + spin_lock_irqsave(hba->host->host_lock, flags); + if (ret == -1) + hba->mu_status = MU_STATE_FAILED; + else + hba->mu_status = MU_STATE_STARTED; + wake_up_all(&hba->reset_waitq); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return ret; +} + +static void stex_ss_reset(struct st_hba *hba) +{ + writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); + readl(hba->mmio_base + YH2I_INT); + ssleep(5); +} + +static void stex_p3_reset(struct st_hba *hba) +{ + writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); + ssleep(5); +} + +static int stex_do_reset(struct st_hba *hba) +{ + unsigned long flags; + unsigned int mu_status = MU_STATE_RESETTING; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->mu_status == MU_STATE_STARTING) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + printk(KERN_INFO DRV_NAME "(%s): request reset during init\n", + pci_name(hba->pdev)); + return 0; + } + while (hba->mu_status == MU_STATE_RESETTING) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + wait_event_timeout(hba->reset_waitq, + hba->mu_status != MU_STATE_RESETTING, + MU_MAX_DELAY * HZ); + spin_lock_irqsave(hba->host->host_lock, flags); + mu_status = hba->mu_status; + } + + if (mu_status != MU_STATE_RESETTING) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + return (mu_status == MU_STATE_STARTED) ? 
0 : -1; + } + + hba->mu_status = MU_STATE_RESETTING; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (hba->cardtype == st_yosemite) + return stex_yos_reset(hba); + + if (hba->cardtype == st_shasta) + stex_hard_reset(hba); + else if (hba->cardtype == st_yel) + stex_ss_reset(hba); + else if (hba->cardtype == st_P3) + stex_p3_reset(hba); + + return_abnormal_state(hba, DID_RESET); + + if (stex_handshake(hba) == 0) + return 0; + + printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n", + pci_name(hba->pdev)); + return -1; +} + +static int stex_reset(struct scsi_cmnd *cmd) +{ + struct st_hba *hba; + + hba = (struct st_hba *) &cmd->device->host->hostdata[0]; + + shost_printk(KERN_INFO, cmd->device->host, + "resetting host\n"); + + return stex_do_reset(hba) ? FAILED : SUCCESS; +} + +static void stex_reset_work(struct work_struct *work) +{ + struct st_hba *hba = container_of(work, struct st_hba, reset_work); + + stex_do_reset(hba); +} + +static int stex_biosparam(struct scsi_device *sdev, + struct block_device *bdev, sector_t capacity, int geom[]) +{ + int heads = 255, sectors = 63; + + if (capacity < 0x200000) { + heads = 64; + sectors = 32; + } + + sector_div(capacity, heads * sectors); + + geom[0] = heads; + geom[1] = sectors; + geom[2] = capacity; + + return 0; +} + +static const struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .name = DRV_NAME, + .proc_name = DRV_NAME, + .bios_param = stex_biosparam, + .queuecommand = stex_queuecommand, + .slave_configure = stex_slave_config, + .eh_abort_handler = stex_abort, + .eh_host_reset_handler = stex_reset, + .this_id = -1, + .dma_boundary = PAGE_SIZE - 1, +}; + +static struct pci_device_id stex_pci_tbl[] = { + /* st_shasta */ + { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */ + { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + st_shasta }, /* SuperTrak EX12350 */ + { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + st_shasta }, /* SuperTrak EX4350 */ + { 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + st_shasta }, /* SuperTrak EX24350 */ + + /* st_vsc */ + { 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc }, + + /* st_yosemite */ + { 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite }, + + /* st_seq */ + { 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq }, + + /* st_yel */ + { 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel }, + { 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel }, + + /* st_P3, pluto */ + { PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE, + 0x8870, 0, 0, st_P3 }, + /* st_P3, p3 */ + { PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE, + 0x4300, 0, 0, st_P3 }, + + /* st_P3, SymplyStor4E */ + { PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE, + 0x4311, 0, 0, st_P3 }, + /* st_P3, SymplyStor8E */ + { PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE, + 0x4312, 0, 0, st_P3 }, + /* st_P3, SymplyStor4 */ + { PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE, + 0x4321, 0, 0, st_P3 }, + /* st_P3, SymplyStor8 */ + { PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE, + 0x4322, 0, 0, st_P3 }, + { } /* terminate list */ +}; + +static struct st_card_info stex_card_info[] = { + /* st_shasta */ + { + .max_id = 17, + .max_lun = 8, + .max_channel = 0, + .rq_count = 32, + .rq_size = 1048, + .sts_count = 32, + .alloc_rq = stex_alloc_req, + .map_sg = stex_map_sg, + .send = stex_send_cmd, + }, + + /* st_vsc */ + { + .max_id = 129, + .max_lun = 1, + .max_channel = 0, + .rq_count = 32, + .rq_size = 1048, + .sts_count 
= 32, + .alloc_rq = stex_alloc_req, + .map_sg = stex_map_sg, + .send = stex_send_cmd, + }, + + /* st_yosemite */ + { + .max_id = 2, + .max_lun = 256, + .max_channel = 0, + .rq_count = 256, + .rq_size = 1048, + .sts_count = 256, + .alloc_rq = stex_alloc_req, + .map_sg = stex_map_sg, + .send = stex_send_cmd, + }, + + /* st_seq */ + { + .max_id = 129, + .max_lun = 1, + .max_channel = 0, + .rq_count = 32, + .rq_size = 1048, + .sts_count = 32, + .alloc_rq = stex_alloc_req, + .map_sg = stex_map_sg, + .send = stex_send_cmd, + }, + + /* st_yel */ + { + .max_id = 129, + .max_lun = 256, + .max_channel = 3, + .rq_count = 801, + .rq_size = 512, + .sts_count = 801, + .alloc_rq = stex_ss_alloc_req, + .map_sg = stex_ss_map_sg, + .send = stex_ss_send_cmd, + }, + + /* st_P3 */ + { + .max_id = 129, + .max_lun = 256, + .max_channel = 0, + .rq_count = 801, + .rq_size = 512, + .sts_count = 801, + .alloc_rq = stex_ss_alloc_req, + .map_sg = stex_ss_map_sg, + .send = stex_ss_send_cmd, + }, +}; + +static int stex_request_irq(struct st_hba *hba) +{ + struct pci_dev *pdev = hba->pdev; + int status; + + if (msi || hba->cardtype == st_P3) { + status = pci_enable_msi(pdev); + if (status != 0) + printk(KERN_ERR DRV_NAME + "(%s): error %d setting up MSI\n", + pci_name(pdev), status); + else + hba->msi_enabled = 1; + } else + hba->msi_enabled = 0; + + status = request_irq(pdev->irq, + (hba->cardtype == st_yel || hba->cardtype == st_P3) ? + stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba); + + if (status != 0) { + if (hba->msi_enabled) + pci_disable_msi(pdev); + } + return status; +} + +static void stex_free_irq(struct st_hba *hba) +{ + struct pci_dev *pdev = hba->pdev; + + free_irq(pdev->irq, hba); + if (hba->msi_enabled) + pci_disable_msi(pdev); +} + +static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct st_hba *hba; + struct Scsi_Host *host; + const struct st_card_info *ci = NULL; + u32 sts_offset, cp_offset, scratch_offset; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_set_master(pdev); + + S6flag = 0; + register_reboot_notifier(&stex_notifier); + + host = scsi_host_alloc(&driver_template, sizeof(struct st_hba)); + + if (!host) { + printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n", + pci_name(pdev)); + err = -ENOMEM; + goto out_disable; + } + + hba = (struct st_hba *)host->hostdata; + memset(hba, 0, sizeof(struct st_hba)); + + err = pci_request_regions(pdev, DRV_NAME); + if (err < 0) { + printk(KERN_ERR DRV_NAME "(%s): request regions failed\n", + pci_name(pdev)); + goto out_scsi_host_put; + } + + hba->mmio_base = pci_ioremap_bar(pdev, 0); + if ( !hba->mmio_base) { + printk(KERN_ERR DRV_NAME "(%s): memory map failed\n", + pci_name(pdev)); + err = -ENOMEM; + goto out_release_regions; + } + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n", + pci_name(pdev)); + goto out_iounmap; + } + + hba->cardtype = (unsigned int) id->driver_data; + ci = &stex_card_info[hba->cardtype]; + switch (id->subdevice) { + case 0x4221: + case 0x4222: + case 0x4223: + case 0x4224: + case 0x4225: + case 0x4226: + case 0x4227: + case 0x4261: + case 0x4262: + case 0x4263: + case 0x4264: + case 0x4265: + break; + default: + if (hba->cardtype == st_yel || hba->cardtype == st_P3) + hba->supports_pm = 1; + } + + sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size; + if (hba->cardtype == st_yel || 
hba->cardtype == st_P3) + sts_offset += (ci->sts_count+1) * sizeof(u32); + cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg); + hba->dma_size = cp_offset + sizeof(struct st_frame); + if (hba->cardtype == st_seq || + (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { + hba->extra_offset = hba->dma_size; + hba->dma_size += ST_ADDITIONAL_MEM; + } + hba->dma_mem = dma_alloc_coherent(&pdev->dev, + hba->dma_size, &hba->dma_handle, GFP_KERNEL); + if (!hba->dma_mem) { + /* Retry minimum coherent mapping for st_seq and st_vsc */ + if (hba->cardtype == st_seq || + (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { + printk(KERN_WARNING DRV_NAME + "(%s): allocating min buffer for controller\n", + pci_name(pdev)); + hba->dma_size = hba->extra_offset + + ST_ADDITIONAL_MEM_MIN; + hba->dma_mem = dma_alloc_coherent(&pdev->dev, + hba->dma_size, &hba->dma_handle, GFP_KERNEL); + } + + if (!hba->dma_mem) { + err = -ENOMEM; + printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n", + pci_name(pdev)); + goto out_iounmap; + } + } + + hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL); + if (!hba->ccb) { + err = -ENOMEM; + printk(KERN_ERR DRV_NAME "(%s): ccb alloc failed\n", + pci_name(pdev)); + goto out_pci_free; + } + + if (hba->cardtype == st_yel || hba->cardtype == st_P3) + hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset); + hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset); + hba->copy_buffer = hba->dma_mem + cp_offset; + hba->rq_count = ci->rq_count; + hba->rq_size = ci->rq_size; + hba->sts_count = ci->sts_count; + hba->alloc_rq = ci->alloc_rq; + hba->map_sg = ci->map_sg; + hba->send = ci->send; + hba->mu_status = MU_STATE_STARTING; + hba->msi_lock = 0; + + if (hba->cardtype == st_yel || hba->cardtype == st_P3) + host->sg_tablesize = 38; + else + host->sg_tablesize = 32; + host->can_queue = ci->rq_count; + host->cmd_per_lun = ci->rq_count; + host->max_id = ci->max_id; + host->max_lun = ci->max_lun; + host->max_channel = ci->max_channel; + host->unique_id = host->host_no; + host->max_cmd_len = STEX_CDB_LENGTH; + + hba->host = host; + hba->pdev = pdev; + init_waitqueue_head(&hba->reset_waitq); + + snprintf(hba->work_q_name, sizeof(hba->work_q_name), + "stex_wq_%d", host->host_no); + hba->work_q = create_singlethread_workqueue(hba->work_q_name); + if (!hba->work_q) { + printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n", + pci_name(pdev)); + err = -ENOMEM; + goto out_ccb_free; + } + INIT_WORK(&hba->reset_work, stex_reset_work); + + err = stex_request_irq(hba); + if (err) { + printk(KERN_ERR DRV_NAME "(%s): request irq failed\n", + pci_name(pdev)); + goto out_free_wq; + } + + err = stex_handshake(hba); + if (err) + goto out_free_irq; + + pci_set_drvdata(pdev, hba); + + err = scsi_add_host(host, &pdev->dev); + if (err) { + printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n", + pci_name(pdev)); + goto out_free_irq; + } + + scsi_scan_host(host); + + return 0; + +out_free_irq: + stex_free_irq(hba); +out_free_wq: + destroy_workqueue(hba->work_q); +out_ccb_free: + kfree(hba->ccb); +out_pci_free: + dma_free_coherent(&pdev->dev, hba->dma_size, + hba->dma_mem, hba->dma_handle); +out_iounmap: + iounmap(hba->mmio_base); +out_release_regions: + pci_release_regions(pdev); +out_scsi_host_put: + scsi_host_put(host); +out_disable: + pci_disable_device(pdev); + + return err; +} + +static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic) +{ + struct req_msg *req; + struct st_msg_header *msg_h; + unsigned long flags; + unsigned 
long before; + u16 tag = 0; + + spin_lock_irqsave(hba->host->host_lock, flags); + + if ((hba->cardtype == st_yel || hba->cardtype == st_P3) && + hba->supports_pm == 1) { + if (st_sleep_mic == ST_NOTHANDLED) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + return; + } + } + req = hba->alloc_rq(hba); + if (hba->cardtype == st_yel || hba->cardtype == st_P3) { + msg_h = (struct st_msg_header *)req - 1; + memset(msg_h, 0, hba->rq_size); + } else + memset(req, 0, hba->rq_size); + + if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel + || hba->cardtype == st_P3) + && st_sleep_mic == ST_IGNORED) { + req->cdb[0] = MGT_CMD; + req->cdb[1] = MGT_CMD_SIGNATURE; + req->cdb[2] = CTLR_CONFIG_CMD; + req->cdb[3] = CTLR_SHUTDOWN; + } else if ((hba->cardtype == st_yel || hba->cardtype == st_P3) + && st_sleep_mic != ST_IGNORED) { + req->cdb[0] = MGT_CMD; + req->cdb[1] = MGT_CMD_SIGNATURE; + req->cdb[2] = CTLR_CONFIG_CMD; + req->cdb[3] = PMIC_SHUTDOWN; + req->cdb[4] = st_sleep_mic; + } else { + req->cdb[0] = CONTROLLER_CMD; + req->cdb[1] = CTLR_POWER_STATE_CHANGE; + req->cdb[2] = CTLR_POWER_SAVING; + } + hba->ccb[tag].cmd = NULL; + hba->ccb[tag].sg_count = 0; + hba->ccb[tag].sense_bufflen = 0; + hba->ccb[tag].sense_buffer = NULL; + hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE; + hba->send(hba, req, tag); + spin_unlock_irqrestore(hba->host->host_lock, flags); + before = jiffies; + while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) { + if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) { + hba->ccb[tag].req_type = 0; + hba->mu_status = MU_STATE_STOP; + return; + } + msleep(1); + } + hba->mu_status = MU_STATE_STOP; +} + +static void stex_hba_free(struct st_hba *hba) +{ + stex_free_irq(hba); + + destroy_workqueue(hba->work_q); + + iounmap(hba->mmio_base); + + pci_release_regions(hba->pdev); + + kfree(hba->ccb); + + dma_free_coherent(&hba->pdev->dev, hba->dma_size, + hba->dma_mem, hba->dma_handle); +} + +static void stex_remove(struct pci_dev *pdev) +{ + struct st_hba *hba = pci_get_drvdata(pdev); + + hba->mu_status = MU_STATE_NOCONNECT; + return_abnormal_state(hba, DID_NO_CONNECT); + scsi_remove_host(hba->host); + + scsi_block_requests(hba->host); + + stex_hba_free(hba); + + scsi_host_put(hba->host); + + pci_disable_device(pdev); + + unregister_reboot_notifier(&stex_notifier); +} + +static void stex_shutdown(struct pci_dev *pdev) +{ + struct st_hba *hba = pci_get_drvdata(pdev); + + if (hba->supports_pm == 0) { + stex_hba_stop(hba, ST_IGNORED); + } else if (hba->supports_pm == 1 && S6flag) { + unregister_reboot_notifier(&stex_notifier); + stex_hba_stop(hba, ST_S6); + } else + stex_hba_stop(hba, ST_S5); +} + +static int stex_choice_sleep_mic(struct st_hba *hba, pm_message_t state) +{ + switch (state.event) { + case PM_EVENT_SUSPEND: + return ST_S3; + case PM_EVENT_HIBERNATE: + hba->msi_lock = 0; + return ST_S4; + default: + return ST_NOTHANDLED; + } +} + +static int stex_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct st_hba *hba = pci_get_drvdata(pdev); + + if ((hba->cardtype == st_yel || hba->cardtype == st_P3) + && hba->supports_pm == 1) + stex_hba_stop(hba, stex_choice_sleep_mic(hba, state)); + else + stex_hba_stop(hba, ST_IGNORED); + return 0; +} + +static int stex_resume(struct pci_dev *pdev) +{ + struct st_hba *hba = pci_get_drvdata(pdev); + + hba->mu_status = MU_STATE_STARTING; + stex_handshake(hba); + return 0; +} + +static int stex_halt(struct notifier_block *nb, unsigned long event, void *buf) +{ + S6flag = 1; + return NOTIFY_OK; +} +MODULE_DEVICE_TABLE(pci, 
stex_pci_tbl); + +static struct pci_driver stex_pci_driver = { + .name = DRV_NAME, + .id_table = stex_pci_tbl, + .probe = stex_probe, + .remove = stex_remove, + .shutdown = stex_shutdown, + .suspend = stex_suspend, + .resume = stex_resume, +}; + +static int __init stex_init(void) +{ + printk(KERN_INFO DRV_NAME + ": Promise SuperTrak EX Driver version: %s\n", + ST_DRIVER_VERSION); + + return pci_register_driver(&stex_pci_driver); +} + +static void __exit stex_exit(void) +{ + pci_unregister_driver(&stex_pci_driver); +} + +module_init(stex_init); +module_exit(stex_exit); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c new file mode 100644 index 000000000..a95936b18 --- /dev/null +++ b/drivers/scsi/storvsc_drv.c @@ -0,0 +1,2234 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2009, Microsoft Corporation. + * + * Authors: + * Haiyang Zhang + * Hank Janssen + * K. Y. Srinivasan + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * All wire protocol details (storage protocol between the guest and the host) + * are consolidated here. + * + * Begin protocol definitions. + */ + +/* + * Version history: + * V1 Beta: 0.1 + * V1 RC < 2008/1/31: 1.0 + * V1 RC > 2008/1/31: 2.0 + * Win7: 4.2 + * Win8: 5.1 + * Win8.1: 6.0 + * Win10: 6.2 + */ + +#define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \ + (((MINOR_) & 0xff))) +#define VMSTOR_PROTO_VERSION_WIN6 VMSTOR_PROTO_VERSION(2, 0) +#define VMSTOR_PROTO_VERSION_WIN7 VMSTOR_PROTO_VERSION(4, 2) +#define VMSTOR_PROTO_VERSION_WIN8 VMSTOR_PROTO_VERSION(5, 1) +#define VMSTOR_PROTO_VERSION_WIN8_1 VMSTOR_PROTO_VERSION(6, 0) +#define VMSTOR_PROTO_VERSION_WIN10 VMSTOR_PROTO_VERSION(6, 2) + +/* channel callback timeout in ms */ +#define CALLBACK_TIMEOUT 2 + +/* Packet structure describing virtual storage requests. */ +enum vstor_packet_operation { + VSTOR_OPERATION_COMPLETE_IO = 1, + VSTOR_OPERATION_REMOVE_DEVICE = 2, + VSTOR_OPERATION_EXECUTE_SRB = 3, + VSTOR_OPERATION_RESET_LUN = 4, + VSTOR_OPERATION_RESET_ADAPTER = 5, + VSTOR_OPERATION_RESET_BUS = 6, + VSTOR_OPERATION_BEGIN_INITIALIZATION = 7, + VSTOR_OPERATION_END_INITIALIZATION = 8, + VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9, + VSTOR_OPERATION_QUERY_PROPERTIES = 10, + VSTOR_OPERATION_ENUMERATE_BUS = 11, + VSTOR_OPERATION_FCHBA_DATA = 12, + VSTOR_OPERATION_CREATE_SUB_CHANNELS = 13, + VSTOR_OPERATION_MAXIMUM = 13 +}; + +/* + * WWN packet for Fibre Channel HBA + */ + +struct hv_fc_wwn_packet { + u8 primary_active; + u8 reserved1[3]; + u8 primary_port_wwn[8]; + u8 primary_node_wwn[8]; + u8 secondary_port_wwn[8]; + u8 secondary_node_wwn[8]; +}; + + + +/* + * SRB Flag Bits + */ + +#define SRB_FLAGS_QUEUE_ACTION_ENABLE 0x00000002 +#define SRB_FLAGS_DISABLE_DISCONNECT 0x00000004 +#define SRB_FLAGS_DISABLE_SYNCH_TRANSFER 0x00000008 +#define SRB_FLAGS_BYPASS_FROZEN_QUEUE 0x00000010 +#define SRB_FLAGS_DISABLE_AUTOSENSE 0x00000020 +#define SRB_FLAGS_DATA_IN 0x00000040 +#define SRB_FLAGS_DATA_OUT 0x00000080 +#define SRB_FLAGS_NO_DATA_TRANSFER 0x00000000 +#define SRB_FLAGS_UNSPECIFIED_DIRECTION (SRB_FLAGS_DATA_IN | SRB_FLAGS_DATA_OUT) +#define SRB_FLAGS_NO_QUEUE_FREEZE 0x00000100 +#define SRB_FLAGS_ADAPTER_CACHE_ENABLE 0x00000200 +#define SRB_FLAGS_FREE_SENSE_BUFFER 0x00000400 + +/* + * This flag indicates the request is part of the workflow for processing a D3. 
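For reference, the VMSTOR_PROTO_VERSION() macro above packs the major protocol number into the high byte and the minor into the low byte of a 16-bit value. A minimal standalone C sketch of the packing (illustrative only):

#include <stdio.h>

#define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
		(((MINOR_) & 0xff)))

int main(void)
{
	/* Win8 speaks protocol 5.1 -> 0x0501, Win10 speaks 6.2 -> 0x0602 */
	printf("WIN8  = 0x%04x\n", VMSTOR_PROTO_VERSION(5, 1));
	printf("WIN10 = 0x%04x\n", VMSTOR_PROTO_VERSION(6, 2));
	return 0;
}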
+ */ +#define SRB_FLAGS_D3_PROCESSING 0x00000800 +#define SRB_FLAGS_IS_ACTIVE 0x00010000 +#define SRB_FLAGS_ALLOCATED_FROM_ZONE 0x00020000 +#define SRB_FLAGS_SGLIST_FROM_POOL 0x00040000 +#define SRB_FLAGS_BYPASS_LOCKED_QUEUE 0x00080000 +#define SRB_FLAGS_NO_KEEP_AWAKE 0x00100000 +#define SRB_FLAGS_PORT_DRIVER_ALLOCSENSE 0x00200000 +#define SRB_FLAGS_PORT_DRIVER_SENSEHASPORT 0x00400000 +#define SRB_FLAGS_DONT_START_NEXT_PACKET 0x00800000 +#define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000 +#define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000 + +#define SP_UNTAGGED ((unsigned char) ~0) +#define SRB_SIMPLE_TAG_REQUEST 0x20 + +/* + * Platform neutral description of a scsi request - + * this remains the same across the write regardless of 32/64 bit + * note: it's patterned off the SCSI_PASS_THROUGH structure + */ +#define STORVSC_MAX_CMD_LEN 0x10 + +/* Sense buffer size is the same for all versions since Windows 8 */ +#define STORVSC_SENSE_BUFFER_SIZE 0x14 +#define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14 + +/* + * The storage protocol version is determined during the + * initial exchange with the host. It will indicate which + * storage functionality is available in the host. +*/ +static int vmstor_proto_version; + +#define STORVSC_LOGGING_NONE 0 +#define STORVSC_LOGGING_ERROR 1 +#define STORVSC_LOGGING_WARN 2 + +static int logging_level = STORVSC_LOGGING_ERROR; +module_param(logging_level, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(logging_level, + "Logging level, 0 - None, 1 - Error (default), 2 - Warning."); + +static inline bool do_logging(int level) +{ + return logging_level >= level; +} + +#define storvsc_log(dev, level, fmt, ...) \ +do { \ + if (do_logging(level)) \ + dev_warn(&(dev)->device, fmt, ##__VA_ARGS__); \ +} while (0) + +struct vmscsi_request { + u16 length; + u8 srb_status; + u8 scsi_status; + + u8 port_number; + u8 path_id; + u8 target_id; + u8 lun; + + u8 cdb_length; + u8 sense_info_length; + u8 data_in; + u8 reserved; + + u32 data_transfer_length; + + union { + u8 cdb[STORVSC_MAX_CMD_LEN]; + u8 sense_data[STORVSC_SENSE_BUFFER_SIZE]; + u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING]; + }; + /* + * The following was added in win8. + */ + u16 reserve; + u8 queue_tag; + u8 queue_action; + u32 srb_flags; + u32 time_out_value; + u32 queue_sort_ey; + +} __attribute((packed)); + +/* + * The list of windows version in order of preference. + */ + +static const int protocol_version[] = { + VMSTOR_PROTO_VERSION_WIN10, + VMSTOR_PROTO_VERSION_WIN8_1, + VMSTOR_PROTO_VERSION_WIN8, +}; + + +/* + * This structure is sent during the initialization phase to get the different + * properties of the channel. + */ + +#define STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL 0x1 + +struct vmstorage_channel_properties { + u32 reserved; + u16 max_channel_cnt; + u16 reserved1; + + u32 flags; + u32 max_transfer_bytes; + + u64 reserved2; +} __packed; + +/* This structure is sent during the storage protocol negotiations. */ +struct vmstorage_protocol_version { + /* Major (MSW) and minor (LSW) version numbers. */ + u16 major_minor; + + /* + * Revision number is auto-incremented whenever this file is changed + * (See FILL_VMSTOR_REVISION macro above). Mismatch does not + * definitely indicate incompatibility--but it does indicate mismatched + * builds. + * This is only used on the windows side. Just set it to 0. 
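The vmscsi_request structure above is a wire format shared with the host, so its size is fixed by the packed attribute: 16 bytes of fixed fields, a 20-byte union (padded to STORVSC_MAX_BUF_LEN_WITH_PADDING), and 16 bytes of Win8 additions, 52 bytes in total. A standalone size check, sketched under the assumption of a C11 compiler that honors __attribute__((packed)):

#include <stdio.h>
#include <stdint.h>

/* Reduced copy of the wire layout above, for size checking only. */
struct vmscsi_request_sketch {
	uint16_t length;
	uint8_t  srb_status;
	uint8_t  scsi_status;
	uint8_t  port_number, path_id, target_id, lun;
	uint8_t  cdb_length, sense_info_length, data_in, reserved;
	uint32_t data_transfer_length;
	union {
		uint8_t cdb[0x10];
		uint8_t sense_data[0x14];
		uint8_t reserved_array[0x14];
	};
	uint16_t reserve;
	uint8_t  queue_tag;
	uint8_t  queue_action;
	uint32_t srb_flags;
	uint32_t time_out_value;
	uint32_t queue_sort_ey;
} __attribute__((packed));

int main(void)
{
	/* 16 fixed bytes + 20-byte union + 16 trailing bytes = 52 */
	printf("sizeof = %zu\n", sizeof(struct vmscsi_request_sketch));
	return 0;
}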
+ */ + u16 revision; +} __packed; + +/* Channel Property Flags */ +#define STORAGE_CHANNEL_REMOVABLE_FLAG 0x1 +#define STORAGE_CHANNEL_EMULATED_IDE_FLAG 0x2 + +struct vstor_packet { + /* Requested operation type */ + enum vstor_packet_operation operation; + + /* Flags - see below for values */ + u32 flags; + + /* Status of the request returned from the server side. */ + u32 status; + + /* Data payload area */ + union { + /* + * Structure used to forward SCSI commands from the + * client to the server. + */ + struct vmscsi_request vm_srb; + + /* Structure used to query channel properties. */ + struct vmstorage_channel_properties storage_channel_properties; + + /* Used during version negotiations. */ + struct vmstorage_protocol_version version; + + /* Fibre channel address packet */ + struct hv_fc_wwn_packet wwn_packet; + + /* Number of sub-channels to create */ + u16 sub_channel_count; + + /* This will be the maximum of the union members */ + u8 buffer[0x34]; + }; +} __packed; + +/* + * Packet Flags: + * + * This flag indicates that the server should send back a completion for this + * packet. + */ + +#define REQUEST_COMPLETION_FLAG 0x1 + +/* Matches Windows-end */ +enum storvsc_request_type { + WRITE_TYPE = 0, + READ_TYPE, + UNKNOWN_TYPE, +}; + +/* + * SRB status codes and masks. In the 8-bit field, the two high order bits + * are flags, while the remaining 6 bits are an integer status code. The + * definitions here include only the subset of the integer status codes that + * are tested for in this driver. + */ +#define SRB_STATUS_AUTOSENSE_VALID 0x80 +#define SRB_STATUS_QUEUE_FROZEN 0x40 + +/* SRB status integer codes */ +#define SRB_STATUS_SUCCESS 0x01 +#define SRB_STATUS_ABORTED 0x02 +#define SRB_STATUS_ERROR 0x04 +#define SRB_STATUS_INVALID_REQUEST 0x06 +#define SRB_STATUS_TIMEOUT 0x09 +#define SRB_STATUS_SELECTION_TIMEOUT 0x0A +#define SRB_STATUS_BUS_RESET 0x0E +#define SRB_STATUS_DATA_OVERRUN 0x12 +#define SRB_STATUS_INVALID_LUN 0x20 +#define SRB_STATUS_INTERNAL_ERROR 0x30 + +#define SRB_STATUS(status) \ + (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN)) +/* + * This is the end of Protocol specific defines. + */ + +static int storvsc_ringbuffer_size = (128 * 1024); +static u32 max_outstanding_req_per_channel; +static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth); + +static int storvsc_vcpus_per_sub_channel = 4; +static unsigned int storvsc_max_hw_queues; + +module_param(storvsc_ringbuffer_size, int, S_IRUGO); +MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)"); + +module_param(storvsc_max_hw_queues, uint, 0644); +MODULE_PARM_DESC(storvsc_max_hw_queues, "Maximum number of hardware queues"); + +module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO); +MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels"); + +static int ring_avail_percent_lowater = 10; +module_param(ring_avail_percent_lowater, int, S_IRUGO); +MODULE_PARM_DESC(ring_avail_percent_lowater, + "Select a channel if available ring size > this in percent"); + +/* + * Timeout in seconds for all devices managed by this driver. 
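Because the two high-order bits of the SRB status byte are flags, the SRB_STATUS() macro above strips them before any comparison against the integer status codes. A standalone illustration:

#include <stdio.h>

#define SRB_STATUS_AUTOSENSE_VALID	0x80
#define SRB_STATUS_QUEUE_FROZEN		0x40
#define SRB_STATUS_ERROR		0x04

#define SRB_STATUS(status) \
	(status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))

int main(void)
{
	unsigned char raw = 0x84;	/* ERROR with valid autosense data */

	/* The flag bits are masked off before matching the status code. */
	printf("raw=0x%02x masked=0x%02x is_error=%d\n",
	       raw, SRB_STATUS(raw), SRB_STATUS(raw) == SRB_STATUS_ERROR);
	return 0;
}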
+ */ +static int storvsc_timeout = 180; + +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) +static struct scsi_transport_template *fc_transport_template; +#endif + +static struct scsi_host_template scsi_driver; +static void storvsc_on_channel_callback(void *context); + +#define STORVSC_MAX_LUNS_PER_TARGET 255 +#define STORVSC_MAX_TARGETS 2 +#define STORVSC_MAX_CHANNELS 8 + +#define STORVSC_FC_MAX_LUNS_PER_TARGET 255 +#define STORVSC_FC_MAX_TARGETS 128 +#define STORVSC_FC_MAX_CHANNELS 8 +#define STORVSC_FC_MAX_XFER_SIZE ((u32)(512 * 1024)) + +#define STORVSC_IDE_MAX_LUNS_PER_TARGET 64 +#define STORVSC_IDE_MAX_TARGETS 1 +#define STORVSC_IDE_MAX_CHANNELS 1 + +/* + * Upper bound on the size of a storvsc packet. + */ +#define STORVSC_MAX_PKT_SIZE (sizeof(struct vmpacket_descriptor) +\ + sizeof(struct vstor_packet)) + +struct storvsc_cmd_request { + struct scsi_cmnd *cmd; + + struct hv_device *device; + + /* Synchronize the request/response if needed */ + struct completion wait_event; + + struct vmbus_channel_packet_multipage_buffer mpb; + struct vmbus_packet_mpb_array *payload; + u32 payload_sz; + + struct vstor_packet vstor_packet; +}; + + +/* A storvsc device is a device object that contains a vmbus channel */ +struct storvsc_device { + struct hv_device *device; + + bool destroy; + bool drain_notify; + atomic_t num_outstanding_req; + struct Scsi_Host *host; + + wait_queue_head_t waiting_to_drain; + + /* + * Each unique Port/Path/Target represents 1 channel ie scsi + * controller. In reality, the pathid, targetid is always 0 + * and the port is set by us + */ + unsigned int port_number; + unsigned char path_id; + unsigned char target_id; + + /* + * Max I/O, the device can support. + */ + u32 max_transfer_bytes; + /* + * Number of sub-channels we will open. + */ + u16 num_sc; + struct vmbus_channel **stor_chns; + /* + * Mask of CPUs bound to subchannels. + */ + struct cpumask alloced_cpus; + /* + * Serializes modifications of stor_chns[] from storvsc_do_io() + * and storvsc_change_target_cpu(). + */ + spinlock_t lock; + /* Used for vsc/vsp channel reset process */ + struct storvsc_cmd_request init_request; + struct storvsc_cmd_request reset_request; + /* + * Currently active port and node names for FC devices. + */ + u64 node_name; + u64 port_name; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + struct fc_rport *rport; +#endif +}; + +struct hv_host_device { + struct hv_device *dev; + unsigned int port; + unsigned char path; + unsigned char target; + struct workqueue_struct *handle_error_wq; + struct work_struct host_scan_work; + struct Scsi_Host *host; +}; + +struct storvsc_scan_work { + struct work_struct work; + struct Scsi_Host *host; + u8 lun; + u8 tgt_id; +}; + +static void storvsc_device_scan(struct work_struct *work) +{ + struct storvsc_scan_work *wrk; + struct scsi_device *sdev; + + wrk = container_of(work, struct storvsc_scan_work, work); + + sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun); + if (!sdev) + goto done; + scsi_rescan_device(sdev); + scsi_device_put(sdev); + +done: + kfree(wrk); +} + +static void storvsc_host_scan(struct work_struct *work) +{ + struct Scsi_Host *host; + struct scsi_device *sdev; + struct hv_host_device *host_device = + container_of(work, struct hv_host_device, host_scan_work); + + host = host_device->host; + /* + * Before scanning the host, first check to see if any of the + * currently known devices have been hot removed. We issue a + * "unit ready" command against all currently known devices. 
+ * This I/O will result in an error for devices that have been + * removed. As part of handling the I/O error, we remove the device. + * + * When a LUN is added or removed, the host sends us a signal to + * scan the host. Thus we are forced to discover the LUNs that + * may have been removed this way. + */ + mutex_lock(&host->scan_mutex); + shost_for_each_device(sdev, host) + scsi_test_unit_ready(sdev, 1, 1, NULL); + mutex_unlock(&host->scan_mutex); + /* + * Now scan the host to discover LUNs that may have been added. + */ + scsi_scan_host(host); +} + +static void storvsc_remove_lun(struct work_struct *work) +{ + struct storvsc_scan_work *wrk; + struct scsi_device *sdev; + + wrk = container_of(work, struct storvsc_scan_work, work); + if (!scsi_host_get(wrk->host)) + goto done; + + sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun); + + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } + scsi_host_put(wrk->host); + +done: + kfree(wrk); +} + + +/* + * We can get incoming messages from the host that are not in response to + * messages that we have sent out. An example of this would be messages + * received by the guest to notify dynamic addition/removal of LUNs. To + * deal with potential race conditions where the driver may be in the + * midst of being unloaded when we might receive an unsolicited message + * from the host, we have implemented a mechanism to gurantee sequential + * consistency: + * + * 1) Once the device is marked as being destroyed, we will fail all + * outgoing messages. + * 2) We permit incoming messages when the device is being destroyed, + * only to properly account for messages already sent out. + */ + +static inline struct storvsc_device *get_out_stor_device( + struct hv_device *device) +{ + struct storvsc_device *stor_device; + + stor_device = hv_get_drvdata(device); + + if (stor_device && stor_device->destroy) + stor_device = NULL; + + return stor_device; +} + + +static inline void storvsc_wait_to_drain(struct storvsc_device *dev) +{ + dev->drain_notify = true; + wait_event(dev->waiting_to_drain, + atomic_read(&dev->num_outstanding_req) == 0); + dev->drain_notify = false; +} + +static inline struct storvsc_device *get_in_stor_device( + struct hv_device *device) +{ + struct storvsc_device *stor_device; + + stor_device = hv_get_drvdata(device); + + if (!stor_device) + goto get_in_err; + + /* + * If the device is being destroyed; allow incoming + * traffic only to cleanup outstanding requests. + */ + + if (stor_device->destroy && + (atomic_read(&stor_device->num_outstanding_req) == 0)) + stor_device = NULL; + +get_in_err: + return stor_device; + +} + +static void storvsc_change_target_cpu(struct vmbus_channel *channel, u32 old, + u32 new) +{ + struct storvsc_device *stor_device; + struct vmbus_channel *cur_chn; + bool old_is_alloced = false; + struct hv_device *device; + unsigned long flags; + int cpu; + + device = channel->primary_channel ? + channel->primary_channel->device_obj + : channel->device_obj; + stor_device = get_out_stor_device(device); + if (!stor_device) + return; + + /* See storvsc_do_io() -> get_og_chn(). */ + spin_lock_irqsave(&stor_device->lock, flags); + + /* + * Determines if the storvsc device has other channels assigned to + * the "old" CPU to update the alloced_cpus mask and the stor_chns + * array. 
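storvsc_wait_to_drain() above pairs an atomic request counter with a wait queue: each completion decrements the counter and wakes the waiter once it reaches zero. A userspace analogue of that quiesce pattern, sketched with pthreads rather than the kernel primitives (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int outstanding = 3;		/* stands in for num_outstanding_req */

static void *complete_one(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (--outstanding == 0)
		pthread_cond_signal(&drained);	/* like wake_up(&waiting_to_drain) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	int i;

	for (i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, complete_one, NULL);

	/* Like storvsc_wait_to_drain(): block until nothing is outstanding. */
	pthread_mutex_lock(&lock);
	while (outstanding != 0)
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);

	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	printf("drained\n");
	return 0;
}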
+ */ + if (device->channel != channel && device->channel->target_cpu == old) { + cur_chn = device->channel; + old_is_alloced = true; + goto old_is_alloced; + } + list_for_each_entry(cur_chn, &device->channel->sc_list, sc_list) { + if (cur_chn == channel) + continue; + if (cur_chn->target_cpu == old) { + old_is_alloced = true; + goto old_is_alloced; + } + } + +old_is_alloced: + if (old_is_alloced) + WRITE_ONCE(stor_device->stor_chns[old], cur_chn); + else + cpumask_clear_cpu(old, &stor_device->alloced_cpus); + + /* "Flush" the stor_chns array. */ + for_each_possible_cpu(cpu) { + if (stor_device->stor_chns[cpu] && !cpumask_test_cpu( + cpu, &stor_device->alloced_cpus)) + WRITE_ONCE(stor_device->stor_chns[cpu], NULL); + } + + WRITE_ONCE(stor_device->stor_chns[new], channel); + cpumask_set_cpu(new, &stor_device->alloced_cpus); + + spin_unlock_irqrestore(&stor_device->lock, flags); +} + +static u64 storvsc_next_request_id(struct vmbus_channel *channel, u64 rqst_addr) +{ + struct storvsc_cmd_request *request = + (struct storvsc_cmd_request *)(unsigned long)rqst_addr; + + if (rqst_addr == VMBUS_RQST_INIT) + return VMBUS_RQST_INIT; + if (rqst_addr == VMBUS_RQST_RESET) + return VMBUS_RQST_RESET; + + /* + * Cannot return an ID of 0, which is reserved for an unsolicited + * message from Hyper-V. + */ + return (u64)blk_mq_unique_tag(scsi_cmd_to_rq(request->cmd)) + 1; +} + +static void handle_sc_creation(struct vmbus_channel *new_sc) +{ + struct hv_device *device = new_sc->primary_channel->device_obj; + struct device *dev = &device->device; + struct storvsc_device *stor_device; + struct vmstorage_channel_properties props; + int ret; + + stor_device = get_out_stor_device(device); + if (!stor_device) + return; + + memset(&props, 0, sizeof(struct vmstorage_channel_properties)); + new_sc->max_pkt_size = STORVSC_MAX_PKT_SIZE; + + new_sc->next_request_id_callback = storvsc_next_request_id; + + ret = vmbus_open(new_sc, + storvsc_ringbuffer_size, + storvsc_ringbuffer_size, + (void *)&props, + sizeof(struct vmstorage_channel_properties), + storvsc_on_channel_callback, new_sc); + + /* In case vmbus_open() fails, we don't use the sub-channel. */ + if (ret != 0) { + dev_err(dev, "Failed to open sub-channel: err=%d\n", ret); + return; + } + + new_sc->change_target_cpu_callback = storvsc_change_target_cpu; + + /* Add the sub-channel to the array of available channels. */ + stor_device->stor_chns[new_sc->target_cpu] = new_sc; + cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus); +} + +static void handle_multichannel_storage(struct hv_device *device, int max_chns) +{ + struct device *dev = &device->device; + struct storvsc_device *stor_device; + int num_sc; + struct storvsc_cmd_request *request; + struct vstor_packet *vstor_packet; + int ret, t; + + /* + * If the number of CPUs is artificially restricted, such as + * with maxcpus=1 on the kernel boot line, Hyper-V could offer + * sub-channels >= the number of CPUs. These sub-channels + * should not be created. The primary channel is already created + * and assigned to one CPU, so check against # CPUs - 1. + */ + num_sc = min((int)(num_online_cpus() - 1), max_chns); + if (!num_sc) + return; + + stor_device = get_out_stor_device(device); + if (!stor_device) + return; + + stor_device->num_sc = num_sc; + request = &stor_device->init_request; + vstor_packet = &request->vstor_packet; + + /* + * Establish a handler for dealing with subchannels. 
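storvsc_next_request_id() above offsets the blk-mq tag by one because a transaction ID of 0 is reserved for unsolicited messages from Hyper-V; the receive path later undoes the offset with rqst_id - 1. A trivial sketch of that convention (the helpers are illustrative, not driver functions):

#include <stdio.h>
#include <stdint.h>

static uint64_t tag_to_rqst_id(uint32_t tag)	{ return (uint64_t)tag + 1; }
static uint32_t rqst_id_to_tag(uint64_t id)	{ return (uint32_t)(id - 1); }

int main(void)
{
	uint32_t tag = 0;	/* tag 0 is a perfectly valid blk-mq tag */
	uint64_t id = tag_to_rqst_id(tag);

	/* The +1 keeps the wire ID nonzero while the mapping stays lossless. */
	printf("tag %u -> id %llu -> tag %u\n",
	       tag, (unsigned long long)id, rqst_id_to_tag(id));
	return 0;
}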
+ */ + vmbus_set_sc_create_callback(device->channel, handle_sc_creation); + + /* + * Request the host to create sub-channels. + */ + memset(request, 0, sizeof(struct storvsc_cmd_request)); + init_completion(&request->wait_event); + vstor_packet->operation = VSTOR_OPERATION_CREATE_SUB_CHANNELS; + vstor_packet->flags = REQUEST_COMPLETION_FLAG; + vstor_packet->sub_channel_count = num_sc; + + ret = vmbus_sendpacket(device->channel, vstor_packet, + sizeof(struct vstor_packet), + VMBUS_RQST_INIT, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + + if (ret != 0) { + dev_err(dev, "Failed to create sub-channel: err=%d\n", ret); + return; + } + + t = wait_for_completion_timeout(&request->wait_event, 10*HZ); + if (t == 0) { + dev_err(dev, "Failed to create sub-channel: timed out\n"); + return; + } + + if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || + vstor_packet->status != 0) { + dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n", + vstor_packet->operation, vstor_packet->status); + return; + } + + /* + * We need to do nothing here, because vmbus_process_offer() + * invokes channel->sc_creation_callback, which will open and use + * the sub-channel(s). + */ +} + +static void cache_wwn(struct storvsc_device *stor_device, + struct vstor_packet *vstor_packet) +{ + /* + * Cache the currently active port and node ww names. + */ + if (vstor_packet->wwn_packet.primary_active) { + stor_device->node_name = + wwn_to_u64(vstor_packet->wwn_packet.primary_node_wwn); + stor_device->port_name = + wwn_to_u64(vstor_packet->wwn_packet.primary_port_wwn); + } else { + stor_device->node_name = + wwn_to_u64(vstor_packet->wwn_packet.secondary_node_wwn); + stor_device->port_name = + wwn_to_u64(vstor_packet->wwn_packet.secondary_port_wwn); + } +} + + +static int storvsc_execute_vstor_op(struct hv_device *device, + struct storvsc_cmd_request *request, + bool status_check) +{ + struct storvsc_device *stor_device; + struct vstor_packet *vstor_packet; + int ret, t; + + stor_device = get_out_stor_device(device); + if (!stor_device) + return -ENODEV; + + vstor_packet = &request->vstor_packet; + + init_completion(&request->wait_event); + vstor_packet->flags = REQUEST_COMPLETION_FLAG; + + ret = vmbus_sendpacket(device->channel, vstor_packet, + sizeof(struct vstor_packet), + VMBUS_RQST_INIT, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (ret != 0) + return ret; + + t = wait_for_completion_timeout(&request->wait_event, 5*HZ); + if (t == 0) + return -ETIMEDOUT; + + if (!status_check) + return ret; + + if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || + vstor_packet->status != 0) + return -EINVAL; + + return ret; +} + +static int storvsc_channel_init(struct hv_device *device, bool is_fc) +{ + struct storvsc_device *stor_device; + struct storvsc_cmd_request *request; + struct vstor_packet *vstor_packet; + int ret, i; + int max_chns; + bool process_sub_channels = false; + + stor_device = get_out_stor_device(device); + if (!stor_device) + return -ENODEV; + + request = &stor_device->init_request; + vstor_packet = &request->vstor_packet; + + /* + * Now, initiate the vsc/vsp initialization protocol on the open + * channel + */ + memset(request, 0, sizeof(struct storvsc_cmd_request)); + vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION; + ret = storvsc_execute_vstor_op(device, request, true); + if (ret) + return ret; + /* + * Query host supported protocol version. 
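cache_wwn() above stores the 8-byte world wide names as 64-bit integers via wwn_to_u64(), assumed here to interpret the bytes big-endian. A standalone equivalent of that conversion (the WWN value is made up):

#include <stdio.h>
#include <stdint.h>

static uint64_t wwn_to_u64_sketch(const uint8_t wwn[8])
{
	uint64_t v = 0;
	int i;

	/* Fold the bytes in wire order, most significant byte first. */
	for (i = 0; i < 8; i++)
		v = (v << 8) | wwn[i];
	return v;
}

int main(void)
{
	const uint8_t wwn[8] = { 0x50, 0x01, 0x43, 0x80, 0x12, 0x34, 0x56, 0x78 };

	printf("0x%016llx\n", (unsigned long long)wwn_to_u64_sketch(wwn));
	return 0;
}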
+ */ + + for (i = 0; i < ARRAY_SIZE(protocol_version); i++) { + /* reuse the packet for version range supported */ + memset(vstor_packet, 0, sizeof(struct vstor_packet)); + vstor_packet->operation = + VSTOR_OPERATION_QUERY_PROTOCOL_VERSION; + + vstor_packet->version.major_minor = protocol_version[i]; + + /* + * The revision number is only used in Windows; set it to 0. + */ + vstor_packet->version.revision = 0; + ret = storvsc_execute_vstor_op(device, request, false); + if (ret != 0) + return ret; + + if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO) + return -EINVAL; + + if (vstor_packet->status == 0) { + vmstor_proto_version = protocol_version[i]; + + break; + } + } + + if (vstor_packet->status != 0) { + dev_err(&device->device, "Obsolete Hyper-V version\n"); + return -EINVAL; + } + + + memset(vstor_packet, 0, sizeof(struct vstor_packet)); + vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES; + ret = storvsc_execute_vstor_op(device, request, true); + if (ret != 0) + return ret; + + /* + * Check to see if multi-channel support is there. + * Hosts that implement protocol version of 5.1 and above + * support multi-channel. + */ + max_chns = vstor_packet->storage_channel_properties.max_channel_cnt; + + /* + * Allocate state to manage the sub-channels. + * We allocate an array based on the numbers of possible CPUs + * (Hyper-V does not support cpu online/offline). + * This Array will be sparseley populated with unique + * channels - primary + sub-channels. + * We will however populate all the slots to evenly distribute + * the load. + */ + stor_device->stor_chns = kcalloc(num_possible_cpus(), sizeof(void *), + GFP_KERNEL); + if (stor_device->stor_chns == NULL) + return -ENOMEM; + + device->channel->change_target_cpu_callback = storvsc_change_target_cpu; + + stor_device->stor_chns[device->channel->target_cpu] = device->channel; + cpumask_set_cpu(device->channel->target_cpu, + &stor_device->alloced_cpus); + + if (vstor_packet->storage_channel_properties.flags & + STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL) + process_sub_channels = true; + + stor_device->max_transfer_bytes = + vstor_packet->storage_channel_properties.max_transfer_bytes; + + if (!is_fc) + goto done; + + /* + * For FC devices retrieve FC HBA data. + */ + memset(vstor_packet, 0, sizeof(struct vstor_packet)); + vstor_packet->operation = VSTOR_OPERATION_FCHBA_DATA; + ret = storvsc_execute_vstor_op(device, request, true); + if (ret != 0) + return ret; + + /* + * Cache the currently active port and node ww names. 
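The loop above negotiates the protocol by offering the entries of protocol_version[] newest-first and keeping the first one the host reports as accepted. A reduced model of that handshake, where host_accepts() is a made-up stand-in for the real QUERY_PROTOCOL_VERSION round trip:

#include <stdio.h>

#define VER(maj, min)	((((maj) & 0xff) << 8) | ((min) & 0xff))

static const int preferred[] = { VER(6, 2), VER(6, 0), VER(5, 1) };

static int host_accepts(int ver)
{
	return ver <= VER(6, 0);	/* pretend the host tops out at 6.0 */
}

int main(void)
{
	int i, negotiated = 0;

	for (i = 0; i < (int)(sizeof(preferred) / sizeof(preferred[0])); i++) {
		if (host_accepts(preferred[i])) {
			negotiated = preferred[i];
			break;
		}
	}
	printf("negotiated 0x%04x\n", negotiated);	/* prints 0x0600 here */
	return 0;
}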
+ */ + cache_wwn(stor_device, vstor_packet); + +done: + + memset(vstor_packet, 0, sizeof(struct vstor_packet)); + vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION; + ret = storvsc_execute_vstor_op(device, request, true); + if (ret != 0) + return ret; + + if (process_sub_channels) + handle_multichannel_storage(device, max_chns); + + return ret; +} + +static void storvsc_handle_error(struct vmscsi_request *vm_srb, + struct scsi_cmnd *scmnd, + struct Scsi_Host *host, + u8 asc, u8 ascq) +{ + struct storvsc_scan_work *wrk; + void (*process_err_fn)(struct work_struct *work); + struct hv_host_device *host_dev = shost_priv(host); + + switch (SRB_STATUS(vm_srb->srb_status)) { + case SRB_STATUS_ERROR: + case SRB_STATUS_ABORTED: + case SRB_STATUS_INVALID_REQUEST: + case SRB_STATUS_INTERNAL_ERROR: + case SRB_STATUS_TIMEOUT: + case SRB_STATUS_SELECTION_TIMEOUT: + case SRB_STATUS_BUS_RESET: + case SRB_STATUS_DATA_OVERRUN: + if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) { + /* Check for capacity change */ + if ((asc == 0x2a) && (ascq == 0x9)) { + process_err_fn = storvsc_device_scan; + /* Retry the I/O that triggered this. */ + set_host_byte(scmnd, DID_REQUEUE); + goto do_work; + } + + /* + * Check for "Operating parameters have changed" + * due to Hyper-V changing the VHD/VHDX BlockSize + * when adding/removing a differencing disk. This + * causes discard_granularity to change, so do a + * rescan to pick up the new granularity. We don't + * want scsi_report_sense() to output a message + * that a sysadmin wouldn't know what to do with. + */ + if ((asc == 0x3f) && (ascq != 0x03) && + (ascq != 0x0e)) { + process_err_fn = storvsc_device_scan; + set_host_byte(scmnd, DID_REQUEUE); + goto do_work; + } + + /* + * Otherwise, let upper layer deal with the + * error when sense message is present + */ + return; + } + + /* + * If there is an error; offline the device since all + * error recovery strategies would have already been + * deployed on the host side. However, if the command + * were a pass-through command deal with it appropriately. + */ + switch (scmnd->cmnd[0]) { + case ATA_16: + case ATA_12: + set_host_byte(scmnd, DID_PASSTHROUGH); + break; + /* + * On some Hyper-V hosts TEST_UNIT_READY command can + * return SRB_STATUS_ERROR. Let the upper level code + * deal with it based on the sense information. + */ + case TEST_UNIT_READY: + break; + default: + set_host_byte(scmnd, DID_ERROR); + } + return; + + case SRB_STATUS_INVALID_LUN: + set_host_byte(scmnd, DID_NO_CONNECT); + process_err_fn = storvsc_remove_lun; + goto do_work; + + } + return; + +do_work: + /* + * We need to schedule work to process this error; schedule it. 
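storvsc_handle_error() above keys its rescan decision off the additional sense code: a capacity change (ASC 2Ah, ASCQ 09h) or most ASC 3Fh "parameters changed" reports schedule a device rescan and requeue the I/O, while other sense data is left to the SCSI midlayer. A compact restatement of that triage (illustrative only):

#include <stdio.h>

enum action { LET_MIDLAYER_HANDLE, RESCAN_DEVICE };

static enum action triage(unsigned char asc, unsigned char ascq)
{
	if (asc == 0x2a && ascq == 0x09)
		return RESCAN_DEVICE;		/* capacity data changed */
	if (asc == 0x3f && ascq != 0x03 && ascq != 0x0e)
		return RESCAN_DEVICE;		/* operating parameters changed */
	return LET_MIDLAYER_HANDLE;
}

int main(void)
{
	printf("%d %d %d\n",
	       triage(0x2a, 0x09),	/* 1: rescan */
	       triage(0x3f, 0x03),	/* 0: explicitly excluded ASCQ */
	       triage(0x06, 0x00));	/* 0: unrelated sense */
	return 0;
}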
+ */ + wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC); + if (!wrk) { + set_host_byte(scmnd, DID_BAD_TARGET); + return; + } + + wrk->host = host; + wrk->lun = vm_srb->lun; + wrk->tgt_id = vm_srb->target_id; + INIT_WORK(&wrk->work, process_err_fn); + queue_work(host_dev->handle_error_wq, &wrk->work); +} + + +static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request, + struct storvsc_device *stor_dev) +{ + struct scsi_cmnd *scmnd = cmd_request->cmd; + struct scsi_sense_hdr sense_hdr; + struct vmscsi_request *vm_srb; + u32 data_transfer_length; + struct Scsi_Host *host; + u32 payload_sz = cmd_request->payload_sz; + void *payload = cmd_request->payload; + bool sense_ok; + + host = stor_dev->host; + + vm_srb = &cmd_request->vstor_packet.vm_srb; + data_transfer_length = vm_srb->data_transfer_length; + + scmnd->result = vm_srb->scsi_status; + + if (scmnd->result) { + sense_ok = scsi_normalize_sense(scmnd->sense_buffer, + SCSI_SENSE_BUFFERSIZE, &sense_hdr); + + if (sense_ok && do_logging(STORVSC_LOGGING_WARN)) + scsi_print_sense_hdr(scmnd->device, "storvsc", + &sense_hdr); + } + + if (vm_srb->srb_status != SRB_STATUS_SUCCESS) { + storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc, + sense_hdr.ascq); + /* + * The Windows driver set data_transfer_length on + * SRB_STATUS_DATA_OVERRUN. On other errors, this value + * is untouched. In these cases we set it to 0. + */ + if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN) + data_transfer_length = 0; + } + + /* Validate data_transfer_length (from Hyper-V) */ + if (data_transfer_length > cmd_request->payload->range.len) + data_transfer_length = cmd_request->payload->range.len; + + scsi_set_resid(scmnd, + cmd_request->payload->range.len - data_transfer_length); + + scsi_done(scmnd); + + if (payload_sz > + sizeof(struct vmbus_channel_packet_multipage_buffer)) + kfree(payload); +} + +static void storvsc_on_io_completion(struct storvsc_device *stor_device, + struct vstor_packet *vstor_packet, + struct storvsc_cmd_request *request) +{ + struct vstor_packet *stor_pkt; + struct hv_device *device = stor_device->device; + + stor_pkt = &request->vstor_packet; + + /* + * The current SCSI handling on the host side does + * not correctly handle: + * INQUIRY command with page code parameter set to 0x80 + * MODE_SENSE command with cmd[2] == 0x1c + * + * Setup srb and scsi status so this won't be fatal. + * We do this so we can distinguish truly fatal failues + * (srb status == 0x4) and off-line the device in that case. + */ + + if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) || + (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) { + vstor_packet->vm_srb.scsi_status = 0; + vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS; + } + + /* Copy over the status...etc */ + stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status; + stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status; + + /* + * Copy over the sense_info_length, but limit to the known max + * size if Hyper-V returns a bad value. + */ + stor_pkt->vm_srb.sense_info_length = min_t(u8, STORVSC_SENSE_BUFFER_SIZE, + vstor_packet->vm_srb.sense_info_length); + + if (vstor_packet->vm_srb.scsi_status != 0 || + vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) { + + /* + * Log TEST_UNIT_READY errors only as warnings. Hyper-V can + * return errors when detecting devices using TEST_UNIT_READY, + * and logging these as errors produces unhelpful noise. + */ + int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ? 
+ STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR; + + storvsc_log(device, loglevel, + "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n", + scsi_cmd_to_rq(request->cmd)->tag, + stor_pkt->vm_srb.cdb[0], + vstor_packet->vm_srb.scsi_status, + vstor_packet->vm_srb.srb_status, + vstor_packet->status); + } + + if (vstor_packet->vm_srb.scsi_status == SAM_STAT_CHECK_CONDITION && + (vstor_packet->vm_srb.srb_status & SRB_STATUS_AUTOSENSE_VALID)) + memcpy(request->cmd->sense_buffer, + vstor_packet->vm_srb.sense_data, + stor_pkt->vm_srb.sense_info_length); + + stor_pkt->vm_srb.data_transfer_length = + vstor_packet->vm_srb.data_transfer_length; + + storvsc_command_completion(request, stor_device); + + if (atomic_dec_and_test(&stor_device->num_outstanding_req) && + stor_device->drain_notify) + wake_up(&stor_device->waiting_to_drain); +} + +static void storvsc_on_receive(struct storvsc_device *stor_device, + struct vstor_packet *vstor_packet, + struct storvsc_cmd_request *request) +{ + struct hv_host_device *host_dev; + switch (vstor_packet->operation) { + case VSTOR_OPERATION_COMPLETE_IO: + storvsc_on_io_completion(stor_device, vstor_packet, request); + break; + + case VSTOR_OPERATION_REMOVE_DEVICE: + case VSTOR_OPERATION_ENUMERATE_BUS: + host_dev = shost_priv(stor_device->host); + queue_work( + host_dev->handle_error_wq, &host_dev->host_scan_work); + break; + + case VSTOR_OPERATION_FCHBA_DATA: + cache_wwn(stor_device, vstor_packet); +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + fc_host_node_name(stor_device->host) = stor_device->node_name; + fc_host_port_name(stor_device->host) = stor_device->port_name; +#endif + break; + default: + break; + } +} + +static void storvsc_on_channel_callback(void *context) +{ + struct vmbus_channel *channel = (struct vmbus_channel *)context; + const struct vmpacket_descriptor *desc; + struct hv_device *device; + struct storvsc_device *stor_device; + struct Scsi_Host *shost; + unsigned long time_limit = jiffies + msecs_to_jiffies(CALLBACK_TIMEOUT); + + if (channel->primary_channel != NULL) + device = channel->primary_channel->device_obj; + else + device = channel->device_obj; + + stor_device = get_in_stor_device(device); + if (!stor_device) + return; + + shost = stor_device->host; + + foreach_vmbus_pkt(desc, channel) { + struct vstor_packet *packet = hv_pkt_data(desc); + struct storvsc_cmd_request *request = NULL; + u32 pktlen = hv_pkt_datalen(desc); + u64 rqst_id = desc->trans_id; + u32 minlen = rqst_id ? sizeof(struct vstor_packet) : + sizeof(enum vstor_packet_operation); + + if (unlikely(time_after(jiffies, time_limit))) { + hv_pkt_iter_close(channel); + return; + } + + if (pktlen < minlen) { + dev_err(&device->device, + "Invalid pkt: id=%llu, len=%u, minlen=%u\n", + rqst_id, pktlen, minlen); + continue; + } + + if (rqst_id == VMBUS_RQST_INIT) { + request = &stor_device->init_request; + } else if (rqst_id == VMBUS_RQST_RESET) { + request = &stor_device->reset_request; + } else { + /* Hyper-V can send an unsolicited message with ID of 0 */ + if (rqst_id == 0) { + /* + * storvsc_on_receive() looks at the vstor_packet in the message + * from the ring buffer. + * + * - If the operation in the vstor_packet is COMPLETE_IO, then + * we call storvsc_on_io_completion(), and dereference the + * guest memory address. Make sure we don't call + * storvsc_on_io_completion() with a guest memory address + * that is zero if Hyper-V were to construct and send such + * a bogus packet. 
+ * + * - If the operation in the vstor_packet is FCHBA_DATA, then + * we call cache_wwn(), and access the data payload area of + * the packet (wwn_packet); however, there is no guarantee + * that the packet is big enough to contain such area. + * Future-proof the code by rejecting such a bogus packet. + */ + if (packet->operation == VSTOR_OPERATION_COMPLETE_IO || + packet->operation == VSTOR_OPERATION_FCHBA_DATA) { + dev_err(&device->device, "Invalid packet with ID of 0\n"); + continue; + } + } else { + struct scsi_cmnd *scmnd; + + /* Transaction 'rqst_id' corresponds to tag 'rqst_id - 1' */ + scmnd = scsi_host_find_tag(shost, rqst_id - 1); + if (scmnd == NULL) { + dev_err(&device->device, "Incorrect transaction ID\n"); + continue; + } + request = (struct storvsc_cmd_request *)scsi_cmd_priv(scmnd); + scsi_dma_unmap(scmnd); + } + + storvsc_on_receive(stor_device, packet, request); + continue; + } + + memcpy(&request->vstor_packet, packet, + sizeof(struct vstor_packet)); + complete(&request->wait_event); + } +} + +static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size, + bool is_fc) +{ + struct vmstorage_channel_properties props; + int ret; + + memset(&props, 0, sizeof(struct vmstorage_channel_properties)); + + device->channel->max_pkt_size = STORVSC_MAX_PKT_SIZE; + device->channel->next_request_id_callback = storvsc_next_request_id; + + ret = vmbus_open(device->channel, + ring_size, + ring_size, + (void *)&props, + sizeof(struct vmstorage_channel_properties), + storvsc_on_channel_callback, device->channel); + + if (ret != 0) + return ret; + + ret = storvsc_channel_init(device, is_fc); + + return ret; +} + +static int storvsc_dev_remove(struct hv_device *device) +{ + struct storvsc_device *stor_device; + + stor_device = hv_get_drvdata(device); + + stor_device->destroy = true; + + /* Make sure flag is set before waiting */ + wmb(); + + /* + * At this point, all outbound traffic should be disable. We + * only allow inbound traffic (responses) to proceed so that + * outstanding requests can be completed. + */ + + storvsc_wait_to_drain(stor_device); + + /* + * Since we have already drained, we don't need to busy wait + * as was done in final_release_stor_device() + * Note that we cannot set the ext pointer to NULL until + * we have drained - to drain the outgoing packets, we need to + * allow incoming packets. + */ + hv_set_drvdata(device, NULL); + + /* Close the channel */ + vmbus_close(device->channel); + + kfree(stor_device->stor_chns); + kfree(stor_device); + return 0; +} + +static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device, + u16 q_num) +{ + u16 slot = 0; + u16 hash_qnum; + const struct cpumask *node_mask; + int num_channels, tgt_cpu; + + if (stor_device->num_sc == 0) { + stor_device->stor_chns[q_num] = stor_device->device->channel; + return stor_device->device->channel; + } + + /* + * Our channel array is sparsley populated and we + * initiated I/O on a processor/hw-q that does not + * currently have a designated channel. Fix this. + * The strategy is simple: + * I. Ensure NUMA locality + * II. 
Distribute evenly (best effort) + */ + + node_mask = cpumask_of_node(cpu_to_node(q_num)); + + num_channels = 0; + for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) { + if (cpumask_test_cpu(tgt_cpu, node_mask)) + num_channels++; + } + if (num_channels == 0) { + stor_device->stor_chns[q_num] = stor_device->device->channel; + return stor_device->device->channel; + } + + hash_qnum = q_num; + while (hash_qnum >= num_channels) + hash_qnum -= num_channels; + + for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) { + if (!cpumask_test_cpu(tgt_cpu, node_mask)) + continue; + if (slot == hash_qnum) + break; + slot++; + } + + stor_device->stor_chns[q_num] = stor_device->stor_chns[tgt_cpu]; + + return stor_device->stor_chns[q_num]; +} + + +static int storvsc_do_io(struct hv_device *device, + struct storvsc_cmd_request *request, u16 q_num) +{ + struct storvsc_device *stor_device; + struct vstor_packet *vstor_packet; + struct vmbus_channel *outgoing_channel, *channel; + unsigned long flags; + int ret = 0; + const struct cpumask *node_mask; + int tgt_cpu; + + vstor_packet = &request->vstor_packet; + stor_device = get_out_stor_device(device); + + if (!stor_device) + return -ENODEV; + + + request->device = device; + /* + * Select an appropriate channel to send the request out. + */ + /* See storvsc_change_target_cpu(). */ + outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]); + if (outgoing_channel != NULL) { + if (outgoing_channel->target_cpu == q_num) { + /* + * Ideally, we want to pick a different channel if + * available on the same NUMA node. + */ + node_mask = cpumask_of_node(cpu_to_node(q_num)); + for_each_cpu_wrap(tgt_cpu, + &stor_device->alloced_cpus, q_num + 1) { + if (!cpumask_test_cpu(tgt_cpu, node_mask)) + continue; + if (tgt_cpu == q_num) + continue; + channel = READ_ONCE( + stor_device->stor_chns[tgt_cpu]); + if (channel == NULL) + continue; + if (hv_get_avail_to_write_percent( + &channel->outbound) + > ring_avail_percent_lowater) { + outgoing_channel = channel; + goto found_channel; + } + } + + /* + * All the other channels on the same NUMA node are + * busy. Try to use the channel on the current CPU + */ + if (hv_get_avail_to_write_percent( + &outgoing_channel->outbound) + > ring_avail_percent_lowater) + goto found_channel; + + /* + * If we reach here, all the channels on the current + * NUMA node are busy. 
Try to find a channel in + * other NUMA nodes + */ + for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) { + if (cpumask_test_cpu(tgt_cpu, node_mask)) + continue; + channel = READ_ONCE( + stor_device->stor_chns[tgt_cpu]); + if (channel == NULL) + continue; + if (hv_get_avail_to_write_percent( + &channel->outbound) + > ring_avail_percent_lowater) { + outgoing_channel = channel; + goto found_channel; + } + } + } + } else { + spin_lock_irqsave(&stor_device->lock, flags); + outgoing_channel = stor_device->stor_chns[q_num]; + if (outgoing_channel != NULL) { + spin_unlock_irqrestore(&stor_device->lock, flags); + goto found_channel; + } + outgoing_channel = get_og_chn(stor_device, q_num); + spin_unlock_irqrestore(&stor_device->lock, flags); + } + +found_channel: + vstor_packet->flags |= REQUEST_COMPLETION_FLAG; + + vstor_packet->vm_srb.length = sizeof(struct vmscsi_request); + + + vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE; + + + vstor_packet->vm_srb.data_transfer_length = + request->payload->range.len; + + vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB; + + if (request->payload->range.len) { + + ret = vmbus_sendpacket_mpb_desc(outgoing_channel, + request->payload, request->payload_sz, + vstor_packet, + sizeof(struct vstor_packet), + (unsigned long)request); + } else { + ret = vmbus_sendpacket(outgoing_channel, vstor_packet, + sizeof(struct vstor_packet), + (unsigned long)request, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + } + + if (ret != 0) + return ret; + + atomic_inc(&stor_device->num_outstanding_req); + + return ret; +} + +static int storvsc_device_alloc(struct scsi_device *sdevice) +{ + /* + * Set blist flag to permit the reading of the VPD pages even when + * the target may claim SPC-2 compliance. MSFT targets currently + * claim SPC-2 compliance while they implement post SPC-2 features. + * With this flag we can correctly handle WRITE_SAME_16 issues. + * + * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but + * still supports REPORT LUN. + */ + sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES; + + return 0; +} + +static int storvsc_device_configure(struct scsi_device *sdevice) +{ + blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ)); + + /* storvsc devices don't support MAINTENANCE_IN SCSI cmd */ + sdevice->no_report_opcodes = 1; + sdevice->no_write_same = 1; + + /* + * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3 + * if the device is a MSFT virtual device. If the host is + * WIN10 or newer, allow write_same. + */ + if (!strncmp(sdevice->vendor, "Msft", 4)) { + switch (vmstor_proto_version) { + case VMSTOR_PROTO_VERSION_WIN8: + case VMSTOR_PROTO_VERSION_WIN8_1: + sdevice->scsi_level = SCSI_SPC_3; + break; + } + + if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN10) + sdevice->no_write_same = 0; + } + + return 0; +} + +static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev, + sector_t capacity, int *info) +{ + sector_t nsect = capacity; + sector_t cylinders = nsect; + int heads, sectors_pt; + + /* + * We are making up these values; let us keep it simple. 
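get_og_chn() above assigns a channel to a hardware queue that does not yet have one: it reduces the queue number modulo the count of NUMA-local channels (by repeated subtraction) and then walks the candidate CPUs to that slot, which spreads queues evenly over the available channels. A simplified sketch of the slot choice, with a plain array standing in for the cpumask:

#include <stdio.h>

static int pick_slot(int q_num, const int *candidates, int num_channels)
{
	int hash_qnum = q_num;
	int slot;

	while (hash_qnum >= num_channels)	/* same as q_num % num_channels */
		hash_qnum -= num_channels;

	for (slot = 0; slot < num_channels; slot++)
		if (slot == hash_qnum)
			return candidates[slot];
	return candidates[0];			/* not reached */
}

int main(void)
{
	int cpus_with_channels[] = { 2, 5, 9 };	/* illustrative CPU numbers */
	int q;

	for (q = 0; q < 6; q++)
		printf("hw queue %d -> channel on CPU %d\n",
		       q, pick_slot(q, cpus_with_channels, 3));
	return 0;
}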
+ */ + heads = 0xff; + sectors_pt = 0x3f; /* Sectors per track */ + sector_div(cylinders, heads * sectors_pt); + if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect) + cylinders = 0xffff; + + info[0] = heads; + info[1] = sectors_pt; + info[2] = (int)cylinders; + + return 0; +} + +static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) +{ + struct hv_host_device *host_dev = shost_priv(scmnd->device->host); + struct hv_device *device = host_dev->dev; + + struct storvsc_device *stor_device; + struct storvsc_cmd_request *request; + struct vstor_packet *vstor_packet; + int ret, t; + + stor_device = get_out_stor_device(device); + if (!stor_device) + return FAILED; + + request = &stor_device->reset_request; + vstor_packet = &request->vstor_packet; + memset(vstor_packet, 0, sizeof(struct vstor_packet)); + + init_completion(&request->wait_event); + + vstor_packet->operation = VSTOR_OPERATION_RESET_BUS; + vstor_packet->flags = REQUEST_COMPLETION_FLAG; + vstor_packet->vm_srb.path_id = stor_device->path_id; + + ret = vmbus_sendpacket(device->channel, vstor_packet, + sizeof(struct vstor_packet), + VMBUS_RQST_RESET, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (ret != 0) + return FAILED; + + t = wait_for_completion_timeout(&request->wait_event, 5*HZ); + if (t == 0) + return TIMEOUT_ERROR; + + + /* + * At this point, all outstanding requests in the adapter + * should have been flushed out and return to us + * There is a potential race here where the host may be in + * the process of responding when we return from here. + * Just wait for all in-transit packets to be accounted for + * before we return from here. + */ + storvsc_wait_to_drain(stor_device); + + return SUCCESS; +} + +/* + * The host guarantees to respond to each command, although I/O latencies might + * be unbounded on Azure. Reset the timer unconditionally to give the host a + * chance to perform EH. + */ +static enum scsi_timeout_action storvsc_eh_timed_out(struct scsi_cmnd *scmnd) +{ + return SCSI_EH_RESET_TIMER; +} + +static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd) +{ + bool allowed = true; + u8 scsi_op = scmnd->cmnd[0]; + + switch (scsi_op) { + /* the host does not handle WRITE_SAME, log accident usage */ + case WRITE_SAME: + /* + * smartd sends this command and the host does not handle + * this. So, don't send it. + */ + case SET_WINDOW: + set_host_byte(scmnd, DID_ERROR); + allowed = false; + break; + default: + break; + } + return allowed; +} + +static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) +{ + int ret; + struct hv_host_device *host_dev = shost_priv(host); + struct hv_device *dev = host_dev->dev; + struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd); + struct scatterlist *sgl; + struct vmscsi_request *vm_srb; + struct vmbus_packet_mpb_array *payload; + u32 payload_sz; + u32 length; + + if (vmstor_proto_version <= VMSTOR_PROTO_VERSION_WIN8) { + /* + * On legacy hosts filter unimplemented commands. + * Future hosts are expected to correctly handle + * unsupported commands. Furthermore, it is + * possible that some of the currently + * unsupported commands maybe supported in + * future versions of the host. 
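storvsc_get_chs() above fabricates a 255-head, 63-sector geometry and falls back to 0xffff cylinders when the capacity cannot be represented, much like stex_biosparam() earlier in this patch (which additionally drops to 64/32 for small disks). A worked standalone example for a 128 GiB disk:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t capacity = 268435456ULL;	/* 128 GiB in 512-byte sectors */
	int heads = 0xff, sectors_pt = 0x3f;
	uint64_t cylinders = capacity / (heads * sectors_pt);

	if ((cylinders + 1) * heads * sectors_pt < capacity)
		cylinders = 0xffff;

	/* Prints C/H/S = 16709/255/63 for this capacity. */
	printf("C/H/S = %llu/%d/%d\n",
	       (unsigned long long)cylinders, heads, sectors_pt);
	return 0;
}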
+ */ + if (!storvsc_scsi_cmd_ok(scmnd)) { + scsi_done(scmnd); + return 0; + } + } + + /* Setup the cmd request */ + cmd_request->cmd = scmnd; + + memset(&cmd_request->vstor_packet, 0, sizeof(struct vstor_packet)); + vm_srb = &cmd_request->vstor_packet.vm_srb; + vm_srb->time_out_value = 60; + + vm_srb->srb_flags |= + SRB_FLAGS_DISABLE_SYNCH_TRANSFER; + + if (scmnd->device->tagged_supported) { + vm_srb->srb_flags |= + (SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE); + vm_srb->queue_tag = SP_UNTAGGED; + vm_srb->queue_action = SRB_SIMPLE_TAG_REQUEST; + } + + /* Build the SRB */ + switch (scmnd->sc_data_direction) { + case DMA_TO_DEVICE: + vm_srb->data_in = WRITE_TYPE; + vm_srb->srb_flags |= SRB_FLAGS_DATA_OUT; + break; + case DMA_FROM_DEVICE: + vm_srb->data_in = READ_TYPE; + vm_srb->srb_flags |= SRB_FLAGS_DATA_IN; + break; + case DMA_NONE: + vm_srb->data_in = UNKNOWN_TYPE; + vm_srb->srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER; + break; + default: + /* + * This is DMA_BIDIRECTIONAL or something else we are never + * supposed to see here. + */ + WARN(1, "Unexpected data direction: %d\n", + scmnd->sc_data_direction); + return -EINVAL; + } + + + vm_srb->port_number = host_dev->port; + vm_srb->path_id = scmnd->device->channel; + vm_srb->target_id = scmnd->device->id; + vm_srb->lun = scmnd->device->lun; + + vm_srb->cdb_length = scmnd->cmd_len; + + memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length); + + sgl = (struct scatterlist *)scsi_sglist(scmnd); + + length = scsi_bufflen(scmnd); + payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb; + payload_sz = 0; + + if (scsi_sg_count(scmnd)) { + unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset); + unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length); + struct scatterlist *sg; + unsigned long hvpfn, hvpfns_to_add; + int j, i = 0, sg_count; + + payload_sz = (hvpg_count * sizeof(u64) + + sizeof(struct vmbus_packet_mpb_array)); + + if (hvpg_count > MAX_PAGE_BUFFER_COUNT) { + payload = kzalloc(payload_sz, GFP_ATOMIC); + if (!payload) + return SCSI_MLQUEUE_DEVICE_BUSY; + } + + payload->range.len = length; + payload->range.offset = offset_in_hvpg; + + sg_count = scsi_dma_map(scmnd); + if (sg_count < 0) { + ret = SCSI_MLQUEUE_DEVICE_BUSY; + goto err_free_payload; + } + + for_each_sg(sgl, sg, sg_count, j) { + /* + * Init values for the current sgl entry. hvpfns_to_add + * is in units of Hyper-V size pages. Handling the + * PAGE_SIZE != HV_HYP_PAGE_SIZE case also handles + * values of sgl->offset that are larger than PAGE_SIZE. + * Such offsets are handled even on other than the first + * sgl entry, provided they are a multiple of PAGE_SIZE. + */ + hvpfn = HVPFN_DOWN(sg_dma_address(sg)); + hvpfns_to_add = HVPFN_UP(sg_dma_address(sg) + + sg_dma_len(sg)) - hvpfn; + + /* + * Fill the next portion of the PFN array with + * sequential Hyper-V PFNs for the continguous physical + * memory described by the sgl entry. The end of the + * last sgl should be reached at the same time that + * the PFN array is filled. 
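The scatter-gather conversion above works in Hyper-V page units: HVPFN_DOWN() yields the first page frame of a DMA range, HVPFN_UP() rounds the end up, and the difference is the number of PFN slots the entry consumes. A standalone version of that arithmetic for one deliberately unaligned range, assuming the usual 4 KiB Hyper-V page size:

#include <stdio.h>
#include <stdint.h>

#define HV_HYP_PAGE_SHIFT	12
#define HV_HYP_PAGE_SIZE	(1ULL << HV_HYP_PAGE_SHIFT)

int main(void)
{
	uint64_t dma_addr = 0x12345678ULL;	/* not page aligned on purpose */
	uint64_t len = 8192;

	uint64_t first_pfn = dma_addr >> HV_HYP_PAGE_SHIFT;		/* HVPFN_DOWN */
	uint64_t end_pfn = (dma_addr + len + HV_HYP_PAGE_SIZE - 1)
			   >> HV_HYP_PAGE_SHIFT;			/* HVPFN_UP   */
	uint64_t offset_in_hvpg = dma_addr & (HV_HYP_PAGE_SIZE - 1);

	/* An 8 KiB buffer at page offset 0x678 touches three 4 KiB pages. */
	printf("offset=0x%llx pages=%llu (pfn 0x%llx..0x%llx)\n",
	       (unsigned long long)offset_in_hvpg,
	       (unsigned long long)(end_pfn - first_pfn),
	       (unsigned long long)first_pfn,
	       (unsigned long long)(end_pfn - 1));
	return 0;
}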
+ */ + while (hvpfns_to_add--) + payload->range.pfn_array[i++] = hvpfn++; + } + } + + cmd_request->payload = payload; + cmd_request->payload_sz = payload_sz; + + /* Invokes the vsc to start an IO */ + ret = storvsc_do_io(dev, cmd_request, get_cpu()); + put_cpu(); + + if (ret) + scsi_dma_unmap(scmnd); + + if (ret == -EAGAIN) { + /* no more space */ + ret = SCSI_MLQUEUE_DEVICE_BUSY; + goto err_free_payload; + } + + return 0; + +err_free_payload: + if (payload_sz > sizeof(cmd_request->mpb)) + kfree(payload); + + return ret; +} + +static struct scsi_host_template scsi_driver = { + .module = THIS_MODULE, + .name = "storvsc_host_t", + .cmd_size = sizeof(struct storvsc_cmd_request), + .bios_param = storvsc_get_chs, + .queuecommand = storvsc_queuecommand, + .eh_host_reset_handler = storvsc_host_reset_handler, + .proc_name = "storvsc_host", + .eh_timed_out = storvsc_eh_timed_out, + .slave_alloc = storvsc_device_alloc, + .slave_configure = storvsc_device_configure, + .cmd_per_lun = 2048, + .this_id = -1, + /* Ensure there are no gaps in presented sgls */ + .virt_boundary_mask = HV_HYP_PAGE_SIZE - 1, + .no_write_same = 1, + .track_queue_depth = 1, + .change_queue_depth = storvsc_change_queue_depth, +}; + +enum { + SCSI_GUID, + IDE_GUID, + SFC_GUID, +}; + +static const struct hv_vmbus_device_id id_table[] = { + /* SCSI guid */ + { HV_SCSI_GUID, + .driver_data = SCSI_GUID + }, + /* IDE guid */ + { HV_IDE_GUID, + .driver_data = IDE_GUID + }, + /* Fibre Channel GUID */ + { + HV_SYNTHFC_GUID, + .driver_data = SFC_GUID + }, + { }, +}; + +MODULE_DEVICE_TABLE(vmbus, id_table); + +static const struct { guid_t guid; } fc_guid = { HV_SYNTHFC_GUID }; + +static bool hv_dev_is_fc(struct hv_device *hv_dev) +{ + return guid_equal(&fc_guid.guid, &hv_dev->dev_type); +} + +static int storvsc_probe(struct hv_device *device, + const struct hv_vmbus_device_id *dev_id) +{ + int ret; + int num_cpus = num_online_cpus(); + int num_present_cpus = num_present_cpus(); + struct Scsi_Host *host; + struct hv_host_device *host_dev; + bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false); + bool is_fc = ((dev_id->driver_data == SFC_GUID) ? true : false); + int target = 0; + struct storvsc_device *stor_device; + int max_sub_channels = 0; + u32 max_xfer_bytes; + + /* + * We support sub-channels for storage on SCSI and FC controllers. + * The number of sub-channels offerred is based on the number of + * VCPUs in the guest. 
+ */ + if (!dev_is_ide) + max_sub_channels = + (num_cpus - 1) / storvsc_vcpus_per_sub_channel; + + scsi_driver.can_queue = max_outstanding_req_per_channel * + (max_sub_channels + 1) * + (100 - ring_avail_percent_lowater) / 100; + + host = scsi_host_alloc(&scsi_driver, + sizeof(struct hv_host_device)); + if (!host) + return -ENOMEM; + + host_dev = shost_priv(host); + memset(host_dev, 0, sizeof(struct hv_host_device)); + + host_dev->port = host->host_no; + host_dev->dev = device; + host_dev->host = host; + + + stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL); + if (!stor_device) { + ret = -ENOMEM; + goto err_out0; + } + + stor_device->destroy = false; + init_waitqueue_head(&stor_device->waiting_to_drain); + stor_device->device = device; + stor_device->host = host; + spin_lock_init(&stor_device->lock); + hv_set_drvdata(device, stor_device); + dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1); + + stor_device->port_number = host->host_no; + ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc); + if (ret) + goto err_out1; + + host_dev->path = stor_device->path_id; + host_dev->target = stor_device->target_id; + + switch (dev_id->driver_data) { + case SFC_GUID: + host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET; + host->max_id = STORVSC_FC_MAX_TARGETS; + host->max_channel = STORVSC_FC_MAX_CHANNELS - 1; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + host->transportt = fc_transport_template; +#endif + break; + + case SCSI_GUID: + host->max_lun = STORVSC_MAX_LUNS_PER_TARGET; + host->max_id = STORVSC_MAX_TARGETS; + host->max_channel = STORVSC_MAX_CHANNELS - 1; + break; + + default: + host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET; + host->max_id = STORVSC_IDE_MAX_TARGETS; + host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1; + break; + } + /* max cmd length */ + host->max_cmd_len = STORVSC_MAX_CMD_LEN; + /* + * Any reasonable Hyper-V configuration should provide + * max_transfer_bytes value aligning to HV_HYP_PAGE_SIZE, + * protecting it from any weird value. + */ + max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE); + if (is_fc) + max_xfer_bytes = min(max_xfer_bytes, STORVSC_FC_MAX_XFER_SIZE); + + /* max_hw_sectors_kb */ + host->max_sectors = max_xfer_bytes >> 9; + /* + * There are 2 requirements for Hyper-V storvsc sgl segments, + * based on which the below calculation for max segments is + * done: + * + * 1. Except for the first and last sgl segment, all sgl segments + * should be align to HV_HYP_PAGE_SIZE, that also means the + * maximum number of segments in a sgl can be calculated by + * dividing the total max transfer length by HV_HYP_PAGE_SIZE. + * + * 2. Except for the first and last, each entry in the SGL must + * have an offset that is a multiple of HV_HYP_PAGE_SIZE. + */ + host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1; + /* + * For non-IDE disks, the host supports multiple channels. + * Set the number of HW queues we are supporting. + */ + if (!dev_is_ide) { + if (storvsc_max_hw_queues > num_present_cpus) { + storvsc_max_hw_queues = 0; + storvsc_log(device, STORVSC_LOGGING_WARN, + "Resetting invalid storvsc_max_hw_queues value to default.\n"); + } + if (storvsc_max_hw_queues) + host->nr_hw_queues = storvsc_max_hw_queues; + else + host->nr_hw_queues = num_present_cpus; + } + + /* + * Set the error handler work queue. 
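/*
 * Illustrative sketch (not part of the patch): the limits derived above
 * from the host-reported max_transfer_bytes, assuming 4 KiB Hyper-V
 * pages and 512-byte sectors. For example, a 512 KiB transfer limit
 * gives max_sectors = 1024 and sg_tablesize = 129 (128 full pages,
 * plus one extra entry to cover an unaligned first or last segment).
 * Types and names here are invented for the example.
 */
struct ex_storvsc_limits {
	unsigned int max_sectors;	/* in 512-byte units */
	unsigned int sg_tablesize;
};

static struct ex_storvsc_limits ex_limits_from_xfer(unsigned int max_xfer_bytes)
{
	struct ex_storvsc_limits lim;

	max_xfer_bytes &= ~(4096u - 1);		/* round_down() to the HV page size */
	lim.max_sectors = max_xfer_bytes >> 9;	/* bytes -> 512-byte sectors */
	lim.sg_tablesize = (max_xfer_bytes >> 12) + 1;

	return lim;
}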
+ */ + host_dev->handle_error_wq = + alloc_ordered_workqueue("storvsc_error_wq_%d", + 0, + host->host_no); + if (!host_dev->handle_error_wq) { + ret = -ENOMEM; + goto err_out2; + } + INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan); + /* Register the HBA and start the scsi bus scan */ + ret = scsi_add_host(host, &device->device); + if (ret != 0) + goto err_out3; + + if (!dev_is_ide) { + scsi_scan_host(host); + } else { + target = (device->dev_instance.b[5] << 8 | + device->dev_instance.b[4]); + ret = scsi_add_device(host, 0, target, 0); + if (ret) + goto err_out4; + } +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + if (host->transportt == fc_transport_template) { + struct fc_rport_identifiers ids = { + .roles = FC_PORT_ROLE_FCP_DUMMY_INITIATOR, + }; + + fc_host_node_name(host) = stor_device->node_name; + fc_host_port_name(host) = stor_device->port_name; + stor_device->rport = fc_remote_port_add(host, 0, &ids); + if (!stor_device->rport) { + ret = -ENOMEM; + goto err_out4; + } + } +#endif + return 0; + +err_out4: + scsi_remove_host(host); + +err_out3: + destroy_workqueue(host_dev->handle_error_wq); + +err_out2: + /* + * Once we have connected with the host, we would need to + * invoke storvsc_dev_remove() to rollback this state and + * this call also frees up the stor_device; hence the jump around + * err_out1 label. + */ + storvsc_dev_remove(device); + goto err_out0; + +err_out1: + kfree(stor_device->stor_chns); + kfree(stor_device); + +err_out0: + scsi_host_put(host); + return ret; +} + +/* Change a scsi target's queue depth */ +static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth) +{ + if (queue_depth > scsi_driver.can_queue) + queue_depth = scsi_driver.can_queue; + + return scsi_change_queue_depth(sdev, queue_depth); +} + +static void storvsc_remove(struct hv_device *dev) +{ + struct storvsc_device *stor_device = hv_get_drvdata(dev); + struct Scsi_Host *host = stor_device->host; + struct hv_host_device *host_dev = shost_priv(host); + +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + if (host->transportt == fc_transport_template) { + fc_remote_port_delete(stor_device->rport); + fc_remove_host(host); + } +#endif + destroy_workqueue(host_dev->handle_error_wq); + scsi_remove_host(host); + storvsc_dev_remove(dev); + scsi_host_put(host); +} + +static int storvsc_suspend(struct hv_device *hv_dev) +{ + struct storvsc_device *stor_device = hv_get_drvdata(hv_dev); + struct Scsi_Host *host = stor_device->host; + struct hv_host_device *host_dev = shost_priv(host); + + storvsc_wait_to_drain(stor_device); + + drain_workqueue(host_dev->handle_error_wq); + + vmbus_close(hv_dev->channel); + + kfree(stor_device->stor_chns); + stor_device->stor_chns = NULL; + + cpumask_clear(&stor_device->alloced_cpus); + + return 0; +} + +static int storvsc_resume(struct hv_device *hv_dev) +{ + int ret; + + ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size, + hv_dev_is_fc(hv_dev)); + return ret; +} + +static struct hv_driver storvsc_drv = { + .name = KBUILD_MODNAME, + .id_table = id_table, + .probe = storvsc_probe, + .remove = storvsc_remove, + .suspend = storvsc_suspend, + .resume = storvsc_resume, + .driver = { + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; + +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) +static struct fc_function_template fc_transport_functions = { + .show_host_node_name = 1, + .show_host_port_name = 1, +}; +#endif + +static int __init storvsc_drv_init(void) +{ + int ret; + + /* + * Divide the ring buffer data size (which is 1 page less + * than the ring buffer size since that 
page is reserved for + * the ring buffer indices) by the max request size (which is + * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64) + */ + max_outstanding_req_per_channel = + ((storvsc_ringbuffer_size - PAGE_SIZE) / + ALIGN(MAX_MULTIPAGE_BUFFER_PACKET + + sizeof(struct vstor_packet) + sizeof(u64), + sizeof(u64))); + +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + fc_transport_template = fc_attach_transport(&fc_transport_functions); + if (!fc_transport_template) + return -ENODEV; +#endif + + ret = vmbus_driver_register(&storvsc_drv); + +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + if (ret) + fc_release_transport(fc_transport_template); +#endif + + return ret; +} + +static void __exit storvsc_drv_exit(void) +{ + vmbus_driver_unregister(&storvsc_drv); +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + fc_release_transport(fc_transport_template); +#endif +} + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver"); +module_init(storvsc_drv_init); +module_exit(storvsc_drv_exit); diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c new file mode 100644 index 000000000..abf229b84 --- /dev/null +++ b/drivers/scsi/sun3_scsi.c @@ -0,0 +1,670 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl) + * + * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net) + * + * VME support added by Sam Creasey + * + * TODO: modify this driver to support multiple Sun3 SCSI VME boards + * + * Adapted from mac_scsinew.c: + */ +/* + * Generic Macintosh NCR5380 driver + * + * Copyright 1998, Michael Schmitz + * + * derived in part from: + */ +/* + * Generic Generic NCR5380 driver + * + * Copyright 1995, Russell King + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +/* minimum number of bytes to do dma on */ +#define DMA_MIN_SIZE 129 + +/* Definitions for the core NCR5380 driver. 
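/*
 * Illustrative sketch (not part of the patch): the per-channel request
 * budget computed in storvsc_drv_init() above. The real inputs
 * (PAGE_SIZE, MAX_MULTIPAGE_BUFFER_PACKET, sizeof(struct vstor_packet))
 * depend on the kernel configuration, so they are parameters here; the
 * point is only the shape of the calculation: strip the index page from
 * the ring, then divide by the u64-aligned worst-case packet size.
 */
static unsigned int ex_requests_per_channel(unsigned int ring_bytes,
					    unsigned int page_bytes,
					    unsigned int worst_case_pkt_bytes)
{
	/* Worst-case packet plus the trailing u64, as in the driver. */
	unsigned int slot = worst_case_pkt_bytes + sizeof(unsigned long long);

	slot = (slot + 7) & ~7u;		/* ALIGN(..., sizeof(u64)) */

	return (ring_bytes - page_bytes) / slot;
}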
*/ + +#define NCR5380_implementation_fields /* none */ + +#define NCR5380_read(reg) in_8(hostdata->io + (reg)) +#define NCR5380_write(reg, value) out_8(hostdata->io + (reg), value) + +#define NCR5380_queue_command sun3scsi_queue_command +#define NCR5380_host_reset sun3scsi_host_reset +#define NCR5380_abort sun3scsi_abort +#define NCR5380_info sun3scsi_info + +#define NCR5380_dma_xfer_len sun3scsi_dma_xfer_len +#define NCR5380_dma_recv_setup sun3scsi_dma_count +#define NCR5380_dma_send_setup sun3scsi_dma_count +#define NCR5380_dma_residual sun3scsi_dma_residual + +#include "NCR5380.h" + +/* dma regs start at regbase + 8, directly after the NCR regs */ +struct sun3_dma_regs { + unsigned short dma_addr_hi; /* vme only */ + unsigned short dma_addr_lo; /* vme only */ + unsigned short dma_count_hi; /* vme only */ + unsigned short dma_count_lo; /* vme only */ + unsigned short udc_data; /* udc dma data reg (obio only) */ + unsigned short udc_addr; /* uda dma addr reg (obio only) */ + unsigned short fifo_data; /* fifo data reg, + * holds extra byte on odd dma reads + */ + unsigned short fifo_count; + unsigned short csr; /* control/status reg */ + unsigned short bpack_hi; /* vme only */ + unsigned short bpack_lo; /* vme only */ + unsigned short ivect; /* vme only */ + unsigned short fifo_count_hi; /* vme only */ +}; + +/* ucd chip specific regs - live in dvma space */ +struct sun3_udc_regs { + unsigned short rsel; /* select regs to load */ + unsigned short addr_hi; /* high word of addr */ + unsigned short addr_lo; /* low word */ + unsigned short count; /* words to be xfer'd */ + unsigned short mode_hi; /* high word of channel mode */ + unsigned short mode_lo; /* low word of channel mode */ +}; + +/* addresses of the udc registers */ +#define UDC_MODE 0x38 +#define UDC_CSR 0x2e /* command/status */ +#define UDC_CHN_HI 0x26 /* chain high word */ +#define UDC_CHN_LO 0x22 /* chain lo word */ +#define UDC_CURA_HI 0x1a /* cur reg A high */ +#define UDC_CURA_LO 0x0a /* cur reg A low */ +#define UDC_CURB_HI 0x12 /* cur reg B high */ +#define UDC_CURB_LO 0x02 /* cur reg B low */ +#define UDC_MODE_HI 0x56 /* mode reg high */ +#define UDC_MODE_LO 0x52 /* mode reg low */ +#define UDC_COUNT 0x32 /* words to xfer */ + +/* some udc commands */ +#define UDC_RESET 0 +#define UDC_CHN_START 0xa0 /* start chain */ +#define UDC_INT_ENABLE 0x32 /* channel 1 int on */ + +/* udc mode words */ +#define UDC_MODE_HIWORD 0x40 +#define UDC_MODE_LSEND 0xc2 +#define UDC_MODE_LRECV 0xd2 + +/* udc reg selections */ +#define UDC_RSEL_SEND 0x282 +#define UDC_RSEL_RECV 0x182 + +/* bits in csr reg */ +#define CSR_DMA_ACTIVE 0x8000 +#define CSR_DMA_CONFLICT 0x4000 +#define CSR_DMA_BUSERR 0x2000 + +#define CSR_FIFO_EMPTY 0x400 /* fifo flushed? 
*/ +#define CSR_SDB_INT 0x200 /* sbc interrupt pending */ +#define CSR_DMA_INT 0x100 /* dma interrupt pending */ + +#define CSR_LEFT 0xc0 +#define CSR_LEFT_3 0xc0 +#define CSR_LEFT_2 0x80 +#define CSR_LEFT_1 0x40 +#define CSR_PACK_ENABLE 0x20 + +#define CSR_DMA_ENABLE 0x10 + +#define CSR_SEND 0x8 /* 1 = send 0 = recv */ +#define CSR_FIFO 0x2 /* reset fifo */ +#define CSR_INTR 0x4 /* interrupt enable */ +#define CSR_SCSI 0x1 + +#define VME_DATA24 0x3d00 + +extern int sun3_map_test(unsigned long, char *); + +static int setup_can_queue = -1; +module_param(setup_can_queue, int, 0); +static int setup_cmd_per_lun = -1; +module_param(setup_cmd_per_lun, int, 0); +static int setup_sg_tablesize = -1; +module_param(setup_sg_tablesize, int, 0); +static int setup_hostid = -1; +module_param(setup_hostid, int, 0); + +/* ms to wait after hitting dma regs */ +#define SUN3_DMA_DELAY 10 + +/* dvma buffer to allocate -- 32k should hopefully be more than sufficient */ +#define SUN3_DVMA_BUFSIZE 0xe000 + +static struct scsi_cmnd *sun3_dma_setup_done; +static volatile struct sun3_dma_regs *dregs; +static struct sun3_udc_regs *udc_regs; +static unsigned char *sun3_dma_orig_addr; +static unsigned long sun3_dma_orig_count; +static int sun3_dma_active; +static unsigned long last_residual; + +#ifndef SUN3_SCSI_VME +/* dma controller register access functions */ + +static inline unsigned short sun3_udc_read(unsigned char reg) +{ + unsigned short ret; + + dregs->udc_addr = UDC_CSR; + udelay(SUN3_DMA_DELAY); + ret = dregs->udc_data; + udelay(SUN3_DMA_DELAY); + + return ret; +} + +static inline void sun3_udc_write(unsigned short val, unsigned char reg) +{ + dregs->udc_addr = reg; + udelay(SUN3_DMA_DELAY); + dregs->udc_data = val; + udelay(SUN3_DMA_DELAY); +} +#endif + +// safe bits for the CSR +#define CSR_GOOD 0x060f + +static irqreturn_t scsi_sun3_intr(int irq, void *dev) +{ + struct Scsi_Host *instance = dev; + unsigned short csr = dregs->csr; + int handled = 0; + +#ifdef SUN3_SCSI_VME + dregs->csr &= ~CSR_DMA_ENABLE; +#endif + + if(csr & ~CSR_GOOD) { + if (csr & CSR_DMA_BUSERR) + shost_printk(KERN_ERR, instance, "bus error in DMA\n"); + if (csr & CSR_DMA_CONFLICT) + shost_printk(KERN_ERR, instance, "DMA conflict\n"); + handled = 1; + } + + if(csr & (CSR_SDB_INT | CSR_DMA_INT)) { + NCR5380_intr(irq, dev); + handled = 1; + } + + return IRQ_RETVAL(handled); +} + +/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ +static int sun3scsi_dma_setup(struct NCR5380_hostdata *hostdata, + unsigned char *data, int count, int write_flag) +{ + void *addr; + + if(sun3_dma_orig_addr != NULL) + dvma_unmap(sun3_dma_orig_addr); + +#ifdef SUN3_SCSI_VME + addr = (void *)dvma_map_vme((unsigned long) data, count); +#else + addr = (void *)dvma_map((unsigned long) data, count); +#endif + + sun3_dma_orig_addr = addr; + sun3_dma_orig_count = count; + +#ifndef SUN3_SCSI_VME + dregs->fifo_count = 0; + sun3_udc_write(UDC_RESET, UDC_CSR); + + /* reset fifo */ + dregs->csr &= ~CSR_FIFO; + dregs->csr |= CSR_FIFO; +#endif + + /* set direction */ + if(write_flag) + dregs->csr |= CSR_SEND; + else + dregs->csr &= ~CSR_SEND; + +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_PACK_ENABLE; + + dregs->dma_addr_hi = ((unsigned long)addr >> 16); + dregs->dma_addr_lo = ((unsigned long)addr & 0xffff); + + dregs->dma_count_hi = 0; + dregs->dma_count_lo = 0; + dregs->fifo_count_hi = 0; + dregs->fifo_count = 0; +#else + /* byte count for fifo */ + dregs->fifo_count = count; + + sun3_udc_write(UDC_RESET, UDC_CSR); + + /* reset fifo */ + dregs->csr 
&= ~CSR_FIFO; + dregs->csr |= CSR_FIFO; + + if(dregs->fifo_count != count) { + shost_printk(KERN_ERR, hostdata->host, + "FIFO mismatch %04x not %04x\n", + dregs->fifo_count, (unsigned int) count); + NCR5380_dprint(NDEBUG_DMA, hostdata->host); + } + + /* setup udc */ + udc_regs->addr_hi = (((unsigned long)(addr) & 0xff0000) >> 8); + udc_regs->addr_lo = ((unsigned long)(addr) & 0xffff); + udc_regs->count = count/2; /* count in words */ + udc_regs->mode_hi = UDC_MODE_HIWORD; + if(write_flag) { + if(count & 1) + udc_regs->count++; + udc_regs->mode_lo = UDC_MODE_LSEND; + udc_regs->rsel = UDC_RSEL_SEND; + } else { + udc_regs->mode_lo = UDC_MODE_LRECV; + udc_regs->rsel = UDC_RSEL_RECV; + } + + /* announce location of regs block */ + sun3_udc_write(((dvma_vtob(udc_regs) & 0xff0000) >> 8), + UDC_CHN_HI); + + sun3_udc_write((dvma_vtob(udc_regs) & 0xffff), UDC_CHN_LO); + + /* set dma master on */ + sun3_udc_write(0xd, UDC_MODE); + + /* interrupt enable */ + sun3_udc_write(UDC_INT_ENABLE, UDC_CSR); +#endif + + return count; + +} + +static int sun3scsi_dma_count(struct NCR5380_hostdata *hostdata, + unsigned char *data, int count) +{ + return count; +} + +static inline int sun3scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata, + unsigned char *data, int count) +{ + return sun3scsi_dma_setup(hostdata, data, count, 0); +} + +static inline int sun3scsi_dma_send_setup(struct NCR5380_hostdata *hostdata, + unsigned char *data, int count) +{ + return sun3scsi_dma_setup(hostdata, data, count, 1); +} + +static int sun3scsi_dma_residual(struct NCR5380_hostdata *hostdata) +{ + return last_residual; +} + +static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, + struct scsi_cmnd *cmd) +{ + int wanted_len = NCR5380_to_ncmd(cmd)->this_residual; + + if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) + return 0; + + return wanted_len; +} + +static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) +{ +#ifdef SUN3_SCSI_VME + unsigned short csr; + + csr = dregs->csr; + + dregs->dma_count_hi = (sun3_dma_orig_count >> 16); + dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff); + + dregs->fifo_count_hi = (sun3_dma_orig_count >> 16); + dregs->fifo_count = (sun3_dma_orig_count & 0xffff); + +/* if(!(csr & CSR_DMA_ENABLE)) + * dregs->csr |= CSR_DMA_ENABLE; + */ +#else + sun3_udc_write(UDC_CHN_START, UDC_CSR); +#endif + + return 0; +} + +/* clean up after our dma is done */ +static int sun3scsi_dma_finish(enum dma_data_direction data_dir) +{ + const bool write_flag = data_dir == DMA_TO_DEVICE; + unsigned short __maybe_unused count; + unsigned short fifo; + int ret = 0; + + sun3_dma_active = 0; + +#ifdef SUN3_SCSI_VME + dregs->csr &= ~CSR_DMA_ENABLE; + + fifo = dregs->fifo_count; + if (write_flag) { + if ((fifo > 0) && (fifo < sun3_dma_orig_count)) + fifo++; + } + + last_residual = fifo; + /* empty bytes from the fifo which didn't make it */ + if ((!write_flag) && (dregs->csr & CSR_LEFT)) { + unsigned char *vaddr; + + vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr); + + vaddr += (sun3_dma_orig_count - fifo); + vaddr--; + + switch (dregs->csr & CSR_LEFT) { + case CSR_LEFT_3: + *vaddr = (dregs->bpack_lo & 0xff00) >> 8; + vaddr--; + fallthrough; + + case CSR_LEFT_2: + *vaddr = (dregs->bpack_hi & 0x00ff); + vaddr--; + fallthrough; + + case CSR_LEFT_1: + *vaddr = (dregs->bpack_hi & 0xff00) >> 8; + break; + } + } +#else + // check to empty the fifo on a read + if(!write_flag) { + int tmo = 20000; /* .2 sec */ + + while(1) { + if(dregs->csr & CSR_FIFO_EMPTY) + 
break; + + if(--tmo <= 0) { + printk("sun3scsi: fifo failed to empty!\n"); + return 1; + } + udelay(10); + } + } + + dregs->udc_addr = 0x32; + udelay(SUN3_DMA_DELAY); + count = 2 * dregs->udc_data; + udelay(SUN3_DMA_DELAY); + + fifo = dregs->fifo_count; + last_residual = fifo; + + /* empty bytes from the fifo which didn't make it */ + if((!write_flag) && (count - fifo) == 2) { + unsigned short data; + unsigned char *vaddr; + + data = dregs->fifo_data; + vaddr = (unsigned char *)dvma_btov(sun3_dma_orig_addr); + + vaddr += (sun3_dma_orig_count - fifo); + + vaddr[-2] = (data & 0xff00) >> 8; + vaddr[-1] = (data & 0xff); + } +#endif + + dvma_unmap(sun3_dma_orig_addr); + sun3_dma_orig_addr = NULL; + +#ifdef SUN3_SCSI_VME + dregs->dma_addr_hi = 0; + dregs->dma_addr_lo = 0; + dregs->dma_count_hi = 0; + dregs->dma_count_lo = 0; + + dregs->fifo_count = 0; + dregs->fifo_count_hi = 0; + + dregs->csr &= ~CSR_SEND; +/* dregs->csr |= CSR_DMA_ENABLE; */ +#else + sun3_udc_write(UDC_RESET, UDC_CSR); + dregs->fifo_count = 0; + dregs->csr &= ~CSR_SEND; + + /* reset fifo */ + dregs->csr &= ~CSR_FIFO; + dregs->csr |= CSR_FIFO; +#endif + + sun3_dma_setup_done = NULL; + + return ret; + +} + +#include "NCR5380.c" + +#ifdef SUN3_SCSI_VME +#define SUN3_SCSI_NAME "Sun3 NCR5380 VME SCSI" +#define DRV_MODULE_NAME "sun3_scsi_vme" +#else +#define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI" +#define DRV_MODULE_NAME "sun3_scsi" +#endif + +#define PFX DRV_MODULE_NAME ": " + +static struct scsi_host_template sun3_scsi_template = { + .module = THIS_MODULE, + .proc_name = DRV_MODULE_NAME, + .name = SUN3_SCSI_NAME, + .info = sun3scsi_info, + .queuecommand = sun3scsi_queue_command, + .eh_abort_handler = sun3scsi_abort, + .eh_host_reset_handler = sun3scsi_host_reset, + .can_queue = 16, + .this_id = 7, + .sg_tablesize = 1, + .cmd_per_lun = 2, + .dma_boundary = PAGE_SIZE - 1, + .cmd_size = sizeof(struct NCR5380_cmd), +}; + +static int __init sun3_scsi_probe(struct platform_device *pdev) +{ + struct Scsi_Host *instance; + struct NCR5380_hostdata *hostdata; + int error; + struct resource *irq, *mem; + void __iomem *ioaddr; + int host_flags = 0; +#ifdef SUN3_SCSI_VME + int i; +#endif + + if (setup_can_queue > 0) + sun3_scsi_template.can_queue = setup_can_queue; + if (setup_cmd_per_lun > 0) + sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun; + if (setup_sg_tablesize > 0) + sun3_scsi_template.sg_tablesize = setup_sg_tablesize; + if (setup_hostid >= 0) + sun3_scsi_template.this_id = setup_hostid & 7; + +#ifdef SUN3_SCSI_VME + ioaddr = NULL; + for (i = 0; i < 2; i++) { + unsigned char x; + + irq = platform_get_resource(pdev, IORESOURCE_IRQ, i); + mem = platform_get_resource(pdev, IORESOURCE_MEM, i); + if (!irq || !mem) + break; + + ioaddr = sun3_ioremap(mem->start, resource_size(mem), + SUN3_PAGE_TYPE_VME16); + dregs = (struct sun3_dma_regs *)(ioaddr + 8); + + if (sun3_map_test((unsigned long)dregs, &x)) { + unsigned short oldcsr; + + oldcsr = dregs->csr; + dregs->csr = 0; + udelay(SUN3_DMA_DELAY); + if (dregs->csr == 0x1400) + break; + + dregs->csr = oldcsr; + } + + iounmap(ioaddr); + ioaddr = NULL; + } + if (!ioaddr) + return -ENODEV; +#else + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!irq || !mem) + return -ENODEV; + + ioaddr = ioremap(mem->start, resource_size(mem)); + dregs = (struct sun3_dma_regs *)(ioaddr + 8); + + udc_regs = dvma_malloc(sizeof(struct sun3_udc_regs)); + if (!udc_regs) { + pr_err(PFX "couldn't allocate DVMA memory!\n"); + iounmap(ioaddr); + return 
-ENOMEM; + } +#endif + + instance = scsi_host_alloc(&sun3_scsi_template, + sizeof(struct NCR5380_hostdata)); + if (!instance) { + error = -ENOMEM; + goto fail_alloc; + } + + instance->irq = irq->start; + + hostdata = shost_priv(instance); + hostdata->base = mem->start; + hostdata->io = ioaddr; + + error = NCR5380_init(instance, host_flags); + if (error) + goto fail_init; + + error = request_irq(instance->irq, scsi_sun3_intr, 0, + "NCR5380", instance); + if (error) { + pr_err(PFX "scsi%d: IRQ %d not free, bailing out\n", + instance->host_no, instance->irq); + goto fail_irq; + } + + dregs->csr = 0; + udelay(SUN3_DMA_DELAY); + dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; + udelay(SUN3_DMA_DELAY); + dregs->fifo_count = 0; +#ifdef SUN3_SCSI_VME + dregs->fifo_count_hi = 0; + dregs->dma_addr_hi = 0; + dregs->dma_addr_lo = 0; + dregs->dma_count_hi = 0; + dregs->dma_count_lo = 0; + + dregs->ivect = VME_DATA24 | (instance->irq & 0xff); +#endif + + NCR5380_maybe_reset_bus(instance); + + error = scsi_add_host(instance, NULL); + if (error) + goto fail_host; + + platform_set_drvdata(pdev, instance); + + scsi_scan_host(instance); + return 0; + +fail_host: + free_irq(instance->irq, instance); +fail_irq: + NCR5380_exit(instance); +fail_init: + scsi_host_put(instance); +fail_alloc: + if (udc_regs) + dvma_free(udc_regs); + iounmap(ioaddr); + return error; +} + +static int __exit sun3_scsi_remove(struct platform_device *pdev) +{ + struct Scsi_Host *instance = platform_get_drvdata(pdev); + struct NCR5380_hostdata *hostdata = shost_priv(instance); + void __iomem *ioaddr = hostdata->io; + + scsi_remove_host(instance); + free_irq(instance->irq, instance); + NCR5380_exit(instance); + scsi_host_put(instance); + if (udc_regs) + dvma_free(udc_regs); + iounmap(ioaddr); + return 0; +} + +static struct platform_driver sun3_scsi_driver = { + .remove = __exit_p(sun3_scsi_remove), + .driver = { + .name = DRV_MODULE_NAME, + }, +}; + +module_platform_driver_probe(sun3_scsi_driver, sun3_scsi_probe); + +MODULE_ALIAS("platform:" DRV_MODULE_NAME); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c new file mode 100644 index 000000000..1eeece6e2 --- /dev/null +++ b/drivers/scsi/sun3_scsi_vme.c @@ -0,0 +1,3 @@ +#define SUN3_SCSI_VME + +#include "sun3_scsi.c" diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c new file mode 100644 index 000000000..30f67cbf4 --- /dev/null +++ b/drivers/scsi/sun3x_esp.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* sun3x_esp.c: ESP front-end for Sun3x systems. 
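/*
 * Illustrative sketch (not part of the patch): sun3_scsi_vme.c above is
 * the entire VME variant of this driver. It only defines SUN3_SCSI_VME
 * and re-includes sun3_scsi.c, so every #ifdef SUN3_SCSI_VME block in
 * that file selects OBIO or VME register programming at compile time.
 * The same pattern in miniature, with hypothetical names:
 */
/* variant_core.c.inc */
#ifdef VARIANT_VME
#define VARIANT_NAME "demo VME"
#else
#define VARIANT_NAME "demo OBIO"
#endif

static const char *variant_name(void)
{
	return VARIANT_NAME;
}

/*
 * variant_vme.c would then contain only:
 *	#define VARIANT_VME
 *	#include "variant_core.c.inc"
 * giving a second object file built from the same source.
 */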
+ * + * Copyright (C) 2007,2008 Thomas Bogendoerfer (tsbogend@alpha.franken.de) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* DMA controller reg offsets */ +#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */ +#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */ +#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */ +#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */ + +#include + +#include "esp_scsi.h" + +#define DRV_MODULE_NAME "sun3x_esp" +#define PFX DRV_MODULE_NAME ": " +#define DRV_VERSION "1.000" +#define DRV_MODULE_RELDATE "Nov 1, 2007" + +/* + * m68k always assumes readl/writel operate on little endian + * mmio space; this is wrong at least for Sun3x, so we + * need to workaround this until a proper way is found + */ +#if 0 +#define dma_read32(REG) \ + readl(esp->dma_regs + (REG)) +#define dma_write32(VAL, REG) \ + writel((VAL), esp->dma_regs + (REG)) +#else +#define dma_read32(REG) \ + *(volatile u32 *)(esp->dma_regs + (REG)) +#define dma_write32(VAL, REG) \ + do { *(volatile u32 *)(esp->dma_regs + (REG)) = (VAL); } while (0) +#endif + +static void sun3x_esp_write8(struct esp *esp, u8 val, unsigned long reg) +{ + writeb(val, esp->regs + (reg * 4UL)); +} + +static u8 sun3x_esp_read8(struct esp *esp, unsigned long reg) +{ + return readb(esp->regs + (reg * 4UL)); +} + +static int sun3x_esp_irq_pending(struct esp *esp) +{ + if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)) + return 1; + return 0; +} + +static void sun3x_esp_reset_dma(struct esp *esp) +{ + u32 val; + + val = dma_read32(DMA_CSR); + dma_write32(val | DMA_RST_SCSI, DMA_CSR); + dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); + + /* Enable interrupts. 
*/ + val = dma_read32(DMA_CSR); + dma_write32(val | DMA_INT_ENAB, DMA_CSR); +} + +static void sun3x_esp_dma_drain(struct esp *esp) +{ + u32 csr; + int lim; + + csr = dma_read32(DMA_CSR); + if (!(csr & DMA_FIFO_ISDRAIN)) + return; + + dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR); + + lim = 1000; + while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) { + if (--lim == 0) { + printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n", + esp->host->unique_id); + break; + } + udelay(1); + } +} + +static void sun3x_esp_dma_invalidate(struct esp *esp) +{ + u32 val; + int lim; + + lim = 1000; + while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) { + if (--lim == 0) { + printk(KERN_ALERT PFX "esp%d: DMA will not " + "invalidate!\n", esp->host->unique_id); + break; + } + udelay(1); + } + + val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB); + val |= DMA_FIFO_INV; + dma_write32(val, DMA_CSR); + val &= ~DMA_FIFO_INV; + dma_write32(val, DMA_CSR); +} + +static void sun3x_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, + u32 dma_count, int write, u8 cmd) +{ + u32 csr; + + BUG_ON(!(cmd & ESP_CMD_DMA)); + + sun3x_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); + sun3x_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); + csr = dma_read32(DMA_CSR); + csr |= DMA_ENABLE; + if (write) + csr |= DMA_ST_WRITE; + else + csr &= ~DMA_ST_WRITE; + dma_write32(csr, DMA_CSR); + dma_write32(addr, DMA_ADDR); + + scsi_esp_cmd(esp, cmd); +} + +static int sun3x_esp_dma_error(struct esp *esp) +{ + u32 csr = dma_read32(DMA_CSR); + + if (csr & DMA_HNDL_ERROR) + return 1; + + return 0; +} + +static const struct esp_driver_ops sun3x_esp_ops = { + .esp_write8 = sun3x_esp_write8, + .esp_read8 = sun3x_esp_read8, + .irq_pending = sun3x_esp_irq_pending, + .reset_dma = sun3x_esp_reset_dma, + .dma_drain = sun3x_esp_dma_drain, + .dma_invalidate = sun3x_esp_dma_invalidate, + .send_dma_cmd = sun3x_esp_send_dma_cmd, + .dma_error = sun3x_esp_dma_error, +}; + +static int esp_sun3x_probe(struct platform_device *dev) +{ + const struct scsi_host_template *tpnt = &scsi_esp_template; + struct Scsi_Host *host; + struct esp *esp; + struct resource *res; + int err = -ENOMEM; + + host = scsi_host_alloc(tpnt, sizeof(struct esp)); + if (!host) + goto fail; + + host->max_id = 8; + esp = shost_priv(host); + + esp->host = host; + esp->dev = &dev->dev; + esp->ops = &sun3x_esp_ops; + + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!res || !res->start) + goto fail_unlink; + + esp->regs = ioremap(res->start, 0x20); + if (!esp->regs) + goto fail_unmap_regs; + + res = platform_get_resource(dev, IORESOURCE_MEM, 1); + if (!res || !res->start) + goto fail_unmap_regs; + + esp->dma_regs = ioremap(res->start, 0x10); + + esp->command_block = dma_alloc_coherent(esp->dev, 16, + &esp->command_block_dma, + GFP_KERNEL); + if (!esp->command_block) + goto fail_unmap_regs_dma; + + host->irq = err = platform_get_irq(dev, 0); + if (err < 0) + goto fail_unmap_command_block; + err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, + "SUN3X ESP", esp); + if (err < 0) + goto fail_unmap_command_block; + + esp->scsi_id = 7; + esp->host->this_id = esp->scsi_id; + esp->scsi_id_mask = (1 << esp->scsi_id); + esp->cfreq = 20000000; + + dev_set_drvdata(&dev->dev, esp); + + err = scsi_esp_register(esp); + if (err) + goto fail_free_irq; + + return 0; + +fail_free_irq: + free_irq(host->irq, esp); +fail_unmap_command_block: + dma_free_coherent(esp->dev, 16, + esp->command_block, + esp->command_block_dma); +fail_unmap_regs_dma: + iounmap(esp->dma_regs); +fail_unmap_regs: + 
iounmap(esp->regs); +fail_unlink: + scsi_host_put(host); +fail: + return err; +} + +static int esp_sun3x_remove(struct platform_device *dev) +{ + struct esp *esp = dev_get_drvdata(&dev->dev); + unsigned int irq = esp->host->irq; + u32 val; + + scsi_esp_unregister(esp); + + /* Disable interrupts. */ + val = dma_read32(DMA_CSR); + dma_write32(val & ~DMA_INT_ENAB, DMA_CSR); + + free_irq(irq, esp); + dma_free_coherent(esp->dev, 16, + esp->command_block, + esp->command_block_dma); + + scsi_host_put(esp->host); + + return 0; +} + +static struct platform_driver esp_sun3x_driver = { + .probe = esp_sun3x_probe, + .remove = esp_sun3x_remove, + .driver = { + .name = "sun3x_esp", + }, +}; +module_platform_driver(esp_sun3x_driver); + +MODULE_DESCRIPTION("Sun3x ESP SCSI driver"); +MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_ALIAS("platform:sun3x_esp"); diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c new file mode 100644 index 000000000..afa9d02a3 --- /dev/null +++ b/drivers/scsi/sun_esp.c @@ -0,0 +1,615 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* sun_esp.c: ESP front-end for Sparc SBUS systems. + * + * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "esp_scsi.h" + +#define DRV_MODULE_NAME "sun_esp" +#define PFX DRV_MODULE_NAME ": " +#define DRV_VERSION "1.100" +#define DRV_MODULE_RELDATE "August 27, 2008" + +#define dma_read32(REG) \ + sbus_readl(esp->dma_regs + (REG)) +#define dma_write32(VAL, REG) \ + sbus_writel((VAL), esp->dma_regs + (REG)) + +/* DVMA chip revisions */ +enum dvma_rev { + dvmarev0, + dvmaesc1, + dvmarev1, + dvmarev2, + dvmarev3, + dvmarevplus, + dvmahme +}; + +static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of) +{ + esp->dma = dma_of; + + esp->dma_regs = of_ioremap(&dma_of->resource[0], 0, + resource_size(&dma_of->resource[0]), + "espdma"); + if (!esp->dma_regs) + return -ENOMEM; + + switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) { + case DMA_VERS0: + esp->dmarev = dvmarev0; + break; + case DMA_ESCV1: + esp->dmarev = dvmaesc1; + break; + case DMA_VERS1: + esp->dmarev = dvmarev1; + break; + case DMA_VERS2: + esp->dmarev = dvmarev2; + break; + case DMA_VERHME: + esp->dmarev = dvmahme; + break; + case DMA_VERSPLUS: + esp->dmarev = dvmarevplus; + break; + } + + return 0; + +} + +static int esp_sbus_map_regs(struct esp *esp, int hme) +{ + struct platform_device *op = to_platform_device(esp->dev); + struct resource *res; + + /* On HME, two reg sets exist, first is DVMA, + * second is ESP registers. 
+ */ + if (hme) + res = &op->resource[1]; + else + res = &op->resource[0]; + + esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP"); + if (!esp->regs) + return -ENOMEM; + + return 0; +} + +static int esp_sbus_map_command_block(struct esp *esp) +{ + esp->command_block = dma_alloc_coherent(esp->dev, 16, + &esp->command_block_dma, + GFP_KERNEL); + if (!esp->command_block) + return -ENOMEM; + return 0; +} + +static int esp_sbus_register_irq(struct esp *esp) +{ + struct Scsi_Host *host = esp->host; + struct platform_device *op = to_platform_device(esp->dev); + + host->irq = op->archdata.irqs[0]; + return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp); +} + +static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma) +{ + struct platform_device *op = to_platform_device(esp->dev); + struct device_node *dp; + + dp = op->dev.of_node; + esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff); + if (esp->scsi_id != 0xff) + goto done; + + esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff); + if (esp->scsi_id != 0xff) + goto done; + + esp->scsi_id = of_getintprop_default(espdma->dev.of_node, + "scsi-initiator-id", 7); + +done: + esp->host->this_id = esp->scsi_id; + esp->scsi_id_mask = (1 << esp->scsi_id); +} + +static void esp_get_differential(struct esp *esp) +{ + struct platform_device *op = to_platform_device(esp->dev); + struct device_node *dp; + + dp = op->dev.of_node; + if (of_property_read_bool(dp, "differential")) + esp->flags |= ESP_FLAG_DIFFERENTIAL; + else + esp->flags &= ~ESP_FLAG_DIFFERENTIAL; +} + +static void esp_get_clock_params(struct esp *esp) +{ + struct platform_device *op = to_platform_device(esp->dev); + struct device_node *bus_dp, *dp; + int fmhz; + + dp = op->dev.of_node; + bus_dp = dp->parent; + + fmhz = of_getintprop_default(dp, "clock-frequency", 0); + if (fmhz == 0) + fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0); + + esp->cfreq = fmhz; +} + +static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of) +{ + struct device_node *dma_dp = dma_of->dev.of_node; + struct platform_device *op = to_platform_device(esp->dev); + struct device_node *dp; + u8 bursts, val; + + dp = op->dev.of_node; + bursts = of_getintprop_default(dp, "burst-sizes", 0xff); + val = of_getintprop_default(dma_dp, "burst-sizes", 0xff); + if (val != 0xff) + bursts &= val; + + val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff); + if (val != 0xff) + bursts &= val; + + if (bursts == 0xff || + (bursts & DMA_BURST16) == 0 || + (bursts & DMA_BURST32) == 0) + bursts = (DMA_BURST32 - 1); + + esp->bursts = bursts; +} + +static void esp_sbus_get_props(struct esp *esp, struct platform_device *espdma) +{ + esp_get_scsi_id(esp, espdma); + esp_get_differential(esp); + esp_get_clock_params(esp); + esp_get_bursts(esp, espdma); +} + +static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg) +{ + sbus_writeb(val, esp->regs + (reg * 4UL)); +} + +static u8 sbus_esp_read8(struct esp *esp, unsigned long reg) +{ + return sbus_readb(esp->regs + (reg * 4UL)); +} + +static int sbus_esp_irq_pending(struct esp *esp) +{ + if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)) + return 1; + return 0; +} + +static void sbus_esp_reset_dma(struct esp *esp) +{ + int can_do_burst16, can_do_burst32, can_do_burst64; + int can_do_sbus64, lim; + struct platform_device *op = to_platform_device(esp->dev); + u32 val; + + can_do_burst16 = (esp->bursts & DMA_BURST16) != 0; + can_do_burst32 = (esp->bursts & DMA_BURST32) != 0; + 
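/*
 * Illustrative sketch (not part of the patch): the burst-size narrowing
 * done by esp_get_bursts() above. Each "burst-sizes" property is a bit
 * mask and 0xff means "property not present" (of_getintprop_default()'s
 * fallback), so the ESP node's mask can only be narrowed by its DMA
 * parent nodes. The DMA_BURST16/DMA_BURST32 bit values come from the
 * sparc DMA header, so they are passed in here rather than assumed.
 */
static unsigned char ex_combine_bursts(unsigned char esp_mask,
				       unsigned char dma_mask,
				       unsigned char bus_mask,
				       unsigned char burst16_bit,
				       unsigned char burst32_bit)
{
	unsigned char bursts = esp_mask;

	if (dma_mask != 0xff)
		bursts &= dma_mask;
	if (bus_mask != 0xff)
		bursts &= bus_mask;

	/* No usable mask, or 16/32-byte bursts missing: fall back. */
	if (bursts == 0xff || !(bursts & burst16_bit) || !(bursts & burst32_bit))
		bursts = burst32_bit - 1;	/* everything below DMA_BURST32 */

	return bursts;
}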
can_do_burst64 = 0; + can_do_sbus64 = 0; + if (sbus_can_dma_64bit()) + can_do_sbus64 = 1; + if (sbus_can_burst64()) + can_do_burst64 = (esp->bursts & DMA_BURST64) != 0; + + /* Put the DVMA into a known state. */ + if (esp->dmarev != dvmahme) { + val = dma_read32(DMA_CSR); + dma_write32(val | DMA_RST_SCSI, DMA_CSR); + dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); + } + switch (esp->dmarev) { + case dvmahme: + dma_write32(DMA_RESET_FAS366, DMA_CSR); + dma_write32(DMA_RST_SCSI, DMA_CSR); + + esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS | + DMA_SCSI_DISAB | DMA_INT_ENAB); + + esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE | + DMA_BRST_SZ); + + if (can_do_burst64) + esp->prev_hme_dmacsr |= DMA_BRST64; + else if (can_do_burst32) + esp->prev_hme_dmacsr |= DMA_BRST32; + + if (can_do_sbus64) { + esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64; + sbus_set_sbus64(&op->dev, esp->bursts); + } + + lim = 1000; + while (dma_read32(DMA_CSR) & DMA_PEND_READ) { + if (--lim == 0) { + printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ " + "will not clear!\n", + esp->host->unique_id); + break; + } + udelay(1); + } + + dma_write32(0, DMA_CSR); + dma_write32(esp->prev_hme_dmacsr, DMA_CSR); + + dma_write32(0, DMA_ADDR); + break; + + case dvmarev2: + if (esp->rev != ESP100) { + val = dma_read32(DMA_CSR); + dma_write32(val | DMA_3CLKS, DMA_CSR); + } + break; + + case dvmarev3: + val = dma_read32(DMA_CSR); + val &= ~DMA_3CLKS; + val |= DMA_2CLKS; + if (can_do_burst32) { + val &= ~DMA_BRST_SZ; + val |= DMA_BRST32; + } + dma_write32(val, DMA_CSR); + break; + + case dvmaesc1: + val = dma_read32(DMA_CSR); + val |= DMA_ADD_ENABLE; + val &= ~DMA_BCNT_ENAB; + if (!can_do_burst32 && can_do_burst16) { + val |= DMA_ESC_BURST; + } else { + val &= ~(DMA_ESC_BURST); + } + dma_write32(val, DMA_CSR); + break; + + default: + break; + } + + /* Enable interrupts. */ + val = dma_read32(DMA_CSR); + dma_write32(val | DMA_INT_ENAB, DMA_CSR); +} + +static void sbus_esp_dma_drain(struct esp *esp) +{ + u32 csr; + int lim; + + if (esp->dmarev == dvmahme) + return; + + csr = dma_read32(DMA_CSR); + if (!(csr & DMA_FIFO_ISDRAIN)) + return; + + if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1) + dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR); + + lim = 1000; + while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) { + if (--lim == 0) { + printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n", + esp->host->unique_id); + break; + } + udelay(1); + } +} + +static void sbus_esp_dma_invalidate(struct esp *esp) +{ + if (esp->dmarev == dvmahme) { + dma_write32(DMA_RST_SCSI, DMA_CSR); + + esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr | + (DMA_PARITY_OFF | DMA_2CLKS | + DMA_SCSI_DISAB | DMA_INT_ENAB)) & + ~(DMA_ST_WRITE | DMA_ENABLE)); + + dma_write32(0, DMA_CSR); + dma_write32(esp->prev_hme_dmacsr, DMA_CSR); + + /* This is necessary to avoid having the SCSI channel + * engine lock up on us. 
+ */ + dma_write32(0, DMA_ADDR); + } else { + u32 val; + int lim; + + lim = 1000; + while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) { + if (--lim == 0) { + printk(KERN_ALERT PFX "esp%d: DMA will not " + "invalidate!\n", esp->host->unique_id); + break; + } + udelay(1); + } + + val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB); + val |= DMA_FIFO_INV; + dma_write32(val, DMA_CSR); + val &= ~DMA_FIFO_INV; + dma_write32(val, DMA_CSR); + } +} + +static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, + u32 dma_count, int write, u8 cmd) +{ + u32 csr; + + BUG_ON(!(cmd & ESP_CMD_DMA)); + + sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); + sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); + if (esp->rev == FASHME) { + sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO); + sbus_esp_write8(esp, 0, FAS_RHI); + + scsi_esp_cmd(esp, cmd); + + csr = esp->prev_hme_dmacsr; + csr |= DMA_SCSI_DISAB | DMA_ENABLE; + if (write) + csr |= DMA_ST_WRITE; + else + csr &= ~DMA_ST_WRITE; + esp->prev_hme_dmacsr = csr; + + dma_write32(dma_count, DMA_COUNT); + dma_write32(addr, DMA_ADDR); + dma_write32(csr, DMA_CSR); + } else { + csr = dma_read32(DMA_CSR); + csr |= DMA_ENABLE; + if (write) + csr |= DMA_ST_WRITE; + else + csr &= ~DMA_ST_WRITE; + dma_write32(csr, DMA_CSR); + if (esp->dmarev == dvmaesc1) { + u32 end = PAGE_ALIGN(addr + dma_count + 16U); + dma_write32(end - addr, DMA_COUNT); + } + dma_write32(addr, DMA_ADDR); + + scsi_esp_cmd(esp, cmd); + } + +} + +static int sbus_esp_dma_error(struct esp *esp) +{ + u32 csr = dma_read32(DMA_CSR); + + if (csr & DMA_HNDL_ERROR) + return 1; + + return 0; +} + +static const struct esp_driver_ops sbus_esp_ops = { + .esp_write8 = sbus_esp_write8, + .esp_read8 = sbus_esp_read8, + .irq_pending = sbus_esp_irq_pending, + .reset_dma = sbus_esp_reset_dma, + .dma_drain = sbus_esp_dma_drain, + .dma_invalidate = sbus_esp_dma_invalidate, + .send_dma_cmd = sbus_esp_send_dma_cmd, + .dma_error = sbus_esp_dma_error, +}; + +static int esp_sbus_probe_one(struct platform_device *op, + struct platform_device *espdma, int hme) +{ + const struct scsi_host_template *tpnt = &scsi_esp_template; + struct Scsi_Host *host; + struct esp *esp; + int err; + + host = scsi_host_alloc(tpnt, sizeof(struct esp)); + + err = -ENOMEM; + if (!host) + goto fail; + + host->max_id = (hme ? 16 : 8); + esp = shost_priv(host); + + esp->host = host; + esp->dev = &op->dev; + esp->ops = &sbus_esp_ops; + + if (hme) + esp->flags |= ESP_FLAG_WIDE_CAPABLE; + + err = esp_sbus_setup_dma(esp, espdma); + if (err < 0) + goto fail_unlink; + + err = esp_sbus_map_regs(esp, hme); + if (err < 0) + goto fail_unlink; + + err = esp_sbus_map_command_block(esp); + if (err < 0) + goto fail_unmap_regs; + + err = esp_sbus_register_irq(esp); + if (err < 0) + goto fail_unmap_command_block; + + esp_sbus_get_props(esp, espdma); + + /* Before we try to touch the ESP chip, ESC1 dma can + * come up with the reset bit set, so make sure that + * is clear first. + */ + if (esp->dmarev == dvmaesc1) { + u32 val = dma_read32(DMA_CSR); + + dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); + } + + dev_set_drvdata(&op->dev, esp); + + err = scsi_esp_register(esp); + if (err) + goto fail_free_irq; + + return 0; + +fail_free_irq: + free_irq(host->irq, esp); +fail_unmap_command_block: + dma_free_coherent(&op->dev, 16, + esp->command_block, + esp->command_block_dma); +fail_unmap_regs: + of_iounmap(&op->resource[(hme ? 
1 : 0)], esp->regs, SBUS_ESP_REG_SIZE); +fail_unlink: + scsi_host_put(host); +fail: + return err; +} + +static int esp_sbus_probe(struct platform_device *op) +{ + struct device_node *dma_node = NULL; + struct device_node *dp = op->dev.of_node; + struct platform_device *dma_of = NULL; + int hme = 0; + int ret; + + if (of_node_name_eq(dp->parent, "espdma") || + of_node_name_eq(dp->parent, "dma")) + dma_node = dp->parent; + else if (of_node_name_eq(dp, "SUNW,fas")) { + dma_node = op->dev.of_node; + hme = 1; + } + if (dma_node) + dma_of = of_find_device_by_node(dma_node); + if (!dma_of) + return -ENODEV; + + ret = esp_sbus_probe_one(op, dma_of, hme); + if (ret) + put_device(&dma_of->dev); + + return ret; +} + +static int esp_sbus_remove(struct platform_device *op) +{ + struct esp *esp = dev_get_drvdata(&op->dev); + struct platform_device *dma_of = esp->dma; + unsigned int irq = esp->host->irq; + bool is_hme; + u32 val; + + scsi_esp_unregister(esp); + + /* Disable interrupts. */ + val = dma_read32(DMA_CSR); + dma_write32(val & ~DMA_INT_ENAB, DMA_CSR); + + free_irq(irq, esp); + + is_hme = (esp->dmarev == dvmahme); + + dma_free_coherent(&op->dev, 16, + esp->command_block, + esp->command_block_dma); + of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs, + SBUS_ESP_REG_SIZE); + of_iounmap(&dma_of->resource[0], esp->dma_regs, + resource_size(&dma_of->resource[0])); + + scsi_host_put(esp->host); + + dev_set_drvdata(&op->dev, NULL); + + put_device(&dma_of->dev); + + return 0; +} + +static const struct of_device_id esp_match[] = { + { + .name = "SUNW,esp", + }, + { + .name = "SUNW,fas", + }, + { + .name = "esp", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, esp_match); + +static struct platform_driver esp_sbus_driver = { + .driver = { + .name = "esp", + .of_match_table = esp_match, + }, + .probe = esp_sbus_probe, + .remove = esp_sbus_remove, +}; +module_platform_driver(esp_sbus_driver); + +MODULE_DESCRIPTION("Sun ESP SCSI driver"); +MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/scsi/sym53c8xx_2/Makefile b/drivers/scsi/sym53c8xx_2/Makefile new file mode 100644 index 000000000..0751e2a0c --- /dev/null +++ b/drivers/scsi/sym53c8xx_2/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Makefile for the NCR/SYMBIOS/LSI 53C8XX PCI SCSI controllers driver. + +sym53c8xx-objs := sym_fw.o sym_glue.o sym_hipd.o sym_malloc.o sym_nvram.o +obj-$(CONFIG_SCSI_SYM53C8XX_2) := sym53c8xx.o diff --git a/drivers/scsi/sym53c8xx_2/sym53c8xx.h b/drivers/scsi/sym53c8xx_2/sym53c8xx.h new file mode 100644 index 000000000..11f5dc29a --- /dev/null +++ b/drivers/scsi/sym53c8xx_2/sym53c8xx.h @@ -0,0 +1,202 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family + * of PCI-SCSI IO processors. + * + * Copyright (C) 1999-2001 Gerard Roudier + * + * This driver is derived from the Linux sym53c8xx driver. + * Copyright (C) 1998-2000 Gerard Roudier + * + * The sym53c8xx driver is derived from the ncr53c8xx driver that had been + * a port of the FreeBSD ncr driver to Linux-1.2.13. + * + * The original ncr driver has been written for 386bsd and FreeBSD by + * Wolfgang Stanglmeier + * Stefan Esser + * Copyright (C) 1994 Wolfgang Stanglmeier + * + * Other major contributions: + * + * NVRAM detection and reading. 
+ * Copyright (C) 1997 Richard Waltham + * + *----------------------------------------------------------------------------- + */ + +#ifndef SYM53C8XX_H +#define SYM53C8XX_H + + +/* + * DMA addressing mode. + * + * 0 : 32 bit addressing for all chips. + * 1 : 40 bit addressing when supported by chip. + * 2 : 64 bit addressing when supported by chip, + * limited to 16 segments of 4 GB -> 64 GB max. + */ +#define SYM_CONF_DMA_ADDRESSING_MODE CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE + +/* + * NVRAM support. + */ +#if 1 +#define SYM_CONF_NVRAM_SUPPORT (1) +#endif + +/* + * These options are not tunable from 'make config' + */ +#if 1 +#define SYM_LINUX_PROC_INFO_SUPPORT +#define SYM_LINUX_USER_COMMAND_SUPPORT +#define SYM_LINUX_USER_INFO_SUPPORT +#define SYM_LINUX_DEBUG_CONTROL_SUPPORT +#endif + +/* + * Also handle old NCR chips if not (0). + */ +#define SYM_CONF_GENERIC_SUPPORT (1) + +/* + * Allow tags from 2 to 256, default 8 + */ +#ifndef CONFIG_SCSI_SYM53C8XX_MAX_TAGS +#define CONFIG_SCSI_SYM53C8XX_MAX_TAGS (8) +#endif + +#if CONFIG_SCSI_SYM53C8XX_MAX_TAGS < 2 +#define SYM_CONF_MAX_TAG (2) +#elif CONFIG_SCSI_SYM53C8XX_MAX_TAGS > 256 +#define SYM_CONF_MAX_TAG (256) +#else +#define SYM_CONF_MAX_TAG CONFIG_SCSI_SYM53C8XX_MAX_TAGS +#endif + +#ifndef CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS +#define CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS SYM_CONF_MAX_TAG +#endif + +/* + * Anyway, we configure the driver for at least 64 tags per LUN. :) + */ +#if SYM_CONF_MAX_TAG <= 64 +#define SYM_CONF_MAX_TAG_ORDER (6) +#elif SYM_CONF_MAX_TAG <= 128 +#define SYM_CONF_MAX_TAG_ORDER (7) +#else +#define SYM_CONF_MAX_TAG_ORDER (8) +#endif + +/* + * Max number of SG entries. + */ +#define SYM_CONF_MAX_SG (96) + +/* + * Driver setup structure. + * + * This structure is initialized from linux config options. + * It can be overridden at boot-up by the boot command line. + */ +struct sym_driver_setup { + u_short max_tag; + u_char burst_order; + u_char scsi_led; + u_char scsi_diff; + u_char irq_mode; + u_char scsi_bus_check; + u_char host_id; + + u_char verbose; + u_char settle_delay; + u_char use_nvram; + u_long excludes[8]; +}; + +#define SYM_SETUP_MAX_TAG sym_driver_setup.max_tag +#define SYM_SETUP_BURST_ORDER sym_driver_setup.burst_order +#define SYM_SETUP_SCSI_LED sym_driver_setup.scsi_led +#define SYM_SETUP_SCSI_DIFF sym_driver_setup.scsi_diff +#define SYM_SETUP_IRQ_MODE sym_driver_setup.irq_mode +#define SYM_SETUP_SCSI_BUS_CHECK sym_driver_setup.scsi_bus_check +#define SYM_SETUP_HOST_ID sym_driver_setup.host_id +#define boot_verbose sym_driver_setup.verbose + +/* + * Initial setup. + * + * Can be overriden at startup by a command line. + */ +#define SYM_LINUX_DRIVER_SETUP { \ + .max_tag = CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS, \ + .burst_order = 7, \ + .scsi_led = 1, \ + .scsi_diff = 1, \ + .irq_mode = 0, \ + .scsi_bus_check = 1, \ + .host_id = 7, \ + .verbose = 0, \ + .settle_delay = 3, \ + .use_nvram = 1, \ +} + +extern struct sym_driver_setup sym_driver_setup; +extern unsigned int sym_debug_flags; +#define DEBUG_FLAGS sym_debug_flags + +/* + * Max number of targets. + * Maximum is 16 and you are advised not to change this value. + */ +#ifndef SYM_CONF_MAX_TARGET +#define SYM_CONF_MAX_TARGET (16) +#endif + +/* + * Max number of logical units. + * SPI-2 allows up to 64 logical units, but in real life, target + * that implements more that 7 logical units are pretty rare. + * Anyway, the cost of accepting up to 64 logical unit is low in + * this driver, thus going with the maximum is acceptable. 
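/*
 * Illustrative sketch (not part of the patch): the tag clamping and
 * SYM_CONF_MAX_TAG_ORDER bucketing performed by the preprocessor above,
 * written out as plain C so the cutoffs are visible: the configured tag
 * count is clamped to the 2..256 range, and the order is the smallest
 * of 6, 7 or 8 whose power of two covers the clamped value.
 */
static int ex_sym_tag_order(int configured_tags)
{
	int max_tag = configured_tags;

	if (max_tag < 2)
		max_tag = 2;
	else if (max_tag > 256)
		max_tag = 256;

	if (max_tag <= 64)
		return 6;	/* 2^6 = 64 */
	if (max_tag <= 128)
		return 7;	/* 2^7 = 128 */
	return 8;		/* 2^8 = 256 */
}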
+ */ +#ifndef SYM_CONF_MAX_LUN +#define SYM_CONF_MAX_LUN (64) +#endif + +/* + * Max number of IO control blocks queued to the controller. + * Each entry needs 8 bytes and the queues are allocated contiguously. + * Since we do not want to allocate more than a page, the theoretical + * maximum is PAGE_SIZE/8. For safety, we announce a bit less to the + * access method. :) + * When not supplied, as is suggested, the driver computes a + * good value for this parameter. + */ +/* #define SYM_CONF_MAX_START (PAGE_SIZE/8 - 16) */ + +/* + * Support for Immediate Arbitration. + * Not advised. + */ +/* #define SYM_CONF_IARB_SUPPORT */ + +/* + * Only relevant if IARB support configured. + * - Max number of successive settings of IARB hints. + * - Set IARB on arbitration lost. + */ +#define SYM_CONF_IARB_MAX 3 +#define SYM_CONF_SET_IARB_ON_ARB_LOST 1 + +/* + * Returning wrong residuals may cause problems. + * When zero, this define tells the driver to + * always return 0 as transfer residual. + * Btw, all my testing of residuals has succeeded. + */ +#define SYM_SETUP_RESIDUAL_SUPPORT 1 + +#endif /* SYM53C8XX_H */ diff --git a/drivers/scsi/sym53c8xx_2/sym_defs.h b/drivers/scsi/sym53c8xx_2/sym_defs.h new file mode 100644 index 000000000..317289ee0 --- /dev/null +++ b/drivers/scsi/sym53c8xx_2/sym_defs.h @@ -0,0 +1,779 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family + * of PCI-SCSI IO processors. + * + * Copyright (C) 1999-2001 Gerard Roudier + * + * This driver is derived from the Linux sym53c8xx driver. + * Copyright (C) 1998-2000 Gerard Roudier + * + * The sym53c8xx driver is derived from the ncr53c8xx driver that had been + * a port of the FreeBSD ncr driver to Linux-1.2.13. + * + * The original ncr driver has been written for 386bsd and FreeBSD by + * Wolfgang Stanglmeier + * Stefan Esser + * Copyright (C) 1994 Wolfgang Stanglmeier + * + * Other major contributions: + * + * NVRAM detection and reading. + * Copyright (C) 1997 Richard Waltham + * + *----------------------------------------------------------------------------- + */ + +#ifndef SYM_DEFS_H +#define SYM_DEFS_H + +#define SYM_VERSION "2.2.3" +#define SYM_DRIVER_NAME "sym-" SYM_VERSION + +/* + * SYM53C8XX device features descriptor.
+ */ +struct sym_chip { + u_short device_id; + u_short revision_id; + char *name; + u_char burst_max; /* log-base-2 of max burst */ + u_char offset_max; + u_char nr_divisor; + u_char lp_probe_bit; + u_int features; +#define FE_LED0 (1<<0) +#define FE_WIDE (1<<1) /* Wide data transfers */ +#define FE_ULTRA (1<<2) /* Ultra speed 20Mtrans/sec */ +#define FE_ULTRA2 (1<<3) /* Ultra 2 - 40 Mtrans/sec */ +#define FE_DBLR (1<<4) /* Clock doubler present */ +#define FE_QUAD (1<<5) /* Clock quadrupler present */ +#define FE_ERL (1<<6) /* Enable read line */ +#define FE_CLSE (1<<7) /* Cache line size enable */ +#define FE_WRIE (1<<8) /* Write & Invalidate enable */ +#define FE_ERMP (1<<9) /* Enable read multiple */ +#define FE_BOF (1<<10) /* Burst opcode fetch */ +#define FE_DFS (1<<11) /* DMA fifo size */ +#define FE_PFEN (1<<12) /* Prefetch enable */ +#define FE_LDSTR (1<<13) /* Load/Store supported */ +#define FE_RAM (1<<14) /* On chip RAM present */ +#define FE_VARCLK (1<<15) /* Clock frequency may vary */ +#define FE_RAM8K (1<<16) /* On chip RAM sized 8Kb */ +#define FE_64BIT (1<<17) /* 64-bit PCI BUS interface */ +#define FE_IO256 (1<<18) /* Requires full 256 bytes in PCI space */ +#define FE_NOPM (1<<19) /* Scripts handles phase mismatch */ +#define FE_LEDC (1<<20) /* Hardware control of LED */ +#define FE_ULTRA3 (1<<21) /* Ultra 3 - 80 Mtrans/sec DT */ +#define FE_66MHZ (1<<22) /* 66MHz PCI support */ +#define FE_CRC (1<<23) /* CRC support */ +#define FE_DIFF (1<<24) /* SCSI HVD support */ +#define FE_DFBC (1<<25) /* Have DFBC register */ +#define FE_LCKFRQ (1<<26) /* Have LCKFRQ */ +#define FE_C10 (1<<27) /* Various C10 core (mis)features */ +#define FE_U3EN (1<<28) /* U3EN bit usable */ +#define FE_DAC (1<<29) /* Support PCI DAC (64 bit addressing) */ +#define FE_ISTAT1 (1<<30) /* Have ISTAT1, MBOX0, MBOX1 registers */ + +#define FE_CACHE_SET (FE_ERL|FE_CLSE|FE_WRIE|FE_ERMP) +#define FE_CACHE0_SET (FE_CACHE_SET & ~FE_ERL) +}; + +/* + * SYM53C8XX IO register data structure. + */ +struct sym_reg { +/*00*/ u8 nc_scntl0; /* full arb., ena parity, par->ATN */ + +/*01*/ u8 nc_scntl1; /* no reset */ + #define ISCON 0x10 /* connected to scsi */ + #define CRST 0x08 /* force reset */ + #define IARB 0x02 /* immediate arbitration */ + +/*02*/ u8 nc_scntl2; /* no disconnect expected */ + #define SDU 0x80 /* cmd: disconnect will raise error */ + #define CHM 0x40 /* sta: chained mode */ + #define WSS 0x08 /* sta: wide scsi send [W]*/ + #define WSR 0x01 /* sta: wide scsi received [W]*/ + +/*03*/ u8 nc_scntl3; /* cnf system clock dependent */ + #define EWS 0x08 /* cmd: enable wide scsi [W]*/ + #define ULTRA 0x80 /* cmd: ULTRA enable */ + /* bits 0-2, 7 rsvd for C1010 */ + +/*04*/ u8 nc_scid; /* cnf host adapter scsi address */ + #define RRE 0x40 /* r/w:e enable response to resel. */ + #define SRE 0x20 /* r/w:e enable response to select */ + +/*05*/ u8 nc_sxfer; /* ### Sync speed and count */ + /* bits 6-7 rsvd for C1010 */ + +/*06*/ u8 nc_sdid; /* ### Destination-ID */ + +/*07*/ u8 nc_gpreg; /* ??? 
IO-Pins */ + +/*08*/ u8 nc_sfbr; /* ### First byte received */ + +/*09*/ u8 nc_socl; + #define CREQ 0x80 /* r/w: SCSI-REQ */ + #define CACK 0x40 /* r/w: SCSI-ACK */ + #define CBSY 0x20 /* r/w: SCSI-BSY */ + #define CSEL 0x10 /* r/w: SCSI-SEL */ + #define CATN 0x08 /* r/w: SCSI-ATN */ + #define CMSG 0x04 /* r/w: SCSI-MSG */ + #define CC_D 0x02 /* r/w: SCSI-C_D */ + #define CI_O 0x01 /* r/w: SCSI-I_O */ + +/*0a*/ u8 nc_ssid; + +/*0b*/ u8 nc_sbcl; + +/*0c*/ u8 nc_dstat; + #define DFE 0x80 /* sta: dma fifo empty */ + #define MDPE 0x40 /* int: master data parity error */ + #define BF 0x20 /* int: script: bus fault */ + #define ABRT 0x10 /* int: script: command aborted */ + #define SSI 0x08 /* int: script: single step */ + #define SIR 0x04 /* int: script: interrupt instruct. */ + #define IID 0x01 /* int: script: illegal instruct. */ + +/*0d*/ u8 nc_sstat0; + #define ILF 0x80 /* sta: data in SIDL register lsb */ + #define ORF 0x40 /* sta: data in SODR register lsb */ + #define OLF 0x20 /* sta: data in SODL register lsb */ + #define AIP 0x10 /* sta: arbitration in progress */ + #define LOA 0x08 /* sta: arbitration lost */ + #define WOA 0x04 /* sta: arbitration won */ + #define IRST 0x02 /* sta: scsi reset signal */ + #define SDP 0x01 /* sta: scsi parity signal */ + +/*0e*/ u8 nc_sstat1; + #define FF3210 0xf0 /* sta: bytes in the scsi fifo */ + +/*0f*/ u8 nc_sstat2; + #define ILF1 0x80 /* sta: data in SIDL register msb[W]*/ + #define ORF1 0x40 /* sta: data in SODR register msb[W]*/ + #define OLF1 0x20 /* sta: data in SODL register msb[W]*/ + #define DM 0x04 /* sta: DIFFSENS mismatch (895/6 only) */ + #define LDSC 0x02 /* sta: disconnect & reconnect */ + +/*10*/ u8 nc_dsa; /* --> Base page */ +/*11*/ u8 nc_dsa1; +/*12*/ u8 nc_dsa2; +/*13*/ u8 nc_dsa3; + +/*14*/ u8 nc_istat; /* --> Main Command and status */ + #define CABRT 0x80 /* cmd: abort current operation */ + #define SRST 0x40 /* mod: reset chip */ + #define SIGP 0x20 /* r/w: message from host to script */ + #define SEM 0x10 /* r/w: message between host + script */ + #define CON 0x08 /* sta: connected to scsi */ + #define INTF 0x04 /* sta: int on the fly (reset by wr)*/ + #define SIP 0x02 /* sta: scsi-interrupt */ + #define DIP 0x01 /* sta: host/script interrupt */ + +/*15*/ u8 nc_istat1; /* 896 only */ + #define FLSH 0x04 /* sta: chip is flushing */ + #define SCRUN 0x02 /* sta: scripts are running */ + #define SIRQD 0x01 /* r/w: disable INT pin */ + +/*16*/ u8 nc_mbox0; /* 896 only */ +/*17*/ u8 nc_mbox1; /* 896 only */ + +/*18*/ u8 nc_ctest0; +/*19*/ u8 nc_ctest1; + +/*1a*/ u8 nc_ctest2; + #define CSIGP 0x40 + /* bits 0-2,7 rsvd for C1010 */ + +/*1b*/ u8 nc_ctest3; + #define FLF 0x08 /* cmd: flush dma fifo */ + #define CLF 0x04 /* cmd: clear dma fifo */ + #define FM 0x02 /* mod: fetch pin mode */ + #define WRIE 0x01 /* mod: write and invalidate enable */ + /* bits 4-7 rsvd for C1010 */ + +/*1c*/ u32 nc_temp; /* ### Temporary stack */ + +/*20*/ u8 nc_dfifo; +/*21*/ u8 nc_ctest4; + #define BDIS 0x80 /* mod: burst disable */ + #define MPEE 0x08 /* mod: master parity error enable */ + +/*22*/ u8 nc_ctest5; + #define DFS 0x20 /* mod: dma fifo size */ + /* bits 0-1, 3-7 rsvd for C1010 */ + +/*23*/ u8 nc_ctest6; + +/*24*/ u32 nc_dbc; /* ### Byte count and command */ +/*28*/ u32 nc_dnad; /* ### Next command register */ +/*2c*/ u32 nc_dsp; /* --> Script Pointer */ +/*30*/ u32 nc_dsps; /* --> Script pointer save/opcode#2 */ + +/*34*/ u8 nc_scratcha; /* Temporary register a */ +/*35*/ u8 nc_scratcha1; +/*36*/ u8 nc_scratcha2; +/*37*/ u8 nc_scratcha3; + 
+/*38*/ u8 nc_dmode; + #define BL_2 0x80 /* mod: burst length shift value +2 */ + #define BL_1 0x40 /* mod: burst length shift value +1 */ + #define ERL 0x08 /* mod: enable read line */ + #define ERMP 0x04 /* mod: enable read multiple */ + #define BOF 0x02 /* mod: burst op code fetch */ + +/*39*/ u8 nc_dien; +/*3a*/ u8 nc_sbr; + +/*3b*/ u8 nc_dcntl; /* --> Script execution control */ + #define CLSE 0x80 /* mod: cache line size enable */ + #define PFF 0x40 /* cmd: pre-fetch flush */ + #define PFEN 0x20 /* mod: pre-fetch enable */ + #define SSM 0x10 /* mod: single step mode */ + #define IRQM 0x08 /* mod: irq mode (1 = totem pole !) */ + #define STD 0x04 /* cmd: start dma mode */ + #define IRQD 0x02 /* mod: irq disable */ + #define NOCOM 0x01 /* cmd: protect sfbr while reselect */ + /* bits 0-1 rsvd for C1010 */ + +/*3c*/ u32 nc_adder; + +/*40*/ u16 nc_sien; /* -->: interrupt enable */ +/*42*/ u16 nc_sist; /* <--: interrupt status */ + #define SBMC 0x1000/* sta: SCSI Bus Mode Change (895/6 only) */ + #define STO 0x0400/* sta: timeout (select) */ + #define GEN 0x0200/* sta: timeout (general) */ + #define HTH 0x0100/* sta: timeout (handshake) */ + #define MA 0x80 /* sta: phase mismatch */ + #define CMP 0x40 /* sta: arbitration complete */ + #define SEL 0x20 /* sta: selected by another device */ + #define RSL 0x10 /* sta: reselected by another device*/ + #define SGE 0x08 /* sta: gross error (over/underflow)*/ + #define UDC 0x04 /* sta: unexpected disconnect */ + #define RST 0x02 /* sta: scsi bus reset detected */ + #define PAR 0x01 /* sta: scsi parity error */ + +/*44*/ u8 nc_slpar; +/*45*/ u8 nc_swide; +/*46*/ u8 nc_macntl; +/*47*/ u8 nc_gpcntl; +/*48*/ u8 nc_stime0; /* cmd: timeout for select&handshake*/ +/*49*/ u8 nc_stime1; /* cmd: timeout user defined */ +/*4a*/ u16 nc_respid; /* sta: Reselect-IDs */ + +/*4c*/ u8 nc_stest0; + +/*4d*/ u8 nc_stest1; + #define SCLK 0x80 /* Use the PCI clock as SCSI clock */ + #define DBLEN 0x08 /* clock doubler running */ + #define DBLSEL 0x04 /* clock doubler selected */ + + +/*4e*/ u8 nc_stest2; + #define ROF 0x40 /* reset scsi offset (after gross error!) */ + #define EXT 0x02 /* extended filtering */ + +/*4f*/ u8 nc_stest3; + #define TE 0x80 /* c: tolerAnt enable */ + #define HSC 0x20 /* c: Halt SCSI Clock */ + #define CSF 0x02 /* c: clear scsi fifo */ + +/*50*/ u16 nc_sidl; /* Lowlevel: latched from scsi data */ +/*52*/ u8 nc_stest4; + #define SMODE 0xc0 /* SCSI bus mode (895/6 only) */ + #define SMODE_HVD 0x40 /* High Voltage Differential */ + #define SMODE_SE 0x80 /* Single Ended */ + #define SMODE_LVD 0xc0 /* Low Voltage Differential */ + #define LCKFRQ 0x20 /* Frequency Lock (895/6 only) */ + /* bits 0-5 rsvd for C1010 */ + +/*53*/ u8 nc_53_; +/*54*/ u16 nc_sodl; /* Lowlevel: data out to scsi data */ +/*56*/ u8 nc_ccntl0; /* Chip Control 0 (896) */ + #define ENPMJ 0x80 /* Enable Phase Mismatch Jump */ + #define PMJCTL 0x40 /* Phase Mismatch Jump Control */ + #define ENNDJ 0x20 /* Enable Non Data PM Jump */ + #define DISFC 0x10 /* Disable Auto FIFO Clear */ + #define DILS 0x02 /* Disable Internal Load/Store */ + #define DPR 0x01 /* Disable Pipe Req */ + +/*57*/ u8 nc_ccntl1; /* Chip Control 1 (896) */ + #define ZMOD 0x80 /* High Impedance Mode */ + #define DDAC 0x08 /* Disable Dual Address Cycle */ + #define XTIMOD 0x04 /* 64-bit Table Ind. Indexing Mode */ + #define EXTIBMV 0x02 /* Enable 64-bit Table Ind. 
BMOV */ + #define EXDBMV 0x01 /* Enable 64-bit Direct BMOV */ + +/*58*/ u16 nc_sbdl; /* Lowlevel: data from scsi data */ +/*5a*/ u16 nc_5a_; + +/*5c*/ u8 nc_scr0; /* Working register B */ +/*5d*/ u8 nc_scr1; +/*5e*/ u8 nc_scr2; +/*5f*/ u8 nc_scr3; + +/*60*/ u8 nc_scrx[64]; /* Working register C-R */ +/*a0*/ u32 nc_mmrs; /* Memory Move Read Selector */ +/*a4*/ u32 nc_mmws; /* Memory Move Write Selector */ +/*a8*/ u32 nc_sfs; /* Script Fetch Selector */ +/*ac*/ u32 nc_drs; /* DSA Relative Selector */ +/*b0*/ u32 nc_sbms; /* Static Block Move Selector */ +/*b4*/ u32 nc_dbms; /* Dynamic Block Move Selector */ +/*b8*/ u32 nc_dnad64; /* DMA Next Address 64 */ +/*bc*/ u16 nc_scntl4; /* C1010 only */ + #define U3EN 0x80 /* Enable Ultra 3 */ + #define AIPCKEN 0x40 /* AIP checking enable */ + /* Also enable AIP generation on C10-33*/ + #define XCLKH_DT 0x08 /* Extra clock of data hold on DT edge */ + #define XCLKH_ST 0x04 /* Extra clock of data hold on ST edge */ + #define XCLKS_DT 0x02 /* Extra clock of data set on DT edge */ + #define XCLKS_ST 0x01 /* Extra clock of data set on ST edge */ +/*be*/ u8 nc_aipcntl0; /* AIP Control 0 C1010 only */ +/*bf*/ u8 nc_aipcntl1; /* AIP Control 1 C1010 only */ + #define DISAIP 0x08 /* Disable AIP generation C10-66 only */ +/*c0*/ u32 nc_pmjad1; /* Phase Mismatch Jump Address 1 */ +/*c4*/ u32 nc_pmjad2; /* Phase Mismatch Jump Address 2 */ +/*c8*/ u8 nc_rbc; /* Remaining Byte Count */ +/*c9*/ u8 nc_rbc1; +/*ca*/ u8 nc_rbc2; +/*cb*/ u8 nc_rbc3; + +/*cc*/ u8 nc_ua; /* Updated Address */ +/*cd*/ u8 nc_ua1; +/*ce*/ u8 nc_ua2; +/*cf*/ u8 nc_ua3; +/*d0*/ u32 nc_esa; /* Entry Storage Address */ +/*d4*/ u8 nc_ia; /* Instruction Address */ +/*d5*/ u8 nc_ia1; +/*d6*/ u8 nc_ia2; +/*d7*/ u8 nc_ia3; +/*d8*/ u32 nc_sbc; /* SCSI Byte Count (3 bytes only) */ +/*dc*/ u32 nc_csbc; /* Cumulative SCSI Byte Count */ + /* Following for C1010 only */ +/*e0*/ u16 nc_crcpad; /* CRC Value */ +/*e2*/ u8 nc_crccntl0; /* CRC control register */ + #define SNDCRC 0x10 /* Send CRC Request */ +/*e3*/ u8 nc_crccntl1; /* CRC control register */ +/*e4*/ u32 nc_crcdata; /* CRC data register */ +/*e8*/ u32 nc_e8_; +/*ec*/ u32 nc_ec_; +/*f0*/ u16 nc_dfbc; /* DMA FIFO byte count */ +}; + +/*----------------------------------------------------------- + * + * Utility macros for the script. + * + *----------------------------------------------------------- + */ + +#define REGJ(p,r) (offsetof(struct sym_reg, p ## r)) +#define REG(r) REGJ (nc_, r) + +/*----------------------------------------------------------- + * + * SCSI phases + * + *----------------------------------------------------------- + */ + +#define SCR_DATA_OUT 0x00000000 +#define SCR_DATA_IN 0x01000000 +#define SCR_COMMAND 0x02000000 +#define SCR_STATUS 0x03000000 +#define SCR_DT_DATA_OUT 0x04000000 +#define SCR_DT_DATA_IN 0x05000000 +#define SCR_MSG_OUT 0x06000000 +#define SCR_MSG_IN 0x07000000 +/* DT phases are illegal for non Ultra3 mode */ +#define SCR_ILG_OUT 0x04000000 +#define SCR_ILG_IN 0x05000000 + +/*----------------------------------------------------------- + * + * Data transfer via SCSI. 
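/*
 * Illustration (stand-alone sketch): the SCSI phase encoding behind the
 * SCR_DATA_OUT ... SCR_MSG_IN constants above.  Within these constants the
 * phase number sits in bits 26:24 of the instruction word, so it can be
 * pulled back out with a shift and a mask.
 */
#include <stdio.h>

#define SCR_DATA_IN	0x01000000u	/* copied from sym_defs.h above */
#define SCR_MSG_IN	0x07000000u

static unsigned int scr_phase(unsigned int insn)
{
	return (insn >> 24) & 0x07;	/* 0 = DATA OUT ... 7 = MSG IN */
}

int main(void)
{
	printf("DATA_IN -> phase %u\n", scr_phase(SCR_DATA_IN));	/* 1 */
	printf("MSG_IN  -> phase %u\n", scr_phase(SCR_MSG_IN));		/* 7 */
	return 0;
}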
+ * + *----------------------------------------------------------- + * + * MOVE_ABS (LEN) + * <> + * + * MOVE_IND (LEN) + * <> + * + * MOVE_TBL + * <> + * + *----------------------------------------------------------- + */ + +#define OPC_MOVE 0x08000000 + +#define SCR_MOVE_ABS(l) ((0x00000000 | OPC_MOVE) | (l)) +/* #define SCR_MOVE_IND(l) ((0x20000000 | OPC_MOVE) | (l)) */ +#define SCR_MOVE_TBL (0x10000000 | OPC_MOVE) + +#define SCR_CHMOV_ABS(l) ((0x00000000) | (l)) +/* #define SCR_CHMOV_IND(l) ((0x20000000) | (l)) */ +#define SCR_CHMOV_TBL (0x10000000) + +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT +/* We steal the `indirect addressing' flag for target mode MOVE in scripts */ + +#define OPC_TCHMOVE 0x08000000 + +#define SCR_TCHMOVE_ABS(l) ((0x20000000 | OPC_TCHMOVE) | (l)) +#define SCR_TCHMOVE_TBL (0x30000000 | OPC_TCHMOVE) + +#define SCR_TMOV_ABS(l) ((0x20000000) | (l)) +#define SCR_TMOV_TBL (0x30000000) +#endif + +struct sym_tblmove { + u32 size; + u32 addr; +}; + +/*----------------------------------------------------------- + * + * Selection + * + *----------------------------------------------------------- + * + * SEL_ABS | SCR_ID (0..15) [ | REL_JMP] + * <> + * + * SEL_TBL | << dnad_offset>> [ | REL_JMP] + * <> + * + *----------------------------------------------------------- + */ + +#define SCR_SEL_ABS 0x40000000 +#define SCR_SEL_ABS_ATN 0x41000000 +#define SCR_SEL_TBL 0x42000000 +#define SCR_SEL_TBL_ATN 0x43000000 + +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT +#define SCR_RESEL_ABS 0x40000000 +#define SCR_RESEL_ABS_ATN 0x41000000 +#define SCR_RESEL_TBL 0x42000000 +#define SCR_RESEL_TBL_ATN 0x43000000 +#endif + +struct sym_tblsel { + u_char sel_scntl4; /* C1010 only */ + u_char sel_sxfer; + u_char sel_id; + u_char sel_scntl3; +}; + +#define SCR_JMP_REL 0x04000000 +#define SCR_ID(id) (((u32)(id)) << 16) + +/*----------------------------------------------------------- + * + * Waiting for Disconnect or Reselect + * + *----------------------------------------------------------- + * + * WAIT_DISC + * dummy: <> + * + * WAIT_RESEL + * <> + * + *----------------------------------------------------------- + */ + +#define SCR_WAIT_DISC 0x48000000 +#define SCR_WAIT_RESEL 0x50000000 + +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT +#define SCR_DISCONNECT 0x48000000 +#endif + +/*----------------------------------------------------------- + * + * Bit Set / Reset + * + *----------------------------------------------------------- + * + * SET (flags {|.. }) + * + * CLR (flags {|.. }) + * + *----------------------------------------------------------- + */ + +#define SCR_SET(f) (0x58000000 | (f)) +#define SCR_CLR(f) (0x60000000 | (f)) + +#define SCR_CARRY 0x00000400 +#define SCR_TRG 0x00000200 +#define SCR_ACK 0x00000040 +#define SCR_ATN 0x00000008 + + +/*----------------------------------------------------------- + * + * Memory to memory move + * + *----------------------------------------------------------- + * + * COPY (bytecount) + * << source_address >> + * << destination_address >> + * + * SCR_COPY sets the NO FLUSH option by default. + * SCR_COPY_F does not set this option. + * + * For chips which do not support this option, + * sym_fw_bind_script() will remove this bit. 
+ * + *----------------------------------------------------------- + */ + +#define SCR_NO_FLUSH 0x01000000 + +#define SCR_COPY(n) (0xc0000000 | SCR_NO_FLUSH | (n)) +#define SCR_COPY_F(n) (0xc0000000 | (n)) + +/*----------------------------------------------------------- + * + * Register move and binary operations + * + *----------------------------------------------------------- + * + * SFBR_REG (reg, op, data) reg = SFBR op data + * << 0 >> + * + * REG_SFBR (reg, op, data) SFBR = reg op data + * << 0 >> + * + * REG_REG (reg, op, data) reg = reg op data + * << 0 >> + * + *----------------------------------------------------------- + * + * On 825A, 875, 895 and 896 chips the content + * of SFBR register can be used as data (SCR_SFBR_DATA). + * The 896 has additionnal IO registers starting at + * offset 0x80. Bit 7 of register offset is stored in + * bit 7 of the SCRIPTS instruction first DWORD. + * + *----------------------------------------------------------- + */ + +#define SCR_REG_OFS(ofs) ((((ofs) & 0x7f) << 16ul) + ((ofs) & 0x80)) + +#define SCR_SFBR_REG(reg,op,data) \ + (0x68000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul)) + +#define SCR_REG_SFBR(reg,op,data) \ + (0x70000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul)) + +#define SCR_REG_REG(reg,op,data) \ + (0x78000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul)) + + +#define SCR_LOAD 0x00000000 +#define SCR_SHL 0x01000000 +#define SCR_OR 0x02000000 +#define SCR_XOR 0x03000000 +#define SCR_AND 0x04000000 +#define SCR_SHR 0x05000000 +#define SCR_ADD 0x06000000 +#define SCR_ADDC 0x07000000 + +#define SCR_SFBR_DATA (0x00800000>>8ul) /* Use SFBR as data */ + +/*----------------------------------------------------------- + * + * FROM_REG (reg) SFBR = reg + * << 0 >> + * + * TO_REG (reg) reg = SFBR + * << 0 >> + * + * LOAD_REG (reg, data) reg = + * << 0 >> + * + * LOAD_SFBR(data) SFBR = + * << 0 >> + * + *----------------------------------------------------------- + */ + +#define SCR_FROM_REG(reg) \ + SCR_REG_SFBR(reg,SCR_OR,0) + +#define SCR_TO_REG(reg) \ + SCR_SFBR_REG(reg,SCR_OR,0) + +#define SCR_LOAD_REG(reg,data) \ + SCR_REG_REG(reg,SCR_LOAD,data) + +#define SCR_LOAD_SFBR(data) \ + (SCR_REG_SFBR (gpreg, SCR_LOAD, data)) + +/*----------------------------------------------------------- + * + * LOAD from memory to register. + * STORE from register to memory. + * + * Only supported by 810A, 860, 825A, 875, 895 and 896. 
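/*
 * Illustration (stand-alone sketch): how SCR_REG_REG() packs a register
 * read-modify-write into a single SCRIPTS opcode.  The macro bodies are
 * copied from the header above, but adapted to take a plain numeric
 * register offset instead of going through REG(); 0x07 is nc_gpreg per
 * the sym_reg layout above.  The example is the "gpreg &= 0xfe" LED
 * pattern used later in the driver's SCRIPTS.
 */
#include <stdio.h>

#define GPREG_OFS		0x07	/* offsetof(struct sym_reg, nc_gpreg) */
#define SCR_AND			0x04000000
#define SCR_REG_OFS(ofs)	((((ofs) & 0x7f) << 16ul) + ((ofs) & 0x80))
#define SCR_REG_REG(ofs, op, data) \
	(0x78000000 | SCR_REG_OFS(ofs) | (op) | (((data) & 0xff) << 8ul))

int main(void)
{
	unsigned long opcode = SCR_REG_REG(GPREG_OFS, SCR_AND, 0xfe);

	printf("opcode = 0x%08lx\n", opcode);	/* prints 0x7c07fe00 */
	return 0;
}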
+ * + *----------------------------------------------------------- + * + * LOAD_ABS (LEN) + * <> + * + * LOAD_REL (LEN) (DSA relative) + * <> + * + *----------------------------------------------------------- + */ + +#define SCR_REG_OFS2(ofs) (((ofs) & 0xff) << 16ul) +#define SCR_NO_FLUSH2 0x02000000 +#define SCR_DSA_REL2 0x10000000 + +#define SCR_LOAD_R(reg, how, n) \ + (0xe1000000 | how | (SCR_REG_OFS2(REG(reg))) | (n)) + +#define SCR_STORE_R(reg, how, n) \ + (0xe0000000 | how | (SCR_REG_OFS2(REG(reg))) | (n)) + +#define SCR_LOAD_ABS(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2, n) +#define SCR_LOAD_REL(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2, n) +#define SCR_LOAD_ABS_F(reg, n) SCR_LOAD_R(reg, 0, n) +#define SCR_LOAD_REL_F(reg, n) SCR_LOAD_R(reg, SCR_DSA_REL2, n) + +#define SCR_STORE_ABS(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2, n) +#define SCR_STORE_REL(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2,n) +#define SCR_STORE_ABS_F(reg, n) SCR_STORE_R(reg, 0, n) +#define SCR_STORE_REL_F(reg, n) SCR_STORE_R(reg, SCR_DSA_REL2, n) + + +/*----------------------------------------------------------- + * + * Transfer control: JUMP / CALL / RETURN / INT + * + *----------------------------------------------------------- + * + * JUMP [ | IFTRUE/IFFALSE ( ... ) ] + <
> + * + * JUMPR [ | IFTRUE/IFFALSE ( ... ) ] + * <> + * + * CALL [ | IFTRUE/IFFALSE ( ... ) ] + * <
> + * + * CALLR [ | IFTRUE/IFFALSE ( ... ) ] + * <> + * + * RETURN [ | IFTRUE/IFFALSE ( ... ) ] + * <> + * + * INT [ | IFTRUE/IFFALSE ( ... ) ] + * <> + * + * INT_FLY [ | IFTRUE/IFFALSE ( ... ) ] + * <> + * + * Conditions: + * WHEN (phase) + * IF (phase) + * CARRYSET + * DATA (data, mask) + * + *----------------------------------------------------------- + */ + +#define SCR_NO_OP 0x80000000 +#define SCR_JUMP 0x80080000 +#define SCR_JUMP64 0x80480000 +#define SCR_JUMPR 0x80880000 +#define SCR_CALL 0x88080000 +#define SCR_CALLR 0x88880000 +#define SCR_RETURN 0x90080000 +#define SCR_INT 0x98080000 +#define SCR_INT_FLY 0x98180000 + +#define IFFALSE(arg) (0x00080000 | (arg)) +#define IFTRUE(arg) (0x00000000 | (arg)) + +#define WHEN(phase) (0x00030000 | (phase)) +#define IF(phase) (0x00020000 | (phase)) + +#define DATA(D) (0x00040000 | ((D) & 0xff)) +#define MASK(D,M) (0x00040000 | (((M ^ 0xff) & 0xff) << 8ul)|((D) & 0xff)) + +#define CARRYSET (0x00200000) + +/*----------------------------------------------------------- + * + * SCSI constants. + * + *----------------------------------------------------------- + */ + +/* + * Messages + */ + +#define M_COMPLETE COMMAND_COMPLETE +#define M_EXTENDED EXTENDED_MESSAGE +#define M_SAVE_DP SAVE_POINTERS +#define M_RESTORE_DP RESTORE_POINTERS +#define M_DISCONNECT DISCONNECT +#define M_ID_ERROR INITIATOR_ERROR +#define M_ABORT ABORT_TASK_SET +#define M_REJECT MESSAGE_REJECT +#define M_NOOP NOP +#define M_PARITY MSG_PARITY_ERROR +#define M_LCOMPLETE LINKED_CMD_COMPLETE +#define M_FCOMPLETE LINKED_FLG_CMD_COMPLETE +#define M_RESET TARGET_RESET +#define M_ABORT_TAG ABORT_TASK +#define M_CLEAR_QUEUE CLEAR_TASK_SET +#define M_INIT_REC INITIATE_RECOVERY +#define M_REL_REC RELEASE_RECOVERY +#define M_TERMINATE (0x11) +#define M_SIMPLE_TAG SIMPLE_QUEUE_TAG +#define M_HEAD_TAG HEAD_OF_QUEUE_TAG +#define M_ORDERED_TAG ORDERED_QUEUE_TAG +#define M_IGN_RESIDUE IGNORE_WIDE_RESIDUE + +#define M_X_MODIFY_DP EXTENDED_MODIFY_DATA_POINTER +#define M_X_SYNC_REQ EXTENDED_SDTR +#define M_X_WIDE_REQ EXTENDED_WDTR +#define M_X_PPR_REQ EXTENDED_PPR + +/* + * PPR protocol options + */ +#define PPR_OPT_IU (0x01) +#define PPR_OPT_DT (0x02) +#define PPR_OPT_QAS (0x04) +#define PPR_OPT_MASK (0x07) + +/* + * Status + */ + +#define S_GOOD SAM_STAT_GOOD +#define S_CHECK_COND SAM_STAT_CHECK_CONDITION +#define S_COND_MET SAM_STAT_CONDITION_MET +#define S_BUSY SAM_STAT_BUSY +#define S_INT SAM_STAT_INTERMEDIATE +#define S_INT_COND_MET SAM_STAT_INTERMEDIATE_CONDITION_MET +#define S_CONFLICT SAM_STAT_RESERVATION_CONFLICT +#define S_TERMINATED SAM_STAT_COMMAND_TERMINATED +#define S_QUEUE_FULL SAM_STAT_TASK_SET_FULL +#define S_ILLEGAL (0xff) + +#endif /* defined SYM_DEFS_H */ diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c new file mode 100644 index 000000000..c536d2a9a --- /dev/null +++ b/drivers/scsi/sym53c8xx_2/sym_fw.c @@ -0,0 +1,537 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family + * of PCI-SCSI IO processors. + * + * Copyright (C) 1999-2001 Gerard Roudier + * + * This driver is derived from the Linux sym53c8xx driver. + * Copyright (C) 1998-2000 Gerard Roudier + * + * The sym53c8xx driver is derived from the ncr53c8xx driver that had been + * a port of the FreeBSD ncr driver to Linux-1.2.13. 
+ * + * The original ncr driver has been written for 386bsd and FreeBSD by + * Wolfgang Stanglmeier + * Stefan Esser + * Copyright (C) 1994 Wolfgang Stanglmeier + * + * Other major contributions: + * + * NVRAM detection and reading. + * Copyright (C) 1997 Richard Waltham + * + *----------------------------------------------------------------------------- + */ + +#include "sym_glue.h" + +/* + * Macros used for all firmwares. + */ +#define SYM_GEN_A(s, label) ((short) offsetof(s, label)), +#define SYM_GEN_B(s, label) ((short) offsetof(s, label)), +#define SYM_GEN_Z(s, label) ((short) offsetof(s, label)), +#define PADDR_A(label) SYM_GEN_PADDR_A(struct SYM_FWA_SCR, label) +#define PADDR_B(label) SYM_GEN_PADDR_B(struct SYM_FWB_SCR, label) + + +#if SYM_CONF_GENERIC_SUPPORT +/* + * Allocate firmware #1 script area. + */ +#define SYM_FWA_SCR sym_fw1a_scr +#define SYM_FWB_SCR sym_fw1b_scr +#define SYM_FWZ_SCR sym_fw1z_scr +#include "sym_fw1.h" +static struct sym_fwa_ofs sym_fw1a_ofs = { + SYM_GEN_FW_A(struct SYM_FWA_SCR) +}; +static struct sym_fwb_ofs sym_fw1b_ofs = { + SYM_GEN_FW_B(struct SYM_FWB_SCR) +}; +static struct sym_fwz_ofs sym_fw1z_ofs = { + SYM_GEN_FW_Z(struct SYM_FWZ_SCR) +}; +#undef SYM_FWA_SCR +#undef SYM_FWB_SCR +#undef SYM_FWZ_SCR +#endif /* SYM_CONF_GENERIC_SUPPORT */ + +/* + * Allocate firmware #2 script area. + */ +#define SYM_FWA_SCR sym_fw2a_scr +#define SYM_FWB_SCR sym_fw2b_scr +#define SYM_FWZ_SCR sym_fw2z_scr +#include "sym_fw2.h" +static struct sym_fwa_ofs sym_fw2a_ofs = { + SYM_GEN_FW_A(struct SYM_FWA_SCR) +}; +static struct sym_fwb_ofs sym_fw2b_ofs = { + SYM_GEN_FW_B(struct SYM_FWB_SCR) + SYM_GEN_B(struct SYM_FWB_SCR, start64) + SYM_GEN_B(struct SYM_FWB_SCR, pm_handle) +}; +static struct sym_fwz_ofs sym_fw2z_ofs = { + SYM_GEN_FW_Z(struct SYM_FWZ_SCR) +}; +#undef SYM_FWA_SCR +#undef SYM_FWB_SCR +#undef SYM_FWZ_SCR + +#undef SYM_GEN_A +#undef SYM_GEN_B +#undef SYM_GEN_Z +#undef PADDR_A +#undef PADDR_B + +#if SYM_CONF_GENERIC_SUPPORT +/* + * Patch routine for firmware #1. + */ +static void +sym_fw1_patch(struct Scsi_Host *shost) +{ + struct sym_hcb *np = sym_get_hcb(shost); + struct sym_fw1a_scr *scripta0; + struct sym_fw1b_scr *scriptb0; + + scripta0 = (struct sym_fw1a_scr *) np->scripta0; + scriptb0 = (struct sym_fw1b_scr *) np->scriptb0; + + /* + * Remove LED support if not needed. + */ + if (!(np->features & FE_LED0)) { + scripta0->idle[0] = cpu_to_scr(SCR_NO_OP); + scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP); + scripta0->start[0] = cpu_to_scr(SCR_NO_OP); + } + +#ifdef SYM_CONF_IARB_SUPPORT + /* + * If user does not want to use IMMEDIATE ARBITRATION + * when we are reselected while attempting to arbitrate, + * patch the SCRIPTS accordingly with a SCRIPT NO_OP. + */ + if (!SYM_CONF_SET_IARB_ON_ARB_LOST) + scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP); +#endif + /* + * Patch some data in SCRIPTS. + * - start and done queue initial bus address. + * - target bus address table bus address. + */ + scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba); + scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba); + scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba); +} +#endif /* SYM_CONF_GENERIC_SUPPORT */ + +/* + * Patch routine for firmware #2. 
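/*
 * Illustration (stand-alone sketch): the patching technique used by
 * sym_fw1_patch()/sym_fw2_patch() above.  An optional two-dword SCRIPTS
 * instruction is disabled in place by overwriting its opcode with
 * SCR_NO_OP while its zero argument dword is left untouched.
 * cpu_to_scr() is presumably a byte-order conversion for the SCRIPTS
 * processor; it is modeled as the identity here, which is only a
 * stand-in for this sketch.
 */
#include <stdio.h>
#include <stdint.h>

#define SCR_NO_OP	0x80000000u	/* from sym_defs.h above */
#define cpu_to_scr(x)	(x)		/* identity stand-in, sketch only */

int main(void)
{
	/* pretend fragment: the "LED on" instruction (gpreg &= 0xfe) + arg */
	uint32_t led_on[2] = { 0x7c07fe00u, 0u };

	led_on[0] = cpu_to_scr(SCR_NO_OP);	/* chip now skips the LED write */

	printf("patched: 0x%08lx 0x%08lx\n",
	       (unsigned long)led_on[0], (unsigned long)led_on[1]);
	return 0;
}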
+ */ +static void +sym_fw2_patch(struct Scsi_Host *shost) +{ + struct sym_data *sym_data = shost_priv(shost); + struct pci_dev *pdev = sym_data->pdev; + struct sym_hcb *np = sym_data->ncb; + struct sym_fw2a_scr *scripta0; + struct sym_fw2b_scr *scriptb0; + + scripta0 = (struct sym_fw2a_scr *) np->scripta0; + scriptb0 = (struct sym_fw2b_scr *) np->scriptb0; + + /* + * Remove LED support if not needed. + */ + if (!(np->features & FE_LED0)) { + scripta0->idle[0] = cpu_to_scr(SCR_NO_OP); + scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP); + scripta0->start[0] = cpu_to_scr(SCR_NO_OP); + } + +#if SYM_CONF_DMA_ADDRESSING_MODE == 2 + /* + * Remove useless 64 bit DMA specific SCRIPTS, + * when this feature is not available. + */ + if (!use_dac(np)) { + scripta0->is_dmap_dirty[0] = cpu_to_scr(SCR_NO_OP); + scripta0->is_dmap_dirty[1] = 0; + scripta0->is_dmap_dirty[2] = cpu_to_scr(SCR_NO_OP); + scripta0->is_dmap_dirty[3] = 0; + } +#endif + +#ifdef SYM_CONF_IARB_SUPPORT + /* + * If user does not want to use IMMEDIATE ARBITRATION + * when we are reselected while attempting to arbitrate, + * patch the SCRIPTS accordingly with a SCRIPT NO_OP. + */ + if (!SYM_CONF_SET_IARB_ON_ARB_LOST) + scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP); +#endif + /* + * Patch some variable in SCRIPTS. + * - start and done queue initial bus address. + * - target bus address table bus address. + */ + scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba); + scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba); + scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba); + + /* + * Remove the load of SCNTL4 on reselection if not a C10. + */ + if (!(np->features & FE_C10)) { + scripta0->resel_scntl4[0] = cpu_to_scr(SCR_NO_OP); + scripta0->resel_scntl4[1] = cpu_to_scr(0); + } + + /* + * Remove a couple of work-arounds specific to C1010 if + * they are not desirable. See `sym_fw2.h' for more details. + */ + if (!(pdev->device == PCI_DEVICE_ID_LSI_53C1010_66 && + pdev->revision < 0x1 && + np->pciclk_khz < 60000)) { + scripta0->datao_phase[0] = cpu_to_scr(SCR_NO_OP); + scripta0->datao_phase[1] = cpu_to_scr(0); + } + if (!(pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 /* && + pdev->revision < 0xff */)) { + scripta0->sel_done[0] = cpu_to_scr(SCR_NO_OP); + scripta0->sel_done[1] = cpu_to_scr(0); + } + + /* + * Patch some other variables in SCRIPTS. + * These ones are loaded by the SCRIPTS processor. + */ + scriptb0->pm0_data_addr[0] = + cpu_to_scr(np->scripta_ba + + offsetof(struct sym_fw2a_scr, pm0_data)); + scriptb0->pm1_data_addr[0] = + cpu_to_scr(np->scripta_ba + + offsetof(struct sym_fw2a_scr, pm1_data)); +} + +/* + * Fill the data area in scripts. + * To be done for all firmwares. + */ +static void +sym_fw_fill_data (u32 *in, u32 *out) +{ + int i; + + for (i = 0; i < SYM_CONF_MAX_SG; i++) { + *in++ = SCR_CHMOV_TBL ^ SCR_DATA_IN; + *in++ = offsetof (struct sym_dsb, data[i]); + *out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT; + *out++ = offsetof (struct sym_dsb, data[i]); + } +} + +/* + * Setup useful script bus addresses. + * To be done for all firmwares. + */ +static void +sym_fw_setup_bus_addresses(struct sym_hcb *np, struct sym_fw *fw) +{ + u32 *pa; + u_short *po; + int i; + + /* + * Build the bus address table for script A + * from the script A offset table. + */ + po = (u_short *) fw->a_ofs; + pa = (u32 *) &np->fwa_bas; + for (i = 0 ; i < sizeof(np->fwa_bas)/sizeof(u32) ; i++) + pa[i] = np->scripta_ba + po[i]; + + /* + * Same for script B. 
+ */ + po = (u_short *) fw->b_ofs; + pa = (u32 *) &np->fwb_bas; + for (i = 0 ; i < sizeof(np->fwb_bas)/sizeof(u32) ; i++) + pa[i] = np->scriptb_ba + po[i]; + + /* + * Same for script Z. + */ + po = (u_short *) fw->z_ofs; + pa = (u32 *) &np->fwz_bas; + for (i = 0 ; i < sizeof(np->fwz_bas)/sizeof(u32) ; i++) + pa[i] = np->scriptz_ba + po[i]; +} + +#if SYM_CONF_GENERIC_SUPPORT +/* + * Setup routine for firmware #1. + */ +static void +sym_fw1_setup(struct sym_hcb *np, struct sym_fw *fw) +{ + struct sym_fw1a_scr *scripta0; + + scripta0 = (struct sym_fw1a_scr *) np->scripta0; + + /* + * Fill variable parts in scripts. + */ + sym_fw_fill_data(scripta0->data_in, scripta0->data_out); + + /* + * Setup bus addresses used from the C code.. + */ + sym_fw_setup_bus_addresses(np, fw); +} +#endif /* SYM_CONF_GENERIC_SUPPORT */ + +/* + * Setup routine for firmware #2. + */ +static void +sym_fw2_setup(struct sym_hcb *np, struct sym_fw *fw) +{ + struct sym_fw2a_scr *scripta0; + + scripta0 = (struct sym_fw2a_scr *) np->scripta0; + + /* + * Fill variable parts in scripts. + */ + sym_fw_fill_data(scripta0->data_in, scripta0->data_out); + + /* + * Setup bus addresses used from the C code.. + */ + sym_fw_setup_bus_addresses(np, fw); +} + +/* + * Allocate firmware descriptors. + */ +#if SYM_CONF_GENERIC_SUPPORT +static struct sym_fw sym_fw1 = SYM_FW_ENTRY(sym_fw1, "NCR-generic"); +#endif /* SYM_CONF_GENERIC_SUPPORT */ +static struct sym_fw sym_fw2 = SYM_FW_ENTRY(sym_fw2, "LOAD/STORE-based"); + +/* + * Find the most appropriate firmware for a chip. + */ +struct sym_fw * +sym_find_firmware(struct sym_chip *chip) +{ + if (chip->features & FE_LDSTR) + return &sym_fw2; +#if SYM_CONF_GENERIC_SUPPORT + else if (!(chip->features & (FE_PFEN|FE_NOPM|FE_DAC))) + return &sym_fw1; +#endif + else + return NULL; +} + +/* + * Bind a script to physical addresses. + */ +void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len) +{ + u32 opcode, new, old, tmp1, tmp2; + u32 *end, *cur; + int relocs; + + cur = start; + end = start + len/4; + + while (cur < end) { + + opcode = *cur; + + /* + * If we forget to change the length + * in scripts, a field will be + * padded with 0. This is an illegal + * command. + */ + if (opcode == 0) { + printf ("%s: ERROR0 IN SCRIPT at %d.\n", + sym_name(np), (int) (cur-start)); + ++cur; + continue; + } + + /* + * We use the bogus value 0xf00ff00f ;-) + * to reserve data area in SCRIPTS. + */ + if (opcode == SCR_DATA_ZERO) { + *cur++ = 0; + continue; + } + + if (DEBUG_FLAGS & DEBUG_SCRIPT) + printf ("%d: <%x>\n", (int) (cur-start), + (unsigned)opcode); + + /* + * We don't have to decode ALL commands + */ + switch (opcode >> 28) { + case 0xf: + /* + * LOAD / STORE DSA relative, don't relocate. + */ + relocs = 0; + break; + case 0xe: + /* + * LOAD / STORE absolute. + */ + relocs = 1; + break; + case 0xc: + /* + * COPY has TWO arguments. + */ + relocs = 2; + tmp1 = cur[1]; + tmp2 = cur[2]; + if ((tmp1 ^ tmp2) & 3) { + printf ("%s: ERROR1 IN SCRIPT at %d.\n", + sym_name(np), (int) (cur-start)); + } + /* + * If PREFETCH feature not enabled, remove + * the NO FLUSH bit if present. 
+ */ + if ((opcode & SCR_NO_FLUSH) && + !(np->features & FE_PFEN)) { + opcode = (opcode & ~SCR_NO_FLUSH); + } + break; + case 0x0: + /* + * MOVE/CHMOV (absolute address) + */ + if (!(np->features & FE_WIDE)) + opcode = (opcode | OPC_MOVE); + relocs = 1; + break; + case 0x1: + /* + * MOVE/CHMOV (table indirect) + */ + if (!(np->features & FE_WIDE)) + opcode = (opcode | OPC_MOVE); + relocs = 0; + break; +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT + case 0x2: + /* + * MOVE/CHMOV in target role (absolute address) + */ + opcode &= ~0x20000000; + if (!(np->features & FE_WIDE)) + opcode = (opcode & ~OPC_TCHMOVE); + relocs = 1; + break; + case 0x3: + /* + * MOVE/CHMOV in target role (table indirect) + */ + opcode &= ~0x20000000; + if (!(np->features & FE_WIDE)) + opcode = (opcode & ~OPC_TCHMOVE); + relocs = 0; + break; +#endif + case 0x8: + /* + * JUMP / CALL + * don't relocate if relative :-) + */ + if (opcode & 0x00800000) + relocs = 0; + else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/ + relocs = 2; + else + relocs = 1; + break; + case 0x4: + case 0x5: + case 0x6: + case 0x7: + relocs = 1; + break; + default: + relocs = 0; + break; + } + + /* + * Scriptify:) the opcode. + */ + *cur++ = cpu_to_scr(opcode); + + /* + * If no relocation, assume 1 argument + * and just scriptize:) it. + */ + if (!relocs) { + *cur = cpu_to_scr(*cur); + ++cur; + continue; + } + + /* + * Otherwise performs all needed relocations. + */ + while (relocs--) { + old = *cur; + + switch (old & RELOC_MASK) { + case RELOC_REGISTER: + new = (old & ~RELOC_MASK) + np->mmio_ba; + break; + case RELOC_LABEL_A: + new = (old & ~RELOC_MASK) + np->scripta_ba; + break; + case RELOC_LABEL_B: + new = (old & ~RELOC_MASK) + np->scriptb_ba; + break; + case RELOC_SOFTC: + new = (old & ~RELOC_MASK) + np->hcb_ba; + break; + case 0: + /* + * Don't relocate a 0 address. + * They are mostly used for patched or + * script self-modified areas. + */ + if (old == 0) { + new = old; + break; + } + fallthrough; + default: + new = 0; + panic("sym_fw_bind_script: " + "weird relocation %x\n", old); + break; + } + + *cur++ = cpu_to_scr(new); + } + } +} diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.h b/drivers/scsi/sym53c8xx_2/sym_fw.h new file mode 100644 index 000000000..bbba011e7 --- /dev/null +++ b/drivers/scsi/sym53c8xx_2/sym_fw.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family + * of PCI-SCSI IO processors. + * + * Copyright (C) 1999-2001 Gerard Roudier + * + * This driver is derived from the Linux sym53c8xx driver. + * Copyright (C) 1998-2000 Gerard Roudier + * + * The sym53c8xx driver is derived from the ncr53c8xx driver that had been + * a port of the FreeBSD ncr driver to Linux-1.2.13. + * + * The original ncr driver has been written for 386bsd and FreeBSD by + * Wolfgang Stanglmeier + * Stefan Esser + * Copyright (C) 1994 Wolfgang Stanglmeier + * + * Other major contributions: + * + * NVRAM detection and reading. + * Copyright (C) 1997 Richard Waltham + * + *----------------------------------------------------------------------------- + */ + +#ifndef SYM_FW_H +#define SYM_FW_H +/* + * Macro used to generate interfaces for script A. 
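/*
 * Illustration (stand-alone sketch): the relocation step performed by
 * sym_fw_bind_script() above.  A script argument carries a RELOC_* tag in
 * its top nibble (values from sym_fw.h, below); binding strips the tag and
 * adds the matching bus address.  The bus addresses and the 0x0018 offset
 * used here are arbitrary example values, and unknown tags are passed
 * through, whereas the real routine treats them as fatal.
 */
#include <stdio.h>
#include <stdint.h>

#define RELOC_SOFTC	0x40000000u
#define RELOC_LABEL_A	0x50000000u
#define RELOC_REGISTER	0x60000000u
#define RELOC_LABEL_B	0x80000000u
#define RELOC_MASK	0xf0000000u

static uint32_t relocate(uint32_t old, uint32_t mmio_ba, uint32_t scripta_ba,
			 uint32_t scriptb_ba, uint32_t hcb_ba)
{
	switch (old & RELOC_MASK) {
	case RELOC_REGISTER:	return (old & ~RELOC_MASK) + mmio_ba;
	case RELOC_LABEL_A:	return (old & ~RELOC_MASK) + scripta_ba;
	case RELOC_LABEL_B:	return (old & ~RELOC_MASK) + scriptb_ba;
	case RELOC_SOFTC:	return (old & ~RELOC_MASK) + hcb_ba;
	default:		return old;	/* 0 or already absolute */
	}
}

int main(void)
{
	/* e.g. a PADDR_B() argument: RELOC_LABEL_B | offset within script B */
	uint32_t old = RELOC_LABEL_B | 0x0018u;

	printf("bound to 0x%08lx\n",
	       (unsigned long)relocate(old, 0xfeb00000u, 0x12340000u,
				       0x12350000u, 0x12360000u));
	return 0;
}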
+ */ +#define SYM_GEN_FW_A(s) \ + SYM_GEN_A(s, start) SYM_GEN_A(s, getjob_begin) \ + SYM_GEN_A(s, getjob_end) \ + SYM_GEN_A(s, select) SYM_GEN_A(s, wf_sel_done) \ + SYM_GEN_A(s, send_ident) \ + SYM_GEN_A(s, dispatch) SYM_GEN_A(s, init) \ + SYM_GEN_A(s, clrack) SYM_GEN_A(s, complete_error) \ + SYM_GEN_A(s, done) SYM_GEN_A(s, done_end) \ + SYM_GEN_A(s, idle) SYM_GEN_A(s, ungetjob) \ + SYM_GEN_A(s, reselect) \ + SYM_GEN_A(s, resel_tag) SYM_GEN_A(s, resel_dsa) \ + SYM_GEN_A(s, resel_no_tag) \ + SYM_GEN_A(s, data_in) SYM_GEN_A(s, data_in2) \ + SYM_GEN_A(s, data_out) SYM_GEN_A(s, data_out2) \ + SYM_GEN_A(s, pm0_data) SYM_GEN_A(s, pm1_data) + +/* + * Macro used to generate interfaces for script B. + */ +#define SYM_GEN_FW_B(s) \ + SYM_GEN_B(s, no_data) \ + SYM_GEN_B(s, sel_for_abort) SYM_GEN_B(s, sel_for_abort_1) \ + SYM_GEN_B(s, msg_bad) SYM_GEN_B(s, msg_weird) \ + SYM_GEN_B(s, wdtr_resp) SYM_GEN_B(s, send_wdtr) \ + SYM_GEN_B(s, sdtr_resp) SYM_GEN_B(s, send_sdtr) \ + SYM_GEN_B(s, ppr_resp) SYM_GEN_B(s, send_ppr) \ + SYM_GEN_B(s, nego_bad_phase) \ + SYM_GEN_B(s, ident_break) SYM_GEN_B(s, ident_break_atn) \ + SYM_GEN_B(s, sdata_in) SYM_GEN_B(s, resel_bad_lun) \ + SYM_GEN_B(s, bad_i_t_l) SYM_GEN_B(s, bad_i_t_l_q) \ + SYM_GEN_B(s, wsr_ma_helper) + +/* + * Macro used to generate interfaces for script Z. + */ +#define SYM_GEN_FW_Z(s) \ + SYM_GEN_Z(s, snooptest) SYM_GEN_Z(s, snoopend) + +/* + * Generates structure interface that contains + * offsets within script A, B and Z. + */ +#define SYM_GEN_A(s, label) s label; +#define SYM_GEN_B(s, label) s label; +#define SYM_GEN_Z(s, label) s label; +struct sym_fwa_ofs { + SYM_GEN_FW_A(u_short) +}; +struct sym_fwb_ofs { + SYM_GEN_FW_B(u_short) + SYM_GEN_B(u_short, start64) + SYM_GEN_B(u_short, pm_handle) +}; +struct sym_fwz_ofs { + SYM_GEN_FW_Z(u_short) +}; + +/* + * Generates structure interface that contains + * bus addresses within script A, B and Z. + */ +struct sym_fwa_ba { + SYM_GEN_FW_A(u32) +}; +struct sym_fwb_ba { + SYM_GEN_FW_B(u32) + SYM_GEN_B(u32, start64); + SYM_GEN_B(u32, pm_handle); +}; +struct sym_fwz_ba { + SYM_GEN_FW_Z(u32) +}; +#undef SYM_GEN_A +#undef SYM_GEN_B +#undef SYM_GEN_Z + +/* + * Let cc know about the name of the controller data structure. + * We need this for function prototype declarations just below. + */ +struct sym_hcb; + +/* + * Generic structure that defines a firmware. + */ +struct sym_fw { + char *name; /* Name we want to print out */ + u32 *a_base; /* Pointer to script A template */ + int a_size; /* Size of script A */ + struct sym_fwa_ofs + *a_ofs; /* Useful offsets in script A */ + u32 *b_base; /* Pointer to script B template */ + int b_size; /* Size of script B */ + struct sym_fwb_ofs + *b_ofs; /* Useful offsets in script B */ + u32 *z_base; /* Pointer to script Z template */ + int z_size; /* Size of script Z */ + struct sym_fwz_ofs + *z_ofs; /* Useful offsets in script Z */ + /* Setup and patch methods for this firmware */ + void (*setup)(struct sym_hcb *, struct sym_fw *); + void (*patch)(struct Scsi_Host *); +}; + +/* + * Macro used to declare a firmware. + */ +#define SYM_FW_ENTRY(fw, name) \ +{ \ + name, \ + (u32 *) &fw##a_scr, sizeof(fw##a_scr), &fw##a_ofs, \ + (u32 *) &fw##b_scr, sizeof(fw##b_scr), &fw##b_ofs, \ + (u32 *) &fw##z_scr, sizeof(fw##z_scr), &fw##z_ofs, \ + fw##_setup, fw##_patch \ +} + +/* + * Macros used from the C code to get useful + * SCRIPTS bus addresses. 
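/*
 * Illustration (stand-alone sketch): the X-macro technique behind
 * SYM_GEN_FW_A()/SYM_GEN_A() above.  One label list is expanded twice:
 * once with a type argument to declare the template's fields, once with
 * offsetof() to build the matching offset table.  The two labels used
 * here are placeholders, not the driver's real label list.
 */
#include <stdio.h>
#include <stddef.h>

#define GEN_LABELS(s)	GEN(s, start) GEN(s, done)

/* first expansion: a script template with two labeled fragments */
#define GEN(s, label)	s label[2];
struct example_scr { GEN_LABELS(unsigned int) };
#undef GEN

/* second expansion: the offset of each label inside the template */
#define GEN(s, label)	(unsigned short) offsetof(s, label),
static unsigned short example_ofs[] = { GEN_LABELS(struct example_scr) };
#undef GEN

int main(void)
{
	printf("start at %u, done at %u\n",
	       (unsigned int)example_ofs[0], (unsigned int)example_ofs[1]);
	return 0;
}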
+ */ +#define SCRIPTA_BA(np, label) (np->fwa_bas.label) +#define SCRIPTB_BA(np, label) (np->fwb_bas.label) +#define SCRIPTZ_BA(np, label) (np->fwz_bas.label) + +/* + * Macros used by scripts definitions. + * + * HADDR_1 generates a reference to a field of the controller data. + * HADDR_2 generates a reference to a field of the controller data + * with offset. + * RADDR_1 generates a reference to a script processor register. + * RADDR_2 generates a reference to a script processor register + * with offset. + * PADDR_A generates a reference to another part of script A. + * PADDR_B generates a reference to another part of script B. + * + * SYM_GEN_PADDR_A and SYM_GEN_PADDR_B are used to define respectively + * the PADDR_A and PADDR_B macros for each firmware by setting argument + * `s' to the name of the corresponding structure. + * + * SCR_DATA_ZERO is used to allocate a DWORD of data in scripts areas. + */ + +#define RELOC_SOFTC 0x40000000 +#define RELOC_LABEL_A 0x50000000 +#define RELOC_REGISTER 0x60000000 +#define RELOC_LABEL_B 0x80000000 +#define RELOC_MASK 0xf0000000 + +#define HADDR_1(label) (RELOC_SOFTC | offsetof(struct sym_hcb, label)) +#define HADDR_2(label,ofs) (RELOC_SOFTC | \ + (offsetof(struct sym_hcb, label)+(ofs))) +#define RADDR_1(label) (RELOC_REGISTER | REG(label)) +#define RADDR_2(label,ofs) (RELOC_REGISTER | ((REG(label))+(ofs))) + +#define SYM_GEN_PADDR_A(s, label) (RELOC_LABEL_A | offsetof(s, label)) +#define SYM_GEN_PADDR_B(s, label) (RELOC_LABEL_B | offsetof(s, label)) + +#define SCR_DATA_ZERO 0xf00ff00f + +#endif /* SYM_FW_H */ diff --git a/drivers/scsi/sym53c8xx_2/sym_fw1.h b/drivers/scsi/sym53c8xx_2/sym_fw1.h new file mode 100644 index 000000000..d98ec67f0 --- /dev/null +++ b/drivers/scsi/sym53c8xx_2/sym_fw1.h @@ -0,0 +1,1777 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family + * of PCI-SCSI IO processors. + * + * Copyright (C) 1999-2001 Gerard Roudier + * + * This driver is derived from the Linux sym53c8xx driver. + * Copyright (C) 1998-2000 Gerard Roudier + * + * The sym53c8xx driver is derived from the ncr53c8xx driver that had been + * a port of the FreeBSD ncr driver to Linux-1.2.13. + * + * The original ncr driver has been written for 386bsd and FreeBSD by + * Wolfgang Stanglmeier + * Stefan Esser + * Copyright (C) 1994 Wolfgang Stanglmeier + * + * Other major contributions: + * + * NVRAM detection and reading. + * Copyright (C) 1997 Richard Waltham + * + *----------------------------------------------------------------------------- + */ + +/* + * Scripts for SYMBIOS-Processor + * + * We have to know the offsets of all labels before we reach + * them (for forward jumps). Therefore we declare a struct + * here. If you make changes inside the script, + * + * DONT FORGET TO CHANGE THE LENGTHS HERE! + */ + +/* + * Script fragments which are loaded into the on-chip RAM + * of 825A, 875, 876, 895, 895A, 896 and 1010 chips. + * Must not exceed 4K bytes. 
+ */ +struct SYM_FWA_SCR { + u32 start [ 11]; + u32 getjob_begin [ 4]; + u32 _sms_a10 [ 5]; + u32 getjob_end [ 4]; + u32 _sms_a20 [ 4]; +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT + u32 select [ 8]; +#else + u32 select [ 6]; +#endif + u32 _sms_a30 [ 5]; + u32 wf_sel_done [ 2]; + u32 send_ident [ 2]; +#ifdef SYM_CONF_IARB_SUPPORT + u32 select2 [ 8]; +#else + u32 select2 [ 2]; +#endif + u32 command [ 2]; + u32 dispatch [ 28]; + u32 sel_no_cmd [ 10]; + u32 init [ 6]; + u32 clrack [ 4]; + u32 datai_done [ 11]; + u32 datai_done_wsr [ 20]; + u32 datao_done [ 11]; + u32 datao_done_wss [ 6]; + u32 datai_phase [ 5]; + u32 datao_phase [ 5]; + u32 msg_in [ 2]; + u32 msg_in2 [ 10]; +#ifdef SYM_CONF_IARB_SUPPORT + u32 status [ 14]; +#else + u32 status [ 10]; +#endif + u32 complete [ 6]; + u32 complete2 [ 8]; + u32 _sms_a40 [ 12]; + u32 done [ 5]; + u32 _sms_a50 [ 5]; + u32 _sms_a60 [ 2]; + u32 done_end [ 4]; + u32 complete_error [ 5]; + u32 save_dp [ 11]; + u32 restore_dp [ 7]; + u32 disconnect [ 11]; + u32 disconnect2 [ 5]; + u32 _sms_a65 [ 3]; +#ifdef SYM_CONF_IARB_SUPPORT + u32 idle [ 4]; +#else + u32 idle [ 2]; +#endif +#ifdef SYM_CONF_IARB_SUPPORT + u32 ungetjob [ 7]; +#else + u32 ungetjob [ 5]; +#endif +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT + u32 reselect [ 4]; +#else + u32 reselect [ 2]; +#endif + u32 reselected [ 19]; + u32 _sms_a70 [ 6]; + u32 _sms_a80 [ 4]; + u32 reselected1 [ 25]; + u32 _sms_a90 [ 4]; + u32 resel_lun0 [ 7]; + u32 _sms_a100 [ 4]; + u32 resel_tag [ 8]; +#if SYM_CONF_MAX_TASK*4 > 512 + u32 _sms_a110 [ 23]; +#elif SYM_CONF_MAX_TASK*4 > 256 + u32 _sms_a110 [ 17]; +#else + u32 _sms_a110 [ 13]; +#endif + u32 _sms_a120 [ 2]; + u32 resel_go [ 4]; + u32 _sms_a130 [ 7]; + u32 resel_dsa [ 2]; + u32 resel_dsa1 [ 4]; + u32 _sms_a140 [ 7]; + u32 resel_no_tag [ 4]; + u32 _sms_a145 [ 7]; + u32 data_in [SYM_CONF_MAX_SG * 2]; + u32 data_in2 [ 4]; + u32 data_out [SYM_CONF_MAX_SG * 2]; + u32 data_out2 [ 4]; + u32 pm0_data [ 12]; + u32 pm0_data_out [ 6]; + u32 pm0_data_end [ 7]; + u32 pm_data_end [ 4]; + u32 _sms_a150 [ 4]; + u32 pm1_data [ 12]; + u32 pm1_data_out [ 6]; + u32 pm1_data_end [ 9]; +}; + +/* + * Script fragments which stay in main memory for all chips + * except for chips that support 8K on-chip RAM. + */ +struct SYM_FWB_SCR { + u32 no_data [ 2]; +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT + u32 sel_for_abort [ 18]; +#else + u32 sel_for_abort [ 16]; +#endif + u32 sel_for_abort_1 [ 2]; + u32 msg_in_etc [ 12]; + u32 msg_received [ 5]; + u32 msg_weird_seen [ 5]; + u32 msg_extended [ 17]; + u32 _sms_b10 [ 4]; + u32 msg_bad [ 6]; + u32 msg_weird [ 4]; + u32 msg_weird1 [ 8]; + u32 wdtr_resp [ 6]; + u32 send_wdtr [ 4]; + u32 sdtr_resp [ 6]; + u32 send_sdtr [ 4]; + u32 ppr_resp [ 6]; + u32 send_ppr [ 4]; + u32 nego_bad_phase [ 4]; + u32 msg_out [ 4]; + u32 msg_out_done [ 4]; + u32 data_ovrun [ 3]; + u32 data_ovrun1 [ 22]; + u32 data_ovrun2 [ 8]; + u32 abort_resel [ 16]; + u32 resend_ident [ 4]; + u32 ident_break [ 4]; + u32 ident_break_atn [ 4]; + u32 sdata_in [ 6]; + u32 resel_bad_lun [ 4]; + u32 bad_i_t_l [ 4]; + u32 bad_i_t_l_q [ 4]; + u32 bad_status [ 7]; + u32 wsr_ma_helper [ 4]; + + /* Data area */ + u32 zero [ 1]; + u32 scratch [ 1]; + u32 scratch1 [ 1]; + u32 prev_done [ 1]; + u32 done_pos [ 1]; + u32 nextjob [ 1]; + u32 startpos [ 1]; + u32 targtbl [ 1]; +}; + +/* + * Script fragments used at initialisations. + * Only runs out of main memory. 
+ */ +struct SYM_FWZ_SCR { + u32 snooptest [ 9]; + u32 snoopend [ 2]; +}; + +static struct SYM_FWA_SCR SYM_FWA_SCR = { +/*--------------------------< START >----------------------------*/ { + /* + * Switch the LED on. + * Will be patched with a NO_OP if LED + * not needed or not desired. + */ + SCR_REG_REG (gpreg, SCR_AND, 0xfe), + 0, + /* + * Clear SIGP. + */ + SCR_FROM_REG (ctest2), + 0, + /* + * Stop here if the C code wants to perform + * some error recovery procedure manually. + * (Indicate this by setting SEM in ISTAT) + */ + SCR_FROM_REG (istat), + 0, + /* + * Report to the C code the next position in + * the start queue the SCRIPTS will schedule. + * The C code must not change SCRATCHA. + */ + SCR_COPY (4), + PADDR_B (startpos), + RADDR_1 (scratcha), + SCR_INT ^ IFTRUE (MASK (SEM, SEM)), + SIR_SCRIPT_STOPPED, + /* + * Start the next job. + * + * @DSA = start point for this job. + * SCRATCHA = address of this job in the start queue. + * + * We will restore startpos with SCRATCHA if we fails the + * arbitration or if it is the idle job. + * + * The below GETJOB_BEGIN to GETJOB_END section of SCRIPTS + * is a critical path. If it is partially executed, it then + * may happen that the job address is not yet in the DSA + * and the next queue position points to the next JOB. + */ +}/*-------------------------< GETJOB_BEGIN >---------------------*/,{ + /* + * Copy to a fixed location both the next STARTPOS + * and the current JOB address, using self modifying + * SCRIPTS. + */ + SCR_COPY (4), + RADDR_1 (scratcha), + PADDR_A (_sms_a10), + SCR_COPY (8), +}/*-------------------------< _SMS_A10 >-------------------------*/,{ + 0, + PADDR_B (nextjob), + /* + * Move the start address to TEMP using self- + * modifying SCRIPTS and jump indirectly to + * that address. + */ + SCR_COPY (4), + PADDR_B (nextjob), + RADDR_1 (dsa), +}/*-------------------------< GETJOB_END >-----------------------*/,{ + SCR_COPY (4), + RADDR_1 (dsa), + PADDR_A (_sms_a20), + SCR_COPY (4), +}/*-------------------------< _SMS_A20 >-------------------------*/,{ + 0, + RADDR_1 (temp), + SCR_RETURN, + 0, +}/*-------------------------< SELECT >---------------------------*/,{ + /* + * DSA contains the address of a scheduled + * data structure. + * + * SCRATCHA contains the address of the start queue + * entry which points to the next job. + * + * Set Initiator mode. + * + * (Target mode is left as an exercise for the reader) + */ +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT + SCR_CLR (SCR_TRG), + 0, +#endif + /* + * And try to select this target. + */ + SCR_SEL_TBL_ATN ^ offsetof (struct sym_dsb, select), + PADDR_A (ungetjob), + /* + * Now there are 4 possibilities: + * + * (1) The chip loses arbitration. + * This is ok, because it will try again, + * when the bus becomes idle. + * (But beware of the timeout function!) + * + * (2) The chip is reselected. + * Then the script processor takes the jump + * to the RESELECT label. + * + * (3) The chip wins arbitration. + * Then it will execute SCRIPTS instruction until + * the next instruction that checks SCSI phase. + * Then will stop and wait for selection to be + * complete or selection time-out to occur. + * + * After having won arbitration, the SCRIPTS + * processor is able to execute instructions while + * the SCSI core is performing SCSI selection. + */ + + /* + * Copy the CCB header to a fixed location + * in the HCB using self-modifying SCRIPTS. 
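/*
 * Illustration (stand-alone sketch): the self-modifying pattern behind the
 * _SMS_A* labels above.  A first COPY writes a value only known at run time
 * (here a pretend job address) into the argument slot of a second COPY, so
 * the second COPY operates on an address the script could not contain when
 * it was built.  This is a host-side model: array indices stand in for bus
 * addresses and all numbers are examples.
 */
#include <stdio.h>
#include <stdint.h>

#define OPC_COPY 0xc0000000u

int main(void)
{
	/* two COPY instructions, three dwords each: opcode, source, destination */
	uint32_t frag[6] = {
		OPC_COPY | 4, 0xA0u /* "SCRATCHA"          */, 0u /* -> frag[4] */,
		OPC_COPY | 8, 0u   /* patched at run time */, 0xB0u /* "nextjob" */,
	};
	uint32_t job_address = 0x00123400u;	/* pretend start-queue entry */

	/* the effect of executing the first COPY: the second one is patched */
	frag[4] = job_address;

	printf("second COPY now reads from 0x%08lx\n", (unsigned long)frag[4]);
	return 0;
}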
+ */ + SCR_COPY (4), + RADDR_1 (dsa), + PADDR_A (_sms_a30), + SCR_COPY (sizeof(struct sym_ccbh)), +}/*-------------------------< _SMS_A30 >-------------------------*/,{ + 0, + HADDR_1 (ccb_head), + /* + * Initialize the status register + */ + SCR_COPY (4), + HADDR_1 (ccb_head.status), + RADDR_1 (scr0), +}/*-------------------------< WF_SEL_DONE >----------------------*/,{ + SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)), + SIR_SEL_ATN_NO_MSG_OUT, +}/*-------------------------< SEND_IDENT >-----------------------*/,{ + /* + * Selection complete. + * Send the IDENTIFY and possibly the TAG message + * and negotiation message if present. + */ + SCR_MOVE_TBL ^ SCR_MSG_OUT, + offsetof (struct sym_dsb, smsg), +}/*-------------------------< SELECT2 >--------------------------*/,{ +#ifdef SYM_CONF_IARB_SUPPORT + /* + * Set IMMEDIATE ARBITRATION if we have been given + * a hint to do so. (Some job to do after this one). + */ + SCR_FROM_REG (HF_REG), + 0, + SCR_JUMPR ^ IFFALSE (MASK (HF_HINT_IARB, HF_HINT_IARB)), + 8, + SCR_REG_REG (scntl1, SCR_OR, IARB), + 0, +#endif + /* + * Anticipate the COMMAND phase. + * This is the PHASE we expect at this point. + */ + SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)), + PADDR_A (sel_no_cmd), +}/*-------------------------< COMMAND >--------------------------*/,{ + /* + * ... and send the command + */ + SCR_MOVE_TBL ^ SCR_COMMAND, + offsetof (struct sym_dsb, cmd), +}/*-------------------------< DISPATCH >-------------------------*/,{ + /* + * MSG_IN is the only phase that shall be + * entered at least once for each (re)selection. + * So we test it first. + */ + SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)), + PADDR_A (msg_in), + SCR_JUMP ^ IFTRUE (IF (SCR_DATA_OUT)), + PADDR_A (datao_phase), + SCR_JUMP ^ IFTRUE (IF (SCR_DATA_IN)), + PADDR_A (datai_phase), + SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)), + PADDR_A (status), + SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)), + PADDR_A (command), + SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)), + PADDR_B (msg_out), + /* + * Discard as many illegal phases as + * required and tell the C code about. + */ + SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_OUT)), + 16, + SCR_MOVE_ABS (1) ^ SCR_ILG_OUT, + HADDR_1 (scratch), + SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_OUT)), + -16, + SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_IN)), + 16, + SCR_MOVE_ABS (1) ^ SCR_ILG_IN, + HADDR_1 (scratch), + SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_IN)), + -16, + SCR_INT, + SIR_BAD_PHASE, + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< SEL_NO_CMD >-----------------------*/,{ + /* + * The target does not switch to command + * phase after IDENTIFY has been sent. + * + * If it stays in MSG OUT phase send it + * the IDENTIFY again. + */ + SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)), + PADDR_B (resend_ident), + /* + * If target does not switch to MSG IN phase + * and we sent a negotiation, assert the + * failure immediately. + */ + SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)), + PADDR_A (dispatch), + SCR_FROM_REG (HS_REG), + 0, + SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), + SIR_NEGO_FAILED, + /* + * Jump to dispatcher. + */ + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< INIT >-----------------------------*/,{ + /* + * Wait for the SCSI RESET signal to be + * inactive before restarting operations, + * since the chip may hang on SEL_ATN + * if SCSI RESET is active. + */ + SCR_FROM_REG (sstat0), + 0, + SCR_JUMPR ^ IFTRUE (MASK (IRST, IRST)), + -16, + SCR_JUMP, + PADDR_A (start), +}/*-------------------------< CLRACK >---------------------------*/,{ + /* + * Terminate possible pending message phase. 
+ */ + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< DATAI_DONE >-----------------------*/,{ + /* + * Save current pointer to LASTP. + */ + SCR_COPY (4), + RADDR_1 (temp), + HADDR_1 (ccb_head.lastp), + /* + * If the SWIDE is not full, jump to dispatcher. + * We anticipate a STATUS phase. + */ + SCR_FROM_REG (scntl2), + 0, + SCR_JUMP ^ IFTRUE (MASK (WSR, WSR)), + PADDR_A (datai_done_wsr), + SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)), + PADDR_A (status), + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< DATAI_DONE_WSR >-------------------*/,{ + /* + * The SWIDE is full. + * Clear this condition. + */ + SCR_REG_REG (scntl2, SCR_OR, WSR), + 0, + /* + * We are expecting an IGNORE RESIDUE message + * from the device, otherwise we are in data + * overrun condition. Check against MSG_IN phase. + */ + SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)), + SIR_SWIDE_OVERRUN, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), + PADDR_A (dispatch), + /* + * We are in MSG_IN phase, + * Read the first byte of the message. + * If it is not an IGNORE RESIDUE message, + * signal overrun and jump to message + * processing. + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + HADDR_1 (msgin[0]), + SCR_INT ^ IFFALSE (DATA (M_IGN_RESIDUE)), + SIR_SWIDE_OVERRUN, + SCR_JUMP ^ IFFALSE (DATA (M_IGN_RESIDUE)), + PADDR_A (msg_in2), + /* + * We got the message we expected. + * Read the 2nd byte, and jump to dispatcher. + */ + SCR_CLR (SCR_ACK), + 0, + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + HADDR_1 (msgin[1]), + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< DATAO_DONE >-----------------------*/,{ + /* + * Save current pointer to LASTP. + */ + SCR_COPY (4), + RADDR_1 (temp), + HADDR_1 (ccb_head.lastp), + /* + * If the SODL is not full jump to dispatcher. + * We anticipate a STATUS phase. + */ + SCR_FROM_REG (scntl2), + 0, + SCR_JUMP ^ IFTRUE (MASK (WSS, WSS)), + PADDR_A (datao_done_wss), + SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)), + PADDR_A (status), + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< DATAO_DONE_WSS >-------------------*/,{ + /* + * The SODL is full, clear this condition. + */ + SCR_REG_REG (scntl2, SCR_OR, WSS), + 0, + /* + * And signal a DATA UNDERRUN condition + * to the C code. + */ + SCR_INT, + SIR_SODL_UNDERRUN, + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< DATAI_PHASE >----------------------*/,{ + /* + * Jump to current pointer. + */ + SCR_COPY (4), + HADDR_1 (ccb_head.lastp), + RADDR_1 (temp), + SCR_RETURN, + 0, +}/*-------------------------< DATAO_PHASE >----------------------*/,{ + /* + * Jump to current pointer. + */ + SCR_COPY (4), + HADDR_1 (ccb_head.lastp), + RADDR_1 (temp), + SCR_RETURN, + 0, +}/*-------------------------< MSG_IN >---------------------------*/,{ + /* + * Get the first byte of the message. + * + * The script processor doesn't negate the + * ACK signal after this transfer. + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + HADDR_1 (msgin[0]), +}/*-------------------------< MSG_IN2 >--------------------------*/,{ + /* + * Check first against 1 byte messages + * that we handle from SCRIPTS. + */ + SCR_JUMP ^ IFTRUE (DATA (M_COMPLETE)), + PADDR_A (complete), + SCR_JUMP ^ IFTRUE (DATA (M_DISCONNECT)), + PADDR_A (disconnect), + SCR_JUMP ^ IFTRUE (DATA (M_SAVE_DP)), + PADDR_A (save_dp), + SCR_JUMP ^ IFTRUE (DATA (M_RESTORE_DP)), + PADDR_A (restore_dp), + /* + * We handle all other messages from the + * C code, so no need to waste on-chip RAM + * for those ones. 
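/*
 * Illustration (stand-alone sketch): how the DATA()/MASK() condition
 * helpers encode their compare bytes.  MASK() stores the mask byte
 * complemented; the example reuses WSR (0x01 in SCNTL2) as in the
 * "MASK (WSR, WSR)" test of the DATAI_DONE fragment above.  Macro
 * bodies are copied from sym_defs.h.
 */
#include <stdio.h>

#define DATA(D)		(0x00040000 | ((D) & 0xff))
#define MASK(D, M)	(0x00040000 | ((((M) ^ 0xff) & 0xff) << 8ul) | ((D) & 0xff))

#define WSR 0x01	/* from the SCNTL2 definitions above */

int main(void)
{
	printf("DATA(WSR)      = 0x%08lx\n", (unsigned long)DATA(WSR));		/* 0x00040001 */
	printf("MASK(WSR, WSR) = 0x%08lx\n", (unsigned long)MASK(WSR, WSR));	/* 0x0004fe01 */
	return 0;
}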
+ */ + SCR_JUMP, + PADDR_B (msg_in_etc), +}/*-------------------------< STATUS >---------------------------*/,{ + /* + * get the status + */ + SCR_MOVE_ABS (1) ^ SCR_STATUS, + HADDR_1 (scratch), +#ifdef SYM_CONF_IARB_SUPPORT + /* + * If STATUS is not GOOD, clear IMMEDIATE ARBITRATION, + * since we may have to tamper the start queue from + * the C code. + */ + SCR_JUMPR ^ IFTRUE (DATA (S_GOOD)), + 8, + SCR_REG_REG (scntl1, SCR_AND, ~IARB), + 0, +#endif + /* + * save status to scsi_status. + * mark as complete. + */ + SCR_TO_REG (SS_REG), + 0, + SCR_LOAD_REG (HS_REG, HS_COMPLETE), + 0, + /* + * Anticipate the MESSAGE PHASE for + * the TASK COMPLETE message. + */ + SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)), + PADDR_A (msg_in), + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< COMPLETE >-------------------------*/,{ + /* + * Complete message. + * + * When we terminate the cycle by clearing ACK, + * the target may disconnect immediately. + * + * We don't want to be told of an "unexpected disconnect", + * so we disable this feature. + */ + SCR_REG_REG (scntl2, SCR_AND, 0x7f), + 0, + /* + * Terminate cycle ... + */ + SCR_CLR (SCR_ACK|SCR_ATN), + 0, + /* + * ... and wait for the disconnect. + */ + SCR_WAIT_DISC, + 0, +}/*-------------------------< COMPLETE2 >------------------------*/,{ + /* + * Save host status. + */ + SCR_COPY (4), + RADDR_1 (scr0), + HADDR_1 (ccb_head.status), + /* + * Move back the CCB header using self-modifying + * SCRIPTS. + */ + SCR_COPY (4), + RADDR_1 (dsa), + PADDR_A (_sms_a40), + SCR_COPY (sizeof(struct sym_ccbh)), + HADDR_1 (ccb_head), +}/*-------------------------< _SMS_A40 >-------------------------*/,{ + 0, + /* + * Some bridges may reorder DMA writes to memory. + * We donnot want the CPU to deal with completions + * without all the posted write having been flushed + * to memory. This DUMMY READ should flush posted + * buffers prior to the CPU having to deal with + * completions. + */ + SCR_COPY (4), /* DUMMY READ */ + HADDR_1 (ccb_head.status), + RADDR_1 (scr0), + /* + * If command resulted in not GOOD status, + * call the C code if needed. + */ + SCR_FROM_REG (SS_REG), + 0, + SCR_CALL ^ IFFALSE (DATA (S_GOOD)), + PADDR_B (bad_status), + /* + * If we performed an auto-sense, call + * the C code to synchronyze task aborts + * with UNIT ATTENTION conditions. + */ + SCR_FROM_REG (HF_REG), + 0, + SCR_JUMP ^ IFFALSE (MASK (0 ,(HF_SENSE|HF_EXT_ERR))), + PADDR_A (complete_error), +}/*-------------------------< DONE >-----------------------------*/,{ + /* + * Copy the DSA to the DONE QUEUE and + * signal completion to the host. + * If we are interrupted between DONE + * and DONE_END, we must reset, otherwise + * the completed CCB may be lost. + */ + SCR_COPY (4), + PADDR_B (done_pos), + PADDR_A (_sms_a50), + SCR_COPY (4), + RADDR_1 (dsa), +}/*-------------------------< _SMS_A50 >-------------------------*/,{ + 0, + SCR_COPY (4), + PADDR_B (done_pos), + PADDR_A (_sms_a60), + /* + * The instruction below reads the DONE QUEUE next + * free position from memory. + * In addition it ensures that all PCI posted writes + * are flushed and so the DSA value of the done + * CCB is visible by the CPU before INTFLY is raised. 
+ */ + SCR_COPY (8), +}/*-------------------------< _SMS_A60 >-------------------------*/,{ + 0, + PADDR_B (prev_done), +}/*-------------------------< DONE_END >-------------------------*/,{ + SCR_INT_FLY, + 0, + SCR_JUMP, + PADDR_A (start), +}/*-------------------------< COMPLETE_ERROR >-------------------*/,{ + SCR_COPY (4), + PADDR_B (startpos), + RADDR_1 (scratcha), + SCR_INT, + SIR_COMPLETE_ERROR, +}/*-------------------------< SAVE_DP >--------------------------*/,{ + /* + * Clear ACK immediately. + * No need to delay it. + */ + SCR_CLR (SCR_ACK), + 0, + /* + * Keep track we received a SAVE DP, so + * we will switch to the other PM context + * on the next PM since the DP may point + * to the current PM context. + */ + SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED), + 0, + /* + * SAVE_DP message: + * Copy LASTP to SAVEP. + */ + SCR_COPY (4), + HADDR_1 (ccb_head.lastp), + HADDR_1 (ccb_head.savep), + /* + * Anticipate the MESSAGE PHASE for + * the DISCONNECT message. + */ + SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)), + PADDR_A (msg_in), + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< RESTORE_DP >-----------------------*/,{ + /* + * Clear ACK immediately. + * No need to delay it. + */ + SCR_CLR (SCR_ACK), + 0, + /* + * Copy SAVEP to LASTP. + */ + SCR_COPY (4), + HADDR_1 (ccb_head.savep), + HADDR_1 (ccb_head.lastp), + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< DISCONNECT >-----------------------*/,{ + /* + * DISCONNECTing ... + * + * disable the "unexpected disconnect" feature, + * and remove the ACK signal. + */ + SCR_REG_REG (scntl2, SCR_AND, 0x7f), + 0, + SCR_CLR (SCR_ACK|SCR_ATN), + 0, + /* + * Wait for the disconnect. + */ + SCR_WAIT_DISC, + 0, + /* + * Status is: DISCONNECTED. + */ + SCR_LOAD_REG (HS_REG, HS_DISCONNECT), + 0, + /* + * Save host status. + */ + SCR_COPY (4), + RADDR_1 (scr0), + HADDR_1 (ccb_head.status), +}/*-------------------------< DISCONNECT2 >----------------------*/,{ + /* + * Move back the CCB header using self-modifying + * SCRIPTS. + */ + SCR_COPY (4), + RADDR_1 (dsa), + PADDR_A (_sms_a65), + SCR_COPY (sizeof(struct sym_ccbh)), + HADDR_1 (ccb_head), +}/*-------------------------< _SMS_A65 >-------------------------*/,{ + 0, + SCR_JUMP, + PADDR_A (start), +}/*-------------------------< IDLE >-----------------------------*/,{ + /* + * Nothing to do? + * Switch the LED off and wait for reselect. + * Will be patched with a NO_OP if LED + * not needed or not desired. + */ + SCR_REG_REG (gpreg, SCR_OR, 0x01), + 0, +#ifdef SYM_CONF_IARB_SUPPORT + SCR_JUMPR, + 8, +#endif +}/*-------------------------< UNGETJOB >-------------------------*/,{ +#ifdef SYM_CONF_IARB_SUPPORT + /* + * Set IMMEDIATE ARBITRATION, for the next time. + * This will give us better chance to win arbitration + * for the job we just wanted to do. + */ + SCR_REG_REG (scntl1, SCR_OR, IARB), + 0, +#endif + /* + * We are not able to restart the SCRIPTS if we are + * interrupted and these instruction haven't been + * all executed. BTW, this is very unlikely to + * happen, but we check that from the C code. + */ + SCR_LOAD_REG (dsa, 0xff), + 0, + SCR_COPY (4), + RADDR_1 (scratcha), + PADDR_B (startpos), +}/*-------------------------< RESELECT >-------------------------*/,{ +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT + /* + * Make sure we are in initiator mode. + */ + SCR_CLR (SCR_TRG), + 0, +#endif + /* + * Sleep waiting for a reselection. + */ + SCR_WAIT_RESEL, + PADDR_A(start), +}/*-------------------------< RESELECTED >-----------------------*/,{ + /* + * Switch the LED on. 
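SAVE_DP and RESTORE_DP above implement the SCSI data-pointer protocol purely by copying between two CCB header fields: LASTP (the current pointer) and SAVEP (the last saved pointer). In plain C terms, with a cut-down stand-in for the header (illustrative only):

	#include <stdint.h>

	typedef uint32_t u32;

	/* Minimal stand-in for the CCB header fields the script touches. */
	struct toy_ccb_head {
		u32 savep;	/* pointer saved on SAVE DATA POINTER */
		u32 lastp;	/* pointer at the last interruption   */
	};

	/* SAVE DATA POINTER: remember where the transfer stopped. */
	static void toy_save_dp(struct toy_ccb_head *h)
	{
		h->savep = h->lastp;
	}

	/* RESTORE POINTERS: resume from the last saved pointer. */
	static void toy_restore_dp(struct toy_ccb_head *h)
	{
		h->lastp = h->savep;
	}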
+ * Will be patched with a NO_OP if LED + * not needed or not desired. + */ + SCR_REG_REG (gpreg, SCR_AND, 0xfe), + 0, + /* + * load the target id into the sdid + */ + SCR_REG_SFBR (ssid, SCR_AND, 0x8F), + 0, + SCR_TO_REG (sdid), + 0, + /* + * Load the target control block address + */ + SCR_COPY (4), + PADDR_B (targtbl), + RADDR_1 (dsa), + SCR_SFBR_REG (dsa, SCR_SHL, 0), + 0, + SCR_REG_REG (dsa, SCR_SHL, 0), + 0, + SCR_REG_REG (dsa, SCR_AND, 0x3c), + 0, + SCR_COPY (4), + RADDR_1 (dsa), + PADDR_A (_sms_a70), + SCR_COPY (4), +}/*-------------------------< _SMS_A70 >-------------------------*/,{ + 0, + RADDR_1 (dsa), + /* + * Copy the TCB header to a fixed place in + * the HCB. + */ + SCR_COPY (4), + RADDR_1 (dsa), + PADDR_A (_sms_a80), + SCR_COPY (sizeof(struct sym_tcbh)), +}/*-------------------------< _SMS_A80 >-------------------------*/,{ + 0, + HADDR_1 (tcb_head), + /* + * We expect MESSAGE IN phase. + * If not, get help from the C code. + */ + SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)), + SIR_RESEL_NO_MSG_IN, +}/*-------------------------< RESELECTED1 >----------------------*/,{ + /* + * Load the synchronous transfer registers. + */ + SCR_COPY (1), + HADDR_1 (tcb_head.wval), + RADDR_1 (scntl3), + SCR_COPY (1), + HADDR_1 (tcb_head.sval), + RADDR_1 (sxfer), + /* + * Get the IDENTIFY message. + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + HADDR_1 (msgin), + /* + * If IDENTIFY LUN #0, use a faster path + * to find the LCB structure. + */ + SCR_JUMP ^ IFTRUE (MASK (0x80, 0xbf)), + PADDR_A (resel_lun0), + /* + * If message isn't an IDENTIFY, + * tell the C code about. + */ + SCR_INT ^ IFFALSE (MASK (0x80, 0x80)), + SIR_RESEL_NO_IDENTIFY, + /* + * It is an IDENTIFY message, + * Load the LUN control block address. + */ + SCR_COPY (4), + HADDR_1 (tcb_head.luntbl_sa), + RADDR_1 (dsa), + SCR_SFBR_REG (dsa, SCR_SHL, 0), + 0, + SCR_REG_REG (dsa, SCR_SHL, 0), + 0, + SCR_REG_REG (dsa, SCR_AND, 0xfc), + 0, + SCR_COPY (4), + RADDR_1 (dsa), + PADDR_A (_sms_a90), + SCR_COPY (4), +}/*-------------------------< _SMS_A90 >-------------------------*/,{ + 0, + RADDR_1 (dsa), + SCR_JUMPR, + 12, +}/*-------------------------< RESEL_LUN0 >-----------------------*/,{ + /* + * LUN 0 special case (but usual one :)) + */ + SCR_COPY (4), + HADDR_1 (tcb_head.lun0_sa), + RADDR_1 (dsa), + /* + * Jump indirectly to the reselect action for this LUN. + * (lcb.head.resel_sa assumed at offset zero of lcb). + */ + SCR_COPY (4), + RADDR_1 (dsa), + PADDR_A (_sms_a100), + SCR_COPY (4), +}/*-------------------------< _SMS_A100 >------------------------*/,{ + 0, + RADDR_1 (temp), + SCR_RETURN, + 0, + /* In normal situations, we jump to RESEL_TAG or RESEL_NO_TAG */ +}/*-------------------------< RESEL_TAG >------------------------*/,{ + /* + * ACK the IDENTIFY previously received. + */ + SCR_CLR (SCR_ACK), + 0, + /* + * It shall be a tagged command. + * Read SIMPLE+TAG. + * The C code will deal with errors. + * Aggressive optimization, isn't it? :) + */ + SCR_MOVE_ABS (2) ^ SCR_MSG_IN, + HADDR_1 (msgin), + /* + * Copy the LCB header to a fixed place in + * the HCB using self-modifying SCRIPTS. + */ + SCR_COPY (4), + RADDR_1 (dsa), + PADDR_A (_sms_a110), + SCR_COPY (sizeof(struct sym_lcbh)), +}/*-------------------------< _SMS_A110 >------------------------*/,{ + 0, + HADDR_1 (lcb_head), + /* + * Load the pointer to the tagged task + * table for this LUN. + */ + SCR_COPY (4), + HADDR_1 (lcb_head.itlq_tbl_sa), + RADDR_1 (dsa), + /* + * The SIDL still contains the TAG value. + * Aggressive optimization, isn't it? 
:):) + */ + SCR_REG_SFBR (sidl, SCR_SHL, 0), + 0, +#if SYM_CONF_MAX_TASK*4 > 512 + SCR_JUMPR ^ IFFALSE (CARRYSET), + 8, + SCR_REG_REG (dsa1, SCR_OR, 2), + 0, + SCR_REG_REG (sfbr, SCR_SHL, 0), + 0, + SCR_JUMPR ^ IFFALSE (CARRYSET), + 8, + SCR_REG_REG (dsa1, SCR_OR, 1), + 0, +#elif SYM_CONF_MAX_TASK*4 > 256 + SCR_JUMPR ^ IFFALSE (CARRYSET), + 8, + SCR_REG_REG (dsa1, SCR_OR, 1), + 0, +#endif + /* + * Retrieve the DSA of this task. + * JUMP indirectly to the restart point of the CCB. + */ + SCR_SFBR_REG (dsa, SCR_AND, 0xfc), + 0, + SCR_COPY (4), + RADDR_1 (dsa), + PADDR_A (_sms_a120), + SCR_COPY (4), +}/*-------------------------< _SMS_A120 >------------------------*/,{ + 0, + RADDR_1 (dsa), +}/*-------------------------< RESEL_GO >-------------------------*/,{ + SCR_COPY (4), + RADDR_1 (dsa), + PADDR_A (_sms_a130), + /* + * Move 'ccb.phys.head.go' action to + * scratch/scratch1. So scratch1 will + * contain the 'restart' field of the + * 'go' structure. + */ + SCR_COPY (8), +}/*-------------------------< _SMS_A130 >------------------------*/,{ + 0, + PADDR_B (scratch), + SCR_COPY (4), + PADDR_B (scratch1), /* phys.head.go.restart */ + RADDR_1 (temp), + SCR_RETURN, + 0, + /* In normal situations we branch to RESEL_DSA */ +}/*-------------------------< RESEL_DSA >------------------------*/,{ + /* + * ACK the IDENTIFY or TAG previously received. + */ + SCR_CLR (SCR_ACK), + 0, +}/*-------------------------< RESEL_DSA1 >-----------------------*/,{ + /* + * Copy the CCB header to a fixed location + * in the HCB using self-modifying SCRIPTS. + */ + SCR_COPY (4), + RADDR_1 (dsa), + PADDR_A (_sms_a140), + SCR_COPY (sizeof(struct sym_ccbh)), +}/*-------------------------< _SMS_A140 >------------------------*/,{ + 0, + HADDR_1 (ccb_head), + /* + * Initialize the status register + */ + SCR_COPY (4), + HADDR_1 (ccb_head.status), + RADDR_1 (scr0), + /* + * Jump to dispatcher. + */ + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< RESEL_NO_TAG >---------------------*/,{ + /* + * Copy the LCB header to a fixed place in + * the HCB using self-modifying SCRIPTS. + */ + SCR_COPY (4), + RADDR_1 (dsa), + PADDR_A (_sms_a145), + SCR_COPY (sizeof(struct sym_lcbh)), +}/*-------------------------< _SMS_A145 >------------------------*/,{ + 0, + HADDR_1 (lcb_head), + /* + * Load the DSA with the unique ITL task. + */ + SCR_COPY (4), + HADDR_1 (lcb_head.itl_task_sa), + RADDR_1 (dsa), + SCR_JUMP, + PADDR_A (resel_go), +}/*-------------------------< DATA_IN >--------------------------*/,{ +/* + * Because the size depends on the + * #define SYM_CONF_MAX_SG parameter, + * it is filled in at runtime. + * + * ##===========< i=0; i========= + * || SCR_CHMOV_TBL ^ SCR_DATA_IN, + * || offsetof (struct sym_dsb, data[ i]), + * ##========================================== + */ +0 +}/*-------------------------< DATA_IN2 >-------------------------*/,{ + SCR_CALL, + PADDR_A (datai_done), + SCR_JUMP, + PADDR_B (data_ovrun), +}/*-------------------------< DATA_OUT >-------------------------*/,{ +/* + * Because the size depends on the + * #define SYM_CONF_MAX_SG parameter, + * it is filled in at runtime. 
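The shift/OR sequence in RESEL_TAG above is simply "offset = tag * 4" carried out with 8-bit registers: SFBR holds the tag, each SCR_SHL pushes one bit out into carry, and when the tagged-task table is larger than 256 (or 512) bytes the carries are folded into DSA byte 1. The arithmetic it implements, as a sketch with hypothetical names (the real table alignment is assumed, as in the driver):

	#include <stdint.h>

	typedef uint32_t u32;

	/*
	 * The tagged-task table holds one 4-byte bus address per tag.
	 * RESEL_TAG forms the byte offset tag*4 and folds it into the
	 * low bytes of DSA, which already holds the (aligned) table base.
	 */
	static u32 toy_itlq_entry_address(u32 itlq_tbl_base, uint8_t tag)
	{
		u32 offset = (u32)tag * 4;	/* the two SCR_SHLs        */

		/* dsa0 receives (offset & 0xfc); the carries ORed into
		 * dsa1 by the script are bits 8 and 9 of this offset.  */
		return itlq_tbl_base + offset;
	}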
+ * + * ##===========< i=0; i========= + * || SCR_CHMOV_TBL ^ SCR_DATA_OUT, + * || offsetof (struct sym_dsb, data[ i]), + * ##========================================== + */ +0 +}/*-------------------------< DATA_OUT2 >------------------------*/,{ + SCR_CALL, + PADDR_A (datao_done), + SCR_JUMP, + PADDR_B (data_ovrun), +}/*-------------------------< PM0_DATA >-------------------------*/,{ + /* + * Read our host flags to SFBR, so we will be able + * to check against the data direction we expect. + */ + SCR_FROM_REG (HF_REG), + 0, + /* + * Check against actual DATA PHASE. + */ + SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)), + PADDR_A (pm0_data_out), + /* + * Actual phase is DATA IN. + * Check against expected direction. + */ + SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)), + PADDR_B (data_ovrun), + /* + * Keep track we are moving data from the + * PM0 DATA mini-script. + */ + SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0), + 0, + /* + * Move the data to memory. + */ + SCR_CHMOV_TBL ^ SCR_DATA_IN, + offsetof (struct sym_ccb, phys.pm0.sg), + SCR_JUMP, + PADDR_A (pm0_data_end), +}/*-------------------------< PM0_DATA_OUT >---------------------*/,{ + /* + * Actual phase is DATA OUT. + * Check against expected direction. + */ + SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)), + PADDR_B (data_ovrun), + /* + * Keep track we are moving data from the + * PM0 DATA mini-script. + */ + SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0), + 0, + /* + * Move the data from memory. + */ + SCR_CHMOV_TBL ^ SCR_DATA_OUT, + offsetof (struct sym_ccb, phys.pm0.sg), +}/*-------------------------< PM0_DATA_END >---------------------*/,{ + /* + * Clear the flag that told we were moving + * data from the PM0 DATA mini-script. + */ + SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM0)), + 0, + /* + * Return to the previous DATA script which + * is guaranteed by design (if no bug) to be + * the main DATA script for this transfer. + */ + SCR_COPY (4), + RADDR_1 (dsa), + RADDR_1 (scratcha), + SCR_REG_REG (scratcha, SCR_ADD, offsetof (struct sym_ccb,phys.pm0.ret)), + 0, +}/*-------------------------< PM_DATA_END >----------------------*/,{ + SCR_COPY (4), + RADDR_1 (scratcha), + PADDR_A (_sms_a150), + SCR_COPY (4), +}/*-------------------------< _SMS_A150 >------------------------*/,{ + 0, + RADDR_1 (temp), + SCR_RETURN, + 0, +}/*-------------------------< PM1_DATA >-------------------------*/,{ + /* + * Read our host flags to SFBR, so we will be able + * to check against the data direction we expect. + */ + SCR_FROM_REG (HF_REG), + 0, + /* + * Check against actual DATA PHASE. + */ + SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)), + PADDR_A (pm1_data_out), + /* + * Actual phase is DATA IN. + * Check against expected direction. + */ + SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)), + PADDR_B (data_ovrun), + /* + * Keep track we are moving data from the + * PM1 DATA mini-script. + */ + SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1), + 0, + /* + * Move the data to memory. + */ + SCR_CHMOV_TBL ^ SCR_DATA_IN, + offsetof (struct sym_ccb, phys.pm1.sg), + SCR_JUMP, + PADDR_A (pm1_data_end), +}/*-------------------------< PM1_DATA_OUT >---------------------*/,{ + /* + * Actual phase is DATA OUT. + * Check against expected direction. + */ + SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)), + PADDR_B (data_ovrun), + /* + * Keep track we are moving data from the + * PM1 DATA mini-script. + */ + SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1), + 0, + /* + * Move the data from memory. 
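The DATA_IN and DATA_OUT fragments above are left as a single zero word because their real size depends on SYM_CONF_MAX_SG; at firmware setup time they are filled with one CHMOV per scatter/gather slot, following the loop sketched in the comment. A hedged C rendering of such a fill, with stand-in names and opcode values (not copied from the driver):

	#include <stddef.h>
	#include <stdint.h>

	typedef uint32_t u32;

	#define TOY_MAX_SG	96	/* stand-in for SYM_CONF_MAX_SG            */
	#define TOY_CHMOV_IN	0x1	/* stand-in for SCR_CHMOV_TBL ^ SCR_DATA_IN */

	/* Stand-in for the per-command DSB with its data[] move table. */
	struct toy_dsb {
		struct { u32 size; u32 addr; } data[TOY_MAX_SG];
	};

	/* Emit one "CHMOV from table entry data[i]" pair per S/G slot,
	 * the pattern shown in the ##=====< i=0; ... comment above.    */
	static u32 *toy_fill_data_in(u32 *p)
	{
		int i;

		for (i = 0; i < TOY_MAX_SG; i++) {
			*p++ = TOY_CHMOV_IN;				/* opcode  */
			*p++ = (u32)offsetof(struct toy_dsb, data[i]);	/* operand */
		}
		return p;
	}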
+ */ + SCR_CHMOV_TBL ^ SCR_DATA_OUT, + offsetof (struct sym_ccb, phys.pm1.sg), +}/*-------------------------< PM1_DATA_END >---------------------*/,{ + /* + * Clear the flag that told we were moving + * data from the PM1 DATA mini-script. + */ + SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM1)), + 0, + /* + * Return to the previous DATA script which + * is guaranteed by design (if no bug) to be + * the main DATA script for this transfer. + */ + SCR_COPY (4), + RADDR_1 (dsa), + RADDR_1 (scratcha), + SCR_REG_REG (scratcha, SCR_ADD, offsetof (struct sym_ccb,phys.pm1.ret)), + 0, + SCR_JUMP, + PADDR_A (pm_data_end), +}/*--------------------------<>----------------------------------*/ +}; + +static struct SYM_FWB_SCR SYM_FWB_SCR = { +/*-------------------------< NO_DATA >--------------------------*/ { + SCR_JUMP, + PADDR_B (data_ovrun), +}/*-------------------------< SEL_FOR_ABORT >--------------------*/,{ + /* + * We are jumped here by the C code, if we have + * some target to reset or some disconnected + * job to abort. Since error recovery is a serious + * busyness, we will really reset the SCSI BUS, if + * case of a SCSI interrupt occurring in this path. + */ + +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT + /* + * Set initiator mode. + */ + SCR_CLR (SCR_TRG), + 0, +#endif + /* + * And try to select this target. + */ + SCR_SEL_TBL_ATN ^ offsetof (struct sym_hcb, abrt_sel), + PADDR_A (reselect), + /* + * Wait for the selection to complete or + * the selection to time out. + */ + SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)), + -8, + /* + * Call the C code. + */ + SCR_INT, + SIR_TARGET_SELECTED, + /* + * The C code should let us continue here. + * Send the 'kiss of death' message. + * We expect an immediate disconnect once + * the target has eaten the message. + */ + SCR_REG_REG (scntl2, SCR_AND, 0x7f), + 0, + SCR_MOVE_TBL ^ SCR_MSG_OUT, + offsetof (struct sym_hcb, abrt_tbl), + SCR_CLR (SCR_ACK|SCR_ATN), + 0, + SCR_WAIT_DISC, + 0, + /* + * Tell the C code that we are done. + */ + SCR_INT, + SIR_ABORT_SENT, +}/*-------------------------< SEL_FOR_ABORT_1 >------------------*/,{ + /* + * Jump at scheduler. + */ + SCR_JUMP, + PADDR_A (start), +}/*-------------------------< MSG_IN_ETC >-----------------------*/,{ + /* + * If it is an EXTENDED (variable size message) + * Handle it. + */ + SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)), + PADDR_B (msg_extended), + /* + * Let the C code handle any other + * 1 byte message. + */ + SCR_JUMP ^ IFTRUE (MASK (0x00, 0xf0)), + PADDR_B (msg_received), + SCR_JUMP ^ IFTRUE (MASK (0x10, 0xf0)), + PADDR_B (msg_received), + /* + * We donnot handle 2 bytes messages from SCRIPTS. + * So, let the C code deal with these ones too. + */ + SCR_JUMP ^ IFFALSE (MASK (0x20, 0xf0)), + PADDR_B (msg_weird_seen), + SCR_CLR (SCR_ACK), + 0, + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + HADDR_1 (msgin[1]), +}/*-------------------------< MSG_RECEIVED >---------------------*/,{ + SCR_COPY (4), /* DUMMY READ */ + HADDR_1 (scratch), + RADDR_1 (scratcha), + SCR_INT, + SIR_MSG_RECEIVED, +}/*-------------------------< MSG_WEIRD_SEEN >-------------------*/,{ + SCR_COPY (4), /* DUMMY READ */ + HADDR_1 (scratch), + RADDR_1 (scratcha), + SCR_INT, + SIR_MSG_WEIRD, +}/*-------------------------< MSG_EXTENDED >---------------------*/,{ + /* + * Clear ACK and get the next byte + * assumed to be the message length. + */ + SCR_CLR (SCR_ACK), + 0, + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + HADDR_1 (msgin[1]), + /* + * Try to catch some unlikely situations as 0 length + * or too large the length. 
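MSG_IN_ETC above sorts the first message byte into classes: EXTENDED MESSAGE (variable length), the two-byte messages in 0x20-0x2f (for which a second byte is fetched), the remaining one-byte codes in 0x00-0x1f (handed to the C code), and anything else, which is treated as weird. The MASK tests translate roughly to the following C, with hypothetical enum and helper names:

	#include <stdint.h>

	#define M_EXTENDED	0x01	/* EXTENDED MESSAGE */

	enum toy_msg_class {
		TOY_MSG_EXTENDED,	/* parse length and payload      */
		TOY_MSG_ONE_BYTE,	/* 0x00-0x1f: give to the C code */
		TOY_MSG_TWO_BYTE,	/* 0x20-0x2f: read one more byte */
		TOY_MSG_WEIRD,		/* everything else               */
	};

	/* Same tests as MASK(0x00,0xf0) / MASK(0x10,0xf0) / MASK(0x20,0xf0). */
	static enum toy_msg_class toy_classify_msg(uint8_t msg)
	{
		if (msg == M_EXTENDED)
			return TOY_MSG_EXTENDED;
		if ((msg & 0xf0) == 0x00 || (msg & 0xf0) == 0x10)
			return TOY_MSG_ONE_BYTE;
		if ((msg & 0xf0) == 0x20)
			return TOY_MSG_TWO_BYTE;
		return TOY_MSG_WEIRD;
	}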
+ */ + SCR_JUMP ^ IFTRUE (DATA (0)), + PADDR_B (msg_weird_seen), + SCR_TO_REG (scratcha), + 0, + SCR_REG_REG (sfbr, SCR_ADD, (256-8)), + 0, + SCR_JUMP ^ IFTRUE (CARRYSET), + PADDR_B (msg_weird_seen), + /* + * We donnot handle extended messages from SCRIPTS. + * Read the amount of data corresponding to the + * message length and call the C code. + */ + SCR_COPY (1), + RADDR_1 (scratcha), + PADDR_B (_sms_b10), + SCR_CLR (SCR_ACK), + 0, +}/*-------------------------< _SMS_B10 >-------------------------*/,{ + SCR_MOVE_ABS (0) ^ SCR_MSG_IN, + HADDR_1 (msgin[2]), + SCR_JUMP, + PADDR_B (msg_received), +}/*-------------------------< MSG_BAD >--------------------------*/,{ + /* + * unimplemented message - reject it. + */ + SCR_INT, + SIR_REJECT_TO_SEND, + SCR_SET (SCR_ATN), + 0, + SCR_JUMP, + PADDR_A (clrack), +}/*-------------------------< MSG_WEIRD >------------------------*/,{ + /* + * weird message received + * ignore all MSG IN phases and reject it. + */ + SCR_INT, + SIR_REJECT_TO_SEND, + SCR_SET (SCR_ATN), + 0, +}/*-------------------------< MSG_WEIRD1 >-----------------------*/,{ + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), + PADDR_A (dispatch), + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + HADDR_1 (scratch), + SCR_JUMP, + PADDR_B (msg_weird1), +}/*-------------------------< WDTR_RESP >------------------------*/,{ + /* + * let the target fetch our answer. + */ + SCR_SET (SCR_ATN), + 0, + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)), + PADDR_B (nego_bad_phase), +}/*-------------------------< SEND_WDTR >------------------------*/,{ + /* + * Send the M_X_WIDE_REQ + */ + SCR_MOVE_ABS (4) ^ SCR_MSG_OUT, + HADDR_1 (msgout), + SCR_JUMP, + PADDR_B (msg_out_done), +}/*-------------------------< SDTR_RESP >------------------------*/,{ + /* + * let the target fetch our answer. + */ + SCR_SET (SCR_ATN), + 0, + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)), + PADDR_B (nego_bad_phase), +}/*-------------------------< SEND_SDTR >------------------------*/,{ + /* + * Send the M_X_SYNC_REQ + */ + SCR_MOVE_ABS (5) ^ SCR_MSG_OUT, + HADDR_1 (msgout), + SCR_JUMP, + PADDR_B (msg_out_done), +}/*-------------------------< PPR_RESP >-------------------------*/,{ + /* + * let the target fetch our answer. + */ + SCR_SET (SCR_ATN), + 0, + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)), + PADDR_B (nego_bad_phase), +}/*-------------------------< SEND_PPR >-------------------------*/,{ + /* + * Send the M_X_PPR_REQ + */ + SCR_MOVE_ABS (8) ^ SCR_MSG_OUT, + HADDR_1 (msgout), + SCR_JUMP, + PADDR_B (msg_out_done), +}/*-------------------------< NEGO_BAD_PHASE >-------------------*/,{ + SCR_INT, + SIR_NEGO_PROTO, + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< MSG_OUT >--------------------------*/,{ + /* + * The target requests a message. + * We donnot send messages that may + * require the device to go to bus free. + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, + HADDR_1 (msgout), + /* + * ... wait for the next phase + * if it's a message out, send it again, ... + */ + SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)), + PADDR_B (msg_out), +}/*-------------------------< MSG_OUT_DONE >---------------------*/,{ + /* + * Let the C code be aware of the + * sent message and clear the message. + */ + SCR_INT, + SIR_MSG_OUT_DONE, + /* + * ... and process the next phase + */ + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< DATA_OVRUN >-----------------------*/,{ + /* + * Zero scratcha that will count the + * extras bytes. 
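The "SCR_REG_REG (sfbr, SCR_ADD, (256-8))" test in MSG_EXTENDED above is an 8-bit trick: adding 248 to the length byte sets carry exactly when the length is 8 or more, so SCRIPTS only reads extended messages whose length is 1..7; a zero or larger length falls back to the weird-message path. The equivalent check in C (helper name is hypothetical):

	#include <stdint.h>

	/*
	 * Accept an EXTENDED MESSAGE length only if SCRIPTS will read it:
	 * len == 0 is rejected outright, and the 8-bit add of (256 - 8)
	 * sets carry precisely when len >= 8.
	 */
	static int toy_ext_msg_len_ok(uint8_t len)
	{
		unsigned int sum = (unsigned int)len + (256 - 8);
		int carry = sum >= 256;		/* the CARRYSET condition */

		return len != 0 && !carry;	/* i.e. 1 <= len <= 7     */
	}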
+ */ + SCR_COPY (4), + PADDR_B (zero), + RADDR_1 (scratcha), +}/*-------------------------< DATA_OVRUN1 >----------------------*/,{ + /* + * The target may want to transfer too much data. + * + * If phase is DATA OUT write 1 byte and count it. + */ + SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)), + 16, + SCR_CHMOV_ABS (1) ^ SCR_DATA_OUT, + HADDR_1 (scratch), + SCR_JUMP, + PADDR_B (data_ovrun2), + /* + * If WSR is set, clear this condition, and + * count this byte. + */ + SCR_FROM_REG (scntl2), + 0, + SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)), + 16, + SCR_REG_REG (scntl2, SCR_OR, WSR), + 0, + SCR_JUMP, + PADDR_B (data_ovrun2), + /* + * Finally check against DATA IN phase. + * Signal data overrun to the C code + * and jump to dispatcher if not so. + * Read 1 byte otherwise and count it. + */ + SCR_JUMPR ^ IFTRUE (WHEN (SCR_DATA_IN)), + 16, + SCR_INT, + SIR_DATA_OVERRUN, + SCR_JUMP, + PADDR_A (dispatch), + SCR_CHMOV_ABS (1) ^ SCR_DATA_IN, + HADDR_1 (scratch), +}/*-------------------------< DATA_OVRUN2 >----------------------*/,{ + /* + * Count this byte. + * This will allow to return a negative + * residual to user. + */ + SCR_REG_REG (scratcha, SCR_ADD, 0x01), + 0, + SCR_REG_REG (scratcha1, SCR_ADDC, 0), + 0, + SCR_REG_REG (scratcha2, SCR_ADDC, 0), + 0, + /* + * .. and repeat as required. + */ + SCR_JUMP, + PADDR_B (data_ovrun1), +}/*-------------------------< ABORT_RESEL >----------------------*/,{ + SCR_SET (SCR_ATN), + 0, + SCR_CLR (SCR_ACK), + 0, + /* + * send the abort/abortag/reset message + * we expect an immediate disconnect + */ + SCR_REG_REG (scntl2, SCR_AND, 0x7f), + 0, + SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, + HADDR_1 (msgout), + SCR_CLR (SCR_ACK|SCR_ATN), + 0, + SCR_WAIT_DISC, + 0, + SCR_INT, + SIR_RESEL_ABORTED, + SCR_JUMP, + PADDR_A (start), +}/*-------------------------< RESEND_IDENT >---------------------*/,{ + /* + * The target stays in MSG OUT phase after having acked + * Identify [+ Tag [+ Extended message ]]. Targets shall + * behave this way on parity error. + * We must send it again all the messages. + */ + SCR_SET (SCR_ATN), /* Shall be asserted 2 deskew delays before the */ + 0, /* 1rst ACK = 90 ns. Hope the chip isn't too fast */ + SCR_JUMP, + PADDR_A (send_ident), +}/*-------------------------< IDENT_BREAK >----------------------*/,{ + SCR_CLR (SCR_ATN), + 0, + SCR_JUMP, + PADDR_A (select2), +}/*-------------------------< IDENT_BREAK_ATN >------------------*/,{ + SCR_SET (SCR_ATN), + 0, + SCR_JUMP, + PADDR_A (select2), +}/*-------------------------< SDATA_IN >-------------------------*/,{ + SCR_CHMOV_TBL ^ SCR_DATA_IN, + offsetof (struct sym_dsb, sense), + SCR_CALL, + PADDR_A (datai_done), + SCR_JUMP, + PADDR_B (data_ovrun), +}/*-------------------------< RESEL_BAD_LUN >--------------------*/,{ + /* + * Message is an IDENTIFY, but lun is unknown. + * Signal problem to C code for logging the event. + * Send a M_ABORT to clear all pending tasks. + */ + SCR_INT, + SIR_RESEL_BAD_LUN, + SCR_JUMP, + PADDR_B (abort_resel), +}/*-------------------------< BAD_I_T_L >------------------------*/,{ + /* + * We donnot have a task for that I_T_L. + * Signal problem to C code for logging the event. + * Send a M_ABORT message. + */ + SCR_INT, + SIR_RESEL_BAD_I_T_L, + SCR_JUMP, + PADDR_B (abort_resel), +}/*-------------------------< BAD_I_T_L_Q >----------------------*/,{ + /* + * We donnot have a task that matches the tag. + * Signal problem to C code for logging the event. + * Send a M_ABORTTAG message. 
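DATA_OVRUN2 above builds a multi-byte overrun counter out of 8-bit register operations: SCR_ADD adds one to the low SCRATCHA byte and the two SCR_ADDC instructions ripple the carry through scratcha1 and scratcha2. The same arithmetic written as C, with the three byte registers modelled explicitly (a sketch, not driver code):

	#include <stdint.h>

	/* Model of the three SCRATCHA bytes used as a 24-bit counter. */
	struct toy_scratcha {
		uint8_t a0, a1, a2;
	};

	/* One pass of DATA_OVRUN2: count one extra byte, rippling the carry. */
	static void toy_count_overrun_byte(struct toy_scratcha *s)
	{
		unsigned int sum;

		sum = s->a0 + 1;          s->a0 = (uint8_t)sum;	/* SCR_ADD  0x01 */
		sum = s->a1 + (sum >> 8); s->a1 = (uint8_t)sum;	/* SCR_ADDC 0    */
		sum = s->a2 + (sum >> 8); s->a2 = (uint8_t)sum;	/* SCR_ADDC 0    */
	}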
+ */ + SCR_INT, + SIR_RESEL_BAD_I_T_L_Q, + SCR_JUMP, + PADDR_B (abort_resel), +}/*-------------------------< BAD_STATUS >-----------------------*/,{ + /* + * Anything different from INTERMEDIATE + * CONDITION MET should be a bad SCSI status, + * given that GOOD status has already been tested. + * Call the C code. + */ + SCR_COPY (4), + PADDR_B (startpos), + RADDR_1 (scratcha), + SCR_INT ^ IFFALSE (DATA (S_COND_MET)), + SIR_BAD_SCSI_STATUS, + SCR_RETURN, + 0, +}/*-------------------------< WSR_MA_HELPER >--------------------*/,{ + /* + * Helper for the C code when WSR bit is set. + * Perform the move of the residual byte. + */ + SCR_CHMOV_TBL ^ SCR_DATA_IN, + offsetof (struct sym_ccb, phys.wresid), + SCR_JUMP, + PADDR_A (dispatch), + +}/*-------------------------< ZERO >-----------------------------*/,{ + SCR_DATA_ZERO, +}/*-------------------------< SCRATCH >--------------------------*/,{ + SCR_DATA_ZERO, /* MUST BE BEFORE SCRATCH1 */ +}/*-------------------------< SCRATCH1 >-------------------------*/,{ + SCR_DATA_ZERO, +}/*-------------------------< PREV_DONE >------------------------*/,{ + SCR_DATA_ZERO, /* MUST BE BEFORE DONE_POS ! */ +}/*-------------------------< DONE_POS >-------------------------*/,{ + SCR_DATA_ZERO, +}/*-------------------------< NEXTJOB >--------------------------*/,{ + SCR_DATA_ZERO, /* MUST BE BEFORE STARTPOS ! */ +}/*-------------------------< STARTPOS >-------------------------*/,{ + SCR_DATA_ZERO, +}/*-------------------------< TARGTBL >--------------------------*/,{ + SCR_DATA_ZERO, +}/*--------------------------<>----------------------------------*/ +}; + +static struct SYM_FWZ_SCR SYM_FWZ_SCR = { + /*-------------------------< SNOOPTEST >------------------------*/{ + /* + * Read the variable. + */ + SCR_COPY (4), + HADDR_1 (scratch), + RADDR_1 (scratcha), + /* + * Write the variable. + */ + SCR_COPY (4), + RADDR_1 (temp), + HADDR_1 (scratch), + /* + * Read back the variable. + */ + SCR_COPY (4), + HADDR_1 (scratch), + RADDR_1 (temp), +}/*-------------------------< SNOOPEND >-------------------------*/,{ + /* + * And stop. + */ + SCR_INT, + 99, +}/*--------------------------<>----------------------------------*/ +}; diff --git a/drivers/scsi/sym53c8xx_2/sym_fw2.h b/drivers/scsi/sym53c8xx_2/sym_fw2.h new file mode 100644 index 000000000..4d1779b2a --- /dev/null +++ b/drivers/scsi/sym53c8xx_2/sym_fw2.h @@ -0,0 +1,1862 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family + * of PCI-SCSI IO processors. + * + * Copyright (C) 1999-2001 Gerard Roudier + * + * This driver is derived from the Linux sym53c8xx driver. + * Copyright (C) 1998-2000 Gerard Roudier + * + * The sym53c8xx driver is derived from the ncr53c8xx driver that had been + * a port of the FreeBSD ncr driver to Linux-1.2.13. + * + * The original ncr driver has been written for 386bsd and FreeBSD by + * Wolfgang Stanglmeier + * Stefan Esser + * Copyright (C) 1994 Wolfgang Stanglmeier + * + * Other major contributions: + * + * NVRAM detection and reading. + * Copyright (C) 1997 Richard Waltham + * + *----------------------------------------------------------------------------- + */ + +/* + * Scripts for SYMBIOS-Processor + * + * We have to know the offsets of all labels before we reach + * them (for forward jumps). Therefore we declare a struct + * here. If you make changes inside the script, + * + * DONT FORGET TO CHANGE THE LENGTHS HERE! 
+ */ + +/* + * Script fragments which are loaded into the on-chip RAM + * of 825A, 875, 876, 895, 895A, 896 and 1010 chips. + * Must not exceed 4K bytes. + */ +struct SYM_FWA_SCR { + u32 start [ 14]; + u32 getjob_begin [ 4]; + u32 getjob_end [ 4]; +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT + u32 select [ 6]; +#else + u32 select [ 4]; +#endif +#if SYM_CONF_DMA_ADDRESSING_MODE == 2 + u32 is_dmap_dirty [ 4]; +#endif + u32 wf_sel_done [ 2]; + u32 sel_done [ 2]; + u32 send_ident [ 2]; +#ifdef SYM_CONF_IARB_SUPPORT + u32 select2 [ 8]; +#else + u32 select2 [ 2]; +#endif + u32 command [ 2]; + u32 dispatch [ 28]; + u32 sel_no_cmd [ 10]; + u32 init [ 6]; + u32 clrack [ 4]; + u32 datai_done [ 10]; + u32 datai_done_wsr [ 20]; + u32 datao_done [ 10]; + u32 datao_done_wss [ 6]; + u32 datai_phase [ 4]; + u32 datao_phase [ 6]; + u32 msg_in [ 2]; + u32 msg_in2 [ 10]; +#ifdef SYM_CONF_IARB_SUPPORT + u32 status [ 14]; +#else + u32 status [ 10]; +#endif + u32 complete [ 6]; + u32 complete2 [ 12]; + u32 done [ 14]; + u32 done_end [ 2]; + u32 complete_error [ 4]; + u32 save_dp [ 12]; + u32 restore_dp [ 8]; + u32 disconnect [ 12]; +#ifdef SYM_CONF_IARB_SUPPORT + u32 idle [ 4]; +#else + u32 idle [ 2]; +#endif +#ifdef SYM_CONF_IARB_SUPPORT + u32 ungetjob [ 6]; +#else + u32 ungetjob [ 4]; +#endif +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT + u32 reselect [ 4]; +#else + u32 reselect [ 2]; +#endif + u32 reselected [ 22]; + u32 resel_scntl4 [ 20]; + u32 resel_lun0 [ 6]; +#if SYM_CONF_MAX_TASK*4 > 512 + u32 resel_tag [ 26]; +#elif SYM_CONF_MAX_TASK*4 > 256 + u32 resel_tag [ 20]; +#else + u32 resel_tag [ 16]; +#endif + u32 resel_dsa [ 2]; + u32 resel_dsa1 [ 4]; + u32 resel_no_tag [ 6]; + u32 data_in [SYM_CONF_MAX_SG * 2]; + u32 data_in2 [ 4]; + u32 data_out [SYM_CONF_MAX_SG * 2]; + u32 data_out2 [ 4]; + u32 pm0_data [ 12]; + u32 pm0_data_out [ 6]; + u32 pm0_data_end [ 6]; + u32 pm1_data [ 12]; + u32 pm1_data_out [ 6]; + u32 pm1_data_end [ 6]; +}; + +/* + * Script fragments which stay in main memory for all chips + * except for chips that support 8K on-chip RAM. + */ +struct SYM_FWB_SCR { + u32 start64 [ 2]; + u32 no_data [ 2]; +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT + u32 sel_for_abort [ 18]; +#else + u32 sel_for_abort [ 16]; +#endif + u32 sel_for_abort_1 [ 2]; + u32 msg_in_etc [ 12]; + u32 msg_received [ 4]; + u32 msg_weird_seen [ 4]; + u32 msg_extended [ 20]; + u32 msg_bad [ 6]; + u32 msg_weird [ 4]; + u32 msg_weird1 [ 8]; + + u32 wdtr_resp [ 6]; + u32 send_wdtr [ 4]; + u32 sdtr_resp [ 6]; + u32 send_sdtr [ 4]; + u32 ppr_resp [ 6]; + u32 send_ppr [ 4]; + u32 nego_bad_phase [ 4]; + u32 msg_out [ 4]; + u32 msg_out_done [ 4]; + u32 data_ovrun [ 2]; + u32 data_ovrun1 [ 22]; + u32 data_ovrun2 [ 8]; + u32 abort_resel [ 16]; + u32 resend_ident [ 4]; + u32 ident_break [ 4]; + u32 ident_break_atn [ 4]; + u32 sdata_in [ 6]; + u32 resel_bad_lun [ 4]; + u32 bad_i_t_l [ 4]; + u32 bad_i_t_l_q [ 4]; + u32 bad_status [ 6]; + u32 pm_handle [ 20]; + u32 pm_handle1 [ 4]; + u32 pm_save [ 4]; + u32 pm0_save [ 12]; + u32 pm_save_end [ 4]; + u32 pm1_save [ 14]; + + /* WSR handling */ + u32 pm_wsr_handle [ 38]; + u32 wsr_ma_helper [ 4]; + + /* Data area */ + u32 zero [ 1]; + u32 scratch [ 1]; + u32 pm0_data_addr [ 1]; + u32 pm1_data_addr [ 1]; + u32 done_pos [ 1]; + u32 startpos [ 1]; + u32 targtbl [ 1]; +}; + +/* + * Script fragments used at initialisations. + * Only runs out of main memory. 
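The array lengths in SYM_FWA_SCR and SYM_FWB_SCR give, in 32-bit words, the exact size of each script fragment, which is why the header warns about keeping them in sync: a label's address is derived from the field offset of its array within the loaded script image. A hedged illustration of that derivation (the driver itself goes through its firmware descriptor machinery, not this exact helper):

	#include <stddef.h>
	#include <stdint.h>

	typedef uint32_t u32;

	/* Cut-down stand-in for SYM_FWA_SCR: field sizes define label offsets. */
	struct toy_fwa_scr {
		u32 start   [14];
		u32 select  [ 6];
		u32 dispatch[28];
	};

	/*
	 * A label's bus address is "base of loaded script A + byte offset
	 * of its field", so one wrong array length shifts every later
	 * label and breaks every jump that targets it.
	 */
	static u32 toy_label_addr(u32 scripta_ba, size_t field_offset)
	{
		return scripta_ba + (u32)field_offset;
	}

	/* e.g. toy_label_addr(ba, offsetof(struct toy_fwa_scr, dispatch)) */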
+ */ +struct SYM_FWZ_SCR { + u32 snooptest [ 6]; + u32 snoopend [ 2]; +}; + +static struct SYM_FWA_SCR SYM_FWA_SCR = { +/*--------------------------< START >----------------------------*/ { + /* + * Switch the LED on. + * Will be patched with a NO_OP if LED + * not needed or not desired. + */ + SCR_REG_REG (gpreg, SCR_AND, 0xfe), + 0, + /* + * Clear SIGP. + */ + SCR_FROM_REG (ctest2), + 0, + /* + * Stop here if the C code wants to perform + * some error recovery procedure manually. + * (Indicate this by setting SEM in ISTAT) + */ + SCR_FROM_REG (istat), + 0, + /* + * Report to the C code the next position in + * the start queue the SCRIPTS will schedule. + * The C code must not change SCRATCHA. + */ + SCR_LOAD_ABS (scratcha, 4), + PADDR_B (startpos), + SCR_INT ^ IFTRUE (MASK (SEM, SEM)), + SIR_SCRIPT_STOPPED, + /* + * Start the next job. + * + * @DSA = start point for this job. + * SCRATCHA = address of this job in the start queue. + * + * We will restore startpos with SCRATCHA if we fails the + * arbitration or if it is the idle job. + * + * The below GETJOB_BEGIN to GETJOB_END section of SCRIPTS + * is a critical path. If it is partially executed, it then + * may happen that the job address is not yet in the DSA + * and the next queue position points to the next JOB. + */ + SCR_LOAD_ABS (dsa, 4), + PADDR_B (startpos), + SCR_LOAD_REL (temp, 4), + 4, +}/*-------------------------< GETJOB_BEGIN >---------------------*/,{ + SCR_STORE_ABS (temp, 4), + PADDR_B (startpos), + SCR_LOAD_REL (dsa, 4), + 0, +}/*-------------------------< GETJOB_END >-----------------------*/,{ + SCR_LOAD_REL (temp, 4), + 0, + SCR_RETURN, + 0, +}/*-------------------------< SELECT >---------------------------*/,{ + /* + * DSA contains the address of a scheduled + * data structure. + * + * SCRATCHA contains the address of the start queue + * entry which points to the next job. + * + * Set Initiator mode. + * + * (Target mode is left as an exercise for the reader) + */ +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT + SCR_CLR (SCR_TRG), + 0, +#endif + /* + * And try to select this target. + */ + SCR_SEL_TBL_ATN ^ offsetof (struct sym_dsb, select), + PADDR_A (ungetjob), + /* + * Now there are 4 possibilities: + * + * (1) The chip loses arbitration. + * This is ok, because it will try again, + * when the bus becomes idle. + * (But beware of the timeout function!) + * + * (2) The chip is reselected. + * Then the script processor takes the jump + * to the RESELECT label. + * + * (3) The chip wins arbitration. + * Then it will execute SCRIPTS instruction until + * the next instruction that checks SCSI phase. + * Then will stop and wait for selection to be + * complete or selection time-out to occur. + * + * After having won arbitration, the SCRIPTS + * processor is able to execute instructions while + * the SCSI core is performing SCSI selection. + */ + /* + * Initialize the status registers + */ + SCR_LOAD_REL (scr0, 4), + offsetof (struct sym_ccb, phys.head.status), + /* + * We may need help from CPU if the DMA segment + * registers aren't up-to-date for this IO. + * Patched with NOOP for chips that donnot + * support DAC addressing. 
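The GETJOB_BEGIN/GETJOB_END window in START above pops one job off the start queue: DSA is first loaded with the address of the current queue slot, the value read 4 bytes further on is stored back to startpos as the new head, and only then is the job's DSA fetched; the comment explains why this window must run to completion or be restarted. As a host-side picture of the same pop, assuming each queue slot holds the job address followed by the address of the next slot (which is what the 4-byte loads at offsets 0 and 4 suggest; the layout is an assumption here):

	#include <stdint.h>

	typedef uint32_t u32;

	/* Assumed slot layout, purely for reading the SCRIPTS above. */
	struct toy_start_slot {
		u32 job_dsa;	/* DSA (bus address) of the queued CCB   */
		u32 next_slot;	/* bus address of the following slot     */
	};

	/* The GETJOB critical section as plain C: advance the queue head,
	 * then return the job that was at the old head.  In the script the
	 * next-slot value is fetched just before the window opens.        */
	static u32 toy_getjob(struct toy_start_slot *slot, u32 *startpos)
	{
		*startpos = slot->next_slot;	/* STORE temp -> startpos */
		return slot->job_dsa;		/* LOAD  dsa  <- slot[0]  */
	}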
+ */ +#if SYM_CONF_DMA_ADDRESSING_MODE == 2 +}/*-------------------------< IS_DMAP_DIRTY >--------------------*/,{ + SCR_FROM_REG (HX_REG), + 0, + SCR_INT ^ IFTRUE (MASK (HX_DMAP_DIRTY, HX_DMAP_DIRTY)), + SIR_DMAP_DIRTY, +#endif +}/*-------------------------< WF_SEL_DONE >----------------------*/,{ + SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)), + SIR_SEL_ATN_NO_MSG_OUT, +}/*-------------------------< SEL_DONE >-------------------------*/,{ + /* + * C1010-33 errata work-around. + * Due to a race, the SCSI core may not have + * loaded SCNTL3 on SEL_TBL instruction. + * We reload it once phase is stable. + * Patched with a NOOP for other chips. + */ + SCR_LOAD_REL (scntl3, 1), + offsetof(struct sym_dsb, select.sel_scntl3), +}/*-------------------------< SEND_IDENT >-----------------------*/,{ + /* + * Selection complete. + * Send the IDENTIFY and possibly the TAG message + * and negotiation message if present. + */ + SCR_MOVE_TBL ^ SCR_MSG_OUT, + offsetof (struct sym_dsb, smsg), +}/*-------------------------< SELECT2 >--------------------------*/,{ +#ifdef SYM_CONF_IARB_SUPPORT + /* + * Set IMMEDIATE ARBITRATION if we have been given + * a hint to do so. (Some job to do after this one). + */ + SCR_FROM_REG (HF_REG), + 0, + SCR_JUMPR ^ IFFALSE (MASK (HF_HINT_IARB, HF_HINT_IARB)), + 8, + SCR_REG_REG (scntl1, SCR_OR, IARB), + 0, +#endif + /* + * Anticipate the COMMAND phase. + * This is the PHASE we expect at this point. + */ + SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)), + PADDR_A (sel_no_cmd), +}/*-------------------------< COMMAND >--------------------------*/,{ + /* + * ... and send the command + */ + SCR_MOVE_TBL ^ SCR_COMMAND, + offsetof (struct sym_dsb, cmd), +}/*-------------------------< DISPATCH >-------------------------*/,{ + /* + * MSG_IN is the only phase that shall be + * entered at least once for each (re)selection. + * So we test it first. + */ + SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)), + PADDR_A (msg_in), + SCR_JUMP ^ IFTRUE (IF (SCR_DATA_OUT)), + PADDR_A (datao_phase), + SCR_JUMP ^ IFTRUE (IF (SCR_DATA_IN)), + PADDR_A (datai_phase), + SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)), + PADDR_A (status), + SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)), + PADDR_A (command), + SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)), + PADDR_B (msg_out), + /* + * Discard as many illegal phases as + * required and tell the C code about. + */ + SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_OUT)), + 16, + SCR_MOVE_ABS (1) ^ SCR_ILG_OUT, + HADDR_1 (scratch), + SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_OUT)), + -16, + SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_IN)), + 16, + SCR_MOVE_ABS (1) ^ SCR_ILG_IN, + HADDR_1 (scratch), + SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_IN)), + -16, + SCR_INT, + SIR_BAD_PHASE, + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< SEL_NO_CMD >-----------------------*/,{ + /* + * The target does not switch to command + * phase after IDENTIFY has been sent. + * + * If it stays in MSG OUT phase send it + * the IDENTIFY again. + */ + SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)), + PADDR_B (resend_ident), + /* + * If target does not switch to MSG IN phase + * and we sent a negotiation, assert the + * failure immediately. + */ + SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)), + PADDR_A (dispatch), + SCR_FROM_REG (HS_REG), + 0, + SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), + SIR_NEGO_FAILED, + /* + * Jump to dispatcher. 
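The DISPATCH fragment above is the central phase decoder: MSG IN is tested first because it is the one phase every (re)selection must eventually present, then the data, status, command and message-out phases, and any remaining (illegal) phase bytes are drained one at a time before the C code is interrupted. A rough C rendering of that priority order, using the standard SCSI phase encodings (enum and handler names are hypothetical):

	#include <stdint.h>

	enum toy_phase {
		TOY_DATA_OUT = 0, TOY_DATA_IN = 1, TOY_COMMAND = 2,
		TOY_STATUS   = 3, TOY_MSG_OUT = 6, TOY_MSG_IN  = 7,
	};

	/* Same decoding as DISPATCH: MSG IN first, then the expected
	 * phases, otherwise the illegal-phase clean-up path.          */
	static const char *toy_dispatch(enum toy_phase phase)
	{
		switch (phase) {
		case TOY_MSG_IN:	return "msg_in";
		case TOY_DATA_OUT:	return "datao_phase";
		case TOY_DATA_IN:	return "datai_phase";
		case TOY_STATUS:	return "status";
		case TOY_COMMAND:	return "command";
		case TOY_MSG_OUT:	return "msg_out";
		default:		return "bad_phase";	/* SIR_BAD_PHASE */
		}
	}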
+ */ + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< INIT >-----------------------------*/,{ + /* + * Wait for the SCSI RESET signal to be + * inactive before restarting operations, + * since the chip may hang on SEL_ATN + * if SCSI RESET is active. + */ + SCR_FROM_REG (sstat0), + 0, + SCR_JUMPR ^ IFTRUE (MASK (IRST, IRST)), + -16, + SCR_JUMP, + PADDR_A (start), +}/*-------------------------< CLRACK >---------------------------*/,{ + /* + * Terminate possible pending message phase. + */ + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< DATAI_DONE >-----------------------*/,{ + /* + * Save current pointer to LASTP. + */ + SCR_STORE_REL (temp, 4), + offsetof (struct sym_ccb, phys.head.lastp), + /* + * If the SWIDE is not full, jump to dispatcher. + * We anticipate a STATUS phase. + */ + SCR_FROM_REG (scntl2), + 0, + SCR_JUMP ^ IFTRUE (MASK (WSR, WSR)), + PADDR_A (datai_done_wsr), + SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)), + PADDR_A (status), + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< DATAI_DONE_WSR >-------------------*/,{ + /* + * The SWIDE is full. + * Clear this condition. + */ + SCR_REG_REG (scntl2, SCR_OR, WSR), + 0, + /* + * We are expecting an IGNORE RESIDUE message + * from the device, otherwise we are in data + * overrun condition. Check against MSG_IN phase. + */ + SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)), + SIR_SWIDE_OVERRUN, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), + PADDR_A (dispatch), + /* + * We are in MSG_IN phase, + * Read the first byte of the message. + * If it is not an IGNORE RESIDUE message, + * signal overrun and jump to message + * processing. + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + HADDR_1 (msgin[0]), + SCR_INT ^ IFFALSE (DATA (M_IGN_RESIDUE)), + SIR_SWIDE_OVERRUN, + SCR_JUMP ^ IFFALSE (DATA (M_IGN_RESIDUE)), + PADDR_A (msg_in2), + /* + * We got the message we expected. + * Read the 2nd byte, and jump to dispatcher. + */ + SCR_CLR (SCR_ACK), + 0, + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + HADDR_1 (msgin[1]), + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< DATAO_DONE >-----------------------*/,{ + /* + * Save current pointer to LASTP. + */ + SCR_STORE_REL (temp, 4), + offsetof (struct sym_ccb, phys.head.lastp), + /* + * If the SODL is not full jump to dispatcher. + * We anticipate a STATUS phase. + */ + SCR_FROM_REG (scntl2), + 0, + SCR_JUMP ^ IFTRUE (MASK (WSS, WSS)), + PADDR_A (datao_done_wss), + SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)), + PADDR_A (status), + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< DATAO_DONE_WSS >-------------------*/,{ + /* + * The SODL is full, clear this condition. + */ + SCR_REG_REG (scntl2, SCR_OR, WSS), + 0, + /* + * And signal a DATA UNDERRUN condition + * to the C code. + */ + SCR_INT, + SIR_SODL_UNDERRUN, + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< DATAI_PHASE >----------------------*/,{ + /* + * Jump to current pointer. + */ + SCR_LOAD_REL (temp, 4), + offsetof (struct sym_ccb, phys.head.lastp), + SCR_RETURN, + 0, +}/*-------------------------< DATAO_PHASE >----------------------*/,{ + /* + * C1010-66 errata work-around. + * Extra clocks of data hold must be inserted + * in DATA OUT phase on 33 MHz PCI BUS. + * Patched with a NOOP for other chips. + */ + SCR_REG_REG (scntl4, SCR_OR, (XCLKH_DT|XCLKH_ST)), + 0, + /* + * Jump to current pointer. 
+ */ + SCR_LOAD_REL (temp, 4), + offsetof (struct sym_ccb, phys.head.lastp), + SCR_RETURN, + 0, +}/*-------------------------< MSG_IN >---------------------------*/,{ + /* + * Get the first byte of the message. + * + * The script processor doesn't negate the + * ACK signal after this transfer. + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + HADDR_1 (msgin[0]), +}/*-------------------------< MSG_IN2 >--------------------------*/,{ + /* + * Check first against 1 byte messages + * that we handle from SCRIPTS. + */ + SCR_JUMP ^ IFTRUE (DATA (M_COMPLETE)), + PADDR_A (complete), + SCR_JUMP ^ IFTRUE (DATA (M_DISCONNECT)), + PADDR_A (disconnect), + SCR_JUMP ^ IFTRUE (DATA (M_SAVE_DP)), + PADDR_A (save_dp), + SCR_JUMP ^ IFTRUE (DATA (M_RESTORE_DP)), + PADDR_A (restore_dp), + /* + * We handle all other messages from the + * C code, so no need to waste on-chip RAM + * for those ones. + */ + SCR_JUMP, + PADDR_B (msg_in_etc), +}/*-------------------------< STATUS >---------------------------*/,{ + /* + * get the status + */ + SCR_MOVE_ABS (1) ^ SCR_STATUS, + HADDR_1 (scratch), +#ifdef SYM_CONF_IARB_SUPPORT + /* + * If STATUS is not GOOD, clear IMMEDIATE ARBITRATION, + * since we may have to tamper the start queue from + * the C code. + */ + SCR_JUMPR ^ IFTRUE (DATA (S_GOOD)), + 8, + SCR_REG_REG (scntl1, SCR_AND, ~IARB), + 0, +#endif + /* + * save status to scsi_status. + * mark as complete. + */ + SCR_TO_REG (SS_REG), + 0, + SCR_LOAD_REG (HS_REG, HS_COMPLETE), + 0, + /* + * Anticipate the MESSAGE PHASE for + * the TASK COMPLETE message. + */ + SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)), + PADDR_A (msg_in), + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< COMPLETE >-------------------------*/,{ + /* + * Complete message. + * + * When we terminate the cycle by clearing ACK, + * the target may disconnect immediately. + * + * We don't want to be told of an "unexpected disconnect", + * so we disable this feature. + */ + SCR_REG_REG (scntl2, SCR_AND, 0x7f), + 0, + /* + * Terminate cycle ... + */ + SCR_CLR (SCR_ACK|SCR_ATN), + 0, + /* + * ... and wait for the disconnect. + */ + SCR_WAIT_DISC, + 0, +}/*-------------------------< COMPLETE2 >------------------------*/,{ + /* + * Save host status. + */ + SCR_STORE_REL (scr0, 4), + offsetof (struct sym_ccb, phys.head.status), + /* + * Some bridges may reorder DMA writes to memory. + * We donnot want the CPU to deal with completions + * without all the posted write having been flushed + * to memory. This DUMMY READ should flush posted + * buffers prior to the CPU having to deal with + * completions. + */ + SCR_LOAD_REL (scr0, 4), /* DUMMY READ */ + offsetof (struct sym_ccb, phys.head.status), + + /* + * If command resulted in not GOOD status, + * call the C code if needed. + */ + SCR_FROM_REG (SS_REG), + 0, + SCR_CALL ^ IFFALSE (DATA (S_GOOD)), + PADDR_B (bad_status), + /* + * If we performed an auto-sense, call + * the C code to synchronyze task aborts + * with UNIT ATTENTION conditions. + */ + SCR_FROM_REG (HF_REG), + 0, + SCR_JUMP ^ IFFALSE (MASK (0 ,(HF_SENSE|HF_EXT_ERR))), + PADDR_A (complete_error), +}/*-------------------------< DONE >-----------------------------*/,{ + /* + * Copy the DSA to the DONE QUEUE and + * signal completion to the host. + * If we are interrupted between DONE + * and DONE_END, we must reset, otherwise + * the completed CCB may be lost. 
+ */ + SCR_STORE_ABS (dsa, 4), + PADDR_B (scratch), + SCR_LOAD_ABS (dsa, 4), + PADDR_B (done_pos), + SCR_LOAD_ABS (scratcha, 4), + PADDR_B (scratch), + SCR_STORE_REL (scratcha, 4), + 0, + /* + * The instruction below reads the DONE QUEUE next + * free position from memory. + * In addition it ensures that all PCI posted writes + * are flushed and so the DSA value of the done + * CCB is visible by the CPU before INTFLY is raised. + */ + SCR_LOAD_REL (scratcha, 4), + 4, + SCR_INT_FLY, + 0, + SCR_STORE_ABS (scratcha, 4), + PADDR_B (done_pos), +}/*-------------------------< DONE_END >-------------------------*/,{ + SCR_JUMP, + PADDR_A (start), +}/*-------------------------< COMPLETE_ERROR >-------------------*/,{ + SCR_LOAD_ABS (scratcha, 4), + PADDR_B (startpos), + SCR_INT, + SIR_COMPLETE_ERROR, +}/*-------------------------< SAVE_DP >--------------------------*/,{ + /* + * Clear ACK immediately. + * No need to delay it. + */ + SCR_CLR (SCR_ACK), + 0, + /* + * Keep track we received a SAVE DP, so + * we will switch to the other PM context + * on the next PM since the DP may point + * to the current PM context. + */ + SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED), + 0, + /* + * SAVE_DP message: + * Copy LASTP to SAVEP. + */ + SCR_LOAD_REL (scratcha, 4), + offsetof (struct sym_ccb, phys.head.lastp), + SCR_STORE_REL (scratcha, 4), + offsetof (struct sym_ccb, phys.head.savep), + /* + * Anticipate the MESSAGE PHASE for + * the DISCONNECT message. + */ + SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)), + PADDR_A (msg_in), + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< RESTORE_DP >-----------------------*/,{ + /* + * Clear ACK immediately. + * No need to delay it. + */ + SCR_CLR (SCR_ACK), + 0, + /* + * Copy SAVEP to LASTP. + */ + SCR_LOAD_REL (scratcha, 4), + offsetof (struct sym_ccb, phys.head.savep), + SCR_STORE_REL (scratcha, 4), + offsetof (struct sym_ccb, phys.head.lastp), + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< DISCONNECT >-----------------------*/,{ + /* + * DISCONNECTing ... + * + * disable the "unexpected disconnect" feature, + * and remove the ACK signal. + */ + SCR_REG_REG (scntl2, SCR_AND, 0x7f), + 0, + SCR_CLR (SCR_ACK|SCR_ATN), + 0, + /* + * Wait for the disconnect. + */ + SCR_WAIT_DISC, + 0, + /* + * Status is: DISCONNECTED. + */ + SCR_LOAD_REG (HS_REG, HS_DISCONNECT), + 0, + /* + * Save host status. + */ + SCR_STORE_REL (scr0, 4), + offsetof (struct sym_ccb, phys.head.status), + SCR_JUMP, + PADDR_A (start), +}/*-------------------------< IDLE >-----------------------------*/,{ + /* + * Nothing to do? + * Switch the LED off and wait for reselect. + * Will be patched with a NO_OP if LED + * not needed or not desired. + */ + SCR_REG_REG (gpreg, SCR_OR, 0x01), + 0, +#ifdef SYM_CONF_IARB_SUPPORT + SCR_JUMPR, + 8, +#endif +}/*-------------------------< UNGETJOB >-------------------------*/,{ +#ifdef SYM_CONF_IARB_SUPPORT + /* + * Set IMMEDIATE ARBITRATION, for the next time. + * This will give us better chance to win arbitration + * for the job we just wanted to do. + */ + SCR_REG_REG (scntl1, SCR_OR, IARB), + 0, +#endif + /* + * We are not able to restart the SCRIPTS if we are + * interrupted and these instruction haven't been + * all executed. BTW, this is very unlikely to + * happen, but we check that from the C code. + */ + SCR_LOAD_REG (dsa, 0xff), + 0, + SCR_STORE_ABS (scratcha, 4), + PADDR_B (startpos), +}/*-------------------------< RESELECT >-------------------------*/,{ +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT + /* + * Make sure we are in initiator mode. 
+ */ + SCR_CLR (SCR_TRG), + 0, +#endif + /* + * Sleep waiting for a reselection. + */ + SCR_WAIT_RESEL, + PADDR_A(start), +}/*-------------------------< RESELECTED >-----------------------*/,{ + /* + * Switch the LED on. + * Will be patched with a NO_OP if LED + * not needed or not desired. + */ + SCR_REG_REG (gpreg, SCR_AND, 0xfe), + 0, + /* + * load the target id into the sdid + */ + SCR_REG_SFBR (ssid, SCR_AND, 0x8F), + 0, + SCR_TO_REG (sdid), + 0, + /* + * Load the target control block address + */ + SCR_LOAD_ABS (dsa, 4), + PADDR_B (targtbl), + SCR_SFBR_REG (dsa, SCR_SHL, 0), + 0, + SCR_REG_REG (dsa, SCR_SHL, 0), + 0, + SCR_REG_REG (dsa, SCR_AND, 0x3c), + 0, + SCR_LOAD_REL (dsa, 4), + 0, + /* + * We expect MESSAGE IN phase. + * If not, get help from the C code. + */ + SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)), + SIR_RESEL_NO_MSG_IN, + /* + * Load the legacy synchronous transfer registers. + */ + SCR_LOAD_REL (scntl3, 1), + offsetof(struct sym_tcb, head.wval), + SCR_LOAD_REL (sxfer, 1), + offsetof(struct sym_tcb, head.sval), +}/*-------------------------< RESEL_SCNTL4 >---------------------*/,{ + /* + * The C1010 uses a new synchronous timing scheme. + * Will be patched with a NO_OP if not a C1010. + */ + SCR_LOAD_REL (scntl4, 1), + offsetof(struct sym_tcb, head.uval), + /* + * Get the IDENTIFY message. + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + HADDR_1 (msgin), + /* + * If IDENTIFY LUN #0, use a faster path + * to find the LCB structure. + */ + SCR_JUMP ^ IFTRUE (MASK (0x80, 0xbf)), + PADDR_A (resel_lun0), + /* + * If message isn't an IDENTIFY, + * tell the C code about. + */ + SCR_INT ^ IFFALSE (MASK (0x80, 0x80)), + SIR_RESEL_NO_IDENTIFY, + /* + * It is an IDENTIFY message, + * Load the LUN control block address. + */ + SCR_LOAD_REL (dsa, 4), + offsetof(struct sym_tcb, head.luntbl_sa), + SCR_SFBR_REG (dsa, SCR_SHL, 0), + 0, + SCR_REG_REG (dsa, SCR_SHL, 0), + 0, + SCR_REG_REG (dsa, SCR_AND, 0xfc), + 0, + SCR_LOAD_REL (dsa, 4), + 0, + SCR_JUMPR, + 8, +}/*-------------------------< RESEL_LUN0 >-----------------------*/,{ + /* + * LUN 0 special case (but usual one :)) + */ + SCR_LOAD_REL (dsa, 4), + offsetof(struct sym_tcb, head.lun0_sa), + /* + * Jump indirectly to the reselect action for this LUN. + */ + SCR_LOAD_REL (temp, 4), + offsetof(struct sym_lcb, head.resel_sa), + SCR_RETURN, + 0, + /* In normal situations, we jump to RESEL_TAG or RESEL_NO_TAG */ +}/*-------------------------< RESEL_TAG >------------------------*/,{ + /* + * ACK the IDENTIFY previously received. + */ + SCR_CLR (SCR_ACK), + 0, + /* + * It shall be a tagged command. + * Read SIMPLE+TAG. + * The C code will deal with errors. + * Aggressive optimization, isn't it? :) + */ + SCR_MOVE_ABS (2) ^ SCR_MSG_IN, + HADDR_1 (msgin), + /* + * Load the pointer to the tagged task + * table for this LUN. + */ + SCR_LOAD_REL (dsa, 4), + offsetof(struct sym_lcb, head.itlq_tbl_sa), + /* + * The SIDL still contains the TAG value. + * Aggressive optimization, isn't it? :):) + */ + SCR_REG_SFBR (sidl, SCR_SHL, 0), + 0, +#if SYM_CONF_MAX_TASK*4 > 512 + SCR_JUMPR ^ IFFALSE (CARRYSET), + 8, + SCR_REG_REG (dsa1, SCR_OR, 2), + 0, + SCR_REG_REG (sfbr, SCR_SHL, 0), + 0, + SCR_JUMPR ^ IFFALSE (CARRYSET), + 8, + SCR_REG_REG (dsa1, SCR_OR, 1), + 0, +#elif SYM_CONF_MAX_TASK*4 > 256 + SCR_JUMPR ^ IFFALSE (CARRYSET), + 8, + SCR_REG_REG (dsa1, SCR_OR, 1), + 0, +#endif + /* + * Retrieve the DSA of this task. + * JUMP indirectly to the restart point of the CCB. 
+ */ + SCR_SFBR_REG (dsa, SCR_AND, 0xfc), + 0, + SCR_LOAD_REL (dsa, 4), + 0, + SCR_LOAD_REL (temp, 4), + offsetof(struct sym_ccb, phys.head.go.restart), + SCR_RETURN, + 0, + /* In normal situations we branch to RESEL_DSA */ +}/*-------------------------< RESEL_DSA >------------------------*/,{ + /* + * ACK the IDENTIFY or TAG previously received. + */ + SCR_CLR (SCR_ACK), + 0, +}/*-------------------------< RESEL_DSA1 >-----------------------*/,{ + /* + * Initialize the status registers + */ + SCR_LOAD_REL (scr0, 4), + offsetof (struct sym_ccb, phys.head.status), + /* + * Jump to dispatcher. + */ + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< RESEL_NO_TAG >---------------------*/,{ + /* + * Load the DSA with the unique ITL task. + */ + SCR_LOAD_REL (dsa, 4), + offsetof(struct sym_lcb, head.itl_task_sa), + /* + * JUMP indirectly to the restart point of the CCB. + */ + SCR_LOAD_REL (temp, 4), + offsetof(struct sym_ccb, phys.head.go.restart), + SCR_RETURN, + 0, + /* In normal situations we branch to RESEL_DSA */ +}/*-------------------------< DATA_IN >--------------------------*/,{ +/* + * Because the size depends on the + * #define SYM_CONF_MAX_SG parameter, + * it is filled in at runtime. + * + * ##===========< i=0; i========= + * || SCR_CHMOV_TBL ^ SCR_DATA_IN, + * || offsetof (struct sym_dsb, data[ i]), + * ##========================================== + */ +0 +}/*-------------------------< DATA_IN2 >-------------------------*/,{ + SCR_CALL, + PADDR_A (datai_done), + SCR_JUMP, + PADDR_B (data_ovrun), +}/*-------------------------< DATA_OUT >-------------------------*/,{ +/* + * Because the size depends on the + * #define SYM_CONF_MAX_SG parameter, + * it is filled in at runtime. + * + * ##===========< i=0; i========= + * || SCR_CHMOV_TBL ^ SCR_DATA_OUT, + * || offsetof (struct sym_dsb, data[ i]), + * ##========================================== + */ +0 +}/*-------------------------< DATA_OUT2 >------------------------*/,{ + SCR_CALL, + PADDR_A (datao_done), + SCR_JUMP, + PADDR_B (data_ovrun), +}/*-------------------------< PM0_DATA >-------------------------*/,{ + /* + * Read our host flags to SFBR, so we will be able + * to check against the data direction we expect. + */ + SCR_FROM_REG (HF_REG), + 0, + /* + * Check against actual DATA PHASE. + */ + SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)), + PADDR_A (pm0_data_out), + /* + * Actual phase is DATA IN. + * Check against expected direction. + */ + SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)), + PADDR_B (data_ovrun), + /* + * Keep track we are moving data from the + * PM0 DATA mini-script. + */ + SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0), + 0, + /* + * Move the data to memory. + */ + SCR_CHMOV_TBL ^ SCR_DATA_IN, + offsetof (struct sym_ccb, phys.pm0.sg), + SCR_JUMP, + PADDR_A (pm0_data_end), +}/*-------------------------< PM0_DATA_OUT >---------------------*/,{ + /* + * Actual phase is DATA OUT. + * Check against expected direction. + */ + SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)), + PADDR_B (data_ovrun), + /* + * Keep track we are moving data from the + * PM0 DATA mini-script. + */ + SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0), + 0, + /* + * Move the data from memory. + */ + SCR_CHMOV_TBL ^ SCR_DATA_OUT, + offsetof (struct sym_ccb, phys.pm0.sg), +}/*-------------------------< PM0_DATA_END >---------------------*/,{ + /* + * Clear the flag that told we were moving + * data from the PM0 DATA mini-script. 
+ */ + SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM0)), + 0, + /* + * Return to the previous DATA script which + * is guaranteed by design (if no bug) to be + * the main DATA script for this transfer. + */ + SCR_LOAD_REL (temp, 4), + offsetof (struct sym_ccb, phys.pm0.ret), + SCR_RETURN, + 0, +}/*-------------------------< PM1_DATA >-------------------------*/,{ + /* + * Read our host flags to SFBR, so we will be able + * to check against the data direction we expect. + */ + SCR_FROM_REG (HF_REG), + 0, + /* + * Check against actual DATA PHASE. + */ + SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)), + PADDR_A (pm1_data_out), + /* + * Actual phase is DATA IN. + * Check against expected direction. + */ + SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)), + PADDR_B (data_ovrun), + /* + * Keep track we are moving data from the + * PM1 DATA mini-script. + */ + SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1), + 0, + /* + * Move the data to memory. + */ + SCR_CHMOV_TBL ^ SCR_DATA_IN, + offsetof (struct sym_ccb, phys.pm1.sg), + SCR_JUMP, + PADDR_A (pm1_data_end), +}/*-------------------------< PM1_DATA_OUT >---------------------*/,{ + /* + * Actual phase is DATA OUT. + * Check against expected direction. + */ + SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)), + PADDR_B (data_ovrun), + /* + * Keep track we are moving data from the + * PM1 DATA mini-script. + */ + SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1), + 0, + /* + * Move the data from memory. + */ + SCR_CHMOV_TBL ^ SCR_DATA_OUT, + offsetof (struct sym_ccb, phys.pm1.sg), +}/*-------------------------< PM1_DATA_END >---------------------*/,{ + /* + * Clear the flag that told we were moving + * data from the PM1 DATA mini-script. + */ + SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM1)), + 0, + /* + * Return to the previous DATA script which + * is guaranteed by design (if no bug) to be + * the main DATA script for this transfer. + */ + SCR_LOAD_REL (temp, 4), + offsetof (struct sym_ccb, phys.pm1.ret), + SCR_RETURN, + 0, +}/*-------------------------<>-----------------------------------*/ +}; + +static struct SYM_FWB_SCR SYM_FWB_SCR = { +/*--------------------------< START64 >--------------------------*/ { + /* + * SCRIPT entry point for the 895A, 896 and 1010. + * For now, there is no specific stuff for those + * chips at this point, but this may come. + */ + SCR_JUMP, + PADDR_A (init), +}/*-------------------------< NO_DATA >--------------------------*/,{ + SCR_JUMP, + PADDR_B (data_ovrun), +}/*-------------------------< SEL_FOR_ABORT >--------------------*/,{ + /* + * We are jumped here by the C code, if we have + * some target to reset or some disconnected + * job to abort. Since error recovery is a serious + * busyness, we will really reset the SCSI BUS, if + * case of a SCSI interrupt occurring in this path. + */ +#ifdef SYM_CONF_TARGET_ROLE_SUPPORT + /* + * Set initiator mode. + */ + SCR_CLR (SCR_TRG), + 0, +#endif + /* + * And try to select this target. + */ + SCR_SEL_TBL_ATN ^ offsetof (struct sym_hcb, abrt_sel), + PADDR_A (reselect), + /* + * Wait for the selection to complete or + * the selection to time out. + */ + SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)), + -8, + /* + * Call the C code. + */ + SCR_INT, + SIR_TARGET_SELECTED, + /* + * The C code should let us continue here. + * Send the 'kiss of death' message. + * We expect an immediate disconnect once + * the target has eaten the message. 
+ */ + SCR_REG_REG (scntl2, SCR_AND, 0x7f), + 0, + SCR_MOVE_TBL ^ SCR_MSG_OUT, + offsetof (struct sym_hcb, abrt_tbl), + SCR_CLR (SCR_ACK|SCR_ATN), + 0, + SCR_WAIT_DISC, + 0, + /* + * Tell the C code that we are done. + */ + SCR_INT, + SIR_ABORT_SENT, +}/*-------------------------< SEL_FOR_ABORT_1 >------------------*/,{ + /* + * Jump at scheduler. + */ + SCR_JUMP, + PADDR_A (start), +}/*-------------------------< MSG_IN_ETC >-----------------------*/,{ + /* + * If it is an EXTENDED (variable size message) + * Handle it. + */ + SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)), + PADDR_B (msg_extended), + /* + * Let the C code handle any other + * 1 byte message. + */ + SCR_JUMP ^ IFTRUE (MASK (0x00, 0xf0)), + PADDR_B (msg_received), + SCR_JUMP ^ IFTRUE (MASK (0x10, 0xf0)), + PADDR_B (msg_received), + /* + * We donnot handle 2 bytes messages from SCRIPTS. + * So, let the C code deal with these ones too. + */ + SCR_JUMP ^ IFFALSE (MASK (0x20, 0xf0)), + PADDR_B (msg_weird_seen), + SCR_CLR (SCR_ACK), + 0, + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + HADDR_1 (msgin[1]), +}/*-------------------------< MSG_RECEIVED >---------------------*/,{ + SCR_LOAD_REL (scratcha, 4), /* DUMMY READ */ + 0, + SCR_INT, + SIR_MSG_RECEIVED, +}/*-------------------------< MSG_WEIRD_SEEN >-------------------*/,{ + SCR_LOAD_REL (scratcha, 4), /* DUMMY READ */ + 0, + SCR_INT, + SIR_MSG_WEIRD, +}/*-------------------------< MSG_EXTENDED >---------------------*/,{ + /* + * Clear ACK and get the next byte + * assumed to be the message length. + */ + SCR_CLR (SCR_ACK), + 0, + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + HADDR_1 (msgin[1]), + /* + * Try to catch some unlikely situations as 0 length + * or too large the length. + */ + SCR_JUMP ^ IFTRUE (DATA (0)), + PADDR_B (msg_weird_seen), + SCR_TO_REG (scratcha), + 0, + SCR_REG_REG (sfbr, SCR_ADD, (256-8)), + 0, + SCR_JUMP ^ IFTRUE (CARRYSET), + PADDR_B (msg_weird_seen), + /* + * We donnot handle extended messages from SCRIPTS. + * Read the amount of data corresponding to the + * message length and call the C code. + */ + SCR_STORE_REL (scratcha, 1), + offsetof (struct sym_dsb, smsg_ext.size), + SCR_CLR (SCR_ACK), + 0, + SCR_MOVE_TBL ^ SCR_MSG_IN, + offsetof (struct sym_dsb, smsg_ext), + SCR_JUMP, + PADDR_B (msg_received), +}/*-------------------------< MSG_BAD >--------------------------*/,{ + /* + * unimplemented message - reject it. + */ + SCR_INT, + SIR_REJECT_TO_SEND, + SCR_SET (SCR_ATN), + 0, + SCR_JUMP, + PADDR_A (clrack), +}/*-------------------------< MSG_WEIRD >------------------------*/,{ + /* + * weird message received + * ignore all MSG IN phases and reject it. + */ + SCR_INT, + SIR_REJECT_TO_SEND, + SCR_SET (SCR_ATN), + 0, +}/*-------------------------< MSG_WEIRD1 >-----------------------*/,{ + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), + PADDR_A (dispatch), + SCR_MOVE_ABS (1) ^ SCR_MSG_IN, + HADDR_1 (scratch), + SCR_JUMP, + PADDR_B (msg_weird1), +}/*-------------------------< WDTR_RESP >------------------------*/,{ + /* + * let the target fetch our answer. + */ + SCR_SET (SCR_ATN), + 0, + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)), + PADDR_B (nego_bad_phase), +}/*-------------------------< SEND_WDTR >------------------------*/,{ + /* + * Send the M_X_WIDE_REQ + */ + SCR_MOVE_ABS (4) ^ SCR_MSG_OUT, + HADDR_1 (msgout), + SCR_JUMP, + PADDR_B (msg_out_done), +}/*-------------------------< SDTR_RESP >------------------------*/,{ + /* + * let the target fetch our answer. 
+ */ + SCR_SET (SCR_ATN), + 0, + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)), + PADDR_B (nego_bad_phase), +}/*-------------------------< SEND_SDTR >------------------------*/,{ + /* + * Send the M_X_SYNC_REQ + */ + SCR_MOVE_ABS (5) ^ SCR_MSG_OUT, + HADDR_1 (msgout), + SCR_JUMP, + PADDR_B (msg_out_done), +}/*-------------------------< PPR_RESP >-------------------------*/,{ + /* + * let the target fetch our answer. + */ + SCR_SET (SCR_ATN), + 0, + SCR_CLR (SCR_ACK), + 0, + SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)), + PADDR_B (nego_bad_phase), +}/*-------------------------< SEND_PPR >-------------------------*/,{ + /* + * Send the M_X_PPR_REQ + */ + SCR_MOVE_ABS (8) ^ SCR_MSG_OUT, + HADDR_1 (msgout), + SCR_JUMP, + PADDR_B (msg_out_done), +}/*-------------------------< NEGO_BAD_PHASE >-------------------*/,{ + SCR_INT, + SIR_NEGO_PROTO, + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< MSG_OUT >--------------------------*/,{ + /* + * The target requests a message. + * We donnot send messages that may + * require the device to go to bus free. + */ + SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, + HADDR_1 (msgout), + /* + * ... wait for the next phase + * if it's a message out, send it again, ... + */ + SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)), + PADDR_B (msg_out), +}/*-------------------------< MSG_OUT_DONE >---------------------*/,{ + /* + * Let the C code be aware of the + * sent message and clear the message. + */ + SCR_INT, + SIR_MSG_OUT_DONE, + /* + * ... and process the next phase + */ + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< DATA_OVRUN >-----------------------*/,{ + /* + * Use scratcha to count the extra bytes. + */ + SCR_LOAD_ABS (scratcha, 4), + PADDR_B (zero), +}/*-------------------------< DATA_OVRUN1 >----------------------*/,{ + /* + * The target may want to transfer too much data. + * + * If phase is DATA OUT write 1 byte and count it. + */ + SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)), + 16, + SCR_CHMOV_ABS (1) ^ SCR_DATA_OUT, + HADDR_1 (scratch), + SCR_JUMP, + PADDR_B (data_ovrun2), + /* + * If WSR is set, clear this condition, and + * count this byte. + */ + SCR_FROM_REG (scntl2), + 0, + SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)), + 16, + SCR_REG_REG (scntl2, SCR_OR, WSR), + 0, + SCR_JUMP, + PADDR_B (data_ovrun2), + /* + * Finally check against DATA IN phase. + * Signal data overrun to the C code + * and jump to dispatcher if not so. + * Read 1 byte otherwise and count it. + */ + SCR_JUMPR ^ IFTRUE (WHEN (SCR_DATA_IN)), + 16, + SCR_INT, + SIR_DATA_OVERRUN, + SCR_JUMP, + PADDR_A (dispatch), + SCR_CHMOV_ABS (1) ^ SCR_DATA_IN, + HADDR_1 (scratch), +}/*-------------------------< DATA_OVRUN2 >----------------------*/,{ + /* + * Count this byte. + * This will allow to return a negative + * residual to user. + */ + SCR_REG_REG (scratcha, SCR_ADD, 0x01), + 0, + SCR_REG_REG (scratcha1, SCR_ADDC, 0), + 0, + SCR_REG_REG (scratcha2, SCR_ADDC, 0), + 0, + /* + * .. and repeat as required. 
+ */ + SCR_JUMP, + PADDR_B (data_ovrun1), +}/*-------------------------< ABORT_RESEL >----------------------*/,{ + SCR_SET (SCR_ATN), + 0, + SCR_CLR (SCR_ACK), + 0, + /* + * send the abort/abortag/reset message + * we expect an immediate disconnect + */ + SCR_REG_REG (scntl2, SCR_AND, 0x7f), + 0, + SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, + HADDR_1 (msgout), + SCR_CLR (SCR_ACK|SCR_ATN), + 0, + SCR_WAIT_DISC, + 0, + SCR_INT, + SIR_RESEL_ABORTED, + SCR_JUMP, + PADDR_A (start), +}/*-------------------------< RESEND_IDENT >---------------------*/,{ + /* + * The target stays in MSG OUT phase after having acked + * Identify [+ Tag [+ Extended message ]]. Targets shall + * behave this way on parity error. + * We must send it again all the messages. + */ + SCR_SET (SCR_ATN), /* Shall be asserted 2 deskew delays before the */ + 0, /* 1rst ACK = 90 ns. Hope the chip isn't too fast */ + SCR_JUMP, + PADDR_A (send_ident), +}/*-------------------------< IDENT_BREAK >----------------------*/,{ + SCR_CLR (SCR_ATN), + 0, + SCR_JUMP, + PADDR_A (select2), +}/*-------------------------< IDENT_BREAK_ATN >------------------*/,{ + SCR_SET (SCR_ATN), + 0, + SCR_JUMP, + PADDR_A (select2), +}/*-------------------------< SDATA_IN >-------------------------*/,{ + SCR_CHMOV_TBL ^ SCR_DATA_IN, + offsetof (struct sym_dsb, sense), + SCR_CALL, + PADDR_A (datai_done), + SCR_JUMP, + PADDR_B (data_ovrun), +}/*-------------------------< RESEL_BAD_LUN >--------------------*/,{ + /* + * Message is an IDENTIFY, but lun is unknown. + * Signal problem to C code for logging the event. + * Send a M_ABORT to clear all pending tasks. + */ + SCR_INT, + SIR_RESEL_BAD_LUN, + SCR_JUMP, + PADDR_B (abort_resel), +}/*-------------------------< BAD_I_T_L >------------------------*/,{ + /* + * We donnot have a task for that I_T_L. + * Signal problem to C code for logging the event. + * Send a M_ABORT message. + */ + SCR_INT, + SIR_RESEL_BAD_I_T_L, + SCR_JUMP, + PADDR_B (abort_resel), +}/*-------------------------< BAD_I_T_L_Q >----------------------*/,{ + /* + * We donnot have a task that matches the tag. + * Signal problem to C code for logging the event. + * Send a M_ABORTTAG message. + */ + SCR_INT, + SIR_RESEL_BAD_I_T_L_Q, + SCR_JUMP, + PADDR_B (abort_resel), +}/*-------------------------< BAD_STATUS >-----------------------*/,{ + /* + * Anything different from INTERMEDIATE + * CONDITION MET should be a bad SCSI status, + * given that GOOD status has already been tested. + * Call the C code. + */ + SCR_LOAD_ABS (scratcha, 4), + PADDR_B (startpos), + SCR_INT ^ IFFALSE (DATA (S_COND_MET)), + SIR_BAD_SCSI_STATUS, + SCR_RETURN, + 0, +}/*-------------------------< PM_HANDLE >------------------------*/,{ + /* + * Phase mismatch handling. + * + * Since we have to deal with 2 SCSI data pointers + * (current and saved), we need at least 2 contexts. + * Each context (pm0 and pm1) has a saved area, a + * SAVE mini-script and a DATA phase mini-script. + */ + /* + * Get the PM handling flags. + */ + SCR_FROM_REG (HF_REG), + 0, + /* + * If no flags (1rst PM for example), avoid + * all the below heavy flags testing. + * This makes the normal case a bit faster. + */ + SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED))), + PADDR_B (pm_handle1), + /* + * If we received a SAVE DP, switch to the + * other PM context since the savep may point + * to the current PM context. 
+ */ + SCR_JUMPR ^ IFFALSE (MASK (HF_DP_SAVED, HF_DP_SAVED)), + 8, + SCR_REG_REG (sfbr, SCR_XOR, HF_ACT_PM), + 0, + /* + * If we have been interrupt in a PM DATA mini-script, + * we take the return address from the corresponding + * saved area. + * This ensure the return address always points to the + * main DATA script for this transfer. + */ + SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1))), + PADDR_B (pm_handle1), + SCR_JUMPR ^ IFFALSE (MASK (HF_IN_PM0, HF_IN_PM0)), + 16, + SCR_LOAD_REL (ia, 4), + offsetof(struct sym_ccb, phys.pm0.ret), + SCR_JUMP, + PADDR_B (pm_save), + SCR_LOAD_REL (ia, 4), + offsetof(struct sym_ccb, phys.pm1.ret), + SCR_JUMP, + PADDR_B (pm_save), +}/*-------------------------< PM_HANDLE1 >-----------------------*/,{ + /* + * Normal case. + * Update the return address so that it + * will point after the interrupted MOVE. + */ + SCR_REG_REG (ia, SCR_ADD, 8), + 0, + SCR_REG_REG (ia1, SCR_ADDC, 0), + 0, +}/*-------------------------< PM_SAVE >--------------------------*/,{ + /* + * Clear all the flags that told us if we were + * interrupted in a PM DATA mini-script and/or + * we received a SAVE DP. + */ + SCR_SFBR_REG (HF_REG, SCR_AND, (~(HF_IN_PM0|HF_IN_PM1|HF_DP_SAVED))), + 0, + /* + * Choose the current PM context. + */ + SCR_JUMP ^ IFTRUE (MASK (HF_ACT_PM, HF_ACT_PM)), + PADDR_B (pm1_save), +}/*-------------------------< PM0_SAVE >-------------------------*/,{ + SCR_STORE_REL (ia, 4), + offsetof(struct sym_ccb, phys.pm0.ret), + /* + * If WSR bit is set, either UA and RBC may + * have to be changed whether the device wants + * to ignore this residue or not. + */ + SCR_FROM_REG (scntl2), + 0, + SCR_CALL ^ IFTRUE (MASK (WSR, WSR)), + PADDR_B (pm_wsr_handle), + /* + * Save the remaining byte count, the updated + * address and the return address. + */ + SCR_STORE_REL (rbc, 4), + offsetof(struct sym_ccb, phys.pm0.sg.size), + SCR_STORE_REL (ua, 4), + offsetof(struct sym_ccb, phys.pm0.sg.addr), + /* + * Set the current pointer at the PM0 DATA mini-script. + */ + SCR_LOAD_ABS (ia, 4), + PADDR_B (pm0_data_addr), +}/*-------------------------< PM_SAVE_END >----------------------*/,{ + SCR_STORE_REL (ia, 4), + offsetof(struct sym_ccb, phys.head.lastp), + SCR_JUMP, + PADDR_A (dispatch), +}/*-------------------------< PM1_SAVE >-------------------------*/,{ + SCR_STORE_REL (ia, 4), + offsetof(struct sym_ccb, phys.pm1.ret), + /* + * If WSR bit is set, either UA and RBC may + * have to be changed whether the device wants + * to ignore this residue or not. + */ + SCR_FROM_REG (scntl2), + 0, + SCR_CALL ^ IFTRUE (MASK (WSR, WSR)), + PADDR_B (pm_wsr_handle), + /* + * Save the remaining byte count, the updated + * address and the return address. + */ + SCR_STORE_REL (rbc, 4), + offsetof(struct sym_ccb, phys.pm1.sg.size), + SCR_STORE_REL (ua, 4), + offsetof(struct sym_ccb, phys.pm1.sg.addr), + /* + * Set the current pointer at the PM1 DATA mini-script. + */ + SCR_LOAD_ABS (ia, 4), + PADDR_B (pm1_data_addr), + SCR_JUMP, + PADDR_B (pm_save_end), +}/*-------------------------< PM_WSR_HANDLE >--------------------*/,{ + /* + * Phase mismatch handling from SCRIPT with WSR set. + * Such a condition can occur if the chip wants to + * execute a CHMOV(size > 1) when the WSR bit is + * set and the target changes PHASE. + * + * We must move the residual byte to memory. + * + * UA contains bit 0..31 of the address to + * move the residual byte. + * Move it to the table indirect. 
+ */ + SCR_STORE_REL (ua, 4), + offsetof (struct sym_ccb, phys.wresid.addr), + /* + * Increment UA (move address to next position). + */ + SCR_REG_REG (ua, SCR_ADD, 1), + 0, + SCR_REG_REG (ua1, SCR_ADDC, 0), + 0, + SCR_REG_REG (ua2, SCR_ADDC, 0), + 0, + SCR_REG_REG (ua3, SCR_ADDC, 0), + 0, + /* + * Compute SCRATCHA as: + * - size to transfer = 1 byte. + * - bit 24..31 = high address bit [32...39]. + */ + SCR_LOAD_ABS (scratcha, 4), + PADDR_B (zero), + SCR_REG_REG (scratcha, SCR_OR, 1), + 0, + SCR_FROM_REG (rbc3), + 0, + SCR_TO_REG (scratcha3), + 0, + /* + * Move this value to the table indirect. + */ + SCR_STORE_REL (scratcha, 4), + offsetof (struct sym_ccb, phys.wresid.size), + /* + * Wait for a valid phase. + * While testing with bogus QUANTUM drives, the C1010 + * sometimes raised a spurious phase mismatch with + * WSR and the CHMOV(1) triggered another PM. + * Waiting explicitly for the PHASE seemed to avoid + * the nested phase mismatch. Btw, this didn't happen + * using my IBM drives. + */ + SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_IN)), + 0, + /* + * Perform the move of the residual byte. + */ + SCR_CHMOV_TBL ^ SCR_DATA_IN, + offsetof (struct sym_ccb, phys.wresid), + /* + * We can now handle the phase mismatch with UA fixed. + * RBC[0..23]=0 is a special case that does not require + * a PM context. The C code also checks against this. + */ + SCR_FROM_REG (rbc), + 0, + SCR_RETURN ^ IFFALSE (DATA (0)), + 0, + SCR_FROM_REG (rbc1), + 0, + SCR_RETURN ^ IFFALSE (DATA (0)), + 0, + SCR_FROM_REG (rbc2), + 0, + SCR_RETURN ^ IFFALSE (DATA (0)), + 0, + /* + * RBC[0..23]=0. + * Not only we donnot need a PM context, but this would + * lead to a bogus CHMOV(0). This condition means that + * the residual was the last byte to move from this CHMOV. + * So, we just have to move the current data script pointer + * (i.e. TEMP) to the SCRIPTS address following the + * interrupted CHMOV and jump to dispatcher. + * IA contains the data pointer to save. + */ + SCR_JUMP, + PADDR_B (pm_save_end), +}/*-------------------------< WSR_MA_HELPER >--------------------*/,{ + /* + * Helper for the C code when WSR bit is set. + * Perform the move of the residual byte. + */ + SCR_CHMOV_TBL ^ SCR_DATA_IN, + offsetof (struct sym_ccb, phys.wresid), + SCR_JUMP, + PADDR_A (dispatch), + +}/*-------------------------< ZERO >-----------------------------*/,{ + SCR_DATA_ZERO, +}/*-------------------------< SCRATCH >--------------------------*/,{ + SCR_DATA_ZERO, +}/*-------------------------< PM0_DATA_ADDR >--------------------*/,{ + SCR_DATA_ZERO, +}/*-------------------------< PM1_DATA_ADDR >--------------------*/,{ + SCR_DATA_ZERO, +}/*-------------------------< DONE_POS >-------------------------*/,{ + SCR_DATA_ZERO, +}/*-------------------------< STARTPOS >-------------------------*/,{ + SCR_DATA_ZERO, +}/*-------------------------< TARGTBL >--------------------------*/,{ + SCR_DATA_ZERO, +}/*-------------------------<>-----------------------------------*/ +}; + +static struct SYM_FWZ_SCR SYM_FWZ_SCR = { + /*-------------------------< SNOOPTEST >------------------------*/{ + /* + * Read the variable from memory. + */ + SCR_LOAD_REL (scratcha, 4), + offsetof(struct sym_hcb, scratch), + /* + * Write the variable to memory. + */ + SCR_STORE_REL (temp, 4), + offsetof(struct sym_hcb, scratch), + /* + * Read back the variable from memory. + */ + SCR_LOAD_REL (temp, 4), + offsetof(struct sym_hcb, scratch), +}/*-------------------------< SNOOPEND >-------------------------*/,{ + /* + * And stop. 
+ */ + SCR_INT, + 99, +}/*-------------------------<>-----------------------------------*/ +}; diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c new file mode 100644 index 000000000..17491ba10 --- /dev/null +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -0,0 +1,2057 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family + * of PCI-SCSI IO processors. + * + * Copyright (C) 1999-2001 Gerard Roudier + * Copyright (c) 2003-2005 Matthew Wilcox + * + * This driver is derived from the Linux sym53c8xx driver. + * Copyright (C) 1998-2000 Gerard Roudier + * + * The sym53c8xx driver is derived from the ncr53c8xx driver that had been + * a port of the FreeBSD ncr driver to Linux-1.2.13. + * + * The original ncr driver has been written for 386bsd and FreeBSD by + * Wolfgang Stanglmeier + * Stefan Esser + * Copyright (C) 1994 Wolfgang Stanglmeier + * + * Other major contributions: + * + * NVRAM detection and reading. + * Copyright (C) 1997 Richard Waltham + * + *----------------------------------------------------------------------------- + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sym_glue.h" +#include "sym_nvram.h" + +#define NAME53C "sym53c" +#define NAME53C8XX "sym53c8xx" + +struct sym_driver_setup sym_driver_setup = SYM_LINUX_DRIVER_SETUP; +unsigned int sym_debug_flags = 0; + +static char *excl_string; +static char *safe_string; +module_param_named(cmd_per_lun, sym_driver_setup.max_tag, ushort, 0); +module_param_named(burst, sym_driver_setup.burst_order, byte, 0); +module_param_named(led, sym_driver_setup.scsi_led, byte, 0); +module_param_named(diff, sym_driver_setup.scsi_diff, byte, 0); +module_param_named(irqm, sym_driver_setup.irq_mode, byte, 0); +module_param_named(buschk, sym_driver_setup.scsi_bus_check, byte, 0); +module_param_named(hostid, sym_driver_setup.host_id, byte, 0); +module_param_named(verb, sym_driver_setup.verbose, byte, 0); +module_param_named(debug, sym_debug_flags, uint, 0); +module_param_named(settle, sym_driver_setup.settle_delay, byte, 0); +module_param_named(nvram, sym_driver_setup.use_nvram, byte, 0); +module_param_named(excl, excl_string, charp, 0); +module_param_named(safe, safe_string, charp, 0); + +MODULE_PARM_DESC(cmd_per_lun, "The maximum number of tags to use by default"); +MODULE_PARM_DESC(burst, "Maximum burst. 0 to disable, 255 to read from registers"); +MODULE_PARM_DESC(led, "Set to 1 to enable LED support"); +MODULE_PARM_DESC(diff, "0 for no differential mode, 1 for BIOS, 2 for always, 3 for not GPIO3"); +MODULE_PARM_DESC(irqm, "0 for open drain, 1 to leave alone, 2 for totem pole"); +MODULE_PARM_DESC(buschk, "0 to not check, 1 for detach on error, 2 for warn on error"); +MODULE_PARM_DESC(hostid, "The SCSI ID to use for the host adapters"); +MODULE_PARM_DESC(verb, "0 for minimal verbosity, 1 for normal, 2 for excessive"); +MODULE_PARM_DESC(debug, "Set bits to enable debugging"); +MODULE_PARM_DESC(settle, "Settle delay in seconds. 
Default 3"); +MODULE_PARM_DESC(nvram, "Option currently not used"); +MODULE_PARM_DESC(excl, "List ioport addresses here to prevent controllers from being attached"); +MODULE_PARM_DESC(safe, "Set other settings to a \"safe mode\""); + +MODULE_LICENSE("GPL"); +MODULE_VERSION(SYM_VERSION); +MODULE_AUTHOR("Matthew Wilcox "); +MODULE_DESCRIPTION("NCR, Symbios and LSI 8xx and 1010 PCI SCSI adapters"); + +static void sym2_setup_params(void) +{ + char *p = excl_string; + int xi = 0; + + while (p && (xi < 8)) { + char *next_p; + int val = (int) simple_strtoul(p, &next_p, 0); + sym_driver_setup.excludes[xi++] = val; + p = next_p; + } + + if (safe_string) { + if (*safe_string == 'y') { + sym_driver_setup.max_tag = 0; + sym_driver_setup.burst_order = 0; + sym_driver_setup.scsi_led = 0; + sym_driver_setup.scsi_diff = 1; + sym_driver_setup.irq_mode = 0; + sym_driver_setup.scsi_bus_check = 2; + sym_driver_setup.host_id = 7; + sym_driver_setup.verbose = 2; + sym_driver_setup.settle_delay = 10; + sym_driver_setup.use_nvram = 1; + } else if (*safe_string != 'n') { + printk(KERN_WARNING NAME53C8XX "Ignoring parameter %s" + " passed to safe option", safe_string); + } + } +} + +static struct scsi_transport_template *sym2_transport_template = NULL; + +/* + * Driver private area in the SCSI command structure. + */ +struct sym_ucmd { /* Override the SCSI pointer structure */ + struct completion *eh_done; /* SCSI error handling */ +}; + +#define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)scsi_cmd_priv(cmd)) +#define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host) + +/* + * Complete a pending CAM CCB. + */ +void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd) +{ + struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd); + + if (ucmd->eh_done) + complete(ucmd->eh_done); + + scsi_dma_unmap(cmd); + scsi_done(cmd); +} + +/* + * Tell the SCSI layer about a BUS RESET. + */ +void sym_xpt_async_bus_reset(struct sym_hcb *np) +{ + printf_notice("%s: SCSI BUS has been reset.\n", sym_name(np)); + np->s.settle_time = jiffies + sym_driver_setup.settle_delay * HZ; + np->s.settle_time_valid = 1; + if (sym_verbose >= 2) + printf_info("%s: command processing suspended for %d seconds\n", + sym_name(np), sym_driver_setup.settle_delay); +} + +/* + * Choose the more appropriate CAM status if + * the IO encountered an extended error. + */ +static int sym_xerr_cam_status(int cam_status, int x_status) +{ + if (x_status) { + if (x_status & XE_PARITY_ERR) + cam_status = DID_PARITY; + else + cam_status = DID_ERROR; + } + return cam_status; +} + +/* + * Build CAM result for a failed or auto-sensed IO. + */ +void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid) +{ + struct scsi_cmnd *cmd = cp->cmd; + u_int cam_status, scsi_status; + + cam_status = DID_OK; + scsi_status = cp->ssss_status; + + if (cp->host_flags & HF_SENSE) { + scsi_status = cp->sv_scsi_status; + resid = cp->sv_resid; + if (sym_verbose && cp->sv_xerr_status) + sym_print_xerr(cmd, cp->sv_xerr_status); + if (cp->host_status == HS_COMPLETE && + cp->ssss_status == S_GOOD && + cp->xerr_status == 0) { + cam_status = sym_xerr_cam_status(DID_OK, + cp->sv_xerr_status); + /* + * Bounce back the sense data to user. + */ + memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + memcpy(cmd->sense_buffer, cp->sns_bbuf, + min(SCSI_SENSE_BUFFERSIZE, SYM_SNS_BBUF_LEN)); +#if 0 + /* + * If the device reports a UNIT ATTENTION condition + * due to a RESET condition, we should consider all + * disconnect CCBs for this unit as aborted. 
+ */ + if (1) { + u_char *p; + p = (u_char *) cmd->sense_data; + if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29) + sym_clear_tasks(np, DID_ABORT, + cp->target,cp->lun, -1); + } +#endif + } else { + /* + * Error return from our internal request sense. This + * is bad: we must clear the contingent allegiance + * condition otherwise the device will always return + * BUSY. Use a big stick. + */ + sym_reset_scsi_target(np, cmd->device->id); + cam_status = DID_ERROR; + } + } else if (cp->host_status == HS_COMPLETE) /* Bad SCSI status */ + cam_status = DID_OK; + else if (cp->host_status == HS_SEL_TIMEOUT) /* Selection timeout */ + cam_status = DID_NO_CONNECT; + else if (cp->host_status == HS_UNEXPECTED) /* Unexpected BUS FREE*/ + cam_status = DID_ERROR; + else { /* Extended error */ + if (sym_verbose) { + sym_print_addr(cmd, "COMMAND FAILED (%x %x %x).\n", + cp->host_status, cp->ssss_status, + cp->xerr_status); + } + /* + * Set the most appropriate value for CAM status. + */ + cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status); + } + scsi_set_resid(cmd, resid); + cmd->result = (cam_status << 16) | scsi_status; +} + +static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd) +{ + int segment; + int use_sg; + + cp->data_len = 0; + + use_sg = scsi_dma_map(cmd); + if (use_sg > 0) { + struct scatterlist *sg; + struct sym_tcb *tp = &np->target[cp->target]; + struct sym_tblmove *data; + + if (use_sg > SYM_CONF_MAX_SG) { + scsi_dma_unmap(cmd); + return -1; + } + + data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg]; + + scsi_for_each_sg(cmd, sg, use_sg, segment) { + dma_addr_t baddr = sg_dma_address(sg); + unsigned int len = sg_dma_len(sg); + + if ((len & 1) && (tp->head.wval & EWS)) { + len++; + cp->odd_byte_adjustment++; + } + + sym_build_sge(np, &data[segment], baddr, len); + cp->data_len += len; + } + } else { + segment = -2; + } + + return segment; +} + +/* + * Queue a SCSI command. + */ +static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd) +{ + struct scsi_device *sdev = cmd->device; + struct sym_tcb *tp; + struct sym_lcb *lp; + struct sym_ccb *cp; + int order; + + /* + * Retrieve the target descriptor. + */ + tp = &np->target[sdev->id]; + + /* + * Select tagged/untagged. + */ + lp = sym_lp(tp, sdev->lun); + order = (lp && lp->s.reqtags) ? M_SIMPLE_TAG : 0; + + /* + * Queue the SCSI IO. + */ + cp = sym_get_ccb(np, cmd, order); + if (!cp) + return 1; /* Means resource shortage */ + sym_queue_scsiio(np, cmd, cp); + return 0; +} + +/* + * Setup buffers and pointers that address the CDB. + */ +static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) +{ + memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len); + + cp->phys.cmd.addr = CCB_BA(cp, cdb_buf[0]); + cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len); + + return 0; +} + +/* + * Setup pointers that address the data and start the I/O. + */ +int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) +{ + u32 lastp, goalp; + int dir; + + /* + * Build the CDB. + */ + if (sym_setup_cdb(np, cmd, cp)) + goto out_abort; + + /* + * No direction means no data. + */ + dir = cmd->sc_data_direction; + if (dir != DMA_NONE) { + cp->segments = sym_scatter(np, cp, cmd); + if (cp->segments < 0) { + sym_set_cam_status(cmd, DID_ERROR); + goto out_abort; + } + + /* + * No segments means no data. + */ + if (!cp->segments) + dir = DMA_NONE; + } else { + cp->data_len = 0; + cp->segments = 0; + } + + /* + * Set the data pointer. 
+ */ + switch (dir) { + case DMA_BIDIRECTIONAL: + scmd_printk(KERN_INFO, cmd, "got DMA_BIDIRECTIONAL command"); + sym_set_cam_status(cmd, DID_ERROR); + goto out_abort; + case DMA_TO_DEVICE: + goalp = SCRIPTA_BA(np, data_out2) + 8; + lastp = goalp - 8 - (cp->segments * (2*4)); + break; + case DMA_FROM_DEVICE: + cp->host_flags |= HF_DATA_IN; + goalp = SCRIPTA_BA(np, data_in2) + 8; + lastp = goalp - 8 - (cp->segments * (2*4)); + break; + case DMA_NONE: + default: + lastp = goalp = SCRIPTB_BA(np, no_data); + break; + } + + /* + * Set all pointers values needed by SCRIPTS. + */ + cp->phys.head.lastp = cpu_to_scr(lastp); + cp->phys.head.savep = cpu_to_scr(lastp); + cp->startp = cp->phys.head.savep; + cp->goalp = cpu_to_scr(goalp); + + /* + * When `#ifed 1', the code below makes the driver + * panic on the first attempt to write to a SCSI device. + * It is the first test we want to do after a driver + * change that does not seem obviously safe. :) + */ +#if 0 + switch (cp->cdb_buf[0]) { + case 0x0A: case 0x2A: case 0xAA: + panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n"); + break; + default: + break; + } +#endif + + /* + * activate this job. + */ + sym_put_start_queue(np, cp); + return 0; + +out_abort: + sym_free_ccb(np, cp); + sym_xpt_done(np, cmd); + return 0; +} + + +/* + * timer daemon. + * + * Misused to keep the driver running when + * interrupts are not configured correctly. + */ +static void sym_timer(struct sym_hcb *np) +{ + unsigned long thistime = jiffies; + + /* + * Restart the timer. + */ + np->s.timer.expires = thistime + SYM_CONF_TIMER_INTERVAL; + add_timer(&np->s.timer); + + /* + * If we are resetting the ncr, wait for settle_time before + * clearing it. Then command processing will be resumed. + */ + if (np->s.settle_time_valid) { + if (time_before_eq(np->s.settle_time, thistime)) { + if (sym_verbose >= 2 ) + printk("%s: command processing resumed\n", + sym_name(np)); + np->s.settle_time_valid = 0; + } + return; + } + + /* + * Nothing to do for now, but that may come. + */ + if (np->s.lasttime + 4*HZ < thistime) { + np->s.lasttime = thistime; + } + +#ifdef SYM_CONF_PCIQ_MAY_MISS_COMPLETIONS + /* + * Some way-broken PCI bridges may lead to + * completions being lost when the clearing + * of the INTFLY flag by the CPU occurs + * concurrently with the chip raising this flag. + * If this ever happen, lost completions will + * be reaped here. + */ + sym_wakeup_done(np); +#endif +} + + +/* + * PCI BUS error handler. + */ +void sym_log_bus_error(struct Scsi_Host *shost) +{ + struct sym_data *sym_data = shost_priv(shost); + struct pci_dev *pdev = sym_data->pdev; + unsigned short pci_sts; + pci_read_config_word(pdev, PCI_STATUS, &pci_sts); + if (pci_sts & 0xf900) { + pci_write_config_word(pdev, PCI_STATUS, pci_sts); + shost_printk(KERN_WARNING, shost, + "PCI bus error: status = 0x%04x\n", pci_sts & 0xf900); + } +} + +/* + * queuecommand method. Entered with the host adapter lock held and + * interrupts disabled. + */ +static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd) +{ + struct sym_hcb *np = SYM_SOFTC_PTR(cmd); + struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd); + int sts = 0; + + memset(ucp, 0, sizeof(*ucp)); + + /* + * Shorten our settle_time if needed for + * this command not to time out. 
+ */ + if (np->s.settle_time_valid && scsi_cmd_to_rq(cmd)->timeout) { + unsigned long tlimit = jiffies + scsi_cmd_to_rq(cmd)->timeout; + tlimit -= SYM_CONF_TIMER_INTERVAL*2; + if (time_after(np->s.settle_time, tlimit)) { + np->s.settle_time = tlimit; + } + } + + if (np->s.settle_time_valid) + return SCSI_MLQUEUE_HOST_BUSY; + + sts = sym_queue_command(np, cmd); + if (sts) + return SCSI_MLQUEUE_HOST_BUSY; + return 0; +} + +static DEF_SCSI_QCMD(sym53c8xx_queue_command) + +/* + * Linux entry point of the interrupt handler. + */ +static irqreturn_t sym53c8xx_intr(int irq, void *dev_id) +{ + struct Scsi_Host *shost = dev_id; + struct sym_data *sym_data = shost_priv(shost); + irqreturn_t result; + + /* Avoid spinloop trying to handle interrupts on frozen device */ + if (pci_channel_offline(sym_data->pdev)) + return IRQ_NONE; + + if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("["); + + spin_lock(shost->host_lock); + result = sym_interrupt(shost); + spin_unlock(shost->host_lock); + + if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("]\n"); + + return result; +} + +/* + * Linux entry point of the timer handler + */ +static void sym53c8xx_timer(struct timer_list *t) +{ + struct sym_hcb *np = from_timer(np, t, s.timer); + unsigned long flags; + + spin_lock_irqsave(np->s.host->host_lock, flags); + sym_timer(np); + spin_unlock_irqrestore(np->s.host->host_lock, flags); +} + + +/* + * What the eh thread wants us to perform. + */ +#define SYM_EH_ABORT 0 +#define SYM_EH_DEVICE_RESET 1 +#define SYM_EH_BUS_RESET 2 +#define SYM_EH_HOST_RESET 3 + +/* + * Generic method for our eh processing. + * The 'op' argument tells what we have to do. + */ +static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd) +{ + struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd); + struct Scsi_Host *shost = cmd->device->host; + struct sym_data *sym_data = shost_priv(shost); + struct pci_dev *pdev = sym_data->pdev; + struct sym_hcb *np = sym_data->ncb; + SYM_QUEHEAD *qp; + int cmd_queued = 0; + int sts = -1; + struct completion eh_done; + + scmd_printk(KERN_WARNING, cmd, "%s operation started\n", opname); + + /* We may be in an error condition because the PCI bus + * went down. In this case, we need to wait until the + * PCI bus is reset, the card is reset, and only then + * proceed with the scsi error recovery. There's no + * point in hurrying; take a leisurely wait. 
+ */ +#define WAIT_FOR_PCI_RECOVERY 35 + if (pci_channel_offline(pdev)) { + int finished_reset = 0; + init_completion(&eh_done); + spin_lock_irq(shost->host_lock); + /* Make sure we didn't race */ + if (pci_channel_offline(pdev)) { + BUG_ON(sym_data->io_reset); + sym_data->io_reset = &eh_done; + } else { + finished_reset = 1; + } + spin_unlock_irq(shost->host_lock); + if (!finished_reset) + finished_reset = wait_for_completion_timeout + (sym_data->io_reset, + WAIT_FOR_PCI_RECOVERY*HZ); + spin_lock_irq(shost->host_lock); + sym_data->io_reset = NULL; + spin_unlock_irq(shost->host_lock); + if (!finished_reset) + return SCSI_FAILED; + } + + spin_lock_irq(shost->host_lock); + /* This one is queued in some place -> to wait for completion */ + FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { + struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); + if (cp->cmd == cmd) { + cmd_queued = 1; + break; + } + } + + /* Try to proceed the operation we have been asked for */ + sts = -1; + switch(op) { + case SYM_EH_ABORT: + sts = sym_abort_scsiio(np, cmd, 1); + break; + case SYM_EH_DEVICE_RESET: + sts = sym_reset_scsi_target(np, cmd->device->id); + break; + case SYM_EH_BUS_RESET: + sym_reset_scsi_bus(np, 1); + sts = 0; + break; + case SYM_EH_HOST_RESET: + sym_reset_scsi_bus(np, 0); + sym_start_up(shost, 1); + sts = 0; + break; + default: + break; + } + + /* On error, restore everything and cross fingers :) */ + if (sts) + cmd_queued = 0; + + if (cmd_queued) { + init_completion(&eh_done); + ucmd->eh_done = &eh_done; + spin_unlock_irq(shost->host_lock); + if (!wait_for_completion_timeout(&eh_done, 5*HZ)) { + ucmd->eh_done = NULL; + sts = -2; + } + } else { + spin_unlock_irq(shost->host_lock); + } + + dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname, + sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed"); + return sts ? SCSI_FAILED : SCSI_SUCCESS; +} + + +/* + * Error handlers called from the eh thread (one thread per HBA). + */ +static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd) +{ + return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd); +} + +static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd) +{ + return sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd); +} + +static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd) +{ + return sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd); +} + +static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd) +{ + return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd); +} + +/* + * Tune device queuing depth, according to various limits. + */ +static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags) +{ + struct sym_lcb *lp = sym_lp(tp, lun); + u_short oldtags; + + if (!lp) + return; + + oldtags = lp->s.reqtags; + + if (reqtags > lp->s.scdev_depth) + reqtags = lp->s.scdev_depth; + + lp->s.reqtags = reqtags; + + if (reqtags != oldtags) { + dev_info(&tp->starget->dev, + "tagged command queuing %s, command queue depth %d.\n", + lp->s.reqtags ? "enabled" : "disabled", reqtags); + } +} + +static int sym53c8xx_slave_alloc(struct scsi_device *sdev) +{ + struct sym_hcb *np = sym_get_hcb(sdev->host); + struct sym_tcb *tp = &np->target[sdev->id]; + struct sym_lcb *lp; + unsigned long flags; + int error; + + if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN) + return -ENXIO; + + spin_lock_irqsave(np->s.host->host_lock, flags); + + /* + * Fail the device init if the device is flagged NOSCAN at BOOT in + * the NVRAM. 
This may speed up boot and maintain coherency with + * BIOS device numbering. Clearing the flag allows the user to + * rescan skipped devices later. We also return an error for + * devices not flagged for SCAN LUNS in the NVRAM since some single + * lun devices behave badly when asked for a non zero LUN. + */ + + if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) { + tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; + starget_printk(KERN_INFO, sdev->sdev_target, + "Scan at boot disabled in NVRAM\n"); + error = -ENXIO; + goto out; + } + + if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) { + if (sdev->lun != 0) { + error = -ENXIO; + goto out; + } + starget_printk(KERN_INFO, sdev->sdev_target, + "Multiple LUNs disabled in NVRAM\n"); + } + + lp = sym_alloc_lcb(np, sdev->id, sdev->lun); + if (!lp) { + error = -ENOMEM; + goto out; + } + if (tp->nlcb == 1) + tp->starget = sdev->sdev_target; + + spi_min_period(tp->starget) = tp->usr_period; + spi_max_width(tp->starget) = tp->usr_width; + + error = 0; +out: + spin_unlock_irqrestore(np->s.host->host_lock, flags); + + return error; +} + +/* + * Linux entry point for device queue sizing. + */ +static int sym53c8xx_slave_configure(struct scsi_device *sdev) +{ + struct sym_hcb *np = sym_get_hcb(sdev->host); + struct sym_tcb *tp = &np->target[sdev->id]; + struct sym_lcb *lp = sym_lp(tp, sdev->lun); + int reqtags, depth_to_use; + + /* + * Get user flags. + */ + lp->curr_flags = lp->user_flags; + + /* + * Select queue depth from driver setup. + * Do not use more than configured by user. + * Use at least 1. + * Do not use more than our maximum. + */ + reqtags = sym_driver_setup.max_tag; + if (reqtags > tp->usrtags) + reqtags = tp->usrtags; + if (!sdev->tagged_supported) + reqtags = 0; + if (reqtags > SYM_CONF_MAX_TAG) + reqtags = SYM_CONF_MAX_TAG; + depth_to_use = reqtags ? reqtags : 1; + scsi_change_queue_depth(sdev, depth_to_use); + lp->s.scdev_depth = depth_to_use; + sym_tune_dev_queuing(tp, sdev->lun, reqtags); + + if (!spi_initial_dv(sdev->sdev_target)) + spi_dv_device(sdev); + + return 0; +} + +static void sym53c8xx_slave_destroy(struct scsi_device *sdev) +{ + struct sym_hcb *np = sym_get_hcb(sdev->host); + struct sym_tcb *tp = &np->target[sdev->id]; + struct sym_lcb *lp = sym_lp(tp, sdev->lun); + unsigned long flags; + + /* if slave_alloc returned before allocating a sym_lcb, return */ + if (!lp) + return; + + spin_lock_irqsave(np->s.host->host_lock, flags); + + if (lp->busy_itlq || lp->busy_itl) { + /* + * This really shouldn't happen, but we can't return an error + * so let's try to stop all on-going I/O. + */ + starget_printk(KERN_WARNING, tp->starget, + "Removing busy LCB (%d)\n", (u8)sdev->lun); + sym_reset_scsi_bus(np, 1); + } + + if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) { + /* + * It was the last unit for this target. + */ + tp->head.sval = 0; + tp->head.wval = np->rv_scntl3; + tp->head.uval = 0; + tp->tgoal.check_nego = 1; + tp->starget = NULL; + } + + spin_unlock_irqrestore(np->s.host->host_lock, flags); +} + +/* + * Linux entry point for info() function + */ +static const char *sym53c8xx_info (struct Scsi_Host *host) +{ + return SYM_DRIVER_NAME; +} + + +#ifdef SYM_LINUX_PROC_INFO_SUPPORT +/* + * Proc file system stuff + * + * A read operation returns adapter information. + * A write operation is a control command. + * The string is parsed in the driver code and the command is passed + * to the sym_usercmd() function. 
+ */ + +#ifdef SYM_LINUX_USER_COMMAND_SUPPORT + +struct sym_usrcmd { + u_long target; + u_long lun; + u_long data; + u_long cmd; +}; + +#define UC_SETSYNC 10 +#define UC_SETTAGS 11 +#define UC_SETDEBUG 12 +#define UC_SETWIDE 14 +#define UC_SETFLAG 15 +#define UC_SETVERBOSE 17 +#define UC_RESETDEV 18 +#define UC_CLEARDEV 19 + +static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc) +{ + struct sym_tcb *tp; + int t, l; + + switch (uc->cmd) { + case 0: return; + +#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT + case UC_SETDEBUG: + sym_debug_flags = uc->data; + break; +#endif + case UC_SETVERBOSE: + np->verbose = uc->data; + break; + default: + /* + * We assume that other commands apply to targets. + * This should always be the case and avoid the below + * 4 lines to be repeated 6 times. + */ + for (t = 0; t < SYM_CONF_MAX_TARGET; t++) { + if (!((uc->target >> t) & 1)) + continue; + tp = &np->target[t]; + if (!tp->nlcb) + continue; + + switch (uc->cmd) { + + case UC_SETSYNC: + if (!uc->data || uc->data >= 255) { + tp->tgoal.iu = tp->tgoal.dt = + tp->tgoal.qas = 0; + tp->tgoal.offset = 0; + } else if (uc->data <= 9 && np->minsync_dt) { + if (uc->data < np->minsync_dt) + uc->data = np->minsync_dt; + tp->tgoal.iu = tp->tgoal.dt = + tp->tgoal.qas = 1; + tp->tgoal.width = 1; + tp->tgoal.period = uc->data; + tp->tgoal.offset = np->maxoffs_dt; + } else { + if (uc->data < np->minsync) + uc->data = np->minsync; + tp->tgoal.iu = tp->tgoal.dt = + tp->tgoal.qas = 0; + tp->tgoal.period = uc->data; + tp->tgoal.offset = np->maxoffs; + } + tp->tgoal.check_nego = 1; + break; + case UC_SETWIDE: + tp->tgoal.width = uc->data ? 1 : 0; + tp->tgoal.check_nego = 1; + break; + case UC_SETTAGS: + for (l = 0; l < SYM_CONF_MAX_LUN; l++) + sym_tune_dev_queuing(tp, l, uc->data); + break; + case UC_RESETDEV: + tp->to_reset = 1; + np->istat_sem = SEM; + OUTB(np, nc_istat, SIGP|SEM); + break; + case UC_CLEARDEV: + for (l = 0; l < SYM_CONF_MAX_LUN; l++) { + struct sym_lcb *lp = sym_lp(tp, l); + if (lp) lp->to_clear = 1; + } + np->istat_sem = SEM; + OUTB(np, nc_istat, SIGP|SEM); + break; + case UC_SETFLAG: + tp->usrflags = uc->data; + break; + } + } + break; + } +} + +static int sym_skip_spaces(char *ptr, int len) +{ + int cnt, c; + + for (cnt = len; cnt > 0 && (c = *ptr++) && isspace(c); cnt--); + + return (len - cnt); +} + +static int get_int_arg(char *ptr, int len, u_long *pv) +{ + char *end; + + *pv = simple_strtoul(ptr, &end, 10); + return (end - ptr); +} + +static int is_keyword(char *ptr, int len, char *verb) +{ + int verb_len = strlen(verb); + + if (len >= verb_len && !memcmp(verb, ptr, verb_len)) + return verb_len; + else + return 0; +} + +#define SKIP_SPACES(ptr, len) \ + if ((arg_len = sym_skip_spaces(ptr, len)) < 1) \ + return -EINVAL; \ + ptr += arg_len; len -= arg_len; + +#define GET_INT_ARG(ptr, len, v) \ + if (!(arg_len = get_int_arg(ptr, len, &(v)))) \ + return -EINVAL; \ + ptr += arg_len; len -= arg_len; + + +/* + * Parse a control command + */ + +static int sym_user_command(struct Scsi_Host *shost, char *buffer, int length) +{ + struct sym_hcb *np = sym_get_hcb(shost); + char *ptr = buffer; + int len = length; + struct sym_usrcmd cmd, *uc = &cmd; + int arg_len; + u_long target; + + memset(uc, 0, sizeof(*uc)); + + if (len > 0 && ptr[len-1] == '\n') + --len; + + if ((arg_len = is_keyword(ptr, len, "setsync")) != 0) + uc->cmd = UC_SETSYNC; + else if ((arg_len = is_keyword(ptr, len, "settags")) != 0) + uc->cmd = UC_SETTAGS; + else if ((arg_len = is_keyword(ptr, len, "setverbose")) != 0) + uc->cmd = 
UC_SETVERBOSE; + else if ((arg_len = is_keyword(ptr, len, "setwide")) != 0) + uc->cmd = UC_SETWIDE; +#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT + else if ((arg_len = is_keyword(ptr, len, "setdebug")) != 0) + uc->cmd = UC_SETDEBUG; +#endif + else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0) + uc->cmd = UC_SETFLAG; + else if ((arg_len = is_keyword(ptr, len, "resetdev")) != 0) + uc->cmd = UC_RESETDEV; + else if ((arg_len = is_keyword(ptr, len, "cleardev")) != 0) + uc->cmd = UC_CLEARDEV; + else + arg_len = 0; + +#ifdef DEBUG_PROC_INFO +printk("sym_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd); +#endif + + if (!arg_len) + return -EINVAL; + ptr += arg_len; len -= arg_len; + + switch(uc->cmd) { + case UC_SETSYNC: + case UC_SETTAGS: + case UC_SETWIDE: + case UC_SETFLAG: + case UC_RESETDEV: + case UC_CLEARDEV: + SKIP_SPACES(ptr, len); + if ((arg_len = is_keyword(ptr, len, "all")) != 0) { + ptr += arg_len; len -= arg_len; + uc->target = ~0; + } else { + GET_INT_ARG(ptr, len, target); + uc->target = (1<cmd) { + case UC_SETVERBOSE: + case UC_SETSYNC: + case UC_SETTAGS: + case UC_SETWIDE: + SKIP_SPACES(ptr, len); + GET_INT_ARG(ptr, len, uc->data); +#ifdef DEBUG_PROC_INFO +printk("sym_user_command: data=%ld\n", uc->data); +#endif + break; +#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT + case UC_SETDEBUG: + while (len > 0) { + SKIP_SPACES(ptr, len); + if ((arg_len = is_keyword(ptr, len, "alloc"))) + uc->data |= DEBUG_ALLOC; + else if ((arg_len = is_keyword(ptr, len, "phase"))) + uc->data |= DEBUG_PHASE; + else if ((arg_len = is_keyword(ptr, len, "queue"))) + uc->data |= DEBUG_QUEUE; + else if ((arg_len = is_keyword(ptr, len, "result"))) + uc->data |= DEBUG_RESULT; + else if ((arg_len = is_keyword(ptr, len, "scatter"))) + uc->data |= DEBUG_SCATTER; + else if ((arg_len = is_keyword(ptr, len, "script"))) + uc->data |= DEBUG_SCRIPT; + else if ((arg_len = is_keyword(ptr, len, "tiny"))) + uc->data |= DEBUG_TINY; + else if ((arg_len = is_keyword(ptr, len, "timing"))) + uc->data |= DEBUG_TIMING; + else if ((arg_len = is_keyword(ptr, len, "nego"))) + uc->data |= DEBUG_NEGO; + else if ((arg_len = is_keyword(ptr, len, "tags"))) + uc->data |= DEBUG_TAGS; + else if ((arg_len = is_keyword(ptr, len, "pointer"))) + uc->data |= DEBUG_POINTER; + else + return -EINVAL; + ptr += arg_len; len -= arg_len; + } +#ifdef DEBUG_PROC_INFO +printk("sym_user_command: data=%ld\n", uc->data); +#endif + break; +#endif /* SYM_LINUX_DEBUG_CONTROL_SUPPORT */ + case UC_SETFLAG: + while (len > 0) { + SKIP_SPACES(ptr, len); + if ((arg_len = is_keyword(ptr, len, "no_disc"))) + uc->data &= ~SYM_DISC_ENABLED; + else + return -EINVAL; + ptr += arg_len; len -= arg_len; + } + break; + default: + break; + } + + if (len) + return -EINVAL; + else { + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + sym_exec_user_command(np, uc); + spin_unlock_irqrestore(shost->host_lock, flags); + } + return length; +} + +#endif /* SYM_LINUX_USER_COMMAND_SUPPORT */ + + +/* + * Copy formatted information into the input buffer. + */ +static int sym_show_info(struct seq_file *m, struct Scsi_Host *shost) +{ +#ifdef SYM_LINUX_USER_INFO_SUPPORT + struct sym_data *sym_data = shost_priv(shost); + struct pci_dev *pdev = sym_data->pdev; + struct sym_hcb *np = sym_data->ncb; + + seq_printf(m, "Chip " NAME53C "%s, device id 0x%x, " + "revision id 0x%x\n", np->s.chip_name, + pdev->device, pdev->revision); + seq_printf(m, "At PCI address %s, IRQ %u\n", + pci_name(pdev), pdev->irq); + seq_printf(m, "Min. 
period factor %d, %s SCSI BUS%s\n", + (int) (np->minsync_dt ? np->minsync_dt : np->minsync), + np->maxwide ? "Wide" : "Narrow", + np->minsync_dt ? ", DT capable" : ""); + + seq_printf(m, "Max. started commands %d, " + "max. commands per LUN %d\n", + SYM_CONF_MAX_START, SYM_CONF_MAX_TAG); + + return 0; +#else + return -EINVAL; +#endif /* SYM_LINUX_USER_INFO_SUPPORT */ +} + +#endif /* SYM_LINUX_PROC_INFO_SUPPORT */ + +/* + * Free resources claimed by sym_iomap_device(). Note that + * sym_free_resources() should be used instead of this function after calling + * sym_attach(). + */ +static void sym_iounmap_device(struct sym_device *device) +{ + if (device->s.ioaddr) + pci_iounmap(device->pdev, device->s.ioaddr); + if (device->s.ramaddr) + pci_iounmap(device->pdev, device->s.ramaddr); +} + +/* + * Free controller resources. + */ +static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev, + int do_free_irq) +{ + /* + * Free O/S specific resources. + */ + if (do_free_irq) + free_irq(pdev->irq, np->s.host); + if (np->s.ioaddr) + pci_iounmap(pdev, np->s.ioaddr); + if (np->s.ramaddr) + pci_iounmap(pdev, np->s.ramaddr); + /* + * Free O/S independent resources. + */ + sym_hcb_free(np); + + sym_mfree_dma(np, sizeof(*np), "HCB"); +} + +/* + * Host attach and initialisations. + * + * Allocate host data and ncb structure. + * Remap MMIO region. + * Do chip initialization. + * If all is OK, install interrupt handling and + * start the timer daemon. + */ +static struct Scsi_Host *sym_attach(const struct scsi_host_template *tpnt, int unit, + struct sym_device *dev) +{ + struct sym_data *sym_data; + struct sym_hcb *np = NULL; + struct Scsi_Host *shost = NULL; + struct pci_dev *pdev = dev->pdev; + unsigned long flags; + struct sym_fw *fw; + int do_free_irq = 0; + + printk(KERN_INFO "sym%d: <%s> rev 0x%x at pci %s irq %u\n", + unit, dev->chip.name, pdev->revision, pci_name(pdev), + pdev->irq); + + /* + * Get the firmware for this chip. + */ + fw = sym_find_firmware(&dev->chip); + if (!fw) + goto attach_failed; + + shost = scsi_host_alloc(tpnt, sizeof(*sym_data)); + if (!shost) + goto attach_failed; + sym_data = shost_priv(shost); + + /* + * Allocate immediately the host control block, + * since we are only expecting to succeed. :) + * We keep track in the HCB of all the resources that + * are to be released on error. + */ + np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB"); + if (!np) + goto attach_failed; + np->bus_dmat = &pdev->dev; /* Result in 1 DMA pool per HBA */ + sym_data->ncb = np; + sym_data->pdev = pdev; + np->s.host = shost; + + pci_set_drvdata(pdev, shost); + + /* + * Copy some useful infos to the HCB. + */ + np->hcb_ba = vtobus(np); + np->verbose = sym_driver_setup.verbose; + np->s.unit = unit; + np->features = dev->chip.features; + np->clock_divn = dev->chip.nr_divisor; + np->maxoffs = dev->chip.offset_max; + np->maxburst = dev->chip.burst_max; + np->myaddr = dev->host_id; + np->mmio_ba = (u32)dev->mmio_base; + np->ram_ba = (u32)dev->ram_base; + np->s.ioaddr = dev->s.ioaddr; + np->s.ramaddr = dev->s.ramaddr; + + /* + * Edit its name. 
+ */ + strscpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name)); + sprintf(np->s.inst_name, "sym%d", np->s.unit); + + if ((SYM_CONF_DMA_ADDRESSING_MODE > 0) && (np->features & FE_DAC) && + !dma_set_mask(&pdev->dev, DMA_DAC_MASK)) { + set_dac(np); + } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { + printf_warning("%s: No suitable DMA available\n", sym_name(np)); + goto attach_failed; + } + + if (sym_hcb_attach(shost, fw, dev->nvram)) + goto attach_failed; + + /* + * Install the interrupt handler. + * If we synchonize the C code with SCRIPTS on interrupt, + * we do not want to share the INTR line at all. + */ + if (request_irq(pdev->irq, sym53c8xx_intr, IRQF_SHARED, NAME53C8XX, + shost)) { + printf_err("%s: request irq %u failure\n", + sym_name(np), pdev->irq); + goto attach_failed; + } + do_free_irq = 1; + + /* + * After SCSI devices have been opened, we cannot + * reset the bus safely, so we do it here. + */ + spin_lock_irqsave(shost->host_lock, flags); + if (sym_reset_scsi_bus(np, 0)) + goto reset_failed; + + /* + * Start the SCRIPTS. + */ + sym_start_up(shost, 1); + + /* + * Start the timer daemon + */ + timer_setup(&np->s.timer, sym53c8xx_timer, 0); + np->s.lasttime=0; + sym_timer (np); + + /* + * Fill Linux host instance structure + * and return success. + */ + shost->max_channel = 0; + shost->this_id = np->myaddr; + shost->max_id = np->maxwide ? 16 : 8; + shost->max_lun = SYM_CONF_MAX_LUN; + shost->unique_id = pci_resource_start(pdev, 0); + shost->cmd_per_lun = SYM_CONF_MAX_TAG; + shost->can_queue = (SYM_CONF_MAX_START-2); + shost->sg_tablesize = SYM_CONF_MAX_SG; + shost->max_cmd_len = 16; + BUG_ON(sym2_transport_template == NULL); + shost->transportt = sym2_transport_template; + + /* 53c896 rev 1 errata: DMA may not cross 16MB boundary */ + if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 2) + shost->dma_boundary = 0xFFFFFF; + + spin_unlock_irqrestore(shost->host_lock, flags); + + return shost; + + reset_failed: + printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, " + "TERMINATION, DEVICE POWER etc.!\n", sym_name(np)); + spin_unlock_irqrestore(shost->host_lock, flags); + attach_failed: + printf_info("sym%d: giving up ...\n", unit); + if (np) + sym_free_resources(np, pdev, do_free_irq); + else + sym_iounmap_device(dev); + if (shost) + scsi_host_put(shost); + + return NULL; +} + + +/* + * Detect and try to read SYMBIOS and TEKRAM NVRAM. + */ +#if SYM_CONF_NVRAM_SUPPORT +static void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp) +{ + devp->nvram = nvp; + nvp->type = 0; + + sym_read_nvram(devp, nvp); +} +#else +static inline void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp) +{ +} +#endif /* SYM_CONF_NVRAM_SUPPORT */ + +static int sym_check_supported(struct sym_device *device) +{ + struct sym_chip *chip; + struct pci_dev *pdev = device->pdev; + unsigned long io_port = pci_resource_start(pdev, 0); + int i; + + /* + * If user excluded this chip, do not initialize it. + * I hate this code so much. Must kill it. + */ + if (io_port) { + for (i = 0 ; i < 8 ; i++) { + if (sym_driver_setup.excludes[i] == io_port) + return -ENODEV; + } + } + + /* + * Check if the chip is supported. Then copy the chip description + * to our device structure so we can make it match the actual device + * and options. 
+ */ + chip = sym_lookup_chip_table(pdev->device, pdev->revision); + if (!chip) { + dev_info(&pdev->dev, "device not supported\n"); + return -ENODEV; + } + memcpy(&device->chip, chip, sizeof(device->chip)); + + return 0; +} + +/* + * Ignore Symbios chips controlled by various RAID controllers. + * These controllers set value 0x52414944 at RAM end - 16. + */ +static int sym_check_raid(struct sym_device *device) +{ + unsigned int ram_size, ram_val; + + if (!device->s.ramaddr) + return 0; + + if (device->chip.features & FE_RAM8K) + ram_size = 8192; + else + ram_size = 4096; + + ram_val = readl(device->s.ramaddr + ram_size - 16); + if (ram_val != 0x52414944) + return 0; + + dev_info(&device->pdev->dev, + "not initializing, driven by RAID controller.\n"); + return -ENODEV; +} + +static int sym_set_workarounds(struct sym_device *device) +{ + struct sym_chip *chip = &device->chip; + struct pci_dev *pdev = device->pdev; + u_short status_reg; + + /* + * (ITEM 12 of a DEL about the 896 I haven't yet). + * We must ensure the chip will use WRITE AND INVALIDATE. + * The revision number limit is for now arbitrary. + */ + if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 0x4) { + chip->features |= (FE_WRIE | FE_CLSE); + } + + /* If the chip can do Memory Write Invalidate, enable it */ + if (chip->features & FE_WRIE) { + if (pci_set_mwi(pdev)) + return -ENODEV; + } + + /* + * Work around for errant bit in 895A. The 66Mhz + * capable bit is set erroneously. Clear this bit. + * (Item 1 DEL 533) + * + * Make sure Config space and Features agree. + * + * Recall: writes are not normal to status register - + * write a 1 to clear and a 0 to leave unchanged. + * Can only reset bits. + */ + pci_read_config_word(pdev, PCI_STATUS, &status_reg); + if (chip->features & FE_66MHZ) { + if (!(status_reg & PCI_STATUS_66MHZ)) + chip->features &= ~FE_66MHZ; + } else { + if (status_reg & PCI_STATUS_66MHZ) { + status_reg = PCI_STATUS_66MHZ; + pci_write_config_word(pdev, PCI_STATUS, status_reg); + pci_read_config_word(pdev, PCI_STATUS, &status_reg); + } + } + + return 0; +} + +/* + * Map HBA registers and on-chip SRAM (if present). + */ +static int sym_iomap_device(struct sym_device *device) +{ + struct pci_dev *pdev = device->pdev; + struct pci_bus_region bus_addr; + int i = 2; + + pcibios_resource_to_bus(pdev->bus, &bus_addr, &pdev->resource[1]); + device->mmio_base = bus_addr.start; + + if (device->chip.features & FE_RAM) { + /* + * If the BAR is 64-bit, resource 2 will be occupied by the + * upper 32 bits + */ + if (!pdev->resource[i].flags) + i++; + pcibios_resource_to_bus(pdev->bus, &bus_addr, + &pdev->resource[i]); + device->ram_base = bus_addr.start; + } + +#ifdef CONFIG_SCSI_SYM53C8XX_MMIO + if (device->mmio_base) + device->s.ioaddr = pci_iomap(pdev, 1, + pci_resource_len(pdev, 1)); +#endif + if (!device->s.ioaddr) + device->s.ioaddr = pci_iomap(pdev, 0, + pci_resource_len(pdev, 0)); + if (!device->s.ioaddr) { + dev_err(&pdev->dev, "could not map registers; giving up.\n"); + return -EIO; + } + if (device->ram_base) { + device->s.ramaddr = pci_iomap(pdev, i, + pci_resource_len(pdev, i)); + if (!device->s.ramaddr) { + dev_warn(&pdev->dev, + "could not map SRAM; continuing anyway.\n"); + device->ram_base = 0; + } + } + + return 0; +} + +/* + * The NCR PQS and PDS cards are constructed as a DEC bridge + * behind which sits a proprietary NCR memory controller and + * either four or two 53c875s as separate devices. 
We can tell + * if an 875 is part of a PQS/PDS or not since if it is, it will + * be on the same bus as the memory controller. In its usual + * mode of operation, the 875s are slaved to the memory + * controller for all transfers. To operate with the Linux + * driver, the memory controller is disabled and the 875s + * freed to function independently. The only wrinkle is that + * the preset SCSI ID (which may be zero) must be read in from + * a special configuration space register of the 875. + */ +static void sym_config_pqs(struct pci_dev *pdev, struct sym_device *sym_dev) +{ + int slot; + u8 tmp; + + for (slot = 0; slot < 256; slot++) { + struct pci_dev *memc = pci_get_slot(pdev->bus, slot); + + if (!memc || memc->vendor != 0x101a || memc->device == 0x0009) { + pci_dev_put(memc); + continue; + } + + /* bit 1: allow individual 875 configuration */ + pci_read_config_byte(memc, 0x44, &tmp); + if ((tmp & 0x2) == 0) { + tmp |= 0x2; + pci_write_config_byte(memc, 0x44, tmp); + } + + /* bit 2: drive individual 875 interrupts to the bus */ + pci_read_config_byte(memc, 0x45, &tmp); + if ((tmp & 0x4) == 0) { + tmp |= 0x4; + pci_write_config_byte(memc, 0x45, tmp); + } + + pci_dev_put(memc); + break; + } + + pci_read_config_byte(pdev, 0x84, &tmp); + sym_dev->host_id = tmp; +} + +/* + * Called before unloading the module. + * Detach the host. + * We have to free resources and halt the NCR chip. + */ +static int sym_detach(struct Scsi_Host *shost, struct pci_dev *pdev) +{ + struct sym_hcb *np = sym_get_hcb(shost); + printk("%s: detaching ...\n", sym_name(np)); + + del_timer_sync(&np->s.timer); + + /* + * Reset NCR chip. + * We should use sym_soft_reset(), but we don't want to do + * so, since we may not be safe if interrupts occur. + */ + printk("%s: resetting chip\n", sym_name(np)); + OUTB(np, nc_istat, SRST); + INB(np, nc_mbox1); + udelay(10); + OUTB(np, nc_istat, 0); + + sym_free_resources(np, pdev, 1); + scsi_host_put(shost); + + return 1; +} + +/* + * Driver host template. 
+ */ +static const struct scsi_host_template sym2_template = { + .module = THIS_MODULE, + .name = "sym53c8xx", + .info = sym53c8xx_info, + .cmd_size = sizeof(struct sym_ucmd), + .queuecommand = sym53c8xx_queue_command, + .slave_alloc = sym53c8xx_slave_alloc, + .slave_configure = sym53c8xx_slave_configure, + .slave_destroy = sym53c8xx_slave_destroy, + .eh_abort_handler = sym53c8xx_eh_abort_handler, + .eh_device_reset_handler = sym53c8xx_eh_device_reset_handler, + .eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler, + .eh_host_reset_handler = sym53c8xx_eh_host_reset_handler, + .this_id = 7, + .max_sectors = 0xFFFF, +#ifdef SYM_LINUX_PROC_INFO_SUPPORT + .show_info = sym_show_info, +#ifdef SYM_LINUX_USER_COMMAND_SUPPORT + .write_info = sym_user_command, +#endif + .proc_name = NAME53C8XX, +#endif +}; + +static int attach_count; + +static int sym2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct sym_device sym_dev; + struct sym_nvram nvram; + struct Scsi_Host *shost; + int do_iounmap = 0; + int do_disable_device = 1; + + memset(&sym_dev, 0, sizeof(sym_dev)); + memset(&nvram, 0, sizeof(nvram)); + sym_dev.pdev = pdev; + sym_dev.host_id = SYM_SETUP_HOST_ID; + + if (pci_enable_device(pdev)) + goto leave; + + pci_set_master(pdev); + + if (pci_request_regions(pdev, NAME53C8XX)) + goto disable; + + if (sym_check_supported(&sym_dev)) + goto free; + + if (sym_iomap_device(&sym_dev)) + goto free; + do_iounmap = 1; + + if (sym_check_raid(&sym_dev)) { + do_disable_device = 0; /* Don't disable the device */ + goto free; + } + + if (sym_set_workarounds(&sym_dev)) + goto free; + + sym_config_pqs(pdev, &sym_dev); + + sym_get_nvram(&sym_dev, &nvram); + + do_iounmap = 0; /* Don't sym_iounmap_device() after sym_attach(). */ + shost = sym_attach(&sym2_template, attach_count, &sym_dev); + if (!shost) + goto free; + + if (scsi_add_host(shost, &pdev->dev)) + goto detach; + scsi_scan_host(shost); + + attach_count++; + + return 0; + + detach: + sym_detach(pci_get_drvdata(pdev), pdev); + free: + if (do_iounmap) + sym_iounmap_device(&sym_dev); + pci_release_regions(pdev); + disable: + if (do_disable_device) + pci_disable_device(pdev); + leave: + return -ENODEV; +} + +static void sym2_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + + scsi_remove_host(shost); + sym_detach(shost, pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + + attach_count--; +} + +/** + * sym2_io_error_detected() - called when PCI error is detected + * @pdev: pointer to PCI device + * @state: current state of the PCI slot + */ +static pci_ers_result_t sym2_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + /* If slot is permanently frozen, turn everything off */ + if (state == pci_channel_io_perm_failure) { + sym2_remove(pdev); + return PCI_ERS_RESULT_DISCONNECT; + } + + disable_irq(pdev->irq); + pci_disable_device(pdev); + + /* Request that MMIO be enabled, so register dump can be taken. */ + return PCI_ERS_RESULT_CAN_RECOVER; +} + +/** + * sym2_io_slot_dump - Enable MMIO and dump debug registers + * @pdev: pointer to PCI device + */ +static pci_ers_result_t sym2_io_slot_dump(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + + sym_dump_registers(shost); + + /* Request a slot reset. 
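 * (Aside on sym2_probe() above, for illustration only: its error handling
 *  follows the usual kernel goto-unwind shape, with the do_iounmap and
 *  do_disable_device flags selecting how far to unwind.  The generic
 *  pattern, with hypothetical names, is:
 *
 *	if (acquire_a())
 *		goto out;
 *	if (acquire_b())
 *		goto undo_a;
 *	if (acquire_c())
 *		goto undo_b;
 *	return 0;
 *  undo_b:
 *	release_b();
 *  undo_a:
 *	release_a();
 *  out:
 *	return -ENODEV;
 *
 *  so each label releases exactly the resources acquired before the step
 *  that failed.)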
*/ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * sym2_reset_workarounds - hardware-specific work-arounds + * @pdev: pointer to PCI device + * + * This routine is similar to sym_set_workarounds(), except + * that, at this point, we already know that the device was + * successfully initialized at least once before, and so most + * of the steps taken there are un-needed here. + */ +static void sym2_reset_workarounds(struct pci_dev *pdev) +{ + u_short status_reg; + struct sym_chip *chip; + + chip = sym_lookup_chip_table(pdev->device, pdev->revision); + + /* Work around for errant bit in 895A, in a fashion + * similar to what is done in sym_set_workarounds(). + */ + pci_read_config_word(pdev, PCI_STATUS, &status_reg); + if (!(chip->features & FE_66MHZ) && (status_reg & PCI_STATUS_66MHZ)) { + status_reg = PCI_STATUS_66MHZ; + pci_write_config_word(pdev, PCI_STATUS, status_reg); + pci_read_config_word(pdev, PCI_STATUS, &status_reg); + } +} + +/** + * sym2_io_slot_reset() - called when the pci bus has been reset. + * @pdev: pointer to PCI device + * + * Restart the card from scratch. + */ +static pci_ers_result_t sym2_io_slot_reset(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct sym_hcb *np = sym_get_hcb(shost); + + printk(KERN_INFO "%s: recovering from a PCI slot reset\n", + sym_name(np)); + + if (pci_enable_device(pdev)) { + printk(KERN_ERR "%s: Unable to enable after PCI reset\n", + sym_name(np)); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + enable_irq(pdev->irq); + + /* If the chip can do Memory Write Invalidate, enable it */ + if (np->features & FE_WRIE) { + if (pci_set_mwi(pdev)) + return PCI_ERS_RESULT_DISCONNECT; + } + + /* Perform work-arounds, analogous to sym_set_workarounds() */ + sym2_reset_workarounds(pdev); + + /* Perform host reset only on one instance of the card */ + if (PCI_FUNC(pdev->devfn) == 0) { + if (sym_reset_scsi_bus(np, 0)) { + printk(KERN_ERR "%s: Unable to reset scsi host\n", + sym_name(np)); + return PCI_ERS_RESULT_DISCONNECT; + } + sym_start_up(shost, 1); + } + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * sym2_io_resume() - resume normal ops after PCI reset + * @pdev: pointer to PCI device + * + * Called when the error recovery driver tells us that its + * OK to resume normal operation. Use completion to allow + * halted scsi ops to resume. 
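 * (For reference: the PCI error-recovery core calls these hooks roughly in
 *  the order error_detected -> mmio_enabled -> slot_reset -> resume, taking
 *  the mmio_enabled step only after PCI_ERS_RESULT_CAN_RECOVER and the
 *  slot_reset step only after PCI_ERS_RESULT_NEED_RESET.  The io_reset
 *  completion referenced below is the hand-off point: code elsewhere in the
 *  driver that must wait for recovery can, under the host lock, publish a
 *  completion and sleep on it, along the lines of this sketch (illustrative
 *  only, values are examples):
 *
 *	DECLARE_COMPLETION_ONSTACK(eh_done);
 *
 *	spin_lock_irq(shost->host_lock);
 *	sym_data->io_reset = &eh_done;
 *	spin_unlock_irq(shost->host_lock);
 *	wait_for_completion_timeout(&eh_done, 30 * HZ);
 *
 *  and sym2_io_resume() wakes the waiter with complete() once the slot is
 *  usable again.)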
+ */ +static void sym2_io_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct sym_data *sym_data = shost_priv(shost); + + spin_lock_irq(shost->host_lock); + if (sym_data->io_reset) + complete(sym_data->io_reset); + spin_unlock_irq(shost->host_lock); +} + +static void sym2_get_signalling(struct Scsi_Host *shost) +{ + struct sym_hcb *np = sym_get_hcb(shost); + enum spi_signal_type type; + + switch (np->scsi_mode) { + case SMODE_SE: + type = SPI_SIGNAL_SE; + break; + case SMODE_LVD: + type = SPI_SIGNAL_LVD; + break; + case SMODE_HVD: + type = SPI_SIGNAL_HVD; + break; + default: + type = SPI_SIGNAL_UNKNOWN; + break; + } + spi_signalling(shost) = type; +} + +static void sym2_set_offset(struct scsi_target *starget, int offset) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct sym_hcb *np = sym_get_hcb(shost); + struct sym_tcb *tp = &np->target[starget->id]; + + tp->tgoal.offset = offset; + tp->tgoal.check_nego = 1; +} + +static void sym2_set_period(struct scsi_target *starget, int period) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct sym_hcb *np = sym_get_hcb(shost); + struct sym_tcb *tp = &np->target[starget->id]; + + /* have to have DT for these transfers, but DT will also + * set width, so check that this is allowed */ + if (period <= np->minsync && spi_width(starget)) + tp->tgoal.dt = 1; + + tp->tgoal.period = period; + tp->tgoal.check_nego = 1; +} + +static void sym2_set_width(struct scsi_target *starget, int width) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct sym_hcb *np = sym_get_hcb(shost); + struct sym_tcb *tp = &np->target[starget->id]; + + /* It is illegal to have DT set on narrow transfers. If DT is + * clear, we must also clear IU and QAS. 
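 * (The dependency rules enforced by these setters, condensed for
 *  reference: DT clocking is only legal on wide transfers, and IU and QAS
 *  are only legal on top of DT, so the goal updates always reduce to
 *
 *	if (!width)
 *		dt = 0;			DT requires wide
 *	if (!dt)
 *		iu = qas = 0;		IU and QAS require DT
 *
 *  which is why the function below clears iu, dt and qas together when the
 *  width goes to 0, and sym2_set_dt() clears iu and qas whenever DT is
 *  turned off.)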
*/ + if (width == 0) + tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; + + tp->tgoal.width = width; + tp->tgoal.check_nego = 1; +} + +static void sym2_set_dt(struct scsi_target *starget, int dt) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct sym_hcb *np = sym_get_hcb(shost); + struct sym_tcb *tp = &np->target[starget->id]; + + /* We must clear QAS and IU if DT is clear */ + if (dt) + tp->tgoal.dt = 1; + else + tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; + tp->tgoal.check_nego = 1; +} + +#if 0 +static void sym2_set_iu(struct scsi_target *starget, int iu) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct sym_hcb *np = sym_get_hcb(shost); + struct sym_tcb *tp = &np->target[starget->id]; + + if (iu) + tp->tgoal.iu = tp->tgoal.dt = 1; + else + tp->tgoal.iu = 0; + tp->tgoal.check_nego = 1; +} + +static void sym2_set_qas(struct scsi_target *starget, int qas) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct sym_hcb *np = sym_get_hcb(shost); + struct sym_tcb *tp = &np->target[starget->id]; + + if (qas) + tp->tgoal.dt = tp->tgoal.qas = 1; + else + tp->tgoal.qas = 0; + tp->tgoal.check_nego = 1; +} +#endif + +static struct spi_function_template sym2_transport_functions = { + .set_offset = sym2_set_offset, + .show_offset = 1, + .set_period = sym2_set_period, + .show_period = 1, + .set_width = sym2_set_width, + .show_width = 1, + .set_dt = sym2_set_dt, + .show_dt = 1, +#if 0 + .set_iu = sym2_set_iu, + .show_iu = 1, + .set_qas = sym2_set_qas, + .show_qas = 1, +#endif + .get_signalling = sym2_get_signalling, +}; + +static struct pci_device_id sym2_id_table[] = { + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */ + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C825, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C815, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C810AP, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */ + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510, + PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL }, + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C885, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C1510, + PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL }, /* new */ + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C895A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C875A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_33, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_66, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875J, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, sym2_id_table); + +static const struct pci_error_handlers sym2_err_handler = { + .error_detected = sym2_io_error_detected, + .mmio_enabled = 
sym2_io_slot_dump, + .slot_reset = sym2_io_slot_reset, + .resume = sym2_io_resume, +}; + +static struct pci_driver sym2_driver = { + .name = NAME53C8XX, + .id_table = sym2_id_table, + .probe = sym2_probe, + .remove = sym2_remove, + .err_handler = &sym2_err_handler, +}; + +static int __init sym2_init(void) +{ + int error; + + sym2_setup_params(); + sym2_transport_template = spi_attach_transport(&sym2_transport_functions); + if (!sym2_transport_template) + return -ENODEV; + + error = pci_register_driver(&sym2_driver); + if (error) + spi_release_transport(sym2_transport_template); + return error; +} + +static void __exit sym2_exit(void) +{ + pci_unregister_driver(&sym2_driver); + spi_release_transport(sym2_transport_template); +} + +module_init(sym2_init); +module_exit(sym2_exit); diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h new file mode 100644 index 000000000..7d5c9b988 --- /dev/null +++ b/drivers/scsi/sym53c8xx_2/sym_glue.h @@ -0,0 +1,257 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family + * of PCI-SCSI IO processors. + * + * Copyright (C) 1999-2001 Gerard Roudier + * + * This driver is derived from the Linux sym53c8xx driver. + * Copyright (C) 1998-2000 Gerard Roudier + * + * The sym53c8xx driver is derived from the ncr53c8xx driver that had been + * a port of the FreeBSD ncr driver to Linux-1.2.13. + * + * The original ncr driver has been written for 386bsd and FreeBSD by + * Wolfgang Stanglmeier + * Stefan Esser + * Copyright (C) 1994 Wolfgang Stanglmeier + * + * Other major contributions: + * + * NVRAM detection and reading. + * Copyright (C) 1997 Richard Waltham + * + *----------------------------------------------------------------------------- + */ + +#ifndef SYM_GLUE_H +#define SYM_GLUE_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#ifdef __sparc__ +# include +#endif + +#include +#include +#include +#include +#include + +#include "sym53c8xx.h" +#include "sym_defs.h" +#include "sym_misc.h" + +/* + * Configuration addendum for Linux. + */ +#define SYM_CONF_TIMER_INTERVAL ((HZ+1)/2) + +#undef SYM_OPT_HANDLE_DEVICE_QUEUEING +#define SYM_OPT_LIMIT_COMMAND_REORDERING + +/* + * Print a message with severity. + */ +#define printf_emerg(args...) printk(KERN_EMERG args) +#define printf_alert(args...) printk(KERN_ALERT args) +#define printf_crit(args...) printk(KERN_CRIT args) +#define printf_err(args...) printk(KERN_ERR args) +#define printf_warning(args...) printk(KERN_WARNING args) +#define printf_notice(args...) printk(KERN_NOTICE args) +#define printf_info(args...) printk(KERN_INFO args) +#define printf_debug(args...) printk(KERN_DEBUG args) +#define printf(args...) printk(args) + +/* + * A 'read barrier' flushes any data that have been prefetched + * by the processor due to out of order execution. Such a barrier + * must notably be inserted prior to looking at data that have + * been DMAed, assuming that program does memory READs in proper + * order and that the device ensured proper ordering of WRITEs. + * + * A 'write barrier' prevents any previous WRITEs to pass further + * WRITEs. Such barriers must be inserted each time another agent + * relies on ordering of WRITEs. + * + * Note that, due to posting of PCI memory writes, we also must + * insert dummy PCI read transactions when some ordering involving + * both directions over the PCI does matter. PCI transactions are + * fully ordered in each direction. 
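 * The typical pattern in this driver is therefore (sketch only):
 *
 *	queue[slot] = cpu_to_scr(entry);	fill the payload
 *	MEMORY_WRITE_BARRIER();			order it before the kick
 *	OUTB(np, nc_istat, SIGP);		then ring the doorbell
 *
 * and, where ordering against a posted MMIO write matters, a dummy register
 * read such as INB(np, nc_mbox1) is issued right after the write to force
 * it out to the chip before a following delay or test.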
+ */ + +#define MEMORY_READ_BARRIER() rmb() +#define MEMORY_WRITE_BARRIER() wmb() + +/* + * IO functions definition for big/little endian CPU support. + * For now, PCI chips are only supported in little endian addressing mode, + */ + +#ifdef __BIG_ENDIAN + +#define readw_l2b readw +#define readl_l2b readl +#define writew_b2l writew +#define writel_b2l writel + +#else /* little endian */ + +#define readw_raw readw +#define readl_raw readl +#define writew_raw writew +#define writel_raw writel + +#endif /* endian */ + +#ifdef SYM_CONF_CHIP_BIG_ENDIAN +#error "Chips in BIG ENDIAN addressing mode are not (yet) supported" +#endif + +/* + * If the CPU and the chip use same endian-ness addressing, + * no byte reordering is needed for script patching. + * Macro cpu_to_scr() is to be used for script patching. + * Macro scr_to_cpu() is to be used for getting a DWORD + * from the script. + */ + +#define cpu_to_scr(dw) cpu_to_le32(dw) +#define scr_to_cpu(dw) le32_to_cpu(dw) + +/* + * These ones are used as return code from + * error recovery handlers under Linux. + */ +#define SCSI_SUCCESS SUCCESS +#define SCSI_FAILED FAILED + +/* + * System specific target data structure. + * None for now, under Linux. + */ +/* #define SYM_HAVE_STCB */ + +/* + * System specific lun data structure. + */ +#define SYM_HAVE_SLCB +struct sym_slcb { + u_short reqtags; /* Number of tags requested by user */ + u_short scdev_depth; /* Queue depth set in select_queue_depth() */ +}; + +/* + * System specific command data structure. + * Not needed under Linux. + */ +/* struct sym_sccb */ + +/* + * System specific host data structure. + */ +struct sym_shcb { + /* + * Chip and controller identification. + */ + int unit; + char inst_name[16]; + char chip_name[8]; + + struct Scsi_Host *host; + + void __iomem * ioaddr; /* MMIO kernel io address */ + void __iomem * ramaddr; /* RAM kernel io address */ + + struct timer_list timer; /* Timer handler link header */ + u_long lasttime; + u_long settle_time; /* Resetting the SCSI BUS */ + u_char settle_time_valid; +}; + +/* + * Return the name of the controller. + */ +#define sym_name(np) (np)->s.inst_name + +struct sym_nvram; + +/* + * The IO macros require a struct called 's' and are abused in sym_nvram.c + */ +struct sym_device { + struct pci_dev *pdev; + unsigned long mmio_base; + unsigned long ram_base; + struct { + void __iomem *ioaddr; + void __iomem *ramaddr; + } s; + struct sym_chip chip; + struct sym_nvram *nvram; + u_char host_id; +}; + +/* + * Driver host data structure. + */ +struct sym_data { + struct sym_hcb *ncb; + struct completion *io_reset; /* PCI error handling */ + struct pci_dev *pdev; +}; + +static inline struct sym_hcb * sym_get_hcb(struct Scsi_Host *host) +{ + return ((struct sym_data *)host->hostdata)->ncb; +} + +#include "sym_fw.h" +#include "sym_hipd.h" + +/* + * Set the status field of a CAM CCB. + */ +static inline void +sym_set_cam_status(struct scsi_cmnd *cmd, int status) +{ + cmd->result &= ~(0xff << 16); + cmd->result |= (status << 16); +} + +/* + * Get the status field of a CAM CCB. + */ +static inline int +sym_get_cam_status(struct scsi_cmnd *cmd) +{ + return host_byte(cmd->result); +} + +/* + * Build CAM result for a successful IO and for a failed IO. 
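 * (Layout reminder, for the helpers below: in cmd->result the SCSI status
 *  byte occupies bits 7..0 and the host byte (DID_* codes) bits 23..16, so
 *  a successful command is packed as
 *
 *	cmd->result = (DID_OK << 16) | (scsi_status & 0x7f);
 *
 *  which is exactly what sym_set_cam_result_ok() does, while
 *  sym_get_cam_status() merely extracts the host byte with host_byte().)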
+ */ +static inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid) +{ + scsi_set_resid(cmd, resid); + cmd->result = (DID_OK << 16) | (cp->ssss_status & 0x7f); +} +void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid); + +void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *ccb); +#define sym_print_addr(cmd, arg...) dev_info(&cmd->device->sdev_gendev , ## arg) +void sym_xpt_async_bus_reset(struct sym_hcb *np); +int sym_setup_data_and_start (struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp); +void sym_log_bus_error(struct Scsi_Host *); +void sym_dump_registers(struct Scsi_Host *); + +#endif /* SYM_GLUE_H */ diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c new file mode 100644 index 000000000..f0db17e34 --- /dev/null +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -0,0 +1,5839 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family + * of PCI-SCSI IO processors. + * + * Copyright (C) 1999-2001 Gerard Roudier + * Copyright (c) 2003-2005 Matthew Wilcox + * + * This driver is derived from the Linux sym53c8xx driver. + * Copyright (C) 1998-2000 Gerard Roudier + * + * The sym53c8xx driver is derived from the ncr53c8xx driver that had been + * a port of the FreeBSD ncr driver to Linux-1.2.13. + * + * The original ncr driver has been written for 386bsd and FreeBSD by + * Wolfgang Stanglmeier + * Stefan Esser + * Copyright (C) 1994 Wolfgang Stanglmeier + * + * Other major contributions: + * + * NVRAM detection and reading. + * Copyright (C) 1997 Richard Waltham + * + *----------------------------------------------------------------------------- + */ + +#include +#include /* for timeouts in units of HZ */ + +#include "sym_glue.h" +#include "sym_nvram.h" + +#if 0 +#define SYM_DEBUG_GENERIC_SUPPORT +#endif + +/* + * Needed function prototypes. + */ +static void sym_int_ma (struct sym_hcb *np); +static void sym_int_sir(struct sym_hcb *); +static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np); +static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa); +static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln); +static void sym_complete_error (struct sym_hcb *np, struct sym_ccb *cp); +static void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp); +static int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp); + +/* + * Print a buffer in hexadecimal format with a ".\n" at end. + */ +static void sym_printl_hex(u_char *p, int n) +{ + while (n-- > 0) + printf (" %x", *p++); + printf (".\n"); +} + +static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) +{ + sym_print_addr(cp->cmd, "%s: ", label); + + spi_print_msg(msg); + printf("\n"); +} + +static void sym_print_nego_msg(struct sym_hcb *np, int target, char *label, u_char *msg) +{ + struct sym_tcb *tp = &np->target[target]; + dev_info(&tp->starget->dev, "%s: ", label); + + spi_print_msg(msg); + printf("\n"); +} + +/* + * Print something that tells about extended errors. 
+ */ +void sym_print_xerr(struct scsi_cmnd *cmd, int x_status) +{ + if (x_status & XE_PARITY_ERR) { + sym_print_addr(cmd, "unrecovered SCSI parity error.\n"); + } + if (x_status & XE_EXTRA_DATA) { + sym_print_addr(cmd, "extraneous data discarded.\n"); + } + if (x_status & XE_BAD_PHASE) { + sym_print_addr(cmd, "illegal scsi phase (4/5).\n"); + } + if (x_status & XE_SODL_UNRUN) { + sym_print_addr(cmd, "ODD transfer in DATA OUT phase.\n"); + } + if (x_status & XE_SWIDE_OVRUN) { + sym_print_addr(cmd, "ODD transfer in DATA IN phase.\n"); + } +} + +/* + * Return a string for SCSI BUS mode. + */ +static char *sym_scsi_bus_mode(int mode) +{ + switch(mode) { + case SMODE_HVD: return "HVD"; + case SMODE_SE: return "SE"; + case SMODE_LVD: return "LVD"; + } + return "??"; +} + +/* + * Soft reset the chip. + * + * Raising SRST when the chip is running may cause + * problems on dual function chips (see below). + * On the other hand, LVD devices need some delay + * to settle and report actual BUS mode in STEST4. + */ +static void sym_chip_reset (struct sym_hcb *np) +{ + OUTB(np, nc_istat, SRST); + INB(np, nc_mbox1); + udelay(10); + OUTB(np, nc_istat, 0); + INB(np, nc_mbox1); + udelay(2000); /* For BUS MODE to settle */ +} + +/* + * Really soft reset the chip.:) + * + * Some 896 and 876 chip revisions may hang-up if we set + * the SRST (soft reset) bit at the wrong time when SCRIPTS + * are running. + * So, we need to abort the current operation prior to + * soft resetting the chip. + */ +static void sym_soft_reset (struct sym_hcb *np) +{ + u_char istat = 0; + int i; + + if (!(np->features & FE_ISTAT1) || !(INB(np, nc_istat1) & SCRUN)) + goto do_chip_reset; + + OUTB(np, nc_istat, CABRT); + for (i = 100000 ; i ; --i) { + istat = INB(np, nc_istat); + if (istat & SIP) { + INW(np, nc_sist); + } + else if (istat & DIP) { + if (INB(np, nc_dstat) & ABRT) + break; + } + udelay(5); + } + OUTB(np, nc_istat, 0); + if (!i) + printf("%s: unable to abort current chip operation, " + "ISTAT=0x%02x.\n", sym_name(np), istat); +do_chip_reset: + sym_chip_reset(np); +} + +/* + * Start reset process. + * + * The interrupt handler will reinitialize the chip. + */ +static void sym_start_reset(struct sym_hcb *np) +{ + sym_reset_scsi_bus(np, 1); +} + +int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int) +{ + u32 term; + int retv = 0; + + sym_soft_reset(np); /* Soft reset the chip */ + if (enab_int) + OUTW(np, nc_sien, RST); + /* + * Enable Tolerant, reset IRQD if present and + * properly set IRQ mode, prior to resetting the bus. + */ + OUTB(np, nc_stest3, TE); + OUTB(np, nc_dcntl, (np->rv_dcntl & IRQM)); + OUTB(np, nc_scntl1, CRST); + INB(np, nc_mbox1); + udelay(200); + + if (!SYM_SETUP_SCSI_BUS_CHECK) + goto out; + /* + * Check for no terminators or SCSI bus shorts to ground. + * Read SCSI data bus, data parity bits and control signals. + * We are expecting RESET to be TRUE and other signals to be + * FALSE. + */ + term = INB(np, nc_sstat0); + term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */ + term |= ((INB(np, nc_sstat2) & 0x01) << 26) | /* sdp1 */ + ((INW(np, nc_sbdl) & 0xff) << 9) | /* d7-0 */ + ((INW(np, nc_sbdl) & 0xff00) << 10) | /* d15-8 */ + INB(np, nc_sbcl); /* req ack bsy sel atn msg cd io */ + + if (!np->maxwide) + term &= 0x3ffff; + + if (term != (2<<7)) { + printf("%s: suspicious SCSI data while resetting the BUS.\n", + sym_name(np)); + printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = " + "0x%lx, expecting 0x%lx\n", + sym_name(np), + (np->features & FE_WIDE) ? 
"dp1,d15-8," : "", + (u_long)term, (u_long)(2<<7)); + if (SYM_SETUP_SCSI_BUS_CHECK == 1) + retv = 1; + } +out: + OUTB(np, nc_scntl1, 0); + return retv; +} + +/* + * Select SCSI clock frequency + */ +static void sym_selectclock(struct sym_hcb *np, u_char scntl3) +{ + /* + * If multiplier not present or not selected, leave here. + */ + if (np->multiplier <= 1) { + OUTB(np, nc_scntl3, scntl3); + return; + } + + if (sym_verbose >= 2) + printf ("%s: enabling clock multiplier\n", sym_name(np)); + + OUTB(np, nc_stest1, DBLEN); /* Enable clock multiplier */ + /* + * Wait for the LCKFRQ bit to be set if supported by the chip. + * Otherwise wait 50 micro-seconds (at least). + */ + if (np->features & FE_LCKFRQ) { + int i = 20; + while (!(INB(np, nc_stest4) & LCKFRQ) && --i > 0) + udelay(20); + if (!i) + printf("%s: the chip cannot lock the frequency\n", + sym_name(np)); + } else { + INB(np, nc_mbox1); + udelay(50+10); + } + OUTB(np, nc_stest3, HSC); /* Halt the scsi clock */ + OUTB(np, nc_scntl3, scntl3); + OUTB(np, nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */ + OUTB(np, nc_stest3, 0x00); /* Restart scsi clock */ +} + + +/* + * Determine the chip's clock frequency. + * + * This is essential for the negotiation of the synchronous + * transfer rate. + * + * Note: we have to return the correct value. + * THERE IS NO SAFE DEFAULT VALUE. + * + * Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock. + * 53C860 and 53C875 rev. 1 support fast20 transfers but + * do not have a clock doubler and so are provided with a + * 80 MHz clock. All other fast20 boards incorporate a doubler + * and so should be delivered with a 40 MHz clock. + * The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base + * clock and provide a clock quadrupler (160 Mhz). + */ + +/* + * calculate SCSI clock frequency (in KHz) + */ +static unsigned getfreq (struct sym_hcb *np, int gen) +{ + unsigned int ms = 0; + unsigned int f; + + /* + * Measure GEN timer delay in order + * to calculate SCSI clock frequency + * + * This code will never execute too + * many loop iterations (if DELAY is + * reasonably correct). It could get + * too low a delay (too high a freq.) + * if the CPU is slow executing the + * loop for some reason (an NMI, for + * example). For this reason we will + * if multiple measurements are to be + * performed trust the higher delay + * (lower frequency returned). + */ + OUTW(np, nc_sien, 0); /* mask all scsi interrupts */ + INW(np, nc_sist); /* clear pending scsi interrupt */ + OUTB(np, nc_dien, 0); /* mask all dma interrupts */ + INW(np, nc_sist); /* another one, just to be sure :) */ + /* + * The C1010-33 core does not report GEN in SIST, + * if this interrupt is masked in SIEN. + * I don't know yet if the C1010-66 behaves the same way. + */ + if (np->features & FE_C10) { + OUTW(np, nc_sien, GEN); + OUTB(np, nc_istat1, SIRQD); + } + OUTB(np, nc_scntl3, 4); /* set pre-scaler to divide by 3 */ + OUTB(np, nc_stime1, 0); /* disable general purpose timer */ + OUTB(np, nc_stime1, gen); /* set to nominal delay of 1<features & FE_C10) { + OUTW(np, nc_sien, 0); + OUTB(np, nc_istat1, 0); + } + /* + * set prescaler to divide by whatever 0 means + * 0 ought to choose divide by 2, but appears + * to set divide by 3.5 mode in my 53c810 ... + */ + OUTB(np, nc_scntl3, 0); + + /* + * adjust for prescaler, and convert into KHz + */ + f = ms ? ((1 << gen) * (4340*4)) / ms : 0; + + /* + * The C1010-33 result is biased by a factor + * of 2/3 compared to earlier chips. 
+ */ + if (np->features & FE_C10) + f = (f * 2) / 3; + + if (sym_verbose >= 2) + printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n", + sym_name(np), gen, ms/4, f); + + return f; +} + +static unsigned sym_getfreq (struct sym_hcb *np) +{ + u_int f1, f2; + int gen = 8; + + getfreq (np, gen); /* throw away first result */ + f1 = getfreq (np, gen); + f2 = getfreq (np, gen); + if (f1 > f2) f1 = f2; /* trust lower result */ + return f1; +} + +/* + * Get/probe chip SCSI clock frequency + */ +static void sym_getclock (struct sym_hcb *np, int mult) +{ + unsigned char scntl3 = np->sv_scntl3; + unsigned char stest1 = np->sv_stest1; + unsigned f1; + + np->multiplier = 1; + f1 = 40000; + /* + * True with 875/895/896/895A with clock multiplier selected + */ + if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) { + if (sym_verbose >= 2) + printf ("%s: clock multiplier found\n", sym_name(np)); + np->multiplier = mult; + } + + /* + * If multiplier not found or scntl3 not 7,5,3, + * reset chip and get frequency from general purpose timer. + * Otherwise trust scntl3 BIOS setting. + */ + if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) { + OUTB(np, nc_stest1, 0); /* make sure doubler is OFF */ + f1 = sym_getfreq (np); + + if (sym_verbose) + printf ("%s: chip clock is %uKHz\n", sym_name(np), f1); + + if (f1 < 45000) f1 = 40000; + else if (f1 < 55000) f1 = 50000; + else f1 = 80000; + + if (f1 < 80000 && mult > 1) { + if (sym_verbose >= 2) + printf ("%s: clock multiplier assumed\n", + sym_name(np)); + np->multiplier = mult; + } + } else { + if ((scntl3 & 7) == 3) f1 = 40000; + else if ((scntl3 & 7) == 5) f1 = 80000; + else f1 = 160000; + + f1 /= np->multiplier; + } + + /* + * Compute controller synchronous parameters. + */ + f1 *= np->multiplier; + np->clock_khz = f1; +} + +/* + * Get/probe PCI clock frequency + */ +static int sym_getpciclock (struct sym_hcb *np) +{ + int f = 0; + + /* + * For now, we only need to know about the actual + * PCI BUS clock frequency for C1010-66 chips. + */ +#if 1 + if (np->features & FE_66MHZ) { +#else + if (1) { +#endif + OUTB(np, nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */ + f = sym_getfreq(np); + OUTB(np, nc_stest1, 0); + } + np->pciclk_khz = f; + + return f; +} + +/* + * SYMBIOS chip clock divisor table. + * + * Divisors are multiplied by 10,000,000 in order to make + * calculations more simple. + */ +#define _5M 5000000 +static const u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; + +/* + * Get clock factor and sync divisor for a given + * synchronous factor period. + */ +static int +sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fakp) +{ + u32 clk = np->clock_khz; /* SCSI clock frequency in kHz */ + int div = np->clock_divn; /* Number of divisors supported */ + u32 fak; /* Sync factor in sxfer */ + u32 per; /* Period in tenths of ns */ + u32 kpc; /* (per * clk) */ + int ret; + + /* + * Compute the synchronous period in tenths of nano-seconds + */ + if (dt && sfac <= 9) per = 125; + else if (sfac <= 10) per = 250; + else if (sfac == 11) per = 303; + else if (sfac == 12) per = 500; + else per = 40 * sfac; + ret = per; + + kpc = per * clk; + if (dt) + kpc <<= 1; + + /* + * For earliest C10 revision 0, we cannot use extra + * clocks for the setting of the SCSI clocking. + * Note that this limits the lowest sync data transfer + * to 5 Mega-transfers per second and may result in + * using higher clock divisors. 
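 * (For reference, the factor-to-period mapping coded above is the usual
 *  SPI table, with 'per' kept in tenths of a nanosecond:
 *
 *	sfac 0x09 + DT	-> per 125	12.5 ns		80 MT/s
 *	sfac 0x0a	-> per 250	25 ns		40 MT/s
 *	sfac 0x0b	-> per 303	30.3 ns
 *	sfac 0x0c	-> per 500	50 ns		20 MT/s
 *	sfac >= 0x0d	-> per 40*sfac	e.g. 0x19 -> 100 ns, 10 MT/s
 *
 *  the divisor and extra-clock search that follows then picks the slowest
 *  chip clocking that still meets the requested period.)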
+ */ +#if 1 + if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) { + /* + * Look for the lowest clock divisor that allows an + * output speed not faster than the period. + */ + while (div > 0) { + --div; + if (kpc > (div_10M[div] << 2)) { + ++div; + break; + } + } + fak = 0; /* No extra clocks */ + if (div == np->clock_divn) { /* Are we too fast ? */ + ret = -1; + } + *divp = div; + *fakp = fak; + return ret; + } +#endif + + /* + * Look for the greatest clock divisor that allows an + * input speed faster than the period. + */ + while (--div > 0) + if (kpc >= (div_10M[div] << 2)) break; + + /* + * Calculate the lowest clock factor that allows an output + * speed not faster than the period, and the max output speed. + * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT. + * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT. + */ + if (dt) { + fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2; + /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */ + } else { + fak = (kpc - 1) / div_10M[div] + 1 - 4; + /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */ + } + + /* + * Check against our hardware limits, or bugs :). + */ + if (fak > 2) { + fak = 2; + ret = -1; + } + + /* + * Compute and return sync parameters. + */ + *divp = div; + *fakp = fak; + + return ret; +} + +/* + * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64, + * 128 transfers. All chips support at least 16 transfers + * bursts. The 825A, 875 and 895 chips support bursts of up + * to 128 transfers and the 895A and 896 support bursts of up + * to 64 transfers. All other chips support up to 16 + * transfers bursts. + * + * For PCI 32 bit data transfers each transfer is a DWORD. + * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers. + * + * We use log base 2 (burst length) as internal code, with + * value 0 meaning "burst disabled". + */ + +/* + * Burst length from burst code. + */ +#define burst_length(bc) (!(bc))? 0 : 1 << (bc) + +/* + * Burst code from io register bits. + */ +#define burst_code(dmode, ctest4, ctest5) \ + (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 + +/* + * Set initial io register bits from burst code. + */ +static inline void sym_init_burst(struct sym_hcb *np, u_char bc) +{ + np->rv_ctest4 &= ~0x80; + np->rv_dmode &= ~(0x3 << 6); + np->rv_ctest5 &= ~0x4; + + if (!bc) { + np->rv_ctest4 |= 0x80; + } + else { + --bc; + np->rv_dmode |= ((bc & 0x3) << 6); + np->rv_ctest5 |= (bc & 0x4); + } +} + +/* + * Save initial settings of some IO registers. + * Assumed to have been set by BIOS. + * We cannot reset the chip prior to reading the + * IO registers, since informations will be lost. + * Since the SCRIPTS processor may be running, this + * is not safe on paper, but it seems to work quite + * well. :) + */ +static void sym_save_initial_setting (struct sym_hcb *np) +{ + np->sv_scntl0 = INB(np, nc_scntl0) & 0x0a; + np->sv_scntl3 = INB(np, nc_scntl3) & 0x07; + np->sv_dmode = INB(np, nc_dmode) & 0xce; + np->sv_dcntl = INB(np, nc_dcntl) & 0xa8; + np->sv_ctest3 = INB(np, nc_ctest3) & 0x01; + np->sv_ctest4 = INB(np, nc_ctest4) & 0x80; + np->sv_gpcntl = INB(np, nc_gpcntl); + np->sv_stest1 = INB(np, nc_stest1); + np->sv_stest2 = INB(np, nc_stest2) & 0x20; + np->sv_stest4 = INB(np, nc_stest4); + if (np->features & FE_C10) { /* Always large DMA fifo + ultra3 */ + np->sv_scntl4 = INB(np, nc_scntl4); + np->sv_ctest5 = INB(np, nc_ctest5) & 0x04; + } + else + np->sv_ctest5 = INB(np, nc_ctest5) & 0x24; +} + +/* + * Set SCSI BUS mode. 
+ * - LVD capable chips (895/895A/896/1010) report the current BUS mode + * through the STEST4 IO register. + * - For previous generation chips (825/825A/875), the user has to tell us + * how to check against HVD, since a 100% safe algorithm is not possible. + */ +static void sym_set_bus_mode(struct sym_hcb *np, struct sym_nvram *nvram) +{ + if (np->scsi_mode) + return; + + np->scsi_mode = SMODE_SE; + if (np->features & (FE_ULTRA2|FE_ULTRA3)) + np->scsi_mode = (np->sv_stest4 & SMODE); + else if (np->features & FE_DIFF) { + if (SYM_SETUP_SCSI_DIFF == 1) { + if (np->sv_scntl3) { + if (np->sv_stest2 & 0x20) + np->scsi_mode = SMODE_HVD; + } else if (nvram->type == SYM_SYMBIOS_NVRAM) { + if (!(INB(np, nc_gpreg) & 0x08)) + np->scsi_mode = SMODE_HVD; + } + } else if (SYM_SETUP_SCSI_DIFF == 2) + np->scsi_mode = SMODE_HVD; + } + if (np->scsi_mode == SMODE_HVD) + np->rv_stest2 |= 0x20; +} + +/* + * Prepare io register values used by sym_start_up() + * according to selected and supported features. + */ +static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram) +{ + struct sym_data *sym_data = shost_priv(shost); + struct pci_dev *pdev = sym_data->pdev; + u_char burst_max; + u32 period; + int i; + + np->maxwide = (np->features & FE_WIDE) ? 1 : 0; + + /* + * Guess the frequency of the chip's clock. + */ + if (np->features & (FE_ULTRA3 | FE_ULTRA2)) + np->clock_khz = 160000; + else if (np->features & FE_ULTRA) + np->clock_khz = 80000; + else + np->clock_khz = 40000; + + /* + * Get the clock multiplier factor. + */ + if (np->features & FE_QUAD) + np->multiplier = 4; + else if (np->features & FE_DBLR) + np->multiplier = 2; + else + np->multiplier = 1; + + /* + * Measure SCSI clock frequency for chips + * it may vary from assumed one. + */ + if (np->features & FE_VARCLK) + sym_getclock(np, np->multiplier); + + /* + * Divisor to be used for async (timer pre-scaler). + */ + i = np->clock_divn - 1; + while (--i >= 0) { + if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) { + ++i; + break; + } + } + np->rv_scntl3 = i+1; + + /* + * The C1010 uses hardwired divisors for async. + * So, we just throw away, the async. divisor.:-) + */ + if (np->features & FE_C10) + np->rv_scntl3 = 0; + + /* + * Minimum synchronous period factor supported by the chip. + * Btw, 'period' is in tenths of nanoseconds. + */ + period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz; + + if (period <= 250) np->minsync = 10; + else if (period <= 303) np->minsync = 11; + else if (period <= 500) np->minsync = 12; + else np->minsync = (period + 40 - 1) / 40; + + /* + * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). + */ + if (np->minsync < 25 && + !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3))) + np->minsync = 25; + else if (np->minsync < 12 && + !(np->features & (FE_ULTRA2|FE_ULTRA3))) + np->minsync = 12; + + /* + * Maximum synchronous period factor supported by the chip. + */ + period = div64_ul(11 * div_10M[np->clock_divn - 1], 4 * np->clock_khz); + np->maxsync = period > 2540 ? 254 : period / 10; + + /* + * If chip is a C1010, guess the sync limits in DT mode. + */ + if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) { + if (np->clock_khz == 160000) { + np->minsync_dt = 9; + np->maxsync_dt = 50; + np->maxoffs_dt = nvram->type ? 62 : 31; + } + } + + /* + * 64 bit addressing (895A/896/1010) ? 
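 * (Worked example of the minsync computation above, for reference:
 *  div_10M[0] is 2 * 5000000, so 'period' is the duration of four SCSI
 *  clock cycles in tenths of a nanosecond.  With the quadrupled 160 MHz
 *  clock of an Ultra2/Ultra3 part that gives 4 * 10000000 / 160000 = 250,
 *  hence minsync = 10 (25 ns); an 80 MHz part gives 500, hence
 *  minsync = 12 (50 ns, Fast-20); a plain 40 MHz part gives 1000, hence
 *  minsync = (1000 + 39) / 40 = 25 (100 ns, Fast-10).  The feature checks
 *  that follow only raise this floor when the chip does not actually
 *  support the faster modes.)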
+ */ + if (np->features & FE_DAC) { + if (!use_dac(np)) + np->rv_ccntl1 |= (DDAC); + else if (SYM_CONF_DMA_ADDRESSING_MODE == 1) + np->rv_ccntl1 |= (XTIMOD | EXTIBMV); + else if (SYM_CONF_DMA_ADDRESSING_MODE == 2) + np->rv_ccntl1 |= (0 | EXTIBMV); + } + + /* + * Phase mismatch handled by SCRIPTS (895A/896/1010) ? + */ + if (np->features & FE_NOPM) + np->rv_ccntl0 |= (ENPMJ); + + /* + * C1010-33 Errata: Part Number:609-039638 (rev. 1) is fixed. + * In dual channel mode, contention occurs if internal cycles + * are used. Disable internal cycles. + */ + if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 && + pdev->revision < 0x1) + np->rv_ccntl0 |= DILS; + + /* + * Select burst length (dwords) + */ + burst_max = SYM_SETUP_BURST_ORDER; + if (burst_max == 255) + burst_max = burst_code(np->sv_dmode, np->sv_ctest4, + np->sv_ctest5); + if (burst_max > 7) + burst_max = 7; + if (burst_max > np->maxburst) + burst_max = np->maxburst; + + /* + * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2. + * This chip and the 860 Rev 1 may wrongly use PCI cache line + * based transactions on LOAD/STORE instructions. So we have + * to prevent these chips from using such PCI transactions in + * this driver. The generic ncr driver that does not use + * LOAD/STORE instructions does not need this work-around. + */ + if ((pdev->device == PCI_DEVICE_ID_NCR_53C810 && + pdev->revision >= 0x10 && pdev->revision <= 0x11) || + (pdev->device == PCI_DEVICE_ID_NCR_53C860 && + pdev->revision <= 0x1)) + np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP); + + /* + * Select all supported special features. + * If we are using on-board RAM for scripts, prefetch (PFEN) + * does not help, but burst op fetch (BOF) does. + * Disabling PFEN makes sure BOF will be used. + */ + if (np->features & FE_ERL) + np->rv_dmode |= ERL; /* Enable Read Line */ + if (np->features & FE_BOF) + np->rv_dmode |= BOF; /* Burst Opcode Fetch */ + if (np->features & FE_ERMP) + np->rv_dmode |= ERMP; /* Enable Read Multiple */ +#if 1 + if ((np->features & FE_PFEN) && !np->ram_ba) +#else + if (np->features & FE_PFEN) +#endif + np->rv_dcntl |= PFEN; /* Prefetch Enable */ + if (np->features & FE_CLSE) + np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ + if (np->features & FE_WRIE) + np->rv_ctest3 |= WRIE; /* Write and Invalidate */ + if (np->features & FE_DFS) + np->rv_ctest5 |= DFS; /* Dma Fifo Size */ + + /* + * Select some other + */ + np->rv_ctest4 |= MPEE; /* Master parity checking */ + np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */ + + /* + * Get parity checking, host ID and verbose mode from NVRAM + */ + np->myaddr = 255; + np->scsi_mode = 0; + sym_nvram_setup_host(shost, np, nvram); + + /* + * Get SCSI addr of host adapter (set by bios?). + */ + if (np->myaddr == 255) { + np->myaddr = INB(np, nc_scid) & 0x07; + if (!np->myaddr) + np->myaddr = SYM_SETUP_HOST_ID; + } + + /* + * Prepare initial io register bits for burst length + */ + sym_init_burst(np, burst_max); + + sym_set_bus_mode(np, nvram); + + /* + * Set LED support from SCRIPTS. + * Ignore this feature for boards known to use a + * specific GPIO wiring and for the 895A, 896 + * and 1010 that drive the LED directly. + */ + if ((SYM_SETUP_SCSI_LED || + (nvram->type == SYM_SYMBIOS_NVRAM || + (nvram->type == SYM_TEKRAM_NVRAM && + pdev->device == PCI_DEVICE_ID_NCR_53C895))) && + !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01)) + np->features |= FE_LED0; + + /* + * Set irq mode. 
+ */ + switch(SYM_SETUP_IRQ_MODE & 3) { + case 2: + np->rv_dcntl |= IRQM; + break; + case 1: + np->rv_dcntl |= (np->sv_dcntl & IRQM); + break; + default: + break; + } + + /* + * Configure targets according to driver setup. + * If NVRAM present get targets setup from NVRAM. + */ + for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { + struct sym_tcb *tp = &np->target[i]; + + tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); + tp->usrtags = SYM_SETUP_MAX_TAG; + tp->usr_width = np->maxwide; + tp->usr_period = 9; + + sym_nvram_setup_target(tp, i, nvram); + + if (!tp->usrtags) + tp->usrflags &= ~SYM_TAGS_ENABLED; + } + + /* + * Let user know about the settings. + */ + printf("%s: %s, ID %d, Fast-%d, %s, %s\n", sym_name(np), + sym_nvram_type(nvram), np->myaddr, + (np->features & FE_ULTRA3) ? 80 : + (np->features & FE_ULTRA2) ? 40 : + (np->features & FE_ULTRA) ? 20 : 10, + sym_scsi_bus_mode(np->scsi_mode), + (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity"); + /* + * Tell him more on demand. + */ + if (sym_verbose) { + printf("%s: %s IRQ line driver%s\n", + sym_name(np), + np->rv_dcntl & IRQM ? "totem pole" : "open drain", + np->ram_ba ? ", using on-chip SRAM" : ""); + printf("%s: using %s firmware.\n", sym_name(np), np->fw_name); + if (np->features & FE_NOPM) + printf("%s: handling phase mismatch from SCRIPTS.\n", + sym_name(np)); + } + /* + * And still more. + */ + if (sym_verbose >= 2) { + printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " + "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", + sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl, + np->sv_ctest3, np->sv_ctest4, np->sv_ctest5); + + printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " + "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", + sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl, + np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); + } + + return 0; +} + +/* + * Test the pci bus snoop logic :-( + * + * Has to be called with interrupts disabled. + */ +#ifdef CONFIG_SCSI_SYM53C8XX_MMIO +static int sym_regtest(struct sym_hcb *np) +{ + register volatile u32 data; + /* + * chip registers may NOT be cached. + * write 0xffffffff to a read only register area, + * and try to read it back. + */ + data = 0xffffffff; + OUTL(np, nc_dstat, data); + data = INL(np, nc_dstat); +#if 1 + if (data == 0xffffffff) { +#else + if ((data & 0xe2f0fffd) != 0x02000080) { +#endif + printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n", + (unsigned) data); + return 0x10; + } + return 0; +} +#else +static inline int sym_regtest(struct sym_hcb *np) +{ + return 0; +} +#endif + +static int sym_snooptest(struct sym_hcb *np) +{ + u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat; + int i, err; + + err = sym_regtest(np); + if (err) + return err; +restart_test: + /* + * Enable Master Parity Checking as we intend + * to enable it for normal operations. + */ + OUTB(np, nc_ctest4, (np->rv_ctest4 & MPEE)); + /* + * init + */ + pc = SCRIPTZ_BA(np, snooptest); + host_wr = 1; + sym_wr = 2; + /* + * Set memory and register. + */ + np->scratch = cpu_to_scr(host_wr); + OUTL(np, nc_temp, sym_wr); + /* + * Start script (exchange values) + */ + OUTL(np, nc_dsa, np->hcb_ba); + OUTL_DSP(np, pc); + /* + * Wait 'til done (with timeout) + */ + for (i=0; i=SYM_SNOOP_TIMEOUT) { + printf ("CACHE TEST FAILED: timeout.\n"); + return (0x20); + } + /* + * Check for fatal DMA errors. 
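 * (Overall idea of this snoop test, for reference: the host seeds
 *  np->scratch in memory with 1 and the chip's TEMP register with 2, then
 *  lets a tiny SCRIPTS fragment exchange the two values by DMA.  On
 *  success the chip must have read the host's 1 (visible in SCRATCHA) and
 *  the host must read 2 back from np->scratch, i.e. roughly:
 *
 *	np->scratch = cpu_to_scr(1);		host store to memory
 *	OUTL(np, nc_temp, 2);			host store to chip register
 *	... run the snooptest SCRIPTS ...
 *	expect INL(np, nc_scratcha) == 1 and scr_to_cpu(np->scratch) == 2
 *
 *  which proves both that bus-master reads see CPU stores and that CPU
 *  loads see DMA writes, with no stale cache line in either direction.)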
+ */ + dstat = INB(np, nc_dstat); +#if 1 /* Band aiding for broken hardwares that fail PCI parity */ + if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) { + printf ("%s: PCI DATA PARITY ERROR DETECTED - " + "DISABLING MASTER DATA PARITY CHECKING.\n", + sym_name(np)); + np->rv_ctest4 &= ~MPEE; + goto restart_test; + } +#endif + if (dstat & (MDPE|BF|IID)) { + printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat); + return (0x80); + } + /* + * Save termination position. + */ + pc = INL(np, nc_dsp); + /* + * Read memory and register. + */ + host_rd = scr_to_cpu(np->scratch); + sym_rd = INL(np, nc_scratcha); + sym_bk = INL(np, nc_temp); + /* + * Check termination position. + */ + if (pc != SCRIPTZ_BA(np, snoopend)+8) { + printf ("CACHE TEST FAILED: script execution failed.\n"); + printf ("start=%08lx, pc=%08lx, end=%08lx\n", + (u_long) SCRIPTZ_BA(np, snooptest), (u_long) pc, + (u_long) SCRIPTZ_BA(np, snoopend) +8); + return (0x40); + } + /* + * Show results. + */ + if (host_wr != sym_rd) { + printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n", + (int) host_wr, (int) sym_rd); + err |= 1; + } + if (host_rd != sym_wr) { + printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n", + (int) sym_wr, (int) host_rd); + err |= 2; + } + if (sym_bk != sym_wr) { + printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n", + (int) sym_wr, (int) sym_bk); + err |= 4; + } + + return err; +} + +/* + * log message for real hard errors + * + * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sx/s3/s4) @ name (dsp:dbc). + * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf. + * + * exception register: + * ds: dstat + * si: sist + * + * SCSI bus lines: + * so: control lines as driven by chip. + * si: control lines as seen by chip. + * sd: scsi data lines as seen by chip. + * + * wide/fastmode: + * sx: sxfer (see the manual) + * s3: scntl3 (see the manual) + * s4: scntl4 (see the manual) + * + * current script command: + * dsp: script address (relative to start of script). + * dbc: first word of script command. + * + * First 24 register of the chip: + * r0..rf + */ +static void sym_log_hard_error(struct Scsi_Host *shost, u_short sist, u_char dstat) +{ + struct sym_hcb *np = sym_get_hcb(shost); + u32 dsp; + int script_ofs; + int script_size; + char *script_name; + u_char *script_base; + int i; + + dsp = INL(np, nc_dsp); + + if (dsp > np->scripta_ba && + dsp <= np->scripta_ba + np->scripta_sz) { + script_ofs = dsp - np->scripta_ba; + script_size = np->scripta_sz; + script_base = (u_char *) np->scripta0; + script_name = "scripta"; + } + else if (np->scriptb_ba < dsp && + dsp <= np->scriptb_ba + np->scriptb_sz) { + script_ofs = dsp - np->scriptb_ba; + script_size = np->scriptb_sz; + script_base = (u_char *) np->scriptb0; + script_name = "scriptb"; + } else { + script_ofs = dsp; + script_size = 0; + script_base = NULL; + script_name = "mem"; + } + + printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x/%x) @ (%s %x:%08x).\n", + sym_name(np), (unsigned)INB(np, nc_sdid)&0x0f, dstat, sist, + (unsigned)INB(np, nc_socl), (unsigned)INB(np, nc_sbcl), + (unsigned)INB(np, nc_sbdl), (unsigned)INB(np, nc_sxfer), + (unsigned)INB(np, nc_scntl3), + (np->features & FE_C10) ? 
(unsigned)INB(np, nc_scntl4) : 0, + script_name, script_ofs, (unsigned)INL(np, nc_dbc)); + + if (((script_ofs & 3) == 0) && + (unsigned)script_ofs < script_size) { + printf ("%s: script cmd = %08x\n", sym_name(np), + scr_to_cpu((int) *(u32 *)(script_base + script_ofs))); + } + + printf("%s: regdump:", sym_name(np)); + for (i = 0; i < 24; i++) + printf(" %02x", (unsigned)INB_OFF(np, i)); + printf(".\n"); + + /* + * PCI BUS error. + */ + if (dstat & (MDPE|BF)) + sym_log_bus_error(shost); +} + +void sym_dump_registers(struct Scsi_Host *shost) +{ + struct sym_hcb *np = sym_get_hcb(shost); + u_short sist; + u_char dstat; + + sist = INW(np, nc_sist); + dstat = INB(np, nc_dstat); + sym_log_hard_error(shost, sist, dstat); +} + +static struct sym_chip sym_dev_table[] = { + {PCI_DEVICE_ID_NCR_53C810, 0x0f, "810", 4, 8, 4, 64, + FE_ERL} + , +#ifdef SYM_DEBUG_GENERIC_SUPPORT + {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, 1, + FE_BOF} + , +#else + {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, 1, + FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF} + , +#endif + {PCI_DEVICE_ID_NCR_53C815, 0xff, "815", 4, 8, 4, 64, + FE_BOF|FE_ERL} + , + {PCI_DEVICE_ID_NCR_53C825, 0x0f, "825", 6, 8, 4, 64, + FE_WIDE|FE_BOF|FE_ERL|FE_DIFF} + , + {PCI_DEVICE_ID_NCR_53C825, 0xff, "825a", 6, 8, 4, 2, + FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF} + , + {PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4, 8, 5, 1, + FE_ULTRA|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN} + , + {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, 2, + FE_WIDE|FE_ULTRA|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| + FE_RAM|FE_DIFF|FE_VARCLK} + , + {PCI_DEVICE_ID_NCR_53C875, 0xff, "875", 6, 16, 5, 2, + FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| + FE_RAM|FE_DIFF|FE_VARCLK} + , + {PCI_DEVICE_ID_NCR_53C875J, 0xff, "875J", 6, 16, 5, 2, + FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| + FE_RAM|FE_DIFF|FE_VARCLK} + , + {PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, 2, + FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| + FE_RAM|FE_DIFF|FE_VARCLK} + , +#ifdef SYM_DEBUG_GENERIC_SUPPORT + {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2, + FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS| + FE_RAM|FE_LCKFRQ} + , +#else + {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2, + FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| + FE_RAM|FE_LCKFRQ} + , +#endif + {PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 6, 31, 7, 4, + FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| + FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} + , + {PCI_DEVICE_ID_LSI_53C895A, 0xff, "895a", 6, 31, 7, 4, + FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| + FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} + , + {PCI_DEVICE_ID_LSI_53C875A, 0xff, "875a", 6, 31, 7, 4, + FE_WIDE|FE_ULTRA|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| + FE_RAM|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} + , + {PCI_DEVICE_ID_LSI_53C1010_33, 0x00, "1010-33", 6, 31, 7, 8, + FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| + FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC| + FE_C10} + , + {PCI_DEVICE_ID_LSI_53C1010_33, 0xff, "1010-33", 6, 31, 7, 8, + FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| + FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC| + FE_C10|FE_U3EN} + , + {PCI_DEVICE_ID_LSI_53C1010_66, 0xff, "1010-66", 6, 31, 7, 8, + 
FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| + FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC| + FE_C10|FE_U3EN} + , + {PCI_DEVICE_ID_LSI_53C1510, 0xff, "1510d", 6, 31, 7, 4, + FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| + FE_RAM|FE_IO256|FE_LEDC} +}; + +#define sym_num_devs (ARRAY_SIZE(sym_dev_table)) + +/* + * Look up the chip table. + * + * Return a pointer to the chip entry if found, + * zero otherwise. + */ +struct sym_chip * +sym_lookup_chip_table (u_short device_id, u_char revision) +{ + struct sym_chip *chip; + int i; + + for (i = 0; i < sym_num_devs; i++) { + chip = &sym_dev_table[i]; + if (device_id != chip->device_id) + continue; + if (revision > chip->revision_id) + continue; + return chip; + } + + return NULL; +} + +#if SYM_CONF_DMA_ADDRESSING_MODE == 2 +/* + * Lookup the 64 bit DMA segments map. + * This is only used if the direct mapping + * has been unsuccessful. + */ +int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s) +{ + int i; + + if (!use_dac(np)) + goto weird; + + /* Look up existing mappings */ + for (i = SYM_DMAP_SIZE-1; i > 0; i--) { + if (h == np->dmap_bah[i]) + return i; + } + /* If direct mapping is free, get it */ + if (!np->dmap_bah[s]) + goto new; + /* Collision -> lookup free mappings */ + for (s = SYM_DMAP_SIZE-1; s > 0; s--) { + if (!np->dmap_bah[s]) + goto new; + } +weird: + panic("sym: ran out of 64 bit DMA segment registers"); + return -1; +new: + np->dmap_bah[s] = h; + np->dmap_dirty = 1; + return s; +} + +/* + * Update IO registers scratch C..R so they will be + * in sync. with queued CCB expectations. + */ +static void sym_update_dmap_regs(struct sym_hcb *np) +{ + int o, i; + + if (!np->dmap_dirty) + return; + o = offsetof(struct sym_reg, nc_scrx[0]); + for (i = 0; i < SYM_DMAP_SIZE; i++) { + OUTL_OFF(np, o, np->dmap_bah[i]); + o += 4; + } + np->dmap_dirty = 0; +} +#endif + +/* Enforce all the fiddly SPI rules and the chip limitations */ +static void sym_check_goals(struct sym_hcb *np, struct scsi_target *starget, + struct sym_trans *goal) +{ + if (!spi_support_wide(starget)) + goal->width = 0; + + if (!spi_support_sync(starget)) { + goal->iu = 0; + goal->dt = 0; + goal->qas = 0; + goal->offset = 0; + return; + } + + if (spi_support_dt(starget)) { + if (spi_support_dt_only(starget)) + goal->dt = 1; + + if (goal->offset == 0) + goal->dt = 0; + } else { + goal->dt = 0; + } + + /* Some targets fail to properly negotiate DT in SE mode */ + if ((np->scsi_mode != SMODE_LVD) || !(np->features & FE_U3EN)) + goal->dt = 0; + + if (goal->dt) { + /* all DT transfers must be wide */ + goal->width = 1; + if (goal->offset > np->maxoffs_dt) + goal->offset = np->maxoffs_dt; + if (goal->period < np->minsync_dt) + goal->period = np->minsync_dt; + if (goal->period > np->maxsync_dt) + goal->period = np->maxsync_dt; + } else { + goal->iu = goal->qas = 0; + if (goal->offset > np->maxoffs) + goal->offset = np->maxoffs; + if (goal->period < np->minsync) + goal->period = np->minsync; + if (goal->period > np->maxsync) + goal->period = np->maxsync; + } +} + +/* + * Prepare the next negotiation message if needed. + * + * Fill in the part of message buffer that contains the + * negotiation and the nego_status field of the CCB. + * Returns the size of the message in bytes. 
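 * (In practice the selection below reduces to: use PPR whenever extended
 *  options are wanted (IU, DT or QAS), the period factor is below 0x0a
 *  (faster than 25 ns), or a PPR renegotiation is pending; fall back to a
 *  plain WDTR when only the width changes, to SDTR when only period or
 *  offset change, and to no message at all otherwise, in which case
 *  check_nego is simply cleared.  The message bytes themselves are built
 *  by the SPI transport helpers, e.g. for PPR something like
 *
 *	msglen = spi_populate_ppr_msg(msgptr, goal->period, goal->offset,
 *				      goal->width, PPR_OPT_DT);
 *
 *  with the option bits chosen from the goal flags as in the code below.)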
+ */ +static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgptr) +{ + struct sym_tcb *tp = &np->target[cp->target]; + struct scsi_target *starget = tp->starget; + struct sym_trans *goal = &tp->tgoal; + int msglen = 0; + int nego; + + sym_check_goals(np, starget, goal); + + /* + * Many devices implement PPR in a buggy way, so only use it if we + * really want to. + */ + if (goal->renego == NS_PPR || (goal->offset && + (goal->iu || goal->dt || goal->qas || (goal->period < 0xa)))) { + nego = NS_PPR; + } else if (goal->renego == NS_WIDE || goal->width) { + nego = NS_WIDE; + } else if (goal->renego == NS_SYNC || goal->offset) { + nego = NS_SYNC; + } else { + goal->check_nego = 0; + nego = 0; + } + + switch (nego) { + case NS_SYNC: + msglen += spi_populate_sync_msg(msgptr + msglen, goal->period, + goal->offset); + break; + case NS_WIDE: + msglen += spi_populate_width_msg(msgptr + msglen, goal->width); + break; + case NS_PPR: + msglen += spi_populate_ppr_msg(msgptr + msglen, goal->period, + goal->offset, goal->width, + (goal->iu ? PPR_OPT_IU : 0) | + (goal->dt ? PPR_OPT_DT : 0) | + (goal->qas ? PPR_OPT_QAS : 0)); + break; + } + + cp->nego_status = nego; + + if (nego) { + tp->nego_cp = cp; /* Keep track a nego will be performed */ + if (DEBUG_FLAGS & DEBUG_NEGO) { + sym_print_nego_msg(np, cp->target, + nego == NS_SYNC ? "sync msgout" : + nego == NS_WIDE ? "wide msgout" : + "ppr msgout", msgptr); + } + } + + return msglen; +} + +/* + * Insert a job into the start queue. + */ +void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp) +{ + u_short qidx; + +#ifdef SYM_CONF_IARB_SUPPORT + /* + * If the previously queued CCB is not yet done, + * set the IARB hint. The SCRIPTS will go with IARB + * for this job when starting the previous one. + * We leave devices a chance to win arbitration by + * not using more than 'iarb_max' consecutive + * immediate arbitrations. + */ + if (np->last_cp && np->iarb_count < np->iarb_max) { + np->last_cp->host_flags |= HF_HINT_IARB; + ++np->iarb_count; + } + else + np->iarb_count = 0; + np->last_cp = cp; +#endif + +#if SYM_CONF_DMA_ADDRESSING_MODE == 2 + /* + * Make SCRIPTS aware of the 64 bit DMA + * segment registers not being up-to-date. + */ + if (np->dmap_dirty) + cp->host_xflags |= HX_DMAP_DIRTY; +#endif + + /* + * Insert first the idle task and then our job. + * The MBs should ensure proper ordering. + */ + qidx = np->squeueput + 2; + if (qidx >= MAX_QUEUE*2) qidx = 0; + + np->squeue [qidx] = cpu_to_scr(np->idletask_ba); + MEMORY_WRITE_BARRIER(); + np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba); + + np->squeueput = qidx; + + if (DEBUG_FLAGS & DEBUG_QUEUE) + scmd_printk(KERN_DEBUG, cp->cmd, "queuepos=%d\n", + np->squeueput); + + /* + * Script processor may be waiting for reselect. + * Wake it up. + */ + MEMORY_WRITE_BARRIER(); + OUTB(np, nc_istat, SIGP|np->istat_sem); +} + +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING +/* + * Start next ready-to-start CCBs. + */ +void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn) +{ + SYM_QUEHEAD *qp; + struct sym_ccb *cp; + + /* + * Paranoia, as usual. :-) + */ + assert(!lp->started_tags || !lp->started_no_tag); + + /* + * Try to start as many commands as asked by caller. + * Prevent from having both tagged and untagged + * commands queued to the device at the same time. 
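 * (Aside on sym_put_start_queue() above, sketch only: the start queue is a
 *  ring of MAX_QUEUE two-word entries polled by SCRIPTS.  The producer
 *  first parks the idle-task address in the slot after the one being
 *  filled, so SCRIPTS always finds a valid stopper, and only then exposes
 *  the real job:
 *
 *	next = wrap(put + 2);
 *	squeue[next] = cpu_to_scr(np->idletask_ba);	new stopper
 *	MEMORY_WRITE_BARRIER();
 *	squeue[put] = cpu_to_scr(cp->ccb_ba);		publish the CCB
 *	put = next;
 *	MEMORY_WRITE_BARRIER();
 *	OUTB(np, nc_istat, SIGP | np->istat_sem);	wake SCRIPTS
 *
 *  where wrap() is shorthand for the MAX_QUEUE*2 wrap-around that the real
 *  code does inline.)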
+ */ + while (maxn--) { + qp = sym_remque_head(&lp->waiting_ccbq); + if (!qp) + break; + cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq); + if (cp->tag != NO_TAG) { + if (lp->started_no_tag || + lp->started_tags >= lp->started_max) { + sym_insque_head(qp, &lp->waiting_ccbq); + break; + } + lp->itlq_tbl[cp->tag] = cpu_to_scr(cp->ccb_ba); + lp->head.resel_sa = + cpu_to_scr(SCRIPTA_BA(np, resel_tag)); + ++lp->started_tags; + } else { + if (lp->started_no_tag || lp->started_tags) { + sym_insque_head(qp, &lp->waiting_ccbq); + break; + } + lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba); + lp->head.resel_sa = + cpu_to_scr(SCRIPTA_BA(np, resel_no_tag)); + ++lp->started_no_tag; + } + cp->started = 1; + sym_insque_tail(qp, &lp->started_ccbq); + sym_put_start_queue(np, cp); + } +} +#endif /* SYM_OPT_HANDLE_DEVICE_QUEUEING */ + +/* + * The chip may have completed jobs. Look at the DONE QUEUE. + * + * On paper, memory read barriers may be needed here to + * prevent out of order LOADs by the CPU from having + * prefetched stale data prior to DMA having occurred. + */ +static int sym_wakeup_done (struct sym_hcb *np) +{ + struct sym_ccb *cp; + int i, n; + u32 dsa; + + n = 0; + i = np->dqueueget; + + /* MEMORY_READ_BARRIER(); */ + while (1) { + dsa = scr_to_cpu(np->dqueue[i]); + if (!dsa) + break; + np->dqueue[i] = 0; + if ((i = i+2) >= MAX_QUEUE*2) + i = 0; + + cp = sym_ccb_from_dsa(np, dsa); + if (cp) { + MEMORY_READ_BARRIER(); + sym_complete_ok (np, cp); + ++n; + } + else + printf ("%s: bad DSA (%x) in done queue.\n", + sym_name(np), (u_int) dsa); + } + np->dqueueget = i; + + return n; +} + +/* + * Complete all CCBs queued to the COMP queue. + * + * These CCBs are assumed: + * - Not to be referenced either by devices or + * SCRIPTS-related queues and datas. + * - To have to be completed with an error condition + * or requeued. + * + * The device queue freeze count is incremented + * for each CCB that does not prevent this. + * This function is called when all CCBs involved + * in error handling/recovery have been reaped. + */ +static void sym_flush_comp_queue(struct sym_hcb *np, int cam_status) +{ + SYM_QUEHEAD *qp; + struct sym_ccb *cp; + + while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) { + struct scsi_cmnd *cmd; + cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); + sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); + /* Leave quiet CCBs waiting for resources */ + if (cp->host_status == HS_WAIT) + continue; + cmd = cp->cmd; + if (cam_status) + sym_set_cam_status(cmd, cam_status); +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + if (sym_get_cam_status(cmd) == DID_SOFT_ERROR) { + struct sym_tcb *tp = &np->target[cp->target]; + struct sym_lcb *lp = sym_lp(tp, cp->lun); + if (lp) { + sym_remque(&cp->link2_ccbq); + sym_insque_tail(&cp->link2_ccbq, + &lp->waiting_ccbq); + if (cp->started) { + if (cp->tag != NO_TAG) + --lp->started_tags; + else + --lp->started_no_tag; + } + } + cp->started = 0; + continue; + } +#endif + sym_free_ccb(np, cp); + sym_xpt_done(np, cmd); + } +} + +/* + * Complete all active CCBs with error. + * Used on CHIP/SCSI RESET. + */ +static void sym_flush_busy_queue (struct sym_hcb *np, int cam_status) +{ + /* + * Move all active CCBs to the COMP queue + * and flush this queue. + */ + sym_que_splice(&np->busy_ccbq, &np->comp_ccbq); + sym_que_init(&np->busy_ccbq); + sym_flush_comp_queue(np, cam_status); +} + +/* + * Start chip. + * + * 'reason' means: + * 0: initialisation. + * 1: SCSI BUS RESET delivered or received. + * 2: SCSI BUS MODE changed. 
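+ *
+ * Illustrative call sketch (reasons 1 and 2 are used by the
+ * interrupt handlers later in this file; reason 0 is passed
+ * at first initialisation):
+ *
+ *	sym_start_up(shost, 0);		first start
+ *	sym_start_up(shost, 1);		after a SCSI BUS RESET
+ *	sym_start_up(shost, 2);		after a bus mode change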
+ */ +void sym_start_up(struct Scsi_Host *shost, int reason) +{ + struct sym_data *sym_data = shost_priv(shost); + struct pci_dev *pdev = sym_data->pdev; + struct sym_hcb *np = sym_data->ncb; + int i; + u32 phys; + + /* + * Reset chip if asked, otherwise just clear fifos. + */ + if (reason == 1) + sym_soft_reset(np); + else { + OUTB(np, nc_stest3, TE|CSF); + OUTONB(np, nc_ctest3, CLF); + } + + /* + * Clear Start Queue + */ + phys = np->squeue_ba; + for (i = 0; i < MAX_QUEUE*2; i += 2) { + np->squeue[i] = cpu_to_scr(np->idletask_ba); + np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4); + } + np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys); + + /* + * Start at first entry. + */ + np->squeueput = 0; + + /* + * Clear Done Queue + */ + phys = np->dqueue_ba; + for (i = 0; i < MAX_QUEUE*2; i += 2) { + np->dqueue[i] = 0; + np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4); + } + np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys); + + /* + * Start at first entry. + */ + np->dqueueget = 0; + + /* + * Install patches in scripts. + * This also points the start and done queue pointers + * used by SCRIPTS at their first positions. + */ + np->fw_patch(shost); + + /* + * Wake up all pending jobs. + */ + sym_flush_busy_queue(np, DID_RESET); + + /* + * Init chip. + */ + OUTB(np, nc_istat, 0x00); /* Remove Reset, abort */ + INB(np, nc_mbox1); + udelay(2000); /* The 895 needs time for the bus mode to settle */ + + OUTB(np, nc_scntl0, np->rv_scntl0 | 0xc0); + /* full arb., ena parity, par->ATN */ + OUTB(np, nc_scntl1, 0x00); /* odd parity, and remove CRST!! */ + + sym_selectclock(np, np->rv_scntl3); /* Select SCSI clock */ + + OUTB(np, nc_scid , RRE|np->myaddr); /* Adapter SCSI address */ + OUTW(np, nc_respid, 1ul<<np->myaddr); /* Id to respond to */ + OUTB(np, nc_istat , SIGP ); /* Signal Process */ + OUTB(np, nc_dmode , np->rv_dmode); /* Burst length, dma mode */ + OUTB(np, nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */ + + OUTB(np, nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */ + OUTB(np, nc_ctest3, np->rv_ctest3); /* Write and invalidate */ + OUTB(np, nc_ctest4, np->rv_ctest4); /* Master parity checking */ + + /* Extended Sreq/Sack filtering not supported on the C10 */ + if (np->features & FE_C10) + OUTB(np, nc_stest2, np->rv_stest2); + else + OUTB(np, nc_stest2, EXT|np->rv_stest2); + + OUTB(np, nc_stest3, TE); /* TolerANT enable */ + OUTB(np, nc_stime0, 0x0c); /* HTH disabled STO 0.25 sec */ + + /* + * For now, disable AIP generation on C1010-66. + */ + if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_66) + OUTB(np, nc_aipcntl1, DISAIP); + + /* + * C1010-33 rev. 0 errata. + * Errant SGE's when in narrow. Write bits 4 & 5 of + * STEST1 register to disable SGE. We probably should do + * that from SCRIPTS for each selection/reselection, but + * I just don't want. :) + */ + if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 && + pdev->revision < 1) + OUTB(np, nc_stest1, INB(np, nc_stest1) | 0x30); + + /* + * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2. + * Disable overlapped arbitration for some dual function devices, + * regardless of revision id (kind of post-chip-design feature. ;-)) + */ + if (pdev->device == PCI_DEVICE_ID_NCR_53C875) + OUTB(np, nc_ctest0, (1<<5)); + else if (pdev->device == PCI_DEVICE_ID_NCR_53C896) + np->rv_ccntl0 |= DPR; + + /* + * Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing + * and/or hardware phase mismatch, since only such chips + * seem to support those IO registers.
+ */ + if (np->features & (FE_DAC|FE_NOPM)) { + OUTB(np, nc_ccntl0, np->rv_ccntl0); + OUTB(np, nc_ccntl1, np->rv_ccntl1); + } + +#if SYM_CONF_DMA_ADDRESSING_MODE == 2 + /* + * Set up scratch C and DRS IO registers to map the 32 bit + * DMA address range our data structures are located in. + */ + if (use_dac(np)) { + np->dmap_bah[0] = 0; /* ??? */ + OUTL(np, nc_scrx[0], np->dmap_bah[0]); + OUTL(np, nc_drs, np->dmap_bah[0]); + } +#endif + + /* + * If phase mismatch handled by scripts (895A/896/1010), + * set PM jump addresses. + */ + if (np->features & FE_NOPM) { + OUTL(np, nc_pmjad1, SCRIPTB_BA(np, pm_handle)); + OUTL(np, nc_pmjad2, SCRIPTB_BA(np, pm_handle)); + } + + /* + * Enable GPIO0 pin for writing if LED support from SCRIPTS. + * Also set GPIO5 and clear GPIO6 if hardware LED control. + */ + if (np->features & FE_LED0) + OUTB(np, nc_gpcntl, INB(np, nc_gpcntl) & ~0x01); + else if (np->features & FE_LEDC) + OUTB(np, nc_gpcntl, (INB(np, nc_gpcntl) & ~0x41) | 0x20); + + /* + * enable ints + */ + OUTW(np, nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR); + OUTB(np, nc_dien , MDPE|BF|SSI|SIR|IID); + + /* + * For 895/6 enable SBMC interrupt and save current SCSI bus mode. + * Try to eat the spurious SBMC interrupt that may occur when + * we reset the chip but not the SCSI BUS (at initialization). + */ + if (np->features & (FE_ULTRA2|FE_ULTRA3)) { + OUTONW(np, nc_sien, SBMC); + if (reason == 0) { + INB(np, nc_mbox1); + mdelay(100); + INW(np, nc_sist); + } + np->scsi_mode = INB(np, nc_stest4) & SMODE; + } + + /* + * Fill in target structure. + * Reinitialize usrsync. + * Reinitialize usrwide. + * Prepare sync negotiation according to actual SCSI bus mode. + */ + for (i=0;i<SYM_CONF_MAX_TARGET;i++) { + struct sym_tcb *tp = &np->target[i]; + + tp->to_reset = 0; + tp->head.sval = 0; + tp->head.wval = np->rv_scntl3; + tp->head.uval = 0; + if (tp->lun0p) + tp->lun0p->to_clear = 0; + if (tp->lunmp) { + int ln; + + for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++) + if (tp->lunmp[ln]) + tp->lunmp[ln]->to_clear = 0; + } + } + + /* + * Download SCSI SCRIPTS to on-chip RAM if present, + * and start script processor. + * We do the download preferably from the CPU. + * For platforms that may not support PCI memory mapping, + * we use simple SCRIPTS that perform MEMORY MOVEs. + */ + phys = SCRIPTA_BA(np, init); + if (np->ram_ba) { + if (sym_verbose >= 2) + printf("%s: Downloading SCSI SCRIPTS.\n", sym_name(np)); + memcpy_toio(np->s.ramaddr, np->scripta0, np->scripta_sz); + if (np->features & FE_RAM8K) { + memcpy_toio(np->s.ramaddr + 4096, np->scriptb0, np->scriptb_sz); + phys = scr_to_cpu(np->scr_ram_seg); + OUTL(np, nc_mmws, phys); + OUTL(np, nc_mmrs, phys); + OUTL(np, nc_sfs, phys); + phys = SCRIPTB_BA(np, start64); + } + } + + np->istat_sem = 0; + + OUTL(np, nc_dsa, np->hcb_ba); + OUTL_DSP(np, phys); + + /* + * Notify the XPT about the RESET condition. + */ + if (reason != 0) + sym_xpt_async_bus_reset(np); +} + +/* + * Switch trans mode for current job and its target. + */ +static void sym_settrans(struct sym_hcb *np, int target, u_char opts, u_char ofs, + u_char per, u_char wide, u_char div, u_char fak) +{ + SYM_QUEHEAD *qp; + u_char sval, wval, uval; + struct sym_tcb *tp = &np->target[target]; + + assert(target == (INB(np, nc_sdid) & 0x0f)); + + sval = tp->head.sval; + wval = tp->head.wval; + uval = tp->head.uval; + +#if 0 + printf("XXXX sval=%x wval=%x uval=%x (%x)\n", + sval, wval, uval, np->rv_scntl3); +#endif + /* + * Set the offset.
+ */ + if (!(np->features & FE_C10)) + sval = (sval & ~0x1f) | ofs; + else + sval = (sval & ~0x3f) | ofs; + + /* + * Set the sync divisor and extra clock factor. + */ + if (ofs != 0) { + wval = (wval & ~0x70) | ((div+1) << 4); + if (!(np->features & FE_C10)) + sval = (sval & ~0xe0) | (fak << 5); + else { + uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT); + if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT); + if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT); + } + } + + /* + * Set the bus width. + */ + wval = wval & ~EWS; + if (wide != 0) + wval |= EWS; + + /* + * Set misc. ultra enable bits. + */ + if (np->features & FE_C10) { + uval = uval & ~(U3EN|AIPCKEN); + if (opts) { + assert(np->features & FE_U3EN); + uval |= U3EN; + } + } else { + wval = wval & ~ULTRA; + if (per <= 12) wval |= ULTRA; + } + + /* + * Stop there if sync parameters are unchanged. + */ + if (tp->head.sval == sval && + tp->head.wval == wval && + tp->head.uval == uval) + return; + tp->head.sval = sval; + tp->head.wval = wval; + tp->head.uval = uval; + + /* + * Disable extended Sreq/Sack filtering if per < 50. + * Not supported on the C1010. + */ + if (per < 50 && !(np->features & FE_C10)) + OUTOFFB(np, nc_stest2, EXT); + + /* + * set actual value and sync_status + */ + OUTB(np, nc_sxfer, tp->head.sval); + OUTB(np, nc_scntl3, tp->head.wval); + + if (np->features & FE_C10) { + OUTB(np, nc_scntl4, tp->head.uval); + } + + /* + * patch ALL busy ccbs of this target. + */ + FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { + struct sym_ccb *cp; + cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); + if (cp->target != target) + continue; + cp->phys.select.sel_scntl3 = tp->head.wval; + cp->phys.select.sel_sxfer = tp->head.sval; + if (np->features & FE_C10) { + cp->phys.select.sel_scntl4 = tp->head.uval; + } + } +} + +static void sym_announce_transfer_rate(struct sym_tcb *tp) +{ + struct scsi_target *starget = tp->starget; + + if (tp->tprint.period != spi_period(starget) || + tp->tprint.offset != spi_offset(starget) || + tp->tprint.width != spi_width(starget) || + tp->tprint.iu != spi_iu(starget) || + tp->tprint.dt != spi_dt(starget) || + tp->tprint.qas != spi_qas(starget) || + !tp->tprint.check_nego) { + tp->tprint.period = spi_period(starget); + tp->tprint.offset = spi_offset(starget); + tp->tprint.width = spi_width(starget); + tp->tprint.iu = spi_iu(starget); + tp->tprint.dt = spi_dt(starget); + tp->tprint.qas = spi_qas(starget); + tp->tprint.check_nego = 1; + + spi_display_xfer_agreement(starget); + } +} + +/* + * We received a WDTR. + * Let everything be aware of the changes. + */ +static void sym_setwide(struct sym_hcb *np, int target, u_char wide) +{ + struct sym_tcb *tp = &np->target[target]; + struct scsi_target *starget = tp->starget; + + sym_settrans(np, target, 0, 0, 0, wide, 0, 0); + + if (wide) + tp->tgoal.renego = NS_WIDE; + else + tp->tgoal.renego = 0; + tp->tgoal.check_nego = 0; + tp->tgoal.width = wide; + spi_offset(starget) = 0; + spi_period(starget) = 0; + spi_width(starget) = wide; + spi_iu(starget) = 0; + spi_dt(starget) = 0; + spi_qas(starget) = 0; + + if (sym_verbose >= 3) + sym_announce_transfer_rate(tp); +} + +/* + * We received a SDTR. + * Let everything be aware of the changes. + */ +static void +sym_setsync(struct sym_hcb *np, int target, + u_char ofs, u_char per, u_char div, u_char fak) +{ + struct sym_tcb *tp = &np->target[target]; + struct scsi_target *starget = tp->starget; + u_char wide = (tp->head.wval & EWS) ? 
BUS_16_BIT : BUS_8_BIT; + + sym_settrans(np, target, 0, ofs, per, wide, div, fak); + + if (wide) + tp->tgoal.renego = NS_WIDE; + else if (ofs) + tp->tgoal.renego = NS_SYNC; + else + tp->tgoal.renego = 0; + spi_period(starget) = per; + spi_offset(starget) = ofs; + spi_iu(starget) = spi_dt(starget) = spi_qas(starget) = 0; + + if (!tp->tgoal.dt && !tp->tgoal.iu && !tp->tgoal.qas) { + tp->tgoal.period = per; + tp->tgoal.offset = ofs; + tp->tgoal.check_nego = 0; + } + + sym_announce_transfer_rate(tp); +} + +/* + * We received a PPR. + * Let everything be aware of the changes. + */ +static void +sym_setpprot(struct sym_hcb *np, int target, u_char opts, u_char ofs, + u_char per, u_char wide, u_char div, u_char fak) +{ + struct sym_tcb *tp = &np->target[target]; + struct scsi_target *starget = tp->starget; + + sym_settrans(np, target, opts, ofs, per, wide, div, fak); + + if (wide || ofs) + tp->tgoal.renego = NS_PPR; + else + tp->tgoal.renego = 0; + spi_width(starget) = tp->tgoal.width = wide; + spi_period(starget) = tp->tgoal.period = per; + spi_offset(starget) = tp->tgoal.offset = ofs; + spi_iu(starget) = tp->tgoal.iu = !!(opts & PPR_OPT_IU); + spi_dt(starget) = tp->tgoal.dt = !!(opts & PPR_OPT_DT); + spi_qas(starget) = tp->tgoal.qas = !!(opts & PPR_OPT_QAS); + tp->tgoal.check_nego = 0; + + sym_announce_transfer_rate(tp); +} + +/* + * generic recovery from scsi interrupt + * + * The doc says that when the chip gets an SCSI interrupt, + * it tries to stop in an orderly fashion, by completing + * an instruction fetch that had started or by flushing + * the DMA fifo for a write to memory that was executing. + * Such a fashion is not enough to know if the instruction + * that was just before the current DSP value has been + * executed or not. + * + * There are some small SCRIPTS sections that deal with + * the start queue and the done queue that may break any + * assomption from the C code if we are interrupted + * inside, so we reset if this happens. Btw, since these + * SCRIPTS sections are executed while the SCRIPTS hasn't + * started SCSI operations, it is very unlikely to happen. + * + * All the driver data structures are supposed to be + * allocated from the same 4 GB memory window, so there + * is a 1 to 1 relationship between DSA and driver data + * structures. Since we are careful :) to invalidate the + * DSA when we complete a command or when the SCRIPTS + * pushes a DSA into a queue, we can trust it when it + * points to a CCB. + */ +static void sym_recover_scsi_int (struct sym_hcb *np, u_char hsts) +{ + u32 dsp = INL(np, nc_dsp); + u32 dsa = INL(np, nc_dsa); + struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); + + /* + * If we haven't been interrupted inside the SCRIPTS + * critical pathes, we can safely restart the SCRIPTS + * and trust the DSA value if it matches a CCB. + */ + if ((!(dsp > SCRIPTA_BA(np, getjob_begin) && + dsp < SCRIPTA_BA(np, getjob_end) + 1)) && + (!(dsp > SCRIPTA_BA(np, ungetjob) && + dsp < SCRIPTA_BA(np, reselect) + 1)) && + (!(dsp > SCRIPTB_BA(np, sel_for_abort) && + dsp < SCRIPTB_BA(np, sel_for_abort_1) + 1)) && + (!(dsp > SCRIPTA_BA(np, done) && + dsp < SCRIPTA_BA(np, done_end) + 1))) { + OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ + OUTB(np, nc_stest3, TE|CSF); /* clear scsi fifo */ + /* + * If we have a CCB, let the SCRIPTS call us back for + * the handling of the error with SCRATCHA filled with + * STARTPOS. This way, we will be able to freeze the + * device queue and requeue awaiting IOs. 
+ */ + if (cp) { + cp->host_status = hsts; + OUTL_DSP(np, SCRIPTA_BA(np, complete_error)); + } + /* + * Otherwise just restart the SCRIPTS. + */ + else { + OUTL(np, nc_dsa, 0xffffff); + OUTL_DSP(np, SCRIPTA_BA(np, start)); + } + } + else + goto reset_all; + + return; + +reset_all: + sym_start_reset(np); +} + +/* + * chip exception handler for selection timeout + */ +static void sym_int_sto (struct sym_hcb *np) +{ + u32 dsp = INL(np, nc_dsp); + + if (DEBUG_FLAGS & DEBUG_TINY) printf ("T"); + + if (dsp == SCRIPTA_BA(np, wf_sel_done) + 8) + sym_recover_scsi_int(np, HS_SEL_TIMEOUT); + else + sym_start_reset(np); +} + +/* + * chip exception handler for unexpected disconnect + */ +static void sym_int_udc (struct sym_hcb *np) +{ + printf ("%s: unexpected disconnect\n", sym_name(np)); + sym_recover_scsi_int(np, HS_UNEXPECTED); +} + +/* + * chip exception handler for SCSI bus mode change + * + * spi2-r12 11.2.3 says a transceiver mode change must + * generate a reset event and a device that detects a reset + * event shall initiate a hard reset. It says also that a + * device that detects a mode change shall set data transfer + * mode to eight bit asynchronous, etc... + * So, just reinitializing all except chip should be enough. + */ +static void sym_int_sbmc(struct Scsi_Host *shost) +{ + struct sym_hcb *np = sym_get_hcb(shost); + u_char scsi_mode = INB(np, nc_stest4) & SMODE; + + /* + * Notify user. + */ + printf("%s: SCSI BUS mode change from %s to %s.\n", sym_name(np), + sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode)); + + /* + * Should suspend command processing for a few seconds and + * reinitialize all except the chip. + */ + sym_start_up(shost, 2); +} + +/* + * chip exception handler for SCSI parity error. + * + * When the chip detects a SCSI parity error and is + * currently executing a (CH)MOV instruction, it does + * not interrupt immediately, but tries to finish the + * transfer of the current scatter entry before + * interrupting. The following situations may occur: + * + * - The complete scatter entry has been transferred + * without the device having changed phase. + * The chip will then interrupt with the DSP pointing + * to the instruction that follows the MOV. + * + * - A phase mismatch occurs before the MOV finished + * and phase errors are to be handled by the C code. + * The chip will then interrupt with both PAR and MA + * conditions set. + * + * - A phase mismatch occurs before the MOV finished and + * phase errors are to be handled by SCRIPTS. + * The chip will load the DSP with the phase mismatch + * JUMP address and interrupt the host processor. + */ +static void sym_int_par (struct sym_hcb *np, u_short sist) +{ + u_char hsts = INB(np, HS_PRT); + u32 dsp = INL(np, nc_dsp); + u32 dbc = INL(np, nc_dbc); + u32 dsa = INL(np, nc_dsa); + u_char sbcl = INB(np, nc_sbcl); + u_char cmd = dbc >> 24; + int phase = cmd & 7; + struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); + + if (printk_ratelimit()) + printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", + sym_name(np), hsts, dbc, sbcl); + + /* + * Check that the chip is connected to the SCSI BUS. + */ + if (!(INB(np, nc_scntl1) & ISCON)) { + sym_recover_scsi_int(np, HS_UNEXPECTED); + return; + } + + /* + * If the nexus is not clearly identified, reset the bus. + * We will try to do better later. + */ + if (!cp) + goto reset_all; + + /* + * Check instruction was a MOV, direction was INPUT and + * ATN is asserted. 
+ */ + if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8)) + goto reset_all; + + /* + * Keep track of the parity error. + */ + OUTONB(np, HF_PRT, HF_EXT_ERR); + cp->xerr_status |= XE_PARITY_ERR; + + /* + * Prepare the message to send to the device. + */ + np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR; + + /* + * If the old phase was DATA IN phase, we have to deal with + * the 3 situations described above. + * For other input phases (MSG IN and STATUS), the device + * must resend the whole thing that failed parity checking + * or signal error. So, jumping to dispatcher should be OK. + */ + if (phase == 1 || phase == 5) { + /* Phase mismatch handled by SCRIPTS */ + if (dsp == SCRIPTB_BA(np, pm_handle)) + OUTL_DSP(np, dsp); + /* Phase mismatch handled by the C code */ + else if (sist & MA) + sym_int_ma (np); + /* No phase mismatch occurred */ + else { + sym_set_script_dp (np, cp, dsp); + OUTL_DSP(np, SCRIPTA_BA(np, dispatch)); + } + } + else if (phase == 7) /* We definitely cannot handle parity errors */ +#if 1 /* in message-in phase due to the relection */ + goto reset_all; /* path and various message anticipations. */ +#else + OUTL_DSP(np, SCRIPTA_BA(np, clrack)); +#endif + else + OUTL_DSP(np, SCRIPTA_BA(np, dispatch)); + return; + +reset_all: + sym_start_reset(np); + return; +} + +/* + * chip exception handler for phase errors. + * + * We have to construct a new transfer descriptor, + * to transfer the rest of the current block. + */ +static void sym_int_ma (struct sym_hcb *np) +{ + u32 dbc; + u32 rest; + u32 dsp; + u32 dsa; + u32 nxtdsp; + u32 *vdsp; + u32 oadr, olen; + u32 *tblp; + u32 newcmd; + u_int delta; + u_char cmd; + u_char hflags, hflags0; + struct sym_pmc *pm; + struct sym_ccb *cp; + + dsp = INL(np, nc_dsp); + dbc = INL(np, nc_dbc); + dsa = INL(np, nc_dsa); + + cmd = dbc >> 24; + rest = dbc & 0xffffff; + delta = 0; + + /* + * locate matching cp if any. + */ + cp = sym_ccb_from_dsa(np, dsa); + + /* + * Donnot take into account dma fifo and various buffers in + * INPUT phase since the chip flushes everything before + * raising the MA interrupt for interrupted INPUT phases. + * For DATA IN phase, we will check for the SWIDE later. + */ + if ((cmd & 7) != 1 && (cmd & 7) != 5) { + u_char ss0, ss2; + + if (np->features & FE_DFBC) + delta = INW(np, nc_dfbc); + else { + u32 dfifo; + + /* + * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership. + */ + dfifo = INL(np, nc_dfifo); + + /* + * Calculate remaining bytes in DMA fifo. + * (CTEST5 = dfifo >> 16) + */ + if (dfifo & (DFS << 16)) + delta = ((((dfifo >> 8) & 0x300) | + (dfifo & 0xff)) - rest) & 0x3ff; + else + delta = ((dfifo & 0xff) - rest) & 0x7f; + } + + /* + * The data in the dma fifo has not been transferred to + * the target -> add the amount to the rest + * and clear the data. + * Check the sstat2 register in case of wide transfer. + */ + rest += delta; + ss0 = INB(np, nc_sstat0); + if (ss0 & OLF) rest++; + if (!(np->features & FE_C10)) + if (ss0 & ORF) rest++; + if (cp && (cp->phys.select.sel_scntl3 & EWS)) { + ss2 = INB(np, nc_sstat2); + if (ss2 & OLF1) rest++; + if (!(np->features & FE_C10)) + if (ss2 & ORF1) rest++; + } + + /* + * Clear fifos. 
+ */ + OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */ + OUTB(np, nc_stest3, TE|CSF); /* scsi fifo */ + } + + /* + * log the information + */ + if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) + printf ("P%x%x RL=%d D=%d ", cmd&7, INB(np, nc_sbcl)&7, + (unsigned) rest, (unsigned) delta); + + /* + * try to find the interrupted script command, + * and the address at which to continue. + */ + vdsp = NULL; + nxtdsp = 0; + if (dsp > np->scripta_ba && + dsp <= np->scripta_ba + np->scripta_sz) { + vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8)); + nxtdsp = dsp; + } + else if (dsp > np->scriptb_ba && + dsp <= np->scriptb_ba + np->scriptb_sz) { + vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8)); + nxtdsp = dsp; + } + + /* + * log the information + */ + if (DEBUG_FLAGS & DEBUG_PHASE) { + printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", + cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd); + } + + if (!vdsp) { + printf ("%s: interrupted SCRIPT address not found.\n", + sym_name (np)); + goto reset_all; + } + + if (!cp) { + printf ("%s: SCSI phase error fixup: CCB already dequeued.\n", + sym_name (np)); + goto reset_all; + } + + /* + * get old startaddress and old length. + */ + oadr = scr_to_cpu(vdsp[1]); + + if (cmd & 0x10) { /* Table indirect */ + tblp = (u32 *) ((char*) &cp->phys + oadr); + olen = scr_to_cpu(tblp[0]); + oadr = scr_to_cpu(tblp[1]); + } else { + tblp = (u32 *) 0; + olen = scr_to_cpu(vdsp[0]) & 0xffffff; + } + + if (DEBUG_FLAGS & DEBUG_PHASE) { + printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n", + (unsigned) (scr_to_cpu(vdsp[0]) >> 24), + tblp, + (unsigned) olen, + (unsigned) oadr); + } + + /* + * check cmd against assumed interrupted script command. + * If dt data phase, the MOVE instruction hasn't bit 4 of + * the phase. + */ + if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) { + sym_print_addr(cp->cmd, + "internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n", + cmd, scr_to_cpu(vdsp[0]) >> 24); + + goto reset_all; + } + + /* + * if old phase not dataphase, leave here. + */ + if (cmd & 2) { + sym_print_addr(cp->cmd, + "phase change %x-%x %d@%08x resid=%d.\n", + cmd&7, INB(np, nc_sbcl)&7, (unsigned)olen, + (unsigned)oadr, (unsigned)rest); + goto unexpected_phase; + } + + /* + * Choose the correct PM save area. + * + * Look at the PM_SAVE SCRIPT if you want to understand + * this stuff. The equivalent code is implemented in + * SCRIPTS for the 895A, 896 and 1010 that are able to + * handle PM from the SCRIPTS processor. + */ + hflags0 = INB(np, HF_PRT); + hflags = hflags0; + + if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) { + if (hflags & HF_IN_PM0) + nxtdsp = scr_to_cpu(cp->phys.pm0.ret); + else if (hflags & HF_IN_PM1) + nxtdsp = scr_to_cpu(cp->phys.pm1.ret); + + if (hflags & HF_DP_SAVED) + hflags ^= HF_ACT_PM; + } + + if (!(hflags & HF_ACT_PM)) { + pm = &cp->phys.pm0; + newcmd = SCRIPTA_BA(np, pm0_data); + } + else { + pm = &cp->phys.pm1; + newcmd = SCRIPTA_BA(np, pm1_data); + } + + hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED); + if (hflags != hflags0) + OUTB(np, HF_PRT, hflags); + + /* + * fillin the phase mismatch context + */ + pm->sg.addr = cpu_to_scr(oadr + olen - rest); + pm->sg.size = cpu_to_scr(rest); + pm->ret = cpu_to_scr(nxtdsp); + + /* + * If we have a SWIDE, + * - prepare the address to write the SWIDE from SCRIPTS, + * - compute the SCRIPTS address to restart from, + * - move current data pointer context by one byte. 
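+ *
+ * A small worked sketch of the adjustment done below: if the
+ * interrupted context was pm->sg = { addr = A, size = N }, then
+ * cp->phys.wresid is set to move the single wide residue byte
+ * at A, and the context becomes pm->sg = { addr = A + 1, N - 1 },
+ * so no byte is transferred twice.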
+ */ + nxtdsp = SCRIPTA_BA(np, dispatch); + if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) && + (INB(np, nc_scntl2) & WSR)) { + u32 tmp; + + /* + * Set up the table indirect for the MOVE + * of the residual byte and adjust the data + * pointer context. + */ + tmp = scr_to_cpu(pm->sg.addr); + cp->phys.wresid.addr = cpu_to_scr(tmp); + pm->sg.addr = cpu_to_scr(tmp + 1); + tmp = scr_to_cpu(pm->sg.size); + cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1); + pm->sg.size = cpu_to_scr(tmp - 1); + + /* + * If only the residual byte is to be moved, + * no PM context is needed. + */ + if ((tmp&0xffffff) == 1) + newcmd = pm->ret; + + /* + * Prepare the address of SCRIPTS that will + * move the residual byte to memory. + */ + nxtdsp = SCRIPTB_BA(np, wsr_ma_helper); + } + + if (DEBUG_FLAGS & DEBUG_PHASE) { + sym_print_addr(cp->cmd, "PM %x %x %x / %x %x %x.\n", + hflags0, hflags, newcmd, + (unsigned)scr_to_cpu(pm->sg.addr), + (unsigned)scr_to_cpu(pm->sg.size), + (unsigned)scr_to_cpu(pm->ret)); + } + + /* + * Restart the SCRIPTS processor. + */ + sym_set_script_dp (np, cp, newcmd); + OUTL_DSP(np, nxtdsp); + return; + + /* + * Unexpected phase changes that occurs when the current phase + * is not a DATA IN or DATA OUT phase are due to error conditions. + * Such event may only happen when the SCRIPTS is using a + * multibyte SCSI MOVE. + * + * Phase change Some possible cause + * + * COMMAND --> MSG IN SCSI parity error detected by target. + * COMMAND --> STATUS Bad command or refused by target. + * MSG OUT --> MSG IN Message rejected by target. + * MSG OUT --> COMMAND Bogus target that discards extended + * negotiation messages. + * + * The code below does not care of the new phase and so + * trusts the target. Why to annoy it ? + * If the interrupted phase is COMMAND phase, we restart at + * dispatcher. + * If a target does not get all the messages after selection, + * the code assumes blindly that the target discards extended + * messages and clears the negotiation status. + * If the target does not want all our response to negotiation, + * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids + * bloat for such a should_not_happen situation). + * In all other situation, we reset the BUS. + * Are these assumptions reasonable ? (Wait and see ...) + */ +unexpected_phase: + dsp -= 8; + nxtdsp = 0; + + switch (cmd & 7) { + case 2: /* COMMAND phase */ + nxtdsp = SCRIPTA_BA(np, dispatch); + break; +#if 0 + case 3: /* STATUS phase */ + nxtdsp = SCRIPTA_BA(np, dispatch); + break; +#endif + case 6: /* MSG OUT phase */ + /* + * If the device may want to use untagged when we want + * tagged, we prepare an IDENTIFY without disc. granted, + * since we will not be able to handle reselect. + * Otherwise, we just don't care. 
+ */ + if (dsp == SCRIPTA_BA(np, send_ident)) { + if (cp->tag != NO_TAG && olen - rest <= 3) { + cp->host_status = HS_BUSY; + np->msgout[0] = IDENTIFY(0, cp->lun); + nxtdsp = SCRIPTB_BA(np, ident_break_atn); + } + else + nxtdsp = SCRIPTB_BA(np, ident_break); + } + else if (dsp == SCRIPTB_BA(np, send_wdtr) || + dsp == SCRIPTB_BA(np, send_sdtr) || + dsp == SCRIPTB_BA(np, send_ppr)) { + nxtdsp = SCRIPTB_BA(np, nego_bad_phase); + if (dsp == SCRIPTB_BA(np, send_ppr)) { + struct scsi_device *dev = cp->cmd->device; + dev->ppr = 0; + } + } + break; +#if 0 + case 7: /* MSG IN phase */ + nxtdsp = SCRIPTA_BA(np, clrack); + break; +#endif + } + + if (nxtdsp) { + OUTL_DSP(np, nxtdsp); + return; + } + +reset_all: + sym_start_reset(np); +} + +/* + * chip interrupt handler + * + * In normal situations, interrupt conditions occur one at + * a time. But when something bad happens on the SCSI BUS, + * the chip may raise several interrupt flags before + * stopping and interrupting the CPU. The additionnal + * interrupt flags are stacked in some extra registers + * after the SIP and/or DIP flag has been raised in the + * ISTAT. After the CPU has read the interrupt condition + * flag from SIST or DSTAT, the chip unstacks the other + * interrupt flags and sets the corresponding bits in + * SIST or DSTAT. Since the chip starts stacking once the + * SIP or DIP flag is set, there is a small window of time + * where the stacking does not occur. + * + * Typically, multiple interrupt conditions may happen in + * the following situations: + * + * - SCSI parity error + Phase mismatch (PAR|MA) + * When an parity error is detected in input phase + * and the device switches to msg-in phase inside a + * block MOV. + * - SCSI parity error + Unexpected disconnect (PAR|UDC) + * When a stupid device does not want to handle the + * recovery of an SCSI parity error. + * - Some combinations of STO, PAR, UDC, ... + * When using non compliant SCSI stuff, when user is + * doing non compliant hot tampering on the BUS, when + * something really bad happens to a device, etc ... + * + * The heuristic suggested by SYMBIOS to handle + * multiple interrupts is to try unstacking all + * interrupts conditions and to handle them on some + * priority based on error severity. + * This will work when the unstacking has been + * successful, but we cannot be 100 % sure of that, + * since the CPU may have been faster to unstack than + * the chip is able to stack. Hmmm ... But it seems that + * such a situation is very unlikely to happen. + * + * If this happen, for example STO caught by the CPU + * then UDC happenning before the CPU have restarted + * the SCRIPTS, the driver may wrongly complete the + * same command on UDC, since the SCRIPTS didn't restart + * and the DSA still points to the same command. + * We avoid this situation by setting the DSA to an + * invalid value when the CCB is completed and before + * restarting the SCRIPTS. + * + * Another issue is that we need some section of our + * recovery procedures to be somehow uninterruptible but + * the SCRIPTS processor does not provides such a + * feature. For this reason, we handle recovery preferently + * from the C code and check against some SCRIPTS critical + * sections from the C code. + * + * Hopefully, the interrupt handling of the driver is now + * able to resist to weird BUS error conditions, but donnot + * ask me for any guarantee that it will never fail. :-) + * Use at your own decision and risk. 
+ */ + +irqreturn_t sym_interrupt(struct Scsi_Host *shost) +{ + struct sym_data *sym_data = shost_priv(shost); + struct sym_hcb *np = sym_data->ncb; + struct pci_dev *pdev = sym_data->pdev; + u_char istat, istatc; + u_char dstat; + u_short sist; + + /* + * interrupt on the fly ? + * (SCRIPTS may still be running) + * + * A `dummy read' is needed to ensure that the + * clear of the INTF flag reaches the device + * and that posted writes are flushed to memory + * before the scanning of the DONE queue. + * Note that SCRIPTS also (dummy) read to memory + * prior to deliver the INTF interrupt condition. + */ + istat = INB(np, nc_istat); + if (istat & INTF) { + OUTB(np, nc_istat, (istat & SIGP) | INTF | np->istat_sem); + istat |= INB(np, nc_istat); /* DUMMY READ */ + if (DEBUG_FLAGS & DEBUG_TINY) printf ("F "); + sym_wakeup_done(np); + } + + if (!(istat & (SIP|DIP))) + return (istat & INTF) ? IRQ_HANDLED : IRQ_NONE; + +#if 0 /* We should never get this one */ + if (istat & CABRT) + OUTB(np, nc_istat, CABRT); +#endif + + /* + * PAR and MA interrupts may occur at the same time, + * and we need to know of both in order to handle + * this situation properly. We try to unstack SCSI + * interrupts for that reason. BTW, I dislike a LOT + * such a loop inside the interrupt routine. + * Even if DMA interrupt stacking is very unlikely to + * happen, we also try unstacking these ones, since + * this has no performance impact. + */ + sist = 0; + dstat = 0; + istatc = istat; + do { + if (istatc & SIP) + sist |= INW(np, nc_sist); + if (istatc & DIP) + dstat |= INB(np, nc_dstat); + istatc = INB(np, nc_istat); + istat |= istatc; + + /* Prevent deadlock waiting on a condition that may + * never clear. */ + if (unlikely(sist == 0xffff && dstat == 0xff)) { + if (pci_channel_offline(pdev)) + return IRQ_NONE; + } + } while (istatc & (SIP|DIP)); + + if (DEBUG_FLAGS & DEBUG_TINY) + printf ("<%d|%x:%x|%x:%x>", + (int)INB(np, nc_scr0), + dstat,sist, + (unsigned)INL(np, nc_dsp), + (unsigned)INL(np, nc_dbc)); + /* + * On paper, a memory read barrier may be needed here to + * prevent out of order LOADs by the CPU from having + * prefetched stale data prior to DMA having occurred. + * And since we are paranoid ... :) + */ + MEMORY_READ_BARRIER(); + + /* + * First, interrupts we want to service cleanly. + * + * Phase mismatch (MA) is the most frequent interrupt + * for chip earlier than the 896 and so we have to service + * it as quickly as possible. + * A SCSI parity error (PAR) may be combined with a phase + * mismatch condition (MA). + * Programmed interrupts (SIR) are used to call the C code + * from SCRIPTS. + * The single step interrupt (SSI) is not used in this + * driver. + */ + if (!(sist & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) && + !(dstat & (MDPE|BF|ABRT|IID))) { + if (sist & PAR) sym_int_par (np, sist); + else if (sist & MA) sym_int_ma (np); + else if (dstat & SIR) sym_int_sir(np); + else if (dstat & SSI) OUTONB_STD(); + else goto unknown_int; + return IRQ_HANDLED; + } + + /* + * Now, interrupts that donnot happen in normal + * situations and that we may need to recover from. + * + * On SCSI RESET (RST), we reset everything. + * On SCSI BUS MODE CHANGE (SBMC), we complete all + * active CCBs with RESET status, prepare all devices + * for negotiating again and restart the SCRIPTS. + * On STO and UDC, we complete the CCB with the corres- + * ponding status and restart the SCRIPTS. 
+ */ + if (sist & RST) { + printf("%s: SCSI BUS reset detected.\n", sym_name(np)); + sym_start_up(shost, 1); + return IRQ_HANDLED; + } + + OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ + OUTB(np, nc_stest3, TE|CSF); /* clear scsi fifo */ + + if (!(sist & (GEN|HTH|SGE)) && + !(dstat & (MDPE|BF|ABRT|IID))) { + if (sist & SBMC) sym_int_sbmc(shost); + else if (sist & STO) sym_int_sto (np); + else if (sist & UDC) sym_int_udc (np); + else goto unknown_int; + return IRQ_HANDLED; + } + + /* + * Now, interrupts we are not able to recover cleanly. + * + * Log message for hard errors. + * Reset everything. + */ + + sym_log_hard_error(shost, sist, dstat); + + if ((sist & (GEN|HTH|SGE)) || + (dstat & (MDPE|BF|ABRT|IID))) { + sym_start_reset(np); + return IRQ_HANDLED; + } + +unknown_int: + /* + * We just miss the cause of the interrupt. :( + * Print a message. The timeout will do the real work. + */ + printf( "%s: unknown interrupt(s) ignored, " + "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n", + sym_name(np), istat, dstat, sist); + return IRQ_NONE; +} + +/* + * Dequeue from the START queue all CCBs that match + * a given target/lun/task condition (-1 means all), + * and move them from the BUSY queue to the COMP queue + * with DID_SOFT_ERROR status condition. + * This function is used during error handling/recovery. + * It is called with SCRIPTS not running. + */ +static int +sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task) +{ + int j; + struct sym_ccb *cp; + + /* + * Make sure the starting index is within range. + */ + assert((i >= 0) && (i < 2*MAX_QUEUE)); + + /* + * Walk until end of START queue and dequeue every job + * that matches the target/lun/task condition. + */ + j = i; + while (i != np->squeueput) { + cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i])); + assert(cp); +#ifdef SYM_CONF_IARB_SUPPORT + /* Forget hints for IARB, they may be no longer relevant */ + cp->host_flags &= ~HF_HINT_IARB; +#endif + if ((target == -1 || cp->target == target) && + (lun == -1 || cp->lun == lun) && + (task == -1 || cp->tag == task)) { +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + sym_set_cam_status(cp->cmd, DID_SOFT_ERROR); +#else + sym_set_cam_status(cp->cmd, DID_REQUEUE); +#endif + sym_remque(&cp->link_ccbq); + sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); + } + else { + if (i != j) + np->squeue[j] = np->squeue[i]; + if ((j += 2) >= MAX_QUEUE*2) j = 0; + } + if ((i += 2) >= MAX_QUEUE*2) i = 0; + } + if (i != j) /* Copy back the idle task if needed */ + np->squeue[j] = np->squeue[i]; + np->squeueput = j; /* Update our current start queue pointer */ + + return (i - j) / 2; +} + +/* + * chip handler for bad SCSI status condition + * + * In case of bad SCSI status, we unqueue all the tasks + * currently queued to the controller but not yet started + * and then restart the SCRIPTS processor immediately. + * + * QUEUE FULL and BUSY conditions are handled the same way. + * Basically all the not yet started tasks are requeued in + * device queue and the queue is frozen until a completion. + * + * For CHECK CONDITION and COMMAND TERMINATED status, we use + * the CCB of the failed command to prepare a REQUEST SENSE + * SCSI command and queue it to the controller queue. + * + * SCRATCHA is assumed to have been loaded with STARTPOS + * before the SCRIPTS called the C code. 
+ */ +static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb *cp) +{ + u32 startp; + u_char s_status = cp->ssss_status; + u_char h_flags = cp->host_flags; + int msglen; + int i; + + /* + * Compute the index of the next job to start from SCRIPTS. + */ + i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; + + /* + * The last CCB queued used for IARB hint may be + * no longer relevant. Forget it. + */ +#ifdef SYM_CONF_IARB_SUPPORT + if (np->last_cp) + np->last_cp = 0; +#endif + + /* + * Now deal with the SCSI status. + */ + switch(s_status) { + case S_BUSY: + case S_QUEUE_FULL: + if (sym_verbose >= 2) { + sym_print_addr(cp->cmd, "%s\n", + s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n"); + } + fallthrough; + default: /* S_INT, S_INT_COND_MET, S_CONFLICT */ + sym_complete_error (np, cp); + break; + case S_TERMINATED: + case S_CHECK_COND: + /* + * If we get an SCSI error when requesting sense, give up. + */ + if (h_flags & HF_SENSE) { + sym_complete_error (np, cp); + break; + } + + /* + * Dequeue all queued CCBs for that device not yet started, + * and restart the SCRIPTS processor immediately. + */ + sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); + OUTL_DSP(np, SCRIPTA_BA(np, start)); + + /* + * Save some info of the actual IO. + * Compute the data residual. + */ + cp->sv_scsi_status = cp->ssss_status; + cp->sv_xerr_status = cp->xerr_status; + cp->sv_resid = sym_compute_residual(np, cp); + + /* + * Prepare all needed data structures for + * requesting sense data. + */ + + cp->scsi_smsg2[0] = IDENTIFY(0, cp->lun); + msglen = 1; + + /* + * If we are currently using anything different from + * async. 8 bit data transfers with that target, + * start a negotiation, since the device may want + * to report us a UNIT ATTENTION condition due to + * a cause we currently ignore, and we donnot want + * to be stuck with WIDE and/or SYNC data transfer. + * + * cp->nego_status is filled by sym_prepare_nego(). + */ + cp->nego_status = 0; + msglen += sym_prepare_nego(np, cp, &cp->scsi_smsg2[msglen]); + /* + * Message table indirect structure. + */ + cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg2); + cp->phys.smsg.size = cpu_to_scr(msglen); + + /* + * sense command + */ + cp->phys.cmd.addr = CCB_BA(cp, sensecmd); + cp->phys.cmd.size = cpu_to_scr(6); + + /* + * patch requested size into sense command + */ + cp->sensecmd[0] = REQUEST_SENSE; + cp->sensecmd[1] = 0; + if (cp->cmd->device->scsi_level <= SCSI_2 && cp->lun <= 7) + cp->sensecmd[1] = cp->lun << 5; + cp->sensecmd[4] = SYM_SNS_BBUF_LEN; + cp->data_len = SYM_SNS_BBUF_LEN; + + /* + * sense data + */ + memset(cp->sns_bbuf, 0, SYM_SNS_BBUF_LEN); + cp->phys.sense.addr = CCB_BA(cp, sns_bbuf); + cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN); + + /* + * requeue the command. + */ + startp = SCRIPTB_BA(np, sdata_in); + + cp->phys.head.savep = cpu_to_scr(startp); + cp->phys.head.lastp = cpu_to_scr(startp); + cp->startp = cpu_to_scr(startp); + cp->goalp = cpu_to_scr(startp + 16); + + cp->host_xflags = 0; + cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; + cp->ssss_status = S_ILLEGAL; + cp->host_flags = (HF_SENSE|HF_DATA_IN); + cp->xerr_status = 0; + cp->extra_bytes = 0; + + cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select)); + + /* + * Requeue the command. + */ + sym_put_start_queue(np, cp); + + /* + * Give back to upper layer everything we have dequeued. 
+ */ + sym_flush_comp_queue(np, 0); + break; + } +} + +/* + * After a device has accepted some management message + * as BUS DEVICE RESET, ABORT TASK, etc ..., or when + * a device signals a UNIT ATTENTION condition, some + * tasks are thrown away by the device. We are required + * to reflect that on our tasks list since the device + * will never complete these tasks. + * + * This function move from the BUSY queue to the COMP + * queue all disconnected CCBs for a given target that + * match the following criteria: + * - lun=-1 means any logical UNIT otherwise a given one. + * - task=-1 means any task, otherwise a given one. + */ +int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task) +{ + SYM_QUEHEAD qtmp, *qp; + int i = 0; + struct sym_ccb *cp; + + /* + * Move the entire BUSY queue to our temporary queue. + */ + sym_que_init(&qtmp); + sym_que_splice(&np->busy_ccbq, &qtmp); + sym_que_init(&np->busy_ccbq); + + /* + * Put all CCBs that matches our criteria into + * the COMP queue and put back other ones into + * the BUSY queue. + */ + while ((qp = sym_remque_head(&qtmp)) != NULL) { + struct scsi_cmnd *cmd; + cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); + cmd = cp->cmd; + if (cp->host_status != HS_DISCONNECT || + cp->target != target || + (lun != -1 && cp->lun != lun) || + (task != -1 && + (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) { + sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); + continue; + } + sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); + + /* Preserve the software timeout condition */ + if (sym_get_cam_status(cmd) != DID_TIME_OUT) + sym_set_cam_status(cmd, cam_status); + ++i; +#if 0 +printf("XXXX TASK @%p CLEARED\n", cp); +#endif + } + return i; +} + +/* + * chip handler for TASKS recovery + * + * We cannot safely abort a command, while the SCRIPTS + * processor is running, since we just would be in race + * with it. + * + * As long as we have tasks to abort, we keep the SEM + * bit set in the ISTAT. When this bit is set, the + * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED) + * each time it enters the scheduler. + * + * If we have to reset a target, clear tasks of a unit, + * or to perform the abort of a disconnected job, we + * restart the SCRIPTS for selecting the target. Once + * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED). + * If it loses arbitration, the SCRIPTS will interrupt again + * the next time it will enter its scheduler, and so on ... + * + * On SIR_TARGET_SELECTED, we scan for the more + * appropriate thing to do: + * + * - If nothing, we just sent a M_ABORT message to the + * target to get rid of the useless SCSI bus ownership. + * According to the specs, no tasks shall be affected. + * - If the target is to be reset, we send it a M_RESET + * message. + * - If a logical UNIT is to be cleared , we send the + * IDENTIFY(lun) + M_ABORT. + * - If an untagged task is to be aborted, we send the + * IDENTIFY(lun) + M_ABORT. + * - If a tagged task is to be aborted, we send the + * IDENTIFY(lun) + task attributes + M_ABORT_TAG. + * + * Once our 'kiss of death' :) message has been accepted + * by the target, the SCRIPTS interrupts again + * (SIR_ABORT_SENT). On this interrupt, we complete + * all the CCBs that should have been aborted by the + * target according to our message. 
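+ *
+ * As an illustration (byte values are only an example), aborting
+ * a SIMPLE TAG task with tag 5 on LUN 2 makes the SCRIPTS send
+ *
+ *	IDENTIFY(0, 2), M_SIMPLE_TAG, 5, M_ABORT_TAG
+ *
+ * which is the abrt_msg[] built by the handler below.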
+ */ +static void sym_sir_task_recovery(struct sym_hcb *np, int num) +{ + SYM_QUEHEAD *qp; + struct sym_ccb *cp; + struct sym_tcb *tp = NULL; /* gcc isn't quite smart enough yet */ + struct scsi_target *starget; + int target=-1, lun=-1, task; + int i, k; + + switch(num) { + /* + * The SCRIPTS processor stopped before starting + * the next command in order to allow us to perform + * some task recovery. + */ + case SIR_SCRIPT_STOPPED: + /* + * Do we have any target to reset or unit to clear ? + */ + for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { + tp = &np->target[i]; + if (tp->to_reset || + (tp->lun0p && tp->lun0p->to_clear)) { + target = i; + break; + } + if (!tp->lunmp) + continue; + for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) { + if (tp->lunmp[k] && tp->lunmp[k]->to_clear) { + target = i; + break; + } + } + if (target != -1) + break; + } + + /* + * If not, walk the busy queue for any + * disconnected CCB to be aborted. + */ + if (target == -1) { + FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { + cp = sym_que_entry(qp,struct sym_ccb,link_ccbq); + if (cp->host_status != HS_DISCONNECT) + continue; + if (cp->to_abort) { + target = cp->target; + break; + } + } + } + + /* + * If some target is to be selected, + * prepare and start the selection. + */ + if (target != -1) { + tp = &np->target[target]; + np->abrt_sel.sel_id = target; + np->abrt_sel.sel_scntl3 = tp->head.wval; + np->abrt_sel.sel_sxfer = tp->head.sval; + OUTL(np, nc_dsa, np->hcb_ba); + OUTL_DSP(np, SCRIPTB_BA(np, sel_for_abort)); + return; + } + + /* + * Now look for a CCB to abort that haven't started yet. + * Btw, the SCRIPTS processor is still stopped, so + * we are not in race. + */ + i = 0; + cp = NULL; + FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { + cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); + if (cp->host_status != HS_BUSY && + cp->host_status != HS_NEGOTIATE) + continue; + if (!cp->to_abort) + continue; +#ifdef SYM_CONF_IARB_SUPPORT + /* + * If we are using IMMEDIATE ARBITRATION, we donnot + * want to cancel the last queued CCB, since the + * SCRIPTS may have anticipated the selection. + */ + if (cp == np->last_cp) { + cp->to_abort = 0; + continue; + } +#endif + i = 1; /* Means we have found some */ + break; + } + if (!i) { + /* + * We are done, so we donnot need + * to synchronize with the SCRIPTS anylonger. + * Remove the SEM flag from the ISTAT. + */ + np->istat_sem = 0; + OUTB(np, nc_istat, SIGP); + break; + } + /* + * Compute index of next position in the start + * queue the SCRIPTS intends to start and dequeue + * all CCBs for that device that haven't been started. + */ + i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; + i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); + + /* + * Make sure at least our IO to abort has been dequeued. + */ +#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING + assert(i && sym_get_cam_status(cp->cmd) == DID_SOFT_ERROR); +#else + sym_remque(&cp->link_ccbq); + sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); +#endif + /* + * Keep track in cam status of the reason of the abort. + */ + if (cp->to_abort == 2) + sym_set_cam_status(cp->cmd, DID_TIME_OUT); + else + sym_set_cam_status(cp->cmd, DID_ABORT); + + /* + * Complete with error everything that we have dequeued. + */ + sym_flush_comp_queue(np, 0); + break; + /* + * The SCRIPTS processor has selected a target + * we may have some manual recovery to perform for. 
+ */ + case SIR_TARGET_SELECTED: + target = INB(np, nc_sdid) & 0xf; + tp = &np->target[target]; + + np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg)); + + /* + * If the target is to be reset, prepare a + * M_RESET message and clear the to_reset flag + * since we donnot expect this operation to fail. + */ + if (tp->to_reset) { + np->abrt_msg[0] = M_RESET; + np->abrt_tbl.size = 1; + tp->to_reset = 0; + break; + } + + /* + * Otherwise, look for some logical unit to be cleared. + */ + if (tp->lun0p && tp->lun0p->to_clear) + lun = 0; + else if (tp->lunmp) { + for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) { + if (tp->lunmp[k] && tp->lunmp[k]->to_clear) { + lun = k; + break; + } + } + } + + /* + * If a logical unit is to be cleared, prepare + * an IDENTIFY(lun) + ABORT MESSAGE. + */ + if (lun != -1) { + struct sym_lcb *lp = sym_lp(tp, lun); + lp->to_clear = 0; /* We don't expect to fail here */ + np->abrt_msg[0] = IDENTIFY(0, lun); + np->abrt_msg[1] = M_ABORT; + np->abrt_tbl.size = 2; + break; + } + + /* + * Otherwise, look for some disconnected job to + * abort for this target. + */ + i = 0; + cp = NULL; + FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { + cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); + if (cp->host_status != HS_DISCONNECT) + continue; + if (cp->target != target) + continue; + if (!cp->to_abort) + continue; + i = 1; /* Means we have some */ + break; + } + + /* + * If we have none, probably since the device has + * completed the command before we won abitration, + * send a M_ABORT message without IDENTIFY. + * According to the specs, the device must just + * disconnect the BUS and not abort any task. + */ + if (!i) { + np->abrt_msg[0] = M_ABORT; + np->abrt_tbl.size = 1; + break; + } + + /* + * We have some task to abort. + * Set the IDENTIFY(lun) + */ + np->abrt_msg[0] = IDENTIFY(0, cp->lun); + + /* + * If we want to abort an untagged command, we + * will send a IDENTIFY + M_ABORT. + * Otherwise (tagged command), we will send + * a IDENTITFY + task attributes + ABORT TAG. + */ + if (cp->tag == NO_TAG) { + np->abrt_msg[1] = M_ABORT; + np->abrt_tbl.size = 2; + } else { + np->abrt_msg[1] = cp->scsi_smsg[1]; + np->abrt_msg[2] = cp->scsi_smsg[2]; + np->abrt_msg[3] = M_ABORT_TAG; + np->abrt_tbl.size = 4; + } + /* + * Keep track of software timeout condition, since the + * peripheral driver may not count retries on abort + * conditions not due to timeout. + */ + if (cp->to_abort == 2) + sym_set_cam_status(cp->cmd, DID_TIME_OUT); + cp->to_abort = 0; /* We donnot expect to fail here */ + break; + + /* + * The target has accepted our message and switched + * to BUS FREE phase as we expected. + */ + case SIR_ABORT_SENT: + target = INB(np, nc_sdid) & 0xf; + tp = &np->target[target]; + starget = tp->starget; + + /* + ** If we didn't abort anything, leave here. + */ + if (np->abrt_msg[0] == M_ABORT) + break; + + /* + * If we sent a M_RESET, then a hardware reset has + * been performed by the target. 
+ * - Reset everything to async 8 bit + * - Tell ourself to negotiate next time :-) + * - Prepare to clear all disconnected CCBs for + * this target from our task list (lun=task=-1) + */ + lun = -1; + task = -1; + if (np->abrt_msg[0] == M_RESET) { + tp->head.sval = 0; + tp->head.wval = np->rv_scntl3; + tp->head.uval = 0; + spi_period(starget) = 0; + spi_offset(starget) = 0; + spi_width(starget) = 0; + spi_iu(starget) = 0; + spi_dt(starget) = 0; + spi_qas(starget) = 0; + tp->tgoal.check_nego = 1; + tp->tgoal.renego = 0; + } + + /* + * Otherwise, check for the LUN and TASK(s) + * concerned by the cancelation. + * If it is not ABORT_TAG then it is CLEAR_QUEUE + * or an ABORT message :-) + */ + else { + lun = np->abrt_msg[0] & 0x3f; + if (np->abrt_msg[1] == M_ABORT_TAG) + task = np->abrt_msg[2]; + } + + /* + * Complete all the CCBs the device should have + * aborted due to our 'kiss of death' message. + */ + i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; + sym_dequeue_from_squeue(np, i, target, lun, -1); + sym_clear_tasks(np, DID_ABORT, target, lun, task); + sym_flush_comp_queue(np, 0); + + /* + * If we sent a BDR, make upper layer aware of that. + */ + if (np->abrt_msg[0] == M_RESET) + starget_printk(KERN_NOTICE, starget, + "has been reset\n"); + break; + } + + /* + * Print to the log the message we intend to send. + */ + if (num == SIR_TARGET_SELECTED) { + dev_info(&tp->starget->dev, "control msgout:"); + sym_printl_hex(np->abrt_msg, np->abrt_tbl.size); + np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size); + } + + /* + * Let the SCRIPTS processor continue. + */ + OUTONB_STD(); +} + +/* + * Gerard's alchemy:) that deals with the data + * pointer for both MDP and the residual calculation. + * + * I didn't want to bloat the code by more than 200 + * lines for the handling of both MDP and the residual. + * This has been achieved by using a data pointer + * representation consisting in an index in the data + * array (dp_sg) and a negative offset (dp_ofs) that + * have the following meaning: + * + * - dp_sg = SYM_CONF_MAX_SG + * we are at the end of the data script. + * - dp_sg < SYM_CONF_MAX_SG + * dp_sg points to the next entry of the scatter array + * we want to transfer. + * - dp_ofs < 0 + * dp_ofs represents the residual of bytes of the + * previous entry scatter entry we will send first. + * - dp_ofs = 0 + * no residual to send first. + * + * The function sym_evaluate_dp() accepts an arbitray + * offset (basically from the MDP message) and returns + * the corresponding values of dp_sg and dp_ofs. + */ + +static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int *ofs) +{ + u32 dp_scr; + int dp_ofs, dp_sg, dp_sgmin; + int tmp; + struct sym_pmc *pm; + + /* + * Compute the resulted data pointer in term of a script + * address within some DATA script and a signed byte offset. + */ + dp_scr = scr; + dp_ofs = *ofs; + if (dp_scr == SCRIPTA_BA(np, pm0_data)) + pm = &cp->phys.pm0; + else if (dp_scr == SCRIPTA_BA(np, pm1_data)) + pm = &cp->phys.pm1; + else + pm = NULL; + + if (pm) { + dp_scr = scr_to_cpu(pm->ret); + dp_ofs -= scr_to_cpu(pm->sg.size) & 0x00ffffff; + } + + /* + * If we are auto-sensing, then we are done. + */ + if (cp->host_flags & HF_SENSE) { + *ofs = dp_ofs; + return 0; + } + + /* + * Deduce the index of the sg entry. + * Keep track of the index of the first valid entry. + * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the + * end of the data. 
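+ *
+ * Each scatter entry occupies two 32 bit words (2*4 bytes) of
+ * the DATA script, hence the computation below, shown here in
+ * condensed form as a sketch (for dp_scr != goalp):
+ *
+ *	dp_sg = SYM_CONF_MAX_SG - (goalp - 8 - dp_scr) / 8;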
+ */ + tmp = scr_to_cpu(cp->goalp); + dp_sg = SYM_CONF_MAX_SG; + if (dp_scr != tmp) + dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4); + dp_sgmin = SYM_CONF_MAX_SG - cp->segments; + + /* + * Move to the sg entry the data pointer belongs to. + * + * If we are inside the data area, we expect result to be: + * + * Either, + * dp_ofs = 0 and dp_sg is the index of the sg entry + * the data pointer belongs to (or the end of the data) + * Or, + * dp_ofs < 0 and dp_sg is the index of the sg entry + * the data pointer belongs to + 1. + */ + if (dp_ofs < 0) { + int n; + while (dp_sg > dp_sgmin) { + --dp_sg; + tmp = scr_to_cpu(cp->phys.data[dp_sg].size); + n = dp_ofs + (tmp & 0xffffff); + if (n > 0) { + ++dp_sg; + break; + } + dp_ofs = n; + } + } + else if (dp_ofs > 0) { + while (dp_sg < SYM_CONF_MAX_SG) { + tmp = scr_to_cpu(cp->phys.data[dp_sg].size); + dp_ofs -= (tmp & 0xffffff); + ++dp_sg; + if (dp_ofs <= 0) + break; + } + } + + /* + * Make sure the data pointer is inside the data area. + * If not, return some error. + */ + if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0)) + goto out_err; + else if (dp_sg > SYM_CONF_MAX_SG || + (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0)) + goto out_err; + + /* + * Save the extreme pointer if needed. + */ + if (dp_sg > cp->ext_sg || + (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) { + cp->ext_sg = dp_sg; + cp->ext_ofs = dp_ofs; + } + + /* + * Return data. + */ + *ofs = dp_ofs; + return dp_sg; + +out_err: + return -1; +} + +/* + * chip handler for MODIFY DATA POINTER MESSAGE + * + * We also call this function on IGNORE WIDE RESIDUE + * messages that do not match a SWIDE full condition. + * Btw, we assume in that situation that such a message + * is equivalent to a MODIFY DATA POINTER (offset=-1). + */ + +static void sym_modify_dp(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp, int ofs) +{ + int dp_ofs = ofs; + u32 dp_scr = sym_get_script_dp (np, cp); + u32 dp_ret; + u32 tmp; + u_char hflags; + int dp_sg; + struct sym_pmc *pm; + + /* + * Not supported for auto-sense. + */ + if (cp->host_flags & HF_SENSE) + goto out_reject; + + /* + * Apply our alchemy:) (see comments in sym_evaluate_dp()), + * to the resulted data pointer. + */ + dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs); + if (dp_sg < 0) + goto out_reject; + + /* + * And our alchemy:) allows to easily calculate the data + * script address we want to return for the next data phase. + */ + dp_ret = cpu_to_scr(cp->goalp); + dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4); + + /* + * If offset / scatter entry is zero we donnot need + * a context for the new current data pointer. + */ + if (dp_ofs == 0) { + dp_scr = dp_ret; + goto out_ok; + } + + /* + * Get a context for the new current data pointer. + */ + hflags = INB(np, HF_PRT); + + if (hflags & HF_DP_SAVED) + hflags ^= HF_ACT_PM; + + if (!(hflags & HF_ACT_PM)) { + pm = &cp->phys.pm0; + dp_scr = SCRIPTA_BA(np, pm0_data); + } + else { + pm = &cp->phys.pm1; + dp_scr = SCRIPTA_BA(np, pm1_data); + } + + hflags &= ~(HF_DP_SAVED); + + OUTB(np, HF_PRT, hflags); + + /* + * Set up the new current data pointer. + * ofs < 0 there, and for the next data phase, we + * want to transfer part of the data of the sg entry + * corresponding to index dp_sg-1 prior to returning + * to the main data script. 
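/*
 * Editor's sketch (not part of the driver): the (dp_sg, dp_ofs) pair used
 * by sym_evaluate_dp() above, modelled over a plain array of segment
 * lengths.  The real code counts dp_sg backwards from SYM_CONF_MAX_SG and
 * walks the SCRIPTS tables; only the representation is illustrated here.
 */
struct dp { int sg; int ofs; };

/* 'moved' = bytes already transferred from the start of the data.
 * Result: ofs == 0 -> resume at the start of segment sg;
 *         ofs <  0 -> first send the last -ofs bytes of segment sg-1. */
static struct dp evaluate_dp(const unsigned int *len, int nseg,
			     unsigned int moved)
{
	struct dp dp = { 0, 0 };

	while (dp.sg < nseg && moved >= len[dp.sg])
		moved -= len[dp.sg++];
	if (moved) {
		dp.ofs = (int)moved - (int)len[dp.sg];
		dp.sg++;
	}
	return dp;
}

/* Example: len = {4096, 4096, 2048}, moved = 5000
 * -> sg = 2, ofs = -3192: 3192 bytes of segment 1 remain to be sent
 *    before the transfer re-enters the main data script at segment 2. */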
+ */ + pm->ret = cpu_to_scr(dp_ret); + tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr); + tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs; + pm->sg.addr = cpu_to_scr(tmp); + pm->sg.size = cpu_to_scr(-dp_ofs); + +out_ok: + sym_set_script_dp (np, cp, dp_scr); + OUTL_DSP(np, SCRIPTA_BA(np, clrack)); + return; + +out_reject: + OUTL_DSP(np, SCRIPTB_BA(np, msg_bad)); +} + + +/* + * chip calculation of the data residual. + * + * As I used to say, the requirement of data residual + * in SCSI is broken, useless and cannot be achieved + * without huge complexity. + * But most OSes and even the official CAM require it. + * When stupidity happens to be so widely spread inside + * a community, it gets hard to convince. + * + * Anyway, I don't care, since I am not going to use + * any software that considers this data residual as + * a relevant information. :) + */ + +int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp) +{ + int dp_sg, resid = 0; + int dp_ofs = 0; + + /* + * Check for some data lost or just thrown away. + * We are not required to be quite accurate in this + * situation. Btw, if we are odd for output and the + * device claims some more data, it may well happen + * than our residual be zero. :-) + */ + if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) { + if (cp->xerr_status & XE_EXTRA_DATA) + resid -= cp->extra_bytes; + if (cp->xerr_status & XE_SODL_UNRUN) + ++resid; + if (cp->xerr_status & XE_SWIDE_OVRUN) + --resid; + } + + /* + * If all data has been transferred, + * there is no residual. + */ + if (cp->phys.head.lastp == cp->goalp) + return resid; + + /* + * If no data transfer occurs, or if the data + * pointer is weird, return full residual. + */ + if (cp->startp == cp->phys.head.lastp || + sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp), + &dp_ofs) < 0) { + return cp->data_len - cp->odd_byte_adjustment; + } + + /* + * If we were auto-sensing, then we are done. + */ + if (cp->host_flags & HF_SENSE) { + return -dp_ofs; + } + + /* + * We are now full comfortable in the computation + * of the data residual (2's complement). + */ + resid = -cp->ext_ofs; + for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) { + u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size); + resid += (tmp & 0xffffff); + } + + resid -= cp->odd_byte_adjustment; + + /* + * Hopefully, the result is not too wrong. + */ + return resid; +} + +/* + * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER. + * + * When we try to negotiate, we append the negotiation message + * to the identify and (maybe) simple tag message. + * The host status field is set to HS_NEGOTIATE to mark this + * situation. + * + * If the target doesn't answer this message immediately + * (as required by the standard), the SIR_NEGO_FAILED interrupt + * will be raised eventually. + * The handler removes the HS_NEGOTIATE status, and sets the + * negotiated value to the default (async / nowide). + * + * If we receive a matching answer immediately, we check it + * for validity, and set the values. + * + * If we receive a Reject message immediately, we assume the + * negotiation has failed, and fall back to standard values. + * + * If we receive a negotiation message while not in HS_NEGOTIATE + * state, it's a target initiated negotiation. We prepare a + * (hopefully) valid answer, set our parameters, and send back + * this answer to the target. 
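/*
 * Editor's sketch (not part of the driver): the residual arithmetic of
 * sym_compute_residual() above, reduced to a plain array of segment
 * lengths.  ext_sg/ext_ofs follow the convention of the previous sketch
 * (ext_ofs <= 0 is the untransferred tail of segment ext_sg - 1).
 */
static int compute_residual(const unsigned int *len, int nseg,
			    int ext_sg, int ext_ofs)
{
	int sg, resid = -ext_ofs;	/* tail of segment ext_sg - 1 */

	for (sg = ext_sg; sg < nseg; sg++)
		resid += (int)len[sg];	/* segments never reached at all */
	return resid;
}

/* With len = {4096, 4096, 2048}, ext_sg = 2, ext_ofs = -3192:
 * resid = 3192 + 2048 = 5240 bytes were not transferred. */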
+ * + * If the target doesn't fetch the answer (no message out phase), + * we assume the negotiation has failed, and fall back to default + * settings (SIR_NEGO_PROTO interrupt). + * + * When we set the values, we adjust them in all ccbs belonging + * to this target, in the controller's register, and in the "phys" + * field of the controller's struct sym_hcb. + */ + +/* + * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message. + */ +static int +sym_sync_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp) +{ + int target = cp->target; + u_char chg, ofs, per, fak, div; + + if (DEBUG_FLAGS & DEBUG_NEGO) { + sym_print_nego_msg(np, target, "sync msgin", np->msgin); + } + + /* + * Get requested values. + */ + chg = 0; + per = np->msgin[3]; + ofs = np->msgin[4]; + + /* + * Check values against our limits. + */ + if (ofs) { + if (ofs > np->maxoffs) + {chg = 1; ofs = np->maxoffs;} + } + + if (ofs) { + if (per < np->minsync) + {chg = 1; per = np->minsync;} + } + + /* + * Get new chip synchronous parameters value. + */ + div = fak = 0; + if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0) + goto reject_it; + + if (DEBUG_FLAGS & DEBUG_NEGO) { + sym_print_addr(cp->cmd, + "sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n", + ofs, per, div, fak, chg); + } + + /* + * If it was an answer we want to change, + * then it isn't acceptable. Reject it. + */ + if (!req && chg) + goto reject_it; + + /* + * Apply new values. + */ + sym_setsync (np, target, ofs, per, div, fak); + + /* + * It was an answer. We are done. + */ + if (!req) + return 0; + + /* + * It was a request. Prepare an answer message. + */ + spi_populate_sync_msg(np->msgout, per, ofs); + + if (DEBUG_FLAGS & DEBUG_NEGO) { + sym_print_nego_msg(np, target, "sync msgout", np->msgout); + } + + np->msgin [0] = M_NOOP; + + return 0; + +reject_it: + sym_setsync (np, target, 0, 0, 0, 0); + return -1; +} + +static void sym_sync_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) +{ + int req = 1; + int result; + + /* + * Request or answer ? + */ + if (INB(np, HS_PRT) == HS_NEGOTIATE) { + OUTB(np, HS_PRT, HS_BUSY); + if (cp->nego_status && cp->nego_status != NS_SYNC) + goto reject_it; + req = 0; + } + + /* + * Check and apply new values. + */ + result = sym_sync_nego_check(np, req, cp); + if (result) /* Not acceptable, reject it */ + goto reject_it; + if (req) { /* Was a request, send response. */ + cp->nego_status = NS_SYNC; + OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp)); + } + else /* Was a response, we are done. */ + OUTL_DSP(np, SCRIPTA_BA(np, clrack)); + return; + +reject_it: + OUTL_DSP(np, SCRIPTB_BA(np, msg_bad)); +} + +/* + * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message. + */ +static int +sym_ppr_nego_check(struct sym_hcb *np, int req, int target) +{ + struct sym_tcb *tp = &np->target[target]; + unsigned char fak, div; + int dt, chg = 0; + + unsigned char per = np->msgin[3]; + unsigned char ofs = np->msgin[5]; + unsigned char wide = np->msgin[6]; + unsigned char opts = np->msgin[7] & PPR_OPT_MASK; + + if (DEBUG_FLAGS & DEBUG_NEGO) { + sym_print_nego_msg(np, target, "ppr msgin", np->msgin); + } + + /* + * Check values against our limits. + */ + if (wide > np->maxwide) { + chg = 1; + wide = np->maxwide; + } + if (!wide || !(np->features & FE_U3EN)) + opts = 0; + + if (opts != (np->msgin[7] & PPR_OPT_MASK)) + chg = 1; + + dt = opts & PPR_OPT_DT; + + if (ofs) { + unsigned char maxoffs = dt ? 
np->maxoffs_dt : np->maxoffs; + if (ofs > maxoffs) { + chg = 1; + ofs = maxoffs; + } + } + + if (ofs) { + unsigned char minsync = dt ? np->minsync_dt : np->minsync; + if (per < minsync) { + chg = 1; + per = minsync; + } + } + + /* + * Get new chip synchronous parameters value. + */ + div = fak = 0; + if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0) + goto reject_it; + + /* + * If it was an answer we want to change, + * then it isn't acceptable. Reject it. + */ + if (!req && chg) + goto reject_it; + + /* + * Apply new values. + */ + sym_setpprot(np, target, opts, ofs, per, wide, div, fak); + + /* + * It was an answer. We are done. + */ + if (!req) + return 0; + + /* + * It was a request. Prepare an answer message. + */ + spi_populate_ppr_msg(np->msgout, per, ofs, wide, opts); + + if (DEBUG_FLAGS & DEBUG_NEGO) { + sym_print_nego_msg(np, target, "ppr msgout", np->msgout); + } + + np->msgin [0] = M_NOOP; + + return 0; + +reject_it: + sym_setpprot (np, target, 0, 0, 0, 0, 0, 0); + /* + * If it is a device response that should result in + * ST, we may want to try a legacy negotiation later. + */ + if (!req && !opts) { + tp->tgoal.period = per; + tp->tgoal.offset = ofs; + tp->tgoal.width = wide; + tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; + tp->tgoal.check_nego = 1; + } + return -1; +} + +static void sym_ppr_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) +{ + int req = 1; + int result; + + /* + * Request or answer ? + */ + if (INB(np, HS_PRT) == HS_NEGOTIATE) { + OUTB(np, HS_PRT, HS_BUSY); + if (cp->nego_status && cp->nego_status != NS_PPR) + goto reject_it; + req = 0; + } + + /* + * Check and apply new values. + */ + result = sym_ppr_nego_check(np, req, cp->target); + if (result) /* Not acceptable, reject it */ + goto reject_it; + if (req) { /* Was a request, send response. */ + cp->nego_status = NS_PPR; + OUTL_DSP(np, SCRIPTB_BA(np, ppr_resp)); + } + else /* Was a response, we are done. */ + OUTL_DSP(np, SCRIPTA_BA(np, clrack)); + return; + +reject_it: + OUTL_DSP(np, SCRIPTB_BA(np, msg_bad)); +} + +/* + * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message. + */ +static int +sym_wide_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp) +{ + int target = cp->target; + u_char chg, wide; + + if (DEBUG_FLAGS & DEBUG_NEGO) { + sym_print_nego_msg(np, target, "wide msgin", np->msgin); + } + + /* + * Get requested values. + */ + chg = 0; + wide = np->msgin[3]; + + /* + * Check values against our limits. + */ + if (wide > np->maxwide) { + chg = 1; + wide = np->maxwide; + } + + if (DEBUG_FLAGS & DEBUG_NEGO) { + sym_print_addr(cp->cmd, "wdtr: wide=%d chg=%d.\n", + wide, chg); + } + + /* + * If it was an answer we want to change, + * then it isn't acceptable. Reject it. + */ + if (!req && chg) + goto reject_it; + + /* + * Apply new values. + */ + sym_setwide (np, target, wide); + + /* + * It was an answer. We are done. + */ + if (!req) + return 0; + + /* + * It was a request. Prepare an answer message. + */ + spi_populate_width_msg(np->msgout, wide); + + np->msgin [0] = M_NOOP; + + if (DEBUG_FLAGS & DEBUG_NEGO) { + sym_print_nego_msg(np, target, "wide msgout", np->msgout); + } + + return 0; + +reject_it: + return -1; +} + +static void sym_wide_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) +{ + int req = 1; + int result; + + /* + * Request or answer ? 
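/*
 * Editor's sketch (not part of the driver): the clamp-then-maybe-reject
 * pattern shared by sym_sync_nego_check(), sym_ppr_nego_check() and
 * sym_wide_nego_check() above.  struct nego_limits is a stand-in for the
 * relevant sym_hcb fields (maxoffs, minsync, maxwide).
 */
struct nego_limits {
	unsigned char maxoffs, minsync, maxwide;
};

/* Clamp the requested triple against our limits; returns nonzero if any
 * value had to change.  The driver answers a changed *request* with the
 * clamped values, but rejects a changed *answer* outright. */
static int clamp_nego(const struct nego_limits *lim, unsigned char *per,
		      unsigned char *ofs, unsigned char *wide)
{
	int chg = 0;

	if (*wide > lim->maxwide) { *wide = lim->maxwide; chg = 1; }
	if (*ofs > lim->maxoffs)  { *ofs  = lim->maxoffs; chg = 1; }
	if (*ofs && *per < lim->minsync) { *per = lim->minsync; chg = 1; }
	return chg;
}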
+ */ + if (INB(np, HS_PRT) == HS_NEGOTIATE) { + OUTB(np, HS_PRT, HS_BUSY); + if (cp->nego_status && cp->nego_status != NS_WIDE) + goto reject_it; + req = 0; + } + + /* + * Check and apply new values. + */ + result = sym_wide_nego_check(np, req, cp); + if (result) /* Not acceptable, reject it */ + goto reject_it; + if (req) { /* Was a request, send response. */ + cp->nego_status = NS_WIDE; + OUTL_DSP(np, SCRIPTB_BA(np, wdtr_resp)); + } else { /* Was a response. */ + /* + * Negotiate for SYNC immediately after WIDE response. + * This allows to negotiate for both WIDE and SYNC on + * a single SCSI command (Suggested by Justin Gibbs). + */ + if (tp->tgoal.offset) { + spi_populate_sync_msg(np->msgout, tp->tgoal.period, + tp->tgoal.offset); + + if (DEBUG_FLAGS & DEBUG_NEGO) { + sym_print_nego_msg(np, cp->target, + "sync msgout", np->msgout); + } + + cp->nego_status = NS_SYNC; + OUTB(np, HS_PRT, HS_NEGOTIATE); + OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp)); + return; + } else + OUTL_DSP(np, SCRIPTA_BA(np, clrack)); + } + + return; + +reject_it: + OUTL_DSP(np, SCRIPTB_BA(np, msg_bad)); +} + +/* + * Reset DT, SYNC or WIDE to default settings. + * + * Called when a negotiation does not succeed either + * on rejection or on protocol error. + * + * A target that understands a PPR message should never + * reject it, and messing with it is very unlikely. + * So, if a PPR makes problems, we may just want to + * try a legacy negotiation later. + */ +static void sym_nego_default(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) +{ + switch (cp->nego_status) { + case NS_PPR: +#if 0 + sym_setpprot (np, cp->target, 0, 0, 0, 0, 0, 0); +#else + if (tp->tgoal.period < np->minsync) + tp->tgoal.period = np->minsync; + if (tp->tgoal.offset > np->maxoffs) + tp->tgoal.offset = np->maxoffs; + tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; + tp->tgoal.check_nego = 1; +#endif + break; + case NS_SYNC: + sym_setsync (np, cp->target, 0, 0, 0, 0); + break; + case NS_WIDE: + sym_setwide (np, cp->target, 0); + break; + } + np->msgin [0] = M_NOOP; + np->msgout[0] = M_NOOP; + cp->nego_status = 0; +} + +/* + * chip handler for MESSAGE REJECT received in response to + * PPR, WIDE or SYNCHRONOUS negotiation. + */ +static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) +{ + sym_nego_default(np, tp, cp); + OUTB(np, HS_PRT, HS_BUSY); +} + +#define sym_printk(lvl, tp, cp, fmt, v...) do { \ + if (cp) \ + scmd_printk(lvl, cp->cmd, fmt, ##v); \ + else \ + starget_printk(lvl, tp->starget, fmt, ##v); \ +} while (0) + +/* + * chip exception handler for programmed interrupts. + */ +static void sym_int_sir(struct sym_hcb *np) +{ + u_char num = INB(np, nc_dsps); + u32 dsa = INL(np, nc_dsa); + struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); + u_char target = INB(np, nc_sdid) & 0x0f; + struct sym_tcb *tp = &np->target[target]; + int tmp; + + if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num); + + switch (num) { +#if SYM_CONF_DMA_ADDRESSING_MODE == 2 + /* + * SCRIPTS tell us that we may have to update + * 64 bit DMA segment registers. + */ + case SIR_DMAP_DIRTY: + sym_update_dmap_regs(np); + goto out; +#endif + /* + * Command has been completed with error condition + * or has been auto-sensed. + */ + case SIR_COMPLETE_ERROR: + sym_complete_error(np, cp); + return; + /* + * The C code is currently trying to recover from something. + * Typically, user want to abort some command. 
+ */ + case SIR_SCRIPT_STOPPED: + case SIR_TARGET_SELECTED: + case SIR_ABORT_SENT: + sym_sir_task_recovery(np, num); + return; + /* + * The device didn't go to MSG OUT phase after having + * been selected with ATN. We do not want to handle that. + */ + case SIR_SEL_ATN_NO_MSG_OUT: + sym_printk(KERN_WARNING, tp, cp, + "No MSG OUT phase after selection with ATN\n"); + goto out_stuck; + /* + * The device didn't switch to MSG IN phase after + * having reselected the initiator. + */ + case SIR_RESEL_NO_MSG_IN: + sym_printk(KERN_WARNING, tp, cp, + "No MSG IN phase after reselection\n"); + goto out_stuck; + /* + * After reselection, the device sent a message that wasn't + * an IDENTIFY. + */ + case SIR_RESEL_NO_IDENTIFY: + sym_printk(KERN_WARNING, tp, cp, + "No IDENTIFY after reselection\n"); + goto out_stuck; + /* + * The device reselected a LUN we do not know about. + */ + case SIR_RESEL_BAD_LUN: + np->msgout[0] = M_RESET; + goto out; + /* + * The device reselected for an untagged nexus and we + * haven't any. + */ + case SIR_RESEL_BAD_I_T_L: + np->msgout[0] = M_ABORT; + goto out; + /* + * The device reselected for a tagged nexus that we do not have. + */ + case SIR_RESEL_BAD_I_T_L_Q: + np->msgout[0] = M_ABORT_TAG; + goto out; + /* + * The SCRIPTS let us know that the device has grabbed + * our message and will abort the job. + */ + case SIR_RESEL_ABORTED: + np->lastmsg = np->msgout[0]; + np->msgout[0] = M_NOOP; + sym_printk(KERN_WARNING, tp, cp, + "message %x sent on bad reselection\n", np->lastmsg); + goto out; + /* + * The SCRIPTS let us know that a message has been + * successfully sent to the device. + */ + case SIR_MSG_OUT_DONE: + np->lastmsg = np->msgout[0]; + np->msgout[0] = M_NOOP; + /* Should we really care of that */ + if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) { + if (cp) { + cp->xerr_status &= ~XE_PARITY_ERR; + if (!cp->xerr_status) + OUTOFFB(np, HF_PRT, HF_EXT_ERR); + } + } + goto out; + /* + * The device didn't send a GOOD SCSI status. + * We may have some work to do prior to allow + * the SCRIPTS processor to continue. + */ + case SIR_BAD_SCSI_STATUS: + if (!cp) + goto out; + sym_sir_bad_scsi_status(np, num, cp); + return; + /* + * We are asked by the SCRIPTS to prepare a + * REJECT message. + */ + case SIR_REJECT_TO_SEND: + sym_print_msg(cp, "M_REJECT to send for ", np->msgin); + np->msgout[0] = M_REJECT; + goto out; + /* + * We have been ODD at the end of a DATA IN + * transfer and the device didn't send a + * IGNORE WIDE RESIDUE message. + * It is a data overrun condition. + */ + case SIR_SWIDE_OVERRUN: + if (cp) { + OUTONB(np, HF_PRT, HF_EXT_ERR); + cp->xerr_status |= XE_SWIDE_OVRUN; + } + goto out; + /* + * We have been ODD at the end of a DATA OUT + * transfer. + * It is a data underrun condition. + */ + case SIR_SODL_UNDERRUN: + if (cp) { + OUTONB(np, HF_PRT, HF_EXT_ERR); + cp->xerr_status |= XE_SODL_UNRUN; + } + goto out; + /* + * The device wants us to tranfer more data than + * expected or in the wrong direction. + * The number of extra bytes is in scratcha. + * It is a data overrun condition. + */ + case SIR_DATA_OVERRUN: + if (cp) { + OUTONB(np, HF_PRT, HF_EXT_ERR); + cp->xerr_status |= XE_EXTRA_DATA; + cp->extra_bytes += INL(np, nc_scratcha); + } + goto out; + /* + * The device switched to an illegal phase (4/5). + */ + case SIR_BAD_PHASE: + if (cp) { + OUTONB(np, HF_PRT, HF_EXT_ERR); + cp->xerr_status |= XE_BAD_PHASE; + } + goto out; + /* + * We received a message. 
+ */ + case SIR_MSG_RECEIVED: + if (!cp) + goto out_stuck; + switch (np->msgin [0]) { + /* + * We received an extended message. + * We handle MODIFY DATA POINTER, SDTR, WDTR + * and reject all other extended messages. + */ + case M_EXTENDED: + switch (np->msgin [2]) { + case M_X_MODIFY_DP: + if (DEBUG_FLAGS & DEBUG_POINTER) + sym_print_msg(cp, "extended msg ", + np->msgin); + tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + + (np->msgin[5]<<8) + (np->msgin[6]); + sym_modify_dp(np, tp, cp, tmp); + return; + case M_X_SYNC_REQ: + sym_sync_nego(np, tp, cp); + return; + case M_X_PPR_REQ: + sym_ppr_nego(np, tp, cp); + return; + case M_X_WIDE_REQ: + sym_wide_nego(np, tp, cp); + return; + default: + goto out_reject; + } + break; + /* + * We received a 1/2 byte message not handled from SCRIPTS. + * We are only expecting MESSAGE REJECT and IGNORE WIDE + * RESIDUE messages that haven't been anticipated by + * SCRIPTS on SWIDE full condition. Unanticipated IGNORE + * WIDE RESIDUE messages are aliased as MODIFY DP (-1). + */ + case M_IGN_RESIDUE: + if (DEBUG_FLAGS & DEBUG_POINTER) + sym_print_msg(cp, "1 or 2 byte ", np->msgin); + if (cp->host_flags & HF_SENSE) + OUTL_DSP(np, SCRIPTA_BA(np, clrack)); + else + sym_modify_dp(np, tp, cp, -1); + return; + case M_REJECT: + if (INB(np, HS_PRT) == HS_NEGOTIATE) + sym_nego_rejected(np, tp, cp); + else { + sym_print_addr(cp->cmd, + "M_REJECT received (%x:%x).\n", + scr_to_cpu(np->lastmsg), np->msgout[0]); + } + goto out_clrack; + default: + goto out_reject; + } + break; + /* + * We received an unknown message. + * Ignore all MSG IN phases and reject it. + */ + case SIR_MSG_WEIRD: + sym_print_msg(cp, "WEIRD message received", np->msgin); + OUTL_DSP(np, SCRIPTB_BA(np, msg_weird)); + return; + /* + * Negotiation failed. + * Target does not send us the reply. + * Remove the HS_NEGOTIATE status. + */ + case SIR_NEGO_FAILED: + OUTB(np, HS_PRT, HS_BUSY); + /* + * Negotiation failed. + * Target does not want answer message. + */ + fallthrough; + case SIR_NEGO_PROTO: + sym_nego_default(np, tp, cp); + goto out; + } + +out: + OUTONB_STD(); + return; +out_reject: + OUTL_DSP(np, SCRIPTB_BA(np, msg_bad)); + return; +out_clrack: + OUTL_DSP(np, SCRIPTA_BA(np, clrack)); + return; +out_stuck: + return; +} + +/* + * Acquire a control block + */ +struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order) +{ + u_char tn = cmd->device->id; + u_char ln = cmd->device->lun; + struct sym_tcb *tp = &np->target[tn]; + struct sym_lcb *lp = sym_lp(tp, ln); + u_short tag = NO_TAG; + SYM_QUEHEAD *qp; + struct sym_ccb *cp = NULL; + + /* + * Look for a free CCB + */ + if (sym_que_empty(&np->free_ccbq)) + sym_alloc_ccb(np); + qp = sym_remque_head(&np->free_ccbq); + if (!qp) + goto out; + cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); + + { + /* + * If we have been asked for a tagged command. + */ + if (tag_order) { + /* + * Debugging purpose. + */ +#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING + if (lp->busy_itl != 0) + goto out_free; +#endif + /* + * Allocate resources for tags if not yet. + */ + if (!lp->cb_tags) { + sym_alloc_lcb_tags(np, tn, ln); + if (!lp->cb_tags) + goto out_free; + } + /* + * Get a tag for this SCSI IO and set up + * the CCB bus address for reselection, + * and count it for this LUN. + * Toggle reselect path to tagged. 
+ */ + if (lp->busy_itlq < SYM_CONF_MAX_TASK) { + tag = lp->cb_tags[lp->ia_tag]; + if (++lp->ia_tag == SYM_CONF_MAX_TASK) + lp->ia_tag = 0; + ++lp->busy_itlq; +#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING + lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba); + lp->head.resel_sa = + cpu_to_scr(SCRIPTA_BA(np, resel_tag)); +#endif +#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING + cp->tags_si = lp->tags_si; + ++lp->tags_sum[cp->tags_si]; + ++lp->tags_since; +#endif + } + else + goto out_free; + } + /* + * This command will not be tagged. + * If we already have either a tagged or untagged + * one, refuse to overlap this untagged one. + */ + else { + /* + * Debugging purpose. + */ +#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING + if (lp->busy_itl != 0 || lp->busy_itlq != 0) + goto out_free; +#endif + /* + * Count this nexus for this LUN. + * Set up the CCB bus address for reselection. + * Toggle reselect path to untagged. + */ + ++lp->busy_itl; +#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING + if (lp->busy_itl == 1) { + lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba); + lp->head.resel_sa = + cpu_to_scr(SCRIPTA_BA(np, resel_no_tag)); + } + else + goto out_free; +#endif + } + } + /* + * Put the CCB into the busy queue. + */ + sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + if (lp) { + sym_remque(&cp->link2_ccbq); + sym_insque_tail(&cp->link2_ccbq, &lp->waiting_ccbq); + } + +#endif + cp->to_abort = 0; + cp->odd_byte_adjustment = 0; + cp->tag = tag; + cp->order = tag_order; + cp->target = tn; + cp->lun = ln; + + if (DEBUG_FLAGS & DEBUG_TAGS) { + sym_print_addr(cmd, "ccb @%p using tag %d.\n", cp, tag); + } + +out: + return cp; +out_free: + sym_insque_head(&cp->link_ccbq, &np->free_ccbq); + return NULL; +} + +/* + * Release one control block + */ +void sym_free_ccb (struct sym_hcb *np, struct sym_ccb *cp) +{ + struct sym_tcb *tp = &np->target[cp->target]; + struct sym_lcb *lp = sym_lp(tp, cp->lun); + + if (DEBUG_FLAGS & DEBUG_TAGS) { + sym_print_addr(cp->cmd, "ccb @%p freeing tag %d.\n", + cp, cp->tag); + } + + /* + * If LCB available, + */ + if (lp) { + /* + * If tagged, release the tag, set the relect path + */ + if (cp->tag != NO_TAG) { +#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING + --lp->tags_sum[cp->tags_si]; +#endif + /* + * Free the tag value. + */ + lp->cb_tags[lp->if_tag] = cp->tag; + if (++lp->if_tag == SYM_CONF_MAX_TASK) + lp->if_tag = 0; + /* + * Make the reselect path invalid, + * and uncount this CCB. + */ + lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba); + --lp->busy_itlq; + } else { /* Untagged */ + /* + * Make the reselect path invalid, + * and uncount this CCB. + */ + lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba); + --lp->busy_itl; + } + /* + * If no JOB active, make the LUN reselect path invalid. + */ + if (lp->busy_itlq == 0 && lp->busy_itl == 0) + lp->head.resel_sa = + cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun)); + } + + /* + * We donnot queue more than 1 ccb per target + * with negotiation at any time. If this ccb was + * used for negotiation, clear this info in the tcb. + */ + if (cp == tp->nego_cp) + tp->nego_cp = NULL; + +#ifdef SYM_CONF_IARB_SUPPORT + /* + * If we just complete the last queued CCB, + * clear this info that is no longer relevant. + */ + if (cp == np->last_cp) + np->last_cp = 0; +#endif + + /* + * Make this CCB available. 
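/*
 * Editor's sketch (not part of the driver): the circular tag buffer used
 * by sym_get_ccb()/sym_free_ccb() above.  cb_tags[] holds every tag value
 * exactly once; ia_tag is the allocation index, if_tag the release index.
 * MAX_TASK and the struct below are simplified stand-ins for
 * SYM_CONF_MAX_TASK and the corresponding struct sym_lcb fields.
 */
#define MAX_TASK 64

struct tag_ring {
	unsigned char tags[MAX_TASK];
	unsigned short ia, iff;		/* alloc / free indexes */
	unsigned short busy;		/* tags currently in use */
};

static void tag_ring_init(struct tag_ring *r)
{
	int i;

	r->ia = r->iff = r->busy = 0;
	for (i = 0; i < MAX_TASK; i++)
		r->tags[i] = (unsigned char)i;
}

static int tag_get(struct tag_ring *r)
{
	int tag;

	if (r->busy >= MAX_TASK)
		return -1;		/* caller refuses the tagged path */
	tag = r->tags[r->ia];
	if (++r->ia == MAX_TASK)
		r->ia = 0;
	r->busy++;
	return tag;
}

static void tag_put(struct tag_ring *r, unsigned char tag)
{
	r->tags[r->iff] = tag;		/* recycle the value */
	if (++r->iff == MAX_TASK)
		r->iff = 0;
	r->busy--;
}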
+ */ + cp->cmd = NULL; + cp->host_status = HS_IDLE; + sym_remque(&cp->link_ccbq); + sym_insque_head(&cp->link_ccbq, &np->free_ccbq); + +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + if (lp) { + sym_remque(&cp->link2_ccbq); + sym_insque_tail(&cp->link2_ccbq, &np->dummy_ccbq); + if (cp->started) { + if (cp->tag != NO_TAG) + --lp->started_tags; + else + --lp->started_no_tag; + } + } + cp->started = 0; +#endif +} + +/* + * Allocate a CCB from memory and initialize its fixed part. + */ +static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np) +{ + struct sym_ccb *cp = NULL; + int hcode; + + /* + * Prevent from allocating more CCBs than we can + * queue to the controller. + */ + if (np->actccbs >= SYM_CONF_MAX_START) + return NULL; + + /* + * Allocate memory for this CCB. + */ + cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB"); + if (!cp) + goto out_free; + + /* + * Count it. + */ + np->actccbs++; + + /* + * Compute the bus address of this ccb. + */ + cp->ccb_ba = vtobus(cp); + + /* + * Insert this ccb into the hashed list. + */ + hcode = CCB_HASH_CODE(cp->ccb_ba); + cp->link_ccbh = np->ccbh[hcode]; + np->ccbh[hcode] = cp; + + /* + * Initialyze the start and restart actions. + */ + cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, idle)); + cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); + + /* + * Initilialyze some other fields. + */ + cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2])); + + /* + * Chain into free ccb queue. + */ + sym_insque_head(&cp->link_ccbq, &np->free_ccbq); + + /* + * Chain into optionnal lists. + */ +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + sym_insque_head(&cp->link2_ccbq, &np->dummy_ccbq); +#endif + return cp; +out_free: + if (cp) + sym_mfree_dma(cp, sizeof(*cp), "CCB"); + return NULL; +} + +/* + * Look up a CCB from a DSA value. + */ +static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa) +{ + int hcode; + struct sym_ccb *cp; + + hcode = CCB_HASH_CODE(dsa); + cp = np->ccbh[hcode]; + while (cp) { + if (cp->ccb_ba == dsa) + break; + cp = cp->link_ccbh; + } + + return cp; +} + +/* + * Target control block initialisation. + * Nothing important to do at the moment. + */ +static void sym_init_tcb (struct sym_hcb *np, u_char tn) +{ +#if 0 /* Hmmm... this checking looks paranoid. */ + /* + * Check some alignments required by the chip. + */ + assert (((offsetof(struct sym_reg, nc_sxfer) ^ + offsetof(struct sym_tcb, head.sval)) &3) == 0); + assert (((offsetof(struct sym_reg, nc_scntl3) ^ + offsetof(struct sym_tcb, head.wval)) &3) == 0); +#endif +} + +/* + * Lun control block allocation and initialization. + */ +struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln) +{ + struct sym_tcb *tp = &np->target[tn]; + struct sym_lcb *lp = NULL; + + /* + * Initialize the target control block if not yet. + */ + sym_init_tcb (np, tn); + + /* + * Allocate the LCB bus address array. + * Compute the bus address of this table. + */ + if (ln && !tp->luntbl) { + tp->luntbl = sym_calloc_dma(256, "LUNTBL"); + if (!tp->luntbl) + goto fail; + memset32(tp->luntbl, cpu_to_scr(vtobus(&np->badlun_sa)), 64); + tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl)); + } + + /* + * Allocate the table of pointers for LUN(s) > 0, if needed. + */ + if (ln && !tp->lunmp) { + tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *), + GFP_ATOMIC); + if (!tp->lunmp) + goto fail; + } + + /* + * Allocate the lcb. + * Make it available to the chip. 
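/*
 * Editor's sketch (not part of the driver): the DSA -> CCB lookup built by
 * sym_alloc_ccb() and used by sym_ccb_from_dsa() above.  CCBs are chained
 * into buckets keyed by their own bus address, so the DSA read back from
 * the chip on reselection maps to a CCB without scanning every busy
 * command.  Types and the hash shift are simplified stand-ins.
 */
#define HASH_SIZE  256			/* CCB_HASH_SIZE in the driver */
#define HASH_MASK  (HASH_SIZE - 1)

struct ccb {
	unsigned int ba;		/* bus address, loaded by the chip as DSA */
	struct ccb *link_hash;		/* next CCB in the same bucket */
};

static unsigned int hash_code(unsigned int ba)
{
	return (ba >> 9) & HASH_MASK;	/* roughly sizeof(CCB) granularity */
}

static struct ccb *ccb_from_dsa(struct ccb *buckets[], unsigned int dsa)
{
	struct ccb *cp = buckets[hash_code(dsa)];

	while (cp && cp->ba != dsa)
		cp = cp->link_hash;
	return cp;
}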
+ */ + lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB"); + if (!lp) + goto fail; + if (ln) { + tp->lunmp[ln] = lp; + tp->luntbl[ln] = cpu_to_scr(vtobus(lp)); + } + else { + tp->lun0p = lp; + tp->head.lun0_sa = cpu_to_scr(vtobus(lp)); + } + tp->nlcb++; + + /* + * Let the itl task point to error handling. + */ + lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba); + + /* + * Set the reselect pattern to our default. :) + */ + lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun)); + + /* + * Set user capabilities. + */ + lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); + +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + /* + * Initialize device queueing. + */ + sym_que_init(&lp->waiting_ccbq); + sym_que_init(&lp->started_ccbq); + lp->started_max = SYM_CONF_MAX_TASK; + lp->started_limit = SYM_CONF_MAX_TASK; +#endif + +fail: + return lp; +} + +/* + * Allocate LCB resources for tagged command queuing. + */ +static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln) +{ + struct sym_tcb *tp = &np->target[tn]; + struct sym_lcb *lp = sym_lp(tp, ln); + int i; + + /* + * Allocate the task table and and the tag allocation + * circular buffer. We want both or none. + */ + lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); + if (!lp->itlq_tbl) + goto fail; + lp->cb_tags = kcalloc(SYM_CONF_MAX_TASK, 1, GFP_ATOMIC); + if (!lp->cb_tags) { + sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); + lp->itlq_tbl = NULL; + goto fail; + } + + /* + * Initialize the task table with invalid entries. + */ + memset32(lp->itlq_tbl, cpu_to_scr(np->notask_ba), SYM_CONF_MAX_TASK); + + /* + * Fill up the tag buffer with tag numbers. + */ + for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++) + lp->cb_tags[i] = i; + + /* + * Make the task table available to SCRIPTS, + * And accept tagged commands now. + */ + lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl)); + + return; +fail: + return; +} + +/* + * Lun control block deallocation. Returns the number of valid remaining LCBs + * for the target. + */ +int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln) +{ + struct sym_tcb *tp = &np->target[tn]; + struct sym_lcb *lp = sym_lp(tp, ln); + + tp->nlcb--; + + if (ln) { + if (!tp->nlcb) { + kfree(tp->lunmp); + sym_mfree_dma(tp->luntbl, 256, "LUNTBL"); + tp->lunmp = NULL; + tp->luntbl = NULL; + tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl)); + } else { + tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa)); + tp->lunmp[ln] = NULL; + } + } else { + tp->lun0p = NULL; + tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa)); + } + + if (lp->itlq_tbl) { + sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); + kfree(lp->cb_tags); + } + + sym_mfree_dma(lp, sizeof(*lp), "LCB"); + + return tp->nlcb; +} + +/* + * Queue a SCSI IO to the controller. + */ +int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) +{ + struct scsi_device *sdev = cmd->device; + struct sym_tcb *tp; + struct sym_lcb *lp; + u_char *msgptr; + u_int msglen; + int can_disconnect; + + /* + * Keep track of the IO in our CCB. + */ + cp->cmd = cmd; + + /* + * Retrieve the target descriptor. + */ + tp = &np->target[cp->target]; + + /* + * Retrieve the lun descriptor. + */ + lp = sym_lp(tp, sdev->lun); + + can_disconnect = (cp->tag != NO_TAG) || + (lp && (lp->curr_flags & SYM_DISC_ENABLED)); + + msgptr = cp->scsi_smsg; + msglen = 0; + msgptr[msglen++] = IDENTIFY(can_disconnect, sdev->lun); + + /* + * Build the tag message if present. 
+ */ + if (cp->tag != NO_TAG) { + u_char order = cp->order; + + switch(order) { + case M_ORDERED_TAG: + break; + case M_HEAD_TAG: + break; + default: + order = M_SIMPLE_TAG; + } +#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING + /* + * Avoid too much reordering of SCSI commands. + * The algorithm tries to prevent completion of any + * tagged command from being delayed against more + * than 3 times the max number of queued commands. + */ + if (lp && lp->tags_since > 3*SYM_CONF_MAX_TAG) { + lp->tags_si = !(lp->tags_si); + if (lp->tags_sum[lp->tags_si]) { + order = M_ORDERED_TAG; + if ((DEBUG_FLAGS & DEBUG_TAGS)||sym_verbose>1) { + sym_print_addr(cmd, + "ordered tag forced.\n"); + } + } + lp->tags_since = 0; + } +#endif + msgptr[msglen++] = order; + + /* + * For less than 128 tags, actual tags are numbered + * 1,3,5,..2*MAXTAGS+1,since we may have to deal + * with devices that have problems with #TAG 0 or too + * great #TAG numbers. For more tags (up to 256), + * we use directly our tag number. + */ +#if SYM_CONF_MAX_TASK > (512/4) + msgptr[msglen++] = cp->tag; +#else + msgptr[msglen++] = (cp->tag << 1) + 1; +#endif + } + + /* + * Build a negotiation message if needed. + * (nego_status is filled by sym_prepare_nego()) + * + * Always negotiate on INQUIRY and REQUEST SENSE. + * + */ + cp->nego_status = 0; + if ((tp->tgoal.check_nego || + cmd->cmnd[0] == INQUIRY || cmd->cmnd[0] == REQUEST_SENSE) && + !tp->nego_cp && lp) { + msglen += sym_prepare_nego(np, cp, msgptr + msglen); + } + + /* + * Startqueue + */ + cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select)); + cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA(np, resel_dsa)); + + /* + * select + */ + cp->phys.select.sel_id = cp->target; + cp->phys.select.sel_scntl3 = tp->head.wval; + cp->phys.select.sel_sxfer = tp->head.sval; + cp->phys.select.sel_scntl4 = tp->head.uval; + + /* + * message + */ + cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg); + cp->phys.smsg.size = cpu_to_scr(msglen); + + /* + * status + */ + cp->host_xflags = 0; + cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; + cp->ssss_status = S_ILLEGAL; + cp->xerr_status = 0; + cp->host_flags = 0; + cp->extra_bytes = 0; + + /* + * extreme data pointer. + * shall be positive, so -1 is lower than lowest.:) + */ + cp->ext_sg = -1; + cp->ext_ofs = 0; + + /* + * Build the CDB and DATA descriptor block + * and start the IO. + */ + return sym_setup_data_and_start(np, cmd, cp); +} + +/* + * Reset a SCSI target (all LUNs of this target). + */ +int sym_reset_scsi_target(struct sym_hcb *np, int target) +{ + struct sym_tcb *tp; + + if (target == np->myaddr || (u_int)target >= SYM_CONF_MAX_TARGET) + return -1; + + tp = &np->target[target]; + tp->to_reset = 1; + + np->istat_sem = SEM; + OUTB(np, nc_istat, SIGP|SEM); + + return 0; +} + +/* + * Abort a SCSI IO. + */ +static int sym_abort_ccb(struct sym_hcb *np, struct sym_ccb *cp, int timed_out) +{ + /* + * Check that the IO is active. + */ + if (!cp || !cp->host_status || cp->host_status == HS_WAIT) + return -1; + + /* + * If a previous abort didn't succeed in time, + * perform a BUS reset. + */ + if (cp->to_abort) { + sym_reset_scsi_bus(np, 1); + return 0; + } + + /* + * Mark the CCB for abort and allow time for. + */ + cp->to_abort = timed_out ? 2 : 1; + + /* + * Tell the SCRIPTS processor to stop and synchronize with us. 
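/*
 * Editor's sketch (not part of the driver): the queue tag byte built in
 * sym_queue_scsiio() above.  For configurations of up to 128 tags the
 * driver avoids tag #0 and very large values by sending the odd number
 * 2n+1 for internal tag n; with more tags it sends n unchanged.
 */
static unsigned char wire_tag(unsigned int tag, unsigned int max_task)
{
	if (max_task > 128)
		return (unsigned char)tag;
	return (unsigned char)((tag << 1) + 1);	/* 1, 3, 5, ... */
}

/* e.g. wire_tag(0, 64) == 1, wire_tag(63, 64) == 127 */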
+ */ + np->istat_sem = SEM; + OUTB(np, nc_istat, SIGP|SEM); + return 0; +} + +int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, int timed_out) +{ + struct sym_ccb *cp; + SYM_QUEHEAD *qp; + + /* + * Look up our CCB control block. + */ + cp = NULL; + FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { + struct sym_ccb *cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq); + if (cp2->cmd == cmd) { + cp = cp2; + break; + } + } + + return sym_abort_ccb(np, cp, timed_out); +} + +/* + * Complete execution of a SCSI command with extended + * error, SCSI status error, or having been auto-sensed. + * + * The SCRIPTS processor is not running there, so we + * can safely access IO registers and remove JOBs from + * the START queue. + * SCRATCHA is assumed to have been loaded with STARTPOS + * before the SCRIPTS called the C code. + */ +void sym_complete_error(struct sym_hcb *np, struct sym_ccb *cp) +{ + struct scsi_device *sdev; + struct scsi_cmnd *cmd; +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + struct sym_tcb *tp; + struct sym_lcb *lp; +#endif + int resid; + int i; + + /* + * Paranoid check. :) + */ + if (!cp || !cp->cmd) + return; + + cmd = cp->cmd; + sdev = cmd->device; + if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) { + dev_info(&sdev->sdev_gendev, "CCB=%p STAT=%x/%x/%x\n", cp, + cp->host_status, cp->ssss_status, cp->host_flags); + } + +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + /* + * Get target and lun pointers. + */ + tp = &np->target[cp->target]; + lp = sym_lp(tp, sdev->lun); +#endif + + /* + * Check for extended errors. + */ + if (cp->xerr_status) { + if (sym_verbose) + sym_print_xerr(cmd, cp->xerr_status); + if (cp->host_status == HS_COMPLETE) + cp->host_status = HS_COMP_ERR; + } + + /* + * Calculate the residual. + */ + resid = sym_compute_residual(np, cp); + + if (!SYM_SETUP_RESIDUAL_SUPPORT) {/* If user does not want residuals */ + resid = 0; /* throw them away. :) */ + cp->sv_resid = 0; + } +#ifdef DEBUG_2_0_X +if (resid) + printf("XXXX RESID= %d - 0x%x\n", resid, resid); +#endif + + /* + * Dequeue all queued CCBs for that device + * not yet started by SCRIPTS. + */ + i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; + i = sym_dequeue_from_squeue(np, i, cp->target, sdev->lun, -1); + + /* + * Restart the SCRIPTS processor. + */ + OUTL_DSP(np, SCRIPTA_BA(np, start)); + +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + if (cp->host_status == HS_COMPLETE && + cp->ssss_status == S_QUEUE_FULL) { + if (!lp || lp->started_tags - i < 2) + goto weirdness; + /* + * Decrease queue depth as needed. + */ + lp->started_max = lp->started_tags - i - 1; + lp->num_sgood = 0; + + if (sym_verbose >= 2) { + sym_print_addr(cmd, " queue depth is now %d\n", + lp->started_max); + } + + /* + * Repair the CCB. + */ + cp->host_status = HS_BUSY; + cp->ssss_status = S_ILLEGAL; + + /* + * Let's requeue it to device. + */ + sym_set_cam_status(cmd, DID_SOFT_ERROR); + goto finish; + } +weirdness: +#endif + /* + * Build result in CAM ccb. + */ + sym_set_cam_result_error(np, cp, resid); + +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING +finish: +#endif + /* + * Add this one to the COMP queue. + */ + sym_remque(&cp->link_ccbq); + sym_insque_head(&cp->link_ccbq, &np->comp_ccbq); + + /* + * Complete all those commands with either error + * or requeue condition. + */ + sym_flush_comp_queue(np, 0); + +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + /* + * Donnot start more than 1 command after an error. + */ + sym_start_next_ccbs(np, lp, 1); +#endif +} + +/* + * Complete execution of a successful SCSI command. 
+ * + * Only successful commands go to the DONE queue, + * since we need to have the SCRIPTS processor + * stopped on any error condition. + * The SCRIPTS processor is running while we are + * completing successful commands. + */ +void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp) +{ +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + struct sym_tcb *tp; + struct sym_lcb *lp; +#endif + struct scsi_cmnd *cmd; + int resid; + + /* + * Paranoid check. :) + */ + if (!cp || !cp->cmd) + return; + assert (cp->host_status == HS_COMPLETE); + + /* + * Get user command. + */ + cmd = cp->cmd; + +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + /* + * Get target and lun pointers. + */ + tp = &np->target[cp->target]; + lp = sym_lp(tp, cp->lun); +#endif + + /* + * If all data have been transferred, given than no + * extended error did occur, there is no residual. + */ + resid = 0; + if (cp->phys.head.lastp != cp->goalp) + resid = sym_compute_residual(np, cp); + + /* + * Wrong transfer residuals may be worse than just always + * returning zero. User can disable this feature in + * sym53c8xx.h. Residual support is enabled by default. + */ + if (!SYM_SETUP_RESIDUAL_SUPPORT) + resid = 0; +#ifdef DEBUG_2_0_X +if (resid) + printf("XXXX RESID= %d - 0x%x\n", resid, resid); +#endif + + /* + * Build result in CAM ccb. + */ + sym_set_cam_result_ok(cp, cmd, resid); + +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + /* + * If max number of started ccbs had been reduced, + * increase it if 200 good status received. + */ + if (lp && lp->started_max < lp->started_limit) { + ++lp->num_sgood; + if (lp->num_sgood >= 200) { + lp->num_sgood = 0; + ++lp->started_max; + if (sym_verbose >= 2) { + sym_print_addr(cmd, " queue depth is now %d\n", + lp->started_max); + } + } + } +#endif + + /* + * Free our CCB. + */ + sym_free_ccb (np, cp); + +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + /* + * Requeue a couple of awaiting scsi commands. + */ + if (!sym_que_empty(&lp->waiting_ccbq)) + sym_start_next_ccbs(np, lp, 2); +#endif + /* + * Complete the command. + */ + sym_xpt_done(np, cmd); +} + +/* + * Soft-attach the controller. + */ +int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram) +{ + struct sym_hcb *np = sym_get_hcb(shost); + int i; + + /* + * Get some info about the firmware. + */ + np->scripta_sz = fw->a_size; + np->scriptb_sz = fw->b_size; + np->scriptz_sz = fw->z_size; + np->fw_setup = fw->setup; + np->fw_patch = fw->patch; + np->fw_name = fw->name; + + /* + * Save setting of some IO registers, so we will + * be able to probe specific implementations. + */ + sym_save_initial_setting (np); + + /* + * Reset the chip now, since it has been reported + * that SCSI clock calibration may not work properly + * if the chip is currently active. + */ + sym_chip_reset(np); + + /* + * Prepare controller and devices settings, according + * to chip features, user set-up and driver set-up. + */ + sym_prepare_setting(shost, np, nvram); + + /* + * Check the PCI clock frequency. + * Must be performed after prepare_setting since it destroys + * STEST1 that is used to probe for the clock doubler. + */ + i = sym_getpciclock(np); + if (i > 37000 && !(np->features & FE_66MHZ)) + printf("%s: PCI BUS clock seems too high: %u KHz.\n", + sym_name(np), i); + + /* + * Allocate the start queue. + */ + np->squeue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE"); + if (!np->squeue) + goto attach_failed; + np->squeue_ba = vtobus(np->squeue); + + /* + * Allocate the done queue. 
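/*
 * Editor's sketch (not part of the driver): the per-LUN queue-depth
 * adaptation performed by the two completion paths above (only compiled
 * in with SYM_OPT_HANDLE_DEVICE_QUEUEING).  A QUEUE FULL status shrinks
 * the ceiling to just below what was actually outstanding; 200 GOOD
 * statuses grow it back by one, up to the configured limit.  Field names
 * are simplified stand-ins for the struct sym_lcb counters.
 */
struct lun_depth {
	int started_tags;	/* commands currently started */
	int started_max;	/* current ceiling */
	int started_limit;	/* hard limit (SYM_CONF_MAX_TASK) */
	int num_sgood;		/* GOOD statuses since the last change */
};

static void on_queue_full(struct lun_depth *l, int requeued)
{
	if (l->started_tags - requeued >= 2) {
		l->started_max = l->started_tags - requeued - 1;
		l->num_sgood = 0;
	}
}

static void on_good_status(struct lun_depth *l)
{
	if (l->started_max >= l->started_limit)
		return;
	if (++l->num_sgood >= 200) {
		l->num_sgood = 0;
		l->started_max++;
	}
}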
+ */ + np->dqueue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE"); + if (!np->dqueue) + goto attach_failed; + np->dqueue_ba = vtobus(np->dqueue); + + /* + * Allocate the target bus address array. + */ + np->targtbl = sym_calloc_dma(256, "TARGTBL"); + if (!np->targtbl) + goto attach_failed; + np->targtbl_ba = vtobus(np->targtbl); + + /* + * Allocate SCRIPTS areas. + */ + np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0"); + np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0"); + np->scriptz0 = sym_calloc_dma(np->scriptz_sz, "SCRIPTZ0"); + if (!np->scripta0 || !np->scriptb0 || !np->scriptz0) + goto attach_failed; + + /* + * Allocate the array of lists of CCBs hashed by DSA. + */ + np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(*np->ccbh), GFP_KERNEL); + if (!np->ccbh) + goto attach_failed; + + /* + * Initialyze the CCB free and busy queues. + */ + sym_que_init(&np->free_ccbq); + sym_que_init(&np->busy_ccbq); + sym_que_init(&np->comp_ccbq); + + /* + * Initialization for optional handling + * of device queueing. + */ +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + sym_que_init(&np->dummy_ccbq); +#endif + /* + * Allocate some CCB. We need at least ONE. + */ + if (!sym_alloc_ccb(np)) + goto attach_failed; + + /* + * Calculate BUS addresses where we are going + * to load the SCRIPTS. + */ + np->scripta_ba = vtobus(np->scripta0); + np->scriptb_ba = vtobus(np->scriptb0); + np->scriptz_ba = vtobus(np->scriptz0); + + if (np->ram_ba) { + np->scripta_ba = np->ram_ba; + if (np->features & FE_RAM8K) { + np->scriptb_ba = np->scripta_ba + 4096; +#if 0 /* May get useful for 64 BIT PCI addressing */ + np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32); +#endif + } + } + + /* + * Copy scripts to controller instance. + */ + memcpy(np->scripta0, fw->a_base, np->scripta_sz); + memcpy(np->scriptb0, fw->b_base, np->scriptb_sz); + memcpy(np->scriptz0, fw->z_base, np->scriptz_sz); + + /* + * Setup variable parts in scripts and compute + * scripts bus addresses used from the C code. + */ + np->fw_setup(np, fw); + + /* + * Bind SCRIPTS with physical addresses usable by the + * SCRIPTS processor (as seen from the BUS = BUS addresses). + */ + sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz); + sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz); + sym_fw_bind_script(np, (u32 *) np->scriptz0, np->scriptz_sz); + +#ifdef SYM_CONF_IARB_SUPPORT + /* + * If user wants IARB to be set when we win arbitration + * and have other jobs, compute the max number of consecutive + * settings of IARB hints before we leave devices a chance to + * arbitrate for reselection. + */ +#ifdef SYM_SETUP_IARB_MAX + np->iarb_max = SYM_SETUP_IARB_MAX; +#else + np->iarb_max = 4; +#endif +#endif + + /* + * Prepare the idle and invalid task actions. + */ + np->idletask.start = cpu_to_scr(SCRIPTA_BA(np, idle)); + np->idletask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); + np->idletask_ba = vtobus(&np->idletask); + + np->notask.start = cpu_to_scr(SCRIPTA_BA(np, idle)); + np->notask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); + np->notask_ba = vtobus(&np->notask); + + np->bad_itl.start = cpu_to_scr(SCRIPTA_BA(np, idle)); + np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); + np->bad_itl_ba = vtobus(&np->bad_itl); + + np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA(np, idle)); + np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA(np,bad_i_t_l_q)); + np->bad_itlq_ba = vtobus(&np->bad_itlq); + + /* + * Allocate and prepare the lun JUMP table that is used + * for a target prior the probing of devices (bad lun table). 
+ * A private table will be allocated for the target on the + * first INQUIRY response received. + */ + np->badluntbl = sym_calloc_dma(256, "BADLUNTBL"); + if (!np->badluntbl) + goto attach_failed; + + np->badlun_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun)); + memset32(np->badluntbl, cpu_to_scr(vtobus(&np->badlun_sa)), 64); + + /* + * Prepare the bus address array that contains the bus + * address of each target control block. + * For now, assume all logical units are wrong. :) + */ + for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { + np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i])); + np->target[i].head.luntbl_sa = + cpu_to_scr(vtobus(np->badluntbl)); + np->target[i].head.lun0_sa = + cpu_to_scr(vtobus(&np->badlun_sa)); + } + + /* + * Now check the cache handling of the pci chipset. + */ + if (sym_snooptest (np)) { + printf("%s: CACHE INCORRECTLY CONFIGURED.\n", sym_name(np)); + goto attach_failed; + } + + /* + * Sigh! we are done. + */ + return 0; + +attach_failed: + return -ENXIO; +} + +/* + * Free everything that has been allocated for this device. + */ +void sym_hcb_free(struct sym_hcb *np) +{ + SYM_QUEHEAD *qp; + struct sym_ccb *cp; + struct sym_tcb *tp; + int target; + + if (np->scriptz0) + sym_mfree_dma(np->scriptz0, np->scriptz_sz, "SCRIPTZ0"); + if (np->scriptb0) + sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0"); + if (np->scripta0) + sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0"); + if (np->squeue) + sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE"); + if (np->dqueue) + sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE"); + + if (np->actccbs) { + while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) { + cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); + sym_mfree_dma(cp, sizeof(*cp), "CCB"); + } + } + kfree(np->ccbh); + + if (np->badluntbl) + sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL"); + + for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) { + tp = &np->target[target]; + if (tp->luntbl) + sym_mfree_dma(tp->luntbl, 256, "LUNTBL"); +#if SYM_CONF_MAX_LUN > 1 + kfree(tp->lunmp); +#endif + } + if (np->targtbl) + sym_mfree_dma(np->targtbl, 256, "TARGTBL"); +} diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h new file mode 100644 index 000000000..9231a2899 --- /dev/null +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h @@ -0,0 +1,1213 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family + * of PCI-SCSI IO processors. + * + * Copyright (C) 1999-2001 Gerard Roudier + * + * This driver is derived from the Linux sym53c8xx driver. + * Copyright (C) 1998-2000 Gerard Roudier + * + * The sym53c8xx driver is derived from the ncr53c8xx driver that had been + * a port of the FreeBSD ncr driver to Linux-1.2.13. + * + * The original ncr driver has been written for 386bsd and FreeBSD by + * Wolfgang Stanglmeier + * Stefan Esser + * Copyright (C) 1994 Wolfgang Stanglmeier + * + * Other major contributions: + * + * NVRAM detection and reading. + * Copyright (C) 1997 Richard Waltham + * + *----------------------------------------------------------------------------- + */ + +#include + +#ifndef SYM_HIPD_H +#define SYM_HIPD_H + +/* + * Generic driver options. + * + * They may be defined in platform specific headers, if they + * are useful. + * + * SYM_OPT_HANDLE_DEVICE_QUEUEING + * When this option is set, the driver will use a queue per + * device and handle QUEUE FULL status requeuing internally. 
+ * + * SYM_OPT_LIMIT_COMMAND_REORDERING + * When this option is set, the driver tries to limit tagged + * command reordering to some reasonable value. + * (set for Linux) + */ +#if 0 +#define SYM_OPT_HANDLE_DEVICE_QUEUEING +#define SYM_OPT_LIMIT_COMMAND_REORDERING +#endif + +/* + * Active debugging tags and verbosity. + * Both DEBUG_FLAGS and sym_verbose can be redefined + * by the platform specific code to something else. + */ +#define DEBUG_ALLOC (0x0001) +#define DEBUG_PHASE (0x0002) +#define DEBUG_POLL (0x0004) +#define DEBUG_QUEUE (0x0008) +#define DEBUG_RESULT (0x0010) +#define DEBUG_SCATTER (0x0020) +#define DEBUG_SCRIPT (0x0040) +#define DEBUG_TINY (0x0080) +#define DEBUG_TIMING (0x0100) +#define DEBUG_NEGO (0x0200) +#define DEBUG_TAGS (0x0400) +#define DEBUG_POINTER (0x0800) + +#ifndef DEBUG_FLAGS +#define DEBUG_FLAGS (0x0000) +#endif + +#ifndef sym_verbose +#define sym_verbose (np->verbose) +#endif + +/* + * These ones should have been already defined. + */ +#ifndef assert +#define assert(expression) { \ + if (!(expression)) { \ + (void)panic( \ + "assertion \"%s\" failed: file \"%s\", line %d\n", \ + #expression, \ + __FILE__, __LINE__); \ + } \ +} +#endif + +/* + * Number of tasks per device we want to handle. + */ +#if SYM_CONF_MAX_TAG_ORDER > 8 +#error "more than 256 tags per logical unit not allowed." +#endif +#define SYM_CONF_MAX_TASK (1< SYM_CONF_MAX_TASK +#undef SYM_CONF_MAX_TAG +#define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK +#endif + +/* + * This one means 'NO TAG for this job' + */ +#define NO_TAG (256) + +/* + * Number of SCSI targets. + */ +#if SYM_CONF_MAX_TARGET > 16 +#error "more than 16 targets not allowed." +#endif + +/* + * Number of logical units per target. + */ +#if SYM_CONF_MAX_LUN > 64 +#error "more than 64 logical units per target not allowed." +#endif + +/* + * Asynchronous pre-scaler (ns). Shall be 40 for + * the SCSI timings to be compliant. + */ +#define SYM_CONF_MIN_ASYNC (40) + + +/* + * MEMORY ALLOCATOR. + */ + +#define SYM_MEM_WARN 1 /* Warn on failed operations */ + +#define SYM_MEM_PAGE_ORDER 0 /* 1 PAGE maximum */ +#define SYM_MEM_CLUSTER_SHIFT (PAGE_SHIFT+SYM_MEM_PAGE_ORDER) +#define SYM_MEM_FREE_UNUSED /* Free unused pages immediately */ +/* + * Shortest memory chunk is (1< SYM_MEM_CLUSTER_SIZE/8 +#undef SYM_CONF_MAX_QUEUE +#define SYM_CONF_MAX_QUEUE (SYM_MEM_CLUSTER_SIZE/8) +#undef SYM_CONF_MAX_START +#define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2) +#endif + +/* + * For this one, we want a short name :-) + */ +#define MAX_QUEUE SYM_CONF_MAX_QUEUE + +/* + * Common definitions for both bus space based and legacy IO methods. 
+ */ + +#define INB_OFF(np, o) ioread8(np->s.ioaddr + (o)) +#define INW_OFF(np, o) ioread16(np->s.ioaddr + (o)) +#define INL_OFF(np, o) ioread32(np->s.ioaddr + (o)) + +#define OUTB_OFF(np, o, val) iowrite8((val), np->s.ioaddr + (o)) +#define OUTW_OFF(np, o, val) iowrite16((val), np->s.ioaddr + (o)) +#define OUTL_OFF(np, o, val) iowrite32((val), np->s.ioaddr + (o)) + +#define INB(np, r) INB_OFF(np, offsetof(struct sym_reg, r)) +#define INW(np, r) INW_OFF(np, offsetof(struct sym_reg, r)) +#define INL(np, r) INL_OFF(np, offsetof(struct sym_reg, r)) + +#define OUTB(np, r, v) OUTB_OFF(np, offsetof(struct sym_reg, r), (v)) +#define OUTW(np, r, v) OUTW_OFF(np, offsetof(struct sym_reg, r), (v)) +#define OUTL(np, r, v) OUTL_OFF(np, offsetof(struct sym_reg, r), (v)) + +#define OUTONB(np, r, m) OUTB(np, r, INB(np, r) | (m)) +#define OUTOFFB(np, r, m) OUTB(np, r, INB(np, r) & ~(m)) +#define OUTONW(np, r, m) OUTW(np, r, INW(np, r) | (m)) +#define OUTOFFW(np, r, m) OUTW(np, r, INW(np, r) & ~(m)) +#define OUTONL(np, r, m) OUTL(np, r, INL(np, r) | (m)) +#define OUTOFFL(np, r, m) OUTL(np, r, INL(np, r) & ~(m)) + +/* + * We normally want the chip to have a consistent view + * of driver internal data structures when we restart it. + * Thus these macros. + */ +#define OUTL_DSP(np, v) \ + do { \ + MEMORY_WRITE_BARRIER(); \ + OUTL(np, nc_dsp, (v)); \ + } while (0) + +#define OUTONB_STD() \ + do { \ + MEMORY_WRITE_BARRIER(); \ + OUTONB(np, nc_dcntl, (STD|NOCOM)); \ + } while (0) + +/* + * Command control block states. + */ +#define HS_IDLE (0) +#define HS_BUSY (1) +#define HS_NEGOTIATE (2) /* sync/wide data transfer*/ +#define HS_DISCONNECT (3) /* Disconnected by target */ +#define HS_WAIT (4) /* waiting for resource */ + +#define HS_DONEMASK (0x80) +#define HS_COMPLETE (4|HS_DONEMASK) +#define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */ +#define HS_UNEXPECTED (6|HS_DONEMASK) /* Unexpected disconnect */ +#define HS_COMP_ERR (7|HS_DONEMASK) /* Completed with error */ + +/* + * Software Interrupt Codes + */ +#define SIR_BAD_SCSI_STATUS (1) +#define SIR_SEL_ATN_NO_MSG_OUT (2) +#define SIR_MSG_RECEIVED (3) +#define SIR_MSG_WEIRD (4) +#define SIR_NEGO_FAILED (5) +#define SIR_NEGO_PROTO (6) +#define SIR_SCRIPT_STOPPED (7) +#define SIR_REJECT_TO_SEND (8) +#define SIR_SWIDE_OVERRUN (9) +#define SIR_SODL_UNDERRUN (10) +#define SIR_RESEL_NO_MSG_IN (11) +#define SIR_RESEL_NO_IDENTIFY (12) +#define SIR_RESEL_BAD_LUN (13) +#define SIR_TARGET_SELECTED (14) +#define SIR_RESEL_BAD_I_T_L (15) +#define SIR_RESEL_BAD_I_T_L_Q (16) +#define SIR_ABORT_SENT (17) +#define SIR_RESEL_ABORTED (18) +#define SIR_MSG_OUT_DONE (19) +#define SIR_COMPLETE_ERROR (20) +#define SIR_DATA_OVERRUN (21) +#define SIR_BAD_PHASE (22) +#if SYM_CONF_DMA_ADDRESSING_MODE == 2 +#define SIR_DMAP_DIRTY (23) +#define SIR_MAX (23) +#else +#define SIR_MAX (22) +#endif + +/* + * Extended error bit codes. + * xerr_status field of struct sym_ccb. + */ +#define XE_EXTRA_DATA (1) /* unexpected data phase */ +#define XE_BAD_PHASE (1<<1) /* illegal phase (4/5) */ +#define XE_PARITY_ERR (1<<2) /* unrecovered SCSI parity error */ +#define XE_SODL_UNRUN (1<<3) /* ODD transfer in DATA OUT phase */ +#define XE_SWIDE_OVRUN (1<<4) /* ODD transfer in DATA IN phase */ + +/* + * Negotiation status. + * nego_status field of struct sym_ccb. + */ +#define NS_SYNC (1) +#define NS_WIDE (2) +#define NS_PPR (3) + +/* + * A CCB hashed table is used to retrieve CCB address + * from DSA value. 
+ */ +#define CCB_HASH_SHIFT 8 +#define CCB_HASH_SIZE (1UL << CCB_HASH_SHIFT) +#define CCB_HASH_MASK (CCB_HASH_SIZE-1) +#if 1 +#define CCB_HASH_CODE(dsa) \ + (((dsa) >> (_LGRU16_(sizeof(struct sym_ccb)))) & CCB_HASH_MASK) +#else +#define CCB_HASH_CODE(dsa) (((dsa) >> 9) & CCB_HASH_MASK) +#endif + +#if SYM_CONF_DMA_ADDRESSING_MODE == 2 +/* + * We may want to use segment registers for 64 bit DMA. + * 16 segments registers -> up to 64 GB addressable. + */ +#define SYM_DMAP_SHIFT (4) +#define SYM_DMAP_SIZE (1u< SCNTL4 register */ +/*1*/ u_char sval; /* -> SXFER io register */ +/*2*/ u_char filler1; +/*3*/ u_char wval; /* -> SCNTL3 io register */ +}; + +/* + * Target Control Block + */ +struct sym_tcb { + /* + * TCB header. + * Assumed at offset 0. + */ +/*0*/ struct sym_tcbh head; + + /* + * LUN table used by the SCRIPTS processor. + * An array of bus addresses is used on reselection. + */ + u32 *luntbl; /* LCBs bus address table */ + int nlcb; /* Number of valid LCBs (including LUN #0) */ + + /* + * LUN table used by the C code. + */ + struct sym_lcb *lun0p; /* LCB of LUN #0 (usual case) */ +#if SYM_CONF_MAX_LUN > 1 + struct sym_lcb **lunmp; /* Other LCBs [1..MAX_LUN] */ +#endif + +#ifdef SYM_HAVE_STCB + /* + * O/S specific data structure. + */ + struct sym_stcb s; +#endif + + /* Transfer goal */ + struct sym_trans tgoal; + + /* Last printed transfer speed */ + struct sym_trans tprint; + + /* + * Keep track of the CCB used for the negotiation in order + * to ensure that only 1 negotiation is queued at a time. + */ + struct sym_ccb * nego_cp; /* CCB used for the nego */ + + /* + * Set when we want to reset the device. + */ + u_char to_reset; + + /* + * Other user settable limits and options. + * These limits are read from the NVRAM if present. + */ + unsigned char usrflags; + unsigned char usr_period; + unsigned char usr_width; + unsigned short usrtags; + struct scsi_target *starget; +}; + +/* + * Global LCB HEADER. + * + * Due to lack of indirect addressing on earlier NCR chips, + * this substructure is copied from the LCB to a global + * address after selection. + * For SYMBIOS chips that support LOAD/STORE this copy is + * not needed and thus not performed. + */ +struct sym_lcbh { + /* + * SCRIPTS address jumped by SCRIPTS on reselection. + * For not probed logical units, this address points to + * SCRIPTS that deal with bad LU handling (must be at + * offset zero of the LCB for that reason). + */ +/*0*/ u32 resel_sa; + + /* + * Task (bus address of a CCB) read from SCRIPTS that points + * to the unique ITL nexus allowed to be disconnected. + */ + u32 itl_task_sa; + + /* + * Task table bus address (read from SCRIPTS). + */ + u32 itlq_tbl_sa; +}; + +/* + * Logical Unit Control Block + */ +struct sym_lcb { + /* + * TCB header. + * Assumed at offset 0. + */ +/*0*/ struct sym_lcbh head; + + /* + * Task table read from SCRIPTS that contains pointers to + * ITLQ nexuses. The bus address read from SCRIPTS is + * inside the header. + */ + u32 *itlq_tbl; /* Kernel virtual address */ + + /* + * Busy CCBs management. + */ + u_short busy_itlq; /* Number of busy tagged CCBs */ + u_short busy_itl; /* Number of busy untagged CCBs */ + + /* + * Circular tag allocation buffer. + */ + u_short ia_tag; /* Tag allocation index */ + u_short if_tag; /* Tag release index */ + u_char *cb_tags; /* Circular tags buffer */ + + /* + * O/S specific data structure. 
+ */ +#ifdef SYM_HAVE_SLCB + struct sym_slcb s; +#endif + +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + /* + * Optionnaly the driver can handle device queueing, + * and requeues internally command to redo. + */ + SYM_QUEHEAD waiting_ccbq; + SYM_QUEHEAD started_ccbq; + int num_sgood; + u_short started_tags; + u_short started_no_tag; + u_short started_max; + u_short started_limit; +#endif + +#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING + /* + * Optionally the driver can try to prevent SCSI + * IOs from being reordered too much. + */ + u_char tags_si; /* Current index to tags sum */ + u_short tags_sum[2]; /* Tags sum counters */ + u_short tags_since; /* # of tags since last switch */ +#endif + + /* + * Set when we want to clear all tasks. + */ + u_char to_clear; + + /* + * Capabilities. + */ + u_char user_flags; + u_char curr_flags; +}; + +/* + * Action from SCRIPTS on a task. + * Is part of the CCB, but is also used separately to plug + * error handling action to perform from SCRIPTS. + */ +struct sym_actscr { + u32 start; /* Jumped by SCRIPTS after selection */ + u32 restart; /* Jumped by SCRIPTS on relection */ +}; + +/* + * Phase mismatch context. + * + * It is part of the CCB and is used as parameters for the + * DATA pointer. We need two contexts to handle correctly the + * SAVED DATA POINTER. + */ +struct sym_pmc { + struct sym_tblmove sg; /* Updated interrupted SG block */ + u32 ret; /* SCRIPT return address */ +}; + +/* + * LUN control block lookup. + * We use a direct pointer for LUN #0, and a table of + * pointers which is only allocated for devices that support + * LUN(s) > 0. + */ +#if SYM_CONF_MAX_LUN <= 1 +#define sym_lp(tp, lun) (!lun) ? (tp)->lun0p : NULL +#else +#define sym_lp(tp, lun) \ + (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[((u8)lun)] : NULL +#endif + +/* + * Status are used by the host and the script processor. + * + * The last four bytes (status[4]) are copied to the + * scratchb register (declared as scr0..scr3) just after the + * select/reselect, and copied back just after disconnecting. + * Inside the script the XX_REG are used. + */ + +/* + * Last four bytes (script) + */ +#define HX_REG scr0 +#define HX_PRT nc_scr0 +#define HS_REG scr1 +#define HS_PRT nc_scr1 +#define SS_REG scr2 +#define SS_PRT nc_scr2 +#define HF_REG scr3 +#define HF_PRT nc_scr3 + +/* + * Last four bytes (host) + */ +#define host_xflags phys.head.status[0] +#define host_status phys.head.status[1] +#define ssss_status phys.head.status[2] +#define host_flags phys.head.status[3] + +/* + * Host flags + */ +#define HF_IN_PM0 1u +#define HF_IN_PM1 (1u<<1) +#define HF_ACT_PM (1u<<2) +#define HF_DP_SAVED (1u<<3) +#define HF_SENSE (1u<<4) +#define HF_EXT_ERR (1u<<5) +#define HF_DATA_IN (1u<<6) +#ifdef SYM_CONF_IARB_SUPPORT +#define HF_HINT_IARB (1u<<7) +#endif + +/* + * More host flags + */ +#if SYM_CONF_DMA_ADDRESSING_MODE == 2 +#define HX_DMAP_DIRTY (1u<<7) +#endif + +/* + * Global CCB HEADER. + * + * Due to lack of indirect addressing on earlier NCR chips, + * this substructure is copied from the ccb to a global + * address after selection (or reselection) and copied back + * before disconnect. + * For SYMBIOS chips that support LOAD/STORE this copy is + * not needed and thus not performed. + */ + +struct sym_ccbh { + /* + * Start and restart SCRIPTS addresses (must be at 0). + */ +/*0*/ struct sym_actscr go; + + /* + * SCRIPTS jump address that deal with data pointers. + * 'savep' points to the position in the script responsible + * for the actual transfer of data. 
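The ia_tag/if_tag/cb_tags fields above implement tag numbers as a circular free list: tags are taken at one index and returned at the other, both wrapping at the buffer size. The sketch below is a simplified standalone model of that scheme, not the driver's exact code; MAX_TASK, tags_init and the helper names are illustrative stand-ins, and the real driver additionally checks its busy and started counters before handing out a tag:

#include <stdio.h>

#define MAX_TASK 16                       /* stand-in for SYM_CONF_MAX_TASK */

static unsigned char cb_tags[MAX_TASK];   /* circular buffer of free tag numbers */
static unsigned short ia_tag, if_tag;     /* allocation / release indices */

static void tags_init(void)
{
    int i;
    for (i = 0; i < MAX_TASK; i++)
        cb_tags[i] = i;                   /* every tag starts out free */
    ia_tag = if_tag = 0;
}

static unsigned char tag_get(void)
{
    unsigned char tag = cb_tags[ia_tag];
    if (++ia_tag == MAX_TASK)
        ia_tag = 0;
    return tag;
}

static void tag_put(unsigned char tag)
{
    cb_tags[if_tag] = tag;
    if (++if_tag == MAX_TASK)
        if_tag = 0;
}

int main(void)
{
    unsigned char a, b;

    tags_init();
    a = tag_get();
    b = tag_get();
    printf("allocated tags %d and %d\n", a, b);
    tag_put(a);                           /* tag a becomes reusable later */
    printf("next allocation: %d\n", tag_get());
    return 0;
}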
+ * It's written on reception of a SAVE_DATA_POINTER message. + */ + u32 savep; /* Jump address to saved data pointer */ + u32 lastp; /* SCRIPTS address at end of data */ + + /* + * Status fields. + */ + u8 status[4]; +}; + +/* + * GET/SET the value of the data pointer used by SCRIPTS. + * + * We must distinguish between the LOAD/STORE-based SCRIPTS + * that use directly the header in the CCB, and the NCR-GENERIC + * SCRIPTS that use the copy of the header in the HCB. + */ +#if SYM_CONF_GENERIC_SUPPORT +#define sym_set_script_dp(np, cp, dp) \ + do { \ + if (np->features & FE_LDSTR) \ + cp->phys.head.lastp = cpu_to_scr(dp); \ + else \ + np->ccb_head.lastp = cpu_to_scr(dp); \ + } while (0) +#define sym_get_script_dp(np, cp) \ + scr_to_cpu((np->features & FE_LDSTR) ? \ + cp->phys.head.lastp : np->ccb_head.lastp) +#else +#define sym_set_script_dp(np, cp, dp) \ + do { \ + cp->phys.head.lastp = cpu_to_scr(dp); \ + } while (0) + +#define sym_get_script_dp(np, cp) (cp->phys.head.lastp) +#endif + +/* + * Data Structure Block + * + * During execution of a ccb by the script processor, the + * DSA (data structure address) register points to this + * substructure of the ccb. + */ +struct sym_dsb { + /* + * CCB header. + * Also assumed at offset 0 of the sym_ccb structure. + */ +/*0*/ struct sym_ccbh head; + + /* + * Phase mismatch contexts. + * We need two to handle correctly the SAVED DATA POINTER. + * MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic + * for address calculation from SCRIPTS. + */ + struct sym_pmc pm0; + struct sym_pmc pm1; + + /* + * Table data for Script + */ + struct sym_tblsel select; + struct sym_tblmove smsg; + struct sym_tblmove smsg_ext; + struct sym_tblmove cmd; + struct sym_tblmove sense; + struct sym_tblmove wresid; + struct sym_tblmove data [SYM_CONF_MAX_SG]; +}; + +/* + * Our Command Control Block + */ +struct sym_ccb { + /* + * This is the data structure which is pointed by the DSA + * register when it is executed by the script processor. + * It must be the first entry. + */ + struct sym_dsb phys; + + /* + * Pointer to CAM ccb and related stuff. + */ + struct scsi_cmnd *cmd; /* CAM scsiio ccb */ + u8 cdb_buf[16]; /* Copy of CDB */ +#define SYM_SNS_BBUF_LEN 32 + u8 sns_bbuf[SYM_SNS_BBUF_LEN]; /* Bounce buffer for sense data */ + int data_len; /* Total data length */ + int segments; /* Number of SG segments */ + + u8 order; /* Tag type (if tagged command) */ + unsigned char odd_byte_adjustment; /* odd-sized req on wide bus */ + + u_char nego_status; /* Negotiation status */ + u_char xerr_status; /* Extended error flags */ + u32 extra_bytes; /* Extraneous bytes transferred */ + + /* + * Message areas. + * We prepare a message to be sent after selection. + * We may use a second one if the command is rescheduled + * due to CHECK_CONDITION or COMMAND TERMINATED. + * Contents are IDENTIFY and SIMPLE_TAG. + * While negotiating sync or wide transfer, + * a SDTR or WDTR message is appended. + */ + u_char scsi_smsg [12]; + u_char scsi_smsg2[12]; + + /* + * Auto request sense related fields. + */ + u_char sensecmd[6]; /* Request Sense command */ + u_char sv_scsi_status; /* Saved SCSI status */ + u_char sv_xerr_status; /* Saved extended status */ + int sv_resid; /* Saved residual */ + + /* + * Other fields. 
+ */ + u32 ccb_ba; /* BUS address of this CCB */ + u_short tag; /* Tag for this transfer */ + /* NO_TAG means no tag */ + u_char target; + u_char lun; + struct sym_ccb *link_ccbh; /* Host adapter CCB hash chain */ + SYM_QUEHEAD link_ccbq; /* Link to free/busy CCB queue */ + u32 startp; /* Initial data pointer */ + u32 goalp; /* Expected last data pointer */ + int ext_sg; /* Extreme data pointer, used */ + int ext_ofs; /* to calculate the residual. */ +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + SYM_QUEHEAD link2_ccbq; /* Link for device queueing */ + u_char started; /* CCB queued to the squeue */ +#endif + u_char to_abort; /* Want this IO to be aborted */ +#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING + u_char tags_si; /* Lun tags sum index (0,1) */ +#endif +}; + +#define CCB_BA(cp,lbl) cpu_to_scr(cp->ccb_ba + offsetof(struct sym_ccb, lbl)) + +typedef struct device *m_pool_ident_t; + +/* + * Host Control Block + */ +struct sym_hcb { + /* + * Global headers. + * Due to poorness of addressing capabilities, earlier + * chips (810, 815, 825) copy part of the data structures + * (CCB, TCB and LCB) in fixed areas. + */ +#if SYM_CONF_GENERIC_SUPPORT + struct sym_ccbh ccb_head; + struct sym_tcbh tcb_head; + struct sym_lcbh lcb_head; +#endif + /* + * Idle task and invalid task actions and + * their bus addresses. + */ + struct sym_actscr idletask, notask, bad_itl, bad_itlq; + u32 idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba; + + /* + * Dummy lun table to protect us against target + * returning bad lun number on reselection. + */ + u32 *badluntbl; /* Table physical address */ + u32 badlun_sa; /* SCRIPT handler BUS address */ + + /* + * Bus address of this host control block. + */ + u32 hcb_ba; + + /* + * Bit 32-63 of the on-chip RAM bus address in LE format. + * The START_RAM64 script loads the MMRS and MMWS from this + * field. + */ + u32 scr_ram_seg; + + /* + * Initial value of some IO register bits. + * These values are assumed to have been set by BIOS, and may + * be used to probe adapter implementation differences. + */ + u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4, + sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4, + sv_stest1; + + /* + * Actual initial value of IO register bits used by the + * driver. They are loaded at initialisation according to + * features that are to be enabled/disabled. + */ + u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4, + rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4; + + /* + * Target data. + */ + struct sym_tcb target[SYM_CONF_MAX_TARGET]; + + /* + * Target control block bus address array used by the SCRIPT + * on reselection. + */ + u32 *targtbl; + u32 targtbl_ba; + + /* + * DMA pool handle for this HBA. + */ + m_pool_ident_t bus_dmat; + + /* + * O/S specific data structure + */ + struct sym_shcb s; + + /* + * Physical bus addresses of the chip. + */ + u32 mmio_ba; /* MMIO 32 bit BUS address */ + u32 ram_ba; /* RAM 32 bit BUS address */ + + /* + * SCRIPTS virtual and physical bus addresses. + * 'script' is loaded in the on-chip RAM if present. + * 'scripth' stays in main memory for all chips except the + * 53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM. + */ + u_char *scripta0; /* Copy of scripts A, B, Z */ + u_char *scriptb0; + u_char *scriptz0; + u32 scripta_ba; /* Actual scripts A, B, Z */ + u32 scriptb_ba; /* 32 bit bus addresses. 
*/ + u32 scriptz_ba; + u_short scripta_sz; /* Actual size of script A, B, Z*/ + u_short scriptb_sz; + u_short scriptz_sz; + + /* + * Bus addresses, setup and patch methods for + * the selected firmware. + */ + struct sym_fwa_ba fwa_bas; /* Useful SCRIPTA bus addresses */ + struct sym_fwb_ba fwb_bas; /* Useful SCRIPTB bus addresses */ + struct sym_fwz_ba fwz_bas; /* Useful SCRIPTZ bus addresses */ + void (*fw_setup)(struct sym_hcb *np, struct sym_fw *fw); + void (*fw_patch)(struct Scsi_Host *); + char *fw_name; + + /* + * General controller parameters and configuration. + */ + u_int features; /* Chip features map */ + u_char myaddr; /* SCSI id of the adapter */ + u_char maxburst; /* log base 2 of dwords burst */ + u_char maxwide; /* Maximum transfer width */ + u_char minsync; /* Min sync period factor (ST) */ + u_char maxsync; /* Max sync period factor (ST) */ + u_char maxoffs; /* Max scsi offset (ST) */ + u_char minsync_dt; /* Min sync period factor (DT) */ + u_char maxsync_dt; /* Max sync period factor (DT) */ + u_char maxoffs_dt; /* Max scsi offset (DT) */ + u_char multiplier; /* Clock multiplier (1,2,4) */ + u_char clock_divn; /* Number of clock divisors */ + u32 clock_khz; /* SCSI clock frequency in KHz */ + u32 pciclk_khz; /* Estimated PCI clock in KHz */ + /* + * Start queue management. + * It is filled up by the host processor and accessed by the + * SCRIPTS processor in order to start SCSI commands. + */ + volatile /* Prevent code optimizations */ + u32 *squeue; /* Start queue virtual address */ + u32 squeue_ba; /* Start queue BUS address */ + u_short squeueput; /* Next free slot of the queue */ + u_short actccbs; /* Number of allocated CCBs */ + + /* + * Command completion queue. + * It is the same size as the start queue to avoid overflow. + */ + u_short dqueueget; /* Next position to scan */ + volatile /* Prevent code optimizations */ + u32 *dqueue; /* Completion (done) queue */ + u32 dqueue_ba; /* Done queue BUS address */ + + /* + * Miscellaneous buffers accessed by the scripts-processor. + * They shall be DWORD aligned, because they may be read or + * written with a script command. + */ + u_char msgout[8]; /* Buffer for MESSAGE OUT */ + u_char msgin [8]; /* Buffer for MESSAGE IN */ + u32 lastmsg; /* Last SCSI message sent */ + u32 scratch; /* Scratch for SCSI receive */ + /* Also used for cache test */ + /* + * Miscellaneous configuration and status parameters. + */ + u_char usrflags; /* Miscellaneous user flags */ + u_char scsi_mode; /* Current SCSI BUS mode */ + u_char verbose; /* Verbosity for this controller*/ + + /* + * CCB lists and queue. + */ + struct sym_ccb **ccbh; /* CCBs hashed by DSA value */ + /* CCB_HASH_SIZE lists of CCBs */ + SYM_QUEHEAD free_ccbq; /* Queue of available CCBs */ + SYM_QUEHEAD busy_ccbq; /* Queue of busy CCBs */ + + /* + * During error handling and/or recovery, + * active CCBs that are to be completed with + * error or requeued are moved from the busy_ccbq + * to the comp_ccbq prior to completion. + */ + SYM_QUEHEAD comp_ccbq; + +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING + SYM_QUEHEAD dummy_ccbq; +#endif + + /* + * IMMEDIATE ARBITRATION (IARB) control. + * + * We keep track in 'last_cp' of the last CCB that has been + * queued to the SCRIPTS processor and clear 'last_cp' when + * this CCB completes. If last_cp is not zero at the moment + * we queue a new CCB, we set a flag in 'last_cp' that is + * used by the SCRIPTS as a hint for setting IARB. 
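The start and done queues declared above are circular buffers accessed through the squeueput and dqueueget indices, which wrap back to zero at the end. A simplified standalone model of that wrap-around follows; the queue depth, entry contents and helper name are illustrative assumptions, not the driver's exact entry layout:

#include <stdio.h>

typedef unsigned int u32;

#define MAX_QUEUE 8                   /* stand-in for SYM_CONF_MAX_QUEUE */

static u32 squeue[MAX_QUEUE];         /* circular start queue */
static unsigned short squeueput;      /* next free slot */

static void queue_put(u32 ccb_bus_addr)
{
    squeue[squeueput] = ccb_bus_addr;
    if (++squeueput >= MAX_QUEUE)     /* wrap to the beginning */
        squeueput = 0;
}

int main(void)
{
    u32 addr;

    for (addr = 0x1000; addr < 0x1000 + 10 * 0x100; addr += 0x100)
        queue_put(addr);
    printf("next free slot after 10 insertions into an %d-entry ring: %d\n",
           MAX_QUEUE, squeueput);
    return 0;
}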
+ * We donnot set more than 'iarb_max' consecutive hints for + * IARB in order to leave devices a chance to reselect. + * By the way, any non zero value of 'iarb_max' is unfair. :) + */ +#ifdef SYM_CONF_IARB_SUPPORT + u_short iarb_max; /* Max. # consecutive IARB hints*/ + u_short iarb_count; /* Actual # of these hints */ + struct sym_ccb * last_cp; +#endif + + /* + * Command abort handling. + * We need to synchronize tightly with the SCRIPTS + * processor in order to handle things correctly. + */ + u_char abrt_msg[4]; /* Message to send buffer */ + struct sym_tblmove abrt_tbl; /* Table for the MOV of it */ + struct sym_tblsel abrt_sel; /* Sync params for selection */ + u_char istat_sem; /* Tells the chip to stop (SEM) */ + + /* + * 64 bit DMA handling. + */ +#if SYM_CONF_DMA_ADDRESSING_MODE != 0 + u_char use_dac; /* Use PCI DAC cycles */ +#if SYM_CONF_DMA_ADDRESSING_MODE == 2 + u_char dmap_dirty; /* Dma segments registers dirty */ + u32 dmap_bah[SYM_DMAP_SIZE];/* Segment registers map */ +#endif +#endif +}; + +#if SYM_CONF_DMA_ADDRESSING_MODE == 0 +#define use_dac(np) 0 +#define set_dac(np) do { } while (0) +#else +#define use_dac(np) (np)->use_dac +#define set_dac(np) (np)->use_dac = 1 +#endif + +#define HCB_BA(np, lbl) (np->hcb_ba + offsetof(struct sym_hcb, lbl)) + + +/* + * FIRMWARES (sym_fw.c) + */ +struct sym_fw * sym_find_firmware(struct sym_chip *chip); +void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len); + +/* + * Driver methods called from O/S specific code. + */ +char *sym_driver_name(void); +void sym_print_xerr(struct scsi_cmnd *cmd, int x_status); +int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int); +struct sym_chip *sym_lookup_chip_table(u_short device_id, u_char revision); +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING +void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn); +#else +void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp); +#endif +void sym_start_up(struct Scsi_Host *, int reason); +irqreturn_t sym_interrupt(struct Scsi_Host *); +int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task); +struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order); +void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp); +struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln); +int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln); +int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp); +int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out); +int sym_reset_scsi_target(struct sym_hcb *np, int target); +void sym_hcb_free(struct sym_hcb *np); +int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram); + +/* + * Build a scatter/gather entry. + * + * For 64 bit systems, we use the 8 upper bits of the size field + * to provide bus address bits 32-39 to the SCRIPTS processor. + * This allows the 895A, 896, 1010 to address up to 1 TB of memory. 
+ */ + +#if SYM_CONF_DMA_ADDRESSING_MODE == 0 +#define DMA_DAC_MASK DMA_BIT_MASK(32) +#define sym_build_sge(np, data, badd, len) \ +do { \ + (data)->addr = cpu_to_scr(badd); \ + (data)->size = cpu_to_scr(len); \ +} while (0) +#elif SYM_CONF_DMA_ADDRESSING_MODE == 1 +#define DMA_DAC_MASK DMA_BIT_MASK(40) +#define sym_build_sge(np, data, badd, len) \ +do { \ + (data)->addr = cpu_to_scr(badd); \ + (data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len); \ +} while (0) +#elif SYM_CONF_DMA_ADDRESSING_MODE == 2 +#define DMA_DAC_MASK DMA_BIT_MASK(64) +int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s); +static inline void +sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len) +{ + u32 h = (badd>>32); + int s = (h&SYM_DMAP_MASK); + + if (h != np->dmap_bah[s]) + goto bad; +good: + (data)->addr = cpu_to_scr(badd); + (data)->size = cpu_to_scr((s<<24) + len); + return; +bad: + s = sym_lookup_dmap(np, h, s); + goto good; +} +#else +#error "Unsupported DMA addressing mode" +#endif + +/* + * MEMORY ALLOCATOR. + */ + +#define sym_get_mem_cluster() \ + (void *) __get_free_pages(GFP_ATOMIC, SYM_MEM_PAGE_ORDER) +#define sym_free_mem_cluster(p) \ + free_pages((unsigned long)p, SYM_MEM_PAGE_ORDER) + +/* + * Link between free memory chunks of a given size. + */ +typedef struct sym_m_link { + struct sym_m_link *next; +} *m_link_p; + +/* + * Virtual to bus physical translation for a given cluster. + * Such a structure is only useful with DMA abstraction. + */ +typedef struct sym_m_vtob { /* Virtual to Bus address translation */ + struct sym_m_vtob *next; + void *vaddr; /* Virtual address */ + dma_addr_t baddr; /* Bus physical address */ +} *m_vtob_p; + +/* Hash this stuff a bit to speed up translations */ +#define VTOB_HASH_SHIFT 5 +#define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT) +#define VTOB_HASH_MASK (VTOB_HASH_SIZE-1) +#define VTOB_HASH_CODE(m) \ + ((((unsigned long)(m)) >> SYM_MEM_CLUSTER_SHIFT) & VTOB_HASH_MASK) + +/* + * Memory pool of a given kind. + * Ideally, we want to use: + * 1) 1 pool for memory we donnot need to involve in DMA. + * 2) The same pool for controllers that require same DMA + * constraints and features. + * The OS specific m_pool_id_t thing and the sym_m_pool_match() + * method are expected to tell the driver about. + */ +typedef struct sym_m_pool { + m_pool_ident_t dev_dmat; /* Identifies the pool (see above) */ + void * (*get_mem_cluster)(struct sym_m_pool *); +#ifdef SYM_MEM_FREE_UNUSED + void (*free_mem_cluster)(struct sym_m_pool *, void *); +#endif +#define M_GET_MEM_CLUSTER() mp->get_mem_cluster(mp) +#define M_FREE_MEM_CLUSTER(p) mp->free_mem_cluster(mp, p) + int nump; + m_vtob_p vtob[VTOB_HASH_SIZE]; + struct sym_m_pool *next; + struct sym_m_link h[SYM_MEM_CLUSTER_SHIFT - SYM_MEM_SHIFT + 1]; +} *m_pool_p; + +/* + * Alloc, free and translate addresses to bus physical + * for DMAable memory. + */ +void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name); +void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name); +dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m); + +/* + * Verbs used by the driver code for DMAable memory handling. + * The _uvptv_ macro avoids a nasty warning about pointer to volatile + * being discarded. 
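For addressing mode 1, sym_build_sge() above folds bus address bits 32-39 into the top byte of the 32-bit size word, which implies segment lengths stay below 16 MB in that mode. A standalone sketch of the packing arithmetic (cpu_to_scr() byte swapping is omitted, and the sample address and length are arbitrary):

#include <stdio.h>

int main(void)
{
    unsigned long long badd = (0x23ULL << 32) | 0x12345678;  /* 40-bit bus address */
    unsigned int len  = 0x2000;                              /* segment length, must fit in 24 bits */
    unsigned int addr = (unsigned int)badd;                  /* low 32 address bits */
    unsigned int size = (unsigned int)(((badd >> 8) & 0xff000000) + len);

    printf("addr=0x%08x size=0x%08x (address bits 32-39 = 0x%02x)\n",
           addr, size, size >> 24);
    return 0;
}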
+ */ +#define _uvptv_(p) ((void *)((u_long)(p))) + +#define _sym_calloc_dma(np, l, n) __sym_calloc_dma(np->bus_dmat, l, n) +#define _sym_mfree_dma(np, p, l, n) \ + __sym_mfree_dma(np->bus_dmat, _uvptv_(p), l, n) +#define sym_calloc_dma(l, n) _sym_calloc_dma(np, l, n) +#define sym_mfree_dma(p, l, n) _sym_mfree_dma(np, p, l, n) +#define vtobus(p) __vtobus(np->bus_dmat, _uvptv_(p)) + +/* + * We have to provide the driver memory allocator with methods for + * it to maintain virtual to bus physical address translations. + */ + +#define sym_m_pool_match(mp_id1, mp_id2) (mp_id1 == mp_id2) + +static inline void *sym_m_get_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp) +{ + void *vaddr = NULL; + dma_addr_t baddr = 0; + + vaddr = dma_alloc_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, &baddr, + GFP_ATOMIC); + if (vaddr) { + vbp->vaddr = vaddr; + vbp->baddr = baddr; + } + return vaddr; +} + +static inline void sym_m_free_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp) +{ + dma_free_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, vbp->vaddr, + vbp->baddr); +} + +#endif /* SYM_HIPD_H */ diff --git a/drivers/scsi/sym53c8xx_2/sym_malloc.c b/drivers/scsi/sym53c8xx_2/sym_malloc.c new file mode 100644 index 000000000..eb5c045c7 --- /dev/null +++ b/drivers/scsi/sym53c8xx_2/sym_malloc.c @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family + * of PCI-SCSI IO processors. + * + * Copyright (C) 1999-2001 Gerard Roudier + * + * This driver is derived from the Linux sym53c8xx driver. + * Copyright (C) 1998-2000 Gerard Roudier + * + * The sym53c8xx driver is derived from the ncr53c8xx driver that had been + * a port of the FreeBSD ncr driver to Linux-1.2.13. + * + * The original ncr driver has been written for 386bsd and FreeBSD by + * Wolfgang Stanglmeier + * Stefan Esser + * Copyright (C) 1994 Wolfgang Stanglmeier + * + * Other major contributions: + * + * NVRAM detection and reading. + * Copyright (C) 1997 Richard Waltham + * + *----------------------------------------------------------------------------- + */ + +#include "sym_glue.h" + +/* + * Simple power of two buddy-like generic allocator. + * Provides naturally aligned memory chunks. + * + * This simple code is not intended to be fast, but to + * provide power of 2 aligned memory allocations. + * Since the SCRIPTS processor only supplies 8 bit arithmetic, + * this allocator allows simple and fast address calculations + * from the SCRIPTS code. In addition, cache line alignment + * is guaranteed for power of 2 cache line size. + * + * This allocator has been developed for the Linux sym53c8xx + * driver, since this O/S does not provide naturally aligned + * allocations. + * It has the advantage of allowing the driver to use private + * pages of memory that will be useful if we ever need to deal + * with IO MMUs for PCI. 
+ */ +static void *___sym_malloc(m_pool_p mp, int size) +{ + int i = 0; + int s = (1 << SYM_MEM_SHIFT); + int j; + void *a; + m_link_p h = mp->h; + + if (size > SYM_MEM_CLUSTER_SIZE) + return NULL; + + while (size > s) { + s <<= 1; + ++i; + } + + j = i; + while (!h[j].next) { + if (s == SYM_MEM_CLUSTER_SIZE) { + h[j].next = (m_link_p) M_GET_MEM_CLUSTER(); + if (h[j].next) + h[j].next->next = NULL; + break; + } + ++j; + s <<= 1; + } + a = h[j].next; + if (a) { + h[j].next = h[j].next->next; + while (j > i) { + j -= 1; + s >>= 1; + h[j].next = (m_link_p) (a+s); + h[j].next->next = NULL; + } + } +#ifdef DEBUG + printf("___sym_malloc(%d) = %p\n", size, (void *) a); +#endif + return a; +} + +/* + * Counter-part of the generic allocator. + */ +static void ___sym_mfree(m_pool_p mp, void *ptr, int size) +{ + int i = 0; + int s = (1 << SYM_MEM_SHIFT); + m_link_p q; + unsigned long a, b; + m_link_p h = mp->h; + +#ifdef DEBUG + printf("___sym_mfree(%p, %d)\n", ptr, size); +#endif + + if (size > SYM_MEM_CLUSTER_SIZE) + return; + + while (size > s) { + s <<= 1; + ++i; + } + + a = (unsigned long)ptr; + + while (1) { + if (s == SYM_MEM_CLUSTER_SIZE) { +#ifdef SYM_MEM_FREE_UNUSED + M_FREE_MEM_CLUSTER((void *)a); +#else + ((m_link_p) a)->next = h[i].next; + h[i].next = (m_link_p) a; +#endif + break; + } + b = a ^ s; + q = &h[i]; + while (q->next && q->next != (m_link_p) b) { + q = q->next; + } + if (!q->next) { + ((m_link_p) a)->next = h[i].next; + h[i].next = (m_link_p) a; + break; + } + q->next = q->next->next; + a = a & b; + s <<= 1; + ++i; + } +} + +/* + * Verbose and zeroing allocator that wrapps to the generic allocator. + */ +static void *__sym_calloc2(m_pool_p mp, int size, char *name, int uflags) +{ + void *p; + + p = ___sym_malloc(mp, size); + + if (DEBUG_FLAGS & DEBUG_ALLOC) { + printf ("new %-10s[%4d] @%p.\n", name, size, p); + } + + if (p) + memset(p, 0, size); + else if (uflags & SYM_MEM_WARN) + printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size); + return p; +} +#define __sym_calloc(mp, s, n) __sym_calloc2(mp, s, n, SYM_MEM_WARN) + +/* + * Its counter-part. + */ +static void __sym_mfree(m_pool_p mp, void *ptr, int size, char *name) +{ + if (DEBUG_FLAGS & DEBUG_ALLOC) + printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr); + + ___sym_mfree(mp, ptr, size); +} + +/* + * Default memory pool we donnot need to involve in DMA. + * + * With DMA abstraction, we use functions (methods), to + * distinguish between non DMAable memory and DMAable memory. + */ +static void *___mp0_get_mem_cluster(m_pool_p mp) +{ + void *m = sym_get_mem_cluster(); + if (m) + ++mp->nump; + return m; +} + +#ifdef SYM_MEM_FREE_UNUSED +static void ___mp0_free_mem_cluster(m_pool_p mp, void *m) +{ + sym_free_mem_cluster(m); + --mp->nump; +} +#else +#define ___mp0_free_mem_cluster NULL +#endif + +static struct sym_m_pool mp0 = { + NULL, + ___mp0_get_mem_cluster, + ___mp0_free_mem_cluster +}; + +/* + * Methods that maintains DMAable pools according to user allocations. + * New pools are created on the fly when a new pool id is provided. + * They are deleted on the fly when they get emptied. 
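___sym_malloc() and ___sym_mfree() above follow the classic buddy scheme: a request is rounded up to the next power of two to pick a free list, and a chunk's buddy is located by XOR-ing its address with the chunk size. The standalone sketch below reproduces just that arithmetic; SYM_MEM_SHIFT of 4 and a 4 KiB cluster are assumptions matching the usual configuration:

#include <stdio.h>

#define SYM_MEM_SHIFT        4            /* smallest chunk: 16 bytes (assumed) */
#define SYM_MEM_CLUSTER_SIZE (1UL << 12)  /* one page, assuming PAGE_SHIFT == 12 */

int main(void)
{
    int size = 24;                        /* requested bytes */
    int i = 0;
    unsigned long s = 1UL << SYM_MEM_SHIFT;
    unsigned long a = 0x1000 + 0x20;      /* example chunk address */

    /* Same rounding loop as ___sym_malloc(): find the smallest
     * power-of-two chunk >= size and its free-list index. */
    while (size > s) {
        s <<= 1;
        ++i;
    }
    printf("request of %d bytes uses a %lu-byte chunk from list h[%d]\n",
           size, s, i);

    /* A chunk's buddy is found by toggling the bit for its size,
     * as in ___sym_mfree(): b = a ^ s. */
    printf("buddy of 0x%lx at size %lu is 0x%lx\n", a, s, a ^ s);
    return 0;
}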
+ */ +/* Get a memory cluster that matches the DMA constraints of a given pool */ +static void * ___get_dma_mem_cluster(m_pool_p mp) +{ + m_vtob_p vbp; + void *vaddr; + + vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB"); + if (!vbp) + goto out_err; + + vaddr = sym_m_get_dma_mem_cluster(mp, vbp); + if (vaddr) { + int hc = VTOB_HASH_CODE(vaddr); + vbp->next = mp->vtob[hc]; + mp->vtob[hc] = vbp; + ++mp->nump; + } + return vaddr; +out_err: + return NULL; +} + +#ifdef SYM_MEM_FREE_UNUSED +/* Free a memory cluster and associated resources for DMA */ +static void ___free_dma_mem_cluster(m_pool_p mp, void *m) +{ + m_vtob_p *vbpp, vbp; + int hc = VTOB_HASH_CODE(m); + + vbpp = &mp->vtob[hc]; + while (*vbpp && (*vbpp)->vaddr != m) + vbpp = &(*vbpp)->next; + if (*vbpp) { + vbp = *vbpp; + *vbpp = (*vbpp)->next; + sym_m_free_dma_mem_cluster(mp, vbp); + __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB"); + --mp->nump; + } +} +#endif + +/* Fetch the memory pool for a given pool id (i.e. DMA constraints) */ +static inline m_pool_p ___get_dma_pool(m_pool_ident_t dev_dmat) +{ + m_pool_p mp; + for (mp = mp0.next; + mp && !sym_m_pool_match(mp->dev_dmat, dev_dmat); + mp = mp->next); + return mp; +} + +/* Create a new memory DMAable pool (when fetch failed) */ +static m_pool_p ___cre_dma_pool(m_pool_ident_t dev_dmat) +{ + m_pool_p mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL"); + if (mp) { + mp->dev_dmat = dev_dmat; + mp->get_mem_cluster = ___get_dma_mem_cluster; +#ifdef SYM_MEM_FREE_UNUSED + mp->free_mem_cluster = ___free_dma_mem_cluster; +#endif + mp->next = mp0.next; + mp0.next = mp; + return mp; + } + return NULL; +} + +#ifdef SYM_MEM_FREE_UNUSED +/* Destroy a DMAable memory pool (when got emptied) */ +static void ___del_dma_pool(m_pool_p p) +{ + m_pool_p *pp = &mp0.next; + + while (*pp && *pp != p) + pp = &(*pp)->next; + if (*pp) { + *pp = (*pp)->next; + __sym_mfree(&mp0, p, sizeof(*p), "MPOOL"); + } +} +#endif + +/* This lock protects only the memory allocation/free. */ +static DEFINE_SPINLOCK(sym53c8xx_lock); + +/* + * Actual allocator for DMAable memory. + */ +void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name) +{ + unsigned long flags; + m_pool_p mp; + void *m = NULL; + + spin_lock_irqsave(&sym53c8xx_lock, flags); + mp = ___get_dma_pool(dev_dmat); + if (!mp) + mp = ___cre_dma_pool(dev_dmat); + if (!mp) + goto out; + m = __sym_calloc(mp, size, name); +#ifdef SYM_MEM_FREE_UNUSED + if (!mp->nump) + ___del_dma_pool(mp); +#endif + + out: + spin_unlock_irqrestore(&sym53c8xx_lock, flags); + return m; +} + +void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name) +{ + unsigned long flags; + m_pool_p mp; + + spin_lock_irqsave(&sym53c8xx_lock, flags); + mp = ___get_dma_pool(dev_dmat); + if (!mp) + goto out; + __sym_mfree(mp, m, size, name); +#ifdef SYM_MEM_FREE_UNUSED + if (!mp->nump) + ___del_dma_pool(mp); +#endif + out: + spin_unlock_irqrestore(&sym53c8xx_lock, flags); +} + +/* + * Actual virtual to bus physical address translator + * for 32 bit addressable DMAable memory. 
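The per-pool vtob[] chains are indexed with VTOB_HASH_CODE(), which discards the offset inside a cluster and keeps the next few address bits, so every address within a cluster maps to the bucket that holds that cluster's translation entry. A standalone sketch of the bucket computation, assuming PAGE_SHIFT of 12 and SYM_MEM_PAGE_ORDER of 0:

#include <stdio.h>

#define VTOB_HASH_SHIFT        5
#define VTOB_HASH_SIZE         (1UL << VTOB_HASH_SHIFT)
#define VTOB_HASH_MASK         (VTOB_HASH_SIZE - 1)
#define SYM_MEM_CLUSTER_SHIFT  12    /* PAGE_SHIFT + SYM_MEM_PAGE_ORDER, assumed */

#define VTOB_HASH_CODE(m) \
    ((((unsigned long)(m)) >> SYM_MEM_CLUSTER_SHIFT) & VTOB_HASH_MASK)

int main(void)
{
    /* Two virtual addresses inside the same cluster hash to the same
     * bucket, so __vtobus() only has to walk one short chain. */
    unsigned long va1 = 0xffff888012345010UL;
    unsigned long va2 = 0xffff888012345f00UL;   /* same 4 KiB cluster */

    printf("bucket(va1)=%lu bucket(va2)=%lu\n",
           VTOB_HASH_CODE(va1), VTOB_HASH_CODE(va2));
    return 0;
}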
+ */ +dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m) +{ + unsigned long flags; + m_pool_p mp; + int hc = VTOB_HASH_CODE(m); + m_vtob_p vp = NULL; + void *a = (void *)((unsigned long)m & ~SYM_MEM_CLUSTER_MASK); + dma_addr_t b; + + spin_lock_irqsave(&sym53c8xx_lock, flags); + mp = ___get_dma_pool(dev_dmat); + if (mp) { + vp = mp->vtob[hc]; + while (vp && vp->vaddr != a) + vp = vp->next; + } + if (!vp) + panic("sym: VTOBUS FAILED!\n"); + b = vp->baddr + (m - a); + spin_unlock_irqrestore(&sym53c8xx_lock, flags); + return b; +} diff --git a/drivers/scsi/sym53c8xx_2/sym_misc.h b/drivers/scsi/sym53c8xx_2/sym_misc.h new file mode 100644 index 000000000..ef419b7ec --- /dev/null +++ b/drivers/scsi/sym53c8xx_2/sym_misc.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family + * of PCI-SCSI IO processors. + * + * Copyright (C) 1999-2001 Gerard Roudier + * + * This driver is derived from the Linux sym53c8xx driver. + * Copyright (C) 1998-2000 Gerard Roudier + * + * The sym53c8xx driver is derived from the ncr53c8xx driver that had been + * a port of the FreeBSD ncr driver to Linux-1.2.13. + * + * The original ncr driver has been written for 386bsd and FreeBSD by + * Wolfgang Stanglmeier + * Stefan Esser + * Copyright (C) 1994 Wolfgang Stanglmeier + * + * Other major contributions: + * + * NVRAM detection and reading. + * Copyright (C) 1997 Richard Waltham + * + *----------------------------------------------------------------------------- + */ + +#ifndef SYM_MISC_H +#define SYM_MISC_H + +/* + * A la VMS/CAM-3 queue management. + */ +typedef struct sym_quehead { + struct sym_quehead *flink; /* Forward pointer */ + struct sym_quehead *blink; /* Backward pointer */ +} SYM_QUEHEAD; + +#define sym_que_init(ptr) do { \ + (ptr)->flink = (ptr); (ptr)->blink = (ptr); \ +} while (0) + +static inline struct sym_quehead *sym_que_first(struct sym_quehead *head) +{ + return (head->flink == head) ? 0 : head->flink; +} + +static inline struct sym_quehead *sym_que_last(struct sym_quehead *head) +{ + return (head->blink == head) ? 
0 : head->blink; +} + +static inline void __sym_que_add(struct sym_quehead * new, + struct sym_quehead * blink, + struct sym_quehead * flink) +{ + flink->blink = new; + new->flink = flink; + new->blink = blink; + blink->flink = new; +} + +static inline void __sym_que_del(struct sym_quehead * blink, + struct sym_quehead * flink) +{ + flink->blink = blink; + blink->flink = flink; +} + +static inline int sym_que_empty(struct sym_quehead *head) +{ + return head->flink == head; +} + +static inline void sym_que_splice(struct sym_quehead *list, + struct sym_quehead *head) +{ + struct sym_quehead *first = list->flink; + + if (first != list) { + struct sym_quehead *last = list->blink; + struct sym_quehead *at = head->flink; + + first->blink = head; + head->flink = first; + + last->flink = at; + at->blink = last; + } +} + +static inline void sym_que_move(struct sym_quehead *orig, + struct sym_quehead *dest) +{ + struct sym_quehead *first, *last; + + first = orig->flink; + if (first != orig) { + first->blink = dest; + dest->flink = first; + last = orig->blink; + last->flink = dest; + dest->blink = last; + orig->flink = orig; + orig->blink = orig; + } else { + dest->flink = dest; + dest->blink = dest; + } +} + +#define sym_que_entry(ptr, type, member) container_of(ptr, type, member) + +#define sym_insque(new, pos) __sym_que_add(new, pos, (pos)->flink) + +#define sym_remque(el) __sym_que_del((el)->blink, (el)->flink) + +#define sym_insque_head(new, head) __sym_que_add(new, head, (head)->flink) + +static inline struct sym_quehead *sym_remque_head(struct sym_quehead *head) +{ + struct sym_quehead *elem = head->flink; + + if (elem != head) + __sym_que_del(head, elem->flink); + else + elem = NULL; + return elem; +} + +#define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head) + +static inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head) +{ + struct sym_quehead *elem = head->blink; + + if (elem != head) + __sym_que_del(elem->blink, head); + else + elem = 0; + return elem; +} + +/* + * This one may be useful. + */ +#define FOR_EACH_QUEUED_ELEMENT(head, qp) \ + for (qp = (head)->flink; qp != (head); qp = qp->flink) +/* + * FreeBSD does not offer our kind of queue in the CAM CCB. + * So, we have to cast. + */ +#define sym_qptr(p) ((struct sym_quehead *) (p)) + +/* + * Simple bitmap operations. + */ +#define sym_set_bit(p, n) (((u32 *)(p))[(n)>>5] |= (1<<((n)&0x1f))) +#define sym_clr_bit(p, n) (((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f))) +#define sym_is_bit(p, n) (((u32 *)(p))[(n)>>5] & (1<<((n)&0x1f))) + +/* + * The below round up/down macros are to be used with a constant + * as argument (sizeof(...) for example), for the compiler to + * optimize the whole thing. + */ +#define _U_(a,m) (a)<=(1< + * + * This driver is derived from the Linux sym53c8xx driver. + * Copyright (C) 1998-2000 Gerard Roudier + * + * The sym53c8xx driver is derived from the ncr53c8xx driver that had been + * a port of the FreeBSD ncr driver to Linux-1.2.13. + * + * The original ncr driver has been written for 386bsd and FreeBSD by + * Wolfgang Stanglmeier + * Stefan Esser + * Copyright (C) 1994 Wolfgang Stanglmeier + * + * Other major contributions: + * + * NVRAM detection and reading. + * Copyright (C) 1997 Richard Waltham + * + *----------------------------------------------------------------------------- + */ + +#include "sym_glue.h" +#include "sym_nvram.h" + +#ifdef SYM_CONF_DEBUG_NVRAM +static u_char Tekram_boot_delay[7] = {3, 5, 10, 20, 30, 60, 120}; +#endif + +/* + * Get host setup from NVRAM. 
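The sym_quehead primitives above give an intrusive circular doubly linked list in the VMS/CAM style; sym_insque_tail() plus sym_remque_head() yields FIFO behaviour, and sym_que_entry() recovers the containing structure. The following standalone sketch re-declares a minimal subset of those helpers so it compiles on its own; struct job is an illustrative payload, not a driver type:

#include <stdio.h>
#include <stddef.h>

/* Re-declared locally, mirroring the definitions above. */
struct sym_quehead {
    struct sym_quehead *flink, *blink;
};

#define sym_que_init(ptr) do { (ptr)->flink = (ptr); (ptr)->blink = (ptr); } while (0)

static void __sym_que_add(struct sym_quehead *new,
                          struct sym_quehead *blink, struct sym_quehead *flink)
{
    flink->blink = new;
    new->flink = flink;
    new->blink = blink;
    blink->flink = new;
}

static void __sym_que_del(struct sym_quehead *blink, struct sym_quehead *flink)
{
    flink->blink = blink;
    blink->flink = flink;
}

#define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head)

static struct sym_quehead *sym_remque_head(struct sym_quehead *head)
{
    struct sym_quehead *elem = head->flink;

    if (elem != head)
        __sym_que_del(head, elem->flink);
    else
        elem = NULL;
    return elem;
}

#define sym_que_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct job {
    int id;
    struct sym_quehead link;
};

int main(void)
{
    struct sym_quehead busyq;
    struct job a = { 1 }, b = { 2 };
    struct sym_quehead *qp;

    sym_que_init(&busyq);
    sym_insque_tail(&a.link, &busyq);     /* FIFO order: a then b */
    sym_insque_tail(&b.link, &busyq);

    while ((qp = sym_remque_head(&busyq)) != NULL)
        printf("job %d\n", sym_que_entry(qp, struct job, link)->id);
    return 0;
}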
+ */ +void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram) +{ + /* + * Get parity checking, host ID, verbose mode + * and miscellaneous host flags from NVRAM. + */ + switch (nvram->type) { + case SYM_SYMBIOS_NVRAM: + if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE)) + np->rv_scntl0 &= ~0x0a; + np->myaddr = nvram->data.Symbios.host_id & 0x0f; + if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS) + np->verbose += 1; + if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO) + shost->reverse_ordering = 1; + if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET) + np->usrflags |= SYM_AVOID_BUS_RESET; + break; + case SYM_TEKRAM_NVRAM: + np->myaddr = nvram->data.Tekram.host_id & 0x0f; + break; +#ifdef CONFIG_PARISC + case SYM_PARISC_PDC: + if (nvram->data.parisc.host_id != -1) + np->myaddr = nvram->data.parisc.host_id; + if (nvram->data.parisc.factor != -1) + np->minsync = nvram->data.parisc.factor; + if (nvram->data.parisc.width != -1) + np->maxwide = nvram->data.parisc.width; + switch (nvram->data.parisc.mode) { + case 0: np->scsi_mode = SMODE_SE; break; + case 1: np->scsi_mode = SMODE_HVD; break; + case 2: np->scsi_mode = SMODE_LVD; break; + default: break; + } +#endif + default: + break; + } +} + +/* + * Get target set-up from Symbios format NVRAM. + */ +static void +sym_Symbios_setup_target(struct sym_tcb *tp, int target, Symbios_nvram *nvram) +{ + Symbios_target *tn = &nvram->target[target]; + + if (!(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)) + tp->usrtags = 0; + if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE)) + tp->usrflags &= ~SYM_DISC_ENABLED; + if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME)) + tp->usrflags |= SYM_SCAN_BOOT_DISABLED; + if (!(tn->flags & SYMBIOS_SCAN_LUNS)) + tp->usrflags |= SYM_SCAN_LUNS_DISABLED; + tp->usr_period = (tn->sync_period + 3) / 4; + tp->usr_width = (tn->bus_width == 0x8) ? 0 : 1; +} + +static const unsigned char Tekram_sync[16] = { + 25, 31, 37, 43, 50, 62, 75, 125, 12, 15, 18, 21, 6, 7, 9, 10 +}; + +/* + * Get target set-up from Tekram format NVRAM. + */ +static void +sym_Tekram_setup_target(struct sym_tcb *tp, int target, Tekram_nvram *nvram) +{ + struct Tekram_target *tn = &nvram->target[target]; + + if (tn->flags & TEKRAM_TAGGED_COMMANDS) { + tp->usrtags = 2 << nvram->max_tags_index; + } + + if (tn->flags & TEKRAM_DISCONNECT_ENABLE) + tp->usrflags |= SYM_DISC_ENABLED; + + if (tn->flags & TEKRAM_SYNC_NEGO) + tp->usr_period = Tekram_sync[tn->sync_index & 0xf]; + tp->usr_width = (tn->flags & TEKRAM_WIDE_NEGO) ? 1 : 0; +} + +/* + * Get target setup from NVRAM. + */ +void sym_nvram_setup_target(struct sym_tcb *tp, int target, struct sym_nvram *nvp) +{ + switch (nvp->type) { + case SYM_SYMBIOS_NVRAM: + sym_Symbios_setup_target(tp, target, &nvp->data.Symbios); + break; + case SYM_TEKRAM_NVRAM: + sym_Tekram_setup_target(tp, target, &nvp->data.Tekram); + break; + default: + break; + } +} + +#ifdef SYM_CONF_DEBUG_NVRAM +/* + * Dump Symbios format NVRAM for debugging purpose. + */ +static void sym_display_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram) +{ + int i; + + /* display Symbios nvram host data */ + printf("%s: HOST ID=%d%s%s%s%s%s%s\n", + sym_name(np), nvram->host_id & 0x0f, + (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"", + (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"", + (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"", + (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"", + (nvram->flags2 & SYMBIOS_AVOID_BUS_RESET)?" NO_RESET" :"", + (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? 
" HI_LO" :""); + + /* display Symbios nvram drive data */ + for (i = 0 ; i < 15 ; i++) { + struct Symbios_target *tn = &nvram->target[i]; + printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n", + sym_name(np), i, + (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "", + (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "", + (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "", + (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "", + tn->bus_width, + tn->sync_period / 4, + tn->timeout); + } +} + +/* + * Dump TEKRAM format NVRAM for debugging purpose. + */ +static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram) +{ + int i, tags, boot_delay; + char *rem; + + /* display Tekram nvram host data */ + tags = 2 << nvram->max_tags_index; + boot_delay = 0; + if (nvram->boot_delay_index < 6) + boot_delay = Tekram_boot_delay[nvram->boot_delay_index]; + switch ((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) { + default: + case 0: rem = ""; break; + case 1: rem = " REMOVABLE=boot device"; break; + case 2: rem = " REMOVABLE=all"; break; + } + + printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n", + sym_name(np), nvram->host_id & 0x0f, + (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"", + (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES":"", + (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"", + (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"", + (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"", + (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"", + (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"", + (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"", + rem, boot_delay, tags); + + /* display Tekram nvram drive data */ + for (i = 0; i <= 15; i++) { + int sync, j; + struct Tekram_target *tn = &nvram->target[i]; + j = tn->sync_index & 0xf; + sync = Tekram_sync[j]; + printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n", + sym_name(np), i, + (tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "", + (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "", + (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "", + (tn->flags & TEKRAM_START_CMD) ? " START" : "", + (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "", + (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "", + sync); + } +} +#else +static void sym_display_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram) { (void)np; (void)nvram; } +static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram) { (void)np; (void)nvram; } +#endif /* SYM_CONF_DEBUG_NVRAM */ + + +/* + * 24C16 EEPROM reading. + * + * GPIO0 - data in/data out + * GPIO1 - clock + * Symbios NVRAM wiring now also used by Tekram. + */ + +#define SET_BIT 0 +#define CLR_BIT 1 +#define SET_CLK 2 +#define CLR_CLK 3 + +/* + * Set/clear data/clock bit in GPIO0 + */ +static void S24C16_set_bit(struct sym_device *np, u_char write_bit, u_char *gpreg, + int bit_mode) +{ + udelay(5); + switch (bit_mode) { + case SET_BIT: + *gpreg |= write_bit; + break; + case CLR_BIT: + *gpreg &= 0xfe; + break; + case SET_CLK: + *gpreg |= 0x02; + break; + case CLR_CLK: + *gpreg &= 0xfd; + break; + + } + OUTB(np, nc_gpreg, *gpreg); + INB(np, nc_mbox1); + udelay(5); +} + +/* + * Send START condition to NVRAM to wake it up. + */ +static void S24C16_start(struct sym_device *np, u_char *gpreg) +{ + S24C16_set_bit(np, 1, gpreg, SET_BIT); + S24C16_set_bit(np, 0, gpreg, SET_CLK); + S24C16_set_bit(np, 0, gpreg, CLR_BIT); + S24C16_set_bit(np, 0, gpreg, CLR_CLK); +} + +/* + * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!! 
+ */ +static void S24C16_stop(struct sym_device *np, u_char *gpreg) +{ + S24C16_set_bit(np, 0, gpreg, SET_CLK); + S24C16_set_bit(np, 1, gpreg, SET_BIT); +} + +/* + * Read or write a bit to the NVRAM, + * read if GPIO0 input else write if GPIO0 output + */ +static void S24C16_do_bit(struct sym_device *np, u_char *read_bit, u_char write_bit, + u_char *gpreg) +{ + S24C16_set_bit(np, write_bit, gpreg, SET_BIT); + S24C16_set_bit(np, 0, gpreg, SET_CLK); + if (read_bit) + *read_bit = INB(np, nc_gpreg); + S24C16_set_bit(np, 0, gpreg, CLR_CLK); + S24C16_set_bit(np, 0, gpreg, CLR_BIT); +} + +/* + * Output an ACK to the NVRAM after reading, + * change GPIO0 to output and when done back to an input + */ +static void S24C16_write_ack(struct sym_device *np, u_char write_bit, u_char *gpreg, + u_char *gpcntl) +{ + OUTB(np, nc_gpcntl, *gpcntl & 0xfe); + S24C16_do_bit(np, NULL, write_bit, gpreg); + OUTB(np, nc_gpcntl, *gpcntl); +} + +/* + * Input an ACK from NVRAM after writing, + * change GPIO0 to input and when done back to an output + */ +static void S24C16_read_ack(struct sym_device *np, u_char *read_bit, u_char *gpreg, + u_char *gpcntl) +{ + OUTB(np, nc_gpcntl, *gpcntl | 0x01); + S24C16_do_bit(np, read_bit, 1, gpreg); + OUTB(np, nc_gpcntl, *gpcntl); +} + +/* + * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK, + * GPIO0 must already be set as an output + */ +static void S24C16_write_byte(struct sym_device *np, u_char *ack_data, u_char write_data, + u_char *gpreg, u_char *gpcntl) +{ + int x; + + for (x = 0; x < 8; x++) + S24C16_do_bit(np, NULL, (write_data >> (7 - x)) & 0x01, gpreg); + + S24C16_read_ack(np, ack_data, gpreg, gpcntl); +} + +/* + * READ a byte from the NVRAM and then send an ACK to say we have got it, + * GPIO0 must already be set as an input + */ +static void S24C16_read_byte(struct sym_device *np, u_char *read_data, u_char ack_data, + u_char *gpreg, u_char *gpcntl) +{ + int x; + u_char read_bit; + + *read_data = 0; + for (x = 0; x < 8; x++) { + S24C16_do_bit(np, &read_bit, 1, gpreg); + *read_data |= ((read_bit & 0x01) << (7 - x)); + } + + S24C16_write_ack(np, ack_data, gpreg, gpcntl); +} + +#ifdef SYM_CONF_NVRAM_WRITE_SUPPORT +/* + * Write 'len' bytes starting at 'offset'. 
+ */ +static int sym_write_S24C16_nvram(struct sym_device *np, int offset, + u_char *data, int len) +{ + u_char gpcntl, gpreg; + u_char old_gpcntl, old_gpreg; + u_char ack_data; + int x; + + /* save current state of GPCNTL and GPREG */ + old_gpreg = INB(np, nc_gpreg); + old_gpcntl = INB(np, nc_gpcntl); + gpcntl = old_gpcntl & 0x1c; + + /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */ + OUTB(np, nc_gpreg, old_gpreg); + OUTB(np, nc_gpcntl, gpcntl); + + /* this is to set NVRAM into a known state with GPIO0/1 both low */ + gpreg = old_gpreg; + S24C16_set_bit(np, 0, &gpreg, CLR_CLK); + S24C16_set_bit(np, 0, &gpreg, CLR_BIT); + + /* now set NVRAM inactive with GPIO0/1 both high */ + S24C16_stop(np, &gpreg); + + /* NVRAM has to be written in segments of 16 bytes */ + for (x = 0; x < len ; x += 16) { + do { + S24C16_start(np, &gpreg); + S24C16_write_byte(np, &ack_data, + 0xa0 | (((offset+x) >> 7) & 0x0e), + &gpreg, &gpcntl); + } while (ack_data & 0x01); + + S24C16_write_byte(np, &ack_data, (offset+x) & 0xff, + &gpreg, &gpcntl); + + for (y = 0; y < 16; y++) + S24C16_write_byte(np, &ack_data, data[x+y], + &gpreg, &gpcntl); + S24C16_stop(np, &gpreg); + } + + /* return GPIO0/1 to original states after having accessed NVRAM */ + OUTB(np, nc_gpcntl, old_gpcntl); + OUTB(np, nc_gpreg, old_gpreg); + + return 0; +} +#endif /* SYM_CONF_NVRAM_WRITE_SUPPORT */ + +/* + * Read 'len' bytes starting at 'offset'. + */ +static int sym_read_S24C16_nvram(struct sym_device *np, int offset, u_char *data, int len) +{ + u_char gpcntl, gpreg; + u_char old_gpcntl, old_gpreg; + u_char ack_data; + int retv = 1; + int x; + + /* save current state of GPCNTL and GPREG */ + old_gpreg = INB(np, nc_gpreg); + old_gpcntl = INB(np, nc_gpcntl); + gpcntl = old_gpcntl & 0x1c; + + /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */ + OUTB(np, nc_gpreg, old_gpreg); + OUTB(np, nc_gpcntl, gpcntl); + + /* this is to set NVRAM into a known state with GPIO0/1 both low */ + gpreg = old_gpreg; + S24C16_set_bit(np, 0, &gpreg, CLR_CLK); + S24C16_set_bit(np, 0, &gpreg, CLR_BIT); + + /* now set NVRAM inactive with GPIO0/1 both high */ + S24C16_stop(np, &gpreg); + + /* activate NVRAM */ + S24C16_start(np, &gpreg); + + /* write device code and random address MSB */ + S24C16_write_byte(np, &ack_data, + 0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl); + if (ack_data & 0x01) + goto out; + + /* write random address LSB */ + S24C16_write_byte(np, &ack_data, + offset & 0xff, &gpreg, &gpcntl); + if (ack_data & 0x01) + goto out; + + /* regenerate START state to set up for reading */ + S24C16_start(np, &gpreg); + + /* rewrite device code and address MSB with read bit set (lsb = 0x01) */ + S24C16_write_byte(np, &ack_data, + 0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl); + if (ack_data & 0x01) + goto out; + + /* now set up GPIO0 for inputting data */ + gpcntl |= 0x01; + OUTB(np, nc_gpcntl, gpcntl); + + /* input all requested data - only part of total NVRAM */ + for (x = 0; x < len; x++) + S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl); + + /* finally put NVRAM back in inactive mode */ + gpcntl &= 0xfe; + OUTB(np, nc_gpcntl, gpcntl); + S24C16_stop(np, &gpreg); + retv = 0; +out: + /* return GPIO0/1 to original states after having accessed NVRAM */ + OUTB(np, nc_gpcntl, old_gpcntl); + OUTB(np, nc_gpreg, old_gpreg); + + return retv; +} + +#undef SET_BIT +#undef CLR_BIT +#undef SET_CLK +#undef CLR_CLK + +/* + * Try reading Symbios NVRAM. + * Return 0 if OK. 
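The 24C16 routines above address the EEPROM the usual serial way: the three high bits of the 11-bit offset are folded into the 0xa0 device-select byte and the low eight bits are sent as the word address. A standalone sketch of that split, using the SYMBIOS_NVRAM_ADDRESS offset as the example input:

#include <stdio.h>

int main(void)
{
    int offset = 0x100;   /* SYMBIOS_NVRAM_ADDRESS in this driver */

    /* Same split as sym_read_S24C16_nvram(): offset bits 8-10 land in
     * bits 1-3 of the device-select byte, bits 0-7 are the word address. */
    unsigned char devsel = 0xa0 | ((offset >> 7) & 0x0e);
    unsigned char woffs  = offset & 0xff;

    printf("offset 0x%03x -> device select 0x%02x, word address 0x%02x\n",
           offset, devsel, woffs);
    return 0;
}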
+ */ +static int sym_read_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram) +{ + static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0}; + u_char *data = (u_char *) nvram; + int len = sizeof(*nvram); + u_short csum; + int x; + + /* probe the 24c16 and read the SYMBIOS 24c16 area */ + if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len)) + return 1; + + /* check valid NVRAM signature, verify byte count and checksum */ + if (nvram->type != 0 || + memcmp(nvram->trailer, Symbios_trailer, 6) || + nvram->byte_count != len - 12) + return 1; + + /* verify checksum */ + for (x = 6, csum = 0; x < len - 6; x++) + csum += data[x]; + if (csum != nvram->checksum) + return 1; + + return 0; +} + +/* + * 93C46 EEPROM reading. + * + * GPIO0 - data in + * GPIO1 - data out + * GPIO2 - clock + * GPIO4 - chip select + * + * Used by Tekram. + */ + +/* + * Pulse clock bit in GPIO0 + */ +static void T93C46_Clk(struct sym_device *np, u_char *gpreg) +{ + OUTB(np, nc_gpreg, *gpreg | 0x04); + INB(np, nc_mbox1); + udelay(2); + OUTB(np, nc_gpreg, *gpreg); +} + +/* + * Read bit from NVRAM + */ +static void T93C46_Read_Bit(struct sym_device *np, u_char *read_bit, u_char *gpreg) +{ + udelay(2); + T93C46_Clk(np, gpreg); + *read_bit = INB(np, nc_gpreg); +} + +/* + * Write bit to GPIO0 + */ +static void T93C46_Write_Bit(struct sym_device *np, u_char write_bit, u_char *gpreg) +{ + if (write_bit & 0x01) + *gpreg |= 0x02; + else + *gpreg &= 0xfd; + + *gpreg |= 0x10; + + OUTB(np, nc_gpreg, *gpreg); + INB(np, nc_mbox1); + udelay(2); + + T93C46_Clk(np, gpreg); +} + +/* + * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!! + */ +static void T93C46_Stop(struct sym_device *np, u_char *gpreg) +{ + *gpreg &= 0xef; + OUTB(np, nc_gpreg, *gpreg); + INB(np, nc_mbox1); + udelay(2); + + T93C46_Clk(np, gpreg); +} + +/* + * Send read command and address to NVRAM + */ +static void T93C46_Send_Command(struct sym_device *np, u_short write_data, + u_char *read_bit, u_char *gpreg) +{ + int x; + + /* send 9 bits, start bit (1), command (2), address (6) */ + for (x = 0; x < 9; x++) + T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg); + + *read_bit = INB(np, nc_gpreg); +} + +/* + * READ 2 bytes from the NVRAM + */ +static void T93C46_Read_Word(struct sym_device *np, + unsigned short *nvram_data, unsigned char *gpreg) +{ + int x; + u_char read_bit; + + *nvram_data = 0; + for (x = 0; x < 16; x++) { + T93C46_Read_Bit(np, &read_bit, gpreg); + + if (read_bit & 0x01) + *nvram_data |= (0x01 << (15 - x)); + else + *nvram_data &= ~(0x01 << (15 - x)); + } +} + +/* + * Read Tekram NvRAM data. + */ +static int T93C46_Read_Data(struct sym_device *np, unsigned short *data, + int len, unsigned char *gpreg) +{ + int x; + + for (x = 0; x < len; x++) { + unsigned char read_bit; + /* output read command and address */ + T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg); + if (read_bit & 0x01) + return 1; /* Bad */ + T93C46_Read_Word(np, &data[x], gpreg); + T93C46_Stop(np, gpreg); + } + + return 0; +} + +/* + * Try reading 93C46 Tekram NVRAM. 
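sym_read_Symbios_nvram() above validates the image with a 16-bit byte sum that skips the 6-byte header and the 6-byte trailer. A standalone sketch of the same rule applied to a fabricated buffer (the contents are made up, not a real NVRAM image):

#include <stdio.h>

typedef unsigned char  u_char;
typedef unsigned short u_short;

/* Same byte-sum rule as sym_read_Symbios_nvram(): the 16-bit checksum
 * covers every byte between the 6-byte header and the 6-byte trailer. */
static u_short symbios_csum(const u_char *data, int len)
{
    u_short csum = 0;
    int x;

    for (x = 6; x < len - 6; x++)
        csum += data[x];
    return csum;
}

int main(void)
{
    u_char fake[16] = { 0, 0, 0, 0, 0, 0,            /* header */
                        1, 2, 3, 4,                  /* payload */
                        0xfe, 0xfe, 0, 0, 0, 0 };    /* trailer */

    printf("checksum over the payload = 0x%04x\n", symbios_csum(fake, 16));
    return 0;
}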
+ */ +static int sym_read_T93C46_nvram(struct sym_device *np, Tekram_nvram *nvram) +{ + u_char gpcntl, gpreg; + u_char old_gpcntl, old_gpreg; + int retv; + + /* save current state of GPCNTL and GPREG */ + old_gpreg = INB(np, nc_gpreg); + old_gpcntl = INB(np, nc_gpcntl); + + /* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in, + 1/2/4 out */ + gpreg = old_gpreg & 0xe9; + OUTB(np, nc_gpreg, gpreg); + gpcntl = (old_gpcntl & 0xe9) | 0x09; + OUTB(np, nc_gpcntl, gpcntl); + + /* input all of NVRAM, 64 words */ + retv = T93C46_Read_Data(np, (u_short *) nvram, + sizeof(*nvram) / sizeof(short), &gpreg); + + /* return GPIO0/1/2/4 to original states after having accessed NVRAM */ + OUTB(np, nc_gpcntl, old_gpcntl); + OUTB(np, nc_gpreg, old_gpreg); + + return retv; +} + +/* + * Try reading Tekram NVRAM. + * Return 0 if OK. + */ +static int sym_read_Tekram_nvram (struct sym_device *np, Tekram_nvram *nvram) +{ + u_char *data = (u_char *) nvram; + int len = sizeof(*nvram); + u_short csum; + int x; + + switch (np->pdev->device) { + case PCI_DEVICE_ID_NCR_53C885: + case PCI_DEVICE_ID_NCR_53C895: + case PCI_DEVICE_ID_NCR_53C896: + x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, + data, len); + break; + case PCI_DEVICE_ID_NCR_53C875: + x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, + data, len); + if (!x) + break; + fallthrough; + default: + x = sym_read_T93C46_nvram(np, nvram); + break; + } + if (x) + return 1; + + /* verify checksum */ + for (x = 0, csum = 0; x < len - 1; x += 2) + csum += data[x] + (data[x+1] << 8); + if (csum != 0x1234) + return 1; + + return 0; +} + +#ifdef CONFIG_PARISC +/* + * Host firmware (PDC) keeps a table for altering SCSI capabilities. + * Many newer machines export one channel of 53c896 chip as SE, 50-pin HD. + * Also used for Multi-initiator SCSI clusters to set the SCSI Initiator ID. + */ +static int sym_read_parisc_pdc(struct sym_device *np, struct pdc_initiator *pdc) +{ + struct hardware_path hwpath; + get_pci_node_path(np->pdev, &hwpath); + if (!pdc_get_initiator(&hwpath, pdc)) + return 0; + + return SYM_PARISC_PDC; +} +#else +static inline int sym_read_parisc_pdc(struct sym_device *np, + struct pdc_initiator *x) +{ + return 0; +} +#endif + +/* + * Try reading Symbios or Tekram NVRAM + */ +int sym_read_nvram(struct sym_device *np, struct sym_nvram *nvp) +{ + if (!sym_read_Symbios_nvram(np, &nvp->data.Symbios)) { + nvp->type = SYM_SYMBIOS_NVRAM; + sym_display_Symbios_nvram(np, &nvp->data.Symbios); + } else if (!sym_read_Tekram_nvram(np, &nvp->data.Tekram)) { + nvp->type = SYM_TEKRAM_NVRAM; + sym_display_Tekram_nvram(np, &nvp->data.Tekram); + } else { + nvp->type = sym_read_parisc_pdc(np, &nvp->data.parisc); + } + return nvp->type; +} + +char *sym_nvram_type(struct sym_nvram *nvp) +{ + switch (nvp->type) { + case SYM_SYMBIOS_NVRAM: + return "Symbios NVRAM"; + case SYM_TEKRAM_NVRAM: + return "Tekram NVRAM"; + case SYM_PARISC_PDC: + return "PA-RISC Firmware"; + default: + return "No NVRAM"; + } +} diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.h b/drivers/scsi/sym53c8xx_2/sym_nvram.h new file mode 100644 index 000000000..d07da39cc --- /dev/null +++ b/drivers/scsi/sym53c8xx_2/sym_nvram.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family + * of PCI-SCSI IO processors. + * + * Copyright (C) 1999-2001 Gerard Roudier + * + * This driver is derived from the Linux sym53c8xx driver. 
+ * Copyright (C) 1998-2000 Gerard Roudier + * + * The sym53c8xx driver is derived from the ncr53c8xx driver that had been + * a port of the FreeBSD ncr driver to Linux-1.2.13. + * + * The original ncr driver has been written for 386bsd and FreeBSD by + * Wolfgang Stanglmeier + * Stefan Esser + * Copyright (C) 1994 Wolfgang Stanglmeier + * + * Other major contributions: + * + * NVRAM detection and reading. + * Copyright (C) 1997 Richard Waltham + * + *----------------------------------------------------------------------------- + */ + +#ifndef SYM_NVRAM_H +#define SYM_NVRAM_H + +#include "sym53c8xx.h" + +/* + * Symbios NVRAM data format + */ +#define SYMBIOS_NVRAM_SIZE 368 +#define SYMBIOS_NVRAM_ADDRESS 0x100 + +struct Symbios_nvram { +/* Header 6 bytes */ + u_short type; /* 0x0000 */ + u_short byte_count; /* excluding header/trailer */ + u_short checksum; + +/* Controller set up 20 bytes */ + u_char v_major; /* 0x00 */ + u_char v_minor; /* 0x30 */ + u32 boot_crc; + u_short flags; +#define SYMBIOS_SCAM_ENABLE (1) +#define SYMBIOS_PARITY_ENABLE (1<<1) +#define SYMBIOS_VERBOSE_MSGS (1<<2) +#define SYMBIOS_CHS_MAPPING (1<<3) +#define SYMBIOS_NO_NVRAM (1<<3) /* ??? */ + u_short flags1; +#define SYMBIOS_SCAN_HI_LO (1) + u_short term_state; +#define SYMBIOS_TERM_CANT_PROGRAM (0) +#define SYMBIOS_TERM_ENABLED (1) +#define SYMBIOS_TERM_DISABLED (2) + u_short rmvbl_flags; +#define SYMBIOS_RMVBL_NO_SUPPORT (0) +#define SYMBIOS_RMVBL_BOOT_DEVICE (1) +#define SYMBIOS_RMVBL_MEDIA_INSTALLED (2) + u_char host_id; + u_char num_hba; /* 0x04 */ + u_char num_devices; /* 0x10 */ + u_char max_scam_devices; /* 0x04 */ + u_char num_valid_scam_devices; /* 0x00 */ + u_char flags2; +#define SYMBIOS_AVOID_BUS_RESET (1<<2) + +/* Boot order 14 bytes * 4 */ + struct Symbios_host{ + u_short type; /* 4:8xx / 0:nok */ + u_short device_id; /* PCI device id */ + u_short vendor_id; /* PCI vendor id */ + u_char bus_nr; /* PCI bus number */ + u_char device_fn; /* PCI device/function number << 3*/ + u_short word8; + u_short flags; +#define SYMBIOS_INIT_SCAN_AT_BOOT (1) + u_short io_port; /* PCI io_port address */ + } host[4]; + +/* Targets 8 bytes * 16 */ + struct Symbios_target { + u_char flags; +#define SYMBIOS_DISCONNECT_ENABLE (1) +#define SYMBIOS_SCAN_AT_BOOT_TIME (1<<1) +#define SYMBIOS_SCAN_LUNS (1<<2) +#define SYMBIOS_QUEUE_TAGS_ENABLED (1<<3) + u_char rsvd; + u_char bus_width; /* 0x08/0x10 */ + u_char sync_offset; + u_short sync_period; /* 4*period factor */ + u_short timeout; + } target[16]; +/* Scam table 8 bytes * 4 */ + struct Symbios_scam { + u_short id; + u_short method; +#define SYMBIOS_SCAM_DEFAULT_METHOD (0) +#define SYMBIOS_SCAM_DONT_ASSIGN (1) +#define SYMBIOS_SCAM_SET_SPECIFIC_ID (2) +#define SYMBIOS_SCAM_USE_ORDER_GIVEN (3) + u_short status; +#define SYMBIOS_SCAM_UNKNOWN (0) +#define SYMBIOS_SCAM_DEVICE_NOT_FOUND (1) +#define SYMBIOS_SCAM_ID_NOT_SET (2) +#define SYMBIOS_SCAM_ID_VALID (3) + u_char target_id; + u_char rsvd; + } scam[4]; + + u_char spare_devices[15*8]; + u_char trailer[6]; /* 0xfe 0xfe 0x00 0x00 0x00 0x00 */ +}; +typedef struct Symbios_nvram Symbios_nvram; +typedef struct Symbios_host Symbios_host; +typedef struct Symbios_target Symbios_target; +typedef struct Symbios_scam Symbios_scam; + +/* + * Tekram NvRAM data format. 
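+ *
+ * The Tekram area is accepted only if the 16-bit sum of its 64
+ * little-endian words equals the magic value 0x1234, as checked in
+ * sym_read_Tekram_nvram(), roughly:
+ *
+ *	for (csum = 0, i = 0; i < sizeof(Tekram_nvram); i += 2)
+ *		csum += data[i] + (data[i + 1] << 8);
+ *	valid = (csum == 0x1234);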
+ */ +#define TEKRAM_NVRAM_SIZE 64 +#define TEKRAM_93C46_NVRAM_ADDRESS 0 +#define TEKRAM_24C16_NVRAM_ADDRESS 0x40 + +struct Tekram_nvram { + struct Tekram_target { + u_char flags; +#define TEKRAM_PARITY_CHECK (1) +#define TEKRAM_SYNC_NEGO (1<<1) +#define TEKRAM_DISCONNECT_ENABLE (1<<2) +#define TEKRAM_START_CMD (1<<3) +#define TEKRAM_TAGGED_COMMANDS (1<<4) +#define TEKRAM_WIDE_NEGO (1<<5) + u_char sync_index; + u_short word2; + } target[16]; + u_char host_id; + u_char flags; +#define TEKRAM_MORE_THAN_2_DRIVES (1) +#define TEKRAM_DRIVES_SUP_1GB (1<<1) +#define TEKRAM_RESET_ON_POWER_ON (1<<2) +#define TEKRAM_ACTIVE_NEGATION (1<<3) +#define TEKRAM_IMMEDIATE_SEEK (1<<4) +#define TEKRAM_SCAN_LUNS (1<<5) +#define TEKRAM_REMOVABLE_FLAGS (3<<6) /* 0: disable; */ + /* 1: boot device; 2:all */ + u_char boot_delay_index; + u_char max_tags_index; + u_short flags1; +#define TEKRAM_F2_F6_ENABLED (1) + u_short spare[29]; +}; +typedef struct Tekram_nvram Tekram_nvram; +typedef struct Tekram_target Tekram_target; + +#ifndef CONFIG_PARISC +struct pdc_initiator { int dummy; }; +#endif + +/* + * Union of supported NVRAM formats. + */ +struct sym_nvram { + int type; +#define SYM_SYMBIOS_NVRAM (1) +#define SYM_TEKRAM_NVRAM (2) +#define SYM_PARISC_PDC (3) +#if SYM_CONF_NVRAM_SUPPORT + union { + Symbios_nvram Symbios; + Tekram_nvram Tekram; + struct pdc_initiator parisc; + } data; +#endif +}; + +#if SYM_CONF_NVRAM_SUPPORT +void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram); +void sym_nvram_setup_target (struct sym_tcb *tp, int target, struct sym_nvram *nvp); +int sym_read_nvram (struct sym_device *np, struct sym_nvram *nvp); +char *sym_nvram_type(struct sym_nvram *nvp); +#else +static inline void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram) { } +static inline void sym_nvram_setup_target(struct sym_tcb *tp, struct sym_nvram *nvram) { } +static inline int sym_read_nvram(struct sym_device *np, struct sym_nvram *nvp) +{ + nvp->type = 0; + return 0; +} +static inline char *sym_nvram_type(struct sym_nvram *nvp) +{ + return "No NVRAM"; +} +#endif + +#endif /* SYM_NVRAM_H */ diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c new file mode 100644 index 000000000..9d1bdcdc1 --- /dev/null +++ b/drivers/scsi/virtio_scsi.c @@ -0,0 +1,1042 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Virtio SCSI HBA driver + * + * Copyright IBM Corp. 2010 + * Copyright Red Hat, Inc. 
2011 + * + * Authors: + * Stefan Hajnoczi + * Paolo Bonzini + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sd.h" + +#define VIRTIO_SCSI_MEMPOOL_SZ 64 +#define VIRTIO_SCSI_EVENT_LEN 8 +#define VIRTIO_SCSI_VQ_BASE 2 + +/* Command queue element */ +struct virtio_scsi_cmd { + struct scsi_cmnd *sc; + struct completion *comp; + union { + struct virtio_scsi_cmd_req cmd; + struct virtio_scsi_cmd_req_pi cmd_pi; + struct virtio_scsi_ctrl_tmf_req tmf; + struct virtio_scsi_ctrl_an_req an; + } req; + union { + struct virtio_scsi_cmd_resp cmd; + struct virtio_scsi_ctrl_tmf_resp tmf; + struct virtio_scsi_ctrl_an_resp an; + struct virtio_scsi_event evt; + } resp; +} ____cacheline_aligned_in_smp; + +struct virtio_scsi_event_node { + struct virtio_scsi *vscsi; + struct virtio_scsi_event event; + struct work_struct work; +}; + +struct virtio_scsi_vq { + /* Protects vq */ + spinlock_t vq_lock; + + struct virtqueue *vq; +}; + +/* Driver instance state */ +struct virtio_scsi { + struct virtio_device *vdev; + + /* Get some buffers ready for event vq */ + struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN]; + + u32 num_queues; + + struct hlist_node node; + + /* Protected by event_vq lock */ + bool stop_events; + + struct virtio_scsi_vq ctrl_vq; + struct virtio_scsi_vq event_vq; + struct virtio_scsi_vq req_vqs[]; +}; + +static struct kmem_cache *virtscsi_cmd_cache; +static mempool_t *virtscsi_cmd_pool; + +static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev) +{ + return vdev->priv; +} + +static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid) +{ + if (resid) + scsi_set_resid(sc, min(resid, scsi_bufflen(sc))); +} + +/* + * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done + * + * Called with vq_lock held. 
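+ *
+ * Copies the SCSI status byte, residual count and any sense data from
+ * the virtio response into the scsi_cmnd, translates the
+ * VIRTIO_SCSI_S_* response code into a host byte (e.g. OK -> DID_OK,
+ * ABORTED -> DID_ABORT, BAD_TARGET -> DID_BAD_TARGET) and finally
+ * completes the command with scsi_done().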
+ */ +static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) +{ + struct virtio_scsi_cmd *cmd = buf; + struct scsi_cmnd *sc = cmd->sc; + struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd; + + dev_dbg(&sc->device->sdev_gendev, + "cmd %p response %u status %#02x sense_len %u\n", + sc, resp->response, resp->status, resp->sense_len); + + sc->result = resp->status; + virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid)); + switch (resp->response) { + case VIRTIO_SCSI_S_OK: + set_host_byte(sc, DID_OK); + break; + case VIRTIO_SCSI_S_OVERRUN: + set_host_byte(sc, DID_ERROR); + break; + case VIRTIO_SCSI_S_ABORTED: + set_host_byte(sc, DID_ABORT); + break; + case VIRTIO_SCSI_S_BAD_TARGET: + set_host_byte(sc, DID_BAD_TARGET); + break; + case VIRTIO_SCSI_S_RESET: + set_host_byte(sc, DID_RESET); + break; + case VIRTIO_SCSI_S_BUSY: + set_host_byte(sc, DID_BUS_BUSY); + break; + case VIRTIO_SCSI_S_TRANSPORT_FAILURE: + set_host_byte(sc, DID_TRANSPORT_DISRUPTED); + break; + case VIRTIO_SCSI_S_TARGET_FAILURE: + set_host_byte(sc, DID_BAD_TARGET); + break; + case VIRTIO_SCSI_S_NEXUS_FAILURE: + set_status_byte(sc, SAM_STAT_RESERVATION_CONFLICT); + break; + default: + scmd_printk(KERN_WARNING, sc, "Unknown response %d", + resp->response); + fallthrough; + case VIRTIO_SCSI_S_FAILURE: + set_host_byte(sc, DID_ERROR); + break; + } + + WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) > + VIRTIO_SCSI_SENSE_SIZE); + if (resp->sense_len) { + memcpy(sc->sense_buffer, resp->sense, + min_t(u32, + virtio32_to_cpu(vscsi->vdev, resp->sense_len), + VIRTIO_SCSI_SENSE_SIZE)); + } + + scsi_done(sc); +} + +static void virtscsi_vq_done(struct virtio_scsi *vscsi, + struct virtio_scsi_vq *virtscsi_vq, + void (*fn)(struct virtio_scsi *vscsi, void *buf)) +{ + void *buf; + unsigned int len; + unsigned long flags; + struct virtqueue *vq = virtscsi_vq->vq; + + spin_lock_irqsave(&virtscsi_vq->vq_lock, flags); + do { + virtqueue_disable_cb(vq); + while ((buf = virtqueue_get_buf(vq, &len)) != NULL) + fn(vscsi, buf); + + if (unlikely(virtqueue_is_broken(vq))) + break; + } while (!virtqueue_enable_cb(vq)); + spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags); +} + +static void virtscsi_req_done(struct virtqueue *vq) +{ + struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + int index = vq->index - VIRTIO_SCSI_VQ_BASE; + struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index]; + + virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd); +}; + +static void virtscsi_poll_requests(struct virtio_scsi *vscsi) +{ + int i, num_vqs; + + num_vqs = vscsi->num_queues; + for (i = 0; i < num_vqs; i++) + virtscsi_vq_done(vscsi, &vscsi->req_vqs[i], + virtscsi_complete_cmd); +} + +static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf) +{ + struct virtio_scsi_cmd *cmd = buf; + + if (cmd->comp) + complete(cmd->comp); +} + +static void virtscsi_ctrl_done(struct virtqueue *vq) +{ + struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + + virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free); +}; + +static void virtscsi_handle_event(struct work_struct *work); + +static int virtscsi_kick_event(struct virtio_scsi *vscsi, + struct virtio_scsi_event_node *event_node) +{ + int err; + struct scatterlist sg; + unsigned long flags; + + INIT_WORK(&event_node->work, virtscsi_handle_event); + sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); + + spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); + + 
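+	/*
+	 * Post the event buffer to the event virtqueue and notify the
+	 * device; GFP_ATOMIC is needed because the vq lock is held with
+	 * interrupts disabled.
+	 */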
err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node, + GFP_ATOMIC); + if (!err) + virtqueue_kick(vscsi->event_vq.vq); + + spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags); + + return err; +} + +static int virtscsi_kick_event_all(struct virtio_scsi *vscsi) +{ + int i; + + for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) { + vscsi->event_list[i].vscsi = vscsi; + virtscsi_kick_event(vscsi, &vscsi->event_list[i]); + } + + return 0; +} + +static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi) +{ + int i; + + /* Stop scheduling work before calling cancel_work_sync. */ + spin_lock_irq(&vscsi->event_vq.vq_lock); + vscsi->stop_events = true; + spin_unlock_irq(&vscsi->event_vq.vq_lock); + + for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) + cancel_work_sync(&vscsi->event_list[i].work); +} + +static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi, + struct virtio_scsi_event *event) +{ + struct scsi_device *sdev; + struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); + unsigned int target = event->lun[1]; + unsigned int lun = (event->lun[2] << 8) | event->lun[3]; + + switch (virtio32_to_cpu(vscsi->vdev, event->reason)) { + case VIRTIO_SCSI_EVT_RESET_RESCAN: + if (lun == 0) { + scsi_scan_target(&shost->shost_gendev, 0, target, + SCAN_WILD_CARD, SCSI_SCAN_INITIAL); + } else { + scsi_add_device(shost, 0, target, lun); + } + break; + case VIRTIO_SCSI_EVT_RESET_REMOVED: + sdev = scsi_device_lookup(shost, 0, target, lun); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } else { + pr_err("SCSI device %d 0 %d %d not found\n", + shost->host_no, target, lun); + } + break; + default: + pr_info("Unsupported virtio scsi event reason %x\n", event->reason); + } +} + +static void virtscsi_handle_param_change(struct virtio_scsi *vscsi, + struct virtio_scsi_event *event) +{ + struct scsi_device *sdev; + struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); + unsigned int target = event->lun[1]; + unsigned int lun = (event->lun[2] << 8) | event->lun[3]; + u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255; + u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8; + + sdev = scsi_device_lookup(shost, 0, target, lun); + if (!sdev) { + pr_err("SCSI device %d 0 %d %d not found\n", + shost->host_no, target, lun); + return; + } + + /* Handle "Parameters changed", "Mode parameters changed", and + "Capacity data has changed". */ + if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09)) + scsi_rescan_device(sdev); + + scsi_device_put(sdev); +} + +static int virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi) +{ + struct scsi_device *sdev; + struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); + unsigned char scsi_cmd[MAX_COMMAND_SIZE]; + int result, inquiry_len, inq_result_len = 256; + char *inq_result = kmalloc(inq_result_len, GFP_KERNEL); + + if (!inq_result) + return -ENOMEM; + + shost_for_each_device(sdev, shost) { + inquiry_len = sdev->inquiry_len ? 
sdev->inquiry_len : 36; + + memset(scsi_cmd, 0, sizeof(scsi_cmd)); + scsi_cmd[0] = INQUIRY; + scsi_cmd[4] = (unsigned char) inquiry_len; + + memset(inq_result, 0, inq_result_len); + + result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, + inq_result, inquiry_len, + SD_TIMEOUT, SD_MAX_RETRIES, NULL); + + if (result == 0 && inq_result[0] >> 5) { + /* PQ indicates the LUN is not attached */ + scsi_remove_device(sdev); + } else if (result > 0 && host_byte(result) == DID_BAD_TARGET) { + /* + * If all LUNs of a virtio-scsi device are unplugged + * it will respond with BAD TARGET on any INQUIRY + * command. + * Remove the device in this case as well. + */ + scsi_remove_device(sdev); + } + } + + kfree(inq_result); + return 0; +} + +static void virtscsi_handle_event(struct work_struct *work) +{ + struct virtio_scsi_event_node *event_node = + container_of(work, struct virtio_scsi_event_node, work); + struct virtio_scsi *vscsi = event_node->vscsi; + struct virtio_scsi_event *event = &event_node->event; + + if (event->event & + cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) { + int ret; + + event->event &= ~cpu_to_virtio32(vscsi->vdev, + VIRTIO_SCSI_T_EVENTS_MISSED); + ret = virtscsi_rescan_hotunplug(vscsi); + if (ret) + return; + scsi_scan_host(virtio_scsi_host(vscsi->vdev)); + } + + switch (virtio32_to_cpu(vscsi->vdev, event->event)) { + case VIRTIO_SCSI_T_NO_EVENT: + break; + case VIRTIO_SCSI_T_TRANSPORT_RESET: + virtscsi_handle_transport_reset(vscsi, event); + break; + case VIRTIO_SCSI_T_PARAM_CHANGE: + virtscsi_handle_param_change(vscsi, event); + break; + default: + pr_err("Unsupported virtio scsi event %x\n", event->event); + } + virtscsi_kick_event(vscsi, event_node); +} + +static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf) +{ + struct virtio_scsi_event_node *event_node = buf; + + if (!vscsi->stop_events) + queue_work(system_freezable_wq, &event_node->work); +} + +static void virtscsi_event_done(struct virtqueue *vq) +{ + struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + + virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event); +}; + +static int __virtscsi_add_cmd(struct virtqueue *vq, + struct virtio_scsi_cmd *cmd, + size_t req_size, size_t resp_size) +{ + struct scsi_cmnd *sc = cmd->sc; + struct scatterlist *sgs[6], req, resp; + struct sg_table *out, *in; + unsigned out_num = 0, in_num = 0; + + out = in = NULL; + + if (sc && sc->sc_data_direction != DMA_NONE) { + if (sc->sc_data_direction != DMA_FROM_DEVICE) + out = &sc->sdb.table; + if (sc->sc_data_direction != DMA_TO_DEVICE) + in = &sc->sdb.table; + } + + /* Request header. */ + sg_init_one(&req, &cmd->req, req_size); + sgs[out_num++] = &req; + + /* Data-out buffer. */ + if (out) { + /* Place WRITE protection SGLs before Data OUT payload */ + if (scsi_prot_sg_count(sc)) + sgs[out_num++] = scsi_prot_sglist(sc); + sgs[out_num++] = out->sgl; + } + + /* Response header. 
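+	 * Everything added before this point is device-readable, the
+	 * response header and everything after it is device-writable.
+	 * The resulting scatterlist order is: request header, WRITE
+	 * protection SGLs (if any), data-out, response header, READ
+	 * protection SGLs (if any), data-in.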
*/ + sg_init_one(&resp, &cmd->resp, resp_size); + sgs[out_num + in_num++] = &resp; + + /* Data-in buffer */ + if (in) { + /* Place READ protection SGLs before Data IN payload */ + if (scsi_prot_sg_count(sc)) + sgs[out_num + in_num++] = scsi_prot_sglist(sc); + sgs[out_num + in_num++] = in->sgl; + } + + return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC); +} + +static void virtscsi_kick_vq(struct virtio_scsi_vq *vq) +{ + bool needs_kick; + unsigned long flags; + + spin_lock_irqsave(&vq->vq_lock, flags); + needs_kick = virtqueue_kick_prepare(vq->vq); + spin_unlock_irqrestore(&vq->vq_lock, flags); + + if (needs_kick) + virtqueue_notify(vq->vq); +} + +/** + * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it + * @vq : the struct virtqueue we're talking about + * @cmd : command structure + * @req_size : size of the request buffer + * @resp_size : size of the response buffer + * @kick : whether to kick the virtqueue immediately + */ +static int virtscsi_add_cmd(struct virtio_scsi_vq *vq, + struct virtio_scsi_cmd *cmd, + size_t req_size, size_t resp_size, + bool kick) +{ + unsigned long flags; + int err; + bool needs_kick = false; + + spin_lock_irqsave(&vq->vq_lock, flags); + err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size); + if (!err && kick) + needs_kick = virtqueue_kick_prepare(vq->vq); + + spin_unlock_irqrestore(&vq->vq_lock, flags); + + if (needs_kick) + virtqueue_notify(vq->vq); + return err; +} + +static void virtio_scsi_init_hdr(struct virtio_device *vdev, + struct virtio_scsi_cmd_req *cmd, + struct scsi_cmnd *sc) +{ + cmd->lun[0] = 1; + cmd->lun[1] = sc->device->id; + cmd->lun[2] = (sc->device->lun >> 8) | 0x40; + cmd->lun[3] = sc->device->lun & 0xff; + cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc); + cmd->task_attr = VIRTIO_SCSI_S_SIMPLE; + cmd->prio = 0; + cmd->crn = 0; +} + +#ifdef CONFIG_BLK_DEV_INTEGRITY +static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev, + struct virtio_scsi_cmd_req_pi *cmd_pi, + struct scsi_cmnd *sc) +{ + struct request *rq = scsi_cmd_to_rq(sc); + struct blk_integrity *bi; + + virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc); + + if (!rq || !scsi_prot_sg_count(sc)) + return; + + bi = blk_get_integrity(rq->q->disk); + + if (sc->sc_data_direction == DMA_TO_DEVICE) + cmd_pi->pi_bytesout = cpu_to_virtio32(vdev, + bio_integrity_bytes(bi, + blk_rq_sectors(rq))); + else if (sc->sc_data_direction == DMA_FROM_DEVICE) + cmd_pi->pi_bytesin = cpu_to_virtio32(vdev, + bio_integrity_bytes(bi, + blk_rq_sectors(rq))); +} +#endif + +static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi, + struct scsi_cmnd *sc) +{ + u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc)); + u16 hwq = blk_mq_unique_tag_to_hwq(tag); + + return &vscsi->req_vqs[hwq]; +} + +static int virtscsi_queuecommand(struct Scsi_Host *shost, + struct scsi_cmnd *sc) +{ + struct virtio_scsi *vscsi = shost_priv(shost); + struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc); + struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); + bool kick; + unsigned long flags; + int req_size; + int ret; + + BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); + + /* TODO: check feature bit and fail if unsupported? 
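+	 * Bidirectional transfers are not supported by this driver,
+	 * hence the BUG_ON below.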
*/ + BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL); + + dev_dbg(&sc->device->sdev_gendev, + "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]); + + cmd->sc = sc; + + BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); + +#ifdef CONFIG_BLK_DEV_INTEGRITY + if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) { + virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc); + memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len); + req_size = sizeof(cmd->req.cmd_pi); + } else +#endif + { + virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc); + memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); + req_size = sizeof(cmd->req.cmd); + } + + kick = (sc->flags & SCMD_LAST) != 0; + ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick); + if (ret == -EIO) { + cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; + spin_lock_irqsave(&req_vq->vq_lock, flags); + virtscsi_complete_cmd(vscsi, cmd); + spin_unlock_irqrestore(&req_vq->vq_lock, flags); + } else if (ret != 0) { + return SCSI_MLQUEUE_HOST_BUSY; + } + return 0; +} + +static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) +{ + DECLARE_COMPLETION_ONSTACK(comp); + int ret = FAILED; + + cmd->comp = ∁ + if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd, + sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0) + goto out; + + wait_for_completion(&comp); + if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK || + cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) + ret = SUCCESS; + + /* + * The spec guarantees that all requests related to the TMF have + * been completed, but the callback might not have run yet if + * we're using independent interrupts (e.g. MSI). Poll the + * virtqueues once. + * + * In the abort case, scsi_done() will do nothing, because the + * command timed out and hence SCMD_STATE_COMPLETE has been set. + */ + virtscsi_poll_requests(vscsi); + +out: + mempool_free(cmd, virtscsi_cmd_pool); + return ret; +} + +static int virtscsi_device_reset(struct scsi_cmnd *sc) +{ + struct virtio_scsi *vscsi = shost_priv(sc->device->host); + struct virtio_scsi_cmd *cmd; + + sdev_printk(KERN_INFO, sc->device, "device reset\n"); + cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); + if (!cmd) + return FAILED; + + memset(cmd, 0, sizeof(*cmd)); + cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ + .type = VIRTIO_SCSI_T_TMF, + .subtype = cpu_to_virtio32(vscsi->vdev, + VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET), + .lun[0] = 1, + .lun[1] = sc->device->id, + .lun[2] = (sc->device->lun >> 8) | 0x40, + .lun[3] = sc->device->lun & 0xff, + }; + return virtscsi_tmf(vscsi, cmd); +} + +static int virtscsi_device_alloc(struct scsi_device *sdevice) +{ + /* + * Passed through SCSI targets (e.g. with qemu's 'scsi-block') + * may have transfer limits which come from the host SCSI + * controller or something on the host side other than the + * target itself. + * + * To make this work properly, the hypervisor can adjust the + * target's VPD information to advertise these limits. But + * for that to work, the guest has to look at the VPD pages, + * which we won't do by default if it is an SPC-2 device, even + * if it does actually support it. + * + * So, set the blist to always try to read the VPD pages. 
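+	 * (BLIST_TRY_VPD_PAGES makes the midlayer attempt VPD reads
+	 * even for devices that only claim SPC-2 compliance.)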
+ */ + sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES; + + return 0; +} + + +/** + * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth + * @sdev: Virtscsi target whose queue depth to change + * @qdepth: New queue depth + */ +static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + struct Scsi_Host *shost = sdev->host; + int max_depth = shost->cmd_per_lun; + + return scsi_change_queue_depth(sdev, min(max_depth, qdepth)); +} + +static int virtscsi_abort(struct scsi_cmnd *sc) +{ + struct virtio_scsi *vscsi = shost_priv(sc->device->host); + struct virtio_scsi_cmd *cmd; + + scmd_printk(KERN_INFO, sc, "abort\n"); + cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); + if (!cmd) + return FAILED; + + memset(cmd, 0, sizeof(*cmd)); + cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ + .type = VIRTIO_SCSI_T_TMF, + .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK, + .lun[0] = 1, + .lun[1] = sc->device->id, + .lun[2] = (sc->device->lun >> 8) | 0x40, + .lun[3] = sc->device->lun & 0xff, + .tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc), + }; + return virtscsi_tmf(vscsi, cmd); +} + +static void virtscsi_map_queues(struct Scsi_Host *shost) +{ + struct virtio_scsi *vscsi = shost_priv(shost); + struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; + + blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2); +} + +static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq) +{ + struct virtio_scsi *vscsi = shost_priv(shost); + + virtscsi_kick_vq(&vscsi->req_vqs[hwq]); +} + +/* + * The host guarantees to respond to each command, although I/O + * latencies might be higher than on bare metal. Reset the timer + * unconditionally to give the host a chance to perform EH. + */ +static enum scsi_timeout_action virtscsi_eh_timed_out(struct scsi_cmnd *scmnd) +{ + return SCSI_EH_RESET_TIMER; +} + +static const struct scsi_host_template virtscsi_host_template = { + .module = THIS_MODULE, + .name = "Virtio SCSI HBA", + .proc_name = "virtio_scsi", + .this_id = -1, + .cmd_size = sizeof(struct virtio_scsi_cmd), + .queuecommand = virtscsi_queuecommand, + .commit_rqs = virtscsi_commit_rqs, + .change_queue_depth = virtscsi_change_queue_depth, + .eh_abort_handler = virtscsi_abort, + .eh_device_reset_handler = virtscsi_device_reset, + .eh_timed_out = virtscsi_eh_timed_out, + .slave_alloc = virtscsi_device_alloc, + + .dma_boundary = UINT_MAX, + .map_queues = virtscsi_map_queues, + .track_queue_depth = 1, +}; + +#define virtscsi_config_get(vdev, fld) \ + ({ \ + __virtio_native_type(struct virtio_scsi_config, fld) __val; \ + virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \ + __val; \ + }) + +#define virtscsi_config_set(vdev, fld, val) \ + do { \ + __virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \ + virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \ + } while(0) + +static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, + struct virtqueue *vq) +{ + spin_lock_init(&virtscsi_vq->vq_lock); + virtscsi_vq->vq = vq; +} + +static void virtscsi_remove_vqs(struct virtio_device *vdev) +{ + /* Stop all the virtqueues. 
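+	 * The device has to be reset before del_vqs() so that it stops
+	 * using the virtqueue memory that is about to be freed.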
*/ + virtio_reset_device(vdev); + vdev->config->del_vqs(vdev); +} + +static int virtscsi_init(struct virtio_device *vdev, + struct virtio_scsi *vscsi) +{ + int err; + u32 i; + u32 num_vqs; + vq_callback_t **callbacks; + const char **names; + struct virtqueue **vqs; + struct irq_affinity desc = { .pre_vectors = 2 }; + + num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE; + vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL); + callbacks = kmalloc_array(num_vqs, sizeof(vq_callback_t *), + GFP_KERNEL); + names = kmalloc_array(num_vqs, sizeof(char *), GFP_KERNEL); + + if (!callbacks || !vqs || !names) { + err = -ENOMEM; + goto out; + } + + callbacks[0] = virtscsi_ctrl_done; + callbacks[1] = virtscsi_event_done; + names[0] = "control"; + names[1] = "event"; + for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) { + callbacks[i] = virtscsi_req_done; + names[i] = "request"; + } + + /* Discover virtqueues and write information to configuration. */ + err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc); + if (err) + goto out; + + virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]); + virtscsi_init_vq(&vscsi->event_vq, vqs[1]); + for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) + virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE], + vqs[i]); + + virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE); + virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE); + + err = 0; + +out: + kfree(names); + kfree(callbacks); + kfree(vqs); + if (err) + virtscsi_remove_vqs(vdev); + return err; +} + +static int virtscsi_probe(struct virtio_device *vdev) +{ + struct Scsi_Host *shost; + struct virtio_scsi *vscsi; + int err; + u32 sg_elems, num_targets; + u32 cmd_per_lun; + u32 num_queues; + + if (!vdev->config->get) { + dev_err(&vdev->dev, "%s failure: config access disabled\n", + __func__); + return -EINVAL; + } + + /* We need to know how many queues before we allocate. */ + num_queues = virtscsi_config_get(vdev, num_queues) ? : 1; + num_queues = min_t(unsigned int, nr_cpu_ids, num_queues); + + num_targets = virtscsi_config_get(vdev, max_target) + 1; + + shost = scsi_host_alloc(&virtscsi_host_template, + struct_size(vscsi, req_vqs, num_queues)); + if (!shost) + return -ENOMEM; + + sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1; + shost->sg_tablesize = sg_elems; + vscsi = shost_priv(shost); + vscsi->vdev = vdev; + vscsi->num_queues = num_queues; + vdev->priv = shost; + + err = virtscsi_init(vdev, vscsi); + if (err) + goto virtscsi_init_failed; + + shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq); + + cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; + shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); + shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; + + /* LUNs > 256 are reported with format 1, so they go in the range + * 16640-32767. 
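+	 * ("Format 1" is the flat-space LUN format also used in
+	 * virtio_scsi_init_hdr(): the encoded value is 0x4000 | lun, so
+	 * LUN 256 becomes 16640 and the largest encodable LUN is
+	 * 0x7fff = 32767.  That is why 0x4000 is added to max_lun
+	 * below.)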
+ */ + shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000; + shost->max_id = num_targets; + shost->max_channel = 0; + shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; + shost->nr_hw_queues = num_queues; + +#ifdef CONFIG_BLK_DEV_INTEGRITY + if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { + int host_prot; + + host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | + SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | + SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; + + scsi_host_set_prot(shost, host_prot); + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + } +#endif + + err = scsi_add_host(shost, &vdev->dev); + if (err) + goto scsi_add_host_failed; + + virtio_device_ready(vdev); + + if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) + virtscsi_kick_event_all(vscsi); + + scsi_scan_host(shost); + return 0; + +scsi_add_host_failed: + vdev->config->del_vqs(vdev); +virtscsi_init_failed: + scsi_host_put(shost); + return err; +} + +static void virtscsi_remove(struct virtio_device *vdev) +{ + struct Scsi_Host *shost = virtio_scsi_host(vdev); + struct virtio_scsi *vscsi = shost_priv(shost); + + if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) + virtscsi_cancel_event_work(vscsi); + + scsi_remove_host(shost); + virtscsi_remove_vqs(vdev); + scsi_host_put(shost); +} + +#ifdef CONFIG_PM_SLEEP +static int virtscsi_freeze(struct virtio_device *vdev) +{ + virtscsi_remove_vqs(vdev); + return 0; +} + +static int virtscsi_restore(struct virtio_device *vdev) +{ + struct Scsi_Host *sh = virtio_scsi_host(vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + int err; + + err = virtscsi_init(vdev, vscsi); + if (err) + return err; + + virtio_device_ready(vdev); + + if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) + virtscsi_kick_event_all(vscsi); + + return err; +} +#endif + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static unsigned int features[] = { + VIRTIO_SCSI_F_HOTPLUG, + VIRTIO_SCSI_F_CHANGE, +#ifdef CONFIG_BLK_DEV_INTEGRITY + VIRTIO_SCSI_F_T10_PI, +#endif +}; + +static struct virtio_driver virtio_scsi_driver = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtscsi_probe, +#ifdef CONFIG_PM_SLEEP + .freeze = virtscsi_freeze, + .restore = virtscsi_restore, +#endif + .remove = virtscsi_remove, +}; + +static int __init virtio_scsi_init(void) +{ + int ret = -ENOMEM; + + virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0); + if (!virtscsi_cmd_cache) { + pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n"); + goto error; + } + + + virtscsi_cmd_pool = + mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ, + virtscsi_cmd_cache); + if (!virtscsi_cmd_pool) { + pr_err("mempool_create() for virtscsi_cmd_pool failed\n"); + goto error; + } + ret = register_virtio_driver(&virtio_scsi_driver); + if (ret < 0) + goto error; + + return 0; + +error: + mempool_destroy(virtscsi_cmd_pool); + virtscsi_cmd_pool = NULL; + kmem_cache_destroy(virtscsi_cmd_cache); + virtscsi_cmd_cache = NULL; + return ret; +} + +static void __exit virtio_scsi_fini(void) +{ + unregister_virtio_driver(&virtio_scsi_driver); + mempool_destroy(virtscsi_cmd_pool); + kmem_cache_destroy(virtscsi_cmd_cache); +} +module_init(virtio_scsi_init); +module_exit(virtio_scsi_fini); + +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Virtio SCSI HBA driver"); +MODULE_LICENSE("GPL"); diff --git 
a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c new file mode 100644 index 000000000..f88ecdb93 --- /dev/null +++ b/drivers/scsi/vmw_pvscsi.c @@ -0,0 +1,1621 @@ +/* + * Linux driver for VMware's para-virtualized SCSI HBA. + * + * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "vmw_pvscsi.h" + +#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver" + +MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC); +MODULE_AUTHOR("VMware, Inc."); +MODULE_LICENSE("GPL"); +MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING); + +#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8 +#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1 +#define PVSCSI_DEFAULT_QUEUE_DEPTH 254 +#define SGL_SIZE PAGE_SIZE + +struct pvscsi_sg_list { + struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT]; +}; + +struct pvscsi_ctx { + /* + * The index of the context in cmd_map serves as the context ID for a + * 1-to-1 mapping completions back to requests. 
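+	 * The ID actually handed to the device is index + 1 (see
+	 * pvscsi_map_context()), so that zero is never a valid context.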
+ */ + struct scsi_cmnd *cmd; + struct pvscsi_sg_list *sgl; + struct list_head list; + dma_addr_t dataPA; + dma_addr_t sensePA; + dma_addr_t sglPA; + struct completion *abort_cmp; +}; + +struct pvscsi_adapter { + char *mmioBase; + u8 rev; + bool use_msg; + bool use_req_threshold; + + spinlock_t hw_lock; + + struct workqueue_struct *workqueue; + struct work_struct work; + + struct PVSCSIRingReqDesc *req_ring; + unsigned req_pages; + unsigned req_depth; + dma_addr_t reqRingPA; + + struct PVSCSIRingCmpDesc *cmp_ring; + unsigned cmp_pages; + dma_addr_t cmpRingPA; + + struct PVSCSIRingMsgDesc *msg_ring; + unsigned msg_pages; + dma_addr_t msgRingPA; + + struct PVSCSIRingsState *rings_state; + dma_addr_t ringStatePA; + + struct pci_dev *dev; + struct Scsi_Host *host; + + struct list_head cmd_pool; + struct pvscsi_ctx *cmd_map; +}; + + +/* Command line parameters */ +static int pvscsi_ring_pages; +static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING; +static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH; +static bool pvscsi_disable_msi; +static bool pvscsi_disable_msix; +static bool pvscsi_use_msg = true; +static bool pvscsi_use_req_threshold = true; + +#define PVSCSI_RW (S_IRUSR | S_IWUSR) + +module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW); +MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default=" + __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) + "[up to 16 targets]," + __stringify(PVSCSI_SETUP_RINGS_MAX_NUM_PAGES) + "[for 16+ targets])"); + +module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW); +MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default=" + __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")"); + +module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW); +MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default=" + __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")"); + +module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW); +MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); + +module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW); +MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)"); + +module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW); +MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)"); + +module_param_named(use_req_threshold, pvscsi_use_req_threshold, + bool, PVSCSI_RW); +MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1)"); + +static const struct pci_device_id pvscsi_pci_tbl[] = { + { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl); + +static struct device * +pvscsi_dev(const struct pvscsi_adapter *adapter) +{ + return &(adapter->dev->dev); +} + +static struct pvscsi_ctx * +pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd) +{ + struct pvscsi_ctx *ctx, *end; + + end = &adapter->cmd_map[adapter->req_depth]; + for (ctx = adapter->cmd_map; ctx < end; ctx++) + if (ctx->cmd == cmd) + return ctx; + + return NULL; +} + +static struct pvscsi_ctx * +pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd) +{ + struct pvscsi_ctx *ctx; + + if (list_empty(&adapter->cmd_pool)) + return NULL; + + ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list); + ctx->cmd = cmd; + list_del(&ctx->list); + + return ctx; +} + +static void pvscsi_release_context(struct pvscsi_adapter *adapter, + struct pvscsi_ctx 
*ctx) +{ + ctx->cmd = NULL; + ctx->abort_cmp = NULL; + list_add(&ctx->list, &adapter->cmd_pool); +} + +/* + * Map a pvscsi_ctx struct to a context ID field value; we map to a simple + * non-zero integer. ctx always points to an entry in cmd_map array, hence + * the return value is always >=1. + */ +static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter, + const struct pvscsi_ctx *ctx) +{ + return ctx - adapter->cmd_map + 1; +} + +static struct pvscsi_ctx * +pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context) +{ + return &adapter->cmd_map[context - 1]; +} + +static void pvscsi_reg_write(const struct pvscsi_adapter *adapter, + u32 offset, u32 val) +{ + writel(val, adapter->mmioBase + offset); +} + +static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset) +{ + return readl(adapter->mmioBase + offset); +} + +static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter) +{ + return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS); +} + +static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter, + u32 val) +{ + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val); +} + +static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter) +{ + u32 intr_bits; + + intr_bits = PVSCSI_INTR_CMPL_MASK; + if (adapter->use_msg) + intr_bits |= PVSCSI_INTR_MSG_MASK; + + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits); +} + +static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter) +{ + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0); +} + +static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter, + u32 cmd, const void *desc, size_t len) +{ + const u32 *ptr = desc; + size_t i; + + len /= sizeof(*ptr); + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd); + for (i = 0; i < len; i++) + pvscsi_reg_write(adapter, + PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]); +} + +static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter, + const struct pvscsi_ctx *ctx) +{ + struct PVSCSICmdDescAbortCmd cmd = { 0 }; + + cmd.target = ctx->cmd->device->id; + cmd.context = pvscsi_map_context(adapter, ctx); + + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd)); +} + +static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter) +{ + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0); +} + +static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter) +{ + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0); +} + +static int scsi_is_rw(unsigned char op) +{ + return op == READ_6 || op == WRITE_6 || + op == READ_10 || op == WRITE_10 || + op == READ_12 || op == WRITE_12 || + op == READ_16 || op == WRITE_16; +} + +static void pvscsi_kick_io(const struct pvscsi_adapter *adapter, + unsigned char op) +{ + if (scsi_is_rw(op)) { + struct PVSCSIRingsState *s = adapter->rings_state; + + if (!adapter->use_req_threshold || + s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold) + pvscsi_kick_rw_io(adapter); + } else { + pvscsi_process_request_ring(adapter); + } +} + +static void ll_adapter_reset(const struct pvscsi_adapter *adapter) +{ + dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter); + + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0); +} + +static void ll_bus_reset(const struct pvscsi_adapter *adapter) +{ + dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter); + + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0); +} + +static void ll_device_reset(const struct 
pvscsi_adapter *adapter, u32 target) +{ + struct PVSCSICmdDescResetDevice cmd = { 0 }; + + dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target); + + cmd.target = target; + + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE, + &cmd, sizeof(cmd)); +} + +static void pvscsi_create_sg(struct pvscsi_ctx *ctx, + struct scatterlist *sg, unsigned count) +{ + unsigned i; + struct PVSCSISGElement *sge; + + BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT); + + sge = &ctx->sgl->sge[0]; + for (i = 0; i < count; i++, sg = sg_next(sg)) { + sge[i].addr = sg_dma_address(sg); + sge[i].length = sg_dma_len(sg); + sge[i].flags = 0; + } +} + +/* + * Map all data buffers for a command into PCI space and + * setup the scatter/gather list if needed. + */ +static int pvscsi_map_buffers(struct pvscsi_adapter *adapter, + struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd, + struct PVSCSIRingReqDesc *e) +{ + unsigned count; + unsigned bufflen = scsi_bufflen(cmd); + struct scatterlist *sg; + + e->dataLen = bufflen; + e->dataAddr = 0; + if (bufflen == 0) + return 0; + + sg = scsi_sglist(cmd); + count = scsi_sg_count(cmd); + if (count != 0) { + int segs = scsi_dma_map(cmd); + + if (segs == -ENOMEM) { + scmd_printk(KERN_DEBUG, cmd, + "vmw_pvscsi: Failed to map cmd sglist for DMA.\n"); + return -ENOMEM; + } else if (segs > 1) { + pvscsi_create_sg(ctx, sg, segs); + + e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST; + ctx->sglPA = dma_map_single(&adapter->dev->dev, + ctx->sgl, SGL_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) { + scmd_printk(KERN_ERR, cmd, + "vmw_pvscsi: Failed to map ctx sglist for DMA.\n"); + scsi_dma_unmap(cmd); + ctx->sglPA = 0; + return -ENOMEM; + } + e->dataAddr = ctx->sglPA; + } else + e->dataAddr = sg_dma_address(sg); + } else { + /* + * In case there is no S/G list, scsi_sglist points + * directly to the buffer. + */ + ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen, + cmd->sc_data_direction); + if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) { + scmd_printk(KERN_DEBUG, cmd, + "vmw_pvscsi: Failed to map direct data buffer for DMA.\n"); + return -ENOMEM; + } + e->dataAddr = ctx->dataPA; + } + + return 0; +} + +/* + * The device incorrectly doesn't clear the first byte of the sense + * buffer in some cases. We have to do it ourselves. + * Otherwise we run into trouble when SWIOTLB is forced. 
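+ * Zeroing byte 0 (the sense response code) makes any stale contents
+ * of the buffer look like "no sense data" to the upper layers.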
+ */ +static void pvscsi_patch_sense(struct scsi_cmnd *cmd) +{ + if (cmd->sense_buffer) + cmd->sense_buffer[0] = 0; +} + +static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter, + struct pvscsi_ctx *ctx) +{ + struct scsi_cmnd *cmd; + unsigned bufflen; + + cmd = ctx->cmd; + bufflen = scsi_bufflen(cmd); + + if (bufflen != 0) { + unsigned count = scsi_sg_count(cmd); + + if (count != 0) { + scsi_dma_unmap(cmd); + if (ctx->sglPA) { + dma_unmap_single(&adapter->dev->dev, ctx->sglPA, + SGL_SIZE, DMA_TO_DEVICE); + ctx->sglPA = 0; + } + } else + dma_unmap_single(&adapter->dev->dev, ctx->dataPA, + bufflen, cmd->sc_data_direction); + } + if (cmd->sense_buffer) + dma_unmap_single(&adapter->dev->dev, ctx->sensePA, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); +} + +static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter) +{ + adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE, + &adapter->ringStatePA, GFP_KERNEL); + if (!adapter->rings_state) + return -ENOMEM; + + adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING, + pvscsi_ring_pages); + adapter->req_depth = adapter->req_pages + * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; + adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev, + adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA, + GFP_KERNEL); + if (!adapter->req_ring) + return -ENOMEM; + + adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING, + pvscsi_ring_pages); + adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev, + adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA, + GFP_KERNEL); + if (!adapter->cmp_ring) + return -ENOMEM; + + BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE)); + BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE)); + BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE)); + + if (!adapter->use_msg) + return 0; + + adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING, + pvscsi_msg_ring_pages); + adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev, + adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA, + GFP_KERNEL); + if (!adapter->msg_ring) + return -ENOMEM; + BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE)); + + return 0; +} + +static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter) +{ + struct PVSCSICmdDescSetupRings cmd = { 0 }; + dma_addr_t base; + unsigned i; + + cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT; + cmd.reqRingNumPages = adapter->req_pages; + cmd.cmpRingNumPages = adapter->cmp_pages; + + base = adapter->reqRingPA; + for (i = 0; i < adapter->req_pages; i++) { + cmd.reqRingPPNs[i] = base >> PAGE_SHIFT; + base += PAGE_SIZE; + } + + base = adapter->cmpRingPA; + for (i = 0; i < adapter->cmp_pages; i++) { + cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT; + base += PAGE_SIZE; + } + + memset(adapter->rings_state, 0, PAGE_SIZE); + memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE); + memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE); + + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS, + &cmd, sizeof(cmd)); + + if (adapter->use_msg) { + struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 }; + + cmd_msg.numPages = adapter->msg_pages; + + base = adapter->msgRingPA; + for (i = 0; i < adapter->msg_pages; i++) { + cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT; + base += PAGE_SIZE; + } + memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE); + + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING, + &cmd_msg, sizeof(cmd_msg)); + } +} + +static int pvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + if (!sdev->tagged_supported) + qdepth = 1; + return 
scsi_change_queue_depth(sdev, qdepth); +} + +/* + * Pull a completion descriptor off and pass the completion back + * to the SCSI mid layer. + */ +static void pvscsi_complete_request(struct pvscsi_adapter *adapter, + const struct PVSCSIRingCmpDesc *e) +{ + struct pvscsi_ctx *ctx; + struct scsi_cmnd *cmd; + struct completion *abort_cmp; + u32 btstat = e->hostStatus; + u32 sdstat = e->scsiStatus; + + ctx = pvscsi_get_context(adapter, e->context); + cmd = ctx->cmd; + abort_cmp = ctx->abort_cmp; + pvscsi_unmap_buffers(adapter, ctx); + if (sdstat != SAM_STAT_CHECK_CONDITION) + pvscsi_patch_sense(cmd); + pvscsi_release_context(adapter, ctx); + if (abort_cmp) { + /* + * The command was requested to be aborted. Just signal that + * the request completed and swallow the actual cmd completion + * here. The abort handler will post a completion for this + * command indicating that it got successfully aborted. + */ + complete(abort_cmp); + return; + } + + cmd->result = 0; + if (sdstat != SAM_STAT_GOOD && + (btstat == BTSTAT_SUCCESS || + btstat == BTSTAT_LINKED_COMMAND_COMPLETED || + btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) { + if (sdstat == SAM_STAT_COMMAND_TERMINATED) { + cmd->result = (DID_RESET << 16); + } else { + cmd->result = (DID_OK << 16) | sdstat; + } + } else + switch (btstat) { + case BTSTAT_SUCCESS: + case BTSTAT_LINKED_COMMAND_COMPLETED: + case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG: + /* + * Commands like INQUIRY may transfer less data than + * requested by the initiator via bufflen. Set residual + * count to make upper layer aware of the actual amount + * of data returned. There are cases when controller + * returns zero dataLen with non zero data - do not set + * residual count in that case. + */ + if (e->dataLen && (e->dataLen < scsi_bufflen(cmd))) + scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen); + cmd->result = (DID_OK << 16); + break; + + case BTSTAT_DATARUN: + case BTSTAT_DATA_UNDERRUN: + /* Report residual data in underruns */ + scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen); + cmd->result = (DID_ERROR << 16); + break; + + case BTSTAT_SELTIMEO: + /* Our emulation returns this for non-connected devs */ + cmd->result = (DID_BAD_TARGET << 16); + break; + + case BTSTAT_LUNMISMATCH: + case BTSTAT_TAGREJECT: + case BTSTAT_BADMSG: + case BTSTAT_HAHARDWARE: + case BTSTAT_INVPHASE: + case BTSTAT_HATIMEOUT: + case BTSTAT_NORESPONSE: + case BTSTAT_DISCONNECT: + case BTSTAT_HASOFTWARE: + case BTSTAT_BUSFREE: + case BTSTAT_SENSFAILED: + cmd->result |= (DID_ERROR << 16); + break; + + case BTSTAT_SENTRST: + case BTSTAT_RECVRST: + case BTSTAT_BUSRESET: + cmd->result = (DID_RESET << 16); + break; + + case BTSTAT_ABORTQUEUE: + cmd->result = (DID_BUS_BUSY << 16); + break; + + case BTSTAT_SCSIPARITY: + cmd->result = (DID_PARITY << 16); + break; + + default: + cmd->result = (DID_ERROR << 16); + scmd_printk(KERN_DEBUG, cmd, + "Unknown completion status: 0x%x\n", + btstat); + } + + dev_dbg(&cmd->device->sdev_gendev, + "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n", + cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat); + + scsi_done(cmd); +} + +/* + * barrier usage : Since the PVSCSI device is emulated, there could be cases + * where we may want to serialize some accesses between the driver and the + * emulation layer. We use compiler barriers instead of the more expensive + * memory barriers because PVSCSI is only supported on X86 which has strong + * memory access ordering. 
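+ *
+ * The ring indices shared with the device are free-running 32-bit
+ * counters; the slot belonging to an index is obtained by masking
+ * with the ring size minus one, e.g.
+ *
+ *	e = ring + (s->cmpConsIdx & MASK(cmp_entries));
+ *
+ * where cmp_entries is the log2 of the number of ring entries.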
+ */ +static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter) +{ + struct PVSCSIRingsState *s = adapter->rings_state; + struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring; + u32 cmp_entries = s->cmpNumEntriesLog2; + + while (s->cmpConsIdx != s->cmpProdIdx) { + struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx & + MASK(cmp_entries)); + /* + * This barrier() ensures that *e is not dereferenced while + * the device emulation still writes data into the slot. + * Since the device emulation advances s->cmpProdIdx only after + * updating the slot we want to check it first. + */ + barrier(); + pvscsi_complete_request(adapter, e); + /* + * This barrier() ensures that compiler doesn't reorder write + * to s->cmpConsIdx before the read of (*e) inside + * pvscsi_complete_request. Otherwise, device emulation may + * overwrite *e before we had a chance to read it. + */ + barrier(); + s->cmpConsIdx++; + } +} + +/* + * Translate a Linux SCSI request into a request ring entry. + */ +static int pvscsi_queue_ring(struct pvscsi_adapter *adapter, + struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd) +{ + struct PVSCSIRingsState *s; + struct PVSCSIRingReqDesc *e; + struct scsi_device *sdev; + u32 req_entries; + + s = adapter->rings_state; + sdev = cmd->device; + req_entries = s->reqNumEntriesLog2; + + /* + * If this condition holds, we might have room on the request ring, but + * we might not have room on the completion ring for the response. + * However, we have already ruled out this possibility - we would not + * have successfully allocated a context if it were true, since we only + * have one context per request entry. Check for it anyway, since it + * would be a serious bug. + */ + if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) { + scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: " + "ring full: reqProdIdx=%d cmpConsIdx=%d\n", + s->reqProdIdx, s->cmpConsIdx); + return -1; + } + + e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries)); + + e->bus = sdev->channel; + e->target = sdev->id; + memset(e->lun, 0, sizeof(e->lun)); + e->lun[1] = sdev->lun; + + if (cmd->sense_buffer) { + ctx->sensePA = dma_map_single(&adapter->dev->dev, + cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) { + scmd_printk(KERN_DEBUG, cmd, + "vmw_pvscsi: Failed to map sense buffer for DMA.\n"); + ctx->sensePA = 0; + return -ENOMEM; + } + e->senseAddr = ctx->sensePA; + e->senseLen = SCSI_SENSE_BUFFERSIZE; + } else { + e->senseLen = 0; + e->senseAddr = 0; + } + e->cdbLen = cmd->cmd_len; + e->vcpuHint = smp_processor_id(); + memcpy(e->cdb, cmd->cmnd, e->cdbLen); + + e->tag = SIMPLE_QUEUE_TAG; + + if (cmd->sc_data_direction == DMA_FROM_DEVICE) + e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST; + else if (cmd->sc_data_direction == DMA_TO_DEVICE) + e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE; + else if (cmd->sc_data_direction == DMA_NONE) + e->flags = PVSCSI_FLAG_CMD_DIR_NONE; + else + e->flags = 0; + + if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) { + if (cmd->sense_buffer) { + dma_unmap_single(&adapter->dev->dev, ctx->sensePA, + SCSI_SENSE_BUFFERSIZE, + DMA_FROM_DEVICE); + ctx->sensePA = 0; + } + return -ENOMEM; + } + + e->context = pvscsi_map_context(adapter, ctx); + + barrier(); + + s->reqProdIdx++; + + return 0; +} + +static int pvscsi_queue_lck(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + struct pvscsi_adapter *adapter = shost_priv(host); + struct pvscsi_ctx *ctx; + unsigned long flags; + unsigned char op; + + 
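+	/*
+	 * Reserve a context and fill a request ring entry under the HW
+	 * lock; the device is only kicked after the lock has been
+	 * dropped.
+	 */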
spin_lock_irqsave(&adapter->hw_lock, flags); + + ctx = pvscsi_acquire_context(adapter, cmd); + if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) { + if (ctx) + pvscsi_release_context(adapter, ctx); + spin_unlock_irqrestore(&adapter->hw_lock, flags); + return SCSI_MLQUEUE_HOST_BUSY; + } + + op = cmd->cmnd[0]; + + dev_dbg(&cmd->device->sdev_gendev, + "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op); + + spin_unlock_irqrestore(&adapter->hw_lock, flags); + + pvscsi_kick_io(adapter, op); + + return 0; +} + +static DEF_SCSI_QCMD(pvscsi_queue) + +static int pvscsi_abort(struct scsi_cmnd *cmd) +{ + struct pvscsi_adapter *adapter = shost_priv(cmd->device->host); + struct pvscsi_ctx *ctx; + unsigned long flags; + int result = SUCCESS; + DECLARE_COMPLETION_ONSTACK(abort_cmp); + int done; + + scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n", + adapter->host->host_no, cmd); + + spin_lock_irqsave(&adapter->hw_lock, flags); + + /* + * Poll the completion ring first - we might be trying to abort + * a command that is waiting to be dispatched in the completion ring. + */ + pvscsi_process_completion_ring(adapter); + + /* + * If there is no context for the command, it either already succeeded + * or else was never properly issued. Not our problem. + */ + ctx = pvscsi_find_context(adapter, cmd); + if (!ctx) { + scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd); + goto out; + } + + /* + * Mark that the command has been requested to be aborted and issue + * the abort. + */ + ctx->abort_cmp = &abort_cmp; + + pvscsi_abort_cmd(adapter, ctx); + spin_unlock_irqrestore(&adapter->hw_lock, flags); + /* Wait for 2 secs for the completion. */ + done = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000)); + spin_lock_irqsave(&adapter->hw_lock, flags); + + if (!done) { + /* + * Failed to abort the command, unmark the fact that it + * was requested to be aborted. + */ + ctx->abort_cmp = NULL; + result = FAILED; + scmd_printk(KERN_DEBUG, cmd, + "Failed to get completion for aborted cmd %p\n", + cmd); + goto out; + } + + /* + * Successfully aborted the command. + */ + cmd->result = (DID_ABORT << 16); + scsi_done(cmd); + +out: + spin_unlock_irqrestore(&adapter->hw_lock, flags); + return result; +} + +/* + * Abort all outstanding requests. This is only safe to use if the completion + * ring will never be walked again or the device has been reset, because it + * destroys the 1-1 mapping between context field passed to emulation and our + * request structure. + */ +static void pvscsi_reset_all(struct pvscsi_adapter *adapter) +{ + unsigned i; + + for (i = 0; i < adapter->req_depth; i++) { + struct pvscsi_ctx *ctx = &adapter->cmd_map[i]; + struct scsi_cmnd *cmd = ctx->cmd; + if (cmd) { + scmd_printk(KERN_ERR, cmd, + "Forced reset on cmd %p\n", cmd); + pvscsi_unmap_buffers(adapter, ctx); + pvscsi_patch_sense(cmd); + pvscsi_release_context(adapter, ctx); + cmd->result = (DID_RESET << 16); + scsi_done(cmd); + } + } +} + +static int pvscsi_host_reset(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + struct pvscsi_adapter *adapter = shost_priv(host); + unsigned long flags; + bool use_msg; + + scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n"); + + spin_lock_irqsave(&adapter->hw_lock, flags); + + use_msg = adapter->use_msg; + + if (use_msg) { + adapter->use_msg = false; + spin_unlock_irqrestore(&adapter->hw_lock, flags); + + /* + * Now that we know that the ISR won't add more work on the + * workqueue we can safely flush any outstanding work. 
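+		 * (use_msg was cleared under the lock above, which is what
+		 * stops the ISR from scheduling more message work.)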
+ */ + flush_workqueue(adapter->workqueue); + spin_lock_irqsave(&adapter->hw_lock, flags); + } + + /* + * We're going to tear down the entire ring structure and set it back + * up, so stalling new requests until all completions are flushed and + * the rings are back in place. + */ + + pvscsi_process_request_ring(adapter); + + ll_adapter_reset(adapter); + + /* + * Now process any completions. Note we do this AFTER adapter reset, + * which is strange, but stops races where completions get posted + * between processing the ring and issuing the reset. The backend will + * not touch the ring memory after reset, so the immediately pre-reset + * completion ring state is still valid. + */ + pvscsi_process_completion_ring(adapter); + + pvscsi_reset_all(adapter); + adapter->use_msg = use_msg; + pvscsi_setup_all_rings(adapter); + pvscsi_unmask_intr(adapter); + + spin_unlock_irqrestore(&adapter->hw_lock, flags); + + return SUCCESS; +} + +static int pvscsi_bus_reset(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + struct pvscsi_adapter *adapter = shost_priv(host); + unsigned long flags; + + scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n"); + + /* + * We don't want to queue new requests for this bus after + * flushing all pending requests to emulation, since new + * requests could then sneak in during this bus reset phase, + * so take the lock now. + */ + spin_lock_irqsave(&adapter->hw_lock, flags); + + pvscsi_process_request_ring(adapter); + ll_bus_reset(adapter); + pvscsi_process_completion_ring(adapter); + + spin_unlock_irqrestore(&adapter->hw_lock, flags); + + return SUCCESS; +} + +static int pvscsi_device_reset(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + struct pvscsi_adapter *adapter = shost_priv(host); + unsigned long flags; + + scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n", + host->host_no, cmd->device->id); + + /* + * We don't want to queue new requests for this device after flushing + * all pending requests to emulation, since new requests could then + * sneak in during this device reset phase, so take the lock now. 
+ */ + spin_lock_irqsave(&adapter->hw_lock, flags); + + pvscsi_process_request_ring(adapter); + ll_device_reset(adapter, cmd->device->id); + pvscsi_process_completion_ring(adapter); + + spin_unlock_irqrestore(&adapter->hw_lock, flags); + + return SUCCESS; +} + +static struct scsi_host_template pvscsi_template; + +static const char *pvscsi_info(struct Scsi_Host *host) +{ + struct pvscsi_adapter *adapter = shost_priv(host); + static char buf[256]; + + sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: " + "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev, + adapter->req_pages, adapter->cmp_pages, adapter->msg_pages, + pvscsi_template.cmd_per_lun); + + return buf; +} + +static struct scsi_host_template pvscsi_template = { + .module = THIS_MODULE, + .name = "VMware PVSCSI Host Adapter", + .proc_name = "vmw_pvscsi", + .info = pvscsi_info, + .queuecommand = pvscsi_queue, + .this_id = -1, + .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT, + .dma_boundary = UINT_MAX, + .max_sectors = 0xffff, + .change_queue_depth = pvscsi_change_queue_depth, + .eh_abort_handler = pvscsi_abort, + .eh_device_reset_handler = pvscsi_device_reset, + .eh_bus_reset_handler = pvscsi_bus_reset, + .eh_host_reset_handler = pvscsi_host_reset, +}; + +static void pvscsi_process_msg(const struct pvscsi_adapter *adapter, + const struct PVSCSIRingMsgDesc *e) +{ + struct PVSCSIRingsState *s = adapter->rings_state; + struct Scsi_Host *host = adapter->host; + struct scsi_device *sdev; + + printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n", + e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2); + + BUILD_BUG_ON(PVSCSI_MSG_LAST != 2); + + if (e->type == PVSCSI_MSG_DEV_ADDED) { + struct PVSCSIMsgDescDevStatusChanged *desc; + desc = (struct PVSCSIMsgDescDevStatusChanged *)e; + + printk(KERN_INFO + "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n", + desc->bus, desc->target, desc->lun[1]); + + if (!scsi_host_get(host)) + return; + + sdev = scsi_device_lookup(host, desc->bus, desc->target, + desc->lun[1]); + if (sdev) { + printk(KERN_INFO "vmw_pvscsi: device already exists\n"); + scsi_device_put(sdev); + } else + scsi_add_device(adapter->host, desc->bus, + desc->target, desc->lun[1]); + + scsi_host_put(host); + } else if (e->type == PVSCSI_MSG_DEV_REMOVED) { + struct PVSCSIMsgDescDevStatusChanged *desc; + desc = (struct PVSCSIMsgDescDevStatusChanged *)e; + + printk(KERN_INFO + "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n", + desc->bus, desc->target, desc->lun[1]); + + if (!scsi_host_get(host)) + return; + + sdev = scsi_device_lookup(host, desc->bus, desc->target, + desc->lun[1]); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } else + printk(KERN_INFO + "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n", + desc->bus, desc->target, desc->lun[1]); + + scsi_host_put(host); + } +} + +static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter) +{ + struct PVSCSIRingsState *s = adapter->rings_state; + + return s->msgProdIdx != s->msgConsIdx; +} + +static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter) +{ + struct PVSCSIRingsState *s = adapter->rings_state; + struct PVSCSIRingMsgDesc *ring = adapter->msg_ring; + u32 msg_entries = s->msgNumEntriesLog2; + + while (pvscsi_msg_pending(adapter)) { + struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx & + MASK(msg_entries)); + + barrier(); + pvscsi_process_msg(adapter, e); + barrier(); + s->msgConsIdx++; + } +} + +static void pvscsi_msg_workqueue_handler(struct work_struct *data) +{ + struct 
pvscsi_adapter *adapter; + + adapter = container_of(data, struct pvscsi_adapter, work); + + pvscsi_process_msg_ring(adapter); +} + +static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter) +{ + char name[32]; + + if (!pvscsi_use_msg) + return 0; + + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, + PVSCSI_CMD_SETUP_MSG_RING); + + if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1) + return 0; + + snprintf(name, sizeof(name), + "vmw_pvscsi_wq_%u", adapter->host->host_no); + + adapter->workqueue = create_singlethread_workqueue(name); + if (!adapter->workqueue) { + printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n"); + return 0; + } + INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler); + + return 1; +} + +static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter, + bool enable) +{ + u32 val; + + if (!pvscsi_use_req_threshold) + return false; + + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, + PVSCSI_CMD_SETUP_REQCALLTHRESHOLD); + val = pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS); + if (val == -1) { + printk(KERN_INFO "vmw_pvscsi: device does not support req_threshold\n"); + return false; + } else { + struct PVSCSICmdDescSetupReqCall cmd_msg = { 0 }; + cmd_msg.enable = enable; + printk(KERN_INFO + "vmw_pvscsi: %sabling reqCallThreshold\n", + enable ? "en" : "dis"); + pvscsi_write_cmd_desc(adapter, + PVSCSI_CMD_SETUP_REQCALLTHRESHOLD, + &cmd_msg, sizeof(cmd_msg)); + return pvscsi_reg_read(adapter, + PVSCSI_REG_OFFSET_COMMAND_STATUS) != 0; + } +} + +static irqreturn_t pvscsi_isr(int irq, void *devp) +{ + struct pvscsi_adapter *adapter = devp; + unsigned long flags; + + spin_lock_irqsave(&adapter->hw_lock, flags); + pvscsi_process_completion_ring(adapter); + if (adapter->use_msg && pvscsi_msg_pending(adapter)) + queue_work(adapter->workqueue, &adapter->work); + spin_unlock_irqrestore(&adapter->hw_lock, flags); + + return IRQ_HANDLED; +} + +static irqreturn_t pvscsi_shared_isr(int irq, void *devp) +{ + struct pvscsi_adapter *adapter = devp; + u32 val = pvscsi_read_intr_status(adapter); + + if (!(val & PVSCSI_INTR_ALL_SUPPORTED)) + return IRQ_NONE; + pvscsi_write_intr_status(devp, val); + return pvscsi_isr(irq, devp); +} + +static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter) +{ + struct pvscsi_ctx *ctx = adapter->cmd_map; + unsigned i; + + for (i = 0; i < adapter->req_depth; ++i, ++ctx) + free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE)); +} + +static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter) +{ + free_irq(pci_irq_vector(adapter->dev, 0), adapter); + pci_free_irq_vectors(adapter->dev); +} + +static void pvscsi_release_resources(struct pvscsi_adapter *adapter) +{ + if (adapter->workqueue) + destroy_workqueue(adapter->workqueue); + + if (adapter->mmioBase) + pci_iounmap(adapter->dev, adapter->mmioBase); + + pci_release_regions(adapter->dev); + + if (adapter->cmd_map) { + pvscsi_free_sgls(adapter); + kfree(adapter->cmd_map); + } + + if (adapter->rings_state) + dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, + adapter->rings_state, adapter->ringStatePA); + + if (adapter->req_ring) + dma_free_coherent(&adapter->dev->dev, + adapter->req_pages * PAGE_SIZE, + adapter->req_ring, adapter->reqRingPA); + + if (adapter->cmp_ring) + dma_free_coherent(&adapter->dev->dev, + adapter->cmp_pages * PAGE_SIZE, + adapter->cmp_ring, adapter->cmpRingPA); + + if (adapter->msg_ring) + dma_free_coherent(&adapter->dev->dev, + adapter->msg_pages * PAGE_SIZE, + adapter->msg_ring, adapter->msgRingPA); +} + 
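+/*
+ * Several of the setup helpers above (pvscsi_setup_msg_workqueue(),
+ * pvscsi_setup_req_threshold()) share the same probe pattern: write the
+ * command number to the COMMAND register, then read COMMAND_STATUS, where a
+ * value of -1 means the emulation does not implement that command.  Purely
+ * as an illustrative sketch (pvscsi_cmd_supported() is not part of this
+ * driver), the pattern boils down to:
+ *
+ *	static bool pvscsi_cmd_supported(const struct pvscsi_adapter *adapter,
+ *					 u32 cmd)
+ *	{
+ *		pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
+ *		return pvscsi_reg_read(adapter,
+ *				       PVSCSI_REG_OFFSET_COMMAND_STATUS) != -1;
+ *	}
+ */
+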
+/* + * Allocate scatter gather lists. + * + * These are statically allocated. Trying to be clever was not worth it. + * + * Dynamic allocation can fail, and we can't go deep into the memory + * allocator, since we're a SCSI driver, and trying too hard to allocate + * memory might generate disk I/O. We also don't want to fail disk I/O + * in that case because we can't get an allocation - the I/O could be + * trying to swap out data to free memory. Since that is pathological, + * just use a statically allocated scatter list. + * + */ +static int pvscsi_allocate_sg(struct pvscsi_adapter *adapter) +{ + struct pvscsi_ctx *ctx; + int i; + + ctx = adapter->cmd_map; + BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE); + + for (i = 0; i < adapter->req_depth; ++i, ++ctx) { + ctx->sgl = (void *)__get_free_pages(GFP_KERNEL, + get_order(SGL_SIZE)); + ctx->sglPA = 0; + BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE)); + if (!ctx->sgl) { + for (; i >= 0; --i, --ctx) { + free_pages((unsigned long)ctx->sgl, + get_order(SGL_SIZE)); + ctx->sgl = NULL; + } + return -ENOMEM; + } + } + + return 0; +} + +/* + * Query the device, fetch the config info and return the + * maximum number of targets on the adapter. In case of + * failure due to any reason return default i.e. 16. + */ +static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter) +{ + struct PVSCSICmdDescConfigCmd cmd; + struct PVSCSIConfigPageHeader *header; + struct device *dev; + dma_addr_t configPagePA; + void *config_page; + u32 numPhys = 16; + + dev = pvscsi_dev(adapter); + config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE, + &configPagePA, GFP_KERNEL); + if (!config_page) { + dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n"); + goto exit; + } + BUG_ON(configPagePA & ~PAGE_MASK); + + /* Fetch config info from the device. */ + cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32; + cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER; + cmd.cmpAddr = configPagePA; + cmd._pad = 0; + + /* + * Mark the completion page header with error values. If the device + * completes the command successfully, it sets the status values to + * indicate success. + */ + header = config_page; + header->hostStatus = BTSTAT_INVPARAM; + header->scsiStatus = SDSTAT_CHECK; + + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd); + + if (header->hostStatus == BTSTAT_SUCCESS && + header->scsiStatus == SDSTAT_GOOD) { + struct PVSCSIConfigPageController *config; + + config = config_page; + numPhys = config->numPhys; + } else + dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. 
hostStatus = 0x%x, scsiStatus = 0x%x\n", + header->hostStatus, header->scsiStatus); + dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page, + configPagePA); +exit: + return numPhys; +} + +static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + unsigned int irq_flag = PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY; + struct pvscsi_adapter *adapter; + struct pvscsi_adapter adapter_temp; + struct Scsi_Host *host = NULL; + unsigned int i; + int error; + u32 max_id; + + error = -ENODEV; + + if (pci_enable_device(pdev)) + return error; + + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n"); + } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n"); + } else { + printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n"); + goto out_disable_device; + } + + /* + * Let's use a temp pvscsi_adapter struct until we find the number of + * targets on the adapter, after that we will switch to the real + * allocated struct. + */ + adapter = &adapter_temp; + memset(adapter, 0, sizeof(*adapter)); + adapter->dev = pdev; + adapter->rev = pdev->revision; + + if (pci_request_regions(pdev, "vmw_pvscsi")) { + printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n"); + goto out_disable_device; + } + + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)) + continue; + + if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE) + continue; + + break; + } + + if (i == DEVICE_COUNT_RESOURCE) { + printk(KERN_ERR + "vmw_pvscsi: adapter has no suitable MMIO region\n"); + goto out_release_resources_and_disable; + } + + adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE); + + if (!adapter->mmioBase) { + printk(KERN_ERR + "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n", + i, PVSCSI_MEM_SPACE_SIZE); + goto out_release_resources_and_disable; + } + + pci_set_master(pdev); + + /* + * Ask the device for max number of targets before deciding the + * default pvscsi_ring_pages value. + */ + max_id = pvscsi_get_max_targets(adapter); + printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id); + + if (pvscsi_ring_pages == 0) + /* + * Set the right default value. Up to 16 it is 8, above it is + * max. + */ + pvscsi_ring_pages = (max_id > 16) ? + PVSCSI_SETUP_RINGS_MAX_NUM_PAGES : + PVSCSI_DEFAULT_NUM_PAGES_PER_RING; + printk(KERN_INFO + "vmw_pvscsi: setting ring_pages to %d\n", + pvscsi_ring_pages); + + pvscsi_template.can_queue = + min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) * + PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; + pvscsi_template.cmd_per_lun = + min(pvscsi_template.can_queue, pvscsi_cmd_per_lun); + host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter)); + if (!host) { + printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n"); + goto out_release_resources_and_disable; + } + + /* + * Let's use the real pvscsi_adapter struct here onwards. + */ + adapter = shost_priv(host); + memset(adapter, 0, sizeof(*adapter)); + adapter->dev = pdev; + adapter->host = host; + /* + * Copy back what we already have to the allocated adapter struct. 
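+	 * (That is, the revision and the iomapped MMIO base that were gathered
+	 * through the temporary adapter_temp struct.)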
+ */ + adapter->rev = adapter_temp.rev; + adapter->mmioBase = adapter_temp.mmioBase; + + spin_lock_init(&adapter->hw_lock); + host->max_channel = 0; + host->max_lun = 1; + host->max_cmd_len = 16; + host->max_id = max_id; + + pci_set_drvdata(pdev, host); + + ll_adapter_reset(adapter); + + adapter->use_msg = pvscsi_setup_msg_workqueue(adapter); + + error = pvscsi_allocate_rings(adapter); + if (error) { + printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n"); + goto out_release_resources; + } + + /* + * From this point on we should reset the adapter if anything goes + * wrong. + */ + pvscsi_setup_all_rings(adapter); + + adapter->cmd_map = kcalloc(adapter->req_depth, + sizeof(struct pvscsi_ctx), GFP_KERNEL); + if (!adapter->cmd_map) { + printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n"); + error = -ENOMEM; + goto out_reset_adapter; + } + + INIT_LIST_HEAD(&adapter->cmd_pool); + for (i = 0; i < adapter->req_depth; i++) { + struct pvscsi_ctx *ctx = adapter->cmd_map + i; + list_add(&ctx->list, &adapter->cmd_pool); + } + + error = pvscsi_allocate_sg(adapter); + if (error) { + printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n"); + goto out_reset_adapter; + } + + if (pvscsi_disable_msix) + irq_flag &= ~PCI_IRQ_MSIX; + if (pvscsi_disable_msi) + irq_flag &= ~PCI_IRQ_MSI; + + error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag); + if (error < 0) + goto out_reset_adapter; + + adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true); + printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n", + adapter->use_req_threshold ? "en" : "dis"); + + if (adapter->dev->msix_enabled || adapter->dev->msi_enabled) { + printk(KERN_INFO "vmw_pvscsi: using MSI%s\n", + adapter->dev->msix_enabled ? "-X" : ""); + error = request_irq(pci_irq_vector(pdev, 0), pvscsi_isr, + 0, "vmw_pvscsi", adapter); + } else { + printk(KERN_INFO "vmw_pvscsi: using INTx\n"); + error = request_irq(pci_irq_vector(pdev, 0), pvscsi_shared_isr, + IRQF_SHARED, "vmw_pvscsi", adapter); + } + + if (error) { + printk(KERN_ERR + "vmw_pvscsi: unable to request IRQ: %d\n", error); + goto out_reset_adapter; + } + + error = scsi_add_host(host, &pdev->dev); + if (error) { + printk(KERN_ERR + "vmw_pvscsi: scsi_add_host failed: %d\n", error); + goto out_reset_adapter; + } + + dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n", + adapter->rev, host->host_no); + + pvscsi_unmask_intr(adapter); + + scsi_scan_host(host); + + return 0; + +out_reset_adapter: + ll_adapter_reset(adapter); +out_release_resources: + pvscsi_shutdown_intr(adapter); + pvscsi_release_resources(adapter); + scsi_host_put(host); +out_disable_device: + pci_disable_device(pdev); + + return error; + +out_release_resources_and_disable: + pvscsi_shutdown_intr(adapter); + pvscsi_release_resources(adapter); + goto out_disable_device; +} + +static void __pvscsi_shutdown(struct pvscsi_adapter *adapter) +{ + pvscsi_mask_intr(adapter); + + if (adapter->workqueue) + flush_workqueue(adapter->workqueue); + + pvscsi_shutdown_intr(adapter); + + pvscsi_process_request_ring(adapter); + pvscsi_process_completion_ring(adapter); + ll_adapter_reset(adapter); +} + +static void pvscsi_shutdown(struct pci_dev *dev) +{ + struct Scsi_Host *host = pci_get_drvdata(dev); + struct pvscsi_adapter *adapter = shost_priv(host); + + __pvscsi_shutdown(adapter); +} + +static void pvscsi_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + struct pvscsi_adapter *adapter = shost_priv(host); + + scsi_remove_host(host); + + 
__pvscsi_shutdown(adapter); + pvscsi_release_resources(adapter); + + scsi_host_put(host); + + pci_disable_device(pdev); +} + +static struct pci_driver pvscsi_pci_driver = { + .name = "vmw_pvscsi", + .id_table = pvscsi_pci_tbl, + .probe = pvscsi_probe, + .remove = pvscsi_remove, + .shutdown = pvscsi_shutdown, +}; + +static int __init pvscsi_init(void) +{ + pr_info("%s - version %s\n", + PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING); + return pci_register_driver(&pvscsi_pci_driver); +} + +static void __exit pvscsi_exit(void) +{ + pci_unregister_driver(&pvscsi_pci_driver); +} + +module_init(pvscsi_init); +module_exit(pvscsi_exit); diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h new file mode 100644 index 000000000..9d16cf925 --- /dev/null +++ b/drivers/scsi/vmw_pvscsi.h @@ -0,0 +1,461 @@ +/* + * VMware PVSCSI header file + * + * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#ifndef _VMW_PVSCSI_H_ +#define _VMW_PVSCSI_H_ + +#include + +#define PVSCSI_DRIVER_VERSION_STRING "1.0.7.0-k" + +#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128 + +#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */ + +#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0 + +/* + * host adapter status/error codes + */ +enum HostBusAdapterStatus { + BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */ + BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a, + BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b, + BTSTAT_DATA_UNDERRUN = 0x0c, + BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */ + BTSTAT_DATARUN = 0x12, /* data overrun/underrun */ + BTSTAT_BUSFREE = 0x13, /* unexpected bus free */ + BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence + * requested by target */ + BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from + * first CCB */ + BTSTAT_INVPARAM = 0x1a, /* invalid parameter in CCB or segment + * list */ + BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */ + BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message + * rejected by target */ + BTSTAT_BADMSG = 0x1d, /* unsupported message received by the + * host adapter */ + BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */ + BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, + * sent a SCSI RST */ + BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */ + BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI + * RST */ + BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly + * (w/o tag) */ + BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */ + BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */ + BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */ + BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */ + BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */ +}; + +/* + 
* SCSI device status values. + */ +enum ScsiDeviceStatus { + SDSTAT_GOOD = 0x00, /* No errors. */ + SDSTAT_CHECK = 0x02, /* Check condition. */ +}; + +/* + * Register offsets. + * + * These registers are accessible both via i/o space and mm i/o. + */ + +enum PVSCSIRegOffset { + PVSCSI_REG_OFFSET_COMMAND = 0x0, + PVSCSI_REG_OFFSET_COMMAND_DATA = 0x4, + PVSCSI_REG_OFFSET_COMMAND_STATUS = 0x8, + PVSCSI_REG_OFFSET_LAST_STS_0 = 0x100, + PVSCSI_REG_OFFSET_LAST_STS_1 = 0x104, + PVSCSI_REG_OFFSET_LAST_STS_2 = 0x108, + PVSCSI_REG_OFFSET_LAST_STS_3 = 0x10c, + PVSCSI_REG_OFFSET_INTR_STATUS = 0x100c, + PVSCSI_REG_OFFSET_INTR_MASK = 0x2010, + PVSCSI_REG_OFFSET_KICK_NON_RW_IO = 0x3014, + PVSCSI_REG_OFFSET_DEBUG = 0x3018, + PVSCSI_REG_OFFSET_KICK_RW_IO = 0x4018, +}; + +/* + * Virtual h/w commands. + */ + +enum PVSCSICommands { + PVSCSI_CMD_FIRST = 0, /* has to be first */ + + PVSCSI_CMD_ADAPTER_RESET = 1, + PVSCSI_CMD_ISSUE_SCSI = 2, + PVSCSI_CMD_SETUP_RINGS = 3, + PVSCSI_CMD_RESET_BUS = 4, + PVSCSI_CMD_RESET_DEVICE = 5, + PVSCSI_CMD_ABORT_CMD = 6, + PVSCSI_CMD_CONFIG = 7, + PVSCSI_CMD_SETUP_MSG_RING = 8, + PVSCSI_CMD_DEVICE_UNPLUG = 9, + PVSCSI_CMD_SETUP_REQCALLTHRESHOLD = 10, + + PVSCSI_CMD_LAST = 11 /* has to be last */ +}; + +/* + * Command descriptor for PVSCSI_CMD_RESET_DEVICE -- + */ + +struct PVSCSICmdDescResetDevice { + u32 target; + u8 lun[8]; +} __packed; + +/* + * Command descriptor for PVSCSI_CMD_CONFIG -- + */ + +struct PVSCSICmdDescConfigCmd { + u64 cmpAddr; + u64 configPageAddress; + u32 configPageNum; + u32 _pad; +} __packed; + +/* + * Command descriptor for PVSCSI_CMD_SETUP_REQCALLTHRESHOLD -- + */ + +struct PVSCSICmdDescSetupReqCall { + u32 enable; +} __packed; + +enum PVSCSIConfigPageType { + PVSCSI_CONFIG_PAGE_CONTROLLER = 0x1958, + PVSCSI_CONFIG_PAGE_PHY = 0x1959, + PVSCSI_CONFIG_PAGE_DEVICE = 0x195a, +}; + +enum PVSCSIConfigPageAddressType { + PVSCSI_CONFIG_CONTROLLER_ADDRESS = 0x2120, + PVSCSI_CONFIG_BUSTARGET_ADDRESS = 0x2121, + PVSCSI_CONFIG_PHY_ADDRESS = 0x2122, +}; + +/* + * Command descriptor for PVSCSI_CMD_ABORT_CMD -- + * + * - currently does not support specifying the LUN. + * - _pad should be 0. + */ + +struct PVSCSICmdDescAbortCmd { + u64 context; + u32 target; + u32 _pad; +} __packed; + +/* + * Command descriptor for PVSCSI_CMD_SETUP_RINGS -- + * + * Notes: + * - reqRingNumPages and cmpRingNumPages need to be power of two. + * - reqRingNumPages and cmpRingNumPages need to be different from 0, + * - reqRingNumPages and cmpRingNumPages need to be inferior to + * PVSCSI_SETUP_RINGS_MAX_NUM_PAGES. + */ + +#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES 32 +struct PVSCSICmdDescSetupRings { + u32 reqRingNumPages; + u32 cmpRingNumPages; + u64 ringsStatePPN; + u64 reqRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES]; + u64 cmpRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES]; +} __packed; + +/* + * Command descriptor for PVSCSI_CMD_SETUP_MSG_RING -- + * + * Notes: + * - this command was not supported in the initial revision of the h/w + * interface. Before using it, you need to check that it is supported by + * writing PVSCSI_CMD_SETUP_MSG_RING to the 'command' register, then + * immediately after read the 'command status' register: + * * a value of -1 means that the cmd is NOT supported, + * * a value != -1 means that the cmd IS supported. + * If it's supported the 'command status' register should return: + * sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(u32). + * - this command should be issued _after_ the usual SETUP_RINGS so that the + * RingsState page is already setup. 
If not, the command is a nop. + * - numPages needs to be a power of two, + * - numPages needs to be different from 0, + * - _pad should be zero. + */ + +#define PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES 16 + +struct PVSCSICmdDescSetupMsgRing { + u32 numPages; + u32 _pad; + u64 ringPPNs[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES]; +} __packed; + +enum PVSCSIMsgType { + PVSCSI_MSG_DEV_ADDED = 0, + PVSCSI_MSG_DEV_REMOVED = 1, + PVSCSI_MSG_LAST = 2, +}; + +/* + * Msg descriptor. + * + * sizeof(struct PVSCSIRingMsgDesc) == 128. + * + * - type is of type enum PVSCSIMsgType. + * - the content of args depend on the type of event being delivered. + */ + +struct PVSCSIRingMsgDesc { + u32 type; + u32 args[31]; +} __packed; + +struct PVSCSIMsgDescDevStatusChanged { + u32 type; /* PVSCSI_MSG_DEV _ADDED / _REMOVED */ + u32 bus; + u32 target; + u8 lun[8]; + u32 pad[27]; +} __packed; + +/* + * Rings state. + * + * - the fields: + * . msgProdIdx, + * . msgConsIdx, + * . msgNumEntriesLog2, + * .. are only used once the SETUP_MSG_RING cmd has been issued. + * - '_pad' helps to ensure that the msg related fields are on their own + * cache-line. + */ + +struct PVSCSIRingsState { + u32 reqProdIdx; + u32 reqConsIdx; + u32 reqNumEntriesLog2; + + u32 cmpProdIdx; + u32 cmpConsIdx; + u32 cmpNumEntriesLog2; + + u32 reqCallThreshold; + + u8 _pad[100]; + + u32 msgProdIdx; + u32 msgConsIdx; + u32 msgNumEntriesLog2; +} __packed; + +/* + * Request descriptor. + * + * sizeof(RingReqDesc) = 128 + * + * - context: is a unique identifier of a command. It could normally be any + * 64bit value, however we currently store it in the serialNumber variable + * of struct SCSI_Command, so we have the following restrictions due to the + * way this field is handled in the vmkernel storage stack: + * * this value can't be 0, + * * the upper 32bit need to be 0 since serialNumber is as a u32. + * Currently tracked as PR 292060. + * - dataLen: contains the total number of bytes that need to be transferred. + * - dataAddr: + * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is set: dataAddr is the PA of the first + * s/g table segment, each s/g segment is entirely contained on a single + * page of physical memory, + * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is NOT set, then dataAddr is the PA of + * the buffer used for the DMA transfer, + * - flags: + * * PVSCSI_FLAG_CMD_WITH_SG_LIST: see dataAddr above, + * * PVSCSI_FLAG_CMD_DIR_NONE: no DMA involved, + * * PVSCSI_FLAG_CMD_DIR_TOHOST: transfer from device to main memory, + * * PVSCSI_FLAG_CMD_DIR_TODEVICE: transfer from main memory to device, + * * PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB: reserved to handle CDBs larger than + * 16bytes. To be specified. + * - vcpuHint: vcpuId of the processor that will be most likely waiting for the + * completion of the i/o. For guest OSes that use lowest priority message + * delivery mode (such as windows), we use this "hint" to deliver the + * completion action to the proper vcpu. For now, we can use the vcpuId of + * the processor that initiated the i/o as a likely candidate for the vcpu + * that will be waiting for the completion.. + * - bus should be 0: we currently only support bus 0 for now. + * - unused should be zero'd. 
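+ * - lun is 8 bytes wide; this driver carries the LUN in lun[1] (see
+ *   pvscsi_queue_ring()).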
+ */ + +#define PVSCSI_FLAG_CMD_WITH_SG_LIST (1 << 0) +#define PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB (1 << 1) +#define PVSCSI_FLAG_CMD_DIR_NONE (1 << 2) +#define PVSCSI_FLAG_CMD_DIR_TOHOST (1 << 3) +#define PVSCSI_FLAG_CMD_DIR_TODEVICE (1 << 4) + +struct PVSCSIRingReqDesc { + u64 context; + u64 dataAddr; + u64 dataLen; + u64 senseAddr; + u32 senseLen; + u32 flags; + u8 cdb[16]; + u8 cdbLen; + u8 lun[8]; + u8 tag; + u8 bus; + u8 target; + u16 vcpuHint; + u8 unused[58]; +} __packed; + +/* + * Scatter-gather list management. + * + * As described above, when PVSCSI_FLAG_CMD_WITH_SG_LIST is set in the + * RingReqDesc.flags, then RingReqDesc.dataAddr is the PA of the first s/g + * table segment. + * + * - each segment of the s/g table contain a succession of struct + * PVSCSISGElement. + * - each segment is entirely contained on a single physical page of memory. + * - a "chain" s/g element has the flag PVSCSI_SGE_FLAG_CHAIN_ELEMENT set in + * PVSCSISGElement.flags and in this case: + * * addr is the PA of the next s/g segment, + * * length is undefined, assumed to be 0. + */ + +struct PVSCSISGElement { + u64 addr; + u32 length; + u32 flags; +} __packed; + +/* + * Completion descriptor. + * + * sizeof(RingCmpDesc) = 32 + * + * - context: identifier of the command. The same thing that was specified + * under "context" as part of struct RingReqDesc at initiation time, + * - dataLen: number of bytes transferred for the actual i/o operation, + * - senseLen: number of bytes written into the sense buffer, + * - hostStatus: adapter status, + * - scsiStatus: device status, + * - _pad should be zero. + */ + +struct PVSCSIRingCmpDesc { + u64 context; + u64 dataLen; + u32 senseLen; + u16 hostStatus; + u16 scsiStatus; + u32 _pad[2]; +} __packed; + +struct PVSCSIConfigPageHeader { + u32 pageNum; + u16 numDwords; + u16 hostStatus; + u16 scsiStatus; + u16 reserved[3]; +} __packed; + +struct PVSCSIConfigPageController { + struct PVSCSIConfigPageHeader header; + u64 nodeWWN; /* Device name as defined in the SAS spec. */ + u16 manufacturer[64]; + u16 serialNumber[64]; + u16 opromVersion[32]; + u16 hwVersion[32]; + u16 firmwareVersion[32]; + u32 numPhys; + u8 useConsecutivePhyWWNs; + u8 reserved[3]; +} __packed; + +/* + * Interrupt status / IRQ bits. + */ + +#define PVSCSI_INTR_CMPL_0 (1 << 0) +#define PVSCSI_INTR_CMPL_1 (1 << 1) +#define PVSCSI_INTR_CMPL_MASK MASK(2) + +#define PVSCSI_INTR_MSG_0 (1 << 2) +#define PVSCSI_INTR_MSG_1 (1 << 3) +#define PVSCSI_INTR_MSG_MASK (MASK(2) << 2) + +#define PVSCSI_INTR_ALL_SUPPORTED MASK(4) + +/* + * Number of MSI-X vectors supported. + */ +#define PVSCSI_MAX_INTRS 24 + +/* + * Misc constants for the rings. 
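+ * (PVSCSI_MAX_NUM_PAGES_{REQ,CMP}_RING and PVSCSI_MAX_NUM_PAGES_MSG_RING
+ * simply mirror the SETUP_RINGS / SETUP_MSG_RING limits defined above.)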
+ */ + +#define PVSCSI_MAX_NUM_PAGES_REQ_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES +#define PVSCSI_MAX_NUM_PAGES_CMP_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES +#define PVSCSI_MAX_NUM_PAGES_MSG_RING PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES + +#define PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE \ + (PAGE_SIZE / sizeof(struct PVSCSIRingReqDesc)) + +#define PVSCSI_MAX_REQ_QUEUE_DEPTH \ + (PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE) + +#define PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES 1 +#define PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES 1 +#define PVSCSI_MEM_SPACE_MISC_NUM_PAGES 2 +#define PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES 2 +#define PVSCSI_MEM_SPACE_MSIX_NUM_PAGES 2 + +enum PVSCSIMemSpace { + PVSCSI_MEM_SPACE_COMMAND_PAGE = 0, + PVSCSI_MEM_SPACE_INTR_STATUS_PAGE = 1, + PVSCSI_MEM_SPACE_MISC_PAGE = 2, + PVSCSI_MEM_SPACE_KICK_IO_PAGE = 4, + PVSCSI_MEM_SPACE_MSIX_TABLE_PAGE = 6, + PVSCSI_MEM_SPACE_MSIX_PBA_PAGE = 7, +}; + +#define PVSCSI_MEM_SPACE_NUM_PAGES \ + (PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES + \ + PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES + \ + PVSCSI_MEM_SPACE_MISC_NUM_PAGES + \ + PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES + \ + PVSCSI_MEM_SPACE_MSIX_NUM_PAGES) + +#define PVSCSI_MEM_SPACE_SIZE (PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE) + +#endif /* _VMW_PVSCSI_H_ */ diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c new file mode 100644 index 000000000..e4fafc77b --- /dev/null +++ b/drivers/scsi/wd33c93.c @@ -0,0 +1,2147 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 1996 John Shifflett, GeoLog Consulting + * john@geolog.com + * jshiffle@netcom.com + */ + +/* + * Drew Eckhardt's excellent 'Generic NCR5380' sources from Linux-PC + * provided much of the inspiration and some of the code for this + * driver. Everything I know about Amiga DMA was gleaned from careful + * reading of Hamish Mcdonald's original wd33c93 driver; in fact, I + * borrowed shamelessly from all over that source. Thanks Hamish! + * + * _This_ driver is (I feel) an improvement over the old one in + * several respects: + * + * - Target Disconnection/Reconnection is now supported. Any + * system with more than one device active on the SCSI bus + * will benefit from this. The driver defaults to what I + * call 'adaptive disconnect' - meaning that each command + * is evaluated individually as to whether or not it should + * be run with the option to disconnect/reselect (if the + * device chooses), or as a "SCSI-bus-hog". + * + * - Synchronous data transfers are now supported. Because of + * a few devices that choke after telling the driver that + * they can do sync transfers, we don't automatically use + * this faster protocol - it can be enabled via the command- + * line on a device-by-device basis. + * + * - Runtime operating parameters can now be specified through + * the 'amiboot' or the 'insmod' command line. For amiboot do: + * "amiboot [usual stuff] wd33c93=blah,blah,blah" + * The defaults should be good for most people. See the comment + * for 'setup_strings' below for more details. + * + * - The old driver relied exclusively on what the Western Digital + * docs call "Combination Level 2 Commands", which are a great + * idea in that the CPU is relieved of a lot of interrupt + * overhead. However, by accepting a certain (user-settable) + * amount of additional interrupts, this driver achieves + * better control over the SCSI bus, and data transfers are + * almost as fast while being much easier to define, track, + * and debug. + * + * + * TODO: + * more speed. linked commands. 
+ * + * + * People with bug reports, wish-lists, complaints, comments, + * or improvements are asked to pah-leeez email me (John Shifflett) + * at john@geolog.com or jshiffle@netcom.com! I'm anxious to get + * this thing into as good a shape as possible, and I'm positive + * there are lots of lurking bugs and "Stupid Places". + * + * Updates: + * + * Added support for pre -A chips, which don't have advanced features + * and will generate CSR_RESEL rather than CSR_RESEL_AM. + * Richard Hirst August 2000 + * + * Added support for Burst Mode DMA and Fast SCSI. Enabled the use of + * default_sx_per for asynchronous data transfers. Added adjustment + * of transfer periods in sx_table to the actual input-clock. + * peter fuerst February 2007 + */ + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "wd33c93.h" + +#define optimum_sx_per(hostdata) (hostdata)->sx_table[1].period_ns + + +#define WD33C93_VERSION "1.26++" +#define WD33C93_DATE "10/Feb/2007" + +MODULE_AUTHOR("John Shifflett"); +MODULE_DESCRIPTION("Generic WD33C93 SCSI driver"); +MODULE_LICENSE("GPL"); + +/* + * 'setup_strings' is a single string used to pass operating parameters and + * settings from the kernel/module command-line to the driver. 'setup_args[]' + * is an array of strings that define the compile-time default values for + * these settings. If Linux boots with an amiboot or insmod command-line, + * those settings are combined with 'setup_args[]'. Note that amiboot + * command-lines are prefixed with "wd33c93=" while insmod uses a + * "setup_strings=" prefix. The driver recognizes the following keywords + * (lower case required) and arguments: + * + * - nosync:bitmask -bitmask is a byte where the 1st 7 bits correspond with + * the 7 possible SCSI devices. Set a bit to negotiate for + * asynchronous transfers on that device. To maintain + * backwards compatibility, a command-line such as + * "wd33c93=255" will be automatically translated to + * "wd33c93=nosync:0xff". + * - nodma:x -x = 1 to disable DMA, x = 0 to enable it. Argument is + * optional - if not present, same as "nodma:1". + * - period:ns -ns is the minimum # of nanoseconds in a SCSI data transfer + * period. Default is 500; acceptable values are 250 - 1000. + * - disconnect:x -x = 0 to never allow disconnects, 2 to always allow them. + * x = 1 does 'adaptive' disconnects, which is the default + * and generally the best choice. + * - debug:x -If 'DEBUGGING_ON' is defined, x is a bit mask that causes + * various types of debug output to printed - see the DB_xxx + * defines in wd33c93.h + * - clock:x -x = clock input in MHz for WD33c93 chip. Normal values + * would be from 8 through 20. Default is 8. + * - burst:x -x = 1 to use Burst Mode (or Demand-Mode) DMA, x = 0 to use + * Single Byte DMA, which is the default. Argument is + * optional - if not present, same as "burst:1". + * - fast:x -x = 1 to enable Fast SCSI, which is only effective with + * input-clock divisor 4 (WD33C93_FS_16_20), x = 0 to disable + * it, which is the default. Argument is optional - if not + * present, same as "fast:1". + * - next -No argument. Used to separate blocks of keywords when + * there's more than one host adapter in the system. + * + * Syntax Notes: + * - Numeric arguments can be decimal or the '0x' form of hex notation. There + * _must_ be a colon between a keyword and its numeric argument, with no + * spaces. 
+ * - Keywords are separated by commas, no spaces, in the standard kernel + * command-line manner. + * - A keyword in the 'nth' comma-separated command-line member will overwrite + * the 'nth' element of setup_args[]. A blank command-line member (in + * other words, a comma with no preceding keyword) will _not_ overwrite + * the corresponding setup_args[] element. + * - If a keyword is used more than once, the first one applies to the first + * SCSI host found, the second to the second card, etc, unless the 'next' + * keyword is used to change the order. + * + * Some amiboot examples (for insmod, use 'setup_strings' instead of 'wd33c93'): + * - wd33c93=nosync:255 + * - wd33c93=nodma + * - wd33c93=nodma:1 + * - wd33c93=disconnect:2,nosync:0x08,period:250 + * - wd33c93=debug:0x1c + */ + +/* Normally, no defaults are specified */ +static char *setup_args[] = { "", "", "", "", "", "", "", "", "", "" }; + +static char *setup_strings; +module_param(setup_strings, charp, 0); + +static void wd33c93_execute(struct Scsi_Host *instance); + +static inline uchar +read_wd33c93(const wd33c93_regs regs, uchar reg_num) +{ + *regs.SASR = reg_num; + mb(); + return (*regs.SCMD); +} + +static unsigned long +read_wd33c93_count(const wd33c93_regs regs) +{ + unsigned long value; + + *regs.SASR = WD_TRANSFER_COUNT_MSB; + mb(); + value = *regs.SCMD << 16; + value |= *regs.SCMD << 8; + value |= *regs.SCMD; + mb(); + return value; +} + +static inline uchar +read_aux_stat(const wd33c93_regs regs) +{ + return *regs.SASR; +} + +static inline void +write_wd33c93(const wd33c93_regs regs, uchar reg_num, uchar value) +{ + *regs.SASR = reg_num; + mb(); + *regs.SCMD = value; + mb(); +} + +static void +write_wd33c93_count(const wd33c93_regs regs, unsigned long value) +{ + *regs.SASR = WD_TRANSFER_COUNT_MSB; + mb(); + *regs.SCMD = value >> 16; + *regs.SCMD = value >> 8; + *regs.SCMD = value; + mb(); +} + +static inline void +write_wd33c93_cmd(const wd33c93_regs regs, uchar cmd) +{ + *regs.SASR = WD_COMMAND; + mb(); + *regs.SCMD = cmd; + mb(); +} + +static inline void +write_wd33c93_cdb(const wd33c93_regs regs, uint len, uchar cmnd[]) +{ + int i; + + *regs.SASR = WD_CDB_1; + for (i = 0; i < len; i++) + *regs.SCMD = cmnd[i]; +} + +static inline uchar +read_1_byte(const wd33c93_regs regs) +{ + uchar asr; + uchar x = 0; + + write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); + write_wd33c93_cmd(regs, WD_CMD_TRANS_INFO | 0x80); + do { + asr = read_aux_stat(regs); + if (asr & ASR_DBR) + x = read_wd33c93(regs, WD_DATA); + } while (!(asr & ASR_INT)); + return x; +} + +static int +round_period(unsigned int period, const struct sx_period *sx_table) +{ + int x; + + for (x = 1; sx_table[x].period_ns; x++) { + if ((period <= sx_table[x - 0].period_ns) && + (period > sx_table[x - 1].period_ns)) { + return x; + } + } + return 7; +} + +/* + * Calculate Synchronous Transfer Register value from SDTR code. + */ +static uchar +calc_sync_xfer(unsigned int period, unsigned int offset, unsigned int fast, + const struct sx_period *sx_table) +{ + /* When doing Fast SCSI synchronous data transfers, the corresponding + * value in 'sx_table' is two times the actually used transfer period. + */ + uchar result; + + if (offset && fast) { + fast = STR_FSS; + period *= 2; + } else { + fast = 0; + } + period *= 4; /* convert SDTR code to ns */ + result = sx_table[round_period(period,sx_table)].reg_value; + result |= (offset < OPTIMUM_SX_OFF) ? 
offset : OPTIMUM_SX_OFF; + result |= fast; + return result; +} + +/* + * Calculate SDTR code bytes [3],[4] from period and offset. + */ +static inline void +calc_sync_msg(unsigned int period, unsigned int offset, unsigned int fast, + uchar msg[2]) +{ + /* 'period' is a "normal"-mode value, like the ones in 'sx_table'. The + * actually used transfer period for Fast SCSI synchronous data + * transfers is half that value. + */ + period /= 4; + if (offset && fast) + period /= 2; + msg[0] = period; + msg[1] = offset; +} + +static int wd33c93_queuecommand_lck(struct scsi_cmnd *cmd) +{ + struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); + struct WD33C93_hostdata *hostdata; + struct scsi_cmnd *tmp; + + hostdata = (struct WD33C93_hostdata *) cmd->device->host->hostdata; + + DB(DB_QUEUE_COMMAND, + printk("Q-%d-%02x( ", cmd->device->id, cmd->cmnd[0])) + +/* Set up a few fields in the scsi_cmnd structure for our own use: + * - host_scribble is the pointer to the next cmd in the input queue + * - result is what you'd expect + */ + cmd->host_scribble = NULL; + cmd->result = 0; + +/* We use the Scsi_Pointer structure that's included with each command + * as a scratchpad (as it's intended to be used!). The handy thing about + * the SCp.xxx fields is that they're always associated with a given + * cmd, and are preserved across disconnect-reselect. This means we + * can pretty much ignore SAVE_POINTERS and RESTORE_POINTERS messages + * if we keep all the critical pointers and counters in SCp: + * - SCp.ptr is the pointer into the RAM buffer + * - SCp.this_residual is the size of that buffer + * - SCp.buffer points to the current scatter-gather buffer + * - SCp.buffers_residual tells us how many S.G. buffers there are + * - SCp.have_data_in is not used + * - SCp.sent_command is not used + * - SCp.phase records this command's SRCID_ER bit setting + */ + + if (scsi_bufflen(cmd)) { + scsi_pointer->buffer = scsi_sglist(cmd); + scsi_pointer->buffers_residual = scsi_sg_count(cmd) - 1; + scsi_pointer->ptr = sg_virt(scsi_pointer->buffer); + scsi_pointer->this_residual = scsi_pointer->buffer->length; + } else { + scsi_pointer->buffer = NULL; + scsi_pointer->buffers_residual = 0; + scsi_pointer->ptr = NULL; + scsi_pointer->this_residual = 0; + } + +/* WD docs state that at the conclusion of a "LEVEL2" command, the + * status byte can be retrieved from the LUN register. Apparently, + * this is the case only for *uninterrupted* LEVEL2 commands! If + * there are any unexpected phases entered, even if they are 100% + * legal (different devices may choose to do things differently), + * the LEVEL2 command sequence is exited. This often occurs prior + * to receiving the status byte, in which case the driver does a + * status phase interrupt and gets the status byte on its own. + * While such a command can then be "resumed" (ie restarted to + * finish up as a LEVEL2 command), the LUN register will NOT be + * a valid status byte at the command's conclusion, and we must + * use the byte obtained during the earlier interrupt. Here, we + * preset SCp.Status to an illegal value (0xff) so that when + * this command finally completes, we can tell where the actual + * status byte is stored. + */ + + scsi_pointer->Status = ILLEGAL_STATUS_BYTE; + + /* + * Add the cmd to the end of 'input_Q'. Note that REQUEST SENSE + * commands are added to the head of the queue so that the desired + * sense data is not lost before REQUEST_SENSE executes. 
+ */ + + spin_lock_irq(&hostdata->lock); + + if (!(hostdata->input_Q) || (cmd->cmnd[0] == REQUEST_SENSE)) { + cmd->host_scribble = (uchar *) hostdata->input_Q; + hostdata->input_Q = cmd; + } else { /* find the end of the queue */ + for (tmp = (struct scsi_cmnd *) hostdata->input_Q; + tmp->host_scribble; + tmp = (struct scsi_cmnd *) tmp->host_scribble) ; + tmp->host_scribble = (uchar *) cmd; + } + +/* We know that there's at least one command in 'input_Q' now. + * Go see if any of them are runnable! + */ + + wd33c93_execute(cmd->device->host); + + DB(DB_QUEUE_COMMAND, printk(")Q ")) + + spin_unlock_irq(&hostdata->lock); + return 0; +} + +DEF_SCSI_QCMD(wd33c93_queuecommand) + +/* + * This routine attempts to start a scsi command. If the host_card is + * already connected, we give up immediately. Otherwise, look through + * the input_Q, using the first command we find that's intended + * for a currently non-busy target/lun. + * + * wd33c93_execute() is always called with interrupts disabled or from + * the wd33c93_intr itself, which means that a wd33c93 interrupt + * cannot occur while we are in here. + */ +static void +wd33c93_execute(struct Scsi_Host *instance) +{ + struct scsi_pointer *scsi_pointer; + struct WD33C93_hostdata *hostdata = + (struct WD33C93_hostdata *) instance->hostdata; + const wd33c93_regs regs = hostdata->regs; + struct scsi_cmnd *cmd, *prev; + + DB(DB_EXECUTE, printk("EX(")) + if (hostdata->selecting || hostdata->connected) { + DB(DB_EXECUTE, printk(")EX-0 ")) + return; + } + + /* + * Search through the input_Q for a command destined + * for an idle target/lun. + */ + + cmd = (struct scsi_cmnd *) hostdata->input_Q; + prev = NULL; + while (cmd) { + if (!(hostdata->busy[cmd->device->id] & + (1 << (cmd->device->lun & 0xff)))) + break; + prev = cmd; + cmd = (struct scsi_cmnd *) cmd->host_scribble; + } + + /* quit if queue empty or all possible targets are busy */ + + if (!cmd) { + DB(DB_EXECUTE, printk(")EX-1 ")) + return; + } + + /* remove command from queue */ + + if (prev) + prev->host_scribble = cmd->host_scribble; + else + hostdata->input_Q = (struct scsi_cmnd *) cmd->host_scribble; + +#ifdef PROC_STATISTICS + hostdata->cmd_cnt[cmd->device->id]++; +#endif + + /* + * Start the selection process + */ + + if (cmd->sc_data_direction == DMA_TO_DEVICE) + write_wd33c93(regs, WD_DESTINATION_ID, cmd->device->id); + else + write_wd33c93(regs, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD); + +/* Now we need to figure out whether or not this command is a good + * candidate for disconnect/reselect. We guess to the best of our + * ability, based on a set of hierarchical rules. When several + * devices are operating simultaneously, disconnects are usually + * an advantage. In a single device system, or if only 1 device + * is being accessed, transfers usually go faster if disconnects + * are not allowed: + * + * + Commands should NEVER disconnect if hostdata->disconnect = + * DIS_NEVER (this holds for tape drives also), and ALWAYS + * disconnect if hostdata->disconnect = DIS_ALWAYS. + * + Tape drive commands should always be allowed to disconnect. + * + Disconnect should be allowed if disconnected_Q isn't empty. + * + Commands should NOT disconnect if input_Q is empty. + * + Disconnect should be allowed if there are commands in input_Q + * for a different target/lun. In this case, the other commands + * should be made disconnect-able, if not already. + * + * I know, I know - this code would flunk me out of any + * "C Programming 101" class ever offered. 
But it's easy + * to change around and experiment with for now. + */ + + scsi_pointer = WD33C93_scsi_pointer(cmd); + scsi_pointer->phase = 0; /* assume no disconnect */ + if (hostdata->disconnect == DIS_NEVER) + goto no; + if (hostdata->disconnect == DIS_ALWAYS) + goto yes; + if (cmd->device->type == 1) /* tape drive? */ + goto yes; + if (hostdata->disconnected_Q) /* other commands disconnected? */ + goto yes; + if (!(hostdata->input_Q)) /* input_Q empty? */ + goto no; + for (prev = (struct scsi_cmnd *) hostdata->input_Q; prev; + prev = (struct scsi_cmnd *) prev->host_scribble) { + if ((prev->device->id != cmd->device->id) || + (prev->device->lun != cmd->device->lun)) { + for (prev = (struct scsi_cmnd *) hostdata->input_Q; prev; + prev = (struct scsi_cmnd *) prev->host_scribble) + WD33C93_scsi_pointer(prev)->phase = 1; + goto yes; + } + } + + goto no; + + yes: + scsi_pointer->phase = 1; + +#ifdef PROC_STATISTICS + hostdata->disc_allowed_cnt[cmd->device->id]++; +#endif + + no: + + write_wd33c93(regs, WD_SOURCE_ID, scsi_pointer->phase ? SRCID_ER : 0); + + write_wd33c93(regs, WD_TARGET_LUN, (u8)cmd->device->lun); + write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER, + hostdata->sync_xfer[cmd->device->id]); + hostdata->busy[cmd->device->id] |= (1 << (cmd->device->lun & 0xFF)); + + if ((hostdata->level2 == L2_NONE) || + (hostdata->sync_stat[cmd->device->id] == SS_UNSET)) { + + /* + * Do a 'Select-With-ATN' command. This will end with + * one of the following interrupts: + * CSR_RESEL_AM: failure - can try again later. + * CSR_TIMEOUT: failure - give up. + * CSR_SELECT: success - proceed. + */ + + hostdata->selecting = cmd; + +/* Every target has its own synchronous transfer setting, kept in the + * sync_xfer array, and a corresponding status byte in sync_stat[]. + * Each target's sync_stat[] entry is initialized to SX_UNSET, and its + * sync_xfer[] entry is initialized to the default/safe value. SS_UNSET + * means that the parameters are undetermined as yet, and that we + * need to send an SDTR message to this device after selection is + * complete: We set SS_FIRST to tell the interrupt routine to do so. + * If we've been asked not to try synchronous transfers on this + * target (and _all_ luns within it), we'll still send the SDTR message + * later, but at that time we'll negotiate for async by specifying a + * sync fifo depth of 0. + */ + if (hostdata->sync_stat[cmd->device->id] == SS_UNSET) + hostdata->sync_stat[cmd->device->id] = SS_FIRST; + hostdata->state = S_SELECTING; + write_wd33c93_count(regs, 0); /* guarantee a DATA_PHASE interrupt */ + write_wd33c93_cmd(regs, WD_CMD_SEL_ATN); + } else { + + /* + * Do a 'Select-With-ATN-Xfer' command. This will end with + * one of the following interrupts: + * CSR_RESEL_AM: failure - can try again later. + * CSR_TIMEOUT: failure - give up. + * anything else: success - proceed. + */ + + hostdata->connected = cmd; + write_wd33c93(regs, WD_COMMAND_PHASE, 0); + + /* copy command_descriptor_block into WD chip + * (take advantage of auto-incrementing) + */ + + write_wd33c93_cdb(regs, cmd->cmd_len, cmd->cmnd); + + /* The wd33c93 only knows about Group 0, 1, and 5 commands when + * it's doing a 'select-and-transfer'. To be safe, we write the + * size of the CDB into the OWN_ID register for every case. This + * way there won't be problems with vendor-unique, audio, etc. 
+ */ + + write_wd33c93(regs, WD_OWN_ID, cmd->cmd_len); + + /* When doing a non-disconnect command with DMA, we can save + * ourselves a DATA phase interrupt later by setting everything + * up ahead of time. + */ + + if (scsi_pointer->phase == 0 && hostdata->no_dma == 0) { + if (hostdata->dma_setup(cmd, + (cmd->sc_data_direction == DMA_TO_DEVICE) ? + DATA_OUT_DIR : DATA_IN_DIR)) + write_wd33c93_count(regs, 0); /* guarantee a DATA_PHASE interrupt */ + else { + write_wd33c93_count(regs, + scsi_pointer->this_residual); + write_wd33c93(regs, WD_CONTROL, + CTRL_IDI | CTRL_EDI | hostdata->dma_mode); + hostdata->dma = D_DMA_RUNNING; + } + } else + write_wd33c93_count(regs, 0); /* guarantee a DATA_PHASE interrupt */ + + hostdata->state = S_RUNNING_LEVEL2; + write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); + } + + /* + * Since the SCSI bus can handle only 1 connection at a time, + * we get out of here now. If the selection fails, or when + * the command disconnects, we'll come back to this routine + * to search the input_Q again... + */ + + DB(DB_EXECUTE, + printk("%s)EX-2 ", scsi_pointer->phase ? "d:" : "")) +} + +static void +transfer_pio(const wd33c93_regs regs, uchar * buf, int cnt, + int data_in_dir, struct WD33C93_hostdata *hostdata) +{ + uchar asr; + + DB(DB_TRANSFER, + printk("(%p,%d,%s:", buf, cnt, data_in_dir ? "in" : "out")) + + write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); + write_wd33c93_count(regs, cnt); + write_wd33c93_cmd(regs, WD_CMD_TRANS_INFO); + if (data_in_dir) { + do { + asr = read_aux_stat(regs); + if (asr & ASR_DBR) + *buf++ = read_wd33c93(regs, WD_DATA); + } while (!(asr & ASR_INT)); + } else { + do { + asr = read_aux_stat(regs); + if (asr & ASR_DBR) + write_wd33c93(regs, WD_DATA, *buf++); + } while (!(asr & ASR_INT)); + } + + /* Note: we are returning with the interrupt UN-cleared. + * Since (presumably) an entire I/O operation has + * completed, the bus phase is probably different, and + * the interrupt routine will discover this when it + * responds to the uncleared int. + */ + +} + +static void +transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd, + int data_in_dir) +{ + struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); + struct WD33C93_hostdata *hostdata; + unsigned long length; + + hostdata = (struct WD33C93_hostdata *) cmd->device->host->hostdata; + +/* Normally, you'd expect 'this_residual' to be non-zero here. + * In a series of scatter-gather transfers, however, this + * routine will usually be called with 'this_residual' equal + * to 0 and 'buffers_residual' non-zero. This means that a + * previous transfer completed, clearing 'this_residual', and + * now we need to setup the next scatter-gather buffer as the + * source or destination for THIS transfer. + */ + if (!scsi_pointer->this_residual && scsi_pointer->buffers_residual) { + scsi_pointer->buffer = sg_next(scsi_pointer->buffer); + --scsi_pointer->buffers_residual; + scsi_pointer->this_residual = scsi_pointer->buffer->length; + scsi_pointer->ptr = sg_virt(scsi_pointer->buffer); + } + if (!scsi_pointer->this_residual) /* avoid bogus setups */ + return; + + write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER, + hostdata->sync_xfer[cmd->device->id]); + +/* 'hostdata->no_dma' is TRUE if we don't even want to try DMA. + * Update 'this_residual' and 'ptr' after 'transfer_pio()' returns. 
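+ * (A non-zero return from hostdata->dma_setup() drops us into the same
+ * PIO path.)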
+ */ + + if (hostdata->no_dma || hostdata->dma_setup(cmd, data_in_dir)) { +#ifdef PROC_STATISTICS + hostdata->pio_cnt++; +#endif + transfer_pio(regs, (uchar *) scsi_pointer->ptr, + scsi_pointer->this_residual, data_in_dir, + hostdata); + length = scsi_pointer->this_residual; + scsi_pointer->this_residual = read_wd33c93_count(regs); + scsi_pointer->ptr += length - scsi_pointer->this_residual; + } + +/* We are able to do DMA (in fact, the Amiga hardware is + * already going!), so start up the wd33c93 in DMA mode. + * We set 'hostdata->dma' = D_DMA_RUNNING so that when the + * transfer completes and causes an interrupt, we're + * reminded to tell the Amiga to shut down its end. We'll + * postpone the updating of 'this_residual' and 'ptr' + * until then. + */ + + else { +#ifdef PROC_STATISTICS + hostdata->dma_cnt++; +#endif + write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | hostdata->dma_mode); + write_wd33c93_count(regs, scsi_pointer->this_residual); + + if ((hostdata->level2 >= L2_DATA) || + (hostdata->level2 == L2_BASIC && scsi_pointer->phase == 0)) { + write_wd33c93(regs, WD_COMMAND_PHASE, 0x45); + write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); + hostdata->state = S_RUNNING_LEVEL2; + } else + write_wd33c93_cmd(regs, WD_CMD_TRANS_INFO); + + hostdata->dma = D_DMA_RUNNING; + } +} + +void +wd33c93_intr(struct Scsi_Host *instance) +{ + struct scsi_pointer *scsi_pointer; + struct WD33C93_hostdata *hostdata = + (struct WD33C93_hostdata *) instance->hostdata; + const wd33c93_regs regs = hostdata->regs; + struct scsi_cmnd *patch, *cmd; + uchar asr, sr, phs, id, lun, *ucp, msg; + unsigned long length, flags; + + asr = read_aux_stat(regs); + if (!(asr & ASR_INT) || (asr & ASR_BSY)) + return; + + spin_lock_irqsave(&hostdata->lock, flags); + +#ifdef PROC_STATISTICS + hostdata->int_cnt++; +#endif + + cmd = (struct scsi_cmnd *) hostdata->connected; /* assume we're connected */ + scsi_pointer = WD33C93_scsi_pointer(cmd); + sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear the interrupt */ + phs = read_wd33c93(regs, WD_COMMAND_PHASE); + + DB(DB_INTR, printk("{%02x:%02x-", asr, sr)) + +/* After starting a DMA transfer, the next interrupt + * is guaranteed to be in response to completion of + * the transfer. Since the Amiga DMA hardware runs in + * in an open-ended fashion, it needs to be told when + * to stop; do that here if D_DMA_RUNNING is true. + * Also, we have to update 'this_residual' and 'ptr' + * based on the contents of the TRANSFER_COUNT register, + * in case the device decided to do an intermediate + * disconnect (a device may do this if it has to do a + * seek, or just to be nice and let other devices have + * some bus time during long transfers). After doing + * whatever is needed, we go on and service the WD3393 + * interrupt normally. + */ + if (hostdata->dma == D_DMA_RUNNING) { + DB(DB_TRANSFER, + printk("[%p/%d:", scsi_pointer->ptr, scsi_pointer->this_residual)) + hostdata->dma_stop(cmd->device->host, cmd, 1); + hostdata->dma = D_DMA_OFF; + length = scsi_pointer->this_residual; + scsi_pointer->this_residual = read_wd33c93_count(regs); + scsi_pointer->ptr += length - scsi_pointer->this_residual; + DB(DB_TRANSFER, + printk("%p/%d]", scsi_pointer->ptr, scsi_pointer->this_residual)) + } + +/* Respond to the specific WD3393 interrupt - there are quite a few! 
*/ + switch (sr) { + case CSR_TIMEOUT: + DB(DB_INTR, printk("TIMEOUT")) + + if (hostdata->state == S_RUNNING_LEVEL2) + hostdata->connected = NULL; + else { + cmd = (struct scsi_cmnd *) hostdata->selecting; /* get a valid cmd */ + hostdata->selecting = NULL; + } + + cmd->result = DID_NO_CONNECT << 16; + hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); + hostdata->state = S_UNCONNECTED; + scsi_done(cmd); + + /* From esp.c: + * There is a window of time within the scsi_done() path + * of execution where interrupts are turned back on full + * blast and left that way. During that time we could + * reconnect to a disconnected command, then we'd bomb + * out below. We could also end up executing two commands + * at _once_. ...just so you know why the restore_flags() + * is here... + */ + + spin_unlock_irqrestore(&hostdata->lock, flags); + +/* We are not connected to a target - check to see if there + * are commands waiting to be executed. + */ + + wd33c93_execute(instance); + break; + +/* Note: this interrupt should not occur in a LEVEL2 command */ + + case CSR_SELECT: + DB(DB_INTR, printk("SELECT")) + hostdata->connected = cmd = + (struct scsi_cmnd *) hostdata->selecting; + hostdata->selecting = NULL; + + /* construct an IDENTIFY message with correct disconnect bit */ + + hostdata->outgoing_msg[0] = IDENTIFY(0, cmd->device->lun); + if (scsi_pointer->phase) + hostdata->outgoing_msg[0] |= 0x40; + + if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) { + + hostdata->sync_stat[cmd->device->id] = SS_WAITING; + +/* Tack on a 2nd message to ask about synchronous transfers. If we've + * been asked to do only asynchronous transfers on this device, we + * request a fifo depth of 0, which is equivalent to async - should + * solve the problems some people have had with GVP's Guru ROM. 
+ */ + + hostdata->outgoing_msg[1] = EXTENDED_MESSAGE; + hostdata->outgoing_msg[2] = 3; + hostdata->outgoing_msg[3] = EXTENDED_SDTR; + if (hostdata->no_sync & (1 << cmd->device->id)) { + calc_sync_msg(hostdata->default_sx_per, 0, + 0, hostdata->outgoing_msg + 4); + } else { + calc_sync_msg(optimum_sx_per(hostdata), + OPTIMUM_SX_OFF, + hostdata->fast, + hostdata->outgoing_msg + 4); + } + hostdata->outgoing_len = 6; +#ifdef SYNC_DEBUG + ucp = hostdata->outgoing_msg + 1; + printk(" sending SDTR %02x03%02x%02x%02x ", + ucp[0], ucp[2], ucp[3], ucp[4]); +#endif + } else + hostdata->outgoing_len = 1; + + hostdata->state = S_CONNECTED; + spin_unlock_irqrestore(&hostdata->lock, flags); + break; + + case CSR_XFER_DONE | PHS_DATA_IN: + case CSR_UNEXP | PHS_DATA_IN: + case CSR_SRV_REQ | PHS_DATA_IN: + DB(DB_INTR, + printk("IN-%d.%d", scsi_pointer->this_residual, + scsi_pointer->buffers_residual)) + transfer_bytes(regs, cmd, DATA_IN_DIR); + if (hostdata->state != S_RUNNING_LEVEL2) + hostdata->state = S_CONNECTED; + spin_unlock_irqrestore(&hostdata->lock, flags); + break; + + case CSR_XFER_DONE | PHS_DATA_OUT: + case CSR_UNEXP | PHS_DATA_OUT: + case CSR_SRV_REQ | PHS_DATA_OUT: + DB(DB_INTR, + printk("OUT-%d.%d", scsi_pointer->this_residual, + scsi_pointer->buffers_residual)) + transfer_bytes(regs, cmd, DATA_OUT_DIR); + if (hostdata->state != S_RUNNING_LEVEL2) + hostdata->state = S_CONNECTED; + spin_unlock_irqrestore(&hostdata->lock, flags); + break; + +/* Note: this interrupt should not occur in a LEVEL2 command */ + + case CSR_XFER_DONE | PHS_COMMAND: + case CSR_UNEXP | PHS_COMMAND: + case CSR_SRV_REQ | PHS_COMMAND: + DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0])) + transfer_pio(regs, cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, + hostdata); + hostdata->state = S_CONNECTED; + spin_unlock_irqrestore(&hostdata->lock, flags); + break; + + case CSR_XFER_DONE | PHS_STATUS: + case CSR_UNEXP | PHS_STATUS: + case CSR_SRV_REQ | PHS_STATUS: + DB(DB_INTR, printk("STATUS=")) + scsi_pointer->Status = read_1_byte(regs); + DB(DB_INTR, printk("%02x", scsi_pointer->Status)) + if (hostdata->level2 >= L2_BASIC) { + sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear interrupt */ + udelay(7); + hostdata->state = S_RUNNING_LEVEL2; + write_wd33c93(regs, WD_COMMAND_PHASE, 0x50); + write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); + } else { + hostdata->state = S_CONNECTED; + } + spin_unlock_irqrestore(&hostdata->lock, flags); + break; + + case CSR_XFER_DONE | PHS_MESS_IN: + case CSR_UNEXP | PHS_MESS_IN: + case CSR_SRV_REQ | PHS_MESS_IN: + DB(DB_INTR, printk("MSG_IN=")) + + msg = read_1_byte(regs); + sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear interrupt */ + udelay(7); + + hostdata->incoming_msg[hostdata->incoming_ptr] = msg; + if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE) + msg = EXTENDED_MESSAGE; + else + hostdata->incoming_ptr = 0; + + scsi_pointer->Message = msg; + switch (msg) { + + case COMMAND_COMPLETE: + DB(DB_INTR, printk("CCMP")) + write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); + hostdata->state = S_PRE_CMP_DISC; + break; + + case SAVE_POINTERS: + DB(DB_INTR, printk("SDP")) + write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); + hostdata->state = S_CONNECTED; + break; + + case RESTORE_POINTERS: + DB(DB_INTR, printk("RDP")) + if (hostdata->level2 >= L2_BASIC) { + write_wd33c93(regs, WD_COMMAND_PHASE, 0x45); + write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); + hostdata->state = S_RUNNING_LEVEL2; + } else { + write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); + hostdata->state = S_CONNECTED; + } + break; + + case DISCONNECT: + DB(DB_INTR, 
printk("DIS")) + cmd->device->disconnect = 1; + write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); + hostdata->state = S_PRE_TMP_DISC; + break; + + case MESSAGE_REJECT: + DB(DB_INTR, printk("REJ")) +#ifdef SYNC_DEBUG + printk("-REJ-"); +#endif + if (hostdata->sync_stat[cmd->device->id] == SS_WAITING) { + hostdata->sync_stat[cmd->device->id] = SS_SET; + /* we want default_sx_per, not DEFAULT_SX_PER */ + hostdata->sync_xfer[cmd->device->id] = + calc_sync_xfer(hostdata->default_sx_per + / 4, 0, 0, hostdata->sx_table); + } + write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); + hostdata->state = S_CONNECTED; + break; + + case EXTENDED_MESSAGE: + DB(DB_INTR, printk("EXT")) + + ucp = hostdata->incoming_msg; + +#ifdef SYNC_DEBUG + printk("%02x", ucp[hostdata->incoming_ptr]); +#endif + /* Is this the last byte of the extended message? */ + + if ((hostdata->incoming_ptr >= 2) && + (hostdata->incoming_ptr == (ucp[1] + 1))) { + + switch (ucp[2]) { /* what's the EXTENDED code? */ + case EXTENDED_SDTR: + /* default to default async period */ + id = calc_sync_xfer(hostdata-> + default_sx_per / 4, 0, + 0, hostdata->sx_table); + if (hostdata->sync_stat[cmd->device->id] != + SS_WAITING) { + +/* A device has sent an unsolicited SDTR message; rather than go + * through the effort of decoding it and then figuring out what + * our reply should be, we're just gonna say that we have a + * synchronous fifo depth of 0. This will result in asynchronous + * transfers - not ideal but so much easier. + * Actually, this is OK because it assures us that if we don't + * specifically ask for sync transfers, we won't do any. + */ + + write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ + hostdata->outgoing_msg[0] = + EXTENDED_MESSAGE; + hostdata->outgoing_msg[1] = 3; + hostdata->outgoing_msg[2] = + EXTENDED_SDTR; + calc_sync_msg(hostdata-> + default_sx_per, 0, + 0, hostdata->outgoing_msg + 3); + hostdata->outgoing_len = 5; + } else { + if (ucp[4]) /* well, sync transfer */ + id = calc_sync_xfer(ucp[3], ucp[4], + hostdata->fast, + hostdata->sx_table); + else if (ucp[3]) /* very unlikely... */ + id = calc_sync_xfer(ucp[3], ucp[4], + 0, hostdata->sx_table); + } + hostdata->sync_xfer[cmd->device->id] = id; +#ifdef SYNC_DEBUG + printk(" sync_xfer=%02x\n", + hostdata->sync_xfer[cmd->device->id]); +#endif + hostdata->sync_stat[cmd->device->id] = + SS_SET; + write_wd33c93_cmd(regs, + WD_CMD_NEGATE_ACK); + hostdata->state = S_CONNECTED; + break; + case EXTENDED_WDTR: + write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ + printk("sending WDTR "); + hostdata->outgoing_msg[0] = + EXTENDED_MESSAGE; + hostdata->outgoing_msg[1] = 2; + hostdata->outgoing_msg[2] = + EXTENDED_WDTR; + hostdata->outgoing_msg[3] = 0; /* 8 bit transfer width */ + hostdata->outgoing_len = 4; + write_wd33c93_cmd(regs, + WD_CMD_NEGATE_ACK); + hostdata->state = S_CONNECTED; + break; + default: + write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ + printk + ("Rejecting Unknown Extended Message(%02x). 
", + ucp[2]); + hostdata->outgoing_msg[0] = + MESSAGE_REJECT; + hostdata->outgoing_len = 1; + write_wd33c93_cmd(regs, + WD_CMD_NEGATE_ACK); + hostdata->state = S_CONNECTED; + break; + } + hostdata->incoming_ptr = 0; + } + + /* We need to read more MESS_IN bytes for the extended message */ + + else { + hostdata->incoming_ptr++; + write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); + hostdata->state = S_CONNECTED; + } + break; + + default: + printk("Rejecting Unknown Message(%02x) ", msg); + write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ + hostdata->outgoing_msg[0] = MESSAGE_REJECT; + hostdata->outgoing_len = 1; + write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); + hostdata->state = S_CONNECTED; + } + spin_unlock_irqrestore(&hostdata->lock, flags); + break; + +/* Note: this interrupt will occur only after a LEVEL2 command */ + + case CSR_SEL_XFER_DONE: + +/* Make sure that reselection is enabled at this point - it may + * have been turned off for the command that just completed. + */ + + write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER); + if (phs == 0x60) { + DB(DB_INTR, printk("SX-DONE")) + scsi_pointer->Message = COMMAND_COMPLETE; + lun = read_wd33c93(regs, WD_TARGET_LUN); + DB(DB_INTR, printk(":%d.%d", scsi_pointer->Status, lun)) + hostdata->connected = NULL; + hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); + hostdata->state = S_UNCONNECTED; + if (scsi_pointer->Status == ILLEGAL_STATUS_BYTE) + scsi_pointer->Status = lun; + if (cmd->cmnd[0] == REQUEST_SENSE + && scsi_pointer->Status != SAM_STAT_GOOD) { + set_host_byte(cmd, DID_ERROR); + } else { + set_host_byte(cmd, DID_OK); + scsi_msg_to_host_byte(cmd, scsi_pointer->Message); + set_status_byte(cmd, scsi_pointer->Status); + } + scsi_done(cmd); + +/* We are no longer connected to a target - check to see if + * there are commands waiting to be executed. + */ + spin_unlock_irqrestore(&hostdata->lock, flags); + wd33c93_execute(instance); + } else { + printk + ("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", + asr, sr, phs); + spin_unlock_irqrestore(&hostdata->lock, flags); + } + break; + +/* Note: this interrupt will occur only after a LEVEL2 command */ + + case CSR_SDP: + DB(DB_INTR, printk("SDP")) + hostdata->state = S_RUNNING_LEVEL2; + write_wd33c93(regs, WD_COMMAND_PHASE, 0x41); + write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); + spin_unlock_irqrestore(&hostdata->lock, flags); + break; + + case CSR_XFER_DONE | PHS_MESS_OUT: + case CSR_UNEXP | PHS_MESS_OUT: + case CSR_SRV_REQ | PHS_MESS_OUT: + DB(DB_INTR, printk("MSG_OUT=")) + +/* To get here, we've probably requested MESSAGE_OUT and have + * already put the correct bytes in outgoing_msg[] and filled + * in outgoing_len. We simply send them out to the SCSI bus. + * Sometimes we get MESSAGE_OUT phase when we're not expecting + * it - like when our SDTR message is rejected by a target. Some + * targets send the REJECT before receiving all of the extended + * message, and then seem to go back to MESSAGE_OUT for a byte + * or two. Not sure why, or if I'm doing something wrong to + * cause this to happen. Regardless, it seems that sending + * NOP messages in these situations results in no harm and + * makes everyone happy. 
+ */ + if (hostdata->outgoing_len == 0) { + hostdata->outgoing_len = 1; + hostdata->outgoing_msg[0] = NOP; + } + transfer_pio(regs, hostdata->outgoing_msg, + hostdata->outgoing_len, DATA_OUT_DIR, hostdata); + DB(DB_INTR, printk("%02x", hostdata->outgoing_msg[0])) + hostdata->outgoing_len = 0; + hostdata->state = S_CONNECTED; + spin_unlock_irqrestore(&hostdata->lock, flags); + break; + + case CSR_UNEXP_DISC: + +/* I think I've seen this after a request-sense that was in response + * to an error condition, but not sure. We certainly need to do + * something when we get this interrupt - the question is 'what?'. + * Let's think positively, and assume some command has finished + * in a legal manner (like a command that provokes a request-sense), + * so we treat it as a normal command-complete-disconnect. + */ + +/* Make sure that reselection is enabled at this point - it may + * have been turned off for the command that just completed. + */ + + write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER); + if (cmd == NULL) { + printk(" - Already disconnected! "); + hostdata->state = S_UNCONNECTED; + spin_unlock_irqrestore(&hostdata->lock, flags); + return; + } + DB(DB_INTR, printk("UNEXP_DISC")) + hostdata->connected = NULL; + hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); + hostdata->state = S_UNCONNECTED; + if (cmd->cmnd[0] == REQUEST_SENSE && + scsi_pointer->Status != SAM_STAT_GOOD) { + set_host_byte(cmd, DID_ERROR); + } else { + set_host_byte(cmd, DID_OK); + scsi_msg_to_host_byte(cmd, scsi_pointer->Message); + set_status_byte(cmd, scsi_pointer->Status); + } + scsi_done(cmd); + +/* We are no longer connected to a target - check to see if + * there are commands waiting to be executed. + */ + /* look above for comments on scsi_done() */ + spin_unlock_irqrestore(&hostdata->lock, flags); + wd33c93_execute(instance); + break; + + case CSR_DISC: + +/* Make sure that reselection is enabled at this point - it may + * have been turned off for the command that just completed. + */ + + write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER); + DB(DB_INTR, printk("DISC")) + if (cmd == NULL) { + printk(" - Already disconnected! "); + hostdata->state = S_UNCONNECTED; + } + switch (hostdata->state) { + case S_PRE_CMP_DISC: + hostdata->connected = NULL; + hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); + hostdata->state = S_UNCONNECTED; + DB(DB_INTR, printk(":%d", scsi_pointer->Status)) + if (cmd->cmnd[0] == REQUEST_SENSE + && scsi_pointer->Status != SAM_STAT_GOOD) { + set_host_byte(cmd, DID_ERROR); + } else { + set_host_byte(cmd, DID_OK); + scsi_msg_to_host_byte(cmd, scsi_pointer->Message); + set_status_byte(cmd, scsi_pointer->Status); + } + scsi_done(cmd); + break; + case S_PRE_TMP_DISC: + case S_RUNNING_LEVEL2: + cmd->host_scribble = (uchar *) hostdata->disconnected_Q; + hostdata->disconnected_Q = cmd; + hostdata->connected = NULL; + hostdata->state = S_UNCONNECTED; + +#ifdef PROC_STATISTICS + hostdata->disc_done_cnt[cmd->device->id]++; +#endif + + break; + default: + printk("*** Unexpected DISCONNECT interrupt! ***"); + hostdata->state = S_UNCONNECTED; + } + +/* We are no longer connected to a target - check to see if + * there are commands waiting to be executed. + */ + spin_unlock_irqrestore(&hostdata->lock, flags); + wd33c93_execute(instance); + break; + + case CSR_RESEL_AM: + case CSR_RESEL: + DB(DB_INTR, printk("RESEL%s", sr == CSR_RESEL_AM ? "_AM" : "")) + + /* Old chips (pre -A ???) don't have advanced features and will + * generate CSR_RESEL. 
In that case we have to extract the LUN the + * hard way (see below). + * First we have to make sure this reselection didn't + * happen during Arbitration/Selection of some other device. + * If yes, put losing command back on top of input_Q. + */ + if (hostdata->level2 <= L2_NONE) { + + if (hostdata->selecting) { + cmd = (struct scsi_cmnd *) hostdata->selecting; + hostdata->selecting = NULL; + hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); + cmd->host_scribble = + (uchar *) hostdata->input_Q; + hostdata->input_Q = cmd; + } + } + + else { + + if (cmd) { + if (phs == 0x00) { + hostdata->busy[cmd->device->id] &= + ~(1 << (cmd->device->lun & 0xff)); + cmd->host_scribble = + (uchar *) hostdata->input_Q; + hostdata->input_Q = cmd; + } else { + printk + ("---%02x:%02x:%02x-TROUBLE: Intrusive ReSelect!---", + asr, sr, phs); + while (1) + printk("\r"); + } + } + + } + + /* OK - find out which device reselected us. */ + + id = read_wd33c93(regs, WD_SOURCE_ID); + id &= SRCID_MASK; + + /* and extract the lun from the ID message. (Note that we don't + * bother to check for a valid message here - I guess this is + * not the right way to go, but...) + */ + + if (sr == CSR_RESEL_AM) { + lun = read_wd33c93(regs, WD_DATA); + if (hostdata->level2 < L2_RESELECT) + write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); + lun &= 7; + } else { + /* Old chip; wait for msgin phase to pick up the LUN. */ + for (lun = 255; lun; lun--) { + if ((asr = read_aux_stat(regs)) & ASR_INT) + break; + udelay(10); + } + if (!(asr & ASR_INT)) { + printk + ("wd33c93: Reselected without IDENTIFY\n"); + lun = 0; + } else { + /* Verify this is a change to MSG_IN and read the message */ + sr = read_wd33c93(regs, WD_SCSI_STATUS); + udelay(7); + if (sr == (CSR_ABORT | PHS_MESS_IN) || + sr == (CSR_UNEXP | PHS_MESS_IN) || + sr == (CSR_SRV_REQ | PHS_MESS_IN)) { + /* Got MSG_IN, grab target LUN */ + lun = read_1_byte(regs); + /* Now we expect a 'paused with ACK asserted' int.. */ + asr = read_aux_stat(regs); + if (!(asr & ASR_INT)) { + udelay(10); + asr = read_aux_stat(regs); + if (!(asr & ASR_INT)) + printk + ("wd33c93: No int after LUN on RESEL (%02x)\n", + asr); + } + sr = read_wd33c93(regs, WD_SCSI_STATUS); + udelay(7); + if (sr != CSR_MSGIN) + printk + ("wd33c93: Not paused with ACK on RESEL (%02x)\n", + sr); + lun &= 7; + write_wd33c93_cmd(regs, + WD_CMD_NEGATE_ACK); + } else { + printk + ("wd33c93: Not MSG_IN on reselect (%02x)\n", + sr); + lun = 0; + } + } + } + + /* Now we look for the command that's reconnecting. */ + + cmd = (struct scsi_cmnd *) hostdata->disconnected_Q; + patch = NULL; + while (cmd) { + if (id == cmd->device->id && lun == (u8)cmd->device->lun) + break; + patch = cmd; + cmd = (struct scsi_cmnd *) cmd->host_scribble; + } + + /* Hmm. Couldn't find a valid command.... What to do? */ + + if (!cmd) { + printk + ("---TROUBLE: target %d.%d not in disconnect queue---", + id, (u8)lun); + spin_unlock_irqrestore(&hostdata->lock, flags); + return; + } + + /* Ok, found the command - now start it up again. */ + + if (patch) + patch->host_scribble = cmd->host_scribble; + else + hostdata->disconnected_Q = + (struct scsi_cmnd *) cmd->host_scribble; + hostdata->connected = cmd; + + /* We don't need to worry about 'initialize_SCp()' or 'hostdata->busy[]' + * because these things are preserved over a disconnect. + * But we DO need to fix the DPD bit so it's correct for this command. 
+ */ + + if (cmd->sc_data_direction == DMA_TO_DEVICE) + write_wd33c93(regs, WD_DESTINATION_ID, cmd->device->id); + else + write_wd33c93(regs, WD_DESTINATION_ID, + cmd->device->id | DSTID_DPD); + if (hostdata->level2 >= L2_RESELECT) { + write_wd33c93_count(regs, 0); /* we want a DATA_PHASE interrupt */ + write_wd33c93(regs, WD_COMMAND_PHASE, 0x45); + write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); + hostdata->state = S_RUNNING_LEVEL2; + } else + hostdata->state = S_CONNECTED; + + spin_unlock_irqrestore(&hostdata->lock, flags); + break; + + default: + printk("--UNKNOWN INTERRUPT:%02x:%02x:%02x--", asr, sr, phs); + spin_unlock_irqrestore(&hostdata->lock, flags); + } + + DB(DB_INTR, printk("} ")) + +} + +static void +reset_wd33c93(struct Scsi_Host *instance) +{ + struct WD33C93_hostdata *hostdata = + (struct WD33C93_hostdata *) instance->hostdata; + const wd33c93_regs regs = hostdata->regs; + uchar sr; + +#ifdef CONFIG_SGI_IP22 + { + int busycount = 0; + extern void sgiwd93_reset(unsigned long); + /* wait 'til the chip gets some time for us */ + while ((read_aux_stat(regs) & ASR_BSY) && busycount++ < 100) + udelay (10); + /* + * there are scsi devices out there, which manage to lock up + * the wd33c93 in a busy condition. In this state it won't + * accept the reset command. The only way to solve this is to + * give the chip a hardware reset (if possible). The code below + * does this for the SGI Indy, where this is possible + */ + /* still busy ? */ + if (read_aux_stat(regs) & ASR_BSY) + sgiwd93_reset(instance->base); /* yeah, give it the hard one */ + } +#endif + + write_wd33c93(regs, WD_OWN_ID, OWNID_EAF | OWNID_RAF | + instance->this_id | hostdata->clock_freq); + write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); + write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER, + calc_sync_xfer(hostdata->default_sx_per / 4, + DEFAULT_SX_OFF, 0, hostdata->sx_table)); + write_wd33c93(regs, WD_COMMAND, WD_CMD_RESET); + + +#ifdef CONFIG_MVME147_SCSI + udelay(25); /* The old wd33c93 on MVME147 needs this, at least */ +#endif + + while (!(read_aux_stat(regs) & ASR_INT)) + ; + sr = read_wd33c93(regs, WD_SCSI_STATUS); + + hostdata->microcode = read_wd33c93(regs, WD_CDB_1); + if (sr == 0x00) + hostdata->chip = C_WD33C93; + else if (sr == 0x01) { + write_wd33c93(regs, WD_QUEUE_TAG, 0xa5); /* any random number */ + sr = read_wd33c93(regs, WD_QUEUE_TAG); + if (sr == 0xa5) { + hostdata->chip = C_WD33C93B; + write_wd33c93(regs, WD_QUEUE_TAG, 0); + } else + hostdata->chip = C_WD33C93A; + } else + hostdata->chip = C_UNKNOWN_CHIP; + + if (hostdata->chip != C_WD33C93B) /* Fast SCSI unavailable */ + hostdata->fast = 0; + + write_wd33c93(regs, WD_TIMEOUT_PERIOD, TIMEOUT_PERIOD_VALUE); + write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); +} + +int +wd33c93_host_reset(struct scsi_cmnd * SCpnt) +{ + struct Scsi_Host *instance; + struct WD33C93_hostdata *hostdata; + int i; + + instance = SCpnt->device->host; + spin_lock_irq(instance->host_lock); + hostdata = (struct WD33C93_hostdata *) instance->hostdata; + + printk("scsi%d: reset. 
", instance->host_no); + disable_irq(instance->irq); + + hostdata->dma_stop(instance, NULL, 0); + for (i = 0; i < 8; i++) { + hostdata->busy[i] = 0; + hostdata->sync_xfer[i] = + calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF, + 0, hostdata->sx_table); + hostdata->sync_stat[i] = SS_UNSET; /* using default sync values */ + } + hostdata->input_Q = NULL; + hostdata->selecting = NULL; + hostdata->connected = NULL; + hostdata->disconnected_Q = NULL; + hostdata->state = S_UNCONNECTED; + hostdata->dma = D_DMA_OFF; + hostdata->incoming_ptr = 0; + hostdata->outgoing_len = 0; + + reset_wd33c93(instance); + SCpnt->result = DID_RESET << 16; + enable_irq(instance->irq); + spin_unlock_irq(instance->host_lock); + return SUCCESS; +} + +int +wd33c93_abort(struct scsi_cmnd * cmd) +{ + struct Scsi_Host *instance; + struct WD33C93_hostdata *hostdata; + wd33c93_regs regs; + struct scsi_cmnd *tmp, *prev; + + disable_irq(cmd->device->host->irq); + + instance = cmd->device->host; + hostdata = (struct WD33C93_hostdata *) instance->hostdata; + regs = hostdata->regs; + +/* + * Case 1 : If the command hasn't been issued yet, we simply remove it + * from the input_Q. + */ + + tmp = (struct scsi_cmnd *) hostdata->input_Q; + prev = NULL; + while (tmp) { + if (tmp == cmd) { + if (prev) + prev->host_scribble = cmd->host_scribble; + else + hostdata->input_Q = + (struct scsi_cmnd *) cmd->host_scribble; + cmd->host_scribble = NULL; + cmd->result = DID_ABORT << 16; + printk + ("scsi%d: Abort - removing command from input_Q. ", + instance->host_no); + enable_irq(cmd->device->host->irq); + scsi_done(cmd); + return SUCCESS; + } + prev = tmp; + tmp = (struct scsi_cmnd *) tmp->host_scribble; + } + +/* + * Case 2 : If the command is connected, we're going to fail the abort + * and let the high level SCSI driver retry at a later time or + * issue a reset. + * + * Timeouts, and therefore aborted commands, will be highly unlikely + * and handling them cleanly in this situation would make the common + * case of noresets less efficient, and would pollute our code. So, + * we fail. + */ + + if (hostdata->connected == cmd) { + uchar sr, asr; + unsigned long timeout; + + printk("scsi%d: Aborting connected command - ", + instance->host_no); + + printk("stopping DMA - "); + if (hostdata->dma == D_DMA_RUNNING) { + hostdata->dma_stop(instance, cmd, 0); + hostdata->dma = D_DMA_OFF; + } + + printk("sending wd33c93 ABORT command - "); + write_wd33c93(regs, WD_CONTROL, + CTRL_IDI | CTRL_EDI | CTRL_POLLED); + write_wd33c93_cmd(regs, WD_CMD_ABORT); + +/* Now we have to attempt to flush out the FIFO... */ + + printk("flushing fifo - "); + timeout = 1000000; + do { + asr = read_aux_stat(regs); + if (asr & ASR_DBR) + read_wd33c93(regs, WD_DATA); + } while (!(asr & ASR_INT) && timeout-- > 0); + sr = read_wd33c93(regs, WD_SCSI_STATUS); + printk + ("asr=%02x, sr=%02x, %ld bytes un-transferred (timeout=%ld) - ", + asr, sr, read_wd33c93_count(regs), timeout); + + /* + * Abort command processed. + * Still connected. + * We must disconnect. 
+ */ + + printk("sending wd33c93 DISCONNECT command - "); + write_wd33c93_cmd(regs, WD_CMD_DISCONNECT); + + timeout = 1000000; + asr = read_aux_stat(regs); + while ((asr & ASR_CIP) && timeout-- > 0) + asr = read_aux_stat(regs); + sr = read_wd33c93(regs, WD_SCSI_STATUS); + printk("asr=%02x, sr=%02x.", asr, sr); + + hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); + hostdata->connected = NULL; + hostdata->state = S_UNCONNECTED; + cmd->result = DID_ABORT << 16; + +/* sti();*/ + wd33c93_execute(instance); + + enable_irq(cmd->device->host->irq); + scsi_done(cmd); + return SUCCESS; + } + +/* + * Case 3: If the command is currently disconnected from the bus, + * we're not going to expend much effort here: Let's just return + * an ABORT_SNOOZE and hope for the best... + */ + + tmp = (struct scsi_cmnd *) hostdata->disconnected_Q; + while (tmp) { + if (tmp == cmd) { + printk + ("scsi%d: Abort - command found on disconnected_Q - ", + instance->host_no); + printk("Abort SNOOZE. "); + enable_irq(cmd->device->host->irq); + return FAILED; + } + tmp = (struct scsi_cmnd *) tmp->host_scribble; + } + +/* + * Case 4 : If we reached this point, the command was not found in any of + * the queues. + * + * We probably reached this point because of an unlikely race condition + * between the command completing successfully and the abortion code, + * so we won't panic, but we will notify the user in case something really + * broke. + */ + +/* sti();*/ + wd33c93_execute(instance); + + enable_irq(cmd->device->host->irq); + printk("scsi%d: warning : SCSI command probably completed successfully" + " before abortion. ", instance->host_no); + return FAILED; +} + +#define MAX_WD33C93_HOSTS 4 +#define MAX_SETUP_ARGS ARRAY_SIZE(setup_args) +#define SETUP_BUFFER_SIZE 200 +static char setup_buffer[SETUP_BUFFER_SIZE]; +static char setup_used[MAX_SETUP_ARGS]; +static int done_setup = 0; + +static int +wd33c93_setup(char *str) +{ + int i; + char *p1, *p2; + + /* The kernel does some processing of the command-line before calling + * this function: If it begins with any decimal or hex number arguments, + * ints[0] = how many numbers found and ints[1] through [n] are the values + * themselves. str points to where the non-numeric arguments (if any) + * start: We do our own parsing of those. We construct synthetic 'nosync' + * keywords out of numeric args (to maintain compatibility with older + * versions) and then add the rest of the arguments. 
+ */ + + p1 = setup_buffer; + *p1 = '\0'; + if (str) + strncpy(p1, str, SETUP_BUFFER_SIZE - strlen(setup_buffer)); + setup_buffer[SETUP_BUFFER_SIZE - 1] = '\0'; + p1 = setup_buffer; + i = 0; + while (*p1 && (i < MAX_SETUP_ARGS)) { + p2 = strchr(p1, ','); + if (p2) { + *p2 = '\0'; + if (p1 != p2) + setup_args[i] = p1; + p1 = p2 + 1; + i++; + } else { + setup_args[i] = p1; + break; + } + } + for (i = 0; i < MAX_SETUP_ARGS; i++) + setup_used[i] = 0; + done_setup = 1; + + return 1; +} +__setup("wd33c93=", wd33c93_setup); + +/* check_setup_args() returns index if key found, 0 if not + */ +static int +check_setup_args(char *key, int *flags, int *val, char *buf) +{ + int x; + char *cp; + + for (x = 0; x < MAX_SETUP_ARGS; x++) { + if (setup_used[x]) + continue; + if (!strncmp(setup_args[x], key, strlen(key))) + break; + if (!strncmp(setup_args[x], "next", strlen("next"))) + return 0; + } + if (x == MAX_SETUP_ARGS) + return 0; + setup_used[x] = 1; + cp = setup_args[x] + strlen(key); + *val = -1; + if (*cp != ':') + return ++x; + cp++; + if ((*cp >= '0') && (*cp <= '9')) { + *val = simple_strtoul(cp, NULL, 0); + } + return ++x; +} + +/* + * Calculate internal data-transfer-clock cycle from input-clock + * frequency (/MHz) and fill 'sx_table'. + * + * The original driver used to rely on a fixed sx_table, containing periods + * for (only) the lower limits of the respective input-clock-frequency ranges + * (8-10/12-15/16-20 MHz). Although it seems, that no problems occurred with + * this setting so far, it might be desirable to adjust the transfer periods + * closer to the really attached, possibly 25% higher, input-clock, since + * - the wd33c93 may really use a significant shorter period, than it has + * negotiated (eg. thrashing the target, which expects 4/8MHz, with 5/10MHz + * instead). + * - the wd33c93 may ask the target for a lower transfer rate, than the target + * is capable of (eg. negotiating for an assumed minimum of 252ns instead of + * possible 200ns, which indeed shows up in tests as an approx. 10% lower + * transfer rate). + */ +static inline unsigned int +round_4(unsigned int x) +{ + switch (x & 3) { + case 1: --x; + break; + case 2: ++x; + fallthrough; + case 3: ++x; + } + return x; +} + +static void +calc_sx_table(unsigned int mhz, struct sx_period sx_table[9]) +{ + unsigned int d, i; + if (mhz < 11) + d = 2; /* divisor for 8-10 MHz input-clock */ + else if (mhz < 16) + d = 3; /* divisor for 12-15 MHz input-clock */ + else + d = 4; /* divisor for 16-20 MHz input-clock */ + + d = (100000 * d) / 2 / mhz; /* 100 x DTCC / nanosec */ + + sx_table[0].period_ns = 1; + sx_table[0].reg_value = 0x20; + for (i = 1; i < 8; i++) { + sx_table[i].period_ns = round_4((i+1)*d / 100); + sx_table[i].reg_value = (i+1)*0x10; + } + sx_table[7].reg_value = 0; + sx_table[8].period_ns = 0; + sx_table[8].reg_value = 0; +} + +/* + * check and, maybe, map an init- or "clock:"- argument. + */ +static uchar +set_clk_freq(int freq, int *mhz) +{ + int x = freq; + if (WD33C93_FS_8_10 == freq) + freq = 8; + else if (WD33C93_FS_12_15 == freq) + freq = 12; + else if (WD33C93_FS_16_20 == freq) + freq = 16; + else if (freq > 7 && freq < 11) + x = WD33C93_FS_8_10; + else if (freq > 11 && freq < 16) + x = WD33C93_FS_12_15; + else if (freq > 15 && freq < 21) + x = WD33C93_FS_16_20; + else { + /* Hmm, wouldn't it be safer to assume highest freq here? */ + x = WD33C93_FS_8_10; + freq = 8; + } + *mhz = freq; + return x; +} + +/* + * to be used with the resync: fast: ... 
options + */ +static inline void set_resync ( struct WD33C93_hostdata *hd, int mask ) +{ + int i; + for (i = 0; i < 8; i++) + if (mask & (1 << i)) + hd->sync_stat[i] = SS_UNSET; +} + +void +wd33c93_init(struct Scsi_Host *instance, const wd33c93_regs regs, + dma_setup_t setup, dma_stop_t stop, int clock_freq) +{ + struct WD33C93_hostdata *hostdata; + int i; + int flags; + int val; + char buf[32]; + + if (!done_setup && setup_strings) + wd33c93_setup(setup_strings); + + hostdata = (struct WD33C93_hostdata *) instance->hostdata; + + hostdata->regs = regs; + hostdata->clock_freq = set_clk_freq(clock_freq, &i); + calc_sx_table(i, hostdata->sx_table); + hostdata->dma_setup = setup; + hostdata->dma_stop = stop; + hostdata->dma_bounce_buffer = NULL; + hostdata->dma_bounce_len = 0; + for (i = 0; i < 8; i++) { + hostdata->busy[i] = 0; + hostdata->sync_xfer[i] = + calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF, + 0, hostdata->sx_table); + hostdata->sync_stat[i] = SS_UNSET; /* using default sync values */ +#ifdef PROC_STATISTICS + hostdata->cmd_cnt[i] = 0; + hostdata->disc_allowed_cnt[i] = 0; + hostdata->disc_done_cnt[i] = 0; +#endif + } + hostdata->input_Q = NULL; + hostdata->selecting = NULL; + hostdata->connected = NULL; + hostdata->disconnected_Q = NULL; + hostdata->state = S_UNCONNECTED; + hostdata->dma = D_DMA_OFF; + hostdata->level2 = L2_BASIC; + hostdata->disconnect = DIS_ADAPTIVE; + hostdata->args = DEBUG_DEFAULTS; + hostdata->incoming_ptr = 0; + hostdata->outgoing_len = 0; + hostdata->default_sx_per = DEFAULT_SX_PER; + hostdata->no_dma = 0; /* default is DMA enabled */ + +#ifdef PROC_INTERFACE + hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS | + PR_CONNECTED | PR_INPUTQ | PR_DISCQ | PR_STOP; +#ifdef PROC_STATISTICS + hostdata->dma_cnt = 0; + hostdata->pio_cnt = 0; + hostdata->int_cnt = 0; +#endif +#endif + + if (check_setup_args("clock", &flags, &val, buf)) { + hostdata->clock_freq = set_clk_freq(val, &val); + calc_sx_table(val, hostdata->sx_table); + } + + if (check_setup_args("nosync", &flags, &val, buf)) + hostdata->no_sync = val; + + if (check_setup_args("nodma", &flags, &val, buf)) + hostdata->no_dma = (val == -1) ? 1 : val; + + if (check_setup_args("period", &flags, &val, buf)) + hostdata->default_sx_per = + hostdata->sx_table[round_period((unsigned int) val, + hostdata->sx_table)].period_ns; + + if (check_setup_args("disconnect", &flags, &val, buf)) { + if ((val >= DIS_NEVER) && (val <= DIS_ALWAYS)) + hostdata->disconnect = val; + else + hostdata->disconnect = DIS_ADAPTIVE; + } + + if (check_setup_args("level2", &flags, &val, buf)) + hostdata->level2 = val; + + if (check_setup_args("debug", &flags, &val, buf)) + hostdata->args = val & DB_MASK; + + if (check_setup_args("burst", &flags, &val, buf)) + hostdata->dma_mode = val ? CTRL_BURST:CTRL_DMA; + + if (WD33C93_FS_16_20 == hostdata->clock_freq /* divisor 4 */ + && check_setup_args("fast", &flags, &val, buf)) + hostdata->fast = !!val; + + if ((i = check_setup_args("next", &flags, &val, buf))) { + while (i) + setup_used[--i] = 1; + } +#ifdef PROC_INTERFACE + if (check_setup_args("proc", &flags, &val, buf)) + hostdata->proc = val; +#endif + + spin_lock_irq(&hostdata->lock); + reset_wd33c93(instance); + spin_unlock_irq(&hostdata->lock); + + printk("wd33c93-%d: chip=%s/%d no_sync=0x%x no_dma=%d", + instance->host_no, + (hostdata->chip == C_WD33C93) ? "WD33c93" : (hostdata->chip == + C_WD33C93A) ? + "WD33c93A" : (hostdata->chip == + C_WD33C93B) ? 
"WD33c93B" : "unknown", + hostdata->microcode, hostdata->no_sync, hostdata->no_dma); +#ifdef DEBUGGING_ON + printk(" debug_flags=0x%02x\n", hostdata->args); +#else + printk(" debugging=OFF\n"); +#endif + printk(" setup_args="); + for (i = 0; i < MAX_SETUP_ARGS; i++) + printk("%s,", setup_args[i]); + printk("\n"); + printk(" Version %s - %s\n", WD33C93_VERSION, WD33C93_DATE); +} + +int wd33c93_write_info(struct Scsi_Host *instance, char *buf, int len) +{ +#ifdef PROC_INTERFACE + char *bp; + struct WD33C93_hostdata *hd; + int x; + + hd = (struct WD33C93_hostdata *) instance->hostdata; + +/* We accept the following + * keywords (same format as command-line, but arguments are not optional): + * debug + * disconnect + * period + * resync + * proc + * nodma + * level2 + * burst + * fast + * nosync + */ + + buf[len] = '\0'; + for (bp = buf; *bp; ) { + while (',' == *bp || ' ' == *bp) + ++bp; + if (!strncmp(bp, "debug:", 6)) { + hd->args = simple_strtoul(bp+6, &bp, 0) & DB_MASK; + } else if (!strncmp(bp, "disconnect:", 11)) { + x = simple_strtoul(bp+11, &bp, 0); + if (x < DIS_NEVER || x > DIS_ALWAYS) + x = DIS_ADAPTIVE; + hd->disconnect = x; + } else if (!strncmp(bp, "period:", 7)) { + x = simple_strtoul(bp+7, &bp, 0); + hd->default_sx_per = + hd->sx_table[round_period((unsigned int) x, + hd->sx_table)].period_ns; + } else if (!strncmp(bp, "resync:", 7)) { + set_resync(hd, (int)simple_strtoul(bp+7, &bp, 0)); + } else if (!strncmp(bp, "proc:", 5)) { + hd->proc = simple_strtoul(bp+5, &bp, 0); + } else if (!strncmp(bp, "nodma:", 6)) { + hd->no_dma = simple_strtoul(bp+6, &bp, 0); + } else if (!strncmp(bp, "level2:", 7)) { + hd->level2 = simple_strtoul(bp+7, &bp, 0); + } else if (!strncmp(bp, "burst:", 6)) { + hd->dma_mode = + simple_strtol(bp+6, &bp, 0) ? CTRL_BURST:CTRL_DMA; + } else if (!strncmp(bp, "fast:", 5)) { + x = !!simple_strtol(bp+5, &bp, 0); + if (x != hd->fast) + set_resync(hd, 0xff); + hd->fast = x; + } else if (!strncmp(bp, "nosync:", 7)) { + x = simple_strtoul(bp+7, &bp, 0); + set_resync(hd, x ^ hd->no_sync); + hd->no_sync = x; + } else { + break; /* unknown keyword,syntax-error,... 
*/ + } + } + return len; +#else + return 0; +#endif +} + +int +wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance) +{ +#ifdef PROC_INTERFACE + struct WD33C93_hostdata *hd; + struct scsi_cmnd *cmd; + int x; + + hd = (struct WD33C93_hostdata *) instance->hostdata; + + spin_lock_irq(&hd->lock); + if (hd->proc & PR_VERSION) + seq_printf(m, "\nVersion %s - %s.", + WD33C93_VERSION, WD33C93_DATE); + + if (hd->proc & PR_INFO) { + seq_printf(m, "\nclock_freq=%02x no_sync=%02x no_dma=%d" + " dma_mode=%02x fast=%d", + hd->clock_freq, hd->no_sync, hd->no_dma, hd->dma_mode, hd->fast); + seq_puts(m, "\nsync_xfer[] = "); + for (x = 0; x < 7; x++) + seq_printf(m, "\t%02x", hd->sync_xfer[x]); + seq_puts(m, "\nsync_stat[] = "); + for (x = 0; x < 7; x++) + seq_printf(m, "\t%02x", hd->sync_stat[x]); + } +#ifdef PROC_STATISTICS + if (hd->proc & PR_STATISTICS) { + seq_puts(m, "\ncommands issued: "); + for (x = 0; x < 7; x++) + seq_printf(m, "\t%ld", hd->cmd_cnt[x]); + seq_puts(m, "\ndisconnects allowed:"); + for (x = 0; x < 7; x++) + seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]); + seq_puts(m, "\ndisconnects done: "); + for (x = 0; x < 7; x++) + seq_printf(m, "\t%ld", hd->disc_done_cnt[x]); + seq_printf(m, + "\ninterrupts: %ld, DATA_PHASE ints: %ld DMA, %ld PIO", + hd->int_cnt, hd->dma_cnt, hd->pio_cnt); + } +#endif + if (hd->proc & PR_CONNECTED) { + seq_puts(m, "\nconnected: "); + if (hd->connected) { + cmd = (struct scsi_cmnd *) hd->connected; + seq_printf(m, " %d:%llu(%02x)", + cmd->device->id, cmd->device->lun, cmd->cmnd[0]); + } + } + if (hd->proc & PR_INPUTQ) { + seq_puts(m, "\ninput_Q: "); + cmd = (struct scsi_cmnd *) hd->input_Q; + while (cmd) { + seq_printf(m, " %d:%llu(%02x)", + cmd->device->id, cmd->device->lun, cmd->cmnd[0]); + cmd = (struct scsi_cmnd *) cmd->host_scribble; + } + } + if (hd->proc & PR_DISCQ) { + seq_puts(m, "\ndisconnected_Q:"); + cmd = (struct scsi_cmnd *) hd->disconnected_Q; + while (cmd) { + seq_printf(m, " %d:%llu(%02x)", + cmd->device->id, cmd->device->lun, cmd->cmnd[0]); + cmd = (struct scsi_cmnd *) cmd->host_scribble; + } + } + seq_putc(m, '\n'); + spin_unlock_irq(&hd->lock); +#endif /* PROC_INTERFACE */ + return 0; +} + +EXPORT_SYMBOL(wd33c93_host_reset); +EXPORT_SYMBOL(wd33c93_init); +EXPORT_SYMBOL(wd33c93_abort); +EXPORT_SYMBOL(wd33c93_queuecommand); +EXPORT_SYMBOL(wd33c93_intr); +EXPORT_SYMBOL(wd33c93_show_info); +EXPORT_SYMBOL(wd33c93_write_info); diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h new file mode 100644 index 000000000..e5e4254b1 --- /dev/null +++ b/drivers/scsi/wd33c93.h @@ -0,0 +1,341 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * wd33c93.h - Linux device driver definitions for the + * Commodore Amiga A2091/590 SCSI controller card + * + * IMPORTANT: This file is for version 1.25 - 09/Jul/1997 + * + * Copyright (c) 1996 John Shifflett, GeoLog Consulting + * john@geolog.com + * jshiffle@netcom.com + */ +#ifndef WD33C93_H +#define WD33C93_H + + +#define PROC_INTERFACE /* add code for /proc/scsi/wd33c93/xxx interface */ +#ifdef PROC_INTERFACE +#define PROC_STATISTICS /* add code for keeping various real time stats */ +#endif + +#define SYNC_DEBUG /* extra info on sync negotiation printed */ +#define DEBUGGING_ON /* enable command-line debugging bitmask */ +#define DEBUG_DEFAULTS 0 /* default debugging bitmask */ + + +#ifdef DEBUGGING_ON +#define DB(f,a) if (hostdata->args & (f)) a; +#else +#define DB(f,a) +#endif + +#define uchar unsigned char + + +/* wd register names */ +#define WD_OWN_ID 0x00 +#define WD_CONTROL 
0x01 +#define WD_TIMEOUT_PERIOD 0x02 +#define WD_CDB_1 0x03 +#define WD_CDB_2 0x04 +#define WD_CDB_3 0x05 +#define WD_CDB_4 0x06 +#define WD_CDB_5 0x07 +#define WD_CDB_6 0x08 +#define WD_CDB_7 0x09 +#define WD_CDB_8 0x0a +#define WD_CDB_9 0x0b +#define WD_CDB_10 0x0c +#define WD_CDB_11 0x0d +#define WD_CDB_12 0x0e +#define WD_TARGET_LUN 0x0f +#define WD_COMMAND_PHASE 0x10 +#define WD_SYNCHRONOUS_TRANSFER 0x11 +#define WD_TRANSFER_COUNT_MSB 0x12 +#define WD_TRANSFER_COUNT 0x13 +#define WD_TRANSFER_COUNT_LSB 0x14 +#define WD_DESTINATION_ID 0x15 +#define WD_SOURCE_ID 0x16 +#define WD_SCSI_STATUS 0x17 +#define WD_COMMAND 0x18 +#define WD_DATA 0x19 +#define WD_QUEUE_TAG 0x1a +#define WD_AUXILIARY_STATUS 0x1f + +/* WD commands */ +#define WD_CMD_RESET 0x00 +#define WD_CMD_ABORT 0x01 +#define WD_CMD_ASSERT_ATN 0x02 +#define WD_CMD_NEGATE_ACK 0x03 +#define WD_CMD_DISCONNECT 0x04 +#define WD_CMD_RESELECT 0x05 +#define WD_CMD_SEL_ATN 0x06 +#define WD_CMD_SEL 0x07 +#define WD_CMD_SEL_ATN_XFER 0x08 +#define WD_CMD_SEL_XFER 0x09 +#define WD_CMD_RESEL_RECEIVE 0x0a +#define WD_CMD_RESEL_SEND 0x0b +#define WD_CMD_WAIT_SEL_RECEIVE 0x0c +#define WD_CMD_TRANS_ADDR 0x18 +#define WD_CMD_TRANS_INFO 0x20 +#define WD_CMD_TRANSFER_PAD 0x21 +#define WD_CMD_SBT_MODE 0x80 + +/* ASR register */ +#define ASR_INT (0x80) +#define ASR_LCI (0x40) +#define ASR_BSY (0x20) +#define ASR_CIP (0x10) +#define ASR_PE (0x02) +#define ASR_DBR (0x01) + +/* SCSI Bus Phases */ +#define PHS_DATA_OUT 0x00 +#define PHS_DATA_IN 0x01 +#define PHS_COMMAND 0x02 +#define PHS_STATUS 0x03 +#define PHS_MESS_OUT 0x06 +#define PHS_MESS_IN 0x07 + +/* Command Status Register definitions */ + + /* reset state interrupts */ +#define CSR_RESET 0x00 +#define CSR_RESET_AF 0x01 + + /* successful completion interrupts */ +#define CSR_RESELECT 0x10 +#define CSR_SELECT 0x11 +#define CSR_SEL_XFER_DONE 0x16 +#define CSR_XFER_DONE 0x18 + + /* paused or aborted interrupts */ +#define CSR_MSGIN 0x20 +#define CSR_SDP 0x21 +#define CSR_SEL_ABORT 0x22 +#define CSR_RESEL_ABORT 0x25 +#define CSR_RESEL_ABORT_AM 0x27 +#define CSR_ABORT 0x28 + + /* terminated interrupts */ +#define CSR_INVALID 0x40 +#define CSR_UNEXP_DISC 0x41 +#define CSR_TIMEOUT 0x42 +#define CSR_PARITY 0x43 +#define CSR_PARITY_ATN 0x44 +#define CSR_BAD_STATUS 0x45 +#define CSR_UNEXP 0x48 + + /* service required interrupts */ +#define CSR_RESEL 0x80 +#define CSR_RESEL_AM 0x81 +#define CSR_DISC 0x85 +#define CSR_SRV_REQ 0x88 + + /* Own ID/CDB Size register */ +#define OWNID_EAF 0x08 +#define OWNID_EHP 0x10 +#define OWNID_RAF 0x20 +#define OWNID_FS_8 0x00 +#define OWNID_FS_12 0x40 +#define OWNID_FS_16 0x80 + + /* define these so we don't have to change a2091.c, etc. */ +#define WD33C93_FS_8_10 OWNID_FS_8 +#define WD33C93_FS_12_15 OWNID_FS_12 +#define WD33C93_FS_16_20 OWNID_FS_16 + + /* pass input-clock explicitly. 
accepted mhz values are 8-10,12-20 */ +#define WD33C93_FS_MHZ(mhz) (mhz) + + /* Control register */ +#define CTRL_HSP 0x01 +#define CTRL_HA 0x02 +#define CTRL_IDI 0x04 +#define CTRL_EDI 0x08 +#define CTRL_HHP 0x10 +#define CTRL_POLLED 0x00 +#define CTRL_BURST 0x20 +#define CTRL_BUS 0x40 +#define CTRL_DMA 0x80 + + /* Timeout Period register */ +#define TIMEOUT_PERIOD_VALUE 20 /* 20 = 200 ms */ + + /* Synchronous Transfer Register */ +#define STR_FSS 0x80 + + /* Destination ID register */ +#define DSTID_DPD 0x40 +#define DATA_OUT_DIR 0 +#define DATA_IN_DIR 1 +#define DSTID_SCC 0x80 + + /* Source ID register */ +#define SRCID_MASK 0x07 +#define SRCID_SIV 0x08 +#define SRCID_DSP 0x20 +#define SRCID_ES 0x40 +#define SRCID_ER 0x80 + + /* This is what the 3393 chip looks like to us */ +typedef struct { + volatile unsigned char *SASR; + volatile unsigned char *SCMD; +} wd33c93_regs; + + +typedef int (*dma_setup_t) (struct scsi_cmnd *SCpnt, int dir_in); +typedef void (*dma_stop_t) (struct Scsi_Host *instance, + struct scsi_cmnd *SCpnt, int status); + + +#define ILLEGAL_STATUS_BYTE 0xff + +#define DEFAULT_SX_PER 376 /* (ns) fairly safe */ +#define DEFAULT_SX_OFF 0 /* aka async */ + +#define OPTIMUM_SX_PER 252 /* (ns) best we can do (mult-of-4) */ +#define OPTIMUM_SX_OFF 12 /* size of wd3393 fifo */ + +struct sx_period { + unsigned int period_ns; + uchar reg_value; + }; + +/* FEF: defines for hostdata->dma_buffer_pool */ + +#define BUF_CHIP_ALLOCED 0 +#define BUF_SCSI_ALLOCED 1 + +struct WD33C93_hostdata { + struct Scsi_Host *next; + wd33c93_regs regs; + spinlock_t lock; + uchar clock_freq; + uchar chip; /* what kind of wd33c93? */ + uchar microcode; /* microcode rev */ + uchar dma_buffer_pool; /* FEF: buffer from chip_ram? */ + int dma_dir; /* data transfer dir. 
*/ + dma_setup_t dma_setup; + dma_stop_t dma_stop; + unsigned int dma_xfer_mask; + uchar *dma_bounce_buffer; + unsigned int dma_bounce_len; + volatile uchar busy[8]; /* index = target, bit = lun */ + volatile struct scsi_cmnd *input_Q; /* commands waiting to be started */ + volatile struct scsi_cmnd *selecting; /* trying to select this command */ + volatile struct scsi_cmnd *connected; /* currently connected command */ + volatile struct scsi_cmnd *disconnected_Q;/* commands waiting for reconnect */ + uchar state; /* what we are currently doing */ + uchar dma; /* current state of DMA (on/off) */ + uchar level2; /* extent to which Level-2 commands are used */ + uchar disconnect; /* disconnect/reselect policy */ + unsigned int args; /* set from command-line argument */ + uchar incoming_msg[8]; /* filled during message_in phase */ + int incoming_ptr; /* mainly used with EXTENDED messages */ + uchar outgoing_msg[8]; /* send this during next message_out */ + int outgoing_len; /* length of outgoing message */ + unsigned int default_sx_per; /* default transfer period for SCSI bus */ + uchar sync_xfer[8]; /* sync_xfer reg settings per target */ + uchar sync_stat[8]; /* status of sync negotiation per target */ + uchar no_sync; /* bitmask: don't do sync on these targets */ + uchar no_dma; /* set this flag to disable DMA */ + uchar dma_mode; /* DMA Burst Mode or Single Byte DMA */ + uchar fast; /* set this flag to enable Fast SCSI */ + struct sx_period sx_table[9]; /* transfer periods for actual DTC-setting */ +#ifdef PROC_INTERFACE + uchar proc; /* bitmask: what's in proc output */ +#ifdef PROC_STATISTICS + unsigned long cmd_cnt[8]; /* # of commands issued per target */ + unsigned long int_cnt; /* # of interrupts serviced */ + unsigned long pio_cnt; /* # of pio data transfers */ + unsigned long dma_cnt; /* # of DMA data transfers */ + unsigned long disc_allowed_cnt[8]; /* # of disconnects allowed per target */ + unsigned long disc_done_cnt[8]; /* # of disconnects done per target*/ +#endif +#endif + }; + +static inline struct scsi_pointer *WD33C93_scsi_pointer(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +/* defines for hostdata->chip */ + +#define C_WD33C93 0 +#define C_WD33C93A 1 +#define C_WD33C93B 2 +#define C_UNKNOWN_CHIP 100 + +/* defines for hostdata->state */ + +#define S_UNCONNECTED 0 +#define S_SELECTING 1 +#define S_RUNNING_LEVEL2 2 +#define S_CONNECTED 3 +#define S_PRE_TMP_DISC 4 +#define S_PRE_CMP_DISC 5 + +/* defines for hostdata->dma */ + +#define D_DMA_OFF 0 +#define D_DMA_RUNNING 1 + +/* defines for hostdata->level2 */ +/* NOTE: only the first 3 are implemented so far */ + +#define L2_NONE 1 /* no combination commands - we get lots of ints */ +#define L2_SELECT 2 /* start with SEL_ATN_XFER, but never resume it */ +#define L2_BASIC 3 /* resume after STATUS ints & RDP messages */ +#define L2_DATA 4 /* resume after DATA_IN/OUT ints */ +#define L2_MOST 5 /* resume after anything except a RESELECT int */ +#define L2_RESELECT 6 /* resume after everything, including RESELECT ints */ +#define L2_ALL 7 /* always resume */ + +/* defines for hostdata->disconnect */ + +#define DIS_NEVER 0 +#define DIS_ADAPTIVE 1 +#define DIS_ALWAYS 2 + +/* defines for hostdata->args */ + +#define DB_TEST1 1<<0 +#define DB_TEST2 1<<1 +#define DB_QUEUE_COMMAND 1<<2 +#define DB_EXECUTE 1<<3 +#define DB_INTR 1<<4 +#define DB_TRANSFER 1<<5 +#define DB_MASK 0x3f + +/* defines for hostdata->sync_stat[] */ + +#define SS_UNSET 0 +#define SS_FIRST 1 +#define SS_WAITING 2 +#define SS_SET 3 + +/* defines for 
hostdata->proc */ + +#define PR_VERSION 1<<0 +#define PR_INFO 1<<1 +#define PR_STATISTICS 1<<2 +#define PR_CONNECTED 1<<3 +#define PR_INPUTQ 1<<4 +#define PR_DISCQ 1<<5 +#define PR_TEST 1<<6 +#define PR_STOP 1<<7 + + +void wd33c93_init (struct Scsi_Host *instance, const wd33c93_regs regs, + dma_setup_t setup, dma_stop_t stop, int clock_freq); +int wd33c93_abort (struct scsi_cmnd *cmd); +int wd33c93_queuecommand (struct Scsi_Host *h, struct scsi_cmnd *cmd); +void wd33c93_intr (struct Scsi_Host *instance); +int wd33c93_show_info(struct seq_file *, struct Scsi_Host *); +int wd33c93_write_info(struct Scsi_Host *, char *, int); +int wd33c93_host_reset (struct scsi_cmnd *); + +#endif /* WD33C93_H */ diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c new file mode 100644 index 000000000..5a380eecf --- /dev/null +++ b/drivers/scsi/wd719x.c @@ -0,0 +1,995 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for Western Digital WD7193, WD7197 and WD7296 SCSI cards + * Copyright 2013 Ondrej Zary + * + * Original driver by + * Aaron Dewell + * Gaerti + * + * HW documentation available in book: + * + * SPIDER Command Protocol + * by Chandru M. Sippy + * SCSI Storage Products (MCP) + * Western Digital Corporation + * 09-15-95 + * + * http://web.archive.org/web/20070717175254/http://sun1.rrzn.uni-hannover.de/gaertner.juergen/wd719x/Linux/Docu/Spider/ + */ + +/* + * Driver workflow: + * 1. SCSI command is transformed to SCB (Spider Control Block) by the + * queuecommand function. + * 2. The address of the SCB is stored in a list to be able to access it, if + * something goes wrong. + * 3. The address of the SCB is written to the Controller, which loads the SCB + * via BM-DMA and processes it. + * 4. After it has finished, it generates an interrupt, and sets registers. 
+ * + * flaws: + * - abort/reset functions + * + * ToDo: + * - tagged queueing + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "wd719x.h" + +/* low-level register access */ +static inline u8 wd719x_readb(struct wd719x *wd, u8 reg) +{ + return ioread8(wd->base + reg); +} + +static inline u32 wd719x_readl(struct wd719x *wd, u8 reg) +{ + return ioread32(wd->base + reg); +} + +static inline void wd719x_writeb(struct wd719x *wd, u8 reg, u8 val) +{ + iowrite8(val, wd->base + reg); +} + +static inline void wd719x_writew(struct wd719x *wd, u8 reg, u16 val) +{ + iowrite16(val, wd->base + reg); +} + +static inline void wd719x_writel(struct wd719x *wd, u8 reg, u32 val) +{ + iowrite32(val, wd->base + reg); +} + +/* wait until the command register is ready */ +static inline int wd719x_wait_ready(struct wd719x *wd) +{ + int i = 0; + + do { + if (wd719x_readb(wd, WD719X_AMR_COMMAND) == WD719X_CMD_READY) + return 0; + udelay(1); + } while (i++ < WD719X_WAIT_FOR_CMD_READY); + + dev_err(&wd->pdev->dev, "command register is not ready: 0x%02x\n", + wd719x_readb(wd, WD719X_AMR_COMMAND)); + + return -ETIMEDOUT; +} + +/* poll interrupt status register until command finishes */ +static inline int wd719x_wait_done(struct wd719x *wd, int timeout) +{ + u8 status; + + while (timeout > 0) { + status = wd719x_readb(wd, WD719X_AMR_INT_STATUS); + if (status) + break; + timeout--; + udelay(1); + } + + if (timeout <= 0) { + dev_err(&wd->pdev->dev, "direct command timed out\n"); + return -ETIMEDOUT; + } + + if (status != WD719X_INT_NOERRORS) { + u8 sue = wd719x_readb(wd, WD719X_AMR_SCB_ERROR); + /* we get this after wd719x_dev_reset, it's not an error */ + if (sue == WD719X_SUE_TERM) + return 0; + /* we get this after wd719x_bus_reset, it's not an error */ + if (sue == WD719X_SUE_RESET) + return 0; + dev_err(&wd->pdev->dev, "direct command failed, status 0x%02x, SUE 0x%02x\n", + status, sue); + return -EIO; + } + + return 0; +} + +static int wd719x_direct_cmd(struct wd719x *wd, u8 opcode, u8 dev, u8 lun, + u8 tag, dma_addr_t data, int timeout) +{ + int ret = 0; + + /* clear interrupt status register (allow command register to clear) */ + wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE); + + /* Wait for the Command register to become free */ + if (wd719x_wait_ready(wd)) + return -ETIMEDOUT; + + /* disable interrupts except for RESET/ABORT (it breaks them) */ + if (opcode != WD719X_CMD_BUSRESET && opcode != WD719X_CMD_ABORT && + opcode != WD719X_CMD_ABORT_TAG && opcode != WD719X_CMD_RESET) + dev |= WD719X_DISABLE_INT; + wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, dev); + wd719x_writeb(wd, WD719X_AMR_CMD_PARAM_2, lun); + wd719x_writeb(wd, WD719X_AMR_CMD_PARAM_3, tag); + if (data) + wd719x_writel(wd, WD719X_AMR_SCB_IN, data); + + /* clear interrupt status register again */ + wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE); + + /* Now, write the command */ + wd719x_writeb(wd, WD719X_AMR_COMMAND, opcode); + + if (timeout) /* wait for the command to complete */ + ret = wd719x_wait_done(wd, timeout); + + /* clear interrupt status register (clean up) */ + if (opcode != WD719X_CMD_READ_FIRMVER) + wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE); + + return ret; +} + +static void wd719x_destroy(struct wd719x *wd) +{ + /* stop the RISC */ + if (wd719x_direct_cmd(wd, WD719X_CMD_SLEEP, 0, 0, 0, 0, + WD719X_WAIT_FOR_RISC)) + dev_warn(&wd->pdev->dev, "RISC sleep command failed\n"); + /* disable RISC */ + wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0); + + 
WARN_ON_ONCE(!list_empty(&wd->active_scbs)); + + /* free internal buffers */ + dma_free_coherent(&wd->pdev->dev, wd->fw_size, wd->fw_virt, + wd->fw_phys); + wd->fw_virt = NULL; + dma_free_coherent(&wd->pdev->dev, WD719X_HASH_TABLE_SIZE, wd->hash_virt, + wd->hash_phys); + wd->hash_virt = NULL; + dma_free_coherent(&wd->pdev->dev, sizeof(struct wd719x_host_param), + wd->params, wd->params_phys); + wd->params = NULL; + free_irq(wd->pdev->irq, wd); +} + +/* finish a SCSI command, unmap buffers */ +static void wd719x_finish_cmd(struct wd719x_scb *scb, int result) +{ + struct scsi_cmnd *cmd = scb->cmd; + struct wd719x *wd = shost_priv(cmd->device->host); + + list_del(&scb->list); + + dma_unmap_single(&wd->pdev->dev, scb->phys, + sizeof(struct wd719x_scb), DMA_BIDIRECTIONAL); + scsi_dma_unmap(cmd); + dma_unmap_single(&wd->pdev->dev, scb->dma_handle, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + + cmd->result = result << 16; + scsi_done(cmd); +} + +/* Build a SCB and send it to the card */ +static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) +{ + int i, count_sg; + unsigned long flags; + struct wd719x_scb *scb = scsi_cmd_priv(cmd); + struct wd719x *wd = shost_priv(sh); + + scb->cmd = cmd; + + scb->CDB_tag = 0; /* Tagged queueing not supported yet */ + scb->devid = cmd->device->id; + scb->lun = cmd->device->lun; + + /* copy the command */ + memcpy(scb->CDB, cmd->cmnd, cmd->cmd_len); + + /* map SCB */ + scb->phys = dma_map_single(&wd->pdev->dev, scb, sizeof(*scb), + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(&wd->pdev->dev, scb->phys)) + goto out_error; + + /* map sense buffer */ + scb->sense_buf_length = SCSI_SENSE_BUFFERSIZE; + scb->dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&wd->pdev->dev, scb->dma_handle)) + goto out_unmap_scb; + scb->sense_buf = cpu_to_le32(scb->dma_handle); + + /* request autosense */ + scb->SCB_options |= WD719X_SCB_FLAGS_AUTO_REQUEST_SENSE; + + /* check direction */ + if (cmd->sc_data_direction == DMA_TO_DEVICE) + scb->SCB_options |= WD719X_SCB_FLAGS_CHECK_DIRECTION + | WD719X_SCB_FLAGS_PCI_TO_SCSI; + else if (cmd->sc_data_direction == DMA_FROM_DEVICE) + scb->SCB_options |= WD719X_SCB_FLAGS_CHECK_DIRECTION; + + /* Scatter/gather */ + count_sg = scsi_dma_map(cmd); + if (count_sg < 0) + goto out_unmap_sense; + BUG_ON(count_sg > WD719X_SG); + + if (count_sg) { + struct scatterlist *sg; + + scb->data_length = cpu_to_le32(count_sg * + sizeof(struct wd719x_sglist)); + scb->data_p = cpu_to_le32(scb->phys + + offsetof(struct wd719x_scb, sg_list)); + + scsi_for_each_sg(cmd, sg, count_sg, i) { + scb->sg_list[i].ptr = cpu_to_le32(sg_dma_address(sg)); + scb->sg_list[i].length = cpu_to_le32(sg_dma_len(sg)); + } + scb->SCB_options |= WD719X_SCB_FLAGS_DO_SCATTER_GATHER; + } else { /* zero length */ + scb->data_length = 0; + scb->data_p = 0; + } + + spin_lock_irqsave(wd->sh->host_lock, flags); + + /* check if the Command register is free */ + if (wd719x_readb(wd, WD719X_AMR_COMMAND) != WD719X_CMD_READY) { + spin_unlock_irqrestore(wd->sh->host_lock, flags); + return SCSI_MLQUEUE_HOST_BUSY; + } + + list_add(&scb->list, &wd->active_scbs); + + /* write pointer to the AMR */ + wd719x_writel(wd, WD719X_AMR_SCB_IN, scb->phys); + /* send SCB opcode */ + wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_PROCESS_SCB); + + spin_unlock_irqrestore(wd->sh->host_lock, flags); + return 0; + +out_unmap_sense: + dma_unmap_single(&wd->pdev->dev, scb->dma_handle, + SCSI_SENSE_BUFFERSIZE,
DMA_FROM_DEVICE); +out_unmap_scb: + dma_unmap_single(&wd->pdev->dev, scb->phys, sizeof(*scb), + DMA_BIDIRECTIONAL); +out_error: + cmd->result = DID_ERROR << 16; + scsi_done(cmd); + return 0; +} + +static int wd719x_chip_init(struct wd719x *wd) +{ + int i, ret; + u32 risc_init[3]; + const struct firmware *fw_wcs, *fw_risc; + const char fwname_wcs[] = "wd719x-wcs.bin"; + const char fwname_risc[] = "wd719x-risc.bin"; + + memset(wd->hash_virt, 0, WD719X_HASH_TABLE_SIZE); + + /* WCS (sequencer) firmware */ + ret = request_firmware(&fw_wcs, fwname_wcs, &wd->pdev->dev); + if (ret) { + dev_err(&wd->pdev->dev, "Unable to load firmware %s: %d\n", + fwname_wcs, ret); + return ret; + } + /* RISC firmware */ + ret = request_firmware(&fw_risc, fwname_risc, &wd->pdev->dev); + if (ret) { + dev_err(&wd->pdev->dev, "Unable to load firmware %s: %d\n", + fwname_risc, ret); + release_firmware(fw_wcs); + return ret; + } + wd->fw_size = ALIGN(fw_wcs->size, 4) + fw_risc->size; + + if (!wd->fw_virt) + wd->fw_virt = dma_alloc_coherent(&wd->pdev->dev, wd->fw_size, + &wd->fw_phys, GFP_KERNEL); + if (!wd->fw_virt) { + ret = -ENOMEM; + goto wd719x_init_end; + } + + /* make a fresh copy of WCS and RISC code */ + memcpy(wd->fw_virt, fw_wcs->data, fw_wcs->size); + memcpy(wd->fw_virt + ALIGN(fw_wcs->size, 4), fw_risc->data, + fw_risc->size); + + /* Reset the Spider Chip and adapter itself */ + wd719x_writeb(wd, WD719X_PCI_PORT_RESET, WD719X_PCI_RESET); + udelay(WD719X_WAIT_FOR_RISC); + /* Clear PIO mode bits set by BIOS */ + wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, 0); + /* ensure RISC is not running */ + wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0); + /* ensure command port is ready */ + wd719x_writeb(wd, WD719X_AMR_COMMAND, 0); + if (wd719x_wait_ready(wd)) { + ret = -ETIMEDOUT; + goto wd719x_init_end; + } + + /* Transfer the first 2K words of RISC code to kick start the uP */ + risc_init[0] = wd->fw_phys; /* WCS FW */ + risc_init[1] = wd->fw_phys + ALIGN(fw_wcs->size, 4); /* RISC FW */ + risc_init[2] = wd->hash_phys; /* hash table */ + + /* clear DMA status */ + wd719x_writeb(wd, WD719X_PCI_CHANNEL2_3STATUS, 0); + + /* address to read firmware from */ + wd719x_writel(wd, WD719X_PCI_EXTERNAL_ADDR, risc_init[1]); + /* base address to write firmware to (on card) */ + wd719x_writew(wd, WD719X_PCI_INTERNAL_ADDR, WD719X_PRAM_BASE_ADDR); + /* size: first 2K words */ + wd719x_writew(wd, WD719X_PCI_DMA_TRANSFER_SIZE, 2048 * 2); + /* start DMA */ + wd719x_writeb(wd, WD719X_PCI_CHANNEL2_3CMD, WD719X_START_CHANNEL2_3DMA); + + /* wait for DMA to complete */ + i = WD719X_WAIT_FOR_RISC; + while (i-- > 0) { + u8 status = wd719x_readb(wd, WD719X_PCI_CHANNEL2_3STATUS); + if (status == WD719X_START_CHANNEL2_3DONE) + break; + if (status == WD719X_START_CHANNEL2_3ABORT) { + dev_warn(&wd->pdev->dev, "RISC bootstrap failed: DMA aborted\n"); + ret = -EIO; + goto wd719x_init_end; + } + udelay(1); + } + if (i < 1) { + dev_warn(&wd->pdev->dev, "RISC bootstrap failed: DMA timeout\n"); + ret = -ETIMEDOUT; + goto wd719x_init_end; + } + + /* firmware is loaded, now initialize and wake up the RISC */ + /* write RISC initialization long words to Spider */ + wd719x_writel(wd, WD719X_AMR_SCB_IN, risc_init[0]); + wd719x_writel(wd, WD719X_AMR_SCB_IN + 4, risc_init[1]); + wd719x_writel(wd, WD719X_AMR_SCB_IN + 8, risc_init[2]); + + /* disable interrupts during initialization of RISC */ + wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, WD719X_DISABLE_INT); + + /* issue INITIALIZE RISC command */ + wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_INIT_RISC); + /* enable
advanced mode (wake up RISC) */ + wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, WD719X_ENABLE_ADVANCE_MODE); + udelay(WD719X_WAIT_FOR_RISC); + + ret = wd719x_wait_done(wd, WD719X_WAIT_FOR_RISC); + /* clear interrupt status register */ + wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE); + if (ret) { + dev_warn(&wd->pdev->dev, "Unable to initialize RISC\n"); + goto wd719x_init_end; + } + /* RISC is up and running */ + + /* Read FW version from RISC */ + ret = wd719x_direct_cmd(wd, WD719X_CMD_READ_FIRMVER, 0, 0, 0, 0, + WD719X_WAIT_FOR_RISC); + if (ret) { + dev_warn(&wd->pdev->dev, "Unable to read firmware version\n"); + goto wd719x_init_end; + } + dev_info(&wd->pdev->dev, "RISC initialized with firmware version %.2x.%.2x\n", + wd719x_readb(wd, WD719X_AMR_SCB_OUT + 1), + wd719x_readb(wd, WD719X_AMR_SCB_OUT)); + + /* RESET SCSI bus */ + ret = wd719x_direct_cmd(wd, WD719X_CMD_BUSRESET, 0, 0, 0, 0, + WD719X_WAIT_FOR_SCSI_RESET); + if (ret) { + dev_warn(&wd->pdev->dev, "SCSI bus reset failed\n"); + goto wd719x_init_end; + } + + /* use HostParameter structure to set Spider's Host Parameter Block */ + ret = wd719x_direct_cmd(wd, WD719X_CMD_SET_PARAM, 0, + sizeof(struct wd719x_host_param), 0, + wd->params_phys, WD719X_WAIT_FOR_RISC); + if (ret) { + dev_warn(&wd->pdev->dev, "Failed to set HOST PARAMETERS\n"); + goto wd719x_init_end; + } + + /* initiate SCAM (does nothing if disabled in BIOS) */ + /* bug?: we should pass a mask of static IDs which we don't have */ + ret = wd719x_direct_cmd(wd, WD719X_CMD_INIT_SCAM, 0, 0, 0, 0, + WD719X_WAIT_FOR_SCSI_RESET); + if (ret) { + dev_warn(&wd->pdev->dev, "SCAM initialization failed\n"); + goto wd719x_init_end; + } + + /* clear AMR_BIOS_SHARE_INT register */ + wd719x_writeb(wd, WD719X_AMR_BIOS_SHARE_INT, 0); + +wd719x_init_end: + release_firmware(fw_wcs); + release_firmware(fw_risc); + + return ret; +} + +static int wd719x_abort(struct scsi_cmnd *cmd) +{ + int action, result; + unsigned long flags; + struct wd719x_scb *scb = scsi_cmd_priv(cmd); + struct wd719x *wd = shost_priv(cmd->device->host); + struct device *dev = &wd->pdev->dev; + + dev_info(dev, "abort command, tag: %x\n", scsi_cmd_to_rq(cmd)->tag); + + action = WD719X_CMD_ABORT; + + spin_lock_irqsave(wd->sh->host_lock, flags); + result = wd719x_direct_cmd(wd, action, cmd->device->id, + cmd->device->lun, scsi_cmd_to_rq(cmd)->tag, + scb->phys, 0); + wd719x_finish_cmd(scb, DID_ABORT); + spin_unlock_irqrestore(wd->sh->host_lock, flags); + if (result) + return FAILED; + + return SUCCESS; +} + +static int wd719x_reset(struct scsi_cmnd *cmd, u8 opcode, u8 device) +{ + int result; + unsigned long flags; + struct wd719x *wd = shost_priv(cmd->device->host); + struct wd719x_scb *scb, *tmp; + + dev_info(&wd->pdev->dev, "%s reset requested\n", + (opcode == WD719X_CMD_BUSRESET) ? 
"bus" : "device"); + + spin_lock_irqsave(wd->sh->host_lock, flags); + result = wd719x_direct_cmd(wd, opcode, device, 0, 0, 0, + WD719X_WAIT_FOR_SCSI_RESET); + /* flush all SCBs (or all for a device if dev_reset) */ + list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list) { + if (opcode == WD719X_CMD_BUSRESET || + scb->cmd->device->id == device) + wd719x_finish_cmd(scb, DID_RESET); + } + spin_unlock_irqrestore(wd->sh->host_lock, flags); + if (result) + return FAILED; + + return SUCCESS; +} + +static int wd719x_dev_reset(struct scsi_cmnd *cmd) +{ + return wd719x_reset(cmd, WD719X_CMD_RESET, cmd->device->id); +} + +static int wd719x_bus_reset(struct scsi_cmnd *cmd) +{ + return wd719x_reset(cmd, WD719X_CMD_BUSRESET, 0); +} + +static int wd719x_host_reset(struct scsi_cmnd *cmd) +{ + struct wd719x *wd = shost_priv(cmd->device->host); + struct wd719x_scb *scb, *tmp; + unsigned long flags; + + dev_info(&wd->pdev->dev, "host reset requested\n"); + spin_lock_irqsave(wd->sh->host_lock, flags); + /* stop the RISC */ + if (wd719x_direct_cmd(wd, WD719X_CMD_SLEEP, 0, 0, 0, 0, + WD719X_WAIT_FOR_RISC)) + dev_warn(&wd->pdev->dev, "RISC sleep command failed\n"); + /* disable RISC */ + wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0); + + /* flush all SCBs */ + list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list) + wd719x_finish_cmd(scb, DID_RESET); + spin_unlock_irqrestore(wd->sh->host_lock, flags); + + /* Try to reinit the RISC */ + return wd719x_chip_init(wd) == 0 ? SUCCESS : FAILED; +} + +static int wd719x_biosparam(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int geom[]) +{ + if (capacity >= 0x200000) { + geom[0] = 255; /* heads */ + geom[1] = 63; /* sectors */ + } else { + geom[0] = 64; /* heads */ + geom[1] = 32; /* sectors */ + } + geom[2] = sector_div(capacity, geom[0] * geom[1]); /* cylinders */ + + return 0; +} + +/* process a SCB-completion interrupt */ +static inline void wd719x_interrupt_SCB(struct wd719x *wd, + union wd719x_regs regs, + struct wd719x_scb *scb) +{ + int result; + + /* now have to find result from card */ + switch (regs.bytes.SUE) { + case WD719X_SUE_NOERRORS: + result = DID_OK; + break; + case WD719X_SUE_REJECTED: + dev_err(&wd->pdev->dev, "command rejected\n"); + result = DID_ERROR; + break; + case WD719X_SUE_SCBQFULL: + dev_err(&wd->pdev->dev, "SCB queue is full\n"); + result = DID_ERROR; + break; + case WD719X_SUE_TERM: + dev_dbg(&wd->pdev->dev, "SCB terminated by direct command\n"); + result = DID_ABORT; /* or DID_RESET? 
*/ + break; + case WD719X_SUE_CHAN1ABORT: + case WD719X_SUE_CHAN23ABORT: + result = DID_ABORT; + dev_err(&wd->pdev->dev, "DMA abort\n"); + break; + case WD719X_SUE_CHAN1PAR: + case WD719X_SUE_CHAN23PAR: + result = DID_PARITY; + dev_err(&wd->pdev->dev, "DMA parity error\n"); + break; + case WD719X_SUE_TIMEOUT: + result = DID_TIME_OUT; + dev_dbg(&wd->pdev->dev, "selection timeout\n"); + break; + case WD719X_SUE_RESET: + dev_dbg(&wd->pdev->dev, "bus reset occurred\n"); + result = DID_RESET; + break; + case WD719X_SUE_BUSERROR: + dev_dbg(&wd->pdev->dev, "SCSI bus error\n"); + result = DID_ERROR; + break; + case WD719X_SUE_WRONGWAY: + dev_err(&wd->pdev->dev, "wrong data transfer direction\n"); + result = DID_ERROR; + break; + case WD719X_SUE_BADPHASE: + dev_err(&wd->pdev->dev, "invalid SCSI phase\n"); + result = DID_ERROR; + break; + case WD719X_SUE_TOOLONG: + dev_err(&wd->pdev->dev, "record too long\n"); + result = DID_ERROR; + break; + case WD719X_SUE_BUSFREE: + dev_err(&wd->pdev->dev, "unexpected bus free\n"); + result = DID_NO_CONNECT; /* or DID_ERROR ???*/ + break; + case WD719X_SUE_ARSDONE: + dev_dbg(&wd->pdev->dev, "auto request sense\n"); + if (regs.bytes.SCSI == 0) + result = DID_OK; + else + result = DID_PARITY; + break; + case WD719X_SUE_IGNORED: + dev_err(&wd->pdev->dev, "target id %d ignored command\n", + scb->cmd->device->id); + result = DID_NO_CONNECT; + break; + case WD719X_SUE_WRONGTAGS: + dev_err(&wd->pdev->dev, "reversed tags\n"); + result = DID_ERROR; + break; + case WD719X_SUE_BADTAGS: + dev_err(&wd->pdev->dev, "tag type not supported by target\n"); + result = DID_ERROR; + break; + case WD719X_SUE_NOSCAMID: + dev_err(&wd->pdev->dev, "no SCAM soft ID available\n"); + result = DID_ERROR; + break; + default: + dev_warn(&wd->pdev->dev, "unknown SUE error code: 0x%x\n", + regs.bytes.SUE); + result = DID_ERROR; + break; + } + + wd719x_finish_cmd(scb, result); +} + +static irqreturn_t wd719x_interrupt(int irq, void *dev_id) +{ + struct wd719x *wd = dev_id; + union wd719x_regs regs; + unsigned long flags; + u32 SCB_out; + + spin_lock_irqsave(wd->sh->host_lock, flags); + /* read SCB pointer back from card */ + SCB_out = wd719x_readl(wd, WD719X_AMR_SCB_OUT); + /* read all status info at once */ + regs.all = cpu_to_le32(wd719x_readl(wd, WD719X_AMR_OP_CODE)); + + switch (regs.bytes.INT) { + case WD719X_INT_NONE: + spin_unlock_irqrestore(wd->sh->host_lock, flags); + return IRQ_NONE; + case WD719X_INT_LINKNOSTATUS: + dev_err(&wd->pdev->dev, "linked command completed with no status\n"); + break; + case WD719X_INT_BADINT: + dev_err(&wd->pdev->dev, "unsolicited interrupt\n"); + break; + case WD719X_INT_NOERRORS: + case WD719X_INT_LINKNOERRORS: + case WD719X_INT_ERRORSLOGGED: + case WD719X_INT_SPIDERFAILED: + /* was the cmd completed a direct or SCB command? 
*/ + if (regs.bytes.OPC == WD719X_CMD_PROCESS_SCB) { + struct wd719x_scb *scb; + list_for_each_entry(scb, &wd->active_scbs, list) + if (SCB_out == scb->phys) + break; + if (SCB_out == scb->phys) + wd719x_interrupt_SCB(wd, regs, scb); + else + dev_err(&wd->pdev->dev, "card returned invalid SCB pointer\n"); + } else + dev_dbg(&wd->pdev->dev, "direct command 0x%x completed\n", + regs.bytes.OPC); + break; + case WD719X_INT_PIOREADY: + dev_err(&wd->pdev->dev, "card indicates PIO data ready but we never use PIO\n"); + /* interrupt will not be cleared until all data is read */ + break; + default: + dev_err(&wd->pdev->dev, "unknown interrupt reason: %d\n", + regs.bytes.INT); + + } + /* clear interrupt so another can happen */ + wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE); + spin_unlock_irqrestore(wd->sh->host_lock, flags); + + return IRQ_HANDLED; +} + +static void wd719x_eeprom_reg_read(struct eeprom_93cx6 *eeprom) +{ + struct wd719x *wd = eeprom->data; + u8 reg = wd719x_readb(wd, WD719X_PCI_GPIO_DATA); + + eeprom->reg_data_out = reg & WD719X_EE_DO; +} + +static void wd719x_eeprom_reg_write(struct eeprom_93cx6 *eeprom) +{ + struct wd719x *wd = eeprom->data; + u8 reg = 0; + + if (eeprom->reg_data_in) + reg |= WD719X_EE_DI; + if (eeprom->reg_data_clock) + reg |= WD719X_EE_CLK; + if (eeprom->reg_chip_select) + reg |= WD719X_EE_CS; + + wd719x_writeb(wd, WD719X_PCI_GPIO_DATA, reg); +} + +/* read config from EEPROM so it can be downloaded by the RISC on (re-)init */ +static void wd719x_read_eeprom(struct wd719x *wd) +{ + struct eeprom_93cx6 eeprom; + u8 gpio; + struct wd719x_eeprom_header header; + + eeprom.data = wd; + eeprom.register_read = wd719x_eeprom_reg_read; + eeprom.register_write = wd719x_eeprom_reg_write; + eeprom.width = PCI_EEPROM_WIDTH_93C46; + + /* set all outputs to low */ + wd719x_writeb(wd, WD719X_PCI_GPIO_DATA, 0); + /* configure GPIO pins */ + gpio = wd719x_readb(wd, WD719X_PCI_GPIO_CONTROL); + /* GPIO outputs */ + gpio &= (~(WD719X_EE_CLK | WD719X_EE_DI | WD719X_EE_CS)); + /* GPIO input */ + gpio |= WD719X_EE_DO; + wd719x_writeb(wd, WD719X_PCI_GPIO_CONTROL, gpio); + + /* read EEPROM header */ + eeprom_93cx6_multireadb(&eeprom, 0, (u8 *)&header, sizeof(header)); + + if (header.sig1 == 'W' && header.sig2 == 'D') + eeprom_93cx6_multireadb(&eeprom, header.cfg_offset, + (u8 *)wd->params, + sizeof(struct wd719x_host_param)); + else { /* default EEPROM values */ + dev_warn(&wd->pdev->dev, "EEPROM signature is invalid (0x%02x 0x%02x), using default values\n", + header.sig1, header.sig2); + wd->params->ch_1_th = 0x10; /* 16 DWs = 64 B */ + wd->params->scsi_conf = 0x4c; /* 48ma, spue, parity check */ + wd->params->own_scsi_id = 0x07; /* ID 7, SCAM disabled */ + wd->params->sel_timeout = 0x4d; /* 250 ms */ + wd->params->sleep_timer = 0x01; + wd->params->cdb_size = cpu_to_le16(0x5555); /* all 6 B */ + wd->params->scsi_pad = 0x1b; + if (wd->type == WD719X_TYPE_7193) /* narrow card - disable */ + wd->params->wide = cpu_to_le32(0x00000000); + else /* initiate & respond to WIDE messages */ + wd->params->wide = cpu_to_le32(0xffffffff); + wd->params->sync = cpu_to_le32(0xffffffff); + wd->params->soft_mask = 0x00; /* all disabled */ + wd->params->unsol_mask = 0x00; /* all disabled */ + } + /* disable TAGGED messages */ + wd->params->tag_en = cpu_to_le16(0x0000); +} + +/* Read card type from GPIO bits 1 and 3 */ +static enum wd719x_card_type wd719x_detect_type(struct wd719x *wd) +{ + u8 card = wd719x_readb(wd, WD719X_PCI_GPIO_CONTROL); + + card |= WD719X_GPIO_ID_BITS; + wd719x_writeb(wd, 
WD719X_PCI_GPIO_CONTROL, card); + card = wd719x_readb(wd, WD719X_PCI_GPIO_DATA) & WD719X_GPIO_ID_BITS; + switch (card) { + case 0x08: + return WD719X_TYPE_7193; + case 0x02: + return WD719X_TYPE_7197; + case 0x00: + return WD719X_TYPE_7296; + default: + dev_warn(&wd->pdev->dev, "unknown card type 0x%x\n", card); + return WD719X_TYPE_UNKNOWN; + } +} + +static int wd719x_board_found(struct Scsi_Host *sh) +{ + struct wd719x *wd = shost_priv(sh); + static const char * const card_types[] = { + "Unknown card", "WD7193", "WD7197", "WD7296" + }; + int ret; + + INIT_LIST_HEAD(&wd->active_scbs); + + sh->base = pci_resource_start(wd->pdev, 0); + + wd->type = wd719x_detect_type(wd); + + wd->sh = sh; + sh->irq = wd->pdev->irq; + wd->fw_virt = NULL; + + /* memory area for host (EEPROM) parameters */ + wd->params = dma_alloc_coherent(&wd->pdev->dev, + sizeof(struct wd719x_host_param), + &wd->params_phys, GFP_KERNEL); + if (!wd->params) { + dev_warn(&wd->pdev->dev, "unable to allocate parameter buffer\n"); + return -ENOMEM; + } + + /* memory area for the RISC for hash table of outstanding requests */ + wd->hash_virt = dma_alloc_coherent(&wd->pdev->dev, + WD719X_HASH_TABLE_SIZE, + &wd->hash_phys, GFP_KERNEL); + if (!wd->hash_virt) { + dev_warn(&wd->pdev->dev, "unable to allocate hash buffer\n"); + ret = -ENOMEM; + goto fail_free_params; + } + + ret = request_irq(wd->pdev->irq, wd719x_interrupt, IRQF_SHARED, + "wd719x", wd); + if (ret) { + dev_warn(&wd->pdev->dev, "unable to assign IRQ %d\n", + wd->pdev->irq); + goto fail_free_hash; + } + + /* read parameters from EEPROM */ + wd719x_read_eeprom(wd); + + ret = wd719x_chip_init(wd); + if (ret) + goto fail_free_irq; + + sh->this_id = wd->params->own_scsi_id & WD719X_EE_SCSI_ID_MASK; + + dev_info(&wd->pdev->dev, "%s at I/O 0x%lx, IRQ %u, SCSI ID %d\n", + card_types[wd->type], sh->base, sh->irq, sh->this_id); + + return 0; + +fail_free_irq: + free_irq(wd->pdev->irq, wd); +fail_free_hash: + dma_free_coherent(&wd->pdev->dev, WD719X_HASH_TABLE_SIZE, wd->hash_virt, + wd->hash_phys); +fail_free_params: + dma_free_coherent(&wd->pdev->dev, sizeof(struct wd719x_host_param), + wd->params, wd->params_phys); + + return ret; +} + +static const struct scsi_host_template wd719x_template = { + .module = THIS_MODULE, + .name = "Western Digital 719x", + .cmd_size = sizeof(struct wd719x_scb), + .queuecommand = wd719x_queuecommand, + .eh_abort_handler = wd719x_abort, + .eh_device_reset_handler = wd719x_dev_reset, + .eh_bus_reset_handler = wd719x_bus_reset, + .eh_host_reset_handler = wd719x_host_reset, + .bios_param = wd719x_biosparam, + .proc_name = "wd719x", + .can_queue = 255, + .this_id = 7, + .sg_tablesize = WD719X_SG, +}; + +static int wd719x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *d) +{ + int err; + struct Scsi_Host *sh; + struct wd719x *wd; + + err = pci_enable_device(pdev); + if (err) + goto fail; + + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_warn(&pdev->dev, "Unable to set 32-bit DMA mask\n"); + goto disable_device; + } + + err = pci_request_regions(pdev, "wd719x"); + if (err) + goto disable_device; + pci_set_master(pdev); + + err = -ENODEV; + if (pci_resource_len(pdev, 0) == 0) + goto release_region; + + err = -ENOMEM; + sh = scsi_host_alloc(&wd719x_template, sizeof(struct wd719x)); + if (!sh) + goto release_region; + + wd = shost_priv(sh); + wd->base = pci_iomap(pdev, 0, 0); + if (!wd->base) + goto free_host; + wd->pdev = pdev; + + err = wd719x_board_found(sh); + if (err) + goto unmap; + + err = scsi_add_host(sh, 
&wd->pdev->dev); + if (err) + goto destroy; + + scsi_scan_host(sh); + + pci_set_drvdata(pdev, sh); + return 0; + +destroy: + wd719x_destroy(wd); +unmap: + pci_iounmap(pdev, wd->base); +free_host: + scsi_host_put(sh); +release_region: + pci_release_regions(pdev); +disable_device: + pci_disable_device(pdev); +fail: + return err; +} + + +static void wd719x_pci_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *sh = pci_get_drvdata(pdev); + struct wd719x *wd = shost_priv(sh); + + scsi_remove_host(sh); + wd719x_destroy(wd); + pci_iounmap(pdev, wd->base); + pci_release_regions(pdev); + pci_disable_device(pdev); + + scsi_host_put(sh); +} + +static const struct pci_device_id wd719x_pci_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) }, + {} +}; + +MODULE_DEVICE_TABLE(pci, wd719x_pci_table); + +static struct pci_driver wd719x_pci_driver = { + .name = "wd719x", + .id_table = wd719x_pci_table, + .probe = wd719x_pci_probe, + .remove = wd719x_pci_remove, +}; + +module_pci_driver(wd719x_pci_driver); + +MODULE_DESCRIPTION("Western Digital WD7193/7197/7296 SCSI driver"); +MODULE_AUTHOR("Ondrej Zary, Aaron Dewell, Juergen Gaertner"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("wd719x-wcs.bin"); +MODULE_FIRMWARE("wd719x-risc.bin"); diff --git a/drivers/scsi/wd719x.h b/drivers/scsi/wd719x.h new file mode 100644 index 000000000..966ab0fb4 --- /dev/null +++ b/drivers/scsi/wd719x.h @@ -0,0 +1,248 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _WD719X_H_ +#define _WD719X_H_ + +#define WD719X_SG 255 /* Scatter/gather size */ + +struct wd719x_sglist { + __le32 ptr; + __le32 length; +} __packed; + +enum wd719x_card_type { + WD719X_TYPE_UNKNOWN = 0, + WD719X_TYPE_7193, + WD719X_TYPE_7197, + WD719X_TYPE_7296, +}; + +union wd719x_regs { + __le32 all; /* All Status at once */ + struct { + u8 OPC; /* Opcode register */ + u8 SCSI; /* SCSI Errors */ + u8 SUE; /* Spider unique Errors */ + u8 INT; /* Interrupt Status */ + } bytes; +}; + +/* Spider Command Block (SCB) */ +struct wd719x_scb { + __le32 Int_SCB; /* 00-03 Internal SCB link pointer (must be cleared) */ + u8 SCB_opcode; /* 04 SCB Command opcode */ + u8 CDB_tag; /* 05 SCSI Tag byte for CDB queues (0 if untagged) */ + u8 lun; /* 06 SCSI LUN */ + u8 devid; /* 07 SCSI Device ID */ + u8 CDB[16]; /* 08-23 SCSI CDB (16 bytes as defined by ANSI spec. 
*/ + __le32 data_p; /* 24-27 Data transfer address (or SG list address) */ + __le32 data_length; /* 28-31 Data transfer Length (or SG list length) */ + __le32 CDB_link; /* 32-35 SCSI CDB Link Ptr */ + __le32 sense_buf; /* 36-39 Auto request sense buffer address */ + u8 sense_buf_length;/* 40 Auto request sense transfer length */ + u8 reserved; /* 41 reserved */ + u8 SCB_options; /* 42 SCB-options */ + u8 SCB_tag_msg; /* 43 Tagged messages options */ + /* Not filled in by host */ + __le32 req_ptr; /* 44-47 Ptr to Host Request returned on interrupt */ + u8 host_opcode; /* 48 Host Command Opcode (same as AMR_00) */ + u8 scsi_stat; /* 49 SCSI Status returned */ + u8 ret_error; /* 50 SPIDER Unique Error Code returned (SUE) */ + u8 int_stat; /* 51 Message u8 / Interrupt Status byte returned */ + __le32 transferred; /* 52-55 Bytes Transferred */ + u8 last_trans[3]; /* 56-58 Bytes Transferred in last session */ + u8 length; /* 59 SCSI Messages Length (1-8) */ + u8 sync_offset; /* 60 Synchronous offset */ + u8 sync_rate; /* 61 Synchronous rate */ + u8 flags[2]; /* 62-63 SCB specific flags (local to each thread) */ + /* everything below is for driver use (not used by card) */ + dma_addr_t phys; /* bus address of the SCB */ + dma_addr_t dma_handle; + struct scsi_cmnd *cmd; /* a copy of the pointer we were passed */ + struct list_head list; + struct wd719x_sglist sg_list[WD719X_SG] __aligned(8); /* SG list */ +} __packed; + +struct wd719x { + struct Scsi_Host *sh; /* pointer to host structure */ + struct pci_dev *pdev; + void __iomem *base; + enum wd719x_card_type type; /* type of card */ + void *fw_virt; /* firmware buffer CPU address */ + dma_addr_t fw_phys; /* firmware buffer bus address */ + size_t fw_size; /* firmware buffer size */ + struct wd719x_host_param *params; /* host parameters (EEPROM) */ + dma_addr_t params_phys; /* host parameters bus address */ + void *hash_virt; /* hash table CPU address */ + dma_addr_t hash_phys; /* hash table bus address */ + struct list_head active_scbs; +}; + +/* timeout delays in microsecs */ +#define WD719X_WAIT_FOR_CMD_READY 500 +#define WD719X_WAIT_FOR_RISC 2000 +#define WD719X_WAIT_FOR_SCSI_RESET 3000000 + +/* All commands except 0x00 generate an interrupt */ +#define WD719X_CMD_READY 0x00 /* Command register ready (or noop) */ +#define WD719X_CMD_INIT_RISC 0x01 /* Initialize RISC */ +/* 0x02 is reserved */ +#define WD719X_CMD_BUSRESET 0x03 /* Assert SCSI bus reset */ +#define WD719X_CMD_READ_FIRMVER 0x04 /* Read the Firmware Revision */ +#define WD719X_CMD_ECHO_BYTES 0x05 /* Echo command bytes (DW) */ +/* 0x06 is reserved */ +/* 0x07 is reserved */ +#define WD719X_CMD_GET_PARAM 0x08 /* Get programmable parameters */ +#define WD719X_CMD_SET_PARAM 0x09 /* Set programmable parameters */ +#define WD719X_CMD_SLEEP 0x0a /* Put SPIDER to sleep */ +#define WD719X_CMD_READ_INIT 0x0b /* Read initialization parameters */ +#define WD719X_CMD_RESTORE_INIT 0x0c /* Restore initialization parameters */ +/* 0x0d is reserved */ +/* 0x0e is reserved */ +/* 0x0f is reserved */ +#define WD719X_CMD_ABORT_TAG 0x10 /* Send Abort tag message to target */ +#define WD719X_CMD_ABORT 0x11 /* Send Abort message to target */ +#define WD719X_CMD_RESET 0x12 /* Send Reset message to target */ +#define WD719X_CMD_INIT_SCAM 0x13 /* Initiate SCAM */ +#define WD719X_CMD_GET_SYNC 0x14 /* Get synchronous rates */ +#define WD719X_CMD_SET_SYNC 0x15 /* Set synchronous rates */ +#define WD719X_CMD_GET_WIDTH 0x16 /* Get SCSI bus width */ +#define WD719X_CMD_SET_WIDTH 0x17 /* Set SCSI bus width */ 
+#define WD719X_CMD_GET_TAGS 0x18 /* Get tag flags */ +#define WD719X_CMD_SET_TAGS 0x19 /* Set tag flags */ +#define WD719X_CMD_GET_PARAM2 0x1a /* Get programmable params (format 2) */ +#define WD719X_CMD_SET_PARAM2 0x1b /* Set programmable params (format 2) */ +/* Commands with request pointers (mailbox) */ +#define WD719X_CMD_PROCESS_SCB 0x80 /* Process SCSI Control Block (SCB) */ +/* No interrupt generated on acceptance of SCB pointer */ + +/* interrupt status defines */ +#define WD719X_INT_NONE 0x00 /* No interrupt pending */ +#define WD719X_INT_NOERRORS 0x01 /* Command completed with no errors */ +#define WD719X_INT_LINKNOERRORS 0x02 /* link cmd completed with no errors */ +#define WD719X_INT_LINKNOSTATUS 0x03 /* link cmd completed with no flag set */ +#define WD719X_INT_ERRORSLOGGED 0x04 /* cmd completed with errors logged */ +#define WD719X_INT_SPIDERFAILED 0x05 /* cmd failed without valid SCSI status */ +#define WD719X_INT_BADINT 0x80 /* unsolicited interrupt */ +#define WD719X_INT_PIOREADY 0xf0 /* data ready for PIO output */ + +/* Spider Unique Error Codes (SUE) */ +#define WD719X_SUE_NOERRORS 0x00 /* No errors detected by SPIDER */ +#define WD719X_SUE_REJECTED 0x01 /* Command Rejected (bad opcode/param) */ +#define WD719X_SUE_SCBQFULL 0x02 /* SCB queue full */ +/* 0x03 is reserved */ +#define WD719X_SUE_TERM 0x04 /* Host terminated SCB via primitive cmd */ +#define WD719X_SUE_CHAN1PAR 0x05 /* PCI Channel 1 parity error occurred */ +#define WD719X_SUE_CHAN1ABORT 0x06 /* PCI Channel 1 system abort occurred */ +#define WD719X_SUE_CHAN23PAR 0x07 /* PCI Channel 2/3 parity error occurred */ +#define WD719X_SUE_CHAN23ABORT 0x08 /* PCI Channel 2/3 system abort occurred */ +#define WD719X_SUE_TIMEOUT 0x10 /* Selection/reselection timeout */ +#define WD719X_SUE_RESET 0x11 /* SCSI bus reset occurred */ +#define WD719X_SUE_BUSERROR 0x12 /* SCSI bus error */ +#define WD719X_SUE_WRONGWAY 0x13 /* Wrong data transfer dir set by target */ +#define WD719X_SUE_BADPHASE 0x14 /* SCSI phase illegal or unexpected */ +#define WD719X_SUE_TOOLONG 0x15 /* target requested too much data */ +#define WD719X_SUE_BUSFREE 0x16 /* Unexpected SCSI bus free */ +#define WD719X_SUE_ARSDONE 0x17 /* Auto request sense executed */ +#define WD719X_SUE_IGNORED 0x18 /* SCSI message was ignored by target */ +#define WD719X_SUE_WRONGTAGS 0x19 /* Tagged SCB & tags off (or vice versa) */ +#define WD719X_SUE_BADTAGS 0x1a /* Wrong tag message type for target */ +#define WD719X_SUE_NOSCAMID 0x1b /* No SCAM soft ID available */ + +/* code sizes */ +#define WD719X_HASH_TABLE_SIZE 4096 + +/* Advanced Mode Registers */ +/* Regs 0x00..0x1f are for Advanced Mode of the card (RISC is running). */ +#define WD719X_AMR_COMMAND 0x00 +#define WD719X_AMR_CMD_PARAM 0x01 +#define WD719X_AMR_CMD_PARAM_2 0x02 +#define WD719X_AMR_CMD_PARAM_3 0x03 +#define WD719X_AMR_SCB_IN 0x04 + +#define WD719X_AMR_BIOS_SHARE_INT 0x0f + +#define WD719X_AMR_SCB_OUT 0x18 +#define WD719X_AMR_OP_CODE 0x1c +#define WD719X_AMR_SCSI_STATUS 0x1d +#define WD719X_AMR_SCB_ERROR 0x1e +#define WD719X_AMR_INT_STATUS 0x1f + +#define WD719X_DISABLE_INT 0x80 + +/* SCB flags */ +#define WD719X_SCB_FLAGS_CHECK_DIRECTION 0x01 +#define WD719X_SCB_FLAGS_PCI_TO_SCSI 0x02 +#define WD719X_SCB_FLAGS_AUTO_REQUEST_SENSE 0x10 +#define WD719X_SCB_FLAGS_DO_SCATTER_GATHER 0x20 +#define WD719X_SCB_FLAGS_NO_DISCONNECT 0x40 + +/* PCI Registers used for reset, initial code download */ +/* Regs 0x20..0x3f are for Normal (DOS) mode (RISC is asleep).
*/ +#define WD719X_PCI_GPIO_CONTROL 0x3C +#define WD719X_PCI_GPIO_DATA 0x3D +#define WD719X_PCI_PORT_RESET 0x3E +#define WD719X_PCI_MODE_SELECT 0x3F + +#define WD719X_PCI_EXTERNAL_ADDR 0x60 +#define WD719X_PCI_INTERNAL_ADDR 0x64 +#define WD719X_PCI_DMA_TRANSFER_SIZE 0x66 +#define WD719X_PCI_CHANNEL2_3CMD 0x68 +#define WD719X_PCI_CHANNEL2_3STATUS 0x69 + +#define WD719X_GPIO_ID_BITS 0x0a +#define WD719X_PRAM_BASE_ADDR 0x00 + +/* codes written to or read from the card */ +#define WD719X_PCI_RESET 0x01 +#define WD719X_ENABLE_ADVANCE_MODE 0x01 + +#define WD719X_START_CHANNEL2_3DMA 0x17 +#define WD719X_START_CHANNEL2_3DONE 0x01 +#define WD719X_START_CHANNEL2_3ABORT 0x20 + +/* 33C296 GPIO bits for EEPROM pins */ +#define WD719X_EE_DI (1 << 1) +#define WD719X_EE_CS (1 << 2) +#define WD719X_EE_CLK (1 << 3) +#define WD719X_EE_DO (1 << 4) + +/* EEPROM contents */ +struct wd719x_eeprom_header { + u8 sig1; + u8 sig2; + u8 version; + u8 checksum; + u8 cfg_offset; + u8 cfg_size; + u8 setup_offset; + u8 setup_size; +} __packed; + +#define WD719X_EE_SIG1 0 +#define WD719X_EE_SIG2 1 +#define WD719X_EE_VERSION 2 +#define WD719X_EE_CHECKSUM 3 +#define WD719X_EE_CFG_OFFSET 4 +#define WD719X_EE_CFG_SIZE 5 +#define WD719X_EE_SETUP_OFFSET 6 +#define WD719X_EE_SETUP_SIZE 7 + +#define WD719X_EE_SCSI_ID_MASK 0xf + +/* SPIDER Host Parameters Block (=EEPROM configuration block) */ +struct wd719x_host_param { + u8 ch_1_th; /* FIFO threshold */ + u8 scsi_conf; /* SCSI configuration */ + u8 own_scsi_id; /* controller SCSI ID */ + u8 sel_timeout; /* selection timeout */ + u8 sleep_timer; /* sleep timer */ + __le16 cdb_size; /* CDB size groups */ + __le16 tag_en; /* Tag msg enables (ID 0-15) */ + u8 scsi_pad; /* SCSI pad control */ + __le32 wide; /* WIDE msg options (ID 0-15) */ + __le32 sync; /* SYNC msg options (ID 0-15) */ + u8 soft_mask; /* soft error mask */ + u8 unsol_mask; /* unsolicited error mask */ +} __packed; + +#endif /* _WD719X_H_ */ diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c new file mode 100644 index 000000000..9ec55ddc1 --- /dev/null +++ b/drivers/scsi/xen-scsifront.c @@ -0,0 +1,1237 @@ +/* + * Xen SCSI frontend driver + * + * Copyright (c) 2008, FUJITSU Limited + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#define VSCSIFRONT_OP_ADD_LUN 1 +#define VSCSIFRONT_OP_DEL_LUN 2 +#define VSCSIFRONT_OP_READD_LUN 3 + +/* Tuning point. */ +#define VSCSIIF_DEFAULT_CMD_PER_LUN 10 +#define VSCSIIF_MAX_TARGET 64 +#define VSCSIIF_MAX_LUN 255 + +#define VSCSIIF_RING_SIZE __CONST_RING_SIZE(vscsiif, PAGE_SIZE) +#define VSCSIIF_MAX_REQS VSCSIIF_RING_SIZE + +#define vscsiif_grants_sg(_sg) (PFN_UP((_sg) * \ + sizeof(struct scsiif_request_segment))) + +struct vscsifrnt_shadow { + /* command between backend and frontend */ + unsigned char act; + uint8_t nr_segments; + uint16_t rqid; + uint16_t ref_rqid; + + bool inflight; + + unsigned int nr_grants; /* number of grants in gref[] */ + struct scsiif_request_segment *sg; /* scatter/gather elements */ + struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE]; + + /* Do reset or abort function. */ + wait_queue_head_t wq_reset; /* reset work queue */ + int wait_reset; /* reset work queue condition */ + int32_t rslt_reset; /* reset response status: */ + /* SUCCESS or FAILED or: */ +#define RSLT_RESET_WAITING 0 +#define RSLT_RESET_ERR -1 + + /* Requested struct scsi_cmnd is stored from kernel. */ + struct scsi_cmnd *sc; + int gref[vscsiif_grants_sg(SG_ALL) + SG_ALL]; +}; + +struct vscsifrnt_info { + struct xenbus_device *dev; + + struct Scsi_Host *host; + enum { + STATE_INACTIVE, + STATE_ACTIVE, + STATE_ERROR + } host_active; + + unsigned int evtchn; + unsigned int irq; + + grant_ref_t ring_ref; + struct vscsiif_front_ring ring; + struct vscsiif_response ring_rsp; + + spinlock_t shadow_lock; + DECLARE_BITMAP(shadow_free_bitmap, VSCSIIF_MAX_REQS); + struct vscsifrnt_shadow *shadow[VSCSIIF_MAX_REQS]; + + /* Following items are protected by the host lock. 
*/ + wait_queue_head_t wq_sync; + wait_queue_head_t wq_pause; + unsigned int wait_ring_available:1; + unsigned int waiting_pause:1; + unsigned int pause:1; + unsigned callers; + + char dev_state_path[64]; + struct task_struct *curr; +}; + +static DEFINE_MUTEX(scsifront_mutex); + +static void scsifront_wake_up(struct vscsifrnt_info *info) +{ + info->wait_ring_available = 0; + wake_up(&info->wq_sync); +} + +static int scsifront_get_rqid(struct vscsifrnt_info *info) +{ + unsigned long flags; + int free; + + spin_lock_irqsave(&info->shadow_lock, flags); + + free = find_first_bit(info->shadow_free_bitmap, VSCSIIF_MAX_REQS); + __clear_bit(free, info->shadow_free_bitmap); + + spin_unlock_irqrestore(&info->shadow_lock, flags); + + return free; +} + +static int _scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id) +{ + int empty = bitmap_empty(info->shadow_free_bitmap, VSCSIIF_MAX_REQS); + + __set_bit(id, info->shadow_free_bitmap); + info->shadow[id] = NULL; + + return empty || info->wait_ring_available; +} + +static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id) +{ + unsigned long flags; + int kick; + + spin_lock_irqsave(&info->shadow_lock, flags); + kick = _scsifront_put_rqid(info, id); + spin_unlock_irqrestore(&info->shadow_lock, flags); + + if (kick) + scsifront_wake_up(info); +} + +static int scsifront_do_request(struct vscsifrnt_info *info, + struct vscsifrnt_shadow *shadow) +{ + struct vscsiif_front_ring *ring = &(info->ring); + struct vscsiif_request *ring_req; + struct scsi_cmnd *sc = shadow->sc; + uint32_t id; + int i, notify; + + if (RING_FULL(&info->ring)) + return -EBUSY; + + id = scsifront_get_rqid(info); /* use id in response */ + if (id >= VSCSIIF_MAX_REQS) + return -EBUSY; + + info->shadow[id] = shadow; + shadow->rqid = id; + + ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt); + ring->req_prod_pvt++; + + ring_req->rqid = id; + ring_req->act = shadow->act; + ring_req->ref_rqid = shadow->ref_rqid; + ring_req->nr_segments = shadow->nr_segments; + + ring_req->id = sc->device->id; + ring_req->lun = sc->device->lun; + ring_req->channel = sc->device->channel; + ring_req->cmd_len = sc->cmd_len; + + BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE); + + memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); + + ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; + ring_req->timeout_per_command = scsi_cmd_to_rq(sc)->timeout / HZ; + + for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++) + ring_req->seg[i] = shadow->seg[i]; + + shadow->inflight = true; + + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); + if (notify) + notify_remote_via_irq(info->irq); + + return 0; +} + +static void scsifront_set_error(struct vscsifrnt_info *info, const char *msg) +{ + shost_printk(KERN_ERR, info->host, KBUILD_MODNAME "%s\n" + "Disabling device for further use\n", msg); + info->host_active = STATE_ERROR; +} + +static void scsifront_gnttab_done(struct vscsifrnt_info *info, + struct vscsifrnt_shadow *shadow) +{ + int i; + + if (shadow->sc->sc_data_direction == DMA_NONE) + return; + + for (i = 0; i < shadow->nr_grants; i++) { + if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) { + scsifront_set_error(info, "grant still in use by backend"); + return; + } + } + + kfree(shadow->sg); +} + +static unsigned int scsifront_host_byte(int32_t rslt) +{ + switch (XEN_VSCSIIF_RSLT_HOST(rslt)) { + case XEN_VSCSIIF_RSLT_HOST_OK: + return DID_OK; + case XEN_VSCSIIF_RSLT_HOST_NO_CONNECT: + return DID_NO_CONNECT; + case XEN_VSCSIIF_RSLT_HOST_BUS_BUSY: + return 
DID_BUS_BUSY; + case XEN_VSCSIIF_RSLT_HOST_TIME_OUT: + return DID_TIME_OUT; + case XEN_VSCSIIF_RSLT_HOST_BAD_TARGET: + return DID_BAD_TARGET; + case XEN_VSCSIIF_RSLT_HOST_ABORT: + return DID_ABORT; + case XEN_VSCSIIF_RSLT_HOST_PARITY: + return DID_PARITY; + case XEN_VSCSIIF_RSLT_HOST_ERROR: + return DID_ERROR; + case XEN_VSCSIIF_RSLT_HOST_RESET: + return DID_RESET; + case XEN_VSCSIIF_RSLT_HOST_BAD_INTR: + return DID_BAD_INTR; + case XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH: + return DID_PASSTHROUGH; + case XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR: + return DID_SOFT_ERROR; + case XEN_VSCSIIF_RSLT_HOST_IMM_RETRY: + return DID_IMM_RETRY; + case XEN_VSCSIIF_RSLT_HOST_REQUEUE: + return DID_REQUEUE; + case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED: + return DID_TRANSPORT_DISRUPTED; + case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST: + return DID_TRANSPORT_FAILFAST; + case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL: + return DID_TRANSPORT_MARGINAL; + default: + return DID_ERROR; + } +} + +static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info, + struct vscsiif_response *ring_rsp) +{ + struct vscsifrnt_shadow *shadow; + struct scsi_cmnd *sc; + uint32_t id; + uint8_t sense_len; + + id = ring_rsp->rqid; + shadow = info->shadow[id]; + sc = shadow->sc; + + BUG_ON(sc == NULL); + + scsifront_gnttab_done(info, shadow); + if (info->host_active == STATE_ERROR) + return; + scsifront_put_rqid(info, id); + + set_host_byte(sc, scsifront_host_byte(ring_rsp->rslt)); + set_status_byte(sc, XEN_VSCSIIF_RSLT_STATUS(ring_rsp->rslt)); + scsi_set_resid(sc, ring_rsp->residual_len); + + sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE, + ring_rsp->sense_len); + + if (sense_len) + memcpy(sc->sense_buffer, ring_rsp->sense_buffer, sense_len); + + scsi_done(sc); +} + +static void scsifront_sync_cmd_done(struct vscsifrnt_info *info, + struct vscsiif_response *ring_rsp) +{ + uint16_t id = ring_rsp->rqid; + unsigned long flags; + struct vscsifrnt_shadow *shadow = info->shadow[id]; + int kick; + + spin_lock_irqsave(&info->shadow_lock, flags); + shadow->wait_reset = 1; + switch (shadow->rslt_reset) { + case RSLT_RESET_WAITING: + if (ring_rsp->rslt == XEN_VSCSIIF_RSLT_RESET_SUCCESS) + shadow->rslt_reset = SUCCESS; + else + shadow->rslt_reset = FAILED; + break; + case RSLT_RESET_ERR: + kick = _scsifront_put_rqid(info, id); + spin_unlock_irqrestore(&info->shadow_lock, flags); + kfree(shadow); + if (kick) + scsifront_wake_up(info); + return; + default: + scsifront_set_error(info, "bad reset state"); + break; + } + spin_unlock_irqrestore(&info->shadow_lock, flags); + + wake_up(&shadow->wq_reset); +} + +static void scsifront_do_response(struct vscsifrnt_info *info, + struct vscsiif_response *ring_rsp) +{ + struct vscsifrnt_shadow *shadow; + + if (ring_rsp->rqid >= VSCSIIF_MAX_REQS || + !info->shadow[ring_rsp->rqid]->inflight) { + scsifront_set_error(info, "illegal rqid returned by backend!"); + return; + } + shadow = info->shadow[ring_rsp->rqid]; + shadow->inflight = false; + + if (shadow->act == VSCSIIF_ACT_SCSI_CDB) + scsifront_cdb_cmd_done(info, ring_rsp); + else + scsifront_sync_cmd_done(info, ring_rsp); +} + +static int scsifront_ring_drain(struct vscsifrnt_info *info, + unsigned int *eoiflag) +{ + struct vscsiif_response ring_rsp; + RING_IDX i, rp; + int more_to_do = 0; + + rp = READ_ONCE(info->ring.sring->rsp_prod); + virt_rmb(); /* ordering required respective to backend */ + if (RING_RESPONSE_PROD_OVERFLOW(&info->ring, rp)) { + scsifront_set_error(info, "illegal number of responses"); + return 0; + } + for (i = info->ring.rsp_cons; i 
!= rp; i++) { + RING_COPY_RESPONSE(&info->ring, i, &ring_rsp); + scsifront_do_response(info, &ring_rsp); + if (info->host_active == STATE_ERROR) + return 0; + *eoiflag &= ~XEN_EOI_FLAG_SPURIOUS; + } + + info->ring.rsp_cons = i; + + if (i != info->ring.req_prod_pvt) + RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); + else + info->ring.sring->rsp_event = i + 1; + + return more_to_do; +} + +static int scsifront_cmd_done(struct vscsifrnt_info *info, + unsigned int *eoiflag) +{ + int more_to_do; + unsigned long flags; + + spin_lock_irqsave(info->host->host_lock, flags); + + more_to_do = scsifront_ring_drain(info, eoiflag); + + info->wait_ring_available = 0; + + spin_unlock_irqrestore(info->host->host_lock, flags); + + wake_up(&info->wq_sync); + + return more_to_do; +} + +static irqreturn_t scsifront_irq_fn(int irq, void *dev_id) +{ + struct vscsifrnt_info *info = dev_id; + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; + + if (info->host_active == STATE_ERROR) { + xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); + return IRQ_HANDLED; + } + + while (scsifront_cmd_done(info, &eoiflag)) + /* Yield point for this unbounded loop. */ + cond_resched(); + + xen_irq_lateeoi(irq, eoiflag); + + return IRQ_HANDLED; +} + +static void scsifront_finish_all(struct vscsifrnt_info *info) +{ + unsigned int i, dummy; + struct vscsiif_response resp; + + scsifront_ring_drain(info, &dummy); + + for (i = 0; i < VSCSIIF_MAX_REQS; i++) { + if (test_bit(i, info->shadow_free_bitmap)) + continue; + resp.rqid = i; + resp.sense_len = 0; + resp.rslt = DID_RESET << 16; + resp.residual_len = 0; + scsifront_do_response(info, &resp); + } +} + +static int map_data_for_request(struct vscsifrnt_info *info, + struct scsi_cmnd *sc, + struct vscsifrnt_shadow *shadow) +{ + grant_ref_t gref_head; + struct page *page; + int err, ref, ref_cnt = 0; + int grant_ro = (sc->sc_data_direction == DMA_TO_DEVICE); + unsigned int i, off, len, bytes; + unsigned int data_len = scsi_bufflen(sc); + unsigned int data_grants = 0, seg_grants = 0; + struct scatterlist *sg; + struct scsiif_request_segment *seg; + + if (sc->sc_data_direction == DMA_NONE || !data_len) + return 0; + + scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) + data_grants += PFN_UP(sg->offset + sg->length); + + if (data_grants > VSCSIIF_SG_TABLESIZE) { + if (data_grants > info->host->sg_tablesize) { + shost_printk(KERN_ERR, info->host, KBUILD_MODNAME + "Unable to map request_buffer for command!\n"); + return -E2BIG; + } + seg_grants = vscsiif_grants_sg(data_grants); + shadow->sg = kcalloc(data_grants, + sizeof(struct scsiif_request_segment), GFP_ATOMIC); + if (!shadow->sg) + return -ENOMEM; + } + seg = shadow->sg ? 
: shadow->seg; + + err = gnttab_alloc_grant_references(seg_grants + data_grants, + &gref_head); + if (err) { + kfree(shadow->sg); + shost_printk(KERN_ERR, info->host, KBUILD_MODNAME + "gnttab_alloc_grant_references() error\n"); + return -ENOMEM; + } + + if (seg_grants) { + page = virt_to_page(seg); + off = offset_in_page(seg); + len = sizeof(struct scsiif_request_segment) * data_grants; + while (len > 0) { + bytes = min_t(unsigned int, len, PAGE_SIZE - off); + + ref = gnttab_claim_grant_reference(&gref_head); + BUG_ON(ref == -ENOSPC); + + gnttab_grant_foreign_access_ref(ref, + info->dev->otherend_id, + xen_page_to_gfn(page), 1); + shadow->gref[ref_cnt] = ref; + shadow->seg[ref_cnt].gref = ref; + shadow->seg[ref_cnt].offset = (uint16_t)off; + shadow->seg[ref_cnt].length = (uint16_t)bytes; + + page++; + len -= bytes; + off = 0; + ref_cnt++; + } + BUG_ON(seg_grants < ref_cnt); + seg_grants = ref_cnt; + } + + scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) { + page = sg_page(sg); + off = sg->offset; + len = sg->length; + + while (len > 0 && data_len > 0) { + /* + * sg sends a scatterlist that is larger than + * the data_len it wants transferred for certain + * IO sizes. + */ + bytes = min_t(unsigned int, len, PAGE_SIZE - off); + bytes = min(bytes, data_len); + + ref = gnttab_claim_grant_reference(&gref_head); + BUG_ON(ref == -ENOSPC); + + gnttab_grant_foreign_access_ref(ref, + info->dev->otherend_id, + xen_page_to_gfn(page), + grant_ro); + + shadow->gref[ref_cnt] = ref; + seg->gref = ref; + seg->offset = (uint16_t)off; + seg->length = (uint16_t)bytes; + + page++; + seg++; + len -= bytes; + data_len -= bytes; + off = 0; + ref_cnt++; + } + } + + if (seg_grants) + shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants; + else + shadow->nr_segments = (uint8_t)ref_cnt; + shadow->nr_grants = ref_cnt; + + return 0; +} + +static int scsifront_enter(struct vscsifrnt_info *info) +{ + if (info->pause) + return 1; + info->callers++; + return 0; +} + +static void scsifront_return(struct vscsifrnt_info *info) +{ + info->callers--; + if (info->callers) + return; + + if (!info->waiting_pause) + return; + + info->waiting_pause = 0; + wake_up(&info->wq_pause); +} + +static int scsifront_queuecommand(struct Scsi_Host *shost, + struct scsi_cmnd *sc) +{ + struct vscsifrnt_info *info = shost_priv(shost); + struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc); + unsigned long flags; + int err; + + if (info->host_active == STATE_ERROR) + return SCSI_MLQUEUE_HOST_BUSY; + + sc->result = 0; + + shadow->sc = sc; + shadow->act = VSCSIIF_ACT_SCSI_CDB; + + spin_lock_irqsave(shost->host_lock, flags); + if (scsifront_enter(info)) { + spin_unlock_irqrestore(shost->host_lock, flags); + return SCSI_MLQUEUE_HOST_BUSY; + } + + err = map_data_for_request(info, sc, shadow); + if (err < 0) { + pr_debug("%s: err %d\n", __func__, err); + scsifront_return(info); + spin_unlock_irqrestore(shost->host_lock, flags); + if (err == -ENOMEM) + return SCSI_MLQUEUE_HOST_BUSY; + sc->result = DID_ERROR << 16; + scsi_done(sc); + return 0; + } + + if (scsifront_do_request(info, shadow)) { + scsifront_gnttab_done(info, shadow); + goto busy; + } + + scsifront_return(info); + spin_unlock_irqrestore(shost->host_lock, flags); + + return 0; + +busy: + scsifront_return(info); + spin_unlock_irqrestore(shost->host_lock, flags); + pr_debug("%s: busy\n", __func__); + return SCSI_MLQUEUE_HOST_BUSY; +} + +/* + * Any exception handling (reset or abort) must be forwarded to the backend. + * We have to wait until an answer is returned. 
This answer contains the + * result to be returned to the requestor. + */ +static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act) +{ + struct Scsi_Host *host = sc->device->host; + struct vscsifrnt_info *info = shost_priv(host); + struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc); + int err = 0; + + if (info->host_active == STATE_ERROR) + return FAILED; + + shadow = kzalloc(sizeof(*shadow), GFP_NOIO); + if (!shadow) + return FAILED; + + shadow->act = act; + shadow->rslt_reset = RSLT_RESET_WAITING; + shadow->sc = sc; + shadow->ref_rqid = s->rqid; + init_waitqueue_head(&shadow->wq_reset); + + spin_lock_irq(host->host_lock); + + for (;;) { + if (scsifront_enter(info)) + goto fail; + + if (!scsifront_do_request(info, shadow)) + break; + + scsifront_return(info); + if (err) + goto fail; + info->wait_ring_available = 1; + spin_unlock_irq(host->host_lock); + err = wait_event_interruptible(info->wq_sync, + !info->wait_ring_available); + spin_lock_irq(host->host_lock); + } + + spin_unlock_irq(host->host_lock); + err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset); + spin_lock_irq(host->host_lock); + + if (!err) { + err = shadow->rslt_reset; + scsifront_put_rqid(info, shadow->rqid); + kfree(shadow); + } else { + spin_lock(&info->shadow_lock); + shadow->rslt_reset = RSLT_RESET_ERR; + spin_unlock(&info->shadow_lock); + err = FAILED; + } + + scsifront_return(info); + spin_unlock_irq(host->host_lock); + return err; + +fail: + spin_unlock_irq(host->host_lock); + kfree(shadow); + return FAILED; +} + +static int scsifront_eh_abort_handler(struct scsi_cmnd *sc) +{ + pr_debug("%s\n", __func__); + return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_ABORT); +} + +static int scsifront_dev_reset_handler(struct scsi_cmnd *sc) +{ + pr_debug("%s\n", __func__); + return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_RESET); +} + +static int scsifront_sdev_configure(struct scsi_device *sdev) +{ + struct vscsifrnt_info *info = shost_priv(sdev->host); + int err; + + if (info->host_active == STATE_ERROR) + return -EIO; + + if (current == info->curr) { + err = xenbus_printf(XBT_NIL, info->dev->nodename, + info->dev_state_path, "%d", XenbusStateConnected); + if (err) { + xenbus_dev_error(info->dev, err, + "%s: writing dev_state_path", __func__); + return err; + } + } + + return 0; +} + +static void scsifront_sdev_destroy(struct scsi_device *sdev) +{ + struct vscsifrnt_info *info = shost_priv(sdev->host); + int err; + + if (current == info->curr) { + err = xenbus_printf(XBT_NIL, info->dev->nodename, + info->dev_state_path, "%d", XenbusStateClosed); + if (err) + xenbus_dev_error(info->dev, err, + "%s: writing dev_state_path", __func__); + } +} + +static const struct scsi_host_template scsifront_sht = { + .module = THIS_MODULE, + .name = "Xen SCSI frontend driver", + .queuecommand = scsifront_queuecommand, + .eh_abort_handler = scsifront_eh_abort_handler, + .eh_device_reset_handler = scsifront_dev_reset_handler, + .slave_configure = scsifront_sdev_configure, + .slave_destroy = scsifront_sdev_destroy, + .cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN, + .can_queue = VSCSIIF_MAX_REQS, + .this_id = -1, + .cmd_size = sizeof(struct vscsifrnt_shadow), + .sg_tablesize = VSCSIIF_SG_TABLESIZE, + .proc_name = "scsifront", +}; + +static int scsifront_alloc_ring(struct vscsifrnt_info *info) +{ + struct xenbus_device *dev = info->dev; + struct vscsiif_sring *sring; + int err; + + /***** Frontend to Backend ring start *****/ + err = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&sring, 1, + &info->ring_ref); 
+ if (err) + return err; + + XEN_FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); + + err = xenbus_alloc_evtchn(dev, &info->evtchn); + if (err) { + xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn"); + goto free_gnttab; + } + + err = bind_evtchn_to_irq_lateeoi(info->evtchn); + if (err <= 0) { + xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq"); + goto free_gnttab; + } + + info->irq = err; + + err = request_threaded_irq(info->irq, NULL, scsifront_irq_fn, + IRQF_ONESHOT, "scsifront", info); + if (err) { + xenbus_dev_fatal(dev, err, "request_threaded_irq"); + goto free_irq; + } + + return 0; + +/* free resource */ +free_irq: + unbind_from_irqhandler(info->irq, info); +free_gnttab: + xenbus_teardown_ring((void **)&sring, 1, &info->ring_ref); + + return err; +} + +static void scsifront_free_ring(struct vscsifrnt_info *info) +{ + unbind_from_irqhandler(info->irq, info); + xenbus_teardown_ring((void **)&info->ring.sring, 1, &info->ring_ref); +} + +static int scsifront_init_ring(struct vscsifrnt_info *info) +{ + struct xenbus_device *dev = info->dev; + struct xenbus_transaction xbt; + int err; + + pr_debug("%s\n", __func__); + + err = scsifront_alloc_ring(info); + if (err) + return err; + pr_debug("%s: %u %u\n", __func__, info->ring_ref, info->evtchn); + +again: + err = xenbus_transaction_start(&xbt); + if (err) + xenbus_dev_fatal(dev, err, "starting transaction"); + + err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", + info->ring_ref); + if (err) { + xenbus_dev_fatal(dev, err, "%s", "writing ring-ref"); + goto fail; + } + + err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", + info->evtchn); + + if (err) { + xenbus_dev_fatal(dev, err, "%s", "writing event-channel"); + goto fail; + } + + err = xenbus_transaction_end(xbt, 0); + if (err) { + if (err == -EAGAIN) + goto again; + xenbus_dev_fatal(dev, err, "completing transaction"); + goto free_sring; + } + + return 0; + +fail: + xenbus_transaction_end(xbt, 1); +free_sring: + scsifront_free_ring(info); + + return err; +} + + +static int scsifront_probe(struct xenbus_device *dev, + const struct xenbus_device_id *id) +{ + struct vscsifrnt_info *info; + struct Scsi_Host *host; + int err = -ENOMEM; + char name[TASK_COMM_LEN]; + + host = scsi_host_alloc(&scsifront_sht, sizeof(*info)); + if (!host) { + xenbus_dev_fatal(dev, err, "fail to allocate scsi host"); + return err; + } + info = shost_priv(host); + + dev_set_drvdata(&dev->dev, info); + info->dev = dev; + + bitmap_fill(info->shadow_free_bitmap, VSCSIIF_MAX_REQS); + + err = scsifront_init_ring(info); + if (err) { + scsi_host_put(host); + return err; + } + + init_waitqueue_head(&info->wq_sync); + init_waitqueue_head(&info->wq_pause); + spin_lock_init(&info->shadow_lock); + + snprintf(name, TASK_COMM_LEN, "vscsiif.%d", host->host_no); + + host->max_id = VSCSIIF_MAX_TARGET; + host->max_channel = 0; + host->max_lun = VSCSIIF_MAX_LUN; + host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512; + host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE; + + err = scsi_add_host(host, &dev->dev); + if (err) { + dev_err(&dev->dev, "fail to add scsi host %d\n", err); + goto free_sring; + } + info->host = host; + info->host_active = STATE_ACTIVE; + + xenbus_switch_state(dev, XenbusStateInitialised); + + return 0; + +free_sring: + scsifront_free_ring(info); + scsi_host_put(host); + return err; +} + +static int scsifront_resume(struct xenbus_device *dev) +{ + struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); + struct Scsi_Host *host = info->host; + int err; + + spin_lock_irq(host->host_lock); + + 
/* Finish all still pending commands. */ + scsifront_finish_all(info); + + spin_unlock_irq(host->host_lock); + + /* Reconnect to dom0. */ + scsifront_free_ring(info); + err = scsifront_init_ring(info); + if (err) { + dev_err(&dev->dev, "fail to resume %d\n", err); + scsi_host_put(host); + return err; + } + + xenbus_switch_state(dev, XenbusStateInitialised); + + return 0; +} + +static int scsifront_suspend(struct xenbus_device *dev) +{ + struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); + struct Scsi_Host *host = info->host; + int err = 0; + + /* No new commands for the backend. */ + spin_lock_irq(host->host_lock); + info->pause = 1; + while (info->callers && !err) { + info->waiting_pause = 1; + info->wait_ring_available = 0; + spin_unlock_irq(host->host_lock); + wake_up(&info->wq_sync); + err = wait_event_interruptible(info->wq_pause, + !info->waiting_pause); + spin_lock_irq(host->host_lock); + } + spin_unlock_irq(host->host_lock); + return err; +} + +static void scsifront_remove(struct xenbus_device *dev) +{ + struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); + + pr_debug("%s: %s removed\n", __func__, dev->nodename); + + mutex_lock(&scsifront_mutex); + if (info->host_active != STATE_INACTIVE) { + /* Scsi_host not yet removed */ + scsi_remove_host(info->host); + info->host_active = STATE_INACTIVE; + } + mutex_unlock(&scsifront_mutex); + + scsifront_free_ring(info); + scsi_host_put(info->host); +} + +static void scsifront_disconnect(struct vscsifrnt_info *info) +{ + struct xenbus_device *dev = info->dev; + struct Scsi_Host *host = info->host; + + pr_debug("%s: %s disconnect\n", __func__, dev->nodename); + + /* + * When this function is executed, all devices of + * Frontend have been deleted. + * Therefore, it need not block I/O before remove_host. + */ + + mutex_lock(&scsifront_mutex); + if (info->host_active != STATE_INACTIVE) { + scsi_remove_host(host); + info->host_active = STATE_INACTIVE; + } + mutex_unlock(&scsifront_mutex); + + xenbus_frontend_closed(dev); +} + +static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) +{ + struct xenbus_device *dev = info->dev; + int i, err = 0; + char str[64]; + char **dir; + unsigned int dir_n = 0; + unsigned int device_state; + unsigned int hst, chn, tgt, lun; + struct scsi_device *sdev; + + if (info->host_active == STATE_ERROR) + return; + + dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n); + if (IS_ERR(dir)) + return; + + /* mark current task as the one allowed to modify device states */ + BUG_ON(info->curr); + info->curr = current; + + for (i = 0; i < dir_n; i++) { + /* read status */ + snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]); + err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u", + &device_state); + if (XENBUS_EXIST_ERR(err)) + continue; + + /* virtual SCSI device */ + snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]); + err = xenbus_scanf(XBT_NIL, dev->otherend, str, + "%u:%u:%u:%u", &hst, &chn, &tgt, &lun); + if (XENBUS_EXIST_ERR(err)) + continue; + + /* + * Front device state path, used in slave_configure called + * on successful scsi_add_device, and in slave_destroy called + * on remove of a device.
+ */ + snprintf(info->dev_state_path, sizeof(info->dev_state_path), + "vscsi-devs/%s/state", dir[i]); + + switch (op) { + case VSCSIFRONT_OP_ADD_LUN: + if (device_state != XenbusStateInitialised) + break; + + if (scsi_add_device(info->host, chn, tgt, lun)) { + dev_err(&dev->dev, "scsi_add_device\n"); + err = xenbus_printf(XBT_NIL, dev->nodename, + info->dev_state_path, + "%d", XenbusStateClosed); + if (err) + xenbus_dev_error(dev, err, + "%s: writing dev_state_path", __func__); + } + break; + case VSCSIFRONT_OP_DEL_LUN: + if (device_state != XenbusStateClosing) + break; + + sdev = scsi_device_lookup(info->host, chn, tgt, lun); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } + break; + case VSCSIFRONT_OP_READD_LUN: + if (device_state == XenbusStateConnected) { + err = xenbus_printf(XBT_NIL, dev->nodename, + info->dev_state_path, + "%d", XenbusStateConnected); + if (err) + xenbus_dev_error(dev, err, + "%s: writing dev_state_path", __func__); + } + break; + default: + break; + } + } + + info->curr = NULL; + + kfree(dir); +} + +static void scsifront_read_backend_params(struct xenbus_device *dev, + struct vscsifrnt_info *info) +{ + unsigned int sg_grant, nr_segs; + struct Scsi_Host *host = info->host; + + sg_grant = xenbus_read_unsigned(dev->otherend, "feature-sg-grant", 0); + nr_segs = min_t(unsigned int, sg_grant, SG_ALL); + nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE); + nr_segs = min_t(unsigned int, nr_segs, + VSCSIIF_SG_TABLESIZE * PAGE_SIZE / + sizeof(struct scsiif_request_segment)); + + if (!info->pause && sg_grant) + dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs); + else if (info->pause && nr_segs < host->sg_tablesize) + dev_warn(&dev->dev, + "SG entries decreased from %d to %u - device may not work properly anymore\n", + host->sg_tablesize, nr_segs); + + host->sg_tablesize = nr_segs; + host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512; +} + +static void scsifront_backend_changed(struct xenbus_device *dev, + enum xenbus_state backend_state) +{ + struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); + + pr_debug("%s: %p %u %u\n", __func__, dev, dev->state, backend_state); + + switch (backend_state) { + case XenbusStateUnknown: + case XenbusStateInitialising: + case XenbusStateInitWait: + case XenbusStateInitialised: + break; + + case XenbusStateConnected: + scsifront_read_backend_params(dev, info); + + if (info->pause) { + scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_READD_LUN); + xenbus_switch_state(dev, XenbusStateConnected); + info->pause = 0; + return; + } + + if (xenbus_read_driver_state(dev->nodename) == + XenbusStateInitialised) + scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); + + if (dev->state != XenbusStateConnected) + xenbus_switch_state(dev, XenbusStateConnected); + break; + + case XenbusStateClosed: + if (dev->state == XenbusStateClosed) + break; + fallthrough; /* Missed the backend's Closing state */ + case XenbusStateClosing: + scsifront_disconnect(info); + break; + + case XenbusStateReconfiguring: + scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN); + xenbus_switch_state(dev, XenbusStateReconfiguring); + break; + + case XenbusStateReconfigured: + scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); + xenbus_switch_state(dev, XenbusStateConnected); + break; + } +} + +static const struct xenbus_device_id scsifront_ids[] = { + { "vscsi" }, + { "" } +}; + +static struct xenbus_driver scsifront_driver = { + .ids = scsifront_ids, + .probe = scsifront_probe, + .remove = scsifront_remove, + .resume = 
scsifront_resume, + .suspend = scsifront_suspend, + .otherend_changed = scsifront_backend_changed, +}; + +static int __init scsifront_init(void) +{ + if (!xen_domain()) + return -ENODEV; + + return xenbus_register_frontend(&scsifront_driver); +} +module_init(scsifront_init); + +static void __exit scsifront_exit(void) +{ + xenbus_unregister_driver(&scsifront_driver); +} +module_exit(scsifront_exit); + +MODULE_DESCRIPTION("Xen SCSI frontend driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("xen:vscsi"); +MODULE_AUTHOR("Juergen Gross "); diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c new file mode 100644 index 000000000..22d412cab --- /dev/null +++ b/drivers/scsi/zalon.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Zalon 53c7xx device driver. + * By Richard Hirst (rhirst@linuxcare.com) + */ + +#include +#include +#include +#include +#include +#include + +#include "../parisc/gsc.h" + +#include "ncr53c8xx.h" + +MODULE_AUTHOR("Richard Hirst"); +MODULE_DESCRIPTION("Bluefish/Zalon 720 SCSI Driver"); +MODULE_LICENSE("GPL"); + +#define GSC_SCSI_ZALON_OFFSET 0x800 + +#define IO_MODULE_EIM (1*4) +#define IO_MODULE_DC_ADATA (2*4) +#define IO_MODULE_II_CDATA (3*4) +#define IO_MODULE_IO_COMMAND (12*4) +#define IO_MODULE_IO_STATUS (13*4) + +#define IOSTATUS_RY 0x40 +#define IOSTATUS_FE 0x80 +#define IOIIDATA_SMINT5L 0x40000000 +#define IOIIDATA_MINT5EN 0x20000000 +#define IOIIDATA_PACKEN 0x10000000 +#define IOIIDATA_PREFETCHEN 0x08000000 +#define IOIIDATA_IOII 0x00000020 + +#define CMD_RESET 5 + +static struct ncr_chip zalon720_chip __initdata = { + .revision_id = 0x0f, + .burst_max = 3, + .offset_max = 8, + .nr_divisor = 4, + .features = FE_WIDE | FE_DIFF | FE_EHP| FE_MUX | FE_EA, +}; + + + +#if 0 +/* FIXME: + * Is this function dead code? or is someone planning on using it in the + * future. The clock = (int) pdc_result[16] does not look correct to + * me ... I think it should be iodc_data[16]. Since this cause a compile + * error with the new encapsulated PDC, I'm not compiling in this function. + * - RB + */ +/* poke SCSI clock out of iodc data */ + +static u8 iodc_data[32] __attribute__ ((aligned (64))); +static unsigned long pdc_result[32] __attribute__ ((aligned (16))) ={0,0,0,0}; + +static int +lasi_scsi_clock(void * hpa, int defaultclock) +{ + int clock, status; + + status = pdc_iodc_read(&pdc_result, hpa, 0, &iodc_data, 32 ); + if (status == PDC_RET_OK) { + clock = (int) pdc_result[16]; + } else { + printk(KERN_WARNING "%s: pdc_iodc_read returned %d\n", __func__, status); + clock = defaultclock; + } + + printk(KERN_DEBUG "%s: SCSI clock %d\n", __func__, clock); + return clock; +} +#endif + +static struct scsi_host_template zalon7xx_template = { + .module = THIS_MODULE, + .proc_name = "zalon7xx", + .cmd_size = sizeof(struct ncr_cmd_priv), +}; + +static int __init +zalon_probe(struct parisc_device *dev) +{ + struct gsc_irq gsc_irq; + u32 zalon_vers; + int error = -ENODEV; + void __iomem *zalon = ioremap(dev->hpa.start, 4096); + void __iomem *io_port = zalon + GSC_SCSI_ZALON_OFFSET; + static int unit = 0; + struct Scsi_Host *host; + struct ncr_device device; + + __raw_writel(CMD_RESET, zalon + IO_MODULE_IO_COMMAND); + while (!(__raw_readl(zalon + IO_MODULE_IO_STATUS) & IOSTATUS_RY)) + cpu_relax(); + __raw_writel(IOIIDATA_MINT5EN | IOIIDATA_PACKEN | IOIIDATA_PREFETCHEN, + zalon + IO_MODULE_II_CDATA); + + /* XXX: Save the Zalon version for bug workarounds? */ + zalon_vers = (__raw_readl(zalon + IO_MODULE_II_CDATA) >> 24) & 0x07; + + /* Setup the interrupts first. 
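+	** gsc_alloc_irq() below just reserves a CPU interrupt; the txn_addr/txn_data pair it fills in is then programmed into IO_MODULE_EIM.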
+ ** Later on request_irq() will register the handler. + */ + dev->irq = gsc_alloc_irq(&gsc_irq); + + printk(KERN_INFO "%s: Zalon version %d, IRQ %d\n", __func__, + zalon_vers, dev->irq); + + __raw_writel(gsc_irq.txn_addr | gsc_irq.txn_data, zalon + IO_MODULE_EIM); + + if (zalon_vers == 0) + printk(KERN_WARNING "%s: Zalon 1.1 or earlier\n", __func__); + + memset(&device, 0, sizeof(struct ncr_device)); + + /* The following three are needed before any other access. */ + __raw_writeb(0x20, io_port + 0x38); /* DCNTL_REG, EA */ + __raw_writeb(0x04, io_port + 0x1b); /* CTEST0_REG, EHP */ + __raw_writeb(0x80, io_port + 0x22); /* CTEST4_REG, MUX */ + + /* Initialise ncr_device structure with items required by ncr_attach. */ + device.chip = zalon720_chip; + device.host_id = 7; + device.dev = &dev->dev; + device.slot.base = dev->hpa.start + GSC_SCSI_ZALON_OFFSET; + device.slot.base_v = io_port; + device.slot.irq = dev->irq; + device.differential = 2; + + host = ncr_attach(&zalon7xx_template, unit, &device); + if (!host) + return -ENODEV; + + if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) { + dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ", + dev->irq); + goto fail; + } + + unit++; + + dev_set_drvdata(&dev->dev, host); + + error = scsi_add_host(host, &dev->dev); + if (error) + goto fail_free_irq; + + scsi_scan_host(host); + return 0; + + fail_free_irq: + free_irq(dev->irq, host); + fail: + ncr53c8xx_release(host); + return error; +} + +static const struct parisc_device_id zalon_tbl[] __initconst = { + { HPHW_A_DMA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00089 }, + { 0, } +}; + +MODULE_DEVICE_TABLE(parisc, zalon_tbl); + +static void __exit zalon_remove(struct parisc_device *dev) +{ + struct Scsi_Host *host = dev_get_drvdata(&dev->dev); + + scsi_remove_host(host); + ncr53c8xx_release(host); + free_irq(dev->irq, host); +} + +static struct parisc_driver zalon_driver __refdata = { + .name = "zalon", + .id_table = zalon_tbl, + .probe = zalon_probe, + .remove = __exit_p(zalon_remove), +}; + +static int __init zalon7xx_init(void) +{ + int ret = ncr53c8xx_init(); + if (!ret) + ret = register_parisc_driver(&zalon_driver); + if (ret) + ncr53c8xx_exit(); + return ret; +} + +static void __exit zalon7xx_exit(void) +{ + unregister_parisc_driver(&zalon_driver); + ncr53c8xx_exit(); +} + +module_init(zalon7xx_init); +module_exit(zalon7xx_exit); diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c new file mode 100644 index 000000000..7acf9193a --- /dev/null +++ b/drivers/scsi/zorro7xx.c @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux. + * Amiga MacroSystemUS WarpEngine SCSI controller. + * Amiga Technologies/DKB A4091 SCSI controller. + * + * Written 1997 by Alan Hourihane + * plus modifications of the 53c7xx.c driver to support the Amiga. 
+ * + * Rewritten to use 53c700.c by Kars de Jong + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include "53c700.h" + +MODULE_AUTHOR("Alan Hourihane / Kars de Jong "); +MODULE_DESCRIPTION("Amiga Zorro NCR53C710 driver"); +MODULE_LICENSE("GPL"); + + +static struct scsi_host_template zorro7xx_scsi_driver_template = { + .proc_name = "zorro7xx", + .this_id = 7, + .module = THIS_MODULE, +}; + +static struct zorro_driver_data { + const char *name; + unsigned long offset; + int absolute; /* offset is absolute address */ +} zorro7xx_driver_data[] = { + { .name = "PowerUP 603e+", .offset = 0xf40000, .absolute = 1 }, + { .name = "WarpEngine 40xx", .offset = 0x40000 }, + { .name = "A4091", .offset = 0x800000 }, + { .name = "GForce 040/060", .offset = 0x40000 }, + { 0 } +}; + +static struct zorro_device_id zorro7xx_zorro_tbl[] = { + { + .id = ZORRO_PROD_PHASE5_BLIZZARD_603E_PLUS, + .driver_data = (unsigned long)&zorro7xx_driver_data[0], + }, + { + .id = ZORRO_PROD_MACROSYSTEMS_WARP_ENGINE_40xx, + .driver_data = (unsigned long)&zorro7xx_driver_data[1], + }, + { + .id = ZORRO_PROD_CBM_A4091_1, + .driver_data = (unsigned long)&zorro7xx_driver_data[2], + }, + { + .id = ZORRO_PROD_CBM_A4091_2, + .driver_data = (unsigned long)&zorro7xx_driver_data[2], + }, + { + .id = ZORRO_PROD_GVP_GFORCE_040_060, + .driver_data = (unsigned long)&zorro7xx_driver_data[3], + }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, zorro7xx_zorro_tbl); + +static int zorro7xx_init_one(struct zorro_dev *z, + const struct zorro_device_id *ent) +{ + struct Scsi_Host *host; + struct NCR_700_Host_Parameters *hostdata; + struct zorro_driver_data *zdd; + unsigned long board, ioaddr; + + board = zorro_resource_start(z); + zdd = (struct zorro_driver_data *)ent->driver_data; + + if (zdd->absolute) { + ioaddr = zdd->offset; + } else { + ioaddr = board + zdd->offset; + } + + if (!zorro_request_device(z, zdd->name)) { + printk(KERN_ERR "zorro7xx: cannot reserve region 0x%lx, abort\n", + board); + return -EBUSY; + } + + hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); + if (!hostdata) { + printk(KERN_ERR "zorro7xx: Failed to allocate host data\n"); + goto out_release; + } + + /* Fill in the required pieces of hostdata */ + if (ioaddr > 0x01000000) + hostdata->base = ioremap(ioaddr, zorro_resource_len(z)); + else + hostdata->base = ZTWO_VADDR(ioaddr); + + hostdata->clock = 50; + hostdata->chip710 = 1; + + /* Settings for at least WarpEngine 40xx */ + hostdata->ctest7_extra = CTEST7_TT1; + + zorro7xx_scsi_driver_template.name = zdd->name; + + /* and register the chip */ + host = NCR_700_detect(&zorro7xx_scsi_driver_template, hostdata, + &z->dev); + if (!host) { + printk(KERN_ERR "zorro7xx: No host detected; " + "board configuration problem?\n"); + goto out_free; + } + + host->this_id = 7; + host->base = ioaddr; + host->irq = IRQ_AMIGA_PORTS; + + if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "zorro7xx-scsi", + host)) { + printk(KERN_ERR "zorro7xx: request_irq failed\n"); + goto out_put_host; + } + + zorro_set_drvdata(z, host); + scsi_scan_host(host); + + return 0; + + out_put_host: + scsi_host_put(host); + out_free: + if (ioaddr > 0x01000000) + iounmap(hostdata->base); + kfree(hostdata); + out_release: + zorro_release_device(z); + + return -ENODEV; +} + +static void zorro7xx_remove_one(struct zorro_dev *z) +{ + struct Scsi_Host *host = zorro_get_drvdata(z); + struct NCR_700_Host_Parameters *hostdata = shost_priv(host); + + scsi_remove_host(host); + + 
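+	/* The host is gone from the SCSI midlayer; now release the chip, its register mapping, the IRQ and the Zorro region. */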
NCR_700_release(host); + if (host->base > 0x01000000) + iounmap(hostdata->base); + kfree(hostdata); + free_irq(host->irq, host); + zorro_release_device(z); +} + +static struct zorro_driver zorro7xx_driver = { + .name = "zorro7xx-scsi", + .id_table = zorro7xx_zorro_tbl, + .probe = zorro7xx_init_one, + .remove = zorro7xx_remove_one, +}; + +static int __init zorro7xx_scsi_init(void) +{ + return zorro_register_driver(&zorro7xx_driver); +} + +static void __exit zorro7xx_scsi_exit(void) +{ + zorro_unregister_driver(&zorro7xx_driver); +} + +module_init(zorro7xx_scsi_init); +module_exit(zorro7xx_scsi_exit); diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c new file mode 100644 index 000000000..56cae22a4 --- /dev/null +++ b/drivers/scsi/zorro_esp.c @@ -0,0 +1,960 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ESP front-end for Amiga ZORRO SCSI systems. + * + * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk) + * + * Copyright (C) 2011,2018 Michael Schmitz (schmitz@debian.org) for + * migration to ESP SCSI core + * + * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for + * Blizzard 1230 DMA and probe function fixes + */ +/* + * ZORRO bus code from: + */ +/* + * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux. + * Amiga MacroSystemUS WarpEngine SCSI controller. + * Amiga Technologies/DKB A4091 SCSI controller. + * + * Written 1997 by Alan Hourihane + * plus modifications of the 53c7xx.c driver to support the Amiga. + * + * Rewritten to use 53c700.c by Kars de Jong + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "esp_scsi.h" + +MODULE_AUTHOR("Michael Schmitz "); +MODULE_DESCRIPTION("Amiga Zorro NCR5C9x (ESP) driver"); +MODULE_LICENSE("GPL"); + +/* per-board register layout definitions */ + +/* Blizzard 1230 DMA interface */ + +struct blz1230_dma_registers { + unsigned char dma_addr; /* DMA address [0x0000] */ + unsigned char dmapad2[0x7fff]; + unsigned char dma_latch; /* DMA latch [0x8000] */ +}; + +/* Blizzard 1230II DMA interface */ + +struct blz1230II_dma_registers { + unsigned char dma_addr; /* DMA address [0x0000] */ + unsigned char dmapad2[0xf]; + unsigned char dma_latch; /* DMA latch [0x0010] */ +}; + +/* Blizzard 2060 DMA interface */ + +struct blz2060_dma_registers { + unsigned char dma_led_ctrl; /* DMA led control [0x000] */ + unsigned char dmapad1[0x0f]; + unsigned char dma_addr0; /* DMA address (MSB) [0x010] */ + unsigned char dmapad2[0x03]; + unsigned char dma_addr1; /* DMA address [0x014] */ + unsigned char dmapad3[0x03]; + unsigned char dma_addr2; /* DMA address [0x018] */ + unsigned char dmapad4[0x03]; + unsigned char dma_addr3; /* DMA address (LSB) [0x01c] */ +}; + +/* DMA control bits */ +#define DMA_WRITE 0x80000000 + +/* Cyberstorm DMA interface */ + +struct cyber_dma_registers { + unsigned char dma_addr0; /* DMA address (MSB) [0x000] */ + unsigned char dmapad1[1]; + unsigned char dma_addr1; /* DMA address [0x002] */ + unsigned char dmapad2[1]; + unsigned char dma_addr2; /* DMA address [0x004] */ + unsigned char dmapad3[1]; + unsigned char dma_addr3; /* DMA address (LSB) [0x006] */ + unsigned char dmapad4[0x3fb]; + unsigned char cond_reg; /* DMA cond (ro) [0x402] */ +#define ctrl_reg cond_reg /* DMA control (wo) [0x402] */ +}; + +/* DMA control bits */ +#define CYBER_DMA_WRITE 0x40 /* DMA direction. 
1 = write */ +#define CYBER_DMA_Z3 0x20 /* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */ + +/* DMA status bits */ +#define CYBER_DMA_HNDL_INTR 0x80 /* DMA IRQ pending? */ + +/* The CyberStorm II DMA interface */ +struct cyberII_dma_registers { + unsigned char cond_reg; /* DMA cond (ro) [0x000] */ +#define ctrl_reg cond_reg /* DMA control (wo) [0x000] */ + unsigned char dmapad4[0x3f]; + unsigned char dma_addr0; /* DMA address (MSB) [0x040] */ + unsigned char dmapad1[3]; + unsigned char dma_addr1; /* DMA address [0x044] */ + unsigned char dmapad2[3]; + unsigned char dma_addr2; /* DMA address [0x048] */ + unsigned char dmapad3[3]; + unsigned char dma_addr3; /* DMA address (LSB) [0x04c] */ +}; + +/* Fastlane DMA interface */ + +struct fastlane_dma_registers { + unsigned char cond_reg; /* DMA status (ro) [0x0000] */ +#define ctrl_reg cond_reg /* DMA control (wo) [0x0000] */ + char dmapad1[0x3f]; + unsigned char clear_strobe; /* DMA clear (wo) [0x0040] */ +}; + +/* + * The controller registers can be found in the Z2 config area at these + * offsets: + */ +#define FASTLANE_ESP_ADDR 0x1000001 + +/* DMA status bits */ +#define FASTLANE_DMA_MINT 0x80 +#define FASTLANE_DMA_IACT 0x40 +#define FASTLANE_DMA_CREQ 0x20 + +/* DMA control bits */ +#define FASTLANE_DMA_FCODE 0xa0 +#define FASTLANE_DMA_MASK 0xf3 +#define FASTLANE_DMA_WRITE 0x08 /* 1 = write */ +#define FASTLANE_DMA_ENABLE 0x04 /* Enable DMA */ +#define FASTLANE_DMA_EDI 0x02 /* Enable DMA IRQ ? */ +#define FASTLANE_DMA_ESI 0x01 /* Enable SCSI IRQ */ + +/* + * private data used for driver + */ +struct zorro_esp_priv { + struct esp *esp; /* our ESP instance - for Scsi_host* */ + void __iomem *board_base; /* virtual address (Zorro III board) */ + int zorro3; /* board is Zorro III */ + unsigned char ctrl_data; /* shadow copy of ctrl_reg */ +}; + +/* + * On all implementations except for the Oktagon, padding between ESP + * registers is three bytes. + * On Oktagon, it is one byte - use a different accessor there. + * + * Oktagon needs PDMA - currently unsupported! + */ + +static void zorro_esp_write8(struct esp *esp, u8 val, unsigned long reg) +{ + writeb(val, esp->regs + (reg * 4UL)); +} + +static u8 zorro_esp_read8(struct esp *esp, unsigned long reg) +{ + return readb(esp->regs + (reg * 4UL)); +} + +static int zorro_esp_irq_pending(struct esp *esp) +{ + /* check ESP status register; DMA has no status reg. */ + if (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) + return 1; + + return 0; +} + +static int cyber_esp_irq_pending(struct esp *esp) +{ + struct cyber_dma_registers __iomem *dregs = esp->dma_regs; + unsigned char dma_status = readb(&dregs->cond_reg); + + /* It's important to check the DMA IRQ bit in the correct way! */ + return ((zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) && + (dma_status & CYBER_DMA_HNDL_INTR)); +} + +static int fastlane_esp_irq_pending(struct esp *esp) +{ + struct fastlane_dma_registers __iomem *dregs = esp->dma_regs; + unsigned char dma_status; + + dma_status = readb(&dregs->cond_reg); + + if (dma_status & FASTLANE_DMA_IACT) + return 0; /* not our IRQ */ + + /* Return non-zero if ESP requested IRQ */ + return ( + (dma_status & FASTLANE_DMA_CREQ) && + (!(dma_status & FASTLANE_DMA_MINT)) && + (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)); +} + +static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr, + u32 dma_len) +{ + return dma_len > (1U << 16) ? 
(1U << 16) : dma_len; +} + +static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr, + u32 dma_len) +{ + /* The old driver used 0xfffc as limit, so do that here too */ + return dma_len > 0xfffc ? 0xfffc : dma_len; +} + +static void zorro_esp_reset_dma(struct esp *esp) +{ + /* nothing to do here */ +} + +static void zorro_esp_dma_drain(struct esp *esp) +{ + /* nothing to do here */ +} + +static void zorro_esp_dma_invalidate(struct esp *esp) +{ + /* nothing to do here */ +} + +static void fastlane_esp_dma_invalidate(struct esp *esp) +{ + struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev); + struct fastlane_dma_registers __iomem *dregs = esp->dma_regs; + unsigned char *ctrl_data = &zep->ctrl_data; + + *ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK); + writeb(0, &dregs->clear_strobe); + z_writel(0, zep->board_base); +} + +/* Blizzard 1230/60 SCSI-IV DMA */ + +static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr, + u32 esp_count, u32 dma_count, int write, u8 cmd) +{ + struct blz1230_dma_registers __iomem *dregs = esp->dma_regs; + u8 phase = esp->sreg & ESP_STAT_PMASK; + + /* + * Use PIO if transferring message bytes to esp->command_block_dma. + * PIO requires a virtual address, so substitute esp->command_block + * for addr. + */ + if (phase == ESP_MIP && addr == esp->command_block_dma) { + esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count, + dma_count, write, cmd); + return; + } + + /* Clear the results of a possible prior esp->ops->send_dma_cmd() */ + esp->send_cmd_error = 0; + esp->send_cmd_residual = 0; + + if (write) + /* DMA receive */ + dma_sync_single_for_device(esp->dev, addr, esp_count, + DMA_FROM_DEVICE); + else + /* DMA send */ + dma_sync_single_for_device(esp->dev, addr, esp_count, + DMA_TO_DEVICE); + + addr >>= 1; + if (write) + addr &= ~(DMA_WRITE); + else + addr |= DMA_WRITE; + + writeb((addr >> 24) & 0xff, &dregs->dma_latch); + writeb((addr >> 24) & 0xff, &dregs->dma_addr); + writeb((addr >> 16) & 0xff, &dregs->dma_addr); + writeb((addr >> 8) & 0xff, &dregs->dma_addr); + writeb(addr & 0xff, &dregs->dma_addr); + + scsi_esp_cmd(esp, ESP_CMD_DMA); + zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); + zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); + + scsi_esp_cmd(esp, cmd); +} + +/* Blizzard 1230-II DMA */ + +static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr, + u32 esp_count, u32 dma_count, int write, u8 cmd) +{ + struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs; + u8 phase = esp->sreg & ESP_STAT_PMASK; + + /* Use PIO if transferring message bytes to esp->command_block_dma */ + if (phase == ESP_MIP && addr == esp->command_block_dma) { + esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count, + dma_count, write, cmd); + return; + } + + esp->send_cmd_error = 0; + esp->send_cmd_residual = 0; + + if (write) + /* DMA receive */ + dma_sync_single_for_device(esp->dev, addr, esp_count, + DMA_FROM_DEVICE); + else + /* DMA send */ + dma_sync_single_for_device(esp->dev, addr, esp_count, + DMA_TO_DEVICE); + + addr >>= 1; + if (write) + addr &= ~(DMA_WRITE); + else + addr |= DMA_WRITE; + + writeb((addr >> 24) & 0xff, &dregs->dma_latch); + writeb((addr >> 16) & 0xff, &dregs->dma_addr); + writeb((addr >> 8) & 0xff, &dregs->dma_addr); + writeb(addr & 0xff, &dregs->dma_addr); + + scsi_esp_cmd(esp, ESP_CMD_DMA); + zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); + zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); + + scsi_esp_cmd(esp, cmd); +} + +/* Blizzard 2060 DMA */ + +static void 
zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr, + u32 esp_count, u32 dma_count, int write, u8 cmd) +{ + struct blz2060_dma_registers __iomem *dregs = esp->dma_regs; + u8 phase = esp->sreg & ESP_STAT_PMASK; + + /* Use PIO if transferring message bytes to esp->command_block_dma */ + if (phase == ESP_MIP && addr == esp->command_block_dma) { + esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count, + dma_count, write, cmd); + return; + } + + esp->send_cmd_error = 0; + esp->send_cmd_residual = 0; + + if (write) + /* DMA receive */ + dma_sync_single_for_device(esp->dev, addr, esp_count, + DMA_FROM_DEVICE); + else + /* DMA send */ + dma_sync_single_for_device(esp->dev, addr, esp_count, + DMA_TO_DEVICE); + + addr >>= 1; + if (write) + addr &= ~(DMA_WRITE); + else + addr |= DMA_WRITE; + + writeb(addr & 0xff, &dregs->dma_addr3); + writeb((addr >> 8) & 0xff, &dregs->dma_addr2); + writeb((addr >> 16) & 0xff, &dregs->dma_addr1); + writeb((addr >> 24) & 0xff, &dregs->dma_addr0); + + scsi_esp_cmd(esp, ESP_CMD_DMA); + zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); + zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); + + scsi_esp_cmd(esp, cmd); +} + +/* Cyberstorm I DMA */ + +static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr, + u32 esp_count, u32 dma_count, int write, u8 cmd) +{ + struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev); + struct cyber_dma_registers __iomem *dregs = esp->dma_regs; + u8 phase = esp->sreg & ESP_STAT_PMASK; + unsigned char *ctrl_data = &zep->ctrl_data; + + /* Use PIO if transferring message bytes to esp->command_block_dma */ + if (phase == ESP_MIP && addr == esp->command_block_dma) { + esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count, + dma_count, write, cmd); + return; + } + + esp->send_cmd_error = 0; + esp->send_cmd_residual = 0; + + zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); + zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); + + if (write) { + /* DMA receive */ + dma_sync_single_for_device(esp->dev, addr, esp_count, + DMA_FROM_DEVICE); + addr &= ~(1); + } else { + /* DMA send */ + dma_sync_single_for_device(esp->dev, addr, esp_count, + DMA_TO_DEVICE); + addr |= 1; + } + + writeb((addr >> 24) & 0xff, &dregs->dma_addr0); + writeb((addr >> 16) & 0xff, &dregs->dma_addr1); + writeb((addr >> 8) & 0xff, &dregs->dma_addr2); + writeb(addr & 0xff, &dregs->dma_addr3); + + if (write) + *ctrl_data &= ~(CYBER_DMA_WRITE); + else + *ctrl_data |= CYBER_DMA_WRITE; + + *ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */ + + writeb(*ctrl_data, &dregs->ctrl_reg); + + scsi_esp_cmd(esp, cmd); +} + +/* Cyberstorm II DMA */ + +static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr, + u32 esp_count, u32 dma_count, int write, u8 cmd) +{ + struct cyberII_dma_registers __iomem *dregs = esp->dma_regs; + u8 phase = esp->sreg & ESP_STAT_PMASK; + + /* Use PIO if transferring message bytes to esp->command_block_dma */ + if (phase == ESP_MIP && addr == esp->command_block_dma) { + esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count, + dma_count, write, cmd); + return; + } + + esp->send_cmd_error = 0; + esp->send_cmd_residual = 0; + + zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); + zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); + + if (write) { + /* DMA receive */ + dma_sync_single_for_device(esp->dev, addr, esp_count, + DMA_FROM_DEVICE); + addr &= ~(1); + } else { + /* DMA send */ + dma_sync_single_for_device(esp->dev, addr, esp_count, + DMA_TO_DEVICE); + addr |= 1; + } + + 
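+	/* Load the 32-bit DMA address into the CyberStorm II address registers, most significant byte first. */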
writeb((addr >> 24) & 0xff, &dregs->dma_addr0); + writeb((addr >> 16) & 0xff, &dregs->dma_addr1); + writeb((addr >> 8) & 0xff, &dregs->dma_addr2); + writeb(addr & 0xff, &dregs->dma_addr3); + + scsi_esp_cmd(esp, cmd); +} + +/* Fastlane DMA */ + +static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr, + u32 esp_count, u32 dma_count, int write, u8 cmd) +{ + struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev); + struct fastlane_dma_registers __iomem *dregs = esp->dma_regs; + u8 phase = esp->sreg & ESP_STAT_PMASK; + unsigned char *ctrl_data = &zep->ctrl_data; + + /* Use PIO if transferring message bytes to esp->command_block_dma */ + if (phase == ESP_MIP && addr == esp->command_block_dma) { + esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count, + dma_count, write, cmd); + return; + } + + esp->send_cmd_error = 0; + esp->send_cmd_residual = 0; + + zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); + zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); + + if (write) { + /* DMA receive */ + dma_sync_single_for_device(esp->dev, addr, esp_count, + DMA_FROM_DEVICE); + addr &= ~(1); + } else { + /* DMA send */ + dma_sync_single_for_device(esp->dev, addr, esp_count, + DMA_TO_DEVICE); + addr |= 1; + } + + writeb(0, &dregs->clear_strobe); + z_writel(addr, ((addr & 0x00ffffff) + zep->board_base)); + + if (write) { + *ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK) | + FASTLANE_DMA_ENABLE; + } else { + *ctrl_data = ((*ctrl_data & FASTLANE_DMA_MASK) | + FASTLANE_DMA_ENABLE | + FASTLANE_DMA_WRITE); + } + + writeb(*ctrl_data, &dregs->ctrl_reg); + + scsi_esp_cmd(esp, cmd); +} + +static int zorro_esp_dma_error(struct esp *esp) +{ + return esp->send_cmd_error; +} + +/* per-board ESP driver ops */ + +static const struct esp_driver_ops blz1230_esp_ops = { + .esp_write8 = zorro_esp_write8, + .esp_read8 = zorro_esp_read8, + .irq_pending = zorro_esp_irq_pending, + .dma_length_limit = zorro_esp_dma_length_limit, + .reset_dma = zorro_esp_reset_dma, + .dma_drain = zorro_esp_dma_drain, + .dma_invalidate = zorro_esp_dma_invalidate, + .send_dma_cmd = zorro_esp_send_blz1230_dma_cmd, + .dma_error = zorro_esp_dma_error, +}; + +static const struct esp_driver_ops blz1230II_esp_ops = { + .esp_write8 = zorro_esp_write8, + .esp_read8 = zorro_esp_read8, + .irq_pending = zorro_esp_irq_pending, + .dma_length_limit = zorro_esp_dma_length_limit, + .reset_dma = zorro_esp_reset_dma, + .dma_drain = zorro_esp_dma_drain, + .dma_invalidate = zorro_esp_dma_invalidate, + .send_dma_cmd = zorro_esp_send_blz1230II_dma_cmd, + .dma_error = zorro_esp_dma_error, +}; + +static const struct esp_driver_ops blz2060_esp_ops = { + .esp_write8 = zorro_esp_write8, + .esp_read8 = zorro_esp_read8, + .irq_pending = zorro_esp_irq_pending, + .dma_length_limit = zorro_esp_dma_length_limit, + .reset_dma = zorro_esp_reset_dma, + .dma_drain = zorro_esp_dma_drain, + .dma_invalidate = zorro_esp_dma_invalidate, + .send_dma_cmd = zorro_esp_send_blz2060_dma_cmd, + .dma_error = zorro_esp_dma_error, +}; + +static const struct esp_driver_ops cyber_esp_ops = { + .esp_write8 = zorro_esp_write8, + .esp_read8 = zorro_esp_read8, + .irq_pending = cyber_esp_irq_pending, + .dma_length_limit = zorro_esp_dma_length_limit, + .reset_dma = zorro_esp_reset_dma, + .dma_drain = zorro_esp_dma_drain, + .dma_invalidate = zorro_esp_dma_invalidate, + .send_dma_cmd = zorro_esp_send_cyber_dma_cmd, + .dma_error = zorro_esp_dma_error, +}; + +static const struct esp_driver_ops cyberII_esp_ops = { + .esp_write8 = zorro_esp_write8, + .esp_read8 = zorro_esp_read8, + 
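+	/* Unlike the CyberStorm I entry, this board uses the plain ESP status check instead of the DMA-aware cyber_esp_irq_pending(). */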
.irq_pending = zorro_esp_irq_pending, + .dma_length_limit = zorro_esp_dma_length_limit, + .reset_dma = zorro_esp_reset_dma, + .dma_drain = zorro_esp_dma_drain, + .dma_invalidate = zorro_esp_dma_invalidate, + .send_dma_cmd = zorro_esp_send_cyberII_dma_cmd, + .dma_error = zorro_esp_dma_error, +}; + +static const struct esp_driver_ops fastlane_esp_ops = { + .esp_write8 = zorro_esp_write8, + .esp_read8 = zorro_esp_read8, + .irq_pending = fastlane_esp_irq_pending, + .dma_length_limit = fastlane_esp_dma_length_limit, + .reset_dma = zorro_esp_reset_dma, + .dma_drain = zorro_esp_dma_drain, + .dma_invalidate = fastlane_esp_dma_invalidate, + .send_dma_cmd = zorro_esp_send_fastlane_dma_cmd, + .dma_error = zorro_esp_dma_error, +}; + +/* Zorro driver config data */ + +struct zorro_driver_data { + const char *name; + unsigned long offset; + unsigned long dma_offset; + int absolute; /* offset is absolute address */ + int scsi_option; + const struct esp_driver_ops *esp_ops; +}; + +/* board types */ + +enum { + ZORRO_BLZ1230, + ZORRO_BLZ1230II, + ZORRO_BLZ2060, + ZORRO_CYBER, + ZORRO_CYBERII, + ZORRO_FASTLANE, +}; + +/* per-board config data */ + +static const struct zorro_driver_data zorro_esp_boards[] = { + [ZORRO_BLZ1230] = { + .name = "Blizzard 1230", + .offset = 0x8000, + .dma_offset = 0x10000, + .scsi_option = 1, + .esp_ops = &blz1230_esp_ops, + }, + [ZORRO_BLZ1230II] = { + .name = "Blizzard 1230II", + .offset = 0x10000, + .dma_offset = 0x10021, + .scsi_option = 1, + .esp_ops = &blz1230II_esp_ops, + }, + [ZORRO_BLZ2060] = { + .name = "Blizzard 2060", + .offset = 0x1ff00, + .dma_offset = 0x1ffe0, + .esp_ops = &blz2060_esp_ops, + }, + [ZORRO_CYBER] = { + .name = "CyberStormI", + .offset = 0xf400, + .dma_offset = 0xf800, + .esp_ops = &cyber_esp_ops, + }, + [ZORRO_CYBERII] = { + .name = "CyberStormII", + .offset = 0x1ff03, + .dma_offset = 0x1ff43, + .scsi_option = 1, + .esp_ops = &cyberII_esp_ops, + }, + [ZORRO_FASTLANE] = { + .name = "Fastlane", + .offset = 0x1000001, + .dma_offset = 0x1000041, + .esp_ops = &fastlane_esp_ops, + }, +}; + +static const struct zorro_device_id zorro_esp_zorro_tbl[] = { + { /* Blizzard 1230 IV */ + .id = ZORRO_ID(PHASE5, 0x11, 0), + .driver_data = ZORRO_BLZ1230, + }, + { /* Blizzard 1230 II (Zorro II) or Fastlane (Zorro III) */ + .id = ZORRO_ID(PHASE5, 0x0B, 0), + .driver_data = ZORRO_BLZ1230II, + }, + { /* Blizzard 2060 */ + .id = ZORRO_ID(PHASE5, 0x18, 0), + .driver_data = ZORRO_BLZ2060, + }, + { /* Cyberstorm */ + .id = ZORRO_ID(PHASE5, 0x0C, 0), + .driver_data = ZORRO_CYBER, + }, + { /* Cyberstorm II */ + .id = ZORRO_ID(PHASE5, 0x19, 0), + .driver_data = ZORRO_CYBERII, + }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, zorro_esp_zorro_tbl); + +static int zorro_esp_probe(struct zorro_dev *z, + const struct zorro_device_id *ent) +{ + const struct scsi_host_template *tpnt = &scsi_esp_template; + struct Scsi_Host *host; + struct esp *esp; + const struct zorro_driver_data *zdd; + struct zorro_esp_priv *zep; + unsigned long board, ioaddr, dmaaddr; + int err; + + board = zorro_resource_start(z); + zdd = &zorro_esp_boards[ent->driver_data]; + + pr_info("%s found at address 0x%lx.\n", zdd->name, board); + + zep = kzalloc(sizeof(*zep), GFP_KERNEL); + if (!zep) { + pr_err("Can't allocate device private data!\n"); + return -ENOMEM; + } + + /* let's figure out whether we have a Zorro II or Zorro III board */ + if ((z->rom.er_Type & ERT_TYPEMASK) == ERT_ZORROIII) { + if (board > 0xffffff) + zep->zorro3 = 1; + } else { + /* + * Even though most of these boards identify as Zorro II, + * they 
are in fact CPU expansion slot boards and have full + * access to all of memory. Fix up DMA bitmask here. + */ + z->dev.coherent_dma_mask = DMA_BIT_MASK(32); + } + + /* + * If Zorro III and ID matches Fastlane, our device table entry + * contains data for the Blizzard 1230 II board which does share the + * same ID. Fix up device table entry here. + * TODO: Some Cyberstom060 boards also share this ID but would need + * to use the Cyberstorm I driver data ... we catch this by checking + * for presence of ESP chip later, but don't try to fix up yet. + */ + if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) { + pr_info("%s at address 0x%lx is Fastlane Z3, fixing data!\n", + zdd->name, board); + zdd = &zorro_esp_boards[ZORRO_FASTLANE]; + } + + if (zdd->absolute) { + ioaddr = zdd->offset; + dmaaddr = zdd->dma_offset; + } else { + ioaddr = board + zdd->offset; + dmaaddr = board + zdd->dma_offset; + } + + if (!zorro_request_device(z, zdd->name)) { + pr_err("cannot reserve region 0x%lx, abort\n", + board); + err = -EBUSY; + goto fail_free_zep; + } + + host = scsi_host_alloc(tpnt, sizeof(struct esp)); + + if (!host) { + pr_err("No host detected; board configuration problem?\n"); + err = -ENOMEM; + goto fail_release_device; + } + + host->base = ioaddr; + host->this_id = 7; + + esp = shost_priv(host); + esp->host = host; + esp->dev = &z->dev; + + esp->scsi_id = host->this_id; + esp->scsi_id_mask = (1 << esp->scsi_id); + + esp->cfreq = 40000000; + + zep->esp = esp; + + dev_set_drvdata(esp->dev, zep); + + /* additional setup required for Fastlane */ + if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) { + /* map full address space up to ESP base for DMA */ + zep->board_base = ioremap(board, FASTLANE_ESP_ADDR - 1); + if (!zep->board_base) { + pr_err("Cannot allocate board address space\n"); + err = -ENOMEM; + goto fail_free_host; + } + /* initialize DMA control shadow register */ + zep->ctrl_data = (FASTLANE_DMA_FCODE | + FASTLANE_DMA_EDI | FASTLANE_DMA_ESI); + } + + esp->ops = zdd->esp_ops; + + if (ioaddr > 0xffffff) + esp->regs = ioremap(ioaddr, 0x20); + else + /* ZorroII address space remapped nocache by early startup */ + esp->regs = ZTWO_VADDR(ioaddr); + + if (!esp->regs) { + err = -ENOMEM; + goto fail_unmap_fastlane; + } + + esp->fifo_reg = esp->regs + ESP_FDATA * 4; + + /* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */ + if (zdd->scsi_option) { + zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1); + if (zorro_esp_read8(esp, ESP_CFG1) != (ESP_CONFIG1_PENABLE|7)) { + err = -ENODEV; + goto fail_unmap_regs; + } + } + + if (zep->zorro3) { + /* + * Only Fastlane Z3 for now - add switch for correct struct + * dma_registers size if adding any more + */ + esp->dma_regs = ioremap(dmaaddr, + sizeof(struct fastlane_dma_registers)); + } else + /* ZorroII address space remapped nocache by early startup */ + esp->dma_regs = ZTWO_VADDR(dmaaddr); + + if (!esp->dma_regs) { + err = -ENOMEM; + goto fail_unmap_regs; + } + + esp->command_block = dma_alloc_coherent(esp->dev, 16, + &esp->command_block_dma, + GFP_KERNEL); + + if (!esp->command_block) { + err = -ENOMEM; + goto fail_unmap_dma_regs; + } + + host->irq = IRQ_AMIGA_PORTS; + err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, + "Amiga Zorro ESP", esp); + if (err < 0) { + err = -ENODEV; + goto fail_free_command_block; + } + + /* register the chip */ + err = scsi_esp_register(esp); + + if (err) { + err = -ENOMEM; + goto fail_free_irq; + } + + return 0; + +fail_free_irq: + free_irq(host->irq, esp); + 
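+	/* The labels below undo the earlier probe steps in reverse order of setup. */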
+fail_free_command_block: + dma_free_coherent(esp->dev, 16, + esp->command_block, + esp->command_block_dma); + +fail_unmap_dma_regs: + if (zep->zorro3) + iounmap(esp->dma_regs); + +fail_unmap_regs: + if (ioaddr > 0xffffff) + iounmap(esp->regs); + +fail_unmap_fastlane: + if (zep->zorro3) + iounmap(zep->board_base); + +fail_free_host: + scsi_host_put(host); + +fail_release_device: + zorro_release_device(z); + +fail_free_zep: + kfree(zep); + + return err; +} + +static void zorro_esp_remove(struct zorro_dev *z) +{ + struct zorro_esp_priv *zep = dev_get_drvdata(&z->dev); + struct esp *esp = zep->esp; + struct Scsi_Host *host = esp->host; + + scsi_esp_unregister(esp); + + free_irq(host->irq, esp); + dma_free_coherent(esp->dev, 16, + esp->command_block, + esp->command_block_dma); + + if (zep->zorro3) { + iounmap(zep->board_base); + iounmap(esp->dma_regs); + } + + if (host->base > 0xffffff) + iounmap(esp->regs); + + scsi_host_put(host); + + zorro_release_device(z); + + kfree(zep); +} + +static struct zorro_driver zorro_esp_driver = { + .name = KBUILD_MODNAME, + .id_table = zorro_esp_zorro_tbl, + .probe = zorro_esp_probe, + .remove = zorro_esp_remove, +}; + +static int __init zorro_esp_scsi_init(void) +{ + return zorro_register_driver(&zorro_esp_driver); +} + +static void __exit zorro_esp_scsi_exit(void) +{ + zorro_unregister_driver(&zorro_esp_driver); +} + +module_init(zorro_esp_scsi_init); +module_exit(zorro_esp_scsi_exit); -- cgit v1.2.3
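Both Amiga drivers above, zorro7xx.c and zorro_esp.c, pick their register mapping the same way: addresses inside the 24-bit Zorro II window are already mapped non-cacheable at early startup and are reached through ZTWO_VADDR(), while Zorro III boards above 16 MiB have to be ioremap()ed (and iounmap()ed again on the error and remove paths). A minimal sketch of that decision follows; the helper name map_board_regs() and its length parameter are illustrative assumptions, not something taken from the patch.

#include <linux/io.h>
#include <asm/amigahw.h>	/* ZTWO_VADDR() */

/*
 * Illustrative sketch only: mirrors the mapping choice made in
 * zorro7xx_init_one() and zorro_esp_probe().  The first 16 MiB is
 * Zorro II space and needs no dynamic mapping; anything above it is
 * Zorro III and must be ioremap()ed by the caller, which then also
 * owns the matching iounmap().
 */
static void __iomem *map_board_regs(unsigned long ioaddr, unsigned long len)
{
	if (ioaddr > 0xffffff)			/* Zorro III */
		return ioremap(ioaddr, len);

	return ZTWO_VADDR(ioaddr);		/* Zorro II, statically mapped */
}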